author    Michael Tyler <michael.tyler@arm.com>  2023-01-17 11:04:14 +0000
committer Gian Marco Iodice <gianmarco.iodice@arm.com>  2023-01-18 09:43:38 +0000
commit    be13cead34e566bdd561ad3ffc3f645b460e482e (patch)
tree      cdc086de205d5a07fdd816afa6333d0b2f38d4e9 /src/core/NEON/kernels
parent    13bab71a76096985752a9e12711507021e25858d (diff)
download  ComputeLibrary-be13cead34e566bdd561ad3ffc3f645b460e482e.tar.gz
Revert "Update CPU kernels to remove x19"
This reverts commit 3c59f01c209d2732a15d97d65565ead964787a8b.

Resolves: COMPMID-5817
Change-Id: Ie2443a21854a95db1e3d0cafa2121c0187a5e237
Signed-off-by: Michael Tyler <michael.tyler@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8974
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
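Note on the change itself: every kernel touched below is a GCC extended inline assembly block, so renumbering registers (x19/x20 back in place of x20/x21, and so on) has to happen in two places at once: in the instruction strings and in the clobber list that closes each __asm__ statement. The following is a minimal, hypothetical sketch of that pattern, not code from the library, assuming an AArch64 target:

#include <cstdint>

// x19 is callee-saved under AAPCS64, so an asm block that uses it as a
// scratch register must name it in the clobber list; the compiler then
// saves and restores it around the block.
uint64_t times_three(uint64_t n)
{
    uint64_t out;
    __asm__ __volatile__(
        "mov x19, #0x3\n"
        "mul %x[out], %x[n], x19\n"
        : [out] "=r" (out)
        : [n] "r" (n)
        : "cc", "x19"  // x19 is written here, so it must be listed
    );
    return out;
}

This is why the diff below changes the clobber lists (e.g. "x19" through "x24" in place of "x20" through "x25") in lockstep with the assembly bodies.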
Diffstat (limited to 'src/core/NEON/kernels')
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_s8q_3x3_dot.cpp | 256
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_u8q_3x3_dot.cpp | 256
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_s8q_3x3_dot.cpp | 116
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_u8q_3x3_dot.cpp | 116
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp | 622
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp | 502
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp | 932
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp | 1212
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp | 1356
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp | 1882
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp | 816
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp | 780
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp | 1176
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp | 1048
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_generic_output9_mla_depthfirst/generic.cpp | 739
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp | 1631
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp | 672
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp | 670
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp | 1126
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp | 1214
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp | 1837
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp | 2044
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp | 788
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp | 820
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp | 832
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp | 726
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_generic_output9_mla_depthfirst/generic.cpp | 517
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst/generic.cpp | 283
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst/generic.cpp | 247
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp | 1391
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp | 2738
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp | 1764
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp | 1866
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp | 3374
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_generic_output9_mla_depthfirst/generic.cpp | 946
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp | 866
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp | 1084
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp | 2624
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp | 2512
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp | 2738
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp | 1764
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp | 1866
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp | 3374
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_generic_output9_mla_depthfirst/generic.cpp | 946
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp | 866
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp | 1084
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp | 2624
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp | 1704
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp | 1850
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp | 2860
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp | 1764
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp | 1866
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp | 3374
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_generic_output9_mla_depthfirst/generic.cpp | 946
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp | 2624
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp | 294
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp | 204
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp | 224
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp | 328
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp | 362
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp | 478
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp | 200
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp | 268
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_3x3_s1_4rows_mla_za/generic.cpp | 460
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_3x3_s2_4rows_mla_za/generic.cpp | 700
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_5x5_s1_4rows_mla_za/generic.cpp | 986
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_5x5_s2_4rows_mla_za/generic.cpp | 1450
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za/generic.cpp | 486
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_3x3_s2_4rows_dot_za/generic.cpp | 754
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za/generic.cpp | 998
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za/generic.cpp | 1296
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s1_4rows_dot_za/generic.cpp | 696
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s2_2rows_dot_za/generic.cpp | 592
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s2_4rows_dot_za/generic.cpp | 836
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_5x5_s1_4rows_dot_za/generic.cpp | 1020
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_5x5_s2_4rows_dot_za/generic.cpp | 1306
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s1_4rows_dot_za/generic.cpp | 696
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s2_2rows_dot_za/generic.cpp | 592
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s2_4rows_dot_za/generic.cpp | 836
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_5x5_s1_4rows_dot_za/generic.cpp | 1020
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_5x5_s2_4rows_dot_za/generic.cpp | 1306
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s1_4rows_dot_za/generic.cpp | 696
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s2_2rows_dot_za/generic.cpp | 592
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za/generic.cpp | 836
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za/generic.cpp | 1020
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za/generic.cpp | 1306
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp | 404
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp | 400
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp | 652
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp | 712
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp | 1050
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp | 1172
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp | 448
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp | 460
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp | 788
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp | 874
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp | 255
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp | 404
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp | 400
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_strided.hpp | 66
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_strided/generic.cpp | 247
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp | 652
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp | 712
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp | 1050
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp | 1172
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp | 448
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp | 460
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp | 788
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp | 874
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_generic_output9_mla_depthfirst/generic.cpp | 194
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst/generic.cpp | 352
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst/generic.cpp | 610
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp | 752
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp | 848
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp | 554
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp | 620
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp | 1018
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp | 548
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp | 646
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp | 704
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp | 848
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp | 554
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp | 620
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp | 1018
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp | 548
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp | 646
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp | 596
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp | 620
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp | 1014
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp | 192
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_generic_depthfirst/generic.cpp | 396
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp | 156
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_generic_depthfirst/generic.cpp | 392
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp | 192
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_generic_depthfirst/generic.cpp | 348
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp | 156
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_generic_depthfirst/generic.cpp | 344
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_avg_generic_depthfirst/generic.cpp | 246
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp | 156
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_generic_depthfirst/generic.cpp | 478
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_avg_generic_depthfirst/generic.cpp | 252
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_max_generic_depthfirst/generic.cpp | 496
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_avg_generic_depthfirst/generic.cpp | 246
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp | 156
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_generic_depthfirst/generic.cpp | 478
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_avg_generic_depthfirst/generic.cpp | 264
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_max_generic_depthfirst/generic.cpp | 518
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp | 146
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_avg_generic_depthfirst/generic.cpp | 188
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp | 126
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_max_generic_depthfirst/generic.cpp | 186
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp | 146
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_avg_generic_depthfirst/generic.cpp | 188
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp | 126
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_max_generic_depthfirst/generic.cpp | 186
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_avg_generic_depthfirst/generic.cpp | 147
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp | 126
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_max_generic_depthfirst/generic.cpp | 186
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8q_nhwc_avg_generic_depthfirst/generic.cpp | 149
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8q_nhwc_max_generic_depthfirst/generic.cpp | 212
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_avg_generic_depthfirst/generic.cpp | 147
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp | 126
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_max_generic_depthfirst/generic.cpp | 186
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8q_nhwc_avg_generic_depthfirst/generic.cpp | 155
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8q_nhwc_max_generic_depthfirst/generic.cpp | 228
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp | 146
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_generic_depthfirst/generic.cpp | 188
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp | 120
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_generic_depthfirst/generic.cpp | 186
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp | 146
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_generic_depthfirst/generic.cpp | 188
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp | 120
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_generic_depthfirst/generic.cpp | 186
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_avg_generic_depthfirst/generic.cpp | 140
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp | 120
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_generic_depthfirst/generic.cpp | 186
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_avg_generic_depthfirst/generic.cpp | 140
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_max_generic_depthfirst/generic.cpp | 212
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_avg_generic_depthfirst/generic.cpp | 140
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp | 120
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_generic_depthfirst/generic.cpp | 186
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_avg_generic_depthfirst/generic.cpp | 148
-rw-r--r-- src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_max_generic_depthfirst/generic.cpp | 228
-rw-r--r-- src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp | 10
-rw-r--r-- src/core/NEON/kernels/arm_gemm/gemm_int8.cpp | 10
-rw-r--r-- src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp | 10
-rw-r--r-- src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp | 10
-rw-r--r-- src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp | 10
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave4_block16_s8_s8.hpp | 170
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave4_block16_s8_s8_summing.hpp | 196
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave4_block16_u8_u8_summing.hpp | 196
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_bf16_fp32.hpp | 240
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp16_fp16.hpp | 276
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp16_fp32.hpp | 230
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp32_fp32.hpp | 212
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s16_s16.hpp | 276
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s16_s16_summing.hpp | 288
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s8_s16.hpp | 336
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s8_s16_summing.hpp | 332
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_u16_u16_summing.hpp | 288
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_u8_u16.hpp | 336
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_u8_u16_summing.hpp | 332
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block2_bf16_bf16.hpp | 290
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block2_fp32_fp32.hpp | 186
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_bf16_bf16.hpp | 254
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_fp32_bf16.hpp | 148
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_s8_s8.hpp | 426
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_s8_s8_summing.hpp | 478
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_u8_u8_summing.hpp | 478
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block8_s8_s8.hpp | 390
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block8_s8_s8_summing.hpp | 440
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block8_u8_u8_summing.hpp | 440
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave1VL_block2_fp32_bf16.hpp | 136
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave2VL_block2_fp32_bf16.hpp | 162
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave4VL_block2_fp32_bf16.hpp | 138
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_bf16_bf16.hpp | 226
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block2_bf16_bf16.hpp | 286
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_s8_s8.hpp | 242
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_s8_s8_summing.hpp | 294
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_u8_u8.hpp | 242
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_u8_u8_summing.hpp | 294
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_fp16_fp16.hpp | 226
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_fp32_fp32.hpp | 268
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_bf16_bf16.hpp | 84
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block2_bf16_bf16.hpp | 448
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block2_fp16_fp16.hpp | 448
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_s8_s8.hpp | 440
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_s8_s8_summing.hpp | 468
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_u8_u8.hpp | 440
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_u8_u8_summing.hpp | 448
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_fp16_fp16.hpp | 84
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_fp32_fp32.hpp | 430
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block2_bf16_bf16.hpp | 134
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_s8_s8.hpp | 116
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_s8_s8_summing.hpp | 194
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_u8_u8.hpp | 116
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_u8_u8_summing.hpp | 190
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_fp32_fp32.hpp | 128
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_bf16fp32_mmla_6x16/generic.cpp | 4066
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp16_mla_6x32/generic.cpp | 5920
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32_mla_6x16/generic.cpp | 3712
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_4x24/generic.cpp | 2864
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_dot_8x12/generic.cpp | 92
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_mmla_8x12/generic.cpp | 116
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp16_mla_8x24/generic.cpp | 92
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp32_mla_8x12/generic.cpp | 112
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_dot_6x16/generic.cpp | 3210
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_mmla_6x16/generic.cpp | 3466
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32/a55.cpp | 3080
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32/generic.cpp | 4566
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x24/a55.cpp | 1990
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x24/generic.cpp | 2336
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_6x16/a55.cpp | 2216
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_6x16/generic.cpp | 3126
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_8x4/a55.cpp | 538
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_8x4/generic.cpp | 2246
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32bf16fp32_mmla_4x24/generic.cpp | 2398
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32bf16fp32_mmla_6x16/generic.cpp | 3230
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16/a55.cpp | 1812
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16/generic.cpp | 1456
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_mmla_4x16/generic.cpp | 1614
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16/a55.cpp | 2752
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16/generic.cpp | 2942
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_mmla_6x16/generic.cpp | 3280
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16/a55.cpp | 3690
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16/generic.cpp | 3664
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_mmla_6x16/generic.cpp | 4062
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/a55.cpp | 1812
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/generic.cpp | 1456
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_mmla_4x16/generic.cpp | 1614
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16/a55.cpp | 3690
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16/generic.cpp | 3664
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_mmla_6x16/generic.cpp | 4062
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_dot_8x12/generic.cpp | 80
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_mmla_8x12/a510.cpp | 114
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_mmla_8x12/generic.cpp | 134
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_s8s32_mmla_8x12/a510.cpp | 114
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_s8s32_mmla_8x12/generic.cpp | 209
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8u32_mmla_8x12/a510.cpp | 114
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8u32_mmla_8x12/generic.cpp | 134
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_bf16fp32_dot_16VL/generic.cpp | 558
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32_mla_16VL/generic.cpp | 558
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32bf16fp32_dot_16VL/generic.cpp | 592
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_s8qa_dot_16VL/generic.cpp | 600
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_u8qa_dot_16VL/generic.cpp | 600
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL/generic.cpp | 322
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL/generic.cpp | 416
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL/generic.cpp | 610
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_1VLx4VL/generic.cpp | 318
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_2VLx2VL/generic.cpp | 412
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_4VLx1VL/generic.cpp | 606
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_1VLx4VL/generic.cpp | 274
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_2VLx2VL/generic.cpp | 342
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_4VLx1VL/generic.cpp | 444
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL/generic.cpp | 270
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL/generic.cpp | 316
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL/generic.cpp | 414
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_1VLx4VL/generic.cpp | 274
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_2VLx2VL/generic.cpp | 342
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_4VLx1VL/generic.cpp | 444
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_bf16fp32_mmla_6x4VL/generic.cpp | 1888
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/a64fx.cpp | 1508
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/generic.cpp | 2542
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL/a64fx.cpp | 1508
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL/generic.cpp | 1974
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32bf16fp32_mmla_4x6VL/generic.cpp | 1244
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL/generic.cpp | 122
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL/a64fx.cpp | 96
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL/generic.cpp | 98
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL/a64fx.cpp | 96
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL/generic.cpp | 98
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_6x4VL/generic.cpp | 1698
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_6x4VL/generic.cpp | 1661
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL/a64fx.cpp | 1266
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL/generic.cpp | 1882
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/a64fx.cpp | 1266
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/generic.cpp | 1698
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL/a64fx.cpp | 1250
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL/generic.cpp | 1510
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32bf16fp32_mmla_4x6VL/generic.cpp | 1024
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32bf16fp32_mmla_6x4VL/generic.cpp | 1400
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_dot_4x4VL/generic.cpp | 1020
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_mmla_4x4VL/generic.cpp | 996
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_dot_6x4VL/generic.cpp | 1948
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_mmla_6x4VL/generic.cpp | 1878
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_6x4VL/a64fx.cpp | 1134
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_6x4VL/generic.cpp | 1562
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_mmla_6x4VL/generic.cpp | 1518
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_dot_4x4VL/generic.cpp | 1020
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_mmla_4x4VL/generic.cpp | 996
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL/a64fx.cpp | 1134
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL/generic.cpp | 1562
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_mmla_6x4VL/generic.cpp | 1518
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_bf16fp32_dot_8x3VL/generic.cpp | 78
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_bf16fp32_mmla_8x3VL/generic.cpp | 140
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_8x3VL/a64fx.cpp | 70
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_8x3VL/generic.cpp | 76
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL/a64fx.cpp | 70
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL/generic.cpp | 78
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_8x3VL/a64fx.cpp | 70
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_8x3VL/generic.cpp | 78
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_mmla_8x3VL/generic.cpp | 130
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_8x3VL/a64fx.cpp | 70
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_8x3VL/generic.cpp | 78
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_mmla_8x3VL/generic.cpp | 130
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_fp32_mla_8x1VL.hpp | 88
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_fp32_mla_8x1VL/generic.cpp | 18807
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_s8s32_dot_8x1VL.hpp | 88
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_s8s32_dot_8x1VL/generic.cpp | 8747
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_u8u32_dot_8x1VL.hpp | 88
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_u8u32_dot_8x1VL/generic.cpp | 8747
-rw-r--r-- src/core/NEON/kernels/arm_gemm/rowsum_indirect_s8.cpp | 1308
-rw-r--r-- src/core/NEON/kernels/arm_gemm/rowsum_indirect_u8.cpp | 1308
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_128.hpp | 354
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_1x4.hpp | 590
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_1x8.hpp | 452
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_2x2.hpp | 428
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_2x4.hpp | 644
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_2x4_fp32bf16.hpp | 1201
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_s8s16.hpp | 344
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_u8u16.hpp | 344
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16.hpp | 106
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_1x4.hpp | 394
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_1x8.hpp | 360
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_2x2.hpp | 266
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_2x4.hpp | 780
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_2x4_fp32bf16.hpp | 656
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24.hpp | 283
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24_2x4_fp32bf16.hpp | 1303
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24_bf16fp32.hpp | 368
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24_fp16fp32.hpp | 369
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_32_1x4.hpp | 728
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_32_2x2.hpp | 496
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_48.hpp | 229
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_4_1x16.hpp | 394
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_4_1x4.hpp | 398
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_64.hpp | 247
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_96.hpp | 319
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL.hpp | 162
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_1x4.hpp | 128
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_2x2.hpp | 136
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_2x2_fp32bf16.hpp | 200
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL.hpp | 198
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_1x4.hpp | 94
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_2x2.hpp | 178
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_2x2_fp32bf16.hpp | 200
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL.hpp | 222
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_1x4.hpp | 92
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_2x2.hpp | 172
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_2x2_fp32bf16.hpp | 224
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL.hpp | 164
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_1x4.hpp | 60
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_2x2.hpp | 120
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_2x2_fp32bf16.hpp | 164
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_12VL_2x4_fp32bf16.hpp | 318
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_1VL.hpp | 152
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_1VL_1x4.hpp | 386
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_3VL.hpp | 118
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_3VL_1x4.hpp | 501
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_3VL_2x2.hpp | 413
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_4VL.hpp | 140
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_4VL_1x4.hpp | 414
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_4VL_2x2.hpp | 474
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_1x8.hpp | 382
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_2x4.hpp | 588
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_2x4_fp32bf16.hpp | 184
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_4x2.hpp | 435
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL.hpp | 391
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_1x4.hpp | 398
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_1x8.hpp | 308
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_2x2.hpp | 549
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_2x4.hpp | 703
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_2x4_fp32bf16.hpp | 224
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/input_transforms/a64_fp32_6x6.cpp | 44
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/input_transforms/sme_fp32_mla_6x6.cpp | 18
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/input_transforms/sve_fp32_6x6.cpp | 18
-rw-r--r-- src/core/NEON/kernels/convolution/winograd/output_transforms/sme_fp32_mopa_4x4_3x3.cpp | 90
417 files changed, 197862 insertions, 159431 deletions
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_s8q_3x3_dot.cpp b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_s8q_3x3_dot.cpp
index adda78f164..3d3447bf3c 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_s8q_3x3_dot.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_s8q_3x3_dot.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -51,76 +51,76 @@ size_t interleave_a64_s8q_3x3_dot::get_packed_size(const DepthwiseArgs &args)
void interleave_a64_s8q_3x3_dot::pack_parameters(unsigned int n_channels, void *outptr, const int32_t *bias, const int8_t *weights, const arm_gemm::Requantize32 &qp, size_t ld_weight_col, size_t ld_weight_row)
{
__asm__ __volatile__(
+ "movi v0.16b, #0x0\n"
"cmp %x[ld_weight_col], XZR\n"
+ "movi v31.16b, #0x1\n"
"csel %x[ld_weight_col], %x[ld_weight_col], %x[n_channels], NE\n"
"movi v16.4s, #0x9\n"
- "movi v0.16b, #0x0\n"
- "mov x21, #0x3\n"
- "mul x21, %x[ld_weight_col], x21\n"
- "add x20, %x[qp], %[offsetof_input_offset]\n"
- "ld1r { v31.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_weights_offset]\n"
- "ld1r { v30.4s }, [x20]\n"
+ "mov x19, #0x3\n"
"cmp %x[ld_weight_row], XZR\n"
- "mul v30.4s, v30.4s, v31.4s\n"
- "csel %x[ld_weight_row], %x[ld_weight_row], x21, NE\n"
- "lsr x21, %x[n_channels], #0x2\n"
- "movi v29.16b, #0x1\n"
- "mul v30.4s, v30.4s, v16.4s\n"
- "add x25, %x[weights], %x[ld_weight_row]\n"
- "add x20, %x[qp], %[offsetof_per_layer_mul]\n"
- "ld1r { v28.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_per_layer_right_shift]\n"
- "ld1r { v27.4s }, [x20]\n"
- "add x24, x25, %x[ld_weight_row]\n"
- "add x23, %x[ld_weight_col], %x[ld_weight_col]\n"
- "mov x22, #0x0\n"
- "cbz x21, 4f\n"
+ "mul x19, %x[ld_weight_col], x19\n"
+ "csel %x[ld_weight_row], %x[ld_weight_row], x19, NE\n"
+ "add x24, %x[weights], %x[ld_weight_row]\n"
+ "add x23, x24, %x[ld_weight_row]\n"
+ "add x22, %x[ld_weight_col], %x[ld_weight_col]\n"
+ "lsr x20, %x[n_channels], #0x2\n"
+ "mov x21, #0x0\n"
+ "add x19, %x[qp], %[offsetof_input_offset]\n"
+ "ld1r { v30.4s }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_weights_offset]\n"
+ "ld1r { v29.4s }, [x19]\n"
+ "mul v29.4s, v29.4s, v30.4s\n"
+ "add x19, %x[qp], %[offsetof_per_layer_mul]\n"
+ "ld1r { v28.4s }, [x19]\n"
+ "mul v29.4s, v29.4s, v16.4s\n"
+ "add x19, %x[qp], %[offsetof_per_layer_right_shift]\n"
+ "ld1r { v27.4s }, [x19]\n"
+ "cbz x20, 4f\n"
"1:" // Loop
"movi v26.4s, #0x0\n"
"cbz %x[bias], 2f\n"
- "ldr q26, [%x[bias], x22]\n"
+ "ldr q26, [%x[bias], x21]\n"
"2:" // Loop: Skip bias load
- "ldr s25, [%x[weights], #0x0]\n"
- "ldr s22, [%x[weights], %x[ld_weight_col]]\n"
- "zip1 v22.16b, v22.16b, v0.16b\n"
- "movi v24.4s, #0x0\n"
- "ldr s20, [%x[weights], x23]\n"
- "ldr s23, [x25, #0x0]\n"
- "zip1 v20.16b, v25.16b, v20.16b\n"
- "zip1 v22.16b, v20.16b, v22.16b\n"
- "ldr s21, [x25, %x[ld_weight_col]]\n"
- "ldr s18, [x25, x23]\n"
- "zip1 v20.16b, v23.16b, v18.16b\n"
- "zip1 v18.16b, v21.16b, v0.16b\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s19, [x24, %x[ld_weight_col]]\n"
- ".inst 0x4e9697b8 // sdot v24.4s, v29.16b, v22.16b\n"
- "zip1 v18.16b, v20.16b, v18.16b\n"
- "ldr s16, [x24, x23]\n"
- "zip1 v17.16b, v17.16b, v16.16b\n"
- "zip1 v16.16b, v19.16b, v0.16b\n"
- ".inst 0x4e9297b8 // sdot v24.4s, v29.16b, v18.16b\n"
- "zip1 v16.16b, v17.16b, v16.16b\n"
- ".inst 0x4e9097b8 // sdot v24.4s, v29.16b, v16.16b\n"
+ "movi v25.4s, #0x0\n"
+ "ldr s24, [%x[weights], #0x0]\n"
+ "ldr s23, [%x[weights], %x[ld_weight_col]]\n"
+ "zip1 v23.16b, v23.16b, v0.16b\n"
+ "ldr s21, [%x[weights], x22]\n"
"add %x[weights], %x[weights], #0x4\n"
- "add x25, x25, #0x4\n"
- "mls v26.4s, v24.4s, v31.4s\n"
+ "zip1 v21.16b, v24.16b, v21.16b\n"
+ "ldr s22, [x24, #0x0]\n"
+ "ldr s20, [x24, %x[ld_weight_col]]\n"
+ "zip1 v21.16b, v21.16b, v23.16b\n"
+ "ldr s18, [x24, x22]\n"
+ ".inst 0x4e9597f9 // sdot v25.4s, v31.16b, v21.16b\n"
"add x24, x24, #0x4\n"
- "add v26.4s, v26.4s, v30.4s\n"
+ "zip1 v20.16b, v20.16b, v0.16b\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s17, [x23, %x[ld_weight_col]]\n"
+ "zip1 v18.16b, v22.16b, v18.16b\n"
+ "ldr s16, [x23, x22]\n"
+ "zip1 v18.16b, v18.16b, v20.16b\n"
+ "add x23, x23, #0x4\n"
+ ".inst 0x4e9297f9 // sdot v25.4s, v31.16b, v18.16b\n"
+ "zip1 v17.16b, v17.16b, v0.16b\n"
+ "zip1 v16.16b, v19.16b, v16.16b\n"
+ "zip1 v16.16b, v16.16b, v17.16b\n"
+ ".inst 0x4e9097f9 // sdot v25.4s, v31.16b, v16.16b\n"
+ "mls v26.4s, v25.4s, v30.4s\n"
+ "add v26.4s, v26.4s, v29.4s\n"
"str q26, [%x[outptr], #0x0]\n"
- "str q22, [%x[outptr], #0x10]\n"
+ "str q21, [%x[outptr], #0x10]\n"
"str q18, [%x[outptr], #0x20]\n"
"str q16, [%x[outptr], #0x30]\n"
"add %x[outptr], %x[outptr], #0x40\n"
"cbz %x[rq_mul_perchannel], 3f\n"
- "ldr q28, [%x[rq_mul_perchannel], x22]\n"
- "ldr q27, [%x[rq_shift_perchannel], x22]\n"
+ "ldr q28, [%x[rq_mul_perchannel], x21]\n"
+ "ldr q27, [%x[rq_shift_perchannel], x21]\n"
"3:" // Loop: Quantisation parameters: Store
- "subs x21, x21, #0x1\n"
"str q28, [%x[outptr], #0x0]\n"
- "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
"str q27, [%x[outptr], #0x10]\n"
+ "subs x20, x20, #0x1\n"
"add %x[outptr], %x[outptr], #0x20\n"
"bgt 1b\n"
"tst %x[n_channels], #0x3\n"
@@ -128,113 +128,119 @@ void interleave_a64_s8q_3x3_dot::pack_parameters(unsigned int n_channels, void *
"4:" // Oddments
"movi v26.4s, #0x0\n"
"cbz %x[bias], 7f\n"
- "add %x[bias], %x[bias], x22\n"
+ "add %x[bias], %x[bias], x21\n"
"tbz %x[n_channels], #1, 5f\n"
"ld1 { v26.d }[0], [%x[bias]], #0x8\n"
"tbz %x[n_channels], #0, 6f\n"
"ld1 { v26.s }[2], [%x[bias]], #0x4\n"
"b 6f\n"
"5:" // Oddments: Load bias: Bit 1: Unset
+ "tbz %x[n_channels], #0, 6f\n"
"ld1 { v26.s }[0], [%x[bias]], #0x4\n"
"6:" // Oddments: Load bias: Bit 1: End
+
"7:" // Oddments: Skip bias load
"tbz %x[n_channels], #1, 8f\n"
- "ld1 { v25.h }[0], [%x[weights]]\n"
- "ld1 { v23.h }[0], [x25]\n"
- "add x21, %x[weights], %x[ld_weight_col]\n"
- "add x20, %x[weights], x23\n"
- "ld1 { v22.h }[0], [x21]\n"
- "ld1 { v20.h }[0], [x20]\n"
- "add x21, x25, %x[ld_weight_col]\n"
- "add x20, x25, x23\n"
- "ld1 { v21.h }[0], [x21]\n"
- "ld1 { v18.h }[0], [x20]\n"
- "add x21, x24, %x[ld_weight_col]\n"
- "add x20, x24, x23\n"
- "ld1 { v17.h }[0], [x24]\n"
- "ld1 { v19.h }[0], [x21]\n"
+ "ld1 { v24.h }[0], [%x[weights]]\n"
+ "ld1 { v22.h }[0], [x24]\n"
+ "add x20, %x[weights], %x[ld_weight_col]\n"
+ "ld1 { v19.h }[0], [x23]\n"
+ "add x19, %x[weights], x22\n"
+ "ld1 { v23.h }[0], [x20]\n"
"add %x[weights], %x[weights], #0x2\n"
- "add x25, x25, #0x2\n"
- "ld1 { v16.h }[0], [x20]\n"
+ "ld1 { v21.h }[0], [x19]\n"
+ "add x20, x24, %x[ld_weight_col]\n"
+ "add x19, x24, x22\n"
+ "ld1 { v20.h }[0], [x20]\n"
+ "ld1 { v18.h }[0], [x19]\n"
"add x24, x24, #0x2\n"
+ "add x19, x23, %x[ld_weight_col]\n"
+ "ld1 { v17.h }[0], [x19]\n"
+ "add x19, x23, x22\n"
+ "ld1 { v16.h }[0], [x19]\n"
+ "add x23, x23, #0x2\n"
"tbz %x[n_channels], #0, 9f\n"
- "ld1 { v25.b }[2], [%x[weights]]\n"
- "ld1 { v23.b }[2], [x25]\n"
- "add x21, %x[weights], %x[ld_weight_col]\n"
- "add x20, %x[weights], x23\n"
- "ld1 { v22.b }[2], [x21]\n"
- "ld1 { v20.b }[2], [x20]\n"
- "add x21, x25, %x[ld_weight_col]\n"
- "add x20, x25, x23\n"
- "ld1 { v21.b }[2], [x21]\n"
- "ld1 { v18.b }[2], [x20]\n"
- "add x21, x24, %x[ld_weight_col]\n"
- "add x20, x24, x23\n"
- "ld1 { v17.b }[2], [x24]\n"
- "ld1 { v19.b }[2], [x21]\n"
+ "ld1 { v24.b }[2], [%x[weights]]\n"
+ "ld1 { v22.b }[2], [x24]\n"
+ "add x20, %x[weights], %x[ld_weight_col]\n"
+ "ld1 { v19.b }[2], [x23]\n"
+ "add x19, %x[weights], x22\n"
+ "ld1 { v23.b }[2], [x20]\n"
"add %x[weights], %x[weights], #0x1\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "ld1 { v21.b }[2], [x19]\n"
+ "add x20, x24, %x[ld_weight_col]\n"
+ "add x19, x24, x22\n"
+ "ld1 { v20.b }[2], [x20]\n"
+ "ld1 { v18.b }[2], [x19]\n"
+ "add x20, x23, %x[ld_weight_col]\n"
+ "add x19, x23, x22\n"
+ "ld1 { v17.b }[2], [x20]\n"
+ "ld1 { v16.b }[2], [x19]\n"
"b 9f\n"
"8:" // Oddments: Load weights: Bit 1: Unset
- "ld1 { v25.b }[0], [%x[weights]]\n"
- "ld1 { v23.b }[0], [x25]\n"
- "add x21, %x[weights], %x[ld_weight_col]\n"
- "add x20, %x[weights], x23\n"
- "ld1 { v22.b }[0], [x21]\n"
- "ld1 { v20.b }[0], [x20]\n"
- "add x21, x25, %x[ld_weight_col]\n"
- "add x20, x25, x23\n"
- "ld1 { v21.b }[0], [x21]\n"
- "ld1 { v18.b }[0], [x20]\n"
- "add x21, x24, %x[ld_weight_col]\n"
- "add x20, x24, x23\n"
- "ld1 { v17.b }[0], [x24]\n"
- "ld1 { v19.b }[0], [x21]\n"
+ "tbz %x[n_channels], #0, 9f\n"
+ "ld1 { v24.b }[0], [%x[weights]]\n"
+ "ld1 { v22.b }[0], [x24]\n"
+ "add x20, %x[weights], %x[ld_weight_col]\n"
+ "ld1 { v19.b }[0], [x23]\n"
+ "add x19, %x[weights], x22\n"
+ "ld1 { v23.b }[0], [x20]\n"
"add %x[weights], %x[weights], #0x1\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "ld1 { v21.b }[0], [x19]\n"
+ "add x20, x24, %x[ld_weight_col]\n"
+ "add x19, x24, x22\n"
+ "ld1 { v20.b }[0], [x20]\n"
+ "ld1 { v18.b }[0], [x19]\n"
+ "add x20, x23, %x[ld_weight_col]\n"
+ "add x19, x23, x22\n"
+ "ld1 { v17.b }[0], [x20]\n"
+ "ld1 { v16.b }[0], [x19]\n"
"9:" // Oddments: Load weights: Bit 1: End
- "zip1 v20.16b, v25.16b, v20.16b\n"
- "zip1 v22.16b, v22.16b, v0.16b\n"
- "zip1 v22.16b, v20.16b, v22.16b\n"
- "zip1 v20.16b, v23.16b, v18.16b\n"
- "zip1 v18.16b, v21.16b, v0.16b\n"
- "movi v24.4s, #0x0\n"
- ".inst 0x4e9697b8 // sdot v24.4s, v29.16b, v22.16b\n"
- "zip1 v18.16b, v20.16b, v18.16b\n"
- "zip1 v17.16b, v17.16b, v16.16b\n"
- ".inst 0x4e9297b8 // sdot v24.4s, v29.16b, v18.16b\n"
- "zip1 v16.16b, v19.16b, v0.16b\n"
- "zip1 v16.16b, v17.16b, v16.16b\n"
- ".inst 0x4e9097b8 // sdot v24.4s, v29.16b, v16.16b\n"
- "mls v26.4s, v24.4s, v31.4s\n"
- "add v26.4s, v26.4s, v30.4s\n"
+ "zip1 v21.16b, v24.16b, v21.16b\n"
+ "zip1 v23.16b, v23.16b, v0.16b\n"
+ "zip1 v18.16b, v22.16b, v18.16b\n"
+ "zip1 v20.16b, v20.16b, v0.16b\n"
+ "zip1 v16.16b, v19.16b, v16.16b\n"
+ "zip1 v17.16b, v17.16b, v0.16b\n"
+ "zip1 v21.16b, v21.16b, v23.16b\n"
+ "zip1 v18.16b, v18.16b, v20.16b\n"
+ "zip1 v16.16b, v16.16b, v17.16b\n"
+ "movi v25.4s, #0x0\n"
+ ".inst 0x4e9597f9 // sdot v25.4s, v31.16b, v21.16b\n"
+ ".inst 0x4e9297f9 // sdot v25.4s, v31.16b, v18.16b\n"
+ ".inst 0x4e9097f9 // sdot v25.4s, v31.16b, v16.16b\n"
+ "mls v26.4s, v25.4s, v30.4s\n"
+ "add v26.4s, v26.4s, v29.4s\n"
"str q26, [%x[outptr], #0x0]\n"
- "str q22, [%x[outptr], #0x10]\n"
+ "str q21, [%x[outptr], #0x10]\n"
"str q18, [%x[outptr], #0x20]\n"
"str q16, [%x[outptr], #0x30]\n"
"add %x[outptr], %x[outptr], #0x40\n"
"cbz %x[rq_mul_perchannel], 12f\n"
- "add x21, %x[rq_mul_perchannel], x22\n"
- "add x20, %x[rq_shift_perchannel], x22\n"
+ "add x20, %x[rq_mul_perchannel], x21\n"
+ "add x19, %x[rq_shift_perchannel], x21\n"
"tbz %x[n_channels], #1, 10f\n"
- "ld1 { v28.d }[0], [x21], #0x8\n"
- "ld1 { v27.d }[0], [x20], #0x8\n"
+ "ld1 { v28.d }[0], [x20], #0x8\n"
+ "ld1 { v27.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v28.s }[2], [x21], #0x4\n"
- "ld1 { v27.s }[2], [x20], #0x4\n"
+ "ld1 { v28.s }[2], [x20], #0x4\n"
+ "ld1 { v27.s }[2], [x19], #0x4\n"
"b 11f\n"
"10:" // Oddments: Quantisation parameters: Load quant params: Bit 1: Unset
- "ld1 { v28.s }[0], [x21], #0x4\n"
- "ld1 { v27.s }[0], [x20], #0x4\n"
+ "tbz %x[n_channels], #0, 11f\n"
+ "ld1 { v28.s }[0], [x20], #0x4\n"
+ "ld1 { v27.s }[0], [x19], #0x4\n"
"11:" // Oddments: Quantisation parameters: Load quant params: Bit 1: End
+
"12:" // Oddments: Quantisation parameters: Store
"str q28, [%x[outptr], #0x0]\n"
"str q27, [%x[outptr], #0x10]\n"
"add %x[outptr], %x[outptr], #0x20\n"
"13:" // End
+
: [bias] "+&r" (bias), [ld_weight_col] "+&r" (ld_weight_col), [ld_weight_row] "+&r" (ld_weight_row), [outptr] "+&r" (outptr), [weights] "+&r" (weights)
: [n_channels] "r" (n_channels), [offsetof_input_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [offsetof_weights_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [qp] "r" (&qp), [rq_mul_perchannel] "r" (qp.per_channel_muls), [rq_shift_perchannel] "r" (qp.per_channel_right_shifts)
- : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
+ : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24"
);
}
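
Note on the pattern in the hunk above: the revert renumbers the kernel's scratch registers (x20..x25 back down to x19..x24) and updates the asm clobber list to match. Under AAPCS64, x19 is a callee-saved register, so naming it in the clobber list is what obliges the compiler to spill and restore it around the asm block. A minimal stand-alone sketch of that mechanism follows; the function and loop are illustrative only, not taken from the library:

#include <cstdint>

// Illustrative only: a trivial countdown loop that claims x19 as a
// scratch register. Listing "x19" among the clobbers tells the
// compiler the asm block overwrites it, so the callee-saved value is
// preserved as the ABI requires.
static void countdown(int64_t count)
{
    __asm__ __volatile__(
        "mov x19, %x[count]\n"
        "1:" // Loop
        "subs x19, x19, #0x1\n"
        "b.gt 1b\n"
        :
        : [count] "r" (count)
        : "cc", "x19"
    );
}
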
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_u8q_3x3_dot.cpp b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_u8q_3x3_dot.cpp
index b89886ae0c..a725dcab59 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_u8q_3x3_dot.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_u8q_3x3_dot.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -51,76 +51,76 @@ size_t interleave_a64_u8q_3x3_dot::get_packed_size(const DepthwiseArgs &args)
void interleave_a64_u8q_3x3_dot::pack_parameters(unsigned int n_channels, void *outptr, const int32_t *bias, const uint8_t *weights, const arm_gemm::Requantize32 &qp, size_t ld_weight_col, size_t ld_weight_row)
{
__asm__ __volatile__(
+ "movi v0.16b, #0x0\n"
"cmp %x[ld_weight_col], XZR\n"
+ "movi v31.16b, #0x1\n"
"csel %x[ld_weight_col], %x[ld_weight_col], %x[n_channels], NE\n"
"movi v16.4s, #0x9\n"
- "movi v0.16b, #0x0\n"
- "mov x21, #0x3\n"
- "mul x21, %x[ld_weight_col], x21\n"
- "add x20, %x[qp], %[offsetof_input_offset]\n"
- "ld1r { v31.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_weights_offset]\n"
- "ld1r { v30.4s }, [x20]\n"
+ "mov x19, #0x3\n"
"cmp %x[ld_weight_row], XZR\n"
- "mul v30.4s, v30.4s, v31.4s\n"
- "csel %x[ld_weight_row], %x[ld_weight_row], x21, NE\n"
- "lsr x21, %x[n_channels], #0x2\n"
- "movi v29.16b, #0x1\n"
- "mul v30.4s, v30.4s, v16.4s\n"
- "add x25, %x[weights], %x[ld_weight_row]\n"
- "add x20, %x[qp], %[offsetof_per_layer_mul]\n"
- "ld1r { v28.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_per_layer_right_shift]\n"
- "ld1r { v27.4s }, [x20]\n"
- "add x24, x25, %x[ld_weight_row]\n"
- "add x23, %x[ld_weight_col], %x[ld_weight_col]\n"
- "mov x22, #0x0\n"
- "cbz x21, 4f\n"
+ "mul x19, %x[ld_weight_col], x19\n"
+ "csel %x[ld_weight_row], %x[ld_weight_row], x19, NE\n"
+ "add x24, %x[weights], %x[ld_weight_row]\n"
+ "add x23, x24, %x[ld_weight_row]\n"
+ "add x22, %x[ld_weight_col], %x[ld_weight_col]\n"
+ "lsr x20, %x[n_channels], #0x2\n"
+ "mov x21, #0x0\n"
+ "add x19, %x[qp], %[offsetof_input_offset]\n"
+ "ld1r { v30.4s }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_weights_offset]\n"
+ "ld1r { v29.4s }, [x19]\n"
+ "mul v29.4s, v29.4s, v30.4s\n"
+ "add x19, %x[qp], %[offsetof_per_layer_mul]\n"
+ "ld1r { v28.4s }, [x19]\n"
+ "mul v29.4s, v29.4s, v16.4s\n"
+ "add x19, %x[qp], %[offsetof_per_layer_right_shift]\n"
+ "ld1r { v27.4s }, [x19]\n"
+ "cbz x20, 4f\n"
"1:" // Loop
"movi v26.4s, #0x0\n"
"cbz %x[bias], 2f\n"
- "ldr q26, [%x[bias], x22]\n"
+ "ldr q26, [%x[bias], x21]\n"
"2:" // Loop: Skip bias load
- "ldr s25, [%x[weights], #0x0]\n"
- "ldr s22, [%x[weights], %x[ld_weight_col]]\n"
- "zip1 v22.16b, v22.16b, v0.16b\n"
- "movi v24.4s, #0x0\n"
- "ldr s20, [%x[weights], x23]\n"
- "ldr s23, [x25, #0x0]\n"
- "zip1 v20.16b, v25.16b, v20.16b\n"
- "zip1 v22.16b, v20.16b, v22.16b\n"
- "ldr s21, [x25, %x[ld_weight_col]]\n"
- "ldr s18, [x25, x23]\n"
- "zip1 v20.16b, v23.16b, v18.16b\n"
- "zip1 v18.16b, v21.16b, v0.16b\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s19, [x24, %x[ld_weight_col]]\n"
- ".inst 0x6e9697b8 // udot v24.4s, v29.16b, v22.16b\n"
- "zip1 v18.16b, v20.16b, v18.16b\n"
- "ldr s16, [x24, x23]\n"
- "zip1 v17.16b, v17.16b, v16.16b\n"
- "zip1 v16.16b, v19.16b, v0.16b\n"
- ".inst 0x6e9297b8 // udot v24.4s, v29.16b, v18.16b\n"
- "zip1 v16.16b, v17.16b, v16.16b\n"
- ".inst 0x6e9097b8 // udot v24.4s, v29.16b, v16.16b\n"
+ "movi v25.4s, #0x0\n"
+ "ldr s24, [%x[weights], #0x0]\n"
+ "ldr s23, [%x[weights], %x[ld_weight_col]]\n"
+ "zip1 v23.16b, v23.16b, v0.16b\n"
+ "ldr s21, [%x[weights], x22]\n"
"add %x[weights], %x[weights], #0x4\n"
- "add x25, x25, #0x4\n"
- "mls v26.4s, v24.4s, v31.4s\n"
+ "zip1 v21.16b, v24.16b, v21.16b\n"
+ "ldr s22, [x24, #0x0]\n"
+ "ldr s20, [x24, %x[ld_weight_col]]\n"
+ "zip1 v21.16b, v21.16b, v23.16b\n"
+ "ldr s18, [x24, x22]\n"
+ ".inst 0x6e9597f9 // udot v25.4s, v31.16b, v21.16b\n"
"add x24, x24, #0x4\n"
- "add v26.4s, v26.4s, v30.4s\n"
+ "zip1 v20.16b, v20.16b, v0.16b\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s17, [x23, %x[ld_weight_col]]\n"
+ "zip1 v18.16b, v22.16b, v18.16b\n"
+ "ldr s16, [x23, x22]\n"
+ "zip1 v18.16b, v18.16b, v20.16b\n"
+ "add x23, x23, #0x4\n"
+ ".inst 0x6e9297f9 // udot v25.4s, v31.16b, v18.16b\n"
+ "zip1 v17.16b, v17.16b, v0.16b\n"
+ "zip1 v16.16b, v19.16b, v16.16b\n"
+ "zip1 v16.16b, v16.16b, v17.16b\n"
+ ".inst 0x6e9097f9 // udot v25.4s, v31.16b, v16.16b\n"
+ "mls v26.4s, v25.4s, v30.4s\n"
+ "add v26.4s, v26.4s, v29.4s\n"
"str q26, [%x[outptr], #0x0]\n"
- "str q22, [%x[outptr], #0x10]\n"
+ "str q21, [%x[outptr], #0x10]\n"
"str q18, [%x[outptr], #0x20]\n"
"str q16, [%x[outptr], #0x30]\n"
"add %x[outptr], %x[outptr], #0x40\n"
"cbz %x[rq_mul_perchannel], 3f\n"
- "ldr q28, [%x[rq_mul_perchannel], x22]\n"
- "ldr q27, [%x[rq_shift_perchannel], x22]\n"
+ "ldr q28, [%x[rq_mul_perchannel], x21]\n"
+ "ldr q27, [%x[rq_shift_perchannel], x21]\n"
"3:" // Loop: Quantisation parameters: Store
- "subs x21, x21, #0x1\n"
"str q28, [%x[outptr], #0x0]\n"
- "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
"str q27, [%x[outptr], #0x10]\n"
+ "subs x20, x20, #0x1\n"
"add %x[outptr], %x[outptr], #0x20\n"
"bgt 1b\n"
"tst %x[n_channels], #0x3\n"
@@ -128,113 +128,119 @@ void interleave_a64_u8q_3x3_dot::pack_parameters(unsigned int n_channels, void *
"4:" // Oddments
"movi v26.4s, #0x0\n"
"cbz %x[bias], 7f\n"
- "add %x[bias], %x[bias], x22\n"
+ "add %x[bias], %x[bias], x21\n"
"tbz %x[n_channels], #1, 5f\n"
"ld1 { v26.d }[0], [%x[bias]], #0x8\n"
"tbz %x[n_channels], #0, 6f\n"
"ld1 { v26.s }[2], [%x[bias]], #0x4\n"
"b 6f\n"
"5:" // Oddments: Load bias: Bit 1: Unset
+ "tbz %x[n_channels], #0, 6f\n"
"ld1 { v26.s }[0], [%x[bias]], #0x4\n"
"6:" // Oddments: Load bias: Bit 1: End
+
"7:" // Oddments: Skip bias load
"tbz %x[n_channels], #1, 8f\n"
- "ld1 { v25.h }[0], [%x[weights]]\n"
- "ld1 { v23.h }[0], [x25]\n"
- "add x21, %x[weights], %x[ld_weight_col]\n"
- "add x20, %x[weights], x23\n"
- "ld1 { v22.h }[0], [x21]\n"
- "ld1 { v20.h }[0], [x20]\n"
- "add x21, x25, %x[ld_weight_col]\n"
- "add x20, x25, x23\n"
- "ld1 { v21.h }[0], [x21]\n"
- "ld1 { v18.h }[0], [x20]\n"
- "add x21, x24, %x[ld_weight_col]\n"
- "add x20, x24, x23\n"
- "ld1 { v17.h }[0], [x24]\n"
- "ld1 { v19.h }[0], [x21]\n"
+ "ld1 { v24.h }[0], [%x[weights]]\n"
+ "ld1 { v22.h }[0], [x24]\n"
+ "add x20, %x[weights], %x[ld_weight_col]\n"
+ "ld1 { v19.h }[0], [x23]\n"
+ "add x19, %x[weights], x22\n"
+ "ld1 { v23.h }[0], [x20]\n"
"add %x[weights], %x[weights], #0x2\n"
- "add x25, x25, #0x2\n"
- "ld1 { v16.h }[0], [x20]\n"
+ "ld1 { v21.h }[0], [x19]\n"
+ "add x20, x24, %x[ld_weight_col]\n"
+ "add x19, x24, x22\n"
+ "ld1 { v20.h }[0], [x20]\n"
+ "ld1 { v18.h }[0], [x19]\n"
"add x24, x24, #0x2\n"
+ "add x19, x23, %x[ld_weight_col]\n"
+ "ld1 { v17.h }[0], [x19]\n"
+ "add x19, x23, x22\n"
+ "ld1 { v16.h }[0], [x19]\n"
+ "add x23, x23, #0x2\n"
"tbz %x[n_channels], #0, 9f\n"
- "ld1 { v25.b }[2], [%x[weights]]\n"
- "ld1 { v23.b }[2], [x25]\n"
- "add x21, %x[weights], %x[ld_weight_col]\n"
- "add x20, %x[weights], x23\n"
- "ld1 { v22.b }[2], [x21]\n"
- "ld1 { v20.b }[2], [x20]\n"
- "add x21, x25, %x[ld_weight_col]\n"
- "add x20, x25, x23\n"
- "ld1 { v21.b }[2], [x21]\n"
- "ld1 { v18.b }[2], [x20]\n"
- "add x21, x24, %x[ld_weight_col]\n"
- "add x20, x24, x23\n"
- "ld1 { v17.b }[2], [x24]\n"
- "ld1 { v19.b }[2], [x21]\n"
+ "ld1 { v24.b }[2], [%x[weights]]\n"
+ "ld1 { v22.b }[2], [x24]\n"
+ "add x20, %x[weights], %x[ld_weight_col]\n"
+ "ld1 { v19.b }[2], [x23]\n"
+ "add x19, %x[weights], x22\n"
+ "ld1 { v23.b }[2], [x20]\n"
"add %x[weights], %x[weights], #0x1\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "ld1 { v21.b }[2], [x19]\n"
+ "add x20, x24, %x[ld_weight_col]\n"
+ "add x19, x24, x22\n"
+ "ld1 { v20.b }[2], [x20]\n"
+ "ld1 { v18.b }[2], [x19]\n"
+ "add x20, x23, %x[ld_weight_col]\n"
+ "add x19, x23, x22\n"
+ "ld1 { v17.b }[2], [x20]\n"
+ "ld1 { v16.b }[2], [x19]\n"
"b 9f\n"
"8:" // Oddments: Load weights: Bit 1: Unset
- "ld1 { v25.b }[0], [%x[weights]]\n"
- "ld1 { v23.b }[0], [x25]\n"
- "add x21, %x[weights], %x[ld_weight_col]\n"
- "add x20, %x[weights], x23\n"
- "ld1 { v22.b }[0], [x21]\n"
- "ld1 { v20.b }[0], [x20]\n"
- "add x21, x25, %x[ld_weight_col]\n"
- "add x20, x25, x23\n"
- "ld1 { v21.b }[0], [x21]\n"
- "ld1 { v18.b }[0], [x20]\n"
- "add x21, x24, %x[ld_weight_col]\n"
- "add x20, x24, x23\n"
- "ld1 { v17.b }[0], [x24]\n"
- "ld1 { v19.b }[0], [x21]\n"
+ "tbz %x[n_channels], #0, 9f\n"
+ "ld1 { v24.b }[0], [%x[weights]]\n"
+ "ld1 { v22.b }[0], [x24]\n"
+ "add x20, %x[weights], %x[ld_weight_col]\n"
+ "ld1 { v19.b }[0], [x23]\n"
+ "add x19, %x[weights], x22\n"
+ "ld1 { v23.b }[0], [x20]\n"
"add %x[weights], %x[weights], #0x1\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "ld1 { v21.b }[0], [x19]\n"
+ "add x20, x24, %x[ld_weight_col]\n"
+ "add x19, x24, x22\n"
+ "ld1 { v20.b }[0], [x20]\n"
+ "ld1 { v18.b }[0], [x19]\n"
+ "add x20, x23, %x[ld_weight_col]\n"
+ "add x19, x23, x22\n"
+ "ld1 { v17.b }[0], [x20]\n"
+ "ld1 { v16.b }[0], [x19]\n"
"9:" // Oddments: Load weights: Bit 1: End
- "zip1 v20.16b, v25.16b, v20.16b\n"
- "zip1 v22.16b, v22.16b, v0.16b\n"
- "zip1 v22.16b, v20.16b, v22.16b\n"
- "zip1 v20.16b, v23.16b, v18.16b\n"
- "zip1 v18.16b, v21.16b, v0.16b\n"
- "movi v24.4s, #0x0\n"
- ".inst 0x6e9697b8 // udot v24.4s, v29.16b, v22.16b\n"
- "zip1 v18.16b, v20.16b, v18.16b\n"
- "zip1 v17.16b, v17.16b, v16.16b\n"
- ".inst 0x6e9297b8 // udot v24.4s, v29.16b, v18.16b\n"
- "zip1 v16.16b, v19.16b, v0.16b\n"
- "zip1 v16.16b, v17.16b, v16.16b\n"
- ".inst 0x6e9097b8 // udot v24.4s, v29.16b, v16.16b\n"
- "mls v26.4s, v24.4s, v31.4s\n"
- "add v26.4s, v26.4s, v30.4s\n"
+ "zip1 v21.16b, v24.16b, v21.16b\n"
+ "zip1 v23.16b, v23.16b, v0.16b\n"
+ "zip1 v18.16b, v22.16b, v18.16b\n"
+ "zip1 v20.16b, v20.16b, v0.16b\n"
+ "zip1 v16.16b, v19.16b, v16.16b\n"
+ "zip1 v17.16b, v17.16b, v0.16b\n"
+ "zip1 v21.16b, v21.16b, v23.16b\n"
+ "zip1 v18.16b, v18.16b, v20.16b\n"
+ "zip1 v16.16b, v16.16b, v17.16b\n"
+ "movi v25.4s, #0x0\n"
+ ".inst 0x6e9597f9 // udot v25.4s, v31.16b, v21.16b\n"
+ ".inst 0x6e9297f9 // udot v25.4s, v31.16b, v18.16b\n"
+ ".inst 0x6e9097f9 // udot v25.4s, v31.16b, v16.16b\n"
+ "mls v26.4s, v25.4s, v30.4s\n"
+ "add v26.4s, v26.4s, v29.4s\n"
"str q26, [%x[outptr], #0x0]\n"
- "str q22, [%x[outptr], #0x10]\n"
+ "str q21, [%x[outptr], #0x10]\n"
"str q18, [%x[outptr], #0x20]\n"
"str q16, [%x[outptr], #0x30]\n"
"add %x[outptr], %x[outptr], #0x40\n"
"cbz %x[rq_mul_perchannel], 12f\n"
- "add x21, %x[rq_mul_perchannel], x22\n"
- "add x20, %x[rq_shift_perchannel], x22\n"
+ "add x20, %x[rq_mul_perchannel], x21\n"
+ "add x19, %x[rq_shift_perchannel], x21\n"
"tbz %x[n_channels], #1, 10f\n"
- "ld1 { v28.d }[0], [x21], #0x8\n"
- "ld1 { v27.d }[0], [x20], #0x8\n"
+ "ld1 { v28.d }[0], [x20], #0x8\n"
+ "ld1 { v27.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v28.s }[2], [x21], #0x4\n"
- "ld1 { v27.s }[2], [x20], #0x4\n"
+ "ld1 { v28.s }[2], [x20], #0x4\n"
+ "ld1 { v27.s }[2], [x19], #0x4\n"
"b 11f\n"
"10:" // Oddments: Quantisation parameters: Load quant params: Bit 1: Unset
- "ld1 { v28.s }[0], [x21], #0x4\n"
- "ld1 { v27.s }[0], [x20], #0x4\n"
+ "tbz %x[n_channels], #0, 11f\n"
+ "ld1 { v28.s }[0], [x20], #0x4\n"
+ "ld1 { v27.s }[0], [x19], #0x4\n"
"11:" // Oddments: Quantisation parameters: Load quant params: Bit 1: End
+
"12:" // Oddments: Quantisation parameters: Store
"str q28, [%x[outptr], #0x0]\n"
"str q27, [%x[outptr], #0x10]\n"
"add %x[outptr], %x[outptr], #0x20\n"
"13:" // End
+
: [bias] "+&r" (bias), [ld_weight_col] "+&r" (ld_weight_col), [ld_weight_row] "+&r" (ld_weight_row), [outptr] "+&r" (outptr), [weights] "+&r" (weights)
: [n_channels] "r" (n_channels), [offsetof_input_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [offsetof_weights_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [qp] "r" (&qp), [rq_mul_perchannel] "r" (qp.per_channel_muls), [rq_shift_perchannel] "r" (qp.per_channel_right_shifts)
- : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
+ : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_s8q_3x3_dot.cpp b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_s8q_3x3_dot.cpp
index 0cf8044733..dfb6457ed9 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_s8q_3x3_dot.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_s8q_3x3_dot.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -51,82 +51,82 @@ size_t interleave_sve_s8q_3x3_dot::get_packed_size(const DepthwiseArgs &args)
void interleave_sve_s8q_3x3_dot::pack_parameters(unsigned int n_channels, void *outptr, const int32_t *bias, const int8_t *weights, const arm_gemm::Requantize32 &qp, size_t ld_weight_col, size_t ld_weight_row)
{
__asm__ __volatile__(
+ "mov z30.b, #0x0\n"
+ "ptrue p2.b\n"
+ "ld1rw { z29.s }, p2/Z, [%x[qp], %[offsetof_input_offset]]\n"
+ "mov z28.b, #0x1\n"
"cmp %x[ld_weight_col], XZR\n"
- "csel %x[ld_weight_col], %x[ld_weight_col], %x[n_channels], NE\n"
"mov z16.s, #0x9\n"
- "mov z28.b, #0x0\n"
- "mov x20, #0x3\n"
- "ptrue p2.b\n"
- "mul x20, %x[ld_weight_col], x20\n"
- "ld1rw { z27.s }, p2/Z, [%x[qp], %[offsetof_input_offset]]\n"
- "ld1rw { z26.s }, p2/Z, [%x[qp], %[offsetof_weights_offset]]\n"
+ "ld1rw { z27.s }, p2/Z, [%x[qp], %[offsetof_weights_offset]]\n"
+ "csel %x[ld_weight_col], %x[ld_weight_col], %x[n_channels], NE\n"
+ "mul z27.s, p2/M, z27.s, z29.s\n"
+ "ld1rw { z26.s }, p2/Z, [%x[qp], %[offsetof_per_layer_mul]]\n"
+ "mov x19, #0x3\n"
+ "mul z27.s, p2/M, z27.s, z16.s\n"
+ "ld1rw { z25.s }, p2/Z, [%x[qp], %[offsetof_per_layer_right_shift]]\n"
+ "mul x19, %x[ld_weight_col], x19\n"
"cmp %x[ld_weight_row], XZR\n"
- "csel %x[ld_weight_row], %x[ld_weight_row], x20, NE\n"
- "mov z25.b, #0x1\n"
- "mul z26.s, p2/M, z26.s, z27.s\n"
- "add x24, %x[weights], %x[ld_weight_row]\n"
- "ld1rw { z24.s }, p2/Z, [%x[qp], %[offsetof_per_layer_mul]]\n"
- "ld1rw { z23.s }, p2/Z, [%x[qp], %[offsetof_per_layer_right_shift]]\n"
- "add x23, x24, %x[ld_weight_row]\n"
- "add x22, %x[ld_weight_col], %x[ld_weight_col]\n"
+ "add x23, %x[ld_weight_col], %x[ld_weight_col]\n"
+ "csel %x[ld_weight_row], %x[ld_weight_row], x19, NE\n"
+ "add x22, %x[weights], %x[ld_weight_row]\n"
+ "add x21, x22, %x[ld_weight_row]\n"
"whilelt p1.s, XZR, %x[n_channels]\n"
- "mov x21, #0x0\n"
- "mul z26.s, p2/M, z26.s, z16.s\n"
+ "mov x20, #0x0\n"
"pfalse p8.b\n"
"cbz %x[bias], 1f\n"
"ptrue p8.s\n"
"1:" // No bias
"2:" // Loop
- "cntp x20, p2, p1.s\n"
- "whilelt p0.b, XZR, x20\n"
- "ld1b { z18.b }, p0/Z, [%x[weights]]\n"
- "ld1b { z17.b }, p0/Z, [%x[weights], %x[ld_weight_col]]\n"
- "ld1b { z16.b }, p0/Z, [%x[weights], x22]\n"
- "zip1 z20.b, z18.b, z16.b\n"
- "zip1 z19.b, z17.b, z28.b\n"
- "ld1b { z18.b }, p0/Z, [x24]\n"
- "ld1b { z17.b }, p0/Z, [x24, %x[ld_weight_col]]\n"
- "ld1b { z16.b }, p0/Z, [x24, x22]\n"
- "zip1 z22.b, z20.b, z19.b\n"
- "zip1 z21.b, z18.b, z16.b\n"
- "zip1 z19.b, z17.b, z28.b\n"
- "mov z20.s, #0x0\n"
- "ld1b { z18.b }, p0/Z, [x23]\n"
- "ld1b { z17.b }, p0/Z, [x23, %x[ld_weight_col]]\n"
- "ld1b { z16.b }, p0/Z, [x23, x22]\n"
- "sdot z20.s, z25.b, z22.b\n"
- "zip1 z19.b, z21.b, z19.b\n"
- "sdot z20.s, z25.b, z19.b\n"
- "zip1 z18.b, z18.b, z16.b\n"
- "zip1 z16.b, z17.b, z28.b\n"
+ "mov z24.s, #0x0\n"
+ "cntp x19, p2, p1.s\n"
"and p0.b, p2/Z, p8.b, p1.b\n"
- "ld1w { z17.s }, p0/Z, [%x[bias], x21, LSL #2]\n"
- "zip1 z16.b, z18.b, z16.b\n"
- "sdot z20.s, z25.b, z16.b\n"
- "mls z17.s, p2/M, z20.s, z27.s\n"
- "add %x[weights], %x[weights], x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add z17.s, z17.s, z26.s\n"
- "st1w { z17.s }, p2, [%x[outptr]]\n"
- "st1b { z22.b }, p2, [%x[outptr], #1, MUL VL]\n"
- "st1b { z19.b }, p2, [%x[outptr], #2, MUL VL]\n"
+ "ld1w { z23.s }, p0/Z, [%x[bias], x20, LSL #2]\n"
+ "whilelt p0.b, XZR, x19\n"
+ "ld1b { z17.b }, p0/Z, [%x[weights]]\n"
+ "ld1b { z16.b }, p0/Z, [%x[weights], %x[ld_weight_col]]\n"
+ "zip1 z18.b, z16.b, z30.b\n"
+ "ld1b { z16.b }, p0/Z, [%x[weights], x23]\n"
+ "add %x[weights], %x[weights], x19\n"
+ "zip1 z16.b, z17.b, z16.b\n"
+ "ld1b { z22.b }, p0/Z, [x22]\n"
+ "ld1b { z17.b }, p0/Z, [x22, %x[ld_weight_col]]\n"
+ "zip1 z21.b, z16.b, z18.b\n"
+ "ld1b { z16.b }, p0/Z, [x22, x23]\n"
+ "sdot z24.s, z28.b, z21.b\n"
+ "add x22, x22, x19\n"
+ "zip1 z18.b, z17.b, z30.b\n"
+ "ld1b { z20.b }, p0/Z, [x21]\n"
+ "ld1b { z19.b }, p0/Z, [x21, %x[ld_weight_col]]\n"
+ "zip1 z17.b, z22.b, z16.b\n"
+ "ld1b { z16.b }, p0/Z, [x21, x23]\n"
+ "zip1 z18.b, z17.b, z18.b\n"
+ "add x21, x21, x19\n"
+ "zip1 z17.b, z19.b, z30.b\n"
+ "sdot z24.s, z28.b, z18.b\n"
+ "zip1 z16.b, z20.b, z16.b\n"
+ "zip1 z16.b, z16.b, z17.b\n"
+ "sdot z24.s, z28.b, z16.b\n"
+ "mls z23.s, p2/M, z24.s, z29.s\n"
+ "add z23.s, z23.s, z27.s\n"
+ "st1w { z23.s }, p2, [%x[outptr]]\n"
+ "st1b { z21.b }, p2, [%x[outptr], #1, MUL VL]\n"
+ "st1b { z18.b }, p2, [%x[outptr], #2, MUL VL]\n"
"st1b { z16.b }, p2, [%x[outptr], #3, MUL VL]\n"
"addvl %x[outptr], %x[outptr], #4\n"
"cbz %x[rq_mul_perchannel], 3f\n"
- "ld1w { z24.s }, p1/Z, [%x[rq_mul_perchannel], x21, LSL #2]\n"
- "ld1w { z23.s }, p1/Z, [%x[rq_shift_perchannel], x21, LSL #2]\n"
+ "ld1w { z26.s }, p1/Z, [%x[rq_mul_perchannel], x20, LSL #2]\n"
+ "ld1w { z25.s }, p1/Z, [%x[rq_shift_perchannel], x20, LSL #2]\n"
"3:" // Loop: Quantisation parameters: Store
- "incw x21\n"
- "whilelt p1.s, x21, %x[n_channels]\n"
- "st1w { z24.s }, p2, [%x[outptr]]\n"
- "st1w { z23.s }, p2, [%x[outptr], #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [%x[outptr]]\n"
+ "incw x20\n"
+ "st1w { z25.s }, p2, [%x[outptr], #1, MUL VL]\n"
+ "whilelt p1.s, x20, %x[n_channels]\n"
"addvl %x[outptr], %x[outptr], #2\n"
"b.any 2b\n"
: [ld_weight_col] "+&r" (ld_weight_col), [ld_weight_row] "+&r" (ld_weight_row), [outptr] "+&r" (outptr), [weights] "+&r" (weights)
: [bias] "r" (bias), [n_channels] "r" (n_channels), [offsetof_input_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [offsetof_weights_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [qp] "r" (&qp), [rq_mul_perchannel] "r" (qp.per_channel_muls), [rq_shift_perchannel] "r" (qp.per_channel_right_shifts)
- : "cc", "memory", "p0", "p1", "p2", "p8", "x20", "x21", "x22", "x23", "x24", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28"
+ : "cc", "memory", "p0", "p1", "p2", "p8", "x19", "x20", "x21", "x22", "x23", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30"
);
}
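
The SVE interleave variants follow the same renumbering, and their clobber lists also grow to cover the extra vector registers (z29, z30) the restored code uses, declared alongside the predicate registers p0-p2 and p8. A minimal sketch of the same convention for an SVE block follows; the function is hypothetical and assumes an SVE-capable target:

#include <cstdint>

// Illustrative only: zero-fill n int32 elements with a predicated SVE
// loop. Every predicate and vector register the block touches appears
// in the clobber list, mirroring the interleave kernels above.
static void sve_zero_fill(int32_t *out, int64_t n)
{
    __asm__ __volatile__(
        "mov x19, #0x0\n"
        "mov z16.s, #0x0\n"
        "whilelt p0.s, x19, %x[n]\n"
        "1:" // Store loop
        "st1w { z16.s }, p0, [%x[out], x19, LSL #2]\n"
        "incw x19\n"
        "whilelt p0.s, x19, %x[n]\n"
        "b.any 1b\n"
        :
        : [out] "r" (out), [n] "r" (n)
        : "cc", "memory", "p0", "x19", "z16"
    );
}
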
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_u8q_3x3_dot.cpp b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_u8q_3x3_dot.cpp
index e5bc8198f8..6c16bdc2fb 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_u8q_3x3_dot.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_u8q_3x3_dot.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -51,82 +51,82 @@ size_t interleave_sve_u8q_3x3_dot::get_packed_size(const DepthwiseArgs &args)
void interleave_sve_u8q_3x3_dot::pack_parameters(unsigned int n_channels, void *outptr, const int32_t *bias, const uint8_t *weights, const arm_gemm::Requantize32 &qp, size_t ld_weight_col, size_t ld_weight_row)
{
__asm__ __volatile__(
+ "mov z30.b, #0x0\n"
+ "ptrue p2.b\n"
+ "ld1rw { z29.s }, p2/Z, [%x[qp], %[offsetof_input_offset]]\n"
+ "mov z28.b, #0x1\n"
"cmp %x[ld_weight_col], XZR\n"
- "csel %x[ld_weight_col], %x[ld_weight_col], %x[n_channels], NE\n"
"mov z16.s, #0x9\n"
- "mov z28.b, #0x0\n"
- "mov x20, #0x3\n"
- "ptrue p2.b\n"
- "mul x20, %x[ld_weight_col], x20\n"
- "ld1rw { z27.s }, p2/Z, [%x[qp], %[offsetof_input_offset]]\n"
- "ld1rw { z26.s }, p2/Z, [%x[qp], %[offsetof_weights_offset]]\n"
+ "ld1rw { z27.s }, p2/Z, [%x[qp], %[offsetof_weights_offset]]\n"
+ "csel %x[ld_weight_col], %x[ld_weight_col], %x[n_channels], NE\n"
+ "mul z27.s, p2/M, z27.s, z29.s\n"
+ "ld1rw { z26.s }, p2/Z, [%x[qp], %[offsetof_per_layer_mul]]\n"
+ "mov x19, #0x3\n"
+ "mul z27.s, p2/M, z27.s, z16.s\n"
+ "ld1rw { z25.s }, p2/Z, [%x[qp], %[offsetof_per_layer_right_shift]]\n"
+ "mul x19, %x[ld_weight_col], x19\n"
"cmp %x[ld_weight_row], XZR\n"
- "csel %x[ld_weight_row], %x[ld_weight_row], x20, NE\n"
- "mov z25.b, #0x1\n"
- "mul z26.s, p2/M, z26.s, z27.s\n"
- "add x24, %x[weights], %x[ld_weight_row]\n"
- "ld1rw { z24.s }, p2/Z, [%x[qp], %[offsetof_per_layer_mul]]\n"
- "ld1rw { z23.s }, p2/Z, [%x[qp], %[offsetof_per_layer_right_shift]]\n"
- "add x23, x24, %x[ld_weight_row]\n"
- "add x22, %x[ld_weight_col], %x[ld_weight_col]\n"
+ "add x23, %x[ld_weight_col], %x[ld_weight_col]\n"
+ "csel %x[ld_weight_row], %x[ld_weight_row], x19, NE\n"
+ "add x22, %x[weights], %x[ld_weight_row]\n"
+ "add x21, x22, %x[ld_weight_row]\n"
"whilelt p1.s, XZR, %x[n_channels]\n"
- "mov x21, #0x0\n"
- "mul z26.s, p2/M, z26.s, z16.s\n"
+ "mov x20, #0x0\n"
"pfalse p8.b\n"
"cbz %x[bias], 1f\n"
"ptrue p8.s\n"
"1:" // No bias
"2:" // Loop
- "cntp x20, p2, p1.s\n"
- "whilelt p0.b, XZR, x20\n"
- "ld1b { z18.b }, p0/Z, [%x[weights]]\n"
- "ld1b { z17.b }, p0/Z, [%x[weights], %x[ld_weight_col]]\n"
- "ld1b { z16.b }, p0/Z, [%x[weights], x22]\n"
- "zip1 z20.b, z18.b, z16.b\n"
- "zip1 z19.b, z17.b, z28.b\n"
- "ld1b { z18.b }, p0/Z, [x24]\n"
- "ld1b { z17.b }, p0/Z, [x24, %x[ld_weight_col]]\n"
- "ld1b { z16.b }, p0/Z, [x24, x22]\n"
- "zip1 z22.b, z20.b, z19.b\n"
- "zip1 z21.b, z18.b, z16.b\n"
- "zip1 z19.b, z17.b, z28.b\n"
- "mov z20.s, #0x0\n"
- "ld1b { z18.b }, p0/Z, [x23]\n"
- "ld1b { z17.b }, p0/Z, [x23, %x[ld_weight_col]]\n"
- "ld1b { z16.b }, p0/Z, [x23, x22]\n"
- "udot z20.s, z25.b, z22.b\n"
- "zip1 z19.b, z21.b, z19.b\n"
- "udot z20.s, z25.b, z19.b\n"
- "zip1 z18.b, z18.b, z16.b\n"
- "zip1 z16.b, z17.b, z28.b\n"
+ "mov z24.s, #0x0\n"
+ "cntp x19, p2, p1.s\n"
"and p0.b, p2/Z, p8.b, p1.b\n"
- "ld1w { z17.s }, p0/Z, [%x[bias], x21, LSL #2]\n"
- "zip1 z16.b, z18.b, z16.b\n"
- "udot z20.s, z25.b, z16.b\n"
- "mls z17.s, p2/M, z20.s, z27.s\n"
- "add %x[weights], %x[weights], x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add z17.s, z17.s, z26.s\n"
- "st1w { z17.s }, p2, [%x[outptr]]\n"
- "st1b { z22.b }, p2, [%x[outptr], #1, MUL VL]\n"
- "st1b { z19.b }, p2, [%x[outptr], #2, MUL VL]\n"
+ "ld1w { z23.s }, p0/Z, [%x[bias], x20, LSL #2]\n"
+ "whilelt p0.b, XZR, x19\n"
+ "ld1b { z17.b }, p0/Z, [%x[weights]]\n"
+ "ld1b { z16.b }, p0/Z, [%x[weights], %x[ld_weight_col]]\n"
+ "zip1 z18.b, z16.b, z30.b\n"
+ "ld1b { z16.b }, p0/Z, [%x[weights], x23]\n"
+ "add %x[weights], %x[weights], x19\n"
+ "zip1 z16.b, z17.b, z16.b\n"
+ "ld1b { z22.b }, p0/Z, [x22]\n"
+ "ld1b { z17.b }, p0/Z, [x22, %x[ld_weight_col]]\n"
+ "zip1 z21.b, z16.b, z18.b\n"
+ "ld1b { z16.b }, p0/Z, [x22, x23]\n"
+ "udot z24.s, z28.b, z21.b\n"
+ "add x22, x22, x19\n"
+ "zip1 z18.b, z17.b, z30.b\n"
+ "ld1b { z20.b }, p0/Z, [x21]\n"
+ "ld1b { z19.b }, p0/Z, [x21, %x[ld_weight_col]]\n"
+ "zip1 z17.b, z22.b, z16.b\n"
+ "ld1b { z16.b }, p0/Z, [x21, x23]\n"
+ "zip1 z18.b, z17.b, z18.b\n"
+ "add x21, x21, x19\n"
+ "zip1 z17.b, z19.b, z30.b\n"
+ "udot z24.s, z28.b, z18.b\n"
+ "zip1 z16.b, z20.b, z16.b\n"
+ "zip1 z16.b, z16.b, z17.b\n"
+ "udot z24.s, z28.b, z16.b\n"
+ "mls z23.s, p2/M, z24.s, z29.s\n"
+ "add z23.s, z23.s, z27.s\n"
+ "st1w { z23.s }, p2, [%x[outptr]]\n"
+ "st1b { z21.b }, p2, [%x[outptr], #1, MUL VL]\n"
+ "st1b { z18.b }, p2, [%x[outptr], #2, MUL VL]\n"
"st1b { z16.b }, p2, [%x[outptr], #3, MUL VL]\n"
"addvl %x[outptr], %x[outptr], #4\n"
"cbz %x[rq_mul_perchannel], 3f\n"
- "ld1w { z24.s }, p1/Z, [%x[rq_mul_perchannel], x21, LSL #2]\n"
- "ld1w { z23.s }, p1/Z, [%x[rq_shift_perchannel], x21, LSL #2]\n"
+ "ld1w { z26.s }, p1/Z, [%x[rq_mul_perchannel], x20, LSL #2]\n"
+ "ld1w { z25.s }, p1/Z, [%x[rq_shift_perchannel], x20, LSL #2]\n"
"3:" // Loop: Quantisation parameters: Store
- "incw x21\n"
- "whilelt p1.s, x21, %x[n_channels]\n"
- "st1w { z24.s }, p2, [%x[outptr]]\n"
- "st1w { z23.s }, p2, [%x[outptr], #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [%x[outptr]]\n"
+ "incw x20\n"
+ "st1w { z25.s }, p2, [%x[outptr], #1, MUL VL]\n"
+ "whilelt p1.s, x20, %x[n_channels]\n"
"addvl %x[outptr], %x[outptr], #2\n"
"b.any 2b\n"
: [ld_weight_col] "+&r" (ld_weight_col), [ld_weight_row] "+&r" (ld_weight_row), [outptr] "+&r" (outptr), [weights] "+&r" (weights)
: [bias] "r" (bias), [n_channels] "r" (n_channels), [offsetof_input_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [offsetof_weights_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [qp] "r" (&qp), [rq_mul_perchannel] "r" (qp.per_channel_muls), [rq_shift_perchannel] "r" (qp.per_channel_right_shifts)
- : "cc", "memory", "p0", "p1", "p2", "p8", "x20", "x21", "x22", "x23", "x24", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28"
+ : "cc", "memory", "p0", "p1", "p2", "p8", "x19", "x20", "x21", "x22", "x23", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
index f4027df375..a85e44360e 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,198 +87,198 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
);
__asm__ __volatile__(
- "mov x23, #0x0\n"
"mov x22, #0x0\n"
+ "mov x21, #0x0\n"
"1:" // Tile loop
- "str x23, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "mov x27, #0x2\n"
+ "str x22, [%x[params_struct], %[offsetof_args_tile_i]]\n"
"mov x26, #0x2\n"
- "str x22, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x25, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x24, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "mul x21, x23, x25\n" // offset = tile_i * ld_input_row
- "ldr x15, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "ldr x14, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "mul x20, x23, x24\n" // offset = tile_i * ld_output_row
- "mov x23, #0x10\n" // cntb _, ALL, #1
- "madd x21, x22, x15, x21\n" // offset += tile_j * ld_input_col
- "ldr x13, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "lsl x15, x15, #0x1\n"
- "ldr x12, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "madd x20, x22, x14, x20\n" // offset += tile_j * ld_output_col
- "lsr x22, %x[n_channels], #0x3\n"
- "add x11, x15, x15\n"
- "ldr x10, [%x[params_struct], %[offsetof_args_params]]\n"
- "mul x21, x21, x27\n" // offset *= kernel_stride * output_size
- "add x13, x13, x21, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
- "add x9, x13, x25, LSL #1\n"
- "mul x20, x20, x26\n" // offset *= output_tile_size
- "add x28, x9, x25, LSL #1\n"
- "add x12, x12, x20, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
+ "mov x25, #0x2\n"
+ "str x21, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x24, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "ldr x23, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "mul x20, x22, x24\n" // offset = tile_i * ld_input_row
+ "ldr x14, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "ldr x13, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+ "mul x19, x22, x23\n" // offset = tile_i * ld_output_row
+ "mov x22, #0x10\n" // cntb _, ALL, #1
+ "madd x20, x21, x14, x20\n" // offset += tile_j * ld_input_col
+ "ldr x12, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "lsl x14, x14, #0x1\n"
+ "ldr x11, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "madd x19, x21, x13, x19\n" // offset += tile_j * ld_output_col
+ "lsr x21, %x[n_channels], #0x3\n"
+ "add x10, x14, x14\n"
+ "ldr x9, [%x[params_struct], %[offsetof_args_params]]\n"
+ "mul x20, x20, x26\n" // offset *= kernel_stride * output_size
+ "add x12, x12, x20, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
+ "add x28, x12, x24, LSL #1\n"
+ "mul x19, x19, x25\n" // offset *= output_tile_size
+ "add x27, x28, x24, LSL #1\n"
+ "add x11, x11, x19, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
"add x20, %x[params_struct], %[offsetof_args_min]\n"
+ "add x19, %x[params_struct], %[offsetof_args_max]\n"
"ld1r { v18.8h }, [x20]\n"
- "add x20, %x[params_struct], %[offsetof_args_max]\n"
- "ld1r { v17.8h }, [x20]\n"
- "add x27, x28, x25, LSL #1\n"
- "add x26, x11, x15\n"
- "add x25, x12, x24, LSL #1\n"
- "lsl x14, x14, #0x1\n"
- "mov x21, #0x0\n"
- "sub x20, XZR, x23\n"
- "cbz x22, 4f\n"
- "ldr q16, [x10, #0x0]\n"
- "ldr q0, [x10, #0x10]\n"
- "cmp x23, x22, LSL #4\n"
- "ldr q1, [x10, #0x20]\n"
- "ldr q2, [x10, #0x30]\n"
- "ldr q3, [x10, #0x40]\n"
- "ldr q4, [x10, #0x50]\n"
- "ldr q5, [x10, #0x60]\n"
- "ldr q6, [x10, #0x70]\n"
- "ldr q7, [x10, #0x80]\n"
- "ldr q8, [x10, #0x90]\n"
- "add x10, x10, #0xa0\n"
- "ldr q9, [x9, x15]\n"
- "ld1 { v10.8h }, [x13]\n"
- "ldr q11, [x13, x26]\n"
- "ldr q12, [x9, x11]\n"
- "ldr q13, [x28, x15]\n"
+ "ld1r { v17.8h }, [x19]\n"
+ "add x26, x27, x24, LSL #1\n"
+ "add x25, x10, x14\n"
+ "add x24, x11, x23, LSL #1\n"
+ "lsl x13, x13, #0x1\n"
+ "mov x20, #0x0\n"
+ "sub x19, XZR, x22\n"
+ "cbz x21, 4f\n"
+ "ldr q16, [x9, #0x0]\n"
+ "cmp x22, x21, LSL #4\n"
+ "ldr q0, [x9, #0x10]\n"
+ "ldr q1, [x9, #0x20]\n"
+ "ldr q2, [x9, #0x30]\n"
+ "ldr q3, [x9, #0x40]\n"
+ "ldr q4, [x9, #0x50]\n"
+ "ldr q5, [x9, #0x60]\n"
+ "ldr q6, [x9, #0x70]\n"
+ "ldr q7, [x9, #0x80]\n"
+ "ldr q8, [x9, #0x90]\n"
+ "ldr q9, [x28, x14]\n"
+ "add x9, x9, #0xa0\n"
+ "ld1 { v10.8h }, [x12]\n"
+ "ldr q11, [x12, x25]\n"
+ "ldr q12, [x28, x10]\n"
+ "ldr q13, [x27, x14]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
"mov v28.16b, v16.16b\n fmla v28.8h, v4.8h, v9.8h\n"
"mov v29.16b, v16.16b\n fmla v29.8h, v3.8h, v9.8h\n"
- "add x23, x23, #0x10\n"
- "cmp x23, x22, LSL #4\n"
+ "add x22, x22, #0x10\n"
+ "cmp x22, x21, LSL #4\n"
"mov v30.16b, v16.16b\n fmla v30.8h, v1.8h, v9.8h\n"
"mov v31.16b, v16.16b\n fmla v31.8h, v0.8h, v9.8h\n"
- "ld1 { v9.8h }, [x27]\n"
- "ldr q16, [x10, #0x0]\n"
+ "ld1 { v9.8h }, [x26]\n"
+ "add x19, x19, #0x10\n"
"fmla v28.8h, v0.8h, v10.8h\n"
- "ldr q10, [x28, x11]\n"
"fmla v29.8h, v2.8h, v11.8h\n"
- "ldr q11, [x27, x26]\n"
+ "ldr q11, [x26, x25]\n"
+ "ldr q10, [x27, x10]\n"
"fmla v30.8h, v2.8h, v12.8h\n"
"fmla v31.8h, v1.8h, v12.8h\n"
"add x20, x20, #0x10\n"
- "add x21, x21, #0x10\n"
+ "ldr q16, [x9, #0x0]\n"
"fmla v28.8h, v5.8h, v12.8h\n"
"fmla v29.8h, v4.8h, v12.8h\n"
- "ldr q12, [x13, x15]\n"
+ "ldr q12, [x12, x14]\n"
"fmla v30.8h, v6.8h, v9.8h\n"
- "ldr q9, [x13, x11]\n"
"fmla v31.8h, v3.8h, v13.8h\n"
- "add x13, x13, #0x10\n"
+ "ldr q9, [x12, x10]\n"
+ "add x12, x12, #0x10\n"
"fmla v28.8h, v7.8h, v13.8h\n"
"fmla v29.8h, v6.8h, v13.8h\n"
"fmla v30.8h, v4.8h, v13.8h\n"
"fmla v31.8h, v8.8h, v11.8h\n"
- "ld1 { v11.8h }, [x9]\n"
+ "ld1 { v11.8h }, [x28]\n"
"fmla v28.8h, v1.8h, v12.8h\n"
"fmla v29.8h, v0.8h, v12.8h\n"
- "ldr q12, [x9, x26]\n"
- "add x9, x9, #0x10\n"
+ "ldr q12, [x28, x25]\n"
+ "add x28, x28, #0x10\n"
"fmla v30.8h, v5.8h, v10.8h\n"
"fmla v31.8h, v4.8h, v10.8h\n"
- "ldr q4, [x10, #0x50]\n"
+ "ldr q4, [x9, #0x50]\n"
"fmla v28.8h, v2.8h, v9.8h\n"
"fmla v29.8h, v1.8h, v9.8h\n"
- "ld1 { v9.8h }, [x28]\n"
- "ldr q1, [x10, #0x20]\n"
+ "ld1 { v9.8h }, [x27]\n"
+ "ldr q1, [x9, #0x20]\n"
"fmla v30.8h, v0.8h, v11.8h\n"
- "ldr q0, [x10, #0x10]\n"
"fmla v31.8h, v2.8h, v12.8h\n"
- "ldr q2, [x10, #0x30]\n"
+ "ldr q0, [x9, #0x10]\n"
+ "ldr q2, [x9, #0x30]\n"
"fmla v28.8h, v8.8h, v10.8h\n"
"fmla v29.8h, v7.8h, v10.8h\n"
- "ldr q10, [x28, x26]\n"
- "add x28, x28, #0x10\n"
- "ldr q13, [x28, x15]\n"
+ "ldr q10, [x27, x25]\n"
+ "add x27, x27, #0x10\n"
"fmla v30.8h, v3.8h, v9.8h\n"
"fmla v31.8h, v5.8h, v10.8h\n"
+ "ldr q13, [x27, x14]\n"
"fmla v28.8h, v3.8h, v11.8h\n"
- "ldr q11, [x27, x15]\n"
- "ldr q3, [x10, #0x40]\n"
+ "ldr q11, [x26, x14]\n"
"fmla v29.8h, v5.8h, v12.8h\n"
- "ldr q12, [x27, x11]\n"
- "ldr q5, [x10, #0x60]\n"
+ "ldr q12, [x26, x10]\n"
"fmla v30.8h, v7.8h, v11.8h\n"
"fmla v31.8h, v6.8h, v11.8h\n"
- "ldr q11, [x13, x26]\n"
+ "add x26, x26, #0x10\n"
+ "ldr q11, [x12, x25]\n"
"fmla v28.8h, v6.8h, v9.8h\n"
- "ldr q9, [x9, x15]\n"
"fmla v29.8h, v8.8h, v10.8h\n"
- "ld1 { v10.8h }, [x13]\n"
- "ldr q6, [x10, #0x70]\n"
+ "fmax v28.8h, v28.8h, v18.8h\n"
+ "ldr q9, [x28, x14]\n"
"fmla v30.8h, v8.8h, v12.8h\n"
"fmla v31.8h, v7.8h, v12.8h\n"
- "ldr q12, [x9, x11]\n"
- "ldr q7, [x10, #0x80]\n"
- "fmax v28.8h, v28.8h, v18.8h\n"
"fmax v29.8h, v29.8h, v18.8h\n"
- "ldr q8, [x10, #0x90]\n"
+ "ld1 { v10.8h }, [x12]\n"
"fmax v30.8h, v30.8h, v18.8h\n"
"fmax v31.8h, v31.8h, v18.8h\n"
- "add x27, x27, #0x10\n"
+ "ldr q12, [x28, x10]\n"
+ "ldr q3, [x9, #0x40]\n"
"fmin v28.8h, v28.8h, v17.8h\n"
"fmin v29.8h, v29.8h, v17.8h\n"
- "st1 { v28.8h }, [x12]\n"
- "add x10, x10, #0xa0\n"
+ "st1 { v28.8h }, [x11]\n"
+ "ldr q5, [x9, #0x60]\n"
"fmin v30.8h, v30.8h, v17.8h\n"
"fmin v31.8h, v31.8h, v17.8h\n"
- "str q29, [x12, x14]\n"
- "add x12, x12, #0x10\n"
- "st1 { v30.8h }, [x25]\n"
- "str q31, [x25, x14]\n"
- "add x25, x25, #0x10\n"
+ "str q29, [x11, x13]\n"
+ "add x11, x11, #0x10\n"
+ "st1 { v30.8h }, [x24]\n"
+ "ldr q6, [x9, #0x70]\n"
+ "ldr q7, [x9, #0x80]\n"
+ "str q31, [x24, x13]\n"
+ "add x24, x24, #0x10\n"
+ "ldr q8, [x9, #0x90]\n"
+ "add x9, x9, #0xa0\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
"mov v28.16b, v16.16b\n fmla v28.8h, v4.8h, v9.8h\n"
"mov v29.16b, v16.16b\n fmla v29.8h, v3.8h, v9.8h\n"
"mov v30.16b, v16.16b\n fmla v30.8h, v1.8h, v9.8h\n"
"mov v31.16b, v16.16b\n fmla v31.8h, v0.8h, v9.8h\n"
- "ld1 { v9.8h }, [x27]\n"
+ "ld1 { v9.8h }, [x26]\n"
"fmla v28.8h, v0.8h, v10.8h\n"
- "ldr q10, [x28, x11]\n"
"fmla v29.8h, v2.8h, v11.8h\n"
- "ldr q11, [x27, x26]\n"
+ "ldr q11, [x26, x25]\n"
+ "ldr q10, [x27, x10]\n"
"fmla v30.8h, v2.8h, v12.8h\n"
"fmla v31.8h, v1.8h, v12.8h\n"
"fmla v28.8h, v5.8h, v12.8h\n"
"fmla v29.8h, v4.8h, v12.8h\n"
- "ldr q12, [x13, x15]\n"
+ "ldr q12, [x12, x14]\n"
"fmla v30.8h, v6.8h, v9.8h\n"
- "ldr q9, [x13, x11]\n"
"fmla v31.8h, v3.8h, v13.8h\n"
- "add x13, x13, #0x10\n"
+ "ldr q9, [x12, x10]\n"
+ "add x12, x12, #0x10\n"
"fmla v28.8h, v7.8h, v13.8h\n"
"fmla v29.8h, v6.8h, v13.8h\n"
"fmla v30.8h, v4.8h, v13.8h\n"
"fmla v31.8h, v8.8h, v11.8h\n"
- "ld1 { v11.8h }, [x9]\n"
+ "ld1 { v11.8h }, [x28]\n"
"fmla v28.8h, v1.8h, v12.8h\n"
"fmla v29.8h, v0.8h, v12.8h\n"
- "ldr q12, [x9, x26]\n"
- "add x9, x9, #0x10\n"
+ "ldr q12, [x28, x25]\n"
+ "add x28, x28, #0x10\n"
"fmla v30.8h, v5.8h, v10.8h\n"
"fmla v31.8h, v4.8h, v10.8h\n"
"fmla v28.8h, v2.8h, v9.8h\n"
"fmla v29.8h, v1.8h, v9.8h\n"
- "ld1 { v9.8h }, [x28]\n"
+ "ld1 { v9.8h }, [x27]\n"
"fmla v30.8h, v0.8h, v11.8h\n"
"fmla v31.8h, v2.8h, v12.8h\n"
"fmla v28.8h, v8.8h, v10.8h\n"
"fmla v29.8h, v7.8h, v10.8h\n"
- "ldr q10, [x28, x26]\n"
- "add x28, x28, #0x10\n"
+ "ldr q10, [x27, x25]\n"
+ "add x27, x27, #0x10\n"
"fmla v30.8h, v3.8h, v9.8h\n"
"fmla v31.8h, v5.8h, v10.8h\n"
"fmla v28.8h, v3.8h, v11.8h\n"
- "ldr q11, [x27, x15]\n"
+ "ldr q11, [x26, x14]\n"
"fmla v29.8h, v5.8h, v12.8h\n"
- "ldr q12, [x27, x11]\n"
+ "ldr q12, [x26, x10]\n"
"fmla v30.8h, v7.8h, v11.8h\n"
"fmla v31.8h, v6.8h, v11.8h\n"
- "add x27, x27, #0x10\n"
+ "add x26, x26, #0x10\n"
"fmla v28.8h, v6.8h, v9.8h\n"
"fmla v29.8h, v8.8h, v10.8h\n"
"fmax v28.8h, v28.8h, v18.8h\n"
@@ -289,83 +289,83 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"fmax v31.8h, v31.8h, v18.8h\n"
"fmin v28.8h, v28.8h, v17.8h\n"
"fmin v29.8h, v29.8h, v17.8h\n"
- "st1 { v28.8h }, [x12]\n"
+ "st1 { v28.8h }, [x11]\n"
"fmin v30.8h, v30.8h, v17.8h\n"
"fmin v31.8h, v31.8h, v17.8h\n"
- "str q29, [x12, x14]\n"
- "add x12, x12, #0x10\n"
- "st1 { v30.8h }, [x25]\n"
- "str q31, [x25, x14]\n"
- "add x25, x25, #0x10\n"
+ "str q29, [x11, x13]\n"
+ "add x11, x11, #0x10\n"
+ "st1 { v30.8h }, [x24]\n"
+ "str q31, [x24, x13]\n"
+ "add x24, x24, #0x10\n"
"4:" // Tile loop: Oddments
"tst %x[n_channels], #0x7\n"
"beq 57f\n"
- "ldr q16, [x10, #0x0]\n"
- "ldr q0, [x10, #0x10]\n"
- "add x24, x9, x15\n"
- "add x23, x13, XZR\n"
- "ldr q1, [x10, #0x20]\n"
- "ldr q2, [x10, #0x30]\n"
- "add x22, x13, x26\n"
- "add x21, x9, x11\n"
- "ldr q3, [x10, #0x40]\n"
- "ldr q4, [x10, #0x50]\n"
- "add x20, x28, x15\n"
- "ldr q5, [x10, #0x60]\n"
- "ldr q6, [x10, #0x70]\n"
- "ldr q7, [x10, #0x80]\n"
- "ldr q8, [x10, #0x90]\n"
+ "ldr q16, [x9, #0x0]\n"
+ "ldr q0, [x9, #0x10]\n"
+ "ldr q1, [x9, #0x20]\n"
+ "ldr q2, [x9, #0x30]\n"
+ "add x23, x28, x14\n"
+ "add x22, x12, XZR\n"
+ "ldr q3, [x9, #0x40]\n"
+ "ldr q4, [x9, #0x50]\n"
+ "add x21, x12, x25\n"
+ "add x20, x28, x10\n"
+ "ldr q5, [x9, #0x60]\n"
+ "ldr q6, [x9, #0x70]\n"
+ "add x19, x27, x14\n"
+ "ldr q7, [x9, #0x80]\n"
+ "ldr q8, [x9, #0x90]\n"
"tbz %x[n_channels], #2, 6f\n"
- "ldr d9, [x24], #0x8\n"
- "ldr d10, [x23], #0x8\n"
- "ldr d11, [x22], #0x8\n"
- "ldr d12, [x21], #0x8\n"
- "ldr d13, [x20], #0x8\n"
+ "ldr d9, [x23], #0x8\n"
+ "ldr d10, [x22], #0x8\n"
+ "ldr d11, [x21], #0x8\n"
+ "ldr d12, [x20], #0x8\n"
+ "ldr d13, [x19], #0x8\n"
"tbz %x[n_channels], #1, 5f\n"
- "ld1 { v9.s }[2], [x24], #0x4\n"
- "ld1 { v10.s }[2], [x23], #0x4\n"
- "ld1 { v11.s }[2], [x22], #0x4\n"
- "ld1 { v12.s }[2], [x21], #0x4\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x23], #0x4\n"
+ "ld1 { v10.s }[2], [x22], #0x4\n"
+ "ld1 { v11.s }[2], [x21], #0x4\n"
+ "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 8f\n"
- "ld1 { v9.h }[6], [x24]\n"
- "ld1 { v10.h }[6], [x23]\n"
- "ld1 { v11.h }[6], [x22]\n"
- "ld1 { v12.h }[6], [x21]\n"
- "ld1 { v13.h }[6], [x20]\n"
+ "ld1 { v9.h }[6], [x23]\n"
+ "ld1 { v10.h }[6], [x22]\n"
+ "ld1 { v11.h }[6], [x21]\n"
+ "ld1 { v12.h }[6], [x20]\n"
+ "ld1 { v13.h }[6], [x19]\n"
"b 8f\n"
"5:" // Tile loop: Oddments: Load inputs: (1, 1), (0, 0), (0, 3), (1, 2), (2, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 8f\n"
- "ld1 { v9.h }[4], [x24]\n"
- "ld1 { v10.h }[4], [x23]\n"
- "ld1 { v11.h }[4], [x22]\n"
- "ld1 { v12.h }[4], [x21]\n"
- "ld1 { v13.h }[4], [x20]\n"
+ "ld1 { v9.h }[4], [x23]\n"
+ "ld1 { v10.h }[4], [x22]\n"
+ "ld1 { v11.h }[4], [x21]\n"
+ "ld1 { v12.h }[4], [x20]\n"
+ "ld1 { v13.h }[4], [x19]\n"
"b 8f\n"
"6:" // Tile loop: Oddments: Load inputs: (1, 1), (0, 0), (0, 3), (1, 2), (2, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 7f\n"
- "ldr s9, [x24], #0x4\n"
- "ldr s10, [x23], #0x4\n"
- "ldr s11, [x22], #0x4\n"
- "ldr s12, [x21], #0x4\n"
- "ldr s13, [x20], #0x4\n"
+ "ldr s9, [x23], #0x4\n"
+ "ldr s10, [x22], #0x4\n"
+ "ldr s11, [x21], #0x4\n"
+ "ldr s12, [x20], #0x4\n"
+ "ldr s13, [x19], #0x4\n"
"tbz %x[n_channels], #0, 8f\n"
- "ld1 { v9.h }[2], [x24]\n"
- "ld1 { v10.h }[2], [x23]\n"
- "ld1 { v11.h }[2], [x22]\n"
- "ld1 { v12.h }[2], [x21]\n"
- "ld1 { v13.h }[2], [x20]\n"
+ "ld1 { v9.h }[2], [x23]\n"
+ "ld1 { v10.h }[2], [x22]\n"
+ "ld1 { v11.h }[2], [x21]\n"
+ "ld1 { v12.h }[2], [x20]\n"
+ "ld1 { v13.h }[2], [x19]\n"
"b 8f\n"
"7:" // Tile loop: Oddments: Load inputs: (1, 1), (0, 0), (0, 3), (1, 2), (2, 1): Bit 2: Unset: Bit 1: Unset
- "ldr h9, [x24, #0x0]\n"
- "ldr h10, [x23, #0x0]\n"
- "ldr h11, [x22, #0x0]\n"
- "ldr h12, [x21, #0x0]\n"
- "ldr h13, [x20, #0x0]\n"
+ "ldr h9, [x23, #0x0]\n"
+ "ldr h10, [x22, #0x0]\n"
+ "ldr h11, [x21, #0x0]\n"
+ "ldr h12, [x20, #0x0]\n"
+ "ldr h13, [x19, #0x0]\n"
"8:" // Tile loop: Oddments: Load inputs: (1, 1), (0, 0), (0, 3), (1, 2), (2, 1): Bit 2: End
"mov v28.16b, v16.16b\n fmla v28.8h, v4.8h, v9.8h\n"
"mov v29.16b, v16.16b\n fmla v29.8h, v3.8h, v9.8h\n"
- "add x20, x27, XZR\n"
+ "add x19, x26, XZR\n"
"mov v30.16b, v16.16b\n fmla v30.8h, v1.8h, v9.8h\n"
"mov v31.16b, v16.16b\n fmla v31.8h, v0.8h, v9.8h\n"
"fmla v28.8h, v0.8h, v10.8h\n"
@@ -375,258 +375,258 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"fmla v30.8h, v2.8h, v12.8h\n"
"fmla v31.8h, v1.8h, v12.8h\n"
"tbz %x[n_channels], #2, 10f\n"
- "ldr d9, [x20], #0x8\n"
+ "ldr d9, [x19], #0x8\n"
"tbz %x[n_channels], #1, 9f\n"
- "ld1 { v9.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 12f\n"
- "ld1 { v9.h }[6], [x20]\n"
+ "ld1 { v9.h }[6], [x19]\n"
"b 12f\n"
"9:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 12f\n"
- "ld1 { v9.h }[4], [x20]\n"
+ "ld1 { v9.h }[4], [x19]\n"
"b 12f\n"
"10:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 11f\n"
- "ldr s9, [x20], #0x4\n"
+ "ldr s9, [x19], #0x4\n"
"tbz %x[n_channels], #0, 12f\n"
- "ld1 { v9.h }[2], [x20]\n"
+ "ld1 { v9.h }[2], [x19]\n"
"b 12f\n"
"11:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 2: Unset: Bit 1: Unset
- "ldr h9, [x20, #0x0]\n"
+ "ldr h9, [x19, #0x0]\n"
"12:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 2: End
"fmla v30.8h, v6.8h, v9.8h\n"
"fmla v28.8h, v7.8h, v13.8h\n"
- "add x20, x27, x26\n"
+ "add x19, x26, x25\n"
"fmla v29.8h, v6.8h, v13.8h\n"
"fmla v30.8h, v4.8h, v13.8h\n"
"fmla v31.8h, v3.8h, v13.8h\n"
"tbz %x[n_channels], #2, 14f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #1, 13f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 16f\n"
- "ld1 { v11.h }[6], [x20]\n"
+ "ld1 { v11.h }[6], [x19]\n"
"b 16f\n"
"13:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 16f\n"
- "ld1 { v11.h }[4], [x20]\n"
+ "ld1 { v11.h }[4], [x19]\n"
"b 16f\n"
"14:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 15f\n"
- "ldr s11, [x20], #0x4\n"
+ "ldr s11, [x19], #0x4\n"
"tbz %x[n_channels], #0, 16f\n"
- "ld1 { v11.h }[2], [x20]\n"
+ "ld1 { v11.h }[2], [x19]\n"
"b 16f\n"
"15:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 2: Unset: Bit 1: Unset
- "ldr h11, [x20, #0x0]\n"
+ "ldr h11, [x19, #0x0]\n"
"16:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 2: End
"fmla v31.8h, v8.8h, v11.8h\n"
- "add x20, x13, x15\n"
+ "add x19, x12, x14\n"
"tbz %x[n_channels], #2, 18f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #1, 17f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 20f\n"
- "ld1 { v12.h }[6], [x20]\n"
+ "ld1 { v12.h }[6], [x19]\n"
"b 20f\n"
"17:" // Tile loop: Oddments: Load inputs: (0, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 20f\n"
- "ld1 { v12.h }[4], [x20]\n"
+ "ld1 { v12.h }[4], [x19]\n"
"b 20f\n"
"18:" // Tile loop: Oddments: Load inputs: (0, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 19f\n"
- "ldr s12, [x20], #0x4\n"
+ "ldr s12, [x19], #0x4\n"
"tbz %x[n_channels], #0, 20f\n"
- "ld1 { v12.h }[2], [x20]\n"
+ "ld1 { v12.h }[2], [x19]\n"
"b 20f\n"
"19:" // Tile loop: Oddments: Load inputs: (0, 1): Bit 2: Unset: Bit 1: Unset
- "ldr h12, [x20, #0x0]\n"
+ "ldr h12, [x19, #0x0]\n"
"20:" // Tile loop: Oddments: Load inputs: (0, 1): Bit 2: End
"fmla v28.8h, v1.8h, v12.8h\n"
"fmla v29.8h, v0.8h, v12.8h\n"
- "add x20, x13, x11\n"
+ "add x19, x12, x10\n"
"tbz %x[n_channels], #2, 22f\n"
- "ldr d9, [x20], #0x8\n"
+ "ldr d9, [x19], #0x8\n"
"tbz %x[n_channels], #1, 21f\n"
- "ld1 { v9.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 24f\n"
- "ld1 { v9.h }[6], [x20]\n"
+ "ld1 { v9.h }[6], [x19]\n"
"b 24f\n"
"21:" // Tile loop: Oddments: Load inputs: (0, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 24f\n"
- "ld1 { v9.h }[4], [x20]\n"
+ "ld1 { v9.h }[4], [x19]\n"
"b 24f\n"
"22:" // Tile loop: Oddments: Load inputs: (0, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 23f\n"
- "ldr s9, [x20], #0x4\n"
+ "ldr s9, [x19], #0x4\n"
"tbz %x[n_channels], #0, 24f\n"
- "ld1 { v9.h }[2], [x20]\n"
+ "ld1 { v9.h }[2], [x19]\n"
"b 24f\n"
"23:" // Tile loop: Oddments: Load inputs: (0, 2): Bit 2: Unset: Bit 1: Unset
- "ldr h9, [x20, #0x0]\n"
+ "ldr h9, [x19, #0x0]\n"
"24:" // Tile loop: Oddments: Load inputs: (0, 2): Bit 2: End
"fmla v28.8h, v2.8h, v9.8h\n"
"fmla v29.8h, v1.8h, v9.8h\n"
- "add x20, x28, x11\n"
+ "add x19, x27, x10\n"
"tbz %x[n_channels], #2, 26f\n"
- "ldr d10, [x20], #0x8\n"
+ "ldr d10, [x19], #0x8\n"
"tbz %x[n_channels], #1, 25f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 28f\n"
- "ld1 { v10.h }[6], [x20]\n"
+ "ld1 { v10.h }[6], [x19]\n"
"b 28f\n"
"25:" // Tile loop: Oddments: Load inputs: (2, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 28f\n"
- "ld1 { v10.h }[4], [x20]\n"
+ "ld1 { v10.h }[4], [x19]\n"
"b 28f\n"
"26:" // Tile loop: Oddments: Load inputs: (2, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 27f\n"
- "ldr s10, [x20], #0x4\n"
+ "ldr s10, [x19], #0x4\n"
"tbz %x[n_channels], #0, 28f\n"
- "ld1 { v10.h }[2], [x20]\n"
+ "ld1 { v10.h }[2], [x19]\n"
"b 28f\n"
"27:" // Tile loop: Oddments: Load inputs: (2, 2): Bit 2: Unset: Bit 1: Unset
- "ldr h10, [x20, #0x0]\n"
+ "ldr h10, [x19, #0x0]\n"
"28:" // Tile loop: Oddments: Load inputs: (2, 2): Bit 2: End
"fmla v28.8h, v8.8h, v10.8h\n"
"fmla v29.8h, v7.8h, v10.8h\n"
- "add x20, x9, XZR\n"
+ "add x19, x28, XZR\n"
"fmla v30.8h, v5.8h, v10.8h\n"
"fmla v31.8h, v4.8h, v10.8h\n"
"tbz %x[n_channels], #2, 30f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #1, 29f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 32f\n"
- "ld1 { v11.h }[6], [x20]\n"
+ "ld1 { v11.h }[6], [x19]\n"
"b 32f\n"
"29:" // Tile loop: Oddments: Load inputs: (1, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 32f\n"
- "ld1 { v11.h }[4], [x20]\n"
+ "ld1 { v11.h }[4], [x19]\n"
"b 32f\n"
"30:" // Tile loop: Oddments: Load inputs: (1, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 31f\n"
- "ldr s11, [x20], #0x4\n"
+ "ldr s11, [x19], #0x4\n"
"tbz %x[n_channels], #0, 32f\n"
- "ld1 { v11.h }[2], [x20]\n"
+ "ld1 { v11.h }[2], [x19]\n"
"b 32f\n"
"31:" // Tile loop: Oddments: Load inputs: (1, 0): Bit 2: Unset: Bit 1: Unset
- "ldr h11, [x20, #0x0]\n"
+ "ldr h11, [x19, #0x0]\n"
"32:" // Tile loop: Oddments: Load inputs: (1, 0): Bit 2: End
"fmla v28.8h, v3.8h, v11.8h\n"
"fmla v30.8h, v0.8h, v11.8h\n"
- "add x20, x9, x26\n"
+ "add x19, x28, x25\n"
"tbz %x[n_channels], #2, 34f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #1, 33f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 36f\n"
- "ld1 { v12.h }[6], [x20]\n"
+ "ld1 { v12.h }[6], [x19]\n"
"b 36f\n"
"33:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 36f\n"
- "ld1 { v12.h }[4], [x20]\n"
+ "ld1 { v12.h }[4], [x19]\n"
"b 36f\n"
"34:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 35f\n"
- "ldr s12, [x20], #0x4\n"
+ "ldr s12, [x19], #0x4\n"
"tbz %x[n_channels], #0, 36f\n"
- "ld1 { v12.h }[2], [x20]\n"
+ "ld1 { v12.h }[2], [x19]\n"
"b 36f\n"
"35:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 2: Unset: Bit 1: Unset
- "ldr h12, [x20, #0x0]\n"
+ "ldr h12, [x19, #0x0]\n"
"36:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 2: End
"fmla v29.8h, v5.8h, v12.8h\n"
"fmla v31.8h, v2.8h, v12.8h\n"
- "add x20, x28, XZR\n"
+ "add x19, x27, XZR\n"
"tbz %x[n_channels], #2, 38f\n"
- "ldr d9, [x20], #0x8\n"
+ "ldr d9, [x19], #0x8\n"
"tbz %x[n_channels], #1, 37f\n"
- "ld1 { v9.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 40f\n"
- "ld1 { v9.h }[6], [x20]\n"
+ "ld1 { v9.h }[6], [x19]\n"
"b 40f\n"
"37:" // Tile loop: Oddments: Load inputs: (2, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 40f\n"
- "ld1 { v9.h }[4], [x20]\n"
+ "ld1 { v9.h }[4], [x19]\n"
"b 40f\n"
"38:" // Tile loop: Oddments: Load inputs: (2, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 39f\n"
- "ldr s9, [x20], #0x4\n"
+ "ldr s9, [x19], #0x4\n"
"tbz %x[n_channels], #0, 40f\n"
- "ld1 { v9.h }[2], [x20]\n"
+ "ld1 { v9.h }[2], [x19]\n"
"b 40f\n"
"39:" // Tile loop: Oddments: Load inputs: (2, 0): Bit 2: Unset: Bit 1: Unset
- "ldr h9, [x20, #0x0]\n"
+ "ldr h9, [x19, #0x0]\n"
"40:" // Tile loop: Oddments: Load inputs: (2, 0): Bit 2: End
"fmla v28.8h, v6.8h, v9.8h\n"
"fmla v30.8h, v3.8h, v9.8h\n"
- "add x20, x28, x26\n"
+ "add x19, x27, x25\n"
"tbz %x[n_channels], #2, 42f\n"
- "ldr d10, [x20], #0x8\n"
+ "ldr d10, [x19], #0x8\n"
"tbz %x[n_channels], #1, 41f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 44f\n"
- "ld1 { v10.h }[6], [x20]\n"
+ "ld1 { v10.h }[6], [x19]\n"
"b 44f\n"
"41:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 44f\n"
- "ld1 { v10.h }[4], [x20]\n"
+ "ld1 { v10.h }[4], [x19]\n"
"b 44f\n"
"42:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 43f\n"
- "ldr s10, [x20], #0x4\n"
+ "ldr s10, [x19], #0x4\n"
"tbz %x[n_channels], #0, 44f\n"
- "ld1 { v10.h }[2], [x20]\n"
+ "ld1 { v10.h }[2], [x19]\n"
"b 44f\n"
"43:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 2: Unset: Bit 1: Unset
- "ldr h10, [x20, #0x0]\n"
+ "ldr h10, [x19, #0x0]\n"
"44:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 2: End
"fmla v29.8h, v8.8h, v10.8h\n"
"fmla v31.8h, v5.8h, v10.8h\n"
- "add x20, x27, x15\n"
+ "add x19, x26, x14\n"
"tbz %x[n_channels], #2, 46f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #1, 45f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 48f\n"
- "ld1 { v11.h }[6], [x20]\n"
+ "ld1 { v11.h }[6], [x19]\n"
"b 48f\n"
"45:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 48f\n"
- "ld1 { v11.h }[4], [x20]\n"
+ "ld1 { v11.h }[4], [x19]\n"
"b 48f\n"
"46:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 47f\n"
- "ldr s11, [x20], #0x4\n"
+ "ldr s11, [x19], #0x4\n"
"tbz %x[n_channels], #0, 48f\n"
- "ld1 { v11.h }[2], [x20]\n"
+ "ld1 { v11.h }[2], [x19]\n"
"b 48f\n"
"47:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 2: Unset: Bit 1: Unset
- "ldr h11, [x20, #0x0]\n"
+ "ldr h11, [x19, #0x0]\n"
"48:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 2: End
"fmla v30.8h, v7.8h, v11.8h\n"
"fmla v31.8h, v6.8h, v11.8h\n"
- "add x20, x27, x11\n"
+ "add x19, x26, x10\n"
"tbz %x[n_channels], #2, 50f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #1, 49f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 52f\n"
- "ld1 { v12.h }[6], [x20]\n"
+ "ld1 { v12.h }[6], [x19]\n"
"b 52f\n"
"49:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 52f\n"
- "ld1 { v12.h }[4], [x20]\n"
+ "ld1 { v12.h }[4], [x19]\n"
"b 52f\n"
"50:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 51f\n"
- "ldr s12, [x20], #0x4\n"
+ "ldr s12, [x19], #0x4\n"
"tbz %x[n_channels], #0, 52f\n"
- "ld1 { v12.h }[2], [x20]\n"
+ "ld1 { v12.h }[2], [x19]\n"
"b 52f\n"
"51:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 2: Unset: Bit 1: Unset
- "ldr h12, [x20, #0x0]\n"
+ "ldr h12, [x19, #0x0]\n"
"52:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 2: End
"fmla v30.8h, v8.8h, v12.8h\n"
"fmla v31.8h, v7.8h, v12.8h\n"
@@ -639,82 +639,82 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"fmin v30.8h, v30.8h, v17.8h\n"
"fmin v31.8h, v31.8h, v17.8h\n"
"tbz %x[n_channels], #2, 54f\n"
- "mov x21, x12\n"
- "mov x20, x25\n"
- "st1 { v28.d }[0], [x21], x14\n"
- "st1 { v30.d }[0], [x20], x14\n"
- "add x12, x12, #0x8\n"
- "add x25, x25, #0x8\n"
- "st1 { v29.d }[0], [x21]\n"
- "st1 { v31.d }[0], [x20]\n"
+ "mov x20, x11\n"
+ "mov x19, x24\n"
+ "st1 { v28.d }[0], [x20], x13\n"
+ "add x11, x11, #0x8\n"
+ "add x24, x24, #0x8\n"
+ "st1 { v30.d }[0], [x19], x13\n"
+ "st1 { v29.d }[0], [x20]\n"
+ "st1 { v31.d }[0], [x19]\n"
"tbz %x[n_channels], #1, 53f\n"
- "mov x21, x12\n"
- "mov x20, x25\n"
- "st1 { v28.s }[2], [x21], x14\n"
- "st1 { v30.s }[2], [x20], x14\n"
- "add x12, x12, #0x4\n"
- "add x25, x25, #0x4\n"
- "st1 { v29.s }[2], [x21]\n"
- "st1 { v31.s }[2], [x20]\n"
+ "mov x20, x11\n"
+ "mov x19, x24\n"
+ "st1 { v28.s }[2], [x20], x13\n"
+ "add x11, x11, #0x4\n"
+ "add x24, x24, #0x4\n"
+ "st1 { v30.s }[2], [x19], x13\n"
+ "st1 { v29.s }[2], [x20]\n"
+ "st1 { v31.s }[2], [x19]\n"
"tbz %x[n_channels], #0, 56f\n"
- "mov x21, x12\n"
- "mov x20, x25\n"
- "st1 { v28.h }[6], [x21], x14\n"
- "st1 { v30.h }[6], [x20], x14\n"
- "st1 { v29.h }[6], [x21]\n"
- "st1 { v31.h }[6], [x20]\n"
+ "mov x20, x11\n"
+ "mov x19, x24\n"
+ "st1 { v28.h }[6], [x20], x13\n"
+ "st1 { v30.h }[6], [x19], x13\n"
+ "st1 { v29.h }[6], [x20]\n"
+ "st1 { v31.h }[6], [x19]\n"
"b 56f\n"
"53:" // Tile loop: Oddments: Store: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 56f\n"
- "mov x21, x12\n"
- "mov x20, x25\n"
- "st1 { v28.h }[4], [x21], x14\n"
- "st1 { v30.h }[4], [x20], x14\n"
- "st1 { v29.h }[4], [x21]\n"
- "st1 { v31.h }[4], [x20]\n"
+ "mov x20, x11\n"
+ "mov x19, x24\n"
+ "st1 { v28.h }[4], [x20], x13\n"
+ "st1 { v30.h }[4], [x19], x13\n"
+ "st1 { v29.h }[4], [x20]\n"
+ "st1 { v31.h }[4], [x19]\n"
"b 56f\n"
"54:" // Tile loop: Oddments: Store: Bit 2: Unset
"tbz %x[n_channels], #1, 55f\n"
- "mov x21, x12\n"
- "mov x20, x25\n"
- "st1 { v28.s }[0], [x21], x14\n"
- "st1 { v30.s }[0], [x20], x14\n"
- "add x12, x12, #0x4\n"
- "add x25, x25, #0x4\n"
- "st1 { v29.s }[0], [x21]\n"
- "st1 { v31.s }[0], [x20]\n"
+ "mov x20, x11\n"
+ "mov x19, x24\n"
+ "st1 { v28.s }[0], [x20], x13\n"
+ "st1 { v30.s }[0], [x19], x13\n"
+ "add x11, x11, #0x4\n"
+ "add x24, x24, #0x4\n"
+ "st1 { v29.s }[0], [x20]\n"
+ "st1 { v31.s }[0], [x19]\n"
"tbz %x[n_channels], #0, 56f\n"
- "mov x21, x12\n"
- "mov x20, x25\n"
- "st1 { v28.h }[2], [x21], x14\n"
- "st1 { v30.h }[2], [x20], x14\n"
- "st1 { v29.h }[2], [x21]\n"
- "st1 { v31.h }[2], [x20]\n"
+ "mov x20, x11\n"
+ "mov x19, x24\n"
+ "st1 { v28.h }[2], [x20], x13\n"
+ "st1 { v30.h }[2], [x19], x13\n"
+ "st1 { v29.h }[2], [x20]\n"
+ "st1 { v31.h }[2], [x19]\n"
"b 56f\n"
"55:" // Tile loop: Oddments: Store: Bit 2: Unset: Bit 1: Unset
- "mov x21, x12\n"
- "mov x20, x25\n"
- "st1 { v28.h }[0], [x21], x14\n"
- "st1 { v30.h }[0], [x20], x14\n"
- "st1 { v29.h }[0], [x21]\n"
- "st1 { v31.h }[0], [x20]\n"
+ "mov x20, x11\n"
+ "mov x19, x24\n"
+ "st1 { v28.h }[0], [x20], x13\n"
+ "st1 { v30.h }[0], [x19], x13\n"
+ "st1 { v29.h }[0], [x20]\n"
+ "st1 { v31.h }[0], [x19]\n"
"56:" // Tile loop: Oddments: Store: Bit 2: End
"57:" // Tile loop: End
- "ldr x22, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x23, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "add x22, x22, #0x1\n"
- "add x21, x23, #0x1\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "cmp x22, x20\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "csel x23, x23, x21, LT\n"
- "csel x22, x22, XZR, LT\n"
- "cmp x23, x20\n"
+ "ldr x21, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "add x21, x21, #0x1\n"
+ "add x20, x22, #0x1\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "cmp x21, x19\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "csel x22, x22, x20, LT\n"
+ "csel x21, x21, XZR, LT\n"
+ "cmp x22, x19\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
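
// Editorial sketch, not part of the patch: the hunks above change no arithmetic;
// the revert only returns x19 to the scratch pool, so each x20..x25 use shifts
// down by one register and "x19" reappears in the clobber list. A minimal,
// hypothetical example of the clobber-list convention these kernels rely on
// (assuming an AArch64 GNU-style toolchain; add_with_scratch is an illustrative
// name, not a function from the library):
//
//   #include <cstdio>
//
//   static long add_with_scratch(long a, long b)
//   {
//       long result;
//       __asm__ __volatile__(
//           "mov x19, %x[a]\n"          // use x19 as a scratch register
//           "add %x[res], x19, %x[b]\n"
//           : [res] "=r" (result)
//           : [a] "r" (a), [b] "r" (b)
//           : "x19"                      // declare the clobber, as the kernels do
//       );
//       return result;
//   }
//
//   int main() { std::printf("%ld\n", add_with_scratch(2, 3)); }  // prints 5
//
// Because x19 is callee-saved under AAPCS64, naming it in the clobber list
// obliges the compiler to save and restore it around the asm block, which is
// why the clobber lists in this patch grow by exactly that one register.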
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
index bea4715313..a0a44997d1 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -83,18 +83,21 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"lsr x15, %x[n_channels], #0x3\n"
"ldr x14, [%x[params_struct], %[offsetof_args_params]]\n"
"add x20, %x[params_struct], %[offsetof_args_min]\n"
+ "add x19, %x[params_struct], %[offsetof_args_max]\n"
+ "ldp x13, x12, [x21, #0x0]\n"
+ "ldp x11, x10, [x21, #0x10]\n"
+ "add x9, %x[params_struct], %[offsetof_Args_inptrs]\n"
"ld1r { v18.8h }, [x20]\n"
- "add x20, %x[params_struct], %[offsetof_args_max]\n"
- "ld1r { v17.8h }, [x20]\n"
- "add x13, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "ldp x12, x11, [x21, #0x0]\n"
- "ldp x10, x9, [x21, #0x10]\n"
+ "ld1r { v17.8h }, [x19]\n"
"mov x28, #0x0\n"
"sub x27, XZR, x16\n"
"cbz x15, 3f\n"
+ "ldp x26, x25, [x9, #0x0]\n"
+ "ldp x24, x23, [x9, #0x10]\n"
+ "ldr x22, [x9, #0x20]\n"
+ "cmp x16, x15, LSL #4\n"
"ldr q16, [x14, #0x0]\n"
"ldr q0, [x14, #0x10]\n"
- "cmp x16, x15, LSL #4\n"
"ldr q1, [x14, #0x20]\n"
"ldr q2, [x14, #0x30]\n"
"ldr q3, [x14, #0x40]\n"
@@ -103,165 +106,162 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q6, [x14, #0x70]\n"
"ldr q7, [x14, #0x80]\n"
"ldr q8, [x14, #0x90]\n"
- "add x14, x14, #0xa0\n"
- "ldp x26, x22, [x13, #0x0]\n"
"ldr q9, [x26, x28]\n"
- "ldr q10, [x22, x28]\n"
- "ldp x25, x24, [x13, #0x10]\n"
- "ldr q11, [x25, x28]\n"
- "ldr q12, [x24, x28]\n"
- "ldr x23, [x13, #0x20]\n"
- "ldr q13, [x23, x28]\n"
+ "add x14, x14, #0xa0\n"
+ "ldr q10, [x25, x28]\n"
+ "ldr q11, [x24, x28]\n"
+ "ldr q12, [x23, x28]\n"
+ "ldr q13, [x22, x28]\n"
"bge 2f\n"
"1:" // Channel loop
"mov v28.16b, v16.16b\n fmla v28.8h, v4.8h, v9.8h\n"
"mov v29.16b, v16.16b\n fmla v29.8h, v3.8h, v9.8h\n"
- "ldr x22, [x13, #0x28]\n"
- "ldr x21, [x13, #0x30]\n"
+ "ldr x21, [x9, #0x28]\n"
+ "ldr x20, [x9, #0x30]\n"
"mov v30.16b, v16.16b\n fmla v30.8h, v1.8h, v9.8h\n"
"mov v31.16b, v16.16b\n fmla v31.8h, v0.8h, v9.8h\n"
- "ldr q9, [x22, x28]\n"
- "ldr q16, [x14, #0x0]\n"
+ "ldr q9, [x21, x28]\n"
+ "ldr x19, [x9, #0x38]\n"
"fmla v28.8h, v0.8h, v10.8h\n"
"fmla v29.8h, v2.8h, v11.8h\n"
- "ldr q11, [x21, x28]\n"
- "ldr x20, [x13, #0x38]\n"
+ "ldr q11, [x20, x28]\n"
+ "ldr x25, [x9, #0x48]\n"
"fmla v30.8h, v2.8h, v12.8h\n"
"fmla v31.8h, v1.8h, v12.8h\n"
- "ldr x22, [x13, #0x48]\n"
- "ldr q10, [x22, x28]\n"
+ "ldr x26, [x9, #0x40]\n"
+ "ldr q10, [x25, x28]\n"
"fmla v28.8h, v5.8h, v12.8h\n"
"fmla v29.8h, v4.8h, v12.8h\n"
- "ldr q12, [x20, x28]\n"
- "ldr x26, [x13, #0x40]\n"
+ "ldr q12, [x19, x28]\n"
+ "ldr x24, [x9, #0x50]\n"
"fmla v30.8h, v6.8h, v9.8h\n"
- "ldr q9, [x26, x28]\n"
"fmla v31.8h, v3.8h, v13.8h\n"
- "ldr x25, [x13, #0x50]\n"
+ "ldr q9, [x26, x28]\n"
+ "ldr x23, [x9, #0x58]\n"
"fmla v28.8h, v7.8h, v13.8h\n"
"fmla v29.8h, v6.8h, v13.8h\n"
- "ldr x24, [x13, #0x58]\n"
- "ldr x23, [x13, #0x60]\n"
+ "ldr x22, [x9, #0x60]\n"
+ "ldr x21, [x9, #0x68]\n"
"fmla v30.8h, v4.8h, v13.8h\n"
"fmla v31.8h, v8.8h, v11.8h\n"
- "ldr q11, [x25, x28]\n"
- "ldr x22, [x13, #0x68]\n"
+ "ldr q11, [x24, x28]\n"
+ "ldr x20, [x9, #0x70]\n"
"fmla v28.8h, v1.8h, v12.8h\n"
"fmla v29.8h, v0.8h, v12.8h\n"
- "ldr q12, [x24, x28]\n"
- "ldr x21, [x13, #0x70]\n"
+ "ldr q12, [x23, x28]\n"
+ "ldr x19, [x9, #0x78]\n"
"fmla v30.8h, v5.8h, v10.8h\n"
"fmla v31.8h, v4.8h, v10.8h\n"
- "ldr q4, [x14, #0x50]\n"
- "ldr x20, [x13, #0x78]\n"
+ "ldp x26, x25, [x9, #0x0]\n"
+ "ldp x24, x23, [x9, #0x10]\n"
"fmla v28.8h, v2.8h, v9.8h\n"
"fmla v29.8h, v1.8h, v9.8h\n"
- "ldr q9, [x23, x28]\n"
- "ldr q1, [x14, #0x20]\n"
+ "ldr q9, [x22, x28]\n"
+ "ldr x22, [x9, #0x20]\n"
"fmla v30.8h, v0.8h, v11.8h\n"
- "ldr q0, [x14, #0x10]\n"
"fmla v31.8h, v2.8h, v12.8h\n"
- "ldr q2, [x14, #0x30]\n"
+ "ldr q13, [x22, x16]\n"
+ "add x27, x27, #0x10\n"
"fmla v28.8h, v8.8h, v10.8h\n"
"fmla v29.8h, v7.8h, v10.8h\n"
- "ldr q10, [x22, x28]\n"
- "ldp x26, x22, [x13, #0x0]\n"
+ "ldr q10, [x21, x28]\n"
+ "ldr q16, [x14, #0x0]\n"
"fmla v30.8h, v3.8h, v9.8h\n"
"fmla v31.8h, v5.8h, v10.8h\n"
- "ldp x25, x24, [x13, #0x10]\n"
- "ldr x23, [x13, #0x20]\n"
- "ldr q13, [x23, x16]\n"
+ "ldr q0, [x14, #0x10]\n"
+ "ldr q1, [x14, #0x20]\n"
"fmla v28.8h, v3.8h, v11.8h\n"
- "ldr q11, [x21, x28]\n"
+ "ldr q11, [x20, x28]\n"
"fmla v29.8h, v5.8h, v12.8h\n"
- "ldr q12, [x20, x28]\n"
- "ldr q3, [x14, #0x40]\n"
+ "ldr q12, [x19, x28]\n"
"fmla v30.8h, v7.8h, v11.8h\n"
"fmla v31.8h, v6.8h, v11.8h\n"
- "ldr q11, [x25, x16]\n"
- "ldr q5, [x14, #0x60]\n"
+ "ldr q11, [x24, x16]\n"
+ "add x28, x28, #0x10\n"
"fmla v28.8h, v6.8h, v9.8h\n"
"fmla v29.8h, v8.8h, v10.8h\n"
+ "fmax v28.8h, v28.8h, v18.8h\n"
"ldr q9, [x26, x16]\n"
- "ldr q10, [x22, x16]\n"
"fmla v30.8h, v8.8h, v12.8h\n"
"fmla v31.8h, v7.8h, v12.8h\n"
- "ldr q12, [x24, x16]\n"
- "ldr q6, [x14, #0x70]\n"
- "fmax v28.8h, v28.8h, v18.8h\n"
"fmax v29.8h, v29.8h, v18.8h\n"
- "ldr q7, [x14, #0x80]\n"
- "ldr q8, [x14, #0x90]\n"
+ "ldr q10, [x25, x16]\n"
"fmax v30.8h, v30.8h, v18.8h\n"
"fmax v31.8h, v31.8h, v18.8h\n"
+ "ldr q12, [x23, x16]\n"
"add x16, x16, #0x10\n"
- "add x27, x27, #0x10\n"
+ "cmp x16, x15, LSL #4\n"
"fmin v28.8h, v28.8h, v17.8h\n"
"fmin v29.8h, v29.8h, v17.8h\n"
- "cmp x16, x15, LSL #4\n"
+ "str q28, [x13, x27]\n"
"fmin v30.8h, v30.8h, v17.8h\n"
"fmin v31.8h, v31.8h, v17.8h\n"
- "add x28, x28, #0x10\n"
- "str q28, [x12, x27]\n"
+ "str q29, [x12, x27]\n"
+ "ldr q2, [x14, #0x30]\n"
+ "str q30, [x11, x27]\n"
+ "ldr q3, [x14, #0x40]\n"
+ "ldr q4, [x14, #0x50]\n"
+ "str q31, [x10, x27]\n"
+ "ldr q5, [x14, #0x60]\n"
+ "ldr q6, [x14, #0x70]\n"
+ "ldr q7, [x14, #0x80]\n"
+ "ldr q8, [x14, #0x90]\n"
"add x14, x14, #0xa0\n"
- "str q29, [x11, x27]\n"
- "str q30, [x10, x27]\n"
- "str q31, [x9, x27]\n"
"blt 1b\n"
"2:" // Channel tail
"mov v28.16b, v16.16b\n fmla v28.8h, v4.8h, v9.8h\n"
"mov v29.16b, v16.16b\n fmla v29.8h, v3.8h, v9.8h\n"
- "ldr x22, [x13, #0x28]\n"
- "ldr x21, [x13, #0x30]\n"
+ "ldr x21, [x9, #0x28]\n"
+ "ldr x20, [x9, #0x30]\n"
"mov v30.16b, v16.16b\n fmla v30.8h, v1.8h, v9.8h\n"
"mov v31.16b, v16.16b\n fmla v31.8h, v0.8h, v9.8h\n"
- "ldr q9, [x22, x28]\n"
- "ldr x20, [x13, #0x38]\n"
+ "ldr q9, [x21, x28]\n"
+ "ldr x19, [x9, #0x38]\n"
"fmla v28.8h, v0.8h, v10.8h\n"
"fmla v29.8h, v2.8h, v11.8h\n"
- "ldr q11, [x21, x28]\n"
- "ldr x22, [x13, #0x48]\n"
- "ldr q10, [x22, x28]\n"
+ "ldr q11, [x20, x28]\n"
+ "ldr x25, [x9, #0x48]\n"
"fmla v30.8h, v2.8h, v12.8h\n"
"fmla v31.8h, v1.8h, v12.8h\n"
- "ldr x26, [x13, #0x40]\n"
+ "ldr x26, [x9, #0x40]\n"
+ "ldr q10, [x25, x28]\n"
"fmla v28.8h, v5.8h, v12.8h\n"
"fmla v29.8h, v4.8h, v12.8h\n"
- "ldr q12, [x20, x28]\n"
- "ldr x25, [x13, #0x50]\n"
+ "ldr q12, [x19, x28]\n"
+ "ldr x24, [x9, #0x50]\n"
"fmla v30.8h, v6.8h, v9.8h\n"
- "ldr q9, [x26, x28]\n"
"fmla v31.8h, v3.8h, v13.8h\n"
- "ldr x24, [x13, #0x58]\n"
+ "ldr q9, [x26, x28]\n"
+ "ldr x23, [x9, #0x58]\n"
"fmla v28.8h, v7.8h, v13.8h\n"
"fmla v29.8h, v6.8h, v13.8h\n"
- "ldr x23, [x13, #0x60]\n"
- "ldr x22, [x13, #0x68]\n"
+ "ldr x22, [x9, #0x60]\n"
+ "ldr x21, [x9, #0x68]\n"
"fmla v30.8h, v4.8h, v13.8h\n"
"fmla v31.8h, v8.8h, v11.8h\n"
- "ldr q11, [x25, x28]\n"
- "ldr x21, [x13, #0x70]\n"
+ "ldr q11, [x24, x28]\n"
+ "ldr x20, [x9, #0x70]\n"
"fmla v28.8h, v1.8h, v12.8h\n"
"fmla v29.8h, v0.8h, v12.8h\n"
- "ldr q12, [x24, x28]\n"
- "ldr x20, [x13, #0x78]\n"
+ "ldr q12, [x23, x28]\n"
+ "ldr x19, [x9, #0x78]\n"
"fmla v30.8h, v5.8h, v10.8h\n"
"fmla v31.8h, v4.8h, v10.8h\n"
"add x27, x27, #0x10\n"
"fmla v28.8h, v2.8h, v9.8h\n"
"fmla v29.8h, v1.8h, v9.8h\n"
- "ldr q9, [x23, x28]\n"
+ "ldr q9, [x22, x28]\n"
"fmla v30.8h, v0.8h, v11.8h\n"
"fmla v31.8h, v2.8h, v12.8h\n"
"fmla v28.8h, v8.8h, v10.8h\n"
"fmla v29.8h, v7.8h, v10.8h\n"
- "ldr q10, [x22, x28]\n"
+ "ldr q10, [x21, x28]\n"
"fmla v30.8h, v3.8h, v9.8h\n"
"fmla v31.8h, v5.8h, v10.8h\n"
"fmla v28.8h, v3.8h, v11.8h\n"
- "ldr q11, [x21, x28]\n"
+ "ldr q11, [x20, x28]\n"
"fmla v29.8h, v5.8h, v12.8h\n"
- "ldr q12, [x20, x28]\n"
+ "ldr q12, [x19, x28]\n"
"fmla v30.8h, v7.8h, v11.8h\n"
"fmla v31.8h, v6.8h, v11.8h\n"
"add x28, x28, #0x10\n"
@@ -275,92 +275,92 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"fmax v31.8h, v31.8h, v18.8h\n"
"fmin v28.8h, v28.8h, v17.8h\n"
"fmin v29.8h, v29.8h, v17.8h\n"
- "str q28, [x12, x27]\n"
+ "str q28, [x13, x27]\n"
"fmin v30.8h, v30.8h, v17.8h\n"
"fmin v31.8h, v31.8h, v17.8h\n"
- "str q29, [x11, x27]\n"
- "str q30, [x10, x27]\n"
- "str q31, [x9, x27]\n"
+ "str q29, [x12, x27]\n"
+ "str q30, [x11, x27]\n"
+ "str q31, [x10, x27]\n"
"3:" // Oddments
"tst %x[n_channels], #0x7\n"
"beq 56f\n"
- "ldr q16, [x14, #0x0]\n"
- "ldr q0, [x14, #0x10]\n"
"mov x27, x28\n"
+ "ldr x26, [x9, #0x0]\n"
+ "ldr x25, [x9, #0x8]\n"
+ "ldr x24, [x9, #0x10]\n"
+ "add x13, x13, x27\n"
"add x12, x12, x27\n"
- "ldr q1, [x14, #0x20]\n"
- "ldr q2, [x14, #0x30]\n"
+ "ldr x23, [x9, #0x18]\n"
+ "ldr x22, [x9, #0x20]\n"
"add x11, x11, x27\n"
"add x10, x10, x27\n"
+ "ldr q16, [x14, #0x0]\n"
+ "ldr q0, [x14, #0x10]\n"
+ "add x26, x26, x28\n"
+ "add x25, x25, x28\n"
+ "ldr q1, [x14, #0x20]\n"
+ "ldr q2, [x14, #0x30]\n"
+ "add x24, x24, x28\n"
+ "add x23, x23, x28\n"
"ldr q3, [x14, #0x40]\n"
"ldr q4, [x14, #0x50]\n"
- "add x9, x9, x27\n"
+ "add x22, x22, x28\n"
"ldr q5, [x14, #0x60]\n"
"ldr q6, [x14, #0x70]\n"
"ldr q7, [x14, #0x80]\n"
"ldr q8, [x14, #0x90]\n"
- "ldr x24, [x13, #0x0]\n"
- "ldr x23, [x13, #0x8]\n"
- "add x24, x24, x28\n"
- "add x23, x23, x28\n"
- "ldr x22, [x13, #0x10]\n"
- "ldr x21, [x13, #0x18]\n"
- "add x22, x22, x28\n"
- "add x21, x21, x28\n"
- "ldr x20, [x13, #0x20]\n"
- "add x20, x20, x28\n"
"tbz %x[n_channels], #2, 5f\n"
- "ld1 { v9.d }[0], [x24], #0x8\n"
- "ld1 { v10.d }[0], [x23], #0x8\n"
- "ld1 { v11.d }[0], [x22], #0x8\n"
- "ld1 { v12.d }[0], [x21], #0x8\n"
- "ld1 { v13.d }[0], [x20], #0x8\n"
+ "ld1 { v9.d }[0], [x26], #0x8\n"
+ "ld1 { v10.d }[0], [x25], #0x8\n"
+ "ld1 { v11.d }[0], [x24], #0x8\n"
+ "ld1 { v12.d }[0], [x23], #0x8\n"
+ "ld1 { v13.d }[0], [x22], #0x8\n"
"tbz %x[n_channels], #1, 4f\n"
- "ld1 { v9.s }[2], [x24], #0x4\n"
- "ld1 { v10.s }[2], [x23], #0x4\n"
- "ld1 { v11.s }[2], [x22], #0x4\n"
- "ld1 { v12.s }[2], [x21], #0x4\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x26], #0x4\n"
+ "ld1 { v10.s }[2], [x25], #0x4\n"
+ "ld1 { v11.s }[2], [x24], #0x4\n"
+ "ld1 { v12.s }[2], [x23], #0x4\n"
+ "ld1 { v13.s }[2], [x22], #0x4\n"
"tbz %x[n_channels], #0, 7f\n"
- "ld1 { v9.h }[6], [x24], #0x2\n"
- "ld1 { v10.h }[6], [x23], #0x2\n"
- "ld1 { v11.h }[6], [x22], #0x2\n"
- "ld1 { v12.h }[6], [x21], #0x2\n"
- "ld1 { v13.h }[6], [x20], #0x2\n"
+ "ld1 { v9.h }[6], [x26], #0x2\n"
+ "ld1 { v10.h }[6], [x25], #0x2\n"
+ "ld1 { v11.h }[6], [x24], #0x2\n"
+ "ld1 { v12.h }[6], [x23], #0x2\n"
+ "ld1 { v13.h }[6], [x22], #0x2\n"
"b 7f\n"
"4:" // Oddments: Load inputs (1, 1), (0, 0), (0, 3), (1, 2), (2, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 7f\n"
- "ld1 { v9.h }[4], [x24], #0x2\n"
- "ld1 { v10.h }[4], [x23], #0x2\n"
- "ld1 { v11.h }[4], [x22], #0x2\n"
- "ld1 { v12.h }[4], [x21], #0x2\n"
- "ld1 { v13.h }[4], [x20], #0x2\n"
+ "ld1 { v9.h }[4], [x26], #0x2\n"
+ "ld1 { v10.h }[4], [x25], #0x2\n"
+ "ld1 { v11.h }[4], [x24], #0x2\n"
+ "ld1 { v12.h }[4], [x23], #0x2\n"
+ "ld1 { v13.h }[4], [x22], #0x2\n"
"b 7f\n"
"5:" // Oddments: Load inputs (1, 1), (0, 0), (0, 3), (1, 2), (2, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 6f\n"
- "ld1 { v9.s }[0], [x24], #0x4\n"
- "ld1 { v10.s }[0], [x23], #0x4\n"
- "ld1 { v11.s }[0], [x22], #0x4\n"
- "ld1 { v12.s }[0], [x21], #0x4\n"
- "ld1 { v13.s }[0], [x20], #0x4\n"
+ "ld1 { v9.s }[0], [x26], #0x4\n"
+ "ld1 { v10.s }[0], [x25], #0x4\n"
+ "ld1 { v11.s }[0], [x24], #0x4\n"
+ "ld1 { v12.s }[0], [x23], #0x4\n"
+ "ld1 { v13.s }[0], [x22], #0x4\n"
"tbz %x[n_channels], #0, 7f\n"
- "ld1 { v9.h }[2], [x24], #0x2\n"
- "ld1 { v10.h }[2], [x23], #0x2\n"
- "ld1 { v11.h }[2], [x22], #0x2\n"
- "ld1 { v12.h }[2], [x21], #0x2\n"
- "ld1 { v13.h }[2], [x20], #0x2\n"
+ "ld1 { v9.h }[2], [x26], #0x2\n"
+ "ld1 { v10.h }[2], [x25], #0x2\n"
+ "ld1 { v11.h }[2], [x24], #0x2\n"
+ "ld1 { v12.h }[2], [x23], #0x2\n"
+ "ld1 { v13.h }[2], [x22], #0x2\n"
"b 7f\n"
"6:" // Oddments: Load inputs (1, 1), (0, 0), (0, 3), (1, 2), (2, 1): Bit 2: Unset: Bit 1: Unset
- "ld1 { v9.h }[0], [x24], #0x2\n"
- "ld1 { v10.h }[0], [x23], #0x2\n"
- "ld1 { v11.h }[0], [x22], #0x2\n"
- "ld1 { v12.h }[0], [x21], #0x2\n"
- "ld1 { v13.h }[0], [x20], #0x2\n"
+ "ld1 { v9.h }[0], [x26], #0x2\n"
+ "ld1 { v10.h }[0], [x25], #0x2\n"
+ "ld1 { v11.h }[0], [x24], #0x2\n"
+ "ld1 { v12.h }[0], [x23], #0x2\n"
+ "ld1 { v13.h }[0], [x22], #0x2\n"
"7:" // Oddments: Load inputs (1, 1), (0, 0), (0, 3), (1, 2), (2, 1): Bit 2: End
"mov v28.16b, v16.16b\n fmla v28.8h, v4.8h, v9.8h\n"
"mov v29.16b, v16.16b\n fmla v29.8h, v3.8h, v9.8h\n"
- "ldr x20, [x13, #0x28]\n"
- "add x20, x20, x28\n"
+ "ldr x21, [x9, #0x28]\n"
+ "add x21, x21, x28\n"
"mov v30.16b, v16.16b\n fmla v30.8h, v1.8h, v9.8h\n"
"mov v31.16b, v16.16b\n fmla v31.8h, v0.8h, v9.8h\n"
"fmla v28.8h, v0.8h, v10.8h\n"
@@ -370,27 +370,27 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"fmla v30.8h, v2.8h, v12.8h\n"
"fmla v31.8h, v1.8h, v12.8h\n"
"tbz %x[n_channels], #2, 9f\n"
- "ld1 { v9.d }[0], [x20], #0x8\n"
+ "ld1 { v9.d }[0], [x21], #0x8\n"
"tbz %x[n_channels], #1, 8f\n"
- "ld1 { v9.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x21], #0x4\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v9.h }[6], [x20], #0x2\n"
+ "ld1 { v9.h }[6], [x21], #0x2\n"
"b 11f\n"
"8:" // Oddments: Load input (3, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v9.h }[4], [x20], #0x2\n"
+ "ld1 { v9.h }[4], [x21], #0x2\n"
"b 11f\n"
"9:" // Oddments: Load input (3, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 10f\n"
- "ld1 { v9.s }[0], [x20], #0x4\n"
+ "ld1 { v9.s }[0], [x21], #0x4\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v9.h }[2], [x20], #0x2\n"
+ "ld1 { v9.h }[2], [x21], #0x2\n"
"b 11f\n"
"10:" // Oddments: Load input (3, 0): Bit 2: Unset: Bit 1: Unset
- "ld1 { v9.h }[0], [x20], #0x2\n"
+ "ld1 { v9.h }[0], [x21], #0x2\n"
"11:" // Oddments: Load input (3, 0): Bit 2: End
"fmla v30.8h, v6.8h, v9.8h\n"
- "ldr x20, [x13, #0x30]\n"
+ "ldr x20, [x9, #0x30]\n"
"fmla v28.8h, v7.8h, v13.8h\n"
"add x20, x20, x28\n"
"fmla v29.8h, v6.8h, v13.8h\n"
@@ -416,176 +416,176 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"14:" // Oddments: Load input (3, 3): Bit 2: Unset: Bit 1: Unset
"ld1 { v11.h }[0], [x20], #0x2\n"
"15:" // Oddments: Load input (3, 3): Bit 2: End
- "ldr x20, [x13, #0x38]\n"
+ "ldr x19, [x9, #0x38]\n"
"fmla v31.8h, v8.8h, v11.8h\n"
- "add x20, x20, x28\n"
+ "add x19, x19, x28\n"
"tbz %x[n_channels], #2, 17f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 16f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v12.h }[6], [x20], #0x2\n"
+ "ld1 { v12.h }[6], [x19], #0x2\n"
"b 19f\n"
"16:" // Oddments: Load input (0, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v12.h }[4], [x20], #0x2\n"
+ "ld1 { v12.h }[4], [x19], #0x2\n"
"b 19f\n"
"17:" // Oddments: Load input (0, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 18f\n"
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v12.h }[2], [x20], #0x2\n"
+ "ld1 { v12.h }[2], [x19], #0x2\n"
"b 19f\n"
"18:" // Oddments: Load input (0, 1): Bit 2: Unset: Bit 1: Unset
- "ld1 { v12.h }[0], [x20], #0x2\n"
+ "ld1 { v12.h }[0], [x19], #0x2\n"
"19:" // Oddments: Load input (0, 1): Bit 2: End
- "ldr x20, [x13, #0x40]\n"
+ "ldr x26, [x9, #0x40]\n"
"fmla v28.8h, v1.8h, v12.8h\n"
"fmla v29.8h, v0.8h, v12.8h\n"
- "add x20, x20, x28\n"
+ "add x26, x26, x28\n"
"tbz %x[n_channels], #2, 21f\n"
- "ld1 { v9.d }[0], [x20], #0x8\n"
+ "ld1 { v9.d }[0], [x26], #0x8\n"
"tbz %x[n_channels], #1, 20f\n"
- "ld1 { v9.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x26], #0x4\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v9.h }[6], [x20], #0x2\n"
+ "ld1 { v9.h }[6], [x26], #0x2\n"
"b 23f\n"
"20:" // Oddments: Load input (0, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v9.h }[4], [x20], #0x2\n"
+ "ld1 { v9.h }[4], [x26], #0x2\n"
"b 23f\n"
"21:" // Oddments: Load input (0, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 22f\n"
- "ld1 { v9.s }[0], [x20], #0x4\n"
+ "ld1 { v9.s }[0], [x26], #0x4\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v9.h }[2], [x20], #0x2\n"
+ "ld1 { v9.h }[2], [x26], #0x2\n"
"b 23f\n"
"22:" // Oddments: Load input (0, 2): Bit 2: Unset: Bit 1: Unset
- "ld1 { v9.h }[0], [x20], #0x2\n"
+ "ld1 { v9.h }[0], [x26], #0x2\n"
"23:" // Oddments: Load input (0, 2): Bit 2: End
- "ldr x20, [x13, #0x48]\n"
+ "ldr x25, [x9, #0x48]\n"
"fmla v28.8h, v2.8h, v9.8h\n"
"fmla v29.8h, v1.8h, v9.8h\n"
- "add x20, x20, x28\n"
+ "add x25, x25, x28\n"
"tbz %x[n_channels], #2, 25f\n"
- "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v10.d }[0], [x25], #0x8\n"
"tbz %x[n_channels], #1, 24f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x25], #0x4\n"
"tbz %x[n_channels], #0, 27f\n"
- "ld1 { v10.h }[6], [x20], #0x2\n"
+ "ld1 { v10.h }[6], [x25], #0x2\n"
"b 27f\n"
"24:" // Oddments: Load input (2, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 27f\n"
- "ld1 { v10.h }[4], [x20], #0x2\n"
+ "ld1 { v10.h }[4], [x25], #0x2\n"
"b 27f\n"
"25:" // Oddments: Load input (2, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 26f\n"
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v10.s }[0], [x25], #0x4\n"
"tbz %x[n_channels], #0, 27f\n"
- "ld1 { v10.h }[2], [x20], #0x2\n"
+ "ld1 { v10.h }[2], [x25], #0x2\n"
"b 27f\n"
"26:" // Oddments: Load input (2, 2): Bit 2: Unset: Bit 1: Unset
- "ld1 { v10.h }[0], [x20], #0x2\n"
+ "ld1 { v10.h }[0], [x25], #0x2\n"
"27:" // Oddments: Load input (2, 2): Bit 2: End
- "ldr x20, [x13, #0x50]\n"
+ "ldr x24, [x9, #0x50]\n"
"fmla v28.8h, v8.8h, v10.8h\n"
"fmla v29.8h, v7.8h, v10.8h\n"
- "add x20, x20, x28\n"
+ "add x24, x24, x28\n"
"fmla v30.8h, v5.8h, v10.8h\n"
"fmla v31.8h, v4.8h, v10.8h\n"
"tbz %x[n_channels], #2, 29f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x24], #0x8\n"
"tbz %x[n_channels], #1, 28f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x24], #0x4\n"
"tbz %x[n_channels], #0, 31f\n"
- "ld1 { v11.h }[6], [x20], #0x2\n"
+ "ld1 { v11.h }[6], [x24], #0x2\n"
"b 31f\n"
"28:" // Oddments: Load input (1, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 31f\n"
- "ld1 { v11.h }[4], [x20], #0x2\n"
+ "ld1 { v11.h }[4], [x24], #0x2\n"
"b 31f\n"
"29:" // Oddments: Load input (1, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 30f\n"
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x24], #0x4\n"
"tbz %x[n_channels], #0, 31f\n"
- "ld1 { v11.h }[2], [x20], #0x2\n"
+ "ld1 { v11.h }[2], [x24], #0x2\n"
"b 31f\n"
"30:" // Oddments: Load input (1, 0): Bit 2: Unset: Bit 1: Unset
- "ld1 { v11.h }[0], [x20], #0x2\n"
+ "ld1 { v11.h }[0], [x24], #0x2\n"
"31:" // Oddments: Load input (1, 0): Bit 2: End
- "ldr x20, [x13, #0x58]\n"
+ "ldr x23, [x9, #0x58]\n"
"fmla v28.8h, v3.8h, v11.8h\n"
"fmla v30.8h, v0.8h, v11.8h\n"
- "add x20, x20, x28\n"
+ "add x23, x23, x28\n"
"tbz %x[n_channels], #2, 33f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x23], #0x8\n"
"tbz %x[n_channels], #1, 32f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x23], #0x4\n"
"tbz %x[n_channels], #0, 35f\n"
- "ld1 { v12.h }[6], [x20], #0x2\n"
+ "ld1 { v12.h }[6], [x23], #0x2\n"
"b 35f\n"
"32:" // Oddments: Load input (1, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 35f\n"
- "ld1 { v12.h }[4], [x20], #0x2\n"
+ "ld1 { v12.h }[4], [x23], #0x2\n"
"b 35f\n"
"33:" // Oddments: Load input (1, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 34f\n"
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x23], #0x4\n"
"tbz %x[n_channels], #0, 35f\n"
- "ld1 { v12.h }[2], [x20], #0x2\n"
+ "ld1 { v12.h }[2], [x23], #0x2\n"
"b 35f\n"
"34:" // Oddments: Load input (1, 3): Bit 2: Unset: Bit 1: Unset
- "ld1 { v12.h }[0], [x20], #0x2\n"
+ "ld1 { v12.h }[0], [x23], #0x2\n"
"35:" // Oddments: Load input (1, 3): Bit 2: End
- "ldr x20, [x13, #0x60]\n"
+ "ldr x22, [x9, #0x60]\n"
"fmla v29.8h, v5.8h, v12.8h\n"
"fmla v31.8h, v2.8h, v12.8h\n"
- "add x20, x20, x28\n"
+ "add x22, x22, x28\n"
"tbz %x[n_channels], #2, 37f\n"
- "ld1 { v9.d }[0], [x20], #0x8\n"
+ "ld1 { v9.d }[0], [x22], #0x8\n"
"tbz %x[n_channels], #1, 36f\n"
- "ld1 { v9.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x22], #0x4\n"
"tbz %x[n_channels], #0, 39f\n"
- "ld1 { v9.h }[6], [x20], #0x2\n"
+ "ld1 { v9.h }[6], [x22], #0x2\n"
"b 39f\n"
"36:" // Oddments: Load input (2, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 39f\n"
- "ld1 { v9.h }[4], [x20], #0x2\n"
+ "ld1 { v9.h }[4], [x22], #0x2\n"
"b 39f\n"
"37:" // Oddments: Load input (2, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 38f\n"
- "ld1 { v9.s }[0], [x20], #0x4\n"
+ "ld1 { v9.s }[0], [x22], #0x4\n"
"tbz %x[n_channels], #0, 39f\n"
- "ld1 { v9.h }[2], [x20], #0x2\n"
+ "ld1 { v9.h }[2], [x22], #0x2\n"
"b 39f\n"
"38:" // Oddments: Load input (2, 0): Bit 2: Unset: Bit 1: Unset
- "ld1 { v9.h }[0], [x20], #0x2\n"
+ "ld1 { v9.h }[0], [x22], #0x2\n"
"39:" // Oddments: Load input (2, 0): Bit 2: End
- "ldr x20, [x13, #0x68]\n"
+ "ldr x21, [x9, #0x68]\n"
"fmla v28.8h, v6.8h, v9.8h\n"
"fmla v30.8h, v3.8h, v9.8h\n"
- "add x20, x20, x28\n"
+ "add x21, x21, x28\n"
"tbz %x[n_channels], #2, 41f\n"
- "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v10.d }[0], [x21], #0x8\n"
"tbz %x[n_channels], #1, 40f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x21], #0x4\n"
"tbz %x[n_channels], #0, 43f\n"
- "ld1 { v10.h }[6], [x20], #0x2\n"
+ "ld1 { v10.h }[6], [x21], #0x2\n"
"b 43f\n"
"40:" // Oddments: Load input (2, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 43f\n"
- "ld1 { v10.h }[4], [x20], #0x2\n"
+ "ld1 { v10.h }[4], [x21], #0x2\n"
"b 43f\n"
"41:" // Oddments: Load input (2, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 42f\n"
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v10.s }[0], [x21], #0x4\n"
"tbz %x[n_channels], #0, 43f\n"
- "ld1 { v10.h }[2], [x20], #0x2\n"
+ "ld1 { v10.h }[2], [x21], #0x2\n"
"b 43f\n"
"42:" // Oddments: Load input (2, 3): Bit 2: Unset: Bit 1: Unset
- "ld1 { v10.h }[0], [x20], #0x2\n"
+ "ld1 { v10.h }[0], [x21], #0x2\n"
"43:" // Oddments: Load input (2, 3): Bit 2: End
- "ldr x20, [x13, #0x70]\n"
+ "ldr x20, [x9, #0x70]\n"
"fmla v29.8h, v8.8h, v10.8h\n"
"fmla v31.8h, v5.8h, v10.8h\n"
"add x20, x20, x28\n"
@@ -609,29 +609,29 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"46:" // Oddments: Load input (3, 1): Bit 2: Unset: Bit 1: Unset
"ld1 { v11.h }[0], [x20], #0x2\n"
"47:" // Oddments: Load input (3, 1): Bit 2: End
- "ldr x20, [x13, #0x78]\n"
+ "ldr x19, [x9, #0x78]\n"
"fmla v30.8h, v7.8h, v11.8h\n"
"fmla v31.8h, v6.8h, v11.8h\n"
- "add x20, x20, x28\n"
+ "add x19, x19, x28\n"
"tbz %x[n_channels], #2, 49f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 48f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 51f\n"
- "ld1 { v12.h }[6], [x20], #0x2\n"
+ "ld1 { v12.h }[6], [x19], #0x2\n"
"b 51f\n"
"48:" // Oddments: Load input (3, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 51f\n"
- "ld1 { v12.h }[4], [x20], #0x2\n"
+ "ld1 { v12.h }[4], [x19], #0x2\n"
"b 51f\n"
"49:" // Oddments: Load input (3, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 50f\n"
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 51f\n"
- "ld1 { v12.h }[2], [x20], #0x2\n"
+ "ld1 { v12.h }[2], [x19], #0x2\n"
"b 51f\n"
"50:" // Oddments: Load input (3, 2): Bit 2: Unset: Bit 1: Unset
- "ld1 { v12.h }[0], [x20], #0x2\n"
+ "ld1 { v12.h }[0], [x19], #0x2\n"
"51:" // Oddments: Load input (3, 2): Bit 2: End
"fmla v30.8h, v8.8h, v12.8h\n"
"fmla v31.8h, v7.8h, v12.8h\n"
@@ -644,50 +644,52 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"fmin v30.8h, v30.8h, v17.8h\n"
"fmin v31.8h, v31.8h, v17.8h\n"
"tbz %x[n_channels], #2, 53f\n"
- "st1 { v28.d }[0], [x12], #0x8\n"
- "st1 { v29.d }[0], [x11], #0x8\n"
- "st1 { v30.d }[0], [x10], #0x8\n"
- "st1 { v31.d }[0], [x9], #0x8\n"
+ "st1 { v28.d }[0], [x13], #0x8\n"
+ "st1 { v29.d }[0], [x12], #0x8\n"
+ "st1 { v30.d }[0], [x11], #0x8\n"
+ "st1 { v31.d }[0], [x10], #0x8\n"
"tbz %x[n_channels], #1, 52f\n"
- "st1 { v28.s }[2], [x12], #0x4\n"
- "st1 { v29.s }[2], [x11], #0x4\n"
- "st1 { v30.s }[2], [x10], #0x4\n"
- "st1 { v31.s }[2], [x9], #0x4\n"
+ "st1 { v28.s }[2], [x13], #0x4\n"
+ "st1 { v29.s }[2], [x12], #0x4\n"
+ "st1 { v30.s }[2], [x11], #0x4\n"
+ "st1 { v31.s }[2], [x10], #0x4\n"
"tbz %x[n_channels], #0, 55f\n"
- "st1 { v28.h }[6], [x12], #0x2\n"
- "st1 { v29.h }[6], [x11], #0x2\n"
- "st1 { v30.h }[6], [x10], #0x2\n"
- "st1 { v31.h }[6], [x9], #0x2\n"
+ "st1 { v28.h }[6], [x13], #0x2\n"
+ "st1 { v29.h }[6], [x12], #0x2\n"
+ "st1 { v30.h }[6], [x11], #0x2\n"
+ "st1 { v31.h }[6], [x10], #0x2\n"
"b 55f\n"
"52:" // Oddments: Store: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 55f\n"
- "st1 { v28.h }[4], [x12], #0x2\n"
- "st1 { v29.h }[4], [x11], #0x2\n"
- "st1 { v30.h }[4], [x10], #0x2\n"
- "st1 { v31.h }[4], [x9], #0x2\n"
+ "st1 { v28.h }[4], [x13], #0x2\n"
+ "st1 { v29.h }[4], [x12], #0x2\n"
+ "st1 { v30.h }[4], [x11], #0x2\n"
+ "st1 { v31.h }[4], [x10], #0x2\n"
"b 55f\n"
"53:" // Oddments: Store: Bit 2: Unset
"tbz %x[n_channels], #1, 54f\n"
- "st1 { v28.s }[0], [x12], #0x4\n"
- "st1 { v29.s }[0], [x11], #0x4\n"
- "st1 { v30.s }[0], [x10], #0x4\n"
- "st1 { v31.s }[0], [x9], #0x4\n"
+ "st1 { v28.s }[0], [x13], #0x4\n"
+ "st1 { v29.s }[0], [x12], #0x4\n"
+ "st1 { v30.s }[0], [x11], #0x4\n"
+ "st1 { v31.s }[0], [x10], #0x4\n"
"tbz %x[n_channels], #0, 55f\n"
- "st1 { v28.h }[2], [x12], #0x2\n"
- "st1 { v29.h }[2], [x11], #0x2\n"
- "st1 { v30.h }[2], [x10], #0x2\n"
- "st1 { v31.h }[2], [x9], #0x2\n"
+ "st1 { v28.h }[2], [x13], #0x2\n"
+ "st1 { v29.h }[2], [x12], #0x2\n"
+ "st1 { v30.h }[2], [x11], #0x2\n"
+ "st1 { v31.h }[2], [x10], #0x2\n"
"b 55f\n"
"54:" // Oddments: Store: Bit 2: Unset: Bit 1: Unset
- "st1 { v28.h }[0], [x12], #0x2\n"
- "st1 { v29.h }[0], [x11], #0x2\n"
- "st1 { v30.h }[0], [x10], #0x2\n"
- "st1 { v31.h }[0], [x9], #0x2\n"
+ "st1 { v28.h }[0], [x13], #0x2\n"
+ "st1 { v29.h }[0], [x12], #0x2\n"
+ "st1 { v30.h }[0], [x11], #0x2\n"
+ "st1 { v31.h }[0], [x10], #0x2\n"
"55:" // Oddments: Store: Bit 2: End
+
"56:" // End
+
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
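
// Editorial sketch, not part of the patch: the "Oddments" blocks in the file
// above handle the n_channels % 8 tail by testing the remainder one bit at a
// time (tbz on bits #2, #1, #0) and transferring four, two, then one fp16
// lane (st1 {v.d}[0], {v.s}[k], {v.h}[k]). A scalar C++ analogue of that
// store pattern, offered as a sketch only (store_tail and the uint16_t
// stand-in for __fp16 are assumptions, not library code):
//
//   #include <cstdint>
//   #include <cstring>
//
//   static void store_tail(const uint16_t *src, uint16_t *dst,
//                          unsigned n_channels)
//   {
//       unsigned rem = n_channels & 7u;  // tst %x[n_channels], #0x7
//       if (rem & 4u) {                  // tbz %x[n_channels], #2, ...
//           std::memcpy(dst, src, 4 * sizeof(uint16_t));
//           src += 4; dst += 4;
//       }
//       if (rem & 2u) {                  // tbz %x[n_channels], #1, ...
//           std::memcpy(dst, src, 2 * sizeof(uint16_t));
//           src += 2; dst += 2;
//       }
//       if (rem & 1u) {                  // tbz %x[n_channels], #0, ...
//           *dst = *src;
//       }
//   }
//
// Descending power-of-two transfers keep the tail branch-bounded (at most
// three tests) regardless of the remainder value, which is why the assembly
// forks into the "Bit 2 / Bit 1 / Bit 0" label ladder rather than looping.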
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
index 2b1dc3646d..9b4b3ee50d 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,83 +87,83 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
);
__asm__ __volatile__(
- "mov x24, #0x0\n"
"mov x23, #0x0\n"
+ "mov x22, #0x0\n"
"1:" // Tile loop
- "str x24, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "mov x27, #0x3\n"
+ "str x23, [%x[params_struct], %[offsetof_args_tile_i]]\n"
"mov x26, #0x3\n"
- "str x23, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x25, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x22, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "mul x21, x24, x25\n" // offset = tile_i * ld_input_row
- "ldr x8, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "ldr x17, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "mul x20, x24, x22\n" // offset = tile_i * ld_output_row
- "mov x24, #0x10\n" // cntb _, ALL, #1
- "madd x21, x23, x8, x21\n" // offset += tile_j * ld_input_col
- "ldr x16, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "lsl x8, x8, #0x1\n"
- "ldr x15, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "madd x20, x23, x17, x20\n" // offset += tile_j * ld_output_col
+ "mov x25, #0x3\n"
+ "str x22, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x24, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "ldr x21, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "mul x20, x23, x24\n" // offset = tile_i * ld_input_row
+ "ldr x17, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "ldr x16, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+ "mul x19, x23, x21\n" // offset = tile_i * ld_output_row
+ "mov x23, #0x10\n" // cntb _, ALL, #1
+ "madd x20, x22, x17, x20\n" // offset += tile_j * ld_input_col
+ "ldr x15, [%x[params_struct], %[offsetof_args_inptr]]\n"
"lsl x17, x17, #0x1\n"
- "lsr x23, %x[n_channels], #0x3\n"
- "ldr x14, [%x[params_struct], %[offsetof_args_params]]\n"
- "mul x21, x21, x27\n" // offset *= kernel_stride * output_size
- "add x16, x16, x21, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
- "add x13, x16, x25, LSL #1\n"
- "mul x20, x20, x26\n" // offset *= output_tile_size
- "add x12, x13, x25, LSL #1\n"
- "add x11, x8, x8\n"
- "add x15, x15, x20, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
- "add x10, x12, x25, LSL #1\n"
- "add x9, x11, x8\n"
- "add x28, x15, x22, LSL #1\n"
+ "ldr x14, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "madd x19, x22, x16, x19\n" // offset += tile_j * ld_output_col
+ "lsl x16, x16, #0x1\n"
+ "lsr x22, %x[n_channels], #0x3\n"
+ "ldr x13, [%x[params_struct], %[offsetof_args_params]]\n"
+ "mul x20, x20, x26\n" // offset *= kernel_stride * output_size
+ "add x15, x15, x20, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
+ "add x12, x15, x24, LSL #1\n"
+ "mul x19, x19, x25\n" // offset *= output_tile_size
+ "add x11, x12, x24, LSL #1\n"
+ "add x10, x17, x17\n"
+ "add x14, x14, x19, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
+ "add x9, x11, x24, LSL #1\n"
+ "add x28, x10, x17\n"
+ "add x27, x14, x21, LSL #1\n"
"add x20, %x[params_struct], %[offsetof_args_min]\n"
+ "add x19, %x[params_struct], %[offsetof_args_max]\n"
"ld1r { v18.8h }, [x20]\n"
- "add x20, %x[params_struct], %[offsetof_args_max]\n"
- "ld1r { v17.8h }, [x20]\n"
- "add x27, x10, x25, LSL #1\n"
- "add x26, x9, x8\n"
- "add x25, x28, x22, LSL #1\n"
- "add x22, x17, x17\n"
- "mov x21, #0x0\n"
- "sub x20, XZR, x24\n"
- "cbz x23, 4f\n"
- "ldr q16, [x14, #0x0]\n"
- "ldr q0, [x14, #0x10]\n"
- "cmp x24, x23, LSL #4\n"
- "ldr q1, [x14, #0x20]\n"
- "ldr q2, [x14, #0x30]\n"
- "ldr q3, [x14, #0x40]\n"
- "ldr q4, [x14, #0x50]\n"
- "ldr q5, [x14, #0x60]\n"
- "ldr q6, [x14, #0x70]\n"
- "ldr q7, [x14, #0x80]\n"
- "ldr q8, [x14, #0x90]\n"
- "add x14, x14, #0xa0\n"
- "ldr q9, [x12, x11]\n"
- "ld1 { v10.8h }, [x16]\n"
- "ldr q11, [x16, x26]\n"
- "ld1 { v12.8h }, [x27]\n"
- "ldr q13, [x13, x11]\n"
+ "ld1r { v17.8h }, [x19]\n"
+ "add x26, x9, x24, LSL #1\n"
+ "add x25, x28, x17\n"
+ "add x24, x27, x21, LSL #1\n"
+ "add x21, x16, x16\n"
+ "mov x20, #0x0\n"
+ "sub x19, XZR, x23\n"
+ "cbz x22, 4f\n"
+ "ldr q16, [x13, #0x0]\n"
+ "cmp x23, x22, LSL #4\n"
+ "ldr q0, [x13, #0x10]\n"
+ "ldr q1, [x13, #0x20]\n"
+ "ldr q2, [x13, #0x30]\n"
+ "ldr q3, [x13, #0x40]\n"
+ "ldr q4, [x13, #0x50]\n"
+ "ldr q5, [x13, #0x60]\n"
+ "ldr q6, [x13, #0x70]\n"
+ "ldr q7, [x13, #0x80]\n"
+ "ldr q8, [x13, #0x90]\n"
+ "ldr q9, [x11, x10]\n"
+ "add x13, x13, #0xa0\n"
+ "ld1 { v10.8h }, [x15]\n"
+ "ldr q11, [x15, x25]\n"
+ "ld1 { v12.8h }, [x26]\n"
+ "ldr q13, [x12, x10]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
"mov v24.16b, v16.16b\n fmla v24.8h, v7.8h, v9.8h\n"
"mov v23.16b, v16.16b\n fmla v23.8h, v8.8h, v9.8h\n"
- "add x24, x24, #0x10\n"
- "cmp x24, x23, LSL #4\n"
+ "add x23, x23, #0x10\n"
+ "cmp x23, x22, LSL #4\n"
"mov v25.16b, v16.16b\n fmla v25.8h, v6.8h, v9.8h\n"
"fmla v24.8h, v4.8h, v13.8h\n"
+ "add x19, x19, #0x10\n"
"add x20, x20, #0x10\n"
- "add x21, x21, #0x10\n"
"mov v26.16b, v16.16b\n fmla v26.8h, v5.8h, v9.8h\n"
"mov v27.16b, v16.16b\n fmla v27.8h, v4.8h, v9.8h\n"
"mov v28.16b, v16.16b\n fmla v28.8h, v3.8h, v9.8h\n"
"fmla v23.8h, v0.8h, v10.8h\n"
- "ldr q10, [x12, x9]\n"
+ "ldr q10, [x11, x28]\n"
"fmla v25.8h, v2.8h, v11.8h\n"
- "ldr q11, [x12, x8]\n"
+ "ldr q11, [x11, x17]\n"
"mov v29.16b, v16.16b\n fmla v29.8h, v2.8h, v9.8h\n"
"fmla v24.8h, v6.8h, v11.8h\n"
"mov v31.16b, v16.16b\n fmla v31.8h, v0.8h, v9.8h\n"
@@ -172,139 +172,139 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"fmla v26.8h, v2.8h, v13.8h\n"
"fmla v27.8h, v1.8h, v13.8h\n"
"fmla v28.8h, v0.8h, v13.8h\n"
- "ldr q13, [x16, x8]\n"
+ "ldr q13, [x15, x17]\n"
"fmla v29.8h, v6.8h, v12.8h\n"
- "ldr q12, [x27, x26]\n"
+ "ldr q12, [x26, x25]\n"
"mov v30.16b, v16.16b\n fmla v30.8h, v1.8h, v9.8h\n"
- "ldr q16, [x14, #0x0]\n"
"fmla v24.8h, v0.8h, v13.8h\n"
+ "ldr q16, [x13, #0x0]\n"
"fmla v31.8h, v8.8h, v12.8h\n"
- "ldr q12, [x16, x9]\n"
+ "ldr q12, [x15, x28]\n"
"fmla v23.8h, v7.8h, v11.8h\n"
"fmla v30.8h, v0.8h, v11.8h\n"
"fmla v26.8h, v4.8h, v11.8h\n"
"fmla v27.8h, v3.8h, v11.8h\n"
"fmla v29.8h, v1.8h, v11.8h\n"
- "ld1 { v11.8h }, [x13]\n"
+ "ld1 { v11.8h }, [x12]\n"
"fmla v24.8h, v2.8h, v12.8h\n"
"fmla v25.8h, v1.8h, v12.8h\n"
- "ld1 { v12.8h }, [x10]\n"
+ "ld1 { v12.8h }, [x9]\n"
"fmla v28.8h, v4.8h, v10.8h\n"
"fmla v23.8h, v1.8h, v13.8h\n"
- "ldr q13, [x13, x26]\n"
+ "ldr q13, [x12, x25]\n"
"fmla v30.8h, v2.8h, v10.8h\n"
"fmla v31.8h, v1.8h, v10.8h\n"
"fmla v24.8h, v8.8h, v10.8h\n"
"fmla v25.8h, v7.8h, v10.8h\n"
"fmla v27.8h, v5.8h, v10.8h\n"
- "ldr q10, [x10, x11]\n"
"fmla v26.8h, v0.8h, v11.8h\n"
+ "ldr q10, [x9, x10]\n"
"fmla v29.8h, v3.8h, v12.8h\n"
"fmla v28.8h, v2.8h, v13.8h\n"
"fmla v30.8h, v4.8h, v10.8h\n"
"fmla v31.8h, v3.8h, v10.8h\n"
"fmla v23.8h, v3.8h, v11.8h\n"
- "ldr q11, [x10, x26]\n"
"fmla v25.8h, v5.8h, v13.8h\n"
- "ldr q13, [x27, x8]\n"
+ "ldr q11, [x9, x25]\n"
+ "ldr q13, [x26, x17]\n"
"fmla v26.8h, v6.8h, v12.8h\n"
- "ldr q12, [x13, x8]\n"
"fmla v27.8h, v7.8h, v10.8h\n"
+ "ldr q12, [x12, x17]\n"
"fmla v29.8h, v5.8h, v10.8h\n"
"fmla v28.8h, v6.8h, v10.8h\n"
"fmla v31.8h, v5.8h, v11.8h\n"
"fmla v30.8h, v6.8h, v13.8h\n"
"fmla v26.8h, v8.8h, v10.8h\n"
"fmla v29.8h, v7.8h, v13.8h\n"
- "ldr q13, [x27, x9]\n"
+ "ldr q13, [x26, x28]\n"
"fmla v24.8h, v3.8h, v12.8h\n"
"fmla v27.8h, v0.8h, v12.8h\n"
"fmla v28.8h, v8.8h, v11.8h\n"
- "ldr q11, [x13, x9]\n"
+ "ldr q11, [x12, x28]\n"
"fmla v30.8h, v8.8h, v13.8h\n"
- "add x13, x13, #0x10\n"
+ "add x12, x12, #0x10\n"
"fmla v31.8h, v7.8h, v13.8h\n"
- "ldr q13, [x10, x9]\n"
"fmla v23.8h, v4.8h, v12.8h\n"
+ "ldr q13, [x9, x28]\n"
"fmla v26.8h, v1.8h, v12.8h\n"
- "ldr q12, [x10, x8]\n"
"fmla v24.8h, v5.8h, v11.8h\n"
- "add x10, x10, #0x10\n"
+ "ldr q12, [x9, x17]\n"
+ "add x9, x9, #0x10\n"
"fmla v25.8h, v4.8h, v11.8h\n"
"fmla v27.8h, v2.8h, v11.8h\n"
"fmla v28.8h, v1.8h, v11.8h\n"
- "ldr q11, [x16, x11]\n"
+ "ldr q11, [x15, x10]\n"
"fmla v29.8h, v4.8h, v12.8h\n"
- "add x16, x16, #0x10\n"
- "ld1 { v10.8h }, [x16]\n"
+ "add x15, x15, #0x10\n"
"fmla v30.8h, v3.8h, v12.8h\n"
"fmla v31.8h, v4.8h, v13.8h\n"
- "ldr q4, [x14, #0x50]\n"
+ "ld1 { v10.8h }, [x15]\n"
+ "ldr q4, [x13, #0x50]\n"
"fmla v26.8h, v7.8h, v12.8h\n"
"fmla v27.8h, v6.8h, v12.8h\n"
- "ld1 { v12.8h }, [x12]\n"
+ "ld1 { v12.8h }, [x11]\n"
"fmla v23.8h, v2.8h, v11.8h\n"
"fmla v24.8h, v1.8h, v11.8h\n"
- "ldr q1, [x14, #0x20]\n"
"fmax v24.8h, v24.8h, v18.8h\n"
+ "ldr q1, [x13, #0x20]\n"
"fmla v25.8h, v0.8h, v11.8h\n"
- "ldr q11, [x12, x26]\n"
+ "ldr q11, [x11, x25]\n"
"fmla v28.8h, v7.8h, v13.8h\n"
- "add x12, x12, #0x10\n"
- "ldr q9, [x12, x11]\n"
+ "add x11, x11, #0x10\n"
"fmla v30.8h, v5.8h, v13.8h\n"
"fmla v29.8h, v0.8h, v12.8h\n"
- "ldr q0, [x14, #0x10]\n"
+ "fmin v24.8h, v24.8h, v17.8h\n"
+ "ldr q9, [x11, x10]\n"
"fmla v31.8h, v2.8h, v11.8h\n"
- "ldr q2, [x14, #0x30]\n"
"fmla v27.8h, v8.8h, v13.8h\n"
- "ldr q13, [x27, x11]\n"
+ "ldr q13, [x26, x10]\n"
+ "fmax v27.8h, v27.8h, v18.8h\n"
"fmla v23.8h, v6.8h, v12.8h\n"
"fmla v26.8h, v3.8h, v12.8h\n"
- "ldr q3, [x14, #0x40]\n"
"fmax v23.8h, v23.8h, v18.8h\n"
+ "add x26, x26, #0x10\n"
"fmla v25.8h, v8.8h, v11.8h\n"
"fmla v28.8h, v5.8h, v11.8h\n"
- "ldr q11, [x16, x26]\n"
- "ldr q5, [x14, #0x60]\n"
+ "fmax v25.8h, v25.8h, v18.8h\n"
+ "ldr q11, [x15, x25]\n"
"fmla v29.8h, v8.8h, v13.8h\n"
- "ldr q8, [x14, #0x90]\n"
"fmla v30.8h, v7.8h, v13.8h\n"
- "ldr q7, [x14, #0x80]\n"
- "fmla v31.8h, v6.8h, v13.8h\n"
- "ldr q13, [x13, x11]\n"
- "ldr q6, [x14, #0x70]\n"
- "fmax v25.8h, v25.8h, v18.8h\n"
"fmax v26.8h, v26.8h, v18.8h\n"
- "fmax v27.8h, v27.8h, v18.8h\n"
- "add x27, x27, #0x10\n"
- "ld1 { v12.8h }, [x27]\n"
+ "ld1 { v12.8h }, [x26]\n"
+ "fmla v31.8h, v6.8h, v13.8h\n"
"fmax v28.8h, v28.8h, v18.8h\n"
+ "ldr q13, [x12, x10]\n"
+ "ldr q0, [x13, #0x10]\n"
"fmax v29.8h, v29.8h, v18.8h\n"
- "add x14, x14, #0xa0\n"
"fmax v30.8h, v30.8h, v18.8h\n"
+ "ldr q2, [x13, #0x30]\n"
+ "ldr q3, [x13, #0x40]\n"
"fmax v31.8h, v31.8h, v18.8h\n"
"fmin v23.8h, v23.8h, v17.8h\n"
- "fmin v24.8h, v24.8h, v17.8h\n"
- "st1 { v23.8h }, [x15]\n"
+ "st1 { v23.8h }, [x14]\n"
+ "ldr q5, [x13, #0x60]\n"
"fmin v25.8h, v25.8h, v17.8h\n"
"fmin v26.8h, v26.8h, v17.8h\n"
- "str q24, [x15, x17]\n"
+ "str q24, [x14, x16]\n"
+ "ldr q6, [x13, #0x70]\n"
"fmin v27.8h, v27.8h, v17.8h\n"
"fmin v28.8h, v28.8h, v17.8h\n"
- "str q25, [x15, x22]\n"
- "add x15, x15, #0x10\n"
+ "str q25, [x14, x21]\n"
+ "add x14, x14, #0x10\n"
"fmin v29.8h, v29.8h, v17.8h\n"
"fmin v30.8h, v30.8h, v17.8h\n"
- "st1 { v26.8h }, [x28]\n"
+ "st1 { v26.8h }, [x27]\n"
+ "ldr q7, [x13, #0x80]\n"
"fmin v31.8h, v31.8h, v17.8h\n"
- "str q27, [x28, x17]\n"
- "str q28, [x28, x22]\n"
- "add x28, x28, #0x10\n"
- "st1 { v29.8h }, [x25]\n"
- "str q30, [x25, x17]\n"
- "str q31, [x25, x22]\n"
- "add x25, x25, #0x10\n"
+ "str q27, [x27, x16]\n"
+ "ldr q8, [x13, #0x90]\n"
+ "add x13, x13, #0xa0\n"
+ "str q28, [x27, x21]\n"
+ "add x27, x27, #0x10\n"
+ "st1 { v29.8h }, [x24]\n"
+ "str q30, [x24, x16]\n"
+ "str q31, [x24, x21]\n"
+ "add x24, x24, #0x10\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
"mov v24.16b, v16.16b\n fmla v24.8h, v7.8h, v9.8h\n"
@@ -315,9 +315,9 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"mov v27.16b, v16.16b\n fmla v27.8h, v4.8h, v9.8h\n"
"mov v28.16b, v16.16b\n fmla v28.8h, v3.8h, v9.8h\n"
"fmla v23.8h, v0.8h, v10.8h\n"
- "ldr q10, [x12, x9]\n"
+ "ldr q10, [x11, x28]\n"
"fmla v25.8h, v2.8h, v11.8h\n"
- "ldr q11, [x12, x8]\n"
+ "ldr q11, [x11, x17]\n"
"mov v29.16b, v16.16b\n fmla v29.8h, v2.8h, v9.8h\n"
"fmla v24.8h, v6.8h, v11.8h\n"
"mov v31.16b, v16.16b\n fmla v31.8h, v0.8h, v9.8h\n"
@@ -326,92 +326,92 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"fmla v26.8h, v2.8h, v13.8h\n"
"fmla v27.8h, v1.8h, v13.8h\n"
"fmla v28.8h, v0.8h, v13.8h\n"
- "ldr q13, [x16, x8]\n"
+ "ldr q13, [x15, x17]\n"
"fmla v29.8h, v6.8h, v12.8h\n"
- "ldr q12, [x27, x26]\n"
+ "ldr q12, [x26, x25]\n"
"mov v30.16b, v16.16b\n fmla v30.8h, v1.8h, v9.8h\n"
"fmla v24.8h, v0.8h, v13.8h\n"
"fmla v31.8h, v8.8h, v12.8h\n"
- "ldr q12, [x16, x9]\n"
+ "ldr q12, [x15, x28]\n"
"fmla v23.8h, v7.8h, v11.8h\n"
"fmla v30.8h, v0.8h, v11.8h\n"
"fmla v26.8h, v4.8h, v11.8h\n"
"fmla v27.8h, v3.8h, v11.8h\n"
"fmla v29.8h, v1.8h, v11.8h\n"
- "ld1 { v11.8h }, [x13]\n"
+ "ld1 { v11.8h }, [x12]\n"
"fmla v24.8h, v2.8h, v12.8h\n"
"fmla v25.8h, v1.8h, v12.8h\n"
- "ld1 { v12.8h }, [x10]\n"
+ "ld1 { v12.8h }, [x9]\n"
"fmla v28.8h, v4.8h, v10.8h\n"
"fmla v23.8h, v1.8h, v13.8h\n"
- "ldr q13, [x13, x26]\n"
+ "ldr q13, [x12, x25]\n"
"fmla v30.8h, v2.8h, v10.8h\n"
"fmla v31.8h, v1.8h, v10.8h\n"
"fmla v24.8h, v8.8h, v10.8h\n"
"fmla v25.8h, v7.8h, v10.8h\n"
"fmla v27.8h, v5.8h, v10.8h\n"
- "ldr q10, [x10, x11]\n"
"fmla v26.8h, v0.8h, v11.8h\n"
+ "ldr q10, [x9, x10]\n"
"fmla v29.8h, v3.8h, v12.8h\n"
"fmla v28.8h, v2.8h, v13.8h\n"
"fmla v30.8h, v4.8h, v10.8h\n"
"fmla v31.8h, v3.8h, v10.8h\n"
"fmla v23.8h, v3.8h, v11.8h\n"
- "ldr q11, [x10, x26]\n"
"fmla v25.8h, v5.8h, v13.8h\n"
- "ldr q13, [x27, x8]\n"
+ "ldr q11, [x9, x25]\n"
+ "ldr q13, [x26, x17]\n"
"fmla v26.8h, v6.8h, v12.8h\n"
- "ldr q12, [x13, x8]\n"
"fmla v27.8h, v7.8h, v10.8h\n"
+ "ldr q12, [x12, x17]\n"
"fmla v29.8h, v5.8h, v10.8h\n"
"fmla v28.8h, v6.8h, v10.8h\n"
"fmla v31.8h, v5.8h, v11.8h\n"
"fmla v30.8h, v6.8h, v13.8h\n"
"fmla v26.8h, v8.8h, v10.8h\n"
"fmla v29.8h, v7.8h, v13.8h\n"
- "ldr q13, [x27, x9]\n"
+ "ldr q13, [x26, x28]\n"
"fmla v24.8h, v3.8h, v12.8h\n"
"fmla v27.8h, v0.8h, v12.8h\n"
"fmla v28.8h, v8.8h, v11.8h\n"
- "ldr q11, [x13, x9]\n"
+ "ldr q11, [x12, x28]\n"
"fmla v30.8h, v8.8h, v13.8h\n"
- "add x13, x13, #0x10\n"
+ "add x12, x12, #0x10\n"
"fmla v31.8h, v7.8h, v13.8h\n"
- "ldr q13, [x10, x9]\n"
"fmla v23.8h, v4.8h, v12.8h\n"
+ "ldr q13, [x9, x28]\n"
"fmla v26.8h, v1.8h, v12.8h\n"
- "ldr q12, [x10, x8]\n"
"fmla v24.8h, v5.8h, v11.8h\n"
- "add x10, x10, #0x10\n"
+ "ldr q12, [x9, x17]\n"
+ "add x9, x9, #0x10\n"
"fmla v25.8h, v4.8h, v11.8h\n"
"fmla v27.8h, v2.8h, v11.8h\n"
"fmla v28.8h, v1.8h, v11.8h\n"
- "ldr q11, [x16, x11]\n"
+ "ldr q11, [x15, x10]\n"
"fmla v29.8h, v4.8h, v12.8h\n"
- "add x16, x16, #0x10\n"
+ "add x15, x15, #0x10\n"
"fmla v30.8h, v3.8h, v12.8h\n"
"fmla v31.8h, v4.8h, v13.8h\n"
"fmla v26.8h, v7.8h, v12.8h\n"
"fmla v27.8h, v6.8h, v12.8h\n"
- "ld1 { v12.8h }, [x12]\n"
+ "ld1 { v12.8h }, [x11]\n"
"fmla v23.8h, v2.8h, v11.8h\n"
"fmla v24.8h, v1.8h, v11.8h\n"
"fmax v24.8h, v24.8h, v18.8h\n"
"fmla v25.8h, v0.8h, v11.8h\n"
- "ldr q11, [x12, x26]\n"
+ "ldr q11, [x11, x25]\n"
"fmla v28.8h, v7.8h, v13.8h\n"
"fmin v24.8h, v24.8h, v17.8h\n"
"fmla v30.8h, v5.8h, v13.8h\n"
"fmla v29.8h, v0.8h, v12.8h\n"
- "add x12, x12, #0x10\n"
+ "add x11, x11, #0x10\n"
"fmla v31.8h, v2.8h, v11.8h\n"
"fmla v27.8h, v8.8h, v13.8h\n"
- "ldr q13, [x27, x11]\n"
+ "ldr q13, [x26, x10]\n"
"fmax v27.8h, v27.8h, v18.8h\n"
"fmla v23.8h, v6.8h, v12.8h\n"
"fmla v26.8h, v3.8h, v12.8h\n"
"fmax v23.8h, v23.8h, v18.8h\n"
- "add x27, x27, #0x10\n"
+ "add x26, x26, #0x10\n"
"fmla v25.8h, v8.8h, v11.8h\n"
"fmla v28.8h, v5.8h, v11.8h\n"
"fmax v25.8h, v25.8h, v18.8h\n"
@@ -424,94 +424,94 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"fmax v30.8h, v30.8h, v18.8h\n"
"fmax v31.8h, v31.8h, v18.8h\n"
"fmin v23.8h, v23.8h, v17.8h\n"
- "st1 { v23.8h }, [x15]\n"
+ "st1 { v23.8h }, [x14]\n"
"fmin v25.8h, v25.8h, v17.8h\n"
"fmin v26.8h, v26.8h, v17.8h\n"
- "str q24, [x15, x17]\n"
+ "str q24, [x14, x16]\n"
"fmin v27.8h, v27.8h, v17.8h\n"
"fmin v28.8h, v28.8h, v17.8h\n"
- "str q25, [x15, x22]\n"
- "add x15, x15, #0x10\n"
+ "str q25, [x14, x21]\n"
+ "add x14, x14, #0x10\n"
"fmin v29.8h, v29.8h, v17.8h\n"
"fmin v30.8h, v30.8h, v17.8h\n"
- "st1 { v26.8h }, [x28]\n"
+ "st1 { v26.8h }, [x27]\n"
"fmin v31.8h, v31.8h, v17.8h\n"
- "str q27, [x28, x17]\n"
- "str q28, [x28, x22]\n"
- "add x28, x28, #0x10\n"
- "st1 { v29.8h }, [x25]\n"
- "str q30, [x25, x17]\n"
- "str q31, [x25, x22]\n"
- "add x25, x25, #0x10\n"
+ "str q27, [x27, x16]\n"
+ "str q28, [x27, x21]\n"
+ "add x27, x27, #0x10\n"
+ "st1 { v29.8h }, [x24]\n"
+ "str q30, [x24, x16]\n"
+ "str q31, [x24, x21]\n"
+ "add x24, x24, #0x10\n"
"4:" // Tile loop: Oddments
"tst %x[n_channels], #0x7\n"
"beq 93f\n"
- "ldr q16, [x14, #0x0]\n"
- "ldr q0, [x14, #0x10]\n"
- "add x24, x12, x11\n"
- "add x23, x16, XZR\n"
- "ldr q1, [x14, #0x20]\n"
- "ldr q2, [x14, #0x30]\n"
- "add x22, x16, x26\n"
- "add x21, x27, XZR\n"
- "ldr q3, [x14, #0x40]\n"
- "ldr q4, [x14, #0x50]\n"
- "add x20, x13, x11\n"
- "ldr q5, [x14, #0x60]\n"
- "ldr q6, [x14, #0x70]\n"
- "ldr q7, [x14, #0x80]\n"
- "ldr q8, [x14, #0x90]\n"
+ "ldr q16, [x13, #0x0]\n"
+ "ldr q0, [x13, #0x10]\n"
+ "ldr q1, [x13, #0x20]\n"
+ "ldr q2, [x13, #0x30]\n"
+ "add x23, x11, x10\n"
+ "add x22, x15, XZR\n"
+ "ldr q3, [x13, #0x40]\n"
+ "ldr q4, [x13, #0x50]\n"
+ "add x21, x15, x25\n"
+ "add x20, x26, XZR\n"
+ "ldr q5, [x13, #0x60]\n"
+ "ldr q6, [x13, #0x70]\n"
+ "add x19, x12, x10\n"
+ "ldr q7, [x13, #0x80]\n"
+ "ldr q8, [x13, #0x90]\n"
"tbz %x[n_channels], #2, 6f\n"
- "ldr d9, [x24], #0x8\n"
- "ldr d10, [x23], #0x8\n"
- "ldr d11, [x22], #0x8\n"
- "ldr d12, [x21], #0x8\n"
- "ldr d13, [x20], #0x8\n"
+ "ldr d9, [x23], #0x8\n"
+ "ldr d10, [x22], #0x8\n"
+ "ldr d11, [x21], #0x8\n"
+ "ldr d12, [x20], #0x8\n"
+ "ldr d13, [x19], #0x8\n"
"tbz %x[n_channels], #1, 5f\n"
- "ld1 { v9.s }[2], [x24], #0x4\n"
- "ld1 { v10.s }[2], [x23], #0x4\n"
- "ld1 { v11.s }[2], [x22], #0x4\n"
- "ld1 { v12.s }[2], [x21], #0x4\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x23], #0x4\n"
+ "ld1 { v10.s }[2], [x22], #0x4\n"
+ "ld1 { v11.s }[2], [x21], #0x4\n"
+ "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 8f\n"
- "ld1 { v9.h }[6], [x24]\n"
- "ld1 { v10.h }[6], [x23]\n"
- "ld1 { v11.h }[6], [x22]\n"
- "ld1 { v12.h }[6], [x21]\n"
- "ld1 { v13.h }[6], [x20]\n"
+ "ld1 { v9.h }[6], [x23]\n"
+ "ld1 { v10.h }[6], [x22]\n"
+ "ld1 { v11.h }[6], [x21]\n"
+ "ld1 { v12.h }[6], [x20]\n"
+ "ld1 { v13.h }[6], [x19]\n"
"b 8f\n"
"5:" // Tile loop: Oddments: Load inputs: (2, 2), (0, 0), (0, 4), (4, 0), (1, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 8f\n"
- "ld1 { v9.h }[4], [x24]\n"
- "ld1 { v10.h }[4], [x23]\n"
- "ld1 { v11.h }[4], [x22]\n"
- "ld1 { v12.h }[4], [x21]\n"
- "ld1 { v13.h }[4], [x20]\n"
+ "ld1 { v9.h }[4], [x23]\n"
+ "ld1 { v10.h }[4], [x22]\n"
+ "ld1 { v11.h }[4], [x21]\n"
+ "ld1 { v12.h }[4], [x20]\n"
+ "ld1 { v13.h }[4], [x19]\n"
"b 8f\n"
"6:" // Tile loop: Oddments: Load inputs: (2, 2), (0, 0), (0, 4), (4, 0), (1, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 7f\n"
- "ldr s9, [x24], #0x4\n"
- "ldr s10, [x23], #0x4\n"
- "ldr s11, [x22], #0x4\n"
- "ldr s12, [x21], #0x4\n"
- "ldr s13, [x20], #0x4\n"
+ "ldr s9, [x23], #0x4\n"
+ "ldr s10, [x22], #0x4\n"
+ "ldr s11, [x21], #0x4\n"
+ "ldr s12, [x20], #0x4\n"
+ "ldr s13, [x19], #0x4\n"
"tbz %x[n_channels], #0, 8f\n"
- "ld1 { v9.h }[2], [x24]\n"
- "ld1 { v10.h }[2], [x23]\n"
- "ld1 { v11.h }[2], [x22]\n"
- "ld1 { v12.h }[2], [x21]\n"
- "ld1 { v13.h }[2], [x20]\n"
+ "ld1 { v9.h }[2], [x23]\n"
+ "ld1 { v10.h }[2], [x22]\n"
+ "ld1 { v11.h }[2], [x21]\n"
+ "ld1 { v12.h }[2], [x20]\n"
+ "ld1 { v13.h }[2], [x19]\n"
"b 8f\n"
"7:" // Tile loop: Oddments: Load inputs: (2, 2), (0, 0), (0, 4), (4, 0), (1, 2): Bit 2: Unset: Bit 1: Unset
- "ldr h9, [x24, #0x0]\n"
- "ldr h10, [x23, #0x0]\n"
- "ldr h11, [x22, #0x0]\n"
- "ldr h12, [x21, #0x0]\n"
- "ldr h13, [x20, #0x0]\n"
+ "ldr h9, [x23, #0x0]\n"
+ "ldr h10, [x22, #0x0]\n"
+ "ldr h11, [x21, #0x0]\n"
+ "ldr h12, [x20, #0x0]\n"
+ "ldr h13, [x19, #0x0]\n"
"8:" // Tile loop: Oddments: Load inputs: (2, 2), (0, 0), (0, 4), (4, 0), (1, 2): Bit 2: End
"mov v23.16b, v16.16b\n fmla v23.8h, v8.8h, v9.8h\n"
"mov v25.16b, v16.16b\n fmla v25.8h, v6.8h, v9.8h\n"
- "add x20, x27, x26\n"
+ "add x19, x26, x25\n"
"mov v24.16b, v16.16b\n fmla v24.8h, v7.8h, v9.8h\n"
"mov v26.16b, v16.16b\n fmla v26.8h, v5.8h, v9.8h\n"
"mov v27.16b, v16.16b\n fmla v27.8h, v4.8h, v9.8h\n"
@@ -529,483 +529,483 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"fmla v27.8h, v1.8h, v13.8h\n"
"fmla v28.8h, v0.8h, v13.8h\n"
"tbz %x[n_channels], #2, 10f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #1, 9f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 12f\n"
- "ld1 { v12.h }[6], [x20]\n"
+ "ld1 { v12.h }[6], [x19]\n"
"b 12f\n"
"9:" // Tile loop: Oddments: Load inputs: (4, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 12f\n"
- "ld1 { v12.h }[4], [x20]\n"
+ "ld1 { v12.h }[4], [x19]\n"
"b 12f\n"
"10:" // Tile loop: Oddments: Load inputs: (4, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 11f\n"
- "ldr s12, [x20], #0x4\n"
+ "ldr s12, [x19], #0x4\n"
"tbz %x[n_channels], #0, 12f\n"
- "ld1 { v12.h }[2], [x20]\n"
+ "ld1 { v12.h }[2], [x19]\n"
"b 12f\n"
"11:" // Tile loop: Oddments: Load inputs: (4, 4): Bit 2: Unset: Bit 1: Unset
- "ldr h12, [x20, #0x0]\n"
+ "ldr h12, [x19, #0x0]\n"
"12:" // Tile loop: Oddments: Load inputs: (4, 4): Bit 2: End
"fmla v31.8h, v8.8h, v12.8h\n"
- "add x20, x12, x8\n"
+ "add x19, x11, x17\n"
"tbz %x[n_channels], #2, 14f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #1, 13f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 16f\n"
- "ld1 { v11.h }[6], [x20]\n"
+ "ld1 { v11.h }[6], [x19]\n"
"b 16f\n"
"13:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 16f\n"
- "ld1 { v11.h }[4], [x20]\n"
+ "ld1 { v11.h }[4], [x19]\n"
"b 16f\n"
"14:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 15f\n"
- "ldr s11, [x20], #0x4\n"
+ "ldr s11, [x19], #0x4\n"
"tbz %x[n_channels], #0, 16f\n"
- "ld1 { v11.h }[2], [x20]\n"
+ "ld1 { v11.h }[2], [x19]\n"
"b 16f\n"
"15:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 2: Unset: Bit 1: Unset
- "ldr h11, [x20, #0x0]\n"
+ "ldr h11, [x19, #0x0]\n"
"16:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 2: End
"fmla v23.8h, v7.8h, v11.8h\n"
"fmla v24.8h, v6.8h, v11.8h\n"
- "add x20, x16, x8\n"
+ "add x19, x15, x17\n"
"fmla v26.8h, v4.8h, v11.8h\n"
"fmla v27.8h, v3.8h, v11.8h\n"
"fmla v29.8h, v1.8h, v11.8h\n"
"fmla v30.8h, v0.8h, v11.8h\n"
"tbz %x[n_channels], #2, 18f\n"
- "ldr d13, [x20], #0x8\n"
+ "ldr d13, [x19], #0x8\n"
"tbz %x[n_channels], #1, 17f\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 20f\n"
- "ld1 { v13.h }[6], [x20]\n"
+ "ld1 { v13.h }[6], [x19]\n"
"b 20f\n"
"17:" // Tile loop: Oddments: Load inputs: (0, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 20f\n"
- "ld1 { v13.h }[4], [x20]\n"
+ "ld1 { v13.h }[4], [x19]\n"
"b 20f\n"
"18:" // Tile loop: Oddments: Load inputs: (0, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 19f\n"
- "ldr s13, [x20], #0x4\n"
+ "ldr s13, [x19], #0x4\n"
"tbz %x[n_channels], #0, 20f\n"
- "ld1 { v13.h }[2], [x20]\n"
+ "ld1 { v13.h }[2], [x19]\n"
"b 20f\n"
"19:" // Tile loop: Oddments: Load inputs: (0, 1): Bit 2: Unset: Bit 1: Unset
- "ldr h13, [x20, #0x0]\n"
+ "ldr h13, [x19, #0x0]\n"
"20:" // Tile loop: Oddments: Load inputs: (0, 1): Bit 2: End
"fmla v23.8h, v1.8h, v13.8h\n"
"fmla v24.8h, v0.8h, v13.8h\n"
- "add x20, x16, x9\n"
+ "add x19, x15, x28\n"
"tbz %x[n_channels], #2, 22f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #1, 21f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 24f\n"
- "ld1 { v12.h }[6], [x20]\n"
+ "ld1 { v12.h }[6], [x19]\n"
"b 24f\n"
"21:" // Tile loop: Oddments: Load inputs: (0, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 24f\n"
- "ld1 { v12.h }[4], [x20]\n"
+ "ld1 { v12.h }[4], [x19]\n"
"b 24f\n"
"22:" // Tile loop: Oddments: Load inputs: (0, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 23f\n"
- "ldr s12, [x20], #0x4\n"
+ "ldr s12, [x19], #0x4\n"
"tbz %x[n_channels], #0, 24f\n"
- "ld1 { v12.h }[2], [x20]\n"
+ "ld1 { v12.h }[2], [x19]\n"
"b 24f\n"
"23:" // Tile loop: Oddments: Load inputs: (0, 3): Bit 2: Unset: Bit 1: Unset
- "ldr h12, [x20, #0x0]\n"
+ "ldr h12, [x19, #0x0]\n"
"24:" // Tile loop: Oddments: Load inputs: (0, 3): Bit 2: End
"fmla v24.8h, v2.8h, v12.8h\n"
"fmla v25.8h, v1.8h, v12.8h\n"
- "add x20, x12, x9\n"
+ "add x19, x11, x28\n"
"tbz %x[n_channels], #2, 26f\n"
- "ldr d10, [x20], #0x8\n"
+ "ldr d10, [x19], #0x8\n"
"tbz %x[n_channels], #1, 25f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 28f\n"
- "ld1 { v10.h }[6], [x20]\n"
+ "ld1 { v10.h }[6], [x19]\n"
"b 28f\n"
"25:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 28f\n"
- "ld1 { v10.h }[4], [x20]\n"
+ "ld1 { v10.h }[4], [x19]\n"
"b 28f\n"
"26:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 27f\n"
- "ldr s10, [x20], #0x4\n"
+ "ldr s10, [x19], #0x4\n"
"tbz %x[n_channels], #0, 28f\n"
- "ld1 { v10.h }[2], [x20]\n"
+ "ld1 { v10.h }[2], [x19]\n"
"b 28f\n"
"27:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 2: Unset: Bit 1: Unset
- "ldr h10, [x20, #0x0]\n"
+ "ldr h10, [x19, #0x0]\n"
"28:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 2: End
"fmla v24.8h, v8.8h, v10.8h\n"
"fmla v25.8h, v7.8h, v10.8h\n"
- "add x20, x13, XZR\n"
+ "add x19, x12, XZR\n"
"fmla v27.8h, v5.8h, v10.8h\n"
"fmla v28.8h, v4.8h, v10.8h\n"
"fmla v30.8h, v2.8h, v10.8h\n"
"fmla v31.8h, v1.8h, v10.8h\n"
"tbz %x[n_channels], #2, 30f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #1, 29f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 32f\n"
- "ld1 { v11.h }[6], [x20]\n"
+ "ld1 { v11.h }[6], [x19]\n"
"b 32f\n"
"29:" // Tile loop: Oddments: Load inputs: (1, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 32f\n"
- "ld1 { v11.h }[4], [x20]\n"
+ "ld1 { v11.h }[4], [x19]\n"
"b 32f\n"
"30:" // Tile loop: Oddments: Load inputs: (1, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 31f\n"
- "ldr s11, [x20], #0x4\n"
+ "ldr s11, [x19], #0x4\n"
"tbz %x[n_channels], #0, 32f\n"
- "ld1 { v11.h }[2], [x20]\n"
+ "ld1 { v11.h }[2], [x19]\n"
"b 32f\n"
"31:" // Tile loop: Oddments: Load inputs: (1, 0): Bit 2: Unset: Bit 1: Unset
- "ldr h11, [x20, #0x0]\n"
+ "ldr h11, [x19, #0x0]\n"
"32:" // Tile loop: Oddments: Load inputs: (1, 0): Bit 2: End
"fmla v23.8h, v3.8h, v11.8h\n"
"fmla v26.8h, v0.8h, v11.8h\n"
- "add x20, x13, x26\n"
+ "add x19, x12, x25\n"
"tbz %x[n_channels], #2, 34f\n"
- "ldr d13, [x20], #0x8\n"
+ "ldr d13, [x19], #0x8\n"
"tbz %x[n_channels], #1, 33f\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 36f\n"
- "ld1 { v13.h }[6], [x20]\n"
+ "ld1 { v13.h }[6], [x19]\n"
"b 36f\n"
"33:" // Tile loop: Oddments: Load inputs: (1, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 36f\n"
- "ld1 { v13.h }[4], [x20]\n"
+ "ld1 { v13.h }[4], [x19]\n"
"b 36f\n"
"34:" // Tile loop: Oddments: Load inputs: (1, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 35f\n"
- "ldr s13, [x20], #0x4\n"
+ "ldr s13, [x19], #0x4\n"
"tbz %x[n_channels], #0, 36f\n"
- "ld1 { v13.h }[2], [x20]\n"
+ "ld1 { v13.h }[2], [x19]\n"
"b 36f\n"
"35:" // Tile loop: Oddments: Load inputs: (1, 4): Bit 2: Unset: Bit 1: Unset
- "ldr h13, [x20, #0x0]\n"
+ "ldr h13, [x19, #0x0]\n"
"36:" // Tile loop: Oddments: Load inputs: (1, 4): Bit 2: End
"fmla v25.8h, v5.8h, v13.8h\n"
"fmla v28.8h, v2.8h, v13.8h\n"
- "add x20, x10, XZR\n"
+ "add x19, x9, XZR\n"
"tbz %x[n_channels], #2, 38f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #1, 37f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 40f\n"
- "ld1 { v12.h }[6], [x20]\n"
+ "ld1 { v12.h }[6], [x19]\n"
"b 40f\n"
"37:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 40f\n"
- "ld1 { v12.h }[4], [x20]\n"
+ "ld1 { v12.h }[4], [x19]\n"
"b 40f\n"
"38:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 39f\n"
- "ldr s12, [x20], #0x4\n"
+ "ldr s12, [x19], #0x4\n"
"tbz %x[n_channels], #0, 40f\n"
- "ld1 { v12.h }[2], [x20]\n"
+ "ld1 { v12.h }[2], [x19]\n"
"b 40f\n"
"39:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 2: Unset: Bit 1: Unset
- "ldr h12, [x20, #0x0]\n"
+ "ldr h12, [x19, #0x0]\n"
"40:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 2: End
"fmla v26.8h, v6.8h, v12.8h\n"
"fmla v29.8h, v3.8h, v12.8h\n"
- "add x20, x10, x11\n"
+ "add x19, x9, x10\n"
"tbz %x[n_channels], #2, 42f\n"
- "ldr d10, [x20], #0x8\n"
+ "ldr d10, [x19], #0x8\n"
"tbz %x[n_channels], #1, 41f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 44f\n"
- "ld1 { v10.h }[6], [x20]\n"
+ "ld1 { v10.h }[6], [x19]\n"
"b 44f\n"
"41:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 44f\n"
- "ld1 { v10.h }[4], [x20]\n"
+ "ld1 { v10.h }[4], [x19]\n"
"b 44f\n"
"42:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 43f\n"
- "ldr s10, [x20], #0x4\n"
+ "ldr s10, [x19], #0x4\n"
"tbz %x[n_channels], #0, 44f\n"
- "ld1 { v10.h }[2], [x20]\n"
+ "ld1 { v10.h }[2], [x19]\n"
"b 44f\n"
"43:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 2: Unset: Bit 1: Unset
- "ldr h10, [x20, #0x0]\n"
+ "ldr h10, [x19, #0x0]\n"
"44:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 2: End
"fmla v26.8h, v8.8h, v10.8h\n"
"fmla v27.8h, v7.8h, v10.8h\n"
- "add x20, x10, x26\n"
+ "add x19, x9, x25\n"
"fmla v28.8h, v6.8h, v10.8h\n"
"fmla v29.8h, v5.8h, v10.8h\n"
"fmla v30.8h, v4.8h, v10.8h\n"
"fmla v31.8h, v3.8h, v10.8h\n"
"tbz %x[n_channels], #2, 46f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #1, 45f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 48f\n"
- "ld1 { v11.h }[6], [x20]\n"
+ "ld1 { v11.h }[6], [x19]\n"
"b 48f\n"
"45:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 48f\n"
- "ld1 { v11.h }[4], [x20]\n"
+ "ld1 { v11.h }[4], [x19]\n"
"b 48f\n"
"46:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 47f\n"
- "ldr s11, [x20], #0x4\n"
+ "ldr s11, [x19], #0x4\n"
"tbz %x[n_channels], #0, 48f\n"
- "ld1 { v11.h }[2], [x20]\n"
+ "ld1 { v11.h }[2], [x19]\n"
"b 48f\n"
"47:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 2: Unset: Bit 1: Unset
- "ldr h11, [x20, #0x0]\n"
+ "ldr h11, [x19, #0x0]\n"
"48:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 2: End
"fmla v28.8h, v8.8h, v11.8h\n"
"fmla v31.8h, v5.8h, v11.8h\n"
- "add x20, x27, x8\n"
+ "add x19, x26, x17\n"
"tbz %x[n_channels], #2, 50f\n"
- "ldr d13, [x20], #0x8\n"
+ "ldr d13, [x19], #0x8\n"
"tbz %x[n_channels], #1, 49f\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 52f\n"
- "ld1 { v13.h }[6], [x20]\n"
+ "ld1 { v13.h }[6], [x19]\n"
"b 52f\n"
"49:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 52f\n"
- "ld1 { v13.h }[4], [x20]\n"
+ "ld1 { v13.h }[4], [x19]\n"
"b 52f\n"
"50:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 51f\n"
- "ldr s13, [x20], #0x4\n"
+ "ldr s13, [x19], #0x4\n"
"tbz %x[n_channels], #0, 52f\n"
- "ld1 { v13.h }[2], [x20]\n"
+ "ld1 { v13.h }[2], [x19]\n"
"b 52f\n"
"51:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 2: Unset: Bit 1: Unset
- "ldr h13, [x20, #0x0]\n"
+ "ldr h13, [x19, #0x0]\n"
"52:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 2: End
"fmla v29.8h, v7.8h, v13.8h\n"
"fmla v30.8h, v6.8h, v13.8h\n"
- "add x20, x13, x8\n"
+ "add x19, x12, x17\n"
"tbz %x[n_channels], #2, 54f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #1, 53f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 56f\n"
- "ld1 { v12.h }[6], [x20]\n"
+ "ld1 { v12.h }[6], [x19]\n"
"b 56f\n"
"53:" // Tile loop: Oddments: Load inputs: (1, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 56f\n"
- "ld1 { v12.h }[4], [x20]\n"
+ "ld1 { v12.h }[4], [x19]\n"
"b 56f\n"
"54:" // Tile loop: Oddments: Load inputs: (1, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 55f\n"
- "ldr s12, [x20], #0x4\n"
+ "ldr s12, [x19], #0x4\n"
"tbz %x[n_channels], #0, 56f\n"
- "ld1 { v12.h }[2], [x20]\n"
+ "ld1 { v12.h }[2], [x19]\n"
"b 56f\n"
"55:" // Tile loop: Oddments: Load inputs: (1, 1): Bit 2: Unset: Bit 1: Unset
- "ldr h12, [x20, #0x0]\n"
+ "ldr h12, [x19, #0x0]\n"
"56:" // Tile loop: Oddments: Load inputs: (1, 1): Bit 2: End
"fmla v23.8h, v4.8h, v12.8h\n"
"fmla v24.8h, v3.8h, v12.8h\n"
- "add x20, x13, x9\n"
+ "add x19, x12, x28\n"
"fmla v26.8h, v1.8h, v12.8h\n"
"fmla v27.8h, v0.8h, v12.8h\n"
"tbz %x[n_channels], #2, 58f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #1, 57f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 60f\n"
- "ld1 { v11.h }[6], [x20]\n"
+ "ld1 { v11.h }[6], [x19]\n"
"b 60f\n"
"57:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 60f\n"
- "ld1 { v11.h }[4], [x20]\n"
+ "ld1 { v11.h }[4], [x19]\n"
"b 60f\n"
"58:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 59f\n"
- "ldr s11, [x20], #0x4\n"
+ "ldr s11, [x19], #0x4\n"
"tbz %x[n_channels], #0, 60f\n"
- "ld1 { v11.h }[2], [x20]\n"
+ "ld1 { v11.h }[2], [x19]\n"
"b 60f\n"
"59:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 2: Unset: Bit 1: Unset
- "ldr h11, [x20, #0x0]\n"
+ "ldr h11, [x19, #0x0]\n"
"60:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 2: End
"fmla v24.8h, v5.8h, v11.8h\n"
"fmla v25.8h, v4.8h, v11.8h\n"
- "add x20, x27, x9\n"
+ "add x19, x26, x28\n"
"fmla v27.8h, v2.8h, v11.8h\n"
"fmla v28.8h, v1.8h, v11.8h\n"
"tbz %x[n_channels], #2, 62f\n"
- "ldr d13, [x20], #0x8\n"
+ "ldr d13, [x19], #0x8\n"
"tbz %x[n_channels], #1, 61f\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 64f\n"
- "ld1 { v13.h }[6], [x20]\n"
+ "ld1 { v13.h }[6], [x19]\n"
"b 64f\n"
"61:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 64f\n"
- "ld1 { v13.h }[4], [x20]\n"
+ "ld1 { v13.h }[4], [x19]\n"
"b 64f\n"
"62:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 63f\n"
- "ldr s13, [x20], #0x4\n"
+ "ldr s13, [x19], #0x4\n"
"tbz %x[n_channels], #0, 64f\n"
- "ld1 { v13.h }[2], [x20]\n"
+ "ld1 { v13.h }[2], [x19]\n"
"b 64f\n"
"63:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 2: Unset: Bit 1: Unset
- "ldr h13, [x20, #0x0]\n"
+ "ldr h13, [x19, #0x0]\n"
"64:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 2: End
"fmla v30.8h, v8.8h, v13.8h\n"
"fmla v31.8h, v7.8h, v13.8h\n"
- "add x20, x10, x8\n"
+ "add x19, x9, x17\n"
"tbz %x[n_channels], #2, 66f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #1, 65f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 68f\n"
- "ld1 { v12.h }[6], [x20]\n"
+ "ld1 { v12.h }[6], [x19]\n"
"b 68f\n"
"65:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 68f\n"
- "ld1 { v12.h }[4], [x20]\n"
+ "ld1 { v12.h }[4], [x19]\n"
"b 68f\n"
"66:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 67f\n"
- "ldr s12, [x20], #0x4\n"
+ "ldr s12, [x19], #0x4\n"
"tbz %x[n_channels], #0, 68f\n"
- "ld1 { v12.h }[2], [x20]\n"
+ "ld1 { v12.h }[2], [x19]\n"
"b 68f\n"
"67:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 2: Unset: Bit 1: Unset
- "ldr h12, [x20, #0x0]\n"
+ "ldr h12, [x19, #0x0]\n"
"68:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 2: End
"fmla v26.8h, v7.8h, v12.8h\n"
"fmla v27.8h, v6.8h, v12.8h\n"
- "add x20, x16, x11\n"
+ "add x19, x15, x10\n"
"fmla v29.8h, v4.8h, v12.8h\n"
"fmla v30.8h, v3.8h, v12.8h\n"
"tbz %x[n_channels], #2, 70f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #1, 69f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 72f\n"
- "ld1 { v11.h }[6], [x20]\n"
+ "ld1 { v11.h }[6], [x19]\n"
"b 72f\n"
"69:" // Tile loop: Oddments: Load inputs: (0, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 72f\n"
- "ld1 { v11.h }[4], [x20]\n"
+ "ld1 { v11.h }[4], [x19]\n"
"b 72f\n"
"70:" // Tile loop: Oddments: Load inputs: (0, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 71f\n"
- "ldr s11, [x20], #0x4\n"
+ "ldr s11, [x19], #0x4\n"
"tbz %x[n_channels], #0, 72f\n"
- "ld1 { v11.h }[2], [x20]\n"
+ "ld1 { v11.h }[2], [x19]\n"
"b 72f\n"
"71:" // Tile loop: Oddments: Load inputs: (0, 2): Bit 2: Unset: Bit 1: Unset
- "ldr h11, [x20, #0x0]\n"
+ "ldr h11, [x19, #0x0]\n"
"72:" // Tile loop: Oddments: Load inputs: (0, 2): Bit 2: End
"fmla v23.8h, v2.8h, v11.8h\n"
"fmla v24.8h, v1.8h, v11.8h\n"
- "add x20, x10, x9\n"
+ "add x19, x9, x28\n"
"fmla v25.8h, v0.8h, v11.8h\n"
"tbz %x[n_channels], #2, 74f\n"
- "ldr d13, [x20], #0x8\n"
+ "ldr d13, [x19], #0x8\n"
"tbz %x[n_channels], #1, 73f\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 76f\n"
- "ld1 { v13.h }[6], [x20]\n"
+ "ld1 { v13.h }[6], [x19]\n"
"b 76f\n"
"73:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 76f\n"
- "ld1 { v13.h }[4], [x20]\n"
+ "ld1 { v13.h }[4], [x19]\n"
"b 76f\n"
"74:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 75f\n"
- "ldr s13, [x20], #0x4\n"
+ "ldr s13, [x19], #0x4\n"
"tbz %x[n_channels], #0, 76f\n"
- "ld1 { v13.h }[2], [x20]\n"
+ "ld1 { v13.h }[2], [x19]\n"
"b 76f\n"
"75:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 2: Unset: Bit 1: Unset
- "ldr h13, [x20, #0x0]\n"
+ "ldr h13, [x19, #0x0]\n"
"76:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 2: End
"fmla v27.8h, v8.8h, v13.8h\n"
"fmla v28.8h, v7.8h, v13.8h\n"
- "add x20, x12, XZR\n"
+ "add x19, x11, XZR\n"
"fmla v30.8h, v5.8h, v13.8h\n"
"fmla v31.8h, v4.8h, v13.8h\n"
"tbz %x[n_channels], #2, 78f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #1, 77f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 80f\n"
- "ld1 { v12.h }[6], [x20]\n"
+ "ld1 { v12.h }[6], [x19]\n"
"b 80f\n"
"77:" // Tile loop: Oddments: Load inputs: (2, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 80f\n"
- "ld1 { v12.h }[4], [x20]\n"
+ "ld1 { v12.h }[4], [x19]\n"
"b 80f\n"
"78:" // Tile loop: Oddments: Load inputs: (2, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 79f\n"
- "ldr s12, [x20], #0x4\n"
+ "ldr s12, [x19], #0x4\n"
"tbz %x[n_channels], #0, 80f\n"
- "ld1 { v12.h }[2], [x20]\n"
+ "ld1 { v12.h }[2], [x19]\n"
"b 80f\n"
"79:" // Tile loop: Oddments: Load inputs: (2, 0): Bit 2: Unset: Bit 1: Unset
- "ldr h12, [x20, #0x0]\n"
+ "ldr h12, [x19, #0x0]\n"
"80:" // Tile loop: Oddments: Load inputs: (2, 0): Bit 2: End
"fmla v23.8h, v6.8h, v12.8h\n"
"fmla v26.8h, v3.8h, v12.8h\n"
- "add x20, x12, x26\n"
+ "add x19, x11, x25\n"
"fmla v29.8h, v0.8h, v12.8h\n"
"tbz %x[n_channels], #2, 82f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #1, 81f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 84f\n"
- "ld1 { v11.h }[6], [x20]\n"
+ "ld1 { v11.h }[6], [x19]\n"
"b 84f\n"
"81:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 84f\n"
- "ld1 { v11.h }[4], [x20]\n"
+ "ld1 { v11.h }[4], [x19]\n"
"b 84f\n"
"82:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 83f\n"
- "ldr s11, [x20], #0x4\n"
+ "ldr s11, [x19], #0x4\n"
"tbz %x[n_channels], #0, 84f\n"
- "ld1 { v11.h }[2], [x20]\n"
+ "ld1 { v11.h }[2], [x19]\n"
"b 84f\n"
"83:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 2: Unset: Bit 1: Unset
- "ldr h11, [x20, #0x0]\n"
+ "ldr h11, [x19, #0x0]\n"
"84:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 2: End
"fmla v25.8h, v8.8h, v11.8h\n"
"fmla v28.8h, v5.8h, v11.8h\n"
- "add x20, x27, x11\n"
+ "add x19, x26, x10\n"
"fmla v31.8h, v2.8h, v11.8h\n"
"tbz %x[n_channels], #2, 86f\n"
- "ldr d13, [x20], #0x8\n"
+ "ldr d13, [x19], #0x8\n"
"tbz %x[n_channels], #1, 85f\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 88f\n"
- "ld1 { v13.h }[6], [x20]\n"
+ "ld1 { v13.h }[6], [x19]\n"
"b 88f\n"
"85:" // Tile loop: Oddments: Load inputs: (4, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 88f\n"
- "ld1 { v13.h }[4], [x20]\n"
+ "ld1 { v13.h }[4], [x19]\n"
"b 88f\n"
"86:" // Tile loop: Oddments: Load inputs: (4, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 87f\n"
- "ldr s13, [x20], #0x4\n"
+ "ldr s13, [x19], #0x4\n"
"tbz %x[n_channels], #0, 88f\n"
- "ld1 { v13.h }[2], [x20]\n"
+ "ld1 { v13.h }[2], [x19]\n"
"b 88f\n"
"87:" // Tile loop: Oddments: Load inputs: (4, 2): Bit 2: Unset: Bit 1: Unset
- "ldr h13, [x20, #0x0]\n"
+ "ldr h13, [x19, #0x0]\n"
"88:" // Tile loop: Oddments: Load inputs: (4, 2): Bit 2: End
"fmla v29.8h, v8.8h, v13.8h\n"
"fmla v30.8h, v7.8h, v13.8h\n"
@@ -1029,127 +1029,127 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"fmin v30.8h, v30.8h, v17.8h\n"
"fmin v31.8h, v31.8h, v17.8h\n"
"tbz %x[n_channels], #2, 90f\n"
- "mov x22, x15\n"
- "mov x21, x28\n"
- "st1 { v23.d }[0], [x22], x17\n"
- "mov x20, x25\n"
- "st1 { v26.d }[0], [x21], x17\n"
- "add x15, x15, #0x8\n"
- "st1 { v29.d }[0], [x20], x17\n"
- "add x28, x28, #0x8\n"
- "add x25, x25, #0x8\n"
- "st1 { v24.d }[0], [x22], x17\n"
- "st1 { v27.d }[0], [x21], x17\n"
- "st1 { v30.d }[0], [x20], x17\n"
- "st1 { v25.d }[0], [x22]\n"
- "st1 { v28.d }[0], [x21]\n"
- "st1 { v31.d }[0], [x20]\n"
+ "mov x21, x14\n"
+ "mov x20, x27\n"
+ "mov x19, x24\n"
+ "st1 { v23.d }[0], [x21], x16\n"
+ "st1 { v26.d }[0], [x20], x16\n"
+ "add x14, x14, #0x8\n"
+ "add x27, x27, #0x8\n"
+ "st1 { v29.d }[0], [x19], x16\n"
+ "add x24, x24, #0x8\n"
+ "st1 { v24.d }[0], [x21], x16\n"
+ "st1 { v27.d }[0], [x20], x16\n"
+ "st1 { v30.d }[0], [x19], x16\n"
+ "st1 { v25.d }[0], [x21]\n"
+ "st1 { v28.d }[0], [x20]\n"
+ "st1 { v31.d }[0], [x19]\n"
"tbz %x[n_channels], #1, 89f\n"
- "mov x22, x15\n"
- "mov x21, x28\n"
- "st1 { v23.s }[2], [x22], x17\n"
- "mov x20, x25\n"
- "st1 { v26.s }[2], [x21], x17\n"
- "add x15, x15, #0x4\n"
- "st1 { v29.s }[2], [x20], x17\n"
- "add x28, x28, #0x4\n"
- "add x25, x25, #0x4\n"
- "st1 { v24.s }[2], [x22], x17\n"
- "st1 { v27.s }[2], [x21], x17\n"
- "st1 { v30.s }[2], [x20], x17\n"
- "st1 { v25.s }[2], [x22]\n"
- "st1 { v28.s }[2], [x21]\n"
- "st1 { v31.s }[2], [x20]\n"
+ "mov x21, x14\n"
+ "mov x20, x27\n"
+ "mov x19, x24\n"
+ "st1 { v23.s }[2], [x21], x16\n"
+ "add x14, x14, #0x4\n"
+ "st1 { v26.s }[2], [x20], x16\n"
+ "add x27, x27, #0x4\n"
+ "add x24, x24, #0x4\n"
+ "st1 { v29.s }[2], [x19], x16\n"
+ "st1 { v24.s }[2], [x21], x16\n"
+ "st1 { v27.s }[2], [x20], x16\n"
+ "st1 { v30.s }[2], [x19], x16\n"
+ "st1 { v25.s }[2], [x21]\n"
+ "st1 { v28.s }[2], [x20]\n"
+ "st1 { v31.s }[2], [x19]\n"
"tbz %x[n_channels], #0, 92f\n"
- "mov x22, x15\n"
- "mov x21, x28\n"
- "st1 { v23.h }[6], [x22], x17\n"
- "mov x20, x25\n"
- "st1 { v26.h }[6], [x21], x17\n"
- "st1 { v29.h }[6], [x20], x17\n"
- "st1 { v24.h }[6], [x22], x17\n"
- "st1 { v27.h }[6], [x21], x17\n"
- "st1 { v30.h }[6], [x20], x17\n"
- "st1 { v25.h }[6], [x22]\n"
- "st1 { v28.h }[6], [x21]\n"
- "st1 { v31.h }[6], [x20]\n"
+ "mov x21, x14\n"
+ "mov x20, x27\n"
+ "mov x19, x24\n"
+ "st1 { v23.h }[6], [x21], x16\n"
+ "st1 { v26.h }[6], [x20], x16\n"
+ "st1 { v29.h }[6], [x19], x16\n"
+ "st1 { v24.h }[6], [x21], x16\n"
+ "st1 { v27.h }[6], [x20], x16\n"
+ "st1 { v30.h }[6], [x19], x16\n"
+ "st1 { v25.h }[6], [x21]\n"
+ "st1 { v28.h }[6], [x20]\n"
+ "st1 { v31.h }[6], [x19]\n"
"b 92f\n"
"89:" // Tile loop: Oddments: Store: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 92f\n"
- "mov x22, x15\n"
- "mov x21, x28\n"
- "st1 { v23.h }[4], [x22], x17\n"
- "mov x20, x25\n"
- "st1 { v26.h }[4], [x21], x17\n"
- "st1 { v29.h }[4], [x20], x17\n"
- "st1 { v24.h }[4], [x22], x17\n"
- "st1 { v27.h }[4], [x21], x17\n"
- "st1 { v30.h }[4], [x20], x17\n"
- "st1 { v25.h }[4], [x22]\n"
- "st1 { v28.h }[4], [x21]\n"
- "st1 { v31.h }[4], [x20]\n"
+ "mov x21, x14\n"
+ "mov x20, x27\n"
+ "st1 { v23.h }[4], [x21], x16\n"
+ "mov x19, x24\n"
+ "st1 { v26.h }[4], [x20], x16\n"
+ "st1 { v29.h }[4], [x19], x16\n"
+ "st1 { v24.h }[4], [x21], x16\n"
+ "st1 { v27.h }[4], [x20], x16\n"
+ "st1 { v30.h }[4], [x19], x16\n"
+ "st1 { v25.h }[4], [x21]\n"
+ "st1 { v28.h }[4], [x20]\n"
+ "st1 { v31.h }[4], [x19]\n"
"b 92f\n"
"90:" // Tile loop: Oddments: Store: Bit 2: Unset
"tbz %x[n_channels], #1, 91f\n"
- "mov x22, x15\n"
- "mov x21, x28\n"
- "st1 { v23.s }[0], [x22], x17\n"
- "mov x20, x25\n"
- "st1 { v26.s }[0], [x21], x17\n"
- "add x15, x15, #0x4\n"
- "st1 { v29.s }[0], [x20], x17\n"
- "add x28, x28, #0x4\n"
- "add x25, x25, #0x4\n"
- "st1 { v24.s }[0], [x22], x17\n"
- "st1 { v27.s }[0], [x21], x17\n"
- "st1 { v30.s }[0], [x20], x17\n"
- "st1 { v25.s }[0], [x22]\n"
- "st1 { v28.s }[0], [x21]\n"
- "st1 { v31.s }[0], [x20]\n"
+ "mov x21, x14\n"
+ "mov x20, x27\n"
+ "st1 { v23.s }[0], [x21], x16\n"
+ "mov x19, x24\n"
+ "st1 { v26.s }[0], [x20], x16\n"
+ "add x14, x14, #0x4\n"
+ "st1 { v29.s }[0], [x19], x16\n"
+ "add x27, x27, #0x4\n"
+ "add x24, x24, #0x4\n"
+ "st1 { v24.s }[0], [x21], x16\n"
+ "st1 { v27.s }[0], [x20], x16\n"
+ "st1 { v30.s }[0], [x19], x16\n"
+ "st1 { v25.s }[0], [x21]\n"
+ "st1 { v28.s }[0], [x20]\n"
+ "st1 { v31.s }[0], [x19]\n"
"tbz %x[n_channels], #0, 92f\n"
- "mov x22, x15\n"
- "mov x21, x28\n"
- "st1 { v23.h }[2], [x22], x17\n"
- "mov x20, x25\n"
- "st1 { v26.h }[2], [x21], x17\n"
- "st1 { v29.h }[2], [x20], x17\n"
- "st1 { v24.h }[2], [x22], x17\n"
- "st1 { v27.h }[2], [x21], x17\n"
- "st1 { v30.h }[2], [x20], x17\n"
- "st1 { v25.h }[2], [x22]\n"
- "st1 { v28.h }[2], [x21]\n"
- "st1 { v31.h }[2], [x20]\n"
+ "mov x21, x14\n"
+ "mov x20, x27\n"
+ "mov x19, x24\n"
+ "st1 { v23.h }[2], [x21], x16\n"
+ "st1 { v26.h }[2], [x20], x16\n"
+ "st1 { v29.h }[2], [x19], x16\n"
+ "st1 { v24.h }[2], [x21], x16\n"
+ "st1 { v27.h }[2], [x20], x16\n"
+ "st1 { v30.h }[2], [x19], x16\n"
+ "st1 { v25.h }[2], [x21]\n"
+ "st1 { v28.h }[2], [x20]\n"
+ "st1 { v31.h }[2], [x19]\n"
"b 92f\n"
"91:" // Tile loop: Oddments: Store: Bit 2: Unset: Bit 1: Unset
- "mov x22, x15\n"
- "mov x21, x28\n"
- "st1 { v23.h }[0], [x22], x17\n"
- "mov x20, x25\n"
- "st1 { v26.h }[0], [x21], x17\n"
- "st1 { v29.h }[0], [x20], x17\n"
- "st1 { v24.h }[0], [x22], x17\n"
- "st1 { v27.h }[0], [x21], x17\n"
- "st1 { v30.h }[0], [x20], x17\n"
- "st1 { v25.h }[0], [x22]\n"
- "st1 { v28.h }[0], [x21]\n"
- "st1 { v31.h }[0], [x20]\n"
+ "mov x21, x14\n"
+ "mov x20, x27\n"
+ "st1 { v23.h }[0], [x21], x16\n"
+ "mov x19, x24\n"
+ "st1 { v26.h }[0], [x20], x16\n"
+ "st1 { v29.h }[0], [x19], x16\n"
+ "st1 { v24.h }[0], [x21], x16\n"
+ "st1 { v27.h }[0], [x20], x16\n"
+ "st1 { v30.h }[0], [x19], x16\n"
+ "st1 { v25.h }[0], [x21]\n"
+ "st1 { v28.h }[0], [x20]\n"
+ "st1 { v31.h }[0], [x19]\n"
"92:" // Tile loop: Oddments: Store: Bit 2: End
"93:" // Tile loop: End
- "ldr x23, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x24, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "add x23, x23, #0x1\n"
- "add x21, x24, #0x1\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "cmp x23, x20\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "csel x24, x24, x21, LT\n"
- "csel x23, x23, XZR, LT\n"
- "cmp x24, x20\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x23, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "add x22, x22, #0x1\n"
+ "add x20, x23, #0x1\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "cmp x22, x19\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "csel x23, x23, x20, LT\n"
+ "csel x22, x22, XZR, LT\n"
+ "cmp x23, x19\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
index 878aa29bcf..faf6c91181 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,457 +87,457 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
activation_min, activation_max);
__asm__ __volatile__(
- "mov x8, #0x10\n" // cntb _, ALL, #1
- "lsr x17, %x[n_channels], #0x3\n"
- "ldr x16, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "ldr x15, [%x[params_struct], %[offsetof_args_params]]\n"
+ "mov x17, #0x10\n" // cntb _, ALL, #1
+ "lsr x16, %x[n_channels], #0x3\n"
+ "ldr x15, [%x[params_struct], %[offsetof_args_outptrs]]\n"
+ "ldr x14, [%x[params_struct], %[offsetof_args_params]]\n"
"add x20, %x[params_struct], %[offsetof_args_min]\n"
+ "add x19, %x[params_struct], %[offsetof_args_max]\n"
"ld1r { v18.8h }, [x20]\n"
- "add x20, %x[params_struct], %[offsetof_args_max]\n"
- "ld1r { v17.8h }, [x20]\n"
- "add x14, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "mov x13, #0x0\n"
- "sub x12, XZR, x8\n"
- "cbz x17, 3f\n"
- "ldr q16, [x15, #0x0]\n"
- "ldr q0, [x15, #0x10]\n"
- "cmp x8, x17, LSL #4\n"
- "ldr q1, [x15, #0x20]\n"
- "ldr q2, [x15, #0x30]\n"
- "ldr q3, [x15, #0x40]\n"
- "ldr q4, [x15, #0x50]\n"
- "ldr q5, [x15, #0x60]\n"
- "ldr q6, [x15, #0x70]\n"
- "ldr q7, [x15, #0x80]\n"
- "ldr q8, [x15, #0x90]\n"
- "add x15, x15, #0xa0\n"
- "ldp x11, x10, [x14, #0x0]\n"
- "ldr q9, [x11, x13]\n"
- "ldr q10, [x10, x13]\n"
- "ldp x9, x28, [x14, #0x10]\n"
- "ldr q11, [x9, x13]\n"
- "ldr q12, [x28, x13]\n"
- "ldr x27, [x14, #0x20]\n"
- "ldr q13, [x27, x13]\n"
+ "ld1r { v17.8h }, [x19]\n"
+ "add x13, %x[params_struct], %[offsetof_Args_inptrs]\n"
+ "mov x12, #0x0\n"
+ "sub x11, XZR, x17\n"
+ "cbz x16, 3f\n"
+ "ldp x10, x9, [x13, #0x0]\n"
+ "ldp x28, x27, [x13, #0x10]\n"
+ "ldr x26, [x13, #0x20]\n"
+ "cmp x17, x16, LSL #4\n"
+ "ldr q16, [x14, #0x0]\n"
+ "ldr q0, [x14, #0x10]\n"
+ "ldr q1, [x14, #0x20]\n"
+ "ldr q2, [x14, #0x30]\n"
+ "ldr q3, [x14, #0x40]\n"
+ "ldr q4, [x14, #0x50]\n"
+ "ldr q5, [x14, #0x60]\n"
+ "ldr q6, [x14, #0x70]\n"
+ "ldr q7, [x14, #0x80]\n"
+ "ldr q8, [x14, #0x90]\n"
+ "ldr q9, [x10, x12]\n"
+ "add x14, x14, #0xa0\n"
+ "ldr q10, [x9, x12]\n"
+ "ldr q11, [x28, x12]\n"
+ "ldr q12, [x27, x12]\n"
+ "ldr q13, [x26, x12]\n"
"bge 2f\n"
"1:" // Channel loop
"mov v23.16b, v16.16b\n fmla v23.8h, v8.8h, v9.8h\n"
"mov v24.16b, v16.16b\n fmla v24.8h, v7.8h, v9.8h\n"
- "ldr x26, [x14, #0x30]\n"
- "ldr x25, [x14, #0x38]\n"
+ "ldr x25, [x13, #0x30]\n"
+ "ldr x24, [x13, #0x38]\n"
"mov v25.16b, v16.16b\n fmla v25.8h, v6.8h, v9.8h\n"
"fmla v23.8h, v0.8h, v10.8h\n"
- "ldr x24, [x14, #0x28]\n"
- "ldr x10, [x14, #0x48]\n"
- "ldr q10, [x10, x13]\n"
+ "ldr x23, [x13, #0x28]\n"
+ "ldr x9, [x13, #0x48]\n"
"fmla v24.8h, v4.8h, v13.8h\n"
"mov v26.16b, v16.16b\n fmla v26.8h, v5.8h, v9.8h\n"
- "ldr x11, [x14, #0x40]\n"
+ "ldr x10, [x13, #0x40]\n"
+ "ldr q10, [x9, x12]\n"
"mov v27.16b, v16.16b\n fmla v27.8h, v4.8h, v9.8h\n"
"mov v28.16b, v16.16b\n fmla v28.8h, v3.8h, v9.8h\n"
- "ldr x9, [x14, #0x50]\n"
- "ldr x28, [x14, #0x58]\n"
+ "ldr x28, [x13, #0x50]\n"
+ "ldr x27, [x13, #0x58]\n"
"fmla v25.8h, v2.8h, v11.8h\n"
- "ldr q11, [x26, x13]\n"
+ "ldr q11, [x25, x12]\n"
"mov v29.16b, v16.16b\n fmla v29.8h, v2.8h, v9.8h\n"
- "ldr x27, [x14, #0x60]\n"
+ "ldr x26, [x13, #0x60]\n"
"fmla v23.8h, v5.8h, v13.8h\n"
"fmla v24.8h, v6.8h, v11.8h\n"
- "ldr x26, [x14, #0x70]\n"
- "ldr x10, [x14, #0x88]\n"
+ "ldr x25, [x13, #0x70]\n"
+ "ldr x9, [x13, #0x88]\n"
"mov v31.16b, v16.16b\n fmla v31.8h, v0.8h, v9.8h\n"
"fmla v25.8h, v3.8h, v13.8h\n"
- "ldr x23, [x16, #0x0]\n"
- "add x12, x12, #0x10\n"
+ "ldr x22, [x15, #0x0]\n"
+ "add x11, x11, #0x10\n"
"fmla v26.8h, v2.8h, v13.8h\n"
"fmla v27.8h, v1.8h, v13.8h\n"
- "ldr x22, [x16, #0x8]\n"
- "ldr x21, [x16, #0x10]\n"
+ "ldr x21, [x15, #0x8]\n"
+ "ldr x20, [x15, #0x10]\n"
"fmla v28.8h, v0.8h, v13.8h\n"
- "ldr q13, [x25, x13]\n"
+ "ldr q13, [x24, x12]\n"
"fmla v29.8h, v6.8h, v12.8h\n"
- "ldr q12, [x24, x13]\n"
+ "ldr q12, [x23, x12]\n"
"mov v30.16b, v16.16b\n fmla v30.8h, v1.8h, v9.8h\n"
- "ldr q16, [x15, #0x0]\n"
"fmla v23.8h, v7.8h, v11.8h\n"
- "ldr x24, [x14, #0x68]\n"
+ "ldr x23, [x13, #0x68]\n"
+ "ldr x24, [x13, #0x78]\n"
"fmla v24.8h, v0.8h, v13.8h\n"
"fmla v31.8h, v8.8h, v12.8h\n"
- "ldr q12, [x11, x13]\n"
- "ldr x25, [x14, #0x78]\n"
+ "ldr q12, [x10, x12]\n"
+ "ldr x10, [x13, #0x80]\n"
"fmla v26.8h, v4.8h, v11.8h\n"
"fmla v27.8h, v3.8h, v11.8h\n"
- "ldr x11, [x14, #0x80]\n"
- "ldr x20, [x16, #0x18]\n"
+ "ldr x19, [x15, #0x18]\n"
+ "ldr q16, [x14, #0x0]\n"
"fmla v30.8h, v0.8h, v11.8h\n"
"fmla v28.8h, v4.8h, v10.8h\n"
"fmla v29.8h, v1.8h, v11.8h\n"
- "ldr q11, [x9, x13]\n"
"fmla v23.8h, v1.8h, v13.8h\n"
- "ldr q13, [x28, x13]\n"
+ "ldr q11, [x28, x12]\n"
+ "ldr q13, [x27, x12]\n"
"fmla v24.8h, v2.8h, v12.8h\n"
"fmla v25.8h, v1.8h, v12.8h\n"
- "ldr q12, [x27, x13]\n"
- "ldr x9, [x14, #0x90]\n"
+ "ldr q12, [x26, x12]\n"
+ "ldr x28, [x13, #0x90]\n"
"fmla v27.8h, v5.8h, v10.8h\n"
"fmla v30.8h, v2.8h, v10.8h\n"
- "ldr x27, [x14, #0xa0]\n"
- "ldr x28, [x14, #0x98]\n"
+ "ldr x26, [x13, #0xa0]\n"
+ "ldr x27, [x13, #0x98]\n"
"fmla v26.8h, v0.8h, v11.8h\n"
"fmla v28.8h, v2.8h, v13.8h\n"
"fmla v24.8h, v8.8h, v10.8h\n"
"fmla v25.8h, v7.8h, v10.8h\n"
"fmla v31.8h, v1.8h, v10.8h\n"
- "ldr q10, [x24, x13]\n"
"fmla v29.8h, v3.8h, v12.8h\n"
- "ldr x24, [x14, #0xa8]\n"
+ "ldr q10, [x23, x12]\n"
+ "ldr x23, [x13, #0xa8]\n"
"fmla v26.8h, v6.8h, v12.8h\n"
- "ldr q12, [x11, x13]\n"
"fmla v27.8h, v7.8h, v10.8h\n"
- "ldr x11, [x14, #0xc0]\n"
+ "ldr q12, [x10, x12]\n"
+ "ldr x10, [x13, #0xc0]\n"
"fmla v28.8h, v6.8h, v10.8h\n"
"fmla v30.8h, v4.8h, v10.8h\n"
"fmla v23.8h, v3.8h, v11.8h\n"
- "ldr q11, [x26, x13]\n"
"fmla v25.8h, v5.8h, v13.8h\n"
- "ldr q13, [x25, x13]\n"
+ "ldr q11, [x25, x12]\n"
+ "ldr q13, [x24, x12]\n"
"fmla v29.8h, v5.8h, v10.8h\n"
"fmla v31.8h, v3.8h, v10.8h\n"
- "ldr x26, [x14, #0xb0]\n"
- "ldr x25, [x14, #0xb8]\n"
+ "ldr x25, [x13, #0xb0]\n"
+ "ldr x24, [x13, #0xb8]\n"
"fmla v26.8h, v8.8h, v10.8h\n"
"fmla v28.8h, v8.8h, v11.8h\n"
"fmla v30.8h, v6.8h, v13.8h\n"
"fmla v24.8h, v3.8h, v12.8h\n"
"fmla v27.8h, v0.8h, v12.8h\n"
"fmla v31.8h, v5.8h, v11.8h\n"
- "ldr q11, [x10, x13]\n"
+ "ldr q11, [x9, x12]\n"
"fmla v29.8h, v7.8h, v13.8h\n"
- "ldr q13, [x9, x13]\n"
+ "ldr q13, [x28, x12]\n"
"fmla v23.8h, v4.8h, v12.8h\n"
"fmla v26.8h, v1.8h, v12.8h\n"
- "ldr q12, [x28, x13]\n"
"fmla v24.8h, v5.8h, v11.8h\n"
+ "ldr q12, [x27, x12]\n"
"fmla v25.8h, v4.8h, v11.8h\n"
"fmla v27.8h, v2.8h, v11.8h\n"
"fmla v28.8h, v1.8h, v11.8h\n"
- "ldr q11, [x27, x13]\n"
"fmla v30.8h, v8.8h, v13.8h\n"
- "ldr x27, [x14, #0x20]\n"
+ "ldr q11, [x26, x12]\n"
+ "ldr x26, [x13, #0x20]\n"
"fmla v31.8h, v7.8h, v13.8h\n"
- "ldr q13, [x24, x13]\n"
+ "ldr q13, [x23, x12]\n"
"fmla v23.8h, v2.8h, v11.8h\n"
"fmla v26.8h, v7.8h, v12.8h\n"
"fmla v27.8h, v6.8h, v12.8h\n"
"fmla v29.8h, v4.8h, v12.8h\n"
"fmla v30.8h, v3.8h, v12.8h\n"
- "ldr q12, [x26, x13]\n"
+ "ldr q12, [x25, x12]\n"
"fmla v31.8h, v4.8h, v13.8h\n"
- "ldr q4, [x15, #0x50]\n"
"fmla v24.8h, v1.8h, v11.8h\n"
- "ldr q1, [x15, #0x20]\n"
+ "fmax v24.8h, v24.8h, v18.8h\n"
+ "ldr q1, [x14, #0x20]\n"
"fmla v25.8h, v0.8h, v11.8h\n"
- "ldr q11, [x25, x13]\n"
+ "ldr q11, [x24, x12]\n"
"fmla v23.8h, v6.8h, v12.8h\n"
"fmax v23.8h, v23.8h, v18.8h\n"
"fmla v28.8h, v7.8h, v13.8h\n"
"fmla v30.8h, v5.8h, v13.8h\n"
"fmin v23.8h, v23.8h, v17.8h\n"
- "str q23, [x23, x12]\n"
+ "str q23, [x22, x11]\n"
"fmla v29.8h, v0.8h, v12.8h\n"
- "ldr q0, [x15, #0x10]\n"
"fmla v31.8h, v2.8h, v11.8h\n"
- "ldr q2, [x15, #0x30]\n"
+ "ldr x22, [x15, #0x20]\n"
+ "fmin v24.8h, v24.8h, v17.8h\n"
"fmla v27.8h, v8.8h, v13.8h\n"
- "ldr q13, [x11, x13]\n"
"fmla v26.8h, v3.8h, v12.8h\n"
- "ldr q3, [x15, #0x40]\n"
+ "ldr q13, [x10, x12]\n"
+ "fmax v26.8h, v26.8h, v18.8h\n"
"fmla v25.8h, v8.8h, v11.8h\n"
"fmla v28.8h, v5.8h, v11.8h\n"
- "ldr q5, [x15, #0x60]\n"
- "fmax v24.8h, v24.8h, v18.8h\n"
+ "fmax v25.8h, v25.8h, v18.8h\n"
+ "ldp x10, x9, [x13, #0x0]\n"
"fmla v29.8h, v8.8h, v13.8h\n"
- "ldr q8, [x15, #0x90]\n"
"fmla v30.8h, v7.8h, v13.8h\n"
- "ldr q7, [x15, #0x80]\n"
- "fmla v31.8h, v6.8h, v13.8h\n"
- "ldr q13, [x27, x8]\n"
- "ldr q6, [x15, #0x70]\n"
- "fmax v25.8h, v25.8h, v18.8h\n"
- "fmax v26.8h, v26.8h, v18.8h\n"
"fmax v27.8h, v27.8h, v18.8h\n"
- "ldr x23, [x16, #0x20]\n"
- "ldp x11, x10, [x14, #0x0]\n"
- "ldr q9, [x11, x8]\n"
- "ldr q10, [x10, x8]\n"
- "fmin v24.8h, v24.8h, v17.8h\n"
+ "ldp x28, x27, [x13, #0x10]\n"
+ "fmla v31.8h, v6.8h, v13.8h\n"
"fmin v25.8h, v25.8h, v17.8h\n"
- "ldp x9, x28, [x14, #0x10]\n"
- "ldr q11, [x9, x8]\n"
+ "str q24, [x21, x11]\n"
+ "ldr x21, [x15, #0x28]\n"
"fmin v26.8h, v26.8h, v17.8h\n"
"fmin v27.8h, v27.8h, v17.8h\n"
- "ldr q12, [x28, x8]\n"
+ "str q25, [x20, x11]\n"
+ "ldr x20, [x15, #0x30]\n"
"fmax v28.8h, v28.8h, v18.8h\n"
"fmax v29.8h, v29.8h, v18.8h\n"
- "str q24, [x22, x12]\n"
+ "str q26, [x19, x11]\n"
+ "ldr x19, [x15, #0x38]\n"
"fmax v30.8h, v30.8h, v18.8h\n"
"fmax v31.8h, v31.8h, v18.8h\n"
- "str q25, [x21, x12]\n"
- "ldr x22, [x16, #0x28]\n"
- "str q26, [x20, x12]\n"
- "ldr x21, [x16, #0x30]\n"
- "ldr x20, [x16, #0x38]\n"
- "add x8, x8, #0x10\n"
- "str q27, [x23, x12]\n"
- "ldr x23, [x16, #0x40]\n"
- "cmp x8, x17, LSL #4\n"
+ "str q27, [x22, x11]\n"
+ "ldr x22, [x15, #0x40]\n"
+ "ldr q9, [x10, x17]\n"
+ "ldr q10, [x9, x17]\n"
"fmin v28.8h, v28.8h, v17.8h\n"
"fmin v29.8h, v29.8h, v17.8h\n"
+ "ldr q11, [x28, x17]\n"
+ "ldr q12, [x27, x17]\n"
"fmin v30.8h, v30.8h, v17.8h\n"
- "add x13, x13, #0x10\n"
- "str q28, [x22, x12]\n"
"fmin v31.8h, v31.8h, v17.8h\n"
- "str q29, [x21, x12]\n"
- "add x15, x15, #0xa0\n"
- "str q30, [x20, x12]\n"
- "str q31, [x23, x12]\n"
+ "ldr q13, [x26, x17]\n"
+ "add x17, x17, #0x10\n"
+ "cmp x17, x16, LSL #4\n"
+ "str q28, [x21, x11]\n"
+ "add x12, x12, #0x10\n"
+ "str q29, [x20, x11]\n"
+ "ldr q0, [x14, #0x10]\n"
+ "ldr q2, [x14, #0x30]\n"
+ "str q30, [x19, x11]\n"
+ "ldr q3, [x14, #0x40]\n"
+ "ldr q4, [x14, #0x50]\n"
+ "str q31, [x22, x11]\n"
+ "ldr q5, [x14, #0x60]\n"
+ "ldr q6, [x14, #0x70]\n"
+ "ldr q7, [x14, #0x80]\n"
+ "ldr q8, [x14, #0x90]\n"
+ "add x14, x14, #0xa0\n"
"blt 1b\n"
"2:" // Channel tail
"mov v23.16b, v16.16b\n fmla v23.8h, v8.8h, v9.8h\n"
"mov v24.16b, v16.16b\n fmla v24.8h, v7.8h, v9.8h\n"
- "ldr x26, [x14, #0x30]\n"
- "ldr x25, [x14, #0x38]\n"
+ "ldr x25, [x13, #0x30]\n"
+ "ldr x24, [x13, #0x38]\n"
"mov v25.16b, v16.16b\n fmla v25.8h, v6.8h, v9.8h\n"
"fmla v23.8h, v0.8h, v10.8h\n"
- "ldr x24, [x14, #0x28]\n"
- "ldr x10, [x14, #0x48]\n"
- "ldr q10, [x10, x13]\n"
+ "ldr x23, [x13, #0x28]\n"
+ "ldr x9, [x13, #0x48]\n"
"fmla v24.8h, v4.8h, v13.8h\n"
"mov v26.16b, v16.16b\n fmla v26.8h, v5.8h, v9.8h\n"
- "ldr x11, [x14, #0x40]\n"
+ "ldr x10, [x13, #0x40]\n"
+ "ldr q10, [x9, x12]\n"
"mov v27.16b, v16.16b\n fmla v27.8h, v4.8h, v9.8h\n"
"mov v28.16b, v16.16b\n fmla v28.8h, v3.8h, v9.8h\n"
- "ldr x9, [x14, #0x50]\n"
- "ldr x28, [x14, #0x58]\n"
+ "ldr x28, [x13, #0x50]\n"
+ "ldr x27, [x13, #0x58]\n"
"fmla v25.8h, v2.8h, v11.8h\n"
- "ldr q11, [x26, x13]\n"
+ "ldr q11, [x25, x12]\n"
"mov v29.16b, v16.16b\n fmla v29.8h, v2.8h, v9.8h\n"
- "ldr x27, [x14, #0x60]\n"
+ "ldr x26, [x13, #0x60]\n"
"fmla v23.8h, v5.8h, v13.8h\n"
"fmla v24.8h, v6.8h, v11.8h\n"
- "ldr x26, [x14, #0x70]\n"
- "ldr x10, [x14, #0x88]\n"
+ "ldr x25, [x13, #0x70]\n"
+ "ldr x9, [x13, #0x88]\n"
"mov v31.16b, v16.16b\n fmla v31.8h, v0.8h, v9.8h\n"
"fmla v25.8h, v3.8h, v13.8h\n"
- "ldr x23, [x16, #0x0]\n"
- "add x12, x12, #0x10\n"
+ "ldr x22, [x15, #0x0]\n"
+ "add x11, x11, #0x10\n"
"fmla v26.8h, v2.8h, v13.8h\n"
"fmla v27.8h, v1.8h, v13.8h\n"
- "ldr x22, [x16, #0x8]\n"
- "ldr x21, [x16, #0x10]\n"
+ "ldr x21, [x15, #0x8]\n"
+ "ldr x20, [x15, #0x10]\n"
"fmla v28.8h, v0.8h, v13.8h\n"
- "ldr q13, [x25, x13]\n"
+ "ldr q13, [x24, x12]\n"
"fmla v29.8h, v6.8h, v12.8h\n"
- "ldr q12, [x24, x13]\n"
+ "ldr q12, [x23, x12]\n"
"mov v30.16b, v16.16b\n fmla v30.8h, v1.8h, v9.8h\n"
"fmla v23.8h, v7.8h, v11.8h\n"
- "ldr x24, [x14, #0x68]\n"
- "ldr x25, [x14, #0x78]\n"
+ "ldr x23, [x13, #0x68]\n"
+ "ldr x24, [x13, #0x78]\n"
"fmla v24.8h, v0.8h, v13.8h\n"
"fmla v31.8h, v8.8h, v12.8h\n"
- "ldr q12, [x11, x13]\n"
- "ldr x11, [x14, #0x80]\n"
+ "ldr q12, [x10, x12]\n"
+ "ldr x10, [x13, #0x80]\n"
"fmla v26.8h, v4.8h, v11.8h\n"
"fmla v27.8h, v3.8h, v11.8h\n"
- "ldr x20, [x16, #0x18]\n"
+ "ldr x19, [x15, #0x18]\n"
"fmla v30.8h, v0.8h, v11.8h\n"
"fmla v28.8h, v4.8h, v10.8h\n"
"fmla v29.8h, v1.8h, v11.8h\n"
- "ldr q11, [x9, x13]\n"
"fmla v23.8h, v1.8h, v13.8h\n"
- "ldr q13, [x28, x13]\n"
+ "ldr q11, [x28, x12]\n"
+ "ldr q13, [x27, x12]\n"
"fmla v24.8h, v2.8h, v12.8h\n"
"fmla v25.8h, v1.8h, v12.8h\n"
- "ldr q12, [x27, x13]\n"
- "ldr x9, [x14, #0x90]\n"
+ "ldr q12, [x26, x12]\n"
+ "ldr x28, [x13, #0x90]\n"
"fmla v27.8h, v5.8h, v10.8h\n"
"fmla v30.8h, v2.8h, v10.8h\n"
- "ldr x27, [x14, #0xa0]\n"
- "ldr x28, [x14, #0x98]\n"
+ "ldr x26, [x13, #0xa0]\n"
+ "ldr x27, [x13, #0x98]\n"
"fmla v26.8h, v0.8h, v11.8h\n"
"fmla v28.8h, v2.8h, v13.8h\n"
"fmla v24.8h, v8.8h, v10.8h\n"
"fmla v25.8h, v7.8h, v10.8h\n"
"fmla v31.8h, v1.8h, v10.8h\n"
- "ldr q10, [x24, x13]\n"
"fmla v29.8h, v3.8h, v12.8h\n"
- "ldr x24, [x14, #0xa8]\n"
+ "ldr q10, [x23, x12]\n"
+ "ldr x23, [x13, #0xa8]\n"
"fmla v26.8h, v6.8h, v12.8h\n"
- "ldr q12, [x11, x13]\n"
"fmla v27.8h, v7.8h, v10.8h\n"
- "ldr x11, [x14, #0xc0]\n"
+ "ldr q12, [x10, x12]\n"
+ "ldr x10, [x13, #0xc0]\n"
"fmla v28.8h, v6.8h, v10.8h\n"
"fmla v30.8h, v4.8h, v10.8h\n"
"fmla v23.8h, v3.8h, v11.8h\n"
- "ldr q11, [x26, x13]\n"
"fmla v25.8h, v5.8h, v13.8h\n"
- "ldr q13, [x25, x13]\n"
+ "ldr q11, [x25, x12]\n"
+ "ldr q13, [x24, x12]\n"
"fmla v29.8h, v5.8h, v10.8h\n"
"fmla v31.8h, v3.8h, v10.8h\n"
- "ldr x26, [x14, #0xb0]\n"
- "ldr x25, [x14, #0xb8]\n"
+ "ldr x25, [x13, #0xb0]\n"
+ "ldr x24, [x13, #0xb8]\n"
"fmla v26.8h, v8.8h, v10.8h\n"
"fmla v28.8h, v8.8h, v11.8h\n"
"fmla v30.8h, v6.8h, v13.8h\n"
"fmla v24.8h, v3.8h, v12.8h\n"
"fmla v27.8h, v0.8h, v12.8h\n"
"fmla v31.8h, v5.8h, v11.8h\n"
- "ldr q11, [x10, x13]\n"
+ "ldr q11, [x9, x12]\n"
"fmla v29.8h, v7.8h, v13.8h\n"
- "ldr q13, [x9, x13]\n"
+ "ldr q13, [x28, x12]\n"
"fmla v23.8h, v4.8h, v12.8h\n"
"fmla v26.8h, v1.8h, v12.8h\n"
- "ldr q12, [x28, x13]\n"
"fmla v24.8h, v5.8h, v11.8h\n"
+ "ldr q12, [x27, x12]\n"
"fmla v25.8h, v4.8h, v11.8h\n"
"fmla v27.8h, v2.8h, v11.8h\n"
"fmla v28.8h, v1.8h, v11.8h\n"
- "ldr q11, [x27, x13]\n"
"fmla v30.8h, v8.8h, v13.8h\n"
+ "ldr q11, [x26, x12]\n"
"fmla v31.8h, v7.8h, v13.8h\n"
- "ldr q13, [x24, x13]\n"
+ "ldr q13, [x23, x12]\n"
"fmla v23.8h, v2.8h, v11.8h\n"
"fmla v26.8h, v7.8h, v12.8h\n"
"fmla v27.8h, v6.8h, v12.8h\n"
"fmla v29.8h, v4.8h, v12.8h\n"
"fmla v30.8h, v3.8h, v12.8h\n"
- "ldr q12, [x26, x13]\n"
+ "ldr q12, [x25, x12]\n"
"fmla v31.8h, v4.8h, v13.8h\n"
"fmla v24.8h, v1.8h, v11.8h\n"
"fmax v24.8h, v24.8h, v18.8h\n"
"fmla v25.8h, v0.8h, v11.8h\n"
- "ldr q11, [x25, x13]\n"
+ "ldr q11, [x24, x12]\n"
"fmla v23.8h, v6.8h, v12.8h\n"
"fmax v23.8h, v23.8h, v18.8h\n"
"fmla v28.8h, v7.8h, v13.8h\n"
"fmla v30.8h, v5.8h, v13.8h\n"
"fmin v23.8h, v23.8h, v17.8h\n"
- "str q23, [x23, x12]\n"
+ "str q23, [x22, x11]\n"
"fmla v29.8h, v0.8h, v12.8h\n"
"fmla v31.8h, v2.8h, v11.8h\n"
- "ldr x23, [x16, #0x20]\n"
+ "ldr x22, [x15, #0x20]\n"
"fmin v24.8h, v24.8h, v17.8h\n"
"fmla v27.8h, v8.8h, v13.8h\n"
- "ldr q13, [x11, x13]\n"
"fmla v26.8h, v3.8h, v12.8h\n"
+ "ldr q13, [x10, x12]\n"
"fmax v26.8h, v26.8h, v18.8h\n"
"fmla v25.8h, v8.8h, v11.8h\n"
"fmla v28.8h, v5.8h, v11.8h\n"
"fmax v25.8h, v25.8h, v18.8h\n"
- "str q24, [x22, x12]\n"
+ "str q24, [x21, x11]\n"
"fmla v29.8h, v8.8h, v13.8h\n"
"fmla v30.8h, v7.8h, v13.8h\n"
"fmax v27.8h, v27.8h, v18.8h\n"
- "ldr x22, [x16, #0x28]\n"
+ "ldr x21, [x15, #0x28]\n"
"fmla v31.8h, v6.8h, v13.8h\n"
"fmin v25.8h, v25.8h, v17.8h\n"
- "str q25, [x21, x12]\n"
- "ldr x21, [x16, #0x30]\n"
+ "str q25, [x20, x11]\n"
+ "ldr x20, [x15, #0x30]\n"
"fmin v26.8h, v26.8h, v17.8h\n"
"fmin v27.8h, v27.8h, v17.8h\n"
- "str q26, [x20, x12]\n"
- "ldr x20, [x16, #0x38]\n"
+ "str q26, [x19, x11]\n"
+ "ldr x19, [x15, #0x38]\n"
"fmax v28.8h, v28.8h, v18.8h\n"
"fmax v29.8h, v29.8h, v18.8h\n"
- "str q27, [x23, x12]\n"
- "ldr x23, [x16, #0x40]\n"
+ "str q27, [x22, x11]\n"
+ "ldr x22, [x15, #0x40]\n"
"fmax v30.8h, v30.8h, v18.8h\n"
"fmax v31.8h, v31.8h, v18.8h\n"
- "add x13, x13, #0x10\n"
+ "add x12, x12, #0x10\n"
"fmin v28.8h, v28.8h, v17.8h\n"
"fmin v29.8h, v29.8h, v17.8h\n"
- "str q28, [x22, x12]\n"
+ "str q28, [x21, x11]\n"
"fmin v30.8h, v30.8h, v17.8h\n"
"fmin v31.8h, v31.8h, v17.8h\n"
- "str q29, [x21, x12]\n"
- "str q30, [x20, x12]\n"
- "str q31, [x23, x12]\n"
+ "str q29, [x20, x11]\n"
+ "str q30, [x19, x11]\n"
+ "str q31, [x22, x11]\n"
"3:" // Oddments
"tst %x[n_channels], #0x7\n"
"beq 92f\n"
- "ldr q16, [x15, #0x0]\n"
- "ldr q0, [x15, #0x10]\n"
- "mov x12, x13\n"
- "ldr q1, [x15, #0x20]\n"
- "ldr q2, [x15, #0x30]\n"
- "ldr q3, [x15, #0x40]\n"
- "ldr q4, [x15, #0x50]\n"
- "ldr q5, [x15, #0x60]\n"
- "ldr q6, [x15, #0x70]\n"
- "ldr q7, [x15, #0x80]\n"
- "ldr q8, [x15, #0x90]\n"
- "ldr x24, [x14, #0x0]\n"
- "ldr x23, [x14, #0x8]\n"
- "add x24, x24, x13\n"
- "add x23, x23, x13\n"
- "ldr x22, [x14, #0x10]\n"
- "ldr x21, [x14, #0x18]\n"
- "add x22, x22, x13\n"
- "add x21, x21, x13\n"
- "ldr x20, [x14, #0x20]\n"
- "add x20, x20, x13\n"
+ "ldr x10, [x13, #0x0]\n"
+ "ldr x9, [x13, #0x8]\n"
+ "ldr x28, [x13, #0x10]\n"
+ "ldr x27, [x13, #0x18]\n"
+ "mov x11, x12\n"
+ "add x10, x10, x12\n"
+ "ldr x26, [x13, #0x20]\n"
+ "ldr q16, [x14, #0x0]\n"
+ "add x9, x9, x12\n"
+ "add x28, x28, x12\n"
+ "ldr q0, [x14, #0x10]\n"
+ "ldr q1, [x14, #0x20]\n"
+ "add x27, x27, x12\n"
+ "add x26, x26, x12\n"
+ "ldr q2, [x14, #0x30]\n"
+ "ldr q3, [x14, #0x40]\n"
+ "ldr q4, [x14, #0x50]\n"
+ "ldr q5, [x14, #0x60]\n"
+ "ldr q6, [x14, #0x70]\n"
+ "ldr q7, [x14, #0x80]\n"
+ "ldr q8, [x14, #0x90]\n"
"tbz %x[n_channels], #2, 5f\n"
- "ld1 { v9.d }[0], [x24], #0x8\n"
- "ld1 { v10.d }[0], [x23], #0x8\n"
- "ld1 { v11.d }[0], [x22], #0x8\n"
- "ld1 { v12.d }[0], [x21], #0x8\n"
- "ld1 { v13.d }[0], [x20], #0x8\n"
+ "ld1 { v9.d }[0], [x10], #0x8\n"
+ "ld1 { v10.d }[0], [x9], #0x8\n"
+ "ld1 { v11.d }[0], [x28], #0x8\n"
+ "ld1 { v12.d }[0], [x27], #0x8\n"
+ "ld1 { v13.d }[0], [x26], #0x8\n"
"tbz %x[n_channels], #1, 4f\n"
- "ld1 { v9.s }[2], [x24], #0x4\n"
- "ld1 { v10.s }[2], [x23], #0x4\n"
- "ld1 { v11.s }[2], [x22], #0x4\n"
- "ld1 { v12.s }[2], [x21], #0x4\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x10], #0x4\n"
+ "ld1 { v10.s }[2], [x9], #0x4\n"
+ "ld1 { v11.s }[2], [x28], #0x4\n"
+ "ld1 { v12.s }[2], [x27], #0x4\n"
+ "ld1 { v13.s }[2], [x26], #0x4\n"
"tbz %x[n_channels], #0, 7f\n"
- "ld1 { v9.h }[6], [x24], #0x2\n"
- "ld1 { v10.h }[6], [x23], #0x2\n"
- "ld1 { v11.h }[6], [x22], #0x2\n"
- "ld1 { v12.h }[6], [x21], #0x2\n"
- "ld1 { v13.h }[6], [x20], #0x2\n"
+ "ld1 { v9.h }[6], [x10], #0x2\n"
+ "ld1 { v10.h }[6], [x9], #0x2\n"
+ "ld1 { v11.h }[6], [x28], #0x2\n"
+ "ld1 { v12.h }[6], [x27], #0x2\n"
+ "ld1 { v13.h }[6], [x26], #0x2\n"
"b 7f\n"
"4:" // Oddments: Load inputs (2, 2), (0, 0), (0, 4), (4, 0), (1, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 7f\n"
- "ld1 { v9.h }[4], [x24], #0x2\n"
- "ld1 { v10.h }[4], [x23], #0x2\n"
- "ld1 { v11.h }[4], [x22], #0x2\n"
- "ld1 { v12.h }[4], [x21], #0x2\n"
- "ld1 { v13.h }[4], [x20], #0x2\n"
+ "ld1 { v9.h }[4], [x10], #0x2\n"
+ "ld1 { v10.h }[4], [x9], #0x2\n"
+ "ld1 { v11.h }[4], [x28], #0x2\n"
+ "ld1 { v12.h }[4], [x27], #0x2\n"
+ "ld1 { v13.h }[4], [x26], #0x2\n"
"b 7f\n"
"5:" // Oddments: Load inputs (2, 2), (0, 0), (0, 4), (4, 0), (1, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 6f\n"
- "ld1 { v9.s }[0], [x24], #0x4\n"
- "ld1 { v10.s }[0], [x23], #0x4\n"
- "ld1 { v11.s }[0], [x22], #0x4\n"
- "ld1 { v12.s }[0], [x21], #0x4\n"
- "ld1 { v13.s }[0], [x20], #0x4\n"
+ "ld1 { v9.s }[0], [x10], #0x4\n"
+ "ld1 { v10.s }[0], [x9], #0x4\n"
+ "ld1 { v11.s }[0], [x28], #0x4\n"
+ "ld1 { v12.s }[0], [x27], #0x4\n"
+ "ld1 { v13.s }[0], [x26], #0x4\n"
"tbz %x[n_channels], #0, 7f\n"
- "ld1 { v9.h }[2], [x24], #0x2\n"
- "ld1 { v10.h }[2], [x23], #0x2\n"
- "ld1 { v11.h }[2], [x22], #0x2\n"
- "ld1 { v12.h }[2], [x21], #0x2\n"
- "ld1 { v13.h }[2], [x20], #0x2\n"
+ "ld1 { v9.h }[2], [x10], #0x2\n"
+ "ld1 { v10.h }[2], [x9], #0x2\n"
+ "ld1 { v11.h }[2], [x28], #0x2\n"
+ "ld1 { v12.h }[2], [x27], #0x2\n"
+ "ld1 { v13.h }[2], [x26], #0x2\n"
"b 7f\n"
"6:" // Oddments: Load inputs (2, 2), (0, 0), (0, 4), (4, 0), (1, 2): Bit 2: Unset: Bit 1: Unset
- "ld1 { v9.h }[0], [x24], #0x2\n"
- "ld1 { v10.h }[0], [x23], #0x2\n"
- "ld1 { v11.h }[0], [x22], #0x2\n"
- "ld1 { v12.h }[0], [x21], #0x2\n"
- "ld1 { v13.h }[0], [x20], #0x2\n"
+ "ld1 { v9.h }[0], [x10], #0x2\n"
+ "ld1 { v10.h }[0], [x9], #0x2\n"
+ "ld1 { v11.h }[0], [x28], #0x2\n"
+ "ld1 { v12.h }[0], [x27], #0x2\n"
+ "ld1 { v13.h }[0], [x26], #0x2\n"
"7:" // Oddments: Load inputs (2, 2), (0, 0), (0, 4), (4, 0), (1, 2): Bit 2: End
"mov v23.16b, v16.16b\n fmla v23.8h, v8.8h, v9.8h\n"
"mov v25.16b, v16.16b\n fmla v25.8h, v6.8h, v9.8h\n"
- "ldr x20, [x14, #0x28]\n"
- "add x20, x20, x13\n"
+ "ldr x23, [x13, #0x28]\n"
+ "add x23, x23, x12\n"
"mov v24.16b, v16.16b\n fmla v24.8h, v7.8h, v9.8h\n"
"mov v26.16b, v16.16b\n fmla v26.8h, v5.8h, v9.8h\n"
"mov v27.16b, v16.16b\n fmla v27.8h, v4.8h, v9.8h\n"
@@ -555,502 +555,502 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"fmla v27.8h, v1.8h, v13.8h\n"
"fmla v28.8h, v0.8h, v13.8h\n"
"tbz %x[n_channels], #2, 9f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x23], #0x8\n"
"tbz %x[n_channels], #1, 8f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x23], #0x4\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v12.h }[6], [x20], #0x2\n"
+ "ld1 { v12.h }[6], [x23], #0x2\n"
"b 11f\n"
"8:" // Oddments: Load input (4, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v12.h }[4], [x20], #0x2\n"
+ "ld1 { v12.h }[4], [x23], #0x2\n"
"b 11f\n"
"9:" // Oddments: Load input (4, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 10f\n"
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x23], #0x4\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v12.h }[2], [x20], #0x2\n"
+ "ld1 { v12.h }[2], [x23], #0x2\n"
"b 11f\n"
"10:" // Oddments: Load input (4, 4): Bit 2: Unset: Bit 1: Unset
- "ld1 { v12.h }[0], [x20], #0x2\n"
+ "ld1 { v12.h }[0], [x23], #0x2\n"
"11:" // Oddments: Load input (4, 4): Bit 2: End
- "ldr x20, [x14, #0x30]\n"
+ "ldr x25, [x13, #0x30]\n"
"fmla v31.8h, v8.8h, v12.8h\n"
- "add x20, x20, x13\n"
+ "add x25, x25, x12\n"
"tbz %x[n_channels], #2, 13f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x25], #0x8\n"
"tbz %x[n_channels], #1, 12f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x25], #0x4\n"
"tbz %x[n_channels], #0, 15f\n"
- "ld1 { v11.h }[6], [x20], #0x2\n"
+ "ld1 { v11.h }[6], [x25], #0x2\n"
"b 15f\n"
"12:" // Oddments: Load input (2, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 15f\n"
- "ld1 { v11.h }[4], [x20], #0x2\n"
+ "ld1 { v11.h }[4], [x25], #0x2\n"
"b 15f\n"
"13:" // Oddments: Load input (2, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 14f\n"
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x25], #0x4\n"
"tbz %x[n_channels], #0, 15f\n"
- "ld1 { v11.h }[2], [x20], #0x2\n"
+ "ld1 { v11.h }[2], [x25], #0x2\n"
"b 15f\n"
"14:" // Oddments: Load input (2, 1): Bit 2: Unset: Bit 1: Unset
- "ld1 { v11.h }[0], [x20], #0x2\n"
+ "ld1 { v11.h }[0], [x25], #0x2\n"
"15:" // Oddments: Load input (2, 1): Bit 2: End
- "ldr x20, [x14, #0x38]\n"
+ "ldr x24, [x13, #0x38]\n"
"fmla v23.8h, v7.8h, v11.8h\n"
"fmla v24.8h, v6.8h, v11.8h\n"
- "add x20, x20, x13\n"
+ "add x24, x24, x12\n"
"fmla v26.8h, v4.8h, v11.8h\n"
"fmla v27.8h, v3.8h, v11.8h\n"
"fmla v29.8h, v1.8h, v11.8h\n"
"fmla v30.8h, v0.8h, v11.8h\n"
"tbz %x[n_channels], #2, 17f\n"
- "ld1 { v13.d }[0], [x20], #0x8\n"
+ "ld1 { v13.d }[0], [x24], #0x8\n"
"tbz %x[n_channels], #1, 16f\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x24], #0x4\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v13.h }[6], [x20], #0x2\n"
+ "ld1 { v13.h }[6], [x24], #0x2\n"
"b 19f\n"
"16:" // Oddments: Load input (0, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v13.h }[4], [x20], #0x2\n"
+ "ld1 { v13.h }[4], [x24], #0x2\n"
"b 19f\n"
"17:" // Oddments: Load input (0, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 18f\n"
- "ld1 { v13.s }[0], [x20], #0x4\n"
+ "ld1 { v13.s }[0], [x24], #0x4\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v13.h }[2], [x20], #0x2\n"
+ "ld1 { v13.h }[2], [x24], #0x2\n"
"b 19f\n"
"18:" // Oddments: Load input (0, 1): Bit 2: Unset: Bit 1: Unset
- "ld1 { v13.h }[0], [x20], #0x2\n"
+ "ld1 { v13.h }[0], [x24], #0x2\n"
"19:" // Oddments: Load input (0, 1): Bit 2: End
- "ldr x20, [x14, #0x40]\n"
+ "ldr x10, [x13, #0x40]\n"
"fmla v23.8h, v1.8h, v13.8h\n"
"fmla v24.8h, v0.8h, v13.8h\n"
- "add x20, x20, x13\n"
+ "add x10, x10, x12\n"
"tbz %x[n_channels], #2, 21f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x10], #0x8\n"
"tbz %x[n_channels], #1, 20f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x10], #0x4\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v12.h }[6], [x20], #0x2\n"
+ "ld1 { v12.h }[6], [x10], #0x2\n"
"b 23f\n"
"20:" // Oddments: Load input (0, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v12.h }[4], [x20], #0x2\n"
+ "ld1 { v12.h }[4], [x10], #0x2\n"
"b 23f\n"
"21:" // Oddments: Load input (0, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 22f\n"
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x10], #0x4\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v12.h }[2], [x20], #0x2\n"
+ "ld1 { v12.h }[2], [x10], #0x2\n"
"b 23f\n"
"22:" // Oddments: Load input (0, 3): Bit 2: Unset: Bit 1: Unset
- "ld1 { v12.h }[0], [x20], #0x2\n"
+ "ld1 { v12.h }[0], [x10], #0x2\n"
"23:" // Oddments: Load input (0, 3): Bit 2: End
- "ldr x20, [x14, #0x48]\n"
+ "ldr x9, [x13, #0x48]\n"
"fmla v24.8h, v2.8h, v12.8h\n"
"fmla v25.8h, v1.8h, v12.8h\n"
- "add x20, x20, x13\n"
+ "add x9, x9, x12\n"
"tbz %x[n_channels], #2, 25f\n"
- "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v10.d }[0], [x9], #0x8\n"
"tbz %x[n_channels], #1, 24f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x9], #0x4\n"
"tbz %x[n_channels], #0, 27f\n"
- "ld1 { v10.h }[6], [x20], #0x2\n"
+ "ld1 { v10.h }[6], [x9], #0x2\n"
"b 27f\n"
"24:" // Oddments: Load input (2, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 27f\n"
- "ld1 { v10.h }[4], [x20], #0x2\n"
+ "ld1 { v10.h }[4], [x9], #0x2\n"
"b 27f\n"
"25:" // Oddments: Load input (2, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 26f\n"
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v10.s }[0], [x9], #0x4\n"
"tbz %x[n_channels], #0, 27f\n"
- "ld1 { v10.h }[2], [x20], #0x2\n"
+ "ld1 { v10.h }[2], [x9], #0x2\n"
"b 27f\n"
"26:" // Oddments: Load input (2, 3): Bit 2: Unset: Bit 1: Unset
- "ld1 { v10.h }[0], [x20], #0x2\n"
+ "ld1 { v10.h }[0], [x9], #0x2\n"
"27:" // Oddments: Load input (2, 3): Bit 2: End
- "ldr x20, [x14, #0x50]\n"
+ "ldr x28, [x13, #0x50]\n"
"fmla v24.8h, v8.8h, v10.8h\n"
"fmla v25.8h, v7.8h, v10.8h\n"
- "add x20, x20, x13\n"
+ "add x28, x28, x12\n"
"fmla v27.8h, v5.8h, v10.8h\n"
"fmla v28.8h, v4.8h, v10.8h\n"
"fmla v30.8h, v2.8h, v10.8h\n"
"fmla v31.8h, v1.8h, v10.8h\n"
"tbz %x[n_channels], #2, 29f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x28], #0x8\n"
"tbz %x[n_channels], #1, 28f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x28], #0x4\n"
"tbz %x[n_channels], #0, 31f\n"
- "ld1 { v11.h }[6], [x20], #0x2\n"
+ "ld1 { v11.h }[6], [x28], #0x2\n"
"b 31f\n"
"28:" // Oddments: Load input (1, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 31f\n"
- "ld1 { v11.h }[4], [x20], #0x2\n"
+ "ld1 { v11.h }[4], [x28], #0x2\n"
"b 31f\n"
"29:" // Oddments: Load input (1, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 30f\n"
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x28], #0x4\n"
"tbz %x[n_channels], #0, 31f\n"
- "ld1 { v11.h }[2], [x20], #0x2\n"
+ "ld1 { v11.h }[2], [x28], #0x2\n"
"b 31f\n"
"30:" // Oddments: Load input (1, 0): Bit 2: Unset: Bit 1: Unset
- "ld1 { v11.h }[0], [x20], #0x2\n"
+ "ld1 { v11.h }[0], [x28], #0x2\n"
"31:" // Oddments: Load input (1, 0): Bit 2: End
- "ldr x20, [x14, #0x58]\n"
+ "ldr x27, [x13, #0x58]\n"
"fmla v23.8h, v3.8h, v11.8h\n"
"fmla v26.8h, v0.8h, v11.8h\n"
- "add x20, x20, x13\n"
+ "add x27, x27, x12\n"
"tbz %x[n_channels], #2, 33f\n"
- "ld1 { v13.d }[0], [x20], #0x8\n"
+ "ld1 { v13.d }[0], [x27], #0x8\n"
"tbz %x[n_channels], #1, 32f\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x27], #0x4\n"
"tbz %x[n_channels], #0, 35f\n"
- "ld1 { v13.h }[6], [x20], #0x2\n"
+ "ld1 { v13.h }[6], [x27], #0x2\n"
"b 35f\n"
"32:" // Oddments: Load input (1, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 35f\n"
- "ld1 { v13.h }[4], [x20], #0x2\n"
+ "ld1 { v13.h }[4], [x27], #0x2\n"
"b 35f\n"
"33:" // Oddments: Load input (1, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 34f\n"
- "ld1 { v13.s }[0], [x20], #0x4\n"
+ "ld1 { v13.s }[0], [x27], #0x4\n"
"tbz %x[n_channels], #0, 35f\n"
- "ld1 { v13.h }[2], [x20], #0x2\n"
+ "ld1 { v13.h }[2], [x27], #0x2\n"
"b 35f\n"
"34:" // Oddments: Load input (1, 4): Bit 2: Unset: Bit 1: Unset
- "ld1 { v13.h }[0], [x20], #0x2\n"
+ "ld1 { v13.h }[0], [x27], #0x2\n"
"35:" // Oddments: Load input (1, 4): Bit 2: End
- "ldr x20, [x14, #0x60]\n"
+ "ldr x26, [x13, #0x60]\n"
"fmla v25.8h, v5.8h, v13.8h\n"
"fmla v28.8h, v2.8h, v13.8h\n"
- "add x20, x20, x13\n"
+ "add x26, x26, x12\n"
"tbz %x[n_channels], #2, 37f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x26], #0x8\n"
"tbz %x[n_channels], #1, 36f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x26], #0x4\n"
"tbz %x[n_channels], #0, 39f\n"
- "ld1 { v12.h }[6], [x20], #0x2\n"
+ "ld1 { v12.h }[6], [x26], #0x2\n"
"b 39f\n"
"36:" // Oddments: Load input (3, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 39f\n"
- "ld1 { v12.h }[4], [x20], #0x2\n"
+ "ld1 { v12.h }[4], [x26], #0x2\n"
"b 39f\n"
"37:" // Oddments: Load input (3, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 38f\n"
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x26], #0x4\n"
"tbz %x[n_channels], #0, 39f\n"
- "ld1 { v12.h }[2], [x20], #0x2\n"
+ "ld1 { v12.h }[2], [x26], #0x2\n"
"b 39f\n"
"38:" // Oddments: Load input (3, 0): Bit 2: Unset: Bit 1: Unset
- "ld1 { v12.h }[0], [x20], #0x2\n"
+ "ld1 { v12.h }[0], [x26], #0x2\n"
"39:" // Oddments: Load input (3, 0): Bit 2: End
- "ldr x20, [x14, #0x68]\n"
+ "ldr x23, [x13, #0x68]\n"
"fmla v26.8h, v6.8h, v12.8h\n"
"fmla v29.8h, v3.8h, v12.8h\n"
- "add x20, x20, x13\n"
+ "add x23, x23, x12\n"
"tbz %x[n_channels], #2, 41f\n"
- "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v10.d }[0], [x23], #0x8\n"
"tbz %x[n_channels], #1, 40f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x23], #0x4\n"
"tbz %x[n_channels], #0, 43f\n"
- "ld1 { v10.h }[6], [x20], #0x2\n"
+ "ld1 { v10.h }[6], [x23], #0x2\n"
"b 43f\n"
"40:" // Oddments: Load input (3, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 43f\n"
- "ld1 { v10.h }[4], [x20], #0x2\n"
+ "ld1 { v10.h }[4], [x23], #0x2\n"
"b 43f\n"
"41:" // Oddments: Load input (3, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 42f\n"
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v10.s }[0], [x23], #0x4\n"
"tbz %x[n_channels], #0, 43f\n"
- "ld1 { v10.h }[2], [x20], #0x2\n"
+ "ld1 { v10.h }[2], [x23], #0x2\n"
"b 43f\n"
"42:" // Oddments: Load input (3, 2): Bit 2: Unset: Bit 1: Unset
- "ld1 { v10.h }[0], [x20], #0x2\n"
+ "ld1 { v10.h }[0], [x23], #0x2\n"
"43:" // Oddments: Load input (3, 2): Bit 2: End
- "ldr x20, [x14, #0x70]\n"
+ "ldr x25, [x13, #0x70]\n"
"fmla v26.8h, v8.8h, v10.8h\n"
"fmla v27.8h, v7.8h, v10.8h\n"
- "add x20, x20, x13\n"
+ "add x25, x25, x12\n"
"fmla v28.8h, v6.8h, v10.8h\n"
"fmla v29.8h, v5.8h, v10.8h\n"
"fmla v30.8h, v4.8h, v10.8h\n"
"fmla v31.8h, v3.8h, v10.8h\n"
"tbz %x[n_channels], #2, 45f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x25], #0x8\n"
"tbz %x[n_channels], #1, 44f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x25], #0x4\n"
"tbz %x[n_channels], #0, 47f\n"
- "ld1 { v11.h }[6], [x20], #0x2\n"
+ "ld1 { v11.h }[6], [x25], #0x2\n"
"b 47f\n"
"44:" // Oddments: Load input (3, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 47f\n"
- "ld1 { v11.h }[4], [x20], #0x2\n"
+ "ld1 { v11.h }[4], [x25], #0x2\n"
"b 47f\n"
"45:" // Oddments: Load input (3, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 46f\n"
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x25], #0x4\n"
"tbz %x[n_channels], #0, 47f\n"
- "ld1 { v11.h }[2], [x20], #0x2\n"
+ "ld1 { v11.h }[2], [x25], #0x2\n"
"b 47f\n"
"46:" // Oddments: Load input (3, 4): Bit 2: Unset: Bit 1: Unset
- "ld1 { v11.h }[0], [x20], #0x2\n"
+ "ld1 { v11.h }[0], [x25], #0x2\n"
"47:" // Oddments: Load input (3, 4): Bit 2: End
- "ldr x20, [x14, #0x78]\n"
+ "ldr x24, [x13, #0x78]\n"
"fmla v28.8h, v8.8h, v11.8h\n"
"fmla v31.8h, v5.8h, v11.8h\n"
- "add x20, x20, x13\n"
+ "add x24, x24, x12\n"
"tbz %x[n_channels], #2, 49f\n"
- "ld1 { v13.d }[0], [x20], #0x8\n"
+ "ld1 { v13.d }[0], [x24], #0x8\n"
"tbz %x[n_channels], #1, 48f\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x24], #0x4\n"
"tbz %x[n_channels], #0, 51f\n"
- "ld1 { v13.h }[6], [x20], #0x2\n"
+ "ld1 { v13.h }[6], [x24], #0x2\n"
"b 51f\n"
"48:" // Oddments: Load input (4, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 51f\n"
- "ld1 { v13.h }[4], [x20], #0x2\n"
+ "ld1 { v13.h }[4], [x24], #0x2\n"
"b 51f\n"
"49:" // Oddments: Load input (4, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 50f\n"
- "ld1 { v13.s }[0], [x20], #0x4\n"
+ "ld1 { v13.s }[0], [x24], #0x4\n"
"tbz %x[n_channels], #0, 51f\n"
- "ld1 { v13.h }[2], [x20], #0x2\n"
+ "ld1 { v13.h }[2], [x24], #0x2\n"
"b 51f\n"
"50:" // Oddments: Load input (4, 1): Bit 2: Unset: Bit 1: Unset
- "ld1 { v13.h }[0], [x20], #0x2\n"
+ "ld1 { v13.h }[0], [x24], #0x2\n"
"51:" // Oddments: Load input (4, 1): Bit 2: End
- "ldr x20, [x14, #0x80]\n"
+ "ldr x10, [x13, #0x80]\n"
"fmla v29.8h, v7.8h, v13.8h\n"
"fmla v30.8h, v6.8h, v13.8h\n"
- "add x20, x20, x13\n"
+ "add x10, x10, x12\n"
"tbz %x[n_channels], #2, 53f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x10], #0x8\n"
"tbz %x[n_channels], #1, 52f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x10], #0x4\n"
"tbz %x[n_channels], #0, 55f\n"
- "ld1 { v12.h }[6], [x20], #0x2\n"
+ "ld1 { v12.h }[6], [x10], #0x2\n"
"b 55f\n"
"52:" // Oddments: Load input (1, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 55f\n"
- "ld1 { v12.h }[4], [x20], #0x2\n"
+ "ld1 { v12.h }[4], [x10], #0x2\n"
"b 55f\n"
"53:" // Oddments: Load input (1, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 54f\n"
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x10], #0x4\n"
"tbz %x[n_channels], #0, 55f\n"
- "ld1 { v12.h }[2], [x20], #0x2\n"
+ "ld1 { v12.h }[2], [x10], #0x2\n"
"b 55f\n"
"54:" // Oddments: Load input (1, 1): Bit 2: Unset: Bit 1: Unset
- "ld1 { v12.h }[0], [x20], #0x2\n"
+ "ld1 { v12.h }[0], [x10], #0x2\n"
"55:" // Oddments: Load input (1, 1): Bit 2: End
- "ldr x20, [x14, #0x88]\n"
+ "ldr x9, [x13, #0x88]\n"
"fmla v23.8h, v4.8h, v12.8h\n"
"fmla v24.8h, v3.8h, v12.8h\n"
- "add x20, x20, x13\n"
+ "add x9, x9, x12\n"
"fmla v26.8h, v1.8h, v12.8h\n"
"fmla v27.8h, v0.8h, v12.8h\n"
"tbz %x[n_channels], #2, 57f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x9], #0x8\n"
"tbz %x[n_channels], #1, 56f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x9], #0x4\n"
"tbz %x[n_channels], #0, 59f\n"
- "ld1 { v11.h }[6], [x20], #0x2\n"
+ "ld1 { v11.h }[6], [x9], #0x2\n"
"b 59f\n"
"56:" // Oddments: Load input (1, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 59f\n"
- "ld1 { v11.h }[4], [x20], #0x2\n"
+ "ld1 { v11.h }[4], [x9], #0x2\n"
"b 59f\n"
"57:" // Oddments: Load input (1, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 58f\n"
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x9], #0x4\n"
"tbz %x[n_channels], #0, 59f\n"
- "ld1 { v11.h }[2], [x20], #0x2\n"
+ "ld1 { v11.h }[2], [x9], #0x2\n"
"b 59f\n"
"58:" // Oddments: Load input (1, 3): Bit 2: Unset: Bit 1: Unset
- "ld1 { v11.h }[0], [x20], #0x2\n"
+ "ld1 { v11.h }[0], [x9], #0x2\n"
"59:" // Oddments: Load input (1, 3): Bit 2: End
- "ldr x20, [x14, #0x90]\n"
+ "ldr x28, [x13, #0x90]\n"
"fmla v24.8h, v5.8h, v11.8h\n"
"fmla v25.8h, v4.8h, v11.8h\n"
- "add x20, x20, x13\n"
+ "add x28, x28, x12\n"
"fmla v27.8h, v2.8h, v11.8h\n"
"fmla v28.8h, v1.8h, v11.8h\n"
"tbz %x[n_channels], #2, 61f\n"
- "ld1 { v13.d }[0], [x20], #0x8\n"
+ "ld1 { v13.d }[0], [x28], #0x8\n"
"tbz %x[n_channels], #1, 60f\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x28], #0x4\n"
"tbz %x[n_channels], #0, 63f\n"
- "ld1 { v13.h }[6], [x20], #0x2\n"
+ "ld1 { v13.h }[6], [x28], #0x2\n"
"b 63f\n"
"60:" // Oddments: Load input (4, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 63f\n"
- "ld1 { v13.h }[4], [x20], #0x2\n"
+ "ld1 { v13.h }[4], [x28], #0x2\n"
"b 63f\n"
"61:" // Oddments: Load input (4, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 62f\n"
- "ld1 { v13.s }[0], [x20], #0x4\n"
+ "ld1 { v13.s }[0], [x28], #0x4\n"
"tbz %x[n_channels], #0, 63f\n"
- "ld1 { v13.h }[2], [x20], #0x2\n"
+ "ld1 { v13.h }[2], [x28], #0x2\n"
"b 63f\n"
"62:" // Oddments: Load input (4, 3): Bit 2: Unset: Bit 1: Unset
- "ld1 { v13.h }[0], [x20], #0x2\n"
+ "ld1 { v13.h }[0], [x28], #0x2\n"
"63:" // Oddments: Load input (4, 3): Bit 2: End
- "ldr x20, [x14, #0x98]\n"
+ "ldr x27, [x13, #0x98]\n"
"fmla v30.8h, v8.8h, v13.8h\n"
"fmla v31.8h, v7.8h, v13.8h\n"
- "add x20, x20, x13\n"
+ "add x27, x27, x12\n"
"tbz %x[n_channels], #2, 65f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x27], #0x8\n"
"tbz %x[n_channels], #1, 64f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x27], #0x4\n"
"tbz %x[n_channels], #0, 67f\n"
- "ld1 { v12.h }[6], [x20], #0x2\n"
+ "ld1 { v12.h }[6], [x27], #0x2\n"
"b 67f\n"
"64:" // Oddments: Load input (3, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 67f\n"
- "ld1 { v12.h }[4], [x20], #0x2\n"
+ "ld1 { v12.h }[4], [x27], #0x2\n"
"b 67f\n"
"65:" // Oddments: Load input (3, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 66f\n"
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x27], #0x4\n"
"tbz %x[n_channels], #0, 67f\n"
- "ld1 { v12.h }[2], [x20], #0x2\n"
+ "ld1 { v12.h }[2], [x27], #0x2\n"
"b 67f\n"
"66:" // Oddments: Load input (3, 1): Bit 2: Unset: Bit 1: Unset
- "ld1 { v12.h }[0], [x20], #0x2\n"
+ "ld1 { v12.h }[0], [x27], #0x2\n"
"67:" // Oddments: Load input (3, 1): Bit 2: End
- "ldr x20, [x14, #0xa0]\n"
+ "ldr x26, [x13, #0xa0]\n"
"fmla v26.8h, v7.8h, v12.8h\n"
"fmla v27.8h, v6.8h, v12.8h\n"
- "add x20, x20, x13\n"
+ "add x26, x26, x12\n"
"fmla v29.8h, v4.8h, v12.8h\n"
"fmla v30.8h, v3.8h, v12.8h\n"
"tbz %x[n_channels], #2, 69f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x26], #0x8\n"
"tbz %x[n_channels], #1, 68f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x26], #0x4\n"
"tbz %x[n_channels], #0, 71f\n"
- "ld1 { v11.h }[6], [x20], #0x2\n"
+ "ld1 { v11.h }[6], [x26], #0x2\n"
"b 71f\n"
"68:" // Oddments: Load input (0, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 71f\n"
- "ld1 { v11.h }[4], [x20], #0x2\n"
+ "ld1 { v11.h }[4], [x26], #0x2\n"
"b 71f\n"
"69:" // Oddments: Load input (0, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 70f\n"
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x26], #0x4\n"
"tbz %x[n_channels], #0, 71f\n"
- "ld1 { v11.h }[2], [x20], #0x2\n"
+ "ld1 { v11.h }[2], [x26], #0x2\n"
"b 71f\n"
"70:" // Oddments: Load input (0, 2): Bit 2: Unset: Bit 1: Unset
- "ld1 { v11.h }[0], [x20], #0x2\n"
+ "ld1 { v11.h }[0], [x26], #0x2\n"
"71:" // Oddments: Load input (0, 2): Bit 2: End
- "ldr x20, [x14, #0xa8]\n"
+ "ldr x23, [x13, #0xa8]\n"
"fmla v23.8h, v2.8h, v11.8h\n"
"fmla v24.8h, v1.8h, v11.8h\n"
- "add x20, x20, x13\n"
+ "add x23, x23, x12\n"
"fmla v25.8h, v0.8h, v11.8h\n"
"tbz %x[n_channels], #2, 73f\n"
- "ld1 { v13.d }[0], [x20], #0x8\n"
+ "ld1 { v13.d }[0], [x23], #0x8\n"
"tbz %x[n_channels], #1, 72f\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x23], #0x4\n"
"tbz %x[n_channels], #0, 75f\n"
- "ld1 { v13.h }[6], [x20], #0x2\n"
+ "ld1 { v13.h }[6], [x23], #0x2\n"
"b 75f\n"
"72:" // Oddments: Load input (3, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 75f\n"
- "ld1 { v13.h }[4], [x20], #0x2\n"
+ "ld1 { v13.h }[4], [x23], #0x2\n"
"b 75f\n"
"73:" // Oddments: Load input (3, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 74f\n"
- "ld1 { v13.s }[0], [x20], #0x4\n"
+ "ld1 { v13.s }[0], [x23], #0x4\n"
"tbz %x[n_channels], #0, 75f\n"
- "ld1 { v13.h }[2], [x20], #0x2\n"
+ "ld1 { v13.h }[2], [x23], #0x2\n"
"b 75f\n"
"74:" // Oddments: Load input (3, 3): Bit 2: Unset: Bit 1: Unset
- "ld1 { v13.h }[0], [x20], #0x2\n"
+ "ld1 { v13.h }[0], [x23], #0x2\n"
"75:" // Oddments: Load input (3, 3): Bit 2: End
- "ldr x20, [x14, #0xb0]\n"
+ "ldr x25, [x13, #0xb0]\n"
"fmla v27.8h, v8.8h, v13.8h\n"
"fmla v28.8h, v7.8h, v13.8h\n"
- "add x20, x20, x13\n"
+ "add x25, x25, x12\n"
"fmla v30.8h, v5.8h, v13.8h\n"
"fmla v31.8h, v4.8h, v13.8h\n"
"tbz %x[n_channels], #2, 77f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x25], #0x8\n"
"tbz %x[n_channels], #1, 76f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x25], #0x4\n"
"tbz %x[n_channels], #0, 79f\n"
- "ld1 { v12.h }[6], [x20], #0x2\n"
+ "ld1 { v12.h }[6], [x25], #0x2\n"
"b 79f\n"
"76:" // Oddments: Load input (2, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 79f\n"
- "ld1 { v12.h }[4], [x20], #0x2\n"
+ "ld1 { v12.h }[4], [x25], #0x2\n"
"b 79f\n"
"77:" // Oddments: Load input (2, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 78f\n"
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x25], #0x4\n"
"tbz %x[n_channels], #0, 79f\n"
- "ld1 { v12.h }[2], [x20], #0x2\n"
+ "ld1 { v12.h }[2], [x25], #0x2\n"
"b 79f\n"
"78:" // Oddments: Load input (2, 0): Bit 2: Unset: Bit 1: Unset
- "ld1 { v12.h }[0], [x20], #0x2\n"
+ "ld1 { v12.h }[0], [x25], #0x2\n"
"79:" // Oddments: Load input (2, 0): Bit 2: End
- "ldr x20, [x14, #0xb8]\n"
+ "ldr x24, [x13, #0xb8]\n"
"fmla v23.8h, v6.8h, v12.8h\n"
"fmla v26.8h, v3.8h, v12.8h\n"
- "add x20, x20, x13\n"
+ "add x24, x24, x12\n"
"fmla v29.8h, v0.8h, v12.8h\n"
"tbz %x[n_channels], #2, 81f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x24], #0x8\n"
"tbz %x[n_channels], #1, 80f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x24], #0x4\n"
"tbz %x[n_channels], #0, 83f\n"
- "ld1 { v11.h }[6], [x20], #0x2\n"
+ "ld1 { v11.h }[6], [x24], #0x2\n"
"b 83f\n"
"80:" // Oddments: Load input (2, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 83f\n"
- "ld1 { v11.h }[4], [x20], #0x2\n"
+ "ld1 { v11.h }[4], [x24], #0x2\n"
"b 83f\n"
"81:" // Oddments: Load input (2, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 82f\n"
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x24], #0x4\n"
"tbz %x[n_channels], #0, 83f\n"
- "ld1 { v11.h }[2], [x20], #0x2\n"
+ "ld1 { v11.h }[2], [x24], #0x2\n"
"b 83f\n"
"82:" // Oddments: Load input (2, 4): Bit 2: Unset: Bit 1: Unset
- "ld1 { v11.h }[0], [x20], #0x2\n"
+ "ld1 { v11.h }[0], [x24], #0x2\n"
"83:" // Oddments: Load input (2, 4): Bit 2: End
- "ldr x20, [x14, #0xc0]\n"
+ "ldr x10, [x13, #0xc0]\n"
"fmla v25.8h, v8.8h, v11.8h\n"
"fmla v28.8h, v5.8h, v11.8h\n"
- "add x20, x20, x13\n"
+ "add x10, x10, x12\n"
"fmla v31.8h, v2.8h, v11.8h\n"
"tbz %x[n_channels], #2, 85f\n"
- "ld1 { v13.d }[0], [x20], #0x8\n"
+ "ld1 { v13.d }[0], [x10], #0x8\n"
"tbz %x[n_channels], #1, 84f\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x10], #0x4\n"
"tbz %x[n_channels], #0, 87f\n"
- "ld1 { v13.h }[6], [x20], #0x2\n"
+ "ld1 { v13.h }[6], [x10], #0x2\n"
"b 87f\n"
"84:" // Oddments: Load input (4, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 87f\n"
- "ld1 { v13.h }[4], [x20], #0x2\n"
+ "ld1 { v13.h }[4], [x10], #0x2\n"
"b 87f\n"
"85:" // Oddments: Load input (4, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 86f\n"
- "ld1 { v13.s }[0], [x20], #0x4\n"
+ "ld1 { v13.s }[0], [x10], #0x4\n"
"tbz %x[n_channels], #0, 87f\n"
- "ld1 { v13.h }[2], [x20], #0x2\n"
+ "ld1 { v13.h }[2], [x10], #0x2\n"
"b 87f\n"
"86:" // Oddments: Load input (4, 2): Bit 2: Unset: Bit 1: Unset
- "ld1 { v13.h }[0], [x20], #0x2\n"
+ "ld1 { v13.h }[0], [x10], #0x2\n"
"87:" // Oddments: Load input (4, 2): Bit 2: End
"fmla v29.8h, v8.8h, v13.8h\n"
"fmla v30.8h, v7.8h, v13.8h\n"
@@ -1074,216 +1074,216 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"fmin v30.8h, v30.8h, v17.8h\n"
"fmin v31.8h, v31.8h, v17.8h\n"
"tbz %x[n_channels], #2, 89f\n"
- "ldr x23, [x16, #0x0]\n"
- "add x23, x23, x12\n"
- "st1 { v23.d }[0], [x23]\n"
- "ldr x22, [x16, #0x8]\n"
- "ldr x21, [x16, #0x10]\n"
- "ldr x20, [x16, #0x18]\n"
- "add x22, x22, x12\n"
- "add x21, x21, x12\n"
- "ldr x23, [x16, #0x20]\n"
- "add x20, x20, x12\n"
- "add x23, x23, x12\n"
- "st1 { v24.d }[0], [x22]\n"
- "st1 { v25.d }[0], [x21]\n"
- "ldr x22, [x16, #0x28]\n"
- "ldr x21, [x16, #0x30]\n"
- "add x22, x22, x12\n"
- "st1 { v26.d }[0], [x20]\n"
- "ldr x20, [x16, #0x38]\n"
- "add x21, x21, x12\n"
- "add x20, x20, x12\n"
- "st1 { v27.d }[0], [x23]\n"
- "ldr x23, [x16, #0x40]\n"
- "add x23, x23, x12\n"
- "add x12, x12, #0x8\n"
- "st1 { v28.d }[0], [x22]\n"
- "st1 { v29.d }[0], [x21]\n"
- "st1 { v30.d }[0], [x20]\n"
- "st1 { v31.d }[0], [x23]\n"
+ "ldr x22, [x15, #0x0]\n"
+ "add x22, x22, x11\n"
+ "st1 { v23.d }[0], [x22]\n"
+ "ldr x21, [x15, #0x8]\n"
+ "ldr x20, [x15, #0x10]\n"
+ "ldr x19, [x15, #0x18]\n"
+ "ldr x22, [x15, #0x20]\n"
+ "add x21, x21, x11\n"
+ "add x20, x20, x11\n"
+ "add x19, x19, x11\n"
+ "add x22, x22, x11\n"
+ "st1 { v24.d }[0], [x21]\n"
+ "ldr x21, [x15, #0x28]\n"
+ "st1 { v25.d }[0], [x20]\n"
+ "ldr x20, [x15, #0x30]\n"
+ "add x21, x21, x11\n"
+ "add x20, x20, x11\n"
+ "st1 { v26.d }[0], [x19]\n"
+ "ldr x19, [x15, #0x38]\n"
+ "add x19, x19, x11\n"
+ "st1 { v27.d }[0], [x22]\n"
+ "ldr x22, [x15, #0x40]\n"
+ "add x22, x22, x11\n"
+ "add x11, x11, #0x8\n"
+ "st1 { v28.d }[0], [x21]\n"
+ "st1 { v29.d }[0], [x20]\n"
+ "st1 { v30.d }[0], [x19]\n"
+ "st1 { v31.d }[0], [x22]\n"
"tbz %x[n_channels], #1, 88f\n"
- "ldr x23, [x16, #0x0]\n"
- "add x23, x23, x12\n"
- "st1 { v23.s }[2], [x23]\n"
- "ldr x22, [x16, #0x8]\n"
- "ldr x21, [x16, #0x10]\n"
- "ldr x20, [x16, #0x18]\n"
- "add x22, x22, x12\n"
- "add x21, x21, x12\n"
- "ldr x23, [x16, #0x20]\n"
- "add x20, x20, x12\n"
- "add x23, x23, x12\n"
- "st1 { v24.s }[2], [x22]\n"
- "st1 { v25.s }[2], [x21]\n"
- "ldr x22, [x16, #0x28]\n"
- "ldr x21, [x16, #0x30]\n"
- "add x22, x22, x12\n"
- "st1 { v26.s }[2], [x20]\n"
- "ldr x20, [x16, #0x38]\n"
- "add x21, x21, x12\n"
- "add x20, x20, x12\n"
- "st1 { v27.s }[2], [x23]\n"
- "ldr x23, [x16, #0x40]\n"
- "add x23, x23, x12\n"
- "add x12, x12, #0x4\n"
- "st1 { v28.s }[2], [x22]\n"
- "st1 { v29.s }[2], [x21]\n"
- "st1 { v30.s }[2], [x20]\n"
- "st1 { v31.s }[2], [x23]\n"
+ "ldr x22, [x15, #0x0]\n"
+ "add x22, x22, x11\n"
+ "st1 { v23.s }[2], [x22]\n"
+ "ldr x21, [x15, #0x8]\n"
+ "ldr x20, [x15, #0x10]\n"
+ "add x21, x21, x11\n"
+ "ldr x19, [x15, #0x18]\n"
+ "ldr x22, [x15, #0x20]\n"
+ "add x20, x20, x11\n"
+ "add x19, x19, x11\n"
+ "add x22, x22, x11\n"
+ "st1 { v24.s }[2], [x21]\n"
+ "ldr x21, [x15, #0x28]\n"
+ "add x21, x21, x11\n"
+ "st1 { v25.s }[2], [x20]\n"
+ "ldr x20, [x15, #0x30]\n"
+ "add x20, x20, x11\n"
+ "st1 { v26.s }[2], [x19]\n"
+ "ldr x19, [x15, #0x38]\n"
+ "add x19, x19, x11\n"
+ "st1 { v27.s }[2], [x22]\n"
+ "ldr x22, [x15, #0x40]\n"
+ "add x22, x22, x11\n"
+ "add x11, x11, #0x4\n"
+ "st1 { v28.s }[2], [x21]\n"
+ "st1 { v29.s }[2], [x20]\n"
+ "st1 { v30.s }[2], [x19]\n"
+ "st1 { v31.s }[2], [x22]\n"
"tbz %x[n_channels], #0, 91f\n"
- "ldr x23, [x16, #0x0]\n"
- "add x23, x23, x12\n"
- "st1 { v23.h }[6], [x23]\n"
- "ldr x22, [x16, #0x8]\n"
- "ldr x21, [x16, #0x10]\n"
- "ldr x20, [x16, #0x18]\n"
- "add x22, x22, x12\n"
- "add x21, x21, x12\n"
- "ldr x23, [x16, #0x20]\n"
- "add x20, x20, x12\n"
- "add x23, x23, x12\n"
- "st1 { v24.h }[6], [x22]\n"
- "st1 { v25.h }[6], [x21]\n"
- "ldr x22, [x16, #0x28]\n"
- "ldr x21, [x16, #0x30]\n"
- "add x22, x22, x12\n"
- "st1 { v26.h }[6], [x20]\n"
- "ldr x20, [x16, #0x38]\n"
- "add x21, x21, x12\n"
- "add x20, x20, x12\n"
- "st1 { v27.h }[6], [x23]\n"
- "ldr x23, [x16, #0x40]\n"
- "add x23, x23, x12\n"
- "st1 { v28.h }[6], [x22]\n"
- "st1 { v29.h }[6], [x21]\n"
- "st1 { v30.h }[6], [x20]\n"
- "st1 { v31.h }[6], [x23]\n"
+ "ldr x22, [x15, #0x0]\n"
+ "add x22, x22, x11\n"
+ "st1 { v23.h }[6], [x22]\n"
+ "ldr x21, [x15, #0x8]\n"
+ "ldr x20, [x15, #0x10]\n"
+ "add x21, x21, x11\n"
+ "ldr x19, [x15, #0x18]\n"
+ "ldr x22, [x15, #0x20]\n"
+ "add x20, x20, x11\n"
+ "add x19, x19, x11\n"
+ "add x22, x22, x11\n"
+ "st1 { v24.h }[6], [x21]\n"
+ "ldr x21, [x15, #0x28]\n"
+ "add x21, x21, x11\n"
+ "st1 { v25.h }[6], [x20]\n"
+ "ldr x20, [x15, #0x30]\n"
+ "add x20, x20, x11\n"
+ "st1 { v26.h }[6], [x19]\n"
+ "ldr x19, [x15, #0x38]\n"
+ "add x19, x19, x11\n"
+ "st1 { v27.h }[6], [x22]\n"
+ "ldr x22, [x15, #0x40]\n"
+ "add x22, x22, x11\n"
+ "st1 { v28.h }[6], [x21]\n"
+ "st1 { v29.h }[6], [x20]\n"
+ "st1 { v30.h }[6], [x19]\n"
+ "st1 { v31.h }[6], [x22]\n"
"b 91f\n"
"88:" // Oddments: Store: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 91f\n"
- "ldr x23, [x16, #0x0]\n"
- "add x23, x23, x12\n"
- "st1 { v23.h }[4], [x23]\n"
- "ldr x22, [x16, #0x8]\n"
- "ldr x21, [x16, #0x10]\n"
- "ldr x20, [x16, #0x18]\n"
- "add x22, x22, x12\n"
- "add x21, x21, x12\n"
- "ldr x23, [x16, #0x20]\n"
- "add x20, x20, x12\n"
- "add x23, x23, x12\n"
- "st1 { v24.h }[4], [x22]\n"
- "st1 { v25.h }[4], [x21]\n"
- "ldr x22, [x16, #0x28]\n"
- "ldr x21, [x16, #0x30]\n"
- "add x22, x22, x12\n"
- "st1 { v26.h }[4], [x20]\n"
- "ldr x20, [x16, #0x38]\n"
- "add x21, x21, x12\n"
- "add x20, x20, x12\n"
- "st1 { v27.h }[4], [x23]\n"
- "ldr x23, [x16, #0x40]\n"
- "add x23, x23, x12\n"
- "st1 { v28.h }[4], [x22]\n"
- "st1 { v29.h }[4], [x21]\n"
- "st1 { v30.h }[4], [x20]\n"
- "st1 { v31.h }[4], [x23]\n"
+ "ldr x22, [x15, #0x0]\n"
+ "add x22, x22, x11\n"
+ "st1 { v23.h }[4], [x22]\n"
+ "ldr x21, [x15, #0x8]\n"
+ "ldr x20, [x15, #0x10]\n"
+ "add x21, x21, x11\n"
+ "add x20, x20, x11\n"
+ "ldr x19, [x15, #0x18]\n"
+ "ldr x22, [x15, #0x20]\n"
+ "add x19, x19, x11\n"
+ "add x22, x22, x11\n"
+ "st1 { v24.h }[4], [x21]\n"
+ "ldr x21, [x15, #0x28]\n"
+ "add x21, x21, x11\n"
+ "st1 { v25.h }[4], [x20]\n"
+ "ldr x20, [x15, #0x30]\n"
+ "add x20, x20, x11\n"
+ "st1 { v26.h }[4], [x19]\n"
+ "ldr x19, [x15, #0x38]\n"
+ "add x19, x19, x11\n"
+ "st1 { v27.h }[4], [x22]\n"
+ "ldr x22, [x15, #0x40]\n"
+ "add x22, x22, x11\n"
+ "st1 { v28.h }[4], [x21]\n"
+ "st1 { v29.h }[4], [x20]\n"
+ "st1 { v30.h }[4], [x19]\n"
+ "st1 { v31.h }[4], [x22]\n"
"b 91f\n"
"89:" // Oddments: Store: Bit 2: Unset
"tbz %x[n_channels], #1, 90f\n"
- "ldr x23, [x16, #0x0]\n"
- "add x23, x23, x12\n"
- "st1 { v23.s }[0], [x23]\n"
- "ldr x22, [x16, #0x8]\n"
- "ldr x21, [x16, #0x10]\n"
- "ldr x20, [x16, #0x18]\n"
- "add x22, x22, x12\n"
- "add x21, x21, x12\n"
- "ldr x23, [x16, #0x20]\n"
- "add x20, x20, x12\n"
- "add x23, x23, x12\n"
- "st1 { v24.s }[0], [x22]\n"
- "st1 { v25.s }[0], [x21]\n"
- "ldr x22, [x16, #0x28]\n"
- "ldr x21, [x16, #0x30]\n"
- "add x22, x22, x12\n"
- "st1 { v26.s }[0], [x20]\n"
- "ldr x20, [x16, #0x38]\n"
- "add x21, x21, x12\n"
- "add x20, x20, x12\n"
- "st1 { v27.s }[0], [x23]\n"
- "ldr x23, [x16, #0x40]\n"
- "add x23, x23, x12\n"
- "add x12, x12, #0x4\n"
- "st1 { v28.s }[0], [x22]\n"
- "st1 { v29.s }[0], [x21]\n"
- "st1 { v30.s }[0], [x20]\n"
- "st1 { v31.s }[0], [x23]\n"
+ "ldr x22, [x15, #0x0]\n"
+ "add x22, x22, x11\n"
+ "st1 { v23.s }[0], [x22]\n"
+ "ldr x21, [x15, #0x8]\n"
+ "ldr x20, [x15, #0x10]\n"
+ "add x21, x21, x11\n"
+ "add x20, x20, x11\n"
+ "ldr x19, [x15, #0x18]\n"
+ "ldr x22, [x15, #0x20]\n"
+ "add x19, x19, x11\n"
+ "add x22, x22, x11\n"
+ "st1 { v24.s }[0], [x21]\n"
+ "ldr x21, [x15, #0x28]\n"
+ "add x21, x21, x11\n"
+ "st1 { v25.s }[0], [x20]\n"
+ "ldr x20, [x15, #0x30]\n"
+ "add x20, x20, x11\n"
+ "st1 { v26.s }[0], [x19]\n"
+ "ldr x19, [x15, #0x38]\n"
+ "add x19, x19, x11\n"
+ "st1 { v27.s }[0], [x22]\n"
+ "ldr x22, [x15, #0x40]\n"
+ "add x22, x22, x11\n"
+ "add x11, x11, #0x4\n"
+ "st1 { v28.s }[0], [x21]\n"
+ "st1 { v29.s }[0], [x20]\n"
+ "st1 { v30.s }[0], [x19]\n"
+ "st1 { v31.s }[0], [x22]\n"
"tbz %x[n_channels], #0, 91f\n"
- "ldr x23, [x16, #0x0]\n"
- "add x23, x23, x12\n"
- "st1 { v23.h }[2], [x23]\n"
- "ldr x22, [x16, #0x8]\n"
- "ldr x21, [x16, #0x10]\n"
- "ldr x20, [x16, #0x18]\n"
- "add x22, x22, x12\n"
- "add x21, x21, x12\n"
- "ldr x23, [x16, #0x20]\n"
- "add x20, x20, x12\n"
- "add x23, x23, x12\n"
- "st1 { v24.h }[2], [x22]\n"
- "st1 { v25.h }[2], [x21]\n"
- "ldr x22, [x16, #0x28]\n"
- "ldr x21, [x16, #0x30]\n"
- "add x22, x22, x12\n"
- "st1 { v26.h }[2], [x20]\n"
- "ldr x20, [x16, #0x38]\n"
- "add x21, x21, x12\n"
- "add x20, x20, x12\n"
- "st1 { v27.h }[2], [x23]\n"
- "ldr x23, [x16, #0x40]\n"
- "add x23, x23, x12\n"
- "st1 { v28.h }[2], [x22]\n"
- "st1 { v29.h }[2], [x21]\n"
- "st1 { v30.h }[2], [x20]\n"
- "st1 { v31.h }[2], [x23]\n"
+ "ldr x22, [x15, #0x0]\n"
+ "add x22, x22, x11\n"
+ "st1 { v23.h }[2], [x22]\n"
+ "ldr x21, [x15, #0x8]\n"
+ "ldr x20, [x15, #0x10]\n"
+ "add x21, x21, x11\n"
+ "ldr x19, [x15, #0x18]\n"
+ "ldr x22, [x15, #0x20]\n"
+ "add x20, x20, x11\n"
+ "add x19, x19, x11\n"
+ "add x22, x22, x11\n"
+ "st1 { v24.h }[2], [x21]\n"
+ "ldr x21, [x15, #0x28]\n"
+ "add x21, x21, x11\n"
+ "st1 { v25.h }[2], [x20]\n"
+ "ldr x20, [x15, #0x30]\n"
+ "add x20, x20, x11\n"
+ "st1 { v26.h }[2], [x19]\n"
+ "ldr x19, [x15, #0x38]\n"
+ "add x19, x19, x11\n"
+ "st1 { v27.h }[2], [x22]\n"
+ "ldr x22, [x15, #0x40]\n"
+ "add x22, x22, x11\n"
+ "st1 { v28.h }[2], [x21]\n"
+ "st1 { v29.h }[2], [x20]\n"
+ "st1 { v30.h }[2], [x19]\n"
+ "st1 { v31.h }[2], [x22]\n"
"b 91f\n"
"90:" // Oddments: Store: Bit 2: Unset: Bit 1: Unset
- "ldr x23, [x16, #0x0]\n"
- "add x23, x23, x12\n"
- "st1 { v23.h }[0], [x23]\n"
- "ldr x22, [x16, #0x8]\n"
- "ldr x21, [x16, #0x10]\n"
- "ldr x20, [x16, #0x18]\n"
- "add x22, x22, x12\n"
- "add x21, x21, x12\n"
- "ldr x23, [x16, #0x20]\n"
- "add x20, x20, x12\n"
- "add x23, x23, x12\n"
- "st1 { v24.h }[0], [x22]\n"
- "st1 { v25.h }[0], [x21]\n"
- "ldr x22, [x16, #0x28]\n"
- "ldr x21, [x16, #0x30]\n"
- "add x22, x22, x12\n"
- "st1 { v26.h }[0], [x20]\n"
- "ldr x20, [x16, #0x38]\n"
- "add x21, x21, x12\n"
- "add x20, x20, x12\n"
- "st1 { v27.h }[0], [x23]\n"
- "ldr x23, [x16, #0x40]\n"
- "add x23, x23, x12\n"
- "st1 { v28.h }[0], [x22]\n"
- "st1 { v29.h }[0], [x21]\n"
- "st1 { v30.h }[0], [x20]\n"
- "st1 { v31.h }[0], [x23]\n"
+ "ldr x22, [x15, #0x0]\n"
+ "add x22, x22, x11\n"
+ "st1 { v23.h }[0], [x22]\n"
+ "ldr x21, [x15, #0x8]\n"
+ "ldr x20, [x15, #0x10]\n"
+ "ldr x19, [x15, #0x18]\n"
+ "add x21, x21, x11\n"
+ "add x20, x20, x11\n"
+ "ldr x22, [x15, #0x20]\n"
+ "add x19, x19, x11\n"
+ "add x22, x22, x11\n"
+ "st1 { v24.h }[0], [x21]\n"
+ "st1 { v25.h }[0], [x20]\n"
+ "ldr x21, [x15, #0x28]\n"
+ "ldr x20, [x15, #0x30]\n"
+ "add x21, x21, x11\n"
+ "st1 { v26.h }[0], [x19]\n"
+ "ldr x19, [x15, #0x38]\n"
+ "add x20, x20, x11\n"
+ "add x19, x19, x11\n"
+ "st1 { v27.h }[0], [x22]\n"
+ "ldr x22, [x15, #0x40]\n"
+ "add x22, x22, x11\n"
+ "st1 { v28.h }[0], [x21]\n"
+ "st1 { v29.h }[0], [x20]\n"
+ "st1 { v30.h }[0], [x19]\n"
+ "st1 { v31.h }[0], [x22]\n"
"91:" // Oddments: Store: Bit 2: End
"92:" // End
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
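
The repeated "Oddments" blocks in the hunk above are the kernel's tail handling: once the main loop has consumed the channels eight fp16 lanes at a time, the low bits of n_channels are tested with tbz to move the remaining one-to-seven lanes, widest chunk first (a doubleword, then a word, then a single half), continuing at the next free lane index of the same vector register. A minimal scalar C++ sketch of that bit-by-bit pattern follows; the helper name and the use of uint16_t as a stand-in for __fp16 are illustrative assumptions, not part of the patch.

    #include <cstddef>
    #include <cstdint>

    // Scalar sketch of the "Oddments" tail: copy the final
    // n_channels % 8 half-precision lanes, widest chunk first.
    // uint16_t stands in for the Arm __fp16 extension type so the
    // sketch compiles on any toolchain. Each branch taken here
    // corresponds to the bit being set, i.e. the tbz NOT branching.
    static void load_oddments(const uint16_t *src, uint16_t *lane,
                              std::size_t n_channels)
    {
        if (n_channels & 4) {                   // mirrors "tbz ..., #2"
            for (int k = 0; k < 4; ++k)
                *lane++ = *src++;               // ld1 { v.d }[0]
        }
        if (n_channels & 2) {                   // mirrors "tbz ..., #1"
            for (int k = 0; k < 2; ++k)
                *lane++ = *src++;               // ld1 { v.s }[0 or 2]
        }
        if (n_channels & 1) {                   // mirrors "tbz ..., #0"
            *lane++ = *src++;                   // ld1 { v.h }[lane]
        }
    }

The "Oddments: Store" blocks are the same structure with st1 in place of ld1. Note that the revert only renames general-purpose registers — visibly, the clobber list regains "x19" and drops "x8" — so the floating-point arithmetic of the kernel is unchanged.
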
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
index a3a372be05..b5bee7ae7c 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,186 +87,187 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
);
__asm__ __volatile__(
- "mov x27, #0x0\n"
"mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"1:" // Tile loop
- "str x27, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "mov x25, #0x4\n"
- "mov x23, #0x4\n"
- "str x26, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x24, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x22, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "mul x21, x27, x24\n" // offset = tile_i * ld_input_row
- "ldr x4, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "ldr x5, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "mul x20, x27, x22\n" // offset = tile_i * ld_output_row
- "mov x6, #0x10\n" // cntb _, ALL, #1
- "madd x21, x26, x4, x21\n" // offset += tile_j * ld_input_col
- "ldr x7, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "lsl x4, x4, #0x1\n"
- "ldr x8, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "madd x20, x26, x5, x20\n" // offset += tile_j * ld_output_col
+ "str x26, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "mov x24, #0x4\n"
+ "mov x22, #0x4\n"
+ "str x25, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x23, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "ldr x21, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "mul x20, x26, x23\n" // offset = tile_i * ld_input_row
+ "ldr x5, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "ldr x6, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+ "mul x19, x26, x21\n" // offset = tile_i * ld_output_row
+ "mov x7, #0x10\n" // cntb _, ALL, #1
+ "madd x20, x25, x5, x20\n" // offset += tile_j * ld_input_col
+ "ldr x8, [%x[params_struct], %[offsetof_args_inptr]]\n"
"lsl x5, x5, #0x1\n"
- "add x17, x4, x4\n"
- "ldr x16, [%x[params_struct], %[offsetof_args_params]]\n"
- "mul x21, x21, x25\n" // offset *= kernel_stride * output_size
- "add x7, x7, x21, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
- "add x15, x7, x24, LSL #1\n"
- "mul x20, x20, x23\n" // offset *= output_tile_size
- "add x14, x15, x24, LSL #1\n"
- "add x8, x8, x20, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
- "lsr x13, %x[n_channels], #0x3\n"
- "add x12, x14, x24, LSL #1\n"
- "add x11, x17, x4\n"
- "add x10, x8, x22, LSL #1\n"
- "add x9, x12, x24, LSL #1\n"
- "add x28, x11, x4\n"
- "add x27, x10, x22, LSL #1\n"
- "add x23, x5, x5\n"
+ "ldr x17, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "madd x19, x25, x6, x19\n" // offset += tile_j * ld_output_col
+ "lsl x6, x6, #0x1\n"
+ "add x16, x5, x5\n"
+ "ldr x15, [%x[params_struct], %[offsetof_args_params]]\n"
+ "mul x20, x20, x24\n" // offset *= kernel_stride * output_size
+ "add x8, x8, x20, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
+ "add x14, x8, x23, LSL #1\n"
+ "mul x19, x19, x22\n" // offset *= output_tile_size
+ "add x13, x14, x23, LSL #1\n"
+ "add x17, x17, x19, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
+ "lsr x12, %x[n_channels], #0x3\n"
+ "add x11, x13, x23, LSL #1\n"
+ "add x10, x16, x5\n"
+ "add x9, x17, x21, LSL #1\n"
+ "add x28, x11, x23, LSL #1\n"
+ "add x27, x10, x5\n"
+ "add x26, x9, x21, LSL #1\n"
+ "add x22, x6, x6\n"
"add x20, %x[params_struct], %[offsetof_args_min]\n"
+ "add x19, %x[params_struct], %[offsetof_args_max]\n"
"ld1r { v15.8h }, [x20]\n"
- "add x20, %x[params_struct], %[offsetof_args_max]\n"
- "ld1r { v14.8h }, [x20]\n"
- "add x26, x9, x24, LSL #1\n"
- "add x25, x28, x4\n"
- "add x24, x27, x22, LSL #1\n"
- "add x22, x23, x5\n"
- "mov x21, #0x0\n"
- "sub x20, XZR, x6\n"
- "cbz x13, 4f\n"
- "ldr q13, [x16, #0x0]\n"
- "ldr q0, [x16, #0x10]\n"
- "cmp x6, x13, LSL #4\n"
- "ldr q1, [x16, #0x20]\n"
- "ldr q2, [x16, #0x30]\n"
- "ldr q3, [x16, #0x40]\n"
- "ldr q4, [x16, #0x50]\n"
- "ldr q5, [x16, #0x60]\n"
- "ldr q6, [x16, #0x70]\n"
- "ldr q7, [x16, #0x80]\n"
- "ldr q8, [x16, #0x90]\n"
- "add x16, x16, #0xa0\n"
- "ldr q9, [x14, x17]\n"
- "ld1 { v10.8h }, [x7]\n"
- "ldr q11, [x7, x25]\n"
- "ldr q12, [x14, x11]\n"
+ "ld1r { v14.8h }, [x19]\n"
+ "add x25, x28, x23, LSL #1\n"
+ "add x24, x27, x5\n"
+ "add x23, x26, x21, LSL #1\n"
+ "add x21, x22, x6\n"
+ "mov x20, #0x0\n"
+ "sub x19, XZR, x7\n"
+ "cbz x12, 4f\n"
+ "ldr q13, [x15, #0x0]\n"
+ "cmp x7, x12, LSL #4\n"
+ "ldr q0, [x15, #0x10]\n"
+ "ldr q1, [x15, #0x20]\n"
+ "ldr q2, [x15, #0x30]\n"
+ "ldr q3, [x15, #0x40]\n"
+ "ldr q4, [x15, #0x50]\n"
+ "ldr q5, [x15, #0x60]\n"
+ "ldr q6, [x15, #0x70]\n"
+ "ldr q7, [x15, #0x80]\n"
+ "ldr q8, [x15, #0x90]\n"
+ "ldr q9, [x13, x16]\n"
+ "add x15, x15, #0xa0\n"
+ "ld1 { v10.8h }, [x8]\n"
+ "ldr q11, [x8, x24]\n"
+ "ldr q12, [x13, x10]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
"mov v21.16b, v13.16b\n fmla v21.8h, v4.8h, v9.8h\n"
"mov v16.16b, v13.16b\n fmla v16.8h, v8.8h, v9.8h\n"
- "add x6, x6, #0x10\n"
- "cmp x6, x13, LSL #4\n"
+ "add x7, x7, #0x10\n"
+ "cmp x7, x12, LSL #4\n"
"mov v22.16b, v13.16b\n fmla v22.8h, v3.8h, v9.8h\n"
"mov v25.16b, v13.16b\n fmla v25.8h, v1.8h, v9.8h\n"
+ "add x19, x19, #0x10\n"
"add x20, x20, #0x10\n"
- "add x21, x21, #0x10\n"
"mov v26.16b, v13.16b\n fmla v26.8h, v0.8h, v9.8h\n"
"fmla v21.8h, v5.8h, v12.8h\n"
"mov v17.16b, v13.16b\n fmla v17.8h, v7.8h, v9.8h\n"
"mov v18.16b, v13.16b\n fmla v18.8h, v6.8h, v9.8h\n"
"mov v20.16b, v13.16b\n fmla v20.8h, v5.8h, v9.8h\n"
"mov v24.16b, v13.16b\n fmla v24.8h, v2.8h, v9.8h\n"
- "ldr q9, [x12, x17]\n"
+ "ldr q9, [x11, x16]\n"
"fmla v16.8h, v0.8h, v10.8h\n"
- "ld1 { v10.8h }, [x26]\n"
"mov v19.16b, v13.16b\n fmla v19.8h, v2.8h, v11.8h\n"
- "ldr q11, [x26, x25]\n"
+ "ld1 { v10.8h }, [x25]\n"
+ "ldr q11, [x25, x24]\n"
"fmla v22.8h, v4.8h, v12.8h\n"
"fmla v25.8h, v2.8h, v12.8h\n"
"fmla v26.8h, v1.8h, v12.8h\n"
"mov v28.16b, v13.16b\n fmla v28.8h, v6.8h, v10.8h\n"
- "ldr q10, [x12, x11]\n"
+ "ldr q10, [x11, x10]\n"
"fmla v21.8h, v7.8h, v9.8h\n"
"fmla v17.8h, v8.8h, v12.8h\n"
"fmla v18.8h, v7.8h, v12.8h\n"
"fmla v19.8h, v6.8h, v12.8h\n"
"mov v23.16b, v13.16b\n fmla v23.8h, v3.8h, v12.8h\n"
"mov v27.16b, v13.16b\n fmla v27.8h, v0.8h, v12.8h\n"
- "ldr q12, [x7, x4]\n"
+ "ldr q12, [x8, x5]\n"
"mov v31.16b, v13.16b\n fmla v31.8h, v8.8h, v11.8h\n"
- "ldr q11, [x7, x28]\n"
"fmla v22.8h, v6.8h, v9.8h\n"
+ "ldr q11, [x8, x27]\n"
"fmla v25.8h, v4.8h, v9.8h\n"
"fmla v26.8h, v3.8h, v9.8h\n"
+ "mov v29.16b, v13.16b\n fmla v29.8h, v1.8h, v9.8h\n"
+ "mov v30.16b, v13.16b\n fmla v30.8h, v0.8h, v9.8h\n"
+ "ldr q13, [x15, #0x0]\n"
"fmla v20.8h, v8.8h, v9.8h\n"
"fmla v24.8h, v5.8h, v9.8h\n"
"fmla v28.8h, v2.8h, v9.8h\n"
"fmla v21.8h, v8.8h, v10.8h\n"
+ "ld1 { v9.8h }, [x14]\n"
"fmla v16.8h, v1.8h, v12.8h\n"
"fmla v17.8h, v0.8h, v12.8h\n"
- "ldr q12, [x15, x25]\n"
+ "ldr q12, [x14, x24]\n"
"fmla v18.8h, v2.8h, v11.8h\n"
"fmla v19.8h, v1.8h, v11.8h\n"
- "ld1 { v11.8h }, [x9]\n"
+ "ld1 { v11.8h }, [x28]\n"
"fmla v22.8h, v7.8h, v10.8h\n"
"fmla v23.8h, v6.8h, v10.8h\n"
"fmla v25.8h, v5.8h, v10.8h\n"
"fmla v26.8h, v4.8h, v10.8h\n"
"fmla v27.8h, v3.8h, v10.8h\n"
- "fmla v31.8h, v0.8h, v10.8h\n"
- "fmla v24.8h, v6.8h, v11.8h\n"
- "fmla v28.8h, v3.8h, v11.8h\n"
- "ldr q11, [x9, x25]\n"
- "fmla v19.8h, v5.8h, v12.8h\n"
- "fmla v23.8h, v2.8h, v12.8h\n"
- "ldr q12, [x15, x11]\n"
- "fmla v27.8h, v8.8h, v11.8h\n"
- "fmla v31.8h, v5.8h, v11.8h\n"
- "mov v29.16b, v13.16b\n fmla v29.8h, v1.8h, v9.8h\n"
- "mov v30.16b, v13.16b\n fmla v30.8h, v0.8h, v9.8h\n"
- "ld1 { v9.8h }, [x15]\n"
"fmla v29.8h, v2.8h, v10.8h\n"
"fmla v30.8h, v1.8h, v10.8h\n"
- "ldr q10, [x15, x17]\n"
+ "fmla v31.8h, v0.8h, v10.8h\n"
+ "ldr q10, [x14, x16]\n"
"fmla v20.8h, v0.8h, v9.8h\n"
+ "fmla v24.8h, v6.8h, v11.8h\n"
+ "fmla v28.8h, v3.8h, v11.8h\n"
"fmla v21.8h, v1.8h, v10.8h\n"
+ "ldr q11, [x28, x24]\n"
"fmla v16.8h, v3.8h, v9.8h\n"
- "ldr q11, [x26, x4]\n"
+ "fmla v19.8h, v5.8h, v12.8h\n"
+ "fmla v23.8h, v2.8h, v12.8h\n"
"fmla v17.8h, v4.8h, v10.8h\n"
+ "ldr q12, [x14, x10]\n"
"fmla v18.8h, v3.8h, v10.8h\n"
"fmla v22.8h, v0.8h, v10.8h\n"
+ "fmla v27.8h, v8.8h, v11.8h\n"
+ "fmla v31.8h, v5.8h, v11.8h\n"
+ "ldr q11, [x25, x5]\n"
"fmla v20.8h, v2.8h, v10.8h\n"
"fmla v21.8h, v2.8h, v12.8h\n"
"fmla v16.8h, v5.8h, v10.8h\n"
- "ldr q10, [x14, x4]\n"
"fmla v17.8h, v5.8h, v12.8h\n"
+ "ldr q10, [x13, x5]\n"
"fmla v18.8h, v4.8h, v12.8h\n"
"fmla v19.8h, v3.8h, v12.8h\n"
"fmla v22.8h, v1.8h, v12.8h\n"
"fmla v23.8h, v0.8h, v12.8h\n"
- "ldr q12, [x14, x28]\n"
+ "ldr q12, [x13, x27]\n"
"fmla v28.8h, v7.8h, v11.8h\n"
"fmla v29.8h, v6.8h, v11.8h\n"
- "ldr q11, [x26, x28]\n"
+ "ldr q11, [x25, x27]\n"
"fmla v20.8h, v4.8h, v10.8h\n"
"fmla v21.8h, v3.8h, v10.8h\n"
"fmla v24.8h, v1.8h, v10.8h\n"
"fmla v25.8h, v0.8h, v10.8h\n"
"fmla v16.8h, v7.8h, v10.8h\n"
"fmla v17.8h, v6.8h, v10.8h\n"
- "ldr q10, [x7, x17]\n"
+ "ldr q10, [x8, x16]\n"
"fmla v30.8h, v8.8h, v11.8h\n"
"fmla v31.8h, v7.8h, v11.8h\n"
- "ldr q11, [x12, x4]\n"
+ "ldr q11, [x11, x5]\n"
"fmla v18.8h, v8.8h, v12.8h\n"
"fmla v19.8h, v7.8h, v12.8h\n"
"fmla v22.8h, v5.8h, v12.8h\n"
"fmla v23.8h, v4.8h, v12.8h\n"
"fmla v26.8h, v2.8h, v12.8h\n"
"fmla v27.8h, v1.8h, v12.8h\n"
- "ldr q12, [x7, x11]\n"
- "add x7, x7, #0x10\n"
+ "ldr q12, [x8, x10]\n"
+ "add x8, x8, #0x10\n"
"fmla v20.8h, v7.8h, v11.8h\n"
"fmla v21.8h, v6.8h, v11.8h\n"
"fmla v24.8h, v4.8h, v11.8h\n"
"fmla v25.8h, v3.8h, v11.8h\n"
"fmla v28.8h, v1.8h, v11.8h\n"
"fmla v29.8h, v0.8h, v11.8h\n"
- "ldr q11, [x12, x28]\n"
+ "ldr q11, [x11, x27]\n"
"fmla v16.8h, v2.8h, v10.8h\n"
"fmla v17.8h, v1.8h, v10.8h\n"
"fmla v18.8h, v0.8h, v10.8h\n"
- "ld1 { v10.8h }, [x14]\n"
+ "ld1 { v10.8h }, [x13]\n"
"fmla v30.8h, v2.8h, v11.8h\n"
"fmla v19.8h, v0.8h, v12.8h\n"
"fmla v20.8h, v3.8h, v10.8h\n"
@@ -276,24 +277,25 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"fmla v26.8h, v5.8h, v11.8h\n"
"fmla v27.8h, v4.8h, v11.8h\n"
"fmla v31.8h, v1.8h, v11.8h\n"
- "ldr q11, [x9, x17]\n"
+ "ldr q11, [x28, x16]\n"
"fmla v17.8h, v2.8h, v12.8h\n"
"fmla v18.8h, v1.8h, v12.8h\n"
- "ldr q12, [x14, x25]\n"
- "add x14, x14, #0x10\n"
+ "ldr q12, [x13, x24]\n"
+ "add x13, x13, #0x10\n"
"fmla v16.8h, v6.8h, v10.8h\n"
- "ld1 { v10.8h }, [x12]\n"
+ "ld1 { v10.8h }, [x11]\n"
"fmla v29.8h, v4.8h, v11.8h\n"
+ "ldr q9, [x13, x16]\n"
"fmla v30.8h, v3.8h, v11.8h\n"
"fmla v19.8h, v8.8h, v12.8h\n"
"fmla v23.8h, v5.8h, v12.8h\n"
"fmla v27.8h, v2.8h, v12.8h\n"
- "ldr q12, [x12, x25]\n"
- "add x12, x12, #0x10\n"
+ "ldr q12, [x11, x24]\n"
+ "add x11, x11, #0x10\n"
"fmla v20.8h, v6.8h, v10.8h\n"
"fmla v24.8h, v3.8h, v10.8h\n"
"fmla v28.8h, v0.8h, v10.8h\n"
- "ldr q10, [x26, x17]\n"
+ "ldr q10, [x25, x16]\n"
"fmla v31.8h, v2.8h, v12.8h\n"
"fmla v29.8h, v7.8h, v10.8h\n"
"fmla v30.8h, v6.8h, v10.8h\n"
@@ -301,110 +303,108 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"fmla v25.8h, v7.8h, v11.8h\n"
"fmla v26.8h, v6.8h, v11.8h\n"
"fmla v28.8h, v5.8h, v11.8h\n"
- "ldr q11, [x9, x11]\n"
+ "ldr q11, [x28, x10]\n"
"fmla v27.8h, v5.8h, v12.8h\n"
"fmla v29.8h, v5.8h, v11.8h\n"
"fmla v30.8h, v4.8h, v11.8h\n"
"fmla v31.8h, v3.8h, v11.8h\n"
"fmla v23.8h, v8.8h, v12.8h\n"
- "ldr q12, [x26, x11]\n"
+ "ldr q12, [x25, x10]\n"
"fmla v28.8h, v8.8h, v10.8h\n"
- "ldr q10, [x15, x4]\n"
+ "ldr q10, [x14, x5]\n"
"fmla v25.8h, v8.8h, v11.8h\n"
"fmla v26.8h, v7.8h, v11.8h\n"
- "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
"fmla v27.8h, v6.8h, v11.8h\n"
- "ldr q11, [x15, x28]\n"
"fmla v29.8h, v8.8h, v12.8h\n"
- "add x15, x15, #0x10\n"
+ "ldr q11, [x14, x27]\n"
+ "add x14, x14, #0x10\n"
"fmla v30.8h, v7.8h, v12.8h\n"
"fmla v31.8h, v6.8h, v12.8h\n"
- "ldr q12, [x9, x4]\n"
+ "ldr q12, [x28, x5]\n"
"fmla v16.8h, v4.8h, v10.8h\n"
"fmla v17.8h, v3.8h, v10.8h\n"
"fmax v16.8h, v16.8h, v15.8h\n"
"fmla v20.8h, v1.8h, v10.8h\n"
"fmla v21.8h, v0.8h, v10.8h\n"
- "ldr q10, [x9, x28]\n"
- "ldr q9, [x14, x17]\n"
+ "ldr q10, [x28, x27]\n"
+ "fmax v17.8h, v17.8h, v15.8h\n"
"fmla v18.8h, v5.8h, v11.8h\n"
"fmla v19.8h, v4.8h, v11.8h\n"
- "fmax v17.8h, v17.8h, v15.8h\n"
- "add x9, x9, #0x10\n"
+ "fmax v18.8h, v18.8h, v15.8h\n"
+ "add x28, x28, #0x10\n"
"fmla v22.8h, v2.8h, v11.8h\n"
- "ldr q13, [x16, #0x0]\n"
"fmla v23.8h, v1.8h, v11.8h\n"
- "ldr q11, [x7, x25]\n"
- "ldr q0, [x16, #0x10]\n"
+ "fmax v19.8h, v19.8h, v15.8h\n"
+ "ldr q11, [x8, x24]\n"
"fmla v24.8h, v7.8h, v12.8h\n"
"fmla v25.8h, v6.8h, v12.8h\n"
- "ldr q1, [x16, #0x20]\n"
+ "fmax v20.8h, v20.8h, v15.8h\n"
+ "ldr q0, [x15, #0x10]\n"
"fmla v28.8h, v4.8h, v12.8h\n"
"fmla v29.8h, v3.8h, v12.8h\n"
- "ldr q12, [x14, x11]\n"
- "ldr q2, [x16, #0x30]\n"
+ "fmax v21.8h, v21.8h, v15.8h\n"
+ "ldr q12, [x13, x10]\n"
"fmla v26.8h, v8.8h, v10.8h\n"
- "ldr q3, [x16, #0x40]\n"
"fmla v27.8h, v7.8h, v10.8h\n"
- "ldr q6, [x16, #0x70]\n"
+ "fmax v22.8h, v22.8h, v15.8h\n"
+ "ldr q1, [x15, #0x20]\n"
"fmla v30.8h, v5.8h, v10.8h\n"
- "ldr q5, [x16, #0x60]\n"
"fmla v31.8h, v4.8h, v10.8h\n"
- "ld1 { v10.8h }, [x7]\n"
- "ldr q4, [x16, #0x50]\n"
- "fmax v18.8h, v18.8h, v15.8h\n"
- "fmax v19.8h, v19.8h, v15.8h\n"
- "fmax v20.8h, v20.8h, v15.8h\n"
- "fmax v21.8h, v21.8h, v15.8h\n"
- "fmax v22.8h, v22.8h, v15.8h\n"
"fmax v23.8h, v23.8h, v15.8h\n"
+ "ld1 { v10.8h }, [x8]\n"
"fmax v24.8h, v24.8h, v15.8h\n"
"fmax v25.8h, v25.8h, v15.8h\n"
+ "ldr q2, [x15, #0x30]\n"
+ "ldr q3, [x15, #0x40]\n"
"fmax v26.8h, v26.8h, v15.8h\n"
"fmax v27.8h, v27.8h, v15.8h\n"
+ "ldr q4, [x15, #0x50]\n"
+ "ldr q5, [x15, #0x60]\n"
"fmax v28.8h, v28.8h, v15.8h\n"
"fmax v29.8h, v29.8h, v15.8h\n"
+ "ldr q6, [x15, #0x70]\n"
+ "ldr q7, [x15, #0x80]\n"
"fmax v30.8h, v30.8h, v15.8h\n"
"fmax v31.8h, v31.8h, v15.8h\n"
+ "ldr q8, [x15, #0x90]\n"
+ "add x15, x15, #0xa0\n"
"fmin v16.8h, v16.8h, v14.8h\n"
"fmin v17.8h, v17.8h, v14.8h\n"
- "st1 { v16.8h }, [x8]\n"
- "ldr q7, [x16, #0x80]\n"
+ "st1 { v16.8h }, [x17]\n"
"fmin v18.8h, v18.8h, v14.8h\n"
"fmin v19.8h, v19.8h, v14.8h\n"
- "str q17, [x8, x5]\n"
- "ldr q8, [x16, #0x90]\n"
+ "str q17, [x17, x6]\n"
"fmin v20.8h, v20.8h, v14.8h\n"
"fmin v21.8h, v21.8h, v14.8h\n"
- "str q18, [x8, x23]\n"
- "add x16, x16, #0xa0\n"
+ "str q18, [x17, x22]\n"
"fmin v22.8h, v22.8h, v14.8h\n"
"fmin v23.8h, v23.8h, v14.8h\n"
- "str q19, [x8, x22]\n"
- "add x8, x8, #0x10\n"
+ "str q19, [x17, x21]\n"
+ "add x17, x17, #0x10\n"
"fmin v24.8h, v24.8h, v14.8h\n"
"fmin v25.8h, v25.8h, v14.8h\n"
- "st1 { v20.8h }, [x10]\n"
+ "st1 { v20.8h }, [x9]\n"
"fmin v26.8h, v26.8h, v14.8h\n"
"fmin v27.8h, v27.8h, v14.8h\n"
- "str q21, [x10, x5]\n"
+ "str q21, [x9, x6]\n"
"fmin v28.8h, v28.8h, v14.8h\n"
"fmin v29.8h, v29.8h, v14.8h\n"
- "str q22, [x10, x23]\n"
+ "str q22, [x9, x22]\n"
"fmin v30.8h, v30.8h, v14.8h\n"
"fmin v31.8h, v31.8h, v14.8h\n"
- "str q23, [x10, x22]\n"
- "add x10, x10, #0x10\n"
- "st1 { v24.8h }, [x27]\n"
- "str q25, [x27, x5]\n"
- "str q26, [x27, x23]\n"
- "str q27, [x27, x22]\n"
- "add x27, x27, #0x10\n"
- "st1 { v28.8h }, [x24]\n"
- "str q29, [x24, x5]\n"
- "str q30, [x24, x23]\n"
- "str q31, [x24, x22]\n"
- "add x24, x24, #0x10\n"
+ "str q23, [x9, x21]\n"
+ "add x9, x9, #0x10\n"
+ "st1 { v24.8h }, [x26]\n"
+ "str q25, [x26, x6]\n"
+ "str q26, [x26, x22]\n"
+ "str q27, [x26, x21]\n"
+ "add x26, x26, #0x10\n"
+ "st1 { v28.8h }, [x23]\n"
+ "str q29, [x23, x6]\n"
+ "str q30, [x23, x22]\n"
+ "str q31, [x23, x21]\n"
+ "add x23, x23, #0x10\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
"mov v21.16b, v13.16b\n fmla v21.8h, v4.8h, v9.8h\n"
@@ -417,107 +417,107 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"mov v18.16b, v13.16b\n fmla v18.8h, v6.8h, v9.8h\n"
"mov v20.16b, v13.16b\n fmla v20.8h, v5.8h, v9.8h\n"
"mov v24.16b, v13.16b\n fmla v24.8h, v2.8h, v9.8h\n"
- "ldr q9, [x12, x17]\n"
+ "ldr q9, [x11, x16]\n"
"fmla v16.8h, v0.8h, v10.8h\n"
- "ld1 { v10.8h }, [x26]\n"
"mov v19.16b, v13.16b\n fmla v19.8h, v2.8h, v11.8h\n"
- "ldr q11, [x26, x25]\n"
+ "ld1 { v10.8h }, [x25]\n"
+ "ldr q11, [x25, x24]\n"
"fmla v22.8h, v4.8h, v12.8h\n"
"fmla v25.8h, v2.8h, v12.8h\n"
"fmla v26.8h, v1.8h, v12.8h\n"
"mov v28.16b, v13.16b\n fmla v28.8h, v6.8h, v10.8h\n"
- "ldr q10, [x12, x11]\n"
+ "ldr q10, [x11, x10]\n"
"fmla v21.8h, v7.8h, v9.8h\n"
"fmla v17.8h, v8.8h, v12.8h\n"
"fmla v18.8h, v7.8h, v12.8h\n"
"fmla v19.8h, v6.8h, v12.8h\n"
"mov v23.16b, v13.16b\n fmla v23.8h, v3.8h, v12.8h\n"
"mov v27.16b, v13.16b\n fmla v27.8h, v0.8h, v12.8h\n"
- "ldr q12, [x7, x4]\n"
+ "ldr q12, [x8, x5]\n"
"mov v31.16b, v13.16b\n fmla v31.8h, v8.8h, v11.8h\n"
- "ldr q11, [x7, x28]\n"
"fmla v22.8h, v6.8h, v9.8h\n"
+ "ldr q11, [x8, x27]\n"
"fmla v25.8h, v4.8h, v9.8h\n"
"fmla v26.8h, v3.8h, v9.8h\n"
+ "mov v29.16b, v13.16b\n fmla v29.8h, v1.8h, v9.8h\n"
+ "mov v30.16b, v13.16b\n fmla v30.8h, v0.8h, v9.8h\n"
"fmla v20.8h, v8.8h, v9.8h\n"
"fmla v24.8h, v5.8h, v9.8h\n"
"fmla v28.8h, v2.8h, v9.8h\n"
"fmla v21.8h, v8.8h, v10.8h\n"
+ "ld1 { v9.8h }, [x14]\n"
"fmla v16.8h, v1.8h, v12.8h\n"
"fmla v17.8h, v0.8h, v12.8h\n"
- "ldr q12, [x15, x25]\n"
+ "ldr q12, [x14, x24]\n"
"fmla v18.8h, v2.8h, v11.8h\n"
"fmla v19.8h, v1.8h, v11.8h\n"
- "ld1 { v11.8h }, [x9]\n"
+ "ld1 { v11.8h }, [x28]\n"
"fmla v22.8h, v7.8h, v10.8h\n"
"fmla v23.8h, v6.8h, v10.8h\n"
"fmla v25.8h, v5.8h, v10.8h\n"
"fmla v26.8h, v4.8h, v10.8h\n"
"fmla v27.8h, v3.8h, v10.8h\n"
- "fmla v31.8h, v0.8h, v10.8h\n"
- "fmla v24.8h, v6.8h, v11.8h\n"
- "fmla v28.8h, v3.8h, v11.8h\n"
- "ldr q11, [x9, x25]\n"
- "fmla v19.8h, v5.8h, v12.8h\n"
- "fmla v23.8h, v2.8h, v12.8h\n"
- "ldr q12, [x15, x11]\n"
- "fmla v27.8h, v8.8h, v11.8h\n"
- "fmla v31.8h, v5.8h, v11.8h\n"
- "mov v29.16b, v13.16b\n fmla v29.8h, v1.8h, v9.8h\n"
- "mov v30.16b, v13.16b\n fmla v30.8h, v0.8h, v9.8h\n"
- "ld1 { v9.8h }, [x15]\n"
"fmla v29.8h, v2.8h, v10.8h\n"
"fmla v30.8h, v1.8h, v10.8h\n"
- "ldr q10, [x15, x17]\n"
+ "fmla v31.8h, v0.8h, v10.8h\n"
+ "ldr q10, [x14, x16]\n"
"fmla v20.8h, v0.8h, v9.8h\n"
+ "fmla v24.8h, v6.8h, v11.8h\n"
+ "fmla v28.8h, v3.8h, v11.8h\n"
"fmla v21.8h, v1.8h, v10.8h\n"
+ "ldr q11, [x28, x24]\n"
"fmla v16.8h, v3.8h, v9.8h\n"
- "ldr q11, [x26, x4]\n"
+ "fmla v19.8h, v5.8h, v12.8h\n"
+ "fmla v23.8h, v2.8h, v12.8h\n"
"fmla v17.8h, v4.8h, v10.8h\n"
+ "ldr q12, [x14, x10]\n"
"fmla v18.8h, v3.8h, v10.8h\n"
"fmla v22.8h, v0.8h, v10.8h\n"
+ "fmla v27.8h, v8.8h, v11.8h\n"
+ "fmla v31.8h, v5.8h, v11.8h\n"
+ "ldr q11, [x25, x5]\n"
"fmla v20.8h, v2.8h, v10.8h\n"
"fmla v21.8h, v2.8h, v12.8h\n"
"fmla v16.8h, v5.8h, v10.8h\n"
- "ldr q10, [x14, x4]\n"
"fmla v17.8h, v5.8h, v12.8h\n"
+ "ldr q10, [x13, x5]\n"
"fmla v18.8h, v4.8h, v12.8h\n"
"fmla v19.8h, v3.8h, v12.8h\n"
"fmla v22.8h, v1.8h, v12.8h\n"
"fmla v23.8h, v0.8h, v12.8h\n"
- "ldr q12, [x14, x28]\n"
+ "ldr q12, [x13, x27]\n"
"fmla v28.8h, v7.8h, v11.8h\n"
"fmla v29.8h, v6.8h, v11.8h\n"
- "ldr q11, [x26, x28]\n"
+ "ldr q11, [x25, x27]\n"
"fmla v20.8h, v4.8h, v10.8h\n"
"fmla v21.8h, v3.8h, v10.8h\n"
"fmla v24.8h, v1.8h, v10.8h\n"
"fmla v25.8h, v0.8h, v10.8h\n"
"fmla v16.8h, v7.8h, v10.8h\n"
"fmla v17.8h, v6.8h, v10.8h\n"
- "ldr q10, [x7, x17]\n"
+ "ldr q10, [x8, x16]\n"
"fmla v30.8h, v8.8h, v11.8h\n"
"fmla v31.8h, v7.8h, v11.8h\n"
- "ldr q11, [x12, x4]\n"
+ "ldr q11, [x11, x5]\n"
"fmla v18.8h, v8.8h, v12.8h\n"
"fmla v19.8h, v7.8h, v12.8h\n"
"fmla v22.8h, v5.8h, v12.8h\n"
"fmla v23.8h, v4.8h, v12.8h\n"
"fmla v26.8h, v2.8h, v12.8h\n"
"fmla v27.8h, v1.8h, v12.8h\n"
- "ldr q12, [x7, x11]\n"
- "add x7, x7, #0x10\n"
+ "ldr q12, [x8, x10]\n"
+ "add x8, x8, #0x10\n"
"fmla v20.8h, v7.8h, v11.8h\n"
"fmla v21.8h, v6.8h, v11.8h\n"
"fmla v24.8h, v4.8h, v11.8h\n"
"fmla v25.8h, v3.8h, v11.8h\n"
"fmla v28.8h, v1.8h, v11.8h\n"
"fmla v29.8h, v0.8h, v11.8h\n"
- "ldr q11, [x12, x28]\n"
+ "ldr q11, [x11, x27]\n"
"fmla v16.8h, v2.8h, v10.8h\n"
"fmla v17.8h, v1.8h, v10.8h\n"
"fmla v18.8h, v0.8h, v10.8h\n"
- "ld1 { v10.8h }, [x14]\n"
+ "ld1 { v10.8h }, [x13]\n"
"fmla v30.8h, v2.8h, v11.8h\n"
"fmla v19.8h, v0.8h, v12.8h\n"
"fmla v20.8h, v3.8h, v10.8h\n"
@@ -527,24 +527,24 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"fmla v26.8h, v5.8h, v11.8h\n"
"fmla v27.8h, v4.8h, v11.8h\n"
"fmla v31.8h, v1.8h, v11.8h\n"
- "ldr q11, [x9, x17]\n"
+ "ldr q11, [x28, x16]\n"
"fmla v17.8h, v2.8h, v12.8h\n"
"fmla v18.8h, v1.8h, v12.8h\n"
- "ldr q12, [x14, x25]\n"
- "add x14, x14, #0x10\n"
+ "ldr q12, [x13, x24]\n"
+ "add x13, x13, #0x10\n"
"fmla v16.8h, v6.8h, v10.8h\n"
- "ld1 { v10.8h }, [x12]\n"
+ "ld1 { v10.8h }, [x11]\n"
"fmla v29.8h, v4.8h, v11.8h\n"
"fmla v30.8h, v3.8h, v11.8h\n"
"fmla v19.8h, v8.8h, v12.8h\n"
"fmla v23.8h, v5.8h, v12.8h\n"
"fmla v27.8h, v2.8h, v12.8h\n"
- "ldr q12, [x12, x25]\n"
- "add x12, x12, #0x10\n"
+ "ldr q12, [x11, x24]\n"
+ "add x11, x11, #0x10\n"
"fmla v20.8h, v6.8h, v10.8h\n"
"fmla v24.8h, v3.8h, v10.8h\n"
"fmla v28.8h, v0.8h, v10.8h\n"
- "ldr q10, [x26, x17]\n"
+ "ldr q10, [x25, x16]\n"
"fmla v31.8h, v2.8h, v12.8h\n"
"fmla v29.8h, v7.8h, v10.8h\n"
"fmla v30.8h, v6.8h, v10.8h\n"
@@ -552,36 +552,36 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"fmla v25.8h, v7.8h, v11.8h\n"
"fmla v26.8h, v6.8h, v11.8h\n"
"fmla v28.8h, v5.8h, v11.8h\n"
- "ldr q11, [x9, x11]\n"
+ "ldr q11, [x28, x10]\n"
"fmla v27.8h, v5.8h, v12.8h\n"
"fmla v29.8h, v5.8h, v11.8h\n"
"fmla v30.8h, v4.8h, v11.8h\n"
"fmla v31.8h, v3.8h, v11.8h\n"
"fmla v23.8h, v8.8h, v12.8h\n"
- "ldr q12, [x26, x11]\n"
+ "ldr q12, [x25, x10]\n"
"fmla v28.8h, v8.8h, v10.8h\n"
- "ldr q10, [x15, x4]\n"
+ "ldr q10, [x14, x5]\n"
"fmla v25.8h, v8.8h, v11.8h\n"
"fmla v26.8h, v7.8h, v11.8h\n"
- "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
"fmla v27.8h, v6.8h, v11.8h\n"
- "ldr q11, [x15, x28]\n"
"fmla v29.8h, v8.8h, v12.8h\n"
- "add x15, x15, #0x10\n"
+ "ldr q11, [x14, x27]\n"
+ "add x14, x14, #0x10\n"
"fmla v30.8h, v7.8h, v12.8h\n"
"fmla v31.8h, v6.8h, v12.8h\n"
- "ldr q12, [x9, x4]\n"
+ "ldr q12, [x28, x5]\n"
"fmla v16.8h, v4.8h, v10.8h\n"
"fmla v17.8h, v3.8h, v10.8h\n"
"fmax v16.8h, v16.8h, v15.8h\n"
"fmla v20.8h, v1.8h, v10.8h\n"
"fmla v21.8h, v0.8h, v10.8h\n"
- "ldr q10, [x9, x28]\n"
+ "ldr q10, [x28, x27]\n"
"fmax v17.8h, v17.8h, v15.8h\n"
"fmla v18.8h, v5.8h, v11.8h\n"
"fmla v19.8h, v4.8h, v11.8h\n"
"fmax v18.8h, v18.8h, v15.8h\n"
- "add x9, x9, #0x10\n"
+ "add x28, x28, #0x10\n"
"fmla v22.8h, v2.8h, v11.8h\n"
"fmla v23.8h, v1.8h, v11.8h\n"
"fmax v19.8h, v19.8h, v15.8h\n"
@@ -607,101 +607,101 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"fmax v31.8h, v31.8h, v15.8h\n"
"fmin v16.8h, v16.8h, v14.8h\n"
"fmin v17.8h, v17.8h, v14.8h\n"
- "st1 { v16.8h }, [x8]\n"
+ "st1 { v16.8h }, [x17]\n"
"fmin v18.8h, v18.8h, v14.8h\n"
"fmin v19.8h, v19.8h, v14.8h\n"
- "str q17, [x8, x5]\n"
+ "str q17, [x17, x6]\n"
"fmin v20.8h, v20.8h, v14.8h\n"
"fmin v21.8h, v21.8h, v14.8h\n"
- "str q18, [x8, x23]\n"
+ "str q18, [x17, x22]\n"
"fmin v22.8h, v22.8h, v14.8h\n"
"fmin v23.8h, v23.8h, v14.8h\n"
- "str q19, [x8, x22]\n"
- "add x8, x8, #0x10\n"
+ "str q19, [x17, x21]\n"
+ "add x17, x17, #0x10\n"
"fmin v24.8h, v24.8h, v14.8h\n"
"fmin v25.8h, v25.8h, v14.8h\n"
- "st1 { v20.8h }, [x10]\n"
+ "st1 { v20.8h }, [x9]\n"
"fmin v26.8h, v26.8h, v14.8h\n"
"fmin v27.8h, v27.8h, v14.8h\n"
- "str q21, [x10, x5]\n"
+ "str q21, [x9, x6]\n"
"fmin v28.8h, v28.8h, v14.8h\n"
"fmin v29.8h, v29.8h, v14.8h\n"
- "str q22, [x10, x23]\n"
+ "str q22, [x9, x22]\n"
"fmin v30.8h, v30.8h, v14.8h\n"
"fmin v31.8h, v31.8h, v14.8h\n"
- "str q23, [x10, x22]\n"
- "add x10, x10, #0x10\n"
- "st1 { v24.8h }, [x27]\n"
- "str q25, [x27, x5]\n"
- "str q26, [x27, x23]\n"
- "str q27, [x27, x22]\n"
- "add x27, x27, #0x10\n"
- "st1 { v28.8h }, [x24]\n"
- "str q29, [x24, x5]\n"
- "str q30, [x24, x23]\n"
- "str q31, [x24, x22]\n"
- "add x24, x24, #0x10\n"
+ "str q23, [x9, x21]\n"
+ "add x9, x9, #0x10\n"
+ "st1 { v24.8h }, [x26]\n"
+ "str q25, [x26, x6]\n"
+ "str q26, [x26, x22]\n"
+ "str q27, [x26, x21]\n"
+ "add x26, x26, #0x10\n"
+ "st1 { v28.8h }, [x23]\n"
+ "str q29, [x23, x6]\n"
+ "str q30, [x23, x22]\n"
+ "str q31, [x23, x21]\n"
+ "add x23, x23, #0x10\n"
"4:" // Tile loop: Oddments
"tst %x[n_channels], #0x7\n"
"beq 141f\n"
- "ldr q13, [x16, #0x0]\n"
- "ldr q0, [x16, #0x10]\n"
- "add x23, x14, x17\n"
- "add x22, x7, XZR\n"
- "ldr q1, [x16, #0x20]\n"
- "ldr q2, [x16, #0x30]\n"
- "add x21, x7, x25\n"
- "add x20, x14, x11\n"
- "ldr q3, [x16, #0x40]\n"
- "ldr q4, [x16, #0x50]\n"
- "ldr q5, [x16, #0x60]\n"
- "ldr q6, [x16, #0x70]\n"
- "ldr q7, [x16, #0x80]\n"
- "ldr q8, [x16, #0x90]\n"
+ "ldr q13, [x15, #0x0]\n"
+ "ldr q0, [x15, #0x10]\n"
+ "ldr q1, [x15, #0x20]\n"
+ "ldr q2, [x15, #0x30]\n"
+ "add x22, x13, x16\n"
+ "add x21, x8, XZR\n"
+ "ldr q3, [x15, #0x40]\n"
+ "ldr q4, [x15, #0x50]\n"
+ "add x20, x8, x24\n"
+ "add x19, x13, x10\n"
+ "ldr q5, [x15, #0x60]\n"
+ "ldr q6, [x15, #0x70]\n"
+ "ldr q7, [x15, #0x80]\n"
+ "ldr q8, [x15, #0x90]\n"
"tbz %x[n_channels], #2, 6f\n"
- "ldr d9, [x23], #0x8\n"
- "ldr d10, [x22], #0x8\n"
- "ldr d11, [x21], #0x8\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d9, [x22], #0x8\n"
+ "ldr d10, [x21], #0x8\n"
+ "ldr d11, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #1, 5f\n"
- "ld1 { v9.s }[2], [x23], #0x4\n"
- "ld1 { v10.s }[2], [x22], #0x4\n"
- "ld1 { v11.s }[2], [x21], #0x4\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x22], #0x4\n"
+ "ld1 { v10.s }[2], [x21], #0x4\n"
+ "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 8f\n"
- "ld1 { v9.h }[6], [x23]\n"
- "ld1 { v10.h }[6], [x22]\n"
- "ld1 { v11.h }[6], [x21]\n"
- "ld1 { v12.h }[6], [x20]\n"
+ "ld1 { v9.h }[6], [x22]\n"
+ "ld1 { v10.h }[6], [x21]\n"
+ "ld1 { v11.h }[6], [x20]\n"
+ "ld1 { v12.h }[6], [x19]\n"
"b 8f\n"
"5:" // Tile loop: Oddments: Load inputs: (2, 2), (0, 0), (0, 5), (2, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 8f\n"
- "ld1 { v9.h }[4], [x23]\n"
- "ld1 { v10.h }[4], [x22]\n"
- "ld1 { v11.h }[4], [x21]\n"
- "ld1 { v12.h }[4], [x20]\n"
+ "ld1 { v9.h }[4], [x22]\n"
+ "ld1 { v10.h }[4], [x21]\n"
+ "ld1 { v11.h }[4], [x20]\n"
+ "ld1 { v12.h }[4], [x19]\n"
"b 8f\n"
"6:" // Tile loop: Oddments: Load inputs: (2, 2), (0, 0), (0, 5), (2, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 7f\n"
- "ldr s9, [x23], #0x4\n"
- "ldr s10, [x22], #0x4\n"
- "ldr s11, [x21], #0x4\n"
- "ldr s12, [x20], #0x4\n"
+ "ldr s9, [x22], #0x4\n"
+ "ldr s10, [x21], #0x4\n"
+ "ldr s11, [x20], #0x4\n"
+ "ldr s12, [x19], #0x4\n"
"tbz %x[n_channels], #0, 8f\n"
- "ld1 { v9.h }[2], [x23]\n"
- "ld1 { v10.h }[2], [x22]\n"
- "ld1 { v11.h }[2], [x21]\n"
- "ld1 { v12.h }[2], [x20]\n"
+ "ld1 { v9.h }[2], [x22]\n"
+ "ld1 { v10.h }[2], [x21]\n"
+ "ld1 { v11.h }[2], [x20]\n"
+ "ld1 { v12.h }[2], [x19]\n"
"b 8f\n"
"7:" // Tile loop: Oddments: Load inputs: (2, 2), (0, 0), (0, 5), (2, 3): Bit 2: Unset: Bit 1: Unset
- "ldr h9, [x23, #0x0]\n"
- "ldr h10, [x22, #0x0]\n"
- "ldr h11, [x21, #0x0]\n"
- "ldr h12, [x20, #0x0]\n"
+ "ldr h9, [x22, #0x0]\n"
+ "ldr h10, [x21, #0x0]\n"
+ "ldr h11, [x20, #0x0]\n"
+ "ldr h12, [x19, #0x0]\n"
"8:" // Tile loop: Oddments: Load inputs: (2, 2), (0, 0), (0, 5), (2, 3): Bit 2: End
"mov v16.16b, v13.16b\n fmla v16.8h, v8.8h, v9.8h\n"
"mov v17.16b, v13.16b\n fmla v17.8h, v7.8h, v9.8h\n"
- "add x20, x26, XZR\n"
+ "add x19, x25, XZR\n"
"mov v18.16b, v13.16b\n fmla v18.8h, v6.8h, v9.8h\n"
"mov v21.16b, v13.16b\n fmla v21.8h, v4.8h, v9.8h\n"
"mov v22.16b, v13.16b\n fmla v22.8h, v3.8h, v9.8h\n"
@@ -721,72 +721,72 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"fmla v26.8h, v1.8h, v12.8h\n"
"mov v27.16b, v13.16b\n fmla v27.8h, v0.8h, v12.8h\n"
"tbz %x[n_channels], #2, 10f\n"
- "ldr d10, [x20], #0x8\n"
+ "ldr d10, [x19], #0x8\n"
"tbz %x[n_channels], #1, 9f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 12f\n"
- "ld1 { v10.h }[6], [x20]\n"
+ "ld1 { v10.h }[6], [x19]\n"
"b 12f\n"
"9:" // Tile loop: Oddments: Load inputs: (5, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 12f\n"
- "ld1 { v10.h }[4], [x20]\n"
+ "ld1 { v10.h }[4], [x19]\n"
"b 12f\n"
"10:" // Tile loop: Oddments: Load inputs: (5, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 11f\n"
- "ldr s10, [x20], #0x4\n"
+ "ldr s10, [x19], #0x4\n"
"tbz %x[n_channels], #0, 12f\n"
- "ld1 { v10.h }[2], [x20]\n"
+ "ld1 { v10.h }[2], [x19]\n"
"b 12f\n"
"11:" // Tile loop: Oddments: Load inputs: (5, 0): Bit 2: Unset: Bit 1: Unset
- "ldr h10, [x20, #0x0]\n"
+ "ldr h10, [x19, #0x0]\n"
"12:" // Tile loop: Oddments: Load inputs: (5, 0): Bit 2: End
"mov v28.16b, v13.16b\n fmla v28.8h, v6.8h, v10.8h\n"
- "add x20, x26, x25\n"
+ "add x19, x25, x24\n"
"tbz %x[n_channels], #2, 14f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #1, 13f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 16f\n"
- "ld1 { v11.h }[6], [x20]\n"
+ "ld1 { v11.h }[6], [x19]\n"
"b 16f\n"
"13:" // Tile loop: Oddments: Load inputs: (5, 5): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 16f\n"
- "ld1 { v11.h }[4], [x20]\n"
+ "ld1 { v11.h }[4], [x19]\n"
"b 16f\n"
"14:" // Tile loop: Oddments: Load inputs: (5, 5): Bit 2: Unset
"tbz %x[n_channels], #1, 15f\n"
- "ldr s11, [x20], #0x4\n"
+ "ldr s11, [x19], #0x4\n"
"tbz %x[n_channels], #0, 16f\n"
- "ld1 { v11.h }[2], [x20]\n"
+ "ld1 { v11.h }[2], [x19]\n"
"b 16f\n"
"15:" // Tile loop: Oddments: Load inputs: (5, 5): Bit 2: Unset: Bit 1: Unset
- "ldr h11, [x20, #0x0]\n"
+ "ldr h11, [x19, #0x0]\n"
"16:" // Tile loop: Oddments: Load inputs: (5, 5): Bit 2: End
"mov v31.16b, v13.16b\n fmla v31.8h, v8.8h, v11.8h\n"
- "add x20, x12, x17\n"
+ "add x19, x11, x16\n"
"tbz %x[n_channels], #2, 18f\n"
- "ldr d9, [x20], #0x8\n"
+ "ldr d9, [x19], #0x8\n"
"tbz %x[n_channels], #1, 17f\n"
- "ld1 { v9.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 20f\n"
- "ld1 { v9.h }[6], [x20]\n"
+ "ld1 { v9.h }[6], [x19]\n"
"b 20f\n"
"17:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 20f\n"
- "ld1 { v9.h }[4], [x20]\n"
+ "ld1 { v9.h }[4], [x19]\n"
"b 20f\n"
"18:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 19f\n"
- "ldr s9, [x20], #0x4\n"
+ "ldr s9, [x19], #0x4\n"
"tbz %x[n_channels], #0, 20f\n"
- "ld1 { v9.h }[2], [x20]\n"
+ "ld1 { v9.h }[2], [x19]\n"
"b 20f\n"
"19:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 2: Unset: Bit 1: Unset
- "ldr h9, [x20, #0x0]\n"
+ "ldr h9, [x19, #0x0]\n"
"20:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 2: End
"fmla v20.8h, v8.8h, v9.8h\n"
"fmla v21.8h, v7.8h, v9.8h\n"
- "add x20, x7, x4\n"
+ "add x19, x8, x5\n"
"fmla v22.8h, v6.8h, v9.8h\n"
"fmla v24.8h, v5.8h, v9.8h\n"
"fmla v25.8h, v4.8h, v9.8h\n"
@@ -795,74 +795,74 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"mov v29.16b, v13.16b\n fmla v29.8h, v1.8h, v9.8h\n"
"mov v30.16b, v13.16b\n fmla v30.8h, v0.8h, v9.8h\n"
"tbz %x[n_channels], #2, 22f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #1, 21f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 24f\n"
- "ld1 { v12.h }[6], [x20]\n"
+ "ld1 { v12.h }[6], [x19]\n"
"b 24f\n"
"21:" // Tile loop: Oddments: Load inputs: (0, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 24f\n"
- "ld1 { v12.h }[4], [x20]\n"
+ "ld1 { v12.h }[4], [x19]\n"
"b 24f\n"
"22:" // Tile loop: Oddments: Load inputs: (0, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 23f\n"
- "ldr s12, [x20], #0x4\n"
+ "ldr s12, [x19], #0x4\n"
"tbz %x[n_channels], #0, 24f\n"
- "ld1 { v12.h }[2], [x20]\n"
+ "ld1 { v12.h }[2], [x19]\n"
"b 24f\n"
"23:" // Tile loop: Oddments: Load inputs: (0, 1): Bit 2: Unset: Bit 1: Unset
- "ldr h12, [x20, #0x0]\n"
+ "ldr h12, [x19, #0x0]\n"
"24:" // Tile loop: Oddments: Load inputs: (0, 1): Bit 2: End
"fmla v16.8h, v1.8h, v12.8h\n"
"fmla v17.8h, v0.8h, v12.8h\n"
- "add x20, x7, x28\n"
+ "add x19, x8, x27\n"
"tbz %x[n_channels], #2, 26f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #1, 25f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 28f\n"
- "ld1 { v11.h }[6], [x20]\n"
+ "ld1 { v11.h }[6], [x19]\n"
"b 28f\n"
"25:" // Tile loop: Oddments: Load inputs: (0, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 28f\n"
- "ld1 { v11.h }[4], [x20]\n"
+ "ld1 { v11.h }[4], [x19]\n"
"b 28f\n"
"26:" // Tile loop: Oddments: Load inputs: (0, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 27f\n"
- "ldr s11, [x20], #0x4\n"
+ "ldr s11, [x19], #0x4\n"
"tbz %x[n_channels], #0, 28f\n"
- "ld1 { v11.h }[2], [x20]\n"
+ "ld1 { v11.h }[2], [x19]\n"
"b 28f\n"
"27:" // Tile loop: Oddments: Load inputs: (0, 4): Bit 2: Unset: Bit 1: Unset
- "ldr h11, [x20, #0x0]\n"
+ "ldr h11, [x19, #0x0]\n"
"28:" // Tile loop: Oddments: Load inputs: (0, 4): Bit 2: End
"fmla v18.8h, v2.8h, v11.8h\n"
"fmla v19.8h, v1.8h, v11.8h\n"
- "add x20, x12, x11\n"
+ "add x19, x11, x10\n"
"tbz %x[n_channels], #2, 30f\n"
- "ldr d10, [x20], #0x8\n"
+ "ldr d10, [x19], #0x8\n"
"tbz %x[n_channels], #1, 29f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 32f\n"
- "ld1 { v10.h }[6], [x20]\n"
+ "ld1 { v10.h }[6], [x19]\n"
"b 32f\n"
"29:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 32f\n"
- "ld1 { v10.h }[4], [x20]\n"
+ "ld1 { v10.h }[4], [x19]\n"
"b 32f\n"
"30:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 31f\n"
- "ldr s10, [x20], #0x4\n"
+ "ldr s10, [x19], #0x4\n"
"tbz %x[n_channels], #0, 32f\n"
- "ld1 { v10.h }[2], [x20]\n"
+ "ld1 { v10.h }[2], [x19]\n"
"b 32f\n"
"31:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 2: Unset: Bit 1: Unset
- "ldr h10, [x20, #0x0]\n"
+ "ldr h10, [x19, #0x0]\n"
"32:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 2: End
"fmla v21.8h, v8.8h, v10.8h\n"
"fmla v22.8h, v7.8h, v10.8h\n"
- "add x20, x15, XZR\n"
+ "add x19, x14, XZR\n"
"fmla v23.8h, v6.8h, v10.8h\n"
"fmla v25.8h, v5.8h, v10.8h\n"
"fmla v26.8h, v4.8h, v10.8h\n"
@@ -871,645 +871,645 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"fmla v30.8h, v1.8h, v10.8h\n"
"fmla v31.8h, v0.8h, v10.8h\n"
"tbz %x[n_channels], #2, 34f\n"
- "ldr d9, [x20], #0x8\n"
+ "ldr d9, [x19], #0x8\n"
"tbz %x[n_channels], #1, 33f\n"
- "ld1 { v9.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 36f\n"
- "ld1 { v9.h }[6], [x20]\n"
+ "ld1 { v9.h }[6], [x19]\n"
"b 36f\n"
"33:" // Tile loop: Oddments: Load inputs: (1, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 36f\n"
- "ld1 { v9.h }[4], [x20]\n"
+ "ld1 { v9.h }[4], [x19]\n"
"b 36f\n"
"34:" // Tile loop: Oddments: Load inputs: (1, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 35f\n"
- "ldr s9, [x20], #0x4\n"
+ "ldr s9, [x19], #0x4\n"
"tbz %x[n_channels], #0, 36f\n"
- "ld1 { v9.h }[2], [x20]\n"
+ "ld1 { v9.h }[2], [x19]\n"
"b 36f\n"
"35:" // Tile loop: Oddments: Load inputs: (1, 0): Bit 2: Unset: Bit 1: Unset
- "ldr h9, [x20, #0x0]\n"
+ "ldr h9, [x19, #0x0]\n"
"36:" // Tile loop: Oddments: Load inputs: (1, 0): Bit 2: End
"fmla v16.8h, v3.8h, v9.8h\n"
"fmla v20.8h, v0.8h, v9.8h\n"
- "add x20, x15, x25\n"
+ "add x19, x14, x24\n"
"tbz %x[n_channels], #2, 38f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #1, 37f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 40f\n"
- "ld1 { v12.h }[6], [x20]\n"
+ "ld1 { v12.h }[6], [x19]\n"
"b 40f\n"
"37:" // Tile loop: Oddments: Load inputs: (1, 5): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 40f\n"
- "ld1 { v12.h }[4], [x20]\n"
+ "ld1 { v12.h }[4], [x19]\n"
"b 40f\n"
"38:" // Tile loop: Oddments: Load inputs: (1, 5): Bit 2: Unset
"tbz %x[n_channels], #1, 39f\n"
- "ldr s12, [x20], #0x4\n"
+ "ldr s12, [x19], #0x4\n"
"tbz %x[n_channels], #0, 40f\n"
- "ld1 { v12.h }[2], [x20]\n"
+ "ld1 { v12.h }[2], [x19]\n"
"b 40f\n"
"39:" // Tile loop: Oddments: Load inputs: (1, 5): Bit 2: Unset: Bit 1: Unset
- "ldr h12, [x20, #0x0]\n"
+ "ldr h12, [x19, #0x0]\n"
"40:" // Tile loop: Oddments: Load inputs: (1, 5): Bit 2: End
"fmla v19.8h, v5.8h, v12.8h\n"
"fmla v23.8h, v2.8h, v12.8h\n"
- "add x20, x9, XZR\n"
+ "add x19, x28, XZR\n"
"tbz %x[n_channels], #2, 42f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #1, 41f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 44f\n"
- "ld1 { v11.h }[6], [x20]\n"
+ "ld1 { v11.h }[6], [x19]\n"
"b 44f\n"
"41:" // Tile loop: Oddments: Load inputs: (4, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 44f\n"
- "ld1 { v11.h }[4], [x20]\n"
+ "ld1 { v11.h }[4], [x19]\n"
"b 44f\n"
"42:" // Tile loop: Oddments: Load inputs: (4, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 43f\n"
- "ldr s11, [x20], #0x4\n"
+ "ldr s11, [x19], #0x4\n"
"tbz %x[n_channels], #0, 44f\n"
- "ld1 { v11.h }[2], [x20]\n"
+ "ld1 { v11.h }[2], [x19]\n"
"b 44f\n"
"43:" // Tile loop: Oddments: Load inputs: (4, 0): Bit 2: Unset: Bit 1: Unset
- "ldr h11, [x20, #0x0]\n"
+ "ldr h11, [x19, #0x0]\n"
"44:" // Tile loop: Oddments: Load inputs: (4, 0): Bit 2: End
"fmla v24.8h, v6.8h, v11.8h\n"
"fmla v28.8h, v3.8h, v11.8h\n"
- "add x20, x15, x17\n"
+ "add x19, x14, x16\n"
"tbz %x[n_channels], #2, 46f\n"
- "ldr d10, [x20], #0x8\n"
+ "ldr d10, [x19], #0x8\n"
"tbz %x[n_channels], #1, 45f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 48f\n"
- "ld1 { v10.h }[6], [x20]\n"
+ "ld1 { v10.h }[6], [x19]\n"
"b 48f\n"
"45:" // Tile loop: Oddments: Load inputs: (1, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 48f\n"
- "ld1 { v10.h }[4], [x20]\n"
+ "ld1 { v10.h }[4], [x19]\n"
"b 48f\n"
"46:" // Tile loop: Oddments: Load inputs: (1, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 47f\n"
- "ldr s10, [x20], #0x4\n"
+ "ldr s10, [x19], #0x4\n"
"tbz %x[n_channels], #0, 48f\n"
- "ld1 { v10.h }[2], [x20]\n"
+ "ld1 { v10.h }[2], [x19]\n"
"b 48f\n"
"47:" // Tile loop: Oddments: Load inputs: (1, 2): Bit 2: Unset: Bit 1: Unset
- "ldr h10, [x20, #0x0]\n"
+ "ldr h10, [x19, #0x0]\n"
"48:" // Tile loop: Oddments: Load inputs: (1, 2): Bit 2: End
"fmla v16.8h, v5.8h, v10.8h\n"
"fmla v17.8h, v4.8h, v10.8h\n"
- "add x20, x9, x25\n"
+ "add x19, x28, x24\n"
"fmla v18.8h, v3.8h, v10.8h\n"
"fmla v20.8h, v2.8h, v10.8h\n"
"fmla v21.8h, v1.8h, v10.8h\n"
"fmla v22.8h, v0.8h, v10.8h\n"
"tbz %x[n_channels], #2, 50f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #1, 49f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 52f\n"
- "ld1 { v11.h }[6], [x20]\n"
+ "ld1 { v11.h }[6], [x19]\n"
"b 52f\n"
"49:" // Tile loop: Oddments: Load inputs: (4, 5): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 52f\n"
- "ld1 { v11.h }[4], [x20]\n"
+ "ld1 { v11.h }[4], [x19]\n"
"b 52f\n"
"50:" // Tile loop: Oddments: Load inputs: (4, 5): Bit 2: Unset
"tbz %x[n_channels], #1, 51f\n"
- "ldr s11, [x20], #0x4\n"
+ "ldr s11, [x19], #0x4\n"
"tbz %x[n_channels], #0, 52f\n"
- "ld1 { v11.h }[2], [x20]\n"
+ "ld1 { v11.h }[2], [x19]\n"
"b 52f\n"
"51:" // Tile loop: Oddments: Load inputs: (4, 5): Bit 2: Unset: Bit 1: Unset
- "ldr h11, [x20, #0x0]\n"
+ "ldr h11, [x19, #0x0]\n"
"52:" // Tile loop: Oddments: Load inputs: (4, 5): Bit 2: End
"fmla v27.8h, v8.8h, v11.8h\n"
"fmla v31.8h, v5.8h, v11.8h\n"
- "add x20, x15, x11\n"
+ "add x19, x14, x10\n"
"tbz %x[n_channels], #2, 54f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #1, 53f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 56f\n"
- "ld1 { v12.h }[6], [x20]\n"
+ "ld1 { v12.h }[6], [x19]\n"
"b 56f\n"
"53:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 56f\n"
- "ld1 { v12.h }[4], [x20]\n"
+ "ld1 { v12.h }[4], [x19]\n"
"b 56f\n"
"54:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 55f\n"
- "ldr s12, [x20], #0x4\n"
+ "ldr s12, [x19], #0x4\n"
"tbz %x[n_channels], #0, 56f\n"
- "ld1 { v12.h }[2], [x20]\n"
+ "ld1 { v12.h }[2], [x19]\n"
"b 56f\n"
"55:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 2: Unset: Bit 1: Unset
- "ldr h12, [x20, #0x0]\n"
+ "ldr h12, [x19, #0x0]\n"
"56:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 2: End
"fmla v17.8h, v5.8h, v12.8h\n"
"fmla v18.8h, v4.8h, v12.8h\n"
- "add x20, x26, x4\n"
+ "add x19, x25, x5\n"
"fmla v19.8h, v3.8h, v12.8h\n"
"fmla v21.8h, v2.8h, v12.8h\n"
"fmla v22.8h, v1.8h, v12.8h\n"
"fmla v23.8h, v0.8h, v12.8h\n"
"tbz %x[n_channels], #2, 58f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #1, 57f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 60f\n"
- "ld1 { v11.h }[6], [x20]\n"
+ "ld1 { v11.h }[6], [x19]\n"
"b 60f\n"
"57:" // Tile loop: Oddments: Load inputs: (5, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 60f\n"
- "ld1 { v11.h }[4], [x20]\n"
+ "ld1 { v11.h }[4], [x19]\n"
"b 60f\n"
"58:" // Tile loop: Oddments: Load inputs: (5, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 59f\n"
- "ldr s11, [x20], #0x4\n"
+ "ldr s11, [x19], #0x4\n"
"tbz %x[n_channels], #0, 60f\n"
- "ld1 { v11.h }[2], [x20]\n"
+ "ld1 { v11.h }[2], [x19]\n"
"b 60f\n"
"59:" // Tile loop: Oddments: Load inputs: (5, 1): Bit 2: Unset: Bit 1: Unset
- "ldr h11, [x20, #0x0]\n"
+ "ldr h11, [x19, #0x0]\n"
"60:" // Tile loop: Oddments: Load inputs: (5, 1): Bit 2: End
"fmla v28.8h, v7.8h, v11.8h\n"
"fmla v29.8h, v6.8h, v11.8h\n"
- "add x20, x14, x4\n"
+ "add x19, x13, x5\n"
"tbz %x[n_channels], #2, 62f\n"
- "ldr d10, [x20], #0x8\n"
+ "ldr d10, [x19], #0x8\n"
"tbz %x[n_channels], #1, 61f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 64f\n"
- "ld1 { v10.h }[6], [x20]\n"
+ "ld1 { v10.h }[6], [x19]\n"
"b 64f\n"
"61:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 64f\n"
- "ld1 { v10.h }[4], [x20]\n"
+ "ld1 { v10.h }[4], [x19]\n"
"b 64f\n"
"62:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 63f\n"
- "ldr s10, [x20], #0x4\n"
+ "ldr s10, [x19], #0x4\n"
"tbz %x[n_channels], #0, 64f\n"
- "ld1 { v10.h }[2], [x20]\n"
+ "ld1 { v10.h }[2], [x19]\n"
"b 64f\n"
"63:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 2: Unset: Bit 1: Unset
- "ldr h10, [x20, #0x0]\n"
+ "ldr h10, [x19, #0x0]\n"
"64:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 2: End
"fmla v16.8h, v7.8h, v10.8h\n"
"fmla v17.8h, v6.8h, v10.8h\n"
- "add x20, x26, x28\n"
+ "add x19, x25, x27\n"
"fmla v20.8h, v4.8h, v10.8h\n"
"fmla v21.8h, v3.8h, v10.8h\n"
"fmla v24.8h, v1.8h, v10.8h\n"
"fmla v25.8h, v0.8h, v10.8h\n"
"tbz %x[n_channels], #2, 66f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #1, 65f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 68f\n"
- "ld1 { v11.h }[6], [x20]\n"
+ "ld1 { v11.h }[6], [x19]\n"
"b 68f\n"
"65:" // Tile loop: Oddments: Load inputs: (5, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 68f\n"
- "ld1 { v11.h }[4], [x20]\n"
+ "ld1 { v11.h }[4], [x19]\n"
"b 68f\n"
"66:" // Tile loop: Oddments: Load inputs: (5, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 67f\n"
- "ldr s11, [x20], #0x4\n"
+ "ldr s11, [x19], #0x4\n"
"tbz %x[n_channels], #0, 68f\n"
- "ld1 { v11.h }[2], [x20]\n"
+ "ld1 { v11.h }[2], [x19]\n"
"b 68f\n"
"67:" // Tile loop: Oddments: Load inputs: (5, 4): Bit 2: Unset: Bit 1: Unset
- "ldr h11, [x20, #0x0]\n"
+ "ldr h11, [x19, #0x0]\n"
"68:" // Tile loop: Oddments: Load inputs: (5, 4): Bit 2: End
"fmla v30.8h, v8.8h, v11.8h\n"
"fmla v31.8h, v7.8h, v11.8h\n"
- "add x20, x14, x28\n"
+ "add x19, x13, x27\n"
"tbz %x[n_channels], #2, 70f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #1, 69f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 72f\n"
- "ld1 { v12.h }[6], [x20]\n"
+ "ld1 { v12.h }[6], [x19]\n"
"b 72f\n"
"69:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 72f\n"
- "ld1 { v12.h }[4], [x20]\n"
+ "ld1 { v12.h }[4], [x19]\n"
"b 72f\n"
"70:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 71f\n"
- "ldr s12, [x20], #0x4\n"
+ "ldr s12, [x19], #0x4\n"
"tbz %x[n_channels], #0, 72f\n"
- "ld1 { v12.h }[2], [x20]\n"
+ "ld1 { v12.h }[2], [x19]\n"
"b 72f\n"
"71:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 2: Unset: Bit 1: Unset
- "ldr h12, [x20, #0x0]\n"
+ "ldr h12, [x19, #0x0]\n"
"72:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 2: End
"fmla v18.8h, v8.8h, v12.8h\n"
"fmla v19.8h, v7.8h, v12.8h\n"
- "add x20, x7, x17\n"
+ "add x19, x8, x16\n"
"fmla v22.8h, v5.8h, v12.8h\n"
"fmla v23.8h, v4.8h, v12.8h\n"
"fmla v26.8h, v2.8h, v12.8h\n"
"fmla v27.8h, v1.8h, v12.8h\n"
"tbz %x[n_channels], #2, 74f\n"
- "ldr d10, [x20], #0x8\n"
+ "ldr d10, [x19], #0x8\n"
"tbz %x[n_channels], #1, 73f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 76f\n"
- "ld1 { v10.h }[6], [x20]\n"
+ "ld1 { v10.h }[6], [x19]\n"
"b 76f\n"
"73:" // Tile loop: Oddments: Load inputs: (0, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 76f\n"
- "ld1 { v10.h }[4], [x20]\n"
+ "ld1 { v10.h }[4], [x19]\n"
"b 76f\n"
"74:" // Tile loop: Oddments: Load inputs: (0, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 75f\n"
- "ldr s10, [x20], #0x4\n"
+ "ldr s10, [x19], #0x4\n"
"tbz %x[n_channels], #0, 76f\n"
- "ld1 { v10.h }[2], [x20]\n"
+ "ld1 { v10.h }[2], [x19]\n"
"b 76f\n"
"75:" // Tile loop: Oddments: Load inputs: (0, 2): Bit 2: Unset: Bit 1: Unset
- "ldr h10, [x20, #0x0]\n"
+ "ldr h10, [x19, #0x0]\n"
"76:" // Tile loop: Oddments: Load inputs: (0, 2): Bit 2: End
"fmla v16.8h, v2.8h, v10.8h\n"
"fmla v17.8h, v1.8h, v10.8h\n"
- "add x20, x12, x4\n"
+ "add x19, x11, x5\n"
"fmla v18.8h, v0.8h, v10.8h\n"
"tbz %x[n_channels], #2, 78f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #1, 77f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 80f\n"
- "ld1 { v11.h }[6], [x20]\n"
+ "ld1 { v11.h }[6], [x19]\n"
"b 80f\n"
"77:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 80f\n"
- "ld1 { v11.h }[4], [x20]\n"
+ "ld1 { v11.h }[4], [x19]\n"
"b 80f\n"
"78:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 79f\n"
- "ldr s11, [x20], #0x4\n"
+ "ldr s11, [x19], #0x4\n"
"tbz %x[n_channels], #0, 80f\n"
- "ld1 { v11.h }[2], [x20]\n"
+ "ld1 { v11.h }[2], [x19]\n"
"b 80f\n"
"79:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 2: Unset: Bit 1: Unset
- "ldr h11, [x20, #0x0]\n"
+ "ldr h11, [x19, #0x0]\n"
"80:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 2: End
"fmla v20.8h, v7.8h, v11.8h\n"
"fmla v21.8h, v6.8h, v11.8h\n"
- "add x20, x7, x11\n"
+ "add x19, x8, x10\n"
"fmla v24.8h, v4.8h, v11.8h\n"
"fmla v25.8h, v3.8h, v11.8h\n"
"fmla v28.8h, v1.8h, v11.8h\n"
"fmla v29.8h, v0.8h, v11.8h\n"
"tbz %x[n_channels], #2, 82f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #1, 81f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 84f\n"
- "ld1 { v12.h }[6], [x20]\n"
+ "ld1 { v12.h }[6], [x19]\n"
"b 84f\n"
"81:" // Tile loop: Oddments: Load inputs: (0, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 84f\n"
- "ld1 { v12.h }[4], [x20]\n"
+ "ld1 { v12.h }[4], [x19]\n"
"b 84f\n"
"82:" // Tile loop: Oddments: Load inputs: (0, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 83f\n"
- "ldr s12, [x20], #0x4\n"
+ "ldr s12, [x19], #0x4\n"
"tbz %x[n_channels], #0, 84f\n"
- "ld1 { v12.h }[2], [x20]\n"
+ "ld1 { v12.h }[2], [x19]\n"
"b 84f\n"
"83:" // Tile loop: Oddments: Load inputs: (0, 3): Bit 2: Unset: Bit 1: Unset
- "ldr h12, [x20, #0x0]\n"
+ "ldr h12, [x19, #0x0]\n"
"84:" // Tile loop: Oddments: Load inputs: (0, 3): Bit 2: End
"fmla v17.8h, v2.8h, v12.8h\n"
"fmla v18.8h, v1.8h, v12.8h\n"
- "add x20, x14, XZR\n"
+ "add x19, x13, XZR\n"
"fmla v19.8h, v0.8h, v12.8h\n"
"tbz %x[n_channels], #2, 86f\n"
- "ldr d10, [x20], #0x8\n"
+ "ldr d10, [x19], #0x8\n"
"tbz %x[n_channels], #1, 85f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 88f\n"
- "ld1 { v10.h }[6], [x20]\n"
+ "ld1 { v10.h }[6], [x19]\n"
"b 88f\n"
"85:" // Tile loop: Oddments: Load inputs: (2, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 88f\n"
- "ld1 { v10.h }[4], [x20]\n"
+ "ld1 { v10.h }[4], [x19]\n"
"b 88f\n"
"86:" // Tile loop: Oddments: Load inputs: (2, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 87f\n"
- "ldr s10, [x20], #0x4\n"
+ "ldr s10, [x19], #0x4\n"
"tbz %x[n_channels], #0, 88f\n"
- "ld1 { v10.h }[2], [x20]\n"
+ "ld1 { v10.h }[2], [x19]\n"
"b 88f\n"
"87:" // Tile loop: Oddments: Load inputs: (2, 0): Bit 2: Unset: Bit 1: Unset
- "ldr h10, [x20, #0x0]\n"
+ "ldr h10, [x19, #0x0]\n"
"88:" // Tile loop: Oddments: Load inputs: (2, 0): Bit 2: End
"fmla v16.8h, v6.8h, v10.8h\n"
"fmla v20.8h, v3.8h, v10.8h\n"
- "add x20, x12, x28\n"
+ "add x19, x11, x27\n"
"fmla v24.8h, v0.8h, v10.8h\n"
"tbz %x[n_channels], #2, 90f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #1, 89f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 92f\n"
- "ld1 { v11.h }[6], [x20]\n"
+ "ld1 { v11.h }[6], [x19]\n"
"b 92f\n"
"89:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 92f\n"
- "ld1 { v11.h }[4], [x20]\n"
+ "ld1 { v11.h }[4], [x19]\n"
"b 92f\n"
"90:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 91f\n"
- "ldr s11, [x20], #0x4\n"
+ "ldr s11, [x19], #0x4\n"
"tbz %x[n_channels], #0, 92f\n"
- "ld1 { v11.h }[2], [x20]\n"
+ "ld1 { v11.h }[2], [x19]\n"
"b 92f\n"
"91:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 2: Unset: Bit 1: Unset
- "ldr h11, [x20, #0x0]\n"
+ "ldr h11, [x19, #0x0]\n"
"92:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 2: End
"fmla v22.8h, v8.8h, v11.8h\n"
"fmla v23.8h, v7.8h, v11.8h\n"
- "add x20, x14, x25\n"
+ "add x19, x13, x24\n"
"fmla v26.8h, v5.8h, v11.8h\n"
"fmla v27.8h, v4.8h, v11.8h\n"
"fmla v30.8h, v2.8h, v11.8h\n"
"fmla v31.8h, v1.8h, v11.8h\n"
"tbz %x[n_channels], #2, 94f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #1, 93f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 96f\n"
- "ld1 { v12.h }[6], [x20]\n"
+ "ld1 { v12.h }[6], [x19]\n"
"b 96f\n"
"93:" // Tile loop: Oddments: Load inputs: (2, 5): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 96f\n"
- "ld1 { v12.h }[4], [x20]\n"
+ "ld1 { v12.h }[4], [x19]\n"
"b 96f\n"
"94:" // Tile loop: Oddments: Load inputs: (2, 5): Bit 2: Unset
"tbz %x[n_channels], #1, 95f\n"
- "ldr s12, [x20], #0x4\n"
+ "ldr s12, [x19], #0x4\n"
"tbz %x[n_channels], #0, 96f\n"
- "ld1 { v12.h }[2], [x20]\n"
+ "ld1 { v12.h }[2], [x19]\n"
"b 96f\n"
"95:" // Tile loop: Oddments: Load inputs: (2, 5): Bit 2: Unset: Bit 1: Unset
- "ldr h12, [x20, #0x0]\n"
+ "ldr h12, [x19, #0x0]\n"
"96:" // Tile loop: Oddments: Load inputs: (2, 5): Bit 2: End
"fmla v19.8h, v8.8h, v12.8h\n"
"fmla v23.8h, v5.8h, v12.8h\n"
- "add x20, x12, XZR\n"
+ "add x19, x11, XZR\n"
"fmla v27.8h, v2.8h, v12.8h\n"
"tbz %x[n_channels], #2, 98f\n"
- "ldr d10, [x20], #0x8\n"
+ "ldr d10, [x19], #0x8\n"
"tbz %x[n_channels], #1, 97f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 100f\n"
- "ld1 { v10.h }[6], [x20]\n"
+ "ld1 { v10.h }[6], [x19]\n"
"b 100f\n"
"97:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 100f\n"
- "ld1 { v10.h }[4], [x20]\n"
+ "ld1 { v10.h }[4], [x19]\n"
"b 100f\n"
"98:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 99f\n"
- "ldr s10, [x20], #0x4\n"
+ "ldr s10, [x19], #0x4\n"
"tbz %x[n_channels], #0, 100f\n"
- "ld1 { v10.h }[2], [x20]\n"
+ "ld1 { v10.h }[2], [x19]\n"
"b 100f\n"
"99:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 2: Unset: Bit 1: Unset
- "ldr h10, [x20, #0x0]\n"
+ "ldr h10, [x19, #0x0]\n"
"100:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 2: End
"fmla v20.8h, v6.8h, v10.8h\n"
"fmla v24.8h, v3.8h, v10.8h\n"
- "add x20, x9, x17\n"
+ "add x19, x28, x16\n"
"fmla v28.8h, v0.8h, v10.8h\n"
"tbz %x[n_channels], #2, 102f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #1, 101f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 104f\n"
- "ld1 { v11.h }[6], [x20]\n"
+ "ld1 { v11.h }[6], [x19]\n"
"b 104f\n"
"101:" // Tile loop: Oddments: Load inputs: (4, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 104f\n"
- "ld1 { v11.h }[4], [x20]\n"
+ "ld1 { v11.h }[4], [x19]\n"
"b 104f\n"
"102:" // Tile loop: Oddments: Load inputs: (4, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 103f\n"
- "ldr s11, [x20], #0x4\n"
+ "ldr s11, [x19], #0x4\n"
"tbz %x[n_channels], #0, 104f\n"
- "ld1 { v11.h }[2], [x20]\n"
+ "ld1 { v11.h }[2], [x19]\n"
"b 104f\n"
"103:" // Tile loop: Oddments: Load inputs: (4, 2): Bit 2: Unset: Bit 1: Unset
- "ldr h11, [x20, #0x0]\n"
+ "ldr h11, [x19, #0x0]\n"
"104:" // Tile loop: Oddments: Load inputs: (4, 2): Bit 2: End
"fmla v24.8h, v8.8h, v11.8h\n"
"fmla v25.8h, v7.8h, v11.8h\n"
- "add x20, x12, x25\n"
+ "add x19, x11, x24\n"
"fmla v26.8h, v6.8h, v11.8h\n"
"fmla v28.8h, v5.8h, v11.8h\n"
"fmla v29.8h, v4.8h, v11.8h\n"
"fmla v30.8h, v3.8h, v11.8h\n"
"tbz %x[n_channels], #2, 106f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #1, 105f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 108f\n"
- "ld1 { v12.h }[6], [x20]\n"
+ "ld1 { v12.h }[6], [x19]\n"
"b 108f\n"
"105:" // Tile loop: Oddments: Load inputs: (3, 5): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 108f\n"
- "ld1 { v12.h }[4], [x20]\n"
+ "ld1 { v12.h }[4], [x19]\n"
"b 108f\n"
"106:" // Tile loop: Oddments: Load inputs: (3, 5): Bit 2: Unset
"tbz %x[n_channels], #1, 107f\n"
- "ldr s12, [x20], #0x4\n"
+ "ldr s12, [x19], #0x4\n"
"tbz %x[n_channels], #0, 108f\n"
- "ld1 { v12.h }[2], [x20]\n"
+ "ld1 { v12.h }[2], [x19]\n"
"b 108f\n"
"107:" // Tile loop: Oddments: Load inputs: (3, 5): Bit 2: Unset: Bit 1: Unset
- "ldr h12, [x20, #0x0]\n"
+ "ldr h12, [x19, #0x0]\n"
"108:" // Tile loop: Oddments: Load inputs: (3, 5): Bit 2: End
"fmla v23.8h, v8.8h, v12.8h\n"
"fmla v27.8h, v5.8h, v12.8h\n"
- "add x20, x26, x17\n"
+ "add x19, x25, x16\n"
"fmla v31.8h, v2.8h, v12.8h\n"
"tbz %x[n_channels], #2, 110f\n"
- "ldr d10, [x20], #0x8\n"
+ "ldr d10, [x19], #0x8\n"
"tbz %x[n_channels], #1, 109f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 112f\n"
- "ld1 { v10.h }[6], [x20]\n"
+ "ld1 { v10.h }[6], [x19]\n"
"b 112f\n"
"109:" // Tile loop: Oddments: Load inputs: (5, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 112f\n"
- "ld1 { v10.h }[4], [x20]\n"
+ "ld1 { v10.h }[4], [x19]\n"
"b 112f\n"
"110:" // Tile loop: Oddments: Load inputs: (5, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 111f\n"
- "ldr s10, [x20], #0x4\n"
+ "ldr s10, [x19], #0x4\n"
"tbz %x[n_channels], #0, 112f\n"
- "ld1 { v10.h }[2], [x20]\n"
+ "ld1 { v10.h }[2], [x19]\n"
"b 112f\n"
"111:" // Tile loop: Oddments: Load inputs: (5, 2): Bit 2: Unset: Bit 1: Unset
- "ldr h10, [x20, #0x0]\n"
+ "ldr h10, [x19, #0x0]\n"
"112:" // Tile loop: Oddments: Load inputs: (5, 2): Bit 2: End
"fmla v28.8h, v8.8h, v10.8h\n"
"fmla v29.8h, v7.8h, v10.8h\n"
- "add x20, x9, x11\n"
+ "add x19, x28, x10\n"
"fmla v30.8h, v6.8h, v10.8h\n"
"tbz %x[n_channels], #2, 114f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #1, 113f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 116f\n"
- "ld1 { v11.h }[6], [x20]\n"
+ "ld1 { v11.h }[6], [x19]\n"
"b 116f\n"
"113:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 116f\n"
- "ld1 { v11.h }[4], [x20]\n"
+ "ld1 { v11.h }[4], [x19]\n"
"b 116f\n"
"114:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 115f\n"
- "ldr s11, [x20], #0x4\n"
+ "ldr s11, [x19], #0x4\n"
"tbz %x[n_channels], #0, 116f\n"
- "ld1 { v11.h }[2], [x20]\n"
+ "ld1 { v11.h }[2], [x19]\n"
"b 116f\n"
"115:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 2: Unset: Bit 1: Unset
- "ldr h11, [x20, #0x0]\n"
+ "ldr h11, [x19, #0x0]\n"
"116:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 2: End
"fmla v25.8h, v8.8h, v11.8h\n"
"fmla v26.8h, v7.8h, v11.8h\n"
- "add x20, x26, x11\n"
+ "add x19, x25, x10\n"
"fmla v27.8h, v6.8h, v11.8h\n"
"fmla v29.8h, v5.8h, v11.8h\n"
"fmla v30.8h, v4.8h, v11.8h\n"
"fmla v31.8h, v3.8h, v11.8h\n"
"tbz %x[n_channels], #2, 118f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #1, 117f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 120f\n"
- "ld1 { v12.h }[6], [x20]\n"
+ "ld1 { v12.h }[6], [x19]\n"
"b 120f\n"
"117:" // Tile loop: Oddments: Load inputs: (5, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 120f\n"
- "ld1 { v12.h }[4], [x20]\n"
+ "ld1 { v12.h }[4], [x19]\n"
"b 120f\n"
"118:" // Tile loop: Oddments: Load inputs: (5, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 119f\n"
- "ldr s12, [x20], #0x4\n"
+ "ldr s12, [x19], #0x4\n"
"tbz %x[n_channels], #0, 120f\n"
- "ld1 { v12.h }[2], [x20]\n"
+ "ld1 { v12.h }[2], [x19]\n"
"b 120f\n"
"119:" // Tile loop: Oddments: Load inputs: (5, 3): Bit 2: Unset: Bit 1: Unset
- "ldr h12, [x20, #0x0]\n"
+ "ldr h12, [x19, #0x0]\n"
"120:" // Tile loop: Oddments: Load inputs: (5, 3): Bit 2: End
"fmla v29.8h, v8.8h, v12.8h\n"
"fmla v30.8h, v7.8h, v12.8h\n"
- "add x20, x15, x4\n"
+ "add x19, x14, x5\n"
"fmla v31.8h, v6.8h, v12.8h\n"
"tbz %x[n_channels], #2, 122f\n"
- "ldr d10, [x20], #0x8\n"
+ "ldr d10, [x19], #0x8\n"
"tbz %x[n_channels], #1, 121f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 124f\n"
- "ld1 { v10.h }[6], [x20]\n"
+ "ld1 { v10.h }[6], [x19]\n"
"b 124f\n"
"121:" // Tile loop: Oddments: Load inputs: (1, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 124f\n"
- "ld1 { v10.h }[4], [x20]\n"
+ "ld1 { v10.h }[4], [x19]\n"
"b 124f\n"
"122:" // Tile loop: Oddments: Load inputs: (1, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 123f\n"
- "ldr s10, [x20], #0x4\n"
+ "ldr s10, [x19], #0x4\n"
"tbz %x[n_channels], #0, 124f\n"
- "ld1 { v10.h }[2], [x20]\n"
+ "ld1 { v10.h }[2], [x19]\n"
"b 124f\n"
"123:" // Tile loop: Oddments: Load inputs: (1, 1): Bit 2: Unset: Bit 1: Unset
- "ldr h10, [x20, #0x0]\n"
+ "ldr h10, [x19, #0x0]\n"
"124:" // Tile loop: Oddments: Load inputs: (1, 1): Bit 2: End
"fmla v16.8h, v4.8h, v10.8h\n"
"fmla v17.8h, v3.8h, v10.8h\n"
- "add x20, x15, x28\n"
+ "add x19, x14, x27\n"
"fmla v20.8h, v1.8h, v10.8h\n"
"fmla v21.8h, v0.8h, v10.8h\n"
"tbz %x[n_channels], #2, 126f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #1, 125f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 128f\n"
- "ld1 { v11.h }[6], [x20]\n"
+ "ld1 { v11.h }[6], [x19]\n"
"b 128f\n"
"125:" // Tile loop: Oddments: Load inputs: (1, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 128f\n"
- "ld1 { v11.h }[4], [x20]\n"
+ "ld1 { v11.h }[4], [x19]\n"
"b 128f\n"
"126:" // Tile loop: Oddments: Load inputs: (1, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 127f\n"
- "ldr s11, [x20], #0x4\n"
+ "ldr s11, [x19], #0x4\n"
"tbz %x[n_channels], #0, 128f\n"
- "ld1 { v11.h }[2], [x20]\n"
+ "ld1 { v11.h }[2], [x19]\n"
"b 128f\n"
"127:" // Tile loop: Oddments: Load inputs: (1, 4): Bit 2: Unset: Bit 1: Unset
- "ldr h11, [x20, #0x0]\n"
+ "ldr h11, [x19, #0x0]\n"
"128:" // Tile loop: Oddments: Load inputs: (1, 4): Bit 2: End
"fmla v18.8h, v5.8h, v11.8h\n"
"fmla v19.8h, v4.8h, v11.8h\n"
- "add x20, x9, x4\n"
+ "add x19, x28, x5\n"
"fmla v22.8h, v2.8h, v11.8h\n"
"fmla v23.8h, v1.8h, v11.8h\n"
"tbz %x[n_channels], #2, 130f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #1, 129f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 132f\n"
- "ld1 { v12.h }[6], [x20]\n"
+ "ld1 { v12.h }[6], [x19]\n"
"b 132f\n"
"129:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 132f\n"
- "ld1 { v12.h }[4], [x20]\n"
+ "ld1 { v12.h }[4], [x19]\n"
"b 132f\n"
"130:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 131f\n"
- "ldr s12, [x20], #0x4\n"
+ "ldr s12, [x19], #0x4\n"
"tbz %x[n_channels], #0, 132f\n"
- "ld1 { v12.h }[2], [x20]\n"
+ "ld1 { v12.h }[2], [x19]\n"
"b 132f\n"
"131:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 2: Unset: Bit 1: Unset
- "ldr h12, [x20, #0x0]\n"
+ "ldr h12, [x19, #0x0]\n"
"132:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 2: End
"fmla v24.8h, v7.8h, v12.8h\n"
"fmla v25.8h, v6.8h, v12.8h\n"
- "add x20, x9, x28\n"
+ "add x19, x28, x27\n"
"fmla v28.8h, v4.8h, v12.8h\n"
"fmla v29.8h, v3.8h, v12.8h\n"
"tbz %x[n_channels], #2, 134f\n"
- "ldr d10, [x20], #0x8\n"
+ "ldr d10, [x19], #0x8\n"
"tbz %x[n_channels], #1, 133f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 136f\n"
- "ld1 { v10.h }[6], [x20]\n"
+ "ld1 { v10.h }[6], [x19]\n"
"b 136f\n"
"133:" // Tile loop: Oddments: Load inputs: (4, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 136f\n"
- "ld1 { v10.h }[4], [x20]\n"
+ "ld1 { v10.h }[4], [x19]\n"
"b 136f\n"
"134:" // Tile loop: Oddments: Load inputs: (4, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 135f\n"
- "ldr s10, [x20], #0x4\n"
+ "ldr s10, [x19], #0x4\n"
"tbz %x[n_channels], #0, 136f\n"
- "ld1 { v10.h }[2], [x20]\n"
+ "ld1 { v10.h }[2], [x19]\n"
"b 136f\n"
"135:" // Tile loop: Oddments: Load inputs: (4, 4): Bit 2: Unset: Bit 1: Unset
- "ldr h10, [x20, #0x0]\n"
+ "ldr h10, [x19, #0x0]\n"
"136:" // Tile loop: Oddments: Load inputs: (4, 4): Bit 2: End
"fmla v26.8h, v8.8h, v10.8h\n"
"fmla v27.8h, v7.8h, v10.8h\n"
@@ -1548,186 +1548,186 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"fmin v30.8h, v30.8h, v14.8h\n"
"fmin v31.8h, v31.8h, v14.8h\n"
"tbz %x[n_channels], #2, 138f\n"
- "mov x23, x8\n"
- "mov x22, x10\n"
- "st1 { v16.d }[0], [x23], x5\n"
- "mov x21, x27\n"
- "mov x20, x24\n"
- "st1 { v20.d }[0], [x22], x5\n"
- "st1 { v24.d }[0], [x21], x5\n"
- "add x8, x8, #0x8\n"
- "add x10, x10, #0x8\n"
- "st1 { v28.d }[0], [x20], x5\n"
- "add x27, x27, #0x8\n"
- "add x24, x24, #0x8\n"
- "st1 { v17.d }[0], [x23], x5\n"
- "st1 { v21.d }[0], [x22], x5\n"
- "st1 { v25.d }[0], [x21], x5\n"
- "st1 { v29.d }[0], [x20], x5\n"
- "st1 { v18.d }[0], [x23], x5\n"
- "st1 { v22.d }[0], [x22], x5\n"
- "st1 { v26.d }[0], [x21], x5\n"
- "st1 { v30.d }[0], [x20], x5\n"
- "st1 { v19.d }[0], [x23]\n"
- "st1 { v23.d }[0], [x22]\n"
- "st1 { v27.d }[0], [x21]\n"
- "st1 { v31.d }[0], [x20]\n"
+ "mov x22, x17\n"
+ "mov x21, x9\n"
+ "mov x20, x26\n"
+ "st1 { v16.d }[0], [x22], x6\n"
+ "mov x19, x23\n"
+ "st1 { v20.d }[0], [x21], x6\n"
+ "add x17, x17, #0x8\n"
+ "st1 { v24.d }[0], [x20], x6\n"
+ "add x9, x9, #0x8\n"
+ "add x26, x26, #0x8\n"
+ "st1 { v28.d }[0], [x19], x6\n"
+ "add x23, x23, #0x8\n"
+ "st1 { v17.d }[0], [x22], x6\n"
+ "st1 { v21.d }[0], [x21], x6\n"
+ "st1 { v25.d }[0], [x20], x6\n"
+ "st1 { v29.d }[0], [x19], x6\n"
+ "st1 { v18.d }[0], [x22], x6\n"
+ "st1 { v22.d }[0], [x21], x6\n"
+ "st1 { v26.d }[0], [x20], x6\n"
+ "st1 { v30.d }[0], [x19], x6\n"
+ "st1 { v19.d }[0], [x22]\n"
+ "st1 { v23.d }[0], [x21]\n"
+ "st1 { v27.d }[0], [x20]\n"
+ "st1 { v31.d }[0], [x19]\n"
"tbz %x[n_channels], #1, 137f\n"
- "mov x23, x8\n"
- "mov x22, x10\n"
- "st1 { v16.s }[2], [x23], x5\n"
- "mov x21, x27\n"
- "mov x20, x24\n"
- "st1 { v20.s }[2], [x22], x5\n"
- "st1 { v24.s }[2], [x21], x5\n"
- "add x8, x8, #0x4\n"
- "add x10, x10, #0x4\n"
- "st1 { v28.s }[2], [x20], x5\n"
- "add x27, x27, #0x4\n"
- "add x24, x24, #0x4\n"
- "st1 { v17.s }[2], [x23], x5\n"
- "st1 { v21.s }[2], [x22], x5\n"
- "st1 { v25.s }[2], [x21], x5\n"
- "st1 { v29.s }[2], [x20], x5\n"
- "st1 { v18.s }[2], [x23], x5\n"
- "st1 { v22.s }[2], [x22], x5\n"
- "st1 { v26.s }[2], [x21], x5\n"
- "st1 { v30.s }[2], [x20], x5\n"
- "st1 { v19.s }[2], [x23]\n"
- "st1 { v23.s }[2], [x22]\n"
- "st1 { v27.s }[2], [x21]\n"
- "st1 { v31.s }[2], [x20]\n"
+ "mov x22, x17\n"
+ "mov x21, x9\n"
+ "mov x20, x26\n"
+ "mov x19, x23\n"
+ "st1 { v16.s }[2], [x22], x6\n"
+ "st1 { v20.s }[2], [x21], x6\n"
+ "add x17, x17, #0x4\n"
+ "add x9, x9, #0x4\n"
+ "st1 { v24.s }[2], [x20], x6\n"
+ "add x26, x26, #0x4\n"
+ "add x23, x23, #0x4\n"
+ "st1 { v28.s }[2], [x19], x6\n"
+ "st1 { v17.s }[2], [x22], x6\n"
+ "st1 { v21.s }[2], [x21], x6\n"
+ "st1 { v25.s }[2], [x20], x6\n"
+ "st1 { v29.s }[2], [x19], x6\n"
+ "st1 { v18.s }[2], [x22], x6\n"
+ "st1 { v22.s }[2], [x21], x6\n"
+ "st1 { v26.s }[2], [x20], x6\n"
+ "st1 { v30.s }[2], [x19], x6\n"
+ "st1 { v19.s }[2], [x22]\n"
+ "st1 { v23.s }[2], [x21]\n"
+ "st1 { v27.s }[2], [x20]\n"
+ "st1 { v31.s }[2], [x19]\n"
"tbz %x[n_channels], #0, 140f\n"
- "mov x23, x8\n"
- "mov x22, x10\n"
- "st1 { v16.h }[6], [x23], x5\n"
- "mov x21, x27\n"
- "mov x20, x24\n"
- "st1 { v20.h }[6], [x22], x5\n"
- "st1 { v24.h }[6], [x21], x5\n"
- "st1 { v28.h }[6], [x20], x5\n"
- "st1 { v17.h }[6], [x23], x5\n"
- "st1 { v21.h }[6], [x22], x5\n"
- "st1 { v25.h }[6], [x21], x5\n"
- "st1 { v29.h }[6], [x20], x5\n"
- "st1 { v18.h }[6], [x23], x5\n"
- "st1 { v22.h }[6], [x22], x5\n"
- "st1 { v26.h }[6], [x21], x5\n"
- "st1 { v30.h }[6], [x20], x5\n"
- "st1 { v19.h }[6], [x23]\n"
- "st1 { v23.h }[6], [x22]\n"
- "st1 { v27.h }[6], [x21]\n"
- "st1 { v31.h }[6], [x20]\n"
+ "mov x22, x17\n"
+ "mov x21, x9\n"
+ "mov x20, x26\n"
+ "mov x19, x23\n"
+ "st1 { v16.h }[6], [x22], x6\n"
+ "st1 { v20.h }[6], [x21], x6\n"
+ "st1 { v24.h }[6], [x20], x6\n"
+ "st1 { v28.h }[6], [x19], x6\n"
+ "st1 { v17.h }[6], [x22], x6\n"
+ "st1 { v21.h }[6], [x21], x6\n"
+ "st1 { v25.h }[6], [x20], x6\n"
+ "st1 { v29.h }[6], [x19], x6\n"
+ "st1 { v18.h }[6], [x22], x6\n"
+ "st1 { v22.h }[6], [x21], x6\n"
+ "st1 { v26.h }[6], [x20], x6\n"
+ "st1 { v30.h }[6], [x19], x6\n"
+ "st1 { v19.h }[6], [x22]\n"
+ "st1 { v23.h }[6], [x21]\n"
+ "st1 { v27.h }[6], [x20]\n"
+ "st1 { v31.h }[6], [x19]\n"
"b 140f\n"
"137:" // Tile loop: Oddments: Store: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 140f\n"
- "mov x23, x8\n"
- "mov x22, x10\n"
- "st1 { v16.h }[4], [x23], x5\n"
- "mov x21, x27\n"
- "mov x20, x24\n"
- "st1 { v20.h }[4], [x22], x5\n"
- "st1 { v24.h }[4], [x21], x5\n"
- "st1 { v28.h }[4], [x20], x5\n"
- "st1 { v17.h }[4], [x23], x5\n"
- "st1 { v21.h }[4], [x22], x5\n"
- "st1 { v25.h }[4], [x21], x5\n"
- "st1 { v29.h }[4], [x20], x5\n"
- "st1 { v18.h }[4], [x23], x5\n"
- "st1 { v22.h }[4], [x22], x5\n"
- "st1 { v26.h }[4], [x21], x5\n"
- "st1 { v30.h }[4], [x20], x5\n"
- "st1 { v19.h }[4], [x23]\n"
- "st1 { v23.h }[4], [x22]\n"
- "st1 { v27.h }[4], [x21]\n"
- "st1 { v31.h }[4], [x20]\n"
+ "mov x22, x17\n"
+ "mov x21, x9\n"
+ "st1 { v16.h }[4], [x22], x6\n"
+ "mov x20, x26\n"
+ "mov x19, x23\n"
+ "st1 { v20.h }[4], [x21], x6\n"
+ "st1 { v24.h }[4], [x20], x6\n"
+ "st1 { v28.h }[4], [x19], x6\n"
+ "st1 { v17.h }[4], [x22], x6\n"
+ "st1 { v21.h }[4], [x21], x6\n"
+ "st1 { v25.h }[4], [x20], x6\n"
+ "st1 { v29.h }[4], [x19], x6\n"
+ "st1 { v18.h }[4], [x22], x6\n"
+ "st1 { v22.h }[4], [x21], x6\n"
+ "st1 { v26.h }[4], [x20], x6\n"
+ "st1 { v30.h }[4], [x19], x6\n"
+ "st1 { v19.h }[4], [x22]\n"
+ "st1 { v23.h }[4], [x21]\n"
+ "st1 { v27.h }[4], [x20]\n"
+ "st1 { v31.h }[4], [x19]\n"
"b 140f\n"
"138:" // Tile loop: Oddments: Store: Bit 2: Unset
"tbz %x[n_channels], #1, 139f\n"
- "mov x23, x8\n"
- "mov x22, x10\n"
- "st1 { v16.s }[0], [x23], x5\n"
- "mov x21, x27\n"
- "mov x20, x24\n"
- "st1 { v20.s }[0], [x22], x5\n"
- "st1 { v24.s }[0], [x21], x5\n"
- "add x8, x8, #0x4\n"
- "add x10, x10, #0x4\n"
- "st1 { v28.s }[0], [x20], x5\n"
- "add x27, x27, #0x4\n"
- "add x24, x24, #0x4\n"
- "st1 { v17.s }[0], [x23], x5\n"
- "st1 { v21.s }[0], [x22], x5\n"
- "st1 { v25.s }[0], [x21], x5\n"
- "st1 { v29.s }[0], [x20], x5\n"
- "st1 { v18.s }[0], [x23], x5\n"
- "st1 { v22.s }[0], [x22], x5\n"
- "st1 { v26.s }[0], [x21], x5\n"
- "st1 { v30.s }[0], [x20], x5\n"
- "st1 { v19.s }[0], [x23]\n"
- "st1 { v23.s }[0], [x22]\n"
- "st1 { v27.s }[0], [x21]\n"
- "st1 { v31.s }[0], [x20]\n"
+ "mov x22, x17\n"
+ "mov x21, x9\n"
+ "st1 { v16.s }[0], [x22], x6\n"
+ "mov x20, x26\n"
+ "mov x19, x23\n"
+ "st1 { v20.s }[0], [x21], x6\n"
+ "st1 { v24.s }[0], [x20], x6\n"
+ "add x17, x17, #0x4\n"
+ "add x9, x9, #0x4\n"
+ "st1 { v28.s }[0], [x19], x6\n"
+ "add x26, x26, #0x4\n"
+ "add x23, x23, #0x4\n"
+ "st1 { v17.s }[0], [x22], x6\n"
+ "st1 { v21.s }[0], [x21], x6\n"
+ "st1 { v25.s }[0], [x20], x6\n"
+ "st1 { v29.s }[0], [x19], x6\n"
+ "st1 { v18.s }[0], [x22], x6\n"
+ "st1 { v22.s }[0], [x21], x6\n"
+ "st1 { v26.s }[0], [x20], x6\n"
+ "st1 { v30.s }[0], [x19], x6\n"
+ "st1 { v19.s }[0], [x22]\n"
+ "st1 { v23.s }[0], [x21]\n"
+ "st1 { v27.s }[0], [x20]\n"
+ "st1 { v31.s }[0], [x19]\n"
"tbz %x[n_channels], #0, 140f\n"
- "mov x23, x8\n"
- "mov x22, x10\n"
- "st1 { v16.h }[2], [x23], x5\n"
- "mov x21, x27\n"
- "mov x20, x24\n"
- "st1 { v20.h }[2], [x22], x5\n"
- "st1 { v24.h }[2], [x21], x5\n"
- "st1 { v28.h }[2], [x20], x5\n"
- "st1 { v17.h }[2], [x23], x5\n"
- "st1 { v21.h }[2], [x22], x5\n"
- "st1 { v25.h }[2], [x21], x5\n"
- "st1 { v29.h }[2], [x20], x5\n"
- "st1 { v18.h }[2], [x23], x5\n"
- "st1 { v22.h }[2], [x22], x5\n"
- "st1 { v26.h }[2], [x21], x5\n"
- "st1 { v30.h }[2], [x20], x5\n"
- "st1 { v19.h }[2], [x23]\n"
- "st1 { v23.h }[2], [x22]\n"
- "st1 { v27.h }[2], [x21]\n"
- "st1 { v31.h }[2], [x20]\n"
+ "mov x22, x17\n"
+ "mov x21, x9\n"
+ "mov x20, x26\n"
+ "mov x19, x23\n"
+ "st1 { v16.h }[2], [x22], x6\n"
+ "st1 { v20.h }[2], [x21], x6\n"
+ "st1 { v24.h }[2], [x20], x6\n"
+ "st1 { v28.h }[2], [x19], x6\n"
+ "st1 { v17.h }[2], [x22], x6\n"
+ "st1 { v21.h }[2], [x21], x6\n"
+ "st1 { v25.h }[2], [x20], x6\n"
+ "st1 { v29.h }[2], [x19], x6\n"
+ "st1 { v18.h }[2], [x22], x6\n"
+ "st1 { v22.h }[2], [x21], x6\n"
+ "st1 { v26.h }[2], [x20], x6\n"
+ "st1 { v30.h }[2], [x19], x6\n"
+ "st1 { v19.h }[2], [x22]\n"
+ "st1 { v23.h }[2], [x21]\n"
+ "st1 { v27.h }[2], [x20]\n"
+ "st1 { v31.h }[2], [x19]\n"
"b 140f\n"
"139:" // Tile loop: Oddments: Store: Bit 2: Unset: Bit 1: Unset
- "mov x23, x8\n"
- "mov x22, x10\n"
- "st1 { v16.h }[0], [x23], x5\n"
- "mov x21, x27\n"
- "mov x20, x24\n"
- "st1 { v20.h }[0], [x22], x5\n"
- "st1 { v24.h }[0], [x21], x5\n"
- "st1 { v28.h }[0], [x20], x5\n"
- "st1 { v17.h }[0], [x23], x5\n"
- "st1 { v21.h }[0], [x22], x5\n"
- "st1 { v25.h }[0], [x21], x5\n"
- "st1 { v29.h }[0], [x20], x5\n"
- "st1 { v18.h }[0], [x23], x5\n"
- "st1 { v22.h }[0], [x22], x5\n"
- "st1 { v26.h }[0], [x21], x5\n"
- "st1 { v30.h }[0], [x20], x5\n"
- "st1 { v19.h }[0], [x23]\n"
- "st1 { v23.h }[0], [x22]\n"
- "st1 { v27.h }[0], [x21]\n"
- "st1 { v31.h }[0], [x20]\n"
+ "mov x22, x17\n"
+ "mov x21, x9\n"
+ "st1 { v16.h }[0], [x22], x6\n"
+ "mov x20, x26\n"
+ "mov x19, x23\n"
+ "st1 { v20.h }[0], [x21], x6\n"
+ "st1 { v24.h }[0], [x20], x6\n"
+ "st1 { v28.h }[0], [x19], x6\n"
+ "st1 { v17.h }[0], [x22], x6\n"
+ "st1 { v21.h }[0], [x21], x6\n"
+ "st1 { v25.h }[0], [x20], x6\n"
+ "st1 { v29.h }[0], [x19], x6\n"
+ "st1 { v18.h }[0], [x22], x6\n"
+ "st1 { v22.h }[0], [x21], x6\n"
+ "st1 { v26.h }[0], [x20], x6\n"
+ "st1 { v30.h }[0], [x19], x6\n"
+ "st1 { v19.h }[0], [x22]\n"
+ "st1 { v23.h }[0], [x21]\n"
+ "st1 { v27.h }[0], [x20]\n"
+ "st1 { v31.h }[0], [x19]\n"
"140:" // Tile loop: Oddments: Store: Bit 2: End
"141:" // Tile loop: End
- "ldr x26, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x27, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "add x26, x26, #0x1\n"
- "add x21, x27, #0x1\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "cmp x26, x20\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "csel x27, x27, x21, LT\n"
- "csel x26, x26, XZR, LT\n"
- "cmp x27, x20\n"
+ "ldr x25, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x26, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "add x25, x25, #0x1\n"
+ "add x20, x26, #0x1\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "cmp x25, x19\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "csel x26, x26, x20, LT\n"
+ "csel x25, x25, XZR, LT\n"
+ "cmp x26, x19\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
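
The tail ("Oddments") hunks above all follow one scheme: after the main 8-wide fp16 loop, the low three bits of n_channels select the remaining work, with tbz testing bit #2 (four halves via a 64-bit "d" transfer), then bit #1 (two halves via a 32-bit "s" transfer), then bit #0 (a single "h" transfer). Below is a minimal C++ sketch of that bit-test cascade; copy_oddments is a hypothetical illustration, not part of the library, and it shows only the tail-size selection, not the convolution arithmetic.

#include <cstddef>
#include <cstdint>

using fp16 = uint16_t;  // stand-in for __fp16; raw 16-bit storage only

// Handle the 0-7 leftover half-precision values that the 8-wide vector
// loop did not cover, mirroring the tbz #2 / #1 / #0 cascade above.
static void copy_oddments(const fp16 *src, fp16 *dst, size_t n_channels)
{
    const size_t rem = n_channels & 7;  // remainder after the q-register loop
    if (rem & 4) {                      // bit 2 set: one 64-bit ("d") transfer
        for (int i = 0; i < 4; ++i) *dst++ = *src++;
    }
    if (rem & 2) {                      // bit 1 set: one 32-bit ("s") transfer
        for (int i = 0; i < 2; ++i) *dst++ = *src++;
    }
    if (rem & 1) {                      // bit 0 set: one 16-bit ("h") transfer
        *dst++ = *src++;
    }
}

Testing fixed bits instead of looping element by element bounds the tail at three conditional blocks regardless of channel count, which is why the assembly branches on tbz rather than decrementing a counter.
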
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
index 16326150fd..e493104c03 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -98,210 +98,211 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
activation_min, activation_max);
__asm__ __volatile__(
- "mov x8, #0x10\n" // cntb _, ALL, #1
- "lsr x17, %x[n_channels], #0x3\n"
- "ldr x16, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "ldr x15, [%x[params_struct], %[offsetof_args_params]]\n"
+ "mov x17, #0x10\n" // cntb _, ALL, #1
+ "lsr x16, %x[n_channels], #0x3\n"
+ "ldr x15, [%x[params_struct], %[offsetof_args_outptrs]]\n"
+ "ldr x14, [%x[params_struct], %[offsetof_args_params]]\n"
"add x20, %x[params_struct], %[offsetof_args_min]\n"
+ "add x19, %x[params_struct], %[offsetof_args_max]\n"
"ld1r { v15.8h }, [x20]\n"
- "add x20, %x[params_struct], %[offsetof_args_max]\n"
- "ld1r { v14.8h }, [x20]\n"
- "add x14, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "mov x13, #0x0\n"
- "sub x12, XZR, x8\n"
- "cbz x17, 3f\n"
- "ldr q13, [x15, #0x0]\n"
- "ldr q0, [x15, #0x10]\n"
- "cmp x8, x17, LSL #4\n"
- "ldr q1, [x15, #0x20]\n"
- "ldr q2, [x15, #0x30]\n"
- "ldr q3, [x15, #0x40]\n"
- "ldr q4, [x15, #0x50]\n"
- "ldr q5, [x15, #0x60]\n"
- "ldr q6, [x15, #0x70]\n"
- "ldr q7, [x15, #0x80]\n"
- "ldr q8, [x15, #0x90]\n"
- "add x15, x15, #0xa0\n"
- "ldp x11, x10, [x14, #0x0]\n"
- "ldr q9, [x11, x13]\n"
- "ldr q10, [x10, x13]\n"
- "ldp x9, x28, [x14, #0x10]\n"
- "ldr q11, [x9, x13]\n"
- "ldr q12, [x28, x13]\n"
+ "ld1r { v14.8h }, [x19]\n"
+ "add x13, %x[params_struct], %[offsetof_Args_inptrs]\n"
+ "mov x12, #0x0\n"
+ "sub x11, XZR, x17\n"
+ "cbz x16, 3f\n"
+ "ldp x10, x9, [x13, #0x0]\n"
+ "ldp x28, x27, [x13, #0x10]\n"
+ "cmp x17, x16, LSL #4\n"
+ "ldr q13, [x14, #0x0]\n"
+ "ldr q0, [x14, #0x10]\n"
+ "ldr q1, [x14, #0x20]\n"
+ "ldr q2, [x14, #0x30]\n"
+ "ldr q3, [x14, #0x40]\n"
+ "ldr q4, [x14, #0x50]\n"
+ "ldr q5, [x14, #0x60]\n"
+ "ldr q6, [x14, #0x70]\n"
+ "ldr q7, [x14, #0x80]\n"
+ "ldr q8, [x14, #0x90]\n"
+ "add x14, x14, #0xa0\n"
+ "ldr q9, [x10, x12]\n"
+ "ldr q10, [x9, x12]\n"
+ "ldr q11, [x28, x12]\n"
+ "ldr q12, [x27, x12]\n"
"bge 2f\n"
"1:" // Channel loop
"mov v21.16b, v13.16b\n fmla v21.8h, v4.8h, v9.8h\n"
"mov v16.16b, v13.16b\n fmla v16.8h, v8.8h, v9.8h\n"
- "ldr x27, [x14, #0x20]\n"
- "ldr x26, [x14, #0x30]\n"
+ "ldr x26, [x13, #0x20]\n"
+ "ldr x25, [x13, #0x30]\n"
"mov v22.16b, v13.16b\n fmla v22.8h, v3.8h, v9.8h\n"
"mov v25.16b, v13.16b\n fmla v25.8h, v1.8h, v9.8h\n"
- "ldr x25, [x14, #0x28]\n"
- "ldr x24, [x14, #0x38]\n"
+ "ldr x24, [x13, #0x28]\n"
+ "ldr x23, [x13, #0x38]\n"
"mov v26.16b, v13.16b\n fmla v26.8h, v0.8h, v9.8h\n"
"mov v17.16b, v13.16b\n fmla v17.8h, v7.8h, v9.8h\n"
- "ldr x11, [x14, #0x40]\n"
- "ldr x10, [x14, #0x48]\n"
+ "ldr x10, [x13, #0x40]\n"
+ "ldr x9, [x13, #0x48]\n"
"mov v18.16b, v13.16b\n fmla v18.8h, v6.8h, v9.8h\n"
"fmla v21.8h, v5.8h, v12.8h\n"
- "ldr x9, [x14, #0x50]\n"
- "ldr x28, [x14, #0x58]\n"
+ "ldr x28, [x13, #0x50]\n"
+ "ldr x27, [x13, #0x58]\n"
"mov v20.16b, v13.16b\n fmla v20.8h, v5.8h, v9.8h\n"
"mov v24.16b, v13.16b\n fmla v24.8h, v2.8h, v9.8h\n"
- "ldr q9, [x26, x13]\n"
- "ldr x26, [x14, #0x70]\n"
+ "ldr q9, [x25, x12]\n"
+ "ldr x25, [x13, #0x70]\n"
"fmla v16.8h, v0.8h, v10.8h\n"
- "ldr q10, [x27, x13]\n"
"mov v19.16b, v13.16b\n fmla v19.8h, v2.8h, v11.8h\n"
- "ldr q11, [x25, x13]\n"
+ "ldr q10, [x26, x12]\n"
+ "ldr q11, [x24, x12]\n"
"fmla v22.8h, v4.8h, v12.8h\n"
"fmla v25.8h, v2.8h, v12.8h\n"
- "ldr x27, [x14, #0x60]\n"
- "ldr x25, [x14, #0x68]\n"
+ "ldr x26, [x13, #0x60]\n"
+ "ldr x24, [x13, #0x68]\n"
"fmla v26.8h, v1.8h, v12.8h\n"
"fmla v17.8h, v8.8h, v12.8h\n"
- "ldr x23, [x16, #0x0]\n"
- "ldr x22, [x16, #0x8]\n"
+ "ldr x22, [x15, #0x0]\n"
+ "ldr x21, [x15, #0x8]\n"
"fmla v18.8h, v7.8h, v12.8h\n"
"mov v28.16b, v13.16b\n fmla v28.8h, v6.8h, v10.8h\n"
- "ldr q10, [x10, x13]\n"
- "ldr x10, [x14, #0x88]\n"
+ "ldr q10, [x9, x12]\n"
+ "ldr x9, [x13, #0x88]\n"
"fmla v21.8h, v7.8h, v9.8h\n"
"fmla v19.8h, v6.8h, v12.8h\n"
- "ldr x21, [x16, #0x10]\n"
- "ldr x20, [x16, #0x18]\n"
+ "ldr x20, [x15, #0x10]\n"
+ "ldr x19, [x15, #0x18]\n"
"mov v23.16b, v13.16b\n fmla v23.8h, v3.8h, v12.8h\n"
"mov v27.16b, v13.16b\n fmla v27.8h, v0.8h, v12.8h\n"
- "ldr q12, [x24, x13]\n"
- "ldr x24, [x14, #0x78]\n"
+ "ldr q12, [x23, x12]\n"
+ "ldr x23, [x13, #0x78]\n"
"mov v31.16b, v13.16b\n fmla v31.8h, v8.8h, v11.8h\n"
- "ldr q11, [x11, x13]\n"
"fmla v22.8h, v6.8h, v9.8h\n"
- "ldr x11, [x14, #0x80]\n"
+ "ldr q11, [x10, x12]\n"
+ "ldr x10, [x13, #0x80]\n"
"fmla v25.8h, v4.8h, v9.8h\n"
"fmla v26.8h, v3.8h, v9.8h\n"
- "add x12, x12, #0x10\n"
+ "add x11, x11, #0x10\n"
+ "mov v29.16b, v13.16b\n fmla v29.8h, v1.8h, v9.8h\n"
+ "mov v30.16b, v13.16b\n fmla v30.8h, v0.8h, v9.8h\n"
+ "ldr q13, [x14, #0x0]\n"
"fmla v20.8h, v8.8h, v9.8h\n"
"fmla v24.8h, v5.8h, v9.8h\n"
"fmla v28.8h, v2.8h, v9.8h\n"
"fmla v16.8h, v1.8h, v12.8h\n"
+ "ldr q9, [x28, x12]\n"
+ "ldr x28, [x13, #0x90]\n"
"fmla v17.8h, v0.8h, v12.8h\n"
- "ldr q12, [x28, x13]\n"
"fmla v18.8h, v2.8h, v11.8h\n"
- "ldr x28, [x14, #0x98]\n"
+ "ldr q12, [x27, x12]\n"
+ "ldr x27, [x13, #0x98]\n"
"fmla v21.8h, v8.8h, v10.8h\n"
"fmla v19.8h, v1.8h, v11.8h\n"
- "ldr q11, [x27, x13]\n"
- "ldr x27, [x14, #0xa0]\n"
+ "ldr q11, [x26, x12]\n"
+ "ldr x26, [x13, #0xa0]\n"
"fmla v22.8h, v7.8h, v10.8h\n"
"fmla v23.8h, v6.8h, v10.8h\n"
"fmla v25.8h, v5.8h, v10.8h\n"
"fmla v26.8h, v4.8h, v10.8h\n"
"fmla v27.8h, v3.8h, v10.8h\n"
- "fmla v31.8h, v0.8h, v10.8h\n"
- "fmla v24.8h, v6.8h, v11.8h\n"
- "fmla v28.8h, v3.8h, v11.8h\n"
- "ldr q11, [x26, x13]\n"
- "ldr x26, [x14, #0xb0]\n"
- "fmla v19.8h, v5.8h, v12.8h\n"
- "fmla v23.8h, v2.8h, v12.8h\n"
- "ldr q12, [x24, x13]\n"
- "ldr x24, [x14, #0xb8]\n"
- "fmla v27.8h, v8.8h, v11.8h\n"
- "fmla v31.8h, v5.8h, v11.8h\n"
- "mov v29.16b, v13.16b\n fmla v29.8h, v1.8h, v9.8h\n"
- "mov v30.16b, v13.16b\n fmla v30.8h, v0.8h, v9.8h\n"
- "ldr q9, [x9, x13]\n"
- "ldr x9, [x14, #0x90]\n"
"fmla v29.8h, v2.8h, v10.8h\n"
"fmla v30.8h, v1.8h, v10.8h\n"
- "ldr q10, [x25, x13]\n"
- "ldr x25, [x14, #0xa8]\n"
+ "fmla v31.8h, v0.8h, v10.8h\n"
+ "ldr q10, [x24, x12]\n"
+ "ldr x24, [x13, #0xa8]\n"
"fmla v16.8h, v3.8h, v9.8h\n"
"fmla v20.8h, v0.8h, v9.8h\n"
- "ldr q11, [x11, x13]\n"
- "ldr x11, [x14, #0xc0]\n"
+ "fmla v24.8h, v6.8h, v11.8h\n"
+ "fmla v28.8h, v3.8h, v11.8h\n"
+ "ldr q11, [x25, x12]\n"
+ "ldr x25, [x13, #0xb0]\n"
"fmla v17.8h, v4.8h, v10.8h\n"
"fmla v18.8h, v3.8h, v10.8h\n"
"fmla v21.8h, v1.8h, v10.8h\n"
+ "fmla v19.8h, v5.8h, v12.8h\n"
+ "fmla v23.8h, v2.8h, v12.8h\n"
"fmla v22.8h, v0.8h, v10.8h\n"
+ "ldr q12, [x23, x12]\n"
+ "ldr x23, [x13, #0xb8]\n"
+ "fmla v27.8h, v8.8h, v11.8h\n"
+ "fmla v31.8h, v5.8h, v11.8h\n"
+ "ldr q11, [x10, x12]\n"
+ "ldr x10, [x13, #0xc0]\n"
"fmla v16.8h, v5.8h, v10.8h\n"
"fmla v20.8h, v2.8h, v10.8h\n"
- "ldr q10, [x10, x13]\n"
- "ldr x10, [x14, #0xc8]\n"
+ "ldr q10, [x9, x12]\n"
+ "ldr x9, [x13, #0xc8]\n"
"fmla v17.8h, v5.8h, v12.8h\n"
"fmla v18.8h, v4.8h, v12.8h\n"
"fmla v21.8h, v2.8h, v12.8h\n"
"fmla v19.8h, v3.8h, v12.8h\n"
"fmla v22.8h, v1.8h, v12.8h\n"
"fmla v23.8h, v0.8h, v12.8h\n"
- "ldr q12, [x28, x13]\n"
- "ldr x28, [x14, #0xd8]\n"
+ "ldr q12, [x27, x12]\n"
+ "ldr x27, [x13, #0xd8]\n"
"fmla v28.8h, v7.8h, v11.8h\n"
"fmla v29.8h, v6.8h, v11.8h\n"
- "ldr q11, [x9, x13]\n"
- "ldr x9, [x14, #0xd0]\n"
+ "ldr q11, [x28, x12]\n"
+ "ldr x28, [x13, #0xd0]\n"
"fmla v16.8h, v7.8h, v10.8h\n"
"fmla v17.8h, v6.8h, v10.8h\n"
"fmla v20.8h, v4.8h, v10.8h\n"
"fmla v21.8h, v3.8h, v10.8h\n"
"fmla v24.8h, v1.8h, v10.8h\n"
"fmla v25.8h, v0.8h, v10.8h\n"
- "ldr q10, [x27, x13]\n"
- "ldr x27, [x14, #0xe0]\n"
+ "ldr q10, [x26, x12]\n"
+ "ldr x26, [x13, #0xe0]\n"
"fmla v18.8h, v8.8h, v12.8h\n"
"fmla v30.8h, v8.8h, v11.8h\n"
"fmla v31.8h, v7.8h, v11.8h\n"
- "ldr q11, [x25, x13]\n"
+ "ldr q11, [x24, x12]\n"
"fmla v27.8h, v1.8h, v12.8h\n"
- "ldr x25, [x14, #0xe8]\n"
+ "ldr x24, [x13, #0xe8]\n"
"fmla v19.8h, v7.8h, v12.8h\n"
"fmla v22.8h, v5.8h, v12.8h\n"
"fmla v23.8h, v4.8h, v12.8h\n"
"fmla v26.8h, v2.8h, v12.8h\n"
- "ldr q12, [x26, x13]\n"
- "ldr x26, [x14, #0xf0]\n"
+ "ldr q12, [x25, x12]\n"
+ "ldr x25, [x13, #0xf0]\n"
"fmla v16.8h, v2.8h, v10.8h\n"
"fmla v17.8h, v1.8h, v10.8h\n"
"fmla v18.8h, v0.8h, v10.8h\n"
- "ldr q10, [x24, x13]\n"
"fmla v20.8h, v7.8h, v11.8h\n"
- "ldr x24, [x14, #0xf8]\n"
+ "ldr q10, [x23, x12]\n"
+ "ldr x23, [x13, #0xf8]\n"
"fmla v21.8h, v6.8h, v11.8h\n"
"fmla v24.8h, v4.8h, v11.8h\n"
"fmla v25.8h, v3.8h, v11.8h\n"
"fmla v28.8h, v1.8h, v11.8h\n"
"fmla v29.8h, v0.8h, v11.8h\n"
- "ldr q11, [x11, x13]\n"
+ "ldr q11, [x10, x12]\n"
"fmla v27.8h, v4.8h, v11.8h\n"
- "ldr x11, [x14, #0x100]\n"
+ "ldr x10, [x13, #0x100]\n"
"fmla v30.8h, v2.8h, v11.8h\n"
"fmla v17.8h, v2.8h, v12.8h\n"
"fmla v18.8h, v1.8h, v12.8h\n"
"fmla v19.8h, v0.8h, v12.8h\n"
- "ldr q12, [x10, x13]\n"
- "ldr x10, [x14, #0x108]\n"
+ "ldr q12, [x9, x12]\n"
+ "ldr x9, [x13, #0x108]\n"
"fmla v16.8h, v6.8h, v10.8h\n"
"fmla v20.8h, v3.8h, v10.8h\n"
"fmla v24.8h, v0.8h, v10.8h\n"
- "ldr q10, [x9, x13]\n"
"fmla v22.8h, v8.8h, v11.8h\n"
- "ldr x9, [x14, #0x110]\n"
+ "ldr q10, [x28, x12]\n"
+ "ldr x28, [x13, #0x110]\n"
"fmla v23.8h, v7.8h, v11.8h\n"
"fmla v26.8h, v5.8h, v11.8h\n"
"fmla v31.8h, v1.8h, v11.8h\n"
- "ldr q11, [x28, x13]\n"
+ "ldr q11, [x27, x12]\n"
"fmla v27.8h, v2.8h, v12.8h\n"
- "ldr x28, [x14, #0x118]\n"
+ "ldr x27, [x13, #0x118]\n"
"fmla v28.8h, v0.8h, v10.8h\n"
"fmla v29.8h, v4.8h, v11.8h\n"
"fmla v30.8h, v3.8h, v11.8h\n"
"fmla v19.8h, v8.8h, v12.8h\n"
"fmla v23.8h, v5.8h, v12.8h\n"
- "ldr q12, [x27, x13]\n"
"fmla v20.8h, v6.8h, v10.8h\n"
+ "ldr q12, [x26, x12]\n"
"fmla v24.8h, v3.8h, v10.8h\n"
- "ldr q10, [x25, x13]\n"
+ "ldr q10, [x24, x12]\n"
"fmla v25.8h, v7.8h, v11.8h\n"
"fmla v26.8h, v6.8h, v11.8h\n"
"fmla v28.8h, v5.8h, v11.8h\n"
@@ -310,293 +311,292 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"fmla v29.8h, v7.8h, v10.8h\n"
"fmla v30.8h, v6.8h, v10.8h\n"
"fmla v24.8h, v8.8h, v11.8h\n"
- "ldr q11, [x26, x13]\n"
+ "ldr q11, [x25, x12]\n"
"fmla v28.8h, v8.8h, v10.8h\n"
- "ldr q10, [x11, x13]\n"
+ "ldr q10, [x10, x12]\n"
"fmla v25.8h, v8.8h, v11.8h\n"
"fmla v26.8h, v7.8h, v11.8h\n"
"fmla v27.8h, v6.8h, v11.8h\n"
"fmla v29.8h, v5.8h, v11.8h\n"
"fmla v30.8h, v4.8h, v11.8h\n"
"fmla v31.8h, v3.8h, v11.8h\n"
- "ldr q11, [x10, x13]\n"
- "ldp x11, x10, [x14, #0x0]\n"
+ "ldr q11, [x9, x12]\n"
+ "ldp x10, x9, [x13, #0x0]\n"
"fmla v23.8h, v8.8h, v12.8h\n"
- "ldr q12, [x24, x13]\n"
+ "ldr q12, [x23, x12]\n"
"fmla v16.8h, v4.8h, v10.8h\n"
"fmax v16.8h, v16.8h, v15.8h\n"
"fmla v17.8h, v3.8h, v10.8h\n"
"fmla v18.8h, v5.8h, v11.8h\n"
"fmax v17.8h, v17.8h, v15.8h\n"
+ "ldr q9, [x10, x17]\n"
"fmla v19.8h, v4.8h, v11.8h\n"
"fmla v29.8h, v8.8h, v12.8h\n"
"fmax v18.8h, v18.8h, v15.8h\n"
"fmla v30.8h, v7.8h, v12.8h\n"
"fmla v31.8h, v6.8h, v12.8h\n"
- "ldr q12, [x9, x13]\n"
+ "ldr q12, [x28, x12]\n"
"fmax v19.8h, v19.8h, v15.8h\n"
"fmla v20.8h, v1.8h, v10.8h\n"
"fmla v21.8h, v0.8h, v10.8h\n"
- "ldr q10, [x28, x13]\n"
- "ldr q9, [x11, x8]\n"
+ "ldr q10, [x27, x12]\n"
+ "fmin v16.8h, v16.8h, v14.8h\n"
"fmla v22.8h, v2.8h, v11.8h\n"
- "ldr q13, [x15, #0x0]\n"
"fmla v23.8h, v1.8h, v11.8h\n"
- "ldr q0, [x15, #0x10]\n"
- "ldr q1, [x15, #0x20]\n"
+ "fmin v17.8h, v17.8h, v14.8h\n"
+ "str q16, [x22, x11]\n"
"fmla v24.8h, v7.8h, v12.8h\n"
"fmla v25.8h, v6.8h, v12.8h\n"
- "ldr q2, [x15, #0x30]\n"
+ "fmin v18.8h, v18.8h, v14.8h\n"
+ "str q17, [x21, x11]\n"
"fmla v26.8h, v8.8h, v10.8h\n"
- "ldr q6, [x15, #0x70]\n"
"fmla v27.8h, v7.8h, v10.8h\n"
- "ldr q7, [x15, #0x80]\n"
- "fmin v16.8h, v16.8h, v14.8h\n"
- "fmin v17.8h, v17.8h, v14.8h\n"
- "str q16, [x23, x12]\n"
- "ldr q8, [x15, #0x90]\n"
- "fmin v18.8h, v18.8h, v14.8h\n"
"fmin v19.8h, v19.8h, v14.8h\n"
- "str q17, [x22, x12]\n"
- "ldr x23, [x16, #0x20]\n"
+ "str q18, [x20, x11]\n"
"fmax v20.8h, v20.8h, v15.8h\n"
"fmax v21.8h, v21.8h, v15.8h\n"
- "str q18, [x21, x12]\n"
- "ldr x22, [x16, #0x28]\n"
+ "str q19, [x19, x11]\n"
+ "ldr x22, [x15, #0x20]\n"
"fmax v22.8h, v22.8h, v15.8h\n"
"fmax v23.8h, v23.8h, v15.8h\n"
- "str q19, [x20, x12]\n"
- "ldr x21, [x16, #0x30]\n"
- "ldr x20, [x16, #0x38]\n"
+ "ldr x21, [x15, #0x28]\n"
+ "ldr x20, [x15, #0x30]\n"
+ "ldr x19, [x15, #0x38]\n"
"fmla v28.8h, v4.8h, v12.8h\n"
"fmla v29.8h, v3.8h, v12.8h\n"
- "ldr q3, [x15, #0x40]\n"
+ "fmin v20.8h, v20.8h, v14.8h\n"
"fmla v30.8h, v5.8h, v10.8h\n"
- "ldr q5, [x15, #0x60]\n"
"fmla v31.8h, v4.8h, v10.8h\n"
- "ldr q10, [x10, x8]\n"
- "ldr q4, [x15, #0x50]\n"
- "fmin v20.8h, v20.8h, v14.8h\n"
"fmin v21.8h, v21.8h, v14.8h\n"
- "str q20, [x23, x12]\n"
+ "str q20, [x22, x11]\n"
"fmin v22.8h, v22.8h, v14.8h\n"
"fmin v23.8h, v23.8h, v14.8h\n"
- "str q21, [x22, x12]\n"
- "ldr x23, [x16, #0x40]\n"
+ "str q21, [x21, x11]\n"
+ "ldr x22, [x15, #0x40]\n"
"fmax v24.8h, v24.8h, v15.8h\n"
"fmax v25.8h, v25.8h, v15.8h\n"
- "str q22, [x21, x12]\n"
- "ldr x22, [x16, #0x48]\n"
+ "str q22, [x20, x11]\n"
+ "ldr x21, [x15, #0x48]\n"
"fmax v26.8h, v26.8h, v15.8h\n"
"fmax v27.8h, v27.8h, v15.8h\n"
- "str q23, [x20, x12]\n"
- "ldr x21, [x16, #0x50]\n"
- "ldr x20, [x16, #0x58]\n"
- "ldp x9, x28, [x14, #0x10]\n"
+ "str q23, [x19, x11]\n"
+ "ldr x20, [x15, #0x50]\n"
+ "ldr x19, [x15, #0x58]\n"
+ "ldp x28, x27, [x13, #0x10]\n"
"fmin v24.8h, v24.8h, v14.8h\n"
"fmin v25.8h, v25.8h, v14.8h\n"
- "ldr q11, [x9, x8]\n"
- "ldr q12, [x28, x8]\n"
"fmin v26.8h, v26.8h, v14.8h\n"
"fmin v27.8h, v27.8h, v14.8h\n"
+ "str q24, [x22, x11]\n"
+ "ldr x22, [x15, #0x60]\n"
"fmax v28.8h, v28.8h, v15.8h\n"
"fmax v29.8h, v29.8h, v15.8h\n"
- "str q24, [x23, x12]\n"
- "ldr x23, [x16, #0x60]\n"
+ "str q25, [x21, x11]\n"
+ "ldr x21, [x15, #0x68]\n"
"fmax v30.8h, v30.8h, v15.8h\n"
"fmax v31.8h, v31.8h, v15.8h\n"
- "str q25, [x22, x12]\n"
- "ldr x22, [x16, #0x68]\n"
- "str q26, [x21, x12]\n"
- "ldr x21, [x16, #0x70]\n"
- "add x8, x8, #0x10\n"
- "cmp x8, x17, LSL #4\n"
- "str q27, [x20, x12]\n"
- "ldr x20, [x16, #0x78]\n"
+ "str q26, [x20, x11]\n"
+ "ldr x20, [x15, #0x70]\n"
+ "str q27, [x19, x11]\n"
+ "ldr x19, [x15, #0x78]\n"
+ "ldr q10, [x9, x17]\n"
"fmin v28.8h, v28.8h, v14.8h\n"
+ "ldr q11, [x28, x17]\n"
+ "ldr q12, [x27, x17]\n"
+ "add x17, x17, #0x10\n"
+ "cmp x17, x16, LSL #4\n"
"fmin v29.8h, v29.8h, v14.8h\n"
"fmin v30.8h, v30.8h, v14.8h\n"
+ "add x12, x12, #0x10\n"
+ "str q28, [x22, x11]\n"
"fmin v31.8h, v31.8h, v14.8h\n"
- "add x13, x13, #0x10\n"
- "str q28, [x23, x12]\n"
- "str q29, [x22, x12]\n"
- "add x15, x15, #0xa0\n"
- "str q30, [x21, x12]\n"
- "str q31, [x20, x12]\n"
+ "str q29, [x21, x11]\n"
+ "ldr q0, [x14, #0x10]\n"
+ "ldr q1, [x14, #0x20]\n"
+ "str q30, [x20, x11]\n"
+ "ldr q2, [x14, #0x30]\n"
+ "ldr q3, [x14, #0x40]\n"
+ "str q31, [x19, x11]\n"
+ "ldr q4, [x14, #0x50]\n"
+ "ldr q5, [x14, #0x60]\n"
+ "ldr q6, [x14, #0x70]\n"
+ "ldr q7, [x14, #0x80]\n"
+ "ldr q8, [x14, #0x90]\n"
+ "add x14, x14, #0xa0\n"
"blt 1b\n"
"2:" // Channel tail
"mov v21.16b, v13.16b\n fmla v21.8h, v4.8h, v9.8h\n"
"mov v16.16b, v13.16b\n fmla v16.8h, v8.8h, v9.8h\n"
- "ldr x27, [x14, #0x20]\n"
- "ldr x26, [x14, #0x30]\n"
+ "ldr x26, [x13, #0x20]\n"
+ "ldr x25, [x13, #0x30]\n"
"mov v22.16b, v13.16b\n fmla v22.8h, v3.8h, v9.8h\n"
"mov v25.16b, v13.16b\n fmla v25.8h, v1.8h, v9.8h\n"
- "ldr x25, [x14, #0x28]\n"
- "ldr x24, [x14, #0x38]\n"
+ "ldr x24, [x13, #0x28]\n"
+ "ldr x23, [x13, #0x38]\n"
"mov v26.16b, v13.16b\n fmla v26.8h, v0.8h, v9.8h\n"
"mov v17.16b, v13.16b\n fmla v17.8h, v7.8h, v9.8h\n"
- "ldr x11, [x14, #0x40]\n"
- "ldr x10, [x14, #0x48]\n"
+ "ldr x10, [x13, #0x40]\n"
+ "ldr x9, [x13, #0x48]\n"
"mov v18.16b, v13.16b\n fmla v18.8h, v6.8h, v9.8h\n"
"fmla v21.8h, v5.8h, v12.8h\n"
- "ldr x9, [x14, #0x50]\n"
- "ldr x28, [x14, #0x58]\n"
+ "ldr x28, [x13, #0x50]\n"
+ "ldr x27, [x13, #0x58]\n"
"mov v20.16b, v13.16b\n fmla v20.8h, v5.8h, v9.8h\n"
"mov v24.16b, v13.16b\n fmla v24.8h, v2.8h, v9.8h\n"
- "ldr q9, [x26, x13]\n"
- "ldr x26, [x14, #0x70]\n"
+ "ldr q9, [x25, x12]\n"
+ "ldr x25, [x13, #0x70]\n"
"fmla v16.8h, v0.8h, v10.8h\n"
- "ldr q10, [x27, x13]\n"
"mov v19.16b, v13.16b\n fmla v19.8h, v2.8h, v11.8h\n"
- "ldr q11, [x25, x13]\n"
+ "ldr q10, [x26, x12]\n"
+ "ldr q11, [x24, x12]\n"
"fmla v22.8h, v4.8h, v12.8h\n"
"fmla v25.8h, v2.8h, v12.8h\n"
- "ldr x27, [x14, #0x60]\n"
- "ldr x25, [x14, #0x68]\n"
+ "ldr x26, [x13, #0x60]\n"
+ "ldr x24, [x13, #0x68]\n"
"fmla v26.8h, v1.8h, v12.8h\n"
"fmla v17.8h, v8.8h, v12.8h\n"
- "ldr x23, [x16, #0x0]\n"
- "ldr x22, [x16, #0x8]\n"
+ "ldr x22, [x15, #0x0]\n"
+ "ldr x21, [x15, #0x8]\n"
"fmla v18.8h, v7.8h, v12.8h\n"
"mov v28.16b, v13.16b\n fmla v28.8h, v6.8h, v10.8h\n"
- "ldr q10, [x10, x13]\n"
- "ldr x10, [x14, #0x88]\n"
+ "ldr q10, [x9, x12]\n"
+ "ldr x9, [x13, #0x88]\n"
"fmla v21.8h, v7.8h, v9.8h\n"
"fmla v19.8h, v6.8h, v12.8h\n"
- "ldr x21, [x16, #0x10]\n"
- "ldr x20, [x16, #0x18]\n"
+ "ldr x20, [x15, #0x10]\n"
+ "ldr x19, [x15, #0x18]\n"
"mov v23.16b, v13.16b\n fmla v23.8h, v3.8h, v12.8h\n"
"mov v27.16b, v13.16b\n fmla v27.8h, v0.8h, v12.8h\n"
- "ldr q12, [x24, x13]\n"
- "ldr x24, [x14, #0x78]\n"
+ "ldr q12, [x23, x12]\n"
+ "ldr x23, [x13, #0x78]\n"
"mov v31.16b, v13.16b\n fmla v31.8h, v8.8h, v11.8h\n"
- "ldr q11, [x11, x13]\n"
"fmla v22.8h, v6.8h, v9.8h\n"
- "ldr x11, [x14, #0x80]\n"
+ "ldr q11, [x10, x12]\n"
+ "ldr x10, [x13, #0x80]\n"
"fmla v25.8h, v4.8h, v9.8h\n"
"fmla v26.8h, v3.8h, v9.8h\n"
- "add x12, x12, #0x10\n"
+ "add x11, x11, #0x10\n"
+ "mov v29.16b, v13.16b\n fmla v29.8h, v1.8h, v9.8h\n"
+ "mov v30.16b, v13.16b\n fmla v30.8h, v0.8h, v9.8h\n"
"fmla v20.8h, v8.8h, v9.8h\n"
"fmla v24.8h, v5.8h, v9.8h\n"
"fmla v28.8h, v2.8h, v9.8h\n"
"fmla v16.8h, v1.8h, v12.8h\n"
+ "ldr q9, [x28, x12]\n"
+ "ldr x28, [x13, #0x90]\n"
"fmla v17.8h, v0.8h, v12.8h\n"
- "ldr q12, [x28, x13]\n"
"fmla v18.8h, v2.8h, v11.8h\n"
- "ldr x28, [x14, #0x98]\n"
+ "ldr q12, [x27, x12]\n"
+ "ldr x27, [x13, #0x98]\n"
"fmla v21.8h, v8.8h, v10.8h\n"
"fmla v19.8h, v1.8h, v11.8h\n"
- "ldr q11, [x27, x13]\n"
- "ldr x27, [x14, #0xa0]\n"
+ "ldr q11, [x26, x12]\n"
+ "ldr x26, [x13, #0xa0]\n"
"fmla v22.8h, v7.8h, v10.8h\n"
"fmla v23.8h, v6.8h, v10.8h\n"
"fmla v25.8h, v5.8h, v10.8h\n"
"fmla v26.8h, v4.8h, v10.8h\n"
"fmla v27.8h, v3.8h, v10.8h\n"
- "fmla v31.8h, v0.8h, v10.8h\n"
- "fmla v24.8h, v6.8h, v11.8h\n"
- "fmla v28.8h, v3.8h, v11.8h\n"
- "ldr q11, [x26, x13]\n"
- "ldr x26, [x14, #0xb0]\n"
- "fmla v19.8h, v5.8h, v12.8h\n"
- "fmla v23.8h, v2.8h, v12.8h\n"
- "ldr q12, [x24, x13]\n"
- "ldr x24, [x14, #0xb8]\n"
- "fmla v27.8h, v8.8h, v11.8h\n"
- "fmla v31.8h, v5.8h, v11.8h\n"
- "mov v29.16b, v13.16b\n fmla v29.8h, v1.8h, v9.8h\n"
- "mov v30.16b, v13.16b\n fmla v30.8h, v0.8h, v9.8h\n"
- "ldr q9, [x9, x13]\n"
- "ldr x9, [x14, #0x90]\n"
"fmla v29.8h, v2.8h, v10.8h\n"
"fmla v30.8h, v1.8h, v10.8h\n"
- "ldr q10, [x25, x13]\n"
- "ldr x25, [x14, #0xa8]\n"
+ "fmla v31.8h, v0.8h, v10.8h\n"
+ "ldr q10, [x24, x12]\n"
+ "ldr x24, [x13, #0xa8]\n"
"fmla v16.8h, v3.8h, v9.8h\n"
"fmla v20.8h, v0.8h, v9.8h\n"
- "ldr q11, [x11, x13]\n"
- "ldr x11, [x14, #0xc0]\n"
+ "fmla v24.8h, v6.8h, v11.8h\n"
+ "fmla v28.8h, v3.8h, v11.8h\n"
+ "ldr q11, [x25, x12]\n"
+ "ldr x25, [x13, #0xb0]\n"
"fmla v17.8h, v4.8h, v10.8h\n"
"fmla v18.8h, v3.8h, v10.8h\n"
"fmla v21.8h, v1.8h, v10.8h\n"
+ "fmla v19.8h, v5.8h, v12.8h\n"
+ "fmla v23.8h, v2.8h, v12.8h\n"
"fmla v22.8h, v0.8h, v10.8h\n"
+ "ldr q12, [x23, x12]\n"
+ "ldr x23, [x13, #0xb8]\n"
+ "fmla v27.8h, v8.8h, v11.8h\n"
+ "fmla v31.8h, v5.8h, v11.8h\n"
+ "ldr q11, [x10, x12]\n"
+ "ldr x10, [x13, #0xc0]\n"
"fmla v16.8h, v5.8h, v10.8h\n"
"fmla v20.8h, v2.8h, v10.8h\n"
- "ldr q10, [x10, x13]\n"
- "ldr x10, [x14, #0xc8]\n"
+ "ldr q10, [x9, x12]\n"
+ "ldr x9, [x13, #0xc8]\n"
"fmla v17.8h, v5.8h, v12.8h\n"
"fmla v18.8h, v4.8h, v12.8h\n"
"fmla v21.8h, v2.8h, v12.8h\n"
"fmla v19.8h, v3.8h, v12.8h\n"
"fmla v22.8h, v1.8h, v12.8h\n"
"fmla v23.8h, v0.8h, v12.8h\n"
- "ldr q12, [x28, x13]\n"
- "ldr x28, [x14, #0xd8]\n"
+ "ldr q12, [x27, x12]\n"
+ "ldr x27, [x13, #0xd8]\n"
"fmla v28.8h, v7.8h, v11.8h\n"
"fmla v29.8h, v6.8h, v11.8h\n"
- "ldr q11, [x9, x13]\n"
- "ldr x9, [x14, #0xd0]\n"
+ "ldr q11, [x28, x12]\n"
+ "ldr x28, [x13, #0xd0]\n"
"fmla v16.8h, v7.8h, v10.8h\n"
"fmla v17.8h, v6.8h, v10.8h\n"
"fmla v20.8h, v4.8h, v10.8h\n"
"fmla v21.8h, v3.8h, v10.8h\n"
"fmla v24.8h, v1.8h, v10.8h\n"
"fmla v25.8h, v0.8h, v10.8h\n"
- "ldr q10, [x27, x13]\n"
- "ldr x27, [x14, #0xe0]\n"
+ "ldr q10, [x26, x12]\n"
+ "ldr x26, [x13, #0xe0]\n"
"fmla v18.8h, v8.8h, v12.8h\n"
"fmla v30.8h, v8.8h, v11.8h\n"
"fmla v31.8h, v7.8h, v11.8h\n"
- "ldr q11, [x25, x13]\n"
+ "ldr q11, [x24, x12]\n"
"fmla v27.8h, v1.8h, v12.8h\n"
- "ldr x25, [x14, #0xe8]\n"
+ "ldr x24, [x13, #0xe8]\n"
"fmla v19.8h, v7.8h, v12.8h\n"
"fmla v22.8h, v5.8h, v12.8h\n"
"fmla v23.8h, v4.8h, v12.8h\n"
"fmla v26.8h, v2.8h, v12.8h\n"
- "ldr q12, [x26, x13]\n"
- "ldr x26, [x14, #0xf0]\n"
+ "ldr q12, [x25, x12]\n"
+ "ldr x25, [x13, #0xf0]\n"
"fmla v16.8h, v2.8h, v10.8h\n"
"fmla v17.8h, v1.8h, v10.8h\n"
"fmla v18.8h, v0.8h, v10.8h\n"
- "ldr q10, [x24, x13]\n"
"fmla v20.8h, v7.8h, v11.8h\n"
- "ldr x24, [x14, #0xf8]\n"
+ "ldr q10, [x23, x12]\n"
+ "ldr x23, [x13, #0xf8]\n"
"fmla v21.8h, v6.8h, v11.8h\n"
"fmla v24.8h, v4.8h, v11.8h\n"
"fmla v25.8h, v3.8h, v11.8h\n"
"fmla v28.8h, v1.8h, v11.8h\n"
"fmla v29.8h, v0.8h, v11.8h\n"
- "ldr q11, [x11, x13]\n"
+ "ldr q11, [x10, x12]\n"
"fmla v27.8h, v4.8h, v11.8h\n"
- "ldr x11, [x14, #0x100]\n"
+ "ldr x10, [x13, #0x100]\n"
"fmla v30.8h, v2.8h, v11.8h\n"
"fmla v17.8h, v2.8h, v12.8h\n"
"fmla v18.8h, v1.8h, v12.8h\n"
"fmla v19.8h, v0.8h, v12.8h\n"
- "ldr q12, [x10, x13]\n"
- "ldr x10, [x14, #0x108]\n"
+ "ldr q12, [x9, x12]\n"
+ "ldr x9, [x13, #0x108]\n"
"fmla v16.8h, v6.8h, v10.8h\n"
"fmla v20.8h, v3.8h, v10.8h\n"
"fmla v24.8h, v0.8h, v10.8h\n"
- "ldr q10, [x9, x13]\n"
"fmla v22.8h, v8.8h, v11.8h\n"
- "ldr x9, [x14, #0x110]\n"
+ "ldr q10, [x28, x12]\n"
+ "ldr x28, [x13, #0x110]\n"
"fmla v23.8h, v7.8h, v11.8h\n"
"fmla v26.8h, v5.8h, v11.8h\n"
"fmla v31.8h, v1.8h, v11.8h\n"
- "ldr q11, [x28, x13]\n"
+ "ldr q11, [x27, x12]\n"
"fmla v27.8h, v2.8h, v12.8h\n"
- "ldr x28, [x14, #0x118]\n"
+ "ldr x27, [x13, #0x118]\n"
"fmla v28.8h, v0.8h, v10.8h\n"
"fmla v29.8h, v4.8h, v11.8h\n"
"fmla v30.8h, v3.8h, v11.8h\n"
"fmla v19.8h, v8.8h, v12.8h\n"
"fmla v23.8h, v5.8h, v12.8h\n"
- "ldr q12, [x27, x13]\n"
"fmla v20.8h, v6.8h, v10.8h\n"
+ "ldr q12, [x26, x12]\n"
"fmla v24.8h, v3.8h, v10.8h\n"
- "ldr q10, [x25, x13]\n"
+ "ldr q10, [x24, x12]\n"
"fmla v25.8h, v7.8h, v11.8h\n"
"fmla v26.8h, v6.8h, v11.8h\n"
"fmla v28.8h, v5.8h, v11.8h\n"
@@ -605,18 +605,18 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"fmla v29.8h, v7.8h, v10.8h\n"
"fmla v30.8h, v6.8h, v10.8h\n"
"fmla v24.8h, v8.8h, v11.8h\n"
- "ldr q11, [x26, x13]\n"
+ "ldr q11, [x25, x12]\n"
"fmla v28.8h, v8.8h, v10.8h\n"
- "ldr q10, [x11, x13]\n"
+ "ldr q10, [x10, x12]\n"
"fmla v25.8h, v8.8h, v11.8h\n"
"fmla v26.8h, v7.8h, v11.8h\n"
"fmla v27.8h, v6.8h, v11.8h\n"
"fmla v29.8h, v5.8h, v11.8h\n"
"fmla v30.8h, v4.8h, v11.8h\n"
"fmla v31.8h, v3.8h, v11.8h\n"
- "ldr q11, [x10, x13]\n"
+ "ldr q11, [x9, x12]\n"
"fmla v23.8h, v8.8h, v12.8h\n"
- "ldr q12, [x24, x13]\n"
+ "ldr q12, [x23, x12]\n"
"fmla v16.8h, v4.8h, v10.8h\n"
"fmax v16.8h, v16.8h, v15.8h\n"
"fmla v17.8h, v3.8h, v10.8h\n"
@@ -627,145 +627,145 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"fmax v18.8h, v18.8h, v15.8h\n"
"fmla v30.8h, v7.8h, v12.8h\n"
"fmla v31.8h, v6.8h, v12.8h\n"
- "ldr q12, [x9, x13]\n"
+ "ldr q12, [x28, x12]\n"
"fmax v19.8h, v19.8h, v15.8h\n"
"fmla v20.8h, v1.8h, v10.8h\n"
"fmla v21.8h, v0.8h, v10.8h\n"
- "ldr q10, [x28, x13]\n"
+ "ldr q10, [x27, x12]\n"
"fmin v16.8h, v16.8h, v14.8h\n"
"fmla v22.8h, v2.8h, v11.8h\n"
"fmla v23.8h, v1.8h, v11.8h\n"
"fmin v17.8h, v17.8h, v14.8h\n"
- "str q16, [x23, x12]\n"
+ "str q16, [x22, x11]\n"
"fmla v24.8h, v7.8h, v12.8h\n"
"fmla v25.8h, v6.8h, v12.8h\n"
"fmin v18.8h, v18.8h, v14.8h\n"
- "str q17, [x22, x12]\n"
+ "str q17, [x21, x11]\n"
"fmla v26.8h, v8.8h, v10.8h\n"
"fmla v27.8h, v7.8h, v10.8h\n"
"fmin v19.8h, v19.8h, v14.8h\n"
- "str q18, [x21, x12]\n"
+ "str q18, [x20, x11]\n"
"fmax v20.8h, v20.8h, v15.8h\n"
"fmax v21.8h, v21.8h, v15.8h\n"
- "str q19, [x20, x12]\n"
- "ldr x23, [x16, #0x20]\n"
+ "str q19, [x19, x11]\n"
+ "ldr x22, [x15, #0x20]\n"
"fmax v22.8h, v22.8h, v15.8h\n"
"fmax v23.8h, v23.8h, v15.8h\n"
- "ldr x22, [x16, #0x28]\n"
- "ldr x21, [x16, #0x30]\n"
- "ldr x20, [x16, #0x38]\n"
+ "ldr x21, [x15, #0x28]\n"
+ "ldr x20, [x15, #0x30]\n"
+ "ldr x19, [x15, #0x38]\n"
"fmla v28.8h, v4.8h, v12.8h\n"
"fmla v29.8h, v3.8h, v12.8h\n"
"fmin v20.8h, v20.8h, v14.8h\n"
"fmla v30.8h, v5.8h, v10.8h\n"
"fmla v31.8h, v4.8h, v10.8h\n"
"fmin v21.8h, v21.8h, v14.8h\n"
- "str q20, [x23, x12]\n"
+ "str q20, [x22, x11]\n"
"fmin v22.8h, v22.8h, v14.8h\n"
"fmin v23.8h, v23.8h, v14.8h\n"
- "str q21, [x22, x12]\n"
- "ldr x23, [x16, #0x40]\n"
+ "str q21, [x21, x11]\n"
+ "ldr x22, [x15, #0x40]\n"
"fmax v24.8h, v24.8h, v15.8h\n"
"fmax v25.8h, v25.8h, v15.8h\n"
- "str q22, [x21, x12]\n"
- "ldr x22, [x16, #0x48]\n"
+ "str q22, [x20, x11]\n"
+ "ldr x21, [x15, #0x48]\n"
"fmax v26.8h, v26.8h, v15.8h\n"
"fmax v27.8h, v27.8h, v15.8h\n"
- "str q23, [x20, x12]\n"
- "ldr x21, [x16, #0x50]\n"
- "ldr x20, [x16, #0x58]\n"
+ "str q23, [x19, x11]\n"
+ "ldr x20, [x15, #0x50]\n"
+ "ldr x19, [x15, #0x58]\n"
"fmin v24.8h, v24.8h, v14.8h\n"
"fmin v25.8h, v25.8h, v14.8h\n"
- "str q24, [x23, x12]\n"
+ "str q24, [x22, x11]\n"
"fmin v26.8h, v26.8h, v14.8h\n"
"fmin v27.8h, v27.8h, v14.8h\n"
- "str q25, [x22, x12]\n"
- "ldr x23, [x16, #0x60]\n"
+ "str q25, [x21, x11]\n"
+ "ldr x22, [x15, #0x60]\n"
"fmax v28.8h, v28.8h, v15.8h\n"
"fmax v29.8h, v29.8h, v15.8h\n"
- "str q26, [x21, x12]\n"
- "ldr x22, [x16, #0x68]\n"
+ "str q26, [x20, x11]\n"
+ "ldr x21, [x15, #0x68]\n"
"fmax v30.8h, v30.8h, v15.8h\n"
"fmax v31.8h, v31.8h, v15.8h\n"
- "str q27, [x20, x12]\n"
- "ldr x21, [x16, #0x70]\n"
- "ldr x20, [x16, #0x78]\n"
+ "str q27, [x19, x11]\n"
+ "ldr x20, [x15, #0x70]\n"
+ "ldr x19, [x15, #0x78]\n"
"fmin v28.8h, v28.8h, v14.8h\n"
"fmin v29.8h, v29.8h, v14.8h\n"
- "str q28, [x23, x12]\n"
+ "str q28, [x22, x11]\n"
"fmin v30.8h, v30.8h, v14.8h\n"
"fmin v31.8h, v31.8h, v14.8h\n"
- "str q29, [x22, x12]\n"
- "add x13, x13, #0x10\n"
- "str q30, [x21, x12]\n"
- "str q31, [x20, x12]\n"
+ "str q29, [x21, x11]\n"
+ "add x12, x12, #0x10\n"
+ "str q30, [x20, x11]\n"
+ "str q31, [x19, x11]\n"
"3:" // Oddments
"tst %x[n_channels], #0x7\n"
"beq 140f\n"
- "ldr q13, [x15, #0x0]\n"
- "ldr q0, [x15, #0x10]\n"
- "mov x12, x13\n"
- "ldr q1, [x15, #0x20]\n"
- "ldr q2, [x15, #0x30]\n"
- "ldr q3, [x15, #0x40]\n"
- "ldr q4, [x15, #0x50]\n"
- "ldr q5, [x15, #0x60]\n"
- "ldr q6, [x15, #0x70]\n"
- "ldr q7, [x15, #0x80]\n"
- "ldr q8, [x15, #0x90]\n"
- "ldr x23, [x14, #0x0]\n"
- "ldr x22, [x14, #0x8]\n"
- "add x23, x23, x13\n"
- "add x22, x22, x13\n"
- "ldr x21, [x14, #0x10]\n"
- "ldr x20, [x14, #0x18]\n"
- "add x21, x21, x13\n"
- "add x20, x20, x13\n"
+ "ldr x10, [x13, #0x0]\n"
+ "ldr x9, [x13, #0x8]\n"
+ "ldr x28, [x13, #0x10]\n"
+ "ldr x27, [x13, #0x18]\n"
+ "mov x11, x12\n"
+ "add x10, x10, x12\n"
+ "ldr q13, [x14, #0x0]\n"
+ "ldr q0, [x14, #0x10]\n"
+ "add x9, x9, x12\n"
+ "add x28, x28, x12\n"
+ "ldr q1, [x14, #0x20]\n"
+ "ldr q2, [x14, #0x30]\n"
+ "add x27, x27, x12\n"
+ "ldr q3, [x14, #0x40]\n"
+ "ldr q4, [x14, #0x50]\n"
+ "ldr q5, [x14, #0x60]\n"
+ "ldr q6, [x14, #0x70]\n"
+ "ldr q7, [x14, #0x80]\n"
+ "ldr q8, [x14, #0x90]\n"
"tbz %x[n_channels], #2, 5f\n"
- "ld1 { v9.d }[0], [x23], #0x8\n"
- "ld1 { v10.d }[0], [x22], #0x8\n"
- "ld1 { v11.d }[0], [x21], #0x8\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v9.d }[0], [x10], #0x8\n"
+ "ld1 { v10.d }[0], [x9], #0x8\n"
+ "ld1 { v11.d }[0], [x28], #0x8\n"
+ "ld1 { v12.d }[0], [x27], #0x8\n"
"tbz %x[n_channels], #1, 4f\n"
- "ld1 { v9.s }[2], [x23], #0x4\n"
- "ld1 { v10.s }[2], [x22], #0x4\n"
- "ld1 { v11.s }[2], [x21], #0x4\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x10], #0x4\n"
+ "ld1 { v10.s }[2], [x9], #0x4\n"
+ "ld1 { v11.s }[2], [x28], #0x4\n"
+ "ld1 { v12.s }[2], [x27], #0x4\n"
"tbz %x[n_channels], #0, 7f\n"
- "ld1 { v9.h }[6], [x23], #0x2\n"
- "ld1 { v10.h }[6], [x22], #0x2\n"
- "ld1 { v11.h }[6], [x21], #0x2\n"
- "ld1 { v12.h }[6], [x20], #0x2\n"
+ "ld1 { v9.h }[6], [x10], #0x2\n"
+ "ld1 { v10.h }[6], [x9], #0x2\n"
+ "ld1 { v11.h }[6], [x28], #0x2\n"
+ "ld1 { v12.h }[6], [x27], #0x2\n"
"b 7f\n"
"4:" // Oddments: Load inputs (2, 2), (0, 0), (0, 5), (2, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 7f\n"
- "ld1 { v9.h }[4], [x23], #0x2\n"
- "ld1 { v10.h }[4], [x22], #0x2\n"
- "ld1 { v11.h }[4], [x21], #0x2\n"
- "ld1 { v12.h }[4], [x20], #0x2\n"
+ "ld1 { v9.h }[4], [x10], #0x2\n"
+ "ld1 { v10.h }[4], [x9], #0x2\n"
+ "ld1 { v11.h }[4], [x28], #0x2\n"
+ "ld1 { v12.h }[4], [x27], #0x2\n"
"b 7f\n"
"5:" // Oddments: Load inputs (2, 2), (0, 0), (0, 5), (2, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 6f\n"
- "ld1 { v9.s }[0], [x23], #0x4\n"
- "ld1 { v10.s }[0], [x22], #0x4\n"
- "ld1 { v11.s }[0], [x21], #0x4\n"
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v9.s }[0], [x10], #0x4\n"
+ "ld1 { v10.s }[0], [x9], #0x4\n"
+ "ld1 { v11.s }[0], [x28], #0x4\n"
+ "ld1 { v12.s }[0], [x27], #0x4\n"
"tbz %x[n_channels], #0, 7f\n"
- "ld1 { v9.h }[2], [x23], #0x2\n"
- "ld1 { v10.h }[2], [x22], #0x2\n"
- "ld1 { v11.h }[2], [x21], #0x2\n"
- "ld1 { v12.h }[2], [x20], #0x2\n"
+ "ld1 { v9.h }[2], [x10], #0x2\n"
+ "ld1 { v10.h }[2], [x9], #0x2\n"
+ "ld1 { v11.h }[2], [x28], #0x2\n"
+ "ld1 { v12.h }[2], [x27], #0x2\n"
"b 7f\n"
"6:" // Oddments: Load inputs (2, 2), (0, 0), (0, 5), (2, 3): Bit 2: Unset: Bit 1: Unset
- "ld1 { v9.h }[0], [x23], #0x2\n"
- "ld1 { v10.h }[0], [x22], #0x2\n"
- "ld1 { v11.h }[0], [x21], #0x2\n"
- "ld1 { v12.h }[0], [x20], #0x2\n"
+ "ld1 { v9.h }[0], [x10], #0x2\n"
+ "ld1 { v10.h }[0], [x9], #0x2\n"
+ "ld1 { v11.h }[0], [x28], #0x2\n"
+ "ld1 { v12.h }[0], [x27], #0x2\n"
"7:" // Oddments: Load inputs (2, 2), (0, 0), (0, 5), (2, 3): Bit 2: End
"mov v16.16b, v13.16b\n fmla v16.8h, v8.8h, v9.8h\n"
"mov v17.16b, v13.16b\n fmla v17.8h, v7.8h, v9.8h\n"
- "ldr x20, [x14, #0x20]\n"
- "add x20, x20, x13\n"
+ "ldr x26, [x13, #0x20]\n"
+ "add x26, x26, x12\n"
"mov v18.16b, v13.16b\n fmla v18.8h, v6.8h, v9.8h\n"
"mov v21.16b, v13.16b\n fmla v21.8h, v4.8h, v9.8h\n"
"mov v22.16b, v13.16b\n fmla v22.8h, v3.8h, v9.8h\n"
@@ -785,75 +785,75 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"fmla v26.8h, v1.8h, v12.8h\n"
"mov v27.16b, v13.16b\n fmla v27.8h, v0.8h, v12.8h\n"
"tbz %x[n_channels], #2, 9f\n"
- "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v10.d }[0], [x26], #0x8\n"
"tbz %x[n_channels], #1, 8f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x26], #0x4\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v10.h }[6], [x20], #0x2\n"
+ "ld1 { v10.h }[6], [x26], #0x2\n"
"b 11f\n"
"8:" // Oddments: Load input (5, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v10.h }[4], [x20], #0x2\n"
+ "ld1 { v10.h }[4], [x26], #0x2\n"
"b 11f\n"
"9:" // Oddments: Load input (5, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 10f\n"
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v10.s }[0], [x26], #0x4\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v10.h }[2], [x20], #0x2\n"
+ "ld1 { v10.h }[2], [x26], #0x2\n"
"b 11f\n"
"10:" // Oddments: Load input (5, 0): Bit 2: Unset: Bit 1: Unset
- "ld1 { v10.h }[0], [x20], #0x2\n"
+ "ld1 { v10.h }[0], [x26], #0x2\n"
"11:" // Oddments: Load input (5, 0): Bit 2: End
- "ldr x20, [x14, #0x28]\n"
+ "ldr x24, [x13, #0x28]\n"
"mov v28.16b, v13.16b\n fmla v28.8h, v6.8h, v10.8h\n"
- "add x20, x20, x13\n"
+ "add x24, x24, x12\n"
"tbz %x[n_channels], #2, 13f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x24], #0x8\n"
"tbz %x[n_channels], #1, 12f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x24], #0x4\n"
"tbz %x[n_channels], #0, 15f\n"
- "ld1 { v11.h }[6], [x20], #0x2\n"
+ "ld1 { v11.h }[6], [x24], #0x2\n"
"b 15f\n"
"12:" // Oddments: Load input (5, 5): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 15f\n"
- "ld1 { v11.h }[4], [x20], #0x2\n"
+ "ld1 { v11.h }[4], [x24], #0x2\n"
"b 15f\n"
"13:" // Oddments: Load input (5, 5): Bit 2: Unset
"tbz %x[n_channels], #1, 14f\n"
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x24], #0x4\n"
"tbz %x[n_channels], #0, 15f\n"
- "ld1 { v11.h }[2], [x20], #0x2\n"
+ "ld1 { v11.h }[2], [x24], #0x2\n"
"b 15f\n"
"14:" // Oddments: Load input (5, 5): Bit 2: Unset: Bit 1: Unset
- "ld1 { v11.h }[0], [x20], #0x2\n"
+ "ld1 { v11.h }[0], [x24], #0x2\n"
"15:" // Oddments: Load input (5, 5): Bit 2: End
- "ldr x20, [x14, #0x30]\n"
+ "ldr x25, [x13, #0x30]\n"
"mov v31.16b, v13.16b\n fmla v31.8h, v8.8h, v11.8h\n"
- "add x20, x20, x13\n"
+ "add x25, x25, x12\n"
"tbz %x[n_channels], #2, 17f\n"
- "ld1 { v9.d }[0], [x20], #0x8\n"
+ "ld1 { v9.d }[0], [x25], #0x8\n"
"tbz %x[n_channels], #1, 16f\n"
- "ld1 { v9.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x25], #0x4\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v9.h }[6], [x20], #0x2\n"
+ "ld1 { v9.h }[6], [x25], #0x2\n"
"b 19f\n"
"16:" // Oddments: Load input (3, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v9.h }[4], [x20], #0x2\n"
+ "ld1 { v9.h }[4], [x25], #0x2\n"
"b 19f\n"
"17:" // Oddments: Load input (3, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 18f\n"
- "ld1 { v9.s }[0], [x20], #0x4\n"
+ "ld1 { v9.s }[0], [x25], #0x4\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v9.h }[2], [x20], #0x2\n"
+ "ld1 { v9.h }[2], [x25], #0x2\n"
"b 19f\n"
"18:" // Oddments: Load input (3, 2): Bit 2: Unset: Bit 1: Unset
- "ld1 { v9.h }[0], [x20], #0x2\n"
+ "ld1 { v9.h }[0], [x25], #0x2\n"
"19:" // Oddments: Load input (3, 2): Bit 2: End
- "ldr x20, [x14, #0x38]\n"
+ "ldr x23, [x13, #0x38]\n"
"fmla v20.8h, v8.8h, v9.8h\n"
"fmla v21.8h, v7.8h, v9.8h\n"
- "add x20, x20, x13\n"
+ "add x23, x23, x12\n"
"fmla v22.8h, v6.8h, v9.8h\n"
"fmla v24.8h, v5.8h, v9.8h\n"
"fmla v25.8h, v4.8h, v9.8h\n"
@@ -862,77 +862,77 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"mov v29.16b, v13.16b\n fmla v29.8h, v1.8h, v9.8h\n"
"mov v30.16b, v13.16b\n fmla v30.8h, v0.8h, v9.8h\n"
"tbz %x[n_channels], #2, 21f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x23], #0x8\n"
"tbz %x[n_channels], #1, 20f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x23], #0x4\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v12.h }[6], [x20], #0x2\n"
+ "ld1 { v12.h }[6], [x23], #0x2\n"
"b 23f\n"
"20:" // Oddments: Load input (0, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v12.h }[4], [x20], #0x2\n"
+ "ld1 { v12.h }[4], [x23], #0x2\n"
"b 23f\n"
"21:" // Oddments: Load input (0, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 22f\n"
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x23], #0x4\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v12.h }[2], [x20], #0x2\n"
+ "ld1 { v12.h }[2], [x23], #0x2\n"
"b 23f\n"
"22:" // Oddments: Load input (0, 1): Bit 2: Unset: Bit 1: Unset
- "ld1 { v12.h }[0], [x20], #0x2\n"
+ "ld1 { v12.h }[0], [x23], #0x2\n"
"23:" // Oddments: Load input (0, 1): Bit 2: End
- "ldr x20, [x14, #0x40]\n"
+ "ldr x10, [x13, #0x40]\n"
"fmla v16.8h, v1.8h, v12.8h\n"
"fmla v17.8h, v0.8h, v12.8h\n"
- "add x20, x20, x13\n"
+ "add x10, x10, x12\n"
"tbz %x[n_channels], #2, 25f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x10], #0x8\n"
"tbz %x[n_channels], #1, 24f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x10], #0x4\n"
"tbz %x[n_channels], #0, 27f\n"
- "ld1 { v11.h }[6], [x20], #0x2\n"
+ "ld1 { v11.h }[6], [x10], #0x2\n"
"b 27f\n"
"24:" // Oddments: Load input (0, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 27f\n"
- "ld1 { v11.h }[4], [x20], #0x2\n"
+ "ld1 { v11.h }[4], [x10], #0x2\n"
"b 27f\n"
"25:" // Oddments: Load input (0, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 26f\n"
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x10], #0x4\n"
"tbz %x[n_channels], #0, 27f\n"
- "ld1 { v11.h }[2], [x20], #0x2\n"
+ "ld1 { v11.h }[2], [x10], #0x2\n"
"b 27f\n"
"26:" // Oddments: Load input (0, 4): Bit 2: Unset: Bit 1: Unset
- "ld1 { v11.h }[0], [x20], #0x2\n"
+ "ld1 { v11.h }[0], [x10], #0x2\n"
"27:" // Oddments: Load input (0, 4): Bit 2: End
- "ldr x20, [x14, #0x48]\n"
+ "ldr x9, [x13, #0x48]\n"
"fmla v18.8h, v2.8h, v11.8h\n"
"fmla v19.8h, v1.8h, v11.8h\n"
- "add x20, x20, x13\n"
+ "add x9, x9, x12\n"
"tbz %x[n_channels], #2, 29f\n"
- "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v10.d }[0], [x9], #0x8\n"
"tbz %x[n_channels], #1, 28f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x9], #0x4\n"
"tbz %x[n_channels], #0, 31f\n"
- "ld1 { v10.h }[6], [x20], #0x2\n"
+ "ld1 { v10.h }[6], [x9], #0x2\n"
"b 31f\n"
"28:" // Oddments: Load input (3, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 31f\n"
- "ld1 { v10.h }[4], [x20], #0x2\n"
+ "ld1 { v10.h }[4], [x9], #0x2\n"
"b 31f\n"
"29:" // Oddments: Load input (3, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 30f\n"
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v10.s }[0], [x9], #0x4\n"
"tbz %x[n_channels], #0, 31f\n"
- "ld1 { v10.h }[2], [x20], #0x2\n"
+ "ld1 { v10.h }[2], [x9], #0x2\n"
"b 31f\n"
"30:" // Oddments: Load input (3, 3): Bit 2: Unset: Bit 1: Unset
- "ld1 { v10.h }[0], [x20], #0x2\n"
+ "ld1 { v10.h }[0], [x9], #0x2\n"
"31:" // Oddments: Load input (3, 3): Bit 2: End
- "ldr x20, [x14, #0x50]\n"
+ "ldr x28, [x13, #0x50]\n"
"fmla v21.8h, v8.8h, v10.8h\n"
"fmla v22.8h, v7.8h, v10.8h\n"
- "add x20, x20, x13\n"
+ "add x28, x28, x12\n"
"fmla v23.8h, v6.8h, v10.8h\n"
"fmla v25.8h, v5.8h, v10.8h\n"
"fmla v26.8h, v4.8h, v10.8h\n"
@@ -941,670 +941,670 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"fmla v30.8h, v1.8h, v10.8h\n"
"fmla v31.8h, v0.8h, v10.8h\n"
"tbz %x[n_channels], #2, 33f\n"
- "ld1 { v9.d }[0], [x20], #0x8\n"
+ "ld1 { v9.d }[0], [x28], #0x8\n"
"tbz %x[n_channels], #1, 32f\n"
- "ld1 { v9.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x28], #0x4\n"
"tbz %x[n_channels], #0, 35f\n"
- "ld1 { v9.h }[6], [x20], #0x2\n"
+ "ld1 { v9.h }[6], [x28], #0x2\n"
"b 35f\n"
"32:" // Oddments: Load input (1, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 35f\n"
- "ld1 { v9.h }[4], [x20], #0x2\n"
+ "ld1 { v9.h }[4], [x28], #0x2\n"
"b 35f\n"
"33:" // Oddments: Load input (1, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 34f\n"
- "ld1 { v9.s }[0], [x20], #0x4\n"
+ "ld1 { v9.s }[0], [x28], #0x4\n"
"tbz %x[n_channels], #0, 35f\n"
- "ld1 { v9.h }[2], [x20], #0x2\n"
+ "ld1 { v9.h }[2], [x28], #0x2\n"
"b 35f\n"
"34:" // Oddments: Load input (1, 0): Bit 2: Unset: Bit 1: Unset
- "ld1 { v9.h }[0], [x20], #0x2\n"
+ "ld1 { v9.h }[0], [x28], #0x2\n"
"35:" // Oddments: Load input (1, 0): Bit 2: End
- "ldr x20, [x14, #0x58]\n"
+ "ldr x27, [x13, #0x58]\n"
"fmla v16.8h, v3.8h, v9.8h\n"
"fmla v20.8h, v0.8h, v9.8h\n"
- "add x20, x20, x13\n"
+ "add x27, x27, x12\n"
"tbz %x[n_channels], #2, 37f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x27], #0x8\n"
"tbz %x[n_channels], #1, 36f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x27], #0x4\n"
"tbz %x[n_channels], #0, 39f\n"
- "ld1 { v12.h }[6], [x20], #0x2\n"
+ "ld1 { v12.h }[6], [x27], #0x2\n"
"b 39f\n"
"36:" // Oddments: Load input (1, 5): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 39f\n"
- "ld1 { v12.h }[4], [x20], #0x2\n"
+ "ld1 { v12.h }[4], [x27], #0x2\n"
"b 39f\n"
"37:" // Oddments: Load input (1, 5): Bit 2: Unset
"tbz %x[n_channels], #1, 38f\n"
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x27], #0x4\n"
"tbz %x[n_channels], #0, 39f\n"
- "ld1 { v12.h }[2], [x20], #0x2\n"
+ "ld1 { v12.h }[2], [x27], #0x2\n"
"b 39f\n"
"38:" // Oddments: Load input (1, 5): Bit 2: Unset: Bit 1: Unset
- "ld1 { v12.h }[0], [x20], #0x2\n"
+ "ld1 { v12.h }[0], [x27], #0x2\n"
"39:" // Oddments: Load input (1, 5): Bit 2: End
- "ldr x20, [x14, #0x60]\n"
+ "ldr x26, [x13, #0x60]\n"
"fmla v19.8h, v5.8h, v12.8h\n"
"fmla v23.8h, v2.8h, v12.8h\n"
- "add x20, x20, x13\n"
+ "add x26, x26, x12\n"
"tbz %x[n_channels], #2, 41f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x26], #0x8\n"
"tbz %x[n_channels], #1, 40f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x26], #0x4\n"
"tbz %x[n_channels], #0, 43f\n"
- "ld1 { v11.h }[6], [x20], #0x2\n"
+ "ld1 { v11.h }[6], [x26], #0x2\n"
"b 43f\n"
"40:" // Oddments: Load input (4, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 43f\n"
- "ld1 { v11.h }[4], [x20], #0x2\n"
+ "ld1 { v11.h }[4], [x26], #0x2\n"
"b 43f\n"
"41:" // Oddments: Load input (4, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 42f\n"
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x26], #0x4\n"
"tbz %x[n_channels], #0, 43f\n"
- "ld1 { v11.h }[2], [x20], #0x2\n"
+ "ld1 { v11.h }[2], [x26], #0x2\n"
"b 43f\n"
"42:" // Oddments: Load input (4, 0): Bit 2: Unset: Bit 1: Unset
- "ld1 { v11.h }[0], [x20], #0x2\n"
+ "ld1 { v11.h }[0], [x26], #0x2\n"
"43:" // Oddments: Load input (4, 0): Bit 2: End
- "ldr x20, [x14, #0x68]\n"
+ "ldr x24, [x13, #0x68]\n"
"fmla v24.8h, v6.8h, v11.8h\n"
"fmla v28.8h, v3.8h, v11.8h\n"
- "add x20, x20, x13\n"
+ "add x24, x24, x12\n"
"tbz %x[n_channels], #2, 45f\n"
- "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v10.d }[0], [x24], #0x8\n"
"tbz %x[n_channels], #1, 44f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x24], #0x4\n"
"tbz %x[n_channels], #0, 47f\n"
- "ld1 { v10.h }[6], [x20], #0x2\n"
+ "ld1 { v10.h }[6], [x24], #0x2\n"
"b 47f\n"
"44:" // Oddments: Load input (1, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 47f\n"
- "ld1 { v10.h }[4], [x20], #0x2\n"
+ "ld1 { v10.h }[4], [x24], #0x2\n"
"b 47f\n"
"45:" // Oddments: Load input (1, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 46f\n"
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v10.s }[0], [x24], #0x4\n"
"tbz %x[n_channels], #0, 47f\n"
- "ld1 { v10.h }[2], [x20], #0x2\n"
+ "ld1 { v10.h }[2], [x24], #0x2\n"
"b 47f\n"
"46:" // Oddments: Load input (1, 2): Bit 2: Unset: Bit 1: Unset
- "ld1 { v10.h }[0], [x20], #0x2\n"
+ "ld1 { v10.h }[0], [x24], #0x2\n"
"47:" // Oddments: Load input (1, 2): Bit 2: End
- "ldr x20, [x14, #0x70]\n"
+ "ldr x25, [x13, #0x70]\n"
"fmla v16.8h, v5.8h, v10.8h\n"
"fmla v17.8h, v4.8h, v10.8h\n"
- "add x20, x20, x13\n"
+ "add x25, x25, x12\n"
"fmla v18.8h, v3.8h, v10.8h\n"
"fmla v20.8h, v2.8h, v10.8h\n"
"fmla v21.8h, v1.8h, v10.8h\n"
"fmla v22.8h, v0.8h, v10.8h\n"
"tbz %x[n_channels], #2, 49f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x25], #0x8\n"
"tbz %x[n_channels], #1, 48f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x25], #0x4\n"
"tbz %x[n_channels], #0, 51f\n"
- "ld1 { v11.h }[6], [x20], #0x2\n"
+ "ld1 { v11.h }[6], [x25], #0x2\n"
"b 51f\n"
"48:" // Oddments: Load input (4, 5): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 51f\n"
- "ld1 { v11.h }[4], [x20], #0x2\n"
+ "ld1 { v11.h }[4], [x25], #0x2\n"
"b 51f\n"
"49:" // Oddments: Load input (4, 5): Bit 2: Unset
"tbz %x[n_channels], #1, 50f\n"
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x25], #0x4\n"
"tbz %x[n_channels], #0, 51f\n"
- "ld1 { v11.h }[2], [x20], #0x2\n"
+ "ld1 { v11.h }[2], [x25], #0x2\n"
"b 51f\n"
"50:" // Oddments: Load input (4, 5): Bit 2: Unset: Bit 1: Unset
- "ld1 { v11.h }[0], [x20], #0x2\n"
+ "ld1 { v11.h }[0], [x25], #0x2\n"
"51:" // Oddments: Load input (4, 5): Bit 2: End
- "ldr x20, [x14, #0x78]\n"
+ "ldr x23, [x13, #0x78]\n"
"fmla v27.8h, v8.8h, v11.8h\n"
"fmla v31.8h, v5.8h, v11.8h\n"
- "add x20, x20, x13\n"
+ "add x23, x23, x12\n"
"tbz %x[n_channels], #2, 53f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x23], #0x8\n"
"tbz %x[n_channels], #1, 52f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x23], #0x4\n"
"tbz %x[n_channels], #0, 55f\n"
- "ld1 { v12.h }[6], [x20], #0x2\n"
+ "ld1 { v12.h }[6], [x23], #0x2\n"
"b 55f\n"
"52:" // Oddments: Load input (1, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 55f\n"
- "ld1 { v12.h }[4], [x20], #0x2\n"
+ "ld1 { v12.h }[4], [x23], #0x2\n"
"b 55f\n"
"53:" // Oddments: Load input (1, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 54f\n"
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x23], #0x4\n"
"tbz %x[n_channels], #0, 55f\n"
- "ld1 { v12.h }[2], [x20], #0x2\n"
+ "ld1 { v12.h }[2], [x23], #0x2\n"
"b 55f\n"
"54:" // Oddments: Load input (1, 3): Bit 2: Unset: Bit 1: Unset
- "ld1 { v12.h }[0], [x20], #0x2\n"
+ "ld1 { v12.h }[0], [x23], #0x2\n"
"55:" // Oddments: Load input (1, 3): Bit 2: End
- "ldr x20, [x14, #0x80]\n"
+ "ldr x10, [x13, #0x80]\n"
"fmla v17.8h, v5.8h, v12.8h\n"
"fmla v18.8h, v4.8h, v12.8h\n"
- "add x20, x20, x13\n"
+ "add x10, x10, x12\n"
"fmla v19.8h, v3.8h, v12.8h\n"
"fmla v21.8h, v2.8h, v12.8h\n"
"fmla v22.8h, v1.8h, v12.8h\n"
"fmla v23.8h, v0.8h, v12.8h\n"
"tbz %x[n_channels], #2, 57f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x10], #0x8\n"
"tbz %x[n_channels], #1, 56f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x10], #0x4\n"
"tbz %x[n_channels], #0, 59f\n"
- "ld1 { v11.h }[6], [x20], #0x2\n"
+ "ld1 { v11.h }[6], [x10], #0x2\n"
"b 59f\n"
"56:" // Oddments: Load input (5, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 59f\n"
- "ld1 { v11.h }[4], [x20], #0x2\n"
+ "ld1 { v11.h }[4], [x10], #0x2\n"
"b 59f\n"
"57:" // Oddments: Load input (5, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 58f\n"
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x10], #0x4\n"
"tbz %x[n_channels], #0, 59f\n"
- "ld1 { v11.h }[2], [x20], #0x2\n"
+ "ld1 { v11.h }[2], [x10], #0x2\n"
"b 59f\n"
"58:" // Oddments: Load input (5, 1): Bit 2: Unset: Bit 1: Unset
- "ld1 { v11.h }[0], [x20], #0x2\n"
+ "ld1 { v11.h }[0], [x10], #0x2\n"
"59:" // Oddments: Load input (5, 1): Bit 2: End
- "ldr x20, [x14, #0x88]\n"
+ "ldr x9, [x13, #0x88]\n"
"fmla v28.8h, v7.8h, v11.8h\n"
"fmla v29.8h, v6.8h, v11.8h\n"
- "add x20, x20, x13\n"
+ "add x9, x9, x12\n"
"tbz %x[n_channels], #2, 61f\n"
- "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v10.d }[0], [x9], #0x8\n"
"tbz %x[n_channels], #1, 60f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x9], #0x4\n"
"tbz %x[n_channels], #0, 63f\n"
- "ld1 { v10.h }[6], [x20], #0x2\n"
+ "ld1 { v10.h }[6], [x9], #0x2\n"
"b 63f\n"
"60:" // Oddments: Load input (2, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 63f\n"
- "ld1 { v10.h }[4], [x20], #0x2\n"
+ "ld1 { v10.h }[4], [x9], #0x2\n"
"b 63f\n"
"61:" // Oddments: Load input (2, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 62f\n"
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v10.s }[0], [x9], #0x4\n"
"tbz %x[n_channels], #0, 63f\n"
- "ld1 { v10.h }[2], [x20], #0x2\n"
+ "ld1 { v10.h }[2], [x9], #0x2\n"
"b 63f\n"
"62:" // Oddments: Load input (2, 1): Bit 2: Unset: Bit 1: Unset
- "ld1 { v10.h }[0], [x20], #0x2\n"
+ "ld1 { v10.h }[0], [x9], #0x2\n"
"63:" // Oddments: Load input (2, 1): Bit 2: End
- "ldr x20, [x14, #0x90]\n"
+ "ldr x28, [x13, #0x90]\n"
"fmla v16.8h, v7.8h, v10.8h\n"
"fmla v17.8h, v6.8h, v10.8h\n"
- "add x20, x20, x13\n"
+ "add x28, x28, x12\n"
"fmla v20.8h, v4.8h, v10.8h\n"
"fmla v21.8h, v3.8h, v10.8h\n"
"fmla v24.8h, v1.8h, v10.8h\n"
"fmla v25.8h, v0.8h, v10.8h\n"
"tbz %x[n_channels], #2, 65f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x28], #0x8\n"
"tbz %x[n_channels], #1, 64f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x28], #0x4\n"
"tbz %x[n_channels], #0, 67f\n"
- "ld1 { v11.h }[6], [x20], #0x2\n"
+ "ld1 { v11.h }[6], [x28], #0x2\n"
"b 67f\n"
"64:" // Oddments: Load input (5, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 67f\n"
- "ld1 { v11.h }[4], [x20], #0x2\n"
+ "ld1 { v11.h }[4], [x28], #0x2\n"
"b 67f\n"
"65:" // Oddments: Load input (5, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 66f\n"
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x28], #0x4\n"
"tbz %x[n_channels], #0, 67f\n"
- "ld1 { v11.h }[2], [x20], #0x2\n"
+ "ld1 { v11.h }[2], [x28], #0x2\n"
"b 67f\n"
"66:" // Oddments: Load input (5, 4): Bit 2: Unset: Bit 1: Unset
- "ld1 { v11.h }[0], [x20], #0x2\n"
+ "ld1 { v11.h }[0], [x28], #0x2\n"
"67:" // Oddments: Load input (5, 4): Bit 2: End
- "ldr x20, [x14, #0x98]\n"
+ "ldr x27, [x13, #0x98]\n"
"fmla v30.8h, v8.8h, v11.8h\n"
"fmla v31.8h, v7.8h, v11.8h\n"
- "add x20, x20, x13\n"
+ "add x27, x27, x12\n"
"tbz %x[n_channels], #2, 69f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x27], #0x8\n"
"tbz %x[n_channels], #1, 68f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x27], #0x4\n"
"tbz %x[n_channels], #0, 71f\n"
- "ld1 { v12.h }[6], [x20], #0x2\n"
+ "ld1 { v12.h }[6], [x27], #0x2\n"
"b 71f\n"
"68:" // Oddments: Load input (2, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 71f\n"
- "ld1 { v12.h }[4], [x20], #0x2\n"
+ "ld1 { v12.h }[4], [x27], #0x2\n"
"b 71f\n"
"69:" // Oddments: Load input (2, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 70f\n"
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x27], #0x4\n"
"tbz %x[n_channels], #0, 71f\n"
- "ld1 { v12.h }[2], [x20], #0x2\n"
+ "ld1 { v12.h }[2], [x27], #0x2\n"
"b 71f\n"
"70:" // Oddments: Load input (2, 4): Bit 2: Unset: Bit 1: Unset
- "ld1 { v12.h }[0], [x20], #0x2\n"
+ "ld1 { v12.h }[0], [x27], #0x2\n"
"71:" // Oddments: Load input (2, 4): Bit 2: End
- "ldr x20, [x14, #0xa0]\n"
+ "ldr x26, [x13, #0xa0]\n"
"fmla v18.8h, v8.8h, v12.8h\n"
"fmla v19.8h, v7.8h, v12.8h\n"
- "add x20, x20, x13\n"
+ "add x26, x26, x12\n"
"fmla v22.8h, v5.8h, v12.8h\n"
"fmla v23.8h, v4.8h, v12.8h\n"
"fmla v26.8h, v2.8h, v12.8h\n"
"fmla v27.8h, v1.8h, v12.8h\n"
"tbz %x[n_channels], #2, 73f\n"
- "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v10.d }[0], [x26], #0x8\n"
"tbz %x[n_channels], #1, 72f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x26], #0x4\n"
"tbz %x[n_channels], #0, 75f\n"
- "ld1 { v10.h }[6], [x20], #0x2\n"
+ "ld1 { v10.h }[6], [x26], #0x2\n"
"b 75f\n"
"72:" // Oddments: Load input (0, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 75f\n"
- "ld1 { v10.h }[4], [x20], #0x2\n"
+ "ld1 { v10.h }[4], [x26], #0x2\n"
"b 75f\n"
"73:" // Oddments: Load input (0, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 74f\n"
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v10.s }[0], [x26], #0x4\n"
"tbz %x[n_channels], #0, 75f\n"
- "ld1 { v10.h }[2], [x20], #0x2\n"
+ "ld1 { v10.h }[2], [x26], #0x2\n"
"b 75f\n"
"74:" // Oddments: Load input (0, 2): Bit 2: Unset: Bit 1: Unset
- "ld1 { v10.h }[0], [x20], #0x2\n"
+ "ld1 { v10.h }[0], [x26], #0x2\n"
"75:" // Oddments: Load input (0, 2): Bit 2: End
- "ldr x20, [x14, #0xa8]\n"
+ "ldr x24, [x13, #0xa8]\n"
"fmla v16.8h, v2.8h, v10.8h\n"
"fmla v17.8h, v1.8h, v10.8h\n"
- "add x20, x20, x13\n"
+ "add x24, x24, x12\n"
"fmla v18.8h, v0.8h, v10.8h\n"
"tbz %x[n_channels], #2, 77f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x24], #0x8\n"
"tbz %x[n_channels], #1, 76f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x24], #0x4\n"
"tbz %x[n_channels], #0, 79f\n"
- "ld1 { v11.h }[6], [x20], #0x2\n"
+ "ld1 { v11.h }[6], [x24], #0x2\n"
"b 79f\n"
"76:" // Oddments: Load input (3, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 79f\n"
- "ld1 { v11.h }[4], [x20], #0x2\n"
+ "ld1 { v11.h }[4], [x24], #0x2\n"
"b 79f\n"
"77:" // Oddments: Load input (3, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 78f\n"
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x24], #0x4\n"
"tbz %x[n_channels], #0, 79f\n"
- "ld1 { v11.h }[2], [x20], #0x2\n"
+ "ld1 { v11.h }[2], [x24], #0x2\n"
"b 79f\n"
"78:" // Oddments: Load input (3, 1): Bit 2: Unset: Bit 1: Unset
- "ld1 { v11.h }[0], [x20], #0x2\n"
+ "ld1 { v11.h }[0], [x24], #0x2\n"
"79:" // Oddments: Load input (3, 1): Bit 2: End
- "ldr x20, [x14, #0xb0]\n"
+ "ldr x25, [x13, #0xb0]\n"
"fmla v20.8h, v7.8h, v11.8h\n"
"fmla v21.8h, v6.8h, v11.8h\n"
- "add x20, x20, x13\n"
+ "add x25, x25, x12\n"
"fmla v24.8h, v4.8h, v11.8h\n"
"fmla v25.8h, v3.8h, v11.8h\n"
"fmla v28.8h, v1.8h, v11.8h\n"
"fmla v29.8h, v0.8h, v11.8h\n"
"tbz %x[n_channels], #2, 81f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x25], #0x8\n"
"tbz %x[n_channels], #1, 80f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x25], #0x4\n"
"tbz %x[n_channels], #0, 83f\n"
- "ld1 { v12.h }[6], [x20], #0x2\n"
+ "ld1 { v12.h }[6], [x25], #0x2\n"
"b 83f\n"
"80:" // Oddments: Load input (0, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 83f\n"
- "ld1 { v12.h }[4], [x20], #0x2\n"
+ "ld1 { v12.h }[4], [x25], #0x2\n"
"b 83f\n"
"81:" // Oddments: Load input (0, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 82f\n"
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x25], #0x4\n"
"tbz %x[n_channels], #0, 83f\n"
- "ld1 { v12.h }[2], [x20], #0x2\n"
+ "ld1 { v12.h }[2], [x25], #0x2\n"
"b 83f\n"
"82:" // Oddments: Load input (0, 3): Bit 2: Unset: Bit 1: Unset
- "ld1 { v12.h }[0], [x20], #0x2\n"
+ "ld1 { v12.h }[0], [x25], #0x2\n"
"83:" // Oddments: Load input (0, 3): Bit 2: End
- "ldr x20, [x14, #0xb8]\n"
+ "ldr x23, [x13, #0xb8]\n"
"fmla v17.8h, v2.8h, v12.8h\n"
"fmla v18.8h, v1.8h, v12.8h\n"
- "add x20, x20, x13\n"
+ "add x23, x23, x12\n"
"fmla v19.8h, v0.8h, v12.8h\n"
"tbz %x[n_channels], #2, 85f\n"
- "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v10.d }[0], [x23], #0x8\n"
"tbz %x[n_channels], #1, 84f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x23], #0x4\n"
"tbz %x[n_channels], #0, 87f\n"
- "ld1 { v10.h }[6], [x20], #0x2\n"
+ "ld1 { v10.h }[6], [x23], #0x2\n"
"b 87f\n"
"84:" // Oddments: Load input (2, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 87f\n"
- "ld1 { v10.h }[4], [x20], #0x2\n"
+ "ld1 { v10.h }[4], [x23], #0x2\n"
"b 87f\n"
"85:" // Oddments: Load input (2, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 86f\n"
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v10.s }[0], [x23], #0x4\n"
"tbz %x[n_channels], #0, 87f\n"
- "ld1 { v10.h }[2], [x20], #0x2\n"
+ "ld1 { v10.h }[2], [x23], #0x2\n"
"b 87f\n"
"86:" // Oddments: Load input (2, 0): Bit 2: Unset: Bit 1: Unset
- "ld1 { v10.h }[0], [x20], #0x2\n"
+ "ld1 { v10.h }[0], [x23], #0x2\n"
"87:" // Oddments: Load input (2, 0): Bit 2: End
- "ldr x20, [x14, #0xc0]\n"
+ "ldr x10, [x13, #0xc0]\n"
"fmla v16.8h, v6.8h, v10.8h\n"
"fmla v20.8h, v3.8h, v10.8h\n"
- "add x20, x20, x13\n"
+ "add x10, x10, x12\n"
"fmla v24.8h, v0.8h, v10.8h\n"
"tbz %x[n_channels], #2, 89f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x10], #0x8\n"
"tbz %x[n_channels], #1, 88f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x10], #0x4\n"
"tbz %x[n_channels], #0, 91f\n"
- "ld1 { v11.h }[6], [x20], #0x2\n"
+ "ld1 { v11.h }[6], [x10], #0x2\n"
"b 91f\n"
"88:" // Oddments: Load input (3, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 91f\n"
- "ld1 { v11.h }[4], [x20], #0x2\n"
+ "ld1 { v11.h }[4], [x10], #0x2\n"
"b 91f\n"
"89:" // Oddments: Load input (3, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 90f\n"
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x10], #0x4\n"
"tbz %x[n_channels], #0, 91f\n"
- "ld1 { v11.h }[2], [x20], #0x2\n"
+ "ld1 { v11.h }[2], [x10], #0x2\n"
"b 91f\n"
"90:" // Oddments: Load input (3, 4): Bit 2: Unset: Bit 1: Unset
- "ld1 { v11.h }[0], [x20], #0x2\n"
+ "ld1 { v11.h }[0], [x10], #0x2\n"
"91:" // Oddments: Load input (3, 4): Bit 2: End
- "ldr x20, [x14, #0xc8]\n"
+ "ldr x9, [x13, #0xc8]\n"
"fmla v22.8h, v8.8h, v11.8h\n"
"fmla v23.8h, v7.8h, v11.8h\n"
- "add x20, x20, x13\n"
+ "add x9, x9, x12\n"
"fmla v26.8h, v5.8h, v11.8h\n"
"fmla v27.8h, v4.8h, v11.8h\n"
"fmla v30.8h, v2.8h, v11.8h\n"
"fmla v31.8h, v1.8h, v11.8h\n"
"tbz %x[n_channels], #2, 93f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x9], #0x8\n"
"tbz %x[n_channels], #1, 92f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x9], #0x4\n"
"tbz %x[n_channels], #0, 95f\n"
- "ld1 { v12.h }[6], [x20], #0x2\n"
+ "ld1 { v12.h }[6], [x9], #0x2\n"
"b 95f\n"
"92:" // Oddments: Load input (2, 5): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 95f\n"
- "ld1 { v12.h }[4], [x20], #0x2\n"
+ "ld1 { v12.h }[4], [x9], #0x2\n"
"b 95f\n"
"93:" // Oddments: Load input (2, 5): Bit 2: Unset
"tbz %x[n_channels], #1, 94f\n"
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x9], #0x4\n"
"tbz %x[n_channels], #0, 95f\n"
- "ld1 { v12.h }[2], [x20], #0x2\n"
+ "ld1 { v12.h }[2], [x9], #0x2\n"
"b 95f\n"
"94:" // Oddments: Load input (2, 5): Bit 2: Unset: Bit 1: Unset
- "ld1 { v12.h }[0], [x20], #0x2\n"
+ "ld1 { v12.h }[0], [x9], #0x2\n"
"95:" // Oddments: Load input (2, 5): Bit 2: End
- "ldr x20, [x14, #0xd0]\n"
+ "ldr x28, [x13, #0xd0]\n"
"fmla v19.8h, v8.8h, v12.8h\n"
"fmla v23.8h, v5.8h, v12.8h\n"
- "add x20, x20, x13\n"
+ "add x28, x28, x12\n"
"fmla v27.8h, v2.8h, v12.8h\n"
"tbz %x[n_channels], #2, 97f\n"
- "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v10.d }[0], [x28], #0x8\n"
"tbz %x[n_channels], #1, 96f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x28], #0x4\n"
"tbz %x[n_channels], #0, 99f\n"
- "ld1 { v10.h }[6], [x20], #0x2\n"
+ "ld1 { v10.h }[6], [x28], #0x2\n"
"b 99f\n"
"96:" // Oddments: Load input (3, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 99f\n"
- "ld1 { v10.h }[4], [x20], #0x2\n"
+ "ld1 { v10.h }[4], [x28], #0x2\n"
"b 99f\n"
"97:" // Oddments: Load input (3, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 98f\n"
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v10.s }[0], [x28], #0x4\n"
"tbz %x[n_channels], #0, 99f\n"
- "ld1 { v10.h }[2], [x20], #0x2\n"
+ "ld1 { v10.h }[2], [x28], #0x2\n"
"b 99f\n"
"98:" // Oddments: Load input (3, 0): Bit 2: Unset: Bit 1: Unset
- "ld1 { v10.h }[0], [x20], #0x2\n"
+ "ld1 { v10.h }[0], [x28], #0x2\n"
"99:" // Oddments: Load input (3, 0): Bit 2: End
- "ldr x20, [x14, #0xd8]\n"
+ "ldr x27, [x13, #0xd8]\n"
"fmla v20.8h, v6.8h, v10.8h\n"
"fmla v24.8h, v3.8h, v10.8h\n"
- "add x20, x20, x13\n"
+ "add x27, x27, x12\n"
"fmla v28.8h, v0.8h, v10.8h\n"
"tbz %x[n_channels], #2, 101f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x27], #0x8\n"
"tbz %x[n_channels], #1, 100f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x27], #0x4\n"
"tbz %x[n_channels], #0, 103f\n"
- "ld1 { v11.h }[6], [x20], #0x2\n"
+ "ld1 { v11.h }[6], [x27], #0x2\n"
"b 103f\n"
"100:" // Oddments: Load input (4, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 103f\n"
- "ld1 { v11.h }[4], [x20], #0x2\n"
+ "ld1 { v11.h }[4], [x27], #0x2\n"
"b 103f\n"
"101:" // Oddments: Load input (4, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 102f\n"
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x27], #0x4\n"
"tbz %x[n_channels], #0, 103f\n"
- "ld1 { v11.h }[2], [x20], #0x2\n"
+ "ld1 { v11.h }[2], [x27], #0x2\n"
"b 103f\n"
"102:" // Oddments: Load input (4, 2): Bit 2: Unset: Bit 1: Unset
- "ld1 { v11.h }[0], [x20], #0x2\n"
+ "ld1 { v11.h }[0], [x27], #0x2\n"
"103:" // Oddments: Load input (4, 2): Bit 2: End
- "ldr x20, [x14, #0xe0]\n"
+ "ldr x26, [x13, #0xe0]\n"
"fmla v24.8h, v8.8h, v11.8h\n"
"fmla v25.8h, v7.8h, v11.8h\n"
- "add x20, x20, x13\n"
+ "add x26, x26, x12\n"
"fmla v26.8h, v6.8h, v11.8h\n"
"fmla v28.8h, v5.8h, v11.8h\n"
"fmla v29.8h, v4.8h, v11.8h\n"
"fmla v30.8h, v3.8h, v11.8h\n"
"tbz %x[n_channels], #2, 105f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x26], #0x8\n"
"tbz %x[n_channels], #1, 104f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x26], #0x4\n"
"tbz %x[n_channels], #0, 107f\n"
- "ld1 { v12.h }[6], [x20], #0x2\n"
+ "ld1 { v12.h }[6], [x26], #0x2\n"
"b 107f\n"
"104:" // Oddments: Load input (3, 5): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 107f\n"
- "ld1 { v12.h }[4], [x20], #0x2\n"
+ "ld1 { v12.h }[4], [x26], #0x2\n"
"b 107f\n"
"105:" // Oddments: Load input (3, 5): Bit 2: Unset
"tbz %x[n_channels], #1, 106f\n"
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x26], #0x4\n"
"tbz %x[n_channels], #0, 107f\n"
- "ld1 { v12.h }[2], [x20], #0x2\n"
+ "ld1 { v12.h }[2], [x26], #0x2\n"
"b 107f\n"
"106:" // Oddments: Load input (3, 5): Bit 2: Unset: Bit 1: Unset
- "ld1 { v12.h }[0], [x20], #0x2\n"
+ "ld1 { v12.h }[0], [x26], #0x2\n"
"107:" // Oddments: Load input (3, 5): Bit 2: End
- "ldr x20, [x14, #0xe8]\n"
+ "ldr x24, [x13, #0xe8]\n"
"fmla v23.8h, v8.8h, v12.8h\n"
"fmla v27.8h, v5.8h, v12.8h\n"
- "add x20, x20, x13\n"
+ "add x24, x24, x12\n"
"fmla v31.8h, v2.8h, v12.8h\n"
"tbz %x[n_channels], #2, 109f\n"
- "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v10.d }[0], [x24], #0x8\n"
"tbz %x[n_channels], #1, 108f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x24], #0x4\n"
"tbz %x[n_channels], #0, 111f\n"
- "ld1 { v10.h }[6], [x20], #0x2\n"
+ "ld1 { v10.h }[6], [x24], #0x2\n"
"b 111f\n"
"108:" // Oddments: Load input (5, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 111f\n"
- "ld1 { v10.h }[4], [x20], #0x2\n"
+ "ld1 { v10.h }[4], [x24], #0x2\n"
"b 111f\n"
"109:" // Oddments: Load input (5, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 110f\n"
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v10.s }[0], [x24], #0x4\n"
"tbz %x[n_channels], #0, 111f\n"
- "ld1 { v10.h }[2], [x20], #0x2\n"
+ "ld1 { v10.h }[2], [x24], #0x2\n"
"b 111f\n"
"110:" // Oddments: Load input (5, 2): Bit 2: Unset: Bit 1: Unset
- "ld1 { v10.h }[0], [x20], #0x2\n"
+ "ld1 { v10.h }[0], [x24], #0x2\n"
"111:" // Oddments: Load input (5, 2): Bit 2: End
- "ldr x20, [x14, #0xf0]\n"
+ "ldr x25, [x13, #0xf0]\n"
"fmla v28.8h, v8.8h, v10.8h\n"
"fmla v29.8h, v7.8h, v10.8h\n"
- "add x20, x20, x13\n"
+ "add x25, x25, x12\n"
"fmla v30.8h, v6.8h, v10.8h\n"
"tbz %x[n_channels], #2, 113f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x25], #0x8\n"
"tbz %x[n_channels], #1, 112f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x25], #0x4\n"
"tbz %x[n_channels], #0, 115f\n"
- "ld1 { v11.h }[6], [x20], #0x2\n"
+ "ld1 { v11.h }[6], [x25], #0x2\n"
"b 115f\n"
"112:" // Oddments: Load input (4, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 115f\n"
- "ld1 { v11.h }[4], [x20], #0x2\n"
+ "ld1 { v11.h }[4], [x25], #0x2\n"
"b 115f\n"
"113:" // Oddments: Load input (4, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 114f\n"
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x25], #0x4\n"
"tbz %x[n_channels], #0, 115f\n"
- "ld1 { v11.h }[2], [x20], #0x2\n"
+ "ld1 { v11.h }[2], [x25], #0x2\n"
"b 115f\n"
"114:" // Oddments: Load input (4, 3): Bit 2: Unset: Bit 1: Unset
- "ld1 { v11.h }[0], [x20], #0x2\n"
+ "ld1 { v11.h }[0], [x25], #0x2\n"
"115:" // Oddments: Load input (4, 3): Bit 2: End
- "ldr x20, [x14, #0xf8]\n"
+ "ldr x23, [x13, #0xf8]\n"
"fmla v25.8h, v8.8h, v11.8h\n"
"fmla v26.8h, v7.8h, v11.8h\n"
- "add x20, x20, x13\n"
+ "add x23, x23, x12\n"
"fmla v27.8h, v6.8h, v11.8h\n"
"fmla v29.8h, v5.8h, v11.8h\n"
"fmla v30.8h, v4.8h, v11.8h\n"
"fmla v31.8h, v3.8h, v11.8h\n"
"tbz %x[n_channels], #2, 117f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x23], #0x8\n"
"tbz %x[n_channels], #1, 116f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x23], #0x4\n"
"tbz %x[n_channels], #0, 119f\n"
- "ld1 { v12.h }[6], [x20], #0x2\n"
+ "ld1 { v12.h }[6], [x23], #0x2\n"
"b 119f\n"
"116:" // Oddments: Load input (5, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 119f\n"
- "ld1 { v12.h }[4], [x20], #0x2\n"
+ "ld1 { v12.h }[4], [x23], #0x2\n"
"b 119f\n"
"117:" // Oddments: Load input (5, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 118f\n"
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x23], #0x4\n"
"tbz %x[n_channels], #0, 119f\n"
- "ld1 { v12.h }[2], [x20], #0x2\n"
+ "ld1 { v12.h }[2], [x23], #0x2\n"
"b 119f\n"
"118:" // Oddments: Load input (5, 3): Bit 2: Unset: Bit 1: Unset
- "ld1 { v12.h }[0], [x20], #0x2\n"
+ "ld1 { v12.h }[0], [x23], #0x2\n"
"119:" // Oddments: Load input (5, 3): Bit 2: End
- "ldr x20, [x14, #0x100]\n"
+ "ldr x10, [x13, #0x100]\n"
"fmla v29.8h, v8.8h, v12.8h\n"
"fmla v30.8h, v7.8h, v12.8h\n"
- "add x20, x20, x13\n"
+ "add x10, x10, x12\n"
"fmla v31.8h, v6.8h, v12.8h\n"
"tbz %x[n_channels], #2, 121f\n"
- "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v10.d }[0], [x10], #0x8\n"
"tbz %x[n_channels], #1, 120f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x10], #0x4\n"
"tbz %x[n_channels], #0, 123f\n"
- "ld1 { v10.h }[6], [x20], #0x2\n"
+ "ld1 { v10.h }[6], [x10], #0x2\n"
"b 123f\n"
"120:" // Oddments: Load input (1, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 123f\n"
- "ld1 { v10.h }[4], [x20], #0x2\n"
+ "ld1 { v10.h }[4], [x10], #0x2\n"
"b 123f\n"
"121:" // Oddments: Load input (1, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 122f\n"
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v10.s }[0], [x10], #0x4\n"
"tbz %x[n_channels], #0, 123f\n"
- "ld1 { v10.h }[2], [x20], #0x2\n"
+ "ld1 { v10.h }[2], [x10], #0x2\n"
"b 123f\n"
"122:" // Oddments: Load input (1, 1): Bit 2: Unset: Bit 1: Unset
- "ld1 { v10.h }[0], [x20], #0x2\n"
+ "ld1 { v10.h }[0], [x10], #0x2\n"
"123:" // Oddments: Load input (1, 1): Bit 2: End
- "ldr x20, [x14, #0x108]\n"
+ "ldr x9, [x13, #0x108]\n"
"fmla v16.8h, v4.8h, v10.8h\n"
"fmla v17.8h, v3.8h, v10.8h\n"
- "add x20, x20, x13\n"
+ "add x9, x9, x12\n"
"fmla v20.8h, v1.8h, v10.8h\n"
"fmla v21.8h, v0.8h, v10.8h\n"
"tbz %x[n_channels], #2, 125f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x9], #0x8\n"
"tbz %x[n_channels], #1, 124f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x9], #0x4\n"
"tbz %x[n_channels], #0, 127f\n"
- "ld1 { v11.h }[6], [x20], #0x2\n"
+ "ld1 { v11.h }[6], [x9], #0x2\n"
"b 127f\n"
"124:" // Oddments: Load input (1, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 127f\n"
- "ld1 { v11.h }[4], [x20], #0x2\n"
+ "ld1 { v11.h }[4], [x9], #0x2\n"
"b 127f\n"
"125:" // Oddments: Load input (1, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 126f\n"
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x9], #0x4\n"
"tbz %x[n_channels], #0, 127f\n"
- "ld1 { v11.h }[2], [x20], #0x2\n"
+ "ld1 { v11.h }[2], [x9], #0x2\n"
"b 127f\n"
"126:" // Oddments: Load input (1, 4): Bit 2: Unset: Bit 1: Unset
- "ld1 { v11.h }[0], [x20], #0x2\n"
+ "ld1 { v11.h }[0], [x9], #0x2\n"
"127:" // Oddments: Load input (1, 4): Bit 2: End
- "ldr x20, [x14, #0x110]\n"
+ "ldr x28, [x13, #0x110]\n"
"fmla v18.8h, v5.8h, v11.8h\n"
"fmla v19.8h, v4.8h, v11.8h\n"
- "add x20, x20, x13\n"
+ "add x28, x28, x12\n"
"fmla v22.8h, v2.8h, v11.8h\n"
"fmla v23.8h, v1.8h, v11.8h\n"
"tbz %x[n_channels], #2, 129f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x28], #0x8\n"
"tbz %x[n_channels], #1, 128f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x28], #0x4\n"
"tbz %x[n_channels], #0, 131f\n"
- "ld1 { v12.h }[6], [x20], #0x2\n"
+ "ld1 { v12.h }[6], [x28], #0x2\n"
"b 131f\n"
"128:" // Oddments: Load input (4, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 131f\n"
- "ld1 { v12.h }[4], [x20], #0x2\n"
+ "ld1 { v12.h }[4], [x28], #0x2\n"
"b 131f\n"
"129:" // Oddments: Load input (4, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 130f\n"
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x28], #0x4\n"
"tbz %x[n_channels], #0, 131f\n"
- "ld1 { v12.h }[2], [x20], #0x2\n"
+ "ld1 { v12.h }[2], [x28], #0x2\n"
"b 131f\n"
"130:" // Oddments: Load input (4, 1): Bit 2: Unset: Bit 1: Unset
- "ld1 { v12.h }[0], [x20], #0x2\n"
+ "ld1 { v12.h }[0], [x28], #0x2\n"
"131:" // Oddments: Load input (4, 1): Bit 2: End
- "ldr x20, [x14, #0x118]\n"
+ "ldr x27, [x13, #0x118]\n"
"fmla v24.8h, v7.8h, v12.8h\n"
"fmla v25.8h, v6.8h, v12.8h\n"
- "add x20, x20, x13\n"
+ "add x27, x27, x12\n"
"fmla v28.8h, v4.8h, v12.8h\n"
"fmla v29.8h, v3.8h, v12.8h\n"
"tbz %x[n_channels], #2, 133f\n"
- "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v10.d }[0], [x27], #0x8\n"
"tbz %x[n_channels], #1, 132f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x27], #0x4\n"
"tbz %x[n_channels], #0, 135f\n"
- "ld1 { v10.h }[6], [x20], #0x2\n"
+ "ld1 { v10.h }[6], [x27], #0x2\n"
"b 135f\n"
"132:" // Oddments: Load input (4, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 135f\n"
- "ld1 { v10.h }[4], [x20], #0x2\n"
+ "ld1 { v10.h }[4], [x27], #0x2\n"
"b 135f\n"
"133:" // Oddments: Load input (4, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 134f\n"
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v10.s }[0], [x27], #0x4\n"
"tbz %x[n_channels], #0, 135f\n"
- "ld1 { v10.h }[2], [x20], #0x2\n"
+ "ld1 { v10.h }[2], [x27], #0x2\n"
"b 135f\n"
"134:" // Oddments: Load input (4, 4): Bit 2: Unset: Bit 1: Unset
- "ld1 { v10.h }[0], [x20], #0x2\n"
+ "ld1 { v10.h }[0], [x27], #0x2\n"
"135:" // Oddments: Load input (4, 4): Bit 2: End
"fmla v26.8h, v8.8h, v10.8h\n"
"fmla v27.8h, v7.8h, v10.8h\n"
@@ -1643,363 +1643,363 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"fmin v30.8h, v30.8h, v14.8h\n"
"fmin v31.8h, v31.8h, v14.8h\n"
"tbz %x[n_channels], #2, 137f\n"
- "ldr x23, [x16, #0x0]\n"
- "ldr x22, [x16, #0x8]\n"
- "add x23, x23, x12\n"
- "add x22, x22, x12\n"
- "ldr x21, [x16, #0x10]\n"
- "ldr x20, [x16, #0x18]\n"
- "add x21, x21, x12\n"
- "add x20, x20, x12\n"
- "st1 { v16.d }[0], [x23]\n"
- "ldr x23, [x16, #0x20]\n"
- "add x23, x23, x12\n"
- "st1 { v17.d }[0], [x22]\n"
- "ldr x22, [x16, #0x28]\n"
- "add x22, x22, x12\n"
- "st1 { v18.d }[0], [x21]\n"
- "ldr x21, [x16, #0x30]\n"
- "add x21, x21, x12\n"
- "st1 { v19.d }[0], [x20]\n"
- "ldr x20, [x16, #0x38]\n"
- "add x20, x20, x12\n"
- "st1 { v20.d }[0], [x23]\n"
- "ldr x23, [x16, #0x40]\n"
- "add x23, x23, x12\n"
- "st1 { v21.d }[0], [x22]\n"
- "ldr x22, [x16, #0x48]\n"
- "add x22, x22, x12\n"
- "st1 { v22.d }[0], [x21]\n"
- "ldr x21, [x16, #0x50]\n"
- "add x21, x21, x12\n"
- "st1 { v23.d }[0], [x20]\n"
- "ldr x20, [x16, #0x58]\n"
- "add x20, x20, x12\n"
- "st1 { v24.d }[0], [x23]\n"
- "ldr x23, [x16, #0x60]\n"
- "add x23, x23, x12\n"
- "st1 { v25.d }[0], [x22]\n"
- "ldr x22, [x16, #0x68]\n"
- "add x22, x22, x12\n"
- "st1 { v26.d }[0], [x21]\n"
- "ldr x21, [x16, #0x70]\n"
- "add x21, x21, x12\n"
- "st1 { v27.d }[0], [x20]\n"
- "ldr x20, [x16, #0x78]\n"
- "add x20, x20, x12\n"
- "add x12, x12, #0x8\n"
- "st1 { v28.d }[0], [x23]\n"
- "st1 { v29.d }[0], [x22]\n"
- "st1 { v30.d }[0], [x21]\n"
- "st1 { v31.d }[0], [x20]\n"
+ "ldr x22, [x15, #0x0]\n"
+ "ldr x21, [x15, #0x8]\n"
+ "ldr x20, [x15, #0x10]\n"
+ "add x22, x22, x11\n"
+ "add x21, x21, x11\n"
+ "ldr x19, [x15, #0x18]\n"
+ "add x20, x20, x11\n"
+ "add x19, x19, x11\n"
+ "st1 { v16.d }[0], [x22]\n"
+ "st1 { v17.d }[0], [x21]\n"
+ "ldr x22, [x15, #0x20]\n"
+ "ldr x21, [x15, #0x28]\n"
+ "add x22, x22, x11\n"
+ "st1 { v18.d }[0], [x20]\n"
+ "ldr x20, [x15, #0x30]\n"
+ "add x21, x21, x11\n"
+ "add x20, x20, x11\n"
+ "st1 { v19.d }[0], [x19]\n"
+ "ldr x19, [x15, #0x38]\n"
+ "add x19, x19, x11\n"
+ "st1 { v20.d }[0], [x22]\n"
+ "ldr x22, [x15, #0x40]\n"
+ "add x22, x22, x11\n"
+ "st1 { v21.d }[0], [x21]\n"
+ "ldr x21, [x15, #0x48]\n"
+ "add x21, x21, x11\n"
+ "st1 { v22.d }[0], [x20]\n"
+ "ldr x20, [x15, #0x50]\n"
+ "add x20, x20, x11\n"
+ "st1 { v23.d }[0], [x19]\n"
+ "ldr x19, [x15, #0x58]\n"
+ "add x19, x19, x11\n"
+ "st1 { v24.d }[0], [x22]\n"
+ "ldr x22, [x15, #0x60]\n"
+ "add x22, x22, x11\n"
+ "st1 { v25.d }[0], [x21]\n"
+ "ldr x21, [x15, #0x68]\n"
+ "add x21, x21, x11\n"
+ "st1 { v26.d }[0], [x20]\n"
+ "ldr x20, [x15, #0x70]\n"
+ "add x20, x20, x11\n"
+ "st1 { v27.d }[0], [x19]\n"
+ "ldr x19, [x15, #0x78]\n"
+ "add x19, x19, x11\n"
+ "add x11, x11, #0x8\n"
+ "st1 { v28.d }[0], [x22]\n"
+ "st1 { v29.d }[0], [x21]\n"
+ "st1 { v30.d }[0], [x20]\n"
+ "st1 { v31.d }[0], [x19]\n"
"tbz %x[n_channels], #1, 136f\n"
- "ldr x23, [x16, #0x0]\n"
- "ldr x22, [x16, #0x8]\n"
- "add x23, x23, x12\n"
- "add x22, x22, x12\n"
- "ldr x21, [x16, #0x10]\n"
- "ldr x20, [x16, #0x18]\n"
- "add x21, x21, x12\n"
- "add x20, x20, x12\n"
- "st1 { v16.s }[2], [x23]\n"
- "ldr x23, [x16, #0x20]\n"
- "add x23, x23, x12\n"
- "st1 { v17.s }[2], [x22]\n"
- "ldr x22, [x16, #0x28]\n"
- "add x22, x22, x12\n"
- "st1 { v18.s }[2], [x21]\n"
- "ldr x21, [x16, #0x30]\n"
- "add x21, x21, x12\n"
- "st1 { v19.s }[2], [x20]\n"
- "ldr x20, [x16, #0x38]\n"
- "add x20, x20, x12\n"
- "st1 { v20.s }[2], [x23]\n"
- "ldr x23, [x16, #0x40]\n"
- "add x23, x23, x12\n"
- "st1 { v21.s }[2], [x22]\n"
- "ldr x22, [x16, #0x48]\n"
- "add x22, x22, x12\n"
- "st1 { v22.s }[2], [x21]\n"
- "ldr x21, [x16, #0x50]\n"
- "add x21, x21, x12\n"
- "st1 { v23.s }[2], [x20]\n"
- "ldr x20, [x16, #0x58]\n"
- "add x20, x20, x12\n"
- "st1 { v24.s }[2], [x23]\n"
- "ldr x23, [x16, #0x60]\n"
- "add x23, x23, x12\n"
- "st1 { v25.s }[2], [x22]\n"
- "ldr x22, [x16, #0x68]\n"
- "add x22, x22, x12\n"
- "st1 { v26.s }[2], [x21]\n"
- "ldr x21, [x16, #0x70]\n"
- "add x21, x21, x12\n"
- "st1 { v27.s }[2], [x20]\n"
- "ldr x20, [x16, #0x78]\n"
- "add x20, x20, x12\n"
- "add x12, x12, #0x4\n"
- "st1 { v28.s }[2], [x23]\n"
- "st1 { v29.s }[2], [x22]\n"
- "st1 { v30.s }[2], [x21]\n"
- "st1 { v31.s }[2], [x20]\n"
+ "ldr x22, [x15, #0x0]\n"
+ "ldr x21, [x15, #0x8]\n"
+ "ldr x20, [x15, #0x10]\n"
+ "ldr x19, [x15, #0x18]\n"
+ "add x22, x22, x11\n"
+ "add x21, x21, x11\n"
+ "add x20, x20, x11\n"
+ "add x19, x19, x11\n"
+ "st1 { v16.s }[2], [x22]\n"
+ "ldr x22, [x15, #0x20]\n"
+ "st1 { v17.s }[2], [x21]\n"
+ "ldr x21, [x15, #0x28]\n"
+ "add x22, x22, x11\n"
+ "add x21, x21, x11\n"
+ "st1 { v18.s }[2], [x20]\n"
+ "ldr x20, [x15, #0x30]\n"
+ "add x20, x20, x11\n"
+ "st1 { v19.s }[2], [x19]\n"
+ "ldr x19, [x15, #0x38]\n"
+ "add x19, x19, x11\n"
+ "st1 { v20.s }[2], [x22]\n"
+ "ldr x22, [x15, #0x40]\n"
+ "add x22, x22, x11\n"
+ "st1 { v21.s }[2], [x21]\n"
+ "ldr x21, [x15, #0x48]\n"
+ "add x21, x21, x11\n"
+ "st1 { v22.s }[2], [x20]\n"
+ "ldr x20, [x15, #0x50]\n"
+ "add x20, x20, x11\n"
+ "st1 { v23.s }[2], [x19]\n"
+ "ldr x19, [x15, #0x58]\n"
+ "add x19, x19, x11\n"
+ "st1 { v24.s }[2], [x22]\n"
+ "ldr x22, [x15, #0x60]\n"
+ "add x22, x22, x11\n"
+ "st1 { v25.s }[2], [x21]\n"
+ "ldr x21, [x15, #0x68]\n"
+ "add x21, x21, x11\n"
+ "st1 { v26.s }[2], [x20]\n"
+ "ldr x20, [x15, #0x70]\n"
+ "add x20, x20, x11\n"
+ "st1 { v27.s }[2], [x19]\n"
+ "ldr x19, [x15, #0x78]\n"
+ "add x19, x19, x11\n"
+ "add x11, x11, #0x4\n"
+ "st1 { v28.s }[2], [x22]\n"
+ "st1 { v29.s }[2], [x21]\n"
+ "st1 { v30.s }[2], [x20]\n"
+ "st1 { v31.s }[2], [x19]\n"
"tbz %x[n_channels], #0, 139f\n"
- "ldr x23, [x16, #0x0]\n"
- "ldr x22, [x16, #0x8]\n"
- "add x23, x23, x12\n"
- "add x22, x22, x12\n"
- "ldr x21, [x16, #0x10]\n"
- "ldr x20, [x16, #0x18]\n"
- "add x21, x21, x12\n"
- "add x20, x20, x12\n"
- "st1 { v16.h }[6], [x23]\n"
- "ldr x23, [x16, #0x20]\n"
- "add x23, x23, x12\n"
- "st1 { v17.h }[6], [x22]\n"
- "ldr x22, [x16, #0x28]\n"
- "add x22, x22, x12\n"
- "st1 { v18.h }[6], [x21]\n"
- "ldr x21, [x16, #0x30]\n"
- "add x21, x21, x12\n"
- "st1 { v19.h }[6], [x20]\n"
- "ldr x20, [x16, #0x38]\n"
- "add x20, x20, x12\n"
- "st1 { v20.h }[6], [x23]\n"
- "ldr x23, [x16, #0x40]\n"
- "add x23, x23, x12\n"
- "st1 { v21.h }[6], [x22]\n"
- "ldr x22, [x16, #0x48]\n"
- "add x22, x22, x12\n"
- "st1 { v22.h }[6], [x21]\n"
- "ldr x21, [x16, #0x50]\n"
- "add x21, x21, x12\n"
- "st1 { v23.h }[6], [x20]\n"
- "ldr x20, [x16, #0x58]\n"
- "add x20, x20, x12\n"
- "st1 { v24.h }[6], [x23]\n"
- "ldr x23, [x16, #0x60]\n"
- "add x23, x23, x12\n"
- "st1 { v25.h }[6], [x22]\n"
- "ldr x22, [x16, #0x68]\n"
- "add x22, x22, x12\n"
- "st1 { v26.h }[6], [x21]\n"
- "ldr x21, [x16, #0x70]\n"
- "add x21, x21, x12\n"
- "st1 { v27.h }[6], [x20]\n"
- "ldr x20, [x16, #0x78]\n"
- "add x20, x20, x12\n"
- "st1 { v28.h }[6], [x23]\n"
- "st1 { v29.h }[6], [x22]\n"
- "st1 { v30.h }[6], [x21]\n"
- "st1 { v31.h }[6], [x20]\n"
+ "ldr x22, [x15, #0x0]\n"
+ "ldr x21, [x15, #0x8]\n"
+ "ldr x20, [x15, #0x10]\n"
+ "ldr x19, [x15, #0x18]\n"
+ "add x22, x22, x11\n"
+ "add x21, x21, x11\n"
+ "add x20, x20, x11\n"
+ "add x19, x19, x11\n"
+ "st1 { v16.h }[6], [x22]\n"
+ "ldr x22, [x15, #0x20]\n"
+ "st1 { v17.h }[6], [x21]\n"
+ "ldr x21, [x15, #0x28]\n"
+ "add x22, x22, x11\n"
+ "add x21, x21, x11\n"
+ "st1 { v18.h }[6], [x20]\n"
+ "ldr x20, [x15, #0x30]\n"
+ "add x20, x20, x11\n"
+ "st1 { v19.h }[6], [x19]\n"
+ "ldr x19, [x15, #0x38]\n"
+ "add x19, x19, x11\n"
+ "st1 { v20.h }[6], [x22]\n"
+ "ldr x22, [x15, #0x40]\n"
+ "add x22, x22, x11\n"
+ "st1 { v21.h }[6], [x21]\n"
+ "ldr x21, [x15, #0x48]\n"
+ "add x21, x21, x11\n"
+ "st1 { v22.h }[6], [x20]\n"
+ "ldr x20, [x15, #0x50]\n"
+ "add x20, x20, x11\n"
+ "st1 { v23.h }[6], [x19]\n"
+ "ldr x19, [x15, #0x58]\n"
+ "add x19, x19, x11\n"
+ "st1 { v24.h }[6], [x22]\n"
+ "ldr x22, [x15, #0x60]\n"
+ "add x22, x22, x11\n"
+ "st1 { v25.h }[6], [x21]\n"
+ "ldr x21, [x15, #0x68]\n"
+ "add x21, x21, x11\n"
+ "st1 { v26.h }[6], [x20]\n"
+ "ldr x20, [x15, #0x70]\n"
+ "add x20, x20, x11\n"
+ "st1 { v27.h }[6], [x19]\n"
+ "ldr x19, [x15, #0x78]\n"
+ "add x19, x19, x11\n"
+ "st1 { v28.h }[6], [x22]\n"
+ "st1 { v29.h }[6], [x21]\n"
+ "st1 { v30.h }[6], [x20]\n"
+ "st1 { v31.h }[6], [x19]\n"
"b 139f\n"
"136:" // Oddments: Store: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 139f\n"
- "ldr x23, [x16, #0x0]\n"
- "ldr x22, [x16, #0x8]\n"
- "add x23, x23, x12\n"
- "add x22, x22, x12\n"
- "ldr x21, [x16, #0x10]\n"
- "ldr x20, [x16, #0x18]\n"
- "add x21, x21, x12\n"
- "add x20, x20, x12\n"
- "st1 { v16.h }[4], [x23]\n"
- "ldr x23, [x16, #0x20]\n"
- "add x23, x23, x12\n"
- "st1 { v17.h }[4], [x22]\n"
- "ldr x22, [x16, #0x28]\n"
- "add x22, x22, x12\n"
- "st1 { v18.h }[4], [x21]\n"
- "ldr x21, [x16, #0x30]\n"
- "add x21, x21, x12\n"
- "st1 { v19.h }[4], [x20]\n"
- "ldr x20, [x16, #0x38]\n"
- "add x20, x20, x12\n"
- "st1 { v20.h }[4], [x23]\n"
- "ldr x23, [x16, #0x40]\n"
- "add x23, x23, x12\n"
- "st1 { v21.h }[4], [x22]\n"
- "ldr x22, [x16, #0x48]\n"
- "add x22, x22, x12\n"
- "st1 { v22.h }[4], [x21]\n"
- "ldr x21, [x16, #0x50]\n"
- "add x21, x21, x12\n"
- "st1 { v23.h }[4], [x20]\n"
- "ldr x20, [x16, #0x58]\n"
- "add x20, x20, x12\n"
- "st1 { v24.h }[4], [x23]\n"
- "ldr x23, [x16, #0x60]\n"
- "add x23, x23, x12\n"
- "st1 { v25.h }[4], [x22]\n"
- "ldr x22, [x16, #0x68]\n"
- "add x22, x22, x12\n"
- "st1 { v26.h }[4], [x21]\n"
- "ldr x21, [x16, #0x70]\n"
- "add x21, x21, x12\n"
- "st1 { v27.h }[4], [x20]\n"
- "ldr x20, [x16, #0x78]\n"
- "add x20, x20, x12\n"
- "st1 { v28.h }[4], [x23]\n"
- "st1 { v29.h }[4], [x22]\n"
- "st1 { v30.h }[4], [x21]\n"
- "st1 { v31.h }[4], [x20]\n"
+ "ldr x22, [x15, #0x0]\n"
+ "ldr x21, [x15, #0x8]\n"
+ "add x22, x22, x11\n"
+ "ldr x20, [x15, #0x10]\n"
+ "ldr x19, [x15, #0x18]\n"
+ "add x21, x21, x11\n"
+ "add x20, x20, x11\n"
+ "add x19, x19, x11\n"
+ "st1 { v16.h }[4], [x22]\n"
+ "ldr x22, [x15, #0x20]\n"
+ "add x22, x22, x11\n"
+ "st1 { v17.h }[4], [x21]\n"
+ "ldr x21, [x15, #0x28]\n"
+ "add x21, x21, x11\n"
+ "st1 { v18.h }[4], [x20]\n"
+ "ldr x20, [x15, #0x30]\n"
+ "add x20, x20, x11\n"
+ "st1 { v19.h }[4], [x19]\n"
+ "ldr x19, [x15, #0x38]\n"
+ "add x19, x19, x11\n"
+ "st1 { v20.h }[4], [x22]\n"
+ "ldr x22, [x15, #0x40]\n"
+ "add x22, x22, x11\n"
+ "st1 { v21.h }[4], [x21]\n"
+ "ldr x21, [x15, #0x48]\n"
+ "add x21, x21, x11\n"
+ "st1 { v22.h }[4], [x20]\n"
+ "ldr x20, [x15, #0x50]\n"
+ "add x20, x20, x11\n"
+ "st1 { v23.h }[4], [x19]\n"
+ "ldr x19, [x15, #0x58]\n"
+ "add x19, x19, x11\n"
+ "st1 { v24.h }[4], [x22]\n"
+ "ldr x22, [x15, #0x60]\n"
+ "add x22, x22, x11\n"
+ "st1 { v25.h }[4], [x21]\n"
+ "ldr x21, [x15, #0x68]\n"
+ "add x21, x21, x11\n"
+ "st1 { v26.h }[4], [x20]\n"
+ "ldr x20, [x15, #0x70]\n"
+ "add x20, x20, x11\n"
+ "st1 { v27.h }[4], [x19]\n"
+ "ldr x19, [x15, #0x78]\n"
+ "add x19, x19, x11\n"
+ "st1 { v28.h }[4], [x22]\n"
+ "st1 { v29.h }[4], [x21]\n"
+ "st1 { v30.h }[4], [x20]\n"
+ "st1 { v31.h }[4], [x19]\n"
"b 139f\n"
"137:" // Oddments: Store: Bit 2: Unset
"tbz %x[n_channels], #1, 138f\n"
- "ldr x23, [x16, #0x0]\n"
- "ldr x22, [x16, #0x8]\n"
- "add x23, x23, x12\n"
- "add x22, x22, x12\n"
- "ldr x21, [x16, #0x10]\n"
- "ldr x20, [x16, #0x18]\n"
- "add x21, x21, x12\n"
- "add x20, x20, x12\n"
- "st1 { v16.s }[0], [x23]\n"
- "ldr x23, [x16, #0x20]\n"
- "add x23, x23, x12\n"
- "st1 { v17.s }[0], [x22]\n"
- "ldr x22, [x16, #0x28]\n"
- "add x22, x22, x12\n"
- "st1 { v18.s }[0], [x21]\n"
- "ldr x21, [x16, #0x30]\n"
- "add x21, x21, x12\n"
- "st1 { v19.s }[0], [x20]\n"
- "ldr x20, [x16, #0x38]\n"
- "add x20, x20, x12\n"
- "st1 { v20.s }[0], [x23]\n"
- "ldr x23, [x16, #0x40]\n"
- "add x23, x23, x12\n"
- "st1 { v21.s }[0], [x22]\n"
- "ldr x22, [x16, #0x48]\n"
- "add x22, x22, x12\n"
- "st1 { v22.s }[0], [x21]\n"
- "ldr x21, [x16, #0x50]\n"
- "add x21, x21, x12\n"
- "st1 { v23.s }[0], [x20]\n"
- "ldr x20, [x16, #0x58]\n"
- "add x20, x20, x12\n"
- "st1 { v24.s }[0], [x23]\n"
- "ldr x23, [x16, #0x60]\n"
- "add x23, x23, x12\n"
- "st1 { v25.s }[0], [x22]\n"
- "ldr x22, [x16, #0x68]\n"
- "add x22, x22, x12\n"
- "st1 { v26.s }[0], [x21]\n"
- "ldr x21, [x16, #0x70]\n"
- "add x21, x21, x12\n"
- "st1 { v27.s }[0], [x20]\n"
- "ldr x20, [x16, #0x78]\n"
- "add x20, x20, x12\n"
- "add x12, x12, #0x4\n"
- "st1 { v28.s }[0], [x23]\n"
- "st1 { v29.s }[0], [x22]\n"
- "st1 { v30.s }[0], [x21]\n"
- "st1 { v31.s }[0], [x20]\n"
+ "ldr x22, [x15, #0x0]\n"
+ "ldr x21, [x15, #0x8]\n"
+ "add x22, x22, x11\n"
+ "ldr x20, [x15, #0x10]\n"
+ "ldr x19, [x15, #0x18]\n"
+ "add x21, x21, x11\n"
+ "add x20, x20, x11\n"
+ "add x19, x19, x11\n"
+ "st1 { v16.s }[0], [x22]\n"
+ "ldr x22, [x15, #0x20]\n"
+ "add x22, x22, x11\n"
+ "st1 { v17.s }[0], [x21]\n"
+ "ldr x21, [x15, #0x28]\n"
+ "add x21, x21, x11\n"
+ "st1 { v18.s }[0], [x20]\n"
+ "ldr x20, [x15, #0x30]\n"
+ "add x20, x20, x11\n"
+ "st1 { v19.s }[0], [x19]\n"
+ "ldr x19, [x15, #0x38]\n"
+ "add x19, x19, x11\n"
+ "st1 { v20.s }[0], [x22]\n"
+ "ldr x22, [x15, #0x40]\n"
+ "add x22, x22, x11\n"
+ "st1 { v21.s }[0], [x21]\n"
+ "ldr x21, [x15, #0x48]\n"
+ "add x21, x21, x11\n"
+ "st1 { v22.s }[0], [x20]\n"
+ "ldr x20, [x15, #0x50]\n"
+ "add x20, x20, x11\n"
+ "st1 { v23.s }[0], [x19]\n"
+ "ldr x19, [x15, #0x58]\n"
+ "add x19, x19, x11\n"
+ "st1 { v24.s }[0], [x22]\n"
+ "ldr x22, [x15, #0x60]\n"
+ "add x22, x22, x11\n"
+ "st1 { v25.s }[0], [x21]\n"
+ "ldr x21, [x15, #0x68]\n"
+ "add x21, x21, x11\n"
+ "st1 { v26.s }[0], [x20]\n"
+ "ldr x20, [x15, #0x70]\n"
+ "add x20, x20, x11\n"
+ "st1 { v27.s }[0], [x19]\n"
+ "ldr x19, [x15, #0x78]\n"
+ "add x19, x19, x11\n"
+ "add x11, x11, #0x4\n"
+ "st1 { v28.s }[0], [x22]\n"
+ "st1 { v29.s }[0], [x21]\n"
+ "st1 { v30.s }[0], [x20]\n"
+ "st1 { v31.s }[0], [x19]\n"
"tbz %x[n_channels], #0, 139f\n"
- "ldr x23, [x16, #0x0]\n"
- "ldr x22, [x16, #0x8]\n"
- "add x23, x23, x12\n"
- "add x22, x22, x12\n"
- "ldr x21, [x16, #0x10]\n"
- "ldr x20, [x16, #0x18]\n"
- "add x21, x21, x12\n"
- "add x20, x20, x12\n"
- "st1 { v16.h }[2], [x23]\n"
- "ldr x23, [x16, #0x20]\n"
- "add x23, x23, x12\n"
- "st1 { v17.h }[2], [x22]\n"
- "ldr x22, [x16, #0x28]\n"
- "add x22, x22, x12\n"
- "st1 { v18.h }[2], [x21]\n"
- "ldr x21, [x16, #0x30]\n"
- "add x21, x21, x12\n"
- "st1 { v19.h }[2], [x20]\n"
- "ldr x20, [x16, #0x38]\n"
- "add x20, x20, x12\n"
- "st1 { v20.h }[2], [x23]\n"
- "ldr x23, [x16, #0x40]\n"
- "add x23, x23, x12\n"
- "st1 { v21.h }[2], [x22]\n"
- "ldr x22, [x16, #0x48]\n"
- "add x22, x22, x12\n"
- "st1 { v22.h }[2], [x21]\n"
- "ldr x21, [x16, #0x50]\n"
- "add x21, x21, x12\n"
- "st1 { v23.h }[2], [x20]\n"
- "ldr x20, [x16, #0x58]\n"
- "add x20, x20, x12\n"
- "st1 { v24.h }[2], [x23]\n"
- "ldr x23, [x16, #0x60]\n"
- "add x23, x23, x12\n"
- "st1 { v25.h }[2], [x22]\n"
- "ldr x22, [x16, #0x68]\n"
- "add x22, x22, x12\n"
- "st1 { v26.h }[2], [x21]\n"
- "ldr x21, [x16, #0x70]\n"
- "add x21, x21, x12\n"
- "st1 { v27.h }[2], [x20]\n"
- "ldr x20, [x16, #0x78]\n"
- "add x20, x20, x12\n"
- "st1 { v28.h }[2], [x23]\n"
- "st1 { v29.h }[2], [x22]\n"
- "st1 { v30.h }[2], [x21]\n"
- "st1 { v31.h }[2], [x20]\n"
+ "ldr x22, [x15, #0x0]\n"
+ "ldr x21, [x15, #0x8]\n"
+ "ldr x20, [x15, #0x10]\n"
+ "ldr x19, [x15, #0x18]\n"
+ "add x22, x22, x11\n"
+ "add x21, x21, x11\n"
+ "add x20, x20, x11\n"
+ "add x19, x19, x11\n"
+ "st1 { v16.h }[2], [x22]\n"
+ "ldr x22, [x15, #0x20]\n"
+ "st1 { v17.h }[2], [x21]\n"
+ "ldr x21, [x15, #0x28]\n"
+ "add x22, x22, x11\n"
+ "add x21, x21, x11\n"
+ "st1 { v18.h }[2], [x20]\n"
+ "ldr x20, [x15, #0x30]\n"
+ "add x20, x20, x11\n"
+ "st1 { v19.h }[2], [x19]\n"
+ "ldr x19, [x15, #0x38]\n"
+ "add x19, x19, x11\n"
+ "st1 { v20.h }[2], [x22]\n"
+ "ldr x22, [x15, #0x40]\n"
+ "add x22, x22, x11\n"
+ "st1 { v21.h }[2], [x21]\n"
+ "ldr x21, [x15, #0x48]\n"
+ "add x21, x21, x11\n"
+ "st1 { v22.h }[2], [x20]\n"
+ "ldr x20, [x15, #0x50]\n"
+ "add x20, x20, x11\n"
+ "st1 { v23.h }[2], [x19]\n"
+ "ldr x19, [x15, #0x58]\n"
+ "add x19, x19, x11\n"
+ "st1 { v24.h }[2], [x22]\n"
+ "ldr x22, [x15, #0x60]\n"
+ "add x22, x22, x11\n"
+ "st1 { v25.h }[2], [x21]\n"
+ "ldr x21, [x15, #0x68]\n"
+ "add x21, x21, x11\n"
+ "st1 { v26.h }[2], [x20]\n"
+ "ldr x20, [x15, #0x70]\n"
+ "add x20, x20, x11\n"
+ "st1 { v27.h }[2], [x19]\n"
+ "ldr x19, [x15, #0x78]\n"
+ "add x19, x19, x11\n"
+ "st1 { v28.h }[2], [x22]\n"
+ "st1 { v29.h }[2], [x21]\n"
+ "st1 { v30.h }[2], [x20]\n"
+ "st1 { v31.h }[2], [x19]\n"
"b 139f\n"
"138:" // Oddments: Store: Bit 2: Unset: Bit 1: Unset
- "ldr x23, [x16, #0x0]\n"
- "ldr x22, [x16, #0x8]\n"
- "add x23, x23, x12\n"
- "add x22, x22, x12\n"
- "ldr x21, [x16, #0x10]\n"
- "ldr x20, [x16, #0x18]\n"
- "add x21, x21, x12\n"
- "add x20, x20, x12\n"
- "st1 { v16.h }[0], [x23]\n"
- "ldr x23, [x16, #0x20]\n"
- "add x23, x23, x12\n"
- "st1 { v17.h }[0], [x22]\n"
- "ldr x22, [x16, #0x28]\n"
- "add x22, x22, x12\n"
- "st1 { v18.h }[0], [x21]\n"
- "ldr x21, [x16, #0x30]\n"
- "add x21, x21, x12\n"
- "st1 { v19.h }[0], [x20]\n"
- "ldr x20, [x16, #0x38]\n"
- "add x20, x20, x12\n"
- "st1 { v20.h }[0], [x23]\n"
- "ldr x23, [x16, #0x40]\n"
- "add x23, x23, x12\n"
- "st1 { v21.h }[0], [x22]\n"
- "ldr x22, [x16, #0x48]\n"
- "add x22, x22, x12\n"
- "st1 { v22.h }[0], [x21]\n"
- "ldr x21, [x16, #0x50]\n"
- "add x21, x21, x12\n"
- "st1 { v23.h }[0], [x20]\n"
- "ldr x20, [x16, #0x58]\n"
- "add x20, x20, x12\n"
- "st1 { v24.h }[0], [x23]\n"
- "ldr x23, [x16, #0x60]\n"
- "add x23, x23, x12\n"
- "st1 { v25.h }[0], [x22]\n"
- "ldr x22, [x16, #0x68]\n"
- "add x22, x22, x12\n"
- "st1 { v26.h }[0], [x21]\n"
- "ldr x21, [x16, #0x70]\n"
- "add x21, x21, x12\n"
- "st1 { v27.h }[0], [x20]\n"
- "ldr x20, [x16, #0x78]\n"
- "add x20, x20, x12\n"
- "st1 { v28.h }[0], [x23]\n"
- "st1 { v29.h }[0], [x22]\n"
- "st1 { v30.h }[0], [x21]\n"
- "st1 { v31.h }[0], [x20]\n"
+ "ldr x22, [x15, #0x0]\n"
+ "ldr x21, [x15, #0x8]\n"
+ "add x22, x22, x11\n"
+ "add x21, x21, x11\n"
+ "ldr x20, [x15, #0x10]\n"
+ "ldr x19, [x15, #0x18]\n"
+ "add x20, x20, x11\n"
+ "add x19, x19, x11\n"
+ "st1 { v16.h }[0], [x22]\n"
+ "ldr x22, [x15, #0x20]\n"
+ "add x22, x22, x11\n"
+ "st1 { v17.h }[0], [x21]\n"
+ "ldr x21, [x15, #0x28]\n"
+ "add x21, x21, x11\n"
+ "st1 { v18.h }[0], [x20]\n"
+ "ldr x20, [x15, #0x30]\n"
+ "add x20, x20, x11\n"
+ "st1 { v19.h }[0], [x19]\n"
+ "ldr x19, [x15, #0x38]\n"
+ "add x19, x19, x11\n"
+ "st1 { v20.h }[0], [x22]\n"
+ "ldr x22, [x15, #0x40]\n"
+ "add x22, x22, x11\n"
+ "st1 { v21.h }[0], [x21]\n"
+ "ldr x21, [x15, #0x48]\n"
+ "add x21, x21, x11\n"
+ "st1 { v22.h }[0], [x20]\n"
+ "ldr x20, [x15, #0x50]\n"
+ "add x20, x20, x11\n"
+ "st1 { v23.h }[0], [x19]\n"
+ "ldr x19, [x15, #0x58]\n"
+ "add x19, x19, x11\n"
+ "st1 { v24.h }[0], [x22]\n"
+ "ldr x22, [x15, #0x60]\n"
+ "add x22, x22, x11\n"
+ "st1 { v25.h }[0], [x21]\n"
+ "ldr x21, [x15, #0x68]\n"
+ "add x21, x21, x11\n"
+ "st1 { v26.h }[0], [x20]\n"
+ "ldr x20, [x15, #0x70]\n"
+ "add x20, x20, x11\n"
+ "st1 { v27.h }[0], [x19]\n"
+ "ldr x19, [x15, #0x78]\n"
+ "add x19, x19, x11\n"
+ "st1 { v28.h }[0], [x22]\n"
+ "st1 { v29.h }[0], [x21]\n"
+ "st1 { v30.h }[0], [x20]\n"
+ "st1 { v31.h }[0], [x19]\n"
"139:" // Oddments: Store: Bit 2: End
"140:" // End
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
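Note on the "Oddments: Store" ladder that closes the kernel above: the leftover (post-vector-loop) channels are written by testing the low three bits of n_channels, where bit 2 selects a 64-bit lane store (four fp16 values), bit 1 a 32-bit store, and bit 0 a single half, with the running byte-offset register (x11 in the restored code, x12 in the removed code) advanced by #0x8 or #0x4 between stages. A minimal C++ sketch of that pattern follows; it is illustrative only, uses scalar copies in place of the st1 { v.d/s/h }[lane] instructions, and all names are hypothetical.

    // Illustrative sketch (not part of the patch) of the bit-test
    // tail-store pattern in the "Oddments: Store" ladder above.
    // Hypothetical names; scalar stores stand in for st1 lane stores.
    #include <cstdint>
    #include <cstring>

    using fp16_bits = uint16_t;  // raw 16-bit storage for one __fp16 lane

    void store_oddment_channels(fp16_bits *dst, const fp16_bits *lanes,
                                unsigned n_channels)
    {
        unsigned pos = 0;                      // mirrors the running offset register
        if (n_channels & 4) {                  // asm: tbz %x[n_channels], #2, ...
            std::memcpy(dst + pos, lanes + pos,
                        4 * sizeof(fp16_bits));  // st1 { v.d }[0], 8 bytes
            pos += 4;                          // asm: add x11, x11, #0x8
        }
        if (n_channels & 2) {                  // asm: tbz %x[n_channels], #1, ...
            std::memcpy(dst + pos, lanes + pos,
                        2 * sizeof(fp16_bits));  // st1 { v.s }[...], 4 bytes
            pos += 2;                          // asm: add x11, x11, #0x4
        }
        if (n_channels & 1) {                  // asm: tbz %x[n_channels], #0, ...
            dst[pos] = lanes[pos];             // st1 { v.h }[...], 2 bytes
        }
    }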
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
index 268dda531d..a5df51c4f9 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,331 +87,331 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
);
__asm__ __volatile__(
- "mov x23, #0x0\n"
- "mov x27, #0x0\n"
+ "mov x22, #0x0\n"
+ "mov x26, #0x0\n"
"1:" // Tile loop
- "str x23, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "mov x26, #0x4\n"
+ "str x22, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "mov x21, #0x4\n"
"mov x25, #0x2\n"
- "str x27, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "str x26, [%x[params_struct], %[offsetof_args_tile_j]]\n"
"ldr x24, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x6, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "mul x22, x23, x24\n" // offset = tile_i * ld_input_row
- "ldr x21, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "madd x22, x27, x6, x22\n" // offset += tile_j * ld_input_col
- "ldr x7, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "lsl x6, x6, #0x1\n"
- "mul x20, x23, x21\n" // offset = tile_i * ld_output_row
- "ldr x8, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "ldr x17, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "mov x23, #0x10\n" // cntb _, ALL, #1
- "mul x22, x22, x26\n" // offset *= kernel_stride * output_size
- "add x8, x8, x22, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
- "add x16, x8, x24, LSL #1\n"
- "ldr x15, [%x[params_struct], %[offsetof_args_params]]\n"
- "madd x20, x27, x7, x20\n" // offset += tile_j * ld_output_col
- "lsr x22, %x[n_channels], #0x3\n"
- "add x14, x16, x24, LSL #1\n"
- "mul x20, x20, x25\n" // offset *= output_tile_size
- "add x13, x6, x6\n"
- "add x12, x14, x24, LSL #1\n"
- "add x11, x13, x6\n"
- "add x17, x17, x20, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
+ "ldr x7, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "mul x20, x22, x24\n" // offset = tile_i * ld_input_row
+ "ldr x23, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "madd x20, x26, x7, x20\n" // offset += tile_j * ld_input_col
+ "ldr x8, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+ "lsl x7, x7, #0x1\n"
+ "mul x19, x22, x23\n" // offset = tile_i * ld_output_row
+ "ldr x17, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "ldr x16, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "mov x22, #0x10\n" // cntb _, ALL, #1
+ "mul x20, x20, x21\n" // offset *= kernel_stride * output_size
+ "add x17, x17, x20, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
+ "add x15, x17, x24, LSL #1\n"
+ "ldr x14, [%x[params_struct], %[offsetof_args_params]]\n"
+ "madd x19, x26, x8, x19\n" // offset += tile_j * ld_output_col
+ "lsr x21, %x[n_channels], #0x3\n"
+ "add x13, x15, x24, LSL #1\n"
+ "mul x19, x19, x25\n" // offset *= output_tile_size
+ "add x12, x7, x7\n"
+ "add x11, x13, x24, LSL #1\n"
+ "add x10, x12, x7\n"
+ "add x16, x16, x19, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
"add x20, %x[params_struct], %[offsetof_args_min]\n"
+ "add x19, %x[params_struct], %[offsetof_args_max]\n"
"ld1r { v19.8h }, [x20]\n"
- "add x20, %x[params_struct], %[offsetof_args_max]\n"
- "ld1r { v18.8h }, [x20]\n"
- "add x10, x12, x24, LSL #1\n"
- "add x9, x11, x6\n"
- "add x28, x17, x21, LSL #1\n"
- "lsl x7, x7, #0x1\n"
- "mov x21, #0x0\n"
- "sub x20, XZR, x23\n"
- "cbz x22, 4f\n"
- "ldr q17, [x15, #0x0]\n"
- "ldr q0, [x15, #0x10]\n"
- "cmp x23, x22, LSL #4\n"
- "ldr q1, [x15, #0x20]\n"
- "ldr q2, [x15, #0x30]\n"
- "ldr q3, [x15, #0x40]\n"
- "ldr q4, [x15, #0x50]\n"
- "ldr q5, [x15, #0x60]\n"
- "ldr q6, [x15, #0x70]\n"
- "ldr q7, [x15, #0x80]\n"
- "ldr q8, [x15, #0x90]\n"
- "add x15, x15, #0xa0\n"
- "ldr q9, [x14, x13]\n"
- "ld1 { v10.8h }, [x8]\n"
- "ldr q11, [x8, x6]\n"
- "ldr q12, [x8, x11]\n"
- "ldr q13, [x8, x9]\n"
- "ld1 { v14.8h }, [x16]\n"
- "ldr q15, [x16, x6]\n"
- "ldr q16, [x8, x13]\n"
+ "ld1r { v18.8h }, [x19]\n"
+ "add x9, x11, x24, LSL #1\n"
+ "add x28, x10, x7\n"
+ "add x27, x16, x23, LSL #1\n"
+ "lsl x8, x8, #0x1\n"
+ "mov x20, #0x0\n"
+ "sub x19, XZR, x22\n"
+ "cbz x21, 4f\n"
+ "ldr q17, [x14, #0x0]\n"
+ "cmp x22, x21, LSL #4\n"
+ "ldr q0, [x14, #0x10]\n"
+ "ldr q1, [x14, #0x20]\n"
+ "ldr q2, [x14, #0x30]\n"
+ "ldr q3, [x14, #0x40]\n"
+ "ldr q4, [x14, #0x50]\n"
+ "ldr q5, [x14, #0x60]\n"
+ "ldr q6, [x14, #0x70]\n"
+ "ldr q7, [x14, #0x80]\n"
+ "ldr q8, [x14, #0x90]\n"
+ "ldr q9, [x13, x12]\n"
+ "add x14, x14, #0xa0\n"
+ "ld1 { v10.8h }, [x17]\n"
+ "ldr q11, [x17, x7]\n"
+ "ldr q12, [x17, x10]\n"
+ "ldr q13, [x17, x28]\n"
+ "ld1 { v14.8h }, [x15]\n"
+ "ldr q15, [x15, x7]\n"
+ "ldr q16, [x17, x12]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
"mov v28.16b, v17.16b\n fmla v28.8h, v8.8h, v9.8h\n"
"mov v29.16b, v17.16b\n fmla v29.8h, v6.8h, v9.8h\n"
- "add x23, x23, #0x10\n"
- "add x8, x8, #0x10\n"
+ "add x22, x22, #0x10\n"
+ "add x17, x17, #0x10\n"
"fmla v28.8h, v0.8h, v10.8h\n"
- "ld1 { v10.8h }, [x8]\n"
"fmla v29.8h, v1.8h, v12.8h\n"
- "ldr q12, [x16, x9]\n"
+ "ldr q12, [x15, x28]\n"
+ "cmp x22, x21, LSL #4\n"
"fmla v28.8h, v1.8h, v11.8h\n"
- "ldr q11, [x16, x11]\n"
"fmla v29.8h, v2.8h, v13.8h\n"
- "ldr q13, [x16, x13]\n"
+ "ldr q11, [x15, x10]\n"
+ "ldr q13, [x15, x12]\n"
"fmla v28.8h, v3.8h, v14.8h\n"
- "ld1 { v14.8h }, [x12]\n"
"fmla v29.8h, v0.8h, v16.8h\n"
- "add x16, x16, #0x10\n"
+ "ld1 { v14.8h }, [x11]\n"
+ "add x15, x15, #0x10\n"
"fmla v28.8h, v4.8h, v15.8h\n"
- "ld1 { v15.8h }, [x14]\n"
"fmla v29.8h, v4.8h, v11.8h\n"
- "ldr q11, [x12, x6]\n"
+ "ld1 { v15.8h }, [x13]\n"
+ "ldr q11, [x11, x7]\n"
"fmla v28.8h, v2.8h, v16.8h\n"
- "ldr q16, [x14, x6]\n"
"fmla v29.8h, v5.8h, v12.8h\n"
- "ldr q12, [x14, x11]\n"
+ "ldr q12, [x13, x10]\n"
+ "ldr q16, [x13, x7]\n"
"mov v30.16b, v17.16b\n fmla v30.8h, v2.8h, v9.8h\n"
"mov v31.16b, v17.16b\n fmla v31.8h, v0.8h, v9.8h\n"
- "ldr q17, [x15, #0x0]\n"
- "cmp x23, x22, LSL #4\n"
+ "add x19, x19, #0x10\n"
+ "add x20, x20, #0x10\n"
"fmla v28.8h, v5.8h, v13.8h\n"
"fmla v29.8h, v3.8h, v13.8h\n"
- "ldr q13, [x12, x11]\n"
- "add x20, x20, #0x10\n"
+ "ldr q13, [x11, x10]\n"
+ "ld1 { v10.8h }, [x17]\n"
"fmla v30.8h, v3.8h, v14.8h\n"
- "ldr q14, [x12, x9]\n"
"fmla v31.8h, v4.8h, v13.8h\n"
- "ldr q13, [x10, x6]\n"
+ "ldr q14, [x11, x28]\n"
+ "ldr q13, [x9, x7]\n"
"fmla v30.8h, v0.8h, v15.8h\n"
- "ldr q0, [x15, #0x10]\n"
"fmla v31.8h, v1.8h, v12.8h\n"
- "add x21, x21, #0x10\n"
+ "ldr q17, [x14, #0x0]\n"
+ "ldr q0, [x14, #0x10]\n"
"fmla v30.8h, v4.8h, v11.8h\n"
- "ldr q11, [x14, x9]\n"
- "ldr q4, [x15, #0x50]\n"
"fmla v31.8h, v5.8h, v14.8h\n"
- "ldr q14, [x10, x11]\n"
+ "ldr q11, [x13, x28]\n"
+ "ldr q14, [x9, x10]\n"
"fmla v28.8h, v6.8h, v15.8h\n"
- "ld1 { v15.8h }, [x10]\n"
"fmla v30.8h, v1.8h, v16.8h\n"
- "ldr q1, [x15, #0x20]\n"
+ "ld1 { v15.8h }, [x9]\n"
+ "add x13, x13, #0x10\n"
"fmla v31.8h, v2.8h, v11.8h\n"
- "ldr q2, [x15, #0x30]\n"
"fmla v28.8h, v7.8h, v16.8h\n"
- "ldr q16, [x12, x13]\n"
+ "ldr q16, [x11, x12]\n"
+ "fmax v28.8h, v28.8h, v19.8h\n"
"fmla v30.8h, v6.8h, v15.8h\n"
- "ldr q15, [x10, x13]\n"
"fmla v31.8h, v3.8h, v16.8h\n"
- "ldr q3, [x15, #0x40]\n"
+ "ldr q15, [x9, x12]\n"
+ "fmin v28.8h, v28.8h, v18.8h\n"
"fmla v30.8h, v7.8h, v13.8h\n"
- "ldr q13, [x8, x9]\n"
"fmla v31.8h, v7.8h, v14.8h\n"
- "ld1 { v14.8h }, [x16]\n"
+ "add x11, x11, #0x10\n"
+ "ldr q9, [x13, x12]\n"
"fmla v29.8h, v7.8h, v12.8h\n"
- "ldr q12, [x8, x11]\n"
"fmla v30.8h, v5.8h, v16.8h\n"
- "ldr q16, [x8, x13]\n"
- "ldr q5, [x15, #0x60]\n"
+ "ldr q12, [x17, x10]\n"
+ "ldr q13, [x17, x28]\n"
"fmla v31.8h, v6.8h, v15.8h\n"
"fmla v29.8h, v8.8h, v11.8h\n"
- "ldr q11, [x10, x9]\n"
- "ldr q6, [x15, #0x70]\n"
+ "ldr q11, [x9, x28]\n"
+ "fmax v29.8h, v29.8h, v19.8h\n"
"fmla v30.8h, v8.8h, v15.8h\n"
"fmla v31.8h, v8.8h, v11.8h\n"
- "ldr q11, [x8, x6]\n"
- "ldr q15, [x16, x6]\n"
- "fmax v28.8h, v28.8h, v19.8h\n"
- "fmax v29.8h, v29.8h, v19.8h\n"
- "ldr q7, [x15, #0x80]\n"
- "ldr q8, [x15, #0x90]\n"
"fmax v30.8h, v30.8h, v19.8h\n"
+ "add x9, x9, #0x10\n"
"fmax v31.8h, v31.8h, v19.8h\n"
- "add x14, x14, #0x10\n"
- "ldr q9, [x14, x13]\n"
- "fmin v28.8h, v28.8h, v18.8h\n"
"fmin v29.8h, v29.8h, v18.8h\n"
+ "ldr q11, [x17, x7]\n"
+ "ld1 { v14.8h }, [x15]\n"
"fmin v30.8h, v30.8h, v18.8h\n"
"fmin v31.8h, v31.8h, v18.8h\n"
- "add x12, x12, #0x10\n"
- "add x10, x10, #0x10\n"
- "st1 { v28.8h }, [x17]\n"
- "add x15, x15, #0xa0\n"
- "str q29, [x17, x7]\n"
- "add x17, x17, #0x10\n"
- "st1 { v30.8h }, [x28]\n"
- "str q31, [x28, x7]\n"
- "add x28, x28, #0x10\n"
+ "ldr q15, [x15, x7]\n"
+ "ldr q16, [x17, x12]\n"
+ "st1 { v28.8h }, [x16]\n"
+ "ldr q1, [x14, #0x20]\n"
+ "ldr q2, [x14, #0x30]\n"
+ "str q29, [x16, x8]\n"
+ "add x16, x16, #0x10\n"
+ "ldr q3, [x14, #0x40]\n"
+ "ldr q4, [x14, #0x50]\n"
+ "st1 { v30.8h }, [x27]\n"
+ "ldr q5, [x14, #0x60]\n"
+ "ldr q6, [x14, #0x70]\n"
+ "str q31, [x27, x8]\n"
+ "add x27, x27, #0x10\n"
+ "ldr q7, [x14, #0x80]\n"
+ "ldr q8, [x14, #0x90]\n"
+ "add x14, x14, #0xa0\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
"mov v28.16b, v17.16b\n fmla v28.8h, v8.8h, v9.8h\n"
"mov v29.16b, v17.16b\n fmla v29.8h, v6.8h, v9.8h\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"fmla v28.8h, v0.8h, v10.8h\n"
"fmla v29.8h, v1.8h, v12.8h\n"
- "ldr q12, [x16, x9]\n"
+ "ldr q12, [x15, x28]\n"
"fmla v28.8h, v1.8h, v11.8h\n"
- "ldr q11, [x16, x11]\n"
"fmla v29.8h, v2.8h, v13.8h\n"
- "ldr q13, [x16, x13]\n"
+ "ldr q11, [x15, x10]\n"
+ "ldr q13, [x15, x12]\n"
"fmla v28.8h, v3.8h, v14.8h\n"
- "ld1 { v14.8h }, [x12]\n"
"fmla v29.8h, v0.8h, v16.8h\n"
- "add x16, x16, #0x10\n"
+ "ld1 { v14.8h }, [x11]\n"
+ "add x15, x15, #0x10\n"
"fmla v28.8h, v4.8h, v15.8h\n"
- "ld1 { v15.8h }, [x14]\n"
"fmla v29.8h, v4.8h, v11.8h\n"
- "ldr q11, [x12, x6]\n"
+ "ld1 { v15.8h }, [x13]\n"
+ "ldr q11, [x11, x7]\n"
"fmla v28.8h, v2.8h, v16.8h\n"
- "ldr q16, [x14, x6]\n"
"fmla v29.8h, v5.8h, v12.8h\n"
- "ldr q12, [x14, x11]\n"
+ "ldr q12, [x13, x10]\n"
+ "ldr q16, [x13, x7]\n"
"mov v30.16b, v17.16b\n fmla v30.8h, v2.8h, v9.8h\n"
"mov v31.16b, v17.16b\n fmla v31.8h, v0.8h, v9.8h\n"
"fmla v28.8h, v5.8h, v13.8h\n"
"fmla v29.8h, v3.8h, v13.8h\n"
- "ldr q13, [x12, x11]\n"
+ "ldr q13, [x11, x10]\n"
"fmla v30.8h, v3.8h, v14.8h\n"
- "ldr q14, [x12, x9]\n"
"fmla v31.8h, v4.8h, v13.8h\n"
- "ldr q13, [x10, x6]\n"
+ "ldr q14, [x11, x28]\n"
+ "ldr q13, [x9, x7]\n"
"fmla v30.8h, v0.8h, v15.8h\n"
"fmla v31.8h, v1.8h, v12.8h\n"
"fmla v30.8h, v4.8h, v11.8h\n"
- "ldr q11, [x14, x9]\n"
"fmla v31.8h, v5.8h, v14.8h\n"
- "ldr q14, [x10, x11]\n"
+ "ldr q11, [x13, x28]\n"
+ "ldr q14, [x9, x10]\n"
"fmla v28.8h, v6.8h, v15.8h\n"
- "ld1 { v15.8h }, [x10]\n"
"fmla v30.8h, v1.8h, v16.8h\n"
- "add x14, x14, #0x10\n"
+ "ld1 { v15.8h }, [x9]\n"
+ "add x13, x13, #0x10\n"
"fmla v31.8h, v2.8h, v11.8h\n"
"fmla v28.8h, v7.8h, v16.8h\n"
- "ldr q16, [x12, x13]\n"
+ "ldr q16, [x11, x12]\n"
"fmax v28.8h, v28.8h, v19.8h\n"
"fmla v30.8h, v6.8h, v15.8h\n"
- "ldr q15, [x10, x13]\n"
"fmla v31.8h, v3.8h, v16.8h\n"
+ "ldr q15, [x9, x12]\n"
"fmin v28.8h, v28.8h, v18.8h\n"
"fmla v30.8h, v7.8h, v13.8h\n"
"fmla v31.8h, v7.8h, v14.8h\n"
- "st1 { v28.8h }, [x17]\n"
- "add x12, x12, #0x10\n"
+ "st1 { v28.8h }, [x16]\n"
+ "add x11, x11, #0x10\n"
"fmla v29.8h, v7.8h, v12.8h\n"
"fmla v30.8h, v5.8h, v16.8h\n"
"fmla v31.8h, v6.8h, v15.8h\n"
"fmla v29.8h, v8.8h, v11.8h\n"
- "ldr q11, [x10, x9]\n"
+ "ldr q11, [x9, x28]\n"
"fmax v29.8h, v29.8h, v19.8h\n"
"fmla v30.8h, v8.8h, v15.8h\n"
"fmla v31.8h, v8.8h, v11.8h\n"
"fmax v30.8h, v30.8h, v19.8h\n"
- "add x10, x10, #0x10\n"
+ "add x9, x9, #0x10\n"
"fmax v31.8h, v31.8h, v19.8h\n"
"fmin v29.8h, v29.8h, v18.8h\n"
- "str q29, [x17, x7]\n"
- "add x17, x17, #0x10\n"
+ "str q29, [x16, x8]\n"
+ "add x16, x16, #0x10\n"
"fmin v30.8h, v30.8h, v18.8h\n"
"fmin v31.8h, v31.8h, v18.8h\n"
- "st1 { v30.8h }, [x28]\n"
- "str q31, [x28, x7]\n"
- "add x28, x28, #0x10\n"
+ "st1 { v30.8h }, [x27]\n"
+ "str q31, [x27, x8]\n"
+ "add x27, x27, #0x10\n"
"4:" // Tile loop: Oddments
"tst %x[n_channels], #0x7\n"
"beq 81f\n"
- "ldr q17, [x15, #0x0]\n"
- "ldr q0, [x15, #0x10]\n"
- "add x27, x14, x13\n"
- "add x26, x8, XZR\n"
- "ldr q1, [x15, #0x20]\n"
- "ldr q2, [x15, #0x30]\n"
- "add x25, x8, x6\n"
- "add x24, x8, x11\n"
- "ldr q3, [x15, #0x40]\n"
- "ldr q4, [x15, #0x50]\n"
- "add x23, x8, x9\n"
- "add x22, x16, XZR\n"
- "ldr q5, [x15, #0x60]\n"
- "ldr q6, [x15, #0x70]\n"
- "add x21, x16, x6\n"
- "add x20, x8, x13\n"
- "ldr q7, [x15, #0x80]\n"
- "ldr q8, [x15, #0x90]\n"
+ "ldr q17, [x14, #0x0]\n"
+ "ldr q0, [x14, #0x10]\n"
+ "ldr q1, [x14, #0x20]\n"
+ "ldr q2, [x14, #0x30]\n"
+ "add x26, x13, x12\n"
+ "add x25, x17, XZR\n"
+ "ldr q3, [x14, #0x40]\n"
+ "ldr q4, [x14, #0x50]\n"
+ "add x24, x17, x7\n"
+ "add x23, x17, x10\n"
+ "ldr q5, [x14, #0x60]\n"
+ "ldr q6, [x14, #0x70]\n"
+ "add x22, x17, x28\n"
+ "add x21, x15, XZR\n"
+ "ldr q7, [x14, #0x80]\n"
+ "ldr q8, [x14, #0x90]\n"
+ "add x20, x15, x7\n"
+ "add x19, x17, x12\n"
"tbz %x[n_channels], #2, 6f\n"
- "ldr d9, [x27], #0x8\n"
- "ldr d10, [x26], #0x8\n"
- "ldr d11, [x25], #0x8\n"
- "ldr d12, [x24], #0x8\n"
- "ldr d13, [x23], #0x8\n"
- "ldr d14, [x22], #0x8\n"
- "ldr d15, [x21], #0x8\n"
- "ldr d16, [x20], #0x8\n"
+ "ldr d9, [x26], #0x8\n"
+ "ldr d10, [x25], #0x8\n"
+ "ldr d11, [x24], #0x8\n"
+ "ldr d12, [x23], #0x8\n"
+ "ldr d13, [x22], #0x8\n"
+ "ldr d14, [x21], #0x8\n"
+ "ldr d15, [x20], #0x8\n"
+ "ldr d16, [x19], #0x8\n"
"tbz %x[n_channels], #1, 5f\n"
- "ld1 { v9.s }[2], [x27], #0x4\n"
- "ld1 { v10.s }[2], [x26], #0x4\n"
- "ld1 { v11.s }[2], [x25], #0x4\n"
- "ld1 { v12.s }[2], [x24], #0x4\n"
- "ld1 { v13.s }[2], [x23], #0x4\n"
- "ld1 { v14.s }[2], [x22], #0x4\n"
- "ld1 { v15.s }[2], [x21], #0x4\n"
- "ld1 { v16.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x26], #0x4\n"
+ "ld1 { v10.s }[2], [x25], #0x4\n"
+ "ld1 { v11.s }[2], [x24], #0x4\n"
+ "ld1 { v12.s }[2], [x23], #0x4\n"
+ "ld1 { v13.s }[2], [x22], #0x4\n"
+ "ld1 { v14.s }[2], [x21], #0x4\n"
+ "ld1 { v15.s }[2], [x20], #0x4\n"
+ "ld1 { v16.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 8f\n"
- "ld1 { v9.h }[6], [x27]\n"
- "ld1 { v10.h }[6], [x26]\n"
- "ld1 { v11.h }[6], [x25]\n"
- "ld1 { v12.h }[6], [x24]\n"
- "ld1 { v13.h }[6], [x23]\n"
- "ld1 { v14.h }[6], [x22]\n"
- "ld1 { v15.h }[6], [x21]\n"
- "ld1 { v16.h }[6], [x20]\n"
+ "ld1 { v9.h }[6], [x26]\n"
+ "ld1 { v10.h }[6], [x25]\n"
+ "ld1 { v11.h }[6], [x24]\n"
+ "ld1 { v12.h }[6], [x23]\n"
+ "ld1 { v13.h }[6], [x22]\n"
+ "ld1 { v14.h }[6], [x21]\n"
+ "ld1 { v15.h }[6], [x20]\n"
+ "ld1 { v16.h }[6], [x19]\n"
"b 8f\n"
"5:" // Tile loop: Oddments: Load inputs: (2, 2), (0, 0), (0, 1), (0, 3), (0, 4), (1, 0), (1, 1), (0, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 8f\n"
- "ld1 { v9.h }[4], [x27]\n"
- "ld1 { v10.h }[4], [x26]\n"
- "ld1 { v11.h }[4], [x25]\n"
- "ld1 { v12.h }[4], [x24]\n"
- "ld1 { v13.h }[4], [x23]\n"
- "ld1 { v14.h }[4], [x22]\n"
- "ld1 { v15.h }[4], [x21]\n"
- "ld1 { v16.h }[4], [x20]\n"
+ "ld1 { v9.h }[4], [x26]\n"
+ "ld1 { v10.h }[4], [x25]\n"
+ "ld1 { v11.h }[4], [x24]\n"
+ "ld1 { v12.h }[4], [x23]\n"
+ "ld1 { v13.h }[4], [x22]\n"
+ "ld1 { v14.h }[4], [x21]\n"
+ "ld1 { v15.h }[4], [x20]\n"
+ "ld1 { v16.h }[4], [x19]\n"
"b 8f\n"
"6:" // Tile loop: Oddments: Load inputs: (2, 2), (0, 0), (0, 1), (0, 3), (0, 4), (1, 0), (1, 1), (0, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 7f\n"
- "ldr s9, [x27], #0x4\n"
- "ldr s10, [x26], #0x4\n"
- "ldr s11, [x25], #0x4\n"
- "ldr s12, [x24], #0x4\n"
- "ldr s13, [x23], #0x4\n"
- "ldr s14, [x22], #0x4\n"
- "ldr s15, [x21], #0x4\n"
- "ldr s16, [x20], #0x4\n"
+ "ldr s9, [x26], #0x4\n"
+ "ldr s10, [x25], #0x4\n"
+ "ldr s11, [x24], #0x4\n"
+ "ldr s12, [x23], #0x4\n"
+ "ldr s13, [x22], #0x4\n"
+ "ldr s14, [x21], #0x4\n"
+ "ldr s15, [x20], #0x4\n"
+ "ldr s16, [x19], #0x4\n"
"tbz %x[n_channels], #0, 8f\n"
- "ld1 { v9.h }[2], [x27]\n"
- "ld1 { v10.h }[2], [x26]\n"
- "ld1 { v11.h }[2], [x25]\n"
- "ld1 { v12.h }[2], [x24]\n"
- "ld1 { v13.h }[2], [x23]\n"
- "ld1 { v14.h }[2], [x22]\n"
- "ld1 { v15.h }[2], [x21]\n"
- "ld1 { v16.h }[2], [x20]\n"
+ "ld1 { v9.h }[2], [x26]\n"
+ "ld1 { v10.h }[2], [x25]\n"
+ "ld1 { v11.h }[2], [x24]\n"
+ "ld1 { v12.h }[2], [x23]\n"
+ "ld1 { v13.h }[2], [x22]\n"
+ "ld1 { v14.h }[2], [x21]\n"
+ "ld1 { v15.h }[2], [x20]\n"
+ "ld1 { v16.h }[2], [x19]\n"
"b 8f\n"
"7:" // Tile loop: Oddments: Load inputs: (2, 2), (0, 0), (0, 1), (0, 3), (0, 4), (1, 0), (1, 1), (0, 2): Bit 2: Unset: Bit 1: Unset
- "ldr h9, [x27, #0x0]\n"
- "ldr h10, [x26, #0x0]\n"
- "ldr h11, [x25, #0x0]\n"
- "ldr h12, [x24, #0x0]\n"
- "ldr h13, [x23, #0x0]\n"
- "ldr h14, [x22, #0x0]\n"
- "ldr h15, [x21, #0x0]\n"
- "ldr h16, [x20, #0x0]\n"
+ "ldr h9, [x26, #0x0]\n"
+ "ldr h10, [x25, #0x0]\n"
+ "ldr h11, [x24, #0x0]\n"
+ "ldr h12, [x23, #0x0]\n"
+ "ldr h13, [x22, #0x0]\n"
+ "ldr h14, [x21, #0x0]\n"
+ "ldr h15, [x20, #0x0]\n"
+ "ldr h16, [x19, #0x0]\n"
"8:" // Tile loop: Oddments: Load inputs: (2, 2), (0, 0), (0, 1), (0, 3), (0, 4), (1, 0), (1, 1), (0, 2): Bit 2: End
"mov v28.16b, v17.16b\n fmla v28.8h, v8.8h, v9.8h\n"
"fmla v28.8h, v0.8h, v10.8h\n"
- "add x20, x16, x11\n"
+ "add x19, x15, x10\n"
"mov v29.16b, v17.16b\n fmla v29.8h, v6.8h, v9.8h\n"
"fmla v28.8h, v1.8h, v11.8h\n"
"fmla v29.8h, v1.8h, v12.8h\n"
@@ -423,383 +423,383 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"fmla v28.8h, v2.8h, v16.8h\n"
"fmla v29.8h, v0.8h, v16.8h\n"
"tbz %x[n_channels], #2, 10f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #1, 9f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 12f\n"
- "ld1 { v11.h }[6], [x20]\n"
+ "ld1 { v11.h }[6], [x19]\n"
"b 12f\n"
"9:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 12f\n"
- "ld1 { v11.h }[4], [x20]\n"
+ "ld1 { v11.h }[4], [x19]\n"
"b 12f\n"
"10:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 11f\n"
- "ldr s11, [x20], #0x4\n"
+ "ldr s11, [x19], #0x4\n"
"tbz %x[n_channels], #0, 12f\n"
- "ld1 { v11.h }[2], [x20]\n"
+ "ld1 { v11.h }[2], [x19]\n"
"b 12f\n"
"11:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 2: Unset: Bit 1: Unset
- "ldr h11, [x20, #0x0]\n"
+ "ldr h11, [x19, #0x0]\n"
"12:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 2: End
"fmla v29.8h, v4.8h, v11.8h\n"
- "add x20, x16, x9\n"
+ "add x19, x15, x28\n"
"tbz %x[n_channels], #2, 14f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #1, 13f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 16f\n"
- "ld1 { v12.h }[6], [x20]\n"
+ "ld1 { v12.h }[6], [x19]\n"
"b 16f\n"
"13:" // Tile loop: Oddments: Load inputs: (1, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 16f\n"
- "ld1 { v12.h }[4], [x20]\n"
+ "ld1 { v12.h }[4], [x19]\n"
"b 16f\n"
"14:" // Tile loop: Oddments: Load inputs: (1, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 15f\n"
- "ldr s12, [x20], #0x4\n"
+ "ldr s12, [x19], #0x4\n"
"tbz %x[n_channels], #0, 16f\n"
- "ld1 { v12.h }[2], [x20]\n"
+ "ld1 { v12.h }[2], [x19]\n"
"b 16f\n"
"15:" // Tile loop: Oddments: Load inputs: (1, 4): Bit 2: Unset: Bit 1: Unset
- "ldr h12, [x20, #0x0]\n"
+ "ldr h12, [x19, #0x0]\n"
"16:" // Tile loop: Oddments: Load inputs: (1, 4): Bit 2: End
"fmla v29.8h, v5.8h, v12.8h\n"
- "add x20, x16, x13\n"
+ "add x19, x15, x12\n"
"tbz %x[n_channels], #2, 18f\n"
- "ldr d13, [x20], #0x8\n"
+ "ldr d13, [x19], #0x8\n"
"tbz %x[n_channels], #1, 17f\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 20f\n"
- "ld1 { v13.h }[6], [x20]\n"
+ "ld1 { v13.h }[6], [x19]\n"
"b 20f\n"
"17:" // Tile loop: Oddments: Load inputs: (1, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 20f\n"
- "ld1 { v13.h }[4], [x20]\n"
+ "ld1 { v13.h }[4], [x19]\n"
"b 20f\n"
"18:" // Tile loop: Oddments: Load inputs: (1, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 19f\n"
- "ldr s13, [x20], #0x4\n"
+ "ldr s13, [x19], #0x4\n"
"tbz %x[n_channels], #0, 20f\n"
- "ld1 { v13.h }[2], [x20]\n"
+ "ld1 { v13.h }[2], [x19]\n"
"b 20f\n"
"19:" // Tile loop: Oddments: Load inputs: (1, 2): Bit 2: Unset: Bit 1: Unset
- "ldr h13, [x20, #0x0]\n"
+ "ldr h13, [x19, #0x0]\n"
"20:" // Tile loop: Oddments: Load inputs: (1, 2): Bit 2: End
"fmla v28.8h, v5.8h, v13.8h\n"
"fmla v29.8h, v3.8h, v13.8h\n"
- "add x20, x12, XZR\n"
+ "add x19, x11, XZR\n"
"tbz %x[n_channels], #2, 22f\n"
- "ldr d14, [x20], #0x8\n"
+ "ldr d14, [x19], #0x8\n"
"tbz %x[n_channels], #1, 21f\n"
- "ld1 { v14.s }[2], [x20], #0x4\n"
+ "ld1 { v14.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 24f\n"
- "ld1 { v14.h }[6], [x20]\n"
+ "ld1 { v14.h }[6], [x19]\n"
"b 24f\n"
"21:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 24f\n"
- "ld1 { v14.h }[4], [x20]\n"
+ "ld1 { v14.h }[4], [x19]\n"
"b 24f\n"
"22:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 23f\n"
- "ldr s14, [x20], #0x4\n"
+ "ldr s14, [x19], #0x4\n"
"tbz %x[n_channels], #0, 24f\n"
- "ld1 { v14.h }[2], [x20]\n"
+ "ld1 { v14.h }[2], [x19]\n"
"b 24f\n"
"23:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 2: Unset: Bit 1: Unset
- "ldr h14, [x20, #0x0]\n"
+ "ldr h14, [x19, #0x0]\n"
"24:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 2: End
"fmla v30.8h, v3.8h, v14.8h\n"
- "add x20, x14, XZR\n"
+ "add x19, x13, XZR\n"
"tbz %x[n_channels], #2, 26f\n"
- "ldr d15, [x20], #0x8\n"
+ "ldr d15, [x19], #0x8\n"
"tbz %x[n_channels], #1, 25f\n"
- "ld1 { v15.s }[2], [x20], #0x4\n"
+ "ld1 { v15.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 28f\n"
- "ld1 { v15.h }[6], [x20]\n"
+ "ld1 { v15.h }[6], [x19]\n"
"b 28f\n"
"25:" // Tile loop: Oddments: Load inputs: (2, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 28f\n"
- "ld1 { v15.h }[4], [x20]\n"
+ "ld1 { v15.h }[4], [x19]\n"
"b 28f\n"
"26:" // Tile loop: Oddments: Load inputs: (2, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 27f\n"
- "ldr s15, [x20], #0x4\n"
+ "ldr s15, [x19], #0x4\n"
"tbz %x[n_channels], #0, 28f\n"
- "ld1 { v15.h }[2], [x20]\n"
+ "ld1 { v15.h }[2], [x19]\n"
"b 28f\n"
"27:" // Tile loop: Oddments: Load inputs: (2, 0): Bit 2: Unset: Bit 1: Unset
- "ldr h15, [x20, #0x0]\n"
+ "ldr h15, [x19, #0x0]\n"
"28:" // Tile loop: Oddments: Load inputs: (2, 0): Bit 2: End
"fmla v28.8h, v6.8h, v15.8h\n"
"fmla v30.8h, v0.8h, v15.8h\n"
- "add x20, x12, x6\n"
+ "add x19, x11, x7\n"
"tbz %x[n_channels], #2, 30f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #1, 29f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 32f\n"
- "ld1 { v11.h }[6], [x20]\n"
+ "ld1 { v11.h }[6], [x19]\n"
"b 32f\n"
"29:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 32f\n"
- "ld1 { v11.h }[4], [x20]\n"
+ "ld1 { v11.h }[4], [x19]\n"
"b 32f\n"
"30:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 31f\n"
- "ldr s11, [x20], #0x4\n"
+ "ldr s11, [x19], #0x4\n"
"tbz %x[n_channels], #0, 32f\n"
- "ld1 { v11.h }[2], [x20]\n"
+ "ld1 { v11.h }[2], [x19]\n"
"b 32f\n"
"31:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 2: Unset: Bit 1: Unset
- "ldr h11, [x20, #0x0]\n"
+ "ldr h11, [x19, #0x0]\n"
"32:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 2: End
"fmla v30.8h, v4.8h, v11.8h\n"
- "add x20, x14, x6\n"
+ "add x19, x13, x7\n"
"tbz %x[n_channels], #2, 34f\n"
- "ldr d16, [x20], #0x8\n"
+ "ldr d16, [x19], #0x8\n"
"tbz %x[n_channels], #1, 33f\n"
- "ld1 { v16.s }[2], [x20], #0x4\n"
+ "ld1 { v16.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 36f\n"
- "ld1 { v16.h }[6], [x20]\n"
+ "ld1 { v16.h }[6], [x19]\n"
"b 36f\n"
"33:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 36f\n"
- "ld1 { v16.h }[4], [x20]\n"
+ "ld1 { v16.h }[4], [x19]\n"
"b 36f\n"
"34:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 35f\n"
- "ldr s16, [x20], #0x4\n"
+ "ldr s16, [x19], #0x4\n"
"tbz %x[n_channels], #0, 36f\n"
- "ld1 { v16.h }[2], [x20]\n"
+ "ld1 { v16.h }[2], [x19]\n"
"b 36f\n"
"35:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 2: Unset: Bit 1: Unset
- "ldr h16, [x20, #0x0]\n"
+ "ldr h16, [x19, #0x0]\n"
"36:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 2: End
"fmla v28.8h, v7.8h, v16.8h\n"
"fmla v30.8h, v1.8h, v16.8h\n"
- "add x20, x12, x11\n"
+ "add x19, x11, x10\n"
"tbz %x[n_channels], #2, 38f\n"
- "ldr d13, [x20], #0x8\n"
+ "ldr d13, [x19], #0x8\n"
"tbz %x[n_channels], #1, 37f\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 40f\n"
- "ld1 { v13.h }[6], [x20]\n"
+ "ld1 { v13.h }[6], [x19]\n"
"b 40f\n"
"37:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 40f\n"
- "ld1 { v13.h }[4], [x20]\n"
+ "ld1 { v13.h }[4], [x19]\n"
"b 40f\n"
"38:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 39f\n"
- "ldr s13, [x20], #0x4\n"
+ "ldr s13, [x19], #0x4\n"
"tbz %x[n_channels], #0, 40f\n"
- "ld1 { v13.h }[2], [x20]\n"
+ "ld1 { v13.h }[2], [x19]\n"
"b 40f\n"
"39:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 2: Unset: Bit 1: Unset
- "ldr h13, [x20, #0x0]\n"
+ "ldr h13, [x19, #0x0]\n"
"40:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 2: End
"fmla v31.8h, v4.8h, v13.8h\n"
- "add x20, x14, x11\n"
+ "add x19, x13, x10\n"
"tbz %x[n_channels], #2, 42f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #1, 41f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 44f\n"
- "ld1 { v12.h }[6], [x20]\n"
+ "ld1 { v12.h }[6], [x19]\n"
"b 44f\n"
"41:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 44f\n"
- "ld1 { v12.h }[4], [x20]\n"
+ "ld1 { v12.h }[4], [x19]\n"
"b 44f\n"
"42:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 43f\n"
- "ldr s12, [x20], #0x4\n"
+ "ldr s12, [x19], #0x4\n"
"tbz %x[n_channels], #0, 44f\n"
- "ld1 { v12.h }[2], [x20]\n"
+ "ld1 { v12.h }[2], [x19]\n"
"b 44f\n"
"43:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 2: Unset: Bit 1: Unset
- "ldr h12, [x20, #0x0]\n"
+ "ldr h12, [x19, #0x0]\n"
"44:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 2: End
"fmla v29.8h, v7.8h, v12.8h\n"
"fmla v31.8h, v1.8h, v12.8h\n"
- "add x20, x12, x9\n"
+ "add x19, x11, x28\n"
"tbz %x[n_channels], #2, 46f\n"
- "ldr d14, [x20], #0x8\n"
+ "ldr d14, [x19], #0x8\n"
"tbz %x[n_channels], #1, 45f\n"
- "ld1 { v14.s }[2], [x20], #0x4\n"
+ "ld1 { v14.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 48f\n"
- "ld1 { v14.h }[6], [x20]\n"
+ "ld1 { v14.h }[6], [x19]\n"
"b 48f\n"
"45:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 48f\n"
- "ld1 { v14.h }[4], [x20]\n"
+ "ld1 { v14.h }[4], [x19]\n"
"b 48f\n"
"46:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 47f\n"
- "ldr s14, [x20], #0x4\n"
+ "ldr s14, [x19], #0x4\n"
"tbz %x[n_channels], #0, 48f\n"
- "ld1 { v14.h }[2], [x20]\n"
+ "ld1 { v14.h }[2], [x19]\n"
"b 48f\n"
"47:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 2: Unset: Bit 1: Unset
- "ldr h14, [x20, #0x0]\n"
+ "ldr h14, [x19, #0x0]\n"
"48:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 2: End
"fmla v31.8h, v5.8h, v14.8h\n"
- "add x20, x10, XZR\n"
+ "add x19, x9, XZR\n"
"tbz %x[n_channels], #2, 50f\n"
- "ldr d15, [x20], #0x8\n"
+ "ldr d15, [x19], #0x8\n"
"tbz %x[n_channels], #1, 49f\n"
- "ld1 { v15.s }[2], [x20], #0x4\n"
+ "ld1 { v15.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 52f\n"
- "ld1 { v15.h }[6], [x20]\n"
+ "ld1 { v15.h }[6], [x19]\n"
"b 52f\n"
"49:" // Tile loop: Oddments: Load inputs: (4, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 52f\n"
- "ld1 { v15.h }[4], [x20]\n"
+ "ld1 { v15.h }[4], [x19]\n"
"b 52f\n"
"50:" // Tile loop: Oddments: Load inputs: (4, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 51f\n"
- "ldr s15, [x20], #0x4\n"
+ "ldr s15, [x19], #0x4\n"
"tbz %x[n_channels], #0, 52f\n"
- "ld1 { v15.h }[2], [x20]\n"
+ "ld1 { v15.h }[2], [x19]\n"
"b 52f\n"
"51:" // Tile loop: Oddments: Load inputs: (4, 0): Bit 2: Unset: Bit 1: Unset
- "ldr h15, [x20, #0x0]\n"
+ "ldr h15, [x19, #0x0]\n"
"52:" // Tile loop: Oddments: Load inputs: (4, 0): Bit 2: End
"fmla v30.8h, v6.8h, v15.8h\n"
- "add x20, x14, x9\n"
+ "add x19, x13, x28\n"
"tbz %x[n_channels], #2, 54f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #1, 53f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 56f\n"
- "ld1 { v11.h }[6], [x20]\n"
+ "ld1 { v11.h }[6], [x19]\n"
"b 56f\n"
"53:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 56f\n"
- "ld1 { v11.h }[4], [x20]\n"
+ "ld1 { v11.h }[4], [x19]\n"
"b 56f\n"
"54:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 55f\n"
- "ldr s11, [x20], #0x4\n"
+ "ldr s11, [x19], #0x4\n"
"tbz %x[n_channels], #0, 56f\n"
- "ld1 { v11.h }[2], [x20]\n"
+ "ld1 { v11.h }[2], [x19]\n"
"b 56f\n"
"55:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 2: Unset: Bit 1: Unset
- "ldr h11, [x20, #0x0]\n"
+ "ldr h11, [x19, #0x0]\n"
"56:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 2: End
"fmla v29.8h, v8.8h, v11.8h\n"
"fmla v31.8h, v2.8h, v11.8h\n"
- "add x20, x10, x6\n"
+ "add x19, x9, x7\n"
"tbz %x[n_channels], #2, 58f\n"
- "ldr d13, [x20], #0x8\n"
+ "ldr d13, [x19], #0x8\n"
"tbz %x[n_channels], #1, 57f\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 60f\n"
- "ld1 { v13.h }[6], [x20]\n"
+ "ld1 { v13.h }[6], [x19]\n"
"b 60f\n"
"57:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 60f\n"
- "ld1 { v13.h }[4], [x20]\n"
+ "ld1 { v13.h }[4], [x19]\n"
"b 60f\n"
"58:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 59f\n"
- "ldr s13, [x20], #0x4\n"
+ "ldr s13, [x19], #0x4\n"
"tbz %x[n_channels], #0, 60f\n"
- "ld1 { v13.h }[2], [x20]\n"
+ "ld1 { v13.h }[2], [x19]\n"
"b 60f\n"
"59:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 2: Unset: Bit 1: Unset
- "ldr h13, [x20, #0x0]\n"
+ "ldr h13, [x19, #0x0]\n"
"60:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 2: End
"fmla v30.8h, v7.8h, v13.8h\n"
- "add x20, x12, x13\n"
+ "add x19, x11, x12\n"
"tbz %x[n_channels], #2, 62f\n"
- "ldr d16, [x20], #0x8\n"
+ "ldr d16, [x19], #0x8\n"
"tbz %x[n_channels], #1, 61f\n"
- "ld1 { v16.s }[2], [x20], #0x4\n"
+ "ld1 { v16.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 64f\n"
- "ld1 { v16.h }[6], [x20]\n"
+ "ld1 { v16.h }[6], [x19]\n"
"b 64f\n"
"61:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 64f\n"
- "ld1 { v16.h }[4], [x20]\n"
+ "ld1 { v16.h }[4], [x19]\n"
"b 64f\n"
"62:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 63f\n"
- "ldr s16, [x20], #0x4\n"
+ "ldr s16, [x19], #0x4\n"
"tbz %x[n_channels], #0, 64f\n"
- "ld1 { v16.h }[2], [x20]\n"
+ "ld1 { v16.h }[2], [x19]\n"
"b 64f\n"
"63:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 2: Unset: Bit 1: Unset
- "ldr h16, [x20, #0x0]\n"
+ "ldr h16, [x19, #0x0]\n"
"64:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 2: End
"fmla v30.8h, v5.8h, v16.8h\n"
"fmla v31.8h, v3.8h, v16.8h\n"
- "add x20, x10, x11\n"
+ "add x19, x9, x10\n"
"tbz %x[n_channels], #2, 66f\n"
- "ldr d14, [x20], #0x8\n"
+ "ldr d14, [x19], #0x8\n"
"tbz %x[n_channels], #1, 65f\n"
- "ld1 { v14.s }[2], [x20], #0x4\n"
+ "ld1 { v14.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 68f\n"
- "ld1 { v14.h }[6], [x20]\n"
+ "ld1 { v14.h }[6], [x19]\n"
"b 68f\n"
"65:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 68f\n"
- "ld1 { v14.h }[4], [x20]\n"
+ "ld1 { v14.h }[4], [x19]\n"
"b 68f\n"
"66:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 67f\n"
- "ldr s14, [x20], #0x4\n"
+ "ldr s14, [x19], #0x4\n"
"tbz %x[n_channels], #0, 68f\n"
- "ld1 { v14.h }[2], [x20]\n"
+ "ld1 { v14.h }[2], [x19]\n"
"b 68f\n"
"67:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 2: Unset: Bit 1: Unset
- "ldr h14, [x20, #0x0]\n"
+ "ldr h14, [x19, #0x0]\n"
"68:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 2: End
"fmla v31.8h, v7.8h, v14.8h\n"
- "add x20, x10, x13\n"
+ "add x19, x9, x12\n"
"tbz %x[n_channels], #2, 70f\n"
- "ldr d15, [x20], #0x8\n"
+ "ldr d15, [x19], #0x8\n"
"tbz %x[n_channels], #1, 69f\n"
- "ld1 { v15.s }[2], [x20], #0x4\n"
+ "ld1 { v15.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 72f\n"
- "ld1 { v15.h }[6], [x20]\n"
+ "ld1 { v15.h }[6], [x19]\n"
"b 72f\n"
"69:" // Tile loop: Oddments: Load inputs: (4, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 72f\n"
- "ld1 { v15.h }[4], [x20]\n"
+ "ld1 { v15.h }[4], [x19]\n"
"b 72f\n"
"70:" // Tile loop: Oddments: Load inputs: (4, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 71f\n"
- "ldr s15, [x20], #0x4\n"
+ "ldr s15, [x19], #0x4\n"
"tbz %x[n_channels], #0, 72f\n"
- "ld1 { v15.h }[2], [x20]\n"
+ "ld1 { v15.h }[2], [x19]\n"
"b 72f\n"
"71:" // Tile loop: Oddments: Load inputs: (4, 2): Bit 2: Unset: Bit 1: Unset
- "ldr h15, [x20, #0x0]\n"
+ "ldr h15, [x19, #0x0]\n"
"72:" // Tile loop: Oddments: Load inputs: (4, 2): Bit 2: End
"fmla v30.8h, v8.8h, v15.8h\n"
"fmla v31.8h, v6.8h, v15.8h\n"
- "add x20, x10, x9\n"
+ "add x19, x9, x28\n"
"tbz %x[n_channels], #2, 74f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #1, 73f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 76f\n"
- "ld1 { v11.h }[6], [x20]\n"
+ "ld1 { v11.h }[6], [x19]\n"
"b 76f\n"
"73:" // Tile loop: Oddments: Load inputs: (4, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 76f\n"
- "ld1 { v11.h }[4], [x20]\n"
+ "ld1 { v11.h }[4], [x19]\n"
"b 76f\n"
"74:" // Tile loop: Oddments: Load inputs: (4, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 75f\n"
- "ldr s11, [x20], #0x4\n"
+ "ldr s11, [x19], #0x4\n"
"tbz %x[n_channels], #0, 76f\n"
- "ld1 { v11.h }[2], [x20]\n"
+ "ld1 { v11.h }[2], [x19]\n"
"b 76f\n"
"75:" // Tile loop: Oddments: Load inputs: (4, 4): Bit 2: Unset: Bit 1: Unset
- "ldr h11, [x20, #0x0]\n"
+ "ldr h11, [x19, #0x0]\n"
"76:" // Tile loop: Oddments: Load inputs: (4, 4): Bit 2: End
"fmla v31.8h, v8.8h, v11.8h\n"
"fmax v28.8h, v28.8h, v19.8h\n"
@@ -811,82 +811,82 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"fmin v30.8h, v30.8h, v18.8h\n"
"fmin v31.8h, v31.8h, v18.8h\n"
"tbz %x[n_channels], #2, 78f\n"
- "mov x21, x17\n"
- "mov x20, x28\n"
- "st1 { v28.d }[0], [x21], x7\n"
- "st1 { v30.d }[0], [x20], x7\n"
- "add x17, x17, #0x8\n"
- "add x28, x28, #0x8\n"
- "st1 { v29.d }[0], [x21]\n"
- "st1 { v31.d }[0], [x20]\n"
+ "mov x20, x16\n"
+ "mov x19, x27\n"
+ "st1 { v28.d }[0], [x20], x8\n"
+ "add x16, x16, #0x8\n"
+ "add x27, x27, #0x8\n"
+ "st1 { v30.d }[0], [x19], x8\n"
+ "st1 { v29.d }[0], [x20]\n"
+ "st1 { v31.d }[0], [x19]\n"
"tbz %x[n_channels], #1, 77f\n"
- "mov x21, x17\n"
- "mov x20, x28\n"
- "st1 { v28.s }[2], [x21], x7\n"
- "st1 { v30.s }[2], [x20], x7\n"
- "add x17, x17, #0x4\n"
- "add x28, x28, #0x4\n"
- "st1 { v29.s }[2], [x21]\n"
- "st1 { v31.s }[2], [x20]\n"
+ "mov x20, x16\n"
+ "mov x19, x27\n"
+ "st1 { v28.s }[2], [x20], x8\n"
+ "add x16, x16, #0x4\n"
+ "add x27, x27, #0x4\n"
+ "st1 { v30.s }[2], [x19], x8\n"
+ "st1 { v29.s }[2], [x20]\n"
+ "st1 { v31.s }[2], [x19]\n"
"tbz %x[n_channels], #0, 80f\n"
- "mov x21, x17\n"
- "mov x20, x28\n"
- "st1 { v28.h }[6], [x21], x7\n"
- "st1 { v30.h }[6], [x20], x7\n"
- "st1 { v29.h }[6], [x21]\n"
- "st1 { v31.h }[6], [x20]\n"
+ "mov x20, x16\n"
+ "mov x19, x27\n"
+ "st1 { v28.h }[6], [x20], x8\n"
+ "st1 { v30.h }[6], [x19], x8\n"
+ "st1 { v29.h }[6], [x20]\n"
+ "st1 { v31.h }[6], [x19]\n"
"b 80f\n"
"77:" // Tile loop: Oddments: Store: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 80f\n"
- "mov x21, x17\n"
- "mov x20, x28\n"
- "st1 { v28.h }[4], [x21], x7\n"
- "st1 { v30.h }[4], [x20], x7\n"
- "st1 { v29.h }[4], [x21]\n"
- "st1 { v31.h }[4], [x20]\n"
+ "mov x20, x16\n"
+ "mov x19, x27\n"
+ "st1 { v28.h }[4], [x20], x8\n"
+ "st1 { v30.h }[4], [x19], x8\n"
+ "st1 { v29.h }[4], [x20]\n"
+ "st1 { v31.h }[4], [x19]\n"
"b 80f\n"
"78:" // Tile loop: Oddments: Store: Bit 2: Unset
"tbz %x[n_channels], #1, 79f\n"
- "mov x21, x17\n"
- "mov x20, x28\n"
- "st1 { v28.s }[0], [x21], x7\n"
- "st1 { v30.s }[0], [x20], x7\n"
- "add x17, x17, #0x4\n"
- "add x28, x28, #0x4\n"
- "st1 { v29.s }[0], [x21]\n"
- "st1 { v31.s }[0], [x20]\n"
+ "mov x20, x16\n"
+ "mov x19, x27\n"
+ "st1 { v28.s }[0], [x20], x8\n"
+ "st1 { v30.s }[0], [x19], x8\n"
+ "add x16, x16, #0x4\n"
+ "add x27, x27, #0x4\n"
+ "st1 { v29.s }[0], [x20]\n"
+ "st1 { v31.s }[0], [x19]\n"
"tbz %x[n_channels], #0, 80f\n"
- "mov x21, x17\n"
- "mov x20, x28\n"
- "st1 { v28.h }[2], [x21], x7\n"
- "st1 { v30.h }[2], [x20], x7\n"
- "st1 { v29.h }[2], [x21]\n"
- "st1 { v31.h }[2], [x20]\n"
+ "mov x20, x16\n"
+ "mov x19, x27\n"
+ "st1 { v28.h }[2], [x20], x8\n"
+ "st1 { v30.h }[2], [x19], x8\n"
+ "st1 { v29.h }[2], [x20]\n"
+ "st1 { v31.h }[2], [x19]\n"
"b 80f\n"
"79:" // Tile loop: Oddments: Store: Bit 2: Unset: Bit 1: Unset
- "mov x21, x17\n"
- "mov x20, x28\n"
- "st1 { v28.h }[0], [x21], x7\n"
- "st1 { v30.h }[0], [x20], x7\n"
- "st1 { v29.h }[0], [x21]\n"
- "st1 { v31.h }[0], [x20]\n"
+ "mov x20, x16\n"
+ "mov x19, x27\n"
+ "st1 { v28.h }[0], [x20], x8\n"
+ "st1 { v30.h }[0], [x19], x8\n"
+ "st1 { v29.h }[0], [x20]\n"
+ "st1 { v31.h }[0], [x19]\n"
"80:" // Tile loop: Oddments: Store: Bit 2: End
"81:" // Tile loop: End
- "ldr x27, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x23, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "add x27, x27, #0x1\n"
- "add x21, x23, #0x1\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "cmp x27, x20\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "csel x23, x23, x21, LT\n"
- "csel x27, x27, XZR, LT\n"
- "cmp x23, x20\n"
+ "ldr x26, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "add x26, x26, #0x1\n"
+ "add x20, x22, #0x1\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "cmp x26, x19\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "csel x22, x22, x20, LT\n"
+ "csel x26, x26, XZR, LT\n"
+ "cmp x22, x19\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v28", "v29", "v30", "v31", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v28", "v29", "v30", "v31", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
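The commented mul/madd/add sequence at the top of the tile loop above computes the per-tile input and output base pointers. A hypothetical C++ restatement of that arithmetic, assuming the constants visible in this kernel (kernel_stride * output_size == 4 for the input side, output_tile_size == 2 for the output side) and working in elements so the asm's LSL #1 byte scaling is implicit in the __fp16 pointer arithmetic:

    // Hypothetical restatement (not part of the patch) of the tile-loop
    // pointer arithmetic commented in the assembly above.
    #include <cstddef>

    struct TileArgs {  // assumed subset of the kernel's Args layout
        __fp16 *inptr, *outptr;
        size_t ld_input_row, ld_input_col;
        size_t ld_output_row, ld_output_col;
    };

    void tile_base_pointers(const TileArgs &args, size_t tile_i, size_t tile_j,
                            __fp16 *&in, __fp16 *&out)
    {
        size_t in_off = tile_i * args.ld_input_row;    // offset  = tile_i * ld_input_row
        in_off += tile_j * args.ld_input_col;          // offset += tile_j * ld_input_col
        in_off *= 4;                                   // offset *= kernel_stride * output_size
        in = args.inptr + in_off;                      // asm: add ..., LSL #1 (sizeof(__fp16))

        size_t out_off = tile_i * args.ld_output_row;  // offset  = tile_i * ld_output_row
        out_off += tile_j * args.ld_output_col;        // offset += tile_j * ld_output_col
        out_off *= 2;                                  // offset *= output_tile_size
        out = args.outptr + out_off;                   // outptrs[0] += offset * sizeof(__fp16)
    }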
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
index 144d11fb39..61c58186f5 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -88,347 +88,347 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
__asm__ __volatile__(
"ldr x21, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "mov x26, #0x10\n" // cntb _, ALL, #1
- "lsr x25, %x[n_channels], #0x3\n"
- "ldr x24, [%x[params_struct], %[offsetof_args_params]]\n"
+ "mov x16, #0x10\n" // cntb _, ALL, #1
+ "lsr x15, %x[n_channels], #0x3\n"
+ "ldr x14, [%x[params_struct], %[offsetof_args_params]]\n"
"add x20, %x[params_struct], %[offsetof_args_min]\n"
+ "add x19, %x[params_struct], %[offsetof_args_max]\n"
+ "ldp x13, x12, [x21, #0x0]\n"
+ "ldp x11, x10, [x21, #0x10]\n"
+ "add x9, %x[params_struct], %[offsetof_Args_inptrs]\n"
"ld1r { v19.8h }, [x20]\n"
- "add x20, %x[params_struct], %[offsetof_args_max]\n"
- "ld1r { v18.8h }, [x20]\n"
- "add x13, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "ldp x12, x11, [x21, #0x0]\n"
- "ldp x10, x9, [x21, #0x10]\n"
+ "ld1r { v18.8h }, [x19]\n"
"mov x28, #0x0\n"
- "sub x23, XZR, x26\n"
- "cbz x25, 3f\n"
- "ldr q17, [x24, #0x0]\n"
- "ldr q0, [x24, #0x10]\n"
- "cmp x26, x25, LSL #4\n"
- "ldr q1, [x24, #0x20]\n"
- "ldr q2, [x24, #0x30]\n"
- "ldr q3, [x24, #0x40]\n"
- "ldr q4, [x24, #0x50]\n"
- "ldr q5, [x24, #0x60]\n"
- "ldr q6, [x24, #0x70]\n"
- "ldr q7, [x24, #0x80]\n"
- "ldr q8, [x24, #0x90]\n"
- "add x24, x24, #0xa0\n"
- "ldp x22, x20, [x13, #0x0]\n"
- "ldr q9, [x22, x28]\n"
- "ldr q10, [x20, x28]\n"
- "ldp x21, x20, [x13, #0x10]\n"
- "ldr q11, [x21, x28]\n"
- "ldr q12, [x20, x28]\n"
- "ldp x22, x21, [x13, #0x20]\n"
+ "sub x27, XZR, x16\n"
+ "cbz x15, 3f\n"
+ "ldp x26, x25, [x9, #0x0]\n"
+ "ldp x24, x23, [x9, #0x10]\n"
+ "ldp x22, x21, [x9, #0x20]\n"
+ "ldp x20, x19, [x9, #0x30]\n"
+ "cmp x16, x15, LSL #4\n"
+ "ldr q17, [x14, #0x0]\n"
+ "ldr q0, [x14, #0x10]\n"
+ "ldr q1, [x14, #0x20]\n"
+ "ldr q2, [x14, #0x30]\n"
+ "ldr q3, [x14, #0x40]\n"
+ "ldr q4, [x14, #0x50]\n"
+ "ldr q5, [x14, #0x60]\n"
+ "ldr q6, [x14, #0x70]\n"
+ "ldr q7, [x14, #0x80]\n"
+ "ldr q8, [x14, #0x90]\n"
+ "add x14, x14, #0xa0\n"
+ "ldr q9, [x26, x28]\n"
+ "ldr q10, [x25, x28]\n"
+ "ldr q11, [x24, x28]\n"
+ "ldr q12, [x23, x28]\n"
"ldr q13, [x22, x28]\n"
"ldr q14, [x21, x28]\n"
- "ldp x21, x20, [x13, #0x30]\n"
- "ldr q15, [x21, x28]\n"
- "ldr q16, [x20, x28]\n"
+ "ldr q15, [x20, x28]\n"
+ "ldr q16, [x19, x28]\n"
"bge 2f\n"
"1:" // Channel loop
"mov v28.16b, v17.16b\n fmla v28.8h, v8.8h, v9.8h\n"
"mov v29.16b, v17.16b\n fmla v29.8h, v6.8h, v9.8h\n"
- "ldr x22, [x13, #0x40]\n"
- "ldr x20, [x13, #0x48]\n"
+ "ldr x26, [x9, #0x40]\n"
+ "ldr x25, [x9, #0x48]\n"
"fmla v28.8h, v0.8h, v10.8h\n"
"fmla v29.8h, v1.8h, v12.8h\n"
- "ldr q12, [x20, x28]\n"
- "ldr x21, [x13, #0x50]\n"
+ "ldr q12, [x25, x28]\n"
+ "ldr x24, [x9, #0x50]\n"
"fmla v28.8h, v1.8h, v11.8h\n"
- "ldr q11, [x22, x28]\n"
"fmla v29.8h, v2.8h, v13.8h\n"
- "ldr q13, [x21, x28]\n"
+ "ldr q11, [x26, x28]\n"
+ "ldr q13, [x24, x28]\n"
"fmla v28.8h, v3.8h, v14.8h\n"
"fmla v29.8h, v0.8h, v16.8h\n"
- "ldr x20, [x13, #0x58]\n"
- "ldr q14, [x20, x28]\n"
+ "ldr x23, [x9, #0x58]\n"
+ "ldr x19, [x9, #0x78]\n"
"fmla v28.8h, v4.8h, v15.8h\n"
"fmla v29.8h, v4.8h, v11.8h\n"
- "ldr x20, [x13, #0x78]\n"
- "ldr x22, [x13, #0x60]\n"
- "ldr q15, [x22, x28]\n"
+ "ldr q14, [x23, x28]\n"
+ "ldr x22, [x9, #0x60]\n"
"fmla v28.8h, v2.8h, v16.8h\n"
"fmla v29.8h, v5.8h, v12.8h\n"
- "ldr x22, [x13, #0x80]\n"
- "ldr q12, [x22, x28]\n"
+ "ldr x26, [x9, #0x80]\n"
+ "ldr q15, [x22, x28]\n"
"mov v30.16b, v17.16b\n fmla v30.8h, v2.8h, v9.8h\n"
"mov v31.16b, v17.16b\n fmla v31.8h, v0.8h, v9.8h\n"
- "ldr q17, [x24, #0x0]\n"
+ "ldr q12, [x26, x28]\n"
+ "ldr x21, [x9, #0x68]\n"
"fmla v28.8h, v5.8h, v13.8h\n"
"fmla v29.8h, v3.8h, v13.8h\n"
- "ldr q13, [x20, x28]\n"
- "ldr x21, [x13, #0x68]\n"
- "ldr q11, [x21, x28]\n"
+ "ldr q13, [x19, x28]\n"
+ "ldr x25, [x9, #0x88]\n"
"fmla v30.8h, v3.8h, v14.8h\n"
"fmla v31.8h, v4.8h, v13.8h\n"
- "ldr x20, [x13, #0x88]\n"
- "ldr q14, [x20, x28]\n"
+ "ldr q11, [x21, x28]\n"
+ "ldr q14, [x25, x28]\n"
"fmla v30.8h, v0.8h, v15.8h\n"
- "ldr q0, [x24, #0x10]\n"
"fmla v31.8h, v1.8h, v12.8h\n"
- "ldr x21, [x13, #0x70]\n"
- "ldr q16, [x21, x28]\n"
+ "ldr x20, [x9, #0x70]\n"
+ "ldr x23, [x9, #0x98]\n"
"fmla v30.8h, v4.8h, v11.8h\n"
"fmla v31.8h, v5.8h, v14.8h\n"
- "ldr q4, [x24, #0x50]\n"
- "ldr x20, [x13, #0x98]\n"
+ "ldr q16, [x20, x28]\n"
+ "ldr q11, [x23, x28]\n"
"fmla v28.8h, v6.8h, v15.8h\n"
+ "ldr x24, [x9, #0x90]\n"
+ "ldr x21, [x9, #0xa8]\n"
"fmla v30.8h, v1.8h, v16.8h\n"
- "ldr q11, [x20, x28]\n"
- "ldr q1, [x24, #0x20]\n"
"fmla v31.8h, v2.8h, v11.8h\n"
"fmla v28.8h, v7.8h, v16.8h\n"
- "ldr q2, [x24, #0x30]\n"
- "ldr x21, [x13, #0x90]\n"
- "fmla v29.8h, v7.8h, v12.8h\n"
- "fmla v29.8h, v8.8h, v11.8h\n"
- "ldr q15, [x21, x28]\n"
- "ldr x21, [x13, #0xa8]\n"
- "fmla v30.8h, v6.8h, v15.8h\n"
- "fmax v28.8h, v28.8h, v19.8h\n"
+ "ldr q15, [x24, x28]\n"
"ldr q16, [x21, x28]\n"
- "ldr x22, [x13, #0xa0]\n"
+ "ldr x22, [x9, #0xa0]\n"
+ "ldr x20, [x9, #0xb0]\n"
+ "fmla v30.8h, v6.8h, v15.8h\n"
"fmla v31.8h, v3.8h, v16.8h\n"
- "fmax v29.8h, v29.8h, v19.8h\n"
"ldr q13, [x22, x28]\n"
- "ldr q3, [x24, #0x40]\n"
+ "ldr q14, [x20, x28]\n"
"fmla v30.8h, v7.8h, v13.8h\n"
- "fmla v30.8h, v5.8h, v16.8h\n"
- "ldr q5, [x24, #0x60]\n"
- "ldr x21, [x13, #0xb0]\n"
- "add x23, x23, #0x10\n"
- "fmin v28.8h, v28.8h, v18.8h\n"
- "ldr q14, [x21, x28]\n"
- "ldr x20, [x13, #0xb8]\n"
"fmla v31.8h, v7.8h, v14.8h\n"
- "fmin v29.8h, v29.8h, v18.8h\n"
- "ldr q15, [x20, x28]\n"
- "ldr q7, [x24, #0x80]\n"
+ "ldr x19, [x9, #0xb8]\n"
+ "fmla v29.8h, v7.8h, v12.8h\n"
+ "ldr q15, [x19, x28]\n"
+ "fmla v30.8h, v5.8h, v16.8h\n"
+ "ldr x26, [x9, #0xc0]\n"
"fmla v31.8h, v6.8h, v15.8h\n"
+ "fmla v29.8h, v8.8h, v11.8h\n"
+ "ldr q11, [x26, x28]\n"
"fmla v30.8h, v8.8h, v15.8h\n"
- "ldr q6, [x24, #0x70]\n"
- "ldr x22, [x13, #0xc0]\n"
- "fmax v30.8h, v30.8h, v19.8h\n"
- "fmin v30.8h, v30.8h, v18.8h\n"
- "ldr q11, [x22, x28]\n"
"fmla v31.8h, v8.8h, v11.8h\n"
- "ldr q8, [x24, #0x90]\n"
+ "ldp x26, x25, [x9, #0x0]\n"
+ "ldp x24, x23, [x9, #0x10]\n"
+ "ldp x22, x21, [x9, #0x20]\n"
+ "ldp x20, x19, [x9, #0x30]\n"
+ "fmax v28.8h, v28.8h, v19.8h\n"
+ "fmax v29.8h, v29.8h, v19.8h\n"
+ "fmax v30.8h, v30.8h, v19.8h\n"
"fmax v31.8h, v31.8h, v19.8h\n"
- "ldp x22, x20, [x13, #0x0]\n"
- "ldr q9, [x22, x26]\n"
+ "ldr q9, [x26, x16]\n"
+ "ldr q10, [x25, x16]\n"
+ "ldr q11, [x24, x16]\n"
+ "ldr q12, [x23, x16]\n"
+ "add x27, x27, #0x10\n"
+ "fmin v28.8h, v28.8h, v18.8h\n"
+ "ldr q13, [x22, x16]\n"
+ "ldr q14, [x21, x16]\n"
+ "fmin v29.8h, v29.8h, v18.8h\n"
+ "fmin v30.8h, v30.8h, v18.8h\n"
+ "ldr q15, [x20, x16]\n"
+ "ldr q16, [x19, x16]\n"
+ "add x16, x16, #0x10\n"
+ "cmp x16, x15, LSL #4\n"
"fmin v31.8h, v31.8h, v18.8h\n"
"add x28, x28, #0x10\n"
- "ldr q10, [x20, x26]\n"
- "ldp x21, x20, [x13, #0x10]\n"
- "str q28, [x12, x23]\n"
- "add x24, x24, #0xa0\n"
- "ldr q11, [x21, x26]\n"
- "ldr q12, [x20, x26]\n"
- "str q29, [x11, x23]\n"
- "ldp x22, x21, [x13, #0x20]\n"
- "ldr q13, [x22, x26]\n"
- "str q30, [x10, x23]\n"
- "ldr q14, [x21, x26]\n"
- "ldp x21, x20, [x13, #0x30]\n"
- "str q31, [x9, x23]\n"
- "ldr q15, [x21, x26]\n"
- "ldr q16, [x20, x26]\n"
- "add x26, x26, #0x10\n"
- "cmp x26, x25, LSL #4\n"
+ "str q28, [x13, x27]\n"
+ "ldr q17, [x14, #0x0]\n"
+ "str q29, [x12, x27]\n"
+ "ldr q0, [x14, #0x10]\n"
+ "ldr q1, [x14, #0x20]\n"
+ "str q30, [x11, x27]\n"
+ "ldr q2, [x14, #0x30]\n"
+ "ldr q3, [x14, #0x40]\n"
+ "str q31, [x10, x27]\n"
+ "ldr q4, [x14, #0x50]\n"
+ "ldr q5, [x14, #0x60]\n"
+ "ldr q6, [x14, #0x70]\n"
+ "ldr q7, [x14, #0x80]\n"
+ "ldr q8, [x14, #0x90]\n"
+ "add x14, x14, #0xa0\n"
"blt 1b\n"
"2:" // Channel tail
"mov v28.16b, v17.16b\n fmla v28.8h, v8.8h, v9.8h\n"
"mov v29.16b, v17.16b\n fmla v29.8h, v6.8h, v9.8h\n"
- "ldr x22, [x13, #0x40]\n"
- "ldr x20, [x13, #0x48]\n"
+ "ldr x26, [x9, #0x40]\n"
+ "ldr x25, [x9, #0x48]\n"
"fmla v28.8h, v0.8h, v10.8h\n"
"fmla v29.8h, v1.8h, v12.8h\n"
- "ldr q12, [x20, x28]\n"
- "ldr x21, [x13, #0x50]\n"
+ "ldr q12, [x25, x28]\n"
+ "ldr x24, [x9, #0x50]\n"
"fmla v28.8h, v1.8h, v11.8h\n"
- "ldr q11, [x22, x28]\n"
"fmla v29.8h, v2.8h, v13.8h\n"
- "ldr q13, [x21, x28]\n"
+ "ldr q11, [x26, x28]\n"
+ "ldr q13, [x24, x28]\n"
"fmla v28.8h, v3.8h, v14.8h\n"
"fmla v29.8h, v0.8h, v16.8h\n"
- "ldr x20, [x13, #0x58]\n"
- "ldr q14, [x20, x28]\n"
+ "ldr x23, [x9, #0x58]\n"
+ "ldr x19, [x9, #0x78]\n"
"fmla v28.8h, v4.8h, v15.8h\n"
"fmla v29.8h, v4.8h, v11.8h\n"
- "ldr x20, [x13, #0x78]\n"
- "ldr x22, [x13, #0x60]\n"
- "ldr q15, [x22, x28]\n"
+ "ldr q14, [x23, x28]\n"
+ "ldr x22, [x9, #0x60]\n"
"fmla v28.8h, v2.8h, v16.8h\n"
"fmla v29.8h, v5.8h, v12.8h\n"
- "ldr x22, [x13, #0x80]\n"
- "ldr q12, [x22, x28]\n"
+ "ldr x26, [x9, #0x80]\n"
+ "ldr q15, [x22, x28]\n"
"mov v30.16b, v17.16b\n fmla v30.8h, v2.8h, v9.8h\n"
"mov v31.16b, v17.16b\n fmla v31.8h, v0.8h, v9.8h\n"
- "ldr x21, [x13, #0x68]\n"
- "ldr q11, [x21, x28]\n"
+ "ldr q12, [x26, x28]\n"
+ "ldr x21, [x9, #0x68]\n"
"fmla v28.8h, v5.8h, v13.8h\n"
"fmla v29.8h, v3.8h, v13.8h\n"
- "ldr q13, [x20, x28]\n"
+ "ldr q13, [x19, x28]\n"
+ "ldr x25, [x9, #0x88]\n"
"fmla v30.8h, v3.8h, v14.8h\n"
"fmla v31.8h, v4.8h, v13.8h\n"
- "ldr x20, [x13, #0x88]\n"
- "ldr q14, [x20, x28]\n"
+ "ldr q11, [x21, x28]\n"
+ "ldr q14, [x25, x28]\n"
"fmla v30.8h, v0.8h, v15.8h\n"
"fmla v31.8h, v1.8h, v12.8h\n"
- "ldr x21, [x13, #0x70]\n"
- "ldr q16, [x21, x28]\n"
- "ldr x20, [x13, #0x98]\n"
+ "ldr x20, [x9, #0x70]\n"
+ "ldr x23, [x9, #0x98]\n"
"fmla v30.8h, v4.8h, v11.8h\n"
- "ldr q11, [x20, x28]\n"
"fmla v31.8h, v5.8h, v14.8h\n"
+ "ldr q16, [x20, x28]\n"
+ "ldr q11, [x23, x28]\n"
"fmla v28.8h, v6.8h, v15.8h\n"
- "ldr x21, [x13, #0x90]\n"
- "ldr q15, [x21, x28]\n"
+ "ldr x24, [x9, #0x90]\n"
+ "ldr x21, [x9, #0xa8]\n"
"fmla v30.8h, v1.8h, v16.8h\n"
- "ldr x21, [x13, #0xa8]\n"
"fmla v31.8h, v2.8h, v11.8h\n"
"fmla v28.8h, v7.8h, v16.8h\n"
+ "ldr q15, [x24, x28]\n"
"ldr q16, [x21, x28]\n"
- "ldr x22, [x13, #0xa0]\n"
- "ldr q13, [x22, x28]\n"
+ "ldr x22, [x9, #0xa0]\n"
+ "ldr x20, [x9, #0xb0]\n"
"fmla v30.8h, v6.8h, v15.8h\n"
"fmla v31.8h, v3.8h, v16.8h\n"
- "ldr x21, [x13, #0xb0]\n"
- "ldr q14, [x21, x28]\n"
+ "ldr q13, [x22, x28]\n"
+ "ldr q14, [x20, x28]\n"
"fmla v30.8h, v7.8h, v13.8h\n"
"fmla v31.8h, v7.8h, v14.8h\n"
- "ldr x20, [x13, #0xb8]\n"
- "ldr q15, [x20, x28]\n"
+ "ldr x19, [x9, #0xb8]\n"
"fmla v29.8h, v7.8h, v12.8h\n"
+ "ldr q15, [x19, x28]\n"
"fmla v30.8h, v5.8h, v16.8h\n"
- "ldr x22, [x13, #0xc0]\n"
+ "ldr x26, [x9, #0xc0]\n"
"fmla v31.8h, v6.8h, v15.8h\n"
"fmla v29.8h, v8.8h, v11.8h\n"
- "ldr q11, [x22, x28]\n"
+ "ldr q11, [x26, x28]\n"
"fmla v30.8h, v8.8h, v15.8h\n"
"fmla v31.8h, v8.8h, v11.8h\n"
"fmax v28.8h, v28.8h, v19.8h\n"
- "add x23, x23, #0x10\n"
+ "add x27, x27, #0x10\n"
"fmax v29.8h, v29.8h, v19.8h\n"
"fmax v30.8h, v30.8h, v19.8h\n"
"add x28, x28, #0x10\n"
"fmax v31.8h, v31.8h, v19.8h\n"
"fmin v28.8h, v28.8h, v18.8h\n"
- "str q28, [x12, x23]\n"
+ "str q28, [x13, x27]\n"
"fmin v29.8h, v29.8h, v18.8h\n"
"fmin v30.8h, v30.8h, v18.8h\n"
- "str q29, [x11, x23]\n"
+ "str q29, [x12, x27]\n"
"fmin v31.8h, v31.8h, v18.8h\n"
- "str q30, [x10, x23]\n"
- "str q31, [x9, x23]\n"
+ "str q30, [x11, x27]\n"
+ "str q31, [x10, x27]\n"
"3:" // Oddments
"tst %x[n_channels], #0x7\n"
"beq 80f\n"
- "ldr q17, [x24, #0x0]\n"
- "ldr q0, [x24, #0x10]\n"
- "mov x23, x28\n"
- "add x12, x12, x23\n"
- "ldr q1, [x24, #0x20]\n"
- "ldr q2, [x24, #0x30]\n"
- "add x11, x11, x23\n"
- "add x10, x10, x23\n"
- "ldr q3, [x24, #0x40]\n"
- "ldr q4, [x24, #0x50]\n"
- "add x9, x9, x23\n"
- "ldr q5, [x24, #0x60]\n"
- "ldr q6, [x24, #0x70]\n"
- "ldr q7, [x24, #0x80]\n"
- "ldr q8, [x24, #0x90]\n"
- "ldr x27, [x13, #0x0]\n"
- "ldr x26, [x13, #0x8]\n"
- "add x27, x27, x28\n"
+ "mov x27, x28\n"
+ "ldr x26, [x9, #0x0]\n"
+ "ldr x25, [x9, #0x8]\n"
+ "ldr x24, [x9, #0x10]\n"
+ "add x13, x13, x27\n"
+ "add x12, x12, x27\n"
+ "ldr x23, [x9, #0x18]\n"
+ "ldr x22, [x9, #0x20]\n"
+ "add x11, x11, x27\n"
+ "add x10, x10, x27\n"
+ "ldr x21, [x9, #0x28]\n"
+ "ldr x20, [x9, #0x30]\n"
"add x26, x26, x28\n"
- "ldr x25, [x13, #0x10]\n"
- "ldr x24, [x13, #0x18]\n"
"add x25, x25, x28\n"
+ "ldr x19, [x9, #0x38]\n"
+ "ldr q17, [x14, #0x0]\n"
"add x24, x24, x28\n"
- "ldr x23, [x13, #0x20]\n"
- "ldr x22, [x13, #0x28]\n"
"add x23, x23, x28\n"
+ "ldr q0, [x14, #0x10]\n"
+ "ldr q1, [x14, #0x20]\n"
"add x22, x22, x28\n"
- "ldr x21, [x13, #0x30]\n"
- "ldr x20, [x13, #0x38]\n"
"add x21, x21, x28\n"
+ "ldr q2, [x14, #0x30]\n"
+ "ldr q3, [x14, #0x40]\n"
"add x20, x20, x28\n"
+ "add x19, x19, x28\n"
+ "ldr q4, [x14, #0x50]\n"
+ "ldr q5, [x14, #0x60]\n"
+ "ldr q6, [x14, #0x70]\n"
+ "ldr q7, [x14, #0x80]\n"
+ "ldr q8, [x14, #0x90]\n"
"tbz %x[n_channels], #2, 5f\n"
- "ld1 { v9.d }[0], [x27], #0x8\n"
- "ld1 { v10.d }[0], [x26], #0x8\n"
- "ld1 { v11.d }[0], [x25], #0x8\n"
- "ld1 { v12.d }[0], [x24], #0x8\n"
- "ld1 { v13.d }[0], [x23], #0x8\n"
- "ld1 { v14.d }[0], [x22], #0x8\n"
- "ld1 { v15.d }[0], [x21], #0x8\n"
- "ld1 { v16.d }[0], [x20], #0x8\n"
+ "ld1 { v9.d }[0], [x26], #0x8\n"
+ "ld1 { v10.d }[0], [x25], #0x8\n"
+ "ld1 { v11.d }[0], [x24], #0x8\n"
+ "ld1 { v12.d }[0], [x23], #0x8\n"
+ "ld1 { v13.d }[0], [x22], #0x8\n"
+ "ld1 { v14.d }[0], [x21], #0x8\n"
+ "ld1 { v15.d }[0], [x20], #0x8\n"
+ "ld1 { v16.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 4f\n"
- "ld1 { v9.s }[2], [x27], #0x4\n"
- "ld1 { v10.s }[2], [x26], #0x4\n"
- "ld1 { v11.s }[2], [x25], #0x4\n"
- "ld1 { v12.s }[2], [x24], #0x4\n"
- "ld1 { v13.s }[2], [x23], #0x4\n"
- "ld1 { v14.s }[2], [x22], #0x4\n"
- "ld1 { v15.s }[2], [x21], #0x4\n"
- "ld1 { v16.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x26], #0x4\n"
+ "ld1 { v10.s }[2], [x25], #0x4\n"
+ "ld1 { v11.s }[2], [x24], #0x4\n"
+ "ld1 { v12.s }[2], [x23], #0x4\n"
+ "ld1 { v13.s }[2], [x22], #0x4\n"
+ "ld1 { v14.s }[2], [x21], #0x4\n"
+ "ld1 { v15.s }[2], [x20], #0x4\n"
+ "ld1 { v16.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 7f\n"
- "ld1 { v9.h }[6], [x27], #0x2\n"
- "ld1 { v10.h }[6], [x26], #0x2\n"
- "ld1 { v11.h }[6], [x25], #0x2\n"
- "ld1 { v12.h }[6], [x24], #0x2\n"
- "ld1 { v13.h }[6], [x23], #0x2\n"
- "ld1 { v14.h }[6], [x22], #0x2\n"
- "ld1 { v15.h }[6], [x21], #0x2\n"
- "ld1 { v16.h }[6], [x20], #0x2\n"
+ "ld1 { v9.h }[6], [x26], #0x2\n"
+ "ld1 { v10.h }[6], [x25], #0x2\n"
+ "ld1 { v11.h }[6], [x24], #0x2\n"
+ "ld1 { v12.h }[6], [x23], #0x2\n"
+ "ld1 { v13.h }[6], [x22], #0x2\n"
+ "ld1 { v14.h }[6], [x21], #0x2\n"
+ "ld1 { v15.h }[6], [x20], #0x2\n"
+ "ld1 { v16.h }[6], [x19], #0x2\n"
"b 7f\n"
"4:" // Oddments: Load inputs (2, 2), (0, 0), (0, 1), (0, 3), (0, 4), (1, 0), (1, 1), (0, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 7f\n"
- "ld1 { v9.h }[4], [x27], #0x2\n"
- "ld1 { v10.h }[4], [x26], #0x2\n"
- "ld1 { v11.h }[4], [x25], #0x2\n"
- "ld1 { v12.h }[4], [x24], #0x2\n"
- "ld1 { v13.h }[4], [x23], #0x2\n"
- "ld1 { v14.h }[4], [x22], #0x2\n"
- "ld1 { v15.h }[4], [x21], #0x2\n"
- "ld1 { v16.h }[4], [x20], #0x2\n"
+ "ld1 { v9.h }[4], [x26], #0x2\n"
+ "ld1 { v10.h }[4], [x25], #0x2\n"
+ "ld1 { v11.h }[4], [x24], #0x2\n"
+ "ld1 { v12.h }[4], [x23], #0x2\n"
+ "ld1 { v13.h }[4], [x22], #0x2\n"
+ "ld1 { v14.h }[4], [x21], #0x2\n"
+ "ld1 { v15.h }[4], [x20], #0x2\n"
+ "ld1 { v16.h }[4], [x19], #0x2\n"
"b 7f\n"
"5:" // Oddments: Load inputs (2, 2), (0, 0), (0, 1), (0, 3), (0, 4), (1, 0), (1, 1), (0, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 6f\n"
- "ld1 { v9.s }[0], [x27], #0x4\n"
- "ld1 { v10.s }[0], [x26], #0x4\n"
- "ld1 { v11.s }[0], [x25], #0x4\n"
- "ld1 { v12.s }[0], [x24], #0x4\n"
- "ld1 { v13.s }[0], [x23], #0x4\n"
- "ld1 { v14.s }[0], [x22], #0x4\n"
- "ld1 { v15.s }[0], [x21], #0x4\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
+ "ld1 { v9.s }[0], [x26], #0x4\n"
+ "ld1 { v10.s }[0], [x25], #0x4\n"
+ "ld1 { v11.s }[0], [x24], #0x4\n"
+ "ld1 { v12.s }[0], [x23], #0x4\n"
+ "ld1 { v13.s }[0], [x22], #0x4\n"
+ "ld1 { v14.s }[0], [x21], #0x4\n"
+ "ld1 { v15.s }[0], [x20], #0x4\n"
+ "ld1 { v16.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 7f\n"
- "ld1 { v9.h }[2], [x27], #0x2\n"
- "ld1 { v10.h }[2], [x26], #0x2\n"
- "ld1 { v11.h }[2], [x25], #0x2\n"
- "ld1 { v12.h }[2], [x24], #0x2\n"
- "ld1 { v13.h }[2], [x23], #0x2\n"
- "ld1 { v14.h }[2], [x22], #0x2\n"
- "ld1 { v15.h }[2], [x21], #0x2\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
+ "ld1 { v9.h }[2], [x26], #0x2\n"
+ "ld1 { v10.h }[2], [x25], #0x2\n"
+ "ld1 { v11.h }[2], [x24], #0x2\n"
+ "ld1 { v12.h }[2], [x23], #0x2\n"
+ "ld1 { v13.h }[2], [x22], #0x2\n"
+ "ld1 { v14.h }[2], [x21], #0x2\n"
+ "ld1 { v15.h }[2], [x20], #0x2\n"
+ "ld1 { v16.h }[2], [x19], #0x2\n"
"b 7f\n"
"6:" // Oddments: Load inputs (2, 2), (0, 0), (0, 1), (0, 3), (0, 4), (1, 0), (1, 1), (0, 2): Bit 2: Unset: Bit 1: Unset
- "ld1 { v9.h }[0], [x27], #0x2\n"
- "ld1 { v10.h }[0], [x26], #0x2\n"
- "ld1 { v11.h }[0], [x25], #0x2\n"
- "ld1 { v12.h }[0], [x24], #0x2\n"
- "ld1 { v13.h }[0], [x23], #0x2\n"
- "ld1 { v14.h }[0], [x22], #0x2\n"
- "ld1 { v15.h }[0], [x21], #0x2\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
+ "ld1 { v9.h }[0], [x26], #0x2\n"
+ "ld1 { v10.h }[0], [x25], #0x2\n"
+ "ld1 { v11.h }[0], [x24], #0x2\n"
+ "ld1 { v12.h }[0], [x23], #0x2\n"
+ "ld1 { v13.h }[0], [x22], #0x2\n"
+ "ld1 { v14.h }[0], [x21], #0x2\n"
+ "ld1 { v15.h }[0], [x20], #0x2\n"
+ "ld1 { v16.h }[0], [x19], #0x2\n"
"7:" // Oddments: Load inputs (2, 2), (0, 0), (0, 1), (0, 3), (0, 4), (1, 0), (1, 1), (0, 2): Bit 2: End
"mov v28.16b, v17.16b\n fmla v28.8h, v8.8h, v9.8h\n"
"fmla v28.8h, v0.8h, v10.8h\n"
- "ldr x20, [x13, #0x40]\n"
- "add x20, x20, x28\n"
+ "ldr x26, [x9, #0x40]\n"
+ "add x26, x26, x28\n"
"mov v29.16b, v17.16b\n fmla v29.8h, v6.8h, v9.8h\n"
"fmla v28.8h, v1.8h, v11.8h\n"
"fmla v29.8h, v1.8h, v12.8h\n"
@@ -440,143 +440,143 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"fmla v28.8h, v2.8h, v16.8h\n"
"fmla v29.8h, v0.8h, v16.8h\n"
"tbz %x[n_channels], #2, 9f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x26], #0x8\n"
"tbz %x[n_channels], #1, 8f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x26], #0x4\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v11.h }[6], [x20], #0x2\n"
+ "ld1 { v11.h }[6], [x26], #0x2\n"
"b 11f\n"
"8:" // Oddments: Load input (1, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v11.h }[4], [x20], #0x2\n"
+ "ld1 { v11.h }[4], [x26], #0x2\n"
"b 11f\n"
"9:" // Oddments: Load input (1, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 10f\n"
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x26], #0x4\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v11.h }[2], [x20], #0x2\n"
+ "ld1 { v11.h }[2], [x26], #0x2\n"
"b 11f\n"
"10:" // Oddments: Load input (1, 3): Bit 2: Unset: Bit 1: Unset
- "ld1 { v11.h }[0], [x20], #0x2\n"
+ "ld1 { v11.h }[0], [x26], #0x2\n"
"11:" // Oddments: Load input (1, 3): Bit 2: End
- "ldr x20, [x13, #0x48]\n"
+ "ldr x25, [x9, #0x48]\n"
"fmla v29.8h, v4.8h, v11.8h\n"
- "add x20, x20, x28\n"
+ "add x25, x25, x28\n"
"tbz %x[n_channels], #2, 13f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x25], #0x8\n"
"tbz %x[n_channels], #1, 12f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x25], #0x4\n"
"tbz %x[n_channels], #0, 15f\n"
- "ld1 { v12.h }[6], [x20], #0x2\n"
+ "ld1 { v12.h }[6], [x25], #0x2\n"
"b 15f\n"
"12:" // Oddments: Load input (1, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 15f\n"
- "ld1 { v12.h }[4], [x20], #0x2\n"
+ "ld1 { v12.h }[4], [x25], #0x2\n"
"b 15f\n"
"13:" // Oddments: Load input (1, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 14f\n"
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x25], #0x4\n"
"tbz %x[n_channels], #0, 15f\n"
- "ld1 { v12.h }[2], [x20], #0x2\n"
+ "ld1 { v12.h }[2], [x25], #0x2\n"
"b 15f\n"
"14:" // Oddments: Load input (1, 4): Bit 2: Unset: Bit 1: Unset
- "ld1 { v12.h }[0], [x20], #0x2\n"
+ "ld1 { v12.h }[0], [x25], #0x2\n"
"15:" // Oddments: Load input (1, 4): Bit 2: End
- "ldr x20, [x13, #0x50]\n"
+ "ldr x24, [x9, #0x50]\n"
"fmla v29.8h, v5.8h, v12.8h\n"
- "add x20, x20, x28\n"
+ "add x24, x24, x28\n"
"tbz %x[n_channels], #2, 17f\n"
- "ld1 { v13.d }[0], [x20], #0x8\n"
+ "ld1 { v13.d }[0], [x24], #0x8\n"
"tbz %x[n_channels], #1, 16f\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x24], #0x4\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v13.h }[6], [x20], #0x2\n"
+ "ld1 { v13.h }[6], [x24], #0x2\n"
"b 19f\n"
"16:" // Oddments: Load input (1, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v13.h }[4], [x20], #0x2\n"
+ "ld1 { v13.h }[4], [x24], #0x2\n"
"b 19f\n"
"17:" // Oddments: Load input (1, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 18f\n"
- "ld1 { v13.s }[0], [x20], #0x4\n"
+ "ld1 { v13.s }[0], [x24], #0x4\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v13.h }[2], [x20], #0x2\n"
+ "ld1 { v13.h }[2], [x24], #0x2\n"
"b 19f\n"
"18:" // Oddments: Load input (1, 2): Bit 2: Unset: Bit 1: Unset
- "ld1 { v13.h }[0], [x20], #0x2\n"
+ "ld1 { v13.h }[0], [x24], #0x2\n"
"19:" // Oddments: Load input (1, 2): Bit 2: End
- "ldr x20, [x13, #0x58]\n"
+ "ldr x23, [x9, #0x58]\n"
"fmla v28.8h, v5.8h, v13.8h\n"
"fmla v29.8h, v3.8h, v13.8h\n"
- "add x20, x20, x28\n"
+ "add x23, x23, x28\n"
"tbz %x[n_channels], #2, 21f\n"
- "ld1 { v14.d }[0], [x20], #0x8\n"
+ "ld1 { v14.d }[0], [x23], #0x8\n"
"tbz %x[n_channels], #1, 20f\n"
- "ld1 { v14.s }[2], [x20], #0x4\n"
+ "ld1 { v14.s }[2], [x23], #0x4\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v14.h }[6], [x20], #0x2\n"
+ "ld1 { v14.h }[6], [x23], #0x2\n"
"b 23f\n"
"20:" // Oddments: Load input (3, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v14.h }[4], [x20], #0x2\n"
+ "ld1 { v14.h }[4], [x23], #0x2\n"
"b 23f\n"
"21:" // Oddments: Load input (3, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 22f\n"
- "ld1 { v14.s }[0], [x20], #0x4\n"
+ "ld1 { v14.s }[0], [x23], #0x4\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v14.h }[2], [x20], #0x2\n"
+ "ld1 { v14.h }[2], [x23], #0x2\n"
"b 23f\n"
"22:" // Oddments: Load input (3, 0): Bit 2: Unset: Bit 1: Unset
- "ld1 { v14.h }[0], [x20], #0x2\n"
+ "ld1 { v14.h }[0], [x23], #0x2\n"
"23:" // Oddments: Load input (3, 0): Bit 2: End
- "ldr x20, [x13, #0x60]\n"
+ "ldr x22, [x9, #0x60]\n"
"fmla v30.8h, v3.8h, v14.8h\n"
- "add x20, x20, x28\n"
+ "add x22, x22, x28\n"
"tbz %x[n_channels], #2, 25f\n"
- "ld1 { v15.d }[0], [x20], #0x8\n"
+ "ld1 { v15.d }[0], [x22], #0x8\n"
"tbz %x[n_channels], #1, 24f\n"
- "ld1 { v15.s }[2], [x20], #0x4\n"
+ "ld1 { v15.s }[2], [x22], #0x4\n"
"tbz %x[n_channels], #0, 27f\n"
- "ld1 { v15.h }[6], [x20], #0x2\n"
+ "ld1 { v15.h }[6], [x22], #0x2\n"
"b 27f\n"
"24:" // Oddments: Load input (2, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 27f\n"
- "ld1 { v15.h }[4], [x20], #0x2\n"
+ "ld1 { v15.h }[4], [x22], #0x2\n"
"b 27f\n"
"25:" // Oddments: Load input (2, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 26f\n"
- "ld1 { v15.s }[0], [x20], #0x4\n"
+ "ld1 { v15.s }[0], [x22], #0x4\n"
"tbz %x[n_channels], #0, 27f\n"
- "ld1 { v15.h }[2], [x20], #0x2\n"
+ "ld1 { v15.h }[2], [x22], #0x2\n"
"b 27f\n"
"26:" // Oddments: Load input (2, 0): Bit 2: Unset: Bit 1: Unset
- "ld1 { v15.h }[0], [x20], #0x2\n"
+ "ld1 { v15.h }[0], [x22], #0x2\n"
"27:" // Oddments: Load input (2, 0): Bit 2: End
- "ldr x20, [x13, #0x68]\n"
+ "ldr x21, [x9, #0x68]\n"
"fmla v28.8h, v6.8h, v15.8h\n"
"fmla v30.8h, v0.8h, v15.8h\n"
- "add x20, x20, x28\n"
+ "add x21, x21, x28\n"
"tbz %x[n_channels], #2, 29f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x21], #0x8\n"
"tbz %x[n_channels], #1, 28f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x21], #0x4\n"
"tbz %x[n_channels], #0, 31f\n"
- "ld1 { v11.h }[6], [x20], #0x2\n"
+ "ld1 { v11.h }[6], [x21], #0x2\n"
"b 31f\n"
"28:" // Oddments: Load input (3, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 31f\n"
- "ld1 { v11.h }[4], [x20], #0x2\n"
+ "ld1 { v11.h }[4], [x21], #0x2\n"
"b 31f\n"
"29:" // Oddments: Load input (3, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 30f\n"
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x21], #0x4\n"
"tbz %x[n_channels], #0, 31f\n"
- "ld1 { v11.h }[2], [x20], #0x2\n"
+ "ld1 { v11.h }[2], [x21], #0x2\n"
"b 31f\n"
"30:" // Oddments: Load input (3, 1): Bit 2: Unset: Bit 1: Unset
- "ld1 { v11.h }[0], [x20], #0x2\n"
+ "ld1 { v11.h }[0], [x21], #0x2\n"
"31:" // Oddments: Load input (3, 1): Bit 2: End
- "ldr x20, [x13, #0x70]\n"
+ "ldr x20, [x9, #0x70]\n"
"fmla v30.8h, v4.8h, v11.8h\n"
"add x20, x20, x28\n"
"tbz %x[n_channels], #2, 33f\n"
@@ -599,171 +599,171 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"34:" // Oddments: Load input (2, 1): Bit 2: Unset: Bit 1: Unset
"ld1 { v16.h }[0], [x20], #0x2\n"
"35:" // Oddments: Load input (2, 1): Bit 2: End
- "ldr x20, [x13, #0x78]\n"
+ "ldr x19, [x9, #0x78]\n"
"fmla v28.8h, v7.8h, v16.8h\n"
"fmla v30.8h, v1.8h, v16.8h\n"
- "add x20, x20, x28\n"
+ "add x19, x19, x28\n"
"tbz %x[n_channels], #2, 37f\n"
- "ld1 { v13.d }[0], [x20], #0x8\n"
+ "ld1 { v13.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 36f\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 39f\n"
- "ld1 { v13.h }[6], [x20], #0x2\n"
+ "ld1 { v13.h }[6], [x19], #0x2\n"
"b 39f\n"
"36:" // Oddments: Load input (3, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 39f\n"
- "ld1 { v13.h }[4], [x20], #0x2\n"
+ "ld1 { v13.h }[4], [x19], #0x2\n"
"b 39f\n"
"37:" // Oddments: Load input (3, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 38f\n"
- "ld1 { v13.s }[0], [x20], #0x4\n"
+ "ld1 { v13.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 39f\n"
- "ld1 { v13.h }[2], [x20], #0x2\n"
+ "ld1 { v13.h }[2], [x19], #0x2\n"
"b 39f\n"
"38:" // Oddments: Load input (3, 3): Bit 2: Unset: Bit 1: Unset
- "ld1 { v13.h }[0], [x20], #0x2\n"
+ "ld1 { v13.h }[0], [x19], #0x2\n"
"39:" // Oddments: Load input (3, 3): Bit 2: End
- "ldr x20, [x13, #0x80]\n"
+ "ldr x26, [x9, #0x80]\n"
"fmla v31.8h, v4.8h, v13.8h\n"
- "add x20, x20, x28\n"
+ "add x26, x26, x28\n"
"tbz %x[n_channels], #2, 41f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x26], #0x8\n"
"tbz %x[n_channels], #1, 40f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x26], #0x4\n"
"tbz %x[n_channels], #0, 43f\n"
- "ld1 { v12.h }[6], [x20], #0x2\n"
+ "ld1 { v12.h }[6], [x26], #0x2\n"
"b 43f\n"
"40:" // Oddments: Load input (2, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 43f\n"
- "ld1 { v12.h }[4], [x20], #0x2\n"
+ "ld1 { v12.h }[4], [x26], #0x2\n"
"b 43f\n"
"41:" // Oddments: Load input (2, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 42f\n"
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x26], #0x4\n"
"tbz %x[n_channels], #0, 43f\n"
- "ld1 { v12.h }[2], [x20], #0x2\n"
+ "ld1 { v12.h }[2], [x26], #0x2\n"
"b 43f\n"
"42:" // Oddments: Load input (2, 3): Bit 2: Unset: Bit 1: Unset
- "ld1 { v12.h }[0], [x20], #0x2\n"
+ "ld1 { v12.h }[0], [x26], #0x2\n"
"43:" // Oddments: Load input (2, 3): Bit 2: End
- "ldr x20, [x13, #0x88]\n"
+ "ldr x25, [x9, #0x88]\n"
"fmla v29.8h, v7.8h, v12.8h\n"
"fmla v31.8h, v1.8h, v12.8h\n"
- "add x20, x20, x28\n"
+ "add x25, x25, x28\n"
"tbz %x[n_channels], #2, 45f\n"
- "ld1 { v14.d }[0], [x20], #0x8\n"
+ "ld1 { v14.d }[0], [x25], #0x8\n"
"tbz %x[n_channels], #1, 44f\n"
- "ld1 { v14.s }[2], [x20], #0x4\n"
+ "ld1 { v14.s }[2], [x25], #0x4\n"
"tbz %x[n_channels], #0, 47f\n"
- "ld1 { v14.h }[6], [x20], #0x2\n"
+ "ld1 { v14.h }[6], [x25], #0x2\n"
"b 47f\n"
"44:" // Oddments: Load input (3, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 47f\n"
- "ld1 { v14.h }[4], [x20], #0x2\n"
+ "ld1 { v14.h }[4], [x25], #0x2\n"
"b 47f\n"
"45:" // Oddments: Load input (3, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 46f\n"
- "ld1 { v14.s }[0], [x20], #0x4\n"
+ "ld1 { v14.s }[0], [x25], #0x4\n"
"tbz %x[n_channels], #0, 47f\n"
- "ld1 { v14.h }[2], [x20], #0x2\n"
+ "ld1 { v14.h }[2], [x25], #0x2\n"
"b 47f\n"
"46:" // Oddments: Load input (3, 4): Bit 2: Unset: Bit 1: Unset
- "ld1 { v14.h }[0], [x20], #0x2\n"
+ "ld1 { v14.h }[0], [x25], #0x2\n"
"47:" // Oddments: Load input (3, 4): Bit 2: End
- "ldr x20, [x13, #0x90]\n"
+ "ldr x24, [x9, #0x90]\n"
"fmla v31.8h, v5.8h, v14.8h\n"
- "add x20, x20, x28\n"
+ "add x24, x24, x28\n"
"tbz %x[n_channels], #2, 49f\n"
- "ld1 { v15.d }[0], [x20], #0x8\n"
+ "ld1 { v15.d }[0], [x24], #0x8\n"
"tbz %x[n_channels], #1, 48f\n"
- "ld1 { v15.s }[2], [x20], #0x4\n"
+ "ld1 { v15.s }[2], [x24], #0x4\n"
"tbz %x[n_channels], #0, 51f\n"
- "ld1 { v15.h }[6], [x20], #0x2\n"
+ "ld1 { v15.h }[6], [x24], #0x2\n"
"b 51f\n"
"48:" // Oddments: Load input (4, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 51f\n"
- "ld1 { v15.h }[4], [x20], #0x2\n"
+ "ld1 { v15.h }[4], [x24], #0x2\n"
"b 51f\n"
"49:" // Oddments: Load input (4, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 50f\n"
- "ld1 { v15.s }[0], [x20], #0x4\n"
+ "ld1 { v15.s }[0], [x24], #0x4\n"
"tbz %x[n_channels], #0, 51f\n"
- "ld1 { v15.h }[2], [x20], #0x2\n"
+ "ld1 { v15.h }[2], [x24], #0x2\n"
"b 51f\n"
"50:" // Oddments: Load input (4, 0): Bit 2: Unset: Bit 1: Unset
- "ld1 { v15.h }[0], [x20], #0x2\n"
+ "ld1 { v15.h }[0], [x24], #0x2\n"
"51:" // Oddments: Load input (4, 0): Bit 2: End
- "ldr x20, [x13, #0x98]\n"
+ "ldr x23, [x9, #0x98]\n"
"fmla v30.8h, v6.8h, v15.8h\n"
- "add x20, x20, x28\n"
+ "add x23, x23, x28\n"
"tbz %x[n_channels], #2, 53f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x23], #0x8\n"
"tbz %x[n_channels], #1, 52f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x23], #0x4\n"
"tbz %x[n_channels], #0, 55f\n"
- "ld1 { v11.h }[6], [x20], #0x2\n"
+ "ld1 { v11.h }[6], [x23], #0x2\n"
"b 55f\n"
"52:" // Oddments: Load input (2, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 55f\n"
- "ld1 { v11.h }[4], [x20], #0x2\n"
+ "ld1 { v11.h }[4], [x23], #0x2\n"
"b 55f\n"
"53:" // Oddments: Load input (2, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 54f\n"
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x23], #0x4\n"
"tbz %x[n_channels], #0, 55f\n"
- "ld1 { v11.h }[2], [x20], #0x2\n"
+ "ld1 { v11.h }[2], [x23], #0x2\n"
"b 55f\n"
"54:" // Oddments: Load input (2, 4): Bit 2: Unset: Bit 1: Unset
- "ld1 { v11.h }[0], [x20], #0x2\n"
+ "ld1 { v11.h }[0], [x23], #0x2\n"
"55:" // Oddments: Load input (2, 4): Bit 2: End
- "ldr x20, [x13, #0xa0]\n"
+ "ldr x22, [x9, #0xa0]\n"
"fmla v29.8h, v8.8h, v11.8h\n"
"fmla v31.8h, v2.8h, v11.8h\n"
- "add x20, x20, x28\n"
+ "add x22, x22, x28\n"
"tbz %x[n_channels], #2, 57f\n"
- "ld1 { v13.d }[0], [x20], #0x8\n"
+ "ld1 { v13.d }[0], [x22], #0x8\n"
"tbz %x[n_channels], #1, 56f\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x22], #0x4\n"
"tbz %x[n_channels], #0, 59f\n"
- "ld1 { v13.h }[6], [x20], #0x2\n"
+ "ld1 { v13.h }[6], [x22], #0x2\n"
"b 59f\n"
"56:" // Oddments: Load input (4, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 59f\n"
- "ld1 { v13.h }[4], [x20], #0x2\n"
+ "ld1 { v13.h }[4], [x22], #0x2\n"
"b 59f\n"
"57:" // Oddments: Load input (4, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 58f\n"
- "ld1 { v13.s }[0], [x20], #0x4\n"
+ "ld1 { v13.s }[0], [x22], #0x4\n"
"tbz %x[n_channels], #0, 59f\n"
- "ld1 { v13.h }[2], [x20], #0x2\n"
+ "ld1 { v13.h }[2], [x22], #0x2\n"
"b 59f\n"
"58:" // Oddments: Load input (4, 1): Bit 2: Unset: Bit 1: Unset
- "ld1 { v13.h }[0], [x20], #0x2\n"
+ "ld1 { v13.h }[0], [x22], #0x2\n"
"59:" // Oddments: Load input (4, 1): Bit 2: End
- "ldr x20, [x13, #0xa8]\n"
+ "ldr x21, [x9, #0xa8]\n"
"fmla v30.8h, v7.8h, v13.8h\n"
- "add x20, x20, x28\n"
+ "add x21, x21, x28\n"
"tbz %x[n_channels], #2, 61f\n"
- "ld1 { v16.d }[0], [x20], #0x8\n"
+ "ld1 { v16.d }[0], [x21], #0x8\n"
"tbz %x[n_channels], #1, 60f\n"
- "ld1 { v16.s }[2], [x20], #0x4\n"
+ "ld1 { v16.s }[2], [x21], #0x4\n"
"tbz %x[n_channels], #0, 63f\n"
- "ld1 { v16.h }[6], [x20], #0x2\n"
+ "ld1 { v16.h }[6], [x21], #0x2\n"
"b 63f\n"
"60:" // Oddments: Load input (3, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 63f\n"
- "ld1 { v16.h }[4], [x20], #0x2\n"
+ "ld1 { v16.h }[4], [x21], #0x2\n"
"b 63f\n"
"61:" // Oddments: Load input (3, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 62f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
+ "ld1 { v16.s }[0], [x21], #0x4\n"
"tbz %x[n_channels], #0, 63f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
+ "ld1 { v16.h }[2], [x21], #0x2\n"
"b 63f\n"
"62:" // Oddments: Load input (3, 2): Bit 2: Unset: Bit 1: Unset
- "ld1 { v16.h }[0], [x20], #0x2\n"
+ "ld1 { v16.h }[0], [x21], #0x2\n"
"63:" // Oddments: Load input (3, 2): Bit 2: End
- "ldr x20, [x13, #0xb0]\n"
+ "ldr x20, [x9, #0xb0]\n"
"fmla v30.8h, v5.8h, v16.8h\n"
"fmla v31.8h, v3.8h, v16.8h\n"
"add x20, x20, x28\n"
@@ -787,52 +787,52 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"66:" // Oddments: Load input (4, 3): Bit 2: Unset: Bit 1: Unset
"ld1 { v14.h }[0], [x20], #0x2\n"
"67:" // Oddments: Load input (4, 3): Bit 2: End
- "ldr x20, [x13, #0xb8]\n"
+ "ldr x19, [x9, #0xb8]\n"
"fmla v31.8h, v7.8h, v14.8h\n"
- "add x20, x20, x28\n"
+ "add x19, x19, x28\n"
"tbz %x[n_channels], #2, 69f\n"
- "ld1 { v15.d }[0], [x20], #0x8\n"
+ "ld1 { v15.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 68f\n"
- "ld1 { v15.s }[2], [x20], #0x4\n"
+ "ld1 { v15.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 71f\n"
- "ld1 { v15.h }[6], [x20], #0x2\n"
+ "ld1 { v15.h }[6], [x19], #0x2\n"
"b 71f\n"
"68:" // Oddments: Load input (4, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 71f\n"
- "ld1 { v15.h }[4], [x20], #0x2\n"
+ "ld1 { v15.h }[4], [x19], #0x2\n"
"b 71f\n"
"69:" // Oddments: Load input (4, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 70f\n"
- "ld1 { v15.s }[0], [x20], #0x4\n"
+ "ld1 { v15.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 71f\n"
- "ld1 { v15.h }[2], [x20], #0x2\n"
+ "ld1 { v15.h }[2], [x19], #0x2\n"
"b 71f\n"
"70:" // Oddments: Load input (4, 2): Bit 2: Unset: Bit 1: Unset
- "ld1 { v15.h }[0], [x20], #0x2\n"
+ "ld1 { v15.h }[0], [x19], #0x2\n"
"71:" // Oddments: Load input (4, 2): Bit 2: End
- "ldr x20, [x13, #0xc0]\n"
+ "ldr x26, [x9, #0xc0]\n"
"fmla v30.8h, v8.8h, v15.8h\n"
"fmla v31.8h, v6.8h, v15.8h\n"
- "add x20, x20, x28\n"
+ "add x26, x26, x28\n"
"tbz %x[n_channels], #2, 73f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x26], #0x8\n"
"tbz %x[n_channels], #1, 72f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x26], #0x4\n"
"tbz %x[n_channels], #0, 75f\n"
- "ld1 { v11.h }[6], [x20], #0x2\n"
+ "ld1 { v11.h }[6], [x26], #0x2\n"
"b 75f\n"
"72:" // Oddments: Load input (4, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 75f\n"
- "ld1 { v11.h }[4], [x20], #0x2\n"
+ "ld1 { v11.h }[4], [x26], #0x2\n"
"b 75f\n"
"73:" // Oddments: Load input (4, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 74f\n"
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x26], #0x4\n"
"tbz %x[n_channels], #0, 75f\n"
- "ld1 { v11.h }[2], [x20], #0x2\n"
+ "ld1 { v11.h }[2], [x26], #0x2\n"
"b 75f\n"
"74:" // Oddments: Load input (4, 4): Bit 2: Unset: Bit 1: Unset
- "ld1 { v11.h }[0], [x20], #0x2\n"
+ "ld1 { v11.h }[0], [x26], #0x2\n"
"75:" // Oddments: Load input (4, 4): Bit 2: End
"fmla v31.8h, v8.8h, v11.8h\n"
"fmax v28.8h, v28.8h, v19.8h\n"
@@ -844,50 +844,52 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"fmin v30.8h, v30.8h, v18.8h\n"
"fmin v31.8h, v31.8h, v18.8h\n"
"tbz %x[n_channels], #2, 77f\n"
- "st1 { v28.d }[0], [x12], #0x8\n"
- "st1 { v29.d }[0], [x11], #0x8\n"
- "st1 { v30.d }[0], [x10], #0x8\n"
- "st1 { v31.d }[0], [x9], #0x8\n"
+ "st1 { v28.d }[0], [x13], #0x8\n"
+ "st1 { v29.d }[0], [x12], #0x8\n"
+ "st1 { v30.d }[0], [x11], #0x8\n"
+ "st1 { v31.d }[0], [x10], #0x8\n"
"tbz %x[n_channels], #1, 76f\n"
- "st1 { v28.s }[2], [x12], #0x4\n"
- "st1 { v29.s }[2], [x11], #0x4\n"
- "st1 { v30.s }[2], [x10], #0x4\n"
- "st1 { v31.s }[2], [x9], #0x4\n"
+ "st1 { v28.s }[2], [x13], #0x4\n"
+ "st1 { v29.s }[2], [x12], #0x4\n"
+ "st1 { v30.s }[2], [x11], #0x4\n"
+ "st1 { v31.s }[2], [x10], #0x4\n"
"tbz %x[n_channels], #0, 79f\n"
- "st1 { v28.h }[6], [x12], #0x2\n"
- "st1 { v29.h }[6], [x11], #0x2\n"
- "st1 { v30.h }[6], [x10], #0x2\n"
- "st1 { v31.h }[6], [x9], #0x2\n"
+ "st1 { v28.h }[6], [x13], #0x2\n"
+ "st1 { v29.h }[6], [x12], #0x2\n"
+ "st1 { v30.h }[6], [x11], #0x2\n"
+ "st1 { v31.h }[6], [x10], #0x2\n"
"b 79f\n"
"76:" // Oddments: Store: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 79f\n"
- "st1 { v28.h }[4], [x12], #0x2\n"
- "st1 { v29.h }[4], [x11], #0x2\n"
- "st1 { v30.h }[4], [x10], #0x2\n"
- "st1 { v31.h }[4], [x9], #0x2\n"
+ "st1 { v28.h }[4], [x13], #0x2\n"
+ "st1 { v29.h }[4], [x12], #0x2\n"
+ "st1 { v30.h }[4], [x11], #0x2\n"
+ "st1 { v31.h }[4], [x10], #0x2\n"
"b 79f\n"
"77:" // Oddments: Store: Bit 2: Unset
"tbz %x[n_channels], #1, 78f\n"
- "st1 { v28.s }[0], [x12], #0x4\n"
- "st1 { v29.s }[0], [x11], #0x4\n"
- "st1 { v30.s }[0], [x10], #0x4\n"
- "st1 { v31.s }[0], [x9], #0x4\n"
+ "st1 { v28.s }[0], [x13], #0x4\n"
+ "st1 { v29.s }[0], [x12], #0x4\n"
+ "st1 { v30.s }[0], [x11], #0x4\n"
+ "st1 { v31.s }[0], [x10], #0x4\n"
"tbz %x[n_channels], #0, 79f\n"
- "st1 { v28.h }[2], [x12], #0x2\n"
- "st1 { v29.h }[2], [x11], #0x2\n"
- "st1 { v30.h }[2], [x10], #0x2\n"
- "st1 { v31.h }[2], [x9], #0x2\n"
+ "st1 { v28.h }[2], [x13], #0x2\n"
+ "st1 { v29.h }[2], [x12], #0x2\n"
+ "st1 { v30.h }[2], [x11], #0x2\n"
+ "st1 { v31.h }[2], [x10], #0x2\n"
"b 79f\n"
"78:" // Oddments: Store: Bit 2: Unset: Bit 1: Unset
- "st1 { v28.h }[0], [x12], #0x2\n"
- "st1 { v29.h }[0], [x11], #0x2\n"
- "st1 { v30.h }[0], [x10], #0x2\n"
- "st1 { v31.h }[0], [x9], #0x2\n"
+ "st1 { v28.h }[0], [x13], #0x2\n"
+ "st1 { v29.h }[0], [x12], #0x2\n"
+ "st1 { v30.h }[0], [x11], #0x2\n"
+ "st1 { v31.h }[0], [x10], #0x2\n"
"79:" // Oddments: Store: Bit 2: End
+
"80:" // End
+
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
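
(Editorial sketch, not part of the patch: throughout the "Oddments" labels in this file, the leftover n_channels % 8 lanes are handled by testing bits 2, 1 and 0 of n_channels with tbz and transferring a 64-bit (.d, four halfwords), a 32-bit (.s, two halfwords) and a 16-bit (.h, one halfword) lane chunk in turn. A minimal C++ rendering of that bit decomposition, with illustrative names that do not appear in the patch:

    #include <cstddef>
    #include <cstdint>

    // Copy the n_channels % 8 tail in at most three steps, mirroring the
    // tbz bit tests: bit 2 -> one 64-bit (4 x fp16) chunk, bit 1 -> one
    // 32-bit (2 x fp16) chunk, bit 0 -> a single halfword.
    static void copy_tail(uint16_t *dst, const uint16_t *src, size_t n_channels)
    {
        size_t rem = n_channels & 0x7;                                 // "tst %x[n_channels], #0x7"
        if (rem & 4) { for (int i = 0; i < 4; ++i) *dst++ = *src++; }  // ld1/st1 { .d }[0]
        if (rem & 2) { for (int i = 0; i < 2; ++i) *dst++ = *src++; }  // ld1/st1 { .s }[lane]
        if (rem & 1) { *dst++ = *src++; }                              // ld1/st1 { .h }[lane]
    }

This is why every oddments path needs at most three load/store steps per vector regardless of the remainder.)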
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp
index 8807f5d306..b08059db0a 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,403 +87,403 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
);
__asm__ __volatile__(
- "mov x27, #0x0\n"
"mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"1:" // Tile loop
- "str x27, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "mov x23, #0x2\n"
- "mov x25, #0x2\n"
- "str x26, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "str x26, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "mov x22, #0x2\n"
+ "mov x21, #0x2\n"
+ "str x25, [%x[params_struct], %[offsetof_args_tile_j]]\n"
"ldr x24, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x2, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "mul x22, x27, x24\n" // offset = tile_i * ld_input_row
- "ldr x21, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "madd x22, x26, x2, x22\n" // offset += tile_j * ld_input_col
- "ldr x3, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "lsl x2, x2, #0x1\n"
- "mul x20, x27, x21\n" // offset = tile_i * ld_output_row
- "ldr x4, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "ldr x5, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "add x6, x2, x2\n"
- "mul x22, x22, x23\n" // offset *= kernel_stride * output_size
- "add x4, x4, x22, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
- "add x7, x4, x24, LSL #1\n"
- "ldr x8, [%x[params_struct], %[offsetof_args_params]]\n"
- "madd x20, x26, x3, x20\n" // offset += tile_j * ld_output_col
- "add x17, x7, x24, LSL #1\n"
- "mov x23, #0x10\n" // cntb _, ALL, #1
- "mul x20, x20, x25\n" // offset *= output_tile_size
- "lsr x22, %x[n_channels], #0x3\n"
- "add x16, x17, x24, LSL #1\n"
- "add x15, x6, x2\n"
- "add x14, x16, x24, LSL #1\n"
- "add x13, x15, x2\n"
- "add x5, x5, x20, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
+ "ldr x3, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "mul x20, x26, x24\n" // offset = tile_i * ld_input_row
+ "ldr x23, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "madd x20, x25, x3, x20\n" // offset += tile_j * ld_input_col
+ "ldr x4, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+ "lsl x3, x3, #0x1\n"
+ "mul x19, x26, x23\n" // offset = tile_i * ld_output_row
+ "ldr x5, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "ldr x6, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "add x7, x3, x3\n"
+ "mul x20, x20, x22\n" // offset *= kernel_stride * output_size
+ "add x5, x5, x20, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
+ "add x8, x5, x24, LSL #1\n"
+ "ldr x17, [%x[params_struct], %[offsetof_args_params]]\n"
+ "madd x19, x25, x4, x19\n" // offset += tile_j * ld_output_col
+ "add x16, x8, x24, LSL #1\n"
+ "mov x22, #0x10\n" // cntb _, ALL, #1
+ "mul x19, x19, x21\n" // offset *= output_tile_size
+ "lsr x21, %x[n_channels], #0x3\n"
+ "add x15, x16, x24, LSL #1\n"
+ "add x14, x7, x3\n"
+ "add x13, x15, x24, LSL #1\n"
+ "add x12, x14, x3\n"
+ "add x6, x6, x19, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
"add x20, %x[params_struct], %[offsetof_args_min]\n"
+ "add x19, %x[params_struct], %[offsetof_args_max]\n"
"ld1r { v18.8h }, [x20]\n"
- "add x20, %x[params_struct], %[offsetof_args_max]\n"
- "ld1r { v17.8h }, [x20]\n"
- "add x12, x14, x24, LSL #1\n"
- "add x11, x13, x2\n"
- "add x10, x5, x21, LSL #1\n"
- "lsl x3, x3, #0x1\n"
- "mov x21, #0x0\n"
- "sub x20, XZR, x23\n"
- "cbz x22, 4f\n"
- "ldr q16, [x8, #0x0]\n"
- "ldr q0, [x8, #0x10]\n"
- "cmp x23, x22, LSL #4\n"
- "ldr q1, [x8, #0x20]\n"
- "ldr q2, [x8, #0x30]\n"
- "ldr q3, [x8, #0x40]\n"
- "ldr q4, [x8, #0x50]\n"
- "add x8, x8, #0x60\n"
- "ld1 { v5.8h }, [x4]\n"
- "ldr q6, [x4, x2]\n"
- "ld1 { v7.8h }, [x7]\n"
- "ldr q8, [x7, x2]\n"
- "ldr q9, [x4, x6]\n"
- "ldr q13, [x7, x6]\n"
- "ldr q11, [x4, x15]\n"
- "ldr q12, [x4, x13]\n"
- "ldr q10, [x7, x11]\n"
- "ld1 { v14.8h }, [x17]\n"
+ "ld1r { v17.8h }, [x19]\n"
+ "add x11, x13, x24, LSL #1\n"
+ "add x10, x12, x3\n"
+ "add x9, x6, x23, LSL #1\n"
+ "lsl x4, x4, #0x1\n"
+ "mov x20, #0x0\n"
+ "sub x19, XZR, x22\n"
+ "cbz x21, 4f\n"
+ "ldr q16, [x17, #0x0]\n"
+ "cmp x22, x21, LSL #4\n"
+ "ldr q0, [x17, #0x10]\n"
+ "ldr q1, [x17, #0x20]\n"
+ "ldr q2, [x17, #0x30]\n"
+ "ldr q3, [x17, #0x40]\n"
+ "ldr q4, [x17, #0x50]\n"
+ "ld1 { v5.8h }, [x5]\n"
+ "add x17, x17, #0x60\n"
+ "ldr q6, [x5, x3]\n"
+ "ld1 { v7.8h }, [x8]\n"
+ "ldr q8, [x8, x3]\n"
+ "ldr q9, [x5, x7]\n"
+ "ldr q13, [x8, x7]\n"
+ "ldr q11, [x5, x14]\n"
+ "ldr q12, [x5, x12]\n"
+ "ldr q10, [x8, x10]\n"
+ "ld1 { v14.8h }, [x16]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
"mov v28.16b, v16.16b\n fmla v28.8h, v0.8h, v5.8h\n"
- "ldr q5, [x7, x15]\n"
"mov v29.16b, v16.16b\n fmla v29.8h, v0.8h, v6.8h\n"
- "add x23, x23, #0x10\n"
+ "ldr q5, [x8, x14]\n"
+ "add x22, x22, #0x10\n"
"mov v30.16b, v16.16b\n fmla v30.8h, v0.8h, v7.8h\n"
"mov v31.16b, v16.16b\n fmla v31.8h, v0.8h, v8.8h\n"
- "ldr q0, [x8, #0x0]\n"
- "ldr q16, [x8, #0x140]\n"
+ "ldr q0, [x17, #0x0]\n"
+ "cmp x22, x21, LSL #4\n"
"fmla v28.8h, v1.8h, v6.8h\n"
- "ldr q6, [x7, x13]\n"
"fmla v29.8h, v1.8h, v9.8h\n"
- "add x7, x7, #0x10\n"
+ "ldr q6, [x8, x12]\n"
+ "add x8, x8, #0x10\n"
"fmla v30.8h, v1.8h, v8.8h\n"
"fmla v31.8h, v1.8h, v13.8h\n"
- "ldr q1, [x8, #0x10]\n"
- "cmp x23, x22, LSL #4\n"
+ "ldr q1, [x17, #0x10]\n"
+ "add x19, x19, #0x10\n"
"fmla v28.8h, v2.8h, v9.8h\n"
- "ldr q9, [x4, x11]\n"
"fmla v29.8h, v2.8h, v11.8h\n"
- "add x4, x4, #0x10\n"
+ "ldr q9, [x5, x10]\n"
+ "add x5, x5, #0x10\n"
"fmla v30.8h, v2.8h, v13.8h\n"
"fmla v31.8h, v2.8h, v5.8h\n"
- "ldr q2, [x8, #0x20]\n"
+ "ldr q2, [x17, #0x20]\n"
"add x20, x20, #0x10\n"
"fmla v28.8h, v3.8h, v11.8h\n"
- "ldr q11, [x17, x2]\n"
"fmla v29.8h, v3.8h, v12.8h\n"
- "add x21, x21, #0x10\n"
+ "ldr q11, [x16, x3]\n"
+ "ldr q16, [x17, #0x140]\n"
"fmla v30.8h, v3.8h, v5.8h\n"
"fmla v31.8h, v3.8h, v6.8h\n"
- "ldr q3, [x8, #0x30]\n"
+ "ldr q3, [x17, #0x30]\n"
"fmla v28.8h, v4.8h, v12.8h\n"
- "ldr q12, [x17, x6]\n"
"fmla v29.8h, v4.8h, v9.8h\n"
- "ldr q9, [x17, x15]\n"
+ "ldr q12, [x16, x7]\n"
+ "ldr q9, [x16, x14]\n"
"fmla v30.8h, v4.8h, v6.8h\n"
"fmla v31.8h, v4.8h, v10.8h\n"
- "ldr q4, [x8, #0x40]\n"
+ "ldr q4, [x17, #0x40]\n"
"fmla v28.8h, v0.8h, v7.8h\n"
- "ld1 { v7.8h }, [x7]\n"
"fmla v29.8h, v0.8h, v8.8h\n"
+ "ld1 { v7.8h }, [x8]\n"
"fmla v30.8h, v0.8h, v14.8h\n"
"fmla v31.8h, v0.8h, v11.8h\n"
- "ldr q0, [x8, #0x50]\n"
+ "ldr q0, [x17, #0x50]\n"
"fmla v28.8h, v1.8h, v8.8h\n"
- "ldr q8, [x17, x11]\n"
"fmla v29.8h, v1.8h, v13.8h\n"
+ "ldr q8, [x16, x10]\n"
"fmla v30.8h, v1.8h, v11.8h\n"
"fmla v31.8h, v1.8h, v12.8h\n"
- "ldr q1, [x8, #0x60]\n"
+ "ldr q1, [x17, #0x60]\n"
"fmla v28.8h, v2.8h, v13.8h\n"
- "ldr q13, [x17, x13]\n"
"fmla v29.8h, v2.8h, v5.8h\n"
- "add x17, x17, #0x10\n"
+ "ldr q13, [x16, x12]\n"
+ "add x16, x16, #0x10\n"
"fmla v30.8h, v2.8h, v12.8h\n"
"fmla v31.8h, v2.8h, v9.8h\n"
- "ldr q2, [x8, #0x70]\n"
+ "ldr q2, [x17, #0x70]\n"
"fmla v28.8h, v3.8h, v5.8h\n"
- "ld1 { v5.8h }, [x16]\n"
"fmla v29.8h, v3.8h, v6.8h\n"
+ "ld1 { v5.8h }, [x15]\n"
"fmla v30.8h, v3.8h, v9.8h\n"
"fmla v31.8h, v3.8h, v13.8h\n"
- "ldr q3, [x8, #0x80]\n"
+ "ldr q3, [x17, #0x80]\n"
"fmla v28.8h, v4.8h, v6.8h\n"
- "ldr q6, [x16, x2]\n"
"fmla v29.8h, v4.8h, v10.8h\n"
- "ldr q10, [x16, x6]\n"
+ "ldr q6, [x15, x3]\n"
+ "ldr q10, [x15, x7]\n"
"fmla v30.8h, v4.8h, v13.8h\n"
"fmla v31.8h, v4.8h, v8.8h\n"
- "ldr q4, [x8, #0x90]\n"
+ "ldr q4, [x17, #0x90]\n"
"fmla v28.8h, v0.8h, v14.8h\n"
- "ldr q14, [x16, x11]\n"
"fmla v29.8h, v0.8h, v11.8h\n"
+ "ldr q14, [x15, x10]\n"
"fmla v30.8h, v0.8h, v5.8h\n"
"fmla v31.8h, v0.8h, v6.8h\n"
- "ldr q0, [x8, #0xa0]\n"
+ "ldr q0, [x17, #0xa0]\n"
"fmla v28.8h, v1.8h, v11.8h\n"
- "ldr q11, [x16, x15]\n"
"fmla v29.8h, v1.8h, v12.8h\n"
+ "ldr q11, [x15, x14]\n"
"fmla v30.8h, v1.8h, v6.8h\n"
"fmla v31.8h, v1.8h, v10.8h\n"
- "ldr q1, [x8, #0xb0]\n"
+ "ldr q1, [x17, #0xb0]\n"
"fmla v28.8h, v2.8h, v12.8h\n"
- "ldr q12, [x16, x13]\n"
"fmla v29.8h, v2.8h, v9.8h\n"
- "add x16, x16, #0x10\n"
+ "ldr q12, [x15, x12]\n"
+ "add x15, x15, #0x10\n"
"fmla v30.8h, v2.8h, v10.8h\n"
"fmla v31.8h, v2.8h, v11.8h\n"
- "ldr q2, [x8, #0xc0]\n"
+ "ldr q2, [x17, #0xc0]\n"
"fmla v28.8h, v3.8h, v9.8h\n"
- "ld1 { v9.8h }, [x14]\n"
"fmla v29.8h, v3.8h, v13.8h\n"
+ "ld1 { v9.8h }, [x13]\n"
"fmla v30.8h, v3.8h, v11.8h\n"
"fmla v31.8h, v3.8h, v12.8h\n"
- "ldr q3, [x8, #0xd0]\n"
+ "ldr q3, [x17, #0xd0]\n"
"fmla v28.8h, v4.8h, v13.8h\n"
- "ldr q13, [x14, x2]\n"
"fmla v29.8h, v4.8h, v8.8h\n"
- "ldr q8, [x14, x13]\n"
+ "ldr q13, [x13, x3]\n"
+ "ldr q8, [x13, x12]\n"
"fmla v30.8h, v4.8h, v12.8h\n"
"fmla v31.8h, v4.8h, v14.8h\n"
- "ldr q4, [x8, #0xe0]\n"
+ "ldr q4, [x17, #0xe0]\n"
"fmla v28.8h, v0.8h, v5.8h\n"
- "ldr q5, [x14, x6]\n"
"fmla v29.8h, v0.8h, v6.8h\n"
+ "ldr q5, [x13, x7]\n"
"fmla v30.8h, v0.8h, v9.8h\n"
"fmla v31.8h, v0.8h, v13.8h\n"
- "ldr q0, [x8, #0xf0]\n"
+ "ldr q0, [x17, #0xf0]\n"
"fmla v28.8h, v1.8h, v6.8h\n"
- "ldr q6, [x14, x15]\n"
"fmla v29.8h, v1.8h, v10.8h\n"
+ "ldr q6, [x13, x14]\n"
"fmla v30.8h, v1.8h, v13.8h\n"
"fmla v31.8h, v1.8h, v5.8h\n"
- "ldr q1, [x8, #0x100]\n"
+ "ldr q1, [x17, #0x100]\n"
"fmla v28.8h, v2.8h, v10.8h\n"
- "ldr q10, [x14, x11]\n"
"fmla v29.8h, v2.8h, v11.8h\n"
- "add x14, x14, #0x10\n"
+ "ldr q10, [x13, x10]\n"
+ "add x13, x13, #0x10\n"
"fmla v30.8h, v2.8h, v5.8h\n"
"fmla v31.8h, v2.8h, v6.8h\n"
- "ldr q2, [x8, #0x110]\n"
+ "ldr q2, [x17, #0x110]\n"
"fmla v28.8h, v3.8h, v11.8h\n"
- "ld1 { v11.8h }, [x12]\n"
"fmla v29.8h, v3.8h, v12.8h\n"
+ "ld1 { v11.8h }, [x11]\n"
"fmla v30.8h, v3.8h, v6.8h\n"
"fmla v31.8h, v3.8h, v8.8h\n"
- "ldr q3, [x8, #0x120]\n"
+ "ldr q3, [x17, #0x120]\n"
"fmla v28.8h, v4.8h, v12.8h\n"
- "ldr q12, [x12, x2]\n"
"fmla v29.8h, v4.8h, v14.8h\n"
- "ld1 { v14.8h }, [x17]\n"
+ "ldr q12, [x11, x3]\n"
+ "ld1 { v14.8h }, [x16]\n"
"fmla v30.8h, v4.8h, v8.8h\n"
"fmla v31.8h, v4.8h, v10.8h\n"
- "ldr q4, [x8, #0x130]\n"
+ "ldr q4, [x17, #0x130]\n"
"fmla v28.8h, v0.8h, v9.8h\n"
- "ldr q9, [x12, x6]\n"
"fmla v29.8h, v0.8h, v13.8h\n"
+ "ldr q9, [x11, x7]\n"
"fmla v30.8h, v0.8h, v11.8h\n"
- "ldr q11, [x12, x15]\n"
"fmla v31.8h, v0.8h, v12.8h\n"
- "ldr q0, [x8, #0x150]\n"
+ "ldr q11, [x11, x14]\n"
+ "ldr q0, [x17, #0x150]\n"
"fmla v28.8h, v1.8h, v13.8h\n"
- "ldr q13, [x7, x6]\n"
"fmla v29.8h, v1.8h, v5.8h\n"
+ "ldr q13, [x8, x7]\n"
"fmla v30.8h, v1.8h, v12.8h\n"
- "ldr q12, [x12, x13]\n"
"fmla v31.8h, v1.8h, v9.8h\n"
- "ldr q1, [x8, #0x160]\n"
+ "ldr q12, [x11, x12]\n"
+ "ldr q1, [x17, #0x160]\n"
"fmla v28.8h, v2.8h, v5.8h\n"
- "ld1 { v5.8h }, [x4]\n"
"fmla v29.8h, v2.8h, v6.8h\n"
+ "ld1 { v5.8h }, [x5]\n"
"fmla v30.8h, v2.8h, v9.8h\n"
- "ldr q9, [x12, x11]\n"
"fmla v31.8h, v2.8h, v11.8h\n"
- "ldr q2, [x8, #0x170]\n"
+ "ldr q9, [x11, x10]\n"
+ "add x11, x11, #0x10\n"
"fmla v28.8h, v3.8h, v6.8h\n"
- "ldr q6, [x4, x2]\n"
"fmla v29.8h, v3.8h, v8.8h\n"
- "add x12, x12, #0x10\n"
+ "ldr q6, [x5, x3]\n"
+ "ldr q2, [x17, #0x170]\n"
"fmla v30.8h, v3.8h, v11.8h\n"
- "ldr q11, [x4, x15]\n"
"fmla v31.8h, v3.8h, v12.8h\n"
- "ldr q3, [x8, #0x180]\n"
+ "ldr q11, [x5, x14]\n"
+ "ldr q3, [x17, #0x180]\n"
"fmla v28.8h, v4.8h, v8.8h\n"
- "ldr q8, [x7, x2]\n"
"fmla v29.8h, v4.8h, v10.8h\n"
- "ldr q10, [x7, x11]\n"
+ "fmax v28.8h, v28.8h, v18.8h\n"
+ "ldr q8, [x8, x3]\n"
"fmla v30.8h, v4.8h, v12.8h\n"
- "ldr q12, [x4, x13]\n"
"fmla v31.8h, v4.8h, v9.8h\n"
- "ldr q9, [x4, x6]\n"
- "ldr q4, [x8, #0x190]\n"
- "fmax v28.8h, v28.8h, v18.8h\n"
"fmax v29.8h, v29.8h, v18.8h\n"
- "add x8, x8, #0x1a0\n"
+ "ldr q9, [x5, x7]\n"
"fmax v30.8h, v30.8h, v18.8h\n"
"fmax v31.8h, v31.8h, v18.8h\n"
+ "ldr q12, [x5, x12]\n"
+ "ldr q10, [x8, x10]\n"
"fmin v28.8h, v28.8h, v17.8h\n"
"fmin v29.8h, v29.8h, v17.8h\n"
- "st1 { v28.8h }, [x5]\n"
+ "st1 { v28.8h }, [x6]\n"
+ "ldr q4, [x17, #0x190]\n"
"fmin v30.8h, v30.8h, v17.8h\n"
"fmin v31.8h, v31.8h, v17.8h\n"
- "str q29, [x5, x3]\n"
- "add x5, x5, #0x10\n"
- "st1 { v30.8h }, [x10]\n"
- "str q31, [x10, x3]\n"
- "add x10, x10, #0x10\n"
+ "str q29, [x6, x4]\n"
+ "add x6, x6, #0x10\n"
+ "st1 { v30.8h }, [x9]\n"
+ "add x17, x17, #0x1a0\n"
+ "str q31, [x9, x4]\n"
+ "add x9, x9, #0x10\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
"mov v28.16b, v16.16b\n fmla v28.8h, v0.8h, v5.8h\n"
- "ldr q5, [x7, x15]\n"
"mov v29.16b, v16.16b\n fmla v29.8h, v0.8h, v6.8h\n"
+ "ldr q5, [x8, x14]\n"
"mov v30.16b, v16.16b\n fmla v30.8h, v0.8h, v7.8h\n"
"mov v31.16b, v16.16b\n fmla v31.8h, v0.8h, v8.8h\n"
- "ldr q0, [x8, #0x0]\n"
+ "ldr q0, [x17, #0x0]\n"
"fmla v28.8h, v1.8h, v6.8h\n"
- "ldr q6, [x7, x13]\n"
"fmla v29.8h, v1.8h, v9.8h\n"
- "add x7, x7, #0x10\n"
+ "ldr q6, [x8, x12]\n"
+ "add x8, x8, #0x10\n"
"fmla v30.8h, v1.8h, v8.8h\n"
"fmla v31.8h, v1.8h, v13.8h\n"
- "ldr q1, [x8, #0x10]\n"
+ "ldr q1, [x17, #0x10]\n"
"fmla v28.8h, v2.8h, v9.8h\n"
- "ldr q9, [x4, x11]\n"
"fmla v29.8h, v2.8h, v11.8h\n"
- "add x4, x4, #0x10\n"
+ "ldr q9, [x5, x10]\n"
+ "add x5, x5, #0x10\n"
"fmla v30.8h, v2.8h, v13.8h\n"
"fmla v31.8h, v2.8h, v5.8h\n"
- "ldr q2, [x8, #0x20]\n"
+ "ldr q2, [x17, #0x20]\n"
"fmla v28.8h, v3.8h, v11.8h\n"
- "ldr q11, [x17, x2]\n"
"fmla v29.8h, v3.8h, v12.8h\n"
+ "ldr q11, [x16, x3]\n"
"fmla v30.8h, v3.8h, v5.8h\n"
"fmla v31.8h, v3.8h, v6.8h\n"
- "ldr q3, [x8, #0x30]\n"
+ "ldr q3, [x17, #0x30]\n"
"fmla v28.8h, v4.8h, v12.8h\n"
- "ldr q12, [x17, x6]\n"
"fmla v29.8h, v4.8h, v9.8h\n"
- "ldr q9, [x17, x15]\n"
+ "ldr q12, [x16, x7]\n"
+ "ldr q9, [x16, x14]\n"
"fmla v30.8h, v4.8h, v6.8h\n"
"fmla v31.8h, v4.8h, v10.8h\n"
- "ldr q4, [x8, #0x40]\n"
+ "ldr q4, [x17, #0x40]\n"
"fmla v28.8h, v0.8h, v7.8h\n"
"fmla v29.8h, v0.8h, v8.8h\n"
"fmla v30.8h, v0.8h, v14.8h\n"
"fmla v31.8h, v0.8h, v11.8h\n"
- "ldr q0, [x8, #0x50]\n"
+ "ldr q0, [x17, #0x50]\n"
"fmla v28.8h, v1.8h, v8.8h\n"
- "ldr q8, [x17, x11]\n"
"fmla v29.8h, v1.8h, v13.8h\n"
+ "ldr q8, [x16, x10]\n"
"fmla v30.8h, v1.8h, v11.8h\n"
"fmla v31.8h, v1.8h, v12.8h\n"
- "ldr q1, [x8, #0x60]\n"
+ "ldr q1, [x17, #0x60]\n"
"fmla v28.8h, v2.8h, v13.8h\n"
- "ldr q13, [x17, x13]\n"
"fmla v29.8h, v2.8h, v5.8h\n"
- "add x17, x17, #0x10\n"
+ "ldr q13, [x16, x12]\n"
+ "add x16, x16, #0x10\n"
"fmla v30.8h, v2.8h, v12.8h\n"
"fmla v31.8h, v2.8h, v9.8h\n"
- "ldr q2, [x8, #0x70]\n"
+ "ldr q2, [x17, #0x70]\n"
"fmla v28.8h, v3.8h, v5.8h\n"
- "ld1 { v5.8h }, [x16]\n"
"fmla v29.8h, v3.8h, v6.8h\n"
+ "ld1 { v5.8h }, [x15]\n"
"fmla v30.8h, v3.8h, v9.8h\n"
"fmla v31.8h, v3.8h, v13.8h\n"
- "ldr q3, [x8, #0x80]\n"
+ "ldr q3, [x17, #0x80]\n"
"fmla v28.8h, v4.8h, v6.8h\n"
- "ldr q6, [x16, x2]\n"
"fmla v29.8h, v4.8h, v10.8h\n"
- "ldr q10, [x16, x6]\n"
+ "ldr q6, [x15, x3]\n"
+ "ldr q10, [x15, x7]\n"
"fmla v30.8h, v4.8h, v13.8h\n"
"fmla v31.8h, v4.8h, v8.8h\n"
- "ldr q4, [x8, #0x90]\n"
+ "ldr q4, [x17, #0x90]\n"
"fmla v28.8h, v0.8h, v14.8h\n"
- "ldr q14, [x16, x11]\n"
"fmla v29.8h, v0.8h, v11.8h\n"
+ "ldr q14, [x15, x10]\n"
"fmla v30.8h, v0.8h, v5.8h\n"
"fmla v31.8h, v0.8h, v6.8h\n"
- "ldr q0, [x8, #0xa0]\n"
+ "ldr q0, [x17, #0xa0]\n"
"fmla v28.8h, v1.8h, v11.8h\n"
- "ldr q11, [x16, x15]\n"
"fmla v29.8h, v1.8h, v12.8h\n"
+ "ldr q11, [x15, x14]\n"
"fmla v30.8h, v1.8h, v6.8h\n"
"fmla v31.8h, v1.8h, v10.8h\n"
- "ldr q1, [x8, #0xb0]\n"
+ "ldr q1, [x17, #0xb0]\n"
"fmla v28.8h, v2.8h, v12.8h\n"
- "ldr q12, [x16, x13]\n"
"fmla v29.8h, v2.8h, v9.8h\n"
- "add x16, x16, #0x10\n"
+ "ldr q12, [x15, x12]\n"
+ "add x15, x15, #0x10\n"
"fmla v30.8h, v2.8h, v10.8h\n"
"fmla v31.8h, v2.8h, v11.8h\n"
- "ldr q2, [x8, #0xc0]\n"
+ "ldr q2, [x17, #0xc0]\n"
"fmla v28.8h, v3.8h, v9.8h\n"
- "ld1 { v9.8h }, [x14]\n"
"fmla v29.8h, v3.8h, v13.8h\n"
+ "ld1 { v9.8h }, [x13]\n"
"fmla v30.8h, v3.8h, v11.8h\n"
"fmla v31.8h, v3.8h, v12.8h\n"
- "ldr q3, [x8, #0xd0]\n"
+ "ldr q3, [x17, #0xd0]\n"
"fmla v28.8h, v4.8h, v13.8h\n"
- "ldr q13, [x14, x2]\n"
"fmla v29.8h, v4.8h, v8.8h\n"
- "ldr q8, [x14, x13]\n"
+ "ldr q13, [x13, x3]\n"
+ "ldr q8, [x13, x12]\n"
"fmla v30.8h, v4.8h, v12.8h\n"
"fmla v31.8h, v4.8h, v14.8h\n"
- "ldr q4, [x8, #0xe0]\n"
+ "ldr q4, [x17, #0xe0]\n"
"fmla v28.8h, v0.8h, v5.8h\n"
- "ldr q5, [x14, x6]\n"
"fmla v29.8h, v0.8h, v6.8h\n"
+ "ldr q5, [x13, x7]\n"
"fmla v30.8h, v0.8h, v9.8h\n"
"fmla v31.8h, v0.8h, v13.8h\n"
- "ldr q0, [x8, #0xf0]\n"
+ "ldr q0, [x17, #0xf0]\n"
"fmla v28.8h, v1.8h, v6.8h\n"
- "ldr q6, [x14, x15]\n"
"fmla v29.8h, v1.8h, v10.8h\n"
+ "ldr q6, [x13, x14]\n"
"fmla v30.8h, v1.8h, v13.8h\n"
"fmla v31.8h, v1.8h, v5.8h\n"
- "ldr q1, [x8, #0x100]\n"
+ "ldr q1, [x17, #0x100]\n"
"fmla v28.8h, v2.8h, v10.8h\n"
- "ldr q10, [x14, x11]\n"
"fmla v29.8h, v2.8h, v11.8h\n"
- "add x14, x14, #0x10\n"
+ "ldr q10, [x13, x10]\n"
+ "add x13, x13, #0x10\n"
"fmla v30.8h, v2.8h, v5.8h\n"
"fmla v31.8h, v2.8h, v6.8h\n"
- "ldr q2, [x8, #0x110]\n"
+ "ldr q2, [x17, #0x110]\n"
"fmla v28.8h, v3.8h, v11.8h\n"
- "ld1 { v11.8h }, [x12]\n"
"fmla v29.8h, v3.8h, v12.8h\n"
+ "ld1 { v11.8h }, [x11]\n"
"fmla v30.8h, v3.8h, v6.8h\n"
"fmla v31.8h, v3.8h, v8.8h\n"
- "ldr q3, [x8, #0x120]\n"
+ "ldr q3, [x17, #0x120]\n"
"fmla v28.8h, v4.8h, v12.8h\n"
- "ldr q12, [x12, x2]\n"
"fmla v29.8h, v4.8h, v14.8h\n"
+ "ldr q12, [x11, x3]\n"
"fmla v30.8h, v4.8h, v8.8h\n"
"fmla v31.8h, v4.8h, v10.8h\n"
- "ldr q4, [x8, #0x130]\n"
- "add x8, x8, #0x140\n"
+ "ldr q4, [x17, #0x130]\n"
+ "add x17, x17, #0x140\n"
"fmla v28.8h, v0.8h, v9.8h\n"
- "ldr q9, [x12, x6]\n"
"fmla v29.8h, v0.8h, v13.8h\n"
+ "ldr q9, [x11, x7]\n"
"fmla v30.8h, v0.8h, v11.8h\n"
- "ldr q11, [x12, x15]\n"
"fmla v31.8h, v0.8h, v12.8h\n"
+ "ldr q11, [x11, x14]\n"
"fmla v28.8h, v1.8h, v13.8h\n"
"fmla v29.8h, v1.8h, v5.8h\n"
"fmla v30.8h, v1.8h, v12.8h\n"
- "ldr q12, [x12, x13]\n"
"fmla v31.8h, v1.8h, v9.8h\n"
+ "ldr q12, [x11, x12]\n"
"fmla v28.8h, v2.8h, v5.8h\n"
"fmla v29.8h, v2.8h, v6.8h\n"
"fmla v30.8h, v2.8h, v9.8h\n"
- "ldr q9, [x12, x11]\n"
"fmla v31.8h, v2.8h, v11.8h\n"
- "add x12, x12, #0x10\n"
+ "ldr q9, [x11, x10]\n"
+ "add x11, x11, #0x10\n"
"fmla v28.8h, v3.8h, v6.8h\n"
"fmla v29.8h, v3.8h, v8.8h\n"
"fmla v30.8h, v3.8h, v11.8h\n"
@@ -498,120 +498,120 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"fmax v31.8h, v31.8h, v18.8h\n"
"fmin v28.8h, v28.8h, v17.8h\n"
"fmin v29.8h, v29.8h, v17.8h\n"
- "st1 { v28.8h }, [x5]\n"
+ "st1 { v28.8h }, [x6]\n"
"fmin v30.8h, v30.8h, v17.8h\n"
"fmin v31.8h, v31.8h, v17.8h\n"
- "str q29, [x5, x3]\n"
- "add x5, x5, #0x10\n"
- "st1 { v30.8h }, [x10]\n"
- "str q31, [x10, x3]\n"
- "add x10, x10, #0x10\n"
+ "str q29, [x6, x4]\n"
+ "add x6, x6, #0x10\n"
+ "st1 { v30.8h }, [x9]\n"
+ "str q31, [x9, x4]\n"
+ "add x9, x9, #0x10\n"
"4:" // Tile loop: Oddments
"tst %x[n_channels], #0x7\n"
"beq 117f\n"
- "ldr q16, [x8, #0x0]\n"
- "ldr q0, [x8, #0x10]\n"
- "add x9, x4, XZR\n"
- "add x28, x4, x2\n"
- "ldr q1, [x8, #0x20]\n"
- "ldr q2, [x8, #0x30]\n"
- "add x27, x7, XZR\n"
- "add x26, x7, x2\n"
- "ldr q3, [x8, #0x40]\n"
- "ldr q4, [x8, #0x50]\n"
- "add x25, x4, x6\n"
- "add x24, x7, x6\n"
- "add x23, x4, x15\n"
- "add x22, x4, x13\n"
- "add x21, x7, x11\n"
- "add x20, x17, XZR\n"
- "add x8, x8, #0x60\n"
+ "ldr q16, [x17, #0x0]\n"
+ "ldr q0, [x17, #0x10]\n"
+ "ldr q1, [x17, #0x20]\n"
+ "ldr q2, [x17, #0x30]\n"
+ "add x28, x5, XZR\n"
+ "add x27, x5, x3\n"
+ "ldr q3, [x17, #0x40]\n"
+ "ldr q4, [x17, #0x50]\n"
+ "add x26, x8, XZR\n"
+ "add x25, x8, x3\n"
+ "add x24, x5, x7\n"
+ "add x23, x8, x7\n"
+ "add x22, x5, x14\n"
+ "add x21, x5, x12\n"
+ "add x20, x8, x10\n"
+ "add x19, x16, XZR\n"
+ "add x17, x17, #0x60\n"
"tbz %x[n_channels], #2, 6f\n"
- "ldr d5, [x9], #0x8\n"
- "ldr d6, [x28], #0x8\n"
- "ldr d7, [x27], #0x8\n"
- "ldr d8, [x26], #0x8\n"
- "ldr d9, [x25], #0x8\n"
- "ldr d13, [x24], #0x8\n"
- "ldr d11, [x23], #0x8\n"
- "ldr d12, [x22], #0x8\n"
- "ldr d10, [x21], #0x8\n"
- "ldr d14, [x20], #0x8\n"
+ "ldr d5, [x28], #0x8\n"
+ "ldr d6, [x27], #0x8\n"
+ "ldr d7, [x26], #0x8\n"
+ "ldr d8, [x25], #0x8\n"
+ "ldr d9, [x24], #0x8\n"
+ "ldr d13, [x23], #0x8\n"
+ "ldr d11, [x22], #0x8\n"
+ "ldr d12, [x21], #0x8\n"
+ "ldr d10, [x20], #0x8\n"
+ "ldr d14, [x19], #0x8\n"
"tbz %x[n_channels], #1, 5f\n"
- "ld1 { v5.s }[2], [x9], #0x4\n"
- "ld1 { v6.s }[2], [x28], #0x4\n"
- "ld1 { v7.s }[2], [x27], #0x4\n"
- "ld1 { v8.s }[2], [x26], #0x4\n"
- "ld1 { v9.s }[2], [x25], #0x4\n"
- "ld1 { v13.s }[2], [x24], #0x4\n"
- "ld1 { v11.s }[2], [x23], #0x4\n"
- "ld1 { v12.s }[2], [x22], #0x4\n"
- "ld1 { v10.s }[2], [x21], #0x4\n"
- "ld1 { v14.s }[2], [x20], #0x4\n"
+ "ld1 { v5.s }[2], [x28], #0x4\n"
+ "ld1 { v6.s }[2], [x27], #0x4\n"
+ "ld1 { v7.s }[2], [x26], #0x4\n"
+ "ld1 { v8.s }[2], [x25], #0x4\n"
+ "ld1 { v9.s }[2], [x24], #0x4\n"
+ "ld1 { v13.s }[2], [x23], #0x4\n"
+ "ld1 { v11.s }[2], [x22], #0x4\n"
+ "ld1 { v12.s }[2], [x21], #0x4\n"
+ "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v14.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 8f\n"
- "ld1 { v5.h }[6], [x9]\n"
- "ld1 { v6.h }[6], [x28]\n"
- "ld1 { v7.h }[6], [x27]\n"
- "ld1 { v8.h }[6], [x26]\n"
- "ld1 { v9.h }[6], [x25]\n"
- "ld1 { v13.h }[6], [x24]\n"
- "ld1 { v11.h }[6], [x23]\n"
- "ld1 { v12.h }[6], [x22]\n"
- "ld1 { v10.h }[6], [x21]\n"
- "ld1 { v14.h }[6], [x20]\n"
+ "ld1 { v5.h }[6], [x28]\n"
+ "ld1 { v6.h }[6], [x27]\n"
+ "ld1 { v7.h }[6], [x26]\n"
+ "ld1 { v8.h }[6], [x25]\n"
+ "ld1 { v9.h }[6], [x24]\n"
+ "ld1 { v13.h }[6], [x23]\n"
+ "ld1 { v11.h }[6], [x22]\n"
+ "ld1 { v12.h }[6], [x21]\n"
+ "ld1 { v10.h }[6], [x20]\n"
+ "ld1 { v14.h }[6], [x19]\n"
"b 8f\n"
"5:" // Tile loop: Oddments: Load inputs: (0, 0), (0, 1), (1, 0), (1, 1), (0, 2), (1, 2), (0, 3), (0, 4), (1, 5), (2, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 8f\n"
- "ld1 { v5.h }[4], [x9]\n"
- "ld1 { v6.h }[4], [x28]\n"
- "ld1 { v7.h }[4], [x27]\n"
- "ld1 { v8.h }[4], [x26]\n"
- "ld1 { v9.h }[4], [x25]\n"
- "ld1 { v13.h }[4], [x24]\n"
- "ld1 { v11.h }[4], [x23]\n"
- "ld1 { v12.h }[4], [x22]\n"
- "ld1 { v10.h }[4], [x21]\n"
- "ld1 { v14.h }[4], [x20]\n"
+ "ld1 { v5.h }[4], [x28]\n"
+ "ld1 { v6.h }[4], [x27]\n"
+ "ld1 { v7.h }[4], [x26]\n"
+ "ld1 { v8.h }[4], [x25]\n"
+ "ld1 { v9.h }[4], [x24]\n"
+ "ld1 { v13.h }[4], [x23]\n"
+ "ld1 { v11.h }[4], [x22]\n"
+ "ld1 { v12.h }[4], [x21]\n"
+ "ld1 { v10.h }[4], [x20]\n"
+ "ld1 { v14.h }[4], [x19]\n"
"b 8f\n"
"6:" // Tile loop: Oddments: Load inputs: (0, 0), (0, 1), (1, 0), (1, 1), (0, 2), (1, 2), (0, 3), (0, 4), (1, 5), (2, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 7f\n"
- "ldr s5, [x9], #0x4\n"
- "ldr s6, [x28], #0x4\n"
- "ldr s7, [x27], #0x4\n"
- "ldr s8, [x26], #0x4\n"
- "ldr s9, [x25], #0x4\n"
- "ldr s13, [x24], #0x4\n"
- "ldr s11, [x23], #0x4\n"
- "ldr s12, [x22], #0x4\n"
- "ldr s10, [x21], #0x4\n"
- "ldr s14, [x20], #0x4\n"
+ "ldr s5, [x28], #0x4\n"
+ "ldr s6, [x27], #0x4\n"
+ "ldr s7, [x26], #0x4\n"
+ "ldr s8, [x25], #0x4\n"
+ "ldr s9, [x24], #0x4\n"
+ "ldr s13, [x23], #0x4\n"
+ "ldr s11, [x22], #0x4\n"
+ "ldr s12, [x21], #0x4\n"
+ "ldr s10, [x20], #0x4\n"
+ "ldr s14, [x19], #0x4\n"
"tbz %x[n_channels], #0, 8f\n"
- "ld1 { v5.h }[2], [x9]\n"
- "ld1 { v6.h }[2], [x28]\n"
- "ld1 { v7.h }[2], [x27]\n"
- "ld1 { v8.h }[2], [x26]\n"
- "ld1 { v9.h }[2], [x25]\n"
- "ld1 { v13.h }[2], [x24]\n"
- "ld1 { v11.h }[2], [x23]\n"
- "ld1 { v12.h }[2], [x22]\n"
- "ld1 { v10.h }[2], [x21]\n"
- "ld1 { v14.h }[2], [x20]\n"
+ "ld1 { v5.h }[2], [x28]\n"
+ "ld1 { v6.h }[2], [x27]\n"
+ "ld1 { v7.h }[2], [x26]\n"
+ "ld1 { v8.h }[2], [x25]\n"
+ "ld1 { v9.h }[2], [x24]\n"
+ "ld1 { v13.h }[2], [x23]\n"
+ "ld1 { v11.h }[2], [x22]\n"
+ "ld1 { v12.h }[2], [x21]\n"
+ "ld1 { v10.h }[2], [x20]\n"
+ "ld1 { v14.h }[2], [x19]\n"
"b 8f\n"
"7:" // Tile loop: Oddments: Load inputs: (0, 0), (0, 1), (1, 0), (1, 1), (0, 2), (1, 2), (0, 3), (0, 4), (1, 5), (2, 0): Bit 2: Unset: Bit 1: Unset
- "ldr h5, [x9, #0x0]\n"
- "ldr h6, [x28, #0x0]\n"
- "ldr h7, [x27, #0x0]\n"
- "ldr h8, [x26, #0x0]\n"
- "ldr h9, [x25, #0x0]\n"
- "ldr h13, [x24, #0x0]\n"
- "ldr h11, [x23, #0x0]\n"
- "ldr h12, [x22, #0x0]\n"
- "ldr h10, [x21, #0x0]\n"
- "ldr h14, [x20, #0x0]\n"
+ "ldr h5, [x28, #0x0]\n"
+ "ldr h6, [x27, #0x0]\n"
+ "ldr h7, [x26, #0x0]\n"
+ "ldr h8, [x25, #0x0]\n"
+ "ldr h9, [x24, #0x0]\n"
+ "ldr h13, [x23, #0x0]\n"
+ "ldr h11, [x22, #0x0]\n"
+ "ldr h12, [x21, #0x0]\n"
+ "ldr h10, [x20, #0x0]\n"
+ "ldr h14, [x19, #0x0]\n"
"8:" // Tile loop: Oddments: Load inputs: (0, 0), (0, 1), (1, 0), (1, 1), (0, 2), (1, 2), (0, 3), (0, 4), (1, 5), (2, 0): Bit 2: End
"mov v28.16b, v16.16b\n fmla v28.8h, v0.8h, v5.8h\n"
"mov v29.16b, v16.16b\n fmla v29.8h, v0.8h, v6.8h\n"
- "add x20, x7, x15\n"
+ "add x19, x8, x14\n"
"mov v30.16b, v16.16b\n fmla v30.8h, v0.8h, v7.8h\n"
"mov v31.16b, v16.16b\n fmla v31.8h, v0.8h, v8.8h\n"
"fmla v28.8h, v1.8h, v6.8h\n"
@@ -622,676 +622,676 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"fmla v29.8h, v2.8h, v11.8h\n"
"fmla v30.8h, v2.8h, v13.8h\n"
"tbz %x[n_channels], #2, 10f\n"
- "ldr d5, [x20], #0x8\n"
+ "ldr d5, [x19], #0x8\n"
"tbz %x[n_channels], #1, 9f\n"
- "ld1 { v5.s }[2], [x20], #0x4\n"
+ "ld1 { v5.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 12f\n"
- "ld1 { v5.h }[6], [x20]\n"
+ "ld1 { v5.h }[6], [x19]\n"
"b 12f\n"
"9:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 12f\n"
- "ld1 { v5.h }[4], [x20]\n"
+ "ld1 { v5.h }[4], [x19]\n"
"b 12f\n"
"10:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 11f\n"
- "ldr s5, [x20], #0x4\n"
+ "ldr s5, [x19], #0x4\n"
"tbz %x[n_channels], #0, 12f\n"
- "ld1 { v5.h }[2], [x20]\n"
+ "ld1 { v5.h }[2], [x19]\n"
"b 12f\n"
"11:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 2: Unset: Bit 1: Unset
- "ldr h5, [x20, #0x0]\n"
+ "ldr h5, [x19, #0x0]\n"
"12:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 2: End
"fmla v31.8h, v2.8h, v5.8h\n"
"fmla v28.8h, v3.8h, v11.8h\n"
- "add x20, x7, x13\n"
+ "add x19, x8, x12\n"
"fmla v29.8h, v3.8h, v12.8h\n"
"fmla v30.8h, v3.8h, v5.8h\n"
"tbz %x[n_channels], #2, 14f\n"
- "ldr d6, [x20], #0x8\n"
+ "ldr d6, [x19], #0x8\n"
"tbz %x[n_channels], #1, 13f\n"
- "ld1 { v6.s }[2], [x20], #0x4\n"
+ "ld1 { v6.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 16f\n"
- "ld1 { v6.h }[6], [x20]\n"
+ "ld1 { v6.h }[6], [x19]\n"
"b 16f\n"
"13:" // Tile loop: Oddments: Load inputs: (1, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 16f\n"
- "ld1 { v6.h }[4], [x20]\n"
+ "ld1 { v6.h }[4], [x19]\n"
"b 16f\n"
"14:" // Tile loop: Oddments: Load inputs: (1, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 15f\n"
- "ldr s6, [x20], #0x4\n"
+ "ldr s6, [x19], #0x4\n"
"tbz %x[n_channels], #0, 16f\n"
- "ld1 { v6.h }[2], [x20]\n"
+ "ld1 { v6.h }[2], [x19]\n"
"b 16f\n"
"15:" // Tile loop: Oddments: Load inputs: (1, 4): Bit 2: Unset: Bit 1: Unset
- "ldr h6, [x20, #0x0]\n"
+ "ldr h6, [x19, #0x0]\n"
"16:" // Tile loop: Oddments: Load inputs: (1, 4): Bit 2: End
"fmla v31.8h, v3.8h, v6.8h\n"
"fmla v28.8h, v4.8h, v12.8h\n"
- "add x20, x4, x11\n"
+ "add x19, x5, x10\n"
"tbz %x[n_channels], #2, 18f\n"
- "ldr d9, [x20], #0x8\n"
+ "ldr d9, [x19], #0x8\n"
"tbz %x[n_channels], #1, 17f\n"
- "ld1 { v9.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 20f\n"
- "ld1 { v9.h }[6], [x20]\n"
+ "ld1 { v9.h }[6], [x19]\n"
"b 20f\n"
"17:" // Tile loop: Oddments: Load inputs: (0, 5): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 20f\n"
- "ld1 { v9.h }[4], [x20]\n"
+ "ld1 { v9.h }[4], [x19]\n"
"b 20f\n"
"18:" // Tile loop: Oddments: Load inputs: (0, 5): Bit 2: Unset
"tbz %x[n_channels], #1, 19f\n"
- "ldr s9, [x20], #0x4\n"
+ "ldr s9, [x19], #0x4\n"
"tbz %x[n_channels], #0, 20f\n"
- "ld1 { v9.h }[2], [x20]\n"
+ "ld1 { v9.h }[2], [x19]\n"
"b 20f\n"
"19:" // Tile loop: Oddments: Load inputs: (0, 5): Bit 2: Unset: Bit 1: Unset
- "ldr h9, [x20, #0x0]\n"
+ "ldr h9, [x19, #0x0]\n"
"20:" // Tile loop: Oddments: Load inputs: (0, 5): Bit 2: End
- "ldr q0, [x8, #0x0]\n"
"fmla v29.8h, v4.8h, v9.8h\n"
"fmla v30.8h, v4.8h, v6.8h\n"
- "add x20, x17, x2\n"
+ "ldr q0, [x17, #0x0]\n"
+ "add x19, x16, x3\n"
"fmla v31.8h, v4.8h, v10.8h\n"
"fmla v28.8h, v0.8h, v7.8h\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"fmla v29.8h, v0.8h, v8.8h\n"
"fmla v30.8h, v0.8h, v14.8h\n"
"tbz %x[n_channels], #2, 22f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #1, 21f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 24f\n"
- "ld1 { v11.h }[6], [x20]\n"
+ "ld1 { v11.h }[6], [x19]\n"
"b 24f\n"
"21:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 24f\n"
- "ld1 { v11.h }[4], [x20]\n"
+ "ld1 { v11.h }[4], [x19]\n"
"b 24f\n"
"22:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 23f\n"
- "ldr s11, [x20], #0x4\n"
+ "ldr s11, [x19], #0x4\n"
"tbz %x[n_channels], #0, 24f\n"
- "ld1 { v11.h }[2], [x20]\n"
+ "ld1 { v11.h }[2], [x19]\n"
"b 24f\n"
"23:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 2: Unset: Bit 1: Unset
- "ldr h11, [x20, #0x0]\n"
+ "ldr h11, [x19, #0x0]\n"
"24:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 2: End
- "ldr q1, [x8, #0x0]\n"
+ "ldr q1, [x17, #0x0]\n"
"fmla v31.8h, v0.8h, v11.8h\n"
"fmla v28.8h, v1.8h, v8.8h\n"
- "add x20, x17, x6\n"
+ "add x19, x16, x7\n"
"fmla v29.8h, v1.8h, v13.8h\n"
"fmla v30.8h, v1.8h, v11.8h\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #2, 26f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #1, 25f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 28f\n"
- "ld1 { v12.h }[6], [x20]\n"
+ "ld1 { v12.h }[6], [x19]\n"
"b 28f\n"
"25:" // Tile loop: Oddments: Load inputs: (2, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 28f\n"
- "ld1 { v12.h }[4], [x20]\n"
+ "ld1 { v12.h }[4], [x19]\n"
"b 28f\n"
"26:" // Tile loop: Oddments: Load inputs: (2, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 27f\n"
- "ldr s12, [x20], #0x4\n"
+ "ldr s12, [x19], #0x4\n"
"tbz %x[n_channels], #0, 28f\n"
- "ld1 { v12.h }[2], [x20]\n"
+ "ld1 { v12.h }[2], [x19]\n"
"b 28f\n"
"27:" // Tile loop: Oddments: Load inputs: (2, 2): Bit 2: Unset: Bit 1: Unset
- "ldr h12, [x20, #0x0]\n"
+ "ldr h12, [x19, #0x0]\n"
"28:" // Tile loop: Oddments: Load inputs: (2, 2): Bit 2: End
- "ldr q2, [x8, #0x0]\n"
+ "ldr q2, [x17, #0x0]\n"
"fmla v31.8h, v1.8h, v12.8h\n"
"fmla v28.8h, v2.8h, v13.8h\n"
- "add x20, x17, x15\n"
+ "add x19, x16, x14\n"
"fmla v29.8h, v2.8h, v5.8h\n"
"fmla v30.8h, v2.8h, v12.8h\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #2, 30f\n"
- "ldr d9, [x20], #0x8\n"
+ "ldr d9, [x19], #0x8\n"
"tbz %x[n_channels], #1, 29f\n"
- "ld1 { v9.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 32f\n"
- "ld1 { v9.h }[6], [x20]\n"
+ "ld1 { v9.h }[6], [x19]\n"
"b 32f\n"
"29:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 32f\n"
- "ld1 { v9.h }[4], [x20]\n"
+ "ld1 { v9.h }[4], [x19]\n"
"b 32f\n"
"30:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 31f\n"
- "ldr s9, [x20], #0x4\n"
+ "ldr s9, [x19], #0x4\n"
"tbz %x[n_channels], #0, 32f\n"
- "ld1 { v9.h }[2], [x20]\n"
+ "ld1 { v9.h }[2], [x19]\n"
"b 32f\n"
"31:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 2: Unset: Bit 1: Unset
- "ldr h9, [x20, #0x0]\n"
+ "ldr h9, [x19, #0x0]\n"
"32:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 2: End
- "ldr q3, [x8, #0x0]\n"
+ "ldr q3, [x17, #0x0]\n"
"fmla v31.8h, v2.8h, v9.8h\n"
"fmla v28.8h, v3.8h, v5.8h\n"
- "add x20, x17, x13\n"
+ "add x19, x16, x12\n"
"fmla v29.8h, v3.8h, v6.8h\n"
"fmla v30.8h, v3.8h, v9.8h\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #2, 34f\n"
- "ldr d13, [x20], #0x8\n"
+ "ldr d13, [x19], #0x8\n"
"tbz %x[n_channels], #1, 33f\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 36f\n"
- "ld1 { v13.h }[6], [x20]\n"
+ "ld1 { v13.h }[6], [x19]\n"
"b 36f\n"
"33:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 36f\n"
- "ld1 { v13.h }[4], [x20]\n"
+ "ld1 { v13.h }[4], [x19]\n"
"b 36f\n"
"34:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 35f\n"
- "ldr s13, [x20], #0x4\n"
+ "ldr s13, [x19], #0x4\n"
"tbz %x[n_channels], #0, 36f\n"
- "ld1 { v13.h }[2], [x20]\n"
+ "ld1 { v13.h }[2], [x19]\n"
"b 36f\n"
"35:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 2: Unset: Bit 1: Unset
- "ldr h13, [x20, #0x0]\n"
+ "ldr h13, [x19, #0x0]\n"
"36:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 2: End
- "ldr q4, [x8, #0x0]\n"
+ "ldr q4, [x17, #0x0]\n"
"fmla v31.8h, v3.8h, v13.8h\n"
"fmla v28.8h, v4.8h, v6.8h\n"
- "add x20, x17, x11\n"
+ "add x19, x16, x10\n"
"fmla v29.8h, v4.8h, v10.8h\n"
"fmla v30.8h, v4.8h, v13.8h\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #2, 38f\n"
- "ldr d8, [x20], #0x8\n"
+ "ldr d8, [x19], #0x8\n"
"tbz %x[n_channels], #1, 37f\n"
- "ld1 { v8.s }[2], [x20], #0x4\n"
+ "ld1 { v8.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 40f\n"
- "ld1 { v8.h }[6], [x20]\n"
+ "ld1 { v8.h }[6], [x19]\n"
"b 40f\n"
"37:" // Tile loop: Oddments: Load inputs: (2, 5): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 40f\n"
- "ld1 { v8.h }[4], [x20]\n"
+ "ld1 { v8.h }[4], [x19]\n"
"b 40f\n"
"38:" // Tile loop: Oddments: Load inputs: (2, 5): Bit 2: Unset
"tbz %x[n_channels], #1, 39f\n"
- "ldr s8, [x20], #0x4\n"
+ "ldr s8, [x19], #0x4\n"
"tbz %x[n_channels], #0, 40f\n"
- "ld1 { v8.h }[2], [x20]\n"
+ "ld1 { v8.h }[2], [x19]\n"
"b 40f\n"
"39:" // Tile loop: Oddments: Load inputs: (2, 5): Bit 2: Unset: Bit 1: Unset
- "ldr h8, [x20, #0x0]\n"
+ "ldr h8, [x19, #0x0]\n"
"40:" // Tile loop: Oddments: Load inputs: (2, 5): Bit 2: End
- "ldr q0, [x8, #0x0]\n"
+ "ldr q0, [x17, #0x0]\n"
"fmla v31.8h, v4.8h, v8.8h\n"
"fmla v28.8h, v0.8h, v14.8h\n"
- "add x20, x16, XZR\n"
+ "add x19, x15, XZR\n"
"fmla v29.8h, v0.8h, v11.8h\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #2, 42f\n"
- "ldr d5, [x20], #0x8\n"
+ "ldr d5, [x19], #0x8\n"
"tbz %x[n_channels], #1, 41f\n"
- "ld1 { v5.s }[2], [x20], #0x4\n"
+ "ld1 { v5.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 44f\n"
- "ld1 { v5.h }[6], [x20]\n"
+ "ld1 { v5.h }[6], [x19]\n"
"b 44f\n"
"41:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 44f\n"
- "ld1 { v5.h }[4], [x20]\n"
+ "ld1 { v5.h }[4], [x19]\n"
"b 44f\n"
"42:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 43f\n"
- "ldr s5, [x20], #0x4\n"
+ "ldr s5, [x19], #0x4\n"
"tbz %x[n_channels], #0, 44f\n"
- "ld1 { v5.h }[2], [x20]\n"
+ "ld1 { v5.h }[2], [x19]\n"
"b 44f\n"
"43:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 2: Unset: Bit 1: Unset
- "ldr h5, [x20, #0x0]\n"
+ "ldr h5, [x19, #0x0]\n"
"44:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 2: End
"fmla v30.8h, v0.8h, v5.8h\n"
- "add x20, x16, x2\n"
+ "add x19, x15, x3\n"
"tbz %x[n_channels], #2, 46f\n"
- "ldr d6, [x20], #0x8\n"
+ "ldr d6, [x19], #0x8\n"
"tbz %x[n_channels], #1, 45f\n"
- "ld1 { v6.s }[2], [x20], #0x4\n"
+ "ld1 { v6.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 48f\n"
- "ld1 { v6.h }[6], [x20]\n"
+ "ld1 { v6.h }[6], [x19]\n"
"b 48f\n"
"45:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 48f\n"
- "ld1 { v6.h }[4], [x20]\n"
+ "ld1 { v6.h }[4], [x19]\n"
"b 48f\n"
"46:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 47f\n"
- "ldr s6, [x20], #0x4\n"
+ "ldr s6, [x19], #0x4\n"
"tbz %x[n_channels], #0, 48f\n"
- "ld1 { v6.h }[2], [x20]\n"
+ "ld1 { v6.h }[2], [x19]\n"
"b 48f\n"
"47:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 2: Unset: Bit 1: Unset
- "ldr h6, [x20, #0x0]\n"
+ "ldr h6, [x19, #0x0]\n"
"48:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 2: End
- "ldr q1, [x8, #0x0]\n"
+ "ldr q1, [x17, #0x0]\n"
"fmla v31.8h, v0.8h, v6.8h\n"
"fmla v28.8h, v1.8h, v11.8h\n"
- "add x20, x16, x6\n"
+ "add x19, x15, x7\n"
"fmla v29.8h, v1.8h, v12.8h\n"
"fmla v30.8h, v1.8h, v6.8h\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #2, 50f\n"
- "ldr d10, [x20], #0x8\n"
+ "ldr d10, [x19], #0x8\n"
"tbz %x[n_channels], #1, 49f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 52f\n"
- "ld1 { v10.h }[6], [x20]\n"
+ "ld1 { v10.h }[6], [x19]\n"
"b 52f\n"
"49:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 52f\n"
- "ld1 { v10.h }[4], [x20]\n"
+ "ld1 { v10.h }[4], [x19]\n"
"b 52f\n"
"50:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 51f\n"
- "ldr s10, [x20], #0x4\n"
+ "ldr s10, [x19], #0x4\n"
"tbz %x[n_channels], #0, 52f\n"
- "ld1 { v10.h }[2], [x20]\n"
+ "ld1 { v10.h }[2], [x19]\n"
"b 52f\n"
"51:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 2: Unset: Bit 1: Unset
- "ldr h10, [x20, #0x0]\n"
+ "ldr h10, [x19, #0x0]\n"
"52:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 2: End
- "ldr q2, [x8, #0x0]\n"
+ "ldr q2, [x17, #0x0]\n"
"fmla v31.8h, v1.8h, v10.8h\n"
"fmla v28.8h, v2.8h, v12.8h\n"
- "add x20, x16, x15\n"
+ "add x19, x15, x14\n"
"fmla v29.8h, v2.8h, v9.8h\n"
"fmla v30.8h, v2.8h, v10.8h\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #2, 54f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #1, 53f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 56f\n"
- "ld1 { v11.h }[6], [x20]\n"
+ "ld1 { v11.h }[6], [x19]\n"
"b 56f\n"
"53:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 56f\n"
- "ld1 { v11.h }[4], [x20]\n"
+ "ld1 { v11.h }[4], [x19]\n"
"b 56f\n"
"54:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 55f\n"
- "ldr s11, [x20], #0x4\n"
+ "ldr s11, [x19], #0x4\n"
"tbz %x[n_channels], #0, 56f\n"
- "ld1 { v11.h }[2], [x20]\n"
+ "ld1 { v11.h }[2], [x19]\n"
"b 56f\n"
"55:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 2: Unset: Bit 1: Unset
- "ldr h11, [x20, #0x0]\n"
+ "ldr h11, [x19, #0x0]\n"
"56:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 2: End
- "ldr q3, [x8, #0x0]\n"
+ "ldr q3, [x17, #0x0]\n"
"fmla v31.8h, v2.8h, v11.8h\n"
"fmla v28.8h, v3.8h, v9.8h\n"
- "add x20, x16, x13\n"
+ "add x19, x15, x12\n"
"fmla v29.8h, v3.8h, v13.8h\n"
"fmla v30.8h, v3.8h, v11.8h\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #2, 58f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #1, 57f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 60f\n"
- "ld1 { v12.h }[6], [x20]\n"
+ "ld1 { v12.h }[6], [x19]\n"
"b 60f\n"
"57:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 60f\n"
- "ld1 { v12.h }[4], [x20]\n"
+ "ld1 { v12.h }[4], [x19]\n"
"b 60f\n"
"58:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 59f\n"
- "ldr s12, [x20], #0x4\n"
+ "ldr s12, [x19], #0x4\n"
"tbz %x[n_channels], #0, 60f\n"
- "ld1 { v12.h }[2], [x20]\n"
+ "ld1 { v12.h }[2], [x19]\n"
"b 60f\n"
"59:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 2: Unset: Bit 1: Unset
- "ldr h12, [x20, #0x0]\n"
+ "ldr h12, [x19, #0x0]\n"
"60:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 2: End
- "ldr q4, [x8, #0x0]\n"
+ "ldr q4, [x17, #0x0]\n"
"fmla v31.8h, v3.8h, v12.8h\n"
"fmla v28.8h, v4.8h, v13.8h\n"
- "add x20, x16, x11\n"
+ "add x19, x15, x10\n"
"fmla v29.8h, v4.8h, v8.8h\n"
"fmla v30.8h, v4.8h, v12.8h\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #2, 62f\n"
- "ldr d14, [x20], #0x8\n"
+ "ldr d14, [x19], #0x8\n"
"tbz %x[n_channels], #1, 61f\n"
- "ld1 { v14.s }[2], [x20], #0x4\n"
+ "ld1 { v14.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 64f\n"
- "ld1 { v14.h }[6], [x20]\n"
+ "ld1 { v14.h }[6], [x19]\n"
"b 64f\n"
"61:" // Tile loop: Oddments: Load inputs: (3, 5): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 64f\n"
- "ld1 { v14.h }[4], [x20]\n"
+ "ld1 { v14.h }[4], [x19]\n"
"b 64f\n"
"62:" // Tile loop: Oddments: Load inputs: (3, 5): Bit 2: Unset
"tbz %x[n_channels], #1, 63f\n"
- "ldr s14, [x20], #0x4\n"
+ "ldr s14, [x19], #0x4\n"
"tbz %x[n_channels], #0, 64f\n"
- "ld1 { v14.h }[2], [x20]\n"
+ "ld1 { v14.h }[2], [x19]\n"
"b 64f\n"
"63:" // Tile loop: Oddments: Load inputs: (3, 5): Bit 2: Unset: Bit 1: Unset
- "ldr h14, [x20, #0x0]\n"
+ "ldr h14, [x19, #0x0]\n"
"64:" // Tile loop: Oddments: Load inputs: (3, 5): Bit 2: End
- "ldr q0, [x8, #0x0]\n"
+ "ldr q0, [x17, #0x0]\n"
"fmla v31.8h, v4.8h, v14.8h\n"
"fmla v28.8h, v0.8h, v5.8h\n"
- "add x20, x14, XZR\n"
+ "add x19, x13, XZR\n"
"fmla v29.8h, v0.8h, v6.8h\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #2, 66f\n"
- "ldr d9, [x20], #0x8\n"
+ "ldr d9, [x19], #0x8\n"
"tbz %x[n_channels], #1, 65f\n"
- "ld1 { v9.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 68f\n"
- "ld1 { v9.h }[6], [x20]\n"
+ "ld1 { v9.h }[6], [x19]\n"
"b 68f\n"
"65:" // Tile loop: Oddments: Load inputs: (4, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 68f\n"
- "ld1 { v9.h }[4], [x20]\n"
+ "ld1 { v9.h }[4], [x19]\n"
"b 68f\n"
"66:" // Tile loop: Oddments: Load inputs: (4, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 67f\n"
- "ldr s9, [x20], #0x4\n"
+ "ldr s9, [x19], #0x4\n"
"tbz %x[n_channels], #0, 68f\n"
- "ld1 { v9.h }[2], [x20]\n"
+ "ld1 { v9.h }[2], [x19]\n"
"b 68f\n"
"67:" // Tile loop: Oddments: Load inputs: (4, 0): Bit 2: Unset: Bit 1: Unset
- "ldr h9, [x20, #0x0]\n"
+ "ldr h9, [x19, #0x0]\n"
"68:" // Tile loop: Oddments: Load inputs: (4, 0): Bit 2: End
"fmla v30.8h, v0.8h, v9.8h\n"
- "add x20, x14, x2\n"
+ "add x19, x13, x3\n"
"tbz %x[n_channels], #2, 70f\n"
- "ldr d13, [x20], #0x8\n"
+ "ldr d13, [x19], #0x8\n"
"tbz %x[n_channels], #1, 69f\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 72f\n"
- "ld1 { v13.h }[6], [x20]\n"
+ "ld1 { v13.h }[6], [x19]\n"
"b 72f\n"
"69:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 72f\n"
- "ld1 { v13.h }[4], [x20]\n"
+ "ld1 { v13.h }[4], [x19]\n"
"b 72f\n"
"70:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 71f\n"
- "ldr s13, [x20], #0x4\n"
+ "ldr s13, [x19], #0x4\n"
"tbz %x[n_channels], #0, 72f\n"
- "ld1 { v13.h }[2], [x20]\n"
+ "ld1 { v13.h }[2], [x19]\n"
"b 72f\n"
"71:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 2: Unset: Bit 1: Unset
- "ldr h13, [x20, #0x0]\n"
+ "ldr h13, [x19, #0x0]\n"
"72:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 2: End
- "ldr q1, [x8, #0x0]\n"
+ "ldr q1, [x17, #0x0]\n"
"fmla v31.8h, v0.8h, v13.8h\n"
"fmla v28.8h, v1.8h, v6.8h\n"
- "add x20, x14, x6\n"
+ "add x19, x13, x7\n"
"fmla v29.8h, v1.8h, v10.8h\n"
"fmla v30.8h, v1.8h, v13.8h\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #2, 74f\n"
- "ldr d5, [x20], #0x8\n"
+ "ldr d5, [x19], #0x8\n"
"tbz %x[n_channels], #1, 73f\n"
- "ld1 { v5.s }[2], [x20], #0x4\n"
+ "ld1 { v5.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 76f\n"
- "ld1 { v5.h }[6], [x20]\n"
+ "ld1 { v5.h }[6], [x19]\n"
"b 76f\n"
"73:" // Tile loop: Oddments: Load inputs: (4, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 76f\n"
- "ld1 { v5.h }[4], [x20]\n"
+ "ld1 { v5.h }[4], [x19]\n"
"b 76f\n"
"74:" // Tile loop: Oddments: Load inputs: (4, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 75f\n"
- "ldr s5, [x20], #0x4\n"
+ "ldr s5, [x19], #0x4\n"
"tbz %x[n_channels], #0, 76f\n"
- "ld1 { v5.h }[2], [x20]\n"
+ "ld1 { v5.h }[2], [x19]\n"
"b 76f\n"
"75:" // Tile loop: Oddments: Load inputs: (4, 2): Bit 2: Unset: Bit 1: Unset
- "ldr h5, [x20, #0x0]\n"
+ "ldr h5, [x19, #0x0]\n"
"76:" // Tile loop: Oddments: Load inputs: (4, 2): Bit 2: End
- "ldr q2, [x8, #0x0]\n"
+ "ldr q2, [x17, #0x0]\n"
"fmla v31.8h, v1.8h, v5.8h\n"
"fmla v28.8h, v2.8h, v10.8h\n"
- "add x20, x14, x15\n"
+ "add x19, x13, x14\n"
"fmla v29.8h, v2.8h, v11.8h\n"
"fmla v30.8h, v2.8h, v5.8h\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #2, 78f\n"
- "ldr d6, [x20], #0x8\n"
+ "ldr d6, [x19], #0x8\n"
"tbz %x[n_channels], #1, 77f\n"
- "ld1 { v6.s }[2], [x20], #0x4\n"
+ "ld1 { v6.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 80f\n"
- "ld1 { v6.h }[6], [x20]\n"
+ "ld1 { v6.h }[6], [x19]\n"
"b 80f\n"
"77:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 80f\n"
- "ld1 { v6.h }[4], [x20]\n"
+ "ld1 { v6.h }[4], [x19]\n"
"b 80f\n"
"78:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 79f\n"
- "ldr s6, [x20], #0x4\n"
+ "ldr s6, [x19], #0x4\n"
"tbz %x[n_channels], #0, 80f\n"
- "ld1 { v6.h }[2], [x20]\n"
+ "ld1 { v6.h }[2], [x19]\n"
"b 80f\n"
"79:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 2: Unset: Bit 1: Unset
- "ldr h6, [x20, #0x0]\n"
+ "ldr h6, [x19, #0x0]\n"
"80:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 2: End
- "ldr q3, [x8, #0x0]\n"
+ "ldr q3, [x17, #0x0]\n"
"fmla v31.8h, v2.8h, v6.8h\n"
"fmla v28.8h, v3.8h, v11.8h\n"
- "add x20, x14, x13\n"
+ "add x19, x13, x12\n"
"fmla v29.8h, v3.8h, v12.8h\n"
"fmla v30.8h, v3.8h, v6.8h\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #2, 82f\n"
- "ldr d8, [x20], #0x8\n"
+ "ldr d8, [x19], #0x8\n"
"tbz %x[n_channels], #1, 81f\n"
- "ld1 { v8.s }[2], [x20], #0x4\n"
+ "ld1 { v8.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 84f\n"
- "ld1 { v8.h }[6], [x20]\n"
+ "ld1 { v8.h }[6], [x19]\n"
"b 84f\n"
"81:" // Tile loop: Oddments: Load inputs: (4, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 84f\n"
- "ld1 { v8.h }[4], [x20]\n"
+ "ld1 { v8.h }[4], [x19]\n"
"b 84f\n"
"82:" // Tile loop: Oddments: Load inputs: (4, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 83f\n"
- "ldr s8, [x20], #0x4\n"
+ "ldr s8, [x19], #0x4\n"
"tbz %x[n_channels], #0, 84f\n"
- "ld1 { v8.h }[2], [x20]\n"
+ "ld1 { v8.h }[2], [x19]\n"
"b 84f\n"
"83:" // Tile loop: Oddments: Load inputs: (4, 4): Bit 2: Unset: Bit 1: Unset
- "ldr h8, [x20, #0x0]\n"
+ "ldr h8, [x19, #0x0]\n"
"84:" // Tile loop: Oddments: Load inputs: (4, 4): Bit 2: End
- "ldr q4, [x8, #0x0]\n"
+ "ldr q4, [x17, #0x0]\n"
"fmla v31.8h, v3.8h, v8.8h\n"
"fmla v28.8h, v4.8h, v12.8h\n"
- "add x20, x14, x11\n"
+ "add x19, x13, x10\n"
"fmla v29.8h, v4.8h, v14.8h\n"
"fmla v30.8h, v4.8h, v8.8h\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #2, 86f\n"
- "ldr d10, [x20], #0x8\n"
+ "ldr d10, [x19], #0x8\n"
"tbz %x[n_channels], #1, 85f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 88f\n"
- "ld1 { v10.h }[6], [x20]\n"
+ "ld1 { v10.h }[6], [x19]\n"
"b 88f\n"
"85:" // Tile loop: Oddments: Load inputs: (4, 5): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 88f\n"
- "ld1 { v10.h }[4], [x20]\n"
+ "ld1 { v10.h }[4], [x19]\n"
"b 88f\n"
"86:" // Tile loop: Oddments: Load inputs: (4, 5): Bit 2: Unset
"tbz %x[n_channels], #1, 87f\n"
- "ldr s10, [x20], #0x4\n"
+ "ldr s10, [x19], #0x4\n"
"tbz %x[n_channels], #0, 88f\n"
- "ld1 { v10.h }[2], [x20]\n"
+ "ld1 { v10.h }[2], [x19]\n"
"b 88f\n"
"87:" // Tile loop: Oddments: Load inputs: (4, 5): Bit 2: Unset: Bit 1: Unset
- "ldr h10, [x20, #0x0]\n"
+ "ldr h10, [x19, #0x0]\n"
"88:" // Tile loop: Oddments: Load inputs: (4, 5): Bit 2: End
- "ldr q0, [x8, #0x0]\n"
+ "ldr q0, [x17, #0x0]\n"
"fmla v31.8h, v4.8h, v10.8h\n"
"fmla v28.8h, v0.8h, v9.8h\n"
- "add x20, x12, XZR\n"
+ "add x19, x11, XZR\n"
"fmla v29.8h, v0.8h, v13.8h\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #2, 90f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #1, 89f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 92f\n"
- "ld1 { v11.h }[6], [x20]\n"
+ "ld1 { v11.h }[6], [x19]\n"
"b 92f\n"
"89:" // Tile loop: Oddments: Load inputs: (5, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 92f\n"
- "ld1 { v11.h }[4], [x20]\n"
+ "ld1 { v11.h }[4], [x19]\n"
"b 92f\n"
"90:" // Tile loop: Oddments: Load inputs: (5, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 91f\n"
- "ldr s11, [x20], #0x4\n"
+ "ldr s11, [x19], #0x4\n"
"tbz %x[n_channels], #0, 92f\n"
- "ld1 { v11.h }[2], [x20]\n"
+ "ld1 { v11.h }[2], [x19]\n"
"b 92f\n"
"91:" // Tile loop: Oddments: Load inputs: (5, 0): Bit 2: Unset: Bit 1: Unset
- "ldr h11, [x20, #0x0]\n"
+ "ldr h11, [x19, #0x0]\n"
"92:" // Tile loop: Oddments: Load inputs: (5, 0): Bit 2: End
"fmla v30.8h, v0.8h, v11.8h\n"
- "add x20, x12, x2\n"
+ "add x19, x11, x3\n"
"tbz %x[n_channels], #2, 94f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #1, 93f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 96f\n"
- "ld1 { v12.h }[6], [x20]\n"
+ "ld1 { v12.h }[6], [x19]\n"
"b 96f\n"
"93:" // Tile loop: Oddments: Load inputs: (5, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 96f\n"
- "ld1 { v12.h }[4], [x20]\n"
+ "ld1 { v12.h }[4], [x19]\n"
"b 96f\n"
"94:" // Tile loop: Oddments: Load inputs: (5, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 95f\n"
- "ldr s12, [x20], #0x4\n"
+ "ldr s12, [x19], #0x4\n"
"tbz %x[n_channels], #0, 96f\n"
- "ld1 { v12.h }[2], [x20]\n"
+ "ld1 { v12.h }[2], [x19]\n"
"b 96f\n"
"95:" // Tile loop: Oddments: Load inputs: (5, 1): Bit 2: Unset: Bit 1: Unset
- "ldr h12, [x20, #0x0]\n"
+ "ldr h12, [x19, #0x0]\n"
"96:" // Tile loop: Oddments: Load inputs: (5, 1): Bit 2: End
- "ldr q1, [x8, #0x0]\n"
+ "ldr q1, [x17, #0x0]\n"
"fmla v31.8h, v0.8h, v12.8h\n"
"fmla v28.8h, v1.8h, v13.8h\n"
- "add x20, x12, x6\n"
+ "add x19, x11, x7\n"
"fmla v29.8h, v1.8h, v5.8h\n"
"fmla v30.8h, v1.8h, v12.8h\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #2, 98f\n"
- "ldr d9, [x20], #0x8\n"
+ "ldr d9, [x19], #0x8\n"
"tbz %x[n_channels], #1, 97f\n"
- "ld1 { v9.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 100f\n"
- "ld1 { v9.h }[6], [x20]\n"
+ "ld1 { v9.h }[6], [x19]\n"
"b 100f\n"
"97:" // Tile loop: Oddments: Load inputs: (5, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 100f\n"
- "ld1 { v9.h }[4], [x20]\n"
+ "ld1 { v9.h }[4], [x19]\n"
"b 100f\n"
"98:" // Tile loop: Oddments: Load inputs: (5, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 99f\n"
- "ldr s9, [x20], #0x4\n"
+ "ldr s9, [x19], #0x4\n"
"tbz %x[n_channels], #0, 100f\n"
- "ld1 { v9.h }[2], [x20]\n"
+ "ld1 { v9.h }[2], [x19]\n"
"b 100f\n"
"99:" // Tile loop: Oddments: Load inputs: (5, 2): Bit 2: Unset: Bit 1: Unset
- "ldr h9, [x20, #0x0]\n"
+ "ldr h9, [x19, #0x0]\n"
"100:" // Tile loop: Oddments: Load inputs: (5, 2): Bit 2: End
- "ldr q2, [x8, #0x0]\n"
+ "ldr q2, [x17, #0x0]\n"
"fmla v31.8h, v1.8h, v9.8h\n"
"fmla v28.8h, v2.8h, v5.8h\n"
- "add x20, x12, x15\n"
+ "add x19, x11, x14\n"
"fmla v29.8h, v2.8h, v6.8h\n"
"fmla v30.8h, v2.8h, v9.8h\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #2, 102f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #1, 101f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 104f\n"
- "ld1 { v11.h }[6], [x20]\n"
+ "ld1 { v11.h }[6], [x19]\n"
"b 104f\n"
"101:" // Tile loop: Oddments: Load inputs: (5, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 104f\n"
- "ld1 { v11.h }[4], [x20]\n"
+ "ld1 { v11.h }[4], [x19]\n"
"b 104f\n"
"102:" // Tile loop: Oddments: Load inputs: (5, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 103f\n"
- "ldr s11, [x20], #0x4\n"
+ "ldr s11, [x19], #0x4\n"
"tbz %x[n_channels], #0, 104f\n"
- "ld1 { v11.h }[2], [x20]\n"
+ "ld1 { v11.h }[2], [x19]\n"
"b 104f\n"
"103:" // Tile loop: Oddments: Load inputs: (5, 3): Bit 2: Unset: Bit 1: Unset
- "ldr h11, [x20, #0x0]\n"
+ "ldr h11, [x19, #0x0]\n"
"104:" // Tile loop: Oddments: Load inputs: (5, 3): Bit 2: End
- "ldr q3, [x8, #0x0]\n"
+ "ldr q3, [x17, #0x0]\n"
"fmla v31.8h, v2.8h, v11.8h\n"
"fmla v28.8h, v3.8h, v6.8h\n"
- "add x20, x12, x13\n"
+ "add x19, x11, x12\n"
"fmla v29.8h, v3.8h, v8.8h\n"
"fmla v30.8h, v3.8h, v11.8h\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #2, 106f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #1, 105f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 108f\n"
- "ld1 { v12.h }[6], [x20]\n"
+ "ld1 { v12.h }[6], [x19]\n"
"b 108f\n"
"105:" // Tile loop: Oddments: Load inputs: (5, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 108f\n"
- "ld1 { v12.h }[4], [x20]\n"
+ "ld1 { v12.h }[4], [x19]\n"
"b 108f\n"
"106:" // Tile loop: Oddments: Load inputs: (5, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 107f\n"
- "ldr s12, [x20], #0x4\n"
+ "ldr s12, [x19], #0x4\n"
"tbz %x[n_channels], #0, 108f\n"
- "ld1 { v12.h }[2], [x20]\n"
+ "ld1 { v12.h }[2], [x19]\n"
"b 108f\n"
"107:" // Tile loop: Oddments: Load inputs: (5, 4): Bit 2: Unset: Bit 1: Unset
- "ldr h12, [x20, #0x0]\n"
+ "ldr h12, [x19, #0x0]\n"
"108:" // Tile loop: Oddments: Load inputs: (5, 4): Bit 2: End
- "ldr q4, [x8, #0x0]\n"
+ "ldr q4, [x17, #0x0]\n"
"fmla v31.8h, v3.8h, v12.8h\n"
"fmla v28.8h, v4.8h, v8.8h\n"
- "add x20, x12, x11\n"
+ "add x19, x11, x10\n"
"fmla v29.8h, v4.8h, v10.8h\n"
"fmla v30.8h, v4.8h, v12.8h\n"
"tbz %x[n_channels], #2, 110f\n"
- "ldr d9, [x20], #0x8\n"
+ "ldr d9, [x19], #0x8\n"
"tbz %x[n_channels], #1, 109f\n"
- "ld1 { v9.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 112f\n"
- "ld1 { v9.h }[6], [x20]\n"
+ "ld1 { v9.h }[6], [x19]\n"
"b 112f\n"
"109:" // Tile loop: Oddments: Load inputs: (5, 5): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 112f\n"
- "ld1 { v9.h }[4], [x20]\n"
+ "ld1 { v9.h }[4], [x19]\n"
"b 112f\n"
"110:" // Tile loop: Oddments: Load inputs: (5, 5): Bit 2: Unset
"tbz %x[n_channels], #1, 111f\n"
- "ldr s9, [x20], #0x4\n"
+ "ldr s9, [x19], #0x4\n"
"tbz %x[n_channels], #0, 112f\n"
- "ld1 { v9.h }[2], [x20]\n"
+ "ld1 { v9.h }[2], [x19]\n"
"b 112f\n"
"111:" // Tile loop: Oddments: Load inputs: (5, 5): Bit 2: Unset: Bit 1: Unset
- "ldr h9, [x20, #0x0]\n"
+ "ldr h9, [x19, #0x0]\n"
"112:" // Tile loop: Oddments: Load inputs: (5, 5): Bit 2: End
"fmla v31.8h, v4.8h, v9.8h\n"
"fmax v28.8h, v28.8h, v18.8h\n"
@@ -1303,82 +1303,82 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"fmin v30.8h, v30.8h, v17.8h\n"
"fmin v31.8h, v31.8h, v17.8h\n"
"tbz %x[n_channels], #2, 114f\n"
- "mov x21, x5\n"
- "mov x20, x10\n"
- "st1 { v28.d }[0], [x21], x3\n"
- "st1 { v30.d }[0], [x20], x3\n"
- "add x5, x5, #0x8\n"
- "add x10, x10, #0x8\n"
- "st1 { v29.d }[0], [x21]\n"
- "st1 { v31.d }[0], [x20]\n"
+ "mov x20, x6\n"
+ "mov x19, x9\n"
+ "st1 { v28.d }[0], [x20], x4\n"
+ "add x6, x6, #0x8\n"
+ "add x9, x9, #0x8\n"
+ "st1 { v30.d }[0], [x19], x4\n"
+ "st1 { v29.d }[0], [x20]\n"
+ "st1 { v31.d }[0], [x19]\n"
"tbz %x[n_channels], #1, 113f\n"
- "mov x21, x5\n"
- "mov x20, x10\n"
- "st1 { v28.s }[2], [x21], x3\n"
- "st1 { v30.s }[2], [x20], x3\n"
- "add x5, x5, #0x4\n"
- "add x10, x10, #0x4\n"
- "st1 { v29.s }[2], [x21]\n"
- "st1 { v31.s }[2], [x20]\n"
+ "mov x20, x6\n"
+ "mov x19, x9\n"
+ "st1 { v28.s }[2], [x20], x4\n"
+ "add x6, x6, #0x4\n"
+ "add x9, x9, #0x4\n"
+ "st1 { v30.s }[2], [x19], x4\n"
+ "st1 { v29.s }[2], [x20]\n"
+ "st1 { v31.s }[2], [x19]\n"
"tbz %x[n_channels], #0, 116f\n"
- "mov x21, x5\n"
- "mov x20, x10\n"
- "st1 { v28.h }[6], [x21], x3\n"
- "st1 { v30.h }[6], [x20], x3\n"
- "st1 { v29.h }[6], [x21]\n"
- "st1 { v31.h }[6], [x20]\n"
+ "mov x20, x6\n"
+ "mov x19, x9\n"
+ "st1 { v28.h }[6], [x20], x4\n"
+ "st1 { v30.h }[6], [x19], x4\n"
+ "st1 { v29.h }[6], [x20]\n"
+ "st1 { v31.h }[6], [x19]\n"
"b 116f\n"
"113:" // Tile loop: Oddments: Store: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 116f\n"
- "mov x21, x5\n"
- "mov x20, x10\n"
- "st1 { v28.h }[4], [x21], x3\n"
- "st1 { v30.h }[4], [x20], x3\n"
- "st1 { v29.h }[4], [x21]\n"
- "st1 { v31.h }[4], [x20]\n"
+ "mov x20, x6\n"
+ "mov x19, x9\n"
+ "st1 { v28.h }[4], [x20], x4\n"
+ "st1 { v30.h }[4], [x19], x4\n"
+ "st1 { v29.h }[4], [x20]\n"
+ "st1 { v31.h }[4], [x19]\n"
"b 116f\n"
"114:" // Tile loop: Oddments: Store: Bit 2: Unset
"tbz %x[n_channels], #1, 115f\n"
- "mov x21, x5\n"
- "mov x20, x10\n"
- "st1 { v28.s }[0], [x21], x3\n"
- "st1 { v30.s }[0], [x20], x3\n"
- "add x5, x5, #0x4\n"
- "add x10, x10, #0x4\n"
- "st1 { v29.s }[0], [x21]\n"
- "st1 { v31.s }[0], [x20]\n"
+ "mov x20, x6\n"
+ "mov x19, x9\n"
+ "st1 { v28.s }[0], [x20], x4\n"
+ "st1 { v30.s }[0], [x19], x4\n"
+ "add x6, x6, #0x4\n"
+ "add x9, x9, #0x4\n"
+ "st1 { v29.s }[0], [x20]\n"
+ "st1 { v31.s }[0], [x19]\n"
"tbz %x[n_channels], #0, 116f\n"
- "mov x21, x5\n"
- "mov x20, x10\n"
- "st1 { v28.h }[2], [x21], x3\n"
- "st1 { v30.h }[2], [x20], x3\n"
- "st1 { v29.h }[2], [x21]\n"
- "st1 { v31.h }[2], [x20]\n"
+ "mov x20, x6\n"
+ "mov x19, x9\n"
+ "st1 { v28.h }[2], [x20], x4\n"
+ "st1 { v30.h }[2], [x19], x4\n"
+ "st1 { v29.h }[2], [x20]\n"
+ "st1 { v31.h }[2], [x19]\n"
"b 116f\n"
"115:" // Tile loop: Oddments: Store: Bit 2: Unset: Bit 1: Unset
- "mov x21, x5\n"
- "mov x20, x10\n"
- "st1 { v28.h }[0], [x21], x3\n"
- "st1 { v30.h }[0], [x20], x3\n"
- "st1 { v29.h }[0], [x21]\n"
- "st1 { v31.h }[0], [x20]\n"
+ "mov x20, x6\n"
+ "mov x19, x9\n"
+ "st1 { v28.h }[0], [x20], x4\n"
+ "st1 { v30.h }[0], [x19], x4\n"
+ "st1 { v29.h }[0], [x20]\n"
+ "st1 { v31.h }[0], [x19]\n"
"116:" // Tile loop: Oddments: Store: Bit 2: End
"117:" // Tile loop: End
- "ldr x26, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x27, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "add x26, x26, #0x1\n"
- "add x21, x27, #0x1\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "cmp x26, x20\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "csel x27, x27, x21, LT\n"
- "csel x26, x26, XZR, LT\n"
- "cmp x27, x20\n"
+ "ldr x25, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x26, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "add x25, x25, #0x1\n"
+ "add x20, x26, #0x1\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "cmp x25, x19\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "csel x26, x26, x20, LT\n"
+ "csel x25, x25, XZR, LT\n"
+ "cmp x26, x19\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v28", "v29", "v30", "v31", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v28", "v29", "v30", "v31", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp
index a2791d277e..5b086ec1ff 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -99,422 +99,422 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
__asm__ __volatile__(
"ldr x21, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "mov x17, #0x10\n" // cntb _, ALL, #1
- "lsr x9, %x[n_channels], #0x3\n"
+ "mov x28, #0x10\n" // cntb _, ALL, #1
+ "lsr x27, %x[n_channels], #0x3\n"
"ldr x16, [%x[params_struct], %[offsetof_args_params]]\n"
"add x20, %x[params_struct], %[offsetof_args_min]\n"
+ "add x19, %x[params_struct], %[offsetof_args_max]\n"
+ "ldp x15, x14, [x21, #0x0]\n"
+ "ldp x13, x12, [x21, #0x10]\n"
+ "add x11, %x[params_struct], %[offsetof_Args_inptrs]\n"
"ld1r { v18.8h }, [x20]\n"
- "add x20, %x[params_struct], %[offsetof_args_max]\n"
- "ld1r { v17.8h }, [x20]\n"
- "add x15, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "ldp x14, x13, [x21, #0x0]\n"
- "ldp x12, x11, [x21, #0x10]\n"
+ "ld1r { v17.8h }, [x19]\n"
"mov x10, #0x0\n"
- "sub x28, XZR, x17\n"
- "cbz x9, 3f\n"
+ "sub x9, XZR, x28\n"
+ "cbz x27, 3f\n"
+ "ldp x26, x25, [x11, #0x0]\n"
+ "ldr q5, [x26, x10]\n"
+ "ldr q6, [x25, x10]\n"
+ "ldp x24, x23, [x11, #0x10]\n"
+ "cmp x28, x27, LSL #4\n"
+ "ldp x22, x21, [x11, #0x20]\n"
+ "ldp x20, x19, [x11, #0x30]\n"
+ "ldp x26, x25, [x11, #0x40]\n"
"ldr q16, [x16, #0x0]\n"
"ldr q0, [x16, #0x10]\n"
- "cmp x17, x9, LSL #4\n"
"ldr q1, [x16, #0x20]\n"
"ldr q2, [x16, #0x30]\n"
"ldr q3, [x16, #0x40]\n"
"ldr q4, [x16, #0x50]\n"
+ "ldr q7, [x24, x10]\n"
"add x16, x16, #0x60\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "ldr q5, [x27, x10]\n"
- "ldr q6, [x26, x10]\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "ldr q7, [x25, x10]\n"
- "ldr q8, [x24, x10]\n"
- "ldp x23, x22, [x15, #0x20]\n"
- "ldr q9, [x23, x10]\n"
- "ldr q13, [x22, x10]\n"
- "ldp x21, x20, [x15, #0x30]\n"
- "ldr q11, [x21, x10]\n"
- "ldr q12, [x20, x10]\n"
- "ldp x27, x26, [x15, #0x40]\n"
- "ldr q10, [x27, x10]\n"
- "ldr q14, [x26, x10]\n"
+ "ldr q8, [x23, x10]\n"
+ "ldr q9, [x22, x10]\n"
+ "ldr q13, [x21, x10]\n"
+ "ldr q11, [x20, x10]\n"
+ "ldr q12, [x19, x10]\n"
+ "ldr q10, [x26, x10]\n"
+ "ldr q14, [x25, x10]\n"
"bge 2f\n"
"1:" // Channel loop
"mov v28.16b, v16.16b\n fmla v28.8h, v0.8h, v5.8h\n"
"mov v29.16b, v16.16b\n fmla v29.8h, v0.8h, v6.8h\n"
- "ldr x25, [x15, #0x50]\n"
- "ldr q5, [x25, x10]\n"
+ "ldr x24, [x11, #0x50]\n"
+ "ldr q5, [x24, x10]\n"
"mov v30.16b, v16.16b\n fmla v30.8h, v0.8h, v7.8h\n"
"mov v31.16b, v16.16b\n fmla v31.8h, v0.8h, v8.8h\n"
- "ldr q0, [x16, #0x0]\n"
- "ldr q16, [x16, #0x140]\n"
+ "ldr x23, [x11, #0x58]\n"
+ "ldr x22, [x11, #0x60]\n"
"fmla v28.8h, v1.8h, v6.8h\n"
"fmla v29.8h, v1.8h, v9.8h\n"
- "ldr x24, [x15, #0x58]\n"
- "ldr q6, [x24, x10]\n"
+ "ldr q6, [x23, x10]\n"
+ "ldr x21, [x11, #0x68]\n"
"fmla v30.8h, v1.8h, v8.8h\n"
"fmla v31.8h, v1.8h, v13.8h\n"
- "ldr q1, [x16, #0x10]\n"
- "ldr x23, [x15, #0x60]\n"
+ "ldr q0, [x16, #0x0]\n"
+ "ldr x20, [x11, #0x70]\n"
"fmla v28.8h, v2.8h, v9.8h\n"
- "ldr q9, [x23, x10]\n"
"fmla v29.8h, v2.8h, v11.8h\n"
- "ldr x22, [x15, #0x68]\n"
+ "ldr q9, [x22, x10]\n"
+ "ldr q1, [x16, #0x10]\n"
"fmla v30.8h, v2.8h, v13.8h\n"
"fmla v31.8h, v2.8h, v5.8h\n"
+ "ldr x19, [x11, #0x78]\n"
"ldr q2, [x16, #0x20]\n"
- "ldr x21, [x15, #0x70]\n"
"fmla v28.8h, v3.8h, v11.8h\n"
- "ldr q11, [x22, x10]\n"
"fmla v29.8h, v3.8h, v12.8h\n"
- "ldr x20, [x15, #0x78]\n"
+ "ldr q11, [x21, x10]\n"
+ "ldr x26, [x11, #0x80]\n"
"fmla v30.8h, v3.8h, v5.8h\n"
"fmla v31.8h, v3.8h, v6.8h\n"
"ldr q3, [x16, #0x30]\n"
- "ldr x27, [x15, #0x80]\n"
+ "ldr x25, [x11, #0x88]\n"
"fmla v28.8h, v4.8h, v12.8h\n"
- "ldr q12, [x21, x10]\n"
"fmla v29.8h, v4.8h, v9.8h\n"
- "ldr q9, [x20, x10]\n"
+ "ldr q12, [x20, x10]\n"
+ "ldr q9, [x19, x10]\n"
"fmla v30.8h, v4.8h, v6.8h\n"
"fmla v31.8h, v4.8h, v10.8h\n"
"ldr q4, [x16, #0x40]\n"
- "ldr x26, [x15, #0x88]\n"
+ "ldr x24, [x11, #0x90]\n"
"fmla v28.8h, v0.8h, v7.8h\n"
"fmla v29.8h, v0.8h, v8.8h\n"
- "ldr x25, [x15, #0x90]\n"
- "ldr x24, [x15, #0x98]\n"
+ "ldr x23, [x11, #0x98]\n"
+ "ldr x22, [x11, #0xa0]\n"
"fmla v30.8h, v0.8h, v14.8h\n"
"fmla v31.8h, v0.8h, v11.8h\n"
"ldr q0, [x16, #0x50]\n"
- "ldr x23, [x15, #0xa0]\n"
+ "ldr x21, [x11, #0xa8]\n"
"fmla v28.8h, v1.8h, v8.8h\n"
- "ldr q8, [x26, x10]\n"
"fmla v29.8h, v1.8h, v13.8h\n"
- "ldr x22, [x15, #0xa8]\n"
+ "ldr q8, [x25, x10]\n"
+ "ldr x20, [x11, #0xb0]\n"
"fmla v30.8h, v1.8h, v11.8h\n"
"fmla v31.8h, v1.8h, v12.8h\n"
"ldr q1, [x16, #0x60]\n"
- "ldr x21, [x15, #0xb0]\n"
+ "ldr x19, [x11, #0xb8]\n"
"fmla v28.8h, v2.8h, v13.8h\n"
- "ldr q13, [x27, x10]\n"
"fmla v29.8h, v2.8h, v5.8h\n"
- "ldr x20, [x15, #0xb8]\n"
+ "ldr q13, [x26, x10]\n"
+ "ldr x26, [x11, #0xc0]\n"
"fmla v30.8h, v2.8h, v12.8h\n"
"fmla v31.8h, v2.8h, v9.8h\n"
"ldr q2, [x16, #0x70]\n"
- "ldr x27, [x15, #0xc0]\n"
+ "ldr x25, [x11, #0xc8]\n"
"fmla v28.8h, v3.8h, v5.8h\n"
- "ldr q5, [x25, x10]\n"
"fmla v29.8h, v3.8h, v6.8h\n"
- "ldr x26, [x15, #0xc8]\n"
+ "ldr q5, [x24, x10]\n"
+ "ldr x24, [x11, #0xd0]\n"
"fmla v30.8h, v3.8h, v9.8h\n"
"fmla v31.8h, v3.8h, v13.8h\n"
"ldr q3, [x16, #0x80]\n"
- "ldr x25, [x15, #0xd0]\n"
+ "add x9, x9, #0x10\n"
"fmla v28.8h, v4.8h, v6.8h\n"
- "ldr q6, [x24, x10]\n"
"fmla v29.8h, v4.8h, v10.8h\n"
- "ldr q10, [x23, x10]\n"
+ "ldr q6, [x23, x10]\n"
+ "ldr q10, [x22, x10]\n"
"fmla v30.8h, v4.8h, v13.8h\n"
"fmla v31.8h, v4.8h, v8.8h\n"
"ldr q4, [x16, #0x90]\n"
- "ldr x24, [x15, #0xd8]\n"
+ "ldr x23, [x11, #0xd8]\n"
"fmla v28.8h, v0.8h, v14.8h\n"
- "ldr q14, [x20, x10]\n"
"fmla v29.8h, v0.8h, v11.8h\n"
- "ldr x23, [x15, #0xe0]\n"
+ "ldr q14, [x19, x10]\n"
+ "ldr x22, [x11, #0xe0]\n"
"fmla v30.8h, v0.8h, v5.8h\n"
"fmla v31.8h, v0.8h, v6.8h\n"
"ldr q0, [x16, #0xa0]\n"
- "ldr x20, [x15, #0xf8]\n"
+ "ldr x19, [x11, #0xf8]\n"
"fmla v28.8h, v1.8h, v11.8h\n"
- "ldr q11, [x22, x10]\n"
"fmla v29.8h, v1.8h, v12.8h\n"
- "ldr x22, [x15, #0xe8]\n"
+ "ldr q11, [x21, x10]\n"
+ "ldr x21, [x11, #0xe8]\n"
"fmla v30.8h, v1.8h, v6.8h\n"
"fmla v31.8h, v1.8h, v10.8h\n"
"ldr q1, [x16, #0xb0]\n"
- "add x28, x28, #0x10\n"
+ "ldr q16, [x16, #0x140]\n"
"fmla v28.8h, v2.8h, v12.8h\n"
- "ldr q12, [x21, x10]\n"
"fmla v29.8h, v2.8h, v9.8h\n"
- "ldr x21, [x15, #0xf0]\n"
+ "ldr q12, [x20, x10]\n"
+ "ldr x20, [x11, #0xf0]\n"
"fmla v30.8h, v2.8h, v10.8h\n"
"fmla v31.8h, v2.8h, v11.8h\n"
"ldr q2, [x16, #0xc0]\n"
"fmla v28.8h, v3.8h, v9.8h\n"
- "ldr q9, [x27, x10]\n"
"fmla v29.8h, v3.8h, v13.8h\n"
- "ldr x27, [x15, #0x100]\n"
+ "ldr q9, [x26, x10]\n"
+ "ldr x26, [x11, #0x100]\n"
"fmla v30.8h, v3.8h, v11.8h\n"
"fmla v31.8h, v3.8h, v12.8h\n"
"ldr q3, [x16, #0xd0]\n"
"fmla v28.8h, v4.8h, v13.8h\n"
- "ldr q13, [x26, x10]\n"
"fmla v29.8h, v4.8h, v8.8h\n"
- "ldr q8, [x23, x10]\n"
+ "ldr q13, [x25, x10]\n"
+ "ldr q8, [x22, x10]\n"
"fmla v30.8h, v4.8h, v12.8h\n"
"fmla v31.8h, v4.8h, v14.8h\n"
"ldr q4, [x16, #0xe0]\n"
- "ldr x26, [x15, #0x108]\n"
+ "ldr x25, [x11, #0x108]\n"
"fmla v28.8h, v0.8h, v5.8h\n"
- "ldr q5, [x25, x10]\n"
"fmla v29.8h, v0.8h, v6.8h\n"
- "ldr x25, [x15, #0x110]\n"
+ "ldr q5, [x24, x10]\n"
+ "ldr x24, [x11, #0x110]\n"
"fmla v30.8h, v0.8h, v9.8h\n"
"fmla v31.8h, v0.8h, v13.8h\n"
"ldr q0, [x16, #0xf0]\n"
"fmla v28.8h, v1.8h, v6.8h\n"
- "ldr q6, [x24, x10]\n"
"fmla v29.8h, v1.8h, v10.8h\n"
- "ldr x24, [x15, #0x118]\n"
+ "ldr q6, [x23, x10]\n"
+ "ldr x23, [x11, #0x118]\n"
"fmla v30.8h, v1.8h, v13.8h\n"
"fmla v31.8h, v1.8h, v5.8h\n"
"ldr q1, [x16, #0x100]\n"
"fmla v28.8h, v2.8h, v10.8h\n"
- "ldr q10, [x22, x10]\n"
"fmla v29.8h, v2.8h, v11.8h\n"
+ "ldr q10, [x21, x10]\n"
"fmla v30.8h, v2.8h, v5.8h\n"
"fmla v31.8h, v2.8h, v6.8h\n"
"ldr q2, [x16, #0x110]\n"
"fmla v28.8h, v3.8h, v11.8h\n"
- "ldr q11, [x21, x10]\n"
"fmla v29.8h, v3.8h, v12.8h\n"
+ "ldr q11, [x20, x10]\n"
"fmla v30.8h, v3.8h, v6.8h\n"
"fmla v31.8h, v3.8h, v8.8h\n"
"ldr q3, [x16, #0x120]\n"
"fmla v28.8h, v4.8h, v12.8h\n"
- "ldr q12, [x20, x10]\n"
"fmla v29.8h, v4.8h, v14.8h\n"
+ "ldr q12, [x19, x10]\n"
"fmla v30.8h, v4.8h, v8.8h\n"
"fmla v31.8h, v4.8h, v10.8h\n"
"ldr q4, [x16, #0x130]\n"
"fmla v28.8h, v0.8h, v9.8h\n"
- "ldr q9, [x27, x10]\n"
"fmla v29.8h, v0.8h, v13.8h\n"
+ "ldr q9, [x26, x10]\n"
"fmla v30.8h, v0.8h, v11.8h\n"
- "ldr q11, [x26, x10]\n"
"fmla v31.8h, v0.8h, v12.8h\n"
- "ldr q0, [x16, #0x150]\n"
+ "ldr q11, [x25, x10]\n"
+ "ldp x26, x25, [x11, #0x0]\n"
"fmla v28.8h, v1.8h, v13.8h\n"
"fmla v29.8h, v1.8h, v5.8h\n"
- "ldp x27, x26, [x15, #0x0]\n"
+ "ldr q0, [x16, #0x150]\n"
"fmla v30.8h, v1.8h, v12.8h\n"
- "ldr q12, [x25, x10]\n"
"fmla v31.8h, v1.8h, v9.8h\n"
+ "ldr q12, [x24, x10]\n"
"ldr q1, [x16, #0x160]\n"
"fmla v28.8h, v2.8h, v5.8h\n"
- "ldr q5, [x27, x17]\n"
"fmla v29.8h, v2.8h, v6.8h\n"
+ "ldr q5, [x26, x28]\n"
"fmla v30.8h, v2.8h, v9.8h\n"
- "ldr q9, [x24, x10]\n"
"fmla v31.8h, v2.8h, v11.8h\n"
- "ldr q2, [x16, #0x170]\n"
+ "ldr q9, [x23, x10]\n"
+ "ldp x24, x23, [x11, #0x10]\n"
"fmla v28.8h, v3.8h, v6.8h\n"
- "ldr q6, [x26, x17]\n"
"fmla v29.8h, v3.8h, v8.8h\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "ldr q7, [x25, x17]\n"
+ "ldr q6, [x25, x28]\n"
+ "ldp x22, x21, [x11, #0x20]\n"
"fmla v30.8h, v3.8h, v11.8h\n"
"fmla v31.8h, v3.8h, v12.8h\n"
- "ldr q3, [x16, #0x180]\n"
+ "ldp x20, x19, [x11, #0x30]\n"
+ "ldp x26, x25, [x11, #0x40]\n"
"fmla v28.8h, v4.8h, v8.8h\n"
- "ldr q8, [x24, x17]\n"
"fmla v29.8h, v4.8h, v10.8h\n"
- "ldp x23, x22, [x15, #0x20]\n"
- "ldr q13, [x22, x17]\n"
+ "fmax v28.8h, v28.8h, v18.8h\n"
+ "ldr q7, [x24, x28]\n"
"fmla v30.8h, v4.8h, v12.8h\n"
"fmla v31.8h, v4.8h, v9.8h\n"
- "ldr q9, [x23, x17]\n"
- "ldr q4, [x16, #0x190]\n"
- "ldp x21, x20, [x15, #0x30]\n"
- "fmax v28.8h, v28.8h, v18.8h\n"
"fmax v29.8h, v29.8h, v18.8h\n"
- "ldr q11, [x21, x17]\n"
- "ldr q12, [x20, x17]\n"
+ "ldr q8, [x23, x28]\n"
"fmax v30.8h, v30.8h, v18.8h\n"
"fmax v31.8h, v31.8h, v18.8h\n"
- "ldp x27, x26, [x15, #0x40]\n"
- "ldr q10, [x27, x17]\n"
+ "ldr q9, [x22, x28]\n"
+ "ldr q13, [x21, x28]\n"
+ "ldr q11, [x20, x28]\n"
+ "ldr q12, [x19, x28]\n"
"fmin v28.8h, v28.8h, v17.8h\n"
"fmin v29.8h, v29.8h, v17.8h\n"
- "ldr q14, [x26, x17]\n"
- "add x17, x17, #0x10\n"
- "cmp x17, x9, LSL #4\n"
+ "ldr q10, [x26, x28]\n"
+ "ldr q14, [x25, x28]\n"
+ "add x28, x28, #0x10\n"
+ "cmp x28, x27, LSL #4\n"
"fmin v30.8h, v30.8h, v17.8h\n"
"fmin v31.8h, v31.8h, v17.8h\n"
"add x10, x10, #0x10\n"
- "str q28, [x14, x28]\n"
+ "str q28, [x15, x9]\n"
+ "str q29, [x14, x9]\n"
+ "ldr q2, [x16, #0x170]\n"
+ "ldr q3, [x16, #0x180]\n"
+ "str q30, [x13, x9]\n"
+ "ldr q4, [x16, #0x190]\n"
"add x16, x16, #0x1a0\n"
- "str q29, [x13, x28]\n"
- "str q30, [x12, x28]\n"
- "str q31, [x11, x28]\n"
+ "str q31, [x12, x9]\n"
"blt 1b\n"
"2:" // Channel tail
"mov v28.16b, v16.16b\n fmla v28.8h, v0.8h, v5.8h\n"
"mov v29.16b, v16.16b\n fmla v29.8h, v0.8h, v6.8h\n"
- "ldr x25, [x15, #0x50]\n"
- "ldr q5, [x25, x10]\n"
+ "ldr x24, [x11, #0x50]\n"
+ "ldr q5, [x24, x10]\n"
"mov v30.16b, v16.16b\n fmla v30.8h, v0.8h, v7.8h\n"
"mov v31.16b, v16.16b\n fmla v31.8h, v0.8h, v8.8h\n"
- "ldr q0, [x16, #0x0]\n"
- "ldr x24, [x15, #0x58]\n"
+ "ldr x23, [x11, #0x58]\n"
+ "ldr x22, [x11, #0x60]\n"
"fmla v28.8h, v1.8h, v6.8h\n"
- "ldr q6, [x24, x10]\n"
"fmla v29.8h, v1.8h, v9.8h\n"
- "ldr x23, [x15, #0x60]\n"
+ "ldr q6, [x23, x10]\n"
+ "ldr x21, [x11, #0x68]\n"
"fmla v30.8h, v1.8h, v8.8h\n"
"fmla v31.8h, v1.8h, v13.8h\n"
- "ldr q1, [x16, #0x10]\n"
- "ldr x22, [x15, #0x68]\n"
+ "ldr q0, [x16, #0x0]\n"
+ "ldr x20, [x11, #0x70]\n"
"fmla v28.8h, v2.8h, v9.8h\n"
- "ldr q9, [x23, x10]\n"
"fmla v29.8h, v2.8h, v11.8h\n"
- "ldr x21, [x15, #0x70]\n"
+ "ldr q9, [x22, x10]\n"
+ "ldr q1, [x16, #0x10]\n"
"fmla v30.8h, v2.8h, v13.8h\n"
"fmla v31.8h, v2.8h, v5.8h\n"
+ "ldr x19, [x11, #0x78]\n"
"ldr q2, [x16, #0x20]\n"
- "ldr x20, [x15, #0x78]\n"
"fmla v28.8h, v3.8h, v11.8h\n"
- "ldr q11, [x22, x10]\n"
"fmla v29.8h, v3.8h, v12.8h\n"
- "ldr x27, [x15, #0x80]\n"
+ "ldr q11, [x21, x10]\n"
+ "ldr x26, [x11, #0x80]\n"
"fmla v30.8h, v3.8h, v5.8h\n"
"fmla v31.8h, v3.8h, v6.8h\n"
"ldr q3, [x16, #0x30]\n"
- "ldr x26, [x15, #0x88]\n"
+ "ldr x25, [x11, #0x88]\n"
"fmla v28.8h, v4.8h, v12.8h\n"
- "ldr q12, [x21, x10]\n"
"fmla v29.8h, v4.8h, v9.8h\n"
- "ldr q9, [x20, x10]\n"
+ "ldr q12, [x20, x10]\n"
+ "ldr q9, [x19, x10]\n"
"fmla v30.8h, v4.8h, v6.8h\n"
"fmla v31.8h, v4.8h, v10.8h\n"
"ldr q4, [x16, #0x40]\n"
- "ldr x25, [x15, #0x90]\n"
+ "ldr x24, [x11, #0x90]\n"
"fmla v28.8h, v0.8h, v7.8h\n"
"fmla v29.8h, v0.8h, v8.8h\n"
- "ldr x24, [x15, #0x98]\n"
- "ldr x23, [x15, #0xa0]\n"
+ "ldr x23, [x11, #0x98]\n"
+ "ldr x22, [x11, #0xa0]\n"
"fmla v30.8h, v0.8h, v14.8h\n"
"fmla v31.8h, v0.8h, v11.8h\n"
"ldr q0, [x16, #0x50]\n"
- "ldr x22, [x15, #0xa8]\n"
+ "ldr x21, [x11, #0xa8]\n"
"fmla v28.8h, v1.8h, v8.8h\n"
- "ldr q8, [x26, x10]\n"
"fmla v29.8h, v1.8h, v13.8h\n"
- "ldr x21, [x15, #0xb0]\n"
+ "ldr q8, [x25, x10]\n"
+ "ldr x20, [x11, #0xb0]\n"
"fmla v30.8h, v1.8h, v11.8h\n"
"fmla v31.8h, v1.8h, v12.8h\n"
"ldr q1, [x16, #0x60]\n"
- "ldr x20, [x15, #0xb8]\n"
+ "ldr x19, [x11, #0xb8]\n"
"fmla v28.8h, v2.8h, v13.8h\n"
- "ldr q13, [x27, x10]\n"
"fmla v29.8h, v2.8h, v5.8h\n"
- "ldr x27, [x15, #0xc0]\n"
+ "ldr q13, [x26, x10]\n"
+ "ldr x26, [x11, #0xc0]\n"
"fmla v30.8h, v2.8h, v12.8h\n"
"fmla v31.8h, v2.8h, v9.8h\n"
"ldr q2, [x16, #0x70]\n"
- "ldr x26, [x15, #0xc8]\n"
+ "ldr x25, [x11, #0xc8]\n"
"fmla v28.8h, v3.8h, v5.8h\n"
- "ldr q5, [x25, x10]\n"
"fmla v29.8h, v3.8h, v6.8h\n"
- "ldr x25, [x15, #0xd0]\n"
+ "ldr q5, [x24, x10]\n"
+ "ldr x24, [x11, #0xd0]\n"
"fmla v30.8h, v3.8h, v9.8h\n"
"fmla v31.8h, v3.8h, v13.8h\n"
"ldr q3, [x16, #0x80]\n"
- "add x28, x28, #0x10\n"
+ "add x9, x9, #0x10\n"
"fmla v28.8h, v4.8h, v6.8h\n"
- "ldr q6, [x24, x10]\n"
"fmla v29.8h, v4.8h, v10.8h\n"
- "ldr q10, [x23, x10]\n"
+ "ldr q6, [x23, x10]\n"
+ "ldr q10, [x22, x10]\n"
"fmla v30.8h, v4.8h, v13.8h\n"
"fmla v31.8h, v4.8h, v8.8h\n"
"ldr q4, [x16, #0x90]\n"
- "ldr x24, [x15, #0xd8]\n"
+ "ldr x23, [x11, #0xd8]\n"
"fmla v28.8h, v0.8h, v14.8h\n"
- "ldr q14, [x20, x10]\n"
"fmla v29.8h, v0.8h, v11.8h\n"
- "ldr x23, [x15, #0xe0]\n"
+ "ldr q14, [x19, x10]\n"
+ "ldr x22, [x11, #0xe0]\n"
"fmla v30.8h, v0.8h, v5.8h\n"
"fmla v31.8h, v0.8h, v6.8h\n"
"ldr q0, [x16, #0xa0]\n"
- "ldr x20, [x15, #0xf8]\n"
+ "ldr x19, [x11, #0xf8]\n"
"fmla v28.8h, v1.8h, v11.8h\n"
- "ldr q11, [x22, x10]\n"
"fmla v29.8h, v1.8h, v12.8h\n"
- "ldr x22, [x15, #0xe8]\n"
+ "ldr q11, [x21, x10]\n"
+ "ldr x21, [x11, #0xe8]\n"
"fmla v30.8h, v1.8h, v6.8h\n"
"fmla v31.8h, v1.8h, v10.8h\n"
"ldr q1, [x16, #0xb0]\n"
"fmla v28.8h, v2.8h, v12.8h\n"
- "ldr q12, [x21, x10]\n"
"fmla v29.8h, v2.8h, v9.8h\n"
- "ldr x21, [x15, #0xf0]\n"
+ "ldr q12, [x20, x10]\n"
+ "ldr x20, [x11, #0xf0]\n"
"fmla v30.8h, v2.8h, v10.8h\n"
"fmla v31.8h, v2.8h, v11.8h\n"
"ldr q2, [x16, #0xc0]\n"
"fmla v28.8h, v3.8h, v9.8h\n"
- "ldr q9, [x27, x10]\n"
"fmla v29.8h, v3.8h, v13.8h\n"
- "ldr x27, [x15, #0x100]\n"
+ "ldr q9, [x26, x10]\n"
+ "ldr x26, [x11, #0x100]\n"
"fmla v30.8h, v3.8h, v11.8h\n"
"fmla v31.8h, v3.8h, v12.8h\n"
"ldr q3, [x16, #0xd0]\n"
"fmla v28.8h, v4.8h, v13.8h\n"
- "ldr q13, [x26, x10]\n"
"fmla v29.8h, v4.8h, v8.8h\n"
- "ldr q8, [x23, x10]\n"
+ "ldr q13, [x25, x10]\n"
+ "ldr q8, [x22, x10]\n"
"fmla v30.8h, v4.8h, v12.8h\n"
"fmla v31.8h, v4.8h, v14.8h\n"
"ldr q4, [x16, #0xe0]\n"
- "ldr x26, [x15, #0x108]\n"
+ "ldr x25, [x11, #0x108]\n"
"fmla v28.8h, v0.8h, v5.8h\n"
- "ldr q5, [x25, x10]\n"
"fmla v29.8h, v0.8h, v6.8h\n"
- "ldr x25, [x15, #0x110]\n"
+ "ldr q5, [x24, x10]\n"
+ "ldr x24, [x11, #0x110]\n"
"fmla v30.8h, v0.8h, v9.8h\n"
"fmla v31.8h, v0.8h, v13.8h\n"
"ldr q0, [x16, #0xf0]\n"
"fmla v28.8h, v1.8h, v6.8h\n"
- "ldr q6, [x24, x10]\n"
"fmla v29.8h, v1.8h, v10.8h\n"
- "ldr x24, [x15, #0x118]\n"
+ "ldr q6, [x23, x10]\n"
+ "ldr x23, [x11, #0x118]\n"
"fmla v30.8h, v1.8h, v13.8h\n"
"fmla v31.8h, v1.8h, v5.8h\n"
"ldr q1, [x16, #0x100]\n"
"fmla v28.8h, v2.8h, v10.8h\n"
- "ldr q10, [x22, x10]\n"
"fmla v29.8h, v2.8h, v11.8h\n"
+ "ldr q10, [x21, x10]\n"
"fmla v30.8h, v2.8h, v5.8h\n"
"fmla v31.8h, v2.8h, v6.8h\n"
"ldr q2, [x16, #0x110]\n"
"fmla v28.8h, v3.8h, v11.8h\n"
- "ldr q11, [x21, x10]\n"
"fmla v29.8h, v3.8h, v12.8h\n"
+ "ldr q11, [x20, x10]\n"
"fmla v30.8h, v3.8h, v6.8h\n"
"fmla v31.8h, v3.8h, v8.8h\n"
"ldr q3, [x16, #0x120]\n"
"fmla v28.8h, v4.8h, v12.8h\n"
- "ldr q12, [x20, x10]\n"
"fmla v29.8h, v4.8h, v14.8h\n"
+ "ldr q12, [x19, x10]\n"
"fmla v30.8h, v4.8h, v8.8h\n"
"fmla v31.8h, v4.8h, v10.8h\n"
"ldr q4, [x16, #0x130]\n"
"add x16, x16, #0x140\n"
"fmla v28.8h, v0.8h, v9.8h\n"
- "ldr q9, [x27, x10]\n"
"fmla v29.8h, v0.8h, v13.8h\n"
+ "ldr q9, [x26, x10]\n"
"fmla v30.8h, v0.8h, v11.8h\n"
- "ldr q11, [x26, x10]\n"
"fmla v31.8h, v0.8h, v12.8h\n"
+ "ldr q11, [x25, x10]\n"
"fmla v28.8h, v1.8h, v13.8h\n"
"fmla v29.8h, v1.8h, v5.8h\n"
"fmla v30.8h, v1.8h, v12.8h\n"
- "ldr q12, [x25, x10]\n"
"fmla v31.8h, v1.8h, v9.8h\n"
+ "ldr q12, [x24, x10]\n"
"fmla v28.8h, v2.8h, v5.8h\n"
"fmla v29.8h, v2.8h, v6.8h\n"
"fmla v30.8h, v2.8h, v9.8h\n"
- "ldr q9, [x24, x10]\n"
"fmla v31.8h, v2.8h, v11.8h\n"
+ "ldr q9, [x23, x10]\n"
"add x10, x10, #0x10\n"
"fmla v28.8h, v3.8h, v6.8h\n"
"fmla v29.8h, v3.8h, v8.8h\n"
@@ -530,134 +530,134 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"fmax v31.8h, v31.8h, v18.8h\n"
"fmin v28.8h, v28.8h, v17.8h\n"
"fmin v29.8h, v29.8h, v17.8h\n"
- "str q28, [x14, x28]\n"
+ "str q28, [x15, x9]\n"
"fmin v30.8h, v30.8h, v17.8h\n"
"fmin v31.8h, v31.8h, v17.8h\n"
- "str q29, [x13, x28]\n"
- "str q30, [x12, x28]\n"
- "str q31, [x11, x28]\n"
+ "str q29, [x14, x9]\n"
+ "str q30, [x13, x9]\n"
+ "str q31, [x12, x9]\n"
"3:" // Oddments
"tst %x[n_channels], #0x7\n"
"beq 116f\n"
- "ldr q16, [x16, #0x0]\n"
- "ldr q0, [x16, #0x10]\n"
- "mov x28, x10\n"
- "add x14, x14, x28\n"
- "ldr q1, [x16, #0x20]\n"
- "ldr q2, [x16, #0x30]\n"
- "add x13, x13, x28\n"
- "add x12, x12, x28\n"
- "ldr q3, [x16, #0x40]\n"
- "ldr q4, [x16, #0x50]\n"
- "add x11, x11, x28\n"
- "ldr x9, [x15, #0x0]\n"
- "ldr x28, [x15, #0x8]\n"
- "add x9, x9, x10\n"
+ "mov x9, x10\n"
+ "ldr x28, [x11, #0x0]\n"
+ "ldr x27, [x11, #0x8]\n"
+ "ldr x26, [x11, #0x10]\n"
+ "add x15, x15, x9\n"
+ "add x14, x14, x9\n"
+ "ldr x25, [x11, #0x18]\n"
+ "ldr x24, [x11, #0x20]\n"
+ "add x13, x13, x9\n"
+ "add x12, x12, x9\n"
+ "ldr x23, [x11, #0x28]\n"
+ "ldr x22, [x11, #0x30]\n"
"add x28, x28, x10\n"
- "ldr x27, [x15, #0x10]\n"
- "ldr x26, [x15, #0x18]\n"
"add x27, x27, x10\n"
+ "ldr x21, [x11, #0x38]\n"
+ "ldr x20, [x11, #0x40]\n"
"add x26, x26, x10\n"
- "ldr x25, [x15, #0x20]\n"
- "ldr x24, [x15, #0x28]\n"
"add x25, x25, x10\n"
+ "ldr x19, [x11, #0x48]\n"
+ "ldr q16, [x16, #0x0]\n"
"add x24, x24, x10\n"
- "ldr x23, [x15, #0x30]\n"
- "ldr x22, [x15, #0x38]\n"
"add x23, x23, x10\n"
+ "ldr q0, [x16, #0x10]\n"
+ "ldr q1, [x16, #0x20]\n"
"add x22, x22, x10\n"
- "ldr x21, [x15, #0x40]\n"
- "ldr x20, [x15, #0x48]\n"
"add x21, x21, x10\n"
+ "ldr q2, [x16, #0x30]\n"
+ "ldr q3, [x16, #0x40]\n"
"add x20, x20, x10\n"
+ "add x19, x19, x10\n"
+ "ldr q4, [x16, #0x50]\n"
"add x16, x16, #0x60\n"
"tbz %x[n_channels], #2, 5f\n"
- "ld1 { v5.d }[0], [x9], #0x8\n"
- "ld1 { v6.d }[0], [x28], #0x8\n"
- "ld1 { v7.d }[0], [x27], #0x8\n"
- "ld1 { v8.d }[0], [x26], #0x8\n"
- "ld1 { v9.d }[0], [x25], #0x8\n"
- "ld1 { v13.d }[0], [x24], #0x8\n"
- "ld1 { v11.d }[0], [x23], #0x8\n"
- "ld1 { v12.d }[0], [x22], #0x8\n"
- "ld1 { v10.d }[0], [x21], #0x8\n"
- "ld1 { v14.d }[0], [x20], #0x8\n"
+ "ld1 { v5.d }[0], [x28], #0x8\n"
+ "ld1 { v6.d }[0], [x27], #0x8\n"
+ "ld1 { v7.d }[0], [x26], #0x8\n"
+ "ld1 { v8.d }[0], [x25], #0x8\n"
+ "ld1 { v9.d }[0], [x24], #0x8\n"
+ "ld1 { v13.d }[0], [x23], #0x8\n"
+ "ld1 { v11.d }[0], [x22], #0x8\n"
+ "ld1 { v12.d }[0], [x21], #0x8\n"
+ "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v14.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 4f\n"
- "ld1 { v5.s }[2], [x9], #0x4\n"
- "ld1 { v6.s }[2], [x28], #0x4\n"
- "ld1 { v7.s }[2], [x27], #0x4\n"
- "ld1 { v8.s }[2], [x26], #0x4\n"
- "ld1 { v9.s }[2], [x25], #0x4\n"
- "ld1 { v13.s }[2], [x24], #0x4\n"
- "ld1 { v11.s }[2], [x23], #0x4\n"
- "ld1 { v12.s }[2], [x22], #0x4\n"
- "ld1 { v10.s }[2], [x21], #0x4\n"
- "ld1 { v14.s }[2], [x20], #0x4\n"
+ "ld1 { v5.s }[2], [x28], #0x4\n"
+ "ld1 { v6.s }[2], [x27], #0x4\n"
+ "ld1 { v7.s }[2], [x26], #0x4\n"
+ "ld1 { v8.s }[2], [x25], #0x4\n"
+ "ld1 { v9.s }[2], [x24], #0x4\n"
+ "ld1 { v13.s }[2], [x23], #0x4\n"
+ "ld1 { v11.s }[2], [x22], #0x4\n"
+ "ld1 { v12.s }[2], [x21], #0x4\n"
+ "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v14.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 7f\n"
- "ld1 { v5.h }[6], [x9], #0x2\n"
- "ld1 { v6.h }[6], [x28], #0x2\n"
- "ld1 { v7.h }[6], [x27], #0x2\n"
- "ld1 { v8.h }[6], [x26], #0x2\n"
- "ld1 { v9.h }[6], [x25], #0x2\n"
- "ld1 { v13.h }[6], [x24], #0x2\n"
- "ld1 { v11.h }[6], [x23], #0x2\n"
- "ld1 { v12.h }[6], [x22], #0x2\n"
- "ld1 { v10.h }[6], [x21], #0x2\n"
- "ld1 { v14.h }[6], [x20], #0x2\n"
+ "ld1 { v5.h }[6], [x28], #0x2\n"
+ "ld1 { v6.h }[6], [x27], #0x2\n"
+ "ld1 { v7.h }[6], [x26], #0x2\n"
+ "ld1 { v8.h }[6], [x25], #0x2\n"
+ "ld1 { v9.h }[6], [x24], #0x2\n"
+ "ld1 { v13.h }[6], [x23], #0x2\n"
+ "ld1 { v11.h }[6], [x22], #0x2\n"
+ "ld1 { v12.h }[6], [x21], #0x2\n"
+ "ld1 { v10.h }[6], [x20], #0x2\n"
+ "ld1 { v14.h }[6], [x19], #0x2\n"
"b 7f\n"
"4:" // Oddments: Load inputs (0, 0), (0, 1), (1, 0), (1, 1), (0, 2), (1, 2), (0, 3), (0, 4), (1, 5), (2, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 7f\n"
- "ld1 { v5.h }[4], [x9], #0x2\n"
- "ld1 { v6.h }[4], [x28], #0x2\n"
- "ld1 { v7.h }[4], [x27], #0x2\n"
- "ld1 { v8.h }[4], [x26], #0x2\n"
- "ld1 { v9.h }[4], [x25], #0x2\n"
- "ld1 { v13.h }[4], [x24], #0x2\n"
- "ld1 { v11.h }[4], [x23], #0x2\n"
- "ld1 { v12.h }[4], [x22], #0x2\n"
- "ld1 { v10.h }[4], [x21], #0x2\n"
- "ld1 { v14.h }[4], [x20], #0x2\n"
+ "ld1 { v5.h }[4], [x28], #0x2\n"
+ "ld1 { v6.h }[4], [x27], #0x2\n"
+ "ld1 { v7.h }[4], [x26], #0x2\n"
+ "ld1 { v8.h }[4], [x25], #0x2\n"
+ "ld1 { v9.h }[4], [x24], #0x2\n"
+ "ld1 { v13.h }[4], [x23], #0x2\n"
+ "ld1 { v11.h }[4], [x22], #0x2\n"
+ "ld1 { v12.h }[4], [x21], #0x2\n"
+ "ld1 { v10.h }[4], [x20], #0x2\n"
+ "ld1 { v14.h }[4], [x19], #0x2\n"
"b 7f\n"
"5:" // Oddments: Load inputs (0, 0), (0, 1), (1, 0), (1, 1), (0, 2), (1, 2), (0, 3), (0, 4), (1, 5), (2, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 6f\n"
- "ld1 { v5.s }[0], [x9], #0x4\n"
- "ld1 { v6.s }[0], [x28], #0x4\n"
- "ld1 { v7.s }[0], [x27], #0x4\n"
- "ld1 { v8.s }[0], [x26], #0x4\n"
- "ld1 { v9.s }[0], [x25], #0x4\n"
- "ld1 { v13.s }[0], [x24], #0x4\n"
- "ld1 { v11.s }[0], [x23], #0x4\n"
- "ld1 { v12.s }[0], [x22], #0x4\n"
- "ld1 { v10.s }[0], [x21], #0x4\n"
- "ld1 { v14.s }[0], [x20], #0x4\n"
+ "ld1 { v5.s }[0], [x28], #0x4\n"
+ "ld1 { v6.s }[0], [x27], #0x4\n"
+ "ld1 { v7.s }[0], [x26], #0x4\n"
+ "ld1 { v8.s }[0], [x25], #0x4\n"
+ "ld1 { v9.s }[0], [x24], #0x4\n"
+ "ld1 { v13.s }[0], [x23], #0x4\n"
+ "ld1 { v11.s }[0], [x22], #0x4\n"
+ "ld1 { v12.s }[0], [x21], #0x4\n"
+ "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v14.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 7f\n"
- "ld1 { v5.h }[2], [x9], #0x2\n"
- "ld1 { v6.h }[2], [x28], #0x2\n"
- "ld1 { v7.h }[2], [x27], #0x2\n"
- "ld1 { v8.h }[2], [x26], #0x2\n"
- "ld1 { v9.h }[2], [x25], #0x2\n"
- "ld1 { v13.h }[2], [x24], #0x2\n"
- "ld1 { v11.h }[2], [x23], #0x2\n"
- "ld1 { v12.h }[2], [x22], #0x2\n"
- "ld1 { v10.h }[2], [x21], #0x2\n"
- "ld1 { v14.h }[2], [x20], #0x2\n"
+ "ld1 { v5.h }[2], [x28], #0x2\n"
+ "ld1 { v6.h }[2], [x27], #0x2\n"
+ "ld1 { v7.h }[2], [x26], #0x2\n"
+ "ld1 { v8.h }[2], [x25], #0x2\n"
+ "ld1 { v9.h }[2], [x24], #0x2\n"
+ "ld1 { v13.h }[2], [x23], #0x2\n"
+ "ld1 { v11.h }[2], [x22], #0x2\n"
+ "ld1 { v12.h }[2], [x21], #0x2\n"
+ "ld1 { v10.h }[2], [x20], #0x2\n"
+ "ld1 { v14.h }[2], [x19], #0x2\n"
"b 7f\n"
"6:" // Oddments: Load inputs (0, 0), (0, 1), (1, 0), (1, 1), (0, 2), (1, 2), (0, 3), (0, 4), (1, 5), (2, 0): Bit 2: Unset: Bit 1: Unset
- "ld1 { v5.h }[0], [x9], #0x2\n"
- "ld1 { v6.h }[0], [x28], #0x2\n"
- "ld1 { v7.h }[0], [x27], #0x2\n"
- "ld1 { v8.h }[0], [x26], #0x2\n"
- "ld1 { v9.h }[0], [x25], #0x2\n"
- "ld1 { v13.h }[0], [x24], #0x2\n"
- "ld1 { v11.h }[0], [x23], #0x2\n"
- "ld1 { v12.h }[0], [x22], #0x2\n"
- "ld1 { v10.h }[0], [x21], #0x2\n"
- "ld1 { v14.h }[0], [x20], #0x2\n"
+ "ld1 { v5.h }[0], [x28], #0x2\n"
+ "ld1 { v6.h }[0], [x27], #0x2\n"
+ "ld1 { v7.h }[0], [x26], #0x2\n"
+ "ld1 { v8.h }[0], [x25], #0x2\n"
+ "ld1 { v9.h }[0], [x24], #0x2\n"
+ "ld1 { v13.h }[0], [x23], #0x2\n"
+ "ld1 { v11.h }[0], [x22], #0x2\n"
+ "ld1 { v12.h }[0], [x21], #0x2\n"
+ "ld1 { v10.h }[0], [x20], #0x2\n"
+ "ld1 { v14.h }[0], [x19], #0x2\n"
"7:" // Oddments: Load inputs (0, 0), (0, 1), (1, 0), (1, 1), (0, 2), (1, 2), (0, 3), (0, 4), (1, 5), (2, 0): Bit 2: End
"mov v28.16b, v16.16b\n fmla v28.8h, v0.8h, v5.8h\n"
"mov v29.16b, v16.16b\n fmla v29.8h, v0.8h, v6.8h\n"
- "ldr x20, [x15, #0x50]\n"
- "add x20, x20, x10\n"
+ "ldr x19, [x11, #0x50]\n"
+ "add x19, x19, x10\n"
"mov v30.16b, v16.16b\n fmla v30.8h, v0.8h, v7.8h\n"
"mov v31.16b, v16.16b\n fmla v31.8h, v0.8h, v8.8h\n"
"fmla v28.8h, v1.8h, v6.8h\n"
@@ -668,701 +668,701 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"fmla v29.8h, v2.8h, v11.8h\n"
"fmla v30.8h, v2.8h, v13.8h\n"
"tbz %x[n_channels], #2, 9f\n"
- "ld1 { v5.d }[0], [x20], #0x8\n"
+ "ld1 { v5.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 8f\n"
- "ld1 { v5.s }[2], [x20], #0x4\n"
+ "ld1 { v5.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v5.h }[6], [x20], #0x2\n"
+ "ld1 { v5.h }[6], [x19], #0x2\n"
"b 11f\n"
"8:" // Oddments: Load input (1, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v5.h }[4], [x20], #0x2\n"
+ "ld1 { v5.h }[4], [x19], #0x2\n"
"b 11f\n"
"9:" // Oddments: Load input (1, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 10f\n"
- "ld1 { v5.s }[0], [x20], #0x4\n"
+ "ld1 { v5.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v5.h }[2], [x20], #0x2\n"
+ "ld1 { v5.h }[2], [x19], #0x2\n"
"b 11f\n"
"10:" // Oddments: Load input (1, 3): Bit 2: Unset: Bit 1: Unset
- "ld1 { v5.h }[0], [x20], #0x2\n"
+ "ld1 { v5.h }[0], [x19], #0x2\n"
"11:" // Oddments: Load input (1, 3): Bit 2: End
- "ldr x20, [x15, #0x58]\n"
+ "ldr x19, [x11, #0x58]\n"
"fmla v31.8h, v2.8h, v5.8h\n"
"fmla v28.8h, v3.8h, v11.8h\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"fmla v29.8h, v3.8h, v12.8h\n"
"fmla v30.8h, v3.8h, v5.8h\n"
"tbz %x[n_channels], #2, 13f\n"
- "ld1 { v6.d }[0], [x20], #0x8\n"
+ "ld1 { v6.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 12f\n"
- "ld1 { v6.s }[2], [x20], #0x4\n"
+ "ld1 { v6.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 15f\n"
- "ld1 { v6.h }[6], [x20], #0x2\n"
+ "ld1 { v6.h }[6], [x19], #0x2\n"
"b 15f\n"
"12:" // Oddments: Load input (1, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 15f\n"
- "ld1 { v6.h }[4], [x20], #0x2\n"
+ "ld1 { v6.h }[4], [x19], #0x2\n"
"b 15f\n"
"13:" // Oddments: Load input (1, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 14f\n"
- "ld1 { v6.s }[0], [x20], #0x4\n"
+ "ld1 { v6.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 15f\n"
- "ld1 { v6.h }[2], [x20], #0x2\n"
+ "ld1 { v6.h }[2], [x19], #0x2\n"
"b 15f\n"
"14:" // Oddments: Load input (1, 4): Bit 2: Unset: Bit 1: Unset
- "ld1 { v6.h }[0], [x20], #0x2\n"
+ "ld1 { v6.h }[0], [x19], #0x2\n"
"15:" // Oddments: Load input (1, 4): Bit 2: End
- "ldr x20, [x15, #0x60]\n"
+ "ldr x19, [x11, #0x60]\n"
"fmla v31.8h, v3.8h, v6.8h\n"
"fmla v28.8h, v4.8h, v12.8h\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"tbz %x[n_channels], #2, 17f\n"
- "ld1 { v9.d }[0], [x20], #0x8\n"
+ "ld1 { v9.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 16f\n"
- "ld1 { v9.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v9.h }[6], [x20], #0x2\n"
+ "ld1 { v9.h }[6], [x19], #0x2\n"
"b 19f\n"
"16:" // Oddments: Load input (0, 5): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v9.h }[4], [x20], #0x2\n"
+ "ld1 { v9.h }[4], [x19], #0x2\n"
"b 19f\n"
"17:" // Oddments: Load input (0, 5): Bit 2: Unset
"tbz %x[n_channels], #1, 18f\n"
- "ld1 { v9.s }[0], [x20], #0x4\n"
+ "ld1 { v9.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v9.h }[2], [x20], #0x2\n"
+ "ld1 { v9.h }[2], [x19], #0x2\n"
"b 19f\n"
"18:" // Oddments: Load input (0, 5): Bit 2: Unset: Bit 1: Unset
- "ld1 { v9.h }[0], [x20], #0x2\n"
+ "ld1 { v9.h }[0], [x19], #0x2\n"
"19:" // Oddments: Load input (0, 5): Bit 2: End
- "ldr q0, [x16, #0x0]\n"
"fmla v29.8h, v4.8h, v9.8h\n"
"fmla v30.8h, v4.8h, v6.8h\n"
- "ldr x20, [x15, #0x68]\n"
+ "ldr q0, [x16, #0x0]\n"
+ "ldr x19, [x11, #0x68]\n"
"fmla v31.8h, v4.8h, v10.8h\n"
"fmla v28.8h, v0.8h, v7.8h\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"fmla v29.8h, v0.8h, v8.8h\n"
"fmla v30.8h, v0.8h, v14.8h\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #2, 21f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 20f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v11.h }[6], [x20], #0x2\n"
+ "ld1 { v11.h }[6], [x19], #0x2\n"
"b 23f\n"
"20:" // Oddments: Load input (2, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v11.h }[4], [x20], #0x2\n"
+ "ld1 { v11.h }[4], [x19], #0x2\n"
"b 23f\n"
"21:" // Oddments: Load input (2, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 22f\n"
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v11.h }[2], [x20], #0x2\n"
+ "ld1 { v11.h }[2], [x19], #0x2\n"
"b 23f\n"
"22:" // Oddments: Load input (2, 1): Bit 2: Unset: Bit 1: Unset
- "ld1 { v11.h }[0], [x20], #0x2\n"
+ "ld1 { v11.h }[0], [x19], #0x2\n"
"23:" // Oddments: Load input (2, 1): Bit 2: End
"ldr q1, [x16, #0x0]\n"
- "ldr x20, [x15, #0x70]\n"
+ "ldr x19, [x11, #0x70]\n"
"fmla v31.8h, v0.8h, v11.8h\n"
"fmla v28.8h, v1.8h, v8.8h\n"
"fmla v29.8h, v1.8h, v13.8h\n"
"fmla v30.8h, v1.8h, v11.8h\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #2, 25f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 24f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 27f\n"
- "ld1 { v12.h }[6], [x20], #0x2\n"
+ "ld1 { v12.h }[6], [x19], #0x2\n"
"b 27f\n"
"24:" // Oddments: Load input (2, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 27f\n"
- "ld1 { v12.h }[4], [x20], #0x2\n"
+ "ld1 { v12.h }[4], [x19], #0x2\n"
"b 27f\n"
"25:" // Oddments: Load input (2, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 26f\n"
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 27f\n"
- "ld1 { v12.h }[2], [x20], #0x2\n"
+ "ld1 { v12.h }[2], [x19], #0x2\n"
"b 27f\n"
"26:" // Oddments: Load input (2, 2): Bit 2: Unset: Bit 1: Unset
- "ld1 { v12.h }[0], [x20], #0x2\n"
+ "ld1 { v12.h }[0], [x19], #0x2\n"
"27:" // Oddments: Load input (2, 2): Bit 2: End
"ldr q2, [x16, #0x0]\n"
- "ldr x20, [x15, #0x78]\n"
+ "ldr x19, [x11, #0x78]\n"
"fmla v31.8h, v1.8h, v12.8h\n"
"fmla v28.8h, v2.8h, v13.8h\n"
"fmla v29.8h, v2.8h, v5.8h\n"
"fmla v30.8h, v2.8h, v12.8h\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #2, 29f\n"
- "ld1 { v9.d }[0], [x20], #0x8\n"
+ "ld1 { v9.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 28f\n"
- "ld1 { v9.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 31f\n"
- "ld1 { v9.h }[6], [x20], #0x2\n"
+ "ld1 { v9.h }[6], [x19], #0x2\n"
"b 31f\n"
"28:" // Oddments: Load input (2, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 31f\n"
- "ld1 { v9.h }[4], [x20], #0x2\n"
+ "ld1 { v9.h }[4], [x19], #0x2\n"
"b 31f\n"
"29:" // Oddments: Load input (2, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 30f\n"
- "ld1 { v9.s }[0], [x20], #0x4\n"
+ "ld1 { v9.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 31f\n"
- "ld1 { v9.h }[2], [x20], #0x2\n"
+ "ld1 { v9.h }[2], [x19], #0x2\n"
"b 31f\n"
"30:" // Oddments: Load input (2, 3): Bit 2: Unset: Bit 1: Unset
- "ld1 { v9.h }[0], [x20], #0x2\n"
+ "ld1 { v9.h }[0], [x19], #0x2\n"
"31:" // Oddments: Load input (2, 3): Bit 2: End
"ldr q3, [x16, #0x0]\n"
- "ldr x20, [x15, #0x80]\n"
+ "ldr x19, [x11, #0x80]\n"
"fmla v31.8h, v2.8h, v9.8h\n"
"fmla v28.8h, v3.8h, v5.8h\n"
"fmla v29.8h, v3.8h, v6.8h\n"
"fmla v30.8h, v3.8h, v9.8h\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #2, 33f\n"
- "ld1 { v13.d }[0], [x20], #0x8\n"
+ "ld1 { v13.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 32f\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 35f\n"
- "ld1 { v13.h }[6], [x20], #0x2\n"
+ "ld1 { v13.h }[6], [x19], #0x2\n"
"b 35f\n"
"32:" // Oddments: Load input (2, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 35f\n"
- "ld1 { v13.h }[4], [x20], #0x2\n"
+ "ld1 { v13.h }[4], [x19], #0x2\n"
"b 35f\n"
"33:" // Oddments: Load input (2, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 34f\n"
- "ld1 { v13.s }[0], [x20], #0x4\n"
+ "ld1 { v13.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 35f\n"
- "ld1 { v13.h }[2], [x20], #0x2\n"
+ "ld1 { v13.h }[2], [x19], #0x2\n"
"b 35f\n"
"34:" // Oddments: Load input (2, 4): Bit 2: Unset: Bit 1: Unset
- "ld1 { v13.h }[0], [x20], #0x2\n"
+ "ld1 { v13.h }[0], [x19], #0x2\n"
"35:" // Oddments: Load input (2, 4): Bit 2: End
"ldr q4, [x16, #0x0]\n"
- "ldr x20, [x15, #0x88]\n"
+ "ldr x19, [x11, #0x88]\n"
"fmla v31.8h, v3.8h, v13.8h\n"
"fmla v28.8h, v4.8h, v6.8h\n"
"fmla v29.8h, v4.8h, v10.8h\n"
"fmla v30.8h, v4.8h, v13.8h\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #2, 37f\n"
- "ld1 { v8.d }[0], [x20], #0x8\n"
+ "ld1 { v8.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 36f\n"
- "ld1 { v8.s }[2], [x20], #0x4\n"
+ "ld1 { v8.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 39f\n"
- "ld1 { v8.h }[6], [x20], #0x2\n"
+ "ld1 { v8.h }[6], [x19], #0x2\n"
"b 39f\n"
"36:" // Oddments: Load input (2, 5): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 39f\n"
- "ld1 { v8.h }[4], [x20], #0x2\n"
+ "ld1 { v8.h }[4], [x19], #0x2\n"
"b 39f\n"
"37:" // Oddments: Load input (2, 5): Bit 2: Unset
"tbz %x[n_channels], #1, 38f\n"
- "ld1 { v8.s }[0], [x20], #0x4\n"
+ "ld1 { v8.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 39f\n"
- "ld1 { v8.h }[2], [x20], #0x2\n"
+ "ld1 { v8.h }[2], [x19], #0x2\n"
"b 39f\n"
"38:" // Oddments: Load input (2, 5): Bit 2: Unset: Bit 1: Unset
- "ld1 { v8.h }[0], [x20], #0x2\n"
+ "ld1 { v8.h }[0], [x19], #0x2\n"
"39:" // Oddments: Load input (2, 5): Bit 2: End
"ldr q0, [x16, #0x0]\n"
- "ldr x20, [x15, #0x90]\n"
+ "ldr x19, [x11, #0x90]\n"
"fmla v31.8h, v4.8h, v8.8h\n"
"fmla v28.8h, v0.8h, v14.8h\n"
"fmla v29.8h, v0.8h, v11.8h\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #2, 41f\n"
- "ld1 { v5.d }[0], [x20], #0x8\n"
+ "ld1 { v5.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 40f\n"
- "ld1 { v5.s }[2], [x20], #0x4\n"
+ "ld1 { v5.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 43f\n"
- "ld1 { v5.h }[6], [x20], #0x2\n"
+ "ld1 { v5.h }[6], [x19], #0x2\n"
"b 43f\n"
"40:" // Oddments: Load input (3, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 43f\n"
- "ld1 { v5.h }[4], [x20], #0x2\n"
+ "ld1 { v5.h }[4], [x19], #0x2\n"
"b 43f\n"
"41:" // Oddments: Load input (3, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 42f\n"
- "ld1 { v5.s }[0], [x20], #0x4\n"
+ "ld1 { v5.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 43f\n"
- "ld1 { v5.h }[2], [x20], #0x2\n"
+ "ld1 { v5.h }[2], [x19], #0x2\n"
"b 43f\n"
"42:" // Oddments: Load input (3, 0): Bit 2: Unset: Bit 1: Unset
- "ld1 { v5.h }[0], [x20], #0x2\n"
+ "ld1 { v5.h }[0], [x19], #0x2\n"
"43:" // Oddments: Load input (3, 0): Bit 2: End
- "ldr x20, [x15, #0x98]\n"
+ "ldr x19, [x11, #0x98]\n"
"fmla v30.8h, v0.8h, v5.8h\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"tbz %x[n_channels], #2, 45f\n"
- "ld1 { v6.d }[0], [x20], #0x8\n"
+ "ld1 { v6.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 44f\n"
- "ld1 { v6.s }[2], [x20], #0x4\n"
+ "ld1 { v6.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 47f\n"
- "ld1 { v6.h }[6], [x20], #0x2\n"
+ "ld1 { v6.h }[6], [x19], #0x2\n"
"b 47f\n"
"44:" // Oddments: Load input (3, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 47f\n"
- "ld1 { v6.h }[4], [x20], #0x2\n"
+ "ld1 { v6.h }[4], [x19], #0x2\n"
"b 47f\n"
"45:" // Oddments: Load input (3, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 46f\n"
- "ld1 { v6.s }[0], [x20], #0x4\n"
+ "ld1 { v6.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 47f\n"
- "ld1 { v6.h }[2], [x20], #0x2\n"
+ "ld1 { v6.h }[2], [x19], #0x2\n"
"b 47f\n"
"46:" // Oddments: Load input (3, 1): Bit 2: Unset: Bit 1: Unset
- "ld1 { v6.h }[0], [x20], #0x2\n"
+ "ld1 { v6.h }[0], [x19], #0x2\n"
"47:" // Oddments: Load input (3, 1): Bit 2: End
"ldr q1, [x16, #0x0]\n"
- "ldr x20, [x15, #0xa0]\n"
+ "ldr x19, [x11, #0xa0]\n"
"fmla v31.8h, v0.8h, v6.8h\n"
"fmla v28.8h, v1.8h, v11.8h\n"
"fmla v29.8h, v1.8h, v12.8h\n"
"fmla v30.8h, v1.8h, v6.8h\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #2, 49f\n"
- "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v10.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 48f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 51f\n"
- "ld1 { v10.h }[6], [x20], #0x2\n"
+ "ld1 { v10.h }[6], [x19], #0x2\n"
"b 51f\n"
"48:" // Oddments: Load input (3, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 51f\n"
- "ld1 { v10.h }[4], [x20], #0x2\n"
+ "ld1 { v10.h }[4], [x19], #0x2\n"
"b 51f\n"
"49:" // Oddments: Load input (3, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 50f\n"
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v10.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 51f\n"
- "ld1 { v10.h }[2], [x20], #0x2\n"
+ "ld1 { v10.h }[2], [x19], #0x2\n"
"b 51f\n"
"50:" // Oddments: Load input (3, 2): Bit 2: Unset: Bit 1: Unset
- "ld1 { v10.h }[0], [x20], #0x2\n"
+ "ld1 { v10.h }[0], [x19], #0x2\n"
"51:" // Oddments: Load input (3, 2): Bit 2: End
"ldr q2, [x16, #0x0]\n"
- "ldr x20, [x15, #0xa8]\n"
+ "ldr x19, [x11, #0xa8]\n"
"fmla v31.8h, v1.8h, v10.8h\n"
"fmla v28.8h, v2.8h, v12.8h\n"
"fmla v29.8h, v2.8h, v9.8h\n"
"fmla v30.8h, v2.8h, v10.8h\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #2, 53f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 52f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 55f\n"
- "ld1 { v11.h }[6], [x20], #0x2\n"
+ "ld1 { v11.h }[6], [x19], #0x2\n"
"b 55f\n"
"52:" // Oddments: Load input (3, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 55f\n"
- "ld1 { v11.h }[4], [x20], #0x2\n"
+ "ld1 { v11.h }[4], [x19], #0x2\n"
"b 55f\n"
"53:" // Oddments: Load input (3, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 54f\n"
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 55f\n"
- "ld1 { v11.h }[2], [x20], #0x2\n"
+ "ld1 { v11.h }[2], [x19], #0x2\n"
"b 55f\n"
"54:" // Oddments: Load input (3, 3): Bit 2: Unset: Bit 1: Unset
- "ld1 { v11.h }[0], [x20], #0x2\n"
+ "ld1 { v11.h }[0], [x19], #0x2\n"
"55:" // Oddments: Load input (3, 3): Bit 2: End
"ldr q3, [x16, #0x0]\n"
- "ldr x20, [x15, #0xb0]\n"
+ "ldr x19, [x11, #0xb0]\n"
"fmla v31.8h, v2.8h, v11.8h\n"
"fmla v28.8h, v3.8h, v9.8h\n"
"fmla v29.8h, v3.8h, v13.8h\n"
"fmla v30.8h, v3.8h, v11.8h\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #2, 57f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 56f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 59f\n"
- "ld1 { v12.h }[6], [x20], #0x2\n"
+ "ld1 { v12.h }[6], [x19], #0x2\n"
"b 59f\n"
"56:" // Oddments: Load input (3, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 59f\n"
- "ld1 { v12.h }[4], [x20], #0x2\n"
+ "ld1 { v12.h }[4], [x19], #0x2\n"
"b 59f\n"
"57:" // Oddments: Load input (3, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 58f\n"
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 59f\n"
- "ld1 { v12.h }[2], [x20], #0x2\n"
+ "ld1 { v12.h }[2], [x19], #0x2\n"
"b 59f\n"
"58:" // Oddments: Load input (3, 4): Bit 2: Unset: Bit 1: Unset
- "ld1 { v12.h }[0], [x20], #0x2\n"
+ "ld1 { v12.h }[0], [x19], #0x2\n"
"59:" // Oddments: Load input (3, 4): Bit 2: End
"ldr q4, [x16, #0x0]\n"
- "ldr x20, [x15, #0xb8]\n"
+ "ldr x19, [x11, #0xb8]\n"
"fmla v31.8h, v3.8h, v12.8h\n"
"fmla v28.8h, v4.8h, v13.8h\n"
"fmla v29.8h, v4.8h, v8.8h\n"
"fmla v30.8h, v4.8h, v12.8h\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #2, 61f\n"
- "ld1 { v14.d }[0], [x20], #0x8\n"
+ "ld1 { v14.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 60f\n"
- "ld1 { v14.s }[2], [x20], #0x4\n"
+ "ld1 { v14.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 63f\n"
- "ld1 { v14.h }[6], [x20], #0x2\n"
+ "ld1 { v14.h }[6], [x19], #0x2\n"
"b 63f\n"
"60:" // Oddments: Load input (3, 5): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 63f\n"
- "ld1 { v14.h }[4], [x20], #0x2\n"
+ "ld1 { v14.h }[4], [x19], #0x2\n"
"b 63f\n"
"61:" // Oddments: Load input (3, 5): Bit 2: Unset
"tbz %x[n_channels], #1, 62f\n"
- "ld1 { v14.s }[0], [x20], #0x4\n"
+ "ld1 { v14.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 63f\n"
- "ld1 { v14.h }[2], [x20], #0x2\n"
+ "ld1 { v14.h }[2], [x19], #0x2\n"
"b 63f\n"
"62:" // Oddments: Load input (3, 5): Bit 2: Unset: Bit 1: Unset
- "ld1 { v14.h }[0], [x20], #0x2\n"
+ "ld1 { v14.h }[0], [x19], #0x2\n"
"63:" // Oddments: Load input (3, 5): Bit 2: End
"ldr q0, [x16, #0x0]\n"
- "ldr x20, [x15, #0xc0]\n"
+ "ldr x19, [x11, #0xc0]\n"
"fmla v31.8h, v4.8h, v14.8h\n"
"fmla v28.8h, v0.8h, v5.8h\n"
"fmla v29.8h, v0.8h, v6.8h\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #2, 65f\n"
- "ld1 { v9.d }[0], [x20], #0x8\n"
+ "ld1 { v9.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 64f\n"
- "ld1 { v9.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 67f\n"
- "ld1 { v9.h }[6], [x20], #0x2\n"
+ "ld1 { v9.h }[6], [x19], #0x2\n"
"b 67f\n"
"64:" // Oddments: Load input (4, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 67f\n"
- "ld1 { v9.h }[4], [x20], #0x2\n"
+ "ld1 { v9.h }[4], [x19], #0x2\n"
"b 67f\n"
"65:" // Oddments: Load input (4, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 66f\n"
- "ld1 { v9.s }[0], [x20], #0x4\n"
+ "ld1 { v9.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 67f\n"
- "ld1 { v9.h }[2], [x20], #0x2\n"
+ "ld1 { v9.h }[2], [x19], #0x2\n"
"b 67f\n"
"66:" // Oddments: Load input (4, 0): Bit 2: Unset: Bit 1: Unset
- "ld1 { v9.h }[0], [x20], #0x2\n"
+ "ld1 { v9.h }[0], [x19], #0x2\n"
"67:" // Oddments: Load input (4, 0): Bit 2: End
- "ldr x20, [x15, #0xc8]\n"
+ "ldr x19, [x11, #0xc8]\n"
"fmla v30.8h, v0.8h, v9.8h\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"tbz %x[n_channels], #2, 69f\n"
- "ld1 { v13.d }[0], [x20], #0x8\n"
+ "ld1 { v13.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 68f\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 71f\n"
- "ld1 { v13.h }[6], [x20], #0x2\n"
+ "ld1 { v13.h }[6], [x19], #0x2\n"
"b 71f\n"
"68:" // Oddments: Load input (4, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 71f\n"
- "ld1 { v13.h }[4], [x20], #0x2\n"
+ "ld1 { v13.h }[4], [x19], #0x2\n"
"b 71f\n"
"69:" // Oddments: Load input (4, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 70f\n"
- "ld1 { v13.s }[0], [x20], #0x4\n"
+ "ld1 { v13.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 71f\n"
- "ld1 { v13.h }[2], [x20], #0x2\n"
+ "ld1 { v13.h }[2], [x19], #0x2\n"
"b 71f\n"
"70:" // Oddments: Load input (4, 1): Bit 2: Unset: Bit 1: Unset
- "ld1 { v13.h }[0], [x20], #0x2\n"
+ "ld1 { v13.h }[0], [x19], #0x2\n"
"71:" // Oddments: Load input (4, 1): Bit 2: End
"ldr q1, [x16, #0x0]\n"
- "ldr x20, [x15, #0xd0]\n"
+ "ldr x19, [x11, #0xd0]\n"
"fmla v31.8h, v0.8h, v13.8h\n"
"fmla v28.8h, v1.8h, v6.8h\n"
"fmla v29.8h, v1.8h, v10.8h\n"
"fmla v30.8h, v1.8h, v13.8h\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #2, 73f\n"
- "ld1 { v5.d }[0], [x20], #0x8\n"
+ "ld1 { v5.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 72f\n"
- "ld1 { v5.s }[2], [x20], #0x4\n"
+ "ld1 { v5.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 75f\n"
- "ld1 { v5.h }[6], [x20], #0x2\n"
+ "ld1 { v5.h }[6], [x19], #0x2\n"
"b 75f\n"
"72:" // Oddments: Load input (4, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 75f\n"
- "ld1 { v5.h }[4], [x20], #0x2\n"
+ "ld1 { v5.h }[4], [x19], #0x2\n"
"b 75f\n"
"73:" // Oddments: Load input (4, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 74f\n"
- "ld1 { v5.s }[0], [x20], #0x4\n"
+ "ld1 { v5.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 75f\n"
- "ld1 { v5.h }[2], [x20], #0x2\n"
+ "ld1 { v5.h }[2], [x19], #0x2\n"
"b 75f\n"
"74:" // Oddments: Load input (4, 2): Bit 2: Unset: Bit 1: Unset
- "ld1 { v5.h }[0], [x20], #0x2\n"
+ "ld1 { v5.h }[0], [x19], #0x2\n"
"75:" // Oddments: Load input (4, 2): Bit 2: End
"ldr q2, [x16, #0x0]\n"
- "ldr x20, [x15, #0xd8]\n"
+ "ldr x19, [x11, #0xd8]\n"
"fmla v31.8h, v1.8h, v5.8h\n"
"fmla v28.8h, v2.8h, v10.8h\n"
"fmla v29.8h, v2.8h, v11.8h\n"
"fmla v30.8h, v2.8h, v5.8h\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #2, 77f\n"
- "ld1 { v6.d }[0], [x20], #0x8\n"
+ "ld1 { v6.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 76f\n"
- "ld1 { v6.s }[2], [x20], #0x4\n"
+ "ld1 { v6.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 79f\n"
- "ld1 { v6.h }[6], [x20], #0x2\n"
+ "ld1 { v6.h }[6], [x19], #0x2\n"
"b 79f\n"
"76:" // Oddments: Load input (4, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 79f\n"
- "ld1 { v6.h }[4], [x20], #0x2\n"
+ "ld1 { v6.h }[4], [x19], #0x2\n"
"b 79f\n"
"77:" // Oddments: Load input (4, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 78f\n"
- "ld1 { v6.s }[0], [x20], #0x4\n"
+ "ld1 { v6.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 79f\n"
- "ld1 { v6.h }[2], [x20], #0x2\n"
+ "ld1 { v6.h }[2], [x19], #0x2\n"
"b 79f\n"
"78:" // Oddments: Load input (4, 3): Bit 2: Unset: Bit 1: Unset
- "ld1 { v6.h }[0], [x20], #0x2\n"
+ "ld1 { v6.h }[0], [x19], #0x2\n"
"79:" // Oddments: Load input (4, 3): Bit 2: End
"ldr q3, [x16, #0x0]\n"
- "ldr x20, [x15, #0xe0]\n"
+ "ldr x19, [x11, #0xe0]\n"
"fmla v31.8h, v2.8h, v6.8h\n"
"fmla v28.8h, v3.8h, v11.8h\n"
"fmla v29.8h, v3.8h, v12.8h\n"
"fmla v30.8h, v3.8h, v6.8h\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #2, 81f\n"
- "ld1 { v8.d }[0], [x20], #0x8\n"
+ "ld1 { v8.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 80f\n"
- "ld1 { v8.s }[2], [x20], #0x4\n"
+ "ld1 { v8.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 83f\n"
- "ld1 { v8.h }[6], [x20], #0x2\n"
+ "ld1 { v8.h }[6], [x19], #0x2\n"
"b 83f\n"
"80:" // Oddments: Load input (4, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 83f\n"
- "ld1 { v8.h }[4], [x20], #0x2\n"
+ "ld1 { v8.h }[4], [x19], #0x2\n"
"b 83f\n"
"81:" // Oddments: Load input (4, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 82f\n"
- "ld1 { v8.s }[0], [x20], #0x4\n"
+ "ld1 { v8.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 83f\n"
- "ld1 { v8.h }[2], [x20], #0x2\n"
+ "ld1 { v8.h }[2], [x19], #0x2\n"
"b 83f\n"
"82:" // Oddments: Load input (4, 4): Bit 2: Unset: Bit 1: Unset
- "ld1 { v8.h }[0], [x20], #0x2\n"
+ "ld1 { v8.h }[0], [x19], #0x2\n"
"83:" // Oddments: Load input (4, 4): Bit 2: End
"ldr q4, [x16, #0x0]\n"
- "ldr x20, [x15, #0xe8]\n"
+ "ldr x19, [x11, #0xe8]\n"
"fmla v31.8h, v3.8h, v8.8h\n"
"fmla v28.8h, v4.8h, v12.8h\n"
"fmla v29.8h, v4.8h, v14.8h\n"
"fmla v30.8h, v4.8h, v8.8h\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #2, 85f\n"
- "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v10.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 84f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 87f\n"
- "ld1 { v10.h }[6], [x20], #0x2\n"
+ "ld1 { v10.h }[6], [x19], #0x2\n"
"b 87f\n"
"84:" // Oddments: Load input (4, 5): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 87f\n"
- "ld1 { v10.h }[4], [x20], #0x2\n"
+ "ld1 { v10.h }[4], [x19], #0x2\n"
"b 87f\n"
"85:" // Oddments: Load input (4, 5): Bit 2: Unset
"tbz %x[n_channels], #1, 86f\n"
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v10.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 87f\n"
- "ld1 { v10.h }[2], [x20], #0x2\n"
+ "ld1 { v10.h }[2], [x19], #0x2\n"
"b 87f\n"
"86:" // Oddments: Load input (4, 5): Bit 2: Unset: Bit 1: Unset
- "ld1 { v10.h }[0], [x20], #0x2\n"
+ "ld1 { v10.h }[0], [x19], #0x2\n"
"87:" // Oddments: Load input (4, 5): Bit 2: End
"ldr q0, [x16, #0x0]\n"
- "ldr x20, [x15, #0xf0]\n"
+ "ldr x19, [x11, #0xf0]\n"
"fmla v31.8h, v4.8h, v10.8h\n"
"fmla v28.8h, v0.8h, v9.8h\n"
"fmla v29.8h, v0.8h, v13.8h\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #2, 89f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 88f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 91f\n"
- "ld1 { v11.h }[6], [x20], #0x2\n"
+ "ld1 { v11.h }[6], [x19], #0x2\n"
"b 91f\n"
"88:" // Oddments: Load input (5, 0): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 91f\n"
- "ld1 { v11.h }[4], [x20], #0x2\n"
+ "ld1 { v11.h }[4], [x19], #0x2\n"
"b 91f\n"
"89:" // Oddments: Load input (5, 0): Bit 2: Unset
"tbz %x[n_channels], #1, 90f\n"
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 91f\n"
- "ld1 { v11.h }[2], [x20], #0x2\n"
+ "ld1 { v11.h }[2], [x19], #0x2\n"
"b 91f\n"
"90:" // Oddments: Load input (5, 0): Bit 2: Unset: Bit 1: Unset
- "ld1 { v11.h }[0], [x20], #0x2\n"
+ "ld1 { v11.h }[0], [x19], #0x2\n"
"91:" // Oddments: Load input (5, 0): Bit 2: End
- "ldr x20, [x15, #0xf8]\n"
+ "ldr x19, [x11, #0xf8]\n"
"fmla v30.8h, v0.8h, v11.8h\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"tbz %x[n_channels], #2, 93f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 92f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 95f\n"
- "ld1 { v12.h }[6], [x20], #0x2\n"
+ "ld1 { v12.h }[6], [x19], #0x2\n"
"b 95f\n"
"92:" // Oddments: Load input (5, 1): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 95f\n"
- "ld1 { v12.h }[4], [x20], #0x2\n"
+ "ld1 { v12.h }[4], [x19], #0x2\n"
"b 95f\n"
"93:" // Oddments: Load input (5, 1): Bit 2: Unset
"tbz %x[n_channels], #1, 94f\n"
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 95f\n"
- "ld1 { v12.h }[2], [x20], #0x2\n"
+ "ld1 { v12.h }[2], [x19], #0x2\n"
"b 95f\n"
"94:" // Oddments: Load input (5, 1): Bit 2: Unset: Bit 1: Unset
- "ld1 { v12.h }[0], [x20], #0x2\n"
+ "ld1 { v12.h }[0], [x19], #0x2\n"
"95:" // Oddments: Load input (5, 1): Bit 2: End
"ldr q1, [x16, #0x0]\n"
- "ldr x20, [x15, #0x100]\n"
+ "ldr x19, [x11, #0x100]\n"
"fmla v31.8h, v0.8h, v12.8h\n"
"fmla v28.8h, v1.8h, v13.8h\n"
"fmla v29.8h, v1.8h, v5.8h\n"
"fmla v30.8h, v1.8h, v12.8h\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #2, 97f\n"
- "ld1 { v9.d }[0], [x20], #0x8\n"
+ "ld1 { v9.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 96f\n"
- "ld1 { v9.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 99f\n"
- "ld1 { v9.h }[6], [x20], #0x2\n"
+ "ld1 { v9.h }[6], [x19], #0x2\n"
"b 99f\n"
"96:" // Oddments: Load input (5, 2): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 99f\n"
- "ld1 { v9.h }[4], [x20], #0x2\n"
+ "ld1 { v9.h }[4], [x19], #0x2\n"
"b 99f\n"
"97:" // Oddments: Load input (5, 2): Bit 2: Unset
"tbz %x[n_channels], #1, 98f\n"
- "ld1 { v9.s }[0], [x20], #0x4\n"
+ "ld1 { v9.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 99f\n"
- "ld1 { v9.h }[2], [x20], #0x2\n"
+ "ld1 { v9.h }[2], [x19], #0x2\n"
"b 99f\n"
"98:" // Oddments: Load input (5, 2): Bit 2: Unset: Bit 1: Unset
- "ld1 { v9.h }[0], [x20], #0x2\n"
+ "ld1 { v9.h }[0], [x19], #0x2\n"
"99:" // Oddments: Load input (5, 2): Bit 2: End
"ldr q2, [x16, #0x0]\n"
- "ldr x20, [x15, #0x108]\n"
+ "ldr x19, [x11, #0x108]\n"
"fmla v31.8h, v1.8h, v9.8h\n"
"fmla v28.8h, v2.8h, v5.8h\n"
"fmla v29.8h, v2.8h, v6.8h\n"
"fmla v30.8h, v2.8h, v9.8h\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #2, 101f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 100f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 103f\n"
- "ld1 { v11.h }[6], [x20], #0x2\n"
+ "ld1 { v11.h }[6], [x19], #0x2\n"
"b 103f\n"
"100:" // Oddments: Load input (5, 3): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 103f\n"
- "ld1 { v11.h }[4], [x20], #0x2\n"
+ "ld1 { v11.h }[4], [x19], #0x2\n"
"b 103f\n"
"101:" // Oddments: Load input (5, 3): Bit 2: Unset
"tbz %x[n_channels], #1, 102f\n"
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 103f\n"
- "ld1 { v11.h }[2], [x20], #0x2\n"
+ "ld1 { v11.h }[2], [x19], #0x2\n"
"b 103f\n"
"102:" // Oddments: Load input (5, 3): Bit 2: Unset: Bit 1: Unset
- "ld1 { v11.h }[0], [x20], #0x2\n"
+ "ld1 { v11.h }[0], [x19], #0x2\n"
"103:" // Oddments: Load input (5, 3): Bit 2: End
"ldr q3, [x16, #0x0]\n"
- "ldr x20, [x15, #0x110]\n"
+ "ldr x19, [x11, #0x110]\n"
"fmla v31.8h, v2.8h, v11.8h\n"
"fmla v28.8h, v3.8h, v6.8h\n"
"fmla v29.8h, v3.8h, v8.8h\n"
"fmla v30.8h, v3.8h, v11.8h\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #2, 105f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 104f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 107f\n"
- "ld1 { v12.h }[6], [x20], #0x2\n"
+ "ld1 { v12.h }[6], [x19], #0x2\n"
"b 107f\n"
"104:" // Oddments: Load input (5, 4): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 107f\n"
- "ld1 { v12.h }[4], [x20], #0x2\n"
+ "ld1 { v12.h }[4], [x19], #0x2\n"
"b 107f\n"
"105:" // Oddments: Load input (5, 4): Bit 2: Unset
"tbz %x[n_channels], #1, 106f\n"
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 107f\n"
- "ld1 { v12.h }[2], [x20], #0x2\n"
+ "ld1 { v12.h }[2], [x19], #0x2\n"
"b 107f\n"
"106:" // Oddments: Load input (5, 4): Bit 2: Unset: Bit 1: Unset
- "ld1 { v12.h }[0], [x20], #0x2\n"
+ "ld1 { v12.h }[0], [x19], #0x2\n"
"107:" // Oddments: Load input (5, 4): Bit 2: End
"ldr q4, [x16, #0x0]\n"
- "ldr x20, [x15, #0x118]\n"
+ "ldr x19, [x11, #0x118]\n"
"fmla v31.8h, v3.8h, v12.8h\n"
"fmla v28.8h, v4.8h, v8.8h\n"
"fmla v29.8h, v4.8h, v10.8h\n"
"fmla v30.8h, v4.8h, v12.8h\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"tbz %x[n_channels], #2, 109f\n"
- "ld1 { v9.d }[0], [x20], #0x8\n"
+ "ld1 { v9.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 108f\n"
- "ld1 { v9.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 111f\n"
- "ld1 { v9.h }[6], [x20], #0x2\n"
+ "ld1 { v9.h }[6], [x19], #0x2\n"
"b 111f\n"
"108:" // Oddments: Load input (5, 5): Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 111f\n"
- "ld1 { v9.h }[4], [x20], #0x2\n"
+ "ld1 { v9.h }[4], [x19], #0x2\n"
"b 111f\n"
"109:" // Oddments: Load input (5, 5): Bit 2: Unset
"tbz %x[n_channels], #1, 110f\n"
- "ld1 { v9.s }[0], [x20], #0x4\n"
+ "ld1 { v9.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 111f\n"
- "ld1 { v9.h }[2], [x20], #0x2\n"
+ "ld1 { v9.h }[2], [x19], #0x2\n"
"b 111f\n"
"110:" // Oddments: Load input (5, 5): Bit 2: Unset: Bit 1: Unset
- "ld1 { v9.h }[0], [x20], #0x2\n"
+ "ld1 { v9.h }[0], [x19], #0x2\n"
"111:" // Oddments: Load input (5, 5): Bit 2: End
"fmla v31.8h, v4.8h, v9.8h\n"
"fmax v28.8h, v28.8h, v18.8h\n"
@@ -1374,50 +1374,52 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"fmin v30.8h, v30.8h, v17.8h\n"
"fmin v31.8h, v31.8h, v17.8h\n"
"tbz %x[n_channels], #2, 113f\n"
- "st1 { v28.d }[0], [x14], #0x8\n"
- "st1 { v29.d }[0], [x13], #0x8\n"
- "st1 { v30.d }[0], [x12], #0x8\n"
- "st1 { v31.d }[0], [x11], #0x8\n"
+ "st1 { v28.d }[0], [x15], #0x8\n"
+ "st1 { v29.d }[0], [x14], #0x8\n"
+ "st1 { v30.d }[0], [x13], #0x8\n"
+ "st1 { v31.d }[0], [x12], #0x8\n"
"tbz %x[n_channels], #1, 112f\n"
- "st1 { v28.s }[2], [x14], #0x4\n"
- "st1 { v29.s }[2], [x13], #0x4\n"
- "st1 { v30.s }[2], [x12], #0x4\n"
- "st1 { v31.s }[2], [x11], #0x4\n"
+ "st1 { v28.s }[2], [x15], #0x4\n"
+ "st1 { v29.s }[2], [x14], #0x4\n"
+ "st1 { v30.s }[2], [x13], #0x4\n"
+ "st1 { v31.s }[2], [x12], #0x4\n"
"tbz %x[n_channels], #0, 115f\n"
- "st1 { v28.h }[6], [x14], #0x2\n"
- "st1 { v29.h }[6], [x13], #0x2\n"
- "st1 { v30.h }[6], [x12], #0x2\n"
- "st1 { v31.h }[6], [x11], #0x2\n"
+ "st1 { v28.h }[6], [x15], #0x2\n"
+ "st1 { v29.h }[6], [x14], #0x2\n"
+ "st1 { v30.h }[6], [x13], #0x2\n"
+ "st1 { v31.h }[6], [x12], #0x2\n"
"b 115f\n"
"112:" // Oddments: Store: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 115f\n"
- "st1 { v28.h }[4], [x14], #0x2\n"
- "st1 { v29.h }[4], [x13], #0x2\n"
- "st1 { v30.h }[4], [x12], #0x2\n"
- "st1 { v31.h }[4], [x11], #0x2\n"
+ "st1 { v28.h }[4], [x15], #0x2\n"
+ "st1 { v29.h }[4], [x14], #0x2\n"
+ "st1 { v30.h }[4], [x13], #0x2\n"
+ "st1 { v31.h }[4], [x12], #0x2\n"
"b 115f\n"
"113:" // Oddments: Store: Bit 2: Unset
"tbz %x[n_channels], #1, 114f\n"
- "st1 { v28.s }[0], [x14], #0x4\n"
- "st1 { v29.s }[0], [x13], #0x4\n"
- "st1 { v30.s }[0], [x12], #0x4\n"
- "st1 { v31.s }[0], [x11], #0x4\n"
+ "st1 { v28.s }[0], [x15], #0x4\n"
+ "st1 { v29.s }[0], [x14], #0x4\n"
+ "st1 { v30.s }[0], [x13], #0x4\n"
+ "st1 { v31.s }[0], [x12], #0x4\n"
"tbz %x[n_channels], #0, 115f\n"
- "st1 { v28.h }[2], [x14], #0x2\n"
- "st1 { v29.h }[2], [x13], #0x2\n"
- "st1 { v30.h }[2], [x12], #0x2\n"
- "st1 { v31.h }[2], [x11], #0x2\n"
+ "st1 { v28.h }[2], [x15], #0x2\n"
+ "st1 { v29.h }[2], [x14], #0x2\n"
+ "st1 { v30.h }[2], [x13], #0x2\n"
+ "st1 { v31.h }[2], [x12], #0x2\n"
"b 115f\n"
"114:" // Oddments: Store: Bit 2: Unset: Bit 1: Unset
- "st1 { v28.h }[0], [x14], #0x2\n"
- "st1 { v29.h }[0], [x13], #0x2\n"
- "st1 { v30.h }[0], [x12], #0x2\n"
- "st1 { v31.h }[0], [x11], #0x2\n"
+ "st1 { v28.h }[0], [x15], #0x2\n"
+ "st1 { v29.h }[0], [x14], #0x2\n"
+ "st1 { v30.h }[0], [x13], #0x2\n"
+ "st1 { v31.h }[0], [x12], #0x2\n"
"115:" // Oddments: Store: Bit 2: End
+
"116:" // End
+
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
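
Note on the register renumbering above — a minimal sketch, assuming a hypothetical helper: the restored kernel shifts every pointer register down one slot (x20 becomes x19, x21 becomes x20, and so on) and re-adds "x19" to the clobber list at the end of the asm block. Since x19 is callee-saved under AAPCS64, naming it as a clobber is what makes the compiler preserve the caller's value around the kernel. The same pattern in isolation (scratch_copy is illustrative only, not part of this patch):

    // Sketch only: clobbering the callee-saved x19 inside GCC extended asm.
    // Listing "x19" in the clobber list forces the compiler to save and
    // restore it around the statement, which is what the kernels above
    // rely on for every register in x19..x28 that they scribble on.
    static inline void scratch_copy(const void *src, void *dst)
    {
        __asm__ __volatile__(
            "ldr x19, [%x[src]]\n"   // use x19 as a scratch register
            "str x19, [%x[dst]]\n"
            :
            : [src] "r" (src), [dst] "r" (dst)
            : "cc", "memory", "x19"  // compiler preserves x19 for us
        );
    }
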
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_generic_output9_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_generic_output9_mla_depthfirst/generic.cpp
index 418530fdc4..423ee4190c 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_generic_output9_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_generic_output9_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,475 +44,480 @@ void a64_fp16_nhwc_generic_output9_mla_depthfirst_impl(
const __fp16 minmax_vals[2] = { activation_min, activation_max };
__asm__ __volatile__(
- "ld1r { v2.8h }, [%x[minmax_vals]]\n"
- "lsr x12, %x[n_channels], #0x3\n"
- "add x20, %x[minmax_vals], #0x2\n"
- "ld1r { v1.8h }, [x20]\n"
+ "ld1r { v4.8h }, [%x[minmax_vals]]\n"
+ "add x19, %x[minmax_vals], #0x2\n"
"mov x11, #0x0\n"
- "cbz x12, 5f\n"
+ "ld1r { v3.8h }, [x19]\n"
+ "lsr x10, %x[n_channels], #0x3\n"
+ "cbz x10, 5f\n"
"1:" // Channel loop
- "movi v23.16b, #0x0\n"
+ "movi v25.16b, #0x0\n"
"cbz %x[bias], 2f\n"
- "ldr q23, [%x[bias], x11]\n"
+ "ldr q25, [%x[bias], x11]\n"
"2:" // Channel loop: Load bias: Done
- "ldr q0, [%x[params], #0x0]\n"
- "mov x21, %x[inptrs]\n"
- "ldp x10, x9, [x21], #0x10\n"
- "subs x20, %x[n_points], #0x1\n"
- "ldr q14, [x10, x11]\n"
- "ldr q15, [x9, x11]\n"
- "mov v24.16b, v23.16b\n"
- "mov v25.16b, v23.16b\n"
- "ldp x28, x27, [x21], #0x10\n"
- "ldr q16, [x28, x11]\n"
- "mov v26.16b, v23.16b\n"
- "mov v27.16b, v23.16b\n"
- "ldr q17, [x27, x11]\n"
- "ldp x26, x25, [x21], #0x10\n"
- "mov v28.16b, v23.16b\n"
- "mov v29.16b, v23.16b\n"
- "ldr q18, [x26, x11]\n"
- "ldr q19, [x25, x11]\n"
- "mov v30.16b, v23.16b\n"
- "mov v31.16b, v23.16b\n"
- "ldp x24, x23, [x21], #0x10\n"
- "ldr q20, [x24, x11]\n"
+ "mov v24.16b, v25.16b\n"
+ "ldr q23, [%x[params], #0x0]\n"
+ "mov x20, %x[inptrs]\n"
+ "mov v22.16b, v25.16b\n"
+ "ldp x9, x28, [x20], #0x10\n"
+ "subs x19, %x[n_points], #0x1\n"
+ "mov v21.16b, v25.16b\n"
+ "ldr q2, [x9, x11]\n"
+ "mov v20.16b, v25.16b\n"
"add %x[params], %x[params], #0x10\n"
- "ldr q21, [x23, x11]\n"
- "ldr x22, [x21], #0x8\n"
- "ldr q22, [x22, x11]\n"
+ "mov v19.16b, v25.16b\n"
+ "ldr q1, [x28, x11]\n"
+ "mov v18.16b, v25.16b\n"
+ "ldp x27, x26, [x20], #0x10\n"
+ "mov v17.16b, v25.16b\n"
+ "ldr q0, [x27, x11]\n"
+ "mov v16.16b, v25.16b\n"
+ "ldr q31, [x26, x11]\n"
+ "ldp x25, x24, [x20], #0x10\n"
+ "ldr q30, [x25, x11]\n"
+ "ldr q29, [x24, x11]\n"
+ "ldp x23, x22, [x20], #0x10\n"
+ "ldr q28, [x23, x11]\n"
+ "ldr q27, [x22, x11]\n"
+ "ldr x21, [x20], #0x8\n"
+ "ldr q26, [x21, x11]\n"
"ble 4f\n"
"3:" // Channel loop: Planar loop
- "ldp x10, x9, [x21], #0x10\n"
- "ldp x28, x27, [x21], #0x10\n"
- "subs x20, x20, #0x1\n"
- "fmla v23.8h, v14.8h, v0.8h\n"
- "ldr q14, [x10, x11]\n"
- "ldp x26, x25, [x21], #0x10\n"
- "fmla v24.8h, v15.8h, v0.8h\n"
- "fmla v25.8h, v16.8h, v0.8h\n"
- "ldr q15, [x9, x11]\n"
- "ldr q16, [x28, x11]\n"
- "fmla v26.8h, v17.8h, v0.8h\n"
- "fmla v27.8h, v18.8h, v0.8h\n"
- "ldr q17, [x27, x11]\n"
- "ldr q18, [x26, x11]\n"
- "fmla v28.8h, v19.8h, v0.8h\n"
- "fmla v29.8h, v20.8h, v0.8h\n"
- "ldr q19, [x25, x11]\n"
- "ldp x24, x23, [x21], #0x10\n"
- "fmla v30.8h, v21.8h, v0.8h\n"
- "fmla v31.8h, v22.8h, v0.8h\n"
- "ldr q0, [%x[params], #0x0]\n"
- "ldr q20, [x24, x11]\n"
+ "fmla v25.8h, v2.8h, v23.8h\n"
+ "ldp x9, x28, [x20], #0x10\n"
+ "subs x19, x19, #0x1\n"
+ "fmla v24.8h, v1.8h, v23.8h\n"
+ "ldr q2, [x9, x11]\n"
+ "fmla v22.8h, v0.8h, v23.8h\n"
+ "fmla v21.8h, v31.8h, v23.8h\n"
+ "ldr q1, [x28, x11]\n"
+ "fmla v20.8h, v30.8h, v23.8h\n"
+ "ldp x27, x26, [x20], #0x10\n"
+ "fmla v19.8h, v29.8h, v23.8h\n"
+ "fmla v18.8h, v28.8h, v23.8h\n"
+ "ldr q0, [x27, x11]\n"
+ "fmla v17.8h, v27.8h, v23.8h\n"
+ "fmla v16.8h, v26.8h, v23.8h\n"
+ "ldr q23, [%x[params], #0x0]\n"
"add %x[params], %x[params], #0x10\n"
- "ldr q21, [x23, x11]\n"
- "ldr x22, [x21], #0x8\n"
- "ldr q22, [x22, x11]\n"
+ "ldr q31, [x26, x11]\n"
+ "ldp x25, x24, [x20], #0x10\n"
+ "ldr q30, [x25, x11]\n"
+ "ldr q29, [x24, x11]\n"
+ "ldp x23, x22, [x20], #0x10\n"
+ "ldr q28, [x23, x11]\n"
+ "ldr q27, [x22, x11]\n"
+ "ldr x21, [x20], #0x8\n"
+ "ldr q26, [x21, x11]\n"
"bgt 3b\n"
"4:" // Channel loop: Planar tail
- "fmla v23.8h, v14.8h, v0.8h\n"
- "fmla v24.8h, v15.8h, v0.8h\n"
- "fmax v23.8h, v23.8h, v2.8h\n"
- "ldp x28, x27, [%x[outptrs], #0x0]\n"
- "fmla v25.8h, v16.8h, v0.8h\n"
- "fmla v26.8h, v17.8h, v0.8h\n"
- "fmax v24.8h, v24.8h, v2.8h\n"
- "ldp x26, x25, [%x[outptrs], #0x10]\n"
- "fmla v27.8h, v18.8h, v0.8h\n"
- "fmla v28.8h, v19.8h, v0.8h\n"
- "fmax v25.8h, v25.8h, v2.8h\n"
- "ldp x24, x23, [%x[outptrs], #0x20]\n"
- "fmla v29.8h, v20.8h, v0.8h\n"
- "fmla v30.8h, v21.8h, v0.8h\n"
- "fmax v26.8h, v26.8h, v2.8h\n"
- "ldp x22, x21, [%x[outptrs], #0x30]\n"
- "fmla v31.8h, v22.8h, v0.8h\n"
- "fmax v27.8h, v27.8h, v2.8h\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "fmax v28.8h, v28.8h, v2.8h\n"
- "fmax v29.8h, v29.8h, v2.8h\n"
- "fmax v30.8h, v30.8h, v2.8h\n"
- "fmax v31.8h, v31.8h, v2.8h\n"
- "fmin v23.8h, v23.8h, v1.8h\n"
- "fmin v24.8h, v24.8h, v1.8h\n"
- "str q23, [x28, x11]\n"
- "fmin v25.8h, v25.8h, v1.8h\n"
- "fmin v26.8h, v26.8h, v1.8h\n"
- "str q24, [x27, x11]\n"
- "fmin v27.8h, v27.8h, v1.8h\n"
- "fmin v28.8h, v28.8h, v1.8h\n"
- "str q25, [x26, x11]\n"
- "fmin v29.8h, v29.8h, v1.8h\n"
- "fmin v30.8h, v30.8h, v1.8h\n"
- "str q26, [x25, x11]\n"
- "fmin v31.8h, v31.8h, v1.8h\n"
- "str q27, [x24, x11]\n"
- "str q28, [x23, x11]\n"
- "str q29, [x22, x11]\n"
- "str q30, [x21, x11]\n"
- "str q31, [x20, x11]\n"
+ "fmla v25.8h, v2.8h, v23.8h\n"
+ "ldp x27, x26, [%x[outptrs], #0x0]\n"
+ "fmla v24.8h, v1.8h, v23.8h\n"
+ "ldp x25, x24, [%x[outptrs], #0x10]\n"
+ "fmla v22.8h, v0.8h, v23.8h\n"
+ "ldp x23, x22, [%x[outptrs], #0x20]\n"
+ "fmla v21.8h, v31.8h, v23.8h\n"
+ "ldp x21, x20, [%x[outptrs], #0x30]\n"
+ "fmla v20.8h, v30.8h, v23.8h\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "fmla v19.8h, v29.8h, v23.8h\n"
+ "fmla v18.8h, v28.8h, v23.8h\n"
+ "fmla v17.8h, v27.8h, v23.8h\n"
+ "fmla v16.8h, v26.8h, v23.8h\n"
+ "fmax v25.8h, v25.8h, v4.8h\n"
+ "fmax v24.8h, v24.8h, v4.8h\n"
+ "fmax v22.8h, v22.8h, v4.8h\n"
+ "fmin v25.8h, v25.8h, v3.8h\n"
+ "str q25, [x27, x11]\n"
+ "fmin v24.8h, v24.8h, v3.8h\n"
+ "fmin v22.8h, v22.8h, v3.8h\n"
+ "str q24, [x26, x11]\n"
+ "fmax v21.8h, v21.8h, v4.8h\n"
+ "fmax v20.8h, v20.8h, v4.8h\n"
+ "str q22, [x25, x11]\n"
+ "fmax v19.8h, v19.8h, v4.8h\n"
+ "fmax v18.8h, v18.8h, v4.8h\n"
+ "fmin v21.8h, v21.8h, v3.8h\n"
+ "str q21, [x24, x11]\n"
+ "fmin v20.8h, v20.8h, v3.8h\n"
+ "fmin v19.8h, v19.8h, v3.8h\n"
+ "str q20, [x23, x11]\n"
+ "fmin v18.8h, v18.8h, v3.8h\n"
+ "fmax v17.8h, v17.8h, v4.8h\n"
+ "str q19, [x22, x11]\n"
+ "fmax v16.8h, v16.8h, v4.8h\n"
+ "str q18, [x21, x11]\n"
+ "fmin v17.8h, v17.8h, v3.8h\n"
+ "fmin v16.8h, v16.8h, v3.8h\n"
+ "str q17, [x20, x11]\n"
+ "str q16, [x19, x11]\n"
"add x11, x11, #0x10\n"
- "cmp x11, x12, LSL #4\n"
+ "cmp x11, x10, LSL #4\n"
"blt 1b\n"
"5:" // Oddments
"tst %x[n_channels], #0x7\n"
"beq 25f\n"
- "movi v23.16b, #0x0\n"
+ "movi v25.16b, #0x0\n"
"cbz %x[bias], 10f\n"
- "add x20, %x[bias], x11\n"
+ "add x19, %x[bias], x11\n"
"tbz %x[n_channels], #2, 7f\n"
- "ld1 { v23.d }[0], [x20], #0x8\n"
+ "ld1 { v25.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 6f\n"
- "ld1 { v23.s }[2], [x20], #0x4\n"
+ "ld1 { v25.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 9f\n"
- "ld1 { v23.h }[6], [x20], #0x2\n"
+ "ld1 { v25.h }[6], [x19], #0x2\n"
"b 9f\n"
"6:" // Oddments: Load bias: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 9f\n"
- "ld1 { v23.h }[4], [x20], #0x2\n"
+ "ld1 { v25.h }[4], [x19], #0x2\n"
"b 9f\n"
"7:" // Oddments: Load bias: Bit 2: Unset
"tbz %x[n_channels], #1, 8f\n"
- "ld1 { v23.s }[0], [x20], #0x4\n"
+ "ld1 { v25.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 9f\n"
- "ld1 { v23.h }[2], [x20], #0x2\n"
+ "ld1 { v25.h }[2], [x19], #0x2\n"
"b 9f\n"
"8:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset
- "ld1 { v23.h }[0], [x20], #0x2\n"
+ "tbz %x[n_channels], #0, 9f\n"
+ "ld1 { v25.h }[0], [x19], #0x2\n"
"9:" // Oddments: Load bias: Bit 2: End
+
"10:" // Oddments: Load bias: Done
- "ldr q0, [%x[params], #0x0]\n"
- "mov x21, %x[inptrs]\n"
- "ldp x10, x9, [x21], #0x10\n"
- "mov v24.16b, v23.16b\n"
- "ldp x28, x27, [x21], #0x10\n"
- "ldp x26, x25, [x21], #0x10\n"
- "mov v25.16b, v23.16b\n"
- "mov v26.16b, v23.16b\n"
- "ldp x24, x23, [x21], #0x10\n"
- "ldr x22, [x21], #0x8\n"
- "mov v27.16b, v23.16b\n"
- "mov v28.16b, v23.16b\n"
- "mov v29.16b, v23.16b\n"
- "mov v30.16b, v23.16b\n"
- "add x10, x10, x11\n"
+ "mov v24.16b, v25.16b\n"
+ "ldr q23, [%x[params], #0x0]\n"
+ "mov x20, %x[inptrs]\n"
+ "mov v22.16b, v25.16b\n"
+ "ldp x9, x28, [x20], #0x10\n"
+ "add %x[params], %x[params], #0x10\n"
+ "mov v21.16b, v25.16b\n"
+ "ldp x27, x26, [x20], #0x10\n"
+ "mov v20.16b, v25.16b\n"
"add x9, x9, x11\n"
- "mov v31.16b, v23.16b\n"
+ "mov v19.16b, v25.16b\n"
+ "ldp x25, x24, [x20], #0x10\n"
+ "mov v18.16b, v25.16b\n"
"add x28, x28, x11\n"
+ "mov v17.16b, v25.16b\n"
+ "ldp x23, x22, [x20], #0x10\n"
+ "mov v16.16b, v25.16b\n"
"add x27, x27, x11\n"
+ "ldr x21, [x20], #0x8\n"
"add x26, x26, x11\n"
"add x25, x25, x11\n"
"add x24, x24, x11\n"
"add x23, x23, x11\n"
"add x22, x22, x11\n"
- "add %x[params], %x[params], #0x10\n"
+ "add x21, x21, x11\n"
"tbz %x[n_channels], #2, 12f\n"
- "ldr d14, [x10], #0x8\n"
- "ldr d15, [x9], #0x8\n"
- "ldr d16, [x28], #0x8\n"
- "ldr d17, [x27], #0x8\n"
- "ldr d18, [x26], #0x8\n"
- "ldr d19, [x25], #0x8\n"
- "ldr d20, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d22, [x22], #0x8\n"
+ "ldr d2, [x9], #0x8\n"
+ "ldr d1, [x28], #0x8\n"
+ "ldr d0, [x27], #0x8\n"
+ "ldr d31, [x26], #0x8\n"
+ "ldr d30, [x25], #0x8\n"
+ "ldr d29, [x24], #0x8\n"
+ "ldr d28, [x23], #0x8\n"
+ "ldr d27, [x22], #0x8\n"
+ "ldr d26, [x21], #0x8\n"
"tbz %x[n_channels], #1, 11f\n"
- "ld1 { v14.s }[2], [x10], #0x4\n"
- "ld1 { v15.s }[2], [x9], #0x4\n"
- "ld1 { v16.s }[2], [x28], #0x4\n"
- "ld1 { v17.s }[2], [x27], #0x4\n"
- "ld1 { v18.s }[2], [x26], #0x4\n"
- "ld1 { v19.s }[2], [x25], #0x4\n"
- "ld1 { v20.s }[2], [x24], #0x4\n"
- "ld1 { v21.s }[2], [x23], #0x4\n"
- "ld1 { v22.s }[2], [x22], #0x4\n"
+ "ld1 { v2.s }[2], [x9], #0x4\n"
+ "ld1 { v1.s }[2], [x28], #0x4\n"
+ "ld1 { v0.s }[2], [x27], #0x4\n"
+ "ld1 { v31.s }[2], [x26], #0x4\n"
+ "ld1 { v30.s }[2], [x25], #0x4\n"
+ "ld1 { v29.s }[2], [x24], #0x4\n"
+ "ld1 { v28.s }[2], [x23], #0x4\n"
+ "ld1 { v27.s }[2], [x22], #0x4\n"
+ "ld1 { v26.s }[2], [x21], #0x4\n"
"tbz %x[n_channels], #0, 14f\n"
- "ld1 { v14.h }[6], [x10], #0x2\n"
- "ld1 { v15.h }[6], [x9], #0x2\n"
- "ld1 { v16.h }[6], [x28], #0x2\n"
- "ld1 { v17.h }[6], [x27], #0x2\n"
- "ld1 { v18.h }[6], [x26], #0x2\n"
- "ld1 { v19.h }[6], [x25], #0x2\n"
- "ld1 { v20.h }[6], [x24], #0x2\n"
- "ld1 { v21.h }[6], [x23], #0x2\n"
- "ld1 { v22.h }[6], [x22], #0x2\n"
+ "ld1 { v2.h }[6], [x9], #0x2\n"
+ "ld1 { v1.h }[6], [x28], #0x2\n"
+ "ld1 { v0.h }[6], [x27], #0x2\n"
+ "ld1 { v31.h }[6], [x26], #0x2\n"
+ "ld1 { v30.h }[6], [x25], #0x2\n"
+ "ld1 { v29.h }[6], [x24], #0x2\n"
+ "ld1 { v28.h }[6], [x23], #0x2\n"
+ "ld1 { v27.h }[6], [x22], #0x2\n"
+ "ld1 { v26.h }[6], [x21], #0x2\n"
"b 14f\n"
"11:" // Oddments: Load: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 14f\n"
- "ld1 { v14.h }[4], [x10], #0x2\n"
- "ld1 { v15.h }[4], [x9], #0x2\n"
- "ld1 { v16.h }[4], [x28], #0x2\n"
- "ld1 { v17.h }[4], [x27], #0x2\n"
- "ld1 { v18.h }[4], [x26], #0x2\n"
- "ld1 { v19.h }[4], [x25], #0x2\n"
- "ld1 { v20.h }[4], [x24], #0x2\n"
- "ld1 { v21.h }[4], [x23], #0x2\n"
- "ld1 { v22.h }[4], [x22], #0x2\n"
+ "ld1 { v2.h }[4], [x9], #0x2\n"
+ "ld1 { v1.h }[4], [x28], #0x2\n"
+ "ld1 { v0.h }[4], [x27], #0x2\n"
+ "ld1 { v31.h }[4], [x26], #0x2\n"
+ "ld1 { v30.h }[4], [x25], #0x2\n"
+ "ld1 { v29.h }[4], [x24], #0x2\n"
+ "ld1 { v28.h }[4], [x23], #0x2\n"
+ "ld1 { v27.h }[4], [x22], #0x2\n"
+ "ld1 { v26.h }[4], [x21], #0x2\n"
"b 14f\n"
"12:" // Oddments: Load: Bit 2: Unset
"tbz %x[n_channels], #1, 13f\n"
- "ldr s14, [x10], #0x4\n"
- "ldr s15, [x9], #0x4\n"
- "ldr s16, [x28], #0x4\n"
- "ldr s17, [x27], #0x4\n"
- "ldr s18, [x26], #0x4\n"
- "ldr s19, [x25], #0x4\n"
- "ldr s20, [x24], #0x4\n"
- "ldr s21, [x23], #0x4\n"
- "ldr s22, [x22], #0x4\n"
+ "ldr s2, [x9], #0x4\n"
+ "ldr s1, [x28], #0x4\n"
+ "ldr s0, [x27], #0x4\n"
+ "ldr s31, [x26], #0x4\n"
+ "ldr s30, [x25], #0x4\n"
+ "ldr s29, [x24], #0x4\n"
+ "ldr s28, [x23], #0x4\n"
+ "ldr s27, [x22], #0x4\n"
+ "ldr s26, [x21], #0x4\n"
"tbz %x[n_channels], #0, 14f\n"
- "ld1 { v14.h }[2], [x10], #0x2\n"
- "ld1 { v15.h }[2], [x9], #0x2\n"
- "ld1 { v16.h }[2], [x28], #0x2\n"
- "ld1 { v17.h }[2], [x27], #0x2\n"
- "ld1 { v18.h }[2], [x26], #0x2\n"
- "ld1 { v19.h }[2], [x25], #0x2\n"
- "ld1 { v20.h }[2], [x24], #0x2\n"
- "ld1 { v21.h }[2], [x23], #0x2\n"
- "ld1 { v22.h }[2], [x22], #0x2\n"
+ "ld1 { v2.h }[2], [x9], #0x2\n"
+ "ld1 { v1.h }[2], [x28], #0x2\n"
+ "ld1 { v0.h }[2], [x27], #0x2\n"
+ "ld1 { v31.h }[2], [x26], #0x2\n"
+ "ld1 { v30.h }[2], [x25], #0x2\n"
+ "ld1 { v29.h }[2], [x24], #0x2\n"
+ "ld1 { v28.h }[2], [x23], #0x2\n"
+ "ld1 { v27.h }[2], [x22], #0x2\n"
+ "ld1 { v26.h }[2], [x21], #0x2\n"
"b 14f\n"
"13:" // Oddments: Load: Bit 2: Unset: Bit 1: Unset
- "ldr h14, [x10], #0x2\n"
- "ldr h15, [x9], #0x2\n"
- "ldr h16, [x28], #0x2\n"
- "ldr h17, [x27], #0x2\n"
- "ldr h18, [x26], #0x2\n"
- "ldr h19, [x25], #0x2\n"
- "ldr h20, [x24], #0x2\n"
- "ldr h21, [x23], #0x2\n"
- "ldr h22, [x22], #0x2\n"
+ "tbz %x[n_channels], #0, 14f\n"
+ "ldr h2, [x9], #0x2\n"
+ "ldr h1, [x28], #0x2\n"
+ "ldr h0, [x27], #0x2\n"
+ "ldr h31, [x26], #0x2\n"
+ "ldr h30, [x25], #0x2\n"
+ "ldr h29, [x24], #0x2\n"
+ "ldr h28, [x23], #0x2\n"
+ "ldr h27, [x22], #0x2\n"
+ "ldr h26, [x21], #0x2\n"
"14:" // Oddments: Load: Bit 2: End
- "subs x20, %x[n_points], #0x1\n"
+ "subs x19, %x[n_points], #0x1\n"
"ble 20f\n"
"15:" // Oddments: Planar loop
- "ldp x10, x9, [x21], #0x10\n"
- "ldp x28, x27, [x21], #0x10\n"
- "fmla v23.8h, v14.8h, v0.8h\n"
- "fmla v24.8h, v15.8h, v0.8h\n"
- "ldp x26, x25, [x21], #0x10\n"
- "ldp x24, x23, [x21], #0x10\n"
- "fmla v25.8h, v16.8h, v0.8h\n"
- "fmla v26.8h, v17.8h, v0.8h\n"
- "ldr x22, [x21], #0x8\n"
- "fmla v27.8h, v18.8h, v0.8h\n"
- "fmla v28.8h, v19.8h, v0.8h\n"
- "add x10, x10, x11\n"
- "fmla v29.8h, v20.8h, v0.8h\n"
- "fmla v30.8h, v21.8h, v0.8h\n"
+ "fmla v25.8h, v2.8h, v23.8h\n"
+ "ldp x9, x28, [x20], #0x10\n"
"add x9, x9, x11\n"
+ "fmla v24.8h, v1.8h, v23.8h\n"
+ "ldp x27, x26, [x20], #0x10\n"
+ "fmla v22.8h, v0.8h, v23.8h\n"
+ "ldp x25, x24, [x20], #0x10\n"
+ "fmla v21.8h, v31.8h, v23.8h\n"
"add x28, x28, x11\n"
- "fmla v31.8h, v22.8h, v0.8h\n"
- "ldr q0, [%x[params], #0x0]\n"
+ "fmla v20.8h, v30.8h, v23.8h\n"
+ "ldp x23, x22, [x20], #0x10\n"
+ "fmla v19.8h, v29.8h, v23.8h\n"
"add x27, x27, x11\n"
+ "fmla v18.8h, v28.8h, v23.8h\n"
+ "ldr x21, [x20], #0x8\n"
+ "fmla v17.8h, v27.8h, v23.8h\n"
"add x26, x26, x11\n"
+ "fmla v16.8h, v26.8h, v23.8h\n"
+ "ldr q23, [%x[params], #0x0]\n"
"add x25, x25, x11\n"
"add x24, x24, x11\n"
"add x23, x23, x11\n"
"add x22, x22, x11\n"
+ "add x21, x21, x11\n"
"add %x[params], %x[params], #0x10\n"
"tbz %x[n_channels], #2, 17f\n"
- "ldr d14, [x10], #0x8\n"
- "ldr d15, [x9], #0x8\n"
- "ldr d16, [x28], #0x8\n"
- "ldr d17, [x27], #0x8\n"
- "ldr d18, [x26], #0x8\n"
- "ldr d19, [x25], #0x8\n"
- "ldr d20, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d22, [x22], #0x8\n"
+ "ldr d2, [x9], #0x8\n"
+ "ldr d1, [x28], #0x8\n"
+ "ldr d0, [x27], #0x8\n"
+ "ldr d31, [x26], #0x8\n"
+ "ldr d30, [x25], #0x8\n"
+ "ldr d29, [x24], #0x8\n"
+ "ldr d28, [x23], #0x8\n"
+ "ldr d27, [x22], #0x8\n"
+ "ldr d26, [x21], #0x8\n"
"tbz %x[n_channels], #1, 16f\n"
- "ld1 { v14.s }[2], [x10], #0x4\n"
- "ld1 { v15.s }[2], [x9], #0x4\n"
- "ld1 { v16.s }[2], [x28], #0x4\n"
- "ld1 { v17.s }[2], [x27], #0x4\n"
- "ld1 { v18.s }[2], [x26], #0x4\n"
- "ld1 { v19.s }[2], [x25], #0x4\n"
- "ld1 { v20.s }[2], [x24], #0x4\n"
- "ld1 { v21.s }[2], [x23], #0x4\n"
- "ld1 { v22.s }[2], [x22], #0x4\n"
+ "ld1 { v2.s }[2], [x9], #0x4\n"
+ "ld1 { v1.s }[2], [x28], #0x4\n"
+ "ld1 { v0.s }[2], [x27], #0x4\n"
+ "ld1 { v31.s }[2], [x26], #0x4\n"
+ "ld1 { v30.s }[2], [x25], #0x4\n"
+ "ld1 { v29.s }[2], [x24], #0x4\n"
+ "ld1 { v28.s }[2], [x23], #0x4\n"
+ "ld1 { v27.s }[2], [x22], #0x4\n"
+ "ld1 { v26.s }[2], [x21], #0x4\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v14.h }[6], [x10], #0x2\n"
- "ld1 { v15.h }[6], [x9], #0x2\n"
- "ld1 { v16.h }[6], [x28], #0x2\n"
- "ld1 { v17.h }[6], [x27], #0x2\n"
- "ld1 { v18.h }[6], [x26], #0x2\n"
- "ld1 { v19.h }[6], [x25], #0x2\n"
- "ld1 { v20.h }[6], [x24], #0x2\n"
- "ld1 { v21.h }[6], [x23], #0x2\n"
- "ld1 { v22.h }[6], [x22], #0x2\n"
+ "ld1 { v2.h }[6], [x9], #0x2\n"
+ "ld1 { v1.h }[6], [x28], #0x2\n"
+ "ld1 { v0.h }[6], [x27], #0x2\n"
+ "ld1 { v31.h }[6], [x26], #0x2\n"
+ "ld1 { v30.h }[6], [x25], #0x2\n"
+ "ld1 { v29.h }[6], [x24], #0x2\n"
+ "ld1 { v28.h }[6], [x23], #0x2\n"
+ "ld1 { v27.h }[6], [x22], #0x2\n"
+ "ld1 { v26.h }[6], [x21], #0x2\n"
"b 19f\n"
"16:" // Oddments: Planar loop: Load: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v14.h }[4], [x10], #0x2\n"
- "ld1 { v15.h }[4], [x9], #0x2\n"
- "ld1 { v16.h }[4], [x28], #0x2\n"
- "ld1 { v17.h }[4], [x27], #0x2\n"
- "ld1 { v18.h }[4], [x26], #0x2\n"
- "ld1 { v19.h }[4], [x25], #0x2\n"
- "ld1 { v20.h }[4], [x24], #0x2\n"
- "ld1 { v21.h }[4], [x23], #0x2\n"
- "ld1 { v22.h }[4], [x22], #0x2\n"
+ "ld1 { v2.h }[4], [x9], #0x2\n"
+ "ld1 { v1.h }[4], [x28], #0x2\n"
+ "ld1 { v0.h }[4], [x27], #0x2\n"
+ "ld1 { v31.h }[4], [x26], #0x2\n"
+ "ld1 { v30.h }[4], [x25], #0x2\n"
+ "ld1 { v29.h }[4], [x24], #0x2\n"
+ "ld1 { v28.h }[4], [x23], #0x2\n"
+ "ld1 { v27.h }[4], [x22], #0x2\n"
+ "ld1 { v26.h }[4], [x21], #0x2\n"
"b 19f\n"
"17:" // Oddments: Planar loop: Load: Bit 2: Unset
"tbz %x[n_channels], #1, 18f\n"
- "ldr s14, [x10], #0x4\n"
- "ldr s15, [x9], #0x4\n"
- "ldr s16, [x28], #0x4\n"
- "ldr s17, [x27], #0x4\n"
- "ldr s18, [x26], #0x4\n"
- "ldr s19, [x25], #0x4\n"
- "ldr s20, [x24], #0x4\n"
- "ldr s21, [x23], #0x4\n"
- "ldr s22, [x22], #0x4\n"
+ "ldr s2, [x9], #0x4\n"
+ "ldr s1, [x28], #0x4\n"
+ "ldr s0, [x27], #0x4\n"
+ "ldr s31, [x26], #0x4\n"
+ "ldr s30, [x25], #0x4\n"
+ "ldr s29, [x24], #0x4\n"
+ "ldr s28, [x23], #0x4\n"
+ "ldr s27, [x22], #0x4\n"
+ "ldr s26, [x21], #0x4\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v14.h }[2], [x10], #0x2\n"
- "ld1 { v15.h }[2], [x9], #0x2\n"
- "ld1 { v16.h }[2], [x28], #0x2\n"
- "ld1 { v17.h }[2], [x27], #0x2\n"
- "ld1 { v18.h }[2], [x26], #0x2\n"
- "ld1 { v19.h }[2], [x25], #0x2\n"
- "ld1 { v20.h }[2], [x24], #0x2\n"
- "ld1 { v21.h }[2], [x23], #0x2\n"
- "ld1 { v22.h }[2], [x22], #0x2\n"
+ "ld1 { v2.h }[2], [x9], #0x2\n"
+ "ld1 { v1.h }[2], [x28], #0x2\n"
+ "ld1 { v0.h }[2], [x27], #0x2\n"
+ "ld1 { v31.h }[2], [x26], #0x2\n"
+ "ld1 { v30.h }[2], [x25], #0x2\n"
+ "ld1 { v29.h }[2], [x24], #0x2\n"
+ "ld1 { v28.h }[2], [x23], #0x2\n"
+ "ld1 { v27.h }[2], [x22], #0x2\n"
+ "ld1 { v26.h }[2], [x21], #0x2\n"
"b 19f\n"
"18:" // Oddments: Planar loop: Load: Bit 2: Unset: Bit 1: Unset
- "ldr h14, [x10], #0x2\n"
- "ldr h15, [x9], #0x2\n"
- "ldr h16, [x28], #0x2\n"
- "ldr h17, [x27], #0x2\n"
- "ldr h18, [x26], #0x2\n"
- "ldr h19, [x25], #0x2\n"
- "ldr h20, [x24], #0x2\n"
- "ldr h21, [x23], #0x2\n"
- "ldr h22, [x22], #0x2\n"
+ "tbz %x[n_channels], #0, 19f\n"
+ "ldr h2, [x9], #0x2\n"
+ "ldr h1, [x28], #0x2\n"
+ "ldr h0, [x27], #0x2\n"
+ "ldr h31, [x26], #0x2\n"
+ "ldr h30, [x25], #0x2\n"
+ "ldr h29, [x24], #0x2\n"
+ "ldr h28, [x23], #0x2\n"
+ "ldr h27, [x22], #0x2\n"
+ "ldr h26, [x21], #0x2\n"
"19:" // Oddments: Planar loop: Load: Bit 2: End
- "subs x20, x20, #0x1\n"
+ "subs x19, x19, #0x1\n"
"bgt 15b\n"
"20:" // Oddments: Planar tail
- "fmla v23.8h, v14.8h, v0.8h\n"
- "fmla v24.8h, v15.8h, v0.8h\n"
- "fmax v23.8h, v23.8h, v2.8h\n"
- "ldp x28, x27, [%x[outptrs], #0x0]\n"
- "fmla v25.8h, v16.8h, v0.8h\n"
- "fmla v26.8h, v17.8h, v0.8h\n"
- "fmax v24.8h, v24.8h, v2.8h\n"
- "ldp x26, x25, [%x[outptrs], #0x10]\n"
- "fmla v27.8h, v18.8h, v0.8h\n"
- "fmla v28.8h, v19.8h, v0.8h\n"
- "fmax v25.8h, v25.8h, v2.8h\n"
- "ldp x24, x23, [%x[outptrs], #0x20]\n"
- "fmla v29.8h, v20.8h, v0.8h\n"
- "fmla v30.8h, v21.8h, v0.8h\n"
- "fmax v26.8h, v26.8h, v2.8h\n"
- "ldp x22, x21, [%x[outptrs], #0x30]\n"
- "fmla v31.8h, v22.8h, v0.8h\n"
- "fmax v27.8h, v27.8h, v2.8h\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "add x28, x28, x11\n"
- "fmax v28.8h, v28.8h, v2.8h\n"
- "fmax v29.8h, v29.8h, v2.8h\n"
+ "fmla v25.8h, v2.8h, v23.8h\n"
+ "ldp x27, x26, [%x[outptrs], #0x0]\n"
"add x27, x27, x11\n"
+ "fmla v24.8h, v1.8h, v23.8h\n"
+ "ldp x25, x24, [%x[outptrs], #0x10]\n"
+ "fmla v22.8h, v0.8h, v23.8h\n"
+ "ldp x23, x22, [%x[outptrs], #0x20]\n"
"add x26, x26, x11\n"
- "fmax v30.8h, v30.8h, v2.8h\n"
- "fmax v31.8h, v31.8h, v2.8h\n"
+ "fmla v21.8h, v31.8h, v23.8h\n"
+ "ldp x21, x20, [%x[outptrs], #0x30]\n"
+ "fmla v20.8h, v30.8h, v23.8h\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
"add x25, x25, x11\n"
+ "fmla v19.8h, v29.8h, v23.8h\n"
"add x24, x24, x11\n"
- "fmin v23.8h, v23.8h, v1.8h\n"
- "fmin v24.8h, v24.8h, v1.8h\n"
+ "fmla v18.8h, v28.8h, v23.8h\n"
"add x23, x23, x11\n"
+ "fmla v17.8h, v27.8h, v23.8h\n"
"add x22, x22, x11\n"
- "fmin v25.8h, v25.8h, v1.8h\n"
- "fmin v26.8h, v26.8h, v1.8h\n"
+ "fmla v16.8h, v26.8h, v23.8h\n"
"add x21, x21, x11\n"
+ "fmax v25.8h, v25.8h, v4.8h\n"
"add x20, x20, x11\n"
- "fmin v27.8h, v27.8h, v1.8h\n"
- "fmin v28.8h, v28.8h, v1.8h\n"
- "fmin v29.8h, v29.8h, v1.8h\n"
- "fmin v30.8h, v30.8h, v1.8h\n"
- "fmin v31.8h, v31.8h, v1.8h\n"
+ "fmax v24.8h, v24.8h, v4.8h\n"
+ "add x19, x19, x11\n"
+ "fmax v22.8h, v22.8h, v4.8h\n"
+ "fmin v25.8h, v25.8h, v3.8h\n"
+ "fmin v24.8h, v24.8h, v3.8h\n"
+ "fmin v22.8h, v22.8h, v3.8h\n"
+ "fmax v21.8h, v21.8h, v4.8h\n"
+ "fmax v20.8h, v20.8h, v4.8h\n"
+ "fmax v19.8h, v19.8h, v4.8h\n"
+ "fmin v21.8h, v21.8h, v3.8h\n"
+ "fmin v20.8h, v20.8h, v3.8h\n"
+ "fmin v19.8h, v19.8h, v3.8h\n"
+ "fmax v18.8h, v18.8h, v4.8h\n"
+ "fmax v17.8h, v17.8h, v4.8h\n"
+ "fmax v16.8h, v16.8h, v4.8h\n"
+ "fmin v18.8h, v18.8h, v3.8h\n"
+ "fmin v17.8h, v17.8h, v3.8h\n"
+ "fmin v16.8h, v16.8h, v3.8h\n"
"tbz %x[n_channels], #2, 22f\n"
- "st1 { v23.d }[0], [x28], #0x8\n"
- "st1 { v24.d }[0], [x27], #0x8\n"
- "st1 { v25.d }[0], [x26], #0x8\n"
- "st1 { v26.d }[0], [x25], #0x8\n"
- "st1 { v27.d }[0], [x24], #0x8\n"
- "st1 { v28.d }[0], [x23], #0x8\n"
- "st1 { v29.d }[0], [x22], #0x8\n"
- "st1 { v30.d }[0], [x21], #0x8\n"
- "st1 { v31.d }[0], [x20], #0x8\n"
+ "st1 { v25.d }[0], [x27], #0x8\n"
+ "st1 { v24.d }[0], [x26], #0x8\n"
+ "st1 { v22.d }[0], [x25], #0x8\n"
+ "st1 { v21.d }[0], [x24], #0x8\n"
+ "st1 { v20.d }[0], [x23], #0x8\n"
+ "st1 { v19.d }[0], [x22], #0x8\n"
+ "st1 { v18.d }[0], [x21], #0x8\n"
+ "st1 { v17.d }[0], [x20], #0x8\n"
+ "st1 { v16.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #1, 21f\n"
- "st1 { v23.s }[2], [x28], #0x4\n"
- "st1 { v24.s }[2], [x27], #0x4\n"
- "st1 { v25.s }[2], [x26], #0x4\n"
- "st1 { v26.s }[2], [x25], #0x4\n"
- "st1 { v27.s }[2], [x24], #0x4\n"
- "st1 { v28.s }[2], [x23], #0x4\n"
- "st1 { v29.s }[2], [x22], #0x4\n"
- "st1 { v30.s }[2], [x21], #0x4\n"
- "st1 { v31.s }[2], [x20], #0x4\n"
+ "st1 { v25.s }[2], [x27], #0x4\n"
+ "st1 { v24.s }[2], [x26], #0x4\n"
+ "st1 { v22.s }[2], [x25], #0x4\n"
+ "st1 { v21.s }[2], [x24], #0x4\n"
+ "st1 { v20.s }[2], [x23], #0x4\n"
+ "st1 { v19.s }[2], [x22], #0x4\n"
+ "st1 { v18.s }[2], [x21], #0x4\n"
+ "st1 { v17.s }[2], [x20], #0x4\n"
+ "st1 { v16.s }[2], [x19], #0x4\n"
"tbz %x[n_channels], #0, 24f\n"
- "st1 { v23.h }[6], [x28], #0x2\n"
- "st1 { v24.h }[6], [x27], #0x2\n"
- "st1 { v25.h }[6], [x26], #0x2\n"
- "st1 { v26.h }[6], [x25], #0x2\n"
- "st1 { v27.h }[6], [x24], #0x2\n"
- "st1 { v28.h }[6], [x23], #0x2\n"
- "st1 { v29.h }[6], [x22], #0x2\n"
- "st1 { v30.h }[6], [x21], #0x2\n"
- "st1 { v31.h }[6], [x20], #0x2\n"
+ "st1 { v25.h }[6], [x27], #0x2\n"
+ "st1 { v24.h }[6], [x26], #0x2\n"
+ "st1 { v22.h }[6], [x25], #0x2\n"
+ "st1 { v21.h }[6], [x24], #0x2\n"
+ "st1 { v20.h }[6], [x23], #0x2\n"
+ "st1 { v19.h }[6], [x22], #0x2\n"
+ "st1 { v18.h }[6], [x21], #0x2\n"
+ "st1 { v17.h }[6], [x20], #0x2\n"
+ "st1 { v16.h }[6], [x19], #0x2\n"
"b 24f\n"
"21:" // Oddments: Store: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 24f\n"
- "st1 { v23.h }[4], [x28], #0x2\n"
- "st1 { v24.h }[4], [x27], #0x2\n"
- "st1 { v25.h }[4], [x26], #0x2\n"
- "st1 { v26.h }[4], [x25], #0x2\n"
- "st1 { v27.h }[4], [x24], #0x2\n"
- "st1 { v28.h }[4], [x23], #0x2\n"
- "st1 { v29.h }[4], [x22], #0x2\n"
- "st1 { v30.h }[4], [x21], #0x2\n"
- "st1 { v31.h }[4], [x20], #0x2\n"
+ "st1 { v25.h }[4], [x27], #0x2\n"
+ "st1 { v24.h }[4], [x26], #0x2\n"
+ "st1 { v22.h }[4], [x25], #0x2\n"
+ "st1 { v21.h }[4], [x24], #0x2\n"
+ "st1 { v20.h }[4], [x23], #0x2\n"
+ "st1 { v19.h }[4], [x22], #0x2\n"
+ "st1 { v18.h }[4], [x21], #0x2\n"
+ "st1 { v17.h }[4], [x20], #0x2\n"
+ "st1 { v16.h }[4], [x19], #0x2\n"
"b 24f\n"
"22:" // Oddments: Store: Bit 2: Unset
"tbz %x[n_channels], #1, 23f\n"
- "st1 { v23.s }[0], [x28], #0x4\n"
- "st1 { v24.s }[0], [x27], #0x4\n"
- "st1 { v25.s }[0], [x26], #0x4\n"
- "st1 { v26.s }[0], [x25], #0x4\n"
- "st1 { v27.s }[0], [x24], #0x4\n"
- "st1 { v28.s }[0], [x23], #0x4\n"
- "st1 { v29.s }[0], [x22], #0x4\n"
- "st1 { v30.s }[0], [x21], #0x4\n"
- "st1 { v31.s }[0], [x20], #0x4\n"
+ "st1 { v25.s }[0], [x27], #0x4\n"
+ "st1 { v24.s }[0], [x26], #0x4\n"
+ "st1 { v22.s }[0], [x25], #0x4\n"
+ "st1 { v21.s }[0], [x24], #0x4\n"
+ "st1 { v20.s }[0], [x23], #0x4\n"
+ "st1 { v19.s }[0], [x22], #0x4\n"
+ "st1 { v18.s }[0], [x21], #0x4\n"
+ "st1 { v17.s }[0], [x20], #0x4\n"
+ "st1 { v16.s }[0], [x19], #0x4\n"
"tbz %x[n_channels], #0, 24f\n"
- "st1 { v23.h }[2], [x28], #0x2\n"
- "st1 { v24.h }[2], [x27], #0x2\n"
- "st1 { v25.h }[2], [x26], #0x2\n"
- "st1 { v26.h }[2], [x25], #0x2\n"
- "st1 { v27.h }[2], [x24], #0x2\n"
- "st1 { v28.h }[2], [x23], #0x2\n"
- "st1 { v29.h }[2], [x22], #0x2\n"
- "st1 { v30.h }[2], [x21], #0x2\n"
- "st1 { v31.h }[2], [x20], #0x2\n"
+ "st1 { v25.h }[2], [x27], #0x2\n"
+ "st1 { v24.h }[2], [x26], #0x2\n"
+ "st1 { v22.h }[2], [x25], #0x2\n"
+ "st1 { v21.h }[2], [x24], #0x2\n"
+ "st1 { v20.h }[2], [x23], #0x2\n"
+ "st1 { v19.h }[2], [x22], #0x2\n"
+ "st1 { v18.h }[2], [x21], #0x2\n"
+ "st1 { v17.h }[2], [x20], #0x2\n"
+ "st1 { v16.h }[2], [x19], #0x2\n"
"b 24f\n"
"23:" // Oddments: Store: Bit 2: Unset: Bit 1: Unset
- "st1 { v23.h }[0], [x28], #0x2\n"
- "st1 { v24.h }[0], [x27], #0x2\n"
- "st1 { v25.h }[0], [x26], #0x2\n"
- "st1 { v26.h }[0], [x25], #0x2\n"
- "st1 { v27.h }[0], [x24], #0x2\n"
- "st1 { v28.h }[0], [x23], #0x2\n"
- "st1 { v29.h }[0], [x22], #0x2\n"
- "st1 { v30.h }[0], [x21], #0x2\n"
- "st1 { v31.h }[0], [x20], #0x2\n"
+ "tbz %x[n_channels], #0, 24f\n"
+ "st1 { v25.h }[0], [x27], #0x2\n"
+ "st1 { v24.h }[0], [x26], #0x2\n"
+ "st1 { v22.h }[0], [x25], #0x2\n"
+ "st1 { v21.h }[0], [x24], #0x2\n"
+ "st1 { v20.h }[0], [x23], #0x2\n"
+ "st1 { v19.h }[0], [x22], #0x2\n"
+ "st1 { v18.h }[0], [x21], #0x2\n"
+ "st1 { v17.h }[0], [x20], #0x2\n"
+ "st1 { v16.h }[0], [x19], #0x2\n"
"24:" // Oddments: Store: Bit 2: End
"25:" // End
: [params] "+&r" (params)
: [bias] "r" (bias), [inptrs] "r" (inptrs), [minmax_vals] "r" (minmax_vals), [n_channels] "r" ((uint64_t) n_channels), [n_points] "r" ((uint64_t) n_points), [outptrs] "r" (outptrs)
- : "cc", "memory", "v0", "v1", "v2", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
index f246cec87e..d9fc1403b2 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,998 +45,1001 @@ void a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
__asm__ __volatile__(
"ld1r { v7.8h }, [%x[minmax_vals]]\n"
- "lsr x11, %x[n_output_channels], #0x3\n"
- "add x20, %x[minmax_vals], #0x2\n"
- "ld1r { v6.8h }, [x20]\n"
"mov x10, #0x0\n"
- "cbz x11, 8f\n"
+ "add x19, %x[minmax_vals], #0x2\n"
+ "ld1r { v6.8h }, [x19]\n"
+ "lsr x9, %x[n_output_channels], #0x3\n"
+ "cbz x9, 8f\n"
"1:" // Output channel loop
- "movi v31.16b, #0x0\n"
+ "movi v16.16b, #0x0\n"
"cbz %x[bias], 2f\n"
- "lsl x20, x10, #0x1\n"
- "ldr q31, [%x[bias], x20]\n"
+ "lsl x19, x10, #0x1\n"
+ "ldr q16, [%x[bias], x19]\n"
"2:" // Output channel loop: Load bias: Done
- "ldr q5, [%x[weights], #0x0]\n"
- "mov x20, %x[inptrs]\n"
- "ldp x24, x9, [x20], #0x10\n"
- "lsr x21, %x[kernel_points], #0x1\n"
- "ldr q4, [x24, #0x0]\n"
- "ldr q3, [x9, #0x0]\n"
- "mov v16.16b, v31.16b\n"
- "mov v17.16b, v31.16b\n"
- "mov v18.16b, v31.16b\n"
- "mov v19.16b, v31.16b\n"
+ "mov v5.16b, v16.16b\n"
+ "ldr q4, [%x[weights], #0x0]\n"
+ "mov x19, %x[inptrs]\n"
+ "mov v31.16b, v16.16b\n"
+ "ldp x25, x28, [x19], #0x10\n"
+ "lsr x20, %x[kernel_points], #0x1\n"
+ "mov v30.16b, v16.16b\n"
+ "ldr q3, [x25, #0x0]\n"
+ "mov v29.16b, v16.16b\n"
"add %x[weights], %x[weights], #0x10\n"
- "mov v20.16b, v31.16b\n"
- "mov v21.16b, v31.16b\n"
- "mov v22.16b, v31.16b\n"
- "mov v23.16b, v31.16b\n"
- "mov v24.16b, v31.16b\n"
- "mov v25.16b, v31.16b\n"
- "mov v26.16b, v31.16b\n"
- "mov v27.16b, v31.16b\n"
- "mov v28.16b, v31.16b\n"
- "mov v29.16b, v31.16b\n"
- "mov v30.16b, v31.16b\n"
- "mov v31.16b, v31.16b\n"
- "cbz x21, 6f\n"
- "ldr q2, [%x[weights], #0x0]\n"
- "ldp x24, x9, [x20], #0x10\n"
- "subs x21, x21, #0x1\n"
+ "mov v28.16b, v16.16b\n"
+ "ldr q2, [x28, #0x0]\n"
+ "mov v27.16b, v16.16b\n"
+ "mov v26.16b, v16.16b\n"
+ "mov v25.16b, v16.16b\n"
+ "mov v24.16b, v16.16b\n"
+ "mov v23.16b, v16.16b\n"
+ "mov v22.16b, v16.16b\n"
+ "mov v21.16b, v16.16b\n"
+ "mov v20.16b, v16.16b\n"
+ "mov v19.16b, v16.16b\n"
+ "mov v18.16b, v16.16b\n"
+ "mov v17.16b, v16.16b\n"
+ "cbz x20, 6f\n"
+ "ldp x25, x28, [x19], #0x10\n"
+ "ldr q16, [%x[weights], #0x0]\n"
+ "subs x20, x20, #0x1\n"
"add %x[weights], %x[weights], #0x10\n"
- "ldr q1, [x24, #0x0]\n"
- "ldr q0, [x9, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "ldr q0, [x28, #0x0]\n"
"beq 4f\n"
"3:" // Output channel loop: Kernel loop
- "ldp x24, x9, [x20], #0x10\n"
- "fmla v16.8h, v5.8h, v4.h[0]\n"
- "fmla v17.8h, v5.8h, v4.h[1]\n"
- "subs x21, x21, #0x1\n"
- "fmla v18.8h, v5.8h, v4.h[2]\n"
- "fmla v19.8h, v5.8h, v4.h[3]\n"
- "fmla v20.8h, v5.8h, v4.h[4]\n"
- "fmla v21.8h, v5.8h, v4.h[5]\n"
- "fmla v22.8h, v5.8h, v4.h[6]\n"
- "fmla v23.8h, v5.8h, v4.h[7]\n"
- "ldr q4, [x24, #0x0]\n"
- "fmla v24.8h, v5.8h, v3.h[0]\n"
- "fmla v25.8h, v5.8h, v3.h[1]\n"
- "fmla v26.8h, v5.8h, v3.h[2]\n"
- "fmla v27.8h, v5.8h, v3.h[3]\n"
- "fmla v28.8h, v5.8h, v3.h[4]\n"
- "fmla v29.8h, v5.8h, v3.h[5]\n"
- "fmla v30.8h, v5.8h, v3.h[6]\n"
- "fmla v31.8h, v5.8h, v3.h[7]\n"
- "ldr q3, [x9, #0x0]\n"
- "ldr q5, [%x[weights], #0x0]\n"
- "ldp x24, x9, [x20], #0x10\n"
- "fmla v16.8h, v2.8h, v1.h[0]\n"
- "fmla v17.8h, v2.8h, v1.h[1]\n"
- "fmla v18.8h, v2.8h, v1.h[2]\n"
- "fmla v19.8h, v2.8h, v1.h[3]\n"
- "fmla v20.8h, v2.8h, v1.h[4]\n"
- "fmla v21.8h, v2.8h, v1.h[5]\n"
- "fmla v22.8h, v2.8h, v1.h[6]\n"
- "fmla v23.8h, v2.8h, v1.h[7]\n"
- "ldr q1, [x24, #0x0]\n"
- "fmla v24.8h, v2.8h, v0.h[0]\n"
- "fmla v25.8h, v2.8h, v0.h[1]\n"
- "fmla v26.8h, v2.8h, v0.h[2]\n"
- "fmla v27.8h, v2.8h, v0.h[3]\n"
- "fmla v28.8h, v2.8h, v0.h[4]\n"
- "fmla v29.8h, v2.8h, v0.h[5]\n"
- "fmla v30.8h, v2.8h, v0.h[6]\n"
- "fmla v31.8h, v2.8h, v0.h[7]\n"
- "ldr q0, [x9, #0x0]\n"
- "ldr q2, [%x[weights], #0x10]\n"
+ "fmla v5.8h, v4.8h, v3.h[0]\n"
+ "ldp x25, x28, [x19], #0x10\n"
+ "subs x20, x20, #0x1\n"
+ "fmla v31.8h, v4.8h, v3.h[1]\n"
+ "fmla v30.8h, v4.8h, v3.h[2]\n"
+ "fmla v29.8h, v4.8h, v3.h[3]\n"
+ "fmla v28.8h, v4.8h, v3.h[4]\n"
+ "fmla v27.8h, v4.8h, v3.h[5]\n"
+ "fmla v26.8h, v4.8h, v3.h[6]\n"
+ "fmla v25.8h, v4.8h, v3.h[7]\n"
+ "ldr q3, [x25, #0x0]\n"
+ "fmla v24.8h, v4.8h, v2.h[0]\n"
+ "fmla v23.8h, v4.8h, v2.h[1]\n"
+ "fmla v22.8h, v4.8h, v2.h[2]\n"
+ "fmla v21.8h, v4.8h, v2.h[3]\n"
+ "fmla v20.8h, v4.8h, v2.h[4]\n"
+ "fmla v19.8h, v4.8h, v2.h[5]\n"
+ "fmla v18.8h, v4.8h, v2.h[6]\n"
+ "fmla v17.8h, v4.8h, v2.h[7]\n"
+ "ldr q2, [x28, #0x0]\n"
+ "fmla v5.8h, v16.8h, v1.h[0]\n"
+ "ldr q4, [%x[weights], #0x0]\n"
+ "fmla v31.8h, v16.8h, v1.h[1]\n"
+ "ldp x25, x28, [x19], #0x10\n"
+ "fmla v30.8h, v16.8h, v1.h[2]\n"
+ "fmla v29.8h, v16.8h, v1.h[3]\n"
+ "fmla v28.8h, v16.8h, v1.h[4]\n"
+ "fmla v27.8h, v16.8h, v1.h[5]\n"
+ "fmla v26.8h, v16.8h, v1.h[6]\n"
+ "fmla v25.8h, v16.8h, v1.h[7]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "fmla v24.8h, v16.8h, v0.h[0]\n"
+ "fmla v23.8h, v16.8h, v0.h[1]\n"
+ "fmla v22.8h, v16.8h, v0.h[2]\n"
+ "fmla v21.8h, v16.8h, v0.h[3]\n"
+ "fmla v20.8h, v16.8h, v0.h[4]\n"
+ "fmla v19.8h, v16.8h, v0.h[5]\n"
+ "fmla v18.8h, v16.8h, v0.h[6]\n"
+ "fmla v17.8h, v16.8h, v0.h[7]\n"
+ "ldr q0, [x28, #0x0]\n"
+ "ldr q16, [%x[weights], #0x10]\n"
"add %x[weights], %x[weights], #0x20\n"
"bgt 3b\n"
"4:" // Output channel loop: Kernel loop tail
"tbnz %x[kernel_points], #0, 5f\n"
- "fmla v16.8h, v5.8h, v4.h[0]\n"
- "fmla v17.8h, v5.8h, v4.h[1]\n"
- "lsl x28, x10, #0x1\n"
- "ldr x20, [%x[outptrs], #0x0]\n"
- "fmla v18.8h, v5.8h, v4.h[2]\n"
- "fmla v19.8h, v5.8h, v4.h[3]\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "fmla v20.8h, v5.8h, v4.h[4]\n"
- "fmla v21.8h, v5.8h, v4.h[5]\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "fmla v22.8h, v5.8h, v4.h[6]\n"
- "fmla v23.8h, v5.8h, v4.h[7]\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "fmla v24.8h, v5.8h, v3.h[0]\n"
- "fmla v25.8h, v5.8h, v3.h[1]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
- "fmla v26.8h, v5.8h, v3.h[2]\n"
- "fmla v27.8h, v5.8h, v3.h[3]\n"
- "fmla v28.8h, v5.8h, v3.h[4]\n"
- "fmla v29.8h, v5.8h, v3.h[5]\n"
- "fmla v30.8h, v5.8h, v3.h[6]\n"
- "fmla v31.8h, v5.8h, v3.h[7]\n"
- "fmla v16.8h, v2.8h, v1.h[0]\n"
- "fmla v17.8h, v2.8h, v1.h[1]\n"
- "fmin v16.8h, v16.8h, v6.8h\n"
- "fmla v18.8h, v2.8h, v1.h[2]\n"
- "fmla v19.8h, v2.8h, v1.h[3]\n"
- "fmin v17.8h, v17.8h, v6.8h\n"
- "fmla v20.8h, v2.8h, v1.h[4]\n"
- "fmla v21.8h, v2.8h, v1.h[5]\n"
- "fmin v18.8h, v18.8h, v6.8h\n"
- "fmla v22.8h, v2.8h, v1.h[6]\n"
- "fmla v23.8h, v2.8h, v1.h[7]\n"
- "fmin v19.8h, v19.8h, v6.8h\n"
- "fmla v24.8h, v2.8h, v0.h[0]\n"
- "fmla v25.8h, v2.8h, v0.h[1]\n"
- "fmin v20.8h, v20.8h, v6.8h\n"
- "fmla v26.8h, v2.8h, v0.h[2]\n"
- "fmla v27.8h, v2.8h, v0.h[3]\n"
- "fmin v21.8h, v21.8h, v6.8h\n"
- "fmla v28.8h, v2.8h, v0.h[4]\n"
- "fmla v29.8h, v2.8h, v0.h[5]\n"
- "fmin v22.8h, v22.8h, v6.8h\n"
- "fmla v30.8h, v2.8h, v0.h[6]\n"
- "fmla v31.8h, v2.8h, v0.h[7]\n"
- "fmin v23.8h, v23.8h, v6.8h\n"
- "fmax v16.8h, v16.8h, v7.8h\n"
- "fmax v17.8h, v17.8h, v7.8h\n"
- "str q16, [x20, x28]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "fmax v18.8h, v18.8h, v7.8h\n"
- "fmax v19.8h, v19.8h, v7.8h\n"
- "str q17, [x21, x28]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
- "fmax v20.8h, v20.8h, v7.8h\n"
- "fmax v21.8h, v21.8h, v7.8h\n"
- "str q18, [x22, x28]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
- "fmax v22.8h, v22.8h, v7.8h\n"
- "fmax v23.8h, v23.8h, v7.8h\n"
- "str q19, [x23, x28]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
- "fmin v24.8h, v24.8h, v6.8h\n"
- "fmin v25.8h, v25.8h, v6.8h\n"
- "str q20, [x24, x28]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
- "fmin v26.8h, v26.8h, v6.8h\n"
- "fmin v27.8h, v27.8h, v6.8h\n"
- "str q21, [x25, x28]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
- "fmin v28.8h, v28.8h, v6.8h\n"
- "fmin v29.8h, v29.8h, v6.8h\n"
- "str q22, [x26, x28]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
- "fmin v30.8h, v30.8h, v6.8h\n"
+ "fmla v5.8h, v4.8h, v3.h[0]\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "fmla v31.8h, v4.8h, v3.h[1]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "lsl x27, x10, #0x1\n"
+ "fmla v30.8h, v4.8h, v3.h[2]\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "fmla v29.8h, v4.8h, v3.h[3]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
+ "fmla v28.8h, v4.8h, v3.h[4]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
+ "fmla v27.8h, v4.8h, v3.h[5]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
+ "fmla v26.8h, v4.8h, v3.h[6]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
+ "fmla v25.8h, v4.8h, v3.h[7]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
+ "fmla v24.8h, v4.8h, v2.h[0]\n"
+ "fmla v23.8h, v4.8h, v2.h[1]\n"
+ "fmla v22.8h, v4.8h, v2.h[2]\n"
+ "fmla v21.8h, v4.8h, v2.h[3]\n"
+ "fmla v20.8h, v4.8h, v2.h[4]\n"
+ "fmla v19.8h, v4.8h, v2.h[5]\n"
+ "fmla v18.8h, v4.8h, v2.h[6]\n"
+ "fmla v17.8h, v4.8h, v2.h[7]\n"
+ "fmla v5.8h, v16.8h, v1.h[0]\n"
+ "fmla v31.8h, v16.8h, v1.h[1]\n"
+ "fmla v30.8h, v16.8h, v1.h[2]\n"
+ "fmla v29.8h, v16.8h, v1.h[3]\n"
+ "fmla v28.8h, v16.8h, v1.h[4]\n"
+ "fmla v27.8h, v16.8h, v1.h[5]\n"
+ "fmla v26.8h, v16.8h, v1.h[6]\n"
+ "fmla v25.8h, v16.8h, v1.h[7]\n"
+ "fmla v24.8h, v16.8h, v0.h[0]\n"
+ "fmla v23.8h, v16.8h, v0.h[1]\n"
+ "fmla v22.8h, v16.8h, v0.h[2]\n"
+ "fmla v21.8h, v16.8h, v0.h[3]\n"
+ "fmla v20.8h, v16.8h, v0.h[4]\n"
+ "fmla v19.8h, v16.8h, v0.h[5]\n"
+ "fmla v18.8h, v16.8h, v0.h[6]\n"
+ "fmla v17.8h, v16.8h, v0.h[7]\n"
+ "fmin v5.8h, v5.8h, v6.8h\n"
"fmin v31.8h, v31.8h, v6.8h\n"
- "str q23, [x27, x28]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
- "fmax v24.8h, v24.8h, v7.8h\n"
- "fmax v25.8h, v25.8h, v7.8h\n"
- "str q24, [x20, x28]\n"
- "fmax v26.8h, v26.8h, v7.8h\n"
- "fmax v27.8h, v27.8h, v7.8h\n"
- "str q25, [x21, x28]\n"
- "fmax v28.8h, v28.8h, v7.8h\n"
- "fmax v29.8h, v29.8h, v7.8h\n"
- "str q26, [x22, x28]\n"
- "fmax v30.8h, v30.8h, v7.8h\n"
+ "fmin v30.8h, v30.8h, v6.8h\n"
+ "fmax v5.8h, v5.8h, v7.8h\n"
+ "str q5, [x19, x27]\n"
"fmax v31.8h, v31.8h, v7.8h\n"
- "str q27, [x23, x28]\n"
- "str q28, [x24, x28]\n"
- "str q29, [x25, x28]\n"
- "str q30, [x26, x28]\n"
- "str q31, [x27, x28]\n"
+ "fmax v30.8h, v30.8h, v7.8h\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "fmin v29.8h, v29.8h, v6.8h\n"
+ "str q31, [x20, x27]\n"
+ "fmin v28.8h, v28.8h, v6.8h\n"
+ "fmin v27.8h, v27.8h, v6.8h\n"
+ "str q30, [x21, x27]\n"
+ "fmax v29.8h, v29.8h, v7.8h\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
+ "fmin v26.8h, v26.8h, v6.8h\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
+ "fmax v28.8h, v28.8h, v7.8h\n"
+ "str q29, [x22, x27]\n"
+ "fmax v27.8h, v27.8h, v7.8h\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
+ "fmax v26.8h, v26.8h, v7.8h\n"
+ "str q28, [x23, x27]\n"
+ "fmin v25.8h, v25.8h, v6.8h\n"
+ "str q27, [x24, x27]\n"
+ "fmin v24.8h, v24.8h, v6.8h\n"
+ "str q26, [x25, x27]\n"
+ "fmin v23.8h, v23.8h, v6.8h\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
+ "fmax v25.8h, v25.8h, v7.8h\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
+ "fmax v24.8h, v24.8h, v7.8h\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
+ "fmax v23.8h, v23.8h, v7.8h\n"
+ "str q25, [x26, x27]\n"
+ "fmin v22.8h, v22.8h, v6.8h\n"
+ "str q24, [x19, x27]\n"
+ "fmin v21.8h, v21.8h, v6.8h\n"
+ "str q23, [x20, x27]\n"
+ "fmin v20.8h, v20.8h, v6.8h\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
+ "fmax v22.8h, v22.8h, v7.8h\n"
+ "str q22, [x21, x27]\n"
+ "fmax v21.8h, v21.8h, v7.8h\n"
+ "fmax v20.8h, v20.8h, v7.8h\n"
+ "str q21, [x22, x27]\n"
+ "fmin v19.8h, v19.8h, v6.8h\n"
+ "fmin v18.8h, v18.8h, v6.8h\n"
+ "str q20, [x23, x27]\n"
+ "fmin v17.8h, v17.8h, v6.8h\n"
+ "fmax v19.8h, v19.8h, v7.8h\n"
+ "str q19, [x24, x27]\n"
+ "fmax v18.8h, v18.8h, v7.8h\n"
+ "fmax v17.8h, v17.8h, v7.8h\n"
+ "str q18, [x25, x27]\n"
+ "str q17, [x26, x27]\n"
"b 7f\n"
"5:" // Output channel loop: Odd tail
- "fmla v16.8h, v5.8h, v4.h[0]\n"
- "fmla v17.8h, v5.8h, v4.h[1]\n"
- "ldp x24, x9, [x20], #0x10\n"
- "lsl x28, x10, #0x1\n"
- "fmla v18.8h, v5.8h, v4.h[2]\n"
- "fmla v19.8h, v5.8h, v4.h[3]\n"
- "ldr x20, [%x[outptrs], #0x0]\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
- "fmla v20.8h, v5.8h, v4.h[4]\n"
- "fmla v21.8h, v5.8h, v4.h[5]\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
- "fmla v22.8h, v5.8h, v4.h[6]\n"
- "fmla v23.8h, v5.8h, v4.h[7]\n"
- "ldr q4, [x24, #0x0]\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "fmla v24.8h, v5.8h, v3.h[0]\n"
- "fmla v25.8h, v5.8h, v3.h[1]\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "fmla v26.8h, v5.8h, v3.h[2]\n"
- "fmla v27.8h, v5.8h, v3.h[3]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
- "fmla v28.8h, v5.8h, v3.h[4]\n"
- "fmla v29.8h, v5.8h, v3.h[5]\n"
- "fmla v30.8h, v5.8h, v3.h[6]\n"
- "fmla v31.8h, v5.8h, v3.h[7]\n"
- "ldr q5, [%x[weights], #0x0]\n"
- "ldr q3, [x9, #0x0]\n"
- "fmla v16.8h, v2.8h, v1.h[0]\n"
- "fmla v17.8h, v2.8h, v1.h[1]\n"
+ "fmla v5.8h, v4.8h, v3.h[0]\n"
+ "ldp x25, x28, [x19], #0x10\n"
+ "lsl x27, x10, #0x1\n"
+ "fmla v31.8h, v4.8h, v3.h[1]\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "fmla v30.8h, v4.8h, v3.h[2]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "fmla v29.8h, v4.8h, v3.h[3]\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "fmla v28.8h, v4.8h, v3.h[4]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
+ "fmla v27.8h, v4.8h, v3.h[5]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
+ "fmla v26.8h, v4.8h, v3.h[6]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
+ "fmla v25.8h, v4.8h, v3.h[7]\n"
+ "ldr q3, [x25, #0x0]\n"
+ "fmla v24.8h, v4.8h, v2.h[0]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
+ "fmla v23.8h, v4.8h, v2.h[1]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
+ "fmla v22.8h, v4.8h, v2.h[2]\n"
+ "fmla v21.8h, v4.8h, v2.h[3]\n"
+ "fmla v20.8h, v4.8h, v2.h[4]\n"
+ "fmla v19.8h, v4.8h, v2.h[5]\n"
+ "fmla v18.8h, v4.8h, v2.h[6]\n"
+ "fmla v17.8h, v4.8h, v2.h[7]\n"
+ "ldr q2, [x28, #0x0]\n"
+ "fmla v5.8h, v16.8h, v1.h[0]\n"
+ "ldr q4, [%x[weights], #0x0]\n"
"add %x[weights], %x[weights], #0x10\n"
- "fmla v18.8h, v2.8h, v1.h[2]\n"
- "fmla v19.8h, v2.8h, v1.h[3]\n"
- "fmla v20.8h, v2.8h, v1.h[4]\n"
- "fmla v21.8h, v2.8h, v1.h[5]\n"
- "fmla v22.8h, v2.8h, v1.h[6]\n"
- "fmla v23.8h, v2.8h, v1.h[7]\n"
- "fmla v24.8h, v2.8h, v0.h[0]\n"
- "fmla v25.8h, v2.8h, v0.h[1]\n"
- "fmla v26.8h, v2.8h, v0.h[2]\n"
- "fmla v27.8h, v2.8h, v0.h[3]\n"
- "fmla v28.8h, v2.8h, v0.h[4]\n"
- "fmla v29.8h, v2.8h, v0.h[5]\n"
- "fmla v30.8h, v2.8h, v0.h[6]\n"
- "fmla v31.8h, v2.8h, v0.h[7]\n"
- "fmla v16.8h, v5.8h, v4.h[0]\n"
- "fmla v17.8h, v5.8h, v4.h[1]\n"
- "fmin v16.8h, v16.8h, v6.8h\n"
- "fmla v18.8h, v5.8h, v4.h[2]\n"
- "fmla v19.8h, v5.8h, v4.h[3]\n"
- "fmin v17.8h, v17.8h, v6.8h\n"
- "fmla v20.8h, v5.8h, v4.h[4]\n"
- "fmla v21.8h, v5.8h, v4.h[5]\n"
- "fmin v18.8h, v18.8h, v6.8h\n"
- "fmla v22.8h, v5.8h, v4.h[6]\n"
- "fmla v23.8h, v5.8h, v4.h[7]\n"
- "fmin v19.8h, v19.8h, v6.8h\n"
- "fmla v24.8h, v5.8h, v3.h[0]\n"
- "fmla v25.8h, v5.8h, v3.h[1]\n"
- "fmin v20.8h, v20.8h, v6.8h\n"
- "fmla v26.8h, v5.8h, v3.h[2]\n"
- "fmla v27.8h, v5.8h, v3.h[3]\n"
- "fmin v21.8h, v21.8h, v6.8h\n"
- "fmla v28.8h, v5.8h, v3.h[4]\n"
- "fmla v29.8h, v5.8h, v3.h[5]\n"
- "fmin v22.8h, v22.8h, v6.8h\n"
- "fmla v30.8h, v5.8h, v3.h[6]\n"
- "fmla v31.8h, v5.8h, v3.h[7]\n"
- "fmin v23.8h, v23.8h, v6.8h\n"
- "fmax v16.8h, v16.8h, v7.8h\n"
- "fmax v17.8h, v17.8h, v7.8h\n"
- "str q16, [x20, x28]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "fmax v18.8h, v18.8h, v7.8h\n"
- "fmax v19.8h, v19.8h, v7.8h\n"
- "str q17, [x21, x28]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
- "fmax v20.8h, v20.8h, v7.8h\n"
- "fmax v21.8h, v21.8h, v7.8h\n"
- "str q18, [x22, x28]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
- "fmax v22.8h, v22.8h, v7.8h\n"
- "fmax v23.8h, v23.8h, v7.8h\n"
- "str q19, [x23, x28]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
- "fmin v24.8h, v24.8h, v6.8h\n"
- "fmin v25.8h, v25.8h, v6.8h\n"
- "str q20, [x24, x28]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
- "fmin v26.8h, v26.8h, v6.8h\n"
- "fmin v27.8h, v27.8h, v6.8h\n"
- "str q21, [x25, x28]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
- "fmin v28.8h, v28.8h, v6.8h\n"
- "fmin v29.8h, v29.8h, v6.8h\n"
- "str q22, [x26, x28]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
- "fmin v30.8h, v30.8h, v6.8h\n"
+ "fmla v31.8h, v16.8h, v1.h[1]\n"
+ "fmla v30.8h, v16.8h, v1.h[2]\n"
+ "fmla v29.8h, v16.8h, v1.h[3]\n"
+ "fmla v28.8h, v16.8h, v1.h[4]\n"
+ "fmla v27.8h, v16.8h, v1.h[5]\n"
+ "fmla v26.8h, v16.8h, v1.h[6]\n"
+ "fmla v25.8h, v16.8h, v1.h[7]\n"
+ "fmla v24.8h, v16.8h, v0.h[0]\n"
+ "fmla v23.8h, v16.8h, v0.h[1]\n"
+ "fmla v22.8h, v16.8h, v0.h[2]\n"
+ "fmla v21.8h, v16.8h, v0.h[3]\n"
+ "fmla v20.8h, v16.8h, v0.h[4]\n"
+ "fmla v19.8h, v16.8h, v0.h[5]\n"
+ "fmla v18.8h, v16.8h, v0.h[6]\n"
+ "fmla v17.8h, v16.8h, v0.h[7]\n"
+ "fmla v5.8h, v4.8h, v3.h[0]\n"
+ "fmla v31.8h, v4.8h, v3.h[1]\n"
+ "fmla v30.8h, v4.8h, v3.h[2]\n"
+ "fmla v29.8h, v4.8h, v3.h[3]\n"
+ "fmla v28.8h, v4.8h, v3.h[4]\n"
+ "fmla v27.8h, v4.8h, v3.h[5]\n"
+ "fmla v26.8h, v4.8h, v3.h[6]\n"
+ "fmla v25.8h, v4.8h, v3.h[7]\n"
+ "fmla v24.8h, v4.8h, v2.h[0]\n"
+ "fmla v23.8h, v4.8h, v2.h[1]\n"
+ "fmla v22.8h, v4.8h, v2.h[2]\n"
+ "fmla v21.8h, v4.8h, v2.h[3]\n"
+ "fmla v20.8h, v4.8h, v2.h[4]\n"
+ "fmla v19.8h, v4.8h, v2.h[5]\n"
+ "fmla v18.8h, v4.8h, v2.h[6]\n"
+ "fmla v17.8h, v4.8h, v2.h[7]\n"
+ "fmin v5.8h, v5.8h, v6.8h\n"
"fmin v31.8h, v31.8h, v6.8h\n"
- "str q23, [x27, x28]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
- "fmax v24.8h, v24.8h, v7.8h\n"
- "fmax v25.8h, v25.8h, v7.8h\n"
- "str q24, [x20, x28]\n"
- "fmax v26.8h, v26.8h, v7.8h\n"
- "fmax v27.8h, v27.8h, v7.8h\n"
- "str q25, [x21, x28]\n"
- "fmax v28.8h, v28.8h, v7.8h\n"
- "fmax v29.8h, v29.8h, v7.8h\n"
- "str q26, [x22, x28]\n"
- "fmax v30.8h, v30.8h, v7.8h\n"
+ "fmin v30.8h, v30.8h, v6.8h\n"
+ "fmax v5.8h, v5.8h, v7.8h\n"
+ "str q5, [x19, x27]\n"
"fmax v31.8h, v31.8h, v7.8h\n"
- "str q27, [x23, x28]\n"
- "str q28, [x24, x28]\n"
- "str q29, [x25, x28]\n"
- "str q30, [x26, x28]\n"
- "str q31, [x27, x28]\n"
- "b 7f\n"
- "6:" // Output channel loop: Single kernel point
- "fmla v16.8h, v5.8h, v4.h[0]\n"
- "fmla v17.8h, v5.8h, v4.h[1]\n"
- "fmin v16.8h, v16.8h, v6.8h\n"
- "lsl x28, x10, #0x1\n"
- "fmla v18.8h, v5.8h, v4.h[2]\n"
- "fmla v19.8h, v5.8h, v4.h[3]\n"
- "fmin v17.8h, v17.8h, v6.8h\n"
- "ldr x20, [%x[outptrs], #0x0]\n"
- "fmla v20.8h, v5.8h, v4.h[4]\n"
- "fmla v21.8h, v5.8h, v4.h[5]\n"
- "fmin v18.8h, v18.8h, v6.8h\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
- "fmla v22.8h, v5.8h, v4.h[6]\n"
- "fmla v23.8h, v5.8h, v4.h[7]\n"
- "fmin v19.8h, v19.8h, v6.8h\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "fmla v24.8h, v5.8h, v3.h[0]\n"
- "fmla v25.8h, v5.8h, v3.h[1]\n"
- "fmin v20.8h, v20.8h, v6.8h\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
- "fmla v26.8h, v5.8h, v3.h[2]\n"
- "fmla v27.8h, v5.8h, v3.h[3]\n"
- "fmin v21.8h, v21.8h, v6.8h\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "fmla v28.8h, v5.8h, v3.h[4]\n"
- "fmla v29.8h, v5.8h, v3.h[5]\n"
- "fmin v22.8h, v22.8h, v6.8h\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
- "fmla v30.8h, v5.8h, v3.h[6]\n"
- "fmla v31.8h, v5.8h, v3.h[7]\n"
+ "fmax v30.8h, v30.8h, v7.8h\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "fmin v29.8h, v29.8h, v6.8h\n"
+ "str q31, [x20, x27]\n"
+ "fmin v28.8h, v28.8h, v6.8h\n"
+ "fmin v27.8h, v27.8h, v6.8h\n"
+ "str q30, [x21, x27]\n"
+ "fmax v29.8h, v29.8h, v7.8h\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
+ "fmin v26.8h, v26.8h, v6.8h\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
+ "fmax v28.8h, v28.8h, v7.8h\n"
+ "str q29, [x22, x27]\n"
+ "fmax v27.8h, v27.8h, v7.8h\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
+ "fmax v26.8h, v26.8h, v7.8h\n"
+ "str q28, [x23, x27]\n"
+ "fmin v25.8h, v25.8h, v6.8h\n"
+ "str q27, [x24, x27]\n"
+ "fmin v24.8h, v24.8h, v6.8h\n"
+ "str q26, [x25, x27]\n"
"fmin v23.8h, v23.8h, v6.8h\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
- "fmax v16.8h, v16.8h, v7.8h\n"
- "fmax v17.8h, v17.8h, v7.8h\n"
- "str q16, [x20, x28]\n"
- "fmax v18.8h, v18.8h, v7.8h\n"
- "fmax v19.8h, v19.8h, v7.8h\n"
- "str q17, [x21, x28]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "fmax v20.8h, v20.8h, v7.8h\n"
- "fmax v21.8h, v21.8h, v7.8h\n"
- "str q18, [x22, x28]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
- "fmax v22.8h, v22.8h, v7.8h\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
+ "fmax v25.8h, v25.8h, v7.8h\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
+ "fmax v24.8h, v24.8h, v7.8h\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
"fmax v23.8h, v23.8h, v7.8h\n"
- "str q19, [x23, x28]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
- "fmin v24.8h, v24.8h, v6.8h\n"
- "fmin v25.8h, v25.8h, v6.8h\n"
- "str q20, [x24, x28]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
- "fmin v26.8h, v26.8h, v6.8h\n"
- "fmin v27.8h, v27.8h, v6.8h\n"
- "str q21, [x25, x28]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
- "fmin v28.8h, v28.8h, v6.8h\n"
- "fmin v29.8h, v29.8h, v6.8h\n"
- "str q22, [x26, x28]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
- "fmin v30.8h, v30.8h, v6.8h\n"
+ "str q25, [x26, x27]\n"
+ "fmin v22.8h, v22.8h, v6.8h\n"
+ "str q24, [x19, x27]\n"
+ "fmin v21.8h, v21.8h, v6.8h\n"
+ "str q23, [x20, x27]\n"
+ "fmin v20.8h, v20.8h, v6.8h\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
+ "fmax v22.8h, v22.8h, v7.8h\n"
+ "str q22, [x21, x27]\n"
+ "fmax v21.8h, v21.8h, v7.8h\n"
+ "fmax v20.8h, v20.8h, v7.8h\n"
+ "str q21, [x22, x27]\n"
+ "fmin v19.8h, v19.8h, v6.8h\n"
+ "fmin v18.8h, v18.8h, v6.8h\n"
+ "str q20, [x23, x27]\n"
+ "fmin v17.8h, v17.8h, v6.8h\n"
+ "fmax v19.8h, v19.8h, v7.8h\n"
+ "str q19, [x24, x27]\n"
+ "fmax v18.8h, v18.8h, v7.8h\n"
+ "fmax v17.8h, v17.8h, v7.8h\n"
+ "str q18, [x25, x27]\n"
+ "str q17, [x26, x27]\n"
+ "b 7f\n"
+ "6:" // Output channel loop: Single kernel point
+ "fmla v5.8h, v4.8h, v3.h[0]\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "lsl x27, x10, #0x1\n"
+ "fmla v31.8h, v4.8h, v3.h[1]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "fmla v30.8h, v4.8h, v3.h[2]\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "fmla v29.8h, v4.8h, v3.h[3]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
+ "fmla v28.8h, v4.8h, v3.h[4]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
+ "fmla v27.8h, v4.8h, v3.h[5]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
+ "fmla v26.8h, v4.8h, v3.h[6]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
+ "fmla v25.8h, v4.8h, v3.h[7]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
+ "fmla v24.8h, v4.8h, v2.h[0]\n"
+ "fmla v23.8h, v4.8h, v2.h[1]\n"
+ "fmla v22.8h, v4.8h, v2.h[2]\n"
+ "fmla v21.8h, v4.8h, v2.h[3]\n"
+ "fmla v20.8h, v4.8h, v2.h[4]\n"
+ "fmla v19.8h, v4.8h, v2.h[5]\n"
+ "fmla v18.8h, v4.8h, v2.h[6]\n"
+ "fmla v17.8h, v4.8h, v2.h[7]\n"
+ "fmin v5.8h, v5.8h, v6.8h\n"
"fmin v31.8h, v31.8h, v6.8h\n"
- "str q23, [x27, x28]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
- "fmax v24.8h, v24.8h, v7.8h\n"
- "fmax v25.8h, v25.8h, v7.8h\n"
- "str q24, [x20, x28]\n"
- "fmax v26.8h, v26.8h, v7.8h\n"
- "fmax v27.8h, v27.8h, v7.8h\n"
- "str q25, [x21, x28]\n"
- "fmax v28.8h, v28.8h, v7.8h\n"
- "fmax v29.8h, v29.8h, v7.8h\n"
- "str q26, [x22, x28]\n"
- "fmax v30.8h, v30.8h, v7.8h\n"
+ "fmin v30.8h, v30.8h, v6.8h\n"
+ "fmax v5.8h, v5.8h, v7.8h\n"
+ "str q5, [x19, x27]\n"
"fmax v31.8h, v31.8h, v7.8h\n"
- "str q27, [x23, x28]\n"
- "str q28, [x24, x28]\n"
- "str q29, [x25, x28]\n"
- "str q30, [x26, x28]\n"
- "str q31, [x27, x28]\n"
+ "fmax v30.8h, v30.8h, v7.8h\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "fmin v29.8h, v29.8h, v6.8h\n"
+ "str q31, [x20, x27]\n"
+ "fmin v28.8h, v28.8h, v6.8h\n"
+ "fmin v27.8h, v27.8h, v6.8h\n"
+ "str q30, [x21, x27]\n"
+ "fmax v29.8h, v29.8h, v7.8h\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
+ "fmin v26.8h, v26.8h, v6.8h\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
+ "fmax v28.8h, v28.8h, v7.8h\n"
+ "str q29, [x22, x27]\n"
+ "fmax v27.8h, v27.8h, v7.8h\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
+ "fmax v26.8h, v26.8h, v7.8h\n"
+ "str q28, [x23, x27]\n"
+ "fmin v25.8h, v25.8h, v6.8h\n"
+ "str q27, [x24, x27]\n"
+ "fmin v24.8h, v24.8h, v6.8h\n"
+ "str q26, [x25, x27]\n"
+ "fmin v23.8h, v23.8h, v6.8h\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
+ "fmax v25.8h, v25.8h, v7.8h\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
+ "fmax v24.8h, v24.8h, v7.8h\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
+ "fmax v23.8h, v23.8h, v7.8h\n"
+ "str q25, [x26, x27]\n"
+ "fmin v22.8h, v22.8h, v6.8h\n"
+ "str q24, [x19, x27]\n"
+ "fmin v21.8h, v21.8h, v6.8h\n"
+ "str q23, [x20, x27]\n"
+ "fmin v20.8h, v20.8h, v6.8h\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
+ "fmax v22.8h, v22.8h, v7.8h\n"
+ "str q22, [x21, x27]\n"
+ "fmax v21.8h, v21.8h, v7.8h\n"
+ "fmax v20.8h, v20.8h, v7.8h\n"
+ "str q21, [x22, x27]\n"
+ "fmin v19.8h, v19.8h, v6.8h\n"
+ "fmin v18.8h, v18.8h, v6.8h\n"
+ "str q20, [x23, x27]\n"
+ "fmin v17.8h, v17.8h, v6.8h\n"
+ "fmax v19.8h, v19.8h, v7.8h\n"
+ "str q19, [x24, x27]\n"
+ "fmax v18.8h, v18.8h, v7.8h\n"
+ "fmax v17.8h, v17.8h, v7.8h\n"
+ "str q18, [x25, x27]\n"
+ "str q17, [x26, x27]\n"
"7:" // Output channel loop: Done
"add x10, x10, #0x8\n"
- "cmp x10, x11, LSL #3\n"
+ "cmp x10, x9, LSL #3\n"
"blt 1b\n"
"tst %x[n_output_channels], #0x7\n"
"beq 23f\n"
"8:" // Output channel oddments
- "movi v31.16b, #0x0\n"
+ "movi v16.16b, #0x0\n"
"cbz %x[bias], 13f\n"
- "add x20, %x[bias], x10, LSL #1\n"
+ "add x19, %x[bias], x10, LSL #1\n"
"tbz %x[n_output_channels], #2, 10f\n"
- "ld1 { v31.d }[0], [x20], #0x8\n"
+ "ld1 { v16.d }[0], [x19], #0x8\n"
"tbz %x[n_output_channels], #1, 9f\n"
- "ld1 { v31.s }[2], [x20], #0x4\n"
+ "ld1 { v16.s }[2], [x19], #0x4\n"
"tbz %x[n_output_channels], #0, 12f\n"
- "ld1 { v31.h }[6], [x20]\n"
+ "ld1 { v16.h }[6], [x19]\n"
"b 12f\n"
"9:" // Output channel oddments: Load bias: Bit 2: Bit 1: Unset
"tbz %x[n_output_channels], #0, 12f\n"
- "ld1 { v31.h }[4], [x20]\n"
+ "ld1 { v16.h }[4], [x19]\n"
"b 12f\n"
"10:" // Output channel oddments: Load bias: Bit 2: Unset
"tbz %x[n_output_channels], #1, 11f\n"
- "ld1 { v31.s }[0], [x20], #0x4\n"
+ "ld1 { v16.s }[0], [x19], #0x4\n"
"tbz %x[n_output_channels], #0, 12f\n"
- "ld1 { v31.h }[2], [x20]\n"
+ "ld1 { v16.h }[2], [x19]\n"
"b 12f\n"
"11:" // Output channel oddments: Load bias: Bit 2: Unset: Bit 1: Unset
- "ld1 { v31.h }[0], [x20]\n"
+ "tbz %x[n_output_channels], #0, 12f\n"
+ "ld1 { v16.h }[0], [x19]\n"
"12:" // Output channel oddments: Load bias: Bit 2: End
+
"13:" // Output channel oddments: Load bias: Done
- "ldr q5, [%x[weights], #0x0]\n"
- "mov x20, %x[inptrs]\n"
- "ldp x24, x9, [x20], #0x10\n"
- "lsr x21, %x[kernel_points], #0x1\n"
- "ldr q4, [x24, #0x0]\n"
- "ldr q3, [x9, #0x0]\n"
- "mov v16.16b, v31.16b\n"
- "mov v17.16b, v31.16b\n"
- "mov v18.16b, v31.16b\n"
- "mov v19.16b, v31.16b\n"
+ "mov v5.16b, v16.16b\n"
+ "ldr q4, [%x[weights], #0x0]\n"
+ "mov x19, %x[inptrs]\n"
+ "mov v31.16b, v16.16b\n"
+ "ldp x25, x28, [x19], #0x10\n"
+ "lsr x20, %x[kernel_points], #0x1\n"
+ "mov v30.16b, v16.16b\n"
+ "ldr q3, [x25, #0x0]\n"
+ "mov v29.16b, v16.16b\n"
"add %x[weights], %x[weights], #0x10\n"
- "mov v20.16b, v31.16b\n"
- "mov v21.16b, v31.16b\n"
- "mov v22.16b, v31.16b\n"
- "mov v23.16b, v31.16b\n"
- "mov v24.16b, v31.16b\n"
- "mov v25.16b, v31.16b\n"
- "mov v26.16b, v31.16b\n"
- "mov v27.16b, v31.16b\n"
- "mov v28.16b, v31.16b\n"
- "mov v29.16b, v31.16b\n"
- "mov v30.16b, v31.16b\n"
- "mov v31.16b, v31.16b\n"
- "cbz x21, 17f\n"
- "ldr q2, [%x[weights], #0x0]\n"
- "ldp x24, x9, [x20], #0x10\n"
- "subs x21, x21, #0x1\n"
+ "mov v28.16b, v16.16b\n"
+ "ldr q2, [x28, #0x0]\n"
+ "mov v27.16b, v16.16b\n"
+ "mov v26.16b, v16.16b\n"
+ "mov v25.16b, v16.16b\n"
+ "mov v24.16b, v16.16b\n"
+ "mov v23.16b, v16.16b\n"
+ "mov v22.16b, v16.16b\n"
+ "mov v21.16b, v16.16b\n"
+ "mov v20.16b, v16.16b\n"
+ "mov v19.16b, v16.16b\n"
+ "mov v18.16b, v16.16b\n"
+ "mov v17.16b, v16.16b\n"
+ "cbz x20, 17f\n"
+ "ldp x25, x28, [x19], #0x10\n"
+ "ldr q16, [%x[weights], #0x0]\n"
+ "subs x20, x20, #0x1\n"
"add %x[weights], %x[weights], #0x10\n"
- "ldr q1, [x24, #0x0]\n"
- "ldr q0, [x9, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "ldr q0, [x28, #0x0]\n"
"beq 15f\n"
"14:" // Output channel oddments: Kernel loop
- "ldp x24, x9, [x20], #0x10\n"
- "fmla v16.8h, v5.8h, v4.h[0]\n"
- "fmla v17.8h, v5.8h, v4.h[1]\n"
- "subs x21, x21, #0x1\n"
- "fmla v18.8h, v5.8h, v4.h[2]\n"
- "fmla v19.8h, v5.8h, v4.h[3]\n"
- "fmla v20.8h, v5.8h, v4.h[4]\n"
- "fmla v21.8h, v5.8h, v4.h[5]\n"
- "fmla v22.8h, v5.8h, v4.h[6]\n"
- "fmla v23.8h, v5.8h, v4.h[7]\n"
- "ldr q4, [x24, #0x0]\n"
- "fmla v24.8h, v5.8h, v3.h[0]\n"
- "fmla v25.8h, v5.8h, v3.h[1]\n"
- "fmla v26.8h, v5.8h, v3.h[2]\n"
- "fmla v27.8h, v5.8h, v3.h[3]\n"
- "fmla v28.8h, v5.8h, v3.h[4]\n"
- "fmla v29.8h, v5.8h, v3.h[5]\n"
- "fmla v30.8h, v5.8h, v3.h[6]\n"
- "fmla v31.8h, v5.8h, v3.h[7]\n"
- "ldr q3, [x9, #0x0]\n"
- "ldr q5, [%x[weights], #0x0]\n"
- "ldp x24, x9, [x20], #0x10\n"
- "fmla v16.8h, v2.8h, v1.h[0]\n"
- "fmla v17.8h, v2.8h, v1.h[1]\n"
- "fmla v18.8h, v2.8h, v1.h[2]\n"
- "fmla v19.8h, v2.8h, v1.h[3]\n"
- "fmla v20.8h, v2.8h, v1.h[4]\n"
- "fmla v21.8h, v2.8h, v1.h[5]\n"
- "fmla v22.8h, v2.8h, v1.h[6]\n"
- "fmla v23.8h, v2.8h, v1.h[7]\n"
- "ldr q1, [x24, #0x0]\n"
- "fmla v24.8h, v2.8h, v0.h[0]\n"
- "fmla v25.8h, v2.8h, v0.h[1]\n"
- "fmla v26.8h, v2.8h, v0.h[2]\n"
- "fmla v27.8h, v2.8h, v0.h[3]\n"
- "fmla v28.8h, v2.8h, v0.h[4]\n"
- "fmla v29.8h, v2.8h, v0.h[5]\n"
- "fmla v30.8h, v2.8h, v0.h[6]\n"
- "fmla v31.8h, v2.8h, v0.h[7]\n"
- "ldr q0, [x9, #0x0]\n"
- "ldr q2, [%x[weights], #0x10]\n"
+ "fmla v5.8h, v4.8h, v3.h[0]\n"
+ "ldp x25, x28, [x19], #0x10\n"
+ "subs x20, x20, #0x1\n"
+ "fmla v31.8h, v4.8h, v3.h[1]\n"
+ "fmla v30.8h, v4.8h, v3.h[2]\n"
+ "fmla v29.8h, v4.8h, v3.h[3]\n"
+ "fmla v28.8h, v4.8h, v3.h[4]\n"
+ "fmla v27.8h, v4.8h, v3.h[5]\n"
+ "fmla v26.8h, v4.8h, v3.h[6]\n"
+ "fmla v25.8h, v4.8h, v3.h[7]\n"
+ "ldr q3, [x25, #0x0]\n"
+ "fmla v24.8h, v4.8h, v2.h[0]\n"
+ "fmla v23.8h, v4.8h, v2.h[1]\n"
+ "fmla v22.8h, v4.8h, v2.h[2]\n"
+ "fmla v21.8h, v4.8h, v2.h[3]\n"
+ "fmla v20.8h, v4.8h, v2.h[4]\n"
+ "fmla v19.8h, v4.8h, v2.h[5]\n"
+ "fmla v18.8h, v4.8h, v2.h[6]\n"
+ "fmla v17.8h, v4.8h, v2.h[7]\n"
+ "ldr q2, [x28, #0x0]\n"
+ "fmla v5.8h, v16.8h, v1.h[0]\n"
+ "ldr q4, [%x[weights], #0x0]\n"
+ "fmla v31.8h, v16.8h, v1.h[1]\n"
+ "ldp x25, x28, [x19], #0x10\n"
+ "fmla v30.8h, v16.8h, v1.h[2]\n"
+ "fmla v29.8h, v16.8h, v1.h[3]\n"
+ "fmla v28.8h, v16.8h, v1.h[4]\n"
+ "fmla v27.8h, v16.8h, v1.h[5]\n"
+ "fmla v26.8h, v16.8h, v1.h[6]\n"
+ "fmla v25.8h, v16.8h, v1.h[7]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "fmla v24.8h, v16.8h, v0.h[0]\n"
+ "fmla v23.8h, v16.8h, v0.h[1]\n"
+ "fmla v22.8h, v16.8h, v0.h[2]\n"
+ "fmla v21.8h, v16.8h, v0.h[3]\n"
+ "fmla v20.8h, v16.8h, v0.h[4]\n"
+ "fmla v19.8h, v16.8h, v0.h[5]\n"
+ "fmla v18.8h, v16.8h, v0.h[6]\n"
+ "fmla v17.8h, v16.8h, v0.h[7]\n"
+ "ldr q0, [x28, #0x0]\n"
+ "ldr q16, [%x[weights], #0x10]\n"
"add %x[weights], %x[weights], #0x20\n"
"bgt 14b\n"
"15:" // Output channel oddments: Kernel loop tail
"tbnz %x[kernel_points], #0, 16f\n"
- "fmla v16.8h, v5.8h, v4.h[0]\n"
- "fmla v17.8h, v5.8h, v4.h[1]\n"
- "fmla v18.8h, v5.8h, v4.h[2]\n"
- "fmla v19.8h, v5.8h, v4.h[3]\n"
- "fmla v20.8h, v5.8h, v4.h[4]\n"
- "fmla v21.8h, v5.8h, v4.h[5]\n"
- "fmla v22.8h, v5.8h, v4.h[6]\n"
- "fmla v23.8h, v5.8h, v4.h[7]\n"
- "fmla v24.8h, v5.8h, v3.h[0]\n"
- "fmla v25.8h, v5.8h, v3.h[1]\n"
- "fmla v26.8h, v5.8h, v3.h[2]\n"
- "fmla v27.8h, v5.8h, v3.h[3]\n"
- "fmla v28.8h, v5.8h, v3.h[4]\n"
- "fmla v29.8h, v5.8h, v3.h[5]\n"
- "fmla v30.8h, v5.8h, v3.h[6]\n"
- "fmla v31.8h, v5.8h, v3.h[7]\n"
- "fmla v16.8h, v2.8h, v1.h[0]\n"
- "fmla v17.8h, v2.8h, v1.h[1]\n"
- "fmla v18.8h, v2.8h, v1.h[2]\n"
- "fmla v19.8h, v2.8h, v1.h[3]\n"
- "fmla v20.8h, v2.8h, v1.h[4]\n"
- "fmla v21.8h, v2.8h, v1.h[5]\n"
- "fmla v22.8h, v2.8h, v1.h[6]\n"
- "fmla v23.8h, v2.8h, v1.h[7]\n"
- "fmla v24.8h, v2.8h, v0.h[0]\n"
- "fmla v25.8h, v2.8h, v0.h[1]\n"
- "fmla v26.8h, v2.8h, v0.h[2]\n"
- "fmla v27.8h, v2.8h, v0.h[3]\n"
- "fmla v28.8h, v2.8h, v0.h[4]\n"
- "fmla v29.8h, v2.8h, v0.h[5]\n"
- "fmla v30.8h, v2.8h, v0.h[6]\n"
- "fmla v31.8h, v2.8h, v0.h[7]\n"
+ "fmla v5.8h, v4.8h, v3.h[0]\n"
+ "fmla v31.8h, v4.8h, v3.h[1]\n"
+ "fmla v30.8h, v4.8h, v3.h[2]\n"
+ "fmla v29.8h, v4.8h, v3.h[3]\n"
+ "fmla v28.8h, v4.8h, v3.h[4]\n"
+ "fmla v27.8h, v4.8h, v3.h[5]\n"
+ "fmla v26.8h, v4.8h, v3.h[6]\n"
+ "fmla v25.8h, v4.8h, v3.h[7]\n"
+ "fmla v24.8h, v4.8h, v2.h[0]\n"
+ "fmla v23.8h, v4.8h, v2.h[1]\n"
+ "fmla v22.8h, v4.8h, v2.h[2]\n"
+ "fmla v21.8h, v4.8h, v2.h[3]\n"
+ "fmla v20.8h, v4.8h, v2.h[4]\n"
+ "fmla v19.8h, v4.8h, v2.h[5]\n"
+ "fmla v18.8h, v4.8h, v2.h[6]\n"
+ "fmla v17.8h, v4.8h, v2.h[7]\n"
+ "fmla v5.8h, v16.8h, v1.h[0]\n"
+ "fmla v31.8h, v16.8h, v1.h[1]\n"
+ "fmla v30.8h, v16.8h, v1.h[2]\n"
+ "fmla v29.8h, v16.8h, v1.h[3]\n"
+ "fmla v28.8h, v16.8h, v1.h[4]\n"
+ "fmla v27.8h, v16.8h, v1.h[5]\n"
+ "fmla v26.8h, v16.8h, v1.h[6]\n"
+ "fmla v25.8h, v16.8h, v1.h[7]\n"
+ "fmla v24.8h, v16.8h, v0.h[0]\n"
+ "fmla v23.8h, v16.8h, v0.h[1]\n"
+ "fmla v22.8h, v16.8h, v0.h[2]\n"
+ "fmla v21.8h, v16.8h, v0.h[3]\n"
+ "fmla v20.8h, v16.8h, v0.h[4]\n"
+ "fmla v19.8h, v16.8h, v0.h[5]\n"
+ "fmla v18.8h, v16.8h, v0.h[6]\n"
+ "fmla v17.8h, v16.8h, v0.h[7]\n"
"b 18f\n"
"16:" // Output channel oddments: Odd tail
- "fmla v16.8h, v5.8h, v4.h[0]\n"
- "fmla v17.8h, v5.8h, v4.h[1]\n"
- "ldp x24, x9, [x20], #0x10\n"
- "fmla v18.8h, v5.8h, v4.h[2]\n"
- "fmla v19.8h, v5.8h, v4.h[3]\n"
- "fmla v20.8h, v5.8h, v4.h[4]\n"
- "fmla v21.8h, v5.8h, v4.h[5]\n"
- "fmla v22.8h, v5.8h, v4.h[6]\n"
- "fmla v23.8h, v5.8h, v4.h[7]\n"
- "ldr q4, [x24, #0x0]\n"
- "fmla v24.8h, v5.8h, v3.h[0]\n"
- "fmla v25.8h, v5.8h, v3.h[1]\n"
- "fmla v26.8h, v5.8h, v3.h[2]\n"
- "fmla v27.8h, v5.8h, v3.h[3]\n"
- "fmla v28.8h, v5.8h, v3.h[4]\n"
- "fmla v29.8h, v5.8h, v3.h[5]\n"
- "fmla v30.8h, v5.8h, v3.h[6]\n"
- "fmla v31.8h, v5.8h, v3.h[7]\n"
- "ldr q3, [x9, #0x0]\n"
- "ldr q5, [%x[weights], #0x0]\n"
- "fmla v16.8h, v2.8h, v1.h[0]\n"
- "fmla v17.8h, v2.8h, v1.h[1]\n"
+ "fmla v5.8h, v4.8h, v3.h[0]\n"
+ "ldp x25, x28, [x19], #0x10\n"
+ "fmla v31.8h, v4.8h, v3.h[1]\n"
+ "fmla v30.8h, v4.8h, v3.h[2]\n"
+ "fmla v29.8h, v4.8h, v3.h[3]\n"
+ "fmla v28.8h, v4.8h, v3.h[4]\n"
+ "fmla v27.8h, v4.8h, v3.h[5]\n"
+ "fmla v26.8h, v4.8h, v3.h[6]\n"
+ "fmla v25.8h, v4.8h, v3.h[7]\n"
+ "ldr q3, [x25, #0x0]\n"
+ "fmla v24.8h, v4.8h, v2.h[0]\n"
+ "fmla v23.8h, v4.8h, v2.h[1]\n"
+ "fmla v22.8h, v4.8h, v2.h[2]\n"
+ "fmla v21.8h, v4.8h, v2.h[3]\n"
+ "fmla v20.8h, v4.8h, v2.h[4]\n"
+ "fmla v19.8h, v4.8h, v2.h[5]\n"
+ "fmla v18.8h, v4.8h, v2.h[6]\n"
+ "fmla v17.8h, v4.8h, v2.h[7]\n"
+ "ldr q2, [x28, #0x0]\n"
+ "fmla v5.8h, v16.8h, v1.h[0]\n"
+ "ldr q4, [%x[weights], #0x0]\n"
"add %x[weights], %x[weights], #0x10\n"
- "fmla v18.8h, v2.8h, v1.h[2]\n"
- "fmla v19.8h, v2.8h, v1.h[3]\n"
- "fmla v20.8h, v2.8h, v1.h[4]\n"
- "fmla v21.8h, v2.8h, v1.h[5]\n"
- "fmla v22.8h, v2.8h, v1.h[6]\n"
- "fmla v23.8h, v2.8h, v1.h[7]\n"
- "fmla v24.8h, v2.8h, v0.h[0]\n"
- "fmla v25.8h, v2.8h, v0.h[1]\n"
- "fmla v26.8h, v2.8h, v0.h[2]\n"
- "fmla v27.8h, v2.8h, v0.h[3]\n"
- "fmla v28.8h, v2.8h, v0.h[4]\n"
- "fmla v29.8h, v2.8h, v0.h[5]\n"
- "fmla v30.8h, v2.8h, v0.h[6]\n"
- "fmla v31.8h, v2.8h, v0.h[7]\n"
- "fmla v16.8h, v5.8h, v4.h[0]\n"
- "fmla v17.8h, v5.8h, v4.h[1]\n"
- "fmla v18.8h, v5.8h, v4.h[2]\n"
- "fmla v19.8h, v5.8h, v4.h[3]\n"
- "fmla v20.8h, v5.8h, v4.h[4]\n"
- "fmla v21.8h, v5.8h, v4.h[5]\n"
- "fmla v22.8h, v5.8h, v4.h[6]\n"
- "fmla v23.8h, v5.8h, v4.h[7]\n"
- "fmla v24.8h, v5.8h, v3.h[0]\n"
- "fmla v25.8h, v5.8h, v3.h[1]\n"
- "fmla v26.8h, v5.8h, v3.h[2]\n"
- "fmla v27.8h, v5.8h, v3.h[3]\n"
- "fmla v28.8h, v5.8h, v3.h[4]\n"
- "fmla v29.8h, v5.8h, v3.h[5]\n"
- "fmla v30.8h, v5.8h, v3.h[6]\n"
- "fmla v31.8h, v5.8h, v3.h[7]\n"
+ "fmla v31.8h, v16.8h, v1.h[1]\n"
+ "fmla v30.8h, v16.8h, v1.h[2]\n"
+ "fmla v29.8h, v16.8h, v1.h[3]\n"
+ "fmla v28.8h, v16.8h, v1.h[4]\n"
+ "fmla v27.8h, v16.8h, v1.h[5]\n"
+ "fmla v26.8h, v16.8h, v1.h[6]\n"
+ "fmla v25.8h, v16.8h, v1.h[7]\n"
+ "fmla v24.8h, v16.8h, v0.h[0]\n"
+ "fmla v23.8h, v16.8h, v0.h[1]\n"
+ "fmla v22.8h, v16.8h, v0.h[2]\n"
+ "fmla v21.8h, v16.8h, v0.h[3]\n"
+ "fmla v20.8h, v16.8h, v0.h[4]\n"
+ "fmla v19.8h, v16.8h, v0.h[5]\n"
+ "fmla v18.8h, v16.8h, v0.h[6]\n"
+ "fmla v17.8h, v16.8h, v0.h[7]\n"
+ "fmla v5.8h, v4.8h, v3.h[0]\n"
+ "fmla v31.8h, v4.8h, v3.h[1]\n"
+ "fmla v30.8h, v4.8h, v3.h[2]\n"
+ "fmla v29.8h, v4.8h, v3.h[3]\n"
+ "fmla v28.8h, v4.8h, v3.h[4]\n"
+ "fmla v27.8h, v4.8h, v3.h[5]\n"
+ "fmla v26.8h, v4.8h, v3.h[6]\n"
+ "fmla v25.8h, v4.8h, v3.h[7]\n"
+ "fmla v24.8h, v4.8h, v2.h[0]\n"
+ "fmla v23.8h, v4.8h, v2.h[1]\n"
+ "fmla v22.8h, v4.8h, v2.h[2]\n"
+ "fmla v21.8h, v4.8h, v2.h[3]\n"
+ "fmla v20.8h, v4.8h, v2.h[4]\n"
+ "fmla v19.8h, v4.8h, v2.h[5]\n"
+ "fmla v18.8h, v4.8h, v2.h[6]\n"
+ "fmla v17.8h, v4.8h, v2.h[7]\n"
"b 18f\n"
"17:" // Output channel oddments: Single kernel point
- "fmla v16.8h, v5.8h, v4.h[0]\n"
- "fmla v17.8h, v5.8h, v4.h[1]\n"
- "fmla v18.8h, v5.8h, v4.h[2]\n"
- "fmla v19.8h, v5.8h, v4.h[3]\n"
- "fmla v20.8h, v5.8h, v4.h[4]\n"
- "fmla v21.8h, v5.8h, v4.h[5]\n"
- "fmla v22.8h, v5.8h, v4.h[6]\n"
- "fmla v23.8h, v5.8h, v4.h[7]\n"
- "fmla v24.8h, v5.8h, v3.h[0]\n"
- "fmla v25.8h, v5.8h, v3.h[1]\n"
- "fmla v26.8h, v5.8h, v3.h[2]\n"
- "fmla v27.8h, v5.8h, v3.h[3]\n"
- "fmla v28.8h, v5.8h, v3.h[4]\n"
- "fmla v29.8h, v5.8h, v3.h[5]\n"
- "fmla v30.8h, v5.8h, v3.h[6]\n"
- "fmla v31.8h, v5.8h, v3.h[7]\n"
+ "fmla v5.8h, v4.8h, v3.h[0]\n"
+ "fmla v31.8h, v4.8h, v3.h[1]\n"
+ "fmla v30.8h, v4.8h, v3.h[2]\n"
+ "fmla v29.8h, v4.8h, v3.h[3]\n"
+ "fmla v28.8h, v4.8h, v3.h[4]\n"
+ "fmla v27.8h, v4.8h, v3.h[5]\n"
+ "fmla v26.8h, v4.8h, v3.h[6]\n"
+ "fmla v25.8h, v4.8h, v3.h[7]\n"
+ "fmla v24.8h, v4.8h, v2.h[0]\n"
+ "fmla v23.8h, v4.8h, v2.h[1]\n"
+ "fmla v22.8h, v4.8h, v2.h[2]\n"
+ "fmla v21.8h, v4.8h, v2.h[3]\n"
+ "fmla v20.8h, v4.8h, v2.h[4]\n"
+ "fmla v19.8h, v4.8h, v2.h[5]\n"
+ "fmla v18.8h, v4.8h, v2.h[6]\n"
+ "fmla v17.8h, v4.8h, v2.h[7]\n"
"18:" // Output channel oddments: Done
- "fmin v16.8h, v16.8h, v6.8h\n"
- "fmin v17.8h, v17.8h, v6.8h\n"
- "fmin v18.8h, v18.8h, v6.8h\n"
- "fmin v19.8h, v19.8h, v6.8h\n"
- "fmin v20.8h, v20.8h, v6.8h\n"
- "fmin v21.8h, v21.8h, v6.8h\n"
- "fmin v22.8h, v22.8h, v6.8h\n"
- "fmin v23.8h, v23.8h, v6.8h\n"
- "fmin v24.8h, v24.8h, v6.8h\n"
- "fmin v25.8h, v25.8h, v6.8h\n"
- "fmin v26.8h, v26.8h, v6.8h\n"
- "fmin v27.8h, v27.8h, v6.8h\n"
- "fmin v28.8h, v28.8h, v6.8h\n"
- "fmin v29.8h, v29.8h, v6.8h\n"
- "fmin v30.8h, v30.8h, v6.8h\n"
+ "fmin v5.8h, v5.8h, v6.8h\n"
"fmin v31.8h, v31.8h, v6.8h\n"
- "fmax v16.8h, v16.8h, v7.8h\n"
- "fmax v17.8h, v17.8h, v7.8h\n"
- "fmax v18.8h, v18.8h, v7.8h\n"
- "fmax v19.8h, v19.8h, v7.8h\n"
- "fmax v20.8h, v20.8h, v7.8h\n"
- "fmax v21.8h, v21.8h, v7.8h\n"
- "fmax v22.8h, v22.8h, v7.8h\n"
- "fmax v23.8h, v23.8h, v7.8h\n"
- "fmax v24.8h, v24.8h, v7.8h\n"
- "fmax v25.8h, v25.8h, v7.8h\n"
- "fmax v26.8h, v26.8h, v7.8h\n"
- "fmax v27.8h, v27.8h, v7.8h\n"
- "fmax v28.8h, v28.8h, v7.8h\n"
- "fmax v29.8h, v29.8h, v7.8h\n"
- "fmax v30.8h, v30.8h, v7.8h\n"
+ "fmin v30.8h, v30.8h, v6.8h\n"
+ "fmin v29.8h, v29.8h, v6.8h\n"
+ "fmax v5.8h, v5.8h, v7.8h\n"
"fmax v31.8h, v31.8h, v7.8h\n"
+ "fmax v30.8h, v30.8h, v7.8h\n"
+ "fmax v29.8h, v29.8h, v7.8h\n"
+ "fmin v28.8h, v28.8h, v6.8h\n"
+ "fmin v27.8h, v27.8h, v6.8h\n"
+ "fmin v26.8h, v26.8h, v6.8h\n"
+ "fmax v28.8h, v28.8h, v7.8h\n"
+ "fmax v27.8h, v27.8h, v7.8h\n"
+ "fmax v26.8h, v26.8h, v7.8h\n"
+ "fmin v25.8h, v25.8h, v6.8h\n"
+ "fmin v24.8h, v24.8h, v6.8h\n"
+ "fmin v23.8h, v23.8h, v6.8h\n"
+ "fmax v25.8h, v25.8h, v7.8h\n"
+ "fmax v24.8h, v24.8h, v7.8h\n"
+ "fmax v23.8h, v23.8h, v7.8h\n"
+ "fmin v22.8h, v22.8h, v6.8h\n"
+ "fmin v21.8h, v21.8h, v6.8h\n"
+ "fmin v20.8h, v20.8h, v6.8h\n"
+ "fmax v22.8h, v22.8h, v7.8h\n"
+ "fmax v21.8h, v21.8h, v7.8h\n"
+ "fmax v20.8h, v20.8h, v7.8h\n"
+ "fmin v19.8h, v19.8h, v6.8h\n"
+ "fmin v18.8h, v18.8h, v6.8h\n"
+ "fmin v17.8h, v17.8h, v6.8h\n"
+ "fmax v19.8h, v19.8h, v7.8h\n"
+ "fmax v18.8h, v18.8h, v7.8h\n"
+ "fmax v17.8h, v17.8h, v7.8h\n"
"tbz %x[n_output_channels], #2, 20f\n"
- "ldr x20, [%x[outptrs], #0x0]\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "add x19, x19, x10, LSL #1\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
"add x20, x20, x10, LSL #1\n"
+ "st1 { v5.d }[0], [x19]\n"
"add x21, x21, x10, LSL #1\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
+ "st1 { v31.d }[0], [x20]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
"add x22, x22, x10, LSL #1\n"
+ "st1 { v30.d }[0], [x21]\n"
"add x23, x23, x10, LSL #1\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
+ "st1 { v29.d }[0], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
"add x24, x24, x10, LSL #1\n"
+ "st1 { v28.d }[0], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
"add x25, x25, x10, LSL #1\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
+ "st1 { v27.d }[0], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
"add x26, x26, x10, LSL #1\n"
- "add x27, x27, x10, LSL #1\n"
- "st1 { v16.d }[0], [x20]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
+ "st1 { v26.d }[0], [x25]\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "add x19, x19, x10, LSL #1\n"
+ "st1 { v25.d }[0], [x26]\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
"add x20, x20, x10, LSL #1\n"
- "st1 { v17.d }[0], [x21]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
+ "st1 { v24.d }[0], [x19]\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
"add x21, x21, x10, LSL #1\n"
- "st1 { v18.d }[0], [x22]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
+ "st1 { v23.d }[0], [x20]\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
"add x22, x22, x10, LSL #1\n"
- "st1 { v19.d }[0], [x23]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
+ "st1 { v22.d }[0], [x21]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
"add x23, x23, x10, LSL #1\n"
- "st1 { v20.d }[0], [x24]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
+ "st1 { v21.d }[0], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
"add x24, x24, x10, LSL #1\n"
- "st1 { v21.d }[0], [x25]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
+ "st1 { v20.d }[0], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
"add x25, x25, x10, LSL #1\n"
- "st1 { v22.d }[0], [x26]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
+ "st1 { v19.d }[0], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
"add x26, x26, x10, LSL #1\n"
- "st1 { v23.d }[0], [x27]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
- "add x27, x27, x10, LSL #1\n"
+ "st1 { v18.d }[0], [x25]\n"
"add x10, x10, #0x4\n"
- "st1 { v24.d }[0], [x20]\n"
- "st1 { v25.d }[0], [x21]\n"
- "st1 { v26.d }[0], [x22]\n"
- "st1 { v27.d }[0], [x23]\n"
- "st1 { v28.d }[0], [x24]\n"
- "st1 { v29.d }[0], [x25]\n"
- "st1 { v30.d }[0], [x26]\n"
- "st1 { v31.d }[0], [x27]\n"
+ "st1 { v17.d }[0], [x26]\n"
"tbz %x[n_output_channels], #1, 19f\n"
- "ldr x20, [%x[outptrs], #0x0]\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "add x19, x19, x10, LSL #1\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
"add x20, x20, x10, LSL #1\n"
+ "st1 { v5.s }[2], [x19]\n"
"add x21, x21, x10, LSL #1\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
+ "st1 { v31.s }[2], [x20]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
"add x22, x22, x10, LSL #1\n"
+ "st1 { v30.s }[2], [x21]\n"
"add x23, x23, x10, LSL #1\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
+ "st1 { v29.s }[2], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
"add x24, x24, x10, LSL #1\n"
+ "st1 { v28.s }[2], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
"add x25, x25, x10, LSL #1\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
+ "st1 { v27.s }[2], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
"add x26, x26, x10, LSL #1\n"
- "add x27, x27, x10, LSL #1\n"
- "st1 { v16.s }[2], [x20]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
+ "st1 { v26.s }[2], [x25]\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "add x19, x19, x10, LSL #1\n"
+ "st1 { v25.s }[2], [x26]\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
"add x20, x20, x10, LSL #1\n"
- "st1 { v17.s }[2], [x21]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
+ "st1 { v24.s }[2], [x19]\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
"add x21, x21, x10, LSL #1\n"
- "st1 { v18.s }[2], [x22]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
+ "st1 { v23.s }[2], [x20]\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
"add x22, x22, x10, LSL #1\n"
- "st1 { v19.s }[2], [x23]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
+ "st1 { v22.s }[2], [x21]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
"add x23, x23, x10, LSL #1\n"
- "st1 { v20.s }[2], [x24]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
+ "st1 { v21.s }[2], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
"add x24, x24, x10, LSL #1\n"
- "st1 { v21.s }[2], [x25]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
+ "st1 { v20.s }[2], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
"add x25, x25, x10, LSL #1\n"
- "st1 { v22.s }[2], [x26]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
+ "st1 { v19.s }[2], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
"add x26, x26, x10, LSL #1\n"
- "st1 { v23.s }[2], [x27]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
- "add x27, x27, x10, LSL #1\n"
+ "st1 { v18.s }[2], [x25]\n"
"add x10, x10, #0x2\n"
- "st1 { v24.s }[2], [x20]\n"
- "st1 { v25.s }[2], [x21]\n"
- "st1 { v26.s }[2], [x22]\n"
- "st1 { v27.s }[2], [x23]\n"
- "st1 { v28.s }[2], [x24]\n"
- "st1 { v29.s }[2], [x25]\n"
- "st1 { v30.s }[2], [x26]\n"
- "st1 { v31.s }[2], [x27]\n"
+ "st1 { v17.s }[2], [x26]\n"
"tbz %x[n_output_channels], #0, 22f\n"
- "ldr x20, [%x[outptrs], #0x0]\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "add x19, x19, x10, LSL #1\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
"add x20, x20, x10, LSL #1\n"
+ "st1 { v5.h }[6], [x19]\n"
"add x21, x21, x10, LSL #1\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
+ "st1 { v31.h }[6], [x20]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
"add x22, x22, x10, LSL #1\n"
+ "st1 { v30.h }[6], [x21]\n"
"add x23, x23, x10, LSL #1\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
+ "st1 { v29.h }[6], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
"add x24, x24, x10, LSL #1\n"
+ "st1 { v28.h }[6], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
"add x25, x25, x10, LSL #1\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
+ "st1 { v27.h }[6], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
"add x26, x26, x10, LSL #1\n"
- "add x27, x27, x10, LSL #1\n"
- "st1 { v16.h }[6], [x20]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
+ "st1 { v26.h }[6], [x25]\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "add x19, x19, x10, LSL #1\n"
+ "st1 { v25.h }[6], [x26]\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
"add x20, x20, x10, LSL #1\n"
- "st1 { v17.h }[6], [x21]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
+ "st1 { v24.h }[6], [x19]\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
"add x21, x21, x10, LSL #1\n"
- "st1 { v18.h }[6], [x22]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
+ "st1 { v23.h }[6], [x20]\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
"add x22, x22, x10, LSL #1\n"
- "st1 { v19.h }[6], [x23]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
+ "st1 { v22.h }[6], [x21]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
"add x23, x23, x10, LSL #1\n"
- "st1 { v20.h }[6], [x24]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
+ "st1 { v21.h }[6], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
"add x24, x24, x10, LSL #1\n"
- "st1 { v21.h }[6], [x25]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
+ "st1 { v20.h }[6], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
"add x25, x25, x10, LSL #1\n"
- "st1 { v22.h }[6], [x26]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
+ "st1 { v19.h }[6], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
"add x26, x26, x10, LSL #1\n"
- "st1 { v23.h }[6], [x27]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
- "add x27, x27, x10, LSL #1\n"
- "st1 { v24.h }[6], [x20]\n"
- "st1 { v25.h }[6], [x21]\n"
- "st1 { v26.h }[6], [x22]\n"
- "st1 { v27.h }[6], [x23]\n"
- "st1 { v28.h }[6], [x24]\n"
- "st1 { v29.h }[6], [x25]\n"
- "st1 { v30.h }[6], [x26]\n"
- "st1 { v31.h }[6], [x27]\n"
+ "st1 { v18.h }[6], [x25]\n"
+ "st1 { v17.h }[6], [x26]\n"
"b 22f\n"
"19:" // Output channel oddments: Done: Store: Bit 2: Bit 1: Unset
"tbz %x[n_output_channels], #0, 22f\n"
- "ldr x20, [%x[outptrs], #0x0]\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "add x19, x19, x10, LSL #1\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
"add x20, x20, x10, LSL #1\n"
+ "st1 { v5.h }[4], [x19]\n"
"add x21, x21, x10, LSL #1\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
+ "st1 { v31.h }[4], [x20]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
"add x22, x22, x10, LSL #1\n"
+ "st1 { v30.h }[4], [x21]\n"
"add x23, x23, x10, LSL #1\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
+ "st1 { v29.h }[4], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
"add x24, x24, x10, LSL #1\n"
+ "st1 { v28.h }[4], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
"add x25, x25, x10, LSL #1\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
+ "st1 { v27.h }[4], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
"add x26, x26, x10, LSL #1\n"
- "add x27, x27, x10, LSL #1\n"
- "st1 { v16.h }[4], [x20]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
+ "st1 { v26.h }[4], [x25]\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "add x19, x19, x10, LSL #1\n"
+ "st1 { v25.h }[4], [x26]\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
"add x20, x20, x10, LSL #1\n"
- "st1 { v17.h }[4], [x21]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
+ "st1 { v24.h }[4], [x19]\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
"add x21, x21, x10, LSL #1\n"
- "st1 { v18.h }[4], [x22]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
+ "st1 { v23.h }[4], [x20]\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
"add x22, x22, x10, LSL #1\n"
- "st1 { v19.h }[4], [x23]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
+ "st1 { v22.h }[4], [x21]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
"add x23, x23, x10, LSL #1\n"
- "st1 { v20.h }[4], [x24]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
+ "st1 { v21.h }[4], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
"add x24, x24, x10, LSL #1\n"
- "st1 { v21.h }[4], [x25]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
+ "st1 { v20.h }[4], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
"add x25, x25, x10, LSL #1\n"
- "st1 { v22.h }[4], [x26]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
+ "st1 { v19.h }[4], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
"add x26, x26, x10, LSL #1\n"
- "st1 { v23.h }[4], [x27]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
- "add x27, x27, x10, LSL #1\n"
- "st1 { v24.h }[4], [x20]\n"
- "st1 { v25.h }[4], [x21]\n"
- "st1 { v26.h }[4], [x22]\n"
- "st1 { v27.h }[4], [x23]\n"
- "st1 { v28.h }[4], [x24]\n"
- "st1 { v29.h }[4], [x25]\n"
- "st1 { v30.h }[4], [x26]\n"
- "st1 { v31.h }[4], [x27]\n"
+ "st1 { v18.h }[4], [x25]\n"
+ "st1 { v17.h }[4], [x26]\n"
"b 22f\n"
"20:" // Output channel oddments: Done: Store: Bit 2: Unset
"tbz %x[n_output_channels], #1, 21f\n"
- "ldr x20, [%x[outptrs], #0x0]\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "add x19, x19, x10, LSL #1\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
"add x20, x20, x10, LSL #1\n"
+ "st1 { v5.s }[0], [x19]\n"
"add x21, x21, x10, LSL #1\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
+ "st1 { v31.s }[0], [x20]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
"add x22, x22, x10, LSL #1\n"
+ "st1 { v30.s }[0], [x21]\n"
"add x23, x23, x10, LSL #1\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
+ "st1 { v29.s }[0], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
"add x24, x24, x10, LSL #1\n"
+ "st1 { v28.s }[0], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
"add x25, x25, x10, LSL #1\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
+ "st1 { v27.s }[0], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
"add x26, x26, x10, LSL #1\n"
- "add x27, x27, x10, LSL #1\n"
- "st1 { v16.s }[0], [x20]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
+ "st1 { v26.s }[0], [x25]\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "add x19, x19, x10, LSL #1\n"
+ "st1 { v25.s }[0], [x26]\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
"add x20, x20, x10, LSL #1\n"
- "st1 { v17.s }[0], [x21]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
+ "st1 { v24.s }[0], [x19]\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
"add x21, x21, x10, LSL #1\n"
- "st1 { v18.s }[0], [x22]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
+ "st1 { v23.s }[0], [x20]\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
"add x22, x22, x10, LSL #1\n"
- "st1 { v19.s }[0], [x23]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
+ "st1 { v22.s }[0], [x21]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
"add x23, x23, x10, LSL #1\n"
- "st1 { v20.s }[0], [x24]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
+ "st1 { v21.s }[0], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
"add x24, x24, x10, LSL #1\n"
- "st1 { v21.s }[0], [x25]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
+ "st1 { v20.s }[0], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
"add x25, x25, x10, LSL #1\n"
- "st1 { v22.s }[0], [x26]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
+ "st1 { v19.s }[0], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
"add x26, x26, x10, LSL #1\n"
- "st1 { v23.s }[0], [x27]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
- "add x27, x27, x10, LSL #1\n"
+ "st1 { v18.s }[0], [x25]\n"
"add x10, x10, #0x2\n"
- "st1 { v24.s }[0], [x20]\n"
- "st1 { v25.s }[0], [x21]\n"
- "st1 { v26.s }[0], [x22]\n"
- "st1 { v27.s }[0], [x23]\n"
- "st1 { v28.s }[0], [x24]\n"
- "st1 { v29.s }[0], [x25]\n"
- "st1 { v30.s }[0], [x26]\n"
- "st1 { v31.s }[0], [x27]\n"
+ "st1 { v17.s }[0], [x26]\n"
"tbz %x[n_output_channels], #0, 22f\n"
- "ldr x20, [%x[outptrs], #0x0]\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "add x19, x19, x10, LSL #1\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
"add x20, x20, x10, LSL #1\n"
+ "st1 { v5.h }[2], [x19]\n"
"add x21, x21, x10, LSL #1\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
+ "st1 { v31.h }[2], [x20]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
"add x22, x22, x10, LSL #1\n"
+ "st1 { v30.h }[2], [x21]\n"
"add x23, x23, x10, LSL #1\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
+ "st1 { v29.h }[2], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
"add x24, x24, x10, LSL #1\n"
+ "st1 { v28.h }[2], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
"add x25, x25, x10, LSL #1\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
+ "st1 { v27.h }[2], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
"add x26, x26, x10, LSL #1\n"
- "add x27, x27, x10, LSL #1\n"
- "st1 { v16.h }[2], [x20]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
+ "st1 { v26.h }[2], [x25]\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "add x19, x19, x10, LSL #1\n"
+ "st1 { v25.h }[2], [x26]\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
"add x20, x20, x10, LSL #1\n"
- "st1 { v17.h }[2], [x21]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
+ "st1 { v24.h }[2], [x19]\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
"add x21, x21, x10, LSL #1\n"
- "st1 { v18.h }[2], [x22]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
+ "st1 { v23.h }[2], [x20]\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
"add x22, x22, x10, LSL #1\n"
- "st1 { v19.h }[2], [x23]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
+ "st1 { v22.h }[2], [x21]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
"add x23, x23, x10, LSL #1\n"
- "st1 { v20.h }[2], [x24]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
+ "st1 { v21.h }[2], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
"add x24, x24, x10, LSL #1\n"
- "st1 { v21.h }[2], [x25]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
+ "st1 { v20.h }[2], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
"add x25, x25, x10, LSL #1\n"
- "st1 { v22.h }[2], [x26]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
+ "st1 { v19.h }[2], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
"add x26, x26, x10, LSL #1\n"
- "st1 { v23.h }[2], [x27]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
- "add x27, x27, x10, LSL #1\n"
- "st1 { v24.h }[2], [x20]\n"
- "st1 { v25.h }[2], [x21]\n"
- "st1 { v26.h }[2], [x22]\n"
- "st1 { v27.h }[2], [x23]\n"
- "st1 { v28.h }[2], [x24]\n"
- "st1 { v29.h }[2], [x25]\n"
- "st1 { v30.h }[2], [x26]\n"
- "st1 { v31.h }[2], [x27]\n"
+ "st1 { v18.h }[2], [x25]\n"
+ "st1 { v17.h }[2], [x26]\n"
"b 22f\n"
"21:" // Output channel oddments: Done: Store: Bit 2: Unset: Bit 1: Unset
- "ldr x20, [%x[outptrs], #0x0]\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
+ "tbz %x[n_output_channels], #0, 22f\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "add x19, x19, x10, LSL #1\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
"add x20, x20, x10, LSL #1\n"
+ "st1 { v5.h }[0], [x19]\n"
"add x21, x21, x10, LSL #1\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
+ "st1 { v31.h }[0], [x20]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
"add x22, x22, x10, LSL #1\n"
+ "st1 { v30.h }[0], [x21]\n"
"add x23, x23, x10, LSL #1\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
+ "st1 { v29.h }[0], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
"add x24, x24, x10, LSL #1\n"
+ "st1 { v28.h }[0], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
"add x25, x25, x10, LSL #1\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
+ "st1 { v27.h }[0], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
"add x26, x26, x10, LSL #1\n"
- "add x27, x27, x10, LSL #1\n"
- "st1 { v16.h }[0], [x20]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
+ "st1 { v26.h }[0], [x25]\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "add x19, x19, x10, LSL #1\n"
+ "st1 { v25.h }[0], [x26]\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
"add x20, x20, x10, LSL #1\n"
- "st1 { v17.h }[0], [x21]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
+ "st1 { v24.h }[0], [x19]\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
"add x21, x21, x10, LSL #1\n"
- "st1 { v18.h }[0], [x22]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
+ "st1 { v23.h }[0], [x20]\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
"add x22, x22, x10, LSL #1\n"
- "st1 { v19.h }[0], [x23]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
+ "st1 { v22.h }[0], [x21]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
"add x23, x23, x10, LSL #1\n"
- "st1 { v20.h }[0], [x24]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
+ "st1 { v21.h }[0], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
"add x24, x24, x10, LSL #1\n"
- "st1 { v21.h }[0], [x25]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
+ "st1 { v20.h }[0], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
"add x25, x25, x10, LSL #1\n"
- "st1 { v22.h }[0], [x26]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
+ "st1 { v19.h }[0], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
"add x26, x26, x10, LSL #1\n"
- "st1 { v23.h }[0], [x27]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
- "add x27, x27, x10, LSL #1\n"
- "st1 { v24.h }[0], [x20]\n"
- "st1 { v25.h }[0], [x21]\n"
- "st1 { v26.h }[0], [x22]\n"
- "st1 { v27.h }[0], [x23]\n"
- "st1 { v28.h }[0], [x24]\n"
- "st1 { v29.h }[0], [x25]\n"
- "st1 { v30.h }[0], [x26]\n"
- "st1 { v31.h }[0], [x27]\n"
+ "st1 { v18.h }[0], [x25]\n"
+ "st1 { v17.h }[0], [x26]\n"
"22:" // Output channel oddments: Done: Store: Bit 2: End
"23:" // Done
: [weights] "+&r" (weights)
: [bias] "r" (bias), [inptrs] "r" (inptrs), [kernel_points] "r" ((uint64_t) kernel_points), [minmax_vals] "r" (minmax_vals), [n_output_channels] "r" ((uint64_t) n_output_channels), [outptrs] "r" (outptrs)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
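
Note (editorial, not part of the patch): the hunks above consistently renumber the scratch registers back down into the x19-x26 range and re-add "x19" to the asm clobber list. The minimal sketch below, with hypothetical names, illustrates the general contract these kernels rely on: any general-purpose register the inline assembly writes must appear in the clobber list so the compiler preserves it around the block. x19 is callee-saved on AArch64, so clobbering it forces a save/restore.

    #include <cstdint>

    // Illustrative only: use x19 as a scratch register inside inline asm.
    static int64_t add_via_x19(int64_t a, int64_t b)
    {
        int64_t result;
        __asm__ __volatile__(
            "mov x19, %x[a]\n"          // x19 used as scratch
            "add %x[res], x19, %x[b]\n" // result = a + b
            : [res] "=r" (result)
            : [a] "r" (a), [b] "r" (b)
            : "cc", "x19"               // x19 declared clobbered, as in the lists above
        );
        return result;
    }

Because "x19" is listed as clobbered, the compiler will neither allocate an operand to it nor keep a live value there across the asm statement, which is exactly what the restored clobber lists in this revert guarantee.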
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
index 2ff03aa15a..73c1e07420 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,438 +87,438 @@ void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
);
__asm__ __volatile__(
- "mov x23, #0x0\n"
- "mov x22, #0x0\n"
+ "mov x17, #0x0\n"
+ "mov x16, #0x0\n"
"1:" // Tile loop
- "str x23, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "mov x27, #0x2\n"
- "mov x26, #0x2\n"
- "str x22, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x25, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x24, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "mul x21, x23, x25\n" // offset = tile_i * ld_input_row
- "ldr x15, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "ldr x14, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "mul x20, x23, x24\n" // offset = tile_i * ld_output_row
- "mov x23, #0x10\n" // cntb _, ALL, #1
- "madd x21, x22, x15, x21\n" // offset += tile_j * ld_input_col
- "ldr x13, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "lsl x15, x15, #0x2\n"
- "ldr x12, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "madd x20, x22, x14, x20\n" // offset += tile_j * ld_output_col
- "lsr x22, %x[n_channels], #0x2\n"
- "add x11, x15, x15\n"
- "ldr x10, [%x[params_struct], %[offsetof_args_params]]\n"
- "mul x21, x21, x27\n" // offset *= kernel_stride * output_size
- "add x13, x13, x21, LSL #2\n" // inptr[0] += offset * sizeof(float)
- "add x9, x13, x25, LSL #2\n"
- "mul x20, x20, x26\n" // offset *= output_tile_size
- "add x28, x9, x25, LSL #2\n"
- "add x12, x12, x20, LSL #2\n" // outptrs[0] += offset * sizeof(float)
- "add x20, %x[params_struct], %[offsetof_args_min]\n"
- "ld1r { v18.4s }, [x20]\n"
- "add x20, %x[params_struct], %[offsetof_args_max]\n"
- "ld1r { v17.4s }, [x20]\n"
- "add x27, x28, x25, LSL #2\n"
- "add x26, x11, x15\n"
- "add x25, x12, x24, LSL #2\n"
- "lsl x14, x14, #0x2\n"
- "mov x21, #0x0\n"
- "sub x20, XZR, x23\n"
- "cbz x22, 4f\n"
- "ldr q16, [x10, #0x0]\n"
- "ldr q0, [x10, #0x10]\n"
- "cmp x23, x22, LSL #4\n"
- "ldr q1, [x10, #0x20]\n"
- "ldr q2, [x10, #0x30]\n"
- "ldr q3, [x10, #0x40]\n"
- "ldr q4, [x10, #0x50]\n"
- "ldr q5, [x10, #0x60]\n"
- "ldr q6, [x10, #0x70]\n"
- "ldr q7, [x10, #0x80]\n"
- "ldr q8, [x10, #0x90]\n"
- "add x10, x10, #0xa0\n"
- "ldr q9, [x9, x15]\n"
- "ld1 { v10.4s }, [x13]\n"
- "ldr q11, [x13, x26]\n"
- "ldr q12, [x9, x11]\n"
- "ldr q13, [x28, x15]\n"
+ "str x17, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "mov x25, #0x2\n"
+ "str x16, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "mov x15, #0x2\n"
+ "ldr x14, [%x[params_struct], %[offsetof_args_params]]\n"
+ "add x24, %x[params_struct], %[offsetof_args_min]\n"
+ "ldr x23, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "add x21, %x[params_struct], %[offsetof_args_max]\n"
+ "ldr x13, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "mov x22, #0x0\n"
+ "ldr x12, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "mul x19, x17, x23\n" // offset = tile_i * ld_input_row
+ "ldr x20, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "madd x19, x16, x13, x19\n" // offset += tile_j * ld_input_col
+ "ldr x11, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+ "mul x19, x19, x25\n" // offset *= kernel_stride * output_size
+ "ldr x10, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "add x12, x12, x19, LSL #2\n" // inptr[0] += offset * sizeof(float)
+ "ld1r { v18.4s }, [x24]\n"
+ "add x9, x12, x23, LSL #2\n"
+ "ld1r { v17.4s }, [x21]\n"
+ "add x28, x9, x23, LSL #2\n"
+ "lsl x13, x13, #0x2\n"
+ "add x27, x28, x23, LSL #2\n"
+ "add x26, x13, x13\n"
+ "add x25, x26, x13\n"
+ "mul x19, x17, x20\n" // offset = tile_i * ld_output_row
+ "madd x19, x16, x11, x19\n" // offset += tile_j * ld_output_col
+ "mul x19, x19, x15\n" // offset *= output_tile_size
+ "add x10, x10, x19, LSL #2\n" // outptrs[0] += offset * sizeof(float)
+ "add x24, x10, x20, LSL #2\n"
+ "lsl x11, x11, #0x2\n"
+ "mov x21, #0x10\n" // cntb _, ALL, #1
+ "sub x20, XZR, x21\n"
+ "lsr x19, %x[n_channels], #0x2\n"
+ "cbz x19, 4f\n"
+ "ldr q16, [x14, #0x0]\n"
+ "ldr q0, [x14, #0x10]\n"
+ "cmp x21, x19, LSL #4\n"
+ "ldr q1, [x14, #0x20]\n"
+ "ldr q2, [x14, #0x30]\n"
+ "ldr q3, [x14, #0x40]\n"
+ "ldr q4, [x14, #0x50]\n"
+ "ldr q5, [x14, #0x60]\n"
+ "ldr q6, [x14, #0x70]\n"
+ "ldr q7, [x14, #0x80]\n"
+ "ldr q8, [x14, #0x90]\n"
+ "add x14, x14, #0xa0\n"
+ "ldr q9, [x9, x13]\n"
+ "ld1 { v10.4s }, [x12]\n"
+ "ldr q11, [x12, x25]\n"
+ "ldr q12, [x9, x26]\n"
+ "ldr q13, [x28, x13]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
- "mov v28.16b, v16.16b\n fmla v28.4s, v4.4s, v9.4s\n"
- "mov v29.16b, v16.16b\n fmla v29.4s, v3.4s, v9.4s\n"
- "add x23, x23, #0x10\n"
- "cmp x23, x22, LSL #4\n"
- "mov v30.16b, v16.16b\n fmla v30.4s, v1.4s, v9.4s\n"
- "mov v31.16b, v16.16b\n fmla v31.4s, v0.4s, v9.4s\n"
- "ld1 { v9.4s }, [x27]\n"
- "ldr q16, [x10, #0x0]\n"
- "fmla v28.4s, v0.4s, v10.4s\n"
- "ldr q10, [x28, x11]\n"
- "fmla v29.4s, v2.4s, v11.4s\n"
- "ldr q11, [x27, x26]\n"
- "fmla v30.4s, v2.4s, v12.4s\n"
- "fmla v31.4s, v1.4s, v12.4s\n"
+ "mov v31.16b, v16.16b\n fmla v31.4s, v4.4s, v9.4s\n"
"add x20, x20, #0x10\n"
+ "mov v30.16b, v16.16b\n fmla v30.4s, v3.4s, v9.4s\n"
+ "add x22, x22, #0x10\n"
+ "mov v29.16b, v16.16b\n fmla v29.4s, v1.4s, v9.4s\n"
"add x21, x21, #0x10\n"
- "fmla v28.4s, v5.4s, v12.4s\n"
- "fmla v29.4s, v4.4s, v12.4s\n"
- "ldr q12, [x13, x15]\n"
- "fmla v30.4s, v6.4s, v9.4s\n"
- "ldr q9, [x13, x11]\n"
- "fmla v31.4s, v3.4s, v13.4s\n"
- "add x13, x13, #0x10\n"
- "fmla v28.4s, v7.4s, v13.4s\n"
- "fmla v29.4s, v6.4s, v13.4s\n"
- "fmla v30.4s, v4.4s, v13.4s\n"
- "fmla v31.4s, v8.4s, v11.4s\n"
- "ld1 { v11.4s }, [x9]\n"
+ "mov v28.16b, v16.16b\n fmla v28.4s, v0.4s, v9.4s\n"
+ "ld1 { v9.4s }, [x27]\n"
+ "cmp x21, x19, LSL #4\n"
+ "fmla v31.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x28, x26]\n"
+ "fmla v30.4s, v2.4s, v11.4s\n"
+ "ldr q11, [x27, x25]\n"
+ "fmla v29.4s, v2.4s, v12.4s\n"
+ "ldr q16, [x14, #0x0]\n"
"fmla v28.4s, v1.4s, v12.4s\n"
- "fmla v29.4s, v0.4s, v12.4s\n"
- "ldr q12, [x9, x26]\n"
+ "fmla v31.4s, v5.4s, v12.4s\n"
+ "fmla v30.4s, v4.4s, v12.4s\n"
+ "ldr q12, [x12, x13]\n"
+ "fmla v29.4s, v6.4s, v9.4s\n"
+ "ldr q9, [x12, x26]\n"
+ "add x12, x12, #0x10\n"
+ "fmla v28.4s, v3.4s, v13.4s\n"
+ "fmla v31.4s, v7.4s, v13.4s\n"
+ "fmla v30.4s, v6.4s, v13.4s\n"
+ "fmla v29.4s, v4.4s, v13.4s\n"
+ "fmla v28.4s, v8.4s, v11.4s\n"
+ "ld1 { v11.4s }, [x9]\n"
+ "fmla v31.4s, v1.4s, v12.4s\n"
+ "fmla v30.4s, v0.4s, v12.4s\n"
+ "ldr q12, [x9, x25]\n"
"add x9, x9, #0x10\n"
- "fmla v30.4s, v5.4s, v10.4s\n"
- "fmla v31.4s, v4.4s, v10.4s\n"
- "ldr q4, [x10, #0x50]\n"
- "fmla v28.4s, v2.4s, v9.4s\n"
- "fmla v29.4s, v1.4s, v9.4s\n"
+ "fmla v29.4s, v5.4s, v10.4s\n"
+ "fmla v28.4s, v4.4s, v10.4s\n"
+ "ldr q4, [x14, #0x50]\n"
+ "fmla v31.4s, v2.4s, v9.4s\n"
+ "fmla v30.4s, v1.4s, v9.4s\n"
"ld1 { v9.4s }, [x28]\n"
- "ldr q1, [x10, #0x20]\n"
- "fmla v30.4s, v0.4s, v11.4s\n"
- "ldr q0, [x10, #0x10]\n"
- "fmla v31.4s, v2.4s, v12.4s\n"
- "ldr q2, [x10, #0x30]\n"
- "fmla v28.4s, v8.4s, v10.4s\n"
- "fmla v29.4s, v7.4s, v10.4s\n"
- "ldr q10, [x28, x26]\n"
+ "ldr q1, [x14, #0x20]\n"
+ "fmla v29.4s, v0.4s, v11.4s\n"
+ "ldr q0, [x14, #0x10]\n"
+ "fmla v28.4s, v2.4s, v12.4s\n"
+ "ldr q2, [x14, #0x30]\n"
+ "fmla v31.4s, v8.4s, v10.4s\n"
+ "fmla v30.4s, v7.4s, v10.4s\n"
+ "ldr q10, [x28, x25]\n"
"add x28, x28, #0x10\n"
- "ldr q13, [x28, x15]\n"
- "fmla v30.4s, v3.4s, v9.4s\n"
- "fmla v31.4s, v5.4s, v10.4s\n"
- "fmla v28.4s, v3.4s, v11.4s\n"
- "ldr q11, [x27, x15]\n"
- "ldr q3, [x10, #0x40]\n"
- "fmla v29.4s, v5.4s, v12.4s\n"
- "ldr q12, [x27, x11]\n"
- "ldr q5, [x10, #0x60]\n"
- "fmla v30.4s, v7.4s, v11.4s\n"
- "fmla v31.4s, v6.4s, v11.4s\n"
- "ldr q11, [x13, x26]\n"
- "fmla v28.4s, v6.4s, v9.4s\n"
- "ldr q9, [x9, x15]\n"
- "fmla v29.4s, v8.4s, v10.4s\n"
- "ld1 { v10.4s }, [x13]\n"
- "ldr q6, [x10, #0x70]\n"
- "fmla v30.4s, v8.4s, v12.4s\n"
- "fmla v31.4s, v7.4s, v12.4s\n"
- "ldr q12, [x9, x11]\n"
- "ldr q7, [x10, #0x80]\n"
- "fmax v28.4s, v28.4s, v18.4s\n"
- "fmax v29.4s, v29.4s, v18.4s\n"
- "ldr q8, [x10, #0x90]\n"
- "fmax v30.4s, v30.4s, v18.4s\n"
- "fmax v31.4s, v31.4s, v18.4s\n"
+ "fmla v29.4s, v3.4s, v9.4s\n"
+ "ldr q13, [x28, x13]\n"
+ "fmla v31.4s, v3.4s, v11.4s\n"
+ "ldr q11, [x27, x13]\n"
+ "fmla v30.4s, v5.4s, v12.4s\n"
+ "ldr q12, [x27, x26]\n"
"add x27, x27, #0x10\n"
- "fmin v28.4s, v28.4s, v17.4s\n"
- "fmin v29.4s, v29.4s, v17.4s\n"
- "st1 { v28.4s }, [x12]\n"
- "add x10, x10, #0xa0\n"
- "fmin v30.4s, v30.4s, v17.4s\n"
+ "fmla v28.4s, v5.4s, v10.4s\n"
+ "ldr q3, [x14, #0x40]\n"
+ "ldr q5, [x14, #0x60]\n"
+ "fmla v31.4s, v6.4s, v9.4s\n"
+ "ldr q9, [x9, x13]\n"
+ "fmla v30.4s, v8.4s, v10.4s\n"
+ "ld1 { v10.4s }, [x12]\n"
+ "fmla v29.4s, v7.4s, v11.4s\n"
+ "fmla v28.4s, v6.4s, v11.4s\n"
+ "ldr q11, [x12, x25]\n"
+ "ldr q6, [x14, #0x70]\n"
+ "fmax v31.4s, v31.4s, v18.4s\n"
+ "fmax v30.4s, v30.4s, v18.4s\n"
+ "fmla v29.4s, v8.4s, v12.4s\n"
+ "ldr q8, [x14, #0x90]\n"
+ "fmla v28.4s, v7.4s, v12.4s\n"
+ "ldr q12, [x9, x26]\n"
"fmin v31.4s, v31.4s, v17.4s\n"
- "str q29, [x12, x14]\n"
- "add x12, x12, #0x10\n"
- "st1 { v30.4s }, [x25]\n"
- "str q31, [x25, x14]\n"
- "add x25, x25, #0x10\n"
+ "ldr q7, [x14, #0x80]\n"
+ "add x14, x14, #0xa0\n"
+ "fmin v30.4s, v30.4s, v17.4s\n"
+ "st1 { v31.4s }, [x10]\n"
+ "fmax v29.4s, v29.4s, v18.4s\n"
+ "fmax v28.4s, v28.4s, v18.4s\n"
+ "str q30, [x10, x11]\n"
+ "fmin v29.4s, v29.4s, v17.4s\n"
+ "st1 { v29.4s }, [x24]\n"
+ "fmin v28.4s, v28.4s, v17.4s\n"
+ "add x10, x10, #0x10\n"
+ "str q28, [x24, x11]\n"
+ "add x24, x24, #0x10\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
- "mov v28.16b, v16.16b\n fmla v28.4s, v4.4s, v9.4s\n"
- "mov v29.16b, v16.16b\n fmla v29.4s, v3.4s, v9.4s\n"
- "mov v30.16b, v16.16b\n fmla v30.4s, v1.4s, v9.4s\n"
- "mov v31.16b, v16.16b\n fmla v31.4s, v0.4s, v9.4s\n"
+ "mov v31.16b, v16.16b\n fmla v31.4s, v4.4s, v9.4s\n"
+ "mov v30.16b, v16.16b\n fmla v30.4s, v3.4s, v9.4s\n"
+ "mov v29.16b, v16.16b\n fmla v29.4s, v1.4s, v9.4s\n"
+ "mov v28.16b, v16.16b\n fmla v28.4s, v0.4s, v9.4s\n"
"ld1 { v9.4s }, [x27]\n"
- "fmla v28.4s, v0.4s, v10.4s\n"
- "ldr q10, [x28, x11]\n"
- "fmla v29.4s, v2.4s, v11.4s\n"
- "ldr q11, [x27, x26]\n"
- "fmla v30.4s, v2.4s, v12.4s\n"
- "fmla v31.4s, v1.4s, v12.4s\n"
- "fmla v28.4s, v5.4s, v12.4s\n"
- "fmla v29.4s, v4.4s, v12.4s\n"
- "ldr q12, [x13, x15]\n"
- "fmla v30.4s, v6.4s, v9.4s\n"
- "ldr q9, [x13, x11]\n"
- "fmla v31.4s, v3.4s, v13.4s\n"
- "add x13, x13, #0x10\n"
- "fmla v28.4s, v7.4s, v13.4s\n"
- "fmla v29.4s, v6.4s, v13.4s\n"
- "fmla v30.4s, v4.4s, v13.4s\n"
- "fmla v31.4s, v8.4s, v11.4s\n"
- "ld1 { v11.4s }, [x9]\n"
+ "fmla v31.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x28, x26]\n"
+ "fmla v30.4s, v2.4s, v11.4s\n"
+ "ldr q11, [x27, x25]\n"
+ "fmla v29.4s, v2.4s, v12.4s\n"
"fmla v28.4s, v1.4s, v12.4s\n"
- "fmla v29.4s, v0.4s, v12.4s\n"
- "ldr q12, [x9, x26]\n"
+ "fmla v31.4s, v5.4s, v12.4s\n"
+ "fmla v30.4s, v4.4s, v12.4s\n"
+ "ldr q12, [x12, x13]\n"
+ "fmla v29.4s, v6.4s, v9.4s\n"
+ "ldr q9, [x12, x26]\n"
+ "add x12, x12, #0x10\n"
+ "fmla v28.4s, v3.4s, v13.4s\n"
+ "fmla v31.4s, v7.4s, v13.4s\n"
+ "fmla v30.4s, v6.4s, v13.4s\n"
+ "fmla v29.4s, v4.4s, v13.4s\n"
+ "fmla v28.4s, v8.4s, v11.4s\n"
+ "ld1 { v11.4s }, [x9]\n"
+ "fmla v31.4s, v1.4s, v12.4s\n"
+ "fmla v30.4s, v0.4s, v12.4s\n"
+ "ldr q12, [x9, x25]\n"
"add x9, x9, #0x10\n"
- "fmla v30.4s, v5.4s, v10.4s\n"
- "fmla v31.4s, v4.4s, v10.4s\n"
- "fmla v28.4s, v2.4s, v9.4s\n"
- "fmla v29.4s, v1.4s, v9.4s\n"
+ "fmla v29.4s, v5.4s, v10.4s\n"
+ "fmla v28.4s, v4.4s, v10.4s\n"
+ "fmla v31.4s, v2.4s, v9.4s\n"
+ "fmla v30.4s, v1.4s, v9.4s\n"
"ld1 { v9.4s }, [x28]\n"
- "fmla v30.4s, v0.4s, v11.4s\n"
- "fmla v31.4s, v2.4s, v12.4s\n"
- "fmla v28.4s, v8.4s, v10.4s\n"
- "fmla v29.4s, v7.4s, v10.4s\n"
- "ldr q10, [x28, x26]\n"
+ "fmla v29.4s, v0.4s, v11.4s\n"
+ "fmla v28.4s, v2.4s, v12.4s\n"
+ "fmla v31.4s, v8.4s, v10.4s\n"
+ "fmla v30.4s, v7.4s, v10.4s\n"
+ "ldr q10, [x28, x25]\n"
"add x28, x28, #0x10\n"
- "fmla v30.4s, v3.4s, v9.4s\n"
- "fmla v31.4s, v5.4s, v10.4s\n"
- "fmla v28.4s, v3.4s, v11.4s\n"
- "ldr q11, [x27, x15]\n"
- "fmla v29.4s, v5.4s, v12.4s\n"
- "ldr q12, [x27, x11]\n"
- "fmla v30.4s, v7.4s, v11.4s\n"
- "fmla v31.4s, v6.4s, v11.4s\n"
+ "fmla v29.4s, v3.4s, v9.4s\n"
+ "fmla v31.4s, v3.4s, v11.4s\n"
+ "ldr q11, [x27, x13]\n"
+ "fmla v30.4s, v5.4s, v12.4s\n"
+ "ldr q12, [x27, x26]\n"
"add x27, x27, #0x10\n"
- "fmla v28.4s, v6.4s, v9.4s\n"
- "fmla v29.4s, v8.4s, v10.4s\n"
- "fmax v28.4s, v28.4s, v18.4s\n"
- "fmla v30.4s, v8.4s, v12.4s\n"
- "fmla v31.4s, v7.4s, v12.4s\n"
- "fmax v29.4s, v29.4s, v18.4s\n"
- "fmax v30.4s, v30.4s, v18.4s\n"
+ "fmla v28.4s, v5.4s, v10.4s\n"
+ "fmla v31.4s, v6.4s, v9.4s\n"
+ "fmla v30.4s, v8.4s, v10.4s\n"
+ "fmla v29.4s, v7.4s, v11.4s\n"
+ "fmla v28.4s, v6.4s, v11.4s\n"
"fmax v31.4s, v31.4s, v18.4s\n"
- "fmin v28.4s, v28.4s, v17.4s\n"
- "fmin v29.4s, v29.4s, v17.4s\n"
- "st1 { v28.4s }, [x12]\n"
- "fmin v30.4s, v30.4s, v17.4s\n"
+ "fmax v30.4s, v30.4s, v18.4s\n"
+ "fmla v29.4s, v8.4s, v12.4s\n"
+ "fmla v28.4s, v7.4s, v12.4s\n"
"fmin v31.4s, v31.4s, v17.4s\n"
- "str q29, [x12, x14]\n"
- "add x12, x12, #0x10\n"
- "st1 { v30.4s }, [x25]\n"
- "str q31, [x25, x14]\n"
- "add x25, x25, #0x10\n"
+ "st1 { v31.4s }, [x10]\n"
+ "fmin v30.4s, v30.4s, v17.4s\n"
+ "fmax v29.4s, v29.4s, v18.4s\n"
+ "str q30, [x10, x11]\n"
+ "fmin v29.4s, v29.4s, v17.4s\n"
+ "add x10, x10, #0x10\n"
+ "fmax v28.4s, v28.4s, v18.4s\n"
+ "st1 { v29.4s }, [x24]\n"
+ "fmin v28.4s, v28.4s, v17.4s\n"
+ "str q28, [x24, x11]\n"
+ "add x24, x24, #0x10\n"
"4:" // Tile loop: Oddments
"tst %x[n_channels], #0x3\n"
"beq 31f\n"
- "ldr q16, [x10, #0x0]\n"
- "ldr q0, [x10, #0x10]\n"
- "add x24, x9, x15\n"
- "add x23, x13, XZR\n"
- "ldr q1, [x10, #0x20]\n"
- "ldr q2, [x10, #0x30]\n"
- "add x22, x13, x26\n"
- "add x21, x9, x11\n"
- "ldr q3, [x10, #0x40]\n"
- "ldr q4, [x10, #0x50]\n"
- "add x20, x28, x15\n"
- "ldr q5, [x10, #0x60]\n"
- "ldr q6, [x10, #0x70]\n"
- "ldr q7, [x10, #0x80]\n"
- "ldr q8, [x10, #0x90]\n"
+ "ldr q16, [x14, #0x0]\n"
+ "ldr q0, [x14, #0x10]\n"
+ "add x23, x9, x13\n"
+ "ldr q1, [x14, #0x20]\n"
+ "add x22, x12, XZR\n"
+ "ldr q2, [x14, #0x30]\n"
+ "add x21, x12, x25\n"
+ "ldr q3, [x14, #0x40]\n"
+ "add x20, x9, x26\n"
+ "ldr q4, [x14, #0x50]\n"
+ "add x19, x28, x13\n"
+ "ldr q5, [x14, #0x60]\n"
+ "ldr q6, [x14, #0x70]\n"
+ "ldr q7, [x14, #0x80]\n"
+ "ldr q8, [x14, #0x90]\n"
"tbz %x[n_channels], #1, 5f\n"
- "ldr d9, [x24], #0x8\n"
- "ldr d10, [x23], #0x8\n"
- "ldr d11, [x22], #0x8\n"
- "ldr d12, [x21], #0x8\n"
- "ldr d13, [x20], #0x8\n"
+ "ldr d9, [x23], #0x8\n"
+ "ldr d10, [x22], #0x8\n"
+ "ldr d11, [x21], #0x8\n"
+ "ldr d12, [x20], #0x8\n"
+ "ldr d13, [x19], #0x8\n"
"tbz %x[n_channels], #0, 6f\n"
- "ld1 { v9.s }[2], [x24]\n"
- "ld1 { v10.s }[2], [x23]\n"
- "ld1 { v11.s }[2], [x22]\n"
- "ld1 { v12.s }[2], [x21]\n"
- "ld1 { v13.s }[2], [x20]\n"
+ "ld1 { v9.s }[2], [x23]\n"
+ "ld1 { v10.s }[2], [x22]\n"
+ "ld1 { v11.s }[2], [x21]\n"
+ "ld1 { v12.s }[2], [x20]\n"
+ "ld1 { v13.s }[2], [x19]\n"
"b 6f\n"
"5:" // Tile loop: Oddments: Load inputs: (1, 1), (0, 0), (0, 3), (1, 2), (2, 1): Bit 1: Unset
- "ldr s9, [x24, #0x0]\n"
- "ldr s10, [x23, #0x0]\n"
- "ldr s11, [x22, #0x0]\n"
- "ldr s12, [x21, #0x0]\n"
- "ldr s13, [x20, #0x0]\n"
+ "ldr s9, [x23, #0x0]\n"
+ "ldr s10, [x22, #0x0]\n"
+ "ldr s11, [x21, #0x0]\n"
+ "ldr s12, [x20, #0x0]\n"
+ "ldr s13, [x19, #0x0]\n"
"6:" // Tile loop: Oddments: Load inputs: (1, 1), (0, 0), (0, 3), (1, 2), (2, 1): Bit 1: End
- "mov v28.16b, v16.16b\n fmla v28.4s, v4.4s, v9.4s\n"
- "mov v29.16b, v16.16b\n fmla v29.4s, v3.4s, v9.4s\n"
- "add x20, x27, XZR\n"
- "mov v30.16b, v16.16b\n fmla v30.4s, v1.4s, v9.4s\n"
- "mov v31.16b, v16.16b\n fmla v31.4s, v0.4s, v9.4s\n"
- "fmla v28.4s, v0.4s, v10.4s\n"
- "fmla v29.4s, v2.4s, v11.4s\n"
- "fmla v28.4s, v5.4s, v12.4s\n"
- "fmla v29.4s, v4.4s, v12.4s\n"
- "fmla v30.4s, v2.4s, v12.4s\n"
- "fmla v31.4s, v1.4s, v12.4s\n"
+ "mov v31.16b, v16.16b\n fmla v31.4s, v4.4s, v9.4s\n"
+ "add x19, x27, XZR\n"
+ "mov v30.16b, v16.16b\n fmla v30.4s, v3.4s, v9.4s\n"
+ "mov v29.16b, v16.16b\n fmla v29.4s, v1.4s, v9.4s\n"
+ "mov v28.16b, v16.16b\n fmla v28.4s, v0.4s, v9.4s\n"
+ "fmla v31.4s, v0.4s, v10.4s\n"
+ "fmla v30.4s, v2.4s, v11.4s\n"
+ "fmla v29.4s, v2.4s, v12.4s\n"
+ "fmla v28.4s, v1.4s, v12.4s\n"
+ "fmla v31.4s, v5.4s, v12.4s\n"
+ "fmla v30.4s, v4.4s, v12.4s\n"
"tbz %x[n_channels], #1, 7f\n"
- "ldr d9, [x20], #0x8\n"
+ "ldr d9, [x19], #0x8\n"
"tbz %x[n_channels], #0, 8f\n"
- "ld1 { v9.s }[2], [x20]\n"
+ "ld1 { v9.s }[2], [x19]\n"
"b 8f\n"
"7:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 1: Unset
- "ldr s9, [x20, #0x0]\n"
+ "ldr s9, [x19, #0x0]\n"
"8:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 1: End
- "fmla v30.4s, v6.4s, v9.4s\n"
- "fmla v28.4s, v7.4s, v13.4s\n"
- "add x20, x27, x26\n"
- "fmla v29.4s, v6.4s, v13.4s\n"
- "fmla v30.4s, v4.4s, v13.4s\n"
- "fmla v31.4s, v3.4s, v13.4s\n"
+ "fmla v29.4s, v6.4s, v9.4s\n"
+ "add x19, x27, x25\n"
+ "fmla v31.4s, v7.4s, v13.4s\n"
+ "fmla v30.4s, v6.4s, v13.4s\n"
+ "fmla v28.4s, v3.4s, v13.4s\n"
+ "fmla v29.4s, v4.4s, v13.4s\n"
"tbz %x[n_channels], #1, 9f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #0, 10f\n"
- "ld1 { v11.s }[2], [x20]\n"
+ "ld1 { v11.s }[2], [x19]\n"
"b 10f\n"
"9:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 1: Unset
- "ldr s11, [x20, #0x0]\n"
+ "ldr s11, [x19, #0x0]\n"
"10:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 1: End
- "fmla v31.4s, v8.4s, v11.4s\n"
- "add x20, x13, x15\n"
+ "fmla v28.4s, v8.4s, v11.4s\n"
+ "add x19, x12, x13\n"
"tbz %x[n_channels], #1, 11f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #0, 12f\n"
- "ld1 { v12.s }[2], [x20]\n"
+ "ld1 { v12.s }[2], [x19]\n"
"b 12f\n"
"11:" // Tile loop: Oddments: Load inputs: (0, 1): Bit 1: Unset
- "ldr s12, [x20, #0x0]\n"
+ "ldr s12, [x19, #0x0]\n"
"12:" // Tile loop: Oddments: Load inputs: (0, 1): Bit 1: End
- "fmla v28.4s, v1.4s, v12.4s\n"
- "fmla v29.4s, v0.4s, v12.4s\n"
- "add x20, x13, x11\n"
+ "fmla v31.4s, v1.4s, v12.4s\n"
+ "add x19, x12, x26\n"
+ "fmla v30.4s, v0.4s, v12.4s\n"
"tbz %x[n_channels], #1, 13f\n"
- "ldr d9, [x20], #0x8\n"
+ "ldr d9, [x19], #0x8\n"
"tbz %x[n_channels], #0, 14f\n"
- "ld1 { v9.s }[2], [x20]\n"
+ "ld1 { v9.s }[2], [x19]\n"
"b 14f\n"
"13:" // Tile loop: Oddments: Load inputs: (0, 2): Bit 1: Unset
- "ldr s9, [x20, #0x0]\n"
+ "ldr s9, [x19, #0x0]\n"
"14:" // Tile loop: Oddments: Load inputs: (0, 2): Bit 1: End
- "fmla v28.4s, v2.4s, v9.4s\n"
- "fmla v29.4s, v1.4s, v9.4s\n"
- "add x20, x28, x11\n"
+ "fmla v31.4s, v2.4s, v9.4s\n"
+ "add x19, x28, x26\n"
+ "fmla v30.4s, v1.4s, v9.4s\n"
"tbz %x[n_channels], #1, 15f\n"
- "ldr d10, [x20], #0x8\n"
+ "ldr d10, [x19], #0x8\n"
"tbz %x[n_channels], #0, 16f\n"
- "ld1 { v10.s }[2], [x20]\n"
+ "ld1 { v10.s }[2], [x19]\n"
"b 16f\n"
"15:" // Tile loop: Oddments: Load inputs: (2, 2): Bit 1: Unset
- "ldr s10, [x20, #0x0]\n"
+ "ldr s10, [x19, #0x0]\n"
"16:" // Tile loop: Oddments: Load inputs: (2, 2): Bit 1: End
- "fmla v28.4s, v8.4s, v10.4s\n"
- "fmla v29.4s, v7.4s, v10.4s\n"
- "add x20, x9, XZR\n"
- "fmla v30.4s, v5.4s, v10.4s\n"
- "fmla v31.4s, v4.4s, v10.4s\n"
+ "fmla v31.4s, v8.4s, v10.4s\n"
+ "add x19, x9, XZR\n"
+ "fmla v30.4s, v7.4s, v10.4s\n"
+ "fmla v29.4s, v5.4s, v10.4s\n"
+ "fmla v28.4s, v4.4s, v10.4s\n"
"tbz %x[n_channels], #1, 17f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #0, 18f\n"
- "ld1 { v11.s }[2], [x20]\n"
+ "ld1 { v11.s }[2], [x19]\n"
"b 18f\n"
"17:" // Tile loop: Oddments: Load inputs: (1, 0): Bit 1: Unset
- "ldr s11, [x20, #0x0]\n"
+ "ldr s11, [x19, #0x0]\n"
"18:" // Tile loop: Oddments: Load inputs: (1, 0): Bit 1: End
- "fmla v28.4s, v3.4s, v11.4s\n"
- "fmla v30.4s, v0.4s, v11.4s\n"
- "add x20, x9, x26\n"
+ "fmla v31.4s, v3.4s, v11.4s\n"
+ "add x19, x9, x25\n"
+ "fmla v29.4s, v0.4s, v11.4s\n"
"tbz %x[n_channels], #1, 19f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #0, 20f\n"
- "ld1 { v12.s }[2], [x20]\n"
+ "ld1 { v12.s }[2], [x19]\n"
"b 20f\n"
"19:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 1: Unset
- "ldr s12, [x20, #0x0]\n"
+ "ldr s12, [x19, #0x0]\n"
"20:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 1: End
- "fmla v29.4s, v5.4s, v12.4s\n"
- "fmla v31.4s, v2.4s, v12.4s\n"
- "add x20, x28, XZR\n"
+ "fmla v30.4s, v5.4s, v12.4s\n"
+ "add x19, x28, XZR\n"
+ "fmla v28.4s, v2.4s, v12.4s\n"
"tbz %x[n_channels], #1, 21f\n"
- "ldr d9, [x20], #0x8\n"
+ "ldr d9, [x19], #0x8\n"
"tbz %x[n_channels], #0, 22f\n"
- "ld1 { v9.s }[2], [x20]\n"
+ "ld1 { v9.s }[2], [x19]\n"
"b 22f\n"
"21:" // Tile loop: Oddments: Load inputs: (2, 0): Bit 1: Unset
- "ldr s9, [x20, #0x0]\n"
+ "ldr s9, [x19, #0x0]\n"
"22:" // Tile loop: Oddments: Load inputs: (2, 0): Bit 1: End
- "fmla v28.4s, v6.4s, v9.4s\n"
- "fmla v30.4s, v3.4s, v9.4s\n"
- "add x20, x28, x26\n"
+ "fmla v31.4s, v6.4s, v9.4s\n"
+ "add x19, x28, x25\n"
+ "fmla v29.4s, v3.4s, v9.4s\n"
"tbz %x[n_channels], #1, 23f\n"
- "ldr d10, [x20], #0x8\n"
+ "ldr d10, [x19], #0x8\n"
"tbz %x[n_channels], #0, 24f\n"
- "ld1 { v10.s }[2], [x20]\n"
+ "ld1 { v10.s }[2], [x19]\n"
"b 24f\n"
"23:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 1: Unset
- "ldr s10, [x20, #0x0]\n"
+ "ldr s10, [x19, #0x0]\n"
"24:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 1: End
- "fmla v29.4s, v8.4s, v10.4s\n"
- "fmla v31.4s, v5.4s, v10.4s\n"
- "add x20, x27, x15\n"
+ "fmla v30.4s, v8.4s, v10.4s\n"
+ "add x19, x27, x13\n"
+ "fmla v28.4s, v5.4s, v10.4s\n"
"tbz %x[n_channels], #1, 25f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #0, 26f\n"
- "ld1 { v11.s }[2], [x20]\n"
+ "ld1 { v11.s }[2], [x19]\n"
"b 26f\n"
"25:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 1: Unset
- "ldr s11, [x20, #0x0]\n"
+ "ldr s11, [x19, #0x0]\n"
"26:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 1: End
- "fmla v30.4s, v7.4s, v11.4s\n"
- "fmla v31.4s, v6.4s, v11.4s\n"
- "add x20, x27, x11\n"
+ "fmla v29.4s, v7.4s, v11.4s\n"
+ "add x19, x27, x26\n"
+ "fmla v28.4s, v6.4s, v11.4s\n"
"tbz %x[n_channels], #1, 27f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #0, 28f\n"
- "ld1 { v12.s }[2], [x20]\n"
+ "ld1 { v12.s }[2], [x19]\n"
"b 28f\n"
"27:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 1: Unset
- "ldr s12, [x20, #0x0]\n"
+ "ldr s12, [x19, #0x0]\n"
"28:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 1: End
- "fmla v30.4s, v8.4s, v12.4s\n"
- "fmla v31.4s, v7.4s, v12.4s\n"
- "fmax v28.4s, v28.4s, v18.4s\n"
- "fmax v29.4s, v29.4s, v18.4s\n"
- "fmax v30.4s, v30.4s, v18.4s\n"
+ "fmla v29.4s, v8.4s, v12.4s\n"
+ "fmla v28.4s, v7.4s, v12.4s\n"
"fmax v31.4s, v31.4s, v18.4s\n"
- "fmin v28.4s, v28.4s, v17.4s\n"
- "fmin v29.4s, v29.4s, v17.4s\n"
- "fmin v30.4s, v30.4s, v17.4s\n"
+ "fmax v30.4s, v30.4s, v18.4s\n"
"fmin v31.4s, v31.4s, v17.4s\n"
+ "fmax v29.4s, v29.4s, v18.4s\n"
+ "fmin v30.4s, v30.4s, v17.4s\n"
+ "fmax v28.4s, v28.4s, v18.4s\n"
+ "fmin v29.4s, v29.4s, v17.4s\n"
+ "fmin v28.4s, v28.4s, v17.4s\n"
"tbz %x[n_channels], #1, 29f\n"
- "mov x21, x12\n"
- "mov x20, x25\n"
- "st1 { v28.d }[0], [x21], x14\n"
- "st1 { v30.d }[0], [x20], x14\n"
- "add x12, x12, #0x8\n"
- "add x25, x25, #0x8\n"
- "st1 { v29.d }[0], [x21]\n"
- "st1 { v31.d }[0], [x20]\n"
+ "mov x19, x10\n"
+ "st1 { v31.d }[0], [x19], x11\n"
+ "add x10, x10, #0x8\n"
+ "st1 { v30.d }[0], [x19]\n"
+ "mov x19, x24\n"
+ "st1 { v29.d }[0], [x19], x11\n"
+ "add x24, x24, #0x8\n"
+ "st1 { v28.d }[0], [x19]\n"
"tbz %x[n_channels], #0, 30f\n"
- "mov x21, x12\n"
- "mov x20, x25\n"
- "st1 { v28.s }[2], [x21], x14\n"
- "st1 { v30.s }[2], [x20], x14\n"
- "st1 { v29.s }[2], [x21]\n"
- "st1 { v31.s }[2], [x20]\n"
+ "mov x20, x10\n"
+ "st1 { v31.s }[2], [x20], x11\n"
+ "mov x19, x24\n"
+ "st1 { v30.s }[2], [x20]\n"
+ "st1 { v29.s }[2], [x19], x11\n"
+ "st1 { v28.s }[2], [x19]\n"
"b 30f\n"
"29:" // Tile loop: Oddments: Store: Bit 1: Unset
- "mov x21, x12\n"
- "mov x20, x25\n"
- "st1 { v28.s }[0], [x21], x14\n"
- "st1 { v30.s }[0], [x20], x14\n"
- "st1 { v29.s }[0], [x21]\n"
- "st1 { v31.s }[0], [x20]\n"
+ "mov x20, x10\n"
+ "st1 { v31.s }[0], [x20], x11\n"
+ "mov x19, x24\n"
+ "st1 { v30.s }[0], [x20]\n"
+ "st1 { v29.s }[0], [x19], x11\n"
+ "st1 { v28.s }[0], [x19]\n"
"30:" // Tile loop: Oddments: Store: Bit 1: End
"31:" // Tile loop: End
- "ldr x22, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x23, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "add x22, x22, #0x1\n"
- "add x21, x23, #0x1\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "cmp x22, x20\n"
+ "ldr x17, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "add x21, x17, #0x1\n"
+ "ldr x16, [%x[params_struct], %[offsetof_args_tile_j]]\n"
"ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "csel x23, x23, x21, LT\n"
- "csel x22, x22, XZR, LT\n"
- "cmp x23, x20\n"
+ "add x16, x16, #0x1\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "cmp x16, x19\n"
+ "csel x16, x16, XZR, LT\n"
+ "csel x17, x17, x21, LT\n"
+ "cmp x17, x20\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
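
Note (editorial, not part of the patch): both the old and new versions of the oddments tail above use the same remainder technique; after the vectorised channel loop, the low bits of n_channels drive "tbz ..., #1" and "tbz ..., #0" branches that move the last one to three floats lane by lane. A hedged C++ sketch of that pattern, with hypothetical names:

    // Bit 1 of n selects a two-float copy, bit 0 a single-float copy,
    // mirroring the tbz-guarded d-register and s-lane accesses above.
    static void copy_oddments(float *dst, const float *src, unsigned n)
    {
        unsigned i = 0;
        if (n & 2) {            // like "tbz %x[n_channels], #1": handle a pair
            dst[i]     = src[i];
            dst[i + 1] = src[i + 1];
            i += 2;
        }
        if (n & 1) {            // like "tbz %x[n_channels], #0": handle the last one
            dst[i] = src[i];
        }
    }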
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
index 56e9ed2e1b..42931fba17 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -79,283 +79,283 @@ void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
__asm__ __volatile__(
"ldr x21, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "mov x16, #0x10\n" // cntb _, ALL, #1
- "lsr x15, %x[n_channels], #0x2\n"
- "ldr x14, [%x[params_struct], %[offsetof_args_params]]\n"
+ "add x16, %x[params_struct], %[offsetof_Args_inptrs]\n"
+ "ldr x15, [%x[params_struct], %[offsetof_args_params]]\n"
"add x20, %x[params_struct], %[offsetof_args_min]\n"
+ "add x19, %x[params_struct], %[offsetof_args_max]\n"
"ld1r { v18.4s }, [x20]\n"
- "add x20, %x[params_struct], %[offsetof_args_max]\n"
- "ld1r { v17.4s }, [x20]\n"
- "add x13, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "ldp x12, x11, [x21, #0x0]\n"
+ "ld1r { v17.4s }, [x19]\n"
+ "mov x14, #0x0\n"
+ "ldp x13, x12, [x21, #0x0]\n"
+ "mov x11, #0x10\n" // cntb _, ALL, #1
"ldp x10, x9, [x21, #0x10]\n"
- "mov x28, #0x0\n"
- "sub x27, XZR, x16\n"
- "cbz x15, 3f\n"
- "ldr q16, [x14, #0x0]\n"
- "ldr q0, [x14, #0x10]\n"
- "cmp x16, x15, LSL #4\n"
- "ldr q1, [x14, #0x20]\n"
- "ldr q2, [x14, #0x30]\n"
- "ldr q3, [x14, #0x40]\n"
- "ldr q4, [x14, #0x50]\n"
- "ldr q5, [x14, #0x60]\n"
- "ldr q6, [x14, #0x70]\n"
- "ldr q7, [x14, #0x80]\n"
- "ldr q8, [x14, #0x90]\n"
- "add x14, x14, #0xa0\n"
- "ldp x26, x22, [x13, #0x0]\n"
- "ldr q9, [x26, x28]\n"
- "ldr q10, [x22, x28]\n"
- "ldp x25, x24, [x13, #0x10]\n"
- "ldr q11, [x25, x28]\n"
- "ldr q12, [x24, x28]\n"
- "ldr x23, [x13, #0x20]\n"
- "ldr q13, [x23, x28]\n"
+ "sub x28, XZR, x11\n"
+ "lsr x27, %x[n_channels], #0x2\n"
+ "cbz x27, 3f\n"
+ "ldr q16, [x15, #0x0]\n"
+ "ldr q0, [x15, #0x10]\n"
+ "cmp x11, x27, LSL #4\n"
+ "ldr q1, [x15, #0x20]\n"
+ "ldr q2, [x15, #0x30]\n"
+ "ldr q3, [x15, #0x40]\n"
+ "ldr q4, [x15, #0x50]\n"
+ "ldr q5, [x15, #0x60]\n"
+ "ldr q6, [x15, #0x70]\n"
+ "ldr q7, [x15, #0x80]\n"
+ "ldr q8, [x15, #0x90]\n"
+ "add x15, x15, #0xa0\n"
+ "ldp x26, x25, [x16, #0x0]\n"
+ "ldp x24, x23, [x16, #0x10]\n"
+ "ldr x22, [x16, #0x20]\n"
+ "ldr q9, [x26, x14]\n"
+ "ldr q10, [x25, x14]\n"
+ "ldr q11, [x24, x14]\n"
+ "ldr q12, [x23, x14]\n"
+ "ldr q13, [x22, x14]\n"
"bge 2f\n"
"1:" // Channel loop
- "mov v28.16b, v16.16b\n fmla v28.4s, v4.4s, v9.4s\n"
- "mov v29.16b, v16.16b\n fmla v29.4s, v3.4s, v9.4s\n"
- "ldr x22, [x13, #0x28]\n"
- "ldr x21, [x13, #0x30]\n"
- "mov v30.16b, v16.16b\n fmla v30.4s, v1.4s, v9.4s\n"
- "mov v31.16b, v16.16b\n fmla v31.4s, v0.4s, v9.4s\n"
- "ldr q9, [x22, x28]\n"
- "ldr q16, [x14, #0x0]\n"
- "fmla v28.4s, v0.4s, v10.4s\n"
- "fmla v29.4s, v2.4s, v11.4s\n"
- "ldr q11, [x21, x28]\n"
- "ldr x20, [x13, #0x38]\n"
- "fmla v30.4s, v2.4s, v12.4s\n"
- "fmla v31.4s, v1.4s, v12.4s\n"
- "ldr x22, [x13, #0x48]\n"
- "ldr q10, [x22, x28]\n"
- "fmla v28.4s, v5.4s, v12.4s\n"
- "fmla v29.4s, v4.4s, v12.4s\n"
- "ldr q12, [x20, x28]\n"
- "ldr x26, [x13, #0x40]\n"
- "fmla v30.4s, v6.4s, v9.4s\n"
- "ldr q9, [x26, x28]\n"
- "fmla v31.4s, v3.4s, v13.4s\n"
- "ldr x25, [x13, #0x50]\n"
- "fmla v28.4s, v7.4s, v13.4s\n"
- "fmla v29.4s, v6.4s, v13.4s\n"
- "ldr x24, [x13, #0x58]\n"
- "ldr x23, [x13, #0x60]\n"
- "fmla v30.4s, v4.4s, v13.4s\n"
- "fmla v31.4s, v8.4s, v11.4s\n"
- "ldr q11, [x25, x28]\n"
- "ldr x22, [x13, #0x68]\n"
+ "mov v31.16b, v16.16b\n fmla v31.4s, v4.4s, v9.4s\n"
+ "ldr x21, [x16, #0x28]\n"
+ "add x28, x28, #0x10\n"
+ "mov v30.16b, v16.16b\n fmla v30.4s, v3.4s, v9.4s\n"
+ "ldr x20, [x16, #0x30]\n"
+ "mov v29.16b, v16.16b\n fmla v29.4s, v1.4s, v9.4s\n"
+ "ldr x19, [x16, #0x38]\n"
+ "mov v28.16b, v16.16b\n fmla v28.4s, v0.4s, v9.4s\n"
+ "ldr q9, [x21, x14]\n"
+ "ldr x26, [x16, #0x40]\n"
+ "fmla v31.4s, v0.4s, v10.4s\n"
+ "ldr x25, [x16, #0x48]\n"
+ "fmla v30.4s, v2.4s, v11.4s\n"
+ "fmla v29.4s, v2.4s, v12.4s\n"
+ "ldr q11, [x20, x14]\n"
"fmla v28.4s, v1.4s, v12.4s\n"
- "fmla v29.4s, v0.4s, v12.4s\n"
- "ldr q12, [x24, x28]\n"
- "ldr x21, [x13, #0x70]\n"
- "fmla v30.4s, v5.4s, v10.4s\n"
- "fmla v31.4s, v4.4s, v10.4s\n"
- "ldr q4, [x14, #0x50]\n"
- "ldr x20, [x13, #0x78]\n"
- "fmla v28.4s, v2.4s, v9.4s\n"
- "fmla v29.4s, v1.4s, v9.4s\n"
- "ldr q9, [x23, x28]\n"
- "ldr q1, [x14, #0x20]\n"
- "fmla v30.4s, v0.4s, v11.4s\n"
- "ldr q0, [x14, #0x10]\n"
- "fmla v31.4s, v2.4s, v12.4s\n"
- "ldr q2, [x14, #0x30]\n"
- "fmla v28.4s, v8.4s, v10.4s\n"
- "fmla v29.4s, v7.4s, v10.4s\n"
- "ldr q10, [x22, x28]\n"
- "ldp x26, x22, [x13, #0x0]\n"
- "fmla v30.4s, v3.4s, v9.4s\n"
- "fmla v31.4s, v5.4s, v10.4s\n"
- "ldp x25, x24, [x13, #0x10]\n"
- "ldr x23, [x13, #0x20]\n"
- "ldr q13, [x23, x16]\n"
- "fmla v28.4s, v3.4s, v11.4s\n"
- "ldr q11, [x21, x28]\n"
- "fmla v29.4s, v5.4s, v12.4s\n"
- "ldr q12, [x20, x28]\n"
- "ldr q3, [x14, #0x40]\n"
- "fmla v30.4s, v7.4s, v11.4s\n"
- "fmla v31.4s, v6.4s, v11.4s\n"
- "ldr q11, [x25, x16]\n"
- "ldr q5, [x14, #0x60]\n"
- "fmla v28.4s, v6.4s, v9.4s\n"
- "fmla v29.4s, v8.4s, v10.4s\n"
- "ldr q9, [x26, x16]\n"
- "ldr q10, [x22, x16]\n"
- "fmla v30.4s, v8.4s, v12.4s\n"
- "fmla v31.4s, v7.4s, v12.4s\n"
- "ldr q12, [x24, x16]\n"
- "ldr q6, [x14, #0x70]\n"
- "fmax v28.4s, v28.4s, v18.4s\n"
- "fmax v29.4s, v29.4s, v18.4s\n"
- "ldr q7, [x14, #0x80]\n"
- "ldr q8, [x14, #0x90]\n"
- "fmax v30.4s, v30.4s, v18.4s\n"
+ "ldr q10, [x25, x14]\n"
+ "ldr x24, [x16, #0x50]\n"
+ "fmla v31.4s, v5.4s, v12.4s\n"
+ "ldr x23, [x16, #0x58]\n"
+ "fmla v30.4s, v4.4s, v12.4s\n"
+ "fmla v29.4s, v6.4s, v9.4s\n"
+ "ldr q12, [x19, x14]\n"
+ "fmla v28.4s, v3.4s, v13.4s\n"
+ "ldr q9, [x26, x14]\n"
+ "ldr x22, [x16, #0x60]\n"
+ "fmla v31.4s, v7.4s, v13.4s\n"
+ "ldr x21, [x16, #0x68]\n"
+ "fmla v30.4s, v6.4s, v13.4s\n"
+ "fmla v29.4s, v4.4s, v13.4s\n"
+ "ldr x20, [x16, #0x70]\n"
+ "fmla v28.4s, v8.4s, v11.4s\n"
+ "ldr q11, [x24, x14]\n"
+ "ldr x19, [x16, #0x78]\n"
+ "fmla v31.4s, v1.4s, v12.4s\n"
+ "ldp x26, x25, [x16, #0x0]\n"
+ "fmla v30.4s, v0.4s, v12.4s\n"
+ "fmla v29.4s, v5.4s, v10.4s\n"
+ "ldr q12, [x23, x14]\n"
+ "fmla v28.4s, v4.4s, v10.4s\n"
+ "ldp x24, x23, [x16, #0x10]\n"
+ "ldr q16, [x15, #0x0]\n"
+ "fmla v31.4s, v2.4s, v9.4s\n"
+ "ldr q4, [x15, #0x50]\n"
+ "fmla v30.4s, v1.4s, v9.4s\n"
+ "fmla v29.4s, v0.4s, v11.4s\n"
+ "ldr q9, [x22, x14]\n"
+ "fmla v28.4s, v2.4s, v12.4s\n"
+ "ldr x22, [x16, #0x20]\n"
+ "ldr q0, [x15, #0x10]\n"
+ "fmla v31.4s, v8.4s, v10.4s\n"
+ "ldr q1, [x15, #0x20]\n"
+ "fmla v30.4s, v7.4s, v10.4s\n"
+ "ldr q10, [x21, x14]\n"
+ "fmla v29.4s, v3.4s, v9.4s\n"
+ "ldr q13, [x22, x11]\n"
+ "fmla v31.4s, v3.4s, v11.4s\n"
+ "ldr q11, [x20, x14]\n"
+ "ldr q2, [x15, #0x30]\n"
+ "fmla v30.4s, v5.4s, v12.4s\n"
+ "fmla v28.4s, v5.4s, v10.4s\n"
+ "ldr q12, [x19, x14]\n"
+ "add x14, x14, #0x10\n"
+ "fmla v31.4s, v6.4s, v9.4s\n"
+ "ldr q9, [x26, x11]\n"
+ "fmla v29.4s, v7.4s, v11.4s\n"
+ "ldr q3, [x15, #0x40]\n"
+ "fmla v30.4s, v8.4s, v10.4s\n"
+ "ldr q10, [x25, x11]\n"
+ "fmla v28.4s, v6.4s, v11.4s\n"
+ "ldr q11, [x24, x11]\n"
+ "ldr q5, [x15, #0x60]\n"
+ "fmla v29.4s, v8.4s, v12.4s\n"
"fmax v31.4s, v31.4s, v18.4s\n"
- "add x16, x16, #0x10\n"
- "add x27, x27, #0x10\n"
- "fmin v28.4s, v28.4s, v17.4s\n"
- "fmin v29.4s, v29.4s, v17.4s\n"
- "cmp x16, x15, LSL #4\n"
- "fmin v30.4s, v30.4s, v17.4s\n"
+ "ldr q6, [x15, #0x70]\n"
+ "fmax v30.4s, v30.4s, v18.4s\n"
+ "ldr q8, [x15, #0x90]\n"
+ "fmla v28.4s, v7.4s, v12.4s\n"
+ "ldr q12, [x23, x11]\n"
+ "add x11, x11, #0x10\n"
"fmin v31.4s, v31.4s, v17.4s\n"
- "add x28, x28, #0x10\n"
- "str q28, [x12, x27]\n"
- "add x14, x14, #0xa0\n"
- "str q29, [x11, x27]\n"
- "str q30, [x10, x27]\n"
- "str q31, [x9, x27]\n"
+ "ldr q7, [x15, #0x80]\n"
+ "cmp x11, x27, LSL #4\n"
+ "fmin v30.4s, v30.4s, v17.4s\n"
+ "str q31, [x13, x28]\n"
+ "fmax v29.4s, v29.4s, v18.4s\n"
+ "add x15, x15, #0xa0\n"
+ "fmax v28.4s, v28.4s, v18.4s\n"
+ "str q30, [x12, x28]\n"
+ "fmin v29.4s, v29.4s, v17.4s\n"
+ "str q29, [x10, x28]\n"
+ "fmin v28.4s, v28.4s, v17.4s\n"
+ "str q28, [x9, x28]\n"
"blt 1b\n"
"2:" // Channel tail
- "mov v28.16b, v16.16b\n fmla v28.4s, v4.4s, v9.4s\n"
- "mov v29.16b, v16.16b\n fmla v29.4s, v3.4s, v9.4s\n"
- "ldr x22, [x13, #0x28]\n"
- "ldr x21, [x13, #0x30]\n"
- "mov v30.16b, v16.16b\n fmla v30.4s, v1.4s, v9.4s\n"
- "mov v31.16b, v16.16b\n fmla v31.4s, v0.4s, v9.4s\n"
- "ldr q9, [x22, x28]\n"
- "ldr x20, [x13, #0x38]\n"
- "fmla v28.4s, v0.4s, v10.4s\n"
- "fmla v29.4s, v2.4s, v11.4s\n"
- "ldr q11, [x21, x28]\n"
- "ldr x22, [x13, #0x48]\n"
- "ldr q10, [x22, x28]\n"
- "fmla v30.4s, v2.4s, v12.4s\n"
- "fmla v31.4s, v1.4s, v12.4s\n"
- "ldr x26, [x13, #0x40]\n"
- "fmla v28.4s, v5.4s, v12.4s\n"
- "fmla v29.4s, v4.4s, v12.4s\n"
- "ldr q12, [x20, x28]\n"
- "ldr x25, [x13, #0x50]\n"
- "fmla v30.4s, v6.4s, v9.4s\n"
- "ldr q9, [x26, x28]\n"
- "fmla v31.4s, v3.4s, v13.4s\n"
- "ldr x24, [x13, #0x58]\n"
- "fmla v28.4s, v7.4s, v13.4s\n"
- "fmla v29.4s, v6.4s, v13.4s\n"
- "ldr x23, [x13, #0x60]\n"
- "ldr x22, [x13, #0x68]\n"
- "fmla v30.4s, v4.4s, v13.4s\n"
- "fmla v31.4s, v8.4s, v11.4s\n"
- "ldr q11, [x25, x28]\n"
- "ldr x21, [x13, #0x70]\n"
- "fmla v28.4s, v1.4s, v12.4s\n"
- "fmla v29.4s, v0.4s, v12.4s\n"
- "ldr q12, [x24, x28]\n"
- "ldr x20, [x13, #0x78]\n"
- "fmla v30.4s, v5.4s, v10.4s\n"
- "fmla v31.4s, v4.4s, v10.4s\n"
- "add x27, x27, #0x10\n"
- "fmla v28.4s, v2.4s, v9.4s\n"
- "fmla v29.4s, v1.4s, v9.4s\n"
- "ldr q9, [x23, x28]\n"
- "fmla v30.4s, v0.4s, v11.4s\n"
- "fmla v31.4s, v2.4s, v12.4s\n"
- "fmla v28.4s, v8.4s, v10.4s\n"
- "fmla v29.4s, v7.4s, v10.4s\n"
- "ldr q10, [x22, x28]\n"
- "fmla v30.4s, v3.4s, v9.4s\n"
- "fmla v31.4s, v5.4s, v10.4s\n"
- "fmla v28.4s, v3.4s, v11.4s\n"
- "ldr q11, [x21, x28]\n"
- "fmla v29.4s, v5.4s, v12.4s\n"
- "ldr q12, [x20, x28]\n"
- "fmla v30.4s, v7.4s, v11.4s\n"
- "fmla v31.4s, v6.4s, v11.4s\n"
+ "mov v31.16b, v16.16b\n fmla v31.4s, v4.4s, v9.4s\n"
+ "ldr x21, [x16, #0x28]\n"
"add x28, x28, #0x10\n"
- "fmla v28.4s, v6.4s, v9.4s\n"
- "fmla v29.4s, v8.4s, v10.4s\n"
- "fmax v28.4s, v28.4s, v18.4s\n"
- "fmla v30.4s, v8.4s, v12.4s\n"
- "fmla v31.4s, v7.4s, v12.4s\n"
- "fmax v29.4s, v29.4s, v18.4s\n"
- "fmax v30.4s, v30.4s, v18.4s\n"
+ "mov v30.16b, v16.16b\n fmla v30.4s, v3.4s, v9.4s\n"
+ "ldr x20, [x16, #0x30]\n"
+ "mov v29.16b, v16.16b\n fmla v29.4s, v1.4s, v9.4s\n"
+ "ldr x19, [x16, #0x38]\n"
+ "mov v28.16b, v16.16b\n fmla v28.4s, v0.4s, v9.4s\n"
+ "ldr q9, [x21, x14]\n"
+ "ldr x26, [x16, #0x40]\n"
+ "fmla v31.4s, v0.4s, v10.4s\n"
+ "ldr x25, [x16, #0x48]\n"
+ "fmla v30.4s, v2.4s, v11.4s\n"
+ "fmla v29.4s, v2.4s, v12.4s\n"
+ "ldr q11, [x20, x14]\n"
+ "fmla v28.4s, v1.4s, v12.4s\n"
+ "ldr q10, [x25, x14]\n"
+ "ldr x24, [x16, #0x50]\n"
+ "fmla v31.4s, v5.4s, v12.4s\n"
+ "ldr x23, [x16, #0x58]\n"
+ "fmla v30.4s, v4.4s, v12.4s\n"
+ "fmla v29.4s, v6.4s, v9.4s\n"
+ "ldr q12, [x19, x14]\n"
+ "fmla v28.4s, v3.4s, v13.4s\n"
+ "ldr q9, [x26, x14]\n"
+ "ldr x22, [x16, #0x60]\n"
+ "fmla v31.4s, v7.4s, v13.4s\n"
+ "ldr x21, [x16, #0x68]\n"
+ "fmla v30.4s, v6.4s, v13.4s\n"
+ "fmla v29.4s, v4.4s, v13.4s\n"
+ "ldr x20, [x16, #0x70]\n"
+ "fmla v28.4s, v8.4s, v11.4s\n"
+ "ldr q11, [x24, x14]\n"
+ "ldr x19, [x16, #0x78]\n"
+ "fmla v31.4s, v1.4s, v12.4s\n"
+ "fmla v30.4s, v0.4s, v12.4s\n"
+ "ldr q12, [x23, x14]\n"
+ "fmla v29.4s, v5.4s, v10.4s\n"
+ "fmla v28.4s, v4.4s, v10.4s\n"
+ "fmla v31.4s, v2.4s, v9.4s\n"
+ "fmla v30.4s, v1.4s, v9.4s\n"
+ "ldr q9, [x22, x14]\n"
+ "fmla v29.4s, v0.4s, v11.4s\n"
+ "fmla v28.4s, v2.4s, v12.4s\n"
+ "fmla v31.4s, v8.4s, v10.4s\n"
+ "fmla v30.4s, v7.4s, v10.4s\n"
+ "ldr q10, [x21, x14]\n"
+ "fmla v29.4s, v3.4s, v9.4s\n"
+ "fmla v31.4s, v3.4s, v11.4s\n"
+ "ldr q11, [x20, x14]\n"
+ "fmla v30.4s, v5.4s, v12.4s\n"
+ "ldr q12, [x19, x14]\n"
+ "add x14, x14, #0x10\n"
+ "fmla v28.4s, v5.4s, v10.4s\n"
+ "fmla v31.4s, v6.4s, v9.4s\n"
+ "fmla v29.4s, v7.4s, v11.4s\n"
+ "fmla v30.4s, v8.4s, v10.4s\n"
+ "fmla v28.4s, v6.4s, v11.4s\n"
+ "fmla v29.4s, v8.4s, v12.4s\n"
"fmax v31.4s, v31.4s, v18.4s\n"
- "fmin v28.4s, v28.4s, v17.4s\n"
- "fmin v29.4s, v29.4s, v17.4s\n"
- "str q28, [x12, x27]\n"
- "fmin v30.4s, v30.4s, v17.4s\n"
+ "fmax v30.4s, v30.4s, v18.4s\n"
+ "fmla v28.4s, v7.4s, v12.4s\n"
"fmin v31.4s, v31.4s, v17.4s\n"
- "str q29, [x11, x27]\n"
- "str q30, [x10, x27]\n"
- "str q31, [x9, x27]\n"
+ "str q31, [x13, x28]\n"
+ "fmin v30.4s, v30.4s, v17.4s\n"
+ "fmax v29.4s, v29.4s, v18.4s\n"
+ "str q30, [x12, x28]\n"
+ "fmin v29.4s, v29.4s, v17.4s\n"
+ "fmax v28.4s, v28.4s, v18.4s\n"
+ "str q29, [x10, x28]\n"
+ "fmin v28.4s, v28.4s, v17.4s\n"
+ "str q28, [x9, x28]\n"
"3:" // Oddments
"tst %x[n_channels], #0x3\n"
"beq 30f\n"
- "ldr q16, [x14, #0x0]\n"
- "ldr q0, [x14, #0x10]\n"
- "mov x27, x28\n"
- "add x12, x12, x27\n"
- "ldr q1, [x14, #0x20]\n"
- "ldr q2, [x14, #0x30]\n"
- "add x11, x11, x27\n"
- "add x10, x10, x27\n"
- "ldr q3, [x14, #0x40]\n"
- "ldr q4, [x14, #0x50]\n"
- "add x9, x9, x27\n"
- "ldr q5, [x14, #0x60]\n"
- "ldr q6, [x14, #0x70]\n"
- "ldr q7, [x14, #0x80]\n"
- "ldr q8, [x14, #0x90]\n"
- "ldr x24, [x13, #0x0]\n"
- "ldr x23, [x13, #0x8]\n"
- "add x24, x24, x28\n"
- "add x23, x23, x28\n"
- "ldr x22, [x13, #0x10]\n"
- "ldr x21, [x13, #0x18]\n"
- "add x22, x22, x28\n"
- "add x21, x21, x28\n"
- "ldr x20, [x13, #0x20]\n"
- "add x20, x20, x28\n"
+ "ldr q16, [x15, #0x0]\n"
+ "ldr q0, [x15, #0x10]\n"
+ "mov x28, x14\n"
+ "ldr q1, [x15, #0x20]\n"
+ "add x13, x13, x28\n"
+ "ldr q2, [x15, #0x30]\n"
+ "add x12, x12, x28\n"
+ "ldr q3, [x15, #0x40]\n"
+ "add x10, x10, x28\n"
+ "ldr q4, [x15, #0x50]\n"
+ "add x9, x9, x28\n"
+ "ldr q5, [x15, #0x60]\n"
+ "ldr q6, [x15, #0x70]\n"
+ "ldr q7, [x15, #0x80]\n"
+ "ldr q8, [x15, #0x90]\n"
+ "ldr x26, [x16, #0x0]\n"
+ "ldr x25, [x16, #0x8]\n"
+ "add x26, x26, x14\n"
+ "ldr x24, [x16, #0x10]\n"
+ "ldr x23, [x16, #0x18]\n"
+ "add x25, x25, x14\n"
+ "ldr x22, [x16, #0x20]\n"
+ "add x24, x24, x14\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"tbz %x[n_channels], #1, 4f\n"
- "ld1 { v9.d }[0], [x24], #0x8\n"
- "ld1 { v10.d }[0], [x23], #0x8\n"
- "ld1 { v11.d }[0], [x22], #0x8\n"
- "ld1 { v12.d }[0], [x21], #0x8\n"
- "ld1 { v13.d }[0], [x20], #0x8\n"
+ "ld1 { v9.d }[0], [x26], #0x8\n"
+ "ld1 { v10.d }[0], [x25], #0x8\n"
+ "ld1 { v11.d }[0], [x24], #0x8\n"
+ "ld1 { v12.d }[0], [x23], #0x8\n"
+ "ld1 { v13.d }[0], [x22], #0x8\n"
"tbz %x[n_channels], #0, 5f\n"
- "ld1 { v9.s }[2], [x24], #0x4\n"
- "ld1 { v10.s }[2], [x23], #0x4\n"
- "ld1 { v11.s }[2], [x22], #0x4\n"
- "ld1 { v12.s }[2], [x21], #0x4\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x26], #0x4\n"
+ "ld1 { v10.s }[2], [x25], #0x4\n"
+ "ld1 { v11.s }[2], [x24], #0x4\n"
+ "ld1 { v12.s }[2], [x23], #0x4\n"
+ "ld1 { v13.s }[2], [x22], #0x4\n"
"b 5f\n"
"4:" // Oddments: Load inputs (1, 1), (0, 0), (0, 3), (1, 2), (2, 1): Bit 1: Unset
- "ld1 { v9.s }[0], [x24], #0x4\n"
- "ld1 { v10.s }[0], [x23], #0x4\n"
- "ld1 { v11.s }[0], [x22], #0x4\n"
- "ld1 { v12.s }[0], [x21], #0x4\n"
- "ld1 { v13.s }[0], [x20], #0x4\n"
+ "ld1 { v9.s }[0], [x26], #0x4\n"
+ "ld1 { v10.s }[0], [x25], #0x4\n"
+ "ld1 { v11.s }[0], [x24], #0x4\n"
+ "ld1 { v12.s }[0], [x23], #0x4\n"
+ "ld1 { v13.s }[0], [x22], #0x4\n"
"5:" // Oddments: Load inputs (1, 1), (0, 0), (0, 3), (1, 2), (2, 1): Bit 1: End
- "mov v28.16b, v16.16b\n fmla v28.4s, v4.4s, v9.4s\n"
- "mov v29.16b, v16.16b\n fmla v29.4s, v3.4s, v9.4s\n"
- "ldr x20, [x13, #0x28]\n"
- "add x20, x20, x28\n"
- "mov v30.16b, v16.16b\n fmla v30.4s, v1.4s, v9.4s\n"
- "mov v31.16b, v16.16b\n fmla v31.4s, v0.4s, v9.4s\n"
- "fmla v28.4s, v0.4s, v10.4s\n"
- "fmla v29.4s, v2.4s, v11.4s\n"
- "fmla v28.4s, v5.4s, v12.4s\n"
- "fmla v29.4s, v4.4s, v12.4s\n"
- "fmla v30.4s, v2.4s, v12.4s\n"
- "fmla v31.4s, v1.4s, v12.4s\n"
+ "mov v31.16b, v16.16b\n fmla v31.4s, v4.4s, v9.4s\n"
+ "ldr x21, [x16, #0x28]\n"
+ "add x21, x21, x14\n"
+ "mov v30.16b, v16.16b\n fmla v30.4s, v3.4s, v9.4s\n"
+ "mov v29.16b, v16.16b\n fmla v29.4s, v1.4s, v9.4s\n"
+ "mov v28.16b, v16.16b\n fmla v28.4s, v0.4s, v9.4s\n"
+ "fmla v31.4s, v0.4s, v10.4s\n"
+ "fmla v30.4s, v2.4s, v11.4s\n"
+ "fmla v29.4s, v2.4s, v12.4s\n"
+ "fmla v28.4s, v1.4s, v12.4s\n"
+ "fmla v31.4s, v5.4s, v12.4s\n"
+ "fmla v30.4s, v4.4s, v12.4s\n"
"tbz %x[n_channels], #1, 6f\n"
- "ld1 { v9.d }[0], [x20], #0x8\n"
+ "ld1 { v9.d }[0], [x21], #0x8\n"
"tbz %x[n_channels], #0, 7f\n"
- "ld1 { v9.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x21], #0x4\n"
"b 7f\n"
"6:" // Oddments: Load input (3, 0): Bit 1: Unset
- "ld1 { v9.s }[0], [x20], #0x4\n"
+ "ld1 { v9.s }[0], [x21], #0x4\n"
"7:" // Oddments: Load input (3, 0): Bit 1: End
- "fmla v30.4s, v6.4s, v9.4s\n"
- "ldr x20, [x13, #0x30]\n"
- "fmla v28.4s, v7.4s, v13.4s\n"
- "add x20, x20, x28\n"
- "fmla v29.4s, v6.4s, v13.4s\n"
- "fmla v30.4s, v4.4s, v13.4s\n"
- "fmla v31.4s, v3.4s, v13.4s\n"
+ "fmla v29.4s, v6.4s, v9.4s\n"
+ "ldr x20, [x16, #0x30]\n"
+ "fmla v31.4s, v7.4s, v13.4s\n"
+ "add x20, x20, x14\n"
+ "fmla v30.4s, v6.4s, v13.4s\n"
+ "fmla v28.4s, v3.4s, v13.4s\n"
+ "fmla v29.4s, v4.4s, v13.4s\n"
"tbz %x[n_channels], #1, 8f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 9f\n"
@@ -364,95 +364,95 @@ void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"8:" // Oddments: Load input (3, 3): Bit 1: Unset
"ld1 { v11.s }[0], [x20], #0x4\n"
"9:" // Oddments: Load input (3, 3): Bit 1: End
- "ldr x20, [x13, #0x38]\n"
- "fmla v31.4s, v8.4s, v11.4s\n"
- "add x20, x20, x28\n"
+ "fmla v28.4s, v8.4s, v11.4s\n"
+ "ldr x19, [x16, #0x38]\n"
+ "add x19, x19, x14\n"
"tbz %x[n_channels], #1, 10f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"b 11f\n"
"10:" // Oddments: Load input (0, 1): Bit 1: Unset
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x19], #0x4\n"
"11:" // Oddments: Load input (0, 1): Bit 1: End
- "ldr x20, [x13, #0x40]\n"
- "fmla v28.4s, v1.4s, v12.4s\n"
- "fmla v29.4s, v0.4s, v12.4s\n"
- "add x20, x20, x28\n"
+ "fmla v31.4s, v1.4s, v12.4s\n"
+ "ldr x26, [x16, #0x40]\n"
+ "fmla v30.4s, v0.4s, v12.4s\n"
+ "add x26, x26, x14\n"
"tbz %x[n_channels], #1, 12f\n"
- "ld1 { v9.d }[0], [x20], #0x8\n"
+ "ld1 { v9.d }[0], [x26], #0x8\n"
"tbz %x[n_channels], #0, 13f\n"
- "ld1 { v9.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x26], #0x4\n"
"b 13f\n"
"12:" // Oddments: Load input (0, 2): Bit 1: Unset
- "ld1 { v9.s }[0], [x20], #0x4\n"
+ "ld1 { v9.s }[0], [x26], #0x4\n"
"13:" // Oddments: Load input (0, 2): Bit 1: End
- "ldr x20, [x13, #0x48]\n"
- "fmla v28.4s, v2.4s, v9.4s\n"
- "fmla v29.4s, v1.4s, v9.4s\n"
- "add x20, x20, x28\n"
+ "fmla v31.4s, v2.4s, v9.4s\n"
+ "ldr x25, [x16, #0x48]\n"
+ "fmla v30.4s, v1.4s, v9.4s\n"
+ "add x25, x25, x14\n"
"tbz %x[n_channels], #1, 14f\n"
- "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v10.d }[0], [x25], #0x8\n"
"tbz %x[n_channels], #0, 15f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x25], #0x4\n"
"b 15f\n"
"14:" // Oddments: Load input (2, 2): Bit 1: Unset
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v10.s }[0], [x25], #0x4\n"
"15:" // Oddments: Load input (2, 2): Bit 1: End
- "ldr x20, [x13, #0x50]\n"
- "fmla v28.4s, v8.4s, v10.4s\n"
- "fmla v29.4s, v7.4s, v10.4s\n"
- "add x20, x20, x28\n"
- "fmla v30.4s, v5.4s, v10.4s\n"
- "fmla v31.4s, v4.4s, v10.4s\n"
+ "fmla v31.4s, v8.4s, v10.4s\n"
+ "ldr x24, [x16, #0x50]\n"
+ "fmla v30.4s, v7.4s, v10.4s\n"
+ "add x24, x24, x14\n"
+ "fmla v29.4s, v5.4s, v10.4s\n"
+ "fmla v28.4s, v4.4s, v10.4s\n"
"tbz %x[n_channels], #1, 16f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x24], #0x8\n"
"tbz %x[n_channels], #0, 17f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x24], #0x4\n"
"b 17f\n"
"16:" // Oddments: Load input (1, 0): Bit 1: Unset
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x24], #0x4\n"
"17:" // Oddments: Load input (1, 0): Bit 1: End
- "ldr x20, [x13, #0x58]\n"
- "fmla v28.4s, v3.4s, v11.4s\n"
- "fmla v30.4s, v0.4s, v11.4s\n"
- "add x20, x20, x28\n"
+ "fmla v31.4s, v3.4s, v11.4s\n"
+ "ldr x23, [x16, #0x58]\n"
+ "fmla v29.4s, v0.4s, v11.4s\n"
+ "add x23, x23, x14\n"
"tbz %x[n_channels], #1, 18f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x23], #0x8\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x23], #0x4\n"
"b 19f\n"
"18:" // Oddments: Load input (1, 3): Bit 1: Unset
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x23], #0x4\n"
"19:" // Oddments: Load input (1, 3): Bit 1: End
- "ldr x20, [x13, #0x60]\n"
- "fmla v29.4s, v5.4s, v12.4s\n"
- "fmla v31.4s, v2.4s, v12.4s\n"
- "add x20, x20, x28\n"
+ "fmla v30.4s, v5.4s, v12.4s\n"
+ "ldr x22, [x16, #0x60]\n"
+ "fmla v28.4s, v2.4s, v12.4s\n"
+ "add x22, x22, x14\n"
"tbz %x[n_channels], #1, 20f\n"
- "ld1 { v9.d }[0], [x20], #0x8\n"
+ "ld1 { v9.d }[0], [x22], #0x8\n"
"tbz %x[n_channels], #0, 21f\n"
- "ld1 { v9.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x22], #0x4\n"
"b 21f\n"
"20:" // Oddments: Load input (2, 0): Bit 1: Unset
- "ld1 { v9.s }[0], [x20], #0x4\n"
+ "ld1 { v9.s }[0], [x22], #0x4\n"
"21:" // Oddments: Load input (2, 0): Bit 1: End
- "ldr x20, [x13, #0x68]\n"
- "fmla v28.4s, v6.4s, v9.4s\n"
- "fmla v30.4s, v3.4s, v9.4s\n"
- "add x20, x20, x28\n"
+ "fmla v31.4s, v6.4s, v9.4s\n"
+ "ldr x21, [x16, #0x68]\n"
+ "fmla v29.4s, v3.4s, v9.4s\n"
+ "add x21, x21, x14\n"
"tbz %x[n_channels], #1, 22f\n"
- "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v10.d }[0], [x21], #0x8\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x21], #0x4\n"
"b 23f\n"
"22:" // Oddments: Load input (2, 3): Bit 1: Unset
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v10.s }[0], [x21], #0x4\n"
"23:" // Oddments: Load input (2, 3): Bit 1: End
- "ldr x20, [x13, #0x70]\n"
- "fmla v29.4s, v8.4s, v10.4s\n"
- "fmla v31.4s, v5.4s, v10.4s\n"
- "add x20, x20, x28\n"
+ "fmla v30.4s, v8.4s, v10.4s\n"
+ "ldr x20, [x16, #0x70]\n"
+ "fmla v28.4s, v5.4s, v10.4s\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 24f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 25f\n"
@@ -461,49 +461,51 @@ void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"24:" // Oddments: Load input (3, 1): Bit 1: Unset
"ld1 { v11.s }[0], [x20], #0x4\n"
"25:" // Oddments: Load input (3, 1): Bit 1: End
- "ldr x20, [x13, #0x78]\n"
- "fmla v30.4s, v7.4s, v11.4s\n"
- "fmla v31.4s, v6.4s, v11.4s\n"
- "add x20, x20, x28\n"
+ "fmla v29.4s, v7.4s, v11.4s\n"
+ "ldr x19, [x16, #0x78]\n"
+ "fmla v28.4s, v6.4s, v11.4s\n"
+ "add x19, x19, x14\n"
"tbz %x[n_channels], #1, 26f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 27f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"b 27f\n"
"26:" // Oddments: Load input (3, 2): Bit 1: Unset
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x19], #0x4\n"
"27:" // Oddments: Load input (3, 2): Bit 1: End
- "fmla v30.4s, v8.4s, v12.4s\n"
- "fmla v31.4s, v7.4s, v12.4s\n"
- "fmax v28.4s, v28.4s, v18.4s\n"
- "fmax v29.4s, v29.4s, v18.4s\n"
- "fmax v30.4s, v30.4s, v18.4s\n"
+ "fmla v29.4s, v8.4s, v12.4s\n"
+ "fmla v28.4s, v7.4s, v12.4s\n"
"fmax v31.4s, v31.4s, v18.4s\n"
- "fmin v28.4s, v28.4s, v17.4s\n"
- "fmin v29.4s, v29.4s, v17.4s\n"
- "fmin v30.4s, v30.4s, v17.4s\n"
+ "fmax v30.4s, v30.4s, v18.4s\n"
"fmin v31.4s, v31.4s, v17.4s\n"
+ "fmax v29.4s, v29.4s, v18.4s\n"
+ "fmin v30.4s, v30.4s, v17.4s\n"
+ "fmax v28.4s, v28.4s, v18.4s\n"
+ "fmin v29.4s, v29.4s, v17.4s\n"
+ "fmin v28.4s, v28.4s, v17.4s\n"
"tbz %x[n_channels], #1, 28f\n"
- "st1 { v28.d }[0], [x12], #0x8\n"
- "st1 { v29.d }[0], [x11], #0x8\n"
- "st1 { v30.d }[0], [x10], #0x8\n"
- "st1 { v31.d }[0], [x9], #0x8\n"
+ "st1 { v31.d }[0], [x13], #0x8\n"
+ "st1 { v30.d }[0], [x12], #0x8\n"
+ "st1 { v29.d }[0], [x10], #0x8\n"
+ "st1 { v28.d }[0], [x9], #0x8\n"
"tbz %x[n_channels], #0, 29f\n"
- "st1 { v28.s }[2], [x12], #0x4\n"
- "st1 { v29.s }[2], [x11], #0x4\n"
- "st1 { v30.s }[2], [x10], #0x4\n"
- "st1 { v31.s }[2], [x9], #0x4\n"
+ "st1 { v31.s }[2], [x13], #0x4\n"
+ "st1 { v30.s }[2], [x12], #0x4\n"
+ "st1 { v29.s }[2], [x10], #0x4\n"
+ "st1 { v28.s }[2], [x9], #0x4\n"
"b 29f\n"
"28:" // Oddments: Store: Bit 1: Unset
- "st1 { v28.s }[0], [x12], #0x4\n"
- "st1 { v29.s }[0], [x11], #0x4\n"
- "st1 { v30.s }[0], [x10], #0x4\n"
- "st1 { v31.s }[0], [x9], #0x4\n"
+ "st1 { v31.s }[0], [x13], #0x4\n"
+ "st1 { v30.s }[0], [x12], #0x4\n"
+ "st1 { v29.s }[0], [x10], #0x4\n"
+ "st1 { v28.s }[0], [x9], #0x4\n"
"29:" // Oddments: Store: Bit 1: End
+
"30:" // End
+
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
index 620319bc7c..6d185e7274 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,739 +87,739 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
);
__asm__ __volatile__(
- "mov x24, #0x0\n"
- "mov x23, #0x0\n"
+ "mov x7, #0x0\n"
+ "mov x8, #0x0\n"
"1:" // Tile loop
- "str x24, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "mov x27, #0x3\n"
+ "str x7, [%x[params_struct], %[offsetof_args_tile_i]]\n"
"mov x26, #0x3\n"
- "str x23, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x25, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x22, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "mul x21, x24, x25\n" // offset = tile_i * ld_input_row
- "ldr x8, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "ldr x17, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "mul x20, x24, x22\n" // offset = tile_i * ld_output_row
- "mov x24, #0x10\n" // cntb _, ALL, #1
- "madd x21, x23, x8, x21\n" // offset += tile_j * ld_input_col
- "ldr x16, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "lsl x8, x8, #0x2\n"
- "ldr x15, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "madd x20, x23, x17, x20\n" // offset += tile_j * ld_output_col
- "lsl x17, x17, #0x2\n"
- "lsr x23, %x[n_channels], #0x2\n"
- "ldr x14, [%x[params_struct], %[offsetof_args_params]]\n"
- "mul x21, x21, x27\n" // offset *= kernel_stride * output_size
- "add x16, x16, x21, LSL #2\n" // inptr[0] += offset * sizeof(float)
- "add x13, x16, x25, LSL #2\n"
- "mul x20, x20, x26\n" // offset *= output_tile_size
- "add x12, x13, x25, LSL #2\n"
- "add x11, x8, x8\n"
- "add x15, x15, x20, LSL #2\n" // outptrs[0] += offset * sizeof(float)
- "add x10, x12, x25, LSL #2\n"
- "add x9, x11, x8\n"
- "add x28, x15, x22, LSL #2\n"
- "add x20, %x[params_struct], %[offsetof_args_min]\n"
- "ld1r { v18.4s }, [x20]\n"
- "add x20, %x[params_struct], %[offsetof_args_max]\n"
- "ld1r { v17.4s }, [x20]\n"
- "add x27, x10, x25, LSL #2\n"
- "add x26, x9, x8\n"
- "add x25, x28, x22, LSL #2\n"
- "add x22, x17, x17\n"
- "mov x21, #0x0\n"
- "sub x20, XZR, x24\n"
- "cbz x23, 4f\n"
- "ldr q16, [x14, #0x0]\n"
- "ldr q0, [x14, #0x10]\n"
- "cmp x24, x23, LSL #4\n"
- "ldr q1, [x14, #0x20]\n"
- "ldr q2, [x14, #0x30]\n"
- "ldr q3, [x14, #0x40]\n"
- "ldr q4, [x14, #0x50]\n"
- "ldr q5, [x14, #0x60]\n"
- "ldr q6, [x14, #0x70]\n"
- "ldr q7, [x14, #0x80]\n"
- "ldr q8, [x14, #0x90]\n"
- "add x14, x14, #0xa0\n"
- "ldr q9, [x12, x11]\n"
- "ld1 { v10.4s }, [x16]\n"
- "ldr q11, [x16, x26]\n"
- "ld1 { v12.4s }, [x27]\n"
- "ldr q13, [x13, x11]\n"
+ "str x8, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "mov x25, #0x3\n"
+ "ldr x17, [%x[params_struct], %[offsetof_args_params]]\n"
+ "add x24, %x[params_struct], %[offsetof_args_min]\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "add x21, %x[params_struct], %[offsetof_args_max]\n"
+ "ldr x16, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "mov x23, #0x0\n"
+ "ldr x15, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "mul x19, x7, x22\n" // offset = tile_i * ld_input_row
+ "ldr x20, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "madd x19, x8, x16, x19\n" // offset += tile_j * ld_input_col
+ "ldr x14, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+ "mul x19, x19, x26\n" // offset *= kernel_stride * output_size
+ "ldr x13, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "add x15, x15, x19, LSL #2\n" // inptr[0] += offset * sizeof(float)
+ "ld1r { v18.4s }, [x24]\n"
+ "add x12, x15, x22, LSL #2\n"
+ "ld1r { v17.4s }, [x21]\n"
+ "add x11, x12, x22, LSL #2\n"
+ "lsl x16, x16, #0x2\n"
+ "add x10, x11, x22, LSL #2\n"
+ "add x9, x10, x22, LSL #2\n"
+ "add x28, x16, x16\n"
+ "add x27, x28, x16\n"
+ "add x26, x27, x16\n"
+ "mul x19, x7, x20\n" // offset = tile_i * ld_output_row
+ "madd x19, x8, x14, x19\n" // offset += tile_j * ld_output_col
+ "mul x19, x19, x25\n" // offset *= output_tile_size
+ "add x13, x13, x19, LSL #2\n" // outptrs[0] += offset * sizeof(float)
+ "add x25, x13, x20, LSL #2\n"
+ "add x24, x25, x20, LSL #2\n"
+ "lsl x14, x14, #0x2\n"
+ "add x22, x14, x14\n"
+ "mov x21, #0x10\n" // cntb _, ALL, #1
+ "sub x20, XZR, x21\n"
+ "lsr x19, %x[n_channels], #0x2\n"
+ "cbz x19, 4f\n"
+ "ldr q16, [x17, #0x0]\n"
+ "ldr q0, [x17, #0x10]\n"
+ "cmp x21, x19, LSL #4\n"
+ "ldr q1, [x17, #0x20]\n"
+ "ldr q2, [x17, #0x30]\n"
+ "ldr q3, [x17, #0x40]\n"
+ "ldr q4, [x17, #0x50]\n"
+ "ldr q5, [x17, #0x60]\n"
+ "ldr q6, [x17, #0x70]\n"
+ "ldr q7, [x17, #0x80]\n"
+ "ldr q8, [x17, #0x90]\n"
+ "add x17, x17, #0xa0\n"
+ "ldr q9, [x11, x28]\n"
+ "ld1 { v10.4s }, [x15]\n"
+ "ldr q11, [x15, x26]\n"
+ "ld1 { v12.4s }, [x9]\n"
+ "ldr q13, [x12, x28]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
- "mov v24.16b, v16.16b\n fmla v24.4s, v7.4s, v9.4s\n"
- "mov v23.16b, v16.16b\n fmla v23.4s, v8.4s, v9.4s\n"
- "add x24, x24, #0x10\n"
- "cmp x24, x23, LSL #4\n"
- "mov v25.16b, v16.16b\n fmla v25.4s, v6.4s, v9.4s\n"
- "fmla v24.4s, v4.4s, v13.4s\n"
+ "mov v31.16b, v16.16b\n fmla v31.4s, v8.4s, v9.4s\n"
"add x20, x20, #0x10\n"
+ "mov v30.16b, v16.16b\n fmla v30.4s, v7.4s, v9.4s\n"
+ "add x23, x23, #0x10\n"
+ "mov v29.16b, v16.16b\n fmla v29.4s, v6.4s, v9.4s\n"
"add x21, x21, #0x10\n"
- "mov v26.16b, v16.16b\n fmla v26.4s, v5.4s, v9.4s\n"
+ "mov v28.16b, v16.16b\n fmla v28.4s, v5.4s, v9.4s\n"
+ "cmp x21, x19, LSL #4\n"
"mov v27.16b, v16.16b\n fmla v27.4s, v4.4s, v9.4s\n"
- "mov v28.16b, v16.16b\n fmla v28.4s, v3.4s, v9.4s\n"
- "fmla v23.4s, v0.4s, v10.4s\n"
- "ldr q10, [x12, x9]\n"
- "fmla v25.4s, v2.4s, v11.4s\n"
- "ldr q11, [x12, x8]\n"
- "mov v29.16b, v16.16b\n fmla v29.4s, v2.4s, v9.4s\n"
- "fmla v24.4s, v6.4s, v11.4s\n"
- "mov v31.16b, v16.16b\n fmla v31.4s, v0.4s, v9.4s\n"
- "fmla v23.4s, v5.4s, v13.4s\n"
- "fmla v25.4s, v3.4s, v13.4s\n"
- "fmla v26.4s, v2.4s, v13.4s\n"
+ "mov v26.16b, v16.16b\n fmla v26.4s, v3.4s, v9.4s\n"
+ "mov v25.16b, v16.16b\n fmla v25.4s, v2.4s, v9.4s\n"
+ "mov v24.16b, v16.16b\n fmla v24.4s, v1.4s, v9.4s\n"
+ "mov v23.16b, v16.16b\n fmla v23.4s, v0.4s, v9.4s\n"
+ "ldr q16, [x17, #0x0]\n"
+ "fmla v31.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x11, x27]\n"
+ "fmla v29.4s, v2.4s, v11.4s\n"
+ "ldr q11, [x11, x16]\n"
+ "fmla v25.4s, v6.4s, v12.4s\n"
+ "ldr q12, [x9, x26]\n"
+ "fmla v30.4s, v4.4s, v13.4s\n"
+ "fmla v31.4s, v5.4s, v13.4s\n"
+ "fmla v29.4s, v3.4s, v13.4s\n"
+ "fmla v28.4s, v2.4s, v13.4s\n"
"fmla v27.4s, v1.4s, v13.4s\n"
- "fmla v28.4s, v0.4s, v13.4s\n"
- "ldr q13, [x16, x8]\n"
- "fmla v29.4s, v6.4s, v12.4s\n"
- "ldr q12, [x27, x26]\n"
- "mov v30.16b, v16.16b\n fmla v30.4s, v1.4s, v9.4s\n"
- "ldr q16, [x14, #0x0]\n"
- "fmla v24.4s, v0.4s, v13.4s\n"
- "fmla v31.4s, v8.4s, v12.4s\n"
- "ldr q12, [x16, x9]\n"
- "fmla v23.4s, v7.4s, v11.4s\n"
- "fmla v30.4s, v0.4s, v11.4s\n"
- "fmla v26.4s, v4.4s, v11.4s\n"
+ "fmla v26.4s, v0.4s, v13.4s\n"
+ "ldr q13, [x15, x16]\n"
+ "fmla v23.4s, v8.4s, v12.4s\n"
+ "ldr q12, [x15, x27]\n"
+ "fmla v31.4s, v7.4s, v11.4s\n"
+ "fmla v30.4s, v6.4s, v11.4s\n"
+ "fmla v28.4s, v4.4s, v11.4s\n"
"fmla v27.4s, v3.4s, v11.4s\n"
- "fmla v29.4s, v1.4s, v11.4s\n"
- "ld1 { v11.4s }, [x13]\n"
- "fmla v24.4s, v2.4s, v12.4s\n"
- "fmla v25.4s, v1.4s, v12.4s\n"
- "ld1 { v12.4s }, [x10]\n"
- "fmla v28.4s, v4.4s, v10.4s\n"
- "fmla v23.4s, v1.4s, v13.4s\n"
- "ldr q13, [x13, x26]\n"
- "fmla v30.4s, v2.4s, v10.4s\n"
- "fmla v31.4s, v1.4s, v10.4s\n"
- "fmla v24.4s, v8.4s, v10.4s\n"
- "fmla v25.4s, v7.4s, v10.4s\n"
+ "fmla v25.4s, v1.4s, v11.4s\n"
+ "fmla v24.4s, v0.4s, v11.4s\n"
+ "ld1 { v11.4s }, [x12]\n"
+ "fmla v31.4s, v1.4s, v13.4s\n"
+ "fmla v30.4s, v0.4s, v13.4s\n"
+ "ldr q13, [x12, x26]\n"
+ "fmla v29.4s, v1.4s, v12.4s\n"
"fmla v27.4s, v5.4s, v10.4s\n"
- "ldr q10, [x10, x11]\n"
- "fmla v26.4s, v0.4s, v11.4s\n"
- "fmla v29.4s, v3.4s, v12.4s\n"
- "fmla v28.4s, v2.4s, v13.4s\n"
- "fmla v30.4s, v4.4s, v10.4s\n"
- "fmla v31.4s, v3.4s, v10.4s\n"
- "fmla v23.4s, v3.4s, v11.4s\n"
+ "fmla v26.4s, v4.4s, v10.4s\n"
+ "fmla v30.4s, v2.4s, v12.4s\n"
+ "ld1 { v12.4s }, [x10]\n"
+ "fmla v29.4s, v7.4s, v10.4s\n"
+ "fmla v24.4s, v2.4s, v10.4s\n"
+ "fmla v23.4s, v1.4s, v10.4s\n"
+ "fmla v30.4s, v8.4s, v10.4s\n"
+ "ldr q10, [x10, x28]\n"
+ "fmla v31.4s, v3.4s, v11.4s\n"
+ "fmla v28.4s, v0.4s, v11.4s\n"
"ldr q11, [x10, x26]\n"
- "fmla v25.4s, v5.4s, v13.4s\n"
- "ldr q13, [x27, x8]\n"
- "fmla v26.4s, v6.4s, v12.4s\n"
- "ldr q12, [x13, x8]\n"
+ "fmla v29.4s, v5.4s, v13.4s\n"
+ "fmla v26.4s, v2.4s, v13.4s\n"
+ "ldr q13, [x9, x16]\n"
+ "fmla v25.4s, v3.4s, v12.4s\n"
+ "fmla v28.4s, v6.4s, v12.4s\n"
+ "ldr q12, [x12, x16]\n"
"fmla v27.4s, v7.4s, v10.4s\n"
- "fmla v29.4s, v5.4s, v10.4s\n"
- "fmla v28.4s, v6.4s, v10.4s\n"
- "fmla v31.4s, v5.4s, v11.4s\n"
- "fmla v30.4s, v6.4s, v13.4s\n"
- "fmla v26.4s, v8.4s, v10.4s\n"
- "fmla v29.4s, v7.4s, v13.4s\n"
- "ldr q13, [x27, x9]\n"
- "fmla v24.4s, v3.4s, v12.4s\n"
+ "fmla v26.4s, v6.4s, v10.4s\n"
+ "fmla v25.4s, v5.4s, v10.4s\n"
+ "fmla v28.4s, v8.4s, v10.4s\n"
+ "fmla v24.4s, v4.4s, v10.4s\n"
+ "fmla v23.4s, v3.4s, v10.4s\n"
+ "fmla v26.4s, v8.4s, v11.4s\n"
+ "fmla v25.4s, v7.4s, v13.4s\n"
+ "fmla v24.4s, v6.4s, v13.4s\n"
+ "ldr q13, [x9, x27]\n"
+ "fmla v23.4s, v5.4s, v11.4s\n"
+ "ldr q11, [x12, x27]\n"
+ "add x12, x12, #0x10\n"
+ "fmla v31.4s, v4.4s, v12.4s\n"
+ "fmla v30.4s, v3.4s, v12.4s\n"
+ "fmla v28.4s, v1.4s, v12.4s\n"
"fmla v27.4s, v0.4s, v12.4s\n"
- "fmla v28.4s, v8.4s, v11.4s\n"
- "ldr q11, [x13, x9]\n"
- "fmla v30.4s, v8.4s, v13.4s\n"
- "add x13, x13, #0x10\n"
- "fmla v31.4s, v7.4s, v13.4s\n"
- "ldr q13, [x10, x9]\n"
- "fmla v23.4s, v4.4s, v12.4s\n"
- "fmla v26.4s, v1.4s, v12.4s\n"
- "ldr q12, [x10, x8]\n"
- "fmla v24.4s, v5.4s, v11.4s\n"
- "add x10, x10, #0x10\n"
- "fmla v25.4s, v4.4s, v11.4s\n"
+ "ldr q12, [x10, x16]\n"
+ "fmla v29.4s, v4.4s, v11.4s\n"
+ "fmla v30.4s, v5.4s, v11.4s\n"
+ "fmla v26.4s, v1.4s, v11.4s\n"
"fmla v27.4s, v2.4s, v11.4s\n"
- "fmla v28.4s, v1.4s, v11.4s\n"
- "ldr q11, [x16, x11]\n"
- "fmla v29.4s, v4.4s, v12.4s\n"
- "add x16, x16, #0x10\n"
- "ld1 { v10.4s }, [x16]\n"
- "fmla v30.4s, v3.4s, v12.4s\n"
- "fmla v31.4s, v4.4s, v13.4s\n"
- "ldr q4, [x14, #0x50]\n"
- "fmla v26.4s, v7.4s, v12.4s\n"
+ "ldr q11, [x15, x28]\n"
+ "add x15, x15, #0x10\n"
+ "fmla v24.4s, v8.4s, v13.4s\n"
+ "ld1 { v10.4s }, [x15]\n"
+ "fmla v23.4s, v7.4s, v13.4s\n"
+ "ldr q13, [x10, x27]\n"
+ "add x10, x10, #0x10\n"
+ "fmla v28.4s, v7.4s, v12.4s\n"
"fmla v27.4s, v6.4s, v12.4s\n"
- "ld1 { v12.4s }, [x12]\n"
- "fmla v23.4s, v2.4s, v11.4s\n"
- "fmla v24.4s, v1.4s, v11.4s\n"
- "ldr q1, [x14, #0x20]\n"
- "fmax v24.4s, v24.4s, v18.4s\n"
- "fmla v25.4s, v0.4s, v11.4s\n"
- "ldr q11, [x12, x26]\n"
- "fmla v28.4s, v7.4s, v13.4s\n"
- "add x12, x12, #0x10\n"
- "ldr q9, [x12, x11]\n"
- "fmla v30.4s, v5.4s, v13.4s\n"
- "fmla v29.4s, v0.4s, v12.4s\n"
- "ldr q0, [x14, #0x10]\n"
+ "fmla v25.4s, v4.4s, v12.4s\n"
+ "fmla v24.4s, v3.4s, v12.4s\n"
+ "ld1 { v12.4s }, [x11]\n"
"fmla v31.4s, v2.4s, v11.4s\n"
- "ldr q2, [x14, #0x30]\n"
+ "fmla v30.4s, v1.4s, v11.4s\n"
+ "ldr q1, [x17, #0x20]\n"
+ "fmla v29.4s, v0.4s, v11.4s\n"
+ "ldr q11, [x11, x26]\n"
+ "add x11, x11, #0x10\n"
"fmla v27.4s, v8.4s, v13.4s\n"
- "ldr q13, [x27, x11]\n"
- "fmla v23.4s, v6.4s, v12.4s\n"
- "fmla v26.4s, v3.4s, v12.4s\n"
- "ldr q3, [x14, #0x40]\n"
- "fmax v23.4s, v23.4s, v18.4s\n"
- "fmla v25.4s, v8.4s, v11.4s\n"
- "fmla v28.4s, v5.4s, v11.4s\n"
- "ldr q11, [x16, x26]\n"
- "ldr q5, [x14, #0x60]\n"
- "fmla v29.4s, v8.4s, v13.4s\n"
- "ldr q8, [x14, #0x90]\n"
- "fmla v30.4s, v7.4s, v13.4s\n"
- "ldr q7, [x14, #0x80]\n"
- "fmla v31.4s, v6.4s, v13.4s\n"
- "ldr q13, [x13, x11]\n"
- "ldr q6, [x14, #0x70]\n"
- "fmax v25.4s, v25.4s, v18.4s\n"
- "fmax v26.4s, v26.4s, v18.4s\n"
- "fmax v27.4s, v27.4s, v18.4s\n"
- "add x27, x27, #0x10\n"
- "ld1 { v12.4s }, [x27]\n"
- "fmax v28.4s, v28.4s, v18.4s\n"
- "fmax v29.4s, v29.4s, v18.4s\n"
- "add x14, x14, #0xa0\n"
- "fmax v30.4s, v30.4s, v18.4s\n"
+ "ldr q9, [x11, x28]\n"
+ "fmla v26.4s, v7.4s, v13.4s\n"
+ "fmla v24.4s, v5.4s, v13.4s\n"
+ "fmla v23.4s, v4.4s, v13.4s\n"
+ "ldr q13, [x9, x28]\n"
+ "add x9, x9, #0x10\n"
+ "fmla v31.4s, v6.4s, v12.4s\n"
+ "ldr q4, [x17, #0x50]\n"
+ "fmla v28.4s, v3.4s, v12.4s\n"
+ "ldr q3, [x17, #0x40]\n"
+ "fmla v25.4s, v0.4s, v12.4s\n"
+ "ld1 { v12.4s }, [x9]\n"
+ "fmla v29.4s, v8.4s, v11.4s\n"
+ "ldr q0, [x17, #0x10]\n"
+ "fmla v26.4s, v5.4s, v11.4s\n"
+ "ldr q5, [x17, #0x60]\n"
+ "fmla v23.4s, v2.4s, v11.4s\n"
+ "ldr q11, [x15, x26]\n"
+ "fmla v25.4s, v8.4s, v13.4s\n"
+ "ldr q2, [x17, #0x30]\n"
+ "fmla v24.4s, v7.4s, v13.4s\n"
+ "ldr q7, [x17, #0x80]\n"
"fmax v31.4s, v31.4s, v18.4s\n"
- "fmin v23.4s, v23.4s, v17.4s\n"
- "fmin v24.4s, v24.4s, v17.4s\n"
- "st1 { v23.4s }, [x15]\n"
- "fmin v25.4s, v25.4s, v17.4s\n"
- "fmin v26.4s, v26.4s, v17.4s\n"
- "str q24, [x15, x17]\n"
- "fmin v27.4s, v27.4s, v17.4s\n"
- "fmin v28.4s, v28.4s, v17.4s\n"
- "str q25, [x15, x22]\n"
- "add x15, x15, #0x10\n"
- "fmin v29.4s, v29.4s, v17.4s\n"
- "fmin v30.4s, v30.4s, v17.4s\n"
- "st1 { v26.4s }, [x28]\n"
+ "ldr q8, [x17, #0x90]\n"
+ "fmla v23.4s, v6.4s, v13.4s\n"
+ "ldr q13, [x12, x28]\n"
+ "fmax v30.4s, v30.4s, v18.4s\n"
+ "ldr q6, [x17, #0x70]\n"
+ "add x17, x17, #0xa0\n"
"fmin v31.4s, v31.4s, v17.4s\n"
- "str q27, [x28, x17]\n"
- "str q28, [x28, x22]\n"
- "add x28, x28, #0x10\n"
- "st1 { v29.4s }, [x25]\n"
- "str q30, [x25, x17]\n"
- "str q31, [x25, x22]\n"
+ "st1 { v31.4s }, [x13]\n"
+ "fmin v30.4s, v30.4s, v17.4s\n"
+ "fmax v29.4s, v29.4s, v18.4s\n"
+ "str q30, [x13, x14]\n"
+ "fmin v29.4s, v29.4s, v17.4s\n"
+ "fmax v28.4s, v28.4s, v18.4s\n"
+ "str q29, [x13, x22]\n"
+ "fmin v28.4s, v28.4s, v17.4s\n"
+ "add x13, x13, #0x10\n"
+ "fmax v27.4s, v27.4s, v18.4s\n"
+ "st1 { v28.4s }, [x25]\n"
+ "fmax v26.4s, v26.4s, v18.4s\n"
+ "fmax v25.4s, v25.4s, v18.4s\n"
+ "fmin v27.4s, v27.4s, v17.4s\n"
+ "str q27, [x25, x14]\n"
+ "fmin v26.4s, v26.4s, v17.4s\n"
+ "fmin v25.4s, v25.4s, v17.4s\n"
+ "str q26, [x25, x22]\n"
+ "fmax v24.4s, v24.4s, v18.4s\n"
"add x25, x25, #0x10\n"
+ "fmax v23.4s, v23.4s, v18.4s\n"
+ "st1 { v25.4s }, [x24]\n"
+ "fmin v24.4s, v24.4s, v17.4s\n"
+ "str q24, [x24, x14]\n"
+ "fmin v23.4s, v23.4s, v17.4s\n"
+ "str q23, [x24, x22]\n"
+ "add x24, x24, #0x10\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
- "mov v24.16b, v16.16b\n fmla v24.4s, v7.4s, v9.4s\n"
- "mov v23.16b, v16.16b\n fmla v23.4s, v8.4s, v9.4s\n"
- "mov v25.16b, v16.16b\n fmla v25.4s, v6.4s, v9.4s\n"
- "fmla v24.4s, v4.4s, v13.4s\n"
- "mov v26.16b, v16.16b\n fmla v26.4s, v5.4s, v9.4s\n"
+ "mov v31.16b, v16.16b\n fmla v31.4s, v8.4s, v9.4s\n"
+ "mov v30.16b, v16.16b\n fmla v30.4s, v7.4s, v9.4s\n"
+ "mov v29.16b, v16.16b\n fmla v29.4s, v6.4s, v9.4s\n"
+ "mov v28.16b, v16.16b\n fmla v28.4s, v5.4s, v9.4s\n"
"mov v27.16b, v16.16b\n fmla v27.4s, v4.4s, v9.4s\n"
- "mov v28.16b, v16.16b\n fmla v28.4s, v3.4s, v9.4s\n"
- "fmla v23.4s, v0.4s, v10.4s\n"
- "ldr q10, [x12, x9]\n"
- "fmla v25.4s, v2.4s, v11.4s\n"
- "ldr q11, [x12, x8]\n"
- "mov v29.16b, v16.16b\n fmla v29.4s, v2.4s, v9.4s\n"
- "fmla v24.4s, v6.4s, v11.4s\n"
- "mov v31.16b, v16.16b\n fmla v31.4s, v0.4s, v9.4s\n"
- "fmla v23.4s, v5.4s, v13.4s\n"
- "fmla v25.4s, v3.4s, v13.4s\n"
- "fmla v26.4s, v2.4s, v13.4s\n"
+ "mov v26.16b, v16.16b\n fmla v26.4s, v3.4s, v9.4s\n"
+ "mov v25.16b, v16.16b\n fmla v25.4s, v2.4s, v9.4s\n"
+ "mov v24.16b, v16.16b\n fmla v24.4s, v1.4s, v9.4s\n"
+ "mov v23.16b, v16.16b\n fmla v23.4s, v0.4s, v9.4s\n"
+ "fmla v31.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x11, x27]\n"
+ "fmla v29.4s, v2.4s, v11.4s\n"
+ "ldr q11, [x11, x16]\n"
+ "fmla v25.4s, v6.4s, v12.4s\n"
+ "ldr q12, [x9, x26]\n"
+ "fmla v30.4s, v4.4s, v13.4s\n"
+ "fmla v31.4s, v5.4s, v13.4s\n"
+ "fmla v29.4s, v3.4s, v13.4s\n"
+ "fmla v28.4s, v2.4s, v13.4s\n"
"fmla v27.4s, v1.4s, v13.4s\n"
- "fmla v28.4s, v0.4s, v13.4s\n"
- "ldr q13, [x16, x8]\n"
- "fmla v29.4s, v6.4s, v12.4s\n"
- "ldr q12, [x27, x26]\n"
- "mov v30.16b, v16.16b\n fmla v30.4s, v1.4s, v9.4s\n"
- "fmla v24.4s, v0.4s, v13.4s\n"
- "fmla v31.4s, v8.4s, v12.4s\n"
- "ldr q12, [x16, x9]\n"
- "fmla v23.4s, v7.4s, v11.4s\n"
- "fmla v30.4s, v0.4s, v11.4s\n"
- "fmla v26.4s, v4.4s, v11.4s\n"
+ "fmla v26.4s, v0.4s, v13.4s\n"
+ "ldr q13, [x15, x16]\n"
+ "fmla v23.4s, v8.4s, v12.4s\n"
+ "ldr q12, [x15, x27]\n"
+ "fmla v31.4s, v7.4s, v11.4s\n"
+ "fmla v30.4s, v6.4s, v11.4s\n"
+ "fmla v28.4s, v4.4s, v11.4s\n"
"fmla v27.4s, v3.4s, v11.4s\n"
- "fmla v29.4s, v1.4s, v11.4s\n"
- "ld1 { v11.4s }, [x13]\n"
- "fmla v24.4s, v2.4s, v12.4s\n"
- "fmla v25.4s, v1.4s, v12.4s\n"
- "ld1 { v12.4s }, [x10]\n"
- "fmla v28.4s, v4.4s, v10.4s\n"
- "fmla v23.4s, v1.4s, v13.4s\n"
- "ldr q13, [x13, x26]\n"
- "fmla v30.4s, v2.4s, v10.4s\n"
- "fmla v31.4s, v1.4s, v10.4s\n"
- "fmla v24.4s, v8.4s, v10.4s\n"
- "fmla v25.4s, v7.4s, v10.4s\n"
+ "fmla v25.4s, v1.4s, v11.4s\n"
+ "fmla v24.4s, v0.4s, v11.4s\n"
+ "ld1 { v11.4s }, [x12]\n"
+ "fmla v31.4s, v1.4s, v13.4s\n"
+ "fmla v30.4s, v0.4s, v13.4s\n"
+ "ldr q13, [x12, x26]\n"
+ "fmla v29.4s, v1.4s, v12.4s\n"
"fmla v27.4s, v5.4s, v10.4s\n"
- "ldr q10, [x10, x11]\n"
- "fmla v26.4s, v0.4s, v11.4s\n"
- "fmla v29.4s, v3.4s, v12.4s\n"
- "fmla v28.4s, v2.4s, v13.4s\n"
- "fmla v30.4s, v4.4s, v10.4s\n"
- "fmla v31.4s, v3.4s, v10.4s\n"
- "fmla v23.4s, v3.4s, v11.4s\n"
+ "fmla v26.4s, v4.4s, v10.4s\n"
+ "fmla v30.4s, v2.4s, v12.4s\n"
+ "ld1 { v12.4s }, [x10]\n"
+ "fmla v29.4s, v7.4s, v10.4s\n"
+ "fmla v24.4s, v2.4s, v10.4s\n"
+ "fmla v23.4s, v1.4s, v10.4s\n"
+ "fmla v30.4s, v8.4s, v10.4s\n"
+ "ldr q10, [x10, x28]\n"
+ "fmla v31.4s, v3.4s, v11.4s\n"
+ "fmla v28.4s, v0.4s, v11.4s\n"
"ldr q11, [x10, x26]\n"
- "fmla v25.4s, v5.4s, v13.4s\n"
- "ldr q13, [x27, x8]\n"
- "fmla v26.4s, v6.4s, v12.4s\n"
- "ldr q12, [x13, x8]\n"
+ "fmla v29.4s, v5.4s, v13.4s\n"
+ "fmla v26.4s, v2.4s, v13.4s\n"
+ "ldr q13, [x9, x16]\n"
+ "fmla v25.4s, v3.4s, v12.4s\n"
+ "fmla v28.4s, v6.4s, v12.4s\n"
+ "ldr q12, [x12, x16]\n"
"fmla v27.4s, v7.4s, v10.4s\n"
- "fmla v29.4s, v5.4s, v10.4s\n"
- "fmla v28.4s, v6.4s, v10.4s\n"
- "fmla v31.4s, v5.4s, v11.4s\n"
- "fmla v30.4s, v6.4s, v13.4s\n"
- "fmla v26.4s, v8.4s, v10.4s\n"
- "fmla v29.4s, v7.4s, v13.4s\n"
- "ldr q13, [x27, x9]\n"
- "fmla v24.4s, v3.4s, v12.4s\n"
+ "fmla v26.4s, v6.4s, v10.4s\n"
+ "fmla v25.4s, v5.4s, v10.4s\n"
+ "fmla v28.4s, v8.4s, v10.4s\n"
+ "fmla v24.4s, v4.4s, v10.4s\n"
+ "fmla v23.4s, v3.4s, v10.4s\n"
+ "fmla v26.4s, v8.4s, v11.4s\n"
+ "fmla v25.4s, v7.4s, v13.4s\n"
+ "fmla v24.4s, v6.4s, v13.4s\n"
+ "ldr q13, [x9, x27]\n"
+ "fmla v23.4s, v5.4s, v11.4s\n"
+ "ldr q11, [x12, x27]\n"
+ "add x12, x12, #0x10\n"
+ "fmla v31.4s, v4.4s, v12.4s\n"
+ "fmla v30.4s, v3.4s, v12.4s\n"
+ "fmla v28.4s, v1.4s, v12.4s\n"
"fmla v27.4s, v0.4s, v12.4s\n"
- "fmla v28.4s, v8.4s, v11.4s\n"
- "ldr q11, [x13, x9]\n"
- "fmla v30.4s, v8.4s, v13.4s\n"
- "add x13, x13, #0x10\n"
- "fmla v31.4s, v7.4s, v13.4s\n"
- "ldr q13, [x10, x9]\n"
- "fmla v23.4s, v4.4s, v12.4s\n"
- "fmla v26.4s, v1.4s, v12.4s\n"
- "ldr q12, [x10, x8]\n"
- "fmla v24.4s, v5.4s, v11.4s\n"
- "add x10, x10, #0x10\n"
- "fmla v25.4s, v4.4s, v11.4s\n"
+ "ldr q12, [x10, x16]\n"
+ "fmla v29.4s, v4.4s, v11.4s\n"
+ "fmla v30.4s, v5.4s, v11.4s\n"
+ "fmla v26.4s, v1.4s, v11.4s\n"
"fmla v27.4s, v2.4s, v11.4s\n"
- "fmla v28.4s, v1.4s, v11.4s\n"
- "ldr q11, [x16, x11]\n"
- "fmla v29.4s, v4.4s, v12.4s\n"
- "add x16, x16, #0x10\n"
- "fmla v30.4s, v3.4s, v12.4s\n"
- "fmla v31.4s, v4.4s, v13.4s\n"
- "fmla v26.4s, v7.4s, v12.4s\n"
+ "ldr q11, [x15, x28]\n"
+ "add x15, x15, #0x10\n"
+ "fmla v24.4s, v8.4s, v13.4s\n"
+ "fmla v23.4s, v7.4s, v13.4s\n"
+ "ldr q13, [x10, x27]\n"
+ "add x10, x10, #0x10\n"
+ "fmla v28.4s, v7.4s, v12.4s\n"
"fmla v27.4s, v6.4s, v12.4s\n"
- "ld1 { v12.4s }, [x12]\n"
- "fmla v23.4s, v2.4s, v11.4s\n"
- "fmla v24.4s, v1.4s, v11.4s\n"
- "fmax v24.4s, v24.4s, v18.4s\n"
- "fmla v25.4s, v0.4s, v11.4s\n"
- "ldr q11, [x12, x26]\n"
- "fmla v28.4s, v7.4s, v13.4s\n"
- "fmin v24.4s, v24.4s, v17.4s\n"
- "fmla v30.4s, v5.4s, v13.4s\n"
- "fmla v29.4s, v0.4s, v12.4s\n"
- "add x12, x12, #0x10\n"
+ "fmla v25.4s, v4.4s, v12.4s\n"
+ "fmla v24.4s, v3.4s, v12.4s\n"
+ "ld1 { v12.4s }, [x11]\n"
"fmla v31.4s, v2.4s, v11.4s\n"
+ "fmla v30.4s, v1.4s, v11.4s\n"
+ "fmla v29.4s, v0.4s, v11.4s\n"
+ "ldr q11, [x11, x26]\n"
+ "add x11, x11, #0x10\n"
"fmla v27.4s, v8.4s, v13.4s\n"
- "ldr q13, [x27, x11]\n"
+ "fmla v26.4s, v7.4s, v13.4s\n"
+ "fmla v24.4s, v5.4s, v13.4s\n"
+ "fmla v23.4s, v4.4s, v13.4s\n"
+ "ldr q13, [x9, x28]\n"
+ "add x9, x9, #0x10\n"
+ "fmla v31.4s, v6.4s, v12.4s\n"
+ "fmla v28.4s, v3.4s, v12.4s\n"
+ "fmla v25.4s, v0.4s, v12.4s\n"
+ "fmla v29.4s, v8.4s, v11.4s\n"
+ "fmla v26.4s, v5.4s, v11.4s\n"
+ "fmla v23.4s, v2.4s, v11.4s\n"
+ "fmla v25.4s, v8.4s, v13.4s\n"
+ "fmla v24.4s, v7.4s, v13.4s\n"
+ "fmax v31.4s, v31.4s, v18.4s\n"
+ "fmla v23.4s, v6.4s, v13.4s\n"
+ "fmax v30.4s, v30.4s, v18.4s\n"
+ "fmin v31.4s, v31.4s, v17.4s\n"
+ "st1 { v31.4s }, [x13]\n"
+ "fmin v30.4s, v30.4s, v17.4s\n"
+ "fmax v29.4s, v29.4s, v18.4s\n"
+ "str q30, [x13, x14]\n"
+ "fmin v29.4s, v29.4s, v17.4s\n"
+ "fmax v28.4s, v28.4s, v18.4s\n"
+ "str q29, [x13, x22]\n"
+ "fmin v28.4s, v28.4s, v17.4s\n"
+ "add x13, x13, #0x10\n"
"fmax v27.4s, v27.4s, v18.4s\n"
- "fmla v23.4s, v6.4s, v12.4s\n"
- "fmla v26.4s, v3.4s, v12.4s\n"
- "fmax v23.4s, v23.4s, v18.4s\n"
- "add x27, x27, #0x10\n"
- "fmla v25.4s, v8.4s, v11.4s\n"
- "fmla v28.4s, v5.4s, v11.4s\n"
- "fmax v25.4s, v25.4s, v18.4s\n"
- "fmla v29.4s, v8.4s, v13.4s\n"
- "fmla v30.4s, v7.4s, v13.4s\n"
+ "st1 { v28.4s }, [x25]\n"
"fmax v26.4s, v26.4s, v18.4s\n"
- "fmla v31.4s, v6.4s, v13.4s\n"
- "fmax v28.4s, v28.4s, v18.4s\n"
- "fmax v29.4s, v29.4s, v18.4s\n"
- "fmax v30.4s, v30.4s, v18.4s\n"
- "fmax v31.4s, v31.4s, v18.4s\n"
- "fmin v23.4s, v23.4s, v17.4s\n"
- "st1 { v23.4s }, [x15]\n"
- "fmin v25.4s, v25.4s, v17.4s\n"
- "fmin v26.4s, v26.4s, v17.4s\n"
- "str q24, [x15, x17]\n"
+ "fmax v25.4s, v25.4s, v18.4s\n"
"fmin v27.4s, v27.4s, v17.4s\n"
- "fmin v28.4s, v28.4s, v17.4s\n"
- "str q25, [x15, x22]\n"
- "add x15, x15, #0x10\n"
- "fmin v29.4s, v29.4s, v17.4s\n"
- "fmin v30.4s, v30.4s, v17.4s\n"
- "st1 { v26.4s }, [x28]\n"
- "fmin v31.4s, v31.4s, v17.4s\n"
- "str q27, [x28, x17]\n"
- "str q28, [x28, x22]\n"
- "add x28, x28, #0x10\n"
- "st1 { v29.4s }, [x25]\n"
- "str q30, [x25, x17]\n"
- "str q31, [x25, x22]\n"
+ "str q27, [x25, x14]\n"
+ "fmin v26.4s, v26.4s, v17.4s\n"
+ "fmin v25.4s, v25.4s, v17.4s\n"
+ "str q26, [x25, x22]\n"
+ "fmax v24.4s, v24.4s, v18.4s\n"
"add x25, x25, #0x10\n"
+ "fmax v23.4s, v23.4s, v18.4s\n"
+ "st1 { v25.4s }, [x24]\n"
+ "fmin v24.4s, v24.4s, v17.4s\n"
+ "str q24, [x24, x14]\n"
+ "fmin v23.4s, v23.4s, v17.4s\n"
+ "str q23, [x24, x22]\n"
+ "add x24, x24, #0x10\n"
"4:" // Tile loop: Oddments
"tst %x[n_channels], #0x3\n"
"beq 49f\n"
- "ldr q16, [x14, #0x0]\n"
- "ldr q0, [x14, #0x10]\n"
- "add x24, x12, x11\n"
- "add x23, x16, XZR\n"
- "ldr q1, [x14, #0x20]\n"
- "ldr q2, [x14, #0x30]\n"
- "add x22, x16, x26\n"
- "add x21, x27, XZR\n"
- "ldr q3, [x14, #0x40]\n"
- "ldr q4, [x14, #0x50]\n"
- "add x20, x13, x11\n"
- "ldr q5, [x14, #0x60]\n"
- "ldr q6, [x14, #0x70]\n"
- "ldr q7, [x14, #0x80]\n"
- "ldr q8, [x14, #0x90]\n"
+ "ldr q16, [x17, #0x0]\n"
+ "ldr q0, [x17, #0x10]\n"
+ "add x23, x11, x28\n"
+ "ldr q1, [x17, #0x20]\n"
+ "add x22, x15, XZR\n"
+ "ldr q2, [x17, #0x30]\n"
+ "add x21, x15, x26\n"
+ "ldr q3, [x17, #0x40]\n"
+ "add x20, x9, XZR\n"
+ "ldr q4, [x17, #0x50]\n"
+ "add x19, x12, x28\n"
+ "ldr q5, [x17, #0x60]\n"
+ "ldr q6, [x17, #0x70]\n"
+ "ldr q7, [x17, #0x80]\n"
+ "ldr q8, [x17, #0x90]\n"
"tbz %x[n_channels], #1, 5f\n"
- "ldr d9, [x24], #0x8\n"
- "ldr d10, [x23], #0x8\n"
- "ldr d11, [x22], #0x8\n"
- "ldr d12, [x21], #0x8\n"
- "ldr d13, [x20], #0x8\n"
+ "ldr d9, [x23], #0x8\n"
+ "ldr d10, [x22], #0x8\n"
+ "ldr d11, [x21], #0x8\n"
+ "ldr d12, [x20], #0x8\n"
+ "ldr d13, [x19], #0x8\n"
"tbz %x[n_channels], #0, 6f\n"
- "ld1 { v9.s }[2], [x24]\n"
- "ld1 { v10.s }[2], [x23]\n"
- "ld1 { v11.s }[2], [x22]\n"
- "ld1 { v12.s }[2], [x21]\n"
- "ld1 { v13.s }[2], [x20]\n"
+ "ld1 { v9.s }[2], [x23]\n"
+ "ld1 { v10.s }[2], [x22]\n"
+ "ld1 { v11.s }[2], [x21]\n"
+ "ld1 { v12.s }[2], [x20]\n"
+ "ld1 { v13.s }[2], [x19]\n"
"b 6f\n"
"5:" // Tile loop: Oddments: Load inputs: (2, 2), (0, 0), (0, 4), (4, 0), (1, 2): Bit 1: Unset
- "ldr s9, [x24, #0x0]\n"
- "ldr s10, [x23, #0x0]\n"
- "ldr s11, [x22, #0x0]\n"
- "ldr s12, [x21, #0x0]\n"
- "ldr s13, [x20, #0x0]\n"
+ "ldr s9, [x23, #0x0]\n"
+ "ldr s10, [x22, #0x0]\n"
+ "ldr s11, [x21, #0x0]\n"
+ "ldr s12, [x20, #0x0]\n"
+ "ldr s13, [x19, #0x0]\n"
"6:" // Tile loop: Oddments: Load inputs: (2, 2), (0, 0), (0, 4), (4, 0), (1, 2): Bit 1: End
- "mov v23.16b, v16.16b\n fmla v23.4s, v8.4s, v9.4s\n"
- "mov v25.16b, v16.16b\n fmla v25.4s, v6.4s, v9.4s\n"
- "add x20, x27, x26\n"
- "mov v24.16b, v16.16b\n fmla v24.4s, v7.4s, v9.4s\n"
- "mov v26.16b, v16.16b\n fmla v26.4s, v5.4s, v9.4s\n"
+ "mov v31.16b, v16.16b\n fmla v31.4s, v8.4s, v9.4s\n"
+ "add x19, x9, x26\n"
+ "mov v30.16b, v16.16b\n fmla v30.4s, v7.4s, v9.4s\n"
+ "mov v29.16b, v16.16b\n fmla v29.4s, v6.4s, v9.4s\n"
+ "mov v28.16b, v16.16b\n fmla v28.4s, v5.4s, v9.4s\n"
"mov v27.16b, v16.16b\n fmla v27.4s, v4.4s, v9.4s\n"
- "mov v28.16b, v16.16b\n fmla v28.4s, v3.4s, v9.4s\n"
- "mov v29.16b, v16.16b\n fmla v29.4s, v2.4s, v9.4s\n"
- "fmla v23.4s, v0.4s, v10.4s\n"
- "fmla v25.4s, v2.4s, v11.4s\n"
- "mov v30.16b, v16.16b\n fmla v30.4s, v1.4s, v9.4s\n"
- "mov v31.16b, v16.16b\n fmla v31.4s, v0.4s, v9.4s\n"
- "fmla v29.4s, v6.4s, v12.4s\n"
- "fmla v23.4s, v5.4s, v13.4s\n"
- "fmla v24.4s, v4.4s, v13.4s\n"
- "fmla v25.4s, v3.4s, v13.4s\n"
- "fmla v26.4s, v2.4s, v13.4s\n"
+ "mov v26.16b, v16.16b\n fmla v26.4s, v3.4s, v9.4s\n"
+ "mov v25.16b, v16.16b\n fmla v25.4s, v2.4s, v9.4s\n"
+ "mov v24.16b, v16.16b\n fmla v24.4s, v1.4s, v9.4s\n"
+ "mov v23.16b, v16.16b\n fmla v23.4s, v0.4s, v9.4s\n"
+ "fmla v31.4s, v0.4s, v10.4s\n"
+ "fmla v29.4s, v2.4s, v11.4s\n"
+ "fmla v25.4s, v6.4s, v12.4s\n"
+ "fmla v30.4s, v4.4s, v13.4s\n"
+ "fmla v31.4s, v5.4s, v13.4s\n"
+ "fmla v29.4s, v3.4s, v13.4s\n"
+ "fmla v28.4s, v2.4s, v13.4s\n"
"fmla v27.4s, v1.4s, v13.4s\n"
- "fmla v28.4s, v0.4s, v13.4s\n"
+ "fmla v26.4s, v0.4s, v13.4s\n"
"tbz %x[n_channels], #1, 7f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #0, 8f\n"
- "ld1 { v12.s }[2], [x20]\n"
+ "ld1 { v12.s }[2], [x19]\n"
"b 8f\n"
"7:" // Tile loop: Oddments: Load inputs: (4, 4): Bit 1: Unset
- "ldr s12, [x20, #0x0]\n"
+ "ldr s12, [x19, #0x0]\n"
"8:" // Tile loop: Oddments: Load inputs: (4, 4): Bit 1: End
- "fmla v31.4s, v8.4s, v12.4s\n"
- "add x20, x12, x8\n"
+ "fmla v23.4s, v8.4s, v12.4s\n"
+ "add x19, x11, x16\n"
"tbz %x[n_channels], #1, 9f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #0, 10f\n"
- "ld1 { v11.s }[2], [x20]\n"
+ "ld1 { v11.s }[2], [x19]\n"
"b 10f\n"
"9:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 1: Unset
- "ldr s11, [x20, #0x0]\n"
+ "ldr s11, [x19, #0x0]\n"
"10:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 1: End
- "fmla v23.4s, v7.4s, v11.4s\n"
- "fmla v24.4s, v6.4s, v11.4s\n"
- "add x20, x16, x8\n"
- "fmla v26.4s, v4.4s, v11.4s\n"
+ "fmla v31.4s, v7.4s, v11.4s\n"
+ "add x19, x15, x16\n"
+ "fmla v30.4s, v6.4s, v11.4s\n"
+ "fmla v28.4s, v4.4s, v11.4s\n"
"fmla v27.4s, v3.4s, v11.4s\n"
- "fmla v29.4s, v1.4s, v11.4s\n"
- "fmla v30.4s, v0.4s, v11.4s\n"
+ "fmla v25.4s, v1.4s, v11.4s\n"
+ "fmla v24.4s, v0.4s, v11.4s\n"
"tbz %x[n_channels], #1, 11f\n"
- "ldr d13, [x20], #0x8\n"
+ "ldr d13, [x19], #0x8\n"
"tbz %x[n_channels], #0, 12f\n"
- "ld1 { v13.s }[2], [x20]\n"
+ "ld1 { v13.s }[2], [x19]\n"
"b 12f\n"
"11:" // Tile loop: Oddments: Load inputs: (0, 1): Bit 1: Unset
- "ldr s13, [x20, #0x0]\n"
+ "ldr s13, [x19, #0x0]\n"
"12:" // Tile loop: Oddments: Load inputs: (0, 1): Bit 1: End
- "fmla v23.4s, v1.4s, v13.4s\n"
- "fmla v24.4s, v0.4s, v13.4s\n"
- "add x20, x16, x9\n"
+ "fmla v31.4s, v1.4s, v13.4s\n"
+ "add x19, x15, x27\n"
+ "fmla v30.4s, v0.4s, v13.4s\n"
"tbz %x[n_channels], #1, 13f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #0, 14f\n"
- "ld1 { v12.s }[2], [x20]\n"
+ "ld1 { v12.s }[2], [x19]\n"
"b 14f\n"
"13:" // Tile loop: Oddments: Load inputs: (0, 3): Bit 1: Unset
- "ldr s12, [x20, #0x0]\n"
+ "ldr s12, [x19, #0x0]\n"
"14:" // Tile loop: Oddments: Load inputs: (0, 3): Bit 1: End
- "fmla v24.4s, v2.4s, v12.4s\n"
- "fmla v25.4s, v1.4s, v12.4s\n"
- "add x20, x12, x9\n"
+ "fmla v30.4s, v2.4s, v12.4s\n"
+ "add x19, x11, x27\n"
+ "fmla v29.4s, v1.4s, v12.4s\n"
"tbz %x[n_channels], #1, 15f\n"
- "ldr d10, [x20], #0x8\n"
+ "ldr d10, [x19], #0x8\n"
"tbz %x[n_channels], #0, 16f\n"
- "ld1 { v10.s }[2], [x20]\n"
+ "ld1 { v10.s }[2], [x19]\n"
"b 16f\n"
"15:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 1: Unset
- "ldr s10, [x20, #0x0]\n"
+ "ldr s10, [x19, #0x0]\n"
"16:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 1: End
- "fmla v24.4s, v8.4s, v10.4s\n"
- "fmla v25.4s, v7.4s, v10.4s\n"
- "add x20, x13, XZR\n"
+ "fmla v30.4s, v8.4s, v10.4s\n"
+ "add x19, x12, XZR\n"
+ "fmla v29.4s, v7.4s, v10.4s\n"
"fmla v27.4s, v5.4s, v10.4s\n"
- "fmla v28.4s, v4.4s, v10.4s\n"
- "fmla v30.4s, v2.4s, v10.4s\n"
- "fmla v31.4s, v1.4s, v10.4s\n"
+ "fmla v26.4s, v4.4s, v10.4s\n"
+ "fmla v24.4s, v2.4s, v10.4s\n"
+ "fmla v23.4s, v1.4s, v10.4s\n"
"tbz %x[n_channels], #1, 17f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #0, 18f\n"
- "ld1 { v11.s }[2], [x20]\n"
+ "ld1 { v11.s }[2], [x19]\n"
"b 18f\n"
"17:" // Tile loop: Oddments: Load inputs: (1, 0): Bit 1: Unset
- "ldr s11, [x20, #0x0]\n"
+ "ldr s11, [x19, #0x0]\n"
"18:" // Tile loop: Oddments: Load inputs: (1, 0): Bit 1: End
- "fmla v23.4s, v3.4s, v11.4s\n"
- "fmla v26.4s, v0.4s, v11.4s\n"
- "add x20, x13, x26\n"
+ "fmla v31.4s, v3.4s, v11.4s\n"
+ "add x19, x12, x26\n"
+ "fmla v28.4s, v0.4s, v11.4s\n"
"tbz %x[n_channels], #1, 19f\n"
- "ldr d13, [x20], #0x8\n"
+ "ldr d13, [x19], #0x8\n"
"tbz %x[n_channels], #0, 20f\n"
- "ld1 { v13.s }[2], [x20]\n"
+ "ld1 { v13.s }[2], [x19]\n"
"b 20f\n"
"19:" // Tile loop: Oddments: Load inputs: (1, 4): Bit 1: Unset
- "ldr s13, [x20, #0x0]\n"
+ "ldr s13, [x19, #0x0]\n"
"20:" // Tile loop: Oddments: Load inputs: (1, 4): Bit 1: End
- "fmla v25.4s, v5.4s, v13.4s\n"
- "fmla v28.4s, v2.4s, v13.4s\n"
- "add x20, x10, XZR\n"
+ "fmla v29.4s, v5.4s, v13.4s\n"
+ "add x19, x10, XZR\n"
+ "fmla v26.4s, v2.4s, v13.4s\n"
"tbz %x[n_channels], #1, 21f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #0, 22f\n"
- "ld1 { v12.s }[2], [x20]\n"
+ "ld1 { v12.s }[2], [x19]\n"
"b 22f\n"
"21:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 1: Unset
- "ldr s12, [x20, #0x0]\n"
+ "ldr s12, [x19, #0x0]\n"
"22:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 1: End
- "fmla v26.4s, v6.4s, v12.4s\n"
- "fmla v29.4s, v3.4s, v12.4s\n"
- "add x20, x10, x11\n"
+ "fmla v28.4s, v6.4s, v12.4s\n"
+ "add x19, x10, x28\n"
+ "fmla v25.4s, v3.4s, v12.4s\n"
"tbz %x[n_channels], #1, 23f\n"
- "ldr d10, [x20], #0x8\n"
+ "ldr d10, [x19], #0x8\n"
"tbz %x[n_channels], #0, 24f\n"
- "ld1 { v10.s }[2], [x20]\n"
+ "ld1 { v10.s }[2], [x19]\n"
"b 24f\n"
"23:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 1: Unset
- "ldr s10, [x20, #0x0]\n"
+ "ldr s10, [x19, #0x0]\n"
"24:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 1: End
- "fmla v26.4s, v8.4s, v10.4s\n"
+ "fmla v28.4s, v8.4s, v10.4s\n"
+ "add x19, x10, x26\n"
"fmla v27.4s, v7.4s, v10.4s\n"
- "add x20, x10, x26\n"
- "fmla v28.4s, v6.4s, v10.4s\n"
- "fmla v29.4s, v5.4s, v10.4s\n"
- "fmla v30.4s, v4.4s, v10.4s\n"
- "fmla v31.4s, v3.4s, v10.4s\n"
+ "fmla v26.4s, v6.4s, v10.4s\n"
+ "fmla v25.4s, v5.4s, v10.4s\n"
+ "fmla v24.4s, v4.4s, v10.4s\n"
+ "fmla v23.4s, v3.4s, v10.4s\n"
"tbz %x[n_channels], #1, 25f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #0, 26f\n"
- "ld1 { v11.s }[2], [x20]\n"
+ "ld1 { v11.s }[2], [x19]\n"
"b 26f\n"
"25:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 1: Unset
- "ldr s11, [x20, #0x0]\n"
+ "ldr s11, [x19, #0x0]\n"
"26:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 1: End
- "fmla v28.4s, v8.4s, v11.4s\n"
- "fmla v31.4s, v5.4s, v11.4s\n"
- "add x20, x27, x8\n"
+ "fmla v26.4s, v8.4s, v11.4s\n"
+ "add x19, x9, x16\n"
+ "fmla v23.4s, v5.4s, v11.4s\n"
"tbz %x[n_channels], #1, 27f\n"
- "ldr d13, [x20], #0x8\n"
+ "ldr d13, [x19], #0x8\n"
"tbz %x[n_channels], #0, 28f\n"
- "ld1 { v13.s }[2], [x20]\n"
+ "ld1 { v13.s }[2], [x19]\n"
"b 28f\n"
"27:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 1: Unset
- "ldr s13, [x20, #0x0]\n"
+ "ldr s13, [x19, #0x0]\n"
"28:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 1: End
- "fmla v29.4s, v7.4s, v13.4s\n"
- "fmla v30.4s, v6.4s, v13.4s\n"
- "add x20, x13, x8\n"
+ "fmla v25.4s, v7.4s, v13.4s\n"
+ "add x19, x12, x16\n"
+ "fmla v24.4s, v6.4s, v13.4s\n"
"tbz %x[n_channels], #1, 29f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #0, 30f\n"
- "ld1 { v12.s }[2], [x20]\n"
+ "ld1 { v12.s }[2], [x19]\n"
"b 30f\n"
"29:" // Tile loop: Oddments: Load inputs: (1, 1): Bit 1: Unset
- "ldr s12, [x20, #0x0]\n"
+ "ldr s12, [x19, #0x0]\n"
"30:" // Tile loop: Oddments: Load inputs: (1, 1): Bit 1: End
- "fmla v23.4s, v4.4s, v12.4s\n"
- "fmla v24.4s, v3.4s, v12.4s\n"
- "add x20, x13, x9\n"
- "fmla v26.4s, v1.4s, v12.4s\n"
+ "fmla v31.4s, v4.4s, v12.4s\n"
+ "add x19, x12, x27\n"
+ "fmla v30.4s, v3.4s, v12.4s\n"
+ "fmla v28.4s, v1.4s, v12.4s\n"
"fmla v27.4s, v0.4s, v12.4s\n"
"tbz %x[n_channels], #1, 31f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #0, 32f\n"
- "ld1 { v11.s }[2], [x20]\n"
+ "ld1 { v11.s }[2], [x19]\n"
"b 32f\n"
"31:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 1: Unset
- "ldr s11, [x20, #0x0]\n"
+ "ldr s11, [x19, #0x0]\n"
"32:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 1: End
- "fmla v24.4s, v5.4s, v11.4s\n"
- "fmla v25.4s, v4.4s, v11.4s\n"
- "add x20, x27, x9\n"
+ "fmla v30.4s, v5.4s, v11.4s\n"
+ "add x19, x9, x27\n"
+ "fmla v29.4s, v4.4s, v11.4s\n"
"fmla v27.4s, v2.4s, v11.4s\n"
- "fmla v28.4s, v1.4s, v11.4s\n"
+ "fmla v26.4s, v1.4s, v11.4s\n"
"tbz %x[n_channels], #1, 33f\n"
- "ldr d13, [x20], #0x8\n"
+ "ldr d13, [x19], #0x8\n"
"tbz %x[n_channels], #0, 34f\n"
- "ld1 { v13.s }[2], [x20]\n"
+ "ld1 { v13.s }[2], [x19]\n"
"b 34f\n"
"33:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 1: Unset
- "ldr s13, [x20, #0x0]\n"
+ "ldr s13, [x19, #0x0]\n"
"34:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 1: End
- "fmla v30.4s, v8.4s, v13.4s\n"
- "fmla v31.4s, v7.4s, v13.4s\n"
- "add x20, x10, x8\n"
+ "fmla v24.4s, v8.4s, v13.4s\n"
+ "add x19, x10, x16\n"
+ "fmla v23.4s, v7.4s, v13.4s\n"
"tbz %x[n_channels], #1, 35f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #0, 36f\n"
- "ld1 { v12.s }[2], [x20]\n"
+ "ld1 { v12.s }[2], [x19]\n"
"b 36f\n"
"35:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 1: Unset
- "ldr s12, [x20, #0x0]\n"
+ "ldr s12, [x19, #0x0]\n"
"36:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 1: End
- "fmla v26.4s, v7.4s, v12.4s\n"
+ "fmla v28.4s, v7.4s, v12.4s\n"
+ "add x19, x15, x28\n"
"fmla v27.4s, v6.4s, v12.4s\n"
- "add x20, x16, x11\n"
- "fmla v29.4s, v4.4s, v12.4s\n"
- "fmla v30.4s, v3.4s, v12.4s\n"
+ "fmla v25.4s, v4.4s, v12.4s\n"
+ "fmla v24.4s, v3.4s, v12.4s\n"
"tbz %x[n_channels], #1, 37f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #0, 38f\n"
- "ld1 { v11.s }[2], [x20]\n"
+ "ld1 { v11.s }[2], [x19]\n"
"b 38f\n"
"37:" // Tile loop: Oddments: Load inputs: (0, 2): Bit 1: Unset
- "ldr s11, [x20, #0x0]\n"
+ "ldr s11, [x19, #0x0]\n"
"38:" // Tile loop: Oddments: Load inputs: (0, 2): Bit 1: End
- "fmla v23.4s, v2.4s, v11.4s\n"
- "fmla v24.4s, v1.4s, v11.4s\n"
- "add x20, x10, x9\n"
- "fmla v25.4s, v0.4s, v11.4s\n"
+ "fmla v31.4s, v2.4s, v11.4s\n"
+ "add x19, x10, x27\n"
+ "fmla v30.4s, v1.4s, v11.4s\n"
+ "fmla v29.4s, v0.4s, v11.4s\n"
"tbz %x[n_channels], #1, 39f\n"
- "ldr d13, [x20], #0x8\n"
+ "ldr d13, [x19], #0x8\n"
"tbz %x[n_channels], #0, 40f\n"
- "ld1 { v13.s }[2], [x20]\n"
+ "ld1 { v13.s }[2], [x19]\n"
"b 40f\n"
"39:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 1: Unset
- "ldr s13, [x20, #0x0]\n"
+ "ldr s13, [x19, #0x0]\n"
"40:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 1: End
"fmla v27.4s, v8.4s, v13.4s\n"
- "fmla v28.4s, v7.4s, v13.4s\n"
- "add x20, x12, XZR\n"
- "fmla v30.4s, v5.4s, v13.4s\n"
- "fmla v31.4s, v4.4s, v13.4s\n"
+ "add x19, x11, XZR\n"
+ "fmla v26.4s, v7.4s, v13.4s\n"
+ "fmla v24.4s, v5.4s, v13.4s\n"
+ "fmla v23.4s, v4.4s, v13.4s\n"
"tbz %x[n_channels], #1, 41f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #0, 42f\n"
- "ld1 { v12.s }[2], [x20]\n"
+ "ld1 { v12.s }[2], [x19]\n"
"b 42f\n"
"41:" // Tile loop: Oddments: Load inputs: (2, 0): Bit 1: Unset
- "ldr s12, [x20, #0x0]\n"
+ "ldr s12, [x19, #0x0]\n"
"42:" // Tile loop: Oddments: Load inputs: (2, 0): Bit 1: End
- "fmla v23.4s, v6.4s, v12.4s\n"
- "fmla v26.4s, v3.4s, v12.4s\n"
- "add x20, x12, x26\n"
- "fmla v29.4s, v0.4s, v12.4s\n"
+ "fmla v31.4s, v6.4s, v12.4s\n"
+ "add x19, x11, x26\n"
+ "fmla v28.4s, v3.4s, v12.4s\n"
+ "fmla v25.4s, v0.4s, v12.4s\n"
"tbz %x[n_channels], #1, 43f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #0, 44f\n"
- "ld1 { v11.s }[2], [x20]\n"
+ "ld1 { v11.s }[2], [x19]\n"
"b 44f\n"
"43:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 1: Unset
- "ldr s11, [x20, #0x0]\n"
+ "ldr s11, [x19, #0x0]\n"
"44:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 1: End
- "fmla v25.4s, v8.4s, v11.4s\n"
- "fmla v28.4s, v5.4s, v11.4s\n"
- "add x20, x27, x11\n"
- "fmla v31.4s, v2.4s, v11.4s\n"
+ "fmla v29.4s, v8.4s, v11.4s\n"
+ "add x19, x9, x28\n"
+ "fmla v26.4s, v5.4s, v11.4s\n"
+ "fmla v23.4s, v2.4s, v11.4s\n"
"tbz %x[n_channels], #1, 45f\n"
- "ldr d13, [x20], #0x8\n"
+ "ldr d13, [x19], #0x8\n"
"tbz %x[n_channels], #0, 46f\n"
- "ld1 { v13.s }[2], [x20]\n"
+ "ld1 { v13.s }[2], [x19]\n"
"b 46f\n"
"45:" // Tile loop: Oddments: Load inputs: (4, 2): Bit 1: Unset
- "ldr s13, [x20, #0x0]\n"
+ "ldr s13, [x19, #0x0]\n"
"46:" // Tile loop: Oddments: Load inputs: (4, 2): Bit 1: End
- "fmla v29.4s, v8.4s, v13.4s\n"
- "fmla v30.4s, v7.4s, v13.4s\n"
- "fmax v23.4s, v23.4s, v18.4s\n"
- "fmla v31.4s, v6.4s, v13.4s\n"
- "fmax v24.4s, v24.4s, v18.4s\n"
- "fmax v25.4s, v25.4s, v18.4s\n"
- "fmax v26.4s, v26.4s, v18.4s\n"
- "fmax v27.4s, v27.4s, v18.4s\n"
- "fmax v28.4s, v28.4s, v18.4s\n"
- "fmax v29.4s, v29.4s, v18.4s\n"
- "fmax v30.4s, v30.4s, v18.4s\n"
+ "fmla v25.4s, v8.4s, v13.4s\n"
+ "fmla v24.4s, v7.4s, v13.4s\n"
+ "fmla v23.4s, v6.4s, v13.4s\n"
"fmax v31.4s, v31.4s, v18.4s\n"
- "fmin v23.4s, v23.4s, v17.4s\n"
- "fmin v24.4s, v24.4s, v17.4s\n"
- "fmin v25.4s, v25.4s, v17.4s\n"
- "fmin v26.4s, v26.4s, v17.4s\n"
- "fmin v27.4s, v27.4s, v17.4s\n"
- "fmin v28.4s, v28.4s, v17.4s\n"
- "fmin v29.4s, v29.4s, v17.4s\n"
- "fmin v30.4s, v30.4s, v17.4s\n"
+ "fmax v30.4s, v30.4s, v18.4s\n"
+ "fmax v29.4s, v29.4s, v18.4s\n"
"fmin v31.4s, v31.4s, v17.4s\n"
+ "fmin v30.4s, v30.4s, v17.4s\n"
+ "fmin v29.4s, v29.4s, v17.4s\n"
+ "fmax v28.4s, v28.4s, v18.4s\n"
+ "fmax v27.4s, v27.4s, v18.4s\n"
+ "fmax v26.4s, v26.4s, v18.4s\n"
+ "fmin v28.4s, v28.4s, v17.4s\n"
+ "fmin v27.4s, v27.4s, v17.4s\n"
+ "fmin v26.4s, v26.4s, v17.4s\n"
+ "fmax v25.4s, v25.4s, v18.4s\n"
+ "fmax v24.4s, v24.4s, v18.4s\n"
+ "fmax v23.4s, v23.4s, v18.4s\n"
+ "fmin v25.4s, v25.4s, v17.4s\n"
+ "fmin v24.4s, v24.4s, v17.4s\n"
+ "fmin v23.4s, v23.4s, v17.4s\n"
"tbz %x[n_channels], #1, 47f\n"
- "mov x22, x15\n"
- "mov x21, x28\n"
- "st1 { v23.d }[0], [x22], x17\n"
+ "mov x19, x13\n"
+ "st1 { v31.d }[0], [x19], x14\n"
+ "add x13, x13, #0x8\n"
+ "st1 { v30.d }[0], [x19], x14\n"
"mov x20, x25\n"
- "st1 { v26.d }[0], [x21], x17\n"
- "add x15, x15, #0x8\n"
- "st1 { v29.d }[0], [x20], x17\n"
- "add x28, x28, #0x8\n"
+ "st1 { v29.d }[0], [x19]\n"
+ "st1 { v28.d }[0], [x20], x14\n"
"add x25, x25, #0x8\n"
- "st1 { v24.d }[0], [x22], x17\n"
- "st1 { v27.d }[0], [x21], x17\n"
- "st1 { v30.d }[0], [x20], x17\n"
- "st1 { v25.d }[0], [x22]\n"
- "st1 { v28.d }[0], [x21]\n"
- "st1 { v31.d }[0], [x20]\n"
+ "st1 { v27.d }[0], [x20], x14\n"
+ "mov x19, x24\n"
+ "st1 { v26.d }[0], [x20]\n"
+ "add x24, x24, #0x8\n"
+ "st1 { v25.d }[0], [x19], x14\n"
+ "st1 { v24.d }[0], [x19], x14\n"
+ "st1 { v23.d }[0], [x19]\n"
"tbz %x[n_channels], #0, 48f\n"
- "mov x22, x15\n"
- "mov x21, x28\n"
- "st1 { v23.s }[2], [x22], x17\n"
+ "mov x21, x13\n"
+ "st1 { v31.s }[2], [x21], x14\n"
"mov x20, x25\n"
- "st1 { v26.s }[2], [x21], x17\n"
- "st1 { v29.s }[2], [x20], x17\n"
- "st1 { v24.s }[2], [x22], x17\n"
- "st1 { v27.s }[2], [x21], x17\n"
- "st1 { v30.s }[2], [x20], x17\n"
- "st1 { v25.s }[2], [x22]\n"
- "st1 { v28.s }[2], [x21]\n"
- "st1 { v31.s }[2], [x20]\n"
+ "st1 { v30.s }[2], [x21], x14\n"
+ "st1 { v28.s }[2], [x20], x14\n"
+ "mov x19, x24\n"
+ "st1 { v29.s }[2], [x21]\n"
+ "st1 { v27.s }[2], [x20], x14\n"
+ "st1 { v26.s }[2], [x20]\n"
+ "st1 { v25.s }[2], [x19], x14\n"
+ "st1 { v24.s }[2], [x19], x14\n"
+ "st1 { v23.s }[2], [x19]\n"
"b 48f\n"
"47:" // Tile loop: Oddments: Store: Bit 1: Unset
- "mov x22, x15\n"
- "mov x21, x28\n"
- "st1 { v23.s }[0], [x22], x17\n"
+ "mov x21, x13\n"
+ "st1 { v31.s }[0], [x21], x14\n"
"mov x20, x25\n"
- "st1 { v26.s }[0], [x21], x17\n"
- "st1 { v29.s }[0], [x20], x17\n"
- "st1 { v24.s }[0], [x22], x17\n"
- "st1 { v27.s }[0], [x21], x17\n"
- "st1 { v30.s }[0], [x20], x17\n"
- "st1 { v25.s }[0], [x22]\n"
- "st1 { v28.s }[0], [x21]\n"
- "st1 { v31.s }[0], [x20]\n"
+ "mov x19, x24\n"
+ "st1 { v30.s }[0], [x21], x14\n"
+ "st1 { v28.s }[0], [x20], x14\n"
+ "st1 { v29.s }[0], [x21]\n"
+ "st1 { v27.s }[0], [x20], x14\n"
+ "st1 { v26.s }[0], [x20]\n"
+ "st1 { v25.s }[0], [x19], x14\n"
+ "st1 { v24.s }[0], [x19], x14\n"
+ "st1 { v23.s }[0], [x19]\n"
"48:" // Tile loop: Oddments: Store: Bit 1: End
"49:" // Tile loop: End
- "ldr x23, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x24, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "add x23, x23, #0x1\n"
- "add x21, x24, #0x1\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "cmp x23, x20\n"
+ "ldr x7, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "add x21, x7, #0x1\n"
+ "ldr x8, [%x[params_struct], %[offsetof_args_tile_j]]\n"
"ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "csel x24, x24, x21, LT\n"
- "csel x23, x23, XZR, LT\n"
- "cmp x24, x20\n"
+ "add x8, x8, #0x1\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "cmp x8, x19\n"
+ "csel x8, x8, XZR, LT\n"
+ "csel x7, x7, x21, LT\n"
+ "cmp x7, x20\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
index 15053a337a..aa7d35e3e1 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,21 +87,21 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
activation_min, activation_max);
__asm__ __volatile__(
- "mov x8, #0x10\n" // cntb _, ALL, #1
- "lsr x17, %x[n_channels], #0x2\n"
- "ldr x16, [%x[params_struct], %[offsetof_args_outptrs]]\n"
+ "ldr x17, [%x[params_struct], %[offsetof_args_outptrs]]\n"
+ "add x16, %x[params_struct], %[offsetof_Args_inptrs]\n"
"ldr x15, [%x[params_struct], %[offsetof_args_params]]\n"
"add x20, %x[params_struct], %[offsetof_args_min]\n"
+ "add x19, %x[params_struct], %[offsetof_args_max]\n"
"ld1r { v18.4s }, [x20]\n"
- "add x20, %x[params_struct], %[offsetof_args_max]\n"
- "ld1r { v17.4s }, [x20]\n"
- "add x14, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "mov x13, #0x0\n"
- "sub x12, XZR, x8\n"
- "cbz x17, 3f\n"
+ "ld1r { v17.4s }, [x19]\n"
+ "mov x14, #0x0\n"
+ "mov x13, #0x10\n" // cntb _, ALL, #1
+ "sub x12, XZR, x13\n"
+ "lsr x11, %x[n_channels], #0x2\n"
+ "cbz x11, 3f\n"
"ldr q16, [x15, #0x0]\n"
"ldr q0, [x15, #0x10]\n"
- "cmp x8, x17, LSL #4\n"
+ "cmp x13, x11, LSL #4\n"
"ldr q1, [x15, #0x20]\n"
"ldr q2, [x15, #0x30]\n"
"ldr q3, [x15, #0x40]\n"
@@ -111,363 +111,363 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"ldr q7, [x15, #0x80]\n"
"ldr q8, [x15, #0x90]\n"
"add x15, x15, #0xa0\n"
- "ldp x11, x10, [x14, #0x0]\n"
- "ldr q9, [x11, x13]\n"
- "ldr q10, [x10, x13]\n"
- "ldp x9, x28, [x14, #0x10]\n"
- "ldr q11, [x9, x13]\n"
- "ldr q12, [x28, x13]\n"
- "ldr x27, [x14, #0x20]\n"
- "ldr q13, [x27, x13]\n"
+ "ldp x10, x9, [x16, #0x0]\n"
+ "ldp x28, x27, [x16, #0x10]\n"
+ "ldr x26, [x16, #0x20]\n"
+ "ldr q9, [x10, x14]\n"
+ "ldr q10, [x9, x14]\n"
+ "ldr q11, [x28, x14]\n"
+ "ldr q12, [x27, x14]\n"
+ "ldr q13, [x26, x14]\n"
"bge 2f\n"
"1:" // Channel loop
- "mov v23.16b, v16.16b\n fmla v23.4s, v8.4s, v9.4s\n"
- "mov v24.16b, v16.16b\n fmla v24.4s, v7.4s, v9.4s\n"
- "ldr x26, [x14, #0x30]\n"
- "ldr x25, [x14, #0x38]\n"
- "mov v25.16b, v16.16b\n fmla v25.4s, v6.4s, v9.4s\n"
- "fmla v23.4s, v0.4s, v10.4s\n"
- "ldr x24, [x14, #0x28]\n"
- "ldr x10, [x14, #0x48]\n"
- "ldr q10, [x10, x13]\n"
- "fmla v24.4s, v4.4s, v13.4s\n"
- "mov v26.16b, v16.16b\n fmla v26.4s, v5.4s, v9.4s\n"
- "ldr x11, [x14, #0x40]\n"
- "mov v27.16b, v16.16b\n fmla v27.4s, v4.4s, v9.4s\n"
- "mov v28.16b, v16.16b\n fmla v28.4s, v3.4s, v9.4s\n"
- "ldr x9, [x14, #0x50]\n"
- "ldr x28, [x14, #0x58]\n"
- "fmla v25.4s, v2.4s, v11.4s\n"
- "ldr q11, [x26, x13]\n"
- "mov v29.16b, v16.16b\n fmla v29.4s, v2.4s, v9.4s\n"
- "ldr x27, [x14, #0x60]\n"
- "fmla v23.4s, v5.4s, v13.4s\n"
- "fmla v24.4s, v6.4s, v11.4s\n"
- "ldr x26, [x14, #0x70]\n"
- "ldr x10, [x14, #0x88]\n"
- "mov v31.16b, v16.16b\n fmla v31.4s, v0.4s, v9.4s\n"
- "fmla v25.4s, v3.4s, v13.4s\n"
- "ldr x23, [x16, #0x0]\n"
+ "mov v31.16b, v16.16b\n fmla v31.4s, v8.4s, v9.4s\n"
+ "ldr x25, [x16, #0x28]\n"
"add x12, x12, #0x10\n"
- "fmla v26.4s, v2.4s, v13.4s\n"
+ "mov v30.16b, v16.16b\n fmla v30.4s, v7.4s, v9.4s\n"
+ "ldr x24, [x16, #0x30]\n"
+ "mov v29.16b, v16.16b\n fmla v29.4s, v6.4s, v9.4s\n"
+ "ldr x23, [x16, #0x38]\n"
+ "mov v28.16b, v16.16b\n fmla v28.4s, v5.4s, v9.4s\n"
+ "ldr x10, [x16, #0x40]\n"
+ "mov v27.16b, v16.16b\n fmla v27.4s, v4.4s, v9.4s\n"
+ "ldr x9, [x16, #0x48]\n"
+ "mov v26.16b, v16.16b\n fmla v26.4s, v3.4s, v9.4s\n"
+ "ldr x28, [x16, #0x50]\n"
+ "mov v25.16b, v16.16b\n fmla v25.4s, v2.4s, v9.4s\n"
+ "ldr x27, [x16, #0x58]\n"
+ "mov v24.16b, v16.16b\n fmla v24.4s, v1.4s, v9.4s\n"
+ "ldr x26, [x16, #0x60]\n"
+ "mov v23.16b, v16.16b\n fmla v23.4s, v0.4s, v9.4s\n"
+ "ldr x22, [x17, #0x0]\n"
+ "fmla v31.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x9, x14]\n"
+ "fmla v29.4s, v2.4s, v11.4s\n"
+ "ldr q11, [x24, x14]\n"
+ "fmla v25.4s, v6.4s, v12.4s\n"
+ "ldr q12, [x25, x14]\n"
+ "fmla v30.4s, v4.4s, v13.4s\n"
+ "ldr x25, [x16, #0x68]\n"
+ "fmla v31.4s, v5.4s, v13.4s\n"
+ "ldr x24, [x16, #0x70]\n"
+ "fmla v29.4s, v3.4s, v13.4s\n"
+ "ldr x9, [x16, #0x88]\n"
+ "fmla v28.4s, v2.4s, v13.4s\n"
+ "ldr x21, [x17, #0x8]\n"
"fmla v27.4s, v1.4s, v13.4s\n"
- "ldr x22, [x16, #0x8]\n"
- "ldr x21, [x16, #0x10]\n"
- "fmla v28.4s, v0.4s, v13.4s\n"
- "ldr q13, [x25, x13]\n"
- "fmla v29.4s, v6.4s, v12.4s\n"
- "ldr q12, [x24, x13]\n"
- "mov v30.16b, v16.16b\n fmla v30.4s, v1.4s, v9.4s\n"
- "ldr q16, [x15, #0x0]\n"
- "fmla v23.4s, v7.4s, v11.4s\n"
- "ldr x24, [x14, #0x68]\n"
- "fmla v24.4s, v0.4s, v13.4s\n"
- "fmla v31.4s, v8.4s, v12.4s\n"
- "ldr q12, [x11, x13]\n"
- "ldr x25, [x14, #0x78]\n"
- "fmla v26.4s, v4.4s, v11.4s\n"
+ "ldr x20, [x17, #0x10]\n"
+ "fmla v26.4s, v0.4s, v13.4s\n"
+ "ldr q13, [x23, x14]\n"
+ "fmla v23.4s, v8.4s, v12.4s\n"
+ "ldr q12, [x10, x14]\n"
+ "fmla v31.4s, v7.4s, v11.4s\n"
+ "ldr x23, [x16, #0x78]\n"
+ "fmla v30.4s, v6.4s, v11.4s\n"
+ "ldr x10, [x16, #0x80]\n"
+ "fmla v28.4s, v4.4s, v11.4s\n"
+ "ldr x19, [x17, #0x18]\n"
"fmla v27.4s, v3.4s, v11.4s\n"
- "ldr x11, [x14, #0x80]\n"
- "ldr x20, [x16, #0x18]\n"
- "fmla v30.4s, v0.4s, v11.4s\n"
- "fmla v28.4s, v4.4s, v10.4s\n"
- "fmla v29.4s, v1.4s, v11.4s\n"
- "ldr q11, [x9, x13]\n"
- "fmla v23.4s, v1.4s, v13.4s\n"
- "ldr q13, [x28, x13]\n"
- "fmla v24.4s, v2.4s, v12.4s\n"
- "fmla v25.4s, v1.4s, v12.4s\n"
- "ldr q12, [x27, x13]\n"
- "ldr x9, [x14, #0x90]\n"
+ "ldr q16, [x15, #0x0]\n"
+ "fmla v25.4s, v1.4s, v11.4s\n"
+ "fmla v24.4s, v0.4s, v11.4s\n"
+ "ldr q11, [x28, x14]\n"
+ "fmla v31.4s, v1.4s, v13.4s\n"
+ "ldr x28, [x16, #0x90]\n"
+ "fmla v30.4s, v0.4s, v13.4s\n"
+ "ldr q13, [x27, x14]\n"
+ "fmla v29.4s, v1.4s, v12.4s\n"
+ "ldr x27, [x16, #0x98]\n"
"fmla v27.4s, v5.4s, v10.4s\n"
- "fmla v30.4s, v2.4s, v10.4s\n"
- "ldr x27, [x14, #0xa0]\n"
- "ldr x28, [x14, #0x98]\n"
- "fmla v26.4s, v0.4s, v11.4s\n"
- "fmla v28.4s, v2.4s, v13.4s\n"
- "fmla v24.4s, v8.4s, v10.4s\n"
- "fmla v25.4s, v7.4s, v10.4s\n"
- "fmla v31.4s, v1.4s, v10.4s\n"
- "ldr q10, [x24, x13]\n"
- "fmla v29.4s, v3.4s, v12.4s\n"
- "ldr x24, [x14, #0xa8]\n"
- "fmla v26.4s, v6.4s, v12.4s\n"
- "ldr q12, [x11, x13]\n"
+ "fmla v26.4s, v4.4s, v10.4s\n"
+ "fmla v30.4s, v2.4s, v12.4s\n"
+ "ldr q12, [x26, x14]\n"
+ "fmla v29.4s, v7.4s, v10.4s\n"
+ "ldr x26, [x16, #0xa0]\n"
+ "fmla v24.4s, v2.4s, v10.4s\n"
+ "fmla v23.4s, v1.4s, v10.4s\n"
+ "fmla v30.4s, v8.4s, v10.4s\n"
+ "ldr q10, [x25, x14]\n"
+ "fmla v31.4s, v3.4s, v11.4s\n"
+ "ldr x25, [x16, #0xa8]\n"
+ "fmla v28.4s, v0.4s, v11.4s\n"
+ "ldr q11, [x24, x14]\n"
+ "fmla v29.4s, v5.4s, v13.4s\n"
+ "ldr x24, [x16, #0xb0]\n"
+ "fmla v26.4s, v2.4s, v13.4s\n"
+ "ldr q13, [x23, x14]\n"
+ "fmla v25.4s, v3.4s, v12.4s\n"
+ "ldr x23, [x16, #0xb8]\n"
+ "fmla v28.4s, v6.4s, v12.4s\n"
+ "ldr q12, [x10, x14]\n"
"fmla v27.4s, v7.4s, v10.4s\n"
- "ldr x11, [x14, #0xc0]\n"
- "fmla v28.4s, v6.4s, v10.4s\n"
- "fmla v30.4s, v4.4s, v10.4s\n"
- "fmla v23.4s, v3.4s, v11.4s\n"
- "ldr q11, [x26, x13]\n"
- "fmla v25.4s, v5.4s, v13.4s\n"
- "ldr q13, [x25, x13]\n"
- "fmla v29.4s, v5.4s, v10.4s\n"
- "fmla v31.4s, v3.4s, v10.4s\n"
- "ldr x26, [x14, #0xb0]\n"
- "ldr x25, [x14, #0xb8]\n"
- "fmla v26.4s, v8.4s, v10.4s\n"
- "fmla v28.4s, v8.4s, v11.4s\n"
- "fmla v30.4s, v6.4s, v13.4s\n"
- "fmla v24.4s, v3.4s, v12.4s\n"
+ "ldr x10, [x16, #0xc0]\n"
+ "fmla v26.4s, v6.4s, v10.4s\n"
+ "fmla v25.4s, v5.4s, v10.4s\n"
+ "fmla v28.4s, v8.4s, v10.4s\n"
+ "fmla v24.4s, v4.4s, v10.4s\n"
+ "fmla v23.4s, v3.4s, v10.4s\n"
+ "fmla v26.4s, v8.4s, v11.4s\n"
+ "fmla v25.4s, v7.4s, v13.4s\n"
+ "fmla v24.4s, v6.4s, v13.4s\n"
+ "ldr q13, [x28, x14]\n"
+ "fmla v23.4s, v5.4s, v11.4s\n"
+ "ldr q11, [x9, x14]\n"
+ "fmla v31.4s, v4.4s, v12.4s\n"
+ "fmla v30.4s, v3.4s, v12.4s\n"
+ "fmla v28.4s, v1.4s, v12.4s\n"
"fmla v27.4s, v0.4s, v12.4s\n"
- "fmla v31.4s, v5.4s, v11.4s\n"
- "ldr q11, [x10, x13]\n"
- "fmla v29.4s, v7.4s, v13.4s\n"
- "ldr q13, [x9, x13]\n"
- "fmla v23.4s, v4.4s, v12.4s\n"
- "fmla v26.4s, v1.4s, v12.4s\n"
- "ldr q12, [x28, x13]\n"
- "fmla v24.4s, v5.4s, v11.4s\n"
- "fmla v25.4s, v4.4s, v11.4s\n"
+ "ldr q12, [x27, x14]\n"
+ "fmla v29.4s, v4.4s, v11.4s\n"
+ "fmla v30.4s, v5.4s, v11.4s\n"
+ "fmla v26.4s, v1.4s, v11.4s\n"
"fmla v27.4s, v2.4s, v11.4s\n"
- "fmla v28.4s, v1.4s, v11.4s\n"
- "ldr q11, [x27, x13]\n"
- "fmla v30.4s, v8.4s, v13.4s\n"
- "ldr x27, [x14, #0x20]\n"
- "fmla v31.4s, v7.4s, v13.4s\n"
- "ldr q13, [x24, x13]\n"
- "fmla v23.4s, v2.4s, v11.4s\n"
- "fmla v26.4s, v7.4s, v12.4s\n"
+ "ldr q11, [x26, x14]\n"
+ "fmla v24.4s, v8.4s, v13.4s\n"
+ "ldr x26, [x16, #0x20]\n"
+ "fmla v23.4s, v7.4s, v13.4s\n"
+ "ldr q13, [x25, x14]\n"
+ "fmla v28.4s, v7.4s, v12.4s\n"
"fmla v27.4s, v6.4s, v12.4s\n"
- "fmla v29.4s, v4.4s, v12.4s\n"
- "fmla v30.4s, v3.4s, v12.4s\n"
- "ldr q12, [x26, x13]\n"
- "fmla v31.4s, v4.4s, v13.4s\n"
- "ldr q4, [x15, #0x50]\n"
- "fmla v24.4s, v1.4s, v11.4s\n"
+ "fmla v25.4s, v4.4s, v12.4s\n"
+ "fmla v24.4s, v3.4s, v12.4s\n"
+ "ldr q12, [x24, x14]\n"
+ "fmla v31.4s, v2.4s, v11.4s\n"
+ "fmla v30.4s, v1.4s, v11.4s\n"
"ldr q1, [x15, #0x20]\n"
- "fmla v25.4s, v0.4s, v11.4s\n"
- "ldr q11, [x25, x13]\n"
- "fmla v23.4s, v6.4s, v12.4s\n"
- "fmax v23.4s, v23.4s, v18.4s\n"
- "fmla v28.4s, v7.4s, v13.4s\n"
- "fmla v30.4s, v5.4s, v13.4s\n"
- "fmin v23.4s, v23.4s, v17.4s\n"
- "str q23, [x23, x12]\n"
- "fmla v29.4s, v0.4s, v12.4s\n"
+ "fmla v29.4s, v0.4s, v11.4s\n"
+ "ldr q11, [x23, x14]\n"
+ "fmla v27.4s, v8.4s, v13.4s\n"
+ "fmla v26.4s, v7.4s, v13.4s\n"
+ "fmla v24.4s, v5.4s, v13.4s\n"
+ "fmla v23.4s, v4.4s, v13.4s\n"
+ "ldr q13, [x10, x14]\n"
+ "add x14, x14, #0x10\n"
+ "fmla v31.4s, v6.4s, v12.4s\n"
+ "ldp x10, x9, [x16, #0x0]\n"
+ "fmla v28.4s, v3.4s, v12.4s\n"
+ "ldp x28, x27, [x16, #0x10]\n"
+ "fmla v25.4s, v0.4s, v12.4s\n"
"ldr q0, [x15, #0x10]\n"
- "fmla v31.4s, v2.4s, v11.4s\n"
+ "fmla v29.4s, v8.4s, v11.4s\n"
+ "ldr q9, [x10, x13]\n"
+ "fmla v26.4s, v5.4s, v11.4s\n"
+ "ldr q10, [x9, x13]\n"
+ "fmla v23.4s, v2.4s, v11.4s\n"
+ "ldr q11, [x28, x13]\n"
+ "fmla v25.4s, v8.4s, v13.4s\n"
+ "ldr q12, [x27, x13]\n"
+ "fmla v24.4s, v7.4s, v13.4s\n"
"ldr q2, [x15, #0x30]\n"
- "fmla v27.4s, v8.4s, v13.4s\n"
- "ldr q13, [x11, x13]\n"
- "fmla v26.4s, v3.4s, v12.4s\n"
+ "fmax v31.4s, v31.4s, v18.4s\n"
"ldr q3, [x15, #0x40]\n"
- "fmla v25.4s, v8.4s, v11.4s\n"
- "fmla v28.4s, v5.4s, v11.4s\n"
+ "fmla v23.4s, v6.4s, v13.4s\n"
+ "ldr q13, [x26, x13]\n"
+ "add x13, x13, #0x10\n"
+ "fmin v31.4s, v31.4s, v17.4s\n"
+ "ldr q4, [x15, #0x50]\n"
+ "cmp x13, x11, LSL #4\n"
+ "fmax v30.4s, v30.4s, v18.4s\n"
"ldr q5, [x15, #0x60]\n"
- "fmax v24.4s, v24.4s, v18.4s\n"
- "fmla v29.4s, v8.4s, v13.4s\n"
- "ldr q8, [x15, #0x90]\n"
- "fmla v30.4s, v7.4s, v13.4s\n"
- "ldr q7, [x15, #0x80]\n"
- "fmla v31.4s, v6.4s, v13.4s\n"
- "ldr q13, [x27, x8]\n"
+ "fmax v29.4s, v29.4s, v18.4s\n"
"ldr q6, [x15, #0x70]\n"
- "fmax v25.4s, v25.4s, v18.4s\n"
- "fmax v26.4s, v26.4s, v18.4s\n"
- "fmax v27.4s, v27.4s, v18.4s\n"
- "ldr x23, [x16, #0x20]\n"
- "ldp x11, x10, [x14, #0x0]\n"
- "ldr q9, [x11, x8]\n"
- "ldr q10, [x10, x8]\n"
- "fmin v24.4s, v24.4s, v17.4s\n"
- "fmin v25.4s, v25.4s, v17.4s\n"
- "ldp x9, x28, [x14, #0x10]\n"
- "ldr q11, [x9, x8]\n"
- "fmin v26.4s, v26.4s, v17.4s\n"
- "fmin v27.4s, v27.4s, v17.4s\n"
- "ldr q12, [x28, x8]\n"
"fmax v28.4s, v28.4s, v18.4s\n"
- "fmax v29.4s, v29.4s, v18.4s\n"
- "str q24, [x22, x12]\n"
- "fmax v30.4s, v30.4s, v18.4s\n"
- "fmax v31.4s, v31.4s, v18.4s\n"
- "str q25, [x21, x12]\n"
- "ldr x22, [x16, #0x28]\n"
- "str q26, [x20, x12]\n"
- "ldr x21, [x16, #0x30]\n"
- "ldr x20, [x16, #0x38]\n"
- "add x8, x8, #0x10\n"
- "str q27, [x23, x12]\n"
- "ldr x23, [x16, #0x40]\n"
- "cmp x8, x17, LSL #4\n"
- "fmin v28.4s, v28.4s, v17.4s\n"
- "fmin v29.4s, v29.4s, v17.4s\n"
+ "str q31, [x22, x12]\n"
+ "fmax v27.4s, v27.4s, v18.4s\n"
+ "ldr x22, [x17, #0x20]\n"
"fmin v30.4s, v30.4s, v17.4s\n"
- "add x13, x13, #0x10\n"
- "str q28, [x22, x12]\n"
- "fmin v31.4s, v31.4s, v17.4s\n"
- "str q29, [x21, x12]\n"
+ "ldr q7, [x15, #0x80]\n"
+ "fmin v29.4s, v29.4s, v17.4s\n"
+ "ldr q8, [x15, #0x90]\n"
"add x15, x15, #0xa0\n"
- "str q30, [x20, x12]\n"
- "str q31, [x23, x12]\n"
+ "fmin v28.4s, v28.4s, v17.4s\n"
+ "str q30, [x21, x12]\n"
+ "fmin v27.4s, v27.4s, v17.4s\n"
+ "str q29, [x20, x12]\n"
+ "fmax v26.4s, v26.4s, v18.4s\n"
+ "ldr x21, [x17, #0x28]\n"
+ "fmax v25.4s, v25.4s, v18.4s\n"
+ "str q28, [x19, x12]\n"
+ "fmax v24.4s, v24.4s, v18.4s\n"
+ "str q27, [x22, x12]\n"
+ "fmin v26.4s, v26.4s, v17.4s\n"
+ "ldr x20, [x17, #0x30]\n"
+ "fmin v25.4s, v25.4s, v17.4s\n"
+ "ldr x19, [x17, #0x38]\n"
+ "fmin v24.4s, v24.4s, v17.4s\n"
+ "str q26, [x21, x12]\n"
+ "fmax v23.4s, v23.4s, v18.4s\n"
+ "str q25, [x20, x12]\n"
+ "ldr x22, [x17, #0x40]\n"
+ "fmin v23.4s, v23.4s, v17.4s\n"
+ "str q24, [x19, x12]\n"
+ "str q23, [x22, x12]\n"
"blt 1b\n"
"2:" // Channel tail
- "mov v23.16b, v16.16b\n fmla v23.4s, v8.4s, v9.4s\n"
- "mov v24.16b, v16.16b\n fmla v24.4s, v7.4s, v9.4s\n"
- "ldr x26, [x14, #0x30]\n"
- "ldr x25, [x14, #0x38]\n"
- "mov v25.16b, v16.16b\n fmla v25.4s, v6.4s, v9.4s\n"
- "fmla v23.4s, v0.4s, v10.4s\n"
- "ldr x24, [x14, #0x28]\n"
- "ldr x10, [x14, #0x48]\n"
- "ldr q10, [x10, x13]\n"
- "fmla v24.4s, v4.4s, v13.4s\n"
- "mov v26.16b, v16.16b\n fmla v26.4s, v5.4s, v9.4s\n"
- "ldr x11, [x14, #0x40]\n"
- "mov v27.16b, v16.16b\n fmla v27.4s, v4.4s, v9.4s\n"
- "mov v28.16b, v16.16b\n fmla v28.4s, v3.4s, v9.4s\n"
- "ldr x9, [x14, #0x50]\n"
- "ldr x28, [x14, #0x58]\n"
- "fmla v25.4s, v2.4s, v11.4s\n"
- "ldr q11, [x26, x13]\n"
- "mov v29.16b, v16.16b\n fmla v29.4s, v2.4s, v9.4s\n"
- "ldr x27, [x14, #0x60]\n"
- "fmla v23.4s, v5.4s, v13.4s\n"
- "fmla v24.4s, v6.4s, v11.4s\n"
- "ldr x26, [x14, #0x70]\n"
- "ldr x10, [x14, #0x88]\n"
- "mov v31.16b, v16.16b\n fmla v31.4s, v0.4s, v9.4s\n"
- "fmla v25.4s, v3.4s, v13.4s\n"
- "ldr x23, [x16, #0x0]\n"
+ "mov v31.16b, v16.16b\n fmla v31.4s, v8.4s, v9.4s\n"
+ "ldr x25, [x16, #0x28]\n"
"add x12, x12, #0x10\n"
- "fmla v26.4s, v2.4s, v13.4s\n"
+ "mov v30.16b, v16.16b\n fmla v30.4s, v7.4s, v9.4s\n"
+ "ldr x24, [x16, #0x30]\n"
+ "mov v29.16b, v16.16b\n fmla v29.4s, v6.4s, v9.4s\n"
+ "ldr x23, [x16, #0x38]\n"
+ "mov v28.16b, v16.16b\n fmla v28.4s, v5.4s, v9.4s\n"
+ "ldr x10, [x16, #0x40]\n"
+ "mov v27.16b, v16.16b\n fmla v27.4s, v4.4s, v9.4s\n"
+ "ldr x9, [x16, #0x48]\n"
+ "mov v26.16b, v16.16b\n fmla v26.4s, v3.4s, v9.4s\n"
+ "ldr x28, [x16, #0x50]\n"
+ "mov v25.16b, v16.16b\n fmla v25.4s, v2.4s, v9.4s\n"
+ "ldr x27, [x16, #0x58]\n"
+ "mov v24.16b, v16.16b\n fmla v24.4s, v1.4s, v9.4s\n"
+ "ldr x26, [x16, #0x60]\n"
+ "mov v23.16b, v16.16b\n fmla v23.4s, v0.4s, v9.4s\n"
+ "ldr x22, [x17, #0x0]\n"
+ "fmla v31.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x9, x14]\n"
+ "fmla v29.4s, v2.4s, v11.4s\n"
+ "ldr q11, [x24, x14]\n"
+ "fmla v25.4s, v6.4s, v12.4s\n"
+ "ldr q12, [x25, x14]\n"
+ "fmla v30.4s, v4.4s, v13.4s\n"
+ "ldr x25, [x16, #0x68]\n"
+ "fmla v31.4s, v5.4s, v13.4s\n"
+ "ldr x24, [x16, #0x70]\n"
+ "fmla v29.4s, v3.4s, v13.4s\n"
+ "ldr x9, [x16, #0x88]\n"
+ "fmla v28.4s, v2.4s, v13.4s\n"
+ "ldr x21, [x17, #0x8]\n"
"fmla v27.4s, v1.4s, v13.4s\n"
- "ldr x22, [x16, #0x8]\n"
- "ldr x21, [x16, #0x10]\n"
- "fmla v28.4s, v0.4s, v13.4s\n"
- "ldr q13, [x25, x13]\n"
- "fmla v29.4s, v6.4s, v12.4s\n"
- "ldr q12, [x24, x13]\n"
- "mov v30.16b, v16.16b\n fmla v30.4s, v1.4s, v9.4s\n"
- "fmla v23.4s, v7.4s, v11.4s\n"
- "ldr x24, [x14, #0x68]\n"
- "ldr x25, [x14, #0x78]\n"
- "fmla v24.4s, v0.4s, v13.4s\n"
- "fmla v31.4s, v8.4s, v12.4s\n"
- "ldr q12, [x11, x13]\n"
- "ldr x11, [x14, #0x80]\n"
- "fmla v26.4s, v4.4s, v11.4s\n"
+ "ldr x20, [x17, #0x10]\n"
+ "fmla v26.4s, v0.4s, v13.4s\n"
+ "ldr q13, [x23, x14]\n"
+ "fmla v23.4s, v8.4s, v12.4s\n"
+ "ldr q12, [x10, x14]\n"
+ "fmla v31.4s, v7.4s, v11.4s\n"
+ "ldr x23, [x16, #0x78]\n"
+ "fmla v30.4s, v6.4s, v11.4s\n"
+ "ldr x10, [x16, #0x80]\n"
+ "fmla v28.4s, v4.4s, v11.4s\n"
+ "ldr x19, [x17, #0x18]\n"
"fmla v27.4s, v3.4s, v11.4s\n"
- "ldr x20, [x16, #0x18]\n"
- "fmla v30.4s, v0.4s, v11.4s\n"
- "fmla v28.4s, v4.4s, v10.4s\n"
- "fmla v29.4s, v1.4s, v11.4s\n"
- "ldr q11, [x9, x13]\n"
- "fmla v23.4s, v1.4s, v13.4s\n"
- "ldr q13, [x28, x13]\n"
- "fmla v24.4s, v2.4s, v12.4s\n"
- "fmla v25.4s, v1.4s, v12.4s\n"
- "ldr q12, [x27, x13]\n"
- "ldr x9, [x14, #0x90]\n"
+ "fmla v25.4s, v1.4s, v11.4s\n"
+ "fmla v24.4s, v0.4s, v11.4s\n"
+ "ldr q11, [x28, x14]\n"
+ "fmla v31.4s, v1.4s, v13.4s\n"
+ "ldr x28, [x16, #0x90]\n"
+ "fmla v30.4s, v0.4s, v13.4s\n"
+ "ldr q13, [x27, x14]\n"
+ "fmla v29.4s, v1.4s, v12.4s\n"
+ "ldr x27, [x16, #0x98]\n"
"fmla v27.4s, v5.4s, v10.4s\n"
- "fmla v30.4s, v2.4s, v10.4s\n"
- "ldr x27, [x14, #0xa0]\n"
- "ldr x28, [x14, #0x98]\n"
- "fmla v26.4s, v0.4s, v11.4s\n"
- "fmla v28.4s, v2.4s, v13.4s\n"
- "fmla v24.4s, v8.4s, v10.4s\n"
- "fmla v25.4s, v7.4s, v10.4s\n"
- "fmla v31.4s, v1.4s, v10.4s\n"
- "ldr q10, [x24, x13]\n"
- "fmla v29.4s, v3.4s, v12.4s\n"
- "ldr x24, [x14, #0xa8]\n"
- "fmla v26.4s, v6.4s, v12.4s\n"
- "ldr q12, [x11, x13]\n"
+ "fmla v26.4s, v4.4s, v10.4s\n"
+ "fmla v30.4s, v2.4s, v12.4s\n"
+ "ldr q12, [x26, x14]\n"
+ "fmla v29.4s, v7.4s, v10.4s\n"
+ "ldr x26, [x16, #0xa0]\n"
+ "fmla v24.4s, v2.4s, v10.4s\n"
+ "fmla v23.4s, v1.4s, v10.4s\n"
+ "fmla v30.4s, v8.4s, v10.4s\n"
+ "ldr q10, [x25, x14]\n"
+ "fmla v31.4s, v3.4s, v11.4s\n"
+ "ldr x25, [x16, #0xa8]\n"
+ "fmla v28.4s, v0.4s, v11.4s\n"
+ "ldr q11, [x24, x14]\n"
+ "fmla v29.4s, v5.4s, v13.4s\n"
+ "ldr x24, [x16, #0xb0]\n"
+ "fmla v26.4s, v2.4s, v13.4s\n"
+ "ldr q13, [x23, x14]\n"
+ "fmla v25.4s, v3.4s, v12.4s\n"
+ "ldr x23, [x16, #0xb8]\n"
+ "fmla v28.4s, v6.4s, v12.4s\n"
+ "ldr q12, [x10, x14]\n"
"fmla v27.4s, v7.4s, v10.4s\n"
- "ldr x11, [x14, #0xc0]\n"
- "fmla v28.4s, v6.4s, v10.4s\n"
- "fmla v30.4s, v4.4s, v10.4s\n"
- "fmla v23.4s, v3.4s, v11.4s\n"
- "ldr q11, [x26, x13]\n"
- "fmla v25.4s, v5.4s, v13.4s\n"
- "ldr q13, [x25, x13]\n"
- "fmla v29.4s, v5.4s, v10.4s\n"
- "fmla v31.4s, v3.4s, v10.4s\n"
- "ldr x26, [x14, #0xb0]\n"
- "ldr x25, [x14, #0xb8]\n"
- "fmla v26.4s, v8.4s, v10.4s\n"
- "fmla v28.4s, v8.4s, v11.4s\n"
- "fmla v30.4s, v6.4s, v13.4s\n"
- "fmla v24.4s, v3.4s, v12.4s\n"
+ "ldr x10, [x16, #0xc0]\n"
+ "fmla v26.4s, v6.4s, v10.4s\n"
+ "fmla v25.4s, v5.4s, v10.4s\n"
+ "fmla v28.4s, v8.4s, v10.4s\n"
+ "fmla v24.4s, v4.4s, v10.4s\n"
+ "fmla v23.4s, v3.4s, v10.4s\n"
+ "fmla v26.4s, v8.4s, v11.4s\n"
+ "fmla v25.4s, v7.4s, v13.4s\n"
+ "fmla v24.4s, v6.4s, v13.4s\n"
+ "ldr q13, [x28, x14]\n"
+ "fmla v23.4s, v5.4s, v11.4s\n"
+ "ldr q11, [x9, x14]\n"
+ "fmla v31.4s, v4.4s, v12.4s\n"
+ "fmla v30.4s, v3.4s, v12.4s\n"
+ "fmla v28.4s, v1.4s, v12.4s\n"
"fmla v27.4s, v0.4s, v12.4s\n"
- "fmla v31.4s, v5.4s, v11.4s\n"
- "ldr q11, [x10, x13]\n"
- "fmla v29.4s, v7.4s, v13.4s\n"
- "ldr q13, [x9, x13]\n"
- "fmla v23.4s, v4.4s, v12.4s\n"
- "fmla v26.4s, v1.4s, v12.4s\n"
- "ldr q12, [x28, x13]\n"
- "fmla v24.4s, v5.4s, v11.4s\n"
- "fmla v25.4s, v4.4s, v11.4s\n"
+ "ldr q12, [x27, x14]\n"
+ "fmla v29.4s, v4.4s, v11.4s\n"
+ "fmla v30.4s, v5.4s, v11.4s\n"
+ "fmla v26.4s, v1.4s, v11.4s\n"
"fmla v27.4s, v2.4s, v11.4s\n"
- "fmla v28.4s, v1.4s, v11.4s\n"
- "ldr q11, [x27, x13]\n"
- "fmla v30.4s, v8.4s, v13.4s\n"
- "fmla v31.4s, v7.4s, v13.4s\n"
- "ldr q13, [x24, x13]\n"
- "fmla v23.4s, v2.4s, v11.4s\n"
- "fmla v26.4s, v7.4s, v12.4s\n"
+ "ldr q11, [x26, x14]\n"
+ "fmla v24.4s, v8.4s, v13.4s\n"
+ "fmla v23.4s, v7.4s, v13.4s\n"
+ "ldr q13, [x25, x14]\n"
+ "fmla v28.4s, v7.4s, v12.4s\n"
"fmla v27.4s, v6.4s, v12.4s\n"
- "fmla v29.4s, v4.4s, v12.4s\n"
- "fmla v30.4s, v3.4s, v12.4s\n"
- "ldr q12, [x26, x13]\n"
- "fmla v31.4s, v4.4s, v13.4s\n"
- "fmla v24.4s, v1.4s, v11.4s\n"
- "fmax v24.4s, v24.4s, v18.4s\n"
- "fmla v25.4s, v0.4s, v11.4s\n"
- "ldr q11, [x25, x13]\n"
- "fmla v23.4s, v6.4s, v12.4s\n"
- "fmax v23.4s, v23.4s, v18.4s\n"
- "fmla v28.4s, v7.4s, v13.4s\n"
- "fmla v30.4s, v5.4s, v13.4s\n"
- "fmin v23.4s, v23.4s, v17.4s\n"
- "str q23, [x23, x12]\n"
- "fmla v29.4s, v0.4s, v12.4s\n"
+ "fmla v25.4s, v4.4s, v12.4s\n"
+ "fmla v24.4s, v3.4s, v12.4s\n"
+ "ldr q12, [x24, x14]\n"
"fmla v31.4s, v2.4s, v11.4s\n"
- "ldr x23, [x16, #0x20]\n"
- "fmin v24.4s, v24.4s, v17.4s\n"
+ "fmla v30.4s, v1.4s, v11.4s\n"
+ "fmla v29.4s, v0.4s, v11.4s\n"
+ "ldr q11, [x23, x14]\n"
"fmla v27.4s, v8.4s, v13.4s\n"
- "ldr q13, [x11, x13]\n"
- "fmla v26.4s, v3.4s, v12.4s\n"
+ "fmla v26.4s, v7.4s, v13.4s\n"
+ "fmla v24.4s, v5.4s, v13.4s\n"
+ "fmla v23.4s, v4.4s, v13.4s\n"
+ "ldr q13, [x10, x14]\n"
+ "add x14, x14, #0x10\n"
+ "fmla v31.4s, v6.4s, v12.4s\n"
+ "fmla v28.4s, v3.4s, v12.4s\n"
+ "fmla v25.4s, v0.4s, v12.4s\n"
+ "fmla v29.4s, v8.4s, v11.4s\n"
+ "fmla v26.4s, v5.4s, v11.4s\n"
+ "fmla v23.4s, v2.4s, v11.4s\n"
+ "fmla v25.4s, v8.4s, v13.4s\n"
+ "fmla v24.4s, v7.4s, v13.4s\n"
+ "fmax v31.4s, v31.4s, v18.4s\n"
+ "fmla v23.4s, v6.4s, v13.4s\n"
+ "fmax v30.4s, v30.4s, v18.4s\n"
+ "fmin v31.4s, v31.4s, v17.4s\n"
+ "str q31, [x22, x12]\n"
+ "fmin v30.4s, v30.4s, v17.4s\n"
+ "fmax v29.4s, v29.4s, v18.4s\n"
+ "ldr x22, [x17, #0x20]\n"
+ "fmax v28.4s, v28.4s, v18.4s\n"
+ "str q30, [x21, x12]\n"
+ "fmin v29.4s, v29.4s, v17.4s\n"
+ "fmax v27.4s, v27.4s, v18.4s\n"
+ "ldr x21, [x17, #0x28]\n"
+ "fmin v28.4s, v28.4s, v17.4s\n"
+ "str q29, [x20, x12]\n"
+ "fmin v27.4s, v27.4s, v17.4s\n"
"fmax v26.4s, v26.4s, v18.4s\n"
- "fmla v25.4s, v8.4s, v11.4s\n"
- "fmla v28.4s, v5.4s, v11.4s\n"
+ "str q28, [x19, x12]\n"
"fmax v25.4s, v25.4s, v18.4s\n"
- "str q24, [x22, x12]\n"
- "fmla v29.4s, v8.4s, v13.4s\n"
- "fmla v30.4s, v7.4s, v13.4s\n"
- "fmax v27.4s, v27.4s, v18.4s\n"
- "ldr x22, [x16, #0x28]\n"
- "fmla v31.4s, v6.4s, v13.4s\n"
- "fmin v25.4s, v25.4s, v17.4s\n"
- "str q25, [x21, x12]\n"
- "ldr x21, [x16, #0x30]\n"
+ "ldr x20, [x17, #0x30]\n"
+ "fmax v24.4s, v24.4s, v18.4s\n"
+ "str q27, [x22, x12]\n"
"fmin v26.4s, v26.4s, v17.4s\n"
- "fmin v27.4s, v27.4s, v17.4s\n"
- "str q26, [x20, x12]\n"
- "ldr x20, [x16, #0x38]\n"
- "fmax v28.4s, v28.4s, v18.4s\n"
- "fmax v29.4s, v29.4s, v18.4s\n"
- "str q27, [x23, x12]\n"
- "ldr x23, [x16, #0x40]\n"
- "fmax v30.4s, v30.4s, v18.4s\n"
- "fmax v31.4s, v31.4s, v18.4s\n"
- "add x13, x13, #0x10\n"
- "fmin v28.4s, v28.4s, v17.4s\n"
- "fmin v29.4s, v29.4s, v17.4s\n"
- "str q28, [x22, x12]\n"
- "fmin v30.4s, v30.4s, v17.4s\n"
- "fmin v31.4s, v31.4s, v17.4s\n"
- "str q29, [x21, x12]\n"
- "str q30, [x20, x12]\n"
- "str q31, [x23, x12]\n"
+ "ldr x19, [x17, #0x38]\n"
+ "fmin v25.4s, v25.4s, v17.4s\n"
+ "ldr x22, [x17, #0x40]\n"
+ "fmin v24.4s, v24.4s, v17.4s\n"
+ "str q26, [x21, x12]\n"
+ "fmax v23.4s, v23.4s, v18.4s\n"
+ "str q25, [x20, x12]\n"
+ "str q24, [x19, x12]\n"
+ "fmin v23.4s, v23.4s, v17.4s\n"
+ "str q23, [x22, x12]\n"
"3:" // Oddments
"tst %x[n_channels], #0x3\n"
"beq 48f\n"
"ldr q16, [x15, #0x0]\n"
"ldr q0, [x15, #0x10]\n"
- "mov x12, x13\n"
+ "mov x12, x14\n"
"ldr q1, [x15, #0x20]\n"
"ldr q2, [x15, #0x30]\n"
"ldr q3, [x15, #0x40]\n"
@@ -476,426 +476,428 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"ldr q6, [x15, #0x70]\n"
"ldr q7, [x15, #0x80]\n"
"ldr q8, [x15, #0x90]\n"
- "ldr x24, [x14, #0x0]\n"
- "ldr x23, [x14, #0x8]\n"
- "add x24, x24, x13\n"
- "add x23, x23, x13\n"
- "ldr x22, [x14, #0x10]\n"
- "ldr x21, [x14, #0x18]\n"
- "add x22, x22, x13\n"
- "add x21, x21, x13\n"
- "ldr x20, [x14, #0x20]\n"
- "add x20, x20, x13\n"
+ "ldr x10, [x16, #0x0]\n"
+ "add x10, x10, x14\n"
+ "ldr x9, [x16, #0x8]\n"
+ "ldr x28, [x16, #0x10]\n"
+ "add x9, x9, x14\n"
+ "ldr x27, [x16, #0x18]\n"
+ "ldr x26, [x16, #0x20]\n"
+ "add x28, x28, x14\n"
+ "add x27, x27, x14\n"
+ "add x26, x26, x14\n"
"tbz %x[n_channels], #1, 4f\n"
- "ld1 { v9.d }[0], [x24], #0x8\n"
- "ld1 { v10.d }[0], [x23], #0x8\n"
- "ld1 { v11.d }[0], [x22], #0x8\n"
- "ld1 { v12.d }[0], [x21], #0x8\n"
- "ld1 { v13.d }[0], [x20], #0x8\n"
+ "ld1 { v9.d }[0], [x10], #0x8\n"
+ "ld1 { v10.d }[0], [x9], #0x8\n"
+ "ld1 { v11.d }[0], [x28], #0x8\n"
+ "ld1 { v12.d }[0], [x27], #0x8\n"
+ "ld1 { v13.d }[0], [x26], #0x8\n"
"tbz %x[n_channels], #0, 5f\n"
- "ld1 { v9.s }[2], [x24], #0x4\n"
- "ld1 { v10.s }[2], [x23], #0x4\n"
- "ld1 { v11.s }[2], [x22], #0x4\n"
- "ld1 { v12.s }[2], [x21], #0x4\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x10], #0x4\n"
+ "ld1 { v10.s }[2], [x9], #0x4\n"
+ "ld1 { v11.s }[2], [x28], #0x4\n"
+ "ld1 { v12.s }[2], [x27], #0x4\n"
+ "ld1 { v13.s }[2], [x26], #0x4\n"
"b 5f\n"
"4:" // Oddments: Load inputs (2, 2), (0, 0), (0, 4), (4, 0), (1, 2): Bit 1: Unset
- "ld1 { v9.s }[0], [x24], #0x4\n"
- "ld1 { v10.s }[0], [x23], #0x4\n"
- "ld1 { v11.s }[0], [x22], #0x4\n"
- "ld1 { v12.s }[0], [x21], #0x4\n"
- "ld1 { v13.s }[0], [x20], #0x4\n"
+ "ld1 { v9.s }[0], [x10], #0x4\n"
+ "ld1 { v10.s }[0], [x9], #0x4\n"
+ "ld1 { v11.s }[0], [x28], #0x4\n"
+ "ld1 { v12.s }[0], [x27], #0x4\n"
+ "ld1 { v13.s }[0], [x26], #0x4\n"
"5:" // Oddments: Load inputs (2, 2), (0, 0), (0, 4), (4, 0), (1, 2): Bit 1: End
- "mov v23.16b, v16.16b\n fmla v23.4s, v8.4s, v9.4s\n"
- "mov v25.16b, v16.16b\n fmla v25.4s, v6.4s, v9.4s\n"
- "ldr x20, [x14, #0x28]\n"
- "add x20, x20, x13\n"
- "mov v24.16b, v16.16b\n fmla v24.4s, v7.4s, v9.4s\n"
- "mov v26.16b, v16.16b\n fmla v26.4s, v5.4s, v9.4s\n"
+ "mov v31.16b, v16.16b\n fmla v31.4s, v8.4s, v9.4s\n"
+ "ldr x25, [x16, #0x28]\n"
+ "add x25, x25, x14\n"
+ "mov v30.16b, v16.16b\n fmla v30.4s, v7.4s, v9.4s\n"
+ "mov v29.16b, v16.16b\n fmla v29.4s, v6.4s, v9.4s\n"
+ "mov v28.16b, v16.16b\n fmla v28.4s, v5.4s, v9.4s\n"
"mov v27.16b, v16.16b\n fmla v27.4s, v4.4s, v9.4s\n"
- "mov v28.16b, v16.16b\n fmla v28.4s, v3.4s, v9.4s\n"
- "mov v29.16b, v16.16b\n fmla v29.4s, v2.4s, v9.4s\n"
- "fmla v23.4s, v0.4s, v10.4s\n"
- "fmla v25.4s, v2.4s, v11.4s\n"
- "mov v30.16b, v16.16b\n fmla v30.4s, v1.4s, v9.4s\n"
- "mov v31.16b, v16.16b\n fmla v31.4s, v0.4s, v9.4s\n"
- "fmla v29.4s, v6.4s, v12.4s\n"
- "fmla v23.4s, v5.4s, v13.4s\n"
- "fmla v24.4s, v4.4s, v13.4s\n"
- "fmla v25.4s, v3.4s, v13.4s\n"
- "fmla v26.4s, v2.4s, v13.4s\n"
+ "mov v26.16b, v16.16b\n fmla v26.4s, v3.4s, v9.4s\n"
+ "mov v25.16b, v16.16b\n fmla v25.4s, v2.4s, v9.4s\n"
+ "mov v24.16b, v16.16b\n fmla v24.4s, v1.4s, v9.4s\n"
+ "mov v23.16b, v16.16b\n fmla v23.4s, v0.4s, v9.4s\n"
+ "fmla v31.4s, v0.4s, v10.4s\n"
+ "fmla v29.4s, v2.4s, v11.4s\n"
+ "fmla v25.4s, v6.4s, v12.4s\n"
+ "fmla v30.4s, v4.4s, v13.4s\n"
+ "fmla v31.4s, v5.4s, v13.4s\n"
+ "fmla v29.4s, v3.4s, v13.4s\n"
+ "fmla v28.4s, v2.4s, v13.4s\n"
"fmla v27.4s, v1.4s, v13.4s\n"
- "fmla v28.4s, v0.4s, v13.4s\n"
+ "fmla v26.4s, v0.4s, v13.4s\n"
"tbz %x[n_channels], #1, 6f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x25], #0x8\n"
"tbz %x[n_channels], #0, 7f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x25], #0x4\n"
"b 7f\n"
"6:" // Oddments: Load input (4, 4): Bit 1: Unset
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x25], #0x4\n"
"7:" // Oddments: Load input (4, 4): Bit 1: End
- "ldr x20, [x14, #0x30]\n"
- "fmla v31.4s, v8.4s, v12.4s\n"
- "add x20, x20, x13\n"
+ "fmla v23.4s, v8.4s, v12.4s\n"
+ "ldr x24, [x16, #0x30]\n"
+ "add x24, x24, x14\n"
"tbz %x[n_channels], #1, 8f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x24], #0x8\n"
"tbz %x[n_channels], #0, 9f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x24], #0x4\n"
"b 9f\n"
"8:" // Oddments: Load input (2, 1): Bit 1: Unset
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x24], #0x4\n"
"9:" // Oddments: Load input (2, 1): Bit 1: End
- "ldr x20, [x14, #0x38]\n"
- "fmla v23.4s, v7.4s, v11.4s\n"
- "fmla v24.4s, v6.4s, v11.4s\n"
- "add x20, x20, x13\n"
- "fmla v26.4s, v4.4s, v11.4s\n"
+ "fmla v31.4s, v7.4s, v11.4s\n"
+ "ldr x23, [x16, #0x38]\n"
+ "fmla v30.4s, v6.4s, v11.4s\n"
+ "add x23, x23, x14\n"
+ "fmla v28.4s, v4.4s, v11.4s\n"
"fmla v27.4s, v3.4s, v11.4s\n"
- "fmla v29.4s, v1.4s, v11.4s\n"
- "fmla v30.4s, v0.4s, v11.4s\n"
+ "fmla v25.4s, v1.4s, v11.4s\n"
+ "fmla v24.4s, v0.4s, v11.4s\n"
"tbz %x[n_channels], #1, 10f\n"
- "ld1 { v13.d }[0], [x20], #0x8\n"
+ "ld1 { v13.d }[0], [x23], #0x8\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x23], #0x4\n"
"b 11f\n"
"10:" // Oddments: Load input (0, 1): Bit 1: Unset
- "ld1 { v13.s }[0], [x20], #0x4\n"
+ "ld1 { v13.s }[0], [x23], #0x4\n"
"11:" // Oddments: Load input (0, 1): Bit 1: End
- "ldr x20, [x14, #0x40]\n"
- "fmla v23.4s, v1.4s, v13.4s\n"
- "fmla v24.4s, v0.4s, v13.4s\n"
- "add x20, x20, x13\n"
+ "fmla v31.4s, v1.4s, v13.4s\n"
+ "ldr x10, [x16, #0x40]\n"
+ "fmla v30.4s, v0.4s, v13.4s\n"
+ "add x10, x10, x14\n"
"tbz %x[n_channels], #1, 12f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x10], #0x8\n"
"tbz %x[n_channels], #0, 13f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x10], #0x4\n"
"b 13f\n"
"12:" // Oddments: Load input (0, 3): Bit 1: Unset
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x10], #0x4\n"
"13:" // Oddments: Load input (0, 3): Bit 1: End
- "ldr x20, [x14, #0x48]\n"
- "fmla v24.4s, v2.4s, v12.4s\n"
- "fmla v25.4s, v1.4s, v12.4s\n"
- "add x20, x20, x13\n"
+ "fmla v30.4s, v2.4s, v12.4s\n"
+ "ldr x9, [x16, #0x48]\n"
+ "fmla v29.4s, v1.4s, v12.4s\n"
+ "add x9, x9, x14\n"
"tbz %x[n_channels], #1, 14f\n"
- "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v10.d }[0], [x9], #0x8\n"
"tbz %x[n_channels], #0, 15f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x9], #0x4\n"
"b 15f\n"
"14:" // Oddments: Load input (2, 3): Bit 1: Unset
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v10.s }[0], [x9], #0x4\n"
"15:" // Oddments: Load input (2, 3): Bit 1: End
- "ldr x20, [x14, #0x50]\n"
- "fmla v24.4s, v8.4s, v10.4s\n"
- "fmla v25.4s, v7.4s, v10.4s\n"
- "add x20, x20, x13\n"
+ "fmla v30.4s, v8.4s, v10.4s\n"
+ "ldr x28, [x16, #0x50]\n"
+ "fmla v29.4s, v7.4s, v10.4s\n"
+ "add x28, x28, x14\n"
"fmla v27.4s, v5.4s, v10.4s\n"
- "fmla v28.4s, v4.4s, v10.4s\n"
- "fmla v30.4s, v2.4s, v10.4s\n"
- "fmla v31.4s, v1.4s, v10.4s\n"
+ "fmla v26.4s, v4.4s, v10.4s\n"
+ "fmla v24.4s, v2.4s, v10.4s\n"
+ "fmla v23.4s, v1.4s, v10.4s\n"
"tbz %x[n_channels], #1, 16f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x28], #0x8\n"
"tbz %x[n_channels], #0, 17f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x28], #0x4\n"
"b 17f\n"
"16:" // Oddments: Load input (1, 0): Bit 1: Unset
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x28], #0x4\n"
"17:" // Oddments: Load input (1, 0): Bit 1: End
- "ldr x20, [x14, #0x58]\n"
- "fmla v23.4s, v3.4s, v11.4s\n"
- "fmla v26.4s, v0.4s, v11.4s\n"
- "add x20, x20, x13\n"
+ "fmla v31.4s, v3.4s, v11.4s\n"
+ "ldr x27, [x16, #0x58]\n"
+ "fmla v28.4s, v0.4s, v11.4s\n"
+ "add x27, x27, x14\n"
"tbz %x[n_channels], #1, 18f\n"
- "ld1 { v13.d }[0], [x20], #0x8\n"
+ "ld1 { v13.d }[0], [x27], #0x8\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x27], #0x4\n"
"b 19f\n"
"18:" // Oddments: Load input (1, 4): Bit 1: Unset
- "ld1 { v13.s }[0], [x20], #0x4\n"
+ "ld1 { v13.s }[0], [x27], #0x4\n"
"19:" // Oddments: Load input (1, 4): Bit 1: End
- "ldr x20, [x14, #0x60]\n"
- "fmla v25.4s, v5.4s, v13.4s\n"
- "fmla v28.4s, v2.4s, v13.4s\n"
- "add x20, x20, x13\n"
+ "fmla v29.4s, v5.4s, v13.4s\n"
+ "ldr x26, [x16, #0x60]\n"
+ "fmla v26.4s, v2.4s, v13.4s\n"
+ "add x26, x26, x14\n"
"tbz %x[n_channels], #1, 20f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x26], #0x8\n"
"tbz %x[n_channels], #0, 21f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x26], #0x4\n"
"b 21f\n"
"20:" // Oddments: Load input (3, 0): Bit 1: Unset
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x26], #0x4\n"
"21:" // Oddments: Load input (3, 0): Bit 1: End
- "ldr x20, [x14, #0x68]\n"
- "fmla v26.4s, v6.4s, v12.4s\n"
- "fmla v29.4s, v3.4s, v12.4s\n"
- "add x20, x20, x13\n"
+ "fmla v28.4s, v6.4s, v12.4s\n"
+ "ldr x25, [x16, #0x68]\n"
+ "fmla v25.4s, v3.4s, v12.4s\n"
+ "add x25, x25, x14\n"
"tbz %x[n_channels], #1, 22f\n"
- "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v10.d }[0], [x25], #0x8\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x25], #0x4\n"
"b 23f\n"
"22:" // Oddments: Load input (3, 2): Bit 1: Unset
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v10.s }[0], [x25], #0x4\n"
"23:" // Oddments: Load input (3, 2): Bit 1: End
- "ldr x20, [x14, #0x70]\n"
- "fmla v26.4s, v8.4s, v10.4s\n"
+ "fmla v28.4s, v8.4s, v10.4s\n"
+ "ldr x24, [x16, #0x70]\n"
"fmla v27.4s, v7.4s, v10.4s\n"
- "add x20, x20, x13\n"
- "fmla v28.4s, v6.4s, v10.4s\n"
- "fmla v29.4s, v5.4s, v10.4s\n"
- "fmla v30.4s, v4.4s, v10.4s\n"
- "fmla v31.4s, v3.4s, v10.4s\n"
+ "add x24, x24, x14\n"
+ "fmla v26.4s, v6.4s, v10.4s\n"
+ "fmla v25.4s, v5.4s, v10.4s\n"
+ "fmla v24.4s, v4.4s, v10.4s\n"
+ "fmla v23.4s, v3.4s, v10.4s\n"
"tbz %x[n_channels], #1, 24f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x24], #0x8\n"
"tbz %x[n_channels], #0, 25f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x24], #0x4\n"
"b 25f\n"
"24:" // Oddments: Load input (3, 4): Bit 1: Unset
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x24], #0x4\n"
"25:" // Oddments: Load input (3, 4): Bit 1: End
- "ldr x20, [x14, #0x78]\n"
- "fmla v28.4s, v8.4s, v11.4s\n"
- "fmla v31.4s, v5.4s, v11.4s\n"
- "add x20, x20, x13\n"
+ "fmla v26.4s, v8.4s, v11.4s\n"
+ "ldr x23, [x16, #0x78]\n"
+ "fmla v23.4s, v5.4s, v11.4s\n"
+ "add x23, x23, x14\n"
"tbz %x[n_channels], #1, 26f\n"
- "ld1 { v13.d }[0], [x20], #0x8\n"
+ "ld1 { v13.d }[0], [x23], #0x8\n"
"tbz %x[n_channels], #0, 27f\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x23], #0x4\n"
"b 27f\n"
"26:" // Oddments: Load input (4, 1): Bit 1: Unset
- "ld1 { v13.s }[0], [x20], #0x4\n"
+ "ld1 { v13.s }[0], [x23], #0x4\n"
"27:" // Oddments: Load input (4, 1): Bit 1: End
- "ldr x20, [x14, #0x80]\n"
- "fmla v29.4s, v7.4s, v13.4s\n"
- "fmla v30.4s, v6.4s, v13.4s\n"
- "add x20, x20, x13\n"
+ "fmla v25.4s, v7.4s, v13.4s\n"
+ "ldr x10, [x16, #0x80]\n"
+ "fmla v24.4s, v6.4s, v13.4s\n"
+ "add x10, x10, x14\n"
"tbz %x[n_channels], #1, 28f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x10], #0x8\n"
"tbz %x[n_channels], #0, 29f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x10], #0x4\n"
"b 29f\n"
"28:" // Oddments: Load input (1, 1): Bit 1: Unset
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x10], #0x4\n"
"29:" // Oddments: Load input (1, 1): Bit 1: End
- "ldr x20, [x14, #0x88]\n"
- "fmla v23.4s, v4.4s, v12.4s\n"
- "fmla v24.4s, v3.4s, v12.4s\n"
- "add x20, x20, x13\n"
- "fmla v26.4s, v1.4s, v12.4s\n"
+ "fmla v31.4s, v4.4s, v12.4s\n"
+ "ldr x9, [x16, #0x88]\n"
+ "fmla v30.4s, v3.4s, v12.4s\n"
+ "add x9, x9, x14\n"
+ "fmla v28.4s, v1.4s, v12.4s\n"
"fmla v27.4s, v0.4s, v12.4s\n"
"tbz %x[n_channels], #1, 30f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x9], #0x8\n"
"tbz %x[n_channels], #0, 31f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x9], #0x4\n"
"b 31f\n"
"30:" // Oddments: Load input (1, 3): Bit 1: Unset
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x9], #0x4\n"
"31:" // Oddments: Load input (1, 3): Bit 1: End
- "ldr x20, [x14, #0x90]\n"
- "fmla v24.4s, v5.4s, v11.4s\n"
- "fmla v25.4s, v4.4s, v11.4s\n"
- "add x20, x20, x13\n"
+ "fmla v30.4s, v5.4s, v11.4s\n"
+ "ldr x28, [x16, #0x90]\n"
+ "fmla v29.4s, v4.4s, v11.4s\n"
+ "add x28, x28, x14\n"
"fmla v27.4s, v2.4s, v11.4s\n"
- "fmla v28.4s, v1.4s, v11.4s\n"
+ "fmla v26.4s, v1.4s, v11.4s\n"
"tbz %x[n_channels], #1, 32f\n"
- "ld1 { v13.d }[0], [x20], #0x8\n"
+ "ld1 { v13.d }[0], [x28], #0x8\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x28], #0x4\n"
"b 33f\n"
"32:" // Oddments: Load input (4, 3): Bit 1: Unset
- "ld1 { v13.s }[0], [x20], #0x4\n"
+ "ld1 { v13.s }[0], [x28], #0x4\n"
"33:" // Oddments: Load input (4, 3): Bit 1: End
- "ldr x20, [x14, #0x98]\n"
- "fmla v30.4s, v8.4s, v13.4s\n"
- "fmla v31.4s, v7.4s, v13.4s\n"
- "add x20, x20, x13\n"
+ "fmla v24.4s, v8.4s, v13.4s\n"
+ "ldr x27, [x16, #0x98]\n"
+ "fmla v23.4s, v7.4s, v13.4s\n"
+ "add x27, x27, x14\n"
"tbz %x[n_channels], #1, 34f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x27], #0x8\n"
"tbz %x[n_channels], #0, 35f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x27], #0x4\n"
"b 35f\n"
"34:" // Oddments: Load input (3, 1): Bit 1: Unset
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x27], #0x4\n"
"35:" // Oddments: Load input (3, 1): Bit 1: End
- "ldr x20, [x14, #0xa0]\n"
- "fmla v26.4s, v7.4s, v12.4s\n"
+ "fmla v28.4s, v7.4s, v12.4s\n"
+ "ldr x26, [x16, #0xa0]\n"
"fmla v27.4s, v6.4s, v12.4s\n"
- "add x20, x20, x13\n"
- "fmla v29.4s, v4.4s, v12.4s\n"
- "fmla v30.4s, v3.4s, v12.4s\n"
+ "add x26, x26, x14\n"
+ "fmla v25.4s, v4.4s, v12.4s\n"
+ "fmla v24.4s, v3.4s, v12.4s\n"
"tbz %x[n_channels], #1, 36f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x26], #0x8\n"
"tbz %x[n_channels], #0, 37f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x26], #0x4\n"
"b 37f\n"
"36:" // Oddments: Load input (0, 2): Bit 1: Unset
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x26], #0x4\n"
"37:" // Oddments: Load input (0, 2): Bit 1: End
- "ldr x20, [x14, #0xa8]\n"
- "fmla v23.4s, v2.4s, v11.4s\n"
- "fmla v24.4s, v1.4s, v11.4s\n"
- "add x20, x20, x13\n"
- "fmla v25.4s, v0.4s, v11.4s\n"
+ "fmla v31.4s, v2.4s, v11.4s\n"
+ "ldr x25, [x16, #0xa8]\n"
+ "fmla v30.4s, v1.4s, v11.4s\n"
+ "add x25, x25, x14\n"
+ "fmla v29.4s, v0.4s, v11.4s\n"
"tbz %x[n_channels], #1, 38f\n"
- "ld1 { v13.d }[0], [x20], #0x8\n"
+ "ld1 { v13.d }[0], [x25], #0x8\n"
"tbz %x[n_channels], #0, 39f\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x25], #0x4\n"
"b 39f\n"
"38:" // Oddments: Load input (3, 3): Bit 1: Unset
- "ld1 { v13.s }[0], [x20], #0x4\n"
+ "ld1 { v13.s }[0], [x25], #0x4\n"
"39:" // Oddments: Load input (3, 3): Bit 1: End
- "ldr x20, [x14, #0xb0]\n"
"fmla v27.4s, v8.4s, v13.4s\n"
- "fmla v28.4s, v7.4s, v13.4s\n"
- "add x20, x20, x13\n"
- "fmla v30.4s, v5.4s, v13.4s\n"
- "fmla v31.4s, v4.4s, v13.4s\n"
+ "ldr x24, [x16, #0xb0]\n"
+ "fmla v26.4s, v7.4s, v13.4s\n"
+ "add x24, x24, x14\n"
+ "fmla v24.4s, v5.4s, v13.4s\n"
+ "fmla v23.4s, v4.4s, v13.4s\n"
"tbz %x[n_channels], #1, 40f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x24], #0x8\n"
"tbz %x[n_channels], #0, 41f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x24], #0x4\n"
"b 41f\n"
"40:" // Oddments: Load input (2, 0): Bit 1: Unset
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x24], #0x4\n"
"41:" // Oddments: Load input (2, 0): Bit 1: End
- "ldr x20, [x14, #0xb8]\n"
- "fmla v23.4s, v6.4s, v12.4s\n"
- "fmla v26.4s, v3.4s, v12.4s\n"
- "add x20, x20, x13\n"
- "fmla v29.4s, v0.4s, v12.4s\n"
+ "fmla v31.4s, v6.4s, v12.4s\n"
+ "ldr x23, [x16, #0xb8]\n"
+ "fmla v28.4s, v3.4s, v12.4s\n"
+ "add x23, x23, x14\n"
+ "fmla v25.4s, v0.4s, v12.4s\n"
"tbz %x[n_channels], #1, 42f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x23], #0x8\n"
"tbz %x[n_channels], #0, 43f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x23], #0x4\n"
"b 43f\n"
"42:" // Oddments: Load input (2, 4): Bit 1: Unset
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x23], #0x4\n"
"43:" // Oddments: Load input (2, 4): Bit 1: End
- "ldr x20, [x14, #0xc0]\n"
- "fmla v25.4s, v8.4s, v11.4s\n"
- "fmla v28.4s, v5.4s, v11.4s\n"
- "add x20, x20, x13\n"
- "fmla v31.4s, v2.4s, v11.4s\n"
+ "fmla v29.4s, v8.4s, v11.4s\n"
+ "ldr x10, [x16, #0xc0]\n"
+ "fmla v26.4s, v5.4s, v11.4s\n"
+ "add x10, x10, x14\n"
+ "fmla v23.4s, v2.4s, v11.4s\n"
"tbz %x[n_channels], #1, 44f\n"
- "ld1 { v13.d }[0], [x20], #0x8\n"
+ "ld1 { v13.d }[0], [x10], #0x8\n"
"tbz %x[n_channels], #0, 45f\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x10], #0x4\n"
"b 45f\n"
"44:" // Oddments: Load input (4, 2): Bit 1: Unset
- "ld1 { v13.s }[0], [x20], #0x4\n"
+ "ld1 { v13.s }[0], [x10], #0x4\n"
"45:" // Oddments: Load input (4, 2): Bit 1: End
- "fmla v29.4s, v8.4s, v13.4s\n"
- "fmla v30.4s, v7.4s, v13.4s\n"
- "fmax v23.4s, v23.4s, v18.4s\n"
- "fmla v31.4s, v6.4s, v13.4s\n"
- "fmax v24.4s, v24.4s, v18.4s\n"
- "fmax v25.4s, v25.4s, v18.4s\n"
- "fmax v26.4s, v26.4s, v18.4s\n"
- "fmax v27.4s, v27.4s, v18.4s\n"
- "fmax v28.4s, v28.4s, v18.4s\n"
- "fmax v29.4s, v29.4s, v18.4s\n"
- "fmax v30.4s, v30.4s, v18.4s\n"
+ "fmla v25.4s, v8.4s, v13.4s\n"
+ "fmla v24.4s, v7.4s, v13.4s\n"
+ "fmla v23.4s, v6.4s, v13.4s\n"
"fmax v31.4s, v31.4s, v18.4s\n"
- "fmin v23.4s, v23.4s, v17.4s\n"
- "fmin v24.4s, v24.4s, v17.4s\n"
- "fmin v25.4s, v25.4s, v17.4s\n"
- "fmin v26.4s, v26.4s, v17.4s\n"
- "fmin v27.4s, v27.4s, v17.4s\n"
- "fmin v28.4s, v28.4s, v17.4s\n"
- "fmin v29.4s, v29.4s, v17.4s\n"
- "fmin v30.4s, v30.4s, v17.4s\n"
+ "fmax v30.4s, v30.4s, v18.4s\n"
+ "fmax v29.4s, v29.4s, v18.4s\n"
"fmin v31.4s, v31.4s, v17.4s\n"
+ "fmin v30.4s, v30.4s, v17.4s\n"
+ "fmin v29.4s, v29.4s, v17.4s\n"
+ "fmax v28.4s, v28.4s, v18.4s\n"
+ "fmax v27.4s, v27.4s, v18.4s\n"
+ "fmax v26.4s, v26.4s, v18.4s\n"
+ "fmin v28.4s, v28.4s, v17.4s\n"
+ "fmin v27.4s, v27.4s, v17.4s\n"
+ "fmin v26.4s, v26.4s, v17.4s\n"
+ "fmax v25.4s, v25.4s, v18.4s\n"
+ "fmax v24.4s, v24.4s, v18.4s\n"
+ "fmax v23.4s, v23.4s, v18.4s\n"
+ "fmin v25.4s, v25.4s, v17.4s\n"
+ "fmin v24.4s, v24.4s, v17.4s\n"
+ "fmin v23.4s, v23.4s, v17.4s\n"
"tbz %x[n_channels], #1, 46f\n"
- "ldr x23, [x16, #0x0]\n"
- "add x23, x23, x12\n"
- "st1 { v23.d }[0], [x23]\n"
- "ldr x22, [x16, #0x8]\n"
- "ldr x21, [x16, #0x10]\n"
- "ldr x20, [x16, #0x18]\n"
+ "ldr x22, [x17, #0x0]\n"
+ "ldr x21, [x17, #0x8]\n"
"add x22, x22, x12\n"
+ "ldr x20, [x17, #0x10]\n"
+ "ldr x19, [x17, #0x18]\n"
"add x21, x21, x12\n"
- "ldr x23, [x16, #0x20]\n"
+ "st1 { v31.d }[0], [x22]\n"
"add x20, x20, x12\n"
- "add x23, x23, x12\n"
- "st1 { v24.d }[0], [x22]\n"
- "st1 { v25.d }[0], [x21]\n"
- "ldr x22, [x16, #0x28]\n"
- "ldr x21, [x16, #0x30]\n"
+ "st1 { v30.d }[0], [x21]\n"
+ "ldr x22, [x17, #0x20]\n"
+ "add x19, x19, x12\n"
+ "st1 { v29.d }[0], [x20]\n"
"add x22, x22, x12\n"
- "st1 { v26.d }[0], [x20]\n"
- "ldr x20, [x16, #0x38]\n"
+ "st1 { v28.d }[0], [x19]\n"
+ "ldr x21, [x17, #0x28]\n"
"add x21, x21, x12\n"
+ "st1 { v27.d }[0], [x22]\n"
+ "ldr x20, [x17, #0x30]\n"
"add x20, x20, x12\n"
- "st1 { v27.d }[0], [x23]\n"
- "ldr x23, [x16, #0x40]\n"
- "add x23, x23, x12\n"
+ "st1 { v26.d }[0], [x21]\n"
+ "ldr x19, [x17, #0x38]\n"
+ "add x19, x19, x12\n"
+ "st1 { v25.d }[0], [x20]\n"
+ "ldr x22, [x17, #0x40]\n"
+ "add x22, x22, x12\n"
+ "st1 { v24.d }[0], [x19]\n"
"add x12, x12, #0x8\n"
- "st1 { v28.d }[0], [x22]\n"
- "st1 { v29.d }[0], [x21]\n"
- "st1 { v30.d }[0], [x20]\n"
- "st1 { v31.d }[0], [x23]\n"
+ "st1 { v23.d }[0], [x22]\n"
"tbz %x[n_channels], #0, 47f\n"
- "ldr x23, [x16, #0x0]\n"
- "add x23, x23, x12\n"
- "st1 { v23.s }[2], [x23]\n"
- "ldr x22, [x16, #0x8]\n"
- "ldr x21, [x16, #0x10]\n"
- "ldr x20, [x16, #0x18]\n"
+ "ldr x22, [x17, #0x0]\n"
+ "ldr x21, [x17, #0x8]\n"
"add x22, x22, x12\n"
+ "ldr x20, [x17, #0x10]\n"
+ "ldr x19, [x17, #0x18]\n"
"add x21, x21, x12\n"
- "ldr x23, [x16, #0x20]\n"
+ "st1 { v31.s }[2], [x22]\n"
"add x20, x20, x12\n"
- "add x23, x23, x12\n"
- "st1 { v24.s }[2], [x22]\n"
- "st1 { v25.s }[2], [x21]\n"
- "ldr x22, [x16, #0x28]\n"
- "ldr x21, [x16, #0x30]\n"
+ "st1 { v30.s }[2], [x21]\n"
+ "ldr x22, [x17, #0x20]\n"
+ "add x19, x19, x12\n"
+ "st1 { v29.s }[2], [x20]\n"
"add x22, x22, x12\n"
- "st1 { v26.s }[2], [x20]\n"
- "ldr x20, [x16, #0x38]\n"
+ "st1 { v28.s }[2], [x19]\n"
+ "ldr x21, [x17, #0x28]\n"
"add x21, x21, x12\n"
+ "st1 { v27.s }[2], [x22]\n"
+ "ldr x20, [x17, #0x30]\n"
"add x20, x20, x12\n"
- "st1 { v27.s }[2], [x23]\n"
- "ldr x23, [x16, #0x40]\n"
- "add x23, x23, x12\n"
- "st1 { v28.s }[2], [x22]\n"
- "st1 { v29.s }[2], [x21]\n"
- "st1 { v30.s }[2], [x20]\n"
- "st1 { v31.s }[2], [x23]\n"
+ "st1 { v26.s }[2], [x21]\n"
+ "ldr x19, [x17, #0x38]\n"
+ "add x19, x19, x12\n"
+ "st1 { v25.s }[2], [x20]\n"
+ "ldr x22, [x17, #0x40]\n"
+ "add x22, x22, x12\n"
+ "st1 { v24.s }[2], [x19]\n"
+ "st1 { v23.s }[2], [x22]\n"
"b 47f\n"
"46:" // Oddments: Store: Bit 1: Unset
- "ldr x23, [x16, #0x0]\n"
- "add x23, x23, x12\n"
- "st1 { v23.s }[0], [x23]\n"
- "ldr x22, [x16, #0x8]\n"
- "ldr x21, [x16, #0x10]\n"
- "ldr x20, [x16, #0x18]\n"
+ "ldr x22, [x17, #0x0]\n"
"add x22, x22, x12\n"
+ "ldr x21, [x17, #0x8]\n"
+ "ldr x20, [x17, #0x10]\n"
"add x21, x21, x12\n"
- "ldr x23, [x16, #0x20]\n"
+ "st1 { v31.s }[0], [x22]\n"
+ "ldr x19, [x17, #0x18]\n"
"add x20, x20, x12\n"
- "add x23, x23, x12\n"
- "st1 { v24.s }[0], [x22]\n"
- "st1 { v25.s }[0], [x21]\n"
- "ldr x22, [x16, #0x28]\n"
- "ldr x21, [x16, #0x30]\n"
+ "st1 { v30.s }[0], [x21]\n"
+ "add x19, x19, x12\n"
+ "st1 { v29.s }[0], [x20]\n"
+ "ldr x22, [x17, #0x20]\n"
"add x22, x22, x12\n"
- "st1 { v26.s }[0], [x20]\n"
- "ldr x20, [x16, #0x38]\n"
+ "st1 { v28.s }[0], [x19]\n"
+ "ldr x21, [x17, #0x28]\n"
"add x21, x21, x12\n"
+ "st1 { v27.s }[0], [x22]\n"
+ "ldr x20, [x17, #0x30]\n"
"add x20, x20, x12\n"
- "st1 { v27.s }[0], [x23]\n"
- "ldr x23, [x16, #0x40]\n"
- "add x23, x23, x12\n"
- "st1 { v28.s }[0], [x22]\n"
- "st1 { v29.s }[0], [x21]\n"
- "st1 { v30.s }[0], [x20]\n"
- "st1 { v31.s }[0], [x23]\n"
+ "st1 { v26.s }[0], [x21]\n"
+ "ldr x19, [x17, #0x38]\n"
+ "add x19, x19, x12\n"
+ "st1 { v25.s }[0], [x20]\n"
+ "ldr x22, [x17, #0x40]\n"
+ "add x22, x22, x12\n"
+ "st1 { v24.s }[0], [x19]\n"
+ "st1 { v23.s }[0], [x22]\n"
"47:" // Oddments: Store: Bit 1: End
+
"48:" // End
+
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
index 6d2b6ee998..6faacf144a 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,1142 +87,1143 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
);
__asm__ __volatile__(
- "mov x27, #0x0\n"
+ "mov x4, #0x0\n"
"mov x26, #0x0\n"
"1:" // Tile loop
- "str x27, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "str x4, [%x[params_struct], %[offsetof_args_tile_i]]\n"
"mov x25, #0x4\n"
- "mov x23, #0x4\n"
"str x26, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x24, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x22, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "mul x21, x27, x24\n" // offset = tile_i * ld_input_row
- "ldr x4, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "ldr x5, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "mul x20, x27, x22\n" // offset = tile_i * ld_output_row
- "mov x6, #0x10\n" // cntb _, ALL, #1
- "madd x21, x26, x4, x21\n" // offset += tile_j * ld_input_col
- "ldr x7, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "lsl x4, x4, #0x2\n"
- "ldr x8, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "madd x20, x26, x5, x20\n" // offset += tile_j * ld_output_col
- "lsl x5, x5, #0x2\n"
- "add x17, x4, x4\n"
- "ldr x16, [%x[params_struct], %[offsetof_args_params]]\n"
- "mul x21, x21, x25\n" // offset *= kernel_stride * output_size
- "add x7, x7, x21, LSL #2\n" // inptr[0] += offset * sizeof(float)
- "add x15, x7, x24, LSL #2\n"
- "mul x20, x20, x23\n" // offset *= output_tile_size
- "add x14, x15, x24, LSL #2\n"
- "add x8, x8, x20, LSL #2\n" // outptrs[0] += offset * sizeof(float)
- "lsr x13, %x[n_channels], #0x2\n"
- "add x12, x14, x24, LSL #2\n"
- "add x11, x17, x4\n"
- "add x10, x8, x22, LSL #2\n"
- "add x9, x12, x24, LSL #2\n"
- "add x28, x11, x4\n"
- "add x27, x10, x22, LSL #2\n"
- "add x23, x5, x5\n"
- "add x20, %x[params_struct], %[offsetof_args_min]\n"
- "ld1r { v15.4s }, [x20]\n"
- "add x20, %x[params_struct], %[offsetof_args_max]\n"
- "ld1r { v14.4s }, [x20]\n"
- "add x26, x9, x24, LSL #2\n"
- "add x25, x28, x4\n"
- "add x24, x27, x22, LSL #2\n"
- "add x22, x23, x5\n"
- "mov x21, #0x0\n"
- "sub x20, XZR, x6\n"
- "cbz x13, 4f\n"
- "ldr q13, [x16, #0x0]\n"
- "ldr q0, [x16, #0x10]\n"
- "cmp x6, x13, LSL #4\n"
- "ldr q1, [x16, #0x20]\n"
- "ldr q2, [x16, #0x30]\n"
- "ldr q3, [x16, #0x40]\n"
- "ldr q4, [x16, #0x50]\n"
- "ldr q5, [x16, #0x60]\n"
- "ldr q6, [x16, #0x70]\n"
- "ldr q7, [x16, #0x80]\n"
- "ldr q8, [x16, #0x90]\n"
- "add x16, x16, #0xa0\n"
- "ldr q9, [x14, x17]\n"
- "ld1 { v10.4s }, [x7]\n"
- "ldr q11, [x7, x25]\n"
- "ldr q12, [x14, x11]\n"
+ "mov x24, #0x4\n"
+ "ldr x5, [%x[params_struct], %[offsetof_args_params]]\n"
+ "add x23, %x[params_struct], %[offsetof_args_min]\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "add x21, %x[params_struct], %[offsetof_args_max]\n"
+ "ldr x6, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "mov x7, #0x0\n"
+ "ldr x8, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "mul x19, x4, x22\n" // offset = tile_i * ld_input_row
+ "ldr x20, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "madd x19, x26, x6, x19\n" // offset += tile_j * ld_input_col
+ "ldr x17, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+ "mul x19, x19, x25\n" // offset *= kernel_stride * output_size
+ "ldr x16, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "add x8, x8, x19, LSL #2\n" // inptr[0] += offset * sizeof(float)
+ "ld1r { v15.4s }, [x23]\n"
+ "add x15, x8, x22, LSL #2\n"
+ "ld1r { v14.4s }, [x21]\n"
+ "add x14, x15, x22, LSL #2\n"
+ "lsl x6, x6, #0x2\n"
+ "add x13, x14, x22, LSL #2\n"
+ "add x12, x13, x22, LSL #2\n"
+ "add x11, x12, x22, LSL #2\n"
+ "add x10, x6, x6\n"
+ "add x9, x10, x6\n"
+ "add x28, x9, x6\n"
+ "add x27, x28, x6\n"
+ "mul x19, x4, x20\n" // offset = tile_i * ld_output_row
+ "madd x19, x26, x17, x19\n" // offset += tile_j * ld_output_col
+ "mul x19, x19, x24\n" // offset *= output_tile_size
+ "add x16, x16, x19, LSL #2\n" // outptrs[0] += offset * sizeof(float)
+ "add x26, x16, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
+ "add x24, x25, x20, LSL #2\n"
+ "lsl x17, x17, #0x2\n"
+ "add x23, x17, x17\n"
+ "add x22, x23, x17\n"
+ "mov x21, #0x10\n" // cntb _, ALL, #1
+ "sub x20, XZR, x21\n"
+ "lsr x19, %x[n_channels], #0x2\n"
+ "cbz x19, 4f\n"
+ "ldr q13, [x5, #0x0]\n"
+ "ldr q0, [x5, #0x10]\n"
+ "cmp x21, x19, LSL #4\n"
+ "ldr q1, [x5, #0x20]\n"
+ "ldr q2, [x5, #0x30]\n"
+ "ldr q3, [x5, #0x40]\n"
+ "ldr q4, [x5, #0x50]\n"
+ "ldr q5, [x5, #0x60]\n"
+ "ldr q6, [x5, #0x70]\n"
+ "ldr q7, [x5, #0x80]\n"
+ "ldr q8, [x5, #0x90]\n"
+ "add x5, x5, #0xa0\n"
+ "ldr q9, [x14, x10]\n"
+ "ld1 { v10.4s }, [x8]\n"
+ "ldr q11, [x8, x27]\n"
+ "ldr q12, [x14, x9]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
- "mov v21.16b, v13.16b\n fmla v21.4s, v4.4s, v9.4s\n"
- "mov v16.16b, v13.16b\n fmla v16.4s, v8.4s, v9.4s\n"
- "add x6, x6, #0x10\n"
- "cmp x6, x13, LSL #4\n"
- "mov v22.16b, v13.16b\n fmla v22.4s, v3.4s, v9.4s\n"
- "mov v25.16b, v13.16b\n fmla v25.4s, v1.4s, v9.4s\n"
+ "mov v31.16b, v13.16b\n fmla v31.4s, v8.4s, v9.4s\n"
"add x20, x20, #0x10\n"
+ "mov v30.16b, v13.16b\n fmla v30.4s, v7.4s, v9.4s\n"
+ "add x7, x7, #0x10\n"
+ "mov v29.16b, v13.16b\n fmla v29.4s, v6.4s, v9.4s\n"
"add x21, x21, #0x10\n"
- "mov v26.16b, v13.16b\n fmla v26.4s, v0.4s, v9.4s\n"
- "fmla v21.4s, v5.4s, v12.4s\n"
- "mov v17.16b, v13.16b\n fmla v17.4s, v7.4s, v9.4s\n"
- "mov v18.16b, v13.16b\n fmla v18.4s, v6.4s, v9.4s\n"
- "mov v20.16b, v13.16b\n fmla v20.4s, v5.4s, v9.4s\n"
- "mov v24.16b, v13.16b\n fmla v24.4s, v2.4s, v9.4s\n"
- "ldr q9, [x12, x17]\n"
- "fmla v16.4s, v0.4s, v10.4s\n"
- "ld1 { v10.4s }, [x26]\n"
- "mov v19.16b, v13.16b\n fmla v19.4s, v2.4s, v11.4s\n"
- "ldr q11, [x26, x25]\n"
- "fmla v22.4s, v4.4s, v12.4s\n"
- "fmla v25.4s, v2.4s, v12.4s\n"
- "fmla v26.4s, v1.4s, v12.4s\n"
- "mov v28.16b, v13.16b\n fmla v28.4s, v6.4s, v10.4s\n"
- "ldr q10, [x12, x11]\n"
- "fmla v21.4s, v7.4s, v9.4s\n"
- "fmla v17.4s, v8.4s, v12.4s\n"
- "fmla v18.4s, v7.4s, v12.4s\n"
- "fmla v19.4s, v6.4s, v12.4s\n"
- "mov v23.16b, v13.16b\n fmla v23.4s, v3.4s, v12.4s\n"
- "mov v27.16b, v13.16b\n fmla v27.4s, v0.4s, v12.4s\n"
- "ldr q12, [x7, x4]\n"
- "mov v31.16b, v13.16b\n fmla v31.4s, v8.4s, v11.4s\n"
- "ldr q11, [x7, x28]\n"
- "fmla v22.4s, v6.4s, v9.4s\n"
- "fmla v25.4s, v4.4s, v9.4s\n"
- "fmla v26.4s, v3.4s, v9.4s\n"
- "fmla v20.4s, v8.4s, v9.4s\n"
- "fmla v24.4s, v5.4s, v9.4s\n"
- "fmla v28.4s, v2.4s, v9.4s\n"
- "fmla v21.4s, v8.4s, v10.4s\n"
- "fmla v16.4s, v1.4s, v12.4s\n"
- "fmla v17.4s, v0.4s, v12.4s\n"
- "ldr q12, [x15, x25]\n"
- "fmla v18.4s, v2.4s, v11.4s\n"
- "fmla v19.4s, v1.4s, v11.4s\n"
- "ld1 { v11.4s }, [x9]\n"
- "fmla v22.4s, v7.4s, v10.4s\n"
- "fmla v23.4s, v6.4s, v10.4s\n"
- "fmla v25.4s, v5.4s, v10.4s\n"
- "fmla v26.4s, v4.4s, v10.4s\n"
- "fmla v27.4s, v3.4s, v10.4s\n"
+ "mov v27.16b, v13.16b\n fmla v27.4s, v5.4s, v9.4s\n"
+ "cmp x21, x19, LSL #4\n"
+ "mov v26.16b, v13.16b\n fmla v26.4s, v4.4s, v9.4s\n"
+ "mov v25.16b, v13.16b\n fmla v25.4s, v3.4s, v9.4s\n"
+ "mov v23.16b, v13.16b\n fmla v23.4s, v2.4s, v9.4s\n"
+ "mov v22.16b, v13.16b\n fmla v22.4s, v1.4s, v9.4s\n"
+ "mov v21.16b, v13.16b\n fmla v21.4s, v0.4s, v9.4s\n"
+ "ldr q9, [x13, x10]\n"
"fmla v31.4s, v0.4s, v10.4s\n"
- "fmla v24.4s, v6.4s, v11.4s\n"
- "fmla v28.4s, v3.4s, v11.4s\n"
- "ldr q11, [x9, x25]\n"
- "fmla v19.4s, v5.4s, v12.4s\n"
- "fmla v23.4s, v2.4s, v12.4s\n"
- "ldr q12, [x15, x11]\n"
- "fmla v27.4s, v8.4s, v11.4s\n"
- "fmla v31.4s, v5.4s, v11.4s\n"
- "mov v29.16b, v13.16b\n fmla v29.4s, v1.4s, v9.4s\n"
- "mov v30.16b, v13.16b\n fmla v30.4s, v0.4s, v9.4s\n"
+ "ld1 { v10.4s }, [x11]\n"
+ "mov v28.16b, v13.16b\n fmla v28.4s, v2.4s, v11.4s\n"
+ "ldr q11, [x11, x27]\n"
+ "fmla v30.4s, v8.4s, v12.4s\n"
+ "fmla v29.4s, v7.4s, v12.4s\n"
+ "fmla v26.4s, v5.4s, v12.4s\n"
+ "fmla v28.4s, v6.4s, v12.4s\n"
+ "fmla v25.4s, v4.4s, v12.4s\n"
+ "mov v24.16b, v13.16b\n fmla v24.4s, v3.4s, v12.4s\n"
+ "fmla v22.4s, v2.4s, v12.4s\n"
+ "fmla v21.4s, v1.4s, v12.4s\n"
+ "mov v20.16b, v13.16b\n fmla v20.4s, v0.4s, v12.4s\n"
+ "ldr q12, [x8, x6]\n"
+ "mov v19.16b, v13.16b\n fmla v19.4s, v6.4s, v10.4s\n"
+ "ldr q10, [x13, x9]\n"
+ "mov v16.16b, v13.16b\n fmla v16.4s, v8.4s, v11.4s\n"
+ "ldr q11, [x8, x28]\n"
+ "fmla v27.4s, v8.4s, v9.4s\n"
+ "fmla v26.4s, v7.4s, v9.4s\n"
+ "fmla v25.4s, v6.4s, v9.4s\n"
+ "fmla v23.4s, v5.4s, v9.4s\n"
+ "fmla v22.4s, v4.4s, v9.4s\n"
+ "fmla v21.4s, v3.4s, v9.4s\n"
+ "fmla v19.4s, v2.4s, v9.4s\n"
+ "mov v18.16b, v13.16b\n fmla v18.4s, v1.4s, v9.4s\n"
+ "mov v17.16b, v13.16b\n fmla v17.4s, v0.4s, v9.4s\n"
"ld1 { v9.4s }, [x15]\n"
- "fmla v29.4s, v2.4s, v10.4s\n"
- "fmla v30.4s, v1.4s, v10.4s\n"
- "ldr q10, [x15, x17]\n"
- "fmla v20.4s, v0.4s, v9.4s\n"
- "fmla v21.4s, v1.4s, v10.4s\n"
- "fmla v16.4s, v3.4s, v9.4s\n"
- "ldr q11, [x26, x4]\n"
- "fmla v17.4s, v4.4s, v10.4s\n"
- "fmla v18.4s, v3.4s, v10.4s\n"
- "fmla v22.4s, v0.4s, v10.4s\n"
- "fmla v20.4s, v2.4s, v10.4s\n"
- "fmla v21.4s, v2.4s, v12.4s\n"
- "fmla v16.4s, v5.4s, v10.4s\n"
- "ldr q10, [x14, x4]\n"
- "fmla v17.4s, v5.4s, v12.4s\n"
- "fmla v18.4s, v4.4s, v12.4s\n"
- "fmla v19.4s, v3.4s, v12.4s\n"
- "fmla v22.4s, v1.4s, v12.4s\n"
- "fmla v23.4s, v0.4s, v12.4s\n"
- "ldr q12, [x14, x28]\n"
- "fmla v28.4s, v7.4s, v11.4s\n"
- "fmla v29.4s, v6.4s, v11.4s\n"
- "ldr q11, [x26, x28]\n"
- "fmla v20.4s, v4.4s, v10.4s\n"
- "fmla v21.4s, v3.4s, v10.4s\n"
- "fmla v24.4s, v1.4s, v10.4s\n"
- "fmla v25.4s, v0.4s, v10.4s\n"
- "fmla v16.4s, v7.4s, v10.4s\n"
- "fmla v17.4s, v6.4s, v10.4s\n"
- "ldr q10, [x7, x17]\n"
- "fmla v30.4s, v8.4s, v11.4s\n"
- "fmla v31.4s, v7.4s, v11.4s\n"
- "ldr q11, [x12, x4]\n"
- "fmla v18.4s, v8.4s, v12.4s\n"
- "fmla v19.4s, v7.4s, v12.4s\n"
- "fmla v22.4s, v5.4s, v12.4s\n"
- "fmla v23.4s, v4.4s, v12.4s\n"
- "fmla v26.4s, v2.4s, v12.4s\n"
- "fmla v27.4s, v1.4s, v12.4s\n"
- "ldr q12, [x7, x11]\n"
- "add x7, x7, #0x10\n"
- "fmla v20.4s, v7.4s, v11.4s\n"
- "fmla v21.4s, v6.4s, v11.4s\n"
- "fmla v24.4s, v4.4s, v11.4s\n"
- "fmla v25.4s, v3.4s, v11.4s\n"
+ "fmla v31.4s, v1.4s, v12.4s\n"
+ "ldr q13, [x5, #0x0]\n"
+ "fmla v30.4s, v0.4s, v12.4s\n"
+ "ldr q12, [x15, x27]\n"
+ "fmla v29.4s, v2.4s, v11.4s\n"
"fmla v28.4s, v1.4s, v11.4s\n"
- "fmla v29.4s, v0.4s, v11.4s\n"
- "ldr q11, [x12, x28]\n"
- "fmla v16.4s, v2.4s, v10.4s\n"
- "fmla v17.4s, v1.4s, v10.4s\n"
- "fmla v18.4s, v0.4s, v10.4s\n"
- "ld1 { v10.4s }, [x14]\n"
- "fmla v30.4s, v2.4s, v11.4s\n"
- "fmla v19.4s, v0.4s, v12.4s\n"
+ "ld1 { v11.4s }, [x12]\n"
+ "fmla v26.4s, v8.4s, v10.4s\n"
+ "fmla v25.4s, v7.4s, v10.4s\n"
+ "fmla v24.4s, v6.4s, v10.4s\n"
+ "fmla v22.4s, v5.4s, v10.4s\n"
+ "fmla v21.4s, v4.4s, v10.4s\n"
"fmla v20.4s, v3.4s, v10.4s\n"
- "fmla v24.4s, v0.4s, v10.4s\n"
- "fmla v22.4s, v8.4s, v11.4s\n"
- "fmla v23.4s, v7.4s, v11.4s\n"
- "fmla v26.4s, v5.4s, v11.4s\n"
- "fmla v27.4s, v4.4s, v11.4s\n"
- "fmla v31.4s, v1.4s, v11.4s\n"
- "ldr q11, [x9, x17]\n"
- "fmla v17.4s, v2.4s, v12.4s\n"
- "fmla v18.4s, v1.4s, v12.4s\n"
- "ldr q12, [x14, x25]\n"
- "add x14, x14, #0x10\n"
- "fmla v16.4s, v6.4s, v10.4s\n"
- "ld1 { v10.4s }, [x12]\n"
- "fmla v29.4s, v4.4s, v11.4s\n"
- "fmla v30.4s, v3.4s, v11.4s\n"
- "fmla v19.4s, v8.4s, v12.4s\n"
- "fmla v23.4s, v5.4s, v12.4s\n"
- "fmla v27.4s, v2.4s, v12.4s\n"
- "ldr q12, [x12, x25]\n"
- "add x12, x12, #0x10\n"
- "fmla v20.4s, v6.4s, v10.4s\n"
- "fmla v24.4s, v3.4s, v10.4s\n"
- "fmla v28.4s, v0.4s, v10.4s\n"
- "ldr q10, [x26, x17]\n"
- "fmla v31.4s, v2.4s, v12.4s\n"
- "fmla v29.4s, v7.4s, v10.4s\n"
+ "fmla v18.4s, v2.4s, v10.4s\n"
+ "fmla v17.4s, v1.4s, v10.4s\n"
+ "fmla v16.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x15, x10]\n"
+ "fmla v31.4s, v3.4s, v9.4s\n"
+ "fmla v27.4s, v0.4s, v9.4s\n"
+ "fmla v28.4s, v5.4s, v12.4s\n"
+ "fmla v24.4s, v2.4s, v12.4s\n"
+ "ldr q12, [x15, x9]\n"
+ "fmla v23.4s, v6.4s, v11.4s\n"
+ "fmla v19.4s, v3.4s, v11.4s\n"
+ "ldr q11, [x12, x27]\n"
+ "fmla v31.4s, v5.4s, v10.4s\n"
+ "fmla v30.4s, v4.4s, v10.4s\n"
+ "fmla v29.4s, v3.4s, v10.4s\n"
+ "fmla v27.4s, v2.4s, v10.4s\n"
+ "fmla v26.4s, v1.4s, v10.4s\n"
+ "fmla v25.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x14, x6]\n"
+ "fmla v20.4s, v8.4s, v11.4s\n"
+ "fmla v16.4s, v5.4s, v11.4s\n"
+ "ldr q11, [x11, x6]\n"
+ "fmla v30.4s, v5.4s, v12.4s\n"
+ "fmla v29.4s, v4.4s, v12.4s\n"
+ "fmla v28.4s, v3.4s, v12.4s\n"
+ "fmla v26.4s, v2.4s, v12.4s\n"
+ "fmla v25.4s, v1.4s, v12.4s\n"
+ "fmla v24.4s, v0.4s, v12.4s\n"
+ "ldr q12, [x14, x28]\n"
+ "fmla v19.4s, v7.4s, v11.4s\n"
+ "fmla v18.4s, v6.4s, v11.4s\n"
+ "ldr q11, [x11, x28]\n"
+ "fmla v31.4s, v7.4s, v10.4s\n"
"fmla v30.4s, v6.4s, v10.4s\n"
- "fmla v24.4s, v8.4s, v11.4s\n"
- "fmla v25.4s, v7.4s, v11.4s\n"
+ "fmla v27.4s, v4.4s, v10.4s\n"
+ "fmla v26.4s, v3.4s, v10.4s\n"
+ "fmla v23.4s, v1.4s, v10.4s\n"
+ "fmla v22.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x8, x10]\n"
+ "fmla v17.4s, v8.4s, v11.4s\n"
+ "fmla v16.4s, v7.4s, v11.4s\n"
+ "ldr q11, [x13, x6]\n"
+ "fmla v29.4s, v8.4s, v12.4s\n"
+ "fmla v28.4s, v7.4s, v12.4s\n"
+ "fmla v25.4s, v5.4s, v12.4s\n"
+ "fmla v24.4s, v4.4s, v12.4s\n"
+ "fmla v21.4s, v2.4s, v12.4s\n"
+ "fmla v20.4s, v1.4s, v12.4s\n"
+ "ldr q12, [x8, x9]\n"
+ "add x8, x8, #0x10\n"
+ "fmla v31.4s, v2.4s, v10.4s\n"
+ "fmla v30.4s, v1.4s, v10.4s\n"
+ "fmla v29.4s, v0.4s, v10.4s\n"
+ "ld1 { v10.4s }, [x14]\n"
+ "fmla v27.4s, v7.4s, v11.4s\n"
"fmla v26.4s, v6.4s, v11.4s\n"
- "fmla v28.4s, v5.4s, v11.4s\n"
- "ldr q11, [x9, x11]\n"
- "fmla v27.4s, v5.4s, v12.4s\n"
- "fmla v29.4s, v5.4s, v11.4s\n"
- "fmla v30.4s, v4.4s, v11.4s\n"
- "fmla v31.4s, v3.4s, v11.4s\n"
- "fmla v23.4s, v8.4s, v12.4s\n"
- "ldr q12, [x26, x11]\n"
- "fmla v28.4s, v8.4s, v10.4s\n"
- "ldr q10, [x15, x4]\n"
+ "fmla v23.4s, v4.4s, v11.4s\n"
+ "fmla v22.4s, v3.4s, v11.4s\n"
+ "fmla v19.4s, v1.4s, v11.4s\n"
+ "fmla v18.4s, v0.4s, v11.4s\n"
+ "ldr q11, [x13, x28]\n"
+ "fmla v30.4s, v2.4s, v12.4s\n"
+ "fmla v29.4s, v1.4s, v12.4s\n"
+ "fmla v28.4s, v0.4s, v12.4s\n"
+ "ldr q12, [x14, x27]\n"
+ "add x14, x14, #0x10\n"
+ "fmla v31.4s, v6.4s, v10.4s\n"
+ "ldr q9, [x14, x10]\n"
+ "fmla v27.4s, v3.4s, v10.4s\n"
+ "fmla v23.4s, v0.4s, v10.4s\n"
+ "ld1 { v10.4s }, [x13]\n"
"fmla v25.4s, v8.4s, v11.4s\n"
- "fmla v26.4s, v7.4s, v11.4s\n"
- "add x26, x26, #0x10\n"
- "fmla v27.4s, v6.4s, v11.4s\n"
+ "fmla v24.4s, v7.4s, v11.4s\n"
+ "fmla v21.4s, v5.4s, v11.4s\n"
+ "fmla v20.4s, v4.4s, v11.4s\n"
+ "fmla v17.4s, v2.4s, v11.4s\n"
+ "fmla v16.4s, v1.4s, v11.4s\n"
+ "ldr q11, [x12, x10]\n"
+ "fmla v28.4s, v8.4s, v12.4s\n"
+ "fmla v24.4s, v5.4s, v12.4s\n"
+ "fmla v20.4s, v2.4s, v12.4s\n"
+ "ldr q12, [x13, x27]\n"
+ "add x13, x13, #0x10\n"
+ "fmla v27.4s, v6.4s, v10.4s\n"
+ "fmla v23.4s, v3.4s, v10.4s\n"
+ "fmla v19.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x11, x10]\n"
+ "fmla v22.4s, v7.4s, v11.4s\n"
+ "fmla v21.4s, v6.4s, v11.4s\n"
+ "fmla v23.4s, v8.4s, v11.4s\n"
+ "fmla v19.4s, v5.4s, v11.4s\n"
+ "fmla v18.4s, v4.4s, v11.4s\n"
+ "fmla v17.4s, v3.4s, v11.4s\n"
+ "ldr q11, [x12, x9]\n"
+ "fmla v24.4s, v8.4s, v12.4s\n"
+ "fmla v20.4s, v5.4s, v12.4s\n"
+ "fmla v16.4s, v2.4s, v12.4s\n"
+ "ldr q12, [x11, x9]\n"
+ "add x11, x11, #0x10\n"
+ "fmla v19.4s, v8.4s, v10.4s\n"
+ "fmla v18.4s, v7.4s, v10.4s\n"
+ "fmla v17.4s, v6.4s, v10.4s\n"
+ "ldr q10, [x15, x6]\n"
+ "fmla v22.4s, v8.4s, v11.4s\n"
+ "fmla v21.4s, v7.4s, v11.4s\n"
+ "fmla v20.4s, v6.4s, v11.4s\n"
+ "fmla v18.4s, v5.4s, v11.4s\n"
+ "fmla v17.4s, v4.4s, v11.4s\n"
+ "fmla v16.4s, v3.4s, v11.4s\n"
"ldr q11, [x15, x28]\n"
- "fmla v29.4s, v8.4s, v12.4s\n"
"add x15, x15, #0x10\n"
- "fmla v30.4s, v7.4s, v12.4s\n"
- "fmla v31.4s, v6.4s, v12.4s\n"
- "ldr q12, [x9, x4]\n"
- "fmla v16.4s, v4.4s, v10.4s\n"
- "fmla v17.4s, v3.4s, v10.4s\n"
- "fmax v16.4s, v16.4s, v15.4s\n"
- "fmla v20.4s, v1.4s, v10.4s\n"
- "fmla v21.4s, v0.4s, v10.4s\n"
- "ldr q10, [x9, x28]\n"
- "ldr q9, [x14, x17]\n"
- "fmla v18.4s, v5.4s, v11.4s\n"
- "fmla v19.4s, v4.4s, v11.4s\n"
- "fmax v17.4s, v17.4s, v15.4s\n"
- "add x9, x9, #0x10\n"
- "fmla v22.4s, v2.4s, v11.4s\n"
- "ldr q13, [x16, #0x0]\n"
- "fmla v23.4s, v1.4s, v11.4s\n"
- "ldr q11, [x7, x25]\n"
- "ldr q0, [x16, #0x10]\n"
- "fmla v24.4s, v7.4s, v12.4s\n"
- "fmla v25.4s, v6.4s, v12.4s\n"
- "ldr q1, [x16, #0x20]\n"
- "fmla v28.4s, v4.4s, v12.4s\n"
- "fmla v29.4s, v3.4s, v12.4s\n"
- "ldr q12, [x14, x11]\n"
- "ldr q2, [x16, #0x30]\n"
- "fmla v26.4s, v8.4s, v10.4s\n"
- "ldr q3, [x16, #0x40]\n"
- "fmla v27.4s, v7.4s, v10.4s\n"
- "ldr q6, [x16, #0x70]\n"
- "fmla v30.4s, v5.4s, v10.4s\n"
- "ldr q5, [x16, #0x60]\n"
+ "fmla v18.4s, v8.4s, v12.4s\n"
"fmla v31.4s, v4.4s, v10.4s\n"
- "ld1 { v10.4s }, [x7]\n"
- "ldr q4, [x16, #0x50]\n"
- "fmax v18.4s, v18.4s, v15.4s\n"
- "fmax v19.4s, v19.4s, v15.4s\n"
- "fmax v20.4s, v20.4s, v15.4s\n"
- "fmax v21.4s, v21.4s, v15.4s\n"
- "fmax v22.4s, v22.4s, v15.4s\n"
- "fmax v23.4s, v23.4s, v15.4s\n"
- "fmax v24.4s, v24.4s, v15.4s\n"
- "fmax v25.4s, v25.4s, v15.4s\n"
- "fmax v26.4s, v26.4s, v15.4s\n"
- "fmax v27.4s, v27.4s, v15.4s\n"
- "fmax v28.4s, v28.4s, v15.4s\n"
- "fmax v29.4s, v29.4s, v15.4s\n"
- "fmax v30.4s, v30.4s, v15.4s\n"
+ "fmla v17.4s, v7.4s, v12.4s\n"
+ "fmla v16.4s, v6.4s, v12.4s\n"
+ "ldr q12, [x12, x6]\n"
+ "fmla v30.4s, v3.4s, v10.4s\n"
+ "fmla v27.4s, v1.4s, v10.4s\n"
+ "fmla v26.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x12, x28]\n"
+ "add x12, x12, #0x10\n"
+ "fmla v29.4s, v5.4s, v11.4s\n"
+ "ldr q0, [x5, #0x10]\n"
+ "fmla v28.4s, v4.4s, v11.4s\n"
+ "fmla v25.4s, v2.4s, v11.4s\n"
+ "ldr q2, [x5, #0x30]\n"
+ "fmla v24.4s, v1.4s, v11.4s\n"
+ "ldr q11, [x8, x27]\n"
+ "fmla v23.4s, v7.4s, v12.4s\n"
+ "ldr q1, [x5, #0x20]\n"
+ "fmla v22.4s, v6.4s, v12.4s\n"
+ "ldr q6, [x5, #0x70]\n"
+ "fmla v19.4s, v4.4s, v12.4s\n"
+ "fmla v18.4s, v3.4s, v12.4s\n"
+ "ldr q12, [x14, x9]\n"
+ "fmla v21.4s, v8.4s, v10.4s\n"
+ "ldr q3, [x5, #0x40]\n"
+ "fmla v20.4s, v7.4s, v10.4s\n"
+ "ldr q7, [x5, #0x80]\n"
+ "fmla v17.4s, v5.4s, v10.4s\n"
+ "ldr q5, [x5, #0x60]\n"
+ "fmla v16.4s, v4.4s, v10.4s\n"
+ "ld1 { v10.4s }, [x8]\n"
"fmax v31.4s, v31.4s, v15.4s\n"
- "fmin v16.4s, v16.4s, v14.4s\n"
- "fmin v17.4s, v17.4s, v14.4s\n"
- "st1 { v16.4s }, [x8]\n"
- "ldr q7, [x16, #0x80]\n"
- "fmin v18.4s, v18.4s, v14.4s\n"
- "fmin v19.4s, v19.4s, v14.4s\n"
- "str q17, [x8, x5]\n"
- "ldr q8, [x16, #0x90]\n"
- "fmin v20.4s, v20.4s, v14.4s\n"
- "fmin v21.4s, v21.4s, v14.4s\n"
- "str q18, [x8, x23]\n"
- "add x16, x16, #0xa0\n"
- "fmin v22.4s, v22.4s, v14.4s\n"
- "fmin v23.4s, v23.4s, v14.4s\n"
- "str q19, [x8, x22]\n"
- "add x8, x8, #0x10\n"
- "fmin v24.4s, v24.4s, v14.4s\n"
- "fmin v25.4s, v25.4s, v14.4s\n"
- "st1 { v20.4s }, [x10]\n"
- "fmin v26.4s, v26.4s, v14.4s\n"
- "fmin v27.4s, v27.4s, v14.4s\n"
- "str q21, [x10, x5]\n"
- "fmin v28.4s, v28.4s, v14.4s\n"
- "fmin v29.4s, v29.4s, v14.4s\n"
- "str q22, [x10, x23]\n"
- "fmin v30.4s, v30.4s, v14.4s\n"
+ "ldr q4, [x5, #0x50]\n"
+ "fmax v30.4s, v30.4s, v15.4s\n"
+ "ldr q8, [x5, #0x90]\n"
+ "add x5, x5, #0xa0\n"
"fmin v31.4s, v31.4s, v14.4s\n"
- "str q23, [x10, x22]\n"
- "add x10, x10, #0x10\n"
- "st1 { v24.4s }, [x27]\n"
- "str q25, [x27, x5]\n"
- "str q26, [x27, x23]\n"
- "str q27, [x27, x22]\n"
- "add x27, x27, #0x10\n"
- "st1 { v28.4s }, [x24]\n"
- "str q29, [x24, x5]\n"
- "str q30, [x24, x23]\n"
- "str q31, [x24, x22]\n"
+ "st1 { v31.4s }, [x16]\n"
+ "fmin v30.4s, v30.4s, v14.4s\n"
+ "fmax v29.4s, v29.4s, v15.4s\n"
+ "str q30, [x16, x17]\n"
+ "fmin v29.4s, v29.4s, v14.4s\n"
+ "fmax v28.4s, v28.4s, v15.4s\n"
+ "str q29, [x16, x23]\n"
+ "fmin v28.4s, v28.4s, v14.4s\n"
+ "fmax v27.4s, v27.4s, v15.4s\n"
+ "str q28, [x16, x22]\n"
+ "fmin v27.4s, v27.4s, v14.4s\n"
+ "add x16, x16, #0x10\n"
+ "fmax v26.4s, v26.4s, v15.4s\n"
+ "st1 { v27.4s }, [x26]\n"
+ "fmax v25.4s, v25.4s, v15.4s\n"
+ "fmax v24.4s, v24.4s, v15.4s\n"
+ "fmin v26.4s, v26.4s, v14.4s\n"
+ "str q26, [x26, x17]\n"
+ "fmin v25.4s, v25.4s, v14.4s\n"
+ "fmin v24.4s, v24.4s, v14.4s\n"
+ "str q25, [x26, x23]\n"
+ "fmax v23.4s, v23.4s, v15.4s\n"
+ "fmax v22.4s, v22.4s, v15.4s\n"
+ "str q24, [x26, x22]\n"
+ "add x26, x26, #0x10\n"
+ "fmax v21.4s, v21.4s, v15.4s\n"
+ "fmax v20.4s, v20.4s, v15.4s\n"
+ "fmin v23.4s, v23.4s, v14.4s\n"
+ "st1 { v23.4s }, [x25]\n"
+ "fmin v22.4s, v22.4s, v14.4s\n"
+ "fmin v21.4s, v21.4s, v14.4s\n"
+ "str q22, [x25, x17]\n"
+ "fmin v20.4s, v20.4s, v14.4s\n"
+ "fmax v19.4s, v19.4s, v15.4s\n"
+ "str q21, [x25, x23]\n"
+ "fmax v18.4s, v18.4s, v15.4s\n"
+ "str q20, [x25, x22]\n"
+ "fmin v19.4s, v19.4s, v14.4s\n"
+ "add x25, x25, #0x10\n"
+ "fmin v18.4s, v18.4s, v14.4s\n"
+ "st1 { v19.4s }, [x24]\n"
+ "fmax v17.4s, v17.4s, v15.4s\n"
+ "fmax v16.4s, v16.4s, v15.4s\n"
+ "str q18, [x24, x17]\n"
+ "fmin v17.4s, v17.4s, v14.4s\n"
+ "str q17, [x24, x23]\n"
+ "fmin v16.4s, v16.4s, v14.4s\n"
+ "str q16, [x24, x22]\n"
"add x24, x24, #0x10\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
- "mov v21.16b, v13.16b\n fmla v21.4s, v4.4s, v9.4s\n"
- "mov v16.16b, v13.16b\n fmla v16.4s, v8.4s, v9.4s\n"
- "mov v22.16b, v13.16b\n fmla v22.4s, v3.4s, v9.4s\n"
- "mov v25.16b, v13.16b\n fmla v25.4s, v1.4s, v9.4s\n"
- "mov v26.16b, v13.16b\n fmla v26.4s, v0.4s, v9.4s\n"
- "fmla v21.4s, v5.4s, v12.4s\n"
- "mov v17.16b, v13.16b\n fmla v17.4s, v7.4s, v9.4s\n"
- "mov v18.16b, v13.16b\n fmla v18.4s, v6.4s, v9.4s\n"
- "mov v20.16b, v13.16b\n fmla v20.4s, v5.4s, v9.4s\n"
- "mov v24.16b, v13.16b\n fmla v24.4s, v2.4s, v9.4s\n"
- "ldr q9, [x12, x17]\n"
- "fmla v16.4s, v0.4s, v10.4s\n"
- "ld1 { v10.4s }, [x26]\n"
- "mov v19.16b, v13.16b\n fmla v19.4s, v2.4s, v11.4s\n"
- "ldr q11, [x26, x25]\n"
- "fmla v22.4s, v4.4s, v12.4s\n"
- "fmla v25.4s, v2.4s, v12.4s\n"
- "fmla v26.4s, v1.4s, v12.4s\n"
- "mov v28.16b, v13.16b\n fmla v28.4s, v6.4s, v10.4s\n"
- "ldr q10, [x12, x11]\n"
- "fmla v21.4s, v7.4s, v9.4s\n"
- "fmla v17.4s, v8.4s, v12.4s\n"
- "fmla v18.4s, v7.4s, v12.4s\n"
- "fmla v19.4s, v6.4s, v12.4s\n"
- "mov v23.16b, v13.16b\n fmla v23.4s, v3.4s, v12.4s\n"
- "mov v27.16b, v13.16b\n fmla v27.4s, v0.4s, v12.4s\n"
- "ldr q12, [x7, x4]\n"
- "mov v31.16b, v13.16b\n fmla v31.4s, v8.4s, v11.4s\n"
- "ldr q11, [x7, x28]\n"
- "fmla v22.4s, v6.4s, v9.4s\n"
- "fmla v25.4s, v4.4s, v9.4s\n"
- "fmla v26.4s, v3.4s, v9.4s\n"
- "fmla v20.4s, v8.4s, v9.4s\n"
- "fmla v24.4s, v5.4s, v9.4s\n"
- "fmla v28.4s, v2.4s, v9.4s\n"
- "fmla v21.4s, v8.4s, v10.4s\n"
- "fmla v16.4s, v1.4s, v12.4s\n"
- "fmla v17.4s, v0.4s, v12.4s\n"
- "ldr q12, [x15, x25]\n"
- "fmla v18.4s, v2.4s, v11.4s\n"
- "fmla v19.4s, v1.4s, v11.4s\n"
- "ld1 { v11.4s }, [x9]\n"
- "fmla v22.4s, v7.4s, v10.4s\n"
- "fmla v23.4s, v6.4s, v10.4s\n"
- "fmla v25.4s, v5.4s, v10.4s\n"
- "fmla v26.4s, v4.4s, v10.4s\n"
- "fmla v27.4s, v3.4s, v10.4s\n"
+ "mov v31.16b, v13.16b\n fmla v31.4s, v8.4s, v9.4s\n"
+ "mov v30.16b, v13.16b\n fmla v30.4s, v7.4s, v9.4s\n"
+ "mov v29.16b, v13.16b\n fmla v29.4s, v6.4s, v9.4s\n"
+ "mov v27.16b, v13.16b\n fmla v27.4s, v5.4s, v9.4s\n"
+ "mov v26.16b, v13.16b\n fmla v26.4s, v4.4s, v9.4s\n"
+ "mov v25.16b, v13.16b\n fmla v25.4s, v3.4s, v9.4s\n"
+ "mov v23.16b, v13.16b\n fmla v23.4s, v2.4s, v9.4s\n"
+ "mov v22.16b, v13.16b\n fmla v22.4s, v1.4s, v9.4s\n"
+ "mov v21.16b, v13.16b\n fmla v21.4s, v0.4s, v9.4s\n"
+ "ldr q9, [x13, x10]\n"
"fmla v31.4s, v0.4s, v10.4s\n"
- "fmla v24.4s, v6.4s, v11.4s\n"
- "fmla v28.4s, v3.4s, v11.4s\n"
- "ldr q11, [x9, x25]\n"
- "fmla v19.4s, v5.4s, v12.4s\n"
- "fmla v23.4s, v2.4s, v12.4s\n"
- "ldr q12, [x15, x11]\n"
- "fmla v27.4s, v8.4s, v11.4s\n"
- "fmla v31.4s, v5.4s, v11.4s\n"
- "mov v29.16b, v13.16b\n fmla v29.4s, v1.4s, v9.4s\n"
- "mov v30.16b, v13.16b\n fmla v30.4s, v0.4s, v9.4s\n"
+ "ld1 { v10.4s }, [x11]\n"
+ "mov v28.16b, v13.16b\n fmla v28.4s, v2.4s, v11.4s\n"
+ "ldr q11, [x11, x27]\n"
+ "fmla v30.4s, v8.4s, v12.4s\n"
+ "fmla v29.4s, v7.4s, v12.4s\n"
+ "fmla v26.4s, v5.4s, v12.4s\n"
+ "fmla v28.4s, v6.4s, v12.4s\n"
+ "fmla v25.4s, v4.4s, v12.4s\n"
+ "mov v24.16b, v13.16b\n fmla v24.4s, v3.4s, v12.4s\n"
+ "fmla v22.4s, v2.4s, v12.4s\n"
+ "fmla v21.4s, v1.4s, v12.4s\n"
+ "mov v20.16b, v13.16b\n fmla v20.4s, v0.4s, v12.4s\n"
+ "ldr q12, [x8, x6]\n"
+ "mov v19.16b, v13.16b\n fmla v19.4s, v6.4s, v10.4s\n"
+ "ldr q10, [x13, x9]\n"
+ "mov v16.16b, v13.16b\n fmla v16.4s, v8.4s, v11.4s\n"
+ "ldr q11, [x8, x28]\n"
+ "fmla v27.4s, v8.4s, v9.4s\n"
+ "fmla v26.4s, v7.4s, v9.4s\n"
+ "fmla v25.4s, v6.4s, v9.4s\n"
+ "fmla v23.4s, v5.4s, v9.4s\n"
+ "fmla v22.4s, v4.4s, v9.4s\n"
+ "fmla v21.4s, v3.4s, v9.4s\n"
+ "fmla v19.4s, v2.4s, v9.4s\n"
+ "mov v18.16b, v13.16b\n fmla v18.4s, v1.4s, v9.4s\n"
+ "mov v17.16b, v13.16b\n fmla v17.4s, v0.4s, v9.4s\n"
"ld1 { v9.4s }, [x15]\n"
- "fmla v29.4s, v2.4s, v10.4s\n"
- "fmla v30.4s, v1.4s, v10.4s\n"
- "ldr q10, [x15, x17]\n"
- "fmla v20.4s, v0.4s, v9.4s\n"
- "fmla v21.4s, v1.4s, v10.4s\n"
- "fmla v16.4s, v3.4s, v9.4s\n"
- "ldr q11, [x26, x4]\n"
- "fmla v17.4s, v4.4s, v10.4s\n"
- "fmla v18.4s, v3.4s, v10.4s\n"
- "fmla v22.4s, v0.4s, v10.4s\n"
- "fmla v20.4s, v2.4s, v10.4s\n"
- "fmla v21.4s, v2.4s, v12.4s\n"
- "fmla v16.4s, v5.4s, v10.4s\n"
- "ldr q10, [x14, x4]\n"
- "fmla v17.4s, v5.4s, v12.4s\n"
- "fmla v18.4s, v4.4s, v12.4s\n"
- "fmla v19.4s, v3.4s, v12.4s\n"
- "fmla v22.4s, v1.4s, v12.4s\n"
- "fmla v23.4s, v0.4s, v12.4s\n"
- "ldr q12, [x14, x28]\n"
- "fmla v28.4s, v7.4s, v11.4s\n"
- "fmla v29.4s, v6.4s, v11.4s\n"
- "ldr q11, [x26, x28]\n"
- "fmla v20.4s, v4.4s, v10.4s\n"
- "fmla v21.4s, v3.4s, v10.4s\n"
- "fmla v24.4s, v1.4s, v10.4s\n"
- "fmla v25.4s, v0.4s, v10.4s\n"
- "fmla v16.4s, v7.4s, v10.4s\n"
- "fmla v17.4s, v6.4s, v10.4s\n"
- "ldr q10, [x7, x17]\n"
- "fmla v30.4s, v8.4s, v11.4s\n"
- "fmla v31.4s, v7.4s, v11.4s\n"
- "ldr q11, [x12, x4]\n"
- "fmla v18.4s, v8.4s, v12.4s\n"
- "fmla v19.4s, v7.4s, v12.4s\n"
- "fmla v22.4s, v5.4s, v12.4s\n"
- "fmla v23.4s, v4.4s, v12.4s\n"
- "fmla v26.4s, v2.4s, v12.4s\n"
- "fmla v27.4s, v1.4s, v12.4s\n"
- "ldr q12, [x7, x11]\n"
- "add x7, x7, #0x10\n"
- "fmla v20.4s, v7.4s, v11.4s\n"
- "fmla v21.4s, v6.4s, v11.4s\n"
- "fmla v24.4s, v4.4s, v11.4s\n"
- "fmla v25.4s, v3.4s, v11.4s\n"
+ "fmla v31.4s, v1.4s, v12.4s\n"
+ "fmla v30.4s, v0.4s, v12.4s\n"
+ "ldr q12, [x15, x27]\n"
+ "fmla v29.4s, v2.4s, v11.4s\n"
"fmla v28.4s, v1.4s, v11.4s\n"
- "fmla v29.4s, v0.4s, v11.4s\n"
- "ldr q11, [x12, x28]\n"
- "fmla v16.4s, v2.4s, v10.4s\n"
- "fmla v17.4s, v1.4s, v10.4s\n"
- "fmla v18.4s, v0.4s, v10.4s\n"
- "ld1 { v10.4s }, [x14]\n"
- "fmla v30.4s, v2.4s, v11.4s\n"
- "fmla v19.4s, v0.4s, v12.4s\n"
+ "ld1 { v11.4s }, [x12]\n"
+ "fmla v26.4s, v8.4s, v10.4s\n"
+ "fmla v25.4s, v7.4s, v10.4s\n"
+ "fmla v24.4s, v6.4s, v10.4s\n"
+ "fmla v22.4s, v5.4s, v10.4s\n"
+ "fmla v21.4s, v4.4s, v10.4s\n"
"fmla v20.4s, v3.4s, v10.4s\n"
- "fmla v24.4s, v0.4s, v10.4s\n"
- "fmla v22.4s, v8.4s, v11.4s\n"
- "fmla v23.4s, v7.4s, v11.4s\n"
- "fmla v26.4s, v5.4s, v11.4s\n"
- "fmla v27.4s, v4.4s, v11.4s\n"
- "fmla v31.4s, v1.4s, v11.4s\n"
- "ldr q11, [x9, x17]\n"
- "fmla v17.4s, v2.4s, v12.4s\n"
- "fmla v18.4s, v1.4s, v12.4s\n"
- "ldr q12, [x14, x25]\n"
- "add x14, x14, #0x10\n"
- "fmla v16.4s, v6.4s, v10.4s\n"
- "ld1 { v10.4s }, [x12]\n"
- "fmla v29.4s, v4.4s, v11.4s\n"
- "fmla v30.4s, v3.4s, v11.4s\n"
- "fmla v19.4s, v8.4s, v12.4s\n"
- "fmla v23.4s, v5.4s, v12.4s\n"
- "fmla v27.4s, v2.4s, v12.4s\n"
- "ldr q12, [x12, x25]\n"
- "add x12, x12, #0x10\n"
- "fmla v20.4s, v6.4s, v10.4s\n"
- "fmla v24.4s, v3.4s, v10.4s\n"
- "fmla v28.4s, v0.4s, v10.4s\n"
- "ldr q10, [x26, x17]\n"
- "fmla v31.4s, v2.4s, v12.4s\n"
- "fmla v29.4s, v7.4s, v10.4s\n"
+ "fmla v18.4s, v2.4s, v10.4s\n"
+ "fmla v17.4s, v1.4s, v10.4s\n"
+ "fmla v16.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x15, x10]\n"
+ "fmla v31.4s, v3.4s, v9.4s\n"
+ "fmla v27.4s, v0.4s, v9.4s\n"
+ "fmla v28.4s, v5.4s, v12.4s\n"
+ "fmla v24.4s, v2.4s, v12.4s\n"
+ "ldr q12, [x15, x9]\n"
+ "fmla v23.4s, v6.4s, v11.4s\n"
+ "fmla v19.4s, v3.4s, v11.4s\n"
+ "ldr q11, [x12, x27]\n"
+ "fmla v31.4s, v5.4s, v10.4s\n"
+ "fmla v30.4s, v4.4s, v10.4s\n"
+ "fmla v29.4s, v3.4s, v10.4s\n"
+ "fmla v27.4s, v2.4s, v10.4s\n"
+ "fmla v26.4s, v1.4s, v10.4s\n"
+ "fmla v25.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x14, x6]\n"
+ "fmla v20.4s, v8.4s, v11.4s\n"
+ "fmla v16.4s, v5.4s, v11.4s\n"
+ "ldr q11, [x11, x6]\n"
+ "fmla v30.4s, v5.4s, v12.4s\n"
+ "fmla v29.4s, v4.4s, v12.4s\n"
+ "fmla v28.4s, v3.4s, v12.4s\n"
+ "fmla v26.4s, v2.4s, v12.4s\n"
+ "fmla v25.4s, v1.4s, v12.4s\n"
+ "fmla v24.4s, v0.4s, v12.4s\n"
+ "ldr q12, [x14, x28]\n"
+ "fmla v19.4s, v7.4s, v11.4s\n"
+ "fmla v18.4s, v6.4s, v11.4s\n"
+ "ldr q11, [x11, x28]\n"
+ "fmla v31.4s, v7.4s, v10.4s\n"
"fmla v30.4s, v6.4s, v10.4s\n"
- "fmla v24.4s, v8.4s, v11.4s\n"
- "fmla v25.4s, v7.4s, v11.4s\n"
+ "fmla v27.4s, v4.4s, v10.4s\n"
+ "fmla v26.4s, v3.4s, v10.4s\n"
+ "fmla v23.4s, v1.4s, v10.4s\n"
+ "fmla v22.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x8, x10]\n"
+ "fmla v17.4s, v8.4s, v11.4s\n"
+ "fmla v16.4s, v7.4s, v11.4s\n"
+ "ldr q11, [x13, x6]\n"
+ "fmla v29.4s, v8.4s, v12.4s\n"
+ "fmla v28.4s, v7.4s, v12.4s\n"
+ "fmla v25.4s, v5.4s, v12.4s\n"
+ "fmla v24.4s, v4.4s, v12.4s\n"
+ "fmla v21.4s, v2.4s, v12.4s\n"
+ "fmla v20.4s, v1.4s, v12.4s\n"
+ "ldr q12, [x8, x9]\n"
+ "add x8, x8, #0x10\n"
+ "fmla v31.4s, v2.4s, v10.4s\n"
+ "fmla v30.4s, v1.4s, v10.4s\n"
+ "fmla v29.4s, v0.4s, v10.4s\n"
+ "ld1 { v10.4s }, [x14]\n"
+ "fmla v27.4s, v7.4s, v11.4s\n"
"fmla v26.4s, v6.4s, v11.4s\n"
- "fmla v28.4s, v5.4s, v11.4s\n"
- "ldr q11, [x9, x11]\n"
- "fmla v27.4s, v5.4s, v12.4s\n"
- "fmla v29.4s, v5.4s, v11.4s\n"
- "fmla v30.4s, v4.4s, v11.4s\n"
- "fmla v31.4s, v3.4s, v11.4s\n"
- "fmla v23.4s, v8.4s, v12.4s\n"
- "ldr q12, [x26, x11]\n"
- "fmla v28.4s, v8.4s, v10.4s\n"
- "ldr q10, [x15, x4]\n"
+ "fmla v23.4s, v4.4s, v11.4s\n"
+ "fmla v22.4s, v3.4s, v11.4s\n"
+ "fmla v19.4s, v1.4s, v11.4s\n"
+ "fmla v18.4s, v0.4s, v11.4s\n"
+ "ldr q11, [x13, x28]\n"
+ "fmla v30.4s, v2.4s, v12.4s\n"
+ "fmla v29.4s, v1.4s, v12.4s\n"
+ "fmla v28.4s, v0.4s, v12.4s\n"
+ "ldr q12, [x14, x27]\n"
+ "add x14, x14, #0x10\n"
+ "fmla v31.4s, v6.4s, v10.4s\n"
+ "fmla v27.4s, v3.4s, v10.4s\n"
+ "fmla v23.4s, v0.4s, v10.4s\n"
+ "ld1 { v10.4s }, [x13]\n"
"fmla v25.4s, v8.4s, v11.4s\n"
- "fmla v26.4s, v7.4s, v11.4s\n"
- "add x26, x26, #0x10\n"
- "fmla v27.4s, v6.4s, v11.4s\n"
+ "fmla v24.4s, v7.4s, v11.4s\n"
+ "fmla v21.4s, v5.4s, v11.4s\n"
+ "fmla v20.4s, v4.4s, v11.4s\n"
+ "fmla v17.4s, v2.4s, v11.4s\n"
+ "fmla v16.4s, v1.4s, v11.4s\n"
+ "ldr q11, [x12, x10]\n"
+ "fmla v28.4s, v8.4s, v12.4s\n"
+ "fmla v24.4s, v5.4s, v12.4s\n"
+ "fmla v20.4s, v2.4s, v12.4s\n"
+ "ldr q12, [x13, x27]\n"
+ "add x13, x13, #0x10\n"
+ "fmla v27.4s, v6.4s, v10.4s\n"
+ "fmla v23.4s, v3.4s, v10.4s\n"
+ "fmla v19.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x11, x10]\n"
+ "fmla v22.4s, v7.4s, v11.4s\n"
+ "fmla v21.4s, v6.4s, v11.4s\n"
+ "fmla v23.4s, v8.4s, v11.4s\n"
+ "fmla v19.4s, v5.4s, v11.4s\n"
+ "fmla v18.4s, v4.4s, v11.4s\n"
+ "fmla v17.4s, v3.4s, v11.4s\n"
+ "ldr q11, [x12, x9]\n"
+ "fmla v24.4s, v8.4s, v12.4s\n"
+ "fmla v20.4s, v5.4s, v12.4s\n"
+ "fmla v16.4s, v2.4s, v12.4s\n"
+ "ldr q12, [x11, x9]\n"
+ "add x11, x11, #0x10\n"
+ "fmla v19.4s, v8.4s, v10.4s\n"
+ "fmla v18.4s, v7.4s, v10.4s\n"
+ "fmla v17.4s, v6.4s, v10.4s\n"
+ "ldr q10, [x15, x6]\n"
+ "fmla v22.4s, v8.4s, v11.4s\n"
+ "fmla v21.4s, v7.4s, v11.4s\n"
+ "fmla v20.4s, v6.4s, v11.4s\n"
+ "fmla v18.4s, v5.4s, v11.4s\n"
+ "fmla v17.4s, v4.4s, v11.4s\n"
+ "fmla v16.4s, v3.4s, v11.4s\n"
"ldr q11, [x15, x28]\n"
- "fmla v29.4s, v8.4s, v12.4s\n"
"add x15, x15, #0x10\n"
- "fmla v30.4s, v7.4s, v12.4s\n"
- "fmla v31.4s, v6.4s, v12.4s\n"
- "ldr q12, [x9, x4]\n"
- "fmla v16.4s, v4.4s, v10.4s\n"
- "fmla v17.4s, v3.4s, v10.4s\n"
- "fmax v16.4s, v16.4s, v15.4s\n"
- "fmla v20.4s, v1.4s, v10.4s\n"
- "fmla v21.4s, v0.4s, v10.4s\n"
- "ldr q10, [x9, x28]\n"
- "fmax v17.4s, v17.4s, v15.4s\n"
- "fmla v18.4s, v5.4s, v11.4s\n"
- "fmla v19.4s, v4.4s, v11.4s\n"
- "fmax v18.4s, v18.4s, v15.4s\n"
- "add x9, x9, #0x10\n"
- "fmla v22.4s, v2.4s, v11.4s\n"
- "fmla v23.4s, v1.4s, v11.4s\n"
- "fmax v19.4s, v19.4s, v15.4s\n"
- "fmla v24.4s, v7.4s, v12.4s\n"
- "fmla v25.4s, v6.4s, v12.4s\n"
- "fmax v20.4s, v20.4s, v15.4s\n"
- "fmla v28.4s, v4.4s, v12.4s\n"
- "fmla v29.4s, v3.4s, v12.4s\n"
- "fmax v21.4s, v21.4s, v15.4s\n"
- "fmla v26.4s, v8.4s, v10.4s\n"
- "fmla v27.4s, v7.4s, v10.4s\n"
- "fmax v22.4s, v22.4s, v15.4s\n"
- "fmla v30.4s, v5.4s, v10.4s\n"
+ "fmla v18.4s, v8.4s, v12.4s\n"
"fmla v31.4s, v4.4s, v10.4s\n"
- "fmax v23.4s, v23.4s, v15.4s\n"
- "fmax v24.4s, v24.4s, v15.4s\n"
- "fmax v25.4s, v25.4s, v15.4s\n"
- "fmax v26.4s, v26.4s, v15.4s\n"
- "fmax v27.4s, v27.4s, v15.4s\n"
- "fmax v28.4s, v28.4s, v15.4s\n"
- "fmax v29.4s, v29.4s, v15.4s\n"
- "fmax v30.4s, v30.4s, v15.4s\n"
+ "fmla v17.4s, v7.4s, v12.4s\n"
+ "fmla v16.4s, v6.4s, v12.4s\n"
+ "ldr q12, [x12, x6]\n"
+ "fmla v30.4s, v3.4s, v10.4s\n"
+ "fmla v27.4s, v1.4s, v10.4s\n"
+ "fmla v26.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x12, x28]\n"
+ "add x12, x12, #0x10\n"
+ "fmla v29.4s, v5.4s, v11.4s\n"
+ "fmla v28.4s, v4.4s, v11.4s\n"
+ "fmla v25.4s, v2.4s, v11.4s\n"
+ "fmla v24.4s, v1.4s, v11.4s\n"
+ "fmla v23.4s, v7.4s, v12.4s\n"
+ "fmla v22.4s, v6.4s, v12.4s\n"
+ "fmla v19.4s, v4.4s, v12.4s\n"
+ "fmla v18.4s, v3.4s, v12.4s\n"
+ "fmla v21.4s, v8.4s, v10.4s\n"
+ "fmla v20.4s, v7.4s, v10.4s\n"
+ "fmla v17.4s, v5.4s, v10.4s\n"
+ "fmla v16.4s, v4.4s, v10.4s\n"
"fmax v31.4s, v31.4s, v15.4s\n"
- "fmin v16.4s, v16.4s, v14.4s\n"
- "fmin v17.4s, v17.4s, v14.4s\n"
- "st1 { v16.4s }, [x8]\n"
- "fmin v18.4s, v18.4s, v14.4s\n"
- "fmin v19.4s, v19.4s, v14.4s\n"
- "str q17, [x8, x5]\n"
- "fmin v20.4s, v20.4s, v14.4s\n"
- "fmin v21.4s, v21.4s, v14.4s\n"
- "str q18, [x8, x23]\n"
- "fmin v22.4s, v22.4s, v14.4s\n"
- "fmin v23.4s, v23.4s, v14.4s\n"
- "str q19, [x8, x22]\n"
- "add x8, x8, #0x10\n"
- "fmin v24.4s, v24.4s, v14.4s\n"
- "fmin v25.4s, v25.4s, v14.4s\n"
- "st1 { v20.4s }, [x10]\n"
- "fmin v26.4s, v26.4s, v14.4s\n"
- "fmin v27.4s, v27.4s, v14.4s\n"
- "str q21, [x10, x5]\n"
- "fmin v28.4s, v28.4s, v14.4s\n"
- "fmin v29.4s, v29.4s, v14.4s\n"
- "str q22, [x10, x23]\n"
- "fmin v30.4s, v30.4s, v14.4s\n"
+ "fmax v30.4s, v30.4s, v15.4s\n"
+ "fmax v29.4s, v29.4s, v15.4s\n"
"fmin v31.4s, v31.4s, v14.4s\n"
- "str q23, [x10, x22]\n"
- "add x10, x10, #0x10\n"
- "st1 { v24.4s }, [x27]\n"
- "str q25, [x27, x5]\n"
- "str q26, [x27, x23]\n"
- "str q27, [x27, x22]\n"
- "add x27, x27, #0x10\n"
- "st1 { v28.4s }, [x24]\n"
- "str q29, [x24, x5]\n"
- "str q30, [x24, x23]\n"
- "str q31, [x24, x22]\n"
+ "st1 { v31.4s }, [x16]\n"
+ "fmin v30.4s, v30.4s, v14.4s\n"
+ "fmin v29.4s, v29.4s, v14.4s\n"
+ "str q30, [x16, x17]\n"
+ "fmax v28.4s, v28.4s, v15.4s\n"
+ "fmax v27.4s, v27.4s, v15.4s\n"
+ "str q29, [x16, x23]\n"
+ "fmax v26.4s, v26.4s, v15.4s\n"
+ "fmax v25.4s, v25.4s, v15.4s\n"
+ "fmin v28.4s, v28.4s, v14.4s\n"
+ "str q28, [x16, x22]\n"
+ "fmin v27.4s, v27.4s, v14.4s\n"
+ "add x16, x16, #0x10\n"
+ "fmin v26.4s, v26.4s, v14.4s\n"
+ "st1 { v27.4s }, [x26]\n"
+ "fmin v25.4s, v25.4s, v14.4s\n"
+ "fmax v24.4s, v24.4s, v15.4s\n"
+ "str q26, [x26, x17]\n"
+ "fmax v23.4s, v23.4s, v15.4s\n"
+ "str q25, [x26, x23]\n"
+ "fmin v24.4s, v24.4s, v14.4s\n"
+ "fmax v22.4s, v22.4s, v15.4s\n"
+ "str q24, [x26, x22]\n"
+ "fmin v23.4s, v23.4s, v14.4s\n"
+ "add x26, x26, #0x10\n"
+ "fmin v22.4s, v22.4s, v14.4s\n"
+ "st1 { v23.4s }, [x25]\n"
+ "fmax v21.4s, v21.4s, v15.4s\n"
+ "fmax v20.4s, v20.4s, v15.4s\n"
+ "str q22, [x25, x17]\n"
+ "fmax v19.4s, v19.4s, v15.4s\n"
+ "fmax v18.4s, v18.4s, v15.4s\n"
+ "fmin v21.4s, v21.4s, v14.4s\n"
+ "str q21, [x25, x23]\n"
+ "fmin v20.4s, v20.4s, v14.4s\n"
+ "fmin v19.4s, v19.4s, v14.4s\n"
+ "str q20, [x25, x22]\n"
+ "fmin v18.4s, v18.4s, v14.4s\n"
+ "add x25, x25, #0x10\n"
+ "fmax v17.4s, v17.4s, v15.4s\n"
+ "st1 { v19.4s }, [x24]\n"
+ "fmax v16.4s, v16.4s, v15.4s\n"
+ "str q18, [x24, x17]\n"
+ "fmin v17.4s, v17.4s, v14.4s\n"
+ "str q17, [x24, x23]\n"
+ "fmin v16.4s, v16.4s, v14.4s\n"
+ "str q16, [x24, x22]\n"
"add x24, x24, #0x10\n"
"4:" // Tile loop: Oddments
"tst %x[n_channels], #0x3\n"
"beq 73f\n"
- "ldr q13, [x16, #0x0]\n"
- "ldr q0, [x16, #0x10]\n"
- "add x23, x14, x17\n"
- "add x22, x7, XZR\n"
- "ldr q1, [x16, #0x20]\n"
- "ldr q2, [x16, #0x30]\n"
- "add x21, x7, x25\n"
- "add x20, x14, x11\n"
- "ldr q3, [x16, #0x40]\n"
- "ldr q4, [x16, #0x50]\n"
- "ldr q5, [x16, #0x60]\n"
- "ldr q6, [x16, #0x70]\n"
- "ldr q7, [x16, #0x80]\n"
- "ldr q8, [x16, #0x90]\n"
+ "ldr q13, [x5, #0x0]\n"
+ "ldr q0, [x5, #0x10]\n"
+ "add x22, x14, x10\n"
+ "ldr q1, [x5, #0x20]\n"
+ "add x21, x8, XZR\n"
+ "ldr q2, [x5, #0x30]\n"
+ "add x20, x8, x27\n"
+ "ldr q3, [x5, #0x40]\n"
+ "add x19, x14, x9\n"
+ "ldr q4, [x5, #0x50]\n"
+ "ldr q5, [x5, #0x60]\n"
+ "ldr q6, [x5, #0x70]\n"
+ "ldr q7, [x5, #0x80]\n"
+ "ldr q8, [x5, #0x90]\n"
"tbz %x[n_channels], #1, 5f\n"
- "ldr d9, [x23], #0x8\n"
- "ldr d10, [x22], #0x8\n"
- "ldr d11, [x21], #0x8\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d9, [x22], #0x8\n"
+ "ldr d10, [x21], #0x8\n"
+ "ldr d11, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #0, 6f\n"
- "ld1 { v9.s }[2], [x23]\n"
- "ld1 { v10.s }[2], [x22]\n"
- "ld1 { v11.s }[2], [x21]\n"
- "ld1 { v12.s }[2], [x20]\n"
+ "ld1 { v9.s }[2], [x22]\n"
+ "ld1 { v10.s }[2], [x21]\n"
+ "ld1 { v11.s }[2], [x20]\n"
+ "ld1 { v12.s }[2], [x19]\n"
"b 6f\n"
"5:" // Tile loop: Oddments: Load inputs: (2, 2), (0, 0), (0, 5), (2, 3): Bit 1: Unset
- "ldr s9, [x23, #0x0]\n"
- "ldr s10, [x22, #0x0]\n"
- "ldr s11, [x21, #0x0]\n"
- "ldr s12, [x20, #0x0]\n"
+ "ldr s9, [x22, #0x0]\n"
+ "ldr s10, [x21, #0x0]\n"
+ "ldr s11, [x20, #0x0]\n"
+ "ldr s12, [x19, #0x0]\n"
"6:" // Tile loop: Oddments: Load inputs: (2, 2), (0, 0), (0, 5), (2, 3): Bit 1: End
- "mov v16.16b, v13.16b\n fmla v16.4s, v8.4s, v9.4s\n"
- "mov v17.16b, v13.16b\n fmla v17.4s, v7.4s, v9.4s\n"
- "add x20, x26, XZR\n"
- "mov v18.16b, v13.16b\n fmla v18.4s, v6.4s, v9.4s\n"
- "mov v21.16b, v13.16b\n fmla v21.4s, v4.4s, v9.4s\n"
- "mov v22.16b, v13.16b\n fmla v22.4s, v3.4s, v9.4s\n"
- "mov v25.16b, v13.16b\n fmla v25.4s, v1.4s, v9.4s\n"
- "mov v26.16b, v13.16b\n fmla v26.4s, v0.4s, v9.4s\n"
- "mov v19.16b, v13.16b\n fmla v19.4s, v2.4s, v11.4s\n"
- "mov v20.16b, v13.16b\n fmla v20.4s, v5.4s, v9.4s\n"
- "mov v24.16b, v13.16b\n fmla v24.4s, v2.4s, v9.4s\n"
- "fmla v16.4s, v0.4s, v10.4s\n"
- "fmla v17.4s, v8.4s, v12.4s\n"
- "fmla v18.4s, v7.4s, v12.4s\n"
- "fmla v19.4s, v6.4s, v12.4s\n"
- "fmla v21.4s, v5.4s, v12.4s\n"
- "fmla v22.4s, v4.4s, v12.4s\n"
- "mov v23.16b, v13.16b\n fmla v23.4s, v3.4s, v12.4s\n"
- "fmla v25.4s, v2.4s, v12.4s\n"
- "fmla v26.4s, v1.4s, v12.4s\n"
- "mov v27.16b, v13.16b\n fmla v27.4s, v0.4s, v12.4s\n"
+ "mov v31.16b, v13.16b\n fmla v31.4s, v8.4s, v9.4s\n"
+ "add x19, x11, XZR\n"
+ "mov v30.16b, v13.16b\n fmla v30.4s, v7.4s, v9.4s\n"
+ "mov v29.16b, v13.16b\n fmla v29.4s, v6.4s, v9.4s\n"
+ "mov v27.16b, v13.16b\n fmla v27.4s, v5.4s, v9.4s\n"
+ "mov v26.16b, v13.16b\n fmla v26.4s, v4.4s, v9.4s\n"
+ "mov v25.16b, v13.16b\n fmla v25.4s, v3.4s, v9.4s\n"
+ "mov v23.16b, v13.16b\n fmla v23.4s, v2.4s, v9.4s\n"
+ "mov v22.16b, v13.16b\n fmla v22.4s, v1.4s, v9.4s\n"
+ "mov v21.16b, v13.16b\n fmla v21.4s, v0.4s, v9.4s\n"
+ "fmla v31.4s, v0.4s, v10.4s\n"
+ "mov v28.16b, v13.16b\n fmla v28.4s, v2.4s, v11.4s\n"
+ "fmla v30.4s, v8.4s, v12.4s\n"
+ "fmla v29.4s, v7.4s, v12.4s\n"
+ "fmla v26.4s, v5.4s, v12.4s\n"
+ "fmla v28.4s, v6.4s, v12.4s\n"
+ "fmla v25.4s, v4.4s, v12.4s\n"
+ "mov v24.16b, v13.16b\n fmla v24.4s, v3.4s, v12.4s\n"
+ "fmla v22.4s, v2.4s, v12.4s\n"
+ "fmla v21.4s, v1.4s, v12.4s\n"
+ "mov v20.16b, v13.16b\n fmla v20.4s, v0.4s, v12.4s\n"
"tbz %x[n_channels], #1, 7f\n"
- "ldr d10, [x20], #0x8\n"
+ "ldr d10, [x19], #0x8\n"
"tbz %x[n_channels], #0, 8f\n"
- "ld1 { v10.s }[2], [x20]\n"
+ "ld1 { v10.s }[2], [x19]\n"
"b 8f\n"
"7:" // Tile loop: Oddments: Load inputs: (5, 0): Bit 1: Unset
- "ldr s10, [x20, #0x0]\n"
+ "ldr s10, [x19, #0x0]\n"
"8:" // Tile loop: Oddments: Load inputs: (5, 0): Bit 1: End
- "mov v28.16b, v13.16b\n fmla v28.4s, v6.4s, v10.4s\n"
- "add x20, x26, x25\n"
+ "mov v19.16b, v13.16b\n fmla v19.4s, v6.4s, v10.4s\n"
+ "add x19, x11, x27\n"
"tbz %x[n_channels], #1, 9f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #0, 10f\n"
- "ld1 { v11.s }[2], [x20]\n"
+ "ld1 { v11.s }[2], [x19]\n"
"b 10f\n"
"9:" // Tile loop: Oddments: Load inputs: (5, 5): Bit 1: Unset
- "ldr s11, [x20, #0x0]\n"
+ "ldr s11, [x19, #0x0]\n"
"10:" // Tile loop: Oddments: Load inputs: (5, 5): Bit 1: End
- "mov v31.16b, v13.16b\n fmla v31.4s, v8.4s, v11.4s\n"
- "add x20, x12, x17\n"
+ "mov v16.16b, v13.16b\n fmla v16.4s, v8.4s, v11.4s\n"
+ "add x19, x13, x10\n"
"tbz %x[n_channels], #1, 11f\n"
- "ldr d9, [x20], #0x8\n"
+ "ldr d9, [x19], #0x8\n"
"tbz %x[n_channels], #0, 12f\n"
- "ld1 { v9.s }[2], [x20]\n"
+ "ld1 { v9.s }[2], [x19]\n"
"b 12f\n"
"11:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 1: Unset
- "ldr s9, [x20, #0x0]\n"
+ "ldr s9, [x19, #0x0]\n"
"12:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 1: End
- "fmla v20.4s, v8.4s, v9.4s\n"
- "fmla v21.4s, v7.4s, v9.4s\n"
- "add x20, x7, x4\n"
- "fmla v22.4s, v6.4s, v9.4s\n"
- "fmla v24.4s, v5.4s, v9.4s\n"
- "fmla v25.4s, v4.4s, v9.4s\n"
- "fmla v26.4s, v3.4s, v9.4s\n"
- "fmla v28.4s, v2.4s, v9.4s\n"
- "mov v29.16b, v13.16b\n fmla v29.4s, v1.4s, v9.4s\n"
- "mov v30.16b, v13.16b\n fmla v30.4s, v0.4s, v9.4s\n"
+ "fmla v27.4s, v8.4s, v9.4s\n"
+ "add x19, x8, x6\n"
+ "fmla v26.4s, v7.4s, v9.4s\n"
+ "fmla v25.4s, v6.4s, v9.4s\n"
+ "fmla v23.4s, v5.4s, v9.4s\n"
+ "fmla v22.4s, v4.4s, v9.4s\n"
+ "fmla v21.4s, v3.4s, v9.4s\n"
+ "fmla v19.4s, v2.4s, v9.4s\n"
+ "mov v18.16b, v13.16b\n fmla v18.4s, v1.4s, v9.4s\n"
+ "mov v17.16b, v13.16b\n fmla v17.4s, v0.4s, v9.4s\n"
"tbz %x[n_channels], #1, 13f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #0, 14f\n"
- "ld1 { v12.s }[2], [x20]\n"
+ "ld1 { v12.s }[2], [x19]\n"
"b 14f\n"
"13:" // Tile loop: Oddments: Load inputs: (0, 1): Bit 1: Unset
- "ldr s12, [x20, #0x0]\n"
+ "ldr s12, [x19, #0x0]\n"
"14:" // Tile loop: Oddments: Load inputs: (0, 1): Bit 1: End
- "fmla v16.4s, v1.4s, v12.4s\n"
- "fmla v17.4s, v0.4s, v12.4s\n"
- "add x20, x7, x28\n"
+ "fmla v31.4s, v1.4s, v12.4s\n"
+ "add x19, x8, x28\n"
+ "fmla v30.4s, v0.4s, v12.4s\n"
"tbz %x[n_channels], #1, 15f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #0, 16f\n"
- "ld1 { v11.s }[2], [x20]\n"
+ "ld1 { v11.s }[2], [x19]\n"
"b 16f\n"
"15:" // Tile loop: Oddments: Load inputs: (0, 4): Bit 1: Unset
- "ldr s11, [x20, #0x0]\n"
+ "ldr s11, [x19, #0x0]\n"
"16:" // Tile loop: Oddments: Load inputs: (0, 4): Bit 1: End
- "fmla v18.4s, v2.4s, v11.4s\n"
- "fmla v19.4s, v1.4s, v11.4s\n"
- "add x20, x12, x11\n"
+ "fmla v29.4s, v2.4s, v11.4s\n"
+ "add x19, x13, x9\n"
+ "fmla v28.4s, v1.4s, v11.4s\n"
"tbz %x[n_channels], #1, 17f\n"
- "ldr d10, [x20], #0x8\n"
+ "ldr d10, [x19], #0x8\n"
"tbz %x[n_channels], #0, 18f\n"
- "ld1 { v10.s }[2], [x20]\n"
+ "ld1 { v10.s }[2], [x19]\n"
"b 18f\n"
"17:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 1: Unset
- "ldr s10, [x20, #0x0]\n"
+ "ldr s10, [x19, #0x0]\n"
"18:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 1: End
- "fmla v21.4s, v8.4s, v10.4s\n"
- "fmla v22.4s, v7.4s, v10.4s\n"
- "add x20, x15, XZR\n"
- "fmla v23.4s, v6.4s, v10.4s\n"
- "fmla v25.4s, v5.4s, v10.4s\n"
- "fmla v26.4s, v4.4s, v10.4s\n"
- "fmla v27.4s, v3.4s, v10.4s\n"
- "fmla v29.4s, v2.4s, v10.4s\n"
- "fmla v30.4s, v1.4s, v10.4s\n"
- "fmla v31.4s, v0.4s, v10.4s\n"
+ "fmla v26.4s, v8.4s, v10.4s\n"
+ "add x19, x15, XZR\n"
+ "fmla v25.4s, v7.4s, v10.4s\n"
+ "fmla v24.4s, v6.4s, v10.4s\n"
+ "fmla v22.4s, v5.4s, v10.4s\n"
+ "fmla v21.4s, v4.4s, v10.4s\n"
+ "fmla v20.4s, v3.4s, v10.4s\n"
+ "fmla v18.4s, v2.4s, v10.4s\n"
+ "fmla v17.4s, v1.4s, v10.4s\n"
+ "fmla v16.4s, v0.4s, v10.4s\n"
"tbz %x[n_channels], #1, 19f\n"
- "ldr d9, [x20], #0x8\n"
+ "ldr d9, [x19], #0x8\n"
"tbz %x[n_channels], #0, 20f\n"
- "ld1 { v9.s }[2], [x20]\n"
+ "ld1 { v9.s }[2], [x19]\n"
"b 20f\n"
"19:" // Tile loop: Oddments: Load inputs: (1, 0): Bit 1: Unset
- "ldr s9, [x20, #0x0]\n"
+ "ldr s9, [x19, #0x0]\n"
"20:" // Tile loop: Oddments: Load inputs: (1, 0): Bit 1: End
- "fmla v16.4s, v3.4s, v9.4s\n"
- "fmla v20.4s, v0.4s, v9.4s\n"
- "add x20, x15, x25\n"
+ "fmla v31.4s, v3.4s, v9.4s\n"
+ "add x19, x15, x27\n"
+ "fmla v27.4s, v0.4s, v9.4s\n"
"tbz %x[n_channels], #1, 21f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #0, 22f\n"
- "ld1 { v12.s }[2], [x20]\n"
+ "ld1 { v12.s }[2], [x19]\n"
"b 22f\n"
"21:" // Tile loop: Oddments: Load inputs: (1, 5): Bit 1: Unset
- "ldr s12, [x20, #0x0]\n"
+ "ldr s12, [x19, #0x0]\n"
"22:" // Tile loop: Oddments: Load inputs: (1, 5): Bit 1: End
- "fmla v19.4s, v5.4s, v12.4s\n"
- "fmla v23.4s, v2.4s, v12.4s\n"
- "add x20, x9, XZR\n"
+ "fmla v28.4s, v5.4s, v12.4s\n"
+ "add x19, x12, XZR\n"
+ "fmla v24.4s, v2.4s, v12.4s\n"
"tbz %x[n_channels], #1, 23f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #0, 24f\n"
- "ld1 { v11.s }[2], [x20]\n"
+ "ld1 { v11.s }[2], [x19]\n"
"b 24f\n"
"23:" // Tile loop: Oddments: Load inputs: (4, 0): Bit 1: Unset
- "ldr s11, [x20, #0x0]\n"
+ "ldr s11, [x19, #0x0]\n"
"24:" // Tile loop: Oddments: Load inputs: (4, 0): Bit 1: End
- "fmla v24.4s, v6.4s, v11.4s\n"
- "fmla v28.4s, v3.4s, v11.4s\n"
- "add x20, x15, x17\n"
+ "fmla v23.4s, v6.4s, v11.4s\n"
+ "add x19, x15, x10\n"
+ "fmla v19.4s, v3.4s, v11.4s\n"
"tbz %x[n_channels], #1, 25f\n"
- "ldr d10, [x20], #0x8\n"
+ "ldr d10, [x19], #0x8\n"
"tbz %x[n_channels], #0, 26f\n"
- "ld1 { v10.s }[2], [x20]\n"
+ "ld1 { v10.s }[2], [x19]\n"
"b 26f\n"
"25:" // Tile loop: Oddments: Load inputs: (1, 2): Bit 1: Unset
- "ldr s10, [x20, #0x0]\n"
+ "ldr s10, [x19, #0x0]\n"
"26:" // Tile loop: Oddments: Load inputs: (1, 2): Bit 1: End
- "fmla v16.4s, v5.4s, v10.4s\n"
- "fmla v17.4s, v4.4s, v10.4s\n"
- "add x20, x9, x25\n"
- "fmla v18.4s, v3.4s, v10.4s\n"
- "fmla v20.4s, v2.4s, v10.4s\n"
- "fmla v21.4s, v1.4s, v10.4s\n"
- "fmla v22.4s, v0.4s, v10.4s\n"
+ "fmla v31.4s, v5.4s, v10.4s\n"
+ "add x19, x12, x27\n"
+ "fmla v30.4s, v4.4s, v10.4s\n"
+ "fmla v29.4s, v3.4s, v10.4s\n"
+ "fmla v27.4s, v2.4s, v10.4s\n"
+ "fmla v26.4s, v1.4s, v10.4s\n"
+ "fmla v25.4s, v0.4s, v10.4s\n"
"tbz %x[n_channels], #1, 27f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #0, 28f\n"
- "ld1 { v11.s }[2], [x20]\n"
+ "ld1 { v11.s }[2], [x19]\n"
"b 28f\n"
"27:" // Tile loop: Oddments: Load inputs: (4, 5): Bit 1: Unset
- "ldr s11, [x20, #0x0]\n"
+ "ldr s11, [x19, #0x0]\n"
"28:" // Tile loop: Oddments: Load inputs: (4, 5): Bit 1: End
- "fmla v27.4s, v8.4s, v11.4s\n"
- "fmla v31.4s, v5.4s, v11.4s\n"
- "add x20, x15, x11\n"
+ "fmla v20.4s, v8.4s, v11.4s\n"
+ "add x19, x15, x9\n"
+ "fmla v16.4s, v5.4s, v11.4s\n"
"tbz %x[n_channels], #1, 29f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #0, 30f\n"
- "ld1 { v12.s }[2], [x20]\n"
+ "ld1 { v12.s }[2], [x19]\n"
"b 30f\n"
"29:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 1: Unset
- "ldr s12, [x20, #0x0]\n"
+ "ldr s12, [x19, #0x0]\n"
"30:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 1: End
- "fmla v17.4s, v5.4s, v12.4s\n"
- "fmla v18.4s, v4.4s, v12.4s\n"
- "add x20, x26, x4\n"
- "fmla v19.4s, v3.4s, v12.4s\n"
- "fmla v21.4s, v2.4s, v12.4s\n"
- "fmla v22.4s, v1.4s, v12.4s\n"
- "fmla v23.4s, v0.4s, v12.4s\n"
+ "fmla v30.4s, v5.4s, v12.4s\n"
+ "add x19, x11, x6\n"
+ "fmla v29.4s, v4.4s, v12.4s\n"
+ "fmla v28.4s, v3.4s, v12.4s\n"
+ "fmla v26.4s, v2.4s, v12.4s\n"
+ "fmla v25.4s, v1.4s, v12.4s\n"
+ "fmla v24.4s, v0.4s, v12.4s\n"
"tbz %x[n_channels], #1, 31f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #0, 32f\n"
- "ld1 { v11.s }[2], [x20]\n"
+ "ld1 { v11.s }[2], [x19]\n"
"b 32f\n"
"31:" // Tile loop: Oddments: Load inputs: (5, 1): Bit 1: Unset
- "ldr s11, [x20, #0x0]\n"
+ "ldr s11, [x19, #0x0]\n"
"32:" // Tile loop: Oddments: Load inputs: (5, 1): Bit 1: End
- "fmla v28.4s, v7.4s, v11.4s\n"
- "fmla v29.4s, v6.4s, v11.4s\n"
- "add x20, x14, x4\n"
+ "fmla v19.4s, v7.4s, v11.4s\n"
+ "add x19, x14, x6\n"
+ "fmla v18.4s, v6.4s, v11.4s\n"
"tbz %x[n_channels], #1, 33f\n"
- "ldr d10, [x20], #0x8\n"
+ "ldr d10, [x19], #0x8\n"
"tbz %x[n_channels], #0, 34f\n"
- "ld1 { v10.s }[2], [x20]\n"
+ "ld1 { v10.s }[2], [x19]\n"
"b 34f\n"
"33:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 1: Unset
- "ldr s10, [x20, #0x0]\n"
+ "ldr s10, [x19, #0x0]\n"
"34:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 1: End
- "fmla v16.4s, v7.4s, v10.4s\n"
- "fmla v17.4s, v6.4s, v10.4s\n"
- "add x20, x26, x28\n"
- "fmla v20.4s, v4.4s, v10.4s\n"
- "fmla v21.4s, v3.4s, v10.4s\n"
- "fmla v24.4s, v1.4s, v10.4s\n"
- "fmla v25.4s, v0.4s, v10.4s\n"
+ "fmla v31.4s, v7.4s, v10.4s\n"
+ "add x19, x11, x28\n"
+ "fmla v30.4s, v6.4s, v10.4s\n"
+ "fmla v27.4s, v4.4s, v10.4s\n"
+ "fmla v26.4s, v3.4s, v10.4s\n"
+ "fmla v23.4s, v1.4s, v10.4s\n"
+ "fmla v22.4s, v0.4s, v10.4s\n"
"tbz %x[n_channels], #1, 35f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #0, 36f\n"
- "ld1 { v11.s }[2], [x20]\n"
+ "ld1 { v11.s }[2], [x19]\n"
"b 36f\n"
"35:" // Tile loop: Oddments: Load inputs: (5, 4): Bit 1: Unset
- "ldr s11, [x20, #0x0]\n"
+ "ldr s11, [x19, #0x0]\n"
"36:" // Tile loop: Oddments: Load inputs: (5, 4): Bit 1: End
- "fmla v30.4s, v8.4s, v11.4s\n"
- "fmla v31.4s, v7.4s, v11.4s\n"
- "add x20, x14, x28\n"
+ "fmla v17.4s, v8.4s, v11.4s\n"
+ "add x19, x14, x28\n"
+ "fmla v16.4s, v7.4s, v11.4s\n"
"tbz %x[n_channels], #1, 37f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #0, 38f\n"
- "ld1 { v12.s }[2], [x20]\n"
+ "ld1 { v12.s }[2], [x19]\n"
"b 38f\n"
"37:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 1: Unset
- "ldr s12, [x20, #0x0]\n"
+ "ldr s12, [x19, #0x0]\n"
"38:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 1: End
- "fmla v18.4s, v8.4s, v12.4s\n"
- "fmla v19.4s, v7.4s, v12.4s\n"
- "add x20, x7, x17\n"
- "fmla v22.4s, v5.4s, v12.4s\n"
- "fmla v23.4s, v4.4s, v12.4s\n"
- "fmla v26.4s, v2.4s, v12.4s\n"
- "fmla v27.4s, v1.4s, v12.4s\n"
+ "fmla v29.4s, v8.4s, v12.4s\n"
+ "add x19, x8, x10\n"
+ "fmla v28.4s, v7.4s, v12.4s\n"
+ "fmla v25.4s, v5.4s, v12.4s\n"
+ "fmla v24.4s, v4.4s, v12.4s\n"
+ "fmla v21.4s, v2.4s, v12.4s\n"
+ "fmla v20.4s, v1.4s, v12.4s\n"
"tbz %x[n_channels], #1, 39f\n"
- "ldr d10, [x20], #0x8\n"
+ "ldr d10, [x19], #0x8\n"
"tbz %x[n_channels], #0, 40f\n"
- "ld1 { v10.s }[2], [x20]\n"
+ "ld1 { v10.s }[2], [x19]\n"
"b 40f\n"
"39:" // Tile loop: Oddments: Load inputs: (0, 2): Bit 1: Unset
- "ldr s10, [x20, #0x0]\n"
+ "ldr s10, [x19, #0x0]\n"
"40:" // Tile loop: Oddments: Load inputs: (0, 2): Bit 1: End
- "fmla v16.4s, v2.4s, v10.4s\n"
- "fmla v17.4s, v1.4s, v10.4s\n"
- "add x20, x12, x4\n"
- "fmla v18.4s, v0.4s, v10.4s\n"
+ "fmla v31.4s, v2.4s, v10.4s\n"
+ "add x19, x13, x6\n"
+ "fmla v30.4s, v1.4s, v10.4s\n"
+ "fmla v29.4s, v0.4s, v10.4s\n"
"tbz %x[n_channels], #1, 41f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #0, 42f\n"
- "ld1 { v11.s }[2], [x20]\n"
+ "ld1 { v11.s }[2], [x19]\n"
"b 42f\n"
"41:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 1: Unset
- "ldr s11, [x20, #0x0]\n"
+ "ldr s11, [x19, #0x0]\n"
"42:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 1: End
- "fmla v20.4s, v7.4s, v11.4s\n"
- "fmla v21.4s, v6.4s, v11.4s\n"
- "add x20, x7, x11\n"
- "fmla v24.4s, v4.4s, v11.4s\n"
- "fmla v25.4s, v3.4s, v11.4s\n"
- "fmla v28.4s, v1.4s, v11.4s\n"
- "fmla v29.4s, v0.4s, v11.4s\n"
+ "fmla v27.4s, v7.4s, v11.4s\n"
+ "add x19, x8, x9\n"
+ "fmla v26.4s, v6.4s, v11.4s\n"
+ "fmla v23.4s, v4.4s, v11.4s\n"
+ "fmla v22.4s, v3.4s, v11.4s\n"
+ "fmla v19.4s, v1.4s, v11.4s\n"
+ "fmla v18.4s, v0.4s, v11.4s\n"
"tbz %x[n_channels], #1, 43f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #0, 44f\n"
- "ld1 { v12.s }[2], [x20]\n"
+ "ld1 { v12.s }[2], [x19]\n"
"b 44f\n"
"43:" // Tile loop: Oddments: Load inputs: (0, 3): Bit 1: Unset
- "ldr s12, [x20, #0x0]\n"
+ "ldr s12, [x19, #0x0]\n"
"44:" // Tile loop: Oddments: Load inputs: (0, 3): Bit 1: End
- "fmla v17.4s, v2.4s, v12.4s\n"
- "fmla v18.4s, v1.4s, v12.4s\n"
- "add x20, x14, XZR\n"
- "fmla v19.4s, v0.4s, v12.4s\n"
+ "fmla v30.4s, v2.4s, v12.4s\n"
+ "add x19, x14, XZR\n"
+ "fmla v29.4s, v1.4s, v12.4s\n"
+ "fmla v28.4s, v0.4s, v12.4s\n"
"tbz %x[n_channels], #1, 45f\n"
- "ldr d10, [x20], #0x8\n"
+ "ldr d10, [x19], #0x8\n"
"tbz %x[n_channels], #0, 46f\n"
- "ld1 { v10.s }[2], [x20]\n"
+ "ld1 { v10.s }[2], [x19]\n"
"b 46f\n"
"45:" // Tile loop: Oddments: Load inputs: (2, 0): Bit 1: Unset
- "ldr s10, [x20, #0x0]\n"
+ "ldr s10, [x19, #0x0]\n"
"46:" // Tile loop: Oddments: Load inputs: (2, 0): Bit 1: End
- "fmla v16.4s, v6.4s, v10.4s\n"
- "fmla v20.4s, v3.4s, v10.4s\n"
- "add x20, x12, x28\n"
- "fmla v24.4s, v0.4s, v10.4s\n"
+ "fmla v31.4s, v6.4s, v10.4s\n"
+ "add x19, x13, x28\n"
+ "fmla v27.4s, v3.4s, v10.4s\n"
+ "fmla v23.4s, v0.4s, v10.4s\n"
"tbz %x[n_channels], #1, 47f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #0, 48f\n"
- "ld1 { v11.s }[2], [x20]\n"
+ "ld1 { v11.s }[2], [x19]\n"
"b 48f\n"
"47:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 1: Unset
- "ldr s11, [x20, #0x0]\n"
+ "ldr s11, [x19, #0x0]\n"
"48:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 1: End
- "fmla v22.4s, v8.4s, v11.4s\n"
- "fmla v23.4s, v7.4s, v11.4s\n"
- "add x20, x14, x25\n"
- "fmla v26.4s, v5.4s, v11.4s\n"
- "fmla v27.4s, v4.4s, v11.4s\n"
- "fmla v30.4s, v2.4s, v11.4s\n"
- "fmla v31.4s, v1.4s, v11.4s\n"
+ "fmla v25.4s, v8.4s, v11.4s\n"
+ "add x19, x14, x27\n"
+ "fmla v24.4s, v7.4s, v11.4s\n"
+ "fmla v21.4s, v5.4s, v11.4s\n"
+ "fmla v20.4s, v4.4s, v11.4s\n"
+ "fmla v17.4s, v2.4s, v11.4s\n"
+ "fmla v16.4s, v1.4s, v11.4s\n"
"tbz %x[n_channels], #1, 49f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #0, 50f\n"
- "ld1 { v12.s }[2], [x20]\n"
+ "ld1 { v12.s }[2], [x19]\n"
"b 50f\n"
"49:" // Tile loop: Oddments: Load inputs: (2, 5): Bit 1: Unset
- "ldr s12, [x20, #0x0]\n"
+ "ldr s12, [x19, #0x0]\n"
"50:" // Tile loop: Oddments: Load inputs: (2, 5): Bit 1: End
- "fmla v19.4s, v8.4s, v12.4s\n"
- "fmla v23.4s, v5.4s, v12.4s\n"
- "add x20, x12, XZR\n"
- "fmla v27.4s, v2.4s, v12.4s\n"
+ "fmla v28.4s, v8.4s, v12.4s\n"
+ "add x19, x13, XZR\n"
+ "fmla v24.4s, v5.4s, v12.4s\n"
+ "fmla v20.4s, v2.4s, v12.4s\n"
"tbz %x[n_channels], #1, 51f\n"
- "ldr d10, [x20], #0x8\n"
+ "ldr d10, [x19], #0x8\n"
"tbz %x[n_channels], #0, 52f\n"
- "ld1 { v10.s }[2], [x20]\n"
+ "ld1 { v10.s }[2], [x19]\n"
"b 52f\n"
"51:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 1: Unset
- "ldr s10, [x20, #0x0]\n"
+ "ldr s10, [x19, #0x0]\n"
"52:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 1: End
- "fmla v20.4s, v6.4s, v10.4s\n"
- "fmla v24.4s, v3.4s, v10.4s\n"
- "add x20, x9, x17\n"
- "fmla v28.4s, v0.4s, v10.4s\n"
+ "fmla v27.4s, v6.4s, v10.4s\n"
+ "add x19, x12, x10\n"
+ "fmla v23.4s, v3.4s, v10.4s\n"
+ "fmla v19.4s, v0.4s, v10.4s\n"
"tbz %x[n_channels], #1, 53f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #0, 54f\n"
- "ld1 { v11.s }[2], [x20]\n"
+ "ld1 { v11.s }[2], [x19]\n"
"b 54f\n"
"53:" // Tile loop: Oddments: Load inputs: (4, 2): Bit 1: Unset
- "ldr s11, [x20, #0x0]\n"
+ "ldr s11, [x19, #0x0]\n"
"54:" // Tile loop: Oddments: Load inputs: (4, 2): Bit 1: End
- "fmla v24.4s, v8.4s, v11.4s\n"
- "fmla v25.4s, v7.4s, v11.4s\n"
- "add x20, x12, x25\n"
- "fmla v26.4s, v6.4s, v11.4s\n"
- "fmla v28.4s, v5.4s, v11.4s\n"
- "fmla v29.4s, v4.4s, v11.4s\n"
- "fmla v30.4s, v3.4s, v11.4s\n"
+ "fmla v23.4s, v8.4s, v11.4s\n"
+ "add x19, x13, x27\n"
+ "fmla v22.4s, v7.4s, v11.4s\n"
+ "fmla v21.4s, v6.4s, v11.4s\n"
+ "fmla v19.4s, v5.4s, v11.4s\n"
+ "fmla v18.4s, v4.4s, v11.4s\n"
+ "fmla v17.4s, v3.4s, v11.4s\n"
"tbz %x[n_channels], #1, 55f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #0, 56f\n"
- "ld1 { v12.s }[2], [x20]\n"
+ "ld1 { v12.s }[2], [x19]\n"
"b 56f\n"
"55:" // Tile loop: Oddments: Load inputs: (3, 5): Bit 1: Unset
- "ldr s12, [x20, #0x0]\n"
+ "ldr s12, [x19, #0x0]\n"
"56:" // Tile loop: Oddments: Load inputs: (3, 5): Bit 1: End
- "fmla v23.4s, v8.4s, v12.4s\n"
- "fmla v27.4s, v5.4s, v12.4s\n"
- "add x20, x26, x17\n"
- "fmla v31.4s, v2.4s, v12.4s\n"
+ "fmla v24.4s, v8.4s, v12.4s\n"
+ "add x19, x11, x10\n"
+ "fmla v20.4s, v5.4s, v12.4s\n"
+ "fmla v16.4s, v2.4s, v12.4s\n"
"tbz %x[n_channels], #1, 57f\n"
- "ldr d10, [x20], #0x8\n"
+ "ldr d10, [x19], #0x8\n"
"tbz %x[n_channels], #0, 58f\n"
- "ld1 { v10.s }[2], [x20]\n"
+ "ld1 { v10.s }[2], [x19]\n"
"b 58f\n"
"57:" // Tile loop: Oddments: Load inputs: (5, 2): Bit 1: Unset
- "ldr s10, [x20, #0x0]\n"
+ "ldr s10, [x19, #0x0]\n"
"58:" // Tile loop: Oddments: Load inputs: (5, 2): Bit 1: End
- "fmla v28.4s, v8.4s, v10.4s\n"
- "fmla v29.4s, v7.4s, v10.4s\n"
- "add x20, x9, x11\n"
- "fmla v30.4s, v6.4s, v10.4s\n"
+ "fmla v19.4s, v8.4s, v10.4s\n"
+ "add x19, x12, x9\n"
+ "fmla v18.4s, v7.4s, v10.4s\n"
+ "fmla v17.4s, v6.4s, v10.4s\n"
"tbz %x[n_channels], #1, 59f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #0, 60f\n"
- "ld1 { v11.s }[2], [x20]\n"
+ "ld1 { v11.s }[2], [x19]\n"
"b 60f\n"
"59:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 1: Unset
- "ldr s11, [x20, #0x0]\n"
+ "ldr s11, [x19, #0x0]\n"
"60:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 1: End
- "fmla v25.4s, v8.4s, v11.4s\n"
- "fmla v26.4s, v7.4s, v11.4s\n"
- "add x20, x26, x11\n"
- "fmla v27.4s, v6.4s, v11.4s\n"
- "fmla v29.4s, v5.4s, v11.4s\n"
- "fmla v30.4s, v4.4s, v11.4s\n"
- "fmla v31.4s, v3.4s, v11.4s\n"
+ "fmla v22.4s, v8.4s, v11.4s\n"
+ "add x19, x11, x9\n"
+ "fmla v21.4s, v7.4s, v11.4s\n"
+ "fmla v20.4s, v6.4s, v11.4s\n"
+ "fmla v18.4s, v5.4s, v11.4s\n"
+ "fmla v17.4s, v4.4s, v11.4s\n"
+ "fmla v16.4s, v3.4s, v11.4s\n"
"tbz %x[n_channels], #1, 61f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #0, 62f\n"
- "ld1 { v12.s }[2], [x20]\n"
+ "ld1 { v12.s }[2], [x19]\n"
"b 62f\n"
"61:" // Tile loop: Oddments: Load inputs: (5, 3): Bit 1: Unset
- "ldr s12, [x20, #0x0]\n"
+ "ldr s12, [x19, #0x0]\n"
"62:" // Tile loop: Oddments: Load inputs: (5, 3): Bit 1: End
- "fmla v29.4s, v8.4s, v12.4s\n"
- "fmla v30.4s, v7.4s, v12.4s\n"
- "add x20, x15, x4\n"
- "fmla v31.4s, v6.4s, v12.4s\n"
+ "fmla v18.4s, v8.4s, v12.4s\n"
+ "add x19, x15, x6\n"
+ "fmla v17.4s, v7.4s, v12.4s\n"
+ "fmla v16.4s, v6.4s, v12.4s\n"
"tbz %x[n_channels], #1, 63f\n"
- "ldr d10, [x20], #0x8\n"
+ "ldr d10, [x19], #0x8\n"
"tbz %x[n_channels], #0, 64f\n"
- "ld1 { v10.s }[2], [x20]\n"
+ "ld1 { v10.s }[2], [x19]\n"
"b 64f\n"
"63:" // Tile loop: Oddments: Load inputs: (1, 1): Bit 1: Unset
- "ldr s10, [x20, #0x0]\n"
+ "ldr s10, [x19, #0x0]\n"
"64:" // Tile loop: Oddments: Load inputs: (1, 1): Bit 1: End
- "fmla v16.4s, v4.4s, v10.4s\n"
- "fmla v17.4s, v3.4s, v10.4s\n"
- "add x20, x15, x28\n"
- "fmla v20.4s, v1.4s, v10.4s\n"
- "fmla v21.4s, v0.4s, v10.4s\n"
+ "fmla v31.4s, v4.4s, v10.4s\n"
+ "add x19, x15, x28\n"
+ "fmla v30.4s, v3.4s, v10.4s\n"
+ "fmla v27.4s, v1.4s, v10.4s\n"
+ "fmla v26.4s, v0.4s, v10.4s\n"
"tbz %x[n_channels], #1, 65f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #0, 66f\n"
- "ld1 { v11.s }[2], [x20]\n"
+ "ld1 { v11.s }[2], [x19]\n"
"b 66f\n"
"65:" // Tile loop: Oddments: Load inputs: (1, 4): Bit 1: Unset
- "ldr s11, [x20, #0x0]\n"
+ "ldr s11, [x19, #0x0]\n"
"66:" // Tile loop: Oddments: Load inputs: (1, 4): Bit 1: End
- "fmla v18.4s, v5.4s, v11.4s\n"
- "fmla v19.4s, v4.4s, v11.4s\n"
- "add x20, x9, x4\n"
- "fmla v22.4s, v2.4s, v11.4s\n"
- "fmla v23.4s, v1.4s, v11.4s\n"
+ "fmla v29.4s, v5.4s, v11.4s\n"
+ "add x19, x12, x6\n"
+ "fmla v28.4s, v4.4s, v11.4s\n"
+ "fmla v25.4s, v2.4s, v11.4s\n"
+ "fmla v24.4s, v1.4s, v11.4s\n"
"tbz %x[n_channels], #1, 67f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #0, 68f\n"
- "ld1 { v12.s }[2], [x20]\n"
+ "ld1 { v12.s }[2], [x19]\n"
"b 68f\n"
"67:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 1: Unset
- "ldr s12, [x20, #0x0]\n"
+ "ldr s12, [x19, #0x0]\n"
"68:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 1: End
- "fmla v24.4s, v7.4s, v12.4s\n"
- "fmla v25.4s, v6.4s, v12.4s\n"
- "add x20, x9, x28\n"
- "fmla v28.4s, v4.4s, v12.4s\n"
- "fmla v29.4s, v3.4s, v12.4s\n"
+ "fmla v23.4s, v7.4s, v12.4s\n"
+ "add x19, x12, x28\n"
+ "fmla v22.4s, v6.4s, v12.4s\n"
+ "fmla v19.4s, v4.4s, v12.4s\n"
+ "fmla v18.4s, v3.4s, v12.4s\n"
"tbz %x[n_channels], #1, 69f\n"
- "ldr d10, [x20], #0x8\n"
+ "ldr d10, [x19], #0x8\n"
"tbz %x[n_channels], #0, 70f\n"
- "ld1 { v10.s }[2], [x20]\n"
+ "ld1 { v10.s }[2], [x19]\n"
"b 70f\n"
"69:" // Tile loop: Oddments: Load inputs: (4, 4): Bit 1: Unset
- "ldr s10, [x20, #0x0]\n"
+ "ldr s10, [x19, #0x0]\n"
"70:" // Tile loop: Oddments: Load inputs: (4, 4): Bit 1: End
- "fmla v26.4s, v8.4s, v10.4s\n"
- "fmla v27.4s, v7.4s, v10.4s\n"
- "fmax v16.4s, v16.4s, v15.4s\n"
- "fmla v30.4s, v5.4s, v10.4s\n"
- "fmla v31.4s, v4.4s, v10.4s\n"
- "fmax v17.4s, v17.4s, v15.4s\n"
- "fmax v18.4s, v18.4s, v15.4s\n"
- "fmax v19.4s, v19.4s, v15.4s\n"
- "fmax v20.4s, v20.4s, v15.4s\n"
- "fmax v21.4s, v21.4s, v15.4s\n"
- "fmax v22.4s, v22.4s, v15.4s\n"
- "fmax v23.4s, v23.4s, v15.4s\n"
- "fmax v24.4s, v24.4s, v15.4s\n"
- "fmax v25.4s, v25.4s, v15.4s\n"
- "fmax v26.4s, v26.4s, v15.4s\n"
- "fmax v27.4s, v27.4s, v15.4s\n"
- "fmax v28.4s, v28.4s, v15.4s\n"
- "fmax v29.4s, v29.4s, v15.4s\n"
- "fmax v30.4s, v30.4s, v15.4s\n"
+ "fmla v21.4s, v8.4s, v10.4s\n"
+ "fmla v20.4s, v7.4s, v10.4s\n"
+ "fmla v17.4s, v5.4s, v10.4s\n"
+ "fmla v16.4s, v4.4s, v10.4s\n"
"fmax v31.4s, v31.4s, v15.4s\n"
- "fmin v16.4s, v16.4s, v14.4s\n"
- "fmin v17.4s, v17.4s, v14.4s\n"
- "fmin v18.4s, v18.4s, v14.4s\n"
- "fmin v19.4s, v19.4s, v14.4s\n"
- "fmin v20.4s, v20.4s, v14.4s\n"
- "fmin v21.4s, v21.4s, v14.4s\n"
- "fmin v22.4s, v22.4s, v14.4s\n"
- "fmin v23.4s, v23.4s, v14.4s\n"
- "fmin v24.4s, v24.4s, v14.4s\n"
- "fmin v25.4s, v25.4s, v14.4s\n"
- "fmin v26.4s, v26.4s, v14.4s\n"
- "fmin v27.4s, v27.4s, v14.4s\n"
- "fmin v28.4s, v28.4s, v14.4s\n"
- "fmin v29.4s, v29.4s, v14.4s\n"
- "fmin v30.4s, v30.4s, v14.4s\n"
+ "fmax v30.4s, v30.4s, v15.4s\n"
+ "fmax v29.4s, v29.4s, v15.4s\n"
"fmin v31.4s, v31.4s, v14.4s\n"
+ "fmin v30.4s, v30.4s, v14.4s\n"
+ "fmin v29.4s, v29.4s, v14.4s\n"
+ "fmax v28.4s, v28.4s, v15.4s\n"
+ "fmax v27.4s, v27.4s, v15.4s\n"
+ "fmax v26.4s, v26.4s, v15.4s\n"
+ "fmin v28.4s, v28.4s, v14.4s\n"
+ "fmin v27.4s, v27.4s, v14.4s\n"
+ "fmin v26.4s, v26.4s, v14.4s\n"
+ "fmax v25.4s, v25.4s, v15.4s\n"
+ "fmax v24.4s, v24.4s, v15.4s\n"
+ "fmax v23.4s, v23.4s, v15.4s\n"
+ "fmin v25.4s, v25.4s, v14.4s\n"
+ "fmin v24.4s, v24.4s, v14.4s\n"
+ "fmin v23.4s, v23.4s, v14.4s\n"
+ "fmax v22.4s, v22.4s, v15.4s\n"
+ "fmax v21.4s, v21.4s, v15.4s\n"
+ "fmax v20.4s, v20.4s, v15.4s\n"
+ "fmin v22.4s, v22.4s, v14.4s\n"
+ "fmin v21.4s, v21.4s, v14.4s\n"
+ "fmin v20.4s, v20.4s, v14.4s\n"
+ "fmax v19.4s, v19.4s, v15.4s\n"
+ "fmax v18.4s, v18.4s, v15.4s\n"
+ "fmax v17.4s, v17.4s, v15.4s\n"
+ "fmin v19.4s, v19.4s, v14.4s\n"
+ "fmin v18.4s, v18.4s, v14.4s\n"
+ "fmin v17.4s, v17.4s, v14.4s\n"
+ "fmax v16.4s, v16.4s, v15.4s\n"
+ "fmin v16.4s, v16.4s, v14.4s\n"
"tbz %x[n_channels], #1, 71f\n"
- "mov x23, x8\n"
- "mov x22, x10\n"
- "st1 { v16.d }[0], [x23], x5\n"
- "mov x21, x27\n"
- "mov x20, x24\n"
- "st1 { v20.d }[0], [x22], x5\n"
- "st1 { v24.d }[0], [x21], x5\n"
- "add x8, x8, #0x8\n"
- "add x10, x10, #0x8\n"
- "st1 { v28.d }[0], [x20], x5\n"
- "add x27, x27, #0x8\n"
+ "mov x19, x16\n"
+ "st1 { v31.d }[0], [x19], x17\n"
+ "add x16, x16, #0x8\n"
+ "st1 { v30.d }[0], [x19], x17\n"
+ "mov x21, x26\n"
+ "st1 { v29.d }[0], [x19], x17\n"
+ "st1 { v27.d }[0], [x21], x17\n"
+ "add x26, x26, #0x8\n"
+ "st1 { v28.d }[0], [x19]\n"
+ "mov x20, x25\n"
+ "st1 { v26.d }[0], [x21], x17\n"
+ "add x25, x25, #0x8\n"
+ "st1 { v25.d }[0], [x21], x17\n"
+ "mov x19, x24\n"
+ "st1 { v24.d }[0], [x21]\n"
"add x24, x24, #0x8\n"
- "st1 { v17.d }[0], [x23], x5\n"
- "st1 { v21.d }[0], [x22], x5\n"
- "st1 { v25.d }[0], [x21], x5\n"
- "st1 { v29.d }[0], [x20], x5\n"
- "st1 { v18.d }[0], [x23], x5\n"
- "st1 { v22.d }[0], [x22], x5\n"
- "st1 { v26.d }[0], [x21], x5\n"
- "st1 { v30.d }[0], [x20], x5\n"
- "st1 { v19.d }[0], [x23]\n"
- "st1 { v23.d }[0], [x22]\n"
- "st1 { v27.d }[0], [x21]\n"
- "st1 { v31.d }[0], [x20]\n"
+ "st1 { v23.d }[0], [x20], x17\n"
+ "st1 { v22.d }[0], [x20], x17\n"
+ "st1 { v21.d }[0], [x20], x17\n"
+ "st1 { v20.d }[0], [x20]\n"
+ "st1 { v19.d }[0], [x19], x17\n"
+ "st1 { v18.d }[0], [x19], x17\n"
+ "st1 { v17.d }[0], [x19], x17\n"
+ "st1 { v16.d }[0], [x19]\n"
"tbz %x[n_channels], #0, 72f\n"
- "mov x23, x8\n"
- "mov x22, x10\n"
- "st1 { v16.s }[2], [x23], x5\n"
- "mov x21, x27\n"
- "mov x20, x24\n"
- "st1 { v20.s }[2], [x22], x5\n"
- "st1 { v24.s }[2], [x21], x5\n"
- "st1 { v28.s }[2], [x20], x5\n"
- "st1 { v17.s }[2], [x23], x5\n"
- "st1 { v21.s }[2], [x22], x5\n"
- "st1 { v25.s }[2], [x21], x5\n"
- "st1 { v29.s }[2], [x20], x5\n"
- "st1 { v18.s }[2], [x23], x5\n"
- "st1 { v22.s }[2], [x22], x5\n"
- "st1 { v26.s }[2], [x21], x5\n"
- "st1 { v30.s }[2], [x20], x5\n"
- "st1 { v19.s }[2], [x23]\n"
- "st1 { v23.s }[2], [x22]\n"
- "st1 { v27.s }[2], [x21]\n"
- "st1 { v31.s }[2], [x20]\n"
+ "mov x22, x16\n"
+ "st1 { v31.s }[2], [x22], x17\n"
+ "mov x21, x26\n"
+ "st1 { v30.s }[2], [x22], x17\n"
+ "st1 { v27.s }[2], [x21], x17\n"
+ "mov x20, x25\n"
+ "st1 { v29.s }[2], [x22], x17\n"
+ "mov x19, x24\n"
+ "st1 { v28.s }[2], [x22]\n"
+ "st1 { v26.s }[2], [x21], x17\n"
+ "st1 { v25.s }[2], [x21], x17\n"
+ "st1 { v24.s }[2], [x21]\n"
+ "st1 { v23.s }[2], [x20], x17\n"
+ "st1 { v22.s }[2], [x20], x17\n"
+ "st1 { v21.s }[2], [x20], x17\n"
+ "st1 { v20.s }[2], [x20]\n"
+ "st1 { v19.s }[2], [x19], x17\n"
+ "st1 { v18.s }[2], [x19], x17\n"
+ "st1 { v17.s }[2], [x19], x17\n"
+ "st1 { v16.s }[2], [x19]\n"
"b 72f\n"
"71:" // Tile loop: Oddments: Store: Bit 1: Unset
- "mov x23, x8\n"
- "mov x22, x10\n"
- "st1 { v16.s }[0], [x23], x5\n"
- "mov x21, x27\n"
- "mov x20, x24\n"
- "st1 { v20.s }[0], [x22], x5\n"
- "st1 { v24.s }[0], [x21], x5\n"
- "st1 { v28.s }[0], [x20], x5\n"
- "st1 { v17.s }[0], [x23], x5\n"
- "st1 { v21.s }[0], [x22], x5\n"
- "st1 { v25.s }[0], [x21], x5\n"
- "st1 { v29.s }[0], [x20], x5\n"
- "st1 { v18.s }[0], [x23], x5\n"
- "st1 { v22.s }[0], [x22], x5\n"
- "st1 { v26.s }[0], [x21], x5\n"
- "st1 { v30.s }[0], [x20], x5\n"
- "st1 { v19.s }[0], [x23]\n"
- "st1 { v23.s }[0], [x22]\n"
- "st1 { v27.s }[0], [x21]\n"
- "st1 { v31.s }[0], [x20]\n"
+ "mov x22, x16\n"
+ "st1 { v31.s }[0], [x22], x17\n"
+ "mov x21, x26\n"
+ "mov x20, x25\n"
+ "st1 { v30.s }[0], [x22], x17\n"
+ "st1 { v27.s }[0], [x21], x17\n"
+ "mov x19, x24\n"
+ "st1 { v29.s }[0], [x22], x17\n"
+ "st1 { v28.s }[0], [x22]\n"
+ "st1 { v26.s }[0], [x21], x17\n"
+ "st1 { v25.s }[0], [x21], x17\n"
+ "st1 { v24.s }[0], [x21]\n"
+ "st1 { v23.s }[0], [x20], x17\n"
+ "st1 { v22.s }[0], [x20], x17\n"
+ "st1 { v21.s }[0], [x20], x17\n"
+ "st1 { v20.s }[0], [x20]\n"
+ "st1 { v19.s }[0], [x19], x17\n"
+ "st1 { v18.s }[0], [x19], x17\n"
+ "st1 { v17.s }[0], [x19], x17\n"
+ "st1 { v16.s }[0], [x19]\n"
"72:" // Tile loop: Oddments: Store: Bit 1: End
+
"73:" // Tile loop: End
+ "ldr x4, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "add x21, x4, #0x1\n"
"ldr x26, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x27, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "add x26, x26, #0x1\n"
- "add x21, x27, #0x1\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "cmp x26, x20\n"
"ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "csel x27, x27, x21, LT\n"
+ "add x26, x26, #0x1\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "cmp x26, x19\n"
"csel x26, x26, XZR, LT\n"
- "cmp x27, x20\n"
+ "csel x4, x4, x21, LT\n"
+ "cmp x4, x20\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
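
For readers tracing the restored assembly above: after label 73 ("Tile loop: End") the kernel advances the (tile_i, tile_j) pair in row-major order with an add/cmp/csel sequence, and before every store each accumulator vector is clamped with fmax against the broadcast activation minimum (v15) and fmin against the maximum (v14). The following is a minimal C++ sketch of that control flow under those observations; the names (clamp_activation, TileState, advance_tile) are illustrative and do not appear in the kernel sources:

    #include <algorithm>

    // Per-lane clamp applied before each store: fmax(x, min) then fmin(x, max).
    // (A sketch only: the scalar std::min/std::max pair ignores the NEON
    // fmin/fmax NaN-propagation details.)
    static inline float clamp_activation(float x, float act_min, float act_max)
    {
        return std::min(std::max(x, act_min), act_max);
    }

    // Equivalent of the ldr/add/cmp/csel/blt epilogue at the end of the tile
    // loop: step tile_j; on wrapping past n_tile_cols, zero it
    // ("csel x26, x26, XZR, LT") and step tile_i ("csel x4, x4, x21, LT",
    // where x21 = tile_i + 1); keep looping while tile_i < n_tile_rows.
    struct TileState
    {
        unsigned tile_i, tile_j, n_tile_rows, n_tile_cols;
    };

    static bool advance_tile(TileState &t)
    {
        ++t.tile_j;
        if (t.tile_j >= t.n_tile_cols)
        {
            t.tile_j = 0;
            ++t.tile_i;
        }
        return t.tile_i < t.n_tile_rows; // "cmp x4, x20; blt 1b"
    }

The same epilogue shape recurs in every generic_direct kernel touched by this revert; only the register assignments differ (here the reverted code reuses x19, which the reverted-out commit had avoided).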
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
index 2353045021..aeaf1049f1 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -98,21 +98,21 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
activation_min, activation_max);
__asm__ __volatile__(
- "mov x8, #0x10\n" // cntb _, ALL, #1
- "lsr x17, %x[n_channels], #0x2\n"
- "ldr x16, [%x[params_struct], %[offsetof_args_outptrs]]\n"
+ "ldr x17, [%x[params_struct], %[offsetof_args_outptrs]]\n"
+ "add x16, %x[params_struct], %[offsetof_Args_inptrs]\n"
"ldr x15, [%x[params_struct], %[offsetof_args_params]]\n"
"add x20, %x[params_struct], %[offsetof_args_min]\n"
+ "add x19, %x[params_struct], %[offsetof_args_max]\n"
"ld1r { v15.4s }, [x20]\n"
- "add x20, %x[params_struct], %[offsetof_args_max]\n"
- "ld1r { v14.4s }, [x20]\n"
- "add x14, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "mov x13, #0x0\n"
- "sub x12, XZR, x8\n"
- "cbz x17, 3f\n"
+ "ld1r { v14.4s }, [x19]\n"
+ "mov x14, #0x0\n"
+ "mov x13, #0x10\n" // cntb _, ALL, #1
+ "sub x12, XZR, x13\n"
+ "lsr x11, %x[n_channels], #0x2\n"
+ "cbz x11, 3f\n"
"ldr q13, [x15, #0x0]\n"
"ldr q0, [x15, #0x10]\n"
- "cmp x8, x17, LSL #4\n"
+ "cmp x13, x11, LSL #4\n"
"ldr q1, [x15, #0x20]\n"
"ldr q2, [x15, #0x30]\n"
"ldr q3, [x15, #0x40]\n"
@@ -122,589 +122,589 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr q7, [x15, #0x80]\n"
"ldr q8, [x15, #0x90]\n"
"add x15, x15, #0xa0\n"
- "ldp x11, x10, [x14, #0x0]\n"
- "ldr q9, [x11, x13]\n"
- "ldr q10, [x10, x13]\n"
- "ldp x9, x28, [x14, #0x10]\n"
- "ldr q11, [x9, x13]\n"
- "ldr q12, [x28, x13]\n"
+ "ldp x10, x9, [x16, #0x0]\n"
+ "ldp x28, x27, [x16, #0x10]\n"
+ "ldr q9, [x10, x14]\n"
+ "ldr q10, [x9, x14]\n"
+ "ldr q11, [x28, x14]\n"
+ "ldr q12, [x27, x14]\n"
"bge 2f\n"
"1:" // Channel loop
- "mov v21.16b, v13.16b\n fmla v21.4s, v4.4s, v9.4s\n"
- "mov v16.16b, v13.16b\n fmla v16.4s, v8.4s, v9.4s\n"
- "ldr x27, [x14, #0x20]\n"
- "ldr x26, [x14, #0x30]\n"
- "mov v22.16b, v13.16b\n fmla v22.4s, v3.4s, v9.4s\n"
- "mov v25.16b, v13.16b\n fmla v25.4s, v1.4s, v9.4s\n"
- "ldr x25, [x14, #0x28]\n"
- "ldr x24, [x14, #0x38]\n"
- "mov v26.16b, v13.16b\n fmla v26.4s, v0.4s, v9.4s\n"
- "mov v17.16b, v13.16b\n fmla v17.4s, v7.4s, v9.4s\n"
- "ldr x11, [x14, #0x40]\n"
- "ldr x10, [x14, #0x48]\n"
- "mov v18.16b, v13.16b\n fmla v18.4s, v6.4s, v9.4s\n"
- "fmla v21.4s, v5.4s, v12.4s\n"
- "ldr x9, [x14, #0x50]\n"
- "ldr x28, [x14, #0x58]\n"
- "mov v20.16b, v13.16b\n fmla v20.4s, v5.4s, v9.4s\n"
- "mov v24.16b, v13.16b\n fmla v24.4s, v2.4s, v9.4s\n"
- "ldr q9, [x26, x13]\n"
- "ldr x26, [x14, #0x70]\n"
- "fmla v16.4s, v0.4s, v10.4s\n"
- "ldr q10, [x27, x13]\n"
- "mov v19.16b, v13.16b\n fmla v19.4s, v2.4s, v11.4s\n"
- "ldr q11, [x25, x13]\n"
- "fmla v22.4s, v4.4s, v12.4s\n"
- "fmla v25.4s, v2.4s, v12.4s\n"
- "ldr x27, [x14, #0x60]\n"
- "ldr x25, [x14, #0x68]\n"
- "fmla v26.4s, v1.4s, v12.4s\n"
- "fmla v17.4s, v8.4s, v12.4s\n"
- "ldr x23, [x16, #0x0]\n"
- "ldr x22, [x16, #0x8]\n"
- "fmla v18.4s, v7.4s, v12.4s\n"
- "mov v28.16b, v13.16b\n fmla v28.4s, v6.4s, v10.4s\n"
- "ldr q10, [x10, x13]\n"
- "ldr x10, [x14, #0x88]\n"
- "fmla v21.4s, v7.4s, v9.4s\n"
- "fmla v19.4s, v6.4s, v12.4s\n"
- "ldr x21, [x16, #0x10]\n"
- "ldr x20, [x16, #0x18]\n"
- "mov v23.16b, v13.16b\n fmla v23.4s, v3.4s, v12.4s\n"
- "mov v27.16b, v13.16b\n fmla v27.4s, v0.4s, v12.4s\n"
- "ldr q12, [x24, x13]\n"
- "ldr x24, [x14, #0x78]\n"
- "mov v31.16b, v13.16b\n fmla v31.4s, v8.4s, v11.4s\n"
- "ldr q11, [x11, x13]\n"
- "fmla v22.4s, v6.4s, v9.4s\n"
- "ldr x11, [x14, #0x80]\n"
- "fmla v25.4s, v4.4s, v9.4s\n"
- "fmla v26.4s, v3.4s, v9.4s\n"
+ "mov v31.16b, v13.16b\n fmla v31.4s, v8.4s, v9.4s\n"
+ "ldr x26, [x16, #0x20]\n"
"add x12, x12, #0x10\n"
- "fmla v20.4s, v8.4s, v9.4s\n"
- "fmla v24.4s, v5.4s, v9.4s\n"
- "fmla v28.4s, v2.4s, v9.4s\n"
- "fmla v16.4s, v1.4s, v12.4s\n"
- "fmla v17.4s, v0.4s, v12.4s\n"
- "ldr q12, [x28, x13]\n"
- "fmla v18.4s, v2.4s, v11.4s\n"
- "ldr x28, [x14, #0x98]\n"
- "fmla v21.4s, v8.4s, v10.4s\n"
- "fmla v19.4s, v1.4s, v11.4s\n"
- "ldr q11, [x27, x13]\n"
- "ldr x27, [x14, #0xa0]\n"
- "fmla v22.4s, v7.4s, v10.4s\n"
- "fmla v23.4s, v6.4s, v10.4s\n"
- "fmla v25.4s, v5.4s, v10.4s\n"
- "fmla v26.4s, v4.4s, v10.4s\n"
- "fmla v27.4s, v3.4s, v10.4s\n"
+ "mov v30.16b, v13.16b\n fmla v30.4s, v7.4s, v9.4s\n"
+ "ldr x25, [x16, #0x28]\n"
+ "mov v29.16b, v13.16b\n fmla v29.4s, v6.4s, v9.4s\n"
+ "ldr x24, [x16, #0x30]\n"
+ "mov v27.16b, v13.16b\n fmla v27.4s, v5.4s, v9.4s\n"
+ "ldr x23, [x16, #0x38]\n"
+ "mov v26.16b, v13.16b\n fmla v26.4s, v4.4s, v9.4s\n"
+ "ldr x10, [x16, #0x40]\n"
+ "mov v25.16b, v13.16b\n fmla v25.4s, v3.4s, v9.4s\n"
+ "ldr x9, [x16, #0x48]\n"
+ "mov v23.16b, v13.16b\n fmla v23.4s, v2.4s, v9.4s\n"
+ "ldr x28, [x16, #0x50]\n"
+ "mov v22.16b, v13.16b\n fmla v22.4s, v1.4s, v9.4s\n"
+ "ldr x27, [x16, #0x58]\n"
+ "mov v21.16b, v13.16b\n fmla v21.4s, v0.4s, v9.4s\n"
+ "ldr q9, [x24, x14]\n"
"fmla v31.4s, v0.4s, v10.4s\n"
- "fmla v24.4s, v6.4s, v11.4s\n"
- "fmla v28.4s, v3.4s, v11.4s\n"
- "ldr q11, [x26, x13]\n"
- "ldr x26, [x14, #0xb0]\n"
- "fmla v19.4s, v5.4s, v12.4s\n"
- "fmla v23.4s, v2.4s, v12.4s\n"
- "ldr q12, [x24, x13]\n"
- "ldr x24, [x14, #0xb8]\n"
- "fmla v27.4s, v8.4s, v11.4s\n"
- "fmla v31.4s, v5.4s, v11.4s\n"
- "mov v29.16b, v13.16b\n fmla v29.4s, v1.4s, v9.4s\n"
- "mov v30.16b, v13.16b\n fmla v30.4s, v0.4s, v9.4s\n"
- "ldr q9, [x9, x13]\n"
- "ldr x9, [x14, #0x90]\n"
- "fmla v29.4s, v2.4s, v10.4s\n"
- "fmla v30.4s, v1.4s, v10.4s\n"
- "ldr q10, [x25, x13]\n"
- "ldr x25, [x14, #0xa8]\n"
- "fmla v16.4s, v3.4s, v9.4s\n"
- "fmla v20.4s, v0.4s, v9.4s\n"
- "ldr q11, [x11, x13]\n"
- "ldr x11, [x14, #0xc0]\n"
- "fmla v17.4s, v4.4s, v10.4s\n"
- "fmla v18.4s, v3.4s, v10.4s\n"
- "fmla v21.4s, v1.4s, v10.4s\n"
- "fmla v22.4s, v0.4s, v10.4s\n"
- "fmla v16.4s, v5.4s, v10.4s\n"
- "fmla v20.4s, v2.4s, v10.4s\n"
- "ldr q10, [x10, x13]\n"
- "ldr x10, [x14, #0xc8]\n"
- "fmla v17.4s, v5.4s, v12.4s\n"
- "fmla v18.4s, v4.4s, v12.4s\n"
- "fmla v21.4s, v2.4s, v12.4s\n"
- "fmla v19.4s, v3.4s, v12.4s\n"
- "fmla v22.4s, v1.4s, v12.4s\n"
- "fmla v23.4s, v0.4s, v12.4s\n"
- "ldr q12, [x28, x13]\n"
- "ldr x28, [x14, #0xd8]\n"
- "fmla v28.4s, v7.4s, v11.4s\n"
- "fmla v29.4s, v6.4s, v11.4s\n"
- "ldr q11, [x9, x13]\n"
- "ldr x9, [x14, #0xd0]\n"
- "fmla v16.4s, v7.4s, v10.4s\n"
- "fmla v17.4s, v6.4s, v10.4s\n"
- "fmla v20.4s, v4.4s, v10.4s\n"
- "fmla v21.4s, v3.4s, v10.4s\n"
- "fmla v24.4s, v1.4s, v10.4s\n"
- "fmla v25.4s, v0.4s, v10.4s\n"
- "ldr q10, [x27, x13]\n"
- "ldr x27, [x14, #0xe0]\n"
- "fmla v18.4s, v8.4s, v12.4s\n"
- "fmla v30.4s, v8.4s, v11.4s\n"
- "fmla v31.4s, v7.4s, v11.4s\n"
- "ldr q11, [x25, x13]\n"
- "fmla v27.4s, v1.4s, v12.4s\n"
- "ldr x25, [x14, #0xe8]\n"
- "fmla v19.4s, v7.4s, v12.4s\n"
- "fmla v22.4s, v5.4s, v12.4s\n"
- "fmla v23.4s, v4.4s, v12.4s\n"
- "fmla v26.4s, v2.4s, v12.4s\n"
- "ldr q12, [x26, x13]\n"
- "ldr x26, [x14, #0xf0]\n"
- "fmla v16.4s, v2.4s, v10.4s\n"
- "fmla v17.4s, v1.4s, v10.4s\n"
- "fmla v18.4s, v0.4s, v10.4s\n"
- "ldr q10, [x24, x13]\n"
- "fmla v20.4s, v7.4s, v11.4s\n"
- "ldr x24, [x14, #0xf8]\n"
- "fmla v21.4s, v6.4s, v11.4s\n"
- "fmla v24.4s, v4.4s, v11.4s\n"
- "fmla v25.4s, v3.4s, v11.4s\n"
+ "ldr q10, [x26, x14]\n"
+ "mov v28.16b, v13.16b\n fmla v28.4s, v2.4s, v11.4s\n"
+ "ldr q11, [x25, x14]\n"
+ "fmla v30.4s, v8.4s, v12.4s\n"
+ "ldr x26, [x16, #0x60]\n"
+ "fmla v29.4s, v7.4s, v12.4s\n"
+ "ldr x25, [x16, #0x68]\n"
+ "fmla v26.4s, v5.4s, v12.4s\n"
+ "ldr x24, [x16, #0x70]\n"
+ "fmla v28.4s, v6.4s, v12.4s\n"
+ "ldr x22, [x17, #0x0]\n"
+ "fmla v25.4s, v4.4s, v12.4s\n"
+ "ldr x21, [x17, #0x8]\n"
+ "mov v24.16b, v13.16b\n fmla v24.4s, v3.4s, v12.4s\n"
+ "ldr x20, [x17, #0x10]\n"
+ "fmla v22.4s, v2.4s, v12.4s\n"
+ "ldr x19, [x17, #0x18]\n"
+ "fmla v21.4s, v1.4s, v12.4s\n"
+ "mov v20.16b, v13.16b\n fmla v20.4s, v0.4s, v12.4s\n"
+ "ldr q12, [x23, x14]\n"
+ "mov v19.16b, v13.16b\n fmla v19.4s, v6.4s, v10.4s\n"
+ "ldr q10, [x9, x14]\n"
+ "mov v16.16b, v13.16b\n fmla v16.4s, v8.4s, v11.4s\n"
+ "ldr q11, [x10, x14]\n"
+ "fmla v27.4s, v8.4s, v9.4s\n"
+ "ldr x23, [x16, #0x78]\n"
+ "fmla v26.4s, v7.4s, v9.4s\n"
+ "ldr x10, [x16, #0x80]\n"
+ "fmla v25.4s, v6.4s, v9.4s\n"
+ "ldr x9, [x16, #0x88]\n"
+ "fmla v23.4s, v5.4s, v9.4s\n"
+ "fmla v22.4s, v4.4s, v9.4s\n"
+ "fmla v21.4s, v3.4s, v9.4s\n"
+ "fmla v19.4s, v2.4s, v9.4s\n"
+ "mov v18.16b, v13.16b\n fmla v18.4s, v1.4s, v9.4s\n"
+ "mov v17.16b, v13.16b\n fmla v17.4s, v0.4s, v9.4s\n"
+ "ldr q9, [x28, x14]\n"
+ "fmla v31.4s, v1.4s, v12.4s\n"
+ "ldr x28, [x16, #0x90]\n"
+ "fmla v30.4s, v0.4s, v12.4s\n"
+ "ldr q12, [x27, x14]\n"
+ "fmla v29.4s, v2.4s, v11.4s\n"
+ "ldr x27, [x16, #0x98]\n"
"fmla v28.4s, v1.4s, v11.4s\n"
- "fmla v29.4s, v0.4s, v11.4s\n"
- "ldr q11, [x11, x13]\n"
- "fmla v27.4s, v4.4s, v11.4s\n"
- "ldr x11, [x14, #0x100]\n"
- "fmla v30.4s, v2.4s, v11.4s\n"
- "fmla v17.4s, v2.4s, v12.4s\n"
- "fmla v18.4s, v1.4s, v12.4s\n"
- "fmla v19.4s, v0.4s, v12.4s\n"
- "ldr q12, [x10, x13]\n"
- "ldr x10, [x14, #0x108]\n"
- "fmla v16.4s, v6.4s, v10.4s\n"
+ "ldr q11, [x26, x14]\n"
+ "fmla v26.4s, v8.4s, v10.4s\n"
+ "ldr x26, [x16, #0xa0]\n"
+ "fmla v25.4s, v7.4s, v10.4s\n"
+ "ldr q13, [x15, #0x0]\n"
+ "fmla v24.4s, v6.4s, v10.4s\n"
+ "fmla v22.4s, v5.4s, v10.4s\n"
+ "fmla v21.4s, v4.4s, v10.4s\n"
"fmla v20.4s, v3.4s, v10.4s\n"
- "fmla v24.4s, v0.4s, v10.4s\n"
- "ldr q10, [x9, x13]\n"
- "fmla v22.4s, v8.4s, v11.4s\n"
- "ldr x9, [x14, #0x110]\n"
- "fmla v23.4s, v7.4s, v11.4s\n"
- "fmla v26.4s, v5.4s, v11.4s\n"
- "fmla v31.4s, v1.4s, v11.4s\n"
- "ldr q11, [x28, x13]\n"
- "fmla v27.4s, v2.4s, v12.4s\n"
- "ldr x28, [x14, #0x118]\n"
- "fmla v28.4s, v0.4s, v10.4s\n"
- "fmla v29.4s, v4.4s, v11.4s\n"
- "fmla v30.4s, v3.4s, v11.4s\n"
- "fmla v19.4s, v8.4s, v12.4s\n"
- "fmla v23.4s, v5.4s, v12.4s\n"
- "ldr q12, [x27, x13]\n"
- "fmla v20.4s, v6.4s, v10.4s\n"
- "fmla v24.4s, v3.4s, v10.4s\n"
- "ldr q10, [x25, x13]\n"
- "fmla v25.4s, v7.4s, v11.4s\n"
- "fmla v26.4s, v6.4s, v11.4s\n"
- "fmla v28.4s, v5.4s, v11.4s\n"
- "fmla v27.4s, v5.4s, v12.4s\n"
- "fmla v31.4s, v2.4s, v12.4s\n"
- "fmla v29.4s, v7.4s, v10.4s\n"
+ "fmla v18.4s, v2.4s, v10.4s\n"
+ "fmla v17.4s, v1.4s, v10.4s\n"
+ "fmla v16.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x25, x14]\n"
+ "fmla v31.4s, v3.4s, v9.4s\n"
+ "ldr x25, [x16, #0xa8]\n"
+ "fmla v27.4s, v0.4s, v9.4s\n"
+ "fmla v28.4s, v5.4s, v12.4s\n"
+ "fmla v24.4s, v2.4s, v12.4s\n"
+ "ldr q12, [x23, x14]\n"
+ "fmla v23.4s, v6.4s, v11.4s\n"
+ "ldr x23, [x16, #0xb8]\n"
+ "fmla v19.4s, v3.4s, v11.4s\n"
+ "ldr q11, [x24, x14]\n"
+ "fmla v31.4s, v5.4s, v10.4s\n"
+ "ldr x24, [x16, #0xb0]\n"
+ "fmla v30.4s, v4.4s, v10.4s\n"
+ "fmla v29.4s, v3.4s, v10.4s\n"
+ "fmla v27.4s, v2.4s, v10.4s\n"
+ "fmla v26.4s, v1.4s, v10.4s\n"
+ "fmla v25.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x9, x14]\n"
+ "fmla v20.4s, v8.4s, v11.4s\n"
+ "ldr x9, [x16, #0xc8]\n"
+ "fmla v16.4s, v5.4s, v11.4s\n"
+ "ldr q11, [x10, x14]\n"
+ "fmla v30.4s, v5.4s, v12.4s\n"
+ "ldr x10, [x16, #0xc0]\n"
+ "fmla v29.4s, v4.4s, v12.4s\n"
+ "fmla v28.4s, v3.4s, v12.4s\n"
+ "fmla v26.4s, v2.4s, v12.4s\n"
+ "fmla v25.4s, v1.4s, v12.4s\n"
+ "fmla v24.4s, v0.4s, v12.4s\n"
+ "ldr q12, [x27, x14]\n"
+ "fmla v19.4s, v7.4s, v11.4s\n"
+ "ldr x27, [x16, #0xd8]\n"
+ "fmla v18.4s, v6.4s, v11.4s\n"
+ "ldr q11, [x28, x14]\n"
+ "fmla v31.4s, v7.4s, v10.4s\n"
+ "ldr x28, [x16, #0xd0]\n"
"fmla v30.4s, v6.4s, v10.4s\n"
- "fmla v24.4s, v8.4s, v11.4s\n"
- "ldr q11, [x26, x13]\n"
- "fmla v28.4s, v8.4s, v10.4s\n"
- "ldr q10, [x11, x13]\n"
+ "fmla v27.4s, v4.4s, v10.4s\n"
+ "fmla v26.4s, v3.4s, v10.4s\n"
+ "fmla v23.4s, v1.4s, v10.4s\n"
+ "fmla v22.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x26, x14]\n"
+ "fmla v17.4s, v8.4s, v11.4s\n"
+ "ldr x26, [x16, #0xe0]\n"
+ "fmla v16.4s, v7.4s, v11.4s\n"
+ "ldr q11, [x25, x14]\n"
+ "fmla v29.4s, v8.4s, v12.4s\n"
+ "ldr x25, [x16, #0xe8]\n"
+ "fmla v28.4s, v7.4s, v12.4s\n"
+ "fmla v25.4s, v5.4s, v12.4s\n"
+ "fmla v24.4s, v4.4s, v12.4s\n"
+ "fmla v21.4s, v2.4s, v12.4s\n"
+ "fmla v20.4s, v1.4s, v12.4s\n"
+ "ldr q12, [x24, x14]\n"
+ "fmla v31.4s, v2.4s, v10.4s\n"
+ "ldr x24, [x16, #0xf0]\n"
+ "fmla v30.4s, v1.4s, v10.4s\n"
+ "fmla v29.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x23, x14]\n"
+ "fmla v27.4s, v7.4s, v11.4s\n"
+ "ldr x23, [x16, #0xf8]\n"
+ "fmla v26.4s, v6.4s, v11.4s\n"
+ "fmla v23.4s, v4.4s, v11.4s\n"
+ "fmla v22.4s, v3.4s, v11.4s\n"
+ "fmla v19.4s, v1.4s, v11.4s\n"
+ "fmla v18.4s, v0.4s, v11.4s\n"
+ "ldr q11, [x10, x14]\n"
+ "fmla v30.4s, v2.4s, v12.4s\n"
+ "ldr x10, [x16, #0x100]\n"
+ "fmla v29.4s, v1.4s, v12.4s\n"
+ "fmla v28.4s, v0.4s, v12.4s\n"
+ "ldr q12, [x9, x14]\n"
+ "fmla v31.4s, v6.4s, v10.4s\n"
+ "ldr x9, [x16, #0x108]\n"
+ "fmla v27.4s, v3.4s, v10.4s\n"
+ "fmla v23.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x28, x14]\n"
"fmla v25.4s, v8.4s, v11.4s\n"
- "fmla v26.4s, v7.4s, v11.4s\n"
- "fmla v27.4s, v6.4s, v11.4s\n"
- "fmla v29.4s, v5.4s, v11.4s\n"
- "fmla v30.4s, v4.4s, v11.4s\n"
- "fmla v31.4s, v3.4s, v11.4s\n"
- "ldr q11, [x10, x13]\n"
- "ldp x11, x10, [x14, #0x0]\n"
- "fmla v23.4s, v8.4s, v12.4s\n"
- "ldr q12, [x24, x13]\n"
- "fmla v16.4s, v4.4s, v10.4s\n"
- "fmax v16.4s, v16.4s, v15.4s\n"
- "fmla v17.4s, v3.4s, v10.4s\n"
+ "ldr x28, [x16, #0x110]\n"
+ "fmla v24.4s, v7.4s, v11.4s\n"
+ "fmla v21.4s, v5.4s, v11.4s\n"
+ "fmla v20.4s, v4.4s, v11.4s\n"
+ "fmla v17.4s, v2.4s, v11.4s\n"
+ "fmla v16.4s, v1.4s, v11.4s\n"
+ "ldr q11, [x27, x14]\n"
+ "fmla v28.4s, v8.4s, v12.4s\n"
+ "ldr x27, [x16, #0x118]\n"
+ "fmla v24.4s, v5.4s, v12.4s\n"
+ "fmla v20.4s, v2.4s, v12.4s\n"
+ "ldr q12, [x26, x14]\n"
+ "fmla v27.4s, v6.4s, v10.4s\n"
+ "fmla v23.4s, v3.4s, v10.4s\n"
+ "fmla v19.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x25, x14]\n"
+ "fmla v22.4s, v7.4s, v11.4s\n"
+ "fmla v21.4s, v6.4s, v11.4s\n"
+ "fmla v23.4s, v8.4s, v11.4s\n"
+ "fmla v19.4s, v5.4s, v11.4s\n"
+ "fmla v18.4s, v4.4s, v11.4s\n"
+ "fmla v17.4s, v3.4s, v11.4s\n"
+ "ldr q11, [x24, x14]\n"
+ "fmla v24.4s, v8.4s, v12.4s\n"
+ "fmla v20.4s, v5.4s, v12.4s\n"
+ "fmla v16.4s, v2.4s, v12.4s\n"
+ "ldr q12, [x23, x14]\n"
+ "fmla v19.4s, v8.4s, v10.4s\n"
+ "fmla v18.4s, v7.4s, v10.4s\n"
+ "fmla v17.4s, v6.4s, v10.4s\n"
+ "ldr q10, [x10, x14]\n"
+ "fmla v22.4s, v8.4s, v11.4s\n"
+ "fmla v21.4s, v7.4s, v11.4s\n"
+ "fmla v20.4s, v6.4s, v11.4s\n"
"fmla v18.4s, v5.4s, v11.4s\n"
- "fmax v17.4s, v17.4s, v15.4s\n"
- "fmla v19.4s, v4.4s, v11.4s\n"
- "fmla v29.4s, v8.4s, v12.4s\n"
- "fmax v18.4s, v18.4s, v15.4s\n"
- "fmla v30.4s, v7.4s, v12.4s\n"
- "fmla v31.4s, v6.4s, v12.4s\n"
- "ldr q12, [x9, x13]\n"
- "fmax v19.4s, v19.4s, v15.4s\n"
- "fmla v20.4s, v1.4s, v10.4s\n"
- "fmla v21.4s, v0.4s, v10.4s\n"
- "ldr q10, [x28, x13]\n"
- "ldr q9, [x11, x8]\n"
- "fmla v22.4s, v2.4s, v11.4s\n"
- "ldr q13, [x15, #0x0]\n"
- "fmla v23.4s, v1.4s, v11.4s\n"
+ "fmla v17.4s, v4.4s, v11.4s\n"
+ "fmla v16.4s, v3.4s, v11.4s\n"
+ "ldr q11, [x9, x14]\n"
+ "fmla v31.4s, v4.4s, v10.4s\n"
+ "ldp x10, x9, [x16, #0x0]\n"
+ "fmla v18.4s, v8.4s, v12.4s\n"
+ "ldr q9, [x10, x13]\n"
+ "fmla v17.4s, v7.4s, v12.4s\n"
+ "fmla v16.4s, v6.4s, v12.4s\n"
+ "ldr q12, [x28, x14]\n"
+ "fmla v30.4s, v3.4s, v10.4s\n"
+ "fmla v27.4s, v1.4s, v10.4s\n"
+ "fmla v26.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x27, x14]\n"
+ "add x14, x14, #0x10\n"
+ "fmla v29.4s, v5.4s, v11.4s\n"
+ "ldp x28, x27, [x16, #0x10]\n"
+ "fmla v28.4s, v4.4s, v11.4s\n"
"ldr q0, [x15, #0x10]\n"
- "ldr q1, [x15, #0x20]\n"
- "fmla v24.4s, v7.4s, v12.4s\n"
- "fmla v25.4s, v6.4s, v12.4s\n"
+ "fmla v25.4s, v2.4s, v11.4s\n"
"ldr q2, [x15, #0x30]\n"
- "fmla v26.4s, v8.4s, v10.4s\n"
+ "fmla v24.4s, v1.4s, v11.4s\n"
+ "ldr q11, [x28, x13]\n"
+ "fmla v23.4s, v7.4s, v12.4s\n"
+ "ldr q1, [x15, #0x20]\n"
+ "fmla v22.4s, v6.4s, v12.4s\n"
"ldr q6, [x15, #0x70]\n"
- "fmla v27.4s, v7.4s, v10.4s\n"
- "ldr q7, [x15, #0x80]\n"
- "fmin v16.4s, v16.4s, v14.4s\n"
- "fmin v17.4s, v17.4s, v14.4s\n"
- "str q16, [x23, x12]\n"
- "ldr q8, [x15, #0x90]\n"
- "fmin v18.4s, v18.4s, v14.4s\n"
- "fmin v19.4s, v19.4s, v14.4s\n"
- "str q17, [x22, x12]\n"
- "ldr x23, [x16, #0x20]\n"
- "fmax v20.4s, v20.4s, v15.4s\n"
- "fmax v21.4s, v21.4s, v15.4s\n"
- "str q18, [x21, x12]\n"
- "ldr x22, [x16, #0x28]\n"
- "fmax v22.4s, v22.4s, v15.4s\n"
- "fmax v23.4s, v23.4s, v15.4s\n"
- "str q19, [x20, x12]\n"
- "ldr x21, [x16, #0x30]\n"
- "ldr x20, [x16, #0x38]\n"
- "fmla v28.4s, v4.4s, v12.4s\n"
- "fmla v29.4s, v3.4s, v12.4s\n"
+ "fmla v19.4s, v4.4s, v12.4s\n"
+ "fmla v18.4s, v3.4s, v12.4s\n"
+ "ldr q12, [x27, x13]\n"
+ "fmla v21.4s, v8.4s, v10.4s\n"
"ldr q3, [x15, #0x40]\n"
- "fmla v30.4s, v5.4s, v10.4s\n"
+ "fmla v20.4s, v7.4s, v10.4s\n"
+ "ldr q7, [x15, #0x80]\n"
+ "fmla v17.4s, v5.4s, v10.4s\n"
"ldr q5, [x15, #0x60]\n"
- "fmla v31.4s, v4.4s, v10.4s\n"
- "ldr q10, [x10, x8]\n"
+ "fmla v16.4s, v4.4s, v10.4s\n"
+ "ldr q10, [x9, x13]\n"
+ "add x13, x13, #0x10\n"
+ "fmax v31.4s, v31.4s, v15.4s\n"
"ldr q4, [x15, #0x50]\n"
- "fmin v20.4s, v20.4s, v14.4s\n"
- "fmin v21.4s, v21.4s, v14.4s\n"
- "str q20, [x23, x12]\n"
- "fmin v22.4s, v22.4s, v14.4s\n"
- "fmin v23.4s, v23.4s, v14.4s\n"
- "str q21, [x22, x12]\n"
- "ldr x23, [x16, #0x40]\n"
- "fmax v24.4s, v24.4s, v15.4s\n"
- "fmax v25.4s, v25.4s, v15.4s\n"
- "str q22, [x21, x12]\n"
- "ldr x22, [x16, #0x48]\n"
- "fmax v26.4s, v26.4s, v15.4s\n"
- "fmax v27.4s, v27.4s, v15.4s\n"
- "str q23, [x20, x12]\n"
- "ldr x21, [x16, #0x50]\n"
- "ldr x20, [x16, #0x58]\n"
- "ldp x9, x28, [x14, #0x10]\n"
- "fmin v24.4s, v24.4s, v14.4s\n"
- "fmin v25.4s, v25.4s, v14.4s\n"
- "ldr q11, [x9, x8]\n"
- "ldr q12, [x28, x8]\n"
- "fmin v26.4s, v26.4s, v14.4s\n"
- "fmin v27.4s, v27.4s, v14.4s\n"
- "fmax v28.4s, v28.4s, v15.4s\n"
- "fmax v29.4s, v29.4s, v15.4s\n"
- "str q24, [x23, x12]\n"
- "ldr x23, [x16, #0x60]\n"
+ "cmp x13, x11, LSL #4\n"
"fmax v30.4s, v30.4s, v15.4s\n"
- "fmax v31.4s, v31.4s, v15.4s\n"
- "str q25, [x22, x12]\n"
- "ldr x22, [x16, #0x68]\n"
- "str q26, [x21, x12]\n"
- "ldr x21, [x16, #0x70]\n"
- "add x8, x8, #0x10\n"
- "cmp x8, x17, LSL #4\n"
- "str q27, [x20, x12]\n"
- "ldr x20, [x16, #0x78]\n"
- "fmin v28.4s, v28.4s, v14.4s\n"
- "fmin v29.4s, v29.4s, v14.4s\n"
- "fmin v30.4s, v30.4s, v14.4s\n"
- "fmin v31.4s, v31.4s, v14.4s\n"
- "add x13, x13, #0x10\n"
- "str q28, [x23, x12]\n"
- "str q29, [x22, x12]\n"
+ "ldr q8, [x15, #0x90]\n"
"add x15, x15, #0xa0\n"
+ "fmax v29.4s, v29.4s, v15.4s\n"
+ "fmax v28.4s, v28.4s, v15.4s\n"
+ "fmin v31.4s, v31.4s, v14.4s\n"
+ "str q31, [x22, x12]\n"
+ "fmin v30.4s, v30.4s, v14.4s\n"
+ "fmin v29.4s, v29.4s, v14.4s\n"
+ "ldr x22, [x17, #0x20]\n"
+ "fmin v28.4s, v28.4s, v14.4s\n"
"str q30, [x21, x12]\n"
- "str q31, [x20, x12]\n"
+ "fmax v27.4s, v27.4s, v15.4s\n"
+ "fmax v26.4s, v26.4s, v15.4s\n"
+ "str q29, [x20, x12]\n"
+ "fmax v25.4s, v25.4s, v15.4s\n"
+ "str q28, [x19, x12]\n"
+ "fmax v24.4s, v24.4s, v15.4s\n"
+ "ldr x21, [x17, #0x28]\n"
+ "fmin v27.4s, v27.4s, v14.4s\n"
+ "ldr x20, [x17, #0x30]\n"
+ "fmin v26.4s, v26.4s, v14.4s\n"
+ "ldr x19, [x17, #0x38]\n"
+ "fmin v25.4s, v25.4s, v14.4s\n"
+ "str q27, [x22, x12]\n"
+ "fmin v24.4s, v24.4s, v14.4s\n"
+ "str q26, [x21, x12]\n"
+ "fmax v23.4s, v23.4s, v15.4s\n"
+ "str q25, [x20, x12]\n"
+ "fmax v22.4s, v22.4s, v15.4s\n"
+ "str q24, [x19, x12]\n"
+ "fmax v21.4s, v21.4s, v15.4s\n"
+ "ldr x22, [x17, #0x40]\n"
+ "fmin v23.4s, v23.4s, v14.4s\n"
+ "ldr x21, [x17, #0x48]\n"
+ "fmin v22.4s, v22.4s, v14.4s\n"
+ "ldr x20, [x17, #0x50]\n"
+ "fmin v21.4s, v21.4s, v14.4s\n"
+ "str q23, [x22, x12]\n"
+ "fmax v20.4s, v20.4s, v15.4s\n"
+ "str q22, [x21, x12]\n"
+ "fmax v19.4s, v19.4s, v15.4s\n"
+ "str q21, [x20, x12]\n"
+ "fmax v18.4s, v18.4s, v15.4s\n"
+ "ldr x19, [x17, #0x58]\n"
+ "fmin v20.4s, v20.4s, v14.4s\n"
+ "ldr x22, [x17, #0x60]\n"
+ "fmin v19.4s, v19.4s, v14.4s\n"
+ "ldr x21, [x17, #0x68]\n"
+ "fmin v18.4s, v18.4s, v14.4s\n"
+ "str q20, [x19, x12]\n"
+ "fmax v17.4s, v17.4s, v15.4s\n"
+ "str q19, [x22, x12]\n"
+ "fmax v16.4s, v16.4s, v15.4s\n"
+ "str q18, [x21, x12]\n"
+ "ldr x20, [x17, #0x70]\n"
+ "fmin v17.4s, v17.4s, v14.4s\n"
+ "ldr x19, [x17, #0x78]\n"
+ "fmin v16.4s, v16.4s, v14.4s\n"
+ "str q17, [x20, x12]\n"
+ "str q16, [x19, x12]\n"
"blt 1b\n"
"2:" // Channel tail
- "mov v21.16b, v13.16b\n fmla v21.4s, v4.4s, v9.4s\n"
- "mov v16.16b, v13.16b\n fmla v16.4s, v8.4s, v9.4s\n"
- "ldr x27, [x14, #0x20]\n"
- "ldr x26, [x14, #0x30]\n"
- "mov v22.16b, v13.16b\n fmla v22.4s, v3.4s, v9.4s\n"
- "mov v25.16b, v13.16b\n fmla v25.4s, v1.4s, v9.4s\n"
- "ldr x25, [x14, #0x28]\n"
- "ldr x24, [x14, #0x38]\n"
- "mov v26.16b, v13.16b\n fmla v26.4s, v0.4s, v9.4s\n"
- "mov v17.16b, v13.16b\n fmla v17.4s, v7.4s, v9.4s\n"
- "ldr x11, [x14, #0x40]\n"
- "ldr x10, [x14, #0x48]\n"
- "mov v18.16b, v13.16b\n fmla v18.4s, v6.4s, v9.4s\n"
- "fmla v21.4s, v5.4s, v12.4s\n"
- "ldr x9, [x14, #0x50]\n"
- "ldr x28, [x14, #0x58]\n"
- "mov v20.16b, v13.16b\n fmla v20.4s, v5.4s, v9.4s\n"
- "mov v24.16b, v13.16b\n fmla v24.4s, v2.4s, v9.4s\n"
- "ldr q9, [x26, x13]\n"
- "ldr x26, [x14, #0x70]\n"
- "fmla v16.4s, v0.4s, v10.4s\n"
- "ldr q10, [x27, x13]\n"
- "mov v19.16b, v13.16b\n fmla v19.4s, v2.4s, v11.4s\n"
- "ldr q11, [x25, x13]\n"
- "fmla v22.4s, v4.4s, v12.4s\n"
- "fmla v25.4s, v2.4s, v12.4s\n"
- "ldr x27, [x14, #0x60]\n"
- "ldr x25, [x14, #0x68]\n"
- "fmla v26.4s, v1.4s, v12.4s\n"
- "fmla v17.4s, v8.4s, v12.4s\n"
- "ldr x23, [x16, #0x0]\n"
- "ldr x22, [x16, #0x8]\n"
- "fmla v18.4s, v7.4s, v12.4s\n"
- "mov v28.16b, v13.16b\n fmla v28.4s, v6.4s, v10.4s\n"
- "ldr q10, [x10, x13]\n"
- "ldr x10, [x14, #0x88]\n"
- "fmla v21.4s, v7.4s, v9.4s\n"
- "fmla v19.4s, v6.4s, v12.4s\n"
- "ldr x21, [x16, #0x10]\n"
- "ldr x20, [x16, #0x18]\n"
- "mov v23.16b, v13.16b\n fmla v23.4s, v3.4s, v12.4s\n"
- "mov v27.16b, v13.16b\n fmla v27.4s, v0.4s, v12.4s\n"
- "ldr q12, [x24, x13]\n"
- "ldr x24, [x14, #0x78]\n"
- "mov v31.16b, v13.16b\n fmla v31.4s, v8.4s, v11.4s\n"
- "ldr q11, [x11, x13]\n"
- "fmla v22.4s, v6.4s, v9.4s\n"
- "ldr x11, [x14, #0x80]\n"
- "fmla v25.4s, v4.4s, v9.4s\n"
- "fmla v26.4s, v3.4s, v9.4s\n"
+ "mov v31.16b, v13.16b\n fmla v31.4s, v8.4s, v9.4s\n"
+ "ldr x26, [x16, #0x20]\n"
"add x12, x12, #0x10\n"
- "fmla v20.4s, v8.4s, v9.4s\n"
- "fmla v24.4s, v5.4s, v9.4s\n"
- "fmla v28.4s, v2.4s, v9.4s\n"
- "fmla v16.4s, v1.4s, v12.4s\n"
- "fmla v17.4s, v0.4s, v12.4s\n"
- "ldr q12, [x28, x13]\n"
- "fmla v18.4s, v2.4s, v11.4s\n"
- "ldr x28, [x14, #0x98]\n"
- "fmla v21.4s, v8.4s, v10.4s\n"
- "fmla v19.4s, v1.4s, v11.4s\n"
- "ldr q11, [x27, x13]\n"
- "ldr x27, [x14, #0xa0]\n"
- "fmla v22.4s, v7.4s, v10.4s\n"
- "fmla v23.4s, v6.4s, v10.4s\n"
- "fmla v25.4s, v5.4s, v10.4s\n"
- "fmla v26.4s, v4.4s, v10.4s\n"
- "fmla v27.4s, v3.4s, v10.4s\n"
+ "mov v30.16b, v13.16b\n fmla v30.4s, v7.4s, v9.4s\n"
+ "ldr x25, [x16, #0x28]\n"
+ "mov v29.16b, v13.16b\n fmla v29.4s, v6.4s, v9.4s\n"
+ "ldr x24, [x16, #0x30]\n"
+ "mov v27.16b, v13.16b\n fmla v27.4s, v5.4s, v9.4s\n"
+ "ldr x23, [x16, #0x38]\n"
+ "mov v26.16b, v13.16b\n fmla v26.4s, v4.4s, v9.4s\n"
+ "ldr x10, [x16, #0x40]\n"
+ "mov v25.16b, v13.16b\n fmla v25.4s, v3.4s, v9.4s\n"
+ "ldr x9, [x16, #0x48]\n"
+ "mov v23.16b, v13.16b\n fmla v23.4s, v2.4s, v9.4s\n"
+ "ldr x28, [x16, #0x50]\n"
+ "mov v22.16b, v13.16b\n fmla v22.4s, v1.4s, v9.4s\n"
+ "ldr x27, [x16, #0x58]\n"
+ "mov v21.16b, v13.16b\n fmla v21.4s, v0.4s, v9.4s\n"
+ "ldr q9, [x24, x14]\n"
"fmla v31.4s, v0.4s, v10.4s\n"
- "fmla v24.4s, v6.4s, v11.4s\n"
- "fmla v28.4s, v3.4s, v11.4s\n"
- "ldr q11, [x26, x13]\n"
- "ldr x26, [x14, #0xb0]\n"
- "fmla v19.4s, v5.4s, v12.4s\n"
- "fmla v23.4s, v2.4s, v12.4s\n"
- "ldr q12, [x24, x13]\n"
- "ldr x24, [x14, #0xb8]\n"
- "fmla v27.4s, v8.4s, v11.4s\n"
- "fmla v31.4s, v5.4s, v11.4s\n"
- "mov v29.16b, v13.16b\n fmla v29.4s, v1.4s, v9.4s\n"
- "mov v30.16b, v13.16b\n fmla v30.4s, v0.4s, v9.4s\n"
- "ldr q9, [x9, x13]\n"
- "ldr x9, [x14, #0x90]\n"
- "fmla v29.4s, v2.4s, v10.4s\n"
- "fmla v30.4s, v1.4s, v10.4s\n"
- "ldr q10, [x25, x13]\n"
- "ldr x25, [x14, #0xa8]\n"
- "fmla v16.4s, v3.4s, v9.4s\n"
- "fmla v20.4s, v0.4s, v9.4s\n"
- "ldr q11, [x11, x13]\n"
- "ldr x11, [x14, #0xc0]\n"
- "fmla v17.4s, v4.4s, v10.4s\n"
- "fmla v18.4s, v3.4s, v10.4s\n"
- "fmla v21.4s, v1.4s, v10.4s\n"
- "fmla v22.4s, v0.4s, v10.4s\n"
- "fmla v16.4s, v5.4s, v10.4s\n"
- "fmla v20.4s, v2.4s, v10.4s\n"
- "ldr q10, [x10, x13]\n"
- "ldr x10, [x14, #0xc8]\n"
- "fmla v17.4s, v5.4s, v12.4s\n"
- "fmla v18.4s, v4.4s, v12.4s\n"
- "fmla v21.4s, v2.4s, v12.4s\n"
- "fmla v19.4s, v3.4s, v12.4s\n"
- "fmla v22.4s, v1.4s, v12.4s\n"
- "fmla v23.4s, v0.4s, v12.4s\n"
- "ldr q12, [x28, x13]\n"
- "ldr x28, [x14, #0xd8]\n"
- "fmla v28.4s, v7.4s, v11.4s\n"
- "fmla v29.4s, v6.4s, v11.4s\n"
- "ldr q11, [x9, x13]\n"
- "ldr x9, [x14, #0xd0]\n"
- "fmla v16.4s, v7.4s, v10.4s\n"
- "fmla v17.4s, v6.4s, v10.4s\n"
- "fmla v20.4s, v4.4s, v10.4s\n"
- "fmla v21.4s, v3.4s, v10.4s\n"
- "fmla v24.4s, v1.4s, v10.4s\n"
- "fmla v25.4s, v0.4s, v10.4s\n"
- "ldr q10, [x27, x13]\n"
- "ldr x27, [x14, #0xe0]\n"
- "fmla v18.4s, v8.4s, v12.4s\n"
- "fmla v30.4s, v8.4s, v11.4s\n"
- "fmla v31.4s, v7.4s, v11.4s\n"
- "ldr q11, [x25, x13]\n"
- "fmla v27.4s, v1.4s, v12.4s\n"
- "ldr x25, [x14, #0xe8]\n"
- "fmla v19.4s, v7.4s, v12.4s\n"
- "fmla v22.4s, v5.4s, v12.4s\n"
- "fmla v23.4s, v4.4s, v12.4s\n"
- "fmla v26.4s, v2.4s, v12.4s\n"
- "ldr q12, [x26, x13]\n"
- "ldr x26, [x14, #0xf0]\n"
- "fmla v16.4s, v2.4s, v10.4s\n"
- "fmla v17.4s, v1.4s, v10.4s\n"
- "fmla v18.4s, v0.4s, v10.4s\n"
- "ldr q10, [x24, x13]\n"
- "fmla v20.4s, v7.4s, v11.4s\n"
- "ldr x24, [x14, #0xf8]\n"
- "fmla v21.4s, v6.4s, v11.4s\n"
- "fmla v24.4s, v4.4s, v11.4s\n"
- "fmla v25.4s, v3.4s, v11.4s\n"
+ "ldr q10, [x26, x14]\n"
+ "mov v28.16b, v13.16b\n fmla v28.4s, v2.4s, v11.4s\n"
+ "ldr q11, [x25, x14]\n"
+ "fmla v30.4s, v8.4s, v12.4s\n"
+ "ldr x26, [x16, #0x60]\n"
+ "fmla v29.4s, v7.4s, v12.4s\n"
+ "ldr x25, [x16, #0x68]\n"
+ "fmla v26.4s, v5.4s, v12.4s\n"
+ "ldr x24, [x16, #0x70]\n"
+ "fmla v28.4s, v6.4s, v12.4s\n"
+ "ldr x22, [x17, #0x0]\n"
+ "fmla v25.4s, v4.4s, v12.4s\n"
+ "ldr x21, [x17, #0x8]\n"
+ "mov v24.16b, v13.16b\n fmla v24.4s, v3.4s, v12.4s\n"
+ "ldr x20, [x17, #0x10]\n"
+ "fmla v22.4s, v2.4s, v12.4s\n"
+ "ldr x19, [x17, #0x18]\n"
+ "fmla v21.4s, v1.4s, v12.4s\n"
+ "mov v20.16b, v13.16b\n fmla v20.4s, v0.4s, v12.4s\n"
+ "ldr q12, [x23, x14]\n"
+ "mov v19.16b, v13.16b\n fmla v19.4s, v6.4s, v10.4s\n"
+ "ldr q10, [x9, x14]\n"
+ "mov v16.16b, v13.16b\n fmla v16.4s, v8.4s, v11.4s\n"
+ "ldr q11, [x10, x14]\n"
+ "fmla v27.4s, v8.4s, v9.4s\n"
+ "ldr x23, [x16, #0x78]\n"
+ "fmla v26.4s, v7.4s, v9.4s\n"
+ "ldr x10, [x16, #0x80]\n"
+ "fmla v25.4s, v6.4s, v9.4s\n"
+ "ldr x9, [x16, #0x88]\n"
+ "fmla v23.4s, v5.4s, v9.4s\n"
+ "fmla v22.4s, v4.4s, v9.4s\n"
+ "fmla v21.4s, v3.4s, v9.4s\n"
+ "fmla v19.4s, v2.4s, v9.4s\n"
+ "mov v18.16b, v13.16b\n fmla v18.4s, v1.4s, v9.4s\n"
+ "mov v17.16b, v13.16b\n fmla v17.4s, v0.4s, v9.4s\n"
+ "ldr q9, [x28, x14]\n"
+ "fmla v31.4s, v1.4s, v12.4s\n"
+ "ldr x28, [x16, #0x90]\n"
+ "fmla v30.4s, v0.4s, v12.4s\n"
+ "ldr q12, [x27, x14]\n"
+ "fmla v29.4s, v2.4s, v11.4s\n"
+ "ldr x27, [x16, #0x98]\n"
"fmla v28.4s, v1.4s, v11.4s\n"
- "fmla v29.4s, v0.4s, v11.4s\n"
- "ldr q11, [x11, x13]\n"
- "fmla v27.4s, v4.4s, v11.4s\n"
- "ldr x11, [x14, #0x100]\n"
- "fmla v30.4s, v2.4s, v11.4s\n"
- "fmla v17.4s, v2.4s, v12.4s\n"
- "fmla v18.4s, v1.4s, v12.4s\n"
- "fmla v19.4s, v0.4s, v12.4s\n"
- "ldr q12, [x10, x13]\n"
- "ldr x10, [x14, #0x108]\n"
- "fmla v16.4s, v6.4s, v10.4s\n"
+ "ldr q11, [x26, x14]\n"
+ "fmla v26.4s, v8.4s, v10.4s\n"
+ "ldr x26, [x16, #0xa0]\n"
+ "fmla v25.4s, v7.4s, v10.4s\n"
+ "fmla v24.4s, v6.4s, v10.4s\n"
+ "fmla v22.4s, v5.4s, v10.4s\n"
+ "fmla v21.4s, v4.4s, v10.4s\n"
"fmla v20.4s, v3.4s, v10.4s\n"
- "fmla v24.4s, v0.4s, v10.4s\n"
- "ldr q10, [x9, x13]\n"
- "fmla v22.4s, v8.4s, v11.4s\n"
- "ldr x9, [x14, #0x110]\n"
- "fmla v23.4s, v7.4s, v11.4s\n"
- "fmla v26.4s, v5.4s, v11.4s\n"
- "fmla v31.4s, v1.4s, v11.4s\n"
- "ldr q11, [x28, x13]\n"
- "fmla v27.4s, v2.4s, v12.4s\n"
- "ldr x28, [x14, #0x118]\n"
- "fmla v28.4s, v0.4s, v10.4s\n"
- "fmla v29.4s, v4.4s, v11.4s\n"
- "fmla v30.4s, v3.4s, v11.4s\n"
- "fmla v19.4s, v8.4s, v12.4s\n"
- "fmla v23.4s, v5.4s, v12.4s\n"
- "ldr q12, [x27, x13]\n"
- "fmla v20.4s, v6.4s, v10.4s\n"
- "fmla v24.4s, v3.4s, v10.4s\n"
- "ldr q10, [x25, x13]\n"
- "fmla v25.4s, v7.4s, v11.4s\n"
- "fmla v26.4s, v6.4s, v11.4s\n"
- "fmla v28.4s, v5.4s, v11.4s\n"
- "fmla v27.4s, v5.4s, v12.4s\n"
- "fmla v31.4s, v2.4s, v12.4s\n"
- "fmla v29.4s, v7.4s, v10.4s\n"
+ "fmla v18.4s, v2.4s, v10.4s\n"
+ "fmla v17.4s, v1.4s, v10.4s\n"
+ "fmla v16.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x25, x14]\n"
+ "fmla v31.4s, v3.4s, v9.4s\n"
+ "ldr x25, [x16, #0xa8]\n"
+ "fmla v27.4s, v0.4s, v9.4s\n"
+ "fmla v28.4s, v5.4s, v12.4s\n"
+ "fmla v24.4s, v2.4s, v12.4s\n"
+ "ldr q12, [x23, x14]\n"
+ "fmla v23.4s, v6.4s, v11.4s\n"
+ "ldr x23, [x16, #0xb8]\n"
+ "fmla v19.4s, v3.4s, v11.4s\n"
+ "ldr q11, [x24, x14]\n"
+ "fmla v31.4s, v5.4s, v10.4s\n"
+ "ldr x24, [x16, #0xb0]\n"
+ "fmla v30.4s, v4.4s, v10.4s\n"
+ "fmla v29.4s, v3.4s, v10.4s\n"
+ "fmla v27.4s, v2.4s, v10.4s\n"
+ "fmla v26.4s, v1.4s, v10.4s\n"
+ "fmla v25.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x9, x14]\n"
+ "fmla v20.4s, v8.4s, v11.4s\n"
+ "ldr x9, [x16, #0xc8]\n"
+ "fmla v16.4s, v5.4s, v11.4s\n"
+ "ldr q11, [x10, x14]\n"
+ "fmla v30.4s, v5.4s, v12.4s\n"
+ "ldr x10, [x16, #0xc0]\n"
+ "fmla v29.4s, v4.4s, v12.4s\n"
+ "fmla v28.4s, v3.4s, v12.4s\n"
+ "fmla v26.4s, v2.4s, v12.4s\n"
+ "fmla v25.4s, v1.4s, v12.4s\n"
+ "fmla v24.4s, v0.4s, v12.4s\n"
+ "ldr q12, [x27, x14]\n"
+ "fmla v19.4s, v7.4s, v11.4s\n"
+ "ldr x27, [x16, #0xd8]\n"
+ "fmla v18.4s, v6.4s, v11.4s\n"
+ "ldr q11, [x28, x14]\n"
+ "fmla v31.4s, v7.4s, v10.4s\n"
+ "ldr x28, [x16, #0xd0]\n"
"fmla v30.4s, v6.4s, v10.4s\n"
- "fmla v24.4s, v8.4s, v11.4s\n"
- "ldr q11, [x26, x13]\n"
- "fmla v28.4s, v8.4s, v10.4s\n"
- "ldr q10, [x11, x13]\n"
+ "fmla v27.4s, v4.4s, v10.4s\n"
+ "fmla v26.4s, v3.4s, v10.4s\n"
+ "fmla v23.4s, v1.4s, v10.4s\n"
+ "fmla v22.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x26, x14]\n"
+ "fmla v17.4s, v8.4s, v11.4s\n"
+ "ldr x26, [x16, #0xe0]\n"
+ "fmla v16.4s, v7.4s, v11.4s\n"
+ "ldr q11, [x25, x14]\n"
+ "fmla v29.4s, v8.4s, v12.4s\n"
+ "ldr x25, [x16, #0xe8]\n"
+ "fmla v28.4s, v7.4s, v12.4s\n"
+ "fmla v25.4s, v5.4s, v12.4s\n"
+ "fmla v24.4s, v4.4s, v12.4s\n"
+ "fmla v21.4s, v2.4s, v12.4s\n"
+ "fmla v20.4s, v1.4s, v12.4s\n"
+ "ldr q12, [x24, x14]\n"
+ "fmla v31.4s, v2.4s, v10.4s\n"
+ "ldr x24, [x16, #0xf0]\n"
+ "fmla v30.4s, v1.4s, v10.4s\n"
+ "fmla v29.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x23, x14]\n"
+ "fmla v27.4s, v7.4s, v11.4s\n"
+ "ldr x23, [x16, #0xf8]\n"
+ "fmla v26.4s, v6.4s, v11.4s\n"
+ "fmla v23.4s, v4.4s, v11.4s\n"
+ "fmla v22.4s, v3.4s, v11.4s\n"
+ "fmla v19.4s, v1.4s, v11.4s\n"
+ "fmla v18.4s, v0.4s, v11.4s\n"
+ "ldr q11, [x10, x14]\n"
+ "fmla v30.4s, v2.4s, v12.4s\n"
+ "ldr x10, [x16, #0x100]\n"
+ "fmla v29.4s, v1.4s, v12.4s\n"
+ "fmla v28.4s, v0.4s, v12.4s\n"
+ "ldr q12, [x9, x14]\n"
+ "fmla v31.4s, v6.4s, v10.4s\n"
+ "ldr x9, [x16, #0x108]\n"
+ "fmla v27.4s, v3.4s, v10.4s\n"
+ "fmla v23.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x28, x14]\n"
"fmla v25.4s, v8.4s, v11.4s\n"
- "fmla v26.4s, v7.4s, v11.4s\n"
- "fmla v27.4s, v6.4s, v11.4s\n"
- "fmla v29.4s, v5.4s, v11.4s\n"
- "fmla v30.4s, v4.4s, v11.4s\n"
- "fmla v31.4s, v3.4s, v11.4s\n"
- "ldr q11, [x10, x13]\n"
- "fmla v23.4s, v8.4s, v12.4s\n"
- "ldr q12, [x24, x13]\n"
- "fmla v16.4s, v4.4s, v10.4s\n"
- "fmax v16.4s, v16.4s, v15.4s\n"
- "fmla v17.4s, v3.4s, v10.4s\n"
+ "ldr x28, [x16, #0x110]\n"
+ "fmla v24.4s, v7.4s, v11.4s\n"
+ "fmla v21.4s, v5.4s, v11.4s\n"
+ "fmla v20.4s, v4.4s, v11.4s\n"
+ "fmla v17.4s, v2.4s, v11.4s\n"
+ "fmla v16.4s, v1.4s, v11.4s\n"
+ "ldr q11, [x27, x14]\n"
+ "fmla v28.4s, v8.4s, v12.4s\n"
+ "ldr x27, [x16, #0x118]\n"
+ "fmla v24.4s, v5.4s, v12.4s\n"
+ "fmla v20.4s, v2.4s, v12.4s\n"
+ "ldr q12, [x26, x14]\n"
+ "fmla v27.4s, v6.4s, v10.4s\n"
+ "fmla v23.4s, v3.4s, v10.4s\n"
+ "fmla v19.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x25, x14]\n"
+ "fmla v22.4s, v7.4s, v11.4s\n"
+ "fmla v21.4s, v6.4s, v11.4s\n"
+ "fmla v23.4s, v8.4s, v11.4s\n"
+ "fmla v19.4s, v5.4s, v11.4s\n"
+ "fmla v18.4s, v4.4s, v11.4s\n"
+ "fmla v17.4s, v3.4s, v11.4s\n"
+ "ldr q11, [x24, x14]\n"
+ "fmla v24.4s, v8.4s, v12.4s\n"
+ "fmla v20.4s, v5.4s, v12.4s\n"
+ "fmla v16.4s, v2.4s, v12.4s\n"
+ "ldr q12, [x23, x14]\n"
+ "fmla v19.4s, v8.4s, v10.4s\n"
+ "fmla v18.4s, v7.4s, v10.4s\n"
+ "fmla v17.4s, v6.4s, v10.4s\n"
+ "ldr q10, [x10, x14]\n"
+ "fmla v22.4s, v8.4s, v11.4s\n"
+ "fmla v21.4s, v7.4s, v11.4s\n"
+ "fmla v20.4s, v6.4s, v11.4s\n"
"fmla v18.4s, v5.4s, v11.4s\n"
- "fmax v17.4s, v17.4s, v15.4s\n"
- "fmla v19.4s, v4.4s, v11.4s\n"
- "fmla v29.4s, v8.4s, v12.4s\n"
- "fmax v18.4s, v18.4s, v15.4s\n"
- "fmla v30.4s, v7.4s, v12.4s\n"
- "fmla v31.4s, v6.4s, v12.4s\n"
- "ldr q12, [x9, x13]\n"
- "fmax v19.4s, v19.4s, v15.4s\n"
- "fmla v20.4s, v1.4s, v10.4s\n"
- "fmla v21.4s, v0.4s, v10.4s\n"
- "ldr q10, [x28, x13]\n"
- "fmin v16.4s, v16.4s, v14.4s\n"
- "fmla v22.4s, v2.4s, v11.4s\n"
- "fmla v23.4s, v1.4s, v11.4s\n"
- "fmin v17.4s, v17.4s, v14.4s\n"
- "str q16, [x23, x12]\n"
- "fmla v24.4s, v7.4s, v12.4s\n"
- "fmla v25.4s, v6.4s, v12.4s\n"
- "fmin v18.4s, v18.4s, v14.4s\n"
- "str q17, [x22, x12]\n"
- "fmla v26.4s, v8.4s, v10.4s\n"
- "fmla v27.4s, v7.4s, v10.4s\n"
- "fmin v19.4s, v19.4s, v14.4s\n"
- "str q18, [x21, x12]\n"
- "fmax v20.4s, v20.4s, v15.4s\n"
- "fmax v21.4s, v21.4s, v15.4s\n"
- "str q19, [x20, x12]\n"
- "ldr x23, [x16, #0x20]\n"
- "fmax v22.4s, v22.4s, v15.4s\n"
- "fmax v23.4s, v23.4s, v15.4s\n"
- "ldr x22, [x16, #0x28]\n"
- "ldr x21, [x16, #0x30]\n"
- "ldr x20, [x16, #0x38]\n"
- "fmla v28.4s, v4.4s, v12.4s\n"
- "fmla v29.4s, v3.4s, v12.4s\n"
- "fmin v20.4s, v20.4s, v14.4s\n"
- "fmla v30.4s, v5.4s, v10.4s\n"
+ "fmla v17.4s, v4.4s, v11.4s\n"
+ "fmla v16.4s, v3.4s, v11.4s\n"
+ "ldr q11, [x9, x14]\n"
"fmla v31.4s, v4.4s, v10.4s\n"
- "fmin v21.4s, v21.4s, v14.4s\n"
- "str q20, [x23, x12]\n"
- "fmin v22.4s, v22.4s, v14.4s\n"
- "fmin v23.4s, v23.4s, v14.4s\n"
- "str q21, [x22, x12]\n"
- "ldr x23, [x16, #0x40]\n"
- "fmax v24.4s, v24.4s, v15.4s\n"
- "fmax v25.4s, v25.4s, v15.4s\n"
- "str q22, [x21, x12]\n"
- "ldr x22, [x16, #0x48]\n"
- "fmax v26.4s, v26.4s, v15.4s\n"
- "fmax v27.4s, v27.4s, v15.4s\n"
- "str q23, [x20, x12]\n"
- "ldr x21, [x16, #0x50]\n"
- "ldr x20, [x16, #0x58]\n"
- "fmin v24.4s, v24.4s, v14.4s\n"
- "fmin v25.4s, v25.4s, v14.4s\n"
- "str q24, [x23, x12]\n"
- "fmin v26.4s, v26.4s, v14.4s\n"
- "fmin v27.4s, v27.4s, v14.4s\n"
- "str q25, [x22, x12]\n"
- "ldr x23, [x16, #0x60]\n"
- "fmax v28.4s, v28.4s, v15.4s\n"
- "fmax v29.4s, v29.4s, v15.4s\n"
- "str q26, [x21, x12]\n"
- "ldr x22, [x16, #0x68]\n"
- "fmax v30.4s, v30.4s, v15.4s\n"
+ "fmla v18.4s, v8.4s, v12.4s\n"
+ "fmla v17.4s, v7.4s, v12.4s\n"
+ "fmla v16.4s, v6.4s, v12.4s\n"
+ "ldr q12, [x28, x14]\n"
+ "fmla v30.4s, v3.4s, v10.4s\n"
+ "fmla v27.4s, v1.4s, v10.4s\n"
+ "fmla v26.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x27, x14]\n"
+ "add x14, x14, #0x10\n"
+ "fmla v29.4s, v5.4s, v11.4s\n"
+ "fmla v28.4s, v4.4s, v11.4s\n"
+ "fmla v25.4s, v2.4s, v11.4s\n"
+ "fmla v24.4s, v1.4s, v11.4s\n"
+ "fmla v23.4s, v7.4s, v12.4s\n"
+ "fmla v22.4s, v6.4s, v12.4s\n"
+ "fmla v19.4s, v4.4s, v12.4s\n"
+ "fmla v18.4s, v3.4s, v12.4s\n"
+ "fmla v21.4s, v8.4s, v10.4s\n"
+ "fmla v20.4s, v7.4s, v10.4s\n"
+ "fmla v17.4s, v5.4s, v10.4s\n"
+ "fmla v16.4s, v4.4s, v10.4s\n"
"fmax v31.4s, v31.4s, v15.4s\n"
- "str q27, [x20, x12]\n"
- "ldr x21, [x16, #0x70]\n"
- "ldr x20, [x16, #0x78]\n"
- "fmin v28.4s, v28.4s, v14.4s\n"
- "fmin v29.4s, v29.4s, v14.4s\n"
- "str q28, [x23, x12]\n"
- "fmin v30.4s, v30.4s, v14.4s\n"
+ "fmax v30.4s, v30.4s, v15.4s\n"
+ "fmax v29.4s, v29.4s, v15.4s\n"
"fmin v31.4s, v31.4s, v14.4s\n"
- "str q29, [x22, x12]\n"
- "add x13, x13, #0x10\n"
+ "str q31, [x22, x12]\n"
+ "fmin v30.4s, v30.4s, v14.4s\n"
+ "fmin v29.4s, v29.4s, v14.4s\n"
+ "ldr x22, [x17, #0x20]\n"
+ "fmax v28.4s, v28.4s, v15.4s\n"
"str q30, [x21, x12]\n"
- "str q31, [x20, x12]\n"
+ "fmax v27.4s, v27.4s, v15.4s\n"
+ "fmax v26.4s, v26.4s, v15.4s\n"
+ "str q29, [x20, x12]\n"
+ "fmin v28.4s, v28.4s, v14.4s\n"
+ "ldr x21, [x17, #0x28]\n"
+ "fmax v25.4s, v25.4s, v15.4s\n"
+ "ldr x20, [x17, #0x30]\n"
+ "fmin v27.4s, v27.4s, v14.4s\n"
+ "str q28, [x19, x12]\n"
+ "fmin v26.4s, v26.4s, v14.4s\n"
+ "ldr x19, [x17, #0x38]\n"
+ "fmin v25.4s, v25.4s, v14.4s\n"
+ "str q27, [x22, x12]\n"
+ "fmax v24.4s, v24.4s, v15.4s\n"
+ "str q26, [x21, x12]\n"
+ "fmax v23.4s, v23.4s, v15.4s\n"
+ "str q25, [x20, x12]\n"
+ "fmax v22.4s, v22.4s, v15.4s\n"
+ "ldr x22, [x17, #0x40]\n"
+ "fmin v24.4s, v24.4s, v14.4s\n"
+ "ldr x21, [x17, #0x48]\n"
+ "fmin v23.4s, v23.4s, v14.4s\n"
+ "ldr x20, [x17, #0x50]\n"
+ "fmin v22.4s, v22.4s, v14.4s\n"
+ "str q24, [x19, x12]\n"
+ "fmax v21.4s, v21.4s, v15.4s\n"
+ "str q23, [x22, x12]\n"
+ "fmax v20.4s, v20.4s, v15.4s\n"
+ "str q22, [x21, x12]\n"
+ "fmax v19.4s, v19.4s, v15.4s\n"
+ "ldr x19, [x17, #0x58]\n"
+ "fmin v21.4s, v21.4s, v14.4s\n"
+ "ldr x22, [x17, #0x60]\n"
+ "fmin v20.4s, v20.4s, v14.4s\n"
+ "ldr x21, [x17, #0x68]\n"
+ "fmin v19.4s, v19.4s, v14.4s\n"
+ "str q21, [x20, x12]\n"
+ "fmax v18.4s, v18.4s, v15.4s\n"
+ "str q20, [x19, x12]\n"
+ "fmax v17.4s, v17.4s, v15.4s\n"
+ "str q19, [x22, x12]\n"
+ "fmax v16.4s, v16.4s, v15.4s\n"
+ "ldr x20, [x17, #0x70]\n"
+ "fmin v18.4s, v18.4s, v14.4s\n"
+ "ldr x19, [x17, #0x78]\n"
+ "fmin v17.4s, v17.4s, v14.4s\n"
+ "str q18, [x21, x12]\n"
+ "fmin v16.4s, v16.4s, v14.4s\n"
+ "str q17, [x20, x12]\n"
+ "str q16, [x19, x12]\n"
"3:" // Oddments
"tst %x[n_channels], #0x3\n"
"beq 72f\n"
"ldr q13, [x15, #0x0]\n"
"ldr q0, [x15, #0x10]\n"
- "mov x12, x13\n"
+ "mov x12, x14\n"
"ldr q1, [x15, #0x20]\n"
"ldr q2, [x15, #0x30]\n"
"ldr q3, [x15, #0x40]\n"
@@ -713,681 +713,683 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr q6, [x15, #0x70]\n"
"ldr q7, [x15, #0x80]\n"
"ldr q8, [x15, #0x90]\n"
- "ldr x23, [x14, #0x0]\n"
- "ldr x22, [x14, #0x8]\n"
- "add x23, x23, x13\n"
- "add x22, x22, x13\n"
- "ldr x21, [x14, #0x10]\n"
- "ldr x20, [x14, #0x18]\n"
- "add x21, x21, x13\n"
- "add x20, x20, x13\n"
+ "ldr x10, [x16, #0x0]\n"
+ "add x10, x10, x14\n"
+ "ldr x9, [x16, #0x8]\n"
+ "ldr x28, [x16, #0x10]\n"
+ "add x9, x9, x14\n"
+ "ldr x27, [x16, #0x18]\n"
+ "add x28, x28, x14\n"
+ "add x27, x27, x14\n"
"tbz %x[n_channels], #1, 4f\n"
- "ld1 { v9.d }[0], [x23], #0x8\n"
- "ld1 { v10.d }[0], [x22], #0x8\n"
- "ld1 { v11.d }[0], [x21], #0x8\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v9.d }[0], [x10], #0x8\n"
+ "ld1 { v10.d }[0], [x9], #0x8\n"
+ "ld1 { v11.d }[0], [x28], #0x8\n"
+ "ld1 { v12.d }[0], [x27], #0x8\n"
"tbz %x[n_channels], #0, 5f\n"
- "ld1 { v9.s }[2], [x23], #0x4\n"
- "ld1 { v10.s }[2], [x22], #0x4\n"
- "ld1 { v11.s }[2], [x21], #0x4\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x10], #0x4\n"
+ "ld1 { v10.s }[2], [x9], #0x4\n"
+ "ld1 { v11.s }[2], [x28], #0x4\n"
+ "ld1 { v12.s }[2], [x27], #0x4\n"
"b 5f\n"
"4:" // Oddments: Load inputs (2, 2), (0, 0), (0, 5), (2, 3): Bit 1: Unset
- "ld1 { v9.s }[0], [x23], #0x4\n"
- "ld1 { v10.s }[0], [x22], #0x4\n"
- "ld1 { v11.s }[0], [x21], #0x4\n"
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v9.s }[0], [x10], #0x4\n"
+ "ld1 { v10.s }[0], [x9], #0x4\n"
+ "ld1 { v11.s }[0], [x28], #0x4\n"
+ "ld1 { v12.s }[0], [x27], #0x4\n"
"5:" // Oddments: Load inputs (2, 2), (0, 0), (0, 5), (2, 3): Bit 1: End
- "mov v16.16b, v13.16b\n fmla v16.4s, v8.4s, v9.4s\n"
- "mov v17.16b, v13.16b\n fmla v17.4s, v7.4s, v9.4s\n"
- "ldr x20, [x14, #0x20]\n"
- "add x20, x20, x13\n"
- "mov v18.16b, v13.16b\n fmla v18.4s, v6.4s, v9.4s\n"
- "mov v21.16b, v13.16b\n fmla v21.4s, v4.4s, v9.4s\n"
- "mov v22.16b, v13.16b\n fmla v22.4s, v3.4s, v9.4s\n"
- "mov v25.16b, v13.16b\n fmla v25.4s, v1.4s, v9.4s\n"
- "mov v26.16b, v13.16b\n fmla v26.4s, v0.4s, v9.4s\n"
- "mov v19.16b, v13.16b\n fmla v19.4s, v2.4s, v11.4s\n"
- "mov v20.16b, v13.16b\n fmla v20.4s, v5.4s, v9.4s\n"
- "mov v24.16b, v13.16b\n fmla v24.4s, v2.4s, v9.4s\n"
- "fmla v16.4s, v0.4s, v10.4s\n"
- "fmla v17.4s, v8.4s, v12.4s\n"
- "fmla v18.4s, v7.4s, v12.4s\n"
- "fmla v19.4s, v6.4s, v12.4s\n"
- "fmla v21.4s, v5.4s, v12.4s\n"
- "fmla v22.4s, v4.4s, v12.4s\n"
- "mov v23.16b, v13.16b\n fmla v23.4s, v3.4s, v12.4s\n"
- "fmla v25.4s, v2.4s, v12.4s\n"
- "fmla v26.4s, v1.4s, v12.4s\n"
- "mov v27.16b, v13.16b\n fmla v27.4s, v0.4s, v12.4s\n"
+ "mov v31.16b, v13.16b\n fmla v31.4s, v8.4s, v9.4s\n"
+ "ldr x26, [x16, #0x20]\n"
+ "add x26, x26, x14\n"
+ "mov v30.16b, v13.16b\n fmla v30.4s, v7.4s, v9.4s\n"
+ "mov v29.16b, v13.16b\n fmla v29.4s, v6.4s, v9.4s\n"
+ "mov v27.16b, v13.16b\n fmla v27.4s, v5.4s, v9.4s\n"
+ "mov v26.16b, v13.16b\n fmla v26.4s, v4.4s, v9.4s\n"
+ "mov v25.16b, v13.16b\n fmla v25.4s, v3.4s, v9.4s\n"
+ "mov v23.16b, v13.16b\n fmla v23.4s, v2.4s, v9.4s\n"
+ "mov v22.16b, v13.16b\n fmla v22.4s, v1.4s, v9.4s\n"
+ "mov v21.16b, v13.16b\n fmla v21.4s, v0.4s, v9.4s\n"
+ "fmla v31.4s, v0.4s, v10.4s\n"
+ "mov v28.16b, v13.16b\n fmla v28.4s, v2.4s, v11.4s\n"
+ "fmla v30.4s, v8.4s, v12.4s\n"
+ "fmla v29.4s, v7.4s, v12.4s\n"
+ "fmla v26.4s, v5.4s, v12.4s\n"
+ "fmla v28.4s, v6.4s, v12.4s\n"
+ "fmla v25.4s, v4.4s, v12.4s\n"
+ "mov v24.16b, v13.16b\n fmla v24.4s, v3.4s, v12.4s\n"
+ "fmla v22.4s, v2.4s, v12.4s\n"
+ "fmla v21.4s, v1.4s, v12.4s\n"
+ "mov v20.16b, v13.16b\n fmla v20.4s, v0.4s, v12.4s\n"
"tbz %x[n_channels], #1, 6f\n"
- "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v10.d }[0], [x26], #0x8\n"
"tbz %x[n_channels], #0, 7f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x26], #0x4\n"
"b 7f\n"
"6:" // Oddments: Load input (5, 0): Bit 1: Unset
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v10.s }[0], [x26], #0x4\n"
"7:" // Oddments: Load input (5, 0): Bit 1: End
- "ldr x20, [x14, #0x28]\n"
- "mov v28.16b, v13.16b\n fmla v28.4s, v6.4s, v10.4s\n"
- "add x20, x20, x13\n"
+ "mov v19.16b, v13.16b\n fmla v19.4s, v6.4s, v10.4s\n"
+ "ldr x25, [x16, #0x28]\n"
+ "add x25, x25, x14\n"
"tbz %x[n_channels], #1, 8f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x25], #0x8\n"
"tbz %x[n_channels], #0, 9f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x25], #0x4\n"
"b 9f\n"
"8:" // Oddments: Load input (5, 5): Bit 1: Unset
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x25], #0x4\n"
"9:" // Oddments: Load input (5, 5): Bit 1: End
- "ldr x20, [x14, #0x30]\n"
- "mov v31.16b, v13.16b\n fmla v31.4s, v8.4s, v11.4s\n"
- "add x20, x20, x13\n"
+ "mov v16.16b, v13.16b\n fmla v16.4s, v8.4s, v11.4s\n"
+ "ldr x24, [x16, #0x30]\n"
+ "add x24, x24, x14\n"
"tbz %x[n_channels], #1, 10f\n"
- "ld1 { v9.d }[0], [x20], #0x8\n"
+ "ld1 { v9.d }[0], [x24], #0x8\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v9.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x24], #0x4\n"
"b 11f\n"
"10:" // Oddments: Load input (3, 2): Bit 1: Unset
- "ld1 { v9.s }[0], [x20], #0x4\n"
+ "ld1 { v9.s }[0], [x24], #0x4\n"
"11:" // Oddments: Load input (3, 2): Bit 1: End
- "ldr x20, [x14, #0x38]\n"
- "fmla v20.4s, v8.4s, v9.4s\n"
- "fmla v21.4s, v7.4s, v9.4s\n"
- "add x20, x20, x13\n"
- "fmla v22.4s, v6.4s, v9.4s\n"
- "fmla v24.4s, v5.4s, v9.4s\n"
- "fmla v25.4s, v4.4s, v9.4s\n"
- "fmla v26.4s, v3.4s, v9.4s\n"
- "fmla v28.4s, v2.4s, v9.4s\n"
- "mov v29.16b, v13.16b\n fmla v29.4s, v1.4s, v9.4s\n"
- "mov v30.16b, v13.16b\n fmla v30.4s, v0.4s, v9.4s\n"
+ "fmla v27.4s, v8.4s, v9.4s\n"
+ "ldr x23, [x16, #0x38]\n"
+ "fmla v26.4s, v7.4s, v9.4s\n"
+ "add x23, x23, x14\n"
+ "fmla v25.4s, v6.4s, v9.4s\n"
+ "fmla v23.4s, v5.4s, v9.4s\n"
+ "fmla v22.4s, v4.4s, v9.4s\n"
+ "fmla v21.4s, v3.4s, v9.4s\n"
+ "fmla v19.4s, v2.4s, v9.4s\n"
+ "mov v18.16b, v13.16b\n fmla v18.4s, v1.4s, v9.4s\n"
+ "mov v17.16b, v13.16b\n fmla v17.4s, v0.4s, v9.4s\n"
"tbz %x[n_channels], #1, 12f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x23], #0x8\n"
"tbz %x[n_channels], #0, 13f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x23], #0x4\n"
"b 13f\n"
"12:" // Oddments: Load input (0, 1): Bit 1: Unset
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x23], #0x4\n"
"13:" // Oddments: Load input (0, 1): Bit 1: End
- "ldr x20, [x14, #0x40]\n"
- "fmla v16.4s, v1.4s, v12.4s\n"
- "fmla v17.4s, v0.4s, v12.4s\n"
- "add x20, x20, x13\n"
+ "fmla v31.4s, v1.4s, v12.4s\n"
+ "ldr x10, [x16, #0x40]\n"
+ "fmla v30.4s, v0.4s, v12.4s\n"
+ "add x10, x10, x14\n"
"tbz %x[n_channels], #1, 14f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x10], #0x8\n"
"tbz %x[n_channels], #0, 15f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x10], #0x4\n"
"b 15f\n"
"14:" // Oddments: Load input (0, 4): Bit 1: Unset
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x10], #0x4\n"
"15:" // Oddments: Load input (0, 4): Bit 1: End
- "ldr x20, [x14, #0x48]\n"
- "fmla v18.4s, v2.4s, v11.4s\n"
- "fmla v19.4s, v1.4s, v11.4s\n"
- "add x20, x20, x13\n"
+ "fmla v29.4s, v2.4s, v11.4s\n"
+ "ldr x9, [x16, #0x48]\n"
+ "fmla v28.4s, v1.4s, v11.4s\n"
+ "add x9, x9, x14\n"
"tbz %x[n_channels], #1, 16f\n"
- "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v10.d }[0], [x9], #0x8\n"
"tbz %x[n_channels], #0, 17f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x9], #0x4\n"
"b 17f\n"
"16:" // Oddments: Load input (3, 3): Bit 1: Unset
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v10.s }[0], [x9], #0x4\n"
"17:" // Oddments: Load input (3, 3): Bit 1: End
- "ldr x20, [x14, #0x50]\n"
- "fmla v21.4s, v8.4s, v10.4s\n"
- "fmla v22.4s, v7.4s, v10.4s\n"
- "add x20, x20, x13\n"
- "fmla v23.4s, v6.4s, v10.4s\n"
- "fmla v25.4s, v5.4s, v10.4s\n"
- "fmla v26.4s, v4.4s, v10.4s\n"
- "fmla v27.4s, v3.4s, v10.4s\n"
- "fmla v29.4s, v2.4s, v10.4s\n"
- "fmla v30.4s, v1.4s, v10.4s\n"
- "fmla v31.4s, v0.4s, v10.4s\n"
+ "fmla v26.4s, v8.4s, v10.4s\n"
+ "ldr x28, [x16, #0x50]\n"
+ "fmla v25.4s, v7.4s, v10.4s\n"
+ "add x28, x28, x14\n"
+ "fmla v24.4s, v6.4s, v10.4s\n"
+ "fmla v22.4s, v5.4s, v10.4s\n"
+ "fmla v21.4s, v4.4s, v10.4s\n"
+ "fmla v20.4s, v3.4s, v10.4s\n"
+ "fmla v18.4s, v2.4s, v10.4s\n"
+ "fmla v17.4s, v1.4s, v10.4s\n"
+ "fmla v16.4s, v0.4s, v10.4s\n"
"tbz %x[n_channels], #1, 18f\n"
- "ld1 { v9.d }[0], [x20], #0x8\n"
+ "ld1 { v9.d }[0], [x28], #0x8\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v9.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x28], #0x4\n"
"b 19f\n"
"18:" // Oddments: Load input (1, 0): Bit 1: Unset
- "ld1 { v9.s }[0], [x20], #0x4\n"
+ "ld1 { v9.s }[0], [x28], #0x4\n"
"19:" // Oddments: Load input (1, 0): Bit 1: End
- "ldr x20, [x14, #0x58]\n"
- "fmla v16.4s, v3.4s, v9.4s\n"
- "fmla v20.4s, v0.4s, v9.4s\n"
- "add x20, x20, x13\n"
+ "fmla v31.4s, v3.4s, v9.4s\n"
+ "ldr x27, [x16, #0x58]\n"
+ "fmla v27.4s, v0.4s, v9.4s\n"
+ "add x27, x27, x14\n"
"tbz %x[n_channels], #1, 20f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x27], #0x8\n"
"tbz %x[n_channels], #0, 21f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x27], #0x4\n"
"b 21f\n"
"20:" // Oddments: Load input (1, 5): Bit 1: Unset
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x27], #0x4\n"
"21:" // Oddments: Load input (1, 5): Bit 1: End
- "ldr x20, [x14, #0x60]\n"
- "fmla v19.4s, v5.4s, v12.4s\n"
- "fmla v23.4s, v2.4s, v12.4s\n"
- "add x20, x20, x13\n"
+ "fmla v28.4s, v5.4s, v12.4s\n"
+ "ldr x26, [x16, #0x60]\n"
+ "fmla v24.4s, v2.4s, v12.4s\n"
+ "add x26, x26, x14\n"
"tbz %x[n_channels], #1, 22f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x26], #0x8\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x26], #0x4\n"
"b 23f\n"
"22:" // Oddments: Load input (4, 0): Bit 1: Unset
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x26], #0x4\n"
"23:" // Oddments: Load input (4, 0): Bit 1: End
- "ldr x20, [x14, #0x68]\n"
- "fmla v24.4s, v6.4s, v11.4s\n"
- "fmla v28.4s, v3.4s, v11.4s\n"
- "add x20, x20, x13\n"
+ "fmla v23.4s, v6.4s, v11.4s\n"
+ "ldr x25, [x16, #0x68]\n"
+ "fmla v19.4s, v3.4s, v11.4s\n"
+ "add x25, x25, x14\n"
"tbz %x[n_channels], #1, 24f\n"
- "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v10.d }[0], [x25], #0x8\n"
"tbz %x[n_channels], #0, 25f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x25], #0x4\n"
"b 25f\n"
"24:" // Oddments: Load input (1, 2): Bit 1: Unset
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v10.s }[0], [x25], #0x4\n"
"25:" // Oddments: Load input (1, 2): Bit 1: End
- "ldr x20, [x14, #0x70]\n"
- "fmla v16.4s, v5.4s, v10.4s\n"
- "fmla v17.4s, v4.4s, v10.4s\n"
- "add x20, x20, x13\n"
- "fmla v18.4s, v3.4s, v10.4s\n"
- "fmla v20.4s, v2.4s, v10.4s\n"
- "fmla v21.4s, v1.4s, v10.4s\n"
- "fmla v22.4s, v0.4s, v10.4s\n"
+ "fmla v31.4s, v5.4s, v10.4s\n"
+ "ldr x24, [x16, #0x70]\n"
+ "fmla v30.4s, v4.4s, v10.4s\n"
+ "add x24, x24, x14\n"
+ "fmla v29.4s, v3.4s, v10.4s\n"
+ "fmla v27.4s, v2.4s, v10.4s\n"
+ "fmla v26.4s, v1.4s, v10.4s\n"
+ "fmla v25.4s, v0.4s, v10.4s\n"
"tbz %x[n_channels], #1, 26f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x24], #0x8\n"
"tbz %x[n_channels], #0, 27f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x24], #0x4\n"
"b 27f\n"
"26:" // Oddments: Load input (4, 5): Bit 1: Unset
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x24], #0x4\n"
"27:" // Oddments: Load input (4, 5): Bit 1: End
- "ldr x20, [x14, #0x78]\n"
- "fmla v27.4s, v8.4s, v11.4s\n"
- "fmla v31.4s, v5.4s, v11.4s\n"
- "add x20, x20, x13\n"
+ "fmla v20.4s, v8.4s, v11.4s\n"
+ "ldr x23, [x16, #0x78]\n"
+ "fmla v16.4s, v5.4s, v11.4s\n"
+ "add x23, x23, x14\n"
"tbz %x[n_channels], #1, 28f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x23], #0x8\n"
"tbz %x[n_channels], #0, 29f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x23], #0x4\n"
"b 29f\n"
"28:" // Oddments: Load input (1, 3): Bit 1: Unset
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x23], #0x4\n"
"29:" // Oddments: Load input (1, 3): Bit 1: End
- "ldr x20, [x14, #0x80]\n"
- "fmla v17.4s, v5.4s, v12.4s\n"
- "fmla v18.4s, v4.4s, v12.4s\n"
- "add x20, x20, x13\n"
- "fmla v19.4s, v3.4s, v12.4s\n"
- "fmla v21.4s, v2.4s, v12.4s\n"
- "fmla v22.4s, v1.4s, v12.4s\n"
- "fmla v23.4s, v0.4s, v12.4s\n"
+ "fmla v30.4s, v5.4s, v12.4s\n"
+ "ldr x10, [x16, #0x80]\n"
+ "fmla v29.4s, v4.4s, v12.4s\n"
+ "add x10, x10, x14\n"
+ "fmla v28.4s, v3.4s, v12.4s\n"
+ "fmla v26.4s, v2.4s, v12.4s\n"
+ "fmla v25.4s, v1.4s, v12.4s\n"
+ "fmla v24.4s, v0.4s, v12.4s\n"
"tbz %x[n_channels], #1, 30f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x10], #0x8\n"
"tbz %x[n_channels], #0, 31f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x10], #0x4\n"
"b 31f\n"
"30:" // Oddments: Load input (5, 1): Bit 1: Unset
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x10], #0x4\n"
"31:" // Oddments: Load input (5, 1): Bit 1: End
- "ldr x20, [x14, #0x88]\n"
- "fmla v28.4s, v7.4s, v11.4s\n"
- "fmla v29.4s, v6.4s, v11.4s\n"
- "add x20, x20, x13\n"
+ "fmla v19.4s, v7.4s, v11.4s\n"
+ "ldr x9, [x16, #0x88]\n"
+ "fmla v18.4s, v6.4s, v11.4s\n"
+ "add x9, x9, x14\n"
"tbz %x[n_channels], #1, 32f\n"
- "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v10.d }[0], [x9], #0x8\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x9], #0x4\n"
"b 33f\n"
"32:" // Oddments: Load input (2, 1): Bit 1: Unset
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v10.s }[0], [x9], #0x4\n"
"33:" // Oddments: Load input (2, 1): Bit 1: End
- "ldr x20, [x14, #0x90]\n"
- "fmla v16.4s, v7.4s, v10.4s\n"
- "fmla v17.4s, v6.4s, v10.4s\n"
- "add x20, x20, x13\n"
- "fmla v20.4s, v4.4s, v10.4s\n"
- "fmla v21.4s, v3.4s, v10.4s\n"
- "fmla v24.4s, v1.4s, v10.4s\n"
- "fmla v25.4s, v0.4s, v10.4s\n"
+ "fmla v31.4s, v7.4s, v10.4s\n"
+ "ldr x28, [x16, #0x90]\n"
+ "fmla v30.4s, v6.4s, v10.4s\n"
+ "add x28, x28, x14\n"
+ "fmla v27.4s, v4.4s, v10.4s\n"
+ "fmla v26.4s, v3.4s, v10.4s\n"
+ "fmla v23.4s, v1.4s, v10.4s\n"
+ "fmla v22.4s, v0.4s, v10.4s\n"
"tbz %x[n_channels], #1, 34f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x28], #0x8\n"
"tbz %x[n_channels], #0, 35f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x28], #0x4\n"
"b 35f\n"
"34:" // Oddments: Load input (5, 4): Bit 1: Unset
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x28], #0x4\n"
"35:" // Oddments: Load input (5, 4): Bit 1: End
- "ldr x20, [x14, #0x98]\n"
- "fmla v30.4s, v8.4s, v11.4s\n"
- "fmla v31.4s, v7.4s, v11.4s\n"
- "add x20, x20, x13\n"
+ "fmla v17.4s, v8.4s, v11.4s\n"
+ "ldr x27, [x16, #0x98]\n"
+ "fmla v16.4s, v7.4s, v11.4s\n"
+ "add x27, x27, x14\n"
"tbz %x[n_channels], #1, 36f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x27], #0x8\n"
"tbz %x[n_channels], #0, 37f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x27], #0x4\n"
"b 37f\n"
"36:" // Oddments: Load input (2, 4): Bit 1: Unset
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x27], #0x4\n"
"37:" // Oddments: Load input (2, 4): Bit 1: End
- "ldr x20, [x14, #0xa0]\n"
- "fmla v18.4s, v8.4s, v12.4s\n"
- "fmla v19.4s, v7.4s, v12.4s\n"
- "add x20, x20, x13\n"
- "fmla v22.4s, v5.4s, v12.4s\n"
- "fmla v23.4s, v4.4s, v12.4s\n"
- "fmla v26.4s, v2.4s, v12.4s\n"
- "fmla v27.4s, v1.4s, v12.4s\n"
+ "fmla v29.4s, v8.4s, v12.4s\n"
+ "ldr x26, [x16, #0xa0]\n"
+ "fmla v28.4s, v7.4s, v12.4s\n"
+ "add x26, x26, x14\n"
+ "fmla v25.4s, v5.4s, v12.4s\n"
+ "fmla v24.4s, v4.4s, v12.4s\n"
+ "fmla v21.4s, v2.4s, v12.4s\n"
+ "fmla v20.4s, v1.4s, v12.4s\n"
"tbz %x[n_channels], #1, 38f\n"
- "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v10.d }[0], [x26], #0x8\n"
"tbz %x[n_channels], #0, 39f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x26], #0x4\n"
"b 39f\n"
"38:" // Oddments: Load input (0, 2): Bit 1: Unset
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v10.s }[0], [x26], #0x4\n"
"39:" // Oddments: Load input (0, 2): Bit 1: End
- "ldr x20, [x14, #0xa8]\n"
- "fmla v16.4s, v2.4s, v10.4s\n"
- "fmla v17.4s, v1.4s, v10.4s\n"
- "add x20, x20, x13\n"
- "fmla v18.4s, v0.4s, v10.4s\n"
+ "fmla v31.4s, v2.4s, v10.4s\n"
+ "ldr x25, [x16, #0xa8]\n"
+ "fmla v30.4s, v1.4s, v10.4s\n"
+ "add x25, x25, x14\n"
+ "fmla v29.4s, v0.4s, v10.4s\n"
"tbz %x[n_channels], #1, 40f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x25], #0x8\n"
"tbz %x[n_channels], #0, 41f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x25], #0x4\n"
"b 41f\n"
"40:" // Oddments: Load input (3, 1): Bit 1: Unset
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x25], #0x4\n"
"41:" // Oddments: Load input (3, 1): Bit 1: End
- "ldr x20, [x14, #0xb0]\n"
- "fmla v20.4s, v7.4s, v11.4s\n"
- "fmla v21.4s, v6.4s, v11.4s\n"
- "add x20, x20, x13\n"
- "fmla v24.4s, v4.4s, v11.4s\n"
- "fmla v25.4s, v3.4s, v11.4s\n"
- "fmla v28.4s, v1.4s, v11.4s\n"
- "fmla v29.4s, v0.4s, v11.4s\n"
+ "fmla v27.4s, v7.4s, v11.4s\n"
+ "ldr x24, [x16, #0xb0]\n"
+ "fmla v26.4s, v6.4s, v11.4s\n"
+ "add x24, x24, x14\n"
+ "fmla v23.4s, v4.4s, v11.4s\n"
+ "fmla v22.4s, v3.4s, v11.4s\n"
+ "fmla v19.4s, v1.4s, v11.4s\n"
+ "fmla v18.4s, v0.4s, v11.4s\n"
"tbz %x[n_channels], #1, 42f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x24], #0x8\n"
"tbz %x[n_channels], #0, 43f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x24], #0x4\n"
"b 43f\n"
"42:" // Oddments: Load input (0, 3): Bit 1: Unset
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x24], #0x4\n"
"43:" // Oddments: Load input (0, 3): Bit 1: End
- "ldr x20, [x14, #0xb8]\n"
- "fmla v17.4s, v2.4s, v12.4s\n"
- "fmla v18.4s, v1.4s, v12.4s\n"
- "add x20, x20, x13\n"
- "fmla v19.4s, v0.4s, v12.4s\n"
+ "fmla v30.4s, v2.4s, v12.4s\n"
+ "ldr x23, [x16, #0xb8]\n"
+ "fmla v29.4s, v1.4s, v12.4s\n"
+ "add x23, x23, x14\n"
+ "fmla v28.4s, v0.4s, v12.4s\n"
"tbz %x[n_channels], #1, 44f\n"
- "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v10.d }[0], [x23], #0x8\n"
"tbz %x[n_channels], #0, 45f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x23], #0x4\n"
"b 45f\n"
"44:" // Oddments: Load input (2, 0): Bit 1: Unset
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v10.s }[0], [x23], #0x4\n"
"45:" // Oddments: Load input (2, 0): Bit 1: End
- "ldr x20, [x14, #0xc0]\n"
- "fmla v16.4s, v6.4s, v10.4s\n"
- "fmla v20.4s, v3.4s, v10.4s\n"
- "add x20, x20, x13\n"
- "fmla v24.4s, v0.4s, v10.4s\n"
+ "fmla v31.4s, v6.4s, v10.4s\n"
+ "ldr x10, [x16, #0xc0]\n"
+ "fmla v27.4s, v3.4s, v10.4s\n"
+ "add x10, x10, x14\n"
+ "fmla v23.4s, v0.4s, v10.4s\n"
"tbz %x[n_channels], #1, 46f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x10], #0x8\n"
"tbz %x[n_channels], #0, 47f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x10], #0x4\n"
"b 47f\n"
"46:" // Oddments: Load input (3, 4): Bit 1: Unset
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x10], #0x4\n"
"47:" // Oddments: Load input (3, 4): Bit 1: End
- "ldr x20, [x14, #0xc8]\n"
- "fmla v22.4s, v8.4s, v11.4s\n"
- "fmla v23.4s, v7.4s, v11.4s\n"
- "add x20, x20, x13\n"
- "fmla v26.4s, v5.4s, v11.4s\n"
- "fmla v27.4s, v4.4s, v11.4s\n"
- "fmla v30.4s, v2.4s, v11.4s\n"
- "fmla v31.4s, v1.4s, v11.4s\n"
+ "fmla v25.4s, v8.4s, v11.4s\n"
+ "ldr x9, [x16, #0xc8]\n"
+ "fmla v24.4s, v7.4s, v11.4s\n"
+ "add x9, x9, x14\n"
+ "fmla v21.4s, v5.4s, v11.4s\n"
+ "fmla v20.4s, v4.4s, v11.4s\n"
+ "fmla v17.4s, v2.4s, v11.4s\n"
+ "fmla v16.4s, v1.4s, v11.4s\n"
"tbz %x[n_channels], #1, 48f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x9], #0x8\n"
"tbz %x[n_channels], #0, 49f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x9], #0x4\n"
"b 49f\n"
"48:" // Oddments: Load input (2, 5): Bit 1: Unset
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x9], #0x4\n"
"49:" // Oddments: Load input (2, 5): Bit 1: End
- "ldr x20, [x14, #0xd0]\n"
- "fmla v19.4s, v8.4s, v12.4s\n"
- "fmla v23.4s, v5.4s, v12.4s\n"
- "add x20, x20, x13\n"
- "fmla v27.4s, v2.4s, v12.4s\n"
+ "fmla v28.4s, v8.4s, v12.4s\n"
+ "ldr x28, [x16, #0xd0]\n"
+ "fmla v24.4s, v5.4s, v12.4s\n"
+ "add x28, x28, x14\n"
+ "fmla v20.4s, v2.4s, v12.4s\n"
"tbz %x[n_channels], #1, 50f\n"
- "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v10.d }[0], [x28], #0x8\n"
"tbz %x[n_channels], #0, 51f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x28], #0x4\n"
"b 51f\n"
"50:" // Oddments: Load input (3, 0): Bit 1: Unset
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v10.s }[0], [x28], #0x4\n"
"51:" // Oddments: Load input (3, 0): Bit 1: End
- "ldr x20, [x14, #0xd8]\n"
- "fmla v20.4s, v6.4s, v10.4s\n"
- "fmla v24.4s, v3.4s, v10.4s\n"
- "add x20, x20, x13\n"
- "fmla v28.4s, v0.4s, v10.4s\n"
+ "fmla v27.4s, v6.4s, v10.4s\n"
+ "ldr x27, [x16, #0xd8]\n"
+ "fmla v23.4s, v3.4s, v10.4s\n"
+ "add x27, x27, x14\n"
+ "fmla v19.4s, v0.4s, v10.4s\n"
"tbz %x[n_channels], #1, 52f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x27], #0x8\n"
"tbz %x[n_channels], #0, 53f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x27], #0x4\n"
"b 53f\n"
"52:" // Oddments: Load input (4, 2): Bit 1: Unset
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x27], #0x4\n"
"53:" // Oddments: Load input (4, 2): Bit 1: End
- "ldr x20, [x14, #0xe0]\n"
- "fmla v24.4s, v8.4s, v11.4s\n"
- "fmla v25.4s, v7.4s, v11.4s\n"
- "add x20, x20, x13\n"
- "fmla v26.4s, v6.4s, v11.4s\n"
- "fmla v28.4s, v5.4s, v11.4s\n"
- "fmla v29.4s, v4.4s, v11.4s\n"
- "fmla v30.4s, v3.4s, v11.4s\n"
+ "fmla v23.4s, v8.4s, v11.4s\n"
+ "ldr x26, [x16, #0xe0]\n"
+ "fmla v22.4s, v7.4s, v11.4s\n"
+ "add x26, x26, x14\n"
+ "fmla v21.4s, v6.4s, v11.4s\n"
+ "fmla v19.4s, v5.4s, v11.4s\n"
+ "fmla v18.4s, v4.4s, v11.4s\n"
+ "fmla v17.4s, v3.4s, v11.4s\n"
"tbz %x[n_channels], #1, 54f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x26], #0x8\n"
"tbz %x[n_channels], #0, 55f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x26], #0x4\n"
"b 55f\n"
"54:" // Oddments: Load input (3, 5): Bit 1: Unset
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x26], #0x4\n"
"55:" // Oddments: Load input (3, 5): Bit 1: End
- "ldr x20, [x14, #0xe8]\n"
- "fmla v23.4s, v8.4s, v12.4s\n"
- "fmla v27.4s, v5.4s, v12.4s\n"
- "add x20, x20, x13\n"
- "fmla v31.4s, v2.4s, v12.4s\n"
+ "fmla v24.4s, v8.4s, v12.4s\n"
+ "ldr x25, [x16, #0xe8]\n"
+ "fmla v20.4s, v5.4s, v12.4s\n"
+ "add x25, x25, x14\n"
+ "fmla v16.4s, v2.4s, v12.4s\n"
"tbz %x[n_channels], #1, 56f\n"
- "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v10.d }[0], [x25], #0x8\n"
"tbz %x[n_channels], #0, 57f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x25], #0x4\n"
"b 57f\n"
"56:" // Oddments: Load input (5, 2): Bit 1: Unset
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v10.s }[0], [x25], #0x4\n"
"57:" // Oddments: Load input (5, 2): Bit 1: End
- "ldr x20, [x14, #0xf0]\n"
- "fmla v28.4s, v8.4s, v10.4s\n"
- "fmla v29.4s, v7.4s, v10.4s\n"
- "add x20, x20, x13\n"
- "fmla v30.4s, v6.4s, v10.4s\n"
+ "fmla v19.4s, v8.4s, v10.4s\n"
+ "ldr x24, [x16, #0xf0]\n"
+ "fmla v18.4s, v7.4s, v10.4s\n"
+ "add x24, x24, x14\n"
+ "fmla v17.4s, v6.4s, v10.4s\n"
"tbz %x[n_channels], #1, 58f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x24], #0x8\n"
"tbz %x[n_channels], #0, 59f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x24], #0x4\n"
"b 59f\n"
"58:" // Oddments: Load input (4, 3): Bit 1: Unset
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x24], #0x4\n"
"59:" // Oddments: Load input (4, 3): Bit 1: End
- "ldr x20, [x14, #0xf8]\n"
- "fmla v25.4s, v8.4s, v11.4s\n"
- "fmla v26.4s, v7.4s, v11.4s\n"
- "add x20, x20, x13\n"
- "fmla v27.4s, v6.4s, v11.4s\n"
- "fmla v29.4s, v5.4s, v11.4s\n"
- "fmla v30.4s, v4.4s, v11.4s\n"
- "fmla v31.4s, v3.4s, v11.4s\n"
+ "fmla v22.4s, v8.4s, v11.4s\n"
+ "ldr x23, [x16, #0xf8]\n"
+ "fmla v21.4s, v7.4s, v11.4s\n"
+ "add x23, x23, x14\n"
+ "fmla v20.4s, v6.4s, v11.4s\n"
+ "fmla v18.4s, v5.4s, v11.4s\n"
+ "fmla v17.4s, v4.4s, v11.4s\n"
+ "fmla v16.4s, v3.4s, v11.4s\n"
"tbz %x[n_channels], #1, 60f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x23], #0x8\n"
"tbz %x[n_channels], #0, 61f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x23], #0x4\n"
"b 61f\n"
"60:" // Oddments: Load input (5, 3): Bit 1: Unset
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x23], #0x4\n"
"61:" // Oddments: Load input (5, 3): Bit 1: End
- "ldr x20, [x14, #0x100]\n"
- "fmla v29.4s, v8.4s, v12.4s\n"
- "fmla v30.4s, v7.4s, v12.4s\n"
- "add x20, x20, x13\n"
- "fmla v31.4s, v6.4s, v12.4s\n"
+ "fmla v18.4s, v8.4s, v12.4s\n"
+ "ldr x10, [x16, #0x100]\n"
+ "fmla v17.4s, v7.4s, v12.4s\n"
+ "add x10, x10, x14\n"
+ "fmla v16.4s, v6.4s, v12.4s\n"
"tbz %x[n_channels], #1, 62f\n"
- "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v10.d }[0], [x10], #0x8\n"
"tbz %x[n_channels], #0, 63f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x10], #0x4\n"
"b 63f\n"
"62:" // Oddments: Load input (1, 1): Bit 1: Unset
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v10.s }[0], [x10], #0x4\n"
"63:" // Oddments: Load input (1, 1): Bit 1: End
- "ldr x20, [x14, #0x108]\n"
- "fmla v16.4s, v4.4s, v10.4s\n"
- "fmla v17.4s, v3.4s, v10.4s\n"
- "add x20, x20, x13\n"
- "fmla v20.4s, v1.4s, v10.4s\n"
- "fmla v21.4s, v0.4s, v10.4s\n"
+ "fmla v31.4s, v4.4s, v10.4s\n"
+ "ldr x9, [x16, #0x108]\n"
+ "fmla v30.4s, v3.4s, v10.4s\n"
+ "add x9, x9, x14\n"
+ "fmla v27.4s, v1.4s, v10.4s\n"
+ "fmla v26.4s, v0.4s, v10.4s\n"
"tbz %x[n_channels], #1, 64f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x9], #0x8\n"
"tbz %x[n_channels], #0, 65f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x9], #0x4\n"
"b 65f\n"
"64:" // Oddments: Load input (1, 4): Bit 1: Unset
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x9], #0x4\n"
"65:" // Oddments: Load input (1, 4): Bit 1: End
- "ldr x20, [x14, #0x110]\n"
- "fmla v18.4s, v5.4s, v11.4s\n"
- "fmla v19.4s, v4.4s, v11.4s\n"
- "add x20, x20, x13\n"
- "fmla v22.4s, v2.4s, v11.4s\n"
- "fmla v23.4s, v1.4s, v11.4s\n"
+ "fmla v29.4s, v5.4s, v11.4s\n"
+ "ldr x28, [x16, #0x110]\n"
+ "fmla v28.4s, v4.4s, v11.4s\n"
+ "add x28, x28, x14\n"
+ "fmla v25.4s, v2.4s, v11.4s\n"
+ "fmla v24.4s, v1.4s, v11.4s\n"
"tbz %x[n_channels], #1, 66f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x28], #0x8\n"
"tbz %x[n_channels], #0, 67f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x28], #0x4\n"
"b 67f\n"
"66:" // Oddments: Load input (4, 1): Bit 1: Unset
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x28], #0x4\n"
"67:" // Oddments: Load input (4, 1): Bit 1: End
- "ldr x20, [x14, #0x118]\n"
- "fmla v24.4s, v7.4s, v12.4s\n"
- "fmla v25.4s, v6.4s, v12.4s\n"
- "add x20, x20, x13\n"
- "fmla v28.4s, v4.4s, v12.4s\n"
- "fmla v29.4s, v3.4s, v12.4s\n"
+ "fmla v23.4s, v7.4s, v12.4s\n"
+ "ldr x27, [x16, #0x118]\n"
+ "fmla v22.4s, v6.4s, v12.4s\n"
+ "add x27, x27, x14\n"
+ "fmla v19.4s, v4.4s, v12.4s\n"
+ "fmla v18.4s, v3.4s, v12.4s\n"
"tbz %x[n_channels], #1, 68f\n"
- "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v10.d }[0], [x27], #0x8\n"
"tbz %x[n_channels], #0, 69f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x27], #0x4\n"
"b 69f\n"
"68:" // Oddments: Load input (4, 4): Bit 1: Unset
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v10.s }[0], [x27], #0x4\n"
"69:" // Oddments: Load input (4, 4): Bit 1: End
- "fmla v26.4s, v8.4s, v10.4s\n"
- "fmla v27.4s, v7.4s, v10.4s\n"
- "fmax v16.4s, v16.4s, v15.4s\n"
- "fmla v30.4s, v5.4s, v10.4s\n"
- "fmla v31.4s, v4.4s, v10.4s\n"
- "fmax v17.4s, v17.4s, v15.4s\n"
- "fmax v18.4s, v18.4s, v15.4s\n"
- "fmax v19.4s, v19.4s, v15.4s\n"
- "fmax v20.4s, v20.4s, v15.4s\n"
- "fmax v21.4s, v21.4s, v15.4s\n"
- "fmax v22.4s, v22.4s, v15.4s\n"
- "fmax v23.4s, v23.4s, v15.4s\n"
- "fmax v24.4s, v24.4s, v15.4s\n"
- "fmax v25.4s, v25.4s, v15.4s\n"
- "fmax v26.4s, v26.4s, v15.4s\n"
- "fmax v27.4s, v27.4s, v15.4s\n"
- "fmax v28.4s, v28.4s, v15.4s\n"
- "fmax v29.4s, v29.4s, v15.4s\n"
- "fmax v30.4s, v30.4s, v15.4s\n"
+ "fmla v21.4s, v8.4s, v10.4s\n"
+ "fmla v20.4s, v7.4s, v10.4s\n"
+ "fmla v17.4s, v5.4s, v10.4s\n"
+ "fmla v16.4s, v4.4s, v10.4s\n"
"fmax v31.4s, v31.4s, v15.4s\n"
- "fmin v16.4s, v16.4s, v14.4s\n"
- "fmin v17.4s, v17.4s, v14.4s\n"
- "fmin v18.4s, v18.4s, v14.4s\n"
- "fmin v19.4s, v19.4s, v14.4s\n"
- "fmin v20.4s, v20.4s, v14.4s\n"
- "fmin v21.4s, v21.4s, v14.4s\n"
- "fmin v22.4s, v22.4s, v14.4s\n"
- "fmin v23.4s, v23.4s, v14.4s\n"
- "fmin v24.4s, v24.4s, v14.4s\n"
- "fmin v25.4s, v25.4s, v14.4s\n"
- "fmin v26.4s, v26.4s, v14.4s\n"
- "fmin v27.4s, v27.4s, v14.4s\n"
- "fmin v28.4s, v28.4s, v14.4s\n"
- "fmin v29.4s, v29.4s, v14.4s\n"
- "fmin v30.4s, v30.4s, v14.4s\n"
+ "fmax v30.4s, v30.4s, v15.4s\n"
+ "fmax v29.4s, v29.4s, v15.4s\n"
"fmin v31.4s, v31.4s, v14.4s\n"
+ "fmin v30.4s, v30.4s, v14.4s\n"
+ "fmin v29.4s, v29.4s, v14.4s\n"
+ "fmax v28.4s, v28.4s, v15.4s\n"
+ "fmax v27.4s, v27.4s, v15.4s\n"
+ "fmax v26.4s, v26.4s, v15.4s\n"
+ "fmin v28.4s, v28.4s, v14.4s\n"
+ "fmin v27.4s, v27.4s, v14.4s\n"
+ "fmin v26.4s, v26.4s, v14.4s\n"
+ "fmax v25.4s, v25.4s, v15.4s\n"
+ "fmax v24.4s, v24.4s, v15.4s\n"
+ "fmax v23.4s, v23.4s, v15.4s\n"
+ "fmin v25.4s, v25.4s, v14.4s\n"
+ "fmin v24.4s, v24.4s, v14.4s\n"
+ "fmin v23.4s, v23.4s, v14.4s\n"
+ "fmax v22.4s, v22.4s, v15.4s\n"
+ "fmax v21.4s, v21.4s, v15.4s\n"
+ "fmax v20.4s, v20.4s, v15.4s\n"
+ "fmin v22.4s, v22.4s, v14.4s\n"
+ "fmin v21.4s, v21.4s, v14.4s\n"
+ "fmin v20.4s, v20.4s, v14.4s\n"
+ "fmax v19.4s, v19.4s, v15.4s\n"
+ "fmax v18.4s, v18.4s, v15.4s\n"
+ "fmax v17.4s, v17.4s, v15.4s\n"
+ "fmin v19.4s, v19.4s, v14.4s\n"
+ "fmin v18.4s, v18.4s, v14.4s\n"
+ "fmin v17.4s, v17.4s, v14.4s\n"
+ "fmax v16.4s, v16.4s, v15.4s\n"
+ "fmin v16.4s, v16.4s, v14.4s\n"
"tbz %x[n_channels], #1, 70f\n"
- "ldr x23, [x16, #0x0]\n"
- "ldr x22, [x16, #0x8]\n"
- "add x23, x23, x12\n"
+ "ldr x22, [x17, #0x0]\n"
+ "ldr x21, [x17, #0x8]\n"
"add x22, x22, x12\n"
- "ldr x21, [x16, #0x10]\n"
- "ldr x20, [x16, #0x18]\n"
+ "ldr x20, [x17, #0x10]\n"
+ "ldr x19, [x17, #0x18]\n"
"add x21, x21, x12\n"
+ "st1 { v31.d }[0], [x22]\n"
"add x20, x20, x12\n"
- "st1 { v16.d }[0], [x23]\n"
- "ldr x23, [x16, #0x20]\n"
- "add x23, x23, x12\n"
- "st1 { v17.d }[0], [x22]\n"
- "ldr x22, [x16, #0x28]\n"
+ "st1 { v30.d }[0], [x21]\n"
+ "ldr x22, [x17, #0x20]\n"
+ "add x19, x19, x12\n"
+ "st1 { v29.d }[0], [x20]\n"
"add x22, x22, x12\n"
- "st1 { v18.d }[0], [x21]\n"
- "ldr x21, [x16, #0x30]\n"
+ "st1 { v28.d }[0], [x19]\n"
+ "ldr x21, [x17, #0x28]\n"
"add x21, x21, x12\n"
- "st1 { v19.d }[0], [x20]\n"
- "ldr x20, [x16, #0x38]\n"
+ "st1 { v27.d }[0], [x22]\n"
+ "ldr x20, [x17, #0x30]\n"
"add x20, x20, x12\n"
- "st1 { v20.d }[0], [x23]\n"
- "ldr x23, [x16, #0x40]\n"
- "add x23, x23, x12\n"
- "st1 { v21.d }[0], [x22]\n"
- "ldr x22, [x16, #0x48]\n"
+ "st1 { v26.d }[0], [x21]\n"
+ "ldr x19, [x17, #0x38]\n"
+ "add x19, x19, x12\n"
+ "st1 { v25.d }[0], [x20]\n"
+ "ldr x22, [x17, #0x40]\n"
"add x22, x22, x12\n"
- "st1 { v22.d }[0], [x21]\n"
- "ldr x21, [x16, #0x50]\n"
+ "st1 { v24.d }[0], [x19]\n"
+ "ldr x21, [x17, #0x48]\n"
"add x21, x21, x12\n"
- "st1 { v23.d }[0], [x20]\n"
- "ldr x20, [x16, #0x58]\n"
+ "st1 { v23.d }[0], [x22]\n"
+ "ldr x20, [x17, #0x50]\n"
"add x20, x20, x12\n"
- "st1 { v24.d }[0], [x23]\n"
- "ldr x23, [x16, #0x60]\n"
- "add x23, x23, x12\n"
- "st1 { v25.d }[0], [x22]\n"
- "ldr x22, [x16, #0x68]\n"
+ "st1 { v22.d }[0], [x21]\n"
+ "ldr x19, [x17, #0x58]\n"
+ "add x19, x19, x12\n"
+ "st1 { v21.d }[0], [x20]\n"
+ "ldr x22, [x17, #0x60]\n"
"add x22, x22, x12\n"
- "st1 { v26.d }[0], [x21]\n"
- "ldr x21, [x16, #0x70]\n"
+ "st1 { v20.d }[0], [x19]\n"
+ "ldr x21, [x17, #0x68]\n"
"add x21, x21, x12\n"
- "st1 { v27.d }[0], [x20]\n"
- "ldr x20, [x16, #0x78]\n"
+ "st1 { v19.d }[0], [x22]\n"
+ "ldr x20, [x17, #0x70]\n"
"add x20, x20, x12\n"
+ "st1 { v18.d }[0], [x21]\n"
+ "ldr x19, [x17, #0x78]\n"
+ "add x19, x19, x12\n"
+ "st1 { v17.d }[0], [x20]\n"
"add x12, x12, #0x8\n"
- "st1 { v28.d }[0], [x23]\n"
- "st1 { v29.d }[0], [x22]\n"
- "st1 { v30.d }[0], [x21]\n"
- "st1 { v31.d }[0], [x20]\n"
+ "st1 { v16.d }[0], [x19]\n"
"tbz %x[n_channels], #0, 71f\n"
- "ldr x23, [x16, #0x0]\n"
- "ldr x22, [x16, #0x8]\n"
- "add x23, x23, x12\n"
+ "ldr x22, [x17, #0x0]\n"
+ "ldr x21, [x17, #0x8]\n"
"add x22, x22, x12\n"
- "ldr x21, [x16, #0x10]\n"
- "ldr x20, [x16, #0x18]\n"
+ "ldr x20, [x17, #0x10]\n"
+ "ldr x19, [x17, #0x18]\n"
"add x21, x21, x12\n"
+ "st1 { v31.s }[2], [x22]\n"
"add x20, x20, x12\n"
- "st1 { v16.s }[2], [x23]\n"
- "ldr x23, [x16, #0x20]\n"
- "add x23, x23, x12\n"
- "st1 { v17.s }[2], [x22]\n"
- "ldr x22, [x16, #0x28]\n"
+ "st1 { v30.s }[2], [x21]\n"
+ "ldr x22, [x17, #0x20]\n"
+ "add x19, x19, x12\n"
+ "st1 { v29.s }[2], [x20]\n"
"add x22, x22, x12\n"
- "st1 { v18.s }[2], [x21]\n"
- "ldr x21, [x16, #0x30]\n"
+ "st1 { v28.s }[2], [x19]\n"
+ "ldr x21, [x17, #0x28]\n"
"add x21, x21, x12\n"
- "st1 { v19.s }[2], [x20]\n"
- "ldr x20, [x16, #0x38]\n"
+ "st1 { v27.s }[2], [x22]\n"
+ "ldr x20, [x17, #0x30]\n"
"add x20, x20, x12\n"
- "st1 { v20.s }[2], [x23]\n"
- "ldr x23, [x16, #0x40]\n"
- "add x23, x23, x12\n"
- "st1 { v21.s }[2], [x22]\n"
- "ldr x22, [x16, #0x48]\n"
+ "st1 { v26.s }[2], [x21]\n"
+ "ldr x19, [x17, #0x38]\n"
+ "add x19, x19, x12\n"
+ "st1 { v25.s }[2], [x20]\n"
+ "ldr x22, [x17, #0x40]\n"
"add x22, x22, x12\n"
- "st1 { v22.s }[2], [x21]\n"
- "ldr x21, [x16, #0x50]\n"
+ "st1 { v24.s }[2], [x19]\n"
+ "ldr x21, [x17, #0x48]\n"
"add x21, x21, x12\n"
- "st1 { v23.s }[2], [x20]\n"
- "ldr x20, [x16, #0x58]\n"
+ "st1 { v23.s }[2], [x22]\n"
+ "ldr x20, [x17, #0x50]\n"
"add x20, x20, x12\n"
- "st1 { v24.s }[2], [x23]\n"
- "ldr x23, [x16, #0x60]\n"
- "add x23, x23, x12\n"
- "st1 { v25.s }[2], [x22]\n"
- "ldr x22, [x16, #0x68]\n"
+ "st1 { v22.s }[2], [x21]\n"
+ "ldr x19, [x17, #0x58]\n"
+ "add x19, x19, x12\n"
+ "st1 { v21.s }[2], [x20]\n"
+ "ldr x22, [x17, #0x60]\n"
"add x22, x22, x12\n"
- "st1 { v26.s }[2], [x21]\n"
- "ldr x21, [x16, #0x70]\n"
+ "st1 { v20.s }[2], [x19]\n"
+ "ldr x21, [x17, #0x68]\n"
"add x21, x21, x12\n"
- "st1 { v27.s }[2], [x20]\n"
- "ldr x20, [x16, #0x78]\n"
+ "st1 { v19.s }[2], [x22]\n"
+ "ldr x20, [x17, #0x70]\n"
"add x20, x20, x12\n"
- "st1 { v28.s }[2], [x23]\n"
- "st1 { v29.s }[2], [x22]\n"
- "st1 { v30.s }[2], [x21]\n"
- "st1 { v31.s }[2], [x20]\n"
+ "st1 { v18.s }[2], [x21]\n"
+ "ldr x19, [x17, #0x78]\n"
+ "add x19, x19, x12\n"
+ "st1 { v17.s }[2], [x20]\n"
+ "st1 { v16.s }[2], [x19]\n"
"b 71f\n"
"70:" // Oddments: Store: Bit 1: Unset
- "ldr x23, [x16, #0x0]\n"
- "ldr x22, [x16, #0x8]\n"
- "add x23, x23, x12\n"
+ "ldr x22, [x17, #0x0]\n"
"add x22, x22, x12\n"
- "ldr x21, [x16, #0x10]\n"
- "ldr x20, [x16, #0x18]\n"
+ "ldr x21, [x17, #0x8]\n"
+ "ldr x20, [x17, #0x10]\n"
"add x21, x21, x12\n"
+ "st1 { v31.s }[0], [x22]\n"
+ "ldr x19, [x17, #0x18]\n"
"add x20, x20, x12\n"
- "st1 { v16.s }[0], [x23]\n"
- "ldr x23, [x16, #0x20]\n"
- "add x23, x23, x12\n"
- "st1 { v17.s }[0], [x22]\n"
- "ldr x22, [x16, #0x28]\n"
+ "st1 { v30.s }[0], [x21]\n"
+ "add x19, x19, x12\n"
+ "st1 { v29.s }[0], [x20]\n"
+ "ldr x22, [x17, #0x20]\n"
"add x22, x22, x12\n"
- "st1 { v18.s }[0], [x21]\n"
- "ldr x21, [x16, #0x30]\n"
+ "st1 { v28.s }[0], [x19]\n"
+ "ldr x21, [x17, #0x28]\n"
"add x21, x21, x12\n"
- "st1 { v19.s }[0], [x20]\n"
- "ldr x20, [x16, #0x38]\n"
+ "st1 { v27.s }[0], [x22]\n"
+ "ldr x20, [x17, #0x30]\n"
"add x20, x20, x12\n"
- "st1 { v20.s }[0], [x23]\n"
- "ldr x23, [x16, #0x40]\n"
- "add x23, x23, x12\n"
- "st1 { v21.s }[0], [x22]\n"
- "ldr x22, [x16, #0x48]\n"
+ "st1 { v26.s }[0], [x21]\n"
+ "ldr x19, [x17, #0x38]\n"
+ "add x19, x19, x12\n"
+ "st1 { v25.s }[0], [x20]\n"
+ "ldr x22, [x17, #0x40]\n"
"add x22, x22, x12\n"
- "st1 { v22.s }[0], [x21]\n"
- "ldr x21, [x16, #0x50]\n"
+ "st1 { v24.s }[0], [x19]\n"
+ "ldr x21, [x17, #0x48]\n"
"add x21, x21, x12\n"
- "st1 { v23.s }[0], [x20]\n"
- "ldr x20, [x16, #0x58]\n"
+ "st1 { v23.s }[0], [x22]\n"
+ "ldr x20, [x17, #0x50]\n"
"add x20, x20, x12\n"
- "st1 { v24.s }[0], [x23]\n"
- "ldr x23, [x16, #0x60]\n"
- "add x23, x23, x12\n"
- "st1 { v25.s }[0], [x22]\n"
- "ldr x22, [x16, #0x68]\n"
+ "st1 { v22.s }[0], [x21]\n"
+ "ldr x19, [x17, #0x58]\n"
+ "add x19, x19, x12\n"
+ "st1 { v21.s }[0], [x20]\n"
+ "ldr x22, [x17, #0x60]\n"
"add x22, x22, x12\n"
- "st1 { v26.s }[0], [x21]\n"
- "ldr x21, [x16, #0x70]\n"
+ "st1 { v20.s }[0], [x19]\n"
+ "ldr x21, [x17, #0x68]\n"
"add x21, x21, x12\n"
- "st1 { v27.s }[0], [x20]\n"
- "ldr x20, [x16, #0x78]\n"
+ "st1 { v19.s }[0], [x22]\n"
+ "ldr x20, [x17, #0x70]\n"
"add x20, x20, x12\n"
- "st1 { v28.s }[0], [x23]\n"
- "st1 { v29.s }[0], [x22]\n"
- "st1 { v30.s }[0], [x21]\n"
- "st1 { v31.s }[0], [x20]\n"
+ "st1 { v18.s }[0], [x21]\n"
+ "ldr x19, [x17, #0x78]\n"
+ "add x19, x19, x12\n"
+ "st1 { v17.s }[0], [x20]\n"
+ "st1 { v16.s }[0], [x19]\n"
"71:" // Oddments: Store: Bit 1: End
+
"72:" // End
+
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
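An explanatory aside, not part of the patch: the "Oddments" blocks above handle the n_channels % 4 leftover lanes. The kernel tests bit 1 and then bit 0 of n_channels with tbz, loading a 64-bit pair into lane d[0] and/or a single 32-bit element into lane s[2] or s[0]. A rough C++ equivalent of that partial-vector load, using NEON intrinsics (the helper name is hypothetical, for exposition only):

    #include <arm_neon.h>

    // Load the 1-3 leftover channels at p into a float32x4_t, mirroring the
    // tbz #1 / tbz #0 structure of the "Oddments" asm above. tbz branches
    // when the tested bit is clear, so the loads below run when the bit is set.
    static inline float32x4_t load_tail(const float *&p, unsigned n_channels)
    {
        float32x4_t v = vdupq_n_f32(0.0f);
        if (n_channels & 2)                                   // bit 1 set
        {
            v = vcombine_f32(vld1_f32(p), vget_high_f32(v));  // ld1 { v.d }[0]
            p += 2;
            if (n_channels & 1)                               // bit 0 set
                v = vld1q_lane_f32(p++, v, 2);                // ld1 { v.s }[2]
        }
        else if (n_channels & 1)                              // "Bit 1: Unset" path
        {
            v = vld1q_lane_f32(p++, v, 0);                    // ld1 { v.s }[0]
        }
        return v;
    }

The matching "Oddments: Store" blocks write the accumulators back with the same bit tests, after the fmax/fmin pair clamps each result to the [min, max] activation range held in v15/v14 here (v19/v18 in the tiled kernel below).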
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
index e42ceffb50..65e487ddbb 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,526 +87,526 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
);
__asm__ __volatile__(
- "mov x23, #0x0\n"
+ "mov x6, #0x0\n"
"mov x27, #0x0\n"
"1:" // Tile loop
- "str x23, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "str x6, [%x[params_struct], %[offsetof_args_tile_i]]\n"
"mov x26, #0x4\n"
- "mov x25, #0x2\n"
"str x27, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x24, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x6, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "mul x22, x23, x24\n" // offset = tile_i * ld_input_row
- "ldr x21, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "madd x22, x27, x6, x22\n" // offset += tile_j * ld_input_col
- "ldr x7, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "lsl x6, x6, #0x2\n"
- "mul x20, x23, x21\n" // offset = tile_i * ld_output_row
- "ldr x8, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "ldr x17, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "mov x23, #0x10\n" // cntb _, ALL, #1
- "mul x22, x22, x26\n" // offset *= kernel_stride * output_size
- "add x8, x8, x22, LSL #2\n" // inptr[0] += offset * sizeof(float)
- "add x16, x8, x24, LSL #2\n"
- "ldr x15, [%x[params_struct], %[offsetof_args_params]]\n"
- "madd x20, x27, x7, x20\n" // offset += tile_j * ld_output_col
- "lsr x22, %x[n_channels], #0x2\n"
- "add x14, x16, x24, LSL #2\n"
- "mul x20, x20, x25\n" // offset *= output_tile_size
- "add x13, x6, x6\n"
- "add x12, x14, x24, LSL #2\n"
- "add x11, x13, x6\n"
- "add x17, x17, x20, LSL #2\n" // outptrs[0] += offset * sizeof(float)
- "add x20, %x[params_struct], %[offsetof_args_min]\n"
- "ld1r { v19.4s }, [x20]\n"
- "add x20, %x[params_struct], %[offsetof_args_max]\n"
- "ld1r { v18.4s }, [x20]\n"
- "add x10, x12, x24, LSL #2\n"
- "add x9, x11, x6\n"
- "add x28, x17, x21, LSL #2\n"
- "lsl x7, x7, #0x2\n"
- "mov x21, #0x0\n"
- "sub x20, XZR, x23\n"
- "cbz x22, 4f\n"
- "ldr q17, [x15, #0x0]\n"
- "ldr q0, [x15, #0x10]\n"
- "cmp x23, x22, LSL #4\n"
- "ldr q1, [x15, #0x20]\n"
- "ldr q2, [x15, #0x30]\n"
- "ldr q3, [x15, #0x40]\n"
- "ldr q4, [x15, #0x50]\n"
- "ldr q5, [x15, #0x60]\n"
- "ldr q6, [x15, #0x70]\n"
- "ldr q7, [x15, #0x80]\n"
- "ldr q8, [x15, #0x90]\n"
- "add x15, x15, #0xa0\n"
- "ldr q9, [x14, x13]\n"
- "ld1 { v10.4s }, [x8]\n"
- "ldr q11, [x8, x6]\n"
- "ldr q12, [x8, x11]\n"
- "ldr q13, [x8, x9]\n"
- "ld1 { v14.4s }, [x16]\n"
- "ldr q15, [x16, x6]\n"
- "ldr q16, [x8, x13]\n"
+ "mov x25, #0x2\n"
+ "ldr x7, [%x[params_struct], %[offsetof_args_params]]\n"
+ "add x24, %x[params_struct], %[offsetof_args_min]\n"
+ "ldr x23, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "add x21, %x[params_struct], %[offsetof_args_max]\n"
+ "ldr x8, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "mov x22, #0x0\n"
+ "ldr x17, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "mul x19, x6, x23\n" // offset = tile_i * ld_input_row
+ "ldr x20, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "madd x19, x27, x8, x19\n" // offset += tile_j * ld_input_col
+ "ldr x16, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+ "mul x19, x19, x26\n" // offset *= kernel_stride * output_size
+ "ldr x15, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "add x17, x17, x19, LSL #2\n" // inptr[0] += offset * sizeof(float)
+ "ld1r { v19.4s }, [x24]\n"
+ "add x14, x17, x23, LSL #2\n"
+ "ld1r { v18.4s }, [x21]\n"
+ "add x13, x14, x23, LSL #2\n"
+ "lsl x8, x8, #0x2\n"
+ "add x12, x13, x23, LSL #2\n"
+ "add x11, x12, x23, LSL #2\n"
+ "add x10, x8, x8\n"
+ "add x9, x10, x8\n"
+ "add x28, x9, x8\n"
+ "mul x19, x6, x20\n" // offset = tile_i * ld_output_row
+ "madd x19, x27, x16, x19\n" // offset += tile_j * ld_output_col
+ "mul x19, x19, x25\n" // offset *= output_tile_size
+ "add x15, x15, x19, LSL #2\n" // outptrs[0] += offset * sizeof(float)
+ "add x27, x15, x20, LSL #2\n"
+ "lsl x16, x16, #0x2\n"
+ "mov x21, #0x10\n" // cntb _, ALL, #1
+ "sub x20, XZR, x21\n"
+ "lsr x19, %x[n_channels], #0x2\n"
+ "cbz x19, 4f\n"
+ "ldr q17, [x7, #0x0]\n"
+ "ldr q0, [x7, #0x10]\n"
+ "cmp x21, x19, LSL #4\n"
+ "ldr q1, [x7, #0x20]\n"
+ "ldr q2, [x7, #0x30]\n"
+ "ldr q3, [x7, #0x40]\n"
+ "ldr q4, [x7, #0x50]\n"
+ "ldr q5, [x7, #0x60]\n"
+ "ldr q6, [x7, #0x70]\n"
+ "ldr q7, [x7, #0x80]\n"
+ "ldr q8, [x7, #0x90]\n"
+ "add x7, x7, #0xa0\n"
+ "ldr q9, [x13, x10]\n"
+ "ld1 { v10.4s }, [x17]\n"
+ "ldr q11, [x17, x8]\n"
+ "ldr q12, [x17, x9]\n"
+ "ldr q13, [x17, x28]\n"
+ "ld1 { v14.4s }, [x14]\n"
+ "ldr q15, [x14, x8]\n"
+ "ldr q16, [x17, x10]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
- "mov v28.16b, v17.16b\n fmla v28.4s, v8.4s, v9.4s\n"
- "mov v29.16b, v17.16b\n fmla v29.4s, v6.4s, v9.4s\n"
- "add x23, x23, #0x10\n"
- "add x8, x8, #0x10\n"
- "fmla v28.4s, v0.4s, v10.4s\n"
- "ld1 { v10.4s }, [x8]\n"
- "fmla v29.4s, v1.4s, v12.4s\n"
- "ldr q12, [x16, x9]\n"
- "fmla v28.4s, v1.4s, v11.4s\n"
- "ldr q11, [x16, x11]\n"
- "fmla v29.4s, v2.4s, v13.4s\n"
- "ldr q13, [x16, x13]\n"
- "fmla v28.4s, v3.4s, v14.4s\n"
- "ld1 { v14.4s }, [x12]\n"
- "fmla v29.4s, v0.4s, v16.4s\n"
- "add x16, x16, #0x10\n"
- "fmla v28.4s, v4.4s, v15.4s\n"
- "ld1 { v15.4s }, [x14]\n"
- "fmla v29.4s, v4.4s, v11.4s\n"
- "ldr q11, [x12, x6]\n"
- "fmla v28.4s, v2.4s, v16.4s\n"
- "ldr q16, [x14, x6]\n"
- "fmla v29.4s, v5.4s, v12.4s\n"
- "ldr q12, [x14, x11]\n"
- "mov v30.16b, v17.16b\n fmla v30.4s, v2.4s, v9.4s\n"
- "mov v31.16b, v17.16b\n fmla v31.4s, v0.4s, v9.4s\n"
- "ldr q17, [x15, #0x0]\n"
- "cmp x23, x22, LSL #4\n"
- "fmla v28.4s, v5.4s, v13.4s\n"
- "fmla v29.4s, v3.4s, v13.4s\n"
- "ldr q13, [x12, x11]\n"
+ "mov v31.16b, v17.16b\n fmla v31.4s, v8.4s, v9.4s\n"
"add x20, x20, #0x10\n"
- "fmla v30.4s, v3.4s, v14.4s\n"
- "ldr q14, [x12, x9]\n"
- "fmla v31.4s, v4.4s, v13.4s\n"
- "ldr q13, [x10, x6]\n"
- "fmla v30.4s, v0.4s, v15.4s\n"
- "ldr q0, [x15, #0x10]\n"
- "fmla v31.4s, v1.4s, v12.4s\n"
+ "mov v30.16b, v17.16b\n fmla v30.4s, v6.4s, v9.4s\n"
+ "add x22, x22, #0x10\n"
+ "mov v29.16b, v17.16b\n fmla v29.4s, v2.4s, v9.4s\n"
+ "add x17, x17, #0x10\n"
+ "mov v28.16b, v17.16b\n fmla v28.4s, v0.4s, v9.4s\n"
+ "ldr q17, [x7, #0x0]\n"
"add x21, x21, #0x10\n"
- "fmla v30.4s, v4.4s, v11.4s\n"
+ "fmla v31.4s, v0.4s, v10.4s\n"
+ "ld1 { v10.4s }, [x17]\n"
+ "cmp x21, x19, LSL #4\n"
+ "fmla v30.4s, v1.4s, v12.4s\n"
+ "ldr q12, [x14, x28]\n"
+ "fmla v31.4s, v1.4s, v11.4s\n"
"ldr q11, [x14, x9]\n"
- "ldr q4, [x15, #0x50]\n"
- "fmla v31.4s, v5.4s, v14.4s\n"
- "ldr q14, [x10, x11]\n"
- "fmla v28.4s, v6.4s, v15.4s\n"
- "ld1 { v15.4s }, [x10]\n"
- "fmla v30.4s, v1.4s, v16.4s\n"
- "ldr q1, [x15, #0x20]\n"
- "fmla v31.4s, v2.4s, v11.4s\n"
- "ldr q2, [x15, #0x30]\n"
- "fmla v28.4s, v7.4s, v16.4s\n"
- "ldr q16, [x12, x13]\n"
- "fmla v30.4s, v6.4s, v15.4s\n"
- "ldr q15, [x10, x13]\n"
- "fmla v31.4s, v3.4s, v16.4s\n"
- "ldr q3, [x15, #0x40]\n"
- "fmla v30.4s, v7.4s, v13.4s\n"
- "ldr q13, [x8, x9]\n"
- "fmla v31.4s, v7.4s, v14.4s\n"
- "ld1 { v14.4s }, [x16]\n"
- "fmla v29.4s, v7.4s, v12.4s\n"
- "ldr q12, [x8, x11]\n"
- "fmla v30.4s, v5.4s, v16.4s\n"
- "ldr q16, [x8, x13]\n"
- "ldr q5, [x15, #0x60]\n"
+ "fmla v30.4s, v2.4s, v13.4s\n"
+ "ldr q13, [x14, x10]\n"
+ "add x14, x14, #0x10\n"
+ "fmla v31.4s, v3.4s, v14.4s\n"
+ "ld1 { v14.4s }, [x12]\n"
+ "fmla v30.4s, v0.4s, v16.4s\n"
+ "fmla v31.4s, v4.4s, v15.4s\n"
+ "ld1 { v15.4s }, [x13]\n"
+ "fmla v29.4s, v3.4s, v14.4s\n"
+ "ldr q14, [x12, x28]\n"
+ "fmla v30.4s, v4.4s, v11.4s\n"
+ "ldr q11, [x12, x8]\n"
+ "fmla v31.4s, v2.4s, v16.4s\n"
+ "ldr q16, [x13, x8]\n"
+ "fmla v29.4s, v0.4s, v15.4s\n"
+ "ldr q0, [x7, #0x10]\n"
+ "fmla v30.4s, v5.4s, v12.4s\n"
+ "ldr q12, [x13, x9]\n"
+ "fmla v31.4s, v5.4s, v13.4s\n"
+ "fmla v29.4s, v4.4s, v11.4s\n"
+ "ldr q11, [x13, x28]\n"
+ "add x13, x13, #0x10\n"
+ "fmla v30.4s, v3.4s, v13.4s\n"
+ "ldr q13, [x12, x9]\n"
+ "ldr q9, [x13, x10]\n"
"fmla v31.4s, v6.4s, v15.4s\n"
- "fmla v29.4s, v8.4s, v11.4s\n"
- "ldr q11, [x10, x9]\n"
- "ldr q6, [x15, #0x70]\n"
- "fmla v30.4s, v8.4s, v15.4s\n"
- "fmla v31.4s, v8.4s, v11.4s\n"
- "ldr q11, [x8, x6]\n"
- "ldr q15, [x16, x6]\n"
- "fmax v28.4s, v28.4s, v19.4s\n"
- "fmax v29.4s, v29.4s, v19.4s\n"
- "ldr q7, [x15, #0x80]\n"
- "ldr q8, [x15, #0x90]\n"
- "fmax v30.4s, v30.4s, v19.4s\n"
+ "ld1 { v15.4s }, [x11]\n"
+ "fmla v29.4s, v1.4s, v16.4s\n"
+ "fmla v28.4s, v4.4s, v13.4s\n"
+ "ldr q13, [x11, x8]\n"
+ "fmla v30.4s, v7.4s, v12.4s\n"
+ "ldr q4, [x7, #0x50]\n"
+ "fmla v31.4s, v7.4s, v16.4s\n"
+ "ldr q16, [x12, x10]\n"
+ "add x12, x12, #0x10\n"
+ "fmla v29.4s, v6.4s, v15.4s\n"
+ "ldr q15, [x11, x10]\n"
+ "fmla v28.4s, v1.4s, v12.4s\n"
+ "ldr q12, [x17, x9]\n"
+ "fmla v30.4s, v8.4s, v11.4s\n"
+ "ldr q1, [x7, #0x20]\n"
"fmax v31.4s, v31.4s, v19.4s\n"
- "add x14, x14, #0x10\n"
- "ldr q9, [x14, x13]\n"
- "fmin v28.4s, v28.4s, v18.4s\n"
- "fmin v29.4s, v29.4s, v18.4s\n"
- "fmin v30.4s, v30.4s, v18.4s\n"
+ "fmla v29.4s, v7.4s, v13.4s\n"
+ "ldr q13, [x17, x28]\n"
+ "fmla v28.4s, v5.4s, v14.4s\n"
+ "ldr q14, [x11, x9]\n"
+ "fmax v30.4s, v30.4s, v19.4s\n"
"fmin v31.4s, v31.4s, v18.4s\n"
- "add x12, x12, #0x10\n"
- "add x10, x10, #0x10\n"
- "st1 { v28.4s }, [x17]\n"
- "add x15, x15, #0xa0\n"
- "str q29, [x17, x7]\n"
- "add x17, x17, #0x10\n"
- "st1 { v30.4s }, [x28]\n"
- "str q31, [x28, x7]\n"
- "add x28, x28, #0x10\n"
+ "st1 { v31.4s }, [x15]\n"
+ "fmla v28.4s, v2.4s, v11.4s\n"
+ "fmla v29.4s, v5.4s, v16.4s\n"
+ "ldr q11, [x11, x28]\n"
+ "add x11, x11, #0x10\n"
+ "fmin v30.4s, v30.4s, v18.4s\n"
+ "ldr q2, [x7, #0x30]\n"
+ "ldr q5, [x7, #0x60]\n"
+ "fmla v28.4s, v3.4s, v16.4s\n"
+ "ldr q16, [x17, x10]\n"
+ "fmla v29.4s, v8.4s, v15.4s\n"
+ "str q30, [x15, x16]\n"
+ "add x15, x15, #0x10\n"
+ "fmla v28.4s, v7.4s, v14.4s\n"
+ "ld1 { v14.4s }, [x14]\n"
+ "fmax v29.4s, v29.4s, v19.4s\n"
+ "ldr q3, [x7, #0x40]\n"
+ "ldr q7, [x7, #0x80]\n"
+ "fmin v29.4s, v29.4s, v18.4s\n"
+ "st1 { v29.4s }, [x27]\n"
+ "fmla v28.4s, v6.4s, v15.4s\n"
+ "ldr q15, [x14, x8]\n"
+ "fmla v28.4s, v8.4s, v11.4s\n"
+ "ldr q11, [x17, x8]\n"
+ "ldr q6, [x7, #0x70]\n"
+ "fmax v28.4s, v28.4s, v19.4s\n"
+ "ldr q8, [x7, #0x90]\n"
+ "add x7, x7, #0xa0\n"
+ "fmin v28.4s, v28.4s, v18.4s\n"
+ "str q28, [x27, x16]\n"
+ "add x27, x27, #0x10\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
- "mov v28.16b, v17.16b\n fmla v28.4s, v8.4s, v9.4s\n"
- "mov v29.16b, v17.16b\n fmla v29.4s, v6.4s, v9.4s\n"
- "add x8, x8, #0x10\n"
- "fmla v28.4s, v0.4s, v10.4s\n"
- "fmla v29.4s, v1.4s, v12.4s\n"
- "ldr q12, [x16, x9]\n"
- "fmla v28.4s, v1.4s, v11.4s\n"
- "ldr q11, [x16, x11]\n"
- "fmla v29.4s, v2.4s, v13.4s\n"
- "ldr q13, [x16, x13]\n"
- "fmla v28.4s, v3.4s, v14.4s\n"
- "ld1 { v14.4s }, [x12]\n"
- "fmla v29.4s, v0.4s, v16.4s\n"
- "add x16, x16, #0x10\n"
- "fmla v28.4s, v4.4s, v15.4s\n"
- "ld1 { v15.4s }, [x14]\n"
- "fmla v29.4s, v4.4s, v11.4s\n"
- "ldr q11, [x12, x6]\n"
- "fmla v28.4s, v2.4s, v16.4s\n"
- "ldr q16, [x14, x6]\n"
- "fmla v29.4s, v5.4s, v12.4s\n"
- "ldr q12, [x14, x11]\n"
- "mov v30.16b, v17.16b\n fmla v30.4s, v2.4s, v9.4s\n"
- "mov v31.16b, v17.16b\n fmla v31.4s, v0.4s, v9.4s\n"
- "fmla v28.4s, v5.4s, v13.4s\n"
- "fmla v29.4s, v3.4s, v13.4s\n"
- "ldr q13, [x12, x11]\n"
- "fmla v30.4s, v3.4s, v14.4s\n"
- "ldr q14, [x12, x9]\n"
- "fmla v31.4s, v4.4s, v13.4s\n"
- "ldr q13, [x10, x6]\n"
- "fmla v30.4s, v0.4s, v15.4s\n"
- "fmla v31.4s, v1.4s, v12.4s\n"
- "fmla v30.4s, v4.4s, v11.4s\n"
+ "mov v31.16b, v17.16b\n fmla v31.4s, v8.4s, v9.4s\n"
+ "add x17, x17, #0x10\n"
+ "mov v30.16b, v17.16b\n fmla v30.4s, v6.4s, v9.4s\n"
+ "mov v29.16b, v17.16b\n fmla v29.4s, v2.4s, v9.4s\n"
+ "mov v28.16b, v17.16b\n fmla v28.4s, v0.4s, v9.4s\n"
+ "fmla v31.4s, v0.4s, v10.4s\n"
+ "fmla v30.4s, v1.4s, v12.4s\n"
+ "ldr q12, [x14, x28]\n"
+ "fmla v31.4s, v1.4s, v11.4s\n"
"ldr q11, [x14, x9]\n"
- "fmla v31.4s, v5.4s, v14.4s\n"
- "ldr q14, [x10, x11]\n"
- "fmla v28.4s, v6.4s, v15.4s\n"
- "ld1 { v15.4s }, [x10]\n"
- "fmla v30.4s, v1.4s, v16.4s\n"
+ "fmla v30.4s, v2.4s, v13.4s\n"
+ "ldr q13, [x14, x10]\n"
"add x14, x14, #0x10\n"
- "fmla v31.4s, v2.4s, v11.4s\n"
- "fmla v28.4s, v7.4s, v16.4s\n"
- "ldr q16, [x12, x13]\n"
- "fmax v28.4s, v28.4s, v19.4s\n"
- "fmla v30.4s, v6.4s, v15.4s\n"
- "ldr q15, [x10, x13]\n"
- "fmla v31.4s, v3.4s, v16.4s\n"
- "fmin v28.4s, v28.4s, v18.4s\n"
- "fmla v30.4s, v7.4s, v13.4s\n"
- "fmla v31.4s, v7.4s, v14.4s\n"
- "st1 { v28.4s }, [x17]\n"
- "add x12, x12, #0x10\n"
- "fmla v29.4s, v7.4s, v12.4s\n"
- "fmla v30.4s, v5.4s, v16.4s\n"
+ "fmla v31.4s, v3.4s, v14.4s\n"
+ "ld1 { v14.4s }, [x12]\n"
+ "fmla v30.4s, v0.4s, v16.4s\n"
+ "fmla v31.4s, v4.4s, v15.4s\n"
+ "ld1 { v15.4s }, [x13]\n"
+ "fmla v30.4s, v4.4s, v11.4s\n"
+ "ldr q11, [x12, x8]\n"
+ "fmla v29.4s, v3.4s, v14.4s\n"
+ "ldr q14, [x12, x28]\n"
+ "fmla v31.4s, v2.4s, v16.4s\n"
+ "ldr q16, [x13, x8]\n"
+ "fmla v30.4s, v5.4s, v12.4s\n"
+ "ldr q12, [x13, x9]\n"
+ "fmla v29.4s, v0.4s, v15.4s\n"
+ "fmla v31.4s, v5.4s, v13.4s\n"
+ "fmla v30.4s, v3.4s, v13.4s\n"
+ "ldr q13, [x12, x9]\n"
+ "fmla v29.4s, v4.4s, v11.4s\n"
+ "ldr q11, [x13, x28]\n"
+ "add x13, x13, #0x10\n"
"fmla v31.4s, v6.4s, v15.4s\n"
- "fmla v29.4s, v8.4s, v11.4s\n"
- "ldr q11, [x10, x9]\n"
- "fmax v29.4s, v29.4s, v19.4s\n"
- "fmla v30.4s, v8.4s, v15.4s\n"
- "fmla v31.4s, v8.4s, v11.4s\n"
- "fmax v30.4s, v30.4s, v19.4s\n"
- "add x10, x10, #0x10\n"
+ "ld1 { v15.4s }, [x11]\n"
+ "fmla v30.4s, v7.4s, v12.4s\n"
+ "fmla v29.4s, v1.4s, v16.4s\n"
+ "fmla v28.4s, v4.4s, v13.4s\n"
+ "ldr q13, [x11, x8]\n"
+ "fmla v31.4s, v7.4s, v16.4s\n"
+ "ldr q16, [x12, x10]\n"
+ "add x12, x12, #0x10\n"
+ "fmla v29.4s, v6.4s, v15.4s\n"
+ "ldr q15, [x11, x10]\n"
+ "fmla v30.4s, v8.4s, v11.4s\n"
+ "fmla v28.4s, v1.4s, v12.4s\n"
"fmax v31.4s, v31.4s, v19.4s\n"
- "fmin v29.4s, v29.4s, v18.4s\n"
- "str q29, [x17, x7]\n"
- "add x17, x17, #0x10\n"
- "fmin v30.4s, v30.4s, v18.4s\n"
+ "fmla v29.4s, v7.4s, v13.4s\n"
+ "fmax v30.4s, v30.4s, v19.4s\n"
+ "fmla v28.4s, v5.4s, v14.4s\n"
+ "ldr q14, [x11, x9]\n"
"fmin v31.4s, v31.4s, v18.4s\n"
- "st1 { v30.4s }, [x28]\n"
- "str q31, [x28, x7]\n"
- "add x28, x28, #0x10\n"
+ "st1 { v31.4s }, [x15]\n"
+ "fmla v28.4s, v2.4s, v11.4s\n"
+ "fmla v29.4s, v5.4s, v16.4s\n"
+ "ldr q11, [x11, x28]\n"
+ "add x11, x11, #0x10\n"
+ "fmin v30.4s, v30.4s, v18.4s\n"
+ "str q30, [x15, x16]\n"
+ "fmla v28.4s, v3.4s, v16.4s\n"
+ "add x15, x15, #0x10\n"
+ "fmla v29.4s, v8.4s, v15.4s\n"
+ "fmla v28.4s, v7.4s, v14.4s\n"
+ "fmax v29.4s, v29.4s, v19.4s\n"
+ "fmla v28.4s, v6.4s, v15.4s\n"
+ "fmin v29.4s, v29.4s, v18.4s\n"
+ "st1 { v29.4s }, [x27]\n"
+ "fmla v28.4s, v8.4s, v11.4s\n"
+ "fmax v28.4s, v28.4s, v19.4s\n"
+ "fmin v28.4s, v28.4s, v18.4s\n"
+ "str q28, [x27, x16]\n"
+ "add x27, x27, #0x10\n"
"4:" // Tile loop: Oddments
"tst %x[n_channels], #0x3\n"
"beq 43f\n"
- "ldr q17, [x15, #0x0]\n"
- "ldr q0, [x15, #0x10]\n"
- "add x27, x14, x13\n"
- "add x26, x8, XZR\n"
- "ldr q1, [x15, #0x20]\n"
- "ldr q2, [x15, #0x30]\n"
- "add x25, x8, x6\n"
- "add x24, x8, x11\n"
- "ldr q3, [x15, #0x40]\n"
- "ldr q4, [x15, #0x50]\n"
- "add x23, x8, x9\n"
- "add x22, x16, XZR\n"
- "ldr q5, [x15, #0x60]\n"
- "ldr q6, [x15, #0x70]\n"
- "add x21, x16, x6\n"
- "add x20, x8, x13\n"
- "ldr q7, [x15, #0x80]\n"
- "ldr q8, [x15, #0x90]\n"
+ "ldr q17, [x7, #0x0]\n"
+ "ldr q0, [x7, #0x10]\n"
+ "add x26, x13, x10\n"
+ "ldr q1, [x7, #0x20]\n"
+ "add x25, x17, XZR\n"
+ "ldr q2, [x7, #0x30]\n"
+ "add x24, x17, x8\n"
+ "ldr q3, [x7, #0x40]\n"
+ "add x23, x17, x9\n"
+ "ldr q4, [x7, #0x50]\n"
+ "add x22, x17, x28\n"
+ "ldr q5, [x7, #0x60]\n"
+ "add x21, x14, XZR\n"
+ "ldr q6, [x7, #0x70]\n"
+ "add x20, x14, x8\n"
+ "ldr q7, [x7, #0x80]\n"
+ "add x19, x17, x10\n"
+ "ldr q8, [x7, #0x90]\n"
"tbz %x[n_channels], #1, 5f\n"
- "ldr d9, [x27], #0x8\n"
- "ldr d10, [x26], #0x8\n"
- "ldr d11, [x25], #0x8\n"
- "ldr d12, [x24], #0x8\n"
- "ldr d13, [x23], #0x8\n"
- "ldr d14, [x22], #0x8\n"
- "ldr d15, [x21], #0x8\n"
- "ldr d16, [x20], #0x8\n"
+ "ldr d9, [x26], #0x8\n"
+ "ldr d10, [x25], #0x8\n"
+ "ldr d11, [x24], #0x8\n"
+ "ldr d12, [x23], #0x8\n"
+ "ldr d13, [x22], #0x8\n"
+ "ldr d14, [x21], #0x8\n"
+ "ldr d15, [x20], #0x8\n"
+ "ldr d16, [x19], #0x8\n"
"tbz %x[n_channels], #0, 6f\n"
- "ld1 { v9.s }[2], [x27]\n"
- "ld1 { v10.s }[2], [x26]\n"
- "ld1 { v11.s }[2], [x25]\n"
- "ld1 { v12.s }[2], [x24]\n"
- "ld1 { v13.s }[2], [x23]\n"
- "ld1 { v14.s }[2], [x22]\n"
- "ld1 { v15.s }[2], [x21]\n"
- "ld1 { v16.s }[2], [x20]\n"
+ "ld1 { v9.s }[2], [x26]\n"
+ "ld1 { v10.s }[2], [x25]\n"
+ "ld1 { v11.s }[2], [x24]\n"
+ "ld1 { v12.s }[2], [x23]\n"
+ "ld1 { v13.s }[2], [x22]\n"
+ "ld1 { v14.s }[2], [x21]\n"
+ "ld1 { v15.s }[2], [x20]\n"
+ "ld1 { v16.s }[2], [x19]\n"
"b 6f\n"
"5:" // Tile loop: Oddments: Load inputs: (2, 2), (0, 0), (0, 1), (0, 3), (0, 4), (1, 0), (1, 1), (0, 2): Bit 1: Unset
- "ldr s9, [x27, #0x0]\n"
- "ldr s10, [x26, #0x0]\n"
- "ldr s11, [x25, #0x0]\n"
- "ldr s12, [x24, #0x0]\n"
- "ldr s13, [x23, #0x0]\n"
- "ldr s14, [x22, #0x0]\n"
- "ldr s15, [x21, #0x0]\n"
- "ldr s16, [x20, #0x0]\n"
+ "ldr s9, [x26, #0x0]\n"
+ "ldr s10, [x25, #0x0]\n"
+ "ldr s11, [x24, #0x0]\n"
+ "ldr s12, [x23, #0x0]\n"
+ "ldr s13, [x22, #0x0]\n"
+ "ldr s14, [x21, #0x0]\n"
+ "ldr s15, [x20, #0x0]\n"
+ "ldr s16, [x19, #0x0]\n"
"6:" // Tile loop: Oddments: Load inputs: (2, 2), (0, 0), (0, 1), (0, 3), (0, 4), (1, 0), (1, 1), (0, 2): Bit 1: End
- "mov v28.16b, v17.16b\n fmla v28.4s, v8.4s, v9.4s\n"
- "fmla v28.4s, v0.4s, v10.4s\n"
- "add x20, x16, x11\n"
- "mov v29.16b, v17.16b\n fmla v29.4s, v6.4s, v9.4s\n"
- "fmla v28.4s, v1.4s, v11.4s\n"
- "fmla v29.4s, v1.4s, v12.4s\n"
- "fmla v28.4s, v3.4s, v14.4s\n"
- "fmla v29.4s, v2.4s, v13.4s\n"
- "fmla v28.4s, v4.4s, v15.4s\n"
- "mov v30.16b, v17.16b\n fmla v30.4s, v2.4s, v9.4s\n"
- "mov v31.16b, v17.16b\n fmla v31.4s, v0.4s, v9.4s\n"
- "fmla v28.4s, v2.4s, v16.4s\n"
- "fmla v29.4s, v0.4s, v16.4s\n"
+ "mov v31.16b, v17.16b\n fmla v31.4s, v8.4s, v9.4s\n"
+ "add x19, x14, x9\n"
+ "mov v30.16b, v17.16b\n fmla v30.4s, v6.4s, v9.4s\n"
+ "mov v29.16b, v17.16b\n fmla v29.4s, v2.4s, v9.4s\n"
+ "mov v28.16b, v17.16b\n fmla v28.4s, v0.4s, v9.4s\n"
+ "fmla v31.4s, v0.4s, v10.4s\n"
+ "fmla v30.4s, v1.4s, v12.4s\n"
+ "fmla v31.4s, v1.4s, v11.4s\n"
+ "fmla v30.4s, v2.4s, v13.4s\n"
+ "fmla v31.4s, v3.4s, v14.4s\n"
+ "fmla v30.4s, v0.4s, v16.4s\n"
+ "fmla v31.4s, v4.4s, v15.4s\n"
+ "fmla v31.4s, v2.4s, v16.4s\n"
"tbz %x[n_channels], #1, 7f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #0, 8f\n"
- "ld1 { v11.s }[2], [x20]\n"
+ "ld1 { v11.s }[2], [x19]\n"
"b 8f\n"
"7:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 1: Unset
- "ldr s11, [x20, #0x0]\n"
+ "ldr s11, [x19, #0x0]\n"
"8:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 1: End
- "fmla v29.4s, v4.4s, v11.4s\n"
- "add x20, x16, x9\n"
+ "fmla v30.4s, v4.4s, v11.4s\n"
+ "add x19, x14, x28\n"
"tbz %x[n_channels], #1, 9f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #0, 10f\n"
- "ld1 { v12.s }[2], [x20]\n"
+ "ld1 { v12.s }[2], [x19]\n"
"b 10f\n"
"9:" // Tile loop: Oddments: Load inputs: (1, 4): Bit 1: Unset
- "ldr s12, [x20, #0x0]\n"
+ "ldr s12, [x19, #0x0]\n"
"10:" // Tile loop: Oddments: Load inputs: (1, 4): Bit 1: End
- "fmla v29.4s, v5.4s, v12.4s\n"
- "add x20, x16, x13\n"
+ "fmla v30.4s, v5.4s, v12.4s\n"
+ "add x19, x14, x10\n"
"tbz %x[n_channels], #1, 11f\n"
- "ldr d13, [x20], #0x8\n"
+ "ldr d13, [x19], #0x8\n"
"tbz %x[n_channels], #0, 12f\n"
- "ld1 { v13.s }[2], [x20]\n"
+ "ld1 { v13.s }[2], [x19]\n"
"b 12f\n"
"11:" // Tile loop: Oddments: Load inputs: (1, 2): Bit 1: Unset
- "ldr s13, [x20, #0x0]\n"
+ "ldr s13, [x19, #0x0]\n"
"12:" // Tile loop: Oddments: Load inputs: (1, 2): Bit 1: End
- "fmla v28.4s, v5.4s, v13.4s\n"
- "fmla v29.4s, v3.4s, v13.4s\n"
- "add x20, x12, XZR\n"
+ "fmla v31.4s, v5.4s, v13.4s\n"
+ "add x19, x12, XZR\n"
+ "fmla v30.4s, v3.4s, v13.4s\n"
"tbz %x[n_channels], #1, 13f\n"
- "ldr d14, [x20], #0x8\n"
+ "ldr d14, [x19], #0x8\n"
"tbz %x[n_channels], #0, 14f\n"
- "ld1 { v14.s }[2], [x20]\n"
+ "ld1 { v14.s }[2], [x19]\n"
"b 14f\n"
"13:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 1: Unset
- "ldr s14, [x20, #0x0]\n"
+ "ldr s14, [x19, #0x0]\n"
"14:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 1: End
- "fmla v30.4s, v3.4s, v14.4s\n"
- "add x20, x14, XZR\n"
+ "fmla v29.4s, v3.4s, v14.4s\n"
+ "add x19, x13, XZR\n"
"tbz %x[n_channels], #1, 15f\n"
- "ldr d15, [x20], #0x8\n"
+ "ldr d15, [x19], #0x8\n"
"tbz %x[n_channels], #0, 16f\n"
- "ld1 { v15.s }[2], [x20]\n"
+ "ld1 { v15.s }[2], [x19]\n"
"b 16f\n"
"15:" // Tile loop: Oddments: Load inputs: (2, 0): Bit 1: Unset
- "ldr s15, [x20, #0x0]\n"
+ "ldr s15, [x19, #0x0]\n"
"16:" // Tile loop: Oddments: Load inputs: (2, 0): Bit 1: End
- "fmla v28.4s, v6.4s, v15.4s\n"
- "fmla v30.4s, v0.4s, v15.4s\n"
- "add x20, x12, x6\n"
+ "fmla v31.4s, v6.4s, v15.4s\n"
+ "add x19, x12, x8\n"
+ "fmla v29.4s, v0.4s, v15.4s\n"
"tbz %x[n_channels], #1, 17f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #0, 18f\n"
- "ld1 { v11.s }[2], [x20]\n"
+ "ld1 { v11.s }[2], [x19]\n"
"b 18f\n"
"17:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 1: Unset
- "ldr s11, [x20, #0x0]\n"
+ "ldr s11, [x19, #0x0]\n"
"18:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 1: End
- "fmla v30.4s, v4.4s, v11.4s\n"
- "add x20, x14, x6\n"
+ "fmla v29.4s, v4.4s, v11.4s\n"
+ "add x19, x13, x8\n"
"tbz %x[n_channels], #1, 19f\n"
- "ldr d16, [x20], #0x8\n"
+ "ldr d16, [x19], #0x8\n"
"tbz %x[n_channels], #0, 20f\n"
- "ld1 { v16.s }[2], [x20]\n"
+ "ld1 { v16.s }[2], [x19]\n"
"b 20f\n"
"19:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 1: Unset
- "ldr s16, [x20, #0x0]\n"
+ "ldr s16, [x19, #0x0]\n"
"20:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 1: End
- "fmla v28.4s, v7.4s, v16.4s\n"
- "fmla v30.4s, v1.4s, v16.4s\n"
- "add x20, x12, x11\n"
+ "fmla v31.4s, v7.4s, v16.4s\n"
+ "add x19, x12, x9\n"
+ "fmla v29.4s, v1.4s, v16.4s\n"
"tbz %x[n_channels], #1, 21f\n"
- "ldr d13, [x20], #0x8\n"
+ "ldr d13, [x19], #0x8\n"
"tbz %x[n_channels], #0, 22f\n"
- "ld1 { v13.s }[2], [x20]\n"
+ "ld1 { v13.s }[2], [x19]\n"
"b 22f\n"
"21:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 1: Unset
- "ldr s13, [x20, #0x0]\n"
+ "ldr s13, [x19, #0x0]\n"
"22:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 1: End
- "fmla v31.4s, v4.4s, v13.4s\n"
- "add x20, x14, x11\n"
+ "fmla v28.4s, v4.4s, v13.4s\n"
+ "add x19, x13, x9\n"
"tbz %x[n_channels], #1, 23f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #0, 24f\n"
- "ld1 { v12.s }[2], [x20]\n"
+ "ld1 { v12.s }[2], [x19]\n"
"b 24f\n"
"23:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 1: Unset
- "ldr s12, [x20, #0x0]\n"
+ "ldr s12, [x19, #0x0]\n"
"24:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 1: End
- "fmla v29.4s, v7.4s, v12.4s\n"
- "fmla v31.4s, v1.4s, v12.4s\n"
- "add x20, x12, x9\n"
+ "fmla v30.4s, v7.4s, v12.4s\n"
+ "add x19, x12, x28\n"
+ "fmla v28.4s, v1.4s, v12.4s\n"
"tbz %x[n_channels], #1, 25f\n"
- "ldr d14, [x20], #0x8\n"
+ "ldr d14, [x19], #0x8\n"
"tbz %x[n_channels], #0, 26f\n"
- "ld1 { v14.s }[2], [x20]\n"
+ "ld1 { v14.s }[2], [x19]\n"
"b 26f\n"
"25:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 1: Unset
- "ldr s14, [x20, #0x0]\n"
+ "ldr s14, [x19, #0x0]\n"
"26:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 1: End
- "fmla v31.4s, v5.4s, v14.4s\n"
- "add x20, x10, XZR\n"
+ "fmla v28.4s, v5.4s, v14.4s\n"
+ "add x19, x11, XZR\n"
"tbz %x[n_channels], #1, 27f\n"
- "ldr d15, [x20], #0x8\n"
+ "ldr d15, [x19], #0x8\n"
"tbz %x[n_channels], #0, 28f\n"
- "ld1 { v15.s }[2], [x20]\n"
+ "ld1 { v15.s }[2], [x19]\n"
"b 28f\n"
"27:" // Tile loop: Oddments: Load inputs: (4, 0): Bit 1: Unset
- "ldr s15, [x20, #0x0]\n"
+ "ldr s15, [x19, #0x0]\n"
"28:" // Tile loop: Oddments: Load inputs: (4, 0): Bit 1: End
- "fmla v30.4s, v6.4s, v15.4s\n"
- "add x20, x14, x9\n"
+ "fmla v29.4s, v6.4s, v15.4s\n"
+ "add x19, x13, x28\n"
"tbz %x[n_channels], #1, 29f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #0, 30f\n"
- "ld1 { v11.s }[2], [x20]\n"
+ "ld1 { v11.s }[2], [x19]\n"
"b 30f\n"
"29:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 1: Unset
- "ldr s11, [x20, #0x0]\n"
+ "ldr s11, [x19, #0x0]\n"
"30:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 1: End
- "fmla v29.4s, v8.4s, v11.4s\n"
- "fmla v31.4s, v2.4s, v11.4s\n"
- "add x20, x10, x6\n"
+ "fmla v30.4s, v8.4s, v11.4s\n"
+ "add x19, x11, x8\n"
+ "fmla v28.4s, v2.4s, v11.4s\n"
"tbz %x[n_channels], #1, 31f\n"
- "ldr d13, [x20], #0x8\n"
+ "ldr d13, [x19], #0x8\n"
"tbz %x[n_channels], #0, 32f\n"
- "ld1 { v13.s }[2], [x20]\n"
+ "ld1 { v13.s }[2], [x19]\n"
"b 32f\n"
"31:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 1: Unset
- "ldr s13, [x20, #0x0]\n"
+ "ldr s13, [x19, #0x0]\n"
"32:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 1: End
- "fmla v30.4s, v7.4s, v13.4s\n"
- "add x20, x12, x13\n"
+ "fmla v29.4s, v7.4s, v13.4s\n"
+ "add x19, x12, x10\n"
"tbz %x[n_channels], #1, 33f\n"
- "ldr d16, [x20], #0x8\n"
+ "ldr d16, [x19], #0x8\n"
"tbz %x[n_channels], #0, 34f\n"
- "ld1 { v16.s }[2], [x20]\n"
+ "ld1 { v16.s }[2], [x19]\n"
"b 34f\n"
"33:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 1: Unset
- "ldr s16, [x20, #0x0]\n"
+ "ldr s16, [x19, #0x0]\n"
"34:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 1: End
- "fmla v30.4s, v5.4s, v16.4s\n"
- "fmla v31.4s, v3.4s, v16.4s\n"
- "add x20, x10, x11\n"
+ "fmla v29.4s, v5.4s, v16.4s\n"
+ "add x19, x11, x9\n"
+ "fmla v28.4s, v3.4s, v16.4s\n"
"tbz %x[n_channels], #1, 35f\n"
- "ldr d14, [x20], #0x8\n"
+ "ldr d14, [x19], #0x8\n"
"tbz %x[n_channels], #0, 36f\n"
- "ld1 { v14.s }[2], [x20]\n"
+ "ld1 { v14.s }[2], [x19]\n"
"b 36f\n"
"35:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 1: Unset
- "ldr s14, [x20, #0x0]\n"
+ "ldr s14, [x19, #0x0]\n"
"36:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 1: End
- "fmla v31.4s, v7.4s, v14.4s\n"
- "add x20, x10, x13\n"
+ "fmla v28.4s, v7.4s, v14.4s\n"
+ "add x19, x11, x10\n"
"tbz %x[n_channels], #1, 37f\n"
- "ldr d15, [x20], #0x8\n"
+ "ldr d15, [x19], #0x8\n"
"tbz %x[n_channels], #0, 38f\n"
- "ld1 { v15.s }[2], [x20]\n"
+ "ld1 { v15.s }[2], [x19]\n"
"b 38f\n"
"37:" // Tile loop: Oddments: Load inputs: (4, 2): Bit 1: Unset
- "ldr s15, [x20, #0x0]\n"
+ "ldr s15, [x19, #0x0]\n"
"38:" // Tile loop: Oddments: Load inputs: (4, 2): Bit 1: End
- "fmla v30.4s, v8.4s, v15.4s\n"
- "fmla v31.4s, v6.4s, v15.4s\n"
- "add x20, x10, x9\n"
+ "fmla v29.4s, v8.4s, v15.4s\n"
+ "add x19, x11, x28\n"
+ "fmla v28.4s, v6.4s, v15.4s\n"
"tbz %x[n_channels], #1, 39f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #0, 40f\n"
- "ld1 { v11.s }[2], [x20]\n"
+ "ld1 { v11.s }[2], [x19]\n"
"b 40f\n"
"39:" // Tile loop: Oddments: Load inputs: (4, 4): Bit 1: Unset
- "ldr s11, [x20, #0x0]\n"
+ "ldr s11, [x19, #0x0]\n"
"40:" // Tile loop: Oddments: Load inputs: (4, 4): Bit 1: End
- "fmla v31.4s, v8.4s, v11.4s\n"
- "fmax v28.4s, v28.4s, v19.4s\n"
- "fmax v29.4s, v29.4s, v19.4s\n"
- "fmax v30.4s, v30.4s, v19.4s\n"
+ "fmla v28.4s, v8.4s, v11.4s\n"
"fmax v31.4s, v31.4s, v19.4s\n"
- "fmin v28.4s, v28.4s, v18.4s\n"
- "fmin v29.4s, v29.4s, v18.4s\n"
- "fmin v30.4s, v30.4s, v18.4s\n"
+ "fmax v30.4s, v30.4s, v19.4s\n"
+ "fmax v29.4s, v29.4s, v19.4s\n"
"fmin v31.4s, v31.4s, v18.4s\n"
+ "fmin v30.4s, v30.4s, v18.4s\n"
+ "fmin v29.4s, v29.4s, v18.4s\n"
+ "fmax v28.4s, v28.4s, v19.4s\n"
+ "fmin v28.4s, v28.4s, v18.4s\n"
"tbz %x[n_channels], #1, 41f\n"
- "mov x21, x17\n"
- "mov x20, x28\n"
- "st1 { v28.d }[0], [x21], x7\n"
- "st1 { v30.d }[0], [x20], x7\n"
- "add x17, x17, #0x8\n"
- "add x28, x28, #0x8\n"
- "st1 { v29.d }[0], [x21]\n"
- "st1 { v31.d }[0], [x20]\n"
+ "mov x19, x15\n"
+ "st1 { v31.d }[0], [x19], x16\n"
+ "add x15, x15, #0x8\n"
+ "st1 { v30.d }[0], [x19]\n"
+ "mov x19, x27\n"
+ "st1 { v29.d }[0], [x19], x16\n"
+ "add x27, x27, #0x8\n"
+ "st1 { v28.d }[0], [x19]\n"
"tbz %x[n_channels], #0, 42f\n"
- "mov x21, x17\n"
- "mov x20, x28\n"
- "st1 { v28.s }[2], [x21], x7\n"
- "st1 { v30.s }[2], [x20], x7\n"
- "st1 { v29.s }[2], [x21]\n"
- "st1 { v31.s }[2], [x20]\n"
+ "mov x20, x15\n"
+ "st1 { v31.s }[2], [x20], x16\n"
+ "mov x19, x27\n"
+ "st1 { v30.s }[2], [x20]\n"
+ "st1 { v29.s }[2], [x19], x16\n"
+ "st1 { v28.s }[2], [x19]\n"
"b 42f\n"
"41:" // Tile loop: Oddments: Store: Bit 1: Unset
- "mov x21, x17\n"
- "mov x20, x28\n"
- "st1 { v28.s }[0], [x21], x7\n"
- "st1 { v30.s }[0], [x20], x7\n"
- "st1 { v29.s }[0], [x21]\n"
- "st1 { v31.s }[0], [x20]\n"
+ "mov x20, x15\n"
+ "st1 { v31.s }[0], [x20], x16\n"
+ "mov x19, x27\n"
+ "st1 { v30.s }[0], [x20]\n"
+ "st1 { v29.s }[0], [x19], x16\n"
+ "st1 { v28.s }[0], [x19]\n"
"42:" // Tile loop: Oddments: Store: Bit 1: End
"43:" // Tile loop: End
+ "ldr x6, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "add x21, x6, #0x1\n"
"ldr x27, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x23, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "add x27, x27, #0x1\n"
- "add x21, x23, #0x1\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "cmp x27, x20\n"
"ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "csel x23, x23, x21, LT\n"
+ "add x27, x27, #0x1\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "cmp x27, x19\n"
"csel x27, x27, XZR, LT\n"
- "cmp x23, x20\n"
+ "csel x6, x6, x21, LT\n"
+ "cmp x6, x20\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v28", "v29", "v30", "v31", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v28", "v29", "v30", "v31", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
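For reference, the tile-loop prologue above derives the input base pointer from the tile indices exactly as its inline comments describe. A minimal C++ sketch of that address arithmetic (function and parameter names are hypothetical, chosen to match the asm comments):

    // offset = tile_i * ld_input_row; offset += tile_j * ld_input_col;
    // offset *= kernel_stride * output_size; inptr += offset * sizeof(float)
    static const float *tile_input_ptr(const float *inptr,
                                       size_t tile_i, size_t tile_j,
                                       size_t ld_input_row, size_t ld_input_col,
                                       size_t stride_times_output)  // #0x4 here
    {
        size_t offset = tile_i * ld_input_row;  // mul
        offset += tile_j * ld_input_col;        // madd
        offset *= stride_times_output;          // mul
        return inptr + offset;                  // add ..., LSL #2 scales by sizeof(float)
    }

The output pointer is computed the same way from ld_output_row, ld_output_col, and the output tile size (#0x2), and the end-of-tile epilogue increments tile_j, using csel to wrap it to zero and bump tile_i once tile_j reaches n_tile_cols, looping until tile_i reaches n_tile_rows.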
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
index f65633002e..4b24862eb2 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -88,385 +88,385 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
__asm__ __volatile__(
"ldr x21, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "mov x26, #0x10\n" // cntb _, ALL, #1
- "lsr x25, %x[n_channels], #0x2\n"
- "ldr x24, [%x[params_struct], %[offsetof_args_params]]\n"
+ "add x16, %x[params_struct], %[offsetof_Args_inptrs]\n"
+ "ldr x15, [%x[params_struct], %[offsetof_args_params]]\n"
"add x20, %x[params_struct], %[offsetof_args_min]\n"
+ "add x19, %x[params_struct], %[offsetof_args_max]\n"
"ld1r { v19.4s }, [x20]\n"
- "add x20, %x[params_struct], %[offsetof_args_max]\n"
- "ld1r { v18.4s }, [x20]\n"
- "add x13, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "ldp x12, x11, [x21, #0x0]\n"
+ "ld1r { v18.4s }, [x19]\n"
+ "mov x14, #0x0\n"
+ "ldp x13, x12, [x21, #0x0]\n"
+ "mov x11, #0x10\n" // cntb _, ALL, #1
"ldp x10, x9, [x21, #0x10]\n"
- "mov x28, #0x0\n"
- "sub x23, XZR, x26\n"
- "cbz x25, 3f\n"
- "ldr q17, [x24, #0x0]\n"
- "ldr q0, [x24, #0x10]\n"
- "cmp x26, x25, LSL #4\n"
- "ldr q1, [x24, #0x20]\n"
- "ldr q2, [x24, #0x30]\n"
- "ldr q3, [x24, #0x40]\n"
- "ldr q4, [x24, #0x50]\n"
- "ldr q5, [x24, #0x60]\n"
- "ldr q6, [x24, #0x70]\n"
- "ldr q7, [x24, #0x80]\n"
- "ldr q8, [x24, #0x90]\n"
- "add x24, x24, #0xa0\n"
- "ldp x22, x20, [x13, #0x0]\n"
- "ldr q9, [x22, x28]\n"
- "ldr q10, [x20, x28]\n"
- "ldp x21, x20, [x13, #0x10]\n"
- "ldr q11, [x21, x28]\n"
- "ldr q12, [x20, x28]\n"
- "ldp x22, x21, [x13, #0x20]\n"
- "ldr q13, [x22, x28]\n"
- "ldr q14, [x21, x28]\n"
- "ldp x21, x20, [x13, #0x30]\n"
- "ldr q15, [x21, x28]\n"
- "ldr q16, [x20, x28]\n"
+ "sub x28, XZR, x11\n"
+ "lsr x27, %x[n_channels], #0x2\n"
+ "cbz x27, 3f\n"
+ "ldr q17, [x15, #0x0]\n"
+ "ldr q0, [x15, #0x10]\n"
+ "cmp x11, x27, LSL #4\n"
+ "ldr q1, [x15, #0x20]\n"
+ "ldr q2, [x15, #0x30]\n"
+ "ldr q3, [x15, #0x40]\n"
+ "ldr q4, [x15, #0x50]\n"
+ "ldr q5, [x15, #0x60]\n"
+ "ldr q6, [x15, #0x70]\n"
+ "ldr q7, [x15, #0x80]\n"
+ "ldr q8, [x15, #0x90]\n"
+ "add x15, x15, #0xa0\n"
+ "ldp x26, x25, [x16, #0x0]\n"
+ "ldp x24, x23, [x16, #0x10]\n"
+ "ldp x22, x21, [x16, #0x20]\n"
+ "ldr q9, [x26, x14]\n"
+ "ldr q10, [x25, x14]\n"
+ "ldr q11, [x24, x14]\n"
+ "ldr q12, [x23, x14]\n"
+ "ldr q13, [x22, x14]\n"
+ "ldr q14, [x21, x14]\n"
+ "ldp x20, x19, [x16, #0x30]\n"
+ "ldr q15, [x20, x14]\n"
+ "ldr q16, [x19, x14]\n"
"bge 2f\n"
"1:" // Channel loop
- "mov v28.16b, v17.16b\n fmla v28.4s, v8.4s, v9.4s\n"
- "mov v29.16b, v17.16b\n fmla v29.4s, v6.4s, v9.4s\n"
- "ldr x22, [x13, #0x40]\n"
- "ldr x20, [x13, #0x48]\n"
- "fmla v28.4s, v0.4s, v10.4s\n"
- "fmla v29.4s, v1.4s, v12.4s\n"
- "ldr q12, [x20, x28]\n"
- "ldr x21, [x13, #0x50]\n"
- "fmla v28.4s, v1.4s, v11.4s\n"
- "ldr q11, [x22, x28]\n"
- "fmla v29.4s, v2.4s, v13.4s\n"
- "ldr q13, [x21, x28]\n"
- "fmla v28.4s, v3.4s, v14.4s\n"
- "fmla v29.4s, v0.4s, v16.4s\n"
- "ldr x20, [x13, #0x58]\n"
- "ldr q14, [x20, x28]\n"
- "fmla v28.4s, v4.4s, v15.4s\n"
- "fmla v29.4s, v4.4s, v11.4s\n"
- "ldr x20, [x13, #0x78]\n"
- "ldr x22, [x13, #0x60]\n"
- "ldr q15, [x22, x28]\n"
- "fmla v28.4s, v2.4s, v16.4s\n"
- "fmla v29.4s, v5.4s, v12.4s\n"
- "ldr x22, [x13, #0x80]\n"
- "ldr q12, [x22, x28]\n"
- "mov v30.16b, v17.16b\n fmla v30.4s, v2.4s, v9.4s\n"
- "mov v31.16b, v17.16b\n fmla v31.4s, v0.4s, v9.4s\n"
- "ldr q17, [x24, #0x0]\n"
- "fmla v28.4s, v5.4s, v13.4s\n"
- "fmla v29.4s, v3.4s, v13.4s\n"
- "ldr q13, [x20, x28]\n"
- "ldr x21, [x13, #0x68]\n"
- "ldr q11, [x21, x28]\n"
- "fmla v30.4s, v3.4s, v14.4s\n"
- "fmla v31.4s, v4.4s, v13.4s\n"
- "ldr x20, [x13, #0x88]\n"
- "ldr q14, [x20, x28]\n"
- "fmla v30.4s, v0.4s, v15.4s\n"
- "ldr q0, [x24, #0x10]\n"
- "fmla v31.4s, v1.4s, v12.4s\n"
- "ldr x21, [x13, #0x70]\n"
- "ldr q16, [x21, x28]\n"
+ "mov v31.16b, v17.16b\n fmla v31.4s, v8.4s, v9.4s\n"
+ "ldr x26, [x16, #0x40]\n"
+ "add x28, x28, #0x10\n"
+ "mov v30.16b, v17.16b\n fmla v30.4s, v6.4s, v9.4s\n"
+ "ldr x25, [x16, #0x48]\n"
+ "mov v29.16b, v17.16b\n fmla v29.4s, v2.4s, v9.4s\n"
+ "ldr x24, [x16, #0x50]\n"
+ "mov v28.16b, v17.16b\n fmla v28.4s, v0.4s, v9.4s\n"
+ "ldr x23, [x16, #0x58]\n"
+ "ldr x22, [x16, #0x60]\n"
+ "fmla v31.4s, v0.4s, v10.4s\n"
+ "ldr x21, [x16, #0x68]\n"
+ "fmla v30.4s, v1.4s, v12.4s\n"
+ "ldr q12, [x25, x14]\n"
+ "fmla v31.4s, v1.4s, v11.4s\n"
+ "ldr q11, [x26, x14]\n"
+ "ldr x20, [x16, #0x70]\n"
+ "fmla v30.4s, v2.4s, v13.4s\n"
+ "ldr q13, [x24, x14]\n"
+ "fmla v31.4s, v3.4s, v14.4s\n"
+ "ldr q14, [x23, x14]\n"
+ "ldr x19, [x16, #0x78]\n"
+ "fmla v30.4s, v0.4s, v16.4s\n"
+ "ldr x26, [x16, #0x80]\n"
+ "fmla v31.4s, v4.4s, v15.4s\n"
+ "ldr q15, [x22, x14]\n"
+ "fmla v29.4s, v3.4s, v14.4s\n"
+ "ldr x25, [x16, #0x88]\n"
"fmla v30.4s, v4.4s, v11.4s\n"
- "fmla v31.4s, v5.4s, v14.4s\n"
- "ldr q4, [x24, #0x50]\n"
- "ldr x20, [x13, #0x98]\n"
- "fmla v28.4s, v6.4s, v15.4s\n"
- "fmla v30.4s, v1.4s, v16.4s\n"
- "ldr q11, [x20, x28]\n"
- "ldr q1, [x24, #0x20]\n"
- "fmla v31.4s, v2.4s, v11.4s\n"
- "fmla v28.4s, v7.4s, v16.4s\n"
- "ldr q2, [x24, #0x30]\n"
- "ldr x21, [x13, #0x90]\n"
- "fmla v29.4s, v7.4s, v12.4s\n"
- "fmla v29.4s, v8.4s, v11.4s\n"
- "ldr q15, [x21, x28]\n"
- "ldr x21, [x13, #0xa8]\n"
- "fmla v30.4s, v6.4s, v15.4s\n"
- "fmax v28.4s, v28.4s, v19.4s\n"
- "ldr q16, [x21, x28]\n"
- "ldr x22, [x13, #0xa0]\n"
- "fmla v31.4s, v3.4s, v16.4s\n"
- "fmax v29.4s, v29.4s, v19.4s\n"
- "ldr q13, [x22, x28]\n"
- "ldr q3, [x24, #0x40]\n"
- "fmla v30.4s, v7.4s, v13.4s\n"
- "fmla v30.4s, v5.4s, v16.4s\n"
- "ldr q5, [x24, #0x60]\n"
- "ldr x21, [x13, #0xb0]\n"
- "add x23, x23, #0x10\n"
- "fmin v28.4s, v28.4s, v18.4s\n"
- "ldr q14, [x21, x28]\n"
- "ldr x20, [x13, #0xb8]\n"
- "fmla v31.4s, v7.4s, v14.4s\n"
- "fmin v29.4s, v29.4s, v18.4s\n"
- "ldr q15, [x20, x28]\n"
- "ldr q7, [x24, #0x80]\n"
+ "ldr q11, [x21, x14]\n"
+ "ldr x24, [x16, #0x90]\n"
+ "fmla v31.4s, v2.4s, v16.4s\n"
+ "ldr q16, [x20, x14]\n"
+ "fmla v29.4s, v0.4s, v15.4s\n"
+ "ldr q14, [x25, x14]\n"
+ "fmla v30.4s, v5.4s, v12.4s\n"
+ "ldr q12, [x26, x14]\n"
+ "ldr x23, [x16, #0x98]\n"
+ "fmla v31.4s, v5.4s, v13.4s\n"
+ "ldr x22, [x16, #0xa0]\n"
+ "fmla v29.4s, v4.4s, v11.4s\n"
+ "ldr q11, [x23, x14]\n"
+ "fmla v30.4s, v3.4s, v13.4s\n"
+ "ldr q13, [x19, x14]\n"
+ "ldr x21, [x16, #0xa8]\n"
"fmla v31.4s, v6.4s, v15.4s\n"
- "fmla v30.4s, v8.4s, v15.4s\n"
- "ldr q6, [x24, #0x70]\n"
- "ldr x22, [x13, #0xc0]\n"
- "fmax v30.4s, v30.4s, v19.4s\n"
- "fmin v30.4s, v30.4s, v18.4s\n"
- "ldr q11, [x22, x28]\n"
- "fmla v31.4s, v8.4s, v11.4s\n"
- "ldr q8, [x24, #0x90]\n"
+ "ldr q15, [x24, x14]\n"
+ "fmla v29.4s, v1.4s, v16.4s\n"
+ "ldr x20, [x16, #0xb0]\n"
+ "fmla v30.4s, v7.4s, v12.4s\n"
+ "ldr x19, [x16, #0xb8]\n"
+ "fmla v28.4s, v4.4s, v13.4s\n"
+ "ldr q13, [x22, x14]\n"
+ "ldr x26, [x16, #0xc0]\n"
+ "fmla v31.4s, v7.4s, v16.4s\n"
+ "fmla v29.4s, v6.4s, v15.4s\n"
+ "ldr q16, [x21, x14]\n"
+ "fmla v30.4s, v8.4s, v11.4s\n"
+ "ldr q15, [x19, x14]\n"
+ "fmla v28.4s, v1.4s, v12.4s\n"
+ "ldr q17, [x15, #0x0]\n"
+ "ldr q0, [x15, #0x10]\n"
+ "fmla v29.4s, v7.4s, v13.4s\n"
"fmax v31.4s, v31.4s, v19.4s\n"
- "ldp x22, x20, [x13, #0x0]\n"
- "ldr q9, [x22, x26]\n"
+ "ldr q1, [x15, #0x20]\n"
+ "fmax v30.4s, v30.4s, v19.4s\n"
+ "ldr q4, [x15, #0x50]\n"
+ "fmla v28.4s, v5.4s, v14.4s\n"
+ "ldr q14, [x20, x14]\n"
"fmin v31.4s, v31.4s, v18.4s\n"
- "add x28, x28, #0x10\n"
- "ldr q10, [x20, x26]\n"
- "ldp x21, x20, [x13, #0x10]\n"
- "str q28, [x12, x23]\n"
- "add x24, x24, #0xa0\n"
- "ldr q11, [x21, x26]\n"
- "ldr q12, [x20, x26]\n"
- "str q29, [x11, x23]\n"
- "ldp x22, x21, [x13, #0x20]\n"
- "ldr q13, [x22, x26]\n"
- "str q30, [x10, x23]\n"
- "ldr q14, [x21, x26]\n"
- "ldp x21, x20, [x13, #0x30]\n"
- "str q31, [x9, x23]\n"
- "ldr q15, [x21, x26]\n"
- "ldr q16, [x20, x26]\n"
- "add x26, x26, #0x10\n"
- "cmp x26, x25, LSL #4\n"
+ "str q31, [x13, x28]\n"
+ "fmla v28.4s, v2.4s, v11.4s\n"
+ "fmla v29.4s, v5.4s, v16.4s\n"
+ "ldr q11, [x26, x14]\n"
+ "add x14, x14, #0x10\n"
+ "fmin v30.4s, v30.4s, v18.4s\n"
+ "ldp x26, x25, [x16, #0x0]\n"
+ "ldp x24, x23, [x16, #0x10]\n"
+ "fmla v28.4s, v3.4s, v16.4s\n"
+ "ldp x22, x21, [x16, #0x20]\n"
+ "fmla v29.4s, v8.4s, v15.4s\n"
+ "ldr q9, [x26, x11]\n"
+ "ldr q10, [x25, x11]\n"
+ "fmla v28.4s, v7.4s, v14.4s\n"
+ "ldr q12, [x23, x11]\n"
+ "fmax v29.4s, v29.4s, v19.4s\n"
+ "ldr q13, [x22, x11]\n"
+ "ldr q14, [x21, x11]\n"
+ "fmin v29.4s, v29.4s, v18.4s\n"
+ "ldp x20, x19, [x16, #0x30]\n"
+ "str q30, [x12, x28]\n"
+ "fmla v28.4s, v6.4s, v15.4s\n"
+ "ldr q2, [x15, #0x30]\n"
+ "fmla v28.4s, v8.4s, v11.4s\n"
+ "ldr q11, [x24, x11]\n"
+ "ldr q15, [x20, x11]\n"
+ "fmax v28.4s, v28.4s, v19.4s\n"
+ "ldr q16, [x19, x11]\n"
+ "add x11, x11, #0x10\n"
+ "fmin v28.4s, v28.4s, v18.4s\n"
+ "str q29, [x10, x28]\n"
+ "cmp x11, x27, LSL #4\n"
+ "ldr q3, [x15, #0x40]\n"
+ "ldr q5, [x15, #0x60]\n"
+ "ldr q6, [x15, #0x70]\n"
+ "str q28, [x9, x28]\n"
+ "ldr q7, [x15, #0x80]\n"
+ "ldr q8, [x15, #0x90]\n"
+ "add x15, x15, #0xa0\n"
"blt 1b\n"
"2:" // Channel tail
- "mov v28.16b, v17.16b\n fmla v28.4s, v8.4s, v9.4s\n"
- "mov v29.16b, v17.16b\n fmla v29.4s, v6.4s, v9.4s\n"
- "ldr x22, [x13, #0x40]\n"
- "ldr x20, [x13, #0x48]\n"
- "fmla v28.4s, v0.4s, v10.4s\n"
- "fmla v29.4s, v1.4s, v12.4s\n"
- "ldr q12, [x20, x28]\n"
- "ldr x21, [x13, #0x50]\n"
- "fmla v28.4s, v1.4s, v11.4s\n"
- "ldr q11, [x22, x28]\n"
- "fmla v29.4s, v2.4s, v13.4s\n"
- "ldr q13, [x21, x28]\n"
- "fmla v28.4s, v3.4s, v14.4s\n"
- "fmla v29.4s, v0.4s, v16.4s\n"
- "ldr x20, [x13, #0x58]\n"
- "ldr q14, [x20, x28]\n"
- "fmla v28.4s, v4.4s, v15.4s\n"
- "fmla v29.4s, v4.4s, v11.4s\n"
- "ldr x20, [x13, #0x78]\n"
- "ldr x22, [x13, #0x60]\n"
- "ldr q15, [x22, x28]\n"
- "fmla v28.4s, v2.4s, v16.4s\n"
- "fmla v29.4s, v5.4s, v12.4s\n"
- "ldr x22, [x13, #0x80]\n"
- "ldr q12, [x22, x28]\n"
- "mov v30.16b, v17.16b\n fmla v30.4s, v2.4s, v9.4s\n"
- "mov v31.16b, v17.16b\n fmla v31.4s, v0.4s, v9.4s\n"
- "ldr x21, [x13, #0x68]\n"
- "ldr q11, [x21, x28]\n"
- "fmla v28.4s, v5.4s, v13.4s\n"
- "fmla v29.4s, v3.4s, v13.4s\n"
- "ldr q13, [x20, x28]\n"
- "fmla v30.4s, v3.4s, v14.4s\n"
- "fmla v31.4s, v4.4s, v13.4s\n"
- "ldr x20, [x13, #0x88]\n"
- "ldr q14, [x20, x28]\n"
- "fmla v30.4s, v0.4s, v15.4s\n"
- "fmla v31.4s, v1.4s, v12.4s\n"
- "ldr x21, [x13, #0x70]\n"
- "ldr q16, [x21, x28]\n"
- "ldr x20, [x13, #0x98]\n"
+ "mov v31.16b, v17.16b\n fmla v31.4s, v8.4s, v9.4s\n"
+ "ldr x26, [x16, #0x40]\n"
+ "add x28, x28, #0x10\n"
+ "mov v30.16b, v17.16b\n fmla v30.4s, v6.4s, v9.4s\n"
+ "ldr x25, [x16, #0x48]\n"
+ "mov v29.16b, v17.16b\n fmla v29.4s, v2.4s, v9.4s\n"
+ "ldr x24, [x16, #0x50]\n"
+ "mov v28.16b, v17.16b\n fmla v28.4s, v0.4s, v9.4s\n"
+ "ldr x23, [x16, #0x58]\n"
+ "ldr x22, [x16, #0x60]\n"
+ "fmla v31.4s, v0.4s, v10.4s\n"
+ "ldr x21, [x16, #0x68]\n"
+ "fmla v30.4s, v1.4s, v12.4s\n"
+ "ldr q12, [x25, x14]\n"
+ "fmla v31.4s, v1.4s, v11.4s\n"
+ "ldr q11, [x26, x14]\n"
+ "ldr x20, [x16, #0x70]\n"
+ "fmla v30.4s, v2.4s, v13.4s\n"
+ "ldr q13, [x24, x14]\n"
+ "fmla v31.4s, v3.4s, v14.4s\n"
+ "ldr q14, [x23, x14]\n"
+ "ldr x19, [x16, #0x78]\n"
+ "fmla v30.4s, v0.4s, v16.4s\n"
+ "ldr x26, [x16, #0x80]\n"
+ "fmla v31.4s, v4.4s, v15.4s\n"
+ "ldr q15, [x22, x14]\n"
+ "fmla v29.4s, v3.4s, v14.4s\n"
+ "ldr x25, [x16, #0x88]\n"
"fmla v30.4s, v4.4s, v11.4s\n"
- "ldr q11, [x20, x28]\n"
- "fmla v31.4s, v5.4s, v14.4s\n"
- "fmla v28.4s, v6.4s, v15.4s\n"
- "ldr x21, [x13, #0x90]\n"
- "ldr q15, [x21, x28]\n"
- "fmla v30.4s, v1.4s, v16.4s\n"
- "ldr x21, [x13, #0xa8]\n"
- "fmla v31.4s, v2.4s, v11.4s\n"
- "fmla v28.4s, v7.4s, v16.4s\n"
- "ldr q16, [x21, x28]\n"
- "ldr x22, [x13, #0xa0]\n"
- "ldr q13, [x22, x28]\n"
- "fmla v30.4s, v6.4s, v15.4s\n"
- "fmla v31.4s, v3.4s, v16.4s\n"
- "ldr x21, [x13, #0xb0]\n"
- "ldr q14, [x21, x28]\n"
- "fmla v30.4s, v7.4s, v13.4s\n"
- "fmla v31.4s, v7.4s, v14.4s\n"
- "ldr x20, [x13, #0xb8]\n"
- "ldr q15, [x20, x28]\n"
- "fmla v29.4s, v7.4s, v12.4s\n"
- "fmla v30.4s, v5.4s, v16.4s\n"
- "ldr x22, [x13, #0xc0]\n"
+ "ldr q11, [x21, x14]\n"
+ "ldr x24, [x16, #0x90]\n"
+ "fmla v31.4s, v2.4s, v16.4s\n"
+ "ldr q16, [x20, x14]\n"
+ "fmla v29.4s, v0.4s, v15.4s\n"
+ "ldr q14, [x25, x14]\n"
+ "fmla v30.4s, v5.4s, v12.4s\n"
+ "ldr q12, [x26, x14]\n"
+ "ldr x23, [x16, #0x98]\n"
+ "fmla v31.4s, v5.4s, v13.4s\n"
+ "ldr x22, [x16, #0xa0]\n"
+ "fmla v29.4s, v4.4s, v11.4s\n"
+ "ldr q11, [x23, x14]\n"
+ "fmla v30.4s, v3.4s, v13.4s\n"
+ "ldr q13, [x19, x14]\n"
+ "ldr x21, [x16, #0xa8]\n"
"fmla v31.4s, v6.4s, v15.4s\n"
- "fmla v29.4s, v8.4s, v11.4s\n"
- "ldr q11, [x22, x28]\n"
- "fmla v30.4s, v8.4s, v15.4s\n"
- "fmla v31.4s, v8.4s, v11.4s\n"
- "fmax v28.4s, v28.4s, v19.4s\n"
- "add x23, x23, #0x10\n"
- "fmax v29.4s, v29.4s, v19.4s\n"
- "fmax v30.4s, v30.4s, v19.4s\n"
- "add x28, x28, #0x10\n"
+ "ldr q15, [x24, x14]\n"
+ "fmla v29.4s, v1.4s, v16.4s\n"
+ "ldr x20, [x16, #0xb0]\n"
+ "fmla v30.4s, v7.4s, v12.4s\n"
+ "ldr x19, [x16, #0xb8]\n"
+ "fmla v28.4s, v4.4s, v13.4s\n"
+ "ldr q13, [x22, x14]\n"
+ "ldr x26, [x16, #0xc0]\n"
+ "fmla v31.4s, v7.4s, v16.4s\n"
+ "fmla v29.4s, v6.4s, v15.4s\n"
+ "ldr q16, [x21, x14]\n"
+ "fmla v30.4s, v8.4s, v11.4s\n"
+ "ldr q15, [x19, x14]\n"
+ "fmla v28.4s, v1.4s, v12.4s\n"
+ "fmla v29.4s, v7.4s, v13.4s\n"
"fmax v31.4s, v31.4s, v19.4s\n"
- "fmin v28.4s, v28.4s, v18.4s\n"
- "str q28, [x12, x23]\n"
- "fmin v29.4s, v29.4s, v18.4s\n"
- "fmin v30.4s, v30.4s, v18.4s\n"
- "str q29, [x11, x23]\n"
+ "fmax v30.4s, v30.4s, v19.4s\n"
+ "fmla v28.4s, v5.4s, v14.4s\n"
+ "ldr q14, [x20, x14]\n"
"fmin v31.4s, v31.4s, v18.4s\n"
- "str q30, [x10, x23]\n"
- "str q31, [x9, x23]\n"
+ "str q31, [x13, x28]\n"
+ "fmla v28.4s, v2.4s, v11.4s\n"
+ "fmla v29.4s, v5.4s, v16.4s\n"
+ "ldr q11, [x26, x14]\n"
+ "add x14, x14, #0x10\n"
+ "fmin v30.4s, v30.4s, v18.4s\n"
+ "str q30, [x12, x28]\n"
+ "fmla v28.4s, v3.4s, v16.4s\n"
+ "fmla v29.4s, v8.4s, v15.4s\n"
+ "fmla v28.4s, v7.4s, v14.4s\n"
+ "fmax v29.4s, v29.4s, v19.4s\n"
+ "fmin v29.4s, v29.4s, v18.4s\n"
+ "str q29, [x10, x28]\n"
+ "fmla v28.4s, v6.4s, v15.4s\n"
+ "fmla v28.4s, v8.4s, v11.4s\n"
+ "fmax v28.4s, v28.4s, v19.4s\n"
+ "fmin v28.4s, v28.4s, v18.4s\n"
+ "str q28, [x9, x28]\n"
"3:" // Oddments
"tst %x[n_channels], #0x3\n"
"beq 42f\n"
- "ldr q17, [x24, #0x0]\n"
- "ldr q0, [x24, #0x10]\n"
- "mov x23, x28\n"
- "add x12, x12, x23\n"
- "ldr q1, [x24, #0x20]\n"
- "ldr q2, [x24, #0x30]\n"
- "add x11, x11, x23\n"
- "add x10, x10, x23\n"
- "ldr q3, [x24, #0x40]\n"
- "ldr q4, [x24, #0x50]\n"
- "add x9, x9, x23\n"
- "ldr q5, [x24, #0x60]\n"
- "ldr q6, [x24, #0x70]\n"
- "ldr q7, [x24, #0x80]\n"
- "ldr q8, [x24, #0x90]\n"
- "ldr x27, [x13, #0x0]\n"
- "ldr x26, [x13, #0x8]\n"
- "add x27, x27, x28\n"
- "add x26, x26, x28\n"
- "ldr x25, [x13, #0x10]\n"
- "ldr x24, [x13, #0x18]\n"
- "add x25, x25, x28\n"
- "add x24, x24, x28\n"
- "ldr x23, [x13, #0x20]\n"
- "ldr x22, [x13, #0x28]\n"
- "add x23, x23, x28\n"
- "add x22, x22, x28\n"
- "ldr x21, [x13, #0x30]\n"
- "ldr x20, [x13, #0x38]\n"
- "add x21, x21, x28\n"
- "add x20, x20, x28\n"
+ "ldr q17, [x15, #0x0]\n"
+ "ldr q0, [x15, #0x10]\n"
+ "mov x28, x14\n"
+ "ldr q1, [x15, #0x20]\n"
+ "add x13, x13, x28\n"
+ "ldr q2, [x15, #0x30]\n"
+ "add x12, x12, x28\n"
+ "ldr q3, [x15, #0x40]\n"
+ "add x10, x10, x28\n"
+ "ldr q4, [x15, #0x50]\n"
+ "add x9, x9, x28\n"
+ "ldr q5, [x15, #0x60]\n"
+ "ldr q6, [x15, #0x70]\n"
+ "ldr q7, [x15, #0x80]\n"
+ "ldr q8, [x15, #0x90]\n"
+ "ldr x26, [x16, #0x0]\n"
+ "ldr x25, [x16, #0x8]\n"
+ "ldr x24, [x16, #0x10]\n"
+ "add x26, x26, x14\n"
+ "ldr x23, [x16, #0x18]\n"
+ "add x25, x25, x14\n"
+ "ldr x22, [x16, #0x20]\n"
+ "add x24, x24, x14\n"
+ "ldr x21, [x16, #0x28]\n"
+ "add x23, x23, x14\n"
+ "ldr x20, [x16, #0x30]\n"
+ "add x22, x22, x14\n"
+ "ldr x19, [x16, #0x38]\n"
+ "add x21, x21, x14\n"
+ "add x20, x20, x14\n"
+ "add x19, x19, x14\n"
"tbz %x[n_channels], #1, 4f\n"
- "ld1 { v9.d }[0], [x27], #0x8\n"
- "ld1 { v10.d }[0], [x26], #0x8\n"
- "ld1 { v11.d }[0], [x25], #0x8\n"
- "ld1 { v12.d }[0], [x24], #0x8\n"
- "ld1 { v13.d }[0], [x23], #0x8\n"
- "ld1 { v14.d }[0], [x22], #0x8\n"
- "ld1 { v15.d }[0], [x21], #0x8\n"
- "ld1 { v16.d }[0], [x20], #0x8\n"
+ "ld1 { v9.d }[0], [x26], #0x8\n"
+ "ld1 { v10.d }[0], [x25], #0x8\n"
+ "ld1 { v11.d }[0], [x24], #0x8\n"
+ "ld1 { v12.d }[0], [x23], #0x8\n"
+ "ld1 { v13.d }[0], [x22], #0x8\n"
+ "ld1 { v14.d }[0], [x21], #0x8\n"
+ "ld1 { v15.d }[0], [x20], #0x8\n"
+ "ld1 { v16.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 5f\n"
- "ld1 { v9.s }[2], [x27], #0x4\n"
- "ld1 { v10.s }[2], [x26], #0x4\n"
- "ld1 { v11.s }[2], [x25], #0x4\n"
- "ld1 { v12.s }[2], [x24], #0x4\n"
- "ld1 { v13.s }[2], [x23], #0x4\n"
- "ld1 { v14.s }[2], [x22], #0x4\n"
- "ld1 { v15.s }[2], [x21], #0x4\n"
- "ld1 { v16.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x26], #0x4\n"
+ "ld1 { v10.s }[2], [x25], #0x4\n"
+ "ld1 { v11.s }[2], [x24], #0x4\n"
+ "ld1 { v12.s }[2], [x23], #0x4\n"
+ "ld1 { v13.s }[2], [x22], #0x4\n"
+ "ld1 { v14.s }[2], [x21], #0x4\n"
+ "ld1 { v15.s }[2], [x20], #0x4\n"
+ "ld1 { v16.s }[2], [x19], #0x4\n"
"b 5f\n"
"4:" // Oddments: Load inputs (2, 2), (0, 0), (0, 1), (0, 3), (0, 4), (1, 0), (1, 1), (0, 2): Bit 1: Unset
- "ld1 { v9.s }[0], [x27], #0x4\n"
- "ld1 { v10.s }[0], [x26], #0x4\n"
- "ld1 { v11.s }[0], [x25], #0x4\n"
- "ld1 { v12.s }[0], [x24], #0x4\n"
- "ld1 { v13.s }[0], [x23], #0x4\n"
- "ld1 { v14.s }[0], [x22], #0x4\n"
- "ld1 { v15.s }[0], [x21], #0x4\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
+ "ld1 { v9.s }[0], [x26], #0x4\n"
+ "ld1 { v10.s }[0], [x25], #0x4\n"
+ "ld1 { v11.s }[0], [x24], #0x4\n"
+ "ld1 { v12.s }[0], [x23], #0x4\n"
+ "ld1 { v13.s }[0], [x22], #0x4\n"
+ "ld1 { v14.s }[0], [x21], #0x4\n"
+ "ld1 { v15.s }[0], [x20], #0x4\n"
+ "ld1 { v16.s }[0], [x19], #0x4\n"
"5:" // Oddments: Load inputs (2, 2), (0, 0), (0, 1), (0, 3), (0, 4), (1, 0), (1, 1), (0, 2): Bit 1: End
- "mov v28.16b, v17.16b\n fmla v28.4s, v8.4s, v9.4s\n"
- "fmla v28.4s, v0.4s, v10.4s\n"
- "ldr x20, [x13, #0x40]\n"
- "add x20, x20, x28\n"
- "mov v29.16b, v17.16b\n fmla v29.4s, v6.4s, v9.4s\n"
- "fmla v28.4s, v1.4s, v11.4s\n"
- "fmla v29.4s, v1.4s, v12.4s\n"
- "fmla v28.4s, v3.4s, v14.4s\n"
- "fmla v29.4s, v2.4s, v13.4s\n"
- "fmla v28.4s, v4.4s, v15.4s\n"
- "mov v30.16b, v17.16b\n fmla v30.4s, v2.4s, v9.4s\n"
- "mov v31.16b, v17.16b\n fmla v31.4s, v0.4s, v9.4s\n"
- "fmla v28.4s, v2.4s, v16.4s\n"
- "fmla v29.4s, v0.4s, v16.4s\n"
+ "mov v31.16b, v17.16b\n fmla v31.4s, v8.4s, v9.4s\n"
+ "ldr x26, [x16, #0x40]\n"
+ "add x26, x26, x14\n"
+ "mov v30.16b, v17.16b\n fmla v30.4s, v6.4s, v9.4s\n"
+ "mov v29.16b, v17.16b\n fmla v29.4s, v2.4s, v9.4s\n"
+ "mov v28.16b, v17.16b\n fmla v28.4s, v0.4s, v9.4s\n"
+ "fmla v31.4s, v0.4s, v10.4s\n"
+ "fmla v30.4s, v1.4s, v12.4s\n"
+ "fmla v31.4s, v1.4s, v11.4s\n"
+ "fmla v30.4s, v2.4s, v13.4s\n"
+ "fmla v31.4s, v3.4s, v14.4s\n"
+ "fmla v30.4s, v0.4s, v16.4s\n"
+ "fmla v31.4s, v4.4s, v15.4s\n"
+ "fmla v31.4s, v2.4s, v16.4s\n"
"tbz %x[n_channels], #1, 6f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x26], #0x8\n"
"tbz %x[n_channels], #0, 7f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x26], #0x4\n"
"b 7f\n"
"6:" // Oddments: Load input (1, 3): Bit 1: Unset
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x26], #0x4\n"
"7:" // Oddments: Load input (1, 3): Bit 1: End
- "ldr x20, [x13, #0x48]\n"
- "fmla v29.4s, v4.4s, v11.4s\n"
- "add x20, x20, x28\n"
+ "fmla v30.4s, v4.4s, v11.4s\n"
+ "ldr x25, [x16, #0x48]\n"
+ "add x25, x25, x14\n"
"tbz %x[n_channels], #1, 8f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x25], #0x8\n"
"tbz %x[n_channels], #0, 9f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x25], #0x4\n"
"b 9f\n"
"8:" // Oddments: Load input (1, 4): Bit 1: Unset
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x25], #0x4\n"
"9:" // Oddments: Load input (1, 4): Bit 1: End
- "ldr x20, [x13, #0x50]\n"
- "fmla v29.4s, v5.4s, v12.4s\n"
- "add x20, x20, x28\n"
+ "fmla v30.4s, v5.4s, v12.4s\n"
+ "ldr x24, [x16, #0x50]\n"
+ "add x24, x24, x14\n"
"tbz %x[n_channels], #1, 10f\n"
- "ld1 { v13.d }[0], [x20], #0x8\n"
+ "ld1 { v13.d }[0], [x24], #0x8\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x24], #0x4\n"
"b 11f\n"
"10:" // Oddments: Load input (1, 2): Bit 1: Unset
- "ld1 { v13.s }[0], [x20], #0x4\n"
+ "ld1 { v13.s }[0], [x24], #0x4\n"
"11:" // Oddments: Load input (1, 2): Bit 1: End
- "ldr x20, [x13, #0x58]\n"
- "fmla v28.4s, v5.4s, v13.4s\n"
- "fmla v29.4s, v3.4s, v13.4s\n"
- "add x20, x20, x28\n"
+ "fmla v31.4s, v5.4s, v13.4s\n"
+ "ldr x23, [x16, #0x58]\n"
+ "fmla v30.4s, v3.4s, v13.4s\n"
+ "add x23, x23, x14\n"
"tbz %x[n_channels], #1, 12f\n"
- "ld1 { v14.d }[0], [x20], #0x8\n"
+ "ld1 { v14.d }[0], [x23], #0x8\n"
"tbz %x[n_channels], #0, 13f\n"
- "ld1 { v14.s }[2], [x20], #0x4\n"
+ "ld1 { v14.s }[2], [x23], #0x4\n"
"b 13f\n"
"12:" // Oddments: Load input (3, 0): Bit 1: Unset
- "ld1 { v14.s }[0], [x20], #0x4\n"
+ "ld1 { v14.s }[0], [x23], #0x4\n"
"13:" // Oddments: Load input (3, 0): Bit 1: End
- "ldr x20, [x13, #0x60]\n"
- "fmla v30.4s, v3.4s, v14.4s\n"
- "add x20, x20, x28\n"
+ "fmla v29.4s, v3.4s, v14.4s\n"
+ "ldr x22, [x16, #0x60]\n"
+ "add x22, x22, x14\n"
"tbz %x[n_channels], #1, 14f\n"
- "ld1 { v15.d }[0], [x20], #0x8\n"
+ "ld1 { v15.d }[0], [x22], #0x8\n"
"tbz %x[n_channels], #0, 15f\n"
- "ld1 { v15.s }[2], [x20], #0x4\n"
+ "ld1 { v15.s }[2], [x22], #0x4\n"
"b 15f\n"
"14:" // Oddments: Load input (2, 0): Bit 1: Unset
- "ld1 { v15.s }[0], [x20], #0x4\n"
+ "ld1 { v15.s }[0], [x22], #0x4\n"
"15:" // Oddments: Load input (2, 0): Bit 1: End
- "ldr x20, [x13, #0x68]\n"
- "fmla v28.4s, v6.4s, v15.4s\n"
- "fmla v30.4s, v0.4s, v15.4s\n"
- "add x20, x20, x28\n"
+ "fmla v31.4s, v6.4s, v15.4s\n"
+ "ldr x21, [x16, #0x68]\n"
+ "fmla v29.4s, v0.4s, v15.4s\n"
+ "add x21, x21, x14\n"
"tbz %x[n_channels], #1, 16f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x21], #0x8\n"
"tbz %x[n_channels], #0, 17f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x21], #0x4\n"
"b 17f\n"
"16:" // Oddments: Load input (3, 1): Bit 1: Unset
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x21], #0x4\n"
"17:" // Oddments: Load input (3, 1): Bit 1: End
- "ldr x20, [x13, #0x70]\n"
- "fmla v30.4s, v4.4s, v11.4s\n"
- "add x20, x20, x28\n"
+ "fmla v29.4s, v4.4s, v11.4s\n"
+ "ldr x20, [x16, #0x70]\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 18f\n"
"ld1 { v16.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 19f\n"
@@ -475,90 +475,90 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"18:" // Oddments: Load input (2, 1): Bit 1: Unset
"ld1 { v16.s }[0], [x20], #0x4\n"
"19:" // Oddments: Load input (2, 1): Bit 1: End
- "ldr x20, [x13, #0x78]\n"
- "fmla v28.4s, v7.4s, v16.4s\n"
- "fmla v30.4s, v1.4s, v16.4s\n"
- "add x20, x20, x28\n"
+ "fmla v31.4s, v7.4s, v16.4s\n"
+ "ldr x19, [x16, #0x78]\n"
+ "fmla v29.4s, v1.4s, v16.4s\n"
+ "add x19, x19, x14\n"
"tbz %x[n_channels], #1, 20f\n"
- "ld1 { v13.d }[0], [x20], #0x8\n"
+ "ld1 { v13.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 21f\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x19], #0x4\n"
"b 21f\n"
"20:" // Oddments: Load input (3, 3): Bit 1: Unset
- "ld1 { v13.s }[0], [x20], #0x4\n"
+ "ld1 { v13.s }[0], [x19], #0x4\n"
"21:" // Oddments: Load input (3, 3): Bit 1: End
- "ldr x20, [x13, #0x80]\n"
- "fmla v31.4s, v4.4s, v13.4s\n"
- "add x20, x20, x28\n"
+ "fmla v28.4s, v4.4s, v13.4s\n"
+ "ldr x26, [x16, #0x80]\n"
+ "add x26, x26, x14\n"
"tbz %x[n_channels], #1, 22f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x26], #0x8\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x26], #0x4\n"
"b 23f\n"
"22:" // Oddments: Load input (2, 3): Bit 1: Unset
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x26], #0x4\n"
"23:" // Oddments: Load input (2, 3): Bit 1: End
- "ldr x20, [x13, #0x88]\n"
- "fmla v29.4s, v7.4s, v12.4s\n"
- "fmla v31.4s, v1.4s, v12.4s\n"
- "add x20, x20, x28\n"
+ "fmla v30.4s, v7.4s, v12.4s\n"
+ "ldr x25, [x16, #0x88]\n"
+ "fmla v28.4s, v1.4s, v12.4s\n"
+ "add x25, x25, x14\n"
"tbz %x[n_channels], #1, 24f\n"
- "ld1 { v14.d }[0], [x20], #0x8\n"
+ "ld1 { v14.d }[0], [x25], #0x8\n"
"tbz %x[n_channels], #0, 25f\n"
- "ld1 { v14.s }[2], [x20], #0x4\n"
+ "ld1 { v14.s }[2], [x25], #0x4\n"
"b 25f\n"
"24:" // Oddments: Load input (3, 4): Bit 1: Unset
- "ld1 { v14.s }[0], [x20], #0x4\n"
+ "ld1 { v14.s }[0], [x25], #0x4\n"
"25:" // Oddments: Load input (3, 4): Bit 1: End
- "ldr x20, [x13, #0x90]\n"
- "fmla v31.4s, v5.4s, v14.4s\n"
- "add x20, x20, x28\n"
+ "fmla v28.4s, v5.4s, v14.4s\n"
+ "ldr x24, [x16, #0x90]\n"
+ "add x24, x24, x14\n"
"tbz %x[n_channels], #1, 26f\n"
- "ld1 { v15.d }[0], [x20], #0x8\n"
+ "ld1 { v15.d }[0], [x24], #0x8\n"
"tbz %x[n_channels], #0, 27f\n"
- "ld1 { v15.s }[2], [x20], #0x4\n"
+ "ld1 { v15.s }[2], [x24], #0x4\n"
"b 27f\n"
"26:" // Oddments: Load input (4, 0): Bit 1: Unset
- "ld1 { v15.s }[0], [x20], #0x4\n"
+ "ld1 { v15.s }[0], [x24], #0x4\n"
"27:" // Oddments: Load input (4, 0): Bit 1: End
- "ldr x20, [x13, #0x98]\n"
- "fmla v30.4s, v6.4s, v15.4s\n"
- "add x20, x20, x28\n"
+ "fmla v29.4s, v6.4s, v15.4s\n"
+ "ldr x23, [x16, #0x98]\n"
+ "add x23, x23, x14\n"
"tbz %x[n_channels], #1, 28f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x23], #0x8\n"
"tbz %x[n_channels], #0, 29f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x23], #0x4\n"
"b 29f\n"
"28:" // Oddments: Load input (2, 4): Bit 1: Unset
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x23], #0x4\n"
"29:" // Oddments: Load input (2, 4): Bit 1: End
- "ldr x20, [x13, #0xa0]\n"
- "fmla v29.4s, v8.4s, v11.4s\n"
- "fmla v31.4s, v2.4s, v11.4s\n"
- "add x20, x20, x28\n"
+ "fmla v30.4s, v8.4s, v11.4s\n"
+ "ldr x22, [x16, #0xa0]\n"
+ "fmla v28.4s, v2.4s, v11.4s\n"
+ "add x22, x22, x14\n"
"tbz %x[n_channels], #1, 30f\n"
- "ld1 { v13.d }[0], [x20], #0x8\n"
+ "ld1 { v13.d }[0], [x22], #0x8\n"
"tbz %x[n_channels], #0, 31f\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x22], #0x4\n"
"b 31f\n"
"30:" // Oddments: Load input (4, 1): Bit 1: Unset
- "ld1 { v13.s }[0], [x20], #0x4\n"
+ "ld1 { v13.s }[0], [x22], #0x4\n"
"31:" // Oddments: Load input (4, 1): Bit 1: End
- "ldr x20, [x13, #0xa8]\n"
- "fmla v30.4s, v7.4s, v13.4s\n"
- "add x20, x20, x28\n"
+ "fmla v29.4s, v7.4s, v13.4s\n"
+ "ldr x21, [x16, #0xa8]\n"
+ "add x21, x21, x14\n"
"tbz %x[n_channels], #1, 32f\n"
- "ld1 { v16.d }[0], [x20], #0x8\n"
+ "ld1 { v16.d }[0], [x21], #0x8\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v16.s }[2], [x20], #0x4\n"
+ "ld1 { v16.s }[2], [x21], #0x4\n"
"b 33f\n"
"32:" // Oddments: Load input (3, 2): Bit 1: Unset
- "ld1 { v16.s }[0], [x20], #0x4\n"
+ "ld1 { v16.s }[0], [x21], #0x4\n"
"33:" // Oddments: Load input (3, 2): Bit 1: End
- "ldr x20, [x13, #0xb0]\n"
- "fmla v30.4s, v5.4s, v16.4s\n"
- "fmla v31.4s, v3.4s, v16.4s\n"
- "add x20, x20, x28\n"
+ "fmla v29.4s, v5.4s, v16.4s\n"
+ "ldr x20, [x16, #0xb0]\n"
+ "fmla v28.4s, v3.4s, v16.4s\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 34f\n"
"ld1 { v14.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 35f\n"
@@ -567,59 +567,61 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"34:" // Oddments: Load input (4, 3): Bit 1: Unset
"ld1 { v14.s }[0], [x20], #0x4\n"
"35:" // Oddments: Load input (4, 3): Bit 1: End
- "ldr x20, [x13, #0xb8]\n"
- "fmla v31.4s, v7.4s, v14.4s\n"
- "add x20, x20, x28\n"
+ "fmla v28.4s, v7.4s, v14.4s\n"
+ "ldr x19, [x16, #0xb8]\n"
+ "add x19, x19, x14\n"
"tbz %x[n_channels], #1, 36f\n"
- "ld1 { v15.d }[0], [x20], #0x8\n"
+ "ld1 { v15.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 37f\n"
- "ld1 { v15.s }[2], [x20], #0x4\n"
+ "ld1 { v15.s }[2], [x19], #0x4\n"
"b 37f\n"
"36:" // Oddments: Load input (4, 2): Bit 1: Unset
- "ld1 { v15.s }[0], [x20], #0x4\n"
+ "ld1 { v15.s }[0], [x19], #0x4\n"
"37:" // Oddments: Load input (4, 2): Bit 1: End
- "ldr x20, [x13, #0xc0]\n"
- "fmla v30.4s, v8.4s, v15.4s\n"
- "fmla v31.4s, v6.4s, v15.4s\n"
- "add x20, x20, x28\n"
+ "fmla v29.4s, v8.4s, v15.4s\n"
+ "ldr x26, [x16, #0xc0]\n"
+ "fmla v28.4s, v6.4s, v15.4s\n"
+ "add x26, x26, x14\n"
"tbz %x[n_channels], #1, 38f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x26], #0x8\n"
"tbz %x[n_channels], #0, 39f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x26], #0x4\n"
"b 39f\n"
"38:" // Oddments: Load input (4, 4): Bit 1: Unset
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x26], #0x4\n"
"39:" // Oddments: Load input (4, 4): Bit 1: End
- "fmla v31.4s, v8.4s, v11.4s\n"
- "fmax v28.4s, v28.4s, v19.4s\n"
- "fmax v29.4s, v29.4s, v19.4s\n"
- "fmax v30.4s, v30.4s, v19.4s\n"
+ "fmla v28.4s, v8.4s, v11.4s\n"
"fmax v31.4s, v31.4s, v19.4s\n"
- "fmin v28.4s, v28.4s, v18.4s\n"
- "fmin v29.4s, v29.4s, v18.4s\n"
- "fmin v30.4s, v30.4s, v18.4s\n"
+ "fmax v30.4s, v30.4s, v19.4s\n"
+ "fmax v29.4s, v29.4s, v19.4s\n"
"fmin v31.4s, v31.4s, v18.4s\n"
+ "fmin v30.4s, v30.4s, v18.4s\n"
+ "fmin v29.4s, v29.4s, v18.4s\n"
+ "fmax v28.4s, v28.4s, v19.4s\n"
+ "fmin v28.4s, v28.4s, v18.4s\n"
"tbz %x[n_channels], #1, 40f\n"
- "st1 { v28.d }[0], [x12], #0x8\n"
- "st1 { v29.d }[0], [x11], #0x8\n"
- "st1 { v30.d }[0], [x10], #0x8\n"
- "st1 { v31.d }[0], [x9], #0x8\n"
+ "st1 { v31.d }[0], [x13], #0x8\n"
+ "st1 { v30.d }[0], [x12], #0x8\n"
+ "st1 { v29.d }[0], [x10], #0x8\n"
+ "st1 { v28.d }[0], [x9], #0x8\n"
"tbz %x[n_channels], #0, 41f\n"
- "st1 { v28.s }[2], [x12], #0x4\n"
- "st1 { v29.s }[2], [x11], #0x4\n"
- "st1 { v30.s }[2], [x10], #0x4\n"
- "st1 { v31.s }[2], [x9], #0x4\n"
+ "st1 { v31.s }[2], [x13], #0x4\n"
+ "st1 { v30.s }[2], [x12], #0x4\n"
+ "st1 { v29.s }[2], [x10], #0x4\n"
+ "st1 { v28.s }[2], [x9], #0x4\n"
"b 41f\n"
"40:" // Oddments: Store: Bit 1: Unset
- "st1 { v28.s }[0], [x12], #0x4\n"
- "st1 { v29.s }[0], [x11], #0x4\n"
- "st1 { v30.s }[0], [x10], #0x4\n"
- "st1 { v31.s }[0], [x9], #0x4\n"
+ "st1 { v31.s }[0], [x13], #0x4\n"
+ "st1 { v30.s }[0], [x12], #0x4\n"
+ "st1 { v29.s }[0], [x10], #0x4\n"
+ "st1 { v28.s }[0], [x9], #0x4\n"
"41:" // Oddments: Store: Bit 1: End
+
"42:" // End
+
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
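For reference, the arithmetic the kernel above performs per output point is a 3x3 depthwise multiply-accumulate with a fused activation clamp: each accumulator starts from the bias (v17), the fmla chain applies one weight/input pair per tap, and the fmax/fmin pair against v19/v18 applies the [min, max] bounds loaded from the params struct. Below is a scalar sketch of that computation with simplified, hypothetical names; the real kernel vectorises four channels per NEON register, fetches its input pointers through the indirection table addressed by x16, and handles the one-to-three leftover channels in the "Oddments" path.

    // Scalar reference for one output element of a 3x3 depthwise
    // convolution with a fused min/max clamp. Simplified names; the
    // kernel above computes a 2x2 tile of such outputs at once.
    #include <algorithm>
    #include <cstddef>

    float depthwise_3x3_point(const float *const *inptrs,   // 9 pointers, one per tap
                              const float *const *weights,  // 9 per-channel weight rows
                              const float *bias,
                              float act_min, float act_max, size_t c)
    {
        float acc = bias[c];                                 // mov v..., v17 in the kernel
        for (int k = 0; k < 9; ++k)
            acc += weights[k][c] * inptrs[k][c];             // the fmla chain
        return std::min(std::max(acc, act_min), act_max);    // fmax then fmin
    }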
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp
index 6ca3976f02..8b030ecc8b 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,403 +87,403 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
);
__asm__ __volatile__(
- "mov x27, #0x0\n"
"mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"1:" // Tile loop
- "str x27, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "mov x23, #0x2\n"
- "mov x25, #0x2\n"
- "str x26, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "str x26, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "mov x22, #0x2\n"
+ "mov x21, #0x2\n"
+ "str x25, [%x[params_struct], %[offsetof_args_tile_j]]\n"
"ldr x24, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x2, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "mul x22, x27, x24\n" // offset = tile_i * ld_input_row
- "ldr x21, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "madd x22, x26, x2, x22\n" // offset += tile_j * ld_input_col
- "ldr x3, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "lsl x2, x2, #0x2\n"
- "mul x20, x27, x21\n" // offset = tile_i * ld_output_row
- "ldr x4, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "ldr x5, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "add x6, x2, x2\n"
- "mul x22, x22, x23\n" // offset *= kernel_stride * output_size
- "add x4, x4, x22, LSL #2\n" // inptr[0] += offset * sizeof(float)
- "add x7, x4, x24, LSL #2\n"
- "ldr x8, [%x[params_struct], %[offsetof_args_params]]\n"
- "madd x20, x26, x3, x20\n" // offset += tile_j * ld_output_col
- "add x17, x7, x24, LSL #2\n"
- "mov x23, #0x10\n" // cntb _, ALL, #1
- "mul x20, x20, x25\n" // offset *= output_tile_size
- "lsr x22, %x[n_channels], #0x2\n"
- "add x16, x17, x24, LSL #2\n"
- "add x15, x6, x2\n"
- "add x14, x16, x24, LSL #2\n"
- "add x13, x15, x2\n"
- "add x5, x5, x20, LSL #2\n" // outptrs[0] += offset * sizeof(float)
+ "ldr x3, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "mul x20, x26, x24\n" // offset = tile_i * ld_input_row
+ "ldr x23, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "madd x20, x25, x3, x20\n" // offset += tile_j * ld_input_col
+ "ldr x4, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+ "lsl x3, x3, #0x2\n"
+ "mul x19, x26, x23\n" // offset = tile_i * ld_output_row
+ "ldr x5, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "ldr x6, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "add x7, x3, x3\n"
+ "mul x20, x20, x22\n" // offset *= kernel_stride * output_size
+ "add x5, x5, x20, LSL #2\n" // inptr[0] += offset * sizeof(float)
+ "add x8, x5, x24, LSL #2\n"
+ "ldr x17, [%x[params_struct], %[offsetof_args_params]]\n"
+ "madd x19, x25, x4, x19\n" // offset += tile_j * ld_output_col
+ "add x16, x8, x24, LSL #2\n"
+ "mov x22, #0x10\n" // cntb _, ALL, #1
+ "mul x19, x19, x21\n" // offset *= output_tile_size
+ "lsr x21, %x[n_channels], #0x2\n"
+ "add x15, x16, x24, LSL #2\n"
+ "add x14, x7, x3\n"
+ "add x13, x15, x24, LSL #2\n"
+ "add x12, x14, x3\n"
+ "add x6, x6, x19, LSL #2\n" // outptrs[0] += offset * sizeof(float)
"add x20, %x[params_struct], %[offsetof_args_min]\n"
+ "add x19, %x[params_struct], %[offsetof_args_max]\n"
"ld1r { v18.4s }, [x20]\n"
- "add x20, %x[params_struct], %[offsetof_args_max]\n"
- "ld1r { v17.4s }, [x20]\n"
- "add x12, x14, x24, LSL #2\n"
- "add x11, x13, x2\n"
- "add x10, x5, x21, LSL #2\n"
- "lsl x3, x3, #0x2\n"
- "mov x21, #0x0\n"
- "sub x20, XZR, x23\n"
- "cbz x22, 4f\n"
- "ldr q16, [x8, #0x0]\n"
- "ldr q0, [x8, #0x10]\n"
- "cmp x23, x22, LSL #4\n"
- "ldr q1, [x8, #0x20]\n"
- "ldr q2, [x8, #0x30]\n"
- "ldr q3, [x8, #0x40]\n"
- "ldr q4, [x8, #0x50]\n"
- "add x8, x8, #0x60\n"
- "ld1 { v5.4s }, [x4]\n"
- "ldr q6, [x4, x2]\n"
- "ld1 { v7.4s }, [x7]\n"
- "ldr q8, [x7, x2]\n"
- "ldr q9, [x4, x6]\n"
- "ldr q13, [x7, x6]\n"
- "ldr q11, [x4, x15]\n"
- "ldr q12, [x4, x13]\n"
- "ldr q10, [x7, x11]\n"
- "ld1 { v14.4s }, [x17]\n"
+ "ld1r { v17.4s }, [x19]\n"
+ "add x11, x13, x24, LSL #2\n"
+ "add x10, x12, x3\n"
+ "add x9, x6, x23, LSL #2\n"
+ "lsl x4, x4, #0x2\n"
+ "mov x20, #0x0\n"
+ "sub x19, XZR, x22\n"
+ "cbz x21, 4f\n"
+ "ldr q16, [x17, #0x0]\n"
+ "cmp x22, x21, LSL #4\n"
+ "ldr q0, [x17, #0x10]\n"
+ "ldr q1, [x17, #0x20]\n"
+ "ldr q2, [x17, #0x30]\n"
+ "ldr q3, [x17, #0x40]\n"
+ "ldr q4, [x17, #0x50]\n"
+ "ld1 { v5.4s }, [x5]\n"
+ "add x17, x17, #0x60\n"
+ "ldr q6, [x5, x3]\n"
+ "ld1 { v7.4s }, [x8]\n"
+ "ldr q8, [x8, x3]\n"
+ "ldr q9, [x5, x7]\n"
+ "ldr q13, [x8, x7]\n"
+ "ldr q11, [x5, x14]\n"
+ "ldr q12, [x5, x12]\n"
+ "ldr q10, [x8, x10]\n"
+ "ld1 { v14.4s }, [x16]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
"mov v28.16b, v16.16b\n fmla v28.4s, v0.4s, v5.4s\n"
- "ldr q5, [x7, x15]\n"
"mov v29.16b, v16.16b\n fmla v29.4s, v0.4s, v6.4s\n"
- "add x23, x23, #0x10\n"
+ "ldr q5, [x8, x14]\n"
+ "add x22, x22, #0x10\n"
"mov v30.16b, v16.16b\n fmla v30.4s, v0.4s, v7.4s\n"
"mov v31.16b, v16.16b\n fmla v31.4s, v0.4s, v8.4s\n"
- "ldr q0, [x8, #0x0]\n"
- "ldr q16, [x8, #0x140]\n"
+ "ldr q0, [x17, #0x0]\n"
+ "cmp x22, x21, LSL #4\n"
"fmla v28.4s, v1.4s, v6.4s\n"
- "ldr q6, [x7, x13]\n"
"fmla v29.4s, v1.4s, v9.4s\n"
- "add x7, x7, #0x10\n"
+ "ldr q6, [x8, x12]\n"
+ "add x8, x8, #0x10\n"
"fmla v30.4s, v1.4s, v8.4s\n"
"fmla v31.4s, v1.4s, v13.4s\n"
- "ldr q1, [x8, #0x10]\n"
- "cmp x23, x22, LSL #4\n"
+ "ldr q1, [x17, #0x10]\n"
+ "add x19, x19, #0x10\n"
"fmla v28.4s, v2.4s, v9.4s\n"
- "ldr q9, [x4, x11]\n"
"fmla v29.4s, v2.4s, v11.4s\n"
- "add x4, x4, #0x10\n"
+ "ldr q9, [x5, x10]\n"
+ "add x5, x5, #0x10\n"
"fmla v30.4s, v2.4s, v13.4s\n"
"fmla v31.4s, v2.4s, v5.4s\n"
- "ldr q2, [x8, #0x20]\n"
+ "ldr q2, [x17, #0x20]\n"
"add x20, x20, #0x10\n"
"fmla v28.4s, v3.4s, v11.4s\n"
- "ldr q11, [x17, x2]\n"
"fmla v29.4s, v3.4s, v12.4s\n"
- "add x21, x21, #0x10\n"
+ "ldr q11, [x16, x3]\n"
+ "ldr q16, [x17, #0x140]\n"
"fmla v30.4s, v3.4s, v5.4s\n"
"fmla v31.4s, v3.4s, v6.4s\n"
- "ldr q3, [x8, #0x30]\n"
+ "ldr q3, [x17, #0x30]\n"
"fmla v28.4s, v4.4s, v12.4s\n"
- "ldr q12, [x17, x6]\n"
"fmla v29.4s, v4.4s, v9.4s\n"
- "ldr q9, [x17, x15]\n"
+ "ldr q12, [x16, x7]\n"
+ "ldr q9, [x16, x14]\n"
"fmla v30.4s, v4.4s, v6.4s\n"
"fmla v31.4s, v4.4s, v10.4s\n"
- "ldr q4, [x8, #0x40]\n"
+ "ldr q4, [x17, #0x40]\n"
"fmla v28.4s, v0.4s, v7.4s\n"
- "ld1 { v7.4s }, [x7]\n"
"fmla v29.4s, v0.4s, v8.4s\n"
+ "ld1 { v7.4s }, [x8]\n"
"fmla v30.4s, v0.4s, v14.4s\n"
"fmla v31.4s, v0.4s, v11.4s\n"
- "ldr q0, [x8, #0x50]\n"
+ "ldr q0, [x17, #0x50]\n"
"fmla v28.4s, v1.4s, v8.4s\n"
- "ldr q8, [x17, x11]\n"
"fmla v29.4s, v1.4s, v13.4s\n"
+ "ldr q8, [x16, x10]\n"
"fmla v30.4s, v1.4s, v11.4s\n"
"fmla v31.4s, v1.4s, v12.4s\n"
- "ldr q1, [x8, #0x60]\n"
+ "ldr q1, [x17, #0x60]\n"
"fmla v28.4s, v2.4s, v13.4s\n"
- "ldr q13, [x17, x13]\n"
"fmla v29.4s, v2.4s, v5.4s\n"
- "add x17, x17, #0x10\n"
+ "ldr q13, [x16, x12]\n"
+ "add x16, x16, #0x10\n"
"fmla v30.4s, v2.4s, v12.4s\n"
"fmla v31.4s, v2.4s, v9.4s\n"
- "ldr q2, [x8, #0x70]\n"
+ "ldr q2, [x17, #0x70]\n"
"fmla v28.4s, v3.4s, v5.4s\n"
- "ld1 { v5.4s }, [x16]\n"
"fmla v29.4s, v3.4s, v6.4s\n"
+ "ld1 { v5.4s }, [x15]\n"
"fmla v30.4s, v3.4s, v9.4s\n"
"fmla v31.4s, v3.4s, v13.4s\n"
- "ldr q3, [x8, #0x80]\n"
+ "ldr q3, [x17, #0x80]\n"
"fmla v28.4s, v4.4s, v6.4s\n"
- "ldr q6, [x16, x2]\n"
"fmla v29.4s, v4.4s, v10.4s\n"
- "ldr q10, [x16, x6]\n"
+ "ldr q6, [x15, x3]\n"
+ "ldr q10, [x15, x7]\n"
"fmla v30.4s, v4.4s, v13.4s\n"
"fmla v31.4s, v4.4s, v8.4s\n"
- "ldr q4, [x8, #0x90]\n"
+ "ldr q4, [x17, #0x90]\n"
"fmla v28.4s, v0.4s, v14.4s\n"
- "ldr q14, [x16, x11]\n"
"fmla v29.4s, v0.4s, v11.4s\n"
+ "ldr q14, [x15, x10]\n"
"fmla v30.4s, v0.4s, v5.4s\n"
"fmla v31.4s, v0.4s, v6.4s\n"
- "ldr q0, [x8, #0xa0]\n"
+ "ldr q0, [x17, #0xa0]\n"
"fmla v28.4s, v1.4s, v11.4s\n"
- "ldr q11, [x16, x15]\n"
"fmla v29.4s, v1.4s, v12.4s\n"
+ "ldr q11, [x15, x14]\n"
"fmla v30.4s, v1.4s, v6.4s\n"
"fmla v31.4s, v1.4s, v10.4s\n"
- "ldr q1, [x8, #0xb0]\n"
+ "ldr q1, [x17, #0xb0]\n"
"fmla v28.4s, v2.4s, v12.4s\n"
- "ldr q12, [x16, x13]\n"
"fmla v29.4s, v2.4s, v9.4s\n"
- "add x16, x16, #0x10\n"
+ "ldr q12, [x15, x12]\n"
+ "add x15, x15, #0x10\n"
"fmla v30.4s, v2.4s, v10.4s\n"
"fmla v31.4s, v2.4s, v11.4s\n"
- "ldr q2, [x8, #0xc0]\n"
+ "ldr q2, [x17, #0xc0]\n"
"fmla v28.4s, v3.4s, v9.4s\n"
- "ld1 { v9.4s }, [x14]\n"
"fmla v29.4s, v3.4s, v13.4s\n"
+ "ld1 { v9.4s }, [x13]\n"
"fmla v30.4s, v3.4s, v11.4s\n"
"fmla v31.4s, v3.4s, v12.4s\n"
- "ldr q3, [x8, #0xd0]\n"
+ "ldr q3, [x17, #0xd0]\n"
"fmla v28.4s, v4.4s, v13.4s\n"
- "ldr q13, [x14, x2]\n"
"fmla v29.4s, v4.4s, v8.4s\n"
- "ldr q8, [x14, x13]\n"
+ "ldr q13, [x13, x3]\n"
+ "ldr q8, [x13, x12]\n"
"fmla v30.4s, v4.4s, v12.4s\n"
"fmla v31.4s, v4.4s, v14.4s\n"
- "ldr q4, [x8, #0xe0]\n"
+ "ldr q4, [x17, #0xe0]\n"
"fmla v28.4s, v0.4s, v5.4s\n"
- "ldr q5, [x14, x6]\n"
"fmla v29.4s, v0.4s, v6.4s\n"
+ "ldr q5, [x13, x7]\n"
"fmla v30.4s, v0.4s, v9.4s\n"
"fmla v31.4s, v0.4s, v13.4s\n"
- "ldr q0, [x8, #0xf0]\n"
+ "ldr q0, [x17, #0xf0]\n"
"fmla v28.4s, v1.4s, v6.4s\n"
- "ldr q6, [x14, x15]\n"
"fmla v29.4s, v1.4s, v10.4s\n"
+ "ldr q6, [x13, x14]\n"
"fmla v30.4s, v1.4s, v13.4s\n"
"fmla v31.4s, v1.4s, v5.4s\n"
- "ldr q1, [x8, #0x100]\n"
+ "ldr q1, [x17, #0x100]\n"
"fmla v28.4s, v2.4s, v10.4s\n"
- "ldr q10, [x14, x11]\n"
"fmla v29.4s, v2.4s, v11.4s\n"
- "add x14, x14, #0x10\n"
+ "ldr q10, [x13, x10]\n"
+ "add x13, x13, #0x10\n"
"fmla v30.4s, v2.4s, v5.4s\n"
"fmla v31.4s, v2.4s, v6.4s\n"
- "ldr q2, [x8, #0x110]\n"
+ "ldr q2, [x17, #0x110]\n"
"fmla v28.4s, v3.4s, v11.4s\n"
- "ld1 { v11.4s }, [x12]\n"
"fmla v29.4s, v3.4s, v12.4s\n"
+ "ld1 { v11.4s }, [x11]\n"
"fmla v30.4s, v3.4s, v6.4s\n"
"fmla v31.4s, v3.4s, v8.4s\n"
- "ldr q3, [x8, #0x120]\n"
+ "ldr q3, [x17, #0x120]\n"
"fmla v28.4s, v4.4s, v12.4s\n"
- "ldr q12, [x12, x2]\n"
"fmla v29.4s, v4.4s, v14.4s\n"
- "ld1 { v14.4s }, [x17]\n"
+ "ldr q12, [x11, x3]\n"
+ "ld1 { v14.4s }, [x16]\n"
"fmla v30.4s, v4.4s, v8.4s\n"
"fmla v31.4s, v4.4s, v10.4s\n"
- "ldr q4, [x8, #0x130]\n"
+ "ldr q4, [x17, #0x130]\n"
"fmla v28.4s, v0.4s, v9.4s\n"
- "ldr q9, [x12, x6]\n"
"fmla v29.4s, v0.4s, v13.4s\n"
+ "ldr q9, [x11, x7]\n"
"fmla v30.4s, v0.4s, v11.4s\n"
- "ldr q11, [x12, x15]\n"
"fmla v31.4s, v0.4s, v12.4s\n"
- "ldr q0, [x8, #0x150]\n"
+ "ldr q11, [x11, x14]\n"
+ "ldr q0, [x17, #0x150]\n"
"fmla v28.4s, v1.4s, v13.4s\n"
- "ldr q13, [x7, x6]\n"
"fmla v29.4s, v1.4s, v5.4s\n"
+ "ldr q13, [x8, x7]\n"
"fmla v30.4s, v1.4s, v12.4s\n"
- "ldr q12, [x12, x13]\n"
"fmla v31.4s, v1.4s, v9.4s\n"
- "ldr q1, [x8, #0x160]\n"
+ "ldr q12, [x11, x12]\n"
+ "ldr q1, [x17, #0x160]\n"
"fmla v28.4s, v2.4s, v5.4s\n"
- "ld1 { v5.4s }, [x4]\n"
"fmla v29.4s, v2.4s, v6.4s\n"
+ "ld1 { v5.4s }, [x5]\n"
"fmla v30.4s, v2.4s, v9.4s\n"
- "ldr q9, [x12, x11]\n"
"fmla v31.4s, v2.4s, v11.4s\n"
- "ldr q2, [x8, #0x170]\n"
+ "ldr q9, [x11, x10]\n"
+ "add x11, x11, #0x10\n"
"fmla v28.4s, v3.4s, v6.4s\n"
- "ldr q6, [x4, x2]\n"
"fmla v29.4s, v3.4s, v8.4s\n"
- "add x12, x12, #0x10\n"
+ "ldr q6, [x5, x3]\n"
+ "ldr q2, [x17, #0x170]\n"
"fmla v30.4s, v3.4s, v11.4s\n"
- "ldr q11, [x4, x15]\n"
"fmla v31.4s, v3.4s, v12.4s\n"
- "ldr q3, [x8, #0x180]\n"
+ "ldr q11, [x5, x14]\n"
+ "ldr q3, [x17, #0x180]\n"
"fmla v28.4s, v4.4s, v8.4s\n"
- "ldr q8, [x7, x2]\n"
"fmla v29.4s, v4.4s, v10.4s\n"
- "ldr q10, [x7, x11]\n"
+ "fmax v28.4s, v28.4s, v18.4s\n"
+ "ldr q8, [x8, x3]\n"
"fmla v30.4s, v4.4s, v12.4s\n"
- "ldr q12, [x4, x13]\n"
"fmla v31.4s, v4.4s, v9.4s\n"
- "ldr q9, [x4, x6]\n"
- "ldr q4, [x8, #0x190]\n"
- "fmax v28.4s, v28.4s, v18.4s\n"
"fmax v29.4s, v29.4s, v18.4s\n"
- "add x8, x8, #0x1a0\n"
+ "ldr q9, [x5, x7]\n"
"fmax v30.4s, v30.4s, v18.4s\n"
"fmax v31.4s, v31.4s, v18.4s\n"
+ "ldr q12, [x5, x12]\n"
+ "ldr q10, [x8, x10]\n"
"fmin v28.4s, v28.4s, v17.4s\n"
"fmin v29.4s, v29.4s, v17.4s\n"
- "st1 { v28.4s }, [x5]\n"
+ "st1 { v28.4s }, [x6]\n"
+ "ldr q4, [x17, #0x190]\n"
"fmin v30.4s, v30.4s, v17.4s\n"
"fmin v31.4s, v31.4s, v17.4s\n"
- "str q29, [x5, x3]\n"
- "add x5, x5, #0x10\n"
- "st1 { v30.4s }, [x10]\n"
- "str q31, [x10, x3]\n"
- "add x10, x10, #0x10\n"
+ "str q29, [x6, x4]\n"
+ "add x6, x6, #0x10\n"
+ "st1 { v30.4s }, [x9]\n"
+ "add x17, x17, #0x1a0\n"
+ "str q31, [x9, x4]\n"
+ "add x9, x9, #0x10\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
"mov v28.16b, v16.16b\n fmla v28.4s, v0.4s, v5.4s\n"
- "ldr q5, [x7, x15]\n"
"mov v29.16b, v16.16b\n fmla v29.4s, v0.4s, v6.4s\n"
+ "ldr q5, [x8, x14]\n"
"mov v30.16b, v16.16b\n fmla v30.4s, v0.4s, v7.4s\n"
"mov v31.16b, v16.16b\n fmla v31.4s, v0.4s, v8.4s\n"
- "ldr q0, [x8, #0x0]\n"
+ "ldr q0, [x17, #0x0]\n"
"fmla v28.4s, v1.4s, v6.4s\n"
- "ldr q6, [x7, x13]\n"
"fmla v29.4s, v1.4s, v9.4s\n"
- "add x7, x7, #0x10\n"
+ "ldr q6, [x8, x12]\n"
+ "add x8, x8, #0x10\n"
"fmla v30.4s, v1.4s, v8.4s\n"
"fmla v31.4s, v1.4s, v13.4s\n"
- "ldr q1, [x8, #0x10]\n"
+ "ldr q1, [x17, #0x10]\n"
"fmla v28.4s, v2.4s, v9.4s\n"
- "ldr q9, [x4, x11]\n"
"fmla v29.4s, v2.4s, v11.4s\n"
- "add x4, x4, #0x10\n"
+ "ldr q9, [x5, x10]\n"
+ "add x5, x5, #0x10\n"
"fmla v30.4s, v2.4s, v13.4s\n"
"fmla v31.4s, v2.4s, v5.4s\n"
- "ldr q2, [x8, #0x20]\n"
+ "ldr q2, [x17, #0x20]\n"
"fmla v28.4s, v3.4s, v11.4s\n"
- "ldr q11, [x17, x2]\n"
"fmla v29.4s, v3.4s, v12.4s\n"
+ "ldr q11, [x16, x3]\n"
"fmla v30.4s, v3.4s, v5.4s\n"
"fmla v31.4s, v3.4s, v6.4s\n"
- "ldr q3, [x8, #0x30]\n"
+ "ldr q3, [x17, #0x30]\n"
"fmla v28.4s, v4.4s, v12.4s\n"
- "ldr q12, [x17, x6]\n"
"fmla v29.4s, v4.4s, v9.4s\n"
- "ldr q9, [x17, x15]\n"
+ "ldr q12, [x16, x7]\n"
+ "ldr q9, [x16, x14]\n"
"fmla v30.4s, v4.4s, v6.4s\n"
"fmla v31.4s, v4.4s, v10.4s\n"
- "ldr q4, [x8, #0x40]\n"
+ "ldr q4, [x17, #0x40]\n"
"fmla v28.4s, v0.4s, v7.4s\n"
"fmla v29.4s, v0.4s, v8.4s\n"
"fmla v30.4s, v0.4s, v14.4s\n"
"fmla v31.4s, v0.4s, v11.4s\n"
- "ldr q0, [x8, #0x50]\n"
+ "ldr q0, [x17, #0x50]\n"
"fmla v28.4s, v1.4s, v8.4s\n"
- "ldr q8, [x17, x11]\n"
"fmla v29.4s, v1.4s, v13.4s\n"
+ "ldr q8, [x16, x10]\n"
"fmla v30.4s, v1.4s, v11.4s\n"
"fmla v31.4s, v1.4s, v12.4s\n"
- "ldr q1, [x8, #0x60]\n"
+ "ldr q1, [x17, #0x60]\n"
"fmla v28.4s, v2.4s, v13.4s\n"
- "ldr q13, [x17, x13]\n"
"fmla v29.4s, v2.4s, v5.4s\n"
- "add x17, x17, #0x10\n"
+ "ldr q13, [x16, x12]\n"
+ "add x16, x16, #0x10\n"
"fmla v30.4s, v2.4s, v12.4s\n"
"fmla v31.4s, v2.4s, v9.4s\n"
- "ldr q2, [x8, #0x70]\n"
+ "ldr q2, [x17, #0x70]\n"
"fmla v28.4s, v3.4s, v5.4s\n"
- "ld1 { v5.4s }, [x16]\n"
"fmla v29.4s, v3.4s, v6.4s\n"
+ "ld1 { v5.4s }, [x15]\n"
"fmla v30.4s, v3.4s, v9.4s\n"
"fmla v31.4s, v3.4s, v13.4s\n"
- "ldr q3, [x8, #0x80]\n"
+ "ldr q3, [x17, #0x80]\n"
"fmla v28.4s, v4.4s, v6.4s\n"
- "ldr q6, [x16, x2]\n"
"fmla v29.4s, v4.4s, v10.4s\n"
- "ldr q10, [x16, x6]\n"
+ "ldr q6, [x15, x3]\n"
+ "ldr q10, [x15, x7]\n"
"fmla v30.4s, v4.4s, v13.4s\n"
"fmla v31.4s, v4.4s, v8.4s\n"
- "ldr q4, [x8, #0x90]\n"
+ "ldr q4, [x17, #0x90]\n"
"fmla v28.4s, v0.4s, v14.4s\n"
- "ldr q14, [x16, x11]\n"
"fmla v29.4s, v0.4s, v11.4s\n"
+ "ldr q14, [x15, x10]\n"
"fmla v30.4s, v0.4s, v5.4s\n"
"fmla v31.4s, v0.4s, v6.4s\n"
- "ldr q0, [x8, #0xa0]\n"
+ "ldr q0, [x17, #0xa0]\n"
"fmla v28.4s, v1.4s, v11.4s\n"
- "ldr q11, [x16, x15]\n"
"fmla v29.4s, v1.4s, v12.4s\n"
+ "ldr q11, [x15, x14]\n"
"fmla v30.4s, v1.4s, v6.4s\n"
"fmla v31.4s, v1.4s, v10.4s\n"
- "ldr q1, [x8, #0xb0]\n"
+ "ldr q1, [x17, #0xb0]\n"
"fmla v28.4s, v2.4s, v12.4s\n"
- "ldr q12, [x16, x13]\n"
"fmla v29.4s, v2.4s, v9.4s\n"
- "add x16, x16, #0x10\n"
+ "ldr q12, [x15, x12]\n"
+ "add x15, x15, #0x10\n"
"fmla v30.4s, v2.4s, v10.4s\n"
"fmla v31.4s, v2.4s, v11.4s\n"
- "ldr q2, [x8, #0xc0]\n"
+ "ldr q2, [x17, #0xc0]\n"
"fmla v28.4s, v3.4s, v9.4s\n"
- "ld1 { v9.4s }, [x14]\n"
"fmla v29.4s, v3.4s, v13.4s\n"
+ "ld1 { v9.4s }, [x13]\n"
"fmla v30.4s, v3.4s, v11.4s\n"
"fmla v31.4s, v3.4s, v12.4s\n"
- "ldr q3, [x8, #0xd0]\n"
+ "ldr q3, [x17, #0xd0]\n"
"fmla v28.4s, v4.4s, v13.4s\n"
- "ldr q13, [x14, x2]\n"
"fmla v29.4s, v4.4s, v8.4s\n"
- "ldr q8, [x14, x13]\n"
+ "ldr q13, [x13, x3]\n"
+ "ldr q8, [x13, x12]\n"
"fmla v30.4s, v4.4s, v12.4s\n"
"fmla v31.4s, v4.4s, v14.4s\n"
- "ldr q4, [x8, #0xe0]\n"
+ "ldr q4, [x17, #0xe0]\n"
"fmla v28.4s, v0.4s, v5.4s\n"
- "ldr q5, [x14, x6]\n"
"fmla v29.4s, v0.4s, v6.4s\n"
+ "ldr q5, [x13, x7]\n"
"fmla v30.4s, v0.4s, v9.4s\n"
"fmla v31.4s, v0.4s, v13.4s\n"
- "ldr q0, [x8, #0xf0]\n"
+ "ldr q0, [x17, #0xf0]\n"
"fmla v28.4s, v1.4s, v6.4s\n"
- "ldr q6, [x14, x15]\n"
"fmla v29.4s, v1.4s, v10.4s\n"
+ "ldr q6, [x13, x14]\n"
"fmla v30.4s, v1.4s, v13.4s\n"
"fmla v31.4s, v1.4s, v5.4s\n"
- "ldr q1, [x8, #0x100]\n"
+ "ldr q1, [x17, #0x100]\n"
"fmla v28.4s, v2.4s, v10.4s\n"
- "ldr q10, [x14, x11]\n"
"fmla v29.4s, v2.4s, v11.4s\n"
- "add x14, x14, #0x10\n"
+ "ldr q10, [x13, x10]\n"
+ "add x13, x13, #0x10\n"
"fmla v30.4s, v2.4s, v5.4s\n"
"fmla v31.4s, v2.4s, v6.4s\n"
- "ldr q2, [x8, #0x110]\n"
+ "ldr q2, [x17, #0x110]\n"
"fmla v28.4s, v3.4s, v11.4s\n"
- "ld1 { v11.4s }, [x12]\n"
"fmla v29.4s, v3.4s, v12.4s\n"
+ "ld1 { v11.4s }, [x11]\n"
"fmla v30.4s, v3.4s, v6.4s\n"
"fmla v31.4s, v3.4s, v8.4s\n"
- "ldr q3, [x8, #0x120]\n"
+ "ldr q3, [x17, #0x120]\n"
"fmla v28.4s, v4.4s, v12.4s\n"
- "ldr q12, [x12, x2]\n"
"fmla v29.4s, v4.4s, v14.4s\n"
+ "ldr q12, [x11, x3]\n"
"fmla v30.4s, v4.4s, v8.4s\n"
"fmla v31.4s, v4.4s, v10.4s\n"
- "ldr q4, [x8, #0x130]\n"
- "add x8, x8, #0x140\n"
+ "ldr q4, [x17, #0x130]\n"
+ "add x17, x17, #0x140\n"
"fmla v28.4s, v0.4s, v9.4s\n"
- "ldr q9, [x12, x6]\n"
"fmla v29.4s, v0.4s, v13.4s\n"
+ "ldr q9, [x11, x7]\n"
"fmla v30.4s, v0.4s, v11.4s\n"
- "ldr q11, [x12, x15]\n"
"fmla v31.4s, v0.4s, v12.4s\n"
+ "ldr q11, [x11, x14]\n"
"fmla v28.4s, v1.4s, v13.4s\n"
"fmla v29.4s, v1.4s, v5.4s\n"
"fmla v30.4s, v1.4s, v12.4s\n"
- "ldr q12, [x12, x13]\n"
"fmla v31.4s, v1.4s, v9.4s\n"
+ "ldr q12, [x11, x12]\n"
"fmla v28.4s, v2.4s, v5.4s\n"
"fmla v29.4s, v2.4s, v6.4s\n"
"fmla v30.4s, v2.4s, v9.4s\n"
- "ldr q9, [x12, x11]\n"
"fmla v31.4s, v2.4s, v11.4s\n"
- "add x12, x12, #0x10\n"
+ "ldr q9, [x11, x10]\n"
+ "add x11, x11, #0x10\n"
"fmla v28.4s, v3.4s, v6.4s\n"
"fmla v29.4s, v3.4s, v8.4s\n"
"fmla v30.4s, v3.4s, v11.4s\n"
@@ -498,72 +498,72 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"fmax v31.4s, v31.4s, v18.4s\n"
"fmin v28.4s, v28.4s, v17.4s\n"
"fmin v29.4s, v29.4s, v17.4s\n"
- "st1 { v28.4s }, [x5]\n"
+ "st1 { v28.4s }, [x6]\n"
"fmin v30.4s, v30.4s, v17.4s\n"
"fmin v31.4s, v31.4s, v17.4s\n"
- "str q29, [x5, x3]\n"
- "add x5, x5, #0x10\n"
- "st1 { v30.4s }, [x10]\n"
- "str q31, [x10, x3]\n"
- "add x10, x10, #0x10\n"
+ "str q29, [x6, x4]\n"
+ "add x6, x6, #0x10\n"
+ "st1 { v30.4s }, [x9]\n"
+ "str q31, [x9, x4]\n"
+ "add x9, x9, #0x10\n"
"4:" // Tile loop: Oddments
"tst %x[n_channels], #0x3\n"
"beq 61f\n"
- "ldr q16, [x8, #0x0]\n"
- "ldr q0, [x8, #0x10]\n"
- "add x9, x4, XZR\n"
- "add x28, x4, x2\n"
- "ldr q1, [x8, #0x20]\n"
- "ldr q2, [x8, #0x30]\n"
- "add x27, x7, XZR\n"
- "add x26, x7, x2\n"
- "ldr q3, [x8, #0x40]\n"
- "ldr q4, [x8, #0x50]\n"
- "add x25, x4, x6\n"
- "add x24, x7, x6\n"
- "add x23, x4, x15\n"
- "add x22, x4, x13\n"
- "add x21, x7, x11\n"
- "add x20, x17, XZR\n"
- "add x8, x8, #0x60\n"
+ "ldr q16, [x17, #0x0]\n"
+ "ldr q0, [x17, #0x10]\n"
+ "ldr q1, [x17, #0x20]\n"
+ "ldr q2, [x17, #0x30]\n"
+ "add x28, x5, XZR\n"
+ "add x27, x5, x3\n"
+ "ldr q3, [x17, #0x40]\n"
+ "ldr q4, [x17, #0x50]\n"
+ "add x26, x8, XZR\n"
+ "add x25, x8, x3\n"
+ "add x24, x5, x7\n"
+ "add x23, x8, x7\n"
+ "add x22, x5, x14\n"
+ "add x21, x5, x12\n"
+ "add x20, x8, x10\n"
+ "add x19, x16, XZR\n"
+ "add x17, x17, #0x60\n"
"tbz %x[n_channels], #1, 5f\n"
- "ldr d5, [x9], #0x8\n"
- "ldr d6, [x28], #0x8\n"
- "ldr d7, [x27], #0x8\n"
- "ldr d8, [x26], #0x8\n"
- "ldr d9, [x25], #0x8\n"
- "ldr d13, [x24], #0x8\n"
- "ldr d11, [x23], #0x8\n"
- "ldr d12, [x22], #0x8\n"
- "ldr d10, [x21], #0x8\n"
- "ldr d14, [x20], #0x8\n"
+ "ldr d5, [x28], #0x8\n"
+ "ldr d6, [x27], #0x8\n"
+ "ldr d7, [x26], #0x8\n"
+ "ldr d8, [x25], #0x8\n"
+ "ldr d9, [x24], #0x8\n"
+ "ldr d13, [x23], #0x8\n"
+ "ldr d11, [x22], #0x8\n"
+ "ldr d12, [x21], #0x8\n"
+ "ldr d10, [x20], #0x8\n"
+ "ldr d14, [x19], #0x8\n"
"tbz %x[n_channels], #0, 6f\n"
- "ld1 { v5.s }[2], [x9]\n"
- "ld1 { v6.s }[2], [x28]\n"
- "ld1 { v7.s }[2], [x27]\n"
- "ld1 { v8.s }[2], [x26]\n"
- "ld1 { v9.s }[2], [x25]\n"
- "ld1 { v13.s }[2], [x24]\n"
- "ld1 { v11.s }[2], [x23]\n"
- "ld1 { v12.s }[2], [x22]\n"
- "ld1 { v10.s }[2], [x21]\n"
- "ld1 { v14.s }[2], [x20]\n"
+ "ld1 { v5.s }[2], [x28]\n"
+ "ld1 { v6.s }[2], [x27]\n"
+ "ld1 { v7.s }[2], [x26]\n"
+ "ld1 { v8.s }[2], [x25]\n"
+ "ld1 { v9.s }[2], [x24]\n"
+ "ld1 { v13.s }[2], [x23]\n"
+ "ld1 { v11.s }[2], [x22]\n"
+ "ld1 { v12.s }[2], [x21]\n"
+ "ld1 { v10.s }[2], [x20]\n"
+ "ld1 { v14.s }[2], [x19]\n"
"b 6f\n"
"5:" // Tile loop: Oddments: Load inputs: (0, 0), (0, 1), (1, 0), (1, 1), (0, 2), (1, 2), (0, 3), (0, 4), (1, 5), (2, 0): Bit 1: Unset
- "ldr s5, [x9, #0x0]\n"
- "ldr s6, [x28, #0x0]\n"
- "ldr s7, [x27, #0x0]\n"
- "ldr s8, [x26, #0x0]\n"
- "ldr s9, [x25, #0x0]\n"
- "ldr s13, [x24, #0x0]\n"
- "ldr s11, [x23, #0x0]\n"
- "ldr s12, [x22, #0x0]\n"
- "ldr s10, [x21, #0x0]\n"
- "ldr s14, [x20, #0x0]\n"
+ "ldr s5, [x28, #0x0]\n"
+ "ldr s6, [x27, #0x0]\n"
+ "ldr s7, [x26, #0x0]\n"
+ "ldr s8, [x25, #0x0]\n"
+ "ldr s9, [x24, #0x0]\n"
+ "ldr s13, [x23, #0x0]\n"
+ "ldr s11, [x22, #0x0]\n"
+ "ldr s12, [x21, #0x0]\n"
+ "ldr s10, [x20, #0x0]\n"
+ "ldr s14, [x19, #0x0]\n"
"6:" // Tile loop: Oddments: Load inputs: (0, 0), (0, 1), (1, 0), (1, 1), (0, 2), (1, 2), (0, 3), (0, 4), (1, 5), (2, 0): Bit 1: End
"mov v28.16b, v16.16b\n fmla v28.4s, v0.4s, v5.4s\n"
"mov v29.16b, v16.16b\n fmla v29.4s, v0.4s, v6.4s\n"
- "add x20, x7, x15\n"
+ "add x19, x8, x14\n"
"mov v30.16b, v16.16b\n fmla v30.4s, v0.4s, v7.4s\n"
"mov v31.16b, v16.16b\n fmla v31.4s, v0.4s, v8.4s\n"
"fmla v28.4s, v1.4s, v6.4s\n"
@@ -574,364 +574,364 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"fmla v29.4s, v2.4s, v11.4s\n"
"fmla v30.4s, v2.4s, v13.4s\n"
"tbz %x[n_channels], #1, 7f\n"
- "ldr d5, [x20], #0x8\n"
+ "ldr d5, [x19], #0x8\n"
"tbz %x[n_channels], #0, 8f\n"
- "ld1 { v5.s }[2], [x20]\n"
+ "ld1 { v5.s }[2], [x19]\n"
"b 8f\n"
"7:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 1: Unset
- "ldr s5, [x20, #0x0]\n"
+ "ldr s5, [x19, #0x0]\n"
"8:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 1: End
"fmla v31.4s, v2.4s, v5.4s\n"
"fmla v28.4s, v3.4s, v11.4s\n"
- "add x20, x7, x13\n"
+ "add x19, x8, x12\n"
"fmla v29.4s, v3.4s, v12.4s\n"
"fmla v30.4s, v3.4s, v5.4s\n"
"tbz %x[n_channels], #1, 9f\n"
- "ldr d6, [x20], #0x8\n"
+ "ldr d6, [x19], #0x8\n"
"tbz %x[n_channels], #0, 10f\n"
- "ld1 { v6.s }[2], [x20]\n"
+ "ld1 { v6.s }[2], [x19]\n"
"b 10f\n"
"9:" // Tile loop: Oddments: Load inputs: (1, 4): Bit 1: Unset
- "ldr s6, [x20, #0x0]\n"
+ "ldr s6, [x19, #0x0]\n"
"10:" // Tile loop: Oddments: Load inputs: (1, 4): Bit 1: End
"fmla v31.4s, v3.4s, v6.4s\n"
"fmla v28.4s, v4.4s, v12.4s\n"
- "add x20, x4, x11\n"
+ "add x19, x5, x10\n"
"tbz %x[n_channels], #1, 11f\n"
- "ldr d9, [x20], #0x8\n"
+ "ldr d9, [x19], #0x8\n"
"tbz %x[n_channels], #0, 12f\n"
- "ld1 { v9.s }[2], [x20]\n"
+ "ld1 { v9.s }[2], [x19]\n"
"b 12f\n"
"11:" // Tile loop: Oddments: Load inputs: (0, 5): Bit 1: Unset
- "ldr s9, [x20, #0x0]\n"
+ "ldr s9, [x19, #0x0]\n"
"12:" // Tile loop: Oddments: Load inputs: (0, 5): Bit 1: End
- "ldr q0, [x8, #0x0]\n"
"fmla v29.4s, v4.4s, v9.4s\n"
"fmla v30.4s, v4.4s, v6.4s\n"
- "add x20, x17, x2\n"
+ "ldr q0, [x17, #0x0]\n"
+ "add x19, x16, x3\n"
"fmla v31.4s, v4.4s, v10.4s\n"
"fmla v28.4s, v0.4s, v7.4s\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"fmla v29.4s, v0.4s, v8.4s\n"
"fmla v30.4s, v0.4s, v14.4s\n"
"tbz %x[n_channels], #1, 13f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #0, 14f\n"
- "ld1 { v11.s }[2], [x20]\n"
+ "ld1 { v11.s }[2], [x19]\n"
"b 14f\n"
"13:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 1: Unset
- "ldr s11, [x20, #0x0]\n"
+ "ldr s11, [x19, #0x0]\n"
"14:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 1: End
- "ldr q1, [x8, #0x0]\n"
+ "ldr q1, [x17, #0x0]\n"
"fmla v31.4s, v0.4s, v11.4s\n"
"fmla v28.4s, v1.4s, v8.4s\n"
- "add x20, x17, x6\n"
+ "add x19, x16, x7\n"
"fmla v29.4s, v1.4s, v13.4s\n"
"fmla v30.4s, v1.4s, v11.4s\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #1, 15f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #0, 16f\n"
- "ld1 { v12.s }[2], [x20]\n"
+ "ld1 { v12.s }[2], [x19]\n"
"b 16f\n"
"15:" // Tile loop: Oddments: Load inputs: (2, 2): Bit 1: Unset
- "ldr s12, [x20, #0x0]\n"
+ "ldr s12, [x19, #0x0]\n"
"16:" // Tile loop: Oddments: Load inputs: (2, 2): Bit 1: End
- "ldr q2, [x8, #0x0]\n"
+ "ldr q2, [x17, #0x0]\n"
"fmla v31.4s, v1.4s, v12.4s\n"
"fmla v28.4s, v2.4s, v13.4s\n"
- "add x20, x17, x15\n"
+ "add x19, x16, x14\n"
"fmla v29.4s, v2.4s, v5.4s\n"
"fmla v30.4s, v2.4s, v12.4s\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #1, 17f\n"
- "ldr d9, [x20], #0x8\n"
+ "ldr d9, [x19], #0x8\n"
"tbz %x[n_channels], #0, 18f\n"
- "ld1 { v9.s }[2], [x20]\n"
+ "ld1 { v9.s }[2], [x19]\n"
"b 18f\n"
"17:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 1: Unset
- "ldr s9, [x20, #0x0]\n"
+ "ldr s9, [x19, #0x0]\n"
"18:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 1: End
- "ldr q3, [x8, #0x0]\n"
+ "ldr q3, [x17, #0x0]\n"
"fmla v31.4s, v2.4s, v9.4s\n"
"fmla v28.4s, v3.4s, v5.4s\n"
- "add x20, x17, x13\n"
+ "add x19, x16, x12\n"
"fmla v29.4s, v3.4s, v6.4s\n"
"fmla v30.4s, v3.4s, v9.4s\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #1, 19f\n"
- "ldr d13, [x20], #0x8\n"
+ "ldr d13, [x19], #0x8\n"
"tbz %x[n_channels], #0, 20f\n"
- "ld1 { v13.s }[2], [x20]\n"
+ "ld1 { v13.s }[2], [x19]\n"
"b 20f\n"
"19:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 1: Unset
- "ldr s13, [x20, #0x0]\n"
+ "ldr s13, [x19, #0x0]\n"
"20:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 1: End
- "ldr q4, [x8, #0x0]\n"
+ "ldr q4, [x17, #0x0]\n"
"fmla v31.4s, v3.4s, v13.4s\n"
"fmla v28.4s, v4.4s, v6.4s\n"
- "add x20, x17, x11\n"
+ "add x19, x16, x10\n"
"fmla v29.4s, v4.4s, v10.4s\n"
"fmla v30.4s, v4.4s, v13.4s\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #1, 21f\n"
- "ldr d8, [x20], #0x8\n"
+ "ldr d8, [x19], #0x8\n"
"tbz %x[n_channels], #0, 22f\n"
- "ld1 { v8.s }[2], [x20]\n"
+ "ld1 { v8.s }[2], [x19]\n"
"b 22f\n"
"21:" // Tile loop: Oddments: Load inputs: (2, 5): Bit 1: Unset
- "ldr s8, [x20, #0x0]\n"
+ "ldr s8, [x19, #0x0]\n"
"22:" // Tile loop: Oddments: Load inputs: (2, 5): Bit 1: End
- "ldr q0, [x8, #0x0]\n"
+ "ldr q0, [x17, #0x0]\n"
"fmla v31.4s, v4.4s, v8.4s\n"
"fmla v28.4s, v0.4s, v14.4s\n"
- "add x20, x16, XZR\n"
+ "add x19, x15, XZR\n"
"fmla v29.4s, v0.4s, v11.4s\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #1, 23f\n"
- "ldr d5, [x20], #0x8\n"
+ "ldr d5, [x19], #0x8\n"
"tbz %x[n_channels], #0, 24f\n"
- "ld1 { v5.s }[2], [x20]\n"
+ "ld1 { v5.s }[2], [x19]\n"
"b 24f\n"
"23:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 1: Unset
- "ldr s5, [x20, #0x0]\n"
+ "ldr s5, [x19, #0x0]\n"
"24:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 1: End
"fmla v30.4s, v0.4s, v5.4s\n"
- "add x20, x16, x2\n"
+ "add x19, x15, x3\n"
"tbz %x[n_channels], #1, 25f\n"
- "ldr d6, [x20], #0x8\n"
+ "ldr d6, [x19], #0x8\n"
"tbz %x[n_channels], #0, 26f\n"
- "ld1 { v6.s }[2], [x20]\n"
+ "ld1 { v6.s }[2], [x19]\n"
"b 26f\n"
"25:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 1: Unset
- "ldr s6, [x20, #0x0]\n"
+ "ldr s6, [x19, #0x0]\n"
"26:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 1: End
- "ldr q1, [x8, #0x0]\n"
+ "ldr q1, [x17, #0x0]\n"
"fmla v31.4s, v0.4s, v6.4s\n"
"fmla v28.4s, v1.4s, v11.4s\n"
- "add x20, x16, x6\n"
+ "add x19, x15, x7\n"
"fmla v29.4s, v1.4s, v12.4s\n"
"fmla v30.4s, v1.4s, v6.4s\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #1, 27f\n"
- "ldr d10, [x20], #0x8\n"
+ "ldr d10, [x19], #0x8\n"
"tbz %x[n_channels], #0, 28f\n"
- "ld1 { v10.s }[2], [x20]\n"
+ "ld1 { v10.s }[2], [x19]\n"
"b 28f\n"
"27:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 1: Unset
- "ldr s10, [x20, #0x0]\n"
+ "ldr s10, [x19, #0x0]\n"
"28:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 1: End
- "ldr q2, [x8, #0x0]\n"
+ "ldr q2, [x17, #0x0]\n"
"fmla v31.4s, v1.4s, v10.4s\n"
"fmla v28.4s, v2.4s, v12.4s\n"
- "add x20, x16, x15\n"
+ "add x19, x15, x14\n"
"fmla v29.4s, v2.4s, v9.4s\n"
"fmla v30.4s, v2.4s, v10.4s\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #1, 29f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #0, 30f\n"
- "ld1 { v11.s }[2], [x20]\n"
+ "ld1 { v11.s }[2], [x19]\n"
"b 30f\n"
"29:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 1: Unset
- "ldr s11, [x20, #0x0]\n"
+ "ldr s11, [x19, #0x0]\n"
"30:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 1: End
- "ldr q3, [x8, #0x0]\n"
+ "ldr q3, [x17, #0x0]\n"
"fmla v31.4s, v2.4s, v11.4s\n"
"fmla v28.4s, v3.4s, v9.4s\n"
- "add x20, x16, x13\n"
+ "add x19, x15, x12\n"
"fmla v29.4s, v3.4s, v13.4s\n"
"fmla v30.4s, v3.4s, v11.4s\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #1, 31f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #0, 32f\n"
- "ld1 { v12.s }[2], [x20]\n"
+ "ld1 { v12.s }[2], [x19]\n"
"b 32f\n"
"31:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 1: Unset
- "ldr s12, [x20, #0x0]\n"
+ "ldr s12, [x19, #0x0]\n"
"32:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 1: End
- "ldr q4, [x8, #0x0]\n"
+ "ldr q4, [x17, #0x0]\n"
"fmla v31.4s, v3.4s, v12.4s\n"
"fmla v28.4s, v4.4s, v13.4s\n"
- "add x20, x16, x11\n"
+ "add x19, x15, x10\n"
"fmla v29.4s, v4.4s, v8.4s\n"
"fmla v30.4s, v4.4s, v12.4s\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #1, 33f\n"
- "ldr d14, [x20], #0x8\n"
+ "ldr d14, [x19], #0x8\n"
"tbz %x[n_channels], #0, 34f\n"
- "ld1 { v14.s }[2], [x20]\n"
+ "ld1 { v14.s }[2], [x19]\n"
"b 34f\n"
"33:" // Tile loop: Oddments: Load inputs: (3, 5): Bit 1: Unset
- "ldr s14, [x20, #0x0]\n"
+ "ldr s14, [x19, #0x0]\n"
"34:" // Tile loop: Oddments: Load inputs: (3, 5): Bit 1: End
- "ldr q0, [x8, #0x0]\n"
+ "ldr q0, [x17, #0x0]\n"
"fmla v31.4s, v4.4s, v14.4s\n"
"fmla v28.4s, v0.4s, v5.4s\n"
- "add x20, x14, XZR\n"
+ "add x19, x13, XZR\n"
"fmla v29.4s, v0.4s, v6.4s\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #1, 35f\n"
- "ldr d9, [x20], #0x8\n"
+ "ldr d9, [x19], #0x8\n"
"tbz %x[n_channels], #0, 36f\n"
- "ld1 { v9.s }[2], [x20]\n"
+ "ld1 { v9.s }[2], [x19]\n"
"b 36f\n"
"35:" // Tile loop: Oddments: Load inputs: (4, 0): Bit 1: Unset
- "ldr s9, [x20, #0x0]\n"
+ "ldr s9, [x19, #0x0]\n"
"36:" // Tile loop: Oddments: Load inputs: (4, 0): Bit 1: End
"fmla v30.4s, v0.4s, v9.4s\n"
- "add x20, x14, x2\n"
+ "add x19, x13, x3\n"
"tbz %x[n_channels], #1, 37f\n"
- "ldr d13, [x20], #0x8\n"
+ "ldr d13, [x19], #0x8\n"
"tbz %x[n_channels], #0, 38f\n"
- "ld1 { v13.s }[2], [x20]\n"
+ "ld1 { v13.s }[2], [x19]\n"
"b 38f\n"
"37:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 1: Unset
- "ldr s13, [x20, #0x0]\n"
+ "ldr s13, [x19, #0x0]\n"
"38:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 1: End
- "ldr q1, [x8, #0x0]\n"
+ "ldr q1, [x17, #0x0]\n"
"fmla v31.4s, v0.4s, v13.4s\n"
"fmla v28.4s, v1.4s, v6.4s\n"
- "add x20, x14, x6\n"
+ "add x19, x13, x7\n"
"fmla v29.4s, v1.4s, v10.4s\n"
"fmla v30.4s, v1.4s, v13.4s\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #1, 39f\n"
- "ldr d5, [x20], #0x8\n"
+ "ldr d5, [x19], #0x8\n"
"tbz %x[n_channels], #0, 40f\n"
- "ld1 { v5.s }[2], [x20]\n"
+ "ld1 { v5.s }[2], [x19]\n"
"b 40f\n"
"39:" // Tile loop: Oddments: Load inputs: (4, 2): Bit 1: Unset
- "ldr s5, [x20, #0x0]\n"
+ "ldr s5, [x19, #0x0]\n"
"40:" // Tile loop: Oddments: Load inputs: (4, 2): Bit 1: End
- "ldr q2, [x8, #0x0]\n"
+ "ldr q2, [x17, #0x0]\n"
"fmla v31.4s, v1.4s, v5.4s\n"
"fmla v28.4s, v2.4s, v10.4s\n"
- "add x20, x14, x15\n"
+ "add x19, x13, x14\n"
"fmla v29.4s, v2.4s, v11.4s\n"
"fmla v30.4s, v2.4s, v5.4s\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #1, 41f\n"
- "ldr d6, [x20], #0x8\n"
+ "ldr d6, [x19], #0x8\n"
"tbz %x[n_channels], #0, 42f\n"
- "ld1 { v6.s }[2], [x20]\n"
+ "ld1 { v6.s }[2], [x19]\n"
"b 42f\n"
"41:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 1: Unset
- "ldr s6, [x20, #0x0]\n"
+ "ldr s6, [x19, #0x0]\n"
"42:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 1: End
- "ldr q3, [x8, #0x0]\n"
+ "ldr q3, [x17, #0x0]\n"
"fmla v31.4s, v2.4s, v6.4s\n"
"fmla v28.4s, v3.4s, v11.4s\n"
- "add x20, x14, x13\n"
+ "add x19, x13, x12\n"
"fmla v29.4s, v3.4s, v12.4s\n"
"fmla v30.4s, v3.4s, v6.4s\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #1, 43f\n"
- "ldr d8, [x20], #0x8\n"
+ "ldr d8, [x19], #0x8\n"
"tbz %x[n_channels], #0, 44f\n"
- "ld1 { v8.s }[2], [x20]\n"
+ "ld1 { v8.s }[2], [x19]\n"
"b 44f\n"
"43:" // Tile loop: Oddments: Load inputs: (4, 4): Bit 1: Unset
- "ldr s8, [x20, #0x0]\n"
+ "ldr s8, [x19, #0x0]\n"
"44:" // Tile loop: Oddments: Load inputs: (4, 4): Bit 1: End
- "ldr q4, [x8, #0x0]\n"
+ "ldr q4, [x17, #0x0]\n"
"fmla v31.4s, v3.4s, v8.4s\n"
"fmla v28.4s, v4.4s, v12.4s\n"
- "add x20, x14, x11\n"
+ "add x19, x13, x10\n"
"fmla v29.4s, v4.4s, v14.4s\n"
"fmla v30.4s, v4.4s, v8.4s\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #1, 45f\n"
- "ldr d10, [x20], #0x8\n"
+ "ldr d10, [x19], #0x8\n"
"tbz %x[n_channels], #0, 46f\n"
- "ld1 { v10.s }[2], [x20]\n"
+ "ld1 { v10.s }[2], [x19]\n"
"b 46f\n"
"45:" // Tile loop: Oddments: Load inputs: (4, 5): Bit 1: Unset
- "ldr s10, [x20, #0x0]\n"
+ "ldr s10, [x19, #0x0]\n"
"46:" // Tile loop: Oddments: Load inputs: (4, 5): Bit 1: End
- "ldr q0, [x8, #0x0]\n"
+ "ldr q0, [x17, #0x0]\n"
"fmla v31.4s, v4.4s, v10.4s\n"
"fmla v28.4s, v0.4s, v9.4s\n"
- "add x20, x12, XZR\n"
+ "add x19, x11, XZR\n"
"fmla v29.4s, v0.4s, v13.4s\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #1, 47f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #0, 48f\n"
- "ld1 { v11.s }[2], [x20]\n"
+ "ld1 { v11.s }[2], [x19]\n"
"b 48f\n"
"47:" // Tile loop: Oddments: Load inputs: (5, 0): Bit 1: Unset
- "ldr s11, [x20, #0x0]\n"
+ "ldr s11, [x19, #0x0]\n"
"48:" // Tile loop: Oddments: Load inputs: (5, 0): Bit 1: End
"fmla v30.4s, v0.4s, v11.4s\n"
- "add x20, x12, x2\n"
+ "add x19, x11, x3\n"
"tbz %x[n_channels], #1, 49f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #0, 50f\n"
- "ld1 { v12.s }[2], [x20]\n"
+ "ld1 { v12.s }[2], [x19]\n"
"b 50f\n"
"49:" // Tile loop: Oddments: Load inputs: (5, 1): Bit 1: Unset
- "ldr s12, [x20, #0x0]\n"
+ "ldr s12, [x19, #0x0]\n"
"50:" // Tile loop: Oddments: Load inputs: (5, 1): Bit 1: End
- "ldr q1, [x8, #0x0]\n"
+ "ldr q1, [x17, #0x0]\n"
"fmla v31.4s, v0.4s, v12.4s\n"
"fmla v28.4s, v1.4s, v13.4s\n"
- "add x20, x12, x6\n"
+ "add x19, x11, x7\n"
"fmla v29.4s, v1.4s, v5.4s\n"
"fmla v30.4s, v1.4s, v12.4s\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #1, 51f\n"
- "ldr d9, [x20], #0x8\n"
+ "ldr d9, [x19], #0x8\n"
"tbz %x[n_channels], #0, 52f\n"
- "ld1 { v9.s }[2], [x20]\n"
+ "ld1 { v9.s }[2], [x19]\n"
"b 52f\n"
"51:" // Tile loop: Oddments: Load inputs: (5, 2): Bit 1: Unset
- "ldr s9, [x20, #0x0]\n"
+ "ldr s9, [x19, #0x0]\n"
"52:" // Tile loop: Oddments: Load inputs: (5, 2): Bit 1: End
- "ldr q2, [x8, #0x0]\n"
+ "ldr q2, [x17, #0x0]\n"
"fmla v31.4s, v1.4s, v9.4s\n"
"fmla v28.4s, v2.4s, v5.4s\n"
- "add x20, x12, x15\n"
+ "add x19, x11, x14\n"
"fmla v29.4s, v2.4s, v6.4s\n"
"fmla v30.4s, v2.4s, v9.4s\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #1, 53f\n"
- "ldr d11, [x20], #0x8\n"
+ "ldr d11, [x19], #0x8\n"
"tbz %x[n_channels], #0, 54f\n"
- "ld1 { v11.s }[2], [x20]\n"
+ "ld1 { v11.s }[2], [x19]\n"
"b 54f\n"
"53:" // Tile loop: Oddments: Load inputs: (5, 3): Bit 1: Unset
- "ldr s11, [x20, #0x0]\n"
+ "ldr s11, [x19, #0x0]\n"
"54:" // Tile loop: Oddments: Load inputs: (5, 3): Bit 1: End
- "ldr q3, [x8, #0x0]\n"
+ "ldr q3, [x17, #0x0]\n"
"fmla v31.4s, v2.4s, v11.4s\n"
"fmla v28.4s, v3.4s, v6.4s\n"
- "add x20, x12, x13\n"
+ "add x19, x11, x12\n"
"fmla v29.4s, v3.4s, v8.4s\n"
"fmla v30.4s, v3.4s, v11.4s\n"
- "add x8, x8, #0x10\n"
+ "add x17, x17, #0x10\n"
"tbz %x[n_channels], #1, 55f\n"
- "ldr d12, [x20], #0x8\n"
+ "ldr d12, [x19], #0x8\n"
"tbz %x[n_channels], #0, 56f\n"
- "ld1 { v12.s }[2], [x20]\n"
+ "ld1 { v12.s }[2], [x19]\n"
"b 56f\n"
"55:" // Tile loop: Oddments: Load inputs: (5, 4): Bit 1: Unset
- "ldr s12, [x20, #0x0]\n"
+ "ldr s12, [x19, #0x0]\n"
"56:" // Tile loop: Oddments: Load inputs: (5, 4): Bit 1: End
- "ldr q4, [x8, #0x0]\n"
+ "ldr q4, [x17, #0x0]\n"
"fmla v31.4s, v3.4s, v12.4s\n"
"fmla v28.4s, v4.4s, v8.4s\n"
- "add x20, x12, x11\n"
+ "add x19, x11, x10\n"
"fmla v29.4s, v4.4s, v10.4s\n"
"fmla v30.4s, v4.4s, v12.4s\n"
"tbz %x[n_channels], #1, 57f\n"
- "ldr d9, [x20], #0x8\n"
+ "ldr d9, [x19], #0x8\n"
"tbz %x[n_channels], #0, 58f\n"
- "ld1 { v9.s }[2], [x20]\n"
+ "ld1 { v9.s }[2], [x19]\n"
"b 58f\n"
"57:" // Tile loop: Oddments: Load inputs: (5, 5): Bit 1: Unset
- "ldr s9, [x20, #0x0]\n"
+ "ldr s9, [x19, #0x0]\n"
"58:" // Tile loop: Oddments: Load inputs: (5, 5): Bit 1: End
"fmla v31.4s, v4.4s, v9.4s\n"
"fmax v28.4s, v28.4s, v18.4s\n"
@@ -943,46 +943,46 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"fmin v30.4s, v30.4s, v17.4s\n"
"fmin v31.4s, v31.4s, v17.4s\n"
"tbz %x[n_channels], #1, 59f\n"
- "mov x21, x5\n"
- "mov x20, x10\n"
- "st1 { v28.d }[0], [x21], x3\n"
- "st1 { v30.d }[0], [x20], x3\n"
- "add x5, x5, #0x8\n"
- "add x10, x10, #0x8\n"
- "st1 { v29.d }[0], [x21]\n"
- "st1 { v31.d }[0], [x20]\n"
+ "mov x20, x6\n"
+ "mov x19, x9\n"
+ "st1 { v28.d }[0], [x20], x4\n"
+ "add x6, x6, #0x8\n"
+ "add x9, x9, #0x8\n"
+ "st1 { v30.d }[0], [x19], x4\n"
+ "st1 { v29.d }[0], [x20]\n"
+ "st1 { v31.d }[0], [x19]\n"
"tbz %x[n_channels], #0, 60f\n"
- "mov x21, x5\n"
- "mov x20, x10\n"
- "st1 { v28.s }[2], [x21], x3\n"
- "st1 { v30.s }[2], [x20], x3\n"
- "st1 { v29.s }[2], [x21]\n"
- "st1 { v31.s }[2], [x20]\n"
+ "mov x20, x6\n"
+ "mov x19, x9\n"
+ "st1 { v28.s }[2], [x20], x4\n"
+ "st1 { v30.s }[2], [x19], x4\n"
+ "st1 { v29.s }[2], [x20]\n"
+ "st1 { v31.s }[2], [x19]\n"
"b 60f\n"
"59:" // Tile loop: Oddments: Store: Bit 1: Unset
- "mov x21, x5\n"
- "mov x20, x10\n"
- "st1 { v28.s }[0], [x21], x3\n"
- "st1 { v30.s }[0], [x20], x3\n"
- "st1 { v29.s }[0], [x21]\n"
- "st1 { v31.s }[0], [x20]\n"
+ "mov x20, x6\n"
+ "mov x19, x9\n"
+ "st1 { v28.s }[0], [x20], x4\n"
+ "st1 { v30.s }[0], [x19], x4\n"
+ "st1 { v29.s }[0], [x20]\n"
+ "st1 { v31.s }[0], [x19]\n"
"60:" // Tile loop: Oddments: Store: Bit 1: End
"61:" // Tile loop: End
- "ldr x26, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x27, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "add x26, x26, #0x1\n"
- "add x21, x27, #0x1\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "cmp x26, x20\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "csel x27, x27, x21, LT\n"
- "csel x26, x26, XZR, LT\n"
- "cmp x27, x20\n"
+ "ldr x25, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x26, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "add x25, x25, #0x1\n"
+ "add x20, x26, #0x1\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "cmp x25, x19\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "csel x26, x26, x20, LT\n"
+ "csel x25, x25, XZR, LT\n"
+ "cmp x26, x19\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v28", "v29", "v30", "v31", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v28", "v29", "v30", "v31", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp
index 860adac326..4754a6f6f1 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -99,422 +99,422 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
__asm__ __volatile__(
"ldr x21, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "mov x17, #0x10\n" // cntb _, ALL, #1
- "lsr x9, %x[n_channels], #0x2\n"
+ "mov x28, #0x10\n" // cntb _, ALL, #1
+ "lsr x27, %x[n_channels], #0x2\n"
"ldr x16, [%x[params_struct], %[offsetof_args_params]]\n"
"add x20, %x[params_struct], %[offsetof_args_min]\n"
+ "add x19, %x[params_struct], %[offsetof_args_max]\n"
+ "ldp x15, x14, [x21, #0x0]\n"
+ "ldp x13, x12, [x21, #0x10]\n"
+ "add x11, %x[params_struct], %[offsetof_Args_inptrs]\n"
"ld1r { v18.4s }, [x20]\n"
- "add x20, %x[params_struct], %[offsetof_args_max]\n"
- "ld1r { v17.4s }, [x20]\n"
- "add x15, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "ldp x14, x13, [x21, #0x0]\n"
- "ldp x12, x11, [x21, #0x10]\n"
+ "ld1r { v17.4s }, [x19]\n"
"mov x10, #0x0\n"
- "sub x28, XZR, x17\n"
- "cbz x9, 3f\n"
+ "sub x9, XZR, x28\n"
+ "cbz x27, 3f\n"
+ "ldp x26, x25, [x11, #0x0]\n"
+ "ldr q5, [x26, x10]\n"
+ "ldr q6, [x25, x10]\n"
+ "ldp x24, x23, [x11, #0x10]\n"
+ "cmp x28, x27, LSL #4\n"
+ "ldp x22, x21, [x11, #0x20]\n"
+ "ldp x20, x19, [x11, #0x30]\n"
+ "ldp x26, x25, [x11, #0x40]\n"
"ldr q16, [x16, #0x0]\n"
"ldr q0, [x16, #0x10]\n"
- "cmp x17, x9, LSL #4\n"
"ldr q1, [x16, #0x20]\n"
"ldr q2, [x16, #0x30]\n"
"ldr q3, [x16, #0x40]\n"
"ldr q4, [x16, #0x50]\n"
+ "ldr q7, [x24, x10]\n"
"add x16, x16, #0x60\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "ldr q5, [x27, x10]\n"
- "ldr q6, [x26, x10]\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "ldr q7, [x25, x10]\n"
- "ldr q8, [x24, x10]\n"
- "ldp x23, x22, [x15, #0x20]\n"
- "ldr q9, [x23, x10]\n"
- "ldr q13, [x22, x10]\n"
- "ldp x21, x20, [x15, #0x30]\n"
- "ldr q11, [x21, x10]\n"
- "ldr q12, [x20, x10]\n"
- "ldp x27, x26, [x15, #0x40]\n"
- "ldr q10, [x27, x10]\n"
- "ldr q14, [x26, x10]\n"
+ "ldr q8, [x23, x10]\n"
+ "ldr q9, [x22, x10]\n"
+ "ldr q13, [x21, x10]\n"
+ "ldr q11, [x20, x10]\n"
+ "ldr q12, [x19, x10]\n"
+ "ldr q10, [x26, x10]\n"
+ "ldr q14, [x25, x10]\n"
"bge 2f\n"
"1:" // Channel loop
"mov v28.16b, v16.16b\n fmla v28.4s, v0.4s, v5.4s\n"
"mov v29.16b, v16.16b\n fmla v29.4s, v0.4s, v6.4s\n"
- "ldr x25, [x15, #0x50]\n"
- "ldr q5, [x25, x10]\n"
+ "ldr x24, [x11, #0x50]\n"
+ "ldr q5, [x24, x10]\n"
"mov v30.16b, v16.16b\n fmla v30.4s, v0.4s, v7.4s\n"
"mov v31.16b, v16.16b\n fmla v31.4s, v0.4s, v8.4s\n"
- "ldr q0, [x16, #0x0]\n"
- "ldr q16, [x16, #0x140]\n"
+ "ldr x23, [x11, #0x58]\n"
+ "ldr x22, [x11, #0x60]\n"
"fmla v28.4s, v1.4s, v6.4s\n"
"fmla v29.4s, v1.4s, v9.4s\n"
- "ldr x24, [x15, #0x58]\n"
- "ldr q6, [x24, x10]\n"
+ "ldr q6, [x23, x10]\n"
+ "ldr x21, [x11, #0x68]\n"
"fmla v30.4s, v1.4s, v8.4s\n"
"fmla v31.4s, v1.4s, v13.4s\n"
- "ldr q1, [x16, #0x10]\n"
- "ldr x23, [x15, #0x60]\n"
+ "ldr q0, [x16, #0x0]\n"
+ "ldr x20, [x11, #0x70]\n"
"fmla v28.4s, v2.4s, v9.4s\n"
- "ldr q9, [x23, x10]\n"
"fmla v29.4s, v2.4s, v11.4s\n"
- "ldr x22, [x15, #0x68]\n"
+ "ldr q9, [x22, x10]\n"
+ "ldr q1, [x16, #0x10]\n"
"fmla v30.4s, v2.4s, v13.4s\n"
"fmla v31.4s, v2.4s, v5.4s\n"
+ "ldr x19, [x11, #0x78]\n"
"ldr q2, [x16, #0x20]\n"
- "ldr x21, [x15, #0x70]\n"
"fmla v28.4s, v3.4s, v11.4s\n"
- "ldr q11, [x22, x10]\n"
"fmla v29.4s, v3.4s, v12.4s\n"
- "ldr x20, [x15, #0x78]\n"
+ "ldr q11, [x21, x10]\n"
+ "ldr x26, [x11, #0x80]\n"
"fmla v30.4s, v3.4s, v5.4s\n"
"fmla v31.4s, v3.4s, v6.4s\n"
"ldr q3, [x16, #0x30]\n"
- "ldr x27, [x15, #0x80]\n"
+ "ldr x25, [x11, #0x88]\n"
"fmla v28.4s, v4.4s, v12.4s\n"
- "ldr q12, [x21, x10]\n"
"fmla v29.4s, v4.4s, v9.4s\n"
- "ldr q9, [x20, x10]\n"
+ "ldr q12, [x20, x10]\n"
+ "ldr q9, [x19, x10]\n"
"fmla v30.4s, v4.4s, v6.4s\n"
"fmla v31.4s, v4.4s, v10.4s\n"
"ldr q4, [x16, #0x40]\n"
- "ldr x26, [x15, #0x88]\n"
+ "ldr x24, [x11, #0x90]\n"
"fmla v28.4s, v0.4s, v7.4s\n"
"fmla v29.4s, v0.4s, v8.4s\n"
- "ldr x25, [x15, #0x90]\n"
- "ldr x24, [x15, #0x98]\n"
+ "ldr x23, [x11, #0x98]\n"
+ "ldr x22, [x11, #0xa0]\n"
"fmla v30.4s, v0.4s, v14.4s\n"
"fmla v31.4s, v0.4s, v11.4s\n"
"ldr q0, [x16, #0x50]\n"
- "ldr x23, [x15, #0xa0]\n"
+ "ldr x21, [x11, #0xa8]\n"
"fmla v28.4s, v1.4s, v8.4s\n"
- "ldr q8, [x26, x10]\n"
"fmla v29.4s, v1.4s, v13.4s\n"
- "ldr x22, [x15, #0xa8]\n"
+ "ldr q8, [x25, x10]\n"
+ "ldr x20, [x11, #0xb0]\n"
"fmla v30.4s, v1.4s, v11.4s\n"
"fmla v31.4s, v1.4s, v12.4s\n"
"ldr q1, [x16, #0x60]\n"
- "ldr x21, [x15, #0xb0]\n"
+ "ldr x19, [x11, #0xb8]\n"
"fmla v28.4s, v2.4s, v13.4s\n"
- "ldr q13, [x27, x10]\n"
"fmla v29.4s, v2.4s, v5.4s\n"
- "ldr x20, [x15, #0xb8]\n"
+ "ldr q13, [x26, x10]\n"
+ "ldr x26, [x11, #0xc0]\n"
"fmla v30.4s, v2.4s, v12.4s\n"
"fmla v31.4s, v2.4s, v9.4s\n"
"ldr q2, [x16, #0x70]\n"
- "ldr x27, [x15, #0xc0]\n"
+ "ldr x25, [x11, #0xc8]\n"
"fmla v28.4s, v3.4s, v5.4s\n"
- "ldr q5, [x25, x10]\n"
"fmla v29.4s, v3.4s, v6.4s\n"
- "ldr x26, [x15, #0xc8]\n"
+ "ldr q5, [x24, x10]\n"
+ "ldr x24, [x11, #0xd0]\n"
"fmla v30.4s, v3.4s, v9.4s\n"
"fmla v31.4s, v3.4s, v13.4s\n"
"ldr q3, [x16, #0x80]\n"
- "ldr x25, [x15, #0xd0]\n"
+ "add x9, x9, #0x10\n"
"fmla v28.4s, v4.4s, v6.4s\n"
- "ldr q6, [x24, x10]\n"
"fmla v29.4s, v4.4s, v10.4s\n"
- "ldr q10, [x23, x10]\n"
+ "ldr q6, [x23, x10]\n"
+ "ldr q10, [x22, x10]\n"
"fmla v30.4s, v4.4s, v13.4s\n"
"fmla v31.4s, v4.4s, v8.4s\n"
"ldr q4, [x16, #0x90]\n"
- "ldr x24, [x15, #0xd8]\n"
+ "ldr x23, [x11, #0xd8]\n"
"fmla v28.4s, v0.4s, v14.4s\n"
- "ldr q14, [x20, x10]\n"
"fmla v29.4s, v0.4s, v11.4s\n"
- "ldr x23, [x15, #0xe0]\n"
+ "ldr q14, [x19, x10]\n"
+ "ldr x22, [x11, #0xe0]\n"
"fmla v30.4s, v0.4s, v5.4s\n"
"fmla v31.4s, v0.4s, v6.4s\n"
"ldr q0, [x16, #0xa0]\n"
- "ldr x20, [x15, #0xf8]\n"
+ "ldr x19, [x11, #0xf8]\n"
"fmla v28.4s, v1.4s, v11.4s\n"
- "ldr q11, [x22, x10]\n"
"fmla v29.4s, v1.4s, v12.4s\n"
- "ldr x22, [x15, #0xe8]\n"
+ "ldr q11, [x21, x10]\n"
+ "ldr x21, [x11, #0xe8]\n"
"fmla v30.4s, v1.4s, v6.4s\n"
"fmla v31.4s, v1.4s, v10.4s\n"
"ldr q1, [x16, #0xb0]\n"
- "add x28, x28, #0x10\n"
+ "ldr q16, [x16, #0x140]\n"
"fmla v28.4s, v2.4s, v12.4s\n"
- "ldr q12, [x21, x10]\n"
"fmla v29.4s, v2.4s, v9.4s\n"
- "ldr x21, [x15, #0xf0]\n"
+ "ldr q12, [x20, x10]\n"
+ "ldr x20, [x11, #0xf0]\n"
"fmla v30.4s, v2.4s, v10.4s\n"
"fmla v31.4s, v2.4s, v11.4s\n"
"ldr q2, [x16, #0xc0]\n"
"fmla v28.4s, v3.4s, v9.4s\n"
- "ldr q9, [x27, x10]\n"
"fmla v29.4s, v3.4s, v13.4s\n"
- "ldr x27, [x15, #0x100]\n"
+ "ldr q9, [x26, x10]\n"
+ "ldr x26, [x11, #0x100]\n"
"fmla v30.4s, v3.4s, v11.4s\n"
"fmla v31.4s, v3.4s, v12.4s\n"
"ldr q3, [x16, #0xd0]\n"
"fmla v28.4s, v4.4s, v13.4s\n"
- "ldr q13, [x26, x10]\n"
"fmla v29.4s, v4.4s, v8.4s\n"
- "ldr q8, [x23, x10]\n"
+ "ldr q13, [x25, x10]\n"
+ "ldr q8, [x22, x10]\n"
"fmla v30.4s, v4.4s, v12.4s\n"
"fmla v31.4s, v4.4s, v14.4s\n"
"ldr q4, [x16, #0xe0]\n"
- "ldr x26, [x15, #0x108]\n"
+ "ldr x25, [x11, #0x108]\n"
"fmla v28.4s, v0.4s, v5.4s\n"
- "ldr q5, [x25, x10]\n"
"fmla v29.4s, v0.4s, v6.4s\n"
- "ldr x25, [x15, #0x110]\n"
+ "ldr q5, [x24, x10]\n"
+ "ldr x24, [x11, #0x110]\n"
"fmla v30.4s, v0.4s, v9.4s\n"
"fmla v31.4s, v0.4s, v13.4s\n"
"ldr q0, [x16, #0xf0]\n"
"fmla v28.4s, v1.4s, v6.4s\n"
- "ldr q6, [x24, x10]\n"
"fmla v29.4s, v1.4s, v10.4s\n"
- "ldr x24, [x15, #0x118]\n"
+ "ldr q6, [x23, x10]\n"
+ "ldr x23, [x11, #0x118]\n"
"fmla v30.4s, v1.4s, v13.4s\n"
"fmla v31.4s, v1.4s, v5.4s\n"
"ldr q1, [x16, #0x100]\n"
"fmla v28.4s, v2.4s, v10.4s\n"
- "ldr q10, [x22, x10]\n"
"fmla v29.4s, v2.4s, v11.4s\n"
+ "ldr q10, [x21, x10]\n"
"fmla v30.4s, v2.4s, v5.4s\n"
"fmla v31.4s, v2.4s, v6.4s\n"
"ldr q2, [x16, #0x110]\n"
"fmla v28.4s, v3.4s, v11.4s\n"
- "ldr q11, [x21, x10]\n"
"fmla v29.4s, v3.4s, v12.4s\n"
+ "ldr q11, [x20, x10]\n"
"fmla v30.4s, v3.4s, v6.4s\n"
"fmla v31.4s, v3.4s, v8.4s\n"
"ldr q3, [x16, #0x120]\n"
"fmla v28.4s, v4.4s, v12.4s\n"
- "ldr q12, [x20, x10]\n"
"fmla v29.4s, v4.4s, v14.4s\n"
+ "ldr q12, [x19, x10]\n"
"fmla v30.4s, v4.4s, v8.4s\n"
"fmla v31.4s, v4.4s, v10.4s\n"
"ldr q4, [x16, #0x130]\n"
"fmla v28.4s, v0.4s, v9.4s\n"
- "ldr q9, [x27, x10]\n"
"fmla v29.4s, v0.4s, v13.4s\n"
+ "ldr q9, [x26, x10]\n"
"fmla v30.4s, v0.4s, v11.4s\n"
- "ldr q11, [x26, x10]\n"
"fmla v31.4s, v0.4s, v12.4s\n"
- "ldr q0, [x16, #0x150]\n"
+ "ldr q11, [x25, x10]\n"
+ "ldp x26, x25, [x11, #0x0]\n"
"fmla v28.4s, v1.4s, v13.4s\n"
"fmla v29.4s, v1.4s, v5.4s\n"
- "ldp x27, x26, [x15, #0x0]\n"
+ "ldr q0, [x16, #0x150]\n"
"fmla v30.4s, v1.4s, v12.4s\n"
- "ldr q12, [x25, x10]\n"
"fmla v31.4s, v1.4s, v9.4s\n"
+ "ldr q12, [x24, x10]\n"
"ldr q1, [x16, #0x160]\n"
"fmla v28.4s, v2.4s, v5.4s\n"
- "ldr q5, [x27, x17]\n"
"fmla v29.4s, v2.4s, v6.4s\n"
+ "ldr q5, [x26, x28]\n"
"fmla v30.4s, v2.4s, v9.4s\n"
- "ldr q9, [x24, x10]\n"
"fmla v31.4s, v2.4s, v11.4s\n"
- "ldr q2, [x16, #0x170]\n"
+ "ldr q9, [x23, x10]\n"
+ "ldp x24, x23, [x11, #0x10]\n"
"fmla v28.4s, v3.4s, v6.4s\n"
- "ldr q6, [x26, x17]\n"
"fmla v29.4s, v3.4s, v8.4s\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "ldr q7, [x25, x17]\n"
+ "ldr q6, [x25, x28]\n"
+ "ldp x22, x21, [x11, #0x20]\n"
"fmla v30.4s, v3.4s, v11.4s\n"
"fmla v31.4s, v3.4s, v12.4s\n"
- "ldr q3, [x16, #0x180]\n"
+ "ldp x20, x19, [x11, #0x30]\n"
+ "ldp x26, x25, [x11, #0x40]\n"
"fmla v28.4s, v4.4s, v8.4s\n"
- "ldr q8, [x24, x17]\n"
"fmla v29.4s, v4.4s, v10.4s\n"
- "ldp x23, x22, [x15, #0x20]\n"
- "ldr q13, [x22, x17]\n"
+ "fmax v28.4s, v28.4s, v18.4s\n"
+ "ldr q7, [x24, x28]\n"
"fmla v30.4s, v4.4s, v12.4s\n"
"fmla v31.4s, v4.4s, v9.4s\n"
- "ldr q9, [x23, x17]\n"
- "ldr q4, [x16, #0x190]\n"
- "ldp x21, x20, [x15, #0x30]\n"
- "fmax v28.4s, v28.4s, v18.4s\n"
"fmax v29.4s, v29.4s, v18.4s\n"
- "ldr q11, [x21, x17]\n"
- "ldr q12, [x20, x17]\n"
+ "ldr q8, [x23, x28]\n"
"fmax v30.4s, v30.4s, v18.4s\n"
"fmax v31.4s, v31.4s, v18.4s\n"
- "ldp x27, x26, [x15, #0x40]\n"
- "ldr q10, [x27, x17]\n"
+ "ldr q9, [x22, x28]\n"
+ "ldr q13, [x21, x28]\n"
+ "ldr q11, [x20, x28]\n"
+ "ldr q12, [x19, x28]\n"
"fmin v28.4s, v28.4s, v17.4s\n"
"fmin v29.4s, v29.4s, v17.4s\n"
- "ldr q14, [x26, x17]\n"
- "add x17, x17, #0x10\n"
- "cmp x17, x9, LSL #4\n"
+ "ldr q10, [x26, x28]\n"
+ "ldr q14, [x25, x28]\n"
+ "add x28, x28, #0x10\n"
+ "cmp x28, x27, LSL #4\n"
"fmin v30.4s, v30.4s, v17.4s\n"
"fmin v31.4s, v31.4s, v17.4s\n"
"add x10, x10, #0x10\n"
- "str q28, [x14, x28]\n"
+ "str q28, [x15, x9]\n"
+ "str q29, [x14, x9]\n"
+ "ldr q2, [x16, #0x170]\n"
+ "ldr q3, [x16, #0x180]\n"
+ "str q30, [x13, x9]\n"
+ "ldr q4, [x16, #0x190]\n"
"add x16, x16, #0x1a0\n"
- "str q29, [x13, x28]\n"
- "str q30, [x12, x28]\n"
- "str q31, [x11, x28]\n"
+ "str q31, [x12, x9]\n"
"blt 1b\n"
"2:" // Channel tail
"mov v28.16b, v16.16b\n fmla v28.4s, v0.4s, v5.4s\n"
"mov v29.16b, v16.16b\n fmla v29.4s, v0.4s, v6.4s\n"
- "ldr x25, [x15, #0x50]\n"
- "ldr q5, [x25, x10]\n"
+ "ldr x24, [x11, #0x50]\n"
+ "ldr q5, [x24, x10]\n"
"mov v30.16b, v16.16b\n fmla v30.4s, v0.4s, v7.4s\n"
"mov v31.16b, v16.16b\n fmla v31.4s, v0.4s, v8.4s\n"
- "ldr q0, [x16, #0x0]\n"
- "ldr x24, [x15, #0x58]\n"
+ "ldr x23, [x11, #0x58]\n"
+ "ldr x22, [x11, #0x60]\n"
"fmla v28.4s, v1.4s, v6.4s\n"
- "ldr q6, [x24, x10]\n"
"fmla v29.4s, v1.4s, v9.4s\n"
- "ldr x23, [x15, #0x60]\n"
+ "ldr q6, [x23, x10]\n"
+ "ldr x21, [x11, #0x68]\n"
"fmla v30.4s, v1.4s, v8.4s\n"
"fmla v31.4s, v1.4s, v13.4s\n"
- "ldr q1, [x16, #0x10]\n"
- "ldr x22, [x15, #0x68]\n"
+ "ldr q0, [x16, #0x0]\n"
+ "ldr x20, [x11, #0x70]\n"
"fmla v28.4s, v2.4s, v9.4s\n"
- "ldr q9, [x23, x10]\n"
"fmla v29.4s, v2.4s, v11.4s\n"
- "ldr x21, [x15, #0x70]\n"
+ "ldr q9, [x22, x10]\n"
+ "ldr q1, [x16, #0x10]\n"
"fmla v30.4s, v2.4s, v13.4s\n"
"fmla v31.4s, v2.4s, v5.4s\n"
+ "ldr x19, [x11, #0x78]\n"
"ldr q2, [x16, #0x20]\n"
- "ldr x20, [x15, #0x78]\n"
"fmla v28.4s, v3.4s, v11.4s\n"
- "ldr q11, [x22, x10]\n"
"fmla v29.4s, v3.4s, v12.4s\n"
- "ldr x27, [x15, #0x80]\n"
+ "ldr q11, [x21, x10]\n"
+ "ldr x26, [x11, #0x80]\n"
"fmla v30.4s, v3.4s, v5.4s\n"
"fmla v31.4s, v3.4s, v6.4s\n"
"ldr q3, [x16, #0x30]\n"
- "ldr x26, [x15, #0x88]\n"
+ "ldr x25, [x11, #0x88]\n"
"fmla v28.4s, v4.4s, v12.4s\n"
- "ldr q12, [x21, x10]\n"
"fmla v29.4s, v4.4s, v9.4s\n"
- "ldr q9, [x20, x10]\n"
+ "ldr q12, [x20, x10]\n"
+ "ldr q9, [x19, x10]\n"
"fmla v30.4s, v4.4s, v6.4s\n"
"fmla v31.4s, v4.4s, v10.4s\n"
"ldr q4, [x16, #0x40]\n"
- "ldr x25, [x15, #0x90]\n"
+ "ldr x24, [x11, #0x90]\n"
"fmla v28.4s, v0.4s, v7.4s\n"
"fmla v29.4s, v0.4s, v8.4s\n"
- "ldr x24, [x15, #0x98]\n"
- "ldr x23, [x15, #0xa0]\n"
+ "ldr x23, [x11, #0x98]\n"
+ "ldr x22, [x11, #0xa0]\n"
"fmla v30.4s, v0.4s, v14.4s\n"
"fmla v31.4s, v0.4s, v11.4s\n"
"ldr q0, [x16, #0x50]\n"
- "ldr x22, [x15, #0xa8]\n"
+ "ldr x21, [x11, #0xa8]\n"
"fmla v28.4s, v1.4s, v8.4s\n"
- "ldr q8, [x26, x10]\n"
"fmla v29.4s, v1.4s, v13.4s\n"
- "ldr x21, [x15, #0xb0]\n"
+ "ldr q8, [x25, x10]\n"
+ "ldr x20, [x11, #0xb0]\n"
"fmla v30.4s, v1.4s, v11.4s\n"
"fmla v31.4s, v1.4s, v12.4s\n"
"ldr q1, [x16, #0x60]\n"
- "ldr x20, [x15, #0xb8]\n"
+ "ldr x19, [x11, #0xb8]\n"
"fmla v28.4s, v2.4s, v13.4s\n"
- "ldr q13, [x27, x10]\n"
"fmla v29.4s, v2.4s, v5.4s\n"
- "ldr x27, [x15, #0xc0]\n"
+ "ldr q13, [x26, x10]\n"
+ "ldr x26, [x11, #0xc0]\n"
"fmla v30.4s, v2.4s, v12.4s\n"
"fmla v31.4s, v2.4s, v9.4s\n"
"ldr q2, [x16, #0x70]\n"
- "ldr x26, [x15, #0xc8]\n"
+ "ldr x25, [x11, #0xc8]\n"
"fmla v28.4s, v3.4s, v5.4s\n"
- "ldr q5, [x25, x10]\n"
"fmla v29.4s, v3.4s, v6.4s\n"
- "ldr x25, [x15, #0xd0]\n"
+ "ldr q5, [x24, x10]\n"
+ "ldr x24, [x11, #0xd0]\n"
"fmla v30.4s, v3.4s, v9.4s\n"
"fmla v31.4s, v3.4s, v13.4s\n"
"ldr q3, [x16, #0x80]\n"
- "add x28, x28, #0x10\n"
+ "add x9, x9, #0x10\n"
"fmla v28.4s, v4.4s, v6.4s\n"
- "ldr q6, [x24, x10]\n"
"fmla v29.4s, v4.4s, v10.4s\n"
- "ldr q10, [x23, x10]\n"
+ "ldr q6, [x23, x10]\n"
+ "ldr q10, [x22, x10]\n"
"fmla v30.4s, v4.4s, v13.4s\n"
"fmla v31.4s, v4.4s, v8.4s\n"
"ldr q4, [x16, #0x90]\n"
- "ldr x24, [x15, #0xd8]\n"
+ "ldr x23, [x11, #0xd8]\n"
"fmla v28.4s, v0.4s, v14.4s\n"
- "ldr q14, [x20, x10]\n"
"fmla v29.4s, v0.4s, v11.4s\n"
- "ldr x23, [x15, #0xe0]\n"
+ "ldr q14, [x19, x10]\n"
+ "ldr x22, [x11, #0xe0]\n"
"fmla v30.4s, v0.4s, v5.4s\n"
"fmla v31.4s, v0.4s, v6.4s\n"
"ldr q0, [x16, #0xa0]\n"
- "ldr x20, [x15, #0xf8]\n"
+ "ldr x19, [x11, #0xf8]\n"
"fmla v28.4s, v1.4s, v11.4s\n"
- "ldr q11, [x22, x10]\n"
"fmla v29.4s, v1.4s, v12.4s\n"
- "ldr x22, [x15, #0xe8]\n"
+ "ldr q11, [x21, x10]\n"
+ "ldr x21, [x11, #0xe8]\n"
"fmla v30.4s, v1.4s, v6.4s\n"
"fmla v31.4s, v1.4s, v10.4s\n"
"ldr q1, [x16, #0xb0]\n"
"fmla v28.4s, v2.4s, v12.4s\n"
- "ldr q12, [x21, x10]\n"
"fmla v29.4s, v2.4s, v9.4s\n"
- "ldr x21, [x15, #0xf0]\n"
+ "ldr q12, [x20, x10]\n"
+ "ldr x20, [x11, #0xf0]\n"
"fmla v30.4s, v2.4s, v10.4s\n"
"fmla v31.4s, v2.4s, v11.4s\n"
"ldr q2, [x16, #0xc0]\n"
"fmla v28.4s, v3.4s, v9.4s\n"
- "ldr q9, [x27, x10]\n"
"fmla v29.4s, v3.4s, v13.4s\n"
- "ldr x27, [x15, #0x100]\n"
+ "ldr q9, [x26, x10]\n"
+ "ldr x26, [x11, #0x100]\n"
"fmla v30.4s, v3.4s, v11.4s\n"
"fmla v31.4s, v3.4s, v12.4s\n"
"ldr q3, [x16, #0xd0]\n"
"fmla v28.4s, v4.4s, v13.4s\n"
- "ldr q13, [x26, x10]\n"
"fmla v29.4s, v4.4s, v8.4s\n"
- "ldr q8, [x23, x10]\n"
+ "ldr q13, [x25, x10]\n"
+ "ldr q8, [x22, x10]\n"
"fmla v30.4s, v4.4s, v12.4s\n"
"fmla v31.4s, v4.4s, v14.4s\n"
"ldr q4, [x16, #0xe0]\n"
- "ldr x26, [x15, #0x108]\n"
+ "ldr x25, [x11, #0x108]\n"
"fmla v28.4s, v0.4s, v5.4s\n"
- "ldr q5, [x25, x10]\n"
"fmla v29.4s, v0.4s, v6.4s\n"
- "ldr x25, [x15, #0x110]\n"
+ "ldr q5, [x24, x10]\n"
+ "ldr x24, [x11, #0x110]\n"
"fmla v30.4s, v0.4s, v9.4s\n"
"fmla v31.4s, v0.4s, v13.4s\n"
"ldr q0, [x16, #0xf0]\n"
"fmla v28.4s, v1.4s, v6.4s\n"
- "ldr q6, [x24, x10]\n"
"fmla v29.4s, v1.4s, v10.4s\n"
- "ldr x24, [x15, #0x118]\n"
+ "ldr q6, [x23, x10]\n"
+ "ldr x23, [x11, #0x118]\n"
"fmla v30.4s, v1.4s, v13.4s\n"
"fmla v31.4s, v1.4s, v5.4s\n"
"ldr q1, [x16, #0x100]\n"
"fmla v28.4s, v2.4s, v10.4s\n"
- "ldr q10, [x22, x10]\n"
"fmla v29.4s, v2.4s, v11.4s\n"
+ "ldr q10, [x21, x10]\n"
"fmla v30.4s, v2.4s, v5.4s\n"
"fmla v31.4s, v2.4s, v6.4s\n"
"ldr q2, [x16, #0x110]\n"
"fmla v28.4s, v3.4s, v11.4s\n"
- "ldr q11, [x21, x10]\n"
"fmla v29.4s, v3.4s, v12.4s\n"
+ "ldr q11, [x20, x10]\n"
"fmla v30.4s, v3.4s, v6.4s\n"
"fmla v31.4s, v3.4s, v8.4s\n"
"ldr q3, [x16, #0x120]\n"
"fmla v28.4s, v4.4s, v12.4s\n"
- "ldr q12, [x20, x10]\n"
"fmla v29.4s, v4.4s, v14.4s\n"
+ "ldr q12, [x19, x10]\n"
"fmla v30.4s, v4.4s, v8.4s\n"
"fmla v31.4s, v4.4s, v10.4s\n"
"ldr q4, [x16, #0x130]\n"
"add x16, x16, #0x140\n"
"fmla v28.4s, v0.4s, v9.4s\n"
- "ldr q9, [x27, x10]\n"
"fmla v29.4s, v0.4s, v13.4s\n"
+ "ldr q9, [x26, x10]\n"
"fmla v30.4s, v0.4s, v11.4s\n"
- "ldr q11, [x26, x10]\n"
"fmla v31.4s, v0.4s, v12.4s\n"
+ "ldr q11, [x25, x10]\n"
"fmla v28.4s, v1.4s, v13.4s\n"
"fmla v29.4s, v1.4s, v5.4s\n"
"fmla v30.4s, v1.4s, v12.4s\n"
- "ldr q12, [x25, x10]\n"
"fmla v31.4s, v1.4s, v9.4s\n"
+ "ldr q12, [x24, x10]\n"
"fmla v28.4s, v2.4s, v5.4s\n"
"fmla v29.4s, v2.4s, v6.4s\n"
"fmla v30.4s, v2.4s, v9.4s\n"
- "ldr q9, [x24, x10]\n"
"fmla v31.4s, v2.4s, v11.4s\n"
+ "ldr q9, [x23, x10]\n"
"add x10, x10, #0x10\n"
"fmla v28.4s, v3.4s, v6.4s\n"
"fmla v29.4s, v3.4s, v8.4s\n"
@@ -530,86 +530,86 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"fmax v31.4s, v31.4s, v18.4s\n"
"fmin v28.4s, v28.4s, v17.4s\n"
"fmin v29.4s, v29.4s, v17.4s\n"
- "str q28, [x14, x28]\n"
+ "str q28, [x15, x9]\n"
"fmin v30.4s, v30.4s, v17.4s\n"
"fmin v31.4s, v31.4s, v17.4s\n"
- "str q29, [x13, x28]\n"
- "str q30, [x12, x28]\n"
- "str q31, [x11, x28]\n"
+ "str q29, [x14, x9]\n"
+ "str q30, [x13, x9]\n"
+ "str q31, [x12, x9]\n"
"3:" // Oddments
"tst %x[n_channels], #0x3\n"
"beq 60f\n"
- "ldr q16, [x16, #0x0]\n"
- "ldr q0, [x16, #0x10]\n"
- "mov x28, x10\n"
- "add x14, x14, x28\n"
- "ldr q1, [x16, #0x20]\n"
- "ldr q2, [x16, #0x30]\n"
- "add x13, x13, x28\n"
- "add x12, x12, x28\n"
- "ldr q3, [x16, #0x40]\n"
- "ldr q4, [x16, #0x50]\n"
- "add x11, x11, x28\n"
- "ldr x9, [x15, #0x0]\n"
- "ldr x28, [x15, #0x8]\n"
- "add x9, x9, x10\n"
+ "mov x9, x10\n"
+ "ldr x28, [x11, #0x0]\n"
+ "ldr x27, [x11, #0x8]\n"
+ "ldr x26, [x11, #0x10]\n"
+ "add x15, x15, x9\n"
+ "add x14, x14, x9\n"
+ "ldr x25, [x11, #0x18]\n"
+ "ldr x24, [x11, #0x20]\n"
+ "add x13, x13, x9\n"
+ "add x12, x12, x9\n"
+ "ldr x23, [x11, #0x28]\n"
+ "ldr x22, [x11, #0x30]\n"
"add x28, x28, x10\n"
- "ldr x27, [x15, #0x10]\n"
- "ldr x26, [x15, #0x18]\n"
"add x27, x27, x10\n"
+ "ldr x21, [x11, #0x38]\n"
+ "ldr x20, [x11, #0x40]\n"
"add x26, x26, x10\n"
- "ldr x25, [x15, #0x20]\n"
- "ldr x24, [x15, #0x28]\n"
"add x25, x25, x10\n"
+ "ldr x19, [x11, #0x48]\n"
+ "ldr q16, [x16, #0x0]\n"
"add x24, x24, x10\n"
- "ldr x23, [x15, #0x30]\n"
- "ldr x22, [x15, #0x38]\n"
"add x23, x23, x10\n"
+ "ldr q0, [x16, #0x10]\n"
+ "ldr q1, [x16, #0x20]\n"
"add x22, x22, x10\n"
- "ldr x21, [x15, #0x40]\n"
- "ldr x20, [x15, #0x48]\n"
"add x21, x21, x10\n"
+ "ldr q2, [x16, #0x30]\n"
+ "ldr q3, [x16, #0x40]\n"
"add x20, x20, x10\n"
+ "add x19, x19, x10\n"
+ "ldr q4, [x16, #0x50]\n"
"add x16, x16, #0x60\n"
"tbz %x[n_channels], #1, 4f\n"
- "ld1 { v5.d }[0], [x9], #0x8\n"
- "ld1 { v6.d }[0], [x28], #0x8\n"
- "ld1 { v7.d }[0], [x27], #0x8\n"
- "ld1 { v8.d }[0], [x26], #0x8\n"
- "ld1 { v9.d }[0], [x25], #0x8\n"
- "ld1 { v13.d }[0], [x24], #0x8\n"
- "ld1 { v11.d }[0], [x23], #0x8\n"
- "ld1 { v12.d }[0], [x22], #0x8\n"
- "ld1 { v10.d }[0], [x21], #0x8\n"
- "ld1 { v14.d }[0], [x20], #0x8\n"
+ "ld1 { v5.d }[0], [x28], #0x8\n"
+ "ld1 { v6.d }[0], [x27], #0x8\n"
+ "ld1 { v7.d }[0], [x26], #0x8\n"
+ "ld1 { v8.d }[0], [x25], #0x8\n"
+ "ld1 { v9.d }[0], [x24], #0x8\n"
+ "ld1 { v13.d }[0], [x23], #0x8\n"
+ "ld1 { v11.d }[0], [x22], #0x8\n"
+ "ld1 { v12.d }[0], [x21], #0x8\n"
+ "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v14.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 5f\n"
- "ld1 { v5.s }[2], [x9], #0x4\n"
- "ld1 { v6.s }[2], [x28], #0x4\n"
- "ld1 { v7.s }[2], [x27], #0x4\n"
- "ld1 { v8.s }[2], [x26], #0x4\n"
- "ld1 { v9.s }[2], [x25], #0x4\n"
- "ld1 { v13.s }[2], [x24], #0x4\n"
- "ld1 { v11.s }[2], [x23], #0x4\n"
- "ld1 { v12.s }[2], [x22], #0x4\n"
- "ld1 { v10.s }[2], [x21], #0x4\n"
- "ld1 { v14.s }[2], [x20], #0x4\n"
+ "ld1 { v5.s }[2], [x28], #0x4\n"
+ "ld1 { v6.s }[2], [x27], #0x4\n"
+ "ld1 { v7.s }[2], [x26], #0x4\n"
+ "ld1 { v8.s }[2], [x25], #0x4\n"
+ "ld1 { v9.s }[2], [x24], #0x4\n"
+ "ld1 { v13.s }[2], [x23], #0x4\n"
+ "ld1 { v11.s }[2], [x22], #0x4\n"
+ "ld1 { v12.s }[2], [x21], #0x4\n"
+ "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v14.s }[2], [x19], #0x4\n"
"b 5f\n"
"4:" // Oddments: Load inputs (0, 0), (0, 1), (1, 0), (1, 1), (0, 2), (1, 2), (0, 3), (0, 4), (1, 5), (2, 0): Bit 1: Unset
- "ld1 { v5.s }[0], [x9], #0x4\n"
- "ld1 { v6.s }[0], [x28], #0x4\n"
- "ld1 { v7.s }[0], [x27], #0x4\n"
- "ld1 { v8.s }[0], [x26], #0x4\n"
- "ld1 { v9.s }[0], [x25], #0x4\n"
- "ld1 { v13.s }[0], [x24], #0x4\n"
- "ld1 { v11.s }[0], [x23], #0x4\n"
- "ld1 { v12.s }[0], [x22], #0x4\n"
- "ld1 { v10.s }[0], [x21], #0x4\n"
- "ld1 { v14.s }[0], [x20], #0x4\n"
+ "ld1 { v5.s }[0], [x28], #0x4\n"
+ "ld1 { v6.s }[0], [x27], #0x4\n"
+ "ld1 { v7.s }[0], [x26], #0x4\n"
+ "ld1 { v8.s }[0], [x25], #0x4\n"
+ "ld1 { v9.s }[0], [x24], #0x4\n"
+ "ld1 { v13.s }[0], [x23], #0x4\n"
+ "ld1 { v11.s }[0], [x22], #0x4\n"
+ "ld1 { v12.s }[0], [x21], #0x4\n"
+ "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v14.s }[0], [x19], #0x4\n"
"5:" // Oddments: Load inputs (0, 0), (0, 1), (1, 0), (1, 1), (0, 2), (1, 2), (0, 3), (0, 4), (1, 5), (2, 0): Bit 1: End
"mov v28.16b, v16.16b\n fmla v28.4s, v0.4s, v5.4s\n"
"mov v29.16b, v16.16b\n fmla v29.4s, v0.4s, v6.4s\n"
- "ldr x20, [x15, #0x50]\n"
- "add x20, x20, x10\n"
+ "ldr x19, [x11, #0x50]\n"
+ "add x19, x19, x10\n"
"mov v30.16b, v16.16b\n fmla v30.4s, v0.4s, v7.4s\n"
"mov v31.16b, v16.16b\n fmla v31.4s, v0.4s, v8.4s\n"
"fmla v28.4s, v1.4s, v6.4s\n"
@@ -620,389 +620,389 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"fmla v29.4s, v2.4s, v11.4s\n"
"fmla v30.4s, v2.4s, v13.4s\n"
"tbz %x[n_channels], #1, 6f\n"
- "ld1 { v5.d }[0], [x20], #0x8\n"
+ "ld1 { v5.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 7f\n"
- "ld1 { v5.s }[2], [x20], #0x4\n"
+ "ld1 { v5.s }[2], [x19], #0x4\n"
"b 7f\n"
"6:" // Oddments: Load input (1, 3): Bit 1: Unset
- "ld1 { v5.s }[0], [x20], #0x4\n"
+ "ld1 { v5.s }[0], [x19], #0x4\n"
"7:" // Oddments: Load input (1, 3): Bit 1: End
- "ldr x20, [x15, #0x58]\n"
+ "ldr x19, [x11, #0x58]\n"
"fmla v31.4s, v2.4s, v5.4s\n"
"fmla v28.4s, v3.4s, v11.4s\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"fmla v29.4s, v3.4s, v12.4s\n"
"fmla v30.4s, v3.4s, v5.4s\n"
"tbz %x[n_channels], #1, 8f\n"
- "ld1 { v6.d }[0], [x20], #0x8\n"
+ "ld1 { v6.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 9f\n"
- "ld1 { v6.s }[2], [x20], #0x4\n"
+ "ld1 { v6.s }[2], [x19], #0x4\n"
"b 9f\n"
"8:" // Oddments: Load input (1, 4): Bit 1: Unset
- "ld1 { v6.s }[0], [x20], #0x4\n"
+ "ld1 { v6.s }[0], [x19], #0x4\n"
"9:" // Oddments: Load input (1, 4): Bit 1: End
- "ldr x20, [x15, #0x60]\n"
+ "ldr x19, [x11, #0x60]\n"
"fmla v31.4s, v3.4s, v6.4s\n"
"fmla v28.4s, v4.4s, v12.4s\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"tbz %x[n_channels], #1, 10f\n"
- "ld1 { v9.d }[0], [x20], #0x8\n"
+ "ld1 { v9.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v9.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x19], #0x4\n"
"b 11f\n"
"10:" // Oddments: Load input (0, 5): Bit 1: Unset
- "ld1 { v9.s }[0], [x20], #0x4\n"
+ "ld1 { v9.s }[0], [x19], #0x4\n"
"11:" // Oddments: Load input (0, 5): Bit 1: End
- "ldr q0, [x16, #0x0]\n"
"fmla v29.4s, v4.4s, v9.4s\n"
"fmla v30.4s, v4.4s, v6.4s\n"
- "ldr x20, [x15, #0x68]\n"
+ "ldr q0, [x16, #0x0]\n"
+ "ldr x19, [x11, #0x68]\n"
"fmla v31.4s, v4.4s, v10.4s\n"
"fmla v28.4s, v0.4s, v7.4s\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"fmla v29.4s, v0.4s, v8.4s\n"
"fmla v30.4s, v0.4s, v14.4s\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #1, 12f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 13f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"b 13f\n"
"12:" // Oddments: Load input (2, 1): Bit 1: Unset
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x19], #0x4\n"
"13:" // Oddments: Load input (2, 1): Bit 1: End
"ldr q1, [x16, #0x0]\n"
- "ldr x20, [x15, #0x70]\n"
+ "ldr x19, [x11, #0x70]\n"
"fmla v31.4s, v0.4s, v11.4s\n"
"fmla v28.4s, v1.4s, v8.4s\n"
"fmla v29.4s, v1.4s, v13.4s\n"
"fmla v30.4s, v1.4s, v11.4s\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #1, 14f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 15f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"b 15f\n"
"14:" // Oddments: Load input (2, 2): Bit 1: Unset
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x19], #0x4\n"
"15:" // Oddments: Load input (2, 2): Bit 1: End
"ldr q2, [x16, #0x0]\n"
- "ldr x20, [x15, #0x78]\n"
+ "ldr x19, [x11, #0x78]\n"
"fmla v31.4s, v1.4s, v12.4s\n"
"fmla v28.4s, v2.4s, v13.4s\n"
"fmla v29.4s, v2.4s, v5.4s\n"
"fmla v30.4s, v2.4s, v12.4s\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #1, 16f\n"
- "ld1 { v9.d }[0], [x20], #0x8\n"
+ "ld1 { v9.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 17f\n"
- "ld1 { v9.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x19], #0x4\n"
"b 17f\n"
"16:" // Oddments: Load input (2, 3): Bit 1: Unset
- "ld1 { v9.s }[0], [x20], #0x4\n"
+ "ld1 { v9.s }[0], [x19], #0x4\n"
"17:" // Oddments: Load input (2, 3): Bit 1: End
"ldr q3, [x16, #0x0]\n"
- "ldr x20, [x15, #0x80]\n"
+ "ldr x19, [x11, #0x80]\n"
"fmla v31.4s, v2.4s, v9.4s\n"
"fmla v28.4s, v3.4s, v5.4s\n"
"fmla v29.4s, v3.4s, v6.4s\n"
"fmla v30.4s, v3.4s, v9.4s\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #1, 18f\n"
- "ld1 { v13.d }[0], [x20], #0x8\n"
+ "ld1 { v13.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x19], #0x4\n"
"b 19f\n"
"18:" // Oddments: Load input (2, 4): Bit 1: Unset
- "ld1 { v13.s }[0], [x20], #0x4\n"
+ "ld1 { v13.s }[0], [x19], #0x4\n"
"19:" // Oddments: Load input (2, 4): Bit 1: End
"ldr q4, [x16, #0x0]\n"
- "ldr x20, [x15, #0x88]\n"
+ "ldr x19, [x11, #0x88]\n"
"fmla v31.4s, v3.4s, v13.4s\n"
"fmla v28.4s, v4.4s, v6.4s\n"
"fmla v29.4s, v4.4s, v10.4s\n"
"fmla v30.4s, v4.4s, v13.4s\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #1, 20f\n"
- "ld1 { v8.d }[0], [x20], #0x8\n"
+ "ld1 { v8.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 21f\n"
- "ld1 { v8.s }[2], [x20], #0x4\n"
+ "ld1 { v8.s }[2], [x19], #0x4\n"
"b 21f\n"
"20:" // Oddments: Load input (2, 5): Bit 1: Unset
- "ld1 { v8.s }[0], [x20], #0x4\n"
+ "ld1 { v8.s }[0], [x19], #0x4\n"
"21:" // Oddments: Load input (2, 5): Bit 1: End
"ldr q0, [x16, #0x0]\n"
- "ldr x20, [x15, #0x90]\n"
+ "ldr x19, [x11, #0x90]\n"
"fmla v31.4s, v4.4s, v8.4s\n"
"fmla v28.4s, v0.4s, v14.4s\n"
"fmla v29.4s, v0.4s, v11.4s\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #1, 22f\n"
- "ld1 { v5.d }[0], [x20], #0x8\n"
+ "ld1 { v5.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v5.s }[2], [x20], #0x4\n"
+ "ld1 { v5.s }[2], [x19], #0x4\n"
"b 23f\n"
"22:" // Oddments: Load input (3, 0): Bit 1: Unset
- "ld1 { v5.s }[0], [x20], #0x4\n"
+ "ld1 { v5.s }[0], [x19], #0x4\n"
"23:" // Oddments: Load input (3, 0): Bit 1: End
- "ldr x20, [x15, #0x98]\n"
+ "ldr x19, [x11, #0x98]\n"
"fmla v30.4s, v0.4s, v5.4s\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"tbz %x[n_channels], #1, 24f\n"
- "ld1 { v6.d }[0], [x20], #0x8\n"
+ "ld1 { v6.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 25f\n"
- "ld1 { v6.s }[2], [x20], #0x4\n"
+ "ld1 { v6.s }[2], [x19], #0x4\n"
"b 25f\n"
"24:" // Oddments: Load input (3, 1): Bit 1: Unset
- "ld1 { v6.s }[0], [x20], #0x4\n"
+ "ld1 { v6.s }[0], [x19], #0x4\n"
"25:" // Oddments: Load input (3, 1): Bit 1: End
"ldr q1, [x16, #0x0]\n"
- "ldr x20, [x15, #0xa0]\n"
+ "ldr x19, [x11, #0xa0]\n"
"fmla v31.4s, v0.4s, v6.4s\n"
"fmla v28.4s, v1.4s, v11.4s\n"
"fmla v29.4s, v1.4s, v12.4s\n"
"fmla v30.4s, v1.4s, v6.4s\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #1, 26f\n"
- "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v10.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 27f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x19], #0x4\n"
"b 27f\n"
"26:" // Oddments: Load input (3, 2): Bit 1: Unset
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v10.s }[0], [x19], #0x4\n"
"27:" // Oddments: Load input (3, 2): Bit 1: End
"ldr q2, [x16, #0x0]\n"
- "ldr x20, [x15, #0xa8]\n"
+ "ldr x19, [x11, #0xa8]\n"
"fmla v31.4s, v1.4s, v10.4s\n"
"fmla v28.4s, v2.4s, v12.4s\n"
"fmla v29.4s, v2.4s, v9.4s\n"
"fmla v30.4s, v2.4s, v10.4s\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #1, 28f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 29f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"b 29f\n"
"28:" // Oddments: Load input (3, 3): Bit 1: Unset
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x19], #0x4\n"
"29:" // Oddments: Load input (3, 3): Bit 1: End
"ldr q3, [x16, #0x0]\n"
- "ldr x20, [x15, #0xb0]\n"
+ "ldr x19, [x11, #0xb0]\n"
"fmla v31.4s, v2.4s, v11.4s\n"
"fmla v28.4s, v3.4s, v9.4s\n"
"fmla v29.4s, v3.4s, v13.4s\n"
"fmla v30.4s, v3.4s, v11.4s\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #1, 30f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 31f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"b 31f\n"
"30:" // Oddments: Load input (3, 4): Bit 1: Unset
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x19], #0x4\n"
"31:" // Oddments: Load input (3, 4): Bit 1: End
"ldr q4, [x16, #0x0]\n"
- "ldr x20, [x15, #0xb8]\n"
+ "ldr x19, [x11, #0xb8]\n"
"fmla v31.4s, v3.4s, v12.4s\n"
"fmla v28.4s, v4.4s, v13.4s\n"
"fmla v29.4s, v4.4s, v8.4s\n"
"fmla v30.4s, v4.4s, v12.4s\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #1, 32f\n"
- "ld1 { v14.d }[0], [x20], #0x8\n"
+ "ld1 { v14.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v14.s }[2], [x20], #0x4\n"
+ "ld1 { v14.s }[2], [x19], #0x4\n"
"b 33f\n"
"32:" // Oddments: Load input (3, 5): Bit 1: Unset
- "ld1 { v14.s }[0], [x20], #0x4\n"
+ "ld1 { v14.s }[0], [x19], #0x4\n"
"33:" // Oddments: Load input (3, 5): Bit 1: End
"ldr q0, [x16, #0x0]\n"
- "ldr x20, [x15, #0xc0]\n"
+ "ldr x19, [x11, #0xc0]\n"
"fmla v31.4s, v4.4s, v14.4s\n"
"fmla v28.4s, v0.4s, v5.4s\n"
"fmla v29.4s, v0.4s, v6.4s\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #1, 34f\n"
- "ld1 { v9.d }[0], [x20], #0x8\n"
+ "ld1 { v9.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 35f\n"
- "ld1 { v9.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x19], #0x4\n"
"b 35f\n"
"34:" // Oddments: Load input (4, 0): Bit 1: Unset
- "ld1 { v9.s }[0], [x20], #0x4\n"
+ "ld1 { v9.s }[0], [x19], #0x4\n"
"35:" // Oddments: Load input (4, 0): Bit 1: End
- "ldr x20, [x15, #0xc8]\n"
+ "ldr x19, [x11, #0xc8]\n"
"fmla v30.4s, v0.4s, v9.4s\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"tbz %x[n_channels], #1, 36f\n"
- "ld1 { v13.d }[0], [x20], #0x8\n"
+ "ld1 { v13.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 37f\n"
- "ld1 { v13.s }[2], [x20], #0x4\n"
+ "ld1 { v13.s }[2], [x19], #0x4\n"
"b 37f\n"
"36:" // Oddments: Load input (4, 1): Bit 1: Unset
- "ld1 { v13.s }[0], [x20], #0x4\n"
+ "ld1 { v13.s }[0], [x19], #0x4\n"
"37:" // Oddments: Load input (4, 1): Bit 1: End
"ldr q1, [x16, #0x0]\n"
- "ldr x20, [x15, #0xd0]\n"
+ "ldr x19, [x11, #0xd0]\n"
"fmla v31.4s, v0.4s, v13.4s\n"
"fmla v28.4s, v1.4s, v6.4s\n"
"fmla v29.4s, v1.4s, v10.4s\n"
"fmla v30.4s, v1.4s, v13.4s\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #1, 38f\n"
- "ld1 { v5.d }[0], [x20], #0x8\n"
+ "ld1 { v5.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 39f\n"
- "ld1 { v5.s }[2], [x20], #0x4\n"
+ "ld1 { v5.s }[2], [x19], #0x4\n"
"b 39f\n"
"38:" // Oddments: Load input (4, 2): Bit 1: Unset
- "ld1 { v5.s }[0], [x20], #0x4\n"
+ "ld1 { v5.s }[0], [x19], #0x4\n"
"39:" // Oddments: Load input (4, 2): Bit 1: End
"ldr q2, [x16, #0x0]\n"
- "ldr x20, [x15, #0xd8]\n"
+ "ldr x19, [x11, #0xd8]\n"
"fmla v31.4s, v1.4s, v5.4s\n"
"fmla v28.4s, v2.4s, v10.4s\n"
"fmla v29.4s, v2.4s, v11.4s\n"
"fmla v30.4s, v2.4s, v5.4s\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #1, 40f\n"
- "ld1 { v6.d }[0], [x20], #0x8\n"
+ "ld1 { v6.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 41f\n"
- "ld1 { v6.s }[2], [x20], #0x4\n"
+ "ld1 { v6.s }[2], [x19], #0x4\n"
"b 41f\n"
"40:" // Oddments: Load input (4, 3): Bit 1: Unset
- "ld1 { v6.s }[0], [x20], #0x4\n"
+ "ld1 { v6.s }[0], [x19], #0x4\n"
"41:" // Oddments: Load input (4, 3): Bit 1: End
"ldr q3, [x16, #0x0]\n"
- "ldr x20, [x15, #0xe0]\n"
+ "ldr x19, [x11, #0xe0]\n"
"fmla v31.4s, v2.4s, v6.4s\n"
"fmla v28.4s, v3.4s, v11.4s\n"
"fmla v29.4s, v3.4s, v12.4s\n"
"fmla v30.4s, v3.4s, v6.4s\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #1, 42f\n"
- "ld1 { v8.d }[0], [x20], #0x8\n"
+ "ld1 { v8.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 43f\n"
- "ld1 { v8.s }[2], [x20], #0x4\n"
+ "ld1 { v8.s }[2], [x19], #0x4\n"
"b 43f\n"
"42:" // Oddments: Load input (4, 4): Bit 1: Unset
- "ld1 { v8.s }[0], [x20], #0x4\n"
+ "ld1 { v8.s }[0], [x19], #0x4\n"
"43:" // Oddments: Load input (4, 4): Bit 1: End
"ldr q4, [x16, #0x0]\n"
- "ldr x20, [x15, #0xe8]\n"
+ "ldr x19, [x11, #0xe8]\n"
"fmla v31.4s, v3.4s, v8.4s\n"
"fmla v28.4s, v4.4s, v12.4s\n"
"fmla v29.4s, v4.4s, v14.4s\n"
"fmla v30.4s, v4.4s, v8.4s\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #1, 44f\n"
- "ld1 { v10.d }[0], [x20], #0x8\n"
+ "ld1 { v10.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 45f\n"
- "ld1 { v10.s }[2], [x20], #0x4\n"
+ "ld1 { v10.s }[2], [x19], #0x4\n"
"b 45f\n"
"44:" // Oddments: Load input (4, 5): Bit 1: Unset
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v10.s }[0], [x19], #0x4\n"
"45:" // Oddments: Load input (4, 5): Bit 1: End
"ldr q0, [x16, #0x0]\n"
- "ldr x20, [x15, #0xf0]\n"
+ "ldr x19, [x11, #0xf0]\n"
"fmla v31.4s, v4.4s, v10.4s\n"
"fmla v28.4s, v0.4s, v9.4s\n"
"fmla v29.4s, v0.4s, v13.4s\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #1, 46f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 47f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"b 47f\n"
"46:" // Oddments: Load input (5, 0): Bit 1: Unset
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x19], #0x4\n"
"47:" // Oddments: Load input (5, 0): Bit 1: End
- "ldr x20, [x15, #0xf8]\n"
+ "ldr x19, [x11, #0xf8]\n"
"fmla v30.4s, v0.4s, v11.4s\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"tbz %x[n_channels], #1, 48f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 49f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"b 49f\n"
"48:" // Oddments: Load input (5, 1): Bit 1: Unset
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x19], #0x4\n"
"49:" // Oddments: Load input (5, 1): Bit 1: End
"ldr q1, [x16, #0x0]\n"
- "ldr x20, [x15, #0x100]\n"
+ "ldr x19, [x11, #0x100]\n"
"fmla v31.4s, v0.4s, v12.4s\n"
"fmla v28.4s, v1.4s, v13.4s\n"
"fmla v29.4s, v1.4s, v5.4s\n"
"fmla v30.4s, v1.4s, v12.4s\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #1, 50f\n"
- "ld1 { v9.d }[0], [x20], #0x8\n"
+ "ld1 { v9.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 51f\n"
- "ld1 { v9.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x19], #0x4\n"
"b 51f\n"
"50:" // Oddments: Load input (5, 2): Bit 1: Unset
- "ld1 { v9.s }[0], [x20], #0x4\n"
+ "ld1 { v9.s }[0], [x19], #0x4\n"
"51:" // Oddments: Load input (5, 2): Bit 1: End
"ldr q2, [x16, #0x0]\n"
- "ldr x20, [x15, #0x108]\n"
+ "ldr x19, [x11, #0x108]\n"
"fmla v31.4s, v1.4s, v9.4s\n"
"fmla v28.4s, v2.4s, v5.4s\n"
"fmla v29.4s, v2.4s, v6.4s\n"
"fmla v30.4s, v2.4s, v9.4s\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #1, 52f\n"
- "ld1 { v11.d }[0], [x20], #0x8\n"
+ "ld1 { v11.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 53f\n"
- "ld1 { v11.s }[2], [x20], #0x4\n"
+ "ld1 { v11.s }[2], [x19], #0x4\n"
"b 53f\n"
"52:" // Oddments: Load input (5, 3): Bit 1: Unset
- "ld1 { v11.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x19], #0x4\n"
"53:" // Oddments: Load input (5, 3): Bit 1: End
"ldr q3, [x16, #0x0]\n"
- "ldr x20, [x15, #0x110]\n"
+ "ldr x19, [x11, #0x110]\n"
"fmla v31.4s, v2.4s, v11.4s\n"
"fmla v28.4s, v3.4s, v6.4s\n"
"fmla v29.4s, v3.4s, v8.4s\n"
"fmla v30.4s, v3.4s, v11.4s\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"add x16, x16, #0x10\n"
"tbz %x[n_channels], #1, 54f\n"
- "ld1 { v12.d }[0], [x20], #0x8\n"
+ "ld1 { v12.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 55f\n"
- "ld1 { v12.s }[2], [x20], #0x4\n"
+ "ld1 { v12.s }[2], [x19], #0x4\n"
"b 55f\n"
"54:" // Oddments: Load input (5, 4): Bit 1: Unset
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v12.s }[0], [x19], #0x4\n"
"55:" // Oddments: Load input (5, 4): Bit 1: End
"ldr q4, [x16, #0x0]\n"
- "ldr x20, [x15, #0x118]\n"
+ "ldr x19, [x11, #0x118]\n"
"fmla v31.4s, v3.4s, v12.4s\n"
"fmla v28.4s, v4.4s, v8.4s\n"
"fmla v29.4s, v4.4s, v10.4s\n"
"fmla v30.4s, v4.4s, v12.4s\n"
- "add x20, x20, x10\n"
+ "add x19, x19, x10\n"
"tbz %x[n_channels], #1, 56f\n"
- "ld1 { v9.d }[0], [x20], #0x8\n"
+ "ld1 { v9.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 57f\n"
- "ld1 { v9.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x19], #0x4\n"
"b 57f\n"
"56:" // Oddments: Load input (5, 5): Bit 1: Unset
- "ld1 { v9.s }[0], [x20], #0x4\n"
+ "ld1 { v9.s }[0], [x19], #0x4\n"
"57:" // Oddments: Load input (5, 5): Bit 1: End
"fmla v31.4s, v4.4s, v9.4s\n"
"fmax v28.4s, v28.4s, v18.4s\n"
@@ -1014,28 +1014,28 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"fmin v30.4s, v30.4s, v17.4s\n"
"fmin v31.4s, v31.4s, v17.4s\n"
"tbz %x[n_channels], #1, 58f\n"
- "st1 { v28.d }[0], [x14], #0x8\n"
- "st1 { v29.d }[0], [x13], #0x8\n"
- "st1 { v30.d }[0], [x12], #0x8\n"
- "st1 { v31.d }[0], [x11], #0x8\n"
+ "st1 { v28.d }[0], [x15], #0x8\n"
+ "st1 { v29.d }[0], [x14], #0x8\n"
+ "st1 { v30.d }[0], [x13], #0x8\n"
+ "st1 { v31.d }[0], [x12], #0x8\n"
"tbz %x[n_channels], #0, 59f\n"
- "st1 { v28.s }[2], [x14], #0x4\n"
- "st1 { v29.s }[2], [x13], #0x4\n"
- "st1 { v30.s }[2], [x12], #0x4\n"
- "st1 { v31.s }[2], [x11], #0x4\n"
+ "st1 { v28.s }[2], [x15], #0x4\n"
+ "st1 { v29.s }[2], [x14], #0x4\n"
+ "st1 { v30.s }[2], [x13], #0x4\n"
+ "st1 { v31.s }[2], [x12], #0x4\n"
"b 59f\n"
"58:" // Oddments: Store: Bit 1: Unset
- "st1 { v28.s }[0], [x14], #0x4\n"
- "st1 { v29.s }[0], [x13], #0x4\n"
- "st1 { v30.s }[0], [x12], #0x4\n"
- "st1 { v31.s }[0], [x11], #0x4\n"
+ "st1 { v28.s }[0], [x15], #0x4\n"
+ "st1 { v29.s }[0], [x14], #0x4\n"
+ "st1 { v30.s }[0], [x13], #0x4\n"
+ "st1 { v31.s }[0], [x12], #0x4\n"
"59:" // Oddments: Store: Bit 1: End
"60:" // End
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_generic_output9_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_generic_output9_mla_depthfirst/generic.cpp
index 0ea3a8fbed..c0b87ada75 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_generic_output9_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_generic_output9_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,329 +43,336 @@ void a64_fp32_nhwc_generic_output9_mla_depthfirst_impl(
const float minmax_vals[2] = { activation_min, activation_max };
__asm__ __volatile__(
- "ld1r { v2.4s }, [%x[minmax_vals]]\n"
- "lsr x12, %x[n_channels], #0x2\n"
- "add x20, %x[minmax_vals], #0x4\n"
- "ld1r { v1.4s }, [x20]\n"
+ "ld1r { v4.4s }, [%x[minmax_vals]]\n"
+ "add x19, %x[minmax_vals], #0x4\n"
"mov x11, #0x0\n"
- "cbz x12, 5f\n"
+ "ld1r { v3.4s }, [x19]\n"
+ "lsr x10, %x[n_channels], #0x2\n"
+ "cbz x10, 5f\n"
"1:" // Channel loop
- "movi v23.16b, #0x0\n"
+ "movi v25.16b, #0x0\n"
"cbz %x[bias], 2f\n"
- "ldr q23, [%x[bias], x11]\n"
+ "ldr q25, [%x[bias], x11]\n"
"2:" // Channel loop: Load bias: Done
- "ldr q0, [%x[params], #0x0]\n"
- "mov x21, %x[inptrs]\n"
- "ldp x10, x9, [x21], #0x10\n"
- "subs x20, %x[n_points], #0x1\n"
- "ldr q14, [x10, x11]\n"
- "ldr q15, [x9, x11]\n"
- "mov v24.16b, v23.16b\n"
- "mov v25.16b, v23.16b\n"
- "ldp x28, x27, [x21], #0x10\n"
- "ldr q16, [x28, x11]\n"
- "mov v26.16b, v23.16b\n"
- "mov v27.16b, v23.16b\n"
- "ldr q17, [x27, x11]\n"
- "ldp x26, x25, [x21], #0x10\n"
- "mov v28.16b, v23.16b\n"
- "mov v29.16b, v23.16b\n"
- "ldr q18, [x26, x11]\n"
- "ldr q19, [x25, x11]\n"
- "mov v30.16b, v23.16b\n"
- "mov v31.16b, v23.16b\n"
- "ldp x24, x23, [x21], #0x10\n"
- "ldr q20, [x24, x11]\n"
+ "mov v24.16b, v25.16b\n"
+ "ldr q23, [%x[params], #0x0]\n"
+ "mov x20, %x[inptrs]\n"
+ "mov v22.16b, v25.16b\n"
+ "ldp x9, x28, [x20], #0x10\n"
+ "subs x19, %x[n_points], #0x1\n"
+ "mov v21.16b, v25.16b\n"
+ "ldr q2, [x9, x11]\n"
+ "mov v20.16b, v25.16b\n"
"add %x[params], %x[params], #0x10\n"
- "ldr q21, [x23, x11]\n"
- "ldr x22, [x21], #0x8\n"
- "ldr q22, [x22, x11]\n"
+ "mov v19.16b, v25.16b\n"
+ "ldr q1, [x28, x11]\n"
+ "mov v18.16b, v25.16b\n"
+ "ldp x27, x26, [x20], #0x10\n"
+ "mov v17.16b, v25.16b\n"
+ "ldr q0, [x27, x11]\n"
+ "mov v16.16b, v25.16b\n"
+ "ldr q31, [x26, x11]\n"
+ "ldp x25, x24, [x20], #0x10\n"
+ "ldr q30, [x25, x11]\n"
+ "ldr q29, [x24, x11]\n"
+ "ldp x23, x22, [x20], #0x10\n"
+ "ldr q28, [x23, x11]\n"
+ "ldr q27, [x22, x11]\n"
+ "ldr x21, [x20], #0x8\n"
+ "ldr q26, [x21, x11]\n"
"ble 4f\n"
"3:" // Channel loop: Planar loop
- "ldp x10, x9, [x21], #0x10\n"
- "ldp x28, x27, [x21], #0x10\n"
- "subs x20, x20, #0x1\n"
- "fmla v23.4s, v14.4s, v0.4s\n"
- "ldr q14, [x10, x11]\n"
- "ldp x26, x25, [x21], #0x10\n"
- "fmla v24.4s, v15.4s, v0.4s\n"
- "fmla v25.4s, v16.4s, v0.4s\n"
- "ldr q15, [x9, x11]\n"
- "ldr q16, [x28, x11]\n"
- "fmla v26.4s, v17.4s, v0.4s\n"
- "fmla v27.4s, v18.4s, v0.4s\n"
- "ldr q17, [x27, x11]\n"
- "ldr q18, [x26, x11]\n"
- "fmla v28.4s, v19.4s, v0.4s\n"
- "fmla v29.4s, v20.4s, v0.4s\n"
- "ldr q19, [x25, x11]\n"
- "ldp x24, x23, [x21], #0x10\n"
- "fmla v30.4s, v21.4s, v0.4s\n"
- "fmla v31.4s, v22.4s, v0.4s\n"
- "ldr q0, [%x[params], #0x0]\n"
- "ldr q20, [x24, x11]\n"
+ "fmla v25.4s, v2.4s, v23.4s\n"
+ "ldp x9, x28, [x20], #0x10\n"
+ "subs x19, x19, #0x1\n"
+ "fmla v24.4s, v1.4s, v23.4s\n"
+ "ldr q2, [x9, x11]\n"
+ "fmla v22.4s, v0.4s, v23.4s\n"
+ "fmla v21.4s, v31.4s, v23.4s\n"
+ "ldr q1, [x28, x11]\n"
+ "fmla v20.4s, v30.4s, v23.4s\n"
+ "ldp x27, x26, [x20], #0x10\n"
+ "fmla v19.4s, v29.4s, v23.4s\n"
+ "fmla v18.4s, v28.4s, v23.4s\n"
+ "ldr q0, [x27, x11]\n"
+ "fmla v17.4s, v27.4s, v23.4s\n"
+ "fmla v16.4s, v26.4s, v23.4s\n"
+ "ldr q23, [%x[params], #0x0]\n"
"add %x[params], %x[params], #0x10\n"
- "ldr q21, [x23, x11]\n"
- "ldr x22, [x21], #0x8\n"
- "ldr q22, [x22, x11]\n"
+ "ldr q31, [x26, x11]\n"
+ "ldp x25, x24, [x20], #0x10\n"
+ "ldr q30, [x25, x11]\n"
+ "ldr q29, [x24, x11]\n"
+ "ldp x23, x22, [x20], #0x10\n"
+ "ldr q28, [x23, x11]\n"
+ "ldr q27, [x22, x11]\n"
+ "ldr x21, [x20], #0x8\n"
+ "ldr q26, [x21, x11]\n"
"bgt 3b\n"
"4:" // Channel loop: Planar tail
- "fmla v23.4s, v14.4s, v0.4s\n"
- "fmla v24.4s, v15.4s, v0.4s\n"
- "fmax v23.4s, v23.4s, v2.4s\n"
- "ldp x28, x27, [%x[outptrs], #0x0]\n"
- "fmla v25.4s, v16.4s, v0.4s\n"
- "fmla v26.4s, v17.4s, v0.4s\n"
- "fmax v24.4s, v24.4s, v2.4s\n"
- "ldp x26, x25, [%x[outptrs], #0x10]\n"
- "fmla v27.4s, v18.4s, v0.4s\n"
- "fmla v28.4s, v19.4s, v0.4s\n"
- "fmax v25.4s, v25.4s, v2.4s\n"
- "ldp x24, x23, [%x[outptrs], #0x20]\n"
- "fmla v29.4s, v20.4s, v0.4s\n"
- "fmla v30.4s, v21.4s, v0.4s\n"
- "fmax v26.4s, v26.4s, v2.4s\n"
- "ldp x22, x21, [%x[outptrs], #0x30]\n"
- "fmla v31.4s, v22.4s, v0.4s\n"
- "fmax v27.4s, v27.4s, v2.4s\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "fmax v28.4s, v28.4s, v2.4s\n"
- "fmax v29.4s, v29.4s, v2.4s\n"
- "fmax v30.4s, v30.4s, v2.4s\n"
- "fmax v31.4s, v31.4s, v2.4s\n"
- "fmin v23.4s, v23.4s, v1.4s\n"
- "fmin v24.4s, v24.4s, v1.4s\n"
- "str q23, [x28, x11]\n"
- "fmin v25.4s, v25.4s, v1.4s\n"
- "fmin v26.4s, v26.4s, v1.4s\n"
- "str q24, [x27, x11]\n"
- "fmin v27.4s, v27.4s, v1.4s\n"
- "fmin v28.4s, v28.4s, v1.4s\n"
- "str q25, [x26, x11]\n"
- "fmin v29.4s, v29.4s, v1.4s\n"
- "fmin v30.4s, v30.4s, v1.4s\n"
- "str q26, [x25, x11]\n"
- "fmin v31.4s, v31.4s, v1.4s\n"
- "str q27, [x24, x11]\n"
- "str q28, [x23, x11]\n"
- "str q29, [x22, x11]\n"
- "str q30, [x21, x11]\n"
- "str q31, [x20, x11]\n"
+ "fmla v25.4s, v2.4s, v23.4s\n"
+ "ldp x27, x26, [%x[outptrs], #0x0]\n"
+ "fmla v24.4s, v1.4s, v23.4s\n"
+ "ldp x25, x24, [%x[outptrs], #0x10]\n"
+ "fmla v22.4s, v0.4s, v23.4s\n"
+ "ldp x23, x22, [%x[outptrs], #0x20]\n"
+ "fmla v21.4s, v31.4s, v23.4s\n"
+ "ldp x21, x20, [%x[outptrs], #0x30]\n"
+ "fmla v20.4s, v30.4s, v23.4s\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "fmla v19.4s, v29.4s, v23.4s\n"
+ "fmla v18.4s, v28.4s, v23.4s\n"
+ "fmla v17.4s, v27.4s, v23.4s\n"
+ "fmla v16.4s, v26.4s, v23.4s\n"
+ "fmax v25.4s, v25.4s, v4.4s\n"
+ "fmax v24.4s, v24.4s, v4.4s\n"
+ "fmax v22.4s, v22.4s, v4.4s\n"
+ "fmin v25.4s, v25.4s, v3.4s\n"
+ "str q25, [x27, x11]\n"
+ "fmin v24.4s, v24.4s, v3.4s\n"
+ "fmin v22.4s, v22.4s, v3.4s\n"
+ "str q24, [x26, x11]\n"
+ "fmax v21.4s, v21.4s, v4.4s\n"
+ "fmax v20.4s, v20.4s, v4.4s\n"
+ "str q22, [x25, x11]\n"
+ "fmax v19.4s, v19.4s, v4.4s\n"
+ "fmax v18.4s, v18.4s, v4.4s\n"
+ "fmin v21.4s, v21.4s, v3.4s\n"
+ "str q21, [x24, x11]\n"
+ "fmin v20.4s, v20.4s, v3.4s\n"
+ "fmin v19.4s, v19.4s, v3.4s\n"
+ "str q20, [x23, x11]\n"
+ "fmin v18.4s, v18.4s, v3.4s\n"
+ "fmax v17.4s, v17.4s, v4.4s\n"
+ "str q19, [x22, x11]\n"
+ "fmax v16.4s, v16.4s, v4.4s\n"
+ "str q18, [x21, x11]\n"
+ "fmin v17.4s, v17.4s, v3.4s\n"
+ "fmin v16.4s, v16.4s, v3.4s\n"
+ "str q17, [x20, x11]\n"
+ "str q16, [x19, x11]\n"
"add x11, x11, #0x10\n"
- "cmp x11, x12, LSL #4\n"
+ "cmp x11, x10, LSL #4\n"
"blt 1b\n"
"5:" // Oddments
"tst %x[n_channels], #0x3\n"
"beq 17f\n"
- "movi v23.16b, #0x0\n"
+ "movi v25.16b, #0x0\n"
"cbz %x[bias], 8f\n"
- "add x20, %x[bias], x11\n"
+ "add x19, %x[bias], x11\n"
"tbz %x[n_channels], #1, 6f\n"
- "ld1 { v23.d }[0], [x20], #0x8\n"
+ "ld1 { v25.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 7f\n"
- "ld1 { v23.s }[2], [x20], #0x4\n"
+ "ld1 { v25.s }[2], [x19], #0x4\n"
"b 7f\n"
"6:" // Oddments: Load bias: Bit 1: Unset
- "ld1 { v23.s }[0], [x20], #0x4\n"
+ "tbz %x[n_channels], #0, 7f\n"
+ "ld1 { v25.s }[0], [x19], #0x4\n"
"7:" // Oddments: Load bias: Bit 1: End
+
"8:" // Oddments: Load bias: Done
- "ldr q0, [%x[params], #0x0]\n"
- "mov x21, %x[inptrs]\n"
- "ldp x10, x9, [x21], #0x10\n"
- "mov v24.16b, v23.16b\n"
- "ldp x28, x27, [x21], #0x10\n"
- "ldp x26, x25, [x21], #0x10\n"
- "mov v25.16b, v23.16b\n"
- "mov v26.16b, v23.16b\n"
- "ldp x24, x23, [x21], #0x10\n"
- "ldr x22, [x21], #0x8\n"
- "mov v27.16b, v23.16b\n"
- "mov v28.16b, v23.16b\n"
- "mov v29.16b, v23.16b\n"
- "mov v30.16b, v23.16b\n"
- "add x10, x10, x11\n"
+ "mov v24.16b, v25.16b\n"
+ "ldr q23, [%x[params], #0x0]\n"
+ "mov x20, %x[inptrs]\n"
+ "mov v22.16b, v25.16b\n"
+ "ldp x9, x28, [x20], #0x10\n"
+ "add %x[params], %x[params], #0x10\n"
+ "mov v21.16b, v25.16b\n"
+ "ldp x27, x26, [x20], #0x10\n"
+ "mov v20.16b, v25.16b\n"
"add x9, x9, x11\n"
- "mov v31.16b, v23.16b\n"
+ "mov v19.16b, v25.16b\n"
+ "ldp x25, x24, [x20], #0x10\n"
+ "mov v18.16b, v25.16b\n"
"add x28, x28, x11\n"
+ "mov v17.16b, v25.16b\n"
+ "ldp x23, x22, [x20], #0x10\n"
+ "mov v16.16b, v25.16b\n"
"add x27, x27, x11\n"
+ "ldr x21, [x20], #0x8\n"
"add x26, x26, x11\n"
"add x25, x25, x11\n"
"add x24, x24, x11\n"
"add x23, x23, x11\n"
"add x22, x22, x11\n"
- "add %x[params], %x[params], #0x10\n"
+ "add x21, x21, x11\n"
"tbz %x[n_channels], #1, 9f\n"
- "ldr d14, [x10], #0x8\n"
- "ldr d15, [x9], #0x8\n"
- "ldr d16, [x28], #0x8\n"
- "ldr d17, [x27], #0x8\n"
- "ldr d18, [x26], #0x8\n"
- "ldr d19, [x25], #0x8\n"
- "ldr d20, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d22, [x22], #0x8\n"
+ "ldr d2, [x9], #0x8\n"
+ "ldr d1, [x28], #0x8\n"
+ "ldr d0, [x27], #0x8\n"
+ "ldr d31, [x26], #0x8\n"
+ "ldr d30, [x25], #0x8\n"
+ "ldr d29, [x24], #0x8\n"
+ "ldr d28, [x23], #0x8\n"
+ "ldr d27, [x22], #0x8\n"
+ "ldr d26, [x21], #0x8\n"
"tbz %x[n_channels], #0, 10f\n"
- "ld1 { v14.s }[2], [x10], #0x4\n"
- "ld1 { v15.s }[2], [x9], #0x4\n"
- "ld1 { v16.s }[2], [x28], #0x4\n"
- "ld1 { v17.s }[2], [x27], #0x4\n"
- "ld1 { v18.s }[2], [x26], #0x4\n"
- "ld1 { v19.s }[2], [x25], #0x4\n"
- "ld1 { v20.s }[2], [x24], #0x4\n"
- "ld1 { v21.s }[2], [x23], #0x4\n"
- "ld1 { v22.s }[2], [x22], #0x4\n"
+ "ld1 { v2.s }[2], [x9], #0x4\n"
+ "ld1 { v1.s }[2], [x28], #0x4\n"
+ "ld1 { v0.s }[2], [x27], #0x4\n"
+ "ld1 { v31.s }[2], [x26], #0x4\n"
+ "ld1 { v30.s }[2], [x25], #0x4\n"
+ "ld1 { v29.s }[2], [x24], #0x4\n"
+ "ld1 { v28.s }[2], [x23], #0x4\n"
+ "ld1 { v27.s }[2], [x22], #0x4\n"
+ "ld1 { v26.s }[2], [x21], #0x4\n"
"b 10f\n"
"9:" // Oddments: Load: Bit 1: Unset
- "ldr s14, [x10], #0x4\n"
- "ldr s15, [x9], #0x4\n"
- "ldr s16, [x28], #0x4\n"
- "ldr s17, [x27], #0x4\n"
- "ldr s18, [x26], #0x4\n"
- "ldr s19, [x25], #0x4\n"
- "ldr s20, [x24], #0x4\n"
- "ldr s21, [x23], #0x4\n"
- "ldr s22, [x22], #0x4\n"
+ "tbz %x[n_channels], #0, 10f\n"
+ "ldr s2, [x9], #0x4\n"
+ "ldr s1, [x28], #0x4\n"
+ "ldr s0, [x27], #0x4\n"
+ "ldr s31, [x26], #0x4\n"
+ "ldr s30, [x25], #0x4\n"
+ "ldr s29, [x24], #0x4\n"
+ "ldr s28, [x23], #0x4\n"
+ "ldr s27, [x22], #0x4\n"
+ "ldr s26, [x21], #0x4\n"
"10:" // Oddments: Load: Bit 1: End
- "subs x20, %x[n_points], #0x1\n"
+ "subs x19, %x[n_points], #0x1\n"
"ble 14f\n"
"11:" // Oddments: Planar loop
- "ldp x10, x9, [x21], #0x10\n"
- "ldp x28, x27, [x21], #0x10\n"
- "fmla v23.4s, v14.4s, v0.4s\n"
- "fmla v24.4s, v15.4s, v0.4s\n"
- "ldp x26, x25, [x21], #0x10\n"
- "ldp x24, x23, [x21], #0x10\n"
- "fmla v25.4s, v16.4s, v0.4s\n"
- "fmla v26.4s, v17.4s, v0.4s\n"
- "ldr x22, [x21], #0x8\n"
- "fmla v27.4s, v18.4s, v0.4s\n"
- "fmla v28.4s, v19.4s, v0.4s\n"
- "add x10, x10, x11\n"
- "fmla v29.4s, v20.4s, v0.4s\n"
- "fmla v30.4s, v21.4s, v0.4s\n"
+ "fmla v25.4s, v2.4s, v23.4s\n"
+ "ldp x9, x28, [x20], #0x10\n"
"add x9, x9, x11\n"
+ "fmla v24.4s, v1.4s, v23.4s\n"
+ "ldp x27, x26, [x20], #0x10\n"
+ "fmla v22.4s, v0.4s, v23.4s\n"
+ "ldp x25, x24, [x20], #0x10\n"
+ "fmla v21.4s, v31.4s, v23.4s\n"
"add x28, x28, x11\n"
- "fmla v31.4s, v22.4s, v0.4s\n"
- "ldr q0, [%x[params], #0x0]\n"
+ "fmla v20.4s, v30.4s, v23.4s\n"
+ "ldp x23, x22, [x20], #0x10\n"
+ "fmla v19.4s, v29.4s, v23.4s\n"
"add x27, x27, x11\n"
+ "fmla v18.4s, v28.4s, v23.4s\n"
+ "ldr x21, [x20], #0x8\n"
+ "fmla v17.4s, v27.4s, v23.4s\n"
"add x26, x26, x11\n"
+ "fmla v16.4s, v26.4s, v23.4s\n"
+ "ldr q23, [%x[params], #0x0]\n"
"add x25, x25, x11\n"
"add x24, x24, x11\n"
"add x23, x23, x11\n"
"add x22, x22, x11\n"
+ "add x21, x21, x11\n"
"add %x[params], %x[params], #0x10\n"
"tbz %x[n_channels], #1, 12f\n"
- "ldr d14, [x10], #0x8\n"
- "ldr d15, [x9], #0x8\n"
- "ldr d16, [x28], #0x8\n"
- "ldr d17, [x27], #0x8\n"
- "ldr d18, [x26], #0x8\n"
- "ldr d19, [x25], #0x8\n"
- "ldr d20, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d22, [x22], #0x8\n"
+ "ldr d2, [x9], #0x8\n"
+ "ldr d1, [x28], #0x8\n"
+ "ldr d0, [x27], #0x8\n"
+ "ldr d31, [x26], #0x8\n"
+ "ldr d30, [x25], #0x8\n"
+ "ldr d29, [x24], #0x8\n"
+ "ldr d28, [x23], #0x8\n"
+ "ldr d27, [x22], #0x8\n"
+ "ldr d26, [x21], #0x8\n"
"tbz %x[n_channels], #0, 13f\n"
- "ld1 { v14.s }[2], [x10], #0x4\n"
- "ld1 { v15.s }[2], [x9], #0x4\n"
- "ld1 { v16.s }[2], [x28], #0x4\n"
- "ld1 { v17.s }[2], [x27], #0x4\n"
- "ld1 { v18.s }[2], [x26], #0x4\n"
- "ld1 { v19.s }[2], [x25], #0x4\n"
- "ld1 { v20.s }[2], [x24], #0x4\n"
- "ld1 { v21.s }[2], [x23], #0x4\n"
- "ld1 { v22.s }[2], [x22], #0x4\n"
+ "ld1 { v2.s }[2], [x9], #0x4\n"
+ "ld1 { v1.s }[2], [x28], #0x4\n"
+ "ld1 { v0.s }[2], [x27], #0x4\n"
+ "ld1 { v31.s }[2], [x26], #0x4\n"
+ "ld1 { v30.s }[2], [x25], #0x4\n"
+ "ld1 { v29.s }[2], [x24], #0x4\n"
+ "ld1 { v28.s }[2], [x23], #0x4\n"
+ "ld1 { v27.s }[2], [x22], #0x4\n"
+ "ld1 { v26.s }[2], [x21], #0x4\n"
"b 13f\n"
"12:" // Oddments: Planar loop: Load: Bit 1: Unset
- "ldr s14, [x10], #0x4\n"
- "ldr s15, [x9], #0x4\n"
- "ldr s16, [x28], #0x4\n"
- "ldr s17, [x27], #0x4\n"
- "ldr s18, [x26], #0x4\n"
- "ldr s19, [x25], #0x4\n"
- "ldr s20, [x24], #0x4\n"
- "ldr s21, [x23], #0x4\n"
- "ldr s22, [x22], #0x4\n"
+ "tbz %x[n_channels], #0, 13f\n"
+ "ldr s2, [x9], #0x4\n"
+ "ldr s1, [x28], #0x4\n"
+ "ldr s0, [x27], #0x4\n"
+ "ldr s31, [x26], #0x4\n"
+ "ldr s30, [x25], #0x4\n"
+ "ldr s29, [x24], #0x4\n"
+ "ldr s28, [x23], #0x4\n"
+ "ldr s27, [x22], #0x4\n"
+ "ldr s26, [x21], #0x4\n"
"13:" // Oddments: Planar loop: Load: Bit 1: End
- "subs x20, x20, #0x1\n"
+ "subs x19, x19, #0x1\n"
"bgt 11b\n"
"14:" // Oddments: Planar tail
- "fmla v23.4s, v14.4s, v0.4s\n"
- "fmla v24.4s, v15.4s, v0.4s\n"
- "fmax v23.4s, v23.4s, v2.4s\n"
- "ldp x28, x27, [%x[outptrs], #0x0]\n"
- "fmla v25.4s, v16.4s, v0.4s\n"
- "fmla v26.4s, v17.4s, v0.4s\n"
- "fmax v24.4s, v24.4s, v2.4s\n"
- "ldp x26, x25, [%x[outptrs], #0x10]\n"
- "fmla v27.4s, v18.4s, v0.4s\n"
- "fmla v28.4s, v19.4s, v0.4s\n"
- "fmax v25.4s, v25.4s, v2.4s\n"
- "ldp x24, x23, [%x[outptrs], #0x20]\n"
- "fmla v29.4s, v20.4s, v0.4s\n"
- "fmla v30.4s, v21.4s, v0.4s\n"
- "fmax v26.4s, v26.4s, v2.4s\n"
- "ldp x22, x21, [%x[outptrs], #0x30]\n"
- "fmla v31.4s, v22.4s, v0.4s\n"
- "fmax v27.4s, v27.4s, v2.4s\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "add x28, x28, x11\n"
- "fmax v28.4s, v28.4s, v2.4s\n"
- "fmax v29.4s, v29.4s, v2.4s\n"
+ "fmla v25.4s, v2.4s, v23.4s\n"
+ "ldp x27, x26, [%x[outptrs], #0x0]\n"
"add x27, x27, x11\n"
+ "fmla v24.4s, v1.4s, v23.4s\n"
+ "ldp x25, x24, [%x[outptrs], #0x10]\n"
+ "fmla v22.4s, v0.4s, v23.4s\n"
+ "ldp x23, x22, [%x[outptrs], #0x20]\n"
"add x26, x26, x11\n"
- "fmax v30.4s, v30.4s, v2.4s\n"
- "fmax v31.4s, v31.4s, v2.4s\n"
+ "fmla v21.4s, v31.4s, v23.4s\n"
+ "ldp x21, x20, [%x[outptrs], #0x30]\n"
+ "fmla v20.4s, v30.4s, v23.4s\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
"add x25, x25, x11\n"
+ "fmla v19.4s, v29.4s, v23.4s\n"
"add x24, x24, x11\n"
- "fmin v23.4s, v23.4s, v1.4s\n"
- "fmin v24.4s, v24.4s, v1.4s\n"
+ "fmla v18.4s, v28.4s, v23.4s\n"
"add x23, x23, x11\n"
+ "fmla v17.4s, v27.4s, v23.4s\n"
"add x22, x22, x11\n"
- "fmin v25.4s, v25.4s, v1.4s\n"
- "fmin v26.4s, v26.4s, v1.4s\n"
+ "fmla v16.4s, v26.4s, v23.4s\n"
"add x21, x21, x11\n"
+ "fmax v25.4s, v25.4s, v4.4s\n"
"add x20, x20, x11\n"
- "fmin v27.4s, v27.4s, v1.4s\n"
- "fmin v28.4s, v28.4s, v1.4s\n"
- "fmin v29.4s, v29.4s, v1.4s\n"
- "fmin v30.4s, v30.4s, v1.4s\n"
- "fmin v31.4s, v31.4s, v1.4s\n"
+ "fmax v24.4s, v24.4s, v4.4s\n"
+ "add x19, x19, x11\n"
+ "fmax v22.4s, v22.4s, v4.4s\n"
+ "fmin v25.4s, v25.4s, v3.4s\n"
+ "fmin v24.4s, v24.4s, v3.4s\n"
+ "fmin v22.4s, v22.4s, v3.4s\n"
+ "fmax v21.4s, v21.4s, v4.4s\n"
+ "fmax v20.4s, v20.4s, v4.4s\n"
+ "fmax v19.4s, v19.4s, v4.4s\n"
+ "fmin v21.4s, v21.4s, v3.4s\n"
+ "fmin v20.4s, v20.4s, v3.4s\n"
+ "fmin v19.4s, v19.4s, v3.4s\n"
+ "fmax v18.4s, v18.4s, v4.4s\n"
+ "fmax v17.4s, v17.4s, v4.4s\n"
+ "fmax v16.4s, v16.4s, v4.4s\n"
+ "fmin v18.4s, v18.4s, v3.4s\n"
+ "fmin v17.4s, v17.4s, v3.4s\n"
+ "fmin v16.4s, v16.4s, v3.4s\n"
"tbz %x[n_channels], #1, 15f\n"
- "st1 { v23.d }[0], [x28], #0x8\n"
- "st1 { v24.d }[0], [x27], #0x8\n"
- "st1 { v25.d }[0], [x26], #0x8\n"
- "st1 { v26.d }[0], [x25], #0x8\n"
- "st1 { v27.d }[0], [x24], #0x8\n"
- "st1 { v28.d }[0], [x23], #0x8\n"
- "st1 { v29.d }[0], [x22], #0x8\n"
- "st1 { v30.d }[0], [x21], #0x8\n"
- "st1 { v31.d }[0], [x20], #0x8\n"
+ "st1 { v25.d }[0], [x27], #0x8\n"
+ "st1 { v24.d }[0], [x26], #0x8\n"
+ "st1 { v22.d }[0], [x25], #0x8\n"
+ "st1 { v21.d }[0], [x24], #0x8\n"
+ "st1 { v20.d }[0], [x23], #0x8\n"
+ "st1 { v19.d }[0], [x22], #0x8\n"
+ "st1 { v18.d }[0], [x21], #0x8\n"
+ "st1 { v17.d }[0], [x20], #0x8\n"
+ "st1 { v16.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 16f\n"
- "st1 { v23.s }[2], [x28], #0x4\n"
- "st1 { v24.s }[2], [x27], #0x4\n"
- "st1 { v25.s }[2], [x26], #0x4\n"
- "st1 { v26.s }[2], [x25], #0x4\n"
- "st1 { v27.s }[2], [x24], #0x4\n"
- "st1 { v28.s }[2], [x23], #0x4\n"
- "st1 { v29.s }[2], [x22], #0x4\n"
- "st1 { v30.s }[2], [x21], #0x4\n"
- "st1 { v31.s }[2], [x20], #0x4\n"
+ "st1 { v25.s }[2], [x27], #0x4\n"
+ "st1 { v24.s }[2], [x26], #0x4\n"
+ "st1 { v22.s }[2], [x25], #0x4\n"
+ "st1 { v21.s }[2], [x24], #0x4\n"
+ "st1 { v20.s }[2], [x23], #0x4\n"
+ "st1 { v19.s }[2], [x22], #0x4\n"
+ "st1 { v18.s }[2], [x21], #0x4\n"
+ "st1 { v17.s }[2], [x20], #0x4\n"
+ "st1 { v16.s }[2], [x19], #0x4\n"
"b 16f\n"
"15:" // Oddments: Store: Bit 1: Unset
- "st1 { v23.s }[0], [x28], #0x4\n"
- "st1 { v24.s }[0], [x27], #0x4\n"
- "st1 { v25.s }[0], [x26], #0x4\n"
- "st1 { v26.s }[0], [x25], #0x4\n"
- "st1 { v27.s }[0], [x24], #0x4\n"
- "st1 { v28.s }[0], [x23], #0x4\n"
- "st1 { v29.s }[0], [x22], #0x4\n"
- "st1 { v30.s }[0], [x21], #0x4\n"
- "st1 { v31.s }[0], [x20], #0x4\n"
+ "tbz %x[n_channels], #0, 16f\n"
+ "st1 { v25.s }[0], [x27], #0x4\n"
+ "st1 { v24.s }[0], [x26], #0x4\n"
+ "st1 { v22.s }[0], [x25], #0x4\n"
+ "st1 { v21.s }[0], [x24], #0x4\n"
+ "st1 { v20.s }[0], [x23], #0x4\n"
+ "st1 { v19.s }[0], [x22], #0x4\n"
+ "st1 { v18.s }[0], [x21], #0x4\n"
+ "st1 { v17.s }[0], [x20], #0x4\n"
+ "st1 { v16.s }[0], [x19], #0x4\n"
"16:" // Oddments: Store: Bit 1: End
+
"17:" // End
+
: [params] "+&r" (params)
: [bias] "r" (bias), [inptrs] "r" (inptrs), [minmax_vals] "r" (minmax_vals), [n_channels] "r" ((uint64_t) n_channels), [n_points] "r" ((uint64_t) n_points), [outptrs] "r" (outptrs)
- : "cc", "memory", "v0", "v1", "v2", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
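The recurring fmax/fmin pairs in these kernels apply the activation clamp: each accumulator is bounded below by activation_min and above by activation_max, with both bounds broadcast once from the two-element minmax_vals array by ld1r. An intrinsics sketch of the same computation (illustrative only; the kernels keep it in hand-written assembly):

#include <arm_neon.h>

// Clamp one vector of accumulators to [activation_min, activation_max].
float32x4_t clamp_activation(float32x4_t acc, const float minmax_vals[2])
{
    float32x4_t vmin = vld1q_dup_f32(&minmax_vals[0]); // ld1r of activation_min
    float32x4_t vmax = vld1q_dup_f32(&minmax_vals[1]); // ld1r of activation_max
    return vminq_f32(vmaxq_f32(acc, vmin), vmax);      // fmax against min, fmin against max
}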
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst/generic.cpp
index 69b3865a65..04a7abd3bd 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,59 +41,59 @@ void a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst_imp
const float minmax_vals[2] = { activation_min, activation_max };
__asm__ __volatile__(
- "ld1r { v24.4s }, [%x[clamps]]\n"
+ "ldp x14, x13, [%x[outptrs], #0x0]\n"
+ "add x12, %x[clamps], #0x4\n"
+ "ldp x11, x10, [%x[outptrs], #0x10]\n"
+ "mov x9, #0x0\n"
+ "ldp x28, x27, [%x[outptrs], #0x20]\n"
+ "mov x26, #0x0\n"
+ "ldp x25, x24, [%x[outptrs], #0x30]\n"
+ "lsr x23, %x[channel_multiplier], #0x2\n"
+ "ldr x22, [%x[outptrs], #0x40]\n"
"ldr x21, [%x[inptrs], #0x0]\n"
- "lsr x22, %x[channel_multiplier], #0x2\n"
- "add x20, %x[clamps], #0x4\n"
+ "ldr x20, [%x[inptrs], #0x8]\n"
+ "ldr x19, [%x[inptrs], #0x10]\n"
"ldr q0, [x21, #0x0]\n"
"ldr q1, [x21, #0x10]\n"
- "mov x21, #0x0\n"
- "mov x14, #0x0\n"
- "ld1r { v23.4s }, [x20]\n"
- "ldr x20, [%x[inptrs], #0x8]\n"
"ldr q2, [x20, #0x0]\n"
"ldr q3, [x20, #0x10]\n"
- "ldr x20, [%x[inptrs], #0x10]\n"
- "ldr q4, [x20, #0x0]\n"
- "ldr q5, [x20, #0x10]\n"
- "ldr x20, [%x[inptrs], #0x18]\n"
- "ldr q6, [x20, #0x0]\n"
- "ldr q7, [x20, #0x10]\n"
+ "ldr q4, [x19, #0x0]\n"
+ "ldr q5, [x19, #0x10]\n"
+ "ldr x21, [%x[inptrs], #0x18]\n"
"ldr x20, [%x[inptrs], #0x20]\n"
+ "ldr x19, [%x[inptrs], #0x28]\n"
+ "ldr q6, [x21, #0x0]\n"
+ "ldr q7, [x21, #0x10]\n"
"ldr q8, [x20, #0x0]\n"
"ldr q9, [x20, #0x10]\n"
- "ldr x20, [%x[inptrs], #0x28]\n"
- "ldr q10, [x20, #0x0]\n"
- "ldr q11, [x20, #0x10]\n"
- "ldr x20, [%x[inptrs], #0x30]\n"
- "ldr q12, [x20, #0x0]\n"
- "ldr q13, [x20, #0x10]\n"
- "ldp x13, x12, [%x[outptrs], #0x0]\n"
- "ldp x11, x10, [%x[outptrs], #0x10]\n"
- "ldp x9, x28, [%x[outptrs], #0x20]\n"
- "ldp x27, x26, [%x[outptrs], #0x30]\n"
- "ldr x25, [%x[outptrs], #0x40]\n"
- "cbz x22, 3f\n"
+ "ldr q10, [x19, #0x0]\n"
+ "ldr q11, [x19, #0x10]\n"
+ "ldr x19, [%x[inptrs], #0x30]\n"
+ "ld1r { v24.4s }, [%x[clamps]]\n"
+ "ld1r { v23.4s }, [x12]\n"
+ "ldr q12, [x19, #0x0]\n"
+ "ldr q13, [x19, #0x10]\n"
+ "cbz x23, 3f\n"
"ldr q14, [%x[params], #0x0]\n"
- "ldr q31, [%x[params], #0x10]\n"
- "subs x22, x22, #0x1\n"
"mov v15.16b, v14.16b\n"
- "ldr q30, [%x[params], #0x20]\n"
- "ldr q29, [%x[params], #0x30]\n"
+ "ldr q31, [%x[params], #0x10]\n"
+ "subs x23, x23, #0x1\n"
"mov v16.16b, v14.16b\n"
+ "ldr q30, [%x[params], #0x20]\n"
"mov v17.16b, v14.16b\n"
+ "ldr q29, [%x[params], #0x30]\n"
+ "add %x[params], %x[params], #0x40\n"
"mov v18.16b, v14.16b\n"
"mov v19.16b, v14.16b\n"
- "add %x[params], %x[params], #0x40\n"
"mov v20.16b, v14.16b\n"
"mov v21.16b, v14.16b\n"
"mov v22.16b, v14.16b\n"
"beq 2f\n"
"1:" // Output channel complete vector loop
"fmla v14.4s, v31.4s, v0.s[0]\n"
+ "add x9, x9, #0x4\n"
"fmla v15.4s, v31.4s, v0.s[2]\n"
- "subs x22, x22, #0x1\n"
- "add x21, x21, #0x4\n"
+ "subs x23, x23, #0x1\n"
"fmla v16.4s, v31.4s, v1.s[0]\n"
"fmla v17.4s, v31.4s, v4.s[0]\n"
"fmla v18.4s, v31.4s, v4.s[2]\n"
@@ -174,51 +174,51 @@ void a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst_imp
"ldr q30, [%x[params], #0x80]\n"
"fmla v14.4s, v29.4s, v4.s[2]\n"
"fmla v15.4s, v29.4s, v5.s[0]\n"
- "fmin v14.4s, v14.4s, v23.4s\n"
"fmla v16.4s, v29.4s, v5.s[2]\n"
"fmla v17.4s, v29.4s, v8.s[2]\n"
- "fmax v14.4s, v14.4s, v24.4s\n"
- "str q14, [x13, x14]\n"
- "ldr q14, [%x[params], #0x60]\n"
"fmla v18.4s, v29.4s, v9.s[0]\n"
"fmla v19.4s, v29.4s, v9.s[2]\n"
- "fmin v15.4s, v15.4s, v23.4s\n"
"fmla v20.4s, v29.4s, v12.s[2]\n"
"fmla v21.4s, v29.4s, v13.s[0]\n"
- "fmin v16.4s, v16.4s, v23.4s\n"
"fmla v22.4s, v29.4s, v13.s[2]\n"
"ldr q29, [%x[params], #0x90]\n"
- "fmin v17.4s, v17.4s, v23.4s\n"
+ "fmin v14.4s, v14.4s, v23.4s\n"
+ "fmin v15.4s, v15.4s, v23.4s\n"
+ "fmin v16.4s, v16.4s, v23.4s\n"
+ "fmax v14.4s, v14.4s, v24.4s\n"
+ "str q14, [x14, x26]\n"
+ "fmax v15.4s, v15.4s, v24.4s\n"
+ "fmax v16.4s, v16.4s, v24.4s\n"
+ "ldr q14, [%x[params], #0x60]\n"
"add %x[params], %x[params], #0xa0\n"
+ "fmin v17.4s, v17.4s, v23.4s\n"
+ "str q15, [x13, x26]\n"
"fmin v18.4s, v18.4s, v23.4s\n"
"fmin v19.4s, v19.4s, v23.4s\n"
+ "str q16, [x11, x26]\n"
"fmin v20.4s, v20.4s, v23.4s\n"
- "fmin v21.4s, v21.4s, v23.4s\n"
- "fmin v22.4s, v22.4s, v23.4s\n"
- "fmax v15.4s, v15.4s, v24.4s\n"
- "str q15, [x12, x14]\n"
- "fmax v16.4s, v16.4s, v24.4s\n"
"fmax v17.4s, v17.4s, v24.4s\n"
- "str q16, [x11, x14]\n"
+ "str q17, [x10, x26]\n"
"fmax v18.4s, v18.4s, v24.4s\n"
"fmax v19.4s, v19.4s, v24.4s\n"
- "str q17, [x10, x14]\n"
+ "str q18, [x28, x26]\n"
"fmax v20.4s, v20.4s, v24.4s\n"
+ "fmin v21.4s, v21.4s, v23.4s\n"
+ "str q19, [x27, x26]\n"
+ "fmin v22.4s, v22.4s, v23.4s\n"
+ "str q20, [x25, x26]\n"
"fmax v21.4s, v21.4s, v24.4s\n"
- "str q18, [x9, x14]\n"
- "fmax v22.4s, v22.4s, v24.4s\n"
- "str q19, [x28, x14]\n"
"mov v15.16b, v14.16b\n"
- "str q20, [x27, x14]\n"
+ "str q21, [x24, x26]\n"
+ "fmax v22.4s, v22.4s, v24.4s\n"
"mov v16.16b, v14.16b\n"
+ "str q22, [x22, x26]\n"
"mov v17.16b, v14.16b\n"
- "str q21, [x26, x14]\n"
+ "add x26, x26, #0x10\n"
"mov v18.16b, v14.16b\n"
"mov v19.16b, v14.16b\n"
- "str q22, [x25, x14]\n"
"mov v20.16b, v14.16b\n"
"mov v21.16b, v14.16b\n"
- "add x14, x14, #0x10\n"
"mov v22.16b, v14.16b\n"
"bgt 1b\n"
"2:" // Output channel complete vector tail
@@ -303,58 +303,58 @@ void a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst_imp
"fmla v22.4s, v30.4s, v13.s[1]\n"
"fmla v14.4s, v29.4s, v4.s[2]\n"
"fmla v15.4s, v29.4s, v5.s[0]\n"
- "fmin v14.4s, v14.4s, v23.4s\n"
"fmla v16.4s, v29.4s, v5.s[2]\n"
"fmla v17.4s, v29.4s, v8.s[2]\n"
- "fmin v15.4s, v15.4s, v23.4s\n"
"fmla v18.4s, v29.4s, v9.s[0]\n"
"fmla v19.4s, v29.4s, v9.s[2]\n"
- "fmin v16.4s, v16.4s, v23.4s\n"
"fmla v20.4s, v29.4s, v12.s[2]\n"
"fmla v21.4s, v29.4s, v13.s[0]\n"
- "fmin v17.4s, v17.4s, v23.4s\n"
"fmla v22.4s, v29.4s, v13.s[2]\n"
- "fmin v18.4s, v18.4s, v23.4s\n"
- "fmin v19.4s, v19.4s, v23.4s\n"
- "fmin v20.4s, v20.4s, v23.4s\n"
- "fmin v21.4s, v21.4s, v23.4s\n"
- "fmin v22.4s, v22.4s, v23.4s\n"
+ "fmin v14.4s, v14.4s, v23.4s\n"
+ "fmin v15.4s, v15.4s, v23.4s\n"
+ "fmin v16.4s, v16.4s, v23.4s\n"
"fmax v14.4s, v14.4s, v24.4s\n"
+ "str q14, [x14, x26]\n"
"fmax v15.4s, v15.4s, v24.4s\n"
- "str q14, [x13, x14]\n"
"fmax v16.4s, v16.4s, v24.4s\n"
+ "str q15, [x13, x26]\n"
+ "fmin v17.4s, v17.4s, v23.4s\n"
+ "fmin v18.4s, v18.4s, v23.4s\n"
+ "str q16, [x11, x26]\n"
+ "fmin v19.4s, v19.4s, v23.4s\n"
+ "fmin v20.4s, v20.4s, v23.4s\n"
"fmax v17.4s, v17.4s, v24.4s\n"
- "str q15, [x12, x14]\n"
+ "str q17, [x10, x26]\n"
"fmax v18.4s, v18.4s, v24.4s\n"
"fmax v19.4s, v19.4s, v24.4s\n"
- "str q16, [x11, x14]\n"
+ "str q18, [x28, x26]\n"
"fmax v20.4s, v20.4s, v24.4s\n"
+ "fmin v21.4s, v21.4s, v23.4s\n"
+ "str q19, [x27, x26]\n"
+ "fmin v22.4s, v22.4s, v23.4s\n"
+ "str q20, [x25, x26]\n"
"fmax v21.4s, v21.4s, v24.4s\n"
- "str q17, [x10, x14]\n"
"fmax v22.4s, v22.4s, v24.4s\n"
- "str q18, [x9, x14]\n"
- "str q19, [x28, x14]\n"
- "str q20, [x27, x14]\n"
- "str q21, [x26, x14]\n"
- "str q22, [x25, x14]\n"
- "add x14, x14, #0x10\n"
+ "str q21, [x24, x26]\n"
+ "str q22, [x22, x26]\n"
+ "add x26, x26, #0x10\n"
"3:" // Output channel oddments
"tst %x[channel_multiplier], #0x3\n"
"beq 6f\n"
"ldr q14, [%x[params], #0x0]\n"
- "ldr q31, [%x[params], #0x10]\n"
"mov v15.16b, v14.16b\n"
+ "ldr q31, [%x[params], #0x10]\n"
"mov v16.16b, v14.16b\n"
"ldr q30, [%x[params], #0x20]\n"
- "ldr q29, [%x[params], #0x30]\n"
"mov v17.16b, v14.16b\n"
+ "ldr q29, [%x[params], #0x30]\n"
"mov v18.16b, v14.16b\n"
"mov v19.16b, v14.16b\n"
"mov v20.16b, v14.16b\n"
- "fmla v15.4s, v31.4s, v0.s[2]\n"
"mov v21.16b, v14.16b\n"
"mov v22.16b, v14.16b\n"
"fmla v14.4s, v31.4s, v0.s[0]\n"
+ "fmla v15.4s, v31.4s, v0.s[2]\n"
"fmla v16.4s, v31.4s, v1.s[0]\n"
"fmla v17.4s, v31.4s, v4.s[0]\n"
"fmla v18.4s, v31.4s, v4.s[2]\n"
@@ -434,97 +434,98 @@ void a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst_imp
"fmla v22.4s, v30.4s, v13.s[1]\n"
"fmla v14.4s, v29.4s, v4.s[2]\n"
"fmla v15.4s, v29.4s, v5.s[0]\n"
- "fmin v14.4s, v14.4s, v23.4s\n"
"fmla v16.4s, v29.4s, v5.s[2]\n"
"fmla v17.4s, v29.4s, v8.s[2]\n"
- "fmin v15.4s, v15.4s, v23.4s\n"
"fmla v18.4s, v29.4s, v9.s[0]\n"
"fmla v19.4s, v29.4s, v9.s[2]\n"
- "fmin v16.4s, v16.4s, v23.4s\n"
"fmla v20.4s, v29.4s, v12.s[2]\n"
"fmla v21.4s, v29.4s, v13.s[0]\n"
- "fmin v17.4s, v17.4s, v23.4s\n"
"fmla v22.4s, v29.4s, v13.s[2]\n"
- "fmin v18.4s, v18.4s, v23.4s\n"
- "fmin v19.4s, v19.4s, v23.4s\n"
- "fmin v20.4s, v20.4s, v23.4s\n"
- "fmin v21.4s, v21.4s, v23.4s\n"
- "fmin v22.4s, v22.4s, v23.4s\n"
+ "fmin v14.4s, v14.4s, v23.4s\n"
+ "fmin v15.4s, v15.4s, v23.4s\n"
+ "fmin v16.4s, v16.4s, v23.4s\n"
"fmax v14.4s, v14.4s, v24.4s\n"
"fmax v15.4s, v15.4s, v24.4s\n"
"fmax v16.4s, v16.4s, v24.4s\n"
+ "fmin v17.4s, v17.4s, v23.4s\n"
+ "fmin v18.4s, v18.4s, v23.4s\n"
+ "fmin v19.4s, v19.4s, v23.4s\n"
"fmax v17.4s, v17.4s, v24.4s\n"
"fmax v18.4s, v18.4s, v24.4s\n"
"fmax v19.4s, v19.4s, v24.4s\n"
+ "fmin v20.4s, v20.4s, v23.4s\n"
+ "fmin v21.4s, v21.4s, v23.4s\n"
+ "fmin v22.4s, v22.4s, v23.4s\n"
"fmax v20.4s, v20.4s, v24.4s\n"
"fmax v21.4s, v21.4s, v24.4s\n"
"fmax v22.4s, v22.4s, v24.4s\n"
"tbz %x[channel_multiplier], #1, 4f\n"
- "add x20, x13, x14\n"
- "add x22, x12, x14\n"
- "st1 { v14.d }[0], [x20]\n"
- "add x21, x11, x14\n"
- "add x20, x10, x14\n"
- "st1 { v15.d }[0], [x22]\n"
- "add x24, x9, x14\n"
- "add x23, x28, x14\n"
- "st1 { v16.d }[0], [x21]\n"
- "add x22, x27, x14\n"
- "add x21, x26, x14\n"
- "st1 { v17.d }[0], [x20]\n"
- "add x20, x25, x14\n"
- "st1 { v18.d }[0], [x24]\n"
- "add x14, x14, #0x8\n"
- "st1 { v19.d }[0], [x23]\n"
- "st1 { v20.d }[0], [x22]\n"
- "st1 { v21.d }[0], [x21]\n"
- "st1 { v22.d }[0], [x20]\n"
+ "add x19, x14, x26\n"
+ "st1 { v14.d }[0], [x19]\n"
+ "add x19, x13, x26\n"
+ "st1 { v15.d }[0], [x19]\n"
+ "add x19, x11, x26\n"
+ "st1 { v16.d }[0], [x19]\n"
+ "add x19, x10, x26\n"
+ "st1 { v17.d }[0], [x19]\n"
+ "add x19, x28, x26\n"
+ "st1 { v18.d }[0], [x19]\n"
+ "add x19, x27, x26\n"
+ "st1 { v19.d }[0], [x19]\n"
+ "add x19, x25, x26\n"
+ "st1 { v20.d }[0], [x19]\n"
+ "add x19, x24, x26\n"
+ "st1 { v21.d }[0], [x19]\n"
+ "add x19, x22, x26\n"
+ "st1 { v22.d }[0], [x19]\n"
+ "add x26, x26, #0x8\n"
"tbz %x[channel_multiplier], #0, 5f\n"
- "add x20, x13, x14\n"
- "add x22, x12, x14\n"
- "st1 { v14.s }[2], [x20]\n"
- "add x21, x11, x14\n"
- "add x20, x10, x14\n"
- "st1 { v15.s }[2], [x22]\n"
- "add x24, x9, x14\n"
- "add x23, x28, x14\n"
- "st1 { v16.s }[2], [x21]\n"
- "add x22, x27, x14\n"
- "add x21, x26, x14\n"
- "st1 { v17.s }[2], [x20]\n"
- "add x20, x25, x14\n"
- "st1 { v18.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
- "st1 { v20.s }[2], [x22]\n"
- "st1 { v21.s }[2], [x21]\n"
- "st1 { v22.s }[2], [x20]\n"
+ "add x19, x14, x26\n"
+ "st1 { v14.s }[2], [x19]\n"
+ "add x19, x13, x26\n"
+ "st1 { v15.s }[2], [x19]\n"
+ "add x19, x11, x26\n"
+ "st1 { v16.s }[2], [x19]\n"
+ "add x19, x10, x26\n"
+ "st1 { v17.s }[2], [x19]\n"
+ "add x19, x28, x26\n"
+ "st1 { v18.s }[2], [x19]\n"
+ "add x19, x27, x26\n"
+ "st1 { v19.s }[2], [x19]\n"
+ "add x19, x25, x26\n"
+ "st1 { v20.s }[2], [x19]\n"
+ "add x19, x24, x26\n"
+ "st1 { v21.s }[2], [x19]\n"
+ "add x19, x22, x26\n"
+ "st1 { v22.s }[2], [x19]\n"
"b 5f\n"
"4:" // Output channel oddments: Store: Bit 1: Unset
- "add x20, x13, x14\n"
- "add x22, x12, x14\n"
- "st1 { v14.s }[0], [x20]\n"
- "add x21, x11, x14\n"
- "add x20, x10, x14\n"
- "st1 { v15.s }[0], [x22]\n"
- "add x24, x9, x14\n"
- "add x23, x28, x14\n"
- "st1 { v16.s }[0], [x21]\n"
- "add x22, x27, x14\n"
- "add x21, x26, x14\n"
- "st1 { v17.s }[0], [x20]\n"
- "add x20, x25, x14\n"
- "st1 { v18.s }[0], [x24]\n"
- "st1 { v19.s }[0], [x23]\n"
- "st1 { v20.s }[0], [x22]\n"
- "st1 { v21.s }[0], [x21]\n"
- "st1 { v22.s }[0], [x20]\n"
+ "tbz %x[channel_multiplier], #0, 5f\n"
+ "add x19, x14, x26\n"
+ "st1 { v14.s }[0], [x19]\n"
+ "add x19, x13, x26\n"
+ "st1 { v15.s }[0], [x19]\n"
+ "add x19, x11, x26\n"
+ "st1 { v16.s }[0], [x19]\n"
+ "add x19, x10, x26\n"
+ "st1 { v17.s }[0], [x19]\n"
+ "add x19, x28, x26\n"
+ "st1 { v18.s }[0], [x19]\n"
+ "add x19, x27, x26\n"
+ "st1 { v19.s }[0], [x19]\n"
+ "add x19, x25, x26\n"
+ "st1 { v20.s }[0], [x19]\n"
+ "add x19, x24, x26\n"
+ "st1 { v21.s }[0], [x19]\n"
+ "add x19, x22, x26\n"
+ "st1 { v22.s }[0], [x19]\n"
"5:" // Output channel oddments: Store: Bit 1: End
"6:" // End
: [params] "+&r" (params)
: [channel_multiplier] "r" (n_output_channels), [clamps] "r" (minmax_vals), [inptrs] "r" (inptrs), [outptrs] "r" (outptrs)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
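The oddment stores above test single bits of channel_multiplier with tbz: bit 1 selects a two-lane (d-register) store and bit 0 a one-lane store, which together cover the one to three channels left over after the four-wide loop. A scalar sketch of that tail (hypothetical helper, for illustration):

#include <cstdint>

void store_oddments(float *dst, const float *src, uint64_t n)
{
    if (n & 2) { dst[0] = src[0]; dst[1] = src[1]; dst += 2; src += 2; } // tbz ..., #1
    if (n & 1) { dst[0] = src[0]; }                                      // tbz ..., #0
}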
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst/generic.cpp
index 50848cc2e8..67fc09b2ee 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,56 +42,56 @@ void a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
const float minmax_vals[2] = { activation_min, activation_max };
__asm__ __volatile__(
- "ld1r { v21.4s }, [%x[clamps]]\n"
- "ldr x21, [%x[inptrs], #0x0]\n"
+ "ldp x13, x12, [%x[outptrs], #0x0]\n"
+ "add x11, %x[clamps], #0x4\n"
+ "ldp x10, x9, [%x[outptrs], #0x10]\n"
+ "mov x28, #0x0\n"
+ "ldp x27, x26, [%x[outptrs], #0x20]\n"
+ "mov x25, #0x0\n"
+ "ldp x24, x23, [%x[outptrs], #0x30]\n"
"lsr x22, %x[channel_multiplier], #0x2\n"
- "add x20, %x[clamps], #0x4\n"
+ "ldr x21, [%x[inptrs], #0x0]\n"
+ "ldr x20, [%x[inptrs], #0x8]\n"
+ "ldr x19, [%x[inptrs], #0x10]\n"
"ldr q0, [x21, #0x0]\n"
"ldr q1, [x21, #0x10]\n"
- "mov x21, #0x0\n"
- "mov x13, #0x0\n"
- "ld1r { v20.4s }, [x20]\n"
- "ldr x20, [%x[inptrs], #0x8]\n"
"ldr q2, [x20, #0x0]\n"
"ldr q3, [x20, #0x10]\n"
- "ldr x20, [%x[inptrs], #0x10]\n"
- "ldr q4, [x20, #0x0]\n"
- "ldr q5, [x20, #0x10]\n"
- "ldr x20, [%x[inptrs], #0x18]\n"
- "ldr q6, [x20, #0x0]\n"
- "ldr q7, [x20, #0x10]\n"
+ "ldr q4, [x19, #0x0]\n"
+ "ldr q5, [x19, #0x10]\n"
+ "ldr x21, [%x[inptrs], #0x18]\n"
"ldr x20, [%x[inptrs], #0x20]\n"
+ "ldr x19, [%x[inptrs], #0x28]\n"
+ "ldr q6, [x21, #0x0]\n"
+ "ldr q7, [x21, #0x10]\n"
"ldr q8, [x20, #0x0]\n"
"ldr q9, [x20, #0x10]\n"
- "ldr x20, [%x[inptrs], #0x28]\n"
- "ldr q10, [x20, #0x0]\n"
- "ldr q11, [x20, #0x10]\n"
- "ldp x12, x11, [%x[outptrs], #0x0]\n"
- "ldp x10, x9, [%x[outptrs], #0x10]\n"
- "ldp x28, x27, [%x[outptrs], #0x20]\n"
- "ldp x26, x25, [%x[outptrs], #0x30]\n"
+ "ldr q10, [x19, #0x0]\n"
+ "ldr q11, [x19, #0x10]\n"
+ "ld1r { v21.4s }, [%x[clamps]]\n"
+ "ld1r { v20.4s }, [x11]\n"
"cbz x22, 3f\n"
"ldr q12, [%x[params], #0x0]\n"
+ "mov v13.16b, v12.16b\n"
"ldr q31, [%x[params], #0x10]\n"
"subs x22, x22, #0x1\n"
- "mov v13.16b, v12.16b\n"
- "ldr q30, [%x[params], #0x20]\n"
- "ldr q29, [%x[params], #0x30]\n"
"mov v14.16b, v12.16b\n"
+ "ldr q30, [%x[params], #0x20]\n"
"mov v15.16b, v12.16b\n"
- "ldr q28, [%x[params], #0x40]\n"
- "ldr q27, [%x[params], #0x50]\n"
+ "ldr q29, [%x[params], #0x30]\n"
"mov v16.16b, v12.16b\n"
+ "ldr q28, [%x[params], #0x40]\n"
"mov v17.16b, v12.16b\n"
+ "ldr q27, [%x[params], #0x50]\n"
+ "add %x[params], %x[params], #0x60\n"
"mov v18.16b, v12.16b\n"
"mov v19.16b, v12.16b\n"
- "add %x[params], %x[params], #0x60\n"
"beq 2f\n"
"1:" // Output channel complete vector loop
"fmla v12.4s, v31.4s, v0.s[0]\n"
+ "add x28, x28, #0x4\n"
"fmla v13.4s, v31.4s, v0.s[1]\n"
"subs x22, x22, #0x1\n"
- "add x21, x21, #0x4\n"
"fmla v14.4s, v31.4s, v0.s[2]\n"
"fmla v15.4s, v31.4s, v0.s[3]\n"
"fmla v16.4s, v31.4s, v2.s[0]\n"
@@ -308,46 +308,46 @@ void a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"ldr q28, [%x[params], #0x180]\n"
"fmla v12.4s, v27.4s, v9.s[0]\n"
"fmla v13.4s, v27.4s, v9.s[1]\n"
- "fmin v12.4s, v12.4s, v20.4s\n"
"fmla v14.4s, v27.4s, v9.s[2]\n"
"fmla v15.4s, v27.4s, v9.s[3]\n"
- "fmax v12.4s, v12.4s, v21.4s\n"
- "str q12, [x12, x13]\n"
- "ldr q12, [%x[params], #0x140]\n"
"fmla v16.4s, v27.4s, v11.s[0]\n"
"fmla v17.4s, v27.4s, v11.s[1]\n"
- "fmin v13.4s, v13.4s, v20.4s\n"
"fmla v18.4s, v27.4s, v11.s[2]\n"
"fmla v19.4s, v27.4s, v11.s[3]\n"
"ldr q27, [%x[params], #0x190]\n"
+ "fmin v12.4s, v12.4s, v20.4s\n"
+ "fmin v13.4s, v13.4s, v20.4s\n"
"fmin v14.4s, v14.4s, v20.4s\n"
+ "fmax v12.4s, v12.4s, v21.4s\n"
+ "str q12, [x13, x25]\n"
+ "fmax v13.4s, v13.4s, v21.4s\n"
+ "fmax v14.4s, v14.4s, v21.4s\n"
+ "ldr q12, [%x[params], #0x140]\n"
+ "add %x[params], %x[params], #0x1a0\n"
"fmin v15.4s, v15.4s, v20.4s\n"
+ "str q13, [x12, x25]\n"
"fmin v16.4s, v16.4s, v20.4s\n"
- "add %x[params], %x[params], #0x1a0\n"
"fmin v17.4s, v17.4s, v20.4s\n"
+ "str q14, [x10, x25]\n"
"fmin v18.4s, v18.4s, v20.4s\n"
- "fmin v19.4s, v19.4s, v20.4s\n"
- "fmax v13.4s, v13.4s, v21.4s\n"
- "str q13, [x11, x13]\n"
- "fmax v14.4s, v14.4s, v21.4s\n"
"fmax v15.4s, v15.4s, v21.4s\n"
- "str q14, [x10, x13]\n"
+ "str q15, [x9, x25]\n"
"fmax v16.4s, v16.4s, v21.4s\n"
"fmax v17.4s, v17.4s, v21.4s\n"
- "str q15, [x9, x13]\n"
+ "str q16, [x27, x25]\n"
"fmax v18.4s, v18.4s, v21.4s\n"
- "fmax v19.4s, v19.4s, v21.4s\n"
- "str q16, [x28, x13]\n"
- "str q17, [x27, x13]\n"
+ "fmin v19.4s, v19.4s, v20.4s\n"
+ "str q17, [x26, x25]\n"
"mov v13.16b, v12.16b\n"
+ "str q18, [x24, x25]\n"
+ "fmax v19.4s, v19.4s, v21.4s\n"
"mov v14.16b, v12.16b\n"
- "str q18, [x26, x13]\n"
+ "str q19, [x23, x25]\n"
"mov v15.16b, v12.16b\n"
+ "add x25, x25, #0x10\n"
"mov v16.16b, v12.16b\n"
- "str q19, [x25, x13]\n"
"mov v17.16b, v12.16b\n"
"mov v18.16b, v12.16b\n"
- "add x13, x13, #0x10\n"
"mov v19.16b, v12.16b\n"
"bgt 1b\n"
"2:" // Output channel complete vector tail
@@ -566,51 +566,51 @@ void a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla v19.4s, v28.4s, v11.s[2]\n"
"fmla v12.4s, v27.4s, v9.s[0]\n"
"fmla v13.4s, v27.4s, v9.s[1]\n"
- "fmin v12.4s, v12.4s, v20.4s\n"
"fmla v14.4s, v27.4s, v9.s[2]\n"
"fmla v15.4s, v27.4s, v9.s[3]\n"
- "fmin v13.4s, v13.4s, v20.4s\n"
"fmla v16.4s, v27.4s, v11.s[0]\n"
"fmla v17.4s, v27.4s, v11.s[1]\n"
- "fmin v14.4s, v14.4s, v20.4s\n"
"fmla v18.4s, v27.4s, v11.s[2]\n"
"fmla v19.4s, v27.4s, v11.s[3]\n"
+ "fmin v12.4s, v12.4s, v20.4s\n"
+ "fmin v13.4s, v13.4s, v20.4s\n"
+ "fmin v14.4s, v14.4s, v20.4s\n"
+ "fmax v12.4s, v12.4s, v21.4s\n"
+ "str q12, [x13, x25]\n"
+ "fmax v13.4s, v13.4s, v21.4s\n"
+ "fmax v14.4s, v14.4s, v21.4s\n"
+ "str q13, [x12, x25]\n"
"fmin v15.4s, v15.4s, v20.4s\n"
"fmin v16.4s, v16.4s, v20.4s\n"
+ "str q14, [x10, x25]\n"
"fmin v17.4s, v17.4s, v20.4s\n"
"fmin v18.4s, v18.4s, v20.4s\n"
- "fmin v19.4s, v19.4s, v20.4s\n"
- "fmax v12.4s, v12.4s, v21.4s\n"
- "fmax v13.4s, v13.4s, v21.4s\n"
- "str q12, [x12, x13]\n"
- "fmax v14.4s, v14.4s, v21.4s\n"
"fmax v15.4s, v15.4s, v21.4s\n"
- "str q13, [x11, x13]\n"
+ "str q15, [x9, x25]\n"
"fmax v16.4s, v16.4s, v21.4s\n"
"fmax v17.4s, v17.4s, v21.4s\n"
- "str q14, [x10, x13]\n"
+ "str q16, [x27, x25]\n"
"fmax v18.4s, v18.4s, v21.4s\n"
+ "fmin v19.4s, v19.4s, v20.4s\n"
+ "str q17, [x26, x25]\n"
"fmax v19.4s, v19.4s, v21.4s\n"
- "str q15, [x9, x13]\n"
- "str q16, [x28, x13]\n"
- "str q17, [x27, x13]\n"
- "str q18, [x26, x13]\n"
- "str q19, [x25, x13]\n"
- "add x13, x13, #0x10\n"
+ "str q18, [x24, x25]\n"
+ "str q19, [x23, x25]\n"
+ "add x25, x25, #0x10\n"
"3:" // Output channel oddments
"tst %x[channel_multiplier], #0x3\n"
"beq 6f\n"
"ldr q12, [%x[params], #0x0]\n"
- "ldr q31, [%x[params], #0x10]\n"
"mov v13.16b, v12.16b\n"
+ "ldr q31, [%x[params], #0x10]\n"
"mov v14.16b, v12.16b\n"
"ldr q30, [%x[params], #0x20]\n"
- "ldr q29, [%x[params], #0x30]\n"
"mov v15.16b, v12.16b\n"
+ "ldr q29, [%x[params], #0x30]\n"
"mov v16.16b, v12.16b\n"
"ldr q28, [%x[params], #0x40]\n"
- "ldr q27, [%x[params], #0x50]\n"
"mov v17.16b, v12.16b\n"
+ "ldr q27, [%x[params], #0x50]\n"
"mov v18.16b, v12.16b\n"
"mov v19.16b, v12.16b\n"
"fmla v12.4s, v31.4s, v0.s[0]\n"
@@ -828,88 +828,89 @@ void a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla v19.4s, v28.4s, v11.s[2]\n"
"fmla v12.4s, v27.4s, v9.s[0]\n"
"fmla v13.4s, v27.4s, v9.s[1]\n"
- "fmin v12.4s, v12.4s, v20.4s\n"
"fmla v14.4s, v27.4s, v9.s[2]\n"
"fmla v15.4s, v27.4s, v9.s[3]\n"
- "fmin v13.4s, v13.4s, v20.4s\n"
"fmla v16.4s, v27.4s, v11.s[0]\n"
"fmla v17.4s, v27.4s, v11.s[1]\n"
- "fmin v14.4s, v14.4s, v20.4s\n"
"fmla v18.4s, v27.4s, v11.s[2]\n"
"fmla v19.4s, v27.4s, v11.s[3]\n"
- "fmin v15.4s, v15.4s, v20.4s\n"
- "fmin v16.4s, v16.4s, v20.4s\n"
- "fmin v17.4s, v17.4s, v20.4s\n"
- "fmin v18.4s, v18.4s, v20.4s\n"
- "fmin v19.4s, v19.4s, v20.4s\n"
+ "fmin v12.4s, v12.4s, v20.4s\n"
+ "fmin v13.4s, v13.4s, v20.4s\n"
+ "fmin v14.4s, v14.4s, v20.4s\n"
"fmax v12.4s, v12.4s, v21.4s\n"
"fmax v13.4s, v13.4s, v21.4s\n"
"fmax v14.4s, v14.4s, v21.4s\n"
+ "fmin v15.4s, v15.4s, v20.4s\n"
+ "fmin v16.4s, v16.4s, v20.4s\n"
+ "fmin v17.4s, v17.4s, v20.4s\n"
"fmax v15.4s, v15.4s, v21.4s\n"
"fmax v16.4s, v16.4s, v21.4s\n"
"fmax v17.4s, v17.4s, v21.4s\n"
+ "fmin v18.4s, v18.4s, v20.4s\n"
+ "fmin v19.4s, v19.4s, v20.4s\n"
"fmax v18.4s, v18.4s, v21.4s\n"
"fmax v19.4s, v19.4s, v21.4s\n"
"tbz %x[channel_multiplier], #1, 4f\n"
- "add x20, x12, x13\n"
- "add x21, x11, x13\n"
- "st1 { v12.d }[0], [x20]\n"
- "add x20, x10, x13\n"
- "add x24, x9, x13\n"
- "st1 { v13.d }[0], [x21]\n"
- "add x23, x28, x13\n"
- "add x22, x27, x13\n"
- "st1 { v14.d }[0], [x20]\n"
- "add x21, x26, x13\n"
- "add x20, x25, x13\n"
- "st1 { v15.d }[0], [x24]\n"
- "st1 { v16.d }[0], [x23]\n"
- "add x13, x13, #0x8\n"
- "st1 { v17.d }[0], [x22]\n"
- "st1 { v18.d }[0], [x21]\n"
- "st1 { v19.d }[0], [x20]\n"
+ "add x19, x13, x25\n"
+ "st1 { v12.d }[0], [x19]\n"
+ "add x19, x12, x25\n"
+ "st1 { v13.d }[0], [x19]\n"
+ "add x19, x10, x25\n"
+ "st1 { v14.d }[0], [x19]\n"
+ "add x19, x9, x25\n"
+ "st1 { v15.d }[0], [x19]\n"
+ "add x19, x27, x25\n"
+ "st1 { v16.d }[0], [x19]\n"
+ "add x19, x26, x25\n"
+ "st1 { v17.d }[0], [x19]\n"
+ "add x19, x24, x25\n"
+ "st1 { v18.d }[0], [x19]\n"
+ "add x19, x23, x25\n"
+ "st1 { v19.d }[0], [x19]\n"
+ "add x25, x25, #0x8\n"
"tbz %x[channel_multiplier], #0, 5f\n"
- "add x20, x12, x13\n"
- "add x21, x11, x13\n"
- "st1 { v12.s }[2], [x20]\n"
- "add x20, x10, x13\n"
- "add x24, x9, x13\n"
- "st1 { v13.s }[2], [x21]\n"
- "add x23, x28, x13\n"
- "add x22, x27, x13\n"
- "st1 { v14.s }[2], [x20]\n"
- "add x21, x26, x13\n"
- "add x20, x25, x13\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
- "st1 { v17.s }[2], [x22]\n"
- "st1 { v18.s }[2], [x21]\n"
- "st1 { v19.s }[2], [x20]\n"
+ "add x19, x13, x25\n"
+ "st1 { v12.s }[2], [x19]\n"
+ "add x19, x12, x25\n"
+ "st1 { v13.s }[2], [x19]\n"
+ "add x19, x10, x25\n"
+ "st1 { v14.s }[2], [x19]\n"
+ "add x19, x9, x25\n"
+ "st1 { v15.s }[2], [x19]\n"
+ "add x19, x27, x25\n"
+ "st1 { v16.s }[2], [x19]\n"
+ "add x19, x26, x25\n"
+ "st1 { v17.s }[2], [x19]\n"
+ "add x19, x24, x25\n"
+ "st1 { v18.s }[2], [x19]\n"
+ "add x19, x23, x25\n"
+ "st1 { v19.s }[2], [x19]\n"
"b 5f\n"
"4:" // Output channel oddments: Store: Bit 1: Unset
- "add x20, x12, x13\n"
- "add x21, x11, x13\n"
- "st1 { v12.s }[0], [x20]\n"
- "add x20, x10, x13\n"
- "add x24, x9, x13\n"
- "st1 { v13.s }[0], [x21]\n"
- "add x23, x28, x13\n"
- "add x22, x27, x13\n"
- "st1 { v14.s }[0], [x20]\n"
- "add x21, x26, x13\n"
- "add x20, x25, x13\n"
- "st1 { v15.s }[0], [x24]\n"
- "st1 { v16.s }[0], [x23]\n"
- "st1 { v17.s }[0], [x22]\n"
- "st1 { v18.s }[0], [x21]\n"
- "st1 { v19.s }[0], [x20]\n"
+ "tbz %x[channel_multiplier], #0, 5f\n"
+ "add x19, x13, x25\n"
+ "st1 { v12.s }[0], [x19]\n"
+ "add x19, x12, x25\n"
+ "st1 { v13.s }[0], [x19]\n"
+ "add x19, x10, x25\n"
+ "st1 { v14.s }[0], [x19]\n"
+ "add x19, x9, x25\n"
+ "st1 { v15.s }[0], [x19]\n"
+ "add x19, x27, x25\n"
+ "st1 { v16.s }[0], [x19]\n"
+ "add x19, x26, x25\n"
+ "st1 { v17.s }[0], [x19]\n"
+ "add x19, x24, x25\n"
+ "st1 { v18.s }[0], [x19]\n"
+ "add x19, x23, x25\n"
+ "st1 { v19.s }[0], [x19]\n"
"5:" // Output channel oddments: Store: Bit 1: End
"6:" // End
: [params] "+&r" (params)
: [channel_multiplier] "r" (n_output_channels), [clamps] "r" (minmax_vals), [inptrs] "r" (inptrs), [outptrs] "r" (outptrs)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
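Each of these kernels splits its per-channel loop the same way: an lsr by two computes the number of full four-channel vector iterations, and a tst against 0x3 decides whether the oddments path runs afterwards. In outline (illustrative C++, not the generated code):

#include <cstdint>

void kernel_outline(uint64_t n_channels)
{
    for (uint64_t i = 0; i < (n_channels >> 2); ++i) {
        // "complete vector" iteration: process four channels with q registers
    }
    if (n_channels & 0x3) {
        // oddments: handle the one to three remaining channels
    }
}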
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
index c28f29c4f9..46210e2964 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,804 +44,807 @@ void a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
__asm__ __volatile__(
"ld1r { v11.4s }, [%x[minmax_vals]]\n"
- "lsr x11, %x[n_output_channels], #0x2\n"
- "add x20, %x[minmax_vals], #0x4\n"
- "ld1r { v10.4s }, [x20]\n"
"mov x10, #0x0\n"
- "cbz x11, 8f\n"
+ "add x19, %x[minmax_vals], #0x4\n"
+ "ld1r { v10.4s }, [x19]\n"
+ "lsr x9, %x[n_output_channels], #0x2\n"
+ "cbz x9, 8f\n"
"1:" // Output channel loop
- "movi v31.16b, #0x0\n"
+ "movi v16.16b, #0x0\n"
"cbz %x[bias], 2f\n"
- "lsl x20, x10, #0x2\n"
- "ldr q31, [%x[bias], x20]\n"
+ "lsl x19, x10, #0x2\n"
+ "ldr q16, [%x[bias], x19]\n"
"2:" // Output channel loop: Load bias: Done
- "ldr q9, [%x[weights], #0x0]\n"
- "mov x20, %x[inptrs]\n"
- "ldp x23, x9, [x20], #0x10\n"
- "lsr x21, %x[kernel_points], #0x1\n"
- "ldr q8, [x23, #0x0]\n"
- "ldr q7, [x23, #0x10]\n"
- "mov v16.16b, v31.16b\n"
- "mov v17.16b, v31.16b\n"
- "ldr q6, [x9, #0x0]\n"
- "ldr q5, [x9, #0x10]\n"
- "mov v18.16b, v31.16b\n"
- "mov v19.16b, v31.16b\n"
- "mov v20.16b, v31.16b\n"
- "mov v21.16b, v31.16b\n"
+ "mov v9.16b, v16.16b\n"
+ "ldr q8, [%x[weights], #0x0]\n"
+ "mov x19, %x[inptrs]\n"
+ "mov v7.16b, v16.16b\n"
+ "ldp x24, x28, [x19], #0x10\n"
+ "lsr x20, %x[kernel_points], #0x1\n"
+ "mov v6.16b, v16.16b\n"
+ "ldr q5, [x24, #0x0]\n"
+ "mov v4.16b, v16.16b\n"
"add %x[weights], %x[weights], #0x10\n"
- "mov v22.16b, v31.16b\n"
- "mov v23.16b, v31.16b\n"
- "mov v24.16b, v31.16b\n"
- "mov v25.16b, v31.16b\n"
- "mov v26.16b, v31.16b\n"
- "mov v27.16b, v31.16b\n"
- "mov v28.16b, v31.16b\n"
- "mov v29.16b, v31.16b\n"
- "mov v30.16b, v31.16b\n"
- "mov v31.16b, v31.16b\n"
- "cbz x21, 6f\n"
- "ldr q4, [%x[weights], #0x0]\n"
- "ldp x23, x9, [x20], #0x10\n"
- "subs x21, x21, #0x1\n"
+ "mov v3.16b, v16.16b\n"
+ "ldr q2, [x24, #0x10]\n"
+ "mov v1.16b, v16.16b\n"
+ "ldr q0, [x28, #0x0]\n"
+ "mov v31.16b, v16.16b\n"
+ "ldr q30, [x28, #0x10]\n"
+ "mov v29.16b, v16.16b\n"
+ "mov v28.16b, v16.16b\n"
+ "mov v27.16b, v16.16b\n"
+ "mov v26.16b, v16.16b\n"
+ "mov v25.16b, v16.16b\n"
+ "mov v24.16b, v16.16b\n"
+ "mov v23.16b, v16.16b\n"
+ "mov v22.16b, v16.16b\n"
+ "mov v21.16b, v16.16b\n"
+ "cbz x20, 6f\n"
+ "ldp x24, x28, [x19], #0x10\n"
+ "ldr q20, [%x[weights], #0x0]\n"
+ "subs x20, x20, #0x1\n"
"add %x[weights], %x[weights], #0x10\n"
- "ldr q3, [x23, #0x0]\n"
- "ldr q2, [x23, #0x10]\n"
- "ldr q1, [x9, #0x0]\n"
- "ldr q0, [x9, #0x10]\n"
+ "ldr q19, [x24, #0x0]\n"
+ "ldr q18, [x24, #0x10]\n"
+ "ldr q17, [x28, #0x0]\n"
+ "ldr q16, [x28, #0x10]\n"
"beq 4f\n"
"3:" // Output channel loop: Kernel loop
- "ldp x23, x9, [x20], #0x10\n"
- "fmla v16.4s, v9.4s, v8.s[0]\n"
- "fmla v17.4s, v9.4s, v8.s[1]\n"
- "subs x21, x21, #0x1\n"
- "fmla v18.4s, v9.4s, v8.s[2]\n"
- "fmla v19.4s, v9.4s, v8.s[3]\n"
- "ldr q8, [x23, #0x0]\n"
- "fmla v20.4s, v9.4s, v7.s[0]\n"
- "fmla v21.4s, v9.4s, v7.s[1]\n"
- "fmla v22.4s, v9.4s, v7.s[2]\n"
- "fmla v23.4s, v9.4s, v7.s[3]\n"
- "ldr q7, [x23, #0x10]\n"
- "fmla v24.4s, v9.4s, v6.s[0]\n"
- "fmla v25.4s, v9.4s, v6.s[1]\n"
- "fmla v26.4s, v9.4s, v6.s[2]\n"
- "fmla v27.4s, v9.4s, v6.s[3]\n"
- "ldr q6, [x9, #0x0]\n"
- "fmla v28.4s, v9.4s, v5.s[0]\n"
- "fmla v29.4s, v9.4s, v5.s[1]\n"
- "fmla v30.4s, v9.4s, v5.s[2]\n"
- "fmla v31.4s, v9.4s, v5.s[3]\n"
- "ldr q5, [x9, #0x10]\n"
- "ldr q9, [%x[weights], #0x0]\n"
- "ldp x23, x9, [x20], #0x10\n"
- "fmla v16.4s, v4.4s, v3.s[0]\n"
- "fmla v17.4s, v4.4s, v3.s[1]\n"
- "fmla v18.4s, v4.4s, v3.s[2]\n"
- "fmla v19.4s, v4.4s, v3.s[3]\n"
- "ldr q3, [x23, #0x0]\n"
- "fmla v20.4s, v4.4s, v2.s[0]\n"
- "fmla v21.4s, v4.4s, v2.s[1]\n"
- "fmla v22.4s, v4.4s, v2.s[2]\n"
- "fmla v23.4s, v4.4s, v2.s[3]\n"
- "ldr q2, [x23, #0x10]\n"
- "fmla v24.4s, v4.4s, v1.s[0]\n"
- "fmla v25.4s, v4.4s, v1.s[1]\n"
- "fmla v26.4s, v4.4s, v1.s[2]\n"
- "fmla v27.4s, v4.4s, v1.s[3]\n"
- "ldr q1, [x9, #0x0]\n"
- "fmla v28.4s, v4.4s, v0.s[0]\n"
- "fmla v29.4s, v4.4s, v0.s[1]\n"
- "fmla v30.4s, v4.4s, v0.s[2]\n"
- "fmla v31.4s, v4.4s, v0.s[3]\n"
- "ldr q0, [x9, #0x10]\n"
- "ldr q4, [%x[weights], #0x10]\n"
+ "fmla v9.4s, v8.4s, v5.s[0]\n"
+ "ldp x24, x28, [x19], #0x10\n"
+ "subs x20, x20, #0x1\n"
+ "fmla v7.4s, v8.4s, v5.s[1]\n"
+ "fmla v6.4s, v8.4s, v5.s[2]\n"
+ "fmla v4.4s, v8.4s, v5.s[3]\n"
+ "ldr q5, [x24, #0x0]\n"
+ "fmla v3.4s, v8.4s, v2.s[0]\n"
+ "fmla v1.4s, v8.4s, v2.s[1]\n"
+ "fmla v31.4s, v8.4s, v2.s[2]\n"
+ "fmla v29.4s, v8.4s, v2.s[3]\n"
+ "ldr q2, [x24, #0x10]\n"
+ "fmla v28.4s, v8.4s, v0.s[0]\n"
+ "fmla v27.4s, v8.4s, v0.s[1]\n"
+ "fmla v26.4s, v8.4s, v0.s[2]\n"
+ "fmla v25.4s, v8.4s, v0.s[3]\n"
+ "ldr q0, [x28, #0x0]\n"
+ "fmla v24.4s, v8.4s, v30.s[0]\n"
+ "fmla v23.4s, v8.4s, v30.s[1]\n"
+ "fmla v22.4s, v8.4s, v30.s[2]\n"
+ "fmla v21.4s, v8.4s, v30.s[3]\n"
+ "ldr q30, [x28, #0x10]\n"
+ "fmla v9.4s, v20.4s, v19.s[0]\n"
+ "ldr q8, [%x[weights], #0x0]\n"
+ "fmla v7.4s, v20.4s, v19.s[1]\n"
+ "ldp x24, x28, [x19], #0x10\n"
+ "fmla v6.4s, v20.4s, v19.s[2]\n"
+ "fmla v4.4s, v20.4s, v19.s[3]\n"
+ "ldr q19, [x24, #0x0]\n"
+ "fmla v3.4s, v20.4s, v18.s[0]\n"
+ "fmla v1.4s, v20.4s, v18.s[1]\n"
+ "fmla v31.4s, v20.4s, v18.s[2]\n"
+ "fmla v29.4s, v20.4s, v18.s[3]\n"
+ "ldr q18, [x24, #0x10]\n"
+ "fmla v28.4s, v20.4s, v17.s[0]\n"
+ "fmla v27.4s, v20.4s, v17.s[1]\n"
+ "fmla v26.4s, v20.4s, v17.s[2]\n"
+ "fmla v25.4s, v20.4s, v17.s[3]\n"
+ "ldr q17, [x28, #0x0]\n"
+ "fmla v24.4s, v20.4s, v16.s[0]\n"
+ "fmla v23.4s, v20.4s, v16.s[1]\n"
+ "fmla v22.4s, v20.4s, v16.s[2]\n"
+ "fmla v21.4s, v20.4s, v16.s[3]\n"
+ "ldr q16, [x28, #0x10]\n"
+ "ldr q20, [%x[weights], #0x10]\n"
"add %x[weights], %x[weights], #0x20\n"
"bgt 3b\n"
"4:" // Output channel loop: Kernel loop tail
"tbnz %x[kernel_points], #0, 5f\n"
- "fmla v16.4s, v9.4s, v8.s[0]\n"
- "fmla v17.4s, v9.4s, v8.s[1]\n"
- "lsl x28, x10, #0x2\n"
- "ldr x20, [%x[outptrs], #0x0]\n"
- "fmla v18.4s, v9.4s, v8.s[2]\n"
- "fmla v19.4s, v9.4s, v8.s[3]\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "fmla v20.4s, v9.4s, v7.s[0]\n"
- "fmla v21.4s, v9.4s, v7.s[1]\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "fmla v22.4s, v9.4s, v7.s[2]\n"
- "fmla v23.4s, v9.4s, v7.s[3]\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "fmla v24.4s, v9.4s, v6.s[0]\n"
- "fmla v25.4s, v9.4s, v6.s[1]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
- "fmla v26.4s, v9.4s, v6.s[2]\n"
- "fmla v27.4s, v9.4s, v6.s[3]\n"
- "fmla v28.4s, v9.4s, v5.s[0]\n"
- "fmla v29.4s, v9.4s, v5.s[1]\n"
- "fmla v30.4s, v9.4s, v5.s[2]\n"
- "fmla v31.4s, v9.4s, v5.s[3]\n"
- "fmla v16.4s, v4.4s, v3.s[0]\n"
- "fmla v17.4s, v4.4s, v3.s[1]\n"
- "fmin v16.4s, v16.4s, v10.4s\n"
- "fmla v18.4s, v4.4s, v3.s[2]\n"
- "fmla v19.4s, v4.4s, v3.s[3]\n"
- "fmin v17.4s, v17.4s, v10.4s\n"
- "fmla v20.4s, v4.4s, v2.s[0]\n"
- "fmla v21.4s, v4.4s, v2.s[1]\n"
- "fmin v18.4s, v18.4s, v10.4s\n"
- "fmla v22.4s, v4.4s, v2.s[2]\n"
- "fmla v23.4s, v4.4s, v2.s[3]\n"
- "fmin v19.4s, v19.4s, v10.4s\n"
- "fmla v24.4s, v4.4s, v1.s[0]\n"
- "fmla v25.4s, v4.4s, v1.s[1]\n"
- "fmin v20.4s, v20.4s, v10.4s\n"
- "fmla v26.4s, v4.4s, v1.s[2]\n"
- "fmla v27.4s, v4.4s, v1.s[3]\n"
- "fmin v21.4s, v21.4s, v10.4s\n"
- "fmla v28.4s, v4.4s, v0.s[0]\n"
- "fmla v29.4s, v4.4s, v0.s[1]\n"
- "fmin v22.4s, v22.4s, v10.4s\n"
- "fmla v30.4s, v4.4s, v0.s[2]\n"
- "fmla v31.4s, v4.4s, v0.s[3]\n"
- "fmin v23.4s, v23.4s, v10.4s\n"
- "fmax v16.4s, v16.4s, v11.4s\n"
- "fmax v17.4s, v17.4s, v11.4s\n"
- "str q16, [x20, x28]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "fmax v18.4s, v18.4s, v11.4s\n"
- "fmax v19.4s, v19.4s, v11.4s\n"
- "str q17, [x21, x28]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
- "fmax v20.4s, v20.4s, v11.4s\n"
- "fmax v21.4s, v21.4s, v11.4s\n"
- "str q18, [x22, x28]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
- "fmax v22.4s, v22.4s, v11.4s\n"
- "fmax v23.4s, v23.4s, v11.4s\n"
- "str q19, [x23, x28]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
- "fmin v24.4s, v24.4s, v10.4s\n"
- "fmin v25.4s, v25.4s, v10.4s\n"
- "str q20, [x24, x28]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
- "fmin v26.4s, v26.4s, v10.4s\n"
- "fmin v27.4s, v27.4s, v10.4s\n"
- "str q21, [x25, x28]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
- "fmin v28.4s, v28.4s, v10.4s\n"
- "fmin v29.4s, v29.4s, v10.4s\n"
- "str q22, [x26, x28]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
- "fmin v30.4s, v30.4s, v10.4s\n"
+ "fmla v9.4s, v8.4s, v5.s[0]\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "fmla v7.4s, v8.4s, v5.s[1]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "lsl x27, x10, #0x2\n"
+ "fmla v6.4s, v8.4s, v5.s[2]\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "fmla v4.4s, v8.4s, v5.s[3]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
+ "fmla v3.4s, v8.4s, v2.s[0]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
+ "fmla v1.4s, v8.4s, v2.s[1]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
+ "fmla v31.4s, v8.4s, v2.s[2]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
+ "fmla v29.4s, v8.4s, v2.s[3]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
+ "fmla v28.4s, v8.4s, v0.s[0]\n"
+ "fmla v27.4s, v8.4s, v0.s[1]\n"
+ "fmla v26.4s, v8.4s, v0.s[2]\n"
+ "fmla v25.4s, v8.4s, v0.s[3]\n"
+ "fmla v24.4s, v8.4s, v30.s[0]\n"
+ "fmla v23.4s, v8.4s, v30.s[1]\n"
+ "fmla v22.4s, v8.4s, v30.s[2]\n"
+ "fmla v21.4s, v8.4s, v30.s[3]\n"
+ "fmla v9.4s, v20.4s, v19.s[0]\n"
+ "fmla v7.4s, v20.4s, v19.s[1]\n"
+ "fmla v6.4s, v20.4s, v19.s[2]\n"
+ "fmla v4.4s, v20.4s, v19.s[3]\n"
+ "fmla v3.4s, v20.4s, v18.s[0]\n"
+ "fmla v1.4s, v20.4s, v18.s[1]\n"
+ "fmla v31.4s, v20.4s, v18.s[2]\n"
+ "fmla v29.4s, v20.4s, v18.s[3]\n"
+ "fmla v28.4s, v20.4s, v17.s[0]\n"
+ "fmla v27.4s, v20.4s, v17.s[1]\n"
+ "fmla v26.4s, v20.4s, v17.s[2]\n"
+ "fmla v25.4s, v20.4s, v17.s[3]\n"
+ "fmla v24.4s, v20.4s, v16.s[0]\n"
+ "fmla v23.4s, v20.4s, v16.s[1]\n"
+ "fmla v22.4s, v20.4s, v16.s[2]\n"
+ "fmla v21.4s, v20.4s, v16.s[3]\n"
+ "fmin v9.4s, v9.4s, v10.4s\n"
+ "fmin v7.4s, v7.4s, v10.4s\n"
+ "fmin v6.4s, v6.4s, v10.4s\n"
+ "fmax v9.4s, v9.4s, v11.4s\n"
+ "str q9, [x19, x27]\n"
+ "fmax v7.4s, v7.4s, v11.4s\n"
+ "fmax v6.4s, v6.4s, v11.4s\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "fmin v4.4s, v4.4s, v10.4s\n"
+ "str q7, [x20, x27]\n"
+ "fmin v3.4s, v3.4s, v10.4s\n"
+ "fmin v1.4s, v1.4s, v10.4s\n"
+ "str q6, [x21, x27]\n"
+ "fmax v4.4s, v4.4s, v11.4s\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
"fmin v31.4s, v31.4s, v10.4s\n"
- "str q23, [x27, x28]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
- "fmax v24.4s, v24.4s, v11.4s\n"
- "fmax v25.4s, v25.4s, v11.4s\n"
- "str q24, [x20, x28]\n"
- "fmax v26.4s, v26.4s, v11.4s\n"
- "fmax v27.4s, v27.4s, v11.4s\n"
- "str q25, [x21, x28]\n"
- "fmax v28.4s, v28.4s, v11.4s\n"
- "fmax v29.4s, v29.4s, v11.4s\n"
- "str q26, [x22, x28]\n"
- "fmax v30.4s, v30.4s, v11.4s\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
+ "fmax v3.4s, v3.4s, v11.4s\n"
+ "str q4, [x22, x27]\n"
+ "fmax v1.4s, v1.4s, v11.4s\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
"fmax v31.4s, v31.4s, v11.4s\n"
- "str q27, [x23, x28]\n"
- "str q28, [x24, x28]\n"
- "str q29, [x25, x28]\n"
- "str q30, [x26, x28]\n"
- "str q31, [x27, x28]\n"
+ "str q3, [x23, x27]\n"
+ "fmin v29.4s, v29.4s, v10.4s\n"
+ "str q1, [x24, x27]\n"
+ "fmin v28.4s, v28.4s, v10.4s\n"
+ "str q31, [x25, x27]\n"
+ "fmin v27.4s, v27.4s, v10.4s\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
+ "fmax v29.4s, v29.4s, v11.4s\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
+ "fmax v28.4s, v28.4s, v11.4s\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
+ "fmax v27.4s, v27.4s, v11.4s\n"
+ "str q29, [x26, x27]\n"
+ "fmin v26.4s, v26.4s, v10.4s\n"
+ "str q28, [x19, x27]\n"
+ "fmin v25.4s, v25.4s, v10.4s\n"
+ "str q27, [x20, x27]\n"
+ "fmin v24.4s, v24.4s, v10.4s\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
+ "fmax v26.4s, v26.4s, v11.4s\n"
+ "str q26, [x21, x27]\n"
+ "fmax v25.4s, v25.4s, v11.4s\n"
+ "fmax v24.4s, v24.4s, v11.4s\n"
+ "str q25, [x22, x27]\n"
+ "fmin v23.4s, v23.4s, v10.4s\n"
+ "fmin v22.4s, v22.4s, v10.4s\n"
+ "str q24, [x23, x27]\n"
+ "fmin v21.4s, v21.4s, v10.4s\n"
+ "fmax v23.4s, v23.4s, v11.4s\n"
+ "str q23, [x24, x27]\n"
+ "fmax v22.4s, v22.4s, v11.4s\n"
+ "fmax v21.4s, v21.4s, v11.4s\n"
+ "str q22, [x25, x27]\n"
+ "str q21, [x26, x27]\n"
"b 7f\n"
"5:" // Output channel loop: Odd tail
- "fmla v16.4s, v9.4s, v8.s[0]\n"
- "fmla v17.4s, v9.4s, v8.s[1]\n"
- "ldp x23, x9, [x20], #0x10\n"
- "lsl x28, x10, #0x2\n"
- "fmla v18.4s, v9.4s, v8.s[2]\n"
- "fmla v19.4s, v9.4s, v8.s[3]\n"
- "ldr q8, [x23, #0x0]\n"
- "ldr x20, [%x[outptrs], #0x0]\n"
- "fmla v20.4s, v9.4s, v7.s[0]\n"
- "fmla v21.4s, v9.4s, v7.s[1]\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "fmla v22.4s, v9.4s, v7.s[2]\n"
- "fmla v23.4s, v9.4s, v7.s[3]\n"
- "ldr q7, [x23, #0x10]\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
- "fmla v24.4s, v9.4s, v6.s[0]\n"
- "fmla v25.4s, v9.4s, v6.s[1]\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
- "fmla v26.4s, v9.4s, v6.s[2]\n"
- "fmla v27.4s, v9.4s, v6.s[3]\n"
- "ldr q6, [x9, #0x0]\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "fmla v28.4s, v9.4s, v5.s[0]\n"
- "fmla v29.4s, v9.4s, v5.s[1]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
- "fmla v30.4s, v9.4s, v5.s[2]\n"
- "fmla v31.4s, v9.4s, v5.s[3]\n"
- "ldr q9, [%x[weights], #0x0]\n"
- "ldr q5, [x9, #0x10]\n"
- "fmla v16.4s, v4.4s, v3.s[0]\n"
- "fmla v17.4s, v4.4s, v3.s[1]\n"
+ "fmla v9.4s, v8.4s, v5.s[0]\n"
+ "ldp x24, x28, [x19], #0x10\n"
+ "lsl x27, x10, #0x2\n"
+ "fmla v7.4s, v8.4s, v5.s[1]\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "fmla v6.4s, v8.4s, v5.s[2]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "fmla v4.4s, v8.4s, v5.s[3]\n"
+ "ldr q5, [x24, #0x0]\n"
+ "fmla v3.4s, v8.4s, v2.s[0]\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "fmla v1.4s, v8.4s, v2.s[1]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
+ "fmla v31.4s, v8.4s, v2.s[2]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
+ "fmla v29.4s, v8.4s, v2.s[3]\n"
+ "ldr q2, [x24, #0x10]\n"
+ "fmla v28.4s, v8.4s, v0.s[0]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
+ "fmla v27.4s, v8.4s, v0.s[1]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
+ "fmla v26.4s, v8.4s, v0.s[2]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
+ "fmla v25.4s, v8.4s, v0.s[3]\n"
+ "ldr q0, [x28, #0x0]\n"
+ "fmla v24.4s, v8.4s, v30.s[0]\n"
+ "fmla v23.4s, v8.4s, v30.s[1]\n"
+ "fmla v22.4s, v8.4s, v30.s[2]\n"
+ "fmla v21.4s, v8.4s, v30.s[3]\n"
+ "ldr q30, [x28, #0x10]\n"
+ "fmla v9.4s, v20.4s, v19.s[0]\n"
+ "ldr q8, [%x[weights], #0x0]\n"
"add %x[weights], %x[weights], #0x10\n"
- "fmla v18.4s, v4.4s, v3.s[2]\n"
- "fmla v19.4s, v4.4s, v3.s[3]\n"
- "fmla v20.4s, v4.4s, v2.s[0]\n"
- "fmla v21.4s, v4.4s, v2.s[1]\n"
- "fmla v22.4s, v4.4s, v2.s[2]\n"
- "fmla v23.4s, v4.4s, v2.s[3]\n"
- "fmla v24.4s, v4.4s, v1.s[0]\n"
- "fmla v25.4s, v4.4s, v1.s[1]\n"
- "fmla v26.4s, v4.4s, v1.s[2]\n"
- "fmla v27.4s, v4.4s, v1.s[3]\n"
- "fmla v28.4s, v4.4s, v0.s[0]\n"
- "fmla v29.4s, v4.4s, v0.s[1]\n"
- "fmla v30.4s, v4.4s, v0.s[2]\n"
- "fmla v31.4s, v4.4s, v0.s[3]\n"
- "fmla v16.4s, v9.4s, v8.s[0]\n"
- "fmla v17.4s, v9.4s, v8.s[1]\n"
- "fmin v16.4s, v16.4s, v10.4s\n"
- "fmla v18.4s, v9.4s, v8.s[2]\n"
- "fmla v19.4s, v9.4s, v8.s[3]\n"
- "fmin v17.4s, v17.4s, v10.4s\n"
- "fmla v20.4s, v9.4s, v7.s[0]\n"
- "fmla v21.4s, v9.4s, v7.s[1]\n"
- "fmin v18.4s, v18.4s, v10.4s\n"
- "fmla v22.4s, v9.4s, v7.s[2]\n"
- "fmla v23.4s, v9.4s, v7.s[3]\n"
- "fmin v19.4s, v19.4s, v10.4s\n"
- "fmla v24.4s, v9.4s, v6.s[0]\n"
- "fmla v25.4s, v9.4s, v6.s[1]\n"
- "fmin v20.4s, v20.4s, v10.4s\n"
- "fmla v26.4s, v9.4s, v6.s[2]\n"
- "fmla v27.4s, v9.4s, v6.s[3]\n"
- "fmin v21.4s, v21.4s, v10.4s\n"
- "fmla v28.4s, v9.4s, v5.s[0]\n"
- "fmla v29.4s, v9.4s, v5.s[1]\n"
- "fmin v22.4s, v22.4s, v10.4s\n"
- "fmla v30.4s, v9.4s, v5.s[2]\n"
- "fmla v31.4s, v9.4s, v5.s[3]\n"
- "fmin v23.4s, v23.4s, v10.4s\n"
- "fmax v16.4s, v16.4s, v11.4s\n"
- "fmax v17.4s, v17.4s, v11.4s\n"
- "str q16, [x20, x28]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "fmax v18.4s, v18.4s, v11.4s\n"
- "fmax v19.4s, v19.4s, v11.4s\n"
- "str q17, [x21, x28]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
- "fmax v20.4s, v20.4s, v11.4s\n"
- "fmax v21.4s, v21.4s, v11.4s\n"
- "str q18, [x22, x28]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
- "fmax v22.4s, v22.4s, v11.4s\n"
- "fmax v23.4s, v23.4s, v11.4s\n"
- "str q19, [x23, x28]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
- "fmin v24.4s, v24.4s, v10.4s\n"
- "fmin v25.4s, v25.4s, v10.4s\n"
- "str q20, [x24, x28]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
- "fmin v26.4s, v26.4s, v10.4s\n"
- "fmin v27.4s, v27.4s, v10.4s\n"
- "str q21, [x25, x28]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
- "fmin v28.4s, v28.4s, v10.4s\n"
- "fmin v29.4s, v29.4s, v10.4s\n"
- "str q22, [x26, x28]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
- "fmin v30.4s, v30.4s, v10.4s\n"
+ "fmla v7.4s, v20.4s, v19.s[1]\n"
+ "fmla v6.4s, v20.4s, v19.s[2]\n"
+ "fmla v4.4s, v20.4s, v19.s[3]\n"
+ "fmla v3.4s, v20.4s, v18.s[0]\n"
+ "fmla v1.4s, v20.4s, v18.s[1]\n"
+ "fmla v31.4s, v20.4s, v18.s[2]\n"
+ "fmla v29.4s, v20.4s, v18.s[3]\n"
+ "fmla v28.4s, v20.4s, v17.s[0]\n"
+ "fmla v27.4s, v20.4s, v17.s[1]\n"
+ "fmla v26.4s, v20.4s, v17.s[2]\n"
+ "fmla v25.4s, v20.4s, v17.s[3]\n"
+ "fmla v24.4s, v20.4s, v16.s[0]\n"
+ "fmla v23.4s, v20.4s, v16.s[1]\n"
+ "fmla v22.4s, v20.4s, v16.s[2]\n"
+ "fmla v21.4s, v20.4s, v16.s[3]\n"
+ "fmla v9.4s, v8.4s, v5.s[0]\n"
+ "fmla v7.4s, v8.4s, v5.s[1]\n"
+ "fmla v6.4s, v8.4s, v5.s[2]\n"
+ "fmla v4.4s, v8.4s, v5.s[3]\n"
+ "fmla v3.4s, v8.4s, v2.s[0]\n"
+ "fmla v1.4s, v8.4s, v2.s[1]\n"
+ "fmla v31.4s, v8.4s, v2.s[2]\n"
+ "fmla v29.4s, v8.4s, v2.s[3]\n"
+ "fmla v28.4s, v8.4s, v0.s[0]\n"
+ "fmla v27.4s, v8.4s, v0.s[1]\n"
+ "fmla v26.4s, v8.4s, v0.s[2]\n"
+ "fmla v25.4s, v8.4s, v0.s[3]\n"
+ "fmla v24.4s, v8.4s, v30.s[0]\n"
+ "fmla v23.4s, v8.4s, v30.s[1]\n"
+ "fmla v22.4s, v8.4s, v30.s[2]\n"
+ "fmla v21.4s, v8.4s, v30.s[3]\n"
+ "fmin v9.4s, v9.4s, v10.4s\n"
+ "fmin v7.4s, v7.4s, v10.4s\n"
+ "fmin v6.4s, v6.4s, v10.4s\n"
+ "fmax v9.4s, v9.4s, v11.4s\n"
+ "str q9, [x19, x27]\n"
+ "fmax v7.4s, v7.4s, v11.4s\n"
+ "fmax v6.4s, v6.4s, v11.4s\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "fmin v4.4s, v4.4s, v10.4s\n"
+ "str q7, [x20, x27]\n"
+ "fmin v3.4s, v3.4s, v10.4s\n"
+ "fmin v1.4s, v1.4s, v10.4s\n"
+ "str q6, [x21, x27]\n"
+ "fmax v4.4s, v4.4s, v11.4s\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
"fmin v31.4s, v31.4s, v10.4s\n"
- "str q23, [x27, x28]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
- "fmax v24.4s, v24.4s, v11.4s\n"
- "fmax v25.4s, v25.4s, v11.4s\n"
- "str q24, [x20, x28]\n"
- "fmax v26.4s, v26.4s, v11.4s\n"
- "fmax v27.4s, v27.4s, v11.4s\n"
- "str q25, [x21, x28]\n"
- "fmax v28.4s, v28.4s, v11.4s\n"
- "fmax v29.4s, v29.4s, v11.4s\n"
- "str q26, [x22, x28]\n"
- "fmax v30.4s, v30.4s, v11.4s\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
+ "fmax v3.4s, v3.4s, v11.4s\n"
+ "str q4, [x22, x27]\n"
+ "fmax v1.4s, v1.4s, v11.4s\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
"fmax v31.4s, v31.4s, v11.4s\n"
- "str q27, [x23, x28]\n"
- "str q28, [x24, x28]\n"
- "str q29, [x25, x28]\n"
- "str q30, [x26, x28]\n"
- "str q31, [x27, x28]\n"
- "b 7f\n"
- "6:" // Output channel loop: Single kernel point
- "fmla v16.4s, v9.4s, v8.s[0]\n"
- "fmla v17.4s, v9.4s, v8.s[1]\n"
- "fmin v16.4s, v16.4s, v10.4s\n"
- "lsl x28, x10, #0x2\n"
- "fmla v18.4s, v9.4s, v8.s[2]\n"
- "fmla v19.4s, v9.4s, v8.s[3]\n"
- "fmin v17.4s, v17.4s, v10.4s\n"
- "ldr x20, [%x[outptrs], #0x0]\n"
- "fmla v20.4s, v9.4s, v7.s[0]\n"
- "fmla v21.4s, v9.4s, v7.s[1]\n"
- "fmin v18.4s, v18.4s, v10.4s\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
- "fmla v22.4s, v9.4s, v7.s[2]\n"
- "fmla v23.4s, v9.4s, v7.s[3]\n"
- "fmin v19.4s, v19.4s, v10.4s\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "fmla v24.4s, v9.4s, v6.s[0]\n"
- "fmla v25.4s, v9.4s, v6.s[1]\n"
- "fmin v20.4s, v20.4s, v10.4s\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
- "fmla v26.4s, v9.4s, v6.s[2]\n"
- "fmla v27.4s, v9.4s, v6.s[3]\n"
- "fmin v21.4s, v21.4s, v10.4s\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "fmla v28.4s, v9.4s, v5.s[0]\n"
- "fmla v29.4s, v9.4s, v5.s[1]\n"
- "fmin v22.4s, v22.4s, v10.4s\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
- "fmla v30.4s, v9.4s, v5.s[2]\n"
- "fmla v31.4s, v9.4s, v5.s[3]\n"
+ "str q3, [x23, x27]\n"
+ "fmin v29.4s, v29.4s, v10.4s\n"
+ "str q1, [x24, x27]\n"
+ "fmin v28.4s, v28.4s, v10.4s\n"
+ "str q31, [x25, x27]\n"
+ "fmin v27.4s, v27.4s, v10.4s\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
+ "fmax v29.4s, v29.4s, v11.4s\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
+ "fmax v28.4s, v28.4s, v11.4s\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
+ "fmax v27.4s, v27.4s, v11.4s\n"
+ "str q29, [x26, x27]\n"
+ "fmin v26.4s, v26.4s, v10.4s\n"
+ "str q28, [x19, x27]\n"
+ "fmin v25.4s, v25.4s, v10.4s\n"
+ "str q27, [x20, x27]\n"
+ "fmin v24.4s, v24.4s, v10.4s\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
+ "fmax v26.4s, v26.4s, v11.4s\n"
+ "str q26, [x21, x27]\n"
+ "fmax v25.4s, v25.4s, v11.4s\n"
+ "fmax v24.4s, v24.4s, v11.4s\n"
+ "str q25, [x22, x27]\n"
"fmin v23.4s, v23.4s, v10.4s\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
- "fmax v16.4s, v16.4s, v11.4s\n"
- "fmax v17.4s, v17.4s, v11.4s\n"
- "str q16, [x20, x28]\n"
- "fmax v18.4s, v18.4s, v11.4s\n"
- "fmax v19.4s, v19.4s, v11.4s\n"
- "str q17, [x21, x28]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "fmax v20.4s, v20.4s, v11.4s\n"
- "fmax v21.4s, v21.4s, v11.4s\n"
- "str q18, [x22, x28]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
- "fmax v22.4s, v22.4s, v11.4s\n"
+ "fmin v22.4s, v22.4s, v10.4s\n"
+ "str q24, [x23, x27]\n"
+ "fmin v21.4s, v21.4s, v10.4s\n"
"fmax v23.4s, v23.4s, v11.4s\n"
- "str q19, [x23, x28]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
- "fmin v24.4s, v24.4s, v10.4s\n"
- "fmin v25.4s, v25.4s, v10.4s\n"
- "str q20, [x24, x28]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
- "fmin v26.4s, v26.4s, v10.4s\n"
- "fmin v27.4s, v27.4s, v10.4s\n"
- "str q21, [x25, x28]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
- "fmin v28.4s, v28.4s, v10.4s\n"
- "fmin v29.4s, v29.4s, v10.4s\n"
- "str q22, [x26, x28]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
- "fmin v30.4s, v30.4s, v10.4s\n"
+ "str q23, [x24, x27]\n"
+ "fmax v22.4s, v22.4s, v11.4s\n"
+ "fmax v21.4s, v21.4s, v11.4s\n"
+ "str q22, [x25, x27]\n"
+ "str q21, [x26, x27]\n"
+ "b 7f\n"
+ "6:" // Output channel loop: Single kernel point
+ "fmla v9.4s, v8.4s, v5.s[0]\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "lsl x27, x10, #0x2\n"
+ "fmla v7.4s, v8.4s, v5.s[1]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "fmla v6.4s, v8.4s, v5.s[2]\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "fmla v4.4s, v8.4s, v5.s[3]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
+ "fmla v3.4s, v8.4s, v2.s[0]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
+ "fmla v1.4s, v8.4s, v2.s[1]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
+ "fmla v31.4s, v8.4s, v2.s[2]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
+ "fmla v29.4s, v8.4s, v2.s[3]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
+ "fmla v28.4s, v8.4s, v0.s[0]\n"
+ "fmla v27.4s, v8.4s, v0.s[1]\n"
+ "fmla v26.4s, v8.4s, v0.s[2]\n"
+ "fmla v25.4s, v8.4s, v0.s[3]\n"
+ "fmla v24.4s, v8.4s, v30.s[0]\n"
+ "fmla v23.4s, v8.4s, v30.s[1]\n"
+ "fmla v22.4s, v8.4s, v30.s[2]\n"
+ "fmla v21.4s, v8.4s, v30.s[3]\n"
+ "fmin v9.4s, v9.4s, v10.4s\n"
+ "fmin v7.4s, v7.4s, v10.4s\n"
+ "fmin v6.4s, v6.4s, v10.4s\n"
+ "fmax v9.4s, v9.4s, v11.4s\n"
+ "str q9, [x19, x27]\n"
+ "fmax v7.4s, v7.4s, v11.4s\n"
+ "fmax v6.4s, v6.4s, v11.4s\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "fmin v4.4s, v4.4s, v10.4s\n"
+ "str q7, [x20, x27]\n"
+ "fmin v3.4s, v3.4s, v10.4s\n"
+ "fmin v1.4s, v1.4s, v10.4s\n"
+ "str q6, [x21, x27]\n"
+ "fmax v4.4s, v4.4s, v11.4s\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
"fmin v31.4s, v31.4s, v10.4s\n"
- "str q23, [x27, x28]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
- "fmax v24.4s, v24.4s, v11.4s\n"
- "fmax v25.4s, v25.4s, v11.4s\n"
- "str q24, [x20, x28]\n"
- "fmax v26.4s, v26.4s, v11.4s\n"
- "fmax v27.4s, v27.4s, v11.4s\n"
- "str q25, [x21, x28]\n"
- "fmax v28.4s, v28.4s, v11.4s\n"
- "fmax v29.4s, v29.4s, v11.4s\n"
- "str q26, [x22, x28]\n"
- "fmax v30.4s, v30.4s, v11.4s\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
+ "fmax v3.4s, v3.4s, v11.4s\n"
+ "str q4, [x22, x27]\n"
+ "fmax v1.4s, v1.4s, v11.4s\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
"fmax v31.4s, v31.4s, v11.4s\n"
- "str q27, [x23, x28]\n"
- "str q28, [x24, x28]\n"
- "str q29, [x25, x28]\n"
- "str q30, [x26, x28]\n"
- "str q31, [x27, x28]\n"
+ "str q3, [x23, x27]\n"
+ "fmin v29.4s, v29.4s, v10.4s\n"
+ "str q1, [x24, x27]\n"
+ "fmin v28.4s, v28.4s, v10.4s\n"
+ "str q31, [x25, x27]\n"
+ "fmin v27.4s, v27.4s, v10.4s\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
+ "fmax v29.4s, v29.4s, v11.4s\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
+ "fmax v28.4s, v28.4s, v11.4s\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
+ "fmax v27.4s, v27.4s, v11.4s\n"
+ "str q29, [x26, x27]\n"
+ "fmin v26.4s, v26.4s, v10.4s\n"
+ "str q28, [x19, x27]\n"
+ "fmin v25.4s, v25.4s, v10.4s\n"
+ "str q27, [x20, x27]\n"
+ "fmin v24.4s, v24.4s, v10.4s\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
+ "fmax v26.4s, v26.4s, v11.4s\n"
+ "str q26, [x21, x27]\n"
+ "fmax v25.4s, v25.4s, v11.4s\n"
+ "fmax v24.4s, v24.4s, v11.4s\n"
+ "str q25, [x22, x27]\n"
+ "fmin v23.4s, v23.4s, v10.4s\n"
+ "fmin v22.4s, v22.4s, v10.4s\n"
+ "str q24, [x23, x27]\n"
+ "fmin v21.4s, v21.4s, v10.4s\n"
+ "fmax v23.4s, v23.4s, v11.4s\n"
+ "str q23, [x24, x27]\n"
+ "fmax v22.4s, v22.4s, v11.4s\n"
+ "fmax v21.4s, v21.4s, v11.4s\n"
+ "str q22, [x25, x27]\n"
+ "str q21, [x26, x27]\n"
"7:" // Output channel loop: Done
"add x10, x10, #0x4\n"
- "cmp x10, x11, LSL #2\n"
+ "cmp x10, x9, LSL #2\n"
"blt 1b\n"
"tst %x[n_output_channels], #0x3\n"
"beq 19f\n"
"8:" // Output channel oddments
- "movi v31.16b, #0x0\n"
+ "movi v16.16b, #0x0\n"
"cbz %x[bias], 11f\n"
- "add x20, %x[bias], x10, LSL #2\n"
+ "add x19, %x[bias], x10, LSL #2\n"
"tbz %x[n_output_channels], #1, 9f\n"
- "ld1 { v31.d }[0], [x20], #0x8\n"
+ "ld1 { v16.d }[0], [x19], #0x8\n"
"tbz %x[n_output_channels], #0, 10f\n"
- "ld1 { v31.s }[2], [x20]\n"
+ "ld1 { v16.s }[2], [x19]\n"
"b 10f\n"
"9:" // Output channel oddments: Load bias: Bit 1: Unset
- "ld1 { v31.s }[0], [x20]\n"
+ "tbz %x[n_output_channels], #0, 10f\n"
+ "ld1 { v16.s }[0], [x19]\n"
"10:" // Output channel oddments: Load bias: Bit 1: End
+
"11:" // Output channel oddments: Load bias: Done
- "ldr q9, [%x[weights], #0x0]\n"
- "mov x20, %x[inptrs]\n"
- "ldp x23, x9, [x20], #0x10\n"
- "lsr x21, %x[kernel_points], #0x1\n"
- "ldr q8, [x23, #0x0]\n"
- "ldr q7, [x23, #0x10]\n"
- "mov v16.16b, v31.16b\n"
- "mov v17.16b, v31.16b\n"
- "ldr q6, [x9, #0x0]\n"
- "ldr q5, [x9, #0x10]\n"
- "mov v18.16b, v31.16b\n"
- "mov v19.16b, v31.16b\n"
- "mov v20.16b, v31.16b\n"
- "mov v21.16b, v31.16b\n"
+ "mov v9.16b, v16.16b\n"
+ "ldr q8, [%x[weights], #0x0]\n"
+ "mov x19, %x[inptrs]\n"
+ "mov v7.16b, v16.16b\n"
+ "ldp x24, x28, [x19], #0x10\n"
+ "lsr x20, %x[kernel_points], #0x1\n"
+ "mov v6.16b, v16.16b\n"
+ "ldr q5, [x24, #0x0]\n"
+ "mov v4.16b, v16.16b\n"
"add %x[weights], %x[weights], #0x10\n"
- "mov v22.16b, v31.16b\n"
- "mov v23.16b, v31.16b\n"
- "mov v24.16b, v31.16b\n"
- "mov v25.16b, v31.16b\n"
- "mov v26.16b, v31.16b\n"
- "mov v27.16b, v31.16b\n"
- "mov v28.16b, v31.16b\n"
- "mov v29.16b, v31.16b\n"
- "mov v30.16b, v31.16b\n"
- "mov v31.16b, v31.16b\n"
- "cbz x21, 15f\n"
- "ldr q4, [%x[weights], #0x0]\n"
- "ldp x23, x9, [x20], #0x10\n"
- "subs x21, x21, #0x1\n"
+ "mov v3.16b, v16.16b\n"
+ "ldr q2, [x24, #0x10]\n"
+ "mov v1.16b, v16.16b\n"
+ "ldr q0, [x28, #0x0]\n"
+ "mov v31.16b, v16.16b\n"
+ "ldr q30, [x28, #0x10]\n"
+ "mov v29.16b, v16.16b\n"
+ "mov v28.16b, v16.16b\n"
+ "mov v27.16b, v16.16b\n"
+ "mov v26.16b, v16.16b\n"
+ "mov v25.16b, v16.16b\n"
+ "mov v24.16b, v16.16b\n"
+ "mov v23.16b, v16.16b\n"
+ "mov v22.16b, v16.16b\n"
+ "mov v21.16b, v16.16b\n"
+ "cbz x20, 15f\n"
+ "ldp x24, x28, [x19], #0x10\n"
+ "ldr q20, [%x[weights], #0x0]\n"
+ "subs x20, x20, #0x1\n"
"add %x[weights], %x[weights], #0x10\n"
- "ldr q3, [x23, #0x0]\n"
- "ldr q2, [x23, #0x10]\n"
- "ldr q1, [x9, #0x0]\n"
- "ldr q0, [x9, #0x10]\n"
+ "ldr q19, [x24, #0x0]\n"
+ "ldr q18, [x24, #0x10]\n"
+ "ldr q17, [x28, #0x0]\n"
+ "ldr q16, [x28, #0x10]\n"
"beq 13f\n"
"12:" // Output channel oddments: Kernel loop
- "ldp x23, x9, [x20], #0x10\n"
- "fmla v16.4s, v9.4s, v8.s[0]\n"
- "fmla v17.4s, v9.4s, v8.s[1]\n"
- "subs x21, x21, #0x1\n"
- "fmla v18.4s, v9.4s, v8.s[2]\n"
- "fmla v19.4s, v9.4s, v8.s[3]\n"
- "ldr q8, [x23, #0x0]\n"
- "fmla v20.4s, v9.4s, v7.s[0]\n"
- "fmla v21.4s, v9.4s, v7.s[1]\n"
- "fmla v22.4s, v9.4s, v7.s[2]\n"
- "fmla v23.4s, v9.4s, v7.s[3]\n"
- "ldr q7, [x23, #0x10]\n"
- "fmla v24.4s, v9.4s, v6.s[0]\n"
- "fmla v25.4s, v9.4s, v6.s[1]\n"
- "fmla v26.4s, v9.4s, v6.s[2]\n"
- "fmla v27.4s, v9.4s, v6.s[3]\n"
- "ldr q6, [x9, #0x0]\n"
- "fmla v28.4s, v9.4s, v5.s[0]\n"
- "fmla v29.4s, v9.4s, v5.s[1]\n"
- "fmla v30.4s, v9.4s, v5.s[2]\n"
- "fmla v31.4s, v9.4s, v5.s[3]\n"
- "ldr q5, [x9, #0x10]\n"
- "ldr q9, [%x[weights], #0x0]\n"
- "ldp x23, x9, [x20], #0x10\n"
- "fmla v16.4s, v4.4s, v3.s[0]\n"
- "fmla v17.4s, v4.4s, v3.s[1]\n"
- "fmla v18.4s, v4.4s, v3.s[2]\n"
- "fmla v19.4s, v4.4s, v3.s[3]\n"
- "ldr q3, [x23, #0x0]\n"
- "fmla v20.4s, v4.4s, v2.s[0]\n"
- "fmla v21.4s, v4.4s, v2.s[1]\n"
- "fmla v22.4s, v4.4s, v2.s[2]\n"
- "fmla v23.4s, v4.4s, v2.s[3]\n"
- "ldr q2, [x23, #0x10]\n"
- "fmla v24.4s, v4.4s, v1.s[0]\n"
- "fmla v25.4s, v4.4s, v1.s[1]\n"
- "fmla v26.4s, v4.4s, v1.s[2]\n"
- "fmla v27.4s, v4.4s, v1.s[3]\n"
- "ldr q1, [x9, #0x0]\n"
- "fmla v28.4s, v4.4s, v0.s[0]\n"
- "fmla v29.4s, v4.4s, v0.s[1]\n"
- "fmla v30.4s, v4.4s, v0.s[2]\n"
- "fmla v31.4s, v4.4s, v0.s[3]\n"
- "ldr q0, [x9, #0x10]\n"
- "ldr q4, [%x[weights], #0x10]\n"
+ "fmla v9.4s, v8.4s, v5.s[0]\n"
+ "ldp x24, x28, [x19], #0x10\n"
+ "subs x20, x20, #0x1\n"
+ "fmla v7.4s, v8.4s, v5.s[1]\n"
+ "fmla v6.4s, v8.4s, v5.s[2]\n"
+ "fmla v4.4s, v8.4s, v5.s[3]\n"
+ "ldr q5, [x24, #0x0]\n"
+ "fmla v3.4s, v8.4s, v2.s[0]\n"
+ "fmla v1.4s, v8.4s, v2.s[1]\n"
+ "fmla v31.4s, v8.4s, v2.s[2]\n"
+ "fmla v29.4s, v8.4s, v2.s[3]\n"
+ "ldr q2, [x24, #0x10]\n"
+ "fmla v28.4s, v8.4s, v0.s[0]\n"
+ "fmla v27.4s, v8.4s, v0.s[1]\n"
+ "fmla v26.4s, v8.4s, v0.s[2]\n"
+ "fmla v25.4s, v8.4s, v0.s[3]\n"
+ "ldr q0, [x28, #0x0]\n"
+ "fmla v24.4s, v8.4s, v30.s[0]\n"
+ "fmla v23.4s, v8.4s, v30.s[1]\n"
+ "fmla v22.4s, v8.4s, v30.s[2]\n"
+ "fmla v21.4s, v8.4s, v30.s[3]\n"
+ "ldr q30, [x28, #0x10]\n"
+ "fmla v9.4s, v20.4s, v19.s[0]\n"
+ "ldr q8, [%x[weights], #0x0]\n"
+ "fmla v7.4s, v20.4s, v19.s[1]\n"
+ "ldp x24, x28, [x19], #0x10\n"
+ "fmla v6.4s, v20.4s, v19.s[2]\n"
+ "fmla v4.4s, v20.4s, v19.s[3]\n"
+ "ldr q19, [x24, #0x0]\n"
+ "fmla v3.4s, v20.4s, v18.s[0]\n"
+ "fmla v1.4s, v20.4s, v18.s[1]\n"
+ "fmla v31.4s, v20.4s, v18.s[2]\n"
+ "fmla v29.4s, v20.4s, v18.s[3]\n"
+ "ldr q18, [x24, #0x10]\n"
+ "fmla v28.4s, v20.4s, v17.s[0]\n"
+ "fmla v27.4s, v20.4s, v17.s[1]\n"
+ "fmla v26.4s, v20.4s, v17.s[2]\n"
+ "fmla v25.4s, v20.4s, v17.s[3]\n"
+ "ldr q17, [x28, #0x0]\n"
+ "fmla v24.4s, v20.4s, v16.s[0]\n"
+ "fmla v23.4s, v20.4s, v16.s[1]\n"
+ "fmla v22.4s, v20.4s, v16.s[2]\n"
+ "fmla v21.4s, v20.4s, v16.s[3]\n"
+ "ldr q16, [x28, #0x10]\n"
+ "ldr q20, [%x[weights], #0x10]\n"
"add %x[weights], %x[weights], #0x20\n"
"bgt 12b\n"
"13:" // Output channel oddments: Kernel loop tail
"tbnz %x[kernel_points], #0, 14f\n"
- "fmla v16.4s, v9.4s, v8.s[0]\n"
- "fmla v17.4s, v9.4s, v8.s[1]\n"
- "fmla v18.4s, v9.4s, v8.s[2]\n"
- "fmla v19.4s, v9.4s, v8.s[3]\n"
- "fmla v20.4s, v9.4s, v7.s[0]\n"
- "fmla v21.4s, v9.4s, v7.s[1]\n"
- "fmla v22.4s, v9.4s, v7.s[2]\n"
- "fmla v23.4s, v9.4s, v7.s[3]\n"
- "fmla v24.4s, v9.4s, v6.s[0]\n"
- "fmla v25.4s, v9.4s, v6.s[1]\n"
- "fmla v26.4s, v9.4s, v6.s[2]\n"
- "fmla v27.4s, v9.4s, v6.s[3]\n"
- "fmla v28.4s, v9.4s, v5.s[0]\n"
- "fmla v29.4s, v9.4s, v5.s[1]\n"
- "fmla v30.4s, v9.4s, v5.s[2]\n"
- "fmla v31.4s, v9.4s, v5.s[3]\n"
- "fmla v16.4s, v4.4s, v3.s[0]\n"
- "fmla v17.4s, v4.4s, v3.s[1]\n"
- "fmla v18.4s, v4.4s, v3.s[2]\n"
- "fmla v19.4s, v4.4s, v3.s[3]\n"
- "fmla v20.4s, v4.4s, v2.s[0]\n"
- "fmla v21.4s, v4.4s, v2.s[1]\n"
- "fmla v22.4s, v4.4s, v2.s[2]\n"
- "fmla v23.4s, v4.4s, v2.s[3]\n"
- "fmla v24.4s, v4.4s, v1.s[0]\n"
- "fmla v25.4s, v4.4s, v1.s[1]\n"
- "fmla v26.4s, v4.4s, v1.s[2]\n"
- "fmla v27.4s, v4.4s, v1.s[3]\n"
- "fmla v28.4s, v4.4s, v0.s[0]\n"
- "fmla v29.4s, v4.4s, v0.s[1]\n"
- "fmla v30.4s, v4.4s, v0.s[2]\n"
- "fmla v31.4s, v4.4s, v0.s[3]\n"
+ "fmla v9.4s, v8.4s, v5.s[0]\n"
+ "fmla v7.4s, v8.4s, v5.s[1]\n"
+ "fmla v6.4s, v8.4s, v5.s[2]\n"
+ "fmla v4.4s, v8.4s, v5.s[3]\n"
+ "fmla v3.4s, v8.4s, v2.s[0]\n"
+ "fmla v1.4s, v8.4s, v2.s[1]\n"
+ "fmla v31.4s, v8.4s, v2.s[2]\n"
+ "fmla v29.4s, v8.4s, v2.s[3]\n"
+ "fmla v28.4s, v8.4s, v0.s[0]\n"
+ "fmla v27.4s, v8.4s, v0.s[1]\n"
+ "fmla v26.4s, v8.4s, v0.s[2]\n"
+ "fmla v25.4s, v8.4s, v0.s[3]\n"
+ "fmla v24.4s, v8.4s, v30.s[0]\n"
+ "fmla v23.4s, v8.4s, v30.s[1]\n"
+ "fmla v22.4s, v8.4s, v30.s[2]\n"
+ "fmla v21.4s, v8.4s, v30.s[3]\n"
+ "fmla v9.4s, v20.4s, v19.s[0]\n"
+ "fmla v7.4s, v20.4s, v19.s[1]\n"
+ "fmla v6.4s, v20.4s, v19.s[2]\n"
+ "fmla v4.4s, v20.4s, v19.s[3]\n"
+ "fmla v3.4s, v20.4s, v18.s[0]\n"
+ "fmla v1.4s, v20.4s, v18.s[1]\n"
+ "fmla v31.4s, v20.4s, v18.s[2]\n"
+ "fmla v29.4s, v20.4s, v18.s[3]\n"
+ "fmla v28.4s, v20.4s, v17.s[0]\n"
+ "fmla v27.4s, v20.4s, v17.s[1]\n"
+ "fmla v26.4s, v20.4s, v17.s[2]\n"
+ "fmla v25.4s, v20.4s, v17.s[3]\n"
+ "fmla v24.4s, v20.4s, v16.s[0]\n"
+ "fmla v23.4s, v20.4s, v16.s[1]\n"
+ "fmla v22.4s, v20.4s, v16.s[2]\n"
+ "fmla v21.4s, v20.4s, v16.s[3]\n"
"b 16f\n"
"14:" // Output channel oddments: Odd tail
- "fmla v16.4s, v9.4s, v8.s[0]\n"
- "fmla v17.4s, v9.4s, v8.s[1]\n"
- "ldp x23, x9, [x20], #0x10\n"
- "fmla v18.4s, v9.4s, v8.s[2]\n"
- "fmla v19.4s, v9.4s, v8.s[3]\n"
- "ldr q8, [x23, #0x0]\n"
- "fmla v20.4s, v9.4s, v7.s[0]\n"
- "fmla v21.4s, v9.4s, v7.s[1]\n"
- "fmla v22.4s, v9.4s, v7.s[2]\n"
- "fmla v23.4s, v9.4s, v7.s[3]\n"
- "ldr q7, [x23, #0x10]\n"
- "fmla v24.4s, v9.4s, v6.s[0]\n"
- "fmla v25.4s, v9.4s, v6.s[1]\n"
- "fmla v26.4s, v9.4s, v6.s[2]\n"
- "fmla v27.4s, v9.4s, v6.s[3]\n"
- "ldr q6, [x9, #0x0]\n"
- "fmla v28.4s, v9.4s, v5.s[0]\n"
- "fmla v29.4s, v9.4s, v5.s[1]\n"
- "fmla v30.4s, v9.4s, v5.s[2]\n"
- "fmla v31.4s, v9.4s, v5.s[3]\n"
- "ldr q5, [x9, #0x10]\n"
- "ldr q9, [%x[weights], #0x0]\n"
- "fmla v16.4s, v4.4s, v3.s[0]\n"
- "fmla v17.4s, v4.4s, v3.s[1]\n"
+ "fmla v9.4s, v8.4s, v5.s[0]\n"
+ "ldp x24, x28, [x19], #0x10\n"
+ "fmla v7.4s, v8.4s, v5.s[1]\n"
+ "fmla v6.4s, v8.4s, v5.s[2]\n"
+ "fmla v4.4s, v8.4s, v5.s[3]\n"
+ "ldr q5, [x24, #0x0]\n"
+ "fmla v3.4s, v8.4s, v2.s[0]\n"
+ "fmla v1.4s, v8.4s, v2.s[1]\n"
+ "fmla v31.4s, v8.4s, v2.s[2]\n"
+ "fmla v29.4s, v8.4s, v2.s[3]\n"
+ "ldr q2, [x24, #0x10]\n"
+ "fmla v28.4s, v8.4s, v0.s[0]\n"
+ "fmla v27.4s, v8.4s, v0.s[1]\n"
+ "fmla v26.4s, v8.4s, v0.s[2]\n"
+ "fmla v25.4s, v8.4s, v0.s[3]\n"
+ "ldr q0, [x28, #0x0]\n"
+ "fmla v24.4s, v8.4s, v30.s[0]\n"
+ "fmla v23.4s, v8.4s, v30.s[1]\n"
+ "fmla v22.4s, v8.4s, v30.s[2]\n"
+ "fmla v21.4s, v8.4s, v30.s[3]\n"
+ "ldr q30, [x28, #0x10]\n"
+ "fmla v9.4s, v20.4s, v19.s[0]\n"
+ "ldr q8, [%x[weights], #0x0]\n"
"add %x[weights], %x[weights], #0x10\n"
- "fmla v18.4s, v4.4s, v3.s[2]\n"
- "fmla v19.4s, v4.4s, v3.s[3]\n"
- "fmla v20.4s, v4.4s, v2.s[0]\n"
- "fmla v21.4s, v4.4s, v2.s[1]\n"
- "fmla v22.4s, v4.4s, v2.s[2]\n"
- "fmla v23.4s, v4.4s, v2.s[3]\n"
- "fmla v24.4s, v4.4s, v1.s[0]\n"
- "fmla v25.4s, v4.4s, v1.s[1]\n"
- "fmla v26.4s, v4.4s, v1.s[2]\n"
- "fmla v27.4s, v4.4s, v1.s[3]\n"
- "fmla v28.4s, v4.4s, v0.s[0]\n"
- "fmla v29.4s, v4.4s, v0.s[1]\n"
- "fmla v30.4s, v4.4s, v0.s[2]\n"
- "fmla v31.4s, v4.4s, v0.s[3]\n"
- "fmla v16.4s, v9.4s, v8.s[0]\n"
- "fmla v17.4s, v9.4s, v8.s[1]\n"
- "fmla v18.4s, v9.4s, v8.s[2]\n"
- "fmla v19.4s, v9.4s, v8.s[3]\n"
- "fmla v20.4s, v9.4s, v7.s[0]\n"
- "fmla v21.4s, v9.4s, v7.s[1]\n"
- "fmla v22.4s, v9.4s, v7.s[2]\n"
- "fmla v23.4s, v9.4s, v7.s[3]\n"
- "fmla v24.4s, v9.4s, v6.s[0]\n"
- "fmla v25.4s, v9.4s, v6.s[1]\n"
- "fmla v26.4s, v9.4s, v6.s[2]\n"
- "fmla v27.4s, v9.4s, v6.s[3]\n"
- "fmla v28.4s, v9.4s, v5.s[0]\n"
- "fmla v29.4s, v9.4s, v5.s[1]\n"
- "fmla v30.4s, v9.4s, v5.s[2]\n"
- "fmla v31.4s, v9.4s, v5.s[3]\n"
+ "fmla v7.4s, v20.4s, v19.s[1]\n"
+ "fmla v6.4s, v20.4s, v19.s[2]\n"
+ "fmla v4.4s, v20.4s, v19.s[3]\n"
+ "fmla v3.4s, v20.4s, v18.s[0]\n"
+ "fmla v1.4s, v20.4s, v18.s[1]\n"
+ "fmla v31.4s, v20.4s, v18.s[2]\n"
+ "fmla v29.4s, v20.4s, v18.s[3]\n"
+ "fmla v28.4s, v20.4s, v17.s[0]\n"
+ "fmla v27.4s, v20.4s, v17.s[1]\n"
+ "fmla v26.4s, v20.4s, v17.s[2]\n"
+ "fmla v25.4s, v20.4s, v17.s[3]\n"
+ "fmla v24.4s, v20.4s, v16.s[0]\n"
+ "fmla v23.4s, v20.4s, v16.s[1]\n"
+ "fmla v22.4s, v20.4s, v16.s[2]\n"
+ "fmla v21.4s, v20.4s, v16.s[3]\n"
+ "fmla v9.4s, v8.4s, v5.s[0]\n"
+ "fmla v7.4s, v8.4s, v5.s[1]\n"
+ "fmla v6.4s, v8.4s, v5.s[2]\n"
+ "fmla v4.4s, v8.4s, v5.s[3]\n"
+ "fmla v3.4s, v8.4s, v2.s[0]\n"
+ "fmla v1.4s, v8.4s, v2.s[1]\n"
+ "fmla v31.4s, v8.4s, v2.s[2]\n"
+ "fmla v29.4s, v8.4s, v2.s[3]\n"
+ "fmla v28.4s, v8.4s, v0.s[0]\n"
+ "fmla v27.4s, v8.4s, v0.s[1]\n"
+ "fmla v26.4s, v8.4s, v0.s[2]\n"
+ "fmla v25.4s, v8.4s, v0.s[3]\n"
+ "fmla v24.4s, v8.4s, v30.s[0]\n"
+ "fmla v23.4s, v8.4s, v30.s[1]\n"
+ "fmla v22.4s, v8.4s, v30.s[2]\n"
+ "fmla v21.4s, v8.4s, v30.s[3]\n"
"b 16f\n"
"15:" // Output channel oddments: Single kernel point
- "fmla v16.4s, v9.4s, v8.s[0]\n"
- "fmla v17.4s, v9.4s, v8.s[1]\n"
- "fmla v18.4s, v9.4s, v8.s[2]\n"
- "fmla v19.4s, v9.4s, v8.s[3]\n"
- "fmla v20.4s, v9.4s, v7.s[0]\n"
- "fmla v21.4s, v9.4s, v7.s[1]\n"
- "fmla v22.4s, v9.4s, v7.s[2]\n"
- "fmla v23.4s, v9.4s, v7.s[3]\n"
- "fmla v24.4s, v9.4s, v6.s[0]\n"
- "fmla v25.4s, v9.4s, v6.s[1]\n"
- "fmla v26.4s, v9.4s, v6.s[2]\n"
- "fmla v27.4s, v9.4s, v6.s[3]\n"
- "fmla v28.4s, v9.4s, v5.s[0]\n"
- "fmla v29.4s, v9.4s, v5.s[1]\n"
- "fmla v30.4s, v9.4s, v5.s[2]\n"
- "fmla v31.4s, v9.4s, v5.s[3]\n"
+ "fmla v9.4s, v8.4s, v5.s[0]\n"
+ "fmla v7.4s, v8.4s, v5.s[1]\n"
+ "fmla v6.4s, v8.4s, v5.s[2]\n"
+ "fmla v4.4s, v8.4s, v5.s[3]\n"
+ "fmla v3.4s, v8.4s, v2.s[0]\n"
+ "fmla v1.4s, v8.4s, v2.s[1]\n"
+ "fmla v31.4s, v8.4s, v2.s[2]\n"
+ "fmla v29.4s, v8.4s, v2.s[3]\n"
+ "fmla v28.4s, v8.4s, v0.s[0]\n"
+ "fmla v27.4s, v8.4s, v0.s[1]\n"
+ "fmla v26.4s, v8.4s, v0.s[2]\n"
+ "fmla v25.4s, v8.4s, v0.s[3]\n"
+ "fmla v24.4s, v8.4s, v30.s[0]\n"
+ "fmla v23.4s, v8.4s, v30.s[1]\n"
+ "fmla v22.4s, v8.4s, v30.s[2]\n"
+ "fmla v21.4s, v8.4s, v30.s[3]\n"
"16:" // Output channel oddments: Done
- "fmin v16.4s, v16.4s, v10.4s\n"
- "fmin v17.4s, v17.4s, v10.4s\n"
- "fmin v18.4s, v18.4s, v10.4s\n"
- "fmin v19.4s, v19.4s, v10.4s\n"
- "fmin v20.4s, v20.4s, v10.4s\n"
- "fmin v21.4s, v21.4s, v10.4s\n"
- "fmin v22.4s, v22.4s, v10.4s\n"
- "fmin v23.4s, v23.4s, v10.4s\n"
- "fmin v24.4s, v24.4s, v10.4s\n"
- "fmin v25.4s, v25.4s, v10.4s\n"
- "fmin v26.4s, v26.4s, v10.4s\n"
- "fmin v27.4s, v27.4s, v10.4s\n"
- "fmin v28.4s, v28.4s, v10.4s\n"
- "fmin v29.4s, v29.4s, v10.4s\n"
- "fmin v30.4s, v30.4s, v10.4s\n"
+ "fmin v9.4s, v9.4s, v10.4s\n"
+ "fmin v7.4s, v7.4s, v10.4s\n"
+ "fmin v6.4s, v6.4s, v10.4s\n"
+ "fmin v4.4s, v4.4s, v10.4s\n"
+ "fmax v9.4s, v9.4s, v11.4s\n"
+ "fmax v7.4s, v7.4s, v11.4s\n"
+ "fmax v6.4s, v6.4s, v11.4s\n"
+ "fmax v4.4s, v4.4s, v11.4s\n"
+ "fmin v3.4s, v3.4s, v10.4s\n"
+ "fmin v1.4s, v1.4s, v10.4s\n"
"fmin v31.4s, v31.4s, v10.4s\n"
- "fmax v16.4s, v16.4s, v11.4s\n"
- "fmax v17.4s, v17.4s, v11.4s\n"
- "fmax v18.4s, v18.4s, v11.4s\n"
- "fmax v19.4s, v19.4s, v11.4s\n"
- "fmax v20.4s, v20.4s, v11.4s\n"
- "fmax v21.4s, v21.4s, v11.4s\n"
- "fmax v22.4s, v22.4s, v11.4s\n"
- "fmax v23.4s, v23.4s, v11.4s\n"
- "fmax v24.4s, v24.4s, v11.4s\n"
- "fmax v25.4s, v25.4s, v11.4s\n"
- "fmax v26.4s, v26.4s, v11.4s\n"
- "fmax v27.4s, v27.4s, v11.4s\n"
- "fmax v28.4s, v28.4s, v11.4s\n"
- "fmax v29.4s, v29.4s, v11.4s\n"
- "fmax v30.4s, v30.4s, v11.4s\n"
+ "fmax v3.4s, v3.4s, v11.4s\n"
+ "fmax v1.4s, v1.4s, v11.4s\n"
"fmax v31.4s, v31.4s, v11.4s\n"
+ "fmin v29.4s, v29.4s, v10.4s\n"
+ "fmin v28.4s, v28.4s, v10.4s\n"
+ "fmin v27.4s, v27.4s, v10.4s\n"
+ "fmax v29.4s, v29.4s, v11.4s\n"
+ "fmax v28.4s, v28.4s, v11.4s\n"
+ "fmax v27.4s, v27.4s, v11.4s\n"
+ "fmin v26.4s, v26.4s, v10.4s\n"
+ "fmin v25.4s, v25.4s, v10.4s\n"
+ "fmin v24.4s, v24.4s, v10.4s\n"
+ "fmax v26.4s, v26.4s, v11.4s\n"
+ "fmax v25.4s, v25.4s, v11.4s\n"
+ "fmax v24.4s, v24.4s, v11.4s\n"
+ "fmin v23.4s, v23.4s, v10.4s\n"
+ "fmin v22.4s, v22.4s, v10.4s\n"
+ "fmin v21.4s, v21.4s, v10.4s\n"
+ "fmax v23.4s, v23.4s, v11.4s\n"
+ "fmax v22.4s, v22.4s, v11.4s\n"
+ "fmax v21.4s, v21.4s, v11.4s\n"
"tbz %x[n_output_channels], #1, 17f\n"
- "ldr x20, [%x[outptrs], #0x0]\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "add x19, x19, x10, LSL #2\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
"add x20, x20, x10, LSL #2\n"
+ "st1 { v9.d }[0], [x19]\n"
"add x21, x21, x10, LSL #2\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
+ "st1 { v7.d }[0], [x20]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
"add x22, x22, x10, LSL #2\n"
+ "st1 { v6.d }[0], [x21]\n"
"add x23, x23, x10, LSL #2\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
+ "st1 { v4.d }[0], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
"add x24, x24, x10, LSL #2\n"
+ "st1 { v3.d }[0], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
"add x25, x25, x10, LSL #2\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
+ "st1 { v1.d }[0], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
"add x26, x26, x10, LSL #2\n"
- "add x27, x27, x10, LSL #2\n"
- "st1 { v16.d }[0], [x20]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
+ "st1 { v31.d }[0], [x25]\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "add x19, x19, x10, LSL #2\n"
+ "st1 { v29.d }[0], [x26]\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
"add x20, x20, x10, LSL #2\n"
- "st1 { v17.d }[0], [x21]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
+ "st1 { v28.d }[0], [x19]\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
"add x21, x21, x10, LSL #2\n"
- "st1 { v18.d }[0], [x22]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
+ "st1 { v27.d }[0], [x20]\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
"add x22, x22, x10, LSL #2\n"
- "st1 { v19.d }[0], [x23]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
+ "st1 { v26.d }[0], [x21]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
"add x23, x23, x10, LSL #2\n"
- "st1 { v20.d }[0], [x24]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
+ "st1 { v25.d }[0], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
"add x24, x24, x10, LSL #2\n"
- "st1 { v21.d }[0], [x25]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
+ "st1 { v24.d }[0], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
"add x25, x25, x10, LSL #2\n"
- "st1 { v22.d }[0], [x26]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
+ "st1 { v23.d }[0], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
"add x26, x26, x10, LSL #2\n"
- "st1 { v23.d }[0], [x27]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
- "add x27, x27, x10, LSL #2\n"
+ "st1 { v22.d }[0], [x25]\n"
"add x10, x10, #0x2\n"
- "st1 { v24.d }[0], [x20]\n"
- "st1 { v25.d }[0], [x21]\n"
- "st1 { v26.d }[0], [x22]\n"
- "st1 { v27.d }[0], [x23]\n"
- "st1 { v28.d }[0], [x24]\n"
- "st1 { v29.d }[0], [x25]\n"
- "st1 { v30.d }[0], [x26]\n"
- "st1 { v31.d }[0], [x27]\n"
+ "st1 { v21.d }[0], [x26]\n"
"tbz %x[n_output_channels], #0, 18f\n"
- "ldr x20, [%x[outptrs], #0x0]\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "add x19, x19, x10, LSL #2\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
"add x20, x20, x10, LSL #2\n"
+ "st1 { v9.s }[2], [x19]\n"
"add x21, x21, x10, LSL #2\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
+ "st1 { v7.s }[2], [x20]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
"add x22, x22, x10, LSL #2\n"
+ "st1 { v6.s }[2], [x21]\n"
"add x23, x23, x10, LSL #2\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
+ "st1 { v4.s }[2], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
"add x24, x24, x10, LSL #2\n"
+ "st1 { v3.s }[2], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
"add x25, x25, x10, LSL #2\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
+ "st1 { v1.s }[2], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
"add x26, x26, x10, LSL #2\n"
- "add x27, x27, x10, LSL #2\n"
- "st1 { v16.s }[2], [x20]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
+ "st1 { v31.s }[2], [x25]\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "add x19, x19, x10, LSL #2\n"
+ "st1 { v29.s }[2], [x26]\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
"add x20, x20, x10, LSL #2\n"
- "st1 { v17.s }[2], [x21]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
+ "st1 { v28.s }[2], [x19]\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
"add x21, x21, x10, LSL #2\n"
- "st1 { v18.s }[2], [x22]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
+ "st1 { v27.s }[2], [x20]\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
"add x22, x22, x10, LSL #2\n"
- "st1 { v19.s }[2], [x23]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
+ "st1 { v26.s }[2], [x21]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
"add x23, x23, x10, LSL #2\n"
- "st1 { v20.s }[2], [x24]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
+ "st1 { v25.s }[2], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
"add x24, x24, x10, LSL #2\n"
- "st1 { v21.s }[2], [x25]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
+ "st1 { v24.s }[2], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
"add x25, x25, x10, LSL #2\n"
- "st1 { v22.s }[2], [x26]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
+ "st1 { v23.s }[2], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
"add x26, x26, x10, LSL #2\n"
- "st1 { v23.s }[2], [x27]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
- "add x27, x27, x10, LSL #2\n"
- "st1 { v24.s }[2], [x20]\n"
- "st1 { v25.s }[2], [x21]\n"
- "st1 { v26.s }[2], [x22]\n"
- "st1 { v27.s }[2], [x23]\n"
- "st1 { v28.s }[2], [x24]\n"
- "st1 { v29.s }[2], [x25]\n"
- "st1 { v30.s }[2], [x26]\n"
- "st1 { v31.s }[2], [x27]\n"
+ "st1 { v22.s }[2], [x25]\n"
+ "st1 { v21.s }[2], [x26]\n"
"b 18f\n"
"17:" // Output channel oddments: Done: Store: Bit 1: Unset
- "ldr x20, [%x[outptrs], #0x0]\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
+ "tbz %x[n_output_channels], #0, 18f\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "add x19, x19, x10, LSL #2\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
"add x20, x20, x10, LSL #2\n"
+ "st1 { v9.s }[0], [x19]\n"
"add x21, x21, x10, LSL #2\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
+ "st1 { v7.s }[0], [x20]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
"add x22, x22, x10, LSL #2\n"
+ "st1 { v6.s }[0], [x21]\n"
"add x23, x23, x10, LSL #2\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
+ "st1 { v4.s }[0], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
"add x24, x24, x10, LSL #2\n"
+ "st1 { v3.s }[0], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
"add x25, x25, x10, LSL #2\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
+ "st1 { v1.s }[0], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
"add x26, x26, x10, LSL #2\n"
- "add x27, x27, x10, LSL #2\n"
- "st1 { v16.s }[0], [x20]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
+ "st1 { v31.s }[0], [x25]\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "add x19, x19, x10, LSL #2\n"
+ "st1 { v29.s }[0], [x26]\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
"add x20, x20, x10, LSL #2\n"
- "st1 { v17.s }[0], [x21]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
+ "st1 { v28.s }[0], [x19]\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
"add x21, x21, x10, LSL #2\n"
- "st1 { v18.s }[0], [x22]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
+ "st1 { v27.s }[0], [x20]\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
"add x22, x22, x10, LSL #2\n"
- "st1 { v19.s }[0], [x23]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
+ "st1 { v26.s }[0], [x21]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
"add x23, x23, x10, LSL #2\n"
- "st1 { v20.s }[0], [x24]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
+ "st1 { v25.s }[0], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
"add x24, x24, x10, LSL #2\n"
- "st1 { v21.s }[0], [x25]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
+ "st1 { v24.s }[0], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
"add x25, x25, x10, LSL #2\n"
- "st1 { v22.s }[0], [x26]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
+ "st1 { v23.s }[0], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
"add x26, x26, x10, LSL #2\n"
- "st1 { v23.s }[0], [x27]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
- "add x27, x27, x10, LSL #2\n"
- "st1 { v24.s }[0], [x20]\n"
- "st1 { v25.s }[0], [x21]\n"
- "st1 { v26.s }[0], [x22]\n"
- "st1 { v27.s }[0], [x23]\n"
- "st1 { v28.s }[0], [x24]\n"
- "st1 { v29.s }[0], [x25]\n"
- "st1 { v30.s }[0], [x26]\n"
- "st1 { v31.s }[0], [x27]\n"
+ "st1 { v22.s }[0], [x25]\n"
+ "st1 { v21.s }[0], [x26]\n"
"18:" // Output channel oddments: Done: Store: Bit 1: End
"19:" // Done
: [weights] "+&r" (weights)
: [bias] "r" (bias), [inptrs] "r" (inptrs), [kernel_points] "r" ((uint64_t) kernel_points), [minmax_vals] "r" (minmax_vals), [n_output_channels] "r" ((uint64_t) n_output_channels), [outptrs] "r" (outptrs)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
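
Illustrative sketch (not from the patch; names are hypothetical): the removed and restored instruction streams above differ only in register allocation and scheduling, and both reduce to the same per-lane arithmetic, a bias-seeded fmla accumulation over kernel points followed by a clamp, where v10 carries the upper bound and v11 the lower bound loaded from minmax_vals. A minimal single-lane C rendering of that pattern, under those assumptions:

    #include <math.h>

    /* One SIMD lane of the pattern above: bias-seeded accumulator,
     * fmla over kernel points, then fmin against the upper bound (v10)
     * and fmax against the lower bound (v11). */
    static inline float accumulate_and_clamp(float bias,
                                             const float *weights,
                                             const float *inputs,
                                             unsigned kernel_points,
                                             float minval, float maxval)
    {
        float acc = bias;
        for (unsigned k = 0; k < kernel_points; k++)
            acc = fmaf(weights[k], inputs[k], acc);  /* fmla vN, v8, v5.s[i] */
        acc = fminf(acc, maxval);                    /* fmin vN.4s, vN.4s, v10.4s */
        return fmaxf(acc, minval);                   /* fmax vN.4s, vN.4s, v11.4s */
    }

The assembly performs this across sixteen accumulators at once (four lanes each), which is why the store/clamp epilogue above repeats the fmin/fmax pair per vector before each str.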
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
index fda88f94bb..f8245fc5d9 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,1622 +41,1282 @@ void a64_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(
)
{
__asm__ __volatile__(
- "mov x20, #0x1\n"
- "orr x20, x20, #0x100\n"
- "ldp x15, x14, [%x[inptrs], #0x0]\n"
- "ldp x13, x12, [%x[inptrs], #0x10]\n"
- "orr x20, x20, #0x10000\n"
- "lsr x11, %x[n_channels], #0x4\n"
- "dup v14.4s, w20\n"
- "ldp x10, x9, [%x[inptrs], #0x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_minval]\n"
- "ld1r { v13.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_maxval]\n"
- "ld1r { v12.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_b_offset]\n"
- "ld1r { v11.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v10.4s }, [x20]\n"
- "mov x28, #0x0\n"
- "mov x27, #0x0\n"
- "ldp x26, x25, [%x[inptrs], #0x30]\n"
- "ldp x24, x23, [%x[outptrs], #0x0]\n"
+ "ldp x13, x12, [%x[inptrs], #0x0]\n"
+ "add SP, SP, #-0x80\n"
+ "ldp x11, x10, [%x[inptrs], #0x10]\n"
+ "mov x19, #0x1\n"
+ "ldp x9, x28, [%x[inptrs], #0x20]\n"
+ "orr x19, x19, #0x100\n"
+ "ldp x27, x26, [%x[inptrs], #0x30]\n"
+ "orr x19, x19, #0x10000\n"
+ "dup v11.4s, w19\n"
+ "ldp x25, x24, [%x[outptrs], #0x0]\n"
+ "mov x23, #0x0\n"
"ldp x22, x21, [%x[outptrs], #0x10]\n"
- "cbz x11, 3f\n"
- "ldr q9, [x15, x28]\n"
- "ldr q8, [x14, x28]\n"
- "subs x11, x11, #0x1\n"
- "ldr q7, [x13, x28]\n"
- "ldr q6, [x12, x28]\n"
- "zip2 v5.16b, v9.16b, v7.16b\n"
- "zip1 v9.16b, v9.16b, v7.16b\n"
- "ldr q4, [x10, x28]\n"
- "ldr q3, [x9, x28]\n"
- "zip1 v7.16b, v8.16b, v6.16b\n"
- "zip2 v6.16b, v8.16b, v6.16b\n"
- "ldr q2, [x26, x28]\n"
- "ldr q1, [x25, x28]\n"
- "zip2 v8.16b, v9.16b, v7.16b\n"
- "zip1 v9.16b, v9.16b, v7.16b\n"
- "ldr q0, [%x[params], #0x10]\n"
- "ldr q16, [%x[params], #0x20]\n"
- "zip1 v7.16b, v5.16b, v6.16b\n"
- "zip2 v6.16b, v5.16b, v6.16b\n"
- "ldr q5, [%x[params], #0x0]\n"
- "ldr q31, [%x[params], #0x30]\n"
- "zip2 v30.16b, v4.16b, v2.16b\n"
- "zip1 v4.16b, v4.16b, v2.16b\n"
- "ldp x15, x14, [%x[inptrs], #0x40]\n"
- "ldr q29, [x15, x28]\n"
- "zip1 v2.16b, v3.16b, v1.16b\n"
- "zip2 v1.16b, v3.16b, v1.16b\n"
- "ldr q28, [x14, x28]\n"
- "ldp x13, x12, [%x[inptrs], #0x50]\n"
- "zip2 v3.16b, v4.16b, v2.16b\n"
- "zip1 v4.16b, v4.16b, v2.16b\n"
- "ldr q27, [x13, x28]\n"
- "ldr q26, [x12, x28]\n"
- "zip2 v25.16b, v29.16b, v27.16b\n"
- "zip1 v29.16b, v29.16b, v27.16b\n"
- "ldp x10, x9, [%x[inptrs], #0x60]\n"
- "ldr q24, [x10, x28]\n"
- "zip1 v27.16b, v28.16b, v26.16b\n"
- "zip2 v26.16b, v28.16b, v26.16b\n"
- "ldr q23, [x9, x28]\n"
- "ldp x26, x25, [%x[inptrs], #0x70]\n"
- "zip1 v2.16b, v30.16b, v1.16b\n"
- "zip2 v1.16b, v30.16b, v1.16b\n"
- "ldr q22, [x26, x28]\n"
- "ldr q21, [x25, x28]\n"
- "zip2 v20.16b, v24.16b, v22.16b\n"
- "zip1 v24.16b, v24.16b, v22.16b\n"
- "zip1 v22.16b, v23.16b, v21.16b\n"
- "zip2 v21.16b, v23.16b, v21.16b\n"
- "ldp x15, x14, [%x[inptrs], #0x0]\n"
- "ldp x13, x12, [%x[inptrs], #0x10]\n"
- "ldp x10, x9, [%x[inptrs], #0x20]\n"
- "ldp x26, x25, [%x[inptrs], #0x30]\n"
- "zip2 v28.16b, v29.16b, v27.16b\n"
- "zip1 v29.16b, v29.16b, v27.16b\n"
- "zip1 v27.16b, v25.16b, v26.16b\n"
- "zip2 v26.16b, v25.16b, v26.16b\n"
- "add %x[params], %x[params], #0x40\n"
- "zip2 v23.16b, v24.16b, v22.16b\n"
- "zip1 v24.16b, v24.16b, v22.16b\n"
- "zip1 v22.16b, v20.16b, v21.16b\n"
- "zip2 v21.16b, v20.16b, v21.16b\n"
- "mov v30.16b, v5.16b\n"
- "mov v25.16b, v5.16b\n"
- "mov v20.16b, v5.16b\n"
- "beq 2f\n"
+ "lsr x20, %x[n_channels], #0x4\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_minval]\n"
+ "ld1r { v9.4s }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_maxval]\n"
+ "ld1r { v12.4s }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_b_offset]\n"
+ "ld1r { v14.4s }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_c_offset]\n"
+ "ld1r { v13.4s }, [x19]\n"
+ "cbz x20, 2f\n"
"1:" // Loop
- "movi v19.4s, #0x0\n"
- ".inst 0x4e8495d3 // sdot v19.4s, v14.16b, v4.16b\n"
- ".inst 0x4e899405 // sdot v5.4s, v0.16b, v9.16b\n"
- "add x28, x28, #0x10\n"
- ".inst 0x4e9d95d3 // sdot v19.4s, v14.16b, v29.16b\n"
- ".inst 0x4e849419 // sdot v25.4s, v0.16b, v4.16b\n"
- "subs x11, x11, #0x1\n"
- ".inst 0x4e849605 // sdot v5.4s, v16.16b, v4.16b\n"
- "ext v4.16b, v4.16b, v4.16b, #0x1\n"
- "mov v18.16b, v19.16b\n .inst 0x4e9895d2 // sdot v18.4s, v14.16b, v24.16b\n"
- ".inst 0x4e8995d3 // sdot v19.4s, v14.16b, v9.16b\n"
- "ext v9.16b, v9.16b, v9.16b, #0x1\n"
- ".inst 0x4e9d9619 // sdot v25.4s, v16.16b, v29.16b\n"
- ".inst 0x4e9d97e5 // sdot v5.4s, v31.16b, v29.16b\n"
- "ext v29.16b, v29.16b, v29.16b, #0x1\n"
- ".inst 0x4e89941e // sdot v30.4s, v0.16b, v9.16b\n"
- ".inst 0x4e849414 // sdot v20.4s, v0.16b, v4.16b\n"
- "movi v17.4s, #0x0\n"
- ".inst 0x4e8495d1 // sdot v17.4s, v14.16b, v4.16b\n"
- ".inst 0x4e9d95d1 // sdot v17.4s, v14.16b, v29.16b\n"
- ".inst 0x4e9897f9 // sdot v25.4s, v31.16b, v24.16b\n"
- "ext v24.16b, v24.16b, v24.16b, #0x1\n"
- ".inst 0x4e84961e // sdot v30.4s, v16.16b, v4.16b\n"
- "ldr q4, [%x[params], #0x10]\n"
- ".inst 0x4e9d9614 // sdot v20.4s, v16.16b, v29.16b\n"
- "mls v5.4s, v19.4s, v11.4s\n"
- "mov v16.16b, v17.16b\n .inst 0x4e9895d0 // sdot v16.4s, v14.16b, v24.16b\n"
- ".inst 0x4e8995d1 // sdot v17.4s, v14.16b, v9.16b\n"
- "ldr q9, [%x[params], #0x0]\n"
- "sqrdmulh v5.4s, v5.4s, v9.4s\n"
- ".inst 0x4e9d97fe // sdot v30.4s, v31.16b, v29.16b\n"
- ".inst 0x4e9897f4 // sdot v20.4s, v31.16b, v24.16b\n"
- "mls v30.4s, v17.4s, v11.4s\n"
- "mls v25.4s, v18.4s, v11.4s\n"
- "mls v20.4s, v16.4s, v11.4s\n"
- "and v0.16b, v5.16b, v4.16b\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "ldr q9, [%x[params], #0x60]\n"
- "sqadd v5.4s, v5.4s, v0.4s\n"
- "and v16.16b, v30.16b, v4.16b\n"
- "and v31.16b, v25.16b, v4.16b\n"
- "and v0.16b, v20.16b, v4.16b\n"
+ "movi v15.4s, #0x0\n"
+ "ldr q27, [x13, x23]\n"
+ "subs x20, x20, #0x1\n"
+ "movi v10.4s, #0x0\n"
+ "ldr q1, [x12, x23]\n"
+ "ldp x13, x12, [%x[inptrs], #0x40]\n"
+ "ldr q25, [x11, x23]\n"
+ "zip1 v7.16b, v27.16b, v25.16b\n"
+ "ldr q23, [x10, x23]\n"
+ "zip2 v5.16b, v27.16b, v25.16b\n"
+ "ldp x11, x10, [%x[inptrs], #0x50]\n"
+ "ldr q31, [x9, x23]\n"
+ "zip1 v8.16b, v1.16b, v23.16b\n"
+ "ldr q28, [x28, x23]\n"
+ "zip2 v3.16b, v1.16b, v23.16b\n"
+ "ldp x9, x28, [%x[inptrs], #0x60]\n"
+ "zip1 v6.16b, v7.16b, v8.16b\n"
+ "ldr q21, [x27, x23]\n"
+ "zip2 v8.16b, v7.16b, v8.16b\n"
+ "ldr q26, [x26, x23]\n"
+ "zip1 v7.16b, v5.16b, v3.16b\n"
+ "ldp x27, x26, [%x[inptrs], #0x70]\n"
+ "zip2 v5.16b, v5.16b, v3.16b\n"
+ "ldr q24, [x13, x23]\n"
+ "ldr q22, [x12, x23]\n"
+ "zip1 v2.16b, v31.16b, v21.16b\n"
+ "zip2 v4.16b, v31.16b, v21.16b\n"
+ "ldp x13, x12, [%x[inptrs], #0x0]\n"
+ "zip1 v1.16b, v28.16b, v26.16b\n"
+ "ldr q20, [x11, x23]\n"
+ "zip2 v31.16b, v28.16b, v26.16b\n"
+ "ldr q16, [x10, x23]\n"
+ "zip1 v3.16b, v2.16b, v1.16b\n"
+ "ldp x11, x10, [%x[inptrs], #0x10]\n"
+ "zip2 v2.16b, v2.16b, v1.16b\n"
+ "ldr q19, [x9, x23]\n"
+ "zip1 v1.16b, v4.16b, v31.16b\n"
+ "ldr q0, [x28, x23]\n"
+ "zip1 v28.16b, v24.16b, v20.16b\n"
+ "ldp x9, x28, [%x[inptrs], #0x20]\n"
+ "zip2 v26.16b, v24.16b, v20.16b\n"
+ "ldr q18, [x27, x23]\n"
+ "zip1 v24.16b, v22.16b, v16.16b\n"
+ "ldr q17, [x26, x23]\n"
+ "zip2 v22.16b, v22.16b, v16.16b\n"
+ "ldp x27, x26, [%x[inptrs], #0x30]\n"
+ "zip2 v16.16b, v4.16b, v31.16b\n"
+ "str q7, [SP, #0x0]\n"
+ "zip1 v31.16b, v28.16b, v24.16b\n"
+ "str q5, [SP, #0x10]\n"
+ "zip1 v20.16b, v19.16b, v18.16b\n"
+ "str q1, [SP, #0x20]\n"
+ "zip2 v19.16b, v19.16b, v18.16b\n"
+ "str q16, [SP, #0x30]\n"
+ "zip1 v18.16b, v0.16b, v17.16b\n"
+ "ldr q30, [%x[params], #0x0]\n"
+ "zip2 v17.16b, v0.16b, v17.16b\n"
+ "ldr q29, [%x[params], #0x10]\n"
+ "zip2 v28.16b, v28.16b, v24.16b\n"
+ "ldr q27, [%x[params], #0x20]\n"
+ "zip1 v16.16b, v26.16b, v22.16b\n"
+ "str q16, [SP, #0x40]\n"
+ "zip2 v16.16b, v26.16b, v22.16b\n"
+ "str q16, [SP, #0x50]\n"
+ "zip1 v26.16b, v20.16b, v18.16b\n"
+ "ldr q25, [%x[params], #0x30]\n"
+ "zip2 v24.16b, v20.16b, v18.16b\n"
+ "ldr q23, [%x[params], #0x40]\n"
+ "zip1 v16.16b, v19.16b, v17.16b\n"
+ "str q16, [SP, #0x60]\n"
+ "zip2 v16.16b, v19.16b, v17.16b\n"
+ "str q16, [SP, #0x70]\n"
+ "mov v22.16b, v30.16b\n"
+ "ldr q21, [%x[params], #0x50]\n"
+ "mov v20.16b, v30.16b\n"
+ "mov v19.16b, v30.16b\n"
+ ".inst 0x4e8697be // sdot v30.4s, v29.16b, v6.16b\n"
+ ".inst 0x4e8397b4 // sdot v20.4s, v29.16b, v3.16b\n"
+ ".inst 0x4e83956f // sdot v15.4s, v11.16b, v3.16b\n"
+ ".inst 0x4e83977e // sdot v30.4s, v27.16b, v3.16b\n"
+ "ext v3.16b, v3.16b, v3.16b, #0x1\n"
+ ".inst 0x4e9f9774 // sdot v20.4s, v27.16b, v31.16b\n"
+ ".inst 0x4e9f956f // sdot v15.4s, v11.16b, v31.16b\n"
+ ".inst 0x4e9f973e // sdot v30.4s, v25.16b, v31.16b\n"
+ "ext v31.16b, v31.16b, v31.16b, #0x1\n"
+ ".inst 0x4e9a9734 // sdot v20.4s, v25.16b, v26.16b\n"
+ "mov v17.16b, v15.16b\n"
+ ".inst 0x4e86956f // sdot v15.4s, v11.16b, v6.16b\n"
+ "mls v30.4s, v15.4s, v14.4s\n"
+ ".inst 0x4e9a9571 // sdot v17.4s, v11.16b, v26.16b\n"
+ "ext v6.16b, v6.16b, v6.16b, #0x1\n"
+ "mls v20.4s, v17.4s, v14.4s\n"
+ "ext v26.16b, v26.16b, v26.16b, #0x1\n"
+ ".inst 0x4e8697b6 // sdot v22.4s, v29.16b, v6.16b\n"
+ ".inst 0x4e8397b3 // sdot v19.4s, v29.16b, v3.16b\n"
+ "ldr q29, [%x[params], #0x70]\n"
+ ".inst 0x4e83956a // sdot v10.4s, v11.16b, v3.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v23.4s\n"
+ ".inst 0x4e839776 // sdot v22.4s, v27.16b, v3.16b\n"
+ "ldr q3, [SP, #0x20]\n"
+ ".inst 0x4e9f9773 // sdot v19.4s, v27.16b, v31.16b\n"
+ "ldr q27, [%x[params], #0x80]\n"
+ ".inst 0x4e9f956a // sdot v10.4s, v11.16b, v31.16b\n"
+ "and v18.16b, v30.16b, v21.16b\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ ".inst 0x4e9f9736 // sdot v22.4s, v25.16b, v31.16b\n"
+ "ldr q31, [SP, #0x40]\n"
+ ".inst 0x4e9a9733 // sdot v19.4s, v25.16b, v26.16b\n"
+ "ldr q25, [%x[params], #0x90]\n"
+ "mov v17.16b, v10.16b\n"
+ ".inst 0x4e86956a // sdot v10.4s, v11.16b, v6.16b\n"
+ "ldr q6, [SP, #0x0]\n"
+ "mls v22.4s, v10.4s, v14.4s\n"
+ ".inst 0x4e9a9571 // sdot v17.4s, v11.16b, v26.16b\n"
+ "ldr q26, [SP, #0x60]\n"
+ "sqadd v30.4s, v30.4s, v18.4s\n"
+ "mls v19.4s, v17.4s, v14.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v23.4s\n"
+ "movi v15.4s, #0x0\n"
+ "srshl v30.4s, v30.4s, v21.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v23.4s\n"
+ ".inst 0x4e82956f // sdot v15.4s, v11.16b, v2.16b\n"
+ "and v16.16b, v20.16b, v21.16b\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "srshl v5.4s, v5.4s, v4.4s\n"
- "sqadd v30.4s, v30.4s, v16.4s\n"
- "ldr q16, [%x[params], #0x40]\n"
- "sqadd v25.4s, v25.4s, v31.4s\n"
- "ldr q31, [%x[params], #0x50]\n"
- "sqadd v20.4s, v20.4s, v0.4s\n"
- "ldr q0, [%x[params], #0x30]\n"
- "add v5.4s, v5.4s, v10.4s\n"
- "srshl v30.4s, v30.4s, v4.4s\n"
- "srshl v25.4s, v25.4s, v4.4s\n"
- "srshl v20.4s, v20.4s, v4.4s\n"
- "ldr q4, [%x[params], #0x70]\n"
- "smax v5.4s, v5.4s, v13.4s\n"
- "add v30.4s, v30.4s, v10.4s\n"
- "add v25.4s, v25.4s, v10.4s\n"
- "add v20.4s, v20.4s, v10.4s\n"
- "smin v5.4s, v5.4s, v12.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
+ "add v30.4s, v30.4s, v13.4s\n"
+ "and v17.16b, v22.16b, v21.16b\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "smax v30.4s, v30.4s, v9.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v23.4s\n"
+ "ldr q23, [%x[params], #0xa0]\n"
+ ".inst 0x4e9c956f // sdot v15.4s, v11.16b, v28.16b\n"
+ "sqadd v20.4s, v20.4s, v16.4s\n"
"smin v30.4s, v30.4s, v12.4s\n"
- "smin v25.4s, v25.4s, v12.4s\n"
- "smin v20.4s, v20.4s, v12.4s\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
- "movi v19.4s, #0x0\n"
- ".inst 0x4e8395d3 // sdot v19.4s, v14.16b, v3.16b\n"
- ".inst 0x4e9c95d3 // sdot v19.4s, v14.16b, v28.16b\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "and v16.16b, v19.16b, v21.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "srshl v20.4s, v20.4s, v21.4s\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s5, [x24, x27]\n"
- "ldr q5, [%x[params], #0x20]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "mov v18.16b, v19.16b\n .inst 0x4e9795d2 // sdot v18.4s, v14.16b, v23.16b\n"
+ "mov v17.16b, v15.16b\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "str s30, [x23, x27]\n"
- ".inst 0x4e8895d3 // sdot v19.4s, v14.16b, v8.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s25, [x22, x27]\n"
- "mov v30.16b, v5.16b\n"
- "str s20, [x21, x27]\n"
- "mov v25.16b, v5.16b\n"
- "mov v20.16b, v5.16b\n"
- ".inst 0x4e889405 // sdot v5.4s, v0.16b, v8.16b\n"
- ".inst 0x4e839419 // sdot v25.4s, v0.16b, v3.16b\n"
- ".inst 0x4e839605 // sdot v5.4s, v16.16b, v3.16b\n"
- "ext v8.16b, v8.16b, v8.16b, #0x1\n"
- "add x27, x27, #0x4\n"
- "ext v3.16b, v3.16b, v3.16b, #0x1\n"
- "movi v17.4s, #0x0\n"
- ".inst 0x4e88941e // sdot v30.4s, v0.16b, v8.16b\n"
- ".inst 0x4e839414 // sdot v20.4s, v0.16b, v3.16b\n"
- ".inst 0x4e8395d1 // sdot v17.4s, v14.16b, v3.16b\n"
- ".inst 0x4e9c9619 // sdot v25.4s, v16.16b, v28.16b\n"
- ".inst 0x4e9c97e5 // sdot v5.4s, v31.16b, v28.16b\n"
- "ext v28.16b, v28.16b, v28.16b, #0x1\n"
- ".inst 0x4e83961e // sdot v30.4s, v16.16b, v3.16b\n"
- "ldr q3, [x9, x28]\n"
- ".inst 0x4e9c9614 // sdot v20.4s, v16.16b, v28.16b\n"
- "mls v5.4s, v19.4s, v11.4s\n"
- ".inst 0x4e9c95d1 // sdot v17.4s, v14.16b, v28.16b\n"
- ".inst 0x4e9797f9 // sdot v25.4s, v31.16b, v23.16b\n"
- "ext v23.16b, v23.16b, v23.16b, #0x1\n"
- ".inst 0x4e9c97fe // sdot v30.4s, v31.16b, v28.16b\n"
- ".inst 0x4e9797f4 // sdot v20.4s, v31.16b, v23.16b\n"
- "sqrdmulh v5.4s, v5.4s, v9.4s\n"
- "mov v16.16b, v17.16b\n .inst 0x4e9795d0 // sdot v16.4s, v14.16b, v23.16b\n"
- ".inst 0x4e8895d1 // sdot v17.4s, v14.16b, v8.16b\n"
- "ldr q8, [x14, x28]\n"
- "mls v30.4s, v17.4s, v11.4s\n"
- "mls v25.4s, v18.4s, v11.4s\n"
- "mls v20.4s, v16.4s, v11.4s\n"
- "and v0.16b, v5.16b, v4.16b\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "ldr q9, [%x[params], #0xc0]\n"
- "sqadd v5.4s, v5.4s, v0.4s\n"
- "and v16.16b, v30.16b, v4.16b\n"
- "and v31.16b, v25.16b, v4.16b\n"
- "and v0.16b, v20.16b, v4.16b\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "srshl v5.4s, v5.4s, v4.4s\n"
- "sqadd v30.4s, v30.4s, v16.4s\n"
- "ldr q16, [%x[params], #0xa0]\n"
- "sqadd v25.4s, v25.4s, v31.4s\n"
- "ldr q31, [%x[params], #0xb0]\n"
- "sqadd v20.4s, v20.4s, v0.4s\n"
- "ldr q0, [%x[params], #0x90]\n"
- "add v5.4s, v5.4s, v10.4s\n"
- "srshl v30.4s, v30.4s, v4.4s\n"
- "srshl v25.4s, v25.4s, v4.4s\n"
- "srshl v20.4s, v20.4s, v4.4s\n"
- "ldr q4, [%x[params], #0xd0]\n"
- "smax v5.4s, v5.4s, v13.4s\n"
- "add v30.4s, v30.4s, v10.4s\n"
- "add v25.4s, v25.4s, v10.4s\n"
- "add v20.4s, v20.4s, v10.4s\n"
- "smin v5.4s, v5.4s, v12.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smin v30.4s, v30.4s, v12.4s\n"
- "smin v25.4s, v25.4s, v12.4s\n"
+ "str s30, [x25, x23]\n"
+ "srshl v22.4s, v22.4s, v21.4s\n"
+ "add v20.4s, v20.4s, v13.4s\n"
+ "ldr q30, [%x[params], #0x60]\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ ".inst 0x4e88956f // sdot v15.4s, v11.16b, v8.16b\n"
+ "smax v20.4s, v20.4s, v9.4s\n"
+ "add v22.4s, v22.4s, v13.4s\n"
+ "srshl v19.4s, v19.4s, v21.4s\n"
+ "ldr q21, [%x[params], #0xb0]\n"
"smin v20.4s, v20.4s, v12.4s\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
- "movi v19.4s, #0x0\n"
- ".inst 0x4e8295d3 // sdot v19.4s, v14.16b, v2.16b\n"
- ".inst 0x4e9b95d3 // sdot v19.4s, v14.16b, v27.16b\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s5, [x24, x27]\n"
- "ldr q5, [%x[params], #0x80]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "smax v22.4s, v22.4s, v9.4s\n"
+ ".inst 0x4e989571 // sdot v17.4s, v11.16b, v24.16b\n"
+ "add v19.4s, v19.4s, v13.4s\n"
+ "smin v22.4s, v22.4s, v12.4s\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "mov v18.16b, v19.16b\n .inst 0x4e9695d2 // sdot v18.4s, v14.16b, v22.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "str s30, [x23, x27]\n"
- ".inst 0x4e8795d3 // sdot v19.4s, v14.16b, v7.16b\n"
+ "smax v19.4s, v19.4s, v9.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s25, [x22, x27]\n"
- "mov v30.16b, v5.16b\n"
- "str s20, [x21, x27]\n"
- "mov v25.16b, v5.16b\n"
- "mov v20.16b, v5.16b\n"
- ".inst 0x4e879405 // sdot v5.4s, v0.16b, v7.16b\n"
- ".inst 0x4e829419 // sdot v25.4s, v0.16b, v2.16b\n"
- ".inst 0x4e829605 // sdot v5.4s, v16.16b, v2.16b\n"
- "ext v7.16b, v7.16b, v7.16b, #0x1\n"
- "add x27, x27, #0x4\n"
+ "str s20, [x22, x23]\n"
+ "smin v19.4s, v19.4s, v12.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str s22, [x24, x23]\n"
+ "mov v22.16b, v30.16b\n"
+ "mov v20.16b, v30.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ ".inst 0x4e8297b4 // sdot v20.4s, v29.16b, v2.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str s19, [x21, x23]\n"
+ "mov v19.16b, v30.16b\n"
+ "add x23, x23, #0x4\n"
+ ".inst 0x4e8897be // sdot v30.4s, v29.16b, v8.16b\n"
+ ".inst 0x4e9c9774 // sdot v20.4s, v27.16b, v28.16b\n"
+ "ext v8.16b, v8.16b, v8.16b, #0x1\n"
+ "movi v10.4s, #0x0\n"
+ ".inst 0x4e82977e // sdot v30.4s, v27.16b, v2.16b\n"
+ ".inst 0x4e989734 // sdot v20.4s, v25.16b, v24.16b\n"
+ "mls v20.4s, v17.4s, v14.4s\n"
+ ".inst 0x4e9c973e // sdot v30.4s, v25.16b, v28.16b\n"
"ext v2.16b, v2.16b, v2.16b, #0x1\n"
- "movi v17.4s, #0x0\n"
- ".inst 0x4e87941e // sdot v30.4s, v0.16b, v7.16b\n"
- ".inst 0x4e829414 // sdot v20.4s, v0.16b, v2.16b\n"
- ".inst 0x4e8295d1 // sdot v17.4s, v14.16b, v2.16b\n"
- ".inst 0x4e9b9619 // sdot v25.4s, v16.16b, v27.16b\n"
- ".inst 0x4e9b97e5 // sdot v5.4s, v31.16b, v27.16b\n"
- "ext v27.16b, v27.16b, v27.16b, #0x1\n"
- ".inst 0x4e82961e // sdot v30.4s, v16.16b, v2.16b\n"
- "ldr q2, [x26, x28]\n"
- ".inst 0x4e9b9614 // sdot v20.4s, v16.16b, v27.16b\n"
- "mls v5.4s, v19.4s, v11.4s\n"
- ".inst 0x4e9b95d1 // sdot v17.4s, v14.16b, v27.16b\n"
- ".inst 0x4e9697f9 // sdot v25.4s, v31.16b, v22.16b\n"
- "ext v22.16b, v22.16b, v22.16b, #0x1\n"
- ".inst 0x4e9b97fe // sdot v30.4s, v31.16b, v27.16b\n"
- ".inst 0x4e9697f4 // sdot v20.4s, v31.16b, v22.16b\n"
- "sqrdmulh v5.4s, v5.4s, v9.4s\n"
- "mov v16.16b, v17.16b\n .inst 0x4e9695d0 // sdot v16.4s, v14.16b, v22.16b\n"
- ".inst 0x4e8795d1 // sdot v17.4s, v14.16b, v7.16b\n"
- "ldr q7, [x13, x28]\n"
- "mls v30.4s, v17.4s, v11.4s\n"
- "mls v25.4s, v18.4s, v11.4s\n"
- "mls v20.4s, v16.4s, v11.4s\n"
- "and v0.16b, v5.16b, v4.16b\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "ldr q9, [%x[params], #0x120]\n"
- "sqadd v5.4s, v5.4s, v0.4s\n"
- "and v16.16b, v30.16b, v4.16b\n"
- "and v31.16b, v25.16b, v4.16b\n"
- "and v0.16b, v20.16b, v4.16b\n"
+ "mls v30.4s, v15.4s, v14.4s\n"
+ "ext v28.16b, v28.16b, v28.16b, #0x1\n"
+ "ext v24.16b, v24.16b, v24.16b, #0x1\n"
+ ".inst 0x4e8897b6 // sdot v22.4s, v29.16b, v8.16b\n"
+ ".inst 0x4e8297b3 // sdot v19.4s, v29.16b, v2.16b\n"
+ "ldr q29, [%x[params], #0xd0]\n"
+ ".inst 0x4e82956a // sdot v10.4s, v11.16b, v2.16b\n"
+ "sqrdmulh v20.4s, v20.4s, v23.4s\n"
+ ".inst 0x4e829776 // sdot v22.4s, v27.16b, v2.16b\n"
+ "ldr q2, [SP, #0x30]\n"
+ ".inst 0x4e9c9773 // sdot v19.4s, v27.16b, v28.16b\n"
+ "ldr q27, [%x[params], #0xe0]\n"
+ ".inst 0x4e9c956a // sdot v10.4s, v11.16b, v28.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v23.4s\n"
+ ".inst 0x4e9c9736 // sdot v22.4s, v25.16b, v28.16b\n"
+ "ldr q28, [SP, #0x50]\n"
+ ".inst 0x4e989733 // sdot v19.4s, v25.16b, v24.16b\n"
+ "ldr q25, [%x[params], #0xf0]\n"
+ "mov v17.16b, v10.16b\n"
+ ".inst 0x4e88956a // sdot v10.4s, v11.16b, v8.16b\n"
+ "ldr q8, [SP, #0x10]\n"
+ "mls v22.4s, v10.4s, v14.4s\n"
+ ".inst 0x4e989571 // sdot v17.4s, v11.16b, v24.16b\n"
+ "ldr q24, [SP, #0x70]\n"
+ "and v18.16b, v30.16b, v21.16b\n"
+ "mls v19.4s, v17.4s, v14.4s\n"
+ "and v16.16b, v20.16b, v21.16b\n"
+ "movi v15.4s, #0x0\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sqrdmulh v22.4s, v22.4s, v23.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "srshl v5.4s, v5.4s, v4.4s\n"
- "sqadd v30.4s, v30.4s, v16.4s\n"
- "ldr q16, [%x[params], #0x100]\n"
- "sqadd v25.4s, v25.4s, v31.4s\n"
- "ldr q31, [%x[params], #0x110]\n"
- "sqadd v20.4s, v20.4s, v0.4s\n"
- "ldr q0, [%x[params], #0xf0]\n"
- "add v5.4s, v5.4s, v10.4s\n"
- "srshl v30.4s, v30.4s, v4.4s\n"
- "srshl v25.4s, v25.4s, v4.4s\n"
- "srshl v20.4s, v20.4s, v4.4s\n"
- "ldr q4, [%x[params], #0x130]\n"
- "smax v5.4s, v5.4s, v13.4s\n"
- "add v30.4s, v30.4s, v10.4s\n"
- "add v25.4s, v25.4s, v10.4s\n"
- "add v20.4s, v20.4s, v10.4s\n"
- "smin v5.4s, v5.4s, v12.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
+ ".inst 0x4e83956f // sdot v15.4s, v11.16b, v3.16b\n"
+ "movi v10.4s, #0x0\n"
+ "and v17.16b, v22.16b, v21.16b\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v30.4s, v30.4s, v18.4s\n"
+ "sqadd v20.4s, v20.4s, v16.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v23.4s\n"
+ "ldr q23, [%x[params], #0x100]\n"
+ ".inst 0x4e9f956f // sdot v15.4s, v11.16b, v31.16b\n"
+ "srshl v30.4s, v30.4s, v21.4s\n"
+ "srshl v20.4s, v20.4s, v21.4s\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "and v16.16b, v19.16b, v21.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "add v30.4s, v30.4s, v13.4s\n"
+ "srshl v22.4s, v22.4s, v21.4s\n"
+ "add v20.4s, v20.4s, v13.4s\n"
+ "mov v17.16b, v15.16b\n"
+ "smax v30.4s, v30.4s, v9.4s\n"
+ "add v22.4s, v22.4s, v13.4s\n"
+ "smax v20.4s, v20.4s, v9.4s\n"
"smin v30.4s, v30.4s, v12.4s\n"
- "smin v25.4s, v25.4s, v12.4s\n"
+ "smax v22.4s, v22.4s, v9.4s\n"
"smin v20.4s, v20.4s, v12.4s\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
- "movi v19.4s, #0x0\n"
- ".inst 0x4e8195d3 // sdot v19.4s, v14.16b, v1.16b\n"
- ".inst 0x4e9a95d3 // sdot v19.4s, v14.16b, v26.16b\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "smin v22.4s, v22.4s, v12.4s\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s5, [x24, x27]\n"
- "ldr q5, [%x[params], #0xe0]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "mov v18.16b, v19.16b\n .inst 0x4e9595d2 // sdot v18.4s, v14.16b, v21.16b\n"
+ "srshl v19.4s, v19.4s, v21.4s\n"
+ "ldr q21, [%x[params], #0x110]\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "str s30, [x23, x27]\n"
- ".inst 0x4e8695d3 // sdot v19.4s, v14.16b, v6.16b\n"
+ "str s30, [x25, x23]\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s25, [x22, x27]\n"
- "mov v30.16b, v5.16b\n"
- "str s20, [x21, x27]\n"
- "mov v25.16b, v5.16b\n"
- "mov v20.16b, v5.16b\n"
- ".inst 0x4e869405 // sdot v5.4s, v0.16b, v6.16b\n"
- ".inst 0x4e819419 // sdot v25.4s, v0.16b, v1.16b\n"
- ".inst 0x4e819605 // sdot v5.4s, v16.16b, v1.16b\n"
+ "ldr q30, [%x[params], #0xc0]\n"
+ "add v19.4s, v19.4s, v13.4s\n"
+ "str s20, [x22, x23]\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str s22, [x24, x23]\n"
+ "smax v19.4s, v19.4s, v9.4s\n"
+ ".inst 0x4e86956f // sdot v15.4s, v11.16b, v6.16b\n"
+ "mov v22.16b, v30.16b\n"
+ "mov v20.16b, v30.16b\n"
+ "smin v19.4s, v19.4s, v12.4s\n"
+ ".inst 0x4e8397b4 // sdot v20.4s, v29.16b, v3.16b\n"
+ ".inst 0x4e9a9571 // sdot v17.4s, v11.16b, v26.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str s19, [x21, x23]\n"
+ "mov v19.16b, v30.16b\n"
+ "add x23, x23, #0x4\n"
+ ".inst 0x4e8697be // sdot v30.4s, v29.16b, v6.16b\n"
+ ".inst 0x4e9f9774 // sdot v20.4s, v27.16b, v31.16b\n"
"ext v6.16b, v6.16b, v6.16b, #0x1\n"
- "add x27, x27, #0x4\n"
- "ext v1.16b, v1.16b, v1.16b, #0x1\n"
- "movi v17.4s, #0x0\n"
- ".inst 0x4e86941e // sdot v30.4s, v0.16b, v6.16b\n"
- ".inst 0x4e819414 // sdot v20.4s, v0.16b, v1.16b\n"
- ".inst 0x4e8195d1 // sdot v17.4s, v14.16b, v1.16b\n"
- ".inst 0x4e9a9619 // sdot v25.4s, v16.16b, v26.16b\n"
- ".inst 0x4e9a97e5 // sdot v5.4s, v31.16b, v26.16b\n"
+ ".inst 0x4e83977e // sdot v30.4s, v27.16b, v3.16b\n"
+ ".inst 0x4e9a9734 // sdot v20.4s, v25.16b, v26.16b\n"
+ "mls v20.4s, v17.4s, v14.4s\n"
+ ".inst 0x4e9f973e // sdot v30.4s, v25.16b, v31.16b\n"
+ "ext v3.16b, v3.16b, v3.16b, #0x1\n"
+ "mls v30.4s, v15.4s, v14.4s\n"
+ "ext v31.16b, v31.16b, v31.16b, #0x1\n"
"ext v26.16b, v26.16b, v26.16b, #0x1\n"
- ".inst 0x4e81961e // sdot v30.4s, v16.16b, v1.16b\n"
- "ldr q1, [x25, x28]\n"
- ".inst 0x4e9a9614 // sdot v20.4s, v16.16b, v26.16b\n"
- "mls v5.4s, v19.4s, v11.4s\n"
- ".inst 0x4e9a95d1 // sdot v17.4s, v14.16b, v26.16b\n"
- ".inst 0x4e9597f9 // sdot v25.4s, v31.16b, v21.16b\n"
- "ext v21.16b, v21.16b, v21.16b, #0x1\n"
- ".inst 0x4e9a97fe // sdot v30.4s, v31.16b, v26.16b\n"
- ".inst 0x4e9597f4 // sdot v20.4s, v31.16b, v21.16b\n"
- "sqrdmulh v5.4s, v5.4s, v9.4s\n"
- "mov v16.16b, v17.16b\n .inst 0x4e9595d0 // sdot v16.4s, v14.16b, v21.16b\n"
- ".inst 0x4e8695d1 // sdot v17.4s, v14.16b, v6.16b\n"
- "ldr q6, [x12, x28]\n"
- "mls v30.4s, v17.4s, v11.4s\n"
- "mls v25.4s, v18.4s, v11.4s\n"
- "mls v20.4s, v16.4s, v11.4s\n"
- "and v0.16b, v5.16b, v4.16b\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "ldr q9, [x15, x28]\n"
- "sqadd v5.4s, v5.4s, v0.4s\n"
- "ldp x15, x14, [%x[inptrs], #0x40]\n"
- "ldr q29, [x15, x28]\n"
- "ldr q28, [x14, x28]\n"
- "and v16.16b, v30.16b, v4.16b\n"
- "and v31.16b, v25.16b, v4.16b\n"
- "and v0.16b, v20.16b, v4.16b\n"
+ ".inst 0x4e8697b6 // sdot v22.4s, v29.16b, v6.16b\n"
+ ".inst 0x4e8397b3 // sdot v19.4s, v29.16b, v3.16b\n"
+ "ldr q29, [%x[params], #0x130]\n"
+ ".inst 0x4e83956a // sdot v10.4s, v11.16b, v3.16b\n"
+ "sqrdmulh v20.4s, v20.4s, v23.4s\n"
+ ".inst 0x4e839776 // sdot v22.4s, v27.16b, v3.16b\n"
+ ".inst 0x4e9f9773 // sdot v19.4s, v27.16b, v31.16b\n"
+ "ldr q27, [%x[params], #0x140]\n"
+ ".inst 0x4e9f956a // sdot v10.4s, v11.16b, v31.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v23.4s\n"
+ ".inst 0x4e9f9736 // sdot v22.4s, v25.16b, v31.16b\n"
+ ".inst 0x4e9a9733 // sdot v19.4s, v25.16b, v26.16b\n"
+ "ldr q25, [%x[params], #0x150]\n"
+ "mov v17.16b, v10.16b\n"
+ ".inst 0x4e86956a // sdot v10.4s, v11.16b, v6.16b\n"
+ "mls v22.4s, v10.4s, v14.4s\n"
+ ".inst 0x4e9a9571 // sdot v17.4s, v11.16b, v26.16b\n"
+ "and v18.16b, v30.16b, v21.16b\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "and v16.16b, v20.16b, v21.16b\n"
+ "movi v15.4s, #0x0\n"
+ "mls v19.4s, v17.4s, v14.4s\n"
+ ".inst 0x4e82956f // sdot v15.4s, v11.16b, v2.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v22.4s, v22.4s, v23.4s\n"
+ "movi v10.4s, #0x0\n"
+ "sqadd v30.4s, v30.4s, v18.4s\n"
+ ".inst 0x4e9c956f // sdot v15.4s, v11.16b, v28.16b\n"
+ "sqrdmulh v19.4s, v19.4s, v23.4s\n"
+ "ldr q23, [%x[params], #0x160]\n"
+ "and v17.16b, v22.16b, v21.16b\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "srshl v30.4s, v30.4s, v21.4s\n"
+ "sqadd v20.4s, v20.4s, v16.4s\n"
+ "and v16.16b, v19.16b, v21.16b\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "ldp x13, x12, [%x[inptrs], #0x50]\n"
- "ldr q27, [x13, x28]\n"
- "ldr q26, [x12, x28]\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "srshl v5.4s, v5.4s, v4.4s\n"
- "sqadd v30.4s, v30.4s, v16.4s\n"
- "ldr q16, [%x[params], #0x160]\n"
- "sqadd v25.4s, v25.4s, v31.4s\n"
- "ldr q31, [%x[params], #0x170]\n"
- "sqadd v20.4s, v20.4s, v0.4s\n"
- "ldr q0, [%x[params], #0x150]\n"
- "add v5.4s, v5.4s, v10.4s\n"
- "srshl v30.4s, v30.4s, v4.4s\n"
- "srshl v25.4s, v25.4s, v4.4s\n"
- "srshl v20.4s, v20.4s, v4.4s\n"
- "ldr q4, [x10, x28]\n"
- "ldp x10, x9, [%x[inptrs], #0x60]\n"
- "ldr q24, [x10, x28]\n"
- "ldr q23, [x9, x28]\n"
- "smax v5.4s, v5.4s, v13.4s\n"
- "add v30.4s, v30.4s, v10.4s\n"
- "add v25.4s, v25.4s, v10.4s\n"
- "add v20.4s, v20.4s, v10.4s\n"
- "ldp x26, x25, [%x[inptrs], #0x70]\n"
- "ldr q22, [x26, x28]\n"
- "ldr q21, [x25, x28]\n"
- "smin v5.4s, v5.4s, v12.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "ldp x15, x14, [%x[inptrs], #0x0]\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "ldp x13, x12, [%x[inptrs], #0x10]\n"
- "ldp x10, x9, [%x[inptrs], #0x20]\n"
+ "add v30.4s, v30.4s, v13.4s\n"
+ "srshl v20.4s, v20.4s, v21.4s\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "mov v17.16b, v15.16b\n"
+ "smax v30.4s, v30.4s, v9.4s\n"
+ "add v20.4s, v20.4s, v13.4s\n"
+ "srshl v22.4s, v22.4s, v21.4s\n"
"smin v30.4s, v30.4s, v12.4s\n"
- "smin v25.4s, v25.4s, v12.4s\n"
- "ldp x26, x25, [%x[inptrs], #0x30]\n"
+ "smax v20.4s, v20.4s, v9.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "add v22.4s, v22.4s, v13.4s\n"
"smin v20.4s, v20.4s, v12.4s\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s5, [x24, x27]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "zip2 v5.16b, v9.16b, v7.16b\n"
- "zip1 v9.16b, v9.16b, v7.16b\n"
- "zip1 v7.16b, v8.16b, v6.16b\n"
- "zip2 v6.16b, v8.16b, v6.16b\n"
+ "smax v22.4s, v22.4s, v9.4s\n"
+ "srshl v19.4s, v19.4s, v21.4s\n"
+ "ldr q21, [%x[params], #0x170]\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "str s30, [x23, x27]\n"
+ "str s30, [x25, x23]\n"
+ "smin v22.4s, v22.4s, v12.4s\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s25, [x22, x27]\n"
- "zip2 v8.16b, v9.16b, v7.16b\n"
- "str s20, [x21, x27]\n"
- "zip1 v9.16b, v9.16b, v7.16b\n"
- "zip1 v7.16b, v5.16b, v6.16b\n"
- "add x27, x27, #0x4\n"
- "zip2 v6.16b, v5.16b, v6.16b\n"
- "ldr q5, [%x[params], #0x140]\n"
- "zip2 v30.16b, v4.16b, v2.16b\n"
+ "ldr q30, [%x[params], #0x120]\n"
"add %x[params], %x[params], #0x180\n"
- "zip1 v4.16b, v4.16b, v2.16b\n"
- "zip1 v2.16b, v3.16b, v1.16b\n"
- "zip2 v1.16b, v3.16b, v1.16b\n"
- "zip2 v25.16b, v29.16b, v27.16b\n"
- "zip1 v29.16b, v29.16b, v27.16b\n"
- "zip1 v27.16b, v28.16b, v26.16b\n"
- "zip2 v26.16b, v28.16b, v26.16b\n"
- "zip2 v20.16b, v24.16b, v22.16b\n"
- "zip1 v24.16b, v24.16b, v22.16b\n"
- "zip1 v22.16b, v23.16b, v21.16b\n"
- "zip2 v21.16b, v23.16b, v21.16b\n"
- "zip2 v3.16b, v4.16b, v2.16b\n"
- "zip1 v4.16b, v4.16b, v2.16b\n"
- "zip1 v2.16b, v30.16b, v1.16b\n"
- "zip2 v1.16b, v30.16b, v1.16b\n"
- "zip2 v28.16b, v29.16b, v27.16b\n"
- "zip1 v29.16b, v29.16b, v27.16b\n"
- "zip1 v27.16b, v25.16b, v26.16b\n"
- "zip2 v26.16b, v25.16b, v26.16b\n"
- "zip2 v23.16b, v24.16b, v22.16b\n"
- "zip1 v24.16b, v24.16b, v22.16b\n"
- "zip1 v22.16b, v20.16b, v21.16b\n"
- "zip2 v21.16b, v20.16b, v21.16b\n"
- "mov v30.16b, v5.16b\n"
- "mov v25.16b, v5.16b\n"
- "mov v20.16b, v5.16b\n"
- "bgt 1b\n"
- "2:" // Detached iteration
- "movi v19.4s, #0x0\n"
- ".inst 0x4e8495d3 // sdot v19.4s, v14.16b, v4.16b\n"
- ".inst 0x4e899405 // sdot v5.4s, v0.16b, v9.16b\n"
- "tst %x[n_channels], #0xf\n"
- ".inst 0x4e9d95d3 // sdot v19.4s, v14.16b, v29.16b\n"
- ".inst 0x4e849419 // sdot v25.4s, v0.16b, v4.16b\n"
- "add x28, x28, #0x10\n"
- ".inst 0x4e849605 // sdot v5.4s, v16.16b, v4.16b\n"
- "ext v4.16b, v4.16b, v4.16b, #0x1\n"
- "mov v18.16b, v19.16b\n .inst 0x4e9895d2 // sdot v18.4s, v14.16b, v24.16b\n"
- ".inst 0x4e8995d3 // sdot v19.4s, v14.16b, v9.16b\n"
- "ext v9.16b, v9.16b, v9.16b, #0x1\n"
- ".inst 0x4e9d9619 // sdot v25.4s, v16.16b, v29.16b\n"
- ".inst 0x4e9d97e5 // sdot v5.4s, v31.16b, v29.16b\n"
- "ext v29.16b, v29.16b, v29.16b, #0x1\n"
- ".inst 0x4e89941e // sdot v30.4s, v0.16b, v9.16b\n"
- ".inst 0x4e849414 // sdot v20.4s, v0.16b, v4.16b\n"
- "movi v17.4s, #0x0\n"
- ".inst 0x4e8495d1 // sdot v17.4s, v14.16b, v4.16b\n"
- ".inst 0x4e9d95d1 // sdot v17.4s, v14.16b, v29.16b\n"
- ".inst 0x4e9897f9 // sdot v25.4s, v31.16b, v24.16b\n"
- "ext v24.16b, v24.16b, v24.16b, #0x1\n"
- ".inst 0x4e84961e // sdot v30.4s, v16.16b, v4.16b\n"
- "ldr q4, [%x[params], #0x10]\n"
- ".inst 0x4e9d9614 // sdot v20.4s, v16.16b, v29.16b\n"
- "mls v5.4s, v19.4s, v11.4s\n"
- "mov v16.16b, v17.16b\n .inst 0x4e9895d0 // sdot v16.4s, v14.16b, v24.16b\n"
- ".inst 0x4e8995d1 // sdot v17.4s, v14.16b, v9.16b\n"
- "ldr q9, [%x[params], #0x0]\n"
- "sqrdmulh v5.4s, v5.4s, v9.4s\n"
- ".inst 0x4e9d97fe // sdot v30.4s, v31.16b, v29.16b\n"
- ".inst 0x4e9897f4 // sdot v20.4s, v31.16b, v24.16b\n"
- "mls v30.4s, v17.4s, v11.4s\n"
- "mls v25.4s, v18.4s, v11.4s\n"
- "mls v20.4s, v16.4s, v11.4s\n"
- "and v0.16b, v5.16b, v4.16b\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "ldr q9, [%x[params], #0x60]\n"
- "sqadd v5.4s, v5.4s, v0.4s\n"
- "and v16.16b, v30.16b, v4.16b\n"
- "and v31.16b, v25.16b, v4.16b\n"
- "and v0.16b, v20.16b, v4.16b\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "srshl v5.4s, v5.4s, v4.4s\n"
- "sqadd v30.4s, v30.4s, v16.4s\n"
- "ldr q16, [%x[params], #0x40]\n"
- "sqadd v25.4s, v25.4s, v31.4s\n"
- "ldr q31, [%x[params], #0x50]\n"
- "sqadd v20.4s, v20.4s, v0.4s\n"
- "ldr q0, [%x[params], #0x30]\n"
- "add v5.4s, v5.4s, v10.4s\n"
- "srshl v30.4s, v30.4s, v4.4s\n"
- "srshl v25.4s, v25.4s, v4.4s\n"
- "srshl v20.4s, v20.4s, v4.4s\n"
- "ldr q4, [%x[params], #0x70]\n"
- "smax v5.4s, v5.4s, v13.4s\n"
- "add v30.4s, v30.4s, v10.4s\n"
- "add v25.4s, v25.4s, v10.4s\n"
- "add v20.4s, v20.4s, v10.4s\n"
- "smin v5.4s, v5.4s, v12.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smin v30.4s, v30.4s, v12.4s\n"
- "smin v25.4s, v25.4s, v12.4s\n"
- "smin v20.4s, v20.4s, v12.4s\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
- "movi v19.4s, #0x0\n"
- ".inst 0x4e8395d3 // sdot v19.4s, v14.16b, v3.16b\n"
- ".inst 0x4e9c95d3 // sdot v19.4s, v14.16b, v28.16b\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s5, [x24, x27]\n"
- "ldr q5, [%x[params], #0x20]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "mov v18.16b, v19.16b\n .inst 0x4e9795d2 // sdot v18.4s, v14.16b, v23.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "str s30, [x23, x27]\n"
- ".inst 0x4e8895d3 // sdot v19.4s, v14.16b, v8.16b\n"
+ "add v19.4s, v19.4s, v13.4s\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s25, [x22, x27]\n"
- "mov v30.16b, v5.16b\n"
- "str s20, [x21, x27]\n"
- "mov v25.16b, v5.16b\n"
- "mov v20.16b, v5.16b\n"
- ".inst 0x4e889405 // sdot v5.4s, v0.16b, v8.16b\n"
- ".inst 0x4e839419 // sdot v25.4s, v0.16b, v3.16b\n"
- ".inst 0x4e839605 // sdot v5.4s, v16.16b, v3.16b\n"
+ "str s20, [x22, x23]\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ ".inst 0x4e88956f // sdot v15.4s, v11.16b, v8.16b\n"
+ "smax v19.4s, v19.4s, v9.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str s22, [x24, x23]\n"
+ "smin v19.4s, v19.4s, v12.4s\n"
+ "mov v22.16b, v30.16b\n"
+ "mov v20.16b, v30.16b\n"
+ ".inst 0x4e8297b4 // sdot v20.4s, v29.16b, v2.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ ".inst 0x4e989571 // sdot v17.4s, v11.16b, v24.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str s19, [x21, x23]\n"
+ "mov v19.16b, v30.16b\n"
+ "add x23, x23, #0x4\n"
+ ".inst 0x4e8897be // sdot v30.4s, v29.16b, v8.16b\n"
+ ".inst 0x4e9c9774 // sdot v20.4s, v27.16b, v28.16b\n"
"ext v8.16b, v8.16b, v8.16b, #0x1\n"
- "add x27, x27, #0x4\n"
- "ext v3.16b, v3.16b, v3.16b, #0x1\n"
- "movi v17.4s, #0x0\n"
- ".inst 0x4e88941e // sdot v30.4s, v0.16b, v8.16b\n"
- ".inst 0x4e839414 // sdot v20.4s, v0.16b, v3.16b\n"
- ".inst 0x4e8395d1 // sdot v17.4s, v14.16b, v3.16b\n"
- ".inst 0x4e9c9619 // sdot v25.4s, v16.16b, v28.16b\n"
- ".inst 0x4e9c97e5 // sdot v5.4s, v31.16b, v28.16b\n"
+ ".inst 0x4e82977e // sdot v30.4s, v27.16b, v2.16b\n"
+ ".inst 0x4e989734 // sdot v20.4s, v25.16b, v24.16b\n"
+ "mls v20.4s, v17.4s, v14.4s\n"
+ ".inst 0x4e9c973e // sdot v30.4s, v25.16b, v28.16b\n"
+ "ext v2.16b, v2.16b, v2.16b, #0x1\n"
+ "mls v30.4s, v15.4s, v14.4s\n"
"ext v28.16b, v28.16b, v28.16b, #0x1\n"
- ".inst 0x4e83961e // sdot v30.4s, v16.16b, v3.16b\n"
- ".inst 0x4e9c9614 // sdot v20.4s, v16.16b, v28.16b\n"
- "mls v5.4s, v19.4s, v11.4s\n"
- ".inst 0x4e9c95d1 // sdot v17.4s, v14.16b, v28.16b\n"
- ".inst 0x4e9797f9 // sdot v25.4s, v31.16b, v23.16b\n"
- "ext v23.16b, v23.16b, v23.16b, #0x1\n"
- ".inst 0x4e9c97fe // sdot v30.4s, v31.16b, v28.16b\n"
- ".inst 0x4e9797f4 // sdot v20.4s, v31.16b, v23.16b\n"
- "sqrdmulh v5.4s, v5.4s, v9.4s\n"
- "mov v16.16b, v17.16b\n .inst 0x4e9795d0 // sdot v16.4s, v14.16b, v23.16b\n"
- ".inst 0x4e8895d1 // sdot v17.4s, v14.16b, v8.16b\n"
- "mls v30.4s, v17.4s, v11.4s\n"
- "mls v25.4s, v18.4s, v11.4s\n"
- "mls v20.4s, v16.4s, v11.4s\n"
- "and v0.16b, v5.16b, v4.16b\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "ldr q9, [%x[params], #0xc0]\n"
- "sqadd v5.4s, v5.4s, v0.4s\n"
- "and v16.16b, v30.16b, v4.16b\n"
- "and v31.16b, v25.16b, v4.16b\n"
- "and v0.16b, v20.16b, v4.16b\n"
+ "ext v24.16b, v24.16b, v24.16b, #0x1\n"
+ ".inst 0x4e8897b6 // sdot v22.4s, v29.16b, v8.16b\n"
+ ".inst 0x4e8297b3 // sdot v19.4s, v29.16b, v2.16b\n"
+ ".inst 0x4e82956a // sdot v10.4s, v11.16b, v2.16b\n"
+ "sqrdmulh v20.4s, v20.4s, v23.4s\n"
+ ".inst 0x4e829776 // sdot v22.4s, v27.16b, v2.16b\n"
+ ".inst 0x4e9c9773 // sdot v19.4s, v27.16b, v28.16b\n"
+ ".inst 0x4e9c956a // sdot v10.4s, v11.16b, v28.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v23.4s\n"
+ ".inst 0x4e9c9736 // sdot v22.4s, v25.16b, v28.16b\n"
+ ".inst 0x4e989733 // sdot v19.4s, v25.16b, v24.16b\n"
+ "mov v17.16b, v10.16b\n"
+ ".inst 0x4e88956a // sdot v10.4s, v11.16b, v8.16b\n"
+ "mls v22.4s, v10.4s, v14.4s\n"
+ ".inst 0x4e989571 // sdot v17.4s, v11.16b, v24.16b\n"
+ "and v18.16b, v30.16b, v21.16b\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "and v16.16b, v20.16b, v21.16b\n"
+ "mls v19.4s, v17.4s, v14.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v23.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "srshl v5.4s, v5.4s, v4.4s\n"
- "sqadd v30.4s, v30.4s, v16.4s\n"
- "ldr q16, [%x[params], #0xa0]\n"
- "sqadd v25.4s, v25.4s, v31.4s\n"
- "ldr q31, [%x[params], #0xb0]\n"
- "sqadd v20.4s, v20.4s, v0.4s\n"
- "ldr q0, [%x[params], #0x90]\n"
- "add v5.4s, v5.4s, v10.4s\n"
- "srshl v30.4s, v30.4s, v4.4s\n"
- "srshl v25.4s, v25.4s, v4.4s\n"
- "srshl v20.4s, v20.4s, v4.4s\n"
- "ldr q4, [%x[params], #0xd0]\n"
- "smax v5.4s, v5.4s, v13.4s\n"
- "add v30.4s, v30.4s, v10.4s\n"
- "add v25.4s, v25.4s, v10.4s\n"
- "add v20.4s, v20.4s, v10.4s\n"
- "smin v5.4s, v5.4s, v12.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smin v30.4s, v30.4s, v12.4s\n"
- "smin v25.4s, v25.4s, v12.4s\n"
- "smin v20.4s, v20.4s, v12.4s\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
- "movi v19.4s, #0x0\n"
- ".inst 0x4e8295d3 // sdot v19.4s, v14.16b, v2.16b\n"
- ".inst 0x4e9b95d3 // sdot v19.4s, v14.16b, v27.16b\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s5, [x24, x27]\n"
- "ldr q5, [%x[params], #0x80]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "mov v18.16b, v19.16b\n .inst 0x4e9695d2 // sdot v18.4s, v14.16b, v22.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "str s30, [x23, x27]\n"
- ".inst 0x4e8795d3 // sdot v19.4s, v14.16b, v7.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s25, [x22, x27]\n"
- "mov v30.16b, v5.16b\n"
- "str s20, [x21, x27]\n"
- "mov v25.16b, v5.16b\n"
- "mov v20.16b, v5.16b\n"
- ".inst 0x4e879405 // sdot v5.4s, v0.16b, v7.16b\n"
- ".inst 0x4e829419 // sdot v25.4s, v0.16b, v2.16b\n"
- ".inst 0x4e829605 // sdot v5.4s, v16.16b, v2.16b\n"
- "ext v7.16b, v7.16b, v7.16b, #0x1\n"
- "add x27, x27, #0x4\n"
- "ext v2.16b, v2.16b, v2.16b, #0x1\n"
- "movi v17.4s, #0x0\n"
- ".inst 0x4e87941e // sdot v30.4s, v0.16b, v7.16b\n"
- ".inst 0x4e829414 // sdot v20.4s, v0.16b, v2.16b\n"
- ".inst 0x4e8295d1 // sdot v17.4s, v14.16b, v2.16b\n"
- ".inst 0x4e9b9619 // sdot v25.4s, v16.16b, v27.16b\n"
- ".inst 0x4e9b97e5 // sdot v5.4s, v31.16b, v27.16b\n"
- "ext v27.16b, v27.16b, v27.16b, #0x1\n"
- ".inst 0x4e82961e // sdot v30.4s, v16.16b, v2.16b\n"
- ".inst 0x4e9b9614 // sdot v20.4s, v16.16b, v27.16b\n"
- "mls v5.4s, v19.4s, v11.4s\n"
- ".inst 0x4e9b95d1 // sdot v17.4s, v14.16b, v27.16b\n"
- ".inst 0x4e9697f9 // sdot v25.4s, v31.16b, v22.16b\n"
- "ext v22.16b, v22.16b, v22.16b, #0x1\n"
- ".inst 0x4e9b97fe // sdot v30.4s, v31.16b, v27.16b\n"
- ".inst 0x4e9697f4 // sdot v20.4s, v31.16b, v22.16b\n"
- "sqrdmulh v5.4s, v5.4s, v9.4s\n"
- "mov v16.16b, v17.16b\n .inst 0x4e9695d0 // sdot v16.4s, v14.16b, v22.16b\n"
- ".inst 0x4e8795d1 // sdot v17.4s, v14.16b, v7.16b\n"
- "mls v30.4s, v17.4s, v11.4s\n"
- "mls v25.4s, v18.4s, v11.4s\n"
- "mls v20.4s, v16.4s, v11.4s\n"
- "and v0.16b, v5.16b, v4.16b\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "ldr q9, [%x[params], #0x120]\n"
- "sqadd v5.4s, v5.4s, v0.4s\n"
- "and v16.16b, v30.16b, v4.16b\n"
- "and v31.16b, v25.16b, v4.16b\n"
- "and v0.16b, v20.16b, v4.16b\n"
+ "sqadd v30.4s, v30.4s, v18.4s\n"
+ "and v17.16b, v22.16b, v21.16b\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "srshl v30.4s, v30.4s, v21.4s\n"
+ "sqadd v20.4s, v20.4s, v16.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v23.4s\n"
+ "add v30.4s, v30.4s, v13.4s\n"
+ "srshl v20.4s, v20.4s, v21.4s\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "and v16.16b, v19.16b, v21.16b\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "srshl v5.4s, v5.4s, v4.4s\n"
- "sqadd v30.4s, v30.4s, v16.4s\n"
- "ldr q16, [%x[params], #0x100]\n"
- "sqadd v25.4s, v25.4s, v31.4s\n"
- "ldr q31, [%x[params], #0x110]\n"
- "sqadd v20.4s, v20.4s, v0.4s\n"
- "ldr q0, [%x[params], #0xf0]\n"
- "add v5.4s, v5.4s, v10.4s\n"
- "srshl v30.4s, v30.4s, v4.4s\n"
- "srshl v25.4s, v25.4s, v4.4s\n"
- "srshl v20.4s, v20.4s, v4.4s\n"
- "ldr q4, [%x[params], #0x130]\n"
- "smax v5.4s, v5.4s, v13.4s\n"
- "add v30.4s, v30.4s, v10.4s\n"
- "add v25.4s, v25.4s, v10.4s\n"
- "add v20.4s, v20.4s, v10.4s\n"
- "smin v5.4s, v5.4s, v12.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
+ "smax v30.4s, v30.4s, v9.4s\n"
+ "srshl v22.4s, v22.4s, v21.4s\n"
+ "add v20.4s, v20.4s, v13.4s\n"
"smin v30.4s, v30.4s, v12.4s\n"
- "smin v25.4s, v25.4s, v12.4s\n"
- "smin v20.4s, v20.4s, v12.4s\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
- "movi v19.4s, #0x0\n"
- ".inst 0x4e8195d3 // sdot v19.4s, v14.16b, v1.16b\n"
- ".inst 0x4e9a95d3 // sdot v19.4s, v14.16b, v26.16b\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "add v22.4s, v22.4s, v13.4s\n"
+ "smax v20.4s, v20.4s, v9.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s5, [x24, x27]\n"
- "ldr q5, [%x[params], #0xe0]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "mov v18.16b, v19.16b\n .inst 0x4e9595d2 // sdot v18.4s, v14.16b, v21.16b\n"
- "add %x[params], %x[params], #0x140\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "str s30, [x23, x27]\n"
- ".inst 0x4e8695d3 // sdot v19.4s, v14.16b, v6.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s25, [x22, x27]\n"
- "mov v30.16b, v5.16b\n"
- "str s20, [x21, x27]\n"
- "mov v25.16b, v5.16b\n"
- "mov v20.16b, v5.16b\n"
- ".inst 0x4e869405 // sdot v5.4s, v0.16b, v6.16b\n"
- ".inst 0x4e819419 // sdot v25.4s, v0.16b, v1.16b\n"
- ".inst 0x4e819605 // sdot v5.4s, v16.16b, v1.16b\n"
- "ext v6.16b, v6.16b, v6.16b, #0x1\n"
- "add x27, x27, #0x4\n"
- "ext v1.16b, v1.16b, v1.16b, #0x1\n"
- "movi v17.4s, #0x0\n"
- ".inst 0x4e86941e // sdot v30.4s, v0.16b, v6.16b\n"
- ".inst 0x4e819414 // sdot v20.4s, v0.16b, v1.16b\n"
- ".inst 0x4e8195d1 // sdot v17.4s, v14.16b, v1.16b\n"
- ".inst 0x4e9a9619 // sdot v25.4s, v16.16b, v26.16b\n"
- ".inst 0x4e9a97e5 // sdot v5.4s, v31.16b, v26.16b\n"
- "ext v26.16b, v26.16b, v26.16b, #0x1\n"
- ".inst 0x4e81961e // sdot v30.4s, v16.16b, v1.16b\n"
- ".inst 0x4e9a9614 // sdot v20.4s, v16.16b, v26.16b\n"
- "mls v5.4s, v19.4s, v11.4s\n"
- ".inst 0x4e9a95d1 // sdot v17.4s, v14.16b, v26.16b\n"
- ".inst 0x4e9597f9 // sdot v25.4s, v31.16b, v21.16b\n"
- "ext v21.16b, v21.16b, v21.16b, #0x1\n"
- ".inst 0x4e9a97fe // sdot v30.4s, v31.16b, v26.16b\n"
- ".inst 0x4e9597f4 // sdot v20.4s, v31.16b, v21.16b\n"
- "sqrdmulh v5.4s, v5.4s, v9.4s\n"
- "mov v16.16b, v17.16b\n .inst 0x4e9595d0 // sdot v16.4s, v14.16b, v21.16b\n"
- ".inst 0x4e8695d1 // sdot v17.4s, v14.16b, v6.16b\n"
- "mls v30.4s, v17.4s, v11.4s\n"
- "mls v25.4s, v18.4s, v11.4s\n"
- "mls v20.4s, v16.4s, v11.4s\n"
- "and v0.16b, v5.16b, v4.16b\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "sqadd v5.4s, v5.4s, v0.4s\n"
- "and v16.16b, v30.16b, v4.16b\n"
- "and v31.16b, v25.16b, v4.16b\n"
- "and v0.16b, v20.16b, v4.16b\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqadd v30.4s, v30.4s, v16.4s\n"
- "sqadd v25.4s, v25.4s, v31.4s\n"
- "sqadd v20.4s, v20.4s, v0.4s\n"
- "srshl v5.4s, v5.4s, v4.4s\n"
- "srshl v30.4s, v30.4s, v4.4s\n"
- "srshl v25.4s, v25.4s, v4.4s\n"
- "srshl v20.4s, v20.4s, v4.4s\n"
- "add v5.4s, v5.4s, v10.4s\n"
- "add v30.4s, v30.4s, v10.4s\n"
- "add v25.4s, v25.4s, v10.4s\n"
- "add v20.4s, v20.4s, v10.4s\n"
- "smax v5.4s, v5.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smin v5.4s, v5.4s, v12.4s\n"
- "smin v30.4s, v30.4s, v12.4s\n"
- "smin v25.4s, v25.4s, v12.4s\n"
+ "smax v22.4s, v22.4s, v9.4s\n"
"smin v20.4s, v20.4s, v12.4s\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "srshl v19.4s, v19.4s, v21.4s\n"
+ "smin v22.4s, v22.4s, v12.4s\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s30, [x25, x23]\n"
+ "add v19.4s, v19.4s, v13.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
- "str s5, [x24, x27]\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "str s30, [x23, x27]\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str s22, [x24, x23]\n"
+ "smax v19.4s, v19.4s, v9.4s\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s25, [x22, x27]\n"
- "str s20, [x21, x27]\n"
- "add x27, x27, #0x4\n"
- "beq 35f\n"
- "3:" // Oddments
- "and x20, %x[n_channels], #0xf\n"
- "add x15, x15, x28\n"
- "add x14, x14, x28\n"
- "add x13, x13, x28\n"
- "add x12, x12, x28\n"
- "add x10, x10, x28\n"
- "add x9, x9, x28\n"
- "add x26, x26, x28\n"
- "add x25, x25, x28\n"
- "tbz %x[n_channels], #3, 7f\n"
- "ldr d9, [x15], #0x8\n"
- "ldr d8, [x14], #0x8\n"
- "ldr d7, [x13], #0x8\n"
- "ldr d6, [x12], #0x8\n"
- "ldr d4, [x10], #0x8\n"
- "ldr d3, [x9], #0x8\n"
- "ldr d2, [x26], #0x8\n"
- "ldr d1, [x25], #0x8\n"
- "tbz %x[n_channels], #2, 5f\n"
- "ld1 { v9.s }[2], [x15], #0x4\n"
- "ld1 { v8.s }[2], [x14], #0x4\n"
- "ld1 { v7.s }[2], [x13], #0x4\n"
- "ld1 { v6.s }[2], [x12], #0x4\n"
- "ld1 { v4.s }[2], [x10], #0x4\n"
- "ld1 { v3.s }[2], [x9], #0x4\n"
- "ld1 { v2.s }[2], [x26], #0x4\n"
- "ld1 { v1.s }[2], [x25], #0x4\n"
- "tbz %x[n_channels], #1, 4f\n"
- "ld1 { v9.h }[6], [x15], #0x2\n"
- "ld1 { v8.h }[6], [x14], #0x2\n"
- "ld1 { v7.h }[6], [x13], #0x2\n"
- "ld1 { v6.h }[6], [x12], #0x2\n"
- "ld1 { v4.h }[6], [x10], #0x2\n"
- "ld1 { v3.h }[6], [x9], #0x2\n"
- "ld1 { v2.h }[6], [x26], #0x2\n"
- "ld1 { v1.h }[6], [x25], #0x2\n"
- "tbz %x[n_channels], #0, 11f\n"
- "ld1 { v9.b }[14], [x15], #0x1\n"
- "ld1 { v8.b }[14], [x14], #0x1\n"
- "ld1 { v7.b }[14], [x13], #0x1\n"
- "ld1 { v6.b }[14], [x12], #0x1\n"
- "ld1 { v4.b }[14], [x10], #0x1\n"
- "ld1 { v3.b }[14], [x9], #0x1\n"
- "ld1 { v2.b }[14], [x26], #0x1\n"
- "ld1 { v1.b }[14], [x25], #0x1\n"
- "b 11f\n"
- "4:" // Oddments: Load (A): Bit 3: Bit 2: Bit 1: Unset
- "tbz %x[n_channels], #0, 11f\n"
- "ld1 { v9.b }[12], [x15], #0x1\n"
- "ld1 { v8.b }[12], [x14], #0x1\n"
- "ld1 { v7.b }[12], [x13], #0x1\n"
- "ld1 { v6.b }[12], [x12], #0x1\n"
- "ld1 { v4.b }[12], [x10], #0x1\n"
- "ld1 { v3.b }[12], [x9], #0x1\n"
- "ld1 { v2.b }[12], [x26], #0x1\n"
- "ld1 { v1.b }[12], [x25], #0x1\n"
- "b 11f\n"
- "5:" // Oddments: Load (A): Bit 3: Bit 2: Unset
- "tbz %x[n_channels], #1, 6f\n"
- "ld1 { v9.h }[4], [x15], #0x2\n"
- "ld1 { v8.h }[4], [x14], #0x2\n"
- "ld1 { v7.h }[4], [x13], #0x2\n"
- "ld1 { v6.h }[4], [x12], #0x2\n"
- "ld1 { v4.h }[4], [x10], #0x2\n"
- "ld1 { v3.h }[4], [x9], #0x2\n"
- "ld1 { v2.h }[4], [x26], #0x2\n"
- "ld1 { v1.h }[4], [x25], #0x2\n"
- "tbz %x[n_channels], #0, 11f\n"
- "ld1 { v9.b }[10], [x15], #0x1\n"
- "ld1 { v8.b }[10], [x14], #0x1\n"
- "ld1 { v7.b }[10], [x13], #0x1\n"
- "ld1 { v6.b }[10], [x12], #0x1\n"
- "ld1 { v4.b }[10], [x10], #0x1\n"
- "ld1 { v3.b }[10], [x9], #0x1\n"
- "ld1 { v2.b }[10], [x26], #0x1\n"
- "ld1 { v1.b }[10], [x25], #0x1\n"
- "b 11f\n"
- "6:" // Oddments: Load (A): Bit 3: Bit 2: Unset: Bit 1: Unset
- "tbz %x[n_channels], #0, 11f\n"
- "ld1 { v9.b }[8], [x15], #0x1\n"
- "ld1 { v8.b }[8], [x14], #0x1\n"
- "ld1 { v7.b }[8], [x13], #0x1\n"
- "ld1 { v6.b }[8], [x12], #0x1\n"
- "ld1 { v4.b }[8], [x10], #0x1\n"
- "ld1 { v3.b }[8], [x9], #0x1\n"
- "ld1 { v2.b }[8], [x26], #0x1\n"
- "ld1 { v1.b }[8], [x25], #0x1\n"
- "b 11f\n"
- "7:" // Oddments: Load (A): Bit 3: Unset
- "tbz %x[n_channels], #2, 9f\n"
- "ldr s9, [x15], #0x4\n"
- "ldr s8, [x14], #0x4\n"
- "ldr s7, [x13], #0x4\n"
- "ldr s6, [x12], #0x4\n"
- "ldr s4, [x10], #0x4\n"
- "ldr s3, [x9], #0x4\n"
- "ldr s2, [x26], #0x4\n"
- "ldr s1, [x25], #0x4\n"
- "tbz %x[n_channels], #1, 8f\n"
- "ld1 { v9.h }[2], [x15], #0x2\n"
- "ld1 { v8.h }[2], [x14], #0x2\n"
- "ld1 { v7.h }[2], [x13], #0x2\n"
- "ld1 { v6.h }[2], [x12], #0x2\n"
- "ld1 { v4.h }[2], [x10], #0x2\n"
- "ld1 { v3.h }[2], [x9], #0x2\n"
- "ld1 { v2.h }[2], [x26], #0x2\n"
- "ld1 { v1.h }[2], [x25], #0x2\n"
- "tbz %x[n_channels], #0, 11f\n"
- "ld1 { v9.b }[6], [x15], #0x1\n"
- "ld1 { v8.b }[6], [x14], #0x1\n"
- "ld1 { v7.b }[6], [x13], #0x1\n"
- "ld1 { v6.b }[6], [x12], #0x1\n"
- "ld1 { v4.b }[6], [x10], #0x1\n"
- "ld1 { v3.b }[6], [x9], #0x1\n"
- "ld1 { v2.b }[6], [x26], #0x1\n"
- "ld1 { v1.b }[6], [x25], #0x1\n"
- "b 11f\n"
- "8:" // Oddments: Load (A): Bit 3: Unset: Bit 2: Bit 1: Unset
- "tbz %x[n_channels], #0, 11f\n"
- "ld1 { v9.b }[4], [x15], #0x1\n"
- "ld1 { v8.b }[4], [x14], #0x1\n"
- "ld1 { v7.b }[4], [x13], #0x1\n"
- "ld1 { v6.b }[4], [x12], #0x1\n"
- "ld1 { v4.b }[4], [x10], #0x1\n"
- "ld1 { v3.b }[4], [x9], #0x1\n"
- "ld1 { v2.b }[4], [x26], #0x1\n"
- "ld1 { v1.b }[4], [x25], #0x1\n"
- "b 11f\n"
- "9:" // Oddments: Load (A): Bit 3: Unset: Bit 2: Unset
- "tbz %x[n_channels], #1, 10f\n"
- "ldr h9, [x15], #0x2\n"
- "ldr h8, [x14], #0x2\n"
- "ldr h7, [x13], #0x2\n"
- "ldr h6, [x12], #0x2\n"
- "ldr h4, [x10], #0x2\n"
- "ldr h3, [x9], #0x2\n"
- "ldr h2, [x26], #0x2\n"
- "ldr h1, [x25], #0x2\n"
- "tbz %x[n_channels], #0, 11f\n"
- "ld1 { v9.b }[2], [x15], #0x1\n"
- "ld1 { v8.b }[2], [x14], #0x1\n"
- "ld1 { v7.b }[2], [x13], #0x1\n"
- "ld1 { v6.b }[2], [x12], #0x1\n"
- "ld1 { v4.b }[2], [x10], #0x1\n"
- "ld1 { v3.b }[2], [x9], #0x1\n"
- "ld1 { v2.b }[2], [x26], #0x1\n"
- "ld1 { v1.b }[2], [x25], #0x1\n"
- "b 11f\n"
- "10:" // Oddments: Load (A): Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
- "ldr b9, [x15], #0x1\n"
- "ldr b8, [x14], #0x1\n"
- "ldr b7, [x13], #0x1\n"
- "ldr b6, [x12], #0x1\n"
- "ldr b4, [x10], #0x1\n"
- "ldr b3, [x9], #0x1\n"
- "ldr b2, [x26], #0x1\n"
- "ldr b1, [x25], #0x1\n"
- "11:" // Oddments: Load (A): Bit 3: End
- "ldp x15, x14, [%x[inptrs], #0x40]\n"
- "ldp x13, x12, [%x[inptrs], #0x50]\n"
- "add x15, x15, x28\n"
- "add x14, x14, x28\n"
- "ldp x10, x9, [%x[inptrs], #0x60]\n"
- "ldp x26, x25, [%x[inptrs], #0x70]\n"
- "add x13, x13, x28\n"
- "add x12, x12, x28\n"
- "add x10, x10, x28\n"
- "add x9, x9, x28\n"
- "add x26, x26, x28\n"
- "add x25, x25, x28\n"
- "tbz %x[n_channels], #3, 15f\n"
- "ldr d29, [x15], #0x8\n"
- "ldr d28, [x14], #0x8\n"
- "ldr d27, [x13], #0x8\n"
- "ldr d26, [x12], #0x8\n"
- "ldr d24, [x10], #0x8\n"
- "ldr d23, [x9], #0x8\n"
- "ldr d22, [x26], #0x8\n"
- "ldr d21, [x25], #0x8\n"
- "tbz %x[n_channels], #2, 13f\n"
- "ld1 { v29.s }[2], [x15], #0x4\n"
- "ld1 { v28.s }[2], [x14], #0x4\n"
+ "str s20, [x22, x23]\n"
+ "smin v19.4s, v19.4s, v12.4s\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str s19, [x21, x23]\n"
+ "add x23, x23, #0x4\n"
+ "bgt 1b\n"
+ "tst %x[n_channels], #0xf\n"
+ "beq 34f\n"
+ "2:" // Oddments
+ "and x19, %x[n_channels], #0xf\n"
+ "add x13, x13, x23\n"
+ "add x12, x12, x23\n"
+ "add x11, x11, x23\n"
+ "add x10, x10, x23\n"
+ "add x9, x9, x23\n"
+ "add x28, x28, x23\n"
+ "add x27, x27, x23\n"
+ "add x26, x26, x23\n"
+ "tbz %x[n_channels], #3, 6f\n"
+ "ld1 { v27.d }[0], [x13], #0x8\n"
+ "ld1 { v1.d }[0], [x12], #0x8\n"
+ "ld1 { v25.d }[0], [x11], #0x8\n"
+ "ld1 { v23.d }[0], [x10], #0x8\n"
+ "ld1 { v31.d }[0], [x9], #0x8\n"
+ "ld1 { v28.d }[0], [x28], #0x8\n"
+ "ld1 { v21.d }[0], [x27], #0x8\n"
+ "ld1 { v26.d }[0], [x26], #0x8\n"
+ "tbz %x[n_channels], #2, 4f\n"
"ld1 { v27.s }[2], [x13], #0x4\n"
- "ld1 { v26.s }[2], [x12], #0x4\n"
- "ld1 { v24.s }[2], [x10], #0x4\n"
- "ld1 { v23.s }[2], [x9], #0x4\n"
- "ld1 { v22.s }[2], [x26], #0x4\n"
- "ld1 { v21.s }[2], [x25], #0x4\n"
- "tbz %x[n_channels], #1, 12f\n"
- "ld1 { v29.h }[6], [x15], #0x2\n"
- "ld1 { v28.h }[6], [x14], #0x2\n"
+ "ld1 { v1.s }[2], [x12], #0x4\n"
+ "ld1 { v25.s }[2], [x11], #0x4\n"
+ "ld1 { v23.s }[2], [x10], #0x4\n"
+ "ld1 { v31.s }[2], [x9], #0x4\n"
+ "ld1 { v28.s }[2], [x28], #0x4\n"
+ "ld1 { v21.s }[2], [x27], #0x4\n"
+ "ld1 { v26.s }[2], [x26], #0x4\n"
+ "tbz %x[n_channels], #1, 3f\n"
"ld1 { v27.h }[6], [x13], #0x2\n"
- "ld1 { v26.h }[6], [x12], #0x2\n"
- "ld1 { v24.h }[6], [x10], #0x2\n"
- "ld1 { v23.h }[6], [x9], #0x2\n"
- "ld1 { v22.h }[6], [x26], #0x2\n"
- "ld1 { v21.h }[6], [x25], #0x2\n"
- "tbz %x[n_channels], #0, 19f\n"
- "ld1 { v29.b }[14], [x15], #0x1\n"
- "ld1 { v28.b }[14], [x14], #0x1\n"
+ "ld1 { v1.h }[6], [x12], #0x2\n"
+ "ld1 { v25.h }[6], [x11], #0x2\n"
+ "ld1 { v23.h }[6], [x10], #0x2\n"
+ "ld1 { v31.h }[6], [x9], #0x2\n"
+ "ld1 { v28.h }[6], [x28], #0x2\n"
+ "ld1 { v21.h }[6], [x27], #0x2\n"
+ "ld1 { v26.h }[6], [x26], #0x2\n"
+ "tbz %x[n_channels], #0, 10f\n"
"ld1 { v27.b }[14], [x13], #0x1\n"
- "ld1 { v26.b }[14], [x12], #0x1\n"
- "ld1 { v24.b }[14], [x10], #0x1\n"
- "ld1 { v23.b }[14], [x9], #0x1\n"
- "ld1 { v22.b }[14], [x26], #0x1\n"
- "ld1 { v21.b }[14], [x25], #0x1\n"
- "b 19f\n"
- "12:" // Oddments: Load (B): Bit 3: Bit 2: Bit 1: Unset
- "tbz %x[n_channels], #0, 19f\n"
- "ld1 { v29.b }[12], [x15], #0x1\n"
- "ld1 { v28.b }[12], [x14], #0x1\n"
+ "ld1 { v1.b }[14], [x12], #0x1\n"
+ "ld1 { v25.b }[14], [x11], #0x1\n"
+ "ld1 { v23.b }[14], [x10], #0x1\n"
+ "ld1 { v31.b }[14], [x9], #0x1\n"
+ "ld1 { v28.b }[14], [x28], #0x1\n"
+ "ld1 { v21.b }[14], [x27], #0x1\n"
+ "ld1 { v26.b }[14], [x26], #0x1\n"
+ "b 10f\n"
+ "3:" // Oddments: Load (A): Bit 3: Bit 2: Bit 1: Unset
+ "tbz %x[n_channels], #0, 10f\n"
"ld1 { v27.b }[12], [x13], #0x1\n"
- "ld1 { v26.b }[12], [x12], #0x1\n"
- "ld1 { v24.b }[12], [x10], #0x1\n"
- "ld1 { v23.b }[12], [x9], #0x1\n"
- "ld1 { v22.b }[12], [x26], #0x1\n"
- "ld1 { v21.b }[12], [x25], #0x1\n"
- "b 19f\n"
- "13:" // Oddments: Load (B): Bit 3: Bit 2: Unset
- "tbz %x[n_channels], #1, 14f\n"
- "ld1 { v29.h }[4], [x15], #0x2\n"
- "ld1 { v28.h }[4], [x14], #0x2\n"
+ "ld1 { v1.b }[12], [x12], #0x1\n"
+ "ld1 { v25.b }[12], [x11], #0x1\n"
+ "ld1 { v23.b }[12], [x10], #0x1\n"
+ "ld1 { v31.b }[12], [x9], #0x1\n"
+ "ld1 { v28.b }[12], [x28], #0x1\n"
+ "ld1 { v21.b }[12], [x27], #0x1\n"
+ "ld1 { v26.b }[12], [x26], #0x1\n"
+ "b 10f\n"
+ "4:" // Oddments: Load (A): Bit 3: Bit 2: Unset
+ "tbz %x[n_channels], #1, 5f\n"
"ld1 { v27.h }[4], [x13], #0x2\n"
- "ld1 { v26.h }[4], [x12], #0x2\n"
- "ld1 { v24.h }[4], [x10], #0x2\n"
- "ld1 { v23.h }[4], [x9], #0x2\n"
- "ld1 { v22.h }[4], [x26], #0x2\n"
- "ld1 { v21.h }[4], [x25], #0x2\n"
- "tbz %x[n_channels], #0, 19f\n"
- "ld1 { v29.b }[10], [x15], #0x1\n"
- "ld1 { v28.b }[10], [x14], #0x1\n"
+ "ld1 { v1.h }[4], [x12], #0x2\n"
+ "ld1 { v25.h }[4], [x11], #0x2\n"
+ "ld1 { v23.h }[4], [x10], #0x2\n"
+ "ld1 { v31.h }[4], [x9], #0x2\n"
+ "ld1 { v28.h }[4], [x28], #0x2\n"
+ "ld1 { v21.h }[4], [x27], #0x2\n"
+ "ld1 { v26.h }[4], [x26], #0x2\n"
+ "tbz %x[n_channels], #0, 10f\n"
"ld1 { v27.b }[10], [x13], #0x1\n"
- "ld1 { v26.b }[10], [x12], #0x1\n"
- "ld1 { v24.b }[10], [x10], #0x1\n"
- "ld1 { v23.b }[10], [x9], #0x1\n"
- "ld1 { v22.b }[10], [x26], #0x1\n"
- "ld1 { v21.b }[10], [x25], #0x1\n"
- "b 19f\n"
- "14:" // Oddments: Load (B): Bit 3: Bit 2: Unset: Bit 1: Unset
- "tbz %x[n_channels], #0, 19f\n"
- "ld1 { v29.b }[8], [x15], #0x1\n"
- "ld1 { v28.b }[8], [x14], #0x1\n"
+ "ld1 { v1.b }[10], [x12], #0x1\n"
+ "ld1 { v25.b }[10], [x11], #0x1\n"
+ "ld1 { v23.b }[10], [x10], #0x1\n"
+ "ld1 { v31.b }[10], [x9], #0x1\n"
+ "ld1 { v28.b }[10], [x28], #0x1\n"
+ "ld1 { v21.b }[10], [x27], #0x1\n"
+ "ld1 { v26.b }[10], [x26], #0x1\n"
+ "b 10f\n"
+ "5:" // Oddments: Load (A): Bit 3: Bit 2: Unset: Bit 1: Unset
+ "tbz %x[n_channels], #0, 10f\n"
"ld1 { v27.b }[8], [x13], #0x1\n"
- "ld1 { v26.b }[8], [x12], #0x1\n"
- "ld1 { v24.b }[8], [x10], #0x1\n"
- "ld1 { v23.b }[8], [x9], #0x1\n"
- "ld1 { v22.b }[8], [x26], #0x1\n"
- "ld1 { v21.b }[8], [x25], #0x1\n"
- "b 19f\n"
- "15:" // Oddments: Load (B): Bit 3: Unset
- "tbz %x[n_channels], #2, 17f\n"
- "ldr s29, [x15], #0x4\n"
- "ldr s28, [x14], #0x4\n"
- "ldr s27, [x13], #0x4\n"
- "ldr s26, [x12], #0x4\n"
- "ldr s24, [x10], #0x4\n"
- "ldr s23, [x9], #0x4\n"
- "ldr s22, [x26], #0x4\n"
- "ldr s21, [x25], #0x4\n"
- "tbz %x[n_channels], #1, 16f\n"
- "ld1 { v29.h }[2], [x15], #0x2\n"
- "ld1 { v28.h }[2], [x14], #0x2\n"
+ "ld1 { v1.b }[8], [x12], #0x1\n"
+ "ld1 { v25.b }[8], [x11], #0x1\n"
+ "ld1 { v23.b }[8], [x10], #0x1\n"
+ "ld1 { v31.b }[8], [x9], #0x1\n"
+ "ld1 { v28.b }[8], [x28], #0x1\n"
+ "ld1 { v21.b }[8], [x27], #0x1\n"
+ "ld1 { v26.b }[8], [x26], #0x1\n"
+ "b 10f\n"
+ "6:" // Oddments: Load (A): Bit 3: Unset
+ "tbz %x[n_channels], #2, 8f\n"
+ "ld1 { v27.s }[0], [x13], #0x4\n"
+ "ld1 { v1.s }[0], [x12], #0x4\n"
+ "ld1 { v25.s }[0], [x11], #0x4\n"
+ "ld1 { v23.s }[0], [x10], #0x4\n"
+ "ld1 { v31.s }[0], [x9], #0x4\n"
+ "ld1 { v28.s }[0], [x28], #0x4\n"
+ "ld1 { v21.s }[0], [x27], #0x4\n"
+ "ld1 { v26.s }[0], [x26], #0x4\n"
+ "tbz %x[n_channels], #1, 7f\n"
"ld1 { v27.h }[2], [x13], #0x2\n"
- "ld1 { v26.h }[2], [x12], #0x2\n"
- "ld1 { v24.h }[2], [x10], #0x2\n"
- "ld1 { v23.h }[2], [x9], #0x2\n"
- "ld1 { v22.h }[2], [x26], #0x2\n"
- "ld1 { v21.h }[2], [x25], #0x2\n"
- "tbz %x[n_channels], #0, 19f\n"
- "ld1 { v29.b }[6], [x15], #0x1\n"
- "ld1 { v28.b }[6], [x14], #0x1\n"
+ "ld1 { v1.h }[2], [x12], #0x2\n"
+ "ld1 { v25.h }[2], [x11], #0x2\n"
+ "ld1 { v23.h }[2], [x10], #0x2\n"
+ "ld1 { v31.h }[2], [x9], #0x2\n"
+ "ld1 { v28.h }[2], [x28], #0x2\n"
+ "ld1 { v21.h }[2], [x27], #0x2\n"
+ "ld1 { v26.h }[2], [x26], #0x2\n"
+ "tbz %x[n_channels], #0, 10f\n"
"ld1 { v27.b }[6], [x13], #0x1\n"
- "ld1 { v26.b }[6], [x12], #0x1\n"
- "ld1 { v24.b }[6], [x10], #0x1\n"
- "ld1 { v23.b }[6], [x9], #0x1\n"
- "ld1 { v22.b }[6], [x26], #0x1\n"
- "ld1 { v21.b }[6], [x25], #0x1\n"
- "b 19f\n"
- "16:" // Oddments: Load (B): Bit 3: Unset: Bit 2: Bit 1: Unset
- "tbz %x[n_channels], #0, 19f\n"
- "ld1 { v29.b }[4], [x15], #0x1\n"
- "ld1 { v28.b }[4], [x14], #0x1\n"
+ "ld1 { v1.b }[6], [x12], #0x1\n"
+ "ld1 { v25.b }[6], [x11], #0x1\n"
+ "ld1 { v23.b }[6], [x10], #0x1\n"
+ "ld1 { v31.b }[6], [x9], #0x1\n"
+ "ld1 { v28.b }[6], [x28], #0x1\n"
+ "ld1 { v21.b }[6], [x27], #0x1\n"
+ "ld1 { v26.b }[6], [x26], #0x1\n"
+ "b 10f\n"
+ "7:" // Oddments: Load (A): Bit 3: Unset: Bit 2: Bit 1: Unset
+ "tbz %x[n_channels], #0, 10f\n"
"ld1 { v27.b }[4], [x13], #0x1\n"
- "ld1 { v26.b }[4], [x12], #0x1\n"
- "ld1 { v24.b }[4], [x10], #0x1\n"
- "ld1 { v23.b }[4], [x9], #0x1\n"
- "ld1 { v22.b }[4], [x26], #0x1\n"
- "ld1 { v21.b }[4], [x25], #0x1\n"
- "b 19f\n"
- "17:" // Oddments: Load (B): Bit 3: Unset: Bit 2: Unset
- "tbz %x[n_channels], #1, 18f\n"
- "ldr h29, [x15], #0x2\n"
- "ldr h28, [x14], #0x2\n"
- "ldr h27, [x13], #0x2\n"
- "ldr h26, [x12], #0x2\n"
- "ldr h24, [x10], #0x2\n"
- "ldr h23, [x9], #0x2\n"
- "ldr h22, [x26], #0x2\n"
- "ldr h21, [x25], #0x2\n"
- "tbz %x[n_channels], #0, 19f\n"
- "ld1 { v29.b }[2], [x15], #0x1\n"
- "ld1 { v28.b }[2], [x14], #0x1\n"
+ "ld1 { v1.b }[4], [x12], #0x1\n"
+ "ld1 { v25.b }[4], [x11], #0x1\n"
+ "ld1 { v23.b }[4], [x10], #0x1\n"
+ "ld1 { v31.b }[4], [x9], #0x1\n"
+ "ld1 { v28.b }[4], [x28], #0x1\n"
+ "ld1 { v21.b }[4], [x27], #0x1\n"
+ "ld1 { v26.b }[4], [x26], #0x1\n"
+ "b 10f\n"
+ "8:" // Oddments: Load (A): Bit 3: Unset: Bit 2: Unset
+ "tbz %x[n_channels], #1, 9f\n"
+ "ld1 { v27.h }[0], [x13], #0x2\n"
+ "ld1 { v1.h }[0], [x12], #0x2\n"
+ "ld1 { v25.h }[0], [x11], #0x2\n"
+ "ld1 { v23.h }[0], [x10], #0x2\n"
+ "ld1 { v31.h }[0], [x9], #0x2\n"
+ "ld1 { v28.h }[0], [x28], #0x2\n"
+ "ld1 { v21.h }[0], [x27], #0x2\n"
+ "ld1 { v26.h }[0], [x26], #0x2\n"
+ "tbz %x[n_channels], #0, 10f\n"
"ld1 { v27.b }[2], [x13], #0x1\n"
- "ld1 { v26.b }[2], [x12], #0x1\n"
- "ld1 { v24.b }[2], [x10], #0x1\n"
- "ld1 { v23.b }[2], [x9], #0x1\n"
- "ld1 { v22.b }[2], [x26], #0x1\n"
- "ld1 { v21.b }[2], [x25], #0x1\n"
- "b 19f\n"
- "18:" // Oddments: Load (B): Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
- "ldr b29, [x15], #0x1\n"
- "ldr b28, [x14], #0x1\n"
- "ldr b27, [x13], #0x1\n"
- "ldr b26, [x12], #0x1\n"
- "ldr b24, [x10], #0x1\n"
- "ldr b23, [x9], #0x1\n"
- "ldr b22, [x26], #0x1\n"
- "ldr b21, [x25], #0x1\n"
- "19:" // Oddments: Load (B): Bit 3: End
- "ldr q0, [%x[params], #0x10]\n"
- "ldr q16, [%x[params], #0x20]\n"
- "zip2 v30.16b, v4.16b, v2.16b\n"
- "zip1 v4.16b, v4.16b, v2.16b\n"
- "ldr q31, [%x[params], #0x30]\n"
- "zip1 v2.16b, v3.16b, v1.16b\n"
- "zip2 v5.16b, v9.16b, v7.16b\n"
- "cmp x20, #0x4\n"
- "zip1 v9.16b, v9.16b, v7.16b\n"
- "zip1 v7.16b, v8.16b, v6.16b\n"
- "zip2 v6.16b, v8.16b, v6.16b\n"
- "zip2 v1.16b, v3.16b, v1.16b\n"
- "zip2 v3.16b, v4.16b, v2.16b\n"
- "zip1 v4.16b, v4.16b, v2.16b\n"
- "zip2 v25.16b, v29.16b, v27.16b\n"
- "zip1 v29.16b, v29.16b, v27.16b\n"
- "zip1 v27.16b, v28.16b, v26.16b\n"
- "movi v19.4s, #0x0\n"
- ".inst 0x4e8495d3 // sdot v19.4s, v14.16b, v4.16b\n"
- "zip2 v8.16b, v9.16b, v7.16b\n"
- "zip1 v9.16b, v9.16b, v7.16b\n"
- "zip1 v7.16b, v5.16b, v6.16b\n"
- "zip2 v6.16b, v5.16b, v6.16b\n"
- "ldr q5, [%x[params], #0x0]\n"
- "zip2 v26.16b, v28.16b, v26.16b\n"
- "zip2 v20.16b, v24.16b, v22.16b\n"
- "zip1 v24.16b, v24.16b, v22.16b\n"
- "zip1 v22.16b, v23.16b, v21.16b\n"
- "zip2 v21.16b, v23.16b, v21.16b\n"
- "zip2 v28.16b, v29.16b, v27.16b\n"
- "zip1 v29.16b, v29.16b, v27.16b\n"
- "zip1 v2.16b, v30.16b, v1.16b\n"
- ".inst 0x4e9d95d3 // sdot v19.4s, v14.16b, v29.16b\n"
- "zip2 v1.16b, v30.16b, v1.16b\n"
- "zip1 v27.16b, v25.16b, v26.16b\n"
- "zip2 v26.16b, v25.16b, v26.16b\n"
- "zip2 v23.16b, v24.16b, v22.16b\n"
- "zip1 v24.16b, v24.16b, v22.16b\n"
- "zip1 v22.16b, v20.16b, v21.16b\n"
- "mov v18.16b, v19.16b\n .inst 0x4e9895d2 // sdot v18.4s, v14.16b, v24.16b\n"
- "zip2 v21.16b, v20.16b, v21.16b\n"
- "mov v30.16b, v5.16b\n"
- ".inst 0x4e8995d3 // sdot v19.4s, v14.16b, v9.16b\n"
- "mov v25.16b, v5.16b\n"
- "mov v20.16b, v5.16b\n"
- ".inst 0x4e899405 // sdot v5.4s, v0.16b, v9.16b\n"
- ".inst 0x4e849419 // sdot v25.4s, v0.16b, v4.16b\n"
- ".inst 0x4e849605 // sdot v5.4s, v16.16b, v4.16b\n"
- "ext v4.16b, v4.16b, v4.16b, #0x1\n"
- "ext v9.16b, v9.16b, v9.16b, #0x1\n"
- ".inst 0x4e9d9619 // sdot v25.4s, v16.16b, v29.16b\n"
- ".inst 0x4e9d97e5 // sdot v5.4s, v31.16b, v29.16b\n"
- "ext v29.16b, v29.16b, v29.16b, #0x1\n"
- ".inst 0x4e89941e // sdot v30.4s, v0.16b, v9.16b\n"
- ".inst 0x4e849414 // sdot v20.4s, v0.16b, v4.16b\n"
- "movi v17.4s, #0x0\n"
- ".inst 0x4e8495d1 // sdot v17.4s, v14.16b, v4.16b\n"
- ".inst 0x4e9d95d1 // sdot v17.4s, v14.16b, v29.16b\n"
- ".inst 0x4e9897f9 // sdot v25.4s, v31.16b, v24.16b\n"
- "ext v24.16b, v24.16b, v24.16b, #0x1\n"
- ".inst 0x4e84961e // sdot v30.4s, v16.16b, v4.16b\n"
- "ldr q4, [%x[params], #0x50]\n"
- ".inst 0x4e9d9614 // sdot v20.4s, v16.16b, v29.16b\n"
- "mov v16.16b, v17.16b\n .inst 0x4e9895d0 // sdot v16.4s, v14.16b, v24.16b\n"
- "mls v5.4s, v19.4s, v11.4s\n"
- ".inst 0x4e8995d1 // sdot v17.4s, v14.16b, v9.16b\n"
- "ldr q9, [%x[params], #0x40]\n"
- ".inst 0x4e9d97fe // sdot v30.4s, v31.16b, v29.16b\n"
- "sqrdmulh v5.4s, v5.4s, v9.4s\n"
- ".inst 0x4e9897f4 // sdot v20.4s, v31.16b, v24.16b\n"
- "mls v30.4s, v17.4s, v11.4s\n"
+ "ld1 { v1.b }[2], [x12], #0x1\n"
+ "ld1 { v25.b }[2], [x11], #0x1\n"
+ "ld1 { v23.b }[2], [x10], #0x1\n"
+ "ld1 { v31.b }[2], [x9], #0x1\n"
+ "ld1 { v28.b }[2], [x28], #0x1\n"
+ "ld1 { v21.b }[2], [x27], #0x1\n"
+ "ld1 { v26.b }[2], [x26], #0x1\n"
+ "b 10f\n"
+ "9:" // Oddments: Load (A): Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
+ "tbz %x[n_channels], #0, 10f\n"
+ "ld1 { v27.b }[0], [x13], #0x1\n"
+ "ld1 { v1.b }[0], [x12], #0x1\n"
+ "ld1 { v25.b }[0], [x11], #0x1\n"
+ "ld1 { v23.b }[0], [x10], #0x1\n"
+ "ld1 { v31.b }[0], [x9], #0x1\n"
+ "ld1 { v28.b }[0], [x28], #0x1\n"
+ "ld1 { v21.b }[0], [x27], #0x1\n"
+ "ld1 { v26.b }[0], [x26], #0x1\n"
+ "10:" // Oddments: Load (A): Bit 3: End
+ "ldp x13, x12, [%x[inptrs], #0x40]\n"
+ "add x13, x13, x23\n"
+ "ldp x11, x10, [%x[inptrs], #0x50]\n"
+ "ldp x9, x28, [%x[inptrs], #0x60]\n"
+ "add x12, x12, x23\n"
+ "ldp x27, x26, [%x[inptrs], #0x70]\n"
+ "add x11, x11, x23\n"
+ "add x10, x10, x23\n"
+ "add x9, x9, x23\n"
+ "add x28, x28, x23\n"
+ "add x27, x27, x23\n"
+ "add x26, x26, x23\n"
+ "tbz %x[n_channels], #3, 14f\n"
+ "ld1 { v24.d }[0], [x13], #0x8\n"
+ "ld1 { v22.d }[0], [x12], #0x8\n"
+ "ld1 { v20.d }[0], [x11], #0x8\n"
+ "ld1 { v16.d }[0], [x10], #0x8\n"
+ "ld1 { v19.d }[0], [x9], #0x8\n"
+ "ld1 { v0.d }[0], [x28], #0x8\n"
+ "ld1 { v18.d }[0], [x27], #0x8\n"
+ "ld1 { v17.d }[0], [x26], #0x8\n"
+ "tbz %x[n_channels], #2, 12f\n"
+ "ld1 { v24.s }[2], [x13], #0x4\n"
+ "ld1 { v22.s }[2], [x12], #0x4\n"
+ "ld1 { v20.s }[2], [x11], #0x4\n"
+ "ld1 { v16.s }[2], [x10], #0x4\n"
+ "ld1 { v19.s }[2], [x9], #0x4\n"
+ "ld1 { v0.s }[2], [x28], #0x4\n"
+ "ld1 { v18.s }[2], [x27], #0x4\n"
+ "ld1 { v17.s }[2], [x26], #0x4\n"
+ "tbz %x[n_channels], #1, 11f\n"
+ "ld1 { v24.h }[6], [x13], #0x2\n"
+ "ld1 { v22.h }[6], [x12], #0x2\n"
+ "ld1 { v20.h }[6], [x11], #0x2\n"
+ "ld1 { v16.h }[6], [x10], #0x2\n"
+ "ld1 { v19.h }[6], [x9], #0x2\n"
+ "ld1 { v0.h }[6], [x28], #0x2\n"
+ "ld1 { v18.h }[6], [x27], #0x2\n"
+ "ld1 { v17.h }[6], [x26], #0x2\n"
+ "tbz %x[n_channels], #0, 18f\n"
+ "ld1 { v24.b }[14], [x13], #0x1\n"
+ "ld1 { v22.b }[14], [x12], #0x1\n"
+ "ld1 { v20.b }[14], [x11], #0x1\n"
+ "ld1 { v16.b }[14], [x10], #0x1\n"
+ "ld1 { v19.b }[14], [x9], #0x1\n"
+ "ld1 { v0.b }[14], [x28], #0x1\n"
+ "ld1 { v18.b }[14], [x27], #0x1\n"
+ "ld1 { v17.b }[14], [x26], #0x1\n"
+ "b 18f\n"
+ "11:" // Oddments: Load (B): Bit 3: Bit 2: Bit 1: Unset
+ "tbz %x[n_channels], #0, 18f\n"
+ "ld1 { v24.b }[12], [x13], #0x1\n"
+ "ld1 { v22.b }[12], [x12], #0x1\n"
+ "ld1 { v20.b }[12], [x11], #0x1\n"
+ "ld1 { v16.b }[12], [x10], #0x1\n"
+ "ld1 { v19.b }[12], [x9], #0x1\n"
+ "ld1 { v0.b }[12], [x28], #0x1\n"
+ "ld1 { v18.b }[12], [x27], #0x1\n"
+ "ld1 { v17.b }[12], [x26], #0x1\n"
+ "b 18f\n"
+ "12:" // Oddments: Load (B): Bit 3: Bit 2: Unset
+ "tbz %x[n_channels], #1, 13f\n"
+ "ld1 { v24.h }[4], [x13], #0x2\n"
+ "ld1 { v22.h }[4], [x12], #0x2\n"
+ "ld1 { v20.h }[4], [x11], #0x2\n"
+ "ld1 { v16.h }[4], [x10], #0x2\n"
+ "ld1 { v19.h }[4], [x9], #0x2\n"
+ "ld1 { v0.h }[4], [x28], #0x2\n"
+ "ld1 { v18.h }[4], [x27], #0x2\n"
+ "ld1 { v17.h }[4], [x26], #0x2\n"
+ "tbz %x[n_channels], #0, 18f\n"
+ "ld1 { v24.b }[10], [x13], #0x1\n"
+ "ld1 { v22.b }[10], [x12], #0x1\n"
+ "ld1 { v20.b }[10], [x11], #0x1\n"
+ "ld1 { v16.b }[10], [x10], #0x1\n"
+ "ld1 { v19.b }[10], [x9], #0x1\n"
+ "ld1 { v0.b }[10], [x28], #0x1\n"
+ "ld1 { v18.b }[10], [x27], #0x1\n"
+ "ld1 { v17.b }[10], [x26], #0x1\n"
+ "b 18f\n"
+ "13:" // Oddments: Load (B): Bit 3: Bit 2: Unset: Bit 1: Unset
+ "tbz %x[n_channels], #0, 18f\n"
+ "ld1 { v24.b }[8], [x13], #0x1\n"
+ "ld1 { v22.b }[8], [x12], #0x1\n"
+ "ld1 { v20.b }[8], [x11], #0x1\n"
+ "ld1 { v16.b }[8], [x10], #0x1\n"
+ "ld1 { v19.b }[8], [x9], #0x1\n"
+ "ld1 { v0.b }[8], [x28], #0x1\n"
+ "ld1 { v18.b }[8], [x27], #0x1\n"
+ "ld1 { v17.b }[8], [x26], #0x1\n"
+ "b 18f\n"
+ "14:" // Oddments: Load (B): Bit 3: Unset
+ "tbz %x[n_channels], #2, 16f\n"
+ "ld1 { v24.s }[0], [x13], #0x4\n"
+ "ld1 { v22.s }[0], [x12], #0x4\n"
+ "ld1 { v20.s }[0], [x11], #0x4\n"
+ "ld1 { v16.s }[0], [x10], #0x4\n"
+ "ld1 { v19.s }[0], [x9], #0x4\n"
+ "ld1 { v0.s }[0], [x28], #0x4\n"
+ "ld1 { v18.s }[0], [x27], #0x4\n"
+ "ld1 { v17.s }[0], [x26], #0x4\n"
+ "tbz %x[n_channels], #1, 15f\n"
+ "ld1 { v24.h }[2], [x13], #0x2\n"
+ "ld1 { v22.h }[2], [x12], #0x2\n"
+ "ld1 { v20.h }[2], [x11], #0x2\n"
+ "ld1 { v16.h }[2], [x10], #0x2\n"
+ "ld1 { v19.h }[2], [x9], #0x2\n"
+ "ld1 { v0.h }[2], [x28], #0x2\n"
+ "ld1 { v18.h }[2], [x27], #0x2\n"
+ "ld1 { v17.h }[2], [x26], #0x2\n"
+ "tbz %x[n_channels], #0, 18f\n"
+ "ld1 { v24.b }[6], [x13], #0x1\n"
+ "ld1 { v22.b }[6], [x12], #0x1\n"
+ "ld1 { v20.b }[6], [x11], #0x1\n"
+ "ld1 { v16.b }[6], [x10], #0x1\n"
+ "ld1 { v19.b }[6], [x9], #0x1\n"
+ "ld1 { v0.b }[6], [x28], #0x1\n"
+ "ld1 { v18.b }[6], [x27], #0x1\n"
+ "ld1 { v17.b }[6], [x26], #0x1\n"
+ "b 18f\n"
+ "15:" // Oddments: Load (B): Bit 3: Unset: Bit 2: Bit 1: Unset
+ "tbz %x[n_channels], #0, 18f\n"
+ "ld1 { v24.b }[4], [x13], #0x1\n"
+ "ld1 { v22.b }[4], [x12], #0x1\n"
+ "ld1 { v20.b }[4], [x11], #0x1\n"
+ "ld1 { v16.b }[4], [x10], #0x1\n"
+ "ld1 { v19.b }[4], [x9], #0x1\n"
+ "ld1 { v0.b }[4], [x28], #0x1\n"
+ "ld1 { v18.b }[4], [x27], #0x1\n"
+ "ld1 { v17.b }[4], [x26], #0x1\n"
+ "b 18f\n"
+ "16:" // Oddments: Load (B): Bit 3: Unset: Bit 2: Unset
+ "tbz %x[n_channels], #1, 17f\n"
+ "ld1 { v24.h }[0], [x13], #0x2\n"
+ "ld1 { v22.h }[0], [x12], #0x2\n"
+ "ld1 { v20.h }[0], [x11], #0x2\n"
+ "ld1 { v16.h }[0], [x10], #0x2\n"
+ "ld1 { v19.h }[0], [x9], #0x2\n"
+ "ld1 { v0.h }[0], [x28], #0x2\n"
+ "ld1 { v18.h }[0], [x27], #0x2\n"
+ "ld1 { v17.h }[0], [x26], #0x2\n"
+ "tbz %x[n_channels], #0, 18f\n"
+ "ld1 { v24.b }[2], [x13], #0x1\n"
+ "ld1 { v22.b }[2], [x12], #0x1\n"
+ "ld1 { v20.b }[2], [x11], #0x1\n"
+ "ld1 { v16.b }[2], [x10], #0x1\n"
+ "ld1 { v19.b }[2], [x9], #0x1\n"
+ "ld1 { v0.b }[2], [x28], #0x1\n"
+ "ld1 { v18.b }[2], [x27], #0x1\n"
+ "ld1 { v17.b }[2], [x26], #0x1\n"
+ "b 18f\n"
+ "17:" // Oddments: Load (B): Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
+ "tbz %x[n_channels], #0, 18f\n"
+ "ld1 { v24.b }[0], [x13], #0x1\n"
+ "ld1 { v22.b }[0], [x12], #0x1\n"
+ "ld1 { v20.b }[0], [x11], #0x1\n"
+ "ld1 { v16.b }[0], [x10], #0x1\n"
+ "ld1 { v19.b }[0], [x9], #0x1\n"
+ "ld1 { v0.b }[0], [x28], #0x1\n"
+ "ld1 { v18.b }[0], [x27], #0x1\n"
+ "ld1 { v17.b }[0], [x26], #0x1\n"
+ "18:" // Oddments: Load (B): Bit 3: End
+ "zip1 v7.16b, v27.16b, v25.16b\n"
+ "ldr q30, [%x[params], #0x0]\n"
+ "cmp x19, #0x4\n"
+ "zip2 v5.16b, v27.16b, v25.16b\n"
+ "ldr q29, [%x[params], #0x10]\n"
+ "zip1 v8.16b, v1.16b, v23.16b\n"
+ "ldr q27, [%x[params], #0x20]\n"
+ "zip2 v3.16b, v1.16b, v23.16b\n"
+ "ldr q25, [%x[params], #0x30]\n"
+ "zip1 v2.16b, v31.16b, v21.16b\n"
+ "ldr q23, [%x[params], #0x40]\n"
+ "zip2 v4.16b, v31.16b, v21.16b\n"
+ "ldr q21, [%x[params], #0x50]\n"
"add %x[params], %x[params], #0x60\n"
- "mls v25.4s, v18.4s, v11.4s\n"
- "mls v20.4s, v16.4s, v11.4s\n"
- "and v0.16b, v5.16b, v4.16b\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "sqadd v5.4s, v5.4s, v0.4s\n"
- "and v16.16b, v30.16b, v4.16b\n"
- "and v31.16b, v25.16b, v4.16b\n"
- "and v0.16b, v20.16b, v4.16b\n"
+ "zip1 v1.16b, v28.16b, v26.16b\n"
+ "zip2 v31.16b, v28.16b, v26.16b\n"
+ "zip1 v28.16b, v24.16b, v20.16b\n"
+ "zip2 v26.16b, v24.16b, v20.16b\n"
+ "zip1 v24.16b, v22.16b, v16.16b\n"
+ "zip2 v22.16b, v22.16b, v16.16b\n"
+ "zip1 v20.16b, v19.16b, v18.16b\n"
+ "zip2 v19.16b, v19.16b, v18.16b\n"
+ "zip1 v18.16b, v0.16b, v17.16b\n"
+ "zip2 v17.16b, v0.16b, v17.16b\n"
+ "zip1 v6.16b, v7.16b, v8.16b\n"
+ "zip2 v8.16b, v7.16b, v8.16b\n"
+ "zip1 v7.16b, v5.16b, v3.16b\n"
+ "str q7, [SP, #0x0]\n"
+ "zip2 v5.16b, v5.16b, v3.16b\n"
+ "str q5, [SP, #0x10]\n"
+ "zip1 v3.16b, v2.16b, v1.16b\n"
+ "zip2 v2.16b, v2.16b, v1.16b\n"
+ "zip1 v1.16b, v4.16b, v31.16b\n"
+ "str q1, [SP, #0x20]\n"
+ "zip2 v16.16b, v4.16b, v31.16b\n"
+ "str q16, [SP, #0x30]\n"
+ "zip1 v31.16b, v28.16b, v24.16b\n"
+ "zip2 v28.16b, v28.16b, v24.16b\n"
+ "zip1 v16.16b, v26.16b, v22.16b\n"
+ "str q16, [SP, #0x40]\n"
+ "zip2 v16.16b, v26.16b, v22.16b\n"
+ "str q16, [SP, #0x50]\n"
+ "zip1 v26.16b, v20.16b, v18.16b\n"
+ "zip2 v24.16b, v20.16b, v18.16b\n"
+ "zip1 v16.16b, v19.16b, v17.16b\n"
+ "str q16, [SP, #0x60]\n"
+ "zip2 v16.16b, v19.16b, v17.16b\n"
+ "str q16, [SP, #0x70]\n"
+ "mov v22.16b, v30.16b\n"
+ "mov v20.16b, v30.16b\n"
+ "mov v19.16b, v30.16b\n"
+ ".inst 0x4e8697be // sdot v30.4s, v29.16b, v6.16b\n"
+ ".inst 0x4e8397b4 // sdot v20.4s, v29.16b, v3.16b\n"
+ "movi v15.4s, #0x0\n"
+ ".inst 0x4e83956f // sdot v15.4s, v11.16b, v3.16b\n"
+ ".inst 0x4e83977e // sdot v30.4s, v27.16b, v3.16b\n"
+ ".inst 0x4e9f9774 // sdot v20.4s, v27.16b, v31.16b\n"
+ "ext v3.16b, v3.16b, v3.16b, #0x1\n"
+ ".inst 0x4e9f956f // sdot v15.4s, v11.16b, v31.16b\n"
+ ".inst 0x4e9f973e // sdot v30.4s, v25.16b, v31.16b\n"
+ ".inst 0x4e9a9734 // sdot v20.4s, v25.16b, v26.16b\n"
+ "ext v31.16b, v31.16b, v31.16b, #0x1\n"
+ "mov v17.16b, v15.16b\n"
+ ".inst 0x4e86956f // sdot v15.4s, v11.16b, v6.16b\n"
+ "mls v30.4s, v15.4s, v14.4s\n"
+ ".inst 0x4e9a9571 // sdot v17.4s, v11.16b, v26.16b\n"
+ "ext v6.16b, v6.16b, v6.16b, #0x1\n"
+ "mls v20.4s, v17.4s, v14.4s\n"
+ "ext v26.16b, v26.16b, v26.16b, #0x1\n"
+ ".inst 0x4e8697b6 // sdot v22.4s, v29.16b, v6.16b\n"
+ ".inst 0x4e8397b3 // sdot v19.4s, v29.16b, v3.16b\n"
+ "movi v10.4s, #0x0\n"
+ ".inst 0x4e83956a // sdot v10.4s, v11.16b, v3.16b\n"
+ ".inst 0x4e839776 // sdot v22.4s, v27.16b, v3.16b\n"
+ ".inst 0x4e9f9773 // sdot v19.4s, v27.16b, v31.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v23.4s\n"
+ ".inst 0x4e9f956a // sdot v10.4s, v11.16b, v31.16b\n"
+ ".inst 0x4e9f9736 // sdot v22.4s, v25.16b, v31.16b\n"
+ ".inst 0x4e9a9733 // sdot v19.4s, v25.16b, v26.16b\n"
+ "and v18.16b, v30.16b, v21.16b\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "mov v17.16b, v10.16b\n"
+ ".inst 0x4e86956a // sdot v10.4s, v11.16b, v6.16b\n"
+ "mls v22.4s, v10.4s, v14.4s\n"
+ ".inst 0x4e9a9571 // sdot v17.4s, v11.16b, v26.16b\n"
+ "sqrdmulh v20.4s, v20.4s, v23.4s\n"
+ "mls v19.4s, v17.4s, v14.4s\n"
+ "sqadd v30.4s, v30.4s, v18.4s\n"
+ "and v16.16b, v20.16b, v21.16b\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqadd v30.4s, v30.4s, v16.4s\n"
- "sqadd v25.4s, v25.4s, v31.4s\n"
- "sqadd v20.4s, v20.4s, v0.4s\n"
- "srshl v5.4s, v5.4s, v4.4s\n"
- "srshl v30.4s, v30.4s, v4.4s\n"
- "srshl v25.4s, v25.4s, v4.4s\n"
- "srshl v20.4s, v20.4s, v4.4s\n"
- "add v5.4s, v5.4s, v10.4s\n"
- "add v30.4s, v30.4s, v10.4s\n"
- "add v25.4s, v25.4s, v10.4s\n"
- "add v20.4s, v20.4s, v10.4s\n"
- "smax v5.4s, v5.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smin v5.4s, v5.4s, v12.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v23.4s\n"
+ "srshl v30.4s, v30.4s, v21.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v23.4s\n"
+ "and v17.16b, v22.16b, v21.16b\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "add v30.4s, v30.4s, v13.4s\n"
+ "sqadd v20.4s, v20.4s, v16.4s\n"
+ "and v16.16b, v19.16b, v21.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smax v30.4s, v30.4s, v9.4s\n"
+ "srshl v20.4s, v20.4s, v21.4s\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
"smin v30.4s, v30.4s, v12.4s\n"
- "smin v25.4s, v25.4s, v12.4s\n"
- "smin v20.4s, v20.4s, v12.4s\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "add v20.4s, v20.4s, v13.4s\n"
+ "srshl v22.4s, v22.4s, v21.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "smax v20.4s, v20.4s, v9.4s\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "add v22.4s, v22.4s, v13.4s\n"
+ "smin v20.4s, v20.4s, v12.4s\n"
+ "srshl v19.4s, v19.4s, v21.4s\n"
+ "smax v22.4s, v22.4s, v9.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "smin v22.4s, v22.4s, v12.4s\n"
+ "add v19.4s, v19.4s, v13.4s\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "blt 20f\n"
- "str s5, [x24, x27]\n"
- "str s30, [x23, x27]\n"
- "str s25, [x22, x27]\n"
- "str s20, [x21, x27]\n"
- "b 23f\n"
- "20:" // Oddments: Unroll 0: Oddment store
- "add x24, x24, x27\n"
- "add x23, x23, x27\n"
- "add x22, x22, x27\n"
- "add x21, x21, x27\n"
- "tbz x20, #1, 21f\n"
- "st1 { v5.h }[0], [x24], #0x2\n"
- "st1 { v30.h }[0], [x23], #0x2\n"
- "st1 { v25.h }[0], [x22], #0x2\n"
- "st1 { v20.h }[0], [x21], #0x2\n"
- "tbz x20, #0, 22f\n"
- "st1 { v5.b }[2], [x24], #0x1\n"
- "st1 { v30.b }[2], [x23], #0x1\n"
- "st1 { v25.b }[2], [x22], #0x1\n"
- "st1 { v20.b }[2], [x21], #0x1\n"
+ "smax v19.4s, v19.4s, v9.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "smin v19.4s, v19.4s, v12.4s\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "blt 19f\n"
+ "str s30, [x25, x23]\n"
+ "str s22, [x24, x23]\n"
+ "str s20, [x22, x23]\n"
+ "str s19, [x21, x23]\n"
"b 22f\n"
- "21:" // Oddments: Unroll 0: Oddment store: Bit 1: Unset
- "st1 { v5.b }[0], [x24], #0x1\n"
- "st1 { v30.b }[0], [x23], #0x1\n"
- "st1 { v25.b }[0], [x22], #0x1\n"
- "st1 { v20.b }[0], [x21], #0x1\n"
- "22:" // Oddments: Unroll 0: Oddment store: Bit 1: End
- "23:" // Oddments: Unroll 0: After oddment store
- "subs x20, x20, #0x4\n"
- "add x27, x27, #0x4\n"
- "ble 35f\n"
- "ldr q5, [%x[params], #0x0]\n"
- "ldr q0, [%x[params], #0x10]\n"
- "movi v19.4s, #0x0\n"
- ".inst 0x4e8395d3 // sdot v19.4s, v14.16b, v3.16b\n"
- "ldr q16, [%x[params], #0x20]\n"
- "ldr q31, [%x[params], #0x30]\n"
- "mov v30.16b, v5.16b\n"
- "mov v25.16b, v5.16b\n"
- "ldr q9, [%x[params], #0x40]\n"
- "ldr q4, [%x[params], #0x50]\n"
- "mov v20.16b, v5.16b\n"
- ".inst 0x4e889405 // sdot v5.4s, v0.16b, v8.16b\n"
- ".inst 0x4e9c95d3 // sdot v19.4s, v14.16b, v28.16b\n"
- ".inst 0x4e839419 // sdot v25.4s, v0.16b, v3.16b\n"
- "movi v17.4s, #0x0\n"
- "cmp x20, #0x4\n"
- ".inst 0x4e839605 // sdot v5.4s, v16.16b, v3.16b\n"
- "mov v18.16b, v19.16b\n .inst 0x4e9795d2 // sdot v18.4s, v14.16b, v23.16b\n"
- "ext v3.16b, v3.16b, v3.16b, #0x1\n"
+ "19:" // Oddments: Unroll 0: Oddment store
+ "add x25, x25, x23\n"
+ "add x24, x24, x23\n"
+ "add x22, x22, x23\n"
+ "add x21, x21, x23\n"
+ "tbz x19, #1, 20f\n"
+ "st1 { v30.h }[0], [x25], #0x2\n"
+ "st1 { v22.h }[0], [x24], #0x2\n"
+ "st1 { v20.h }[0], [x22], #0x2\n"
+ "st1 { v19.h }[0], [x21], #0x2\n"
+ "tbz x19, #0, 21f\n"
+ "st1 { v30.b }[2], [x25], #0x1\n"
+ "st1 { v22.b }[2], [x24], #0x1\n"
+ "st1 { v20.b }[2], [x22], #0x1\n"
+ "st1 { v19.b }[2], [x21], #0x1\n"
+ "b 21f\n"
+ "20:" // Oddments: Unroll 0: Oddment store: Bit 1: Unset
+ "tbz x19, #0, 21f\n"
+ "st1 { v30.b }[0], [x25], #0x1\n"
+ "st1 { v22.b }[0], [x24], #0x1\n"
+ "st1 { v20.b }[0], [x22], #0x1\n"
+ "st1 { v19.b }[0], [x21], #0x1\n"
+ "21:" // Oddments: Unroll 0: Oddment store: Bit 1: End
+
+ "22:" // Oddments: Unroll 0: After oddment store
+ "add x23, x23, #0x4\n"
+ "subs x19, x19, #0x4\n"
+ "ble 34f\n"
+ "movi v15.4s, #0x0\n"
+ "ldr q30, [%x[params], #0x0]\n"
+ ".inst 0x4e82956f // sdot v15.4s, v11.16b, v2.16b\n"
+ "ldr q29, [%x[params], #0x10]\n"
+ "cmp x19, #0x4\n"
+ "movi v10.4s, #0x0\n"
+ "ldr q27, [%x[params], #0x20]\n"
+ "ldr q25, [%x[params], #0x30]\n"
+ "mov v22.16b, v30.16b\n"
+ "ldr q23, [%x[params], #0x40]\n"
+ "mov v20.16b, v30.16b\n"
+ "ldr q21, [%x[params], #0x50]\n"
"add %x[params], %x[params], #0x60\n"
- ".inst 0x4e8895d3 // sdot v19.4s, v14.16b, v8.16b\n"
+ "mov v19.16b, v30.16b\n"
+ ".inst 0x4e8897be // sdot v30.4s, v29.16b, v8.16b\n"
+ ".inst 0x4e8297b4 // sdot v20.4s, v29.16b, v2.16b\n"
+ ".inst 0x4e9c956f // sdot v15.4s, v11.16b, v28.16b\n"
+ ".inst 0x4e82977e // sdot v30.4s, v27.16b, v2.16b\n"
+ "ext v2.16b, v2.16b, v2.16b, #0x1\n"
+ ".inst 0x4e9c9774 // sdot v20.4s, v27.16b, v28.16b\n"
+ "mov v17.16b, v15.16b\n"
+ ".inst 0x4e88956f // sdot v15.4s, v11.16b, v8.16b\n"
+ ".inst 0x4e9c973e // sdot v30.4s, v25.16b, v28.16b\n"
+ "mls v30.4s, v15.4s, v14.4s\n"
+ ".inst 0x4e989734 // sdot v20.4s, v25.16b, v24.16b\n"
+ ".inst 0x4e989571 // sdot v17.4s, v11.16b, v24.16b\n"
+ "mls v20.4s, v17.4s, v14.4s\n"
"ext v8.16b, v8.16b, v8.16b, #0x1\n"
- ".inst 0x4e88941e // sdot v30.4s, v0.16b, v8.16b\n"
- ".inst 0x4e839414 // sdot v20.4s, v0.16b, v3.16b\n"
- ".inst 0x4e8395d1 // sdot v17.4s, v14.16b, v3.16b\n"
- ".inst 0x4e9c9619 // sdot v25.4s, v16.16b, v28.16b\n"
- ".inst 0x4e9c97e5 // sdot v5.4s, v31.16b, v28.16b\n"
"ext v28.16b, v28.16b, v28.16b, #0x1\n"
- ".inst 0x4e83961e // sdot v30.4s, v16.16b, v3.16b\n"
- ".inst 0x4e9c9614 // sdot v20.4s, v16.16b, v28.16b\n"
- "mls v5.4s, v19.4s, v11.4s\n"
- ".inst 0x4e9c95d1 // sdot v17.4s, v14.16b, v28.16b\n"
- ".inst 0x4e9797f9 // sdot v25.4s, v31.16b, v23.16b\n"
- "ext v23.16b, v23.16b, v23.16b, #0x1\n"
- ".inst 0x4e9c97fe // sdot v30.4s, v31.16b, v28.16b\n"
- ".inst 0x4e9797f4 // sdot v20.4s, v31.16b, v23.16b\n"
- "sqrdmulh v5.4s, v5.4s, v9.4s\n"
- "mov v16.16b, v17.16b\n .inst 0x4e9795d0 // sdot v16.4s, v14.16b, v23.16b\n"
- ".inst 0x4e8895d1 // sdot v17.4s, v14.16b, v8.16b\n"
- "mls v30.4s, v17.4s, v11.4s\n"
- "mls v25.4s, v18.4s, v11.4s\n"
- "mls v20.4s, v16.4s, v11.4s\n"
- "and v0.16b, v5.16b, v4.16b\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "sqadd v5.4s, v5.4s, v0.4s\n"
- "and v16.16b, v30.16b, v4.16b\n"
- "and v31.16b, v25.16b, v4.16b\n"
- "and v0.16b, v20.16b, v4.16b\n"
+ "ext v24.16b, v24.16b, v24.16b, #0x1\n"
+ ".inst 0x4e8297b3 // sdot v19.4s, v29.16b, v2.16b\n"
+ ".inst 0x4e82956a // sdot v10.4s, v11.16b, v2.16b\n"
+ ".inst 0x4e8897b6 // sdot v22.4s, v29.16b, v8.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v23.4s\n"
+ ".inst 0x4e9c9773 // sdot v19.4s, v27.16b, v28.16b\n"
+ ".inst 0x4e9c956a // sdot v10.4s, v11.16b, v28.16b\n"
+ ".inst 0x4e829776 // sdot v22.4s, v27.16b, v2.16b\n"
+ "and v18.16b, v30.16b, v21.16b\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ ".inst 0x4e9c9736 // sdot v22.4s, v25.16b, v28.16b\n"
+ ".inst 0x4e989733 // sdot v19.4s, v25.16b, v24.16b\n"
+ "mov v17.16b, v10.16b\n"
+ ".inst 0x4e88956a // sdot v10.4s, v11.16b, v8.16b\n"
+ "mls v22.4s, v10.4s, v14.4s\n"
+ ".inst 0x4e989571 // sdot v17.4s, v11.16b, v24.16b\n"
+ "sqadd v30.4s, v30.4s, v18.4s\n"
+ "mls v19.4s, v17.4s, v14.4s\n"
+ "srshl v30.4s, v30.4s, v21.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v23.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v23.4s\n"
+ "add v30.4s, v30.4s, v13.4s\n"
+ "and v16.16b, v20.16b, v21.16b\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqadd v30.4s, v30.4s, v16.4s\n"
- "sqadd v25.4s, v25.4s, v31.4s\n"
- "sqadd v20.4s, v20.4s, v0.4s\n"
- "srshl v5.4s, v5.4s, v4.4s\n"
- "srshl v30.4s, v30.4s, v4.4s\n"
- "srshl v25.4s, v25.4s, v4.4s\n"
- "srshl v20.4s, v20.4s, v4.4s\n"
- "add v5.4s, v5.4s, v10.4s\n"
- "add v30.4s, v30.4s, v10.4s\n"
- "add v25.4s, v25.4s, v10.4s\n"
- "add v20.4s, v20.4s, v10.4s\n"
- "smax v5.4s, v5.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smin v5.4s, v5.4s, v12.4s\n"
+ "smax v30.4s, v30.4s, v9.4s\n"
+ "and v17.16b, v22.16b, v21.16b\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
"smin v30.4s, v30.4s, v12.4s\n"
- "smin v25.4s, v25.4s, v12.4s\n"
- "smin v20.4s, v20.4s, v12.4s\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "sqrdmulh v19.4s, v19.4s, v23.4s\n"
+ "sqadd v20.4s, v20.4s, v16.4s\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "and v16.16b, v19.16b, v21.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "srshl v20.4s, v20.4s, v21.4s\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "srshl v22.4s, v22.4s, v21.4s\n"
+ "add v20.4s, v20.4s, v13.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "smax v20.4s, v20.4s, v9.4s\n"
+ "add v22.4s, v22.4s, v13.4s\n"
+ "srshl v19.4s, v19.4s, v21.4s\n"
+ "smin v20.4s, v20.4s, v12.4s\n"
+ "smax v22.4s, v22.4s, v9.4s\n"
+ "add v19.4s, v19.4s, v13.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "smin v22.4s, v22.4s, v12.4s\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "blt 24f\n"
- "str s5, [x24, x27]\n"
- "str s30, [x23, x27]\n"
- "str s25, [x22, x27]\n"
- "str s20, [x21, x27]\n"
- "b 27f\n"
- "24:" // Oddments: Unroll 1: Oddment store
- "add x24, x24, x27\n"
- "add x23, x23, x27\n"
- "add x22, x22, x27\n"
- "add x21, x21, x27\n"
- "tbz x20, #1, 25f\n"
- "st1 { v5.h }[0], [x24], #0x2\n"
- "st1 { v30.h }[0], [x23], #0x2\n"
- "st1 { v25.h }[0], [x22], #0x2\n"
- "st1 { v20.h }[0], [x21], #0x2\n"
- "tbz x20, #0, 26f\n"
- "st1 { v5.b }[2], [x24], #0x1\n"
- "st1 { v30.b }[2], [x23], #0x1\n"
- "st1 { v25.b }[2], [x22], #0x1\n"
- "st1 { v20.b }[2], [x21], #0x1\n"
+ "smax v19.4s, v19.4s, v9.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "smin v19.4s, v19.4s, v12.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "blt 23f\n"
+ "str s30, [x25, x23]\n"
+ "str s22, [x24, x23]\n"
+ "str s20, [x22, x23]\n"
+ "str s19, [x21, x23]\n"
"b 26f\n"
- "25:" // Oddments: Unroll 1: Oddment store: Bit 1: Unset
- "st1 { v5.b }[0], [x24], #0x1\n"
- "st1 { v30.b }[0], [x23], #0x1\n"
- "st1 { v25.b }[0], [x22], #0x1\n"
- "st1 { v20.b }[0], [x21], #0x1\n"
- "26:" // Oddments: Unroll 1: Oddment store: Bit 1: End
- "27:" // Oddments: Unroll 1: After oddment store
- "subs x20, x20, #0x4\n"
- "add x27, x27, #0x4\n"
- "ble 35f\n"
- "ldr q5, [%x[params], #0x0]\n"
- "ldr q0, [%x[params], #0x10]\n"
- "movi v19.4s, #0x0\n"
- ".inst 0x4e8295d3 // sdot v19.4s, v14.16b, v2.16b\n"
- "ldr q16, [%x[params], #0x20]\n"
- "ldr q31, [%x[params], #0x30]\n"
- "mov v30.16b, v5.16b\n"
- "mov v25.16b, v5.16b\n"
- "ldr q9, [%x[params], #0x40]\n"
- "ldr q4, [%x[params], #0x50]\n"
- "mov v20.16b, v5.16b\n"
- ".inst 0x4e879405 // sdot v5.4s, v0.16b, v7.16b\n"
- ".inst 0x4e9b95d3 // sdot v19.4s, v14.16b, v27.16b\n"
- ".inst 0x4e829419 // sdot v25.4s, v0.16b, v2.16b\n"
- "movi v17.4s, #0x0\n"
- "cmp x20, #0x4\n"
- ".inst 0x4e829605 // sdot v5.4s, v16.16b, v2.16b\n"
- "mov v18.16b, v19.16b\n .inst 0x4e9695d2 // sdot v18.4s, v14.16b, v22.16b\n"
- "ext v2.16b, v2.16b, v2.16b, #0x1\n"
+ "23:" // Oddments: Unroll 1: Oddment store
+ "add x25, x25, x23\n"
+ "add x24, x24, x23\n"
+ "add x22, x22, x23\n"
+ "add x21, x21, x23\n"
+ "tbz x19, #1, 24f\n"
+ "st1 { v30.h }[0], [x25], #0x2\n"
+ "st1 { v22.h }[0], [x24], #0x2\n"
+ "st1 { v20.h }[0], [x22], #0x2\n"
+ "st1 { v19.h }[0], [x21], #0x2\n"
+ "tbz x19, #0, 25f\n"
+ "st1 { v30.b }[2], [x25], #0x1\n"
+ "st1 { v22.b }[2], [x24], #0x1\n"
+ "st1 { v20.b }[2], [x22], #0x1\n"
+ "st1 { v19.b }[2], [x21], #0x1\n"
+ "b 25f\n"
+ "24:" // Oddments: Unroll 1: Oddment store: Bit 1: Unset
+ "tbz x19, #0, 25f\n"
+ "st1 { v30.b }[0], [x25], #0x1\n"
+ "st1 { v22.b }[0], [x24], #0x1\n"
+ "st1 { v20.b }[0], [x22], #0x1\n"
+ "st1 { v19.b }[0], [x21], #0x1\n"
+ "25:" // Oddments: Unroll 1: Oddment store: Bit 1: End
+
+ "26:" // Oddments: Unroll 1: After oddment store
+ "add x23, x23, #0x4\n"
+ "subs x19, x19, #0x4\n"
+ "ble 34f\n"
+ "movi v15.4s, #0x0\n"
+ "ldr q6, [SP, #0x0]\n"
+ "movi v10.4s, #0x0\n"
+ "ldr q3, [SP, #0x20]\n"
+ "cmp x19, #0x4\n"
+ ".inst 0x4e83956f // sdot v15.4s, v11.16b, v3.16b\n"
+ "ldr q31, [SP, #0x40]\n"
+ "ldr q26, [SP, #0x60]\n"
+ ".inst 0x4e9f956f // sdot v15.4s, v11.16b, v31.16b\n"
+ "ldr q30, [%x[params], #0x0]\n"
+ "ldr q29, [%x[params], #0x10]\n"
+ "mov v22.16b, v30.16b\n"
+ "ldr q27, [%x[params], #0x20]\n"
+ "mov v20.16b, v30.16b\n"
+ "ldr q25, [%x[params], #0x30]\n"
+ "mov v19.16b, v30.16b\n"
+ "ldr q23, [%x[params], #0x40]\n"
+ ".inst 0x4e8697be // sdot v30.4s, v29.16b, v6.16b\n"
+ "ldr q21, [%x[params], #0x50]\n"
"add %x[params], %x[params], #0x60\n"
- ".inst 0x4e8795d3 // sdot v19.4s, v14.16b, v7.16b\n"
- "ext v7.16b, v7.16b, v7.16b, #0x1\n"
- ".inst 0x4e87941e // sdot v30.4s, v0.16b, v7.16b\n"
- ".inst 0x4e829414 // sdot v20.4s, v0.16b, v2.16b\n"
- ".inst 0x4e8295d1 // sdot v17.4s, v14.16b, v2.16b\n"
- ".inst 0x4e9b9619 // sdot v25.4s, v16.16b, v27.16b\n"
- ".inst 0x4e9b97e5 // sdot v5.4s, v31.16b, v27.16b\n"
- "ext v27.16b, v27.16b, v27.16b, #0x1\n"
- ".inst 0x4e82961e // sdot v30.4s, v16.16b, v2.16b\n"
- ".inst 0x4e9b9614 // sdot v20.4s, v16.16b, v27.16b\n"
- "mls v5.4s, v19.4s, v11.4s\n"
- ".inst 0x4e9b95d1 // sdot v17.4s, v14.16b, v27.16b\n"
- ".inst 0x4e9697f9 // sdot v25.4s, v31.16b, v22.16b\n"
- "ext v22.16b, v22.16b, v22.16b, #0x1\n"
- ".inst 0x4e9b97fe // sdot v30.4s, v31.16b, v27.16b\n"
- ".inst 0x4e9697f4 // sdot v20.4s, v31.16b, v22.16b\n"
- "sqrdmulh v5.4s, v5.4s, v9.4s\n"
- "mov v16.16b, v17.16b\n .inst 0x4e9695d0 // sdot v16.4s, v14.16b, v22.16b\n"
- ".inst 0x4e8795d1 // sdot v17.4s, v14.16b, v7.16b\n"
- "mls v30.4s, v17.4s, v11.4s\n"
- "mls v25.4s, v18.4s, v11.4s\n"
- "mls v20.4s, v16.4s, v11.4s\n"
- "and v0.16b, v5.16b, v4.16b\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "sqadd v5.4s, v5.4s, v0.4s\n"
- "and v16.16b, v30.16b, v4.16b\n"
- "and v31.16b, v25.16b, v4.16b\n"
- "and v0.16b, v20.16b, v4.16b\n"
+ ".inst 0x4e8397b4 // sdot v20.4s, v29.16b, v3.16b\n"
+ "mov v17.16b, v15.16b\n"
+ ".inst 0x4e86956f // sdot v15.4s, v11.16b, v6.16b\n"
+ ".inst 0x4e83977e // sdot v30.4s, v27.16b, v3.16b\n"
+ ".inst 0x4e9a9571 // sdot v17.4s, v11.16b, v26.16b\n"
+ ".inst 0x4e9f9774 // sdot v20.4s, v27.16b, v31.16b\n"
+ "ext v6.16b, v6.16b, v6.16b, #0x1\n"
+ ".inst 0x4e9f973e // sdot v30.4s, v25.16b, v31.16b\n"
+ "mls v30.4s, v15.4s, v14.4s\n"
+ ".inst 0x4e9a9734 // sdot v20.4s, v25.16b, v26.16b\n"
+ "ext v3.16b, v3.16b, v3.16b, #0x1\n"
+ "mls v20.4s, v17.4s, v14.4s\n"
+ "ext v31.16b, v31.16b, v31.16b, #0x1\n"
+ "ext v26.16b, v26.16b, v26.16b, #0x1\n"
+ ".inst 0x4e8697b6 // sdot v22.4s, v29.16b, v6.16b\n"
+ ".inst 0x4e8397b3 // sdot v19.4s, v29.16b, v3.16b\n"
+ ".inst 0x4e83956a // sdot v10.4s, v11.16b, v3.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v23.4s\n"
+ ".inst 0x4e839776 // sdot v22.4s, v27.16b, v3.16b\n"
+ ".inst 0x4e9f9773 // sdot v19.4s, v27.16b, v31.16b\n"
+ ".inst 0x4e9f956a // sdot v10.4s, v11.16b, v31.16b\n"
+ "and v18.16b, v30.16b, v21.16b\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ ".inst 0x4e9f9736 // sdot v22.4s, v25.16b, v31.16b\n"
+ ".inst 0x4e9a9733 // sdot v19.4s, v25.16b, v26.16b\n"
+ "mov v17.16b, v10.16b\n"
+ ".inst 0x4e86956a // sdot v10.4s, v11.16b, v6.16b\n"
+ "mls v22.4s, v10.4s, v14.4s\n"
+ ".inst 0x4e9a9571 // sdot v17.4s, v11.16b, v26.16b\n"
+ "sqadd v30.4s, v30.4s, v18.4s\n"
+ "mls v19.4s, v17.4s, v14.4s\n"
+ "srshl v30.4s, v30.4s, v21.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v23.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v23.4s\n"
+ "add v30.4s, v30.4s, v13.4s\n"
+ "and v16.16b, v20.16b, v21.16b\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqadd v30.4s, v30.4s, v16.4s\n"
- "sqadd v25.4s, v25.4s, v31.4s\n"
- "sqadd v20.4s, v20.4s, v0.4s\n"
- "srshl v5.4s, v5.4s, v4.4s\n"
- "srshl v30.4s, v30.4s, v4.4s\n"
- "srshl v25.4s, v25.4s, v4.4s\n"
- "srshl v20.4s, v20.4s, v4.4s\n"
- "add v5.4s, v5.4s, v10.4s\n"
- "add v30.4s, v30.4s, v10.4s\n"
- "add v25.4s, v25.4s, v10.4s\n"
- "add v20.4s, v20.4s, v10.4s\n"
- "smax v5.4s, v5.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smin v5.4s, v5.4s, v12.4s\n"
+ "smax v30.4s, v30.4s, v9.4s\n"
+ "and v17.16b, v22.16b, v21.16b\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
"smin v30.4s, v30.4s, v12.4s\n"
- "smin v25.4s, v25.4s, v12.4s\n"
- "smin v20.4s, v20.4s, v12.4s\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "sqrdmulh v19.4s, v19.4s, v23.4s\n"
+ "sqadd v20.4s, v20.4s, v16.4s\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "and v16.16b, v19.16b, v21.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "srshl v20.4s, v20.4s, v21.4s\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "srshl v22.4s, v22.4s, v21.4s\n"
+ "add v20.4s, v20.4s, v13.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "smax v20.4s, v20.4s, v9.4s\n"
+ "add v22.4s, v22.4s, v13.4s\n"
+ "srshl v19.4s, v19.4s, v21.4s\n"
+ "smin v20.4s, v20.4s, v12.4s\n"
+ "smax v22.4s, v22.4s, v9.4s\n"
+ "add v19.4s, v19.4s, v13.4s\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "blt 28f\n"
- "str s5, [x24, x27]\n"
- "str s30, [x23, x27]\n"
- "str s25, [x22, x27]\n"
- "str s20, [x21, x27]\n"
- "b 31f\n"
- "28:" // Oddments: Unroll 2: Oddment store
- "add x24, x24, x27\n"
- "add x23, x23, x27\n"
- "add x22, x22, x27\n"
- "add x21, x21, x27\n"
- "tbz x20, #1, 29f\n"
- "st1 { v5.h }[0], [x24], #0x2\n"
- "st1 { v30.h }[0], [x23], #0x2\n"
- "st1 { v25.h }[0], [x22], #0x2\n"
- "st1 { v20.h }[0], [x21], #0x2\n"
- "tbz x20, #0, 30f\n"
- "st1 { v5.b }[2], [x24], #0x1\n"
- "st1 { v30.b }[2], [x23], #0x1\n"
- "st1 { v25.b }[2], [x22], #0x1\n"
- "st1 { v20.b }[2], [x21], #0x1\n"
+ "smin v22.4s, v22.4s, v12.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "smax v19.4s, v19.4s, v9.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "smin v19.4s, v19.4s, v12.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "blt 27f\n"
+ "str s30, [x25, x23]\n"
+ "str s22, [x24, x23]\n"
+ "str s20, [x22, x23]\n"
+ "str s19, [x21, x23]\n"
"b 30f\n"
- "29:" // Oddments: Unroll 2: Oddment store: Bit 1: Unset
- "st1 { v5.b }[0], [x24], #0x1\n"
- "st1 { v30.b }[0], [x23], #0x1\n"
- "st1 { v25.b }[0], [x22], #0x1\n"
- "st1 { v20.b }[0], [x21], #0x1\n"
- "30:" // Oddments: Unroll 2: Oddment store: Bit 1: End
- "31:" // Oddments: Unroll 2: After oddment store
- "subs x20, x20, #0x4\n"
- "add x27, x27, #0x4\n"
- "ble 35f\n"
- "ldr q5, [%x[params], #0x0]\n"
- "ldr q0, [%x[params], #0x10]\n"
- "movi v19.4s, #0x0\n"
- ".inst 0x4e8195d3 // sdot v19.4s, v14.16b, v1.16b\n"
- "ldr q16, [%x[params], #0x20]\n"
- "ldr q31, [%x[params], #0x30]\n"
- "mov v30.16b, v5.16b\n"
- "mov v25.16b, v5.16b\n"
- "ldr q9, [%x[params], #0x40]\n"
- "ldr q4, [%x[params], #0x50]\n"
- "mov v20.16b, v5.16b\n"
- ".inst 0x4e869405 // sdot v5.4s, v0.16b, v6.16b\n"
- ".inst 0x4e9a95d3 // sdot v19.4s, v14.16b, v26.16b\n"
- ".inst 0x4e819419 // sdot v25.4s, v0.16b, v1.16b\n"
- "movi v17.4s, #0x0\n"
+ "27:" // Oddments: Unroll 2: Oddment store
+ "add x25, x25, x23\n"
+ "add x24, x24, x23\n"
+ "add x22, x22, x23\n"
+ "add x21, x21, x23\n"
+ "tbz x19, #1, 28f\n"
+ "st1 { v30.h }[0], [x25], #0x2\n"
+ "st1 { v22.h }[0], [x24], #0x2\n"
+ "st1 { v20.h }[0], [x22], #0x2\n"
+ "st1 { v19.h }[0], [x21], #0x2\n"
+ "tbz x19, #0, 29f\n"
+ "st1 { v30.b }[2], [x25], #0x1\n"
+ "st1 { v22.b }[2], [x24], #0x1\n"
+ "st1 { v20.b }[2], [x22], #0x1\n"
+ "st1 { v19.b }[2], [x21], #0x1\n"
+ "b 29f\n"
+ "28:" // Oddments: Unroll 2: Oddment store: Bit 1: Unset
+ "tbz x19, #0, 29f\n"
+ "st1 { v30.b }[0], [x25], #0x1\n"
+ "st1 { v22.b }[0], [x24], #0x1\n"
+ "st1 { v20.b }[0], [x22], #0x1\n"
+ "st1 { v19.b }[0], [x21], #0x1\n"
+ "29:" // Oddments: Unroll 2: Oddment store: Bit 1: End
+
+ "30:" // Oddments: Unroll 2: After oddment store
+ "add x23, x23, #0x4\n"
+ "subs x19, x19, #0x4\n"
+ "ble 34f\n"
+ "movi v15.4s, #0x0\n"
+ "ldr q8, [SP, #0x10]\n"
+ "movi v10.4s, #0x0\n"
+ "ldr q2, [SP, #0x30]\n"
+ "ldr q28, [SP, #0x50]\n"
+ ".inst 0x4e82956f // sdot v15.4s, v11.16b, v2.16b\n"
+ "ldr q24, [SP, #0x70]\n"
+ "ldr q30, [%x[params], #0x0]\n"
+ "mov v22.16b, v30.16b\n"
+ "ldr q29, [%x[params], #0x10]\n"
+ "mov v20.16b, v30.16b\n"
+ "ldr q27, [%x[params], #0x20]\n"
+ "mov v19.16b, v30.16b\n"
+ "ldr q25, [%x[params], #0x30]\n"
+ ".inst 0x4e9c956f // sdot v15.4s, v11.16b, v28.16b\n"
+ "ldr q23, [%x[params], #0x40]\n"
+ "ldr q21, [%x[params], #0x50]\n"
+ ".inst 0x4e8897be // sdot v30.4s, v29.16b, v8.16b\n"
"add %x[params], %x[params], #0x60\n"
- ".inst 0x4e819605 // sdot v5.4s, v16.16b, v1.16b\n"
- "mov v18.16b, v19.16b\n .inst 0x4e9595d2 // sdot v18.4s, v14.16b, v21.16b\n"
- "ext v1.16b, v1.16b, v1.16b, #0x1\n"
- ".inst 0x4e8695d3 // sdot v19.4s, v14.16b, v6.16b\n"
- "ext v6.16b, v6.16b, v6.16b, #0x1\n"
- ".inst 0x4e86941e // sdot v30.4s, v0.16b, v6.16b\n"
- ".inst 0x4e819414 // sdot v20.4s, v0.16b, v1.16b\n"
- ".inst 0x4e8195d1 // sdot v17.4s, v14.16b, v1.16b\n"
- ".inst 0x4e9a9619 // sdot v25.4s, v16.16b, v26.16b\n"
- ".inst 0x4e9a97e5 // sdot v5.4s, v31.16b, v26.16b\n"
- "ext v26.16b, v26.16b, v26.16b, #0x1\n"
- ".inst 0x4e81961e // sdot v30.4s, v16.16b, v1.16b\n"
- ".inst 0x4e9a9614 // sdot v20.4s, v16.16b, v26.16b\n"
- "mls v5.4s, v19.4s, v11.4s\n"
- ".inst 0x4e9a95d1 // sdot v17.4s, v14.16b, v26.16b\n"
- ".inst 0x4e9597f9 // sdot v25.4s, v31.16b, v21.16b\n"
- "ext v21.16b, v21.16b, v21.16b, #0x1\n"
- ".inst 0x4e9a97fe // sdot v30.4s, v31.16b, v26.16b\n"
- ".inst 0x4e9597f4 // sdot v20.4s, v31.16b, v21.16b\n"
- "sqrdmulh v5.4s, v5.4s, v9.4s\n"
- "mov v16.16b, v17.16b\n .inst 0x4e9595d0 // sdot v16.4s, v14.16b, v21.16b\n"
- ".inst 0x4e8695d1 // sdot v17.4s, v14.16b, v6.16b\n"
- "mls v30.4s, v17.4s, v11.4s\n"
- "mls v25.4s, v18.4s, v11.4s\n"
- "mls v20.4s, v16.4s, v11.4s\n"
- "and v0.16b, v5.16b, v4.16b\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "sqadd v5.4s, v5.4s, v0.4s\n"
- "and v16.16b, v30.16b, v4.16b\n"
- "and v31.16b, v25.16b, v4.16b\n"
- "and v0.16b, v20.16b, v4.16b\n"
+ ".inst 0x4e8297b4 // sdot v20.4s, v29.16b, v2.16b\n"
+ "mov v17.16b, v15.16b\n"
+ ".inst 0x4e88956f // sdot v15.4s, v11.16b, v8.16b\n"
+ ".inst 0x4e989571 // sdot v17.4s, v11.16b, v24.16b\n"
+ ".inst 0x4e82977e // sdot v30.4s, v27.16b, v2.16b\n"
+ ".inst 0x4e9c9774 // sdot v20.4s, v27.16b, v28.16b\n"
+ "ext v8.16b, v8.16b, v8.16b, #0x1\n"
+ "ext v2.16b, v2.16b, v2.16b, #0x1\n"
+ ".inst 0x4e9c973e // sdot v30.4s, v25.16b, v28.16b\n"
+ "mls v30.4s, v15.4s, v14.4s\n"
+ ".inst 0x4e989734 // sdot v20.4s, v25.16b, v24.16b\n"
+ "ext v28.16b, v28.16b, v28.16b, #0x1\n"
+ "mls v20.4s, v17.4s, v14.4s\n"
+ "ext v24.16b, v24.16b, v24.16b, #0x1\n"
+ ".inst 0x4e8897b6 // sdot v22.4s, v29.16b, v8.16b\n"
+ ".inst 0x4e8297b3 // sdot v19.4s, v29.16b, v2.16b\n"
+ ".inst 0x4e82956a // sdot v10.4s, v11.16b, v2.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v23.4s\n"
+ ".inst 0x4e829776 // sdot v22.4s, v27.16b, v2.16b\n"
+ ".inst 0x4e9c9773 // sdot v19.4s, v27.16b, v28.16b\n"
+ ".inst 0x4e9c956a // sdot v10.4s, v11.16b, v28.16b\n"
+ "and v18.16b, v30.16b, v21.16b\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ ".inst 0x4e9c9736 // sdot v22.4s, v25.16b, v28.16b\n"
+ ".inst 0x4e989733 // sdot v19.4s, v25.16b, v24.16b\n"
+ "mov v17.16b, v10.16b\n"
+ ".inst 0x4e88956a // sdot v10.4s, v11.16b, v8.16b\n"
+ "mls v22.4s, v10.4s, v14.4s\n"
+ ".inst 0x4e989571 // sdot v17.4s, v11.16b, v24.16b\n"
+ "sqadd v30.4s, v30.4s, v18.4s\n"
+ "mls v19.4s, v17.4s, v14.4s\n"
+ "srshl v30.4s, v30.4s, v21.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v23.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v23.4s\n"
+ "add v30.4s, v30.4s, v13.4s\n"
+ "and v16.16b, v20.16b, v21.16b\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqadd v30.4s, v30.4s, v16.4s\n"
- "sqadd v25.4s, v25.4s, v31.4s\n"
- "sqadd v20.4s, v20.4s, v0.4s\n"
- "srshl v5.4s, v5.4s, v4.4s\n"
- "srshl v30.4s, v30.4s, v4.4s\n"
- "srshl v25.4s, v25.4s, v4.4s\n"
- "srshl v20.4s, v20.4s, v4.4s\n"
- "add v5.4s, v5.4s, v10.4s\n"
- "add v30.4s, v30.4s, v10.4s\n"
- "add v25.4s, v25.4s, v10.4s\n"
- "add v20.4s, v20.4s, v10.4s\n"
- "smax v5.4s, v5.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smin v5.4s, v5.4s, v12.4s\n"
+ "smax v30.4s, v30.4s, v9.4s\n"
+ "and v17.16b, v22.16b, v21.16b\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
"smin v30.4s, v30.4s, v12.4s\n"
- "smin v25.4s, v25.4s, v12.4s\n"
- "smin v20.4s, v20.4s, v12.4s\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "sqrdmulh v19.4s, v19.4s, v23.4s\n"
+ "sqadd v20.4s, v20.4s, v16.4s\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "and v16.16b, v19.16b, v21.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "srshl v20.4s, v20.4s, v21.4s\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "srshl v22.4s, v22.4s, v21.4s\n"
+ "add v20.4s, v20.4s, v13.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "smax v20.4s, v20.4s, v9.4s\n"
+ "add v22.4s, v22.4s, v13.4s\n"
+ "srshl v19.4s, v19.4s, v21.4s\n"
+ "smin v20.4s, v20.4s, v12.4s\n"
+ "smax v22.4s, v22.4s, v9.4s\n"
+ "add v19.4s, v19.4s, v13.4s\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "32:" // Oddments: Unroll 3: Oddment store
- "add x24, x24, x27\n"
- "add x23, x23, x27\n"
- "add x22, x22, x27\n"
- "add x21, x21, x27\n"
- "tbz x20, #1, 33f\n"
- "st1 { v5.h }[0], [x24], #0x2\n"
- "st1 { v30.h }[0], [x23], #0x2\n"
- "st1 { v25.h }[0], [x22], #0x2\n"
- "st1 { v20.h }[0], [x21], #0x2\n"
- "tbz x20, #0, 34f\n"
- "st1 { v5.b }[2], [x24], #0x1\n"
- "st1 { v30.b }[2], [x23], #0x1\n"
- "st1 { v25.b }[2], [x22], #0x1\n"
- "st1 { v20.b }[2], [x21], #0x1\n"
- "b 34f\n"
- "33:" // Oddments: Unroll 3: Oddment store: Bit 1: Unset
- "st1 { v5.b }[0], [x24], #0x1\n"
- "st1 { v30.b }[0], [x23], #0x1\n"
- "st1 { v25.b }[0], [x22], #0x1\n"
- "st1 { v20.b }[0], [x21], #0x1\n"
- "34:" // Oddments: Unroll 3: Oddment store: Bit 1: End
- "35:" // End
+ "smin v22.4s, v22.4s, v12.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "smax v19.4s, v19.4s, v9.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "smin v19.4s, v19.4s, v12.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "31:" // Oddments: Unroll 3: Oddment store
+ "add x25, x25, x23\n"
+ "add x24, x24, x23\n"
+ "add x22, x22, x23\n"
+ "add x21, x21, x23\n"
+ "tbz x19, #1, 32f\n"
+ "st1 { v30.h }[0], [x25], #0x2\n"
+ "st1 { v22.h }[0], [x24], #0x2\n"
+ "st1 { v20.h }[0], [x22], #0x2\n"
+ "st1 { v19.h }[0], [x21], #0x2\n"
+ "tbz x19, #0, 33f\n"
+ "st1 { v30.b }[2], [x25], #0x1\n"
+ "st1 { v22.b }[2], [x24], #0x1\n"
+ "st1 { v20.b }[2], [x22], #0x1\n"
+ "st1 { v19.b }[2], [x21], #0x1\n"
+ "b 33f\n"
+ "32:" // Oddments: Unroll 3: Oddment store: Bit 1: Unset
+ "tbz x19, #0, 33f\n"
+ "st1 { v30.b }[0], [x25], #0x1\n"
+ "st1 { v22.b }[0], [x24], #0x1\n"
+ "st1 { v20.b }[0], [x22], #0x1\n"
+ "st1 { v19.b }[0], [x21], #0x1\n"
+ "33:" // Oddments: Unroll 3: Oddment store: Bit 1: End
+
+ "34:" // End
+ "add SP, SP, #0x80\n"
: [params] "+&r" (params)
- : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : [inptrs] "r" (inptrs), [n_channels] "r" ((long unsigned int) n_channels), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
index d69d0e1ef2..71729e0d1e 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -91,1072 +91,1072 @@ void a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "ldr x6, [%x[params], %[offsetof_Params_n_channels]]\n"
- "ldr x23, [%x[params], %[offsetof_Params_requant]]\n"
- "lsr x7, x6, #0x3\n"
- "add x20, x23, %[offsetof_Requantize32_a_offset]\n"
- "ld1r { v24.16b }, [x20]\n"
+ "ldr x19, [%x[params], %[offsetof_Params_requant]]\n"
+ "ldr x8, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "add x24, x19, %[offsetof_Requantize32_a_offset]\n"
+ "add x23, x19, %[offsetof_Requantize32_b_offset]\n"
"ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n"
- "add x21, x23, %[offsetof_Requantize32_b_offset]\n"
- "add x20, x23, %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v15.16b }, [x21]\n"
- "ld1r { v14.8h }, [x20]\n"
- "add x21, x23, %[offsetof_Requantize32_minval]\n"
- "add x20, x23, %[offsetof_Requantize32_maxval]\n"
- "ld1r { v12.8h }, [x21]\n"
- "ld1r { v11.8h }, [x20]\n"
- "mov x8, #0x0\n"
- "mov x17, #0x0\n"
- "add x16, %x[params], %[offsetof_Params_inptrs]\n"
- "ldr x15, [%x[params], %[offsetof_Params_weights]]\n"
- "ldr x14, [%x[params], %[offsetof_Params_requant_muls]]\n"
- "ldr x13, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "ldp x12, x11, [x22, #0x0]\n"
- "ldp x10, x9, [x22, #0x10]\n"
- "cbz x7, 3f\n"
- "ldr d0, [x15, #0x0]\n"
- "ldr d1, [x15, #0x8]\n"
- "subs x7, x7, #0x1\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
- "ldr d2, [x15, #0x10]\n"
- "ldr d3, [x15, #0x18]\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
- "ldr d4, [x15, #0x20]\n"
- "ldr d5, [x15, #0x28]\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "ldr d6, [x15, #0x30]\n"
- "ldr d7, [x15, #0x38]\n"
- "ssubl v5.8h, v5.8b, v15.8b\n"
- "ssubl v6.8h, v6.8b, v15.8b\n"
- "ldr d8, [x15, #0x40]\n"
- "ldr x28, [%x[params], %[offsetof_Params_bias]]\n"
- "ssubl v7.8h, v7.8b, v15.8b\n"
- "ssubl v8.8h, v8.8b, v15.8b\n"
- "ldr q13, [x28, #0x0]\n"
- "ldr q20, [x28, #0x10]\n"
- "add x28, x28, #0x20\n"
- "str x28, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x24, x23, [x16, #0x0]\n"
- "ldp x22, x21, [x16, #0x10]\n"
+ "add x21, x19, %[offsetof_Requantize32_c_offset]\n"
+ "add x20, x19, %[offsetof_Requantize32_minval]\n"
+ "ldr x17, [%x[params], %[offsetof_Params_weights]]\n"
+ "add x19, x19, %[offsetof_Requantize32_maxval]\n"
+ "ld1r { v22.16b }, [x24]\n"
+ "ld1r { v12.16b }, [x23]\n"
+ "lsr x16, x8, #0x3\n"
+ "ld1r { v14.8h }, [x21]\n"
+ "ld1r { v17.8h }, [x20]\n"
+ "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
+ "ld1r { v15.8h }, [x19]\n"
+ "ldr x13, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "add x12, %x[params], %[offsetof_Params_inptrs]\n"
+ "ldr x11, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "ldp x10, x9, [x22, #0x0]\n"
+ "ldp x28, x27, [x22, #0x10]\n"
+ "cbz x16, 3f\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr q13, [x19, #0x0]\n"
+ "subs x16, x16, #0x1\n"
+ "mov v19.16b, v13.16b\n"
+ "ldr q26, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr d0, [x17, #0x0]\n"
+ "ldr d1, [x17, #0x8]\n"
+ "ldr d2, [x17, #0x10]\n"
+ "mov v11.16b, v26.16b\n"
+ "mov v18.16b, v13.16b\n"
+ "ldr d3, [x17, #0x18]\n"
+ "ldr d4, [x17, #0x20]\n"
+ "mov v24.16b, v26.16b\n"
"mov v9.16b, v13.16b\n"
- "mov v18.16b, v20.16b\n"
- "ldr d31, [x24, x8]\n"
- "ldr d30, [x23, x8]\n"
- "mov v16.16b, v13.16b\n"
- "mov v26.16b, v20.16b\n"
- "ldr d29, [x22, x8]\n"
- "ldr d28, [x21, x8]\n"
- "mov v25.16b, v13.16b\n"
- "mov v10.16b, v20.16b\n"
- "ldr x20, [x16, #0x20]\n"
- "ldr d27, [x20, x8]\n"
- "ssubl v31.8h, v31.8b, v24.8b\n"
- "ssubl v30.8h, v30.8b, v24.8b\n"
- "ssubl v29.8h, v29.8b, v24.8b\n"
- "ssubl v28.8h, v28.8b, v24.8b\n"
- "ssubl v27.8h, v27.8b, v24.8b\n"
+ "ldr d5, [x17, #0x28]\n"
+ "ldr d6, [x17, #0x30]\n"
+ "mov v23.16b, v26.16b\n"
+ "ssubl v0.8h, v0.8b, v12.8b\n"
+ "ldr d7, [x17, #0x38]\n"
+ "ldr d8, [x17, #0x40]\n"
+ "ssubl v1.8h, v1.8b, v12.8b\n"
+ "ssubl v2.8h, v2.8b, v12.8b\n"
+ "ldp x23, x22, [x12, #0x0]\n"
+ "ldp x21, x20, [x12, #0x10]\n"
+ "ssubl v3.8h, v3.8b, v12.8b\n"
+ "ssubl v4.8h, v4.8b, v12.8b\n"
+ "ldr x19, [x12, #0x20]\n"
+ "ldr d31, [x23, x15]\n"
+ "ssubl v5.8h, v5.8b, v12.8b\n"
+ "ssubl v6.8h, v6.8b, v12.8b\n"
+ "ldr d30, [x22, x15]\n"
+ "ldr d29, [x21, x15]\n"
+ "ssubl v7.8h, v7.8b, v12.8b\n"
+ "ssubl v8.8h, v8.8b, v12.8b\n"
+ "ldr d28, [x20, x15]\n"
+ "ldr d27, [x19, x15]\n"
+ "ssubl v31.8h, v31.8b, v22.8b\n"
+ "ssubl v30.8h, v30.8b, v22.8b\n"
+ "ssubl v29.8h, v29.8b, v22.8b\n"
+ "ssubl v28.8h, v28.8b, v22.8b\n"
+ "ssubl v27.8h, v27.8b, v22.8b\n"
"beq 2f\n"
"1:" // Loop
- "ldr q17, [x14, #0x0]\n"
- "ldr q22, [x13, #0x0]\n"
"smlal v13.4s, v31.4h, v4.4h\n"
- "smlal2 v20.4s, v31.8h, v4.8h\n"
- "ldr q23, [x14, #0x10]\n"
- "smlal v9.4s, v31.4h, v3.4h\n"
- "smlal2 v18.4s, v31.8h, v3.8h\n"
- "ldr x21, [x16, #0x28]\n"
+ "smlal2 v26.4s, v31.8h, v4.8h\n"
+ "ldr x21, [x12, #0x28]\n"
+ "ldr x26, [x12, #0x38]\n"
+ "smlal v19.4s, v31.4h, v3.4h\n"
+ "smlal2 v11.4s, v31.8h, v3.8h\n"
+ "ldr x20, [x12, #0x30]\n"
+ "ldr x25, [x12, #0x40]\n"
"smlal v13.4s, v30.4h, v0.4h\n"
- "smlal2 v20.4s, v30.8h, v0.8h\n"
- "ldr q19, [x13, #0x10]\n"
- "ldr x28, [x16, #0x38]\n"
- "smlal v9.4s, v29.4h, v2.4h\n"
- "smlal2 v18.4s, v29.8h, v2.8h\n"
- "ldr x20, [x16, #0x30]\n"
- "ldr d29, [x20, x8]\n"
- "smlal v16.4s, v31.4h, v1.4h\n"
- "smlal2 v26.4s, v31.8h, v1.8h\n"
- "ldr x27, [x16, #0x40]\n"
- "ldr x26, [x16, #0x48]\n"
- "smlal v25.4s, v31.4h, v0.4h\n"
- "smlal2 v10.4s, v31.8h, v0.8h\n"
- "ldr d31, [x21, x8]\n"
- "ssubl v31.8h, v31.8b, v24.8b\n"
+ "smlal2 v26.4s, v30.8h, v0.8h\n"
+ "ldr x19, [x12, #0x48]\n"
+ "ldr d30, [x19, x15]\n"
+ "smlal v19.4s, v29.4h, v2.4h\n"
+ "smlal2 v11.4s, v29.8h, v2.8h\n"
+ "ldr d29, [x20, x15]\n"
+ "ssubl v29.8h, v29.8b, v22.8b\n"
+ "smlal v18.4s, v31.4h, v1.4h\n"
+ "smlal2 v24.4s, v31.8h, v1.8h\n"
+ "ldr x24, [x12, #0x50]\n"
+ "ldr x23, [x12, #0x58]\n"
+ "smlal v9.4s, v31.4h, v0.4h\n"
+ "smlal2 v23.4s, v31.8h, v0.8h\n"
+ "ldr d31, [x21, x15]\n"
+ "ssubl v31.8h, v31.8b, v22.8b\n"
"smlal v13.4s, v28.4h, v5.4h\n"
- "smlal2 v20.4s, v28.8h, v5.8h\n"
- "ssubl v29.8h, v29.8b, v24.8b\n"
- "ldr x25, [x16, #0x50]\n"
- "smlal v9.4s, v28.4h, v4.4h\n"
- "smlal2 v18.4s, v28.8h, v4.8h\n"
- "ldr x24, [x16, #0x58]\n"
- "ldr x23, [x16, #0x60]\n"
- "smlal v16.4s, v28.4h, v2.4h\n"
- "smlal2 v26.4s, v28.8h, v2.8h\n"
- "ldr x22, [x16, #0x68]\n"
- "ldr x21, [x16, #0x70]\n"
- "smlal v25.4s, v28.4h, v1.4h\n"
- "smlal2 v10.4s, v28.8h, v1.8h\n"
- "ldr d28, [x28, x8]\n"
- "ssubl v28.8h, v28.8b, v24.8b\n"
+ "smlal2 v26.4s, v28.8h, v5.8h\n"
+ "ssubl v30.8h, v30.8b, v22.8b\n"
+ "ldr x22, [x12, #0x60]\n"
+ "smlal v19.4s, v28.4h, v4.4h\n"
+ "smlal2 v11.4s, v28.8h, v4.8h\n"
+ "ldr x21, [x12, #0x68]\n"
+ "ldr x20, [x12, #0x70]\n"
+ "smlal v18.4s, v28.4h, v2.4h\n"
+ "smlal2 v24.4s, v28.8h, v2.8h\n"
+ "ldr x19, [x12, #0x78]\n"
+ "ldr q21, [x13, #0x0]\n"
+ "smlal v9.4s, v28.4h, v1.4h\n"
+ "smlal2 v23.4s, v28.8h, v1.8h\n"
+ "ldr d28, [x26, x15]\n"
+ "ssubl v28.8h, v28.8b, v22.8b\n"
"smlal v13.4s, v27.4h, v7.4h\n"
- "smlal2 v20.4s, v27.8h, v7.8h\n"
- "ldr x20, [x16, #0x78]\n"
- "ldr x28, [%x[params], %[offsetof_Params_bias]]\n"
- "smlal v9.4s, v27.4h, v6.4h\n"
- "smlal2 v18.4s, v27.8h, v6.8h\n"
- "add x15, x15, #0x48\n"
- "subs x7, x7, #0x1\n"
- "smlal v16.4s, v31.4h, v6.4h\n"
- "smlal2 v26.4s, v31.8h, v6.8h\n"
- "ldr d31, [x27, x8]\n"
- "ssubl v31.8h, v31.8b, v24.8b\n"
- "smlal v25.4s, v27.4h, v3.4h\n"
- "smlal2 v10.4s, v27.8h, v3.8h\n"
- "add x14, x14, #0x20\n"
+ "smlal2 v26.4s, v27.8h, v7.8h\n"
+ "ldr q25, [x11, #0x0]\n"
+ "ldr q10, [x13, #0x10]\n"
+ "smlal v19.4s, v27.4h, v6.4h\n"
+ "smlal2 v11.4s, v27.8h, v6.8h\n"
+ "ldr q16, [x11, #0x10]\n"
+ "add x17, x17, #0x48\n"
+ "smlal v18.4s, v31.4h, v6.4h\n"
+ "smlal2 v24.4s, v31.8h, v6.8h\n"
+ "ldr d31, [x25, x15]\n"
+ "ssubl v31.8h, v31.8b, v22.8b\n"
+ "smlal v9.4s, v27.4h, v3.4h\n"
+ "smlal2 v23.4s, v27.8h, v3.8h\n"
+ "subs x16, x16, #0x1\n"
"add x13, x13, #0x20\n"
"smlal v13.4s, v28.4h, v1.4h\n"
- "smlal2 v20.4s, v28.8h, v1.8h\n"
- "smlal v9.4s, v28.4h, v0.4h\n"
- "smlal2 v18.4s, v28.8h, v0.8h\n"
- "ldr d30, [x26, x8]\n"
- "ssubl v30.8h, v30.8b, v24.8b\n"
- "smlal v16.4s, v27.4h, v4.4h\n"
- "smlal v25.4s, v29.4h, v8.4h\n"
- "smlal2 v26.4s, v27.8h, v4.8h\n"
- "ldr d28, [x24, x8]\n"
- "smlal2 v10.4s, v29.8h, v8.8h\n"
- "ldr d29, [x25, x8]\n"
+ "smlal2 v26.4s, v28.8h, v1.8h\n"
+ "add x11, x11, #0x20\n"
+ "smlal v19.4s, v28.4h, v0.4h\n"
+ "smlal2 v11.4s, v28.8h, v0.8h\n"
+ "ldr d28, [x23, x15]\n"
+ "ssubl v28.8h, v28.8b, v22.8b\n"
+ "smlal v18.4s, v27.4h, v4.4h\n"
+ "smlal v9.4s, v29.4h, v8.4h\n"
+ "smlal2 v24.4s, v27.8h, v4.8h\n"
+ "smlal2 v23.4s, v29.8h, v8.8h\n"
+ "ldr d29, [x24, x15]\n"
+ "ssubl v29.8h, v29.8b, v22.8b\n"
"smlal v13.4s, v31.4h, v2.4h\n"
- "smlal2 v20.4s, v31.8h, v2.8h\n"
- "ssubl v29.8h, v29.8b, v24.8b\n"
- "smlal v9.4s, v31.4h, v1.4h\n"
- "smlal2 v18.4s, v31.8h, v1.8h\n"
- "ldr d31, [x23, x8]\n"
- "ssubl v28.8h, v28.8b, v24.8b\n"
- "smlal v16.4s, v30.4h, v5.4h\n"
- "smlal v25.4s, v30.4h, v4.4h\n"
- "ssubl v31.8h, v31.8b, v24.8b\n"
+ "smlal2 v26.4s, v31.8h, v2.8h\n"
+ "smlal v19.4s, v31.4h, v1.4h\n"
+ "smlal2 v11.4s, v31.8h, v1.8h\n"
+ "ldr d31, [x22, x15]\n"
+ "ssubl v31.8h, v31.8b, v22.8b\n"
+ "smlal v18.4s, v30.4h, v5.4h\n"
+ "smlal v9.4s, v30.4h, v4.4h\n"
"smlal v13.4s, v30.4h, v8.4h\n"
- "smlal2 v20.4s, v30.8h, v8.8h\n"
- "smlal v9.4s, v30.4h, v7.4h\n"
- "smlal2 v18.4s, v30.8h, v7.8h\n"
- "smlal2 v26.4s, v30.8h, v5.8h\n"
- "smlal2 v10.4s, v30.8h, v4.8h\n"
- "ldr d30, [x22, x8]\n"
- "ssubl v30.8h, v30.8b, v24.8b\n"
- "smlal v16.4s, v29.4h, v0.4h\n"
- "smlal v25.4s, v28.4h, v2.4h\n"
+ "smlal2 v26.4s, v30.8h, v8.8h\n"
+ "smlal v19.4s, v30.4h, v7.4h\n"
+ "smlal2 v11.4s, v30.8h, v7.8h\n"
+ "smlal2 v24.4s, v30.8h, v5.8h\n"
+ "smlal2 v23.4s, v30.8h, v4.8h\n"
+ "ldr d30, [x21, x15]\n"
+ "ssubl v30.8h, v30.8b, v22.8b\n"
+ "smlal v18.4s, v29.4h, v0.4h\n"
+ "smlal v9.4s, v28.4h, v2.4h\n"
"smlal v13.4s, v29.4h, v3.4h\n"
- "smlal2 v20.4s, v29.8h, v3.8h\n"
- "smlal2 v26.4s, v29.8h, v0.8h\n"
- "ldr d29, [x21, x8]\n"
- "smlal2 v10.4s, v28.8h, v2.8h\n"
- "ssubl v29.8h, v29.8b, v24.8b\n"
- "smlal v16.4s, v31.4h, v3.4h\n"
- "smlal v25.4s, v30.4h, v5.4h\n"
- "smlal v9.4s, v28.4h, v5.4h\n"
- "smlal2 v18.4s, v28.8h, v5.8h\n"
- "ldr d28, [x20, x8]\n"
- "ssubl v28.8h, v28.8b, v24.8b\n"
+ "smlal2 v26.4s, v29.8h, v3.8h\n"
+ "smlal2 v24.4s, v29.8h, v0.8h\n"
+ "ldr d29, [x20, x15]\n"
+ "smlal2 v23.4s, v28.8h, v2.8h\n"
+ "ssubl v29.8h, v29.8b, v22.8b\n"
+ "smlal v18.4s, v31.4h, v3.4h\n"
+ "smlal v9.4s, v30.4h, v5.4h\n"
+ "smlal v19.4s, v28.4h, v5.4h\n"
+ "smlal2 v11.4s, v28.8h, v5.8h\n"
+ "ldr d28, [x19, x15]\n"
+ "ssubl v28.8h, v28.8b, v22.8b\n"
+ "smlal2 v24.4s, v31.8h, v3.8h\n"
+ "smlal2 v23.4s, v30.8h, v5.8h\n"
+ "add x15, x15, #0x8\n"
+ "smlal v18.4s, v29.4h, v7.4h\n"
+ "smlal v9.4s, v29.4h, v6.4h\n"
+ "smlal2 v24.4s, v29.8h, v7.8h\n"
+ "smlal2 v23.4s, v29.8h, v6.8h\n"
"smlal v13.4s, v31.4h, v6.4h\n"
- "smlal2 v26.4s, v31.8h, v3.8h\n"
- "sqrdmulh v13.4s, v13.4s, v17.4s\n"
- "add x8, x8, #0x8\n"
- "smlal2 v10.4s, v30.8h, v5.8h\n"
- "smlal v16.4s, v29.4h, v7.4h\n"
- "and v21.16b, v13.16b, v22.16b\n"
- "smlal v25.4s, v29.4h, v6.4h\n"
- "smlal2 v20.4s, v31.8h, v6.8h\n"
- "sqrdmulh v20.4s, v20.4s, v23.4s\n"
- "smlal2 v26.4s, v29.8h, v7.8h\n"
- "smlal2 v10.4s, v29.8h, v6.8h\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "smlal v9.4s, v30.4h, v8.4h\n"
- "smlal v16.4s, v28.4h, v8.4h\n"
- "and v29.16b, v20.16b, v19.16b\n"
- "smlal v25.4s, v28.4h, v7.4h\n"
- "smlal2 v18.4s, v30.8h, v8.8h\n"
- "sqrdmulh v9.4s, v9.4s, v17.4s\n"
- "smlal2 v26.4s, v28.8h, v8.8h\n"
- "smlal2 v10.4s, v28.8h, v7.8h\n"
- "sqrdmulh v16.4s, v16.4s, v17.4s\n"
- "sqrdmulh v25.4s, v25.4s, v17.4s\n"
- "sqadd v13.4s, v13.4s, v21.4s\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "and v0.16b, v9.16b, v22.16b\n"
- "sqrdmulh v18.4s, v18.4s, v23.4s\n"
- "and v27.16b, v16.16b, v22.16b\n"
- "sqrdmulh v26.4s, v26.4s, v23.4s\n"
- "and v21.16b, v25.16b, v22.16b\n"
- "sqrdmulh v10.4s, v10.4s, v23.4s\n"
- "sqadd v20.4s, v20.4s, v29.4s\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "and v17.16b, v18.16b, v19.16b\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "and v7.16b, v26.16b, v19.16b\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "and v29.16b, v10.16b, v19.16b\n"
- "sqadd v9.4s, v9.4s, v0.4s\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v27.4s\n"
+ "smlal v19.4s, v30.4h, v8.4h\n"
+ "sqrdmulh v13.4s, v13.4s, v21.4s\n"
+ "smlal v18.4s, v28.4h, v8.4h\n"
+ "smlal v9.4s, v28.4h, v7.4h\n"
+ "sqrdmulh v19.4s, v19.4s, v21.4s\n"
+ "smlal2 v26.4s, v31.8h, v6.8h\n"
+ "smlal2 v11.4s, v30.8h, v8.8h\n"
+ "sqrdmulh v18.4s, v18.4s, v21.4s\n"
+ "smlal2 v24.4s, v28.8h, v8.8h\n"
+ "smlal2 v23.4s, v28.8h, v7.8h\n"
+ "sqrdmulh v9.4s, v9.4s, v21.4s\n"
+ "and v7.16b, v13.16b, v25.16b\n"
+ "sqrdmulh v26.4s, v26.4s, v10.4s\n"
+ "and v4.16b, v19.16b, v25.16b\n"
+ "sqrdmulh v11.4s, v11.4s, v10.4s\n"
+ "and v21.16b, v18.16b, v25.16b\n"
+ "sqrdmulh v24.4s, v24.4s, v10.4s\n"
+ "and v20.16b, v9.16b, v25.16b\n"
+ "sqrdmulh v23.4s, v23.4s, v10.4s\n"
"sshr v7.4s, v7.4s, #0x1f\n"
- "sqadd v25.4s, v25.4s, v21.4s\n"
+ "and v29.16b, v26.16b, v16.16b\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "and v10.16b, v11.16b, v16.16b\n"
+ "sshr v21.4s, v21.4s, #0x1f\n"
+ "and v31.16b, v24.16b, v16.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v30.16b, v23.16b, v16.16b\n"
+ "sqadd v13.4s, v13.4s, v7.4s\n"
"sshr v29.4s, v29.4s, #0x1f\n"
- "srshl v13.4s, v13.4s, v22.4s\n"
- "srshl v9.4s, v9.4s, v22.4s\n"
- "sqadd v18.4s, v18.4s, v17.4s\n"
- "srshl v16.4s, v16.4s, v22.4s\n"
- "sqadd v26.4s, v26.4s, v7.4s\n"
- "srshl v25.4s, v25.4s, v22.4s\n"
- "sqadd v10.4s, v10.4s, v29.4s\n"
- "srshl v20.4s, v20.4s, v19.4s\n"
+ "sqadd v19.4s, v19.4s, v4.4s\n"
+ "sshr v10.4s, v10.4s, #0x1f\n"
+ "sqadd v18.4s, v18.4s, v21.4s\n"
+ "sshr v31.4s, v31.4s, #0x1f\n"
+ "sqadd v9.4s, v9.4s, v20.4s\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "srshl v13.4s, v13.4s, v25.4s\n"
+ "sqadd v26.4s, v26.4s, v29.4s\n"
+ "srshl v19.4s, v19.4s, v25.4s\n"
+ "sqadd v11.4s, v11.4s, v10.4s\n"
+ "srshl v18.4s, v18.4s, v25.4s\n"
+ "sqadd v24.4s, v24.4s, v31.4s\n"
+ "srshl v9.4s, v9.4s, v25.4s\n"
+ "sqadd v23.4s, v23.4s, v30.4s\n"
+ "srshl v26.4s, v26.4s, v16.4s\n"
"sqxtn v13.4h, v13.4s\n"
- "srshl v18.4s, v18.4s, v19.4s\n"
+ "srshl v11.4s, v11.4s, v16.4s\n"
+ "sqxtn v19.4h, v19.4s\n"
+ "srshl v24.4s, v24.4s, v16.4s\n"
+ "sqxtn v18.4h, v18.4s\n"
+ "srshl v23.4s, v23.4s, v16.4s\n"
"sqxtn v9.4h, v9.4s\n"
- "srshl v26.4s, v26.4s, v19.4s\n"
- "sqxtn v16.4h, v16.4s\n"
- "srshl v10.4s, v10.4s, v19.4s\n"
- "sqxtn v25.4h, v25.4s\n"
- "sqxtn2 v13.8h, v20.4s\n"
- "sqxtn2 v9.8h, v18.4s\n"
- "sqxtn2 v16.8h, v26.4s\n"
- "sqxtn2 v25.8h, v10.4s\n"
+ "sqxtn2 v13.8h, v26.4s\n"
+ "sqxtn2 v19.8h, v11.4s\n"
+ "sqxtn2 v18.8h, v24.4s\n"
+ "sqxtn2 v9.8h, v23.4s\n"
"sqadd v13.8h, v13.8h, v14.8h\n"
+ "sqadd v19.8h, v19.8h, v14.8h\n"
+ "sqadd v18.8h, v18.8h, v14.8h\n"
"sqadd v9.8h, v9.8h, v14.8h\n"
- "sqadd v16.8h, v16.8h, v14.8h\n"
- "sqadd v25.8h, v25.8h, v14.8h\n"
- "smax v13.8h, v13.8h, v12.8h\n"
- "smax v9.8h, v9.8h, v12.8h\n"
- "smax v16.8h, v16.8h, v12.8h\n"
- "smax v25.8h, v25.8h, v12.8h\n"
- "smin v13.8h, v13.8h, v11.8h\n"
- "smin v9.8h, v9.8h, v11.8h\n"
- "smin v16.8h, v16.8h, v11.8h\n"
- "smin v25.8h, v25.8h, v11.8h\n"
+ "smax v13.8h, v13.8h, v17.8h\n"
+ "smax v19.8h, v19.8h, v17.8h\n"
+ "smax v18.8h, v18.8h, v17.8h\n"
+ "smax v9.8h, v9.8h, v17.8h\n"
+ "smin v13.8h, v13.8h, v15.8h\n"
+ "smin v19.8h, v19.8h, v15.8h\n"
+ "smin v18.8h, v18.8h, v15.8h\n"
+ "smin v9.8h, v9.8h, v15.8h\n"
"uzp1 v13.16b, v13.16b, v13.16b\n"
- "str d13, [x12, x17]\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str d13, [x10, x14]\n"
+ "uzp1 v18.16b, v18.16b, v18.16b\n"
"uzp1 v9.16b, v9.16b, v9.16b\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "str d9, [x11, x17]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "str d16, [x10, x17]\n"
- "str d25, [x9, x17]\n"
- "ldr q13, [x28, #0x0]\n"
- "ldr q20, [x28, #0x10]\n"
- "add x28, x28, #0x20\n"
- "ldr d0, [x15, #0x0]\n"
- "ldr d1, [x15, #0x8]\n"
- "add x17, x17, #0x8\n"
- "str x28, [%x[params], %[offsetof_Params_bias]]\n"
- "ldr d2, [x15, #0x10]\n"
- "ldr d3, [x15, #0x18]\n"
+ "str d19, [x9, x14]\n"
+ "str d18, [x28, x14]\n"
+ "str d9, [x27, x14]\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr q13, [x19, #0x0]\n"
+ "add x14, x14, #0x8\n"
+ "ldr q26, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr d0, [x17, #0x0]\n"
+ "ldr d1, [x17, #0x8]\n"
+ "ldr d2, [x17, #0x10]\n"
+ "mov v19.16b, v13.16b\n"
+ "mov v11.16b, v26.16b\n"
+ "ldr d3, [x17, #0x18]\n"
+ "ldr d4, [x17, #0x20]\n"
+ "mov v18.16b, v13.16b\n"
+ "mov v24.16b, v26.16b\n"
+ "ldr d5, [x17, #0x28]\n"
+ "ldr d6, [x17, #0x30]\n"
"mov v9.16b, v13.16b\n"
- "mov v18.16b, v20.16b\n"
- "ldr d4, [x15, #0x20]\n"
- "ldr d5, [x15, #0x28]\n"
- "mov v16.16b, v13.16b\n"
- "mov v26.16b, v20.16b\n"
- "ldr d6, [x15, #0x30]\n"
- "ldr d7, [x15, #0x38]\n"
- "mov v25.16b, v13.16b\n"
- "mov v10.16b, v20.16b\n"
- "ldr d8, [x15, #0x40]\n"
- "ldp x24, x23, [x16, #0x0]\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
- "ldp x22, x21, [x16, #0x10]\n"
- "ldr d31, [x24, x8]\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "ldr d30, [x23, x8]\n"
- "ldr d29, [x22, x8]\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "ssubl v5.8h, v5.8b, v15.8b\n"
- "ldr d28, [x21, x8]\n"
- "ldr x20, [x16, #0x20]\n"
- "ssubl v6.8h, v6.8b, v15.8b\n"
- "ssubl v7.8h, v7.8b, v15.8b\n"
- "ldr d27, [x20, x8]\n"
- "ssubl v8.8h, v8.8b, v15.8b\n"
- "ssubl v31.8h, v31.8b, v24.8b\n"
- "ssubl v30.8h, v30.8b, v24.8b\n"
- "ssubl v29.8h, v29.8b, v24.8b\n"
- "ssubl v28.8h, v28.8b, v24.8b\n"
- "ssubl v27.8h, v27.8b, v24.8b\n"
+ "mov v23.16b, v26.16b\n"
+ "ldr d7, [x17, #0x38]\n"
+ "ldr d8, [x17, #0x40]\n"
+ "ssubl v0.8h, v0.8b, v12.8b\n"
+ "ssubl v1.8h, v1.8b, v12.8b\n"
+ "ldp x23, x22, [x12, #0x0]\n"
+ "ldp x21, x20, [x12, #0x10]\n"
+ "ssubl v2.8h, v2.8b, v12.8b\n"
+ "ssubl v3.8h, v3.8b, v12.8b\n"
+ "ldr x19, [x12, #0x20]\n"
+ "ldr d31, [x23, x15]\n"
+ "ssubl v4.8h, v4.8b, v12.8b\n"
+ "ssubl v5.8h, v5.8b, v12.8b\n"
+ "ldr d30, [x22, x15]\n"
+ "ldr d29, [x21, x15]\n"
+ "ssubl v6.8h, v6.8b, v12.8b\n"
+ "ssubl v7.8h, v7.8b, v12.8b\n"
+ "ldr d28, [x20, x15]\n"
+ "ldr d27, [x19, x15]\n"
+ "ssubl v8.8h, v8.8b, v12.8b\n"
+ "ssubl v31.8h, v31.8b, v22.8b\n"
+ "ssubl v30.8h, v30.8b, v22.8b\n"
+ "ssubl v29.8h, v29.8b, v22.8b\n"
+ "ssubl v28.8h, v28.8b, v22.8b\n"
+ "ssubl v27.8h, v27.8b, v22.8b\n"
"bgt 1b\n"
"2:" // Tail
- "ldr q17, [x14, #0x0]\n"
- "ldr q22, [x13, #0x0]\n"
"smlal v13.4s, v31.4h, v4.4h\n"
- "smlal2 v20.4s, v31.8h, v4.8h\n"
- "ldr q23, [x14, #0x10]\n"
- "smlal v9.4s, v31.4h, v3.4h\n"
- "smlal2 v18.4s, v31.8h, v3.8h\n"
- "ldr x21, [x16, #0x28]\n"
+ "smlal2 v26.4s, v31.8h, v4.8h\n"
+ "ldr x21, [x12, #0x28]\n"
+ "ldr x26, [x12, #0x38]\n"
+ "smlal v19.4s, v31.4h, v3.4h\n"
+ "smlal2 v11.4s, v31.8h, v3.8h\n"
+ "ldr x20, [x12, #0x30]\n"
+ "ldr x25, [x12, #0x40]\n"
"smlal v13.4s, v30.4h, v0.4h\n"
- "smlal2 v20.4s, v30.8h, v0.8h\n"
- "ldr q19, [x13, #0x10]\n"
- "ldr x28, [x16, #0x38]\n"
- "smlal v9.4s, v29.4h, v2.4h\n"
- "smlal2 v18.4s, v29.8h, v2.8h\n"
- "ldr x20, [x16, #0x30]\n"
- "ldr d29, [x20, x8]\n"
- "smlal v16.4s, v31.4h, v1.4h\n"
- "smlal2 v26.4s, v31.8h, v1.8h\n"
- "ldr x27, [x16, #0x40]\n"
- "ldr x26, [x16, #0x48]\n"
- "smlal v25.4s, v31.4h, v0.4h\n"
- "smlal2 v10.4s, v31.8h, v0.8h\n"
- "ldr d31, [x21, x8]\n"
- "ssubl v31.8h, v31.8b, v24.8b\n"
+ "smlal2 v26.4s, v30.8h, v0.8h\n"
+ "ldr x19, [x12, #0x48]\n"
+ "ldr d30, [x19, x15]\n"
+ "smlal v19.4s, v29.4h, v2.4h\n"
+ "smlal2 v11.4s, v29.8h, v2.8h\n"
+ "ldr d29, [x20, x15]\n"
+ "ssubl v29.8h, v29.8b, v22.8b\n"
+ "smlal v18.4s, v31.4h, v1.4h\n"
+ "smlal2 v24.4s, v31.8h, v1.8h\n"
+ "ldr x24, [x12, #0x50]\n"
+ "ldr x23, [x12, #0x58]\n"
+ "smlal v9.4s, v31.4h, v0.4h\n"
+ "smlal2 v23.4s, v31.8h, v0.8h\n"
+ "ldr d31, [x21, x15]\n"
+ "ssubl v31.8h, v31.8b, v22.8b\n"
"smlal v13.4s, v28.4h, v5.4h\n"
- "smlal2 v20.4s, v28.8h, v5.8h\n"
- "ssubl v29.8h, v29.8b, v24.8b\n"
- "ldr x25, [x16, #0x50]\n"
- "smlal v9.4s, v28.4h, v4.4h\n"
- "smlal2 v18.4s, v28.8h, v4.8h\n"
- "ldr x24, [x16, #0x58]\n"
- "ldr x23, [x16, #0x60]\n"
- "smlal v16.4s, v28.4h, v2.4h\n"
- "smlal2 v26.4s, v28.8h, v2.8h\n"
- "ldr x22, [x16, #0x68]\n"
- "ldr x21, [x16, #0x70]\n"
- "smlal v25.4s, v28.4h, v1.4h\n"
- "smlal2 v10.4s, v28.8h, v1.8h\n"
- "ldr d28, [x28, x8]\n"
- "ssubl v28.8h, v28.8b, v24.8b\n"
+ "smlal2 v26.4s, v28.8h, v5.8h\n"
+ "ssubl v30.8h, v30.8b, v22.8b\n"
+ "ldr x22, [x12, #0x60]\n"
+ "smlal v19.4s, v28.4h, v4.4h\n"
+ "smlal2 v11.4s, v28.8h, v4.8h\n"
+ "ldr x21, [x12, #0x68]\n"
+ "ldr x20, [x12, #0x70]\n"
+ "smlal v18.4s, v28.4h, v2.4h\n"
+ "smlal2 v24.4s, v28.8h, v2.8h\n"
+ "ldr x19, [x12, #0x78]\n"
+ "ldr q21, [x13, #0x0]\n"
+ "smlal v9.4s, v28.4h, v1.4h\n"
+ "smlal2 v23.4s, v28.8h, v1.8h\n"
+ "ldr d28, [x26, x15]\n"
+ "ssubl v28.8h, v28.8b, v22.8b\n"
"smlal v13.4s, v27.4h, v7.4h\n"
- "smlal2 v20.4s, v27.8h, v7.8h\n"
- "ldr x20, [x16, #0x78]\n"
- "tst x6, #0x7\n"
- "smlal v9.4s, v27.4h, v6.4h\n"
- "smlal2 v18.4s, v27.8h, v6.8h\n"
- "add x14, x14, #0x20\n"
+ "smlal2 v26.4s, v27.8h, v7.8h\n"
+ "ldr q25, [x11, #0x0]\n"
+ "ldr q10, [x13, #0x10]\n"
+ "smlal v19.4s, v27.4h, v6.4h\n"
+ "smlal2 v11.4s, v27.8h, v6.8h\n"
+ "ldr q16, [x11, #0x10]\n"
+ "tst x8, #0x7\n"
+ "smlal v18.4s, v31.4h, v6.4h\n"
+ "smlal2 v24.4s, v31.8h, v6.8h\n"
+ "ldr d31, [x25, x15]\n"
+ "ssubl v31.8h, v31.8b, v22.8b\n"
+ "smlal v9.4s, v27.4h, v3.4h\n"
+ "smlal2 v23.4s, v27.8h, v3.8h\n"
"add x13, x13, #0x20\n"
- "smlal v16.4s, v31.4h, v6.4h\n"
- "smlal2 v26.4s, v31.8h, v6.8h\n"
- "ldr d31, [x27, x8]\n"
- "ssubl v31.8h, v31.8b, v24.8b\n"
- "smlal v25.4s, v27.4h, v3.4h\n"
- "smlal2 v10.4s, v27.8h, v3.8h\n"
+ "add x11, x11, #0x20\n"
"smlal v13.4s, v28.4h, v1.4h\n"
- "smlal2 v20.4s, v28.8h, v1.8h\n"
- "smlal v9.4s, v28.4h, v0.4h\n"
- "smlal2 v18.4s, v28.8h, v0.8h\n"
- "ldr d30, [x26, x8]\n"
- "ssubl v30.8h, v30.8b, v24.8b\n"
- "smlal v16.4s, v27.4h, v4.4h\n"
- "smlal v25.4s, v29.4h, v8.4h\n"
- "smlal2 v26.4s, v27.8h, v4.8h\n"
- "ldr d28, [x24, x8]\n"
- "smlal2 v10.4s, v29.8h, v8.8h\n"
- "ldr d29, [x25, x8]\n"
+ "smlal2 v26.4s, v28.8h, v1.8h\n"
+ "smlal v19.4s, v28.4h, v0.4h\n"
+ "smlal2 v11.4s, v28.8h, v0.8h\n"
+ "ldr d28, [x23, x15]\n"
+ "ssubl v28.8h, v28.8b, v22.8b\n"
+ "smlal v18.4s, v27.4h, v4.4h\n"
+ "smlal v9.4s, v29.4h, v8.4h\n"
+ "smlal2 v24.4s, v27.8h, v4.8h\n"
+ "smlal2 v23.4s, v29.8h, v8.8h\n"
+ "ldr d29, [x24, x15]\n"
+ "ssubl v29.8h, v29.8b, v22.8b\n"
"smlal v13.4s, v31.4h, v2.4h\n"
- "smlal2 v20.4s, v31.8h, v2.8h\n"
- "ssubl v29.8h, v29.8b, v24.8b\n"
- "smlal v9.4s, v31.4h, v1.4h\n"
- "smlal2 v18.4s, v31.8h, v1.8h\n"
- "ldr d31, [x23, x8]\n"
- "ssubl v28.8h, v28.8b, v24.8b\n"
- "smlal v16.4s, v30.4h, v5.4h\n"
- "smlal v25.4s, v30.4h, v4.4h\n"
- "ssubl v31.8h, v31.8b, v24.8b\n"
+ "smlal2 v26.4s, v31.8h, v2.8h\n"
+ "smlal v19.4s, v31.4h, v1.4h\n"
+ "smlal2 v11.4s, v31.8h, v1.8h\n"
+ "ldr d31, [x22, x15]\n"
+ "ssubl v31.8h, v31.8b, v22.8b\n"
+ "smlal v18.4s, v30.4h, v5.4h\n"
+ "smlal v9.4s, v30.4h, v4.4h\n"
"smlal v13.4s, v30.4h, v8.4h\n"
- "smlal2 v20.4s, v30.8h, v8.8h\n"
- "smlal v9.4s, v30.4h, v7.4h\n"
- "smlal2 v18.4s, v30.8h, v7.8h\n"
- "smlal2 v26.4s, v30.8h, v5.8h\n"
- "smlal2 v10.4s, v30.8h, v4.8h\n"
- "ldr d30, [x22, x8]\n"
- "ssubl v30.8h, v30.8b, v24.8b\n"
- "smlal v16.4s, v29.4h, v0.4h\n"
- "smlal v25.4s, v28.4h, v2.4h\n"
+ "smlal2 v26.4s, v30.8h, v8.8h\n"
+ "smlal v19.4s, v30.4h, v7.4h\n"
+ "smlal2 v11.4s, v30.8h, v7.8h\n"
+ "smlal2 v24.4s, v30.8h, v5.8h\n"
+ "smlal2 v23.4s, v30.8h, v4.8h\n"
+ "ldr d30, [x21, x15]\n"
+ "ssubl v30.8h, v30.8b, v22.8b\n"
+ "smlal v18.4s, v29.4h, v0.4h\n"
+ "smlal v9.4s, v28.4h, v2.4h\n"
"smlal v13.4s, v29.4h, v3.4h\n"
- "smlal2 v20.4s, v29.8h, v3.8h\n"
- "smlal2 v26.4s, v29.8h, v0.8h\n"
- "ldr d29, [x21, x8]\n"
- "smlal2 v10.4s, v28.8h, v2.8h\n"
- "ssubl v29.8h, v29.8b, v24.8b\n"
- "smlal v16.4s, v31.4h, v3.4h\n"
- "smlal v25.4s, v30.4h, v5.4h\n"
- "smlal v9.4s, v28.4h, v5.4h\n"
- "smlal2 v18.4s, v28.8h, v5.8h\n"
- "ldr d28, [x20, x8]\n"
- "ssubl v28.8h, v28.8b, v24.8b\n"
+ "smlal2 v26.4s, v29.8h, v3.8h\n"
+ "smlal2 v24.4s, v29.8h, v0.8h\n"
+ "ldr d29, [x20, x15]\n"
+ "smlal2 v23.4s, v28.8h, v2.8h\n"
+ "ssubl v29.8h, v29.8b, v22.8b\n"
+ "smlal v18.4s, v31.4h, v3.4h\n"
+ "smlal v9.4s, v30.4h, v5.4h\n"
+ "smlal v19.4s, v28.4h, v5.4h\n"
+ "smlal2 v11.4s, v28.8h, v5.8h\n"
+ "ldr d28, [x19, x15]\n"
+ "ssubl v28.8h, v28.8b, v22.8b\n"
+ "smlal2 v24.4s, v31.8h, v3.8h\n"
+ "smlal2 v23.4s, v30.8h, v5.8h\n"
+ "add x15, x15, #0x8\n"
+ "smlal v18.4s, v29.4h, v7.4h\n"
+ "smlal v9.4s, v29.4h, v6.4h\n"
+ "smlal2 v24.4s, v29.8h, v7.8h\n"
+ "smlal2 v23.4s, v29.8h, v6.8h\n"
"smlal v13.4s, v31.4h, v6.4h\n"
- "smlal2 v26.4s, v31.8h, v3.8h\n"
- "sqrdmulh v13.4s, v13.4s, v17.4s\n"
- "add x8, x8, #0x8\n"
- "smlal2 v10.4s, v30.8h, v5.8h\n"
- "smlal v16.4s, v29.4h, v7.4h\n"
- "and v21.16b, v13.16b, v22.16b\n"
- "smlal v25.4s, v29.4h, v6.4h\n"
- "smlal2 v20.4s, v31.8h, v6.8h\n"
- "sqrdmulh v20.4s, v20.4s, v23.4s\n"
- "smlal2 v26.4s, v29.8h, v7.8h\n"
- "smlal2 v10.4s, v29.8h, v6.8h\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "smlal v9.4s, v30.4h, v8.4h\n"
- "smlal v16.4s, v28.4h, v8.4h\n"
- "and v29.16b, v20.16b, v19.16b\n"
- "smlal v25.4s, v28.4h, v7.4h\n"
- "smlal2 v18.4s, v30.8h, v8.8h\n"
- "sqrdmulh v9.4s, v9.4s, v17.4s\n"
- "smlal2 v26.4s, v28.8h, v8.8h\n"
- "smlal2 v10.4s, v28.8h, v7.8h\n"
- "sqrdmulh v16.4s, v16.4s, v17.4s\n"
- "sqrdmulh v25.4s, v25.4s, v17.4s\n"
- "sqadd v13.4s, v13.4s, v21.4s\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "and v0.16b, v9.16b, v22.16b\n"
- "sqrdmulh v18.4s, v18.4s, v23.4s\n"
- "and v27.16b, v16.16b, v22.16b\n"
- "sqrdmulh v26.4s, v26.4s, v23.4s\n"
- "and v21.16b, v25.16b, v22.16b\n"
- "sqrdmulh v10.4s, v10.4s, v23.4s\n"
- "sqadd v20.4s, v20.4s, v29.4s\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "and v17.16b, v18.16b, v19.16b\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "and v7.16b, v26.16b, v19.16b\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "and v29.16b, v10.16b, v19.16b\n"
- "sqadd v9.4s, v9.4s, v0.4s\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v27.4s\n"
+ "smlal v19.4s, v30.4h, v8.4h\n"
+ "sqrdmulh v13.4s, v13.4s, v21.4s\n"
+ "smlal v18.4s, v28.4h, v8.4h\n"
+ "smlal v9.4s, v28.4h, v7.4h\n"
+ "sqrdmulh v19.4s, v19.4s, v21.4s\n"
+ "smlal2 v26.4s, v31.8h, v6.8h\n"
+ "smlal2 v11.4s, v30.8h, v8.8h\n"
+ "sqrdmulh v18.4s, v18.4s, v21.4s\n"
+ "smlal2 v24.4s, v28.8h, v8.8h\n"
+ "smlal2 v23.4s, v28.8h, v7.8h\n"
+ "sqrdmulh v9.4s, v9.4s, v21.4s\n"
+ "and v7.16b, v13.16b, v25.16b\n"
+ "sqrdmulh v26.4s, v26.4s, v10.4s\n"
+ "and v4.16b, v19.16b, v25.16b\n"
+ "sqrdmulh v11.4s, v11.4s, v10.4s\n"
+ "and v21.16b, v18.16b, v25.16b\n"
+ "sqrdmulh v24.4s, v24.4s, v10.4s\n"
+ "and v20.16b, v9.16b, v25.16b\n"
+ "sqrdmulh v23.4s, v23.4s, v10.4s\n"
"sshr v7.4s, v7.4s, #0x1f\n"
- "sqadd v25.4s, v25.4s, v21.4s\n"
+ "and v29.16b, v26.16b, v16.16b\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "and v10.16b, v11.16b, v16.16b\n"
+ "sshr v21.4s, v21.4s, #0x1f\n"
+ "and v31.16b, v24.16b, v16.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v30.16b, v23.16b, v16.16b\n"
+ "sqadd v13.4s, v13.4s, v7.4s\n"
"sshr v29.4s, v29.4s, #0x1f\n"
- "srshl v13.4s, v13.4s, v22.4s\n"
- "srshl v9.4s, v9.4s, v22.4s\n"
- "sqadd v18.4s, v18.4s, v17.4s\n"
- "srshl v16.4s, v16.4s, v22.4s\n"
- "sqadd v26.4s, v26.4s, v7.4s\n"
- "srshl v25.4s, v25.4s, v22.4s\n"
- "sqadd v10.4s, v10.4s, v29.4s\n"
- "srshl v20.4s, v20.4s, v19.4s\n"
+ "sqadd v19.4s, v19.4s, v4.4s\n"
+ "sshr v10.4s, v10.4s, #0x1f\n"
+ "sqadd v18.4s, v18.4s, v21.4s\n"
+ "sshr v31.4s, v31.4s, #0x1f\n"
+ "sqadd v9.4s, v9.4s, v20.4s\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "srshl v13.4s, v13.4s, v25.4s\n"
+ "sqadd v26.4s, v26.4s, v29.4s\n"
+ "srshl v19.4s, v19.4s, v25.4s\n"
+ "sqadd v11.4s, v11.4s, v10.4s\n"
+ "srshl v18.4s, v18.4s, v25.4s\n"
+ "sqadd v24.4s, v24.4s, v31.4s\n"
+ "srshl v9.4s, v9.4s, v25.4s\n"
+ "sqadd v23.4s, v23.4s, v30.4s\n"
+ "srshl v26.4s, v26.4s, v16.4s\n"
"sqxtn v13.4h, v13.4s\n"
- "srshl v18.4s, v18.4s, v19.4s\n"
+ "srshl v11.4s, v11.4s, v16.4s\n"
+ "sqxtn v19.4h, v19.4s\n"
+ "srshl v24.4s, v24.4s, v16.4s\n"
+ "sqxtn v18.4h, v18.4s\n"
+ "srshl v23.4s, v23.4s, v16.4s\n"
"sqxtn v9.4h, v9.4s\n"
- "srshl v26.4s, v26.4s, v19.4s\n"
- "sqxtn v16.4h, v16.4s\n"
- "srshl v10.4s, v10.4s, v19.4s\n"
- "sqxtn v25.4h, v25.4s\n"
- "sqxtn2 v13.8h, v20.4s\n"
- "sqxtn2 v9.8h, v18.4s\n"
- "sqxtn2 v16.8h, v26.4s\n"
- "sqxtn2 v25.8h, v10.4s\n"
+ "sqxtn2 v13.8h, v26.4s\n"
+ "sqxtn2 v19.8h, v11.4s\n"
+ "sqxtn2 v18.8h, v24.4s\n"
+ "sqxtn2 v9.8h, v23.4s\n"
"sqadd v13.8h, v13.8h, v14.8h\n"
+ "sqadd v19.8h, v19.8h, v14.8h\n"
+ "sqadd v18.8h, v18.8h, v14.8h\n"
"sqadd v9.8h, v9.8h, v14.8h\n"
- "sqadd v16.8h, v16.8h, v14.8h\n"
- "sqadd v25.8h, v25.8h, v14.8h\n"
- "smax v13.8h, v13.8h, v12.8h\n"
- "smax v9.8h, v9.8h, v12.8h\n"
- "smax v16.8h, v16.8h, v12.8h\n"
- "smax v25.8h, v25.8h, v12.8h\n"
- "smin v13.8h, v13.8h, v11.8h\n"
- "smin v9.8h, v9.8h, v11.8h\n"
- "smin v16.8h, v16.8h, v11.8h\n"
- "smin v25.8h, v25.8h, v11.8h\n"
+ "smax v13.8h, v13.8h, v17.8h\n"
+ "smax v19.8h, v19.8h, v17.8h\n"
+ "smax v18.8h, v18.8h, v17.8h\n"
+ "smax v9.8h, v9.8h, v17.8h\n"
+ "smin v13.8h, v13.8h, v15.8h\n"
+ "smin v19.8h, v19.8h, v15.8h\n"
+ "smin v18.8h, v18.8h, v15.8h\n"
+ "smin v9.8h, v9.8h, v15.8h\n"
"uzp1 v13.16b, v13.16b, v13.16b\n"
- "str d13, [x12, x17]\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str d13, [x10, x14]\n"
+ "uzp1 v18.16b, v18.16b, v18.16b\n"
"uzp1 v9.16b, v9.16b, v9.16b\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "str d9, [x11, x17]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "str d16, [x10, x17]\n"
- "str d25, [x9, x17]\n"
- "add x17, x17, #0x8\n"
+ "str d19, [x9, x14]\n"
+ "str d18, [x28, x14]\n"
+ "str d9, [x27, x14]\n"
+ "add x14, x14, #0x8\n"
"beq 64f\n"
- "add x15, x15, #0x48\n"
+ "add x17, x17, #0x48\n"
"3:" // Oddments
- "ldr x28, [%x[params], %[offsetof_Params_bias]]\n"
- "tbz x6, #2, 5f\n"
- "ld1 { v13.4s }, [x28], #0x10\n"
- "tbz x6, #1, 4f\n"
- "ld1 { v20.d }[0], [x28], #0x8\n"
- "tbz x6, #0, 7f\n"
- "ld1 { v20.s }[2], [x28]\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "tbz x8, #2, 5f\n"
+ "ld1 { v13.4s }, [x19], #0x10\n"
+ "tbz x8, #1, 4f\n"
+ "ld1 { v26.d }[0], [x19], #0x8\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v26.s }[2], [x19]\n"
"b 7f\n"
"4:" // Oddments: Load bias: Bit 2: Bit 1: Unset
- "tbz x6, #0, 7f\n"
- "ld1 { v20.s }[0], [x28]\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v26.s }[0], [x19]\n"
"b 7f\n"
"5:" // Oddments: Load bias: Bit 2: Unset
- "tbz x6, #1, 6f\n"
- "ld1 { v13.d }[0], [x28], #0x8\n"
- "tbz x6, #0, 7f\n"
- "ld1 { v13.s }[2], [x28]\n"
+ "tbz x8, #1, 6f\n"
+ "ld1 { v13.d }[0], [x19], #0x8\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v13.s }[2], [x19]\n"
"b 7f\n"
"6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 7f\n"
- "ld1 { v13.s }[0], [x28]\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v13.s }[0], [x19]\n"
"7:" // Oddments: Load bias: Bit 2: End
- "ldr d0, [x15, #0x0]\n"
- "ldr d1, [x15, #0x8]\n"
+ "ldr d0, [x17, #0x0]\n"
+ "ldr d1, [x17, #0x8]\n"
+ "mov v19.16b, v13.16b\n"
+ "mov v11.16b, v26.16b\n"
+ "ldr d2, [x17, #0x10]\n"
+ "ldr d3, [x17, #0x18]\n"
+ "mov v18.16b, v13.16b\n"
+ "mov v24.16b, v26.16b\n"
+ "ldr d4, [x17, #0x20]\n"
+ "ldr d5, [x17, #0x28]\n"
"mov v9.16b, v13.16b\n"
- "mov v18.16b, v20.16b\n"
- "ldr d2, [x15, #0x10]\n"
- "ldr d3, [x15, #0x18]\n"
- "mov v16.16b, v13.16b\n"
- "mov v26.16b, v20.16b\n"
- "ldr d4, [x15, #0x20]\n"
- "ldr d5, [x15, #0x28]\n"
- "mov v25.16b, v13.16b\n"
- "mov v10.16b, v20.16b\n"
- "ldr d6, [x15, #0x30]\n"
- "ldr d7, [x15, #0x38]\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
- "ldr d8, [x15, #0x40]\n"
- "ldp x24, x23, [x16, #0x0]\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "ldp x22, x21, [x16, #0x10]\n"
- "ldr x20, [x16, #0x20]\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "ssubl v5.8h, v5.8b, v15.8b\n"
- "ssubl v6.8h, v6.8b, v15.8b\n"
- "ssubl v7.8h, v7.8b, v15.8b\n"
- "ssubl v8.8h, v8.8b, v15.8b\n"
- "add x24, x24, x8\n"
- "add x23, x23, x8\n"
- "add x22, x22, x8\n"
- "add x21, x21, x8\n"
- "add x20, x20, x8\n"
- "tbz x6, #2, 9f\n"
- "ld1 { v31.s }[0], [x24], #0x4\n"
- "ld1 { v30.s }[0], [x23], #0x4\n"
- "ld1 { v29.s }[0], [x22], #0x4\n"
- "ld1 { v28.s }[0], [x21], #0x4\n"
- "ld1 { v27.s }[0], [x20], #0x4\n"
- "tbz x6, #1, 8f\n"
- "ld1 { v31.h }[2], [x24], #0x2\n"
- "ld1 { v30.h }[2], [x23], #0x2\n"
- "ld1 { v29.h }[2], [x22], #0x2\n"
- "ld1 { v28.h }[2], [x21], #0x2\n"
- "ld1 { v27.h }[2], [x20], #0x2\n"
- "tbz x6, #0, 11f\n"
- "ld1 { v31.b }[6], [x24]\n"
- "ld1 { v30.b }[6], [x23]\n"
- "ld1 { v29.b }[6], [x22]\n"
- "ld1 { v28.b }[6], [x21]\n"
- "ld1 { v27.b }[6], [x20]\n"
+ "mov v23.16b, v26.16b\n"
+ "ldr d6, [x17, #0x30]\n"
+ "ldr d7, [x17, #0x38]\n"
+ "ssubl v0.8h, v0.8b, v12.8b\n"
+ "ssubl v1.8h, v1.8b, v12.8b\n"
+ "ldr d8, [x17, #0x40]\n"
+ "ldp x23, x22, [x12, #0x0]\n"
+ "ssubl v2.8h, v2.8b, v12.8b\n"
+ "ssubl v3.8h, v3.8b, v12.8b\n"
+ "ldp x21, x20, [x12, #0x10]\n"
+ "ldr x19, [x12, #0x20]\n"
+ "ssubl v4.8h, v4.8b, v12.8b\n"
+ "ssubl v5.8h, v5.8b, v12.8b\n"
+ "ssubl v6.8h, v6.8b, v12.8b\n"
+ "ssubl v7.8h, v7.8b, v12.8b\n"
+ "ssubl v8.8h, v8.8b, v12.8b\n"
+ "add x23, x23, x15\n"
+ "add x22, x22, x15\n"
+ "add x21, x21, x15\n"
+ "add x20, x20, x15\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 9f\n"
+ "ld1 { v31.s }[0], [x23], #0x4\n"
+ "ld1 { v30.s }[0], [x22], #0x4\n"
+ "ld1 { v29.s }[0], [x21], #0x4\n"
+ "ld1 { v28.s }[0], [x20], #0x4\n"
+ "ld1 { v27.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 8f\n"
+ "ld1 { v31.h }[2], [x23], #0x2\n"
+ "ld1 { v30.h }[2], [x22], #0x2\n"
+ "ld1 { v29.h }[2], [x21], #0x2\n"
+ "ld1 { v28.h }[2], [x20], #0x2\n"
+ "ld1 { v27.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v31.b }[6], [x23]\n"
+ "ld1 { v30.b }[6], [x22]\n"
+ "ld1 { v29.b }[6], [x21]\n"
+ "ld1 { v28.b }[6], [x20]\n"
+ "ld1 { v27.b }[6], [x19]\n"
"b 11f\n"
"8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset
- "tbz x6, #0, 11f\n"
- "ld1 { v31.b }[4], [x24]\n"
- "ld1 { v30.b }[4], [x23]\n"
- "ld1 { v29.b }[4], [x22]\n"
- "ld1 { v28.b }[4], [x21]\n"
- "ld1 { v27.b }[4], [x20]\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v31.b }[4], [x23]\n"
+ "ld1 { v30.b }[4], [x22]\n"
+ "ld1 { v29.b }[4], [x21]\n"
+ "ld1 { v28.b }[4], [x20]\n"
+ "ld1 { v27.b }[4], [x19]\n"
"b 11f\n"
"9:" // Oddments: Initial loads: Bit 2: Unset
- "tbz x6, #1, 10f\n"
- "ld1 { v31.h }[0], [x24], #0x2\n"
- "ld1 { v30.h }[0], [x23], #0x2\n"
- "ld1 { v29.h }[0], [x22], #0x2\n"
- "ld1 { v28.h }[0], [x21], #0x2\n"
- "ld1 { v27.h }[0], [x20], #0x2\n"
- "tbz x6, #0, 11f\n"
- "ld1 { v31.b }[2], [x24]\n"
- "ld1 { v30.b }[2], [x23]\n"
- "ld1 { v29.b }[2], [x22]\n"
- "ld1 { v28.b }[2], [x21]\n"
- "ld1 { v27.b }[2], [x20]\n"
+ "tbz x8, #1, 10f\n"
+ "ld1 { v31.h }[0], [x23], #0x2\n"
+ "ld1 { v30.h }[0], [x22], #0x2\n"
+ "ld1 { v29.h }[0], [x21], #0x2\n"
+ "ld1 { v28.h }[0], [x20], #0x2\n"
+ "ld1 { v27.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v31.b }[2], [x23]\n"
+ "ld1 { v30.b }[2], [x22]\n"
+ "ld1 { v29.b }[2], [x21]\n"
+ "ld1 { v28.b }[2], [x20]\n"
+ "ld1 { v27.b }[2], [x19]\n"
"b 11f\n"
"10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 11f\n"
- "ld1 { v31.b }[0], [x24]\n"
- "ld1 { v30.b }[0], [x23]\n"
- "ld1 { v29.b }[0], [x22]\n"
- "ld1 { v28.b }[0], [x21]\n"
- "ld1 { v27.b }[0], [x20]\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v31.b }[0], [x23]\n"
+ "ld1 { v30.b }[0], [x22]\n"
+ "ld1 { v29.b }[0], [x21]\n"
+ "ld1 { v28.b }[0], [x20]\n"
+ "ld1 { v27.b }[0], [x19]\n"
"11:" // Oddments: Initial loads: Bit 2: End
- "ssubl v31.8h, v31.8b, v24.8b\n"
+ "ssubl v31.8h, v31.8b, v22.8b\n"
"smlal v13.4s, v31.4h, v4.4h\n"
- "smlal2 v20.4s, v31.8h, v4.8h\n"
- "ldr x21, [x16, #0x28]\n"
- "smlal v9.4s, v31.4h, v3.4h\n"
- "smlal2 v18.4s, v31.8h, v3.8h\n"
- "ssubl v30.8h, v30.8b, v24.8b\n"
- "add x21, x21, x8\n"
- "ssubl v29.8h, v29.8b, v24.8b\n"
- "smlal v16.4s, v31.4h, v1.4h\n"
- "smlal2 v26.4s, v31.8h, v1.8h\n"
- "smlal v25.4s, v31.4h, v0.4h\n"
- "smlal2 v10.4s, v31.8h, v0.8h\n"
- "ssubl v28.8h, v28.8b, v24.8b\n"
+ "smlal2 v26.4s, v31.8h, v4.8h\n"
+ "ldr x21, [x12, #0x28]\n"
+ "smlal v19.4s, v31.4h, v3.4h\n"
+ "smlal2 v11.4s, v31.8h, v3.8h\n"
+ "ssubl v30.8h, v30.8b, v22.8b\n"
+ "add x21, x21, x15\n"
+ "ssubl v29.8h, v29.8b, v22.8b\n"
+ "smlal v18.4s, v31.4h, v1.4h\n"
+ "smlal2 v24.4s, v31.8h, v1.8h\n"
+ "smlal v9.4s, v31.4h, v0.4h\n"
+ "smlal2 v23.4s, v31.8h, v0.8h\n"
+ "ssubl v28.8h, v28.8b, v22.8b\n"
"smlal v13.4s, v30.4h, v0.4h\n"
- "smlal2 v20.4s, v30.8h, v0.8h\n"
- "ssubl v27.8h, v27.8b, v24.8b\n"
- "smlal v9.4s, v29.4h, v2.4h\n"
- "smlal2 v18.4s, v29.8h, v2.8h\n"
+ "smlal2 v26.4s, v30.8h, v0.8h\n"
+ "ssubl v27.8h, v27.8b, v22.8b\n"
+ "smlal v19.4s, v29.4h, v2.4h\n"
+ "smlal2 v11.4s, v29.8h, v2.8h\n"
"smlal v13.4s, v28.4h, v5.4h\n"
- "smlal2 v20.4s, v28.8h, v5.8h\n"
- "smlal v9.4s, v28.4h, v4.4h\n"
- "smlal2 v18.4s, v28.8h, v4.8h\n"
- "smlal v16.4s, v28.4h, v2.4h\n"
- "smlal2 v26.4s, v28.8h, v2.8h\n"
- "smlal v25.4s, v28.4h, v1.4h\n"
- "smlal2 v10.4s, v28.8h, v1.8h\n"
- "tbz x6, #2, 13f\n"
+ "smlal2 v26.4s, v28.8h, v5.8h\n"
+ "smlal v19.4s, v28.4h, v4.4h\n"
+ "smlal2 v11.4s, v28.8h, v4.8h\n"
+ "smlal v18.4s, v28.4h, v2.4h\n"
+ "smlal2 v24.4s, v28.8h, v2.8h\n"
+ "smlal v9.4s, v28.4h, v1.4h\n"
+ "smlal2 v23.4s, v28.8h, v1.8h\n"
+ "tbz x8, #2, 13f\n"
"ld1 { v31.s }[0], [x21], #0x4\n"
- "tbz x6, #1, 12f\n"
+ "tbz x8, #1, 12f\n"
"ld1 { v31.h }[2], [x21], #0x2\n"
- "tbz x6, #0, 15f\n"
+ "tbz x8, #0, 15f\n"
"ld1 { v31.b }[6], [x21]\n"
"b 15f\n"
"12:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset
- "tbz x6, #0, 15f\n"
+ "tbz x8, #0, 15f\n"
"ld1 { v31.b }[4], [x21]\n"
"b 15f\n"
"13:" // Oddments: Load (3, 0): Bit 2: Unset
- "tbz x6, #1, 14f\n"
+ "tbz x8, #1, 14f\n"
"ld1 { v31.h }[0], [x21], #0x2\n"
- "tbz x6, #0, 15f\n"
+ "tbz x8, #0, 15f\n"
"ld1 { v31.b }[2], [x21]\n"
"b 15f\n"
"14:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 15f\n"
+ "tbz x8, #0, 15f\n"
"ld1 { v31.b }[0], [x21]\n"
"15:" // Oddments: Load (3, 0): Bit 2: End
- "ssubl v31.8h, v31.8b, v24.8b\n"
- "smlal v16.4s, v31.4h, v6.4h\n"
- "smlal2 v26.4s, v31.8h, v6.8h\n"
- "ldr x20, [x16, #0x30]\n"
+ "ssubl v31.8h, v31.8b, v22.8b\n"
+ "smlal v18.4s, v31.4h, v6.4h\n"
+ "smlal2 v24.4s, v31.8h, v6.8h\n"
+ "ldr x20, [x12, #0x30]\n"
"smlal v13.4s, v27.4h, v7.4h\n"
- "smlal2 v20.4s, v27.8h, v7.8h\n"
- "add x20, x20, x8\n"
- "smlal v9.4s, v27.4h, v6.4h\n"
- "smlal2 v18.4s, v27.8h, v6.8h\n"
- "smlal v16.4s, v27.4h, v4.4h\n"
- "smlal2 v26.4s, v27.8h, v4.8h\n"
- "smlal v25.4s, v27.4h, v3.4h\n"
- "smlal2 v10.4s, v27.8h, v3.8h\n"
- "tbz x6, #2, 17f\n"
+ "smlal2 v26.4s, v27.8h, v7.8h\n"
+ "add x20, x20, x15\n"
+ "smlal v19.4s, v27.4h, v6.4h\n"
+ "smlal2 v11.4s, v27.8h, v6.8h\n"
+ "smlal v18.4s, v27.4h, v4.4h\n"
+ "smlal2 v24.4s, v27.8h, v4.8h\n"
+ "smlal v9.4s, v27.4h, v3.4h\n"
+ "smlal2 v23.4s, v27.8h, v3.8h\n"
+ "tbz x8, #2, 17f\n"
"ld1 { v29.s }[0], [x20], #0x4\n"
- "tbz x6, #1, 16f\n"
+ "tbz x8, #1, 16f\n"
"ld1 { v29.h }[2], [x20], #0x2\n"
- "tbz x6, #0, 19f\n"
+ "tbz x8, #0, 19f\n"
"ld1 { v29.b }[6], [x20]\n"
"b 19f\n"
"16:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset
- "tbz x6, #0, 19f\n"
+ "tbz x8, #0, 19f\n"
"ld1 { v29.b }[4], [x20]\n"
"b 19f\n"
"17:" // Oddments: Load (3, 3): Bit 2: Unset
- "tbz x6, #1, 18f\n"
+ "tbz x8, #1, 18f\n"
"ld1 { v29.h }[0], [x20], #0x2\n"
- "tbz x6, #0, 19f\n"
+ "tbz x8, #0, 19f\n"
"ld1 { v29.b }[2], [x20]\n"
"b 19f\n"
"18:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 19f\n"
+ "tbz x8, #0, 19f\n"
"ld1 { v29.b }[0], [x20]\n"
"19:" // Oddments: Load (3, 3): Bit 2: End
- "ssubl v29.8h, v29.8b, v24.8b\n"
- "ldr x28, [x16, #0x38]\n"
- "smlal v25.4s, v29.4h, v8.4h\n"
- "smlal2 v10.4s, v29.8h, v8.8h\n"
- "add x28, x28, x8\n"
- "tbz x6, #2, 21f\n"
- "ld1 { v28.s }[0], [x28], #0x4\n"
- "tbz x6, #1, 20f\n"
- "ld1 { v28.h }[2], [x28], #0x2\n"
- "tbz x6, #0, 23f\n"
- "ld1 { v28.b }[6], [x28]\n"
+ "ssubl v29.8h, v29.8b, v22.8b\n"
+ "ldr x26, [x12, #0x38]\n"
+ "smlal v9.4s, v29.4h, v8.4h\n"
+ "smlal2 v23.4s, v29.8h, v8.8h\n"
+ "add x26, x26, x15\n"
+ "tbz x8, #2, 21f\n"
+ "ld1 { v28.s }[0], [x26], #0x4\n"
+ "tbz x8, #1, 20f\n"
+ "ld1 { v28.h }[2], [x26], #0x2\n"
+ "tbz x8, #0, 23f\n"
+ "ld1 { v28.b }[6], [x26]\n"
"b 23f\n"
"20:" // Oddments: Load (0, 1): Bit 2: Bit 1: Unset
- "tbz x6, #0, 23f\n"
- "ld1 { v28.b }[4], [x28]\n"
+ "tbz x8, #0, 23f\n"
+ "ld1 { v28.b }[4], [x26]\n"
"b 23f\n"
"21:" // Oddments: Load (0, 1): Bit 2: Unset
- "tbz x6, #1, 22f\n"
- "ld1 { v28.h }[0], [x28], #0x2\n"
- "tbz x6, #0, 23f\n"
- "ld1 { v28.b }[2], [x28]\n"
+ "tbz x8, #1, 22f\n"
+ "ld1 { v28.h }[0], [x26], #0x2\n"
+ "tbz x8, #0, 23f\n"
+ "ld1 { v28.b }[2], [x26]\n"
"b 23f\n"
"22:" // Oddments: Load (0, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 23f\n"
- "ld1 { v28.b }[0], [x28]\n"
+ "tbz x8, #0, 23f\n"
+ "ld1 { v28.b }[0], [x26]\n"
"23:" // Oddments: Load (0, 1): Bit 2: End
- "ssubl v28.8h, v28.8b, v24.8b\n"
- "ldr x27, [x16, #0x40]\n"
+ "ssubl v28.8h, v28.8b, v22.8b\n"
+ "ldr x25, [x12, #0x40]\n"
"smlal v13.4s, v28.4h, v1.4h\n"
- "smlal2 v20.4s, v28.8h, v1.8h\n"
- "smlal v9.4s, v28.4h, v0.4h\n"
- "smlal2 v18.4s, v28.8h, v0.8h\n"
- "add x27, x27, x8\n"
- "tbz x6, #2, 25f\n"
- "ld1 { v31.s }[0], [x27], #0x4\n"
- "tbz x6, #1, 24f\n"
- "ld1 { v31.h }[2], [x27], #0x2\n"
- "tbz x6, #0, 27f\n"
- "ld1 { v31.b }[6], [x27]\n"
+ "smlal2 v26.4s, v28.8h, v1.8h\n"
+ "smlal v19.4s, v28.4h, v0.4h\n"
+ "smlal2 v11.4s, v28.8h, v0.8h\n"
+ "add x25, x25, x15\n"
+ "tbz x8, #2, 25f\n"
+ "ld1 { v31.s }[0], [x25], #0x4\n"
+ "tbz x8, #1, 24f\n"
+ "ld1 { v31.h }[2], [x25], #0x2\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v31.b }[6], [x25]\n"
"b 27f\n"
"24:" // Oddments: Load (0, 2): Bit 2: Bit 1: Unset
- "tbz x6, #0, 27f\n"
- "ld1 { v31.b }[4], [x27]\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v31.b }[4], [x25]\n"
"b 27f\n"
"25:" // Oddments: Load (0, 2): Bit 2: Unset
- "tbz x6, #1, 26f\n"
- "ld1 { v31.h }[0], [x27], #0x2\n"
- "tbz x6, #0, 27f\n"
- "ld1 { v31.b }[2], [x27]\n"
+ "tbz x8, #1, 26f\n"
+ "ld1 { v31.h }[0], [x25], #0x2\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v31.b }[2], [x25]\n"
"b 27f\n"
"26:" // Oddments: Load (0, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 27f\n"
- "ld1 { v31.b }[0], [x27]\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v31.b }[0], [x25]\n"
"27:" // Oddments: Load (0, 2): Bit 2: End
- "ssubl v31.8h, v31.8b, v24.8b\n"
- "ldr x26, [x16, #0x48]\n"
+ "ssubl v31.8h, v31.8b, v22.8b\n"
+ "ldr x19, [x12, #0x48]\n"
"smlal v13.4s, v31.4h, v2.4h\n"
- "smlal2 v20.4s, v31.8h, v2.8h\n"
- "smlal v9.4s, v31.4h, v1.4h\n"
- "smlal2 v18.4s, v31.8h, v1.8h\n"
- "add x26, x26, x8\n"
- "tbz x6, #2, 29f\n"
- "ld1 { v30.s }[0], [x26], #0x4\n"
- "tbz x6, #1, 28f\n"
- "ld1 { v30.h }[2], [x26], #0x2\n"
- "tbz x6, #0, 31f\n"
- "ld1 { v30.b }[6], [x26]\n"
+ "smlal2 v26.4s, v31.8h, v2.8h\n"
+ "smlal v19.4s, v31.4h, v1.4h\n"
+ "smlal2 v11.4s, v31.8h, v1.8h\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 29f\n"
+ "ld1 { v30.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 28f\n"
+ "ld1 { v30.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 31f\n"
+ "ld1 { v30.b }[6], [x19]\n"
"b 31f\n"
"28:" // Oddments: Load (2, 2): Bit 2: Bit 1: Unset
- "tbz x6, #0, 31f\n"
- "ld1 { v30.b }[4], [x26]\n"
+ "tbz x8, #0, 31f\n"
+ "ld1 { v30.b }[4], [x19]\n"
"b 31f\n"
"29:" // Oddments: Load (2, 2): Bit 2: Unset
- "tbz x6, #1, 30f\n"
- "ld1 { v30.h }[0], [x26], #0x2\n"
- "tbz x6, #0, 31f\n"
- "ld1 { v30.b }[2], [x26]\n"
+ "tbz x8, #1, 30f\n"
+ "ld1 { v30.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 31f\n"
+ "ld1 { v30.b }[2], [x19]\n"
"b 31f\n"
"30:" // Oddments: Load (2, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 31f\n"
- "ld1 { v30.b }[0], [x26]\n"
+ "tbz x8, #0, 31f\n"
+ "ld1 { v30.b }[0], [x19]\n"
"31:" // Oddments: Load (2, 2): Bit 2: End
- "ssubl v30.8h, v30.8b, v24.8b\n"
- "ldr x25, [x16, #0x50]\n"
+ "ssubl v30.8h, v30.8b, v22.8b\n"
+ "ldr x24, [x12, #0x50]\n"
"smlal v13.4s, v30.4h, v8.4h\n"
- "smlal2 v20.4s, v30.8h, v8.8h\n"
- "smlal v9.4s, v30.4h, v7.4h\n"
- "smlal2 v18.4s, v30.8h, v7.8h\n"
- "add x25, x25, x8\n"
- "smlal v16.4s, v30.4h, v5.4h\n"
- "smlal2 v26.4s, v30.8h, v5.8h\n"
- "smlal v25.4s, v30.4h, v4.4h\n"
- "smlal2 v10.4s, v30.8h, v4.8h\n"
- "tbz x6, #2, 33f\n"
- "ld1 { v29.s }[0], [x25], #0x4\n"
- "tbz x6, #1, 32f\n"
- "ld1 { v29.h }[2], [x25], #0x2\n"
- "tbz x6, #0, 35f\n"
- "ld1 { v29.b }[6], [x25]\n"
+ "smlal2 v26.4s, v30.8h, v8.8h\n"
+ "smlal v19.4s, v30.4h, v7.4h\n"
+ "smlal2 v11.4s, v30.8h, v7.8h\n"
+ "add x24, x24, x15\n"
+ "smlal v18.4s, v30.4h, v5.4h\n"
+ "smlal2 v24.4s, v30.8h, v5.8h\n"
+ "smlal v9.4s, v30.4h, v4.4h\n"
+ "smlal2 v23.4s, v30.8h, v4.8h\n"
+ "tbz x8, #2, 33f\n"
+ "ld1 { v29.s }[0], [x24], #0x4\n"
+ "tbz x8, #1, 32f\n"
+ "ld1 { v29.h }[2], [x24], #0x2\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v29.b }[6], [x24]\n"
"b 35f\n"
"32:" // Oddments: Load (1, 0): Bit 2: Bit 1: Unset
- "tbz x6, #0, 35f\n"
- "ld1 { v29.b }[4], [x25]\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v29.b }[4], [x24]\n"
"b 35f\n"
"33:" // Oddments: Load (1, 0): Bit 2: Unset
- "tbz x6, #1, 34f\n"
- "ld1 { v29.h }[0], [x25], #0x2\n"
- "tbz x6, #0, 35f\n"
- "ld1 { v29.b }[2], [x25]\n"
+ "tbz x8, #1, 34f\n"
+ "ld1 { v29.h }[0], [x24], #0x2\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v29.b }[2], [x24]\n"
"b 35f\n"
"34:" // Oddments: Load (1, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 35f\n"
- "ld1 { v29.b }[0], [x25]\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v29.b }[0], [x24]\n"
"35:" // Oddments: Load (1, 0): Bit 2: End
- "ssubl v29.8h, v29.8b, v24.8b\n"
- "ldr x24, [x16, #0x58]\n"
+ "ssubl v29.8h, v29.8b, v22.8b\n"
+ "ldr x23, [x12, #0x58]\n"
"smlal v13.4s, v29.4h, v3.4h\n"
- "smlal2 v20.4s, v29.8h, v3.8h\n"
- "smlal v16.4s, v29.4h, v0.4h\n"
- "smlal2 v26.4s, v29.8h, v0.8h\n"
- "add x24, x24, x8\n"
- "tbz x6, #2, 37f\n"
- "ld1 { v28.s }[0], [x24], #0x4\n"
- "tbz x6, #1, 36f\n"
- "ld1 { v28.h }[2], [x24], #0x2\n"
- "tbz x6, #0, 39f\n"
- "ld1 { v28.b }[6], [x24]\n"
+ "smlal2 v26.4s, v29.8h, v3.8h\n"
+ "smlal v18.4s, v29.4h, v0.4h\n"
+ "smlal2 v24.4s, v29.8h, v0.8h\n"
+ "add x23, x23, x15\n"
+ "tbz x8, #2, 37f\n"
+ "ld1 { v28.s }[0], [x23], #0x4\n"
+ "tbz x8, #1, 36f\n"
+ "ld1 { v28.h }[2], [x23], #0x2\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v28.b }[6], [x23]\n"
"b 39f\n"
"36:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset
- "tbz x6, #0, 39f\n"
- "ld1 { v28.b }[4], [x24]\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v28.b }[4], [x23]\n"
"b 39f\n"
"37:" // Oddments: Load (1, 3): Bit 2: Unset
- "tbz x6, #1, 38f\n"
- "ld1 { v28.h }[0], [x24], #0x2\n"
- "tbz x6, #0, 39f\n"
- "ld1 { v28.b }[2], [x24]\n"
+ "tbz x8, #1, 38f\n"
+ "ld1 { v28.h }[0], [x23], #0x2\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v28.b }[2], [x23]\n"
"b 39f\n"
"38:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 39f\n"
- "ld1 { v28.b }[0], [x24]\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v28.b }[0], [x23]\n"
"39:" // Oddments: Load (1, 3): Bit 2: End
- "ssubl v28.8h, v28.8b, v24.8b\n"
- "ldr x23, [x16, #0x60]\n"
- "smlal v9.4s, v28.4h, v5.4h\n"
- "smlal2 v18.4s, v28.8h, v5.8h\n"
- "smlal v25.4s, v28.4h, v2.4h\n"
- "smlal2 v10.4s, v28.8h, v2.8h\n"
- "add x23, x23, x8\n"
- "tbz x6, #2, 41f\n"
- "ld1 { v31.s }[0], [x23], #0x4\n"
- "tbz x6, #1, 40f\n"
- "ld1 { v31.h }[2], [x23], #0x2\n"
- "tbz x6, #0, 43f\n"
- "ld1 { v31.b }[6], [x23]\n"
+ "ssubl v28.8h, v28.8b, v22.8b\n"
+ "ldr x22, [x12, #0x60]\n"
+ "smlal v19.4s, v28.4h, v5.4h\n"
+ "smlal2 v11.4s, v28.8h, v5.8h\n"
+ "smlal v9.4s, v28.4h, v2.4h\n"
+ "smlal2 v23.4s, v28.8h, v2.8h\n"
+ "add x22, x22, x15\n"
+ "tbz x8, #2, 41f\n"
+ "ld1 { v31.s }[0], [x22], #0x4\n"
+ "tbz x8, #1, 40f\n"
+ "ld1 { v31.h }[2], [x22], #0x2\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v31.b }[6], [x22]\n"
"b 43f\n"
"40:" // Oddments: Load (2, 0): Bit 2: Bit 1: Unset
- "tbz x6, #0, 43f\n"
- "ld1 { v31.b }[4], [x23]\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v31.b }[4], [x22]\n"
"b 43f\n"
"41:" // Oddments: Load (2, 0): Bit 2: Unset
- "tbz x6, #1, 42f\n"
- "ld1 { v31.h }[0], [x23], #0x2\n"
- "tbz x6, #0, 43f\n"
- "ld1 { v31.b }[2], [x23]\n"
+ "tbz x8, #1, 42f\n"
+ "ld1 { v31.h }[0], [x22], #0x2\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v31.b }[2], [x22]\n"
"b 43f\n"
"42:" // Oddments: Load (2, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 43f\n"
- "ld1 { v31.b }[0], [x23]\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v31.b }[0], [x22]\n"
"43:" // Oddments: Load (2, 0): Bit 2: End
- "ssubl v31.8h, v31.8b, v24.8b\n"
- "ldr x22, [x16, #0x68]\n"
+ "ssubl v31.8h, v31.8b, v22.8b\n"
+ "ldr x21, [x12, #0x68]\n"
"smlal v13.4s, v31.4h, v6.4h\n"
- "smlal2 v20.4s, v31.8h, v6.8h\n"
- "smlal v16.4s, v31.4h, v3.4h\n"
- "smlal2 v26.4s, v31.8h, v3.8h\n"
- "add x22, x22, x8\n"
- "tbz x6, #2, 45f\n"
- "ld1 { v30.s }[0], [x22], #0x4\n"
- "tbz x6, #1, 44f\n"
- "ld1 { v30.h }[2], [x22], #0x2\n"
- "tbz x6, #0, 47f\n"
- "ld1 { v30.b }[6], [x22]\n"
+ "smlal2 v26.4s, v31.8h, v6.8h\n"
+ "smlal v18.4s, v31.4h, v3.4h\n"
+ "smlal2 v24.4s, v31.8h, v3.8h\n"
+ "add x21, x21, x15\n"
+ "tbz x8, #2, 45f\n"
+ "ld1 { v30.s }[0], [x21], #0x4\n"
+ "tbz x8, #1, 44f\n"
+ "ld1 { v30.h }[2], [x21], #0x2\n"
+ "tbz x8, #0, 47f\n"
+ "ld1 { v30.b }[6], [x21]\n"
"b 47f\n"
"44:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset
- "tbz x6, #0, 47f\n"
- "ld1 { v30.b }[4], [x22]\n"
+ "tbz x8, #0, 47f\n"
+ "ld1 { v30.b }[4], [x21]\n"
"b 47f\n"
"45:" // Oddments: Load (2, 3): Bit 2: Unset
- "tbz x6, #1, 46f\n"
- "ld1 { v30.h }[0], [x22], #0x2\n"
- "tbz x6, #0, 47f\n"
- "ld1 { v30.b }[2], [x22]\n"
+ "tbz x8, #1, 46f\n"
+ "ld1 { v30.h }[0], [x21], #0x2\n"
+ "tbz x8, #0, 47f\n"
+ "ld1 { v30.b }[2], [x21]\n"
"b 47f\n"
"46:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 47f\n"
- "ld1 { v30.b }[0], [x22]\n"
+ "tbz x8, #0, 47f\n"
+ "ld1 { v30.b }[0], [x21]\n"
"47:" // Oddments: Load (2, 3): Bit 2: End
- "ssubl v30.8h, v30.8b, v24.8b\n"
- "ldr x21, [x16, #0x70]\n"
- "smlal v9.4s, v30.4h, v8.4h\n"
- "smlal2 v18.4s, v30.8h, v8.8h\n"
- "smlal v25.4s, v30.4h, v5.4h\n"
- "smlal2 v10.4s, v30.8h, v5.8h\n"
- "add x21, x21, x8\n"
- "tbz x6, #2, 49f\n"
- "ld1 { v29.s }[0], [x21], #0x4\n"
- "tbz x6, #1, 48f\n"
- "ld1 { v29.h }[2], [x21], #0x2\n"
- "tbz x6, #0, 51f\n"
- "ld1 { v29.b }[6], [x21]\n"
+ "ssubl v30.8h, v30.8b, v22.8b\n"
+ "ldr x20, [x12, #0x70]\n"
+ "smlal v19.4s, v30.4h, v8.4h\n"
+ "smlal2 v11.4s, v30.8h, v8.8h\n"
+ "smlal v9.4s, v30.4h, v5.4h\n"
+ "smlal2 v23.4s, v30.8h, v5.8h\n"
+ "add x20, x20, x15\n"
+ "tbz x8, #2, 49f\n"
+ "ld1 { v29.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 48f\n"
+ "ld1 { v29.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v29.b }[6], [x20]\n"
"b 51f\n"
"48:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset
- "tbz x6, #0, 51f\n"
- "ld1 { v29.b }[4], [x21]\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v29.b }[4], [x20]\n"
"b 51f\n"
"49:" // Oddments: Load (3, 1): Bit 2: Unset
- "tbz x6, #1, 50f\n"
- "ld1 { v29.h }[0], [x21], #0x2\n"
- "tbz x6, #0, 51f\n"
- "ld1 { v29.b }[2], [x21]\n"
+ "tbz x8, #1, 50f\n"
+ "ld1 { v29.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v29.b }[2], [x20]\n"
"b 51f\n"
"50:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 51f\n"
- "ld1 { v29.b }[0], [x21]\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v29.b }[0], [x20]\n"
"51:" // Oddments: Load (3, 1): Bit 2: End
- "ssubl v29.8h, v29.8b, v24.8b\n"
- "ldr x20, [x16, #0x78]\n"
- "smlal v16.4s, v29.4h, v7.4h\n"
- "smlal2 v26.4s, v29.8h, v7.8h\n"
- "smlal v25.4s, v29.4h, v6.4h\n"
- "smlal2 v10.4s, v29.8h, v6.8h\n"
- "add x20, x20, x8\n"
- "tbz x6, #2, 53f\n"
- "ld1 { v28.s }[0], [x20], #0x4\n"
- "tbz x6, #1, 52f\n"
- "ld1 { v28.h }[2], [x20], #0x2\n"
- "tbz x6, #0, 55f\n"
- "ld1 { v28.b }[6], [x20]\n"
+ "ssubl v29.8h, v29.8b, v22.8b\n"
+ "ldr x19, [x12, #0x78]\n"
+ "smlal v18.4s, v29.4h, v7.4h\n"
+ "smlal2 v24.4s, v29.8h, v7.8h\n"
+ "smlal v9.4s, v29.4h, v6.4h\n"
+ "smlal2 v23.4s, v29.8h, v6.8h\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 53f\n"
+ "ld1 { v28.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 52f\n"
+ "ld1 { v28.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v28.b }[6], [x19]\n"
"b 55f\n"
"52:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset
- "tbz x6, #0, 55f\n"
- "ld1 { v28.b }[4], [x20]\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v28.b }[4], [x19]\n"
"b 55f\n"
"53:" // Oddments: Load (3, 2): Bit 2: Unset
- "tbz x6, #1, 54f\n"
- "ld1 { v28.h }[0], [x20], #0x2\n"
- "tbz x6, #0, 55f\n"
- "ld1 { v28.b }[2], [x20]\n"
+ "tbz x8, #1, 54f\n"
+ "ld1 { v28.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v28.b }[2], [x19]\n"
"b 55f\n"
"54:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 55f\n"
- "ld1 { v28.b }[0], [x20]\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v28.b }[0], [x19]\n"
"55:" // Oddments: Load (3, 2): Bit 2: End
- "ssubl v28.8h, v28.8b, v24.8b\n"
- "smlal v16.4s, v28.4h, v8.4h\n"
- "smlal2 v26.4s, v28.8h, v8.8h\n"
- "smlal v25.4s, v28.4h, v7.4h\n"
- "smlal2 v10.4s, v28.8h, v7.8h\n"
- "tbz x6, #2, 57f\n"
- "ld1 { v17.4s }, [x14], #0x10\n"
- "ld1 { v22.4s }, [x13], #0x10\n"
- "tbz x6, #1, 56f\n"
- "ld1 { v23.d }[0], [x14], #0x8\n"
- "ld1 { v19.d }[0], [x13], #0x8\n"
- "tbz x6, #0, 59f\n"
- "ld1 { v23.s }[2], [x14]\n"
- "ld1 { v19.s }[2], [x13]\n"
+ "ssubl v28.8h, v28.8b, v22.8b\n"
+ "smlal v18.4s, v28.4h, v8.4h\n"
+ "smlal2 v24.4s, v28.8h, v8.8h\n"
+ "smlal v9.4s, v28.4h, v7.4h\n"
+ "smlal2 v23.4s, v28.8h, v7.8h\n"
+ "tbz x8, #2, 57f\n"
+ "ld1 { v21.4s }, [x13], #0x10\n"
+ "ld1 { v25.4s }, [x11], #0x10\n"
+ "tbz x8, #1, 56f\n"
+ "ld1 { v10.d }[0], [x13], #0x8\n"
+ "ld1 { v16.d }[0], [x11], #0x8\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v10.s }[2], [x13]\n"
+ "ld1 { v16.s }[2], [x11]\n"
"b 59f\n"
"56:" // Oddments: Load requant params: Bit 2: Bit 1: Unset
- "tbz x6, #0, 59f\n"
- "ld1 { v23.s }[0], [x14]\n"
- "ld1 { v19.s }[0], [x13]\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v10.s }[0], [x13]\n"
+ "ld1 { v16.s }[0], [x11]\n"
"b 59f\n"
"57:" // Oddments: Load requant params: Bit 2: Unset
- "tbz x6, #1, 58f\n"
- "ld1 { v17.d }[0], [x14], #0x8\n"
- "ld1 { v22.d }[0], [x13], #0x8\n"
- "tbz x6, #0, 59f\n"
- "ld1 { v17.s }[2], [x14]\n"
- "ld1 { v22.s }[2], [x13]\n"
+ "tbz x8, #1, 58f\n"
+ "ld1 { v21.d }[0], [x13], #0x8\n"
+ "ld1 { v25.d }[0], [x11], #0x8\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v21.s }[2], [x13]\n"
+ "ld1 { v25.s }[2], [x11]\n"
"b 59f\n"
"58:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 59f\n"
- "ld1 { v17.s }[0], [x14]\n"
- "ld1 { v22.s }[0], [x13]\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v21.s }[0], [x13]\n"
+ "ld1 { v25.s }[0], [x11]\n"
"59:" // Oddments: Load requant params: Bit 2: End
- "sqrdmulh v13.4s, v13.4s, v17.4s\n"
- "and v21.16b, v13.16b, v22.16b\n"
- "add x12, x12, x17\n"
- "add x11, x11, x17\n"
- "sqrdmulh v20.4s, v20.4s, v23.4s\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "add x10, x10, x17\n"
- "add x9, x9, x17\n"
- "and v29.16b, v20.16b, v19.16b\n"
- "sqrdmulh v9.4s, v9.4s, v17.4s\n"
- "sqrdmulh v16.4s, v16.4s, v17.4s\n"
- "sqrdmulh v25.4s, v25.4s, v17.4s\n"
- "sqadd v13.4s, v13.4s, v21.4s\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "and v0.16b, v9.16b, v22.16b\n"
- "sqrdmulh v18.4s, v18.4s, v23.4s\n"
- "and v27.16b, v16.16b, v22.16b\n"
- "sqrdmulh v26.4s, v26.4s, v23.4s\n"
- "and v21.16b, v25.16b, v22.16b\n"
- "sqrdmulh v10.4s, v10.4s, v23.4s\n"
- "sqadd v20.4s, v20.4s, v29.4s\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "and v17.16b, v18.16b, v19.16b\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "and v7.16b, v26.16b, v19.16b\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "and v29.16b, v10.16b, v19.16b\n"
- "sqadd v9.4s, v9.4s, v0.4s\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v27.4s\n"
+ "sqrdmulh v13.4s, v13.4s, v21.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v21.4s\n"
+ "add x10, x10, x14\n"
+ "add x9, x9, x14\n"
+ "sqrdmulh v18.4s, v18.4s, v21.4s\n"
+ "sqrdmulh v9.4s, v9.4s, v21.4s\n"
+ "add x28, x28, x14\n"
+ "add x27, x27, x14\n"
+ "and v7.16b, v13.16b, v25.16b\n"
+ "sqrdmulh v26.4s, v26.4s, v10.4s\n"
+ "and v4.16b, v19.16b, v25.16b\n"
+ "sqrdmulh v11.4s, v11.4s, v10.4s\n"
+ "and v21.16b, v18.16b, v25.16b\n"
+ "sqrdmulh v24.4s, v24.4s, v10.4s\n"
+ "and v20.16b, v9.16b, v25.16b\n"
+ "sqrdmulh v23.4s, v23.4s, v10.4s\n"
"sshr v7.4s, v7.4s, #0x1f\n"
- "sqadd v25.4s, v25.4s, v21.4s\n"
+ "and v29.16b, v26.16b, v16.16b\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "and v10.16b, v11.16b, v16.16b\n"
+ "sshr v21.4s, v21.4s, #0x1f\n"
+ "and v31.16b, v24.16b, v16.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v30.16b, v23.16b, v16.16b\n"
+ "sqadd v13.4s, v13.4s, v7.4s\n"
"sshr v29.4s, v29.4s, #0x1f\n"
- "srshl v13.4s, v13.4s, v22.4s\n"
- "srshl v9.4s, v9.4s, v22.4s\n"
- "sqadd v18.4s, v18.4s, v17.4s\n"
- "srshl v16.4s, v16.4s, v22.4s\n"
- "sqadd v26.4s, v26.4s, v7.4s\n"
- "srshl v25.4s, v25.4s, v22.4s\n"
- "sqadd v10.4s, v10.4s, v29.4s\n"
- "srshl v20.4s, v20.4s, v19.4s\n"
+ "sqadd v19.4s, v19.4s, v4.4s\n"
+ "sshr v10.4s, v10.4s, #0x1f\n"
+ "sqadd v18.4s, v18.4s, v21.4s\n"
+ "sshr v31.4s, v31.4s, #0x1f\n"
+ "sqadd v9.4s, v9.4s, v20.4s\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "srshl v13.4s, v13.4s, v25.4s\n"
+ "sqadd v26.4s, v26.4s, v29.4s\n"
+ "srshl v19.4s, v19.4s, v25.4s\n"
+ "sqadd v11.4s, v11.4s, v10.4s\n"
+ "srshl v18.4s, v18.4s, v25.4s\n"
+ "sqadd v24.4s, v24.4s, v31.4s\n"
+ "srshl v9.4s, v9.4s, v25.4s\n"
+ "sqadd v23.4s, v23.4s, v30.4s\n"
+ "srshl v26.4s, v26.4s, v16.4s\n"
"sqxtn v13.4h, v13.4s\n"
- "srshl v18.4s, v18.4s, v19.4s\n"
+ "srshl v11.4s, v11.4s, v16.4s\n"
+ "sqxtn v19.4h, v19.4s\n"
+ "srshl v24.4s, v24.4s, v16.4s\n"
+ "sqxtn v18.4h, v18.4s\n"
+ "srshl v23.4s, v23.4s, v16.4s\n"
"sqxtn v9.4h, v9.4s\n"
- "srshl v26.4s, v26.4s, v19.4s\n"
- "sqxtn v16.4h, v16.4s\n"
- "srshl v10.4s, v10.4s, v19.4s\n"
- "sqxtn v25.4h, v25.4s\n"
- "sqxtn2 v13.8h, v20.4s\n"
- "sqxtn2 v9.8h, v18.4s\n"
- "sqxtn2 v16.8h, v26.4s\n"
- "sqxtn2 v25.8h, v10.4s\n"
+ "sqxtn2 v13.8h, v26.4s\n"
+ "sqxtn2 v19.8h, v11.4s\n"
+ "sqxtn2 v18.8h, v24.4s\n"
+ "sqxtn2 v9.8h, v23.4s\n"
"sqadd v13.8h, v13.8h, v14.8h\n"
+ "sqadd v19.8h, v19.8h, v14.8h\n"
+ "sqadd v18.8h, v18.8h, v14.8h\n"
"sqadd v9.8h, v9.8h, v14.8h\n"
- "sqadd v16.8h, v16.8h, v14.8h\n"
- "sqadd v25.8h, v25.8h, v14.8h\n"
- "smax v13.8h, v13.8h, v12.8h\n"
- "smax v9.8h, v9.8h, v12.8h\n"
- "smax v16.8h, v16.8h, v12.8h\n"
- "smax v25.8h, v25.8h, v12.8h\n"
- "smin v13.8h, v13.8h, v11.8h\n"
- "smin v9.8h, v9.8h, v11.8h\n"
- "smin v16.8h, v16.8h, v11.8h\n"
- "smin v25.8h, v25.8h, v11.8h\n"
+ "smax v13.8h, v13.8h, v17.8h\n"
+ "smax v19.8h, v19.8h, v17.8h\n"
+ "smax v18.8h, v18.8h, v17.8h\n"
+ "smax v9.8h, v9.8h, v17.8h\n"
+ "smin v13.8h, v13.8h, v15.8h\n"
+ "smin v19.8h, v19.8h, v15.8h\n"
+ "smin v18.8h, v18.8h, v15.8h\n"
+ "smin v9.8h, v9.8h, v15.8h\n"
"uzp1 v13.16b, v13.16b, v13.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v18.16b, v18.16b, v18.16b\n"
"uzp1 v9.16b, v9.16b, v9.16b\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "tbz x6, #2, 61f\n"
- "st1 { v13.s }[0], [x12], #0x4\n"
- "st1 { v9.s }[0], [x11], #0x4\n"
- "st1 { v16.s }[0], [x10], #0x4\n"
- "st1 { v25.s }[0], [x9], #0x4\n"
- "tbz x6, #1, 60f\n"
- "st1 { v13.h }[2], [x12], #0x2\n"
- "st1 { v9.h }[2], [x11], #0x2\n"
- "st1 { v16.h }[2], [x10], #0x2\n"
- "st1 { v25.h }[2], [x9], #0x2\n"
- "tbz x6, #0, 63f\n"
- "st1 { v13.b }[6], [x12], #0x1\n"
- "st1 { v9.b }[6], [x11], #0x1\n"
- "st1 { v16.b }[6], [x10], #0x1\n"
- "st1 { v25.b }[6], [x9], #0x1\n"
+ "tbz x8, #2, 61f\n"
+ "st1 { v13.s }[0], [x10], #0x4\n"
+ "st1 { v19.s }[0], [x9], #0x4\n"
+ "st1 { v18.s }[0], [x28], #0x4\n"
+ "st1 { v9.s }[0], [x27], #0x4\n"
+ "tbz x8, #1, 60f\n"
+ "st1 { v13.h }[2], [x10], #0x2\n"
+ "st1 { v19.h }[2], [x9], #0x2\n"
+ "st1 { v18.h }[2], [x28], #0x2\n"
+ "st1 { v9.h }[2], [x27], #0x2\n"
+ "tbz x8, #0, 63f\n"
+ "st1 { v13.b }[6], [x10], #0x1\n"
+ "st1 { v19.b }[6], [x9], #0x1\n"
+ "st1 { v18.b }[6], [x28], #0x1\n"
+ "st1 { v9.b }[6], [x27], #0x1\n"
"b 63f\n"
"60:" // Oddments: Bit 2: Bit 1: Unset
- "tbz x6, #0, 63f\n"
- "st1 { v13.b }[4], [x12], #0x1\n"
- "st1 { v9.b }[4], [x11], #0x1\n"
- "st1 { v16.b }[4], [x10], #0x1\n"
- "st1 { v25.b }[4], [x9], #0x1\n"
+ "tbz x8, #0, 63f\n"
+ "st1 { v13.b }[4], [x10], #0x1\n"
+ "st1 { v19.b }[4], [x9], #0x1\n"
+ "st1 { v18.b }[4], [x28], #0x1\n"
+ "st1 { v9.b }[4], [x27], #0x1\n"
"b 63f\n"
"61:" // Oddments: Bit 2: Unset
- "tbz x6, #1, 62f\n"
- "st1 { v13.h }[0], [x12], #0x2\n"
- "st1 { v9.h }[0], [x11], #0x2\n"
- "st1 { v16.h }[0], [x10], #0x2\n"
- "st1 { v25.h }[0], [x9], #0x2\n"
- "tbz x6, #0, 63f\n"
- "st1 { v13.b }[2], [x12], #0x1\n"
- "st1 { v9.b }[2], [x11], #0x1\n"
- "st1 { v16.b }[2], [x10], #0x1\n"
- "st1 { v25.b }[2], [x9], #0x1\n"
+ "tbz x8, #1, 62f\n"
+ "st1 { v13.h }[0], [x10], #0x2\n"
+ "st1 { v19.h }[0], [x9], #0x2\n"
+ "st1 { v18.h }[0], [x28], #0x2\n"
+ "st1 { v9.h }[0], [x27], #0x2\n"
+ "tbz x8, #0, 63f\n"
+ "st1 { v13.b }[2], [x10], #0x1\n"
+ "st1 { v19.b }[2], [x9], #0x1\n"
+ "st1 { v18.b }[2], [x28], #0x1\n"
+ "st1 { v9.b }[2], [x27], #0x1\n"
"b 63f\n"
"62:" // Oddments: Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 63f\n"
- "st1 { v13.b }[0], [x12], #0x1\n"
- "st1 { v9.b }[0], [x11], #0x1\n"
- "st1 { v16.b }[0], [x10], #0x1\n"
- "st1 { v25.b }[0], [x9], #0x1\n"
+ "tbz x8, #0, 63f\n"
+ "st1 { v13.b }[0], [x10], #0x1\n"
+ "st1 { v19.b }[0], [x9], #0x1\n"
+ "st1 { v18.b }[0], [x28], #0x1\n"
+ "st1 { v9.b }[0], [x27], #0x1\n"
"63:" // Oddments: Bit 2: End
"64:" // End
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
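The requantization tail that dominates these hunks is unchanged in substance by the revert; only the register assignments differ (x19 is reintroduced and the vN temporaries are reshuffled). Each accumulator lane goes through the usual gemmlowp-style sequence visible on both sides of the diff: sqrdmulh against the per-channel multiplier, an and/sshr/sqadd nudge so that the following srshl (a rounding right shift, since the per-channel shift value is negative) rounds halves away from zero, then the c_offset, clamping between minval and maxval, and narrowing. A minimal scalar model of one lane, not code from the library, with illustrative names, sqrdmulh saturation elided, and standard SQRDMULH/SRSHL semantics assumed:

#include <algorithm>
#include <cstdint>

// Scalar sketch of one requantized lane; the kernels do this four lanes
// at a time. The saturating case of sqrdmulh (only reachable for
// acc == mul == INT32_MIN) is ignored here.
static int8_t requantize_lane(int32_t acc, int32_t mul, int32_t shift,
                              int32_t c_offset, int32_t minval, int32_t maxval)
{
    // sqrdmulh: rounding doubling multiply, keep the high 32 bits.
    // Written as (acc*mul + 2^30) >> 31, which is the same value as
    // (2*acc*mul + 2^31) >> 32 but cannot overflow the int64 product.
    int64_t prod = (int64_t)acc * mul;
    int32_t high = (int32_t)((prod + (1LL << 30)) >> 31);

    // and/sshr/sqadd: for negative lanes with a pending right shift,
    // subtract 1 so srshl's round-half-up becomes round-half-away-from-zero.
    if (shift < 0 && high < 0) high -= 1;

    // srshl by a negative amount is a rounding arithmetic shift right.
    int n = -shift;
    int32_t res = n > 0 ? (int32_t)(((int64_t)high + (1LL << (n - 1))) >> n) : high;

    // sqadd the c_offset, then smax/smin clamp before the sqxtn/uzp1 narrowing.
    return (int8_t)std::clamp(res + c_offset, minval, maxval);
}

The "tbz x8, #2/#1/#0" ladders in the Oddments blocks apply the same arithmetic to the leftover channels: each set bit of n_channels & 7 selects a 4-, 2- or 1-byte partial load or store, so the tail is handled without ever reading or writing past the buffer.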
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
index fa9ae97dee..0dc377c5c1 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -100,75 +100,75 @@ void a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "ldr x7, [%x[params], %[offsetof_Params_n_channels]]\n"
- "ldr x23, [%x[params], %[offsetof_Params_requant]]\n"
- "lsr x8, x7, #0x3\n"
- "add x20, x23, %[offsetof_Requantize32_a_offset]\n"
- "ld1r { v12.16b }, [x20]\n"
+ "ldr x19, [%x[params], %[offsetof_Params_requant]]\n"
+ "ldr x8, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "add x24, x19, %[offsetof_Requantize32_a_offset]\n"
+ "add x23, x19, %[offsetof_Requantize32_b_offset]\n"
"ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n"
- "add x21, x23, %[offsetof_Requantize32_b_offset]\n"
- "add x20, x23, %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v13.16b }, [x21]\n"
- "ld1r { v11.8h }, [x20]\n"
- "add x21, x23, %[offsetof_Requantize32_minval]\n"
- "add x20, x23, %[offsetof_Requantize32_maxval]\n"
- "ld1r { v16.8h }, [x21]\n"
- "ld1r { v14.8h }, [x20]\n"
- "mov x17, #0x0\n"
- "mov x16, #0x0\n"
- "add x15, %x[params], %[offsetof_Params_inptrs]\n"
- "ldr x14, [%x[params], %[offsetof_Params_weights]]\n"
+ "add x21, x19, %[offsetof_Requantize32_c_offset]\n"
+ "add x20, x19, %[offsetof_Requantize32_minval]\n"
+ "ldr x17, [%x[params], %[offsetof_Params_weights]]\n"
+ "add x19, x19, %[offsetof_Requantize32_maxval]\n"
+ "ld1r { v12.16b }, [x24]\n"
+ "ld1r { v13.16b }, [x23]\n"
+ "lsr x16, x8, #0x3\n"
+ "ld1r { v11.8h }, [x21]\n"
+ "ld1r { v17.8h }, [x20]\n"
+ "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
+ "ld1r { v14.8h }, [x19]\n"
"ldr x13, [%x[params], %[offsetof_Params_requant_muls]]\n"
- "ldr x12, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "ldp x11, x10, [x22, #0x0]\n"
- "ldp x9, x28, [x22, #0x10]\n"
- "cbz x8, 3f\n"
- "ldr d0, [x14, #0x0]\n"
- "ldr d1, [x14, #0x8]\n"
- "subs x8, x8, #0x1\n"
+ "add x12, %x[params], %[offsetof_Params_inptrs]\n"
+ "ldr x11, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "ldp x10, x9, [x22, #0x0]\n"
+ "ldp x28, x27, [x22, #0x10]\n"
+ "cbz x16, 3f\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr q15, [x19, #0x0]\n"
+ "subs x16, x16, #0x1\n"
+ "mov v9.16b, v15.16b\n"
+ "ldr q10, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr d0, [x17, #0x0]\n"
+ "ldr d1, [x17, #0x8]\n"
+ "ldr d2, [x17, #0x10]\n"
+ "mov v16.16b, v10.16b\n"
+ "mov v22.16b, v15.16b\n"
+ "ldr d3, [x17, #0x18]\n"
+ "ldr d4, [x17, #0x20]\n"
+ "mov v21.16b, v10.16b\n"
+ "mov v23.16b, v15.16b\n"
+ "ldr d5, [x17, #0x28]\n"
+ "ldr d6, [x17, #0x30]\n"
+ "mov v18.16b, v10.16b\n"
"ssubl v0.8h, v0.8b, v13.8b\n"
- "ldr d2, [x14, #0x10]\n"
- "ldr d3, [x14, #0x18]\n"
+ "ldr d7, [x17, #0x38]\n"
+ "ldr d8, [x17, #0x40]\n"
"ssubl v1.8h, v1.8b, v13.8b\n"
"ssubl v2.8h, v2.8b, v13.8b\n"
- "ldr d4, [x14, #0x20]\n"
- "ldr d5, [x14, #0x28]\n"
+ "ldp x26, x25, [x12, #0x0]\n"
+ "ldp x24, x23, [x12, #0x10]\n"
"ssubl v3.8h, v3.8b, v13.8b\n"
"ssubl v4.8h, v4.8b, v13.8b\n"
- "ldr d6, [x14, #0x30]\n"
- "ldr d7, [x14, #0x38]\n"
+ "ldp x22, x21, [x12, #0x20]\n"
+ "ldp x20, x19, [x12, #0x30]\n"
"ssubl v5.8h, v5.8b, v13.8b\n"
"ssubl v6.8h, v6.8b, v13.8b\n"
- "ldr d8, [x14, #0x40]\n"
- "ldr x24, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr d31, [x26, x15]\n"
+ "ldr d30, [x25, x15]\n"
"ssubl v7.8h, v7.8b, v13.8b\n"
"ssubl v8.8h, v8.8b, v13.8b\n"
- "ldr q15, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "add x24, x24, #0x20\n"
- "str x24, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "mov v10.16b, v15.16b\n"
- "mov v20.16b, v17.16b\n"
- "ldp x23, x22, [x15, #0x20]\n"
- "ldp x21, x20, [x15, #0x30]\n"
- "mov v9.16b, v15.16b\n"
- "mov v23.16b, v17.16b\n"
- "ldr d31, [x27, x17]\n"
- "ldr d30, [x26, x17]\n"
- "mov v21.16b, v15.16b\n"
- "mov v22.16b, v17.16b\n"
- "ldr d29, [x25, x17]\n"
- "ldr d28, [x24, x17]\n"
+ "ldr d29, [x24, x15]\n"
+ "ldr d28, [x23, x15]\n"
"ssubl v31.8h, v31.8b, v12.8b\n"
"ssubl v30.8h, v30.8b, v12.8b\n"
- "ldr d27, [x23, x17]\n"
- "ldr d26, [x22, x17]\n"
+ "ldr d27, [x22, x15]\n"
+ "ldr d26, [x21, x15]\n"
"ssubl v29.8h, v29.8b, v12.8b\n"
"ssubl v28.8h, v28.8b, v12.8b\n"
- "ldr d25, [x21, x17]\n"
- "ldr d24, [x20, x17]\n"
+ "ldr d25, [x20, x15]\n"
+ "ldr d24, [x19, x15]\n"
"ssubl v27.8h, v27.8b, v12.8b\n"
"ssubl v26.8h, v26.8b, v12.8b\n"
"ssubl v25.8h, v25.8b, v12.8b\n"
@@ -176,250 +176,250 @@ void a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(
"beq 2f\n"
"1:" // Loop
"smlal v15.4s, v31.4h, v8.4h\n"
- "smlal2 v17.4s, v31.8h, v8.8h\n"
- "ldr x24, [x15, #0x40]\n"
- "ldr x22, [x15, #0x48]\n"
- "smlal v10.4s, v31.4h, v6.4h\n"
- "smlal2 v20.4s, v31.8h, v6.8h\n"
- "ldr x21, [x15, #0x50]\n"
- "ldr x20, [x15, #0x58]\n"
+ "smlal2 v10.4s, v31.8h, v8.8h\n"
+ "ldr x24, [x12, #0x40]\n"
+ "ldr x23, [x12, #0x48]\n"
+ "smlal v9.4s, v31.4h, v6.4h\n"
+ "smlal2 v16.4s, v31.8h, v6.8h\n"
+ "ldr x21, [x12, #0x50]\n"
+ "ldr x19, [x12, #0x58]\n"
"smlal v15.4s, v30.4h, v0.4h\n"
- "smlal2 v17.4s, v30.8h, v0.8h\n"
- "ldr q19, [x13, #0x0]\n"
- "ldr x23, [x15, #0x78]\n"
- "smlal v10.4s, v28.4h, v1.4h\n"
- "smlal2 v20.4s, v28.8h, v1.8h\n"
- "ldr d28, [x22, x17]\n"
+ "smlal2 v10.4s, v30.8h, v0.8h\n"
+ "ldr x22, [x12, #0x78]\n"
+ "ldr x20, [x12, #0x60]\n"
+ "smlal v9.4s, v28.4h, v1.4h\n"
+ "smlal2 v16.4s, v28.8h, v1.8h\n"
+ "ldr d28, [x23, x15]\n"
"ssubl v28.8h, v28.8b, v12.8b\n"
"smlal v15.4s, v29.4h, v1.4h\n"
- "smlal2 v17.4s, v29.8h, v1.8h\n"
- "ldr d29, [x24, x17]\n"
+ "smlal2 v10.4s, v29.8h, v1.8h\n"
+ "ldr d29, [x24, x15]\n"
"ssubl v29.8h, v29.8b, v12.8b\n"
- "smlal v10.4s, v27.4h, v2.4h\n"
- "smlal2 v20.4s, v27.8h, v2.8h\n"
- "ldr d27, [x21, x17]\n"
+ "smlal v9.4s, v27.4h, v2.4h\n"
+ "smlal2 v16.4s, v27.8h, v2.8h\n"
+ "ldr d27, [x21, x15]\n"
"ssubl v27.8h, v27.8b, v12.8b\n"
"smlal v15.4s, v26.4h, v3.4h\n"
- "smlal2 v17.4s, v26.8h, v3.8h\n"
- "ldr d26, [x20, x17]\n"
- "ldr x20, [x15, #0x60]\n"
- "smlal v10.4s, v24.4h, v0.4h\n"
- "smlal2 v20.4s, v24.8h, v0.8h\n"
+ "smlal2 v10.4s, v26.8h, v3.8h\n"
+ "ldr d26, [x19, x15]\n"
"ssubl v26.8h, v26.8b, v12.8b\n"
- "ldr x21, [x15, #0x80]\n"
+ "smlal v9.4s, v24.4h, v0.4h\n"
+ "smlal2 v16.4s, v24.8h, v0.8h\n"
+ "ldr x21, [x12, #0x80]\n"
+ "ldr x19, [x12, #0x68]\n"
"smlal v15.4s, v25.4h, v4.4h\n"
- "smlal2 v17.4s, v25.8h, v4.8h\n"
- "ldr d25, [x20, x17]\n"
- "ldr x20, [x15, #0x68]\n"
- "smlal v10.4s, v29.4h, v4.4h\n"
- "smlal2 v20.4s, v29.8h, v4.8h\n"
- "ldr d29, [x20, x17]\n"
+ "smlal2 v10.4s, v25.8h, v4.8h\n"
+ "ldr d25, [x20, x15]\n"
"ssubl v25.8h, v25.8b, v12.8b\n"
+ "smlal v9.4s, v29.4h, v4.4h\n"
+ "smlal2 v16.4s, v29.8h, v4.8h\n"
+ "ldr x20, [x12, #0x88]\n"
+ "ldr d29, [x19, x15]\n"
"smlal v15.4s, v24.4h, v2.4h\n"
- "smlal2 v17.4s, v24.8h, v2.8h\n"
- "ldr q18, [x12, #0x0]\n"
- "ldr x22, [x15, #0x88]\n"
- "smlal v10.4s, v28.4h, v5.4h\n"
- "smlal2 v20.4s, v28.8h, v5.8h\n"
- "ldr d28, [x21, x17]\n"
- "ldr x21, [x15, #0x70]\n"
- "smlal v9.4s, v31.4h, v2.4h\n"
- "smlal2 v23.4s, v31.8h, v2.8h\n"
+ "smlal2 v10.4s, v24.8h, v2.8h\n"
+ "ldr x19, [x12, #0x70]\n"
+ "ssubl v29.8h, v29.8b, v12.8b\n"
+ "smlal v9.4s, v28.4h, v5.4h\n"
+ "smlal2 v16.4s, v28.8h, v5.8h\n"
+ "ldr d28, [x21, x15]\n"
"ssubl v28.8h, v28.8b, v12.8b\n"
- "ldr x25, [x15, #0x98]\n"
+ "smlal v22.4s, v31.4h, v2.4h\n"
+ "smlal2 v21.4s, v31.8h, v2.8h\n"
+ "ldr x24, [x12, #0x98]\n"
+ "ldr d24, [x19, x15]\n"
"smlal v15.4s, v27.4h, v5.4h\n"
- "smlal2 v17.4s, v27.8h, v5.8h\n"
- "ssubl v29.8h, v29.8b, v12.8b\n"
- "ldr x24, [x15, #0x90]\n"
- "smlal v10.4s, v27.4h, v3.4h\n"
- "smlal2 v20.4s, v27.8h, v3.8h\n"
- "ldr d27, [x23, x17]\n"
+ "smlal2 v10.4s, v27.8h, v5.8h\n"
+ "ssubl v24.8h, v24.8b, v12.8b\n"
+ "ldr x23, [x12, #0x90]\n"
+ "smlal v9.4s, v27.4h, v3.4h\n"
+ "smlal2 v16.4s, v27.8h, v3.8h\n"
+ "ldr d27, [x22, x15]\n"
"ssubl v27.8h, v27.8b, v12.8b\n"
- "smlal v21.4s, v31.4h, v0.4h\n"
- "smlal v9.4s, v26.4h, v3.4h\n"
- "ldr x23, [x15, #0xa8]\n"
- "ldr x20, [x15, #0xa0]\n"
- "smlal2 v23.4s, v26.8h, v3.8h\n"
- "ldr d26, [x22, x17]\n"
- "smlal2 v22.4s, v31.8h, v0.8h\n"
- "ldr d24, [x21, x17]\n"
- "smlal v21.4s, v27.4h, v4.4h\n"
- "smlal v9.4s, v25.4h, v0.4h\n"
+ "smlal v23.4s, v31.4h, v0.4h\n"
+ "smlal v22.4s, v26.4h, v3.4h\n"
+ "ldr x22, [x12, #0xa8]\n"
+ "ldr x19, [x12, #0xa0]\n"
+ "smlal2 v21.4s, v26.8h, v3.8h\n"
+ "smlal2 v18.4s, v31.8h, v0.8h\n"
+ "ldr d26, [x20, x15]\n"
"ssubl v26.8h, v26.8b, v12.8b\n"
- "ldr x22, [x15, #0xb0]\n"
- "smlal2 v23.4s, v25.8h, v0.8h\n"
- "ldr q30, [x13, #0x10]\n"
- "smlal2 v22.4s, v27.8h, v4.8h\n"
- "ldr d27, [x20, x17]\n"
- "smlal v21.4s, v28.4h, v1.4h\n"
+ "smlal v23.4s, v27.4h, v4.4h\n"
+ "smlal v22.4s, v25.4h, v0.4h\n"
+ "ldr x21, [x12, #0xb0]\n"
+ "ldr x20, [x12, #0xb8]\n"
+ "smlal2 v21.4s, v25.8h, v0.8h\n"
+ "smlal2 v18.4s, v27.8h, v4.8h\n"
+ "ldr d27, [x19, x15]\n"
+ "ssubl v27.8h, v27.8b, v12.8b\n"
+ "smlal v23.4s, v28.4h, v1.4h\n"
"smlal v15.4s, v25.4h, v6.4h\n"
- "ssubl v24.8h, v24.8b, v12.8b\n"
- "ldr x21, [x15, #0xb8]\n"
- "smlal2 v17.4s, v25.8h, v6.8h\n"
- "ldr d25, [x24, x17]\n"
- "smlal v9.4s, v29.4h, v4.4h\n"
+ "ldr x19, [x12, #0xc0]\n"
+ "ldr q19, [x13, #0x0]\n"
+ "smlal2 v10.4s, v25.8h, v6.8h\n"
+ "smlal v22.4s, v29.4h, v4.4h\n"
+ "ldr d25, [x23, x15]\n"
"ssubl v25.8h, v25.8b, v12.8b\n"
- "smlal2 v23.4s, v29.8h, v4.8h\n"
- "ldr d29, [x25, x17]\n"
- "ldr q31, [x12, #0x10]\n"
- "smlal2 v22.4s, v28.8h, v1.8h\n"
- "smlal v21.4s, v26.4h, v5.4h\n"
+ "smlal2 v21.4s, v29.8h, v4.8h\n"
+ "ldr d29, [x24, x15]\n"
+ "smlal2 v18.4s, v28.8h, v1.8h\n"
"ssubl v29.8h, v29.8b, v12.8b\n"
+ "smlal v23.4s, v26.4h, v5.4h\n"
"smlal v15.4s, v24.4h, v7.4h\n"
- "ldr x20, [x15, #0xc0]\n"
- "smlal2 v17.4s, v24.8h, v7.8h\n"
- "smlal v9.4s, v24.4h, v1.4h\n"
- "ssubl v27.8h, v27.8b, v12.8b\n"
- "ldr x24, [%x[params], %[offsetof_Params_bias]]\n"
- "smlal2 v23.4s, v24.8h, v1.8h\n"
- "ldr d24, [x23, x17]\n"
- "smlal2 v22.4s, v26.8h, v5.8h\n"
- "ldr d26, [x22, x17]\n"
- "smlal v21.4s, v29.4h, v2.4h\n"
+ "ldr q0, [x11, #0x0]\n"
+ "ldr q4, [x13, #0x10]\n"
+ "smlal2 v10.4s, v24.8h, v7.8h\n"
+ "smlal v22.4s, v24.4h, v1.4h\n"
+ "sqrdmulh v15.4s, v15.4s, v19.4s\n"
+ "ldr q31, [x11, #0x10]\n"
+ "smlal2 v21.4s, v24.8h, v1.8h\n"
+ "ldr d24, [x22, x15]\n"
+ "smlal2 v18.4s, v26.8h, v5.8h\n"
"ssubl v24.8h, v24.8b, v12.8b\n"
- "smlal2 v22.4s, v29.8h, v2.8h\n"
- "add x14, x14, #0x48\n"
- "smlal v9.4s, v25.4h, v6.4h\n"
- "smlal v21.4s, v24.4h, v3.4h\n"
+ "smlal v23.4s, v29.4h, v2.4h\n"
+ "ldr d26, [x21, x15]\n"
+ "smlal2 v18.4s, v29.8h, v2.8h\n"
"ssubl v26.8h, v26.8b, v12.8b\n"
- "subs x8, x8, #0x1\n"
- "smlal v10.4s, v28.4h, v7.4h\n"
- "smlal2 v20.4s, v28.8h, v7.8h\n"
- "sqrdmulh v15.4s, v15.4s, v19.4s\n"
- "add x13, x13, #0x20\n"
- "smlal2 v23.4s, v25.8h, v6.8h\n"
- "ldr d25, [x21, x17]\n"
- "smlal2 v22.4s, v24.8h, v3.8h\n"
+ "smlal v22.4s, v25.4h, v6.4h\n"
+ "smlal v23.4s, v24.4h, v3.4h\n"
+ "and v30.16b, v15.16b, v0.16b\n"
+ "add x17, x17, #0x48\n"
+ "smlal v9.4s, v28.4h, v7.4h\n"
+ "smlal2 v16.4s, v28.8h, v7.8h\n"
+ "sqrdmulh v10.4s, v10.4s, v4.4s\n"
+ "subs x16, x16, #0x1\n"
+ "smlal2 v21.4s, v25.8h, v6.8h\n"
+ "ldr d25, [x20, x15]\n"
+ "smlal2 v18.4s, v24.8h, v3.8h\n"
"ssubl v25.8h, v25.8b, v12.8b\n"
- "smlal v9.4s, v27.4h, v7.4h\n"
- "smlal v21.4s, v26.4h, v7.4h\n"
- "and v0.16b, v15.16b, v18.16b\n"
- "add x12, x12, #0x20\n"
- "smlal v10.4s, v29.4h, v8.4h\n"
- "smlal2 v20.4s, v29.8h, v8.8h\n"
- "ldr d29, [x20, x17]\n"
+ "smlal v22.4s, v27.4h, v7.4h\n"
+ "smlal v23.4s, v26.4h, v7.4h\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "add x13, x13, #0x20\n"
+ "smlal v9.4s, v29.4h, v8.4h\n"
+ "smlal2 v16.4s, v29.8h, v8.8h\n"
+ "ldr d29, [x19, x15]\n"
"ssubl v29.8h, v29.8b, v12.8b\n"
- "smlal2 v23.4s, v27.8h, v7.8h\n"
- "smlal2 v22.4s, v26.8h, v7.8h\n"
- "sqrdmulh v17.4s, v17.4s, v30.4s\n"
- "add x17, x17, #0x8\n"
- "smlal v9.4s, v24.4h, v5.4h\n"
- "smlal v21.4s, v25.4h, v6.4h\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "smlal2 v23.4s, v24.8h, v5.8h\n"
- "smlal2 v22.4s, v25.8h, v6.8h\n"
- "and v7.16b, v17.16b, v31.16b\n"
- "smlal v9.4s, v25.4h, v8.4h\n"
- "smlal v21.4s, v29.4h, v8.4h\n"
- "sqrdmulh v10.4s, v10.4s, v19.4s\n"
- "smlal2 v23.4s, v25.8h, v8.8h\n"
- "smlal2 v22.4s, v29.8h, v8.8h\n"
+ "smlal2 v21.4s, v27.8h, v7.8h\n"
+ "smlal2 v18.4s, v26.8h, v7.8h\n"
"sqrdmulh v9.4s, v9.4s, v19.4s\n"
- "sqrdmulh v21.4s, v21.4s, v19.4s\n"
- "sqadd v15.4s, v15.4s, v0.4s\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "and v19.16b, v10.16b, v18.16b\n"
- "sqrdmulh v20.4s, v20.4s, v30.4s\n"
- "and v27.16b, v9.16b, v18.16b\n"
- "sqrdmulh v23.4s, v23.4s, v30.4s\n"
- "and v0.16b, v21.16b, v18.16b\n"
- "sqrdmulh v22.4s, v22.4s, v30.4s\n"
- "sqadd v17.4s, v17.4s, v7.4s\n"
+ "add x15, x15, #0x8\n"
+ "smlal v22.4s, v24.4h, v5.4h\n"
+ "smlal v23.4s, v25.4h, v6.4h\n"
+ "and v28.16b, v9.16b, v0.16b\n"
+ "add x11, x11, #0x20\n"
+ "smlal2 v21.4s, v24.8h, v5.8h\n"
+ "smlal2 v18.4s, v25.8h, v6.8h\n"
+ "sqrdmulh v16.4s, v16.4s, v4.4s\n"
+ "smlal v22.4s, v25.4h, v8.4h\n"
+ "smlal v23.4s, v29.4h, v8.4h\n"
+ "sqrdmulh v22.4s, v22.4s, v19.4s\n"
+ "smlal2 v21.4s, v25.8h, v8.8h\n"
+ "smlal2 v18.4s, v29.8h, v8.8h\n"
+ "sqrdmulh v23.4s, v23.4s, v19.4s\n"
+ "and v29.16b, v22.16b, v0.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v4.4s\n"
+ "and v20.16b, v23.16b, v0.16b\n"
+ "sqrdmulh v18.4s, v18.4s, v4.4s\n"
+ "and v19.16b, v10.16b, v31.16b\n"
+ "sshr v28.4s, v28.4s, #0x1f\n"
+ "and v4.16b, v16.16b, v31.16b\n"
+ "sshr v29.4s, v29.4s, #0x1f\n"
+ "and v5.16b, v21.16b, v31.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v26.16b, v18.16b, v31.16b\n"
+ "sqadd v15.4s, v15.4s, v30.4s\n"
"sshr v19.4s, v19.4s, #0x1f\n"
- "and v5.16b, v20.16b, v31.16b\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "and v4.16b, v23.16b, v31.16b\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "and v7.16b, v22.16b, v31.16b\n"
- "sqadd v10.4s, v10.4s, v19.4s\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sqadd v9.4s, v9.4s, v27.4s\n"
+ "sqadd v9.4s, v9.4s, v28.4s\n"
"sshr v4.4s, v4.4s, #0x1f\n"
- "sqadd v21.4s, v21.4s, v0.4s\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "srshl v15.4s, v15.4s, v18.4s\n"
- "srshl v10.4s, v10.4s, v18.4s\n"
- "sqadd v20.4s, v20.4s, v5.4s\n"
- "srshl v9.4s, v9.4s, v18.4s\n"
- "sqadd v23.4s, v23.4s, v4.4s\n"
- "srshl v21.4s, v21.4s, v18.4s\n"
- "sqadd v22.4s, v22.4s, v7.4s\n"
- "srshl v17.4s, v17.4s, v31.4s\n"
+ "sqadd v22.4s, v22.4s, v29.4s\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
+ "sqadd v23.4s, v23.4s, v20.4s\n"
+ "sshr v26.4s, v26.4s, #0x1f\n"
+ "srshl v15.4s, v15.4s, v0.4s\n"
+ "sqadd v10.4s, v10.4s, v19.4s\n"
+ "srshl v9.4s, v9.4s, v0.4s\n"
+ "sqadd v16.4s, v16.4s, v4.4s\n"
+ "srshl v22.4s, v22.4s, v0.4s\n"
+ "sqadd v21.4s, v21.4s, v5.4s\n"
+ "srshl v23.4s, v23.4s, v0.4s\n"
+ "sqadd v18.4s, v18.4s, v26.4s\n"
+ "srshl v10.4s, v10.4s, v31.4s\n"
"sqxtn v15.4h, v15.4s\n"
- "srshl v20.4s, v20.4s, v31.4s\n"
- "sqxtn v10.4h, v10.4s\n"
- "srshl v23.4s, v23.4s, v31.4s\n"
+ "srshl v16.4s, v16.4s, v31.4s\n"
"sqxtn v9.4h, v9.4s\n"
- "srshl v22.4s, v22.4s, v31.4s\n"
- "sqxtn v21.4h, v21.4s\n"
- "sqxtn2 v15.8h, v17.4s\n"
- "sqxtn2 v10.8h, v20.4s\n"
- "sqxtn2 v9.8h, v23.4s\n"
- "sqxtn2 v21.8h, v22.4s\n"
+ "srshl v21.4s, v21.4s, v31.4s\n"
+ "sqxtn v22.4h, v22.4s\n"
+ "srshl v18.4s, v18.4s, v31.4s\n"
+ "sqxtn v23.4h, v23.4s\n"
+ "sqxtn2 v15.8h, v10.4s\n"
+ "sqxtn2 v9.8h, v16.4s\n"
+ "sqxtn2 v22.8h, v21.4s\n"
+ "sqxtn2 v23.8h, v18.4s\n"
"sqadd v15.8h, v15.8h, v11.8h\n"
- "sqadd v10.8h, v10.8h, v11.8h\n"
"sqadd v9.8h, v9.8h, v11.8h\n"
- "sqadd v21.8h, v21.8h, v11.8h\n"
- "smax v15.8h, v15.8h, v16.8h\n"
- "smax v10.8h, v10.8h, v16.8h\n"
- "smax v9.8h, v9.8h, v16.8h\n"
- "smax v21.8h, v21.8h, v16.8h\n"
+ "sqadd v22.8h, v22.8h, v11.8h\n"
+ "sqadd v23.8h, v23.8h, v11.8h\n"
+ "smax v15.8h, v15.8h, v17.8h\n"
+ "smax v9.8h, v9.8h, v17.8h\n"
+ "smax v22.8h, v22.8h, v17.8h\n"
+ "smax v23.8h, v23.8h, v17.8h\n"
"smin v15.8h, v15.8h, v14.8h\n"
- "smin v10.8h, v10.8h, v14.8h\n"
"smin v9.8h, v9.8h, v14.8h\n"
- "smin v21.8h, v21.8h, v14.8h\n"
+ "smin v22.8h, v22.8h, v14.8h\n"
+ "smin v23.8h, v23.8h, v14.8h\n"
"uzp1 v15.16b, v15.16b, v15.16b\n"
- "str d15, [x11, x16]\n"
- "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "str d15, [x10, x14]\n"
"uzp1 v9.16b, v9.16b, v9.16b\n"
- "str d10, [x10, x16]\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "str d9, [x9, x16]\n"
- "str d21, [x28, x16]\n"
- "ldr q15, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "add x24, x24, #0x20\n"
- "ldr d0, [x14, #0x0]\n"
- "ldr d1, [x14, #0x8]\n"
- "add x16, x16, #0x8\n"
- "str x24, [%x[params], %[offsetof_Params_bias]]\n"
- "ldr d2, [x14, #0x10]\n"
- "ldr d3, [x14, #0x18]\n"
- "mov v10.16b, v15.16b\n"
- "mov v20.16b, v17.16b\n"
- "ldr d4, [x14, #0x20]\n"
- "ldr d5, [x14, #0x28]\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str d9, [x9, x14]\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "str d22, [x28, x14]\n"
+ "str d23, [x27, x14]\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr q15, [x19, #0x0]\n"
+ "add x14, x14, #0x8\n"
+ "ldr q10, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr d0, [x17, #0x0]\n"
+ "ldr d1, [x17, #0x8]\n"
+ "ldr d2, [x17, #0x10]\n"
"mov v9.16b, v15.16b\n"
- "mov v23.16b, v17.16b\n"
- "ldr d6, [x14, #0x30]\n"
- "ldr d7, [x14, #0x38]\n"
- "mov v21.16b, v15.16b\n"
- "mov v22.16b, v17.16b\n"
- "ldr d8, [x14, #0x40]\n"
- "ldp x27, x26, [x15, #0x0]\n"
+ "mov v16.16b, v10.16b\n"
+ "ldr d3, [x17, #0x18]\n"
+ "ldr d4, [x17, #0x20]\n"
+ "mov v22.16b, v15.16b\n"
+ "mov v21.16b, v10.16b\n"
+ "ldr d5, [x17, #0x28]\n"
+ "ldr d6, [x17, #0x30]\n"
+ "mov v23.16b, v15.16b\n"
+ "mov v18.16b, v10.16b\n"
+ "ldr d7, [x17, #0x38]\n"
+ "ldr d8, [x17, #0x40]\n"
"ssubl v0.8h, v0.8b, v13.8b\n"
"ssubl v1.8h, v1.8b, v13.8b\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "ldp x23, x22, [x15, #0x20]\n"
+ "ldp x26, x25, [x12, #0x0]\n"
+ "ldp x24, x23, [x12, #0x10]\n"
"ssubl v2.8h, v2.8b, v13.8b\n"
"ssubl v3.8h, v3.8b, v13.8b\n"
- "ldp x21, x20, [x15, #0x30]\n"
- "ldr d31, [x27, x17]\n"
+ "ldp x22, x21, [x12, #0x20]\n"
+ "ldp x20, x19, [x12, #0x30]\n"
"ssubl v4.8h, v4.8b, v13.8b\n"
"ssubl v5.8h, v5.8b, v13.8b\n"
- "ldr d30, [x26, x17]\n"
- "ldr d29, [x25, x17]\n"
+ "ldr d31, [x26, x15]\n"
+ "ldr d30, [x25, x15]\n"
"ssubl v6.8h, v6.8b, v13.8b\n"
"ssubl v7.8h, v7.8b, v13.8b\n"
- "ldr d28, [x24, x17]\n"
- "ldr d27, [x23, x17]\n"
+ "ldr d29, [x24, x15]\n"
+ "ldr d28, [x23, x15]\n"
"ssubl v8.8h, v8.8b, v13.8b\n"
"ssubl v31.8h, v31.8b, v12.8b\n"
- "ldr d26, [x22, x17]\n"
- "ldr d25, [x21, x17]\n"
+ "ldr d27, [x22, x15]\n"
+ "ldr d26, [x21, x15]\n"
"ssubl v30.8h, v30.8b, v12.8b\n"
"ssubl v29.8h, v29.8b, v12.8b\n"
- "ldr d24, [x20, x17]\n"
+ "ldr d25, [x20, x15]\n"
+ "ldr d24, [x19, x15]\n"
"ssubl v28.8h, v28.8b, v12.8b\n"
"ssubl v27.8h, v27.8b, v12.8b\n"
"ssubl v26.8h, v26.8b, v12.8b\n"
@@ -428,966 +428,966 @@ void a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(
"bgt 1b\n"
"2:" // Tail
"smlal v15.4s, v31.4h, v8.4h\n"
- "smlal2 v17.4s, v31.8h, v8.8h\n"
- "ldr x24, [x15, #0x40]\n"
- "ldr x22, [x15, #0x48]\n"
- "smlal v10.4s, v31.4h, v6.4h\n"
- "smlal2 v20.4s, v31.8h, v6.8h\n"
- "ldr x21, [x15, #0x50]\n"
- "ldr x20, [x15, #0x58]\n"
+ "smlal2 v10.4s, v31.8h, v8.8h\n"
+ "ldr x24, [x12, #0x40]\n"
+ "ldr x23, [x12, #0x48]\n"
+ "smlal v9.4s, v31.4h, v6.4h\n"
+ "smlal2 v16.4s, v31.8h, v6.8h\n"
+ "ldr x21, [x12, #0x50]\n"
+ "ldr x19, [x12, #0x58]\n"
"smlal v15.4s, v30.4h, v0.4h\n"
- "smlal2 v17.4s, v30.8h, v0.8h\n"
- "ldr q19, [x13, #0x0]\n"
- "ldr x23, [x15, #0x78]\n"
- "smlal v10.4s, v28.4h, v1.4h\n"
- "smlal2 v20.4s, v28.8h, v1.8h\n"
- "ldr d28, [x22, x17]\n"
+ "smlal2 v10.4s, v30.8h, v0.8h\n"
+ "ldr x22, [x12, #0x78]\n"
+ "ldr x20, [x12, #0x60]\n"
+ "smlal v9.4s, v28.4h, v1.4h\n"
+ "smlal2 v16.4s, v28.8h, v1.8h\n"
+ "ldr d28, [x23, x15]\n"
"ssubl v28.8h, v28.8b, v12.8b\n"
"smlal v15.4s, v29.4h, v1.4h\n"
- "smlal2 v17.4s, v29.8h, v1.8h\n"
- "ldr d29, [x24, x17]\n"
+ "smlal2 v10.4s, v29.8h, v1.8h\n"
+ "ldr d29, [x24, x15]\n"
"ssubl v29.8h, v29.8b, v12.8b\n"
- "smlal v10.4s, v27.4h, v2.4h\n"
- "smlal2 v20.4s, v27.8h, v2.8h\n"
- "ldr d27, [x21, x17]\n"
+ "smlal v9.4s, v27.4h, v2.4h\n"
+ "smlal2 v16.4s, v27.8h, v2.8h\n"
+ "ldr d27, [x21, x15]\n"
"ssubl v27.8h, v27.8b, v12.8b\n"
"smlal v15.4s, v26.4h, v3.4h\n"
- "smlal2 v17.4s, v26.8h, v3.8h\n"
- "ldr d26, [x20, x17]\n"
- "ldr x20, [x15, #0x60]\n"
- "smlal v10.4s, v24.4h, v0.4h\n"
- "smlal2 v20.4s, v24.8h, v0.8h\n"
+ "smlal2 v10.4s, v26.8h, v3.8h\n"
+ "ldr d26, [x19, x15]\n"
"ssubl v26.8h, v26.8b, v12.8b\n"
- "ldr x21, [x15, #0x80]\n"
+ "smlal v9.4s, v24.4h, v0.4h\n"
+ "smlal2 v16.4s, v24.8h, v0.8h\n"
+ "ldr x21, [x12, #0x80]\n"
+ "ldr x19, [x12, #0x68]\n"
"smlal v15.4s, v25.4h, v4.4h\n"
- "smlal2 v17.4s, v25.8h, v4.8h\n"
- "ldr d25, [x20, x17]\n"
- "ldr x20, [x15, #0x68]\n"
- "smlal v10.4s, v29.4h, v4.4h\n"
- "smlal2 v20.4s, v29.8h, v4.8h\n"
- "ldr d29, [x20, x17]\n"
+ "smlal2 v10.4s, v25.8h, v4.8h\n"
+ "ldr d25, [x20, x15]\n"
"ssubl v25.8h, v25.8b, v12.8b\n"
+ "smlal v9.4s, v29.4h, v4.4h\n"
+ "smlal2 v16.4s, v29.8h, v4.8h\n"
+ "ldr x20, [x12, #0x88]\n"
+ "ldr d29, [x19, x15]\n"
"smlal v15.4s, v24.4h, v2.4h\n"
- "smlal2 v17.4s, v24.8h, v2.8h\n"
- "ldr q18, [x12, #0x0]\n"
- "ldr x22, [x15, #0x88]\n"
- "smlal v10.4s, v28.4h, v5.4h\n"
- "smlal2 v20.4s, v28.8h, v5.8h\n"
- "ldr d28, [x21, x17]\n"
- "ldr x21, [x15, #0x70]\n"
- "smlal v9.4s, v31.4h, v2.4h\n"
- "smlal2 v23.4s, v31.8h, v2.8h\n"
+ "smlal2 v10.4s, v24.8h, v2.8h\n"
+ "ldr x19, [x12, #0x70]\n"
+ "ssubl v29.8h, v29.8b, v12.8b\n"
+ "smlal v9.4s, v28.4h, v5.4h\n"
+ "smlal2 v16.4s, v28.8h, v5.8h\n"
+ "ldr d28, [x21, x15]\n"
"ssubl v28.8h, v28.8b, v12.8b\n"
- "ldr x25, [x15, #0x98]\n"
+ "smlal v22.4s, v31.4h, v2.4h\n"
+ "smlal2 v21.4s, v31.8h, v2.8h\n"
+ "ldr x24, [x12, #0x98]\n"
+ "ldr d24, [x19, x15]\n"
"smlal v15.4s, v27.4h, v5.4h\n"
- "smlal2 v17.4s, v27.8h, v5.8h\n"
- "ssubl v29.8h, v29.8b, v12.8b\n"
- "ldr x24, [x15, #0x90]\n"
- "smlal v10.4s, v27.4h, v3.4h\n"
- "smlal2 v20.4s, v27.8h, v3.8h\n"
- "ldr d27, [x23, x17]\n"
+ "smlal2 v10.4s, v27.8h, v5.8h\n"
+ "ssubl v24.8h, v24.8b, v12.8b\n"
+ "ldr x23, [x12, #0x90]\n"
+ "smlal v9.4s, v27.4h, v3.4h\n"
+ "smlal2 v16.4s, v27.8h, v3.8h\n"
+ "ldr d27, [x22, x15]\n"
"ssubl v27.8h, v27.8b, v12.8b\n"
- "smlal v21.4s, v31.4h, v0.4h\n"
- "smlal v9.4s, v26.4h, v3.4h\n"
- "ldr x23, [x15, #0xa8]\n"
- "ldr x20, [x15, #0xa0]\n"
- "smlal2 v23.4s, v26.8h, v3.8h\n"
- "ldr d26, [x22, x17]\n"
- "smlal2 v22.4s, v31.8h, v0.8h\n"
- "ldr d24, [x21, x17]\n"
- "smlal v21.4s, v27.4h, v4.4h\n"
- "smlal v9.4s, v25.4h, v0.4h\n"
+ "smlal v23.4s, v31.4h, v0.4h\n"
+ "smlal v22.4s, v26.4h, v3.4h\n"
+ "ldr x22, [x12, #0xa8]\n"
+ "ldr x19, [x12, #0xa0]\n"
+ "smlal2 v21.4s, v26.8h, v3.8h\n"
+ "smlal2 v18.4s, v31.8h, v0.8h\n"
+ "ldr d26, [x20, x15]\n"
"ssubl v26.8h, v26.8b, v12.8b\n"
- "ldr x22, [x15, #0xb0]\n"
- "smlal2 v23.4s, v25.8h, v0.8h\n"
- "ldr q30, [x13, #0x10]\n"
- "smlal2 v22.4s, v27.8h, v4.8h\n"
- "ldr d27, [x20, x17]\n"
- "smlal v21.4s, v28.4h, v1.4h\n"
+ "smlal v23.4s, v27.4h, v4.4h\n"
+ "smlal v22.4s, v25.4h, v0.4h\n"
+ "ldr x21, [x12, #0xb0]\n"
+ "ldr x20, [x12, #0xb8]\n"
+ "smlal2 v21.4s, v25.8h, v0.8h\n"
+ "smlal2 v18.4s, v27.8h, v4.8h\n"
+ "ldr d27, [x19, x15]\n"
+ "ssubl v27.8h, v27.8b, v12.8b\n"
+ "smlal v23.4s, v28.4h, v1.4h\n"
"smlal v15.4s, v25.4h, v6.4h\n"
- "ssubl v24.8h, v24.8b, v12.8b\n"
- "ldr x21, [x15, #0xb8]\n"
- "smlal2 v17.4s, v25.8h, v6.8h\n"
- "ldr d25, [x24, x17]\n"
- "smlal v9.4s, v29.4h, v4.4h\n"
+ "ldr x19, [x12, #0xc0]\n"
+ "ldr q19, [x13, #0x0]\n"
+ "smlal2 v10.4s, v25.8h, v6.8h\n"
+ "smlal v22.4s, v29.4h, v4.4h\n"
+ "ldr d25, [x23, x15]\n"
"ssubl v25.8h, v25.8b, v12.8b\n"
- "smlal2 v23.4s, v29.8h, v4.8h\n"
- "ldr d29, [x25, x17]\n"
- "ldr q31, [x12, #0x10]\n"
- "smlal2 v22.4s, v28.8h, v1.8h\n"
- "smlal v21.4s, v26.4h, v5.4h\n"
+ "smlal2 v21.4s, v29.8h, v4.8h\n"
+ "ldr d29, [x24, x15]\n"
+ "smlal2 v18.4s, v28.8h, v1.8h\n"
"ssubl v29.8h, v29.8b, v12.8b\n"
+ "smlal v23.4s, v26.4h, v5.4h\n"
"smlal v15.4s, v24.4h, v7.4h\n"
- "ldr x20, [x15, #0xc0]\n"
- "smlal2 v17.4s, v24.8h, v7.8h\n"
- "smlal v9.4s, v24.4h, v1.4h\n"
- "ssubl v27.8h, v27.8b, v12.8b\n"
- "tst x7, #0x7\n"
- "smlal2 v23.4s, v24.8h, v1.8h\n"
- "ldr d24, [x23, x17]\n"
- "smlal2 v22.4s, v26.8h, v5.8h\n"
- "ldr d26, [x22, x17]\n"
- "smlal v21.4s, v29.4h, v2.4h\n"
+ "ldr q0, [x11, #0x0]\n"
+ "ldr q4, [x13, #0x10]\n"
+ "smlal2 v10.4s, v24.8h, v7.8h\n"
+ "smlal v22.4s, v24.4h, v1.4h\n"
+ "sqrdmulh v15.4s, v15.4s, v19.4s\n"
+ "ldr q31, [x11, #0x10]\n"
+ "smlal2 v21.4s, v24.8h, v1.8h\n"
+ "ldr d24, [x22, x15]\n"
+ "smlal2 v18.4s, v26.8h, v5.8h\n"
"ssubl v24.8h, v24.8b, v12.8b\n"
- "smlal2 v22.4s, v29.8h, v2.8h\n"
- "add x13, x13, #0x20\n"
- "smlal v9.4s, v25.4h, v6.4h\n"
- "smlal v21.4s, v24.4h, v3.4h\n"
+ "smlal v23.4s, v29.4h, v2.4h\n"
+ "ldr d26, [x21, x15]\n"
+ "smlal2 v18.4s, v29.8h, v2.8h\n"
"ssubl v26.8h, v26.8b, v12.8b\n"
- "add x12, x12, #0x20\n"
- "smlal v10.4s, v28.4h, v7.4h\n"
- "smlal2 v20.4s, v28.8h, v7.8h\n"
- "sqrdmulh v15.4s, v15.4s, v19.4s\n"
- "smlal2 v23.4s, v25.8h, v6.8h\n"
- "ldr d25, [x21, x17]\n"
- "smlal2 v22.4s, v24.8h, v3.8h\n"
+ "smlal v22.4s, v25.4h, v6.4h\n"
+ "smlal v23.4s, v24.4h, v3.4h\n"
+ "and v30.16b, v15.16b, v0.16b\n"
+ "tst x8, #0x7\n"
+ "smlal v9.4s, v28.4h, v7.4h\n"
+ "smlal2 v16.4s, v28.8h, v7.8h\n"
+ "sqrdmulh v10.4s, v10.4s, v4.4s\n"
+ "add x13, x13, #0x20\n"
+ "smlal2 v21.4s, v25.8h, v6.8h\n"
+ "ldr d25, [x20, x15]\n"
+ "smlal2 v18.4s, v24.8h, v3.8h\n"
"ssubl v25.8h, v25.8b, v12.8b\n"
- "smlal v9.4s, v27.4h, v7.4h\n"
- "smlal v21.4s, v26.4h, v7.4h\n"
- "and v0.16b, v15.16b, v18.16b\n"
- "smlal v10.4s, v29.4h, v8.4h\n"
- "smlal2 v20.4s, v29.8h, v8.8h\n"
- "ldr d29, [x20, x17]\n"
+ "smlal v22.4s, v27.4h, v7.4h\n"
+ "smlal v23.4s, v26.4h, v7.4h\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "add x11, x11, #0x20\n"
+ "smlal v9.4s, v29.4h, v8.4h\n"
+ "smlal2 v16.4s, v29.8h, v8.8h\n"
+ "ldr d29, [x19, x15]\n"
"ssubl v29.8h, v29.8b, v12.8b\n"
- "smlal2 v23.4s, v27.8h, v7.8h\n"
- "smlal2 v22.4s, v26.8h, v7.8h\n"
- "sqrdmulh v17.4s, v17.4s, v30.4s\n"
- "add x17, x17, #0x8\n"
- "smlal v9.4s, v24.4h, v5.4h\n"
- "smlal v21.4s, v25.4h, v6.4h\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "smlal2 v23.4s, v24.8h, v5.8h\n"
- "smlal2 v22.4s, v25.8h, v6.8h\n"
- "and v7.16b, v17.16b, v31.16b\n"
- "smlal v9.4s, v25.4h, v8.4h\n"
- "smlal v21.4s, v29.4h, v8.4h\n"
- "sqrdmulh v10.4s, v10.4s, v19.4s\n"
- "smlal2 v23.4s, v25.8h, v8.8h\n"
- "smlal2 v22.4s, v29.8h, v8.8h\n"
+ "smlal2 v21.4s, v27.8h, v7.8h\n"
+ "smlal2 v18.4s, v26.8h, v7.8h\n"
"sqrdmulh v9.4s, v9.4s, v19.4s\n"
- "sqrdmulh v21.4s, v21.4s, v19.4s\n"
- "sqadd v15.4s, v15.4s, v0.4s\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "and v19.16b, v10.16b, v18.16b\n"
- "sqrdmulh v20.4s, v20.4s, v30.4s\n"
- "and v27.16b, v9.16b, v18.16b\n"
- "sqrdmulh v23.4s, v23.4s, v30.4s\n"
- "and v0.16b, v21.16b, v18.16b\n"
- "sqrdmulh v22.4s, v22.4s, v30.4s\n"
- "sqadd v17.4s, v17.4s, v7.4s\n"
+ "add x15, x15, #0x8\n"
+ "smlal v22.4s, v24.4h, v5.4h\n"
+ "smlal v23.4s, v25.4h, v6.4h\n"
+ "and v28.16b, v9.16b, v0.16b\n"
+ "smlal2 v21.4s, v24.8h, v5.8h\n"
+ "smlal2 v18.4s, v25.8h, v6.8h\n"
+ "sqrdmulh v16.4s, v16.4s, v4.4s\n"
+ "smlal v22.4s, v25.4h, v8.4h\n"
+ "smlal v23.4s, v29.4h, v8.4h\n"
+ "sqrdmulh v22.4s, v22.4s, v19.4s\n"
+ "smlal2 v21.4s, v25.8h, v8.8h\n"
+ "smlal2 v18.4s, v29.8h, v8.8h\n"
+ "sqrdmulh v23.4s, v23.4s, v19.4s\n"
+ "and v29.16b, v22.16b, v0.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v4.4s\n"
+ "and v20.16b, v23.16b, v0.16b\n"
+ "sqrdmulh v18.4s, v18.4s, v4.4s\n"
+ "and v19.16b, v10.16b, v31.16b\n"
+ "sshr v28.4s, v28.4s, #0x1f\n"
+ "and v4.16b, v16.16b, v31.16b\n"
+ "sshr v29.4s, v29.4s, #0x1f\n"
+ "and v5.16b, v21.16b, v31.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v26.16b, v18.16b, v31.16b\n"
+ "sqadd v15.4s, v15.4s, v30.4s\n"
"sshr v19.4s, v19.4s, #0x1f\n"
- "and v5.16b, v20.16b, v31.16b\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "and v4.16b, v23.16b, v31.16b\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "and v7.16b, v22.16b, v31.16b\n"
- "sqadd v10.4s, v10.4s, v19.4s\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sqadd v9.4s, v9.4s, v27.4s\n"
+ "sqadd v9.4s, v9.4s, v28.4s\n"
"sshr v4.4s, v4.4s, #0x1f\n"
- "sqadd v21.4s, v21.4s, v0.4s\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "srshl v15.4s, v15.4s, v18.4s\n"
- "srshl v10.4s, v10.4s, v18.4s\n"
- "sqadd v20.4s, v20.4s, v5.4s\n"
- "srshl v9.4s, v9.4s, v18.4s\n"
- "sqadd v23.4s, v23.4s, v4.4s\n"
- "srshl v21.4s, v21.4s, v18.4s\n"
- "sqadd v22.4s, v22.4s, v7.4s\n"
- "srshl v17.4s, v17.4s, v31.4s\n"
+ "sqadd v22.4s, v22.4s, v29.4s\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
+ "sqadd v23.4s, v23.4s, v20.4s\n"
+ "sshr v26.4s, v26.4s, #0x1f\n"
+ "srshl v15.4s, v15.4s, v0.4s\n"
+ "sqadd v10.4s, v10.4s, v19.4s\n"
+ "srshl v9.4s, v9.4s, v0.4s\n"
+ "sqadd v16.4s, v16.4s, v4.4s\n"
+ "srshl v22.4s, v22.4s, v0.4s\n"
+ "sqadd v21.4s, v21.4s, v5.4s\n"
+ "srshl v23.4s, v23.4s, v0.4s\n"
+ "sqadd v18.4s, v18.4s, v26.4s\n"
+ "srshl v10.4s, v10.4s, v31.4s\n"
"sqxtn v15.4h, v15.4s\n"
- "srshl v20.4s, v20.4s, v31.4s\n"
- "sqxtn v10.4h, v10.4s\n"
- "srshl v23.4s, v23.4s, v31.4s\n"
+ "srshl v16.4s, v16.4s, v31.4s\n"
"sqxtn v9.4h, v9.4s\n"
- "srshl v22.4s, v22.4s, v31.4s\n"
- "sqxtn v21.4h, v21.4s\n"
- "sqxtn2 v15.8h, v17.4s\n"
- "sqxtn2 v10.8h, v20.4s\n"
- "sqxtn2 v9.8h, v23.4s\n"
- "sqxtn2 v21.8h, v22.4s\n"
+ "srshl v21.4s, v21.4s, v31.4s\n"
+ "sqxtn v22.4h, v22.4s\n"
+ "srshl v18.4s, v18.4s, v31.4s\n"
+ "sqxtn v23.4h, v23.4s\n"
+ "sqxtn2 v15.8h, v10.4s\n"
+ "sqxtn2 v9.8h, v16.4s\n"
+ "sqxtn2 v22.8h, v21.4s\n"
+ "sqxtn2 v23.8h, v18.4s\n"
"sqadd v15.8h, v15.8h, v11.8h\n"
- "sqadd v10.8h, v10.8h, v11.8h\n"
"sqadd v9.8h, v9.8h, v11.8h\n"
- "sqadd v21.8h, v21.8h, v11.8h\n"
- "smax v15.8h, v15.8h, v16.8h\n"
- "smax v10.8h, v10.8h, v16.8h\n"
- "smax v9.8h, v9.8h, v16.8h\n"
- "smax v21.8h, v21.8h, v16.8h\n"
+ "sqadd v22.8h, v22.8h, v11.8h\n"
+ "sqadd v23.8h, v23.8h, v11.8h\n"
+ "smax v15.8h, v15.8h, v17.8h\n"
+ "smax v9.8h, v9.8h, v17.8h\n"
+ "smax v22.8h, v22.8h, v17.8h\n"
+ "smax v23.8h, v23.8h, v17.8h\n"
"smin v15.8h, v15.8h, v14.8h\n"
- "smin v10.8h, v10.8h, v14.8h\n"
"smin v9.8h, v9.8h, v14.8h\n"
- "smin v21.8h, v21.8h, v14.8h\n"
+ "smin v22.8h, v22.8h, v14.8h\n"
+ "smin v23.8h, v23.8h, v14.8h\n"
"uzp1 v15.16b, v15.16b, v15.16b\n"
- "str d15, [x11, x16]\n"
- "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "str d15, [x10, x14]\n"
"uzp1 v9.16b, v9.16b, v9.16b\n"
- "str d10, [x10, x16]\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "str d9, [x9, x16]\n"
- "str d21, [x28, x16]\n"
- "add x16, x16, #0x8\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str d9, [x9, x14]\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "str d22, [x28, x14]\n"
+ "str d23, [x27, x14]\n"
+ "add x14, x14, #0x8\n"
"beq 88f\n"
- "add x14, x14, #0x48\n"
+ "add x17, x17, #0x48\n"
"3:" // Oddments
- "ldr x24, [%x[params], %[offsetof_Params_bias]]\n"
- "tbz x7, #2, 5f\n"
- "ld1 { v15.4s }, [x24], #0x10\n"
- "tbz x7, #1, 4f\n"
- "ld1 { v17.d }[0], [x24], #0x8\n"
- "tbz x7, #0, 7f\n"
- "ld1 { v17.s }[2], [x24]\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "tbz x8, #2, 5f\n"
+ "ld1 { v15.4s }, [x19], #0x10\n"
+ "tbz x8, #1, 4f\n"
+ "ld1 { v10.d }[0], [x19], #0x8\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v10.s }[2], [x19]\n"
"b 7f\n"
"4:" // Oddments: Load bias: Bit 2: Bit 1: Unset
- "tbz x7, #0, 7f\n"
- "ld1 { v17.s }[0], [x24]\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v10.s }[0], [x19]\n"
"b 7f\n"
"5:" // Oddments: Load bias: Bit 2: Unset
- "tbz x7, #1, 6f\n"
- "ld1 { v15.d }[0], [x24], #0x8\n"
- "tbz x7, #0, 7f\n"
- "ld1 { v15.s }[2], [x24]\n"
+ "tbz x8, #1, 6f\n"
+ "ld1 { v15.d }[0], [x19], #0x8\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v15.s }[2], [x19]\n"
"b 7f\n"
"6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 7f\n"
- "ld1 { v15.s }[0], [x24]\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v15.s }[0], [x19]\n"
"7:" // Oddments: Load bias: Bit 2: End
- "ldr d0, [x14, #0x0]\n"
- "ldr d1, [x14, #0x8]\n"
- "mov v10.16b, v15.16b\n"
- "mov v20.16b, v17.16b\n"
- "ldr d2, [x14, #0x10]\n"
- "ldr d3, [x14, #0x18]\n"
+ "ldr d0, [x17, #0x0]\n"
+ "ldr d1, [x17, #0x8]\n"
"mov v9.16b, v15.16b\n"
- "mov v23.16b, v17.16b\n"
- "ldr d4, [x14, #0x20]\n"
- "ldr d5, [x14, #0x28]\n"
- "mov v21.16b, v15.16b\n"
- "mov v22.16b, v17.16b\n"
- "ldr d6, [x14, #0x30]\n"
- "ldr d7, [x14, #0x38]\n"
+ "mov v16.16b, v10.16b\n"
+ "ldr d2, [x17, #0x10]\n"
+ "ldr d3, [x17, #0x18]\n"
+ "mov v22.16b, v15.16b\n"
+ "mov v21.16b, v10.16b\n"
+ "ldr d4, [x17, #0x20]\n"
+ "ldr d5, [x17, #0x28]\n"
+ "mov v23.16b, v15.16b\n"
+ "mov v18.16b, v10.16b\n"
+ "ldr d6, [x17, #0x30]\n"
+ "ldr d7, [x17, #0x38]\n"
"ssubl v0.8h, v0.8b, v13.8b\n"
"ssubl v1.8h, v1.8b, v13.8b\n"
- "ldr d8, [x14, #0x40]\n"
- "ldp x27, x26, [x15, #0x0]\n"
+ "ldr d8, [x17, #0x40]\n"
+ "ldp x26, x25, [x12, #0x0]\n"
"ssubl v2.8h, v2.8b, v13.8b\n"
"ssubl v3.8h, v3.8b, v13.8b\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "ldp x23, x22, [x15, #0x20]\n"
+ "ldp x24, x23, [x12, #0x10]\n"
+ "ldp x22, x21, [x12, #0x20]\n"
"ssubl v4.8h, v4.8b, v13.8b\n"
"ssubl v5.8h, v5.8b, v13.8b\n"
- "ldp x21, x20, [x15, #0x30]\n"
+ "ldp x20, x19, [x12, #0x30]\n"
"ssubl v6.8h, v6.8b, v13.8b\n"
"ssubl v7.8h, v7.8b, v13.8b\n"
"ssubl v8.8h, v8.8b, v13.8b\n"
- "add x27, x27, x17\n"
- "add x26, x26, x17\n"
- "add x25, x25, x17\n"
- "add x24, x24, x17\n"
- "add x23, x23, x17\n"
- "add x22, x22, x17\n"
- "add x21, x21, x17\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 9f\n"
- "ld1 { v31.s }[0], [x27], #0x4\n"
- "ld1 { v30.s }[0], [x26], #0x4\n"
- "ld1 { v29.s }[0], [x25], #0x4\n"
- "ld1 { v28.s }[0], [x24], #0x4\n"
- "ld1 { v27.s }[0], [x23], #0x4\n"
- "ld1 { v26.s }[0], [x22], #0x4\n"
- "ld1 { v25.s }[0], [x21], #0x4\n"
- "ld1 { v24.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 8f\n"
- "ld1 { v31.h }[2], [x27], #0x2\n"
- "ld1 { v30.h }[2], [x26], #0x2\n"
- "ld1 { v29.h }[2], [x25], #0x2\n"
- "ld1 { v28.h }[2], [x24], #0x2\n"
- "ld1 { v27.h }[2], [x23], #0x2\n"
- "ld1 { v26.h }[2], [x22], #0x2\n"
- "ld1 { v25.h }[2], [x21], #0x2\n"
- "ld1 { v24.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 11f\n"
- "ld1 { v31.b }[6], [x27]\n"
- "ld1 { v30.b }[6], [x26]\n"
- "ld1 { v29.b }[6], [x25]\n"
- "ld1 { v28.b }[6], [x24]\n"
- "ld1 { v27.b }[6], [x23]\n"
- "ld1 { v26.b }[6], [x22]\n"
- "ld1 { v25.b }[6], [x21]\n"
- "ld1 { v24.b }[6], [x20]\n"
+ "add x26, x26, x15\n"
+ "add x25, x25, x15\n"
+ "add x24, x24, x15\n"
+ "add x23, x23, x15\n"
+ "add x22, x22, x15\n"
+ "add x21, x21, x15\n"
+ "add x20, x20, x15\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 9f\n"
+ "ld1 { v31.s }[0], [x26], #0x4\n"
+ "ld1 { v30.s }[0], [x25], #0x4\n"
+ "ld1 { v29.s }[0], [x24], #0x4\n"
+ "ld1 { v28.s }[0], [x23], #0x4\n"
+ "ld1 { v27.s }[0], [x22], #0x4\n"
+ "ld1 { v26.s }[0], [x21], #0x4\n"
+ "ld1 { v25.s }[0], [x20], #0x4\n"
+ "ld1 { v24.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 8f\n"
+ "ld1 { v31.h }[2], [x26], #0x2\n"
+ "ld1 { v30.h }[2], [x25], #0x2\n"
+ "ld1 { v29.h }[2], [x24], #0x2\n"
+ "ld1 { v28.h }[2], [x23], #0x2\n"
+ "ld1 { v27.h }[2], [x22], #0x2\n"
+ "ld1 { v26.h }[2], [x21], #0x2\n"
+ "ld1 { v25.h }[2], [x20], #0x2\n"
+ "ld1 { v24.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v31.b }[6], [x26]\n"
+ "ld1 { v30.b }[6], [x25]\n"
+ "ld1 { v29.b }[6], [x24]\n"
+ "ld1 { v28.b }[6], [x23]\n"
+ "ld1 { v27.b }[6], [x22]\n"
+ "ld1 { v26.b }[6], [x21]\n"
+ "ld1 { v25.b }[6], [x20]\n"
+ "ld1 { v24.b }[6], [x19]\n"
"b 11f\n"
"8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset
- "tbz x7, #0, 11f\n"
- "ld1 { v31.b }[4], [x27]\n"
- "ld1 { v30.b }[4], [x26]\n"
- "ld1 { v29.b }[4], [x25]\n"
- "ld1 { v28.b }[4], [x24]\n"
- "ld1 { v27.b }[4], [x23]\n"
- "ld1 { v26.b }[4], [x22]\n"
- "ld1 { v25.b }[4], [x21]\n"
- "ld1 { v24.b }[4], [x20]\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v31.b }[4], [x26]\n"
+ "ld1 { v30.b }[4], [x25]\n"
+ "ld1 { v29.b }[4], [x24]\n"
+ "ld1 { v28.b }[4], [x23]\n"
+ "ld1 { v27.b }[4], [x22]\n"
+ "ld1 { v26.b }[4], [x21]\n"
+ "ld1 { v25.b }[4], [x20]\n"
+ "ld1 { v24.b }[4], [x19]\n"
"b 11f\n"
"9:" // Oddments: Initial loads: Bit 2: Unset
- "tbz x7, #1, 10f\n"
- "ld1 { v31.h }[0], [x27], #0x2\n"
- "ld1 { v30.h }[0], [x26], #0x2\n"
- "ld1 { v29.h }[0], [x25], #0x2\n"
- "ld1 { v28.h }[0], [x24], #0x2\n"
- "ld1 { v27.h }[0], [x23], #0x2\n"
- "ld1 { v26.h }[0], [x22], #0x2\n"
- "ld1 { v25.h }[0], [x21], #0x2\n"
- "ld1 { v24.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 11f\n"
- "ld1 { v31.b }[2], [x27]\n"
- "ld1 { v30.b }[2], [x26]\n"
- "ld1 { v29.b }[2], [x25]\n"
- "ld1 { v28.b }[2], [x24]\n"
- "ld1 { v27.b }[2], [x23]\n"
- "ld1 { v26.b }[2], [x22]\n"
- "ld1 { v25.b }[2], [x21]\n"
- "ld1 { v24.b }[2], [x20]\n"
+ "tbz x8, #1, 10f\n"
+ "ld1 { v31.h }[0], [x26], #0x2\n"
+ "ld1 { v30.h }[0], [x25], #0x2\n"
+ "ld1 { v29.h }[0], [x24], #0x2\n"
+ "ld1 { v28.h }[0], [x23], #0x2\n"
+ "ld1 { v27.h }[0], [x22], #0x2\n"
+ "ld1 { v26.h }[0], [x21], #0x2\n"
+ "ld1 { v25.h }[0], [x20], #0x2\n"
+ "ld1 { v24.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v31.b }[2], [x26]\n"
+ "ld1 { v30.b }[2], [x25]\n"
+ "ld1 { v29.b }[2], [x24]\n"
+ "ld1 { v28.b }[2], [x23]\n"
+ "ld1 { v27.b }[2], [x22]\n"
+ "ld1 { v26.b }[2], [x21]\n"
+ "ld1 { v25.b }[2], [x20]\n"
+ "ld1 { v24.b }[2], [x19]\n"
"b 11f\n"
"10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 11f\n"
- "ld1 { v31.b }[0], [x27]\n"
- "ld1 { v30.b }[0], [x26]\n"
- "ld1 { v29.b }[0], [x25]\n"
- "ld1 { v28.b }[0], [x24]\n"
- "ld1 { v27.b }[0], [x23]\n"
- "ld1 { v26.b }[0], [x22]\n"
- "ld1 { v25.b }[0], [x21]\n"
- "ld1 { v24.b }[0], [x20]\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v31.b }[0], [x26]\n"
+ "ld1 { v30.b }[0], [x25]\n"
+ "ld1 { v29.b }[0], [x24]\n"
+ "ld1 { v28.b }[0], [x23]\n"
+ "ld1 { v27.b }[0], [x22]\n"
+ "ld1 { v26.b }[0], [x21]\n"
+ "ld1 { v25.b }[0], [x20]\n"
+ "ld1 { v24.b }[0], [x19]\n"
"11:" // Oddments: Initial loads: Bit 2: End
"ssubl v31.8h, v31.8b, v12.8b\n"
"smlal v15.4s, v31.4h, v8.4h\n"
- "smlal2 v17.4s, v31.8h, v8.8h\n"
- "ldr x24, [x15, #0x40]\n"
+ "smlal2 v10.4s, v31.8h, v8.8h\n"
+ "ldr x24, [x12, #0x40]\n"
"ssubl v30.8h, v30.8b, v12.8b\n"
"smlal v15.4s, v30.4h, v0.4h\n"
- "smlal2 v17.4s, v30.8h, v0.8h\n"
- "add x24, x24, x17\n"
+ "smlal2 v10.4s, v30.8h, v0.8h\n"
+ "add x24, x24, x15\n"
"ssubl v29.8h, v29.8b, v12.8b\n"
- "smlal v10.4s, v31.4h, v6.4h\n"
- "smlal2 v20.4s, v31.8h, v6.8h\n"
+ "smlal v9.4s, v31.4h, v6.4h\n"
+ "smlal2 v16.4s, v31.8h, v6.8h\n"
"smlal v15.4s, v29.4h, v1.4h\n"
- "smlal2 v17.4s, v29.8h, v1.8h\n"
+ "smlal2 v10.4s, v29.8h, v1.8h\n"
"ssubl v28.8h, v28.8b, v12.8b\n"
"ssubl v26.8h, v26.8b, v12.8b\n"
- "smlal v10.4s, v28.4h, v1.4h\n"
- "smlal2 v20.4s, v28.8h, v1.8h\n"
+ "smlal v9.4s, v28.4h, v1.4h\n"
+ "smlal2 v16.4s, v28.8h, v1.8h\n"
"smlal v15.4s, v26.4h, v3.4h\n"
- "smlal2 v17.4s, v26.8h, v3.8h\n"
+ "smlal2 v10.4s, v26.8h, v3.8h\n"
"ssubl v27.8h, v27.8b, v12.8b\n"
"ssubl v25.8h, v25.8b, v12.8b\n"
- "smlal v10.4s, v27.4h, v2.4h\n"
- "smlal2 v20.4s, v27.8h, v2.8h\n"
+ "smlal v9.4s, v27.4h, v2.4h\n"
+ "smlal2 v16.4s, v27.8h, v2.8h\n"
"smlal v15.4s, v25.4h, v4.4h\n"
- "smlal2 v17.4s, v25.8h, v4.8h\n"
+ "smlal2 v10.4s, v25.8h, v4.8h\n"
"ssubl v24.8h, v24.8b, v12.8b\n"
- "smlal v9.4s, v31.4h, v2.4h\n"
- "smlal2 v23.4s, v31.8h, v2.8h\n"
- "smlal v21.4s, v31.4h, v0.4h\n"
- "smlal2 v22.4s, v31.8h, v0.8h\n"
+ "smlal v22.4s, v31.4h, v2.4h\n"
+ "smlal2 v21.4s, v31.8h, v2.8h\n"
+ "smlal v23.4s, v31.4h, v0.4h\n"
+ "smlal2 v18.4s, v31.8h, v0.8h\n"
"smlal v15.4s, v24.4h, v2.4h\n"
- "smlal2 v17.4s, v24.8h, v2.8h\n"
- "smlal v10.4s, v24.4h, v0.4h\n"
- "smlal2 v20.4s, v24.8h, v0.8h\n"
- "tbz x7, #2, 13f\n"
+ "smlal2 v10.4s, v24.8h, v2.8h\n"
+ "smlal v9.4s, v24.4h, v0.4h\n"
+ "smlal2 v16.4s, v24.8h, v0.8h\n"
+ "tbz x8, #2, 13f\n"
"ld1 { v29.s }[0], [x24], #0x4\n"
- "tbz x7, #1, 12f\n"
+ "tbz x8, #1, 12f\n"
"ld1 { v29.h }[2], [x24], #0x2\n"
- "tbz x7, #0, 15f\n"
+ "tbz x8, #0, 15f\n"
"ld1 { v29.b }[6], [x24]\n"
"b 15f\n"
"12:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 15f\n"
+ "tbz x8, #0, 15f\n"
"ld1 { v29.b }[4], [x24]\n"
"b 15f\n"
"13:" // Oddments: Load (1, 3): Bit 2: Unset
- "tbz x7, #1, 14f\n"
+ "tbz x8, #1, 14f\n"
"ld1 { v29.h }[0], [x24], #0x2\n"
- "tbz x7, #0, 15f\n"
+ "tbz x8, #0, 15f\n"
"ld1 { v29.b }[2], [x24]\n"
"b 15f\n"
"14:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 15f\n"
+ "tbz x8, #0, 15f\n"
"ld1 { v29.b }[0], [x24]\n"
"15:" // Oddments: Load (1, 3): Bit 2: End
"ssubl v29.8h, v29.8b, v12.8b\n"
- "ldr x22, [x15, #0x48]\n"
- "smlal v10.4s, v29.4h, v4.4h\n"
- "smlal2 v20.4s, v29.8h, v4.8h\n"
- "add x22, x22, x17\n"
- "tbz x7, #2, 17f\n"
- "ld1 { v28.s }[0], [x22], #0x4\n"
- "tbz x7, #1, 16f\n"
- "ld1 { v28.h }[2], [x22], #0x2\n"
- "tbz x7, #0, 19f\n"
- "ld1 { v28.b }[6], [x22]\n"
+ "ldr x23, [x12, #0x48]\n"
+ "smlal v9.4s, v29.4h, v4.4h\n"
+ "smlal2 v16.4s, v29.8h, v4.8h\n"
+ "add x23, x23, x15\n"
+ "tbz x8, #2, 17f\n"
+ "ld1 { v28.s }[0], [x23], #0x4\n"
+ "tbz x8, #1, 16f\n"
+ "ld1 { v28.h }[2], [x23], #0x2\n"
+ "tbz x8, #0, 19f\n"
+ "ld1 { v28.b }[6], [x23]\n"
"b 19f\n"
"16:" // Oddments: Load (1, 4): Bit 2: Bit 1: Unset
- "tbz x7, #0, 19f\n"
- "ld1 { v28.b }[4], [x22]\n"
+ "tbz x8, #0, 19f\n"
+ "ld1 { v28.b }[4], [x23]\n"
"b 19f\n"
"17:" // Oddments: Load (1, 4): Bit 2: Unset
- "tbz x7, #1, 18f\n"
- "ld1 { v28.h }[0], [x22], #0x2\n"
- "tbz x7, #0, 19f\n"
- "ld1 { v28.b }[2], [x22]\n"
+ "tbz x8, #1, 18f\n"
+ "ld1 { v28.h }[0], [x23], #0x2\n"
+ "tbz x8, #0, 19f\n"
+ "ld1 { v28.b }[2], [x23]\n"
"b 19f\n"
"18:" // Oddments: Load (1, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 19f\n"
- "ld1 { v28.b }[0], [x22]\n"
+ "tbz x8, #0, 19f\n"
+ "ld1 { v28.b }[0], [x23]\n"
"19:" // Oddments: Load (1, 4): Bit 2: End
"ssubl v28.8h, v28.8b, v12.8b\n"
- "ldr x21, [x15, #0x50]\n"
- "smlal v10.4s, v28.4h, v5.4h\n"
- "smlal2 v20.4s, v28.8h, v5.8h\n"
- "add x21, x21, x17\n"
- "tbz x7, #2, 21f\n"
+ "ldr x21, [x12, #0x50]\n"
+ "smlal v9.4s, v28.4h, v5.4h\n"
+ "smlal2 v16.4s, v28.8h, v5.8h\n"
+ "add x21, x21, x15\n"
+ "tbz x8, #2, 21f\n"
"ld1 { v27.s }[0], [x21], #0x4\n"
- "tbz x7, #1, 20f\n"
+ "tbz x8, #1, 20f\n"
"ld1 { v27.h }[2], [x21], #0x2\n"
- "tbz x7, #0, 23f\n"
+ "tbz x8, #0, 23f\n"
"ld1 { v27.b }[6], [x21]\n"
"b 23f\n"
"20:" // Oddments: Load (1, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 23f\n"
+ "tbz x8, #0, 23f\n"
"ld1 { v27.b }[4], [x21]\n"
"b 23f\n"
"21:" // Oddments: Load (1, 2): Bit 2: Unset
- "tbz x7, #1, 22f\n"
+ "tbz x8, #1, 22f\n"
"ld1 { v27.h }[0], [x21], #0x2\n"
- "tbz x7, #0, 23f\n"
+ "tbz x8, #0, 23f\n"
"ld1 { v27.b }[2], [x21]\n"
"b 23f\n"
"22:" // Oddments: Load (1, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 23f\n"
+ "tbz x8, #0, 23f\n"
"ld1 { v27.b }[0], [x21]\n"
"23:" // Oddments: Load (1, 2): Bit 2: End
"ssubl v27.8h, v27.8b, v12.8b\n"
- "ldr x20, [x15, #0x58]\n"
+ "ldr x19, [x12, #0x58]\n"
"smlal v15.4s, v27.4h, v5.4h\n"
- "smlal2 v17.4s, v27.8h, v5.8h\n"
- "smlal v10.4s, v27.4h, v3.4h\n"
- "smlal2 v20.4s, v27.8h, v3.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 25f\n"
- "ld1 { v26.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 24f\n"
- "ld1 { v26.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 27f\n"
- "ld1 { v26.b }[6], [x20]\n"
+ "smlal2 v10.4s, v27.8h, v5.8h\n"
+ "smlal v9.4s, v27.4h, v3.4h\n"
+ "smlal2 v16.4s, v27.8h, v3.8h\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 25f\n"
+ "ld1 { v26.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 24f\n"
+ "ld1 { v26.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v26.b }[6], [x19]\n"
"b 27f\n"
"24:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 27f\n"
- "ld1 { v26.b }[4], [x20]\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v26.b }[4], [x19]\n"
"b 27f\n"
"25:" // Oddments: Load (3, 0): Bit 2: Unset
- "tbz x7, #1, 26f\n"
- "ld1 { v26.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 27f\n"
- "ld1 { v26.b }[2], [x20]\n"
+ "tbz x8, #1, 26f\n"
+ "ld1 { v26.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v26.b }[2], [x19]\n"
"b 27f\n"
"26:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 27f\n"
- "ld1 { v26.b }[0], [x20]\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v26.b }[0], [x19]\n"
"27:" // Oddments: Load (3, 0): Bit 2: End
"ssubl v26.8h, v26.8b, v12.8b\n"
- "ldr x20, [x15, #0x60]\n"
- "smlal v9.4s, v26.4h, v3.4h\n"
- "smlal2 v23.4s, v26.8h, v3.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 29f\n"
+ "ldr x20, [x12, #0x60]\n"
+ "smlal v22.4s, v26.4h, v3.4h\n"
+ "smlal2 v21.4s, v26.8h, v3.8h\n"
+ "add x20, x20, x15\n"
+ "tbz x8, #2, 29f\n"
"ld1 { v25.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 28f\n"
+ "tbz x8, #1, 28f\n"
"ld1 { v25.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 31f\n"
+ "tbz x8, #0, 31f\n"
"ld1 { v25.b }[6], [x20]\n"
"b 31f\n"
"28:" // Oddments: Load (2, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 31f\n"
+ "tbz x8, #0, 31f\n"
"ld1 { v25.b }[4], [x20]\n"
"b 31f\n"
"29:" // Oddments: Load (2, 0): Bit 2: Unset
- "tbz x7, #1, 30f\n"
+ "tbz x8, #1, 30f\n"
"ld1 { v25.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 31f\n"
+ "tbz x8, #0, 31f\n"
"ld1 { v25.b }[2], [x20]\n"
"b 31f\n"
"30:" // Oddments: Load (2, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 31f\n"
+ "tbz x8, #0, 31f\n"
"ld1 { v25.b }[0], [x20]\n"
"31:" // Oddments: Load (2, 0): Bit 2: End
"ssubl v25.8h, v25.8b, v12.8b\n"
- "ldr x20, [x15, #0x68]\n"
+ "ldr x19, [x12, #0x68]\n"
"smlal v15.4s, v25.4h, v6.4h\n"
- "smlal2 v17.4s, v25.8h, v6.8h\n"
- "smlal v9.4s, v25.4h, v0.4h\n"
- "smlal2 v23.4s, v25.8h, v0.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 33f\n"
- "ld1 { v29.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 32f\n"
- "ld1 { v29.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 35f\n"
- "ld1 { v29.b }[6], [x20]\n"
+ "smlal2 v10.4s, v25.8h, v6.8h\n"
+ "smlal v22.4s, v25.4h, v0.4h\n"
+ "smlal2 v21.4s, v25.8h, v0.8h\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 33f\n"
+ "ld1 { v29.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 32f\n"
+ "ld1 { v29.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v29.b }[6], [x19]\n"
"b 35f\n"
"32:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset
- "tbz x7, #0, 35f\n"
- "ld1 { v29.b }[4], [x20]\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v29.b }[4], [x19]\n"
"b 35f\n"
"33:" // Oddments: Load (3, 1): Bit 2: Unset
- "tbz x7, #1, 34f\n"
- "ld1 { v29.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 35f\n"
- "ld1 { v29.b }[2], [x20]\n"
+ "tbz x8, #1, 34f\n"
+ "ld1 { v29.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v29.b }[2], [x19]\n"
"b 35f\n"
"34:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 35f\n"
- "ld1 { v29.b }[0], [x20]\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v29.b }[0], [x19]\n"
"35:" // Oddments: Load (3, 1): Bit 2: End
"ssubl v29.8h, v29.8b, v12.8b\n"
- "ldr x21, [x15, #0x70]\n"
- "smlal v9.4s, v29.4h, v4.4h\n"
- "smlal2 v23.4s, v29.8h, v4.8h\n"
- "add x21, x21, x17\n"
- "tbz x7, #2, 37f\n"
- "ld1 { v24.s }[0], [x21], #0x4\n"
- "tbz x7, #1, 36f\n"
- "ld1 { v24.h }[2], [x21], #0x2\n"
- "tbz x7, #0, 39f\n"
- "ld1 { v24.b }[6], [x21]\n"
+ "ldr x19, [x12, #0x70]\n"
+ "smlal v22.4s, v29.4h, v4.4h\n"
+ "smlal2 v21.4s, v29.8h, v4.8h\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 37f\n"
+ "ld1 { v24.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 36f\n"
+ "ld1 { v24.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v24.b }[6], [x19]\n"
"b 39f\n"
"36:" // Oddments: Load (2, 1): Bit 2: Bit 1: Unset
- "tbz x7, #0, 39f\n"
- "ld1 { v24.b }[4], [x21]\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v24.b }[4], [x19]\n"
"b 39f\n"
"37:" // Oddments: Load (2, 1): Bit 2: Unset
- "tbz x7, #1, 38f\n"
- "ld1 { v24.h }[0], [x21], #0x2\n"
- "tbz x7, #0, 39f\n"
- "ld1 { v24.b }[2], [x21]\n"
+ "tbz x8, #1, 38f\n"
+ "ld1 { v24.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v24.b }[2], [x19]\n"
"b 39f\n"
"38:" // Oddments: Load (2, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 39f\n"
- "ld1 { v24.b }[0], [x21]\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v24.b }[0], [x19]\n"
"39:" // Oddments: Load (2, 1): Bit 2: End
"ssubl v24.8h, v24.8b, v12.8b\n"
- "ldr x23, [x15, #0x78]\n"
+ "ldr x22, [x12, #0x78]\n"
"smlal v15.4s, v24.4h, v7.4h\n"
- "smlal2 v17.4s, v24.8h, v7.8h\n"
- "smlal v9.4s, v24.4h, v1.4h\n"
- "smlal2 v23.4s, v24.8h, v1.8h\n"
- "add x23, x23, x17\n"
- "tbz x7, #2, 41f\n"
- "ld1 { v27.s }[0], [x23], #0x4\n"
- "tbz x7, #1, 40f\n"
- "ld1 { v27.h }[2], [x23], #0x2\n"
- "tbz x7, #0, 43f\n"
- "ld1 { v27.b }[6], [x23]\n"
+ "smlal2 v10.4s, v24.8h, v7.8h\n"
+ "smlal v22.4s, v24.4h, v1.4h\n"
+ "smlal2 v21.4s, v24.8h, v1.8h\n"
+ "add x22, x22, x15\n"
+ "tbz x8, #2, 41f\n"
+ "ld1 { v27.s }[0], [x22], #0x4\n"
+ "tbz x8, #1, 40f\n"
+ "ld1 { v27.h }[2], [x22], #0x2\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v27.b }[6], [x22]\n"
"b 43f\n"
"40:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 43f\n"
- "ld1 { v27.b }[4], [x23]\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v27.b }[4], [x22]\n"
"b 43f\n"
"41:" // Oddments: Load (3, 3): Bit 2: Unset
- "tbz x7, #1, 42f\n"
- "ld1 { v27.h }[0], [x23], #0x2\n"
- "tbz x7, #0, 43f\n"
- "ld1 { v27.b }[2], [x23]\n"
+ "tbz x8, #1, 42f\n"
+ "ld1 { v27.h }[0], [x22], #0x2\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v27.b }[2], [x22]\n"
"b 43f\n"
"42:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 43f\n"
- "ld1 { v27.b }[0], [x23]\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v27.b }[0], [x22]\n"
"43:" // Oddments: Load (3, 3): Bit 2: End
"ssubl v27.8h, v27.8b, v12.8b\n"
- "ldr x21, [x15, #0x80]\n"
- "smlal v21.4s, v27.4h, v4.4h\n"
- "smlal2 v22.4s, v27.8h, v4.8h\n"
- "add x21, x21, x17\n"
- "tbz x7, #2, 45f\n"
+ "ldr x21, [x12, #0x80]\n"
+ "smlal v23.4s, v27.4h, v4.4h\n"
+ "smlal2 v18.4s, v27.8h, v4.8h\n"
+ "add x21, x21, x15\n"
+ "tbz x8, #2, 45f\n"
"ld1 { v28.s }[0], [x21], #0x4\n"
- "tbz x7, #1, 44f\n"
+ "tbz x8, #1, 44f\n"
"ld1 { v28.h }[2], [x21], #0x2\n"
- "tbz x7, #0, 47f\n"
+ "tbz x8, #0, 47f\n"
"ld1 { v28.b }[6], [x21]\n"
"b 47f\n"
"44:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 47f\n"
+ "tbz x8, #0, 47f\n"
"ld1 { v28.b }[4], [x21]\n"
"b 47f\n"
"45:" // Oddments: Load (2, 3): Bit 2: Unset
- "tbz x7, #1, 46f\n"
+ "tbz x8, #1, 46f\n"
"ld1 { v28.h }[0], [x21], #0x2\n"
- "tbz x7, #0, 47f\n"
+ "tbz x8, #0, 47f\n"
"ld1 { v28.b }[2], [x21]\n"
"b 47f\n"
"46:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 47f\n"
+ "tbz x8, #0, 47f\n"
"ld1 { v28.b }[0], [x21]\n"
"47:" // Oddments: Load (2, 3): Bit 2: End
"ssubl v28.8h, v28.8b, v12.8b\n"
- "ldr x22, [x15, #0x88]\n"
- "smlal v10.4s, v28.4h, v7.4h\n"
- "smlal2 v20.4s, v28.8h, v7.8h\n"
- "smlal v21.4s, v28.4h, v1.4h\n"
- "smlal2 v22.4s, v28.8h, v1.8h\n"
- "add x22, x22, x17\n"
- "tbz x7, #2, 49f\n"
- "ld1 { v26.s }[0], [x22], #0x4\n"
- "tbz x7, #1, 48f\n"
- "ld1 { v26.h }[2], [x22], #0x2\n"
- "tbz x7, #0, 51f\n"
- "ld1 { v26.b }[6], [x22]\n"
+ "ldr x20, [x12, #0x88]\n"
+ "smlal v9.4s, v28.4h, v7.4h\n"
+ "smlal2 v16.4s, v28.8h, v7.8h\n"
+ "smlal v23.4s, v28.4h, v1.4h\n"
+ "smlal2 v18.4s, v28.8h, v1.8h\n"
+ "add x20, x20, x15\n"
+ "tbz x8, #2, 49f\n"
+ "ld1 { v26.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 48f\n"
+ "ld1 { v26.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v26.b }[6], [x20]\n"
"b 51f\n"
"48:" // Oddments: Load (3, 4): Bit 2: Bit 1: Unset
- "tbz x7, #0, 51f\n"
- "ld1 { v26.b }[4], [x22]\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v26.b }[4], [x20]\n"
"b 51f\n"
"49:" // Oddments: Load (3, 4): Bit 2: Unset
- "tbz x7, #1, 50f\n"
- "ld1 { v26.h }[0], [x22], #0x2\n"
- "tbz x7, #0, 51f\n"
- "ld1 { v26.b }[2], [x22]\n"
+ "tbz x8, #1, 50f\n"
+ "ld1 { v26.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v26.b }[2], [x20]\n"
"b 51f\n"
"50:" // Oddments: Load (3, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 51f\n"
- "ld1 { v26.b }[0], [x22]\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v26.b }[0], [x20]\n"
"51:" // Oddments: Load (3, 4): Bit 2: End
"ssubl v26.8h, v26.8b, v12.8b\n"
- "ldr x24, [x15, #0x90]\n"
- "smlal v21.4s, v26.4h, v5.4h\n"
- "smlal2 v22.4s, v26.8h, v5.8h\n"
- "add x24, x24, x17\n"
- "tbz x7, #2, 53f\n"
- "ld1 { v25.s }[0], [x24], #0x4\n"
- "tbz x7, #1, 52f\n"
- "ld1 { v25.h }[2], [x24], #0x2\n"
- "tbz x7, #0, 55f\n"
- "ld1 { v25.b }[6], [x24]\n"
+ "ldr x23, [x12, #0x90]\n"
+ "smlal v23.4s, v26.4h, v5.4h\n"
+ "smlal2 v18.4s, v26.8h, v5.8h\n"
+ "add x23, x23, x15\n"
+ "tbz x8, #2, 53f\n"
+ "ld1 { v25.s }[0], [x23], #0x4\n"
+ "tbz x8, #1, 52f\n"
+ "ld1 { v25.h }[2], [x23], #0x2\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v25.b }[6], [x23]\n"
"b 55f\n"
"52:" // Oddments: Load (4, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 55f\n"
- "ld1 { v25.b }[4], [x24]\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v25.b }[4], [x23]\n"
"b 55f\n"
"53:" // Oddments: Load (4, 0): Bit 2: Unset
- "tbz x7, #1, 54f\n"
- "ld1 { v25.h }[0], [x24], #0x2\n"
- "tbz x7, #0, 55f\n"
- "ld1 { v25.b }[2], [x24]\n"
+ "tbz x8, #1, 54f\n"
+ "ld1 { v25.h }[0], [x23], #0x2\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v25.b }[2], [x23]\n"
"b 55f\n"
"54:" // Oddments: Load (4, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 55f\n"
- "ld1 { v25.b }[0], [x24]\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v25.b }[0], [x23]\n"
"55:" // Oddments: Load (4, 0): Bit 2: End
"ssubl v25.8h, v25.8b, v12.8b\n"
- "ldr x25, [x15, #0x98]\n"
- "smlal v9.4s, v25.4h, v6.4h\n"
- "smlal2 v23.4s, v25.8h, v6.8h\n"
- "add x25, x25, x17\n"
- "tbz x7, #2, 57f\n"
- "ld1 { v29.s }[0], [x25], #0x4\n"
- "tbz x7, #1, 56f\n"
- "ld1 { v29.h }[2], [x25], #0x2\n"
- "tbz x7, #0, 59f\n"
- "ld1 { v29.b }[6], [x25]\n"
+ "ldr x24, [x12, #0x98]\n"
+ "smlal v22.4s, v25.4h, v6.4h\n"
+ "smlal2 v21.4s, v25.8h, v6.8h\n"
+ "add x24, x24, x15\n"
+ "tbz x8, #2, 57f\n"
+ "ld1 { v29.s }[0], [x24], #0x4\n"
+ "tbz x8, #1, 56f\n"
+ "ld1 { v29.h }[2], [x24], #0x2\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v29.b }[6], [x24]\n"
"b 59f\n"
"56:" // Oddments: Load (2, 4): Bit 2: Bit 1: Unset
- "tbz x7, #0, 59f\n"
- "ld1 { v29.b }[4], [x25]\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v29.b }[4], [x24]\n"
"b 59f\n"
"57:" // Oddments: Load (2, 4): Bit 2: Unset
- "tbz x7, #1, 58f\n"
- "ld1 { v29.h }[0], [x25], #0x2\n"
- "tbz x7, #0, 59f\n"
- "ld1 { v29.b }[2], [x25]\n"
+ "tbz x8, #1, 58f\n"
+ "ld1 { v29.h }[0], [x24], #0x2\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v29.b }[2], [x24]\n"
"b 59f\n"
"58:" // Oddments: Load (2, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 59f\n"
- "ld1 { v29.b }[0], [x25]\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v29.b }[0], [x24]\n"
"59:" // Oddments: Load (2, 4): Bit 2: End
"ssubl v29.8h, v29.8b, v12.8b\n"
- "ldr x20, [x15, #0xa0]\n"
- "smlal v10.4s, v29.4h, v8.4h\n"
- "smlal2 v20.4s, v29.8h, v8.8h\n"
- "smlal v21.4s, v29.4h, v2.4h\n"
- "smlal2 v22.4s, v29.8h, v2.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 61f\n"
- "ld1 { v27.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 60f\n"
- "ld1 { v27.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 63f\n"
- "ld1 { v27.b }[6], [x20]\n"
+ "ldr x19, [x12, #0xa0]\n"
+ "smlal v9.4s, v29.4h, v8.4h\n"
+ "smlal2 v16.4s, v29.8h, v8.8h\n"
+ "smlal v23.4s, v29.4h, v2.4h\n"
+ "smlal2 v18.4s, v29.8h, v2.8h\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 61f\n"
+ "ld1 { v27.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 60f\n"
+ "ld1 { v27.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 63f\n"
+ "ld1 { v27.b }[6], [x19]\n"
"b 63f\n"
"60:" // Oddments: Load (4, 1): Bit 2: Bit 1: Unset
- "tbz x7, #0, 63f\n"
- "ld1 { v27.b }[4], [x20]\n"
+ "tbz x8, #0, 63f\n"
+ "ld1 { v27.b }[4], [x19]\n"
"b 63f\n"
"61:" // Oddments: Load (4, 1): Bit 2: Unset
- "tbz x7, #1, 62f\n"
- "ld1 { v27.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 63f\n"
- "ld1 { v27.b }[2], [x20]\n"
+ "tbz x8, #1, 62f\n"
+ "ld1 { v27.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 63f\n"
+ "ld1 { v27.b }[2], [x19]\n"
"b 63f\n"
"62:" // Oddments: Load (4, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 63f\n"
- "ld1 { v27.b }[0], [x20]\n"
+ "tbz x8, #0, 63f\n"
+ "ld1 { v27.b }[0], [x19]\n"
"63:" // Oddments: Load (4, 1): Bit 2: End
"ssubl v27.8h, v27.8b, v12.8b\n"
- "ldr x23, [x15, #0xa8]\n"
- "smlal v9.4s, v27.4h, v7.4h\n"
- "smlal2 v23.4s, v27.8h, v7.8h\n"
- "add x23, x23, x17\n"
- "tbz x7, #2, 65f\n"
- "ld1 { v24.s }[0], [x23], #0x4\n"
- "tbz x7, #1, 64f\n"
- "ld1 { v24.h }[2], [x23], #0x2\n"
- "tbz x7, #0, 67f\n"
- "ld1 { v24.b }[6], [x23]\n"
+ "ldr x22, [x12, #0xa8]\n"
+ "smlal v22.4s, v27.4h, v7.4h\n"
+ "smlal2 v21.4s, v27.8h, v7.8h\n"
+ "add x22, x22, x15\n"
+ "tbz x8, #2, 65f\n"
+ "ld1 { v24.s }[0], [x22], #0x4\n"
+ "tbz x8, #1, 64f\n"
+ "ld1 { v24.h }[2], [x22], #0x2\n"
+ "tbz x8, #0, 67f\n"
+ "ld1 { v24.b }[6], [x22]\n"
"b 67f\n"
"64:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 67f\n"
- "ld1 { v24.b }[4], [x23]\n"
+ "tbz x8, #0, 67f\n"
+ "ld1 { v24.b }[4], [x22]\n"
"b 67f\n"
"65:" // Oddments: Load (3, 2): Bit 2: Unset
- "tbz x7, #1, 66f\n"
- "ld1 { v24.h }[0], [x23], #0x2\n"
- "tbz x7, #0, 67f\n"
- "ld1 { v24.b }[2], [x23]\n"
+ "tbz x8, #1, 66f\n"
+ "ld1 { v24.h }[0], [x22], #0x2\n"
+ "tbz x8, #0, 67f\n"
+ "ld1 { v24.b }[2], [x22]\n"
"b 67f\n"
"66:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 67f\n"
- "ld1 { v24.b }[0], [x23]\n"
+ "tbz x8, #0, 67f\n"
+ "ld1 { v24.b }[0], [x22]\n"
"67:" // Oddments: Load (3, 2): Bit 2: End
"ssubl v24.8h, v24.8b, v12.8b\n"
- "ldr x22, [x15, #0xb0]\n"
- "smlal v9.4s, v24.4h, v5.4h\n"
- "smlal2 v23.4s, v24.8h, v5.8h\n"
- "smlal v21.4s, v24.4h, v3.4h\n"
- "smlal2 v22.4s, v24.8h, v3.8h\n"
- "add x22, x22, x17\n"
- "tbz x7, #2, 69f\n"
- "ld1 { v26.s }[0], [x22], #0x4\n"
- "tbz x7, #1, 68f\n"
- "ld1 { v26.h }[2], [x22], #0x2\n"
- "tbz x7, #0, 71f\n"
- "ld1 { v26.b }[6], [x22]\n"
+ "ldr x21, [x12, #0xb0]\n"
+ "smlal v22.4s, v24.4h, v5.4h\n"
+ "smlal2 v21.4s, v24.8h, v5.8h\n"
+ "smlal v23.4s, v24.4h, v3.4h\n"
+ "smlal2 v18.4s, v24.8h, v3.8h\n"
+ "add x21, x21, x15\n"
+ "tbz x8, #2, 69f\n"
+ "ld1 { v26.s }[0], [x21], #0x4\n"
+ "tbz x8, #1, 68f\n"
+ "ld1 { v26.h }[2], [x21], #0x2\n"
+ "tbz x8, #0, 71f\n"
+ "ld1 { v26.b }[6], [x21]\n"
"b 71f\n"
"68:" // Oddments: Load (4, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 71f\n"
- "ld1 { v26.b }[4], [x22]\n"
+ "tbz x8, #0, 71f\n"
+ "ld1 { v26.b }[4], [x21]\n"
"b 71f\n"
"69:" // Oddments: Load (4, 3): Bit 2: Unset
- "tbz x7, #1, 70f\n"
- "ld1 { v26.h }[0], [x22], #0x2\n"
- "tbz x7, #0, 71f\n"
- "ld1 { v26.b }[2], [x22]\n"
+ "tbz x8, #1, 70f\n"
+ "ld1 { v26.h }[0], [x21], #0x2\n"
+ "tbz x8, #0, 71f\n"
+ "ld1 { v26.b }[2], [x21]\n"
"b 71f\n"
"70:" // Oddments: Load (4, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 71f\n"
- "ld1 { v26.b }[0], [x22]\n"
+ "tbz x8, #0, 71f\n"
+ "ld1 { v26.b }[0], [x21]\n"
"71:" // Oddments: Load (4, 3): Bit 2: End
"ssubl v26.8h, v26.8b, v12.8b\n"
- "ldr x21, [x15, #0xb8]\n"
- "smlal v21.4s, v26.4h, v7.4h\n"
- "smlal2 v22.4s, v26.8h, v7.8h\n"
- "add x21, x21, x17\n"
- "tbz x7, #2, 73f\n"
- "ld1 { v25.s }[0], [x21], #0x4\n"
- "tbz x7, #1, 72f\n"
- "ld1 { v25.h }[2], [x21], #0x2\n"
- "tbz x7, #0, 75f\n"
- "ld1 { v25.b }[6], [x21]\n"
+ "ldr x20, [x12, #0xb8]\n"
+ "smlal v23.4s, v26.4h, v7.4h\n"
+ "smlal2 v18.4s, v26.8h, v7.8h\n"
+ "add x20, x20, x15\n"
+ "tbz x8, #2, 73f\n"
+ "ld1 { v25.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 72f\n"
+ "ld1 { v25.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 75f\n"
+ "ld1 { v25.b }[6], [x20]\n"
"b 75f\n"
"72:" // Oddments: Load (4, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 75f\n"
- "ld1 { v25.b }[4], [x21]\n"
+ "tbz x8, #0, 75f\n"
+ "ld1 { v25.b }[4], [x20]\n"
"b 75f\n"
"73:" // Oddments: Load (4, 2): Bit 2: Unset
- "tbz x7, #1, 74f\n"
- "ld1 { v25.h }[0], [x21], #0x2\n"
- "tbz x7, #0, 75f\n"
- "ld1 { v25.b }[2], [x21]\n"
+ "tbz x8, #1, 74f\n"
+ "ld1 { v25.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 75f\n"
+ "ld1 { v25.b }[2], [x20]\n"
"b 75f\n"
"74:" // Oddments: Load (4, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 75f\n"
- "ld1 { v25.b }[0], [x21]\n"
+ "tbz x8, #0, 75f\n"
+ "ld1 { v25.b }[0], [x20]\n"
"75:" // Oddments: Load (4, 2): Bit 2: End
"ssubl v25.8h, v25.8b, v12.8b\n"
- "ldr x20, [x15, #0xc0]\n"
- "smlal v9.4s, v25.4h, v8.4h\n"
- "smlal2 v23.4s, v25.8h, v8.8h\n"
- "smlal v21.4s, v25.4h, v6.4h\n"
- "smlal2 v22.4s, v25.8h, v6.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 77f\n"
- "ld1 { v29.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 76f\n"
- "ld1 { v29.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 79f\n"
- "ld1 { v29.b }[6], [x20]\n"
+ "ldr x19, [x12, #0xc0]\n"
+ "smlal v22.4s, v25.4h, v8.4h\n"
+ "smlal2 v21.4s, v25.8h, v8.8h\n"
+ "smlal v23.4s, v25.4h, v6.4h\n"
+ "smlal2 v18.4s, v25.8h, v6.8h\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 77f\n"
+ "ld1 { v29.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 76f\n"
+ "ld1 { v29.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 79f\n"
+ "ld1 { v29.b }[6], [x19]\n"
"b 79f\n"
"76:" // Oddments: Load (4, 4): Bit 2: Bit 1: Unset
- "tbz x7, #0, 79f\n"
- "ld1 { v29.b }[4], [x20]\n"
+ "tbz x8, #0, 79f\n"
+ "ld1 { v29.b }[4], [x19]\n"
"b 79f\n"
"77:" // Oddments: Load (4, 4): Bit 2: Unset
- "tbz x7, #1, 78f\n"
- "ld1 { v29.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 79f\n"
- "ld1 { v29.b }[2], [x20]\n"
+ "tbz x8, #1, 78f\n"
+ "ld1 { v29.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 79f\n"
+ "ld1 { v29.b }[2], [x19]\n"
"b 79f\n"
"78:" // Oddments: Load (4, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 79f\n"
- "ld1 { v29.b }[0], [x20]\n"
+ "tbz x8, #0, 79f\n"
+ "ld1 { v29.b }[0], [x19]\n"
"79:" // Oddments: Load (4, 4): Bit 2: End
"ssubl v29.8h, v29.8b, v12.8b\n"
- "smlal v21.4s, v29.4h, v8.4h\n"
- "smlal2 v22.4s, v29.8h, v8.8h\n"
- "tbz x7, #2, 81f\n"
+ "smlal v23.4s, v29.4h, v8.4h\n"
+ "smlal2 v18.4s, v29.8h, v8.8h\n"
+ "tbz x8, #2, 81f\n"
"ld1 { v19.4s }, [x13], #0x10\n"
- "ld1 { v18.4s }, [x12], #0x10\n"
- "tbz x7, #1, 80f\n"
- "ld1 { v30.d }[0], [x13], #0x8\n"
- "ld1 { v31.d }[0], [x12], #0x8\n"
- "tbz x7, #0, 83f\n"
- "ld1 { v30.s }[2], [x13]\n"
- "ld1 { v31.s }[2], [x12]\n"
+ "ld1 { v0.4s }, [x11], #0x10\n"
+ "tbz x8, #1, 80f\n"
+ "ld1 { v4.d }[0], [x13], #0x8\n"
+ "ld1 { v31.d }[0], [x11], #0x8\n"
+ "tbz x8, #0, 83f\n"
+ "ld1 { v4.s }[2], [x13]\n"
+ "ld1 { v31.s }[2], [x11]\n"
"b 83f\n"
"80:" // Oddments: Load requant params: Bit 2: Bit 1: Unset
- "tbz x7, #0, 83f\n"
- "ld1 { v30.s }[0], [x13]\n"
- "ld1 { v31.s }[0], [x12]\n"
+ "tbz x8, #0, 83f\n"
+ "ld1 { v4.s }[0], [x13]\n"
+ "ld1 { v31.s }[0], [x11]\n"
"b 83f\n"
"81:" // Oddments: Load requant params: Bit 2: Unset
- "tbz x7, #1, 82f\n"
+ "tbz x8, #1, 82f\n"
"ld1 { v19.d }[0], [x13], #0x8\n"
- "ld1 { v18.d }[0], [x12], #0x8\n"
- "tbz x7, #0, 83f\n"
+ "ld1 { v0.d }[0], [x11], #0x8\n"
+ "tbz x8, #0, 83f\n"
"ld1 { v19.s }[2], [x13]\n"
- "ld1 { v18.s }[2], [x12]\n"
+ "ld1 { v0.s }[2], [x11]\n"
"b 83f\n"
"82:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 83f\n"
+ "tbz x8, #0, 83f\n"
"ld1 { v19.s }[0], [x13]\n"
- "ld1 { v18.s }[0], [x12]\n"
+ "ld1 { v0.s }[0], [x11]\n"
"83:" // Oddments: Load requant params: Bit 2: End
"sqrdmulh v15.4s, v15.4s, v19.4s\n"
- "and v0.16b, v15.16b, v18.16b\n"
- "add x11, x11, x16\n"
- "add x10, x10, x16\n"
- "sqrdmulh v17.4s, v17.4s, v30.4s\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "add x9, x9, x16\n"
- "add x28, x28, x16\n"
- "and v7.16b, v17.16b, v31.16b\n"
- "sqrdmulh v10.4s, v10.4s, v19.4s\n"
"sqrdmulh v9.4s, v9.4s, v19.4s\n"
- "sqrdmulh v21.4s, v21.4s, v19.4s\n"
- "sqadd v15.4s, v15.4s, v0.4s\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "and v19.16b, v10.16b, v18.16b\n"
- "sqrdmulh v20.4s, v20.4s, v30.4s\n"
- "and v27.16b, v9.16b, v18.16b\n"
- "sqrdmulh v23.4s, v23.4s, v30.4s\n"
- "and v0.16b, v21.16b, v18.16b\n"
- "sqrdmulh v22.4s, v22.4s, v30.4s\n"
- "sqadd v17.4s, v17.4s, v7.4s\n"
+ "add x10, x10, x14\n"
+ "add x9, x9, x14\n"
+ "sqrdmulh v22.4s, v22.4s, v19.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v19.4s\n"
+ "add x28, x28, x14\n"
+ "add x27, x27, x14\n"
+ "and v30.16b, v15.16b, v0.16b\n"
+ "sqrdmulh v10.4s, v10.4s, v4.4s\n"
+ "and v28.16b, v9.16b, v0.16b\n"
+ "sqrdmulh v16.4s, v16.4s, v4.4s\n"
+ "and v29.16b, v22.16b, v0.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v4.4s\n"
+ "and v20.16b, v23.16b, v0.16b\n"
+ "sqrdmulh v18.4s, v18.4s, v4.4s\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "and v19.16b, v10.16b, v31.16b\n"
+ "sshr v28.4s, v28.4s, #0x1f\n"
+ "and v4.16b, v16.16b, v31.16b\n"
+ "sshr v29.4s, v29.4s, #0x1f\n"
+ "and v5.16b, v21.16b, v31.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v26.16b, v18.16b, v31.16b\n"
+ "sqadd v15.4s, v15.4s, v30.4s\n"
"sshr v19.4s, v19.4s, #0x1f\n"
- "and v5.16b, v20.16b, v31.16b\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "and v4.16b, v23.16b, v31.16b\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "and v7.16b, v22.16b, v31.16b\n"
- "sqadd v10.4s, v10.4s, v19.4s\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sqadd v9.4s, v9.4s, v27.4s\n"
+ "sqadd v9.4s, v9.4s, v28.4s\n"
"sshr v4.4s, v4.4s, #0x1f\n"
- "sqadd v21.4s, v21.4s, v0.4s\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "srshl v15.4s, v15.4s, v18.4s\n"
- "srshl v10.4s, v10.4s, v18.4s\n"
- "sqadd v20.4s, v20.4s, v5.4s\n"
- "srshl v9.4s, v9.4s, v18.4s\n"
- "sqadd v23.4s, v23.4s, v4.4s\n"
- "srshl v21.4s, v21.4s, v18.4s\n"
- "sqadd v22.4s, v22.4s, v7.4s\n"
- "srshl v17.4s, v17.4s, v31.4s\n"
+ "sqadd v22.4s, v22.4s, v29.4s\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
+ "sqadd v23.4s, v23.4s, v20.4s\n"
+ "sshr v26.4s, v26.4s, #0x1f\n"
+ "srshl v15.4s, v15.4s, v0.4s\n"
+ "sqadd v10.4s, v10.4s, v19.4s\n"
+ "srshl v9.4s, v9.4s, v0.4s\n"
+ "sqadd v16.4s, v16.4s, v4.4s\n"
+ "srshl v22.4s, v22.4s, v0.4s\n"
+ "sqadd v21.4s, v21.4s, v5.4s\n"
+ "srshl v23.4s, v23.4s, v0.4s\n"
+ "sqadd v18.4s, v18.4s, v26.4s\n"
+ "srshl v10.4s, v10.4s, v31.4s\n"
"sqxtn v15.4h, v15.4s\n"
- "srshl v20.4s, v20.4s, v31.4s\n"
- "sqxtn v10.4h, v10.4s\n"
- "srshl v23.4s, v23.4s, v31.4s\n"
+ "srshl v16.4s, v16.4s, v31.4s\n"
"sqxtn v9.4h, v9.4s\n"
- "srshl v22.4s, v22.4s, v31.4s\n"
- "sqxtn v21.4h, v21.4s\n"
- "sqxtn2 v15.8h, v17.4s\n"
- "sqxtn2 v10.8h, v20.4s\n"
- "sqxtn2 v9.8h, v23.4s\n"
- "sqxtn2 v21.8h, v22.4s\n"
+ "srshl v21.4s, v21.4s, v31.4s\n"
+ "sqxtn v22.4h, v22.4s\n"
+ "srshl v18.4s, v18.4s, v31.4s\n"
+ "sqxtn v23.4h, v23.4s\n"
+ "sqxtn2 v15.8h, v10.4s\n"
+ "sqxtn2 v9.8h, v16.4s\n"
+ "sqxtn2 v22.8h, v21.4s\n"
+ "sqxtn2 v23.8h, v18.4s\n"
"sqadd v15.8h, v15.8h, v11.8h\n"
- "sqadd v10.8h, v10.8h, v11.8h\n"
"sqadd v9.8h, v9.8h, v11.8h\n"
- "sqadd v21.8h, v21.8h, v11.8h\n"
- "smax v15.8h, v15.8h, v16.8h\n"
- "smax v10.8h, v10.8h, v16.8h\n"
- "smax v9.8h, v9.8h, v16.8h\n"
- "smax v21.8h, v21.8h, v16.8h\n"
+ "sqadd v22.8h, v22.8h, v11.8h\n"
+ "sqadd v23.8h, v23.8h, v11.8h\n"
+ "smax v15.8h, v15.8h, v17.8h\n"
+ "smax v9.8h, v9.8h, v17.8h\n"
+ "smax v22.8h, v22.8h, v17.8h\n"
+ "smax v23.8h, v23.8h, v17.8h\n"
"smin v15.8h, v15.8h, v14.8h\n"
- "smin v10.8h, v10.8h, v14.8h\n"
"smin v9.8h, v9.8h, v14.8h\n"
- "smin v21.8h, v21.8h, v14.8h\n"
+ "smin v22.8h, v22.8h, v14.8h\n"
+ "smin v23.8h, v23.8h, v14.8h\n"
"uzp1 v15.16b, v15.16b, v15.16b\n"
- "uzp1 v10.16b, v10.16b, v10.16b\n"
"uzp1 v9.16b, v9.16b, v9.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "tbz x7, #2, 85f\n"
- "st1 { v15.s }[0], [x11], #0x4\n"
- "st1 { v10.s }[0], [x10], #0x4\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "tbz x8, #2, 85f\n"
+ "st1 { v15.s }[0], [x10], #0x4\n"
"st1 { v9.s }[0], [x9], #0x4\n"
- "st1 { v21.s }[0], [x28], #0x4\n"
- "tbz x7, #1, 84f\n"
- "st1 { v15.h }[2], [x11], #0x2\n"
- "st1 { v10.h }[2], [x10], #0x2\n"
+ "st1 { v22.s }[0], [x28], #0x4\n"
+ "st1 { v23.s }[0], [x27], #0x4\n"
+ "tbz x8, #1, 84f\n"
+ "st1 { v15.h }[2], [x10], #0x2\n"
"st1 { v9.h }[2], [x9], #0x2\n"
- "st1 { v21.h }[2], [x28], #0x2\n"
- "tbz x7, #0, 87f\n"
- "st1 { v15.b }[6], [x11], #0x1\n"
- "st1 { v10.b }[6], [x10], #0x1\n"
+ "st1 { v22.h }[2], [x28], #0x2\n"
+ "st1 { v23.h }[2], [x27], #0x2\n"
+ "tbz x8, #0, 87f\n"
+ "st1 { v15.b }[6], [x10], #0x1\n"
"st1 { v9.b }[6], [x9], #0x1\n"
- "st1 { v21.b }[6], [x28], #0x1\n"
+ "st1 { v22.b }[6], [x28], #0x1\n"
+ "st1 { v23.b }[6], [x27], #0x1\n"
"b 87f\n"
"84:" // Oddments: Bit 2: Bit 1: Unset
- "tbz x7, #0, 87f\n"
- "st1 { v15.b }[4], [x11], #0x1\n"
- "st1 { v10.b }[4], [x10], #0x1\n"
+ "tbz x8, #0, 87f\n"
+ "st1 { v15.b }[4], [x10], #0x1\n"
"st1 { v9.b }[4], [x9], #0x1\n"
- "st1 { v21.b }[4], [x28], #0x1\n"
+ "st1 { v22.b }[4], [x28], #0x1\n"
+ "st1 { v23.b }[4], [x27], #0x1\n"
"b 87f\n"
"85:" // Oddments: Bit 2: Unset
- "tbz x7, #1, 86f\n"
- "st1 { v15.h }[0], [x11], #0x2\n"
- "st1 { v10.h }[0], [x10], #0x2\n"
+ "tbz x8, #1, 86f\n"
+ "st1 { v15.h }[0], [x10], #0x2\n"
"st1 { v9.h }[0], [x9], #0x2\n"
- "st1 { v21.h }[0], [x28], #0x2\n"
- "tbz x7, #0, 87f\n"
- "st1 { v15.b }[2], [x11], #0x1\n"
- "st1 { v10.b }[2], [x10], #0x1\n"
+ "st1 { v22.h }[0], [x28], #0x2\n"
+ "st1 { v23.h }[0], [x27], #0x2\n"
+ "tbz x8, #0, 87f\n"
+ "st1 { v15.b }[2], [x10], #0x1\n"
"st1 { v9.b }[2], [x9], #0x1\n"
- "st1 { v21.b }[2], [x28], #0x1\n"
+ "st1 { v22.b }[2], [x28], #0x1\n"
+ "st1 { v23.b }[2], [x27], #0x1\n"
"b 87f\n"
"86:" // Oddments: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 87f\n"
- "st1 { v15.b }[0], [x11], #0x1\n"
- "st1 { v10.b }[0], [x10], #0x1\n"
+ "tbz x8, #0, 87f\n"
+ "st1 { v15.b }[0], [x10], #0x1\n"
"st1 { v9.b }[0], [x9], #0x1\n"
- "st1 { v21.b }[0], [x28], #0x1\n"
+ "st1 { v22.b }[0], [x28], #0x1\n"
+ "st1 { v23.b }[0], [x27], #0x1\n"
"87:" // Oddments: Bit 2: End
"88:" // End
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
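For readers tracing the requantisation tail of the hunks above: each 32-bit accumulator is scaled by a per-channel multiplier (sqrdmulh), rounding-right-shifted by a per-channel amount (the and/sshr/sqadd fixup followed by srshl), offset by the Requantize32 c_offset, clamped between minval and maxval, and narrowed to 8 bits (sqxtn, uzp1). A minimal scalar sketch of one lane follows, assuming the usual gemmlowp-style round-half-away-from-zero semantics; requantize_lane and its parameter names are illustrative, not library API — only c_offset, minval, maxval, requant_muls and requant_shifts correspond to fields named in the operand list above (mul and shift stand for one lane of requant_muls and requant_shifts).

#include <algorithm>
#include <cstdint>

// Scalar model of one lane of the vectorised requantisation sequence.
static int8_t requantize_lane(int32_t acc, int32_t mul, int32_t shift,
                              int32_t c_offset, int32_t minval, int32_t maxval)
{
    // sqrdmulh: saturating, rounding, doubling multiply returning the high half.
    int64_t high = ((int64_t)acc * (int64_t)mul + (1LL << 30)) >> 31;
    int32_t q = (int32_t)std::min<int64_t>(high, (int64_t)INT32_MAX);

    // The and/sshr/sqadd fixup before srshl turns the instruction's native
    // round-half-up into round-half-away-from-zero; shifts are stored negated.
    int n = -shift;
    if (n > 0)
    {
        int64_t half = 1LL << (n - 1);
        q = (q >= 0) ? (int32_t)(((int64_t)q + half) >> n)
                     : (int32_t)(-((-(int64_t)q + half) >> n));
    }

    // sqadd with the output offset, smax/smin clamp, then sqxtn/uzp1 narrowing.
    int64_t out = (int64_t)q + (int64_t)c_offset;
    out = std::min<int64_t>(std::max<int64_t>(out, (int64_t)minval), (int64_t)maxval);
    return (int8_t)out;
}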
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
index 4b0ad00187..663ea59a98 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -111,2073 +111,2073 @@ void a64_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "ldr x1, [%x[params], %[offsetof_Params_n_channels]]\n"
- "ldr x13, [%x[params], %[offsetof_Params_requant]]\n"
- "lsr x2, x1, #0x3\n"
- "add x3, x13, %[offsetof_Requantize32_a_offset]\n"
- "ld1r { v9.16b }, [x3]\n"
- "ldr x24, [%x[params], %[offsetof_Params_outptrs]]\n"
- "add x11, x13, %[offsetof_Requantize32_b_offset]\n"
- "add x5, x13, %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v15.16b }, [x11]\n"
- "ld1r { v14.8h }, [x5]\n"
- "add x3, x13, %[offsetof_Requantize32_minval]\n"
- "add x15, x13, %[offsetof_Requantize32_maxval]\n"
- "ld1r { v12.8h }, [x3]\n"
- "ld1r { v11.8h }, [x15]\n"
- "mov x0, #0x0\n"
- "mov x10, #0x0\n"
- "add x4, %x[params], %[offsetof_Params_inptrs]\n"
- "ldr x3, [%x[params], %[offsetof_Params_weights]]\n"
- "ldr x5, [%x[params], %[offsetof_Params_requant_muls]]\n"
- "ldr x8, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "ldp x17, x6, [x24, #0x0]\n"
- "ldp x7, x16, [x24, #0x10]\n"
- "cbz x2, 3f\n"
- "ldr d0, [x3, #0x0]\n"
- "ldr d1, [x3, #0x8]\n"
- "subs x2, x2, #0x1\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
- "ldr d2, [x3, #0x10]\n"
- "ldr d3, [x3, #0x18]\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
- "ldr d4, [x3, #0x20]\n"
- "ldr x13, [%x[params], %[offsetof_Params_bias]]\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "ldr q13, [x13, #0x0]\n"
- "ldr q19, [x13, #0x10]\n"
- "add x13, x13, #0x20\n"
- "str x13, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x9, x28, [x4, #0x0]\n"
- "ldp x27, x26, [x4, #0x10]\n"
- "mov v20.16b, v13.16b\n"
- "mov v10.16b, v19.16b\n"
- "ldp x25, x24, [x4, #0x20]\n"
- "ldp x23, x22, [x4, #0x30]\n"
- "mov v8.16b, v13.16b\n"
- "mov v7.16b, v19.16b\n"
- "ldp x21, x20, [x4, #0x40]\n"
- "ldr d31, [x9, x0]\n"
- "mov v17.16b, v13.16b\n"
- "mov v21.16b, v19.16b\n"
- "ldr d30, [x28, x0]\n"
- "ldr d29, [x27, x0]\n"
+ "ldr x10, [%x[params], %[offsetof_Params_requant]]\n"
+ "ldr x0, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "add x17, x10, %[offsetof_Requantize32_a_offset]\n"
+ "add x9, x10, %[offsetof_Requantize32_b_offset]\n"
+ "ldr x25, [%x[params], %[offsetof_Params_outptrs]]\n"
+ "add x4, x10, %[offsetof_Requantize32_c_offset]\n"
+ "add x14, x10, %[offsetof_Requantize32_minval]\n"
+ "ldr x23, [%x[params], %[offsetof_Params_weights]]\n"
+ "add x5, x10, %[offsetof_Requantize32_maxval]\n"
+ "ld1r { v9.16b }, [x17]\n"
+ "ld1r { v14.16b }, [x9]\n"
+ "lsr x3, x0, #0x3\n"
+ "ld1r { v18.8h }, [x4]\n"
+ "ld1r { v11.8h }, [x14]\n"
+ "mov x24, #0x0\n"
+ "mov x22, #0x0\n"
+ "ld1r { v13.8h }, [x5]\n"
+ "ldr x10, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "add x20, %x[params], %[offsetof_Params_inptrs]\n"
+ "ldr x1, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "ldp x16, x8, [x25, #0x0]\n"
+ "ldp x4, x7, [x25, #0x10]\n"
+ "cbz x3, 3f\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr q15, [x19, #0x0]\n"
+ "subs x3, x3, #0x1\n"
+ "mov v17.16b, v15.16b\n"
+ "ldr q16, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr d0, [x23, #0x0]\n"
+ "ldr d1, [x23, #0x8]\n"
+ "ldr d2, [x23, #0x10]\n"
+ "mov v8.16b, v16.16b\n"
+ "mov v10.16b, v15.16b\n"
+ "ldr d3, [x23, #0x18]\n"
+ "ldr d4, [x23, #0x20]\n"
+ "mov v7.16b, v16.16b\n"
+ "mov v6.16b, v15.16b\n"
+ "ldp x28, x6, [x20, #0x0]\n"
+ "ldp x26, x25, [x20, #0x10]\n"
+ "mov v5.16b, v16.16b\n"
+ "ssubl v0.8h, v0.8b, v14.8b\n"
+ "ldp x5, x2, [x20, #0x20]\n"
+ "ldp x27, x21, [x20, #0x30]\n"
+ "ssubl v1.8h, v1.8b, v14.8b\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "ldp x12, x19, [x20, #0x40]\n"
+ "ldr d31, [x28, x24]\n"
+ "ssubl v3.8h, v3.8b, v14.8b\n"
+ "ssubl v4.8h, v4.8b, v14.8b\n"
+ "ldr d30, [x6, x24]\n"
+ "ldr d29, [x26, x24]\n"
"ssubl v31.8h, v31.8b, v9.8b\n"
"ssubl v30.8h, v30.8b, v9.8b\n"
- "ldr d28, [x26, x0]\n"
- "ldr d27, [x25, x0]\n"
+ "ldr d28, [x25, x24]\n"
+ "ldr d27, [x5, x24]\n"
"ssubl v29.8h, v29.8b, v9.8b\n"
"ssubl v28.8h, v28.8b, v9.8b\n"
- "ldr d23, [x24, x0]\n"
- "ldr d25, [x23, x0]\n"
+ "ldr d23, [x2, x24]\n"
+ "ldr d25, [x27, x24]\n"
"ssubl v27.8h, v27.8b, v9.8b\n"
"ssubl v23.8h, v23.8b, v9.8b\n"
- "ldr d24, [x22, x0]\n"
- "ldr d26, [x21, x0]\n"
+ "ldr d24, [x21, x24]\n"
+ "ldr d26, [x12, x24]\n"
"ssubl v25.8h, v25.8b, v9.8b\n"
"ssubl v24.8h, v24.8b, v9.8b\n"
- "ldr d22, [x20, x0]\n"
+ "ldr d22, [x19, x24]\n"
"ssubl v26.8h, v26.8b, v9.8b\n"
"ssubl v22.8h, v22.8b, v9.8b\n"
"beq 2f\n"
"1:" // Loop
- "ldr q18, [x5, #0x0]\n"
- "ldr q6, [x8, #0x0]\n"
- "smlal v13.4s, v31.4h, v0.4h\n"
- "smlal2 v19.4s, v31.8h, v0.8h\n"
- "ldr q5, [x5, #0x10]\n"
- "smlal v13.4s, v30.4h, v1.4h\n"
- "ldr x20, [x4, #0x50]\n"
- "smlal v20.4s, v30.4h, v0.4h\n"
- "smlal v8.4s, v29.4h, v0.4h\n"
- "smlal v17.4s, v28.4h, v0.4h\n"
- "ldr x22, [x4, #0x58]\n"
- "ldr x21, [x4, #0x60]\n"
- "smlal2 v19.4s, v30.8h, v1.8h\n"
- "smlal2 v10.4s, v30.8h, v0.8h\n"
- "ldr d31, [x20, x0]\n"
+ "smlal v15.4s, v31.4h, v0.4h\n"
+ "smlal2 v16.4s, v31.8h, v0.8h\n"
+ "ldr x19, [x20, #0x50]\n"
+ "ldr d31, [x19, x24]\n"
+ "smlal v17.4s, v30.4h, v0.4h\n"
+ "smlal v10.4s, v29.4h, v0.4h\n"
+ "ldr x15, [x20, #0x58]\n"
"ssubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v6.4s, v28.4h, v0.4h\n"
+ "smlal2 v8.4s, v30.8h, v0.8h\n"
+ "ldr x19, [x20, #0x60]\n"
+ "ldr x27, [x20, #0x68]\n"
"smlal2 v7.4s, v29.8h, v0.8h\n"
- "smlal v13.4s, v27.4h, v2.4h\n"
- "ldr x20, [x4, #0x68]\n"
- "ldr x26, [x4, #0x70]\n"
- "smlal2 v21.4s, v28.8h, v0.8h\n"
- "ldr d30, [x22, x0]\n"
- "smlal v20.4s, v27.4h, v1.4h\n"
+ "smlal v15.4s, v30.4h, v1.4h\n"
+ "ldr x5, [x20, #0x70]\n"
+ "ldr x11, [x20, #0x78]\n"
+ "smlal2 v16.4s, v30.8h, v1.8h\n"
+ "smlal2 v5.4s, v28.8h, v0.8h\n"
+ "ldr d30, [x15, x24]\n"
"ssubl v30.8h, v30.8b, v9.8b\n"
- "smlal v8.4s, v28.4h, v1.4h\n"
- "smlal v17.4s, v23.4h, v1.4h\n"
- "ldr x25, [x4, #0x78]\n"
- "ldr x23, [x4, #0x80]\n"
- "smlal2 v19.4s, v27.8h, v2.8h\n"
- "smlal2 v10.4s, v27.8h, v1.8h\n"
- "ldr d0, [x3, #0x28]\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
+ "smlal v17.4s, v27.4h, v1.4h\n"
+ "smlal v10.4s, v28.4h, v1.4h\n"
+ "ldr d0, [x23, #0x28]\n"
+ "ssubl v0.8h, v0.8b, v14.8b\n"
+ "smlal v6.4s, v23.4h, v1.4h\n"
+ "smlal2 v8.4s, v27.8h, v1.8h\n"
+ "ldr x12, [x20, #0x80]\n"
+ "ldr x26, [x20, #0x88]\n"
"smlal2 v7.4s, v28.8h, v1.8h\n"
- "smlal v13.4s, v25.4h, v3.4h\n"
- "ldr x24, [x4, #0x88]\n"
- "ldr x15, [x4, #0x90]\n"
- "smlal2 v21.4s, v23.8h, v1.8h\n"
- "ldr d27, [x21, x0]\n"
- "smlal v20.4s, v25.4h, v2.4h\n"
+ "smlal v15.4s, v27.4h, v2.4h\n"
+ "ldr x14, [x20, #0x90]\n"
+ "ldr x15, [x20, #0x98]\n"
+ "smlal2 v16.4s, v27.8h, v2.8h\n"
+ "smlal2 v5.4s, v23.8h, v1.8h\n"
+ "ldr d27, [x19, x24]\n"
"ssubl v27.8h, v27.8b, v9.8b\n"
- "smlal v8.4s, v23.4h, v2.4h\n"
- "smlal v17.4s, v31.4h, v2.4h\n"
- "ldr x21, [x4, #0x98]\n"
- "ldr x14, [x4, #0xa0]\n"
- "smlal2 v19.4s, v25.8h, v3.8h\n"
- "smlal2 v10.4s, v25.8h, v2.8h\n"
- "ldr d1, [x3, #0x30]\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
+ "smlal v17.4s, v25.4h, v2.4h\n"
+ "smlal v10.4s, v23.4h, v2.4h\n"
+ "ldr d1, [x23, #0x30]\n"
+ "ssubl v1.8h, v1.8b, v14.8b\n"
+ "smlal v6.4s, v31.4h, v2.4h\n"
+ "smlal2 v8.4s, v25.8h, v2.8h\n"
+ "ldr x21, [x20, #0xa0]\n"
+ "ldr x2, [x20, #0xa8]\n"
"smlal2 v7.4s, v23.8h, v2.8h\n"
- "smlal v13.4s, v24.4h, v4.4h\n"
- "ldr x13, [x4, #0xa8]\n"
- "ldr x12, [x4, #0xb0]\n"
- "smlal2 v21.4s, v31.8h, v2.8h\n"
- "ldr d25, [x20, x0]\n"
- "smlal v20.4s, v24.4h, v3.4h\n"
+ "smlal v15.4s, v25.4h, v3.4h\n"
+ "ldr x13, [x20, #0xb0]\n"
+ "ldr x9, [x20, #0xb8]\n"
+ "smlal2 v16.4s, v25.8h, v3.8h\n"
+ "smlal2 v5.4s, v31.8h, v2.8h\n"
+ "ldr d25, [x27, x24]\n"
"ssubl v25.8h, v25.8b, v9.8b\n"
- "smlal v8.4s, v31.4h, v3.4h\n"
- "smlal v17.4s, v30.4h, v3.4h\n"
- "ldr x20, [x4, #0xb8]\n"
- "ldr x11, [x4, #0xc0]\n"
- "smlal2 v19.4s, v24.8h, v4.8h\n"
- "smlal2 v10.4s, v24.8h, v3.8h\n"
- "ldr d2, [x3, #0x38]\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
+ "smlal v17.4s, v24.4h, v3.4h\n"
+ "smlal v10.4s, v31.4h, v3.4h\n"
+ "ldr d2, [x23, #0x38]\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "smlal v6.4s, v30.4h, v3.4h\n"
+ "smlal2 v8.4s, v24.8h, v3.8h\n"
+ "ldr x19, [x20, #0xc0]\n"
+ "ldr x28, [x20, #0xc8]\n"
"smlal2 v7.4s, v31.8h, v3.8h\n"
- "smlal v13.4s, v29.4h, v0.4h\n"
- "ldr x22, [x4, #0xc8]\n"
- "ldr x9, [x4, #0xd0]\n"
- "smlal2 v21.4s, v30.8h, v3.8h\n"
- "ldr d24, [x26, x0]\n"
- "smlal v20.4s, v27.4h, v4.4h\n"
+ "smlal v15.4s, v24.4h, v4.4h\n"
+ "ldr x6, [x20, #0xd0]\n"
+ "ldr x27, [x20, #0xd8]\n"
+ "smlal2 v16.4s, v24.8h, v4.8h\n"
+ "smlal2 v5.4s, v30.8h, v3.8h\n"
+ "ldr d24, [x5, x24]\n"
"ssubl v24.8h, v24.8b, v9.8b\n"
- "smlal v8.4s, v30.4h, v4.4h\n"
- "smlal v17.4s, v26.4h, v4.4h\n"
- "ldr x28, [x4, #0xd8]\n"
- "ldr x27, [x4, #0xe0]\n"
- "smlal2 v19.4s, v29.8h, v0.8h\n"
- "ldr d3, [x3, #0x40]\n"
- "smlal2 v10.4s, v27.8h, v4.8h\n"
- "ldr d27, [x25, x0]\n"
- "smlal2 v7.4s, v30.8h, v4.8h\n"
- "smlal v13.4s, v28.4h, v1.4h\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "ldr x26, [x4, #0xe8]\n"
- "smlal2 v21.4s, v26.8h, v4.8h\n"
- "ldr d4, [x3, #0x48]\n"
- "smlal v20.4s, v28.4h, v0.4h\n"
+ "smlal v17.4s, v27.4h, v4.4h\n"
+ "smlal v10.4s, v30.4h, v4.4h\n"
+ "ldr d3, [x23, #0x40]\n"
+ "ssubl v3.8h, v3.8b, v14.8b\n"
+ "smlal v6.4s, v26.4h, v4.4h\n"
+ "smlal2 v8.4s, v27.8h, v4.8h\n"
+ "ldr d27, [x11, x24]\n"
"ssubl v27.8h, v27.8b, v9.8b\n"
- "smlal v8.4s, v22.4h, v0.4h\n"
- "smlal v17.4s, v25.4h, v0.4h\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "subs x2, x2, #0x1\n"
- "smlal2 v19.4s, v28.8h, v1.8h\n"
- "smlal2 v10.4s, v28.8h, v0.8h\n"
- "ldr d28, [x24, x0]\n"
- "ssubl v28.8h, v28.8b, v9.8b\n"
+ "smlal2 v7.4s, v30.8h, v4.8h\n"
+ "smlal v15.4s, v29.4h, v0.4h\n"
+ "ldr x11, [x20, #0xe0]\n"
+ "ldr x17, [x20, #0xe8]\n"
+ "smlal2 v16.4s, v29.8h, v0.8h\n"
+ "smlal2 v5.4s, v26.8h, v4.8h\n"
+ "ldr d4, [x23, #0x48]\n"
+ "ssubl v4.8h, v4.8b, v14.8b\n"
+ "smlal v17.4s, v28.4h, v0.4h\n"
+ "smlal v10.4s, v22.4h, v0.4h\n"
+ "ldr x5, [x20, #0xf0]\n"
+ "ldr q12, [x10, #0x0]\n"
+ "smlal v6.4s, v25.4h, v0.4h\n"
+ "smlal2 v8.4s, v28.8h, v0.8h\n"
+ "ldr q19, [x1, #0x0]\n"
+ "ldr q20, [x10, #0x10]\n"
"smlal2 v7.4s, v22.8h, v0.8h\n"
- "smlal v13.4s, v23.4h, v2.4h\n"
- "ldr x25, [x4, #0xf0]\n"
- "add x5, x5, #0x20\n"
- "smlal2 v21.4s, v25.8h, v0.8h\n"
- "ldr d0, [x3, #0x50]\n"
- "smlal v20.4s, v23.4h, v1.4h\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
- "smlal v8.4s, v25.4h, v1.4h\n"
- "smlal v17.4s, v24.4h, v1.4h\n"
- "smlal2 v19.4s, v23.8h, v2.8h\n"
- "smlal2 v10.4s, v23.8h, v1.8h\n"
- "ldr d23, [x23, x0]\n"
- "ssubl v23.8h, v23.8b, v9.8b\n"
+ "smlal v15.4s, v28.4h, v1.4h\n"
+ "ldr q29, [x1, #0x10]\n"
+ "subs x3, x3, #0x1\n"
+ "smlal2 v16.4s, v28.8h, v1.8h\n"
+ "smlal2 v5.4s, v25.8h, v0.8h\n"
+ "ldr d28, [x26, x24]\n"
+ "ldr d0, [x23, #0x50]\n"
+ "smlal v17.4s, v23.4h, v1.4h\n"
+ "smlal v10.4s, v25.4h, v1.4h\n"
+ "ssubl v28.8h, v28.8b, v9.8b\n"
+ "ldr x25, [x20, #0xf8]\n"
+ "smlal v6.4s, v24.4h, v1.4h\n"
+ "smlal2 v8.4s, v23.8h, v1.8h\n"
+ "ssubl v0.8h, v0.8b, v14.8b\n"
+ "add x10, x10, #0x20\n"
"smlal2 v7.4s, v25.8h, v1.8h\n"
- "smlal v13.4s, v31.4h, v3.4h\n"
- "ldr x24, [x4, #0xf8]\n"
- "smlal2 v21.4s, v24.8h, v1.8h\n"
- "ldr d1, [x3, #0x58]\n"
- "smlal v20.4s, v31.4h, v2.4h\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
- "smlal v8.4s, v24.4h, v2.4h\n"
- "smlal v17.4s, v27.4h, v2.4h\n"
- "smlal2 v19.4s, v31.8h, v3.8h\n"
- "smlal2 v10.4s, v31.8h, v2.8h\n"
- "ldr d31, [x15, x0]\n"
- "ssubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v15.4s, v23.4h, v2.4h\n"
+ "add x1, x1, #0x20\n"
+ "smlal2 v16.4s, v23.8h, v2.8h\n"
+ "ldr d23, [x12, x24]\n"
+ "smlal2 v5.4s, v24.8h, v1.8h\n"
+ "ssubl v23.8h, v23.8b, v9.8b\n"
+ "smlal v17.4s, v31.4h, v2.4h\n"
+ "smlal v10.4s, v24.4h, v2.4h\n"
+ "ldr d1, [x23, #0x58]\n"
+ "ssubl v1.8h, v1.8b, v14.8b\n"
+ "smlal v6.4s, v27.4h, v2.4h\n"
+ "smlal2 v8.4s, v31.8h, v2.8h\n"
+ "ldr x26, [x20, #0x100]\n"
"smlal2 v7.4s, v24.8h, v2.8h\n"
- "smlal v13.4s, v30.4h, v4.4h\n"
- "ldr x23, [x4, #0x100]\n"
- "smlal2 v21.4s, v27.8h, v2.8h\n"
- "ldr d2, [x3, #0x60]\n"
- "smlal v20.4s, v30.4h, v3.4h\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
- "smlal v8.4s, v27.4h, v3.4h\n"
- "smlal v17.4s, v23.4h, v3.4h\n"
- "smlal2 v19.4s, v30.8h, v4.8h\n"
- "smlal2 v10.4s, v30.8h, v3.8h\n"
- "ldr d30, [x21, x0]\n"
- "ssubl v30.8h, v30.8b, v9.8b\n"
+ "smlal v15.4s, v31.4h, v3.4h\n"
+ "smlal2 v16.4s, v31.8h, v3.8h\n"
+ "smlal2 v5.4s, v27.8h, v2.8h\n"
+ "ldr d31, [x14, x24]\n"
+ "ssubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v17.4s, v30.4h, v3.4h\n"
+ "smlal v10.4s, v27.4h, v3.4h\n"
+ "ldr d2, [x23, #0x60]\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "smlal v6.4s, v23.4h, v3.4h\n"
+ "smlal2 v8.4s, v30.8h, v3.8h\n"
+ "ldr x12, [x20, #0x108]\n"
"smlal2 v7.4s, v27.8h, v3.8h\n"
- "smlal v13.4s, v22.4h, v0.4h\n"
- "ldr x15, [x4, #0x108]\n"
- "smlal2 v21.4s, v23.8h, v3.8h\n"
- "ldr d3, [x3, #0x68]\n"
- "smlal v20.4s, v26.4h, v4.4h\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "smlal v8.4s, v23.4h, v4.4h\n"
- "smlal v17.4s, v28.4h, v4.4h\n"
- "smlal2 v19.4s, v22.8h, v0.8h\n"
- "ldr d22, [x20, x0]\n"
- "smlal2 v10.4s, v26.8h, v4.8h\n"
- "ldr d26, [x14, x0]\n"
- "smlal2 v7.4s, v23.8h, v4.8h\n"
- "smlal v13.4s, v25.4h, v1.4h\n"
+ "smlal v15.4s, v30.4h, v4.4h\n"
+ "smlal2 v16.4s, v30.8h, v4.8h\n"
+ "ldr d30, [x15, x24]\n"
+ "smlal2 v5.4s, v23.8h, v3.8h\n"
+ "ssubl v30.8h, v30.8b, v9.8b\n"
+ "smlal v17.4s, v26.4h, v4.4h\n"
+ "smlal v10.4s, v23.4h, v4.4h\n"
+ "ldr d3, [x23, #0x68]\n"
+ "ssubl v3.8h, v3.8b, v14.8b\n"
+ "smlal v6.4s, v28.4h, v4.4h\n"
+ "smlal2 v8.4s, v26.8h, v4.8h\n"
+ "ldr d26, [x21, x24]\n"
"ssubl v26.8h, v26.8b, v9.8b\n"
- "ldr x21, [x4, #0x110]\n"
- "smlal2 v21.4s, v28.8h, v4.8h\n"
- "ldr d4, [x3, #0x70]\n"
- "smlal v20.4s, v25.4h, v0.4h\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "smlal v8.4s, v31.4h, v0.4h\n"
- "smlal v17.4s, v30.4h, v0.4h\n"
+ "smlal2 v7.4s, v23.8h, v4.8h\n"
+ "smlal v15.4s, v22.4h, v0.4h\n"
+ "ldr x14, [x20, #0x110]\n"
+ "ldr x21, [x20, #0x118]\n"
+ "smlal2 v16.4s, v22.8h, v0.8h\n"
+ "smlal2 v5.4s, v28.8h, v4.8h\n"
+ "ldr d4, [x23, #0x70]\n"
+ "ldr d22, [x9, x24]\n"
+ "smlal v17.4s, v25.4h, v0.4h\n"
+ "smlal v10.4s, v31.4h, v0.4h\n"
+ "ssubl v4.8h, v4.8b, v14.8b\n"
+ "smlal v6.4s, v30.4h, v0.4h\n"
+ "smlal2 v8.4s, v25.8h, v0.8h\n"
"ssubl v22.8h, v22.8b, v9.8b\n"
- "ldr x20, [x4, #0x118]\n"
- "smlal2 v19.4s, v25.8h, v1.8h\n"
- "smlal2 v10.4s, v25.8h, v0.8h\n"
- "ldr d25, [x13, x0]\n"
- "ssubl v25.8h, v25.8b, v9.8b\n"
"smlal2 v7.4s, v31.8h, v0.8h\n"
- "smlal v13.4s, v24.4h, v2.4h\n"
- "ldr x13, [%x[params], %[offsetof_Params_bias]]\n"
- "smlal2 v21.4s, v30.8h, v0.8h\n"
- "ldr d0, [x3, #0x78]\n"
- "smlal v20.4s, v24.4h, v1.4h\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
- "smlal v8.4s, v30.4h, v1.4h\n"
- "smlal v17.4s, v26.4h, v1.4h\n"
- "smlal2 v19.4s, v24.8h, v2.8h\n"
- "smlal2 v10.4s, v24.8h, v1.8h\n"
- "ldr d24, [x12, x0]\n"
- "ssubl v24.8h, v24.8b, v9.8b\n"
+ "smlal v15.4s, v25.4h, v1.4h\n"
+ "smlal2 v16.4s, v25.8h, v1.8h\n"
+ "ldr d25, [x2, x24]\n"
+ "smlal2 v5.4s, v30.8h, v0.8h\n"
+ "ssubl v25.8h, v25.8b, v9.8b\n"
+ "smlal v17.4s, v24.4h, v1.4h\n"
+ "smlal v10.4s, v30.4h, v1.4h\n"
+ "ldr d0, [x23, #0x78]\n"
+ "ssubl v0.8h, v0.8b, v14.8b\n"
+ "smlal v6.4s, v26.4h, v1.4h\n"
+ "smlal2 v8.4s, v24.8h, v1.8h\n"
"smlal2 v7.4s, v30.8h, v1.8h\n"
- "smlal v13.4s, v27.4h, v3.4h\n"
- "smlal2 v21.4s, v26.8h, v1.8h\n"
- "ldr d1, [x3, #0x80]\n"
- "smlal v20.4s, v27.4h, v2.4h\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
- "smlal v8.4s, v26.4h, v2.4h\n"
- "smlal v17.4s, v25.4h, v2.4h\n"
- "smlal2 v19.4s, v27.8h, v3.8h\n"
- "smlal2 v10.4s, v27.8h, v2.8h\n"
- "ldr d27, [x11, x0]\n"
- "ssubl v27.8h, v27.8b, v9.8b\n"
+ "smlal v15.4s, v24.4h, v2.4h\n"
+ "smlal2 v16.4s, v24.8h, v2.8h\n"
+ "ldr d24, [x13, x24]\n"
+ "smlal2 v5.4s, v26.8h, v1.8h\n"
+ "ssubl v24.8h, v24.8b, v9.8b\n"
+ "smlal v17.4s, v27.4h, v2.4h\n"
+ "smlal v10.4s, v26.4h, v2.4h\n"
+ "ldr d1, [x23, #0x80]\n"
+ "ssubl v1.8h, v1.8b, v14.8b\n"
+ "smlal v6.4s, v25.4h, v2.4h\n"
+ "smlal2 v8.4s, v27.8h, v2.8h\n"
"smlal2 v7.4s, v26.8h, v2.8h\n"
- "smlal v13.4s, v23.4h, v4.4h\n"
- "smlal2 v21.4s, v25.8h, v2.8h\n"
- "ldr d2, [x3, #0x88]\n"
- "smlal v20.4s, v23.4h, v3.4h\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
- "smlal v8.4s, v25.4h, v3.4h\n"
- "smlal v17.4s, v24.4h, v3.4h\n"
- "smlal2 v19.4s, v23.8h, v4.8h\n"
- "smlal2 v10.4s, v23.8h, v3.8h\n"
- "ldr d23, [x22, x0]\n"
- "ssubl v23.8h, v23.8b, v9.8b\n"
+ "smlal v15.4s, v27.4h, v3.4h\n"
+ "smlal2 v16.4s, v27.8h, v3.8h\n"
+ "smlal2 v5.4s, v25.8h, v2.8h\n"
+ "ldr d27, [x19, x24]\n"
+ "ssubl v27.8h, v27.8b, v9.8b\n"
+ "smlal v17.4s, v23.4h, v3.4h\n"
+ "smlal v10.4s, v25.4h, v3.4h\n"
+ "ldr d2, [x23, #0x88]\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "smlal v6.4s, v24.4h, v3.4h\n"
+ "smlal2 v8.4s, v23.8h, v3.8h\n"
"smlal2 v7.4s, v25.8h, v3.8h\n"
- "smlal v13.4s, v31.4h, v0.4h\n"
- "smlal2 v21.4s, v24.8h, v3.8h\n"
- "ldr d3, [x3, #0x90]\n"
- "smlal v20.4s, v28.4h, v4.4h\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "smlal v8.4s, v24.4h, v4.4h\n"
- "smlal v17.4s, v22.4h, v4.4h\n"
- "smlal2 v19.4s, v31.8h, v0.8h\n"
- "ldr d31, [x9, x0]\n"
- "smlal2 v10.4s, v28.8h, v4.8h\n"
- "ldr d28, [x27, x0]\n"
+ "smlal v15.4s, v23.4h, v4.4h\n"
+ "smlal2 v16.4s, v23.8h, v4.8h\n"
+ "ldr d23, [x28, x24]\n"
+ "smlal2 v5.4s, v24.8h, v3.8h\n"
+ "ssubl v23.8h, v23.8b, v9.8b\n"
+ "smlal v17.4s, v28.4h, v4.4h\n"
+ "smlal v10.4s, v24.4h, v4.4h\n"
+ "ldr d3, [x23, #0x90]\n"
+ "ssubl v3.8h, v3.8b, v14.8b\n"
+ "smlal v6.4s, v22.4h, v4.4h\n"
+ "smlal2 v8.4s, v28.8h, v4.8h\n"
+ "ldr d28, [x11, x24]\n"
+ "ssubl v28.8h, v28.8b, v9.8b\n"
"smlal2 v7.4s, v24.8h, v4.8h\n"
- "smlal v13.4s, v30.4h, v1.4h\n"
+ "smlal v15.4s, v31.4h, v0.4h\n"
+ "smlal2 v16.4s, v31.8h, v0.8h\n"
+ "ldr d31, [x6, x24]\n"
+ "smlal2 v5.4s, v22.8h, v4.8h\n"
"ssubl v31.8h, v31.8b, v9.8b\n"
- "smlal2 v21.4s, v22.8h, v4.8h\n"
- "ldr d4, [x3, #0x98]\n"
- "smlal v20.4s, v30.4h, v0.4h\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "smlal v8.4s, v27.4h, v0.4h\n"
- "smlal v17.4s, v23.4h, v0.4h\n"
- "ssubl v28.8h, v28.8b, v9.8b\n"
- "smlal2 v19.4s, v30.8h, v1.8h\n"
- "smlal2 v10.4s, v30.8h, v0.8h\n"
- "ldr d30, [x28, x0]\n"
- "ssubl v30.8h, v30.8b, v9.8b\n"
+ "smlal v17.4s, v30.4h, v0.4h\n"
+ "smlal v10.4s, v27.4h, v0.4h\n"
+ "ldr d4, [x23, #0x98]\n"
+ "ssubl v4.8h, v4.8b, v14.8b\n"
+ "smlal v6.4s, v23.4h, v0.4h\n"
+ "smlal2 v8.4s, v30.8h, v0.8h\n"
"smlal2 v7.4s, v27.8h, v0.8h\n"
- "smlal v13.4s, v26.4h, v2.4h\n"
- "smlal2 v21.4s, v23.8h, v0.8h\n"
- "ldr d0, [x3, #0xa0]\n"
- "smlal v20.4s, v26.4h, v1.4h\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
- "smlal v8.4s, v23.4h, v1.4h\n"
- "smlal v17.4s, v31.4h, v1.4h\n"
- "smlal2 v19.4s, v26.8h, v2.8h\n"
- "smlal2 v10.4s, v26.8h, v1.8h\n"
- "ldr d26, [x26, x0]\n"
- "ssubl v26.8h, v26.8b, v9.8b\n"
+ "smlal v15.4s, v30.4h, v1.4h\n"
+ "smlal2 v16.4s, v30.8h, v1.8h\n"
+ "ldr d30, [x27, x24]\n"
+ "smlal2 v5.4s, v23.8h, v0.8h\n"
+ "ssubl v30.8h, v30.8b, v9.8b\n"
+ "smlal v17.4s, v26.4h, v1.4h\n"
+ "smlal v10.4s, v23.4h, v1.4h\n"
+ "ldr d0, [x23, #0xa0]\n"
+ "ssubl v0.8h, v0.8b, v14.8b\n"
+ "smlal v6.4s, v31.4h, v1.4h\n"
+ "smlal2 v8.4s, v26.8h, v1.8h\n"
"smlal2 v7.4s, v23.8h, v1.8h\n"
- "smlal v13.4s, v25.4h, v3.4h\n"
- "smlal2 v21.4s, v31.8h, v1.8h\n"
- "ldr d1, [x3, #0xa8]\n"
- "smlal v20.4s, v25.4h, v2.4h\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
- "smlal v8.4s, v31.4h, v2.4h\n"
- "smlal v17.4s, v30.4h, v2.4h\n"
- "smlal2 v19.4s, v25.8h, v3.8h\n"
- "smlal2 v10.4s, v25.8h, v2.8h\n"
- "ldr d25, [x25, x0]\n"
- "ssubl v25.8h, v25.8b, v9.8b\n"
+ "smlal v15.4s, v26.4h, v2.4h\n"
+ "smlal2 v16.4s, v26.8h, v2.8h\n"
+ "smlal2 v5.4s, v31.8h, v1.8h\n"
+ "ldr d26, [x17, x24]\n"
+ "ssubl v26.8h, v26.8b, v9.8b\n"
+ "smlal v17.4s, v25.4h, v2.4h\n"
+ "smlal v10.4s, v31.4h, v2.4h\n"
+ "ldr d1, [x23, #0xa8]\n"
+ "ssubl v1.8h, v1.8b, v14.8b\n"
+ "smlal v6.4s, v30.4h, v2.4h\n"
+ "smlal2 v8.4s, v25.8h, v2.8h\n"
"smlal2 v7.4s, v31.8h, v2.8h\n"
- "smlal v13.4s, v24.4h, v4.4h\n"
- "smlal2 v21.4s, v30.8h, v2.8h\n"
- "ldr d2, [x3, #0xb0]\n"
- "smlal v20.4s, v24.4h, v3.4h\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
- "smlal v8.4s, v30.4h, v3.4h\n"
- "smlal v17.4s, v28.4h, v3.4h\n"
- "smlal2 v19.4s, v24.8h, v4.8h\n"
- "smlal2 v10.4s, v24.8h, v3.8h\n"
- "ldr d24, [x24, x0]\n"
- "ssubl v24.8h, v24.8b, v9.8b\n"
+ "smlal v15.4s, v25.4h, v3.4h\n"
+ "smlal2 v16.4s, v25.8h, v3.8h\n"
+ "smlal2 v5.4s, v30.8h, v2.8h\n"
+ "ldr d25, [x5, x24]\n"
+ "ssubl v25.8h, v25.8b, v9.8b\n"
+ "smlal v17.4s, v24.4h, v3.4h\n"
+ "smlal v10.4s, v30.4h, v3.4h\n"
+ "ldr d2, [x23, #0xb0]\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "smlal v6.4s, v28.4h, v3.4h\n"
+ "smlal2 v8.4s, v24.8h, v3.8h\n"
"smlal2 v7.4s, v30.8h, v3.8h\n"
- "smlal v13.4s, v27.4h, v0.4h\n"
- "smlal2 v21.4s, v28.8h, v3.8h\n"
- "ldr d3, [x3, #0xb8]\n"
- "smlal v20.4s, v22.4h, v4.4h\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "smlal v8.4s, v28.4h, v4.4h\n"
- "smlal v17.4s, v26.4h, v4.4h\n"
- "smlal2 v19.4s, v27.8h, v0.8h\n"
- "ldr d27, [x23, x0]\n"
+ "smlal v15.4s, v24.4h, v4.4h\n"
+ "smlal2 v16.4s, v24.8h, v4.8h\n"
+ "ldr d24, [x25, x24]\n"
+ "smlal2 v5.4s, v28.8h, v3.8h\n"
+ "ssubl v24.8h, v24.8b, v9.8b\n"
+ "smlal v17.4s, v22.4h, v4.4h\n"
+ "smlal v10.4s, v28.4h, v4.4h\n"
+ "ldr d3, [x23, #0xb8]\n"
+ "ssubl v3.8h, v3.8b, v14.8b\n"
+ "smlal v6.4s, v26.4h, v4.4h\n"
"smlal2 v7.4s, v28.8h, v4.8h\n"
+ "smlal v15.4s, v27.4h, v0.4h\n"
+ "smlal2 v16.4s, v27.8h, v0.8h\n"
+ "ldr d27, [x26, x24]\n"
"ssubl v27.8h, v27.8b, v9.8b\n"
- "smlal v13.4s, v23.4h, v1.4h\n"
- "smlal2 v10.4s, v22.8h, v4.8h\n"
- "ldr q22, [x8, #0x10]\n"
- "add x8, x8, #0x20\n"
- "smlal2 v21.4s, v26.8h, v4.8h\n"
- "ldr d4, [x3, #0xc0]\n"
- "smlal v20.4s, v23.4h, v0.4h\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "smlal v8.4s, v25.4h, v0.4h\n"
- "smlal v17.4s, v24.4h, v0.4h\n"
- "add x3, x3, #0xc8\n"
- "smlal2 v19.4s, v23.8h, v1.8h\n"
+ "smlal2 v8.4s, v22.8h, v4.8h\n"
+ "smlal2 v5.4s, v26.8h, v4.8h\n"
+ "ldr d4, [x23, #0xc0]\n"
+ "ssubl v4.8h, v4.8b, v14.8b\n"
+ "smlal v17.4s, v23.4h, v0.4h\n"
+ "smlal v10.4s, v25.4h, v0.4h\n"
+ "add x23, x23, #0xc8\n"
+ "smlal v6.4s, v24.4h, v0.4h\n"
"smlal2 v7.4s, v25.8h, v0.8h\n"
- "ldr d25, [x15, x0]\n"
+ "ldr d25, [x12, x24]\n"
"ssubl v25.8h, v25.8b, v9.8b\n"
- "smlal v13.4s, v31.4h, v2.4h\n"
- "smlal2 v10.4s, v23.8h, v0.8h\n"
- "smlal2 v21.4s, v24.8h, v0.8h\n"
- "smlal v20.4s, v31.4h, v1.4h\n"
- "smlal v8.4s, v24.4h, v1.4h\n"
- "smlal v17.4s, v27.4h, v1.4h\n"
- "smlal2 v19.4s, v31.8h, v2.8h\n"
+ "smlal2 v8.4s, v23.8h, v0.8h\n"
+ "smlal2 v5.4s, v24.8h, v0.8h\n"
+ "smlal v15.4s, v23.4h, v1.4h\n"
+ "smlal v17.4s, v31.4h, v1.4h\n"
+ "smlal v10.4s, v24.4h, v1.4h\n"
+ "smlal v6.4s, v27.4h, v1.4h\n"
"smlal2 v7.4s, v24.8h, v1.8h\n"
- "ldr d24, [x21, x0]\n"
+ "ldr d24, [x14, x24]\n"
+ "smlal2 v16.4s, v23.8h, v1.8h\n"
"ssubl v24.8h, v24.8b, v9.8b\n"
- "smlal v13.4s, v30.4h, v3.4h\n"
- "smlal2 v10.4s, v31.8h, v1.8h\n"
- "smlal2 v21.4s, v27.8h, v1.8h\n"
- "smlal v20.4s, v30.4h, v2.4h\n"
- "smlal v8.4s, v27.4h, v2.4h\n"
- "smlal v17.4s, v25.4h, v2.4h\n"
- "smlal2 v19.4s, v30.8h, v3.8h\n"
+ "smlal2 v8.4s, v31.8h, v1.8h\n"
+ "smlal2 v5.4s, v27.8h, v1.8h\n"
+ "smlal v15.4s, v31.4h, v2.4h\n"
+ "smlal v17.4s, v30.4h, v2.4h\n"
+ "smlal v10.4s, v27.4h, v2.4h\n"
+ "smlal v6.4s, v25.4h, v2.4h\n"
"smlal2 v7.4s, v27.8h, v2.8h\n"
- "ldr d27, [x20, x0]\n"
+ "ldr d27, [x21, x24]\n"
+ "smlal2 v16.4s, v31.8h, v2.8h\n"
"ssubl v27.8h, v27.8b, v9.8b\n"
- "smlal v13.4s, v28.4h, v4.4h\n"
- "smlal2 v10.4s, v30.8h, v2.8h\n"
- "sqrdmulh v13.4s, v13.4s, v18.4s\n"
- "add x0, x0, #0x8\n"
- "smlal2 v21.4s, v25.8h, v2.8h\n"
- "smlal v20.4s, v28.4h, v3.4h\n"
- "and v30.16b, v13.16b, v6.16b\n"
- "smlal v8.4s, v25.4h, v3.4h\n"
- "smlal v17.4s, v24.4h, v3.4h\n"
- "sshr v30.4s, v30.4s, #0x1f\n"
- "smlal2 v19.4s, v28.8h, v4.8h\n"
- "smlal2 v10.4s, v28.8h, v3.8h\n"
- "sqrdmulh v19.4s, v19.4s, v5.4s\n"
+ "smlal2 v8.4s, v30.8h, v2.8h\n"
+ "smlal2 v5.4s, v25.8h, v2.8h\n"
+ "add x24, x24, #0x8\n"
+ "smlal v15.4s, v30.4h, v3.4h\n"
+ "smlal v17.4s, v28.4h, v3.4h\n"
+ "smlal v10.4s, v25.4h, v3.4h\n"
+ "smlal v6.4s, v24.4h, v3.4h\n"
+ "smlal2 v16.4s, v30.8h, v3.8h\n"
+ "smlal2 v8.4s, v28.8h, v3.8h\n"
"smlal2 v7.4s, v25.8h, v3.8h\n"
- "smlal2 v21.4s, v24.8h, v3.8h\n"
- "and v16.16b, v19.16b, v22.16b\n"
- "smlal v20.4s, v26.4h, v4.4h\n"
- "smlal v8.4s, v24.4h, v4.4h\n"
- "sqrdmulh v20.4s, v20.4s, v18.4s\n"
- "smlal v17.4s, v27.4h, v4.4h\n"
- "smlal2 v10.4s, v26.8h, v4.8h\n"
- "sqrdmulh v8.4s, v8.4s, v18.4s\n"
+ "smlal2 v5.4s, v24.8h, v3.8h\n"
+ "smlal v15.4s, v28.4h, v4.4h\n"
+ "smlal v17.4s, v26.4h, v4.4h\n"
+ "sqrdmulh v15.4s, v15.4s, v12.4s\n"
+ "smlal v10.4s, v24.4h, v4.4h\n"
+ "smlal v6.4s, v27.4h, v4.4h\n"
+ "sqrdmulh v17.4s, v17.4s, v12.4s\n"
+ "smlal2 v16.4s, v28.8h, v4.8h\n"
+ "smlal2 v8.4s, v26.8h, v4.8h\n"
+ "sqrdmulh v10.4s, v10.4s, v12.4s\n"
"smlal2 v7.4s, v24.8h, v4.8h\n"
- "smlal2 v21.4s, v27.8h, v4.8h\n"
- "sqrdmulh v17.4s, v17.4s, v18.4s\n"
- "sqadd v13.4s, v13.4s, v30.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "and v0.16b, v20.16b, v6.16b\n"
- "sqrdmulh v10.4s, v10.4s, v5.4s\n"
- "and v18.16b, v8.16b, v6.16b\n"
- "sqrdmulh v7.4s, v7.4s, v5.4s\n"
- "and v30.16b, v17.16b, v6.16b\n"
- "sqrdmulh v21.4s, v21.4s, v5.4s\n"
- "sqadd v19.4s, v19.4s, v16.4s\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "and v26.16b, v10.16b, v22.16b\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "and v23.16b, v7.16b, v22.16b\n"
- "sshr v30.4s, v30.4s, #0x1f\n"
- "and v16.16b, v21.16b, v22.16b\n"
- "sqadd v20.4s, v20.4s, v0.4s\n"
- "sshr v26.4s, v26.4s, #0x1f\n"
- "sqadd v8.4s, v8.4s, v18.4s\n"
+ "smlal2 v5.4s, v27.8h, v4.8h\n"
+ "sqrdmulh v6.4s, v6.4s, v12.4s\n"
+ "and v23.16b, v15.16b, v19.16b\n"
+ "sqrdmulh v16.4s, v16.4s, v20.4s\n"
+ "and v22.16b, v17.16b, v19.16b\n"
+ "sqrdmulh v8.4s, v8.4s, v20.4s\n"
+ "and v21.16b, v10.16b, v19.16b\n"
+ "sqrdmulh v7.4s, v7.4s, v20.4s\n"
+ "and v26.16b, v6.16b, v19.16b\n"
+ "sqrdmulh v5.4s, v5.4s, v20.4s\n"
"sshr v23.4s, v23.4s, #0x1f\n"
- "sqadd v17.4s, v17.4s, v30.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "srshl v13.4s, v13.4s, v6.4s\n"
- "srshl v20.4s, v20.4s, v6.4s\n"
- "sqadd v10.4s, v10.4s, v26.4s\n"
- "srshl v8.4s, v8.4s, v6.4s\n"
- "sqadd v7.4s, v7.4s, v23.4s\n"
- "srshl v17.4s, v17.4s, v6.4s\n"
- "sqadd v21.4s, v21.4s, v16.4s\n"
- "srshl v19.4s, v19.4s, v22.4s\n"
- "sqxtn v13.4h, v13.4s\n"
- "srshl v10.4s, v10.4s, v22.4s\n"
- "sqxtn v20.4h, v20.4s\n"
- "srshl v7.4s, v7.4s, v22.4s\n"
- "sqxtn v8.4h, v8.4s\n"
- "srshl v21.4s, v21.4s, v22.4s\n"
+ "and v4.16b, v16.16b, v29.16b\n"
+ "sshr v22.4s, v22.4s, #0x1f\n"
+ "and v2.16b, v8.16b, v29.16b\n"
+ "sshr v21.4s, v21.4s, #0x1f\n"
+ "and v3.16b, v7.16b, v29.16b\n"
+ "sshr v26.4s, v26.4s, #0x1f\n"
+ "and v25.16b, v5.16b, v29.16b\n"
+ "sqadd v15.4s, v15.4s, v23.4s\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "sqadd v17.4s, v17.4s, v22.4s\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sqadd v10.4s, v10.4s, v21.4s\n"
+ "sshr v3.4s, v3.4s, #0x1f\n"
+ "sqadd v6.4s, v6.4s, v26.4s\n"
+ "sshr v25.4s, v25.4s, #0x1f\n"
+ "srshl v15.4s, v15.4s, v19.4s\n"
+ "sqadd v16.4s, v16.4s, v4.4s\n"
+ "srshl v17.4s, v17.4s, v19.4s\n"
+ "sqadd v8.4s, v8.4s, v2.4s\n"
+ "srshl v10.4s, v10.4s, v19.4s\n"
+ "sqadd v7.4s, v7.4s, v3.4s\n"
+ "srshl v6.4s, v6.4s, v19.4s\n"
+ "sqadd v5.4s, v5.4s, v25.4s\n"
+ "srshl v16.4s, v16.4s, v29.4s\n"
+ "sqxtn v15.4h, v15.4s\n"
+ "srshl v8.4s, v8.4s, v29.4s\n"
"sqxtn v17.4h, v17.4s\n"
- "sqxtn2 v13.8h, v19.4s\n"
- "sqxtn2 v20.8h, v10.4s\n"
- "sqxtn2 v8.8h, v7.4s\n"
- "sqxtn2 v17.8h, v21.4s\n"
- "sqadd v13.8h, v13.8h, v14.8h\n"
- "sqadd v20.8h, v20.8h, v14.8h\n"
- "sqadd v8.8h, v8.8h, v14.8h\n"
- "sqadd v17.8h, v17.8h, v14.8h\n"
- "smax v13.8h, v13.8h, v12.8h\n"
- "smax v20.8h, v20.8h, v12.8h\n"
- "smax v8.8h, v8.8h, v12.8h\n"
- "smax v17.8h, v17.8h, v12.8h\n"
- "smin v13.8h, v13.8h, v11.8h\n"
- "smin v20.8h, v20.8h, v11.8h\n"
- "smin v8.8h, v8.8h, v11.8h\n"
- "smin v17.8h, v17.8h, v11.8h\n"
- "uzp1 v13.16b, v13.16b, v13.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "str d13, [x17, x10]\n"
- "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "srshl v7.4s, v7.4s, v29.4s\n"
+ "sqxtn v10.4h, v10.4s\n"
+ "srshl v5.4s, v5.4s, v29.4s\n"
+ "sqxtn v6.4h, v6.4s\n"
+ "sqxtn2 v15.8h, v16.4s\n"
+ "sqxtn2 v17.8h, v8.4s\n"
+ "sqxtn2 v10.8h, v7.4s\n"
+ "sqxtn2 v6.8h, v5.4s\n"
+ "sqadd v15.8h, v15.8h, v18.8h\n"
+ "sqadd v17.8h, v17.8h, v18.8h\n"
+ "sqadd v10.8h, v10.8h, v18.8h\n"
+ "sqadd v6.8h, v6.8h, v18.8h\n"
+ "smax v15.8h, v15.8h, v11.8h\n"
+ "smax v17.8h, v17.8h, v11.8h\n"
+ "smax v10.8h, v10.8h, v11.8h\n"
+ "smax v6.8h, v6.8h, v11.8h\n"
+ "smin v15.8h, v15.8h, v13.8h\n"
+ "smin v17.8h, v17.8h, v13.8h\n"
+ "smin v10.8h, v10.8h, v13.8h\n"
+ "smin v6.8h, v6.8h, v13.8h\n"
+ "uzp1 v15.16b, v15.16b, v15.16b\n"
"uzp1 v17.16b, v17.16b, v17.16b\n"
- "str d20, [x6, x10]\n"
- "str d8, [x7, x10]\n"
- "str d17, [x16, x10]\n"
- "ldr q13, [x13, #0x0]\n"
- "ldr q19, [x13, #0x10]\n"
- "add x13, x13, #0x20\n"
- "ldr d0, [x3, #0x0]\n"
- "ldr d1, [x3, #0x8]\n"
- "add x10, x10, #0x8\n"
- "str x13, [%x[params], %[offsetof_Params_bias]]\n"
- "ldr d2, [x3, #0x10]\n"
- "ldr d3, [x3, #0x18]\n"
- "mov v20.16b, v13.16b\n"
- "mov v10.16b, v19.16b\n"
- "ldr d4, [x3, #0x20]\n"
- "ldp x9, x28, [x4, #0x0]\n"
- "mov v8.16b, v13.16b\n"
- "mov v7.16b, v19.16b\n"
- "ldp x27, x26, [x4, #0x10]\n"
- "ldp x25, x24, [x4, #0x20]\n"
- "mov v17.16b, v13.16b\n"
- "mov v21.16b, v19.16b\n"
- "ldp x23, x22, [x4, #0x30]\n"
- "ldp x21, x20, [x4, #0x40]\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
- "ldr d31, [x9, x0]\n"
- "ldr d30, [x28, x0]\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "ldr d29, [x27, x0]\n"
- "ldr d28, [x26, x0]\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
+ "str d15, [x16, x22]\n"
+ "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "str d17, [x8, x22]\n"
+ "str d10, [x4, x22]\n"
+ "str d6, [x7, x22]\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr q15, [x19, #0x0]\n"
+ "add x22, x22, #0x8\n"
+ "ldr q16, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr d0, [x23, #0x0]\n"
+ "ldr d1, [x23, #0x8]\n"
+ "ldr d2, [x23, #0x10]\n"
+ "mov v17.16b, v15.16b\n"
+ "mov v8.16b, v16.16b\n"
+ "ldr d3, [x23, #0x18]\n"
+ "ldr d4, [x23, #0x20]\n"
+ "mov v10.16b, v15.16b\n"
+ "mov v7.16b, v16.16b\n"
+ "ldp x28, x6, [x20, #0x0]\n"
+ "ldp x26, x25, [x20, #0x10]\n"
+ "mov v6.16b, v15.16b\n"
+ "mov v5.16b, v16.16b\n"
+ "ldp x5, x2, [x20, #0x20]\n"
+ "ldp x27, x21, [x20, #0x30]\n"
+ "ssubl v0.8h, v0.8b, v14.8b\n"
+ "ssubl v1.8h, v1.8b, v14.8b\n"
+ "ldp x12, x19, [x20, #0x40]\n"
+ "ldr d31, [x28, x24]\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "ssubl v3.8h, v3.8b, v14.8b\n"
+ "ldr d30, [x6, x24]\n"
+ "ldr d29, [x26, x24]\n"
+ "ssubl v4.8h, v4.8b, v14.8b\n"
"ssubl v31.8h, v31.8b, v9.8b\n"
- "ldr d27, [x25, x0]\n"
- "ldr d23, [x24, x0]\n"
+ "ldr d28, [x25, x24]\n"
+ "ldr d27, [x5, x24]\n"
"ssubl v30.8h, v30.8b, v9.8b\n"
"ssubl v29.8h, v29.8b, v9.8b\n"
- "ldr d25, [x23, x0]\n"
- "ldr d24, [x22, x0]\n"
+ "ldr d23, [x2, x24]\n"
+ "ldr d25, [x27, x24]\n"
"ssubl v28.8h, v28.8b, v9.8b\n"
"ssubl v27.8h, v27.8b, v9.8b\n"
- "ldr d26, [x21, x0]\n"
- "ldr d22, [x20, x0]\n"
+ "ldr d24, [x21, x24]\n"
+ "ldr d26, [x12, x24]\n"
"ssubl v23.8h, v23.8b, v9.8b\n"
"ssubl v25.8h, v25.8b, v9.8b\n"
+ "ldr d22, [x19, x24]\n"
"ssubl v24.8h, v24.8b, v9.8b\n"
"ssubl v26.8h, v26.8b, v9.8b\n"
"ssubl v22.8h, v22.8b, v9.8b\n"
"bgt 1b\n"
"2:" // Tail
- "ldr q18, [x5, #0x0]\n"
- "ldr q6, [x8, #0x0]\n"
- "smlal v13.4s, v31.4h, v0.4h\n"
- "smlal2 v19.4s, v31.8h, v0.8h\n"
- "ldr q5, [x5, #0x10]\n"
- "smlal v13.4s, v30.4h, v1.4h\n"
- "ldr x20, [x4, #0x50]\n"
- "smlal v20.4s, v30.4h, v0.4h\n"
- "smlal v8.4s, v29.4h, v0.4h\n"
- "smlal v17.4s, v28.4h, v0.4h\n"
- "ldr x22, [x4, #0x58]\n"
- "ldr x21, [x4, #0x60]\n"
- "smlal2 v19.4s, v30.8h, v1.8h\n"
- "smlal2 v10.4s, v30.8h, v0.8h\n"
- "ldr d31, [x20, x0]\n"
+ "smlal v15.4s, v31.4h, v0.4h\n"
+ "smlal2 v16.4s, v31.8h, v0.8h\n"
+ "ldr x19, [x20, #0x50]\n"
+ "ldr d31, [x19, x24]\n"
+ "smlal v17.4s, v30.4h, v0.4h\n"
+ "smlal v10.4s, v29.4h, v0.4h\n"
+ "ldr x15, [x20, #0x58]\n"
"ssubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v6.4s, v28.4h, v0.4h\n"
+ "smlal2 v8.4s, v30.8h, v0.8h\n"
+ "ldr x19, [x20, #0x60]\n"
+ "ldr x27, [x20, #0x68]\n"
"smlal2 v7.4s, v29.8h, v0.8h\n"
- "smlal v13.4s, v27.4h, v2.4h\n"
- "ldr x20, [x4, #0x68]\n"
- "ldr x26, [x4, #0x70]\n"
- "smlal2 v21.4s, v28.8h, v0.8h\n"
- "ldr d30, [x22, x0]\n"
- "smlal v20.4s, v27.4h, v1.4h\n"
+ "smlal v15.4s, v30.4h, v1.4h\n"
+ "ldr x5, [x20, #0x70]\n"
+ "ldr x11, [x20, #0x78]\n"
+ "smlal2 v16.4s, v30.8h, v1.8h\n"
+ "smlal2 v5.4s, v28.8h, v0.8h\n"
+ "ldr d30, [x15, x24]\n"
"ssubl v30.8h, v30.8b, v9.8b\n"
- "smlal v8.4s, v28.4h, v1.4h\n"
- "smlal v17.4s, v23.4h, v1.4h\n"
- "ldr x25, [x4, #0x78]\n"
- "ldr x23, [x4, #0x80]\n"
- "smlal2 v19.4s, v27.8h, v2.8h\n"
- "smlal2 v10.4s, v27.8h, v1.8h\n"
- "ldr d0, [x3, #0x28]\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
+ "smlal v17.4s, v27.4h, v1.4h\n"
+ "smlal v10.4s, v28.4h, v1.4h\n"
+ "ldr d0, [x23, #0x28]\n"
+ "ssubl v0.8h, v0.8b, v14.8b\n"
+ "smlal v6.4s, v23.4h, v1.4h\n"
+ "smlal2 v8.4s, v27.8h, v1.8h\n"
+ "ldr x12, [x20, #0x80]\n"
+ "ldr x26, [x20, #0x88]\n"
"smlal2 v7.4s, v28.8h, v1.8h\n"
- "smlal v13.4s, v25.4h, v3.4h\n"
- "ldr x24, [x4, #0x88]\n"
- "ldr x15, [x4, #0x90]\n"
- "smlal2 v21.4s, v23.8h, v1.8h\n"
- "ldr d27, [x21, x0]\n"
- "smlal v20.4s, v25.4h, v2.4h\n"
+ "smlal v15.4s, v27.4h, v2.4h\n"
+ "ldr x14, [x20, #0x90]\n"
+ "ldr x15, [x20, #0x98]\n"
+ "smlal2 v16.4s, v27.8h, v2.8h\n"
+ "smlal2 v5.4s, v23.8h, v1.8h\n"
+ "ldr d27, [x19, x24]\n"
"ssubl v27.8h, v27.8b, v9.8b\n"
- "smlal v8.4s, v23.4h, v2.4h\n"
- "smlal v17.4s, v31.4h, v2.4h\n"
- "ldr x21, [x4, #0x98]\n"
- "ldr x14, [x4, #0xa0]\n"
- "smlal2 v19.4s, v25.8h, v3.8h\n"
- "smlal2 v10.4s, v25.8h, v2.8h\n"
- "ldr d1, [x3, #0x30]\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
+ "smlal v17.4s, v25.4h, v2.4h\n"
+ "smlal v10.4s, v23.4h, v2.4h\n"
+ "ldr d1, [x23, #0x30]\n"
+ "ssubl v1.8h, v1.8b, v14.8b\n"
+ "smlal v6.4s, v31.4h, v2.4h\n"
+ "smlal2 v8.4s, v25.8h, v2.8h\n"
+ "ldr x21, [x20, #0xa0]\n"
+ "ldr x2, [x20, #0xa8]\n"
"smlal2 v7.4s, v23.8h, v2.8h\n"
- "smlal v13.4s, v24.4h, v4.4h\n"
- "ldr x13, [x4, #0xa8]\n"
- "ldr x12, [x4, #0xb0]\n"
- "smlal2 v21.4s, v31.8h, v2.8h\n"
- "ldr d25, [x20, x0]\n"
- "smlal v20.4s, v24.4h, v3.4h\n"
+ "smlal v15.4s, v25.4h, v3.4h\n"
+ "ldr x13, [x20, #0xb0]\n"
+ "ldr x9, [x20, #0xb8]\n"
+ "smlal2 v16.4s, v25.8h, v3.8h\n"
+ "smlal2 v5.4s, v31.8h, v2.8h\n"
+ "ldr d25, [x27, x24]\n"
"ssubl v25.8h, v25.8b, v9.8b\n"
- "smlal v8.4s, v31.4h, v3.4h\n"
- "smlal v17.4s, v30.4h, v3.4h\n"
- "ldr x20, [x4, #0xb8]\n"
- "ldr x11, [x4, #0xc0]\n"
- "smlal2 v19.4s, v24.8h, v4.8h\n"
- "smlal2 v10.4s, v24.8h, v3.8h\n"
- "ldr d2, [x3, #0x38]\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
+ "smlal v17.4s, v24.4h, v3.4h\n"
+ "smlal v10.4s, v31.4h, v3.4h\n"
+ "ldr d2, [x23, #0x38]\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "smlal v6.4s, v30.4h, v3.4h\n"
+ "smlal2 v8.4s, v24.8h, v3.8h\n"
+ "ldr x19, [x20, #0xc0]\n"
+ "ldr x28, [x20, #0xc8]\n"
"smlal2 v7.4s, v31.8h, v3.8h\n"
- "smlal v13.4s, v29.4h, v0.4h\n"
- "ldr x22, [x4, #0xc8]\n"
- "ldr x9, [x4, #0xd0]\n"
- "smlal2 v21.4s, v30.8h, v3.8h\n"
- "ldr d24, [x26, x0]\n"
- "smlal v20.4s, v27.4h, v4.4h\n"
+ "smlal v15.4s, v24.4h, v4.4h\n"
+ "ldr x6, [x20, #0xd0]\n"
+ "ldr x27, [x20, #0xd8]\n"
+ "smlal2 v16.4s, v24.8h, v4.8h\n"
+ "smlal2 v5.4s, v30.8h, v3.8h\n"
+ "ldr d24, [x5, x24]\n"
"ssubl v24.8h, v24.8b, v9.8b\n"
- "smlal v8.4s, v30.4h, v4.4h\n"
- "smlal v17.4s, v26.4h, v4.4h\n"
- "ldr x28, [x4, #0xd8]\n"
- "ldr x27, [x4, #0xe0]\n"
- "smlal2 v19.4s, v29.8h, v0.8h\n"
- "ldr d3, [x3, #0x40]\n"
- "smlal2 v10.4s, v27.8h, v4.8h\n"
- "ldr d27, [x25, x0]\n"
- "smlal2 v7.4s, v30.8h, v4.8h\n"
- "smlal v13.4s, v28.4h, v1.4h\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "ldr x26, [x4, #0xe8]\n"
- "smlal2 v21.4s, v26.8h, v4.8h\n"
- "ldr d4, [x3, #0x48]\n"
- "smlal v20.4s, v28.4h, v0.4h\n"
+ "smlal v17.4s, v27.4h, v4.4h\n"
+ "smlal v10.4s, v30.4h, v4.4h\n"
+ "ldr d3, [x23, #0x40]\n"
+ "ssubl v3.8h, v3.8b, v14.8b\n"
+ "smlal v6.4s, v26.4h, v4.4h\n"
+ "smlal2 v8.4s, v27.8h, v4.8h\n"
+ "ldr d27, [x11, x24]\n"
"ssubl v27.8h, v27.8b, v9.8b\n"
- "smlal v8.4s, v22.4h, v0.4h\n"
- "smlal v17.4s, v25.4h, v0.4h\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "ldr x25, [x4, #0xf0]\n"
- "smlal2 v19.4s, v28.8h, v1.8h\n"
- "smlal2 v10.4s, v28.8h, v0.8h\n"
- "ldr d28, [x24, x0]\n"
- "ssubl v28.8h, v28.8b, v9.8b\n"
+ "smlal2 v7.4s, v30.8h, v4.8h\n"
+ "smlal v15.4s, v29.4h, v0.4h\n"
+ "ldr x11, [x20, #0xe0]\n"
+ "ldr x17, [x20, #0xe8]\n"
+ "smlal2 v16.4s, v29.8h, v0.8h\n"
+ "smlal2 v5.4s, v26.8h, v4.8h\n"
+ "ldr d4, [x23, #0x48]\n"
+ "ssubl v4.8h, v4.8b, v14.8b\n"
+ "smlal v17.4s, v28.4h, v0.4h\n"
+ "smlal v10.4s, v22.4h, v0.4h\n"
+ "ldr x5, [x20, #0xf0]\n"
+ "ldr x25, [x20, #0xf8]\n"
+ "smlal v6.4s, v25.4h, v0.4h\n"
+ "smlal2 v8.4s, v28.8h, v0.8h\n"
+ "ldr q12, [x10, #0x0]\n"
+ "ldr q19, [x1, #0x0]\n"
"smlal2 v7.4s, v22.8h, v0.8h\n"
- "smlal v13.4s, v23.4h, v2.4h\n"
- "ldr x24, [x4, #0xf8]\n"
- "tst x1, #0x7\n"
- "smlal2 v21.4s, v25.8h, v0.8h\n"
- "ldr d0, [x3, #0x50]\n"
- "smlal v20.4s, v23.4h, v1.4h\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
- "smlal v8.4s, v25.4h, v1.4h\n"
- "smlal v17.4s, v24.4h, v1.4h\n"
- "add x5, x5, #0x20\n"
- "smlal2 v19.4s, v23.8h, v2.8h\n"
- "smlal2 v10.4s, v23.8h, v1.8h\n"
- "ldr d23, [x23, x0]\n"
- "ssubl v23.8h, v23.8b, v9.8b\n"
+ "smlal v15.4s, v28.4h, v1.4h\n"
+ "ldr q20, [x10, #0x10]\n"
+ "ldr q29, [x1, #0x10]\n"
+ "smlal2 v16.4s, v28.8h, v1.8h\n"
+ "smlal2 v5.4s, v25.8h, v0.8h\n"
+ "ldr d28, [x26, x24]\n"
+ "ldr d0, [x23, #0x50]\n"
+ "smlal v17.4s, v23.4h, v1.4h\n"
+ "smlal v10.4s, v25.4h, v1.4h\n"
+ "ssubl v28.8h, v28.8b, v9.8b\n"
+ "ldr x26, [x20, #0x100]\n"
+ "smlal v6.4s, v24.4h, v1.4h\n"
+ "smlal2 v8.4s, v23.8h, v1.8h\n"
+ "ssubl v0.8h, v0.8b, v14.8b\n"
+ "tst x0, #0x7\n"
"smlal2 v7.4s, v25.8h, v1.8h\n"
- "smlal v13.4s, v31.4h, v3.4h\n"
- "ldr x23, [x4, #0x100]\n"
- "smlal2 v21.4s, v24.8h, v1.8h\n"
- "ldr d1, [x3, #0x58]\n"
- "smlal v20.4s, v31.4h, v2.4h\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
- "smlal v8.4s, v24.4h, v2.4h\n"
- "smlal v17.4s, v27.4h, v2.4h\n"
- "smlal2 v19.4s, v31.8h, v3.8h\n"
- "smlal2 v10.4s, v31.8h, v2.8h\n"
- "ldr d31, [x15, x0]\n"
- "ssubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v15.4s, v23.4h, v2.4h\n"
+ "add x10, x10, #0x20\n"
+ "add x1, x1, #0x20\n"
+ "smlal2 v16.4s, v23.8h, v2.8h\n"
+ "ldr d23, [x12, x24]\n"
+ "smlal2 v5.4s, v24.8h, v1.8h\n"
+ "ssubl v23.8h, v23.8b, v9.8b\n"
+ "smlal v17.4s, v31.4h, v2.4h\n"
+ "smlal v10.4s, v24.4h, v2.4h\n"
+ "ldr d1, [x23, #0x58]\n"
+ "ssubl v1.8h, v1.8b, v14.8b\n"
+ "smlal v6.4s, v27.4h, v2.4h\n"
+ "smlal2 v8.4s, v31.8h, v2.8h\n"
+ "ldr x12, [x20, #0x108]\n"
"smlal2 v7.4s, v24.8h, v2.8h\n"
- "smlal v13.4s, v30.4h, v4.4h\n"
- "ldr x15, [x4, #0x108]\n"
- "smlal2 v21.4s, v27.8h, v2.8h\n"
- "ldr d2, [x3, #0x60]\n"
- "smlal v20.4s, v30.4h, v3.4h\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
- "smlal v8.4s, v27.4h, v3.4h\n"
- "smlal v17.4s, v23.4h, v3.4h\n"
- "smlal2 v19.4s, v30.8h, v4.8h\n"
- "smlal2 v10.4s, v30.8h, v3.8h\n"
- "ldr d30, [x21, x0]\n"
- "ssubl v30.8h, v30.8b, v9.8b\n"
+ "smlal v15.4s, v31.4h, v3.4h\n"
+ "smlal2 v16.4s, v31.8h, v3.8h\n"
+ "smlal2 v5.4s, v27.8h, v2.8h\n"
+ "ldr d31, [x14, x24]\n"
+ "ssubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v17.4s, v30.4h, v3.4h\n"
+ "smlal v10.4s, v27.4h, v3.4h\n"
+ "ldr d2, [x23, #0x60]\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "smlal v6.4s, v23.4h, v3.4h\n"
+ "smlal2 v8.4s, v30.8h, v3.8h\n"
+ "ldr x14, [x20, #0x110]\n"
"smlal2 v7.4s, v27.8h, v3.8h\n"
- "smlal v13.4s, v22.4h, v0.4h\n"
- "ldr x21, [x4, #0x110]\n"
- "smlal2 v21.4s, v23.8h, v3.8h\n"
- "ldr d3, [x3, #0x68]\n"
- "smlal v20.4s, v26.4h, v4.4h\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "smlal v8.4s, v23.4h, v4.4h\n"
- "smlal v17.4s, v28.4h, v4.4h\n"
- "smlal2 v19.4s, v22.8h, v0.8h\n"
- "ldr d22, [x20, x0]\n"
- "smlal2 v10.4s, v26.8h, v4.8h\n"
- "ldr d26, [x14, x0]\n"
- "smlal2 v7.4s, v23.8h, v4.8h\n"
- "smlal v13.4s, v25.4h, v1.4h\n"
+ "smlal v15.4s, v30.4h, v4.4h\n"
+ "smlal2 v16.4s, v30.8h, v4.8h\n"
+ "ldr d30, [x15, x24]\n"
+ "smlal2 v5.4s, v23.8h, v3.8h\n"
+ "ssubl v30.8h, v30.8b, v9.8b\n"
+ "smlal v17.4s, v26.4h, v4.4h\n"
+ "smlal v10.4s, v23.4h, v4.4h\n"
+ "ldr d3, [x23, #0x68]\n"
+ "ssubl v3.8h, v3.8b, v14.8b\n"
+ "smlal v6.4s, v28.4h, v4.4h\n"
+ "smlal2 v8.4s, v26.8h, v4.8h\n"
+ "ldr d26, [x21, x24]\n"
"ssubl v26.8h, v26.8b, v9.8b\n"
- "ldr x20, [x4, #0x118]\n"
- "smlal2 v21.4s, v28.8h, v4.8h\n"
- "ldr d4, [x3, #0x70]\n"
- "smlal v20.4s, v25.4h, v0.4h\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "smlal v8.4s, v31.4h, v0.4h\n"
- "smlal v17.4s, v30.4h, v0.4h\n"
+ "smlal2 v7.4s, v23.8h, v4.8h\n"
+ "smlal v15.4s, v22.4h, v0.4h\n"
+ "ldr x21, [x20, #0x118]\n"
+ "smlal2 v16.4s, v22.8h, v0.8h\n"
+ "smlal2 v5.4s, v28.8h, v4.8h\n"
+ "ldr d4, [x23, #0x70]\n"
+ "ldr d22, [x9, x24]\n"
+ "smlal v17.4s, v25.4h, v0.4h\n"
+ "smlal v10.4s, v31.4h, v0.4h\n"
+ "ssubl v4.8h, v4.8b, v14.8b\n"
+ "smlal v6.4s, v30.4h, v0.4h\n"
+ "smlal2 v8.4s, v25.8h, v0.8h\n"
"ssubl v22.8h, v22.8b, v9.8b\n"
- "smlal2 v19.4s, v25.8h, v1.8h\n"
- "smlal2 v10.4s, v25.8h, v0.8h\n"
- "ldr d25, [x13, x0]\n"
- "ssubl v25.8h, v25.8b, v9.8b\n"
"smlal2 v7.4s, v31.8h, v0.8h\n"
- "smlal v13.4s, v24.4h, v2.4h\n"
- "smlal2 v21.4s, v30.8h, v0.8h\n"
- "ldr d0, [x3, #0x78]\n"
- "smlal v20.4s, v24.4h, v1.4h\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
- "smlal v8.4s, v30.4h, v1.4h\n"
- "smlal v17.4s, v26.4h, v1.4h\n"
- "smlal2 v19.4s, v24.8h, v2.8h\n"
- "smlal2 v10.4s, v24.8h, v1.8h\n"
- "ldr d24, [x12, x0]\n"
- "ssubl v24.8h, v24.8b, v9.8b\n"
+ "smlal v15.4s, v25.4h, v1.4h\n"
+ "smlal2 v16.4s, v25.8h, v1.8h\n"
+ "ldr d25, [x2, x24]\n"
+ "smlal2 v5.4s, v30.8h, v0.8h\n"
+ "ssubl v25.8h, v25.8b, v9.8b\n"
+ "smlal v17.4s, v24.4h, v1.4h\n"
+ "smlal v10.4s, v30.4h, v1.4h\n"
+ "ldr d0, [x23, #0x78]\n"
+ "ssubl v0.8h, v0.8b, v14.8b\n"
+ "smlal v6.4s, v26.4h, v1.4h\n"
+ "smlal2 v8.4s, v24.8h, v1.8h\n"
"smlal2 v7.4s, v30.8h, v1.8h\n"
- "smlal v13.4s, v27.4h, v3.4h\n"
- "smlal2 v21.4s, v26.8h, v1.8h\n"
- "ldr d1, [x3, #0x80]\n"
- "smlal v20.4s, v27.4h, v2.4h\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
- "smlal v8.4s, v26.4h, v2.4h\n"
- "smlal v17.4s, v25.4h, v2.4h\n"
- "smlal2 v19.4s, v27.8h, v3.8h\n"
- "smlal2 v10.4s, v27.8h, v2.8h\n"
- "ldr d27, [x11, x0]\n"
- "ssubl v27.8h, v27.8b, v9.8b\n"
+ "smlal v15.4s, v24.4h, v2.4h\n"
+ "smlal2 v16.4s, v24.8h, v2.8h\n"
+ "ldr d24, [x13, x24]\n"
+ "smlal2 v5.4s, v26.8h, v1.8h\n"
+ "ssubl v24.8h, v24.8b, v9.8b\n"
+ "smlal v17.4s, v27.4h, v2.4h\n"
+ "smlal v10.4s, v26.4h, v2.4h\n"
+ "ldr d1, [x23, #0x80]\n"
+ "ssubl v1.8h, v1.8b, v14.8b\n"
+ "smlal v6.4s, v25.4h, v2.4h\n"
+ "smlal2 v8.4s, v27.8h, v2.8h\n"
"smlal2 v7.4s, v26.8h, v2.8h\n"
- "smlal v13.4s, v23.4h, v4.4h\n"
- "smlal2 v21.4s, v25.8h, v2.8h\n"
- "ldr d2, [x3, #0x88]\n"
- "smlal v20.4s, v23.4h, v3.4h\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
- "smlal v8.4s, v25.4h, v3.4h\n"
- "smlal v17.4s, v24.4h, v3.4h\n"
- "smlal2 v19.4s, v23.8h, v4.8h\n"
- "smlal2 v10.4s, v23.8h, v3.8h\n"
- "ldr d23, [x22, x0]\n"
- "ssubl v23.8h, v23.8b, v9.8b\n"
+ "smlal v15.4s, v27.4h, v3.4h\n"
+ "smlal2 v16.4s, v27.8h, v3.8h\n"
+ "smlal2 v5.4s, v25.8h, v2.8h\n"
+ "ldr d27, [x19, x24]\n"
+ "ssubl v27.8h, v27.8b, v9.8b\n"
+ "smlal v17.4s, v23.4h, v3.4h\n"
+ "smlal v10.4s, v25.4h, v3.4h\n"
+ "ldr d2, [x23, #0x88]\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "smlal v6.4s, v24.4h, v3.4h\n"
+ "smlal2 v8.4s, v23.8h, v3.8h\n"
"smlal2 v7.4s, v25.8h, v3.8h\n"
- "smlal v13.4s, v31.4h, v0.4h\n"
- "smlal2 v21.4s, v24.8h, v3.8h\n"
- "ldr d3, [x3, #0x90]\n"
- "smlal v20.4s, v28.4h, v4.4h\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "smlal v8.4s, v24.4h, v4.4h\n"
- "smlal v17.4s, v22.4h, v4.4h\n"
- "smlal2 v19.4s, v31.8h, v0.8h\n"
- "ldr d31, [x9, x0]\n"
- "smlal2 v10.4s, v28.8h, v4.8h\n"
- "ldr d28, [x27, x0]\n"
+ "smlal v15.4s, v23.4h, v4.4h\n"
+ "smlal2 v16.4s, v23.8h, v4.8h\n"
+ "ldr d23, [x28, x24]\n"
+ "smlal2 v5.4s, v24.8h, v3.8h\n"
+ "ssubl v23.8h, v23.8b, v9.8b\n"
+ "smlal v17.4s, v28.4h, v4.4h\n"
+ "smlal v10.4s, v24.4h, v4.4h\n"
+ "ldr d3, [x23, #0x90]\n"
+ "ssubl v3.8h, v3.8b, v14.8b\n"
+ "smlal v6.4s, v22.4h, v4.4h\n"
+ "smlal2 v8.4s, v28.8h, v4.8h\n"
+ "ldr d28, [x11, x24]\n"
+ "ssubl v28.8h, v28.8b, v9.8b\n"
"smlal2 v7.4s, v24.8h, v4.8h\n"
- "smlal v13.4s, v30.4h, v1.4h\n"
+ "smlal v15.4s, v31.4h, v0.4h\n"
+ "smlal2 v16.4s, v31.8h, v0.8h\n"
+ "ldr d31, [x6, x24]\n"
+ "smlal2 v5.4s, v22.8h, v4.8h\n"
"ssubl v31.8h, v31.8b, v9.8b\n"
- "smlal2 v21.4s, v22.8h, v4.8h\n"
- "ldr d4, [x3, #0x98]\n"
- "smlal v20.4s, v30.4h, v0.4h\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "smlal v8.4s, v27.4h, v0.4h\n"
- "smlal v17.4s, v23.4h, v0.4h\n"
- "ssubl v28.8h, v28.8b, v9.8b\n"
- "smlal2 v19.4s, v30.8h, v1.8h\n"
- "smlal2 v10.4s, v30.8h, v0.8h\n"
- "ldr d30, [x28, x0]\n"
- "ssubl v30.8h, v30.8b, v9.8b\n"
+ "smlal v17.4s, v30.4h, v0.4h\n"
+ "smlal v10.4s, v27.4h, v0.4h\n"
+ "ldr d4, [x23, #0x98]\n"
+ "ssubl v4.8h, v4.8b, v14.8b\n"
+ "smlal v6.4s, v23.4h, v0.4h\n"
+ "smlal2 v8.4s, v30.8h, v0.8h\n"
"smlal2 v7.4s, v27.8h, v0.8h\n"
- "smlal v13.4s, v26.4h, v2.4h\n"
- "smlal2 v21.4s, v23.8h, v0.8h\n"
- "ldr d0, [x3, #0xa0]\n"
- "smlal v20.4s, v26.4h, v1.4h\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
- "smlal v8.4s, v23.4h, v1.4h\n"
- "smlal v17.4s, v31.4h, v1.4h\n"
- "smlal2 v19.4s, v26.8h, v2.8h\n"
- "smlal2 v10.4s, v26.8h, v1.8h\n"
- "ldr d26, [x26, x0]\n"
- "ssubl v26.8h, v26.8b, v9.8b\n"
+ "smlal v15.4s, v30.4h, v1.4h\n"
+ "smlal2 v16.4s, v30.8h, v1.8h\n"
+ "ldr d30, [x27, x24]\n"
+ "smlal2 v5.4s, v23.8h, v0.8h\n"
+ "ssubl v30.8h, v30.8b, v9.8b\n"
+ "smlal v17.4s, v26.4h, v1.4h\n"
+ "smlal v10.4s, v23.4h, v1.4h\n"
+ "ldr d0, [x23, #0xa0]\n"
+ "ssubl v0.8h, v0.8b, v14.8b\n"
+ "smlal v6.4s, v31.4h, v1.4h\n"
+ "smlal2 v8.4s, v26.8h, v1.8h\n"
"smlal2 v7.4s, v23.8h, v1.8h\n"
- "smlal v13.4s, v25.4h, v3.4h\n"
- "smlal2 v21.4s, v31.8h, v1.8h\n"
- "ldr d1, [x3, #0xa8]\n"
- "smlal v20.4s, v25.4h, v2.4h\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
- "smlal v8.4s, v31.4h, v2.4h\n"
- "smlal v17.4s, v30.4h, v2.4h\n"
- "smlal2 v19.4s, v25.8h, v3.8h\n"
- "smlal2 v10.4s, v25.8h, v2.8h\n"
- "ldr d25, [x25, x0]\n"
- "ssubl v25.8h, v25.8b, v9.8b\n"
+ "smlal v15.4s, v26.4h, v2.4h\n"
+ "smlal2 v16.4s, v26.8h, v2.8h\n"
+ "smlal2 v5.4s, v31.8h, v1.8h\n"
+ "ldr d26, [x17, x24]\n"
+ "ssubl v26.8h, v26.8b, v9.8b\n"
+ "smlal v17.4s, v25.4h, v2.4h\n"
+ "smlal v10.4s, v31.4h, v2.4h\n"
+ "ldr d1, [x23, #0xa8]\n"
+ "ssubl v1.8h, v1.8b, v14.8b\n"
+ "smlal v6.4s, v30.4h, v2.4h\n"
+ "smlal2 v8.4s, v25.8h, v2.8h\n"
"smlal2 v7.4s, v31.8h, v2.8h\n"
- "smlal v13.4s, v24.4h, v4.4h\n"
- "smlal2 v21.4s, v30.8h, v2.8h\n"
- "ldr d2, [x3, #0xb0]\n"
- "smlal v20.4s, v24.4h, v3.4h\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
- "smlal v8.4s, v30.4h, v3.4h\n"
- "smlal v17.4s, v28.4h, v3.4h\n"
- "smlal2 v19.4s, v24.8h, v4.8h\n"
- "smlal2 v10.4s, v24.8h, v3.8h\n"
- "ldr d24, [x24, x0]\n"
- "ssubl v24.8h, v24.8b, v9.8b\n"
+ "smlal v15.4s, v25.4h, v3.4h\n"
+ "smlal2 v16.4s, v25.8h, v3.8h\n"
+ "smlal2 v5.4s, v30.8h, v2.8h\n"
+ "ldr d25, [x5, x24]\n"
+ "ssubl v25.8h, v25.8b, v9.8b\n"
+ "smlal v17.4s, v24.4h, v3.4h\n"
+ "smlal v10.4s, v30.4h, v3.4h\n"
+ "ldr d2, [x23, #0xb0]\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "smlal v6.4s, v28.4h, v3.4h\n"
+ "smlal2 v8.4s, v24.8h, v3.8h\n"
"smlal2 v7.4s, v30.8h, v3.8h\n"
- "smlal v13.4s, v27.4h, v0.4h\n"
- "smlal2 v21.4s, v28.8h, v3.8h\n"
- "ldr d3, [x3, #0xb8]\n"
- "smlal v20.4s, v22.4h, v4.4h\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "smlal v8.4s, v28.4h, v4.4h\n"
- "smlal v17.4s, v26.4h, v4.4h\n"
- "smlal2 v19.4s, v27.8h, v0.8h\n"
- "ldr d27, [x23, x0]\n"
+ "smlal v15.4s, v24.4h, v4.4h\n"
+ "smlal2 v16.4s, v24.8h, v4.8h\n"
+ "ldr d24, [x25, x24]\n"
+ "smlal2 v5.4s, v28.8h, v3.8h\n"
+ "ssubl v24.8h, v24.8b, v9.8b\n"
+ "smlal v17.4s, v22.4h, v4.4h\n"
+ "smlal v10.4s, v28.4h, v4.4h\n"
+ "ldr d3, [x23, #0xb8]\n"
+ "ssubl v3.8h, v3.8b, v14.8b\n"
+ "smlal v6.4s, v26.4h, v4.4h\n"
"smlal2 v7.4s, v28.8h, v4.8h\n"
+ "smlal v15.4s, v27.4h, v0.4h\n"
+ "smlal2 v16.4s, v27.8h, v0.8h\n"
+ "ldr d27, [x26, x24]\n"
"ssubl v27.8h, v27.8b, v9.8b\n"
- "smlal v13.4s, v23.4h, v1.4h\n"
- "smlal2 v10.4s, v22.8h, v4.8h\n"
- "ldr q22, [x8, #0x10]\n"
- "add x8, x8, #0x20\n"
- "smlal2 v21.4s, v26.8h, v4.8h\n"
- "ldr d4, [x3, #0xc0]\n"
- "smlal v20.4s, v23.4h, v0.4h\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "smlal v8.4s, v25.4h, v0.4h\n"
- "smlal v17.4s, v24.4h, v0.4h\n"
- "smlal2 v19.4s, v23.8h, v1.8h\n"
+ "smlal2 v8.4s, v22.8h, v4.8h\n"
+ "smlal2 v5.4s, v26.8h, v4.8h\n"
+ "ldr d4, [x23, #0xc0]\n"
+ "ssubl v4.8h, v4.8b, v14.8b\n"
+ "smlal v17.4s, v23.4h, v0.4h\n"
+ "smlal v10.4s, v25.4h, v0.4h\n"
+ "smlal v6.4s, v24.4h, v0.4h\n"
"smlal2 v7.4s, v25.8h, v0.8h\n"
- "ldr d25, [x15, x0]\n"
+ "ldr d25, [x12, x24]\n"
"ssubl v25.8h, v25.8b, v9.8b\n"
- "smlal v13.4s, v31.4h, v2.4h\n"
- "smlal2 v10.4s, v23.8h, v0.8h\n"
- "smlal2 v21.4s, v24.8h, v0.8h\n"
- "smlal v20.4s, v31.4h, v1.4h\n"
- "smlal v8.4s, v24.4h, v1.4h\n"
- "smlal v17.4s, v27.4h, v1.4h\n"
- "smlal2 v19.4s, v31.8h, v2.8h\n"
+ "smlal2 v8.4s, v23.8h, v0.8h\n"
+ "smlal2 v5.4s, v24.8h, v0.8h\n"
+ "smlal v15.4s, v23.4h, v1.4h\n"
+ "smlal v17.4s, v31.4h, v1.4h\n"
+ "smlal v10.4s, v24.4h, v1.4h\n"
+ "smlal v6.4s, v27.4h, v1.4h\n"
"smlal2 v7.4s, v24.8h, v1.8h\n"
- "ldr d24, [x21, x0]\n"
+ "ldr d24, [x14, x24]\n"
+ "smlal2 v16.4s, v23.8h, v1.8h\n"
"ssubl v24.8h, v24.8b, v9.8b\n"
- "smlal v13.4s, v30.4h, v3.4h\n"
- "smlal2 v10.4s, v31.8h, v1.8h\n"
- "smlal2 v21.4s, v27.8h, v1.8h\n"
- "smlal v20.4s, v30.4h, v2.4h\n"
- "smlal v8.4s, v27.4h, v2.4h\n"
- "smlal v17.4s, v25.4h, v2.4h\n"
- "smlal2 v19.4s, v30.8h, v3.8h\n"
+ "smlal2 v8.4s, v31.8h, v1.8h\n"
+ "smlal2 v5.4s, v27.8h, v1.8h\n"
+ "smlal v15.4s, v31.4h, v2.4h\n"
+ "smlal v17.4s, v30.4h, v2.4h\n"
+ "smlal v10.4s, v27.4h, v2.4h\n"
+ "smlal v6.4s, v25.4h, v2.4h\n"
"smlal2 v7.4s, v27.8h, v2.8h\n"
- "ldr d27, [x20, x0]\n"
+ "ldr d27, [x21, x24]\n"
+ "smlal2 v16.4s, v31.8h, v2.8h\n"
"ssubl v27.8h, v27.8b, v9.8b\n"
- "smlal v13.4s, v28.4h, v4.4h\n"
- "smlal2 v10.4s, v30.8h, v2.8h\n"
- "sqrdmulh v13.4s, v13.4s, v18.4s\n"
- "add x0, x0, #0x8\n"
- "smlal2 v21.4s, v25.8h, v2.8h\n"
- "smlal v20.4s, v28.4h, v3.4h\n"
- "and v30.16b, v13.16b, v6.16b\n"
- "smlal v8.4s, v25.4h, v3.4h\n"
- "smlal v17.4s, v24.4h, v3.4h\n"
- "sshr v30.4s, v30.4s, #0x1f\n"
- "smlal2 v19.4s, v28.8h, v4.8h\n"
- "smlal2 v10.4s, v28.8h, v3.8h\n"
- "sqrdmulh v19.4s, v19.4s, v5.4s\n"
+ "smlal2 v8.4s, v30.8h, v2.8h\n"
+ "smlal2 v5.4s, v25.8h, v2.8h\n"
+ "add x24, x24, #0x8\n"
+ "smlal v15.4s, v30.4h, v3.4h\n"
+ "smlal v17.4s, v28.4h, v3.4h\n"
+ "smlal v10.4s, v25.4h, v3.4h\n"
+ "smlal v6.4s, v24.4h, v3.4h\n"
+ "smlal2 v16.4s, v30.8h, v3.8h\n"
+ "smlal2 v8.4s, v28.8h, v3.8h\n"
"smlal2 v7.4s, v25.8h, v3.8h\n"
- "smlal2 v21.4s, v24.8h, v3.8h\n"
- "and v16.16b, v19.16b, v22.16b\n"
- "smlal v20.4s, v26.4h, v4.4h\n"
- "smlal v8.4s, v24.4h, v4.4h\n"
- "sqrdmulh v20.4s, v20.4s, v18.4s\n"
- "smlal v17.4s, v27.4h, v4.4h\n"
- "smlal2 v10.4s, v26.8h, v4.8h\n"
- "sqrdmulh v8.4s, v8.4s, v18.4s\n"
+ "smlal2 v5.4s, v24.8h, v3.8h\n"
+ "smlal v15.4s, v28.4h, v4.4h\n"
+ "smlal v17.4s, v26.4h, v4.4h\n"
+ "sqrdmulh v15.4s, v15.4s, v12.4s\n"
+ "smlal v10.4s, v24.4h, v4.4h\n"
+ "smlal v6.4s, v27.4h, v4.4h\n"
+ "sqrdmulh v17.4s, v17.4s, v12.4s\n"
+ "smlal2 v16.4s, v28.8h, v4.8h\n"
+ "smlal2 v8.4s, v26.8h, v4.8h\n"
+ "sqrdmulh v10.4s, v10.4s, v12.4s\n"
"smlal2 v7.4s, v24.8h, v4.8h\n"
- "smlal2 v21.4s, v27.8h, v4.8h\n"
- "sqrdmulh v17.4s, v17.4s, v18.4s\n"
- "sqadd v13.4s, v13.4s, v30.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "and v0.16b, v20.16b, v6.16b\n"
- "sqrdmulh v10.4s, v10.4s, v5.4s\n"
- "and v18.16b, v8.16b, v6.16b\n"
- "sqrdmulh v7.4s, v7.4s, v5.4s\n"
- "and v30.16b, v17.16b, v6.16b\n"
- "sqrdmulh v21.4s, v21.4s, v5.4s\n"
- "sqadd v19.4s, v19.4s, v16.4s\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "and v26.16b, v10.16b, v22.16b\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "and v23.16b, v7.16b, v22.16b\n"
- "sshr v30.4s, v30.4s, #0x1f\n"
- "and v16.16b, v21.16b, v22.16b\n"
- "sqadd v20.4s, v20.4s, v0.4s\n"
- "sshr v26.4s, v26.4s, #0x1f\n"
- "sqadd v8.4s, v8.4s, v18.4s\n"
+ "smlal2 v5.4s, v27.8h, v4.8h\n"
+ "sqrdmulh v6.4s, v6.4s, v12.4s\n"
+ "and v23.16b, v15.16b, v19.16b\n"
+ "sqrdmulh v16.4s, v16.4s, v20.4s\n"
+ "and v22.16b, v17.16b, v19.16b\n"
+ "sqrdmulh v8.4s, v8.4s, v20.4s\n"
+ "and v21.16b, v10.16b, v19.16b\n"
+ "sqrdmulh v7.4s, v7.4s, v20.4s\n"
+ "and v26.16b, v6.16b, v19.16b\n"
+ "sqrdmulh v5.4s, v5.4s, v20.4s\n"
"sshr v23.4s, v23.4s, #0x1f\n"
- "sqadd v17.4s, v17.4s, v30.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "srshl v13.4s, v13.4s, v6.4s\n"
- "srshl v20.4s, v20.4s, v6.4s\n"
- "sqadd v10.4s, v10.4s, v26.4s\n"
- "srshl v8.4s, v8.4s, v6.4s\n"
- "sqadd v7.4s, v7.4s, v23.4s\n"
- "srshl v17.4s, v17.4s, v6.4s\n"
- "sqadd v21.4s, v21.4s, v16.4s\n"
- "srshl v19.4s, v19.4s, v22.4s\n"
- "sqxtn v13.4h, v13.4s\n"
- "srshl v10.4s, v10.4s, v22.4s\n"
- "sqxtn v20.4h, v20.4s\n"
- "srshl v7.4s, v7.4s, v22.4s\n"
- "sqxtn v8.4h, v8.4s\n"
- "srshl v21.4s, v21.4s, v22.4s\n"
+ "and v4.16b, v16.16b, v29.16b\n"
+ "sshr v22.4s, v22.4s, #0x1f\n"
+ "and v2.16b, v8.16b, v29.16b\n"
+ "sshr v21.4s, v21.4s, #0x1f\n"
+ "and v3.16b, v7.16b, v29.16b\n"
+ "sshr v26.4s, v26.4s, #0x1f\n"
+ "and v25.16b, v5.16b, v29.16b\n"
+ "sqadd v15.4s, v15.4s, v23.4s\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "sqadd v17.4s, v17.4s, v22.4s\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sqadd v10.4s, v10.4s, v21.4s\n"
+ "sshr v3.4s, v3.4s, #0x1f\n"
+ "sqadd v6.4s, v6.4s, v26.4s\n"
+ "sshr v25.4s, v25.4s, #0x1f\n"
+ "srshl v15.4s, v15.4s, v19.4s\n"
+ "sqadd v16.4s, v16.4s, v4.4s\n"
+ "srshl v17.4s, v17.4s, v19.4s\n"
+ "sqadd v8.4s, v8.4s, v2.4s\n"
+ "srshl v10.4s, v10.4s, v19.4s\n"
+ "sqadd v7.4s, v7.4s, v3.4s\n"
+ "srshl v6.4s, v6.4s, v19.4s\n"
+ "sqadd v5.4s, v5.4s, v25.4s\n"
+ "srshl v16.4s, v16.4s, v29.4s\n"
+ "sqxtn v15.4h, v15.4s\n"
+ "srshl v8.4s, v8.4s, v29.4s\n"
"sqxtn v17.4h, v17.4s\n"
- "sqxtn2 v13.8h, v19.4s\n"
- "sqxtn2 v20.8h, v10.4s\n"
- "sqxtn2 v8.8h, v7.4s\n"
- "sqxtn2 v17.8h, v21.4s\n"
- "sqadd v13.8h, v13.8h, v14.8h\n"
- "sqadd v20.8h, v20.8h, v14.8h\n"
- "sqadd v8.8h, v8.8h, v14.8h\n"
- "sqadd v17.8h, v17.8h, v14.8h\n"
- "smax v13.8h, v13.8h, v12.8h\n"
- "smax v20.8h, v20.8h, v12.8h\n"
- "smax v8.8h, v8.8h, v12.8h\n"
- "smax v17.8h, v17.8h, v12.8h\n"
- "smin v13.8h, v13.8h, v11.8h\n"
- "smin v20.8h, v20.8h, v11.8h\n"
- "smin v8.8h, v8.8h, v11.8h\n"
- "smin v17.8h, v17.8h, v11.8h\n"
- "uzp1 v13.16b, v13.16b, v13.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "str d13, [x17, x10]\n"
- "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "srshl v7.4s, v7.4s, v29.4s\n"
+ "sqxtn v10.4h, v10.4s\n"
+ "srshl v5.4s, v5.4s, v29.4s\n"
+ "sqxtn v6.4h, v6.4s\n"
+ "sqxtn2 v15.8h, v16.4s\n"
+ "sqxtn2 v17.8h, v8.4s\n"
+ "sqxtn2 v10.8h, v7.4s\n"
+ "sqxtn2 v6.8h, v5.4s\n"
+ "sqadd v15.8h, v15.8h, v18.8h\n"
+ "sqadd v17.8h, v17.8h, v18.8h\n"
+ "sqadd v10.8h, v10.8h, v18.8h\n"
+ "sqadd v6.8h, v6.8h, v18.8h\n"
+ "smax v15.8h, v15.8h, v11.8h\n"
+ "smax v17.8h, v17.8h, v11.8h\n"
+ "smax v10.8h, v10.8h, v11.8h\n"
+ "smax v6.8h, v6.8h, v11.8h\n"
+ "smin v15.8h, v15.8h, v13.8h\n"
+ "smin v17.8h, v17.8h, v13.8h\n"
+ "smin v10.8h, v10.8h, v13.8h\n"
+ "smin v6.8h, v6.8h, v13.8h\n"
+ "uzp1 v15.16b, v15.16b, v15.16b\n"
"uzp1 v17.16b, v17.16b, v17.16b\n"
- "str d20, [x6, x10]\n"
- "str d8, [x7, x10]\n"
- "str d17, [x16, x10]\n"
- "add x10, x10, #0x8\n"
+ "str d15, [x16, x22]\n"
+ "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "str d17, [x8, x22]\n"
+ "str d10, [x4, x22]\n"
+ "str d6, [x7, x22]\n"
+ "add x22, x22, #0x8\n"
"beq 124f\n"
- "add x3, x3, #0xc8\n"
+ "add x23, x23, #0xc8\n"
"3:" // Oddments
- "ldr x13, [%x[params], %[offsetof_Params_bias]]\n"
- "tbz x1, #2, 5f\n"
- "ld1 { v13.4s }, [x13], #0x10\n"
- "tbz x1, #1, 4f\n"
- "ld1 { v19.d }[0], [x13], #0x8\n"
- "tbz x1, #0, 7f\n"
- "ld1 { v19.s }[2], [x13]\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "tbz x0, #2, 5f\n"
+ "ld1 { v15.4s }, [x19], #0x10\n"
+ "tbz x0, #1, 4f\n"
+ "ld1 { v16.d }[0], [x19], #0x8\n"
+ "tbz x0, #0, 7f\n"
+ "ld1 { v16.s }[2], [x19]\n"
"b 7f\n"
"4:" // Oddments: Load bias: Bit 2: Bit 1: Unset
- "tbz x1, #0, 7f\n"
- "ld1 { v19.s }[0], [x13]\n"
+ "tbz x0, #0, 7f\n"
+ "ld1 { v16.s }[0], [x19]\n"
"b 7f\n"
"5:" // Oddments: Load bias: Bit 2: Unset
- "tbz x1, #1, 6f\n"
- "ld1 { v13.d }[0], [x13], #0x8\n"
- "tbz x1, #0, 7f\n"
- "ld1 { v13.s }[2], [x13]\n"
+ "tbz x0, #1, 6f\n"
+ "ld1 { v15.d }[0], [x19], #0x8\n"
+ "tbz x0, #0, 7f\n"
+ "ld1 { v15.s }[2], [x19]\n"
"b 7f\n"
"6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 7f\n"
- "ld1 { v13.s }[0], [x13]\n"
+ "tbz x0, #0, 7f\n"
+ "ld1 { v15.s }[0], [x19]\n"
"7:" // Oddments: Load bias: Bit 2: End
- "ldr d0, [x3, #0x0]\n"
- "ldr d1, [x3, #0x8]\n"
- "mov v20.16b, v13.16b\n"
- "mov v10.16b, v19.16b\n"
- "ldr d2, [x3, #0x10]\n"
- "ldr d3, [x3, #0x18]\n"
- "mov v8.16b, v13.16b\n"
- "mov v7.16b, v19.16b\n"
- "ldr d4, [x3, #0x20]\n"
- "ldp x9, x28, [x4, #0x0]\n"
- "mov v17.16b, v13.16b\n"
- "mov v21.16b, v19.16b\n"
- "ldp x27, x26, [x4, #0x10]\n"
- "ldp x25, x24, [x4, #0x20]\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
- "ldp x23, x22, [x4, #0x30]\n"
- "ldp x21, x20, [x4, #0x40]\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "add x9, x9, x0\n"
- "add x28, x28, x0\n"
- "add x27, x27, x0\n"
- "add x26, x26, x0\n"
- "add x25, x25, x0\n"
- "add x24, x24, x0\n"
- "add x23, x23, x0\n"
- "add x22, x22, x0\n"
- "add x21, x21, x0\n"
- "add x20, x20, x0\n"
- "tbz x1, #2, 9f\n"
- "ld1 { v31.s }[0], [x9], #0x4\n"
- "ld1 { v30.s }[0], [x28], #0x4\n"
- "ld1 { v29.s }[0], [x27], #0x4\n"
- "ld1 { v28.s }[0], [x26], #0x4\n"
- "ld1 { v27.s }[0], [x25], #0x4\n"
- "ld1 { v23.s }[0], [x24], #0x4\n"
- "ld1 { v25.s }[0], [x23], #0x4\n"
- "ld1 { v24.s }[0], [x22], #0x4\n"
- "ld1 { v26.s }[0], [x21], #0x4\n"
- "ld1 { v22.s }[0], [x20], #0x4\n"
- "tbz x1, #1, 8f\n"
- "ld1 { v31.h }[2], [x9], #0x2\n"
- "ld1 { v30.h }[2], [x28], #0x2\n"
- "ld1 { v29.h }[2], [x27], #0x2\n"
- "ld1 { v28.h }[2], [x26], #0x2\n"
- "ld1 { v27.h }[2], [x25], #0x2\n"
- "ld1 { v23.h }[2], [x24], #0x2\n"
- "ld1 { v25.h }[2], [x23], #0x2\n"
- "ld1 { v24.h }[2], [x22], #0x2\n"
- "ld1 { v26.h }[2], [x21], #0x2\n"
- "ld1 { v22.h }[2], [x20], #0x2\n"
- "tbz x1, #0, 11f\n"
- "ld1 { v31.b }[6], [x9]\n"
- "ld1 { v30.b }[6], [x28]\n"
- "ld1 { v29.b }[6], [x27]\n"
- "ld1 { v28.b }[6], [x26]\n"
- "ld1 { v27.b }[6], [x25]\n"
- "ld1 { v23.b }[6], [x24]\n"
- "ld1 { v25.b }[6], [x23]\n"
- "ld1 { v24.b }[6], [x22]\n"
- "ld1 { v26.b }[6], [x21]\n"
- "ld1 { v22.b }[6], [x20]\n"
+ "ldr d0, [x23, #0x0]\n"
+ "ldr d1, [x23, #0x8]\n"
+ "mov v17.16b, v15.16b\n"
+ "mov v8.16b, v16.16b\n"
+ "ldr d2, [x23, #0x10]\n"
+ "ldr d3, [x23, #0x18]\n"
+ "mov v10.16b, v15.16b\n"
+ "mov v7.16b, v16.16b\n"
+ "ldr d4, [x23, #0x20]\n"
+ "ldp x28, x6, [x20, #0x0]\n"
+ "mov v6.16b, v15.16b\n"
+ "mov v5.16b, v16.16b\n"
+ "ldp x26, x25, [x20, #0x10]\n"
+ "ldp x5, x2, [x20, #0x20]\n"
+ "ssubl v0.8h, v0.8b, v14.8b\n"
+ "ssubl v1.8h, v1.8b, v14.8b\n"
+ "ldp x27, x21, [x20, #0x30]\n"
+ "ldp x12, x19, [x20, #0x40]\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "ssubl v3.8h, v3.8b, v14.8b\n"
+ "ssubl v4.8h, v4.8b, v14.8b\n"
+ "add x28, x28, x24\n"
+ "add x6, x6, x24\n"
+ "add x26, x26, x24\n"
+ "add x25, x25, x24\n"
+ "add x5, x5, x24\n"
+ "add x2, x2, x24\n"
+ "add x27, x27, x24\n"
+ "add x21, x21, x24\n"
+ "add x12, x12, x24\n"
+ "add x19, x19, x24\n"
+ "tbz x0, #2, 9f\n"
+ "ld1 { v31.s }[0], [x28], #0x4\n"
+ "ld1 { v30.s }[0], [x6], #0x4\n"
+ "ld1 { v29.s }[0], [x26], #0x4\n"
+ "ld1 { v28.s }[0], [x25], #0x4\n"
+ "ld1 { v27.s }[0], [x5], #0x4\n"
+ "ld1 { v23.s }[0], [x2], #0x4\n"
+ "ld1 { v25.s }[0], [x27], #0x4\n"
+ "ld1 { v24.s }[0], [x21], #0x4\n"
+ "ld1 { v26.s }[0], [x12], #0x4\n"
+ "ld1 { v22.s }[0], [x19], #0x4\n"
+ "tbz x0, #1, 8f\n"
+ "ld1 { v31.h }[2], [x28], #0x2\n"
+ "ld1 { v30.h }[2], [x6], #0x2\n"
+ "ld1 { v29.h }[2], [x26], #0x2\n"
+ "ld1 { v28.h }[2], [x25], #0x2\n"
+ "ld1 { v27.h }[2], [x5], #0x2\n"
+ "ld1 { v23.h }[2], [x2], #0x2\n"
+ "ld1 { v25.h }[2], [x27], #0x2\n"
+ "ld1 { v24.h }[2], [x21], #0x2\n"
+ "ld1 { v26.h }[2], [x12], #0x2\n"
+ "ld1 { v22.h }[2], [x19], #0x2\n"
+ "tbz x0, #0, 11f\n"
+ "ld1 { v31.b }[6], [x28]\n"
+ "ld1 { v30.b }[6], [x6]\n"
+ "ld1 { v29.b }[6], [x26]\n"
+ "ld1 { v28.b }[6], [x25]\n"
+ "ld1 { v27.b }[6], [x5]\n"
+ "ld1 { v23.b }[6], [x2]\n"
+ "ld1 { v25.b }[6], [x27]\n"
+ "ld1 { v24.b }[6], [x21]\n"
+ "ld1 { v26.b }[6], [x12]\n"
+ "ld1 { v22.b }[6], [x19]\n"
"b 11f\n"
"8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset
- "tbz x1, #0, 11f\n"
- "ld1 { v31.b }[4], [x9]\n"
- "ld1 { v30.b }[4], [x28]\n"
- "ld1 { v29.b }[4], [x27]\n"
- "ld1 { v28.b }[4], [x26]\n"
- "ld1 { v27.b }[4], [x25]\n"
- "ld1 { v23.b }[4], [x24]\n"
- "ld1 { v25.b }[4], [x23]\n"
- "ld1 { v24.b }[4], [x22]\n"
- "ld1 { v26.b }[4], [x21]\n"
- "ld1 { v22.b }[4], [x20]\n"
+ "tbz x0, #0, 11f\n"
+ "ld1 { v31.b }[4], [x28]\n"
+ "ld1 { v30.b }[4], [x6]\n"
+ "ld1 { v29.b }[4], [x26]\n"
+ "ld1 { v28.b }[4], [x25]\n"
+ "ld1 { v27.b }[4], [x5]\n"
+ "ld1 { v23.b }[4], [x2]\n"
+ "ld1 { v25.b }[4], [x27]\n"
+ "ld1 { v24.b }[4], [x21]\n"
+ "ld1 { v26.b }[4], [x12]\n"
+ "ld1 { v22.b }[4], [x19]\n"
"b 11f\n"
"9:" // Oddments: Initial loads: Bit 2: Unset
- "tbz x1, #1, 10f\n"
- "ld1 { v31.h }[0], [x9], #0x2\n"
- "ld1 { v30.h }[0], [x28], #0x2\n"
- "ld1 { v29.h }[0], [x27], #0x2\n"
- "ld1 { v28.h }[0], [x26], #0x2\n"
- "ld1 { v27.h }[0], [x25], #0x2\n"
- "ld1 { v23.h }[0], [x24], #0x2\n"
- "ld1 { v25.h }[0], [x23], #0x2\n"
- "ld1 { v24.h }[0], [x22], #0x2\n"
- "ld1 { v26.h }[0], [x21], #0x2\n"
- "ld1 { v22.h }[0], [x20], #0x2\n"
- "tbz x1, #0, 11f\n"
- "ld1 { v31.b }[2], [x9]\n"
- "ld1 { v30.b }[2], [x28]\n"
- "ld1 { v29.b }[2], [x27]\n"
- "ld1 { v28.b }[2], [x26]\n"
- "ld1 { v27.b }[2], [x25]\n"
- "ld1 { v23.b }[2], [x24]\n"
- "ld1 { v25.b }[2], [x23]\n"
- "ld1 { v24.b }[2], [x22]\n"
- "ld1 { v26.b }[2], [x21]\n"
- "ld1 { v22.b }[2], [x20]\n"
+ "tbz x0, #1, 10f\n"
+ "ld1 { v31.h }[0], [x28], #0x2\n"
+ "ld1 { v30.h }[0], [x6], #0x2\n"
+ "ld1 { v29.h }[0], [x26], #0x2\n"
+ "ld1 { v28.h }[0], [x25], #0x2\n"
+ "ld1 { v27.h }[0], [x5], #0x2\n"
+ "ld1 { v23.h }[0], [x2], #0x2\n"
+ "ld1 { v25.h }[0], [x27], #0x2\n"
+ "ld1 { v24.h }[0], [x21], #0x2\n"
+ "ld1 { v26.h }[0], [x12], #0x2\n"
+ "ld1 { v22.h }[0], [x19], #0x2\n"
+ "tbz x0, #0, 11f\n"
+ "ld1 { v31.b }[2], [x28]\n"
+ "ld1 { v30.b }[2], [x6]\n"
+ "ld1 { v29.b }[2], [x26]\n"
+ "ld1 { v28.b }[2], [x25]\n"
+ "ld1 { v27.b }[2], [x5]\n"
+ "ld1 { v23.b }[2], [x2]\n"
+ "ld1 { v25.b }[2], [x27]\n"
+ "ld1 { v24.b }[2], [x21]\n"
+ "ld1 { v26.b }[2], [x12]\n"
+ "ld1 { v22.b }[2], [x19]\n"
"b 11f\n"
"10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 11f\n"
- "ld1 { v31.b }[0], [x9]\n"
- "ld1 { v30.b }[0], [x28]\n"
- "ld1 { v29.b }[0], [x27]\n"
- "ld1 { v28.b }[0], [x26]\n"
- "ld1 { v27.b }[0], [x25]\n"
- "ld1 { v23.b }[0], [x24]\n"
- "ld1 { v25.b }[0], [x23]\n"
- "ld1 { v24.b }[0], [x22]\n"
- "ld1 { v26.b }[0], [x21]\n"
- "ld1 { v22.b }[0], [x20]\n"
+ "tbz x0, #0, 11f\n"
+ "ld1 { v31.b }[0], [x28]\n"
+ "ld1 { v30.b }[0], [x6]\n"
+ "ld1 { v29.b }[0], [x26]\n"
+ "ld1 { v28.b }[0], [x25]\n"
+ "ld1 { v27.b }[0], [x5]\n"
+ "ld1 { v23.b }[0], [x2]\n"
+ "ld1 { v25.b }[0], [x27]\n"
+ "ld1 { v24.b }[0], [x21]\n"
+ "ld1 { v26.b }[0], [x12]\n"
+ "ld1 { v22.b }[0], [x19]\n"
"11:" // Oddments: Initial loads: Bit 2: End
"ssubl v31.8h, v31.8b, v9.8b\n"
"ssubl v30.8h, v30.8b, v9.8b\n"
- "smlal v13.4s, v31.4h, v0.4h\n"
- "ldr x20, [x4, #0x50]\n"
+ "smlal v15.4s, v31.4h, v0.4h\n"
+ "ldr x19, [x20, #0x50]\n"
"ssubl v29.8h, v29.8b, v9.8b\n"
- "smlal2 v19.4s, v31.8h, v0.8h\n"
- "smlal v20.4s, v30.4h, v0.4h\n"
- "smlal2 v10.4s, v30.8h, v0.8h\n"
- "smlal v8.4s, v29.4h, v0.4h\n"
+ "smlal2 v16.4s, v31.8h, v0.8h\n"
+ "smlal v17.4s, v30.4h, v0.4h\n"
+ "smlal2 v8.4s, v30.8h, v0.8h\n"
+ "smlal v10.4s, v29.4h, v0.4h\n"
"ssubl v28.8h, v28.8b, v9.8b\n"
- "add x20, x20, x0\n"
+ "add x19, x19, x24\n"
"smlal2 v7.4s, v29.8h, v0.8h\n"
"ssubl v27.8h, v27.8b, v9.8b\n"
- "smlal v17.4s, v28.4h, v0.4h\n"
- "smlal2 v21.4s, v28.8h, v0.8h\n"
- "smlal v13.4s, v30.4h, v1.4h\n"
+ "smlal v6.4s, v28.4h, v0.4h\n"
+ "smlal2 v5.4s, v28.8h, v0.8h\n"
+ "smlal v15.4s, v30.4h, v1.4h\n"
"ssubl v23.8h, v23.8b, v9.8b\n"
- "smlal2 v19.4s, v30.8h, v1.8h\n"
- "smlal v20.4s, v27.4h, v1.4h\n"
+ "smlal2 v16.4s, v30.8h, v1.8h\n"
+ "smlal v17.4s, v27.4h, v1.4h\n"
"ssubl v25.8h, v25.8b, v9.8b\n"
- "smlal2 v10.4s, v27.8h, v1.8h\n"
- "smlal v8.4s, v28.4h, v1.4h\n"
+ "smlal2 v8.4s, v27.8h, v1.8h\n"
+ "smlal v10.4s, v28.4h, v1.4h\n"
"ssubl v24.8h, v24.8b, v9.8b\n"
"smlal2 v7.4s, v28.8h, v1.8h\n"
"ssubl v26.8h, v26.8b, v9.8b\n"
- "smlal v17.4s, v23.4h, v1.4h\n"
+ "smlal v6.4s, v23.4h, v1.4h\n"
"ssubl v22.8h, v22.8b, v9.8b\n"
- "smlal2 v21.4s, v23.8h, v1.8h\n"
- "smlal v13.4s, v27.4h, v2.4h\n"
- "smlal2 v19.4s, v27.8h, v2.8h\n"
- "smlal v20.4s, v25.4h, v2.4h\n"
- "smlal2 v10.4s, v25.8h, v2.8h\n"
- "smlal v8.4s, v23.4h, v2.4h\n"
+ "smlal2 v5.4s, v23.8h, v1.8h\n"
+ "smlal v15.4s, v27.4h, v2.4h\n"
+ "smlal2 v16.4s, v27.8h, v2.8h\n"
+ "smlal v17.4s, v25.4h, v2.4h\n"
+ "smlal2 v8.4s, v25.8h, v2.8h\n"
+ "smlal v10.4s, v23.4h, v2.4h\n"
"smlal2 v7.4s, v23.8h, v2.8h\n"
- "tbz x1, #2, 13f\n"
- "ld1 { v31.s }[0], [x20], #0x4\n"
- "tbz x1, #1, 12f\n"
- "ld1 { v31.h }[2], [x20], #0x2\n"
- "tbz x1, #0, 15f\n"
- "ld1 { v31.b }[6], [x20]\n"
+ "tbz x0, #2, 13f\n"
+ "ld1 { v31.s }[0], [x19], #0x4\n"
+ "tbz x0, #1, 12f\n"
+ "ld1 { v31.h }[2], [x19], #0x2\n"
+ "tbz x0, #0, 15f\n"
+ "ld1 { v31.b }[6], [x19]\n"
"b 15f\n"
"12:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset
- "tbz x1, #0, 15f\n"
- "ld1 { v31.b }[4], [x20]\n"
+ "tbz x0, #0, 15f\n"
+ "ld1 { v31.b }[4], [x19]\n"
"b 15f\n"
"13:" // Oddments: Load (1, 3): Bit 2: Unset
- "tbz x1, #1, 14f\n"
- "ld1 { v31.h }[0], [x20], #0x2\n"
- "tbz x1, #0, 15f\n"
- "ld1 { v31.b }[2], [x20]\n"
+ "tbz x0, #1, 14f\n"
+ "ld1 { v31.h }[0], [x19], #0x2\n"
+ "tbz x0, #0, 15f\n"
+ "ld1 { v31.b }[2], [x19]\n"
"b 15f\n"
"14:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 15f\n"
- "ld1 { v31.b }[0], [x20]\n"
+ "tbz x0, #0, 15f\n"
+ "ld1 { v31.b }[0], [x19]\n"
"15:" // Oddments: Load (1, 3): Bit 2: End
"ssubl v31.8h, v31.8b, v9.8b\n"
- "ldr x22, [x4, #0x58]\n"
- "smlal v17.4s, v31.4h, v2.4h\n"
- "smlal2 v21.4s, v31.8h, v2.8h\n"
- "smlal v13.4s, v25.4h, v3.4h\n"
- "smlal2 v19.4s, v25.8h, v3.8h\n"
- "add x22, x22, x0\n"
- "smlal v20.4s, v24.4h, v3.4h\n"
- "smlal2 v10.4s, v24.8h, v3.8h\n"
- "smlal v8.4s, v31.4h, v3.4h\n"
+ "ldr x15, [x20, #0x58]\n"
+ "smlal v6.4s, v31.4h, v2.4h\n"
+ "smlal2 v5.4s, v31.8h, v2.8h\n"
+ "smlal v15.4s, v25.4h, v3.4h\n"
+ "smlal2 v16.4s, v25.8h, v3.8h\n"
+ "add x15, x15, x24\n"
+ "smlal v17.4s, v24.4h, v3.4h\n"
+ "smlal2 v8.4s, v24.8h, v3.8h\n"
+ "smlal v10.4s, v31.4h, v3.4h\n"
"smlal2 v7.4s, v31.8h, v3.8h\n"
- "tbz x1, #2, 17f\n"
- "ld1 { v30.s }[0], [x22], #0x4\n"
- "tbz x1, #1, 16f\n"
- "ld1 { v30.h }[2], [x22], #0x2\n"
- "tbz x1, #0, 19f\n"
- "ld1 { v30.b }[6], [x22]\n"
+ "tbz x0, #2, 17f\n"
+ "ld1 { v30.s }[0], [x15], #0x4\n"
+ "tbz x0, #1, 16f\n"
+ "ld1 { v30.h }[2], [x15], #0x2\n"
+ "tbz x0, #0, 19f\n"
+ "ld1 { v30.b }[6], [x15]\n"
"b 19f\n"
"16:" // Oddments: Load (1, 4): Bit 2: Bit 1: Unset
- "tbz x1, #0, 19f\n"
- "ld1 { v30.b }[4], [x22]\n"
+ "tbz x0, #0, 19f\n"
+ "ld1 { v30.b }[4], [x15]\n"
"b 19f\n"
"17:" // Oddments: Load (1, 4): Bit 2: Unset
- "tbz x1, #1, 18f\n"
- "ld1 { v30.h }[0], [x22], #0x2\n"
- "tbz x1, #0, 19f\n"
- "ld1 { v30.b }[2], [x22]\n"
+ "tbz x0, #1, 18f\n"
+ "ld1 { v30.h }[0], [x15], #0x2\n"
+ "tbz x0, #0, 19f\n"
+ "ld1 { v30.b }[2], [x15]\n"
"b 19f\n"
"18:" // Oddments: Load (1, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 19f\n"
- "ld1 { v30.b }[0], [x22]\n"
+ "tbz x0, #0, 19f\n"
+ "ld1 { v30.b }[0], [x15]\n"
"19:" // Oddments: Load (1, 4): Bit 2: End
"ssubl v30.8h, v30.8b, v9.8b\n"
- "ldr x21, [x4, #0x60]\n"
- "smlal v17.4s, v30.4h, v3.4h\n"
- "smlal2 v21.4s, v30.8h, v3.8h\n"
- "smlal v13.4s, v24.4h, v4.4h\n"
- "smlal2 v19.4s, v24.8h, v4.8h\n"
- "add x21, x21, x0\n"
- "tbz x1, #2, 21f\n"
- "ld1 { v27.s }[0], [x21], #0x4\n"
- "tbz x1, #1, 20f\n"
- "ld1 { v27.h }[2], [x21], #0x2\n"
- "tbz x1, #0, 23f\n"
- "ld1 { v27.b }[6], [x21]\n"
+ "ldr x19, [x20, #0x60]\n"
+ "smlal v6.4s, v30.4h, v3.4h\n"
+ "smlal2 v5.4s, v30.8h, v3.8h\n"
+ "smlal v15.4s, v24.4h, v4.4h\n"
+ "smlal2 v16.4s, v24.8h, v4.8h\n"
+ "add x19, x19, x24\n"
+ "tbz x0, #2, 21f\n"
+ "ld1 { v27.s }[0], [x19], #0x4\n"
+ "tbz x0, #1, 20f\n"
+ "ld1 { v27.h }[2], [x19], #0x2\n"
+ "tbz x0, #0, 23f\n"
+ "ld1 { v27.b }[6], [x19]\n"
"b 23f\n"
"20:" // Oddments: Load (0, 5): Bit 2: Bit 1: Unset
- "tbz x1, #0, 23f\n"
- "ld1 { v27.b }[4], [x21]\n"
+ "tbz x0, #0, 23f\n"
+ "ld1 { v27.b }[4], [x19]\n"
"b 23f\n"
"21:" // Oddments: Load (0, 5): Bit 2: Unset
- "tbz x1, #1, 22f\n"
- "ld1 { v27.h }[0], [x21], #0x2\n"
- "tbz x1, #0, 23f\n"
- "ld1 { v27.b }[2], [x21]\n"
+ "tbz x0, #1, 22f\n"
+ "ld1 { v27.h }[0], [x19], #0x2\n"
+ "tbz x0, #0, 23f\n"
+ "ld1 { v27.b }[2], [x19]\n"
"b 23f\n"
"22:" // Oddments: Load (0, 5): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 23f\n"
- "ld1 { v27.b }[0], [x21]\n"
+ "tbz x0, #0, 23f\n"
+ "ld1 { v27.b }[0], [x19]\n"
"23:" // Oddments: Load (0, 5): Bit 2: End
- "ldr d0, [x3, #0x28]\n"
"ssubl v27.8h, v27.8b, v9.8b\n"
- "smlal v20.4s, v27.4h, v4.4h\n"
- "smlal2 v10.4s, v27.8h, v4.8h\n"
- "smlal v8.4s, v30.4h, v4.4h\n"
+ "ldr d0, [x23, #0x28]\n"
+ "smlal v17.4s, v27.4h, v4.4h\n"
+ "smlal2 v8.4s, v27.8h, v4.8h\n"
+ "smlal v10.4s, v30.4h, v4.4h\n"
"smlal2 v7.4s, v30.8h, v4.8h\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
- "ldr x20, [x4, #0x68]\n"
- "smlal v17.4s, v26.4h, v4.4h\n"
- "smlal2 v21.4s, v26.8h, v4.8h\n"
- "add x20, x20, x0\n"
- "smlal v13.4s, v29.4h, v0.4h\n"
- "smlal2 v19.4s, v29.8h, v0.8h\n"
- "smlal v20.4s, v28.4h, v0.4h\n"
- "smlal2 v10.4s, v28.8h, v0.8h\n"
- "smlal v8.4s, v22.4h, v0.4h\n"
+ "ssubl v0.8h, v0.8b, v14.8b\n"
+ "ldr x27, [x20, #0x68]\n"
+ "smlal v6.4s, v26.4h, v4.4h\n"
+ "smlal2 v5.4s, v26.8h, v4.8h\n"
+ "add x27, x27, x24\n"
+ "smlal v15.4s, v29.4h, v0.4h\n"
+ "smlal2 v16.4s, v29.8h, v0.8h\n"
+ "smlal v17.4s, v28.4h, v0.4h\n"
+ "smlal2 v8.4s, v28.8h, v0.8h\n"
+ "smlal v10.4s, v22.4h, v0.4h\n"
"smlal2 v7.4s, v22.8h, v0.8h\n"
- "tbz x1, #2, 25f\n"
- "ld1 { v25.s }[0], [x20], #0x4\n"
- "tbz x1, #1, 24f\n"
- "ld1 { v25.h }[2], [x20], #0x2\n"
- "tbz x1, #0, 27f\n"
- "ld1 { v25.b }[6], [x20]\n"
+ "tbz x0, #2, 25f\n"
+ "ld1 { v25.s }[0], [x27], #0x4\n"
+ "tbz x0, #1, 24f\n"
+ "ld1 { v25.h }[2], [x27], #0x2\n"
+ "tbz x0, #0, 27f\n"
+ "ld1 { v25.b }[6], [x27]\n"
"b 27f\n"
"24:" // Oddments: Load (2, 1): Bit 2: Bit 1: Unset
- "tbz x1, #0, 27f\n"
- "ld1 { v25.b }[4], [x20]\n"
+ "tbz x0, #0, 27f\n"
+ "ld1 { v25.b }[4], [x27]\n"
"b 27f\n"
"25:" // Oddments: Load (2, 1): Bit 2: Unset
- "tbz x1, #1, 26f\n"
- "ld1 { v25.h }[0], [x20], #0x2\n"
- "tbz x1, #0, 27f\n"
- "ld1 { v25.b }[2], [x20]\n"
+ "tbz x0, #1, 26f\n"
+ "ld1 { v25.h }[0], [x27], #0x2\n"
+ "tbz x0, #0, 27f\n"
+ "ld1 { v25.b }[2], [x27]\n"
"b 27f\n"
"26:" // Oddments: Load (2, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 27f\n"
- "ld1 { v25.b }[0], [x20]\n"
+ "tbz x0, #0, 27f\n"
+ "ld1 { v25.b }[0], [x27]\n"
"27:" // Oddments: Load (2, 1): Bit 2: End
- "ldr d1, [x3, #0x30]\n"
+ "ldr d1, [x23, #0x30]\n"
"ssubl v25.8h, v25.8b, v9.8b\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
- "ldr x26, [x4, #0x70]\n"
- "smlal v17.4s, v25.4h, v0.4h\n"
- "smlal2 v21.4s, v25.8h, v0.8h\n"
- "add x26, x26, x0\n"
- "smlal v13.4s, v28.4h, v1.4h\n"
- "smlal2 v19.4s, v28.8h, v1.8h\n"
- "smlal v20.4s, v23.4h, v1.4h\n"
- "smlal2 v10.4s, v23.8h, v1.8h\n"
- "smlal v8.4s, v25.4h, v1.4h\n"
+ "ssubl v1.8h, v1.8b, v14.8b\n"
+ "ldr x5, [x20, #0x70]\n"
+ "smlal v6.4s, v25.4h, v0.4h\n"
+ "smlal2 v5.4s, v25.8h, v0.8h\n"
+ "add x5, x5, x24\n"
+ "smlal v15.4s, v28.4h, v1.4h\n"
+ "smlal2 v16.4s, v28.8h, v1.8h\n"
+ "smlal v17.4s, v23.4h, v1.4h\n"
+ "smlal2 v8.4s, v23.8h, v1.8h\n"
+ "smlal v10.4s, v25.4h, v1.4h\n"
"smlal2 v7.4s, v25.8h, v1.8h\n"
- "tbz x1, #2, 29f\n"
- "ld1 { v24.s }[0], [x26], #0x4\n"
- "tbz x1, #1, 28f\n"
- "ld1 { v24.h }[2], [x26], #0x2\n"
- "tbz x1, #0, 31f\n"
- "ld1 { v24.b }[6], [x26]\n"
+ "tbz x0, #2, 29f\n"
+ "ld1 { v24.s }[0], [x5], #0x4\n"
+ "tbz x0, #1, 28f\n"
+ "ld1 { v24.h }[2], [x5], #0x2\n"
+ "tbz x0, #0, 31f\n"
+ "ld1 { v24.b }[6], [x5]\n"
"b 31f\n"
"28:" // Oddments: Load (2, 2): Bit 2: Bit 1: Unset
- "tbz x1, #0, 31f\n"
- "ld1 { v24.b }[4], [x26]\n"
+ "tbz x0, #0, 31f\n"
+ "ld1 { v24.b }[4], [x5]\n"
"b 31f\n"
"29:" // Oddments: Load (2, 2): Bit 2: Unset
- "tbz x1, #1, 30f\n"
- "ld1 { v24.h }[0], [x26], #0x2\n"
- "tbz x1, #0, 31f\n"
- "ld1 { v24.b }[2], [x26]\n"
+ "tbz x0, #1, 30f\n"
+ "ld1 { v24.h }[0], [x5], #0x2\n"
+ "tbz x0, #0, 31f\n"
+ "ld1 { v24.b }[2], [x5]\n"
"b 31f\n"
"30:" // Oddments: Load (2, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 31f\n"
- "ld1 { v24.b }[0], [x26]\n"
+ "tbz x0, #0, 31f\n"
+ "ld1 { v24.b }[0], [x5]\n"
"31:" // Oddments: Load (2, 2): Bit 2: End
- "ldr d2, [x3, #0x38]\n"
+ "ldr d2, [x23, #0x38]\n"
"ssubl v24.8h, v24.8b, v9.8b\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
- "ldr x25, [x4, #0x78]\n"
- "smlal v17.4s, v24.4h, v1.4h\n"
- "smlal2 v21.4s, v24.8h, v1.8h\n"
- "add x25, x25, x0\n"
- "smlal v13.4s, v23.4h, v2.4h\n"
- "smlal2 v19.4s, v23.8h, v2.8h\n"
- "smlal v20.4s, v31.4h, v2.4h\n"
- "smlal2 v10.4s, v31.8h, v2.8h\n"
- "smlal v8.4s, v24.4h, v2.4h\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "ldr x11, [x20, #0x78]\n"
+ "smlal v6.4s, v24.4h, v1.4h\n"
+ "smlal2 v5.4s, v24.8h, v1.8h\n"
+ "add x11, x11, x24\n"
+ "smlal v15.4s, v23.4h, v2.4h\n"
+ "smlal2 v16.4s, v23.8h, v2.8h\n"
+ "smlal v17.4s, v31.4h, v2.4h\n"
+ "smlal2 v8.4s, v31.8h, v2.8h\n"
+ "smlal v10.4s, v24.4h, v2.4h\n"
"smlal2 v7.4s, v24.8h, v2.8h\n"
- "tbz x1, #2, 33f\n"
- "ld1 { v27.s }[0], [x25], #0x4\n"
- "tbz x1, #1, 32f\n"
- "ld1 { v27.h }[2], [x25], #0x2\n"
- "tbz x1, #0, 35f\n"
- "ld1 { v27.b }[6], [x25]\n"
+ "tbz x0, #2, 33f\n"
+ "ld1 { v27.s }[0], [x11], #0x4\n"
+ "tbz x0, #1, 32f\n"
+ "ld1 { v27.h }[2], [x11], #0x2\n"
+ "tbz x0, #0, 35f\n"
+ "ld1 { v27.b }[6], [x11]\n"
"b 35f\n"
"32:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset
- "tbz x1, #0, 35f\n"
- "ld1 { v27.b }[4], [x25]\n"
+ "tbz x0, #0, 35f\n"
+ "ld1 { v27.b }[4], [x11]\n"
"b 35f\n"
"33:" // Oddments: Load (2, 3): Bit 2: Unset
- "tbz x1, #1, 34f\n"
- "ld1 { v27.h }[0], [x25], #0x2\n"
- "tbz x1, #0, 35f\n"
- "ld1 { v27.b }[2], [x25]\n"
+ "tbz x0, #1, 34f\n"
+ "ld1 { v27.h }[0], [x11], #0x2\n"
+ "tbz x0, #0, 35f\n"
+ "ld1 { v27.b }[2], [x11]\n"
"b 35f\n"
"34:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 35f\n"
- "ld1 { v27.b }[0], [x25]\n"
+ "tbz x0, #0, 35f\n"
+ "ld1 { v27.b }[0], [x11]\n"
"35:" // Oddments: Load (2, 3): Bit 2: End
- "ldr d3, [x3, #0x40]\n"
+ "ldr d3, [x23, #0x40]\n"
"ssubl v27.8h, v27.8b, v9.8b\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "ldr x23, [x4, #0x80]\n"
- "smlal v17.4s, v27.4h, v2.4h\n"
- "smlal2 v21.4s, v27.8h, v2.8h\n"
- "add x23, x23, x0\n"
- "smlal v13.4s, v31.4h, v3.4h\n"
- "smlal2 v19.4s, v31.8h, v3.8h\n"
- "smlal v20.4s, v30.4h, v3.4h\n"
- "smlal2 v10.4s, v30.8h, v3.8h\n"
- "smlal v8.4s, v27.4h, v3.4h\n"
+ "ssubl v3.8h, v3.8b, v14.8b\n"
+ "ldr x12, [x20, #0x80]\n"
+ "smlal v6.4s, v27.4h, v2.4h\n"
+ "smlal2 v5.4s, v27.8h, v2.8h\n"
+ "add x12, x12, x24\n"
+ "smlal v15.4s, v31.4h, v3.4h\n"
+ "smlal2 v16.4s, v31.8h, v3.8h\n"
+ "smlal v17.4s, v30.4h, v3.4h\n"
+ "smlal2 v8.4s, v30.8h, v3.8h\n"
+ "smlal v10.4s, v27.4h, v3.4h\n"
"smlal2 v7.4s, v27.8h, v3.8h\n"
- "tbz x1, #2, 37f\n"
- "ld1 { v23.s }[0], [x23], #0x4\n"
- "tbz x1, #1, 36f\n"
- "ld1 { v23.h }[2], [x23], #0x2\n"
- "tbz x1, #0, 39f\n"
- "ld1 { v23.b }[6], [x23]\n"
+ "tbz x0, #2, 37f\n"
+ "ld1 { v23.s }[0], [x12], #0x4\n"
+ "tbz x0, #1, 36f\n"
+ "ld1 { v23.h }[2], [x12], #0x2\n"
+ "tbz x0, #0, 39f\n"
+ "ld1 { v23.b }[6], [x12]\n"
"b 39f\n"
"36:" // Oddments: Load (2, 4): Bit 2: Bit 1: Unset
- "tbz x1, #0, 39f\n"
- "ld1 { v23.b }[4], [x23]\n"
+ "tbz x0, #0, 39f\n"
+ "ld1 { v23.b }[4], [x12]\n"
"b 39f\n"
"37:" // Oddments: Load (2, 4): Bit 2: Unset
- "tbz x1, #1, 38f\n"
- "ld1 { v23.h }[0], [x23], #0x2\n"
- "tbz x1, #0, 39f\n"
- "ld1 { v23.b }[2], [x23]\n"
+ "tbz x0, #1, 38f\n"
+ "ld1 { v23.h }[0], [x12], #0x2\n"
+ "tbz x0, #0, 39f\n"
+ "ld1 { v23.b }[2], [x12]\n"
"b 39f\n"
"38:" // Oddments: Load (2, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 39f\n"
- "ld1 { v23.b }[0], [x23]\n"
+ "tbz x0, #0, 39f\n"
+ "ld1 { v23.b }[0], [x12]\n"
"39:" // Oddments: Load (2, 4): Bit 2: End
- "ldr d4, [x3, #0x48]\n"
+ "ldr d4, [x23, #0x48]\n"
"ssubl v23.8h, v23.8b, v9.8b\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "ldr x24, [x4, #0x88]\n"
- "smlal v17.4s, v23.4h, v3.4h\n"
- "smlal2 v21.4s, v23.8h, v3.8h\n"
- "add x24, x24, x0\n"
- "smlal v13.4s, v30.4h, v4.4h\n"
- "smlal2 v19.4s, v30.8h, v4.8h\n"
- "smlal v20.4s, v26.4h, v4.4h\n"
- "smlal2 v10.4s, v26.8h, v4.8h\n"
- "smlal v8.4s, v23.4h, v4.4h\n"
+ "ssubl v4.8h, v4.8b, v14.8b\n"
+ "ldr x26, [x20, #0x88]\n"
+ "smlal v6.4s, v23.4h, v3.4h\n"
+ "smlal2 v5.4s, v23.8h, v3.8h\n"
+ "add x26, x26, x24\n"
+ "smlal v15.4s, v30.4h, v4.4h\n"
+ "smlal2 v16.4s, v30.8h, v4.8h\n"
+ "smlal v17.4s, v26.4h, v4.4h\n"
+ "smlal2 v8.4s, v26.8h, v4.8h\n"
+ "smlal v10.4s, v23.4h, v4.4h\n"
"smlal2 v7.4s, v23.8h, v4.8h\n"
- "tbz x1, #2, 41f\n"
- "ld1 { v28.s }[0], [x24], #0x4\n"
- "tbz x1, #1, 40f\n"
- "ld1 { v28.h }[2], [x24], #0x2\n"
- "tbz x1, #0, 43f\n"
- "ld1 { v28.b }[6], [x24]\n"
+ "tbz x0, #2, 41f\n"
+ "ld1 { v28.s }[0], [x26], #0x4\n"
+ "tbz x0, #1, 40f\n"
+ "ld1 { v28.h }[2], [x26], #0x2\n"
+ "tbz x0, #0, 43f\n"
+ "ld1 { v28.b }[6], [x26]\n"
"b 43f\n"
"40:" // Oddments: Load (2, 5): Bit 2: Bit 1: Unset
- "tbz x1, #0, 43f\n"
- "ld1 { v28.b }[4], [x24]\n"
+ "tbz x0, #0, 43f\n"
+ "ld1 { v28.b }[4], [x26]\n"
"b 43f\n"
"41:" // Oddments: Load (2, 5): Bit 2: Unset
- "tbz x1, #1, 42f\n"
- "ld1 { v28.h }[0], [x24], #0x2\n"
- "tbz x1, #0, 43f\n"
- "ld1 { v28.b }[2], [x24]\n"
+ "tbz x0, #1, 42f\n"
+ "ld1 { v28.h }[0], [x26], #0x2\n"
+ "tbz x0, #0, 43f\n"
+ "ld1 { v28.b }[2], [x26]\n"
"b 43f\n"
"42:" // Oddments: Load (2, 5): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 43f\n"
- "ld1 { v28.b }[0], [x24]\n"
+ "tbz x0, #0, 43f\n"
+ "ld1 { v28.b }[0], [x26]\n"
"43:" // Oddments: Load (2, 5): Bit 2: End
- "ldr d0, [x3, #0x50]\n"
+ "ldr d0, [x23, #0x50]\n"
"ssubl v28.8h, v28.8b, v9.8b\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
- "ldr x15, [x4, #0x90]\n"
- "smlal v17.4s, v28.4h, v4.4h\n"
- "smlal2 v21.4s, v28.8h, v4.8h\n"
- "add x15, x15, x0\n"
- "smlal v13.4s, v22.4h, v0.4h\n"
- "smlal2 v19.4s, v22.8h, v0.8h\n"
- "smlal v20.4s, v25.4h, v0.4h\n"
- "smlal2 v10.4s, v25.8h, v0.8h\n"
- "tbz x1, #2, 45f\n"
- "ld1 { v31.s }[0], [x15], #0x4\n"
- "tbz x1, #1, 44f\n"
- "ld1 { v31.h }[2], [x15], #0x2\n"
- "tbz x1, #0, 47f\n"
- "ld1 { v31.b }[6], [x15]\n"
+ "ssubl v0.8h, v0.8b, v14.8b\n"
+ "ldr x14, [x20, #0x90]\n"
+ "smlal v6.4s, v28.4h, v4.4h\n"
+ "smlal2 v5.4s, v28.8h, v4.8h\n"
+ "add x14, x14, x24\n"
+ "smlal v15.4s, v22.4h, v0.4h\n"
+ "smlal2 v16.4s, v22.8h, v0.8h\n"
+ "smlal v17.4s, v25.4h, v0.4h\n"
+ "smlal2 v8.4s, v25.8h, v0.8h\n"
+ "tbz x0, #2, 45f\n"
+ "ld1 { v31.s }[0], [x14], #0x4\n"
+ "tbz x0, #1, 44f\n"
+ "ld1 { v31.h }[2], [x14], #0x2\n"
+ "tbz x0, #0, 47f\n"
+ "ld1 { v31.b }[6], [x14]\n"
"b 47f\n"
"44:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset
- "tbz x1, #0, 47f\n"
- "ld1 { v31.b }[4], [x15]\n"
+ "tbz x0, #0, 47f\n"
+ "ld1 { v31.b }[4], [x14]\n"
"b 47f\n"
"45:" // Oddments: Load (3, 0): Bit 2: Unset
- "tbz x1, #1, 46f\n"
- "ld1 { v31.h }[0], [x15], #0x2\n"
- "tbz x1, #0, 47f\n"
- "ld1 { v31.b }[2], [x15]\n"
+ "tbz x0, #1, 46f\n"
+ "ld1 { v31.h }[0], [x14], #0x2\n"
+ "tbz x0, #0, 47f\n"
+ "ld1 { v31.b }[2], [x14]\n"
"b 47f\n"
"46:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 47f\n"
- "ld1 { v31.b }[0], [x15]\n"
+ "tbz x0, #0, 47f\n"
+ "ld1 { v31.b }[0], [x14]\n"
"47:" // Oddments: Load (3, 0): Bit 2: End
"ssubl v31.8h, v31.8b, v9.8b\n"
- "ldr x21, [x4, #0x98]\n"
- "smlal v8.4s, v31.4h, v0.4h\n"
+ "ldr x15, [x20, #0x98]\n"
+ "smlal v10.4s, v31.4h, v0.4h\n"
"smlal2 v7.4s, v31.8h, v0.8h\n"
- "add x21, x21, x0\n"
- "tbz x1, #2, 49f\n"
- "ld1 { v30.s }[0], [x21], #0x4\n"
- "tbz x1, #1, 48f\n"
- "ld1 { v30.h }[2], [x21], #0x2\n"
- "tbz x1, #0, 51f\n"
- "ld1 { v30.b }[6], [x21]\n"
+ "add x15, x15, x24\n"
+ "tbz x0, #2, 49f\n"
+ "ld1 { v30.s }[0], [x15], #0x4\n"
+ "tbz x0, #1, 48f\n"
+ "ld1 { v30.h }[2], [x15], #0x2\n"
+ "tbz x0, #0, 51f\n"
+ "ld1 { v30.b }[6], [x15]\n"
"b 51f\n"
"48:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset
- "tbz x1, #0, 51f\n"
- "ld1 { v30.b }[4], [x21]\n"
+ "tbz x0, #0, 51f\n"
+ "ld1 { v30.b }[4], [x15]\n"
"b 51f\n"
"49:" // Oddments: Load (3, 1): Bit 2: Unset
- "tbz x1, #1, 50f\n"
- "ld1 { v30.h }[0], [x21], #0x2\n"
- "tbz x1, #0, 51f\n"
- "ld1 { v30.b }[2], [x21]\n"
+ "tbz x0, #1, 50f\n"
+ "ld1 { v30.h }[0], [x15], #0x2\n"
+ "tbz x0, #0, 51f\n"
+ "ld1 { v30.b }[2], [x15]\n"
"b 51f\n"
"50:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 51f\n"
- "ld1 { v30.b }[0], [x21]\n"
+ "tbz x0, #0, 51f\n"
+ "ld1 { v30.b }[0], [x15]\n"
"51:" // Oddments: Load (3, 1): Bit 2: End
- "ldr d1, [x3, #0x58]\n"
+ "ldr d1, [x23, #0x58]\n"
"ssubl v30.8h, v30.8b, v9.8b\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
- "ldr x14, [x4, #0xa0]\n"
- "smlal v17.4s, v30.4h, v0.4h\n"
- "smlal2 v21.4s, v30.8h, v0.8h\n"
- "add x14, x14, x0\n"
- "smlal v13.4s, v25.4h, v1.4h\n"
- "smlal2 v19.4s, v25.8h, v1.8h\n"
- "smlal v20.4s, v24.4h, v1.4h\n"
- "smlal2 v10.4s, v24.8h, v1.8h\n"
- "smlal v8.4s, v30.4h, v1.4h\n"
+ "ssubl v1.8h, v1.8b, v14.8b\n"
+ "ldr x21, [x20, #0xa0]\n"
+ "smlal v6.4s, v30.4h, v0.4h\n"
+ "smlal2 v5.4s, v30.8h, v0.8h\n"
+ "add x21, x21, x24\n"
+ "smlal v15.4s, v25.4h, v1.4h\n"
+ "smlal2 v16.4s, v25.8h, v1.8h\n"
+ "smlal v17.4s, v24.4h, v1.4h\n"
+ "smlal2 v8.4s, v24.8h, v1.8h\n"
+ "smlal v10.4s, v30.4h, v1.4h\n"
"smlal2 v7.4s, v30.8h, v1.8h\n"
- "tbz x1, #2, 53f\n"
- "ld1 { v26.s }[0], [x14], #0x4\n"
- "tbz x1, #1, 52f\n"
- "ld1 { v26.h }[2], [x14], #0x2\n"
- "tbz x1, #0, 55f\n"
- "ld1 { v26.b }[6], [x14]\n"
+ "tbz x0, #2, 53f\n"
+ "ld1 { v26.s }[0], [x21], #0x4\n"
+ "tbz x0, #1, 52f\n"
+ "ld1 { v26.h }[2], [x21], #0x2\n"
+ "tbz x0, #0, 55f\n"
+ "ld1 { v26.b }[6], [x21]\n"
"b 55f\n"
"52:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset
- "tbz x1, #0, 55f\n"
- "ld1 { v26.b }[4], [x14]\n"
+ "tbz x0, #0, 55f\n"
+ "ld1 { v26.b }[4], [x21]\n"
"b 55f\n"
"53:" // Oddments: Load (3, 2): Bit 2: Unset
- "tbz x1, #1, 54f\n"
- "ld1 { v26.h }[0], [x14], #0x2\n"
- "tbz x1, #0, 55f\n"
- "ld1 { v26.b }[2], [x14]\n"
+ "tbz x0, #1, 54f\n"
+ "ld1 { v26.h }[0], [x21], #0x2\n"
+ "tbz x0, #0, 55f\n"
+ "ld1 { v26.b }[2], [x21]\n"
"b 55f\n"
"54:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 55f\n"
- "ld1 { v26.b }[0], [x14]\n"
+ "tbz x0, #0, 55f\n"
+ "ld1 { v26.b }[0], [x21]\n"
"55:" // Oddments: Load (3, 2): Bit 2: End
- "ldr d2, [x3, #0x60]\n"
+ "ldr d2, [x23, #0x60]\n"
"ssubl v26.8h, v26.8b, v9.8b\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
- "ldr x13, [x4, #0xa8]\n"
- "smlal v17.4s, v26.4h, v1.4h\n"
- "smlal2 v21.4s, v26.8h, v1.8h\n"
- "add x13, x13, x0\n"
- "smlal v13.4s, v24.4h, v2.4h\n"
- "smlal2 v19.4s, v24.8h, v2.8h\n"
- "smlal v20.4s, v27.4h, v2.4h\n"
- "smlal2 v10.4s, v27.8h, v2.8h\n"
- "smlal v8.4s, v26.4h, v2.4h\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "ldr x2, [x20, #0xa8]\n"
+ "smlal v6.4s, v26.4h, v1.4h\n"
+ "smlal2 v5.4s, v26.8h, v1.8h\n"
+ "add x2, x2, x24\n"
+ "smlal v15.4s, v24.4h, v2.4h\n"
+ "smlal2 v16.4s, v24.8h, v2.8h\n"
+ "smlal v17.4s, v27.4h, v2.4h\n"
+ "smlal2 v8.4s, v27.8h, v2.8h\n"
+ "smlal v10.4s, v26.4h, v2.4h\n"
"smlal2 v7.4s, v26.8h, v2.8h\n"
- "tbz x1, #2, 57f\n"
- "ld1 { v25.s }[0], [x13], #0x4\n"
- "tbz x1, #1, 56f\n"
- "ld1 { v25.h }[2], [x13], #0x2\n"
- "tbz x1, #0, 59f\n"
- "ld1 { v25.b }[6], [x13]\n"
+ "tbz x0, #2, 57f\n"
+ "ld1 { v25.s }[0], [x2], #0x4\n"
+ "tbz x0, #1, 56f\n"
+ "ld1 { v25.h }[2], [x2], #0x2\n"
+ "tbz x0, #0, 59f\n"
+ "ld1 { v25.b }[6], [x2]\n"
"b 59f\n"
"56:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset
- "tbz x1, #0, 59f\n"
- "ld1 { v25.b }[4], [x13]\n"
+ "tbz x0, #0, 59f\n"
+ "ld1 { v25.b }[4], [x2]\n"
"b 59f\n"
"57:" // Oddments: Load (3, 3): Bit 2: Unset
- "tbz x1, #1, 58f\n"
- "ld1 { v25.h }[0], [x13], #0x2\n"
- "tbz x1, #0, 59f\n"
- "ld1 { v25.b }[2], [x13]\n"
+ "tbz x0, #1, 58f\n"
+ "ld1 { v25.h }[0], [x2], #0x2\n"
+ "tbz x0, #0, 59f\n"
+ "ld1 { v25.b }[2], [x2]\n"
"b 59f\n"
"58:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 59f\n"
- "ld1 { v25.b }[0], [x13]\n"
+ "tbz x0, #0, 59f\n"
+ "ld1 { v25.b }[0], [x2]\n"
"59:" // Oddments: Load (3, 3): Bit 2: End
- "ldr d3, [x3, #0x68]\n"
+ "ldr d3, [x23, #0x68]\n"
"ssubl v25.8h, v25.8b, v9.8b\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "ldr x12, [x4, #0xb0]\n"
- "smlal v17.4s, v25.4h, v2.4h\n"
- "smlal2 v21.4s, v25.8h, v2.8h\n"
- "add x12, x12, x0\n"
- "smlal v13.4s, v27.4h, v3.4h\n"
- "smlal2 v19.4s, v27.8h, v3.8h\n"
- "smlal v20.4s, v23.4h, v3.4h\n"
- "smlal2 v10.4s, v23.8h, v3.8h\n"
- "smlal v8.4s, v25.4h, v3.4h\n"
+ "ssubl v3.8h, v3.8b, v14.8b\n"
+ "ldr x13, [x20, #0xb0]\n"
+ "smlal v6.4s, v25.4h, v2.4h\n"
+ "smlal2 v5.4s, v25.8h, v2.8h\n"
+ "add x13, x13, x24\n"
+ "smlal v15.4s, v27.4h, v3.4h\n"
+ "smlal2 v16.4s, v27.8h, v3.8h\n"
+ "smlal v17.4s, v23.4h, v3.4h\n"
+ "smlal2 v8.4s, v23.8h, v3.8h\n"
+ "smlal v10.4s, v25.4h, v3.4h\n"
"smlal2 v7.4s, v25.8h, v3.8h\n"
- "tbz x1, #2, 61f\n"
- "ld1 { v24.s }[0], [x12], #0x4\n"
- "tbz x1, #1, 60f\n"
- "ld1 { v24.h }[2], [x12], #0x2\n"
- "tbz x1, #0, 63f\n"
- "ld1 { v24.b }[6], [x12]\n"
+ "tbz x0, #2, 61f\n"
+ "ld1 { v24.s }[0], [x13], #0x4\n"
+ "tbz x0, #1, 60f\n"
+ "ld1 { v24.h }[2], [x13], #0x2\n"
+ "tbz x0, #0, 63f\n"
+ "ld1 { v24.b }[6], [x13]\n"
"b 63f\n"
"60:" // Oddments: Load (3, 4): Bit 2: Bit 1: Unset
- "tbz x1, #0, 63f\n"
- "ld1 { v24.b }[4], [x12]\n"
+ "tbz x0, #0, 63f\n"
+ "ld1 { v24.b }[4], [x13]\n"
"b 63f\n"
"61:" // Oddments: Load (3, 4): Bit 2: Unset
- "tbz x1, #1, 62f\n"
- "ld1 { v24.h }[0], [x12], #0x2\n"
- "tbz x1, #0, 63f\n"
- "ld1 { v24.b }[2], [x12]\n"
+ "tbz x0, #1, 62f\n"
+ "ld1 { v24.h }[0], [x13], #0x2\n"
+ "tbz x0, #0, 63f\n"
+ "ld1 { v24.b }[2], [x13]\n"
"b 63f\n"
"62:" // Oddments: Load (3, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 63f\n"
- "ld1 { v24.b }[0], [x12]\n"
+ "tbz x0, #0, 63f\n"
+ "ld1 { v24.b }[0], [x13]\n"
"63:" // Oddments: Load (3, 4): Bit 2: End
- "ldr d4, [x3, #0x70]\n"
+ "ldr d4, [x23, #0x70]\n"
"ssubl v24.8h, v24.8b, v9.8b\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "ldr x20, [x4, #0xb8]\n"
- "smlal v17.4s, v24.4h, v3.4h\n"
- "smlal2 v21.4s, v24.8h, v3.8h\n"
- "add x20, x20, x0\n"
- "smlal v13.4s, v23.4h, v4.4h\n"
- "smlal2 v19.4s, v23.8h, v4.8h\n"
- "smlal v20.4s, v28.4h, v4.4h\n"
- "smlal2 v10.4s, v28.8h, v4.8h\n"
- "smlal v8.4s, v24.4h, v4.4h\n"
+ "ssubl v4.8h, v4.8b, v14.8b\n"
+ "ldr x9, [x20, #0xb8]\n"
+ "smlal v6.4s, v24.4h, v3.4h\n"
+ "smlal2 v5.4s, v24.8h, v3.8h\n"
+ "add x9, x9, x24\n"
+ "smlal v15.4s, v23.4h, v4.4h\n"
+ "smlal2 v16.4s, v23.8h, v4.8h\n"
+ "smlal v17.4s, v28.4h, v4.4h\n"
+ "smlal2 v8.4s, v28.8h, v4.8h\n"
+ "smlal v10.4s, v24.4h, v4.4h\n"
"smlal2 v7.4s, v24.8h, v4.8h\n"
- "tbz x1, #2, 65f\n"
- "ld1 { v22.s }[0], [x20], #0x4\n"
- "tbz x1, #1, 64f\n"
- "ld1 { v22.h }[2], [x20], #0x2\n"
- "tbz x1, #0, 67f\n"
- "ld1 { v22.b }[6], [x20]\n"
+ "tbz x0, #2, 65f\n"
+ "ld1 { v22.s }[0], [x9], #0x4\n"
+ "tbz x0, #1, 64f\n"
+ "ld1 { v22.h }[2], [x9], #0x2\n"
+ "tbz x0, #0, 67f\n"
+ "ld1 { v22.b }[6], [x9]\n"
"b 67f\n"
"64:" // Oddments: Load (3, 5): Bit 2: Bit 1: Unset
- "tbz x1, #0, 67f\n"
- "ld1 { v22.b }[4], [x20]\n"
+ "tbz x0, #0, 67f\n"
+ "ld1 { v22.b }[4], [x9]\n"
"b 67f\n"
"65:" // Oddments: Load (3, 5): Bit 2: Unset
- "tbz x1, #1, 66f\n"
- "ld1 { v22.h }[0], [x20], #0x2\n"
- "tbz x1, #0, 67f\n"
- "ld1 { v22.b }[2], [x20]\n"
+ "tbz x0, #1, 66f\n"
+ "ld1 { v22.h }[0], [x9], #0x2\n"
+ "tbz x0, #0, 67f\n"
+ "ld1 { v22.b }[2], [x9]\n"
"b 67f\n"
"66:" // Oddments: Load (3, 5): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 67f\n"
- "ld1 { v22.b }[0], [x20]\n"
+ "tbz x0, #0, 67f\n"
+ "ld1 { v22.b }[0], [x9]\n"
"67:" // Oddments: Load (3, 5): Bit 2: End
- "ldr d0, [x3, #0x78]\n"
+ "ldr d0, [x23, #0x78]\n"
"ssubl v22.8h, v22.8b, v9.8b\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
- "ldr x11, [x4, #0xc0]\n"
- "smlal v17.4s, v22.4h, v4.4h\n"
- "smlal2 v21.4s, v22.8h, v4.8h\n"
- "add x11, x11, x0\n"
- "smlal v13.4s, v31.4h, v0.4h\n"
- "smlal2 v19.4s, v31.8h, v0.8h\n"
- "smlal v20.4s, v30.4h, v0.4h\n"
- "smlal2 v10.4s, v30.8h, v0.8h\n"
- "tbz x1, #2, 69f\n"
- "ld1 { v27.s }[0], [x11], #0x4\n"
- "tbz x1, #1, 68f\n"
- "ld1 { v27.h }[2], [x11], #0x2\n"
- "tbz x1, #0, 71f\n"
- "ld1 { v27.b }[6], [x11]\n"
+ "ssubl v0.8h, v0.8b, v14.8b\n"
+ "ldr x19, [x20, #0xc0]\n"
+ "smlal v6.4s, v22.4h, v4.4h\n"
+ "smlal2 v5.4s, v22.8h, v4.8h\n"
+ "add x19, x19, x24\n"
+ "smlal v15.4s, v31.4h, v0.4h\n"
+ "smlal2 v16.4s, v31.8h, v0.8h\n"
+ "smlal v17.4s, v30.4h, v0.4h\n"
+ "smlal2 v8.4s, v30.8h, v0.8h\n"
+ "tbz x0, #2, 69f\n"
+ "ld1 { v27.s }[0], [x19], #0x4\n"
+ "tbz x0, #1, 68f\n"
+ "ld1 { v27.h }[2], [x19], #0x2\n"
+ "tbz x0, #0, 71f\n"
+ "ld1 { v27.b }[6], [x19]\n"
"b 71f\n"
"68:" // Oddments: Load (4, 0): Bit 2: Bit 1: Unset
- "tbz x1, #0, 71f\n"
- "ld1 { v27.b }[4], [x11]\n"
+ "tbz x0, #0, 71f\n"
+ "ld1 { v27.b }[4], [x19]\n"
"b 71f\n"
"69:" // Oddments: Load (4, 0): Bit 2: Unset
- "tbz x1, #1, 70f\n"
- "ld1 { v27.h }[0], [x11], #0x2\n"
- "tbz x1, #0, 71f\n"
- "ld1 { v27.b }[2], [x11]\n"
+ "tbz x0, #1, 70f\n"
+ "ld1 { v27.h }[0], [x19], #0x2\n"
+ "tbz x0, #0, 71f\n"
+ "ld1 { v27.b }[2], [x19]\n"
"b 71f\n"
"70:" // Oddments: Load (4, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 71f\n"
- "ld1 { v27.b }[0], [x11]\n"
+ "tbz x0, #0, 71f\n"
+ "ld1 { v27.b }[0], [x19]\n"
"71:" // Oddments: Load (4, 0): Bit 2: End
"ssubl v27.8h, v27.8b, v9.8b\n"
- "ldr x22, [x4, #0xc8]\n"
- "smlal v8.4s, v27.4h, v0.4h\n"
+ "ldr x28, [x20, #0xc8]\n"
+ "smlal v10.4s, v27.4h, v0.4h\n"
"smlal2 v7.4s, v27.8h, v0.8h\n"
- "add x22, x22, x0\n"
- "tbz x1, #2, 73f\n"
- "ld1 { v23.s }[0], [x22], #0x4\n"
- "tbz x1, #1, 72f\n"
- "ld1 { v23.h }[2], [x22], #0x2\n"
- "tbz x1, #0, 75f\n"
- "ld1 { v23.b }[6], [x22]\n"
+ "add x28, x28, x24\n"
+ "tbz x0, #2, 73f\n"
+ "ld1 { v23.s }[0], [x28], #0x4\n"
+ "tbz x0, #1, 72f\n"
+ "ld1 { v23.h }[2], [x28], #0x2\n"
+ "tbz x0, #0, 75f\n"
+ "ld1 { v23.b }[6], [x28]\n"
"b 75f\n"
"72:" // Oddments: Load (4, 1): Bit 2: Bit 1: Unset
- "tbz x1, #0, 75f\n"
- "ld1 { v23.b }[4], [x22]\n"
+ "tbz x0, #0, 75f\n"
+ "ld1 { v23.b }[4], [x28]\n"
"b 75f\n"
"73:" // Oddments: Load (4, 1): Bit 2: Unset
- "tbz x1, #1, 74f\n"
- "ld1 { v23.h }[0], [x22], #0x2\n"
- "tbz x1, #0, 75f\n"
- "ld1 { v23.b }[2], [x22]\n"
+ "tbz x0, #1, 74f\n"
+ "ld1 { v23.h }[0], [x28], #0x2\n"
+ "tbz x0, #0, 75f\n"
+ "ld1 { v23.b }[2], [x28]\n"
"b 75f\n"
"74:" // Oddments: Load (4, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 75f\n"
- "ld1 { v23.b }[0], [x22]\n"
+ "tbz x0, #0, 75f\n"
+ "ld1 { v23.b }[0], [x28]\n"
"75:" // Oddments: Load (4, 1): Bit 2: End
- "ldr d1, [x3, #0x80]\n"
+ "ldr d1, [x23, #0x80]\n"
"ssubl v23.8h, v23.8b, v9.8b\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
- "ldr x9, [x4, #0xd0]\n"
- "smlal v17.4s, v23.4h, v0.4h\n"
- "smlal2 v21.4s, v23.8h, v0.8h\n"
- "add x9, x9, x0\n"
- "smlal v13.4s, v30.4h, v1.4h\n"
- "smlal2 v19.4s, v30.8h, v1.8h\n"
- "smlal v20.4s, v26.4h, v1.4h\n"
- "smlal2 v10.4s, v26.8h, v1.8h\n"
- "smlal v8.4s, v23.4h, v1.4h\n"
+ "ssubl v1.8h, v1.8b, v14.8b\n"
+ "ldr x6, [x20, #0xd0]\n"
+ "smlal v6.4s, v23.4h, v0.4h\n"
+ "smlal2 v5.4s, v23.8h, v0.8h\n"
+ "add x6, x6, x24\n"
+ "smlal v15.4s, v30.4h, v1.4h\n"
+ "smlal2 v16.4s, v30.8h, v1.8h\n"
+ "smlal v17.4s, v26.4h, v1.4h\n"
+ "smlal2 v8.4s, v26.8h, v1.8h\n"
+ "smlal v10.4s, v23.4h, v1.4h\n"
"smlal2 v7.4s, v23.8h, v1.8h\n"
- "tbz x1, #2, 77f\n"
- "ld1 { v31.s }[0], [x9], #0x4\n"
- "tbz x1, #1, 76f\n"
- "ld1 { v31.h }[2], [x9], #0x2\n"
- "tbz x1, #0, 79f\n"
- "ld1 { v31.b }[6], [x9]\n"
+ "tbz x0, #2, 77f\n"
+ "ld1 { v31.s }[0], [x6], #0x4\n"
+ "tbz x0, #1, 76f\n"
+ "ld1 { v31.h }[2], [x6], #0x2\n"
+ "tbz x0, #0, 79f\n"
+ "ld1 { v31.b }[6], [x6]\n"
"b 79f\n"
"76:" // Oddments: Load (4, 2): Bit 2: Bit 1: Unset
- "tbz x1, #0, 79f\n"
- "ld1 { v31.b }[4], [x9]\n"
+ "tbz x0, #0, 79f\n"
+ "ld1 { v31.b }[4], [x6]\n"
"b 79f\n"
"77:" // Oddments: Load (4, 2): Bit 2: Unset
- "tbz x1, #1, 78f\n"
- "ld1 { v31.h }[0], [x9], #0x2\n"
- "tbz x1, #0, 79f\n"
- "ld1 { v31.b }[2], [x9]\n"
+ "tbz x0, #1, 78f\n"
+ "ld1 { v31.h }[0], [x6], #0x2\n"
+ "tbz x0, #0, 79f\n"
+ "ld1 { v31.b }[2], [x6]\n"
"b 79f\n"
"78:" // Oddments: Load (4, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 79f\n"
- "ld1 { v31.b }[0], [x9]\n"
+ "tbz x0, #0, 79f\n"
+ "ld1 { v31.b }[0], [x6]\n"
"79:" // Oddments: Load (4, 2): Bit 2: End
- "ldr d2, [x3, #0x88]\n"
+ "ldr d2, [x23, #0x88]\n"
"ssubl v31.8h, v31.8b, v9.8b\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
- "ldr x28, [x4, #0xd8]\n"
- "smlal v17.4s, v31.4h, v1.4h\n"
- "smlal2 v21.4s, v31.8h, v1.8h\n"
- "add x28, x28, x0\n"
- "smlal v13.4s, v26.4h, v2.4h\n"
- "smlal2 v19.4s, v26.8h, v2.8h\n"
- "smlal v20.4s, v25.4h, v2.4h\n"
- "smlal2 v10.4s, v25.8h, v2.8h\n"
- "smlal v8.4s, v31.4h, v2.4h\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "ldr x27, [x20, #0xd8]\n"
+ "smlal v6.4s, v31.4h, v1.4h\n"
+ "smlal2 v5.4s, v31.8h, v1.8h\n"
+ "add x27, x27, x24\n"
+ "smlal v15.4s, v26.4h, v2.4h\n"
+ "smlal2 v16.4s, v26.8h, v2.8h\n"
+ "smlal v17.4s, v25.4h, v2.4h\n"
+ "smlal2 v8.4s, v25.8h, v2.8h\n"
+ "smlal v10.4s, v31.4h, v2.4h\n"
"smlal2 v7.4s, v31.8h, v2.8h\n"
- "tbz x1, #2, 81f\n"
- "ld1 { v30.s }[0], [x28], #0x4\n"
- "tbz x1, #1, 80f\n"
- "ld1 { v30.h }[2], [x28], #0x2\n"
- "tbz x1, #0, 83f\n"
- "ld1 { v30.b }[6], [x28]\n"
+ "tbz x0, #2, 81f\n"
+ "ld1 { v30.s }[0], [x27], #0x4\n"
+ "tbz x0, #1, 80f\n"
+ "ld1 { v30.h }[2], [x27], #0x2\n"
+ "tbz x0, #0, 83f\n"
+ "ld1 { v30.b }[6], [x27]\n"
"b 83f\n"
"80:" // Oddments: Load (4, 3): Bit 2: Bit 1: Unset
- "tbz x1, #0, 83f\n"
- "ld1 { v30.b }[4], [x28]\n"
+ "tbz x0, #0, 83f\n"
+ "ld1 { v30.b }[4], [x27]\n"
"b 83f\n"
"81:" // Oddments: Load (4, 3): Bit 2: Unset
- "tbz x1, #1, 82f\n"
- "ld1 { v30.h }[0], [x28], #0x2\n"
- "tbz x1, #0, 83f\n"
- "ld1 { v30.b }[2], [x28]\n"
+ "tbz x0, #1, 82f\n"
+ "ld1 { v30.h }[0], [x27], #0x2\n"
+ "tbz x0, #0, 83f\n"
+ "ld1 { v30.b }[2], [x27]\n"
"b 83f\n"
"82:" // Oddments: Load (4, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 83f\n"
- "ld1 { v30.b }[0], [x28]\n"
+ "tbz x0, #0, 83f\n"
+ "ld1 { v30.b }[0], [x27]\n"
"83:" // Oddments: Load (4, 3): Bit 2: End
- "ldr d3, [x3, #0x90]\n"
+ "ldr d3, [x23, #0x90]\n"
"ssubl v30.8h, v30.8b, v9.8b\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "ldr x27, [x4, #0xe0]\n"
- "smlal v17.4s, v30.4h, v2.4h\n"
- "smlal2 v21.4s, v30.8h, v2.8h\n"
- "add x27, x27, x0\n"
- "smlal v13.4s, v25.4h, v3.4h\n"
- "smlal2 v19.4s, v25.8h, v3.8h\n"
- "smlal v20.4s, v24.4h, v3.4h\n"
- "smlal2 v10.4s, v24.8h, v3.8h\n"
- "smlal v8.4s, v30.4h, v3.4h\n"
+ "ssubl v3.8h, v3.8b, v14.8b\n"
+ "ldr x11, [x20, #0xe0]\n"
+ "smlal v6.4s, v30.4h, v2.4h\n"
+ "smlal2 v5.4s, v30.8h, v2.8h\n"
+ "add x11, x11, x24\n"
+ "smlal v15.4s, v25.4h, v3.4h\n"
+ "smlal2 v16.4s, v25.8h, v3.8h\n"
+ "smlal v17.4s, v24.4h, v3.4h\n"
+ "smlal2 v8.4s, v24.8h, v3.8h\n"
+ "smlal v10.4s, v30.4h, v3.4h\n"
"smlal2 v7.4s, v30.8h, v3.8h\n"
- "tbz x1, #2, 85f\n"
- "ld1 { v28.s }[0], [x27], #0x4\n"
- "tbz x1, #1, 84f\n"
- "ld1 { v28.h }[2], [x27], #0x2\n"
- "tbz x1, #0, 87f\n"
- "ld1 { v28.b }[6], [x27]\n"
+ "tbz x0, #2, 85f\n"
+ "ld1 { v28.s }[0], [x11], #0x4\n"
+ "tbz x0, #1, 84f\n"
+ "ld1 { v28.h }[2], [x11], #0x2\n"
+ "tbz x0, #0, 87f\n"
+ "ld1 { v28.b }[6], [x11]\n"
"b 87f\n"
"84:" // Oddments: Load (4, 4): Bit 2: Bit 1: Unset
- "tbz x1, #0, 87f\n"
- "ld1 { v28.b }[4], [x27]\n"
+ "tbz x0, #0, 87f\n"
+ "ld1 { v28.b }[4], [x11]\n"
"b 87f\n"
"85:" // Oddments: Load (4, 4): Bit 2: Unset
- "tbz x1, #1, 86f\n"
- "ld1 { v28.h }[0], [x27], #0x2\n"
- "tbz x1, #0, 87f\n"
- "ld1 { v28.b }[2], [x27]\n"
+ "tbz x0, #1, 86f\n"
+ "ld1 { v28.h }[0], [x11], #0x2\n"
+ "tbz x0, #0, 87f\n"
+ "ld1 { v28.b }[2], [x11]\n"
"b 87f\n"
"86:" // Oddments: Load (4, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 87f\n"
- "ld1 { v28.b }[0], [x27]\n"
+ "tbz x0, #0, 87f\n"
+ "ld1 { v28.b }[0], [x11]\n"
"87:" // Oddments: Load (4, 4): Bit 2: End
- "ldr d4, [x3, #0x98]\n"
+ "ldr d4, [x23, #0x98]\n"
"ssubl v28.8h, v28.8b, v9.8b\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "ldr x26, [x4, #0xe8]\n"
- "smlal v17.4s, v28.4h, v3.4h\n"
- "smlal2 v21.4s, v28.8h, v3.8h\n"
- "add x26, x26, x0\n"
- "smlal v13.4s, v24.4h, v4.4h\n"
- "smlal2 v19.4s, v24.8h, v4.8h\n"
- "smlal v20.4s, v22.4h, v4.4h\n"
- "smlal2 v10.4s, v22.8h, v4.8h\n"
- "smlal v8.4s, v28.4h, v4.4h\n"
+ "ssubl v4.8h, v4.8b, v14.8b\n"
+ "ldr x17, [x20, #0xe8]\n"
+ "smlal v6.4s, v28.4h, v3.4h\n"
+ "smlal2 v5.4s, v28.8h, v3.8h\n"
+ "add x17, x17, x24\n"
+ "smlal v15.4s, v24.4h, v4.4h\n"
+ "smlal2 v16.4s, v24.8h, v4.8h\n"
+ "smlal v17.4s, v22.4h, v4.4h\n"
+ "smlal2 v8.4s, v22.8h, v4.8h\n"
+ "smlal v10.4s, v28.4h, v4.4h\n"
"smlal2 v7.4s, v28.8h, v4.8h\n"
- "tbz x1, #2, 89f\n"
- "ld1 { v26.s }[0], [x26], #0x4\n"
- "tbz x1, #1, 88f\n"
- "ld1 { v26.h }[2], [x26], #0x2\n"
- "tbz x1, #0, 91f\n"
- "ld1 { v26.b }[6], [x26]\n"
+ "tbz x0, #2, 89f\n"
+ "ld1 { v26.s }[0], [x17], #0x4\n"
+ "tbz x0, #1, 88f\n"
+ "ld1 { v26.h }[2], [x17], #0x2\n"
+ "tbz x0, #0, 91f\n"
+ "ld1 { v26.b }[6], [x17]\n"
"b 91f\n"
"88:" // Oddments: Load (4, 5): Bit 2: Bit 1: Unset
- "tbz x1, #0, 91f\n"
- "ld1 { v26.b }[4], [x26]\n"
+ "tbz x0, #0, 91f\n"
+ "ld1 { v26.b }[4], [x17]\n"
"b 91f\n"
"89:" // Oddments: Load (4, 5): Bit 2: Unset
- "tbz x1, #1, 90f\n"
- "ld1 { v26.h }[0], [x26], #0x2\n"
- "tbz x1, #0, 91f\n"
- "ld1 { v26.b }[2], [x26]\n"
+ "tbz x0, #1, 90f\n"
+ "ld1 { v26.h }[0], [x17], #0x2\n"
+ "tbz x0, #0, 91f\n"
+ "ld1 { v26.b }[2], [x17]\n"
"b 91f\n"
"90:" // Oddments: Load (4, 5): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 91f\n"
- "ld1 { v26.b }[0], [x26]\n"
+ "tbz x0, #0, 91f\n"
+ "ld1 { v26.b }[0], [x17]\n"
"91:" // Oddments: Load (4, 5): Bit 2: End
- "ldr d0, [x3, #0xa0]\n"
+ "ldr d0, [x23, #0xa0]\n"
"ssubl v26.8h, v26.8b, v9.8b\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
- "ldr x25, [x4, #0xf0]\n"
- "smlal v17.4s, v26.4h, v4.4h\n"
- "smlal2 v21.4s, v26.8h, v4.8h\n"
- "add x25, x25, x0\n"
- "smlal v13.4s, v27.4h, v0.4h\n"
- "smlal2 v19.4s, v27.8h, v0.8h\n"
- "smlal v20.4s, v23.4h, v0.4h\n"
- "smlal2 v10.4s, v23.8h, v0.8h\n"
- "tbz x1, #2, 93f\n"
- "ld1 { v25.s }[0], [x25], #0x4\n"
- "tbz x1, #1, 92f\n"
- "ld1 { v25.h }[2], [x25], #0x2\n"
- "tbz x1, #0, 95f\n"
- "ld1 { v25.b }[6], [x25]\n"
+ "ssubl v0.8h, v0.8b, v14.8b\n"
+ "ldr x5, [x20, #0xf0]\n"
+ "smlal v6.4s, v26.4h, v4.4h\n"
+ "smlal2 v5.4s, v26.8h, v4.8h\n"
+ "add x5, x5, x24\n"
+ "smlal v15.4s, v27.4h, v0.4h\n"
+ "smlal2 v16.4s, v27.8h, v0.8h\n"
+ "smlal v17.4s, v23.4h, v0.4h\n"
+ "smlal2 v8.4s, v23.8h, v0.8h\n"
+ "tbz x0, #2, 93f\n"
+ "ld1 { v25.s }[0], [x5], #0x4\n"
+ "tbz x0, #1, 92f\n"
+ "ld1 { v25.h }[2], [x5], #0x2\n"
+ "tbz x0, #0, 95f\n"
+ "ld1 { v25.b }[6], [x5]\n"
"b 95f\n"
"92:" // Oddments: Load (5, 0): Bit 2: Bit 1: Unset
- "tbz x1, #0, 95f\n"
- "ld1 { v25.b }[4], [x25]\n"
+ "tbz x0, #0, 95f\n"
+ "ld1 { v25.b }[4], [x5]\n"
"b 95f\n"
"93:" // Oddments: Load (5, 0): Bit 2: Unset
- "tbz x1, #1, 94f\n"
- "ld1 { v25.h }[0], [x25], #0x2\n"
- "tbz x1, #0, 95f\n"
- "ld1 { v25.b }[2], [x25]\n"
+ "tbz x0, #1, 94f\n"
+ "ld1 { v25.h }[0], [x5], #0x2\n"
+ "tbz x0, #0, 95f\n"
+ "ld1 { v25.b }[2], [x5]\n"
"b 95f\n"
"94:" // Oddments: Load (5, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 95f\n"
- "ld1 { v25.b }[0], [x25]\n"
+ "tbz x0, #0, 95f\n"
+ "ld1 { v25.b }[0], [x5]\n"
"95:" // Oddments: Load (5, 0): Bit 2: End
"ssubl v25.8h, v25.8b, v9.8b\n"
- "ldr x24, [x4, #0xf8]\n"
- "smlal v8.4s, v25.4h, v0.4h\n"
+ "ldr x25, [x20, #0xf8]\n"
+ "smlal v10.4s, v25.4h, v0.4h\n"
"smlal2 v7.4s, v25.8h, v0.8h\n"
- "add x24, x24, x0\n"
- "tbz x1, #2, 97f\n"
- "ld1 { v24.s }[0], [x24], #0x4\n"
- "tbz x1, #1, 96f\n"
- "ld1 { v24.h }[2], [x24], #0x2\n"
- "tbz x1, #0, 99f\n"
- "ld1 { v24.b }[6], [x24]\n"
+ "add x25, x25, x24\n"
+ "tbz x0, #2, 97f\n"
+ "ld1 { v24.s }[0], [x25], #0x4\n"
+ "tbz x0, #1, 96f\n"
+ "ld1 { v24.h }[2], [x25], #0x2\n"
+ "tbz x0, #0, 99f\n"
+ "ld1 { v24.b }[6], [x25]\n"
"b 99f\n"
"96:" // Oddments: Load (5, 1): Bit 2: Bit 1: Unset
- "tbz x1, #0, 99f\n"
- "ld1 { v24.b }[4], [x24]\n"
+ "tbz x0, #0, 99f\n"
+ "ld1 { v24.b }[4], [x25]\n"
"b 99f\n"
"97:" // Oddments: Load (5, 1): Bit 2: Unset
- "tbz x1, #1, 98f\n"
- "ld1 { v24.h }[0], [x24], #0x2\n"
- "tbz x1, #0, 99f\n"
- "ld1 { v24.b }[2], [x24]\n"
+ "tbz x0, #1, 98f\n"
+ "ld1 { v24.h }[0], [x25], #0x2\n"
+ "tbz x0, #0, 99f\n"
+ "ld1 { v24.b }[2], [x25]\n"
"b 99f\n"
"98:" // Oddments: Load (5, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 99f\n"
- "ld1 { v24.b }[0], [x24]\n"
+ "tbz x0, #0, 99f\n"
+ "ld1 { v24.b }[0], [x25]\n"
"99:" // Oddments: Load (5, 1): Bit 2: End
- "ldr d1, [x3, #0xa8]\n"
+ "ldr d1, [x23, #0xa8]\n"
"ssubl v24.8h, v24.8b, v9.8b\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
- "ldr x23, [x4, #0x100]\n"
- "smlal v17.4s, v24.4h, v0.4h\n"
- "smlal2 v21.4s, v24.8h, v0.8h\n"
- "add x23, x23, x0\n"
- "smlal v13.4s, v23.4h, v1.4h\n"
- "smlal2 v19.4s, v23.8h, v1.8h\n"
- "smlal v20.4s, v31.4h, v1.4h\n"
- "smlal2 v10.4s, v31.8h, v1.8h\n"
- "smlal v8.4s, v24.4h, v1.4h\n"
+ "ssubl v1.8h, v1.8b, v14.8b\n"
+ "ldr x26, [x20, #0x100]\n"
+ "smlal v6.4s, v24.4h, v0.4h\n"
+ "smlal2 v5.4s, v24.8h, v0.8h\n"
+ "add x26, x26, x24\n"
+ "smlal v15.4s, v23.4h, v1.4h\n"
+ "smlal2 v16.4s, v23.8h, v1.8h\n"
+ "smlal v17.4s, v31.4h, v1.4h\n"
+ "smlal2 v8.4s, v31.8h, v1.8h\n"
+ "smlal v10.4s, v24.4h, v1.4h\n"
"smlal2 v7.4s, v24.8h, v1.8h\n"
- "tbz x1, #2, 101f\n"
- "ld1 { v27.s }[0], [x23], #0x4\n"
- "tbz x1, #1, 100f\n"
- "ld1 { v27.h }[2], [x23], #0x2\n"
- "tbz x1, #0, 103f\n"
- "ld1 { v27.b }[6], [x23]\n"
+ "tbz x0, #2, 101f\n"
+ "ld1 { v27.s }[0], [x26], #0x4\n"
+ "tbz x0, #1, 100f\n"
+ "ld1 { v27.h }[2], [x26], #0x2\n"
+ "tbz x0, #0, 103f\n"
+ "ld1 { v27.b }[6], [x26]\n"
"b 103f\n"
"100:" // Oddments: Load (5, 2): Bit 2: Bit 1: Unset
- "tbz x1, #0, 103f\n"
- "ld1 { v27.b }[4], [x23]\n"
+ "tbz x0, #0, 103f\n"
+ "ld1 { v27.b }[4], [x26]\n"
"b 103f\n"
"101:" // Oddments: Load (5, 2): Bit 2: Unset
- "tbz x1, #1, 102f\n"
- "ld1 { v27.h }[0], [x23], #0x2\n"
- "tbz x1, #0, 103f\n"
- "ld1 { v27.b }[2], [x23]\n"
+ "tbz x0, #1, 102f\n"
+ "ld1 { v27.h }[0], [x26], #0x2\n"
+ "tbz x0, #0, 103f\n"
+ "ld1 { v27.b }[2], [x26]\n"
"b 103f\n"
"102:" // Oddments: Load (5, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 103f\n"
- "ld1 { v27.b }[0], [x23]\n"
+ "tbz x0, #0, 103f\n"
+ "ld1 { v27.b }[0], [x26]\n"
"103:" // Oddments: Load (5, 2): Bit 2: End
- "ldr d2, [x3, #0xb0]\n"
+ "ldr d2, [x23, #0xb0]\n"
"ssubl v27.8h, v27.8b, v9.8b\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
- "ldr x15, [x4, #0x108]\n"
- "smlal v17.4s, v27.4h, v1.4h\n"
- "smlal2 v21.4s, v27.8h, v1.8h\n"
- "add x15, x15, x0\n"
- "smlal v13.4s, v31.4h, v2.4h\n"
- "smlal2 v19.4s, v31.8h, v2.8h\n"
- "smlal v20.4s, v30.4h, v2.4h\n"
- "smlal2 v10.4s, v30.8h, v2.8h\n"
- "smlal v8.4s, v27.4h, v2.4h\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "ldr x12, [x20, #0x108]\n"
+ "smlal v6.4s, v27.4h, v1.4h\n"
+ "smlal2 v5.4s, v27.8h, v1.8h\n"
+ "add x12, x12, x24\n"
+ "smlal v15.4s, v31.4h, v2.4h\n"
+ "smlal2 v16.4s, v31.8h, v2.8h\n"
+ "smlal v17.4s, v30.4h, v2.4h\n"
+ "smlal2 v8.4s, v30.8h, v2.8h\n"
+ "smlal v10.4s, v27.4h, v2.4h\n"
"smlal2 v7.4s, v27.8h, v2.8h\n"
- "tbz x1, #2, 105f\n"
- "ld1 { v25.s }[0], [x15], #0x4\n"
- "tbz x1, #1, 104f\n"
- "ld1 { v25.h }[2], [x15], #0x2\n"
- "tbz x1, #0, 107f\n"
- "ld1 { v25.b }[6], [x15]\n"
+ "tbz x0, #2, 105f\n"
+ "ld1 { v25.s }[0], [x12], #0x4\n"
+ "tbz x0, #1, 104f\n"
+ "ld1 { v25.h }[2], [x12], #0x2\n"
+ "tbz x0, #0, 107f\n"
+ "ld1 { v25.b }[6], [x12]\n"
"b 107f\n"
"104:" // Oddments: Load (5, 3): Bit 2: Bit 1: Unset
- "tbz x1, #0, 107f\n"
- "ld1 { v25.b }[4], [x15]\n"
+ "tbz x0, #0, 107f\n"
+ "ld1 { v25.b }[4], [x12]\n"
"b 107f\n"
"105:" // Oddments: Load (5, 3): Bit 2: Unset
- "tbz x1, #1, 106f\n"
- "ld1 { v25.h }[0], [x15], #0x2\n"
- "tbz x1, #0, 107f\n"
- "ld1 { v25.b }[2], [x15]\n"
+ "tbz x0, #1, 106f\n"
+ "ld1 { v25.h }[0], [x12], #0x2\n"
+ "tbz x0, #0, 107f\n"
+ "ld1 { v25.b }[2], [x12]\n"
"b 107f\n"
"106:" // Oddments: Load (5, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 107f\n"
- "ld1 { v25.b }[0], [x15]\n"
+ "tbz x0, #0, 107f\n"
+ "ld1 { v25.b }[0], [x12]\n"
"107:" // Oddments: Load (5, 3): Bit 2: End
- "ldr d3, [x3, #0xb8]\n"
+ "ldr d3, [x23, #0xb8]\n"
"ssubl v25.8h, v25.8b, v9.8b\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "ldr x21, [x4, #0x110]\n"
- "smlal v17.4s, v25.4h, v2.4h\n"
- "smlal2 v21.4s, v25.8h, v2.8h\n"
- "add x21, x21, x0\n"
- "smlal v13.4s, v30.4h, v3.4h\n"
- "smlal2 v19.4s, v30.8h, v3.8h\n"
- "smlal v20.4s, v28.4h, v3.4h\n"
- "smlal2 v10.4s, v28.8h, v3.8h\n"
- "smlal v8.4s, v25.4h, v3.4h\n"
+ "ssubl v3.8h, v3.8b, v14.8b\n"
+ "ldr x14, [x20, #0x110]\n"
+ "smlal v6.4s, v25.4h, v2.4h\n"
+ "smlal2 v5.4s, v25.8h, v2.8h\n"
+ "add x14, x14, x24\n"
+ "smlal v15.4s, v30.4h, v3.4h\n"
+ "smlal2 v16.4s, v30.8h, v3.8h\n"
+ "smlal v17.4s, v28.4h, v3.4h\n"
+ "smlal2 v8.4s, v28.8h, v3.8h\n"
+ "smlal v10.4s, v25.4h, v3.4h\n"
"smlal2 v7.4s, v25.8h, v3.8h\n"
- "tbz x1, #2, 109f\n"
- "ld1 { v24.s }[0], [x21], #0x4\n"
- "tbz x1, #1, 108f\n"
- "ld1 { v24.h }[2], [x21], #0x2\n"
- "tbz x1, #0, 111f\n"
- "ld1 { v24.b }[6], [x21]\n"
+ "tbz x0, #2, 109f\n"
+ "ld1 { v24.s }[0], [x14], #0x4\n"
+ "tbz x0, #1, 108f\n"
+ "ld1 { v24.h }[2], [x14], #0x2\n"
+ "tbz x0, #0, 111f\n"
+ "ld1 { v24.b }[6], [x14]\n"
"b 111f\n"
"108:" // Oddments: Load (5, 4): Bit 2: Bit 1: Unset
- "tbz x1, #0, 111f\n"
- "ld1 { v24.b }[4], [x21]\n"
+ "tbz x0, #0, 111f\n"
+ "ld1 { v24.b }[4], [x14]\n"
"b 111f\n"
"109:" // Oddments: Load (5, 4): Bit 2: Unset
- "tbz x1, #1, 110f\n"
- "ld1 { v24.h }[0], [x21], #0x2\n"
- "tbz x1, #0, 111f\n"
- "ld1 { v24.b }[2], [x21]\n"
+ "tbz x0, #1, 110f\n"
+ "ld1 { v24.h }[0], [x14], #0x2\n"
+ "tbz x0, #0, 111f\n"
+ "ld1 { v24.b }[2], [x14]\n"
"b 111f\n"
"110:" // Oddments: Load (5, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 111f\n"
- "ld1 { v24.b }[0], [x21]\n"
+ "tbz x0, #0, 111f\n"
+ "ld1 { v24.b }[0], [x14]\n"
"111:" // Oddments: Load (5, 4): Bit 2: End
- "ldr d4, [x3, #0xc0]\n"
+ "ldr d4, [x23, #0xc0]\n"
"ssubl v24.8h, v24.8b, v9.8b\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "ldr x20, [x4, #0x118]\n"
- "smlal v17.4s, v24.4h, v3.4h\n"
- "smlal2 v21.4s, v24.8h, v3.8h\n"
- "add x20, x20, x0\n"
- "smlal v13.4s, v28.4h, v4.4h\n"
- "smlal2 v19.4s, v28.8h, v4.8h\n"
- "smlal v20.4s, v26.4h, v4.4h\n"
- "smlal2 v10.4s, v26.8h, v4.8h\n"
- "smlal v8.4s, v24.4h, v4.4h\n"
+ "ssubl v4.8h, v4.8b, v14.8b\n"
+ "ldr x21, [x20, #0x118]\n"
+ "smlal v6.4s, v24.4h, v3.4h\n"
+ "smlal2 v5.4s, v24.8h, v3.8h\n"
+ "add x21, x21, x24\n"
+ "smlal v15.4s, v28.4h, v4.4h\n"
+ "smlal2 v16.4s, v28.8h, v4.8h\n"
+ "smlal v17.4s, v26.4h, v4.4h\n"
+ "smlal2 v8.4s, v26.8h, v4.8h\n"
+ "smlal v10.4s, v24.4h, v4.4h\n"
"smlal2 v7.4s, v24.8h, v4.8h\n"
- "tbz x1, #2, 113f\n"
- "ld1 { v27.s }[0], [x20], #0x4\n"
- "tbz x1, #1, 112f\n"
- "ld1 { v27.h }[2], [x20], #0x2\n"
- "tbz x1, #0, 115f\n"
- "ld1 { v27.b }[6], [x20]\n"
+ "tbz x0, #2, 113f\n"
+ "ld1 { v27.s }[0], [x21], #0x4\n"
+ "tbz x0, #1, 112f\n"
+ "ld1 { v27.h }[2], [x21], #0x2\n"
+ "tbz x0, #0, 115f\n"
+ "ld1 { v27.b }[6], [x21]\n"
"b 115f\n"
"112:" // Oddments: Load (5, 5): Bit 2: Bit 1: Unset
- "tbz x1, #0, 115f\n"
- "ld1 { v27.b }[4], [x20]\n"
+ "tbz x0, #0, 115f\n"
+ "ld1 { v27.b }[4], [x21]\n"
"b 115f\n"
"113:" // Oddments: Load (5, 5): Bit 2: Unset
- "tbz x1, #1, 114f\n"
- "ld1 { v27.h }[0], [x20], #0x2\n"
- "tbz x1, #0, 115f\n"
- "ld1 { v27.b }[2], [x20]\n"
+ "tbz x0, #1, 114f\n"
+ "ld1 { v27.h }[0], [x21], #0x2\n"
+ "tbz x0, #0, 115f\n"
+ "ld1 { v27.b }[2], [x21]\n"
"b 115f\n"
"114:" // Oddments: Load (5, 5): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 115f\n"
- "ld1 { v27.b }[0], [x20]\n"
+ "tbz x0, #0, 115f\n"
+ "ld1 { v27.b }[0], [x21]\n"
"115:" // Oddments: Load (5, 5): Bit 2: End
"ssubl v27.8h, v27.8b, v9.8b\n"
- "smlal v17.4s, v27.4h, v4.4h\n"
- "smlal2 v21.4s, v27.8h, v4.8h\n"
- "tbz x1, #2, 117f\n"
- "ld1 { v18.4s }, [x5], #0x10\n"
- "ld1 { v6.4s }, [x8], #0x10\n"
- "tbz x1, #1, 116f\n"
- "ld1 { v5.d }[0], [x5], #0x8\n"
- "ld1 { v22.d }[0], [x8], #0x8\n"
- "tbz x1, #0, 119f\n"
- "ld1 { v5.s }[2], [x5]\n"
- "ld1 { v22.s }[2], [x8]\n"
+ "smlal v6.4s, v27.4h, v4.4h\n"
+ "smlal2 v5.4s, v27.8h, v4.8h\n"
+ "tbz x0, #2, 117f\n"
+ "ld1 { v12.4s }, [x10], #0x10\n"
+ "ld1 { v19.4s }, [x1], #0x10\n"
+ "tbz x0, #1, 116f\n"
+ "ld1 { v20.d }[0], [x10], #0x8\n"
+ "ld1 { v29.d }[0], [x1], #0x8\n"
+ "tbz x0, #0, 119f\n"
+ "ld1 { v20.s }[2], [x10]\n"
+ "ld1 { v29.s }[2], [x1]\n"
"b 119f\n"
"116:" // Oddments: Load requant params: Bit 2: Bit 1: Unset
- "tbz x1, #0, 119f\n"
- "ld1 { v5.s }[0], [x5]\n"
- "ld1 { v22.s }[0], [x8]\n"
+ "tbz x0, #0, 119f\n"
+ "ld1 { v20.s }[0], [x10]\n"
+ "ld1 { v29.s }[0], [x1]\n"
"b 119f\n"
"117:" // Oddments: Load requant params: Bit 2: Unset
- "tbz x1, #1, 118f\n"
- "ld1 { v18.d }[0], [x5], #0x8\n"
- "ld1 { v6.d }[0], [x8], #0x8\n"
- "tbz x1, #0, 119f\n"
- "ld1 { v18.s }[2], [x5]\n"
- "ld1 { v6.s }[2], [x8]\n"
+ "tbz x0, #1, 118f\n"
+ "ld1 { v12.d }[0], [x10], #0x8\n"
+ "ld1 { v19.d }[0], [x1], #0x8\n"
+ "tbz x0, #0, 119f\n"
+ "ld1 { v12.s }[2], [x10]\n"
+ "ld1 { v19.s }[2], [x1]\n"
"b 119f\n"
"118:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 119f\n"
- "ld1 { v18.s }[0], [x5]\n"
- "ld1 { v6.s }[0], [x8]\n"
+ "tbz x0, #0, 119f\n"
+ "ld1 { v12.s }[0], [x10]\n"
+ "ld1 { v19.s }[0], [x1]\n"
"119:" // Oddments: Load requant params: Bit 2: End
- "sqrdmulh v13.4s, v13.4s, v18.4s\n"
- "and v30.16b, v13.16b, v6.16b\n"
- "add x17, x17, x10\n"
- "add x6, x6, x10\n"
- "sqrdmulh v19.4s, v19.4s, v5.4s\n"
- "sshr v30.4s, v30.4s, #0x1f\n"
- "add x7, x7, x10\n"
- "add x16, x16, x10\n"
- "and v16.16b, v19.16b, v22.16b\n"
- "sqrdmulh v20.4s, v20.4s, v18.4s\n"
- "sqrdmulh v8.4s, v8.4s, v18.4s\n"
- "sqrdmulh v17.4s, v17.4s, v18.4s\n"
- "sqadd v13.4s, v13.4s, v30.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "and v0.16b, v20.16b, v6.16b\n"
- "sqrdmulh v10.4s, v10.4s, v5.4s\n"
- "and v18.16b, v8.16b, v6.16b\n"
- "sqrdmulh v7.4s, v7.4s, v5.4s\n"
- "and v30.16b, v17.16b, v6.16b\n"
- "sqrdmulh v21.4s, v21.4s, v5.4s\n"
- "sqadd v19.4s, v19.4s, v16.4s\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "and v26.16b, v10.16b, v22.16b\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "and v23.16b, v7.16b, v22.16b\n"
- "sshr v30.4s, v30.4s, #0x1f\n"
- "and v16.16b, v21.16b, v22.16b\n"
- "sqadd v20.4s, v20.4s, v0.4s\n"
- "sshr v26.4s, v26.4s, #0x1f\n"
- "sqadd v8.4s, v8.4s, v18.4s\n"
+ "sqrdmulh v15.4s, v15.4s, v12.4s\n"
+ "sqrdmulh v17.4s, v17.4s, v12.4s\n"
+ "add x16, x16, x22\n"
+ "add x8, x8, x22\n"
+ "sqrdmulh v10.4s, v10.4s, v12.4s\n"
+ "sqrdmulh v6.4s, v6.4s, v12.4s\n"
+ "add x4, x4, x22\n"
+ "add x7, x7, x22\n"
+ "and v23.16b, v15.16b, v19.16b\n"
+ "sqrdmulh v16.4s, v16.4s, v20.4s\n"
+ "and v22.16b, v17.16b, v19.16b\n"
+ "sqrdmulh v8.4s, v8.4s, v20.4s\n"
+ "and v21.16b, v10.16b, v19.16b\n"
+ "sqrdmulh v7.4s, v7.4s, v20.4s\n"
+ "and v26.16b, v6.16b, v19.16b\n"
+ "sqrdmulh v5.4s, v5.4s, v20.4s\n"
"sshr v23.4s, v23.4s, #0x1f\n"
- "sqadd v17.4s, v17.4s, v30.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "srshl v13.4s, v13.4s, v6.4s\n"
- "srshl v20.4s, v20.4s, v6.4s\n"
- "sqadd v10.4s, v10.4s, v26.4s\n"
- "srshl v8.4s, v8.4s, v6.4s\n"
- "sqadd v7.4s, v7.4s, v23.4s\n"
- "srshl v17.4s, v17.4s, v6.4s\n"
- "sqadd v21.4s, v21.4s, v16.4s\n"
- "srshl v19.4s, v19.4s, v22.4s\n"
- "sqxtn v13.4h, v13.4s\n"
- "srshl v10.4s, v10.4s, v22.4s\n"
- "sqxtn v20.4h, v20.4s\n"
- "srshl v7.4s, v7.4s, v22.4s\n"
- "sqxtn v8.4h, v8.4s\n"
- "srshl v21.4s, v21.4s, v22.4s\n"
+ "and v4.16b, v16.16b, v29.16b\n"
+ "sshr v22.4s, v22.4s, #0x1f\n"
+ "and v2.16b, v8.16b, v29.16b\n"
+ "sshr v21.4s, v21.4s, #0x1f\n"
+ "and v3.16b, v7.16b, v29.16b\n"
+ "sshr v26.4s, v26.4s, #0x1f\n"
+ "and v25.16b, v5.16b, v29.16b\n"
+ "sqadd v15.4s, v15.4s, v23.4s\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "sqadd v17.4s, v17.4s, v22.4s\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sqadd v10.4s, v10.4s, v21.4s\n"
+ "sshr v3.4s, v3.4s, #0x1f\n"
+ "sqadd v6.4s, v6.4s, v26.4s\n"
+ "sshr v25.4s, v25.4s, #0x1f\n"
+ "srshl v15.4s, v15.4s, v19.4s\n"
+ "sqadd v16.4s, v16.4s, v4.4s\n"
+ "srshl v17.4s, v17.4s, v19.4s\n"
+ "sqadd v8.4s, v8.4s, v2.4s\n"
+ "srshl v10.4s, v10.4s, v19.4s\n"
+ "sqadd v7.4s, v7.4s, v3.4s\n"
+ "srshl v6.4s, v6.4s, v19.4s\n"
+ "sqadd v5.4s, v5.4s, v25.4s\n"
+ "srshl v16.4s, v16.4s, v29.4s\n"
+ "sqxtn v15.4h, v15.4s\n"
+ "srshl v8.4s, v8.4s, v29.4s\n"
"sqxtn v17.4h, v17.4s\n"
- "sqxtn2 v13.8h, v19.4s\n"
- "sqxtn2 v20.8h, v10.4s\n"
- "sqxtn2 v8.8h, v7.4s\n"
- "sqxtn2 v17.8h, v21.4s\n"
- "sqadd v13.8h, v13.8h, v14.8h\n"
- "sqadd v20.8h, v20.8h, v14.8h\n"
- "sqadd v8.8h, v8.8h, v14.8h\n"
- "sqadd v17.8h, v17.8h, v14.8h\n"
- "smax v13.8h, v13.8h, v12.8h\n"
- "smax v20.8h, v20.8h, v12.8h\n"
- "smax v8.8h, v8.8h, v12.8h\n"
- "smax v17.8h, v17.8h, v12.8h\n"
- "smin v13.8h, v13.8h, v11.8h\n"
- "smin v20.8h, v20.8h, v11.8h\n"
- "smin v8.8h, v8.8h, v11.8h\n"
- "smin v17.8h, v17.8h, v11.8h\n"
- "uzp1 v13.16b, v13.16b, v13.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "srshl v7.4s, v7.4s, v29.4s\n"
+ "sqxtn v10.4h, v10.4s\n"
+ "srshl v5.4s, v5.4s, v29.4s\n"
+ "sqxtn v6.4h, v6.4s\n"
+ "sqxtn2 v15.8h, v16.4s\n"
+ "sqxtn2 v17.8h, v8.4s\n"
+ "sqxtn2 v10.8h, v7.4s\n"
+ "sqxtn2 v6.8h, v5.4s\n"
+ "sqadd v15.8h, v15.8h, v18.8h\n"
+ "sqadd v17.8h, v17.8h, v18.8h\n"
+ "sqadd v10.8h, v10.8h, v18.8h\n"
+ "sqadd v6.8h, v6.8h, v18.8h\n"
+ "smax v15.8h, v15.8h, v11.8h\n"
+ "smax v17.8h, v17.8h, v11.8h\n"
+ "smax v10.8h, v10.8h, v11.8h\n"
+ "smax v6.8h, v6.8h, v11.8h\n"
+ "smin v15.8h, v15.8h, v13.8h\n"
+ "smin v17.8h, v17.8h, v13.8h\n"
+ "smin v10.8h, v10.8h, v13.8h\n"
+ "smin v6.8h, v6.8h, v13.8h\n"
+ "uzp1 v15.16b, v15.16b, v15.16b\n"
"uzp1 v17.16b, v17.16b, v17.16b\n"
- "tbz x1, #2, 121f\n"
- "st1 { v13.s }[0], [x17], #0x4\n"
- "st1 { v20.s }[0], [x6], #0x4\n"
- "st1 { v8.s }[0], [x7], #0x4\n"
- "st1 { v17.s }[0], [x16], #0x4\n"
- "tbz x1, #1, 120f\n"
- "st1 { v13.h }[2], [x17], #0x2\n"
- "st1 { v20.h }[2], [x6], #0x2\n"
- "st1 { v8.h }[2], [x7], #0x2\n"
- "st1 { v17.h }[2], [x16], #0x2\n"
- "tbz x1, #0, 123f\n"
- "st1 { v13.b }[6], [x17], #0x1\n"
- "st1 { v20.b }[6], [x6], #0x1\n"
- "st1 { v8.b }[6], [x7], #0x1\n"
- "st1 { v17.b }[6], [x16], #0x1\n"
+ "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "tbz x0, #2, 121f\n"
+ "st1 { v15.s }[0], [x16], #0x4\n"
+ "st1 { v17.s }[0], [x8], #0x4\n"
+ "st1 { v10.s }[0], [x4], #0x4\n"
+ "st1 { v6.s }[0], [x7], #0x4\n"
+ "tbz x0, #1, 120f\n"
+ "st1 { v15.h }[2], [x16], #0x2\n"
+ "st1 { v17.h }[2], [x8], #0x2\n"
+ "st1 { v10.h }[2], [x4], #0x2\n"
+ "st1 { v6.h }[2], [x7], #0x2\n"
+ "tbz x0, #0, 123f\n"
+ "st1 { v15.b }[6], [x16], #0x1\n"
+ "st1 { v17.b }[6], [x8], #0x1\n"
+ "st1 { v10.b }[6], [x4], #0x1\n"
+ "st1 { v6.b }[6], [x7], #0x1\n"
"b 123f\n"
"120:" // Oddments: Bit 2: Bit 1: Unset
- "tbz x1, #0, 123f\n"
- "st1 { v13.b }[4], [x17], #0x1\n"
- "st1 { v20.b }[4], [x6], #0x1\n"
- "st1 { v8.b }[4], [x7], #0x1\n"
- "st1 { v17.b }[4], [x16], #0x1\n"
+ "tbz x0, #0, 123f\n"
+ "st1 { v15.b }[4], [x16], #0x1\n"
+ "st1 { v17.b }[4], [x8], #0x1\n"
+ "st1 { v10.b }[4], [x4], #0x1\n"
+ "st1 { v6.b }[4], [x7], #0x1\n"
"b 123f\n"
"121:" // Oddments: Bit 2: Unset
- "tbz x1, #1, 122f\n"
- "st1 { v13.h }[0], [x17], #0x2\n"
- "st1 { v20.h }[0], [x6], #0x2\n"
- "st1 { v8.h }[0], [x7], #0x2\n"
- "st1 { v17.h }[0], [x16], #0x2\n"
- "tbz x1, #0, 123f\n"
- "st1 { v13.b }[2], [x17], #0x1\n"
- "st1 { v20.b }[2], [x6], #0x1\n"
- "st1 { v8.b }[2], [x7], #0x1\n"
- "st1 { v17.b }[2], [x16], #0x1\n"
+ "tbz x0, #1, 122f\n"
+ "st1 { v15.h }[0], [x16], #0x2\n"
+ "st1 { v17.h }[0], [x8], #0x2\n"
+ "st1 { v10.h }[0], [x4], #0x2\n"
+ "st1 { v6.h }[0], [x7], #0x2\n"
+ "tbz x0, #0, 123f\n"
+ "st1 { v15.b }[2], [x16], #0x1\n"
+ "st1 { v17.b }[2], [x8], #0x1\n"
+ "st1 { v10.b }[2], [x4], #0x1\n"
+ "st1 { v6.b }[2], [x7], #0x1\n"
"b 123f\n"
"122:" // Oddments: Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 123f\n"
- "st1 { v13.b }[0], [x17], #0x1\n"
- "st1 { v20.b }[0], [x6], #0x1\n"
- "st1 { v8.b }[0], [x7], #0x1\n"
- "st1 { v17.b }[0], [x16], #0x1\n"
+ "tbz x0, #0, 123f\n"
+ "st1 { v15.b }[0], [x16], #0x1\n"
+ "st1 { v17.b }[0], [x8], #0x1\n"
+ "st1 { v10.b }[0], [x4], #0x1\n"
+ "st1 { v6.b }[0], [x7], #0x1\n"
"123:" // Oddments: Bit 2: End
"124:" // End
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
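The hunk above is pure register renumbering: x19 returns to the clobber list and the surviving general-purpose and vector registers are reshuffled, while the instruction sequence itself is unchanged. It consists of widening smlal/smlal2 multiply-accumulates over the filter taps, tbz-driven partial loads that assemble the final 1-7 "oddment" channels a word, halfword and byte at a time, and a fixed-point requantisation tail. Per 32-bit accumulator lane, the sqrdmulh / and+sshr+sqadd / srshl / sqxtn / sqadd / smax / smin tail computes roughly the following; a minimal scalar C++ sketch with illustrative names, not ComputeLibrary API:

    #include <cstdint>

    // Sketch of the requantisation tail, assuming: "mul" and "shift" are the
    // values loaded from requant_muls / requant_shifts (shift <= 0, a negative
    // amount consumed by srshl), and c_offset / minval / maxval come from the
    // Requantize32 block named in the operand list above.
    static int8_t requantize_lane(int32_t acc, int32_t mul, int32_t shift,
                                  int32_t c_offset, int32_t minval, int32_t maxval)
    {
        // sqrdmulh: saturating rounding doubling multiply, high half
        int32_t high;
        if (acc == INT32_MIN && mul == INT32_MIN) {
            high = INT32_MAX;  // the one saturating case
        } else {
            int64_t prod = (int64_t)acc * (int64_t)mul;
            high = (int32_t)((2 * prod + ((int64_t)1 << 31)) >> 32);
        }

        // and + sshr + sqadd fixup, then srshl: a rounding right shift by
        // -shift, with ties rounded away from zero instead of toward +inf
        int32_t rshift = -shift;
        if (rshift > 0) {
            int64_t x = (int64_t)high - (high < 0 ? 1 : 0);  // the sqadd fixup
            high = (int32_t)((x + ((int64_t)1 << (rshift - 1))) >> rshift);
        }

        // sqxtn + sqadd + smax/smin: narrow, add the output offset, clamp
        int32_t out = high + c_offset;
        out = out < minval ? minval : out;
        out = out > maxval ? maxval : out;
        return (int8_t)out;
    }

The intermediate sqxtn saturation to 16 bits is subsumed by the final clamp whenever minval/maxval lie in the quantised output range, so the sketch folds it into the clamp; uzp1 then packs the clamped halfwords down to bytes for the tbz-guarded partial stores.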
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_generic_output9_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_generic_output9_mla_depthfirst/generic.cpp
index 3f345cf95a..78f748ad58 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_generic_output9_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_generic_output9_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
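In the generic output-9 kernel below, the revert again only swaps register assignments (x19 reappears as a scratch address register and the source/accumulator vectors are renumbered). The cbz %x[rq_mul_ptr] and cbz %x[rq_left_shift_ptr] tests near the end of the hunk choose between the per-layer quantisation parameters broadcast in the preamble and optional per-channel overrides; roughly, and with assumed names rather than the actual ComputeLibrary API:

    #include <arm_neon.h>
    #include <cstdint>

    // Hypothetical sketch of the selection behind the cbz tests; on entry the
    // three outputs hold the per-layer defaults loaded with ld1r in the preamble.
    // "channel" is the channel index in x11; the asm forms the byte offset with
    // lsl x19, x11, #0x2, which int32_t pointer arithmetic reproduces here.
    static inline void select_requant_params(
        const int32_t *rq_mul_ptr, const int32_t *rq_right_shift_ptr,
        const int32_t *rq_left_shift_ptr, uint64_t channel,
        int32x4_t *mul, int32x4_t *right_shift, int32x4_t *left_shift)
    {
        if (rq_mul_ptr != nullptr) {            // cbz %x[rq_mul_ptr], 5f
            *mul         = vld1q_s32(rq_mul_ptr + channel);          // ldr q6
            *right_shift = vld1q_s32(rq_right_shift_ptr + channel);  // ldr q5
            if (rq_left_shift_ptr != nullptr) { // cbz %x[rq_left_shift_ptr], 5f
                *left_shift = vld1q_s32(rq_left_shift_ptr + channel); // ldr q7
            }
        }
        // otherwise the per-layer values stand, as when only the per-channel
        // left-shift array is absent
    }

The subsequent sshl / sqrdmulh / srshl sequence then applies the left shift, the doubling-high multiply and the rounding right shift in that order, under the same signed-shift convention as the sketch above.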
@@ -41,577 +41,583 @@ void a64_s8q_nhwc_generic_output9_mla_depthfirst_impl(
)
{
__asm__ __volatile__(
- "lsr x12, %x[n_channels], #0x2\n"
- "add x20, %x[qp], %[offsetof_Requantize32_minval]\n"
- "ld1r { v8.4s }, [x20]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_minval]\n"
+ "ld1r { v12.4s }, [x19]\n"
"add x20, %x[qp], %[offsetof_Requantize32_maxval]\n"
- "ld1r { v7.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_a_offset]\n"
- "ld1r { v6.16b }, [x20]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_a_offset]\n"
+ "ld1r { v11.4s }, [x20]\n"
+ "ld1r { v10.16b }, [x19]\n"
"add x20, %x[qp], %[offsetof_Requantize32_b_offset]\n"
- "ld1r { v5.16b }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v4.4s }, [x20]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_c_offset]\n"
+ "ld1r { v9.16b }, [x20]\n"
+ "ld1r { v8.4s }, [x19]\n"
"add x20, %x[qp], %[offsetof_Requantize32_per_layer_left_shift]\n"
- "ld1r { v3.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_per_layer_mul]\n"
- "ld1r { v2.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_per_layer_right_shift]\n"
- "ld1r { v1.4s }, [x20]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_per_layer_mul]\n"
+ "ld1r { v7.4s }, [x20]\n"
+ "ld1r { v6.4s }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_per_layer_right_shift]\n"
"mov x11, #0x0\n"
- "cbz x12, 6f\n"
+ "ld1r { v5.4s }, [x19]\n"
+ "lsr x10, %x[n_channels], #0x2\n"
+ "cbz x10, 6f\n"
"1:" // Channel loop
- "movi v23.4s, #0x0\n"
+ "movi v27.4s, #0x0\n"
"cbz %x[bias], 2f\n"
- "lsl x20, x11, #0x2\n"
- "ldr q23, [%x[bias], x20]\n"
+ "lsl x19, x11, #0x2\n"
+ "ldr q27, [%x[bias], x19]\n"
"2:" // Channel loop: Load bias: Done
- "ldr s0, [%x[params]], #0x4\n"
- "mov x21, %x[inptrs]\n"
- "ldp x10, x9, [x21], #0x10\n"
- "subs x20, %x[n_points], #0x1\n"
- "ldr s14, [x10, x11]\n"
- "ldr s15, [x9, x11]\n"
- "mov v24.16b, v23.16b\n"
- "mov v25.16b, v23.16b\n"
- "ldp x28, x27, [x21], #0x10\n"
- "ldr s16, [x28, x11]\n"
- "mov v26.16b, v23.16b\n"
- "mov v27.16b, v23.16b\n"
- "ldr s17, [x27, x11]\n"
- "ldp x26, x25, [x21], #0x10\n"
- "mov v28.16b, v23.16b\n"
- "mov v29.16b, v23.16b\n"
- "ldr s18, [x26, x11]\n"
- "ldr s19, [x25, x11]\n"
- "mov v30.16b, v23.16b\n"
- "mov v31.16b, v23.16b\n"
- "ldp x24, x23, [x21], #0x10\n"
- "ldr s20, [x24, x11]\n"
- "ssubl v0.8h, v0.8b, v5.8b\n"
- "ssubl v14.8h, v14.8b, v6.8b\n"
- "ldr s21, [x23, x11]\n"
- "ldr x22, [x21], #0x8\n"
- "ssubl v15.8h, v15.8b, v6.8b\n"
- "ssubl v16.8h, v16.8b, v6.8b\n"
- "ldr s22, [x22, x11]\n"
- "ssubl v17.8h, v17.8b, v6.8b\n"
- "ssubl v18.8h, v18.8b, v6.8b\n"
- "ssubl v19.8h, v19.8b, v6.8b\n"
- "ssubl v20.8h, v20.8b, v6.8b\n"
- "ssubl v21.8h, v21.8b, v6.8b\n"
- "ssubl v22.8h, v22.8b, v6.8b\n"
+ "mov v26.16b, v27.16b\n"
+ "ldr s16, [%x[params]], #0x4\n"
+ "mov x20, %x[inptrs]\n"
+ "mov v25.16b, v27.16b\n"
+ "ldp x9, x28, [x20], #0x10\n"
+ "subs x19, %x[n_points], #0x1\n"
+ "mov v24.16b, v27.16b\n"
+ "ldr s4, [x9, x11]\n"
+ "mov v23.16b, v27.16b\n"
+ "mov v22.16b, v27.16b\n"
+ "ldr s3, [x28, x11]\n"
+ "mov v21.16b, v27.16b\n"
+ "ldp x27, x26, [x20], #0x10\n"
+ "mov v20.16b, v27.16b\n"
+ "ldr s2, [x27, x11]\n"
+ "mov v19.16b, v27.16b\n"
+ "ssubl v16.8h, v16.8b, v9.8b\n"
+ "ldr s1, [x26, x11]\n"
+ "ssubl v4.8h, v4.8b, v10.8b\n"
+ "ldp x25, x24, [x20], #0x10\n"
+ "ssubl v3.8h, v3.8b, v10.8b\n"
+ "ldr s0, [x25, x11]\n"
+ "ssubl v2.8h, v2.8b, v10.8b\n"
+ "ssubl v1.8h, v1.8b, v10.8b\n"
+ "ldr s31, [x24, x11]\n"
+ "ldp x23, x22, [x20], #0x10\n"
+ "ssubl v0.8h, v0.8b, v10.8b\n"
+ "ldr s30, [x23, x11]\n"
+ "ldr s29, [x22, x11]\n"
+ "ssubl v31.8h, v31.8b, v10.8b\n"
+ "ldr x21, [x20], #0x8\n"
+ "ssubl v30.8h, v30.8b, v10.8b\n"
+ "ldr s28, [x21, x11]\n"
+ "ssubl v29.8h, v29.8b, v10.8b\n"
+ "ssubl v28.8h, v28.8b, v10.8b\n"
"ble 4f\n"
"3:" // Channel loop: Planar loop
- "ldp x10, x9, [x21], #0x10\n"
- "ldp x28, x27, [x21], #0x10\n"
- "smlal v23.4s, v14.4h, v0.4h\n"
- "smlal v24.4s, v15.4h, v0.4h\n"
- "ldr s14, [x10, x11]\n"
- "ldr s15, [x9, x11]\n"
- "smlal v25.4s, v16.4h, v0.4h\n"
- "smlal v26.4s, v17.4h, v0.4h\n"
- "ldr s16, [x28, x11]\n"
- "ldr s17, [x27, x11]\n"
- "smlal v27.4s, v18.4h, v0.4h\n"
- "smlal v28.4s, v19.4h, v0.4h\n"
- "ldp x26, x25, [x21], #0x10\n"
- "ldr s18, [x26, x11]\n"
- "smlal v29.4s, v20.4h, v0.4h\n"
- "smlal v30.4s, v21.4h, v0.4h\n"
- "ldr s19, [x25, x11]\n"
- "ldp x24, x23, [x21], #0x10\n"
- "smlal v31.4s, v22.4h, v0.4h\n"
- "subs x20, x20, #0x1\n"
- "ldr s0, [%x[params]], #0x4\n"
- "ldr s20, [x24, x11]\n"
- "ssubl v0.8h, v0.8b, v5.8b\n"
- "ssubl v14.8h, v14.8b, v6.8b\n"
- "ldr s21, [x23, x11]\n"
- "ldr x22, [x21], #0x8\n"
- "ssubl v15.8h, v15.8b, v6.8b\n"
- "ssubl v16.8h, v16.8b, v6.8b\n"
- "ldr s22, [x22, x11]\n"
- "ssubl v17.8h, v17.8b, v6.8b\n"
- "ssubl v18.8h, v18.8b, v6.8b\n"
- "ssubl v19.8h, v19.8b, v6.8b\n"
- "ssubl v20.8h, v20.8b, v6.8b\n"
- "ssubl v21.8h, v21.8b, v6.8b\n"
- "ssubl v22.8h, v22.8b, v6.8b\n"
+ "smlal v27.4s, v4.4h, v16.4h\n"
+ "ldp x9, x28, [x20], #0x10\n"
+ "subs x19, x19, #0x1\n"
+ "smlal v26.4s, v3.4h, v16.4h\n"
+ "ldr s4, [x9, x11]\n"
+ "smlal v25.4s, v2.4h, v16.4h\n"
+ "smlal v24.4s, v1.4h, v16.4h\n"
+ "ldr s3, [x28, x11]\n"
+ "smlal v23.4s, v0.4h, v16.4h\n"
+ "ldp x27, x26, [x20], #0x10\n"
+ "smlal v22.4s, v31.4h, v16.4h\n"
+ "smlal v21.4s, v30.4h, v16.4h\n"
+ "ldr s2, [x27, x11]\n"
+ "smlal v20.4s, v29.4h, v16.4h\n"
+ "smlal v19.4s, v28.4h, v16.4h\n"
+ "ldr s16, [%x[params]], #0x4\n"
+ "ssubl v4.8h, v4.8b, v10.8b\n"
+ "ldr s1, [x26, x11]\n"
+ "ssubl v3.8h, v3.8b, v10.8b\n"
+ "ldp x25, x24, [x20], #0x10\n"
+ "ssubl v2.8h, v2.8b, v10.8b\n"
+ "ldr s0, [x25, x11]\n"
+ "ssubl v16.8h, v16.8b, v9.8b\n"
+ "ssubl v1.8h, v1.8b, v10.8b\n"
+ "ldr s31, [x24, x11]\n"
+ "ldp x23, x22, [x20], #0x10\n"
+ "ssubl v0.8h, v0.8b, v10.8b\n"
+ "ldr s30, [x23, x11]\n"
+ "ldr s29, [x22, x11]\n"
+ "ssubl v31.8h, v31.8b, v10.8b\n"
+ "ldr x21, [x20], #0x8\n"
+ "ssubl v30.8h, v30.8b, v10.8b\n"
+ "ldr s28, [x21, x11]\n"
+ "ssubl v29.8h, v29.8b, v10.8b\n"
+ "ssubl v28.8h, v28.8b, v10.8b\n"
"bgt 3b\n"
"4:" // Channel loop: Planar tail
- "smlal v23.4s, v14.4h, v0.4h\n"
- "smlal v24.4s, v15.4h, v0.4h\n"
- "smlal v25.4s, v16.4h, v0.4h\n"
- "smlal v26.4s, v17.4h, v0.4h\n"
- "smlal v27.4s, v18.4h, v0.4h\n"
- "smlal v28.4s, v19.4h, v0.4h\n"
- "smlal v29.4s, v20.4h, v0.4h\n"
- "smlal v30.4s, v21.4h, v0.4h\n"
- "smlal v31.4s, v22.4h, v0.4h\n"
+ "smlal v27.4s, v4.4h, v16.4h\n"
+ "smlal v26.4s, v3.4h, v16.4h\n"
+ "smlal v25.4s, v2.4h, v16.4h\n"
+ "smlal v24.4s, v1.4h, v16.4h\n"
+ "smlal v23.4s, v0.4h, v16.4h\n"
+ "smlal v22.4s, v31.4h, v16.4h\n"
+ "smlal v21.4s, v30.4h, v16.4h\n"
+ "smlal v20.4s, v29.4h, v16.4h\n"
+ "smlal v19.4s, v28.4h, v16.4h\n"
"cbz %x[rq_mul_ptr], 5f\n"
- "lsl x20, x11, #0x2\n"
- "ldr q2, [%x[rq_mul_ptr], x20]\n"
- "ldr q1, [%x[rq_right_shift_ptr], x20]\n"
+ "lsl x19, x11, #0x2\n"
+ "ldr q6, [%x[rq_mul_ptr], x19]\n"
+ "ldr q5, [%x[rq_right_shift_ptr], x19]\n"
"cbz %x[rq_left_shift_ptr], 5f\n"
- "ldr q3, [%x[rq_left_shift_ptr], x20]\n"
+ "ldr q7, [%x[rq_left_shift_ptr], x19]\n"
"5:" // Channel loop: Load quantisation parameters: Done
- "sshl v23.4s, v23.4s, v3.4s\n"
- "sshl v24.4s, v24.4s, v3.4s\n"
- "ldp x28, x27, [%x[outptrs], #0x0]\n"
- "ldp x26, x25, [%x[outptrs], #0x10]\n"
- "sshl v25.4s, v25.4s, v3.4s\n"
- "sqrdmulh v23.4s, v23.4s, v2.4s\n"
- "ldp x24, x23, [%x[outptrs], #0x20]\n"
- "ldp x22, x21, [%x[outptrs], #0x30]\n"
- "sqrdmulh v24.4s, v24.4s, v2.4s\n"
- "sqrdmulh v25.4s, v25.4s, v2.4s\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "and v21.16b, v23.16b, v1.16b\n"
- "and v20.16b, v24.16b, v1.16b\n"
- "and v19.16b, v25.16b, v1.16b\n"
- "sshl v26.4s, v26.4s, v3.4s\n"
- "sshl v27.4s, v27.4s, v3.4s\n"
- "sshl v28.4s, v28.4s, v3.4s\n"
- "sshl v29.4s, v29.4s, v3.4s\n"
- "sshl v30.4s, v30.4s, v3.4s\n"
- "sshl v31.4s, v31.4s, v3.4s\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "sshr v20.4s, v20.4s, #0x1f\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v2.4s\n"
- "sqrdmulh v27.4s, v27.4s, v2.4s\n"
- "sqrdmulh v28.4s, v28.4s, v2.4s\n"
- "sqrdmulh v29.4s, v29.4s, v2.4s\n"
- "sqrdmulh v30.4s, v30.4s, v2.4s\n"
- "sqrdmulh v31.4s, v31.4s, v2.4s\n"
- "sqadd v23.4s, v23.4s, v21.4s\n"
- "sqadd v24.4s, v24.4s, v20.4s\n"
- "sqadd v25.4s, v25.4s, v19.4s\n"
- "and v18.16b, v26.16b, v1.16b\n"
- "and v17.16b, v27.16b, v1.16b\n"
- "and v16.16b, v28.16b, v1.16b\n"
- "and v21.16b, v29.16b, v1.16b\n"
- "and v20.16b, v30.16b, v1.16b\n"
- "and v19.16b, v31.16b, v1.16b\n"
+ "sshl v27.4s, v27.4s, v7.4s\n"
+ "ldp x27, x26, [%x[outptrs], #0x0]\n"
+ "sshl v26.4s, v26.4s, v7.4s\n"
+ "ldp x25, x24, [%x[outptrs], #0x10]\n"
+ "sshl v25.4s, v25.4s, v7.4s\n"
+ "ldp x23, x22, [%x[outptrs], #0x20]\n"
+ "sqrdmulh v27.4s, v27.4s, v6.4s\n"
+ "ldp x21, x20, [%x[outptrs], #0x30]\n"
+ "sqrdmulh v26.4s, v26.4s, v6.4s\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "sqrdmulh v25.4s, v25.4s, v6.4s\n"
+ "sshl v24.4s, v24.4s, v7.4s\n"
+ "and v16.16b, v27.16b, v5.16b\n"
+ "and v18.16b, v26.16b, v5.16b\n"
+ "and v17.16b, v25.16b, v5.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
"sshr v18.4s, v18.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "sshr v20.4s, v20.4s, #0x1f\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
+ "sqadd v27.4s, v27.4s, v16.4s\n"
"sqadd v26.4s, v26.4s, v18.4s\n"
- "sqadd v27.4s, v27.4s, v17.4s\n"
- "sqadd v28.4s, v28.4s, v16.4s\n"
- "sqadd v29.4s, v29.4s, v21.4s\n"
- "sqadd v30.4s, v30.4s, v20.4s\n"
- "sqadd v31.4s, v31.4s, v19.4s\n"
- "srshl v23.4s, v23.4s, v1.4s\n"
- "srshl v24.4s, v24.4s, v1.4s\n"
- "srshl v25.4s, v25.4s, v1.4s\n"
- "srshl v26.4s, v26.4s, v1.4s\n"
- "srshl v27.4s, v27.4s, v1.4s\n"
- "srshl v28.4s, v28.4s, v1.4s\n"
- "srshl v29.4s, v29.4s, v1.4s\n"
- "srshl v30.4s, v30.4s, v1.4s\n"
- "srshl v31.4s, v31.4s, v1.4s\n"
- "add v23.4s, v23.4s, v4.4s\n"
- "add v24.4s, v24.4s, v4.4s\n"
- "add v25.4s, v25.4s, v4.4s\n"
- "add v26.4s, v26.4s, v4.4s\n"
- "add v27.4s, v27.4s, v4.4s\n"
- "add v28.4s, v28.4s, v4.4s\n"
- "add v29.4s, v29.4s, v4.4s\n"
- "add v30.4s, v30.4s, v4.4s\n"
- "add v31.4s, v31.4s, v4.4s\n"
- "smax v23.4s, v23.4s, v8.4s\n"
- "smax v24.4s, v24.4s, v8.4s\n"
- "smax v25.4s, v25.4s, v8.4s\n"
- "smax v26.4s, v26.4s, v8.4s\n"
- "smax v27.4s, v27.4s, v8.4s\n"
- "smax v28.4s, v28.4s, v8.4s\n"
- "smax v29.4s, v29.4s, v8.4s\n"
- "smax v30.4s, v30.4s, v8.4s\n"
- "smax v31.4s, v31.4s, v8.4s\n"
- "smin v23.4s, v23.4s, v7.4s\n"
- "smin v24.4s, v24.4s, v7.4s\n"
- "smin v25.4s, v25.4s, v7.4s\n"
- "smin v26.4s, v26.4s, v7.4s\n"
- "smin v27.4s, v27.4s, v7.4s\n"
- "smin v28.4s, v28.4s, v7.4s\n"
- "smin v29.4s, v29.4s, v7.4s\n"
- "smin v30.4s, v30.4s, v7.4s\n"
- "smin v31.4s, v31.4s, v7.4s\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "sqadd v25.4s, v25.4s, v17.4s\n"
+ "sqrdmulh v24.4s, v24.4s, v6.4s\n"
+ "srshl v27.4s, v27.4s, v5.4s\n"
+ "srshl v26.4s, v26.4s, v5.4s\n"
+ "srshl v25.4s, v25.4s, v5.4s\n"
+ "and v16.16b, v24.16b, v5.16b\n"
+ "add v27.4s, v27.4s, v8.4s\n"
+ "add v26.4s, v26.4s, v8.4s\n"
+ "add v25.4s, v25.4s, v8.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smax v27.4s, v27.4s, v12.4s\n"
+ "smax v26.4s, v26.4s, v12.4s\n"
+ "sqadd v24.4s, v24.4s, v16.4s\n"
+ "smin v27.4s, v27.4s, v11.4s\n"
+ "smin v26.4s, v26.4s, v11.4s\n"
+ "smax v25.4s, v25.4s, v12.4s\n"
+ "srshl v24.4s, v24.4s, v5.4s\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s23, [x28, x11]\n"
+ "smin v25.4s, v25.4s, v11.4s\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "str s27, [x27, x11]\n"
+ "add v24.4s, v24.4s, v8.4s\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s24, [x27, x11]\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s25, [x26, x11]\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s26, [x25, x11]\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
- "str s27, [x24, x11]\n"
- "str s28, [x23, x11]\n"
- "str s29, [x22, x11]\n"
- "str s30, [x21, x11]\n"
- "str s31, [x20, x11]\n"
+ "str s26, [x26, x11]\n"
+ "smax v24.4s, v24.4s, v12.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s25, [x25, x11]\n"
+ "sshl v23.4s, v23.4s, v7.4s\n"
+ "sshl v22.4s, v22.4s, v7.4s\n"
+ "smin v24.4s, v24.4s, v11.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v6.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v6.4s\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "sshl v21.4s, v21.4s, v7.4s\n"
+ "and v17.16b, v23.16b, v5.16b\n"
+ "and v16.16b, v22.16b, v5.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v6.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "str s24, [x24, x11]\n"
+ "sqadd v23.4s, v23.4s, v17.4s\n"
+ "sqadd v22.4s, v22.4s, v16.4s\n"
+ "and v16.16b, v21.16b, v5.16b\n"
+ "sshl v20.4s, v20.4s, v7.4s\n"
+ "sshl v19.4s, v19.4s, v7.4s\n"
+ "srshl v23.4s, v23.4s, v5.4s\n"
+ "srshl v22.4s, v22.4s, v5.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v20.4s, v20.4s, v6.4s\n"
+ "add v23.4s, v23.4s, v8.4s\n"
+ "add v22.4s, v22.4s, v8.4s\n"
+ "sqadd v21.4s, v21.4s, v16.4s\n"
+ "and v17.16b, v20.16b, v5.16b\n"
+ "sqrdmulh v19.4s, v19.4s, v6.4s\n"
+ "smax v23.4s, v23.4s, v12.4s\n"
+ "srshl v21.4s, v21.4s, v5.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v19.16b, v5.16b\n"
+ "smin v23.4s, v23.4s, v11.4s\n"
+ "add v21.4s, v21.4s, v8.4s\n"
+ "sqadd v20.4s, v20.4s, v17.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smax v22.4s, v22.4s, v12.4s\n"
+ "smax v21.4s, v21.4s, v12.4s\n"
+ "srshl v20.4s, v20.4s, v5.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "smin v22.4s, v22.4s, v11.4s\n"
+ "smin v21.4s, v21.4s, v11.4s\n"
+ "add v20.4s, v20.4s, v8.4s\n"
+ "srshl v19.4s, v19.4s, v5.4s\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "smax v20.4s, v20.4s, v12.4s\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "str s23, [x23, x11]\n"
+ "add v19.4s, v19.4s, v8.4s\n"
+ "smin v20.4s, v20.4s, v11.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "smax v19.4s, v19.4s, v12.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str s22, [x22, x11]\n"
+ "smin v19.4s, v19.4s, v11.4s\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "str s21, [x21, x11]\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "str s20, [x20, x11]\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str s19, [x19, x11]\n"
"add x11, x11, #0x4\n"
- "cmp x11, x12, LSL #2\n"
+ "cmp x11, x10, LSL #2\n"
"blt 1b\n"
"6:" // Oddments
"tst %x[n_channels], #0x3\n"
"beq 24f\n"
- "movi v23.4s, #0x0\n"
+ "movi v27.4s, #0x0\n"
"cbz %x[bias], 9f\n"
- "add x20, %x[bias], x11, LSL #2\n"
+ "add x19, %x[bias], x11, LSL #2\n"
"tbz %x[n_channels], #1, 7f\n"
- "ld1 { v23.d }[0], [x20], #0x8\n"
+ "ld1 { v27.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 8f\n"
- "ld1 { v23.s }[2], [x20], #0x4\n"
+ "ld1 { v27.s }[2], [x19], #0x4\n"
"b 8f\n"
"7:" // Oddments: Load bias: Bit 1: Unset
- "ld1 { v23.s }[0], [x20], #0x4\n"
+ "tbz %x[n_channels], #0, 8f\n"
+ "ld1 { v27.s }[0], [x19], #0x4\n"
"8:" // Oddments: Load bias: Bit 1: End
+
"9:" // Oddments: Load bias: Done
- "ldr s0, [%x[params]], #0x4\n"
- "mov x21, %x[inptrs]\n"
- "ldp x10, x9, [x21], #0x10\n"
- "mov v24.16b, v23.16b\n"
- "ldp x28, x27, [x21], #0x10\n"
- "ldp x26, x25, [x21], #0x10\n"
- "mov v25.16b, v23.16b\n"
- "mov v26.16b, v23.16b\n"
- "ldp x24, x23, [x21], #0x10\n"
- "ldr x22, [x21], #0x8\n"
- "mov v27.16b, v23.16b\n"
- "mov v28.16b, v23.16b\n"
- "mov v29.16b, v23.16b\n"
- "mov v30.16b, v23.16b\n"
- "add x10, x10, x11\n"
+ "mov v26.16b, v27.16b\n"
+ "ldr s16, [%x[params]], #0x4\n"
+ "mov x20, %x[inptrs]\n"
+ "mov v25.16b, v27.16b\n"
+ "ldp x9, x28, [x20], #0x10\n"
"add x9, x9, x11\n"
- "mov v31.16b, v23.16b\n"
- "ssubl v0.8h, v0.8b, v5.8b\n"
+ "mov v24.16b, v27.16b\n"
+ "ldp x27, x26, [x20], #0x10\n"
+ "mov v23.16b, v27.16b\n"
+ "ldp x25, x24, [x20], #0x10\n"
+ "mov v22.16b, v27.16b\n"
"add x28, x28, x11\n"
+ "mov v21.16b, v27.16b\n"
+ "ldp x23, x22, [x20], #0x10\n"
+ "mov v20.16b, v27.16b\n"
"add x27, x27, x11\n"
+ "mov v19.16b, v27.16b\n"
+ "ldr x21, [x20], #0x8\n"
+ "ssubl v16.8h, v16.8b, v9.8b\n"
"add x26, x26, x11\n"
"add x25, x25, x11\n"
"add x24, x24, x11\n"
"add x23, x23, x11\n"
"add x22, x22, x11\n"
+ "add x21, x21, x11\n"
"tbz %x[n_channels], #1, 10f\n"
- "ldr h14, [x10], #0x2\n"
- "ldr h15, [x9], #0x2\n"
- "ldr h16, [x28], #0x2\n"
- "ldr h17, [x27], #0x2\n"
- "ldr h18, [x26], #0x2\n"
- "ldr h19, [x25], #0x2\n"
- "ldr h20, [x24], #0x2\n"
- "ldr h21, [x23], #0x2\n"
- "ldr h22, [x22], #0x2\n"
+ "ldr h4, [x9], #0x2\n"
+ "ldr h3, [x28], #0x2\n"
+ "ldr h2, [x27], #0x2\n"
+ "ldr h1, [x26], #0x2\n"
+ "ldr h0, [x25], #0x2\n"
+ "ldr h31, [x24], #0x2\n"
+ "ldr h30, [x23], #0x2\n"
+ "ldr h29, [x22], #0x2\n"
+ "ldr h28, [x21], #0x2\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v14.b }[2], [x10], #0x1\n"
- "ld1 { v15.b }[2], [x9], #0x1\n"
- "ld1 { v16.b }[2], [x28], #0x1\n"
- "ld1 { v17.b }[2], [x27], #0x1\n"
- "ld1 { v18.b }[2], [x26], #0x1\n"
- "ld1 { v19.b }[2], [x25], #0x1\n"
- "ld1 { v20.b }[2], [x24], #0x1\n"
- "ld1 { v21.b }[2], [x23], #0x1\n"
- "ld1 { v22.b }[2], [x22], #0x1\n"
+ "ld1 { v4.b }[2], [x9], #0x1\n"
+ "ld1 { v3.b }[2], [x28], #0x1\n"
+ "ld1 { v2.b }[2], [x27], #0x1\n"
+ "ld1 { v1.b }[2], [x26], #0x1\n"
+ "ld1 { v0.b }[2], [x25], #0x1\n"
+ "ld1 { v31.b }[2], [x24], #0x1\n"
+ "ld1 { v30.b }[2], [x23], #0x1\n"
+ "ld1 { v29.b }[2], [x22], #0x1\n"
+ "ld1 { v28.b }[2], [x21], #0x1\n"
"b 11f\n"
"10:" // Oddments: Load: Bit 1: Unset
- "ldr b14, [x10], #0x1\n"
- "ldr b15, [x9], #0x1\n"
- "ldr b16, [x28], #0x1\n"
- "ldr b17, [x27], #0x1\n"
- "ldr b18, [x26], #0x1\n"
- "ldr b19, [x25], #0x1\n"
- "ldr b20, [x24], #0x1\n"
- "ldr b21, [x23], #0x1\n"
- "ldr b22, [x22], #0x1\n"
+ "tbz %x[n_channels], #0, 11f\n"
+ "ldr b4, [x9], #0x1\n"
+ "ldr b3, [x28], #0x1\n"
+ "ldr b2, [x27], #0x1\n"
+ "ldr b1, [x26], #0x1\n"
+ "ldr b0, [x25], #0x1\n"
+ "ldr b31, [x24], #0x1\n"
+ "ldr b30, [x23], #0x1\n"
+ "ldr b29, [x22], #0x1\n"
+ "ldr b28, [x21], #0x1\n"
"11:" // Oddments: Load: Bit 1: End
- "subs x20, %x[n_points], #0x1\n"
- "ssubl v14.8h, v14.8b, v6.8b\n"
- "ssubl v15.8h, v15.8b, v6.8b\n"
- "ssubl v16.8h, v16.8b, v6.8b\n"
- "ssubl v17.8h, v17.8b, v6.8b\n"
- "ssubl v18.8h, v18.8b, v6.8b\n"
- "ssubl v19.8h, v19.8b, v6.8b\n"
- "ssubl v20.8h, v20.8b, v6.8b\n"
- "ssubl v21.8h, v21.8b, v6.8b\n"
- "ssubl v22.8h, v22.8b, v6.8b\n"
+ "ssubl v4.8h, v4.8b, v10.8b\n"
+ "subs x19, %x[n_points], #0x1\n"
+ "ssubl v3.8h, v3.8b, v10.8b\n"
+ "ssubl v2.8h, v2.8b, v10.8b\n"
+ "ssubl v1.8h, v1.8b, v10.8b\n"
+ "ssubl v0.8h, v0.8b, v10.8b\n"
+ "ssubl v31.8h, v31.8b, v10.8b\n"
+ "ssubl v30.8h, v30.8b, v10.8b\n"
+ "ssubl v29.8h, v29.8b, v10.8b\n"
+ "ssubl v28.8h, v28.8b, v10.8b\n"
"ble 15f\n"
"12:" // Oddments: Planar loop
- "ldp x10, x9, [x21], #0x10\n"
- "ldp x28, x27, [x21], #0x10\n"
- "smlal v23.4s, v14.4h, v0.4h\n"
- "smlal v24.4s, v15.4h, v0.4h\n"
- "ldp x26, x25, [x21], #0x10\n"
- "ldp x24, x23, [x21], #0x10\n"
- "smlal v25.4s, v16.4h, v0.4h\n"
- "smlal v26.4s, v17.4h, v0.4h\n"
- "smlal v27.4s, v18.4h, v0.4h\n"
- "smlal v28.4s, v19.4h, v0.4h\n"
- "ldr x22, [x21], #0x8\n"
- "add x10, x10, x11\n"
- "smlal v29.4s, v20.4h, v0.4h\n"
- "smlal v30.4s, v21.4h, v0.4h\n"
+ "smlal v27.4s, v4.4h, v16.4h\n"
+ "ldp x9, x28, [x20], #0x10\n"
"add x9, x9, x11\n"
+ "smlal v26.4s, v3.4h, v16.4h\n"
+ "ldp x27, x26, [x20], #0x10\n"
+ "smlal v25.4s, v2.4h, v16.4h\n"
+ "ldp x25, x24, [x20], #0x10\n"
+ "smlal v24.4s, v1.4h, v16.4h\n"
"add x28, x28, x11\n"
- "smlal v31.4s, v22.4h, v0.4h\n"
- "ldr s0, [%x[params]], #0x4\n"
- "ssubl v0.8h, v0.8b, v5.8b\n"
+ "smlal v23.4s, v0.4h, v16.4h\n"
+ "ldp x23, x22, [x20], #0x10\n"
+ "smlal v22.4s, v31.4h, v16.4h\n"
"add x27, x27, x11\n"
+ "smlal v21.4s, v30.4h, v16.4h\n"
+ "ldr x21, [x20], #0x8\n"
+ "smlal v20.4s, v29.4h, v16.4h\n"
"add x26, x26, x11\n"
+ "smlal v19.4s, v28.4h, v16.4h\n"
+ "ldr s16, [%x[params]], #0x4\n"
"add x25, x25, x11\n"
+ "ssubl v16.8h, v16.8b, v9.8b\n"
"add x24, x24, x11\n"
"add x23, x23, x11\n"
"add x22, x22, x11\n"
+ "add x21, x21, x11\n"
"tbz %x[n_channels], #1, 13f\n"
- "ldr h14, [x10], #0x2\n"
- "ldr h15, [x9], #0x2\n"
- "ldr h16, [x28], #0x2\n"
- "ldr h17, [x27], #0x2\n"
- "ldr h18, [x26], #0x2\n"
- "ldr h19, [x25], #0x2\n"
- "ldr h20, [x24], #0x2\n"
- "ldr h21, [x23], #0x2\n"
- "ldr h22, [x22], #0x2\n"
+ "ldr h4, [x9], #0x2\n"
+ "ldr h3, [x28], #0x2\n"
+ "ldr h2, [x27], #0x2\n"
+ "ldr h1, [x26], #0x2\n"
+ "ldr h0, [x25], #0x2\n"
+ "ldr h31, [x24], #0x2\n"
+ "ldr h30, [x23], #0x2\n"
+ "ldr h29, [x22], #0x2\n"
+ "ldr h28, [x21], #0x2\n"
"tbz %x[n_channels], #0, 14f\n"
- "ld1 { v14.b }[2], [x10], #0x1\n"
- "ld1 { v15.b }[2], [x9], #0x1\n"
- "ld1 { v16.b }[2], [x28], #0x1\n"
- "ld1 { v17.b }[2], [x27], #0x1\n"
- "ld1 { v18.b }[2], [x26], #0x1\n"
- "ld1 { v19.b }[2], [x25], #0x1\n"
- "ld1 { v20.b }[2], [x24], #0x1\n"
- "ld1 { v21.b }[2], [x23], #0x1\n"
- "ld1 { v22.b }[2], [x22], #0x1\n"
+ "ld1 { v4.b }[2], [x9], #0x1\n"
+ "ld1 { v3.b }[2], [x28], #0x1\n"
+ "ld1 { v2.b }[2], [x27], #0x1\n"
+ "ld1 { v1.b }[2], [x26], #0x1\n"
+ "ld1 { v0.b }[2], [x25], #0x1\n"
+ "ld1 { v31.b }[2], [x24], #0x1\n"
+ "ld1 { v30.b }[2], [x23], #0x1\n"
+ "ld1 { v29.b }[2], [x22], #0x1\n"
+ "ld1 { v28.b }[2], [x21], #0x1\n"
"b 14f\n"
"13:" // Oddments: Planar loop: Load: Bit 1: Unset
- "ldr b14, [x10], #0x1\n"
- "ldr b15, [x9], #0x1\n"
- "ldr b16, [x28], #0x1\n"
- "ldr b17, [x27], #0x1\n"
- "ldr b18, [x26], #0x1\n"
- "ldr b19, [x25], #0x1\n"
- "ldr b20, [x24], #0x1\n"
- "ldr b21, [x23], #0x1\n"
- "ldr b22, [x22], #0x1\n"
+ "tbz %x[n_channels], #0, 14f\n"
+ "ldr b4, [x9], #0x1\n"
+ "ldr b3, [x28], #0x1\n"
+ "ldr b2, [x27], #0x1\n"
+ "ldr b1, [x26], #0x1\n"
+ "ldr b0, [x25], #0x1\n"
+ "ldr b31, [x24], #0x1\n"
+ "ldr b30, [x23], #0x1\n"
+ "ldr b29, [x22], #0x1\n"
+ "ldr b28, [x21], #0x1\n"
"14:" // Oddments: Planar loop: Load: Bit 1: End
- "subs x20, x20, #0x1\n"
- "ssubl v14.8h, v14.8b, v6.8b\n"
- "ssubl v15.8h, v15.8b, v6.8b\n"
- "ssubl v16.8h, v16.8b, v6.8b\n"
- "ssubl v17.8h, v17.8b, v6.8b\n"
- "ssubl v18.8h, v18.8b, v6.8b\n"
- "ssubl v19.8h, v19.8b, v6.8b\n"
- "ssubl v20.8h, v20.8b, v6.8b\n"
- "ssubl v21.8h, v21.8b, v6.8b\n"
- "ssubl v22.8h, v22.8b, v6.8b\n"
+ "ssubl v4.8h, v4.8b, v10.8b\n"
+ "subs x19, x19, #0x1\n"
+ "ssubl v3.8h, v3.8b, v10.8b\n"
+ "ssubl v2.8h, v2.8b, v10.8b\n"
+ "ssubl v1.8h, v1.8b, v10.8b\n"
+ "ssubl v0.8h, v0.8b, v10.8b\n"
+ "ssubl v31.8h, v31.8b, v10.8b\n"
+ "ssubl v30.8h, v30.8b, v10.8b\n"
+ "ssubl v29.8h, v29.8b, v10.8b\n"
+ "ssubl v28.8h, v28.8b, v10.8b\n"
"bgt 12b\n"
"15:" // Oddments: Planar tail
- "smlal v23.4s, v14.4h, v0.4h\n"
- "smlal v24.4s, v15.4h, v0.4h\n"
- "smlal v25.4s, v16.4h, v0.4h\n"
- "smlal v26.4s, v17.4h, v0.4h\n"
- "smlal v27.4s, v18.4h, v0.4h\n"
- "smlal v28.4s, v19.4h, v0.4h\n"
- "smlal v29.4s, v20.4h, v0.4h\n"
- "smlal v30.4s, v21.4h, v0.4h\n"
- "smlal v31.4s, v22.4h, v0.4h\n"
+ "smlal v27.4s, v4.4h, v16.4h\n"
+ "smlal v26.4s, v3.4h, v16.4h\n"
+ "smlal v25.4s, v2.4h, v16.4h\n"
+ "smlal v24.4s, v1.4h, v16.4h\n"
+ "smlal v23.4s, v0.4h, v16.4h\n"
+ "smlal v22.4s, v31.4h, v16.4h\n"
+ "smlal v21.4s, v30.4h, v16.4h\n"
+ "smlal v20.4s, v29.4h, v16.4h\n"
+ "smlal v19.4s, v28.4h, v16.4h\n"
"cbz %x[rq_mul_ptr], 21f\n"
- "add x22, %x[rq_mul_ptr], x11, LSL #2\n"
- "add x21, %x[rq_right_shift_ptr], x11, LSL #2\n"
- "add x20, %x[rq_left_shift_ptr], x11, LSL #2\n"
+ "add x21, %x[rq_mul_ptr], x11, LSL #2\n"
+ "add x20, %x[rq_right_shift_ptr], x11, LSL #2\n"
+ "add x19, %x[rq_left_shift_ptr], x11, LSL #2\n"
"tbz %x[n_channels], #1, 18f\n"
- "ld1 { v2.d }[0], [x22], #0x8\n"
- "ld1 { v1.d }[0], [x21], #0x8\n"
+ "ld1 { v6.d }[0], [x21], #0x8\n"
+ "ld1 { v5.d }[0], [x20], #0x8\n"
"cbz %x[rq_left_shift_ptr], 16f\n"
- "ld1 { v3.d }[0], [x20], #0x8\n"
+ "ld1 { v7.d }[0], [x19], #0x8\n"
"16:" // Oddments: Load quantisation parameters: Bit 1: Load left shift: Done
"tbz %x[n_channels], #0, 20f\n"
- "ld1 { v2.s }[2], [x22], #0x4\n"
- "ld1 { v1.s }[2], [x21], #0x4\n"
+ "ld1 { v6.s }[2], [x21], #0x4\n"
+ "ld1 { v5.s }[2], [x20], #0x4\n"
"cbz %x[rq_left_shift_ptr], 17f\n"
- "ld1 { v3.s }[2], [x20], #0x4\n"
+ "ld1 { v7.s }[2], [x19], #0x4\n"
"17:" // Oddments: Load quantisation parameters: Bit 1: Bit 0: Load left shift: Done
"b 20f\n"
"18:" // Oddments: Load quantisation parameters: Bit 1: Unset
- "ld1 { v2.s }[0], [x22], #0x4\n"
- "ld1 { v1.s }[0], [x21], #0x4\n"
+ "tbz %x[n_channels], #0, 20f\n"
+ "ld1 { v6.s }[0], [x21], #0x4\n"
+ "ld1 { v5.s }[0], [x20], #0x4\n"
"cbz %x[rq_left_shift_ptr], 19f\n"
- "ld1 { v3.s }[0], [x20], #0x4\n"
+ "ld1 { v7.s }[0], [x19], #0x4\n"
"19:" // Oddments: Load quantisation parameters: Bit 1: Unset: Bit 0: Load left shift: Done
"20:" // Oddments: Load quantisation parameters: Bit 1: End
"21:" // Oddments: Load quantisation parameters: Done
- "sshl v23.4s, v23.4s, v3.4s\n"
- "sshl v24.4s, v24.4s, v3.4s\n"
- "ldp x28, x27, [%x[outptrs], #0x0]\n"
- "ldp x26, x25, [%x[outptrs], #0x10]\n"
- "sshl v25.4s, v25.4s, v3.4s\n"
- "sqrdmulh v23.4s, v23.4s, v2.4s\n"
- "ldp x24, x23, [%x[outptrs], #0x20]\n"
- "ldp x22, x21, [%x[outptrs], #0x30]\n"
- "sqrdmulh v24.4s, v24.4s, v2.4s\n"
- "sqrdmulh v25.4s, v25.4s, v2.4s\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "add x28, x28, x11\n"
- "and v21.16b, v23.16b, v1.16b\n"
- "and v20.16b, v24.16b, v1.16b\n"
+ "sshl v27.4s, v27.4s, v7.4s\n"
+ "ldp x27, x26, [%x[outptrs], #0x0]\n"
"add x27, x27, x11\n"
+ "sqrdmulh v27.4s, v27.4s, v6.4s\n"
+ "ldp x25, x24, [%x[outptrs], #0x10]\n"
+ "sshl v26.4s, v26.4s, v7.4s\n"
+ "ldp x23, x22, [%x[outptrs], #0x20]\n"
"add x26, x26, x11\n"
- "and v19.16b, v25.16b, v1.16b\n"
- "sshl v26.4s, v26.4s, v3.4s\n"
+ "sshl v25.4s, v25.4s, v7.4s\n"
+ "ldp x21, x20, [%x[outptrs], #0x30]\n"
+ "sshl v24.4s, v24.4s, v7.4s\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
"add x25, x25, x11\n"
+ "and v16.16b, v27.16b, v5.16b\n"
"add x24, x24, x11\n"
- "sshl v27.4s, v27.4s, v3.4s\n"
- "sshl v28.4s, v28.4s, v3.4s\n"
+ "sqrdmulh v26.4s, v26.4s, v6.4s\n"
"add x23, x23, x11\n"
+ "sqrdmulh v25.4s, v25.4s, v6.4s\n"
"add x22, x22, x11\n"
- "sshl v29.4s, v29.4s, v3.4s\n"
- "sshl v30.4s, v30.4s, v3.4s\n"
+ "sqrdmulh v24.4s, v24.4s, v6.4s\n"
"add x21, x21, x11\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
"add x20, x20, x11\n"
- "sshl v31.4s, v31.4s, v3.4s\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "sshr v20.4s, v20.4s, #0x1f\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v2.4s\n"
- "sqrdmulh v27.4s, v27.4s, v2.4s\n"
- "sqrdmulh v28.4s, v28.4s, v2.4s\n"
- "sqrdmulh v29.4s, v29.4s, v2.4s\n"
- "sqrdmulh v30.4s, v30.4s, v2.4s\n"
- "sqrdmulh v31.4s, v31.4s, v2.4s\n"
- "sqadd v23.4s, v23.4s, v21.4s\n"
- "sqadd v24.4s, v24.4s, v20.4s\n"
- "sqadd v25.4s, v25.4s, v19.4s\n"
- "and v18.16b, v26.16b, v1.16b\n"
- "and v17.16b, v27.16b, v1.16b\n"
- "and v16.16b, v28.16b, v1.16b\n"
- "and v21.16b, v29.16b, v1.16b\n"
- "and v20.16b, v30.16b, v1.16b\n"
- "and v19.16b, v31.16b, v1.16b\n"
+ "and v18.16b, v26.16b, v5.16b\n"
+ "add x19, x19, x11\n"
+ "and v17.16b, v25.16b, v5.16b\n"
+ "sqadd v27.4s, v27.4s, v16.4s\n"
"sshr v18.4s, v18.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "sshr v20.4s, v20.4s, #0x1f\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
+ "and v16.16b, v24.16b, v5.16b\n"
+ "srshl v27.4s, v27.4s, v5.4s\n"
"sqadd v26.4s, v26.4s, v18.4s\n"
- "sqadd v27.4s, v27.4s, v17.4s\n"
- "sqadd v28.4s, v28.4s, v16.4s\n"
- "sqadd v29.4s, v29.4s, v21.4s\n"
- "sqadd v30.4s, v30.4s, v20.4s\n"
- "sqadd v31.4s, v31.4s, v19.4s\n"
- "srshl v23.4s, v23.4s, v1.4s\n"
- "srshl v24.4s, v24.4s, v1.4s\n"
- "srshl v25.4s, v25.4s, v1.4s\n"
- "srshl v26.4s, v26.4s, v1.4s\n"
- "srshl v27.4s, v27.4s, v1.4s\n"
- "srshl v28.4s, v28.4s, v1.4s\n"
- "srshl v29.4s, v29.4s, v1.4s\n"
- "srshl v30.4s, v30.4s, v1.4s\n"
- "srshl v31.4s, v31.4s, v1.4s\n"
- "add v23.4s, v23.4s, v4.4s\n"
- "add v24.4s, v24.4s, v4.4s\n"
- "add v25.4s, v25.4s, v4.4s\n"
- "add v26.4s, v26.4s, v4.4s\n"
- "add v27.4s, v27.4s, v4.4s\n"
- "add v28.4s, v28.4s, v4.4s\n"
- "add v29.4s, v29.4s, v4.4s\n"
- "add v30.4s, v30.4s, v4.4s\n"
- "add v31.4s, v31.4s, v4.4s\n"
- "smax v23.4s, v23.4s, v8.4s\n"
- "smax v24.4s, v24.4s, v8.4s\n"
- "smax v25.4s, v25.4s, v8.4s\n"
- "smax v26.4s, v26.4s, v8.4s\n"
- "smax v27.4s, v27.4s, v8.4s\n"
- "smax v28.4s, v28.4s, v8.4s\n"
- "smax v29.4s, v29.4s, v8.4s\n"
- "smax v30.4s, v30.4s, v8.4s\n"
- "smax v31.4s, v31.4s, v8.4s\n"
- "smin v23.4s, v23.4s, v7.4s\n"
- "smin v24.4s, v24.4s, v7.4s\n"
- "smin v25.4s, v25.4s, v7.4s\n"
- "smin v26.4s, v26.4s, v7.4s\n"
- "smin v27.4s, v27.4s, v7.4s\n"
- "smin v28.4s, v28.4s, v7.4s\n"
- "smin v29.4s, v29.4s, v7.4s\n"
- "smin v30.4s, v30.4s, v7.4s\n"
- "smin v31.4s, v31.4s, v7.4s\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "sqadd v25.4s, v25.4s, v17.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "add v27.4s, v27.4s, v8.4s\n"
+ "srshl v26.4s, v26.4s, v5.4s\n"
+ "srshl v25.4s, v25.4s, v5.4s\n"
+ "sqadd v24.4s, v24.4s, v16.4s\n"
+ "smax v27.4s, v27.4s, v12.4s\n"
+ "add v26.4s, v26.4s, v8.4s\n"
+ "add v25.4s, v25.4s, v8.4s\n"
+ "srshl v24.4s, v24.4s, v5.4s\n"
+ "smin v27.4s, v27.4s, v11.4s\n"
+ "smax v26.4s, v26.4s, v12.4s\n"
+ "smax v25.4s, v25.4s, v12.4s\n"
+ "add v24.4s, v24.4s, v8.4s\n"
+ "smin v26.4s, v26.4s, v11.4s\n"
+ "smin v25.4s, v25.4s, v11.4s\n"
+ "smax v24.4s, v24.4s, v12.4s\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
+ "smin v24.4s, v24.4s, v11.4s\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "sshl v23.4s, v23.4s, v7.4s\n"
+ "sshl v22.4s, v22.4s, v7.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v6.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v6.4s\n"
+ "sshl v21.4s, v21.4s, v7.4s\n"
+ "sshl v20.4s, v20.4s, v7.4s\n"
+ "and v17.16b, v23.16b, v5.16b\n"
+ "and v16.16b, v22.16b, v5.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v6.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v20.4s, v20.4s, v6.4s\n"
+ "sqadd v23.4s, v23.4s, v17.4s\n"
+ "sqadd v22.4s, v22.4s, v16.4s\n"
+ "and v16.16b, v21.16b, v5.16b\n"
+ "and v17.16b, v20.16b, v5.16b\n"
+ "srshl v23.4s, v23.4s, v5.4s\n"
+ "srshl v22.4s, v22.4s, v5.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "add v23.4s, v23.4s, v8.4s\n"
+ "add v22.4s, v22.4s, v8.4s\n"
+ "sqadd v21.4s, v21.4s, v16.4s\n"
+ "sqadd v20.4s, v20.4s, v17.4s\n"
+ "smax v23.4s, v23.4s, v12.4s\n"
+ "smax v22.4s, v22.4s, v12.4s\n"
+ "srshl v21.4s, v21.4s, v5.4s\n"
+ "srshl v20.4s, v20.4s, v5.4s\n"
+ "smin v23.4s, v23.4s, v11.4s\n"
+ "smin v22.4s, v22.4s, v11.4s\n"
+ "add v21.4s, v21.4s, v8.4s\n"
+ "add v20.4s, v20.4s, v8.4s\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "smax v21.4s, v21.4s, v12.4s\n"
+ "smax v20.4s, v20.4s, v12.4s\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "smin v21.4s, v21.4s, v11.4s\n"
+ "smin v20.4s, v20.4s, v11.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "sshl v19.4s, v19.4s, v7.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "sqrdmulh v19.4s, v19.4s, v6.4s\n"
+ "and v16.16b, v19.16b, v5.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "srshl v19.4s, v19.4s, v5.4s\n"
+ "add v19.4s, v19.4s, v8.4s\n"
+ "smax v19.4s, v19.4s, v12.4s\n"
+ "smin v19.4s, v19.4s, v11.4s\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
"tbz %x[n_channels], #1, 22f\n"
- "st1 { v23.h }[0], [x28], #0x2\n"
- "st1 { v24.h }[0], [x27], #0x2\n"
- "st1 { v25.h }[0], [x26], #0x2\n"
- "st1 { v26.h }[0], [x25], #0x2\n"
- "st1 { v27.h }[0], [x24], #0x2\n"
- "st1 { v28.h }[0], [x23], #0x2\n"
- "st1 { v29.h }[0], [x22], #0x2\n"
- "st1 { v30.h }[0], [x21], #0x2\n"
- "st1 { v31.h }[0], [x20], #0x2\n"
+ "st1 { v27.h }[0], [x27], #0x2\n"
+ "st1 { v26.h }[0], [x26], #0x2\n"
+ "st1 { v25.h }[0], [x25], #0x2\n"
+ "st1 { v24.h }[0], [x24], #0x2\n"
+ "st1 { v23.h }[0], [x23], #0x2\n"
+ "st1 { v22.h }[0], [x22], #0x2\n"
+ "st1 { v21.h }[0], [x21], #0x2\n"
+ "st1 { v20.h }[0], [x20], #0x2\n"
+ "st1 { v19.h }[0], [x19], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "st1 { v23.b }[2], [x28], #0x1\n"
- "st1 { v24.b }[2], [x27], #0x1\n"
- "st1 { v25.b }[2], [x26], #0x1\n"
- "st1 { v26.b }[2], [x25], #0x1\n"
- "st1 { v27.b }[2], [x24], #0x1\n"
- "st1 { v28.b }[2], [x23], #0x1\n"
- "st1 { v29.b }[2], [x22], #0x1\n"
- "st1 { v30.b }[2], [x21], #0x1\n"
- "st1 { v31.b }[2], [x20], #0x1\n"
+ "st1 { v27.b }[2], [x27], #0x1\n"
+ "st1 { v26.b }[2], [x26], #0x1\n"
+ "st1 { v25.b }[2], [x25], #0x1\n"
+ "st1 { v24.b }[2], [x24], #0x1\n"
+ "st1 { v23.b }[2], [x23], #0x1\n"
+ "st1 { v22.b }[2], [x22], #0x1\n"
+ "st1 { v21.b }[2], [x21], #0x1\n"
+ "st1 { v20.b }[2], [x20], #0x1\n"
+ "st1 { v19.b }[2], [x19], #0x1\n"
"b 23f\n"
"22:" // Oddments: Store: Bit 1: Unset
- "st1 { v23.b }[0], [x28], #0x1\n"
- "st1 { v24.b }[0], [x27], #0x1\n"
- "st1 { v25.b }[0], [x26], #0x1\n"
- "st1 { v26.b }[0], [x25], #0x1\n"
- "st1 { v27.b }[0], [x24], #0x1\n"
- "st1 { v28.b }[0], [x23], #0x1\n"
- "st1 { v29.b }[0], [x22], #0x1\n"
- "st1 { v30.b }[0], [x21], #0x1\n"
- "st1 { v31.b }[0], [x20], #0x1\n"
+ "tbz %x[n_channels], #0, 23f\n"
+ "st1 { v27.b }[0], [x27], #0x1\n"
+ "st1 { v26.b }[0], [x26], #0x1\n"
+ "st1 { v25.b }[0], [x25], #0x1\n"
+ "st1 { v24.b }[0], [x24], #0x1\n"
+ "st1 { v23.b }[0], [x23], #0x1\n"
+ "st1 { v22.b }[0], [x22], #0x1\n"
+ "st1 { v21.b }[0], [x21], #0x1\n"
+ "st1 { v20.b }[0], [x20], #0x1\n"
+ "st1 { v19.b }[0], [x19], #0x1\n"
"23:" // Oddments: Store: Bit 1: End
"24:" // End
: [params] "+&r" (params)
: [bias] "r" (qp.bias), [inptrs] "r" (inptrs), [n_channels] "r" ((uint64_t) n_channels), [n_points] "r" ((uint64_t) n_points), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_layer_left_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_left_shift)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [outptrs] "r" (outptrs), [qp] "r" (&qp), [rq_left_shift_ptr] "r" (qp.per_channel_left_shifts), [rq_mul_ptr] "r" (qp.per_channel_muls), [rq_right_shift_ptr] "r" (qp.per_channel_right_shifts)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
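
// Note for readers tracing the restored requantisation sequence above: the
// following standalone C++ sketch models what the sshl / sqrdmulh / srshl /
// add / smax / smin / uzp1 chain computes for a single lane. It is an
// illustrative scalar approximation only, not code from this patch; the
// function name and parameters are hypothetical, saturation on the sqrdmulh
// edge case and the negative-tie fixup (the and/sshr/sqadd trio) are elided.

#include <cstdint>
#include <algorithm>

int8_t requantise_lane(int32_t acc, int32_t mul, int left_shift,
                       int right_shift, int32_t c_offset,
                       int32_t minval, int32_t maxval)
{
    // sshl: per-channel left shift of the accumulator
    int64_t v = static_cast<int64_t>(acc) << left_shift;
    // sqrdmulh: rounding doubling multiply, keep high half
    // (int32 saturation on the INT32_MIN * INT32_MIN case is elided here)
    v = (v * mul + (int64_t{1} << 30)) >> 31;
    // srshl by a negative amount: rounding arithmetic right shift; the real
    // kernel first applies a sign correction via and/sshr/sqadd so that
    // negative ties round the same way as gemmlowp-style requantisation
    if (right_shift > 0)
        v = (v + (int64_t{1} << (right_shift - 1))) >> right_shift;
    // add: output (c) offset, then smax/smin clamp to the quantised range
    v += c_offset;
    v = std::min<int64_t>(std::max<int64_t>(v, minval), maxval);
    // uzp1 (applied twice in the assembly) narrows 32-bit lanes to bytes
    return static_cast<int8_t>(v);
}
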
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp
index 342a297dd4..cbe3d2cd1c 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,475 +40,487 @@ void a64_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst_impl
)
{
__asm__ __volatile__(
- "ldr q14, [%x[params], #0x0]\n"
- "ldr q5, [%x[params], #0x10]\n"
- "movi v15.16b, #0x1\n"
- "ushr v15.4s, v15.4s, #0x8\n"
- "ldr q6, [%x[params], #0x20]\n"
- "ldr q7, [%x[params], #0x30]\n"
- "movi v26.4s, #0x0\n"
- "movi v27.4s, #0x0\n"
+ "movi v5.16b, #0x1\n"
+ "ldr x22, [%x[inptrs], #0x0]\n"
+ "add SP, SP, #-0x80\n"
+ "ushr v5.4s, v5.4s, #0x8\n"
"ldr x20, [%x[inptrs], #0x8]\n"
- "ld1 { v1.16b }, [x20]\n"
- "mov v29.16b, v1.16b\n"
- "mov v16.16b, v1.16b\n"
- "ldr x20, [%x[inptrs], #0x10]\n"
- "ld1 { v2.16b }, [x20]\n"
- "mov v28.16b, v1.16b\n"
- "mov v22.16b, v2.16b\n"
- "ldr x20, [%x[inptrs], #0x20]\n"
- "ld1 { v4.16b }, [x20]\n"
- "mov v31.16b, v2.16b\n"
- "mov v30.16b, v2.16b\n"
- "ldr x20, [%x[inptrs], #0x0]\n"
- "ld1 { v0.16b }, [x20]\n"
- "mov v23.16b, v4.16b\n"
- "mov v21.16b, v4.16b\n"
+ "add x21, %x[qp], %[offsetof_Requantize32_b_offset]\n"
+ "movi v26.4s, #0x0\n"
+ "ldr x19, [%x[inptrs], #0x10]\n"
+ "mov x11, #0x0\n"
+ "movi v1.4s, #0x0\n"
+ "ld1 { v15.16b }, [x22]\n"
+ "mov x10, #0x0\n"
+ "movi v22.4s, #0x0\n"
+ "ld1 { v29.16b }, [x20]\n"
+ "add x9, %x[qp], %[offsetof_Requantize32_c_offset]\n"
+ "movi v25.4s, #0x0\n"
+ "ld1 { v0.16b }, [x19]\n"
+ "add x28, %x[qp], %[offsetof_Requantize32_minval]\n"
+ "movi v13.4s, #0x0\n"
"ldr x20, [%x[inptrs], #0x18]\n"
- "ld1 { v3.16b }, [x20]\n"
- "mov v20.16b, v4.16b\n"
- "ext v29.16b, v29.16b, v29.16b, #0x2\n"
- "ext v16.16b, v16.16b, v16.16b, #0x4\n"
- "ext v28.16b, v28.16b, v28.16b, #0x6\n"
- "add x20, %x[qp], %[offsetof_Requantize32_b_offset]\n"
- "ld1r { v13.4s }, [x20]\n"
- "ext v22.16b, v22.16b, v22.16b, #0x2\n"
- "ext v31.16b, v31.16b, v31.16b, #0x4\n"
- "add x20, %x[qp], %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v12.4s }, [x20]\n"
- "ext v30.16b, v30.16b, v30.16b, #0x6\n"
- "ext v23.16b, v23.16b, v23.16b, #0x2\n"
- "add x20, %x[qp], %[offsetof_Requantize32_minval]\n"
- "ld1r { v11.4s }, [x20]\n"
- "ext v21.16b, v21.16b, v21.16b, #0x4\n"
- "ext v20.16b, v20.16b, v20.16b, #0x6\n"
- "add x20, %x[qp], %[offsetof_Requantize32_maxval]\n"
- "ld1r { v10.4s }, [x20]\n"
- "mov v25.16b, v0.16b\n"
- "mov v19.16b, v0.16b\n"
+ "add x27, %x[qp], %[offsetof_Requantize32_maxval]\n"
+ "mov v20.16b, v15.16b\n"
+ "ldr x19, [%x[inptrs], #0x20]\n"
"cmp %x[n_channels], #0x4\n"
- "mov x9, #0x0\n"
- "mov v18.16b, v0.16b\n"
- "mov v24.16b, v3.16b\n"
- "mov x28, #0x0\n"
- "ldp x27, x26, [%x[outptrs], #0x0]\n"
- "mov v17.16b, v3.16b\n"
- "ext v25.16b, v25.16b, v25.16b, #0x2\n"
- "ldp x25, x24, [%x[outptrs], #0x10]\n"
- "ldp x23, x22, [%x[outptrs], #0x20]\n"
- "ext v19.16b, v19.16b, v19.16b, #0x4\n"
- "ext v18.16b, v18.16b, v18.16b, #0x6\n"
- "ldp x21, x20, [%x[outptrs], #0x30]\n"
+ "ext v20.16b, v20.16b, v20.16b, #0x2\n"
+ "ld1r { v4.4s }, [x21]\n"
+ "mov v17.16b, v15.16b\n"
+ "ld1 { v2.16b }, [x20]\n"
+ "ext v17.16b, v17.16b, v17.16b, #0x4\n"
+ "ld1 { v7.16b }, [x19]\n"
+ "mov v23.16b, v15.16b\n"
+ "ldp x26, x25, [%x[outptrs], #0x0]\n"
+ "ext v23.16b, v23.16b, v23.16b, #0x6\n"
+ "ldp x24, x23, [%x[outptrs], #0x10]\n"
+ "mov v18.16b, v29.16b\n"
+ "ldp x22, x21, [%x[outptrs], #0x20]\n"
+ "zip1 v15.4s, v15.4s, v17.4s\n"
+ "ldp x20, x19, [%x[outptrs], #0x30]\n"
+ "ext v18.16b, v18.16b, v18.16b, #0x2\n"
+ "ld1r { v14.4s }, [x9]\n"
+ "zip1 v20.4s, v20.4s, v23.4s\n"
+ "ld1r { v27.4s }, [x28]\n"
+ "zip1 v15.4s, v15.4s, v20.4s\n"
+ "ld1r { v23.4s }, [x27]\n"
+ "mov v17.16b, v29.16b\n"
+ "ldr q6, [%x[params], #0x0]\n"
+ "ext v17.16b, v17.16b, v17.16b, #0x4\n"
+ "ldr q8, [%x[params], #0x10]\n"
+ "mov v11.16b, v29.16b\n"
+ "ldr q9, [%x[params], #0x20]\n"
+ "ext v11.16b, v11.16b, v11.16b, #0x6\n"
+ "ldr q10, [%x[params], #0x30]\n"
"add %x[params], %x[params], #0x40\n"
- "zip1 v1.4s, v1.4s, v16.4s\n"
- "mov v16.16b, v3.16b\n"
- "zip1 v29.4s, v29.4s, v28.4s\n"
- "zip1 v2.4s, v2.4s, v31.4s\n"
- "zip1 v22.4s, v22.4s, v30.4s\n"
- "ext v24.16b, v24.16b, v24.16b, #0x2\n"
+ "zip1 v29.4s, v29.4s, v17.4s\n"
+ "mov v12.16b, v0.16b\n"
+ "ext v12.16b, v12.16b, v12.16b, #0x2\n"
+ "zip1 v18.4s, v18.4s, v11.4s\n"
+ "zip1 v29.4s, v29.4s, v18.4s\n"
+ "mov v17.16b, v0.16b\n"
"ext v17.16b, v17.16b, v17.16b, #0x4\n"
- "ext v16.16b, v16.16b, v16.16b, #0x6\n"
- "zip1 v4.4s, v4.4s, v21.4s\n"
- "zip1 v23.4s, v23.4s, v20.4s\n"
- "zip1 v0.4s, v0.4s, v19.4s\n"
- "zip1 v25.4s, v25.4s, v18.4s\n"
- "zip1 v1.4s, v1.4s, v29.4s\n"
- "zip1 v2.4s, v2.4s, v22.4s\n"
- ".inst 0x4f81e1fa // sdot v26.4s, v15.16b, v1.4b[0]\n"
- "zip1 v3.4s, v3.4s, v17.4s\n"
- "zip1 v24.4s, v24.4s, v16.4s\n"
- ".inst 0x4fa1e1fb // sdot v27.4s, v15.16b, v1.4b[1]\n"
- "zip1 v4.4s, v4.4s, v23.4s\n"
- "movi v23.4s, #0x0\n"
- ".inst 0x4f81e9f7 // sdot v23.4s, v15.16b, v1.4b[2]\n"
- "movi v22.4s, #0x0\n"
- "movi v21.4s, #0x0\n"
- ".inst 0x4fa1e9f6 // sdot v22.4s, v15.16b, v1.4b[3]\n"
- "movi v20.4s, #0x0\n"
- "movi v9.4s, #0x0\n"
- ".inst 0x4f82e1f5 // sdot v21.4s, v15.16b, v2.4b[0]\n"
- "movi v8.4s, #0x0\n"
+ "mov v11.16b, v0.16b\n"
+ "ext v11.16b, v11.16b, v11.16b, #0x6\n"
+ "mov v18.16b, v2.16b\n"
+ "zip1 v0.4s, v0.4s, v17.4s\n"
+ "ext v18.16b, v18.16b, v18.16b, #0x2\n"
+ "zip1 v12.4s, v12.4s, v11.4s\n"
+ "zip1 v0.4s, v0.4s, v12.4s\n"
+ "mov v17.16b, v2.16b\n"
+ "ext v17.16b, v17.16b, v17.16b, #0x4\n"
+ "mov v19.16b, v2.16b\n"
+ "ext v19.16b, v19.16b, v19.16b, #0x6\n"
+ "mov v28.16b, v7.16b\n"
+ "zip1 v2.4s, v2.4s, v17.4s\n"
+ "ext v28.16b, v28.16b, v28.16b, #0x2\n"
+ "zip1 v18.4s, v18.4s, v19.4s\n"
+ "zip1 v2.4s, v2.4s, v18.4s\n"
+ "mov v18.16b, v7.16b\n"
+ "ext v18.16b, v18.16b, v18.16b, #0x4\n"
+ "mov v21.16b, v7.16b\n"
+ "ext v21.16b, v21.16b, v21.16b, #0x6\n"
+ "movi v30.4s, #0x0\n"
+ "zip1 v7.4s, v7.4s, v18.4s\n"
+ "movi v3.4s, #0x0\n"
+ "zip1 v28.4s, v28.4s, v21.4s\n"
+ "zip1 v7.4s, v7.4s, v28.4s\n"
+ "movi v12.4s, #0x0\n"
+ "movi v11.4s, #0x0\n"
"movi v19.4s, #0x0\n"
- ".inst 0x4fa2e1f4 // sdot v20.4s, v15.16b, v2.4b[1]\n"
- "movi v18.4s, #0x0\n"
+ "movi v21.4s, #0x0\n"
"movi v17.4s, #0x0\n"
- ".inst 0x4f82e9e9 // sdot v9.4s, v15.16b, v2.4b[2]\n"
"movi v16.4s, #0x0\n"
- "zip1 v0.4s, v0.4s, v25.4s\n"
- ".inst 0x4fa2e9e8 // sdot v8.4s, v15.16b, v2.4b[3]\n"
- "zip1 v3.4s, v3.4s, v24.4s\n"
- ".inst 0x4f84e1f3 // sdot v19.4s, v15.16b, v4.4b[0]\n"
- ".inst 0x4fa4e1f2 // sdot v18.4s, v15.16b, v4.4b[1]\n"
- ".inst 0x4f84e9f1 // sdot v17.4s, v15.16b, v4.4b[2]\n"
- ".inst 0x4fa4e9f0 // sdot v16.4s, v15.16b, v4.4b[3]\n"
- "movi v31.4s, #0x0\n"
- "movi v30.4s, #0x0\n"
- "movi v29.4s, #0x0\n"
- ".inst 0x4f80e1ff // sdot v31.4s, v15.16b, v0.4b[0]\n"
"movi v28.4s, #0x0\n"
- ".inst 0x4fa0e1fe // sdot v30.4s, v15.16b, v0.4b[1]\n"
- ".inst 0x4f80e9fd // sdot v29.4s, v15.16b, v0.4b[2]\n"
- ".inst 0x4fa0e9fc // sdot v28.4s, v15.16b, v0.4b[3]\n"
- "add v24.4s, v26.4s, v21.4s\n"
- "add v25.4s, v27.4s, v20.4s\n"
- "add v26.4s, v23.4s, v9.4s\n"
- "add v27.4s, v22.4s, v8.4s\n"
- "add v23.4s, v19.4s, v21.4s\n"
- "movi v22.4s, #0x0\n"
- ".inst 0x4f83e1f6 // sdot v22.4s, v15.16b, v3.4b[0]\n"
- "add v21.4s, v18.4s, v20.4s\n"
- "movi v20.4s, #0x0\n"
- ".inst 0x4fa3e1f4 // sdot v20.4s, v15.16b, v3.4b[1]\n"
- "add v19.4s, v17.4s, v9.4s\n"
"movi v18.4s, #0x0\n"
- ".inst 0x4f83e9f2 // sdot v18.4s, v15.16b, v3.4b[2]\n"
- "add v17.4s, v16.4s, v8.4s\n"
- "movi v16.4s, #0x0\n"
- ".inst 0x4fa3e9f0 // sdot v16.4s, v15.16b, v3.4b[3]\n"
- "add v24.4s, v24.4s, v31.4s\n"
- "add v25.4s, v25.4s, v30.4s\n"
- "add v26.4s, v26.4s, v29.4s\n"
- "add v27.4s, v27.4s, v28.4s\n"
- "add v28.4s, v23.4s, v22.4s\n"
- "add v29.4s, v21.4s, v20.4s\n"
- "add v30.4s, v19.4s, v18.4s\n"
- "add v31.4s, v17.4s, v16.4s\n"
- "neg v13.4s, v13.4s\n"
- "mul v24.4s, v24.4s, v13.4s\n"
- "mul v25.4s, v25.4s, v13.4s\n"
- "mul v26.4s, v26.4s, v13.4s\n"
- "mul v27.4s, v27.4s, v13.4s\n"
- "mul v28.4s, v28.4s, v13.4s\n"
- "mul v29.4s, v29.4s, v13.4s\n"
- "mul v30.4s, v30.4s, v13.4s\n"
- "mul v31.4s, v31.4s, v13.4s\n"
- "zip1 v19.4s, v24.4s, v26.4s\n"
- "zip1 v18.4s, v25.4s, v27.4s\n"
- "zip1 v17.4s, v28.4s, v30.4s\n"
- "zip1 v16.4s, v29.4s, v31.4s\n"
- "zip1 v22.4s, v19.4s, v18.4s\n"
- "zip1 v23.4s, v17.4s, v16.4s\n"
- "add v24.4s, v24.4s, v14.4s\n"
- "add v25.4s, v25.4s, v14.4s\n"
- "add v26.4s, v26.4s, v14.4s\n"
- "add v27.4s, v27.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v31.4s, v31.4s, v14.4s\n"
+ "movi v20.4s, #0x0\n"
+ "movi v24.4s, #0x0\n"
+ "movi v31.4s, #0x0\n"
+ ".inst 0x4f8fe0ba // sdot v26.4s, v5.16b, v15.4b[0]\n"
+ ".inst 0x4fafe0a1 // sdot v1.4s, v5.16b, v15.4b[1]\n"
+ ".inst 0x4f8fe8b6 // sdot v22.4s, v5.16b, v15.4b[2]\n"
+ ".inst 0x4fafe8b9 // sdot v25.4s, v5.16b, v15.4b[3]\n"
+ ".inst 0x4f9de0ad // sdot v13.4s, v5.16b, v29.4b[0]\n"
+ ".inst 0x4fbde0be // sdot v30.4s, v5.16b, v29.4b[1]\n"
+ ".inst 0x4f9de8a3 // sdot v3.4s, v5.16b, v29.4b[2]\n"
+ ".inst 0x4fbde8ac // sdot v12.4s, v5.16b, v29.4b[3]\n"
+ ".inst 0x4f80e0ab // sdot v11.4s, v5.16b, v0.4b[0]\n"
+ ".inst 0x4fa0e0b3 // sdot v19.4s, v5.16b, v0.4b[1]\n"
+ ".inst 0x4f80e8b5 // sdot v21.4s, v5.16b, v0.4b[2]\n"
+ ".inst 0x4fa0e8b1 // sdot v17.4s, v5.16b, v0.4b[3]\n"
+ ".inst 0x4f82e0b0 // sdot v16.4s, v5.16b, v2.4b[0]\n"
+ ".inst 0x4fa2e0bc // sdot v28.4s, v5.16b, v2.4b[1]\n"
+ ".inst 0x4f82e8b2 // sdot v18.4s, v5.16b, v2.4b[2]\n"
+ ".inst 0x4fa2e8b4 // sdot v20.4s, v5.16b, v2.4b[3]\n"
+ ".inst 0x4f87e0b8 // sdot v24.4s, v5.16b, v7.4b[0]\n"
+ ".inst 0x4fa7e0bf // sdot v31.4s, v5.16b, v7.4b[1]\n"
+ "mov v26.16b, v26.16b\n"
+ "mov v1.16b, v1.16b\n"
+ "mov v22.16b, v22.16b\n"
+ "mov v25.16b, v25.16b\n"
+ "add v26.4s, v26.4s, v13.4s\n"
+ "movi v13.4s, #0x0\n"
+ ".inst 0x4f87e8ad // sdot v13.4s, v5.16b, v7.4b[2]\n"
+ "add v1.4s, v1.4s, v30.4s\n"
+ "movi v30.4s, #0x0\n"
+ ".inst 0x4fa7e8be // sdot v30.4s, v5.16b, v7.4b[3]\n"
+ "add v22.4s, v22.4s, v3.4s\n"
+ "add v25.4s, v25.4s, v12.4s\n"
+ "add v26.4s, v26.4s, v11.4s\n"
+ "add v1.4s, v1.4s, v19.4s\n"
+ "add v22.4s, v22.4s, v21.4s\n"
+ "add v25.4s, v25.4s, v17.4s\n"
+ "mov v11.16b, v11.16b\n"
+ "mov v3.16b, v19.16b\n"
+ "mov v19.16b, v21.16b\n"
+ "mov v21.16b, v17.16b\n"
+ "add v11.4s, v11.4s, v16.4s\n"
+ "add v3.4s, v3.4s, v28.4s\n"
+ "add v19.4s, v19.4s, v18.4s\n"
+ "add v21.4s, v21.4s, v20.4s\n"
+ "add v11.4s, v11.4s, v24.4s\n"
+ "add v3.4s, v3.4s, v31.4s\n"
+ "add v19.4s, v19.4s, v13.4s\n"
+ "add v21.4s, v21.4s, v30.4s\n"
+ "neg v4.4s, v4.4s\n"
+ "mul v26.4s, v26.4s, v4.4s\n"
+ "str q26, [SP, #0x0]\n"
+ "mul v1.4s, v1.4s, v4.4s\n"
+ "mul v22.4s, v22.4s, v4.4s\n"
+ "str q1, [SP, #0x10]\n"
+ "mul v25.4s, v25.4s, v4.4s\n"
+ "mul v11.4s, v11.4s, v4.4s\n"
+ "str q22, [SP, #0x20]\n"
+ "mul v3.4s, v3.4s, v4.4s\n"
+ "str q25, [SP, #0x30]\n"
+ "mul v19.4s, v19.4s, v4.4s\n"
+ "mul v21.4s, v21.4s, v4.4s\n"
+ "str q11, [SP, #0x40]\n"
+ "add v26.4s, v26.4s, v6.4s\n"
+ "str q3, [SP, #0x50]\n"
+ "add v1.4s, v1.4s, v6.4s\n"
+ "str q19, [SP, #0x60]\n"
+ "add v22.4s, v22.4s, v6.4s\n"
+ "add v25.4s, v25.4s, v6.4s\n"
+ "str q21, [SP, #0x70]\n"
+ "add v11.4s, v11.4s, v6.4s\n"
+ "add v3.4s, v3.4s, v6.4s\n"
+ "add v19.4s, v19.4s, v6.4s\n"
+ "add v21.4s, v21.4s, v6.4s\n"
"ble 2f\n"
"1:" // Loop
- "ldr q21, [%x[params], #0x0]\n"
- "ldr q20, [%x[params], #0x10]\n"
- ".inst 0x4f80e0b8 // sdot v24.4s, v5.16b, v0.4b[0]\n"
- ".inst 0x4fa0e0b9 // sdot v25.4s, v5.16b, v0.4b[1]\n"
- "ldr q14, [%x[params], #0x20]\n"
- ".inst 0x4f80e8ba // sdot v26.4s, v5.16b, v0.4b[2]\n"
- ".inst 0x4fa0e8bb // sdot v27.4s, v5.16b, v0.4b[3]\n"
+ ".inst 0x4f8fe11a // sdot v26.4s, v8.16b, v15.4b[0]\n"
+ "ldr q20, [%x[params], #0x0]\n"
+ "add x11, x11, #0x10\n"
+ ".inst 0x4fafe101 // sdot v1.4s, v8.16b, v15.4b[1]\n"
+ "ldr q4, [%x[params], #0x10]\n"
"sub %x[n_channels], %x[n_channels], #0x4\n"
- ".inst 0x4f81e0d8 // sdot v24.4s, v6.16b, v1.4b[0]\n"
- ".inst 0x4fa1e0d9 // sdot v25.4s, v6.16b, v1.4b[1]\n"
+ ".inst 0x4f8fe916 // sdot v22.4s, v8.16b, v15.4b[2]\n"
+ "ldr q6, [%x[params], #0x20]\n"
"cmp %x[n_channels], #0x4\n"
- "add x9, x9, #0x10\n"
- ".inst 0x4f81e8da // sdot v26.4s, v6.16b, v1.4b[2]\n"
- ".inst 0x4fa1e8db // sdot v27.4s, v6.16b, v1.4b[3]\n"
- ".inst 0x4f82e0bc // sdot v28.4s, v5.16b, v2.4b[0]\n"
- ".inst 0x4fa2e0bd // sdot v29.4s, v5.16b, v2.4b[1]\n"
- ".inst 0x4f82e8be // sdot v30.4s, v5.16b, v2.4b[2]\n"
- ".inst 0x4fa2e8bf // sdot v31.4s, v5.16b, v2.4b[3]\n"
- "ldr q5, [%x[params], #0x30]\n"
- ".inst 0x4f82e0f8 // sdot v24.4s, v7.16b, v2.4b[0]\n"
- ".inst 0x4fa2e0f9 // sdot v25.4s, v7.16b, v2.4b[1]\n"
- "sqrdmulh v24.4s, v24.4s, v21.4s\n"
- ".inst 0x4f82e8fa // sdot v26.4s, v7.16b, v2.4b[2]\n"
- ".inst 0x4fa2e8fb // sdot v27.4s, v7.16b, v2.4b[3]\n"
- "sqrdmulh v25.4s, v25.4s, v21.4s\n"
- ".inst 0x4f83e0dc // sdot v28.4s, v6.16b, v3.4b[0]\n"
- ".inst 0x4fa3e0dd // sdot v29.4s, v6.16b, v3.4b[1]\n"
- "sqrdmulh v26.4s, v26.4s, v21.4s\n"
- ".inst 0x4f83e8de // sdot v30.4s, v6.16b, v3.4b[2]\n"
- ".inst 0x4fa3e8df // sdot v31.4s, v6.16b, v3.4b[3]\n"
- "ldr q6, [%x[params], #0x40]\n"
- "sqrdmulh v27.4s, v27.4s, v21.4s\n"
- ".inst 0x4f84e0fc // sdot v28.4s, v7.16b, v4.4b[0]\n"
- ".inst 0x4fa4e0fd // sdot v29.4s, v7.16b, v4.4b[1]\n"
- "and v19.16b, v24.16b, v20.16b\n"
- ".inst 0x4f84e8fe // sdot v30.4s, v7.16b, v4.4b[2]\n"
- ".inst 0x4fa4e8ff // sdot v31.4s, v7.16b, v4.4b[3]\n"
- "ldr q7, [%x[params], #0x50]\n"
- "and v18.16b, v25.16b, v20.16b\n"
- "and v17.16b, v26.16b, v20.16b\n"
- "and v16.16b, v27.16b, v20.16b\n"
+ ".inst 0x4fafe919 // sdot v25.4s, v8.16b, v15.4b[3]\n"
+ ".inst 0x4f80e10b // sdot v11.4s, v8.16b, v0.4b[0]\n"
+ ".inst 0x4fa0e103 // sdot v3.4s, v8.16b, v0.4b[1]\n"
+ ".inst 0x4f80e913 // sdot v19.4s, v8.16b, v0.4b[2]\n"
+ ".inst 0x4fa0e915 // sdot v21.4s, v8.16b, v0.4b[3]\n"
+ "ldr q8, [%x[params], #0x30]\n"
+ ".inst 0x4f9de13a // sdot v26.4s, v9.16b, v29.4b[0]\n"
+ ".inst 0x4fbde121 // sdot v1.4s, v9.16b, v29.4b[1]\n"
+ ".inst 0x4f9de936 // sdot v22.4s, v9.16b, v29.4b[2]\n"
+ ".inst 0x4fbde939 // sdot v25.4s, v9.16b, v29.4b[3]\n"
+ ".inst 0x4f82e12b // sdot v11.4s, v9.16b, v2.4b[0]\n"
+ ".inst 0x4fa2e123 // sdot v3.4s, v9.16b, v2.4b[1]\n"
+ ".inst 0x4f82e933 // sdot v19.4s, v9.16b, v2.4b[2]\n"
+ ".inst 0x4fa2e935 // sdot v21.4s, v9.16b, v2.4b[3]\n"
+ "ldr q9, [%x[params], #0x40]\n"
+ ".inst 0x4f80e15a // sdot v26.4s, v10.16b, v0.4b[0]\n"
+ ".inst 0x4fa0e141 // sdot v1.4s, v10.16b, v0.4b[1]\n"
+ ".inst 0x4f80e956 // sdot v22.4s, v10.16b, v0.4b[2]\n"
+ ".inst 0x4fa0e959 // sdot v25.4s, v10.16b, v0.4b[3]\n"
+ ".inst 0x4f87e14b // sdot v11.4s, v10.16b, v7.4b[0]\n"
+ ".inst 0x4fa7e143 // sdot v3.4s, v10.16b, v7.4b[1]\n"
+ ".inst 0x4f87e953 // sdot v19.4s, v10.16b, v7.4b[2]\n"
+ ".inst 0x4fa7e955 // sdot v21.4s, v10.16b, v7.4b[3]\n"
+ "ldr q10, [%x[params], #0x50]\n"
"add %x[params], %x[params], #0x60\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
+ "sqrdmulh v26.4s, v26.4s, v20.4s\n"
+ "sqrdmulh v1.4s, v1.4s, v20.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v20.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v20.4s\n"
+ "sqrdmulh v11.4s, v11.4s, v20.4s\n"
+ "and v30.16b, v26.16b, v4.16b\n"
+ "and v17.16b, v1.16b, v4.16b\n"
+ "and v16.16b, v22.16b, v4.16b\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqrdmulh v28.4s, v28.4s, v21.4s\n"
- "sqrdmulh v29.4s, v29.4s, v21.4s\n"
- "sqrdmulh v30.4s, v30.4s, v21.4s\n"
- "sqrdmulh v31.4s, v31.4s, v21.4s\n"
- "sqadd v24.4s, v24.4s, v19.4s\n"
- "sqadd v25.4s, v25.4s, v18.4s\n"
- "sqadd v26.4s, v26.4s, v17.4s\n"
- "sqadd v27.4s, v27.4s, v16.4s\n"
- "and v19.16b, v28.16b, v20.16b\n"
- "and v18.16b, v29.16b, v20.16b\n"
- "and v17.16b, v30.16b, v20.16b\n"
- "and v16.16b, v31.16b, v20.16b\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v26.4s, v26.4s, v30.4s\n"
+ "sqadd v1.4s, v1.4s, v17.4s\n"
+ "sqadd v22.4s, v22.4s, v16.4s\n"
+ "and v16.16b, v25.16b, v4.16b\n"
+ "srshl v26.4s, v26.4s, v4.4s\n"
+ "srshl v1.4s, v1.4s, v4.4s\n"
+ "srshl v22.4s, v22.4s, v4.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqadd v28.4s, v28.4s, v19.4s\n"
- "sqadd v29.4s, v29.4s, v18.4s\n"
- "sqadd v30.4s, v30.4s, v17.4s\n"
- "sqadd v31.4s, v31.4s, v16.4s\n"
- "srshl v24.4s, v24.4s, v20.4s\n"
- "srshl v25.4s, v25.4s, v20.4s\n"
- "srshl v26.4s, v26.4s, v20.4s\n"
- "srshl v27.4s, v27.4s, v20.4s\n"
- "srshl v28.4s, v28.4s, v20.4s\n"
- "srshl v29.4s, v29.4s, v20.4s\n"
- "srshl v30.4s, v30.4s, v20.4s\n"
- "srshl v31.4s, v31.4s, v20.4s\n"
- "add v24.4s, v24.4s, v12.4s\n"
- "add v25.4s, v25.4s, v12.4s\n"
- "add v26.4s, v26.4s, v12.4s\n"
- "add v27.4s, v27.4s, v12.4s\n"
- "add v28.4s, v28.4s, v12.4s\n"
- "add v29.4s, v29.4s, v12.4s\n"
- "add v30.4s, v30.4s, v12.4s\n"
- "add v31.4s, v31.4s, v12.4s\n"
- "smin v24.4s, v24.4s, v10.4s\n"
- "smin v25.4s, v25.4s, v10.4s\n"
- "smin v26.4s, v26.4s, v10.4s\n"
- "smin v27.4s, v27.4s, v10.4s\n"
- "smin v28.4s, v28.4s, v10.4s\n"
- "smin v29.4s, v29.4s, v10.4s\n"
- "smin v30.4s, v30.4s, v10.4s\n"
- "smin v31.4s, v31.4s, v10.4s\n"
- "smax v24.4s, v24.4s, v11.4s\n"
- "smax v25.4s, v25.4s, v11.4s\n"
- "smax v26.4s, v26.4s, v11.4s\n"
- "smax v27.4s, v27.4s, v11.4s\n"
- "smax v28.4s, v28.4s, v11.4s\n"
- "smax v29.4s, v29.4s, v11.4s\n"
- "smax v30.4s, v30.4s, v11.4s\n"
- "smax v31.4s, v31.4s, v11.4s\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "add v26.4s, v26.4s, v14.4s\n"
+ "add v1.4s, v1.4s, v14.4s\n"
+ "add v22.4s, v22.4s, v14.4s\n"
+ "smin v26.4s, v26.4s, v23.4s\n"
+ "smin v1.4s, v1.4s, v23.4s\n"
+ "smin v22.4s, v22.4s, v23.4s\n"
+ "smax v26.4s, v26.4s, v27.4s\n"
+ "smax v1.4s, v1.4s, v27.4s\n"
+ "smax v22.4s, v22.4s, v27.4s\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "str s24, [x27, x28]\n"
+ "uzp1 v1.16b, v1.16b, v1.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "str s25, [x26, x28]\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "str s26, [x25, x28]\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
- "str s27, [x24, x28]\n"
- "str s28, [x23, x28]\n"
- "dup v24.4s, v22.s[0]\n"
- "dup v25.4s, v22.s[1]\n"
- "str s29, [x22, x28]\n"
- "dup v26.4s, v22.s[2]\n"
- "dup v27.4s, v22.s[3]\n"
- "str s30, [x21, x28]\n"
- "dup v28.4s, v23.s[0]\n"
- "dup v29.4s, v23.s[1]\n"
- "str s31, [x20, x28]\n"
- "dup v30.4s, v23.s[2]\n"
- "dup v31.4s, v23.s[3]\n"
- "add x28, x28, #0x4\n"
- "add v24.4s, v24.4s, v14.4s\n"
+ "str s26, [x26, x10]\n"
+ "uzp1 v1.16b, v1.16b, v1.16b\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "ldr q26, [SP, #0x0]\n"
+ "sqadd v25.4s, v25.4s, v16.4s\n"
+ "str s1, [x25, x10]\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "ldr q1, [SP, #0x10]\n"
+ "and v16.16b, v11.16b, v4.16b\n"
+ "str s22, [x24, x10]\n"
+ "sqrdmulh v3.4s, v3.4s, v20.4s\n"
+ "ldr q22, [SP, #0x20]\n"
+ "srshl v25.4s, v25.4s, v4.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v19.4s, v19.4s, v20.4s\n"
+ "and v17.16b, v3.16b, v4.16b\n"
"add v25.4s, v25.4s, v14.4s\n"
- "add v26.4s, v26.4s, v14.4s\n"
- "add v27.4s, v27.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v31.4s, v31.4s, v14.4s\n"
+ "sqadd v11.4s, v11.4s, v16.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "smin v25.4s, v25.4s, v23.4s\n"
+ "and v16.16b, v19.16b, v4.16b\n"
+ "srshl v11.4s, v11.4s, v4.4s\n"
+ "smax v25.4s, v25.4s, v27.4s\n"
+ "sqadd v3.4s, v3.4s, v17.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "add v11.4s, v11.4s, v14.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s25, [x23, x10]\n"
+ "smin v11.4s, v11.4s, v23.4s\n"
+ "srshl v3.4s, v3.4s, v4.4s\n"
+ "ldr q25, [SP, #0x30]\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "sqrdmulh v21.4s, v21.4s, v20.4s\n"
+ "smax v11.4s, v11.4s, v27.4s\n"
+ "add v3.4s, v3.4s, v14.4s\n"
+ "srshl v19.4s, v19.4s, v4.4s\n"
+ "uzp1 v11.16b, v11.16b, v11.16b\n"
+ "smin v3.4s, v3.4s, v23.4s\n"
+ "uzp1 v11.16b, v11.16b, v11.16b\n"
+ "str s11, [x22, x10]\n"
+ "smax v3.4s, v3.4s, v27.4s\n"
+ "add v19.4s, v19.4s, v14.4s\n"
+ "ldr q11, [SP, #0x40]\n"
+ "and v16.16b, v21.16b, v4.16b\n"
+ "add v26.4s, v26.4s, v6.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "smin v19.4s, v19.4s, v23.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "str s3, [x21, x10]\n"
+ "smax v19.4s, v19.4s, v27.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "ldr q3, [SP, #0x50]\n"
+ "add v1.4s, v1.4s, v6.4s\n"
+ "add v22.4s, v22.4s, v6.4s\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "sqadd v21.4s, v21.4s, v16.4s\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str s19, [x20, x10]\n"
+ "add v25.4s, v25.4s, v6.4s\n"
+ "add v11.4s, v11.4s, v6.4s\n"
+ "ldr q19, [SP, #0x60]\n"
+ "srshl v21.4s, v21.4s, v4.4s\n"
+ "add v3.4s, v3.4s, v6.4s\n"
+ "add v21.4s, v21.4s, v14.4s\n"
+ "add v19.4s, v19.4s, v6.4s\n"
+ "smin v21.4s, v21.4s, v23.4s\n"
+ "smax v21.4s, v21.4s, v27.4s\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "str s21, [x19, x10]\n"
+ "add x10, x10, #0x4\n"
+ "ldr q21, [SP, #0x70]\n"
+ "add v21.4s, v21.4s, v6.4s\n"
"bgt 1b\n"
"2:" // Tail
- "ldr q21, [%x[params], #0x0]\n"
- "ldr q20, [%x[params], #0x10]\n"
- ".inst 0x4f80e0b8 // sdot v24.4s, v5.16b, v0.4b[0]\n"
- ".inst 0x4fa0e0b9 // sdot v25.4s, v5.16b, v0.4b[1]\n"
- ".inst 0x4f80e8ba // sdot v26.4s, v5.16b, v0.4b[2]\n"
- ".inst 0x4fa0e8bb // sdot v27.4s, v5.16b, v0.4b[3]\n"
+ ".inst 0x4f8fe11a // sdot v26.4s, v8.16b, v15.4b[0]\n"
+ "ldr q20, [%x[params], #0x0]\n"
+ "add x26, x26, x10\n"
+ ".inst 0x4fafe101 // sdot v1.4s, v8.16b, v15.4b[1]\n"
+ "ldr q4, [%x[params], #0x10]\n"
+ "add x25, x25, x10\n"
+ ".inst 0x4f8fe916 // sdot v22.4s, v8.16b, v15.4b[2]\n"
+ "add x24, x24, x10\n"
+ ".inst 0x4fafe919 // sdot v25.4s, v8.16b, v15.4b[3]\n"
+ "add x23, x23, x10\n"
+ ".inst 0x4f80e10b // sdot v11.4s, v8.16b, v0.4b[0]\n"
+ "add x22, x22, x10\n"
+ ".inst 0x4fa0e103 // sdot v3.4s, v8.16b, v0.4b[1]\n"
+ "add x21, x21, x10\n"
+ ".inst 0x4f80e913 // sdot v19.4s, v8.16b, v0.4b[2]\n"
+ "add x20, x20, x10\n"
+ ".inst 0x4fa0e915 // sdot v21.4s, v8.16b, v0.4b[3]\n"
+ "add x19, x19, x10\n"
+ ".inst 0x4f9de13a // sdot v26.4s, v9.16b, v29.4b[0]\n"
"cmp %x[n_channels], #0x4\n"
- "add x27, x27, x28\n"
- ".inst 0x4f81e0d8 // sdot v24.4s, v6.16b, v1.4b[0]\n"
- ".inst 0x4fa1e0d9 // sdot v25.4s, v6.16b, v1.4b[1]\n"
- "add x26, x26, x28\n"
- "add x25, x25, x28\n"
- ".inst 0x4f81e8da // sdot v26.4s, v6.16b, v1.4b[2]\n"
- ".inst 0x4fa1e8db // sdot v27.4s, v6.16b, v1.4b[3]\n"
- "add x24, x24, x28\n"
- "add x23, x23, x28\n"
- ".inst 0x4f82e0bc // sdot v28.4s, v5.16b, v2.4b[0]\n"
- ".inst 0x4fa2e0bd // sdot v29.4s, v5.16b, v2.4b[1]\n"
- "add x22, x22, x28\n"
- "add x21, x21, x28\n"
- ".inst 0x4f82e8be // sdot v30.4s, v5.16b, v2.4b[2]\n"
- ".inst 0x4fa2e8bf // sdot v31.4s, v5.16b, v2.4b[3]\n"
- "add x20, x20, x28\n"
+ ".inst 0x4fbde121 // sdot v1.4s, v9.16b, v29.4b[1]\n"
"add %x[params], %x[params], #0x20\n"
- ".inst 0x4f82e0f8 // sdot v24.4s, v7.16b, v2.4b[0]\n"
- ".inst 0x4fa2e0f9 // sdot v25.4s, v7.16b, v2.4b[1]\n"
- "sqrdmulh v24.4s, v24.4s, v21.4s\n"
- ".inst 0x4f82e8fa // sdot v26.4s, v7.16b, v2.4b[2]\n"
- ".inst 0x4fa2e8fb // sdot v27.4s, v7.16b, v2.4b[3]\n"
- "sqrdmulh v25.4s, v25.4s, v21.4s\n"
- ".inst 0x4f83e0dc // sdot v28.4s, v6.16b, v3.4b[0]\n"
- ".inst 0x4fa3e0dd // sdot v29.4s, v6.16b, v3.4b[1]\n"
- "sqrdmulh v26.4s, v26.4s, v21.4s\n"
- ".inst 0x4f83e8de // sdot v30.4s, v6.16b, v3.4b[2]\n"
- ".inst 0x4fa3e8df // sdot v31.4s, v6.16b, v3.4b[3]\n"
- "sqrdmulh v27.4s, v27.4s, v21.4s\n"
- ".inst 0x4f84e0fc // sdot v28.4s, v7.16b, v4.4b[0]\n"
- ".inst 0x4fa4e0fd // sdot v29.4s, v7.16b, v4.4b[1]\n"
- "and v19.16b, v24.16b, v20.16b\n"
- ".inst 0x4f84e8fe // sdot v30.4s, v7.16b, v4.4b[2]\n"
- ".inst 0x4fa4e8ff // sdot v31.4s, v7.16b, v4.4b[3]\n"
- "and v18.16b, v25.16b, v20.16b\n"
- "and v17.16b, v26.16b, v20.16b\n"
- "and v16.16b, v27.16b, v20.16b\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
+ ".inst 0x4f9de936 // sdot v22.4s, v9.16b, v29.4b[2]\n"
+ ".inst 0x4fbde939 // sdot v25.4s, v9.16b, v29.4b[3]\n"
+ ".inst 0x4f82e12b // sdot v11.4s, v9.16b, v2.4b[0]\n"
+ ".inst 0x4fa2e123 // sdot v3.4s, v9.16b, v2.4b[1]\n"
+ ".inst 0x4f82e933 // sdot v19.4s, v9.16b, v2.4b[2]\n"
+ ".inst 0x4fa2e935 // sdot v21.4s, v9.16b, v2.4b[3]\n"
+ ".inst 0x4f80e15a // sdot v26.4s, v10.16b, v0.4b[0]\n"
+ ".inst 0x4fa0e141 // sdot v1.4s, v10.16b, v0.4b[1]\n"
+ ".inst 0x4f80e956 // sdot v22.4s, v10.16b, v0.4b[2]\n"
+ ".inst 0x4fa0e959 // sdot v25.4s, v10.16b, v0.4b[3]\n"
+ ".inst 0x4f87e14b // sdot v11.4s, v10.16b, v7.4b[0]\n"
+ ".inst 0x4fa7e143 // sdot v3.4s, v10.16b, v7.4b[1]\n"
+ ".inst 0x4f87e953 // sdot v19.4s, v10.16b, v7.4b[2]\n"
+ ".inst 0x4fa7e955 // sdot v21.4s, v10.16b, v7.4b[3]\n"
+ "sqrdmulh v26.4s, v26.4s, v20.4s\n"
+ "sqrdmulh v1.4s, v1.4s, v20.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v20.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v20.4s\n"
+ "and v30.16b, v26.16b, v4.16b\n"
+ "and v17.16b, v1.16b, v4.16b\n"
+ "and v16.16b, v22.16b, v4.16b\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqrdmulh v28.4s, v28.4s, v21.4s\n"
- "sqrdmulh v29.4s, v29.4s, v21.4s\n"
- "sqrdmulh v30.4s, v30.4s, v21.4s\n"
- "sqrdmulh v31.4s, v31.4s, v21.4s\n"
- "sqadd v24.4s, v24.4s, v19.4s\n"
- "sqadd v25.4s, v25.4s, v18.4s\n"
- "sqadd v26.4s, v26.4s, v17.4s\n"
- "sqadd v27.4s, v27.4s, v16.4s\n"
- "and v19.16b, v28.16b, v20.16b\n"
- "and v18.16b, v29.16b, v20.16b\n"
- "and v17.16b, v30.16b, v20.16b\n"
- "and v16.16b, v31.16b, v20.16b\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v26.4s, v26.4s, v30.4s\n"
+ "sqadd v1.4s, v1.4s, v17.4s\n"
+ "sqadd v22.4s, v22.4s, v16.4s\n"
+ "and v16.16b, v25.16b, v4.16b\n"
+ "srshl v26.4s, v26.4s, v4.4s\n"
+ "srshl v1.4s, v1.4s, v4.4s\n"
+ "srshl v22.4s, v22.4s, v4.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqadd v28.4s, v28.4s, v19.4s\n"
- "sqadd v29.4s, v29.4s, v18.4s\n"
- "sqadd v30.4s, v30.4s, v17.4s\n"
- "sqadd v31.4s, v31.4s, v16.4s\n"
- "srshl v24.4s, v24.4s, v20.4s\n"
- "srshl v25.4s, v25.4s, v20.4s\n"
- "srshl v26.4s, v26.4s, v20.4s\n"
- "srshl v27.4s, v27.4s, v20.4s\n"
- "srshl v28.4s, v28.4s, v20.4s\n"
- "srshl v29.4s, v29.4s, v20.4s\n"
- "srshl v30.4s, v30.4s, v20.4s\n"
- "srshl v31.4s, v31.4s, v20.4s\n"
- "add v24.4s, v24.4s, v12.4s\n"
- "add v25.4s, v25.4s, v12.4s\n"
- "add v26.4s, v26.4s, v12.4s\n"
- "add v27.4s, v27.4s, v12.4s\n"
- "add v28.4s, v28.4s, v12.4s\n"
- "add v29.4s, v29.4s, v12.4s\n"
- "add v30.4s, v30.4s, v12.4s\n"
- "add v31.4s, v31.4s, v12.4s\n"
- "smin v24.4s, v24.4s, v10.4s\n"
- "smin v25.4s, v25.4s, v10.4s\n"
- "smin v26.4s, v26.4s, v10.4s\n"
- "smin v27.4s, v27.4s, v10.4s\n"
- "smin v28.4s, v28.4s, v10.4s\n"
- "smin v29.4s, v29.4s, v10.4s\n"
- "smin v30.4s, v30.4s, v10.4s\n"
- "smin v31.4s, v31.4s, v10.4s\n"
- "smax v24.4s, v24.4s, v11.4s\n"
- "smax v25.4s, v25.4s, v11.4s\n"
- "smax v26.4s, v26.4s, v11.4s\n"
- "smax v27.4s, v27.4s, v11.4s\n"
- "smax v28.4s, v28.4s, v11.4s\n"
- "smax v29.4s, v29.4s, v11.4s\n"
- "smax v30.4s, v30.4s, v11.4s\n"
- "smax v31.4s, v31.4s, v11.4s\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "add v26.4s, v26.4s, v14.4s\n"
+ "add v1.4s, v1.4s, v14.4s\n"
+ "add v22.4s, v22.4s, v14.4s\n"
+ "smin v26.4s, v26.4s, v23.4s\n"
+ "smin v1.4s, v1.4s, v23.4s\n"
+ "smin v22.4s, v22.4s, v23.4s\n"
+ "smax v26.4s, v26.4s, v27.4s\n"
+ "smax v1.4s, v1.4s, v27.4s\n"
+ "smax v22.4s, v22.4s, v27.4s\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "uzp1 v1.16b, v1.16b, v1.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
+ "uzp1 v1.16b, v1.16b, v1.16b\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "sqadd v25.4s, v25.4s, v16.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "sqrdmulh v11.4s, v11.4s, v20.4s\n"
+ "sqrdmulh v3.4s, v3.4s, v20.4s\n"
+ "srshl v25.4s, v25.4s, v4.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v20.4s\n"
+ "and v16.16b, v11.16b, v4.16b\n"
+ "and v17.16b, v3.16b, v4.16b\n"
+ "add v25.4s, v25.4s, v14.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "smin v25.4s, v25.4s, v23.4s\n"
+ "sqadd v11.4s, v11.4s, v16.4s\n"
+ "sqadd v3.4s, v3.4s, v17.4s\n"
+ "smax v25.4s, v25.4s, v27.4s\n"
+ "and v16.16b, v19.16b, v4.16b\n"
+ "srshl v11.4s, v11.4s, v4.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "srshl v3.4s, v3.4s, v4.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "add v11.4s, v11.4s, v14.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "add v3.4s, v3.4s, v14.4s\n"
+ "smin v11.4s, v11.4s, v23.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "smin v3.4s, v3.4s, v23.4s\n"
+ "smax v11.4s, v11.4s, v27.4s\n"
+ "sqrdmulh v21.4s, v21.4s, v20.4s\n"
+ "smax v3.4s, v3.4s, v27.4s\n"
+ "uzp1 v11.16b, v11.16b, v11.16b\n"
+ "srshl v19.4s, v19.4s, v4.4s\n"
+ "uzp1 v11.16b, v11.16b, v11.16b\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "and v16.16b, v21.16b, v4.16b\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "add v19.4s, v19.4s, v14.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smin v19.4s, v19.4s, v23.4s\n"
+ "sqadd v21.4s, v21.4s, v16.4s\n"
+ "smax v19.4s, v19.4s, v27.4s\n"
+ "srshl v21.4s, v21.4s, v4.4s\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "add v21.4s, v21.4s, v14.4s\n"
+ "smin v21.4s, v21.4s, v23.4s\n"
+ "smax v21.4s, v21.4s, v27.4s\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
"blt 3f\n"
- "str s24, [x27, #0x0]\n"
- "str s25, [x26, #0x0]\n"
- "str s26, [x25, #0x0]\n"
- "str s27, [x24, #0x0]\n"
- "str s28, [x23, #0x0]\n"
- "str s29, [x22, #0x0]\n"
- "str s30, [x21, #0x0]\n"
- "str s31, [x20, #0x0]\n"
+ "str s26, [x26, #0x0]\n"
+ "str s1, [x25, #0x0]\n"
+ "str s22, [x24, #0x0]\n"
+ "str s25, [x23, #0x0]\n"
+ "str s11, [x22, #0x0]\n"
+ "str s3, [x21, #0x0]\n"
+ "str s19, [x20, #0x0]\n"
+ "str s21, [x19, #0x0]\n"
"b 4f\n"
"3:" // Tail: Oddments
+ "st1 { v26.b }[0], [x26], #0x1\n"
"subs %x[n_channels], %x[n_channels], #0x1\n"
- "st1 { v24.b }[0], [x27], #0x1\n"
- "st1 { v25.b }[0], [x26], #0x1\n"
- "st1 { v26.b }[0], [x25], #0x1\n"
- "st1 { v27.b }[0], [x24], #0x1\n"
- "st1 { v28.b }[0], [x23], #0x1\n"
- "st1 { v29.b }[0], [x22], #0x1\n"
- "st1 { v30.b }[0], [x21], #0x1\n"
- "st1 { v31.b }[0], [x20], #0x1\n"
+ "st1 { v1.b }[0], [x25], #0x1\n"
+ "st1 { v22.b }[0], [x24], #0x1\n"
+ "st1 { v25.b }[0], [x23], #0x1\n"
+ "st1 { v11.b }[0], [x22], #0x1\n"
+ "st1 { v3.b }[0], [x21], #0x1\n"
+ "st1 { v19.b }[0], [x20], #0x1\n"
+ "st1 { v21.b }[0], [x19], #0x1\n"
"beq 4f\n"
+ "st1 { v26.b }[1], [x26], #0x1\n"
"subs %x[n_channels], %x[n_channels], #0x1\n"
- "st1 { v24.b }[1], [x27], #0x1\n"
- "st1 { v25.b }[1], [x26], #0x1\n"
- "st1 { v26.b }[1], [x25], #0x1\n"
- "st1 { v27.b }[1], [x24], #0x1\n"
- "st1 { v28.b }[1], [x23], #0x1\n"
- "st1 { v29.b }[1], [x22], #0x1\n"
- "st1 { v30.b }[1], [x21], #0x1\n"
- "st1 { v31.b }[1], [x20], #0x1\n"
+ "st1 { v1.b }[1], [x25], #0x1\n"
+ "st1 { v22.b }[1], [x24], #0x1\n"
+ "st1 { v25.b }[1], [x23], #0x1\n"
+ "st1 { v11.b }[1], [x22], #0x1\n"
+ "st1 { v3.b }[1], [x21], #0x1\n"
+ "st1 { v19.b }[1], [x20], #0x1\n"
+ "st1 { v21.b }[1], [x19], #0x1\n"
"beq 4f\n"
+ "st1 { v26.b }[2], [x26], #0x1\n"
"subs %x[n_channels], %x[n_channels], #0x1\n"
- "st1 { v24.b }[2], [x27], #0x1\n"
- "st1 { v25.b }[2], [x26], #0x1\n"
- "st1 { v26.b }[2], [x25], #0x1\n"
- "st1 { v27.b }[2], [x24], #0x1\n"
- "st1 { v28.b }[2], [x23], #0x1\n"
- "st1 { v29.b }[2], [x22], #0x1\n"
- "st1 { v30.b }[2], [x21], #0x1\n"
- "st1 { v31.b }[2], [x20], #0x1\n"
+ "st1 { v1.b }[2], [x25], #0x1\n"
+ "st1 { v22.b }[2], [x24], #0x1\n"
+ "st1 { v25.b }[2], [x23], #0x1\n"
+ "st1 { v11.b }[2], [x22], #0x1\n"
+ "st1 { v3.b }[2], [x21], #0x1\n"
+ "st1 { v19.b }[2], [x20], #0x1\n"
+ "st1 { v21.b }[2], [x19], #0x1\n"
"beq 4f\n"
- "st1 { v24.b }[3], [x27], #0x1\n"
+ "st1 { v26.b }[3], [x26], #0x1\n"
"subs %x[n_channels], %x[n_channels], #0x1\n"
- "st1 { v25.b }[3], [x26], #0x1\n"
- "st1 { v26.b }[3], [x25], #0x1\n"
- "st1 { v27.b }[3], [x24], #0x1\n"
- "st1 { v28.b }[3], [x23], #0x1\n"
- "st1 { v29.b }[3], [x22], #0x1\n"
- "st1 { v30.b }[3], [x21], #0x1\n"
- "st1 { v31.b }[3], [x20], #0x1\n"
+ "st1 { v1.b }[3], [x25], #0x1\n"
+ "st1 { v22.b }[3], [x24], #0x1\n"
+ "st1 { v25.b }[3], [x23], #0x1\n"
+ "st1 { v11.b }[3], [x22], #0x1\n"
+ "st1 { v3.b }[3], [x21], #0x1\n"
+ "st1 { v19.b }[3], [x20], #0x1\n"
+ "st1 { v21.b }[3], [x19], #0x1\n"
"4:" // Tail: End
+ "add SP, SP, #0x80\n"
: [n_channels] "+&r" (n_output_channels), [params] "+&r" (params)
: [inptrs] "r" (inptrs), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
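Aside between hunks: the long sqrdmulh / and / sshr / sqadd / srshl runs restored above are the usual fixed-point requantization idiom, namely a saturating rounding doubling high multiply, a rounding arithmetic right shift, the c_offset add, the minval/maxval clamp, and a pair of uzp1 narrows down to bytes. The C++ below is a minimal scalar sketch of the combined per-lane effect (not an instruction-by-instruction transcription), assuming a single per-layer multiplier and a right-shift exponent in [0, 31]; the helper names are illustrative, not Compute Library API.

    #include <algorithm>
    #include <cstdint>
    #include <limits>

    // Scalar model of sqrdmulh: saturating, rounding, doubling high-half multiply.
    static int32_t rounding_doubling_high_mul(int32_t a, int32_t b)
    {
        if (a == std::numeric_limits<int32_t>::min() && b == a)
            return std::numeric_limits<int32_t>::max();  // the single overflow case saturates
        const int64_t ab    = static_cast<int64_t>(a) * b;
        const int64_t nudge = (ab >= 0) ? (1LL << 30) : (1 - (1LL << 30));
        return static_cast<int32_t>((ab + nudge) / (1LL << 31));
    }

    // Scalar model of the and / sshr #31 / sqadd fix-up followed by srshl:
    // an arithmetic right shift that rounds to nearest rather than toward -inf.
    static int32_t rounding_shift_right(int32_t x, int exponent)
    {
        const int32_t mask      = (1 << exponent) - 1;
        const int32_t remainder = x & mask;
        const int32_t threshold = (mask >> 1) + ((x < 0) ? 1 : 0);
        return (x >> exponent) + ((remainder > threshold) ? 1 : 0);
    }

    // One output lane, end to end: multiply, shift, offset, clamp, narrow.
    static int8_t requantize_lane(int32_t acc, int32_t per_layer_mul, int exponent,
                                  int32_t c_offset, int32_t minval, int32_t maxval)
    {
        int32_t v = rounding_doubling_high_mul(acc, per_layer_mul);  // sqrdmulh
        v = rounding_shift_right(v, exponent);                       // srshl by -exponent
        v += c_offset;                                               // add Requantize32::c_offset
        v = std::min(std::max(v, minval), maxval);                   // smax/smin clamp
        return static_cast<int8_t>(v);                               // uzp1 narrowing
    }

In the kernel the shift amounts are stored negated so a single srshl serves as the rounding right shift; the scalar helpers above fold that sign convention into a plain positive exponent.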
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp
index 9fa38c6efe..b198eff6ac 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,596 +40,622 @@ void a64_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst_impl
)
{
__asm__ __volatile__(
- "ldr q12, [%x[params], #0x0]\n"
- "ldr q8, [%x[params], #0x10]\n"
- "movi v28.16b, #0x1\n"
- "movi v18.4s, #0x0\n"
- "ldr q9, [%x[params], #0x20]\n"
- "ldr q10, [%x[params], #0x30]\n"
- "movi v31.4s, #0x0\n"
+ "movi v15.16b, #0x1\n"
+ "ldr x21, [%x[inptrs], #0x0]\n"
+ "add SP, SP, #-0x80\n"
+ "movi v14.4s, #0x1\n"
+ "ldr x20, [%x[inptrs], #0x8]\n"
+ "add x22, %x[qp], %[offsetof_Requantize32_b_offset]\n"
+ "movi v28.4s, #0x0\n"
+ "ldr x19, [%x[inptrs], #0x10]\n"
+ "mov x11, #0x0\n"
+ "movi v27.4s, #0x0\n"
+ "ld1 { v13.16b }, [x21]\n"
+ "mov x10, #0x0\n"
+ "movi v26.4s, #0x0\n"
+ "ld1 { v12.16b }, [x20]\n"
+ "add x9, %x[qp], %[offsetof_Requantize32_c_offset]\n"
+ "movi v25.4s, #0x0\n"
+ "ld1 { v7.16b }, [x19]\n"
+ "add x28, %x[qp], %[offsetof_Requantize32_minval]\n"
"movi v24.4s, #0x0\n"
- "ldr q11, [%x[params], #0x40]\n"
- "ldr x20, [%x[inptrs], #0x18]\n"
- "movi v30.4s, #0x0\n"
- "movi v21.4s, #0x0\n"
- "ld1 { v3.16b }, [x20]\n"
+ "ldr x21, [%x[inptrs], #0x18]\n"
+ "add x27, %x[qp], %[offsetof_Requantize32_maxval]\n"
+ "mov v18.16b, v13.16b\n"
"ldr x20, [%x[inptrs], #0x20]\n"
- "mov v16.16b, v3.16b\n"
- "ext v16.16b, v16.16b, v16.16b, #0x1\n"
- "ld1 { v4.16b }, [x20]\n"
- "ldr x20, [%x[inptrs], #0x10]\n"
- "mov v15.16b, v4.16b\n"
- "ext v15.16b, v15.16b, v15.16b, #0x1\n"
- "ld1 { v2.16b }, [x20]\n"
- "ldr x20, [%x[inptrs], #0x8]\n"
- "mov v20.16b, v2.16b\n"
- "ext v20.16b, v20.16b, v20.16b, #0x1\n"
- "ld1 { v1.16b }, [x20]\n"
- "ldr x20, [%x[inptrs], #0x28]\n"
- "zip1 v3.2d, v3.2d, v16.2d\n"
- "zip1 v4.2d, v4.2d, v15.2d\n"
+ "cmp %x[n_channels], #0x4\n"
+ "ext v18.16b, v18.16b, v18.16b, #0x1\n"
+ "ldr x19, [%x[inptrs], #0x28]\n"
+ "mov v17.16b, v12.16b\n"
+ "ld1 { v6.16b }, [x21]\n"
+ "ext v17.16b, v17.16b, v17.16b, #0x1\n"
"ld1 { v5.16b }, [x20]\n"
+ "mov v16.16b, v7.16b\n"
+ "ld1 { v4.16b }, [x19]\n"
+ "ext v16.16b, v16.16b, v16.16b, #0x1\n"
"ldr x20, [%x[inptrs], #0x30]\n"
- "mov v26.16b, v1.16b\n"
- "mov v13.16b, v5.16b\n"
- "ld1 { v6.16b }, [x20]\n"
- "ldr x20, [%x[inptrs], #0x38]\n"
- "mov v19.16b, v6.16b\n"
- "ext v26.16b, v26.16b, v26.16b, #0x1\n"
- "ld1 { v7.16b }, [x20]\n"
- "ldr x20, [%x[inptrs], #0x0]\n"
- "mov v17.16b, v7.16b\n"
- "zip1 v2.2d, v2.2d, v20.2d\n"
- "ld1 { v0.16b }, [x20]\n"
- "ext v13.16b, v13.16b, v13.16b, #0x1\n"
- "ext v19.16b, v19.16b, v19.16b, #0x1\n"
- ".inst 0x4f83e392 // sdot v18.4s, v28.16b, v3.4b[0]\n"
+ "zip1 v13.2d, v13.2d, v18.2d\n"
+ "ldr x19, [%x[inptrs], #0x38]\n"
+ "zip1 v12.2d, v12.2d, v17.2d\n"
+ "ld1r { v3.4s }, [x22]\n"
+ "mov v18.16b, v6.16b\n"
+ "ld1 { v2.16b }, [x20]\n"
+ "zip1 v7.2d, v7.2d, v16.2d\n"
+ "ld1 { v1.16b }, [x19]\n"
+ "ext v18.16b, v18.16b, v18.16b, #0x1\n"
+ "ldp x26, x25, [%x[outptrs], #0x0]\n"
+ "mov v17.16b, v5.16b\n"
+ "ldp x24, x23, [%x[outptrs], #0x10]\n"
"ext v17.16b, v17.16b, v17.16b, #0x1\n"
- ".inst 0x4f83eb9f // sdot v31.4s, v28.16b, v3.4b[2]\n"
- ".inst 0x4f84e398 // sdot v24.4s, v28.16b, v4.4b[0]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_b_offset]\n"
- "ld1r { v23.4s }, [x20]\n"
- ".inst 0x4f84eb9e // sdot v30.4s, v28.16b, v4.4b[2]\n"
- "mov v16.16b, v0.16b\n"
- ".inst 0x4f82e395 // sdot v21.4s, v28.16b, v2.4b[0]\n"
- "movi v20.4s, #0x0\n"
- "movi v29.4s, #0x1\n"
- ".inst 0x4f82eb94 // sdot v20.4s, v28.16b, v2.4b[2]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v14.4s }, [x20]\n"
+ "ldp x22, x21, [%x[outptrs], #0x20]\n"
+ "mov v16.16b, v4.16b\n"
+ "ldp x20, x19, [%x[outptrs], #0x30]\n"
+ "zip1 v6.2d, v6.2d, v18.2d\n"
+ "ld1r { v0.4s }, [x9]\n"
"ext v16.16b, v16.16b, v16.16b, #0x1\n"
- "zip1 v1.2d, v1.2d, v26.2d\n"
- ".inst 0x4fa3e3b2 // sdot v18.4s, v29.16b, v3.4b[1]\n"
- "zip1 v5.2d, v5.2d, v13.2d\n"
- "zip1 v6.2d, v6.2d, v19.2d\n"
- ".inst 0x4fa3ebbf // sdot v31.4s, v29.16b, v3.4b[3]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_minval]\n"
- "ld1r { v13.4s }, [x20]\n"
- "zip1 v7.2d, v7.2d, v17.2d\n"
+ "ld1r { v31.4s }, [x28]\n"
+ "zip1 v5.2d, v5.2d, v17.2d\n"
+ "ld1r { v30.4s }, [x27]\n"
+ "mov v17.16b, v2.16b\n"
+ "ldr q29, [%x[params], #0x0]\n"
+ "ext v17.16b, v17.16b, v17.16b, #0x1\n"
+ "ldr q8, [%x[params], #0x10]\n"
+ "zip1 v4.2d, v4.2d, v16.2d\n"
+ "ldr q9, [%x[params], #0x20]\n"
+ "mov v16.16b, v1.16b\n"
+ "ldr q10, [%x[params], #0x30]\n"
+ "ext v16.16b, v16.16b, v16.16b, #0x1\n"
+ "ldr q11, [%x[params], #0x40]\n"
+ "add %x[params], %x[params], #0x50\n"
+ "zip1 v2.2d, v2.2d, v17.2d\n"
+ "movi v23.4s, #0x0\n"
"movi v22.4s, #0x0\n"
- ".inst 0x4fa4e3b8 // sdot v24.4s, v29.16b, v4.4b[1]\n"
- "movi v26.4s, #0x0\n"
- ".inst 0x4fa4ebbe // sdot v30.4s, v29.16b, v4.4b[3]\n"
- ".inst 0x4f81e396 // sdot v22.4s, v28.16b, v1.4b[0]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_maxval]\n"
- "ld1r { v15.4s }, [x20]\n"
- "movi v25.4s, #0x0\n"
- "movi v27.4s, #0x0\n"
- ".inst 0x4f81eb9a // sdot v26.4s, v28.16b, v1.4b[2]\n"
- "zip1 v0.2d, v0.2d, v16.2d\n"
- "movi v19.4s, #0x0\n"
- ".inst 0x4f85e399 // sdot v25.4s, v28.16b, v5.4b[0]\n"
- "cmp %x[n_channels], #0x4\n"
- ".inst 0x4f85eb9b // sdot v27.4s, v28.16b, v5.4b[2]\n"
- ".inst 0x4f86e393 // sdot v19.4s, v28.16b, v6.4b[0]\n"
- "add v24.4s, v18.4s, v24.4s\n"
- "mov x9, #0x0\n"
+ "zip1 v1.2d, v1.2d, v16.2d\n"
+ "movi v21.4s, #0x0\n"
"movi v18.4s, #0x0\n"
- ".inst 0x4f86eb92 // sdot v18.4s, v28.16b, v6.4b[2]\n"
- ".inst 0x4fa2e3b5 // sdot v21.4s, v29.16b, v2.4b[1]\n"
- "mov x28, #0x0\n"
- ".inst 0x4fa2ebb4 // sdot v20.4s, v29.16b, v2.4b[3]\n"
- "add v17.4s, v31.4s, v30.4s\n"
- ".inst 0x4fa1e3b6 // sdot v22.4s, v29.16b, v1.4b[1]\n"
- "ldp x27, x26, [%x[outptrs], #0x0]\n"
- "movi v16.4s, #0x0\n"
- ".inst 0x4f87e390 // sdot v16.4s, v28.16b, v7.4b[0]\n"
- ".inst 0x4fa1ebba // sdot v26.4s, v29.16b, v1.4b[3]\n"
- "ldp x25, x24, [%x[outptrs], #0x10]\n"
- ".inst 0x4fa5e3b9 // sdot v25.4s, v29.16b, v5.4b[1]\n"
- ".inst 0x4fa5ebbb // sdot v27.4s, v29.16b, v5.4b[3]\n"
- "add v30.4s, v21.4s, v24.4s\n"
- "ldp x23, x22, [%x[outptrs], #0x20]\n"
- ".inst 0x4fa6e3b3 // sdot v19.4s, v29.16b, v6.4b[1]\n"
- ".inst 0x4fa6ebb2 // sdot v18.4s, v29.16b, v6.4b[3]\n"
- "add v31.4s, v20.4s, v17.4s\n"
- "ldp x21, x20, [%x[outptrs], #0x30]\n"
- ".inst 0x4fa7e3b0 // sdot v16.4s, v29.16b, v7.4b[1]\n"
- "add v22.4s, v22.4s, v30.4s\n"
- "add %x[params], %x[params], #0x50\n"
- "add v21.4s, v26.4s, v31.4s\n"
- "add v20.4s, v25.4s, v19.4s\n"
- "add v19.4s, v27.4s, v18.4s\n"
- "add v18.4s, v16.4s, v24.4s\n"
+ "movi v17.4s, #0x0\n"
"movi v16.4s, #0x0\n"
- ".inst 0x4f87eb90 // sdot v16.4s, v28.16b, v7.4b[2]\n"
- ".inst 0x4fa7ebb0 // sdot v16.4s, v29.16b, v7.4b[3]\n"
- "add v17.4s, v16.4s, v17.4s\n"
- "movi v16.4s, #0x0\n"
- ".inst 0x4f80e390 // sdot v16.4s, v28.16b, v0.4b[0]\n"
- ".inst 0x4fa0e3b0 // sdot v16.4s, v29.16b, v0.4b[1]\n"
- "add v24.4s, v22.4s, v16.4s\n"
- "add v26.4s, v22.4s, v25.4s\n"
+ "movi v20.4s, #0x0\n"
+ "movi v19.4s, #0x0\n"
+ ".inst 0x4f8de1fc // sdot v28.4s, v15.16b, v13.4b[0]\n"
+ ".inst 0x4f8de9fb // sdot v27.4s, v15.16b, v13.4b[2]\n"
+ ".inst 0x4f8ce1fa // sdot v26.4s, v15.16b, v12.4b[0]\n"
+ ".inst 0x4f8ce9f9 // sdot v25.4s, v15.16b, v12.4b[2]\n"
+ ".inst 0x4fade1dc // sdot v28.4s, v14.16b, v13.4b[1]\n"
+ ".inst 0x4fade9db // sdot v27.4s, v14.16b, v13.4b[3]\n"
+ ".inst 0x4face1da // sdot v26.4s, v14.16b, v12.4b[1]\n"
+ ".inst 0x4face9d9 // sdot v25.4s, v14.16b, v12.4b[3]\n"
+ ".inst 0x4f87e1f8 // sdot v24.4s, v15.16b, v7.4b[0]\n"
+ ".inst 0x4f87e9f7 // sdot v23.4s, v15.16b, v7.4b[2]\n"
+ ".inst 0x4f86e1f6 // sdot v22.4s, v15.16b, v6.4b[0]\n"
+ ".inst 0x4f86e9f5 // sdot v21.4s, v15.16b, v6.4b[2]\n"
+ ".inst 0x4fa7e1d8 // sdot v24.4s, v14.16b, v7.4b[1]\n"
+ ".inst 0x4fa7e9d7 // sdot v23.4s, v14.16b, v7.4b[3]\n"
+ ".inst 0x4fa6e1d6 // sdot v22.4s, v14.16b, v6.4b[1]\n"
+ ".inst 0x4fa6e9d5 // sdot v21.4s, v14.16b, v6.4b[3]\n"
+ ".inst 0x4f85e1f2 // sdot v18.4s, v15.16b, v5.4b[0]\n"
+ ".inst 0x4f85e9f1 // sdot v17.4s, v15.16b, v5.4b[2]\n"
+ ".inst 0x4f84e1f0 // sdot v16.4s, v15.16b, v4.4b[0]\n"
+ ".inst 0x4f84e9f4 // sdot v20.4s, v15.16b, v4.4b[2]\n"
+ ".inst 0x4fa5e1d2 // sdot v18.4s, v14.16b, v5.4b[1]\n"
+ ".inst 0x4fa5e9d1 // sdot v17.4s, v14.16b, v5.4b[3]\n"
+ ".inst 0x4fa4e1d0 // sdot v16.4s, v14.16b, v4.4b[1]\n"
+ ".inst 0x4fa4e9d4 // sdot v20.4s, v14.16b, v4.4b[3]\n"
+ ".inst 0x4f82e1f3 // sdot v19.4s, v15.16b, v2.4b[0]\n"
+ "mov v28.16b, v28.16b\n"
+ "mov v27.16b, v27.16b\n"
+ "add v28.4s, v28.4s, v26.4s\n"
+ ".inst 0x4fa2e1d3 // sdot v19.4s, v14.16b, v2.4b[1]\n"
+ "add v27.4s, v27.4s, v25.4s\n"
+ "add v28.4s, v28.4s, v24.4s\n"
+ "mov v26.16b, v26.16b\n"
+ "add v27.4s, v27.4s, v23.4s\n"
+ "add v28.4s, v28.4s, v22.4s\n"
+ "mov v25.16b, v25.16b\n"
+ "add v27.4s, v27.4s, v21.4s\n"
+ "add v28.4s, v28.4s, v18.4s\n"
+ "add v26.4s, v26.4s, v24.4s\n"
+ "add v27.4s, v27.4s, v17.4s\n"
+ "add v25.4s, v25.4s, v23.4s\n"
+ "add v26.4s, v26.4s, v22.4s\n"
+ "mov v24.16b, v24.16b\n"
+ "add v25.4s, v25.4s, v21.4s\n"
+ "add v26.4s, v26.4s, v18.4s\n"
+ "mov v23.16b, v23.16b\n"
+ "add v25.4s, v25.4s, v17.4s\n"
+ "add v26.4s, v26.4s, v16.4s\n"
+ "add v24.4s, v24.4s, v22.4s\n"
+ "add v25.4s, v25.4s, v20.4s\n"
+ "add v23.4s, v23.4s, v21.4s\n"
+ "add v24.4s, v24.4s, v18.4s\n"
+ "mov v22.16b, v22.16b\n"
+ "add v23.4s, v23.4s, v17.4s\n"
+ "add v24.4s, v24.4s, v16.4s\n"
+ "mov v21.16b, v21.16b\n"
+ "add v23.4s, v23.4s, v20.4s\n"
+ "add v24.4s, v24.4s, v19.4s\n"
+ "add v22.4s, v22.4s, v18.4s\n"
+ "movi v18.4s, #0x0\n"
+ ".inst 0x4f82e9f2 // sdot v18.4s, v15.16b, v2.4b[2]\n"
+ "add v21.4s, v21.4s, v17.4s\n"
+ "movi v17.4s, #0x0\n"
+ ".inst 0x4f81e1f1 // sdot v17.4s, v15.16b, v1.4b[0]\n"
+ ".inst 0x4fa2e9d2 // sdot v18.4s, v14.16b, v2.4b[3]\n"
+ "add v22.4s, v22.4s, v16.4s\n"
"movi v16.4s, #0x0\n"
- ".inst 0x4f80eb90 // sdot v16.4s, v28.16b, v0.4b[2]\n"
- ".inst 0x4fa0ebb0 // sdot v16.4s, v29.16b, v0.4b[3]\n"
- "add v25.4s, v21.4s, v16.4s\n"
- "add v27.4s, v21.4s, v27.4s\n"
- "add v28.4s, v20.4s, v30.4s\n"
- "add v29.4s, v19.4s, v31.4s\n"
- "add v30.4s, v18.4s, v20.4s\n"
- "add v31.4s, v17.4s, v19.4s\n"
- "neg v23.4s, v23.4s\n"
- "mul v24.4s, v24.4s, v23.4s\n"
- "mul v25.4s, v25.4s, v23.4s\n"
- "mul v26.4s, v26.4s, v23.4s\n"
- "mul v27.4s, v27.4s, v23.4s\n"
- "mul v28.4s, v28.4s, v23.4s\n"
- "mul v29.4s, v29.4s, v23.4s\n"
- "mul v30.4s, v30.4s, v23.4s\n"
- "mul v31.4s, v31.4s, v23.4s\n"
- "zip1 v19.4s, v24.4s, v26.4s\n"
- "zip1 v18.4s, v25.4s, v27.4s\n"
- "zip1 v17.4s, v28.4s, v30.4s\n"
- "zip1 v16.4s, v29.4s, v31.4s\n"
- "zip1 v22.4s, v19.4s, v18.4s\n"
- "zip1 v23.4s, v17.4s, v16.4s\n"
- "add v24.4s, v24.4s, v12.4s\n"
- "add v25.4s, v25.4s, v12.4s\n"
- "add v26.4s, v26.4s, v12.4s\n"
- "add v27.4s, v27.4s, v12.4s\n"
- "add v28.4s, v28.4s, v12.4s\n"
- "add v29.4s, v29.4s, v12.4s\n"
- "add v30.4s, v30.4s, v12.4s\n"
- "add v31.4s, v31.4s, v12.4s\n"
+ ".inst 0x4fa1e1d1 // sdot v17.4s, v14.16b, v1.4b[1]\n"
+ ".inst 0x4f81e9f0 // sdot v16.4s, v15.16b, v1.4b[2]\n"
+ "add v23.4s, v23.4s, v18.4s\n"
+ "add v21.4s, v21.4s, v20.4s\n"
+ "add v22.4s, v22.4s, v19.4s\n"
+ ".inst 0x4fa1e9d0 // sdot v16.4s, v14.16b, v1.4b[3]\n"
+ "add v21.4s, v21.4s, v18.4s\n"
+ "add v22.4s, v22.4s, v17.4s\n"
+ "neg v3.4s, v3.4s\n"
+ "add v21.4s, v21.4s, v16.4s\n"
+ "mul v28.4s, v28.4s, v3.4s\n"
+ "str q28, [SP, #0x0]\n"
+ "mul v27.4s, v27.4s, v3.4s\n"
+ "mul v26.4s, v26.4s, v3.4s\n"
+ "str q27, [SP, #0x10]\n"
+ "mul v25.4s, v25.4s, v3.4s\n"
+ "mul v24.4s, v24.4s, v3.4s\n"
+ "str q26, [SP, #0x20]\n"
+ "mul v23.4s, v23.4s, v3.4s\n"
+ "str q25, [SP, #0x30]\n"
+ "mul v22.4s, v22.4s, v3.4s\n"
+ "mul v21.4s, v21.4s, v3.4s\n"
+ "str q24, [SP, #0x40]\n"
+ "add v28.4s, v28.4s, v29.4s\n"
+ "str q23, [SP, #0x50]\n"
+ "add v27.4s, v27.4s, v29.4s\n"
+ "str q22, [SP, #0x60]\n"
+ "add v26.4s, v26.4s, v29.4s\n"
+ "add v25.4s, v25.4s, v29.4s\n"
+ "str q21, [SP, #0x70]\n"
+ "add v24.4s, v24.4s, v29.4s\n"
+ "add v23.4s, v23.4s, v29.4s\n"
+ "add v22.4s, v22.4s, v29.4s\n"
+ "add v21.4s, v21.4s, v29.4s\n"
"ble 2f\n"
"1:" // Loop
- "ldr q21, [%x[params], #0x60]\n"
- "ldr q20, [%x[params], #0x70]\n"
- ".inst 0x4f80e118 // sdot v24.4s, v8.16b, v0.4b[0]\n"
- ".inst 0x4f80e919 // sdot v25.4s, v8.16b, v0.4b[2]\n"
- "ldr q12, [%x[params], #0x80]\n"
- ".inst 0x4f81e11a // sdot v26.4s, v8.16b, v1.4b[0]\n"
- ".inst 0x4f81e91b // sdot v27.4s, v8.16b, v1.4b[2]\n"
+ ".inst 0x4f8de11c // sdot v28.4s, v8.16b, v13.4b[0]\n"
+ "ldr q20, [%x[params], #0x60]\n"
+ "add x11, x11, #0x10\n"
+ ".inst 0x4f8de91b // sdot v27.4s, v8.16b, v13.4b[2]\n"
+ "ldr q19, [%x[params], #0x70]\n"
"sub %x[n_channels], %x[n_channels], #0x4\n"
- ".inst 0x4fa0e138 // sdot v24.4s, v9.16b, v0.4b[1]\n"
- ".inst 0x4fa0e939 // sdot v25.4s, v9.16b, v0.4b[3]\n"
+ ".inst 0x4f8ce11a // sdot v26.4s, v8.16b, v12.4b[0]\n"
+ "ldr q29, [%x[params], #0x80]\n"
"cmp %x[n_channels], #0x4\n"
- "add x9, x9, #0x10\n"
- ".inst 0x4fa1e13a // sdot v26.4s, v9.16b, v1.4b[1]\n"
- ".inst 0x4fa1e93b // sdot v27.4s, v9.16b, v1.4b[3]\n"
- ".inst 0x4f82e11c // sdot v28.4s, v8.16b, v2.4b[0]\n"
- ".inst 0x4f82e91d // sdot v29.4s, v8.16b, v2.4b[2]\n"
- ".inst 0x4f83e11e // sdot v30.4s, v8.16b, v3.4b[0]\n"
- ".inst 0x4f83e91f // sdot v31.4s, v8.16b, v3.4b[2]\n"
+ ".inst 0x4f8ce919 // sdot v25.4s, v8.16b, v12.4b[2]\n"
+ ".inst 0x4f87e118 // sdot v24.4s, v8.16b, v7.4b[0]\n"
+ ".inst 0x4f87e917 // sdot v23.4s, v8.16b, v7.4b[2]\n"
+ ".inst 0x4f86e116 // sdot v22.4s, v8.16b, v6.4b[0]\n"
+ ".inst 0x4f86e915 // sdot v21.4s, v8.16b, v6.4b[2]\n"
"ldr q8, [%x[params], #0x0]\n"
- ".inst 0x4f81e158 // sdot v24.4s, v10.16b, v1.4b[0]\n"
- ".inst 0x4f81e959 // sdot v25.4s, v10.16b, v1.4b[2]\n"
- ".inst 0x4f82e15a // sdot v26.4s, v10.16b, v2.4b[0]\n"
- ".inst 0x4f82e95b // sdot v27.4s, v10.16b, v2.4b[2]\n"
- ".inst 0x4fa2e13c // sdot v28.4s, v9.16b, v2.4b[1]\n"
- ".inst 0x4fa2e93d // sdot v29.4s, v9.16b, v2.4b[3]\n"
- ".inst 0x4fa3e13e // sdot v30.4s, v9.16b, v3.4b[1]\n"
- ".inst 0x4fa3e93f // sdot v31.4s, v9.16b, v3.4b[3]\n"
+ ".inst 0x4fade13c // sdot v28.4s, v9.16b, v13.4b[1]\n"
+ ".inst 0x4fade93b // sdot v27.4s, v9.16b, v13.4b[3]\n"
+ ".inst 0x4face13a // sdot v26.4s, v9.16b, v12.4b[1]\n"
+ ".inst 0x4face939 // sdot v25.4s, v9.16b, v12.4b[3]\n"
+ ".inst 0x4fa7e138 // sdot v24.4s, v9.16b, v7.4b[1]\n"
+ ".inst 0x4fa7e937 // sdot v23.4s, v9.16b, v7.4b[3]\n"
+ ".inst 0x4fa6e136 // sdot v22.4s, v9.16b, v6.4b[1]\n"
+ ".inst 0x4fa6e935 // sdot v21.4s, v9.16b, v6.4b[3]\n"
"ldr q9, [%x[params], #0x10]\n"
- ".inst 0x4fa1e178 // sdot v24.4s, v11.16b, v1.4b[1]\n"
- ".inst 0x4fa1e979 // sdot v25.4s, v11.16b, v1.4b[3]\n"
- ".inst 0x4fa2e17a // sdot v26.4s, v11.16b, v2.4b[1]\n"
- ".inst 0x4fa2e97b // sdot v27.4s, v11.16b, v2.4b[3]\n"
- ".inst 0x4f83e15c // sdot v28.4s, v10.16b, v3.4b[0]\n"
- ".inst 0x4f83e95d // sdot v29.4s, v10.16b, v3.4b[2]\n"
- ".inst 0x4f84e15e // sdot v30.4s, v10.16b, v4.4b[0]\n"
- ".inst 0x4f84e95f // sdot v31.4s, v10.16b, v4.4b[2]\n"
+ ".inst 0x4f8ce15c // sdot v28.4s, v10.16b, v12.4b[0]\n"
+ ".inst 0x4f8ce95b // sdot v27.4s, v10.16b, v12.4b[2]\n"
+ ".inst 0x4f87e15a // sdot v26.4s, v10.16b, v7.4b[0]\n"
+ ".inst 0x4f87e959 // sdot v25.4s, v10.16b, v7.4b[2]\n"
+ ".inst 0x4f86e158 // sdot v24.4s, v10.16b, v6.4b[0]\n"
+ ".inst 0x4f86e957 // sdot v23.4s, v10.16b, v6.4b[2]\n"
+ ".inst 0x4f85e156 // sdot v22.4s, v10.16b, v5.4b[0]\n"
+ ".inst 0x4f85e955 // sdot v21.4s, v10.16b, v5.4b[2]\n"
"ldr q10, [%x[params], #0x20]\n"
- ".inst 0x4f82e118 // sdot v24.4s, v8.16b, v2.4b[0]\n"
- ".inst 0x4f82e919 // sdot v25.4s, v8.16b, v2.4b[2]\n"
- ".inst 0x4f83e11a // sdot v26.4s, v8.16b, v3.4b[0]\n"
- ".inst 0x4f83e91b // sdot v27.4s, v8.16b, v3.4b[2]\n"
- ".inst 0x4fa3e17c // sdot v28.4s, v11.16b, v3.4b[1]\n"
- ".inst 0x4fa3e97d // sdot v29.4s, v11.16b, v3.4b[3]\n"
- ".inst 0x4fa4e17e // sdot v30.4s, v11.16b, v4.4b[1]\n"
- ".inst 0x4fa4e97f // sdot v31.4s, v11.16b, v4.4b[3]\n"
+ ".inst 0x4face17c // sdot v28.4s, v11.16b, v12.4b[1]\n"
+ ".inst 0x4face97b // sdot v27.4s, v11.16b, v12.4b[3]\n"
+ ".inst 0x4fa7e17a // sdot v26.4s, v11.16b, v7.4b[1]\n"
+ ".inst 0x4fa7e979 // sdot v25.4s, v11.16b, v7.4b[3]\n"
+ ".inst 0x4fa6e178 // sdot v24.4s, v11.16b, v6.4b[1]\n"
+ ".inst 0x4fa6e977 // sdot v23.4s, v11.16b, v6.4b[3]\n"
+ ".inst 0x4fa5e176 // sdot v22.4s, v11.16b, v5.4b[1]\n"
+ ".inst 0x4fa5e975 // sdot v21.4s, v11.16b, v5.4b[3]\n"
"ldr q11, [%x[params], #0x30]\n"
- ".inst 0x4fa2e138 // sdot v24.4s, v9.16b, v2.4b[1]\n"
- ".inst 0x4fa2e939 // sdot v25.4s, v9.16b, v2.4b[3]\n"
- ".inst 0x4fa3e13a // sdot v26.4s, v9.16b, v3.4b[1]\n"
- ".inst 0x4fa3e93b // sdot v27.4s, v9.16b, v3.4b[3]\n"
- ".inst 0x4f84e11c // sdot v28.4s, v8.16b, v4.4b[0]\n"
- ".inst 0x4f84e91d // sdot v29.4s, v8.16b, v4.4b[2]\n"
- ".inst 0x4f85e11e // sdot v30.4s, v8.16b, v5.4b[0]\n"
- ".inst 0x4f85e91f // sdot v31.4s, v8.16b, v5.4b[2]\n"
+ ".inst 0x4f87e11c // sdot v28.4s, v8.16b, v7.4b[0]\n"
+ ".inst 0x4f87e91b // sdot v27.4s, v8.16b, v7.4b[2]\n"
+ ".inst 0x4f86e11a // sdot v26.4s, v8.16b, v6.4b[0]\n"
+ ".inst 0x4f86e919 // sdot v25.4s, v8.16b, v6.4b[2]\n"
+ ".inst 0x4f85e118 // sdot v24.4s, v8.16b, v5.4b[0]\n"
+ ".inst 0x4f85e917 // sdot v23.4s, v8.16b, v5.4b[2]\n"
+ ".inst 0x4f84e116 // sdot v22.4s, v8.16b, v4.4b[0]\n"
+ ".inst 0x4f84e915 // sdot v21.4s, v8.16b, v4.4b[2]\n"
"ldr q8, [%x[params], #0x40]\n"
- ".inst 0x4f83e158 // sdot v24.4s, v10.16b, v3.4b[0]\n"
- ".inst 0x4f83e959 // sdot v25.4s, v10.16b, v3.4b[2]\n"
- ".inst 0x4f84e15a // sdot v26.4s, v10.16b, v4.4b[0]\n"
- ".inst 0x4f84e95b // sdot v27.4s, v10.16b, v4.4b[2]\n"
- ".inst 0x4fa4e13c // sdot v28.4s, v9.16b, v4.4b[1]\n"
- ".inst 0x4fa4e93d // sdot v29.4s, v9.16b, v4.4b[3]\n"
- ".inst 0x4fa5e13e // sdot v30.4s, v9.16b, v5.4b[1]\n"
- ".inst 0x4fa5e93f // sdot v31.4s, v9.16b, v5.4b[3]\n"
+ ".inst 0x4fa7e13c // sdot v28.4s, v9.16b, v7.4b[1]\n"
+ ".inst 0x4fa7e93b // sdot v27.4s, v9.16b, v7.4b[3]\n"
+ ".inst 0x4fa6e13a // sdot v26.4s, v9.16b, v6.4b[1]\n"
+ ".inst 0x4fa6e939 // sdot v25.4s, v9.16b, v6.4b[3]\n"
+ ".inst 0x4fa5e138 // sdot v24.4s, v9.16b, v5.4b[1]\n"
+ ".inst 0x4fa5e937 // sdot v23.4s, v9.16b, v5.4b[3]\n"
+ ".inst 0x4fa4e136 // sdot v22.4s, v9.16b, v4.4b[1]\n"
+ ".inst 0x4fa4e935 // sdot v21.4s, v9.16b, v4.4b[3]\n"
"ldr q9, [%x[params], #0x50]\n"
- ".inst 0x4fa3e178 // sdot v24.4s, v11.16b, v3.4b[1]\n"
- ".inst 0x4fa3e979 // sdot v25.4s, v11.16b, v3.4b[3]\n"
- ".inst 0x4fa4e17a // sdot v26.4s, v11.16b, v4.4b[1]\n"
- ".inst 0x4fa4e97b // sdot v27.4s, v11.16b, v4.4b[3]\n"
- ".inst 0x4f85e15c // sdot v28.4s, v10.16b, v5.4b[0]\n"
- ".inst 0x4f85e95d // sdot v29.4s, v10.16b, v5.4b[2]\n"
- ".inst 0x4f86e15e // sdot v30.4s, v10.16b, v6.4b[0]\n"
- ".inst 0x4f86e95f // sdot v31.4s, v10.16b, v6.4b[2]\n"
+ ".inst 0x4f86e15c // sdot v28.4s, v10.16b, v6.4b[0]\n"
+ ".inst 0x4f86e95b // sdot v27.4s, v10.16b, v6.4b[2]\n"
+ ".inst 0x4f85e15a // sdot v26.4s, v10.16b, v5.4b[0]\n"
+ ".inst 0x4f85e959 // sdot v25.4s, v10.16b, v5.4b[2]\n"
+ ".inst 0x4f84e158 // sdot v24.4s, v10.16b, v4.4b[0]\n"
+ ".inst 0x4f84e957 // sdot v23.4s, v10.16b, v4.4b[2]\n"
+ ".inst 0x4f82e156 // sdot v22.4s, v10.16b, v2.4b[0]\n"
+ ".inst 0x4f82e955 // sdot v21.4s, v10.16b, v2.4b[2]\n"
"ldr q10, [%x[params], #0xb0]\n"
- ".inst 0x4f84e118 // sdot v24.4s, v8.16b, v4.4b[0]\n"
- ".inst 0x4f84e919 // sdot v25.4s, v8.16b, v4.4b[2]\n"
- ".inst 0x4f85e11a // sdot v26.4s, v8.16b, v5.4b[0]\n"
- ".inst 0x4f85e91b // sdot v27.4s, v8.16b, v5.4b[2]\n"
- ".inst 0x4fa5e17c // sdot v28.4s, v11.16b, v5.4b[1]\n"
- ".inst 0x4fa5e97d // sdot v29.4s, v11.16b, v5.4b[3]\n"
- ".inst 0x4fa6e17e // sdot v30.4s, v11.16b, v6.4b[1]\n"
- ".inst 0x4fa6e97f // sdot v31.4s, v11.16b, v6.4b[3]\n"
+ ".inst 0x4fa6e17c // sdot v28.4s, v11.16b, v6.4b[1]\n"
+ ".inst 0x4fa6e97b // sdot v27.4s, v11.16b, v6.4b[3]\n"
+ ".inst 0x4fa5e17a // sdot v26.4s, v11.16b, v5.4b[1]\n"
+ ".inst 0x4fa5e979 // sdot v25.4s, v11.16b, v5.4b[3]\n"
+ ".inst 0x4fa4e178 // sdot v24.4s, v11.16b, v4.4b[1]\n"
+ ".inst 0x4fa4e977 // sdot v23.4s, v11.16b, v4.4b[3]\n"
+ ".inst 0x4fa2e176 // sdot v22.4s, v11.16b, v2.4b[1]\n"
+ ".inst 0x4fa2e975 // sdot v21.4s, v11.16b, v2.4b[3]\n"
"ldr q11, [%x[params], #0xc0]\n"
- ".inst 0x4fa4e138 // sdot v24.4s, v9.16b, v4.4b[1]\n"
- ".inst 0x4fa4e939 // sdot v25.4s, v9.16b, v4.4b[3]\n"
- "sqrdmulh v24.4s, v24.4s, v21.4s\n"
- ".inst 0x4fa5e13a // sdot v26.4s, v9.16b, v5.4b[1]\n"
- ".inst 0x4fa5e93b // sdot v27.4s, v9.16b, v5.4b[3]\n"
- "sqrdmulh v25.4s, v25.4s, v21.4s\n"
- ".inst 0x4f86e11c // sdot v28.4s, v8.16b, v6.4b[0]\n"
- ".inst 0x4f86e91d // sdot v29.4s, v8.16b, v6.4b[2]\n"
- "sqrdmulh v26.4s, v26.4s, v21.4s\n"
- ".inst 0x4f87e11e // sdot v30.4s, v8.16b, v7.4b[0]\n"
- ".inst 0x4f87e91f // sdot v31.4s, v8.16b, v7.4b[2]\n"
+ ".inst 0x4f85e11c // sdot v28.4s, v8.16b, v5.4b[0]\n"
+ ".inst 0x4f85e91b // sdot v27.4s, v8.16b, v5.4b[2]\n"
+ ".inst 0x4f84e11a // sdot v26.4s, v8.16b, v4.4b[0]\n"
+ ".inst 0x4f84e919 // sdot v25.4s, v8.16b, v4.4b[2]\n"
+ ".inst 0x4f82e118 // sdot v24.4s, v8.16b, v2.4b[0]\n"
+ ".inst 0x4f82e917 // sdot v23.4s, v8.16b, v2.4b[2]\n"
+ ".inst 0x4f81e116 // sdot v22.4s, v8.16b, v1.4b[0]\n"
+ ".inst 0x4f81e915 // sdot v21.4s, v8.16b, v1.4b[2]\n"
"ldr q8, [%x[params], #0x90]\n"
- "sqrdmulh v27.4s, v27.4s, v21.4s\n"
- ".inst 0x4fa6e13c // sdot v28.4s, v9.16b, v6.4b[1]\n"
- ".inst 0x4fa6e93d // sdot v29.4s, v9.16b, v6.4b[3]\n"
- "and v19.16b, v24.16b, v20.16b\n"
- ".inst 0x4fa7e13e // sdot v30.4s, v9.16b, v7.4b[1]\n"
- ".inst 0x4fa7e93f // sdot v31.4s, v9.16b, v7.4b[3]\n"
+ ".inst 0x4fa5e13c // sdot v28.4s, v9.16b, v5.4b[1]\n"
+ ".inst 0x4fa5e93b // sdot v27.4s, v9.16b, v5.4b[3]\n"
+ ".inst 0x4fa4e13a // sdot v26.4s, v9.16b, v4.4b[1]\n"
+ ".inst 0x4fa4e939 // sdot v25.4s, v9.16b, v4.4b[3]\n"
+ ".inst 0x4fa2e138 // sdot v24.4s, v9.16b, v2.4b[1]\n"
+ ".inst 0x4fa2e937 // sdot v23.4s, v9.16b, v2.4b[3]\n"
+ ".inst 0x4fa1e136 // sdot v22.4s, v9.16b, v1.4b[1]\n"
+ ".inst 0x4fa1e935 // sdot v21.4s, v9.16b, v1.4b[3]\n"
"ldr q9, [%x[params], #0xa0]\n"
- "and v18.16b, v25.16b, v20.16b\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
"add %x[params], %x[params], #0xd0\n"
- "sqrdmulh v28.4s, v28.4s, v21.4s\n"
- "sqrdmulh v29.4s, v29.4s, v21.4s\n"
- "sqrdmulh v30.4s, v30.4s, v21.4s\n"
- "sqrdmulh v31.4s, v31.4s, v21.4s\n"
- "and v17.16b, v26.16b, v20.16b\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "sqadd v24.4s, v24.4s, v19.4s\n"
- "and v16.16b, v27.16b, v20.16b\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "sqadd v25.4s, v25.4s, v18.4s\n"
- "sqadd v26.4s, v26.4s, v17.4s\n"
- "sqadd v27.4s, v27.4s, v16.4s\n"
- "and v19.16b, v28.16b, v20.16b\n"
- "and v18.16b, v29.16b, v20.16b\n"
- "and v17.16b, v30.16b, v20.16b\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
+ "sqrdmulh v28.4s, v28.4s, v20.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v20.4s\n"
+ "sqrdmulh v26.4s, v26.4s, v20.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v20.4s\n"
+ "sqrdmulh v24.4s, v24.4s, v20.4s\n"
+ "and v18.16b, v28.16b, v19.16b\n"
+ "and v17.16b, v27.16b, v19.16b\n"
+ "and v16.16b, v26.16b, v19.16b\n"
"sshr v18.4s, v18.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "sqadd v28.4s, v28.4s, v19.4s\n"
- "and v16.16b, v31.16b, v20.16b\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqadd v29.4s, v29.4s, v18.4s\n"
- "sqadd v30.4s, v30.4s, v17.4s\n"
- "sqadd v31.4s, v31.4s, v16.4s\n"
- "srshl v24.4s, v24.4s, v20.4s\n"
- "srshl v25.4s, v25.4s, v20.4s\n"
- "srshl v26.4s, v26.4s, v20.4s\n"
- "srshl v27.4s, v27.4s, v20.4s\n"
- "srshl v28.4s, v28.4s, v20.4s\n"
- "srshl v29.4s, v29.4s, v20.4s\n"
- "srshl v30.4s, v30.4s, v20.4s\n"
- "srshl v31.4s, v31.4s, v20.4s\n"
- "add v24.4s, v24.4s, v14.4s\n"
- "add v25.4s, v25.4s, v14.4s\n"
- "add v26.4s, v26.4s, v14.4s\n"
- "add v27.4s, v27.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "smin v24.4s, v24.4s, v15.4s\n"
- "smin v25.4s, v25.4s, v15.4s\n"
- "smin v26.4s, v26.4s, v15.4s\n"
- "smin v27.4s, v27.4s, v15.4s\n"
- "smin v28.4s, v28.4s, v15.4s\n"
- "smin v29.4s, v29.4s, v15.4s\n"
- "smin v30.4s, v30.4s, v15.4s\n"
- "smin v31.4s, v31.4s, v15.4s\n"
- "smax v24.4s, v24.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v26.4s, v26.4s, v13.4s\n"
- "smax v27.4s, v27.4s, v13.4s\n"
- "smax v28.4s, v28.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v31.4s, v31.4s, v13.4s\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "sqadd v28.4s, v28.4s, v18.4s\n"
+ "sqadd v27.4s, v27.4s, v17.4s\n"
+ "sqadd v26.4s, v26.4s, v16.4s\n"
+ "and v16.16b, v25.16b, v19.16b\n"
+ "srshl v28.4s, v28.4s, v19.4s\n"
+ "srshl v27.4s, v27.4s, v19.4s\n"
+ "srshl v26.4s, v26.4s, v19.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "add v28.4s, v28.4s, v0.4s\n"
+ "add v27.4s, v27.4s, v0.4s\n"
+ "add v26.4s, v26.4s, v0.4s\n"
+ "smin v28.4s, v28.4s, v30.4s\n"
+ "smin v27.4s, v27.4s, v30.4s\n"
+ "smin v26.4s, v26.4s, v30.4s\n"
+ "smax v28.4s, v28.4s, v31.4s\n"
+ "smax v27.4s, v27.4s, v31.4s\n"
+ "smax v26.4s, v26.4s, v31.4s\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "str s24, [x27, x28]\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
- "str s25, [x26, x28]\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "str s26, [x25, x28]\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
- "str s27, [x24, x28]\n"
- "str s28, [x23, x28]\n"
- "dup v24.4s, v22.s[0]\n"
- "dup v25.4s, v22.s[1]\n"
- "str s29, [x22, x28]\n"
- "dup v26.4s, v22.s[2]\n"
- "dup v27.4s, v22.s[3]\n"
- "str s30, [x21, x28]\n"
- "dup v28.4s, v23.s[0]\n"
- "dup v29.4s, v23.s[1]\n"
- "str s31, [x20, x28]\n"
- "dup v30.4s, v23.s[2]\n"
- "dup v31.4s, v23.s[3]\n"
- "add x28, x28, #0x4\n"
- "add v24.4s, v24.4s, v12.4s\n"
- "add v25.4s, v25.4s, v12.4s\n"
- "add v26.4s, v26.4s, v12.4s\n"
- "add v27.4s, v27.4s, v12.4s\n"
- "add v28.4s, v28.4s, v12.4s\n"
- "add v29.4s, v29.4s, v12.4s\n"
- "add v30.4s, v30.4s, v12.4s\n"
- "add v31.4s, v31.4s, v12.4s\n"
+ "str s28, [x26, x10]\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "ldr q28, [SP, #0x0]\n"
+ "sqadd v25.4s, v25.4s, v16.4s\n"
+ "str s27, [x25, x10]\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "ldr q27, [SP, #0x10]\n"
+ "and v16.16b, v24.16b, v19.16b\n"
+ "str s26, [x24, x10]\n"
+ "sqrdmulh v23.4s, v23.4s, v20.4s\n"
+ "ldr q26, [SP, #0x20]\n"
+ "srshl v25.4s, v25.4s, v19.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v22.4s, v22.4s, v20.4s\n"
+ "and v17.16b, v23.16b, v19.16b\n"
+ "add v25.4s, v25.4s, v0.4s\n"
+ "sqadd v24.4s, v24.4s, v16.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "smin v25.4s, v25.4s, v30.4s\n"
+ "and v16.16b, v22.16b, v19.16b\n"
+ "srshl v24.4s, v24.4s, v19.4s\n"
+ "smax v25.4s, v25.4s, v31.4s\n"
+ "sqadd v23.4s, v23.4s, v17.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "add v24.4s, v24.4s, v0.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s25, [x23, x10]\n"
+ "smin v24.4s, v24.4s, v30.4s\n"
+ "srshl v23.4s, v23.4s, v19.4s\n"
+ "ldr q25, [SP, #0x30]\n"
+ "sqadd v22.4s, v22.4s, v16.4s\n"
+ "sqrdmulh v21.4s, v21.4s, v20.4s\n"
+ "smax v24.4s, v24.4s, v31.4s\n"
+ "add v23.4s, v23.4s, v0.4s\n"
+ "srshl v22.4s, v22.4s, v19.4s\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "smin v23.4s, v23.4s, v30.4s\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "str s24, [x22, x10]\n"
+ "smax v23.4s, v23.4s, v31.4s\n"
+ "add v22.4s, v22.4s, v0.4s\n"
+ "ldr q24, [SP, #0x40]\n"
+ "and v16.16b, v21.16b, v19.16b\n"
+ "add v28.4s, v28.4s, v29.4s\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "smin v22.4s, v22.4s, v30.4s\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "str s23, [x21, x10]\n"
+ "smax v22.4s, v22.4s, v31.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "ldr q23, [SP, #0x50]\n"
+ "add v27.4s, v27.4s, v29.4s\n"
+ "add v26.4s, v26.4s, v29.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "sqadd v21.4s, v21.4s, v16.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str s22, [x20, x10]\n"
+ "add v25.4s, v25.4s, v29.4s\n"
+ "add v24.4s, v24.4s, v29.4s\n"
+ "ldr q22, [SP, #0x60]\n"
+ "srshl v21.4s, v21.4s, v19.4s\n"
+ "add v23.4s, v23.4s, v29.4s\n"
+ "add v21.4s, v21.4s, v0.4s\n"
+ "add v22.4s, v22.4s, v29.4s\n"
+ "smin v21.4s, v21.4s, v30.4s\n"
+ "smax v21.4s, v21.4s, v31.4s\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "str s21, [x19, x10]\n"
+ "add x10, x10, #0x4\n"
+ "ldr q21, [SP, #0x70]\n"
+ "add v21.4s, v21.4s, v29.4s\n"
"bgt 1b\n"
"2:" // Tail
- "ldr q21, [%x[params], #0x60]\n"
- "ldr q20, [%x[params], #0x70]\n"
- ".inst 0x4f80e118 // sdot v24.4s, v8.16b, v0.4b[0]\n"
- ".inst 0x4f80e919 // sdot v25.4s, v8.16b, v0.4b[2]\n"
- ".inst 0x4f81e11a // sdot v26.4s, v8.16b, v1.4b[0]\n"
- ".inst 0x4f81e91b // sdot v27.4s, v8.16b, v1.4b[2]\n"
- "cmp %x[n_channels], #0x4\n"
- "add x27, x27, x28\n"
- ".inst 0x4fa0e138 // sdot v24.4s, v9.16b, v0.4b[1]\n"
- ".inst 0x4fa0e939 // sdot v25.4s, v9.16b, v0.4b[3]\n"
- "add x26, x26, x28\n"
- "add x25, x25, x28\n"
- ".inst 0x4fa1e13a // sdot v26.4s, v9.16b, v1.4b[1]\n"
- ".inst 0x4fa1e93b // sdot v27.4s, v9.16b, v1.4b[3]\n"
- "add x24, x24, x28\n"
- "add x23, x23, x28\n"
- ".inst 0x4f82e11c // sdot v28.4s, v8.16b, v2.4b[0]\n"
- ".inst 0x4f82e91d // sdot v29.4s, v8.16b, v2.4b[2]\n"
- "add x22, x22, x28\n"
- "add x21, x21, x28\n"
- ".inst 0x4f83e11e // sdot v30.4s, v8.16b, v3.4b[0]\n"
- ".inst 0x4f83e91f // sdot v31.4s, v8.16b, v3.4b[2]\n"
+ ".inst 0x4f8de11c // sdot v28.4s, v8.16b, v13.4b[0]\n"
+ "ldr q20, [%x[params], #0x60]\n"
+ "add x26, x26, x10\n"
+ ".inst 0x4f8de91b // sdot v27.4s, v8.16b, v13.4b[2]\n"
+ "ldr q19, [%x[params], #0x70]\n"
+ "add x25, x25, x10\n"
+ ".inst 0x4f8ce11a // sdot v26.4s, v8.16b, v12.4b[0]\n"
+ "add x24, x24, x10\n"
+ ".inst 0x4f8ce919 // sdot v25.4s, v8.16b, v12.4b[2]\n"
+ "add x23, x23, x10\n"
+ ".inst 0x4f87e118 // sdot v24.4s, v8.16b, v7.4b[0]\n"
+ "add x22, x22, x10\n"
+ ".inst 0x4f87e917 // sdot v23.4s, v8.16b, v7.4b[2]\n"
+ "add x21, x21, x10\n"
+ ".inst 0x4f86e116 // sdot v22.4s, v8.16b, v6.4b[0]\n"
+ "add x20, x20, x10\n"
+ ".inst 0x4f86e915 // sdot v21.4s, v8.16b, v6.4b[2]\n"
"ldr q8, [%x[params], #0x0]\n"
- "add x20, x20, x28\n"
- ".inst 0x4f81e158 // sdot v24.4s, v10.16b, v1.4b[0]\n"
- ".inst 0x4f81e959 // sdot v25.4s, v10.16b, v1.4b[2]\n"
- ".inst 0x4f82e15a // sdot v26.4s, v10.16b, v2.4b[0]\n"
- ".inst 0x4f82e95b // sdot v27.4s, v10.16b, v2.4b[2]\n"
- ".inst 0x4fa2e13c // sdot v28.4s, v9.16b, v2.4b[1]\n"
- ".inst 0x4fa2e93d // sdot v29.4s, v9.16b, v2.4b[3]\n"
- ".inst 0x4fa3e13e // sdot v30.4s, v9.16b, v3.4b[1]\n"
- ".inst 0x4fa3e93f // sdot v31.4s, v9.16b, v3.4b[3]\n"
+ "add x19, x19, x10\n"
+ ".inst 0x4fade13c // sdot v28.4s, v9.16b, v13.4b[1]\n"
+ "cmp %x[n_channels], #0x4\n"
+ ".inst 0x4fade93b // sdot v27.4s, v9.16b, v13.4b[3]\n"
+ ".inst 0x4face13a // sdot v26.4s, v9.16b, v12.4b[1]\n"
+ ".inst 0x4face939 // sdot v25.4s, v9.16b, v12.4b[3]\n"
+ ".inst 0x4fa7e138 // sdot v24.4s, v9.16b, v7.4b[1]\n"
+ ".inst 0x4fa7e937 // sdot v23.4s, v9.16b, v7.4b[3]\n"
+ ".inst 0x4fa6e136 // sdot v22.4s, v9.16b, v6.4b[1]\n"
+ ".inst 0x4fa6e935 // sdot v21.4s, v9.16b, v6.4b[3]\n"
"ldr q9, [%x[params], #0x10]\n"
- ".inst 0x4fa1e178 // sdot v24.4s, v11.16b, v1.4b[1]\n"
- ".inst 0x4fa1e979 // sdot v25.4s, v11.16b, v1.4b[3]\n"
- ".inst 0x4fa2e17a // sdot v26.4s, v11.16b, v2.4b[1]\n"
- ".inst 0x4fa2e97b // sdot v27.4s, v11.16b, v2.4b[3]\n"
- ".inst 0x4f83e15c // sdot v28.4s, v10.16b, v3.4b[0]\n"
- ".inst 0x4f83e95d // sdot v29.4s, v10.16b, v3.4b[2]\n"
- ".inst 0x4f84e15e // sdot v30.4s, v10.16b, v4.4b[0]\n"
- ".inst 0x4f84e95f // sdot v31.4s, v10.16b, v4.4b[2]\n"
+ ".inst 0x4f8ce15c // sdot v28.4s, v10.16b, v12.4b[0]\n"
+ ".inst 0x4f8ce95b // sdot v27.4s, v10.16b, v12.4b[2]\n"
+ ".inst 0x4f87e15a // sdot v26.4s, v10.16b, v7.4b[0]\n"
+ ".inst 0x4f87e959 // sdot v25.4s, v10.16b, v7.4b[2]\n"
+ ".inst 0x4f86e158 // sdot v24.4s, v10.16b, v6.4b[0]\n"
+ ".inst 0x4f86e957 // sdot v23.4s, v10.16b, v6.4b[2]\n"
+ ".inst 0x4f85e156 // sdot v22.4s, v10.16b, v5.4b[0]\n"
+ ".inst 0x4f85e955 // sdot v21.4s, v10.16b, v5.4b[2]\n"
"ldr q10, [%x[params], #0x20]\n"
- ".inst 0x4f82e118 // sdot v24.4s, v8.16b, v2.4b[0]\n"
- ".inst 0x4f82e919 // sdot v25.4s, v8.16b, v2.4b[2]\n"
- ".inst 0x4f83e11a // sdot v26.4s, v8.16b, v3.4b[0]\n"
- ".inst 0x4f83e91b // sdot v27.4s, v8.16b, v3.4b[2]\n"
- ".inst 0x4fa3e17c // sdot v28.4s, v11.16b, v3.4b[1]\n"
- ".inst 0x4fa3e97d // sdot v29.4s, v11.16b, v3.4b[3]\n"
- ".inst 0x4fa4e17e // sdot v30.4s, v11.16b, v4.4b[1]\n"
- ".inst 0x4fa4e97f // sdot v31.4s, v11.16b, v4.4b[3]\n"
+ ".inst 0x4face17c // sdot v28.4s, v11.16b, v12.4b[1]\n"
+ ".inst 0x4face97b // sdot v27.4s, v11.16b, v12.4b[3]\n"
+ ".inst 0x4fa7e17a // sdot v26.4s, v11.16b, v7.4b[1]\n"
+ ".inst 0x4fa7e979 // sdot v25.4s, v11.16b, v7.4b[3]\n"
+ ".inst 0x4fa6e178 // sdot v24.4s, v11.16b, v6.4b[1]\n"
+ ".inst 0x4fa6e977 // sdot v23.4s, v11.16b, v6.4b[3]\n"
+ ".inst 0x4fa5e176 // sdot v22.4s, v11.16b, v5.4b[1]\n"
+ ".inst 0x4fa5e975 // sdot v21.4s, v11.16b, v5.4b[3]\n"
"ldr q11, [%x[params], #0x30]\n"
- ".inst 0x4fa2e138 // sdot v24.4s, v9.16b, v2.4b[1]\n"
- ".inst 0x4fa2e939 // sdot v25.4s, v9.16b, v2.4b[3]\n"
- ".inst 0x4fa3e13a // sdot v26.4s, v9.16b, v3.4b[1]\n"
- ".inst 0x4fa3e93b // sdot v27.4s, v9.16b, v3.4b[3]\n"
- ".inst 0x4f84e11c // sdot v28.4s, v8.16b, v4.4b[0]\n"
- ".inst 0x4f84e91d // sdot v29.4s, v8.16b, v4.4b[2]\n"
- ".inst 0x4f85e11e // sdot v30.4s, v8.16b, v5.4b[0]\n"
- ".inst 0x4f85e91f // sdot v31.4s, v8.16b, v5.4b[2]\n"
+ ".inst 0x4f87e11c // sdot v28.4s, v8.16b, v7.4b[0]\n"
+ ".inst 0x4f87e91b // sdot v27.4s, v8.16b, v7.4b[2]\n"
+ ".inst 0x4f86e11a // sdot v26.4s, v8.16b, v6.4b[0]\n"
+ ".inst 0x4f86e919 // sdot v25.4s, v8.16b, v6.4b[2]\n"
+ ".inst 0x4f85e118 // sdot v24.4s, v8.16b, v5.4b[0]\n"
+ ".inst 0x4f85e917 // sdot v23.4s, v8.16b, v5.4b[2]\n"
+ ".inst 0x4f84e116 // sdot v22.4s, v8.16b, v4.4b[0]\n"
+ ".inst 0x4f84e915 // sdot v21.4s, v8.16b, v4.4b[2]\n"
"ldr q8, [%x[params], #0x40]\n"
- ".inst 0x4f83e158 // sdot v24.4s, v10.16b, v3.4b[0]\n"
- ".inst 0x4f83e959 // sdot v25.4s, v10.16b, v3.4b[2]\n"
- ".inst 0x4f84e15a // sdot v26.4s, v10.16b, v4.4b[0]\n"
- ".inst 0x4f84e95b // sdot v27.4s, v10.16b, v4.4b[2]\n"
- ".inst 0x4fa4e13c // sdot v28.4s, v9.16b, v4.4b[1]\n"
- ".inst 0x4fa4e93d // sdot v29.4s, v9.16b, v4.4b[3]\n"
- ".inst 0x4fa5e13e // sdot v30.4s, v9.16b, v5.4b[1]\n"
- ".inst 0x4fa5e93f // sdot v31.4s, v9.16b, v5.4b[3]\n"
+ ".inst 0x4fa7e13c // sdot v28.4s, v9.16b, v7.4b[1]\n"
+ ".inst 0x4fa7e93b // sdot v27.4s, v9.16b, v7.4b[3]\n"
+ ".inst 0x4fa6e13a // sdot v26.4s, v9.16b, v6.4b[1]\n"
+ ".inst 0x4fa6e939 // sdot v25.4s, v9.16b, v6.4b[3]\n"
+ ".inst 0x4fa5e138 // sdot v24.4s, v9.16b, v5.4b[1]\n"
+ ".inst 0x4fa5e937 // sdot v23.4s, v9.16b, v5.4b[3]\n"
+ ".inst 0x4fa4e136 // sdot v22.4s, v9.16b, v4.4b[1]\n"
+ ".inst 0x4fa4e935 // sdot v21.4s, v9.16b, v4.4b[3]\n"
"ldr q9, [%x[params], #0x50]\n"
"add %x[params], %x[params], #0x80\n"
- ".inst 0x4fa3e178 // sdot v24.4s, v11.16b, v3.4b[1]\n"
- ".inst 0x4fa3e979 // sdot v25.4s, v11.16b, v3.4b[3]\n"
- ".inst 0x4fa4e17a // sdot v26.4s, v11.16b, v4.4b[1]\n"
- ".inst 0x4fa4e97b // sdot v27.4s, v11.16b, v4.4b[3]\n"
- ".inst 0x4f85e15c // sdot v28.4s, v10.16b, v5.4b[0]\n"
- ".inst 0x4f85e95d // sdot v29.4s, v10.16b, v5.4b[2]\n"
- ".inst 0x4f86e15e // sdot v30.4s, v10.16b, v6.4b[0]\n"
- ".inst 0x4f86e95f // sdot v31.4s, v10.16b, v6.4b[2]\n"
- ".inst 0x4f84e118 // sdot v24.4s, v8.16b, v4.4b[0]\n"
- ".inst 0x4f84e919 // sdot v25.4s, v8.16b, v4.4b[2]\n"
- ".inst 0x4f85e11a // sdot v26.4s, v8.16b, v5.4b[0]\n"
+ ".inst 0x4f86e15c // sdot v28.4s, v10.16b, v6.4b[0]\n"
+ ".inst 0x4f86e95b // sdot v27.4s, v10.16b, v6.4b[2]\n"
+ ".inst 0x4f85e15a // sdot v26.4s, v10.16b, v5.4b[0]\n"
+ ".inst 0x4f85e959 // sdot v25.4s, v10.16b, v5.4b[2]\n"
+ ".inst 0x4f84e158 // sdot v24.4s, v10.16b, v4.4b[0]\n"
+ ".inst 0x4f84e957 // sdot v23.4s, v10.16b, v4.4b[2]\n"
+ ".inst 0x4f82e156 // sdot v22.4s, v10.16b, v2.4b[0]\n"
+ ".inst 0x4f82e955 // sdot v21.4s, v10.16b, v2.4b[2]\n"
+ ".inst 0x4fa6e17c // sdot v28.4s, v11.16b, v6.4b[1]\n"
+ ".inst 0x4fa6e97b // sdot v27.4s, v11.16b, v6.4b[3]\n"
+ ".inst 0x4fa5e17a // sdot v26.4s, v11.16b, v5.4b[1]\n"
+ ".inst 0x4fa5e979 // sdot v25.4s, v11.16b, v5.4b[3]\n"
+ ".inst 0x4fa4e178 // sdot v24.4s, v11.16b, v4.4b[1]\n"
+ ".inst 0x4fa4e977 // sdot v23.4s, v11.16b, v4.4b[3]\n"
+ ".inst 0x4fa2e176 // sdot v22.4s, v11.16b, v2.4b[1]\n"
+ ".inst 0x4fa2e975 // sdot v21.4s, v11.16b, v2.4b[3]\n"
+ ".inst 0x4f85e11c // sdot v28.4s, v8.16b, v5.4b[0]\n"
".inst 0x4f85e91b // sdot v27.4s, v8.16b, v5.4b[2]\n"
- ".inst 0x4fa5e17c // sdot v28.4s, v11.16b, v5.4b[1]\n"
- ".inst 0x4fa5e97d // sdot v29.4s, v11.16b, v5.4b[3]\n"
- ".inst 0x4fa6e17e // sdot v30.4s, v11.16b, v6.4b[1]\n"
- ".inst 0x4fa6e97f // sdot v31.4s, v11.16b, v6.4b[3]\n"
- ".inst 0x4fa4e138 // sdot v24.4s, v9.16b, v4.4b[1]\n"
- ".inst 0x4fa4e939 // sdot v25.4s, v9.16b, v4.4b[3]\n"
- "sqrdmulh v24.4s, v24.4s, v21.4s\n"
- ".inst 0x4fa5e13a // sdot v26.4s, v9.16b, v5.4b[1]\n"
+ ".inst 0x4f84e11a // sdot v26.4s, v8.16b, v4.4b[0]\n"
+ ".inst 0x4f84e919 // sdot v25.4s, v8.16b, v4.4b[2]\n"
+ ".inst 0x4f82e118 // sdot v24.4s, v8.16b, v2.4b[0]\n"
+ ".inst 0x4f82e917 // sdot v23.4s, v8.16b, v2.4b[2]\n"
+ ".inst 0x4f81e116 // sdot v22.4s, v8.16b, v1.4b[0]\n"
+ ".inst 0x4f81e915 // sdot v21.4s, v8.16b, v1.4b[2]\n"
+ ".inst 0x4fa5e13c // sdot v28.4s, v9.16b, v5.4b[1]\n"
".inst 0x4fa5e93b // sdot v27.4s, v9.16b, v5.4b[3]\n"
- "sqrdmulh v25.4s, v25.4s, v21.4s\n"
- ".inst 0x4f86e11c // sdot v28.4s, v8.16b, v6.4b[0]\n"
- ".inst 0x4f86e91d // sdot v29.4s, v8.16b, v6.4b[2]\n"
- "sqrdmulh v26.4s, v26.4s, v21.4s\n"
- ".inst 0x4f87e11e // sdot v30.4s, v8.16b, v7.4b[0]\n"
- ".inst 0x4f87e91f // sdot v31.4s, v8.16b, v7.4b[2]\n"
- "sqrdmulh v27.4s, v27.4s, v21.4s\n"
- ".inst 0x4fa6e13c // sdot v28.4s, v9.16b, v6.4b[1]\n"
- ".inst 0x4fa6e93d // sdot v29.4s, v9.16b, v6.4b[3]\n"
- "and v19.16b, v24.16b, v20.16b\n"
- ".inst 0x4fa7e13e // sdot v30.4s, v9.16b, v7.4b[1]\n"
- ".inst 0x4fa7e93f // sdot v31.4s, v9.16b, v7.4b[3]\n"
- "and v18.16b, v25.16b, v20.16b\n"
- "and v17.16b, v26.16b, v20.16b\n"
- "and v16.16b, v27.16b, v20.16b\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
+ ".inst 0x4fa4e13a // sdot v26.4s, v9.16b, v4.4b[1]\n"
+ ".inst 0x4fa4e939 // sdot v25.4s, v9.16b, v4.4b[3]\n"
+ ".inst 0x4fa2e138 // sdot v24.4s, v9.16b, v2.4b[1]\n"
+ ".inst 0x4fa2e937 // sdot v23.4s, v9.16b, v2.4b[3]\n"
+ ".inst 0x4fa1e136 // sdot v22.4s, v9.16b, v1.4b[1]\n"
+ ".inst 0x4fa1e935 // sdot v21.4s, v9.16b, v1.4b[3]\n"
+ "sqrdmulh v28.4s, v28.4s, v20.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v20.4s\n"
+ "sqrdmulh v26.4s, v26.4s, v20.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v20.4s\n"
+ "and v18.16b, v28.16b, v19.16b\n"
+ "and v17.16b, v27.16b, v19.16b\n"
+ "and v16.16b, v26.16b, v19.16b\n"
"sshr v18.4s, v18.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqrdmulh v28.4s, v28.4s, v21.4s\n"
- "sqrdmulh v29.4s, v29.4s, v21.4s\n"
- "sqrdmulh v30.4s, v30.4s, v21.4s\n"
- "sqrdmulh v31.4s, v31.4s, v21.4s\n"
- "sqadd v24.4s, v24.4s, v19.4s\n"
- "sqadd v25.4s, v25.4s, v18.4s\n"
- "sqadd v26.4s, v26.4s, v17.4s\n"
- "sqadd v27.4s, v27.4s, v16.4s\n"
- "and v19.16b, v28.16b, v20.16b\n"
- "and v18.16b, v29.16b, v20.16b\n"
- "and v17.16b, v30.16b, v20.16b\n"
- "and v16.16b, v31.16b, v20.16b\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v28.4s, v28.4s, v18.4s\n"
+ "sqadd v27.4s, v27.4s, v17.4s\n"
+ "sqadd v26.4s, v26.4s, v16.4s\n"
+ "and v16.16b, v25.16b, v19.16b\n"
+ "srshl v28.4s, v28.4s, v19.4s\n"
+ "srshl v27.4s, v27.4s, v19.4s\n"
+ "srshl v26.4s, v26.4s, v19.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqadd v28.4s, v28.4s, v19.4s\n"
- "sqadd v29.4s, v29.4s, v18.4s\n"
- "sqadd v30.4s, v30.4s, v17.4s\n"
- "sqadd v31.4s, v31.4s, v16.4s\n"
- "srshl v24.4s, v24.4s, v20.4s\n"
- "srshl v25.4s, v25.4s, v20.4s\n"
- "srshl v26.4s, v26.4s, v20.4s\n"
- "srshl v27.4s, v27.4s, v20.4s\n"
- "srshl v28.4s, v28.4s, v20.4s\n"
- "srshl v29.4s, v29.4s, v20.4s\n"
- "srshl v30.4s, v30.4s, v20.4s\n"
- "srshl v31.4s, v31.4s, v20.4s\n"
- "add v24.4s, v24.4s, v14.4s\n"
- "add v25.4s, v25.4s, v14.4s\n"
- "add v26.4s, v26.4s, v14.4s\n"
- "add v27.4s, v27.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "smin v24.4s, v24.4s, v15.4s\n"
- "smin v25.4s, v25.4s, v15.4s\n"
- "smin v26.4s, v26.4s, v15.4s\n"
- "smin v27.4s, v27.4s, v15.4s\n"
- "smin v28.4s, v28.4s, v15.4s\n"
- "smin v29.4s, v29.4s, v15.4s\n"
- "smin v30.4s, v30.4s, v15.4s\n"
- "smin v31.4s, v31.4s, v15.4s\n"
- "smax v24.4s, v24.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v26.4s, v26.4s, v13.4s\n"
- "smax v27.4s, v27.4s, v13.4s\n"
- "smax v28.4s, v28.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v31.4s, v31.4s, v13.4s\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "add v28.4s, v28.4s, v0.4s\n"
+ "add v27.4s, v27.4s, v0.4s\n"
+ "add v26.4s, v26.4s, v0.4s\n"
+ "smin v28.4s, v28.4s, v30.4s\n"
+ "smin v27.4s, v27.4s, v30.4s\n"
+ "smin v26.4s, v26.4s, v30.4s\n"
+ "smax v28.4s, v28.4s, v31.4s\n"
+ "smax v27.4s, v27.4s, v31.4s\n"
+ "smax v26.4s, v26.4s, v31.4s\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "sqadd v25.4s, v25.4s, v16.4s\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "sqrdmulh v24.4s, v24.4s, v20.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v20.4s\n"
+ "srshl v25.4s, v25.4s, v19.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v20.4s\n"
+ "and v16.16b, v24.16b, v19.16b\n"
+ "and v17.16b, v23.16b, v19.16b\n"
+ "add v25.4s, v25.4s, v0.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "smin v25.4s, v25.4s, v30.4s\n"
+ "sqadd v24.4s, v24.4s, v16.4s\n"
+ "sqadd v23.4s, v23.4s, v17.4s\n"
+ "smax v25.4s, v25.4s, v31.4s\n"
+ "and v16.16b, v22.16b, v19.16b\n"
+ "srshl v24.4s, v24.4s, v19.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "srshl v23.4s, v23.4s, v19.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "add v24.4s, v24.4s, v0.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "add v23.4s, v23.4s, v0.4s\n"
+ "smin v24.4s, v24.4s, v30.4s\n"
+ "sqadd v22.4s, v22.4s, v16.4s\n"
+ "smin v23.4s, v23.4s, v30.4s\n"
+ "smax v24.4s, v24.4s, v31.4s\n"
+ "sqrdmulh v21.4s, v21.4s, v20.4s\n"
+ "smax v23.4s, v23.4s, v31.4s\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "srshl v22.4s, v22.4s, v19.4s\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "and v16.16b, v21.16b, v19.16b\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "add v22.4s, v22.4s, v0.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smin v22.4s, v22.4s, v30.4s\n"
+ "sqadd v21.4s, v21.4s, v16.4s\n"
+ "smax v22.4s, v22.4s, v31.4s\n"
+ "srshl v21.4s, v21.4s, v19.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "add v21.4s, v21.4s, v0.4s\n"
+ "smin v21.4s, v21.4s, v30.4s\n"
+ "smax v21.4s, v21.4s, v31.4s\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
"blt 3f\n"
- "str s24, [x27, #0x0]\n"
- "str s25, [x26, #0x0]\n"
- "str s26, [x25, #0x0]\n"
- "str s27, [x24, #0x0]\n"
- "str s28, [x23, #0x0]\n"
- "str s29, [x22, #0x0]\n"
- "str s30, [x21, #0x0]\n"
- "str s31, [x20, #0x0]\n"
+ "str s28, [x26, #0x0]\n"
+ "str s27, [x25, #0x0]\n"
+ "str s26, [x24, #0x0]\n"
+ "str s25, [x23, #0x0]\n"
+ "str s24, [x22, #0x0]\n"
+ "str s23, [x21, #0x0]\n"
+ "str s22, [x20, #0x0]\n"
+ "str s21, [x19, #0x0]\n"
"b 4f\n"
"3:" // Tail: Oddments
+ "st1 { v28.b }[0], [x26], #0x1\n"
"subs %x[n_channels], %x[n_channels], #0x1\n"
- "st1 { v24.b }[0], [x27], #0x1\n"
- "st1 { v25.b }[0], [x26], #0x1\n"
- "st1 { v26.b }[0], [x25], #0x1\n"
- "st1 { v27.b }[0], [x24], #0x1\n"
- "st1 { v28.b }[0], [x23], #0x1\n"
- "st1 { v29.b }[0], [x22], #0x1\n"
- "st1 { v30.b }[0], [x21], #0x1\n"
- "st1 { v31.b }[0], [x20], #0x1\n"
+ "st1 { v27.b }[0], [x25], #0x1\n"
+ "st1 { v26.b }[0], [x24], #0x1\n"
+ "st1 { v25.b }[0], [x23], #0x1\n"
+ "st1 { v24.b }[0], [x22], #0x1\n"
+ "st1 { v23.b }[0], [x21], #0x1\n"
+ "st1 { v22.b }[0], [x20], #0x1\n"
+ "st1 { v21.b }[0], [x19], #0x1\n"
"beq 4f\n"
+ "st1 { v28.b }[1], [x26], #0x1\n"
"subs %x[n_channels], %x[n_channels], #0x1\n"
- "st1 { v24.b }[1], [x27], #0x1\n"
- "st1 { v25.b }[1], [x26], #0x1\n"
- "st1 { v26.b }[1], [x25], #0x1\n"
- "st1 { v27.b }[1], [x24], #0x1\n"
- "st1 { v28.b }[1], [x23], #0x1\n"
- "st1 { v29.b }[1], [x22], #0x1\n"
- "st1 { v30.b }[1], [x21], #0x1\n"
- "st1 { v31.b }[1], [x20], #0x1\n"
+ "st1 { v27.b }[1], [x25], #0x1\n"
+ "st1 { v26.b }[1], [x24], #0x1\n"
+ "st1 { v25.b }[1], [x23], #0x1\n"
+ "st1 { v24.b }[1], [x22], #0x1\n"
+ "st1 { v23.b }[1], [x21], #0x1\n"
+ "st1 { v22.b }[1], [x20], #0x1\n"
+ "st1 { v21.b }[1], [x19], #0x1\n"
"beq 4f\n"
+ "st1 { v28.b }[2], [x26], #0x1\n"
"subs %x[n_channels], %x[n_channels], #0x1\n"
- "st1 { v24.b }[2], [x27], #0x1\n"
- "st1 { v25.b }[2], [x26], #0x1\n"
- "st1 { v26.b }[2], [x25], #0x1\n"
- "st1 { v27.b }[2], [x24], #0x1\n"
- "st1 { v28.b }[2], [x23], #0x1\n"
- "st1 { v29.b }[2], [x22], #0x1\n"
- "st1 { v30.b }[2], [x21], #0x1\n"
- "st1 { v31.b }[2], [x20], #0x1\n"
+ "st1 { v27.b }[2], [x25], #0x1\n"
+ "st1 { v26.b }[2], [x24], #0x1\n"
+ "st1 { v25.b }[2], [x23], #0x1\n"
+ "st1 { v24.b }[2], [x22], #0x1\n"
+ "st1 { v23.b }[2], [x21], #0x1\n"
+ "st1 { v22.b }[2], [x20], #0x1\n"
+ "st1 { v21.b }[2], [x19], #0x1\n"
"beq 4f\n"
- "st1 { v24.b }[3], [x27], #0x1\n"
+ "st1 { v28.b }[3], [x26], #0x1\n"
"subs %x[n_channels], %x[n_channels], #0x1\n"
- "st1 { v25.b }[3], [x26], #0x1\n"
- "st1 { v26.b }[3], [x25], #0x1\n"
- "st1 { v27.b }[3], [x24], #0x1\n"
- "st1 { v28.b }[3], [x23], #0x1\n"
- "st1 { v29.b }[3], [x22], #0x1\n"
- "st1 { v30.b }[3], [x21], #0x1\n"
- "st1 { v31.b }[3], [x20], #0x1\n"
+ "st1 { v27.b }[3], [x25], #0x1\n"
+ "st1 { v26.b }[3], [x24], #0x1\n"
+ "st1 { v25.b }[3], [x23], #0x1\n"
+ "st1 { v24.b }[3], [x22], #0x1\n"
+ "st1 { v23.b }[3], [x21], #0x1\n"
+ "st1 { v22.b }[3], [x20], #0x1\n"
+ "st1 { v21.b }[3], [x19], #0x1\n"
"4:" // Tail: End
+ "add SP, SP, #0x80\n"
: [n_channels] "+&r" (n_output_channels), [params] "+&r" (params)
: [inptrs] "r" (inptrs), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
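
Both sides of this diff implement the same gemmlowp-style requantization on each 32-bit accumulator: an optional per-channel left shift (sshl), a rounding doubling high multiply by the per-channel multiplier (sqrdmulh), a rounding arithmetic right shift (the and/sshr/sqadd fixup followed by srshl), addition of the output offset, clamping to [minval, maxval], and narrowing to bytes (uzp1 + str s). The revert only changes register allocation — reintroducing x19 (and x10/x11) among the scratch registers and restoring the `add SP, SP, #0x80` epilogue — not the arithmetic. As a minimal scalar C++ sketch of that sequence (names like requantize/shl/shr/c_off are illustrative and do not appear in the kernel; round-half-up stands in for the exact srshl fixup, and the sqrdmulh saturating corner case at INT32_MIN is ignored):

#include <algorithm>
#include <cstdint>

static inline int8_t requantize(int32_t acc, int32_t shl, int32_t mul,
                                int32_t shr, int32_t c_off,
                                int32_t lo, int32_t hi)
{
    int32_t x = acc << shl;                         // sshl: per-channel left shift
    x = static_cast<int32_t>(                       // sqrdmulh: rounding doubling
        (static_cast<int64_t>(x) * mul              //   high multiply, i.e.
         + (1LL << 30)) >> 31);                     //   (2*x*mul + 2^31) >> 32
    if (shr > 0)
        x = (x + (1 << (shr - 1))) >> shr;          // srshl by -shr: rounding right shift
    x += c_off;                                     // add: output offset (c_offset)
    x = std::max(x, lo);                            // smax: clamp to minval
    x = std::min(x, hi);                            // smin: clamp to maxval
    return static_cast<int8_t>(x);                  // uzp1 + str s: narrow and store
}
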
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
index 3a544e0697..bbfa9f439f 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,1433 +45,1439 @@ void a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
)
{
__asm__ __volatile__(
- "lsr x10, %x[n_output_channels], #0x2\n"
- "add x20, %x[qp], %[offsetof_Requantize32_minval]\n"
- "ld1r { v13.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_maxval]\n"
- "ld1r { v11.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_a_offset]\n"
- "ld1r { v3.16b }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_b_offset]\n"
- "ld1r { v12.16b }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v14.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_per_layer_left_shift]\n"
- "ld1r { v15.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_per_layer_mul]\n"
- "ld1r { v9.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_per_layer_right_shift]\n"
- "ld1r { v10.4s }, [x20]\n"
"mov x9, #0x0\n"
- "cbz x10, 9f\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_minval]\n"
+ "ld1r { v14.4s }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_maxval]\n"
+ "ld1r { v13.4s }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_a_offset]\n"
+ "ld1r { v12.16b }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_b_offset]\n"
+ "ld1r { v11.16b }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_c_offset]\n"
+ "ld1r { v10.4s }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_per_layer_left_shift]\n"
+ "ld1r { v9.4s }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_per_layer_mul]\n"
+ "ld1r { v8.4s }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_per_layer_right_shift]\n"
+ "ld1r { v7.4s }, [x19]\n"
+ "lsr x28, %x[n_output_channels], #0x2\n"
+ "cbz x28, 9f\n"
"1:" // Output channel loop
- "movi v31.4s, #0x0\n"
+ "movi v16.4s, #0x0\n"
"cbz %x[bias], 2f\n"
- "lsl x20, x9, #0x2\n"
- "ldr q31, [%x[bias], x20]\n"
+ "lsl x19, x9, #0x2\n"
+ "ldr q16, [%x[bias], x19]\n"
"2:" // Output channel loop: Load bias: Done
- "mov v16.16b, v31.16b\n"
- "mov v17.16b, v31.16b\n"
- "mov v18.16b, v31.16b\n"
- "mov v19.16b, v31.16b\n"
- "mov v20.16b, v31.16b\n"
- "mov v21.16b, v31.16b\n"
- "mov v22.16b, v31.16b\n"
- "mov v23.16b, v31.16b\n"
- "mov v24.16b, v31.16b\n"
- "mov v25.16b, v31.16b\n"
- "mov v26.16b, v31.16b\n"
- "mov v27.16b, v31.16b\n"
- "mov v28.16b, v31.16b\n"
- "mov v29.16b, v31.16b\n"
- "mov v30.16b, v31.16b\n"
- "mov v31.16b, v31.16b\n"
+ "mov v6.16b, v16.16b\n"
+ "mov v5.16b, v16.16b\n"
+ "mov v4.16b, v16.16b\n"
+ "mov v31.16b, v16.16b\n"
+ "mov v30.16b, v16.16b\n"
+ "mov v29.16b, v16.16b\n"
+ "mov v28.16b, v16.16b\n"
+ "mov v27.16b, v16.16b\n"
+ "mov v26.16b, v16.16b\n"
+ "mov v25.16b, v16.16b\n"
+ "mov v24.16b, v16.16b\n"
+ "mov v23.16b, v16.16b\n"
+ "mov v22.16b, v16.16b\n"
+ "mov v21.16b, v16.16b\n"
+ "mov v20.16b, v16.16b\n"
+ "mov v19.16b, v16.16b\n"
"cbz %x[rq_mul_ptr], 3f\n"
- "lsl x20, x9, #0x2\n"
- "ldr q9, [%x[rq_mul_ptr], x20]\n"
- "ldr q10, [%x[rq_right_shift_ptr], x20]\n"
+ "lsl x19, x9, #0x2\n"
+ "ldr q8, [%x[rq_mul_ptr], x19]\n"
+ "ldr q7, [%x[rq_right_shift_ptr], x19]\n"
"cbz %x[rq_left_shift_ptr], 3f\n"
- "ldr q15, [%x[rq_left_shift_ptr], x20]\n"
+ "ldr q9, [%x[rq_left_shift_ptr], x19]\n"
"3:" // Output channel loop: Load quantization parameters: Done
- "ldr s8, [%x[weights]], #0x4\n"
- "mov x20, %x[inptrs]\n"
- "ldp x25, x28, [x20], #0x10\n"
- "lsr x21, %x[kernel_points], #0x1\n"
- "ldr d2, [x25, #0x0]\n"
- "ldr d7, [x28, #0x0]\n"
- "ssubl v2.8h, v2.8b, v3.8b\n"
- "ssubl v7.8h, v7.8b, v3.8b\n"
- "ssubl v8.8h, v8.8b, v12.8b\n"
- "cbz x21, 7f\n"
- "ldr s6, [%x[weights]], #0x4\n"
- "ldp x25, x28, [x20], #0x10\n"
- "subs x21, x21, #0x1\n"
- "ssubl v6.8h, v6.8b, v12.8b\n"
+ "ldr s17, [%x[weights]], #0x4\n"
+ "ssubl v17.8h, v17.8b, v11.8b\n"
+ "mov x19, %x[inptrs]\n"
+ "ldp x25, x27, [x19], #0x10\n"
+ "lsr x20, %x[kernel_points], #0x1\n"
+ "ldr d3, [x25, #0x0]\n"
+ "ssubl v3.8h, v3.8b, v12.8b\n"
+ "ldr d2, [x27, #0x0]\n"
+ "ssubl v2.8h, v2.8b, v12.8b\n"
+ "cbz x20, 7f\n"
+ "ldp x25, x27, [x19], #0x10\n"
+ "ldr s16, [%x[weights]], #0x4\n"
+ "ssubl v16.8h, v16.8b, v11.8b\n"
"ldr d1, [x25, #0x0]\n"
- "ldr d0, [x28, #0x0]\n"
- "ssubl v1.8h, v1.8b, v3.8b\n"
- "ssubl v0.8h, v0.8b, v3.8b\n"
+ "subs x20, x20, #0x1\n"
+ "ssubl v1.8h, v1.8b, v12.8b\n"
+ "ldr d0, [x27, #0x0]\n"
+ "ssubl v0.8h, v0.8b, v12.8b\n"
"beq 5f\n"
"4:" // Output channel loop: Kernel loop
- "ldp x25, x28, [x20], #0x10\n"
- "smlal v16.4s, v8.4h, v2.h[0]\n"
- "smlal v17.4s, v8.4h, v2.h[1]\n"
- "subs x21, x21, #0x1\n"
- "smlal v18.4s, v8.4h, v2.h[2]\n"
- "smlal v19.4s, v8.4h, v2.h[3]\n"
- "smlal v20.4s, v8.4h, v2.h[4]\n"
- "smlal v21.4s, v8.4h, v2.h[5]\n"
- "smlal v22.4s, v8.4h, v2.h[6]\n"
- "smlal v23.4s, v8.4h, v2.h[7]\n"
- "ldr d2, [x25, #0x0]\n"
- "ssubl v2.8h, v2.8b, v3.8b\n"
- "smlal v24.4s, v8.4h, v7.h[0]\n"
- "smlal v25.4s, v8.4h, v7.h[1]\n"
- "smlal v26.4s, v8.4h, v7.h[2]\n"
- "smlal v27.4s, v8.4h, v7.h[3]\n"
- "smlal v28.4s, v8.4h, v7.h[4]\n"
- "smlal v29.4s, v8.4h, v7.h[5]\n"
- "smlal v30.4s, v8.4h, v7.h[6]\n"
- "smlal v31.4s, v8.4h, v7.h[7]\n"
- "ldr d7, [x28, #0x0]\n"
- "ldr s8, [%x[weights]], #0x4\n"
- "ldp x25, x28, [x20], #0x10\n"
- "smlal v16.4s, v6.4h, v1.h[0]\n"
- "smlal v17.4s, v6.4h, v1.h[1]\n"
- "ssubl v7.8h, v7.8b, v3.8b\n"
- "smlal v18.4s, v6.4h, v1.h[2]\n"
- "smlal v19.4s, v6.4h, v1.h[3]\n"
- "ssubl v8.8h, v8.8b, v12.8b\n"
- "smlal v20.4s, v6.4h, v1.h[4]\n"
- "smlal v21.4s, v6.4h, v1.h[5]\n"
- "smlal v22.4s, v6.4h, v1.h[6]\n"
- "smlal v23.4s, v6.4h, v1.h[7]\n"
+ "smlal v6.4s, v17.4h, v3.h[0]\n"
+ "ldp x25, x27, [x19], #0x10\n"
+ "subs x20, x20, #0x1\n"
+ "smlal v5.4s, v17.4h, v3.h[1]\n"
+ "smlal v4.4s, v17.4h, v3.h[2]\n"
+ "smlal v31.4s, v17.4h, v3.h[3]\n"
+ "smlal v30.4s, v17.4h, v3.h[4]\n"
+ "smlal v29.4s, v17.4h, v3.h[5]\n"
+ "smlal v28.4s, v17.4h, v3.h[6]\n"
+ "smlal v27.4s, v17.4h, v3.h[7]\n"
+ "ldr d3, [x25, #0x0]\n"
+ "smlal v26.4s, v17.4h, v2.h[0]\n"
+ "smlal v25.4s, v17.4h, v2.h[1]\n"
+ "smlal v24.4s, v17.4h, v2.h[2]\n"
+ "smlal v23.4s, v17.4h, v2.h[3]\n"
+ "smlal v22.4s, v17.4h, v2.h[4]\n"
+ "smlal v21.4s, v17.4h, v2.h[5]\n"
+ "smlal v20.4s, v17.4h, v2.h[6]\n"
+ "smlal v19.4s, v17.4h, v2.h[7]\n"
+ "ldr d2, [x27, #0x0]\n"
+ "ssubl v3.8h, v3.8b, v12.8b\n"
+ "ldr s17, [%x[weights]], #0x4\n"
+ "smlal v6.4s, v16.4h, v1.h[0]\n"
+ "ldp x25, x27, [x19], #0x10\n"
+ "smlal v5.4s, v16.4h, v1.h[1]\n"
+ "smlal v4.4s, v16.4h, v1.h[2]\n"
+ "ssubl v2.8h, v2.8b, v12.8b\n"
+ "ssubl v17.8h, v17.8b, v11.8b\n"
+ "smlal v31.4s, v16.4h, v1.h[3]\n"
+ "smlal v30.4s, v16.4h, v1.h[4]\n"
+ "smlal v29.4s, v16.4h, v1.h[5]\n"
+ "smlal v28.4s, v16.4h, v1.h[6]\n"
+ "smlal v27.4s, v16.4h, v1.h[7]\n"
"ldr d1, [x25, #0x0]\n"
- "ssubl v1.8h, v1.8b, v3.8b\n"
- "smlal v24.4s, v6.4h, v0.h[0]\n"
- "smlal v25.4s, v6.4h, v0.h[1]\n"
- "smlal v26.4s, v6.4h, v0.h[2]\n"
- "smlal v27.4s, v6.4h, v0.h[3]\n"
- "smlal v28.4s, v6.4h, v0.h[4]\n"
- "smlal v29.4s, v6.4h, v0.h[5]\n"
- "smlal v30.4s, v6.4h, v0.h[6]\n"
- "smlal v31.4s, v6.4h, v0.h[7]\n"
- "ldr d0, [x28, #0x0]\n"
- "ldr s6, [%x[weights]], #0x4\n"
- "ssubl v0.8h, v0.8b, v3.8b\n"
- "ssubl v6.8h, v6.8b, v12.8b\n"
+ "smlal v26.4s, v16.4h, v0.h[0]\n"
+ "smlal v25.4s, v16.4h, v0.h[1]\n"
+ "smlal v24.4s, v16.4h, v0.h[2]\n"
+ "smlal v23.4s, v16.4h, v0.h[3]\n"
+ "smlal v22.4s, v16.4h, v0.h[4]\n"
+ "smlal v21.4s, v16.4h, v0.h[5]\n"
+ "smlal v20.4s, v16.4h, v0.h[6]\n"
+ "smlal v19.4s, v16.4h, v0.h[7]\n"
+ "ldr d0, [x27, #0x0]\n"
+ "ssubl v1.8h, v1.8b, v12.8b\n"
+ "ldr s16, [%x[weights]], #0x4\n"
+ "ssubl v0.8h, v0.8b, v12.8b\n"
+ "ssubl v16.8h, v16.8b, v11.8b\n"
"bgt 4b\n"
"5:" // Output channel loop: Kernel loop tail
"tbnz %x[kernel_points], #0, 6f\n"
- "smlal v16.4s, v8.4h, v2.h[0]\n"
- "smlal v17.4s, v8.4h, v2.h[1]\n"
- "ldr x20, [%x[outptrs], #0x0]\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
- "smlal v18.4s, v8.4h, v2.h[2]\n"
- "smlal v19.4s, v8.4h, v2.h[3]\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
- "smlal v16.4s, v6.4h, v1.h[0]\n"
- "smlal v17.4s, v6.4h, v1.h[1]\n"
- "sshl v16.4s, v16.4s, v15.4s\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "smlal v18.4s, v6.4h, v1.h[2]\n"
- "smlal v19.4s, v6.4h, v1.h[3]\n"
- "sshl v17.4s, v17.4s, v15.4s\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
- "smlal v20.4s, v8.4h, v2.h[4]\n"
- "smlal v21.4s, v8.4h, v2.h[5]\n"
- "sshl v18.4s, v18.4s, v15.4s\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "smlal v22.4s, v8.4h, v2.h[6]\n"
- "smlal v23.4s, v8.4h, v2.h[7]\n"
- "sshl v19.4s, v19.4s, v15.4s\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
- "smlal v24.4s, v8.4h, v7.h[0]\n"
- "smlal v25.4s, v8.4h, v7.h[1]\n"
- "sqrdmulh v16.4s, v16.4s, v9.4s\n"
- "smlal v20.4s, v6.4h, v1.h[4]\n"
- "smlal v21.4s, v6.4h, v1.h[5]\n"
- "sqrdmulh v17.4s, v17.4s, v9.4s\n"
- "smlal v22.4s, v6.4h, v1.h[6]\n"
- "smlal v23.4s, v6.4h, v1.h[7]\n"
- "sqrdmulh v18.4s, v18.4s, v9.4s\n"
- "smlal v24.4s, v6.4h, v0.h[0]\n"
- "smlal v25.4s, v6.4h, v0.h[1]\n"
- "sqrdmulh v19.4s, v19.4s, v9.4s\n"
- "smlal v26.4s, v8.4h, v7.h[2]\n"
- "smlal v27.4s, v8.4h, v7.h[3]\n"
- "and v5.16b, v16.16b, v10.16b\n"
- "smlal v28.4s, v8.4h, v7.h[4]\n"
- "smlal v29.4s, v8.4h, v7.h[5]\n"
- "and v4.16b, v17.16b, v10.16b\n"
- "smlal v30.4s, v8.4h, v7.h[6]\n"
- "smlal v31.4s, v8.4h, v7.h[7]\n"
- "and v2.16b, v18.16b, v10.16b\n"
- "and v1.16b, v19.16b, v10.16b\n"
- "sshl v20.4s, v20.4s, v15.4s\n"
- "smlal v26.4s, v6.4h, v0.h[2]\n"
- "sshl v21.4s, v21.4s, v15.4s\n"
- "sshl v22.4s, v22.4s, v15.4s\n"
- "smlal v27.4s, v6.4h, v0.h[3]\n"
- "sshl v23.4s, v23.4s, v15.4s\n"
- "sshl v24.4s, v24.4s, v15.4s\n"
- "smlal v28.4s, v6.4h, v0.h[4]\n"
- "sshl v25.4s, v25.4s, v15.4s\n"
- "smlal v29.4s, v6.4h, v0.h[5]\n"
- "smlal v30.4s, v6.4h, v0.h[6]\n"
- "smlal v31.4s, v6.4h, v0.h[7]\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "sqrdmulh v21.4s, v21.4s, v9.4s\n"
- "sqrdmulh v22.4s, v22.4s, v9.4s\n"
- "sqrdmulh v23.4s, v23.4s, v9.4s\n"
- "sqrdmulh v24.4s, v24.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqadd v16.4s, v16.4s, v5.4s\n"
- "sqadd v17.4s, v17.4s, v4.4s\n"
- "sqadd v18.4s, v18.4s, v2.4s\n"
- "sqadd v19.4s, v19.4s, v1.4s\n"
- "and v8.16b, v20.16b, v10.16b\n"
- "and v0.16b, v21.16b, v10.16b\n"
- "and v5.16b, v22.16b, v10.16b\n"
- "and v4.16b, v23.16b, v10.16b\n"
- "and v2.16b, v24.16b, v10.16b\n"
- "and v1.16b, v25.16b, v10.16b\n"
- "sshl v26.4s, v26.4s, v15.4s\n"
- "sshl v27.4s, v27.4s, v15.4s\n"
- "sshl v28.4s, v28.4s, v15.4s\n"
- "sshl v29.4s, v29.4s, v15.4s\n"
- "sshl v30.4s, v30.4s, v15.4s\n"
- "sshl v31.4s, v31.4s, v15.4s\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v9.4s\n"
- "sqrdmulh v27.4s, v27.4s, v9.4s\n"
- "sqrdmulh v28.4s, v28.4s, v9.4s\n"
- "sqrdmulh v29.4s, v29.4s, v9.4s\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v31.4s, v31.4s, v9.4s\n"
- "sqadd v20.4s, v20.4s, v8.4s\n"
- "sqadd v21.4s, v21.4s, v0.4s\n"
- "sqadd v22.4s, v22.4s, v5.4s\n"
- "sqadd v23.4s, v23.4s, v4.4s\n"
- "sqadd v24.4s, v24.4s, v2.4s\n"
- "sqadd v25.4s, v25.4s, v1.4s\n"
- "and v8.16b, v26.16b, v10.16b\n"
- "and v0.16b, v27.16b, v10.16b\n"
- "and v5.16b, v28.16b, v10.16b\n"
- "and v4.16b, v29.16b, v10.16b\n"
- "and v2.16b, v30.16b, v10.16b\n"
- "and v1.16b, v31.16b, v10.16b\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "srshl v16.4s, v16.4s, v10.4s\n"
- "srshl v17.4s, v17.4s, v10.4s\n"
- "srshl v18.4s, v18.4s, v10.4s\n"
- "srshl v19.4s, v19.4s, v10.4s\n"
- "srshl v20.4s, v20.4s, v10.4s\n"
- "srshl v21.4s, v21.4s, v10.4s\n"
- "srshl v22.4s, v22.4s, v10.4s\n"
- "srshl v23.4s, v23.4s, v10.4s\n"
- "sqadd v26.4s, v26.4s, v8.4s\n"
- "sqadd v27.4s, v27.4s, v0.4s\n"
- "sqadd v28.4s, v28.4s, v5.4s\n"
- "sqadd v29.4s, v29.4s, v4.4s\n"
- "sqadd v30.4s, v30.4s, v2.4s\n"
- "sqadd v31.4s, v31.4s, v1.4s\n"
- "add v16.4s, v16.4s, v14.4s\n"
- "add v17.4s, v17.4s, v14.4s\n"
- "add v18.4s, v18.4s, v14.4s\n"
- "add v19.4s, v19.4s, v14.4s\n"
- "add v20.4s, v20.4s, v14.4s\n"
- "add v21.4s, v21.4s, v14.4s\n"
- "add v22.4s, v22.4s, v14.4s\n"
- "add v23.4s, v23.4s, v14.4s\n"
- "srshl v24.4s, v24.4s, v10.4s\n"
- "srshl v25.4s, v25.4s, v10.4s\n"
- "srshl v26.4s, v26.4s, v10.4s\n"
- "srshl v27.4s, v27.4s, v10.4s\n"
- "srshl v28.4s, v28.4s, v10.4s\n"
- "srshl v29.4s, v29.4s, v10.4s\n"
- "srshl v30.4s, v30.4s, v10.4s\n"
- "srshl v31.4s, v31.4s, v10.4s\n"
- "smin v16.4s, v16.4s, v11.4s\n"
- "smin v17.4s, v17.4s, v11.4s\n"
- "smin v18.4s, v18.4s, v11.4s\n"
- "smin v19.4s, v19.4s, v11.4s\n"
- "smin v20.4s, v20.4s, v11.4s\n"
- "smin v21.4s, v21.4s, v11.4s\n"
- "smin v22.4s, v22.4s, v11.4s\n"
- "smin v23.4s, v23.4s, v11.4s\n"
- "add v24.4s, v24.4s, v14.4s\n"
- "add v25.4s, v25.4s, v14.4s\n"
- "add v26.4s, v26.4s, v14.4s\n"
- "add v27.4s, v27.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "smax v16.4s, v16.4s, v13.4s\n"
- "smax v17.4s, v17.4s, v13.4s\n"
- "smax v18.4s, v18.4s, v13.4s\n"
- "smax v19.4s, v19.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smax v21.4s, v21.4s, v13.4s\n"
- "smax v22.4s, v22.4s, v13.4s\n"
- "smax v23.4s, v23.4s, v13.4s\n"
- "smin v24.4s, v24.4s, v11.4s\n"
- "smin v25.4s, v25.4s, v11.4s\n"
- "smin v26.4s, v26.4s, v11.4s\n"
- "smin v27.4s, v27.4s, v11.4s\n"
- "smin v28.4s, v28.4s, v11.4s\n"
- "smin v29.4s, v29.4s, v11.4s\n"
- "smin v30.4s, v30.4s, v11.4s\n"
- "smin v31.4s, v31.4s, v11.4s\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "uzp1 v17.16b, v17.16b, v17.16b\n"
- "uzp1 v18.16b, v18.16b, v18.16b\n"
- "uzp1 v19.16b, v19.16b, v19.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v22.16b, v22.16b, v22.16b\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "smax v24.4s, v24.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v26.4s, v26.4s, v13.4s\n"
- "smax v27.4s, v27.4s, v13.4s\n"
- "smax v28.4s, v28.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v31.4s, v31.4s, v13.4s\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "str s16, [x20, x9]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "uzp1 v17.16b, v17.16b, v17.16b\n"
- "uzp1 v18.16b, v18.16b, v18.16b\n"
- "str s17, [x21, x9]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
- "uzp1 v19.16b, v19.16b, v19.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s18, [x22, x9]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v22.16b, v22.16b, v22.16b\n"
- "str s19, [x23, x9]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s20, [x24, x9]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s21, [x25, x9]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s22, [x26, x9]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s23, [x27, x9]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
+ "smlal v6.4s, v17.4h, v3.h[0]\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "smlal v5.4s, v17.4h, v3.h[1]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "smlal v4.4s, v17.4h, v3.h[2]\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "smlal v31.4s, v17.4h, v3.h[3]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
+ "smlal v30.4s, v17.4h, v3.h[4]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
+ "smlal v29.4s, v17.4h, v3.h[5]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
+ "smlal v28.4s, v17.4h, v3.h[6]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
+ "smlal v27.4s, v17.4h, v3.h[7]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
+ "smlal v26.4s, v17.4h, v2.h[0]\n"
+ "smlal v25.4s, v17.4h, v2.h[1]\n"
+ "smlal v24.4s, v17.4h, v2.h[2]\n"
+ "smlal v23.4s, v17.4h, v2.h[3]\n"
+ "smlal v22.4s, v17.4h, v2.h[4]\n"
+ "smlal v21.4s, v17.4h, v2.h[5]\n"
+ "smlal v20.4s, v17.4h, v2.h[6]\n"
+ "smlal v19.4s, v17.4h, v2.h[7]\n"
+ "smlal v6.4s, v16.4h, v1.h[0]\n"
+ "smlal v5.4s, v16.4h, v1.h[1]\n"
+ "smlal v4.4s, v16.4h, v1.h[2]\n"
+ "smlal v31.4s, v16.4h, v1.h[3]\n"
+ "smlal v30.4s, v16.4h, v1.h[4]\n"
+ "smlal v29.4s, v16.4h, v1.h[5]\n"
+ "smlal v28.4s, v16.4h, v1.h[6]\n"
+ "smlal v27.4s, v16.4h, v1.h[7]\n"
+ "smlal v26.4s, v16.4h, v0.h[0]\n"
+ "smlal v25.4s, v16.4h, v0.h[1]\n"
+ "smlal v24.4s, v16.4h, v0.h[2]\n"
+ "smlal v23.4s, v16.4h, v0.h[3]\n"
+ "smlal v22.4s, v16.4h, v0.h[4]\n"
+ "smlal v21.4s, v16.4h, v0.h[5]\n"
+ "smlal v20.4s, v16.4h, v0.h[6]\n"
+ "smlal v19.4s, v16.4h, v0.h[7]\n"
+ "sshl v6.4s, v6.4s, v9.4s\n"
+ "sshl v5.4s, v5.4s, v9.4s\n"
+ "sqrdmulh v6.4s, v6.4s, v8.4s\n"
+ "sqrdmulh v5.4s, v5.4s, v8.4s\n"
+ "sshl v4.4s, v4.4s, v9.4s\n"
+ "sshl v31.4s, v31.4s, v9.4s\n"
+ "and v18.16b, v6.16b, v7.16b\n"
+ "and v16.16b, v5.16b, v7.16b\n"
+ "sqrdmulh v4.4s, v4.4s, v8.4s\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v31.4s, v31.4s, v8.4s\n"
+ "sqadd v6.4s, v6.4s, v18.4s\n"
+ "sqadd v5.4s, v5.4s, v16.4s\n"
+ "and v17.16b, v4.16b, v7.16b\n"
+ "and v16.16b, v31.16b, v7.16b\n"
+ "srshl v6.4s, v6.4s, v7.4s\n"
+ "srshl v5.4s, v5.4s, v7.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "add v6.4s, v6.4s, v10.4s\n"
+ "add v5.4s, v5.4s, v10.4s\n"
+ "sqadd v4.4s, v4.4s, v17.4s\n"
+ "smin v6.4s, v6.4s, v13.4s\n"
+ "smin v5.4s, v5.4s, v13.4s\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
+ "smax v6.4s, v6.4s, v14.4s\n"
+ "smax v5.4s, v5.4s, v14.4s\n"
+ "srshl v4.4s, v4.4s, v7.4s\n"
+ "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "str s6, [x19, x9]\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "add v4.4s, v4.4s, v10.4s\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "srshl v31.4s, v31.4s, v7.4s\n"
+ "str s5, [x20, x9]\n"
+ "sshl v30.4s, v30.4s, v9.4s\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
+ "smin v4.4s, v4.4s, v13.4s\n"
+ "sqrdmulh v30.4s, v30.4s, v8.4s\n"
+ "add v31.4s, v31.4s, v10.4s\n"
+ "smax v4.4s, v4.4s, v14.4s\n"
+ "sshl v29.4s, v29.4s, v9.4s\n"
+ "smin v31.4s, v31.4s, v13.4s\n"
+ "uzp1 v4.16b, v4.16b, v4.16b\n"
+ "and v16.16b, v30.16b, v7.16b\n"
+ "uzp1 v4.16b, v4.16b, v4.16b\n"
+ "str s4, [x21, x9]\n"
+ "smax v31.4s, v31.4s, v14.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
+ "sqrdmulh v29.4s, v29.4s, v8.4s\n"
+ "sshl v28.4s, v28.4s, v9.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s24, [x20, x9]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s25, [x21, x9]\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s26, [x22, x9]\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s27, [x23, x9]\n"
+ "sqadd v30.4s, v30.4s, v16.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "str s28, [x24, x9]\n"
- "str s29, [x25, x9]\n"
- "str s30, [x26, x9]\n"
- "str s31, [x27, x9]\n"
- "b 8f\n"
- "6:" // Output channel loop: Odd tail
- "ldp x25, x28, [x20], #0x10\n"
- "smlal v16.4s, v8.4h, v2.h[0]\n"
- "smlal v17.4s, v8.4h, v2.h[1]\n"
- "ldr x20, [%x[outptrs], #0x0]\n"
- "smlal v18.4s, v8.4h, v2.h[2]\n"
- "smlal v19.4s, v8.4h, v2.h[3]\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "smlal v20.4s, v8.4h, v2.h[4]\n"
- "smlal v21.4s, v8.4h, v2.h[5]\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "smlal v22.4s, v8.4h, v2.h[6]\n"
- "smlal v23.4s, v8.4h, v2.h[7]\n"
- "ldr d2, [x25, #0x0]\n"
- "ssubl v2.8h, v2.8b, v3.8b\n"
- "smlal v24.4s, v8.4h, v7.h[0]\n"
- "smlal v25.4s, v8.4h, v7.h[1]\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "smlal v26.4s, v8.4h, v7.h[2]\n"
- "smlal v27.4s, v8.4h, v7.h[3]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
- "smlal v28.4s, v8.4h, v7.h[4]\n"
- "smlal v29.4s, v8.4h, v7.h[5]\n"
- "smlal v30.4s, v8.4h, v7.h[6]\n"
- "smlal v31.4s, v8.4h, v7.h[7]\n"
- "ldr s8, [%x[weights]], #0x4\n"
- "ldr d7, [x28, #0x0]\n"
- "smlal v16.4s, v6.4h, v1.h[0]\n"
- "smlal v17.4s, v6.4h, v1.h[1]\n"
- "ssubl v8.8h, v8.8b, v12.8b\n"
- "smlal v18.4s, v6.4h, v1.h[2]\n"
- "smlal v19.4s, v6.4h, v1.h[3]\n"
- "ssubl v7.8h, v7.8b, v3.8b\n"
- "smlal v16.4s, v8.4h, v2.h[0]\n"
- "smlal v17.4s, v8.4h, v2.h[1]\n"
- "sshl v16.4s, v16.4s, v15.4s\n"
- "smlal v18.4s, v8.4h, v2.h[2]\n"
- "smlal v19.4s, v8.4h, v2.h[3]\n"
- "sshl v17.4s, v17.4s, v15.4s\n"
- "smlal v20.4s, v6.4h, v1.h[4]\n"
- "smlal v21.4s, v6.4h, v1.h[5]\n"
- "sshl v18.4s, v18.4s, v15.4s\n"
- "smlal v22.4s, v6.4h, v1.h[6]\n"
- "smlal v23.4s, v6.4h, v1.h[7]\n"
- "sshl v19.4s, v19.4s, v15.4s\n"
- "smlal v24.4s, v6.4h, v0.h[0]\n"
- "smlal v25.4s, v6.4h, v0.h[1]\n"
- "sqrdmulh v16.4s, v16.4s, v9.4s\n"
- "smlal v20.4s, v8.4h, v2.h[4]\n"
- "smlal v21.4s, v8.4h, v2.h[5]\n"
- "sqrdmulh v17.4s, v17.4s, v9.4s\n"
- "smlal v22.4s, v8.4h, v2.h[6]\n"
- "smlal v23.4s, v8.4h, v2.h[7]\n"
- "sqrdmulh v18.4s, v18.4s, v9.4s\n"
- "smlal v24.4s, v8.4h, v7.h[0]\n"
- "smlal v25.4s, v8.4h, v7.h[1]\n"
- "sqrdmulh v19.4s, v19.4s, v9.4s\n"
- "smlal v26.4s, v6.4h, v0.h[2]\n"
- "smlal v27.4s, v6.4h, v0.h[3]\n"
- "and v5.16b, v16.16b, v10.16b\n"
- "smlal v28.4s, v6.4h, v0.h[4]\n"
- "smlal v29.4s, v6.4h, v0.h[5]\n"
- "and v4.16b, v17.16b, v10.16b\n"
- "smlal v30.4s, v6.4h, v0.h[6]\n"
- "smlal v31.4s, v6.4h, v0.h[7]\n"
- "and v2.16b, v18.16b, v10.16b\n"
- "and v1.16b, v19.16b, v10.16b\n"
- "sshl v20.4s, v20.4s, v15.4s\n"
- "smlal v26.4s, v8.4h, v7.h[2]\n"
- "sshl v21.4s, v21.4s, v15.4s\n"
- "sshl v22.4s, v22.4s, v15.4s\n"
- "smlal v27.4s, v8.4h, v7.h[3]\n"
- "sshl v23.4s, v23.4s, v15.4s\n"
- "sshl v24.4s, v24.4s, v15.4s\n"
- "smlal v28.4s, v8.4h, v7.h[4]\n"
- "sshl v25.4s, v25.4s, v15.4s\n"
- "smlal v29.4s, v8.4h, v7.h[5]\n"
- "smlal v30.4s, v8.4h, v7.h[6]\n"
- "smlal v31.4s, v8.4h, v7.h[7]\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "sqrdmulh v21.4s, v21.4s, v9.4s\n"
- "sqrdmulh v22.4s, v22.4s, v9.4s\n"
- "sqrdmulh v23.4s, v23.4s, v9.4s\n"
- "sqrdmulh v24.4s, v24.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqadd v16.4s, v16.4s, v5.4s\n"
- "sqadd v17.4s, v17.4s, v4.4s\n"
- "sqadd v18.4s, v18.4s, v2.4s\n"
- "sqadd v19.4s, v19.4s, v1.4s\n"
- "and v8.16b, v20.16b, v10.16b\n"
- "and v0.16b, v21.16b, v10.16b\n"
- "and v5.16b, v22.16b, v10.16b\n"
- "and v4.16b, v23.16b, v10.16b\n"
- "and v2.16b, v24.16b, v10.16b\n"
- "and v1.16b, v25.16b, v10.16b\n"
- "sshl v26.4s, v26.4s, v15.4s\n"
- "sshl v27.4s, v27.4s, v15.4s\n"
- "sshl v28.4s, v28.4s, v15.4s\n"
- "sshl v29.4s, v29.4s, v15.4s\n"
- "sshl v30.4s, v30.4s, v15.4s\n"
- "sshl v31.4s, v31.4s, v15.4s\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v9.4s\n"
- "sqrdmulh v27.4s, v27.4s, v9.4s\n"
- "sqrdmulh v28.4s, v28.4s, v9.4s\n"
- "sqrdmulh v29.4s, v29.4s, v9.4s\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v31.4s, v31.4s, v9.4s\n"
- "sqadd v20.4s, v20.4s, v8.4s\n"
- "sqadd v21.4s, v21.4s, v0.4s\n"
- "sqadd v22.4s, v22.4s, v5.4s\n"
- "sqadd v23.4s, v23.4s, v4.4s\n"
- "sqadd v24.4s, v24.4s, v2.4s\n"
- "sqadd v25.4s, v25.4s, v1.4s\n"
- "and v8.16b, v26.16b, v10.16b\n"
- "and v0.16b, v27.16b, v10.16b\n"
- "and v5.16b, v28.16b, v10.16b\n"
- "and v4.16b, v29.16b, v10.16b\n"
- "and v2.16b, v30.16b, v10.16b\n"
- "and v1.16b, v31.16b, v10.16b\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "srshl v16.4s, v16.4s, v10.4s\n"
- "srshl v17.4s, v17.4s, v10.4s\n"
- "srshl v18.4s, v18.4s, v10.4s\n"
- "srshl v19.4s, v19.4s, v10.4s\n"
- "srshl v20.4s, v20.4s, v10.4s\n"
- "srshl v21.4s, v21.4s, v10.4s\n"
- "srshl v22.4s, v22.4s, v10.4s\n"
- "srshl v23.4s, v23.4s, v10.4s\n"
- "sqadd v26.4s, v26.4s, v8.4s\n"
- "sqadd v27.4s, v27.4s, v0.4s\n"
- "sqadd v28.4s, v28.4s, v5.4s\n"
- "sqadd v29.4s, v29.4s, v4.4s\n"
- "sqadd v30.4s, v30.4s, v2.4s\n"
- "sqadd v31.4s, v31.4s, v1.4s\n"
- "add v16.4s, v16.4s, v14.4s\n"
- "add v17.4s, v17.4s, v14.4s\n"
- "add v18.4s, v18.4s, v14.4s\n"
- "add v19.4s, v19.4s, v14.4s\n"
- "add v20.4s, v20.4s, v14.4s\n"
- "add v21.4s, v21.4s, v14.4s\n"
- "add v22.4s, v22.4s, v14.4s\n"
- "add v23.4s, v23.4s, v14.4s\n"
- "srshl v24.4s, v24.4s, v10.4s\n"
- "srshl v25.4s, v25.4s, v10.4s\n"
- "srshl v26.4s, v26.4s, v10.4s\n"
- "srshl v27.4s, v27.4s, v10.4s\n"
- "srshl v28.4s, v28.4s, v10.4s\n"
- "srshl v29.4s, v29.4s, v10.4s\n"
- "srshl v30.4s, v30.4s, v10.4s\n"
- "srshl v31.4s, v31.4s, v10.4s\n"
- "smin v16.4s, v16.4s, v11.4s\n"
- "smin v17.4s, v17.4s, v11.4s\n"
- "smin v18.4s, v18.4s, v11.4s\n"
- "smin v19.4s, v19.4s, v11.4s\n"
- "smin v20.4s, v20.4s, v11.4s\n"
- "smin v21.4s, v21.4s, v11.4s\n"
- "smin v22.4s, v22.4s, v11.4s\n"
- "smin v23.4s, v23.4s, v11.4s\n"
- "add v24.4s, v24.4s, v14.4s\n"
- "add v25.4s, v25.4s, v14.4s\n"
- "add v26.4s, v26.4s, v14.4s\n"
- "add v27.4s, v27.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "smax v16.4s, v16.4s, v13.4s\n"
- "smax v17.4s, v17.4s, v13.4s\n"
- "smax v18.4s, v18.4s, v13.4s\n"
- "smax v19.4s, v19.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smax v21.4s, v21.4s, v13.4s\n"
- "smax v22.4s, v22.4s, v13.4s\n"
- "smax v23.4s, v23.4s, v13.4s\n"
- "smin v24.4s, v24.4s, v11.4s\n"
- "smin v25.4s, v25.4s, v11.4s\n"
- "smin v26.4s, v26.4s, v11.4s\n"
- "smin v27.4s, v27.4s, v11.4s\n"
- "smin v28.4s, v28.4s, v11.4s\n"
- "smin v29.4s, v29.4s, v11.4s\n"
- "smin v30.4s, v30.4s, v11.4s\n"
- "smin v31.4s, v31.4s, v11.4s\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "uzp1 v17.16b, v17.16b, v17.16b\n"
- "uzp1 v18.16b, v18.16b, v18.16b\n"
- "uzp1 v19.16b, v19.16b, v19.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str s31, [x22, x9]\n"
+ "and v17.16b, v29.16b, v7.16b\n"
+ "sqrdmulh v28.4s, v28.4s, v8.4s\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
+ "srshl v30.4s, v30.4s, v7.4s\n"
+ "sshl v27.4s, v27.4s, v9.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v28.16b, v7.16b\n"
+ "add v30.4s, v30.4s, v10.4s\n"
+ "sqadd v29.4s, v29.4s, v17.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smin v30.4s, v30.4s, v13.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v8.4s\n"
+ "srshl v29.4s, v29.4s, v7.4s\n"
+ "smax v30.4s, v30.4s, v14.4s\n"
+ "sqadd v28.4s, v28.4s, v16.4s\n"
+ "and v16.16b, v27.16b, v7.16b\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "add v29.4s, v29.4s, v10.4s\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "str s30, [x23, x9]\n"
+ "smin v29.4s, v29.4s, v13.4s\n"
+ "srshl v28.4s, v28.4s, v7.4s\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshl v26.4s, v26.4s, v9.4s\n"
+ "smax v29.4s, v29.4s, v14.4s\n"
+ "add v28.4s, v28.4s, v10.4s\n"
+ "sqadd v27.4s, v27.4s, v16.4s\n"
+ "uzp1 v29.16b, v29.16b, v29.16b\n"
+ "smin v28.4s, v28.4s, v13.4s\n"
+ "uzp1 v29.16b, v29.16b, v29.16b\n"
+ "str s29, [x24, x9]\n"
+ "smax v28.4s, v28.4s, v14.4s\n"
+ "srshl v27.4s, v27.4s, v7.4s\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
+ "sqrdmulh v26.4s, v26.4s, v8.4s\n"
+ "sshl v25.4s, v25.4s, v9.4s\n"
+ "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "add v27.4s, v27.4s, v10.4s\n"
+ "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "str s28, [x25, x9]\n"
+ "smin v27.4s, v27.4s, v13.4s\n"
+ "and v17.16b, v26.16b, v7.16b\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
+ "sqrdmulh v25.4s, v25.4s, v8.4s\n"
+ "sshl v24.4s, v24.4s, v9.4s\n"
+ "smax v27.4s, v27.4s, v14.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v25.16b, v7.16b\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "sqadd v26.4s, v26.4s, v17.4s\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "str s27, [x26, x9]\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v24.4s, v24.4s, v8.4s\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
+ "srshl v26.4s, v26.4s, v7.4s\n"
+ "sshl v23.4s, v23.4s, v9.4s\n"
+ "sqadd v25.4s, v25.4s, v16.4s\n"
+ "and v17.16b, v24.16b, v7.16b\n"
+ "add v26.4s, v26.4s, v10.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v8.4s\n"
+ "srshl v25.4s, v25.4s, v7.4s\n"
+ "smin v26.4s, v26.4s, v13.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v23.16b, v7.16b\n"
+ "smax v26.4s, v26.4s, v14.4s\n"
+ "add v25.4s, v25.4s, v10.4s\n"
+ "sqadd v24.4s, v24.4s, v17.4s\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "smin v25.4s, v25.4s, v13.4s\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "str s26, [x19, x9]\n"
+ "smax v25.4s, v25.4s, v14.4s\n"
+ "srshl v24.4s, v24.4s, v7.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshl v22.4s, v22.4s, v9.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "add v24.4s, v24.4s, v10.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s25, [x20, x9]\n"
+ "smin v24.4s, v24.4s, v13.4s\n"
+ "sqadd v23.4s, v23.4s, v16.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v8.4s\n"
+ "sshl v21.4s, v21.4s, v9.4s\n"
+ "smax v24.4s, v24.4s, v14.4s\n"
+ "srshl v23.4s, v23.4s, v7.4s\n"
+ "and v17.16b, v22.16b, v7.16b\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v8.4s\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "str s24, [x21, x9]\n"
+ "add v23.4s, v23.4s, v10.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v21.16b, v7.16b\n"
+ "sshl v20.4s, v20.4s, v9.4s\n"
+ "smin v23.4s, v23.4s, v13.4s\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smax v23.4s, v23.4s, v14.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v8.4s\n"
+ "srshl v22.4s, v22.4s, v7.4s\n"
"uzp1 v23.16b, v23.16b, v23.16b\n"
- "smax v24.4s, v24.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v26.4s, v26.4s, v13.4s\n"
- "smax v27.4s, v27.4s, v13.4s\n"
- "smax v28.4s, v28.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v31.4s, v31.4s, v13.4s\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "str s16, [x20, x9]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "uzp1 v17.16b, v17.16b, v17.16b\n"
- "uzp1 v18.16b, v18.16b, v18.16b\n"
- "str s17, [x21, x9]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
- "uzp1 v19.16b, v19.16b, v19.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s18, [x22, x9]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v22.16b, v22.16b, v22.16b\n"
- "str s19, [x23, x9]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
+ "sqadd v21.4s, v21.4s, v16.4s\n"
"uzp1 v23.16b, v23.16b, v23.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s20, [x24, x9]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s21, [x25, x9]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s22, [x26, x9]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s23, [x27, x9]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
+ "str s23, [x22, x9]\n"
+ "add v22.4s, v22.4s, v10.4s\n"
+ "and v16.16b, v20.16b, v7.16b\n"
+ "srshl v21.4s, v21.4s, v7.4s\n"
+ "sshl v19.4s, v19.4s, v9.4s\n"
+ "smin v22.4s, v22.4s, v13.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "add v21.4s, v21.4s, v10.4s\n"
+ "smax v22.4s, v22.4s, v14.4s\n"
+ "sqadd v20.4s, v20.4s, v16.4s\n"
+ "smin v21.4s, v21.4s, v13.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "sqrdmulh v19.4s, v19.4s, v8.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str s22, [x23, x9]\n"
+ "smax v21.4s, v21.4s, v14.4s\n"
+ "srshl v20.4s, v20.4s, v7.4s\n"
+ "and v16.16b, v19.16b, v7.16b\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "add v20.4s, v20.4s, v10.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "str s21, [x24, x9]\n"
+ "smin v20.4s, v20.4s, v13.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "smax v20.4s, v20.4s, v14.4s\n"
+ "srshl v19.4s, v19.4s, v7.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "str s20, [x25, x9]\n"
+ "add v19.4s, v19.4s, v10.4s\n"
+ "smin v19.4s, v19.4s, v13.4s\n"
+ "smax v19.4s, v19.4s, v14.4s\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str s19, [x26, x9]\n"
+ "b 8f\n"
+ "6:" // Output channel loop: Odd tail
+ "smlal v6.4s, v17.4h, v3.h[0]\n"
+ "ldp x25, x27, [x19], #0x10\n"
+ "smlal v5.4s, v17.4h, v3.h[1]\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "smlal v4.4s, v17.4h, v3.h[2]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "smlal v31.4s, v17.4h, v3.h[3]\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "smlal v30.4s, v17.4h, v3.h[4]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
+ "smlal v29.4s, v17.4h, v3.h[5]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
+ "smlal v28.4s, v17.4h, v3.h[6]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
+ "smlal v27.4s, v17.4h, v3.h[7]\n"
+ "ldr d3, [x25, #0x0]\n"
+ "smlal v26.4s, v17.4h, v2.h[0]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
+ "smlal v25.4s, v17.4h, v2.h[1]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
+ "smlal v24.4s, v17.4h, v2.h[2]\n"
+ "smlal v23.4s, v17.4h, v2.h[3]\n"
+ "smlal v22.4s, v17.4h, v2.h[4]\n"
+ "smlal v21.4s, v17.4h, v2.h[5]\n"
+ "smlal v20.4s, v17.4h, v2.h[6]\n"
+ "smlal v19.4s, v17.4h, v2.h[7]\n"
+ "ldr d2, [x27, #0x0]\n"
+ "ssubl v3.8h, v3.8b, v12.8b\n"
+ "ldr s17, [%x[weights]], #0x4\n"
+ "smlal v6.4s, v16.4h, v1.h[0]\n"
+ "smlal v5.4s, v16.4h, v1.h[1]\n"
+ "smlal v4.4s, v16.4h, v1.h[2]\n"
+ "ssubl v2.8h, v2.8b, v12.8b\n"
+ "ssubl v17.8h, v17.8b, v11.8b\n"
+ "smlal v31.4s, v16.4h, v1.h[3]\n"
+ "smlal v30.4s, v16.4h, v1.h[4]\n"
+ "smlal v29.4s, v16.4h, v1.h[5]\n"
+ "smlal v28.4s, v16.4h, v1.h[6]\n"
+ "smlal v27.4s, v16.4h, v1.h[7]\n"
+ "smlal v26.4s, v16.4h, v0.h[0]\n"
+ "smlal v25.4s, v16.4h, v0.h[1]\n"
+ "smlal v24.4s, v16.4h, v0.h[2]\n"
+ "smlal v23.4s, v16.4h, v0.h[3]\n"
+ "smlal v22.4s, v16.4h, v0.h[4]\n"
+ "smlal v21.4s, v16.4h, v0.h[5]\n"
+ "smlal v20.4s, v16.4h, v0.h[6]\n"
+ "smlal v19.4s, v16.4h, v0.h[7]\n"
+ "smlal v6.4s, v17.4h, v3.h[0]\n"
+ "smlal v5.4s, v17.4h, v3.h[1]\n"
+ "smlal v4.4s, v17.4h, v3.h[2]\n"
+ "smlal v31.4s, v17.4h, v3.h[3]\n"
+ "smlal v30.4s, v17.4h, v3.h[4]\n"
+ "smlal v29.4s, v17.4h, v3.h[5]\n"
+ "smlal v28.4s, v17.4h, v3.h[6]\n"
+ "smlal v27.4s, v17.4h, v3.h[7]\n"
+ "smlal v26.4s, v17.4h, v2.h[0]\n"
+ "smlal v25.4s, v17.4h, v2.h[1]\n"
+ "smlal v24.4s, v17.4h, v2.h[2]\n"
+ "smlal v23.4s, v17.4h, v2.h[3]\n"
+ "smlal v22.4s, v17.4h, v2.h[4]\n"
+ "smlal v21.4s, v17.4h, v2.h[5]\n"
+ "smlal v20.4s, v17.4h, v2.h[6]\n"
+ "smlal v19.4s, v17.4h, v2.h[7]\n"
+ "sshl v6.4s, v6.4s, v9.4s\n"
+ "sshl v5.4s, v5.4s, v9.4s\n"
+ "sqrdmulh v6.4s, v6.4s, v8.4s\n"
+ "sqrdmulh v5.4s, v5.4s, v8.4s\n"
+ "sshl v4.4s, v4.4s, v9.4s\n"
+ "sshl v31.4s, v31.4s, v9.4s\n"
+ "and v18.16b, v6.16b, v7.16b\n"
+ "and v16.16b, v5.16b, v7.16b\n"
+ "sqrdmulh v4.4s, v4.4s, v8.4s\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v31.4s, v31.4s, v8.4s\n"
+ "sqadd v6.4s, v6.4s, v18.4s\n"
+ "sqadd v5.4s, v5.4s, v16.4s\n"
+ "and v17.16b, v4.16b, v7.16b\n"
+ "and v16.16b, v31.16b, v7.16b\n"
+ "srshl v6.4s, v6.4s, v7.4s\n"
+ "srshl v5.4s, v5.4s, v7.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "add v6.4s, v6.4s, v10.4s\n"
+ "add v5.4s, v5.4s, v10.4s\n"
+ "sqadd v4.4s, v4.4s, v17.4s\n"
+ "smin v6.4s, v6.4s, v13.4s\n"
+ "smin v5.4s, v5.4s, v13.4s\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
+ "smax v6.4s, v6.4s, v14.4s\n"
+ "smax v5.4s, v5.4s, v14.4s\n"
+ "srshl v4.4s, v4.4s, v7.4s\n"
+ "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "str s6, [x19, x9]\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "add v4.4s, v4.4s, v10.4s\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "srshl v31.4s, v31.4s, v7.4s\n"
+ "str s5, [x20, x9]\n"
+ "sshl v30.4s, v30.4s, v9.4s\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
+ "smin v4.4s, v4.4s, v13.4s\n"
+ "sqrdmulh v30.4s, v30.4s, v8.4s\n"
+ "add v31.4s, v31.4s, v10.4s\n"
+ "smax v4.4s, v4.4s, v14.4s\n"
+ "sshl v29.4s, v29.4s, v9.4s\n"
+ "smin v31.4s, v31.4s, v13.4s\n"
+ "uzp1 v4.16b, v4.16b, v4.16b\n"
+ "and v16.16b, v30.16b, v7.16b\n"
+ "uzp1 v4.16b, v4.16b, v4.16b\n"
+ "str s4, [x21, x9]\n"
+ "smax v31.4s, v31.4s, v14.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
+ "sqrdmulh v29.4s, v29.4s, v8.4s\n"
+ "sshl v28.4s, v28.4s, v9.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s24, [x20, x9]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s25, [x21, x9]\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s26, [x22, x9]\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s27, [x23, x9]\n"
+ "sqadd v30.4s, v30.4s, v16.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "str s28, [x24, x9]\n"
- "str s29, [x25, x9]\n"
- "str s30, [x26, x9]\n"
- "str s31, [x27, x9]\n"
- "b 8f\n"
- "7:" // Output channel loop: Single kernel point
- "smlal v16.4s, v8.4h, v2.h[0]\n"
- "smlal v17.4s, v8.4h, v2.h[1]\n"
- "sshl v16.4s, v16.4s, v15.4s\n"
- "ldr x20, [%x[outptrs], #0x0]\n"
- "smlal v18.4s, v8.4h, v2.h[2]\n"
- "smlal v19.4s, v8.4h, v2.h[3]\n"
- "sshl v17.4s, v17.4s, v15.4s\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
- "sshl v18.4s, v18.4s, v15.4s\n"
- "sshl v19.4s, v19.4s, v15.4s\n"
- "smlal v20.4s, v8.4h, v2.h[4]\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "smlal v21.4s, v8.4h, v2.h[5]\n"
- "smlal v22.4s, v8.4h, v2.h[6]\n"
- "sqrdmulh v16.4s, v16.4s, v9.4s\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
- "smlal v23.4s, v8.4h, v2.h[7]\n"
- "smlal v24.4s, v8.4h, v7.h[0]\n"
- "sqrdmulh v17.4s, v17.4s, v9.4s\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "smlal v25.4s, v8.4h, v7.h[1]\n"
- "sqrdmulh v18.4s, v18.4s, v9.4s\n"
- "smlal v26.4s, v8.4h, v7.h[2]\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
- "sqrdmulh v19.4s, v19.4s, v9.4s\n"
- "and v5.16b, v16.16b, v10.16b\n"
- "smlal v27.4s, v8.4h, v7.h[3]\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "and v4.16b, v17.16b, v10.16b\n"
- "and v2.16b, v18.16b, v10.16b\n"
- "smlal v28.4s, v8.4h, v7.h[4]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
- "and v1.16b, v19.16b, v10.16b\n"
- "sshl v20.4s, v20.4s, v15.4s\n"
- "smlal v29.4s, v8.4h, v7.h[5]\n"
- "sshl v21.4s, v21.4s, v15.4s\n"
- "sshl v22.4s, v22.4s, v15.4s\n"
- "smlal v30.4s, v8.4h, v7.h[6]\n"
- "sshl v23.4s, v23.4s, v15.4s\n"
- "sshl v24.4s, v24.4s, v15.4s\n"
- "smlal v31.4s, v8.4h, v7.h[7]\n"
- "sshl v25.4s, v25.4s, v15.4s\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "sqrdmulh v21.4s, v21.4s, v9.4s\n"
- "sqrdmulh v22.4s, v22.4s, v9.4s\n"
- "sqrdmulh v23.4s, v23.4s, v9.4s\n"
- "sqrdmulh v24.4s, v24.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqadd v16.4s, v16.4s, v5.4s\n"
- "sqadd v17.4s, v17.4s, v4.4s\n"
- "sqadd v18.4s, v18.4s, v2.4s\n"
- "sqadd v19.4s, v19.4s, v1.4s\n"
- "and v8.16b, v20.16b, v10.16b\n"
- "and v0.16b, v21.16b, v10.16b\n"
- "and v5.16b, v22.16b, v10.16b\n"
- "and v4.16b, v23.16b, v10.16b\n"
- "and v2.16b, v24.16b, v10.16b\n"
- "and v1.16b, v25.16b, v10.16b\n"
- "sshl v26.4s, v26.4s, v15.4s\n"
- "sshl v27.4s, v27.4s, v15.4s\n"
- "sshl v28.4s, v28.4s, v15.4s\n"
- "sshl v29.4s, v29.4s, v15.4s\n"
- "sshl v30.4s, v30.4s, v15.4s\n"
- "sshl v31.4s, v31.4s, v15.4s\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v9.4s\n"
- "sqrdmulh v27.4s, v27.4s, v9.4s\n"
- "sqrdmulh v28.4s, v28.4s, v9.4s\n"
- "sqrdmulh v29.4s, v29.4s, v9.4s\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v31.4s, v31.4s, v9.4s\n"
- "sqadd v20.4s, v20.4s, v8.4s\n"
- "sqadd v21.4s, v21.4s, v0.4s\n"
- "sqadd v22.4s, v22.4s, v5.4s\n"
- "sqadd v23.4s, v23.4s, v4.4s\n"
- "sqadd v24.4s, v24.4s, v2.4s\n"
- "sqadd v25.4s, v25.4s, v1.4s\n"
- "and v8.16b, v26.16b, v10.16b\n"
- "and v0.16b, v27.16b, v10.16b\n"
- "and v5.16b, v28.16b, v10.16b\n"
- "and v4.16b, v29.16b, v10.16b\n"
- "and v2.16b, v30.16b, v10.16b\n"
- "and v1.16b, v31.16b, v10.16b\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "srshl v16.4s, v16.4s, v10.4s\n"
- "srshl v17.4s, v17.4s, v10.4s\n"
- "srshl v18.4s, v18.4s, v10.4s\n"
- "srshl v19.4s, v19.4s, v10.4s\n"
- "srshl v20.4s, v20.4s, v10.4s\n"
- "srshl v21.4s, v21.4s, v10.4s\n"
- "srshl v22.4s, v22.4s, v10.4s\n"
- "srshl v23.4s, v23.4s, v10.4s\n"
- "sqadd v26.4s, v26.4s, v8.4s\n"
- "sqadd v27.4s, v27.4s, v0.4s\n"
- "sqadd v28.4s, v28.4s, v5.4s\n"
- "sqadd v29.4s, v29.4s, v4.4s\n"
- "sqadd v30.4s, v30.4s, v2.4s\n"
- "sqadd v31.4s, v31.4s, v1.4s\n"
- "add v16.4s, v16.4s, v14.4s\n"
- "add v17.4s, v17.4s, v14.4s\n"
- "add v18.4s, v18.4s, v14.4s\n"
- "add v19.4s, v19.4s, v14.4s\n"
- "add v20.4s, v20.4s, v14.4s\n"
- "add v21.4s, v21.4s, v14.4s\n"
- "add v22.4s, v22.4s, v14.4s\n"
- "add v23.4s, v23.4s, v14.4s\n"
- "srshl v24.4s, v24.4s, v10.4s\n"
- "srshl v25.4s, v25.4s, v10.4s\n"
- "srshl v26.4s, v26.4s, v10.4s\n"
- "srshl v27.4s, v27.4s, v10.4s\n"
- "srshl v28.4s, v28.4s, v10.4s\n"
- "srshl v29.4s, v29.4s, v10.4s\n"
- "srshl v30.4s, v30.4s, v10.4s\n"
- "srshl v31.4s, v31.4s, v10.4s\n"
- "smin v16.4s, v16.4s, v11.4s\n"
- "smin v17.4s, v17.4s, v11.4s\n"
- "smin v18.4s, v18.4s, v11.4s\n"
- "smin v19.4s, v19.4s, v11.4s\n"
- "smin v20.4s, v20.4s, v11.4s\n"
- "smin v21.4s, v21.4s, v11.4s\n"
- "smin v22.4s, v22.4s, v11.4s\n"
- "smin v23.4s, v23.4s, v11.4s\n"
- "add v24.4s, v24.4s, v14.4s\n"
- "add v25.4s, v25.4s, v14.4s\n"
- "add v26.4s, v26.4s, v14.4s\n"
- "add v27.4s, v27.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "smax v16.4s, v16.4s, v13.4s\n"
- "smax v17.4s, v17.4s, v13.4s\n"
- "smax v18.4s, v18.4s, v13.4s\n"
- "smax v19.4s, v19.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smax v21.4s, v21.4s, v13.4s\n"
- "smax v22.4s, v22.4s, v13.4s\n"
- "smax v23.4s, v23.4s, v13.4s\n"
- "smin v24.4s, v24.4s, v11.4s\n"
- "smin v25.4s, v25.4s, v11.4s\n"
- "smin v26.4s, v26.4s, v11.4s\n"
- "smin v27.4s, v27.4s, v11.4s\n"
- "smin v28.4s, v28.4s, v11.4s\n"
- "smin v29.4s, v29.4s, v11.4s\n"
- "smin v30.4s, v30.4s, v11.4s\n"
- "smin v31.4s, v31.4s, v11.4s\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "uzp1 v17.16b, v17.16b, v17.16b\n"
- "uzp1 v18.16b, v18.16b, v18.16b\n"
- "uzp1 v19.16b, v19.16b, v19.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str s31, [x22, x9]\n"
+ "and v17.16b, v29.16b, v7.16b\n"
+ "sqrdmulh v28.4s, v28.4s, v8.4s\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
+ "srshl v30.4s, v30.4s, v7.4s\n"
+ "sshl v27.4s, v27.4s, v9.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v28.16b, v7.16b\n"
+ "add v30.4s, v30.4s, v10.4s\n"
+ "sqadd v29.4s, v29.4s, v17.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smin v30.4s, v30.4s, v13.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v8.4s\n"
+ "srshl v29.4s, v29.4s, v7.4s\n"
+ "smax v30.4s, v30.4s, v14.4s\n"
+ "sqadd v28.4s, v28.4s, v16.4s\n"
+ "and v16.16b, v27.16b, v7.16b\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "add v29.4s, v29.4s, v10.4s\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "str s30, [x23, x9]\n"
+ "smin v29.4s, v29.4s, v13.4s\n"
+ "srshl v28.4s, v28.4s, v7.4s\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshl v26.4s, v26.4s, v9.4s\n"
+ "smax v29.4s, v29.4s, v14.4s\n"
+ "add v28.4s, v28.4s, v10.4s\n"
+ "sqadd v27.4s, v27.4s, v16.4s\n"
+ "uzp1 v29.16b, v29.16b, v29.16b\n"
+ "smin v28.4s, v28.4s, v13.4s\n"
+ "uzp1 v29.16b, v29.16b, v29.16b\n"
+ "str s29, [x24, x9]\n"
+ "smax v28.4s, v28.4s, v14.4s\n"
+ "srshl v27.4s, v27.4s, v7.4s\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
+ "sqrdmulh v26.4s, v26.4s, v8.4s\n"
+ "sshl v25.4s, v25.4s, v9.4s\n"
+ "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "add v27.4s, v27.4s, v10.4s\n"
+ "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "str s28, [x25, x9]\n"
+ "smin v27.4s, v27.4s, v13.4s\n"
+ "and v17.16b, v26.16b, v7.16b\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
+ "sqrdmulh v25.4s, v25.4s, v8.4s\n"
+ "sshl v24.4s, v24.4s, v9.4s\n"
+ "smax v27.4s, v27.4s, v14.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v25.16b, v7.16b\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "sqadd v26.4s, v26.4s, v17.4s\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "str s27, [x26, x9]\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v24.4s, v24.4s, v8.4s\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
+ "srshl v26.4s, v26.4s, v7.4s\n"
+ "sshl v23.4s, v23.4s, v9.4s\n"
+ "sqadd v25.4s, v25.4s, v16.4s\n"
+ "and v17.16b, v24.16b, v7.16b\n"
+ "add v26.4s, v26.4s, v10.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v8.4s\n"
+ "srshl v25.4s, v25.4s, v7.4s\n"
+ "smin v26.4s, v26.4s, v13.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v23.16b, v7.16b\n"
+ "smax v26.4s, v26.4s, v14.4s\n"
+ "add v25.4s, v25.4s, v10.4s\n"
+ "sqadd v24.4s, v24.4s, v17.4s\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "smin v25.4s, v25.4s, v13.4s\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "str s26, [x19, x9]\n"
+ "smax v25.4s, v25.4s, v14.4s\n"
+ "srshl v24.4s, v24.4s, v7.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshl v22.4s, v22.4s, v9.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "add v24.4s, v24.4s, v10.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s25, [x20, x9]\n"
+ "smin v24.4s, v24.4s, v13.4s\n"
+ "sqadd v23.4s, v23.4s, v16.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v8.4s\n"
+ "sshl v21.4s, v21.4s, v9.4s\n"
+ "smax v24.4s, v24.4s, v14.4s\n"
+ "srshl v23.4s, v23.4s, v7.4s\n"
+ "and v17.16b, v22.16b, v7.16b\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v8.4s\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "str s24, [x21, x9]\n"
+ "add v23.4s, v23.4s, v10.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v21.16b, v7.16b\n"
+ "sshl v20.4s, v20.4s, v9.4s\n"
+ "smin v23.4s, v23.4s, v13.4s\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smax v23.4s, v23.4s, v14.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v8.4s\n"
+ "srshl v22.4s, v22.4s, v7.4s\n"
"uzp1 v23.16b, v23.16b, v23.16b\n"
- "smax v24.4s, v24.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v26.4s, v26.4s, v13.4s\n"
- "smax v27.4s, v27.4s, v13.4s\n"
- "smax v28.4s, v28.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v31.4s, v31.4s, v13.4s\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "str s16, [x20, x9]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "uzp1 v17.16b, v17.16b, v17.16b\n"
- "uzp1 v18.16b, v18.16b, v18.16b\n"
- "str s17, [x21, x9]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
- "uzp1 v19.16b, v19.16b, v19.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s18, [x22, x9]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v22.16b, v22.16b, v22.16b\n"
- "str s19, [x23, x9]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
+ "sqadd v21.4s, v21.4s, v16.4s\n"
"uzp1 v23.16b, v23.16b, v23.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s20, [x24, x9]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s21, [x25, x9]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s22, [x26, x9]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s23, [x27, x9]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
+ "str s23, [x22, x9]\n"
+ "add v22.4s, v22.4s, v10.4s\n"
+ "and v16.16b, v20.16b, v7.16b\n"
+ "srshl v21.4s, v21.4s, v7.4s\n"
+ "sshl v19.4s, v19.4s, v9.4s\n"
+ "smin v22.4s, v22.4s, v13.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "add v21.4s, v21.4s, v10.4s\n"
+ "smax v22.4s, v22.4s, v14.4s\n"
+ "sqadd v20.4s, v20.4s, v16.4s\n"
+ "smin v21.4s, v21.4s, v13.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "sqrdmulh v19.4s, v19.4s, v8.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str s22, [x23, x9]\n"
+ "smax v21.4s, v21.4s, v14.4s\n"
+ "srshl v20.4s, v20.4s, v7.4s\n"
+ "and v16.16b, v19.16b, v7.16b\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "add v20.4s, v20.4s, v10.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "str s21, [x24, x9]\n"
+ "smin v20.4s, v20.4s, v13.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "smax v20.4s, v20.4s, v14.4s\n"
+ "srshl v19.4s, v19.4s, v7.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "str s20, [x25, x9]\n"
+ "add v19.4s, v19.4s, v10.4s\n"
+ "smin v19.4s, v19.4s, v13.4s\n"
+ "smax v19.4s, v19.4s, v14.4s\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str s19, [x26, x9]\n"
+ "b 8f\n"
+ "7:" // Output channel loop: Single kernel point
+ "smlal v6.4s, v17.4h, v3.h[0]\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "smlal v5.4s, v17.4h, v3.h[1]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "smlal v4.4s, v17.4h, v3.h[2]\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "smlal v31.4s, v17.4h, v3.h[3]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
+ "smlal v30.4s, v17.4h, v3.h[4]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
+ "smlal v29.4s, v17.4h, v3.h[5]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
+ "smlal v28.4s, v17.4h, v3.h[6]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
+ "smlal v27.4s, v17.4h, v3.h[7]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
+ "smlal v26.4s, v17.4h, v2.h[0]\n"
+ "smlal v25.4s, v17.4h, v2.h[1]\n"
+ "smlal v24.4s, v17.4h, v2.h[2]\n"
+ "smlal v23.4s, v17.4h, v2.h[3]\n"
+ "smlal v22.4s, v17.4h, v2.h[4]\n"
+ "smlal v21.4s, v17.4h, v2.h[5]\n"
+ "smlal v20.4s, v17.4h, v2.h[6]\n"
+ "smlal v19.4s, v17.4h, v2.h[7]\n"
+ "sshl v6.4s, v6.4s, v9.4s\n"
+ "sshl v5.4s, v5.4s, v9.4s\n"
+ "sqrdmulh v6.4s, v6.4s, v8.4s\n"
+ "sqrdmulh v5.4s, v5.4s, v8.4s\n"
+ "sshl v4.4s, v4.4s, v9.4s\n"
+ "sshl v31.4s, v31.4s, v9.4s\n"
+ "and v18.16b, v6.16b, v7.16b\n"
+ "and v16.16b, v5.16b, v7.16b\n"
+ "sqrdmulh v4.4s, v4.4s, v8.4s\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v31.4s, v31.4s, v8.4s\n"
+ "sqadd v6.4s, v6.4s, v18.4s\n"
+ "sqadd v5.4s, v5.4s, v16.4s\n"
+ "and v17.16b, v4.16b, v7.16b\n"
+ "and v16.16b, v31.16b, v7.16b\n"
+ "srshl v6.4s, v6.4s, v7.4s\n"
+ "srshl v5.4s, v5.4s, v7.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "add v6.4s, v6.4s, v10.4s\n"
+ "add v5.4s, v5.4s, v10.4s\n"
+ "sqadd v4.4s, v4.4s, v17.4s\n"
+ "smin v6.4s, v6.4s, v13.4s\n"
+ "smin v5.4s, v5.4s, v13.4s\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
+ "smax v6.4s, v6.4s, v14.4s\n"
+ "smax v5.4s, v5.4s, v14.4s\n"
+ "srshl v4.4s, v4.4s, v7.4s\n"
+ "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "str s6, [x19, x9]\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "add v4.4s, v4.4s, v10.4s\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "srshl v31.4s, v31.4s, v7.4s\n"
+ "str s5, [x20, x9]\n"
+ "sshl v30.4s, v30.4s, v9.4s\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
+ "smin v4.4s, v4.4s, v13.4s\n"
+ "sqrdmulh v30.4s, v30.4s, v8.4s\n"
+ "add v31.4s, v31.4s, v10.4s\n"
+ "smax v4.4s, v4.4s, v14.4s\n"
+ "sshl v29.4s, v29.4s, v9.4s\n"
+ "smin v31.4s, v31.4s, v13.4s\n"
+ "uzp1 v4.16b, v4.16b, v4.16b\n"
+ "and v16.16b, v30.16b, v7.16b\n"
+ "uzp1 v4.16b, v4.16b, v4.16b\n"
+ "str s4, [x21, x9]\n"
+ "smax v31.4s, v31.4s, v14.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
+ "sqrdmulh v29.4s, v29.4s, v8.4s\n"
+ "sshl v28.4s, v28.4s, v9.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s24, [x20, x9]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s25, [x21, x9]\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s26, [x22, x9]\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s27, [x23, x9]\n"
+ "sqadd v30.4s, v30.4s, v16.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "str s28, [x24, x9]\n"
- "str s29, [x25, x9]\n"
- "str s30, [x26, x9]\n"
- "str s31, [x27, x9]\n"
+ "str s31, [x22, x9]\n"
+ "and v17.16b, v29.16b, v7.16b\n"
+ "sqrdmulh v28.4s, v28.4s, v8.4s\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
+ "srshl v30.4s, v30.4s, v7.4s\n"
+ "sshl v27.4s, v27.4s, v9.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v28.16b, v7.16b\n"
+ "add v30.4s, v30.4s, v10.4s\n"
+ "sqadd v29.4s, v29.4s, v17.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smin v30.4s, v30.4s, v13.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v8.4s\n"
+ "srshl v29.4s, v29.4s, v7.4s\n"
+ "smax v30.4s, v30.4s, v14.4s\n"
+ "sqadd v28.4s, v28.4s, v16.4s\n"
+ "and v16.16b, v27.16b, v7.16b\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "add v29.4s, v29.4s, v10.4s\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "str s30, [x23, x9]\n"
+ "smin v29.4s, v29.4s, v13.4s\n"
+ "srshl v28.4s, v28.4s, v7.4s\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshl v26.4s, v26.4s, v9.4s\n"
+ "smax v29.4s, v29.4s, v14.4s\n"
+ "add v28.4s, v28.4s, v10.4s\n"
+ "sqadd v27.4s, v27.4s, v16.4s\n"
+ "uzp1 v29.16b, v29.16b, v29.16b\n"
+ "smin v28.4s, v28.4s, v13.4s\n"
+ "uzp1 v29.16b, v29.16b, v29.16b\n"
+ "str s29, [x24, x9]\n"
+ "smax v28.4s, v28.4s, v14.4s\n"
+ "srshl v27.4s, v27.4s, v7.4s\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
+ "sqrdmulh v26.4s, v26.4s, v8.4s\n"
+ "sshl v25.4s, v25.4s, v9.4s\n"
+ "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "add v27.4s, v27.4s, v10.4s\n"
+ "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "str s28, [x25, x9]\n"
+ "smin v27.4s, v27.4s, v13.4s\n"
+ "and v17.16b, v26.16b, v7.16b\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
+ "sqrdmulh v25.4s, v25.4s, v8.4s\n"
+ "sshl v24.4s, v24.4s, v9.4s\n"
+ "smax v27.4s, v27.4s, v14.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v25.16b, v7.16b\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "sqadd v26.4s, v26.4s, v17.4s\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "str s27, [x26, x9]\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v24.4s, v24.4s, v8.4s\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
+ "srshl v26.4s, v26.4s, v7.4s\n"
+ "sshl v23.4s, v23.4s, v9.4s\n"
+ "sqadd v25.4s, v25.4s, v16.4s\n"
+ "and v17.16b, v24.16b, v7.16b\n"
+ "add v26.4s, v26.4s, v10.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v8.4s\n"
+ "srshl v25.4s, v25.4s, v7.4s\n"
+ "smin v26.4s, v26.4s, v13.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v23.16b, v7.16b\n"
+ "smax v26.4s, v26.4s, v14.4s\n"
+ "add v25.4s, v25.4s, v10.4s\n"
+ "sqadd v24.4s, v24.4s, v17.4s\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "smin v25.4s, v25.4s, v13.4s\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "str s26, [x19, x9]\n"
+ "smax v25.4s, v25.4s, v14.4s\n"
+ "srshl v24.4s, v24.4s, v7.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshl v22.4s, v22.4s, v9.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "add v24.4s, v24.4s, v10.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s25, [x20, x9]\n"
+ "smin v24.4s, v24.4s, v13.4s\n"
+ "sqadd v23.4s, v23.4s, v16.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v8.4s\n"
+ "sshl v21.4s, v21.4s, v9.4s\n"
+ "smax v24.4s, v24.4s, v14.4s\n"
+ "srshl v23.4s, v23.4s, v7.4s\n"
+ "and v17.16b, v22.16b, v7.16b\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v8.4s\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "str s24, [x21, x9]\n"
+ "add v23.4s, v23.4s, v10.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v21.16b, v7.16b\n"
+ "sshl v20.4s, v20.4s, v9.4s\n"
+ "smin v23.4s, v23.4s, v13.4s\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smax v23.4s, v23.4s, v14.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v8.4s\n"
+ "srshl v22.4s, v22.4s, v7.4s\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "sqadd v21.4s, v21.4s, v16.4s\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "str s23, [x22, x9]\n"
+ "add v22.4s, v22.4s, v10.4s\n"
+ "and v16.16b, v20.16b, v7.16b\n"
+ "srshl v21.4s, v21.4s, v7.4s\n"
+ "sshl v19.4s, v19.4s, v9.4s\n"
+ "smin v22.4s, v22.4s, v13.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "add v21.4s, v21.4s, v10.4s\n"
+ "smax v22.4s, v22.4s, v14.4s\n"
+ "sqadd v20.4s, v20.4s, v16.4s\n"
+ "smin v21.4s, v21.4s, v13.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "sqrdmulh v19.4s, v19.4s, v8.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str s22, [x23, x9]\n"
+ "smax v21.4s, v21.4s, v14.4s\n"
+ "srshl v20.4s, v20.4s, v7.4s\n"
+ "and v16.16b, v19.16b, v7.16b\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "add v20.4s, v20.4s, v10.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "str s21, [x24, x9]\n"
+ "smin v20.4s, v20.4s, v13.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "smax v20.4s, v20.4s, v14.4s\n"
+ "srshl v19.4s, v19.4s, v7.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "str s20, [x25, x9]\n"
+ "add v19.4s, v19.4s, v10.4s\n"
+ "smin v19.4s, v19.4s, v13.4s\n"
+ "smax v19.4s, v19.4s, v14.4s\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str s19, [x26, x9]\n"
"8:" // Output channel loop: Done
"add x9, x9, #0x4\n"
- "cmp x9, x10, LSL #2\n"
+ "cmp x9, x28, LSL #2\n"
"blt 1b\n"
"tst %x[n_output_channels], #0x3\n"
"beq 26f\n"
"9:" // Output channel oddments
- "movi v31.4s, #0x0\n"
+ "movi v16.4s, #0x0\n"
"cbz %x[bias], 12f\n"
- "add x20, %x[bias], x9, LSL #2\n"
+ "add x19, %x[bias], x9, LSL #2\n"
"tbz %x[n_output_channels], #1, 10f\n"
- "ld1 { v31.d }[0], [x20], #0x8\n"
+ "ld1 { v16.d }[0], [x19], #0x8\n"
"tbz %x[n_output_channels], #0, 11f\n"
- "ld1 { v31.s }[2], [x20]\n"
+ "ld1 { v16.s }[2], [x19]\n"
"b 11f\n"
"10:" // Output channel oddments: Load bias: Bit 1: Unset
- "ld1 { v31.s }[0], [x20]\n"
+ "tbz %x[n_output_channels], #0, 11f\n"
+ "ld1 { v16.s }[0], [x19]\n"
"11:" // Output channel oddments: Load bias: Bit 1: End
+
"12:" // Output channel oddments: Load bias: Done
- "mov v16.16b, v31.16b\n"
- "mov v17.16b, v31.16b\n"
- "mov v18.16b, v31.16b\n"
- "mov v19.16b, v31.16b\n"
- "mov v20.16b, v31.16b\n"
- "mov v21.16b, v31.16b\n"
- "mov v22.16b, v31.16b\n"
- "mov v23.16b, v31.16b\n"
- "mov v24.16b, v31.16b\n"
- "mov v25.16b, v31.16b\n"
- "mov v26.16b, v31.16b\n"
- "mov v27.16b, v31.16b\n"
- "mov v28.16b, v31.16b\n"
- "mov v29.16b, v31.16b\n"
- "mov v30.16b, v31.16b\n"
- "mov v31.16b, v31.16b\n"
+ "mov v6.16b, v16.16b\n"
+ "mov v5.16b, v16.16b\n"
+ "mov v4.16b, v16.16b\n"
+ "mov v31.16b, v16.16b\n"
+ "mov v30.16b, v16.16b\n"
+ "mov v29.16b, v16.16b\n"
+ "mov v28.16b, v16.16b\n"
+ "mov v27.16b, v16.16b\n"
+ "mov v26.16b, v16.16b\n"
+ "mov v25.16b, v16.16b\n"
+ "mov v24.16b, v16.16b\n"
+ "mov v23.16b, v16.16b\n"
+ "mov v22.16b, v16.16b\n"
+ "mov v21.16b, v16.16b\n"
+ "mov v20.16b, v16.16b\n"
+ "mov v19.16b, v16.16b\n"
"cbz %x[rq_mul_ptr], 18f\n"
- "add x22, %x[rq_mul_ptr], x9, LSL #2\n"
- "add x21, %x[rq_right_shift_ptr], x9, LSL #2\n"
- "add x20, %x[rq_left_shift_ptr], x9, LSL #2\n"
+ "add x21, %x[rq_mul_ptr], x9, LSL #2\n"
+ "add x20, %x[rq_right_shift_ptr], x9, LSL #2\n"
+ "add x19, %x[rq_left_shift_ptr], x9, LSL #2\n"
"cbz %x[rq_left_shift_ptr], 15f\n"
"tbz %x[n_output_channels], #1, 13f\n"
- "ld1 { v9.d }[0], [x22], #0x8\n"
- "ld1 { v10.d }[0], [x21], #0x8\n"
- "ld1 { v15.d }[0], [x20], #0x8\n"
+ "ld1 { v8.d }[0], [x21], #0x8\n"
+ "ld1 { v7.d }[0], [x20], #0x8\n"
+ "ld1 { v9.d }[0], [x19], #0x8\n"
"tbz %x[n_output_channels], #0, 14f\n"
- "ld1 { v9.s }[2], [x22], #0x4\n"
- "ld1 { v10.s }[2], [x21], #0x4\n"
- "ld1 { v15.s }[2], [x20], #0x4\n"
+ "ld1 { v8.s }[2], [x21], #0x4\n"
+ "ld1 { v7.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x19], #0x4\n"
"b 14f\n"
"13:" // Output channel oddments: Load quantization parameters: With left shift: Bit 1: Unset
- "ld1 { v9.s }[0], [x22], #0x4\n"
- "ld1 { v10.s }[0], [x21], #0x4\n"
- "ld1 { v15.s }[0], [x20], #0x4\n"
+ "tbz %x[n_output_channels], #0, 14f\n"
+ "ld1 { v8.s }[0], [x21], #0x4\n"
+ "ld1 { v7.s }[0], [x20], #0x4\n"
+ "ld1 { v9.s }[0], [x19], #0x4\n"
"14:" // Output channel oddments: Load quantization parameters: With left shift: Bit 1: End
"b 18f\n"
"15:" // Output channel oddments: Load quantization parameters: No left shift
"tbz %x[n_output_channels], #1, 16f\n"
- "ld1 { v9.d }[0], [x22], #0x8\n"
- "ld1 { v10.d }[0], [x21], #0x8\n"
+ "ld1 { v8.d }[0], [x21], #0x8\n"
+ "ld1 { v7.d }[0], [x20], #0x8\n"
"tbz %x[n_output_channels], #0, 17f\n"
- "ld1 { v9.s }[2], [x22], #0x4\n"
- "ld1 { v10.s }[2], [x21], #0x4\n"
+ "ld1 { v8.s }[2], [x21], #0x4\n"
+ "ld1 { v7.s }[2], [x20], #0x4\n"
"b 17f\n"
"16:" // Output channel oddments: Load quantization parameters: No left shift: Bit 1: Unset
- "ld1 { v9.s }[0], [x22], #0x4\n"
- "ld1 { v10.s }[0], [x21], #0x4\n"
+ "tbz %x[n_output_channels], #0, 17f\n"
+ "ld1 { v8.s }[0], [x21], #0x4\n"
+ "ld1 { v7.s }[0], [x20], #0x4\n"
"17:" // Output channel oddments: Load quantization parameters: No left shift: Bit 1: End
+
"18:" // Output channel oddments: Load quantization parameters: Done
- "ldr s8, [%x[weights]], #0x4\n"
- "mov x20, %x[inptrs]\n"
- "ldp x25, x28, [x20], #0x10\n"
- "lsr x21, %x[kernel_points], #0x1\n"
- "ldr d2, [x25, #0x0]\n"
- "ldr d7, [x28, #0x0]\n"
- "ssubl v2.8h, v2.8b, v3.8b\n"
- "ssubl v7.8h, v7.8b, v3.8b\n"
- "ssubl v8.8h, v8.8b, v12.8b\n"
- "cbz x21, 22f\n"
- "ldr s6, [%x[weights]], #0x4\n"
- "ldp x25, x28, [x20], #0x10\n"
- "subs x21, x21, #0x1\n"
- "ssubl v6.8h, v6.8b, v12.8b\n"
+ "ldr s17, [%x[weights]], #0x4\n"
+ "ssubl v17.8h, v17.8b, v11.8b\n"
+ "mov x19, %x[inptrs]\n"
+ "ldp x25, x27, [x19], #0x10\n"
+ "lsr x20, %x[kernel_points], #0x1\n"
+ "ldr d3, [x25, #0x0]\n"
+ "ssubl v3.8h, v3.8b, v12.8b\n"
+ "ldr d2, [x27, #0x0]\n"
+ "ssubl v2.8h, v2.8b, v12.8b\n"
+ "cbz x20, 22f\n"
+ "ldp x25, x27, [x19], #0x10\n"
+ "ldr s16, [%x[weights]], #0x4\n"
+ "ssubl v16.8h, v16.8b, v11.8b\n"
"ldr d1, [x25, #0x0]\n"
- "ldr d0, [x28, #0x0]\n"
- "ssubl v1.8h, v1.8b, v3.8b\n"
- "ssubl v0.8h, v0.8b, v3.8b\n"
+ "subs x20, x20, #0x1\n"
+ "ssubl v1.8h, v1.8b, v12.8b\n"
+ "ldr d0, [x27, #0x0]\n"
+ "ssubl v0.8h, v0.8b, v12.8b\n"
"beq 20f\n"
"19:" // Output channel oddments: Kernel loop
- "ldp x25, x28, [x20], #0x10\n"
- "smlal v16.4s, v8.4h, v2.h[0]\n"
- "smlal v17.4s, v8.4h, v2.h[1]\n"
- "subs x21, x21, #0x1\n"
- "smlal v18.4s, v8.4h, v2.h[2]\n"
- "smlal v19.4s, v8.4h, v2.h[3]\n"
- "smlal v20.4s, v8.4h, v2.h[4]\n"
- "smlal v21.4s, v8.4h, v2.h[5]\n"
- "smlal v22.4s, v8.4h, v2.h[6]\n"
- "smlal v23.4s, v8.4h, v2.h[7]\n"
- "ldr d2, [x25, #0x0]\n"
- "ssubl v2.8h, v2.8b, v3.8b\n"
- "smlal v24.4s, v8.4h, v7.h[0]\n"
- "smlal v25.4s, v8.4h, v7.h[1]\n"
- "smlal v26.4s, v8.4h, v7.h[2]\n"
- "smlal v27.4s, v8.4h, v7.h[3]\n"
- "smlal v28.4s, v8.4h, v7.h[4]\n"
- "smlal v29.4s, v8.4h, v7.h[5]\n"
- "smlal v30.4s, v8.4h, v7.h[6]\n"
- "smlal v31.4s, v8.4h, v7.h[7]\n"
- "ldr d7, [x28, #0x0]\n"
- "ldr s8, [%x[weights]], #0x4\n"
- "ldp x25, x28, [x20], #0x10\n"
- "smlal v16.4s, v6.4h, v1.h[0]\n"
- "smlal v17.4s, v6.4h, v1.h[1]\n"
- "ssubl v7.8h, v7.8b, v3.8b\n"
- "smlal v18.4s, v6.4h, v1.h[2]\n"
- "smlal v19.4s, v6.4h, v1.h[3]\n"
- "ssubl v8.8h, v8.8b, v12.8b\n"
- "smlal v20.4s, v6.4h, v1.h[4]\n"
- "smlal v21.4s, v6.4h, v1.h[5]\n"
- "smlal v22.4s, v6.4h, v1.h[6]\n"
- "smlal v23.4s, v6.4h, v1.h[7]\n"
+ "smlal v6.4s, v17.4h, v3.h[0]\n"
+ "ldp x25, x27, [x19], #0x10\n"
+ "subs x20, x20, #0x1\n"
+ "smlal v5.4s, v17.4h, v3.h[1]\n"
+ "smlal v4.4s, v17.4h, v3.h[2]\n"
+ "smlal v31.4s, v17.4h, v3.h[3]\n"
+ "smlal v30.4s, v17.4h, v3.h[4]\n"
+ "smlal v29.4s, v17.4h, v3.h[5]\n"
+ "smlal v28.4s, v17.4h, v3.h[6]\n"
+ "smlal v27.4s, v17.4h, v3.h[7]\n"
+ "ldr d3, [x25, #0x0]\n"
+ "smlal v26.4s, v17.4h, v2.h[0]\n"
+ "smlal v25.4s, v17.4h, v2.h[1]\n"
+ "smlal v24.4s, v17.4h, v2.h[2]\n"
+ "smlal v23.4s, v17.4h, v2.h[3]\n"
+ "smlal v22.4s, v17.4h, v2.h[4]\n"
+ "smlal v21.4s, v17.4h, v2.h[5]\n"
+ "smlal v20.4s, v17.4h, v2.h[6]\n"
+ "smlal v19.4s, v17.4h, v2.h[7]\n"
+ "ldr d2, [x27, #0x0]\n"
+ "ssubl v3.8h, v3.8b, v12.8b\n"
+ "ldr s17, [%x[weights]], #0x4\n"
+ "smlal v6.4s, v16.4h, v1.h[0]\n"
+ "ldp x25, x27, [x19], #0x10\n"
+ "smlal v5.4s, v16.4h, v1.h[1]\n"
+ "smlal v4.4s, v16.4h, v1.h[2]\n"
+ "ssubl v2.8h, v2.8b, v12.8b\n"
+ "ssubl v17.8h, v17.8b, v11.8b\n"
+ "smlal v31.4s, v16.4h, v1.h[3]\n"
+ "smlal v30.4s, v16.4h, v1.h[4]\n"
+ "smlal v29.4s, v16.4h, v1.h[5]\n"
+ "smlal v28.4s, v16.4h, v1.h[6]\n"
+ "smlal v27.4s, v16.4h, v1.h[7]\n"
"ldr d1, [x25, #0x0]\n"
- "ssubl v1.8h, v1.8b, v3.8b\n"
- "smlal v24.4s, v6.4h, v0.h[0]\n"
- "smlal v25.4s, v6.4h, v0.h[1]\n"
- "smlal v26.4s, v6.4h, v0.h[2]\n"
- "smlal v27.4s, v6.4h, v0.h[3]\n"
- "smlal v28.4s, v6.4h, v0.h[4]\n"
- "smlal v29.4s, v6.4h, v0.h[5]\n"
- "smlal v30.4s, v6.4h, v0.h[6]\n"
- "smlal v31.4s, v6.4h, v0.h[7]\n"
- "ldr d0, [x28, #0x0]\n"
- "ldr s6, [%x[weights]], #0x4\n"
- "ssubl v0.8h, v0.8b, v3.8b\n"
- "ssubl v6.8h, v6.8b, v12.8b\n"
+ "smlal v26.4s, v16.4h, v0.h[0]\n"
+ "smlal v25.4s, v16.4h, v0.h[1]\n"
+ "smlal v24.4s, v16.4h, v0.h[2]\n"
+ "smlal v23.4s, v16.4h, v0.h[3]\n"
+ "smlal v22.4s, v16.4h, v0.h[4]\n"
+ "smlal v21.4s, v16.4h, v0.h[5]\n"
+ "smlal v20.4s, v16.4h, v0.h[6]\n"
+ "smlal v19.4s, v16.4h, v0.h[7]\n"
+ "ldr d0, [x27, #0x0]\n"
+ "ssubl v1.8h, v1.8b, v12.8b\n"
+ "ldr s16, [%x[weights]], #0x4\n"
+ "ssubl v0.8h, v0.8b, v12.8b\n"
+ "ssubl v16.8h, v16.8b, v11.8b\n"
"bgt 19b\n"
"20:" // Output channel oddments: Kernel loop tail
"tbnz %x[kernel_points], #0, 21f\n"
- "smlal v16.4s, v8.4h, v2.h[0]\n"
- "smlal v17.4s, v8.4h, v2.h[1]\n"
- "smlal v18.4s, v8.4h, v2.h[2]\n"
- "smlal v19.4s, v8.4h, v2.h[3]\n"
- "smlal v20.4s, v8.4h, v2.h[4]\n"
- "smlal v21.4s, v8.4h, v2.h[5]\n"
- "smlal v22.4s, v8.4h, v2.h[6]\n"
- "smlal v23.4s, v8.4h, v2.h[7]\n"
- "smlal v24.4s, v8.4h, v7.h[0]\n"
- "smlal v25.4s, v8.4h, v7.h[1]\n"
- "smlal v26.4s, v8.4h, v7.h[2]\n"
- "smlal v27.4s, v8.4h, v7.h[3]\n"
- "smlal v28.4s, v8.4h, v7.h[4]\n"
- "smlal v29.4s, v8.4h, v7.h[5]\n"
- "smlal v30.4s, v8.4h, v7.h[6]\n"
- "smlal v31.4s, v8.4h, v7.h[7]\n"
- "smlal v16.4s, v6.4h, v1.h[0]\n"
- "smlal v17.4s, v6.4h, v1.h[1]\n"
- "smlal v18.4s, v6.4h, v1.h[2]\n"
- "smlal v19.4s, v6.4h, v1.h[3]\n"
- "smlal v20.4s, v6.4h, v1.h[4]\n"
- "smlal v21.4s, v6.4h, v1.h[5]\n"
- "smlal v22.4s, v6.4h, v1.h[6]\n"
- "smlal v23.4s, v6.4h, v1.h[7]\n"
- "smlal v24.4s, v6.4h, v0.h[0]\n"
- "smlal v25.4s, v6.4h, v0.h[1]\n"
- "smlal v26.4s, v6.4h, v0.h[2]\n"
- "smlal v27.4s, v6.4h, v0.h[3]\n"
- "smlal v28.4s, v6.4h, v0.h[4]\n"
- "smlal v29.4s, v6.4h, v0.h[5]\n"
- "smlal v30.4s, v6.4h, v0.h[6]\n"
- "smlal v31.4s, v6.4h, v0.h[7]\n"
+ "smlal v6.4s, v17.4h, v3.h[0]\n"
+ "smlal v5.4s, v17.4h, v3.h[1]\n"
+ "smlal v4.4s, v17.4h, v3.h[2]\n"
+ "smlal v31.4s, v17.4h, v3.h[3]\n"
+ "smlal v30.4s, v17.4h, v3.h[4]\n"
+ "smlal v29.4s, v17.4h, v3.h[5]\n"
+ "smlal v28.4s, v17.4h, v3.h[6]\n"
+ "smlal v27.4s, v17.4h, v3.h[7]\n"
+ "smlal v26.4s, v17.4h, v2.h[0]\n"
+ "smlal v25.4s, v17.4h, v2.h[1]\n"
+ "smlal v24.4s, v17.4h, v2.h[2]\n"
+ "smlal v23.4s, v17.4h, v2.h[3]\n"
+ "smlal v22.4s, v17.4h, v2.h[4]\n"
+ "smlal v21.4s, v17.4h, v2.h[5]\n"
+ "smlal v20.4s, v17.4h, v2.h[6]\n"
+ "smlal v19.4s, v17.4h, v2.h[7]\n"
+ "smlal v6.4s, v16.4h, v1.h[0]\n"
+ "smlal v5.4s, v16.4h, v1.h[1]\n"
+ "smlal v4.4s, v16.4h, v1.h[2]\n"
+ "smlal v31.4s, v16.4h, v1.h[3]\n"
+ "smlal v30.4s, v16.4h, v1.h[4]\n"
+ "smlal v29.4s, v16.4h, v1.h[5]\n"
+ "smlal v28.4s, v16.4h, v1.h[6]\n"
+ "smlal v27.4s, v16.4h, v1.h[7]\n"
+ "smlal v26.4s, v16.4h, v0.h[0]\n"
+ "smlal v25.4s, v16.4h, v0.h[1]\n"
+ "smlal v24.4s, v16.4h, v0.h[2]\n"
+ "smlal v23.4s, v16.4h, v0.h[3]\n"
+ "smlal v22.4s, v16.4h, v0.h[4]\n"
+ "smlal v21.4s, v16.4h, v0.h[5]\n"
+ "smlal v20.4s, v16.4h, v0.h[6]\n"
+ "smlal v19.4s, v16.4h, v0.h[7]\n"
"b 23f\n"
"21:" // Output channel oddments: Odd tail
- "ldp x25, x28, [x20], #0x10\n"
- "smlal v16.4s, v8.4h, v2.h[0]\n"
- "smlal v17.4s, v8.4h, v2.h[1]\n"
- "smlal v18.4s, v8.4h, v2.h[2]\n"
- "smlal v19.4s, v8.4h, v2.h[3]\n"
- "smlal v20.4s, v8.4h, v2.h[4]\n"
- "smlal v21.4s, v8.4h, v2.h[5]\n"
- "smlal v22.4s, v8.4h, v2.h[6]\n"
- "smlal v23.4s, v8.4h, v2.h[7]\n"
- "ldr d2, [x25, #0x0]\n"
- "ssubl v2.8h, v2.8b, v3.8b\n"
- "smlal v24.4s, v8.4h, v7.h[0]\n"
- "smlal v25.4s, v8.4h, v7.h[1]\n"
- "smlal v26.4s, v8.4h, v7.h[2]\n"
- "smlal v27.4s, v8.4h, v7.h[3]\n"
- "smlal v28.4s, v8.4h, v7.h[4]\n"
- "smlal v29.4s, v8.4h, v7.h[5]\n"
- "smlal v30.4s, v8.4h, v7.h[6]\n"
- "smlal v31.4s, v8.4h, v7.h[7]\n"
- "ldr d7, [x28, #0x0]\n"
- "ldr s8, [%x[weights]], #0x4\n"
- "smlal v16.4s, v6.4h, v1.h[0]\n"
- "smlal v17.4s, v6.4h, v1.h[1]\n"
- "ssubl v7.8h, v7.8b, v3.8b\n"
- "smlal v18.4s, v6.4h, v1.h[2]\n"
- "smlal v19.4s, v6.4h, v1.h[3]\n"
- "ssubl v8.8h, v8.8b, v12.8b\n"
- "smlal v20.4s, v6.4h, v1.h[4]\n"
- "smlal v21.4s, v6.4h, v1.h[5]\n"
- "smlal v22.4s, v6.4h, v1.h[6]\n"
- "smlal v23.4s, v6.4h, v1.h[7]\n"
- "smlal v24.4s, v6.4h, v0.h[0]\n"
- "smlal v25.4s, v6.4h, v0.h[1]\n"
- "smlal v26.4s, v6.4h, v0.h[2]\n"
- "smlal v27.4s, v6.4h, v0.h[3]\n"
- "smlal v28.4s, v6.4h, v0.h[4]\n"
- "smlal v29.4s, v6.4h, v0.h[5]\n"
- "smlal v30.4s, v6.4h, v0.h[6]\n"
- "smlal v31.4s, v6.4h, v0.h[7]\n"
- "smlal v16.4s, v8.4h, v2.h[0]\n"
- "smlal v17.4s, v8.4h, v2.h[1]\n"
- "smlal v18.4s, v8.4h, v2.h[2]\n"
- "smlal v19.4s, v8.4h, v2.h[3]\n"
- "smlal v20.4s, v8.4h, v2.h[4]\n"
- "smlal v21.4s, v8.4h, v2.h[5]\n"
- "smlal v22.4s, v8.4h, v2.h[6]\n"
- "smlal v23.4s, v8.4h, v2.h[7]\n"
- "smlal v24.4s, v8.4h, v7.h[0]\n"
- "smlal v25.4s, v8.4h, v7.h[1]\n"
- "smlal v26.4s, v8.4h, v7.h[2]\n"
- "smlal v27.4s, v8.4h, v7.h[3]\n"
- "smlal v28.4s, v8.4h, v7.h[4]\n"
- "smlal v29.4s, v8.4h, v7.h[5]\n"
- "smlal v30.4s, v8.4h, v7.h[6]\n"
- "smlal v31.4s, v8.4h, v7.h[7]\n"
+ "smlal v6.4s, v17.4h, v3.h[0]\n"
+ "ldp x25, x27, [x19], #0x10\n"
+ "smlal v5.4s, v17.4h, v3.h[1]\n"
+ "smlal v4.4s, v17.4h, v3.h[2]\n"
+ "smlal v31.4s, v17.4h, v3.h[3]\n"
+ "smlal v30.4s, v17.4h, v3.h[4]\n"
+ "smlal v29.4s, v17.4h, v3.h[5]\n"
+ "smlal v28.4s, v17.4h, v3.h[6]\n"
+ "smlal v27.4s, v17.4h, v3.h[7]\n"
+ "ldr d3, [x25, #0x0]\n"
+ "smlal v26.4s, v17.4h, v2.h[0]\n"
+ "smlal v25.4s, v17.4h, v2.h[1]\n"
+ "smlal v24.4s, v17.4h, v2.h[2]\n"
+ "smlal v23.4s, v17.4h, v2.h[3]\n"
+ "smlal v22.4s, v17.4h, v2.h[4]\n"
+ "smlal v21.4s, v17.4h, v2.h[5]\n"
+ "smlal v20.4s, v17.4h, v2.h[6]\n"
+ "smlal v19.4s, v17.4h, v2.h[7]\n"
+ "ldr d2, [x27, #0x0]\n"
+ "ssubl v3.8h, v3.8b, v12.8b\n"
+ "ldr s17, [%x[weights]], #0x4\n"
+ "smlal v6.4s, v16.4h, v1.h[0]\n"
+ "smlal v5.4s, v16.4h, v1.h[1]\n"
+ "smlal v4.4s, v16.4h, v1.h[2]\n"
+ "ssubl v2.8h, v2.8b, v12.8b\n"
+ "ssubl v17.8h, v17.8b, v11.8b\n"
+ "smlal v31.4s, v16.4h, v1.h[3]\n"
+ "smlal v30.4s, v16.4h, v1.h[4]\n"
+ "smlal v29.4s, v16.4h, v1.h[5]\n"
+ "smlal v28.4s, v16.4h, v1.h[6]\n"
+ "smlal v27.4s, v16.4h, v1.h[7]\n"
+ "smlal v26.4s, v16.4h, v0.h[0]\n"
+ "smlal v25.4s, v16.4h, v0.h[1]\n"
+ "smlal v24.4s, v16.4h, v0.h[2]\n"
+ "smlal v23.4s, v16.4h, v0.h[3]\n"
+ "smlal v22.4s, v16.4h, v0.h[4]\n"
+ "smlal v21.4s, v16.4h, v0.h[5]\n"
+ "smlal v20.4s, v16.4h, v0.h[6]\n"
+ "smlal v19.4s, v16.4h, v0.h[7]\n"
+ "smlal v6.4s, v17.4h, v3.h[0]\n"
+ "smlal v5.4s, v17.4h, v3.h[1]\n"
+ "smlal v4.4s, v17.4h, v3.h[2]\n"
+ "smlal v31.4s, v17.4h, v3.h[3]\n"
+ "smlal v30.4s, v17.4h, v3.h[4]\n"
+ "smlal v29.4s, v17.4h, v3.h[5]\n"
+ "smlal v28.4s, v17.4h, v3.h[6]\n"
+ "smlal v27.4s, v17.4h, v3.h[7]\n"
+ "smlal v26.4s, v17.4h, v2.h[0]\n"
+ "smlal v25.4s, v17.4h, v2.h[1]\n"
+ "smlal v24.4s, v17.4h, v2.h[2]\n"
+ "smlal v23.4s, v17.4h, v2.h[3]\n"
+ "smlal v22.4s, v17.4h, v2.h[4]\n"
+ "smlal v21.4s, v17.4h, v2.h[5]\n"
+ "smlal v20.4s, v17.4h, v2.h[6]\n"
+ "smlal v19.4s, v17.4h, v2.h[7]\n"
"b 23f\n"
"22:" // Output channel oddments: Single kernel point
- "smlal v16.4s, v8.4h, v2.h[0]\n"
- "smlal v17.4s, v8.4h, v2.h[1]\n"
- "smlal v18.4s, v8.4h, v2.h[2]\n"
- "smlal v19.4s, v8.4h, v2.h[3]\n"
- "smlal v20.4s, v8.4h, v2.h[4]\n"
- "smlal v21.4s, v8.4h, v2.h[5]\n"
- "smlal v22.4s, v8.4h, v2.h[6]\n"
- "smlal v23.4s, v8.4h, v2.h[7]\n"
- "smlal v24.4s, v8.4h, v7.h[0]\n"
- "smlal v25.4s, v8.4h, v7.h[1]\n"
- "smlal v26.4s, v8.4h, v7.h[2]\n"
- "smlal v27.4s, v8.4h, v7.h[3]\n"
- "smlal v28.4s, v8.4h, v7.h[4]\n"
- "smlal v29.4s, v8.4h, v7.h[5]\n"
- "smlal v30.4s, v8.4h, v7.h[6]\n"
- "smlal v31.4s, v8.4h, v7.h[7]\n"
+ "smlal v6.4s, v17.4h, v3.h[0]\n"
+ "smlal v5.4s, v17.4h, v3.h[1]\n"
+ "smlal v4.4s, v17.4h, v3.h[2]\n"
+ "smlal v31.4s, v17.4h, v3.h[3]\n"
+ "smlal v30.4s, v17.4h, v3.h[4]\n"
+ "smlal v29.4s, v17.4h, v3.h[5]\n"
+ "smlal v28.4s, v17.4h, v3.h[6]\n"
+ "smlal v27.4s, v17.4h, v3.h[7]\n"
+ "smlal v26.4s, v17.4h, v2.h[0]\n"
+ "smlal v25.4s, v17.4h, v2.h[1]\n"
+ "smlal v24.4s, v17.4h, v2.h[2]\n"
+ "smlal v23.4s, v17.4h, v2.h[3]\n"
+ "smlal v22.4s, v17.4h, v2.h[4]\n"
+ "smlal v21.4s, v17.4h, v2.h[5]\n"
+ "smlal v20.4s, v17.4h, v2.h[6]\n"
+ "smlal v19.4s, v17.4h, v2.h[7]\n"
"23:" // Output channel oddments: Done
- "sshl v16.4s, v16.4s, v15.4s\n"
- "sshl v17.4s, v17.4s, v15.4s\n"
- "sshl v18.4s, v18.4s, v15.4s\n"
- "sshl v19.4s, v19.4s, v15.4s\n"
- "sqrdmulh v16.4s, v16.4s, v9.4s\n"
- "sqrdmulh v17.4s, v17.4s, v9.4s\n"
- "sqrdmulh v18.4s, v18.4s, v9.4s\n"
- "sqrdmulh v19.4s, v19.4s, v9.4s\n"
- "and v5.16b, v16.16b, v10.16b\n"
- "and v4.16b, v17.16b, v10.16b\n"
- "and v2.16b, v18.16b, v10.16b\n"
- "and v1.16b, v19.16b, v10.16b\n"
- "sshl v20.4s, v20.4s, v15.4s\n"
- "sshl v21.4s, v21.4s, v15.4s\n"
- "sshl v22.4s, v22.4s, v15.4s\n"
- "sshl v23.4s, v23.4s, v15.4s\n"
- "sshl v24.4s, v24.4s, v15.4s\n"
- "sshl v25.4s, v25.4s, v15.4s\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "sqrdmulh v21.4s, v21.4s, v9.4s\n"
- "sqrdmulh v22.4s, v22.4s, v9.4s\n"
- "sqrdmulh v23.4s, v23.4s, v9.4s\n"
- "sqrdmulh v24.4s, v24.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqadd v16.4s, v16.4s, v5.4s\n"
- "sqadd v17.4s, v17.4s, v4.4s\n"
- "sqadd v18.4s, v18.4s, v2.4s\n"
- "sqadd v19.4s, v19.4s, v1.4s\n"
- "and v8.16b, v20.16b, v10.16b\n"
- "and v0.16b, v21.16b, v10.16b\n"
- "and v5.16b, v22.16b, v10.16b\n"
- "and v4.16b, v23.16b, v10.16b\n"
- "and v2.16b, v24.16b, v10.16b\n"
- "and v1.16b, v25.16b, v10.16b\n"
- "sshl v26.4s, v26.4s, v15.4s\n"
- "sshl v27.4s, v27.4s, v15.4s\n"
- "sshl v28.4s, v28.4s, v15.4s\n"
- "sshl v29.4s, v29.4s, v15.4s\n"
- "sshl v30.4s, v30.4s, v15.4s\n"
- "sshl v31.4s, v31.4s, v15.4s\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v9.4s\n"
- "sqrdmulh v27.4s, v27.4s, v9.4s\n"
- "sqrdmulh v28.4s, v28.4s, v9.4s\n"
- "sqrdmulh v29.4s, v29.4s, v9.4s\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v31.4s, v31.4s, v9.4s\n"
- "sqadd v20.4s, v20.4s, v8.4s\n"
- "sqadd v21.4s, v21.4s, v0.4s\n"
- "sqadd v22.4s, v22.4s, v5.4s\n"
- "sqadd v23.4s, v23.4s, v4.4s\n"
- "sqadd v24.4s, v24.4s, v2.4s\n"
- "sqadd v25.4s, v25.4s, v1.4s\n"
- "and v8.16b, v26.16b, v10.16b\n"
- "and v0.16b, v27.16b, v10.16b\n"
- "and v5.16b, v28.16b, v10.16b\n"
- "and v4.16b, v29.16b, v10.16b\n"
- "and v2.16b, v30.16b, v10.16b\n"
- "and v1.16b, v31.16b, v10.16b\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "sqadd v26.4s, v26.4s, v8.4s\n"
- "sqadd v27.4s, v27.4s, v0.4s\n"
- "sqadd v28.4s, v28.4s, v5.4s\n"
- "sqadd v29.4s, v29.4s, v4.4s\n"
- "sqadd v30.4s, v30.4s, v2.4s\n"
- "sqadd v31.4s, v31.4s, v1.4s\n"
- "srshl v16.4s, v16.4s, v10.4s\n"
- "srshl v17.4s, v17.4s, v10.4s\n"
- "srshl v18.4s, v18.4s, v10.4s\n"
- "srshl v19.4s, v19.4s, v10.4s\n"
- "srshl v20.4s, v20.4s, v10.4s\n"
- "srshl v21.4s, v21.4s, v10.4s\n"
- "srshl v22.4s, v22.4s, v10.4s\n"
- "srshl v23.4s, v23.4s, v10.4s\n"
- "srshl v24.4s, v24.4s, v10.4s\n"
- "srshl v25.4s, v25.4s, v10.4s\n"
- "srshl v26.4s, v26.4s, v10.4s\n"
- "srshl v27.4s, v27.4s, v10.4s\n"
- "srshl v28.4s, v28.4s, v10.4s\n"
- "srshl v29.4s, v29.4s, v10.4s\n"
- "srshl v30.4s, v30.4s, v10.4s\n"
- "srshl v31.4s, v31.4s, v10.4s\n"
- "add v16.4s, v16.4s, v14.4s\n"
- "add v17.4s, v17.4s, v14.4s\n"
- "add v18.4s, v18.4s, v14.4s\n"
- "add v19.4s, v19.4s, v14.4s\n"
- "add v20.4s, v20.4s, v14.4s\n"
- "add v21.4s, v21.4s, v14.4s\n"
- "add v22.4s, v22.4s, v14.4s\n"
- "add v23.4s, v23.4s, v14.4s\n"
- "add v24.4s, v24.4s, v14.4s\n"
- "add v25.4s, v25.4s, v14.4s\n"
- "add v26.4s, v26.4s, v14.4s\n"
- "add v27.4s, v27.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "smin v16.4s, v16.4s, v11.4s\n"
- "smin v17.4s, v17.4s, v11.4s\n"
- "smin v18.4s, v18.4s, v11.4s\n"
- "smin v19.4s, v19.4s, v11.4s\n"
- "smin v20.4s, v20.4s, v11.4s\n"
- "smin v21.4s, v21.4s, v11.4s\n"
- "smin v22.4s, v22.4s, v11.4s\n"
- "smin v23.4s, v23.4s, v11.4s\n"
- "smin v24.4s, v24.4s, v11.4s\n"
- "smin v25.4s, v25.4s, v11.4s\n"
- "smin v26.4s, v26.4s, v11.4s\n"
- "smin v27.4s, v27.4s, v11.4s\n"
- "smin v28.4s, v28.4s, v11.4s\n"
- "smin v29.4s, v29.4s, v11.4s\n"
- "smin v30.4s, v30.4s, v11.4s\n"
- "smin v31.4s, v31.4s, v11.4s\n"
- "smax v16.4s, v16.4s, v13.4s\n"
- "smax v17.4s, v17.4s, v13.4s\n"
- "smax v18.4s, v18.4s, v13.4s\n"
- "smax v19.4s, v19.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smax v21.4s, v21.4s, v13.4s\n"
- "smax v22.4s, v22.4s, v13.4s\n"
- "smax v23.4s, v23.4s, v13.4s\n"
- "smax v24.4s, v24.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v26.4s, v26.4s, v13.4s\n"
- "smax v27.4s, v27.4s, v13.4s\n"
- "smax v28.4s, v28.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v31.4s, v31.4s, v13.4s\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "uzp1 v17.16b, v17.16b, v17.16b\n"
- "uzp1 v18.16b, v18.16b, v18.16b\n"
- "uzp1 v19.16b, v19.16b, v19.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v22.16b, v22.16b, v22.16b\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "sshl v6.4s, v6.4s, v9.4s\n"
+ "sshl v5.4s, v5.4s, v9.4s\n"
+ "sshl v4.4s, v4.4s, v9.4s\n"
+ "sqrdmulh v6.4s, v6.4s, v8.4s\n"
+ "sqrdmulh v5.4s, v5.4s, v8.4s\n"
+ "sqrdmulh v4.4s, v4.4s, v8.4s\n"
+ "sshl v31.4s, v31.4s, v9.4s\n"
+ "and v18.16b, v6.16b, v7.16b\n"
+ "and v16.16b, v5.16b, v7.16b\n"
+ "and v17.16b, v4.16b, v7.16b\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v6.4s, v6.4s, v18.4s\n"
+ "sqadd v5.4s, v5.4s, v16.4s\n"
+ "sqadd v4.4s, v4.4s, v17.4s\n"
+ "sqrdmulh v31.4s, v31.4s, v8.4s\n"
+ "srshl v6.4s, v6.4s, v7.4s\n"
+ "srshl v5.4s, v5.4s, v7.4s\n"
+ "srshl v4.4s, v4.4s, v7.4s\n"
+ "and v16.16b, v31.16b, v7.16b\n"
+ "add v6.4s, v6.4s, v10.4s\n"
+ "add v5.4s, v5.4s, v10.4s\n"
+ "add v4.4s, v4.4s, v10.4s\n"
+ "smin v6.4s, v6.4s, v13.4s\n"
+ "smin v5.4s, v5.4s, v13.4s\n"
+ "smin v4.4s, v4.4s, v13.4s\n"
+ "smax v6.4s, v6.4s, v14.4s\n"
+ "smax v5.4s, v5.4s, v14.4s\n"
+ "smax v4.4s, v4.4s, v14.4s\n"
+ "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "uzp1 v4.16b, v4.16b, v4.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "uzp1 v4.16b, v4.16b, v4.16b\n"
+ "sshl v30.4s, v30.4s, v9.4s\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
+ "sqrdmulh v30.4s, v30.4s, v8.4s\n"
+ "sshl v29.4s, v29.4s, v9.4s\n"
+ "sshl v28.4s, v28.4s, v9.4s\n"
+ "srshl v31.4s, v31.4s, v7.4s\n"
+ "and v16.16b, v30.16b, v7.16b\n"
+ "sqrdmulh v29.4s, v29.4s, v8.4s\n"
+ "sqrdmulh v28.4s, v28.4s, v8.4s\n"
+ "add v31.4s, v31.4s, v10.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "and v17.16b, v29.16b, v7.16b\n"
+ "smin v31.4s, v31.4s, v13.4s\n"
+ "sqadd v30.4s, v30.4s, v16.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "smax v31.4s, v31.4s, v14.4s\n"
+ "and v16.16b, v28.16b, v7.16b\n"
+ "srshl v30.4s, v30.4s, v7.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "uzp1 v17.16b, v17.16b, v17.16b\n"
- "uzp1 v18.16b, v18.16b, v18.16b\n"
- "uzp1 v19.16b, v19.16b, v19.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v22.16b, v22.16b, v22.16b\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "sqadd v29.4s, v29.4s, v17.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
+ "add v30.4s, v30.4s, v10.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "srshl v29.4s, v29.4s, v7.4s\n"
+ "smin v30.4s, v30.4s, v13.4s\n"
+ "sqadd v28.4s, v28.4s, v16.4s\n"
+ "sshl v27.4s, v27.4s, v9.4s\n"
+ "smax v30.4s, v30.4s, v14.4s\n"
+ "add v29.4s, v29.4s, v10.4s\n"
+ "srshl v28.4s, v28.4s, v7.4s\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "smin v29.4s, v29.4s, v13.4s\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "add v28.4s, v28.4s, v10.4s\n"
+ "smax v29.4s, v29.4s, v14.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v8.4s\n"
+ "smin v28.4s, v28.4s, v13.4s\n"
+ "uzp1 v29.16b, v29.16b, v29.16b\n"
+ "sshl v26.4s, v26.4s, v9.4s\n"
+ "uzp1 v29.16b, v29.16b, v29.16b\n"
+ "smax v28.4s, v28.4s, v14.4s\n"
+ "and v16.16b, v27.16b, v7.16b\n"
+ "sqrdmulh v26.4s, v26.4s, v8.4s\n"
+ "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "and v17.16b, v26.16b, v7.16b\n"
+ "sqadd v27.4s, v27.4s, v16.4s\n"
+ "sshl v25.4s, v25.4s, v9.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqrdmulh v25.4s, v25.4s, v8.4s\n"
+ "srshl v27.4s, v27.4s, v7.4s\n"
+ "sqadd v26.4s, v26.4s, v17.4s\n"
+ "sshl v24.4s, v24.4s, v9.4s\n"
+ "and v16.16b, v25.16b, v7.16b\n"
+ "add v27.4s, v27.4s, v10.4s\n"
+ "srshl v26.4s, v26.4s, v7.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smin v27.4s, v27.4s, v13.4s\n"
+ "sqrdmulh v24.4s, v24.4s, v8.4s\n"
+ "add v26.4s, v26.4s, v10.4s\n"
+ "smax v27.4s, v27.4s, v14.4s\n"
+ "sqadd v25.4s, v25.4s, v16.4s\n"
+ "smin v26.4s, v26.4s, v13.4s\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "and v17.16b, v24.16b, v7.16b\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "smax v26.4s, v26.4s, v14.4s\n"
+ "srshl v25.4s, v25.4s, v7.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "sshl v23.4s, v23.4s, v9.4s\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "add v25.4s, v25.4s, v10.4s\n"
+ "sqadd v24.4s, v24.4s, v17.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v8.4s\n"
+ "smin v25.4s, v25.4s, v13.4s\n"
+ "sshl v22.4s, v22.4s, v9.4s\n"
+ "srshl v24.4s, v24.4s, v7.4s\n"
+ "smax v25.4s, v25.4s, v14.4s\n"
+ "and v16.16b, v23.16b, v7.16b\n"
+ "sqrdmulh v22.4s, v22.4s, v8.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "add v24.4s, v24.4s, v10.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smin v24.4s, v24.4s, v13.4s\n"
+ "and v17.16b, v22.16b, v7.16b\n"
+ "sqadd v23.4s, v23.4s, v16.4s\n"
+ "smax v24.4s, v24.4s, v14.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sshl v21.4s, v21.4s, v9.4s\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "srshl v23.4s, v23.4s, v7.4s\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "sqrdmulh v21.4s, v21.4s, v8.4s\n"
+ "add v23.4s, v23.4s, v10.4s\n"
+ "sshl v20.4s, v20.4s, v9.4s\n"
+ "srshl v22.4s, v22.4s, v7.4s\n"
+ "smin v23.4s, v23.4s, v13.4s\n"
+ "and v16.16b, v21.16b, v7.16b\n"
+ "sqrdmulh v20.4s, v20.4s, v8.4s\n"
+ "smax v23.4s, v23.4s, v14.4s\n"
+ "add v22.4s, v22.4s, v10.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "smin v22.4s, v22.4s, v13.4s\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "sqadd v21.4s, v21.4s, v16.4s\n"
+ "smax v22.4s, v22.4s, v14.4s\n"
+ "and v16.16b, v20.16b, v7.16b\n"
+ "sshl v19.4s, v19.4s, v9.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "srshl v21.4s, v21.4s, v7.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v19.4s, v19.4s, v8.4s\n"
+ "add v21.4s, v21.4s, v10.4s\n"
+ "sqadd v20.4s, v20.4s, v16.4s\n"
+ "smin v21.4s, v21.4s, v13.4s\n"
+ "and v16.16b, v19.16b, v7.16b\n"
+ "srshl v20.4s, v20.4s, v7.4s\n"
+ "smax v21.4s, v21.4s, v14.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "add v20.4s, v20.4s, v10.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "smin v20.4s, v20.4s, v13.4s\n"
+ "srshl v19.4s, v19.4s, v7.4s\n"
+ "smax v20.4s, v20.4s, v14.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "add v19.4s, v19.4s, v10.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "smin v19.4s, v19.4s, v13.4s\n"
+ "smax v19.4s, v19.4s, v14.4s\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
"tbz %x[n_output_channels], #1, 24f\n"
- "ldr x20, [%x[outptrs], #0x0]\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "add x19, x19, x9\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
"add x20, x20, x9\n"
+ "st1 { v6.h }[0], [x19]\n"
"add x21, x21, x9\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
+ "st1 { v5.h }[0], [x20]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
"add x22, x22, x9\n"
+ "st1 { v4.h }[0], [x21]\n"
"add x23, x23, x9\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
+ "st1 { v31.h }[0], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
"add x24, x24, x9\n"
+ "st1 { v30.h }[0], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
"add x25, x25, x9\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
+ "st1 { v29.h }[0], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
"add x26, x26, x9\n"
- "add x27, x27, x9\n"
- "st1 { v16.h }[0], [x20]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
+ "st1 { v28.h }[0], [x25]\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "add x19, x19, x9\n"
+ "st1 { v27.h }[0], [x26]\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
"add x20, x20, x9\n"
- "st1 { v17.h }[0], [x21]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
+ "st1 { v26.h }[0], [x19]\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
"add x21, x21, x9\n"
- "st1 { v18.h }[0], [x22]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
+ "st1 { v25.h }[0], [x20]\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
"add x22, x22, x9\n"
- "st1 { v19.h }[0], [x23]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
+ "st1 { v24.h }[0], [x21]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
"add x23, x23, x9\n"
- "st1 { v20.h }[0], [x24]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
+ "st1 { v23.h }[0], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
"add x24, x24, x9\n"
- "st1 { v21.h }[0], [x25]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
+ "st1 { v22.h }[0], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
"add x25, x25, x9\n"
- "st1 { v22.h }[0], [x26]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
+ "st1 { v21.h }[0], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
"add x26, x26, x9\n"
- "st1 { v23.h }[0], [x27]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
- "add x27, x27, x9\n"
+ "st1 { v20.h }[0], [x25]\n"
"add x9, x9, #0x2\n"
- "st1 { v24.h }[0], [x20]\n"
- "st1 { v25.h }[0], [x21]\n"
- "st1 { v26.h }[0], [x22]\n"
- "st1 { v27.h }[0], [x23]\n"
- "st1 { v28.h }[0], [x24]\n"
- "st1 { v29.h }[0], [x25]\n"
- "st1 { v30.h }[0], [x26]\n"
- "st1 { v31.h }[0], [x27]\n"
+ "st1 { v19.h }[0], [x26]\n"
"tbz %x[n_output_channels], #0, 25f\n"
- "ldr x20, [%x[outptrs], #0x0]\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "add x19, x19, x9\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
"add x20, x20, x9\n"
+ "st1 { v6.b }[2], [x19]\n"
"add x21, x21, x9\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
+ "st1 { v5.b }[2], [x20]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
"add x22, x22, x9\n"
+ "st1 { v4.b }[2], [x21]\n"
"add x23, x23, x9\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
+ "st1 { v31.b }[2], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
"add x24, x24, x9\n"
+ "st1 { v30.b }[2], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
"add x25, x25, x9\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
+ "st1 { v29.b }[2], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
"add x26, x26, x9\n"
- "add x27, x27, x9\n"
- "st1 { v16.b }[2], [x20]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
+ "st1 { v28.b }[2], [x25]\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "add x19, x19, x9\n"
+ "st1 { v27.b }[2], [x26]\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
"add x20, x20, x9\n"
- "st1 { v17.b }[2], [x21]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
+ "st1 { v26.b }[2], [x19]\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
"add x21, x21, x9\n"
- "st1 { v18.b }[2], [x22]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
+ "st1 { v25.b }[2], [x20]\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
"add x22, x22, x9\n"
- "st1 { v19.b }[2], [x23]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
+ "st1 { v24.b }[2], [x21]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
"add x23, x23, x9\n"
- "st1 { v20.b }[2], [x24]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
+ "st1 { v23.b }[2], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
"add x24, x24, x9\n"
- "st1 { v21.b }[2], [x25]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
+ "st1 { v22.b }[2], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
"add x25, x25, x9\n"
- "st1 { v22.b }[2], [x26]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
+ "st1 { v21.b }[2], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
"add x26, x26, x9\n"
- "st1 { v23.b }[2], [x27]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
- "add x27, x27, x9\n"
- "st1 { v24.b }[2], [x20]\n"
- "st1 { v25.b }[2], [x21]\n"
- "st1 { v26.b }[2], [x22]\n"
- "st1 { v27.b }[2], [x23]\n"
- "st1 { v28.b }[2], [x24]\n"
- "st1 { v29.b }[2], [x25]\n"
- "st1 { v30.b }[2], [x26]\n"
- "st1 { v31.b }[2], [x27]\n"
+ "st1 { v20.b }[2], [x25]\n"
+ "st1 { v19.b }[2], [x26]\n"
"b 25f\n"
"24:" // Output channel oddments: Done: Store: Bit 1: Unset
- "ldr x20, [%x[outptrs], #0x0]\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
+ "tbz %x[n_output_channels], #0, 25f\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "add x19, x19, x9\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
"add x20, x20, x9\n"
+ "st1 { v6.b }[0], [x19]\n"
"add x21, x21, x9\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
+ "st1 { v5.b }[0], [x20]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
"add x22, x22, x9\n"
+ "st1 { v4.b }[0], [x21]\n"
"add x23, x23, x9\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
+ "st1 { v31.b }[0], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
"add x24, x24, x9\n"
+ "st1 { v30.b }[0], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
"add x25, x25, x9\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
+ "st1 { v29.b }[0], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
"add x26, x26, x9\n"
- "add x27, x27, x9\n"
- "st1 { v16.b }[0], [x20]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
+ "st1 { v28.b }[0], [x25]\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "add x19, x19, x9\n"
+ "st1 { v27.b }[0], [x26]\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
"add x20, x20, x9\n"
- "st1 { v17.b }[0], [x21]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
+ "st1 { v26.b }[0], [x19]\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
"add x21, x21, x9\n"
- "st1 { v18.b }[0], [x22]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
+ "st1 { v25.b }[0], [x20]\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
"add x22, x22, x9\n"
- "st1 { v19.b }[0], [x23]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
+ "st1 { v24.b }[0], [x21]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
"add x23, x23, x9\n"
- "st1 { v20.b }[0], [x24]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
+ "st1 { v23.b }[0], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
"add x24, x24, x9\n"
- "st1 { v21.b }[0], [x25]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
+ "st1 { v22.b }[0], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
"add x25, x25, x9\n"
- "st1 { v22.b }[0], [x26]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
+ "st1 { v21.b }[0], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
"add x26, x26, x9\n"
- "st1 { v23.b }[0], [x27]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
- "add x27, x27, x9\n"
- "st1 { v24.b }[0], [x20]\n"
- "st1 { v25.b }[0], [x21]\n"
- "st1 { v26.b }[0], [x22]\n"
- "st1 { v27.b }[0], [x23]\n"
- "st1 { v28.b }[0], [x24]\n"
- "st1 { v29.b }[0], [x25]\n"
- "st1 { v30.b }[0], [x26]\n"
- "st1 { v31.b }[0], [x27]\n"
+ "st1 { v20.b }[0], [x25]\n"
+ "st1 { v19.b }[0], [x26]\n"
"25:" // Output channel oddments: Done: Store: Bit 1: End
"26:" // Done
: [weights] "+&r" (weights)
: [bias] "r" (bias), [inptrs] "r" (inptrs), [kernel_points] "r" ((uint64_t) kernel_points), [n_output_channels] "r" ((uint64_t) n_output_channels), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_layer_left_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_left_shift)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [outptrs] "r" (outptrs), [qp] "r" (&qp), [rq_left_shift_ptr] "r" (per_channel_left_shifts), [rq_mul_ptr] "r" (per_channel_muls), [rq_right_shift_ptr] "r" (per_channel_right_shifts)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
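
Note on the instruction sequence restored above: every output channel goes through the same requantization ladder — sshl by the per-layer/per-channel left shift, sqrdmulh by the multiplier, the and/sshr/sqadd sign fixup followed by srshl (a rounding divide by a power of two), the c_offset add, the smin/smax clamp, and a pair of uzp1 narrows down to bytes. The following is a minimal scalar C++ sketch of that scheme, not part of this patch; the helper names (sqrdmulh, rounding_divide_by_pot, requantize) are illustrative, and it assumes the gemmlowp-style ties-away-from-zero rounding that the fixup implements.

    #include <algorithm>
    #include <cstdint>
    #include <limits>

    // Scalar model of SQRDMULH: saturating rounding doubling multiply, high half.
    static int32_t sqrdmulh(int32_t a, int32_t b)
    {
        if (a == std::numeric_limits<int32_t>::min() && b == a)
        {
            return std::numeric_limits<int32_t>::max(); // the only saturating case
        }
        const int64_t product = static_cast<int64_t>(a) * b;
        return static_cast<int32_t>((product + (int64_t{1} << 30)) >> 31);
    }

    // Rounding divide by 2^shift, ties away from zero. This is what the
    // and/sshr/sqadd fixup followed by SRSHL computes in the kernels above.
    static int32_t rounding_divide_by_pot(int32_t x, int shift)
    {
        const int32_t mask      = static_cast<int32_t>((int64_t{1} << shift) - 1);
        const int32_t remainder = x & mask;
        const int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
        return (x >> shift) + (remainder > threshold ? 1 : 0);
    }

    // One output value with per-layer (or per-channel) parameters.
    static int8_t requantize(int32_t acc, int32_t mul, int left_shift, int right_shift,
                             int32_t c_offset, int32_t minval, int32_t maxval)
    {
        // sshl: modular left shift, matching the vector op's bit behaviour
        int32_t x = static_cast<int32_t>(static_cast<uint32_t>(acc) << left_shift);
        x = sqrdmulh(x, mul);                       // sqrdmulh by the multiplier
        x = rounding_divide_by_pot(x, right_shift); // and/sshr/sqadd + srshl
        x += c_offset;                              // add of the c_offset vector
        x = std::max(std::min(x, maxval), minval);  // smin/smax clamp
        return static_cast<int8_t>(x);              // uzp1 narrowing to bytes
    }

In the per-channel paths above, mul, right_shift, and left_shift are loaded per output channel from rq_mul_ptr, rq_right_shift_ptr, and rq_left_shift_ptr; the per-layer path broadcasts single values from the Requantize32 block instead.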
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
index 3fc1b13d9c..761c7ec86e 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,1450 +41,1148 @@ void a64_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(
)
{
__asm__ __volatile__(
- "lsr x15, %x[n_channels], #0x4\n"
- "add x20, %x[qp], %[offsetof_Requantize32_minval]\n"
- "ld1r { v9.4s }, [x20]\n"
- "ldp x14, x13, [%x[inptrs], #0x0]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_maxval]\n"
- "ld1r { v8.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v7.4s }, [x20]\n"
- "mov x12, #0x0\n"
+ "ldp x15, x14, [%x[inptrs], #0x0]\n"
+ "add SP, SP, #-0x80\n"
+ "ldp x13, x12, [%x[inptrs], #0x10]\n"
"mov x11, #0x0\n"
- "ldp x10, x9, [%x[inptrs], #0x10]\n"
- "ldp x28, x27, [%x[inptrs], #0x20]\n"
- "ldp x26, x25, [%x[inptrs], #0x30]\n"
+ "ldp x10, x9, [%x[inptrs], #0x20]\n"
+ "lsr x28, %x[n_channels], #0x4\n"
+ "ldp x27, x26, [%x[inptrs], #0x30]\n"
+ "add x25, %x[qp], %[offsetof_Requantize32_minval]\n"
"ldp x24, x23, [%x[outptrs], #0x0]\n"
- "ldp x22, x21, [%x[outptrs], #0x10]\n"
- "cbz x15, 3f\n"
- "ldr q6, [x14, x12]\n"
- "ldr q5, [x13, x12]\n"
- "subs x15, x15, #0x1\n"
- "ldr q4, [x10, x12]\n"
- "ldr q3, [x9, x12]\n"
- "zip2 v2.16b, v6.16b, v4.16b\n"
- "zip1 v6.16b, v6.16b, v4.16b\n"
- "ldr q1, [x28, x12]\n"
- "ldr q0, [x27, x12]\n"
- "zip1 v4.16b, v5.16b, v3.16b\n"
- "zip2 v3.16b, v5.16b, v3.16b\n"
- "ldr q31, [x26, x12]\n"
- "ldr q30, [x25, x12]\n"
- "zip2 v5.16b, v6.16b, v4.16b\n"
- "zip1 v6.16b, v6.16b, v4.16b\n"
- "ldr q29, [%x[params], #0x10]\n"
- "ldr q28, [%x[params], #0x20]\n"
- "zip1 v4.16b, v2.16b, v3.16b\n"
- "zip2 v3.16b, v2.16b, v3.16b\n"
- "ldr q2, [%x[params], #0x0]\n"
- "ldr q27, [%x[params], #0x30]\n"
- "zip2 v26.16b, v1.16b, v31.16b\n"
- "zip1 v1.16b, v1.16b, v31.16b\n"
- "ldp x14, x13, [%x[inptrs], #0x40]\n"
- "ldr q25, [x14, x12]\n"
- "zip1 v31.16b, v0.16b, v30.16b\n"
- "zip2 v30.16b, v0.16b, v30.16b\n"
- "ldr q24, [x13, x12]\n"
- "ldp x10, x9, [%x[inptrs], #0x50]\n"
- "zip2 v0.16b, v1.16b, v31.16b\n"
- "zip1 v1.16b, v1.16b, v31.16b\n"
- "ldr q23, [x10, x12]\n"
- "ldr q22, [x9, x12]\n"
- "zip2 v21.16b, v25.16b, v23.16b\n"
- "zip1 v25.16b, v25.16b, v23.16b\n"
- "ldp x28, x27, [%x[inptrs], #0x60]\n"
- "ldr q20, [x28, x12]\n"
- "zip1 v23.16b, v24.16b, v22.16b\n"
- "zip2 v22.16b, v24.16b, v22.16b\n"
- "ldr q19, [x27, x12]\n"
- "ldp x26, x25, [%x[inptrs], #0x70]\n"
- "zip1 v31.16b, v26.16b, v30.16b\n"
- "zip2 v30.16b, v26.16b, v30.16b\n"
- "ldr q18, [x26, x12]\n"
- "ldr q17, [x25, x12]\n"
- "zip2 v16.16b, v20.16b, v18.16b\n"
- "zip1 v20.16b, v20.16b, v18.16b\n"
- "zip1 v18.16b, v19.16b, v17.16b\n"
- "zip2 v17.16b, v19.16b, v17.16b\n"
- "ldp x14, x13, [%x[inptrs], #0x0]\n"
- "ldp x10, x9, [%x[inptrs], #0x10]\n"
- "ldp x28, x27, [%x[inptrs], #0x20]\n"
- "ldp x26, x25, [%x[inptrs], #0x30]\n"
- "zip2 v24.16b, v25.16b, v23.16b\n"
- "zip1 v25.16b, v25.16b, v23.16b\n"
- "zip1 v23.16b, v21.16b, v22.16b\n"
- "zip2 v22.16b, v21.16b, v22.16b\n"
- "add %x[params], %x[params], #0x40\n"
- "zip2 v19.16b, v20.16b, v18.16b\n"
- "zip1 v20.16b, v20.16b, v18.16b\n"
- "zip1 v18.16b, v16.16b, v17.16b\n"
- "zip2 v17.16b, v16.16b, v17.16b\n"
- "mov v26.16b, v2.16b\n"
- "mov v21.16b, v2.16b\n"
- "mov v16.16b, v2.16b\n"
- "beq 2f\n"
+ "add x22, %x[qp], %[offsetof_Requantize32_maxval]\n"
+ "ldp x21, x20, [%x[outptrs], #0x10]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_c_offset]\n"
+ "ld1r { v12.4s }, [x25]\n"
+ "ld1r { v11.4s }, [x22]\n"
+ "ld1r { v10.4s }, [x19]\n"
+ "cbz x28, 2f\n"
"1:" // Loop
- ".inst 0x4e8697a2 // sdot v2.4s, v29.16b, v6.16b\n"
- ".inst 0x4e8197b5 // sdot v21.4s, v29.16b, v1.16b\n"
- "ext v6.16b, v6.16b, v6.16b, #0x1\n"
- "add x12, x12, #0x10\n"
- ".inst 0x4e819782 // sdot v2.4s, v28.16b, v1.16b\n"
- "ext v1.16b, v1.16b, v1.16b, #0x1\n"
- ".inst 0x4e8697ba // sdot v26.4s, v29.16b, v6.16b\n"
- "ldr q6, [%x[params], #0x0]\n"
- ".inst 0x4e8197b0 // sdot v16.4s, v29.16b, v1.16b\n"
- ".inst 0x4e999795 // sdot v21.4s, v28.16b, v25.16b\n"
- "subs x15, x15, #0x1\n"
- ".inst 0x4e999762 // sdot v2.4s, v27.16b, v25.16b\n"
- "ext v25.16b, v25.16b, v25.16b, #0x1\n"
- ".inst 0x4e81979a // sdot v26.4s, v28.16b, v1.16b\n"
- "ldr q1, [%x[params], #0x10]\n"
- ".inst 0x4e999790 // sdot v16.4s, v28.16b, v25.16b\n"
- ".inst 0x4e949775 // sdot v21.4s, v27.16b, v20.16b\n"
- "ext v20.16b, v20.16b, v20.16b, #0x1\n"
- "sqrdmulh v2.4s, v2.4s, v6.4s\n"
- ".inst 0x4e99977a // sdot v26.4s, v27.16b, v25.16b\n"
- ".inst 0x4e949770 // sdot v16.4s, v27.16b, v20.16b\n"
- "and v29.16b, v2.16b, v1.16b\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v6.4s\n"
- "sqrdmulh v21.4s, v21.4s, v6.4s\n"
- "sqrdmulh v16.4s, v16.4s, v6.4s\n"
- "ldr q6, [%x[params], #0x60]\n"
- "sqadd v2.4s, v2.4s, v29.4s\n"
- "and v28.16b, v26.16b, v1.16b\n"
- "and v27.16b, v21.16b, v1.16b\n"
- "and v29.16b, v16.16b, v1.16b\n"
- "sshr v28.4s, v28.4s, #0x1f\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "srshl v2.4s, v2.4s, v1.4s\n"
- "sqadd v26.4s, v26.4s, v28.4s\n"
- "ldr q28, [%x[params], #0x40]\n"
- "sqadd v21.4s, v21.4s, v27.4s\n"
- "ldr q27, [%x[params], #0x50]\n"
- "sqadd v16.4s, v16.4s, v29.4s\n"
- "ldr q29, [%x[params], #0x30]\n"
- "add v2.4s, v2.4s, v7.4s\n"
- "srshl v26.4s, v26.4s, v1.4s\n"
- "srshl v21.4s, v21.4s, v1.4s\n"
- "srshl v16.4s, v16.4s, v1.4s\n"
- "ldr q1, [%x[params], #0x70]\n"
- "smax v2.4s, v2.4s, v9.4s\n"
- "add v26.4s, v26.4s, v7.4s\n"
- "add v21.4s, v21.4s, v7.4s\n"
- "add v16.4s, v16.4s, v7.4s\n"
- "smin v2.4s, v2.4s, v8.4s\n"
- "smax v26.4s, v26.4s, v9.4s\n"
- "smax v21.4s, v21.4s, v9.4s\n"
- "smax v16.4s, v16.4s, v9.4s\n"
- "smin v26.4s, v26.4s, v8.4s\n"
- "smin v21.4s, v21.4s, v8.4s\n"
- "smin v16.4s, v16.4s, v8.4s\n"
- "uzp1 v2.16b, v2.16b, v2.16b\n"
- "uzp1 v2.16b, v2.16b, v2.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s2, [x24, x11]\n"
- "ldr q2, [%x[params], #0x20]\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "str s26, [x23, x11]\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "str s21, [x22, x11]\n"
- "mov v26.16b, v2.16b\n"
- "str s16, [x21, x11]\n"
- "mov v21.16b, v2.16b\n"
- "mov v16.16b, v2.16b\n"
- ".inst 0x4e8597a2 // sdot v2.4s, v29.16b, v5.16b\n"
- ".inst 0x4e8097b5 // sdot v21.4s, v29.16b, v0.16b\n"
- ".inst 0x4e809782 // sdot v2.4s, v28.16b, v0.16b\n"
- "ext v5.16b, v5.16b, v5.16b, #0x1\n"
+ "ldr q27, [x15, x11]\n"
+ "subs x28, x28, #0x1\n"
+ "ldr q1, [x14, x11]\n"
+ "ldp x15, x14, [%x[inptrs], #0x40]\n"
+ "ldr q25, [x13, x11]\n"
+ "zip1 v6.16b, v27.16b, v25.16b\n"
+ "ldr q23, [x12, x11]\n"
+ "zip2 v9.16b, v27.16b, v25.16b\n"
+ "ldp x13, x12, [%x[inptrs], #0x50]\n"
+ "ldr q31, [x10, x11]\n"
+ "zip1 v5.16b, v1.16b, v23.16b\n"
+ "ldr q28, [x9, x11]\n"
+ "zip2 v3.16b, v1.16b, v23.16b\n"
+ "ldp x10, x9, [%x[inptrs], #0x60]\n"
+ "zip1 v8.16b, v6.16b, v5.16b\n"
+ "ldr q21, [x27, x11]\n"
+ "zip2 v7.16b, v6.16b, v5.16b\n"
+ "ldr q26, [x26, x11]\n"
+ "zip1 v6.16b, v9.16b, v3.16b\n"
+ "ldp x27, x26, [%x[inptrs], #0x70]\n"
+ "zip2 v5.16b, v9.16b, v3.16b\n"
+ "ldr q24, [x15, x11]\n"
+ "ldr q22, [x14, x11]\n"
+ "zip1 v2.16b, v31.16b, v21.16b\n"
+ "zip2 v4.16b, v31.16b, v21.16b\n"
+ "ldp x15, x14, [%x[inptrs], #0x0]\n"
+ "zip1 v1.16b, v28.16b, v26.16b\n"
+ "ldr q20, [x13, x11]\n"
+ "zip2 v31.16b, v28.16b, v26.16b\n"
+ "ldr q16, [x12, x11]\n"
+ "zip1 v3.16b, v2.16b, v1.16b\n"
+ "ldp x13, x12, [%x[inptrs], #0x10]\n"
+ "zip2 v2.16b, v2.16b, v1.16b\n"
+ "ldr q19, [x10, x11]\n"
+ "zip1 v1.16b, v4.16b, v31.16b\n"
+ "ldr q0, [x9, x11]\n"
+ "zip1 v28.16b, v24.16b, v20.16b\n"
+ "ldp x10, x9, [%x[inptrs], #0x20]\n"
+ "zip2 v26.16b, v24.16b, v20.16b\n"
+ "ldr q18, [x27, x11]\n"
+ "zip1 v24.16b, v22.16b, v16.16b\n"
+ "ldr q17, [x26, x11]\n"
+ "zip2 v22.16b, v22.16b, v16.16b\n"
+ "ldp x27, x26, [%x[inptrs], #0x30]\n"
+ "zip2 v16.16b, v4.16b, v31.16b\n"
+ "str q6, [SP, #0x0]\n"
+ "zip1 v31.16b, v28.16b, v24.16b\n"
+ "str q5, [SP, #0x10]\n"
+ "zip1 v20.16b, v19.16b, v18.16b\n"
+ "str q1, [SP, #0x20]\n"
+ "zip2 v19.16b, v19.16b, v18.16b\n"
+ "str q16, [SP, #0x30]\n"
+ "zip1 v18.16b, v0.16b, v17.16b\n"
+ "ldr q30, [%x[params], #0x0]\n"
+ "zip2 v17.16b, v0.16b, v17.16b\n"
+ "ldr q29, [%x[params], #0x10]\n"
+ "zip2 v28.16b, v28.16b, v24.16b\n"
+ "ldr q27, [%x[params], #0x20]\n"
+ "zip1 v16.16b, v26.16b, v22.16b\n"
+ "str q16, [SP, #0x40]\n"
+ "zip2 v16.16b, v26.16b, v22.16b\n"
+ "str q16, [SP, #0x50]\n"
+ "zip1 v26.16b, v20.16b, v18.16b\n"
+ "ldr q25, [%x[params], #0x30]\n"
+ "zip2 v24.16b, v20.16b, v18.16b\n"
+ "ldr q23, [%x[params], #0x40]\n"
+ "zip1 v16.16b, v19.16b, v17.16b\n"
+ "str q16, [SP, #0x60]\n"
+ "zip2 v16.16b, v19.16b, v17.16b\n"
+ "str q16, [SP, #0x70]\n"
+ "mov v22.16b, v30.16b\n"
+ "ldr q21, [%x[params], #0x50]\n"
+ "mov v20.16b, v30.16b\n"
+ "mov v19.16b, v30.16b\n"
+ ".inst 0x4e8897be // sdot v30.4s, v29.16b, v8.16b\n"
+ ".inst 0x4e8397b4 // sdot v20.4s, v29.16b, v3.16b\n"
+ "ext v8.16b, v8.16b, v8.16b, #0x1\n"
+ ".inst 0x4e83977e // sdot v30.4s, v27.16b, v3.16b\n"
+ "ext v3.16b, v3.16b, v3.16b, #0x1\n"
+ ".inst 0x4e9f9774 // sdot v20.4s, v27.16b, v31.16b\n"
+ ".inst 0x4e8897b6 // sdot v22.4s, v29.16b, v8.16b\n"
+ "ldr q8, [SP, #0x0]\n"
+ ".inst 0x4e9f973e // sdot v30.4s, v25.16b, v31.16b\n"
+ "ext v31.16b, v31.16b, v31.16b, #0x1\n"
+ ".inst 0x4e9a9734 // sdot v20.4s, v25.16b, v26.16b\n"
+ "ext v26.16b, v26.16b, v26.16b, #0x1\n"
+ ".inst 0x4e8397b3 // sdot v19.4s, v29.16b, v3.16b\n"
+ "ldr q29, [%x[params], #0x70]\n"
+ ".inst 0x4e839776 // sdot v22.4s, v27.16b, v3.16b\n"
+ "ldr q3, [SP, #0x20]\n"
+ "sqrdmulh v30.4s, v30.4s, v23.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v23.4s\n"
+ ".inst 0x4e9f9773 // sdot v19.4s, v27.16b, v31.16b\n"
+ "ldr q27, [%x[params], #0x80]\n"
+ ".inst 0x4e9f9736 // sdot v22.4s, v25.16b, v31.16b\n"
+ "ldr q31, [SP, #0x40]\n"
+ "and v16.16b, v30.16b, v21.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ ".inst 0x4e9a9733 // sdot v19.4s, v25.16b, v26.16b\n"
+ "ldr q25, [%x[params], #0x90]\n"
+ "sqrdmulh v22.4s, v22.4s, v23.4s\n"
+ "ldr q26, [SP, #0x60]\n"
+ "and v18.16b, v20.16b, v21.16b\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sqrdmulh v19.4s, v19.4s, v23.4s\n"
+ "ldr q23, [%x[params], #0xa0]\n"
+ "sqadd v30.4s, v30.4s, v16.4s\n"
+ "and v17.16b, v22.16b, v21.16b\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v19.16b, v21.16b\n"
+ "srshl v30.4s, v30.4s, v21.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqadd v20.4s, v20.4s, v18.4s\n"
+ "add v30.4s, v30.4s, v10.4s\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "srshl v20.4s, v20.4s, v21.4s\n"
+ "smax v30.4s, v30.4s, v12.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "srshl v22.4s, v22.4s, v21.4s\n"
+ "smin v30.4s, v30.4s, v11.4s\n"
+ "add v20.4s, v20.4s, v10.4s\n"
+ "srshl v19.4s, v19.4s, v21.4s\n"
+ "ldr q21, [%x[params], #0xb0]\n"
+ "add v22.4s, v22.4s, v10.4s\n"
+ "smax v20.4s, v20.4s, v12.4s\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "smax v22.4s, v22.4s, v12.4s\n"
+ "smin v20.4s, v20.4s, v11.4s\n"
+ "add v19.4s, v19.4s, v10.4s\n"
+ "smin v22.4s, v22.4s, v11.4s\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "str s30, [x24, x11]\n"
+ "smax v19.4s, v19.4s, v12.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "ldr q30, [%x[params], #0x60]\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str s22, [x23, x11]\n"
+ "smin v19.4s, v19.4s, v11.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "str s20, [x21, x11]\n"
+ "mov v22.16b, v30.16b\n"
+ "mov v20.16b, v30.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ ".inst 0x4e8297b4 // sdot v20.4s, v29.16b, v2.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str s19, [x20, x11]\n"
+ "mov v19.16b, v30.16b\n"
"add x11, x11, #0x4\n"
- "ext v0.16b, v0.16b, v0.16b, #0x1\n"
- ".inst 0x4e8597ba // sdot v26.4s, v29.16b, v5.16b\n"
- "ldr q5, [x13, x12]\n"
- ".inst 0x4e8097b0 // sdot v16.4s, v29.16b, v0.16b\n"
- ".inst 0x4e989795 // sdot v21.4s, v28.16b, v24.16b\n"
- ".inst 0x4e989762 // sdot v2.4s, v27.16b, v24.16b\n"
+ ".inst 0x4e8797be // sdot v30.4s, v29.16b, v7.16b\n"
+ ".inst 0x4e9c9774 // sdot v20.4s, v27.16b, v28.16b\n"
+ "ext v7.16b, v7.16b, v7.16b, #0x1\n"
+ ".inst 0x4e82977e // sdot v30.4s, v27.16b, v2.16b\n"
+ ".inst 0x4e989734 // sdot v20.4s, v25.16b, v24.16b\n"
+ "ext v2.16b, v2.16b, v2.16b, #0x1\n"
"ext v24.16b, v24.16b, v24.16b, #0x1\n"
- ".inst 0x4e80979a // sdot v26.4s, v28.16b, v0.16b\n"
- "ldr q0, [x27, x12]\n"
- ".inst 0x4e989790 // sdot v16.4s, v28.16b, v24.16b\n"
- "sqrdmulh v2.4s, v2.4s, v6.4s\n"
- ".inst 0x4e939775 // sdot v21.4s, v27.16b, v19.16b\n"
- "ext v19.16b, v19.16b, v19.16b, #0x1\n"
- ".inst 0x4e98977a // sdot v26.4s, v27.16b, v24.16b\n"
- ".inst 0x4e939770 // sdot v16.4s, v27.16b, v19.16b\n"
- "and v29.16b, v2.16b, v1.16b\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v6.4s\n"
- "sqrdmulh v21.4s, v21.4s, v6.4s\n"
- "sqrdmulh v16.4s, v16.4s, v6.4s\n"
- "ldr q6, [%x[params], #0xc0]\n"
- "sqadd v2.4s, v2.4s, v29.4s\n"
- "and v28.16b, v26.16b, v1.16b\n"
- "and v27.16b, v21.16b, v1.16b\n"
- "and v29.16b, v16.16b, v1.16b\n"
- "sshr v28.4s, v28.4s, #0x1f\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "srshl v2.4s, v2.4s, v1.4s\n"
- "sqadd v26.4s, v26.4s, v28.4s\n"
- "ldr q28, [%x[params], #0xa0]\n"
- "sqadd v21.4s, v21.4s, v27.4s\n"
- "ldr q27, [%x[params], #0xb0]\n"
- "sqadd v16.4s, v16.4s, v29.4s\n"
- "ldr q29, [%x[params], #0x90]\n"
- "add v2.4s, v2.4s, v7.4s\n"
- "srshl v26.4s, v26.4s, v1.4s\n"
- "srshl v21.4s, v21.4s, v1.4s\n"
- "srshl v16.4s, v16.4s, v1.4s\n"
- "ldr q1, [%x[params], #0xd0]\n"
- "smax v2.4s, v2.4s, v9.4s\n"
- "add v26.4s, v26.4s, v7.4s\n"
- "add v21.4s, v21.4s, v7.4s\n"
- "add v16.4s, v16.4s, v7.4s\n"
- "smin v2.4s, v2.4s, v8.4s\n"
- "smax v26.4s, v26.4s, v9.4s\n"
- "smax v21.4s, v21.4s, v9.4s\n"
- "smax v16.4s, v16.4s, v9.4s\n"
- "smin v26.4s, v26.4s, v8.4s\n"
- "smin v21.4s, v21.4s, v8.4s\n"
- "smin v16.4s, v16.4s, v8.4s\n"
- "uzp1 v2.16b, v2.16b, v2.16b\n"
- "uzp1 v2.16b, v2.16b, v2.16b\n"
- "str s2, [x24, x11]\n"
- "ldr q2, [%x[params], #0x80]\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s26, [x23, x11]\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "str s21, [x22, x11]\n"
- "str s16, [x21, x11]\n"
- "mov v26.16b, v2.16b\n"
- "mov v21.16b, v2.16b\n"
- ".inst 0x4e9f97b5 // sdot v21.4s, v29.16b, v31.16b\n"
- "mov v16.16b, v2.16b\n"
- ".inst 0x4e8497a2 // sdot v2.4s, v29.16b, v4.16b\n"
- ".inst 0x4e9f9782 // sdot v2.4s, v28.16b, v31.16b\n"
+ ".inst 0x4e8797b6 // sdot v22.4s, v29.16b, v7.16b\n"
+ "ldr q7, [SP, #0x10]\n"
+ ".inst 0x4e9c973e // sdot v30.4s, v25.16b, v28.16b\n"
+ "ext v28.16b, v28.16b, v28.16b, #0x1\n"
+ ".inst 0x4e8297b3 // sdot v19.4s, v29.16b, v2.16b\n"
+ "ldr q29, [%x[params], #0xd0]\n"
+ ".inst 0x4e829776 // sdot v22.4s, v27.16b, v2.16b\n"
+ "ldr q2, [SP, #0x30]\n"
+ "sqrdmulh v30.4s, v30.4s, v23.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v23.4s\n"
+ ".inst 0x4e9c9773 // sdot v19.4s, v27.16b, v28.16b\n"
+ "ldr q27, [%x[params], #0xe0]\n"
+ ".inst 0x4e9c9736 // sdot v22.4s, v25.16b, v28.16b\n"
+ "ldr q28, [SP, #0x50]\n"
+ "and v16.16b, v30.16b, v21.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ ".inst 0x4e989733 // sdot v19.4s, v25.16b, v24.16b\n"
+ "ldr q25, [%x[params], #0xf0]\n"
+ "sqrdmulh v22.4s, v22.4s, v23.4s\n"
+ "ldr q24, [SP, #0x70]\n"
+ "and v18.16b, v20.16b, v21.16b\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sqrdmulh v19.4s, v19.4s, v23.4s\n"
+ "ldr q23, [%x[params], #0x100]\n"
+ "sqadd v30.4s, v30.4s, v16.4s\n"
+ "and v17.16b, v22.16b, v21.16b\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v19.16b, v21.16b\n"
+ "srshl v30.4s, v30.4s, v21.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqadd v20.4s, v20.4s, v18.4s\n"
+ "add v30.4s, v30.4s, v10.4s\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "srshl v20.4s, v20.4s, v21.4s\n"
+ "smax v30.4s, v30.4s, v12.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "srshl v22.4s, v22.4s, v21.4s\n"
+ "smin v30.4s, v30.4s, v11.4s\n"
+ "add v20.4s, v20.4s, v10.4s\n"
+ "srshl v19.4s, v19.4s, v21.4s\n"
+ "ldr q21, [%x[params], #0x110]\n"
+ "add v22.4s, v22.4s, v10.4s\n"
+ "smax v20.4s, v20.4s, v12.4s\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "smax v22.4s, v22.4s, v12.4s\n"
+ "smin v20.4s, v20.4s, v11.4s\n"
+ "add v19.4s, v19.4s, v10.4s\n"
+ "smin v22.4s, v22.4s, v11.4s\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "str s30, [x24, x11]\n"
+ "smax v19.4s, v19.4s, v12.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "ldr q30, [%x[params], #0xc0]\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str s22, [x23, x11]\n"
+ "smin v19.4s, v19.4s, v11.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "str s20, [x21, x11]\n"
+ "mov v22.16b, v30.16b\n"
+ "mov v20.16b, v30.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ ".inst 0x4e8397b4 // sdot v20.4s, v29.16b, v3.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str s19, [x20, x11]\n"
+ "mov v19.16b, v30.16b\n"
"add x11, x11, #0x4\n"
- "ext v4.16b, v4.16b, v4.16b, #0x1\n"
- "ext v31.16b, v31.16b, v31.16b, #0x1\n"
- ".inst 0x4e8497ba // sdot v26.4s, v29.16b, v4.16b\n"
- "ldr q4, [x10, x12]\n"
- ".inst 0x4e9f97b0 // sdot v16.4s, v29.16b, v31.16b\n"
- ".inst 0x4e979795 // sdot v21.4s, v28.16b, v23.16b\n"
- ".inst 0x4e979762 // sdot v2.4s, v27.16b, v23.16b\n"
- "ext v23.16b, v23.16b, v23.16b, #0x1\n"
- ".inst 0x4e9f979a // sdot v26.4s, v28.16b, v31.16b\n"
- "ldr q31, [x26, x12]\n"
- ".inst 0x4e979790 // sdot v16.4s, v28.16b, v23.16b\n"
- ".inst 0x4e929775 // sdot v21.4s, v27.16b, v18.16b\n"
- "ext v18.16b, v18.16b, v18.16b, #0x1\n"
- "sqrdmulh v2.4s, v2.4s, v6.4s\n"
- ".inst 0x4e97977a // sdot v26.4s, v27.16b, v23.16b\n"
- ".inst 0x4e929770 // sdot v16.4s, v27.16b, v18.16b\n"
- "and v29.16b, v2.16b, v1.16b\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v6.4s\n"
- "sqrdmulh v21.4s, v21.4s, v6.4s\n"
- "sqrdmulh v16.4s, v16.4s, v6.4s\n"
- "ldr q6, [%x[params], #0x120]\n"
- "sqadd v2.4s, v2.4s, v29.4s\n"
- "and v28.16b, v26.16b, v1.16b\n"
- "and v27.16b, v21.16b, v1.16b\n"
- "and v29.16b, v16.16b, v1.16b\n"
- "sshr v28.4s, v28.4s, #0x1f\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "srshl v2.4s, v2.4s, v1.4s\n"
- "sqadd v26.4s, v26.4s, v28.4s\n"
- "ldr q28, [%x[params], #0x100]\n"
- "sqadd v21.4s, v21.4s, v27.4s\n"
- "ldr q27, [%x[params], #0x110]\n"
- "sqadd v16.4s, v16.4s, v29.4s\n"
- "ldr q29, [%x[params], #0xf0]\n"
- "add v2.4s, v2.4s, v7.4s\n"
- "srshl v26.4s, v26.4s, v1.4s\n"
- "srshl v21.4s, v21.4s, v1.4s\n"
- "srshl v16.4s, v16.4s, v1.4s\n"
- "ldr q1, [%x[params], #0x130]\n"
- "smax v2.4s, v2.4s, v9.4s\n"
- "add v26.4s, v26.4s, v7.4s\n"
- "add v21.4s, v21.4s, v7.4s\n"
- "add v16.4s, v16.4s, v7.4s\n"
- "smin v2.4s, v2.4s, v8.4s\n"
- "smax v26.4s, v26.4s, v9.4s\n"
- "smax v21.4s, v21.4s, v9.4s\n"
- "smax v16.4s, v16.4s, v9.4s\n"
- "smin v26.4s, v26.4s, v8.4s\n"
- "smin v21.4s, v21.4s, v8.4s\n"
- "smin v16.4s, v16.4s, v8.4s\n"
- "uzp1 v2.16b, v2.16b, v2.16b\n"
- "uzp1 v2.16b, v2.16b, v2.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s2, [x24, x11]\n"
- "ldr q2, [%x[params], #0xe0]\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "str s26, [x23, x11]\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "str s21, [x22, x11]\n"
- "mov v26.16b, v2.16b\n"
- "str s16, [x21, x11]\n"
- "mov v21.16b, v2.16b\n"
- "mov v16.16b, v2.16b\n"
- ".inst 0x4e8397a2 // sdot v2.4s, v29.16b, v3.16b\n"
- ".inst 0x4e9e97b5 // sdot v21.4s, v29.16b, v30.16b\n"
- ".inst 0x4e9e9782 // sdot v2.4s, v28.16b, v30.16b\n"
+ ".inst 0x4e8897be // sdot v30.4s, v29.16b, v8.16b\n"
+ ".inst 0x4e9f9774 // sdot v20.4s, v27.16b, v31.16b\n"
+ "ext v8.16b, v8.16b, v8.16b, #0x1\n"
+ ".inst 0x4e83977e // sdot v30.4s, v27.16b, v3.16b\n"
+ ".inst 0x4e9a9734 // sdot v20.4s, v25.16b, v26.16b\n"
"ext v3.16b, v3.16b, v3.16b, #0x1\n"
- "add x11, x11, #0x4\n"
- "ext v30.16b, v30.16b, v30.16b, #0x1\n"
- ".inst 0x4e8397ba // sdot v26.4s, v29.16b, v3.16b\n"
- "ldr q3, [x9, x12]\n"
- ".inst 0x4e9e97b0 // sdot v16.4s, v29.16b, v30.16b\n"
- ".inst 0x4e969795 // sdot v21.4s, v28.16b, v22.16b\n"
- ".inst 0x4e969762 // sdot v2.4s, v27.16b, v22.16b\n"
- "ext v22.16b, v22.16b, v22.16b, #0x1\n"
- ".inst 0x4e9e979a // sdot v26.4s, v28.16b, v30.16b\n"
- "ldr q30, [x25, x12]\n"
- ".inst 0x4e969790 // sdot v16.4s, v28.16b, v22.16b\n"
- "sqrdmulh v2.4s, v2.4s, v6.4s\n"
- ".inst 0x4e919775 // sdot v21.4s, v27.16b, v17.16b\n"
- "ext v17.16b, v17.16b, v17.16b, #0x1\n"
- ".inst 0x4e96977a // sdot v26.4s, v27.16b, v22.16b\n"
- ".inst 0x4e919770 // sdot v16.4s, v27.16b, v17.16b\n"
- "and v29.16b, v2.16b, v1.16b\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v6.4s\n"
- "sqrdmulh v21.4s, v21.4s, v6.4s\n"
- "sqrdmulh v16.4s, v16.4s, v6.4s\n"
- "ldr q6, [x14, x12]\n"
- "ldp x14, x13, [%x[inptrs], #0x40]\n"
- "ldr q25, [x14, x12]\n"
- "ldr q24, [x13, x12]\n"
- "sqadd v2.4s, v2.4s, v29.4s\n"
- "and v28.16b, v26.16b, v1.16b\n"
- "and v27.16b, v21.16b, v1.16b\n"
- "and v29.16b, v16.16b, v1.16b\n"
- "ldp x10, x9, [%x[inptrs], #0x50]\n"
- "ldr q23, [x10, x12]\n"
- "ldr q22, [x9, x12]\n"
- "sshr v28.4s, v28.4s, #0x1f\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "srshl v2.4s, v2.4s, v1.4s\n"
- "sqadd v26.4s, v26.4s, v28.4s\n"
- "ldr q28, [%x[params], #0x160]\n"
- "sqadd v21.4s, v21.4s, v27.4s\n"
- "ldr q27, [%x[params], #0x170]\n"
- "sqadd v16.4s, v16.4s, v29.4s\n"
- "ldr q29, [%x[params], #0x150]\n"
- "add v2.4s, v2.4s, v7.4s\n"
- "srshl v26.4s, v26.4s, v1.4s\n"
- "srshl v21.4s, v21.4s, v1.4s\n"
- "srshl v16.4s, v16.4s, v1.4s\n"
- "ldr q1, [x28, x12]\n"
- "smax v2.4s, v2.4s, v9.4s\n"
- "ldp x28, x27, [%x[inptrs], #0x60]\n"
- "ldr q20, [x28, x12]\n"
- "ldr q19, [x27, x12]\n"
- "add v26.4s, v26.4s, v7.4s\n"
- "add v21.4s, v21.4s, v7.4s\n"
- "add v16.4s, v16.4s, v7.4s\n"
- "smin v2.4s, v2.4s, v8.4s\n"
- "ldp x26, x25, [%x[inptrs], #0x70]\n"
- "ldr q18, [x26, x12]\n"
- "ldr q17, [x25, x12]\n"
- "smax v26.4s, v26.4s, v9.4s\n"
- "smax v21.4s, v21.4s, v9.4s\n"
- "ldp x14, x13, [%x[inptrs], #0x0]\n"
- "smax v16.4s, v16.4s, v9.4s\n"
- "smin v26.4s, v26.4s, v8.4s\n"
- "ldp x10, x9, [%x[inptrs], #0x10]\n"
- "ldp x28, x27, [%x[inptrs], #0x20]\n"
- "smin v21.4s, v21.4s, v8.4s\n"
- "smin v16.4s, v16.4s, v8.4s\n"
- "ldp x26, x25, [%x[inptrs], #0x30]\n"
- "uzp1 v2.16b, v2.16b, v2.16b\n"
- "uzp1 v2.16b, v2.16b, v2.16b\n"
- "str s2, [x24, x11]\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "zip2 v2.16b, v6.16b, v4.16b\n"
- "zip1 v6.16b, v6.16b, v4.16b\n"
- "zip1 v4.16b, v5.16b, v3.16b\n"
- "zip2 v3.16b, v5.16b, v3.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s26, [x23, x11]\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "str s21, [x22, x11]\n"
- "str s16, [x21, x11]\n"
- "zip2 v5.16b, v6.16b, v4.16b\n"
- "zip1 v6.16b, v6.16b, v4.16b\n"
- "add x11, x11, #0x4\n"
- "zip1 v4.16b, v2.16b, v3.16b\n"
- "zip2 v3.16b, v2.16b, v3.16b\n"
- "ldr q2, [%x[params], #0x140]\n"
+ "ext v26.16b, v26.16b, v26.16b, #0x1\n"
+ ".inst 0x4e8897b6 // sdot v22.4s, v29.16b, v8.16b\n"
+ ".inst 0x4e9f973e // sdot v30.4s, v25.16b, v31.16b\n"
+ "ext v31.16b, v31.16b, v31.16b, #0x1\n"
+ ".inst 0x4e8397b3 // sdot v19.4s, v29.16b, v3.16b\n"
+ "ldr q29, [%x[params], #0x130]\n"
+ ".inst 0x4e839776 // sdot v22.4s, v27.16b, v3.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v23.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v23.4s\n"
+ ".inst 0x4e9f9773 // sdot v19.4s, v27.16b, v31.16b\n"
+ "ldr q27, [%x[params], #0x140]\n"
+ ".inst 0x4e9f9736 // sdot v22.4s, v25.16b, v31.16b\n"
+ "and v16.16b, v30.16b, v21.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ ".inst 0x4e9a9733 // sdot v19.4s, v25.16b, v26.16b\n"
+ "ldr q25, [%x[params], #0x150]\n"
+ "sqrdmulh v22.4s, v22.4s, v23.4s\n"
+ "and v18.16b, v20.16b, v21.16b\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sqrdmulh v19.4s, v19.4s, v23.4s\n"
+ "ldr q23, [%x[params], #0x160]\n"
+ "sqadd v30.4s, v30.4s, v16.4s\n"
+ "and v17.16b, v22.16b, v21.16b\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v19.16b, v21.16b\n"
+ "srshl v30.4s, v30.4s, v21.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqadd v20.4s, v20.4s, v18.4s\n"
+ "add v30.4s, v30.4s, v10.4s\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "srshl v20.4s, v20.4s, v21.4s\n"
+ "smax v30.4s, v30.4s, v12.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "srshl v22.4s, v22.4s, v21.4s\n"
+ "smin v30.4s, v30.4s, v11.4s\n"
+ "add v20.4s, v20.4s, v10.4s\n"
+ "srshl v19.4s, v19.4s, v21.4s\n"
+ "ldr q21, [%x[params], #0x170]\n"
+ "add v22.4s, v22.4s, v10.4s\n"
+ "smax v20.4s, v20.4s, v12.4s\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "smax v22.4s, v22.4s, v12.4s\n"
+ "smin v20.4s, v20.4s, v11.4s\n"
+ "add v19.4s, v19.4s, v10.4s\n"
+ "smin v22.4s, v22.4s, v11.4s\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "str s30, [x24, x11]\n"
+ "smax v19.4s, v19.4s, v12.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "ldr q30, [%x[params], #0x120]\n"
"add %x[params], %x[params], #0x180\n"
- "zip2 v26.16b, v1.16b, v31.16b\n"
- "zip1 v1.16b, v1.16b, v31.16b\n"
- "zip1 v31.16b, v0.16b, v30.16b\n"
- "zip2 v30.16b, v0.16b, v30.16b\n"
- "zip2 v21.16b, v25.16b, v23.16b\n"
- "zip1 v25.16b, v25.16b, v23.16b\n"
- "zip1 v23.16b, v24.16b, v22.16b\n"
- "zip2 v22.16b, v24.16b, v22.16b\n"
- "zip2 v16.16b, v20.16b, v18.16b\n"
- "zip1 v20.16b, v20.16b, v18.16b\n"
- "zip1 v18.16b, v19.16b, v17.16b\n"
- "zip2 v17.16b, v19.16b, v17.16b\n"
- "zip2 v0.16b, v1.16b, v31.16b\n"
- "zip1 v1.16b, v1.16b, v31.16b\n"
- "zip1 v31.16b, v26.16b, v30.16b\n"
- "zip2 v30.16b, v26.16b, v30.16b\n"
- "zip2 v24.16b, v25.16b, v23.16b\n"
- "zip1 v25.16b, v25.16b, v23.16b\n"
- "zip1 v23.16b, v21.16b, v22.16b\n"
- "zip2 v22.16b, v21.16b, v22.16b\n"
- "zip2 v19.16b, v20.16b, v18.16b\n"
- "zip1 v20.16b, v20.16b, v18.16b\n"
- "zip1 v18.16b, v16.16b, v17.16b\n"
- "zip2 v17.16b, v16.16b, v17.16b\n"
- "mov v26.16b, v2.16b\n"
- "mov v21.16b, v2.16b\n"
- "mov v16.16b, v2.16b\n"
- "bgt 1b\n"
- "2:" // Detached iteration
- ".inst 0x4e8697a2 // sdot v2.4s, v29.16b, v6.16b\n"
- ".inst 0x4e8197b5 // sdot v21.4s, v29.16b, v1.16b\n"
- "ext v6.16b, v6.16b, v6.16b, #0x1\n"
- "tst %x[n_channels], #0xf\n"
- ".inst 0x4e819782 // sdot v2.4s, v28.16b, v1.16b\n"
- "ext v1.16b, v1.16b, v1.16b, #0x1\n"
- ".inst 0x4e8697ba // sdot v26.4s, v29.16b, v6.16b\n"
- "ldr q6, [%x[params], #0x0]\n"
- ".inst 0x4e8197b0 // sdot v16.4s, v29.16b, v1.16b\n"
- ".inst 0x4e999795 // sdot v21.4s, v28.16b, v25.16b\n"
- "add x12, x12, #0x10\n"
- ".inst 0x4e999762 // sdot v2.4s, v27.16b, v25.16b\n"
- "ext v25.16b, v25.16b, v25.16b, #0x1\n"
- ".inst 0x4e81979a // sdot v26.4s, v28.16b, v1.16b\n"
- "ldr q1, [%x[params], #0x10]\n"
- ".inst 0x4e999790 // sdot v16.4s, v28.16b, v25.16b\n"
- ".inst 0x4e949775 // sdot v21.4s, v27.16b, v20.16b\n"
- "ext v20.16b, v20.16b, v20.16b, #0x1\n"
- "sqrdmulh v2.4s, v2.4s, v6.4s\n"
- ".inst 0x4e99977a // sdot v26.4s, v27.16b, v25.16b\n"
- ".inst 0x4e949770 // sdot v16.4s, v27.16b, v20.16b\n"
- "and v29.16b, v2.16b, v1.16b\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v6.4s\n"
- "sqrdmulh v21.4s, v21.4s, v6.4s\n"
- "sqrdmulh v16.4s, v16.4s, v6.4s\n"
- "ldr q6, [%x[params], #0x60]\n"
- "sqadd v2.4s, v2.4s, v29.4s\n"
- "and v28.16b, v26.16b, v1.16b\n"
- "and v27.16b, v21.16b, v1.16b\n"
- "and v29.16b, v16.16b, v1.16b\n"
- "sshr v28.4s, v28.4s, #0x1f\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "srshl v2.4s, v2.4s, v1.4s\n"
- "sqadd v26.4s, v26.4s, v28.4s\n"
- "ldr q28, [%x[params], #0x40]\n"
- "sqadd v21.4s, v21.4s, v27.4s\n"
- "ldr q27, [%x[params], #0x50]\n"
- "sqadd v16.4s, v16.4s, v29.4s\n"
- "ldr q29, [%x[params], #0x30]\n"
- "add v2.4s, v2.4s, v7.4s\n"
- "srshl v26.4s, v26.4s, v1.4s\n"
- "srshl v21.4s, v21.4s, v1.4s\n"
- "srshl v16.4s, v16.4s, v1.4s\n"
- "ldr q1, [%x[params], #0x70]\n"
- "smax v2.4s, v2.4s, v9.4s\n"
- "add v26.4s, v26.4s, v7.4s\n"
- "add v21.4s, v21.4s, v7.4s\n"
- "add v16.4s, v16.4s, v7.4s\n"
- "smin v2.4s, v2.4s, v8.4s\n"
- "smax v26.4s, v26.4s, v9.4s\n"
- "smax v21.4s, v21.4s, v9.4s\n"
- "smax v16.4s, v16.4s, v9.4s\n"
- "smin v26.4s, v26.4s, v8.4s\n"
- "smin v21.4s, v21.4s, v8.4s\n"
- "smin v16.4s, v16.4s, v8.4s\n"
- "uzp1 v2.16b, v2.16b, v2.16b\n"
- "uzp1 v2.16b, v2.16b, v2.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s2, [x24, x11]\n"
- "ldr q2, [%x[params], #0x20]\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "str s26, [x23, x11]\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "str s21, [x22, x11]\n"
- "mov v26.16b, v2.16b\n"
- "str s16, [x21, x11]\n"
- "mov v21.16b, v2.16b\n"
- "mov v16.16b, v2.16b\n"
- ".inst 0x4e8597a2 // sdot v2.4s, v29.16b, v5.16b\n"
- ".inst 0x4e8097b5 // sdot v21.4s, v29.16b, v0.16b\n"
- ".inst 0x4e809782 // sdot v2.4s, v28.16b, v0.16b\n"
- "ext v5.16b, v5.16b, v5.16b, #0x1\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str s22, [x23, x11]\n"
+ "smin v19.4s, v19.4s, v11.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "mov v22.16b, v30.16b\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "str s20, [x21, x11]\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "mov v20.16b, v30.16b\n"
+ ".inst 0x4e8297b4 // sdot v20.4s, v29.16b, v2.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str s19, [x20, x11]\n"
+ "mov v19.16b, v30.16b\n"
"add x11, x11, #0x4\n"
- "ext v0.16b, v0.16b, v0.16b, #0x1\n"
- ".inst 0x4e8597ba // sdot v26.4s, v29.16b, v5.16b\n"
- ".inst 0x4e8097b0 // sdot v16.4s, v29.16b, v0.16b\n"
- ".inst 0x4e989795 // sdot v21.4s, v28.16b, v24.16b\n"
- ".inst 0x4e989762 // sdot v2.4s, v27.16b, v24.16b\n"
+ ".inst 0x4e8797be // sdot v30.4s, v29.16b, v7.16b\n"
+ ".inst 0x4e9c9774 // sdot v20.4s, v27.16b, v28.16b\n"
+ "ext v7.16b, v7.16b, v7.16b, #0x1\n"
+ ".inst 0x4e82977e // sdot v30.4s, v27.16b, v2.16b\n"
+ ".inst 0x4e989734 // sdot v20.4s, v25.16b, v24.16b\n"
+ "ext v2.16b, v2.16b, v2.16b, #0x1\n"
"ext v24.16b, v24.16b, v24.16b, #0x1\n"
- ".inst 0x4e80979a // sdot v26.4s, v28.16b, v0.16b\n"
- ".inst 0x4e989790 // sdot v16.4s, v28.16b, v24.16b\n"
- "sqrdmulh v2.4s, v2.4s, v6.4s\n"
- ".inst 0x4e939775 // sdot v21.4s, v27.16b, v19.16b\n"
- "ext v19.16b, v19.16b, v19.16b, #0x1\n"
- ".inst 0x4e98977a // sdot v26.4s, v27.16b, v24.16b\n"
- ".inst 0x4e939770 // sdot v16.4s, v27.16b, v19.16b\n"
- "and v29.16b, v2.16b, v1.16b\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v6.4s\n"
- "sqrdmulh v21.4s, v21.4s, v6.4s\n"
- "sqrdmulh v16.4s, v16.4s, v6.4s\n"
- "ldr q6, [%x[params], #0xc0]\n"
- "sqadd v2.4s, v2.4s, v29.4s\n"
- "and v28.16b, v26.16b, v1.16b\n"
- "and v27.16b, v21.16b, v1.16b\n"
- "and v29.16b, v16.16b, v1.16b\n"
- "sshr v28.4s, v28.4s, #0x1f\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "srshl v2.4s, v2.4s, v1.4s\n"
- "sqadd v26.4s, v26.4s, v28.4s\n"
- "ldr q28, [%x[params], #0xa0]\n"
- "sqadd v21.4s, v21.4s, v27.4s\n"
- "ldr q27, [%x[params], #0xb0]\n"
- "sqadd v16.4s, v16.4s, v29.4s\n"
- "ldr q29, [%x[params], #0x90]\n"
- "add v2.4s, v2.4s, v7.4s\n"
- "srshl v26.4s, v26.4s, v1.4s\n"
- "srshl v21.4s, v21.4s, v1.4s\n"
- "srshl v16.4s, v16.4s, v1.4s\n"
- "ldr q1, [%x[params], #0xd0]\n"
- "smax v2.4s, v2.4s, v9.4s\n"
- "add v26.4s, v26.4s, v7.4s\n"
- "add v21.4s, v21.4s, v7.4s\n"
- "add v16.4s, v16.4s, v7.4s\n"
- "smin v2.4s, v2.4s, v8.4s\n"
- "smax v26.4s, v26.4s, v9.4s\n"
- "smax v21.4s, v21.4s, v9.4s\n"
- "smax v16.4s, v16.4s, v9.4s\n"
- "smin v26.4s, v26.4s, v8.4s\n"
- "smin v21.4s, v21.4s, v8.4s\n"
- "smin v16.4s, v16.4s, v8.4s\n"
- "uzp1 v2.16b, v2.16b, v2.16b\n"
- "uzp1 v2.16b, v2.16b, v2.16b\n"
- "str s2, [x24, x11]\n"
- "ldr q2, [%x[params], #0x80]\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s26, [x23, x11]\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "str s21, [x22, x11]\n"
- "str s16, [x21, x11]\n"
- "mov v26.16b, v2.16b\n"
- "mov v21.16b, v2.16b\n"
- ".inst 0x4e9f97b5 // sdot v21.4s, v29.16b, v31.16b\n"
- "mov v16.16b, v2.16b\n"
- ".inst 0x4e8497a2 // sdot v2.4s, v29.16b, v4.16b\n"
- ".inst 0x4e9f9782 // sdot v2.4s, v28.16b, v31.16b\n"
- "add x11, x11, #0x4\n"
- "ext v4.16b, v4.16b, v4.16b, #0x1\n"
- "ext v31.16b, v31.16b, v31.16b, #0x1\n"
- ".inst 0x4e8497ba // sdot v26.4s, v29.16b, v4.16b\n"
- ".inst 0x4e9f97b0 // sdot v16.4s, v29.16b, v31.16b\n"
- ".inst 0x4e979795 // sdot v21.4s, v28.16b, v23.16b\n"
- ".inst 0x4e979762 // sdot v2.4s, v27.16b, v23.16b\n"
- "ext v23.16b, v23.16b, v23.16b, #0x1\n"
- ".inst 0x4e9f979a // sdot v26.4s, v28.16b, v31.16b\n"
- ".inst 0x4e979790 // sdot v16.4s, v28.16b, v23.16b\n"
- ".inst 0x4e929775 // sdot v21.4s, v27.16b, v18.16b\n"
- "ext v18.16b, v18.16b, v18.16b, #0x1\n"
- "sqrdmulh v2.4s, v2.4s, v6.4s\n"
- ".inst 0x4e97977a // sdot v26.4s, v27.16b, v23.16b\n"
- ".inst 0x4e929770 // sdot v16.4s, v27.16b, v18.16b\n"
- "and v29.16b, v2.16b, v1.16b\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v6.4s\n"
- "sqrdmulh v21.4s, v21.4s, v6.4s\n"
- "sqrdmulh v16.4s, v16.4s, v6.4s\n"
- "ldr q6, [%x[params], #0x120]\n"
- "sqadd v2.4s, v2.4s, v29.4s\n"
- "and v28.16b, v26.16b, v1.16b\n"
- "and v27.16b, v21.16b, v1.16b\n"
- "and v29.16b, v16.16b, v1.16b\n"
- "sshr v28.4s, v28.4s, #0x1f\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "srshl v2.4s, v2.4s, v1.4s\n"
- "sqadd v26.4s, v26.4s, v28.4s\n"
- "ldr q28, [%x[params], #0x100]\n"
- "sqadd v21.4s, v21.4s, v27.4s\n"
- "ldr q27, [%x[params], #0x110]\n"
- "sqadd v16.4s, v16.4s, v29.4s\n"
- "ldr q29, [%x[params], #0xf0]\n"
- "add v2.4s, v2.4s, v7.4s\n"
- "srshl v26.4s, v26.4s, v1.4s\n"
- "srshl v21.4s, v21.4s, v1.4s\n"
- "srshl v16.4s, v16.4s, v1.4s\n"
- "ldr q1, [%x[params], #0x130]\n"
- "smax v2.4s, v2.4s, v9.4s\n"
- "add v26.4s, v26.4s, v7.4s\n"
- "add v21.4s, v21.4s, v7.4s\n"
- "add v16.4s, v16.4s, v7.4s\n"
- "smin v2.4s, v2.4s, v8.4s\n"
- "smax v26.4s, v26.4s, v9.4s\n"
- "smax v21.4s, v21.4s, v9.4s\n"
- "smax v16.4s, v16.4s, v9.4s\n"
- "smin v26.4s, v26.4s, v8.4s\n"
- "smin v21.4s, v21.4s, v8.4s\n"
- "smin v16.4s, v16.4s, v8.4s\n"
- "uzp1 v2.16b, v2.16b, v2.16b\n"
- "uzp1 v2.16b, v2.16b, v2.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s2, [x24, x11]\n"
- "ldr q2, [%x[params], #0xe0]\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "add %x[params], %x[params], #0x140\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "str s26, [x23, x11]\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "str s21, [x22, x11]\n"
- "mov v26.16b, v2.16b\n"
- "str s16, [x21, x11]\n"
- "mov v21.16b, v2.16b\n"
- "mov v16.16b, v2.16b\n"
- ".inst 0x4e8397a2 // sdot v2.4s, v29.16b, v3.16b\n"
- ".inst 0x4e9e97b5 // sdot v21.4s, v29.16b, v30.16b\n"
- ".inst 0x4e9e9782 // sdot v2.4s, v28.16b, v30.16b\n"
- "ext v3.16b, v3.16b, v3.16b, #0x1\n"
- "add x11, x11, #0x4\n"
- "ext v30.16b, v30.16b, v30.16b, #0x1\n"
- ".inst 0x4e8397ba // sdot v26.4s, v29.16b, v3.16b\n"
- ".inst 0x4e9e97b0 // sdot v16.4s, v29.16b, v30.16b\n"
- ".inst 0x4e969795 // sdot v21.4s, v28.16b, v22.16b\n"
- ".inst 0x4e969762 // sdot v2.4s, v27.16b, v22.16b\n"
- "ext v22.16b, v22.16b, v22.16b, #0x1\n"
- ".inst 0x4e9e979a // sdot v26.4s, v28.16b, v30.16b\n"
- ".inst 0x4e969790 // sdot v16.4s, v28.16b, v22.16b\n"
- "sqrdmulh v2.4s, v2.4s, v6.4s\n"
- ".inst 0x4e919775 // sdot v21.4s, v27.16b, v17.16b\n"
- "ext v17.16b, v17.16b, v17.16b, #0x1\n"
- ".inst 0x4e96977a // sdot v26.4s, v27.16b, v22.16b\n"
- ".inst 0x4e919770 // sdot v16.4s, v27.16b, v17.16b\n"
- "and v29.16b, v2.16b, v1.16b\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v6.4s\n"
- "sqrdmulh v21.4s, v21.4s, v6.4s\n"
- "sqrdmulh v16.4s, v16.4s, v6.4s\n"
- "sqadd v2.4s, v2.4s, v29.4s\n"
- "and v28.16b, v26.16b, v1.16b\n"
- "and v27.16b, v21.16b, v1.16b\n"
- "and v29.16b, v16.16b, v1.16b\n"
- "sshr v28.4s, v28.4s, #0x1f\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "sqadd v26.4s, v26.4s, v28.4s\n"
- "sqadd v21.4s, v21.4s, v27.4s\n"
- "sqadd v16.4s, v16.4s, v29.4s\n"
- "srshl v2.4s, v2.4s, v1.4s\n"
- "srshl v26.4s, v26.4s, v1.4s\n"
- "srshl v21.4s, v21.4s, v1.4s\n"
- "srshl v16.4s, v16.4s, v1.4s\n"
- "add v2.4s, v2.4s, v7.4s\n"
- "add v26.4s, v26.4s, v7.4s\n"
- "add v21.4s, v21.4s, v7.4s\n"
- "add v16.4s, v16.4s, v7.4s\n"
- "smax v2.4s, v2.4s, v9.4s\n"
- "smax v26.4s, v26.4s, v9.4s\n"
- "smax v21.4s, v21.4s, v9.4s\n"
- "smax v16.4s, v16.4s, v9.4s\n"
- "smin v2.4s, v2.4s, v8.4s\n"
- "smin v26.4s, v26.4s, v8.4s\n"
- "smin v21.4s, v21.4s, v8.4s\n"
- "smin v16.4s, v16.4s, v8.4s\n"
- "uzp1 v2.16b, v2.16b, v2.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "uzp1 v2.16b, v2.16b, v2.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s2, [x24, x11]\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "str s26, [x23, x11]\n"
- "str s21, [x22, x11]\n"
- "str s16, [x21, x11]\n"
+ ".inst 0x4e8797b6 // sdot v22.4s, v29.16b, v7.16b\n"
+ ".inst 0x4e9c973e // sdot v30.4s, v25.16b, v28.16b\n"
+ "ext v28.16b, v28.16b, v28.16b, #0x1\n"
+ ".inst 0x4e8297b3 // sdot v19.4s, v29.16b, v2.16b\n"
+ ".inst 0x4e829776 // sdot v22.4s, v27.16b, v2.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v23.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v23.4s\n"
+ ".inst 0x4e9c9773 // sdot v19.4s, v27.16b, v28.16b\n"
+ ".inst 0x4e9c9736 // sdot v22.4s, v25.16b, v28.16b\n"
+ "and v16.16b, v30.16b, v21.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ ".inst 0x4e989733 // sdot v19.4s, v25.16b, v24.16b\n"
+ "sqrdmulh v22.4s, v22.4s, v23.4s\n"
+ "and v18.16b, v20.16b, v21.16b\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "and v17.16b, v22.16b, v21.16b\n"
+ "sqrdmulh v19.4s, v19.4s, v23.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v30.4s, v30.4s, v16.4s\n"
+ "and v16.16b, v19.16b, v21.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "srshl v30.4s, v30.4s, v21.4s\n"
+ "sqadd v20.4s, v20.4s, v18.4s\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "add v30.4s, v30.4s, v10.4s\n"
+ "srshl v20.4s, v20.4s, v21.4s\n"
+ "srshl v22.4s, v22.4s, v21.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "smax v30.4s, v30.4s, v12.4s\n"
+ "add v20.4s, v20.4s, v10.4s\n"
+ "add v22.4s, v22.4s, v10.4s\n"
+ "smin v30.4s, v30.4s, v11.4s\n"
+ "smax v20.4s, v20.4s, v12.4s\n"
+ "smax v22.4s, v22.4s, v12.4s\n"
+ "srshl v19.4s, v19.4s, v21.4s\n"
+ "smin v20.4s, v20.4s, v11.4s\n"
+ "smin v22.4s, v22.4s, v11.4s\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "add v19.4s, v19.4s, v10.4s\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "str s30, [x24, x11]\n"
+ "smax v19.4s, v19.4s, v12.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str s22, [x23, x11]\n"
+ "smin v19.4s, v19.4s, v11.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "str s20, [x21, x11]\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str s19, [x20, x11]\n"
"add x11, x11, #0x4\n"
- "beq 35f\n"
- "3:" // Oddments
- "and x20, %x[n_channels], #0xf\n"
- "add x14, x14, x12\n"
- "add x13, x13, x12\n"
- "add x10, x10, x12\n"
- "add x9, x9, x12\n"
- "add x28, x28, x12\n"
- "add x27, x27, x12\n"
- "add x26, x26, x12\n"
- "add x25, x25, x12\n"
- "tbz %x[n_channels], #3, 7f\n"
- "ldr d6, [x14], #0x8\n"
- "ldr d5, [x13], #0x8\n"
- "ldr d4, [x10], #0x8\n"
- "ldr d3, [x9], #0x8\n"
- "ldr d1, [x28], #0x8\n"
- "ldr d0, [x27], #0x8\n"
- "ldr d31, [x26], #0x8\n"
- "ldr d30, [x25], #0x8\n"
- "tbz %x[n_channels], #2, 5f\n"
- "ld1 { v6.s }[2], [x14], #0x4\n"
- "ld1 { v5.s }[2], [x13], #0x4\n"
- "ld1 { v4.s }[2], [x10], #0x4\n"
- "ld1 { v3.s }[2], [x9], #0x4\n"
- "ld1 { v1.s }[2], [x28], #0x4\n"
- "ld1 { v0.s }[2], [x27], #0x4\n"
- "ld1 { v31.s }[2], [x26], #0x4\n"
- "ld1 { v30.s }[2], [x25], #0x4\n"
- "tbz %x[n_channels], #1, 4f\n"
- "ld1 { v6.h }[6], [x14], #0x2\n"
- "ld1 { v5.h }[6], [x13], #0x2\n"
- "ld1 { v4.h }[6], [x10], #0x2\n"
- "ld1 { v3.h }[6], [x9], #0x2\n"
- "ld1 { v1.h }[6], [x28], #0x2\n"
- "ld1 { v0.h }[6], [x27], #0x2\n"
- "ld1 { v31.h }[6], [x26], #0x2\n"
- "ld1 { v30.h }[6], [x25], #0x2\n"
- "tbz %x[n_channels], #0, 11f\n"
- "ld1 { v6.b }[14], [x14], #0x1\n"
- "ld1 { v5.b }[14], [x13], #0x1\n"
- "ld1 { v4.b }[14], [x10], #0x1\n"
- "ld1 { v3.b }[14], [x9], #0x1\n"
- "ld1 { v1.b }[14], [x28], #0x1\n"
- "ld1 { v0.b }[14], [x27], #0x1\n"
- "ld1 { v31.b }[14], [x26], #0x1\n"
- "ld1 { v30.b }[14], [x25], #0x1\n"
- "b 11f\n"
- "4:" // Oddments: Load (A): Bit 3: Bit 2: Bit 1: Unset
- "tbz %x[n_channels], #0, 11f\n"
- "ld1 { v6.b }[12], [x14], #0x1\n"
- "ld1 { v5.b }[12], [x13], #0x1\n"
- "ld1 { v4.b }[12], [x10], #0x1\n"
- "ld1 { v3.b }[12], [x9], #0x1\n"
- "ld1 { v1.b }[12], [x28], #0x1\n"
- "ld1 { v0.b }[12], [x27], #0x1\n"
- "ld1 { v31.b }[12], [x26], #0x1\n"
- "ld1 { v30.b }[12], [x25], #0x1\n"
- "b 11f\n"
- "5:" // Oddments: Load (A): Bit 3: Bit 2: Unset
- "tbz %x[n_channels], #1, 6f\n"
- "ld1 { v6.h }[4], [x14], #0x2\n"
- "ld1 { v5.h }[4], [x13], #0x2\n"
- "ld1 { v4.h }[4], [x10], #0x2\n"
- "ld1 { v3.h }[4], [x9], #0x2\n"
- "ld1 { v1.h }[4], [x28], #0x2\n"
- "ld1 { v0.h }[4], [x27], #0x2\n"
- "ld1 { v31.h }[4], [x26], #0x2\n"
- "ld1 { v30.h }[4], [x25], #0x2\n"
- "tbz %x[n_channels], #0, 11f\n"
- "ld1 { v6.b }[10], [x14], #0x1\n"
- "ld1 { v5.b }[10], [x13], #0x1\n"
- "ld1 { v4.b }[10], [x10], #0x1\n"
- "ld1 { v3.b }[10], [x9], #0x1\n"
- "ld1 { v1.b }[10], [x28], #0x1\n"
- "ld1 { v0.b }[10], [x27], #0x1\n"
- "ld1 { v31.b }[10], [x26], #0x1\n"
- "ld1 { v30.b }[10], [x25], #0x1\n"
- "b 11f\n"
- "6:" // Oddments: Load (A): Bit 3: Bit 2: Unset: Bit 1: Unset
- "tbz %x[n_channels], #0, 11f\n"
- "ld1 { v6.b }[8], [x14], #0x1\n"
- "ld1 { v5.b }[8], [x13], #0x1\n"
- "ld1 { v4.b }[8], [x10], #0x1\n"
- "ld1 { v3.b }[8], [x9], #0x1\n"
- "ld1 { v1.b }[8], [x28], #0x1\n"
- "ld1 { v0.b }[8], [x27], #0x1\n"
- "ld1 { v31.b }[8], [x26], #0x1\n"
- "ld1 { v30.b }[8], [x25], #0x1\n"
- "b 11f\n"
- "7:" // Oddments: Load (A): Bit 3: Unset
- "tbz %x[n_channels], #2, 9f\n"
- "ldr s6, [x14], #0x4\n"
- "ldr s5, [x13], #0x4\n"
- "ldr s4, [x10], #0x4\n"
- "ldr s3, [x9], #0x4\n"
- "ldr s1, [x28], #0x4\n"
- "ldr s0, [x27], #0x4\n"
- "ldr s31, [x26], #0x4\n"
- "ldr s30, [x25], #0x4\n"
- "tbz %x[n_channels], #1, 8f\n"
- "ld1 { v6.h }[2], [x14], #0x2\n"
- "ld1 { v5.h }[2], [x13], #0x2\n"
- "ld1 { v4.h }[2], [x10], #0x2\n"
- "ld1 { v3.h }[2], [x9], #0x2\n"
- "ld1 { v1.h }[2], [x28], #0x2\n"
- "ld1 { v0.h }[2], [x27], #0x2\n"
- "ld1 { v31.h }[2], [x26], #0x2\n"
- "ld1 { v30.h }[2], [x25], #0x2\n"
- "tbz %x[n_channels], #0, 11f\n"
- "ld1 { v6.b }[6], [x14], #0x1\n"
- "ld1 { v5.b }[6], [x13], #0x1\n"
- "ld1 { v4.b }[6], [x10], #0x1\n"
- "ld1 { v3.b }[6], [x9], #0x1\n"
- "ld1 { v1.b }[6], [x28], #0x1\n"
- "ld1 { v0.b }[6], [x27], #0x1\n"
- "ld1 { v31.b }[6], [x26], #0x1\n"
- "ld1 { v30.b }[6], [x25], #0x1\n"
- "b 11f\n"
- "8:" // Oddments: Load (A): Bit 3: Unset: Bit 2: Bit 1: Unset
- "tbz %x[n_channels], #0, 11f\n"
- "ld1 { v6.b }[4], [x14], #0x1\n"
- "ld1 { v5.b }[4], [x13], #0x1\n"
- "ld1 { v4.b }[4], [x10], #0x1\n"
- "ld1 { v3.b }[4], [x9], #0x1\n"
- "ld1 { v1.b }[4], [x28], #0x1\n"
- "ld1 { v0.b }[4], [x27], #0x1\n"
- "ld1 { v31.b }[4], [x26], #0x1\n"
- "ld1 { v30.b }[4], [x25], #0x1\n"
- "b 11f\n"
- "9:" // Oddments: Load (A): Bit 3: Unset: Bit 2: Unset
- "tbz %x[n_channels], #1, 10f\n"
- "ldr h6, [x14], #0x2\n"
- "ldr h5, [x13], #0x2\n"
- "ldr h4, [x10], #0x2\n"
- "ldr h3, [x9], #0x2\n"
- "ldr h1, [x28], #0x2\n"
- "ldr h0, [x27], #0x2\n"
- "ldr h31, [x26], #0x2\n"
- "ldr h30, [x25], #0x2\n"
- "tbz %x[n_channels], #0, 11f\n"
- "ld1 { v6.b }[2], [x14], #0x1\n"
- "ld1 { v5.b }[2], [x13], #0x1\n"
- "ld1 { v4.b }[2], [x10], #0x1\n"
- "ld1 { v3.b }[2], [x9], #0x1\n"
- "ld1 { v1.b }[2], [x28], #0x1\n"
- "ld1 { v0.b }[2], [x27], #0x1\n"
- "ld1 { v31.b }[2], [x26], #0x1\n"
- "ld1 { v30.b }[2], [x25], #0x1\n"
- "b 11f\n"
- "10:" // Oddments: Load (A): Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
- "ldr b6, [x14], #0x1\n"
- "ldr b5, [x13], #0x1\n"
- "ldr b4, [x10], #0x1\n"
- "ldr b3, [x9], #0x1\n"
- "ldr b1, [x28], #0x1\n"
- "ldr b0, [x27], #0x1\n"
- "ldr b31, [x26], #0x1\n"
- "ldr b30, [x25], #0x1\n"
- "11:" // Oddments: Load (A): Bit 3: End
- "ldp x14, x13, [%x[inptrs], #0x40]\n"
- "ldp x10, x9, [%x[inptrs], #0x50]\n"
- "add x14, x14, x12\n"
- "add x13, x13, x12\n"
- "ldp x28, x27, [%x[inptrs], #0x60]\n"
- "ldp x26, x25, [%x[inptrs], #0x70]\n"
- "add x10, x10, x12\n"
- "add x9, x9, x12\n"
- "add x28, x28, x12\n"
- "add x27, x27, x12\n"
- "add x26, x26, x12\n"
- "add x25, x25, x12\n"
- "tbz %x[n_channels], #3, 15f\n"
- "ldr d25, [x14], #0x8\n"
- "ldr d24, [x13], #0x8\n"
- "ldr d23, [x10], #0x8\n"
- "ldr d22, [x9], #0x8\n"
- "ldr d20, [x28], #0x8\n"
- "ldr d19, [x27], #0x8\n"
- "ldr d18, [x26], #0x8\n"
- "ldr d17, [x25], #0x8\n"
- "tbz %x[n_channels], #2, 13f\n"
- "ld1 { v25.s }[2], [x14], #0x4\n"
- "ld1 { v24.s }[2], [x13], #0x4\n"
- "ld1 { v23.s }[2], [x10], #0x4\n"
- "ld1 { v22.s }[2], [x9], #0x4\n"
- "ld1 { v20.s }[2], [x28], #0x4\n"
- "ld1 { v19.s }[2], [x27], #0x4\n"
- "ld1 { v18.s }[2], [x26], #0x4\n"
- "ld1 { v17.s }[2], [x25], #0x4\n"
- "tbz %x[n_channels], #1, 12f\n"
- "ld1 { v25.h }[6], [x14], #0x2\n"
- "ld1 { v24.h }[6], [x13], #0x2\n"
- "ld1 { v23.h }[6], [x10], #0x2\n"
- "ld1 { v22.h }[6], [x9], #0x2\n"
- "ld1 { v20.h }[6], [x28], #0x2\n"
- "ld1 { v19.h }[6], [x27], #0x2\n"
- "ld1 { v18.h }[6], [x26], #0x2\n"
- "ld1 { v17.h }[6], [x25], #0x2\n"
- "tbz %x[n_channels], #0, 19f\n"
- "ld1 { v25.b }[14], [x14], #0x1\n"
- "ld1 { v24.b }[14], [x13], #0x1\n"
- "ld1 { v23.b }[14], [x10], #0x1\n"
- "ld1 { v22.b }[14], [x9], #0x1\n"
- "ld1 { v20.b }[14], [x28], #0x1\n"
- "ld1 { v19.b }[14], [x27], #0x1\n"
- "ld1 { v18.b }[14], [x26], #0x1\n"
- "ld1 { v17.b }[14], [x25], #0x1\n"
- "b 19f\n"
- "12:" // Oddments: Load (B): Bit 3: Bit 2: Bit 1: Unset
- "tbz %x[n_channels], #0, 19f\n"
- "ld1 { v25.b }[12], [x14], #0x1\n"
- "ld1 { v24.b }[12], [x13], #0x1\n"
- "ld1 { v23.b }[12], [x10], #0x1\n"
- "ld1 { v22.b }[12], [x9], #0x1\n"
- "ld1 { v20.b }[12], [x28], #0x1\n"
- "ld1 { v19.b }[12], [x27], #0x1\n"
- "ld1 { v18.b }[12], [x26], #0x1\n"
- "ld1 { v17.b }[12], [x25], #0x1\n"
- "b 19f\n"
- "13:" // Oddments: Load (B): Bit 3: Bit 2: Unset
- "tbz %x[n_channels], #1, 14f\n"
- "ld1 { v25.h }[4], [x14], #0x2\n"
- "ld1 { v24.h }[4], [x13], #0x2\n"
- "ld1 { v23.h }[4], [x10], #0x2\n"
- "ld1 { v22.h }[4], [x9], #0x2\n"
- "ld1 { v20.h }[4], [x28], #0x2\n"
- "ld1 { v19.h }[4], [x27], #0x2\n"
- "ld1 { v18.h }[4], [x26], #0x2\n"
- "ld1 { v17.h }[4], [x25], #0x2\n"
- "tbz %x[n_channels], #0, 19f\n"
- "ld1 { v25.b }[10], [x14], #0x1\n"
- "ld1 { v24.b }[10], [x13], #0x1\n"
- "ld1 { v23.b }[10], [x10], #0x1\n"
- "ld1 { v22.b }[10], [x9], #0x1\n"
- "ld1 { v20.b }[10], [x28], #0x1\n"
- "ld1 { v19.b }[10], [x27], #0x1\n"
- "ld1 { v18.b }[10], [x26], #0x1\n"
- "ld1 { v17.b }[10], [x25], #0x1\n"
- "b 19f\n"
- "14:" // Oddments: Load (B): Bit 3: Bit 2: Unset: Bit 1: Unset
- "tbz %x[n_channels], #0, 19f\n"
- "ld1 { v25.b }[8], [x14], #0x1\n"
- "ld1 { v24.b }[8], [x13], #0x1\n"
- "ld1 { v23.b }[8], [x10], #0x1\n"
- "ld1 { v22.b }[8], [x9], #0x1\n"
- "ld1 { v20.b }[8], [x28], #0x1\n"
- "ld1 { v19.b }[8], [x27], #0x1\n"
- "ld1 { v18.b }[8], [x26], #0x1\n"
- "ld1 { v17.b }[8], [x25], #0x1\n"
- "b 19f\n"
- "15:" // Oddments: Load (B): Bit 3: Unset
- "tbz %x[n_channels], #2, 17f\n"
- "ldr s25, [x14], #0x4\n"
- "ldr s24, [x13], #0x4\n"
- "ldr s23, [x10], #0x4\n"
- "ldr s22, [x9], #0x4\n"
- "ldr s20, [x28], #0x4\n"
- "ldr s19, [x27], #0x4\n"
- "ldr s18, [x26], #0x4\n"
- "ldr s17, [x25], #0x4\n"
- "tbz %x[n_channels], #1, 16f\n"
- "ld1 { v25.h }[2], [x14], #0x2\n"
- "ld1 { v24.h }[2], [x13], #0x2\n"
- "ld1 { v23.h }[2], [x10], #0x2\n"
- "ld1 { v22.h }[2], [x9], #0x2\n"
- "ld1 { v20.h }[2], [x28], #0x2\n"
- "ld1 { v19.h }[2], [x27], #0x2\n"
- "ld1 { v18.h }[2], [x26], #0x2\n"
- "ld1 { v17.h }[2], [x25], #0x2\n"
- "tbz %x[n_channels], #0, 19f\n"
- "ld1 { v25.b }[6], [x14], #0x1\n"
- "ld1 { v24.b }[6], [x13], #0x1\n"
- "ld1 { v23.b }[6], [x10], #0x1\n"
- "ld1 { v22.b }[6], [x9], #0x1\n"
- "ld1 { v20.b }[6], [x28], #0x1\n"
- "ld1 { v19.b }[6], [x27], #0x1\n"
- "ld1 { v18.b }[6], [x26], #0x1\n"
- "ld1 { v17.b }[6], [x25], #0x1\n"
- "b 19f\n"
- "16:" // Oddments: Load (B): Bit 3: Unset: Bit 2: Bit 1: Unset
- "tbz %x[n_channels], #0, 19f\n"
- "ld1 { v25.b }[4], [x14], #0x1\n"
- "ld1 { v24.b }[4], [x13], #0x1\n"
- "ld1 { v23.b }[4], [x10], #0x1\n"
- "ld1 { v22.b }[4], [x9], #0x1\n"
- "ld1 { v20.b }[4], [x28], #0x1\n"
- "ld1 { v19.b }[4], [x27], #0x1\n"
- "ld1 { v18.b }[4], [x26], #0x1\n"
- "ld1 { v17.b }[4], [x25], #0x1\n"
- "b 19f\n"
- "17:" // Oddments: Load (B): Bit 3: Unset: Bit 2: Unset
- "tbz %x[n_channels], #1, 18f\n"
- "ldr h25, [x14], #0x2\n"
- "ldr h24, [x13], #0x2\n"
- "ldr h23, [x10], #0x2\n"
- "ldr h22, [x9], #0x2\n"
- "ldr h20, [x28], #0x2\n"
- "ldr h19, [x27], #0x2\n"
- "ldr h18, [x26], #0x2\n"
- "ldr h17, [x25], #0x2\n"
- "tbz %x[n_channels], #0, 19f\n"
- "ld1 { v25.b }[2], [x14], #0x1\n"
- "ld1 { v24.b }[2], [x13], #0x1\n"
- "ld1 { v23.b }[2], [x10], #0x1\n"
- "ld1 { v22.b }[2], [x9], #0x1\n"
- "ld1 { v20.b }[2], [x28], #0x1\n"
- "ld1 { v19.b }[2], [x27], #0x1\n"
- "ld1 { v18.b }[2], [x26], #0x1\n"
- "ld1 { v17.b }[2], [x25], #0x1\n"
- "b 19f\n"
- "18:" // Oddments: Load (B): Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
- "ldr b25, [x14], #0x1\n"
- "ldr b24, [x13], #0x1\n"
- "ldr b23, [x10], #0x1\n"
- "ldr b22, [x9], #0x1\n"
- "ldr b20, [x28], #0x1\n"
- "ldr b19, [x27], #0x1\n"
- "ldr b18, [x26], #0x1\n"
- "ldr b17, [x25], #0x1\n"
- "19:" // Oddments: Load (B): Bit 3: End
+ "bgt 1b\n"
+ "tst %x[n_channels], #0xf\n"
+ "beq 34f\n"
+ "2:" // Oddments
+ "and x19, %x[n_channels], #0xf\n"
+ "add x15, x15, x11\n"
+ "add x14, x14, x11\n"
+ "add x13, x13, x11\n"
+ "add x12, x12, x11\n"
+ "add x10, x10, x11\n"
+ "add x9, x9, x11\n"
+ "add x27, x27, x11\n"
+ "add x26, x26, x11\n"
+ "tbz %x[n_channels], #3, 6f\n"
+ "ld1 { v27.d }[0], [x15], #0x8\n"
+ "ld1 { v1.d }[0], [x14], #0x8\n"
+ "ld1 { v25.d }[0], [x13], #0x8\n"
+ "ld1 { v23.d }[0], [x12], #0x8\n"
+ "ld1 { v31.d }[0], [x10], #0x8\n"
+ "ld1 { v28.d }[0], [x9], #0x8\n"
+ "ld1 { v21.d }[0], [x27], #0x8\n"
+ "ld1 { v26.d }[0], [x26], #0x8\n"
+ "tbz %x[n_channels], #2, 4f\n"
+ "ld1 { v27.s }[2], [x15], #0x4\n"
+ "ld1 { v1.s }[2], [x14], #0x4\n"
+ "ld1 { v25.s }[2], [x13], #0x4\n"
+ "ld1 { v23.s }[2], [x12], #0x4\n"
+ "ld1 { v31.s }[2], [x10], #0x4\n"
+ "ld1 { v28.s }[2], [x9], #0x4\n"
+ "ld1 { v21.s }[2], [x27], #0x4\n"
+ "ld1 { v26.s }[2], [x26], #0x4\n"
+ "tbz %x[n_channels], #1, 3f\n"
+ "ld1 { v27.h }[6], [x15], #0x2\n"
+ "ld1 { v1.h }[6], [x14], #0x2\n"
+ "ld1 { v25.h }[6], [x13], #0x2\n"
+ "ld1 { v23.h }[6], [x12], #0x2\n"
+ "ld1 { v31.h }[6], [x10], #0x2\n"
+ "ld1 { v28.h }[6], [x9], #0x2\n"
+ "ld1 { v21.h }[6], [x27], #0x2\n"
+ "ld1 { v26.h }[6], [x26], #0x2\n"
+ "tbz %x[n_channels], #0, 10f\n"
+ "ld1 { v27.b }[14], [x15], #0x1\n"
+ "ld1 { v1.b }[14], [x14], #0x1\n"
+ "ld1 { v25.b }[14], [x13], #0x1\n"
+ "ld1 { v23.b }[14], [x12], #0x1\n"
+ "ld1 { v31.b }[14], [x10], #0x1\n"
+ "ld1 { v28.b }[14], [x9], #0x1\n"
+ "ld1 { v21.b }[14], [x27], #0x1\n"
+ "ld1 { v26.b }[14], [x26], #0x1\n"
+ "b 10f\n"
+ "3:" // Oddments: Load (A): Bit 3: Bit 2: Bit 1: Unset
+ "tbz %x[n_channels], #0, 10f\n"
+ "ld1 { v27.b }[12], [x15], #0x1\n"
+ "ld1 { v1.b }[12], [x14], #0x1\n"
+ "ld1 { v25.b }[12], [x13], #0x1\n"
+ "ld1 { v23.b }[12], [x12], #0x1\n"
+ "ld1 { v31.b }[12], [x10], #0x1\n"
+ "ld1 { v28.b }[12], [x9], #0x1\n"
+ "ld1 { v21.b }[12], [x27], #0x1\n"
+ "ld1 { v26.b }[12], [x26], #0x1\n"
+ "b 10f\n"
+ "4:" // Oddments: Load (A): Bit 3: Bit 2: Unset
+ "tbz %x[n_channels], #1, 5f\n"
+ "ld1 { v27.h }[4], [x15], #0x2\n"
+ "ld1 { v1.h }[4], [x14], #0x2\n"
+ "ld1 { v25.h }[4], [x13], #0x2\n"
+ "ld1 { v23.h }[4], [x12], #0x2\n"
+ "ld1 { v31.h }[4], [x10], #0x2\n"
+ "ld1 { v28.h }[4], [x9], #0x2\n"
+ "ld1 { v21.h }[4], [x27], #0x2\n"
+ "ld1 { v26.h }[4], [x26], #0x2\n"
+ "tbz %x[n_channels], #0, 10f\n"
+ "ld1 { v27.b }[10], [x15], #0x1\n"
+ "ld1 { v1.b }[10], [x14], #0x1\n"
+ "ld1 { v25.b }[10], [x13], #0x1\n"
+ "ld1 { v23.b }[10], [x12], #0x1\n"
+ "ld1 { v31.b }[10], [x10], #0x1\n"
+ "ld1 { v28.b }[10], [x9], #0x1\n"
+ "ld1 { v21.b }[10], [x27], #0x1\n"
+ "ld1 { v26.b }[10], [x26], #0x1\n"
+ "b 10f\n"
+ "5:" // Oddments: Load (A): Bit 3: Bit 2: Unset: Bit 1: Unset
+ "tbz %x[n_channels], #0, 10f\n"
+ "ld1 { v27.b }[8], [x15], #0x1\n"
+ "ld1 { v1.b }[8], [x14], #0x1\n"
+ "ld1 { v25.b }[8], [x13], #0x1\n"
+ "ld1 { v23.b }[8], [x12], #0x1\n"
+ "ld1 { v31.b }[8], [x10], #0x1\n"
+ "ld1 { v28.b }[8], [x9], #0x1\n"
+ "ld1 { v21.b }[8], [x27], #0x1\n"
+ "ld1 { v26.b }[8], [x26], #0x1\n"
+ "b 10f\n"
+ "6:" // Oddments: Load (A): Bit 3: Unset
+ "tbz %x[n_channels], #2, 8f\n"
+ "ld1 { v27.s }[0], [x15], #0x4\n"
+ "ld1 { v1.s }[0], [x14], #0x4\n"
+ "ld1 { v25.s }[0], [x13], #0x4\n"
+ "ld1 { v23.s }[0], [x12], #0x4\n"
+ "ld1 { v31.s }[0], [x10], #0x4\n"
+ "ld1 { v28.s }[0], [x9], #0x4\n"
+ "ld1 { v21.s }[0], [x27], #0x4\n"
+ "ld1 { v26.s }[0], [x26], #0x4\n"
+ "tbz %x[n_channels], #1, 7f\n"
+ "ld1 { v27.h }[2], [x15], #0x2\n"
+ "ld1 { v1.h }[2], [x14], #0x2\n"
+ "ld1 { v25.h }[2], [x13], #0x2\n"
+ "ld1 { v23.h }[2], [x12], #0x2\n"
+ "ld1 { v31.h }[2], [x10], #0x2\n"
+ "ld1 { v28.h }[2], [x9], #0x2\n"
+ "ld1 { v21.h }[2], [x27], #0x2\n"
+ "ld1 { v26.h }[2], [x26], #0x2\n"
+ "tbz %x[n_channels], #0, 10f\n"
+ "ld1 { v27.b }[6], [x15], #0x1\n"
+ "ld1 { v1.b }[6], [x14], #0x1\n"
+ "ld1 { v25.b }[6], [x13], #0x1\n"
+ "ld1 { v23.b }[6], [x12], #0x1\n"
+ "ld1 { v31.b }[6], [x10], #0x1\n"
+ "ld1 { v28.b }[6], [x9], #0x1\n"
+ "ld1 { v21.b }[6], [x27], #0x1\n"
+ "ld1 { v26.b }[6], [x26], #0x1\n"
+ "b 10f\n"
+ "7:" // Oddments: Load (A): Bit 3: Unset: Bit 2: Bit 1: Unset
+ "tbz %x[n_channels], #0, 10f\n"
+ "ld1 { v27.b }[4], [x15], #0x1\n"
+ "ld1 { v1.b }[4], [x14], #0x1\n"
+ "ld1 { v25.b }[4], [x13], #0x1\n"
+ "ld1 { v23.b }[4], [x12], #0x1\n"
+ "ld1 { v31.b }[4], [x10], #0x1\n"
+ "ld1 { v28.b }[4], [x9], #0x1\n"
+ "ld1 { v21.b }[4], [x27], #0x1\n"
+ "ld1 { v26.b }[4], [x26], #0x1\n"
+ "b 10f\n"
+ "8:" // Oddments: Load (A): Bit 3: Unset: Bit 2: Unset
+ "tbz %x[n_channels], #1, 9f\n"
+ "ld1 { v27.h }[0], [x15], #0x2\n"
+ "ld1 { v1.h }[0], [x14], #0x2\n"
+ "ld1 { v25.h }[0], [x13], #0x2\n"
+ "ld1 { v23.h }[0], [x12], #0x2\n"
+ "ld1 { v31.h }[0], [x10], #0x2\n"
+ "ld1 { v28.h }[0], [x9], #0x2\n"
+ "ld1 { v21.h }[0], [x27], #0x2\n"
+ "ld1 { v26.h }[0], [x26], #0x2\n"
+ "tbz %x[n_channels], #0, 10f\n"
+ "ld1 { v27.b }[2], [x15], #0x1\n"
+ "ld1 { v1.b }[2], [x14], #0x1\n"
+ "ld1 { v25.b }[2], [x13], #0x1\n"
+ "ld1 { v23.b }[2], [x12], #0x1\n"
+ "ld1 { v31.b }[2], [x10], #0x1\n"
+ "ld1 { v28.b }[2], [x9], #0x1\n"
+ "ld1 { v21.b }[2], [x27], #0x1\n"
+ "ld1 { v26.b }[2], [x26], #0x1\n"
+ "b 10f\n"
+ "9:" // Oddments: Load (A): Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
+ "tbz %x[n_channels], #0, 10f\n"
+ "ld1 { v27.b }[0], [x15], #0x1\n"
+ "ld1 { v1.b }[0], [x14], #0x1\n"
+ "ld1 { v25.b }[0], [x13], #0x1\n"
+ "ld1 { v23.b }[0], [x12], #0x1\n"
+ "ld1 { v31.b }[0], [x10], #0x1\n"
+ "ld1 { v28.b }[0], [x9], #0x1\n"
+ "ld1 { v21.b }[0], [x27], #0x1\n"
+ "ld1 { v26.b }[0], [x26], #0x1\n"
+ "10:" // Oddments: Load (A): Bit 3: End
+ "ldp x15, x14, [%x[inptrs], #0x40]\n"
+ "add x15, x15, x11\n"
+ "ldp x13, x12, [%x[inptrs], #0x50]\n"
+ "ldp x10, x9, [%x[inptrs], #0x60]\n"
+ "add x14, x14, x11\n"
+ "ldp x27, x26, [%x[inptrs], #0x70]\n"
+ "add x13, x13, x11\n"
+ "add x12, x12, x11\n"
+ "add x10, x10, x11\n"
+ "add x9, x9, x11\n"
+ "add x27, x27, x11\n"
+ "add x26, x26, x11\n"
+ "tbz %x[n_channels], #3, 14f\n"
+ "ld1 { v24.d }[0], [x15], #0x8\n"
+ "ld1 { v22.d }[0], [x14], #0x8\n"
+ "ld1 { v20.d }[0], [x13], #0x8\n"
+ "ld1 { v16.d }[0], [x12], #0x8\n"
+ "ld1 { v19.d }[0], [x10], #0x8\n"
+ "ld1 { v0.d }[0], [x9], #0x8\n"
+ "ld1 { v18.d }[0], [x27], #0x8\n"
+ "ld1 { v17.d }[0], [x26], #0x8\n"
+ "tbz %x[n_channels], #2, 12f\n"
+ "ld1 { v24.s }[2], [x15], #0x4\n"
+ "ld1 { v22.s }[2], [x14], #0x4\n"
+ "ld1 { v20.s }[2], [x13], #0x4\n"
+ "ld1 { v16.s }[2], [x12], #0x4\n"
+ "ld1 { v19.s }[2], [x10], #0x4\n"
+ "ld1 { v0.s }[2], [x9], #0x4\n"
+ "ld1 { v18.s }[2], [x27], #0x4\n"
+ "ld1 { v17.s }[2], [x26], #0x4\n"
+ "tbz %x[n_channels], #1, 11f\n"
+ "ld1 { v24.h }[6], [x15], #0x2\n"
+ "ld1 { v22.h }[6], [x14], #0x2\n"
+ "ld1 { v20.h }[6], [x13], #0x2\n"
+ "ld1 { v16.h }[6], [x12], #0x2\n"
+ "ld1 { v19.h }[6], [x10], #0x2\n"
+ "ld1 { v0.h }[6], [x9], #0x2\n"
+ "ld1 { v18.h }[6], [x27], #0x2\n"
+ "ld1 { v17.h }[6], [x26], #0x2\n"
+ "tbz %x[n_channels], #0, 18f\n"
+ "ld1 { v24.b }[14], [x15], #0x1\n"
+ "ld1 { v22.b }[14], [x14], #0x1\n"
+ "ld1 { v20.b }[14], [x13], #0x1\n"
+ "ld1 { v16.b }[14], [x12], #0x1\n"
+ "ld1 { v19.b }[14], [x10], #0x1\n"
+ "ld1 { v0.b }[14], [x9], #0x1\n"
+ "ld1 { v18.b }[14], [x27], #0x1\n"
+ "ld1 { v17.b }[14], [x26], #0x1\n"
+ "b 18f\n"
+ "11:" // Oddments: Load (B): Bit 3: Bit 2: Bit 1: Unset
+ "tbz %x[n_channels], #0, 18f\n"
+ "ld1 { v24.b }[12], [x15], #0x1\n"
+ "ld1 { v22.b }[12], [x14], #0x1\n"
+ "ld1 { v20.b }[12], [x13], #0x1\n"
+ "ld1 { v16.b }[12], [x12], #0x1\n"
+ "ld1 { v19.b }[12], [x10], #0x1\n"
+ "ld1 { v0.b }[12], [x9], #0x1\n"
+ "ld1 { v18.b }[12], [x27], #0x1\n"
+ "ld1 { v17.b }[12], [x26], #0x1\n"
+ "b 18f\n"
+ "12:" // Oddments: Load (B): Bit 3: Bit 2: Unset
+ "tbz %x[n_channels], #1, 13f\n"
+ "ld1 { v24.h }[4], [x15], #0x2\n"
+ "ld1 { v22.h }[4], [x14], #0x2\n"
+ "ld1 { v20.h }[4], [x13], #0x2\n"
+ "ld1 { v16.h }[4], [x12], #0x2\n"
+ "ld1 { v19.h }[4], [x10], #0x2\n"
+ "ld1 { v0.h }[4], [x9], #0x2\n"
+ "ld1 { v18.h }[4], [x27], #0x2\n"
+ "ld1 { v17.h }[4], [x26], #0x2\n"
+ "tbz %x[n_channels], #0, 18f\n"
+ "ld1 { v24.b }[10], [x15], #0x1\n"
+ "ld1 { v22.b }[10], [x14], #0x1\n"
+ "ld1 { v20.b }[10], [x13], #0x1\n"
+ "ld1 { v16.b }[10], [x12], #0x1\n"
+ "ld1 { v19.b }[10], [x10], #0x1\n"
+ "ld1 { v0.b }[10], [x9], #0x1\n"
+ "ld1 { v18.b }[10], [x27], #0x1\n"
+ "ld1 { v17.b }[10], [x26], #0x1\n"
+ "b 18f\n"
+ "13:" // Oddments: Load (B): Bit 3: Bit 2: Unset: Bit 1: Unset
+ "tbz %x[n_channels], #0, 18f\n"
+ "ld1 { v24.b }[8], [x15], #0x1\n"
+ "ld1 { v22.b }[8], [x14], #0x1\n"
+ "ld1 { v20.b }[8], [x13], #0x1\n"
+ "ld1 { v16.b }[8], [x12], #0x1\n"
+ "ld1 { v19.b }[8], [x10], #0x1\n"
+ "ld1 { v0.b }[8], [x9], #0x1\n"
+ "ld1 { v18.b }[8], [x27], #0x1\n"
+ "ld1 { v17.b }[8], [x26], #0x1\n"
+ "b 18f\n"
+ "14:" // Oddments: Load (B): Bit 3: Unset
+ "tbz %x[n_channels], #2, 16f\n"
+ "ld1 { v24.s }[0], [x15], #0x4\n"
+ "ld1 { v22.s }[0], [x14], #0x4\n"
+ "ld1 { v20.s }[0], [x13], #0x4\n"
+ "ld1 { v16.s }[0], [x12], #0x4\n"
+ "ld1 { v19.s }[0], [x10], #0x4\n"
+ "ld1 { v0.s }[0], [x9], #0x4\n"
+ "ld1 { v18.s }[0], [x27], #0x4\n"
+ "ld1 { v17.s }[0], [x26], #0x4\n"
+ "tbz %x[n_channels], #1, 15f\n"
+ "ld1 { v24.h }[2], [x15], #0x2\n"
+ "ld1 { v22.h }[2], [x14], #0x2\n"
+ "ld1 { v20.h }[2], [x13], #0x2\n"
+ "ld1 { v16.h }[2], [x12], #0x2\n"
+ "ld1 { v19.h }[2], [x10], #0x2\n"
+ "ld1 { v0.h }[2], [x9], #0x2\n"
+ "ld1 { v18.h }[2], [x27], #0x2\n"
+ "ld1 { v17.h }[2], [x26], #0x2\n"
+ "tbz %x[n_channels], #0, 18f\n"
+ "ld1 { v24.b }[6], [x15], #0x1\n"
+ "ld1 { v22.b }[6], [x14], #0x1\n"
+ "ld1 { v20.b }[6], [x13], #0x1\n"
+ "ld1 { v16.b }[6], [x12], #0x1\n"
+ "ld1 { v19.b }[6], [x10], #0x1\n"
+ "ld1 { v0.b }[6], [x9], #0x1\n"
+ "ld1 { v18.b }[6], [x27], #0x1\n"
+ "ld1 { v17.b }[6], [x26], #0x1\n"
+ "b 18f\n"
+ "15:" // Oddments: Load (B): Bit 3: Unset: Bit 2: Bit 1: Unset
+ "tbz %x[n_channels], #0, 18f\n"
+ "ld1 { v24.b }[4], [x15], #0x1\n"
+ "ld1 { v22.b }[4], [x14], #0x1\n"
+ "ld1 { v20.b }[4], [x13], #0x1\n"
+ "ld1 { v16.b }[4], [x12], #0x1\n"
+ "ld1 { v19.b }[4], [x10], #0x1\n"
+ "ld1 { v0.b }[4], [x9], #0x1\n"
+ "ld1 { v18.b }[4], [x27], #0x1\n"
+ "ld1 { v17.b }[4], [x26], #0x1\n"
+ "b 18f\n"
+ "16:" // Oddments: Load (B): Bit 3: Unset: Bit 2: Unset
+ "tbz %x[n_channels], #1, 17f\n"
+ "ld1 { v24.h }[0], [x15], #0x2\n"
+ "ld1 { v22.h }[0], [x14], #0x2\n"
+ "ld1 { v20.h }[0], [x13], #0x2\n"
+ "ld1 { v16.h }[0], [x12], #0x2\n"
+ "ld1 { v19.h }[0], [x10], #0x2\n"
+ "ld1 { v0.h }[0], [x9], #0x2\n"
+ "ld1 { v18.h }[0], [x27], #0x2\n"
+ "ld1 { v17.h }[0], [x26], #0x2\n"
+ "tbz %x[n_channels], #0, 18f\n"
+ "ld1 { v24.b }[2], [x15], #0x1\n"
+ "ld1 { v22.b }[2], [x14], #0x1\n"
+ "ld1 { v20.b }[2], [x13], #0x1\n"
+ "ld1 { v16.b }[2], [x12], #0x1\n"
+ "ld1 { v19.b }[2], [x10], #0x1\n"
+ "ld1 { v0.b }[2], [x9], #0x1\n"
+ "ld1 { v18.b }[2], [x27], #0x1\n"
+ "ld1 { v17.b }[2], [x26], #0x1\n"
+ "b 18f\n"
+ "17:" // Oddments: Load (B): Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
+ "tbz %x[n_channels], #0, 18f\n"
+ "ld1 { v24.b }[0], [x15], #0x1\n"
+ "ld1 { v22.b }[0], [x14], #0x1\n"
+ "ld1 { v20.b }[0], [x13], #0x1\n"
+ "ld1 { v16.b }[0], [x12], #0x1\n"
+ "ld1 { v19.b }[0], [x10], #0x1\n"
+ "ld1 { v0.b }[0], [x9], #0x1\n"
+ "ld1 { v18.b }[0], [x27], #0x1\n"
+ "ld1 { v17.b }[0], [x26], #0x1\n"
+ "18:" // Oddments: Load (B): Bit 3: End
+ "zip1 v6.16b, v27.16b, v25.16b\n"
+ "ldr q30, [%x[params], #0x0]\n"
+ "cmp x19, #0x4\n"
+ "zip2 v9.16b, v27.16b, v25.16b\n"
"ldr q29, [%x[params], #0x10]\n"
- "ldr q28, [%x[params], #0x20]\n"
- "zip2 v2.16b, v6.16b, v4.16b\n"
- "zip1 v6.16b, v6.16b, v4.16b\n"
- "ldr q27, [%x[params], #0x30]\n"
- "zip1 v4.16b, v5.16b, v3.16b\n"
- "zip2 v3.16b, v5.16b, v3.16b\n"
- "cmp x20, #0x4\n"
- "zip2 v5.16b, v6.16b, v4.16b\n"
- "zip1 v6.16b, v6.16b, v4.16b\n"
- "zip1 v4.16b, v2.16b, v3.16b\n"
- "zip2 v3.16b, v2.16b, v3.16b\n"
- "ldr q2, [%x[params], #0x0]\n"
- "zip2 v26.16b, v1.16b, v31.16b\n"
- "zip1 v1.16b, v1.16b, v31.16b\n"
- "zip1 v31.16b, v0.16b, v30.16b\n"
- "zip2 v30.16b, v0.16b, v30.16b\n"
- "zip2 v21.16b, v25.16b, v23.16b\n"
- "zip1 v25.16b, v25.16b, v23.16b\n"
- "zip1 v23.16b, v24.16b, v22.16b\n"
- "zip2 v22.16b, v24.16b, v22.16b\n"
- "zip2 v16.16b, v20.16b, v18.16b\n"
- "zip1 v20.16b, v20.16b, v18.16b\n"
- "zip1 v18.16b, v19.16b, v17.16b\n"
- "zip2 v17.16b, v19.16b, v17.16b\n"
- "zip2 v0.16b, v1.16b, v31.16b\n"
- "zip1 v1.16b, v1.16b, v31.16b\n"
- "zip1 v31.16b, v26.16b, v30.16b\n"
- "zip2 v30.16b, v26.16b, v30.16b\n"
- "zip2 v24.16b, v25.16b, v23.16b\n"
- "zip1 v25.16b, v25.16b, v23.16b\n"
- "zip1 v23.16b, v21.16b, v22.16b\n"
- "zip2 v22.16b, v21.16b, v22.16b\n"
- "zip2 v19.16b, v20.16b, v18.16b\n"
- "zip1 v20.16b, v20.16b, v18.16b\n"
- "zip1 v18.16b, v16.16b, v17.16b\n"
- "zip2 v17.16b, v16.16b, v17.16b\n"
- "mov v26.16b, v2.16b\n"
- "mov v21.16b, v2.16b\n"
- ".inst 0x4e8197b5 // sdot v21.4s, v29.16b, v1.16b\n"
- "mov v16.16b, v2.16b\n"
- ".inst 0x4e8697a2 // sdot v2.4s, v29.16b, v6.16b\n"
- ".inst 0x4e819782 // sdot v2.4s, v28.16b, v1.16b\n"
- "ext v6.16b, v6.16b, v6.16b, #0x1\n"
- "ext v1.16b, v1.16b, v1.16b, #0x1\n"
- ".inst 0x4e8697ba // sdot v26.4s, v29.16b, v6.16b\n"
- "ldr q6, [%x[params], #0x40]\n"
- ".inst 0x4e8197b0 // sdot v16.4s, v29.16b, v1.16b\n"
- ".inst 0x4e999795 // sdot v21.4s, v28.16b, v25.16b\n"
- ".inst 0x4e999762 // sdot v2.4s, v27.16b, v25.16b\n"
- "ext v25.16b, v25.16b, v25.16b, #0x1\n"
- ".inst 0x4e81979a // sdot v26.4s, v28.16b, v1.16b\n"
- "ldr q1, [%x[params], #0x50]\n"
- ".inst 0x4e999790 // sdot v16.4s, v28.16b, v25.16b\n"
- ".inst 0x4e949775 // sdot v21.4s, v27.16b, v20.16b\n"
- "ext v20.16b, v20.16b, v20.16b, #0x1\n"
+ "zip1 v5.16b, v1.16b, v23.16b\n"
+ "ldr q27, [%x[params], #0x20]\n"
+ "zip2 v3.16b, v1.16b, v23.16b\n"
+ "ldr q25, [%x[params], #0x30]\n"
+ "zip1 v2.16b, v31.16b, v21.16b\n"
+ "ldr q23, [%x[params], #0x40]\n"
+ "zip2 v4.16b, v31.16b, v21.16b\n"
+ "ldr q21, [%x[params], #0x50]\n"
"add %x[params], %x[params], #0x60\n"
- "sqrdmulh v2.4s, v2.4s, v6.4s\n"
- ".inst 0x4e99977a // sdot v26.4s, v27.16b, v25.16b\n"
- ".inst 0x4e949770 // sdot v16.4s, v27.16b, v20.16b\n"
- "and v29.16b, v2.16b, v1.16b\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v6.4s\n"
- "sqrdmulh v21.4s, v21.4s, v6.4s\n"
- "sqrdmulh v16.4s, v16.4s, v6.4s\n"
- "sqadd v2.4s, v2.4s, v29.4s\n"
- "and v28.16b, v26.16b, v1.16b\n"
- "and v27.16b, v21.16b, v1.16b\n"
- "and v29.16b, v16.16b, v1.16b\n"
- "sshr v28.4s, v28.4s, #0x1f\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "sqadd v26.4s, v26.4s, v28.4s\n"
- "sqadd v21.4s, v21.4s, v27.4s\n"
- "sqadd v16.4s, v16.4s, v29.4s\n"
- "srshl v2.4s, v2.4s, v1.4s\n"
- "srshl v26.4s, v26.4s, v1.4s\n"
- "srshl v21.4s, v21.4s, v1.4s\n"
- "srshl v16.4s, v16.4s, v1.4s\n"
- "add v2.4s, v2.4s, v7.4s\n"
- "add v26.4s, v26.4s, v7.4s\n"
- "add v21.4s, v21.4s, v7.4s\n"
- "add v16.4s, v16.4s, v7.4s\n"
- "smax v2.4s, v2.4s, v9.4s\n"
- "smax v26.4s, v26.4s, v9.4s\n"
- "smax v21.4s, v21.4s, v9.4s\n"
- "smax v16.4s, v16.4s, v9.4s\n"
- "smin v2.4s, v2.4s, v8.4s\n"
- "smin v26.4s, v26.4s, v8.4s\n"
- "smin v21.4s, v21.4s, v8.4s\n"
- "smin v16.4s, v16.4s, v8.4s\n"
- "uzp1 v2.16b, v2.16b, v2.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "uzp1 v2.16b, v2.16b, v2.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "blt 20f\n"
- "str s2, [x24, x11]\n"
- "str s26, [x23, x11]\n"
- "str s21, [x22, x11]\n"
- "str s16, [x21, x11]\n"
- "b 23f\n"
- "20:" // Oddments: Unroll 0: Oddment store
+ "zip1 v1.16b, v28.16b, v26.16b\n"
+ "zip2 v31.16b, v28.16b, v26.16b\n"
+ "zip1 v28.16b, v24.16b, v20.16b\n"
+ "zip2 v26.16b, v24.16b, v20.16b\n"
+ "zip1 v24.16b, v22.16b, v16.16b\n"
+ "zip2 v22.16b, v22.16b, v16.16b\n"
+ "zip1 v20.16b, v19.16b, v18.16b\n"
+ "zip2 v19.16b, v19.16b, v18.16b\n"
+ "zip1 v18.16b, v0.16b, v17.16b\n"
+ "zip2 v17.16b, v0.16b, v17.16b\n"
+ "zip1 v8.16b, v6.16b, v5.16b\n"
+ "zip2 v7.16b, v6.16b, v5.16b\n"
+ "zip1 v6.16b, v9.16b, v3.16b\n"
+ "str q6, [SP, #0x0]\n"
+ "zip2 v5.16b, v9.16b, v3.16b\n"
+ "str q5, [SP, #0x10]\n"
+ "zip1 v3.16b, v2.16b, v1.16b\n"
+ "zip2 v2.16b, v2.16b, v1.16b\n"
+ "zip1 v1.16b, v4.16b, v31.16b\n"
+ "str q1, [SP, #0x20]\n"
+ "zip2 v16.16b, v4.16b, v31.16b\n"
+ "str q16, [SP, #0x30]\n"
+ "zip1 v31.16b, v28.16b, v24.16b\n"
+ "zip2 v28.16b, v28.16b, v24.16b\n"
+ "zip1 v16.16b, v26.16b, v22.16b\n"
+ "str q16, [SP, #0x40]\n"
+ "zip2 v16.16b, v26.16b, v22.16b\n"
+ "str q16, [SP, #0x50]\n"
+ "zip1 v26.16b, v20.16b, v18.16b\n"
+ "zip2 v24.16b, v20.16b, v18.16b\n"
+ "zip1 v16.16b, v19.16b, v17.16b\n"
+ "str q16, [SP, #0x60]\n"
+ "zip2 v16.16b, v19.16b, v17.16b\n"
+ "str q16, [SP, #0x70]\n"
+ "mov v22.16b, v30.16b\n"
+ "mov v20.16b, v30.16b\n"
+ "mov v19.16b, v30.16b\n"
+ ".inst 0x4e8897be // sdot v30.4s, v29.16b, v8.16b\n"
+ ".inst 0x4e8397b4 // sdot v20.4s, v29.16b, v3.16b\n"
+ "ext v8.16b, v8.16b, v8.16b, #0x1\n"
+ ".inst 0x4e83977e // sdot v30.4s, v27.16b, v3.16b\n"
+ "ext v3.16b, v3.16b, v3.16b, #0x1\n"
+ ".inst 0x4e9f9774 // sdot v20.4s, v27.16b, v31.16b\n"
+ ".inst 0x4e8897b6 // sdot v22.4s, v29.16b, v8.16b\n"
+ ".inst 0x4e9f973e // sdot v30.4s, v25.16b, v31.16b\n"
+ "ext v31.16b, v31.16b, v31.16b, #0x1\n"
+ ".inst 0x4e9a9734 // sdot v20.4s, v25.16b, v26.16b\n"
+ "ext v26.16b, v26.16b, v26.16b, #0x1\n"
+ ".inst 0x4e8397b3 // sdot v19.4s, v29.16b, v3.16b\n"
+ ".inst 0x4e839776 // sdot v22.4s, v27.16b, v3.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v23.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v23.4s\n"
+ ".inst 0x4e9f9773 // sdot v19.4s, v27.16b, v31.16b\n"
+ ".inst 0x4e9f9736 // sdot v22.4s, v25.16b, v31.16b\n"
+ "and v16.16b, v30.16b, v21.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ ".inst 0x4e9a9733 // sdot v19.4s, v25.16b, v26.16b\n"
+ "sqrdmulh v22.4s, v22.4s, v23.4s\n"
+ "and v18.16b, v20.16b, v21.16b\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "and v17.16b, v22.16b, v21.16b\n"
+ "sqrdmulh v19.4s, v19.4s, v23.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v30.4s, v30.4s, v16.4s\n"
+ "and v16.16b, v19.16b, v21.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "srshl v30.4s, v30.4s, v21.4s\n"
+ "sqadd v20.4s, v20.4s, v18.4s\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "add v30.4s, v30.4s, v10.4s\n"
+ "srshl v20.4s, v20.4s, v21.4s\n"
+ "srshl v22.4s, v22.4s, v21.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "smax v30.4s, v30.4s, v12.4s\n"
+ "add v20.4s, v20.4s, v10.4s\n"
+ "add v22.4s, v22.4s, v10.4s\n"
+ "smin v30.4s, v30.4s, v11.4s\n"
+ "smax v20.4s, v20.4s, v12.4s\n"
+ "smax v22.4s, v22.4s, v12.4s\n"
+ "srshl v19.4s, v19.4s, v21.4s\n"
+ "smin v20.4s, v20.4s, v11.4s\n"
+ "smin v22.4s, v22.4s, v11.4s\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "add v19.4s, v19.4s, v10.4s\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "smax v19.4s, v19.4s, v12.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "smin v19.4s, v19.4s, v11.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "blt 19f\n"
+ "str s30, [x24, x11]\n"
+ "str s22, [x23, x11]\n"
+ "str s20, [x21, x11]\n"
+ "str s19, [x20, x11]\n"
+ "b 22f\n"
+ "19:" // Oddments: Unroll 0: Oddment store
"add x24, x24, x11\n"
"add x23, x23, x11\n"
- "add x22, x22, x11\n"
"add x21, x21, x11\n"
- "tbz x20, #1, 21f\n"
- "st1 { v2.h }[0], [x24], #0x2\n"
- "st1 { v26.h }[0], [x23], #0x2\n"
- "st1 { v21.h }[0], [x22], #0x2\n"
- "st1 { v16.h }[0], [x21], #0x2\n"
- "tbz x20, #0, 22f\n"
- "st1 { v2.b }[2], [x24], #0x1\n"
- "st1 { v26.b }[2], [x23], #0x1\n"
- "st1 { v21.b }[2], [x22], #0x1\n"
- "st1 { v16.b }[2], [x21], #0x1\n"
- "b 22f\n"
- "21:" // Oddments: Unroll 0: Oddment store: Bit 1: Unset
- "st1 { v2.b }[0], [x24], #0x1\n"
- "st1 { v26.b }[0], [x23], #0x1\n"
- "st1 { v21.b }[0], [x22], #0x1\n"
- "st1 { v16.b }[0], [x21], #0x1\n"
- "22:" // Oddments: Unroll 0: Oddment store: Bit 1: End
- "23:" // Oddments: Unroll 0: After oddment store
- "subs x20, x20, #0x4\n"
+ "add x20, x20, x11\n"
+ "tbz x19, #1, 20f\n"
+ "st1 { v30.h }[0], [x24], #0x2\n"
+ "st1 { v22.h }[0], [x23], #0x2\n"
+ "st1 { v20.h }[0], [x21], #0x2\n"
+ "st1 { v19.h }[0], [x20], #0x2\n"
+ "tbz x19, #0, 21f\n"
+ "st1 { v30.b }[2], [x24], #0x1\n"
+ "st1 { v22.b }[2], [x23], #0x1\n"
+ "st1 { v20.b }[2], [x21], #0x1\n"
+ "st1 { v19.b }[2], [x20], #0x1\n"
+ "b 21f\n"
+ "20:" // Oddments: Unroll 0: Oddment store: Bit 1: Unset
+ "tbz x19, #0, 21f\n"
+ "st1 { v30.b }[0], [x24], #0x1\n"
+ "st1 { v22.b }[0], [x23], #0x1\n"
+ "st1 { v20.b }[0], [x21], #0x1\n"
+ "st1 { v19.b }[0], [x20], #0x1\n"
+ "21:" // Oddments: Unroll 0: Oddment store: Bit 1: End
+
+ "22:" // Oddments: Unroll 0: After oddment store
"add x11, x11, #0x4\n"
- "ble 35f\n"
- "ldr q2, [%x[params], #0x0]\n"
+ "subs x19, x19, #0x4\n"
+ "ble 34f\n"
+ "ldr q30, [%x[params], #0x0]\n"
+ "mov v22.16b, v30.16b\n"
"ldr q29, [%x[params], #0x10]\n"
- "mov v26.16b, v2.16b\n"
- "mov v21.16b, v2.16b\n"
- "ldr q28, [%x[params], #0x20]\n"
- "ldr q27, [%x[params], #0x30]\n"
- "mov v16.16b, v2.16b\n"
- ".inst 0x4e8597a2 // sdot v2.4s, v29.16b, v5.16b\n"
- "ldr q6, [%x[params], #0x40]\n"
- "ldr q1, [%x[params], #0x50]\n"
- ".inst 0x4e8097b5 // sdot v21.4s, v29.16b, v0.16b\n"
- ".inst 0x4e809782 // sdot v2.4s, v28.16b, v0.16b\n"
- "ext v5.16b, v5.16b, v5.16b, #0x1\n"
- "ext v0.16b, v0.16b, v0.16b, #0x1\n"
- ".inst 0x4e8597ba // sdot v26.4s, v29.16b, v5.16b\n"
- "cmp x20, #0x4\n"
- ".inst 0x4e8097b0 // sdot v16.4s, v29.16b, v0.16b\n"
- ".inst 0x4e989795 // sdot v21.4s, v28.16b, v24.16b\n"
+ "cmp x19, #0x4\n"
+ "mov v20.16b, v30.16b\n"
+ "ldr q27, [%x[params], #0x20]\n"
+ "mov v19.16b, v30.16b\n"
+ "ldr q25, [%x[params], #0x30]\n"
+ "ldr q23, [%x[params], #0x40]\n"
+ ".inst 0x4e8797be // sdot v30.4s, v29.16b, v7.16b\n"
+ "ldr q21, [%x[params], #0x50]\n"
"add %x[params], %x[params], #0x60\n"
- ".inst 0x4e989762 // sdot v2.4s, v27.16b, v24.16b\n"
+ ".inst 0x4e8297b4 // sdot v20.4s, v29.16b, v2.16b\n"
+ "ext v7.16b, v7.16b, v7.16b, #0x1\n"
+ ".inst 0x4e82977e // sdot v30.4s, v27.16b, v2.16b\n"
+ "ext v2.16b, v2.16b, v2.16b, #0x1\n"
+ ".inst 0x4e9c9774 // sdot v20.4s, v27.16b, v28.16b\n"
+ ".inst 0x4e8797b6 // sdot v22.4s, v29.16b, v7.16b\n"
+ ".inst 0x4e9c973e // sdot v30.4s, v25.16b, v28.16b\n"
+ "ext v28.16b, v28.16b, v28.16b, #0x1\n"
+ ".inst 0x4e989734 // sdot v20.4s, v25.16b, v24.16b\n"
"ext v24.16b, v24.16b, v24.16b, #0x1\n"
- ".inst 0x4e80979a // sdot v26.4s, v28.16b, v0.16b\n"
- ".inst 0x4e989790 // sdot v16.4s, v28.16b, v24.16b\n"
- ".inst 0x4e939775 // sdot v21.4s, v27.16b, v19.16b\n"
- "ext v19.16b, v19.16b, v19.16b, #0x1\n"
- "sqrdmulh v2.4s, v2.4s, v6.4s\n"
- ".inst 0x4e98977a // sdot v26.4s, v27.16b, v24.16b\n"
- ".inst 0x4e939770 // sdot v16.4s, v27.16b, v19.16b\n"
- "and v29.16b, v2.16b, v1.16b\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v6.4s\n"
- "sqrdmulh v21.4s, v21.4s, v6.4s\n"
- "sqrdmulh v16.4s, v16.4s, v6.4s\n"
- "sqadd v2.4s, v2.4s, v29.4s\n"
- "and v28.16b, v26.16b, v1.16b\n"
- "and v27.16b, v21.16b, v1.16b\n"
- "and v29.16b, v16.16b, v1.16b\n"
- "sshr v28.4s, v28.4s, #0x1f\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "sqadd v26.4s, v26.4s, v28.4s\n"
- "sqadd v21.4s, v21.4s, v27.4s\n"
- "sqadd v16.4s, v16.4s, v29.4s\n"
- "srshl v2.4s, v2.4s, v1.4s\n"
- "srshl v26.4s, v26.4s, v1.4s\n"
- "srshl v21.4s, v21.4s, v1.4s\n"
- "srshl v16.4s, v16.4s, v1.4s\n"
- "add v2.4s, v2.4s, v7.4s\n"
- "add v26.4s, v26.4s, v7.4s\n"
- "add v21.4s, v21.4s, v7.4s\n"
- "add v16.4s, v16.4s, v7.4s\n"
- "smax v2.4s, v2.4s, v9.4s\n"
- "smax v26.4s, v26.4s, v9.4s\n"
- "smax v21.4s, v21.4s, v9.4s\n"
- "smax v16.4s, v16.4s, v9.4s\n"
- "smin v2.4s, v2.4s, v8.4s\n"
- "smin v26.4s, v26.4s, v8.4s\n"
- "smin v21.4s, v21.4s, v8.4s\n"
- "smin v16.4s, v16.4s, v8.4s\n"
- "uzp1 v2.16b, v2.16b, v2.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "uzp1 v2.16b, v2.16b, v2.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "blt 24f\n"
- "str s2, [x24, x11]\n"
- "str s26, [x23, x11]\n"
- "str s21, [x22, x11]\n"
- "str s16, [x21, x11]\n"
- "b 27f\n"
- "24:" // Oddments: Unroll 1: Oddment store
+ ".inst 0x4e8297b3 // sdot v19.4s, v29.16b, v2.16b\n"
+ ".inst 0x4e829776 // sdot v22.4s, v27.16b, v2.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v23.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v23.4s\n"
+ ".inst 0x4e9c9773 // sdot v19.4s, v27.16b, v28.16b\n"
+ ".inst 0x4e9c9736 // sdot v22.4s, v25.16b, v28.16b\n"
+ "and v16.16b, v30.16b, v21.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ ".inst 0x4e989733 // sdot v19.4s, v25.16b, v24.16b\n"
+ "sqrdmulh v22.4s, v22.4s, v23.4s\n"
+ "and v18.16b, v20.16b, v21.16b\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "and v17.16b, v22.16b, v21.16b\n"
+ "sqrdmulh v19.4s, v19.4s, v23.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v30.4s, v30.4s, v16.4s\n"
+ "and v16.16b, v19.16b, v21.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "srshl v30.4s, v30.4s, v21.4s\n"
+ "sqadd v20.4s, v20.4s, v18.4s\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "add v30.4s, v30.4s, v10.4s\n"
+ "srshl v20.4s, v20.4s, v21.4s\n"
+ "srshl v22.4s, v22.4s, v21.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "smax v30.4s, v30.4s, v12.4s\n"
+ "add v20.4s, v20.4s, v10.4s\n"
+ "add v22.4s, v22.4s, v10.4s\n"
+ "smin v30.4s, v30.4s, v11.4s\n"
+ "smax v20.4s, v20.4s, v12.4s\n"
+ "smax v22.4s, v22.4s, v12.4s\n"
+ "srshl v19.4s, v19.4s, v21.4s\n"
+ "smin v20.4s, v20.4s, v11.4s\n"
+ "smin v22.4s, v22.4s, v11.4s\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "add v19.4s, v19.4s, v10.4s\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "smax v19.4s, v19.4s, v12.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "smin v19.4s, v19.4s, v11.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "blt 23f\n"
+ "str s30, [x24, x11]\n"
+ "str s22, [x23, x11]\n"
+ "str s20, [x21, x11]\n"
+ "str s19, [x20, x11]\n"
+ "b 26f\n"
+ "23:" // Oddments: Unroll 1: Oddment store
"add x24, x24, x11\n"
"add x23, x23, x11\n"
- "add x22, x22, x11\n"
"add x21, x21, x11\n"
- "tbz x20, #1, 25f\n"
- "st1 { v2.h }[0], [x24], #0x2\n"
- "st1 { v26.h }[0], [x23], #0x2\n"
- "st1 { v21.h }[0], [x22], #0x2\n"
- "st1 { v16.h }[0], [x21], #0x2\n"
- "tbz x20, #0, 26f\n"
- "st1 { v2.b }[2], [x24], #0x1\n"
- "st1 { v26.b }[2], [x23], #0x1\n"
- "st1 { v21.b }[2], [x22], #0x1\n"
- "st1 { v16.b }[2], [x21], #0x1\n"
- "b 26f\n"
- "25:" // Oddments: Unroll 1: Oddment store: Bit 1: Unset
- "st1 { v2.b }[0], [x24], #0x1\n"
- "st1 { v26.b }[0], [x23], #0x1\n"
- "st1 { v21.b }[0], [x22], #0x1\n"
- "st1 { v16.b }[0], [x21], #0x1\n"
- "26:" // Oddments: Unroll 1: Oddment store: Bit 1: End
- "27:" // Oddments: Unroll 1: After oddment store
- "subs x20, x20, #0x4\n"
+ "add x20, x20, x11\n"
+ "tbz x19, #1, 24f\n"
+ "st1 { v30.h }[0], [x24], #0x2\n"
+ "st1 { v22.h }[0], [x23], #0x2\n"
+ "st1 { v20.h }[0], [x21], #0x2\n"
+ "st1 { v19.h }[0], [x20], #0x2\n"
+ "tbz x19, #0, 25f\n"
+ "st1 { v30.b }[2], [x24], #0x1\n"
+ "st1 { v22.b }[2], [x23], #0x1\n"
+ "st1 { v20.b }[2], [x21], #0x1\n"
+ "st1 { v19.b }[2], [x20], #0x1\n"
+ "b 25f\n"
+ "24:" // Oddments: Unroll 1: Oddment store: Bit 1: Unset
+ "tbz x19, #0, 25f\n"
+ "st1 { v30.b }[0], [x24], #0x1\n"
+ "st1 { v22.b }[0], [x23], #0x1\n"
+ "st1 { v20.b }[0], [x21], #0x1\n"
+ "st1 { v19.b }[0], [x20], #0x1\n"
+ "25:" // Oddments: Unroll 1: Oddment store: Bit 1: End
+
+ "26:" // Oddments: Unroll 1: After oddment store
"add x11, x11, #0x4\n"
- "ble 35f\n"
- "ldr q2, [%x[params], #0x0]\n"
+ "subs x19, x19, #0x4\n"
+ "ble 34f\n"
+ "ldr q8, [SP, #0x0]\n"
+ "ldr q3, [SP, #0x20]\n"
+ "cmp x19, #0x4\n"
+ "ldr q31, [SP, #0x40]\n"
+ "ldr q26, [SP, #0x60]\n"
+ "ldr q30, [%x[params], #0x0]\n"
+ "mov v22.16b, v30.16b\n"
"ldr q29, [%x[params], #0x10]\n"
- "mov v26.16b, v2.16b\n"
- "mov v21.16b, v2.16b\n"
- "ldr q28, [%x[params], #0x20]\n"
- "ldr q27, [%x[params], #0x30]\n"
- "mov v16.16b, v2.16b\n"
- ".inst 0x4e8497a2 // sdot v2.4s, v29.16b, v4.16b\n"
- "ldr q6, [%x[params], #0x40]\n"
- "ldr q1, [%x[params], #0x50]\n"
- ".inst 0x4e9f97b5 // sdot v21.4s, v29.16b, v31.16b\n"
- ".inst 0x4e9f9782 // sdot v2.4s, v28.16b, v31.16b\n"
- "ext v4.16b, v4.16b, v4.16b, #0x1\n"
- "ext v31.16b, v31.16b, v31.16b, #0x1\n"
- ".inst 0x4e8497ba // sdot v26.4s, v29.16b, v4.16b\n"
- "cmp x20, #0x4\n"
- ".inst 0x4e9f97b0 // sdot v16.4s, v29.16b, v31.16b\n"
- ".inst 0x4e979795 // sdot v21.4s, v28.16b, v23.16b\n"
+ "mov v20.16b, v30.16b\n"
+ "ldr q27, [%x[params], #0x20]\n"
+ "mov v19.16b, v30.16b\n"
+ "ldr q25, [%x[params], #0x30]\n"
+ "ldr q23, [%x[params], #0x40]\n"
+ ".inst 0x4e8897be // sdot v30.4s, v29.16b, v8.16b\n"
+ "ldr q21, [%x[params], #0x50]\n"
"add %x[params], %x[params], #0x60\n"
- ".inst 0x4e979762 // sdot v2.4s, v27.16b, v23.16b\n"
- "ext v23.16b, v23.16b, v23.16b, #0x1\n"
- ".inst 0x4e9f979a // sdot v26.4s, v28.16b, v31.16b\n"
- ".inst 0x4e979790 // sdot v16.4s, v28.16b, v23.16b\n"
- ".inst 0x4e929775 // sdot v21.4s, v27.16b, v18.16b\n"
- "ext v18.16b, v18.16b, v18.16b, #0x1\n"
- "sqrdmulh v2.4s, v2.4s, v6.4s\n"
- ".inst 0x4e97977a // sdot v26.4s, v27.16b, v23.16b\n"
- ".inst 0x4e929770 // sdot v16.4s, v27.16b, v18.16b\n"
- "and v29.16b, v2.16b, v1.16b\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v6.4s\n"
- "sqrdmulh v21.4s, v21.4s, v6.4s\n"
- "sqrdmulh v16.4s, v16.4s, v6.4s\n"
- "sqadd v2.4s, v2.4s, v29.4s\n"
- "and v28.16b, v26.16b, v1.16b\n"
- "and v27.16b, v21.16b, v1.16b\n"
- "and v29.16b, v16.16b, v1.16b\n"
- "sshr v28.4s, v28.4s, #0x1f\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "sqadd v26.4s, v26.4s, v28.4s\n"
- "sqadd v21.4s, v21.4s, v27.4s\n"
- "sqadd v16.4s, v16.4s, v29.4s\n"
- "srshl v2.4s, v2.4s, v1.4s\n"
- "srshl v26.4s, v26.4s, v1.4s\n"
- "srshl v21.4s, v21.4s, v1.4s\n"
- "srshl v16.4s, v16.4s, v1.4s\n"
- "add v2.4s, v2.4s, v7.4s\n"
- "add v26.4s, v26.4s, v7.4s\n"
- "add v21.4s, v21.4s, v7.4s\n"
- "add v16.4s, v16.4s, v7.4s\n"
- "smax v2.4s, v2.4s, v9.4s\n"
- "smax v26.4s, v26.4s, v9.4s\n"
- "smax v21.4s, v21.4s, v9.4s\n"
- "smax v16.4s, v16.4s, v9.4s\n"
- "smin v2.4s, v2.4s, v8.4s\n"
- "smin v26.4s, v26.4s, v8.4s\n"
- "smin v21.4s, v21.4s, v8.4s\n"
- "smin v16.4s, v16.4s, v8.4s\n"
- "uzp1 v2.16b, v2.16b, v2.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "uzp1 v2.16b, v2.16b, v2.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "blt 28f\n"
- "str s2, [x24, x11]\n"
- "str s26, [x23, x11]\n"
- "str s21, [x22, x11]\n"
- "str s16, [x21, x11]\n"
- "b 31f\n"
- "28:" // Oddments: Unroll 2: Oddment store
+ ".inst 0x4e8397b4 // sdot v20.4s, v29.16b, v3.16b\n"
+ "ext v8.16b, v8.16b, v8.16b, #0x1\n"
+ ".inst 0x4e83977e // sdot v30.4s, v27.16b, v3.16b\n"
+ "ext v3.16b, v3.16b, v3.16b, #0x1\n"
+ ".inst 0x4e9f9774 // sdot v20.4s, v27.16b, v31.16b\n"
+ ".inst 0x4e8897b6 // sdot v22.4s, v29.16b, v8.16b\n"
+ ".inst 0x4e9f973e // sdot v30.4s, v25.16b, v31.16b\n"
+ "ext v31.16b, v31.16b, v31.16b, #0x1\n"
+ ".inst 0x4e9a9734 // sdot v20.4s, v25.16b, v26.16b\n"
+ "ext v26.16b, v26.16b, v26.16b, #0x1\n"
+ ".inst 0x4e8397b3 // sdot v19.4s, v29.16b, v3.16b\n"
+ ".inst 0x4e839776 // sdot v22.4s, v27.16b, v3.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v23.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v23.4s\n"
+ ".inst 0x4e9f9773 // sdot v19.4s, v27.16b, v31.16b\n"
+ ".inst 0x4e9f9736 // sdot v22.4s, v25.16b, v31.16b\n"
+ "and v16.16b, v30.16b, v21.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ ".inst 0x4e9a9733 // sdot v19.4s, v25.16b, v26.16b\n"
+ "sqrdmulh v22.4s, v22.4s, v23.4s\n"
+ "and v18.16b, v20.16b, v21.16b\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "and v17.16b, v22.16b, v21.16b\n"
+ "sqrdmulh v19.4s, v19.4s, v23.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v30.4s, v30.4s, v16.4s\n"
+ "and v16.16b, v19.16b, v21.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "srshl v30.4s, v30.4s, v21.4s\n"
+ "sqadd v20.4s, v20.4s, v18.4s\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "add v30.4s, v30.4s, v10.4s\n"
+ "srshl v20.4s, v20.4s, v21.4s\n"
+ "srshl v22.4s, v22.4s, v21.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "smax v30.4s, v30.4s, v12.4s\n"
+ "add v20.4s, v20.4s, v10.4s\n"
+ "add v22.4s, v22.4s, v10.4s\n"
+ "smin v30.4s, v30.4s, v11.4s\n"
+ "smax v20.4s, v20.4s, v12.4s\n"
+ "smax v22.4s, v22.4s, v12.4s\n"
+ "srshl v19.4s, v19.4s, v21.4s\n"
+ "smin v20.4s, v20.4s, v11.4s\n"
+ "smin v22.4s, v22.4s, v11.4s\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "add v19.4s, v19.4s, v10.4s\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "smax v19.4s, v19.4s, v12.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "smin v19.4s, v19.4s, v11.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "blt 27f\n"
+ "str s30, [x24, x11]\n"
+ "str s22, [x23, x11]\n"
+ "str s20, [x21, x11]\n"
+ "str s19, [x20, x11]\n"
+ "b 30f\n"
+ "27:" // Oddments: Unroll 2: Oddment store
"add x24, x24, x11\n"
"add x23, x23, x11\n"
- "add x22, x22, x11\n"
"add x21, x21, x11\n"
- "tbz x20, #1, 29f\n"
- "st1 { v2.h }[0], [x24], #0x2\n"
- "st1 { v26.h }[0], [x23], #0x2\n"
- "st1 { v21.h }[0], [x22], #0x2\n"
- "st1 { v16.h }[0], [x21], #0x2\n"
- "tbz x20, #0, 30f\n"
- "st1 { v2.b }[2], [x24], #0x1\n"
- "st1 { v26.b }[2], [x23], #0x1\n"
- "st1 { v21.b }[2], [x22], #0x1\n"
- "st1 { v16.b }[2], [x21], #0x1\n"
- "b 30f\n"
- "29:" // Oddments: Unroll 2: Oddment store: Bit 1: Unset
- "st1 { v2.b }[0], [x24], #0x1\n"
- "st1 { v26.b }[0], [x23], #0x1\n"
- "st1 { v21.b }[0], [x22], #0x1\n"
- "st1 { v16.b }[0], [x21], #0x1\n"
- "30:" // Oddments: Unroll 2: Oddment store: Bit 1: End
+ "add x20, x20, x11\n"
+ "tbz x19, #1, 28f\n"
+ "st1 { v30.h }[0], [x24], #0x2\n"
+ "st1 { v22.h }[0], [x23], #0x2\n"
+ "st1 { v20.h }[0], [x21], #0x2\n"
+ "st1 { v19.h }[0], [x20], #0x2\n"
+ "tbz x19, #0, 29f\n"
+ "st1 { v30.b }[2], [x24], #0x1\n"
+ "st1 { v22.b }[2], [x23], #0x1\n"
+ "st1 { v20.b }[2], [x21], #0x1\n"
+ "st1 { v19.b }[2], [x20], #0x1\n"
+ "b 29f\n"
+ "28:" // Oddments: Unroll 2: Oddment store: Bit 1: Unset
+ "tbz x19, #0, 29f\n"
+ "st1 { v30.b }[0], [x24], #0x1\n"
+ "st1 { v22.b }[0], [x23], #0x1\n"
+ "st1 { v20.b }[0], [x21], #0x1\n"
+ "st1 { v19.b }[0], [x20], #0x1\n"
+ "29:" // Oddments: Unroll 2: Oddment store: Bit 1: End
- "31:" // Oddments: Unroll 2: After oddment store
- "subs x20, x20, #0x4\n"
+ "30:" // Oddments: Unroll 2: After oddment store
"add x11, x11, #0x4\n"
- "ble 35f\n"
- "ldr q2, [%x[params], #0x0]\n"
+ "subs x19, x19, #0x4\n"
+ "ble 34f\n"
+ "ldr q7, [SP, #0x10]\n"
+ "ldr q2, [SP, #0x30]\n"
+ "ldr q28, [SP, #0x50]\n"
+ "ldr q24, [SP, #0x70]\n"
+ "ldr q30, [%x[params], #0x0]\n"
+ "mov v22.16b, v30.16b\n"
"ldr q29, [%x[params], #0x10]\n"
- "mov v26.16b, v2.16b\n"
- "mov v21.16b, v2.16b\n"
- "ldr q28, [%x[params], #0x20]\n"
- "ldr q27, [%x[params], #0x30]\n"
- "mov v16.16b, v2.16b\n"
- ".inst 0x4e8397a2 // sdot v2.4s, v29.16b, v3.16b\n"
- "ldr q6, [%x[params], #0x40]\n"
- "ldr q1, [%x[params], #0x50]\n"
- ".inst 0x4e9e97b5 // sdot v21.4s, v29.16b, v30.16b\n"
- ".inst 0x4e9e9782 // sdot v2.4s, v28.16b, v30.16b\n"
- "ext v3.16b, v3.16b, v3.16b, #0x1\n"
- "ext v30.16b, v30.16b, v30.16b, #0x1\n"
- ".inst 0x4e8397ba // sdot v26.4s, v29.16b, v3.16b\n"
+ "mov v20.16b, v30.16b\n"
+ "ldr q27, [%x[params], #0x20]\n"
+ "mov v19.16b, v30.16b\n"
+ "ldr q25, [%x[params], #0x30]\n"
+ "ldr q23, [%x[params], #0x40]\n"
+ ".inst 0x4e8797be // sdot v30.4s, v29.16b, v7.16b\n"
+ "ldr q21, [%x[params], #0x50]\n"
"add %x[params], %x[params], #0x60\n"
- ".inst 0x4e9e97b0 // sdot v16.4s, v29.16b, v30.16b\n"
- ".inst 0x4e969795 // sdot v21.4s, v28.16b, v22.16b\n"
- ".inst 0x4e969762 // sdot v2.4s, v27.16b, v22.16b\n"
- "ext v22.16b, v22.16b, v22.16b, #0x1\n"
- ".inst 0x4e9e979a // sdot v26.4s, v28.16b, v30.16b\n"
- ".inst 0x4e969790 // sdot v16.4s, v28.16b, v22.16b\n"
- ".inst 0x4e919775 // sdot v21.4s, v27.16b, v17.16b\n"
- "ext v17.16b, v17.16b, v17.16b, #0x1\n"
- "sqrdmulh v2.4s, v2.4s, v6.4s\n"
- ".inst 0x4e96977a // sdot v26.4s, v27.16b, v22.16b\n"
- ".inst 0x4e919770 // sdot v16.4s, v27.16b, v17.16b\n"
- "and v29.16b, v2.16b, v1.16b\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v6.4s\n"
- "sqrdmulh v21.4s, v21.4s, v6.4s\n"
- "sqrdmulh v16.4s, v16.4s, v6.4s\n"
- "sqadd v2.4s, v2.4s, v29.4s\n"
- "and v28.16b, v26.16b, v1.16b\n"
- "and v27.16b, v21.16b, v1.16b\n"
- "and v29.16b, v16.16b, v1.16b\n"
- "sshr v28.4s, v28.4s, #0x1f\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "sqadd v26.4s, v26.4s, v28.4s\n"
- "sqadd v21.4s, v21.4s, v27.4s\n"
- "sqadd v16.4s, v16.4s, v29.4s\n"
- "srshl v2.4s, v2.4s, v1.4s\n"
- "srshl v26.4s, v26.4s, v1.4s\n"
- "srshl v21.4s, v21.4s, v1.4s\n"
- "srshl v16.4s, v16.4s, v1.4s\n"
- "add v2.4s, v2.4s, v7.4s\n"
- "add v26.4s, v26.4s, v7.4s\n"
- "add v21.4s, v21.4s, v7.4s\n"
- "add v16.4s, v16.4s, v7.4s\n"
- "smax v2.4s, v2.4s, v9.4s\n"
- "smax v26.4s, v26.4s, v9.4s\n"
- "smax v21.4s, v21.4s, v9.4s\n"
- "smax v16.4s, v16.4s, v9.4s\n"
- "smin v2.4s, v2.4s, v8.4s\n"
- "smin v26.4s, v26.4s, v8.4s\n"
- "smin v21.4s, v21.4s, v8.4s\n"
- "smin v16.4s, v16.4s, v8.4s\n"
- "uzp1 v2.16b, v2.16b, v2.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "uzp1 v2.16b, v2.16b, v2.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "32:" // Oddments: Unroll 3: Oddment store
+ ".inst 0x4e8297b4 // sdot v20.4s, v29.16b, v2.16b\n"
+ "ext v7.16b, v7.16b, v7.16b, #0x1\n"
+ ".inst 0x4e82977e // sdot v30.4s, v27.16b, v2.16b\n"
+ "ext v2.16b, v2.16b, v2.16b, #0x1\n"
+ ".inst 0x4e9c9774 // sdot v20.4s, v27.16b, v28.16b\n"
+ ".inst 0x4e8797b6 // sdot v22.4s, v29.16b, v7.16b\n"
+ ".inst 0x4e9c973e // sdot v30.4s, v25.16b, v28.16b\n"
+ "ext v28.16b, v28.16b, v28.16b, #0x1\n"
+ ".inst 0x4e989734 // sdot v20.4s, v25.16b, v24.16b\n"
+ "ext v24.16b, v24.16b, v24.16b, #0x1\n"
+ ".inst 0x4e8297b3 // sdot v19.4s, v29.16b, v2.16b\n"
+ ".inst 0x4e829776 // sdot v22.4s, v27.16b, v2.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v23.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v23.4s\n"
+ ".inst 0x4e9c9773 // sdot v19.4s, v27.16b, v28.16b\n"
+ ".inst 0x4e9c9736 // sdot v22.4s, v25.16b, v28.16b\n"
+ "and v16.16b, v30.16b, v21.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ ".inst 0x4e989733 // sdot v19.4s, v25.16b, v24.16b\n"
+ "sqrdmulh v22.4s, v22.4s, v23.4s\n"
+ "and v18.16b, v20.16b, v21.16b\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "and v17.16b, v22.16b, v21.16b\n"
+ "sqrdmulh v19.4s, v19.4s, v23.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v30.4s, v30.4s, v16.4s\n"
+ "and v16.16b, v19.16b, v21.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "srshl v30.4s, v30.4s, v21.4s\n"
+ "sqadd v20.4s, v20.4s, v18.4s\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "add v30.4s, v30.4s, v10.4s\n"
+ "srshl v20.4s, v20.4s, v21.4s\n"
+ "srshl v22.4s, v22.4s, v21.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "smax v30.4s, v30.4s, v12.4s\n"
+ "add v20.4s, v20.4s, v10.4s\n"
+ "add v22.4s, v22.4s, v10.4s\n"
+ "smin v30.4s, v30.4s, v11.4s\n"
+ "smax v20.4s, v20.4s, v12.4s\n"
+ "smax v22.4s, v22.4s, v12.4s\n"
+ "srshl v19.4s, v19.4s, v21.4s\n"
+ "smin v20.4s, v20.4s, v11.4s\n"
+ "smin v22.4s, v22.4s, v11.4s\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "add v19.4s, v19.4s, v10.4s\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "smax v19.4s, v19.4s, v12.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "smin v19.4s, v19.4s, v11.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "31:" // Oddments: Unroll 3: Oddment store
"add x24, x24, x11\n"
"add x23, x23, x11\n"
- "add x22, x22, x11\n"
"add x21, x21, x11\n"
- "tbz x20, #1, 33f\n"
- "st1 { v2.h }[0], [x24], #0x2\n"
- "st1 { v26.h }[0], [x23], #0x2\n"
- "st1 { v21.h }[0], [x22], #0x2\n"
- "st1 { v16.h }[0], [x21], #0x2\n"
- "tbz x20, #0, 34f\n"
- "st1 { v2.b }[2], [x24], #0x1\n"
- "st1 { v26.b }[2], [x23], #0x1\n"
- "st1 { v21.b }[2], [x22], #0x1\n"
- "st1 { v16.b }[2], [x21], #0x1\n"
- "b 34f\n"
- "33:" // Oddments: Unroll 3: Oddment store: Bit 1: Unset
- "st1 { v2.b }[0], [x24], #0x1\n"
- "st1 { v26.b }[0], [x23], #0x1\n"
- "st1 { v21.b }[0], [x22], #0x1\n"
- "st1 { v16.b }[0], [x21], #0x1\n"
- "34:" // Oddments: Unroll 3: Oddment store: Bit 1: End
+ "add x20, x20, x11\n"
+ "tbz x19, #1, 32f\n"
+ "st1 { v30.h }[0], [x24], #0x2\n"
+ "st1 { v22.h }[0], [x23], #0x2\n"
+ "st1 { v20.h }[0], [x21], #0x2\n"
+ "st1 { v19.h }[0], [x20], #0x2\n"
+ "tbz x19, #0, 33f\n"
+ "st1 { v30.b }[2], [x24], #0x1\n"
+ "st1 { v22.b }[2], [x23], #0x1\n"
+ "st1 { v20.b }[2], [x21], #0x1\n"
+ "st1 { v19.b }[2], [x20], #0x1\n"
+ "b 33f\n"
+ "32:" // Oddments: Unroll 3: Oddment store: Bit 1: Unset
+ "tbz x19, #0, 33f\n"
+ "st1 { v30.b }[0], [x24], #0x1\n"
+ "st1 { v22.b }[0], [x23], #0x1\n"
+ "st1 { v20.b }[0], [x21], #0x1\n"
+ "st1 { v19.b }[0], [x20], #0x1\n"
+ "33:" // Oddments: Unroll 3: Oddment store: Bit 1: End
- "35:" // End
+ "34:" // End
+ "add SP, SP, #0x80\n"
: [params] "+&r" (params)
- : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : [inptrs] "r" (inptrs), [n_channels] "r" ((long unsigned int) n_channels), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
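
The requantization tail that both versions of these kernels share (sqrdmulh, the and/sshr/sqadd fixup, srshl, the c_offset add, and the smax/smin clamp before the uzp1 narrowing) can be modelled per lane in plain C++. The sketch below is illustrative only and not part of this patch; the function and parameter names are hypothetical, and the shift is passed as the non-positive value the kernels keep in their shift register (v21 in the restored code).

    #include <algorithm>
    #include <cstdint>

    // SQRDMULH: saturating rounding doubling multiply returning the
    // high 32 bits of (2 * a * b).
    static int32_t sqrdmulh(int32_t a, int32_t b)
    {
        if (a == INT32_MIN && b == INT32_MIN)
        {
            return INT32_MAX; // the only input pair that saturates
        }
        const int64_t p = static_cast<int64_t>(a) * b;
        return static_cast<int32_t>((p + (INT64_C(1) << 30)) >> 31);
    }

    // AND + SSHR #31 + SQADD + SRSHL: rounding arithmetic right shift.
    // neg_shift is non-positive, as stored in the shift register.
    static int32_t rounding_rshift(int32_t x, int32_t neg_shift)
    {
        const int shift = -neg_shift;
        if (shift == 0) return x;
        // ANDing with the negative shift value preserves the sign bit
        // of x, so SSHR #31 / SQADD subtract 1 from negative inputs
        // before the rounding shift (gemmlowp-style rounding divide).
        const int32_t fixup = (x & neg_shift) >> 31;
        int64_t fixed = static_cast<int64_t>(x) + fixup;
        fixed = std::max<int64_t>(fixed, INT32_MIN); // SQADD saturates
        return static_cast<int32_t>(
            (fixed + (INT64_C(1) << (shift - 1))) >> shift);
    }

    // ADD c_offset, SMAX/SMIN clamp, then UZP1 narrows each lane to a
    // byte (uint8_t here to match the u8q kernel; s8q yields int8_t).
    static uint8_t requantize_lane(int32_t acc, int32_t multiplier,
                                   int32_t neg_shift, int32_t c_offset,
                                   int32_t minval, int32_t maxval)
    {
        int32_t v = sqrdmulh(acc, multiplier);
        v = rounding_rshift(v, neg_shift);
        v += c_offset;
        v = std::max(v, minval);
        v = std::min(v, maxval);
        return static_cast<uint8_t>(v);
    }

SRSHL shifts left by a signed per-lane amount, so a right shift is encoded as a negative value; that same negative value doubles as the mask in the AND/SSHR fixup, which is why a single register feeds both steps.
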
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
index 986937f3b4..64b305c21d 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,1622 +41,1282 @@ void a64_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(
)
{
__asm__ __volatile__(
- "mov x20, #0x1\n"
- "orr x20, x20, #0x100\n"
- "ldp x15, x14, [%x[inptrs], #0x0]\n"
- "ldp x13, x12, [%x[inptrs], #0x10]\n"
- "orr x20, x20, #0x10000\n"
- "lsr x11, %x[n_channels], #0x4\n"
- "dup v14.4s, w20\n"
- "ldp x10, x9, [%x[inptrs], #0x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_minval]\n"
- "ld1r { v13.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_maxval]\n"
- "ld1r { v12.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_b_offset]\n"
- "ld1r { v11.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v10.4s }, [x20]\n"
- "mov x28, #0x0\n"
- "mov x27, #0x0\n"
- "ldp x26, x25, [%x[inptrs], #0x30]\n"
- "ldp x24, x23, [%x[outptrs], #0x0]\n"
+ "ldp x13, x12, [%x[inptrs], #0x0]\n"
+ "add SP, SP, #-0x80\n"
+ "ldp x11, x10, [%x[inptrs], #0x10]\n"
+ "mov x19, #0x1\n"
+ "ldp x9, x28, [%x[inptrs], #0x20]\n"
+ "orr x19, x19, #0x100\n"
+ "ldp x27, x26, [%x[inptrs], #0x30]\n"
+ "orr x19, x19, #0x10000\n"
+ "dup v11.4s, w19\n"
+ "ldp x25, x24, [%x[outptrs], #0x0]\n"
+ "mov x23, #0x0\n"
"ldp x22, x21, [%x[outptrs], #0x10]\n"
- "cbz x11, 3f\n"
- "ldr q9, [x15, x28]\n"
- "ldr q8, [x14, x28]\n"
- "subs x11, x11, #0x1\n"
- "ldr q7, [x13, x28]\n"
- "ldr q6, [x12, x28]\n"
- "zip2 v5.16b, v9.16b, v7.16b\n"
- "zip1 v9.16b, v9.16b, v7.16b\n"
- "ldr q4, [x10, x28]\n"
- "ldr q3, [x9, x28]\n"
- "zip1 v7.16b, v8.16b, v6.16b\n"
- "zip2 v6.16b, v8.16b, v6.16b\n"
- "ldr q2, [x26, x28]\n"
- "ldr q1, [x25, x28]\n"
- "zip2 v8.16b, v9.16b, v7.16b\n"
- "zip1 v9.16b, v9.16b, v7.16b\n"
- "ldr q0, [%x[params], #0x10]\n"
- "ldr q16, [%x[params], #0x20]\n"
- "zip1 v7.16b, v5.16b, v6.16b\n"
- "zip2 v6.16b, v5.16b, v6.16b\n"
- "ldr q5, [%x[params], #0x0]\n"
- "ldr q31, [%x[params], #0x30]\n"
- "zip2 v30.16b, v4.16b, v2.16b\n"
- "zip1 v4.16b, v4.16b, v2.16b\n"
- "ldp x15, x14, [%x[inptrs], #0x40]\n"
- "ldr q29, [x15, x28]\n"
- "zip1 v2.16b, v3.16b, v1.16b\n"
- "zip2 v1.16b, v3.16b, v1.16b\n"
- "ldr q28, [x14, x28]\n"
- "ldp x13, x12, [%x[inptrs], #0x50]\n"
- "zip2 v3.16b, v4.16b, v2.16b\n"
- "zip1 v4.16b, v4.16b, v2.16b\n"
- "ldr q27, [x13, x28]\n"
- "ldr q26, [x12, x28]\n"
- "zip2 v25.16b, v29.16b, v27.16b\n"
- "zip1 v29.16b, v29.16b, v27.16b\n"
- "ldp x10, x9, [%x[inptrs], #0x60]\n"
- "ldr q24, [x10, x28]\n"
- "zip1 v27.16b, v28.16b, v26.16b\n"
- "zip2 v26.16b, v28.16b, v26.16b\n"
- "ldr q23, [x9, x28]\n"
- "ldp x26, x25, [%x[inptrs], #0x70]\n"
- "zip1 v2.16b, v30.16b, v1.16b\n"
- "zip2 v1.16b, v30.16b, v1.16b\n"
- "ldr q22, [x26, x28]\n"
- "ldr q21, [x25, x28]\n"
- "zip2 v20.16b, v24.16b, v22.16b\n"
- "zip1 v24.16b, v24.16b, v22.16b\n"
- "zip1 v22.16b, v23.16b, v21.16b\n"
- "zip2 v21.16b, v23.16b, v21.16b\n"
- "ldp x15, x14, [%x[inptrs], #0x0]\n"
- "ldp x13, x12, [%x[inptrs], #0x10]\n"
- "ldp x10, x9, [%x[inptrs], #0x20]\n"
- "ldp x26, x25, [%x[inptrs], #0x30]\n"
- "zip2 v28.16b, v29.16b, v27.16b\n"
- "zip1 v29.16b, v29.16b, v27.16b\n"
- "zip1 v27.16b, v25.16b, v26.16b\n"
- "zip2 v26.16b, v25.16b, v26.16b\n"
- "add %x[params], %x[params], #0x40\n"
- "zip2 v23.16b, v24.16b, v22.16b\n"
- "zip1 v24.16b, v24.16b, v22.16b\n"
- "zip1 v22.16b, v20.16b, v21.16b\n"
- "zip2 v21.16b, v20.16b, v21.16b\n"
- "mov v30.16b, v5.16b\n"
- "mov v25.16b, v5.16b\n"
- "mov v20.16b, v5.16b\n"
- "beq 2f\n"
+ "lsr x20, %x[n_channels], #0x4\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_minval]\n"
+ "ld1r { v9.4s }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_maxval]\n"
+ "ld1r { v12.4s }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_b_offset]\n"
+ "ld1r { v14.4s }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_c_offset]\n"
+ "ld1r { v13.4s }, [x19]\n"
+ "cbz x20, 2f\n"
"1:" // Loop
- "movi v19.4s, #0x0\n"
- ".inst 0x6e8495d3 // udot v19.4s, v14.16b, v4.16b\n"
- ".inst 0x6e899405 // udot v5.4s, v0.16b, v9.16b\n"
- "add x28, x28, #0x10\n"
- ".inst 0x6e9d95d3 // udot v19.4s, v14.16b, v29.16b\n"
- ".inst 0x6e849419 // udot v25.4s, v0.16b, v4.16b\n"
- "subs x11, x11, #0x1\n"
- ".inst 0x6e849605 // udot v5.4s, v16.16b, v4.16b\n"
- "ext v4.16b, v4.16b, v4.16b, #0x1\n"
- "mov v18.16b, v19.16b\n .inst 0x6e9895d2 // udot v18.4s, v14.16b, v24.16b\n"
- ".inst 0x6e8995d3 // udot v19.4s, v14.16b, v9.16b\n"
- "ext v9.16b, v9.16b, v9.16b, #0x1\n"
- ".inst 0x6e9d9619 // udot v25.4s, v16.16b, v29.16b\n"
- ".inst 0x6e9d97e5 // udot v5.4s, v31.16b, v29.16b\n"
- "ext v29.16b, v29.16b, v29.16b, #0x1\n"
- ".inst 0x6e89941e // udot v30.4s, v0.16b, v9.16b\n"
- ".inst 0x6e849414 // udot v20.4s, v0.16b, v4.16b\n"
- "movi v17.4s, #0x0\n"
- ".inst 0x6e8495d1 // udot v17.4s, v14.16b, v4.16b\n"
- ".inst 0x6e9d95d1 // udot v17.4s, v14.16b, v29.16b\n"
- ".inst 0x6e9897f9 // udot v25.4s, v31.16b, v24.16b\n"
- "ext v24.16b, v24.16b, v24.16b, #0x1\n"
- ".inst 0x6e84961e // udot v30.4s, v16.16b, v4.16b\n"
- "ldr q4, [%x[params], #0x10]\n"
- ".inst 0x6e9d9614 // udot v20.4s, v16.16b, v29.16b\n"
- "mls v5.4s, v19.4s, v11.4s\n"
- "mov v16.16b, v17.16b\n .inst 0x6e9895d0 // udot v16.4s, v14.16b, v24.16b\n"
- ".inst 0x6e8995d1 // udot v17.4s, v14.16b, v9.16b\n"
- "ldr q9, [%x[params], #0x0]\n"
- "sqrdmulh v5.4s, v5.4s, v9.4s\n"
- ".inst 0x6e9d97fe // udot v30.4s, v31.16b, v29.16b\n"
- ".inst 0x6e9897f4 // udot v20.4s, v31.16b, v24.16b\n"
- "mls v30.4s, v17.4s, v11.4s\n"
- "mls v25.4s, v18.4s, v11.4s\n"
- "mls v20.4s, v16.4s, v11.4s\n"
- "and v0.16b, v5.16b, v4.16b\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "ldr q9, [%x[params], #0x60]\n"
- "sqadd v5.4s, v5.4s, v0.4s\n"
- "and v16.16b, v30.16b, v4.16b\n"
- "and v31.16b, v25.16b, v4.16b\n"
- "and v0.16b, v20.16b, v4.16b\n"
+ "movi v15.4s, #0x0\n"
+ "ldr q27, [x13, x23]\n"
+ "subs x20, x20, #0x1\n"
+ "movi v10.4s, #0x0\n"
+ "ldr q1, [x12, x23]\n"
+ "ldp x13, x12, [%x[inptrs], #0x40]\n"
+ "ldr q25, [x11, x23]\n"
+ "zip1 v7.16b, v27.16b, v25.16b\n"
+ "ldr q23, [x10, x23]\n"
+ "zip2 v5.16b, v27.16b, v25.16b\n"
+ "ldp x11, x10, [%x[inptrs], #0x50]\n"
+ "ldr q31, [x9, x23]\n"
+ "zip1 v8.16b, v1.16b, v23.16b\n"
+ "ldr q28, [x28, x23]\n"
+ "zip2 v3.16b, v1.16b, v23.16b\n"
+ "ldp x9, x28, [%x[inptrs], #0x60]\n"
+ "zip1 v6.16b, v7.16b, v8.16b\n"
+ "ldr q21, [x27, x23]\n"
+ "zip2 v8.16b, v7.16b, v8.16b\n"
+ "ldr q26, [x26, x23]\n"
+ "zip1 v7.16b, v5.16b, v3.16b\n"
+ "ldp x27, x26, [%x[inptrs], #0x70]\n"
+ "zip2 v5.16b, v5.16b, v3.16b\n"
+ "ldr q24, [x13, x23]\n"
+ "ldr q22, [x12, x23]\n"
+ "zip1 v2.16b, v31.16b, v21.16b\n"
+ "zip2 v4.16b, v31.16b, v21.16b\n"
+ "ldp x13, x12, [%x[inptrs], #0x0]\n"
+ "zip1 v1.16b, v28.16b, v26.16b\n"
+ "ldr q20, [x11, x23]\n"
+ "zip2 v31.16b, v28.16b, v26.16b\n"
+ "ldr q16, [x10, x23]\n"
+ "zip1 v3.16b, v2.16b, v1.16b\n"
+ "ldp x11, x10, [%x[inptrs], #0x10]\n"
+ "zip2 v2.16b, v2.16b, v1.16b\n"
+ "ldr q19, [x9, x23]\n"
+ "zip1 v1.16b, v4.16b, v31.16b\n"
+ "ldr q0, [x28, x23]\n"
+ "zip1 v28.16b, v24.16b, v20.16b\n"
+ "ldp x9, x28, [%x[inptrs], #0x20]\n"
+ "zip2 v26.16b, v24.16b, v20.16b\n"
+ "ldr q18, [x27, x23]\n"
+ "zip1 v24.16b, v22.16b, v16.16b\n"
+ "ldr q17, [x26, x23]\n"
+ "zip2 v22.16b, v22.16b, v16.16b\n"
+ "ldp x27, x26, [%x[inptrs], #0x30]\n"
+ "zip2 v16.16b, v4.16b, v31.16b\n"
+ "str q7, [SP, #0x0]\n"
+ "zip1 v31.16b, v28.16b, v24.16b\n"
+ "str q5, [SP, #0x10]\n"
+ "zip1 v20.16b, v19.16b, v18.16b\n"
+ "str q1, [SP, #0x20]\n"
+ "zip2 v19.16b, v19.16b, v18.16b\n"
+ "str q16, [SP, #0x30]\n"
+ "zip1 v18.16b, v0.16b, v17.16b\n"
+ "ldr q30, [%x[params], #0x0]\n"
+ "zip2 v17.16b, v0.16b, v17.16b\n"
+ "ldr q29, [%x[params], #0x10]\n"
+ "zip2 v28.16b, v28.16b, v24.16b\n"
+ "ldr q27, [%x[params], #0x20]\n"
+ "zip1 v16.16b, v26.16b, v22.16b\n"
+ "str q16, [SP, #0x40]\n"
+ "zip2 v16.16b, v26.16b, v22.16b\n"
+ "str q16, [SP, #0x50]\n"
+ "zip1 v26.16b, v20.16b, v18.16b\n"
+ "ldr q25, [%x[params], #0x30]\n"
+ "zip2 v24.16b, v20.16b, v18.16b\n"
+ "ldr q23, [%x[params], #0x40]\n"
+ "zip1 v16.16b, v19.16b, v17.16b\n"
+ "str q16, [SP, #0x60]\n"
+ "zip2 v16.16b, v19.16b, v17.16b\n"
+ "str q16, [SP, #0x70]\n"
+ "mov v22.16b, v30.16b\n"
+ "ldr q21, [%x[params], #0x50]\n"
+ "mov v20.16b, v30.16b\n"
+ "mov v19.16b, v30.16b\n"
+ ".inst 0x6e8697be // udot v30.4s, v29.16b, v6.16b\n"
+ ".inst 0x6e8397b4 // udot v20.4s, v29.16b, v3.16b\n"
+ ".inst 0x6e83956f // udot v15.4s, v11.16b, v3.16b\n"
+ ".inst 0x6e83977e // udot v30.4s, v27.16b, v3.16b\n"
+ "ext v3.16b, v3.16b, v3.16b, #0x1\n"
+ ".inst 0x6e9f9774 // udot v20.4s, v27.16b, v31.16b\n"
+ ".inst 0x6e9f956f // udot v15.4s, v11.16b, v31.16b\n"
+ ".inst 0x6e9f973e // udot v30.4s, v25.16b, v31.16b\n"
+ "ext v31.16b, v31.16b, v31.16b, #0x1\n"
+ ".inst 0x6e9a9734 // udot v20.4s, v25.16b, v26.16b\n"
+ "mov v17.16b, v15.16b\n"
+ ".inst 0x6e86956f // udot v15.4s, v11.16b, v6.16b\n"
+ "mls v30.4s, v15.4s, v14.4s\n"
+ ".inst 0x6e9a9571 // udot v17.4s, v11.16b, v26.16b\n"
+ "ext v6.16b, v6.16b, v6.16b, #0x1\n"
+ "mls v20.4s, v17.4s, v14.4s\n"
+ "ext v26.16b, v26.16b, v26.16b, #0x1\n"
+ ".inst 0x6e8697b6 // udot v22.4s, v29.16b, v6.16b\n"
+ ".inst 0x6e8397b3 // udot v19.4s, v29.16b, v3.16b\n"
+ "ldr q29, [%x[params], #0x70]\n"
+ ".inst 0x6e83956a // udot v10.4s, v11.16b, v3.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v23.4s\n"
+ ".inst 0x6e839776 // udot v22.4s, v27.16b, v3.16b\n"
+ "ldr q3, [SP, #0x20]\n"
+ ".inst 0x6e9f9773 // udot v19.4s, v27.16b, v31.16b\n"
+ "ldr q27, [%x[params], #0x80]\n"
+ ".inst 0x6e9f956a // udot v10.4s, v11.16b, v31.16b\n"
+ "and v18.16b, v30.16b, v21.16b\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ ".inst 0x6e9f9736 // udot v22.4s, v25.16b, v31.16b\n"
+ "ldr q31, [SP, #0x40]\n"
+ ".inst 0x6e9a9733 // udot v19.4s, v25.16b, v26.16b\n"
+ "ldr q25, [%x[params], #0x90]\n"
+ "mov v17.16b, v10.16b\n"
+ ".inst 0x6e86956a // udot v10.4s, v11.16b, v6.16b\n"
+ "ldr q6, [SP, #0x0]\n"
+ "mls v22.4s, v10.4s, v14.4s\n"
+ ".inst 0x6e9a9571 // udot v17.4s, v11.16b, v26.16b\n"
+ "ldr q26, [SP, #0x60]\n"
+ "sqadd v30.4s, v30.4s, v18.4s\n"
+ "mls v19.4s, v17.4s, v14.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v23.4s\n"
+ "movi v15.4s, #0x0\n"
+ "srshl v30.4s, v30.4s, v21.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v23.4s\n"
+ ".inst 0x6e82956f // udot v15.4s, v11.16b, v2.16b\n"
+ "and v16.16b, v20.16b, v21.16b\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "srshl v5.4s, v5.4s, v4.4s\n"
- "sqadd v30.4s, v30.4s, v16.4s\n"
- "ldr q16, [%x[params], #0x40]\n"
- "sqadd v25.4s, v25.4s, v31.4s\n"
- "ldr q31, [%x[params], #0x50]\n"
- "sqadd v20.4s, v20.4s, v0.4s\n"
- "ldr q0, [%x[params], #0x30]\n"
- "add v5.4s, v5.4s, v10.4s\n"
- "srshl v30.4s, v30.4s, v4.4s\n"
- "srshl v25.4s, v25.4s, v4.4s\n"
- "srshl v20.4s, v20.4s, v4.4s\n"
- "ldr q4, [%x[params], #0x70]\n"
- "smax v5.4s, v5.4s, v13.4s\n"
- "add v30.4s, v30.4s, v10.4s\n"
- "add v25.4s, v25.4s, v10.4s\n"
- "add v20.4s, v20.4s, v10.4s\n"
- "smin v5.4s, v5.4s, v12.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
+ "add v30.4s, v30.4s, v13.4s\n"
+ "and v17.16b, v22.16b, v21.16b\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "smax v30.4s, v30.4s, v9.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v23.4s\n"
+ "ldr q23, [%x[params], #0xa0]\n"
+ ".inst 0x6e9c956f // udot v15.4s, v11.16b, v28.16b\n"
+ "sqadd v20.4s, v20.4s, v16.4s\n"
"smin v30.4s, v30.4s, v12.4s\n"
- "smin v25.4s, v25.4s, v12.4s\n"
- "smin v20.4s, v20.4s, v12.4s\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
- "movi v19.4s, #0x0\n"
- ".inst 0x6e8395d3 // udot v19.4s, v14.16b, v3.16b\n"
- ".inst 0x6e9c95d3 // udot v19.4s, v14.16b, v28.16b\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "and v16.16b, v19.16b, v21.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "srshl v20.4s, v20.4s, v21.4s\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s5, [x24, x27]\n"
- "ldr q5, [%x[params], #0x20]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "mov v18.16b, v19.16b\n .inst 0x6e9795d2 // udot v18.4s, v14.16b, v23.16b\n"
+ "mov v17.16b, v15.16b\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "str s30, [x23, x27]\n"
- ".inst 0x6e8895d3 // udot v19.4s, v14.16b, v8.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s25, [x22, x27]\n"
- "mov v30.16b, v5.16b\n"
- "str s20, [x21, x27]\n"
- "mov v25.16b, v5.16b\n"
- "mov v20.16b, v5.16b\n"
- ".inst 0x6e889405 // udot v5.4s, v0.16b, v8.16b\n"
- ".inst 0x6e839419 // udot v25.4s, v0.16b, v3.16b\n"
- ".inst 0x6e839605 // udot v5.4s, v16.16b, v3.16b\n"
- "ext v8.16b, v8.16b, v8.16b, #0x1\n"
- "add x27, x27, #0x4\n"
- "ext v3.16b, v3.16b, v3.16b, #0x1\n"
- "movi v17.4s, #0x0\n"
- ".inst 0x6e88941e // udot v30.4s, v0.16b, v8.16b\n"
- ".inst 0x6e839414 // udot v20.4s, v0.16b, v3.16b\n"
- ".inst 0x6e8395d1 // udot v17.4s, v14.16b, v3.16b\n"
- ".inst 0x6e9c9619 // udot v25.4s, v16.16b, v28.16b\n"
- ".inst 0x6e9c97e5 // udot v5.4s, v31.16b, v28.16b\n"
- "ext v28.16b, v28.16b, v28.16b, #0x1\n"
- ".inst 0x6e83961e // udot v30.4s, v16.16b, v3.16b\n"
- "ldr q3, [x9, x28]\n"
- ".inst 0x6e9c9614 // udot v20.4s, v16.16b, v28.16b\n"
- "mls v5.4s, v19.4s, v11.4s\n"
- ".inst 0x6e9c95d1 // udot v17.4s, v14.16b, v28.16b\n"
- ".inst 0x6e9797f9 // udot v25.4s, v31.16b, v23.16b\n"
- "ext v23.16b, v23.16b, v23.16b, #0x1\n"
- ".inst 0x6e9c97fe // udot v30.4s, v31.16b, v28.16b\n"
- ".inst 0x6e9797f4 // udot v20.4s, v31.16b, v23.16b\n"
- "sqrdmulh v5.4s, v5.4s, v9.4s\n"
- "mov v16.16b, v17.16b\n .inst 0x6e9795d0 // udot v16.4s, v14.16b, v23.16b\n"
- ".inst 0x6e8895d1 // udot v17.4s, v14.16b, v8.16b\n"
- "ldr q8, [x14, x28]\n"
- "mls v30.4s, v17.4s, v11.4s\n"
- "mls v25.4s, v18.4s, v11.4s\n"
- "mls v20.4s, v16.4s, v11.4s\n"
- "and v0.16b, v5.16b, v4.16b\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "ldr q9, [%x[params], #0xc0]\n"
- "sqadd v5.4s, v5.4s, v0.4s\n"
- "and v16.16b, v30.16b, v4.16b\n"
- "and v31.16b, v25.16b, v4.16b\n"
- "and v0.16b, v20.16b, v4.16b\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "srshl v5.4s, v5.4s, v4.4s\n"
- "sqadd v30.4s, v30.4s, v16.4s\n"
- "ldr q16, [%x[params], #0xa0]\n"
- "sqadd v25.4s, v25.4s, v31.4s\n"
- "ldr q31, [%x[params], #0xb0]\n"
- "sqadd v20.4s, v20.4s, v0.4s\n"
- "ldr q0, [%x[params], #0x90]\n"
- "add v5.4s, v5.4s, v10.4s\n"
- "srshl v30.4s, v30.4s, v4.4s\n"
- "srshl v25.4s, v25.4s, v4.4s\n"
- "srshl v20.4s, v20.4s, v4.4s\n"
- "ldr q4, [%x[params], #0xd0]\n"
- "smax v5.4s, v5.4s, v13.4s\n"
- "add v30.4s, v30.4s, v10.4s\n"
- "add v25.4s, v25.4s, v10.4s\n"
- "add v20.4s, v20.4s, v10.4s\n"
- "smin v5.4s, v5.4s, v12.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smin v30.4s, v30.4s, v12.4s\n"
- "smin v25.4s, v25.4s, v12.4s\n"
+ "str s30, [x25, x23]\n"
+ "srshl v22.4s, v22.4s, v21.4s\n"
+ "add v20.4s, v20.4s, v13.4s\n"
+ "ldr q30, [%x[params], #0x60]\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ ".inst 0x6e88956f // udot v15.4s, v11.16b, v8.16b\n"
+ "smax v20.4s, v20.4s, v9.4s\n"
+ "add v22.4s, v22.4s, v13.4s\n"
+ "srshl v19.4s, v19.4s, v21.4s\n"
+ "ldr q21, [%x[params], #0xb0]\n"
"smin v20.4s, v20.4s, v12.4s\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
- "movi v19.4s, #0x0\n"
- ".inst 0x6e8295d3 // udot v19.4s, v14.16b, v2.16b\n"
- ".inst 0x6e9b95d3 // udot v19.4s, v14.16b, v27.16b\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s5, [x24, x27]\n"
- "ldr q5, [%x[params], #0x80]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "smax v22.4s, v22.4s, v9.4s\n"
+ ".inst 0x6e989571 // udot v17.4s, v11.16b, v24.16b\n"
+ "add v19.4s, v19.4s, v13.4s\n"
+ "smin v22.4s, v22.4s, v12.4s\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "mov v18.16b, v19.16b\n .inst 0x6e9695d2 // udot v18.4s, v14.16b, v22.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "str s30, [x23, x27]\n"
- ".inst 0x6e8795d3 // udot v19.4s, v14.16b, v7.16b\n"
+ "smax v19.4s, v19.4s, v9.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s25, [x22, x27]\n"
- "mov v30.16b, v5.16b\n"
- "str s20, [x21, x27]\n"
- "mov v25.16b, v5.16b\n"
- "mov v20.16b, v5.16b\n"
- ".inst 0x6e879405 // udot v5.4s, v0.16b, v7.16b\n"
- ".inst 0x6e829419 // udot v25.4s, v0.16b, v2.16b\n"
- ".inst 0x6e829605 // udot v5.4s, v16.16b, v2.16b\n"
- "ext v7.16b, v7.16b, v7.16b, #0x1\n"
- "add x27, x27, #0x4\n"
+ "str s20, [x22, x23]\n"
+ "smin v19.4s, v19.4s, v12.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str s22, [x24, x23]\n"
+ "mov v22.16b, v30.16b\n"
+ "mov v20.16b, v30.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ ".inst 0x6e8297b4 // udot v20.4s, v29.16b, v2.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str s19, [x21, x23]\n"
+ "mov v19.16b, v30.16b\n"
+ "add x23, x23, #0x4\n"
+ ".inst 0x6e8897be // udot v30.4s, v29.16b, v8.16b\n"
+ ".inst 0x6e9c9774 // udot v20.4s, v27.16b, v28.16b\n"
+ "ext v8.16b, v8.16b, v8.16b, #0x1\n"
+ "movi v10.4s, #0x0\n"
+ ".inst 0x6e82977e // udot v30.4s, v27.16b, v2.16b\n"
+ ".inst 0x6e989734 // udot v20.4s, v25.16b, v24.16b\n"
+ "mls v20.4s, v17.4s, v14.4s\n"
+ ".inst 0x6e9c973e // udot v30.4s, v25.16b, v28.16b\n"
"ext v2.16b, v2.16b, v2.16b, #0x1\n"
- "movi v17.4s, #0x0\n"
- ".inst 0x6e87941e // udot v30.4s, v0.16b, v7.16b\n"
- ".inst 0x6e829414 // udot v20.4s, v0.16b, v2.16b\n"
- ".inst 0x6e8295d1 // udot v17.4s, v14.16b, v2.16b\n"
- ".inst 0x6e9b9619 // udot v25.4s, v16.16b, v27.16b\n"
- ".inst 0x6e9b97e5 // udot v5.4s, v31.16b, v27.16b\n"
- "ext v27.16b, v27.16b, v27.16b, #0x1\n"
- ".inst 0x6e82961e // udot v30.4s, v16.16b, v2.16b\n"
- "ldr q2, [x26, x28]\n"
- ".inst 0x6e9b9614 // udot v20.4s, v16.16b, v27.16b\n"
- "mls v5.4s, v19.4s, v11.4s\n"
- ".inst 0x6e9b95d1 // udot v17.4s, v14.16b, v27.16b\n"
- ".inst 0x6e9697f9 // udot v25.4s, v31.16b, v22.16b\n"
- "ext v22.16b, v22.16b, v22.16b, #0x1\n"
- ".inst 0x6e9b97fe // udot v30.4s, v31.16b, v27.16b\n"
- ".inst 0x6e9697f4 // udot v20.4s, v31.16b, v22.16b\n"
- "sqrdmulh v5.4s, v5.4s, v9.4s\n"
- "mov v16.16b, v17.16b\n .inst 0x6e9695d0 // udot v16.4s, v14.16b, v22.16b\n"
- ".inst 0x6e8795d1 // udot v17.4s, v14.16b, v7.16b\n"
- "ldr q7, [x13, x28]\n"
- "mls v30.4s, v17.4s, v11.4s\n"
- "mls v25.4s, v18.4s, v11.4s\n"
- "mls v20.4s, v16.4s, v11.4s\n"
- "and v0.16b, v5.16b, v4.16b\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "ldr q9, [%x[params], #0x120]\n"
- "sqadd v5.4s, v5.4s, v0.4s\n"
- "and v16.16b, v30.16b, v4.16b\n"
- "and v31.16b, v25.16b, v4.16b\n"
- "and v0.16b, v20.16b, v4.16b\n"
+ "mls v30.4s, v15.4s, v14.4s\n"
+ "ext v28.16b, v28.16b, v28.16b, #0x1\n"
+ "ext v24.16b, v24.16b, v24.16b, #0x1\n"
+ ".inst 0x6e8897b6 // udot v22.4s, v29.16b, v8.16b\n"
+ ".inst 0x6e8297b3 // udot v19.4s, v29.16b, v2.16b\n"
+ "ldr q29, [%x[params], #0xd0]\n"
+ ".inst 0x6e82956a // udot v10.4s, v11.16b, v2.16b\n"
+ "sqrdmulh v20.4s, v20.4s, v23.4s\n"
+ ".inst 0x6e829776 // udot v22.4s, v27.16b, v2.16b\n"
+ "ldr q2, [SP, #0x30]\n"
+ ".inst 0x6e9c9773 // udot v19.4s, v27.16b, v28.16b\n"
+ "ldr q27, [%x[params], #0xe0]\n"
+ ".inst 0x6e9c956a // udot v10.4s, v11.16b, v28.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v23.4s\n"
+ ".inst 0x6e9c9736 // udot v22.4s, v25.16b, v28.16b\n"
+ "ldr q28, [SP, #0x50]\n"
+ ".inst 0x6e989733 // udot v19.4s, v25.16b, v24.16b\n"
+ "ldr q25, [%x[params], #0xf0]\n"
+ "mov v17.16b, v10.16b\n"
+ ".inst 0x6e88956a // udot v10.4s, v11.16b, v8.16b\n"
+ "ldr q8, [SP, #0x10]\n"
+ "mls v22.4s, v10.4s, v14.4s\n"
+ ".inst 0x6e989571 // udot v17.4s, v11.16b, v24.16b\n"
+ "ldr q24, [SP, #0x70]\n"
+ "and v18.16b, v30.16b, v21.16b\n"
+ "mls v19.4s, v17.4s, v14.4s\n"
+ "and v16.16b, v20.16b, v21.16b\n"
+ "movi v15.4s, #0x0\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sqrdmulh v22.4s, v22.4s, v23.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "srshl v5.4s, v5.4s, v4.4s\n"
- "sqadd v30.4s, v30.4s, v16.4s\n"
- "ldr q16, [%x[params], #0x100]\n"
- "sqadd v25.4s, v25.4s, v31.4s\n"
- "ldr q31, [%x[params], #0x110]\n"
- "sqadd v20.4s, v20.4s, v0.4s\n"
- "ldr q0, [%x[params], #0xf0]\n"
- "add v5.4s, v5.4s, v10.4s\n"
- "srshl v30.4s, v30.4s, v4.4s\n"
- "srshl v25.4s, v25.4s, v4.4s\n"
- "srshl v20.4s, v20.4s, v4.4s\n"
- "ldr q4, [%x[params], #0x130]\n"
- "smax v5.4s, v5.4s, v13.4s\n"
- "add v30.4s, v30.4s, v10.4s\n"
- "add v25.4s, v25.4s, v10.4s\n"
- "add v20.4s, v20.4s, v10.4s\n"
- "smin v5.4s, v5.4s, v12.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
+ ".inst 0x6e83956f // udot v15.4s, v11.16b, v3.16b\n"
+ "movi v10.4s, #0x0\n"
+ "and v17.16b, v22.16b, v21.16b\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v30.4s, v30.4s, v18.4s\n"
+ "sqadd v20.4s, v20.4s, v16.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v23.4s\n"
+ "ldr q23, [%x[params], #0x100]\n"
+ ".inst 0x6e9f956f // udot v15.4s, v11.16b, v31.16b\n"
+ "srshl v30.4s, v30.4s, v21.4s\n"
+ "srshl v20.4s, v20.4s, v21.4s\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "and v16.16b, v19.16b, v21.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "add v30.4s, v30.4s, v13.4s\n"
+ "srshl v22.4s, v22.4s, v21.4s\n"
+ "add v20.4s, v20.4s, v13.4s\n"
+ "mov v17.16b, v15.16b\n"
+ "smax v30.4s, v30.4s, v9.4s\n"
+ "add v22.4s, v22.4s, v13.4s\n"
+ "smax v20.4s, v20.4s, v9.4s\n"
"smin v30.4s, v30.4s, v12.4s\n"
- "smin v25.4s, v25.4s, v12.4s\n"
+ "smax v22.4s, v22.4s, v9.4s\n"
"smin v20.4s, v20.4s, v12.4s\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
- "movi v19.4s, #0x0\n"
- ".inst 0x6e8195d3 // udot v19.4s, v14.16b, v1.16b\n"
- ".inst 0x6e9a95d3 // udot v19.4s, v14.16b, v26.16b\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "smin v22.4s, v22.4s, v12.4s\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s5, [x24, x27]\n"
- "ldr q5, [%x[params], #0xe0]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "mov v18.16b, v19.16b\n .inst 0x6e9595d2 // udot v18.4s, v14.16b, v21.16b\n"
+ "srshl v19.4s, v19.4s, v21.4s\n"
+ "ldr q21, [%x[params], #0x110]\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "str s30, [x23, x27]\n"
- ".inst 0x6e8695d3 // udot v19.4s, v14.16b, v6.16b\n"
+ "str s30, [x25, x23]\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s25, [x22, x27]\n"
- "mov v30.16b, v5.16b\n"
- "str s20, [x21, x27]\n"
- "mov v25.16b, v5.16b\n"
- "mov v20.16b, v5.16b\n"
- ".inst 0x6e869405 // udot v5.4s, v0.16b, v6.16b\n"
- ".inst 0x6e819419 // udot v25.4s, v0.16b, v1.16b\n"
- ".inst 0x6e819605 // udot v5.4s, v16.16b, v1.16b\n"
+ "ldr q30, [%x[params], #0xc0]\n"
+ "add v19.4s, v19.4s, v13.4s\n"
+ "str s20, [x22, x23]\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str s22, [x24, x23]\n"
+ "smax v19.4s, v19.4s, v9.4s\n"
+ ".inst 0x6e86956f // udot v15.4s, v11.16b, v6.16b\n"
+ "mov v22.16b, v30.16b\n"
+ "mov v20.16b, v30.16b\n"
+ "smin v19.4s, v19.4s, v12.4s\n"
+ ".inst 0x6e8397b4 // udot v20.4s, v29.16b, v3.16b\n"
+ ".inst 0x6e9a9571 // udot v17.4s, v11.16b, v26.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str s19, [x21, x23]\n"
+ "mov v19.16b, v30.16b\n"
+ "add x23, x23, #0x4\n"
+ ".inst 0x6e8697be // udot v30.4s, v29.16b, v6.16b\n"
+ ".inst 0x6e9f9774 // udot v20.4s, v27.16b, v31.16b\n"
"ext v6.16b, v6.16b, v6.16b, #0x1\n"
- "add x27, x27, #0x4\n"
- "ext v1.16b, v1.16b, v1.16b, #0x1\n"
- "movi v17.4s, #0x0\n"
- ".inst 0x6e86941e // udot v30.4s, v0.16b, v6.16b\n"
- ".inst 0x6e819414 // udot v20.4s, v0.16b, v1.16b\n"
- ".inst 0x6e8195d1 // udot v17.4s, v14.16b, v1.16b\n"
- ".inst 0x6e9a9619 // udot v25.4s, v16.16b, v26.16b\n"
- ".inst 0x6e9a97e5 // udot v5.4s, v31.16b, v26.16b\n"
+ ".inst 0x6e83977e // udot v30.4s, v27.16b, v3.16b\n"
+ ".inst 0x6e9a9734 // udot v20.4s, v25.16b, v26.16b\n"
+ "mls v20.4s, v17.4s, v14.4s\n"
+ ".inst 0x6e9f973e // udot v30.4s, v25.16b, v31.16b\n"
+ "ext v3.16b, v3.16b, v3.16b, #0x1\n"
+ "mls v30.4s, v15.4s, v14.4s\n"
+ "ext v31.16b, v31.16b, v31.16b, #0x1\n"
"ext v26.16b, v26.16b, v26.16b, #0x1\n"
- ".inst 0x6e81961e // udot v30.4s, v16.16b, v1.16b\n"
- "ldr q1, [x25, x28]\n"
- ".inst 0x6e9a9614 // udot v20.4s, v16.16b, v26.16b\n"
- "mls v5.4s, v19.4s, v11.4s\n"
- ".inst 0x6e9a95d1 // udot v17.4s, v14.16b, v26.16b\n"
- ".inst 0x6e9597f9 // udot v25.4s, v31.16b, v21.16b\n"
- "ext v21.16b, v21.16b, v21.16b, #0x1\n"
- ".inst 0x6e9a97fe // udot v30.4s, v31.16b, v26.16b\n"
- ".inst 0x6e9597f4 // udot v20.4s, v31.16b, v21.16b\n"
- "sqrdmulh v5.4s, v5.4s, v9.4s\n"
- "mov v16.16b, v17.16b\n .inst 0x6e9595d0 // udot v16.4s, v14.16b, v21.16b\n"
- ".inst 0x6e8695d1 // udot v17.4s, v14.16b, v6.16b\n"
- "ldr q6, [x12, x28]\n"
- "mls v30.4s, v17.4s, v11.4s\n"
- "mls v25.4s, v18.4s, v11.4s\n"
- "mls v20.4s, v16.4s, v11.4s\n"
- "and v0.16b, v5.16b, v4.16b\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "ldr q9, [x15, x28]\n"
- "sqadd v5.4s, v5.4s, v0.4s\n"
- "ldp x15, x14, [%x[inptrs], #0x40]\n"
- "ldr q29, [x15, x28]\n"
- "ldr q28, [x14, x28]\n"
- "and v16.16b, v30.16b, v4.16b\n"
- "and v31.16b, v25.16b, v4.16b\n"
- "and v0.16b, v20.16b, v4.16b\n"
+ ".inst 0x6e8697b6 // udot v22.4s, v29.16b, v6.16b\n"
+ ".inst 0x6e8397b3 // udot v19.4s, v29.16b, v3.16b\n"
+ "ldr q29, [%x[params], #0x130]\n"
+ ".inst 0x6e83956a // udot v10.4s, v11.16b, v3.16b\n"
+ "sqrdmulh v20.4s, v20.4s, v23.4s\n"
+ ".inst 0x6e839776 // udot v22.4s, v27.16b, v3.16b\n"
+ ".inst 0x6e9f9773 // udot v19.4s, v27.16b, v31.16b\n"
+ "ldr q27, [%x[params], #0x140]\n"
+ ".inst 0x6e9f956a // udot v10.4s, v11.16b, v31.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v23.4s\n"
+ ".inst 0x6e9f9736 // udot v22.4s, v25.16b, v31.16b\n"
+ ".inst 0x6e9a9733 // udot v19.4s, v25.16b, v26.16b\n"
+ "ldr q25, [%x[params], #0x150]\n"
+ "mov v17.16b, v10.16b\n"
+ ".inst 0x6e86956a // udot v10.4s, v11.16b, v6.16b\n"
+ "mls v22.4s, v10.4s, v14.4s\n"
+ ".inst 0x6e9a9571 // udot v17.4s, v11.16b, v26.16b\n"
+ "and v18.16b, v30.16b, v21.16b\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "and v16.16b, v20.16b, v21.16b\n"
+ "movi v15.4s, #0x0\n"
+ "mls v19.4s, v17.4s, v14.4s\n"
+ ".inst 0x6e82956f // udot v15.4s, v11.16b, v2.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v22.4s, v22.4s, v23.4s\n"
+ "movi v10.4s, #0x0\n"
+ "sqadd v30.4s, v30.4s, v18.4s\n"
+ ".inst 0x6e9c956f // udot v15.4s, v11.16b, v28.16b\n"
+ "sqrdmulh v19.4s, v19.4s, v23.4s\n"
+ "ldr q23, [%x[params], #0x160]\n"
+ "and v17.16b, v22.16b, v21.16b\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "srshl v30.4s, v30.4s, v21.4s\n"
+ "sqadd v20.4s, v20.4s, v16.4s\n"
+ "and v16.16b, v19.16b, v21.16b\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "ldp x13, x12, [%x[inptrs], #0x50]\n"
- "ldr q27, [x13, x28]\n"
- "ldr q26, [x12, x28]\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "srshl v5.4s, v5.4s, v4.4s\n"
- "sqadd v30.4s, v30.4s, v16.4s\n"
- "ldr q16, [%x[params], #0x160]\n"
- "sqadd v25.4s, v25.4s, v31.4s\n"
- "ldr q31, [%x[params], #0x170]\n"
- "sqadd v20.4s, v20.4s, v0.4s\n"
- "ldr q0, [%x[params], #0x150]\n"
- "add v5.4s, v5.4s, v10.4s\n"
- "srshl v30.4s, v30.4s, v4.4s\n"
- "srshl v25.4s, v25.4s, v4.4s\n"
- "srshl v20.4s, v20.4s, v4.4s\n"
- "ldr q4, [x10, x28]\n"
- "ldp x10, x9, [%x[inptrs], #0x60]\n"
- "ldr q24, [x10, x28]\n"
- "ldr q23, [x9, x28]\n"
- "smax v5.4s, v5.4s, v13.4s\n"
- "add v30.4s, v30.4s, v10.4s\n"
- "add v25.4s, v25.4s, v10.4s\n"
- "add v20.4s, v20.4s, v10.4s\n"
- "ldp x26, x25, [%x[inptrs], #0x70]\n"
- "ldr q22, [x26, x28]\n"
- "ldr q21, [x25, x28]\n"
- "smin v5.4s, v5.4s, v12.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "ldp x15, x14, [%x[inptrs], #0x0]\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "ldp x13, x12, [%x[inptrs], #0x10]\n"
- "ldp x10, x9, [%x[inptrs], #0x20]\n"
+ "add v30.4s, v30.4s, v13.4s\n"
+ "srshl v20.4s, v20.4s, v21.4s\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "mov v17.16b, v15.16b\n"
+ "smax v30.4s, v30.4s, v9.4s\n"
+ "add v20.4s, v20.4s, v13.4s\n"
+ "srshl v22.4s, v22.4s, v21.4s\n"
"smin v30.4s, v30.4s, v12.4s\n"
- "smin v25.4s, v25.4s, v12.4s\n"
- "ldp x26, x25, [%x[inptrs], #0x30]\n"
+ "smax v20.4s, v20.4s, v9.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "add v22.4s, v22.4s, v13.4s\n"
"smin v20.4s, v20.4s, v12.4s\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s5, [x24, x27]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "zip2 v5.16b, v9.16b, v7.16b\n"
- "zip1 v9.16b, v9.16b, v7.16b\n"
- "zip1 v7.16b, v8.16b, v6.16b\n"
- "zip2 v6.16b, v8.16b, v6.16b\n"
+ "smax v22.4s, v22.4s, v9.4s\n"
+ "srshl v19.4s, v19.4s, v21.4s\n"
+ "ldr q21, [%x[params], #0x170]\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "str s30, [x23, x27]\n"
+ "str s30, [x25, x23]\n"
+ "smin v22.4s, v22.4s, v12.4s\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s25, [x22, x27]\n"
- "zip2 v8.16b, v9.16b, v7.16b\n"
- "str s20, [x21, x27]\n"
- "zip1 v9.16b, v9.16b, v7.16b\n"
- "zip1 v7.16b, v5.16b, v6.16b\n"
- "add x27, x27, #0x4\n"
- "zip2 v6.16b, v5.16b, v6.16b\n"
- "ldr q5, [%x[params], #0x140]\n"
- "zip2 v30.16b, v4.16b, v2.16b\n"
+ "ldr q30, [%x[params], #0x120]\n"
"add %x[params], %x[params], #0x180\n"
- "zip1 v4.16b, v4.16b, v2.16b\n"
- "zip1 v2.16b, v3.16b, v1.16b\n"
- "zip2 v1.16b, v3.16b, v1.16b\n"
- "zip2 v25.16b, v29.16b, v27.16b\n"
- "zip1 v29.16b, v29.16b, v27.16b\n"
- "zip1 v27.16b, v28.16b, v26.16b\n"
- "zip2 v26.16b, v28.16b, v26.16b\n"
- "zip2 v20.16b, v24.16b, v22.16b\n"
- "zip1 v24.16b, v24.16b, v22.16b\n"
- "zip1 v22.16b, v23.16b, v21.16b\n"
- "zip2 v21.16b, v23.16b, v21.16b\n"
- "zip2 v3.16b, v4.16b, v2.16b\n"
- "zip1 v4.16b, v4.16b, v2.16b\n"
- "zip1 v2.16b, v30.16b, v1.16b\n"
- "zip2 v1.16b, v30.16b, v1.16b\n"
- "zip2 v28.16b, v29.16b, v27.16b\n"
- "zip1 v29.16b, v29.16b, v27.16b\n"
- "zip1 v27.16b, v25.16b, v26.16b\n"
- "zip2 v26.16b, v25.16b, v26.16b\n"
- "zip2 v23.16b, v24.16b, v22.16b\n"
- "zip1 v24.16b, v24.16b, v22.16b\n"
- "zip1 v22.16b, v20.16b, v21.16b\n"
- "zip2 v21.16b, v20.16b, v21.16b\n"
- "mov v30.16b, v5.16b\n"
- "mov v25.16b, v5.16b\n"
- "mov v20.16b, v5.16b\n"
- "bgt 1b\n"
- "2:" // Detached iteration
- "movi v19.4s, #0x0\n"
- ".inst 0x6e8495d3 // udot v19.4s, v14.16b, v4.16b\n"
- ".inst 0x6e899405 // udot v5.4s, v0.16b, v9.16b\n"
- "tst %x[n_channels], #0xf\n"
- ".inst 0x6e9d95d3 // udot v19.4s, v14.16b, v29.16b\n"
- ".inst 0x6e849419 // udot v25.4s, v0.16b, v4.16b\n"
- "add x28, x28, #0x10\n"
- ".inst 0x6e849605 // udot v5.4s, v16.16b, v4.16b\n"
- "ext v4.16b, v4.16b, v4.16b, #0x1\n"
- "mov v18.16b, v19.16b\n .inst 0x6e9895d2 // udot v18.4s, v14.16b, v24.16b\n"
- ".inst 0x6e8995d3 // udot v19.4s, v14.16b, v9.16b\n"
- "ext v9.16b, v9.16b, v9.16b, #0x1\n"
- ".inst 0x6e9d9619 // udot v25.4s, v16.16b, v29.16b\n"
- ".inst 0x6e9d97e5 // udot v5.4s, v31.16b, v29.16b\n"
- "ext v29.16b, v29.16b, v29.16b, #0x1\n"
- ".inst 0x6e89941e // udot v30.4s, v0.16b, v9.16b\n"
- ".inst 0x6e849414 // udot v20.4s, v0.16b, v4.16b\n"
- "movi v17.4s, #0x0\n"
- ".inst 0x6e8495d1 // udot v17.4s, v14.16b, v4.16b\n"
- ".inst 0x6e9d95d1 // udot v17.4s, v14.16b, v29.16b\n"
- ".inst 0x6e9897f9 // udot v25.4s, v31.16b, v24.16b\n"
- "ext v24.16b, v24.16b, v24.16b, #0x1\n"
- ".inst 0x6e84961e // udot v30.4s, v16.16b, v4.16b\n"
- "ldr q4, [%x[params], #0x10]\n"
- ".inst 0x6e9d9614 // udot v20.4s, v16.16b, v29.16b\n"
- "mls v5.4s, v19.4s, v11.4s\n"
- "mov v16.16b, v17.16b\n .inst 0x6e9895d0 // udot v16.4s, v14.16b, v24.16b\n"
- ".inst 0x6e8995d1 // udot v17.4s, v14.16b, v9.16b\n"
- "ldr q9, [%x[params], #0x0]\n"
- "sqrdmulh v5.4s, v5.4s, v9.4s\n"
- ".inst 0x6e9d97fe // udot v30.4s, v31.16b, v29.16b\n"
- ".inst 0x6e9897f4 // udot v20.4s, v31.16b, v24.16b\n"
- "mls v30.4s, v17.4s, v11.4s\n"
- "mls v25.4s, v18.4s, v11.4s\n"
- "mls v20.4s, v16.4s, v11.4s\n"
- "and v0.16b, v5.16b, v4.16b\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "ldr q9, [%x[params], #0x60]\n"
- "sqadd v5.4s, v5.4s, v0.4s\n"
- "and v16.16b, v30.16b, v4.16b\n"
- "and v31.16b, v25.16b, v4.16b\n"
- "and v0.16b, v20.16b, v4.16b\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "srshl v5.4s, v5.4s, v4.4s\n"
- "sqadd v30.4s, v30.4s, v16.4s\n"
- "ldr q16, [%x[params], #0x40]\n"
- "sqadd v25.4s, v25.4s, v31.4s\n"
- "ldr q31, [%x[params], #0x50]\n"
- "sqadd v20.4s, v20.4s, v0.4s\n"
- "ldr q0, [%x[params], #0x30]\n"
- "add v5.4s, v5.4s, v10.4s\n"
- "srshl v30.4s, v30.4s, v4.4s\n"
- "srshl v25.4s, v25.4s, v4.4s\n"
- "srshl v20.4s, v20.4s, v4.4s\n"
- "ldr q4, [%x[params], #0x70]\n"
- "smax v5.4s, v5.4s, v13.4s\n"
- "add v30.4s, v30.4s, v10.4s\n"
- "add v25.4s, v25.4s, v10.4s\n"
- "add v20.4s, v20.4s, v10.4s\n"
- "smin v5.4s, v5.4s, v12.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smin v30.4s, v30.4s, v12.4s\n"
- "smin v25.4s, v25.4s, v12.4s\n"
- "smin v20.4s, v20.4s, v12.4s\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
- "movi v19.4s, #0x0\n"
- ".inst 0x6e8395d3 // udot v19.4s, v14.16b, v3.16b\n"
- ".inst 0x6e9c95d3 // udot v19.4s, v14.16b, v28.16b\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s5, [x24, x27]\n"
- "ldr q5, [%x[params], #0x20]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "mov v18.16b, v19.16b\n .inst 0x6e9795d2 // udot v18.4s, v14.16b, v23.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "str s30, [x23, x27]\n"
- ".inst 0x6e8895d3 // udot v19.4s, v14.16b, v8.16b\n"
+ "add v19.4s, v19.4s, v13.4s\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s25, [x22, x27]\n"
- "mov v30.16b, v5.16b\n"
- "str s20, [x21, x27]\n"
- "mov v25.16b, v5.16b\n"
- "mov v20.16b, v5.16b\n"
- ".inst 0x6e889405 // udot v5.4s, v0.16b, v8.16b\n"
- ".inst 0x6e839419 // udot v25.4s, v0.16b, v3.16b\n"
- ".inst 0x6e839605 // udot v5.4s, v16.16b, v3.16b\n"
+ "str s20, [x22, x23]\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ ".inst 0x6e88956f // udot v15.4s, v11.16b, v8.16b\n"
+ "smax v19.4s, v19.4s, v9.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str s22, [x24, x23]\n"
+ "smin v19.4s, v19.4s, v12.4s\n"
+ "mov v22.16b, v30.16b\n"
+ "mov v20.16b, v30.16b\n"
+ ".inst 0x6e8297b4 // udot v20.4s, v29.16b, v2.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ ".inst 0x6e989571 // udot v17.4s, v11.16b, v24.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str s19, [x21, x23]\n"
+ "mov v19.16b, v30.16b\n"
+ "add x23, x23, #0x4\n"
+ ".inst 0x6e8897be // udot v30.4s, v29.16b, v8.16b\n"
+ ".inst 0x6e9c9774 // udot v20.4s, v27.16b, v28.16b\n"
"ext v8.16b, v8.16b, v8.16b, #0x1\n"
- "add x27, x27, #0x4\n"
- "ext v3.16b, v3.16b, v3.16b, #0x1\n"
- "movi v17.4s, #0x0\n"
- ".inst 0x6e88941e // udot v30.4s, v0.16b, v8.16b\n"
- ".inst 0x6e839414 // udot v20.4s, v0.16b, v3.16b\n"
- ".inst 0x6e8395d1 // udot v17.4s, v14.16b, v3.16b\n"
- ".inst 0x6e9c9619 // udot v25.4s, v16.16b, v28.16b\n"
- ".inst 0x6e9c97e5 // udot v5.4s, v31.16b, v28.16b\n"
+ ".inst 0x6e82977e // udot v30.4s, v27.16b, v2.16b\n"
+ ".inst 0x6e989734 // udot v20.4s, v25.16b, v24.16b\n"
+ "mls v20.4s, v17.4s, v14.4s\n"
+ ".inst 0x6e9c973e // udot v30.4s, v25.16b, v28.16b\n"
+ "ext v2.16b, v2.16b, v2.16b, #0x1\n"
+ "mls v30.4s, v15.4s, v14.4s\n"
"ext v28.16b, v28.16b, v28.16b, #0x1\n"
- ".inst 0x6e83961e // udot v30.4s, v16.16b, v3.16b\n"
- ".inst 0x6e9c9614 // udot v20.4s, v16.16b, v28.16b\n"
- "mls v5.4s, v19.4s, v11.4s\n"
- ".inst 0x6e9c95d1 // udot v17.4s, v14.16b, v28.16b\n"
- ".inst 0x6e9797f9 // udot v25.4s, v31.16b, v23.16b\n"
- "ext v23.16b, v23.16b, v23.16b, #0x1\n"
- ".inst 0x6e9c97fe // udot v30.4s, v31.16b, v28.16b\n"
- ".inst 0x6e9797f4 // udot v20.4s, v31.16b, v23.16b\n"
- "sqrdmulh v5.4s, v5.4s, v9.4s\n"
- "mov v16.16b, v17.16b\n .inst 0x6e9795d0 // udot v16.4s, v14.16b, v23.16b\n"
- ".inst 0x6e8895d1 // udot v17.4s, v14.16b, v8.16b\n"
- "mls v30.4s, v17.4s, v11.4s\n"
- "mls v25.4s, v18.4s, v11.4s\n"
- "mls v20.4s, v16.4s, v11.4s\n"
- "and v0.16b, v5.16b, v4.16b\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "ldr q9, [%x[params], #0xc0]\n"
- "sqadd v5.4s, v5.4s, v0.4s\n"
- "and v16.16b, v30.16b, v4.16b\n"
- "and v31.16b, v25.16b, v4.16b\n"
- "and v0.16b, v20.16b, v4.16b\n"
+ "ext v24.16b, v24.16b, v24.16b, #0x1\n"
+ ".inst 0x6e8897b6 // udot v22.4s, v29.16b, v8.16b\n"
+ ".inst 0x6e8297b3 // udot v19.4s, v29.16b, v2.16b\n"
+ ".inst 0x6e82956a // udot v10.4s, v11.16b, v2.16b\n"
+ "sqrdmulh v20.4s, v20.4s, v23.4s\n"
+ ".inst 0x6e829776 // udot v22.4s, v27.16b, v2.16b\n"
+ ".inst 0x6e9c9773 // udot v19.4s, v27.16b, v28.16b\n"
+ ".inst 0x6e9c956a // udot v10.4s, v11.16b, v28.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v23.4s\n"
+ ".inst 0x6e9c9736 // udot v22.4s, v25.16b, v28.16b\n"
+ ".inst 0x6e989733 // udot v19.4s, v25.16b, v24.16b\n"
+ "mov v17.16b, v10.16b\n"
+ ".inst 0x6e88956a // udot v10.4s, v11.16b, v8.16b\n"
+ "mls v22.4s, v10.4s, v14.4s\n"
+ ".inst 0x6e989571 // udot v17.4s, v11.16b, v24.16b\n"
+ "and v18.16b, v30.16b, v21.16b\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "and v16.16b, v20.16b, v21.16b\n"
+ "mls v19.4s, v17.4s, v14.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v23.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "srshl v5.4s, v5.4s, v4.4s\n"
- "sqadd v30.4s, v30.4s, v16.4s\n"
- "ldr q16, [%x[params], #0xa0]\n"
- "sqadd v25.4s, v25.4s, v31.4s\n"
- "ldr q31, [%x[params], #0xb0]\n"
- "sqadd v20.4s, v20.4s, v0.4s\n"
- "ldr q0, [%x[params], #0x90]\n"
- "add v5.4s, v5.4s, v10.4s\n"
- "srshl v30.4s, v30.4s, v4.4s\n"
- "srshl v25.4s, v25.4s, v4.4s\n"
- "srshl v20.4s, v20.4s, v4.4s\n"
- "ldr q4, [%x[params], #0xd0]\n"
- "smax v5.4s, v5.4s, v13.4s\n"
- "add v30.4s, v30.4s, v10.4s\n"
- "add v25.4s, v25.4s, v10.4s\n"
- "add v20.4s, v20.4s, v10.4s\n"
- "smin v5.4s, v5.4s, v12.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smin v30.4s, v30.4s, v12.4s\n"
- "smin v25.4s, v25.4s, v12.4s\n"
- "smin v20.4s, v20.4s, v12.4s\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
- "movi v19.4s, #0x0\n"
- ".inst 0x6e8295d3 // udot v19.4s, v14.16b, v2.16b\n"
- ".inst 0x6e9b95d3 // udot v19.4s, v14.16b, v27.16b\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s5, [x24, x27]\n"
- "ldr q5, [%x[params], #0x80]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "mov v18.16b, v19.16b\n .inst 0x6e9695d2 // udot v18.4s, v14.16b, v22.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "str s30, [x23, x27]\n"
- ".inst 0x6e8795d3 // udot v19.4s, v14.16b, v7.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s25, [x22, x27]\n"
- "mov v30.16b, v5.16b\n"
- "str s20, [x21, x27]\n"
- "mov v25.16b, v5.16b\n"
- "mov v20.16b, v5.16b\n"
- ".inst 0x6e879405 // udot v5.4s, v0.16b, v7.16b\n"
- ".inst 0x6e829419 // udot v25.4s, v0.16b, v2.16b\n"
- ".inst 0x6e829605 // udot v5.4s, v16.16b, v2.16b\n"
- "ext v7.16b, v7.16b, v7.16b, #0x1\n"
- "add x27, x27, #0x4\n"
- "ext v2.16b, v2.16b, v2.16b, #0x1\n"
- "movi v17.4s, #0x0\n"
- ".inst 0x6e87941e // udot v30.4s, v0.16b, v7.16b\n"
- ".inst 0x6e829414 // udot v20.4s, v0.16b, v2.16b\n"
- ".inst 0x6e8295d1 // udot v17.4s, v14.16b, v2.16b\n"
- ".inst 0x6e9b9619 // udot v25.4s, v16.16b, v27.16b\n"
- ".inst 0x6e9b97e5 // udot v5.4s, v31.16b, v27.16b\n"
- "ext v27.16b, v27.16b, v27.16b, #0x1\n"
- ".inst 0x6e82961e // udot v30.4s, v16.16b, v2.16b\n"
- ".inst 0x6e9b9614 // udot v20.4s, v16.16b, v27.16b\n"
- "mls v5.4s, v19.4s, v11.4s\n"
- ".inst 0x6e9b95d1 // udot v17.4s, v14.16b, v27.16b\n"
- ".inst 0x6e9697f9 // udot v25.4s, v31.16b, v22.16b\n"
- "ext v22.16b, v22.16b, v22.16b, #0x1\n"
- ".inst 0x6e9b97fe // udot v30.4s, v31.16b, v27.16b\n"
- ".inst 0x6e9697f4 // udot v20.4s, v31.16b, v22.16b\n"
- "sqrdmulh v5.4s, v5.4s, v9.4s\n"
- "mov v16.16b, v17.16b\n .inst 0x6e9695d0 // udot v16.4s, v14.16b, v22.16b\n"
- ".inst 0x6e8795d1 // udot v17.4s, v14.16b, v7.16b\n"
- "mls v30.4s, v17.4s, v11.4s\n"
- "mls v25.4s, v18.4s, v11.4s\n"
- "mls v20.4s, v16.4s, v11.4s\n"
- "and v0.16b, v5.16b, v4.16b\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "ldr q9, [%x[params], #0x120]\n"
- "sqadd v5.4s, v5.4s, v0.4s\n"
- "and v16.16b, v30.16b, v4.16b\n"
- "and v31.16b, v25.16b, v4.16b\n"
- "and v0.16b, v20.16b, v4.16b\n"
+ "sqadd v30.4s, v30.4s, v18.4s\n"
+ "and v17.16b, v22.16b, v21.16b\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "srshl v30.4s, v30.4s, v21.4s\n"
+ "sqadd v20.4s, v20.4s, v16.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v23.4s\n"
+ "add v30.4s, v30.4s, v13.4s\n"
+ "srshl v20.4s, v20.4s, v21.4s\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "and v16.16b, v19.16b, v21.16b\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "srshl v5.4s, v5.4s, v4.4s\n"
- "sqadd v30.4s, v30.4s, v16.4s\n"
- "ldr q16, [%x[params], #0x100]\n"
- "sqadd v25.4s, v25.4s, v31.4s\n"
- "ldr q31, [%x[params], #0x110]\n"
- "sqadd v20.4s, v20.4s, v0.4s\n"
- "ldr q0, [%x[params], #0xf0]\n"
- "add v5.4s, v5.4s, v10.4s\n"
- "srshl v30.4s, v30.4s, v4.4s\n"
- "srshl v25.4s, v25.4s, v4.4s\n"
- "srshl v20.4s, v20.4s, v4.4s\n"
- "ldr q4, [%x[params], #0x130]\n"
- "smax v5.4s, v5.4s, v13.4s\n"
- "add v30.4s, v30.4s, v10.4s\n"
- "add v25.4s, v25.4s, v10.4s\n"
- "add v20.4s, v20.4s, v10.4s\n"
- "smin v5.4s, v5.4s, v12.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
+ "smax v30.4s, v30.4s, v9.4s\n"
+ "srshl v22.4s, v22.4s, v21.4s\n"
+ "add v20.4s, v20.4s, v13.4s\n"
"smin v30.4s, v30.4s, v12.4s\n"
- "smin v25.4s, v25.4s, v12.4s\n"
- "smin v20.4s, v20.4s, v12.4s\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
- "movi v19.4s, #0x0\n"
- ".inst 0x6e8195d3 // udot v19.4s, v14.16b, v1.16b\n"
- ".inst 0x6e9a95d3 // udot v19.4s, v14.16b, v26.16b\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "add v22.4s, v22.4s, v13.4s\n"
+ "smax v20.4s, v20.4s, v9.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s5, [x24, x27]\n"
- "ldr q5, [%x[params], #0xe0]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "mov v18.16b, v19.16b\n .inst 0x6e9595d2 // udot v18.4s, v14.16b, v21.16b\n"
- "add %x[params], %x[params], #0x140\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "str s30, [x23, x27]\n"
- ".inst 0x6e8695d3 // udot v19.4s, v14.16b, v6.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s25, [x22, x27]\n"
- "mov v30.16b, v5.16b\n"
- "str s20, [x21, x27]\n"
- "mov v25.16b, v5.16b\n"
- "mov v20.16b, v5.16b\n"
- ".inst 0x6e869405 // udot v5.4s, v0.16b, v6.16b\n"
- ".inst 0x6e819419 // udot v25.4s, v0.16b, v1.16b\n"
- ".inst 0x6e819605 // udot v5.4s, v16.16b, v1.16b\n"
- "ext v6.16b, v6.16b, v6.16b, #0x1\n"
- "add x27, x27, #0x4\n"
- "ext v1.16b, v1.16b, v1.16b, #0x1\n"
- "movi v17.4s, #0x0\n"
- ".inst 0x6e86941e // udot v30.4s, v0.16b, v6.16b\n"
- ".inst 0x6e819414 // udot v20.4s, v0.16b, v1.16b\n"
- ".inst 0x6e8195d1 // udot v17.4s, v14.16b, v1.16b\n"
- ".inst 0x6e9a9619 // udot v25.4s, v16.16b, v26.16b\n"
- ".inst 0x6e9a97e5 // udot v5.4s, v31.16b, v26.16b\n"
- "ext v26.16b, v26.16b, v26.16b, #0x1\n"
- ".inst 0x6e81961e // udot v30.4s, v16.16b, v1.16b\n"
- ".inst 0x6e9a9614 // udot v20.4s, v16.16b, v26.16b\n"
- "mls v5.4s, v19.4s, v11.4s\n"
- ".inst 0x6e9a95d1 // udot v17.4s, v14.16b, v26.16b\n"
- ".inst 0x6e9597f9 // udot v25.4s, v31.16b, v21.16b\n"
- "ext v21.16b, v21.16b, v21.16b, #0x1\n"
- ".inst 0x6e9a97fe // udot v30.4s, v31.16b, v26.16b\n"
- ".inst 0x6e9597f4 // udot v20.4s, v31.16b, v21.16b\n"
- "sqrdmulh v5.4s, v5.4s, v9.4s\n"
- "mov v16.16b, v17.16b\n .inst 0x6e9595d0 // udot v16.4s, v14.16b, v21.16b\n"
- ".inst 0x6e8695d1 // udot v17.4s, v14.16b, v6.16b\n"
- "mls v30.4s, v17.4s, v11.4s\n"
- "mls v25.4s, v18.4s, v11.4s\n"
- "mls v20.4s, v16.4s, v11.4s\n"
- "and v0.16b, v5.16b, v4.16b\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "sqadd v5.4s, v5.4s, v0.4s\n"
- "and v16.16b, v30.16b, v4.16b\n"
- "and v31.16b, v25.16b, v4.16b\n"
- "and v0.16b, v20.16b, v4.16b\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqadd v30.4s, v30.4s, v16.4s\n"
- "sqadd v25.4s, v25.4s, v31.4s\n"
- "sqadd v20.4s, v20.4s, v0.4s\n"
- "srshl v5.4s, v5.4s, v4.4s\n"
- "srshl v30.4s, v30.4s, v4.4s\n"
- "srshl v25.4s, v25.4s, v4.4s\n"
- "srshl v20.4s, v20.4s, v4.4s\n"
- "add v5.4s, v5.4s, v10.4s\n"
- "add v30.4s, v30.4s, v10.4s\n"
- "add v25.4s, v25.4s, v10.4s\n"
- "add v20.4s, v20.4s, v10.4s\n"
- "smax v5.4s, v5.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smin v5.4s, v5.4s, v12.4s\n"
- "smin v30.4s, v30.4s, v12.4s\n"
- "smin v25.4s, v25.4s, v12.4s\n"
+ "smax v22.4s, v22.4s, v9.4s\n"
"smin v20.4s, v20.4s, v12.4s\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "srshl v19.4s, v19.4s, v21.4s\n"
+ "smin v22.4s, v22.4s, v12.4s\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s30, [x25, x23]\n"
+ "add v19.4s, v19.4s, v13.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
- "str s5, [x24, x27]\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "str s30, [x23, x27]\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str s22, [x24, x23]\n"
+ "smax v19.4s, v19.4s, v9.4s\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s25, [x22, x27]\n"
- "str s20, [x21, x27]\n"
- "add x27, x27, #0x4\n"
- "beq 35f\n"
- "3:" // Oddments
- "and x20, %x[n_channels], #0xf\n"
- "add x15, x15, x28\n"
- "add x14, x14, x28\n"
- "add x13, x13, x28\n"
- "add x12, x12, x28\n"
- "add x10, x10, x28\n"
- "add x9, x9, x28\n"
- "add x26, x26, x28\n"
- "add x25, x25, x28\n"
- "tbz %x[n_channels], #3, 7f\n"
- "ldr d9, [x15], #0x8\n"
- "ldr d8, [x14], #0x8\n"
- "ldr d7, [x13], #0x8\n"
- "ldr d6, [x12], #0x8\n"
- "ldr d4, [x10], #0x8\n"
- "ldr d3, [x9], #0x8\n"
- "ldr d2, [x26], #0x8\n"
- "ldr d1, [x25], #0x8\n"
- "tbz %x[n_channels], #2, 5f\n"
- "ld1 { v9.s }[2], [x15], #0x4\n"
- "ld1 { v8.s }[2], [x14], #0x4\n"
- "ld1 { v7.s }[2], [x13], #0x4\n"
- "ld1 { v6.s }[2], [x12], #0x4\n"
- "ld1 { v4.s }[2], [x10], #0x4\n"
- "ld1 { v3.s }[2], [x9], #0x4\n"
- "ld1 { v2.s }[2], [x26], #0x4\n"
- "ld1 { v1.s }[2], [x25], #0x4\n"
- "tbz %x[n_channels], #1, 4f\n"
- "ld1 { v9.h }[6], [x15], #0x2\n"
- "ld1 { v8.h }[6], [x14], #0x2\n"
- "ld1 { v7.h }[6], [x13], #0x2\n"
- "ld1 { v6.h }[6], [x12], #0x2\n"
- "ld1 { v4.h }[6], [x10], #0x2\n"
- "ld1 { v3.h }[6], [x9], #0x2\n"
- "ld1 { v2.h }[6], [x26], #0x2\n"
- "ld1 { v1.h }[6], [x25], #0x2\n"
- "tbz %x[n_channels], #0, 11f\n"
- "ld1 { v9.b }[14], [x15], #0x1\n"
- "ld1 { v8.b }[14], [x14], #0x1\n"
- "ld1 { v7.b }[14], [x13], #0x1\n"
- "ld1 { v6.b }[14], [x12], #0x1\n"
- "ld1 { v4.b }[14], [x10], #0x1\n"
- "ld1 { v3.b }[14], [x9], #0x1\n"
- "ld1 { v2.b }[14], [x26], #0x1\n"
- "ld1 { v1.b }[14], [x25], #0x1\n"
- "b 11f\n"
- "4:" // Oddments: Load (A): Bit 3: Bit 2: Bit 1: Unset
- "tbz %x[n_channels], #0, 11f\n"
- "ld1 { v9.b }[12], [x15], #0x1\n"
- "ld1 { v8.b }[12], [x14], #0x1\n"
- "ld1 { v7.b }[12], [x13], #0x1\n"
- "ld1 { v6.b }[12], [x12], #0x1\n"
- "ld1 { v4.b }[12], [x10], #0x1\n"
- "ld1 { v3.b }[12], [x9], #0x1\n"
- "ld1 { v2.b }[12], [x26], #0x1\n"
- "ld1 { v1.b }[12], [x25], #0x1\n"
- "b 11f\n"
- "5:" // Oddments: Load (A): Bit 3: Bit 2: Unset
- "tbz %x[n_channels], #1, 6f\n"
- "ld1 { v9.h }[4], [x15], #0x2\n"
- "ld1 { v8.h }[4], [x14], #0x2\n"
- "ld1 { v7.h }[4], [x13], #0x2\n"
- "ld1 { v6.h }[4], [x12], #0x2\n"
- "ld1 { v4.h }[4], [x10], #0x2\n"
- "ld1 { v3.h }[4], [x9], #0x2\n"
- "ld1 { v2.h }[4], [x26], #0x2\n"
- "ld1 { v1.h }[4], [x25], #0x2\n"
- "tbz %x[n_channels], #0, 11f\n"
- "ld1 { v9.b }[10], [x15], #0x1\n"
- "ld1 { v8.b }[10], [x14], #0x1\n"
- "ld1 { v7.b }[10], [x13], #0x1\n"
- "ld1 { v6.b }[10], [x12], #0x1\n"
- "ld1 { v4.b }[10], [x10], #0x1\n"
- "ld1 { v3.b }[10], [x9], #0x1\n"
- "ld1 { v2.b }[10], [x26], #0x1\n"
- "ld1 { v1.b }[10], [x25], #0x1\n"
- "b 11f\n"
- "6:" // Oddments: Load (A): Bit 3: Bit 2: Unset: Bit 1: Unset
- "tbz %x[n_channels], #0, 11f\n"
- "ld1 { v9.b }[8], [x15], #0x1\n"
- "ld1 { v8.b }[8], [x14], #0x1\n"
- "ld1 { v7.b }[8], [x13], #0x1\n"
- "ld1 { v6.b }[8], [x12], #0x1\n"
- "ld1 { v4.b }[8], [x10], #0x1\n"
- "ld1 { v3.b }[8], [x9], #0x1\n"
- "ld1 { v2.b }[8], [x26], #0x1\n"
- "ld1 { v1.b }[8], [x25], #0x1\n"
- "b 11f\n"
- "7:" // Oddments: Load (A): Bit 3: Unset
- "tbz %x[n_channels], #2, 9f\n"
- "ldr s9, [x15], #0x4\n"
- "ldr s8, [x14], #0x4\n"
- "ldr s7, [x13], #0x4\n"
- "ldr s6, [x12], #0x4\n"
- "ldr s4, [x10], #0x4\n"
- "ldr s3, [x9], #0x4\n"
- "ldr s2, [x26], #0x4\n"
- "ldr s1, [x25], #0x4\n"
- "tbz %x[n_channels], #1, 8f\n"
- "ld1 { v9.h }[2], [x15], #0x2\n"
- "ld1 { v8.h }[2], [x14], #0x2\n"
- "ld1 { v7.h }[2], [x13], #0x2\n"
- "ld1 { v6.h }[2], [x12], #0x2\n"
- "ld1 { v4.h }[2], [x10], #0x2\n"
- "ld1 { v3.h }[2], [x9], #0x2\n"
- "ld1 { v2.h }[2], [x26], #0x2\n"
- "ld1 { v1.h }[2], [x25], #0x2\n"
- "tbz %x[n_channels], #0, 11f\n"
- "ld1 { v9.b }[6], [x15], #0x1\n"
- "ld1 { v8.b }[6], [x14], #0x1\n"
- "ld1 { v7.b }[6], [x13], #0x1\n"
- "ld1 { v6.b }[6], [x12], #0x1\n"
- "ld1 { v4.b }[6], [x10], #0x1\n"
- "ld1 { v3.b }[6], [x9], #0x1\n"
- "ld1 { v2.b }[6], [x26], #0x1\n"
- "ld1 { v1.b }[6], [x25], #0x1\n"
- "b 11f\n"
- "8:" // Oddments: Load (A): Bit 3: Unset: Bit 2: Bit 1: Unset
- "tbz %x[n_channels], #0, 11f\n"
- "ld1 { v9.b }[4], [x15], #0x1\n"
- "ld1 { v8.b }[4], [x14], #0x1\n"
- "ld1 { v7.b }[4], [x13], #0x1\n"
- "ld1 { v6.b }[4], [x12], #0x1\n"
- "ld1 { v4.b }[4], [x10], #0x1\n"
- "ld1 { v3.b }[4], [x9], #0x1\n"
- "ld1 { v2.b }[4], [x26], #0x1\n"
- "ld1 { v1.b }[4], [x25], #0x1\n"
- "b 11f\n"
- "9:" // Oddments: Load (A): Bit 3: Unset: Bit 2: Unset
- "tbz %x[n_channels], #1, 10f\n"
- "ldr h9, [x15], #0x2\n"
- "ldr h8, [x14], #0x2\n"
- "ldr h7, [x13], #0x2\n"
- "ldr h6, [x12], #0x2\n"
- "ldr h4, [x10], #0x2\n"
- "ldr h3, [x9], #0x2\n"
- "ldr h2, [x26], #0x2\n"
- "ldr h1, [x25], #0x2\n"
- "tbz %x[n_channels], #0, 11f\n"
- "ld1 { v9.b }[2], [x15], #0x1\n"
- "ld1 { v8.b }[2], [x14], #0x1\n"
- "ld1 { v7.b }[2], [x13], #0x1\n"
- "ld1 { v6.b }[2], [x12], #0x1\n"
- "ld1 { v4.b }[2], [x10], #0x1\n"
- "ld1 { v3.b }[2], [x9], #0x1\n"
- "ld1 { v2.b }[2], [x26], #0x1\n"
- "ld1 { v1.b }[2], [x25], #0x1\n"
- "b 11f\n"
- "10:" // Oddments: Load (A): Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
- "ldr b9, [x15], #0x1\n"
- "ldr b8, [x14], #0x1\n"
- "ldr b7, [x13], #0x1\n"
- "ldr b6, [x12], #0x1\n"
- "ldr b4, [x10], #0x1\n"
- "ldr b3, [x9], #0x1\n"
- "ldr b2, [x26], #0x1\n"
- "ldr b1, [x25], #0x1\n"
- "11:" // Oddments: Load (A): Bit 3: End
- "ldp x15, x14, [%x[inptrs], #0x40]\n"
- "ldp x13, x12, [%x[inptrs], #0x50]\n"
- "add x15, x15, x28\n"
- "add x14, x14, x28\n"
- "ldp x10, x9, [%x[inptrs], #0x60]\n"
- "ldp x26, x25, [%x[inptrs], #0x70]\n"
- "add x13, x13, x28\n"
- "add x12, x12, x28\n"
- "add x10, x10, x28\n"
- "add x9, x9, x28\n"
- "add x26, x26, x28\n"
- "add x25, x25, x28\n"
- "tbz %x[n_channels], #3, 15f\n"
- "ldr d29, [x15], #0x8\n"
- "ldr d28, [x14], #0x8\n"
- "ldr d27, [x13], #0x8\n"
- "ldr d26, [x12], #0x8\n"
- "ldr d24, [x10], #0x8\n"
- "ldr d23, [x9], #0x8\n"
- "ldr d22, [x26], #0x8\n"
- "ldr d21, [x25], #0x8\n"
- "tbz %x[n_channels], #2, 13f\n"
- "ld1 { v29.s }[2], [x15], #0x4\n"
- "ld1 { v28.s }[2], [x14], #0x4\n"
+ "str s20, [x22, x23]\n"
+ "smin v19.4s, v19.4s, v12.4s\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str s19, [x21, x23]\n"
+ "add x23, x23, #0x4\n"
+ "bgt 1b\n"
+ "tst %x[n_channels], #0xf\n"
+ "beq 34f\n"
+ "2:" // Oddments
+ "and x19, %x[n_channels], #0xf\n"
+ "add x13, x13, x23\n"
+ "add x12, x12, x23\n"
+ "add x11, x11, x23\n"
+ "add x10, x10, x23\n"
+ "add x9, x9, x23\n"
+ "add x28, x28, x23\n"
+ "add x27, x27, x23\n"
+ "add x26, x26, x23\n"
+ "tbz %x[n_channels], #3, 6f\n"
+ "ld1 { v27.d }[0], [x13], #0x8\n"
+ "ld1 { v1.d }[0], [x12], #0x8\n"
+ "ld1 { v25.d }[0], [x11], #0x8\n"
+ "ld1 { v23.d }[0], [x10], #0x8\n"
+ "ld1 { v31.d }[0], [x9], #0x8\n"
+ "ld1 { v28.d }[0], [x28], #0x8\n"
+ "ld1 { v21.d }[0], [x27], #0x8\n"
+ "ld1 { v26.d }[0], [x26], #0x8\n"
+ "tbz %x[n_channels], #2, 4f\n"
"ld1 { v27.s }[2], [x13], #0x4\n"
- "ld1 { v26.s }[2], [x12], #0x4\n"
- "ld1 { v24.s }[2], [x10], #0x4\n"
- "ld1 { v23.s }[2], [x9], #0x4\n"
- "ld1 { v22.s }[2], [x26], #0x4\n"
- "ld1 { v21.s }[2], [x25], #0x4\n"
- "tbz %x[n_channels], #1, 12f\n"
- "ld1 { v29.h }[6], [x15], #0x2\n"
- "ld1 { v28.h }[6], [x14], #0x2\n"
+ "ld1 { v1.s }[2], [x12], #0x4\n"
+ "ld1 { v25.s }[2], [x11], #0x4\n"
+ "ld1 { v23.s }[2], [x10], #0x4\n"
+ "ld1 { v31.s }[2], [x9], #0x4\n"
+ "ld1 { v28.s }[2], [x28], #0x4\n"
+ "ld1 { v21.s }[2], [x27], #0x4\n"
+ "ld1 { v26.s }[2], [x26], #0x4\n"
+ "tbz %x[n_channels], #1, 3f\n"
"ld1 { v27.h }[6], [x13], #0x2\n"
- "ld1 { v26.h }[6], [x12], #0x2\n"
- "ld1 { v24.h }[6], [x10], #0x2\n"
- "ld1 { v23.h }[6], [x9], #0x2\n"
- "ld1 { v22.h }[6], [x26], #0x2\n"
- "ld1 { v21.h }[6], [x25], #0x2\n"
- "tbz %x[n_channels], #0, 19f\n"
- "ld1 { v29.b }[14], [x15], #0x1\n"
- "ld1 { v28.b }[14], [x14], #0x1\n"
+ "ld1 { v1.h }[6], [x12], #0x2\n"
+ "ld1 { v25.h }[6], [x11], #0x2\n"
+ "ld1 { v23.h }[6], [x10], #0x2\n"
+ "ld1 { v31.h }[6], [x9], #0x2\n"
+ "ld1 { v28.h }[6], [x28], #0x2\n"
+ "ld1 { v21.h }[6], [x27], #0x2\n"
+ "ld1 { v26.h }[6], [x26], #0x2\n"
+ "tbz %x[n_channels], #0, 10f\n"
"ld1 { v27.b }[14], [x13], #0x1\n"
- "ld1 { v26.b }[14], [x12], #0x1\n"
- "ld1 { v24.b }[14], [x10], #0x1\n"
- "ld1 { v23.b }[14], [x9], #0x1\n"
- "ld1 { v22.b }[14], [x26], #0x1\n"
- "ld1 { v21.b }[14], [x25], #0x1\n"
- "b 19f\n"
- "12:" // Oddments: Load (B): Bit 3: Bit 2: Bit 1: Unset
- "tbz %x[n_channels], #0, 19f\n"
- "ld1 { v29.b }[12], [x15], #0x1\n"
- "ld1 { v28.b }[12], [x14], #0x1\n"
+ "ld1 { v1.b }[14], [x12], #0x1\n"
+ "ld1 { v25.b }[14], [x11], #0x1\n"
+ "ld1 { v23.b }[14], [x10], #0x1\n"
+ "ld1 { v31.b }[14], [x9], #0x1\n"
+ "ld1 { v28.b }[14], [x28], #0x1\n"
+ "ld1 { v21.b }[14], [x27], #0x1\n"
+ "ld1 { v26.b }[14], [x26], #0x1\n"
+ "b 10f\n"
+ "3:" // Oddments: Load (A): Bit 3: Bit 2: Bit 1: Unset
+ "tbz %x[n_channels], #0, 10f\n"
"ld1 { v27.b }[12], [x13], #0x1\n"
- "ld1 { v26.b }[12], [x12], #0x1\n"
- "ld1 { v24.b }[12], [x10], #0x1\n"
- "ld1 { v23.b }[12], [x9], #0x1\n"
- "ld1 { v22.b }[12], [x26], #0x1\n"
- "ld1 { v21.b }[12], [x25], #0x1\n"
- "b 19f\n"
- "13:" // Oddments: Load (B): Bit 3: Bit 2: Unset
- "tbz %x[n_channels], #1, 14f\n"
- "ld1 { v29.h }[4], [x15], #0x2\n"
- "ld1 { v28.h }[4], [x14], #0x2\n"
+ "ld1 { v1.b }[12], [x12], #0x1\n"
+ "ld1 { v25.b }[12], [x11], #0x1\n"
+ "ld1 { v23.b }[12], [x10], #0x1\n"
+ "ld1 { v31.b }[12], [x9], #0x1\n"
+ "ld1 { v28.b }[12], [x28], #0x1\n"
+ "ld1 { v21.b }[12], [x27], #0x1\n"
+ "ld1 { v26.b }[12], [x26], #0x1\n"
+ "b 10f\n"
+ "4:" // Oddments: Load (A): Bit 3: Bit 2: Unset
+ "tbz %x[n_channels], #1, 5f\n"
"ld1 { v27.h }[4], [x13], #0x2\n"
- "ld1 { v26.h }[4], [x12], #0x2\n"
- "ld1 { v24.h }[4], [x10], #0x2\n"
- "ld1 { v23.h }[4], [x9], #0x2\n"
- "ld1 { v22.h }[4], [x26], #0x2\n"
- "ld1 { v21.h }[4], [x25], #0x2\n"
- "tbz %x[n_channels], #0, 19f\n"
- "ld1 { v29.b }[10], [x15], #0x1\n"
- "ld1 { v28.b }[10], [x14], #0x1\n"
+ "ld1 { v1.h }[4], [x12], #0x2\n"
+ "ld1 { v25.h }[4], [x11], #0x2\n"
+ "ld1 { v23.h }[4], [x10], #0x2\n"
+ "ld1 { v31.h }[4], [x9], #0x2\n"
+ "ld1 { v28.h }[4], [x28], #0x2\n"
+ "ld1 { v21.h }[4], [x27], #0x2\n"
+ "ld1 { v26.h }[4], [x26], #0x2\n"
+ "tbz %x[n_channels], #0, 10f\n"
"ld1 { v27.b }[10], [x13], #0x1\n"
- "ld1 { v26.b }[10], [x12], #0x1\n"
- "ld1 { v24.b }[10], [x10], #0x1\n"
- "ld1 { v23.b }[10], [x9], #0x1\n"
- "ld1 { v22.b }[10], [x26], #0x1\n"
- "ld1 { v21.b }[10], [x25], #0x1\n"
- "b 19f\n"
- "14:" // Oddments: Load (B): Bit 3: Bit 2: Unset: Bit 1: Unset
- "tbz %x[n_channels], #0, 19f\n"
- "ld1 { v29.b }[8], [x15], #0x1\n"
- "ld1 { v28.b }[8], [x14], #0x1\n"
+ "ld1 { v1.b }[10], [x12], #0x1\n"
+ "ld1 { v25.b }[10], [x11], #0x1\n"
+ "ld1 { v23.b }[10], [x10], #0x1\n"
+ "ld1 { v31.b }[10], [x9], #0x1\n"
+ "ld1 { v28.b }[10], [x28], #0x1\n"
+ "ld1 { v21.b }[10], [x27], #0x1\n"
+ "ld1 { v26.b }[10], [x26], #0x1\n"
+ "b 10f\n"
+ "5:" // Oddments: Load (A): Bit 3: Bit 2: Unset: Bit 1: Unset
+ "tbz %x[n_channels], #0, 10f\n"
"ld1 { v27.b }[8], [x13], #0x1\n"
- "ld1 { v26.b }[8], [x12], #0x1\n"
- "ld1 { v24.b }[8], [x10], #0x1\n"
- "ld1 { v23.b }[8], [x9], #0x1\n"
- "ld1 { v22.b }[8], [x26], #0x1\n"
- "ld1 { v21.b }[8], [x25], #0x1\n"
- "b 19f\n"
- "15:" // Oddments: Load (B): Bit 3: Unset
- "tbz %x[n_channels], #2, 17f\n"
- "ldr s29, [x15], #0x4\n"
- "ldr s28, [x14], #0x4\n"
- "ldr s27, [x13], #0x4\n"
- "ldr s26, [x12], #0x4\n"
- "ldr s24, [x10], #0x4\n"
- "ldr s23, [x9], #0x4\n"
- "ldr s22, [x26], #0x4\n"
- "ldr s21, [x25], #0x4\n"
- "tbz %x[n_channels], #1, 16f\n"
- "ld1 { v29.h }[2], [x15], #0x2\n"
- "ld1 { v28.h }[2], [x14], #0x2\n"
+ "ld1 { v1.b }[8], [x12], #0x1\n"
+ "ld1 { v25.b }[8], [x11], #0x1\n"
+ "ld1 { v23.b }[8], [x10], #0x1\n"
+ "ld1 { v31.b }[8], [x9], #0x1\n"
+ "ld1 { v28.b }[8], [x28], #0x1\n"
+ "ld1 { v21.b }[8], [x27], #0x1\n"
+ "ld1 { v26.b }[8], [x26], #0x1\n"
+ "b 10f\n"
+ "6:" // Oddments: Load (A): Bit 3: Unset
+ "tbz %x[n_channels], #2, 8f\n"
+ "ld1 { v27.s }[0], [x13], #0x4\n"
+ "ld1 { v1.s }[0], [x12], #0x4\n"
+ "ld1 { v25.s }[0], [x11], #0x4\n"
+ "ld1 { v23.s }[0], [x10], #0x4\n"
+ "ld1 { v31.s }[0], [x9], #0x4\n"
+ "ld1 { v28.s }[0], [x28], #0x4\n"
+ "ld1 { v21.s }[0], [x27], #0x4\n"
+ "ld1 { v26.s }[0], [x26], #0x4\n"
+ "tbz %x[n_channels], #1, 7f\n"
"ld1 { v27.h }[2], [x13], #0x2\n"
- "ld1 { v26.h }[2], [x12], #0x2\n"
- "ld1 { v24.h }[2], [x10], #0x2\n"
- "ld1 { v23.h }[2], [x9], #0x2\n"
- "ld1 { v22.h }[2], [x26], #0x2\n"
- "ld1 { v21.h }[2], [x25], #0x2\n"
- "tbz %x[n_channels], #0, 19f\n"
- "ld1 { v29.b }[6], [x15], #0x1\n"
- "ld1 { v28.b }[6], [x14], #0x1\n"
+ "ld1 { v1.h }[2], [x12], #0x2\n"
+ "ld1 { v25.h }[2], [x11], #0x2\n"
+ "ld1 { v23.h }[2], [x10], #0x2\n"
+ "ld1 { v31.h }[2], [x9], #0x2\n"
+ "ld1 { v28.h }[2], [x28], #0x2\n"
+ "ld1 { v21.h }[2], [x27], #0x2\n"
+ "ld1 { v26.h }[2], [x26], #0x2\n"
+ "tbz %x[n_channels], #0, 10f\n"
"ld1 { v27.b }[6], [x13], #0x1\n"
- "ld1 { v26.b }[6], [x12], #0x1\n"
- "ld1 { v24.b }[6], [x10], #0x1\n"
- "ld1 { v23.b }[6], [x9], #0x1\n"
- "ld1 { v22.b }[6], [x26], #0x1\n"
- "ld1 { v21.b }[6], [x25], #0x1\n"
- "b 19f\n"
- "16:" // Oddments: Load (B): Bit 3: Unset: Bit 2: Bit 1: Unset
- "tbz %x[n_channels], #0, 19f\n"
- "ld1 { v29.b }[4], [x15], #0x1\n"
- "ld1 { v28.b }[4], [x14], #0x1\n"
+ "ld1 { v1.b }[6], [x12], #0x1\n"
+ "ld1 { v25.b }[6], [x11], #0x1\n"
+ "ld1 { v23.b }[6], [x10], #0x1\n"
+ "ld1 { v31.b }[6], [x9], #0x1\n"
+ "ld1 { v28.b }[6], [x28], #0x1\n"
+ "ld1 { v21.b }[6], [x27], #0x1\n"
+ "ld1 { v26.b }[6], [x26], #0x1\n"
+ "b 10f\n"
+ "7:" // Oddments: Load (A): Bit 3: Unset: Bit 2: Bit 1: Unset
+ "tbz %x[n_channels], #0, 10f\n"
"ld1 { v27.b }[4], [x13], #0x1\n"
- "ld1 { v26.b }[4], [x12], #0x1\n"
- "ld1 { v24.b }[4], [x10], #0x1\n"
- "ld1 { v23.b }[4], [x9], #0x1\n"
- "ld1 { v22.b }[4], [x26], #0x1\n"
- "ld1 { v21.b }[4], [x25], #0x1\n"
- "b 19f\n"
- "17:" // Oddments: Load (B): Bit 3: Unset: Bit 2: Unset
- "tbz %x[n_channels], #1, 18f\n"
- "ldr h29, [x15], #0x2\n"
- "ldr h28, [x14], #0x2\n"
- "ldr h27, [x13], #0x2\n"
- "ldr h26, [x12], #0x2\n"
- "ldr h24, [x10], #0x2\n"
- "ldr h23, [x9], #0x2\n"
- "ldr h22, [x26], #0x2\n"
- "ldr h21, [x25], #0x2\n"
- "tbz %x[n_channels], #0, 19f\n"
- "ld1 { v29.b }[2], [x15], #0x1\n"
- "ld1 { v28.b }[2], [x14], #0x1\n"
+ "ld1 { v1.b }[4], [x12], #0x1\n"
+ "ld1 { v25.b }[4], [x11], #0x1\n"
+ "ld1 { v23.b }[4], [x10], #0x1\n"
+ "ld1 { v31.b }[4], [x9], #0x1\n"
+ "ld1 { v28.b }[4], [x28], #0x1\n"
+ "ld1 { v21.b }[4], [x27], #0x1\n"
+ "ld1 { v26.b }[4], [x26], #0x1\n"
+ "b 10f\n"
+ "8:" // Oddments: Load (A): Bit 3: Unset: Bit 2: Unset
+ "tbz %x[n_channels], #1, 9f\n"
+ "ld1 { v27.h }[0], [x13], #0x2\n"
+ "ld1 { v1.h }[0], [x12], #0x2\n"
+ "ld1 { v25.h }[0], [x11], #0x2\n"
+ "ld1 { v23.h }[0], [x10], #0x2\n"
+ "ld1 { v31.h }[0], [x9], #0x2\n"
+ "ld1 { v28.h }[0], [x28], #0x2\n"
+ "ld1 { v21.h }[0], [x27], #0x2\n"
+ "ld1 { v26.h }[0], [x26], #0x2\n"
+ "tbz %x[n_channels], #0, 10f\n"
"ld1 { v27.b }[2], [x13], #0x1\n"
- "ld1 { v26.b }[2], [x12], #0x1\n"
- "ld1 { v24.b }[2], [x10], #0x1\n"
- "ld1 { v23.b }[2], [x9], #0x1\n"
- "ld1 { v22.b }[2], [x26], #0x1\n"
- "ld1 { v21.b }[2], [x25], #0x1\n"
- "b 19f\n"
- "18:" // Oddments: Load (B): Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
- "ldr b29, [x15], #0x1\n"
- "ldr b28, [x14], #0x1\n"
- "ldr b27, [x13], #0x1\n"
- "ldr b26, [x12], #0x1\n"
- "ldr b24, [x10], #0x1\n"
- "ldr b23, [x9], #0x1\n"
- "ldr b22, [x26], #0x1\n"
- "ldr b21, [x25], #0x1\n"
- "19:" // Oddments: Load (B): Bit 3: End
- "ldr q0, [%x[params], #0x10]\n"
- "ldr q16, [%x[params], #0x20]\n"
- "zip2 v30.16b, v4.16b, v2.16b\n"
- "zip1 v4.16b, v4.16b, v2.16b\n"
- "ldr q31, [%x[params], #0x30]\n"
- "zip1 v2.16b, v3.16b, v1.16b\n"
- "zip2 v5.16b, v9.16b, v7.16b\n"
- "cmp x20, #0x4\n"
- "zip1 v9.16b, v9.16b, v7.16b\n"
- "zip1 v7.16b, v8.16b, v6.16b\n"
- "zip2 v6.16b, v8.16b, v6.16b\n"
- "zip2 v1.16b, v3.16b, v1.16b\n"
- "zip2 v3.16b, v4.16b, v2.16b\n"
- "zip1 v4.16b, v4.16b, v2.16b\n"
- "zip2 v25.16b, v29.16b, v27.16b\n"
- "zip1 v29.16b, v29.16b, v27.16b\n"
- "zip1 v27.16b, v28.16b, v26.16b\n"
- "movi v19.4s, #0x0\n"
- ".inst 0x6e8495d3 // udot v19.4s, v14.16b, v4.16b\n"
- "zip2 v8.16b, v9.16b, v7.16b\n"
- "zip1 v9.16b, v9.16b, v7.16b\n"
- "zip1 v7.16b, v5.16b, v6.16b\n"
- "zip2 v6.16b, v5.16b, v6.16b\n"
- "ldr q5, [%x[params], #0x0]\n"
- "zip2 v26.16b, v28.16b, v26.16b\n"
- "zip2 v20.16b, v24.16b, v22.16b\n"
- "zip1 v24.16b, v24.16b, v22.16b\n"
- "zip1 v22.16b, v23.16b, v21.16b\n"
- "zip2 v21.16b, v23.16b, v21.16b\n"
- "zip2 v28.16b, v29.16b, v27.16b\n"
- "zip1 v29.16b, v29.16b, v27.16b\n"
- "zip1 v2.16b, v30.16b, v1.16b\n"
- ".inst 0x6e9d95d3 // udot v19.4s, v14.16b, v29.16b\n"
- "zip2 v1.16b, v30.16b, v1.16b\n"
- "zip1 v27.16b, v25.16b, v26.16b\n"
- "zip2 v26.16b, v25.16b, v26.16b\n"
- "zip2 v23.16b, v24.16b, v22.16b\n"
- "zip1 v24.16b, v24.16b, v22.16b\n"
- "zip1 v22.16b, v20.16b, v21.16b\n"
- "mov v18.16b, v19.16b\n .inst 0x6e9895d2 // udot v18.4s, v14.16b, v24.16b\n"
- "zip2 v21.16b, v20.16b, v21.16b\n"
- "mov v30.16b, v5.16b\n"
- ".inst 0x6e8995d3 // udot v19.4s, v14.16b, v9.16b\n"
- "mov v25.16b, v5.16b\n"
- "mov v20.16b, v5.16b\n"
- ".inst 0x6e899405 // udot v5.4s, v0.16b, v9.16b\n"
- ".inst 0x6e849419 // udot v25.4s, v0.16b, v4.16b\n"
- ".inst 0x6e849605 // udot v5.4s, v16.16b, v4.16b\n"
- "ext v4.16b, v4.16b, v4.16b, #0x1\n"
- "ext v9.16b, v9.16b, v9.16b, #0x1\n"
- ".inst 0x6e9d9619 // udot v25.4s, v16.16b, v29.16b\n"
- ".inst 0x6e9d97e5 // udot v5.4s, v31.16b, v29.16b\n"
- "ext v29.16b, v29.16b, v29.16b, #0x1\n"
- ".inst 0x6e89941e // udot v30.4s, v0.16b, v9.16b\n"
- ".inst 0x6e849414 // udot v20.4s, v0.16b, v4.16b\n"
- "movi v17.4s, #0x0\n"
- ".inst 0x6e8495d1 // udot v17.4s, v14.16b, v4.16b\n"
- ".inst 0x6e9d95d1 // udot v17.4s, v14.16b, v29.16b\n"
- ".inst 0x6e9897f9 // udot v25.4s, v31.16b, v24.16b\n"
- "ext v24.16b, v24.16b, v24.16b, #0x1\n"
- ".inst 0x6e84961e // udot v30.4s, v16.16b, v4.16b\n"
- "ldr q4, [%x[params], #0x50]\n"
- ".inst 0x6e9d9614 // udot v20.4s, v16.16b, v29.16b\n"
- "mov v16.16b, v17.16b\n .inst 0x6e9895d0 // udot v16.4s, v14.16b, v24.16b\n"
- "mls v5.4s, v19.4s, v11.4s\n"
- ".inst 0x6e8995d1 // udot v17.4s, v14.16b, v9.16b\n"
- "ldr q9, [%x[params], #0x40]\n"
- ".inst 0x6e9d97fe // udot v30.4s, v31.16b, v29.16b\n"
- "sqrdmulh v5.4s, v5.4s, v9.4s\n"
- ".inst 0x6e9897f4 // udot v20.4s, v31.16b, v24.16b\n"
- "mls v30.4s, v17.4s, v11.4s\n"
+ "ld1 { v1.b }[2], [x12], #0x1\n"
+ "ld1 { v25.b }[2], [x11], #0x1\n"
+ "ld1 { v23.b }[2], [x10], #0x1\n"
+ "ld1 { v31.b }[2], [x9], #0x1\n"
+ "ld1 { v28.b }[2], [x28], #0x1\n"
+ "ld1 { v21.b }[2], [x27], #0x1\n"
+ "ld1 { v26.b }[2], [x26], #0x1\n"
+ "b 10f\n"
+ "9:" // Oddments: Load (A): Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
+ "tbz %x[n_channels], #0, 10f\n"
+ "ld1 { v27.b }[0], [x13], #0x1\n"
+ "ld1 { v1.b }[0], [x12], #0x1\n"
+ "ld1 { v25.b }[0], [x11], #0x1\n"
+ "ld1 { v23.b }[0], [x10], #0x1\n"
+ "ld1 { v31.b }[0], [x9], #0x1\n"
+ "ld1 { v28.b }[0], [x28], #0x1\n"
+ "ld1 { v21.b }[0], [x27], #0x1\n"
+ "ld1 { v26.b }[0], [x26], #0x1\n"
+ "10:" // Oddments: Load (A): Bit 3: End
+ "ldp x13, x12, [%x[inptrs], #0x40]\n"
+ "add x13, x13, x23\n"
+ "ldp x11, x10, [%x[inptrs], #0x50]\n"
+ "ldp x9, x28, [%x[inptrs], #0x60]\n"
+ "add x12, x12, x23\n"
+ "ldp x27, x26, [%x[inptrs], #0x70]\n"
+ "add x11, x11, x23\n"
+ "add x10, x10, x23\n"
+ "add x9, x9, x23\n"
+ "add x28, x28, x23\n"
+ "add x27, x27, x23\n"
+ "add x26, x26, x23\n"
+ "tbz %x[n_channels], #3, 14f\n"
+ "ld1 { v24.d }[0], [x13], #0x8\n"
+ "ld1 { v22.d }[0], [x12], #0x8\n"
+ "ld1 { v20.d }[0], [x11], #0x8\n"
+ "ld1 { v16.d }[0], [x10], #0x8\n"
+ "ld1 { v19.d }[0], [x9], #0x8\n"
+ "ld1 { v0.d }[0], [x28], #0x8\n"
+ "ld1 { v18.d }[0], [x27], #0x8\n"
+ "ld1 { v17.d }[0], [x26], #0x8\n"
+ "tbz %x[n_channels], #2, 12f\n"
+ "ld1 { v24.s }[2], [x13], #0x4\n"
+ "ld1 { v22.s }[2], [x12], #0x4\n"
+ "ld1 { v20.s }[2], [x11], #0x4\n"
+ "ld1 { v16.s }[2], [x10], #0x4\n"
+ "ld1 { v19.s }[2], [x9], #0x4\n"
+ "ld1 { v0.s }[2], [x28], #0x4\n"
+ "ld1 { v18.s }[2], [x27], #0x4\n"
+ "ld1 { v17.s }[2], [x26], #0x4\n"
+ "tbz %x[n_channels], #1, 11f\n"
+ "ld1 { v24.h }[6], [x13], #0x2\n"
+ "ld1 { v22.h }[6], [x12], #0x2\n"
+ "ld1 { v20.h }[6], [x11], #0x2\n"
+ "ld1 { v16.h }[6], [x10], #0x2\n"
+ "ld1 { v19.h }[6], [x9], #0x2\n"
+ "ld1 { v0.h }[6], [x28], #0x2\n"
+ "ld1 { v18.h }[6], [x27], #0x2\n"
+ "ld1 { v17.h }[6], [x26], #0x2\n"
+ "tbz %x[n_channels], #0, 18f\n"
+ "ld1 { v24.b }[14], [x13], #0x1\n"
+ "ld1 { v22.b }[14], [x12], #0x1\n"
+ "ld1 { v20.b }[14], [x11], #0x1\n"
+ "ld1 { v16.b }[14], [x10], #0x1\n"
+ "ld1 { v19.b }[14], [x9], #0x1\n"
+ "ld1 { v0.b }[14], [x28], #0x1\n"
+ "ld1 { v18.b }[14], [x27], #0x1\n"
+ "ld1 { v17.b }[14], [x26], #0x1\n"
+ "b 18f\n"
+ "11:" // Oddments: Load (B): Bit 3: Bit 2: Bit 1: Unset
+ "tbz %x[n_channels], #0, 18f\n"
+ "ld1 { v24.b }[12], [x13], #0x1\n"
+ "ld1 { v22.b }[12], [x12], #0x1\n"
+ "ld1 { v20.b }[12], [x11], #0x1\n"
+ "ld1 { v16.b }[12], [x10], #0x1\n"
+ "ld1 { v19.b }[12], [x9], #0x1\n"
+ "ld1 { v0.b }[12], [x28], #0x1\n"
+ "ld1 { v18.b }[12], [x27], #0x1\n"
+ "ld1 { v17.b }[12], [x26], #0x1\n"
+ "b 18f\n"
+ "12:" // Oddments: Load (B): Bit 3: Bit 2: Unset
+ "tbz %x[n_channels], #1, 13f\n"
+ "ld1 { v24.h }[4], [x13], #0x2\n"
+ "ld1 { v22.h }[4], [x12], #0x2\n"
+ "ld1 { v20.h }[4], [x11], #0x2\n"
+ "ld1 { v16.h }[4], [x10], #0x2\n"
+ "ld1 { v19.h }[4], [x9], #0x2\n"
+ "ld1 { v0.h }[4], [x28], #0x2\n"
+ "ld1 { v18.h }[4], [x27], #0x2\n"
+ "ld1 { v17.h }[4], [x26], #0x2\n"
+ "tbz %x[n_channels], #0, 18f\n"
+ "ld1 { v24.b }[10], [x13], #0x1\n"
+ "ld1 { v22.b }[10], [x12], #0x1\n"
+ "ld1 { v20.b }[10], [x11], #0x1\n"
+ "ld1 { v16.b }[10], [x10], #0x1\n"
+ "ld1 { v19.b }[10], [x9], #0x1\n"
+ "ld1 { v0.b }[10], [x28], #0x1\n"
+ "ld1 { v18.b }[10], [x27], #0x1\n"
+ "ld1 { v17.b }[10], [x26], #0x1\n"
+ "b 18f\n"
+ "13:" // Oddments: Load (B): Bit 3: Bit 2: Unset: Bit 1: Unset
+ "tbz %x[n_channels], #0, 18f\n"
+ "ld1 { v24.b }[8], [x13], #0x1\n"
+ "ld1 { v22.b }[8], [x12], #0x1\n"
+ "ld1 { v20.b }[8], [x11], #0x1\n"
+ "ld1 { v16.b }[8], [x10], #0x1\n"
+ "ld1 { v19.b }[8], [x9], #0x1\n"
+ "ld1 { v0.b }[8], [x28], #0x1\n"
+ "ld1 { v18.b }[8], [x27], #0x1\n"
+ "ld1 { v17.b }[8], [x26], #0x1\n"
+ "b 18f\n"
+ "14:" // Oddments: Load (B): Bit 3: Unset
+ "tbz %x[n_channels], #2, 16f\n"
+ "ld1 { v24.s }[0], [x13], #0x4\n"
+ "ld1 { v22.s }[0], [x12], #0x4\n"
+ "ld1 { v20.s }[0], [x11], #0x4\n"
+ "ld1 { v16.s }[0], [x10], #0x4\n"
+ "ld1 { v19.s }[0], [x9], #0x4\n"
+ "ld1 { v0.s }[0], [x28], #0x4\n"
+ "ld1 { v18.s }[0], [x27], #0x4\n"
+ "ld1 { v17.s }[0], [x26], #0x4\n"
+ "tbz %x[n_channels], #1, 15f\n"
+ "ld1 { v24.h }[2], [x13], #0x2\n"
+ "ld1 { v22.h }[2], [x12], #0x2\n"
+ "ld1 { v20.h }[2], [x11], #0x2\n"
+ "ld1 { v16.h }[2], [x10], #0x2\n"
+ "ld1 { v19.h }[2], [x9], #0x2\n"
+ "ld1 { v0.h }[2], [x28], #0x2\n"
+ "ld1 { v18.h }[2], [x27], #0x2\n"
+ "ld1 { v17.h }[2], [x26], #0x2\n"
+ "tbz %x[n_channels], #0, 18f\n"
+ "ld1 { v24.b }[6], [x13], #0x1\n"
+ "ld1 { v22.b }[6], [x12], #0x1\n"
+ "ld1 { v20.b }[6], [x11], #0x1\n"
+ "ld1 { v16.b }[6], [x10], #0x1\n"
+ "ld1 { v19.b }[6], [x9], #0x1\n"
+ "ld1 { v0.b }[6], [x28], #0x1\n"
+ "ld1 { v18.b }[6], [x27], #0x1\n"
+ "ld1 { v17.b }[6], [x26], #0x1\n"
+ "b 18f\n"
+ "15:" // Oddments: Load (B): Bit 3: Unset: Bit 2: Bit 1: Unset
+ "tbz %x[n_channels], #0, 18f\n"
+ "ld1 { v24.b }[4], [x13], #0x1\n"
+ "ld1 { v22.b }[4], [x12], #0x1\n"
+ "ld1 { v20.b }[4], [x11], #0x1\n"
+ "ld1 { v16.b }[4], [x10], #0x1\n"
+ "ld1 { v19.b }[4], [x9], #0x1\n"
+ "ld1 { v0.b }[4], [x28], #0x1\n"
+ "ld1 { v18.b }[4], [x27], #0x1\n"
+ "ld1 { v17.b }[4], [x26], #0x1\n"
+ "b 18f\n"
+ "16:" // Oddments: Load (B): Bit 3: Unset: Bit 2: Unset
+ "tbz %x[n_channels], #1, 17f\n"
+ "ld1 { v24.h }[0], [x13], #0x2\n"
+ "ld1 { v22.h }[0], [x12], #0x2\n"
+ "ld1 { v20.h }[0], [x11], #0x2\n"
+ "ld1 { v16.h }[0], [x10], #0x2\n"
+ "ld1 { v19.h }[0], [x9], #0x2\n"
+ "ld1 { v0.h }[0], [x28], #0x2\n"
+ "ld1 { v18.h }[0], [x27], #0x2\n"
+ "ld1 { v17.h }[0], [x26], #0x2\n"
+ "tbz %x[n_channels], #0, 18f\n"
+ "ld1 { v24.b }[2], [x13], #0x1\n"
+ "ld1 { v22.b }[2], [x12], #0x1\n"
+ "ld1 { v20.b }[2], [x11], #0x1\n"
+ "ld1 { v16.b }[2], [x10], #0x1\n"
+ "ld1 { v19.b }[2], [x9], #0x1\n"
+ "ld1 { v0.b }[2], [x28], #0x1\n"
+ "ld1 { v18.b }[2], [x27], #0x1\n"
+ "ld1 { v17.b }[2], [x26], #0x1\n"
+ "b 18f\n"
+ "17:" // Oddments: Load (B): Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
+ "tbz %x[n_channels], #0, 18f\n"
+ "ld1 { v24.b }[0], [x13], #0x1\n"
+ "ld1 { v22.b }[0], [x12], #0x1\n"
+ "ld1 { v20.b }[0], [x11], #0x1\n"
+ "ld1 { v16.b }[0], [x10], #0x1\n"
+ "ld1 { v19.b }[0], [x9], #0x1\n"
+ "ld1 { v0.b }[0], [x28], #0x1\n"
+ "ld1 { v18.b }[0], [x27], #0x1\n"
+ "ld1 { v17.b }[0], [x26], #0x1\n"
+ "18:" // Oddments: Load (B): Bit 3: End
+ "zip1 v7.16b, v27.16b, v25.16b\n"
+ "ldr q30, [%x[params], #0x0]\n"
+ "cmp x19, #0x4\n"
+ "zip2 v5.16b, v27.16b, v25.16b\n"
+ "ldr q29, [%x[params], #0x10]\n"
+ "zip1 v8.16b, v1.16b, v23.16b\n"
+ "ldr q27, [%x[params], #0x20]\n"
+ "zip2 v3.16b, v1.16b, v23.16b\n"
+ "ldr q25, [%x[params], #0x30]\n"
+ "zip1 v2.16b, v31.16b, v21.16b\n"
+ "ldr q23, [%x[params], #0x40]\n"
+ "zip2 v4.16b, v31.16b, v21.16b\n"
+ "ldr q21, [%x[params], #0x50]\n"
"add %x[params], %x[params], #0x60\n"
- "mls v25.4s, v18.4s, v11.4s\n"
- "mls v20.4s, v16.4s, v11.4s\n"
- "and v0.16b, v5.16b, v4.16b\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "sqadd v5.4s, v5.4s, v0.4s\n"
- "and v16.16b, v30.16b, v4.16b\n"
- "and v31.16b, v25.16b, v4.16b\n"
- "and v0.16b, v20.16b, v4.16b\n"
+ "zip1 v1.16b, v28.16b, v26.16b\n"
+ "zip2 v31.16b, v28.16b, v26.16b\n"
+ "zip1 v28.16b, v24.16b, v20.16b\n"
+ "zip2 v26.16b, v24.16b, v20.16b\n"
+ "zip1 v24.16b, v22.16b, v16.16b\n"
+ "zip2 v22.16b, v22.16b, v16.16b\n"
+ "zip1 v20.16b, v19.16b, v18.16b\n"
+ "zip2 v19.16b, v19.16b, v18.16b\n"
+ "zip1 v18.16b, v0.16b, v17.16b\n"
+ "zip2 v17.16b, v0.16b, v17.16b\n"
+ "zip1 v6.16b, v7.16b, v8.16b\n"
+ "zip2 v8.16b, v7.16b, v8.16b\n"
+ "zip1 v7.16b, v5.16b, v3.16b\n"
+ "str q7, [SP, #0x0]\n"
+ "zip2 v5.16b, v5.16b, v3.16b\n"
+ "str q5, [SP, #0x10]\n"
+ "zip1 v3.16b, v2.16b, v1.16b\n"
+ "zip2 v2.16b, v2.16b, v1.16b\n"
+ "zip1 v1.16b, v4.16b, v31.16b\n"
+ "str q1, [SP, #0x20]\n"
+ "zip2 v16.16b, v4.16b, v31.16b\n"
+ "str q16, [SP, #0x30]\n"
+ "zip1 v31.16b, v28.16b, v24.16b\n"
+ "zip2 v28.16b, v28.16b, v24.16b\n"
+ "zip1 v16.16b, v26.16b, v22.16b\n"
+ "str q16, [SP, #0x40]\n"
+ "zip2 v16.16b, v26.16b, v22.16b\n"
+ "str q16, [SP, #0x50]\n"
+ "zip1 v26.16b, v20.16b, v18.16b\n"
+ "zip2 v24.16b, v20.16b, v18.16b\n"
+ "zip1 v16.16b, v19.16b, v17.16b\n"
+ "str q16, [SP, #0x60]\n"
+ "zip2 v16.16b, v19.16b, v17.16b\n"
+ "str q16, [SP, #0x70]\n"
+ "mov v22.16b, v30.16b\n"
+ "mov v20.16b, v30.16b\n"
+ "mov v19.16b, v30.16b\n"
+ ".inst 0x6e8697be // udot v30.4s, v29.16b, v6.16b\n"
+ ".inst 0x6e8397b4 // udot v20.4s, v29.16b, v3.16b\n"
+ "movi v15.4s, #0x0\n"
+ ".inst 0x6e83956f // udot v15.4s, v11.16b, v3.16b\n"
+ ".inst 0x6e83977e // udot v30.4s, v27.16b, v3.16b\n"
+ ".inst 0x6e9f9774 // udot v20.4s, v27.16b, v31.16b\n"
+ "ext v3.16b, v3.16b, v3.16b, #0x1\n"
+ ".inst 0x6e9f956f // udot v15.4s, v11.16b, v31.16b\n"
+ ".inst 0x6e9f973e // udot v30.4s, v25.16b, v31.16b\n"
+ ".inst 0x6e9a9734 // udot v20.4s, v25.16b, v26.16b\n"
+ "ext v31.16b, v31.16b, v31.16b, #0x1\n"
+ "mov v17.16b, v15.16b\n"
+ ".inst 0x6e86956f // udot v15.4s, v11.16b, v6.16b\n"
+ "mls v30.4s, v15.4s, v14.4s\n"
+ ".inst 0x6e9a9571 // udot v17.4s, v11.16b, v26.16b\n"
+ "ext v6.16b, v6.16b, v6.16b, #0x1\n"
+ "mls v20.4s, v17.4s, v14.4s\n"
+ "ext v26.16b, v26.16b, v26.16b, #0x1\n"
+ ".inst 0x6e8697b6 // udot v22.4s, v29.16b, v6.16b\n"
+ ".inst 0x6e8397b3 // udot v19.4s, v29.16b, v3.16b\n"
+ "movi v10.4s, #0x0\n"
+ ".inst 0x6e83956a // udot v10.4s, v11.16b, v3.16b\n"
+ ".inst 0x6e839776 // udot v22.4s, v27.16b, v3.16b\n"
+ ".inst 0x6e9f9773 // udot v19.4s, v27.16b, v31.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v23.4s\n"
+ ".inst 0x6e9f956a // udot v10.4s, v11.16b, v31.16b\n"
+ ".inst 0x6e9f9736 // udot v22.4s, v25.16b, v31.16b\n"
+ ".inst 0x6e9a9733 // udot v19.4s, v25.16b, v26.16b\n"
+ "and v18.16b, v30.16b, v21.16b\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "mov v17.16b, v10.16b\n"
+ ".inst 0x6e86956a // udot v10.4s, v11.16b, v6.16b\n"
+ "mls v22.4s, v10.4s, v14.4s\n"
+ ".inst 0x6e9a9571 // udot v17.4s, v11.16b, v26.16b\n"
+ "sqrdmulh v20.4s, v20.4s, v23.4s\n"
+ "mls v19.4s, v17.4s, v14.4s\n"
+ "sqadd v30.4s, v30.4s, v18.4s\n"
+ "and v16.16b, v20.16b, v21.16b\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqadd v30.4s, v30.4s, v16.4s\n"
- "sqadd v25.4s, v25.4s, v31.4s\n"
- "sqadd v20.4s, v20.4s, v0.4s\n"
- "srshl v5.4s, v5.4s, v4.4s\n"
- "srshl v30.4s, v30.4s, v4.4s\n"
- "srshl v25.4s, v25.4s, v4.4s\n"
- "srshl v20.4s, v20.4s, v4.4s\n"
- "add v5.4s, v5.4s, v10.4s\n"
- "add v30.4s, v30.4s, v10.4s\n"
- "add v25.4s, v25.4s, v10.4s\n"
- "add v20.4s, v20.4s, v10.4s\n"
- "smax v5.4s, v5.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smin v5.4s, v5.4s, v12.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v23.4s\n"
+ "srshl v30.4s, v30.4s, v21.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v23.4s\n"
+ "and v17.16b, v22.16b, v21.16b\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "add v30.4s, v30.4s, v13.4s\n"
+ "sqadd v20.4s, v20.4s, v16.4s\n"
+ "and v16.16b, v19.16b, v21.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smax v30.4s, v30.4s, v9.4s\n"
+ "srshl v20.4s, v20.4s, v21.4s\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
"smin v30.4s, v30.4s, v12.4s\n"
- "smin v25.4s, v25.4s, v12.4s\n"
- "smin v20.4s, v20.4s, v12.4s\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "add v20.4s, v20.4s, v13.4s\n"
+ "srshl v22.4s, v22.4s, v21.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "smax v20.4s, v20.4s, v9.4s\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "add v22.4s, v22.4s, v13.4s\n"
+ "smin v20.4s, v20.4s, v12.4s\n"
+ "srshl v19.4s, v19.4s, v21.4s\n"
+ "smax v22.4s, v22.4s, v9.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "smin v22.4s, v22.4s, v12.4s\n"
+ "add v19.4s, v19.4s, v13.4s\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "blt 20f\n"
- "str s5, [x24, x27]\n"
- "str s30, [x23, x27]\n"
- "str s25, [x22, x27]\n"
- "str s20, [x21, x27]\n"
- "b 23f\n"
- "20:" // Oddments: Unroll 0: Oddment store
- "add x24, x24, x27\n"
- "add x23, x23, x27\n"
- "add x22, x22, x27\n"
- "add x21, x21, x27\n"
- "tbz x20, #1, 21f\n"
- "st1 { v5.h }[0], [x24], #0x2\n"
- "st1 { v30.h }[0], [x23], #0x2\n"
- "st1 { v25.h }[0], [x22], #0x2\n"
- "st1 { v20.h }[0], [x21], #0x2\n"
- "tbz x20, #0, 22f\n"
- "st1 { v5.b }[2], [x24], #0x1\n"
- "st1 { v30.b }[2], [x23], #0x1\n"
- "st1 { v25.b }[2], [x22], #0x1\n"
- "st1 { v20.b }[2], [x21], #0x1\n"
+ "smax v19.4s, v19.4s, v9.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "smin v19.4s, v19.4s, v12.4s\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "blt 19f\n"
+ "str s30, [x25, x23]\n"
+ "str s22, [x24, x23]\n"
+ "str s20, [x22, x23]\n"
+ "str s19, [x21, x23]\n"
"b 22f\n"
- "21:" // Oddments: Unroll 0: Oddment store: Bit 1: Unset
- "st1 { v5.b }[0], [x24], #0x1\n"
- "st1 { v30.b }[0], [x23], #0x1\n"
- "st1 { v25.b }[0], [x22], #0x1\n"
- "st1 { v20.b }[0], [x21], #0x1\n"
- "22:" // Oddments: Unroll 0: Oddment store: Bit 1: End
- "23:" // Oddments: Unroll 0: After oddment store
- "subs x20, x20, #0x4\n"
- "add x27, x27, #0x4\n"
- "ble 35f\n"
- "ldr q5, [%x[params], #0x0]\n"
- "ldr q0, [%x[params], #0x10]\n"
- "movi v19.4s, #0x0\n"
- ".inst 0x6e8395d3 // udot v19.4s, v14.16b, v3.16b\n"
- "ldr q16, [%x[params], #0x20]\n"
- "ldr q31, [%x[params], #0x30]\n"
- "mov v30.16b, v5.16b\n"
- "mov v25.16b, v5.16b\n"
- "ldr q9, [%x[params], #0x40]\n"
- "ldr q4, [%x[params], #0x50]\n"
- "mov v20.16b, v5.16b\n"
- ".inst 0x6e889405 // udot v5.4s, v0.16b, v8.16b\n"
- ".inst 0x6e9c95d3 // udot v19.4s, v14.16b, v28.16b\n"
- ".inst 0x6e839419 // udot v25.4s, v0.16b, v3.16b\n"
- "movi v17.4s, #0x0\n"
- "cmp x20, #0x4\n"
- ".inst 0x6e839605 // udot v5.4s, v16.16b, v3.16b\n"
- "mov v18.16b, v19.16b\n .inst 0x6e9795d2 // udot v18.4s, v14.16b, v23.16b\n"
- "ext v3.16b, v3.16b, v3.16b, #0x1\n"
+ "19:" // Oddments: Unroll 0: Oddment store
+ "add x25, x25, x23\n"
+ "add x24, x24, x23\n"
+ "add x22, x22, x23\n"
+ "add x21, x21, x23\n"
+ "tbz x19, #1, 20f\n"
+ "st1 { v30.h }[0], [x25], #0x2\n"
+ "st1 { v22.h }[0], [x24], #0x2\n"
+ "st1 { v20.h }[0], [x22], #0x2\n"
+ "st1 { v19.h }[0], [x21], #0x2\n"
+ "tbz x19, #0, 21f\n"
+ "st1 { v30.b }[2], [x25], #0x1\n"
+ "st1 { v22.b }[2], [x24], #0x1\n"
+ "st1 { v20.b }[2], [x22], #0x1\n"
+ "st1 { v19.b }[2], [x21], #0x1\n"
+ "b 21f\n"
+ "20:" // Oddments: Unroll 0: Oddment store: Bit 1: Unset
+ "tbz x19, #0, 21f\n"
+ "st1 { v30.b }[0], [x25], #0x1\n"
+ "st1 { v22.b }[0], [x24], #0x1\n"
+ "st1 { v20.b }[0], [x22], #0x1\n"
+ "st1 { v19.b }[0], [x21], #0x1\n"
+ "21:" // Oddments: Unroll 0: Oddment store: Bit 1: End
+
+ "22:" // Oddments: Unroll 0: After oddment store
+ "add x23, x23, #0x4\n"
+ "subs x19, x19, #0x4\n"
+ "ble 34f\n"
+ "movi v15.4s, #0x0\n"
+ "ldr q30, [%x[params], #0x0]\n"
+ ".inst 0x6e82956f // udot v15.4s, v11.16b, v2.16b\n"
+ "ldr q29, [%x[params], #0x10]\n"
+ "cmp x19, #0x4\n"
+ "movi v10.4s, #0x0\n"
+ "ldr q27, [%x[params], #0x20]\n"
+ "ldr q25, [%x[params], #0x30]\n"
+ "mov v22.16b, v30.16b\n"
+ "ldr q23, [%x[params], #0x40]\n"
+ "mov v20.16b, v30.16b\n"
+ "ldr q21, [%x[params], #0x50]\n"
"add %x[params], %x[params], #0x60\n"
- ".inst 0x6e8895d3 // udot v19.4s, v14.16b, v8.16b\n"
+ "mov v19.16b, v30.16b\n"
+ ".inst 0x6e8897be // udot v30.4s, v29.16b, v8.16b\n"
+ ".inst 0x6e8297b4 // udot v20.4s, v29.16b, v2.16b\n"
+ ".inst 0x6e9c956f // udot v15.4s, v11.16b, v28.16b\n"
+ ".inst 0x6e82977e // udot v30.4s, v27.16b, v2.16b\n"
+ "ext v2.16b, v2.16b, v2.16b, #0x1\n"
+ ".inst 0x6e9c9774 // udot v20.4s, v27.16b, v28.16b\n"
+ "mov v17.16b, v15.16b\n"
+ ".inst 0x6e88956f // udot v15.4s, v11.16b, v8.16b\n"
+ ".inst 0x6e9c973e // udot v30.4s, v25.16b, v28.16b\n"
+ "mls v30.4s, v15.4s, v14.4s\n"
+ ".inst 0x6e989734 // udot v20.4s, v25.16b, v24.16b\n"
+ ".inst 0x6e989571 // udot v17.4s, v11.16b, v24.16b\n"
+ "mls v20.4s, v17.4s, v14.4s\n"
"ext v8.16b, v8.16b, v8.16b, #0x1\n"
- ".inst 0x6e88941e // udot v30.4s, v0.16b, v8.16b\n"
- ".inst 0x6e839414 // udot v20.4s, v0.16b, v3.16b\n"
- ".inst 0x6e8395d1 // udot v17.4s, v14.16b, v3.16b\n"
- ".inst 0x6e9c9619 // udot v25.4s, v16.16b, v28.16b\n"
- ".inst 0x6e9c97e5 // udot v5.4s, v31.16b, v28.16b\n"
"ext v28.16b, v28.16b, v28.16b, #0x1\n"
- ".inst 0x6e83961e // udot v30.4s, v16.16b, v3.16b\n"
- ".inst 0x6e9c9614 // udot v20.4s, v16.16b, v28.16b\n"
- "mls v5.4s, v19.4s, v11.4s\n"
- ".inst 0x6e9c95d1 // udot v17.4s, v14.16b, v28.16b\n"
- ".inst 0x6e9797f9 // udot v25.4s, v31.16b, v23.16b\n"
- "ext v23.16b, v23.16b, v23.16b, #0x1\n"
- ".inst 0x6e9c97fe // udot v30.4s, v31.16b, v28.16b\n"
- ".inst 0x6e9797f4 // udot v20.4s, v31.16b, v23.16b\n"
- "sqrdmulh v5.4s, v5.4s, v9.4s\n"
- "mov v16.16b, v17.16b\n .inst 0x6e9795d0 // udot v16.4s, v14.16b, v23.16b\n"
- ".inst 0x6e8895d1 // udot v17.4s, v14.16b, v8.16b\n"
- "mls v30.4s, v17.4s, v11.4s\n"
- "mls v25.4s, v18.4s, v11.4s\n"
- "mls v20.4s, v16.4s, v11.4s\n"
- "and v0.16b, v5.16b, v4.16b\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "sqadd v5.4s, v5.4s, v0.4s\n"
- "and v16.16b, v30.16b, v4.16b\n"
- "and v31.16b, v25.16b, v4.16b\n"
- "and v0.16b, v20.16b, v4.16b\n"
+ "ext v24.16b, v24.16b, v24.16b, #0x1\n"
+ ".inst 0x6e8297b3 // udot v19.4s, v29.16b, v2.16b\n"
+ ".inst 0x6e82956a // udot v10.4s, v11.16b, v2.16b\n"
+ ".inst 0x6e8897b6 // udot v22.4s, v29.16b, v8.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v23.4s\n"
+ ".inst 0x6e9c9773 // udot v19.4s, v27.16b, v28.16b\n"
+ ".inst 0x6e9c956a // udot v10.4s, v11.16b, v28.16b\n"
+ ".inst 0x6e829776 // udot v22.4s, v27.16b, v2.16b\n"
+ "and v18.16b, v30.16b, v21.16b\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ ".inst 0x6e9c9736 // udot v22.4s, v25.16b, v28.16b\n"
+ ".inst 0x6e989733 // udot v19.4s, v25.16b, v24.16b\n"
+ "mov v17.16b, v10.16b\n"
+ ".inst 0x6e88956a // udot v10.4s, v11.16b, v8.16b\n"
+ "mls v22.4s, v10.4s, v14.4s\n"
+ ".inst 0x6e989571 // udot v17.4s, v11.16b, v24.16b\n"
+ "sqadd v30.4s, v30.4s, v18.4s\n"
+ "mls v19.4s, v17.4s, v14.4s\n"
+ "srshl v30.4s, v30.4s, v21.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v23.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v23.4s\n"
+ "add v30.4s, v30.4s, v13.4s\n"
+ "and v16.16b, v20.16b, v21.16b\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqadd v30.4s, v30.4s, v16.4s\n"
- "sqadd v25.4s, v25.4s, v31.4s\n"
- "sqadd v20.4s, v20.4s, v0.4s\n"
- "srshl v5.4s, v5.4s, v4.4s\n"
- "srshl v30.4s, v30.4s, v4.4s\n"
- "srshl v25.4s, v25.4s, v4.4s\n"
- "srshl v20.4s, v20.4s, v4.4s\n"
- "add v5.4s, v5.4s, v10.4s\n"
- "add v30.4s, v30.4s, v10.4s\n"
- "add v25.4s, v25.4s, v10.4s\n"
- "add v20.4s, v20.4s, v10.4s\n"
- "smax v5.4s, v5.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smin v5.4s, v5.4s, v12.4s\n"
+ "smax v30.4s, v30.4s, v9.4s\n"
+ "and v17.16b, v22.16b, v21.16b\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
"smin v30.4s, v30.4s, v12.4s\n"
- "smin v25.4s, v25.4s, v12.4s\n"
- "smin v20.4s, v20.4s, v12.4s\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "sqrdmulh v19.4s, v19.4s, v23.4s\n"
+ "sqadd v20.4s, v20.4s, v16.4s\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "and v16.16b, v19.16b, v21.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "srshl v20.4s, v20.4s, v21.4s\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "srshl v22.4s, v22.4s, v21.4s\n"
+ "add v20.4s, v20.4s, v13.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "smax v20.4s, v20.4s, v9.4s\n"
+ "add v22.4s, v22.4s, v13.4s\n"
+ "srshl v19.4s, v19.4s, v21.4s\n"
+ "smin v20.4s, v20.4s, v12.4s\n"
+ "smax v22.4s, v22.4s, v9.4s\n"
+ "add v19.4s, v19.4s, v13.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "smin v22.4s, v22.4s, v12.4s\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "blt 24f\n"
- "str s5, [x24, x27]\n"
- "str s30, [x23, x27]\n"
- "str s25, [x22, x27]\n"
- "str s20, [x21, x27]\n"
- "b 27f\n"
- "24:" // Oddments: Unroll 1: Oddment store
- "add x24, x24, x27\n"
- "add x23, x23, x27\n"
- "add x22, x22, x27\n"
- "add x21, x21, x27\n"
- "tbz x20, #1, 25f\n"
- "st1 { v5.h }[0], [x24], #0x2\n"
- "st1 { v30.h }[0], [x23], #0x2\n"
- "st1 { v25.h }[0], [x22], #0x2\n"
- "st1 { v20.h }[0], [x21], #0x2\n"
- "tbz x20, #0, 26f\n"
- "st1 { v5.b }[2], [x24], #0x1\n"
- "st1 { v30.b }[2], [x23], #0x1\n"
- "st1 { v25.b }[2], [x22], #0x1\n"
- "st1 { v20.b }[2], [x21], #0x1\n"
+ "smax v19.4s, v19.4s, v9.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "smin v19.4s, v19.4s, v12.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "blt 23f\n"
+ "str s30, [x25, x23]\n"
+ "str s22, [x24, x23]\n"
+ "str s20, [x22, x23]\n"
+ "str s19, [x21, x23]\n"
"b 26f\n"
- "25:" // Oddments: Unroll 1: Oddment store: Bit 1: Unset
- "st1 { v5.b }[0], [x24], #0x1\n"
- "st1 { v30.b }[0], [x23], #0x1\n"
- "st1 { v25.b }[0], [x22], #0x1\n"
- "st1 { v20.b }[0], [x21], #0x1\n"
- "26:" // Oddments: Unroll 1: Oddment store: Bit 1: End
- "27:" // Oddments: Unroll 1: After oddment store
- "subs x20, x20, #0x4\n"
- "add x27, x27, #0x4\n"
- "ble 35f\n"
- "ldr q5, [%x[params], #0x0]\n"
- "ldr q0, [%x[params], #0x10]\n"
- "movi v19.4s, #0x0\n"
- ".inst 0x6e8295d3 // udot v19.4s, v14.16b, v2.16b\n"
- "ldr q16, [%x[params], #0x20]\n"
- "ldr q31, [%x[params], #0x30]\n"
- "mov v30.16b, v5.16b\n"
- "mov v25.16b, v5.16b\n"
- "ldr q9, [%x[params], #0x40]\n"
- "ldr q4, [%x[params], #0x50]\n"
- "mov v20.16b, v5.16b\n"
- ".inst 0x6e879405 // udot v5.4s, v0.16b, v7.16b\n"
- ".inst 0x6e9b95d3 // udot v19.4s, v14.16b, v27.16b\n"
- ".inst 0x6e829419 // udot v25.4s, v0.16b, v2.16b\n"
- "movi v17.4s, #0x0\n"
- "cmp x20, #0x4\n"
- ".inst 0x6e829605 // udot v5.4s, v16.16b, v2.16b\n"
- "mov v18.16b, v19.16b\n .inst 0x6e9695d2 // udot v18.4s, v14.16b, v22.16b\n"
- "ext v2.16b, v2.16b, v2.16b, #0x1\n"
+ "23:" // Oddments: Unroll 1: Oddment store
+ "add x25, x25, x23\n"
+ "add x24, x24, x23\n"
+ "add x22, x22, x23\n"
+ "add x21, x21, x23\n"
+ "tbz x19, #1, 24f\n"
+ "st1 { v30.h }[0], [x25], #0x2\n"
+ "st1 { v22.h }[0], [x24], #0x2\n"
+ "st1 { v20.h }[0], [x22], #0x2\n"
+ "st1 { v19.h }[0], [x21], #0x2\n"
+ "tbz x19, #0, 25f\n"
+ "st1 { v30.b }[2], [x25], #0x1\n"
+ "st1 { v22.b }[2], [x24], #0x1\n"
+ "st1 { v20.b }[2], [x22], #0x1\n"
+ "st1 { v19.b }[2], [x21], #0x1\n"
+ "b 25f\n"
+ "24:" // Oddments: Unroll 1: Oddment store: Bit 1: Unset
+ "tbz x19, #0, 25f\n"
+ "st1 { v30.b }[0], [x25], #0x1\n"
+ "st1 { v22.b }[0], [x24], #0x1\n"
+ "st1 { v20.b }[0], [x22], #0x1\n"
+ "st1 { v19.b }[0], [x21], #0x1\n"
+ "25:" // Oddments: Unroll 1: Oddment store: Bit 1: End
+
+ "26:" // Oddments: Unroll 1: After oddment store
+ "add x23, x23, #0x4\n"
+ "subs x19, x19, #0x4\n"
+ "ble 34f\n"
+ "movi v15.4s, #0x0\n"
+ "ldr q6, [SP, #0x0]\n"
+ "movi v10.4s, #0x0\n"
+ "ldr q3, [SP, #0x20]\n"
+ "cmp x19, #0x4\n"
+ ".inst 0x6e83956f // udot v15.4s, v11.16b, v3.16b\n"
+ "ldr q31, [SP, #0x40]\n"
+ "ldr q26, [SP, #0x60]\n"
+ ".inst 0x6e9f956f // udot v15.4s, v11.16b, v31.16b\n"
+ "ldr q30, [%x[params], #0x0]\n"
+ "ldr q29, [%x[params], #0x10]\n"
+ "mov v22.16b, v30.16b\n"
+ "ldr q27, [%x[params], #0x20]\n"
+ "mov v20.16b, v30.16b\n"
+ "ldr q25, [%x[params], #0x30]\n"
+ "mov v19.16b, v30.16b\n"
+ "ldr q23, [%x[params], #0x40]\n"
+ ".inst 0x6e8697be // udot v30.4s, v29.16b, v6.16b\n"
+ "ldr q21, [%x[params], #0x50]\n"
"add %x[params], %x[params], #0x60\n"
- ".inst 0x6e8795d3 // udot v19.4s, v14.16b, v7.16b\n"
- "ext v7.16b, v7.16b, v7.16b, #0x1\n"
- ".inst 0x6e87941e // udot v30.4s, v0.16b, v7.16b\n"
- ".inst 0x6e829414 // udot v20.4s, v0.16b, v2.16b\n"
- ".inst 0x6e8295d1 // udot v17.4s, v14.16b, v2.16b\n"
- ".inst 0x6e9b9619 // udot v25.4s, v16.16b, v27.16b\n"
- ".inst 0x6e9b97e5 // udot v5.4s, v31.16b, v27.16b\n"
- "ext v27.16b, v27.16b, v27.16b, #0x1\n"
- ".inst 0x6e82961e // udot v30.4s, v16.16b, v2.16b\n"
- ".inst 0x6e9b9614 // udot v20.4s, v16.16b, v27.16b\n"
- "mls v5.4s, v19.4s, v11.4s\n"
- ".inst 0x6e9b95d1 // udot v17.4s, v14.16b, v27.16b\n"
- ".inst 0x6e9697f9 // udot v25.4s, v31.16b, v22.16b\n"
- "ext v22.16b, v22.16b, v22.16b, #0x1\n"
- ".inst 0x6e9b97fe // udot v30.4s, v31.16b, v27.16b\n"
- ".inst 0x6e9697f4 // udot v20.4s, v31.16b, v22.16b\n"
- "sqrdmulh v5.4s, v5.4s, v9.4s\n"
- "mov v16.16b, v17.16b\n .inst 0x6e9695d0 // udot v16.4s, v14.16b, v22.16b\n"
- ".inst 0x6e8795d1 // udot v17.4s, v14.16b, v7.16b\n"
- "mls v30.4s, v17.4s, v11.4s\n"
- "mls v25.4s, v18.4s, v11.4s\n"
- "mls v20.4s, v16.4s, v11.4s\n"
- "and v0.16b, v5.16b, v4.16b\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "sqadd v5.4s, v5.4s, v0.4s\n"
- "and v16.16b, v30.16b, v4.16b\n"
- "and v31.16b, v25.16b, v4.16b\n"
- "and v0.16b, v20.16b, v4.16b\n"
+ ".inst 0x6e8397b4 // udot v20.4s, v29.16b, v3.16b\n"
+ "mov v17.16b, v15.16b\n"
+ ".inst 0x6e86956f // udot v15.4s, v11.16b, v6.16b\n"
+ ".inst 0x6e83977e // udot v30.4s, v27.16b, v3.16b\n"
+ ".inst 0x6e9a9571 // udot v17.4s, v11.16b, v26.16b\n"
+ ".inst 0x6e9f9774 // udot v20.4s, v27.16b, v31.16b\n"
+ "ext v6.16b, v6.16b, v6.16b, #0x1\n"
+ ".inst 0x6e9f973e // udot v30.4s, v25.16b, v31.16b\n"
+ "mls v30.4s, v15.4s, v14.4s\n"
+ ".inst 0x6e9a9734 // udot v20.4s, v25.16b, v26.16b\n"
+ "ext v3.16b, v3.16b, v3.16b, #0x1\n"
+ "mls v20.4s, v17.4s, v14.4s\n"
+ "ext v31.16b, v31.16b, v31.16b, #0x1\n"
+ "ext v26.16b, v26.16b, v26.16b, #0x1\n"
+ ".inst 0x6e8697b6 // udot v22.4s, v29.16b, v6.16b\n"
+ ".inst 0x6e8397b3 // udot v19.4s, v29.16b, v3.16b\n"
+ ".inst 0x6e83956a // udot v10.4s, v11.16b, v3.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v23.4s\n"
+ ".inst 0x6e839776 // udot v22.4s, v27.16b, v3.16b\n"
+ ".inst 0x6e9f9773 // udot v19.4s, v27.16b, v31.16b\n"
+ ".inst 0x6e9f956a // udot v10.4s, v11.16b, v31.16b\n"
+ "and v18.16b, v30.16b, v21.16b\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ ".inst 0x6e9f9736 // udot v22.4s, v25.16b, v31.16b\n"
+ ".inst 0x6e9a9733 // udot v19.4s, v25.16b, v26.16b\n"
+ "mov v17.16b, v10.16b\n"
+ ".inst 0x6e86956a // udot v10.4s, v11.16b, v6.16b\n"
+ "mls v22.4s, v10.4s, v14.4s\n"
+ ".inst 0x6e9a9571 // udot v17.4s, v11.16b, v26.16b\n"
+ "sqadd v30.4s, v30.4s, v18.4s\n"
+ "mls v19.4s, v17.4s, v14.4s\n"
+ "srshl v30.4s, v30.4s, v21.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v23.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v23.4s\n"
+ "add v30.4s, v30.4s, v13.4s\n"
+ "and v16.16b, v20.16b, v21.16b\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqadd v30.4s, v30.4s, v16.4s\n"
- "sqadd v25.4s, v25.4s, v31.4s\n"
- "sqadd v20.4s, v20.4s, v0.4s\n"
- "srshl v5.4s, v5.4s, v4.4s\n"
- "srshl v30.4s, v30.4s, v4.4s\n"
- "srshl v25.4s, v25.4s, v4.4s\n"
- "srshl v20.4s, v20.4s, v4.4s\n"
- "add v5.4s, v5.4s, v10.4s\n"
- "add v30.4s, v30.4s, v10.4s\n"
- "add v25.4s, v25.4s, v10.4s\n"
- "add v20.4s, v20.4s, v10.4s\n"
- "smax v5.4s, v5.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smin v5.4s, v5.4s, v12.4s\n"
+ "smax v30.4s, v30.4s, v9.4s\n"
+ "and v17.16b, v22.16b, v21.16b\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
"smin v30.4s, v30.4s, v12.4s\n"
- "smin v25.4s, v25.4s, v12.4s\n"
- "smin v20.4s, v20.4s, v12.4s\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "sqrdmulh v19.4s, v19.4s, v23.4s\n"
+ "sqadd v20.4s, v20.4s, v16.4s\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "and v16.16b, v19.16b, v21.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "srshl v20.4s, v20.4s, v21.4s\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "srshl v22.4s, v22.4s, v21.4s\n"
+ "add v20.4s, v20.4s, v13.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "smax v20.4s, v20.4s, v9.4s\n"
+ "add v22.4s, v22.4s, v13.4s\n"
+ "srshl v19.4s, v19.4s, v21.4s\n"
+ "smin v20.4s, v20.4s, v12.4s\n"
+ "smax v22.4s, v22.4s, v9.4s\n"
+ "add v19.4s, v19.4s, v13.4s\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "blt 28f\n"
- "str s5, [x24, x27]\n"
- "str s30, [x23, x27]\n"
- "str s25, [x22, x27]\n"
- "str s20, [x21, x27]\n"
- "b 31f\n"
- "28:" // Oddments: Unroll 2: Oddment store
- "add x24, x24, x27\n"
- "add x23, x23, x27\n"
- "add x22, x22, x27\n"
- "add x21, x21, x27\n"
- "tbz x20, #1, 29f\n"
- "st1 { v5.h }[0], [x24], #0x2\n"
- "st1 { v30.h }[0], [x23], #0x2\n"
- "st1 { v25.h }[0], [x22], #0x2\n"
- "st1 { v20.h }[0], [x21], #0x2\n"
- "tbz x20, #0, 30f\n"
- "st1 { v5.b }[2], [x24], #0x1\n"
- "st1 { v30.b }[2], [x23], #0x1\n"
- "st1 { v25.b }[2], [x22], #0x1\n"
- "st1 { v20.b }[2], [x21], #0x1\n"
+ "smin v22.4s, v22.4s, v12.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "smax v19.4s, v19.4s, v9.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "smin v19.4s, v19.4s, v12.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "blt 27f\n"
+ "str s30, [x25, x23]\n"
+ "str s22, [x24, x23]\n"
+ "str s20, [x22, x23]\n"
+ "str s19, [x21, x23]\n"
"b 30f\n"
- "29:" // Oddments: Unroll 2: Oddment store: Bit 1: Unset
- "st1 { v5.b }[0], [x24], #0x1\n"
- "st1 { v30.b }[0], [x23], #0x1\n"
- "st1 { v25.b }[0], [x22], #0x1\n"
- "st1 { v20.b }[0], [x21], #0x1\n"
- "30:" // Oddments: Unroll 2: Oddment store: Bit 1: End
- "31:" // Oddments: Unroll 2: After oddment store
- "subs x20, x20, #0x4\n"
- "add x27, x27, #0x4\n"
- "ble 35f\n"
- "ldr q5, [%x[params], #0x0]\n"
- "ldr q0, [%x[params], #0x10]\n"
- "movi v19.4s, #0x0\n"
- ".inst 0x6e8195d3 // udot v19.4s, v14.16b, v1.16b\n"
- "ldr q16, [%x[params], #0x20]\n"
- "ldr q31, [%x[params], #0x30]\n"
- "mov v30.16b, v5.16b\n"
- "mov v25.16b, v5.16b\n"
- "ldr q9, [%x[params], #0x40]\n"
- "ldr q4, [%x[params], #0x50]\n"
- "mov v20.16b, v5.16b\n"
- ".inst 0x6e869405 // udot v5.4s, v0.16b, v6.16b\n"
- ".inst 0x6e9a95d3 // udot v19.4s, v14.16b, v26.16b\n"
- ".inst 0x6e819419 // udot v25.4s, v0.16b, v1.16b\n"
- "movi v17.4s, #0x0\n"
+ "27:" // Oddments: Unroll 2: Oddment store
+ "add x25, x25, x23\n"
+ "add x24, x24, x23\n"
+ "add x22, x22, x23\n"
+ "add x21, x21, x23\n"
+ "tbz x19, #1, 28f\n"
+ "st1 { v30.h }[0], [x25], #0x2\n"
+ "st1 { v22.h }[0], [x24], #0x2\n"
+ "st1 { v20.h }[0], [x22], #0x2\n"
+ "st1 { v19.h }[0], [x21], #0x2\n"
+ "tbz x19, #0, 29f\n"
+ "st1 { v30.b }[2], [x25], #0x1\n"
+ "st1 { v22.b }[2], [x24], #0x1\n"
+ "st1 { v20.b }[2], [x22], #0x1\n"
+ "st1 { v19.b }[2], [x21], #0x1\n"
+ "b 29f\n"
+ "28:" // Oddments: Unroll 2: Oddment store: Bit 1: Unset
+ "tbz x19, #0, 29f\n"
+ "st1 { v30.b }[0], [x25], #0x1\n"
+ "st1 { v22.b }[0], [x24], #0x1\n"
+ "st1 { v20.b }[0], [x22], #0x1\n"
+ "st1 { v19.b }[0], [x21], #0x1\n"
+ "29:" // Oddments: Unroll 2: Oddment store: Bit 1: End
+
+ "30:" // Oddments: Unroll 2: After oddment store
+ "add x23, x23, #0x4\n"
+ "subs x19, x19, #0x4\n"
+ "ble 34f\n"
+ "movi v15.4s, #0x0\n"
+ "ldr q8, [SP, #0x10]\n"
+ "movi v10.4s, #0x0\n"
+ "ldr q2, [SP, #0x30]\n"
+ "ldr q28, [SP, #0x50]\n"
+ ".inst 0x6e82956f // udot v15.4s, v11.16b, v2.16b\n"
+ "ldr q24, [SP, #0x70]\n"
+ "ldr q30, [%x[params], #0x0]\n"
+ "mov v22.16b, v30.16b\n"
+ "ldr q29, [%x[params], #0x10]\n"
+ "mov v20.16b, v30.16b\n"
+ "ldr q27, [%x[params], #0x20]\n"
+ "mov v19.16b, v30.16b\n"
+ "ldr q25, [%x[params], #0x30]\n"
+ ".inst 0x6e9c956f // udot v15.4s, v11.16b, v28.16b\n"
+ "ldr q23, [%x[params], #0x40]\n"
+ "ldr q21, [%x[params], #0x50]\n"
+ ".inst 0x6e8897be // udot v30.4s, v29.16b, v8.16b\n"
"add %x[params], %x[params], #0x60\n"
- ".inst 0x6e819605 // udot v5.4s, v16.16b, v1.16b\n"
- "mov v18.16b, v19.16b\n .inst 0x6e9595d2 // udot v18.4s, v14.16b, v21.16b\n"
- "ext v1.16b, v1.16b, v1.16b, #0x1\n"
- ".inst 0x6e8695d3 // udot v19.4s, v14.16b, v6.16b\n"
- "ext v6.16b, v6.16b, v6.16b, #0x1\n"
- ".inst 0x6e86941e // udot v30.4s, v0.16b, v6.16b\n"
- ".inst 0x6e819414 // udot v20.4s, v0.16b, v1.16b\n"
- ".inst 0x6e8195d1 // udot v17.4s, v14.16b, v1.16b\n"
- ".inst 0x6e9a9619 // udot v25.4s, v16.16b, v26.16b\n"
- ".inst 0x6e9a97e5 // udot v5.4s, v31.16b, v26.16b\n"
- "ext v26.16b, v26.16b, v26.16b, #0x1\n"
- ".inst 0x6e81961e // udot v30.4s, v16.16b, v1.16b\n"
- ".inst 0x6e9a9614 // udot v20.4s, v16.16b, v26.16b\n"
- "mls v5.4s, v19.4s, v11.4s\n"
- ".inst 0x6e9a95d1 // udot v17.4s, v14.16b, v26.16b\n"
- ".inst 0x6e9597f9 // udot v25.4s, v31.16b, v21.16b\n"
- "ext v21.16b, v21.16b, v21.16b, #0x1\n"
- ".inst 0x6e9a97fe // udot v30.4s, v31.16b, v26.16b\n"
- ".inst 0x6e9597f4 // udot v20.4s, v31.16b, v21.16b\n"
- "sqrdmulh v5.4s, v5.4s, v9.4s\n"
- "mov v16.16b, v17.16b\n .inst 0x6e9595d0 // udot v16.4s, v14.16b, v21.16b\n"
- ".inst 0x6e8695d1 // udot v17.4s, v14.16b, v6.16b\n"
- "mls v30.4s, v17.4s, v11.4s\n"
- "mls v25.4s, v18.4s, v11.4s\n"
- "mls v20.4s, v16.4s, v11.4s\n"
- "and v0.16b, v5.16b, v4.16b\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "sqadd v5.4s, v5.4s, v0.4s\n"
- "and v16.16b, v30.16b, v4.16b\n"
- "and v31.16b, v25.16b, v4.16b\n"
- "and v0.16b, v20.16b, v4.16b\n"
+ ".inst 0x6e8297b4 // udot v20.4s, v29.16b, v2.16b\n"
+ "mov v17.16b, v15.16b\n"
+ ".inst 0x6e88956f // udot v15.4s, v11.16b, v8.16b\n"
+ ".inst 0x6e989571 // udot v17.4s, v11.16b, v24.16b\n"
+ ".inst 0x6e82977e // udot v30.4s, v27.16b, v2.16b\n"
+ ".inst 0x6e9c9774 // udot v20.4s, v27.16b, v28.16b\n"
+ "ext v8.16b, v8.16b, v8.16b, #0x1\n"
+ "ext v2.16b, v2.16b, v2.16b, #0x1\n"
+ ".inst 0x6e9c973e // udot v30.4s, v25.16b, v28.16b\n"
+ "mls v30.4s, v15.4s, v14.4s\n"
+ ".inst 0x6e989734 // udot v20.4s, v25.16b, v24.16b\n"
+ "ext v28.16b, v28.16b, v28.16b, #0x1\n"
+ "mls v20.4s, v17.4s, v14.4s\n"
+ "ext v24.16b, v24.16b, v24.16b, #0x1\n"
+ ".inst 0x6e8897b6 // udot v22.4s, v29.16b, v8.16b\n"
+ ".inst 0x6e8297b3 // udot v19.4s, v29.16b, v2.16b\n"
+ ".inst 0x6e82956a // udot v10.4s, v11.16b, v2.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v23.4s\n"
+ ".inst 0x6e829776 // udot v22.4s, v27.16b, v2.16b\n"
+ ".inst 0x6e9c9773 // udot v19.4s, v27.16b, v28.16b\n"
+ ".inst 0x6e9c956a // udot v10.4s, v11.16b, v28.16b\n"
+ "and v18.16b, v30.16b, v21.16b\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ ".inst 0x6e9c9736 // udot v22.4s, v25.16b, v28.16b\n"
+ ".inst 0x6e989733 // udot v19.4s, v25.16b, v24.16b\n"
+ "mov v17.16b, v10.16b\n"
+ ".inst 0x6e88956a // udot v10.4s, v11.16b, v8.16b\n"
+ "mls v22.4s, v10.4s, v14.4s\n"
+ ".inst 0x6e989571 // udot v17.4s, v11.16b, v24.16b\n"
+ "sqadd v30.4s, v30.4s, v18.4s\n"
+ "mls v19.4s, v17.4s, v14.4s\n"
+ "srshl v30.4s, v30.4s, v21.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v23.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v23.4s\n"
+ "add v30.4s, v30.4s, v13.4s\n"
+ "and v16.16b, v20.16b, v21.16b\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sqadd v30.4s, v30.4s, v16.4s\n"
- "sqadd v25.4s, v25.4s, v31.4s\n"
- "sqadd v20.4s, v20.4s, v0.4s\n"
- "srshl v5.4s, v5.4s, v4.4s\n"
- "srshl v30.4s, v30.4s, v4.4s\n"
- "srshl v25.4s, v25.4s, v4.4s\n"
- "srshl v20.4s, v20.4s, v4.4s\n"
- "add v5.4s, v5.4s, v10.4s\n"
- "add v30.4s, v30.4s, v10.4s\n"
- "add v25.4s, v25.4s, v10.4s\n"
- "add v20.4s, v20.4s, v10.4s\n"
- "smax v5.4s, v5.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smin v5.4s, v5.4s, v12.4s\n"
+ "smax v30.4s, v30.4s, v9.4s\n"
+ "and v17.16b, v22.16b, v21.16b\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
"smin v30.4s, v30.4s, v12.4s\n"
- "smin v25.4s, v25.4s, v12.4s\n"
- "smin v20.4s, v20.4s, v12.4s\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "sqrdmulh v19.4s, v19.4s, v23.4s\n"
+ "sqadd v20.4s, v20.4s, v16.4s\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "and v16.16b, v19.16b, v21.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "srshl v20.4s, v20.4s, v21.4s\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "srshl v22.4s, v22.4s, v21.4s\n"
+ "add v20.4s, v20.4s, v13.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "smax v20.4s, v20.4s, v9.4s\n"
+ "add v22.4s, v22.4s, v13.4s\n"
+ "srshl v19.4s, v19.4s, v21.4s\n"
+ "smin v20.4s, v20.4s, v12.4s\n"
+ "smax v22.4s, v22.4s, v9.4s\n"
+ "add v19.4s, v19.4s, v13.4s\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "32:" // Oddments: Unroll 3: Oddment store
- "add x24, x24, x27\n"
- "add x23, x23, x27\n"
- "add x22, x22, x27\n"
- "add x21, x21, x27\n"
- "tbz x20, #1, 33f\n"
- "st1 { v5.h }[0], [x24], #0x2\n"
- "st1 { v30.h }[0], [x23], #0x2\n"
- "st1 { v25.h }[0], [x22], #0x2\n"
- "st1 { v20.h }[0], [x21], #0x2\n"
- "tbz x20, #0, 34f\n"
- "st1 { v5.b }[2], [x24], #0x1\n"
- "st1 { v30.b }[2], [x23], #0x1\n"
- "st1 { v25.b }[2], [x22], #0x1\n"
- "st1 { v20.b }[2], [x21], #0x1\n"
- "b 34f\n"
- "33:" // Oddments: Unroll 3: Oddment store: Bit 1: Unset
- "st1 { v5.b }[0], [x24], #0x1\n"
- "st1 { v30.b }[0], [x23], #0x1\n"
- "st1 { v25.b }[0], [x22], #0x1\n"
- "st1 { v20.b }[0], [x21], #0x1\n"
- "34:" // Oddments: Unroll 3: Oddment store: Bit 1: End
- "35:" // End
+ "smin v22.4s, v22.4s, v12.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "smax v19.4s, v19.4s, v9.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "smin v19.4s, v19.4s, v12.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "31:" // Oddments: Unroll 3: Oddment store
+ "add x25, x25, x23\n"
+ "add x24, x24, x23\n"
+ "add x22, x22, x23\n"
+ "add x21, x21, x23\n"
+ "tbz x19, #1, 32f\n"
+ "st1 { v30.h }[0], [x25], #0x2\n"
+ "st1 { v22.h }[0], [x24], #0x2\n"
+ "st1 { v20.h }[0], [x22], #0x2\n"
+ "st1 { v19.h }[0], [x21], #0x2\n"
+ "tbz x19, #0, 33f\n"
+ "st1 { v30.b }[2], [x25], #0x1\n"
+ "st1 { v22.b }[2], [x24], #0x1\n"
+ "st1 { v20.b }[2], [x22], #0x1\n"
+ "st1 { v19.b }[2], [x21], #0x1\n"
+ "b 33f\n"
+ "32:" // Oddments: Unroll 3: Oddment store: Bit 1: Unset
+ "tbz x19, #0, 33f\n"
+ "st1 { v30.b }[0], [x25], #0x1\n"
+ "st1 { v22.b }[0], [x24], #0x1\n"
+ "st1 { v20.b }[0], [x22], #0x1\n"
+ "st1 { v19.b }[0], [x21], #0x1\n"
+ "33:" // Oddments: Unroll 3: Oddment store: Bit 1: End
+
+ "34:" // End
+ "add SP, SP, #0x80\n"
: [params] "+&r" (params)
- : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : [inptrs] "r" (inptrs), [n_channels] "r" ((long unsigned) n_channels), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
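
Editorial note (not part of the patch): the SQRDMULH / SRSHL / ADD / SMAX / SMIN / UZP1 sequences that close each unroll above implement the usual per-lane requantization of int32 accumulators back to uint8: multiply by a fixed-point scale, rounding-shift right, add the Requantize32 c_offset, clamp to [minval, maxval], and narrow. Both sides of the hunks perform this same arithmetic and differ only in register assignment, instruction scheduling, and stack use. Below is a minimal scalar sketch of that epilogue in C++, assuming a positive right-shift amount and ignoring both SQRDMULH's saturation corner case and the sign-dependent rounding fixup (the AND/SSHR/SQADD triple) that the vector code applies before SRSHL; all names are illustrative, not from the library.

#include <algorithm>
#include <cstdint>

// SQRDMULH: rounding doubling multiply returning the high 32 bits.
// (Saturation only matters when acc == multiplier == INT32_MIN, ignored here.)
static inline int32_t rounding_doubling_high_mul(int32_t acc, int32_t multiplier)
{
    const int64_t prod = (int64_t) acc * (int64_t) multiplier;
    return (int32_t) ((2 * prod + (INT64_C(1) << 31)) >> 32);
}

// SRSHL with a negative shift operand: rounding arithmetic shift right.
static inline int32_t rounding_shift_right(int32_t v, int32_t shift)
{
    if (shift <= 0) return v;
    return (v >> shift) + ((v >> (shift - 1)) & 1);
}

// One int32 accumulator -> one uint8 output lane.
static inline uint8_t requantize_lane(int32_t acc, int32_t mul, int32_t shift,
                                      int32_t c_offset, int32_t minval, int32_t maxval)
{
    int32_t v = rounding_shift_right(rounding_doubling_high_mul(acc, mul), shift);
    v = std::min(std::max(v + c_offset, minval), maxval);   // ADD + SMAX + SMIN
    return (uint8_t) v;                                     // UZP1 narrowing
}
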
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
index 15bbb31413..4b0fca77f1 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -91,1072 +91,1072 @@ void a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "ldr x6, [%x[params], %[offsetof_Params_n_channels]]\n"
- "ldr x23, [%x[params], %[offsetof_Params_requant]]\n"
- "lsr x7, x6, #0x3\n"
- "add x20, x23, %[offsetof_Requantize32_a_offset]\n"
- "ld1r { v24.16b }, [x20]\n"
+ "ldr x19, [%x[params], %[offsetof_Params_requant]]\n"
+ "ldr x8, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "add x24, x19, %[offsetof_Requantize32_a_offset]\n"
+ "add x23, x19, %[offsetof_Requantize32_b_offset]\n"
"ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n"
- "add x21, x23, %[offsetof_Requantize32_b_offset]\n"
- "add x20, x23, %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v15.16b }, [x21]\n"
- "ld1r { v14.8h }, [x20]\n"
- "add x21, x23, %[offsetof_Requantize32_minval]\n"
- "add x20, x23, %[offsetof_Requantize32_maxval]\n"
- "ld1r { v12.8h }, [x21]\n"
- "ld1r { v11.8h }, [x20]\n"
- "mov x8, #0x0\n"
- "mov x17, #0x0\n"
- "add x16, %x[params], %[offsetof_Params_inptrs]\n"
- "ldr x15, [%x[params], %[offsetof_Params_weights]]\n"
- "ldr x14, [%x[params], %[offsetof_Params_requant_muls]]\n"
- "ldr x13, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "ldp x12, x11, [x22, #0x0]\n"
- "ldp x10, x9, [x22, #0x10]\n"
- "cbz x7, 3f\n"
- "ldr d0, [x15, #0x0]\n"
- "ldr d1, [x15, #0x8]\n"
- "subs x7, x7, #0x1\n"
- "usubl v0.8h, v0.8b, v15.8b\n"
- "ldr d2, [x15, #0x10]\n"
- "ldr d3, [x15, #0x18]\n"
- "usubl v1.8h, v1.8b, v15.8b\n"
- "usubl v2.8h, v2.8b, v15.8b\n"
- "ldr d4, [x15, #0x20]\n"
- "ldr d5, [x15, #0x28]\n"
- "usubl v3.8h, v3.8b, v15.8b\n"
- "usubl v4.8h, v4.8b, v15.8b\n"
- "ldr d6, [x15, #0x30]\n"
- "ldr d7, [x15, #0x38]\n"
- "usubl v5.8h, v5.8b, v15.8b\n"
- "usubl v6.8h, v6.8b, v15.8b\n"
- "ldr d8, [x15, #0x40]\n"
- "ldr x28, [%x[params], %[offsetof_Params_bias]]\n"
- "usubl v7.8h, v7.8b, v15.8b\n"
- "usubl v8.8h, v8.8b, v15.8b\n"
- "ldr q13, [x28, #0x0]\n"
- "ldr q20, [x28, #0x10]\n"
- "add x28, x28, #0x20\n"
- "str x28, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x24, x23, [x16, #0x0]\n"
- "ldp x22, x21, [x16, #0x10]\n"
+ "add x21, x19, %[offsetof_Requantize32_c_offset]\n"
+ "add x20, x19, %[offsetof_Requantize32_minval]\n"
+ "ldr x17, [%x[params], %[offsetof_Params_weights]]\n"
+ "add x19, x19, %[offsetof_Requantize32_maxval]\n"
+ "ld1r { v22.16b }, [x24]\n"
+ "ld1r { v12.16b }, [x23]\n"
+ "lsr x16, x8, #0x3\n"
+ "ld1r { v14.8h }, [x21]\n"
+ "ld1r { v17.8h }, [x20]\n"
+ "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
+ "ld1r { v15.8h }, [x19]\n"
+ "ldr x13, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "add x12, %x[params], %[offsetof_Params_inptrs]\n"
+ "ldr x11, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "ldp x10, x9, [x22, #0x0]\n"
+ "ldp x28, x27, [x22, #0x10]\n"
+ "cbz x16, 3f\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr q13, [x19, #0x0]\n"
+ "subs x16, x16, #0x1\n"
+ "mov v19.16b, v13.16b\n"
+ "ldr q26, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr d0, [x17, #0x0]\n"
+ "ldr d1, [x17, #0x8]\n"
+ "ldr d2, [x17, #0x10]\n"
+ "mov v11.16b, v26.16b\n"
+ "mov v18.16b, v13.16b\n"
+ "ldr d3, [x17, #0x18]\n"
+ "ldr d4, [x17, #0x20]\n"
+ "mov v24.16b, v26.16b\n"
"mov v9.16b, v13.16b\n"
- "mov v18.16b, v20.16b\n"
- "ldr d31, [x24, x8]\n"
- "ldr d30, [x23, x8]\n"
- "mov v16.16b, v13.16b\n"
- "mov v26.16b, v20.16b\n"
- "ldr d29, [x22, x8]\n"
- "ldr d28, [x21, x8]\n"
- "mov v25.16b, v13.16b\n"
- "mov v10.16b, v20.16b\n"
- "ldr x20, [x16, #0x20]\n"
- "ldr d27, [x20, x8]\n"
- "usubl v31.8h, v31.8b, v24.8b\n"
- "usubl v30.8h, v30.8b, v24.8b\n"
- "usubl v29.8h, v29.8b, v24.8b\n"
- "usubl v28.8h, v28.8b, v24.8b\n"
- "usubl v27.8h, v27.8b, v24.8b\n"
+ "ldr d5, [x17, #0x28]\n"
+ "ldr d6, [x17, #0x30]\n"
+ "mov v23.16b, v26.16b\n"
+ "usubl v0.8h, v0.8b, v12.8b\n"
+ "ldr d7, [x17, #0x38]\n"
+ "ldr d8, [x17, #0x40]\n"
+ "usubl v1.8h, v1.8b, v12.8b\n"
+ "usubl v2.8h, v2.8b, v12.8b\n"
+ "ldp x23, x22, [x12, #0x0]\n"
+ "ldp x21, x20, [x12, #0x10]\n"
+ "usubl v3.8h, v3.8b, v12.8b\n"
+ "usubl v4.8h, v4.8b, v12.8b\n"
+ "ldr x19, [x12, #0x20]\n"
+ "ldr d31, [x23, x15]\n"
+ "usubl v5.8h, v5.8b, v12.8b\n"
+ "usubl v6.8h, v6.8b, v12.8b\n"
+ "ldr d30, [x22, x15]\n"
+ "ldr d29, [x21, x15]\n"
+ "usubl v7.8h, v7.8b, v12.8b\n"
+ "usubl v8.8h, v8.8b, v12.8b\n"
+ "ldr d28, [x20, x15]\n"
+ "ldr d27, [x19, x15]\n"
+ "usubl v31.8h, v31.8b, v22.8b\n"
+ "usubl v30.8h, v30.8b, v22.8b\n"
+ "usubl v29.8h, v29.8b, v22.8b\n"
+ "usubl v28.8h, v28.8b, v22.8b\n"
+ "usubl v27.8h, v27.8b, v22.8b\n"
"beq 2f\n"
"1:" // Loop
- "ldr q17, [x14, #0x0]\n"
- "ldr q22, [x13, #0x0]\n"
"smlal v13.4s, v31.4h, v4.4h\n"
- "smlal2 v20.4s, v31.8h, v4.8h\n"
- "ldr q23, [x14, #0x10]\n"
- "smlal v9.4s, v31.4h, v3.4h\n"
- "smlal2 v18.4s, v31.8h, v3.8h\n"
- "ldr x21, [x16, #0x28]\n"
+ "smlal2 v26.4s, v31.8h, v4.8h\n"
+ "ldr x21, [x12, #0x28]\n"
+ "ldr x26, [x12, #0x38]\n"
+ "smlal v19.4s, v31.4h, v3.4h\n"
+ "smlal2 v11.4s, v31.8h, v3.8h\n"
+ "ldr x20, [x12, #0x30]\n"
+ "ldr x25, [x12, #0x40]\n"
"smlal v13.4s, v30.4h, v0.4h\n"
- "smlal2 v20.4s, v30.8h, v0.8h\n"
- "ldr q19, [x13, #0x10]\n"
- "ldr x28, [x16, #0x38]\n"
- "smlal v9.4s, v29.4h, v2.4h\n"
- "smlal2 v18.4s, v29.8h, v2.8h\n"
- "ldr x20, [x16, #0x30]\n"
- "ldr d29, [x20, x8]\n"
- "smlal v16.4s, v31.4h, v1.4h\n"
- "smlal2 v26.4s, v31.8h, v1.8h\n"
- "ldr x27, [x16, #0x40]\n"
- "ldr x26, [x16, #0x48]\n"
- "smlal v25.4s, v31.4h, v0.4h\n"
- "smlal2 v10.4s, v31.8h, v0.8h\n"
- "ldr d31, [x21, x8]\n"
- "usubl v31.8h, v31.8b, v24.8b\n"
+ "smlal2 v26.4s, v30.8h, v0.8h\n"
+ "ldr x19, [x12, #0x48]\n"
+ "ldr d30, [x19, x15]\n"
+ "smlal v19.4s, v29.4h, v2.4h\n"
+ "smlal2 v11.4s, v29.8h, v2.8h\n"
+ "ldr d29, [x20, x15]\n"
+ "usubl v29.8h, v29.8b, v22.8b\n"
+ "smlal v18.4s, v31.4h, v1.4h\n"
+ "smlal2 v24.4s, v31.8h, v1.8h\n"
+ "ldr x24, [x12, #0x50]\n"
+ "ldr x23, [x12, #0x58]\n"
+ "smlal v9.4s, v31.4h, v0.4h\n"
+ "smlal2 v23.4s, v31.8h, v0.8h\n"
+ "ldr d31, [x21, x15]\n"
+ "usubl v31.8h, v31.8b, v22.8b\n"
"smlal v13.4s, v28.4h, v5.4h\n"
- "smlal2 v20.4s, v28.8h, v5.8h\n"
- "usubl v29.8h, v29.8b, v24.8b\n"
- "ldr x25, [x16, #0x50]\n"
- "smlal v9.4s, v28.4h, v4.4h\n"
- "smlal2 v18.4s, v28.8h, v4.8h\n"
- "ldr x24, [x16, #0x58]\n"
- "ldr x23, [x16, #0x60]\n"
- "smlal v16.4s, v28.4h, v2.4h\n"
- "smlal2 v26.4s, v28.8h, v2.8h\n"
- "ldr x22, [x16, #0x68]\n"
- "ldr x21, [x16, #0x70]\n"
- "smlal v25.4s, v28.4h, v1.4h\n"
- "smlal2 v10.4s, v28.8h, v1.8h\n"
- "ldr d28, [x28, x8]\n"
- "usubl v28.8h, v28.8b, v24.8b\n"
+ "smlal2 v26.4s, v28.8h, v5.8h\n"
+ "usubl v30.8h, v30.8b, v22.8b\n"
+ "ldr x22, [x12, #0x60]\n"
+ "smlal v19.4s, v28.4h, v4.4h\n"
+ "smlal2 v11.4s, v28.8h, v4.8h\n"
+ "ldr x21, [x12, #0x68]\n"
+ "ldr x20, [x12, #0x70]\n"
+ "smlal v18.4s, v28.4h, v2.4h\n"
+ "smlal2 v24.4s, v28.8h, v2.8h\n"
+ "ldr x19, [x12, #0x78]\n"
+ "ldr q21, [x13, #0x0]\n"
+ "smlal v9.4s, v28.4h, v1.4h\n"
+ "smlal2 v23.4s, v28.8h, v1.8h\n"
+ "ldr d28, [x26, x15]\n"
+ "usubl v28.8h, v28.8b, v22.8b\n"
"smlal v13.4s, v27.4h, v7.4h\n"
- "smlal2 v20.4s, v27.8h, v7.8h\n"
- "ldr x20, [x16, #0x78]\n"
- "ldr x28, [%x[params], %[offsetof_Params_bias]]\n"
- "smlal v9.4s, v27.4h, v6.4h\n"
- "smlal2 v18.4s, v27.8h, v6.8h\n"
- "add x15, x15, #0x48\n"
- "subs x7, x7, #0x1\n"
- "smlal v16.4s, v31.4h, v6.4h\n"
- "smlal2 v26.4s, v31.8h, v6.8h\n"
- "ldr d31, [x27, x8]\n"
- "usubl v31.8h, v31.8b, v24.8b\n"
- "smlal v25.4s, v27.4h, v3.4h\n"
- "smlal2 v10.4s, v27.8h, v3.8h\n"
- "add x14, x14, #0x20\n"
+ "smlal2 v26.4s, v27.8h, v7.8h\n"
+ "ldr q25, [x11, #0x0]\n"
+ "ldr q10, [x13, #0x10]\n"
+ "smlal v19.4s, v27.4h, v6.4h\n"
+ "smlal2 v11.4s, v27.8h, v6.8h\n"
+ "ldr q16, [x11, #0x10]\n"
+ "add x17, x17, #0x48\n"
+ "smlal v18.4s, v31.4h, v6.4h\n"
+ "smlal2 v24.4s, v31.8h, v6.8h\n"
+ "ldr d31, [x25, x15]\n"
+ "usubl v31.8h, v31.8b, v22.8b\n"
+ "smlal v9.4s, v27.4h, v3.4h\n"
+ "smlal2 v23.4s, v27.8h, v3.8h\n"
+ "subs x16, x16, #0x1\n"
"add x13, x13, #0x20\n"
"smlal v13.4s, v28.4h, v1.4h\n"
- "smlal2 v20.4s, v28.8h, v1.8h\n"
- "smlal v9.4s, v28.4h, v0.4h\n"
- "smlal2 v18.4s, v28.8h, v0.8h\n"
- "ldr d30, [x26, x8]\n"
- "usubl v30.8h, v30.8b, v24.8b\n"
- "smlal v16.4s, v27.4h, v4.4h\n"
- "smlal v25.4s, v29.4h, v8.4h\n"
- "smlal2 v26.4s, v27.8h, v4.8h\n"
- "ldr d28, [x24, x8]\n"
- "smlal2 v10.4s, v29.8h, v8.8h\n"
- "ldr d29, [x25, x8]\n"
+ "smlal2 v26.4s, v28.8h, v1.8h\n"
+ "add x11, x11, #0x20\n"
+ "smlal v19.4s, v28.4h, v0.4h\n"
+ "smlal2 v11.4s, v28.8h, v0.8h\n"
+ "ldr d28, [x23, x15]\n"
+ "usubl v28.8h, v28.8b, v22.8b\n"
+ "smlal v18.4s, v27.4h, v4.4h\n"
+ "smlal v9.4s, v29.4h, v8.4h\n"
+ "smlal2 v24.4s, v27.8h, v4.8h\n"
+ "smlal2 v23.4s, v29.8h, v8.8h\n"
+ "ldr d29, [x24, x15]\n"
+ "usubl v29.8h, v29.8b, v22.8b\n"
"smlal v13.4s, v31.4h, v2.4h\n"
- "smlal2 v20.4s, v31.8h, v2.8h\n"
- "usubl v29.8h, v29.8b, v24.8b\n"
- "smlal v9.4s, v31.4h, v1.4h\n"
- "smlal2 v18.4s, v31.8h, v1.8h\n"
- "ldr d31, [x23, x8]\n"
- "usubl v28.8h, v28.8b, v24.8b\n"
- "smlal v16.4s, v30.4h, v5.4h\n"
- "smlal v25.4s, v30.4h, v4.4h\n"
- "usubl v31.8h, v31.8b, v24.8b\n"
+ "smlal2 v26.4s, v31.8h, v2.8h\n"
+ "smlal v19.4s, v31.4h, v1.4h\n"
+ "smlal2 v11.4s, v31.8h, v1.8h\n"
+ "ldr d31, [x22, x15]\n"
+ "usubl v31.8h, v31.8b, v22.8b\n"
+ "smlal v18.4s, v30.4h, v5.4h\n"
+ "smlal v9.4s, v30.4h, v4.4h\n"
"smlal v13.4s, v30.4h, v8.4h\n"
- "smlal2 v20.4s, v30.8h, v8.8h\n"
- "smlal v9.4s, v30.4h, v7.4h\n"
- "smlal2 v18.4s, v30.8h, v7.8h\n"
- "smlal2 v26.4s, v30.8h, v5.8h\n"
- "smlal2 v10.4s, v30.8h, v4.8h\n"
- "ldr d30, [x22, x8]\n"
- "usubl v30.8h, v30.8b, v24.8b\n"
- "smlal v16.4s, v29.4h, v0.4h\n"
- "smlal v25.4s, v28.4h, v2.4h\n"
+ "smlal2 v26.4s, v30.8h, v8.8h\n"
+ "smlal v19.4s, v30.4h, v7.4h\n"
+ "smlal2 v11.4s, v30.8h, v7.8h\n"
+ "smlal2 v24.4s, v30.8h, v5.8h\n"
+ "smlal2 v23.4s, v30.8h, v4.8h\n"
+ "ldr d30, [x21, x15]\n"
+ "usubl v30.8h, v30.8b, v22.8b\n"
+ "smlal v18.4s, v29.4h, v0.4h\n"
+ "smlal v9.4s, v28.4h, v2.4h\n"
"smlal v13.4s, v29.4h, v3.4h\n"
- "smlal2 v20.4s, v29.8h, v3.8h\n"
- "smlal2 v26.4s, v29.8h, v0.8h\n"
- "ldr d29, [x21, x8]\n"
- "smlal2 v10.4s, v28.8h, v2.8h\n"
- "usubl v29.8h, v29.8b, v24.8b\n"
- "smlal v16.4s, v31.4h, v3.4h\n"
- "smlal v25.4s, v30.4h, v5.4h\n"
- "smlal v9.4s, v28.4h, v5.4h\n"
- "smlal2 v18.4s, v28.8h, v5.8h\n"
- "ldr d28, [x20, x8]\n"
- "usubl v28.8h, v28.8b, v24.8b\n"
+ "smlal2 v26.4s, v29.8h, v3.8h\n"
+ "smlal2 v24.4s, v29.8h, v0.8h\n"
+ "ldr d29, [x20, x15]\n"
+ "smlal2 v23.4s, v28.8h, v2.8h\n"
+ "usubl v29.8h, v29.8b, v22.8b\n"
+ "smlal v18.4s, v31.4h, v3.4h\n"
+ "smlal v9.4s, v30.4h, v5.4h\n"
+ "smlal v19.4s, v28.4h, v5.4h\n"
+ "smlal2 v11.4s, v28.8h, v5.8h\n"
+ "ldr d28, [x19, x15]\n"
+ "usubl v28.8h, v28.8b, v22.8b\n"
+ "smlal2 v24.4s, v31.8h, v3.8h\n"
+ "smlal2 v23.4s, v30.8h, v5.8h\n"
+ "add x15, x15, #0x8\n"
+ "smlal v18.4s, v29.4h, v7.4h\n"
+ "smlal v9.4s, v29.4h, v6.4h\n"
+ "smlal2 v24.4s, v29.8h, v7.8h\n"
+ "smlal2 v23.4s, v29.8h, v6.8h\n"
"smlal v13.4s, v31.4h, v6.4h\n"
- "smlal2 v26.4s, v31.8h, v3.8h\n"
- "sqrdmulh v13.4s, v13.4s, v17.4s\n"
- "add x8, x8, #0x8\n"
- "smlal2 v10.4s, v30.8h, v5.8h\n"
- "smlal v16.4s, v29.4h, v7.4h\n"
- "and v21.16b, v13.16b, v22.16b\n"
- "smlal v25.4s, v29.4h, v6.4h\n"
- "smlal2 v20.4s, v31.8h, v6.8h\n"
- "sqrdmulh v20.4s, v20.4s, v23.4s\n"
- "smlal2 v26.4s, v29.8h, v7.8h\n"
- "smlal2 v10.4s, v29.8h, v6.8h\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "smlal v9.4s, v30.4h, v8.4h\n"
- "smlal v16.4s, v28.4h, v8.4h\n"
- "and v29.16b, v20.16b, v19.16b\n"
- "smlal v25.4s, v28.4h, v7.4h\n"
- "smlal2 v18.4s, v30.8h, v8.8h\n"
- "sqrdmulh v9.4s, v9.4s, v17.4s\n"
- "smlal2 v26.4s, v28.8h, v8.8h\n"
- "smlal2 v10.4s, v28.8h, v7.8h\n"
- "sqrdmulh v16.4s, v16.4s, v17.4s\n"
- "sqrdmulh v25.4s, v25.4s, v17.4s\n"
- "sqadd v13.4s, v13.4s, v21.4s\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "and v0.16b, v9.16b, v22.16b\n"
- "sqrdmulh v18.4s, v18.4s, v23.4s\n"
- "and v27.16b, v16.16b, v22.16b\n"
- "sqrdmulh v26.4s, v26.4s, v23.4s\n"
- "and v21.16b, v25.16b, v22.16b\n"
- "sqrdmulh v10.4s, v10.4s, v23.4s\n"
- "sqadd v20.4s, v20.4s, v29.4s\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "and v17.16b, v18.16b, v19.16b\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "and v7.16b, v26.16b, v19.16b\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "and v29.16b, v10.16b, v19.16b\n"
- "sqadd v9.4s, v9.4s, v0.4s\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v27.4s\n"
+ "smlal v19.4s, v30.4h, v8.4h\n"
+ "sqrdmulh v13.4s, v13.4s, v21.4s\n"
+ "smlal v18.4s, v28.4h, v8.4h\n"
+ "smlal v9.4s, v28.4h, v7.4h\n"
+ "sqrdmulh v19.4s, v19.4s, v21.4s\n"
+ "smlal2 v26.4s, v31.8h, v6.8h\n"
+ "smlal2 v11.4s, v30.8h, v8.8h\n"
+ "sqrdmulh v18.4s, v18.4s, v21.4s\n"
+ "smlal2 v24.4s, v28.8h, v8.8h\n"
+ "smlal2 v23.4s, v28.8h, v7.8h\n"
+ "sqrdmulh v9.4s, v9.4s, v21.4s\n"
+ "and v7.16b, v13.16b, v25.16b\n"
+ "sqrdmulh v26.4s, v26.4s, v10.4s\n"
+ "and v4.16b, v19.16b, v25.16b\n"
+ "sqrdmulh v11.4s, v11.4s, v10.4s\n"
+ "and v21.16b, v18.16b, v25.16b\n"
+ "sqrdmulh v24.4s, v24.4s, v10.4s\n"
+ "and v20.16b, v9.16b, v25.16b\n"
+ "sqrdmulh v23.4s, v23.4s, v10.4s\n"
"sshr v7.4s, v7.4s, #0x1f\n"
- "sqadd v25.4s, v25.4s, v21.4s\n"
+ "and v29.16b, v26.16b, v16.16b\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "and v10.16b, v11.16b, v16.16b\n"
+ "sshr v21.4s, v21.4s, #0x1f\n"
+ "and v31.16b, v24.16b, v16.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v30.16b, v23.16b, v16.16b\n"
+ "sqadd v13.4s, v13.4s, v7.4s\n"
"sshr v29.4s, v29.4s, #0x1f\n"
- "srshl v13.4s, v13.4s, v22.4s\n"
- "srshl v9.4s, v9.4s, v22.4s\n"
- "sqadd v18.4s, v18.4s, v17.4s\n"
- "srshl v16.4s, v16.4s, v22.4s\n"
- "sqadd v26.4s, v26.4s, v7.4s\n"
- "srshl v25.4s, v25.4s, v22.4s\n"
- "sqadd v10.4s, v10.4s, v29.4s\n"
- "srshl v20.4s, v20.4s, v19.4s\n"
+ "sqadd v19.4s, v19.4s, v4.4s\n"
+ "sshr v10.4s, v10.4s, #0x1f\n"
+ "sqadd v18.4s, v18.4s, v21.4s\n"
+ "sshr v31.4s, v31.4s, #0x1f\n"
+ "sqadd v9.4s, v9.4s, v20.4s\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "srshl v13.4s, v13.4s, v25.4s\n"
+ "sqadd v26.4s, v26.4s, v29.4s\n"
+ "srshl v19.4s, v19.4s, v25.4s\n"
+ "sqadd v11.4s, v11.4s, v10.4s\n"
+ "srshl v18.4s, v18.4s, v25.4s\n"
+ "sqadd v24.4s, v24.4s, v31.4s\n"
+ "srshl v9.4s, v9.4s, v25.4s\n"
+ "sqadd v23.4s, v23.4s, v30.4s\n"
+ "srshl v26.4s, v26.4s, v16.4s\n"
"sqxtn v13.4h, v13.4s\n"
- "srshl v18.4s, v18.4s, v19.4s\n"
+ "srshl v11.4s, v11.4s, v16.4s\n"
+ "sqxtn v19.4h, v19.4s\n"
+ "srshl v24.4s, v24.4s, v16.4s\n"
+ "sqxtn v18.4h, v18.4s\n"
+ "srshl v23.4s, v23.4s, v16.4s\n"
"sqxtn v9.4h, v9.4s\n"
- "srshl v26.4s, v26.4s, v19.4s\n"
- "sqxtn v16.4h, v16.4s\n"
- "srshl v10.4s, v10.4s, v19.4s\n"
- "sqxtn v25.4h, v25.4s\n"
- "sqxtn2 v13.8h, v20.4s\n"
- "sqxtn2 v9.8h, v18.4s\n"
- "sqxtn2 v16.8h, v26.4s\n"
- "sqxtn2 v25.8h, v10.4s\n"
+ "sqxtn2 v13.8h, v26.4s\n"
+ "sqxtn2 v19.8h, v11.4s\n"
+ "sqxtn2 v18.8h, v24.4s\n"
+ "sqxtn2 v9.8h, v23.4s\n"
"sqadd v13.8h, v13.8h, v14.8h\n"
+ "sqadd v19.8h, v19.8h, v14.8h\n"
+ "sqadd v18.8h, v18.8h, v14.8h\n"
"sqadd v9.8h, v9.8h, v14.8h\n"
- "sqadd v16.8h, v16.8h, v14.8h\n"
- "sqadd v25.8h, v25.8h, v14.8h\n"
- "smax v13.8h, v13.8h, v12.8h\n"
- "smax v9.8h, v9.8h, v12.8h\n"
- "smax v16.8h, v16.8h, v12.8h\n"
- "smax v25.8h, v25.8h, v12.8h\n"
- "smin v13.8h, v13.8h, v11.8h\n"
- "smin v9.8h, v9.8h, v11.8h\n"
- "smin v16.8h, v16.8h, v11.8h\n"
- "smin v25.8h, v25.8h, v11.8h\n"
+ "smax v13.8h, v13.8h, v17.8h\n"
+ "smax v19.8h, v19.8h, v17.8h\n"
+ "smax v18.8h, v18.8h, v17.8h\n"
+ "smax v9.8h, v9.8h, v17.8h\n"
+ "smin v13.8h, v13.8h, v15.8h\n"
+ "smin v19.8h, v19.8h, v15.8h\n"
+ "smin v18.8h, v18.8h, v15.8h\n"
+ "smin v9.8h, v9.8h, v15.8h\n"
"uzp1 v13.16b, v13.16b, v13.16b\n"
- "str d13, [x12, x17]\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str d13, [x10, x14]\n"
+ "uzp1 v18.16b, v18.16b, v18.16b\n"
"uzp1 v9.16b, v9.16b, v9.16b\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "str d9, [x11, x17]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "str d16, [x10, x17]\n"
- "str d25, [x9, x17]\n"
- "ldr q13, [x28, #0x0]\n"
- "ldr q20, [x28, #0x10]\n"
- "add x28, x28, #0x20\n"
- "ldr d0, [x15, #0x0]\n"
- "ldr d1, [x15, #0x8]\n"
- "add x17, x17, #0x8\n"
- "str x28, [%x[params], %[offsetof_Params_bias]]\n"
- "ldr d2, [x15, #0x10]\n"
- "ldr d3, [x15, #0x18]\n"
+ "str d19, [x9, x14]\n"
+ "str d18, [x28, x14]\n"
+ "str d9, [x27, x14]\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr q13, [x19, #0x0]\n"
+ "add x14, x14, #0x8\n"
+ "ldr q26, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr d0, [x17, #0x0]\n"
+ "ldr d1, [x17, #0x8]\n"
+ "ldr d2, [x17, #0x10]\n"
+ "mov v19.16b, v13.16b\n"
+ "mov v11.16b, v26.16b\n"
+ "ldr d3, [x17, #0x18]\n"
+ "ldr d4, [x17, #0x20]\n"
+ "mov v18.16b, v13.16b\n"
+ "mov v24.16b, v26.16b\n"
+ "ldr d5, [x17, #0x28]\n"
+ "ldr d6, [x17, #0x30]\n"
"mov v9.16b, v13.16b\n"
- "mov v18.16b, v20.16b\n"
- "ldr d4, [x15, #0x20]\n"
- "ldr d5, [x15, #0x28]\n"
- "mov v16.16b, v13.16b\n"
- "mov v26.16b, v20.16b\n"
- "ldr d6, [x15, #0x30]\n"
- "ldr d7, [x15, #0x38]\n"
- "mov v25.16b, v13.16b\n"
- "mov v10.16b, v20.16b\n"
- "ldr d8, [x15, #0x40]\n"
- "ldp x24, x23, [x16, #0x0]\n"
- "usubl v0.8h, v0.8b, v15.8b\n"
- "usubl v1.8h, v1.8b, v15.8b\n"
- "ldp x22, x21, [x16, #0x10]\n"
- "ldr d31, [x24, x8]\n"
- "usubl v2.8h, v2.8b, v15.8b\n"
- "usubl v3.8h, v3.8b, v15.8b\n"
- "ldr d30, [x23, x8]\n"
- "ldr d29, [x22, x8]\n"
- "usubl v4.8h, v4.8b, v15.8b\n"
- "usubl v5.8h, v5.8b, v15.8b\n"
- "ldr d28, [x21, x8]\n"
- "ldr x20, [x16, #0x20]\n"
- "usubl v6.8h, v6.8b, v15.8b\n"
- "usubl v7.8h, v7.8b, v15.8b\n"
- "ldr d27, [x20, x8]\n"
- "usubl v8.8h, v8.8b, v15.8b\n"
- "usubl v31.8h, v31.8b, v24.8b\n"
- "usubl v30.8h, v30.8b, v24.8b\n"
- "usubl v29.8h, v29.8b, v24.8b\n"
- "usubl v28.8h, v28.8b, v24.8b\n"
- "usubl v27.8h, v27.8b, v24.8b\n"
+ "mov v23.16b, v26.16b\n"
+ "ldr d7, [x17, #0x38]\n"
+ "ldr d8, [x17, #0x40]\n"
+ "usubl v0.8h, v0.8b, v12.8b\n"
+ "usubl v1.8h, v1.8b, v12.8b\n"
+ "ldp x23, x22, [x12, #0x0]\n"
+ "ldp x21, x20, [x12, #0x10]\n"
+ "usubl v2.8h, v2.8b, v12.8b\n"
+ "usubl v3.8h, v3.8b, v12.8b\n"
+ "ldr x19, [x12, #0x20]\n"
+ "ldr d31, [x23, x15]\n"
+ "usubl v4.8h, v4.8b, v12.8b\n"
+ "usubl v5.8h, v5.8b, v12.8b\n"
+ "ldr d30, [x22, x15]\n"
+ "ldr d29, [x21, x15]\n"
+ "usubl v6.8h, v6.8b, v12.8b\n"
+ "usubl v7.8h, v7.8b, v12.8b\n"
+ "ldr d28, [x20, x15]\n"
+ "ldr d27, [x19, x15]\n"
+ "usubl v8.8h, v8.8b, v12.8b\n"
+ "usubl v31.8h, v31.8b, v22.8b\n"
+ "usubl v30.8h, v30.8b, v22.8b\n"
+ "usubl v29.8h, v29.8b, v22.8b\n"
+ "usubl v28.8h, v28.8b, v22.8b\n"
+ "usubl v27.8h, v27.8b, v22.8b\n"
"bgt 1b\n"
"2:" // Tail
- "ldr q17, [x14, #0x0]\n"
- "ldr q22, [x13, #0x0]\n"
"smlal v13.4s, v31.4h, v4.4h\n"
- "smlal2 v20.4s, v31.8h, v4.8h\n"
- "ldr q23, [x14, #0x10]\n"
- "smlal v9.4s, v31.4h, v3.4h\n"
- "smlal2 v18.4s, v31.8h, v3.8h\n"
- "ldr x21, [x16, #0x28]\n"
+ "smlal2 v26.4s, v31.8h, v4.8h\n"
+ "ldr x21, [x12, #0x28]\n"
+ "ldr x26, [x12, #0x38]\n"
+ "smlal v19.4s, v31.4h, v3.4h\n"
+ "smlal2 v11.4s, v31.8h, v3.8h\n"
+ "ldr x20, [x12, #0x30]\n"
+ "ldr x25, [x12, #0x40]\n"
"smlal v13.4s, v30.4h, v0.4h\n"
- "smlal2 v20.4s, v30.8h, v0.8h\n"
- "ldr q19, [x13, #0x10]\n"
- "ldr x28, [x16, #0x38]\n"
- "smlal v9.4s, v29.4h, v2.4h\n"
- "smlal2 v18.4s, v29.8h, v2.8h\n"
- "ldr x20, [x16, #0x30]\n"
- "ldr d29, [x20, x8]\n"
- "smlal v16.4s, v31.4h, v1.4h\n"
- "smlal2 v26.4s, v31.8h, v1.8h\n"
- "ldr x27, [x16, #0x40]\n"
- "ldr x26, [x16, #0x48]\n"
- "smlal v25.4s, v31.4h, v0.4h\n"
- "smlal2 v10.4s, v31.8h, v0.8h\n"
- "ldr d31, [x21, x8]\n"
- "usubl v31.8h, v31.8b, v24.8b\n"
+ "smlal2 v26.4s, v30.8h, v0.8h\n"
+ "ldr x19, [x12, #0x48]\n"
+ "ldr d30, [x19, x15]\n"
+ "smlal v19.4s, v29.4h, v2.4h\n"
+ "smlal2 v11.4s, v29.8h, v2.8h\n"
+ "ldr d29, [x20, x15]\n"
+ "usubl v29.8h, v29.8b, v22.8b\n"
+ "smlal v18.4s, v31.4h, v1.4h\n"
+ "smlal2 v24.4s, v31.8h, v1.8h\n"
+ "ldr x24, [x12, #0x50]\n"
+ "ldr x23, [x12, #0x58]\n"
+ "smlal v9.4s, v31.4h, v0.4h\n"
+ "smlal2 v23.4s, v31.8h, v0.8h\n"
+ "ldr d31, [x21, x15]\n"
+ "usubl v31.8h, v31.8b, v22.8b\n"
"smlal v13.4s, v28.4h, v5.4h\n"
- "smlal2 v20.4s, v28.8h, v5.8h\n"
- "usubl v29.8h, v29.8b, v24.8b\n"
- "ldr x25, [x16, #0x50]\n"
- "smlal v9.4s, v28.4h, v4.4h\n"
- "smlal2 v18.4s, v28.8h, v4.8h\n"
- "ldr x24, [x16, #0x58]\n"
- "ldr x23, [x16, #0x60]\n"
- "smlal v16.4s, v28.4h, v2.4h\n"
- "smlal2 v26.4s, v28.8h, v2.8h\n"
- "ldr x22, [x16, #0x68]\n"
- "ldr x21, [x16, #0x70]\n"
- "smlal v25.4s, v28.4h, v1.4h\n"
- "smlal2 v10.4s, v28.8h, v1.8h\n"
- "ldr d28, [x28, x8]\n"
- "usubl v28.8h, v28.8b, v24.8b\n"
+ "smlal2 v26.4s, v28.8h, v5.8h\n"
+ "usubl v30.8h, v30.8b, v22.8b\n"
+ "ldr x22, [x12, #0x60]\n"
+ "smlal v19.4s, v28.4h, v4.4h\n"
+ "smlal2 v11.4s, v28.8h, v4.8h\n"
+ "ldr x21, [x12, #0x68]\n"
+ "ldr x20, [x12, #0x70]\n"
+ "smlal v18.4s, v28.4h, v2.4h\n"
+ "smlal2 v24.4s, v28.8h, v2.8h\n"
+ "ldr x19, [x12, #0x78]\n"
+ "ldr q21, [x13, #0x0]\n"
+ "smlal v9.4s, v28.4h, v1.4h\n"
+ "smlal2 v23.4s, v28.8h, v1.8h\n"
+ "ldr d28, [x26, x15]\n"
+ "usubl v28.8h, v28.8b, v22.8b\n"
"smlal v13.4s, v27.4h, v7.4h\n"
- "smlal2 v20.4s, v27.8h, v7.8h\n"
- "ldr x20, [x16, #0x78]\n"
- "tst x6, #0x7\n"
- "smlal v9.4s, v27.4h, v6.4h\n"
- "smlal2 v18.4s, v27.8h, v6.8h\n"
- "add x14, x14, #0x20\n"
+ "smlal2 v26.4s, v27.8h, v7.8h\n"
+ "ldr q25, [x11, #0x0]\n"
+ "ldr q10, [x13, #0x10]\n"
+ "smlal v19.4s, v27.4h, v6.4h\n"
+ "smlal2 v11.4s, v27.8h, v6.8h\n"
+ "ldr q16, [x11, #0x10]\n"
+ "tst x8, #0x7\n"
+ "smlal v18.4s, v31.4h, v6.4h\n"
+ "smlal2 v24.4s, v31.8h, v6.8h\n"
+ "ldr d31, [x25, x15]\n"
+ "usubl v31.8h, v31.8b, v22.8b\n"
+ "smlal v9.4s, v27.4h, v3.4h\n"
+ "smlal2 v23.4s, v27.8h, v3.8h\n"
"add x13, x13, #0x20\n"
- "smlal v16.4s, v31.4h, v6.4h\n"
- "smlal2 v26.4s, v31.8h, v6.8h\n"
- "ldr d31, [x27, x8]\n"
- "usubl v31.8h, v31.8b, v24.8b\n"
- "smlal v25.4s, v27.4h, v3.4h\n"
- "smlal2 v10.4s, v27.8h, v3.8h\n"
+ "add x11, x11, #0x20\n"
"smlal v13.4s, v28.4h, v1.4h\n"
- "smlal2 v20.4s, v28.8h, v1.8h\n"
- "smlal v9.4s, v28.4h, v0.4h\n"
- "smlal2 v18.4s, v28.8h, v0.8h\n"
- "ldr d30, [x26, x8]\n"
- "usubl v30.8h, v30.8b, v24.8b\n"
- "smlal v16.4s, v27.4h, v4.4h\n"
- "smlal v25.4s, v29.4h, v8.4h\n"
- "smlal2 v26.4s, v27.8h, v4.8h\n"
- "ldr d28, [x24, x8]\n"
- "smlal2 v10.4s, v29.8h, v8.8h\n"
- "ldr d29, [x25, x8]\n"
+ "smlal2 v26.4s, v28.8h, v1.8h\n"
+ "smlal v19.4s, v28.4h, v0.4h\n"
+ "smlal2 v11.4s, v28.8h, v0.8h\n"
+ "ldr d28, [x23, x15]\n"
+ "usubl v28.8h, v28.8b, v22.8b\n"
+ "smlal v18.4s, v27.4h, v4.4h\n"
+ "smlal v9.4s, v29.4h, v8.4h\n"
+ "smlal2 v24.4s, v27.8h, v4.8h\n"
+ "smlal2 v23.4s, v29.8h, v8.8h\n"
+ "ldr d29, [x24, x15]\n"
+ "usubl v29.8h, v29.8b, v22.8b\n"
"smlal v13.4s, v31.4h, v2.4h\n"
- "smlal2 v20.4s, v31.8h, v2.8h\n"
- "usubl v29.8h, v29.8b, v24.8b\n"
- "smlal v9.4s, v31.4h, v1.4h\n"
- "smlal2 v18.4s, v31.8h, v1.8h\n"
- "ldr d31, [x23, x8]\n"
- "usubl v28.8h, v28.8b, v24.8b\n"
- "smlal v16.4s, v30.4h, v5.4h\n"
- "smlal v25.4s, v30.4h, v4.4h\n"
- "usubl v31.8h, v31.8b, v24.8b\n"
+ "smlal2 v26.4s, v31.8h, v2.8h\n"
+ "smlal v19.4s, v31.4h, v1.4h\n"
+ "smlal2 v11.4s, v31.8h, v1.8h\n"
+ "ldr d31, [x22, x15]\n"
+ "usubl v31.8h, v31.8b, v22.8b\n"
+ "smlal v18.4s, v30.4h, v5.4h\n"
+ "smlal v9.4s, v30.4h, v4.4h\n"
"smlal v13.4s, v30.4h, v8.4h\n"
- "smlal2 v20.4s, v30.8h, v8.8h\n"
- "smlal v9.4s, v30.4h, v7.4h\n"
- "smlal2 v18.4s, v30.8h, v7.8h\n"
- "smlal2 v26.4s, v30.8h, v5.8h\n"
- "smlal2 v10.4s, v30.8h, v4.8h\n"
- "ldr d30, [x22, x8]\n"
- "usubl v30.8h, v30.8b, v24.8b\n"
- "smlal v16.4s, v29.4h, v0.4h\n"
- "smlal v25.4s, v28.4h, v2.4h\n"
+ "smlal2 v26.4s, v30.8h, v8.8h\n"
+ "smlal v19.4s, v30.4h, v7.4h\n"
+ "smlal2 v11.4s, v30.8h, v7.8h\n"
+ "smlal2 v24.4s, v30.8h, v5.8h\n"
+ "smlal2 v23.4s, v30.8h, v4.8h\n"
+ "ldr d30, [x21, x15]\n"
+ "usubl v30.8h, v30.8b, v22.8b\n"
+ "smlal v18.4s, v29.4h, v0.4h\n"
+ "smlal v9.4s, v28.4h, v2.4h\n"
"smlal v13.4s, v29.4h, v3.4h\n"
- "smlal2 v20.4s, v29.8h, v3.8h\n"
- "smlal2 v26.4s, v29.8h, v0.8h\n"
- "ldr d29, [x21, x8]\n"
- "smlal2 v10.4s, v28.8h, v2.8h\n"
- "usubl v29.8h, v29.8b, v24.8b\n"
- "smlal v16.4s, v31.4h, v3.4h\n"
- "smlal v25.4s, v30.4h, v5.4h\n"
- "smlal v9.4s, v28.4h, v5.4h\n"
- "smlal2 v18.4s, v28.8h, v5.8h\n"
- "ldr d28, [x20, x8]\n"
- "usubl v28.8h, v28.8b, v24.8b\n"
+ "smlal2 v26.4s, v29.8h, v3.8h\n"
+ "smlal2 v24.4s, v29.8h, v0.8h\n"
+ "ldr d29, [x20, x15]\n"
+ "smlal2 v23.4s, v28.8h, v2.8h\n"
+ "usubl v29.8h, v29.8b, v22.8b\n"
+ "smlal v18.4s, v31.4h, v3.4h\n"
+ "smlal v9.4s, v30.4h, v5.4h\n"
+ "smlal v19.4s, v28.4h, v5.4h\n"
+ "smlal2 v11.4s, v28.8h, v5.8h\n"
+ "ldr d28, [x19, x15]\n"
+ "usubl v28.8h, v28.8b, v22.8b\n"
+ "smlal2 v24.4s, v31.8h, v3.8h\n"
+ "smlal2 v23.4s, v30.8h, v5.8h\n"
+ "add x15, x15, #0x8\n"
+ "smlal v18.4s, v29.4h, v7.4h\n"
+ "smlal v9.4s, v29.4h, v6.4h\n"
+ "smlal2 v24.4s, v29.8h, v7.8h\n"
+ "smlal2 v23.4s, v29.8h, v6.8h\n"
"smlal v13.4s, v31.4h, v6.4h\n"
- "smlal2 v26.4s, v31.8h, v3.8h\n"
- "sqrdmulh v13.4s, v13.4s, v17.4s\n"
- "add x8, x8, #0x8\n"
- "smlal2 v10.4s, v30.8h, v5.8h\n"
- "smlal v16.4s, v29.4h, v7.4h\n"
- "and v21.16b, v13.16b, v22.16b\n"
- "smlal v25.4s, v29.4h, v6.4h\n"
- "smlal2 v20.4s, v31.8h, v6.8h\n"
- "sqrdmulh v20.4s, v20.4s, v23.4s\n"
- "smlal2 v26.4s, v29.8h, v7.8h\n"
- "smlal2 v10.4s, v29.8h, v6.8h\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "smlal v9.4s, v30.4h, v8.4h\n"
- "smlal v16.4s, v28.4h, v8.4h\n"
- "and v29.16b, v20.16b, v19.16b\n"
- "smlal v25.4s, v28.4h, v7.4h\n"
- "smlal2 v18.4s, v30.8h, v8.8h\n"
- "sqrdmulh v9.4s, v9.4s, v17.4s\n"
- "smlal2 v26.4s, v28.8h, v8.8h\n"
- "smlal2 v10.4s, v28.8h, v7.8h\n"
- "sqrdmulh v16.4s, v16.4s, v17.4s\n"
- "sqrdmulh v25.4s, v25.4s, v17.4s\n"
- "sqadd v13.4s, v13.4s, v21.4s\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "and v0.16b, v9.16b, v22.16b\n"
- "sqrdmulh v18.4s, v18.4s, v23.4s\n"
- "and v27.16b, v16.16b, v22.16b\n"
- "sqrdmulh v26.4s, v26.4s, v23.4s\n"
- "and v21.16b, v25.16b, v22.16b\n"
- "sqrdmulh v10.4s, v10.4s, v23.4s\n"
- "sqadd v20.4s, v20.4s, v29.4s\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "and v17.16b, v18.16b, v19.16b\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "and v7.16b, v26.16b, v19.16b\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "and v29.16b, v10.16b, v19.16b\n"
- "sqadd v9.4s, v9.4s, v0.4s\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v27.4s\n"
+ "smlal v19.4s, v30.4h, v8.4h\n"
+ "sqrdmulh v13.4s, v13.4s, v21.4s\n"
+ "smlal v18.4s, v28.4h, v8.4h\n"
+ "smlal v9.4s, v28.4h, v7.4h\n"
+ "sqrdmulh v19.4s, v19.4s, v21.4s\n"
+ "smlal2 v26.4s, v31.8h, v6.8h\n"
+ "smlal2 v11.4s, v30.8h, v8.8h\n"
+ "sqrdmulh v18.4s, v18.4s, v21.4s\n"
+ "smlal2 v24.4s, v28.8h, v8.8h\n"
+ "smlal2 v23.4s, v28.8h, v7.8h\n"
+ "sqrdmulh v9.4s, v9.4s, v21.4s\n"
+ "and v7.16b, v13.16b, v25.16b\n"
+ "sqrdmulh v26.4s, v26.4s, v10.4s\n"
+ "and v4.16b, v19.16b, v25.16b\n"
+ "sqrdmulh v11.4s, v11.4s, v10.4s\n"
+ "and v21.16b, v18.16b, v25.16b\n"
+ "sqrdmulh v24.4s, v24.4s, v10.4s\n"
+ "and v20.16b, v9.16b, v25.16b\n"
+ "sqrdmulh v23.4s, v23.4s, v10.4s\n"
"sshr v7.4s, v7.4s, #0x1f\n"
- "sqadd v25.4s, v25.4s, v21.4s\n"
+ "and v29.16b, v26.16b, v16.16b\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "and v10.16b, v11.16b, v16.16b\n"
+ "sshr v21.4s, v21.4s, #0x1f\n"
+ "and v31.16b, v24.16b, v16.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v30.16b, v23.16b, v16.16b\n"
+ "sqadd v13.4s, v13.4s, v7.4s\n"
"sshr v29.4s, v29.4s, #0x1f\n"
- "srshl v13.4s, v13.4s, v22.4s\n"
- "srshl v9.4s, v9.4s, v22.4s\n"
- "sqadd v18.4s, v18.4s, v17.4s\n"
- "srshl v16.4s, v16.4s, v22.4s\n"
- "sqadd v26.4s, v26.4s, v7.4s\n"
- "srshl v25.4s, v25.4s, v22.4s\n"
- "sqadd v10.4s, v10.4s, v29.4s\n"
- "srshl v20.4s, v20.4s, v19.4s\n"
+ "sqadd v19.4s, v19.4s, v4.4s\n"
+ "sshr v10.4s, v10.4s, #0x1f\n"
+ "sqadd v18.4s, v18.4s, v21.4s\n"
+ "sshr v31.4s, v31.4s, #0x1f\n"
+ "sqadd v9.4s, v9.4s, v20.4s\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "srshl v13.4s, v13.4s, v25.4s\n"
+ "sqadd v26.4s, v26.4s, v29.4s\n"
+ "srshl v19.4s, v19.4s, v25.4s\n"
+ "sqadd v11.4s, v11.4s, v10.4s\n"
+ "srshl v18.4s, v18.4s, v25.4s\n"
+ "sqadd v24.4s, v24.4s, v31.4s\n"
+ "srshl v9.4s, v9.4s, v25.4s\n"
+ "sqadd v23.4s, v23.4s, v30.4s\n"
+ "srshl v26.4s, v26.4s, v16.4s\n"
"sqxtn v13.4h, v13.4s\n"
- "srshl v18.4s, v18.4s, v19.4s\n"
+ "srshl v11.4s, v11.4s, v16.4s\n"
+ "sqxtn v19.4h, v19.4s\n"
+ "srshl v24.4s, v24.4s, v16.4s\n"
+ "sqxtn v18.4h, v18.4s\n"
+ "srshl v23.4s, v23.4s, v16.4s\n"
"sqxtn v9.4h, v9.4s\n"
- "srshl v26.4s, v26.4s, v19.4s\n"
- "sqxtn v16.4h, v16.4s\n"
- "srshl v10.4s, v10.4s, v19.4s\n"
- "sqxtn v25.4h, v25.4s\n"
- "sqxtn2 v13.8h, v20.4s\n"
- "sqxtn2 v9.8h, v18.4s\n"
- "sqxtn2 v16.8h, v26.4s\n"
- "sqxtn2 v25.8h, v10.4s\n"
+ "sqxtn2 v13.8h, v26.4s\n"
+ "sqxtn2 v19.8h, v11.4s\n"
+ "sqxtn2 v18.8h, v24.4s\n"
+ "sqxtn2 v9.8h, v23.4s\n"
"sqadd v13.8h, v13.8h, v14.8h\n"
+ "sqadd v19.8h, v19.8h, v14.8h\n"
+ "sqadd v18.8h, v18.8h, v14.8h\n"
"sqadd v9.8h, v9.8h, v14.8h\n"
- "sqadd v16.8h, v16.8h, v14.8h\n"
- "sqadd v25.8h, v25.8h, v14.8h\n"
- "smax v13.8h, v13.8h, v12.8h\n"
- "smax v9.8h, v9.8h, v12.8h\n"
- "smax v16.8h, v16.8h, v12.8h\n"
- "smax v25.8h, v25.8h, v12.8h\n"
- "smin v13.8h, v13.8h, v11.8h\n"
- "smin v9.8h, v9.8h, v11.8h\n"
- "smin v16.8h, v16.8h, v11.8h\n"
- "smin v25.8h, v25.8h, v11.8h\n"
+ "smax v13.8h, v13.8h, v17.8h\n"
+ "smax v19.8h, v19.8h, v17.8h\n"
+ "smax v18.8h, v18.8h, v17.8h\n"
+ "smax v9.8h, v9.8h, v17.8h\n"
+ "smin v13.8h, v13.8h, v15.8h\n"
+ "smin v19.8h, v19.8h, v15.8h\n"
+ "smin v18.8h, v18.8h, v15.8h\n"
+ "smin v9.8h, v9.8h, v15.8h\n"
"uzp1 v13.16b, v13.16b, v13.16b\n"
- "str d13, [x12, x17]\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str d13, [x10, x14]\n"
+ "uzp1 v18.16b, v18.16b, v18.16b\n"
"uzp1 v9.16b, v9.16b, v9.16b\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "str d9, [x11, x17]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "str d16, [x10, x17]\n"
- "str d25, [x9, x17]\n"
- "add x17, x17, #0x8\n"
+ "str d19, [x9, x14]\n"
+ "str d18, [x28, x14]\n"
+ "str d9, [x27, x14]\n"
+ "add x14, x14, #0x8\n"
"beq 64f\n"
- "add x15, x15, #0x48\n"
+ "add x17, x17, #0x48\n"
"3:" // Oddments
- "ldr x28, [%x[params], %[offsetof_Params_bias]]\n"
- "tbz x6, #2, 5f\n"
- "ld1 { v13.4s }, [x28], #0x10\n"
- "tbz x6, #1, 4f\n"
- "ld1 { v20.d }[0], [x28], #0x8\n"
- "tbz x6, #0, 7f\n"
- "ld1 { v20.s }[2], [x28]\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "tbz x8, #2, 5f\n"
+ "ld1 { v13.4s }, [x19], #0x10\n"
+ "tbz x8, #1, 4f\n"
+ "ld1 { v26.d }[0], [x19], #0x8\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v26.s }[2], [x19]\n"
"b 7f\n"
"4:" // Oddments: Load bias: Bit 2: Bit 1: Unset
- "tbz x6, #0, 7f\n"
- "ld1 { v20.s }[0], [x28]\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v26.s }[0], [x19]\n"
"b 7f\n"
"5:" // Oddments: Load bias: Bit 2: Unset
- "tbz x6, #1, 6f\n"
- "ld1 { v13.d }[0], [x28], #0x8\n"
- "tbz x6, #0, 7f\n"
- "ld1 { v13.s }[2], [x28]\n"
+ "tbz x8, #1, 6f\n"
+ "ld1 { v13.d }[0], [x19], #0x8\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v13.s }[2], [x19]\n"
"b 7f\n"
"6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 7f\n"
- "ld1 { v13.s }[0], [x28]\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v13.s }[0], [x19]\n"
"7:" // Oddments: Load bias: Bit 2: End
- "ldr d0, [x15, #0x0]\n"
- "ldr d1, [x15, #0x8]\n"
+ "ldr d0, [x17, #0x0]\n"
+ "ldr d1, [x17, #0x8]\n"
+ "mov v19.16b, v13.16b\n"
+ "mov v11.16b, v26.16b\n"
+ "ldr d2, [x17, #0x10]\n"
+ "ldr d3, [x17, #0x18]\n"
+ "mov v18.16b, v13.16b\n"
+ "mov v24.16b, v26.16b\n"
+ "ldr d4, [x17, #0x20]\n"
+ "ldr d5, [x17, #0x28]\n"
"mov v9.16b, v13.16b\n"
- "mov v18.16b, v20.16b\n"
- "ldr d2, [x15, #0x10]\n"
- "ldr d3, [x15, #0x18]\n"
- "mov v16.16b, v13.16b\n"
- "mov v26.16b, v20.16b\n"
- "ldr d4, [x15, #0x20]\n"
- "ldr d5, [x15, #0x28]\n"
- "mov v25.16b, v13.16b\n"
- "mov v10.16b, v20.16b\n"
- "ldr d6, [x15, #0x30]\n"
- "ldr d7, [x15, #0x38]\n"
- "usubl v0.8h, v0.8b, v15.8b\n"
- "usubl v1.8h, v1.8b, v15.8b\n"
- "ldr d8, [x15, #0x40]\n"
- "ldp x24, x23, [x16, #0x0]\n"
- "usubl v2.8h, v2.8b, v15.8b\n"
- "usubl v3.8h, v3.8b, v15.8b\n"
- "ldp x22, x21, [x16, #0x10]\n"
- "ldr x20, [x16, #0x20]\n"
- "usubl v4.8h, v4.8b, v15.8b\n"
- "usubl v5.8h, v5.8b, v15.8b\n"
- "usubl v6.8h, v6.8b, v15.8b\n"
- "usubl v7.8h, v7.8b, v15.8b\n"
- "usubl v8.8h, v8.8b, v15.8b\n"
- "add x24, x24, x8\n"
- "add x23, x23, x8\n"
- "add x22, x22, x8\n"
- "add x21, x21, x8\n"
- "add x20, x20, x8\n"
- "tbz x6, #2, 9f\n"
- "ld1 { v31.s }[0], [x24], #0x4\n"
- "ld1 { v30.s }[0], [x23], #0x4\n"
- "ld1 { v29.s }[0], [x22], #0x4\n"
- "ld1 { v28.s }[0], [x21], #0x4\n"
- "ld1 { v27.s }[0], [x20], #0x4\n"
- "tbz x6, #1, 8f\n"
- "ld1 { v31.h }[2], [x24], #0x2\n"
- "ld1 { v30.h }[2], [x23], #0x2\n"
- "ld1 { v29.h }[2], [x22], #0x2\n"
- "ld1 { v28.h }[2], [x21], #0x2\n"
- "ld1 { v27.h }[2], [x20], #0x2\n"
- "tbz x6, #0, 11f\n"
- "ld1 { v31.b }[6], [x24]\n"
- "ld1 { v30.b }[6], [x23]\n"
- "ld1 { v29.b }[6], [x22]\n"
- "ld1 { v28.b }[6], [x21]\n"
- "ld1 { v27.b }[6], [x20]\n"
+ "mov v23.16b, v26.16b\n"
+ "ldr d6, [x17, #0x30]\n"
+ "ldr d7, [x17, #0x38]\n"
+ "usubl v0.8h, v0.8b, v12.8b\n"
+ "usubl v1.8h, v1.8b, v12.8b\n"
+ "ldr d8, [x17, #0x40]\n"
+ "ldp x23, x22, [x12, #0x0]\n"
+ "usubl v2.8h, v2.8b, v12.8b\n"
+ "usubl v3.8h, v3.8b, v12.8b\n"
+ "ldp x21, x20, [x12, #0x10]\n"
+ "ldr x19, [x12, #0x20]\n"
+ "usubl v4.8h, v4.8b, v12.8b\n"
+ "usubl v5.8h, v5.8b, v12.8b\n"
+ "usubl v6.8h, v6.8b, v12.8b\n"
+ "usubl v7.8h, v7.8b, v12.8b\n"
+ "usubl v8.8h, v8.8b, v12.8b\n"
+ "add x23, x23, x15\n"
+ "add x22, x22, x15\n"
+ "add x21, x21, x15\n"
+ "add x20, x20, x15\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 9f\n"
+ "ld1 { v31.s }[0], [x23], #0x4\n"
+ "ld1 { v30.s }[0], [x22], #0x4\n"
+ "ld1 { v29.s }[0], [x21], #0x4\n"
+ "ld1 { v28.s }[0], [x20], #0x4\n"
+ "ld1 { v27.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 8f\n"
+ "ld1 { v31.h }[2], [x23], #0x2\n"
+ "ld1 { v30.h }[2], [x22], #0x2\n"
+ "ld1 { v29.h }[2], [x21], #0x2\n"
+ "ld1 { v28.h }[2], [x20], #0x2\n"
+ "ld1 { v27.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v31.b }[6], [x23]\n"
+ "ld1 { v30.b }[6], [x22]\n"
+ "ld1 { v29.b }[6], [x21]\n"
+ "ld1 { v28.b }[6], [x20]\n"
+ "ld1 { v27.b }[6], [x19]\n"
"b 11f\n"
"8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset
- "tbz x6, #0, 11f\n"
- "ld1 { v31.b }[4], [x24]\n"
- "ld1 { v30.b }[4], [x23]\n"
- "ld1 { v29.b }[4], [x22]\n"
- "ld1 { v28.b }[4], [x21]\n"
- "ld1 { v27.b }[4], [x20]\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v31.b }[4], [x23]\n"
+ "ld1 { v30.b }[4], [x22]\n"
+ "ld1 { v29.b }[4], [x21]\n"
+ "ld1 { v28.b }[4], [x20]\n"
+ "ld1 { v27.b }[4], [x19]\n"
"b 11f\n"
"9:" // Oddments: Initial loads: Bit 2: Unset
- "tbz x6, #1, 10f\n"
- "ld1 { v31.h }[0], [x24], #0x2\n"
- "ld1 { v30.h }[0], [x23], #0x2\n"
- "ld1 { v29.h }[0], [x22], #0x2\n"
- "ld1 { v28.h }[0], [x21], #0x2\n"
- "ld1 { v27.h }[0], [x20], #0x2\n"
- "tbz x6, #0, 11f\n"
- "ld1 { v31.b }[2], [x24]\n"
- "ld1 { v30.b }[2], [x23]\n"
- "ld1 { v29.b }[2], [x22]\n"
- "ld1 { v28.b }[2], [x21]\n"
- "ld1 { v27.b }[2], [x20]\n"
+ "tbz x8, #1, 10f\n"
+ "ld1 { v31.h }[0], [x23], #0x2\n"
+ "ld1 { v30.h }[0], [x22], #0x2\n"
+ "ld1 { v29.h }[0], [x21], #0x2\n"
+ "ld1 { v28.h }[0], [x20], #0x2\n"
+ "ld1 { v27.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v31.b }[2], [x23]\n"
+ "ld1 { v30.b }[2], [x22]\n"
+ "ld1 { v29.b }[2], [x21]\n"
+ "ld1 { v28.b }[2], [x20]\n"
+ "ld1 { v27.b }[2], [x19]\n"
"b 11f\n"
"10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 11f\n"
- "ld1 { v31.b }[0], [x24]\n"
- "ld1 { v30.b }[0], [x23]\n"
- "ld1 { v29.b }[0], [x22]\n"
- "ld1 { v28.b }[0], [x21]\n"
- "ld1 { v27.b }[0], [x20]\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v31.b }[0], [x23]\n"
+ "ld1 { v30.b }[0], [x22]\n"
+ "ld1 { v29.b }[0], [x21]\n"
+ "ld1 { v28.b }[0], [x20]\n"
+ "ld1 { v27.b }[0], [x19]\n"
"11:" // Oddments: Initial loads: Bit 2: End
- "usubl v31.8h, v31.8b, v24.8b\n"
+ "usubl v31.8h, v31.8b, v22.8b\n"
"smlal v13.4s, v31.4h, v4.4h\n"
- "smlal2 v20.4s, v31.8h, v4.8h\n"
- "ldr x21, [x16, #0x28]\n"
- "smlal v9.4s, v31.4h, v3.4h\n"
- "smlal2 v18.4s, v31.8h, v3.8h\n"
- "usubl v30.8h, v30.8b, v24.8b\n"
- "add x21, x21, x8\n"
- "usubl v29.8h, v29.8b, v24.8b\n"
- "smlal v16.4s, v31.4h, v1.4h\n"
- "smlal2 v26.4s, v31.8h, v1.8h\n"
- "smlal v25.4s, v31.4h, v0.4h\n"
- "smlal2 v10.4s, v31.8h, v0.8h\n"
- "usubl v28.8h, v28.8b, v24.8b\n"
+ "smlal2 v26.4s, v31.8h, v4.8h\n"
+ "ldr x21, [x12, #0x28]\n"
+ "smlal v19.4s, v31.4h, v3.4h\n"
+ "smlal2 v11.4s, v31.8h, v3.8h\n"
+ "usubl v30.8h, v30.8b, v22.8b\n"
+ "add x21, x21, x15\n"
+ "usubl v29.8h, v29.8b, v22.8b\n"
+ "smlal v18.4s, v31.4h, v1.4h\n"
+ "smlal2 v24.4s, v31.8h, v1.8h\n"
+ "smlal v9.4s, v31.4h, v0.4h\n"
+ "smlal2 v23.4s, v31.8h, v0.8h\n"
+ "usubl v28.8h, v28.8b, v22.8b\n"
"smlal v13.4s, v30.4h, v0.4h\n"
- "smlal2 v20.4s, v30.8h, v0.8h\n"
- "usubl v27.8h, v27.8b, v24.8b\n"
- "smlal v9.4s, v29.4h, v2.4h\n"
- "smlal2 v18.4s, v29.8h, v2.8h\n"
+ "smlal2 v26.4s, v30.8h, v0.8h\n"
+ "usubl v27.8h, v27.8b, v22.8b\n"
+ "smlal v19.4s, v29.4h, v2.4h\n"
+ "smlal2 v11.4s, v29.8h, v2.8h\n"
"smlal v13.4s, v28.4h, v5.4h\n"
- "smlal2 v20.4s, v28.8h, v5.8h\n"
- "smlal v9.4s, v28.4h, v4.4h\n"
- "smlal2 v18.4s, v28.8h, v4.8h\n"
- "smlal v16.4s, v28.4h, v2.4h\n"
- "smlal2 v26.4s, v28.8h, v2.8h\n"
- "smlal v25.4s, v28.4h, v1.4h\n"
- "smlal2 v10.4s, v28.8h, v1.8h\n"
- "tbz x6, #2, 13f\n"
+ "smlal2 v26.4s, v28.8h, v5.8h\n"
+ "smlal v19.4s, v28.4h, v4.4h\n"
+ "smlal2 v11.4s, v28.8h, v4.8h\n"
+ "smlal v18.4s, v28.4h, v2.4h\n"
+ "smlal2 v24.4s, v28.8h, v2.8h\n"
+ "smlal v9.4s, v28.4h, v1.4h\n"
+ "smlal2 v23.4s, v28.8h, v1.8h\n"
+ "tbz x8, #2, 13f\n"
"ld1 { v31.s }[0], [x21], #0x4\n"
- "tbz x6, #1, 12f\n"
+ "tbz x8, #1, 12f\n"
"ld1 { v31.h }[2], [x21], #0x2\n"
- "tbz x6, #0, 15f\n"
+ "tbz x8, #0, 15f\n"
"ld1 { v31.b }[6], [x21]\n"
"b 15f\n"
"12:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset
- "tbz x6, #0, 15f\n"
+ "tbz x8, #0, 15f\n"
"ld1 { v31.b }[4], [x21]\n"
"b 15f\n"
"13:" // Oddments: Load (3, 0): Bit 2: Unset
- "tbz x6, #1, 14f\n"
+ "tbz x8, #1, 14f\n"
"ld1 { v31.h }[0], [x21], #0x2\n"
- "tbz x6, #0, 15f\n"
+ "tbz x8, #0, 15f\n"
"ld1 { v31.b }[2], [x21]\n"
"b 15f\n"
"14:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 15f\n"
+ "tbz x8, #0, 15f\n"
"ld1 { v31.b }[0], [x21]\n"
"15:" // Oddments: Load (3, 0): Bit 2: End
- "usubl v31.8h, v31.8b, v24.8b\n"
- "smlal v16.4s, v31.4h, v6.4h\n"
- "smlal2 v26.4s, v31.8h, v6.8h\n"
- "ldr x20, [x16, #0x30]\n"
+ "usubl v31.8h, v31.8b, v22.8b\n"
+ "smlal v18.4s, v31.4h, v6.4h\n"
+ "smlal2 v24.4s, v31.8h, v6.8h\n"
+ "ldr x20, [x12, #0x30]\n"
"smlal v13.4s, v27.4h, v7.4h\n"
- "smlal2 v20.4s, v27.8h, v7.8h\n"
- "add x20, x20, x8\n"
- "smlal v9.4s, v27.4h, v6.4h\n"
- "smlal2 v18.4s, v27.8h, v6.8h\n"
- "smlal v16.4s, v27.4h, v4.4h\n"
- "smlal2 v26.4s, v27.8h, v4.8h\n"
- "smlal v25.4s, v27.4h, v3.4h\n"
- "smlal2 v10.4s, v27.8h, v3.8h\n"
- "tbz x6, #2, 17f\n"
+ "smlal2 v26.4s, v27.8h, v7.8h\n"
+ "add x20, x20, x15\n"
+ "smlal v19.4s, v27.4h, v6.4h\n"
+ "smlal2 v11.4s, v27.8h, v6.8h\n"
+ "smlal v18.4s, v27.4h, v4.4h\n"
+ "smlal2 v24.4s, v27.8h, v4.8h\n"
+ "smlal v9.4s, v27.4h, v3.4h\n"
+ "smlal2 v23.4s, v27.8h, v3.8h\n"
+ "tbz x8, #2, 17f\n"
"ld1 { v29.s }[0], [x20], #0x4\n"
- "tbz x6, #1, 16f\n"
+ "tbz x8, #1, 16f\n"
"ld1 { v29.h }[2], [x20], #0x2\n"
- "tbz x6, #0, 19f\n"
+ "tbz x8, #0, 19f\n"
"ld1 { v29.b }[6], [x20]\n"
"b 19f\n"
"16:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset
- "tbz x6, #0, 19f\n"
+ "tbz x8, #0, 19f\n"
"ld1 { v29.b }[4], [x20]\n"
"b 19f\n"
"17:" // Oddments: Load (3, 3): Bit 2: Unset
- "tbz x6, #1, 18f\n"
+ "tbz x8, #1, 18f\n"
"ld1 { v29.h }[0], [x20], #0x2\n"
- "tbz x6, #0, 19f\n"
+ "tbz x8, #0, 19f\n"
"ld1 { v29.b }[2], [x20]\n"
"b 19f\n"
"18:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 19f\n"
+ "tbz x8, #0, 19f\n"
"ld1 { v29.b }[0], [x20]\n"
"19:" // Oddments: Load (3, 3): Bit 2: End
- "usubl v29.8h, v29.8b, v24.8b\n"
- "ldr x28, [x16, #0x38]\n"
- "smlal v25.4s, v29.4h, v8.4h\n"
- "smlal2 v10.4s, v29.8h, v8.8h\n"
- "add x28, x28, x8\n"
- "tbz x6, #2, 21f\n"
- "ld1 { v28.s }[0], [x28], #0x4\n"
- "tbz x6, #1, 20f\n"
- "ld1 { v28.h }[2], [x28], #0x2\n"
- "tbz x6, #0, 23f\n"
- "ld1 { v28.b }[6], [x28]\n"
+ "usubl v29.8h, v29.8b, v22.8b\n"
+ "ldr x26, [x12, #0x38]\n"
+ "smlal v9.4s, v29.4h, v8.4h\n"
+ "smlal2 v23.4s, v29.8h, v8.8h\n"
+ "add x26, x26, x15\n"
+ "tbz x8, #2, 21f\n"
+ "ld1 { v28.s }[0], [x26], #0x4\n"
+ "tbz x8, #1, 20f\n"
+ "ld1 { v28.h }[2], [x26], #0x2\n"
+ "tbz x8, #0, 23f\n"
+ "ld1 { v28.b }[6], [x26]\n"
"b 23f\n"
"20:" // Oddments: Load (0, 1): Bit 2: Bit 1: Unset
- "tbz x6, #0, 23f\n"
- "ld1 { v28.b }[4], [x28]\n"
+ "tbz x8, #0, 23f\n"
+ "ld1 { v28.b }[4], [x26]\n"
"b 23f\n"
"21:" // Oddments: Load (0, 1): Bit 2: Unset
- "tbz x6, #1, 22f\n"
- "ld1 { v28.h }[0], [x28], #0x2\n"
- "tbz x6, #0, 23f\n"
- "ld1 { v28.b }[2], [x28]\n"
+ "tbz x8, #1, 22f\n"
+ "ld1 { v28.h }[0], [x26], #0x2\n"
+ "tbz x8, #0, 23f\n"
+ "ld1 { v28.b }[2], [x26]\n"
"b 23f\n"
"22:" // Oddments: Load (0, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 23f\n"
- "ld1 { v28.b }[0], [x28]\n"
+ "tbz x8, #0, 23f\n"
+ "ld1 { v28.b }[0], [x26]\n"
"23:" // Oddments: Load (0, 1): Bit 2: End
- "usubl v28.8h, v28.8b, v24.8b\n"
- "ldr x27, [x16, #0x40]\n"
+ "usubl v28.8h, v28.8b, v22.8b\n"
+ "ldr x25, [x12, #0x40]\n"
"smlal v13.4s, v28.4h, v1.4h\n"
- "smlal2 v20.4s, v28.8h, v1.8h\n"
- "smlal v9.4s, v28.4h, v0.4h\n"
- "smlal2 v18.4s, v28.8h, v0.8h\n"
- "add x27, x27, x8\n"
- "tbz x6, #2, 25f\n"
- "ld1 { v31.s }[0], [x27], #0x4\n"
- "tbz x6, #1, 24f\n"
- "ld1 { v31.h }[2], [x27], #0x2\n"
- "tbz x6, #0, 27f\n"
- "ld1 { v31.b }[6], [x27]\n"
+ "smlal2 v26.4s, v28.8h, v1.8h\n"
+ "smlal v19.4s, v28.4h, v0.4h\n"
+ "smlal2 v11.4s, v28.8h, v0.8h\n"
+ "add x25, x25, x15\n"
+ "tbz x8, #2, 25f\n"
+ "ld1 { v31.s }[0], [x25], #0x4\n"
+ "tbz x8, #1, 24f\n"
+ "ld1 { v31.h }[2], [x25], #0x2\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v31.b }[6], [x25]\n"
"b 27f\n"
"24:" // Oddments: Load (0, 2): Bit 2: Bit 1: Unset
- "tbz x6, #0, 27f\n"
- "ld1 { v31.b }[4], [x27]\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v31.b }[4], [x25]\n"
"b 27f\n"
"25:" // Oddments: Load (0, 2): Bit 2: Unset
- "tbz x6, #1, 26f\n"
- "ld1 { v31.h }[0], [x27], #0x2\n"
- "tbz x6, #0, 27f\n"
- "ld1 { v31.b }[2], [x27]\n"
+ "tbz x8, #1, 26f\n"
+ "ld1 { v31.h }[0], [x25], #0x2\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v31.b }[2], [x25]\n"
"b 27f\n"
"26:" // Oddments: Load (0, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 27f\n"
- "ld1 { v31.b }[0], [x27]\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v31.b }[0], [x25]\n"
"27:" // Oddments: Load (0, 2): Bit 2: End
- "usubl v31.8h, v31.8b, v24.8b\n"
- "ldr x26, [x16, #0x48]\n"
+ "usubl v31.8h, v31.8b, v22.8b\n"
+ "ldr x19, [x12, #0x48]\n"
"smlal v13.4s, v31.4h, v2.4h\n"
- "smlal2 v20.4s, v31.8h, v2.8h\n"
- "smlal v9.4s, v31.4h, v1.4h\n"
- "smlal2 v18.4s, v31.8h, v1.8h\n"
- "add x26, x26, x8\n"
- "tbz x6, #2, 29f\n"
- "ld1 { v30.s }[0], [x26], #0x4\n"
- "tbz x6, #1, 28f\n"
- "ld1 { v30.h }[2], [x26], #0x2\n"
- "tbz x6, #0, 31f\n"
- "ld1 { v30.b }[6], [x26]\n"
+ "smlal2 v26.4s, v31.8h, v2.8h\n"
+ "smlal v19.4s, v31.4h, v1.4h\n"
+ "smlal2 v11.4s, v31.8h, v1.8h\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 29f\n"
+ "ld1 { v30.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 28f\n"
+ "ld1 { v30.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 31f\n"
+ "ld1 { v30.b }[6], [x19]\n"
"b 31f\n"
"28:" // Oddments: Load (2, 2): Bit 2: Bit 1: Unset
- "tbz x6, #0, 31f\n"
- "ld1 { v30.b }[4], [x26]\n"
+ "tbz x8, #0, 31f\n"
+ "ld1 { v30.b }[4], [x19]\n"
"b 31f\n"
"29:" // Oddments: Load (2, 2): Bit 2: Unset
- "tbz x6, #1, 30f\n"
- "ld1 { v30.h }[0], [x26], #0x2\n"
- "tbz x6, #0, 31f\n"
- "ld1 { v30.b }[2], [x26]\n"
+ "tbz x8, #1, 30f\n"
+ "ld1 { v30.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 31f\n"
+ "ld1 { v30.b }[2], [x19]\n"
"b 31f\n"
"30:" // Oddments: Load (2, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 31f\n"
- "ld1 { v30.b }[0], [x26]\n"
+ "tbz x8, #0, 31f\n"
+ "ld1 { v30.b }[0], [x19]\n"
"31:" // Oddments: Load (2, 2): Bit 2: End
- "usubl v30.8h, v30.8b, v24.8b\n"
- "ldr x25, [x16, #0x50]\n"
+ "usubl v30.8h, v30.8b, v22.8b\n"
+ "ldr x24, [x12, #0x50]\n"
"smlal v13.4s, v30.4h, v8.4h\n"
- "smlal2 v20.4s, v30.8h, v8.8h\n"
- "smlal v9.4s, v30.4h, v7.4h\n"
- "smlal2 v18.4s, v30.8h, v7.8h\n"
- "add x25, x25, x8\n"
- "smlal v16.4s, v30.4h, v5.4h\n"
- "smlal2 v26.4s, v30.8h, v5.8h\n"
- "smlal v25.4s, v30.4h, v4.4h\n"
- "smlal2 v10.4s, v30.8h, v4.8h\n"
- "tbz x6, #2, 33f\n"
- "ld1 { v29.s }[0], [x25], #0x4\n"
- "tbz x6, #1, 32f\n"
- "ld1 { v29.h }[2], [x25], #0x2\n"
- "tbz x6, #0, 35f\n"
- "ld1 { v29.b }[6], [x25]\n"
+ "smlal2 v26.4s, v30.8h, v8.8h\n"
+ "smlal v19.4s, v30.4h, v7.4h\n"
+ "smlal2 v11.4s, v30.8h, v7.8h\n"
+ "add x24, x24, x15\n"
+ "smlal v18.4s, v30.4h, v5.4h\n"
+ "smlal2 v24.4s, v30.8h, v5.8h\n"
+ "smlal v9.4s, v30.4h, v4.4h\n"
+ "smlal2 v23.4s, v30.8h, v4.8h\n"
+ "tbz x8, #2, 33f\n"
+ "ld1 { v29.s }[0], [x24], #0x4\n"
+ "tbz x8, #1, 32f\n"
+ "ld1 { v29.h }[2], [x24], #0x2\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v29.b }[6], [x24]\n"
"b 35f\n"
"32:" // Oddments: Load (1, 0): Bit 2: Bit 1: Unset
- "tbz x6, #0, 35f\n"
- "ld1 { v29.b }[4], [x25]\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v29.b }[4], [x24]\n"
"b 35f\n"
"33:" // Oddments: Load (1, 0): Bit 2: Unset
- "tbz x6, #1, 34f\n"
- "ld1 { v29.h }[0], [x25], #0x2\n"
- "tbz x6, #0, 35f\n"
- "ld1 { v29.b }[2], [x25]\n"
+ "tbz x8, #1, 34f\n"
+ "ld1 { v29.h }[0], [x24], #0x2\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v29.b }[2], [x24]\n"
"b 35f\n"
"34:" // Oddments: Load (1, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 35f\n"
- "ld1 { v29.b }[0], [x25]\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v29.b }[0], [x24]\n"
"35:" // Oddments: Load (1, 0): Bit 2: End
- "usubl v29.8h, v29.8b, v24.8b\n"
- "ldr x24, [x16, #0x58]\n"
+ "usubl v29.8h, v29.8b, v22.8b\n"
+ "ldr x23, [x12, #0x58]\n"
"smlal v13.4s, v29.4h, v3.4h\n"
- "smlal2 v20.4s, v29.8h, v3.8h\n"
- "smlal v16.4s, v29.4h, v0.4h\n"
- "smlal2 v26.4s, v29.8h, v0.8h\n"
- "add x24, x24, x8\n"
- "tbz x6, #2, 37f\n"
- "ld1 { v28.s }[0], [x24], #0x4\n"
- "tbz x6, #1, 36f\n"
- "ld1 { v28.h }[2], [x24], #0x2\n"
- "tbz x6, #0, 39f\n"
- "ld1 { v28.b }[6], [x24]\n"
+ "smlal2 v26.4s, v29.8h, v3.8h\n"
+ "smlal v18.4s, v29.4h, v0.4h\n"
+ "smlal2 v24.4s, v29.8h, v0.8h\n"
+ "add x23, x23, x15\n"
+ "tbz x8, #2, 37f\n"
+ "ld1 { v28.s }[0], [x23], #0x4\n"
+ "tbz x8, #1, 36f\n"
+ "ld1 { v28.h }[2], [x23], #0x2\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v28.b }[6], [x23]\n"
"b 39f\n"
"36:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset
- "tbz x6, #0, 39f\n"
- "ld1 { v28.b }[4], [x24]\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v28.b }[4], [x23]\n"
"b 39f\n"
"37:" // Oddments: Load (1, 3): Bit 2: Unset
- "tbz x6, #1, 38f\n"
- "ld1 { v28.h }[0], [x24], #0x2\n"
- "tbz x6, #0, 39f\n"
- "ld1 { v28.b }[2], [x24]\n"
+ "tbz x8, #1, 38f\n"
+ "ld1 { v28.h }[0], [x23], #0x2\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v28.b }[2], [x23]\n"
"b 39f\n"
"38:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 39f\n"
- "ld1 { v28.b }[0], [x24]\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v28.b }[0], [x23]\n"
"39:" // Oddments: Load (1, 3): Bit 2: End
- "usubl v28.8h, v28.8b, v24.8b\n"
- "ldr x23, [x16, #0x60]\n"
- "smlal v9.4s, v28.4h, v5.4h\n"
- "smlal2 v18.4s, v28.8h, v5.8h\n"
- "smlal v25.4s, v28.4h, v2.4h\n"
- "smlal2 v10.4s, v28.8h, v2.8h\n"
- "add x23, x23, x8\n"
- "tbz x6, #2, 41f\n"
- "ld1 { v31.s }[0], [x23], #0x4\n"
- "tbz x6, #1, 40f\n"
- "ld1 { v31.h }[2], [x23], #0x2\n"
- "tbz x6, #0, 43f\n"
- "ld1 { v31.b }[6], [x23]\n"
+ "usubl v28.8h, v28.8b, v22.8b\n"
+ "ldr x22, [x12, #0x60]\n"
+ "smlal v19.4s, v28.4h, v5.4h\n"
+ "smlal2 v11.4s, v28.8h, v5.8h\n"
+ "smlal v9.4s, v28.4h, v2.4h\n"
+ "smlal2 v23.4s, v28.8h, v2.8h\n"
+ "add x22, x22, x15\n"
+ "tbz x8, #2, 41f\n"
+ "ld1 { v31.s }[0], [x22], #0x4\n"
+ "tbz x8, #1, 40f\n"
+ "ld1 { v31.h }[2], [x22], #0x2\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v31.b }[6], [x22]\n"
"b 43f\n"
"40:" // Oddments: Load (2, 0): Bit 2: Bit 1: Unset
- "tbz x6, #0, 43f\n"
- "ld1 { v31.b }[4], [x23]\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v31.b }[4], [x22]\n"
"b 43f\n"
"41:" // Oddments: Load (2, 0): Bit 2: Unset
- "tbz x6, #1, 42f\n"
- "ld1 { v31.h }[0], [x23], #0x2\n"
- "tbz x6, #0, 43f\n"
- "ld1 { v31.b }[2], [x23]\n"
+ "tbz x8, #1, 42f\n"
+ "ld1 { v31.h }[0], [x22], #0x2\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v31.b }[2], [x22]\n"
"b 43f\n"
"42:" // Oddments: Load (2, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 43f\n"
- "ld1 { v31.b }[0], [x23]\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v31.b }[0], [x22]\n"
"43:" // Oddments: Load (2, 0): Bit 2: End
- "usubl v31.8h, v31.8b, v24.8b\n"
- "ldr x22, [x16, #0x68]\n"
+ "usubl v31.8h, v31.8b, v22.8b\n"
+ "ldr x21, [x12, #0x68]\n"
"smlal v13.4s, v31.4h, v6.4h\n"
- "smlal2 v20.4s, v31.8h, v6.8h\n"
- "smlal v16.4s, v31.4h, v3.4h\n"
- "smlal2 v26.4s, v31.8h, v3.8h\n"
- "add x22, x22, x8\n"
- "tbz x6, #2, 45f\n"
- "ld1 { v30.s }[0], [x22], #0x4\n"
- "tbz x6, #1, 44f\n"
- "ld1 { v30.h }[2], [x22], #0x2\n"
- "tbz x6, #0, 47f\n"
- "ld1 { v30.b }[6], [x22]\n"
+ "smlal2 v26.4s, v31.8h, v6.8h\n"
+ "smlal v18.4s, v31.4h, v3.4h\n"
+ "smlal2 v24.4s, v31.8h, v3.8h\n"
+ "add x21, x21, x15\n"
+ "tbz x8, #2, 45f\n"
+ "ld1 { v30.s }[0], [x21], #0x4\n"
+ "tbz x8, #1, 44f\n"
+ "ld1 { v30.h }[2], [x21], #0x2\n"
+ "tbz x8, #0, 47f\n"
+ "ld1 { v30.b }[6], [x21]\n"
"b 47f\n"
"44:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset
- "tbz x6, #0, 47f\n"
- "ld1 { v30.b }[4], [x22]\n"
+ "tbz x8, #0, 47f\n"
+ "ld1 { v30.b }[4], [x21]\n"
"b 47f\n"
"45:" // Oddments: Load (2, 3): Bit 2: Unset
- "tbz x6, #1, 46f\n"
- "ld1 { v30.h }[0], [x22], #0x2\n"
- "tbz x6, #0, 47f\n"
- "ld1 { v30.b }[2], [x22]\n"
+ "tbz x8, #1, 46f\n"
+ "ld1 { v30.h }[0], [x21], #0x2\n"
+ "tbz x8, #0, 47f\n"
+ "ld1 { v30.b }[2], [x21]\n"
"b 47f\n"
"46:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 47f\n"
- "ld1 { v30.b }[0], [x22]\n"
+ "tbz x8, #0, 47f\n"
+ "ld1 { v30.b }[0], [x21]\n"
"47:" // Oddments: Load (2, 3): Bit 2: End
- "usubl v30.8h, v30.8b, v24.8b\n"
- "ldr x21, [x16, #0x70]\n"
- "smlal v9.4s, v30.4h, v8.4h\n"
- "smlal2 v18.4s, v30.8h, v8.8h\n"
- "smlal v25.4s, v30.4h, v5.4h\n"
- "smlal2 v10.4s, v30.8h, v5.8h\n"
- "add x21, x21, x8\n"
- "tbz x6, #2, 49f\n"
- "ld1 { v29.s }[0], [x21], #0x4\n"
- "tbz x6, #1, 48f\n"
- "ld1 { v29.h }[2], [x21], #0x2\n"
- "tbz x6, #0, 51f\n"
- "ld1 { v29.b }[6], [x21]\n"
+ "usubl v30.8h, v30.8b, v22.8b\n"
+ "ldr x20, [x12, #0x70]\n"
+ "smlal v19.4s, v30.4h, v8.4h\n"
+ "smlal2 v11.4s, v30.8h, v8.8h\n"
+ "smlal v9.4s, v30.4h, v5.4h\n"
+ "smlal2 v23.4s, v30.8h, v5.8h\n"
+ "add x20, x20, x15\n"
+ "tbz x8, #2, 49f\n"
+ "ld1 { v29.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 48f\n"
+ "ld1 { v29.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v29.b }[6], [x20]\n"
"b 51f\n"
"48:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset
- "tbz x6, #0, 51f\n"
- "ld1 { v29.b }[4], [x21]\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v29.b }[4], [x20]\n"
"b 51f\n"
"49:" // Oddments: Load (3, 1): Bit 2: Unset
- "tbz x6, #1, 50f\n"
- "ld1 { v29.h }[0], [x21], #0x2\n"
- "tbz x6, #0, 51f\n"
- "ld1 { v29.b }[2], [x21]\n"
+ "tbz x8, #1, 50f\n"
+ "ld1 { v29.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v29.b }[2], [x20]\n"
"b 51f\n"
"50:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 51f\n"
- "ld1 { v29.b }[0], [x21]\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v29.b }[0], [x20]\n"
"51:" // Oddments: Load (3, 1): Bit 2: End
- "usubl v29.8h, v29.8b, v24.8b\n"
- "ldr x20, [x16, #0x78]\n"
- "smlal v16.4s, v29.4h, v7.4h\n"
- "smlal2 v26.4s, v29.8h, v7.8h\n"
- "smlal v25.4s, v29.4h, v6.4h\n"
- "smlal2 v10.4s, v29.8h, v6.8h\n"
- "add x20, x20, x8\n"
- "tbz x6, #2, 53f\n"
- "ld1 { v28.s }[0], [x20], #0x4\n"
- "tbz x6, #1, 52f\n"
- "ld1 { v28.h }[2], [x20], #0x2\n"
- "tbz x6, #0, 55f\n"
- "ld1 { v28.b }[6], [x20]\n"
+ "usubl v29.8h, v29.8b, v22.8b\n"
+ "ldr x19, [x12, #0x78]\n"
+ "smlal v18.4s, v29.4h, v7.4h\n"
+ "smlal2 v24.4s, v29.8h, v7.8h\n"
+ "smlal v9.4s, v29.4h, v6.4h\n"
+ "smlal2 v23.4s, v29.8h, v6.8h\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 53f\n"
+ "ld1 { v28.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 52f\n"
+ "ld1 { v28.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v28.b }[6], [x19]\n"
"b 55f\n"
"52:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset
- "tbz x6, #0, 55f\n"
- "ld1 { v28.b }[4], [x20]\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v28.b }[4], [x19]\n"
"b 55f\n"
"53:" // Oddments: Load (3, 2): Bit 2: Unset
- "tbz x6, #1, 54f\n"
- "ld1 { v28.h }[0], [x20], #0x2\n"
- "tbz x6, #0, 55f\n"
- "ld1 { v28.b }[2], [x20]\n"
+ "tbz x8, #1, 54f\n"
+ "ld1 { v28.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v28.b }[2], [x19]\n"
"b 55f\n"
"54:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 55f\n"
- "ld1 { v28.b }[0], [x20]\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v28.b }[0], [x19]\n"
"55:" // Oddments: Load (3, 2): Bit 2: End
- "usubl v28.8h, v28.8b, v24.8b\n"
- "smlal v16.4s, v28.4h, v8.4h\n"
- "smlal2 v26.4s, v28.8h, v8.8h\n"
- "smlal v25.4s, v28.4h, v7.4h\n"
- "smlal2 v10.4s, v28.8h, v7.8h\n"
- "tbz x6, #2, 57f\n"
- "ld1 { v17.4s }, [x14], #0x10\n"
- "ld1 { v22.4s }, [x13], #0x10\n"
- "tbz x6, #1, 56f\n"
- "ld1 { v23.d }[0], [x14], #0x8\n"
- "ld1 { v19.d }[0], [x13], #0x8\n"
- "tbz x6, #0, 59f\n"
- "ld1 { v23.s }[2], [x14]\n"
- "ld1 { v19.s }[2], [x13]\n"
+ "usubl v28.8h, v28.8b, v22.8b\n"
+ "smlal v18.4s, v28.4h, v8.4h\n"
+ "smlal2 v24.4s, v28.8h, v8.8h\n"
+ "smlal v9.4s, v28.4h, v7.4h\n"
+ "smlal2 v23.4s, v28.8h, v7.8h\n"
+ "tbz x8, #2, 57f\n"
+ "ld1 { v21.4s }, [x13], #0x10\n"
+ "ld1 { v25.4s }, [x11], #0x10\n"
+ "tbz x8, #1, 56f\n"
+ "ld1 { v10.d }[0], [x13], #0x8\n"
+ "ld1 { v16.d }[0], [x11], #0x8\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v10.s }[2], [x13]\n"
+ "ld1 { v16.s }[2], [x11]\n"
"b 59f\n"
"56:" // Oddments: Load requant params: Bit 2: Bit 1: Unset
- "tbz x6, #0, 59f\n"
- "ld1 { v23.s }[0], [x14]\n"
- "ld1 { v19.s }[0], [x13]\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v10.s }[0], [x13]\n"
+ "ld1 { v16.s }[0], [x11]\n"
"b 59f\n"
"57:" // Oddments: Load requant params: Bit 2: Unset
- "tbz x6, #1, 58f\n"
- "ld1 { v17.d }[0], [x14], #0x8\n"
- "ld1 { v22.d }[0], [x13], #0x8\n"
- "tbz x6, #0, 59f\n"
- "ld1 { v17.s }[2], [x14]\n"
- "ld1 { v22.s }[2], [x13]\n"
+ "tbz x8, #1, 58f\n"
+ "ld1 { v21.d }[0], [x13], #0x8\n"
+ "ld1 { v25.d }[0], [x11], #0x8\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v21.s }[2], [x13]\n"
+ "ld1 { v25.s }[2], [x11]\n"
"b 59f\n"
"58:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 59f\n"
- "ld1 { v17.s }[0], [x14]\n"
- "ld1 { v22.s }[0], [x13]\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v21.s }[0], [x13]\n"
+ "ld1 { v25.s }[0], [x11]\n"
"59:" // Oddments: Load requant params: Bit 2: End
- "sqrdmulh v13.4s, v13.4s, v17.4s\n"
- "and v21.16b, v13.16b, v22.16b\n"
- "add x12, x12, x17\n"
- "add x11, x11, x17\n"
- "sqrdmulh v20.4s, v20.4s, v23.4s\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "add x10, x10, x17\n"
- "add x9, x9, x17\n"
- "and v29.16b, v20.16b, v19.16b\n"
- "sqrdmulh v9.4s, v9.4s, v17.4s\n"
- "sqrdmulh v16.4s, v16.4s, v17.4s\n"
- "sqrdmulh v25.4s, v25.4s, v17.4s\n"
- "sqadd v13.4s, v13.4s, v21.4s\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "and v0.16b, v9.16b, v22.16b\n"
- "sqrdmulh v18.4s, v18.4s, v23.4s\n"
- "and v27.16b, v16.16b, v22.16b\n"
- "sqrdmulh v26.4s, v26.4s, v23.4s\n"
- "and v21.16b, v25.16b, v22.16b\n"
- "sqrdmulh v10.4s, v10.4s, v23.4s\n"
- "sqadd v20.4s, v20.4s, v29.4s\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "and v17.16b, v18.16b, v19.16b\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "and v7.16b, v26.16b, v19.16b\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "and v29.16b, v10.16b, v19.16b\n"
- "sqadd v9.4s, v9.4s, v0.4s\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v27.4s\n"
+ "sqrdmulh v13.4s, v13.4s, v21.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v21.4s\n"
+ "add x10, x10, x14\n"
+ "add x9, x9, x14\n"
+ "sqrdmulh v18.4s, v18.4s, v21.4s\n"
+ "sqrdmulh v9.4s, v9.4s, v21.4s\n"
+ "add x28, x28, x14\n"
+ "add x27, x27, x14\n"
+ "and v7.16b, v13.16b, v25.16b\n"
+ "sqrdmulh v26.4s, v26.4s, v10.4s\n"
+ "and v4.16b, v19.16b, v25.16b\n"
+ "sqrdmulh v11.4s, v11.4s, v10.4s\n"
+ "and v21.16b, v18.16b, v25.16b\n"
+ "sqrdmulh v24.4s, v24.4s, v10.4s\n"
+ "and v20.16b, v9.16b, v25.16b\n"
+ "sqrdmulh v23.4s, v23.4s, v10.4s\n"
"sshr v7.4s, v7.4s, #0x1f\n"
- "sqadd v25.4s, v25.4s, v21.4s\n"
+ "and v29.16b, v26.16b, v16.16b\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "and v10.16b, v11.16b, v16.16b\n"
+ "sshr v21.4s, v21.4s, #0x1f\n"
+ "and v31.16b, v24.16b, v16.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v30.16b, v23.16b, v16.16b\n"
+ "sqadd v13.4s, v13.4s, v7.4s\n"
"sshr v29.4s, v29.4s, #0x1f\n"
- "srshl v13.4s, v13.4s, v22.4s\n"
- "srshl v9.4s, v9.4s, v22.4s\n"
- "sqadd v18.4s, v18.4s, v17.4s\n"
- "srshl v16.4s, v16.4s, v22.4s\n"
- "sqadd v26.4s, v26.4s, v7.4s\n"
- "srshl v25.4s, v25.4s, v22.4s\n"
- "sqadd v10.4s, v10.4s, v29.4s\n"
- "srshl v20.4s, v20.4s, v19.4s\n"
+ "sqadd v19.4s, v19.4s, v4.4s\n"
+ "sshr v10.4s, v10.4s, #0x1f\n"
+ "sqadd v18.4s, v18.4s, v21.4s\n"
+ "sshr v31.4s, v31.4s, #0x1f\n"
+ "sqadd v9.4s, v9.4s, v20.4s\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "srshl v13.4s, v13.4s, v25.4s\n"
+ "sqadd v26.4s, v26.4s, v29.4s\n"
+ "srshl v19.4s, v19.4s, v25.4s\n"
+ "sqadd v11.4s, v11.4s, v10.4s\n"
+ "srshl v18.4s, v18.4s, v25.4s\n"
+ "sqadd v24.4s, v24.4s, v31.4s\n"
+ "srshl v9.4s, v9.4s, v25.4s\n"
+ "sqadd v23.4s, v23.4s, v30.4s\n"
+ "srshl v26.4s, v26.4s, v16.4s\n"
"sqxtn v13.4h, v13.4s\n"
- "srshl v18.4s, v18.4s, v19.4s\n"
+ "srshl v11.4s, v11.4s, v16.4s\n"
+ "sqxtn v19.4h, v19.4s\n"
+ "srshl v24.4s, v24.4s, v16.4s\n"
+ "sqxtn v18.4h, v18.4s\n"
+ "srshl v23.4s, v23.4s, v16.4s\n"
"sqxtn v9.4h, v9.4s\n"
- "srshl v26.4s, v26.4s, v19.4s\n"
- "sqxtn v16.4h, v16.4s\n"
- "srshl v10.4s, v10.4s, v19.4s\n"
- "sqxtn v25.4h, v25.4s\n"
- "sqxtn2 v13.8h, v20.4s\n"
- "sqxtn2 v9.8h, v18.4s\n"
- "sqxtn2 v16.8h, v26.4s\n"
- "sqxtn2 v25.8h, v10.4s\n"
+ "sqxtn2 v13.8h, v26.4s\n"
+ "sqxtn2 v19.8h, v11.4s\n"
+ "sqxtn2 v18.8h, v24.4s\n"
+ "sqxtn2 v9.8h, v23.4s\n"
"sqadd v13.8h, v13.8h, v14.8h\n"
+ "sqadd v19.8h, v19.8h, v14.8h\n"
+ "sqadd v18.8h, v18.8h, v14.8h\n"
"sqadd v9.8h, v9.8h, v14.8h\n"
- "sqadd v16.8h, v16.8h, v14.8h\n"
- "sqadd v25.8h, v25.8h, v14.8h\n"
- "smax v13.8h, v13.8h, v12.8h\n"
- "smax v9.8h, v9.8h, v12.8h\n"
- "smax v16.8h, v16.8h, v12.8h\n"
- "smax v25.8h, v25.8h, v12.8h\n"
- "smin v13.8h, v13.8h, v11.8h\n"
- "smin v9.8h, v9.8h, v11.8h\n"
- "smin v16.8h, v16.8h, v11.8h\n"
- "smin v25.8h, v25.8h, v11.8h\n"
+ "smax v13.8h, v13.8h, v17.8h\n"
+ "smax v19.8h, v19.8h, v17.8h\n"
+ "smax v18.8h, v18.8h, v17.8h\n"
+ "smax v9.8h, v9.8h, v17.8h\n"
+ "smin v13.8h, v13.8h, v15.8h\n"
+ "smin v19.8h, v19.8h, v15.8h\n"
+ "smin v18.8h, v18.8h, v15.8h\n"
+ "smin v9.8h, v9.8h, v15.8h\n"
"uzp1 v13.16b, v13.16b, v13.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v18.16b, v18.16b, v18.16b\n"
"uzp1 v9.16b, v9.16b, v9.16b\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "tbz x6, #2, 61f\n"
- "st1 { v13.s }[0], [x12], #0x4\n"
- "st1 { v9.s }[0], [x11], #0x4\n"
- "st1 { v16.s }[0], [x10], #0x4\n"
- "st1 { v25.s }[0], [x9], #0x4\n"
- "tbz x6, #1, 60f\n"
- "st1 { v13.h }[2], [x12], #0x2\n"
- "st1 { v9.h }[2], [x11], #0x2\n"
- "st1 { v16.h }[2], [x10], #0x2\n"
- "st1 { v25.h }[2], [x9], #0x2\n"
- "tbz x6, #0, 63f\n"
- "st1 { v13.b }[6], [x12], #0x1\n"
- "st1 { v9.b }[6], [x11], #0x1\n"
- "st1 { v16.b }[6], [x10], #0x1\n"
- "st1 { v25.b }[6], [x9], #0x1\n"
+ "tbz x8, #2, 61f\n"
+ "st1 { v13.s }[0], [x10], #0x4\n"
+ "st1 { v19.s }[0], [x9], #0x4\n"
+ "st1 { v18.s }[0], [x28], #0x4\n"
+ "st1 { v9.s }[0], [x27], #0x4\n"
+ "tbz x8, #1, 60f\n"
+ "st1 { v13.h }[2], [x10], #0x2\n"
+ "st1 { v19.h }[2], [x9], #0x2\n"
+ "st1 { v18.h }[2], [x28], #0x2\n"
+ "st1 { v9.h }[2], [x27], #0x2\n"
+ "tbz x8, #0, 63f\n"
+ "st1 { v13.b }[6], [x10], #0x1\n"
+ "st1 { v19.b }[6], [x9], #0x1\n"
+ "st1 { v18.b }[6], [x28], #0x1\n"
+ "st1 { v9.b }[6], [x27], #0x1\n"
"b 63f\n"
"60:" // Oddments: Bit 2: Bit 1: Unset
- "tbz x6, #0, 63f\n"
- "st1 { v13.b }[4], [x12], #0x1\n"
- "st1 { v9.b }[4], [x11], #0x1\n"
- "st1 { v16.b }[4], [x10], #0x1\n"
- "st1 { v25.b }[4], [x9], #0x1\n"
+ "tbz x8, #0, 63f\n"
+ "st1 { v13.b }[4], [x10], #0x1\n"
+ "st1 { v19.b }[4], [x9], #0x1\n"
+ "st1 { v18.b }[4], [x28], #0x1\n"
+ "st1 { v9.b }[4], [x27], #0x1\n"
"b 63f\n"
"61:" // Oddments: Bit 2: Unset
- "tbz x6, #1, 62f\n"
- "st1 { v13.h }[0], [x12], #0x2\n"
- "st1 { v9.h }[0], [x11], #0x2\n"
- "st1 { v16.h }[0], [x10], #0x2\n"
- "st1 { v25.h }[0], [x9], #0x2\n"
- "tbz x6, #0, 63f\n"
- "st1 { v13.b }[2], [x12], #0x1\n"
- "st1 { v9.b }[2], [x11], #0x1\n"
- "st1 { v16.b }[2], [x10], #0x1\n"
- "st1 { v25.b }[2], [x9], #0x1\n"
+ "tbz x8, #1, 62f\n"
+ "st1 { v13.h }[0], [x10], #0x2\n"
+ "st1 { v19.h }[0], [x9], #0x2\n"
+ "st1 { v18.h }[0], [x28], #0x2\n"
+ "st1 { v9.h }[0], [x27], #0x2\n"
+ "tbz x8, #0, 63f\n"
+ "st1 { v13.b }[2], [x10], #0x1\n"
+ "st1 { v19.b }[2], [x9], #0x1\n"
+ "st1 { v18.b }[2], [x28], #0x1\n"
+ "st1 { v9.b }[2], [x27], #0x1\n"
"b 63f\n"
"62:" // Oddments: Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 63f\n"
- "st1 { v13.b }[0], [x12], #0x1\n"
- "st1 { v9.b }[0], [x11], #0x1\n"
- "st1 { v16.b }[0], [x10], #0x1\n"
- "st1 { v25.b }[0], [x9], #0x1\n"
+ "tbz x8, #0, 63f\n"
+ "st1 { v13.b }[0], [x10], #0x1\n"
+ "st1 { v19.b }[0], [x9], #0x1\n"
+ "st1 { v18.b }[0], [x28], #0x1\n"
+ "st1 { v9.b }[0], [x27], #0x1\n"
"63:" // Oddments: Bit 2: End
"64:" // End
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
index de072a7d55..0216786c6f 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -100,75 +100,75 @@ void a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "ldr x7, [%x[params], %[offsetof_Params_n_channels]]\n"
- "ldr x23, [%x[params], %[offsetof_Params_requant]]\n"
- "lsr x8, x7, #0x3\n"
- "add x20, x23, %[offsetof_Requantize32_a_offset]\n"
- "ld1r { v12.16b }, [x20]\n"
+ "ldr x19, [%x[params], %[offsetof_Params_requant]]\n"
+ "ldr x8, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "add x24, x19, %[offsetof_Requantize32_a_offset]\n"
+ "add x23, x19, %[offsetof_Requantize32_b_offset]\n"
"ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n"
- "add x21, x23, %[offsetof_Requantize32_b_offset]\n"
- "add x20, x23, %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v13.16b }, [x21]\n"
- "ld1r { v11.8h }, [x20]\n"
- "add x21, x23, %[offsetof_Requantize32_minval]\n"
- "add x20, x23, %[offsetof_Requantize32_maxval]\n"
- "ld1r { v16.8h }, [x21]\n"
- "ld1r { v14.8h }, [x20]\n"
- "mov x17, #0x0\n"
- "mov x16, #0x0\n"
- "add x15, %x[params], %[offsetof_Params_inptrs]\n"
- "ldr x14, [%x[params], %[offsetof_Params_weights]]\n"
+ "add x21, x19, %[offsetof_Requantize32_c_offset]\n"
+ "add x20, x19, %[offsetof_Requantize32_minval]\n"
+ "ldr x17, [%x[params], %[offsetof_Params_weights]]\n"
+ "add x19, x19, %[offsetof_Requantize32_maxval]\n"
+ "ld1r { v12.16b }, [x24]\n"
+ "ld1r { v13.16b }, [x23]\n"
+ "lsr x16, x8, #0x3\n"
+ "ld1r { v11.8h }, [x21]\n"
+ "ld1r { v17.8h }, [x20]\n"
+ "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
+ "ld1r { v14.8h }, [x19]\n"
"ldr x13, [%x[params], %[offsetof_Params_requant_muls]]\n"
- "ldr x12, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "ldp x11, x10, [x22, #0x0]\n"
- "ldp x9, x28, [x22, #0x10]\n"
- "cbz x8, 3f\n"
- "ldr d0, [x14, #0x0]\n"
- "ldr d1, [x14, #0x8]\n"
- "subs x8, x8, #0x1\n"
+ "add x12, %x[params], %[offsetof_Params_inptrs]\n"
+ "ldr x11, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "ldp x10, x9, [x22, #0x0]\n"
+ "ldp x28, x27, [x22, #0x10]\n"
+ "cbz x16, 3f\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr q15, [x19, #0x0]\n"
+ "subs x16, x16, #0x1\n"
+ "mov v9.16b, v15.16b\n"
+ "ldr q10, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr d0, [x17, #0x0]\n"
+ "ldr d1, [x17, #0x8]\n"
+ "ldr d2, [x17, #0x10]\n"
+ "mov v16.16b, v10.16b\n"
+ "mov v22.16b, v15.16b\n"
+ "ldr d3, [x17, #0x18]\n"
+ "ldr d4, [x17, #0x20]\n"
+ "mov v21.16b, v10.16b\n"
+ "mov v23.16b, v15.16b\n"
+ "ldr d5, [x17, #0x28]\n"
+ "ldr d6, [x17, #0x30]\n"
+ "mov v18.16b, v10.16b\n"
"usubl v0.8h, v0.8b, v13.8b\n"
- "ldr d2, [x14, #0x10]\n"
- "ldr d3, [x14, #0x18]\n"
+ "ldr d7, [x17, #0x38]\n"
+ "ldr d8, [x17, #0x40]\n"
"usubl v1.8h, v1.8b, v13.8b\n"
"usubl v2.8h, v2.8b, v13.8b\n"
- "ldr d4, [x14, #0x20]\n"
- "ldr d5, [x14, #0x28]\n"
+ "ldp x26, x25, [x12, #0x0]\n"
+ "ldp x24, x23, [x12, #0x10]\n"
"usubl v3.8h, v3.8b, v13.8b\n"
"usubl v4.8h, v4.8b, v13.8b\n"
- "ldr d6, [x14, #0x30]\n"
- "ldr d7, [x14, #0x38]\n"
+ "ldp x22, x21, [x12, #0x20]\n"
+ "ldp x20, x19, [x12, #0x30]\n"
"usubl v5.8h, v5.8b, v13.8b\n"
"usubl v6.8h, v6.8b, v13.8b\n"
- "ldr d8, [x14, #0x40]\n"
- "ldr x24, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr d31, [x26, x15]\n"
+ "ldr d30, [x25, x15]\n"
"usubl v7.8h, v7.8b, v13.8b\n"
"usubl v8.8h, v8.8b, v13.8b\n"
- "ldr q15, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "add x24, x24, #0x20\n"
- "str x24, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "mov v10.16b, v15.16b\n"
- "mov v20.16b, v17.16b\n"
- "ldp x23, x22, [x15, #0x20]\n"
- "ldp x21, x20, [x15, #0x30]\n"
- "mov v9.16b, v15.16b\n"
- "mov v23.16b, v17.16b\n"
- "ldr d31, [x27, x17]\n"
- "ldr d30, [x26, x17]\n"
- "mov v21.16b, v15.16b\n"
- "mov v22.16b, v17.16b\n"
- "ldr d29, [x25, x17]\n"
- "ldr d28, [x24, x17]\n"
+ "ldr d29, [x24, x15]\n"
+ "ldr d28, [x23, x15]\n"
"usubl v31.8h, v31.8b, v12.8b\n"
"usubl v30.8h, v30.8b, v12.8b\n"
- "ldr d27, [x23, x17]\n"
- "ldr d26, [x22, x17]\n"
+ "ldr d27, [x22, x15]\n"
+ "ldr d26, [x21, x15]\n"
"usubl v29.8h, v29.8b, v12.8b\n"
"usubl v28.8h, v28.8b, v12.8b\n"
- "ldr d25, [x21, x17]\n"
- "ldr d24, [x20, x17]\n"
+ "ldr d25, [x20, x15]\n"
+ "ldr d24, [x19, x15]\n"
"usubl v27.8h, v27.8b, v12.8b\n"
"usubl v26.8h, v26.8b, v12.8b\n"
"usubl v25.8h, v25.8b, v12.8b\n"
@@ -176,250 +176,250 @@ void a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(
"beq 2f\n"
"1:" // Loop
"smlal v15.4s, v31.4h, v8.4h\n"
- "smlal2 v17.4s, v31.8h, v8.8h\n"
- "ldr x24, [x15, #0x40]\n"
- "ldr x22, [x15, #0x48]\n"
- "smlal v10.4s, v31.4h, v6.4h\n"
- "smlal2 v20.4s, v31.8h, v6.8h\n"
- "ldr x21, [x15, #0x50]\n"
- "ldr x20, [x15, #0x58]\n"
+ "smlal2 v10.4s, v31.8h, v8.8h\n"
+ "ldr x24, [x12, #0x40]\n"
+ "ldr x23, [x12, #0x48]\n"
+ "smlal v9.4s, v31.4h, v6.4h\n"
+ "smlal2 v16.4s, v31.8h, v6.8h\n"
+ "ldr x21, [x12, #0x50]\n"
+ "ldr x19, [x12, #0x58]\n"
"smlal v15.4s, v30.4h, v0.4h\n"
- "smlal2 v17.4s, v30.8h, v0.8h\n"
- "ldr q19, [x13, #0x0]\n"
- "ldr x23, [x15, #0x78]\n"
- "smlal v10.4s, v28.4h, v1.4h\n"
- "smlal2 v20.4s, v28.8h, v1.8h\n"
- "ldr d28, [x22, x17]\n"
+ "smlal2 v10.4s, v30.8h, v0.8h\n"
+ "ldr x22, [x12, #0x78]\n"
+ "ldr x20, [x12, #0x60]\n"
+ "smlal v9.4s, v28.4h, v1.4h\n"
+ "smlal2 v16.4s, v28.8h, v1.8h\n"
+ "ldr d28, [x23, x15]\n"
"usubl v28.8h, v28.8b, v12.8b\n"
"smlal v15.4s, v29.4h, v1.4h\n"
- "smlal2 v17.4s, v29.8h, v1.8h\n"
- "ldr d29, [x24, x17]\n"
+ "smlal2 v10.4s, v29.8h, v1.8h\n"
+ "ldr d29, [x24, x15]\n"
"usubl v29.8h, v29.8b, v12.8b\n"
- "smlal v10.4s, v27.4h, v2.4h\n"
- "smlal2 v20.4s, v27.8h, v2.8h\n"
- "ldr d27, [x21, x17]\n"
+ "smlal v9.4s, v27.4h, v2.4h\n"
+ "smlal2 v16.4s, v27.8h, v2.8h\n"
+ "ldr d27, [x21, x15]\n"
"usubl v27.8h, v27.8b, v12.8b\n"
"smlal v15.4s, v26.4h, v3.4h\n"
- "smlal2 v17.4s, v26.8h, v3.8h\n"
- "ldr d26, [x20, x17]\n"
- "ldr x20, [x15, #0x60]\n"
- "smlal v10.4s, v24.4h, v0.4h\n"
- "smlal2 v20.4s, v24.8h, v0.8h\n"
+ "smlal2 v10.4s, v26.8h, v3.8h\n"
+ "ldr d26, [x19, x15]\n"
"usubl v26.8h, v26.8b, v12.8b\n"
- "ldr x21, [x15, #0x80]\n"
+ "smlal v9.4s, v24.4h, v0.4h\n"
+ "smlal2 v16.4s, v24.8h, v0.8h\n"
+ "ldr x21, [x12, #0x80]\n"
+ "ldr x19, [x12, #0x68]\n"
"smlal v15.4s, v25.4h, v4.4h\n"
- "smlal2 v17.4s, v25.8h, v4.8h\n"
- "ldr d25, [x20, x17]\n"
- "ldr x20, [x15, #0x68]\n"
- "smlal v10.4s, v29.4h, v4.4h\n"
- "smlal2 v20.4s, v29.8h, v4.8h\n"
- "ldr d29, [x20, x17]\n"
+ "smlal2 v10.4s, v25.8h, v4.8h\n"
+ "ldr d25, [x20, x15]\n"
"usubl v25.8h, v25.8b, v12.8b\n"
+ "smlal v9.4s, v29.4h, v4.4h\n"
+ "smlal2 v16.4s, v29.8h, v4.8h\n"
+ "ldr x20, [x12, #0x88]\n"
+ "ldr d29, [x19, x15]\n"
"smlal v15.4s, v24.4h, v2.4h\n"
- "smlal2 v17.4s, v24.8h, v2.8h\n"
- "ldr q18, [x12, #0x0]\n"
- "ldr x22, [x15, #0x88]\n"
- "smlal v10.4s, v28.4h, v5.4h\n"
- "smlal2 v20.4s, v28.8h, v5.8h\n"
- "ldr d28, [x21, x17]\n"
- "ldr x21, [x15, #0x70]\n"
- "smlal v9.4s, v31.4h, v2.4h\n"
- "smlal2 v23.4s, v31.8h, v2.8h\n"
+ "smlal2 v10.4s, v24.8h, v2.8h\n"
+ "ldr x19, [x12, #0x70]\n"
+ "usubl v29.8h, v29.8b, v12.8b\n"
+ "smlal v9.4s, v28.4h, v5.4h\n"
+ "smlal2 v16.4s, v28.8h, v5.8h\n"
+ "ldr d28, [x21, x15]\n"
"usubl v28.8h, v28.8b, v12.8b\n"
- "ldr x25, [x15, #0x98]\n"
+ "smlal v22.4s, v31.4h, v2.4h\n"
+ "smlal2 v21.4s, v31.8h, v2.8h\n"
+ "ldr x24, [x12, #0x98]\n"
+ "ldr d24, [x19, x15]\n"
"smlal v15.4s, v27.4h, v5.4h\n"
- "smlal2 v17.4s, v27.8h, v5.8h\n"
- "usubl v29.8h, v29.8b, v12.8b\n"
- "ldr x24, [x15, #0x90]\n"
- "smlal v10.4s, v27.4h, v3.4h\n"
- "smlal2 v20.4s, v27.8h, v3.8h\n"
- "ldr d27, [x23, x17]\n"
+ "smlal2 v10.4s, v27.8h, v5.8h\n"
+ "usubl v24.8h, v24.8b, v12.8b\n"
+ "ldr x23, [x12, #0x90]\n"
+ "smlal v9.4s, v27.4h, v3.4h\n"
+ "smlal2 v16.4s, v27.8h, v3.8h\n"
+ "ldr d27, [x22, x15]\n"
"usubl v27.8h, v27.8b, v12.8b\n"
- "smlal v21.4s, v31.4h, v0.4h\n"
- "smlal v9.4s, v26.4h, v3.4h\n"
- "ldr x23, [x15, #0xa8]\n"
- "ldr x20, [x15, #0xa0]\n"
- "smlal2 v23.4s, v26.8h, v3.8h\n"
- "ldr d26, [x22, x17]\n"
- "smlal2 v22.4s, v31.8h, v0.8h\n"
- "ldr d24, [x21, x17]\n"
- "smlal v21.4s, v27.4h, v4.4h\n"
- "smlal v9.4s, v25.4h, v0.4h\n"
+ "smlal v23.4s, v31.4h, v0.4h\n"
+ "smlal v22.4s, v26.4h, v3.4h\n"
+ "ldr x22, [x12, #0xa8]\n"
+ "ldr x19, [x12, #0xa0]\n"
+ "smlal2 v21.4s, v26.8h, v3.8h\n"
+ "smlal2 v18.4s, v31.8h, v0.8h\n"
+ "ldr d26, [x20, x15]\n"
"usubl v26.8h, v26.8b, v12.8b\n"
- "ldr x22, [x15, #0xb0]\n"
- "smlal2 v23.4s, v25.8h, v0.8h\n"
- "ldr q30, [x13, #0x10]\n"
- "smlal2 v22.4s, v27.8h, v4.8h\n"
- "ldr d27, [x20, x17]\n"
- "smlal v21.4s, v28.4h, v1.4h\n"
+ "smlal v23.4s, v27.4h, v4.4h\n"
+ "smlal v22.4s, v25.4h, v0.4h\n"
+ "ldr x21, [x12, #0xb0]\n"
+ "ldr x20, [x12, #0xb8]\n"
+ "smlal2 v21.4s, v25.8h, v0.8h\n"
+ "smlal2 v18.4s, v27.8h, v4.8h\n"
+ "ldr d27, [x19, x15]\n"
+ "usubl v27.8h, v27.8b, v12.8b\n"
+ "smlal v23.4s, v28.4h, v1.4h\n"
"smlal v15.4s, v25.4h, v6.4h\n"
- "usubl v24.8h, v24.8b, v12.8b\n"
- "ldr x21, [x15, #0xb8]\n"
- "smlal2 v17.4s, v25.8h, v6.8h\n"
- "ldr d25, [x24, x17]\n"
- "smlal v9.4s, v29.4h, v4.4h\n"
+ "ldr x19, [x12, #0xc0]\n"
+ "ldr q19, [x13, #0x0]\n"
+ "smlal2 v10.4s, v25.8h, v6.8h\n"
+ "smlal v22.4s, v29.4h, v4.4h\n"
+ "ldr d25, [x23, x15]\n"
"usubl v25.8h, v25.8b, v12.8b\n"
- "smlal2 v23.4s, v29.8h, v4.8h\n"
- "ldr d29, [x25, x17]\n"
- "ldr q31, [x12, #0x10]\n"
- "smlal2 v22.4s, v28.8h, v1.8h\n"
- "smlal v21.4s, v26.4h, v5.4h\n"
+ "smlal2 v21.4s, v29.8h, v4.8h\n"
+ "ldr d29, [x24, x15]\n"
+ "smlal2 v18.4s, v28.8h, v1.8h\n"
"usubl v29.8h, v29.8b, v12.8b\n"
+ "smlal v23.4s, v26.4h, v5.4h\n"
"smlal v15.4s, v24.4h, v7.4h\n"
- "ldr x20, [x15, #0xc0]\n"
- "smlal2 v17.4s, v24.8h, v7.8h\n"
- "smlal v9.4s, v24.4h, v1.4h\n"
- "usubl v27.8h, v27.8b, v12.8b\n"
- "ldr x24, [%x[params], %[offsetof_Params_bias]]\n"
- "smlal2 v23.4s, v24.8h, v1.8h\n"
- "ldr d24, [x23, x17]\n"
- "smlal2 v22.4s, v26.8h, v5.8h\n"
- "ldr d26, [x22, x17]\n"
- "smlal v21.4s, v29.4h, v2.4h\n"
+ "ldr q0, [x11, #0x0]\n"
+ "ldr q4, [x13, #0x10]\n"
+ "smlal2 v10.4s, v24.8h, v7.8h\n"
+ "smlal v22.4s, v24.4h, v1.4h\n"
+ "sqrdmulh v15.4s, v15.4s, v19.4s\n"
+ "ldr q31, [x11, #0x10]\n"
+ "smlal2 v21.4s, v24.8h, v1.8h\n"
+ "ldr d24, [x22, x15]\n"
+ "smlal2 v18.4s, v26.8h, v5.8h\n"
"usubl v24.8h, v24.8b, v12.8b\n"
- "smlal2 v22.4s, v29.8h, v2.8h\n"
- "add x14, x14, #0x48\n"
- "smlal v9.4s, v25.4h, v6.4h\n"
- "smlal v21.4s, v24.4h, v3.4h\n"
+ "smlal v23.4s, v29.4h, v2.4h\n"
+ "ldr d26, [x21, x15]\n"
+ "smlal2 v18.4s, v29.8h, v2.8h\n"
"usubl v26.8h, v26.8b, v12.8b\n"
- "subs x8, x8, #0x1\n"
- "smlal v10.4s, v28.4h, v7.4h\n"
- "smlal2 v20.4s, v28.8h, v7.8h\n"
- "sqrdmulh v15.4s, v15.4s, v19.4s\n"
- "add x13, x13, #0x20\n"
- "smlal2 v23.4s, v25.8h, v6.8h\n"
- "ldr d25, [x21, x17]\n"
- "smlal2 v22.4s, v24.8h, v3.8h\n"
+ "smlal v22.4s, v25.4h, v6.4h\n"
+ "smlal v23.4s, v24.4h, v3.4h\n"
+ "and v30.16b, v15.16b, v0.16b\n"
+ "add x17, x17, #0x48\n"
+ "smlal v9.4s, v28.4h, v7.4h\n"
+ "smlal2 v16.4s, v28.8h, v7.8h\n"
+ "sqrdmulh v10.4s, v10.4s, v4.4s\n"
+ "subs x16, x16, #0x1\n"
+ "smlal2 v21.4s, v25.8h, v6.8h\n"
+ "ldr d25, [x20, x15]\n"
+ "smlal2 v18.4s, v24.8h, v3.8h\n"
"usubl v25.8h, v25.8b, v12.8b\n"
- "smlal v9.4s, v27.4h, v7.4h\n"
- "smlal v21.4s, v26.4h, v7.4h\n"
- "and v0.16b, v15.16b, v18.16b\n"
- "add x12, x12, #0x20\n"
- "smlal v10.4s, v29.4h, v8.4h\n"
- "smlal2 v20.4s, v29.8h, v8.8h\n"
- "ldr d29, [x20, x17]\n"
+ "smlal v22.4s, v27.4h, v7.4h\n"
+ "smlal v23.4s, v26.4h, v7.4h\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "add x13, x13, #0x20\n"
+ "smlal v9.4s, v29.4h, v8.4h\n"
+ "smlal2 v16.4s, v29.8h, v8.8h\n"
+ "ldr d29, [x19, x15]\n"
"usubl v29.8h, v29.8b, v12.8b\n"
- "smlal2 v23.4s, v27.8h, v7.8h\n"
- "smlal2 v22.4s, v26.8h, v7.8h\n"
- "sqrdmulh v17.4s, v17.4s, v30.4s\n"
- "add x17, x17, #0x8\n"
- "smlal v9.4s, v24.4h, v5.4h\n"
- "smlal v21.4s, v25.4h, v6.4h\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "smlal2 v23.4s, v24.8h, v5.8h\n"
- "smlal2 v22.4s, v25.8h, v6.8h\n"
- "and v7.16b, v17.16b, v31.16b\n"
- "smlal v9.4s, v25.4h, v8.4h\n"
- "smlal v21.4s, v29.4h, v8.4h\n"
- "sqrdmulh v10.4s, v10.4s, v19.4s\n"
- "smlal2 v23.4s, v25.8h, v8.8h\n"
- "smlal2 v22.4s, v29.8h, v8.8h\n"
+ "smlal2 v21.4s, v27.8h, v7.8h\n"
+ "smlal2 v18.4s, v26.8h, v7.8h\n"
"sqrdmulh v9.4s, v9.4s, v19.4s\n"
- "sqrdmulh v21.4s, v21.4s, v19.4s\n"
- "sqadd v15.4s, v15.4s, v0.4s\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "and v19.16b, v10.16b, v18.16b\n"
- "sqrdmulh v20.4s, v20.4s, v30.4s\n"
- "and v27.16b, v9.16b, v18.16b\n"
- "sqrdmulh v23.4s, v23.4s, v30.4s\n"
- "and v0.16b, v21.16b, v18.16b\n"
- "sqrdmulh v22.4s, v22.4s, v30.4s\n"
- "sqadd v17.4s, v17.4s, v7.4s\n"
+ "add x15, x15, #0x8\n"
+ "smlal v22.4s, v24.4h, v5.4h\n"
+ "smlal v23.4s, v25.4h, v6.4h\n"
+ "and v28.16b, v9.16b, v0.16b\n"
+ "add x11, x11, #0x20\n"
+ "smlal2 v21.4s, v24.8h, v5.8h\n"
+ "smlal2 v18.4s, v25.8h, v6.8h\n"
+ "sqrdmulh v16.4s, v16.4s, v4.4s\n"
+ "smlal v22.4s, v25.4h, v8.4h\n"
+ "smlal v23.4s, v29.4h, v8.4h\n"
+ "sqrdmulh v22.4s, v22.4s, v19.4s\n"
+ "smlal2 v21.4s, v25.8h, v8.8h\n"
+ "smlal2 v18.4s, v29.8h, v8.8h\n"
+ "sqrdmulh v23.4s, v23.4s, v19.4s\n"
+ "and v29.16b, v22.16b, v0.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v4.4s\n"
+ "and v20.16b, v23.16b, v0.16b\n"
+ "sqrdmulh v18.4s, v18.4s, v4.4s\n"
+ "and v19.16b, v10.16b, v31.16b\n"
+ "sshr v28.4s, v28.4s, #0x1f\n"
+ "and v4.16b, v16.16b, v31.16b\n"
+ "sshr v29.4s, v29.4s, #0x1f\n"
+ "and v5.16b, v21.16b, v31.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v26.16b, v18.16b, v31.16b\n"
+ "sqadd v15.4s, v15.4s, v30.4s\n"
"sshr v19.4s, v19.4s, #0x1f\n"
- "and v5.16b, v20.16b, v31.16b\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "and v4.16b, v23.16b, v31.16b\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "and v7.16b, v22.16b, v31.16b\n"
- "sqadd v10.4s, v10.4s, v19.4s\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sqadd v9.4s, v9.4s, v27.4s\n"
+ "sqadd v9.4s, v9.4s, v28.4s\n"
"sshr v4.4s, v4.4s, #0x1f\n"
- "sqadd v21.4s, v21.4s, v0.4s\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "srshl v15.4s, v15.4s, v18.4s\n"
- "srshl v10.4s, v10.4s, v18.4s\n"
- "sqadd v20.4s, v20.4s, v5.4s\n"
- "srshl v9.4s, v9.4s, v18.4s\n"
- "sqadd v23.4s, v23.4s, v4.4s\n"
- "srshl v21.4s, v21.4s, v18.4s\n"
- "sqadd v22.4s, v22.4s, v7.4s\n"
- "srshl v17.4s, v17.4s, v31.4s\n"
+ "sqadd v22.4s, v22.4s, v29.4s\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
+ "sqadd v23.4s, v23.4s, v20.4s\n"
+ "sshr v26.4s, v26.4s, #0x1f\n"
+ "srshl v15.4s, v15.4s, v0.4s\n"
+ "sqadd v10.4s, v10.4s, v19.4s\n"
+ "srshl v9.4s, v9.4s, v0.4s\n"
+ "sqadd v16.4s, v16.4s, v4.4s\n"
+ "srshl v22.4s, v22.4s, v0.4s\n"
+ "sqadd v21.4s, v21.4s, v5.4s\n"
+ "srshl v23.4s, v23.4s, v0.4s\n"
+ "sqadd v18.4s, v18.4s, v26.4s\n"
+ "srshl v10.4s, v10.4s, v31.4s\n"
"sqxtn v15.4h, v15.4s\n"
- "srshl v20.4s, v20.4s, v31.4s\n"
- "sqxtn v10.4h, v10.4s\n"
- "srshl v23.4s, v23.4s, v31.4s\n"
+ "srshl v16.4s, v16.4s, v31.4s\n"
"sqxtn v9.4h, v9.4s\n"
- "srshl v22.4s, v22.4s, v31.4s\n"
- "sqxtn v21.4h, v21.4s\n"
- "sqxtn2 v15.8h, v17.4s\n"
- "sqxtn2 v10.8h, v20.4s\n"
- "sqxtn2 v9.8h, v23.4s\n"
- "sqxtn2 v21.8h, v22.4s\n"
+ "srshl v21.4s, v21.4s, v31.4s\n"
+ "sqxtn v22.4h, v22.4s\n"
+ "srshl v18.4s, v18.4s, v31.4s\n"
+ "sqxtn v23.4h, v23.4s\n"
+ "sqxtn2 v15.8h, v10.4s\n"
+ "sqxtn2 v9.8h, v16.4s\n"
+ "sqxtn2 v22.8h, v21.4s\n"
+ "sqxtn2 v23.8h, v18.4s\n"
"sqadd v15.8h, v15.8h, v11.8h\n"
- "sqadd v10.8h, v10.8h, v11.8h\n"
"sqadd v9.8h, v9.8h, v11.8h\n"
- "sqadd v21.8h, v21.8h, v11.8h\n"
- "smax v15.8h, v15.8h, v16.8h\n"
- "smax v10.8h, v10.8h, v16.8h\n"
- "smax v9.8h, v9.8h, v16.8h\n"
- "smax v21.8h, v21.8h, v16.8h\n"
+ "sqadd v22.8h, v22.8h, v11.8h\n"
+ "sqadd v23.8h, v23.8h, v11.8h\n"
+ "smax v15.8h, v15.8h, v17.8h\n"
+ "smax v9.8h, v9.8h, v17.8h\n"
+ "smax v22.8h, v22.8h, v17.8h\n"
+ "smax v23.8h, v23.8h, v17.8h\n"
"smin v15.8h, v15.8h, v14.8h\n"
- "smin v10.8h, v10.8h, v14.8h\n"
"smin v9.8h, v9.8h, v14.8h\n"
- "smin v21.8h, v21.8h, v14.8h\n"
+ "smin v22.8h, v22.8h, v14.8h\n"
+ "smin v23.8h, v23.8h, v14.8h\n"
"uzp1 v15.16b, v15.16b, v15.16b\n"
- "str d15, [x11, x16]\n"
- "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "str d15, [x10, x14]\n"
"uzp1 v9.16b, v9.16b, v9.16b\n"
- "str d10, [x10, x16]\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "str d9, [x9, x16]\n"
- "str d21, [x28, x16]\n"
- "ldr q15, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "add x24, x24, #0x20\n"
- "ldr d0, [x14, #0x0]\n"
- "ldr d1, [x14, #0x8]\n"
- "add x16, x16, #0x8\n"
- "str x24, [%x[params], %[offsetof_Params_bias]]\n"
- "ldr d2, [x14, #0x10]\n"
- "ldr d3, [x14, #0x18]\n"
- "mov v10.16b, v15.16b\n"
- "mov v20.16b, v17.16b\n"
- "ldr d4, [x14, #0x20]\n"
- "ldr d5, [x14, #0x28]\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str d9, [x9, x14]\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "str d22, [x28, x14]\n"
+ "str d23, [x27, x14]\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr q15, [x19, #0x0]\n"
+ "add x14, x14, #0x8\n"
+ "ldr q10, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr d0, [x17, #0x0]\n"
+ "ldr d1, [x17, #0x8]\n"
+ "ldr d2, [x17, #0x10]\n"
"mov v9.16b, v15.16b\n"
- "mov v23.16b, v17.16b\n"
- "ldr d6, [x14, #0x30]\n"
- "ldr d7, [x14, #0x38]\n"
- "mov v21.16b, v15.16b\n"
- "mov v22.16b, v17.16b\n"
- "ldr d8, [x14, #0x40]\n"
- "ldp x27, x26, [x15, #0x0]\n"
+ "mov v16.16b, v10.16b\n"
+ "ldr d3, [x17, #0x18]\n"
+ "ldr d4, [x17, #0x20]\n"
+ "mov v22.16b, v15.16b\n"
+ "mov v21.16b, v10.16b\n"
+ "ldr d5, [x17, #0x28]\n"
+ "ldr d6, [x17, #0x30]\n"
+ "mov v23.16b, v15.16b\n"
+ "mov v18.16b, v10.16b\n"
+ "ldr d7, [x17, #0x38]\n"
+ "ldr d8, [x17, #0x40]\n"
"usubl v0.8h, v0.8b, v13.8b\n"
"usubl v1.8h, v1.8b, v13.8b\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "ldp x23, x22, [x15, #0x20]\n"
+ "ldp x26, x25, [x12, #0x0]\n"
+ "ldp x24, x23, [x12, #0x10]\n"
"usubl v2.8h, v2.8b, v13.8b\n"
"usubl v3.8h, v3.8b, v13.8b\n"
- "ldp x21, x20, [x15, #0x30]\n"
- "ldr d31, [x27, x17]\n"
+ "ldp x22, x21, [x12, #0x20]\n"
+ "ldp x20, x19, [x12, #0x30]\n"
"usubl v4.8h, v4.8b, v13.8b\n"
"usubl v5.8h, v5.8b, v13.8b\n"
- "ldr d30, [x26, x17]\n"
- "ldr d29, [x25, x17]\n"
+ "ldr d31, [x26, x15]\n"
+ "ldr d30, [x25, x15]\n"
"usubl v6.8h, v6.8b, v13.8b\n"
"usubl v7.8h, v7.8b, v13.8b\n"
- "ldr d28, [x24, x17]\n"
- "ldr d27, [x23, x17]\n"
+ "ldr d29, [x24, x15]\n"
+ "ldr d28, [x23, x15]\n"
"usubl v8.8h, v8.8b, v13.8b\n"
"usubl v31.8h, v31.8b, v12.8b\n"
- "ldr d26, [x22, x17]\n"
- "ldr d25, [x21, x17]\n"
+ "ldr d27, [x22, x15]\n"
+ "ldr d26, [x21, x15]\n"
"usubl v30.8h, v30.8b, v12.8b\n"
"usubl v29.8h, v29.8b, v12.8b\n"
- "ldr d24, [x20, x17]\n"
+ "ldr d25, [x20, x15]\n"
+ "ldr d24, [x19, x15]\n"
"usubl v28.8h, v28.8b, v12.8b\n"
"usubl v27.8h, v27.8b, v12.8b\n"
"usubl v26.8h, v26.8b, v12.8b\n"
@@ -428,966 +428,966 @@ void a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(
"bgt 1b\n"
"2:" // Tail
"smlal v15.4s, v31.4h, v8.4h\n"
- "smlal2 v17.4s, v31.8h, v8.8h\n"
- "ldr x24, [x15, #0x40]\n"
- "ldr x22, [x15, #0x48]\n"
- "smlal v10.4s, v31.4h, v6.4h\n"
- "smlal2 v20.4s, v31.8h, v6.8h\n"
- "ldr x21, [x15, #0x50]\n"
- "ldr x20, [x15, #0x58]\n"
+ "smlal2 v10.4s, v31.8h, v8.8h\n"
+ "ldr x24, [x12, #0x40]\n"
+ "ldr x23, [x12, #0x48]\n"
+ "smlal v9.4s, v31.4h, v6.4h\n"
+ "smlal2 v16.4s, v31.8h, v6.8h\n"
+ "ldr x21, [x12, #0x50]\n"
+ "ldr x19, [x12, #0x58]\n"
"smlal v15.4s, v30.4h, v0.4h\n"
- "smlal2 v17.4s, v30.8h, v0.8h\n"
- "ldr q19, [x13, #0x0]\n"
- "ldr x23, [x15, #0x78]\n"
- "smlal v10.4s, v28.4h, v1.4h\n"
- "smlal2 v20.4s, v28.8h, v1.8h\n"
- "ldr d28, [x22, x17]\n"
+ "smlal2 v10.4s, v30.8h, v0.8h\n"
+ "ldr x22, [x12, #0x78]\n"
+ "ldr x20, [x12, #0x60]\n"
+ "smlal v9.4s, v28.4h, v1.4h\n"
+ "smlal2 v16.4s, v28.8h, v1.8h\n"
+ "ldr d28, [x23, x15]\n"
"usubl v28.8h, v28.8b, v12.8b\n"
"smlal v15.4s, v29.4h, v1.4h\n"
- "smlal2 v17.4s, v29.8h, v1.8h\n"
- "ldr d29, [x24, x17]\n"
+ "smlal2 v10.4s, v29.8h, v1.8h\n"
+ "ldr d29, [x24, x15]\n"
"usubl v29.8h, v29.8b, v12.8b\n"
- "smlal v10.4s, v27.4h, v2.4h\n"
- "smlal2 v20.4s, v27.8h, v2.8h\n"
- "ldr d27, [x21, x17]\n"
+ "smlal v9.4s, v27.4h, v2.4h\n"
+ "smlal2 v16.4s, v27.8h, v2.8h\n"
+ "ldr d27, [x21, x15]\n"
"usubl v27.8h, v27.8b, v12.8b\n"
"smlal v15.4s, v26.4h, v3.4h\n"
- "smlal2 v17.4s, v26.8h, v3.8h\n"
- "ldr d26, [x20, x17]\n"
- "ldr x20, [x15, #0x60]\n"
- "smlal v10.4s, v24.4h, v0.4h\n"
- "smlal2 v20.4s, v24.8h, v0.8h\n"
+ "smlal2 v10.4s, v26.8h, v3.8h\n"
+ "ldr d26, [x19, x15]\n"
"usubl v26.8h, v26.8b, v12.8b\n"
- "ldr x21, [x15, #0x80]\n"
+ "smlal v9.4s, v24.4h, v0.4h\n"
+ "smlal2 v16.4s, v24.8h, v0.8h\n"
+ "ldr x21, [x12, #0x80]\n"
+ "ldr x19, [x12, #0x68]\n"
"smlal v15.4s, v25.4h, v4.4h\n"
- "smlal2 v17.4s, v25.8h, v4.8h\n"
- "ldr d25, [x20, x17]\n"
- "ldr x20, [x15, #0x68]\n"
- "smlal v10.4s, v29.4h, v4.4h\n"
- "smlal2 v20.4s, v29.8h, v4.8h\n"
- "ldr d29, [x20, x17]\n"
+ "smlal2 v10.4s, v25.8h, v4.8h\n"
+ "ldr d25, [x20, x15]\n"
"usubl v25.8h, v25.8b, v12.8b\n"
+ "smlal v9.4s, v29.4h, v4.4h\n"
+ "smlal2 v16.4s, v29.8h, v4.8h\n"
+ "ldr x20, [x12, #0x88]\n"
+ "ldr d29, [x19, x15]\n"
"smlal v15.4s, v24.4h, v2.4h\n"
- "smlal2 v17.4s, v24.8h, v2.8h\n"
- "ldr q18, [x12, #0x0]\n"
- "ldr x22, [x15, #0x88]\n"
- "smlal v10.4s, v28.4h, v5.4h\n"
- "smlal2 v20.4s, v28.8h, v5.8h\n"
- "ldr d28, [x21, x17]\n"
- "ldr x21, [x15, #0x70]\n"
- "smlal v9.4s, v31.4h, v2.4h\n"
- "smlal2 v23.4s, v31.8h, v2.8h\n"
+ "smlal2 v10.4s, v24.8h, v2.8h\n"
+ "ldr x19, [x12, #0x70]\n"
+ "usubl v29.8h, v29.8b, v12.8b\n"
+ "smlal v9.4s, v28.4h, v5.4h\n"
+ "smlal2 v16.4s, v28.8h, v5.8h\n"
+ "ldr d28, [x21, x15]\n"
"usubl v28.8h, v28.8b, v12.8b\n"
- "ldr x25, [x15, #0x98]\n"
+ "smlal v22.4s, v31.4h, v2.4h\n"
+ "smlal2 v21.4s, v31.8h, v2.8h\n"
+ "ldr x24, [x12, #0x98]\n"
+ "ldr d24, [x19, x15]\n"
"smlal v15.4s, v27.4h, v5.4h\n"
- "smlal2 v17.4s, v27.8h, v5.8h\n"
- "usubl v29.8h, v29.8b, v12.8b\n"
- "ldr x24, [x15, #0x90]\n"
- "smlal v10.4s, v27.4h, v3.4h\n"
- "smlal2 v20.4s, v27.8h, v3.8h\n"
- "ldr d27, [x23, x17]\n"
+ "smlal2 v10.4s, v27.8h, v5.8h\n"
+ "usubl v24.8h, v24.8b, v12.8b\n"
+ "ldr x23, [x12, #0x90]\n"
+ "smlal v9.4s, v27.4h, v3.4h\n"
+ "smlal2 v16.4s, v27.8h, v3.8h\n"
+ "ldr d27, [x22, x15]\n"
"usubl v27.8h, v27.8b, v12.8b\n"
- "smlal v21.4s, v31.4h, v0.4h\n"
- "smlal v9.4s, v26.4h, v3.4h\n"
- "ldr x23, [x15, #0xa8]\n"
- "ldr x20, [x15, #0xa0]\n"
- "smlal2 v23.4s, v26.8h, v3.8h\n"
- "ldr d26, [x22, x17]\n"
- "smlal2 v22.4s, v31.8h, v0.8h\n"
- "ldr d24, [x21, x17]\n"
- "smlal v21.4s, v27.4h, v4.4h\n"
- "smlal v9.4s, v25.4h, v0.4h\n"
+ "smlal v23.4s, v31.4h, v0.4h\n"
+ "smlal v22.4s, v26.4h, v3.4h\n"
+ "ldr x22, [x12, #0xa8]\n"
+ "ldr x19, [x12, #0xa0]\n"
+ "smlal2 v21.4s, v26.8h, v3.8h\n"
+ "smlal2 v18.4s, v31.8h, v0.8h\n"
+ "ldr d26, [x20, x15]\n"
"usubl v26.8h, v26.8b, v12.8b\n"
- "ldr x22, [x15, #0xb0]\n"
- "smlal2 v23.4s, v25.8h, v0.8h\n"
- "ldr q30, [x13, #0x10]\n"
- "smlal2 v22.4s, v27.8h, v4.8h\n"
- "ldr d27, [x20, x17]\n"
- "smlal v21.4s, v28.4h, v1.4h\n"
+ "smlal v23.4s, v27.4h, v4.4h\n"
+ "smlal v22.4s, v25.4h, v0.4h\n"
+ "ldr x21, [x12, #0xb0]\n"
+ "ldr x20, [x12, #0xb8]\n"
+ "smlal2 v21.4s, v25.8h, v0.8h\n"
+ "smlal2 v18.4s, v27.8h, v4.8h\n"
+ "ldr d27, [x19, x15]\n"
+ "usubl v27.8h, v27.8b, v12.8b\n"
+ "smlal v23.4s, v28.4h, v1.4h\n"
"smlal v15.4s, v25.4h, v6.4h\n"
- "usubl v24.8h, v24.8b, v12.8b\n"
- "ldr x21, [x15, #0xb8]\n"
- "smlal2 v17.4s, v25.8h, v6.8h\n"
- "ldr d25, [x24, x17]\n"
- "smlal v9.4s, v29.4h, v4.4h\n"
+ "ldr x19, [x12, #0xc0]\n"
+ "ldr q19, [x13, #0x0]\n"
+ "smlal2 v10.4s, v25.8h, v6.8h\n"
+ "smlal v22.4s, v29.4h, v4.4h\n"
+ "ldr d25, [x23, x15]\n"
"usubl v25.8h, v25.8b, v12.8b\n"
- "smlal2 v23.4s, v29.8h, v4.8h\n"
- "ldr d29, [x25, x17]\n"
- "ldr q31, [x12, #0x10]\n"
- "smlal2 v22.4s, v28.8h, v1.8h\n"
- "smlal v21.4s, v26.4h, v5.4h\n"
+ "smlal2 v21.4s, v29.8h, v4.8h\n"
+ "ldr d29, [x24, x15]\n"
+ "smlal2 v18.4s, v28.8h, v1.8h\n"
"usubl v29.8h, v29.8b, v12.8b\n"
+ "smlal v23.4s, v26.4h, v5.4h\n"
"smlal v15.4s, v24.4h, v7.4h\n"
- "ldr x20, [x15, #0xc0]\n"
- "smlal2 v17.4s, v24.8h, v7.8h\n"
- "smlal v9.4s, v24.4h, v1.4h\n"
- "usubl v27.8h, v27.8b, v12.8b\n"
- "tst x7, #0x7\n"
- "smlal2 v23.4s, v24.8h, v1.8h\n"
- "ldr d24, [x23, x17]\n"
- "smlal2 v22.4s, v26.8h, v5.8h\n"
- "ldr d26, [x22, x17]\n"
- "smlal v21.4s, v29.4h, v2.4h\n"
+ "ldr q0, [x11, #0x0]\n"
+ "ldr q4, [x13, #0x10]\n"
+ "smlal2 v10.4s, v24.8h, v7.8h\n"
+ "smlal v22.4s, v24.4h, v1.4h\n"
+ "sqrdmulh v15.4s, v15.4s, v19.4s\n"
+ "ldr q31, [x11, #0x10]\n"
+ "smlal2 v21.4s, v24.8h, v1.8h\n"
+ "ldr d24, [x22, x15]\n"
+ "smlal2 v18.4s, v26.8h, v5.8h\n"
"usubl v24.8h, v24.8b, v12.8b\n"
- "smlal2 v22.4s, v29.8h, v2.8h\n"
- "add x13, x13, #0x20\n"
- "smlal v9.4s, v25.4h, v6.4h\n"
- "smlal v21.4s, v24.4h, v3.4h\n"
+ "smlal v23.4s, v29.4h, v2.4h\n"
+ "ldr d26, [x21, x15]\n"
+ "smlal2 v18.4s, v29.8h, v2.8h\n"
"usubl v26.8h, v26.8b, v12.8b\n"
- "add x12, x12, #0x20\n"
- "smlal v10.4s, v28.4h, v7.4h\n"
- "smlal2 v20.4s, v28.8h, v7.8h\n"
- "sqrdmulh v15.4s, v15.4s, v19.4s\n"
- "smlal2 v23.4s, v25.8h, v6.8h\n"
- "ldr d25, [x21, x17]\n"
- "smlal2 v22.4s, v24.8h, v3.8h\n"
+ "smlal v22.4s, v25.4h, v6.4h\n"
+ "smlal v23.4s, v24.4h, v3.4h\n"
+ "and v30.16b, v15.16b, v0.16b\n"
+ "tst x8, #0x7\n"
+ "smlal v9.4s, v28.4h, v7.4h\n"
+ "smlal2 v16.4s, v28.8h, v7.8h\n"
+ "sqrdmulh v10.4s, v10.4s, v4.4s\n"
+ "add x13, x13, #0x20\n"
+ "smlal2 v21.4s, v25.8h, v6.8h\n"
+ "ldr d25, [x20, x15]\n"
+ "smlal2 v18.4s, v24.8h, v3.8h\n"
"usubl v25.8h, v25.8b, v12.8b\n"
- "smlal v9.4s, v27.4h, v7.4h\n"
- "smlal v21.4s, v26.4h, v7.4h\n"
- "and v0.16b, v15.16b, v18.16b\n"
- "smlal v10.4s, v29.4h, v8.4h\n"
- "smlal2 v20.4s, v29.8h, v8.8h\n"
- "ldr d29, [x20, x17]\n"
+ "smlal v22.4s, v27.4h, v7.4h\n"
+ "smlal v23.4s, v26.4h, v7.4h\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "add x11, x11, #0x20\n"
+ "smlal v9.4s, v29.4h, v8.4h\n"
+ "smlal2 v16.4s, v29.8h, v8.8h\n"
+ "ldr d29, [x19, x15]\n"
"usubl v29.8h, v29.8b, v12.8b\n"
- "smlal2 v23.4s, v27.8h, v7.8h\n"
- "smlal2 v22.4s, v26.8h, v7.8h\n"
- "sqrdmulh v17.4s, v17.4s, v30.4s\n"
- "add x17, x17, #0x8\n"
- "smlal v9.4s, v24.4h, v5.4h\n"
- "smlal v21.4s, v25.4h, v6.4h\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "smlal2 v23.4s, v24.8h, v5.8h\n"
- "smlal2 v22.4s, v25.8h, v6.8h\n"
- "and v7.16b, v17.16b, v31.16b\n"
- "smlal v9.4s, v25.4h, v8.4h\n"
- "smlal v21.4s, v29.4h, v8.4h\n"
- "sqrdmulh v10.4s, v10.4s, v19.4s\n"
- "smlal2 v23.4s, v25.8h, v8.8h\n"
- "smlal2 v22.4s, v29.8h, v8.8h\n"
+ "smlal2 v21.4s, v27.8h, v7.8h\n"
+ "smlal2 v18.4s, v26.8h, v7.8h\n"
"sqrdmulh v9.4s, v9.4s, v19.4s\n"
- "sqrdmulh v21.4s, v21.4s, v19.4s\n"
- "sqadd v15.4s, v15.4s, v0.4s\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "and v19.16b, v10.16b, v18.16b\n"
- "sqrdmulh v20.4s, v20.4s, v30.4s\n"
- "and v27.16b, v9.16b, v18.16b\n"
- "sqrdmulh v23.4s, v23.4s, v30.4s\n"
- "and v0.16b, v21.16b, v18.16b\n"
- "sqrdmulh v22.4s, v22.4s, v30.4s\n"
- "sqadd v17.4s, v17.4s, v7.4s\n"
+ "add x15, x15, #0x8\n"
+ "smlal v22.4s, v24.4h, v5.4h\n"
+ "smlal v23.4s, v25.4h, v6.4h\n"
+ "and v28.16b, v9.16b, v0.16b\n"
+ "smlal2 v21.4s, v24.8h, v5.8h\n"
+ "smlal2 v18.4s, v25.8h, v6.8h\n"
+ "sqrdmulh v16.4s, v16.4s, v4.4s\n"
+ "smlal v22.4s, v25.4h, v8.4h\n"
+ "smlal v23.4s, v29.4h, v8.4h\n"
+ "sqrdmulh v22.4s, v22.4s, v19.4s\n"
+ "smlal2 v21.4s, v25.8h, v8.8h\n"
+ "smlal2 v18.4s, v29.8h, v8.8h\n"
+ "sqrdmulh v23.4s, v23.4s, v19.4s\n"
+ "and v29.16b, v22.16b, v0.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v4.4s\n"
+ "and v20.16b, v23.16b, v0.16b\n"
+ "sqrdmulh v18.4s, v18.4s, v4.4s\n"
+ "and v19.16b, v10.16b, v31.16b\n"
+ "sshr v28.4s, v28.4s, #0x1f\n"
+ "and v4.16b, v16.16b, v31.16b\n"
+ "sshr v29.4s, v29.4s, #0x1f\n"
+ "and v5.16b, v21.16b, v31.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v26.16b, v18.16b, v31.16b\n"
+ "sqadd v15.4s, v15.4s, v30.4s\n"
"sshr v19.4s, v19.4s, #0x1f\n"
- "and v5.16b, v20.16b, v31.16b\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "and v4.16b, v23.16b, v31.16b\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "and v7.16b, v22.16b, v31.16b\n"
- "sqadd v10.4s, v10.4s, v19.4s\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sqadd v9.4s, v9.4s, v27.4s\n"
+ "sqadd v9.4s, v9.4s, v28.4s\n"
"sshr v4.4s, v4.4s, #0x1f\n"
- "sqadd v21.4s, v21.4s, v0.4s\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "srshl v15.4s, v15.4s, v18.4s\n"
- "srshl v10.4s, v10.4s, v18.4s\n"
- "sqadd v20.4s, v20.4s, v5.4s\n"
- "srshl v9.4s, v9.4s, v18.4s\n"
- "sqadd v23.4s, v23.4s, v4.4s\n"
- "srshl v21.4s, v21.4s, v18.4s\n"
- "sqadd v22.4s, v22.4s, v7.4s\n"
- "srshl v17.4s, v17.4s, v31.4s\n"
+ "sqadd v22.4s, v22.4s, v29.4s\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
+ "sqadd v23.4s, v23.4s, v20.4s\n"
+ "sshr v26.4s, v26.4s, #0x1f\n"
+ "srshl v15.4s, v15.4s, v0.4s\n"
+ "sqadd v10.4s, v10.4s, v19.4s\n"
+ "srshl v9.4s, v9.4s, v0.4s\n"
+ "sqadd v16.4s, v16.4s, v4.4s\n"
+ "srshl v22.4s, v22.4s, v0.4s\n"
+ "sqadd v21.4s, v21.4s, v5.4s\n"
+ "srshl v23.4s, v23.4s, v0.4s\n"
+ "sqadd v18.4s, v18.4s, v26.4s\n"
+ "srshl v10.4s, v10.4s, v31.4s\n"
"sqxtn v15.4h, v15.4s\n"
- "srshl v20.4s, v20.4s, v31.4s\n"
- "sqxtn v10.4h, v10.4s\n"
- "srshl v23.4s, v23.4s, v31.4s\n"
+ "srshl v16.4s, v16.4s, v31.4s\n"
"sqxtn v9.4h, v9.4s\n"
- "srshl v22.4s, v22.4s, v31.4s\n"
- "sqxtn v21.4h, v21.4s\n"
- "sqxtn2 v15.8h, v17.4s\n"
- "sqxtn2 v10.8h, v20.4s\n"
- "sqxtn2 v9.8h, v23.4s\n"
- "sqxtn2 v21.8h, v22.4s\n"
+ "srshl v21.4s, v21.4s, v31.4s\n"
+ "sqxtn v22.4h, v22.4s\n"
+ "srshl v18.4s, v18.4s, v31.4s\n"
+ "sqxtn v23.4h, v23.4s\n"
+ "sqxtn2 v15.8h, v10.4s\n"
+ "sqxtn2 v9.8h, v16.4s\n"
+ "sqxtn2 v22.8h, v21.4s\n"
+ "sqxtn2 v23.8h, v18.4s\n"
"sqadd v15.8h, v15.8h, v11.8h\n"
- "sqadd v10.8h, v10.8h, v11.8h\n"
"sqadd v9.8h, v9.8h, v11.8h\n"
- "sqadd v21.8h, v21.8h, v11.8h\n"
- "smax v15.8h, v15.8h, v16.8h\n"
- "smax v10.8h, v10.8h, v16.8h\n"
- "smax v9.8h, v9.8h, v16.8h\n"
- "smax v21.8h, v21.8h, v16.8h\n"
+ "sqadd v22.8h, v22.8h, v11.8h\n"
+ "sqadd v23.8h, v23.8h, v11.8h\n"
+ "smax v15.8h, v15.8h, v17.8h\n"
+ "smax v9.8h, v9.8h, v17.8h\n"
+ "smax v22.8h, v22.8h, v17.8h\n"
+ "smax v23.8h, v23.8h, v17.8h\n"
"smin v15.8h, v15.8h, v14.8h\n"
- "smin v10.8h, v10.8h, v14.8h\n"
"smin v9.8h, v9.8h, v14.8h\n"
- "smin v21.8h, v21.8h, v14.8h\n"
+ "smin v22.8h, v22.8h, v14.8h\n"
+ "smin v23.8h, v23.8h, v14.8h\n"
"uzp1 v15.16b, v15.16b, v15.16b\n"
- "str d15, [x11, x16]\n"
- "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "str d15, [x10, x14]\n"
"uzp1 v9.16b, v9.16b, v9.16b\n"
- "str d10, [x10, x16]\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "str d9, [x9, x16]\n"
- "str d21, [x28, x16]\n"
- "add x16, x16, #0x8\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str d9, [x9, x14]\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "str d22, [x28, x14]\n"
+ "str d23, [x27, x14]\n"
+ "add x14, x14, #0x8\n"
"beq 88f\n"
- "add x14, x14, #0x48\n"
+ "add x17, x17, #0x48\n"
"3:" // Oddments
- "ldr x24, [%x[params], %[offsetof_Params_bias]]\n"
- "tbz x7, #2, 5f\n"
- "ld1 { v15.4s }, [x24], #0x10\n"
- "tbz x7, #1, 4f\n"
- "ld1 { v17.d }[0], [x24], #0x8\n"
- "tbz x7, #0, 7f\n"
- "ld1 { v17.s }[2], [x24]\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "tbz x8, #2, 5f\n"
+ "ld1 { v15.4s }, [x19], #0x10\n"
+ "tbz x8, #1, 4f\n"
+ "ld1 { v10.d }[0], [x19], #0x8\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v10.s }[2], [x19]\n"
"b 7f\n"
"4:" // Oddments: Load bias: Bit 2: Bit 1: Unset
- "tbz x7, #0, 7f\n"
- "ld1 { v17.s }[0], [x24]\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v10.s }[0], [x19]\n"
"b 7f\n"
"5:" // Oddments: Load bias: Bit 2: Unset
- "tbz x7, #1, 6f\n"
- "ld1 { v15.d }[0], [x24], #0x8\n"
- "tbz x7, #0, 7f\n"
- "ld1 { v15.s }[2], [x24]\n"
+ "tbz x8, #1, 6f\n"
+ "ld1 { v15.d }[0], [x19], #0x8\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v15.s }[2], [x19]\n"
"b 7f\n"
"6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 7f\n"
- "ld1 { v15.s }[0], [x24]\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v15.s }[0], [x19]\n"
"7:" // Oddments: Load bias: Bit 2: End
- "ldr d0, [x14, #0x0]\n"
- "ldr d1, [x14, #0x8]\n"
- "mov v10.16b, v15.16b\n"
- "mov v20.16b, v17.16b\n"
- "ldr d2, [x14, #0x10]\n"
- "ldr d3, [x14, #0x18]\n"
+ "ldr d0, [x17, #0x0]\n"
+ "ldr d1, [x17, #0x8]\n"
"mov v9.16b, v15.16b\n"
- "mov v23.16b, v17.16b\n"
- "ldr d4, [x14, #0x20]\n"
- "ldr d5, [x14, #0x28]\n"
- "mov v21.16b, v15.16b\n"
- "mov v22.16b, v17.16b\n"
- "ldr d6, [x14, #0x30]\n"
- "ldr d7, [x14, #0x38]\n"
+ "mov v16.16b, v10.16b\n"
+ "ldr d2, [x17, #0x10]\n"
+ "ldr d3, [x17, #0x18]\n"
+ "mov v22.16b, v15.16b\n"
+ "mov v21.16b, v10.16b\n"
+ "ldr d4, [x17, #0x20]\n"
+ "ldr d5, [x17, #0x28]\n"
+ "mov v23.16b, v15.16b\n"
+ "mov v18.16b, v10.16b\n"
+ "ldr d6, [x17, #0x30]\n"
+ "ldr d7, [x17, #0x38]\n"
"usubl v0.8h, v0.8b, v13.8b\n"
"usubl v1.8h, v1.8b, v13.8b\n"
- "ldr d8, [x14, #0x40]\n"
- "ldp x27, x26, [x15, #0x0]\n"
+ "ldr d8, [x17, #0x40]\n"
+ "ldp x26, x25, [x12, #0x0]\n"
"usubl v2.8h, v2.8b, v13.8b\n"
"usubl v3.8h, v3.8b, v13.8b\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "ldp x23, x22, [x15, #0x20]\n"
+ "ldp x24, x23, [x12, #0x10]\n"
+ "ldp x22, x21, [x12, #0x20]\n"
"usubl v4.8h, v4.8b, v13.8b\n"
"usubl v5.8h, v5.8b, v13.8b\n"
- "ldp x21, x20, [x15, #0x30]\n"
+ "ldp x20, x19, [x12, #0x30]\n"
"usubl v6.8h, v6.8b, v13.8b\n"
"usubl v7.8h, v7.8b, v13.8b\n"
"usubl v8.8h, v8.8b, v13.8b\n"
- "add x27, x27, x17\n"
- "add x26, x26, x17\n"
- "add x25, x25, x17\n"
- "add x24, x24, x17\n"
- "add x23, x23, x17\n"
- "add x22, x22, x17\n"
- "add x21, x21, x17\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 9f\n"
- "ld1 { v31.s }[0], [x27], #0x4\n"
- "ld1 { v30.s }[0], [x26], #0x4\n"
- "ld1 { v29.s }[0], [x25], #0x4\n"
- "ld1 { v28.s }[0], [x24], #0x4\n"
- "ld1 { v27.s }[0], [x23], #0x4\n"
- "ld1 { v26.s }[0], [x22], #0x4\n"
- "ld1 { v25.s }[0], [x21], #0x4\n"
- "ld1 { v24.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 8f\n"
- "ld1 { v31.h }[2], [x27], #0x2\n"
- "ld1 { v30.h }[2], [x26], #0x2\n"
- "ld1 { v29.h }[2], [x25], #0x2\n"
- "ld1 { v28.h }[2], [x24], #0x2\n"
- "ld1 { v27.h }[2], [x23], #0x2\n"
- "ld1 { v26.h }[2], [x22], #0x2\n"
- "ld1 { v25.h }[2], [x21], #0x2\n"
- "ld1 { v24.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 11f\n"
- "ld1 { v31.b }[6], [x27]\n"
- "ld1 { v30.b }[6], [x26]\n"
- "ld1 { v29.b }[6], [x25]\n"
- "ld1 { v28.b }[6], [x24]\n"
- "ld1 { v27.b }[6], [x23]\n"
- "ld1 { v26.b }[6], [x22]\n"
- "ld1 { v25.b }[6], [x21]\n"
- "ld1 { v24.b }[6], [x20]\n"
+ "add x26, x26, x15\n"
+ "add x25, x25, x15\n"
+ "add x24, x24, x15\n"
+ "add x23, x23, x15\n"
+ "add x22, x22, x15\n"
+ "add x21, x21, x15\n"
+ "add x20, x20, x15\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 9f\n"
+ "ld1 { v31.s }[0], [x26], #0x4\n"
+ "ld1 { v30.s }[0], [x25], #0x4\n"
+ "ld1 { v29.s }[0], [x24], #0x4\n"
+ "ld1 { v28.s }[0], [x23], #0x4\n"
+ "ld1 { v27.s }[0], [x22], #0x4\n"
+ "ld1 { v26.s }[0], [x21], #0x4\n"
+ "ld1 { v25.s }[0], [x20], #0x4\n"
+ "ld1 { v24.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 8f\n"
+ "ld1 { v31.h }[2], [x26], #0x2\n"
+ "ld1 { v30.h }[2], [x25], #0x2\n"
+ "ld1 { v29.h }[2], [x24], #0x2\n"
+ "ld1 { v28.h }[2], [x23], #0x2\n"
+ "ld1 { v27.h }[2], [x22], #0x2\n"
+ "ld1 { v26.h }[2], [x21], #0x2\n"
+ "ld1 { v25.h }[2], [x20], #0x2\n"
+ "ld1 { v24.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v31.b }[6], [x26]\n"
+ "ld1 { v30.b }[6], [x25]\n"
+ "ld1 { v29.b }[6], [x24]\n"
+ "ld1 { v28.b }[6], [x23]\n"
+ "ld1 { v27.b }[6], [x22]\n"
+ "ld1 { v26.b }[6], [x21]\n"
+ "ld1 { v25.b }[6], [x20]\n"
+ "ld1 { v24.b }[6], [x19]\n"
"b 11f\n"
"8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset
- "tbz x7, #0, 11f\n"
- "ld1 { v31.b }[4], [x27]\n"
- "ld1 { v30.b }[4], [x26]\n"
- "ld1 { v29.b }[4], [x25]\n"
- "ld1 { v28.b }[4], [x24]\n"
- "ld1 { v27.b }[4], [x23]\n"
- "ld1 { v26.b }[4], [x22]\n"
- "ld1 { v25.b }[4], [x21]\n"
- "ld1 { v24.b }[4], [x20]\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v31.b }[4], [x26]\n"
+ "ld1 { v30.b }[4], [x25]\n"
+ "ld1 { v29.b }[4], [x24]\n"
+ "ld1 { v28.b }[4], [x23]\n"
+ "ld1 { v27.b }[4], [x22]\n"
+ "ld1 { v26.b }[4], [x21]\n"
+ "ld1 { v25.b }[4], [x20]\n"
+ "ld1 { v24.b }[4], [x19]\n"
"b 11f\n"
"9:" // Oddments: Initial loads: Bit 2: Unset
- "tbz x7, #1, 10f\n"
- "ld1 { v31.h }[0], [x27], #0x2\n"
- "ld1 { v30.h }[0], [x26], #0x2\n"
- "ld1 { v29.h }[0], [x25], #0x2\n"
- "ld1 { v28.h }[0], [x24], #0x2\n"
- "ld1 { v27.h }[0], [x23], #0x2\n"
- "ld1 { v26.h }[0], [x22], #0x2\n"
- "ld1 { v25.h }[0], [x21], #0x2\n"
- "ld1 { v24.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 11f\n"
- "ld1 { v31.b }[2], [x27]\n"
- "ld1 { v30.b }[2], [x26]\n"
- "ld1 { v29.b }[2], [x25]\n"
- "ld1 { v28.b }[2], [x24]\n"
- "ld1 { v27.b }[2], [x23]\n"
- "ld1 { v26.b }[2], [x22]\n"
- "ld1 { v25.b }[2], [x21]\n"
- "ld1 { v24.b }[2], [x20]\n"
+ "tbz x8, #1, 10f\n"
+ "ld1 { v31.h }[0], [x26], #0x2\n"
+ "ld1 { v30.h }[0], [x25], #0x2\n"
+ "ld1 { v29.h }[0], [x24], #0x2\n"
+ "ld1 { v28.h }[0], [x23], #0x2\n"
+ "ld1 { v27.h }[0], [x22], #0x2\n"
+ "ld1 { v26.h }[0], [x21], #0x2\n"
+ "ld1 { v25.h }[0], [x20], #0x2\n"
+ "ld1 { v24.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v31.b }[2], [x26]\n"
+ "ld1 { v30.b }[2], [x25]\n"
+ "ld1 { v29.b }[2], [x24]\n"
+ "ld1 { v28.b }[2], [x23]\n"
+ "ld1 { v27.b }[2], [x22]\n"
+ "ld1 { v26.b }[2], [x21]\n"
+ "ld1 { v25.b }[2], [x20]\n"
+ "ld1 { v24.b }[2], [x19]\n"
"b 11f\n"
"10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 11f\n"
- "ld1 { v31.b }[0], [x27]\n"
- "ld1 { v30.b }[0], [x26]\n"
- "ld1 { v29.b }[0], [x25]\n"
- "ld1 { v28.b }[0], [x24]\n"
- "ld1 { v27.b }[0], [x23]\n"
- "ld1 { v26.b }[0], [x22]\n"
- "ld1 { v25.b }[0], [x21]\n"
- "ld1 { v24.b }[0], [x20]\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v31.b }[0], [x26]\n"
+ "ld1 { v30.b }[0], [x25]\n"
+ "ld1 { v29.b }[0], [x24]\n"
+ "ld1 { v28.b }[0], [x23]\n"
+ "ld1 { v27.b }[0], [x22]\n"
+ "ld1 { v26.b }[0], [x21]\n"
+ "ld1 { v25.b }[0], [x20]\n"
+ "ld1 { v24.b }[0], [x19]\n"
"11:" // Oddments: Initial loads: Bit 2: End
"usubl v31.8h, v31.8b, v12.8b\n"
"smlal v15.4s, v31.4h, v8.4h\n"
- "smlal2 v17.4s, v31.8h, v8.8h\n"
- "ldr x24, [x15, #0x40]\n"
+ "smlal2 v10.4s, v31.8h, v8.8h\n"
+ "ldr x24, [x12, #0x40]\n"
"usubl v30.8h, v30.8b, v12.8b\n"
"smlal v15.4s, v30.4h, v0.4h\n"
- "smlal2 v17.4s, v30.8h, v0.8h\n"
- "add x24, x24, x17\n"
+ "smlal2 v10.4s, v30.8h, v0.8h\n"
+ "add x24, x24, x15\n"
"usubl v29.8h, v29.8b, v12.8b\n"
- "smlal v10.4s, v31.4h, v6.4h\n"
- "smlal2 v20.4s, v31.8h, v6.8h\n"
+ "smlal v9.4s, v31.4h, v6.4h\n"
+ "smlal2 v16.4s, v31.8h, v6.8h\n"
"smlal v15.4s, v29.4h, v1.4h\n"
- "smlal2 v17.4s, v29.8h, v1.8h\n"
+ "smlal2 v10.4s, v29.8h, v1.8h\n"
"usubl v28.8h, v28.8b, v12.8b\n"
"usubl v26.8h, v26.8b, v12.8b\n"
- "smlal v10.4s, v28.4h, v1.4h\n"
- "smlal2 v20.4s, v28.8h, v1.8h\n"
+ "smlal v9.4s, v28.4h, v1.4h\n"
+ "smlal2 v16.4s, v28.8h, v1.8h\n"
"smlal v15.4s, v26.4h, v3.4h\n"
- "smlal2 v17.4s, v26.8h, v3.8h\n"
+ "smlal2 v10.4s, v26.8h, v3.8h\n"
"usubl v27.8h, v27.8b, v12.8b\n"
"usubl v25.8h, v25.8b, v12.8b\n"
- "smlal v10.4s, v27.4h, v2.4h\n"
- "smlal2 v20.4s, v27.8h, v2.8h\n"
+ "smlal v9.4s, v27.4h, v2.4h\n"
+ "smlal2 v16.4s, v27.8h, v2.8h\n"
"smlal v15.4s, v25.4h, v4.4h\n"
- "smlal2 v17.4s, v25.8h, v4.8h\n"
+ "smlal2 v10.4s, v25.8h, v4.8h\n"
"usubl v24.8h, v24.8b, v12.8b\n"
- "smlal v9.4s, v31.4h, v2.4h\n"
- "smlal2 v23.4s, v31.8h, v2.8h\n"
- "smlal v21.4s, v31.4h, v0.4h\n"
- "smlal2 v22.4s, v31.8h, v0.8h\n"
+ "smlal v22.4s, v31.4h, v2.4h\n"
+ "smlal2 v21.4s, v31.8h, v2.8h\n"
+ "smlal v23.4s, v31.4h, v0.4h\n"
+ "smlal2 v18.4s, v31.8h, v0.8h\n"
"smlal v15.4s, v24.4h, v2.4h\n"
- "smlal2 v17.4s, v24.8h, v2.8h\n"
- "smlal v10.4s, v24.4h, v0.4h\n"
- "smlal2 v20.4s, v24.8h, v0.8h\n"
- "tbz x7, #2, 13f\n"
+ "smlal2 v10.4s, v24.8h, v2.8h\n"
+ "smlal v9.4s, v24.4h, v0.4h\n"
+ "smlal2 v16.4s, v24.8h, v0.8h\n"
+ "tbz x8, #2, 13f\n"
"ld1 { v29.s }[0], [x24], #0x4\n"
- "tbz x7, #1, 12f\n"
+ "tbz x8, #1, 12f\n"
"ld1 { v29.h }[2], [x24], #0x2\n"
- "tbz x7, #0, 15f\n"
+ "tbz x8, #0, 15f\n"
"ld1 { v29.b }[6], [x24]\n"
"b 15f\n"
"12:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 15f\n"
+ "tbz x8, #0, 15f\n"
"ld1 { v29.b }[4], [x24]\n"
"b 15f\n"
"13:" // Oddments: Load (1, 3): Bit 2: Unset
- "tbz x7, #1, 14f\n"
+ "tbz x8, #1, 14f\n"
"ld1 { v29.h }[0], [x24], #0x2\n"
- "tbz x7, #0, 15f\n"
+ "tbz x8, #0, 15f\n"
"ld1 { v29.b }[2], [x24]\n"
"b 15f\n"
"14:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 15f\n"
+ "tbz x8, #0, 15f\n"
"ld1 { v29.b }[0], [x24]\n"
"15:" // Oddments: Load (1, 3): Bit 2: End
"usubl v29.8h, v29.8b, v12.8b\n"
- "ldr x22, [x15, #0x48]\n"
- "smlal v10.4s, v29.4h, v4.4h\n"
- "smlal2 v20.4s, v29.8h, v4.8h\n"
- "add x22, x22, x17\n"
- "tbz x7, #2, 17f\n"
- "ld1 { v28.s }[0], [x22], #0x4\n"
- "tbz x7, #1, 16f\n"
- "ld1 { v28.h }[2], [x22], #0x2\n"
- "tbz x7, #0, 19f\n"
- "ld1 { v28.b }[6], [x22]\n"
+ "ldr x23, [x12, #0x48]\n"
+ "smlal v9.4s, v29.4h, v4.4h\n"
+ "smlal2 v16.4s, v29.8h, v4.8h\n"
+ "add x23, x23, x15\n"
+ "tbz x8, #2, 17f\n"
+ "ld1 { v28.s }[0], [x23], #0x4\n"
+ "tbz x8, #1, 16f\n"
+ "ld1 { v28.h }[2], [x23], #0x2\n"
+ "tbz x8, #0, 19f\n"
+ "ld1 { v28.b }[6], [x23]\n"
"b 19f\n"
"16:" // Oddments: Load (1, 4): Bit 2: Bit 1: Unset
- "tbz x7, #0, 19f\n"
- "ld1 { v28.b }[4], [x22]\n"
+ "tbz x8, #0, 19f\n"
+ "ld1 { v28.b }[4], [x23]\n"
"b 19f\n"
"17:" // Oddments: Load (1, 4): Bit 2: Unset
- "tbz x7, #1, 18f\n"
- "ld1 { v28.h }[0], [x22], #0x2\n"
- "tbz x7, #0, 19f\n"
- "ld1 { v28.b }[2], [x22]\n"
+ "tbz x8, #1, 18f\n"
+ "ld1 { v28.h }[0], [x23], #0x2\n"
+ "tbz x8, #0, 19f\n"
+ "ld1 { v28.b }[2], [x23]\n"
"b 19f\n"
"18:" // Oddments: Load (1, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 19f\n"
- "ld1 { v28.b }[0], [x22]\n"
+ "tbz x8, #0, 19f\n"
+ "ld1 { v28.b }[0], [x23]\n"
"19:" // Oddments: Load (1, 4): Bit 2: End
"usubl v28.8h, v28.8b, v12.8b\n"
- "ldr x21, [x15, #0x50]\n"
- "smlal v10.4s, v28.4h, v5.4h\n"
- "smlal2 v20.4s, v28.8h, v5.8h\n"
- "add x21, x21, x17\n"
- "tbz x7, #2, 21f\n"
+ "ldr x21, [x12, #0x50]\n"
+ "smlal v9.4s, v28.4h, v5.4h\n"
+ "smlal2 v16.4s, v28.8h, v5.8h\n"
+ "add x21, x21, x15\n"
+ "tbz x8, #2, 21f\n"
"ld1 { v27.s }[0], [x21], #0x4\n"
- "tbz x7, #1, 20f\n"
+ "tbz x8, #1, 20f\n"
"ld1 { v27.h }[2], [x21], #0x2\n"
- "tbz x7, #0, 23f\n"
+ "tbz x8, #0, 23f\n"
"ld1 { v27.b }[6], [x21]\n"
"b 23f\n"
"20:" // Oddments: Load (1, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 23f\n"
+ "tbz x8, #0, 23f\n"
"ld1 { v27.b }[4], [x21]\n"
"b 23f\n"
"21:" // Oddments: Load (1, 2): Bit 2: Unset
- "tbz x7, #1, 22f\n"
+ "tbz x8, #1, 22f\n"
"ld1 { v27.h }[0], [x21], #0x2\n"
- "tbz x7, #0, 23f\n"
+ "tbz x8, #0, 23f\n"
"ld1 { v27.b }[2], [x21]\n"
"b 23f\n"
"22:" // Oddments: Load (1, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 23f\n"
+ "tbz x8, #0, 23f\n"
"ld1 { v27.b }[0], [x21]\n"
"23:" // Oddments: Load (1, 2): Bit 2: End
"usubl v27.8h, v27.8b, v12.8b\n"
- "ldr x20, [x15, #0x58]\n"
+ "ldr x19, [x12, #0x58]\n"
"smlal v15.4s, v27.4h, v5.4h\n"
- "smlal2 v17.4s, v27.8h, v5.8h\n"
- "smlal v10.4s, v27.4h, v3.4h\n"
- "smlal2 v20.4s, v27.8h, v3.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 25f\n"
- "ld1 { v26.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 24f\n"
- "ld1 { v26.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 27f\n"
- "ld1 { v26.b }[6], [x20]\n"
+ "smlal2 v10.4s, v27.8h, v5.8h\n"
+ "smlal v9.4s, v27.4h, v3.4h\n"
+ "smlal2 v16.4s, v27.8h, v3.8h\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 25f\n"
+ "ld1 { v26.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 24f\n"
+ "ld1 { v26.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v26.b }[6], [x19]\n"
"b 27f\n"
"24:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 27f\n"
- "ld1 { v26.b }[4], [x20]\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v26.b }[4], [x19]\n"
"b 27f\n"
"25:" // Oddments: Load (3, 0): Bit 2: Unset
- "tbz x7, #1, 26f\n"
- "ld1 { v26.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 27f\n"
- "ld1 { v26.b }[2], [x20]\n"
+ "tbz x8, #1, 26f\n"
+ "ld1 { v26.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v26.b }[2], [x19]\n"
"b 27f\n"
"26:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 27f\n"
- "ld1 { v26.b }[0], [x20]\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v26.b }[0], [x19]\n"
"27:" // Oddments: Load (3, 0): Bit 2: End
"usubl v26.8h, v26.8b, v12.8b\n"
- "ldr x20, [x15, #0x60]\n"
- "smlal v9.4s, v26.4h, v3.4h\n"
- "smlal2 v23.4s, v26.8h, v3.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 29f\n"
+ "ldr x20, [x12, #0x60]\n"
+ "smlal v22.4s, v26.4h, v3.4h\n"
+ "smlal2 v21.4s, v26.8h, v3.8h\n"
+ "add x20, x20, x15\n"
+ "tbz x8, #2, 29f\n"
"ld1 { v25.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 28f\n"
+ "tbz x8, #1, 28f\n"
"ld1 { v25.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 31f\n"
+ "tbz x8, #0, 31f\n"
"ld1 { v25.b }[6], [x20]\n"
"b 31f\n"
"28:" // Oddments: Load (2, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 31f\n"
+ "tbz x8, #0, 31f\n"
"ld1 { v25.b }[4], [x20]\n"
"b 31f\n"
"29:" // Oddments: Load (2, 0): Bit 2: Unset
- "tbz x7, #1, 30f\n"
+ "tbz x8, #1, 30f\n"
"ld1 { v25.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 31f\n"
+ "tbz x8, #0, 31f\n"
"ld1 { v25.b }[2], [x20]\n"
"b 31f\n"
"30:" // Oddments: Load (2, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 31f\n"
+ "tbz x8, #0, 31f\n"
"ld1 { v25.b }[0], [x20]\n"
"31:" // Oddments: Load (2, 0): Bit 2: End
"usubl v25.8h, v25.8b, v12.8b\n"
- "ldr x20, [x15, #0x68]\n"
+ "ldr x19, [x12, #0x68]\n"
"smlal v15.4s, v25.4h, v6.4h\n"
- "smlal2 v17.4s, v25.8h, v6.8h\n"
- "smlal v9.4s, v25.4h, v0.4h\n"
- "smlal2 v23.4s, v25.8h, v0.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 33f\n"
- "ld1 { v29.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 32f\n"
- "ld1 { v29.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 35f\n"
- "ld1 { v29.b }[6], [x20]\n"
+ "smlal2 v10.4s, v25.8h, v6.8h\n"
+ "smlal v22.4s, v25.4h, v0.4h\n"
+ "smlal2 v21.4s, v25.8h, v0.8h\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 33f\n"
+ "ld1 { v29.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 32f\n"
+ "ld1 { v29.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v29.b }[6], [x19]\n"
"b 35f\n"
"32:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset
- "tbz x7, #0, 35f\n"
- "ld1 { v29.b }[4], [x20]\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v29.b }[4], [x19]\n"
"b 35f\n"
"33:" // Oddments: Load (3, 1): Bit 2: Unset
- "tbz x7, #1, 34f\n"
- "ld1 { v29.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 35f\n"
- "ld1 { v29.b }[2], [x20]\n"
+ "tbz x8, #1, 34f\n"
+ "ld1 { v29.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v29.b }[2], [x19]\n"
"b 35f\n"
"34:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 35f\n"
- "ld1 { v29.b }[0], [x20]\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v29.b }[0], [x19]\n"
"35:" // Oddments: Load (3, 1): Bit 2: End
"usubl v29.8h, v29.8b, v12.8b\n"
- "ldr x21, [x15, #0x70]\n"
- "smlal v9.4s, v29.4h, v4.4h\n"
- "smlal2 v23.4s, v29.8h, v4.8h\n"
- "add x21, x21, x17\n"
- "tbz x7, #2, 37f\n"
- "ld1 { v24.s }[0], [x21], #0x4\n"
- "tbz x7, #1, 36f\n"
- "ld1 { v24.h }[2], [x21], #0x2\n"
- "tbz x7, #0, 39f\n"
- "ld1 { v24.b }[6], [x21]\n"
+ "ldr x19, [x12, #0x70]\n"
+ "smlal v22.4s, v29.4h, v4.4h\n"
+ "smlal2 v21.4s, v29.8h, v4.8h\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 37f\n"
+ "ld1 { v24.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 36f\n"
+ "ld1 { v24.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v24.b }[6], [x19]\n"
"b 39f\n"
"36:" // Oddments: Load (2, 1): Bit 2: Bit 1: Unset
- "tbz x7, #0, 39f\n"
- "ld1 { v24.b }[4], [x21]\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v24.b }[4], [x19]\n"
"b 39f\n"
"37:" // Oddments: Load (2, 1): Bit 2: Unset
- "tbz x7, #1, 38f\n"
- "ld1 { v24.h }[0], [x21], #0x2\n"
- "tbz x7, #0, 39f\n"
- "ld1 { v24.b }[2], [x21]\n"
+ "tbz x8, #1, 38f\n"
+ "ld1 { v24.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v24.b }[2], [x19]\n"
"b 39f\n"
"38:" // Oddments: Load (2, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 39f\n"
- "ld1 { v24.b }[0], [x21]\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v24.b }[0], [x19]\n"
"39:" // Oddments: Load (2, 1): Bit 2: End
"usubl v24.8h, v24.8b, v12.8b\n"
- "ldr x23, [x15, #0x78]\n"
+ "ldr x22, [x12, #0x78]\n"
"smlal v15.4s, v24.4h, v7.4h\n"
- "smlal2 v17.4s, v24.8h, v7.8h\n"
- "smlal v9.4s, v24.4h, v1.4h\n"
- "smlal2 v23.4s, v24.8h, v1.8h\n"
- "add x23, x23, x17\n"
- "tbz x7, #2, 41f\n"
- "ld1 { v27.s }[0], [x23], #0x4\n"
- "tbz x7, #1, 40f\n"
- "ld1 { v27.h }[2], [x23], #0x2\n"
- "tbz x7, #0, 43f\n"
- "ld1 { v27.b }[6], [x23]\n"
+ "smlal2 v10.4s, v24.8h, v7.8h\n"
+ "smlal v22.4s, v24.4h, v1.4h\n"
+ "smlal2 v21.4s, v24.8h, v1.8h\n"
+ "add x22, x22, x15\n"
+ "tbz x8, #2, 41f\n"
+ "ld1 { v27.s }[0], [x22], #0x4\n"
+ "tbz x8, #1, 40f\n"
+ "ld1 { v27.h }[2], [x22], #0x2\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v27.b }[6], [x22]\n"
"b 43f\n"
"40:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 43f\n"
- "ld1 { v27.b }[4], [x23]\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v27.b }[4], [x22]\n"
"b 43f\n"
"41:" // Oddments: Load (3, 3): Bit 2: Unset
- "tbz x7, #1, 42f\n"
- "ld1 { v27.h }[0], [x23], #0x2\n"
- "tbz x7, #0, 43f\n"
- "ld1 { v27.b }[2], [x23]\n"
+ "tbz x8, #1, 42f\n"
+ "ld1 { v27.h }[0], [x22], #0x2\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v27.b }[2], [x22]\n"
"b 43f\n"
"42:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 43f\n"
- "ld1 { v27.b }[0], [x23]\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v27.b }[0], [x22]\n"
"43:" // Oddments: Load (3, 3): Bit 2: End
"usubl v27.8h, v27.8b, v12.8b\n"
- "ldr x21, [x15, #0x80]\n"
- "smlal v21.4s, v27.4h, v4.4h\n"
- "smlal2 v22.4s, v27.8h, v4.8h\n"
- "add x21, x21, x17\n"
- "tbz x7, #2, 45f\n"
+ "ldr x21, [x12, #0x80]\n"
+ "smlal v23.4s, v27.4h, v4.4h\n"
+ "smlal2 v18.4s, v27.8h, v4.8h\n"
+ "add x21, x21, x15\n"
+ "tbz x8, #2, 45f\n"
"ld1 { v28.s }[0], [x21], #0x4\n"
- "tbz x7, #1, 44f\n"
+ "tbz x8, #1, 44f\n"
"ld1 { v28.h }[2], [x21], #0x2\n"
- "tbz x7, #0, 47f\n"
+ "tbz x8, #0, 47f\n"
"ld1 { v28.b }[6], [x21]\n"
"b 47f\n"
"44:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 47f\n"
+ "tbz x8, #0, 47f\n"
"ld1 { v28.b }[4], [x21]\n"
"b 47f\n"
"45:" // Oddments: Load (2, 3): Bit 2: Unset
- "tbz x7, #1, 46f\n"
+ "tbz x8, #1, 46f\n"
"ld1 { v28.h }[0], [x21], #0x2\n"
- "tbz x7, #0, 47f\n"
+ "tbz x8, #0, 47f\n"
"ld1 { v28.b }[2], [x21]\n"
"b 47f\n"
"46:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 47f\n"
+ "tbz x8, #0, 47f\n"
"ld1 { v28.b }[0], [x21]\n"
"47:" // Oddments: Load (2, 3): Bit 2: End
"usubl v28.8h, v28.8b, v12.8b\n"
- "ldr x22, [x15, #0x88]\n"
- "smlal v10.4s, v28.4h, v7.4h\n"
- "smlal2 v20.4s, v28.8h, v7.8h\n"
- "smlal v21.4s, v28.4h, v1.4h\n"
- "smlal2 v22.4s, v28.8h, v1.8h\n"
- "add x22, x22, x17\n"
- "tbz x7, #2, 49f\n"
- "ld1 { v26.s }[0], [x22], #0x4\n"
- "tbz x7, #1, 48f\n"
- "ld1 { v26.h }[2], [x22], #0x2\n"
- "tbz x7, #0, 51f\n"
- "ld1 { v26.b }[6], [x22]\n"
+ "ldr x20, [x12, #0x88]\n"
+ "smlal v9.4s, v28.4h, v7.4h\n"
+ "smlal2 v16.4s, v28.8h, v7.8h\n"
+ "smlal v23.4s, v28.4h, v1.4h\n"
+ "smlal2 v18.4s, v28.8h, v1.8h\n"
+ "add x20, x20, x15\n"
+ "tbz x8, #2, 49f\n"
+ "ld1 { v26.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 48f\n"
+ "ld1 { v26.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v26.b }[6], [x20]\n"
"b 51f\n"
"48:" // Oddments: Load (3, 4): Bit 2: Bit 1: Unset
- "tbz x7, #0, 51f\n"
- "ld1 { v26.b }[4], [x22]\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v26.b }[4], [x20]\n"
"b 51f\n"
"49:" // Oddments: Load (3, 4): Bit 2: Unset
- "tbz x7, #1, 50f\n"
- "ld1 { v26.h }[0], [x22], #0x2\n"
- "tbz x7, #0, 51f\n"
- "ld1 { v26.b }[2], [x22]\n"
+ "tbz x8, #1, 50f\n"
+ "ld1 { v26.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v26.b }[2], [x20]\n"
"b 51f\n"
"50:" // Oddments: Load (3, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 51f\n"
- "ld1 { v26.b }[0], [x22]\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v26.b }[0], [x20]\n"
"51:" // Oddments: Load (3, 4): Bit 2: End
"usubl v26.8h, v26.8b, v12.8b\n"
- "ldr x24, [x15, #0x90]\n"
- "smlal v21.4s, v26.4h, v5.4h\n"
- "smlal2 v22.4s, v26.8h, v5.8h\n"
- "add x24, x24, x17\n"
- "tbz x7, #2, 53f\n"
- "ld1 { v25.s }[0], [x24], #0x4\n"
- "tbz x7, #1, 52f\n"
- "ld1 { v25.h }[2], [x24], #0x2\n"
- "tbz x7, #0, 55f\n"
- "ld1 { v25.b }[6], [x24]\n"
+ "ldr x23, [x12, #0x90]\n"
+ "smlal v23.4s, v26.4h, v5.4h\n"
+ "smlal2 v18.4s, v26.8h, v5.8h\n"
+ "add x23, x23, x15\n"
+ "tbz x8, #2, 53f\n"
+ "ld1 { v25.s }[0], [x23], #0x4\n"
+ "tbz x8, #1, 52f\n"
+ "ld1 { v25.h }[2], [x23], #0x2\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v25.b }[6], [x23]\n"
"b 55f\n"
"52:" // Oddments: Load (4, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 55f\n"
- "ld1 { v25.b }[4], [x24]\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v25.b }[4], [x23]\n"
"b 55f\n"
"53:" // Oddments: Load (4, 0): Bit 2: Unset
- "tbz x7, #1, 54f\n"
- "ld1 { v25.h }[0], [x24], #0x2\n"
- "tbz x7, #0, 55f\n"
- "ld1 { v25.b }[2], [x24]\n"
+ "tbz x8, #1, 54f\n"
+ "ld1 { v25.h }[0], [x23], #0x2\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v25.b }[2], [x23]\n"
"b 55f\n"
"54:" // Oddments: Load (4, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 55f\n"
- "ld1 { v25.b }[0], [x24]\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v25.b }[0], [x23]\n"
"55:" // Oddments: Load (4, 0): Bit 2: End
"usubl v25.8h, v25.8b, v12.8b\n"
- "ldr x25, [x15, #0x98]\n"
- "smlal v9.4s, v25.4h, v6.4h\n"
- "smlal2 v23.4s, v25.8h, v6.8h\n"
- "add x25, x25, x17\n"
- "tbz x7, #2, 57f\n"
- "ld1 { v29.s }[0], [x25], #0x4\n"
- "tbz x7, #1, 56f\n"
- "ld1 { v29.h }[2], [x25], #0x2\n"
- "tbz x7, #0, 59f\n"
- "ld1 { v29.b }[6], [x25]\n"
+ "ldr x24, [x12, #0x98]\n"
+ "smlal v22.4s, v25.4h, v6.4h\n"
+ "smlal2 v21.4s, v25.8h, v6.8h\n"
+ "add x24, x24, x15\n"
+ "tbz x8, #2, 57f\n"
+ "ld1 { v29.s }[0], [x24], #0x4\n"
+ "tbz x8, #1, 56f\n"
+ "ld1 { v29.h }[2], [x24], #0x2\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v29.b }[6], [x24]\n"
"b 59f\n"
"56:" // Oddments: Load (2, 4): Bit 2: Bit 1: Unset
- "tbz x7, #0, 59f\n"
- "ld1 { v29.b }[4], [x25]\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v29.b }[4], [x24]\n"
"b 59f\n"
"57:" // Oddments: Load (2, 4): Bit 2: Unset
- "tbz x7, #1, 58f\n"
- "ld1 { v29.h }[0], [x25], #0x2\n"
- "tbz x7, #0, 59f\n"
- "ld1 { v29.b }[2], [x25]\n"
+ "tbz x8, #1, 58f\n"
+ "ld1 { v29.h }[0], [x24], #0x2\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v29.b }[2], [x24]\n"
"b 59f\n"
"58:" // Oddments: Load (2, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 59f\n"
- "ld1 { v29.b }[0], [x25]\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v29.b }[0], [x24]\n"
"59:" // Oddments: Load (2, 4): Bit 2: End
"usubl v29.8h, v29.8b, v12.8b\n"
- "ldr x20, [x15, #0xa0]\n"
- "smlal v10.4s, v29.4h, v8.4h\n"
- "smlal2 v20.4s, v29.8h, v8.8h\n"
- "smlal v21.4s, v29.4h, v2.4h\n"
- "smlal2 v22.4s, v29.8h, v2.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 61f\n"
- "ld1 { v27.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 60f\n"
- "ld1 { v27.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 63f\n"
- "ld1 { v27.b }[6], [x20]\n"
+ "ldr x19, [x12, #0xa0]\n"
+ "smlal v9.4s, v29.4h, v8.4h\n"
+ "smlal2 v16.4s, v29.8h, v8.8h\n"
+ "smlal v23.4s, v29.4h, v2.4h\n"
+ "smlal2 v18.4s, v29.8h, v2.8h\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 61f\n"
+ "ld1 { v27.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 60f\n"
+ "ld1 { v27.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 63f\n"
+ "ld1 { v27.b }[6], [x19]\n"
"b 63f\n"
"60:" // Oddments: Load (4, 1): Bit 2: Bit 1: Unset
- "tbz x7, #0, 63f\n"
- "ld1 { v27.b }[4], [x20]\n"
+ "tbz x8, #0, 63f\n"
+ "ld1 { v27.b }[4], [x19]\n"
"b 63f\n"
"61:" // Oddments: Load (4, 1): Bit 2: Unset
- "tbz x7, #1, 62f\n"
- "ld1 { v27.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 63f\n"
- "ld1 { v27.b }[2], [x20]\n"
+ "tbz x8, #1, 62f\n"
+ "ld1 { v27.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 63f\n"
+ "ld1 { v27.b }[2], [x19]\n"
"b 63f\n"
"62:" // Oddments: Load (4, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 63f\n"
- "ld1 { v27.b }[0], [x20]\n"
+ "tbz x8, #0, 63f\n"
+ "ld1 { v27.b }[0], [x19]\n"
"63:" // Oddments: Load (4, 1): Bit 2: End
"usubl v27.8h, v27.8b, v12.8b\n"
- "ldr x23, [x15, #0xa8]\n"
- "smlal v9.4s, v27.4h, v7.4h\n"
- "smlal2 v23.4s, v27.8h, v7.8h\n"
- "add x23, x23, x17\n"
- "tbz x7, #2, 65f\n"
- "ld1 { v24.s }[0], [x23], #0x4\n"
- "tbz x7, #1, 64f\n"
- "ld1 { v24.h }[2], [x23], #0x2\n"
- "tbz x7, #0, 67f\n"
- "ld1 { v24.b }[6], [x23]\n"
+ "ldr x22, [x12, #0xa8]\n"
+ "smlal v22.4s, v27.4h, v7.4h\n"
+ "smlal2 v21.4s, v27.8h, v7.8h\n"
+ "add x22, x22, x15\n"
+ "tbz x8, #2, 65f\n"
+ "ld1 { v24.s }[0], [x22], #0x4\n"
+ "tbz x8, #1, 64f\n"
+ "ld1 { v24.h }[2], [x22], #0x2\n"
+ "tbz x8, #0, 67f\n"
+ "ld1 { v24.b }[6], [x22]\n"
"b 67f\n"
"64:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 67f\n"
- "ld1 { v24.b }[4], [x23]\n"
+ "tbz x8, #0, 67f\n"
+ "ld1 { v24.b }[4], [x22]\n"
"b 67f\n"
"65:" // Oddments: Load (3, 2): Bit 2: Unset
- "tbz x7, #1, 66f\n"
- "ld1 { v24.h }[0], [x23], #0x2\n"
- "tbz x7, #0, 67f\n"
- "ld1 { v24.b }[2], [x23]\n"
+ "tbz x8, #1, 66f\n"
+ "ld1 { v24.h }[0], [x22], #0x2\n"
+ "tbz x8, #0, 67f\n"
+ "ld1 { v24.b }[2], [x22]\n"
"b 67f\n"
"66:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 67f\n"
- "ld1 { v24.b }[0], [x23]\n"
+ "tbz x8, #0, 67f\n"
+ "ld1 { v24.b }[0], [x22]\n"
"67:" // Oddments: Load (3, 2): Bit 2: End
"usubl v24.8h, v24.8b, v12.8b\n"
- "ldr x22, [x15, #0xb0]\n"
- "smlal v9.4s, v24.4h, v5.4h\n"
- "smlal2 v23.4s, v24.8h, v5.8h\n"
- "smlal v21.4s, v24.4h, v3.4h\n"
- "smlal2 v22.4s, v24.8h, v3.8h\n"
- "add x22, x22, x17\n"
- "tbz x7, #2, 69f\n"
- "ld1 { v26.s }[0], [x22], #0x4\n"
- "tbz x7, #1, 68f\n"
- "ld1 { v26.h }[2], [x22], #0x2\n"
- "tbz x7, #0, 71f\n"
- "ld1 { v26.b }[6], [x22]\n"
+ "ldr x21, [x12, #0xb0]\n"
+ "smlal v22.4s, v24.4h, v5.4h\n"
+ "smlal2 v21.4s, v24.8h, v5.8h\n"
+ "smlal v23.4s, v24.4h, v3.4h\n"
+ "smlal2 v18.4s, v24.8h, v3.8h\n"
+ "add x21, x21, x15\n"
+ "tbz x8, #2, 69f\n"
+ "ld1 { v26.s }[0], [x21], #0x4\n"
+ "tbz x8, #1, 68f\n"
+ "ld1 { v26.h }[2], [x21], #0x2\n"
+ "tbz x8, #0, 71f\n"
+ "ld1 { v26.b }[6], [x21]\n"
"b 71f\n"
"68:" // Oddments: Load (4, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 71f\n"
- "ld1 { v26.b }[4], [x22]\n"
+ "tbz x8, #0, 71f\n"
+ "ld1 { v26.b }[4], [x21]\n"
"b 71f\n"
"69:" // Oddments: Load (4, 3): Bit 2: Unset
- "tbz x7, #1, 70f\n"
- "ld1 { v26.h }[0], [x22], #0x2\n"
- "tbz x7, #0, 71f\n"
- "ld1 { v26.b }[2], [x22]\n"
+ "tbz x8, #1, 70f\n"
+ "ld1 { v26.h }[0], [x21], #0x2\n"
+ "tbz x8, #0, 71f\n"
+ "ld1 { v26.b }[2], [x21]\n"
"b 71f\n"
"70:" // Oddments: Load (4, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 71f\n"
- "ld1 { v26.b }[0], [x22]\n"
+ "tbz x8, #0, 71f\n"
+ "ld1 { v26.b }[0], [x21]\n"
"71:" // Oddments: Load (4, 3): Bit 2: End
"usubl v26.8h, v26.8b, v12.8b\n"
- "ldr x21, [x15, #0xb8]\n"
- "smlal v21.4s, v26.4h, v7.4h\n"
- "smlal2 v22.4s, v26.8h, v7.8h\n"
- "add x21, x21, x17\n"
- "tbz x7, #2, 73f\n"
- "ld1 { v25.s }[0], [x21], #0x4\n"
- "tbz x7, #1, 72f\n"
- "ld1 { v25.h }[2], [x21], #0x2\n"
- "tbz x7, #0, 75f\n"
- "ld1 { v25.b }[6], [x21]\n"
+ "ldr x20, [x12, #0xb8]\n"
+ "smlal v23.4s, v26.4h, v7.4h\n"
+ "smlal2 v18.4s, v26.8h, v7.8h\n"
+ "add x20, x20, x15\n"
+ "tbz x8, #2, 73f\n"
+ "ld1 { v25.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 72f\n"
+ "ld1 { v25.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 75f\n"
+ "ld1 { v25.b }[6], [x20]\n"
"b 75f\n"
"72:" // Oddments: Load (4, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 75f\n"
- "ld1 { v25.b }[4], [x21]\n"
+ "tbz x8, #0, 75f\n"
+ "ld1 { v25.b }[4], [x20]\n"
"b 75f\n"
"73:" // Oddments: Load (4, 2): Bit 2: Unset
- "tbz x7, #1, 74f\n"
- "ld1 { v25.h }[0], [x21], #0x2\n"
- "tbz x7, #0, 75f\n"
- "ld1 { v25.b }[2], [x21]\n"
+ "tbz x8, #1, 74f\n"
+ "ld1 { v25.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 75f\n"
+ "ld1 { v25.b }[2], [x20]\n"
"b 75f\n"
"74:" // Oddments: Load (4, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 75f\n"
- "ld1 { v25.b }[0], [x21]\n"
+ "tbz x8, #0, 75f\n"
+ "ld1 { v25.b }[0], [x20]\n"
"75:" // Oddments: Load (4, 2): Bit 2: End
"usubl v25.8h, v25.8b, v12.8b\n"
- "ldr x20, [x15, #0xc0]\n"
- "smlal v9.4s, v25.4h, v8.4h\n"
- "smlal2 v23.4s, v25.8h, v8.8h\n"
- "smlal v21.4s, v25.4h, v6.4h\n"
- "smlal2 v22.4s, v25.8h, v6.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 77f\n"
- "ld1 { v29.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 76f\n"
- "ld1 { v29.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 79f\n"
- "ld1 { v29.b }[6], [x20]\n"
+ "ldr x19, [x12, #0xc0]\n"
+ "smlal v22.4s, v25.4h, v8.4h\n"
+ "smlal2 v21.4s, v25.8h, v8.8h\n"
+ "smlal v23.4s, v25.4h, v6.4h\n"
+ "smlal2 v18.4s, v25.8h, v6.8h\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 77f\n"
+ "ld1 { v29.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 76f\n"
+ "ld1 { v29.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 79f\n"
+ "ld1 { v29.b }[6], [x19]\n"
"b 79f\n"
"76:" // Oddments: Load (4, 4): Bit 2: Bit 1: Unset
- "tbz x7, #0, 79f\n"
- "ld1 { v29.b }[4], [x20]\n"
+ "tbz x8, #0, 79f\n"
+ "ld1 { v29.b }[4], [x19]\n"
"b 79f\n"
"77:" // Oddments: Load (4, 4): Bit 2: Unset
- "tbz x7, #1, 78f\n"
- "ld1 { v29.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 79f\n"
- "ld1 { v29.b }[2], [x20]\n"
+ "tbz x8, #1, 78f\n"
+ "ld1 { v29.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 79f\n"
+ "ld1 { v29.b }[2], [x19]\n"
"b 79f\n"
"78:" // Oddments: Load (4, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 79f\n"
- "ld1 { v29.b }[0], [x20]\n"
+ "tbz x8, #0, 79f\n"
+ "ld1 { v29.b }[0], [x19]\n"
"79:" // Oddments: Load (4, 4): Bit 2: End
"usubl v29.8h, v29.8b, v12.8b\n"
- "smlal v21.4s, v29.4h, v8.4h\n"
- "smlal2 v22.4s, v29.8h, v8.8h\n"
- "tbz x7, #2, 81f\n"
+ "smlal v23.4s, v29.4h, v8.4h\n"
+ "smlal2 v18.4s, v29.8h, v8.8h\n"
+ "tbz x8, #2, 81f\n"
"ld1 { v19.4s }, [x13], #0x10\n"
- "ld1 { v18.4s }, [x12], #0x10\n"
- "tbz x7, #1, 80f\n"
- "ld1 { v30.d }[0], [x13], #0x8\n"
- "ld1 { v31.d }[0], [x12], #0x8\n"
- "tbz x7, #0, 83f\n"
- "ld1 { v30.s }[2], [x13]\n"
- "ld1 { v31.s }[2], [x12]\n"
+ "ld1 { v0.4s }, [x11], #0x10\n"
+ "tbz x8, #1, 80f\n"
+ "ld1 { v4.d }[0], [x13], #0x8\n"
+ "ld1 { v31.d }[0], [x11], #0x8\n"
+ "tbz x8, #0, 83f\n"
+ "ld1 { v4.s }[2], [x13]\n"
+ "ld1 { v31.s }[2], [x11]\n"
"b 83f\n"
"80:" // Oddments: Load requant params: Bit 2: Bit 1: Unset
- "tbz x7, #0, 83f\n"
- "ld1 { v30.s }[0], [x13]\n"
- "ld1 { v31.s }[0], [x12]\n"
+ "tbz x8, #0, 83f\n"
+ "ld1 { v4.s }[0], [x13]\n"
+ "ld1 { v31.s }[0], [x11]\n"
"b 83f\n"
"81:" // Oddments: Load requant params: Bit 2: Unset
- "tbz x7, #1, 82f\n"
+ "tbz x8, #1, 82f\n"
"ld1 { v19.d }[0], [x13], #0x8\n"
- "ld1 { v18.d }[0], [x12], #0x8\n"
- "tbz x7, #0, 83f\n"
+ "ld1 { v0.d }[0], [x11], #0x8\n"
+ "tbz x8, #0, 83f\n"
"ld1 { v19.s }[2], [x13]\n"
- "ld1 { v18.s }[2], [x12]\n"
+ "ld1 { v0.s }[2], [x11]\n"
"b 83f\n"
"82:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 83f\n"
+ "tbz x8, #0, 83f\n"
"ld1 { v19.s }[0], [x13]\n"
- "ld1 { v18.s }[0], [x12]\n"
+ "ld1 { v0.s }[0], [x11]\n"
"83:" // Oddments: Load requant params: Bit 2: End
"sqrdmulh v15.4s, v15.4s, v19.4s\n"
- "and v0.16b, v15.16b, v18.16b\n"
- "add x11, x11, x16\n"
- "add x10, x10, x16\n"
- "sqrdmulh v17.4s, v17.4s, v30.4s\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "add x9, x9, x16\n"
- "add x28, x28, x16\n"
- "and v7.16b, v17.16b, v31.16b\n"
- "sqrdmulh v10.4s, v10.4s, v19.4s\n"
"sqrdmulh v9.4s, v9.4s, v19.4s\n"
- "sqrdmulh v21.4s, v21.4s, v19.4s\n"
- "sqadd v15.4s, v15.4s, v0.4s\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "and v19.16b, v10.16b, v18.16b\n"
- "sqrdmulh v20.4s, v20.4s, v30.4s\n"
- "and v27.16b, v9.16b, v18.16b\n"
- "sqrdmulh v23.4s, v23.4s, v30.4s\n"
- "and v0.16b, v21.16b, v18.16b\n"
- "sqrdmulh v22.4s, v22.4s, v30.4s\n"
- "sqadd v17.4s, v17.4s, v7.4s\n"
+ "add x10, x10, x14\n"
+ "add x9, x9, x14\n"
+ "sqrdmulh v22.4s, v22.4s, v19.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v19.4s\n"
+ "add x28, x28, x14\n"
+ "add x27, x27, x14\n"
+ "and v30.16b, v15.16b, v0.16b\n"
+ "sqrdmulh v10.4s, v10.4s, v4.4s\n"
+ "and v28.16b, v9.16b, v0.16b\n"
+ "sqrdmulh v16.4s, v16.4s, v4.4s\n"
+ "and v29.16b, v22.16b, v0.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v4.4s\n"
+ "and v20.16b, v23.16b, v0.16b\n"
+ "sqrdmulh v18.4s, v18.4s, v4.4s\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "and v19.16b, v10.16b, v31.16b\n"
+ "sshr v28.4s, v28.4s, #0x1f\n"
+ "and v4.16b, v16.16b, v31.16b\n"
+ "sshr v29.4s, v29.4s, #0x1f\n"
+ "and v5.16b, v21.16b, v31.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v26.16b, v18.16b, v31.16b\n"
+ "sqadd v15.4s, v15.4s, v30.4s\n"
"sshr v19.4s, v19.4s, #0x1f\n"
- "and v5.16b, v20.16b, v31.16b\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "and v4.16b, v23.16b, v31.16b\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "and v7.16b, v22.16b, v31.16b\n"
- "sqadd v10.4s, v10.4s, v19.4s\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sqadd v9.4s, v9.4s, v27.4s\n"
+ "sqadd v9.4s, v9.4s, v28.4s\n"
"sshr v4.4s, v4.4s, #0x1f\n"
- "sqadd v21.4s, v21.4s, v0.4s\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "srshl v15.4s, v15.4s, v18.4s\n"
- "srshl v10.4s, v10.4s, v18.4s\n"
- "sqadd v20.4s, v20.4s, v5.4s\n"
- "srshl v9.4s, v9.4s, v18.4s\n"
- "sqadd v23.4s, v23.4s, v4.4s\n"
- "srshl v21.4s, v21.4s, v18.4s\n"
- "sqadd v22.4s, v22.4s, v7.4s\n"
- "srshl v17.4s, v17.4s, v31.4s\n"
+ "sqadd v22.4s, v22.4s, v29.4s\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
+ "sqadd v23.4s, v23.4s, v20.4s\n"
+ "sshr v26.4s, v26.4s, #0x1f\n"
+ "srshl v15.4s, v15.4s, v0.4s\n"
+ "sqadd v10.4s, v10.4s, v19.4s\n"
+ "srshl v9.4s, v9.4s, v0.4s\n"
+ "sqadd v16.4s, v16.4s, v4.4s\n"
+ "srshl v22.4s, v22.4s, v0.4s\n"
+ "sqadd v21.4s, v21.4s, v5.4s\n"
+ "srshl v23.4s, v23.4s, v0.4s\n"
+ "sqadd v18.4s, v18.4s, v26.4s\n"
+ "srshl v10.4s, v10.4s, v31.4s\n"
"sqxtn v15.4h, v15.4s\n"
- "srshl v20.4s, v20.4s, v31.4s\n"
- "sqxtn v10.4h, v10.4s\n"
- "srshl v23.4s, v23.4s, v31.4s\n"
+ "srshl v16.4s, v16.4s, v31.4s\n"
"sqxtn v9.4h, v9.4s\n"
- "srshl v22.4s, v22.4s, v31.4s\n"
- "sqxtn v21.4h, v21.4s\n"
- "sqxtn2 v15.8h, v17.4s\n"
- "sqxtn2 v10.8h, v20.4s\n"
- "sqxtn2 v9.8h, v23.4s\n"
- "sqxtn2 v21.8h, v22.4s\n"
+ "srshl v21.4s, v21.4s, v31.4s\n"
+ "sqxtn v22.4h, v22.4s\n"
+ "srshl v18.4s, v18.4s, v31.4s\n"
+ "sqxtn v23.4h, v23.4s\n"
+ "sqxtn2 v15.8h, v10.4s\n"
+ "sqxtn2 v9.8h, v16.4s\n"
+ "sqxtn2 v22.8h, v21.4s\n"
+ "sqxtn2 v23.8h, v18.4s\n"
"sqadd v15.8h, v15.8h, v11.8h\n"
- "sqadd v10.8h, v10.8h, v11.8h\n"
"sqadd v9.8h, v9.8h, v11.8h\n"
- "sqadd v21.8h, v21.8h, v11.8h\n"
- "smax v15.8h, v15.8h, v16.8h\n"
- "smax v10.8h, v10.8h, v16.8h\n"
- "smax v9.8h, v9.8h, v16.8h\n"
- "smax v21.8h, v21.8h, v16.8h\n"
+ "sqadd v22.8h, v22.8h, v11.8h\n"
+ "sqadd v23.8h, v23.8h, v11.8h\n"
+ "smax v15.8h, v15.8h, v17.8h\n"
+ "smax v9.8h, v9.8h, v17.8h\n"
+ "smax v22.8h, v22.8h, v17.8h\n"
+ "smax v23.8h, v23.8h, v17.8h\n"
"smin v15.8h, v15.8h, v14.8h\n"
- "smin v10.8h, v10.8h, v14.8h\n"
"smin v9.8h, v9.8h, v14.8h\n"
- "smin v21.8h, v21.8h, v14.8h\n"
+ "smin v22.8h, v22.8h, v14.8h\n"
+ "smin v23.8h, v23.8h, v14.8h\n"
"uzp1 v15.16b, v15.16b, v15.16b\n"
- "uzp1 v10.16b, v10.16b, v10.16b\n"
"uzp1 v9.16b, v9.16b, v9.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "tbz x7, #2, 85f\n"
- "st1 { v15.s }[0], [x11], #0x4\n"
- "st1 { v10.s }[0], [x10], #0x4\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "tbz x8, #2, 85f\n"
+ "st1 { v15.s }[0], [x10], #0x4\n"
"st1 { v9.s }[0], [x9], #0x4\n"
- "st1 { v21.s }[0], [x28], #0x4\n"
- "tbz x7, #1, 84f\n"
- "st1 { v15.h }[2], [x11], #0x2\n"
- "st1 { v10.h }[2], [x10], #0x2\n"
+ "st1 { v22.s }[0], [x28], #0x4\n"
+ "st1 { v23.s }[0], [x27], #0x4\n"
+ "tbz x8, #1, 84f\n"
+ "st1 { v15.h }[2], [x10], #0x2\n"
"st1 { v9.h }[2], [x9], #0x2\n"
- "st1 { v21.h }[2], [x28], #0x2\n"
- "tbz x7, #0, 87f\n"
- "st1 { v15.b }[6], [x11], #0x1\n"
- "st1 { v10.b }[6], [x10], #0x1\n"
+ "st1 { v22.h }[2], [x28], #0x2\n"
+ "st1 { v23.h }[2], [x27], #0x2\n"
+ "tbz x8, #0, 87f\n"
+ "st1 { v15.b }[6], [x10], #0x1\n"
"st1 { v9.b }[6], [x9], #0x1\n"
- "st1 { v21.b }[6], [x28], #0x1\n"
+ "st1 { v22.b }[6], [x28], #0x1\n"
+ "st1 { v23.b }[6], [x27], #0x1\n"
"b 87f\n"
"84:" // Oddments: Bit 2: Bit 1: Unset
- "tbz x7, #0, 87f\n"
- "st1 { v15.b }[4], [x11], #0x1\n"
- "st1 { v10.b }[4], [x10], #0x1\n"
+ "tbz x8, #0, 87f\n"
+ "st1 { v15.b }[4], [x10], #0x1\n"
"st1 { v9.b }[4], [x9], #0x1\n"
- "st1 { v21.b }[4], [x28], #0x1\n"
+ "st1 { v22.b }[4], [x28], #0x1\n"
+ "st1 { v23.b }[4], [x27], #0x1\n"
"b 87f\n"
"85:" // Oddments: Bit 2: Unset
- "tbz x7, #1, 86f\n"
- "st1 { v15.h }[0], [x11], #0x2\n"
- "st1 { v10.h }[0], [x10], #0x2\n"
+ "tbz x8, #1, 86f\n"
+ "st1 { v15.h }[0], [x10], #0x2\n"
"st1 { v9.h }[0], [x9], #0x2\n"
- "st1 { v21.h }[0], [x28], #0x2\n"
- "tbz x7, #0, 87f\n"
- "st1 { v15.b }[2], [x11], #0x1\n"
- "st1 { v10.b }[2], [x10], #0x1\n"
+ "st1 { v22.h }[0], [x28], #0x2\n"
+ "st1 { v23.h }[0], [x27], #0x2\n"
+ "tbz x8, #0, 87f\n"
+ "st1 { v15.b }[2], [x10], #0x1\n"
"st1 { v9.b }[2], [x9], #0x1\n"
- "st1 { v21.b }[2], [x28], #0x1\n"
+ "st1 { v22.b }[2], [x28], #0x1\n"
+ "st1 { v23.b }[2], [x27], #0x1\n"
"b 87f\n"
"86:" // Oddments: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 87f\n"
- "st1 { v15.b }[0], [x11], #0x1\n"
- "st1 { v10.b }[0], [x10], #0x1\n"
+ "tbz x8, #0, 87f\n"
+ "st1 { v15.b }[0], [x10], #0x1\n"
"st1 { v9.b }[0], [x9], #0x1\n"
- "st1 { v21.b }[0], [x28], #0x1\n"
+ "st1 { v22.b }[0], [x28], #0x1\n"
+ "st1 { v23.b }[0], [x27], #0x1\n"
"87:" // Oddments: Bit 2: End
"88:" // End
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
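The hunks above only rename registers; the arithmetic of the kernel is unchanged. Each "usubl" widens a u8 vector to s16 while subtracting a quantisation offset held in a broadcast register (the activation offset for loaded pixels, the weight offset for loaded filter taps), and each "smlal"/"smlal2" pair multiply-accumulates the low and high halves into s32. A minimal scalar sketch of one such MAC, assuming per-tensor offsets; the names below are illustrative, not the library's API:

    #include <cstdint>

    // One u8 quantised multiply-accumulate, as performed lane-wise by
    // usubl (widen + subtract offset) followed by smlal/smlal2.
    int32_t u8q_mac(int32_t acc, uint8_t x, uint8_t w,
                    int32_t a_offset, int32_t b_offset)
    {
        const int32_t xs = static_cast<int32_t>(x) - a_offset; // usubl on the input
        const int32_t ws = static_cast<int32_t>(w) - b_offset; // usubl on the weight
        return acc + xs * ws;                                  // smlal / smlal2
    }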
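The numbered "Oddments" labels deal with a channel tail shorter than one full vector: bits 2, 1 and 0 of the remaining channel count are tested with "tbz", and a word, a halfword and a byte are loaded into successive lanes so that exactly n bytes are touched. The same control flow in plain C++, assuming a contiguous source and a lane buffer (names are illustrative):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Load n (< 8) bytes into the low lanes of an 8-byte vector image,
    // mirroring the tbz #2 / #1 / #0 cascade and the ld1 {.s/.h/.b}[k]
    // lane loads in the kernel above.
    void load_oddments(uint8_t lanes[8], const uint8_t *src, size_t n)
    {
        size_t k = 0;
        if (n & 4) { std::memcpy(lanes + k, src + k, 4); k += 4; } // ld1 { v.s }[0]
        if (n & 2) { std::memcpy(lanes + k, src + k, 2); k += 2; } // ld1 { v.h }[k/2]
        if (n & 1) { lanes[k] = src[k]; }                          // ld1 { v.b }[k]
    }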
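The block following label 83 is the fixed-point requantisation tail: "sqrdmulh" performs a rounding doubling high multiply by the per-channel multiplier; the "and"/"sshr #0x1f"/"sqadd" trio nudges negative accumulators by -1 so that the subsequent "srshl" (shift amounts are stored negated, so this is a rounding right shift) rounds half away from zero; "sqxtn"/"sqxtn2" narrow to s16, after which the output offset is added and the result clamped and narrowed to u8. A hedged scalar equivalent follows (C++17; function and parameter names are illustrative, and the offset is added at 32 bits here rather than after the 16-bit narrowing, which is equivalent when no extra saturation occurs):

    #include <algorithm>
    #include <cstdint>
    #include <limits>

    // sqrdmulh: (2*a*b + 2^31) >> 32, saturating the one overflowing
    // case, INT32_MIN * INT32_MIN.
    int32_t rounding_doubling_high_mul(int32_t a, int32_t b)
    {
        if (a == std::numeric_limits<int32_t>::min() && b == a)
            return std::numeric_limits<int32_t>::max();
        const int64_t p = static_cast<int64_t>(a) * b;
        return static_cast<int32_t>((p + (INT64_C(1) << 30)) >> 31);
    }

    // and/sshr/sqadd fixup followed by srshl: a right shift by s that
    // rounds half away from zero (s == 0 leaves the value untouched,
    // just as the and-mask is zero in the kernel when the shift is 0).
    int32_t rounding_right_shift(int32_t x, int s)
    {
        if (s <= 0) return x;
        if (x < 0) --x;                                   // sign fixup
        return static_cast<int32_t>(
            (static_cast<int64_t>(x) + (INT64_C(1) << (s - 1))) >> s);
    }

    // Whole tail: multiply, shift, add output offset, clamp, narrow.
    uint8_t requantise(int32_t acc, int32_t mul, int shift,
                       int32_t c_offset, int32_t minval, int32_t maxval)
    {
        int32_t v = rounding_doubling_high_mul(acc, mul); // sqrdmulh
        v = rounding_right_shift(v, shift);               // sqadd + srshl
        v = std::clamp(v + c_offset, minval, maxval);     // sqadd, smax, smin
        return static_cast<uint8_t>(v);                   // uzp1 + store
    }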
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
index 2fe688a65e..bd6fa1d443 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -111,2073 +111,2073 @@ void a64_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "ldr x1, [%x[params], %[offsetof_Params_n_channels]]\n"
- "ldr x13, [%x[params], %[offsetof_Params_requant]]\n"
- "lsr x2, x1, #0x3\n"
- "add x3, x13, %[offsetof_Requantize32_a_offset]\n"
- "ld1r { v9.16b }, [x3]\n"
- "ldr x24, [%x[params], %[offsetof_Params_outptrs]]\n"
- "add x11, x13, %[offsetof_Requantize32_b_offset]\n"
- "add x5, x13, %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v15.16b }, [x11]\n"
- "ld1r { v14.8h }, [x5]\n"
- "add x3, x13, %[offsetof_Requantize32_minval]\n"
- "add x15, x13, %[offsetof_Requantize32_maxval]\n"
- "ld1r { v12.8h }, [x3]\n"
- "ld1r { v11.8h }, [x15]\n"
- "mov x0, #0x0\n"
- "mov x10, #0x0\n"
- "add x4, %x[params], %[offsetof_Params_inptrs]\n"
- "ldr x3, [%x[params], %[offsetof_Params_weights]]\n"
- "ldr x5, [%x[params], %[offsetof_Params_requant_muls]]\n"
- "ldr x8, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "ldp x17, x6, [x24, #0x0]\n"
- "ldp x7, x16, [x24, #0x10]\n"
- "cbz x2, 3f\n"
- "ldr d0, [x3, #0x0]\n"
- "ldr d1, [x3, #0x8]\n"
- "subs x2, x2, #0x1\n"
- "usubl v0.8h, v0.8b, v15.8b\n"
- "ldr d2, [x3, #0x10]\n"
- "ldr d3, [x3, #0x18]\n"
- "usubl v1.8h, v1.8b, v15.8b\n"
- "usubl v2.8h, v2.8b, v15.8b\n"
- "ldr d4, [x3, #0x20]\n"
- "ldr x13, [%x[params], %[offsetof_Params_bias]]\n"
- "usubl v3.8h, v3.8b, v15.8b\n"
- "usubl v4.8h, v4.8b, v15.8b\n"
- "ldr q13, [x13, #0x0]\n"
- "ldr q19, [x13, #0x10]\n"
- "add x13, x13, #0x20\n"
- "str x13, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x9, x28, [x4, #0x0]\n"
- "ldp x27, x26, [x4, #0x10]\n"
- "mov v20.16b, v13.16b\n"
- "mov v10.16b, v19.16b\n"
- "ldp x25, x24, [x4, #0x20]\n"
- "ldp x23, x22, [x4, #0x30]\n"
- "mov v8.16b, v13.16b\n"
- "mov v7.16b, v19.16b\n"
- "ldp x21, x20, [x4, #0x40]\n"
- "ldr d31, [x9, x0]\n"
- "mov v17.16b, v13.16b\n"
- "mov v21.16b, v19.16b\n"
- "ldr d30, [x28, x0]\n"
- "ldr d29, [x27, x0]\n"
+ "ldr x10, [%x[params], %[offsetof_Params_requant]]\n"
+ "ldr x0, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "add x17, x10, %[offsetof_Requantize32_a_offset]\n"
+ "add x9, x10, %[offsetof_Requantize32_b_offset]\n"
+ "ldr x25, [%x[params], %[offsetof_Params_outptrs]]\n"
+ "add x4, x10, %[offsetof_Requantize32_c_offset]\n"
+ "add x14, x10, %[offsetof_Requantize32_minval]\n"
+ "ldr x23, [%x[params], %[offsetof_Params_weights]]\n"
+ "add x5, x10, %[offsetof_Requantize32_maxval]\n"
+ "ld1r { v9.16b }, [x17]\n"
+ "ld1r { v14.16b }, [x9]\n"
+ "lsr x3, x0, #0x3\n"
+ "ld1r { v18.8h }, [x4]\n"
+ "ld1r { v11.8h }, [x14]\n"
+ "mov x24, #0x0\n"
+ "mov x22, #0x0\n"
+ "ld1r { v13.8h }, [x5]\n"
+ "ldr x10, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "add x20, %x[params], %[offsetof_Params_inptrs]\n"
+ "ldr x1, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "ldp x16, x8, [x25, #0x0]\n"
+ "ldp x4, x7, [x25, #0x10]\n"
+ "cbz x3, 3f\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr q15, [x19, #0x0]\n"
+ "subs x3, x3, #0x1\n"
+ "mov v17.16b, v15.16b\n"
+ "ldr q16, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr d0, [x23, #0x0]\n"
+ "ldr d1, [x23, #0x8]\n"
+ "ldr d2, [x23, #0x10]\n"
+ "mov v8.16b, v16.16b\n"
+ "mov v10.16b, v15.16b\n"
+ "ldr d3, [x23, #0x18]\n"
+ "ldr d4, [x23, #0x20]\n"
+ "mov v7.16b, v16.16b\n"
+ "mov v6.16b, v15.16b\n"
+ "ldp x28, x6, [x20, #0x0]\n"
+ "ldp x26, x25, [x20, #0x10]\n"
+ "mov v5.16b, v16.16b\n"
+ "usubl v0.8h, v0.8b, v14.8b\n"
+ "ldp x5, x2, [x20, #0x20]\n"
+ "ldp x27, x21, [x20, #0x30]\n"
+ "usubl v1.8h, v1.8b, v14.8b\n"
+ "usubl v2.8h, v2.8b, v14.8b\n"
+ "ldp x12, x19, [x20, #0x40]\n"
+ "ldr d31, [x28, x24]\n"
+ "usubl v3.8h, v3.8b, v14.8b\n"
+ "usubl v4.8h, v4.8b, v14.8b\n"
+ "ldr d30, [x6, x24]\n"
+ "ldr d29, [x26, x24]\n"
"usubl v31.8h, v31.8b, v9.8b\n"
"usubl v30.8h, v30.8b, v9.8b\n"
- "ldr d28, [x26, x0]\n"
- "ldr d27, [x25, x0]\n"
+ "ldr d28, [x25, x24]\n"
+ "ldr d27, [x5, x24]\n"
"usubl v29.8h, v29.8b, v9.8b\n"
"usubl v28.8h, v28.8b, v9.8b\n"
- "ldr d23, [x24, x0]\n"
- "ldr d25, [x23, x0]\n"
+ "ldr d23, [x2, x24]\n"
+ "ldr d25, [x27, x24]\n"
"usubl v27.8h, v27.8b, v9.8b\n"
"usubl v23.8h, v23.8b, v9.8b\n"
- "ldr d24, [x22, x0]\n"
- "ldr d26, [x21, x0]\n"
+ "ldr d24, [x21, x24]\n"
+ "ldr d26, [x12, x24]\n"
"usubl v25.8h, v25.8b, v9.8b\n"
"usubl v24.8h, v24.8b, v9.8b\n"
- "ldr d22, [x20, x0]\n"
+ "ldr d22, [x19, x24]\n"
"usubl v26.8h, v26.8b, v9.8b\n"
"usubl v22.8h, v22.8b, v9.8b\n"
"beq 2f\n"
"1:" // Loop
- "ldr q18, [x5, #0x0]\n"
- "ldr q6, [x8, #0x0]\n"
- "smlal v13.4s, v31.4h, v0.4h\n"
- "smlal2 v19.4s, v31.8h, v0.8h\n"
- "ldr q5, [x5, #0x10]\n"
- "smlal v13.4s, v30.4h, v1.4h\n"
- "ldr x20, [x4, #0x50]\n"
- "smlal v20.4s, v30.4h, v0.4h\n"
- "smlal v8.4s, v29.4h, v0.4h\n"
- "smlal v17.4s, v28.4h, v0.4h\n"
- "ldr x22, [x4, #0x58]\n"
- "ldr x21, [x4, #0x60]\n"
- "smlal2 v19.4s, v30.8h, v1.8h\n"
- "smlal2 v10.4s, v30.8h, v0.8h\n"
- "ldr d31, [x20, x0]\n"
+ "smlal v15.4s, v31.4h, v0.4h\n"
+ "smlal2 v16.4s, v31.8h, v0.8h\n"
+ "ldr x19, [x20, #0x50]\n"
+ "ldr d31, [x19, x24]\n"
+ "smlal v17.4s, v30.4h, v0.4h\n"
+ "smlal v10.4s, v29.4h, v0.4h\n"
+ "ldr x15, [x20, #0x58]\n"
"usubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v6.4s, v28.4h, v0.4h\n"
+ "smlal2 v8.4s, v30.8h, v0.8h\n"
+ "ldr x19, [x20, #0x60]\n"
+ "ldr x27, [x20, #0x68]\n"
"smlal2 v7.4s, v29.8h, v0.8h\n"
- "smlal v13.4s, v27.4h, v2.4h\n"
- "ldr x20, [x4, #0x68]\n"
- "ldr x26, [x4, #0x70]\n"
- "smlal2 v21.4s, v28.8h, v0.8h\n"
- "ldr d30, [x22, x0]\n"
- "smlal v20.4s, v27.4h, v1.4h\n"
+ "smlal v15.4s, v30.4h, v1.4h\n"
+ "ldr x5, [x20, #0x70]\n"
+ "ldr x11, [x20, #0x78]\n"
+ "smlal2 v16.4s, v30.8h, v1.8h\n"
+ "smlal2 v5.4s, v28.8h, v0.8h\n"
+ "ldr d30, [x15, x24]\n"
"usubl v30.8h, v30.8b, v9.8b\n"
- "smlal v8.4s, v28.4h, v1.4h\n"
- "smlal v17.4s, v23.4h, v1.4h\n"
- "ldr x25, [x4, #0x78]\n"
- "ldr x23, [x4, #0x80]\n"
- "smlal2 v19.4s, v27.8h, v2.8h\n"
- "smlal2 v10.4s, v27.8h, v1.8h\n"
- "ldr d0, [x3, #0x28]\n"
- "usubl v0.8h, v0.8b, v15.8b\n"
+ "smlal v17.4s, v27.4h, v1.4h\n"
+ "smlal v10.4s, v28.4h, v1.4h\n"
+ "ldr d0, [x23, #0x28]\n"
+ "usubl v0.8h, v0.8b, v14.8b\n"
+ "smlal v6.4s, v23.4h, v1.4h\n"
+ "smlal2 v8.4s, v27.8h, v1.8h\n"
+ "ldr x12, [x20, #0x80]\n"
+ "ldr x26, [x20, #0x88]\n"
"smlal2 v7.4s, v28.8h, v1.8h\n"
- "smlal v13.4s, v25.4h, v3.4h\n"
- "ldr x24, [x4, #0x88]\n"
- "ldr x15, [x4, #0x90]\n"
- "smlal2 v21.4s, v23.8h, v1.8h\n"
- "ldr d27, [x21, x0]\n"
- "smlal v20.4s, v25.4h, v2.4h\n"
+ "smlal v15.4s, v27.4h, v2.4h\n"
+ "ldr x14, [x20, #0x90]\n"
+ "ldr x15, [x20, #0x98]\n"
+ "smlal2 v16.4s, v27.8h, v2.8h\n"
+ "smlal2 v5.4s, v23.8h, v1.8h\n"
+ "ldr d27, [x19, x24]\n"
"usubl v27.8h, v27.8b, v9.8b\n"
- "smlal v8.4s, v23.4h, v2.4h\n"
- "smlal v17.4s, v31.4h, v2.4h\n"
- "ldr x21, [x4, #0x98]\n"
- "ldr x14, [x4, #0xa0]\n"
- "smlal2 v19.4s, v25.8h, v3.8h\n"
- "smlal2 v10.4s, v25.8h, v2.8h\n"
- "ldr d1, [x3, #0x30]\n"
- "usubl v1.8h, v1.8b, v15.8b\n"
+ "smlal v17.4s, v25.4h, v2.4h\n"
+ "smlal v10.4s, v23.4h, v2.4h\n"
+ "ldr d1, [x23, #0x30]\n"
+ "usubl v1.8h, v1.8b, v14.8b\n"
+ "smlal v6.4s, v31.4h, v2.4h\n"
+ "smlal2 v8.4s, v25.8h, v2.8h\n"
+ "ldr x21, [x20, #0xa0]\n"
+ "ldr x2, [x20, #0xa8]\n"
"smlal2 v7.4s, v23.8h, v2.8h\n"
- "smlal v13.4s, v24.4h, v4.4h\n"
- "ldr x13, [x4, #0xa8]\n"
- "ldr x12, [x4, #0xb0]\n"
- "smlal2 v21.4s, v31.8h, v2.8h\n"
- "ldr d25, [x20, x0]\n"
- "smlal v20.4s, v24.4h, v3.4h\n"
+ "smlal v15.4s, v25.4h, v3.4h\n"
+ "ldr x13, [x20, #0xb0]\n"
+ "ldr x9, [x20, #0xb8]\n"
+ "smlal2 v16.4s, v25.8h, v3.8h\n"
+ "smlal2 v5.4s, v31.8h, v2.8h\n"
+ "ldr d25, [x27, x24]\n"
"usubl v25.8h, v25.8b, v9.8b\n"
- "smlal v8.4s, v31.4h, v3.4h\n"
- "smlal v17.4s, v30.4h, v3.4h\n"
- "ldr x20, [x4, #0xb8]\n"
- "ldr x11, [x4, #0xc0]\n"
- "smlal2 v19.4s, v24.8h, v4.8h\n"
- "smlal2 v10.4s, v24.8h, v3.8h\n"
- "ldr d2, [x3, #0x38]\n"
- "usubl v2.8h, v2.8b, v15.8b\n"
+ "smlal v17.4s, v24.4h, v3.4h\n"
+ "smlal v10.4s, v31.4h, v3.4h\n"
+ "ldr d2, [x23, #0x38]\n"
+ "usubl v2.8h, v2.8b, v14.8b\n"
+ "smlal v6.4s, v30.4h, v3.4h\n"
+ "smlal2 v8.4s, v24.8h, v3.8h\n"
+ "ldr x19, [x20, #0xc0]\n"
+ "ldr x28, [x20, #0xc8]\n"
"smlal2 v7.4s, v31.8h, v3.8h\n"
- "smlal v13.4s, v29.4h, v0.4h\n"
- "ldr x22, [x4, #0xc8]\n"
- "ldr x9, [x4, #0xd0]\n"
- "smlal2 v21.4s, v30.8h, v3.8h\n"
- "ldr d24, [x26, x0]\n"
- "smlal v20.4s, v27.4h, v4.4h\n"
+ "smlal v15.4s, v24.4h, v4.4h\n"
+ "ldr x6, [x20, #0xd0]\n"
+ "ldr x27, [x20, #0xd8]\n"
+ "smlal2 v16.4s, v24.8h, v4.8h\n"
+ "smlal2 v5.4s, v30.8h, v3.8h\n"
+ "ldr d24, [x5, x24]\n"
"usubl v24.8h, v24.8b, v9.8b\n"
- "smlal v8.4s, v30.4h, v4.4h\n"
- "smlal v17.4s, v26.4h, v4.4h\n"
- "ldr x28, [x4, #0xd8]\n"
- "ldr x27, [x4, #0xe0]\n"
- "smlal2 v19.4s, v29.8h, v0.8h\n"
- "ldr d3, [x3, #0x40]\n"
- "smlal2 v10.4s, v27.8h, v4.8h\n"
- "ldr d27, [x25, x0]\n"
- "smlal2 v7.4s, v30.8h, v4.8h\n"
- "smlal v13.4s, v28.4h, v1.4h\n"
- "usubl v3.8h, v3.8b, v15.8b\n"
- "ldr x26, [x4, #0xe8]\n"
- "smlal2 v21.4s, v26.8h, v4.8h\n"
- "ldr d4, [x3, #0x48]\n"
- "smlal v20.4s, v28.4h, v0.4h\n"
+ "smlal v17.4s, v27.4h, v4.4h\n"
+ "smlal v10.4s, v30.4h, v4.4h\n"
+ "ldr d3, [x23, #0x40]\n"
+ "usubl v3.8h, v3.8b, v14.8b\n"
+ "smlal v6.4s, v26.4h, v4.4h\n"
+ "smlal2 v8.4s, v27.8h, v4.8h\n"
+ "ldr d27, [x11, x24]\n"
"usubl v27.8h, v27.8b, v9.8b\n"
- "smlal v8.4s, v22.4h, v0.4h\n"
- "smlal v17.4s, v25.4h, v0.4h\n"
- "usubl v4.8h, v4.8b, v15.8b\n"
- "subs x2, x2, #0x1\n"
- "smlal2 v19.4s, v28.8h, v1.8h\n"
- "smlal2 v10.4s, v28.8h, v0.8h\n"
- "ldr d28, [x24, x0]\n"
- "usubl v28.8h, v28.8b, v9.8b\n"
+ "smlal2 v7.4s, v30.8h, v4.8h\n"
+ "smlal v15.4s, v29.4h, v0.4h\n"
+ "ldr x11, [x20, #0xe0]\n"
+ "ldr x17, [x20, #0xe8]\n"
+ "smlal2 v16.4s, v29.8h, v0.8h\n"
+ "smlal2 v5.4s, v26.8h, v4.8h\n"
+ "ldr d4, [x23, #0x48]\n"
+ "usubl v4.8h, v4.8b, v14.8b\n"
+ "smlal v17.4s, v28.4h, v0.4h\n"
+ "smlal v10.4s, v22.4h, v0.4h\n"
+ "ldr x5, [x20, #0xf0]\n"
+ "ldr q12, [x10, #0x0]\n"
+ "smlal v6.4s, v25.4h, v0.4h\n"
+ "smlal2 v8.4s, v28.8h, v0.8h\n"
+ "ldr q19, [x1, #0x0]\n"
+ "ldr q20, [x10, #0x10]\n"
"smlal2 v7.4s, v22.8h, v0.8h\n"
- "smlal v13.4s, v23.4h, v2.4h\n"
- "ldr x25, [x4, #0xf0]\n"
- "add x5, x5, #0x20\n"
- "smlal2 v21.4s, v25.8h, v0.8h\n"
- "ldr d0, [x3, #0x50]\n"
- "smlal v20.4s, v23.4h, v1.4h\n"
- "usubl v0.8h, v0.8b, v15.8b\n"
- "smlal v8.4s, v25.4h, v1.4h\n"
- "smlal v17.4s, v24.4h, v1.4h\n"
- "smlal2 v19.4s, v23.8h, v2.8h\n"
- "smlal2 v10.4s, v23.8h, v1.8h\n"
- "ldr d23, [x23, x0]\n"
- "usubl v23.8h, v23.8b, v9.8b\n"
+ "smlal v15.4s, v28.4h, v1.4h\n"
+ "ldr q29, [x1, #0x10]\n"
+ "subs x3, x3, #0x1\n"
+ "smlal2 v16.4s, v28.8h, v1.8h\n"
+ "smlal2 v5.4s, v25.8h, v0.8h\n"
+ "ldr d28, [x26, x24]\n"
+ "ldr d0, [x23, #0x50]\n"
+ "smlal v17.4s, v23.4h, v1.4h\n"
+ "smlal v10.4s, v25.4h, v1.4h\n"
+ "usubl v28.8h, v28.8b, v9.8b\n"
+ "ldr x25, [x20, #0xf8]\n"
+ "smlal v6.4s, v24.4h, v1.4h\n"
+ "smlal2 v8.4s, v23.8h, v1.8h\n"
+ "usubl v0.8h, v0.8b, v14.8b\n"
+ "add x10, x10, #0x20\n"
"smlal2 v7.4s, v25.8h, v1.8h\n"
- "smlal v13.4s, v31.4h, v3.4h\n"
- "ldr x24, [x4, #0xf8]\n"
- "smlal2 v21.4s, v24.8h, v1.8h\n"
- "ldr d1, [x3, #0x58]\n"
- "smlal v20.4s, v31.4h, v2.4h\n"
- "usubl v1.8h, v1.8b, v15.8b\n"
- "smlal v8.4s, v24.4h, v2.4h\n"
- "smlal v17.4s, v27.4h, v2.4h\n"
- "smlal2 v19.4s, v31.8h, v3.8h\n"
- "smlal2 v10.4s, v31.8h, v2.8h\n"
- "ldr d31, [x15, x0]\n"
- "usubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v15.4s, v23.4h, v2.4h\n"
+ "add x1, x1, #0x20\n"
+ "smlal2 v16.4s, v23.8h, v2.8h\n"
+ "ldr d23, [x12, x24]\n"
+ "smlal2 v5.4s, v24.8h, v1.8h\n"
+ "usubl v23.8h, v23.8b, v9.8b\n"
+ "smlal v17.4s, v31.4h, v2.4h\n"
+ "smlal v10.4s, v24.4h, v2.4h\n"
+ "ldr d1, [x23, #0x58]\n"
+ "usubl v1.8h, v1.8b, v14.8b\n"
+ "smlal v6.4s, v27.4h, v2.4h\n"
+ "smlal2 v8.4s, v31.8h, v2.8h\n"
+ "ldr x26, [x20, #0x100]\n"
"smlal2 v7.4s, v24.8h, v2.8h\n"
- "smlal v13.4s, v30.4h, v4.4h\n"
- "ldr x23, [x4, #0x100]\n"
- "smlal2 v21.4s, v27.8h, v2.8h\n"
- "ldr d2, [x3, #0x60]\n"
- "smlal v20.4s, v30.4h, v3.4h\n"
- "usubl v2.8h, v2.8b, v15.8b\n"
- "smlal v8.4s, v27.4h, v3.4h\n"
- "smlal v17.4s, v23.4h, v3.4h\n"
- "smlal2 v19.4s, v30.8h, v4.8h\n"
- "smlal2 v10.4s, v30.8h, v3.8h\n"
- "ldr d30, [x21, x0]\n"
- "usubl v30.8h, v30.8b, v9.8b\n"
+ "smlal v15.4s, v31.4h, v3.4h\n"
+ "smlal2 v16.4s, v31.8h, v3.8h\n"
+ "smlal2 v5.4s, v27.8h, v2.8h\n"
+ "ldr d31, [x14, x24]\n"
+ "usubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v17.4s, v30.4h, v3.4h\n"
+ "smlal v10.4s, v27.4h, v3.4h\n"
+ "ldr d2, [x23, #0x60]\n"
+ "usubl v2.8h, v2.8b, v14.8b\n"
+ "smlal v6.4s, v23.4h, v3.4h\n"
+ "smlal2 v8.4s, v30.8h, v3.8h\n"
+ "ldr x12, [x20, #0x108]\n"
"smlal2 v7.4s, v27.8h, v3.8h\n"
- "smlal v13.4s, v22.4h, v0.4h\n"
- "ldr x15, [x4, #0x108]\n"
- "smlal2 v21.4s, v23.8h, v3.8h\n"
- "ldr d3, [x3, #0x68]\n"
- "smlal v20.4s, v26.4h, v4.4h\n"
- "usubl v3.8h, v3.8b, v15.8b\n"
- "smlal v8.4s, v23.4h, v4.4h\n"
- "smlal v17.4s, v28.4h, v4.4h\n"
- "smlal2 v19.4s, v22.8h, v0.8h\n"
- "ldr d22, [x20, x0]\n"
- "smlal2 v10.4s, v26.8h, v4.8h\n"
- "ldr d26, [x14, x0]\n"
- "smlal2 v7.4s, v23.8h, v4.8h\n"
- "smlal v13.4s, v25.4h, v1.4h\n"
+ "smlal v15.4s, v30.4h, v4.4h\n"
+ "smlal2 v16.4s, v30.8h, v4.8h\n"
+ "ldr d30, [x15, x24]\n"
+ "smlal2 v5.4s, v23.8h, v3.8h\n"
+ "usubl v30.8h, v30.8b, v9.8b\n"
+ "smlal v17.4s, v26.4h, v4.4h\n"
+ "smlal v10.4s, v23.4h, v4.4h\n"
+ "ldr d3, [x23, #0x68]\n"
+ "usubl v3.8h, v3.8b, v14.8b\n"
+ "smlal v6.4s, v28.4h, v4.4h\n"
+ "smlal2 v8.4s, v26.8h, v4.8h\n"
+ "ldr d26, [x21, x24]\n"
"usubl v26.8h, v26.8b, v9.8b\n"
- "ldr x21, [x4, #0x110]\n"
- "smlal2 v21.4s, v28.8h, v4.8h\n"
- "ldr d4, [x3, #0x70]\n"
- "smlal v20.4s, v25.4h, v0.4h\n"
- "usubl v4.8h, v4.8b, v15.8b\n"
- "smlal v8.4s, v31.4h, v0.4h\n"
- "smlal v17.4s, v30.4h, v0.4h\n"
+ "smlal2 v7.4s, v23.8h, v4.8h\n"
+ "smlal v15.4s, v22.4h, v0.4h\n"
+ "ldr x14, [x20, #0x110]\n"
+ "ldr x21, [x20, #0x118]\n"
+ "smlal2 v16.4s, v22.8h, v0.8h\n"
+ "smlal2 v5.4s, v28.8h, v4.8h\n"
+ "ldr d4, [x23, #0x70]\n"
+ "ldr d22, [x9, x24]\n"
+ "smlal v17.4s, v25.4h, v0.4h\n"
+ "smlal v10.4s, v31.4h, v0.4h\n"
+ "usubl v4.8h, v4.8b, v14.8b\n"
+ "smlal v6.4s, v30.4h, v0.4h\n"
+ "smlal2 v8.4s, v25.8h, v0.8h\n"
"usubl v22.8h, v22.8b, v9.8b\n"
- "ldr x20, [x4, #0x118]\n"
- "smlal2 v19.4s, v25.8h, v1.8h\n"
- "smlal2 v10.4s, v25.8h, v0.8h\n"
- "ldr d25, [x13, x0]\n"
- "usubl v25.8h, v25.8b, v9.8b\n"
"smlal2 v7.4s, v31.8h, v0.8h\n"
- "smlal v13.4s, v24.4h, v2.4h\n"
- "ldr x13, [%x[params], %[offsetof_Params_bias]]\n"
- "smlal2 v21.4s, v30.8h, v0.8h\n"
- "ldr d0, [x3, #0x78]\n"
- "smlal v20.4s, v24.4h, v1.4h\n"
- "usubl v0.8h, v0.8b, v15.8b\n"
- "smlal v8.4s, v30.4h, v1.4h\n"
- "smlal v17.4s, v26.4h, v1.4h\n"
- "smlal2 v19.4s, v24.8h, v2.8h\n"
- "smlal2 v10.4s, v24.8h, v1.8h\n"
- "ldr d24, [x12, x0]\n"
- "usubl v24.8h, v24.8b, v9.8b\n"
+ "smlal v15.4s, v25.4h, v1.4h\n"
+ "smlal2 v16.4s, v25.8h, v1.8h\n"
+ "ldr d25, [x2, x24]\n"
+ "smlal2 v5.4s, v30.8h, v0.8h\n"
+ "usubl v25.8h, v25.8b, v9.8b\n"
+ "smlal v17.4s, v24.4h, v1.4h\n"
+ "smlal v10.4s, v30.4h, v1.4h\n"
+ "ldr d0, [x23, #0x78]\n"
+ "usubl v0.8h, v0.8b, v14.8b\n"
+ "smlal v6.4s, v26.4h, v1.4h\n"
+ "smlal2 v8.4s, v24.8h, v1.8h\n"
"smlal2 v7.4s, v30.8h, v1.8h\n"
- "smlal v13.4s, v27.4h, v3.4h\n"
- "smlal2 v21.4s, v26.8h, v1.8h\n"
- "ldr d1, [x3, #0x80]\n"
- "smlal v20.4s, v27.4h, v2.4h\n"
- "usubl v1.8h, v1.8b, v15.8b\n"
- "smlal v8.4s, v26.4h, v2.4h\n"
- "smlal v17.4s, v25.4h, v2.4h\n"
- "smlal2 v19.4s, v27.8h, v3.8h\n"
- "smlal2 v10.4s, v27.8h, v2.8h\n"
- "ldr d27, [x11, x0]\n"
- "usubl v27.8h, v27.8b, v9.8b\n"
+ "smlal v15.4s, v24.4h, v2.4h\n"
+ "smlal2 v16.4s, v24.8h, v2.8h\n"
+ "ldr d24, [x13, x24]\n"
+ "smlal2 v5.4s, v26.8h, v1.8h\n"
+ "usubl v24.8h, v24.8b, v9.8b\n"
+ "smlal v17.4s, v27.4h, v2.4h\n"
+ "smlal v10.4s, v26.4h, v2.4h\n"
+ "ldr d1, [x23, #0x80]\n"
+ "usubl v1.8h, v1.8b, v14.8b\n"
+ "smlal v6.4s, v25.4h, v2.4h\n"
+ "smlal2 v8.4s, v27.8h, v2.8h\n"
"smlal2 v7.4s, v26.8h, v2.8h\n"
- "smlal v13.4s, v23.4h, v4.4h\n"
- "smlal2 v21.4s, v25.8h, v2.8h\n"
- "ldr d2, [x3, #0x88]\n"
- "smlal v20.4s, v23.4h, v3.4h\n"
- "usubl v2.8h, v2.8b, v15.8b\n"
- "smlal v8.4s, v25.4h, v3.4h\n"
- "smlal v17.4s, v24.4h, v3.4h\n"
- "smlal2 v19.4s, v23.8h, v4.8h\n"
- "smlal2 v10.4s, v23.8h, v3.8h\n"
- "ldr d23, [x22, x0]\n"
- "usubl v23.8h, v23.8b, v9.8b\n"
+ "smlal v15.4s, v27.4h, v3.4h\n"
+ "smlal2 v16.4s, v27.8h, v3.8h\n"
+ "smlal2 v5.4s, v25.8h, v2.8h\n"
+ "ldr d27, [x19, x24]\n"
+ "usubl v27.8h, v27.8b, v9.8b\n"
+ "smlal v17.4s, v23.4h, v3.4h\n"
+ "smlal v10.4s, v25.4h, v3.4h\n"
+ "ldr d2, [x23, #0x88]\n"
+ "usubl v2.8h, v2.8b, v14.8b\n"
+ "smlal v6.4s, v24.4h, v3.4h\n"
+ "smlal2 v8.4s, v23.8h, v3.8h\n"
"smlal2 v7.4s, v25.8h, v3.8h\n"
- "smlal v13.4s, v31.4h, v0.4h\n"
- "smlal2 v21.4s, v24.8h, v3.8h\n"
- "ldr d3, [x3, #0x90]\n"
- "smlal v20.4s, v28.4h, v4.4h\n"
- "usubl v3.8h, v3.8b, v15.8b\n"
- "smlal v8.4s, v24.4h, v4.4h\n"
- "smlal v17.4s, v22.4h, v4.4h\n"
- "smlal2 v19.4s, v31.8h, v0.8h\n"
- "ldr d31, [x9, x0]\n"
- "smlal2 v10.4s, v28.8h, v4.8h\n"
- "ldr d28, [x27, x0]\n"
+ "smlal v15.4s, v23.4h, v4.4h\n"
+ "smlal2 v16.4s, v23.8h, v4.8h\n"
+ "ldr d23, [x28, x24]\n"
+ "smlal2 v5.4s, v24.8h, v3.8h\n"
+ "usubl v23.8h, v23.8b, v9.8b\n"
+ "smlal v17.4s, v28.4h, v4.4h\n"
+ "smlal v10.4s, v24.4h, v4.4h\n"
+ "ldr d3, [x23, #0x90]\n"
+ "usubl v3.8h, v3.8b, v14.8b\n"
+ "smlal v6.4s, v22.4h, v4.4h\n"
+ "smlal2 v8.4s, v28.8h, v4.8h\n"
+ "ldr d28, [x11, x24]\n"
+ "usubl v28.8h, v28.8b, v9.8b\n"
"smlal2 v7.4s, v24.8h, v4.8h\n"
- "smlal v13.4s, v30.4h, v1.4h\n"
+ "smlal v15.4s, v31.4h, v0.4h\n"
+ "smlal2 v16.4s, v31.8h, v0.8h\n"
+ "ldr d31, [x6, x24]\n"
+ "smlal2 v5.4s, v22.8h, v4.8h\n"
"usubl v31.8h, v31.8b, v9.8b\n"
- "smlal2 v21.4s, v22.8h, v4.8h\n"
- "ldr d4, [x3, #0x98]\n"
- "smlal v20.4s, v30.4h, v0.4h\n"
- "usubl v4.8h, v4.8b, v15.8b\n"
- "smlal v8.4s, v27.4h, v0.4h\n"
- "smlal v17.4s, v23.4h, v0.4h\n"
- "usubl v28.8h, v28.8b, v9.8b\n"
- "smlal2 v19.4s, v30.8h, v1.8h\n"
- "smlal2 v10.4s, v30.8h, v0.8h\n"
- "ldr d30, [x28, x0]\n"
- "usubl v30.8h, v30.8b, v9.8b\n"
+ "smlal v17.4s, v30.4h, v0.4h\n"
+ "smlal v10.4s, v27.4h, v0.4h\n"
+ "ldr d4, [x23, #0x98]\n"
+ "usubl v4.8h, v4.8b, v14.8b\n"
+ "smlal v6.4s, v23.4h, v0.4h\n"
+ "smlal2 v8.4s, v30.8h, v0.8h\n"
"smlal2 v7.4s, v27.8h, v0.8h\n"
- "smlal v13.4s, v26.4h, v2.4h\n"
- "smlal2 v21.4s, v23.8h, v0.8h\n"
- "ldr d0, [x3, #0xa0]\n"
- "smlal v20.4s, v26.4h, v1.4h\n"
- "usubl v0.8h, v0.8b, v15.8b\n"
- "smlal v8.4s, v23.4h, v1.4h\n"
- "smlal v17.4s, v31.4h, v1.4h\n"
- "smlal2 v19.4s, v26.8h, v2.8h\n"
- "smlal2 v10.4s, v26.8h, v1.8h\n"
- "ldr d26, [x26, x0]\n"
- "usubl v26.8h, v26.8b, v9.8b\n"
+ "smlal v15.4s, v30.4h, v1.4h\n"
+ "smlal2 v16.4s, v30.8h, v1.8h\n"
+ "ldr d30, [x27, x24]\n"
+ "smlal2 v5.4s, v23.8h, v0.8h\n"
+ "usubl v30.8h, v30.8b, v9.8b\n"
+ "smlal v17.4s, v26.4h, v1.4h\n"
+ "smlal v10.4s, v23.4h, v1.4h\n"
+ "ldr d0, [x23, #0xa0]\n"
+ "usubl v0.8h, v0.8b, v14.8b\n"
+ "smlal v6.4s, v31.4h, v1.4h\n"
+ "smlal2 v8.4s, v26.8h, v1.8h\n"
"smlal2 v7.4s, v23.8h, v1.8h\n"
- "smlal v13.4s, v25.4h, v3.4h\n"
- "smlal2 v21.4s, v31.8h, v1.8h\n"
- "ldr d1, [x3, #0xa8]\n"
- "smlal v20.4s, v25.4h, v2.4h\n"
- "usubl v1.8h, v1.8b, v15.8b\n"
- "smlal v8.4s, v31.4h, v2.4h\n"
- "smlal v17.4s, v30.4h, v2.4h\n"
- "smlal2 v19.4s, v25.8h, v3.8h\n"
- "smlal2 v10.4s, v25.8h, v2.8h\n"
- "ldr d25, [x25, x0]\n"
- "usubl v25.8h, v25.8b, v9.8b\n"
+ "smlal v15.4s, v26.4h, v2.4h\n"
+ "smlal2 v16.4s, v26.8h, v2.8h\n"
+ "smlal2 v5.4s, v31.8h, v1.8h\n"
+ "ldr d26, [x17, x24]\n"
+ "usubl v26.8h, v26.8b, v9.8b\n"
+ "smlal v17.4s, v25.4h, v2.4h\n"
+ "smlal v10.4s, v31.4h, v2.4h\n"
+ "ldr d1, [x23, #0xa8]\n"
+ "usubl v1.8h, v1.8b, v14.8b\n"
+ "smlal v6.4s, v30.4h, v2.4h\n"
+ "smlal2 v8.4s, v25.8h, v2.8h\n"
"smlal2 v7.4s, v31.8h, v2.8h\n"
- "smlal v13.4s, v24.4h, v4.4h\n"
- "smlal2 v21.4s, v30.8h, v2.8h\n"
- "ldr d2, [x3, #0xb0]\n"
- "smlal v20.4s, v24.4h, v3.4h\n"
- "usubl v2.8h, v2.8b, v15.8b\n"
- "smlal v8.4s, v30.4h, v3.4h\n"
- "smlal v17.4s, v28.4h, v3.4h\n"
- "smlal2 v19.4s, v24.8h, v4.8h\n"
- "smlal2 v10.4s, v24.8h, v3.8h\n"
- "ldr d24, [x24, x0]\n"
- "usubl v24.8h, v24.8b, v9.8b\n"
+ "smlal v15.4s, v25.4h, v3.4h\n"
+ "smlal2 v16.4s, v25.8h, v3.8h\n"
+ "smlal2 v5.4s, v30.8h, v2.8h\n"
+ "ldr d25, [x5, x24]\n"
+ "usubl v25.8h, v25.8b, v9.8b\n"
+ "smlal v17.4s, v24.4h, v3.4h\n"
+ "smlal v10.4s, v30.4h, v3.4h\n"
+ "ldr d2, [x23, #0xb0]\n"
+ "usubl v2.8h, v2.8b, v14.8b\n"
+ "smlal v6.4s, v28.4h, v3.4h\n"
+ "smlal2 v8.4s, v24.8h, v3.8h\n"
"smlal2 v7.4s, v30.8h, v3.8h\n"
- "smlal v13.4s, v27.4h, v0.4h\n"
- "smlal2 v21.4s, v28.8h, v3.8h\n"
- "ldr d3, [x3, #0xb8]\n"
- "smlal v20.4s, v22.4h, v4.4h\n"
- "usubl v3.8h, v3.8b, v15.8b\n"
- "smlal v8.4s, v28.4h, v4.4h\n"
- "smlal v17.4s, v26.4h, v4.4h\n"
- "smlal2 v19.4s, v27.8h, v0.8h\n"
- "ldr d27, [x23, x0]\n"
+ "smlal v15.4s, v24.4h, v4.4h\n"
+ "smlal2 v16.4s, v24.8h, v4.8h\n"
+ "ldr d24, [x25, x24]\n"
+ "smlal2 v5.4s, v28.8h, v3.8h\n"
+ "usubl v24.8h, v24.8b, v9.8b\n"
+ "smlal v17.4s, v22.4h, v4.4h\n"
+ "smlal v10.4s, v28.4h, v4.4h\n"
+ "ldr d3, [x23, #0xb8]\n"
+ "usubl v3.8h, v3.8b, v14.8b\n"
+ "smlal v6.4s, v26.4h, v4.4h\n"
"smlal2 v7.4s, v28.8h, v4.8h\n"
+ "smlal v15.4s, v27.4h, v0.4h\n"
+ "smlal2 v16.4s, v27.8h, v0.8h\n"
+ "ldr d27, [x26, x24]\n"
"usubl v27.8h, v27.8b, v9.8b\n"
- "smlal v13.4s, v23.4h, v1.4h\n"
- "smlal2 v10.4s, v22.8h, v4.8h\n"
- "ldr q22, [x8, #0x10]\n"
- "add x8, x8, #0x20\n"
- "smlal2 v21.4s, v26.8h, v4.8h\n"
- "ldr d4, [x3, #0xc0]\n"
- "smlal v20.4s, v23.4h, v0.4h\n"
- "usubl v4.8h, v4.8b, v15.8b\n"
- "smlal v8.4s, v25.4h, v0.4h\n"
- "smlal v17.4s, v24.4h, v0.4h\n"
- "add x3, x3, #0xc8\n"
- "smlal2 v19.4s, v23.8h, v1.8h\n"
+ "smlal2 v8.4s, v22.8h, v4.8h\n"
+ "smlal2 v5.4s, v26.8h, v4.8h\n"
+ "ldr d4, [x23, #0xc0]\n"
+ "usubl v4.8h, v4.8b, v14.8b\n"
+ "smlal v17.4s, v23.4h, v0.4h\n"
+ "smlal v10.4s, v25.4h, v0.4h\n"
+ "add x23, x23, #0xc8\n"
+ "smlal v6.4s, v24.4h, v0.4h\n"
"smlal2 v7.4s, v25.8h, v0.8h\n"
- "ldr d25, [x15, x0]\n"
+ "ldr d25, [x12, x24]\n"
"usubl v25.8h, v25.8b, v9.8b\n"
- "smlal v13.4s, v31.4h, v2.4h\n"
- "smlal2 v10.4s, v23.8h, v0.8h\n"
- "smlal2 v21.4s, v24.8h, v0.8h\n"
- "smlal v20.4s, v31.4h, v1.4h\n"
- "smlal v8.4s, v24.4h, v1.4h\n"
- "smlal v17.4s, v27.4h, v1.4h\n"
- "smlal2 v19.4s, v31.8h, v2.8h\n"
+ "smlal2 v8.4s, v23.8h, v0.8h\n"
+ "smlal2 v5.4s, v24.8h, v0.8h\n"
+ "smlal v15.4s, v23.4h, v1.4h\n"
+ "smlal v17.4s, v31.4h, v1.4h\n"
+ "smlal v10.4s, v24.4h, v1.4h\n"
+ "smlal v6.4s, v27.4h, v1.4h\n"
"smlal2 v7.4s, v24.8h, v1.8h\n"
- "ldr d24, [x21, x0]\n"
+ "ldr d24, [x14, x24]\n"
+ "smlal2 v16.4s, v23.8h, v1.8h\n"
"usubl v24.8h, v24.8b, v9.8b\n"
- "smlal v13.4s, v30.4h, v3.4h\n"
- "smlal2 v10.4s, v31.8h, v1.8h\n"
- "smlal2 v21.4s, v27.8h, v1.8h\n"
- "smlal v20.4s, v30.4h, v2.4h\n"
- "smlal v8.4s, v27.4h, v2.4h\n"
- "smlal v17.4s, v25.4h, v2.4h\n"
- "smlal2 v19.4s, v30.8h, v3.8h\n"
+ "smlal2 v8.4s, v31.8h, v1.8h\n"
+ "smlal2 v5.4s, v27.8h, v1.8h\n"
+ "smlal v15.4s, v31.4h, v2.4h\n"
+ "smlal v17.4s, v30.4h, v2.4h\n"
+ "smlal v10.4s, v27.4h, v2.4h\n"
+ "smlal v6.4s, v25.4h, v2.4h\n"
"smlal2 v7.4s, v27.8h, v2.8h\n"
- "ldr d27, [x20, x0]\n"
+ "ldr d27, [x21, x24]\n"
+ "smlal2 v16.4s, v31.8h, v2.8h\n"
"usubl v27.8h, v27.8b, v9.8b\n"
- "smlal v13.4s, v28.4h, v4.4h\n"
- "smlal2 v10.4s, v30.8h, v2.8h\n"
- "sqrdmulh v13.4s, v13.4s, v18.4s\n"
- "add x0, x0, #0x8\n"
- "smlal2 v21.4s, v25.8h, v2.8h\n"
- "smlal v20.4s, v28.4h, v3.4h\n"
- "and v30.16b, v13.16b, v6.16b\n"
- "smlal v8.4s, v25.4h, v3.4h\n"
- "smlal v17.4s, v24.4h, v3.4h\n"
- "sshr v30.4s, v30.4s, #0x1f\n"
- "smlal2 v19.4s, v28.8h, v4.8h\n"
- "smlal2 v10.4s, v28.8h, v3.8h\n"
- "sqrdmulh v19.4s, v19.4s, v5.4s\n"
+ "smlal2 v8.4s, v30.8h, v2.8h\n"
+ "smlal2 v5.4s, v25.8h, v2.8h\n"
+ "add x24, x24, #0x8\n"
+ "smlal v15.4s, v30.4h, v3.4h\n"
+ "smlal v17.4s, v28.4h, v3.4h\n"
+ "smlal v10.4s, v25.4h, v3.4h\n"
+ "smlal v6.4s, v24.4h, v3.4h\n"
+ "smlal2 v16.4s, v30.8h, v3.8h\n"
+ "smlal2 v8.4s, v28.8h, v3.8h\n"
"smlal2 v7.4s, v25.8h, v3.8h\n"
- "smlal2 v21.4s, v24.8h, v3.8h\n"
- "and v16.16b, v19.16b, v22.16b\n"
- "smlal v20.4s, v26.4h, v4.4h\n"
- "smlal v8.4s, v24.4h, v4.4h\n"
- "sqrdmulh v20.4s, v20.4s, v18.4s\n"
- "smlal v17.4s, v27.4h, v4.4h\n"
- "smlal2 v10.4s, v26.8h, v4.8h\n"
- "sqrdmulh v8.4s, v8.4s, v18.4s\n"
+ "smlal2 v5.4s, v24.8h, v3.8h\n"
+ "smlal v15.4s, v28.4h, v4.4h\n"
+ "smlal v17.4s, v26.4h, v4.4h\n"
+ "sqrdmulh v15.4s, v15.4s, v12.4s\n"
+ "smlal v10.4s, v24.4h, v4.4h\n"
+ "smlal v6.4s, v27.4h, v4.4h\n"
+ "sqrdmulh v17.4s, v17.4s, v12.4s\n"
+ "smlal2 v16.4s, v28.8h, v4.8h\n"
+ "smlal2 v8.4s, v26.8h, v4.8h\n"
+ "sqrdmulh v10.4s, v10.4s, v12.4s\n"
"smlal2 v7.4s, v24.8h, v4.8h\n"
- "smlal2 v21.4s, v27.8h, v4.8h\n"
- "sqrdmulh v17.4s, v17.4s, v18.4s\n"
- "sqadd v13.4s, v13.4s, v30.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "and v0.16b, v20.16b, v6.16b\n"
- "sqrdmulh v10.4s, v10.4s, v5.4s\n"
- "and v18.16b, v8.16b, v6.16b\n"
- "sqrdmulh v7.4s, v7.4s, v5.4s\n"
- "and v30.16b, v17.16b, v6.16b\n"
- "sqrdmulh v21.4s, v21.4s, v5.4s\n"
- "sqadd v19.4s, v19.4s, v16.4s\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "and v26.16b, v10.16b, v22.16b\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "and v23.16b, v7.16b, v22.16b\n"
- "sshr v30.4s, v30.4s, #0x1f\n"
- "and v16.16b, v21.16b, v22.16b\n"
- "sqadd v20.4s, v20.4s, v0.4s\n"
- "sshr v26.4s, v26.4s, #0x1f\n"
- "sqadd v8.4s, v8.4s, v18.4s\n"
+ "smlal2 v5.4s, v27.8h, v4.8h\n"
+ "sqrdmulh v6.4s, v6.4s, v12.4s\n"
+ "and v23.16b, v15.16b, v19.16b\n"
+ "sqrdmulh v16.4s, v16.4s, v20.4s\n"
+ "and v22.16b, v17.16b, v19.16b\n"
+ "sqrdmulh v8.4s, v8.4s, v20.4s\n"
+ "and v21.16b, v10.16b, v19.16b\n"
+ "sqrdmulh v7.4s, v7.4s, v20.4s\n"
+ "and v26.16b, v6.16b, v19.16b\n"
+ "sqrdmulh v5.4s, v5.4s, v20.4s\n"
"sshr v23.4s, v23.4s, #0x1f\n"
- "sqadd v17.4s, v17.4s, v30.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "srshl v13.4s, v13.4s, v6.4s\n"
- "srshl v20.4s, v20.4s, v6.4s\n"
- "sqadd v10.4s, v10.4s, v26.4s\n"
- "srshl v8.4s, v8.4s, v6.4s\n"
- "sqadd v7.4s, v7.4s, v23.4s\n"
- "srshl v17.4s, v17.4s, v6.4s\n"
- "sqadd v21.4s, v21.4s, v16.4s\n"
- "srshl v19.4s, v19.4s, v22.4s\n"
- "sqxtn v13.4h, v13.4s\n"
- "srshl v10.4s, v10.4s, v22.4s\n"
- "sqxtn v20.4h, v20.4s\n"
- "srshl v7.4s, v7.4s, v22.4s\n"
- "sqxtn v8.4h, v8.4s\n"
- "srshl v21.4s, v21.4s, v22.4s\n"
+ "and v4.16b, v16.16b, v29.16b\n"
+ "sshr v22.4s, v22.4s, #0x1f\n"
+ "and v2.16b, v8.16b, v29.16b\n"
+ "sshr v21.4s, v21.4s, #0x1f\n"
+ "and v3.16b, v7.16b, v29.16b\n"
+ "sshr v26.4s, v26.4s, #0x1f\n"
+ "and v25.16b, v5.16b, v29.16b\n"
+ "sqadd v15.4s, v15.4s, v23.4s\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "sqadd v17.4s, v17.4s, v22.4s\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sqadd v10.4s, v10.4s, v21.4s\n"
+ "sshr v3.4s, v3.4s, #0x1f\n"
+ "sqadd v6.4s, v6.4s, v26.4s\n"
+ "sshr v25.4s, v25.4s, #0x1f\n"
+ "srshl v15.4s, v15.4s, v19.4s\n"
+ "sqadd v16.4s, v16.4s, v4.4s\n"
+ "srshl v17.4s, v17.4s, v19.4s\n"
+ "sqadd v8.4s, v8.4s, v2.4s\n"
+ "srshl v10.4s, v10.4s, v19.4s\n"
+ "sqadd v7.4s, v7.4s, v3.4s\n"
+ "srshl v6.4s, v6.4s, v19.4s\n"
+ "sqadd v5.4s, v5.4s, v25.4s\n"
+ "srshl v16.4s, v16.4s, v29.4s\n"
+ "sqxtn v15.4h, v15.4s\n"
+ "srshl v8.4s, v8.4s, v29.4s\n"
"sqxtn v17.4h, v17.4s\n"
- "sqxtn2 v13.8h, v19.4s\n"
- "sqxtn2 v20.8h, v10.4s\n"
- "sqxtn2 v8.8h, v7.4s\n"
- "sqxtn2 v17.8h, v21.4s\n"
- "sqadd v13.8h, v13.8h, v14.8h\n"
- "sqadd v20.8h, v20.8h, v14.8h\n"
- "sqadd v8.8h, v8.8h, v14.8h\n"
- "sqadd v17.8h, v17.8h, v14.8h\n"
- "smax v13.8h, v13.8h, v12.8h\n"
- "smax v20.8h, v20.8h, v12.8h\n"
- "smax v8.8h, v8.8h, v12.8h\n"
- "smax v17.8h, v17.8h, v12.8h\n"
- "smin v13.8h, v13.8h, v11.8h\n"
- "smin v20.8h, v20.8h, v11.8h\n"
- "smin v8.8h, v8.8h, v11.8h\n"
- "smin v17.8h, v17.8h, v11.8h\n"
- "uzp1 v13.16b, v13.16b, v13.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "str d13, [x17, x10]\n"
- "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "srshl v7.4s, v7.4s, v29.4s\n"
+ "sqxtn v10.4h, v10.4s\n"
+ "srshl v5.4s, v5.4s, v29.4s\n"
+ "sqxtn v6.4h, v6.4s\n"
+ "sqxtn2 v15.8h, v16.4s\n"
+ "sqxtn2 v17.8h, v8.4s\n"
+ "sqxtn2 v10.8h, v7.4s\n"
+ "sqxtn2 v6.8h, v5.4s\n"
+ "sqadd v15.8h, v15.8h, v18.8h\n"
+ "sqadd v17.8h, v17.8h, v18.8h\n"
+ "sqadd v10.8h, v10.8h, v18.8h\n"
+ "sqadd v6.8h, v6.8h, v18.8h\n"
+ "smax v15.8h, v15.8h, v11.8h\n"
+ "smax v17.8h, v17.8h, v11.8h\n"
+ "smax v10.8h, v10.8h, v11.8h\n"
+ "smax v6.8h, v6.8h, v11.8h\n"
+ "smin v15.8h, v15.8h, v13.8h\n"
+ "smin v17.8h, v17.8h, v13.8h\n"
+ "smin v10.8h, v10.8h, v13.8h\n"
+ "smin v6.8h, v6.8h, v13.8h\n"
+ "uzp1 v15.16b, v15.16b, v15.16b\n"
"uzp1 v17.16b, v17.16b, v17.16b\n"
- "str d20, [x6, x10]\n"
- "str d8, [x7, x10]\n"
- "str d17, [x16, x10]\n"
- "ldr q13, [x13, #0x0]\n"
- "ldr q19, [x13, #0x10]\n"
- "add x13, x13, #0x20\n"
- "ldr d0, [x3, #0x0]\n"
- "ldr d1, [x3, #0x8]\n"
- "add x10, x10, #0x8\n"
- "str x13, [%x[params], %[offsetof_Params_bias]]\n"
- "ldr d2, [x3, #0x10]\n"
- "ldr d3, [x3, #0x18]\n"
- "mov v20.16b, v13.16b\n"
- "mov v10.16b, v19.16b\n"
- "ldr d4, [x3, #0x20]\n"
- "ldp x9, x28, [x4, #0x0]\n"
- "mov v8.16b, v13.16b\n"
- "mov v7.16b, v19.16b\n"
- "ldp x27, x26, [x4, #0x10]\n"
- "ldp x25, x24, [x4, #0x20]\n"
- "mov v17.16b, v13.16b\n"
- "mov v21.16b, v19.16b\n"
- "ldp x23, x22, [x4, #0x30]\n"
- "ldp x21, x20, [x4, #0x40]\n"
- "usubl v0.8h, v0.8b, v15.8b\n"
- "usubl v1.8h, v1.8b, v15.8b\n"
- "ldr d31, [x9, x0]\n"
- "ldr d30, [x28, x0]\n"
- "usubl v2.8h, v2.8b, v15.8b\n"
- "usubl v3.8h, v3.8b, v15.8b\n"
- "ldr d29, [x27, x0]\n"
- "ldr d28, [x26, x0]\n"
- "usubl v4.8h, v4.8b, v15.8b\n"
+ "str d15, [x16, x22]\n"
+ "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "str d17, [x8, x22]\n"
+ "str d10, [x4, x22]\n"
+ "str d6, [x7, x22]\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr q15, [x19, #0x0]\n"
+ "add x22, x22, #0x8\n"
+ "ldr q16, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr d0, [x23, #0x0]\n"
+ "ldr d1, [x23, #0x8]\n"
+ "ldr d2, [x23, #0x10]\n"
+ "mov v17.16b, v15.16b\n"
+ "mov v8.16b, v16.16b\n"
+ "ldr d3, [x23, #0x18]\n"
+ "ldr d4, [x23, #0x20]\n"
+ "mov v10.16b, v15.16b\n"
+ "mov v7.16b, v16.16b\n"
+ "ldp x28, x6, [x20, #0x0]\n"
+ "ldp x26, x25, [x20, #0x10]\n"
+ "mov v6.16b, v15.16b\n"
+ "mov v5.16b, v16.16b\n"
+ "ldp x5, x2, [x20, #0x20]\n"
+ "ldp x27, x21, [x20, #0x30]\n"
+ "usubl v0.8h, v0.8b, v14.8b\n"
+ "usubl v1.8h, v1.8b, v14.8b\n"
+ "ldp x12, x19, [x20, #0x40]\n"
+ "ldr d31, [x28, x24]\n"
+ "usubl v2.8h, v2.8b, v14.8b\n"
+ "usubl v3.8h, v3.8b, v14.8b\n"
+ "ldr d30, [x6, x24]\n"
+ "ldr d29, [x26, x24]\n"
+ "usubl v4.8h, v4.8b, v14.8b\n"
"usubl v31.8h, v31.8b, v9.8b\n"
- "ldr d27, [x25, x0]\n"
- "ldr d23, [x24, x0]\n"
+ "ldr d28, [x25, x24]\n"
+ "ldr d27, [x5, x24]\n"
"usubl v30.8h, v30.8b, v9.8b\n"
"usubl v29.8h, v29.8b, v9.8b\n"
- "ldr d25, [x23, x0]\n"
- "ldr d24, [x22, x0]\n"
+ "ldr d23, [x2, x24]\n"
+ "ldr d25, [x27, x24]\n"
"usubl v28.8h, v28.8b, v9.8b\n"
"usubl v27.8h, v27.8b, v9.8b\n"
- "ldr d26, [x21, x0]\n"
- "ldr d22, [x20, x0]\n"
+ "ldr d24, [x21, x24]\n"
+ "ldr d26, [x12, x24]\n"
"usubl v23.8h, v23.8b, v9.8b\n"
"usubl v25.8h, v25.8b, v9.8b\n"
+ "ldr d22, [x19, x24]\n"
"usubl v24.8h, v24.8b, v9.8b\n"
"usubl v26.8h, v26.8b, v9.8b\n"
"usubl v22.8h, v22.8b, v9.8b\n"
"bgt 1b\n"
"2:" // Tail
- "ldr q18, [x5, #0x0]\n"
- "ldr q6, [x8, #0x0]\n"
- "smlal v13.4s, v31.4h, v0.4h\n"
- "smlal2 v19.4s, v31.8h, v0.8h\n"
- "ldr q5, [x5, #0x10]\n"
- "smlal v13.4s, v30.4h, v1.4h\n"
- "ldr x20, [x4, #0x50]\n"
- "smlal v20.4s, v30.4h, v0.4h\n"
- "smlal v8.4s, v29.4h, v0.4h\n"
- "smlal v17.4s, v28.4h, v0.4h\n"
- "ldr x22, [x4, #0x58]\n"
- "ldr x21, [x4, #0x60]\n"
- "smlal2 v19.4s, v30.8h, v1.8h\n"
- "smlal2 v10.4s, v30.8h, v0.8h\n"
- "ldr d31, [x20, x0]\n"
+ "smlal v15.4s, v31.4h, v0.4h\n"
+ "smlal2 v16.4s, v31.8h, v0.8h\n"
+ "ldr x19, [x20, #0x50]\n"
+ "ldr d31, [x19, x24]\n"
+ "smlal v17.4s, v30.4h, v0.4h\n"
+ "smlal v10.4s, v29.4h, v0.4h\n"
+ "ldr x15, [x20, #0x58]\n"
"usubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v6.4s, v28.4h, v0.4h\n"
+ "smlal2 v8.4s, v30.8h, v0.8h\n"
+ "ldr x19, [x20, #0x60]\n"
+ "ldr x27, [x20, #0x68]\n"
"smlal2 v7.4s, v29.8h, v0.8h\n"
- "smlal v13.4s, v27.4h, v2.4h\n"
- "ldr x20, [x4, #0x68]\n"
- "ldr x26, [x4, #0x70]\n"
- "smlal2 v21.4s, v28.8h, v0.8h\n"
- "ldr d30, [x22, x0]\n"
- "smlal v20.4s, v27.4h, v1.4h\n"
+ "smlal v15.4s, v30.4h, v1.4h\n"
+ "ldr x5, [x20, #0x70]\n"
+ "ldr x11, [x20, #0x78]\n"
+ "smlal2 v16.4s, v30.8h, v1.8h\n"
+ "smlal2 v5.4s, v28.8h, v0.8h\n"
+ "ldr d30, [x15, x24]\n"
"usubl v30.8h, v30.8b, v9.8b\n"
- "smlal v8.4s, v28.4h, v1.4h\n"
- "smlal v17.4s, v23.4h, v1.4h\n"
- "ldr x25, [x4, #0x78]\n"
- "ldr x23, [x4, #0x80]\n"
- "smlal2 v19.4s, v27.8h, v2.8h\n"
- "smlal2 v10.4s, v27.8h, v1.8h\n"
- "ldr d0, [x3, #0x28]\n"
- "usubl v0.8h, v0.8b, v15.8b\n"
+ "smlal v17.4s, v27.4h, v1.4h\n"
+ "smlal v10.4s, v28.4h, v1.4h\n"
+ "ldr d0, [x23, #0x28]\n"
+ "usubl v0.8h, v0.8b, v14.8b\n"
+ "smlal v6.4s, v23.4h, v1.4h\n"
+ "smlal2 v8.4s, v27.8h, v1.8h\n"
+ "ldr x12, [x20, #0x80]\n"
+ "ldr x26, [x20, #0x88]\n"
"smlal2 v7.4s, v28.8h, v1.8h\n"
- "smlal v13.4s, v25.4h, v3.4h\n"
- "ldr x24, [x4, #0x88]\n"
- "ldr x15, [x4, #0x90]\n"
- "smlal2 v21.4s, v23.8h, v1.8h\n"
- "ldr d27, [x21, x0]\n"
- "smlal v20.4s, v25.4h, v2.4h\n"
+ "smlal v15.4s, v27.4h, v2.4h\n"
+ "ldr x14, [x20, #0x90]\n"
+ "ldr x15, [x20, #0x98]\n"
+ "smlal2 v16.4s, v27.8h, v2.8h\n"
+ "smlal2 v5.4s, v23.8h, v1.8h\n"
+ "ldr d27, [x19, x24]\n"
"usubl v27.8h, v27.8b, v9.8b\n"
- "smlal v8.4s, v23.4h, v2.4h\n"
- "smlal v17.4s, v31.4h, v2.4h\n"
- "ldr x21, [x4, #0x98]\n"
- "ldr x14, [x4, #0xa0]\n"
- "smlal2 v19.4s, v25.8h, v3.8h\n"
- "smlal2 v10.4s, v25.8h, v2.8h\n"
- "ldr d1, [x3, #0x30]\n"
- "usubl v1.8h, v1.8b, v15.8b\n"
+ "smlal v17.4s, v25.4h, v2.4h\n"
+ "smlal v10.4s, v23.4h, v2.4h\n"
+ "ldr d1, [x23, #0x30]\n"
+ "usubl v1.8h, v1.8b, v14.8b\n"
+ "smlal v6.4s, v31.4h, v2.4h\n"
+ "smlal2 v8.4s, v25.8h, v2.8h\n"
+ "ldr x21, [x20, #0xa0]\n"
+ "ldr x2, [x20, #0xa8]\n"
"smlal2 v7.4s, v23.8h, v2.8h\n"
- "smlal v13.4s, v24.4h, v4.4h\n"
- "ldr x13, [x4, #0xa8]\n"
- "ldr x12, [x4, #0xb0]\n"
- "smlal2 v21.4s, v31.8h, v2.8h\n"
- "ldr d25, [x20, x0]\n"
- "smlal v20.4s, v24.4h, v3.4h\n"
+ "smlal v15.4s, v25.4h, v3.4h\n"
+ "ldr x13, [x20, #0xb0]\n"
+ "ldr x9, [x20, #0xb8]\n"
+ "smlal2 v16.4s, v25.8h, v3.8h\n"
+ "smlal2 v5.4s, v31.8h, v2.8h\n"
+ "ldr d25, [x27, x24]\n"
"usubl v25.8h, v25.8b, v9.8b\n"
- "smlal v8.4s, v31.4h, v3.4h\n"
- "smlal v17.4s, v30.4h, v3.4h\n"
- "ldr x20, [x4, #0xb8]\n"
- "ldr x11, [x4, #0xc0]\n"
- "smlal2 v19.4s, v24.8h, v4.8h\n"
- "smlal2 v10.4s, v24.8h, v3.8h\n"
- "ldr d2, [x3, #0x38]\n"
- "usubl v2.8h, v2.8b, v15.8b\n"
+ "smlal v17.4s, v24.4h, v3.4h\n"
+ "smlal v10.4s, v31.4h, v3.4h\n"
+ "ldr d2, [x23, #0x38]\n"
+ "usubl v2.8h, v2.8b, v14.8b\n"
+ "smlal v6.4s, v30.4h, v3.4h\n"
+ "smlal2 v8.4s, v24.8h, v3.8h\n"
+ "ldr x19, [x20, #0xc0]\n"
+ "ldr x28, [x20, #0xc8]\n"
"smlal2 v7.4s, v31.8h, v3.8h\n"
- "smlal v13.4s, v29.4h, v0.4h\n"
- "ldr x22, [x4, #0xc8]\n"
- "ldr x9, [x4, #0xd0]\n"
- "smlal2 v21.4s, v30.8h, v3.8h\n"
- "ldr d24, [x26, x0]\n"
- "smlal v20.4s, v27.4h, v4.4h\n"
+ "smlal v15.4s, v24.4h, v4.4h\n"
+ "ldr x6, [x20, #0xd0]\n"
+ "ldr x27, [x20, #0xd8]\n"
+ "smlal2 v16.4s, v24.8h, v4.8h\n"
+ "smlal2 v5.4s, v30.8h, v3.8h\n"
+ "ldr d24, [x5, x24]\n"
"usubl v24.8h, v24.8b, v9.8b\n"
- "smlal v8.4s, v30.4h, v4.4h\n"
- "smlal v17.4s, v26.4h, v4.4h\n"
- "ldr x28, [x4, #0xd8]\n"
- "ldr x27, [x4, #0xe0]\n"
- "smlal2 v19.4s, v29.8h, v0.8h\n"
- "ldr d3, [x3, #0x40]\n"
- "smlal2 v10.4s, v27.8h, v4.8h\n"
- "ldr d27, [x25, x0]\n"
- "smlal2 v7.4s, v30.8h, v4.8h\n"
- "smlal v13.4s, v28.4h, v1.4h\n"
- "usubl v3.8h, v3.8b, v15.8b\n"
- "ldr x26, [x4, #0xe8]\n"
- "smlal2 v21.4s, v26.8h, v4.8h\n"
- "ldr d4, [x3, #0x48]\n"
- "smlal v20.4s, v28.4h, v0.4h\n"
+ "smlal v17.4s, v27.4h, v4.4h\n"
+ "smlal v10.4s, v30.4h, v4.4h\n"
+ "ldr d3, [x23, #0x40]\n"
+ "usubl v3.8h, v3.8b, v14.8b\n"
+ "smlal v6.4s, v26.4h, v4.4h\n"
+ "smlal2 v8.4s, v27.8h, v4.8h\n"
+ "ldr d27, [x11, x24]\n"
"usubl v27.8h, v27.8b, v9.8b\n"
- "smlal v8.4s, v22.4h, v0.4h\n"
- "smlal v17.4s, v25.4h, v0.4h\n"
- "usubl v4.8h, v4.8b, v15.8b\n"
- "ldr x25, [x4, #0xf0]\n"
- "smlal2 v19.4s, v28.8h, v1.8h\n"
- "smlal2 v10.4s, v28.8h, v0.8h\n"
- "ldr d28, [x24, x0]\n"
- "usubl v28.8h, v28.8b, v9.8b\n"
+ "smlal2 v7.4s, v30.8h, v4.8h\n"
+ "smlal v15.4s, v29.4h, v0.4h\n"
+ "ldr x11, [x20, #0xe0]\n"
+ "ldr x17, [x20, #0xe8]\n"
+ "smlal2 v16.4s, v29.8h, v0.8h\n"
+ "smlal2 v5.4s, v26.8h, v4.8h\n"
+ "ldr d4, [x23, #0x48]\n"
+ "usubl v4.8h, v4.8b, v14.8b\n"
+ "smlal v17.4s, v28.4h, v0.4h\n"
+ "smlal v10.4s, v22.4h, v0.4h\n"
+ "ldr x5, [x20, #0xf0]\n"
+ "ldr x25, [x20, #0xf8]\n"
+ "smlal v6.4s, v25.4h, v0.4h\n"
+ "smlal2 v8.4s, v28.8h, v0.8h\n"
+ "ldr q12, [x10, #0x0]\n"
+ "ldr q19, [x1, #0x0]\n"
"smlal2 v7.4s, v22.8h, v0.8h\n"
- "smlal v13.4s, v23.4h, v2.4h\n"
- "ldr x24, [x4, #0xf8]\n"
- "tst x1, #0x7\n"
- "smlal2 v21.4s, v25.8h, v0.8h\n"
- "ldr d0, [x3, #0x50]\n"
- "smlal v20.4s, v23.4h, v1.4h\n"
- "usubl v0.8h, v0.8b, v15.8b\n"
- "smlal v8.4s, v25.4h, v1.4h\n"
- "smlal v17.4s, v24.4h, v1.4h\n"
- "add x5, x5, #0x20\n"
- "smlal2 v19.4s, v23.8h, v2.8h\n"
- "smlal2 v10.4s, v23.8h, v1.8h\n"
- "ldr d23, [x23, x0]\n"
- "usubl v23.8h, v23.8b, v9.8b\n"
+ "smlal v15.4s, v28.4h, v1.4h\n"
+ "ldr q20, [x10, #0x10]\n"
+ "ldr q29, [x1, #0x10]\n"
+ "smlal2 v16.4s, v28.8h, v1.8h\n"
+ "smlal2 v5.4s, v25.8h, v0.8h\n"
+ "ldr d28, [x26, x24]\n"
+ "ldr d0, [x23, #0x50]\n"
+ "smlal v17.4s, v23.4h, v1.4h\n"
+ "smlal v10.4s, v25.4h, v1.4h\n"
+ "usubl v28.8h, v28.8b, v9.8b\n"
+ "ldr x26, [x20, #0x100]\n"
+ "smlal v6.4s, v24.4h, v1.4h\n"
+ "smlal2 v8.4s, v23.8h, v1.8h\n"
+ "usubl v0.8h, v0.8b, v14.8b\n"
+ "tst x0, #0x7\n"
"smlal2 v7.4s, v25.8h, v1.8h\n"
- "smlal v13.4s, v31.4h, v3.4h\n"
- "ldr x23, [x4, #0x100]\n"
- "smlal2 v21.4s, v24.8h, v1.8h\n"
- "ldr d1, [x3, #0x58]\n"
- "smlal v20.4s, v31.4h, v2.4h\n"
- "usubl v1.8h, v1.8b, v15.8b\n"
- "smlal v8.4s, v24.4h, v2.4h\n"
- "smlal v17.4s, v27.4h, v2.4h\n"
- "smlal2 v19.4s, v31.8h, v3.8h\n"
- "smlal2 v10.4s, v31.8h, v2.8h\n"
- "ldr d31, [x15, x0]\n"
- "usubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v15.4s, v23.4h, v2.4h\n"
+ "add x10, x10, #0x20\n"
+ "add x1, x1, #0x20\n"
+ "smlal2 v16.4s, v23.8h, v2.8h\n"
+ "ldr d23, [x12, x24]\n"
+ "smlal2 v5.4s, v24.8h, v1.8h\n"
+ "usubl v23.8h, v23.8b, v9.8b\n"
+ "smlal v17.4s, v31.4h, v2.4h\n"
+ "smlal v10.4s, v24.4h, v2.4h\n"
+ "ldr d1, [x23, #0x58]\n"
+ "usubl v1.8h, v1.8b, v14.8b\n"
+ "smlal v6.4s, v27.4h, v2.4h\n"
+ "smlal2 v8.4s, v31.8h, v2.8h\n"
+ "ldr x12, [x20, #0x108]\n"
"smlal2 v7.4s, v24.8h, v2.8h\n"
- "smlal v13.4s, v30.4h, v4.4h\n"
- "ldr x15, [x4, #0x108]\n"
- "smlal2 v21.4s, v27.8h, v2.8h\n"
- "ldr d2, [x3, #0x60]\n"
- "smlal v20.4s, v30.4h, v3.4h\n"
- "usubl v2.8h, v2.8b, v15.8b\n"
- "smlal v8.4s, v27.4h, v3.4h\n"
- "smlal v17.4s, v23.4h, v3.4h\n"
- "smlal2 v19.4s, v30.8h, v4.8h\n"
- "smlal2 v10.4s, v30.8h, v3.8h\n"
- "ldr d30, [x21, x0]\n"
- "usubl v30.8h, v30.8b, v9.8b\n"
+ "smlal v15.4s, v31.4h, v3.4h\n"
+ "smlal2 v16.4s, v31.8h, v3.8h\n"
+ "smlal2 v5.4s, v27.8h, v2.8h\n"
+ "ldr d31, [x14, x24]\n"
+ "usubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v17.4s, v30.4h, v3.4h\n"
+ "smlal v10.4s, v27.4h, v3.4h\n"
+ "ldr d2, [x23, #0x60]\n"
+ "usubl v2.8h, v2.8b, v14.8b\n"
+ "smlal v6.4s, v23.4h, v3.4h\n"
+ "smlal2 v8.4s, v30.8h, v3.8h\n"
+ "ldr x14, [x20, #0x110]\n"
"smlal2 v7.4s, v27.8h, v3.8h\n"
- "smlal v13.4s, v22.4h, v0.4h\n"
- "ldr x21, [x4, #0x110]\n"
- "smlal2 v21.4s, v23.8h, v3.8h\n"
- "ldr d3, [x3, #0x68]\n"
- "smlal v20.4s, v26.4h, v4.4h\n"
- "usubl v3.8h, v3.8b, v15.8b\n"
- "smlal v8.4s, v23.4h, v4.4h\n"
- "smlal v17.4s, v28.4h, v4.4h\n"
- "smlal2 v19.4s, v22.8h, v0.8h\n"
- "ldr d22, [x20, x0]\n"
- "smlal2 v10.4s, v26.8h, v4.8h\n"
- "ldr d26, [x14, x0]\n"
- "smlal2 v7.4s, v23.8h, v4.8h\n"
- "smlal v13.4s, v25.4h, v1.4h\n"
+ "smlal v15.4s, v30.4h, v4.4h\n"
+ "smlal2 v16.4s, v30.8h, v4.8h\n"
+ "ldr d30, [x15, x24]\n"
+ "smlal2 v5.4s, v23.8h, v3.8h\n"
+ "usubl v30.8h, v30.8b, v9.8b\n"
+ "smlal v17.4s, v26.4h, v4.4h\n"
+ "smlal v10.4s, v23.4h, v4.4h\n"
+ "ldr d3, [x23, #0x68]\n"
+ "usubl v3.8h, v3.8b, v14.8b\n"
+ "smlal v6.4s, v28.4h, v4.4h\n"
+ "smlal2 v8.4s, v26.8h, v4.8h\n"
+ "ldr d26, [x21, x24]\n"
"usubl v26.8h, v26.8b, v9.8b\n"
- "ldr x20, [x4, #0x118]\n"
- "smlal2 v21.4s, v28.8h, v4.8h\n"
- "ldr d4, [x3, #0x70]\n"
- "smlal v20.4s, v25.4h, v0.4h\n"
- "usubl v4.8h, v4.8b, v15.8b\n"
- "smlal v8.4s, v31.4h, v0.4h\n"
- "smlal v17.4s, v30.4h, v0.4h\n"
+ "smlal2 v7.4s, v23.8h, v4.8h\n"
+ "smlal v15.4s, v22.4h, v0.4h\n"
+ "ldr x21, [x20, #0x118]\n"
+ "smlal2 v16.4s, v22.8h, v0.8h\n"
+ "smlal2 v5.4s, v28.8h, v4.8h\n"
+ "ldr d4, [x23, #0x70]\n"
+ "ldr d22, [x9, x24]\n"
+ "smlal v17.4s, v25.4h, v0.4h\n"
+ "smlal v10.4s, v31.4h, v0.4h\n"
+ "usubl v4.8h, v4.8b, v14.8b\n"
+ "smlal v6.4s, v30.4h, v0.4h\n"
+ "smlal2 v8.4s, v25.8h, v0.8h\n"
"usubl v22.8h, v22.8b, v9.8b\n"
- "smlal2 v19.4s, v25.8h, v1.8h\n"
- "smlal2 v10.4s, v25.8h, v0.8h\n"
- "ldr d25, [x13, x0]\n"
- "usubl v25.8h, v25.8b, v9.8b\n"
"smlal2 v7.4s, v31.8h, v0.8h\n"
- "smlal v13.4s, v24.4h, v2.4h\n"
- "smlal2 v21.4s, v30.8h, v0.8h\n"
- "ldr d0, [x3, #0x78]\n"
- "smlal v20.4s, v24.4h, v1.4h\n"
- "usubl v0.8h, v0.8b, v15.8b\n"
- "smlal v8.4s, v30.4h, v1.4h\n"
- "smlal v17.4s, v26.4h, v1.4h\n"
- "smlal2 v19.4s, v24.8h, v2.8h\n"
- "smlal2 v10.4s, v24.8h, v1.8h\n"
- "ldr d24, [x12, x0]\n"
- "usubl v24.8h, v24.8b, v9.8b\n"
+ "smlal v15.4s, v25.4h, v1.4h\n"
+ "smlal2 v16.4s, v25.8h, v1.8h\n"
+ "ldr d25, [x2, x24]\n"
+ "smlal2 v5.4s, v30.8h, v0.8h\n"
+ "usubl v25.8h, v25.8b, v9.8b\n"
+ "smlal v17.4s, v24.4h, v1.4h\n"
+ "smlal v10.4s, v30.4h, v1.4h\n"
+ "ldr d0, [x23, #0x78]\n"
+ "usubl v0.8h, v0.8b, v14.8b\n"
+ "smlal v6.4s, v26.4h, v1.4h\n"
+ "smlal2 v8.4s, v24.8h, v1.8h\n"
"smlal2 v7.4s, v30.8h, v1.8h\n"
- "smlal v13.4s, v27.4h, v3.4h\n"
- "smlal2 v21.4s, v26.8h, v1.8h\n"
- "ldr d1, [x3, #0x80]\n"
- "smlal v20.4s, v27.4h, v2.4h\n"
- "usubl v1.8h, v1.8b, v15.8b\n"
- "smlal v8.4s, v26.4h, v2.4h\n"
- "smlal v17.4s, v25.4h, v2.4h\n"
- "smlal2 v19.4s, v27.8h, v3.8h\n"
- "smlal2 v10.4s, v27.8h, v2.8h\n"
- "ldr d27, [x11, x0]\n"
- "usubl v27.8h, v27.8b, v9.8b\n"
+ "smlal v15.4s, v24.4h, v2.4h\n"
+ "smlal2 v16.4s, v24.8h, v2.8h\n"
+ "ldr d24, [x13, x24]\n"
+ "smlal2 v5.4s, v26.8h, v1.8h\n"
+ "usubl v24.8h, v24.8b, v9.8b\n"
+ "smlal v17.4s, v27.4h, v2.4h\n"
+ "smlal v10.4s, v26.4h, v2.4h\n"
+ "ldr d1, [x23, #0x80]\n"
+ "usubl v1.8h, v1.8b, v14.8b\n"
+ "smlal v6.4s, v25.4h, v2.4h\n"
+ "smlal2 v8.4s, v27.8h, v2.8h\n"
"smlal2 v7.4s, v26.8h, v2.8h\n"
- "smlal v13.4s, v23.4h, v4.4h\n"
- "smlal2 v21.4s, v25.8h, v2.8h\n"
- "ldr d2, [x3, #0x88]\n"
- "smlal v20.4s, v23.4h, v3.4h\n"
- "usubl v2.8h, v2.8b, v15.8b\n"
- "smlal v8.4s, v25.4h, v3.4h\n"
- "smlal v17.4s, v24.4h, v3.4h\n"
- "smlal2 v19.4s, v23.8h, v4.8h\n"
- "smlal2 v10.4s, v23.8h, v3.8h\n"
- "ldr d23, [x22, x0]\n"
- "usubl v23.8h, v23.8b, v9.8b\n"
+ "smlal v15.4s, v27.4h, v3.4h\n"
+ "smlal2 v16.4s, v27.8h, v3.8h\n"
+ "smlal2 v5.4s, v25.8h, v2.8h\n"
+ "ldr d27, [x19, x24]\n"
+ "usubl v27.8h, v27.8b, v9.8b\n"
+ "smlal v17.4s, v23.4h, v3.4h\n"
+ "smlal v10.4s, v25.4h, v3.4h\n"
+ "ldr d2, [x23, #0x88]\n"
+ "usubl v2.8h, v2.8b, v14.8b\n"
+ "smlal v6.4s, v24.4h, v3.4h\n"
+ "smlal2 v8.4s, v23.8h, v3.8h\n"
"smlal2 v7.4s, v25.8h, v3.8h\n"
- "smlal v13.4s, v31.4h, v0.4h\n"
- "smlal2 v21.4s, v24.8h, v3.8h\n"
- "ldr d3, [x3, #0x90]\n"
- "smlal v20.4s, v28.4h, v4.4h\n"
- "usubl v3.8h, v3.8b, v15.8b\n"
- "smlal v8.4s, v24.4h, v4.4h\n"
- "smlal v17.4s, v22.4h, v4.4h\n"
- "smlal2 v19.4s, v31.8h, v0.8h\n"
- "ldr d31, [x9, x0]\n"
- "smlal2 v10.4s, v28.8h, v4.8h\n"
- "ldr d28, [x27, x0]\n"
+ "smlal v15.4s, v23.4h, v4.4h\n"
+ "smlal2 v16.4s, v23.8h, v4.8h\n"
+ "ldr d23, [x28, x24]\n"
+ "smlal2 v5.4s, v24.8h, v3.8h\n"
+ "usubl v23.8h, v23.8b, v9.8b\n"
+ "smlal v17.4s, v28.4h, v4.4h\n"
+ "smlal v10.4s, v24.4h, v4.4h\n"
+ "ldr d3, [x23, #0x90]\n"
+ "usubl v3.8h, v3.8b, v14.8b\n"
+ "smlal v6.4s, v22.4h, v4.4h\n"
+ "smlal2 v8.4s, v28.8h, v4.8h\n"
+ "ldr d28, [x11, x24]\n"
+ "usubl v28.8h, v28.8b, v9.8b\n"
"smlal2 v7.4s, v24.8h, v4.8h\n"
- "smlal v13.4s, v30.4h, v1.4h\n"
+ "smlal v15.4s, v31.4h, v0.4h\n"
+ "smlal2 v16.4s, v31.8h, v0.8h\n"
+ "ldr d31, [x6, x24]\n"
+ "smlal2 v5.4s, v22.8h, v4.8h\n"
"usubl v31.8h, v31.8b, v9.8b\n"
- "smlal2 v21.4s, v22.8h, v4.8h\n"
- "ldr d4, [x3, #0x98]\n"
- "smlal v20.4s, v30.4h, v0.4h\n"
- "usubl v4.8h, v4.8b, v15.8b\n"
- "smlal v8.4s, v27.4h, v0.4h\n"
- "smlal v17.4s, v23.4h, v0.4h\n"
- "usubl v28.8h, v28.8b, v9.8b\n"
- "smlal2 v19.4s, v30.8h, v1.8h\n"
- "smlal2 v10.4s, v30.8h, v0.8h\n"
- "ldr d30, [x28, x0]\n"
- "usubl v30.8h, v30.8b, v9.8b\n"
+ "smlal v17.4s, v30.4h, v0.4h\n"
+ "smlal v10.4s, v27.4h, v0.4h\n"
+ "ldr d4, [x23, #0x98]\n"
+ "usubl v4.8h, v4.8b, v14.8b\n"
+ "smlal v6.4s, v23.4h, v0.4h\n"
+ "smlal2 v8.4s, v30.8h, v0.8h\n"
"smlal2 v7.4s, v27.8h, v0.8h\n"
- "smlal v13.4s, v26.4h, v2.4h\n"
- "smlal2 v21.4s, v23.8h, v0.8h\n"
- "ldr d0, [x3, #0xa0]\n"
- "smlal v20.4s, v26.4h, v1.4h\n"
- "usubl v0.8h, v0.8b, v15.8b\n"
- "smlal v8.4s, v23.4h, v1.4h\n"
- "smlal v17.4s, v31.4h, v1.4h\n"
- "smlal2 v19.4s, v26.8h, v2.8h\n"
- "smlal2 v10.4s, v26.8h, v1.8h\n"
- "ldr d26, [x26, x0]\n"
- "usubl v26.8h, v26.8b, v9.8b\n"
+ "smlal v15.4s, v30.4h, v1.4h\n"
+ "smlal2 v16.4s, v30.8h, v1.8h\n"
+ "ldr d30, [x27, x24]\n"
+ "smlal2 v5.4s, v23.8h, v0.8h\n"
+ "usubl v30.8h, v30.8b, v9.8b\n"
+ "smlal v17.4s, v26.4h, v1.4h\n"
+ "smlal v10.4s, v23.4h, v1.4h\n"
+ "ldr d0, [x23, #0xa0]\n"
+ "usubl v0.8h, v0.8b, v14.8b\n"
+ "smlal v6.4s, v31.4h, v1.4h\n"
+ "smlal2 v8.4s, v26.8h, v1.8h\n"
"smlal2 v7.4s, v23.8h, v1.8h\n"
- "smlal v13.4s, v25.4h, v3.4h\n"
- "smlal2 v21.4s, v31.8h, v1.8h\n"
- "ldr d1, [x3, #0xa8]\n"
- "smlal v20.4s, v25.4h, v2.4h\n"
- "usubl v1.8h, v1.8b, v15.8b\n"
- "smlal v8.4s, v31.4h, v2.4h\n"
- "smlal v17.4s, v30.4h, v2.4h\n"
- "smlal2 v19.4s, v25.8h, v3.8h\n"
- "smlal2 v10.4s, v25.8h, v2.8h\n"
- "ldr d25, [x25, x0]\n"
- "usubl v25.8h, v25.8b, v9.8b\n"
+ "smlal v15.4s, v26.4h, v2.4h\n"
+ "smlal2 v16.4s, v26.8h, v2.8h\n"
+ "smlal2 v5.4s, v31.8h, v1.8h\n"
+ "ldr d26, [x17, x24]\n"
+ "usubl v26.8h, v26.8b, v9.8b\n"
+ "smlal v17.4s, v25.4h, v2.4h\n"
+ "smlal v10.4s, v31.4h, v2.4h\n"
+ "ldr d1, [x23, #0xa8]\n"
+ "usubl v1.8h, v1.8b, v14.8b\n"
+ "smlal v6.4s, v30.4h, v2.4h\n"
+ "smlal2 v8.4s, v25.8h, v2.8h\n"
"smlal2 v7.4s, v31.8h, v2.8h\n"
- "smlal v13.4s, v24.4h, v4.4h\n"
- "smlal2 v21.4s, v30.8h, v2.8h\n"
- "ldr d2, [x3, #0xb0]\n"
- "smlal v20.4s, v24.4h, v3.4h\n"
- "usubl v2.8h, v2.8b, v15.8b\n"
- "smlal v8.4s, v30.4h, v3.4h\n"
- "smlal v17.4s, v28.4h, v3.4h\n"
- "smlal2 v19.4s, v24.8h, v4.8h\n"
- "smlal2 v10.4s, v24.8h, v3.8h\n"
- "ldr d24, [x24, x0]\n"
- "usubl v24.8h, v24.8b, v9.8b\n"
+ "smlal v15.4s, v25.4h, v3.4h\n"
+ "smlal2 v16.4s, v25.8h, v3.8h\n"
+ "smlal2 v5.4s, v30.8h, v2.8h\n"
+ "ldr d25, [x5, x24]\n"
+ "usubl v25.8h, v25.8b, v9.8b\n"
+ "smlal v17.4s, v24.4h, v3.4h\n"
+ "smlal v10.4s, v30.4h, v3.4h\n"
+ "ldr d2, [x23, #0xb0]\n"
+ "usubl v2.8h, v2.8b, v14.8b\n"
+ "smlal v6.4s, v28.4h, v3.4h\n"
+ "smlal2 v8.4s, v24.8h, v3.8h\n"
"smlal2 v7.4s, v30.8h, v3.8h\n"
- "smlal v13.4s, v27.4h, v0.4h\n"
- "smlal2 v21.4s, v28.8h, v3.8h\n"
- "ldr d3, [x3, #0xb8]\n"
- "smlal v20.4s, v22.4h, v4.4h\n"
- "usubl v3.8h, v3.8b, v15.8b\n"
- "smlal v8.4s, v28.4h, v4.4h\n"
- "smlal v17.4s, v26.4h, v4.4h\n"
- "smlal2 v19.4s, v27.8h, v0.8h\n"
- "ldr d27, [x23, x0]\n"
+ "smlal v15.4s, v24.4h, v4.4h\n"
+ "smlal2 v16.4s, v24.8h, v4.8h\n"
+ "ldr d24, [x25, x24]\n"
+ "smlal2 v5.4s, v28.8h, v3.8h\n"
+ "usubl v24.8h, v24.8b, v9.8b\n"
+ "smlal v17.4s, v22.4h, v4.4h\n"
+ "smlal v10.4s, v28.4h, v4.4h\n"
+ "ldr d3, [x23, #0xb8]\n"
+ "usubl v3.8h, v3.8b, v14.8b\n"
+ "smlal v6.4s, v26.4h, v4.4h\n"
"smlal2 v7.4s, v28.8h, v4.8h\n"
+ "smlal v15.4s, v27.4h, v0.4h\n"
+ "smlal2 v16.4s, v27.8h, v0.8h\n"
+ "ldr d27, [x26, x24]\n"
"usubl v27.8h, v27.8b, v9.8b\n"
- "smlal v13.4s, v23.4h, v1.4h\n"
- "smlal2 v10.4s, v22.8h, v4.8h\n"
- "ldr q22, [x8, #0x10]\n"
- "add x8, x8, #0x20\n"
- "smlal2 v21.4s, v26.8h, v4.8h\n"
- "ldr d4, [x3, #0xc0]\n"
- "smlal v20.4s, v23.4h, v0.4h\n"
- "usubl v4.8h, v4.8b, v15.8b\n"
- "smlal v8.4s, v25.4h, v0.4h\n"
- "smlal v17.4s, v24.4h, v0.4h\n"
- "smlal2 v19.4s, v23.8h, v1.8h\n"
+ "smlal2 v8.4s, v22.8h, v4.8h\n"
+ "smlal2 v5.4s, v26.8h, v4.8h\n"
+ "ldr d4, [x23, #0xc0]\n"
+ "usubl v4.8h, v4.8b, v14.8b\n"
+ "smlal v17.4s, v23.4h, v0.4h\n"
+ "smlal v10.4s, v25.4h, v0.4h\n"
+ "smlal v6.4s, v24.4h, v0.4h\n"
"smlal2 v7.4s, v25.8h, v0.8h\n"
- "ldr d25, [x15, x0]\n"
+ "ldr d25, [x12, x24]\n"
"usubl v25.8h, v25.8b, v9.8b\n"
- "smlal v13.4s, v31.4h, v2.4h\n"
- "smlal2 v10.4s, v23.8h, v0.8h\n"
- "smlal2 v21.4s, v24.8h, v0.8h\n"
- "smlal v20.4s, v31.4h, v1.4h\n"
- "smlal v8.4s, v24.4h, v1.4h\n"
- "smlal v17.4s, v27.4h, v1.4h\n"
- "smlal2 v19.4s, v31.8h, v2.8h\n"
+ "smlal2 v8.4s, v23.8h, v0.8h\n"
+ "smlal2 v5.4s, v24.8h, v0.8h\n"
+ "smlal v15.4s, v23.4h, v1.4h\n"
+ "smlal v17.4s, v31.4h, v1.4h\n"
+ "smlal v10.4s, v24.4h, v1.4h\n"
+ "smlal v6.4s, v27.4h, v1.4h\n"
"smlal2 v7.4s, v24.8h, v1.8h\n"
- "ldr d24, [x21, x0]\n"
+ "ldr d24, [x14, x24]\n"
+ "smlal2 v16.4s, v23.8h, v1.8h\n"
"usubl v24.8h, v24.8b, v9.8b\n"
- "smlal v13.4s, v30.4h, v3.4h\n"
- "smlal2 v10.4s, v31.8h, v1.8h\n"
- "smlal2 v21.4s, v27.8h, v1.8h\n"
- "smlal v20.4s, v30.4h, v2.4h\n"
- "smlal v8.4s, v27.4h, v2.4h\n"
- "smlal v17.4s, v25.4h, v2.4h\n"
- "smlal2 v19.4s, v30.8h, v3.8h\n"
+ "smlal2 v8.4s, v31.8h, v1.8h\n"
+ "smlal2 v5.4s, v27.8h, v1.8h\n"
+ "smlal v15.4s, v31.4h, v2.4h\n"
+ "smlal v17.4s, v30.4h, v2.4h\n"
+ "smlal v10.4s, v27.4h, v2.4h\n"
+ "smlal v6.4s, v25.4h, v2.4h\n"
"smlal2 v7.4s, v27.8h, v2.8h\n"
- "ldr d27, [x20, x0]\n"
+ "ldr d27, [x21, x24]\n"
+ "smlal2 v16.4s, v31.8h, v2.8h\n"
"usubl v27.8h, v27.8b, v9.8b\n"
- "smlal v13.4s, v28.4h, v4.4h\n"
- "smlal2 v10.4s, v30.8h, v2.8h\n"
- "sqrdmulh v13.4s, v13.4s, v18.4s\n"
- "add x0, x0, #0x8\n"
- "smlal2 v21.4s, v25.8h, v2.8h\n"
- "smlal v20.4s, v28.4h, v3.4h\n"
- "and v30.16b, v13.16b, v6.16b\n"
- "smlal v8.4s, v25.4h, v3.4h\n"
- "smlal v17.4s, v24.4h, v3.4h\n"
- "sshr v30.4s, v30.4s, #0x1f\n"
- "smlal2 v19.4s, v28.8h, v4.8h\n"
- "smlal2 v10.4s, v28.8h, v3.8h\n"
- "sqrdmulh v19.4s, v19.4s, v5.4s\n"
+ "smlal2 v8.4s, v30.8h, v2.8h\n"
+ "smlal2 v5.4s, v25.8h, v2.8h\n"
+ "add x24, x24, #0x8\n"
+ "smlal v15.4s, v30.4h, v3.4h\n"
+ "smlal v17.4s, v28.4h, v3.4h\n"
+ "smlal v10.4s, v25.4h, v3.4h\n"
+ "smlal v6.4s, v24.4h, v3.4h\n"
+ "smlal2 v16.4s, v30.8h, v3.8h\n"
+ "smlal2 v8.4s, v28.8h, v3.8h\n"
"smlal2 v7.4s, v25.8h, v3.8h\n"
- "smlal2 v21.4s, v24.8h, v3.8h\n"
- "and v16.16b, v19.16b, v22.16b\n"
- "smlal v20.4s, v26.4h, v4.4h\n"
- "smlal v8.4s, v24.4h, v4.4h\n"
- "sqrdmulh v20.4s, v20.4s, v18.4s\n"
- "smlal v17.4s, v27.4h, v4.4h\n"
- "smlal2 v10.4s, v26.8h, v4.8h\n"
- "sqrdmulh v8.4s, v8.4s, v18.4s\n"
+ "smlal2 v5.4s, v24.8h, v3.8h\n"
+ "smlal v15.4s, v28.4h, v4.4h\n"
+ "smlal v17.4s, v26.4h, v4.4h\n"
+ "sqrdmulh v15.4s, v15.4s, v12.4s\n"
+ "smlal v10.4s, v24.4h, v4.4h\n"
+ "smlal v6.4s, v27.4h, v4.4h\n"
+ "sqrdmulh v17.4s, v17.4s, v12.4s\n"
+ "smlal2 v16.4s, v28.8h, v4.8h\n"
+ "smlal2 v8.4s, v26.8h, v4.8h\n"
+ "sqrdmulh v10.4s, v10.4s, v12.4s\n"
"smlal2 v7.4s, v24.8h, v4.8h\n"
- "smlal2 v21.4s, v27.8h, v4.8h\n"
- "sqrdmulh v17.4s, v17.4s, v18.4s\n"
- "sqadd v13.4s, v13.4s, v30.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "and v0.16b, v20.16b, v6.16b\n"
- "sqrdmulh v10.4s, v10.4s, v5.4s\n"
- "and v18.16b, v8.16b, v6.16b\n"
- "sqrdmulh v7.4s, v7.4s, v5.4s\n"
- "and v30.16b, v17.16b, v6.16b\n"
- "sqrdmulh v21.4s, v21.4s, v5.4s\n"
- "sqadd v19.4s, v19.4s, v16.4s\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "and v26.16b, v10.16b, v22.16b\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "and v23.16b, v7.16b, v22.16b\n"
- "sshr v30.4s, v30.4s, #0x1f\n"
- "and v16.16b, v21.16b, v22.16b\n"
- "sqadd v20.4s, v20.4s, v0.4s\n"
- "sshr v26.4s, v26.4s, #0x1f\n"
- "sqadd v8.4s, v8.4s, v18.4s\n"
+ "smlal2 v5.4s, v27.8h, v4.8h\n"
+ "sqrdmulh v6.4s, v6.4s, v12.4s\n"
+ "and v23.16b, v15.16b, v19.16b\n"
+ "sqrdmulh v16.4s, v16.4s, v20.4s\n"
+ "and v22.16b, v17.16b, v19.16b\n"
+ "sqrdmulh v8.4s, v8.4s, v20.4s\n"
+ "and v21.16b, v10.16b, v19.16b\n"
+ "sqrdmulh v7.4s, v7.4s, v20.4s\n"
+ "and v26.16b, v6.16b, v19.16b\n"
+ "sqrdmulh v5.4s, v5.4s, v20.4s\n"
"sshr v23.4s, v23.4s, #0x1f\n"
- "sqadd v17.4s, v17.4s, v30.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "srshl v13.4s, v13.4s, v6.4s\n"
- "srshl v20.4s, v20.4s, v6.4s\n"
- "sqadd v10.4s, v10.4s, v26.4s\n"
- "srshl v8.4s, v8.4s, v6.4s\n"
- "sqadd v7.4s, v7.4s, v23.4s\n"
- "srshl v17.4s, v17.4s, v6.4s\n"
- "sqadd v21.4s, v21.4s, v16.4s\n"
- "srshl v19.4s, v19.4s, v22.4s\n"
- "sqxtn v13.4h, v13.4s\n"
- "srshl v10.4s, v10.4s, v22.4s\n"
- "sqxtn v20.4h, v20.4s\n"
- "srshl v7.4s, v7.4s, v22.4s\n"
- "sqxtn v8.4h, v8.4s\n"
- "srshl v21.4s, v21.4s, v22.4s\n"
+ "and v4.16b, v16.16b, v29.16b\n"
+ "sshr v22.4s, v22.4s, #0x1f\n"
+ "and v2.16b, v8.16b, v29.16b\n"
+ "sshr v21.4s, v21.4s, #0x1f\n"
+ "and v3.16b, v7.16b, v29.16b\n"
+ "sshr v26.4s, v26.4s, #0x1f\n"
+ "and v25.16b, v5.16b, v29.16b\n"
+ "sqadd v15.4s, v15.4s, v23.4s\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "sqadd v17.4s, v17.4s, v22.4s\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sqadd v10.4s, v10.4s, v21.4s\n"
+ "sshr v3.4s, v3.4s, #0x1f\n"
+ "sqadd v6.4s, v6.4s, v26.4s\n"
+ "sshr v25.4s, v25.4s, #0x1f\n"
+ "srshl v15.4s, v15.4s, v19.4s\n"
+ "sqadd v16.4s, v16.4s, v4.4s\n"
+ "srshl v17.4s, v17.4s, v19.4s\n"
+ "sqadd v8.4s, v8.4s, v2.4s\n"
+ "srshl v10.4s, v10.4s, v19.4s\n"
+ "sqadd v7.4s, v7.4s, v3.4s\n"
+ "srshl v6.4s, v6.4s, v19.4s\n"
+ "sqadd v5.4s, v5.4s, v25.4s\n"
+ "srshl v16.4s, v16.4s, v29.4s\n"
+ "sqxtn v15.4h, v15.4s\n"
+ "srshl v8.4s, v8.4s, v29.4s\n"
"sqxtn v17.4h, v17.4s\n"
- "sqxtn2 v13.8h, v19.4s\n"
- "sqxtn2 v20.8h, v10.4s\n"
- "sqxtn2 v8.8h, v7.4s\n"
- "sqxtn2 v17.8h, v21.4s\n"
- "sqadd v13.8h, v13.8h, v14.8h\n"
- "sqadd v20.8h, v20.8h, v14.8h\n"
- "sqadd v8.8h, v8.8h, v14.8h\n"
- "sqadd v17.8h, v17.8h, v14.8h\n"
- "smax v13.8h, v13.8h, v12.8h\n"
- "smax v20.8h, v20.8h, v12.8h\n"
- "smax v8.8h, v8.8h, v12.8h\n"
- "smax v17.8h, v17.8h, v12.8h\n"
- "smin v13.8h, v13.8h, v11.8h\n"
- "smin v20.8h, v20.8h, v11.8h\n"
- "smin v8.8h, v8.8h, v11.8h\n"
- "smin v17.8h, v17.8h, v11.8h\n"
- "uzp1 v13.16b, v13.16b, v13.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "str d13, [x17, x10]\n"
- "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "srshl v7.4s, v7.4s, v29.4s\n"
+ "sqxtn v10.4h, v10.4s\n"
+ "srshl v5.4s, v5.4s, v29.4s\n"
+ "sqxtn v6.4h, v6.4s\n"
+ "sqxtn2 v15.8h, v16.4s\n"
+ "sqxtn2 v17.8h, v8.4s\n"
+ "sqxtn2 v10.8h, v7.4s\n"
+ "sqxtn2 v6.8h, v5.4s\n"
+ "sqadd v15.8h, v15.8h, v18.8h\n"
+ "sqadd v17.8h, v17.8h, v18.8h\n"
+ "sqadd v10.8h, v10.8h, v18.8h\n"
+ "sqadd v6.8h, v6.8h, v18.8h\n"
+ "smax v15.8h, v15.8h, v11.8h\n"
+ "smax v17.8h, v17.8h, v11.8h\n"
+ "smax v10.8h, v10.8h, v11.8h\n"
+ "smax v6.8h, v6.8h, v11.8h\n"
+ "smin v15.8h, v15.8h, v13.8h\n"
+ "smin v17.8h, v17.8h, v13.8h\n"
+ "smin v10.8h, v10.8h, v13.8h\n"
+ "smin v6.8h, v6.8h, v13.8h\n"
+ "uzp1 v15.16b, v15.16b, v15.16b\n"
"uzp1 v17.16b, v17.16b, v17.16b\n"
- "str d20, [x6, x10]\n"
- "str d8, [x7, x10]\n"
- "str d17, [x16, x10]\n"
- "add x10, x10, #0x8\n"
+ "str d15, [x16, x22]\n"
+ "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "str d17, [x8, x22]\n"
+ "str d10, [x4, x22]\n"
+ "str d6, [x7, x22]\n"
+ "add x22, x22, #0x8\n"
"beq 124f\n"
- "add x3, x3, #0xc8\n"
+ "add x23, x23, #0xc8\n"
"3:" // Oddments
- "ldr x13, [%x[params], %[offsetof_Params_bias]]\n"
- "tbz x1, #2, 5f\n"
- "ld1 { v13.4s }, [x13], #0x10\n"
- "tbz x1, #1, 4f\n"
- "ld1 { v19.d }[0], [x13], #0x8\n"
- "tbz x1, #0, 7f\n"
- "ld1 { v19.s }[2], [x13]\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "tbz x0, #2, 5f\n"
+ "ld1 { v15.4s }, [x19], #0x10\n"
+ "tbz x0, #1, 4f\n"
+ "ld1 { v16.d }[0], [x19], #0x8\n"
+ "tbz x0, #0, 7f\n"
+ "ld1 { v16.s }[2], [x19]\n"
"b 7f\n"
"4:" // Oddments: Load bias: Bit 2: Bit 1: Unset
- "tbz x1, #0, 7f\n"
- "ld1 { v19.s }[0], [x13]\n"
+ "tbz x0, #0, 7f\n"
+ "ld1 { v16.s }[0], [x19]\n"
"b 7f\n"
"5:" // Oddments: Load bias: Bit 2: Unset
- "tbz x1, #1, 6f\n"
- "ld1 { v13.d }[0], [x13], #0x8\n"
- "tbz x1, #0, 7f\n"
- "ld1 { v13.s }[2], [x13]\n"
+ "tbz x0, #1, 6f\n"
+ "ld1 { v15.d }[0], [x19], #0x8\n"
+ "tbz x0, #0, 7f\n"
+ "ld1 { v15.s }[2], [x19]\n"
"b 7f\n"
"6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 7f\n"
- "ld1 { v13.s }[0], [x13]\n"
+ "tbz x0, #0, 7f\n"
+ "ld1 { v15.s }[0], [x19]\n"
"7:" // Oddments: Load bias: Bit 2: End
- "ldr d0, [x3, #0x0]\n"
- "ldr d1, [x3, #0x8]\n"
- "mov v20.16b, v13.16b\n"
- "mov v10.16b, v19.16b\n"
- "ldr d2, [x3, #0x10]\n"
- "ldr d3, [x3, #0x18]\n"
- "mov v8.16b, v13.16b\n"
- "mov v7.16b, v19.16b\n"
- "ldr d4, [x3, #0x20]\n"
- "ldp x9, x28, [x4, #0x0]\n"
- "mov v17.16b, v13.16b\n"
- "mov v21.16b, v19.16b\n"
- "ldp x27, x26, [x4, #0x10]\n"
- "ldp x25, x24, [x4, #0x20]\n"
- "usubl v0.8h, v0.8b, v15.8b\n"
- "usubl v1.8h, v1.8b, v15.8b\n"
- "ldp x23, x22, [x4, #0x30]\n"
- "ldp x21, x20, [x4, #0x40]\n"
- "usubl v2.8h, v2.8b, v15.8b\n"
- "usubl v3.8h, v3.8b, v15.8b\n"
- "usubl v4.8h, v4.8b, v15.8b\n"
- "add x9, x9, x0\n"
- "add x28, x28, x0\n"
- "add x27, x27, x0\n"
- "add x26, x26, x0\n"
- "add x25, x25, x0\n"
- "add x24, x24, x0\n"
- "add x23, x23, x0\n"
- "add x22, x22, x0\n"
- "add x21, x21, x0\n"
- "add x20, x20, x0\n"
- "tbz x1, #2, 9f\n"
- "ld1 { v31.s }[0], [x9], #0x4\n"
- "ld1 { v30.s }[0], [x28], #0x4\n"
- "ld1 { v29.s }[0], [x27], #0x4\n"
- "ld1 { v28.s }[0], [x26], #0x4\n"
- "ld1 { v27.s }[0], [x25], #0x4\n"
- "ld1 { v23.s }[0], [x24], #0x4\n"
- "ld1 { v25.s }[0], [x23], #0x4\n"
- "ld1 { v24.s }[0], [x22], #0x4\n"
- "ld1 { v26.s }[0], [x21], #0x4\n"
- "ld1 { v22.s }[0], [x20], #0x4\n"
- "tbz x1, #1, 8f\n"
- "ld1 { v31.h }[2], [x9], #0x2\n"
- "ld1 { v30.h }[2], [x28], #0x2\n"
- "ld1 { v29.h }[2], [x27], #0x2\n"
- "ld1 { v28.h }[2], [x26], #0x2\n"
- "ld1 { v27.h }[2], [x25], #0x2\n"
- "ld1 { v23.h }[2], [x24], #0x2\n"
- "ld1 { v25.h }[2], [x23], #0x2\n"
- "ld1 { v24.h }[2], [x22], #0x2\n"
- "ld1 { v26.h }[2], [x21], #0x2\n"
- "ld1 { v22.h }[2], [x20], #0x2\n"
- "tbz x1, #0, 11f\n"
- "ld1 { v31.b }[6], [x9]\n"
- "ld1 { v30.b }[6], [x28]\n"
- "ld1 { v29.b }[6], [x27]\n"
- "ld1 { v28.b }[6], [x26]\n"
- "ld1 { v27.b }[6], [x25]\n"
- "ld1 { v23.b }[6], [x24]\n"
- "ld1 { v25.b }[6], [x23]\n"
- "ld1 { v24.b }[6], [x22]\n"
- "ld1 { v26.b }[6], [x21]\n"
- "ld1 { v22.b }[6], [x20]\n"
+ "ldr d0, [x23, #0x0]\n"
+ "ldr d1, [x23, #0x8]\n"
+ "mov v17.16b, v15.16b\n"
+ "mov v8.16b, v16.16b\n"
+ "ldr d2, [x23, #0x10]\n"
+ "ldr d3, [x23, #0x18]\n"
+ "mov v10.16b, v15.16b\n"
+ "mov v7.16b, v16.16b\n"
+ "ldr d4, [x23, #0x20]\n"
+ "ldp x28, x6, [x20, #0x0]\n"
+ "mov v6.16b, v15.16b\n"
+ "mov v5.16b, v16.16b\n"
+ "ldp x26, x25, [x20, #0x10]\n"
+ "ldp x5, x2, [x20, #0x20]\n"
+ "usubl v0.8h, v0.8b, v14.8b\n"
+ "usubl v1.8h, v1.8b, v14.8b\n"
+ "ldp x27, x21, [x20, #0x30]\n"
+ "ldp x12, x19, [x20, #0x40]\n"
+ "usubl v2.8h, v2.8b, v14.8b\n"
+ "usubl v3.8h, v3.8b, v14.8b\n"
+ "usubl v4.8h, v4.8b, v14.8b\n"
+ "add x28, x28, x24\n"
+ "add x6, x6, x24\n"
+ "add x26, x26, x24\n"
+ "add x25, x25, x24\n"
+ "add x5, x5, x24\n"
+ "add x2, x2, x24\n"
+ "add x27, x27, x24\n"
+ "add x21, x21, x24\n"
+ "add x12, x12, x24\n"
+ "add x19, x19, x24\n"
+ "tbz x0, #2, 9f\n"
+ "ld1 { v31.s }[0], [x28], #0x4\n"
+ "ld1 { v30.s }[0], [x6], #0x4\n"
+ "ld1 { v29.s }[0], [x26], #0x4\n"
+ "ld1 { v28.s }[0], [x25], #0x4\n"
+ "ld1 { v27.s }[0], [x5], #0x4\n"
+ "ld1 { v23.s }[0], [x2], #0x4\n"
+ "ld1 { v25.s }[0], [x27], #0x4\n"
+ "ld1 { v24.s }[0], [x21], #0x4\n"
+ "ld1 { v26.s }[0], [x12], #0x4\n"
+ "ld1 { v22.s }[0], [x19], #0x4\n"
+ "tbz x0, #1, 8f\n"
+ "ld1 { v31.h }[2], [x28], #0x2\n"
+ "ld1 { v30.h }[2], [x6], #0x2\n"
+ "ld1 { v29.h }[2], [x26], #0x2\n"
+ "ld1 { v28.h }[2], [x25], #0x2\n"
+ "ld1 { v27.h }[2], [x5], #0x2\n"
+ "ld1 { v23.h }[2], [x2], #0x2\n"
+ "ld1 { v25.h }[2], [x27], #0x2\n"
+ "ld1 { v24.h }[2], [x21], #0x2\n"
+ "ld1 { v26.h }[2], [x12], #0x2\n"
+ "ld1 { v22.h }[2], [x19], #0x2\n"
+ "tbz x0, #0, 11f\n"
+ "ld1 { v31.b }[6], [x28]\n"
+ "ld1 { v30.b }[6], [x6]\n"
+ "ld1 { v29.b }[6], [x26]\n"
+ "ld1 { v28.b }[6], [x25]\n"
+ "ld1 { v27.b }[6], [x5]\n"
+ "ld1 { v23.b }[6], [x2]\n"
+ "ld1 { v25.b }[6], [x27]\n"
+ "ld1 { v24.b }[6], [x21]\n"
+ "ld1 { v26.b }[6], [x12]\n"
+ "ld1 { v22.b }[6], [x19]\n"
"b 11f\n"
"8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset
- "tbz x1, #0, 11f\n"
- "ld1 { v31.b }[4], [x9]\n"
- "ld1 { v30.b }[4], [x28]\n"
- "ld1 { v29.b }[4], [x27]\n"
- "ld1 { v28.b }[4], [x26]\n"
- "ld1 { v27.b }[4], [x25]\n"
- "ld1 { v23.b }[4], [x24]\n"
- "ld1 { v25.b }[4], [x23]\n"
- "ld1 { v24.b }[4], [x22]\n"
- "ld1 { v26.b }[4], [x21]\n"
- "ld1 { v22.b }[4], [x20]\n"
+ "tbz x0, #0, 11f\n"
+ "ld1 { v31.b }[4], [x28]\n"
+ "ld1 { v30.b }[4], [x6]\n"
+ "ld1 { v29.b }[4], [x26]\n"
+ "ld1 { v28.b }[4], [x25]\n"
+ "ld1 { v27.b }[4], [x5]\n"
+ "ld1 { v23.b }[4], [x2]\n"
+ "ld1 { v25.b }[4], [x27]\n"
+ "ld1 { v24.b }[4], [x21]\n"
+ "ld1 { v26.b }[4], [x12]\n"
+ "ld1 { v22.b }[4], [x19]\n"
"b 11f\n"
"9:" // Oddments: Initial loads: Bit 2: Unset
- "tbz x1, #1, 10f\n"
- "ld1 { v31.h }[0], [x9], #0x2\n"
- "ld1 { v30.h }[0], [x28], #0x2\n"
- "ld1 { v29.h }[0], [x27], #0x2\n"
- "ld1 { v28.h }[0], [x26], #0x2\n"
- "ld1 { v27.h }[0], [x25], #0x2\n"
- "ld1 { v23.h }[0], [x24], #0x2\n"
- "ld1 { v25.h }[0], [x23], #0x2\n"
- "ld1 { v24.h }[0], [x22], #0x2\n"
- "ld1 { v26.h }[0], [x21], #0x2\n"
- "ld1 { v22.h }[0], [x20], #0x2\n"
- "tbz x1, #0, 11f\n"
- "ld1 { v31.b }[2], [x9]\n"
- "ld1 { v30.b }[2], [x28]\n"
- "ld1 { v29.b }[2], [x27]\n"
- "ld1 { v28.b }[2], [x26]\n"
- "ld1 { v27.b }[2], [x25]\n"
- "ld1 { v23.b }[2], [x24]\n"
- "ld1 { v25.b }[2], [x23]\n"
- "ld1 { v24.b }[2], [x22]\n"
- "ld1 { v26.b }[2], [x21]\n"
- "ld1 { v22.b }[2], [x20]\n"
+ "tbz x0, #1, 10f\n"
+ "ld1 { v31.h }[0], [x28], #0x2\n"
+ "ld1 { v30.h }[0], [x6], #0x2\n"
+ "ld1 { v29.h }[0], [x26], #0x2\n"
+ "ld1 { v28.h }[0], [x25], #0x2\n"
+ "ld1 { v27.h }[0], [x5], #0x2\n"
+ "ld1 { v23.h }[0], [x2], #0x2\n"
+ "ld1 { v25.h }[0], [x27], #0x2\n"
+ "ld1 { v24.h }[0], [x21], #0x2\n"
+ "ld1 { v26.h }[0], [x12], #0x2\n"
+ "ld1 { v22.h }[0], [x19], #0x2\n"
+ "tbz x0, #0, 11f\n"
+ "ld1 { v31.b }[2], [x28]\n"
+ "ld1 { v30.b }[2], [x6]\n"
+ "ld1 { v29.b }[2], [x26]\n"
+ "ld1 { v28.b }[2], [x25]\n"
+ "ld1 { v27.b }[2], [x5]\n"
+ "ld1 { v23.b }[2], [x2]\n"
+ "ld1 { v25.b }[2], [x27]\n"
+ "ld1 { v24.b }[2], [x21]\n"
+ "ld1 { v26.b }[2], [x12]\n"
+ "ld1 { v22.b }[2], [x19]\n"
"b 11f\n"
"10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 11f\n"
- "ld1 { v31.b }[0], [x9]\n"
- "ld1 { v30.b }[0], [x28]\n"
- "ld1 { v29.b }[0], [x27]\n"
- "ld1 { v28.b }[0], [x26]\n"
- "ld1 { v27.b }[0], [x25]\n"
- "ld1 { v23.b }[0], [x24]\n"
- "ld1 { v25.b }[0], [x23]\n"
- "ld1 { v24.b }[0], [x22]\n"
- "ld1 { v26.b }[0], [x21]\n"
- "ld1 { v22.b }[0], [x20]\n"
+ "tbz x0, #0, 11f\n"
+ "ld1 { v31.b }[0], [x28]\n"
+ "ld1 { v30.b }[0], [x6]\n"
+ "ld1 { v29.b }[0], [x26]\n"
+ "ld1 { v28.b }[0], [x25]\n"
+ "ld1 { v27.b }[0], [x5]\n"
+ "ld1 { v23.b }[0], [x2]\n"
+ "ld1 { v25.b }[0], [x27]\n"
+ "ld1 { v24.b }[0], [x21]\n"
+ "ld1 { v26.b }[0], [x12]\n"
+ "ld1 { v22.b }[0], [x19]\n"
"11:" // Oddments: Initial loads: Bit 2: End
"usubl v31.8h, v31.8b, v9.8b\n"
"usubl v30.8h, v30.8b, v9.8b\n"
- "smlal v13.4s, v31.4h, v0.4h\n"
- "ldr x20, [x4, #0x50]\n"
+ "smlal v15.4s, v31.4h, v0.4h\n"
+ "ldr x19, [x20, #0x50]\n"
"usubl v29.8h, v29.8b, v9.8b\n"
- "smlal2 v19.4s, v31.8h, v0.8h\n"
- "smlal v20.4s, v30.4h, v0.4h\n"
- "smlal2 v10.4s, v30.8h, v0.8h\n"
- "smlal v8.4s, v29.4h, v0.4h\n"
+ "smlal2 v16.4s, v31.8h, v0.8h\n"
+ "smlal v17.4s, v30.4h, v0.4h\n"
+ "smlal2 v8.4s, v30.8h, v0.8h\n"
+ "smlal v10.4s, v29.4h, v0.4h\n"
"usubl v28.8h, v28.8b, v9.8b\n"
- "add x20, x20, x0\n"
+ "add x19, x19, x24\n"
"smlal2 v7.4s, v29.8h, v0.8h\n"
"usubl v27.8h, v27.8b, v9.8b\n"
- "smlal v17.4s, v28.4h, v0.4h\n"
- "smlal2 v21.4s, v28.8h, v0.8h\n"
- "smlal v13.4s, v30.4h, v1.4h\n"
+ "smlal v6.4s, v28.4h, v0.4h\n"
+ "smlal2 v5.4s, v28.8h, v0.8h\n"
+ "smlal v15.4s, v30.4h, v1.4h\n"
"usubl v23.8h, v23.8b, v9.8b\n"
- "smlal2 v19.4s, v30.8h, v1.8h\n"
- "smlal v20.4s, v27.4h, v1.4h\n"
+ "smlal2 v16.4s, v30.8h, v1.8h\n"
+ "smlal v17.4s, v27.4h, v1.4h\n"
"usubl v25.8h, v25.8b, v9.8b\n"
- "smlal2 v10.4s, v27.8h, v1.8h\n"
- "smlal v8.4s, v28.4h, v1.4h\n"
+ "smlal2 v8.4s, v27.8h, v1.8h\n"
+ "smlal v10.4s, v28.4h, v1.4h\n"
"usubl v24.8h, v24.8b, v9.8b\n"
"smlal2 v7.4s, v28.8h, v1.8h\n"
"usubl v26.8h, v26.8b, v9.8b\n"
- "smlal v17.4s, v23.4h, v1.4h\n"
+ "smlal v6.4s, v23.4h, v1.4h\n"
"usubl v22.8h, v22.8b, v9.8b\n"
- "smlal2 v21.4s, v23.8h, v1.8h\n"
- "smlal v13.4s, v27.4h, v2.4h\n"
- "smlal2 v19.4s, v27.8h, v2.8h\n"
- "smlal v20.4s, v25.4h, v2.4h\n"
- "smlal2 v10.4s, v25.8h, v2.8h\n"
- "smlal v8.4s, v23.4h, v2.4h\n"
+ "smlal2 v5.4s, v23.8h, v1.8h\n"
+ "smlal v15.4s, v27.4h, v2.4h\n"
+ "smlal2 v16.4s, v27.8h, v2.8h\n"
+ "smlal v17.4s, v25.4h, v2.4h\n"
+ "smlal2 v8.4s, v25.8h, v2.8h\n"
+ "smlal v10.4s, v23.4h, v2.4h\n"
"smlal2 v7.4s, v23.8h, v2.8h\n"
- "tbz x1, #2, 13f\n"
- "ld1 { v31.s }[0], [x20], #0x4\n"
- "tbz x1, #1, 12f\n"
- "ld1 { v31.h }[2], [x20], #0x2\n"
- "tbz x1, #0, 15f\n"
- "ld1 { v31.b }[6], [x20]\n"
+ "tbz x0, #2, 13f\n"
+ "ld1 { v31.s }[0], [x19], #0x4\n"
+ "tbz x0, #1, 12f\n"
+ "ld1 { v31.h }[2], [x19], #0x2\n"
+ "tbz x0, #0, 15f\n"
+ "ld1 { v31.b }[6], [x19]\n"
"b 15f\n"
"12:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset
- "tbz x1, #0, 15f\n"
- "ld1 { v31.b }[4], [x20]\n"
+ "tbz x0, #0, 15f\n"
+ "ld1 { v31.b }[4], [x19]\n"
"b 15f\n"
"13:" // Oddments: Load (1, 3): Bit 2: Unset
- "tbz x1, #1, 14f\n"
- "ld1 { v31.h }[0], [x20], #0x2\n"
- "tbz x1, #0, 15f\n"
- "ld1 { v31.b }[2], [x20]\n"
+ "tbz x0, #1, 14f\n"
+ "ld1 { v31.h }[0], [x19], #0x2\n"
+ "tbz x0, #0, 15f\n"
+ "ld1 { v31.b }[2], [x19]\n"
"b 15f\n"
"14:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 15f\n"
- "ld1 { v31.b }[0], [x20]\n"
+ "tbz x0, #0, 15f\n"
+ "ld1 { v31.b }[0], [x19]\n"
"15:" // Oddments: Load (1, 3): Bit 2: End
"usubl v31.8h, v31.8b, v9.8b\n"
- "ldr x22, [x4, #0x58]\n"
- "smlal v17.4s, v31.4h, v2.4h\n"
- "smlal2 v21.4s, v31.8h, v2.8h\n"
- "smlal v13.4s, v25.4h, v3.4h\n"
- "smlal2 v19.4s, v25.8h, v3.8h\n"
- "add x22, x22, x0\n"
- "smlal v20.4s, v24.4h, v3.4h\n"
- "smlal2 v10.4s, v24.8h, v3.8h\n"
- "smlal v8.4s, v31.4h, v3.4h\n"
+ "ldr x15, [x20, #0x58]\n"
+ "smlal v6.4s, v31.4h, v2.4h\n"
+ "smlal2 v5.4s, v31.8h, v2.8h\n"
+ "smlal v15.4s, v25.4h, v3.4h\n"
+ "smlal2 v16.4s, v25.8h, v3.8h\n"
+ "add x15, x15, x24\n"
+ "smlal v17.4s, v24.4h, v3.4h\n"
+ "smlal2 v8.4s, v24.8h, v3.8h\n"
+ "smlal v10.4s, v31.4h, v3.4h\n"
"smlal2 v7.4s, v31.8h, v3.8h\n"
- "tbz x1, #2, 17f\n"
- "ld1 { v30.s }[0], [x22], #0x4\n"
- "tbz x1, #1, 16f\n"
- "ld1 { v30.h }[2], [x22], #0x2\n"
- "tbz x1, #0, 19f\n"
- "ld1 { v30.b }[6], [x22]\n"
+ "tbz x0, #2, 17f\n"
+ "ld1 { v30.s }[0], [x15], #0x4\n"
+ "tbz x0, #1, 16f\n"
+ "ld1 { v30.h }[2], [x15], #0x2\n"
+ "tbz x0, #0, 19f\n"
+ "ld1 { v30.b }[6], [x15]\n"
"b 19f\n"
"16:" // Oddments: Load (1, 4): Bit 2: Bit 1: Unset
- "tbz x1, #0, 19f\n"
- "ld1 { v30.b }[4], [x22]\n"
+ "tbz x0, #0, 19f\n"
+ "ld1 { v30.b }[4], [x15]\n"
"b 19f\n"
"17:" // Oddments: Load (1, 4): Bit 2: Unset
- "tbz x1, #1, 18f\n"
- "ld1 { v30.h }[0], [x22], #0x2\n"
- "tbz x1, #0, 19f\n"
- "ld1 { v30.b }[2], [x22]\n"
+ "tbz x0, #1, 18f\n"
+ "ld1 { v30.h }[0], [x15], #0x2\n"
+ "tbz x0, #0, 19f\n"
+ "ld1 { v30.b }[2], [x15]\n"
"b 19f\n"
"18:" // Oddments: Load (1, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 19f\n"
- "ld1 { v30.b }[0], [x22]\n"
+ "tbz x0, #0, 19f\n"
+ "ld1 { v30.b }[0], [x15]\n"
"19:" // Oddments: Load (1, 4): Bit 2: End
"usubl v30.8h, v30.8b, v9.8b\n"
- "ldr x21, [x4, #0x60]\n"
- "smlal v17.4s, v30.4h, v3.4h\n"
- "smlal2 v21.4s, v30.8h, v3.8h\n"
- "smlal v13.4s, v24.4h, v4.4h\n"
- "smlal2 v19.4s, v24.8h, v4.8h\n"
- "add x21, x21, x0\n"
- "tbz x1, #2, 21f\n"
- "ld1 { v27.s }[0], [x21], #0x4\n"
- "tbz x1, #1, 20f\n"
- "ld1 { v27.h }[2], [x21], #0x2\n"
- "tbz x1, #0, 23f\n"
- "ld1 { v27.b }[6], [x21]\n"
+ "ldr x19, [x20, #0x60]\n"
+ "smlal v6.4s, v30.4h, v3.4h\n"
+ "smlal2 v5.4s, v30.8h, v3.8h\n"
+ "smlal v15.4s, v24.4h, v4.4h\n"
+ "smlal2 v16.4s, v24.8h, v4.8h\n"
+ "add x19, x19, x24\n"
+ "tbz x0, #2, 21f\n"
+ "ld1 { v27.s }[0], [x19], #0x4\n"
+ "tbz x0, #1, 20f\n"
+ "ld1 { v27.h }[2], [x19], #0x2\n"
+ "tbz x0, #0, 23f\n"
+ "ld1 { v27.b }[6], [x19]\n"
"b 23f\n"
"20:" // Oddments: Load (0, 5): Bit 2: Bit 1: Unset
- "tbz x1, #0, 23f\n"
- "ld1 { v27.b }[4], [x21]\n"
+ "tbz x0, #0, 23f\n"
+ "ld1 { v27.b }[4], [x19]\n"
"b 23f\n"
"21:" // Oddments: Load (0, 5): Bit 2: Unset
- "tbz x1, #1, 22f\n"
- "ld1 { v27.h }[0], [x21], #0x2\n"
- "tbz x1, #0, 23f\n"
- "ld1 { v27.b }[2], [x21]\n"
+ "tbz x0, #1, 22f\n"
+ "ld1 { v27.h }[0], [x19], #0x2\n"
+ "tbz x0, #0, 23f\n"
+ "ld1 { v27.b }[2], [x19]\n"
"b 23f\n"
"22:" // Oddments: Load (0, 5): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 23f\n"
- "ld1 { v27.b }[0], [x21]\n"
+ "tbz x0, #0, 23f\n"
+ "ld1 { v27.b }[0], [x19]\n"
"23:" // Oddments: Load (0, 5): Bit 2: End
- "ldr d0, [x3, #0x28]\n"
"usubl v27.8h, v27.8b, v9.8b\n"
- "smlal v20.4s, v27.4h, v4.4h\n"
- "smlal2 v10.4s, v27.8h, v4.8h\n"
- "smlal v8.4s, v30.4h, v4.4h\n"
+ "ldr d0, [x23, #0x28]\n"
+ "smlal v17.4s, v27.4h, v4.4h\n"
+ "smlal2 v8.4s, v27.8h, v4.8h\n"
+ "smlal v10.4s, v30.4h, v4.4h\n"
"smlal2 v7.4s, v30.8h, v4.8h\n"
- "usubl v0.8h, v0.8b, v15.8b\n"
- "ldr x20, [x4, #0x68]\n"
- "smlal v17.4s, v26.4h, v4.4h\n"
- "smlal2 v21.4s, v26.8h, v4.8h\n"
- "add x20, x20, x0\n"
- "smlal v13.4s, v29.4h, v0.4h\n"
- "smlal2 v19.4s, v29.8h, v0.8h\n"
- "smlal v20.4s, v28.4h, v0.4h\n"
- "smlal2 v10.4s, v28.8h, v0.8h\n"
- "smlal v8.4s, v22.4h, v0.4h\n"
+ "usubl v0.8h, v0.8b, v14.8b\n"
+ "ldr x27, [x20, #0x68]\n"
+ "smlal v6.4s, v26.4h, v4.4h\n"
+ "smlal2 v5.4s, v26.8h, v4.8h\n"
+ "add x27, x27, x24\n"
+ "smlal v15.4s, v29.4h, v0.4h\n"
+ "smlal2 v16.4s, v29.8h, v0.8h\n"
+ "smlal v17.4s, v28.4h, v0.4h\n"
+ "smlal2 v8.4s, v28.8h, v0.8h\n"
+ "smlal v10.4s, v22.4h, v0.4h\n"
"smlal2 v7.4s, v22.8h, v0.8h\n"
- "tbz x1, #2, 25f\n"
- "ld1 { v25.s }[0], [x20], #0x4\n"
- "tbz x1, #1, 24f\n"
- "ld1 { v25.h }[2], [x20], #0x2\n"
- "tbz x1, #0, 27f\n"
- "ld1 { v25.b }[6], [x20]\n"
+ "tbz x0, #2, 25f\n"
+ "ld1 { v25.s }[0], [x27], #0x4\n"
+ "tbz x0, #1, 24f\n"
+ "ld1 { v25.h }[2], [x27], #0x2\n"
+ "tbz x0, #0, 27f\n"
+ "ld1 { v25.b }[6], [x27]\n"
"b 27f\n"
"24:" // Oddments: Load (2, 1): Bit 2: Bit 1: Unset
- "tbz x1, #0, 27f\n"
- "ld1 { v25.b }[4], [x20]\n"
+ "tbz x0, #0, 27f\n"
+ "ld1 { v25.b }[4], [x27]\n"
"b 27f\n"
"25:" // Oddments: Load (2, 1): Bit 2: Unset
- "tbz x1, #1, 26f\n"
- "ld1 { v25.h }[0], [x20], #0x2\n"
- "tbz x1, #0, 27f\n"
- "ld1 { v25.b }[2], [x20]\n"
+ "tbz x0, #1, 26f\n"
+ "ld1 { v25.h }[0], [x27], #0x2\n"
+ "tbz x0, #0, 27f\n"
+ "ld1 { v25.b }[2], [x27]\n"
"b 27f\n"
"26:" // Oddments: Load (2, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 27f\n"
- "ld1 { v25.b }[0], [x20]\n"
+ "tbz x0, #0, 27f\n"
+ "ld1 { v25.b }[0], [x27]\n"
"27:" // Oddments: Load (2, 1): Bit 2: End
- "ldr d1, [x3, #0x30]\n"
+ "ldr d1, [x23, #0x30]\n"
"usubl v25.8h, v25.8b, v9.8b\n"
- "usubl v1.8h, v1.8b, v15.8b\n"
- "ldr x26, [x4, #0x70]\n"
- "smlal v17.4s, v25.4h, v0.4h\n"
- "smlal2 v21.4s, v25.8h, v0.8h\n"
- "add x26, x26, x0\n"
- "smlal v13.4s, v28.4h, v1.4h\n"
- "smlal2 v19.4s, v28.8h, v1.8h\n"
- "smlal v20.4s, v23.4h, v1.4h\n"
- "smlal2 v10.4s, v23.8h, v1.8h\n"
- "smlal v8.4s, v25.4h, v1.4h\n"
+ "usubl v1.8h, v1.8b, v14.8b\n"
+ "ldr x5, [x20, #0x70]\n"
+ "smlal v6.4s, v25.4h, v0.4h\n"
+ "smlal2 v5.4s, v25.8h, v0.8h\n"
+ "add x5, x5, x24\n"
+ "smlal v15.4s, v28.4h, v1.4h\n"
+ "smlal2 v16.4s, v28.8h, v1.8h\n"
+ "smlal v17.4s, v23.4h, v1.4h\n"
+ "smlal2 v8.4s, v23.8h, v1.8h\n"
+ "smlal v10.4s, v25.4h, v1.4h\n"
"smlal2 v7.4s, v25.8h, v1.8h\n"
- "tbz x1, #2, 29f\n"
- "ld1 { v24.s }[0], [x26], #0x4\n"
- "tbz x1, #1, 28f\n"
- "ld1 { v24.h }[2], [x26], #0x2\n"
- "tbz x1, #0, 31f\n"
- "ld1 { v24.b }[6], [x26]\n"
+ "tbz x0, #2, 29f\n"
+ "ld1 { v24.s }[0], [x5], #0x4\n"
+ "tbz x0, #1, 28f\n"
+ "ld1 { v24.h }[2], [x5], #0x2\n"
+ "tbz x0, #0, 31f\n"
+ "ld1 { v24.b }[6], [x5]\n"
"b 31f\n"
"28:" // Oddments: Load (2, 2): Bit 2: Bit 1: Unset
- "tbz x1, #0, 31f\n"
- "ld1 { v24.b }[4], [x26]\n"
+ "tbz x0, #0, 31f\n"
+ "ld1 { v24.b }[4], [x5]\n"
"b 31f\n"
"29:" // Oddments: Load (2, 2): Bit 2: Unset
- "tbz x1, #1, 30f\n"
- "ld1 { v24.h }[0], [x26], #0x2\n"
- "tbz x1, #0, 31f\n"
- "ld1 { v24.b }[2], [x26]\n"
+ "tbz x0, #1, 30f\n"
+ "ld1 { v24.h }[0], [x5], #0x2\n"
+ "tbz x0, #0, 31f\n"
+ "ld1 { v24.b }[2], [x5]\n"
"b 31f\n"
"30:" // Oddments: Load (2, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 31f\n"
- "ld1 { v24.b }[0], [x26]\n"
+ "tbz x0, #0, 31f\n"
+ "ld1 { v24.b }[0], [x5]\n"
"31:" // Oddments: Load (2, 2): Bit 2: End
- "ldr d2, [x3, #0x38]\n"
+ "ldr d2, [x23, #0x38]\n"
"usubl v24.8h, v24.8b, v9.8b\n"
- "usubl v2.8h, v2.8b, v15.8b\n"
- "ldr x25, [x4, #0x78]\n"
- "smlal v17.4s, v24.4h, v1.4h\n"
- "smlal2 v21.4s, v24.8h, v1.8h\n"
- "add x25, x25, x0\n"
- "smlal v13.4s, v23.4h, v2.4h\n"
- "smlal2 v19.4s, v23.8h, v2.8h\n"
- "smlal v20.4s, v31.4h, v2.4h\n"
- "smlal2 v10.4s, v31.8h, v2.8h\n"
- "smlal v8.4s, v24.4h, v2.4h\n"
+ "usubl v2.8h, v2.8b, v14.8b\n"
+ "ldr x11, [x20, #0x78]\n"
+ "smlal v6.4s, v24.4h, v1.4h\n"
+ "smlal2 v5.4s, v24.8h, v1.8h\n"
+ "add x11, x11, x24\n"
+ "smlal v15.4s, v23.4h, v2.4h\n"
+ "smlal2 v16.4s, v23.8h, v2.8h\n"
+ "smlal v17.4s, v31.4h, v2.4h\n"
+ "smlal2 v8.4s, v31.8h, v2.8h\n"
+ "smlal v10.4s, v24.4h, v2.4h\n"
"smlal2 v7.4s, v24.8h, v2.8h\n"
- "tbz x1, #2, 33f\n"
- "ld1 { v27.s }[0], [x25], #0x4\n"
- "tbz x1, #1, 32f\n"
- "ld1 { v27.h }[2], [x25], #0x2\n"
- "tbz x1, #0, 35f\n"
- "ld1 { v27.b }[6], [x25]\n"
+ "tbz x0, #2, 33f\n"
+ "ld1 { v27.s }[0], [x11], #0x4\n"
+ "tbz x0, #1, 32f\n"
+ "ld1 { v27.h }[2], [x11], #0x2\n"
+ "tbz x0, #0, 35f\n"
+ "ld1 { v27.b }[6], [x11]\n"
"b 35f\n"
"32:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset
- "tbz x1, #0, 35f\n"
- "ld1 { v27.b }[4], [x25]\n"
+ "tbz x0, #0, 35f\n"
+ "ld1 { v27.b }[4], [x11]\n"
"b 35f\n"
"33:" // Oddments: Load (2, 3): Bit 2: Unset
- "tbz x1, #1, 34f\n"
- "ld1 { v27.h }[0], [x25], #0x2\n"
- "tbz x1, #0, 35f\n"
- "ld1 { v27.b }[2], [x25]\n"
+ "tbz x0, #1, 34f\n"
+ "ld1 { v27.h }[0], [x11], #0x2\n"
+ "tbz x0, #0, 35f\n"
+ "ld1 { v27.b }[2], [x11]\n"
"b 35f\n"
"34:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 35f\n"
- "ld1 { v27.b }[0], [x25]\n"
+ "tbz x0, #0, 35f\n"
+ "ld1 { v27.b }[0], [x11]\n"
"35:" // Oddments: Load (2, 3): Bit 2: End
- "ldr d3, [x3, #0x40]\n"
+ "ldr d3, [x23, #0x40]\n"
"usubl v27.8h, v27.8b, v9.8b\n"
- "usubl v3.8h, v3.8b, v15.8b\n"
- "ldr x23, [x4, #0x80]\n"
- "smlal v17.4s, v27.4h, v2.4h\n"
- "smlal2 v21.4s, v27.8h, v2.8h\n"
- "add x23, x23, x0\n"
- "smlal v13.4s, v31.4h, v3.4h\n"
- "smlal2 v19.4s, v31.8h, v3.8h\n"
- "smlal v20.4s, v30.4h, v3.4h\n"
- "smlal2 v10.4s, v30.8h, v3.8h\n"
- "smlal v8.4s, v27.4h, v3.4h\n"
+ "usubl v3.8h, v3.8b, v14.8b\n"
+ "ldr x12, [x20, #0x80]\n"
+ "smlal v6.4s, v27.4h, v2.4h\n"
+ "smlal2 v5.4s, v27.8h, v2.8h\n"
+ "add x12, x12, x24\n"
+ "smlal v15.4s, v31.4h, v3.4h\n"
+ "smlal2 v16.4s, v31.8h, v3.8h\n"
+ "smlal v17.4s, v30.4h, v3.4h\n"
+ "smlal2 v8.4s, v30.8h, v3.8h\n"
+ "smlal v10.4s, v27.4h, v3.4h\n"
"smlal2 v7.4s, v27.8h, v3.8h\n"
- "tbz x1, #2, 37f\n"
- "ld1 { v23.s }[0], [x23], #0x4\n"
- "tbz x1, #1, 36f\n"
- "ld1 { v23.h }[2], [x23], #0x2\n"
- "tbz x1, #0, 39f\n"
- "ld1 { v23.b }[6], [x23]\n"
+ "tbz x0, #2, 37f\n"
+ "ld1 { v23.s }[0], [x12], #0x4\n"
+ "tbz x0, #1, 36f\n"
+ "ld1 { v23.h }[2], [x12], #0x2\n"
+ "tbz x0, #0, 39f\n"
+ "ld1 { v23.b }[6], [x12]\n"
"b 39f\n"
"36:" // Oddments: Load (2, 4): Bit 2: Bit 1: Unset
- "tbz x1, #0, 39f\n"
- "ld1 { v23.b }[4], [x23]\n"
+ "tbz x0, #0, 39f\n"
+ "ld1 { v23.b }[4], [x12]\n"
"b 39f\n"
"37:" // Oddments: Load (2, 4): Bit 2: Unset
- "tbz x1, #1, 38f\n"
- "ld1 { v23.h }[0], [x23], #0x2\n"
- "tbz x1, #0, 39f\n"
- "ld1 { v23.b }[2], [x23]\n"
+ "tbz x0, #1, 38f\n"
+ "ld1 { v23.h }[0], [x12], #0x2\n"
+ "tbz x0, #0, 39f\n"
+ "ld1 { v23.b }[2], [x12]\n"
"b 39f\n"
"38:" // Oddments: Load (2, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 39f\n"
- "ld1 { v23.b }[0], [x23]\n"
+ "tbz x0, #0, 39f\n"
+ "ld1 { v23.b }[0], [x12]\n"
"39:" // Oddments: Load (2, 4): Bit 2: End
- "ldr d4, [x3, #0x48]\n"
+ "ldr d4, [x23, #0x48]\n"
"usubl v23.8h, v23.8b, v9.8b\n"
- "usubl v4.8h, v4.8b, v15.8b\n"
- "ldr x24, [x4, #0x88]\n"
- "smlal v17.4s, v23.4h, v3.4h\n"
- "smlal2 v21.4s, v23.8h, v3.8h\n"
- "add x24, x24, x0\n"
- "smlal v13.4s, v30.4h, v4.4h\n"
- "smlal2 v19.4s, v30.8h, v4.8h\n"
- "smlal v20.4s, v26.4h, v4.4h\n"
- "smlal2 v10.4s, v26.8h, v4.8h\n"
- "smlal v8.4s, v23.4h, v4.4h\n"
+ "usubl v4.8h, v4.8b, v14.8b\n"
+ "ldr x26, [x20, #0x88]\n"
+ "smlal v6.4s, v23.4h, v3.4h\n"
+ "smlal2 v5.4s, v23.8h, v3.8h\n"
+ "add x26, x26, x24\n"
+ "smlal v15.4s, v30.4h, v4.4h\n"
+ "smlal2 v16.4s, v30.8h, v4.8h\n"
+ "smlal v17.4s, v26.4h, v4.4h\n"
+ "smlal2 v8.4s, v26.8h, v4.8h\n"
+ "smlal v10.4s, v23.4h, v4.4h\n"
"smlal2 v7.4s, v23.8h, v4.8h\n"
- "tbz x1, #2, 41f\n"
- "ld1 { v28.s }[0], [x24], #0x4\n"
- "tbz x1, #1, 40f\n"
- "ld1 { v28.h }[2], [x24], #0x2\n"
- "tbz x1, #0, 43f\n"
- "ld1 { v28.b }[6], [x24]\n"
+ "tbz x0, #2, 41f\n"
+ "ld1 { v28.s }[0], [x26], #0x4\n"
+ "tbz x0, #1, 40f\n"
+ "ld1 { v28.h }[2], [x26], #0x2\n"
+ "tbz x0, #0, 43f\n"
+ "ld1 { v28.b }[6], [x26]\n"
"b 43f\n"
"40:" // Oddments: Load (2, 5): Bit 2: Bit 1: Unset
- "tbz x1, #0, 43f\n"
- "ld1 { v28.b }[4], [x24]\n"
+ "tbz x0, #0, 43f\n"
+ "ld1 { v28.b }[4], [x26]\n"
"b 43f\n"
"41:" // Oddments: Load (2, 5): Bit 2: Unset
- "tbz x1, #1, 42f\n"
- "ld1 { v28.h }[0], [x24], #0x2\n"
- "tbz x1, #0, 43f\n"
- "ld1 { v28.b }[2], [x24]\n"
+ "tbz x0, #1, 42f\n"
+ "ld1 { v28.h }[0], [x26], #0x2\n"
+ "tbz x0, #0, 43f\n"
+ "ld1 { v28.b }[2], [x26]\n"
"b 43f\n"
"42:" // Oddments: Load (2, 5): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 43f\n"
- "ld1 { v28.b }[0], [x24]\n"
+ "tbz x0, #0, 43f\n"
+ "ld1 { v28.b }[0], [x26]\n"
"43:" // Oddments: Load (2, 5): Bit 2: End
- "ldr d0, [x3, #0x50]\n"
+ "ldr d0, [x23, #0x50]\n"
"usubl v28.8h, v28.8b, v9.8b\n"
- "usubl v0.8h, v0.8b, v15.8b\n"
- "ldr x15, [x4, #0x90]\n"
- "smlal v17.4s, v28.4h, v4.4h\n"
- "smlal2 v21.4s, v28.8h, v4.8h\n"
- "add x15, x15, x0\n"
- "smlal v13.4s, v22.4h, v0.4h\n"
- "smlal2 v19.4s, v22.8h, v0.8h\n"
- "smlal v20.4s, v25.4h, v0.4h\n"
- "smlal2 v10.4s, v25.8h, v0.8h\n"
- "tbz x1, #2, 45f\n"
- "ld1 { v31.s }[0], [x15], #0x4\n"
- "tbz x1, #1, 44f\n"
- "ld1 { v31.h }[2], [x15], #0x2\n"
- "tbz x1, #0, 47f\n"
- "ld1 { v31.b }[6], [x15]\n"
+ "usubl v0.8h, v0.8b, v14.8b\n"
+ "ldr x14, [x20, #0x90]\n"
+ "smlal v6.4s, v28.4h, v4.4h\n"
+ "smlal2 v5.4s, v28.8h, v4.8h\n"
+ "add x14, x14, x24\n"
+ "smlal v15.4s, v22.4h, v0.4h\n"
+ "smlal2 v16.4s, v22.8h, v0.8h\n"
+ "smlal v17.4s, v25.4h, v0.4h\n"
+ "smlal2 v8.4s, v25.8h, v0.8h\n"
+ "tbz x0, #2, 45f\n"
+ "ld1 { v31.s }[0], [x14], #0x4\n"
+ "tbz x0, #1, 44f\n"
+ "ld1 { v31.h }[2], [x14], #0x2\n"
+ "tbz x0, #0, 47f\n"
+ "ld1 { v31.b }[6], [x14]\n"
"b 47f\n"
"44:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset
- "tbz x1, #0, 47f\n"
- "ld1 { v31.b }[4], [x15]\n"
+ "tbz x0, #0, 47f\n"
+ "ld1 { v31.b }[4], [x14]\n"
"b 47f\n"
"45:" // Oddments: Load (3, 0): Bit 2: Unset
- "tbz x1, #1, 46f\n"
- "ld1 { v31.h }[0], [x15], #0x2\n"
- "tbz x1, #0, 47f\n"
- "ld1 { v31.b }[2], [x15]\n"
+ "tbz x0, #1, 46f\n"
+ "ld1 { v31.h }[0], [x14], #0x2\n"
+ "tbz x0, #0, 47f\n"
+ "ld1 { v31.b }[2], [x14]\n"
"b 47f\n"
"46:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 47f\n"
- "ld1 { v31.b }[0], [x15]\n"
+ "tbz x0, #0, 47f\n"
+ "ld1 { v31.b }[0], [x14]\n"
"47:" // Oddments: Load (3, 0): Bit 2: End
"usubl v31.8h, v31.8b, v9.8b\n"
- "ldr x21, [x4, #0x98]\n"
- "smlal v8.4s, v31.4h, v0.4h\n"
+ "ldr x15, [x20, #0x98]\n"
+ "smlal v10.4s, v31.4h, v0.4h\n"
"smlal2 v7.4s, v31.8h, v0.8h\n"
- "add x21, x21, x0\n"
- "tbz x1, #2, 49f\n"
- "ld1 { v30.s }[0], [x21], #0x4\n"
- "tbz x1, #1, 48f\n"
- "ld1 { v30.h }[2], [x21], #0x2\n"
- "tbz x1, #0, 51f\n"
- "ld1 { v30.b }[6], [x21]\n"
+ "add x15, x15, x24\n"
+ "tbz x0, #2, 49f\n"
+ "ld1 { v30.s }[0], [x15], #0x4\n"
+ "tbz x0, #1, 48f\n"
+ "ld1 { v30.h }[2], [x15], #0x2\n"
+ "tbz x0, #0, 51f\n"
+ "ld1 { v30.b }[6], [x15]\n"
"b 51f\n"
"48:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset
- "tbz x1, #0, 51f\n"
- "ld1 { v30.b }[4], [x21]\n"
+ "tbz x0, #0, 51f\n"
+ "ld1 { v30.b }[4], [x15]\n"
"b 51f\n"
"49:" // Oddments: Load (3, 1): Bit 2: Unset
- "tbz x1, #1, 50f\n"
- "ld1 { v30.h }[0], [x21], #0x2\n"
- "tbz x1, #0, 51f\n"
- "ld1 { v30.b }[2], [x21]\n"
+ "tbz x0, #1, 50f\n"
+ "ld1 { v30.h }[0], [x15], #0x2\n"
+ "tbz x0, #0, 51f\n"
+ "ld1 { v30.b }[2], [x15]\n"
"b 51f\n"
"50:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 51f\n"
- "ld1 { v30.b }[0], [x21]\n"
+ "tbz x0, #0, 51f\n"
+ "ld1 { v30.b }[0], [x15]\n"
"51:" // Oddments: Load (3, 1): Bit 2: End
- "ldr d1, [x3, #0x58]\n"
+ "ldr d1, [x23, #0x58]\n"
"usubl v30.8h, v30.8b, v9.8b\n"
- "usubl v1.8h, v1.8b, v15.8b\n"
- "ldr x14, [x4, #0xa0]\n"
- "smlal v17.4s, v30.4h, v0.4h\n"
- "smlal2 v21.4s, v30.8h, v0.8h\n"
- "add x14, x14, x0\n"
- "smlal v13.4s, v25.4h, v1.4h\n"
- "smlal2 v19.4s, v25.8h, v1.8h\n"
- "smlal v20.4s, v24.4h, v1.4h\n"
- "smlal2 v10.4s, v24.8h, v1.8h\n"
- "smlal v8.4s, v30.4h, v1.4h\n"
+ "usubl v1.8h, v1.8b, v14.8b\n"
+ "ldr x21, [x20, #0xa0]\n"
+ "smlal v6.4s, v30.4h, v0.4h\n"
+ "smlal2 v5.4s, v30.8h, v0.8h\n"
+ "add x21, x21, x24\n"
+ "smlal v15.4s, v25.4h, v1.4h\n"
+ "smlal2 v16.4s, v25.8h, v1.8h\n"
+ "smlal v17.4s, v24.4h, v1.4h\n"
+ "smlal2 v8.4s, v24.8h, v1.8h\n"
+ "smlal v10.4s, v30.4h, v1.4h\n"
"smlal2 v7.4s, v30.8h, v1.8h\n"
- "tbz x1, #2, 53f\n"
- "ld1 { v26.s }[0], [x14], #0x4\n"
- "tbz x1, #1, 52f\n"
- "ld1 { v26.h }[2], [x14], #0x2\n"
- "tbz x1, #0, 55f\n"
- "ld1 { v26.b }[6], [x14]\n"
+ "tbz x0, #2, 53f\n"
+ "ld1 { v26.s }[0], [x21], #0x4\n"
+ "tbz x0, #1, 52f\n"
+ "ld1 { v26.h }[2], [x21], #0x2\n"
+ "tbz x0, #0, 55f\n"
+ "ld1 { v26.b }[6], [x21]\n"
"b 55f\n"
"52:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset
- "tbz x1, #0, 55f\n"
- "ld1 { v26.b }[4], [x14]\n"
+ "tbz x0, #0, 55f\n"
+ "ld1 { v26.b }[4], [x21]\n"
"b 55f\n"
"53:" // Oddments: Load (3, 2): Bit 2: Unset
- "tbz x1, #1, 54f\n"
- "ld1 { v26.h }[0], [x14], #0x2\n"
- "tbz x1, #0, 55f\n"
- "ld1 { v26.b }[2], [x14]\n"
+ "tbz x0, #1, 54f\n"
+ "ld1 { v26.h }[0], [x21], #0x2\n"
+ "tbz x0, #0, 55f\n"
+ "ld1 { v26.b }[2], [x21]\n"
"b 55f\n"
"54:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 55f\n"
- "ld1 { v26.b }[0], [x14]\n"
+ "tbz x0, #0, 55f\n"
+ "ld1 { v26.b }[0], [x21]\n"
"55:" // Oddments: Load (3, 2): Bit 2: End
- "ldr d2, [x3, #0x60]\n"
+ "ldr d2, [x23, #0x60]\n"
"usubl v26.8h, v26.8b, v9.8b\n"
- "usubl v2.8h, v2.8b, v15.8b\n"
- "ldr x13, [x4, #0xa8]\n"
- "smlal v17.4s, v26.4h, v1.4h\n"
- "smlal2 v21.4s, v26.8h, v1.8h\n"
- "add x13, x13, x0\n"
- "smlal v13.4s, v24.4h, v2.4h\n"
- "smlal2 v19.4s, v24.8h, v2.8h\n"
- "smlal v20.4s, v27.4h, v2.4h\n"
- "smlal2 v10.4s, v27.8h, v2.8h\n"
- "smlal v8.4s, v26.4h, v2.4h\n"
+ "usubl v2.8h, v2.8b, v14.8b\n"
+ "ldr x2, [x20, #0xa8]\n"
+ "smlal v6.4s, v26.4h, v1.4h\n"
+ "smlal2 v5.4s, v26.8h, v1.8h\n"
+ "add x2, x2, x24\n"
+ "smlal v15.4s, v24.4h, v2.4h\n"
+ "smlal2 v16.4s, v24.8h, v2.8h\n"
+ "smlal v17.4s, v27.4h, v2.4h\n"
+ "smlal2 v8.4s, v27.8h, v2.8h\n"
+ "smlal v10.4s, v26.4h, v2.4h\n"
"smlal2 v7.4s, v26.8h, v2.8h\n"
- "tbz x1, #2, 57f\n"
- "ld1 { v25.s }[0], [x13], #0x4\n"
- "tbz x1, #1, 56f\n"
- "ld1 { v25.h }[2], [x13], #0x2\n"
- "tbz x1, #0, 59f\n"
- "ld1 { v25.b }[6], [x13]\n"
+ "tbz x0, #2, 57f\n"
+ "ld1 { v25.s }[0], [x2], #0x4\n"
+ "tbz x0, #1, 56f\n"
+ "ld1 { v25.h }[2], [x2], #0x2\n"
+ "tbz x0, #0, 59f\n"
+ "ld1 { v25.b }[6], [x2]\n"
"b 59f\n"
"56:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset
- "tbz x1, #0, 59f\n"
- "ld1 { v25.b }[4], [x13]\n"
+ "tbz x0, #0, 59f\n"
+ "ld1 { v25.b }[4], [x2]\n"
"b 59f\n"
"57:" // Oddments: Load (3, 3): Bit 2: Unset
- "tbz x1, #1, 58f\n"
- "ld1 { v25.h }[0], [x13], #0x2\n"
- "tbz x1, #0, 59f\n"
- "ld1 { v25.b }[2], [x13]\n"
+ "tbz x0, #1, 58f\n"
+ "ld1 { v25.h }[0], [x2], #0x2\n"
+ "tbz x0, #0, 59f\n"
+ "ld1 { v25.b }[2], [x2]\n"
"b 59f\n"
"58:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 59f\n"
- "ld1 { v25.b }[0], [x13]\n"
+ "tbz x0, #0, 59f\n"
+ "ld1 { v25.b }[0], [x2]\n"
"59:" // Oddments: Load (3, 3): Bit 2: End
- "ldr d3, [x3, #0x68]\n"
+ "ldr d3, [x23, #0x68]\n"
"usubl v25.8h, v25.8b, v9.8b\n"
- "usubl v3.8h, v3.8b, v15.8b\n"
- "ldr x12, [x4, #0xb0]\n"
- "smlal v17.4s, v25.4h, v2.4h\n"
- "smlal2 v21.4s, v25.8h, v2.8h\n"
- "add x12, x12, x0\n"
- "smlal v13.4s, v27.4h, v3.4h\n"
- "smlal2 v19.4s, v27.8h, v3.8h\n"
- "smlal v20.4s, v23.4h, v3.4h\n"
- "smlal2 v10.4s, v23.8h, v3.8h\n"
- "smlal v8.4s, v25.4h, v3.4h\n"
+ "usubl v3.8h, v3.8b, v14.8b\n"
+ "ldr x13, [x20, #0xb0]\n"
+ "smlal v6.4s, v25.4h, v2.4h\n"
+ "smlal2 v5.4s, v25.8h, v2.8h\n"
+ "add x13, x13, x24\n"
+ "smlal v15.4s, v27.4h, v3.4h\n"
+ "smlal2 v16.4s, v27.8h, v3.8h\n"
+ "smlal v17.4s, v23.4h, v3.4h\n"
+ "smlal2 v8.4s, v23.8h, v3.8h\n"
+ "smlal v10.4s, v25.4h, v3.4h\n"
"smlal2 v7.4s, v25.8h, v3.8h\n"
- "tbz x1, #2, 61f\n"
- "ld1 { v24.s }[0], [x12], #0x4\n"
- "tbz x1, #1, 60f\n"
- "ld1 { v24.h }[2], [x12], #0x2\n"
- "tbz x1, #0, 63f\n"
- "ld1 { v24.b }[6], [x12]\n"
+ "tbz x0, #2, 61f\n"
+ "ld1 { v24.s }[0], [x13], #0x4\n"
+ "tbz x0, #1, 60f\n"
+ "ld1 { v24.h }[2], [x13], #0x2\n"
+ "tbz x0, #0, 63f\n"
+ "ld1 { v24.b }[6], [x13]\n"
"b 63f\n"
"60:" // Oddments: Load (3, 4): Bit 2: Bit 1: Unset
- "tbz x1, #0, 63f\n"
- "ld1 { v24.b }[4], [x12]\n"
+ "tbz x0, #0, 63f\n"
+ "ld1 { v24.b }[4], [x13]\n"
"b 63f\n"
"61:" // Oddments: Load (3, 4): Bit 2: Unset
- "tbz x1, #1, 62f\n"
- "ld1 { v24.h }[0], [x12], #0x2\n"
- "tbz x1, #0, 63f\n"
- "ld1 { v24.b }[2], [x12]\n"
+ "tbz x0, #1, 62f\n"
+ "ld1 { v24.h }[0], [x13], #0x2\n"
+ "tbz x0, #0, 63f\n"
+ "ld1 { v24.b }[2], [x13]\n"
"b 63f\n"
"62:" // Oddments: Load (3, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 63f\n"
- "ld1 { v24.b }[0], [x12]\n"
+ "tbz x0, #0, 63f\n"
+ "ld1 { v24.b }[0], [x13]\n"
"63:" // Oddments: Load (3, 4): Bit 2: End
- "ldr d4, [x3, #0x70]\n"
+ "ldr d4, [x23, #0x70]\n"
"usubl v24.8h, v24.8b, v9.8b\n"
- "usubl v4.8h, v4.8b, v15.8b\n"
- "ldr x20, [x4, #0xb8]\n"
- "smlal v17.4s, v24.4h, v3.4h\n"
- "smlal2 v21.4s, v24.8h, v3.8h\n"
- "add x20, x20, x0\n"
- "smlal v13.4s, v23.4h, v4.4h\n"
- "smlal2 v19.4s, v23.8h, v4.8h\n"
- "smlal v20.4s, v28.4h, v4.4h\n"
- "smlal2 v10.4s, v28.8h, v4.8h\n"
- "smlal v8.4s, v24.4h, v4.4h\n"
+ "usubl v4.8h, v4.8b, v14.8b\n"
+ "ldr x9, [x20, #0xb8]\n"
+ "smlal v6.4s, v24.4h, v3.4h\n"
+ "smlal2 v5.4s, v24.8h, v3.8h\n"
+ "add x9, x9, x24\n"
+ "smlal v15.4s, v23.4h, v4.4h\n"
+ "smlal2 v16.4s, v23.8h, v4.8h\n"
+ "smlal v17.4s, v28.4h, v4.4h\n"
+ "smlal2 v8.4s, v28.8h, v4.8h\n"
+ "smlal v10.4s, v24.4h, v4.4h\n"
"smlal2 v7.4s, v24.8h, v4.8h\n"
- "tbz x1, #2, 65f\n"
- "ld1 { v22.s }[0], [x20], #0x4\n"
- "tbz x1, #1, 64f\n"
- "ld1 { v22.h }[2], [x20], #0x2\n"
- "tbz x1, #0, 67f\n"
- "ld1 { v22.b }[6], [x20]\n"
+ "tbz x0, #2, 65f\n"
+ "ld1 { v22.s }[0], [x9], #0x4\n"
+ "tbz x0, #1, 64f\n"
+ "ld1 { v22.h }[2], [x9], #0x2\n"
+ "tbz x0, #0, 67f\n"
+ "ld1 { v22.b }[6], [x9]\n"
"b 67f\n"
"64:" // Oddments: Load (3, 5): Bit 2: Bit 1: Unset
- "tbz x1, #0, 67f\n"
- "ld1 { v22.b }[4], [x20]\n"
+ "tbz x0, #0, 67f\n"
+ "ld1 { v22.b }[4], [x9]\n"
"b 67f\n"
"65:" // Oddments: Load (3, 5): Bit 2: Unset
- "tbz x1, #1, 66f\n"
- "ld1 { v22.h }[0], [x20], #0x2\n"
- "tbz x1, #0, 67f\n"
- "ld1 { v22.b }[2], [x20]\n"
+ "tbz x0, #1, 66f\n"
+ "ld1 { v22.h }[0], [x9], #0x2\n"
+ "tbz x0, #0, 67f\n"
+ "ld1 { v22.b }[2], [x9]\n"
"b 67f\n"
"66:" // Oddments: Load (3, 5): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 67f\n"
- "ld1 { v22.b }[0], [x20]\n"
+ "tbz x0, #0, 67f\n"
+ "ld1 { v22.b }[0], [x9]\n"
"67:" // Oddments: Load (3, 5): Bit 2: End
- "ldr d0, [x3, #0x78]\n"
+ "ldr d0, [x23, #0x78]\n"
"usubl v22.8h, v22.8b, v9.8b\n"
- "usubl v0.8h, v0.8b, v15.8b\n"
- "ldr x11, [x4, #0xc0]\n"
- "smlal v17.4s, v22.4h, v4.4h\n"
- "smlal2 v21.4s, v22.8h, v4.8h\n"
- "add x11, x11, x0\n"
- "smlal v13.4s, v31.4h, v0.4h\n"
- "smlal2 v19.4s, v31.8h, v0.8h\n"
- "smlal v20.4s, v30.4h, v0.4h\n"
- "smlal2 v10.4s, v30.8h, v0.8h\n"
- "tbz x1, #2, 69f\n"
- "ld1 { v27.s }[0], [x11], #0x4\n"
- "tbz x1, #1, 68f\n"
- "ld1 { v27.h }[2], [x11], #0x2\n"
- "tbz x1, #0, 71f\n"
- "ld1 { v27.b }[6], [x11]\n"
+ "usubl v0.8h, v0.8b, v14.8b\n"
+ "ldr x19, [x20, #0xc0]\n"
+ "smlal v6.4s, v22.4h, v4.4h\n"
+ "smlal2 v5.4s, v22.8h, v4.8h\n"
+ "add x19, x19, x24\n"
+ "smlal v15.4s, v31.4h, v0.4h\n"
+ "smlal2 v16.4s, v31.8h, v0.8h\n"
+ "smlal v17.4s, v30.4h, v0.4h\n"
+ "smlal2 v8.4s, v30.8h, v0.8h\n"
+ "tbz x0, #2, 69f\n"
+ "ld1 { v27.s }[0], [x19], #0x4\n"
+ "tbz x0, #1, 68f\n"
+ "ld1 { v27.h }[2], [x19], #0x2\n"
+ "tbz x0, #0, 71f\n"
+ "ld1 { v27.b }[6], [x19]\n"
"b 71f\n"
"68:" // Oddments: Load (4, 0): Bit 2: Bit 1: Unset
- "tbz x1, #0, 71f\n"
- "ld1 { v27.b }[4], [x11]\n"
+ "tbz x0, #0, 71f\n"
+ "ld1 { v27.b }[4], [x19]\n"
"b 71f\n"
"69:" // Oddments: Load (4, 0): Bit 2: Unset
- "tbz x1, #1, 70f\n"
- "ld1 { v27.h }[0], [x11], #0x2\n"
- "tbz x1, #0, 71f\n"
- "ld1 { v27.b }[2], [x11]\n"
+ "tbz x0, #1, 70f\n"
+ "ld1 { v27.h }[0], [x19], #0x2\n"
+ "tbz x0, #0, 71f\n"
+ "ld1 { v27.b }[2], [x19]\n"
"b 71f\n"
"70:" // Oddments: Load (4, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 71f\n"
- "ld1 { v27.b }[0], [x11]\n"
+ "tbz x0, #0, 71f\n"
+ "ld1 { v27.b }[0], [x19]\n"
"71:" // Oddments: Load (4, 0): Bit 2: End
"usubl v27.8h, v27.8b, v9.8b\n"
- "ldr x22, [x4, #0xc8]\n"
- "smlal v8.4s, v27.4h, v0.4h\n"
+ "ldr x28, [x20, #0xc8]\n"
+ "smlal v10.4s, v27.4h, v0.4h\n"
"smlal2 v7.4s, v27.8h, v0.8h\n"
- "add x22, x22, x0\n"
- "tbz x1, #2, 73f\n"
- "ld1 { v23.s }[0], [x22], #0x4\n"
- "tbz x1, #1, 72f\n"
- "ld1 { v23.h }[2], [x22], #0x2\n"
- "tbz x1, #0, 75f\n"
- "ld1 { v23.b }[6], [x22]\n"
+ "add x28, x28, x24\n"
+ "tbz x0, #2, 73f\n"
+ "ld1 { v23.s }[0], [x28], #0x4\n"
+ "tbz x0, #1, 72f\n"
+ "ld1 { v23.h }[2], [x28], #0x2\n"
+ "tbz x0, #0, 75f\n"
+ "ld1 { v23.b }[6], [x28]\n"
"b 75f\n"
"72:" // Oddments: Load (4, 1): Bit 2: Bit 1: Unset
- "tbz x1, #0, 75f\n"
- "ld1 { v23.b }[4], [x22]\n"
+ "tbz x0, #0, 75f\n"
+ "ld1 { v23.b }[4], [x28]\n"
"b 75f\n"
"73:" // Oddments: Load (4, 1): Bit 2: Unset
- "tbz x1, #1, 74f\n"
- "ld1 { v23.h }[0], [x22], #0x2\n"
- "tbz x1, #0, 75f\n"
- "ld1 { v23.b }[2], [x22]\n"
+ "tbz x0, #1, 74f\n"
+ "ld1 { v23.h }[0], [x28], #0x2\n"
+ "tbz x0, #0, 75f\n"
+ "ld1 { v23.b }[2], [x28]\n"
"b 75f\n"
"74:" // Oddments: Load (4, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 75f\n"
- "ld1 { v23.b }[0], [x22]\n"
+ "tbz x0, #0, 75f\n"
+ "ld1 { v23.b }[0], [x28]\n"
"75:" // Oddments: Load (4, 1): Bit 2: End
- "ldr d1, [x3, #0x80]\n"
+ "ldr d1, [x23, #0x80]\n"
"usubl v23.8h, v23.8b, v9.8b\n"
- "usubl v1.8h, v1.8b, v15.8b\n"
- "ldr x9, [x4, #0xd0]\n"
- "smlal v17.4s, v23.4h, v0.4h\n"
- "smlal2 v21.4s, v23.8h, v0.8h\n"
- "add x9, x9, x0\n"
- "smlal v13.4s, v30.4h, v1.4h\n"
- "smlal2 v19.4s, v30.8h, v1.8h\n"
- "smlal v20.4s, v26.4h, v1.4h\n"
- "smlal2 v10.4s, v26.8h, v1.8h\n"
- "smlal v8.4s, v23.4h, v1.4h\n"
+ "usubl v1.8h, v1.8b, v14.8b\n"
+ "ldr x6, [x20, #0xd0]\n"
+ "smlal v6.4s, v23.4h, v0.4h\n"
+ "smlal2 v5.4s, v23.8h, v0.8h\n"
+ "add x6, x6, x24\n"
+ "smlal v15.4s, v30.4h, v1.4h\n"
+ "smlal2 v16.4s, v30.8h, v1.8h\n"
+ "smlal v17.4s, v26.4h, v1.4h\n"
+ "smlal2 v8.4s, v26.8h, v1.8h\n"
+ "smlal v10.4s, v23.4h, v1.4h\n"
"smlal2 v7.4s, v23.8h, v1.8h\n"
- "tbz x1, #2, 77f\n"
- "ld1 { v31.s }[0], [x9], #0x4\n"
- "tbz x1, #1, 76f\n"
- "ld1 { v31.h }[2], [x9], #0x2\n"
- "tbz x1, #0, 79f\n"
- "ld1 { v31.b }[6], [x9]\n"
+ "tbz x0, #2, 77f\n"
+ "ld1 { v31.s }[0], [x6], #0x4\n"
+ "tbz x0, #1, 76f\n"
+ "ld1 { v31.h }[2], [x6], #0x2\n"
+ "tbz x0, #0, 79f\n"
+ "ld1 { v31.b }[6], [x6]\n"
"b 79f\n"
"76:" // Oddments: Load (4, 2): Bit 2: Bit 1: Unset
- "tbz x1, #0, 79f\n"
- "ld1 { v31.b }[4], [x9]\n"
+ "tbz x0, #0, 79f\n"
+ "ld1 { v31.b }[4], [x6]\n"
"b 79f\n"
"77:" // Oddments: Load (4, 2): Bit 2: Unset
- "tbz x1, #1, 78f\n"
- "ld1 { v31.h }[0], [x9], #0x2\n"
- "tbz x1, #0, 79f\n"
- "ld1 { v31.b }[2], [x9]\n"
+ "tbz x0, #1, 78f\n"
+ "ld1 { v31.h }[0], [x6], #0x2\n"
+ "tbz x0, #0, 79f\n"
+ "ld1 { v31.b }[2], [x6]\n"
"b 79f\n"
"78:" // Oddments: Load (4, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 79f\n"
- "ld1 { v31.b }[0], [x9]\n"
+ "tbz x0, #0, 79f\n"
+ "ld1 { v31.b }[0], [x6]\n"
"79:" // Oddments: Load (4, 2): Bit 2: End
- "ldr d2, [x3, #0x88]\n"
+ "ldr d2, [x23, #0x88]\n"
"usubl v31.8h, v31.8b, v9.8b\n"
- "usubl v2.8h, v2.8b, v15.8b\n"
- "ldr x28, [x4, #0xd8]\n"
- "smlal v17.4s, v31.4h, v1.4h\n"
- "smlal2 v21.4s, v31.8h, v1.8h\n"
- "add x28, x28, x0\n"
- "smlal v13.4s, v26.4h, v2.4h\n"
- "smlal2 v19.4s, v26.8h, v2.8h\n"
- "smlal v20.4s, v25.4h, v2.4h\n"
- "smlal2 v10.4s, v25.8h, v2.8h\n"
- "smlal v8.4s, v31.4h, v2.4h\n"
+ "usubl v2.8h, v2.8b, v14.8b\n"
+ "ldr x27, [x20, #0xd8]\n"
+ "smlal v6.4s, v31.4h, v1.4h\n"
+ "smlal2 v5.4s, v31.8h, v1.8h\n"
+ "add x27, x27, x24\n"
+ "smlal v15.4s, v26.4h, v2.4h\n"
+ "smlal2 v16.4s, v26.8h, v2.8h\n"
+ "smlal v17.4s, v25.4h, v2.4h\n"
+ "smlal2 v8.4s, v25.8h, v2.8h\n"
+ "smlal v10.4s, v31.4h, v2.4h\n"
"smlal2 v7.4s, v31.8h, v2.8h\n"
- "tbz x1, #2, 81f\n"
- "ld1 { v30.s }[0], [x28], #0x4\n"
- "tbz x1, #1, 80f\n"
- "ld1 { v30.h }[2], [x28], #0x2\n"
- "tbz x1, #0, 83f\n"
- "ld1 { v30.b }[6], [x28]\n"
+ "tbz x0, #2, 81f\n"
+ "ld1 { v30.s }[0], [x27], #0x4\n"
+ "tbz x0, #1, 80f\n"
+ "ld1 { v30.h }[2], [x27], #0x2\n"
+ "tbz x0, #0, 83f\n"
+ "ld1 { v30.b }[6], [x27]\n"
"b 83f\n"
"80:" // Oddments: Load (4, 3): Bit 2: Bit 1: Unset
- "tbz x1, #0, 83f\n"
- "ld1 { v30.b }[4], [x28]\n"
+ "tbz x0, #0, 83f\n"
+ "ld1 { v30.b }[4], [x27]\n"
"b 83f\n"
"81:" // Oddments: Load (4, 3): Bit 2: Unset
- "tbz x1, #1, 82f\n"
- "ld1 { v30.h }[0], [x28], #0x2\n"
- "tbz x1, #0, 83f\n"
- "ld1 { v30.b }[2], [x28]\n"
+ "tbz x0, #1, 82f\n"
+ "ld1 { v30.h }[0], [x27], #0x2\n"
+ "tbz x0, #0, 83f\n"
+ "ld1 { v30.b }[2], [x27]\n"
"b 83f\n"
"82:" // Oddments: Load (4, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 83f\n"
- "ld1 { v30.b }[0], [x28]\n"
+ "tbz x0, #0, 83f\n"
+ "ld1 { v30.b }[0], [x27]\n"
"83:" // Oddments: Load (4, 3): Bit 2: End
- "ldr d3, [x3, #0x90]\n"
+ "ldr d3, [x23, #0x90]\n"
"usubl v30.8h, v30.8b, v9.8b\n"
- "usubl v3.8h, v3.8b, v15.8b\n"
- "ldr x27, [x4, #0xe0]\n"
- "smlal v17.4s, v30.4h, v2.4h\n"
- "smlal2 v21.4s, v30.8h, v2.8h\n"
- "add x27, x27, x0\n"
- "smlal v13.4s, v25.4h, v3.4h\n"
- "smlal2 v19.4s, v25.8h, v3.8h\n"
- "smlal v20.4s, v24.4h, v3.4h\n"
- "smlal2 v10.4s, v24.8h, v3.8h\n"
- "smlal v8.4s, v30.4h, v3.4h\n"
+ "usubl v3.8h, v3.8b, v14.8b\n"
+ "ldr x11, [x20, #0xe0]\n"
+ "smlal v6.4s, v30.4h, v2.4h\n"
+ "smlal2 v5.4s, v30.8h, v2.8h\n"
+ "add x11, x11, x24\n"
+ "smlal v15.4s, v25.4h, v3.4h\n"
+ "smlal2 v16.4s, v25.8h, v3.8h\n"
+ "smlal v17.4s, v24.4h, v3.4h\n"
+ "smlal2 v8.4s, v24.8h, v3.8h\n"
+ "smlal v10.4s, v30.4h, v3.4h\n"
"smlal2 v7.4s, v30.8h, v3.8h\n"
- "tbz x1, #2, 85f\n"
- "ld1 { v28.s }[0], [x27], #0x4\n"
- "tbz x1, #1, 84f\n"
- "ld1 { v28.h }[2], [x27], #0x2\n"
- "tbz x1, #0, 87f\n"
- "ld1 { v28.b }[6], [x27]\n"
+ "tbz x0, #2, 85f\n"
+ "ld1 { v28.s }[0], [x11], #0x4\n"
+ "tbz x0, #1, 84f\n"
+ "ld1 { v28.h }[2], [x11], #0x2\n"
+ "tbz x0, #0, 87f\n"
+ "ld1 { v28.b }[6], [x11]\n"
"b 87f\n"
"84:" // Oddments: Load (4, 4): Bit 2: Bit 1: Unset
- "tbz x1, #0, 87f\n"
- "ld1 { v28.b }[4], [x27]\n"
+ "tbz x0, #0, 87f\n"
+ "ld1 { v28.b }[4], [x11]\n"
"b 87f\n"
"85:" // Oddments: Load (4, 4): Bit 2: Unset
- "tbz x1, #1, 86f\n"
- "ld1 { v28.h }[0], [x27], #0x2\n"
- "tbz x1, #0, 87f\n"
- "ld1 { v28.b }[2], [x27]\n"
+ "tbz x0, #1, 86f\n"
+ "ld1 { v28.h }[0], [x11], #0x2\n"
+ "tbz x0, #0, 87f\n"
+ "ld1 { v28.b }[2], [x11]\n"
"b 87f\n"
"86:" // Oddments: Load (4, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 87f\n"
- "ld1 { v28.b }[0], [x27]\n"
+ "tbz x0, #0, 87f\n"
+ "ld1 { v28.b }[0], [x11]\n"
"87:" // Oddments: Load (4, 4): Bit 2: End
- "ldr d4, [x3, #0x98]\n"
+ "ldr d4, [x23, #0x98]\n"
"usubl v28.8h, v28.8b, v9.8b\n"
- "usubl v4.8h, v4.8b, v15.8b\n"
- "ldr x26, [x4, #0xe8]\n"
- "smlal v17.4s, v28.4h, v3.4h\n"
- "smlal2 v21.4s, v28.8h, v3.8h\n"
- "add x26, x26, x0\n"
- "smlal v13.4s, v24.4h, v4.4h\n"
- "smlal2 v19.4s, v24.8h, v4.8h\n"
- "smlal v20.4s, v22.4h, v4.4h\n"
- "smlal2 v10.4s, v22.8h, v4.8h\n"
- "smlal v8.4s, v28.4h, v4.4h\n"
+ "usubl v4.8h, v4.8b, v14.8b\n"
+ "ldr x17, [x20, #0xe8]\n"
+ "smlal v6.4s, v28.4h, v3.4h\n"
+ "smlal2 v5.4s, v28.8h, v3.8h\n"
+ "add x17, x17, x24\n"
+ "smlal v15.4s, v24.4h, v4.4h\n"
+ "smlal2 v16.4s, v24.8h, v4.8h\n"
+ "smlal v17.4s, v22.4h, v4.4h\n"
+ "smlal2 v8.4s, v22.8h, v4.8h\n"
+ "smlal v10.4s, v28.4h, v4.4h\n"
"smlal2 v7.4s, v28.8h, v4.8h\n"
- "tbz x1, #2, 89f\n"
- "ld1 { v26.s }[0], [x26], #0x4\n"
- "tbz x1, #1, 88f\n"
- "ld1 { v26.h }[2], [x26], #0x2\n"
- "tbz x1, #0, 91f\n"
- "ld1 { v26.b }[6], [x26]\n"
+ "tbz x0, #2, 89f\n"
+ "ld1 { v26.s }[0], [x17], #0x4\n"
+ "tbz x0, #1, 88f\n"
+ "ld1 { v26.h }[2], [x17], #0x2\n"
+ "tbz x0, #0, 91f\n"
+ "ld1 { v26.b }[6], [x17]\n"
"b 91f\n"
"88:" // Oddments: Load (4, 5): Bit 2: Bit 1: Unset
- "tbz x1, #0, 91f\n"
- "ld1 { v26.b }[4], [x26]\n"
+ "tbz x0, #0, 91f\n"
+ "ld1 { v26.b }[4], [x17]\n"
"b 91f\n"
"89:" // Oddments: Load (4, 5): Bit 2: Unset
- "tbz x1, #1, 90f\n"
- "ld1 { v26.h }[0], [x26], #0x2\n"
- "tbz x1, #0, 91f\n"
- "ld1 { v26.b }[2], [x26]\n"
+ "tbz x0, #1, 90f\n"
+ "ld1 { v26.h }[0], [x17], #0x2\n"
+ "tbz x0, #0, 91f\n"
+ "ld1 { v26.b }[2], [x17]\n"
"b 91f\n"
"90:" // Oddments: Load (4, 5): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 91f\n"
- "ld1 { v26.b }[0], [x26]\n"
+ "tbz x0, #0, 91f\n"
+ "ld1 { v26.b }[0], [x17]\n"
"91:" // Oddments: Load (4, 5): Bit 2: End
- "ldr d0, [x3, #0xa0]\n"
+ "ldr d0, [x23, #0xa0]\n"
"usubl v26.8h, v26.8b, v9.8b\n"
- "usubl v0.8h, v0.8b, v15.8b\n"
- "ldr x25, [x4, #0xf0]\n"
- "smlal v17.4s, v26.4h, v4.4h\n"
- "smlal2 v21.4s, v26.8h, v4.8h\n"
- "add x25, x25, x0\n"
- "smlal v13.4s, v27.4h, v0.4h\n"
- "smlal2 v19.4s, v27.8h, v0.8h\n"
- "smlal v20.4s, v23.4h, v0.4h\n"
- "smlal2 v10.4s, v23.8h, v0.8h\n"
- "tbz x1, #2, 93f\n"
- "ld1 { v25.s }[0], [x25], #0x4\n"
- "tbz x1, #1, 92f\n"
- "ld1 { v25.h }[2], [x25], #0x2\n"
- "tbz x1, #0, 95f\n"
- "ld1 { v25.b }[6], [x25]\n"
+ "usubl v0.8h, v0.8b, v14.8b\n"
+ "ldr x5, [x20, #0xf0]\n"
+ "smlal v6.4s, v26.4h, v4.4h\n"
+ "smlal2 v5.4s, v26.8h, v4.8h\n"
+ "add x5, x5, x24\n"
+ "smlal v15.4s, v27.4h, v0.4h\n"
+ "smlal2 v16.4s, v27.8h, v0.8h\n"
+ "smlal v17.4s, v23.4h, v0.4h\n"
+ "smlal2 v8.4s, v23.8h, v0.8h\n"
+ "tbz x0, #2, 93f\n"
+ "ld1 { v25.s }[0], [x5], #0x4\n"
+ "tbz x0, #1, 92f\n"
+ "ld1 { v25.h }[2], [x5], #0x2\n"
+ "tbz x0, #0, 95f\n"
+ "ld1 { v25.b }[6], [x5]\n"
"b 95f\n"
"92:" // Oddments: Load (5, 0): Bit 2: Bit 1: Unset
- "tbz x1, #0, 95f\n"
- "ld1 { v25.b }[4], [x25]\n"
+ "tbz x0, #0, 95f\n"
+ "ld1 { v25.b }[4], [x5]\n"
"b 95f\n"
"93:" // Oddments: Load (5, 0): Bit 2: Unset
- "tbz x1, #1, 94f\n"
- "ld1 { v25.h }[0], [x25], #0x2\n"
- "tbz x1, #0, 95f\n"
- "ld1 { v25.b }[2], [x25]\n"
+ "tbz x0, #1, 94f\n"
+ "ld1 { v25.h }[0], [x5], #0x2\n"
+ "tbz x0, #0, 95f\n"
+ "ld1 { v25.b }[2], [x5]\n"
"b 95f\n"
"94:" // Oddments: Load (5, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 95f\n"
- "ld1 { v25.b }[0], [x25]\n"
+ "tbz x0, #0, 95f\n"
+ "ld1 { v25.b }[0], [x5]\n"
"95:" // Oddments: Load (5, 0): Bit 2: End
"usubl v25.8h, v25.8b, v9.8b\n"
- "ldr x24, [x4, #0xf8]\n"
- "smlal v8.4s, v25.4h, v0.4h\n"
+ "ldr x25, [x20, #0xf8]\n"
+ "smlal v10.4s, v25.4h, v0.4h\n"
"smlal2 v7.4s, v25.8h, v0.8h\n"
- "add x24, x24, x0\n"
- "tbz x1, #2, 97f\n"
- "ld1 { v24.s }[0], [x24], #0x4\n"
- "tbz x1, #1, 96f\n"
- "ld1 { v24.h }[2], [x24], #0x2\n"
- "tbz x1, #0, 99f\n"
- "ld1 { v24.b }[6], [x24]\n"
+ "add x25, x25, x24\n"
+ "tbz x0, #2, 97f\n"
+ "ld1 { v24.s }[0], [x25], #0x4\n"
+ "tbz x0, #1, 96f\n"
+ "ld1 { v24.h }[2], [x25], #0x2\n"
+ "tbz x0, #0, 99f\n"
+ "ld1 { v24.b }[6], [x25]\n"
"b 99f\n"
"96:" // Oddments: Load (5, 1): Bit 2: Bit 1: Unset
- "tbz x1, #0, 99f\n"
- "ld1 { v24.b }[4], [x24]\n"
+ "tbz x0, #0, 99f\n"
+ "ld1 { v24.b }[4], [x25]\n"
"b 99f\n"
"97:" // Oddments: Load (5, 1): Bit 2: Unset
- "tbz x1, #1, 98f\n"
- "ld1 { v24.h }[0], [x24], #0x2\n"
- "tbz x1, #0, 99f\n"
- "ld1 { v24.b }[2], [x24]\n"
+ "tbz x0, #1, 98f\n"
+ "ld1 { v24.h }[0], [x25], #0x2\n"
+ "tbz x0, #0, 99f\n"
+ "ld1 { v24.b }[2], [x25]\n"
"b 99f\n"
"98:" // Oddments: Load (5, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 99f\n"
- "ld1 { v24.b }[0], [x24]\n"
+ "tbz x0, #0, 99f\n"
+ "ld1 { v24.b }[0], [x25]\n"
"99:" // Oddments: Load (5, 1): Bit 2: End
- "ldr d1, [x3, #0xa8]\n"
+ "ldr d1, [x23, #0xa8]\n"
"usubl v24.8h, v24.8b, v9.8b\n"
- "usubl v1.8h, v1.8b, v15.8b\n"
- "ldr x23, [x4, #0x100]\n"
- "smlal v17.4s, v24.4h, v0.4h\n"
- "smlal2 v21.4s, v24.8h, v0.8h\n"
- "add x23, x23, x0\n"
- "smlal v13.4s, v23.4h, v1.4h\n"
- "smlal2 v19.4s, v23.8h, v1.8h\n"
- "smlal v20.4s, v31.4h, v1.4h\n"
- "smlal2 v10.4s, v31.8h, v1.8h\n"
- "smlal v8.4s, v24.4h, v1.4h\n"
+ "usubl v1.8h, v1.8b, v14.8b\n"
+ "ldr x26, [x20, #0x100]\n"
+ "smlal v6.4s, v24.4h, v0.4h\n"
+ "smlal2 v5.4s, v24.8h, v0.8h\n"
+ "add x26, x26, x24\n"
+ "smlal v15.4s, v23.4h, v1.4h\n"
+ "smlal2 v16.4s, v23.8h, v1.8h\n"
+ "smlal v17.4s, v31.4h, v1.4h\n"
+ "smlal2 v8.4s, v31.8h, v1.8h\n"
+ "smlal v10.4s, v24.4h, v1.4h\n"
"smlal2 v7.4s, v24.8h, v1.8h\n"
- "tbz x1, #2, 101f\n"
- "ld1 { v27.s }[0], [x23], #0x4\n"
- "tbz x1, #1, 100f\n"
- "ld1 { v27.h }[2], [x23], #0x2\n"
- "tbz x1, #0, 103f\n"
- "ld1 { v27.b }[6], [x23]\n"
+ "tbz x0, #2, 101f\n"
+ "ld1 { v27.s }[0], [x26], #0x4\n"
+ "tbz x0, #1, 100f\n"
+ "ld1 { v27.h }[2], [x26], #0x2\n"
+ "tbz x0, #0, 103f\n"
+ "ld1 { v27.b }[6], [x26]\n"
"b 103f\n"
"100:" // Oddments: Load (5, 2): Bit 2: Bit 1: Unset
- "tbz x1, #0, 103f\n"
- "ld1 { v27.b }[4], [x23]\n"
+ "tbz x0, #0, 103f\n"
+ "ld1 { v27.b }[4], [x26]\n"
"b 103f\n"
"101:" // Oddments: Load (5, 2): Bit 2: Unset
- "tbz x1, #1, 102f\n"
- "ld1 { v27.h }[0], [x23], #0x2\n"
- "tbz x1, #0, 103f\n"
- "ld1 { v27.b }[2], [x23]\n"
+ "tbz x0, #1, 102f\n"
+ "ld1 { v27.h }[0], [x26], #0x2\n"
+ "tbz x0, #0, 103f\n"
+ "ld1 { v27.b }[2], [x26]\n"
"b 103f\n"
"102:" // Oddments: Load (5, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 103f\n"
- "ld1 { v27.b }[0], [x23]\n"
+ "tbz x0, #0, 103f\n"
+ "ld1 { v27.b }[0], [x26]\n"
"103:" // Oddments: Load (5, 2): Bit 2: End
- "ldr d2, [x3, #0xb0]\n"
+ "ldr d2, [x23, #0xb0]\n"
"usubl v27.8h, v27.8b, v9.8b\n"
- "usubl v2.8h, v2.8b, v15.8b\n"
- "ldr x15, [x4, #0x108]\n"
- "smlal v17.4s, v27.4h, v1.4h\n"
- "smlal2 v21.4s, v27.8h, v1.8h\n"
- "add x15, x15, x0\n"
- "smlal v13.4s, v31.4h, v2.4h\n"
- "smlal2 v19.4s, v31.8h, v2.8h\n"
- "smlal v20.4s, v30.4h, v2.4h\n"
- "smlal2 v10.4s, v30.8h, v2.8h\n"
- "smlal v8.4s, v27.4h, v2.4h\n"
+ "usubl v2.8h, v2.8b, v14.8b\n"
+ "ldr x12, [x20, #0x108]\n"
+ "smlal v6.4s, v27.4h, v1.4h\n"
+ "smlal2 v5.4s, v27.8h, v1.8h\n"
+ "add x12, x12, x24\n"
+ "smlal v15.4s, v31.4h, v2.4h\n"
+ "smlal2 v16.4s, v31.8h, v2.8h\n"
+ "smlal v17.4s, v30.4h, v2.4h\n"
+ "smlal2 v8.4s, v30.8h, v2.8h\n"
+ "smlal v10.4s, v27.4h, v2.4h\n"
"smlal2 v7.4s, v27.8h, v2.8h\n"
- "tbz x1, #2, 105f\n"
- "ld1 { v25.s }[0], [x15], #0x4\n"
- "tbz x1, #1, 104f\n"
- "ld1 { v25.h }[2], [x15], #0x2\n"
- "tbz x1, #0, 107f\n"
- "ld1 { v25.b }[6], [x15]\n"
+ "tbz x0, #2, 105f\n"
+ "ld1 { v25.s }[0], [x12], #0x4\n"
+ "tbz x0, #1, 104f\n"
+ "ld1 { v25.h }[2], [x12], #0x2\n"
+ "tbz x0, #0, 107f\n"
+ "ld1 { v25.b }[6], [x12]\n"
"b 107f\n"
"104:" // Oddments: Load (5, 3): Bit 2: Bit 1: Unset
- "tbz x1, #0, 107f\n"
- "ld1 { v25.b }[4], [x15]\n"
+ "tbz x0, #0, 107f\n"
+ "ld1 { v25.b }[4], [x12]\n"
"b 107f\n"
"105:" // Oddments: Load (5, 3): Bit 2: Unset
- "tbz x1, #1, 106f\n"
- "ld1 { v25.h }[0], [x15], #0x2\n"
- "tbz x1, #0, 107f\n"
- "ld1 { v25.b }[2], [x15]\n"
+ "tbz x0, #1, 106f\n"
+ "ld1 { v25.h }[0], [x12], #0x2\n"
+ "tbz x0, #0, 107f\n"
+ "ld1 { v25.b }[2], [x12]\n"
"b 107f\n"
"106:" // Oddments: Load (5, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 107f\n"
- "ld1 { v25.b }[0], [x15]\n"
+ "tbz x0, #0, 107f\n"
+ "ld1 { v25.b }[0], [x12]\n"
"107:" // Oddments: Load (5, 3): Bit 2: End
- "ldr d3, [x3, #0xb8]\n"
+ "ldr d3, [x23, #0xb8]\n"
"usubl v25.8h, v25.8b, v9.8b\n"
- "usubl v3.8h, v3.8b, v15.8b\n"
- "ldr x21, [x4, #0x110]\n"
- "smlal v17.4s, v25.4h, v2.4h\n"
- "smlal2 v21.4s, v25.8h, v2.8h\n"
- "add x21, x21, x0\n"
- "smlal v13.4s, v30.4h, v3.4h\n"
- "smlal2 v19.4s, v30.8h, v3.8h\n"
- "smlal v20.4s, v28.4h, v3.4h\n"
- "smlal2 v10.4s, v28.8h, v3.8h\n"
- "smlal v8.4s, v25.4h, v3.4h\n"
+ "usubl v3.8h, v3.8b, v14.8b\n"
+ "ldr x14, [x20, #0x110]\n"
+ "smlal v6.4s, v25.4h, v2.4h\n"
+ "smlal2 v5.4s, v25.8h, v2.8h\n"
+ "add x14, x14, x24\n"
+ "smlal v15.4s, v30.4h, v3.4h\n"
+ "smlal2 v16.4s, v30.8h, v3.8h\n"
+ "smlal v17.4s, v28.4h, v3.4h\n"
+ "smlal2 v8.4s, v28.8h, v3.8h\n"
+ "smlal v10.4s, v25.4h, v3.4h\n"
"smlal2 v7.4s, v25.8h, v3.8h\n"
- "tbz x1, #2, 109f\n"
- "ld1 { v24.s }[0], [x21], #0x4\n"
- "tbz x1, #1, 108f\n"
- "ld1 { v24.h }[2], [x21], #0x2\n"
- "tbz x1, #0, 111f\n"
- "ld1 { v24.b }[6], [x21]\n"
+ "tbz x0, #2, 109f\n"
+ "ld1 { v24.s }[0], [x14], #0x4\n"
+ "tbz x0, #1, 108f\n"
+ "ld1 { v24.h }[2], [x14], #0x2\n"
+ "tbz x0, #0, 111f\n"
+ "ld1 { v24.b }[6], [x14]\n"
"b 111f\n"
"108:" // Oddments: Load (5, 4): Bit 2: Bit 1: Unset
- "tbz x1, #0, 111f\n"
- "ld1 { v24.b }[4], [x21]\n"
+ "tbz x0, #0, 111f\n"
+ "ld1 { v24.b }[4], [x14]\n"
"b 111f\n"
"109:" // Oddments: Load (5, 4): Bit 2: Unset
- "tbz x1, #1, 110f\n"
- "ld1 { v24.h }[0], [x21], #0x2\n"
- "tbz x1, #0, 111f\n"
- "ld1 { v24.b }[2], [x21]\n"
+ "tbz x0, #1, 110f\n"
+ "ld1 { v24.h }[0], [x14], #0x2\n"
+ "tbz x0, #0, 111f\n"
+ "ld1 { v24.b }[2], [x14]\n"
"b 111f\n"
"110:" // Oddments: Load (5, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 111f\n"
- "ld1 { v24.b }[0], [x21]\n"
+ "tbz x0, #0, 111f\n"
+ "ld1 { v24.b }[0], [x14]\n"
"111:" // Oddments: Load (5, 4): Bit 2: End
- "ldr d4, [x3, #0xc0]\n"
+ "ldr d4, [x23, #0xc0]\n"
"usubl v24.8h, v24.8b, v9.8b\n"
- "usubl v4.8h, v4.8b, v15.8b\n"
- "ldr x20, [x4, #0x118]\n"
- "smlal v17.4s, v24.4h, v3.4h\n"
- "smlal2 v21.4s, v24.8h, v3.8h\n"
- "add x20, x20, x0\n"
- "smlal v13.4s, v28.4h, v4.4h\n"
- "smlal2 v19.4s, v28.8h, v4.8h\n"
- "smlal v20.4s, v26.4h, v4.4h\n"
- "smlal2 v10.4s, v26.8h, v4.8h\n"
- "smlal v8.4s, v24.4h, v4.4h\n"
+ "usubl v4.8h, v4.8b, v14.8b\n"
+ "ldr x21, [x20, #0x118]\n"
+ "smlal v6.4s, v24.4h, v3.4h\n"
+ "smlal2 v5.4s, v24.8h, v3.8h\n"
+ "add x21, x21, x24\n"
+ "smlal v15.4s, v28.4h, v4.4h\n"
+ "smlal2 v16.4s, v28.8h, v4.8h\n"
+ "smlal v17.4s, v26.4h, v4.4h\n"
+ "smlal2 v8.4s, v26.8h, v4.8h\n"
+ "smlal v10.4s, v24.4h, v4.4h\n"
"smlal2 v7.4s, v24.8h, v4.8h\n"
- "tbz x1, #2, 113f\n"
- "ld1 { v27.s }[0], [x20], #0x4\n"
- "tbz x1, #1, 112f\n"
- "ld1 { v27.h }[2], [x20], #0x2\n"
- "tbz x1, #0, 115f\n"
- "ld1 { v27.b }[6], [x20]\n"
+ "tbz x0, #2, 113f\n"
+ "ld1 { v27.s }[0], [x21], #0x4\n"
+ "tbz x0, #1, 112f\n"
+ "ld1 { v27.h }[2], [x21], #0x2\n"
+ "tbz x0, #0, 115f\n"
+ "ld1 { v27.b }[6], [x21]\n"
"b 115f\n"
"112:" // Oddments: Load (5, 5): Bit 2: Bit 1: Unset
- "tbz x1, #0, 115f\n"
- "ld1 { v27.b }[4], [x20]\n"
+ "tbz x0, #0, 115f\n"
+ "ld1 { v27.b }[4], [x21]\n"
"b 115f\n"
"113:" // Oddments: Load (5, 5): Bit 2: Unset
- "tbz x1, #1, 114f\n"
- "ld1 { v27.h }[0], [x20], #0x2\n"
- "tbz x1, #0, 115f\n"
- "ld1 { v27.b }[2], [x20]\n"
+ "tbz x0, #1, 114f\n"
+ "ld1 { v27.h }[0], [x21], #0x2\n"
+ "tbz x0, #0, 115f\n"
+ "ld1 { v27.b }[2], [x21]\n"
"b 115f\n"
"114:" // Oddments: Load (5, 5): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 115f\n"
- "ld1 { v27.b }[0], [x20]\n"
+ "tbz x0, #0, 115f\n"
+ "ld1 { v27.b }[0], [x21]\n"
"115:" // Oddments: Load (5, 5): Bit 2: End
"usubl v27.8h, v27.8b, v9.8b\n"
- "smlal v17.4s, v27.4h, v4.4h\n"
- "smlal2 v21.4s, v27.8h, v4.8h\n"
- "tbz x1, #2, 117f\n"
- "ld1 { v18.4s }, [x5], #0x10\n"
- "ld1 { v6.4s }, [x8], #0x10\n"
- "tbz x1, #1, 116f\n"
- "ld1 { v5.d }[0], [x5], #0x8\n"
- "ld1 { v22.d }[0], [x8], #0x8\n"
- "tbz x1, #0, 119f\n"
- "ld1 { v5.s }[2], [x5]\n"
- "ld1 { v22.s }[2], [x8]\n"
+ "smlal v6.4s, v27.4h, v4.4h\n"
+ "smlal2 v5.4s, v27.8h, v4.8h\n"
+ "tbz x0, #2, 117f\n"
+ "ld1 { v12.4s }, [x10], #0x10\n"
+ "ld1 { v19.4s }, [x1], #0x10\n"
+ "tbz x0, #1, 116f\n"
+ "ld1 { v20.d }[0], [x10], #0x8\n"
+ "ld1 { v29.d }[0], [x1], #0x8\n"
+ "tbz x0, #0, 119f\n"
+ "ld1 { v20.s }[2], [x10]\n"
+ "ld1 { v29.s }[2], [x1]\n"
"b 119f\n"
"116:" // Oddments: Load requant params: Bit 2: Bit 1: Unset
- "tbz x1, #0, 119f\n"
- "ld1 { v5.s }[0], [x5]\n"
- "ld1 { v22.s }[0], [x8]\n"
+ "tbz x0, #0, 119f\n"
+ "ld1 { v20.s }[0], [x10]\n"
+ "ld1 { v29.s }[0], [x1]\n"
"b 119f\n"
"117:" // Oddments: Load requant params: Bit 2: Unset
- "tbz x1, #1, 118f\n"
- "ld1 { v18.d }[0], [x5], #0x8\n"
- "ld1 { v6.d }[0], [x8], #0x8\n"
- "tbz x1, #0, 119f\n"
- "ld1 { v18.s }[2], [x5]\n"
- "ld1 { v6.s }[2], [x8]\n"
+ "tbz x0, #1, 118f\n"
+ "ld1 { v12.d }[0], [x10], #0x8\n"
+ "ld1 { v19.d }[0], [x1], #0x8\n"
+ "tbz x0, #0, 119f\n"
+ "ld1 { v12.s }[2], [x10]\n"
+ "ld1 { v19.s }[2], [x1]\n"
"b 119f\n"
"118:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 119f\n"
- "ld1 { v18.s }[0], [x5]\n"
- "ld1 { v6.s }[0], [x8]\n"
+ "tbz x0, #0, 119f\n"
+ "ld1 { v12.s }[0], [x10]\n"
+ "ld1 { v19.s }[0], [x1]\n"
"119:" // Oddments: Load requant params: Bit 2: End
- "sqrdmulh v13.4s, v13.4s, v18.4s\n"
- "and v30.16b, v13.16b, v6.16b\n"
- "add x17, x17, x10\n"
- "add x6, x6, x10\n"
- "sqrdmulh v19.4s, v19.4s, v5.4s\n"
- "sshr v30.4s, v30.4s, #0x1f\n"
- "add x7, x7, x10\n"
- "add x16, x16, x10\n"
- "and v16.16b, v19.16b, v22.16b\n"
- "sqrdmulh v20.4s, v20.4s, v18.4s\n"
- "sqrdmulh v8.4s, v8.4s, v18.4s\n"
- "sqrdmulh v17.4s, v17.4s, v18.4s\n"
- "sqadd v13.4s, v13.4s, v30.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "and v0.16b, v20.16b, v6.16b\n"
- "sqrdmulh v10.4s, v10.4s, v5.4s\n"
- "and v18.16b, v8.16b, v6.16b\n"
- "sqrdmulh v7.4s, v7.4s, v5.4s\n"
- "and v30.16b, v17.16b, v6.16b\n"
- "sqrdmulh v21.4s, v21.4s, v5.4s\n"
- "sqadd v19.4s, v19.4s, v16.4s\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "and v26.16b, v10.16b, v22.16b\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "and v23.16b, v7.16b, v22.16b\n"
- "sshr v30.4s, v30.4s, #0x1f\n"
- "and v16.16b, v21.16b, v22.16b\n"
- "sqadd v20.4s, v20.4s, v0.4s\n"
- "sshr v26.4s, v26.4s, #0x1f\n"
- "sqadd v8.4s, v8.4s, v18.4s\n"
+ "sqrdmulh v15.4s, v15.4s, v12.4s\n"
+ "sqrdmulh v17.4s, v17.4s, v12.4s\n"
+ "add x16, x16, x22\n"
+ "add x8, x8, x22\n"
+ "sqrdmulh v10.4s, v10.4s, v12.4s\n"
+ "sqrdmulh v6.4s, v6.4s, v12.4s\n"
+ "add x4, x4, x22\n"
+ "add x7, x7, x22\n"
+ "and v23.16b, v15.16b, v19.16b\n"
+ "sqrdmulh v16.4s, v16.4s, v20.4s\n"
+ "and v22.16b, v17.16b, v19.16b\n"
+ "sqrdmulh v8.4s, v8.4s, v20.4s\n"
+ "and v21.16b, v10.16b, v19.16b\n"
+ "sqrdmulh v7.4s, v7.4s, v20.4s\n"
+ "and v26.16b, v6.16b, v19.16b\n"
+ "sqrdmulh v5.4s, v5.4s, v20.4s\n"
"sshr v23.4s, v23.4s, #0x1f\n"
- "sqadd v17.4s, v17.4s, v30.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "srshl v13.4s, v13.4s, v6.4s\n"
- "srshl v20.4s, v20.4s, v6.4s\n"
- "sqadd v10.4s, v10.4s, v26.4s\n"
- "srshl v8.4s, v8.4s, v6.4s\n"
- "sqadd v7.4s, v7.4s, v23.4s\n"
- "srshl v17.4s, v17.4s, v6.4s\n"
- "sqadd v21.4s, v21.4s, v16.4s\n"
- "srshl v19.4s, v19.4s, v22.4s\n"
- "sqxtn v13.4h, v13.4s\n"
- "srshl v10.4s, v10.4s, v22.4s\n"
- "sqxtn v20.4h, v20.4s\n"
- "srshl v7.4s, v7.4s, v22.4s\n"
- "sqxtn v8.4h, v8.4s\n"
- "srshl v21.4s, v21.4s, v22.4s\n"
+ "and v4.16b, v16.16b, v29.16b\n"
+ "sshr v22.4s, v22.4s, #0x1f\n"
+ "and v2.16b, v8.16b, v29.16b\n"
+ "sshr v21.4s, v21.4s, #0x1f\n"
+ "and v3.16b, v7.16b, v29.16b\n"
+ "sshr v26.4s, v26.4s, #0x1f\n"
+ "and v25.16b, v5.16b, v29.16b\n"
+ "sqadd v15.4s, v15.4s, v23.4s\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "sqadd v17.4s, v17.4s, v22.4s\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sqadd v10.4s, v10.4s, v21.4s\n"
+ "sshr v3.4s, v3.4s, #0x1f\n"
+ "sqadd v6.4s, v6.4s, v26.4s\n"
+ "sshr v25.4s, v25.4s, #0x1f\n"
+ "srshl v15.4s, v15.4s, v19.4s\n"
+ "sqadd v16.4s, v16.4s, v4.4s\n"
+ "srshl v17.4s, v17.4s, v19.4s\n"
+ "sqadd v8.4s, v8.4s, v2.4s\n"
+ "srshl v10.4s, v10.4s, v19.4s\n"
+ "sqadd v7.4s, v7.4s, v3.4s\n"
+ "srshl v6.4s, v6.4s, v19.4s\n"
+ "sqadd v5.4s, v5.4s, v25.4s\n"
+ "srshl v16.4s, v16.4s, v29.4s\n"
+ "sqxtn v15.4h, v15.4s\n"
+ "srshl v8.4s, v8.4s, v29.4s\n"
"sqxtn v17.4h, v17.4s\n"
- "sqxtn2 v13.8h, v19.4s\n"
- "sqxtn2 v20.8h, v10.4s\n"
- "sqxtn2 v8.8h, v7.4s\n"
- "sqxtn2 v17.8h, v21.4s\n"
- "sqadd v13.8h, v13.8h, v14.8h\n"
- "sqadd v20.8h, v20.8h, v14.8h\n"
- "sqadd v8.8h, v8.8h, v14.8h\n"
- "sqadd v17.8h, v17.8h, v14.8h\n"
- "smax v13.8h, v13.8h, v12.8h\n"
- "smax v20.8h, v20.8h, v12.8h\n"
- "smax v8.8h, v8.8h, v12.8h\n"
- "smax v17.8h, v17.8h, v12.8h\n"
- "smin v13.8h, v13.8h, v11.8h\n"
- "smin v20.8h, v20.8h, v11.8h\n"
- "smin v8.8h, v8.8h, v11.8h\n"
- "smin v17.8h, v17.8h, v11.8h\n"
- "uzp1 v13.16b, v13.16b, v13.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "srshl v7.4s, v7.4s, v29.4s\n"
+ "sqxtn v10.4h, v10.4s\n"
+ "srshl v5.4s, v5.4s, v29.4s\n"
+ "sqxtn v6.4h, v6.4s\n"
+ "sqxtn2 v15.8h, v16.4s\n"
+ "sqxtn2 v17.8h, v8.4s\n"
+ "sqxtn2 v10.8h, v7.4s\n"
+ "sqxtn2 v6.8h, v5.4s\n"
+ "sqadd v15.8h, v15.8h, v18.8h\n"
+ "sqadd v17.8h, v17.8h, v18.8h\n"
+ "sqadd v10.8h, v10.8h, v18.8h\n"
+ "sqadd v6.8h, v6.8h, v18.8h\n"
+ "smax v15.8h, v15.8h, v11.8h\n"
+ "smax v17.8h, v17.8h, v11.8h\n"
+ "smax v10.8h, v10.8h, v11.8h\n"
+ "smax v6.8h, v6.8h, v11.8h\n"
+ "smin v15.8h, v15.8h, v13.8h\n"
+ "smin v17.8h, v17.8h, v13.8h\n"
+ "smin v10.8h, v10.8h, v13.8h\n"
+ "smin v6.8h, v6.8h, v13.8h\n"
+ "uzp1 v15.16b, v15.16b, v15.16b\n"
"uzp1 v17.16b, v17.16b, v17.16b\n"
- "tbz x1, #2, 121f\n"
- "st1 { v13.s }[0], [x17], #0x4\n"
- "st1 { v20.s }[0], [x6], #0x4\n"
- "st1 { v8.s }[0], [x7], #0x4\n"
- "st1 { v17.s }[0], [x16], #0x4\n"
- "tbz x1, #1, 120f\n"
- "st1 { v13.h }[2], [x17], #0x2\n"
- "st1 { v20.h }[2], [x6], #0x2\n"
- "st1 { v8.h }[2], [x7], #0x2\n"
- "st1 { v17.h }[2], [x16], #0x2\n"
- "tbz x1, #0, 123f\n"
- "st1 { v13.b }[6], [x17], #0x1\n"
- "st1 { v20.b }[6], [x6], #0x1\n"
- "st1 { v8.b }[6], [x7], #0x1\n"
- "st1 { v17.b }[6], [x16], #0x1\n"
+ "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "tbz x0, #2, 121f\n"
+ "st1 { v15.s }[0], [x16], #0x4\n"
+ "st1 { v17.s }[0], [x8], #0x4\n"
+ "st1 { v10.s }[0], [x4], #0x4\n"
+ "st1 { v6.s }[0], [x7], #0x4\n"
+ "tbz x0, #1, 120f\n"
+ "st1 { v15.h }[2], [x16], #0x2\n"
+ "st1 { v17.h }[2], [x8], #0x2\n"
+ "st1 { v10.h }[2], [x4], #0x2\n"
+ "st1 { v6.h }[2], [x7], #0x2\n"
+ "tbz x0, #0, 123f\n"
+ "st1 { v15.b }[6], [x16], #0x1\n"
+ "st1 { v17.b }[6], [x8], #0x1\n"
+ "st1 { v10.b }[6], [x4], #0x1\n"
+ "st1 { v6.b }[6], [x7], #0x1\n"
"b 123f\n"
"120:" // Oddments: Bit 2: Bit 1: Unset
- "tbz x1, #0, 123f\n"
- "st1 { v13.b }[4], [x17], #0x1\n"
- "st1 { v20.b }[4], [x6], #0x1\n"
- "st1 { v8.b }[4], [x7], #0x1\n"
- "st1 { v17.b }[4], [x16], #0x1\n"
+ "tbz x0, #0, 123f\n"
+ "st1 { v15.b }[4], [x16], #0x1\n"
+ "st1 { v17.b }[4], [x8], #0x1\n"
+ "st1 { v10.b }[4], [x4], #0x1\n"
+ "st1 { v6.b }[4], [x7], #0x1\n"
"b 123f\n"
"121:" // Oddments: Bit 2: Unset
- "tbz x1, #1, 122f\n"
- "st1 { v13.h }[0], [x17], #0x2\n"
- "st1 { v20.h }[0], [x6], #0x2\n"
- "st1 { v8.h }[0], [x7], #0x2\n"
- "st1 { v17.h }[0], [x16], #0x2\n"
- "tbz x1, #0, 123f\n"
- "st1 { v13.b }[2], [x17], #0x1\n"
- "st1 { v20.b }[2], [x6], #0x1\n"
- "st1 { v8.b }[2], [x7], #0x1\n"
- "st1 { v17.b }[2], [x16], #0x1\n"
+ "tbz x0, #1, 122f\n"
+ "st1 { v15.h }[0], [x16], #0x2\n"
+ "st1 { v17.h }[0], [x8], #0x2\n"
+ "st1 { v10.h }[0], [x4], #0x2\n"
+ "st1 { v6.h }[0], [x7], #0x2\n"
+ "tbz x0, #0, 123f\n"
+ "st1 { v15.b }[2], [x16], #0x1\n"
+ "st1 { v17.b }[2], [x8], #0x1\n"
+ "st1 { v10.b }[2], [x4], #0x1\n"
+ "st1 { v6.b }[2], [x7], #0x1\n"
"b 123f\n"
"122:" // Oddments: Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 123f\n"
- "st1 { v13.b }[0], [x17], #0x1\n"
- "st1 { v20.b }[0], [x6], #0x1\n"
- "st1 { v8.b }[0], [x7], #0x1\n"
- "st1 { v17.b }[0], [x16], #0x1\n"
+ "tbz x0, #0, 123f\n"
+ "st1 { v15.b }[0], [x16], #0x1\n"
+ "st1 { v17.b }[0], [x8], #0x1\n"
+ "st1 { v10.b }[0], [x4], #0x1\n"
+ "st1 { v6.b }[0], [x7], #0x1\n"
"123:" // Oddments: Bit 2: End
"124:" // End
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
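
The requantisation tail of the kernel above — sqrdmulh against the requantisation multiplier, the and/sshr/sqadd rounding fixup, srshl by the (negative) right shift, the c_offset sqadd and the smax/smin clamp before the uzp1 narrowing — is the usual gemmlowp-style fixed-point rescale of the int32 accumulators back to uint8. As a rough scalar model of what each lane computes, assuming gemmlowp rounding semantics and with illustrative helper names (this is a sketch, not the library's actual API), one channel looks like:

#include <stdint.h>

/* Scalar sketch of the NEON requantisation sequence above; the helper
 * names and the exact placement of the offset-add are assumptions made
 * for clarity. */

/* sqrdmulh: saturating rounding doubling multiply, returning the high
 * half of 2*a*b (saturates only for a == b == INT32_MIN). */
static inline int32_t rounding_doubling_high_mul(int32_t a, int32_t b)
{
    if (a == INT32_MIN && b == INT32_MIN) return INT32_MAX;
    int64_t ab = (int64_t) a * (int64_t) b;
    int64_t nudge = ab >= 0 ? (1ll << 30) : (1 - (1ll << 30));
    return (int32_t) ((ab + nudge) / (1ll << 31));
}

/* srshl plus the and/sshr/sqadd fixup: divide by 2^exponent, rounding
 * to nearest with ties away from zero. */
static inline int32_t rounding_divide_by_pot(int32_t x, int exponent)
{
    int32_t mask = (int32_t) ((1ll << exponent) - 1);
    int32_t remainder = x & mask;
    int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
    return (x >> exponent) + (remainder > threshold ? 1 : 0);
}

static inline uint8_t requantize_u8(int32_t acc, int32_t multiplier,
                                    int right_shift, int32_t c_offset,
                                    int32_t minval, int32_t maxval)
{
    int32_t v = rounding_doubling_high_mul(acc, multiplier);
    v = rounding_divide_by_pot(v, right_shift) + c_offset;
    if (v < minval) v = minval;
    if (v > maxval) v = maxval;
    return (uint8_t) v;
}

The sketch adds c_offset while still in 32 bits; the kernel above adds it after the sqxtn narrowing to 16 bits, which gives the same result once the final minval/maxval clamp is applied.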
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_generic_output9_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_generic_output9_mla_depthfirst/generic.cpp
index 39001aa1fd..9cebfe8f03 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_generic_output9_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_generic_output9_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,577 +41,583 @@ void a64_u8q_nhwc_generic_output9_mla_depthfirst_impl(
)
{
__asm__ __volatile__(
- "lsr x12, %x[n_channels], #0x2\n"
- "add x20, %x[qp], %[offsetof_Requantize32_minval]\n"
- "ld1r { v8.4s }, [x20]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_minval]\n"
+ "ld1r { v12.4s }, [x19]\n"
"add x20, %x[qp], %[offsetof_Requantize32_maxval]\n"
- "ld1r { v7.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_a_offset]\n"
- "ld1r { v6.16b }, [x20]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_a_offset]\n"
+ "ld1r { v11.4s }, [x20]\n"
+ "ld1r { v10.16b }, [x19]\n"
"add x20, %x[qp], %[offsetof_Requantize32_b_offset]\n"
- "ld1r { v5.16b }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v4.4s }, [x20]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_c_offset]\n"
+ "ld1r { v9.16b }, [x20]\n"
+ "ld1r { v8.4s }, [x19]\n"
"add x20, %x[qp], %[offsetof_Requantize32_per_layer_left_shift]\n"
- "ld1r { v3.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_per_layer_mul]\n"
- "ld1r { v2.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_per_layer_right_shift]\n"
- "ld1r { v1.4s }, [x20]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_per_layer_mul]\n"
+ "ld1r { v7.4s }, [x20]\n"
+ "ld1r { v6.4s }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_per_layer_right_shift]\n"
"mov x11, #0x0\n"
- "cbz x12, 6f\n"
+ "ld1r { v5.4s }, [x19]\n"
+ "lsr x10, %x[n_channels], #0x2\n"
+ "cbz x10, 6f\n"
"1:" // Channel loop
- "movi v23.4s, #0x0\n"
+ "movi v27.4s, #0x0\n"
"cbz %x[bias], 2f\n"
- "lsl x20, x11, #0x2\n"
- "ldr q23, [%x[bias], x20]\n"
+ "lsl x19, x11, #0x2\n"
+ "ldr q27, [%x[bias], x19]\n"
"2:" // Channel loop: Load bias: Done
- "ldr s0, [%x[params]], #0x4\n"
- "mov x21, %x[inptrs]\n"
- "ldp x10, x9, [x21], #0x10\n"
- "subs x20, %x[n_points], #0x1\n"
- "ldr s14, [x10, x11]\n"
- "ldr s15, [x9, x11]\n"
- "mov v24.16b, v23.16b\n"
- "mov v25.16b, v23.16b\n"
- "ldp x28, x27, [x21], #0x10\n"
- "ldr s16, [x28, x11]\n"
- "mov v26.16b, v23.16b\n"
- "mov v27.16b, v23.16b\n"
- "ldr s17, [x27, x11]\n"
- "ldp x26, x25, [x21], #0x10\n"
- "mov v28.16b, v23.16b\n"
- "mov v29.16b, v23.16b\n"
- "ldr s18, [x26, x11]\n"
- "ldr s19, [x25, x11]\n"
- "mov v30.16b, v23.16b\n"
- "mov v31.16b, v23.16b\n"
- "ldp x24, x23, [x21], #0x10\n"
- "ldr s20, [x24, x11]\n"
- "usubl v0.8h, v0.8b, v5.8b\n"
- "usubl v14.8h, v14.8b, v6.8b\n"
- "ldr s21, [x23, x11]\n"
- "ldr x22, [x21], #0x8\n"
- "usubl v15.8h, v15.8b, v6.8b\n"
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr s22, [x22, x11]\n"
- "usubl v17.8h, v17.8b, v6.8b\n"
- "usubl v18.8h, v18.8b, v6.8b\n"
- "usubl v19.8h, v19.8b, v6.8b\n"
- "usubl v20.8h, v20.8b, v6.8b\n"
- "usubl v21.8h, v21.8b, v6.8b\n"
- "usubl v22.8h, v22.8b, v6.8b\n"
+ "mov v26.16b, v27.16b\n"
+ "ldr s16, [%x[params]], #0x4\n"
+ "mov x20, %x[inptrs]\n"
+ "mov v25.16b, v27.16b\n"
+ "ldp x9, x28, [x20], #0x10\n"
+ "subs x19, %x[n_points], #0x1\n"
+ "mov v24.16b, v27.16b\n"
+ "ldr s4, [x9, x11]\n"
+ "mov v23.16b, v27.16b\n"
+ "mov v22.16b, v27.16b\n"
+ "ldr s3, [x28, x11]\n"
+ "mov v21.16b, v27.16b\n"
+ "ldp x27, x26, [x20], #0x10\n"
+ "mov v20.16b, v27.16b\n"
+ "ldr s2, [x27, x11]\n"
+ "mov v19.16b, v27.16b\n"
+ "usubl v16.8h, v16.8b, v9.8b\n"
+ "ldr s1, [x26, x11]\n"
+ "usubl v4.8h, v4.8b, v10.8b\n"
+ "ldp x25, x24, [x20], #0x10\n"
+ "usubl v3.8h, v3.8b, v10.8b\n"
+ "ldr s0, [x25, x11]\n"
+ "usubl v2.8h, v2.8b, v10.8b\n"
+ "usubl v1.8h, v1.8b, v10.8b\n"
+ "ldr s31, [x24, x11]\n"
+ "ldp x23, x22, [x20], #0x10\n"
+ "usubl v0.8h, v0.8b, v10.8b\n"
+ "ldr s30, [x23, x11]\n"
+ "ldr s29, [x22, x11]\n"
+ "usubl v31.8h, v31.8b, v10.8b\n"
+ "ldr x21, [x20], #0x8\n"
+ "usubl v30.8h, v30.8b, v10.8b\n"
+ "ldr s28, [x21, x11]\n"
+ "usubl v29.8h, v29.8b, v10.8b\n"
+ "usubl v28.8h, v28.8b, v10.8b\n"
"ble 4f\n"
"3:" // Channel loop: Planar loop
- "ldp x10, x9, [x21], #0x10\n"
- "ldp x28, x27, [x21], #0x10\n"
- "smlal v23.4s, v14.4h, v0.4h\n"
- "smlal v24.4s, v15.4h, v0.4h\n"
- "ldr s14, [x10, x11]\n"
- "ldr s15, [x9, x11]\n"
- "smlal v25.4s, v16.4h, v0.4h\n"
- "smlal v26.4s, v17.4h, v0.4h\n"
- "ldr s16, [x28, x11]\n"
- "ldr s17, [x27, x11]\n"
- "smlal v27.4s, v18.4h, v0.4h\n"
- "smlal v28.4s, v19.4h, v0.4h\n"
- "ldp x26, x25, [x21], #0x10\n"
- "ldr s18, [x26, x11]\n"
- "smlal v29.4s, v20.4h, v0.4h\n"
- "smlal v30.4s, v21.4h, v0.4h\n"
- "ldr s19, [x25, x11]\n"
- "ldp x24, x23, [x21], #0x10\n"
- "smlal v31.4s, v22.4h, v0.4h\n"
- "subs x20, x20, #0x1\n"
- "ldr s0, [%x[params]], #0x4\n"
- "ldr s20, [x24, x11]\n"
- "usubl v0.8h, v0.8b, v5.8b\n"
- "usubl v14.8h, v14.8b, v6.8b\n"
- "ldr s21, [x23, x11]\n"
- "ldr x22, [x21], #0x8\n"
- "usubl v15.8h, v15.8b, v6.8b\n"
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr s22, [x22, x11]\n"
- "usubl v17.8h, v17.8b, v6.8b\n"
- "usubl v18.8h, v18.8b, v6.8b\n"
- "usubl v19.8h, v19.8b, v6.8b\n"
- "usubl v20.8h, v20.8b, v6.8b\n"
- "usubl v21.8h, v21.8b, v6.8b\n"
- "usubl v22.8h, v22.8b, v6.8b\n"
+ "smlal v27.4s, v4.4h, v16.4h\n"
+ "ldp x9, x28, [x20], #0x10\n"
+ "subs x19, x19, #0x1\n"
+ "smlal v26.4s, v3.4h, v16.4h\n"
+ "ldr s4, [x9, x11]\n"
+ "smlal v25.4s, v2.4h, v16.4h\n"
+ "smlal v24.4s, v1.4h, v16.4h\n"
+ "ldr s3, [x28, x11]\n"
+ "smlal v23.4s, v0.4h, v16.4h\n"
+ "ldp x27, x26, [x20], #0x10\n"
+ "smlal v22.4s, v31.4h, v16.4h\n"
+ "smlal v21.4s, v30.4h, v16.4h\n"
+ "ldr s2, [x27, x11]\n"
+ "smlal v20.4s, v29.4h, v16.4h\n"
+ "smlal v19.4s, v28.4h, v16.4h\n"
+ "ldr s16, [%x[params]], #0x4\n"
+ "usubl v4.8h, v4.8b, v10.8b\n"
+ "ldr s1, [x26, x11]\n"
+ "usubl v3.8h, v3.8b, v10.8b\n"
+ "ldp x25, x24, [x20], #0x10\n"
+ "usubl v2.8h, v2.8b, v10.8b\n"
+ "ldr s0, [x25, x11]\n"
+ "usubl v16.8h, v16.8b, v9.8b\n"
+ "usubl v1.8h, v1.8b, v10.8b\n"
+ "ldr s31, [x24, x11]\n"
+ "ldp x23, x22, [x20], #0x10\n"
+ "usubl v0.8h, v0.8b, v10.8b\n"
+ "ldr s30, [x23, x11]\n"
+ "ldr s29, [x22, x11]\n"
+ "usubl v31.8h, v31.8b, v10.8b\n"
+ "ldr x21, [x20], #0x8\n"
+ "usubl v30.8h, v30.8b, v10.8b\n"
+ "ldr s28, [x21, x11]\n"
+ "usubl v29.8h, v29.8b, v10.8b\n"
+ "usubl v28.8h, v28.8b, v10.8b\n"
"bgt 3b\n"
"4:" // Channel loop: Planar tail
- "smlal v23.4s, v14.4h, v0.4h\n"
- "smlal v24.4s, v15.4h, v0.4h\n"
- "smlal v25.4s, v16.4h, v0.4h\n"
- "smlal v26.4s, v17.4h, v0.4h\n"
- "smlal v27.4s, v18.4h, v0.4h\n"
- "smlal v28.4s, v19.4h, v0.4h\n"
- "smlal v29.4s, v20.4h, v0.4h\n"
- "smlal v30.4s, v21.4h, v0.4h\n"
- "smlal v31.4s, v22.4h, v0.4h\n"
+ "smlal v27.4s, v4.4h, v16.4h\n"
+ "smlal v26.4s, v3.4h, v16.4h\n"
+ "smlal v25.4s, v2.4h, v16.4h\n"
+ "smlal v24.4s, v1.4h, v16.4h\n"
+ "smlal v23.4s, v0.4h, v16.4h\n"
+ "smlal v22.4s, v31.4h, v16.4h\n"
+ "smlal v21.4s, v30.4h, v16.4h\n"
+ "smlal v20.4s, v29.4h, v16.4h\n"
+ "smlal v19.4s, v28.4h, v16.4h\n"
"cbz %x[rq_mul_ptr], 5f\n"
- "lsl x20, x11, #0x2\n"
- "ldr q2, [%x[rq_mul_ptr], x20]\n"
- "ldr q1, [%x[rq_right_shift_ptr], x20]\n"
+ "lsl x19, x11, #0x2\n"
+ "ldr q6, [%x[rq_mul_ptr], x19]\n"
+ "ldr q5, [%x[rq_right_shift_ptr], x19]\n"
"cbz %x[rq_left_shift_ptr], 5f\n"
- "ldr q3, [%x[rq_left_shift_ptr], x20]\n"
+ "ldr q7, [%x[rq_left_shift_ptr], x19]\n"
"5:" // Channel loop: Load quantisation parameters: Done
- "sshl v23.4s, v23.4s, v3.4s\n"
- "sshl v24.4s, v24.4s, v3.4s\n"
- "ldp x28, x27, [%x[outptrs], #0x0]\n"
- "ldp x26, x25, [%x[outptrs], #0x10]\n"
- "sshl v25.4s, v25.4s, v3.4s\n"
- "sqrdmulh v23.4s, v23.4s, v2.4s\n"
- "ldp x24, x23, [%x[outptrs], #0x20]\n"
- "ldp x22, x21, [%x[outptrs], #0x30]\n"
- "sqrdmulh v24.4s, v24.4s, v2.4s\n"
- "sqrdmulh v25.4s, v25.4s, v2.4s\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "and v21.16b, v23.16b, v1.16b\n"
- "and v20.16b, v24.16b, v1.16b\n"
- "and v19.16b, v25.16b, v1.16b\n"
- "sshl v26.4s, v26.4s, v3.4s\n"
- "sshl v27.4s, v27.4s, v3.4s\n"
- "sshl v28.4s, v28.4s, v3.4s\n"
- "sshl v29.4s, v29.4s, v3.4s\n"
- "sshl v30.4s, v30.4s, v3.4s\n"
- "sshl v31.4s, v31.4s, v3.4s\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "sshr v20.4s, v20.4s, #0x1f\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v2.4s\n"
- "sqrdmulh v27.4s, v27.4s, v2.4s\n"
- "sqrdmulh v28.4s, v28.4s, v2.4s\n"
- "sqrdmulh v29.4s, v29.4s, v2.4s\n"
- "sqrdmulh v30.4s, v30.4s, v2.4s\n"
- "sqrdmulh v31.4s, v31.4s, v2.4s\n"
- "sqadd v23.4s, v23.4s, v21.4s\n"
- "sqadd v24.4s, v24.4s, v20.4s\n"
- "sqadd v25.4s, v25.4s, v19.4s\n"
- "and v18.16b, v26.16b, v1.16b\n"
- "and v17.16b, v27.16b, v1.16b\n"
- "and v16.16b, v28.16b, v1.16b\n"
- "and v21.16b, v29.16b, v1.16b\n"
- "and v20.16b, v30.16b, v1.16b\n"
- "and v19.16b, v31.16b, v1.16b\n"
+ "sshl v27.4s, v27.4s, v7.4s\n"
+ "ldp x27, x26, [%x[outptrs], #0x0]\n"
+ "sshl v26.4s, v26.4s, v7.4s\n"
+ "ldp x25, x24, [%x[outptrs], #0x10]\n"
+ "sshl v25.4s, v25.4s, v7.4s\n"
+ "ldp x23, x22, [%x[outptrs], #0x20]\n"
+ "sqrdmulh v27.4s, v27.4s, v6.4s\n"
+ "ldp x21, x20, [%x[outptrs], #0x30]\n"
+ "sqrdmulh v26.4s, v26.4s, v6.4s\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "sqrdmulh v25.4s, v25.4s, v6.4s\n"
+ "sshl v24.4s, v24.4s, v7.4s\n"
+ "and v16.16b, v27.16b, v5.16b\n"
+ "and v18.16b, v26.16b, v5.16b\n"
+ "and v17.16b, v25.16b, v5.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
"sshr v18.4s, v18.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "sshr v20.4s, v20.4s, #0x1f\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
+ "sqadd v27.4s, v27.4s, v16.4s\n"
"sqadd v26.4s, v26.4s, v18.4s\n"
- "sqadd v27.4s, v27.4s, v17.4s\n"
- "sqadd v28.4s, v28.4s, v16.4s\n"
- "sqadd v29.4s, v29.4s, v21.4s\n"
- "sqadd v30.4s, v30.4s, v20.4s\n"
- "sqadd v31.4s, v31.4s, v19.4s\n"
- "srshl v23.4s, v23.4s, v1.4s\n"
- "srshl v24.4s, v24.4s, v1.4s\n"
- "srshl v25.4s, v25.4s, v1.4s\n"
- "srshl v26.4s, v26.4s, v1.4s\n"
- "srshl v27.4s, v27.4s, v1.4s\n"
- "srshl v28.4s, v28.4s, v1.4s\n"
- "srshl v29.4s, v29.4s, v1.4s\n"
- "srshl v30.4s, v30.4s, v1.4s\n"
- "srshl v31.4s, v31.4s, v1.4s\n"
- "add v23.4s, v23.4s, v4.4s\n"
- "add v24.4s, v24.4s, v4.4s\n"
- "add v25.4s, v25.4s, v4.4s\n"
- "add v26.4s, v26.4s, v4.4s\n"
- "add v27.4s, v27.4s, v4.4s\n"
- "add v28.4s, v28.4s, v4.4s\n"
- "add v29.4s, v29.4s, v4.4s\n"
- "add v30.4s, v30.4s, v4.4s\n"
- "add v31.4s, v31.4s, v4.4s\n"
- "smax v23.4s, v23.4s, v8.4s\n"
- "smax v24.4s, v24.4s, v8.4s\n"
- "smax v25.4s, v25.4s, v8.4s\n"
- "smax v26.4s, v26.4s, v8.4s\n"
- "smax v27.4s, v27.4s, v8.4s\n"
- "smax v28.4s, v28.4s, v8.4s\n"
- "smax v29.4s, v29.4s, v8.4s\n"
- "smax v30.4s, v30.4s, v8.4s\n"
- "smax v31.4s, v31.4s, v8.4s\n"
- "smin v23.4s, v23.4s, v7.4s\n"
- "smin v24.4s, v24.4s, v7.4s\n"
- "smin v25.4s, v25.4s, v7.4s\n"
- "smin v26.4s, v26.4s, v7.4s\n"
- "smin v27.4s, v27.4s, v7.4s\n"
- "smin v28.4s, v28.4s, v7.4s\n"
- "smin v29.4s, v29.4s, v7.4s\n"
- "smin v30.4s, v30.4s, v7.4s\n"
- "smin v31.4s, v31.4s, v7.4s\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "sqadd v25.4s, v25.4s, v17.4s\n"
+ "sqrdmulh v24.4s, v24.4s, v6.4s\n"
+ "srshl v27.4s, v27.4s, v5.4s\n"
+ "srshl v26.4s, v26.4s, v5.4s\n"
+ "srshl v25.4s, v25.4s, v5.4s\n"
+ "and v16.16b, v24.16b, v5.16b\n"
+ "add v27.4s, v27.4s, v8.4s\n"
+ "add v26.4s, v26.4s, v8.4s\n"
+ "add v25.4s, v25.4s, v8.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smax v27.4s, v27.4s, v12.4s\n"
+ "smax v26.4s, v26.4s, v12.4s\n"
+ "sqadd v24.4s, v24.4s, v16.4s\n"
+ "smin v27.4s, v27.4s, v11.4s\n"
+ "smin v26.4s, v26.4s, v11.4s\n"
+ "smax v25.4s, v25.4s, v12.4s\n"
+ "srshl v24.4s, v24.4s, v5.4s\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s23, [x28, x11]\n"
+ "smin v25.4s, v25.4s, v11.4s\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "str s27, [x27, x11]\n"
+ "add v24.4s, v24.4s, v8.4s\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s24, [x27, x11]\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s25, [x26, x11]\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s26, [x25, x11]\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
- "str s27, [x24, x11]\n"
- "str s28, [x23, x11]\n"
- "str s29, [x22, x11]\n"
- "str s30, [x21, x11]\n"
- "str s31, [x20, x11]\n"
+ "str s26, [x26, x11]\n"
+ "smax v24.4s, v24.4s, v12.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s25, [x25, x11]\n"
+ "sshl v23.4s, v23.4s, v7.4s\n"
+ "sshl v22.4s, v22.4s, v7.4s\n"
+ "smin v24.4s, v24.4s, v11.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v6.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v6.4s\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "sshl v21.4s, v21.4s, v7.4s\n"
+ "and v17.16b, v23.16b, v5.16b\n"
+ "and v16.16b, v22.16b, v5.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v6.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "str s24, [x24, x11]\n"
+ "sqadd v23.4s, v23.4s, v17.4s\n"
+ "sqadd v22.4s, v22.4s, v16.4s\n"
+ "and v16.16b, v21.16b, v5.16b\n"
+ "sshl v20.4s, v20.4s, v7.4s\n"
+ "sshl v19.4s, v19.4s, v7.4s\n"
+ "srshl v23.4s, v23.4s, v5.4s\n"
+ "srshl v22.4s, v22.4s, v5.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v20.4s, v20.4s, v6.4s\n"
+ "add v23.4s, v23.4s, v8.4s\n"
+ "add v22.4s, v22.4s, v8.4s\n"
+ "sqadd v21.4s, v21.4s, v16.4s\n"
+ "and v17.16b, v20.16b, v5.16b\n"
+ "sqrdmulh v19.4s, v19.4s, v6.4s\n"
+ "smax v23.4s, v23.4s, v12.4s\n"
+ "srshl v21.4s, v21.4s, v5.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v19.16b, v5.16b\n"
+ "smin v23.4s, v23.4s, v11.4s\n"
+ "add v21.4s, v21.4s, v8.4s\n"
+ "sqadd v20.4s, v20.4s, v17.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smax v22.4s, v22.4s, v12.4s\n"
+ "smax v21.4s, v21.4s, v12.4s\n"
+ "srshl v20.4s, v20.4s, v5.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "smin v22.4s, v22.4s, v11.4s\n"
+ "smin v21.4s, v21.4s, v11.4s\n"
+ "add v20.4s, v20.4s, v8.4s\n"
+ "srshl v19.4s, v19.4s, v5.4s\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "smax v20.4s, v20.4s, v12.4s\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "str s23, [x23, x11]\n"
+ "add v19.4s, v19.4s, v8.4s\n"
+ "smin v20.4s, v20.4s, v11.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "smax v19.4s, v19.4s, v12.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str s22, [x22, x11]\n"
+ "smin v19.4s, v19.4s, v11.4s\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "str s21, [x21, x11]\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "str s20, [x20, x11]\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str s19, [x19, x11]\n"
"add x11, x11, #0x4\n"
- "cmp x11, x12, LSL #2\n"
+ "cmp x11, x10, LSL #2\n"
"blt 1b\n"
"6:" // Oddments
"tst %x[n_channels], #0x3\n"
"beq 24f\n"
- "movi v23.4s, #0x0\n"
+ "movi v27.4s, #0x0\n"
"cbz %x[bias], 9f\n"
- "add x20, %x[bias], x11, LSL #2\n"
+ "add x19, %x[bias], x11, LSL #2\n"
"tbz %x[n_channels], #1, 7f\n"
- "ld1 { v23.d }[0], [x20], #0x8\n"
+ "ld1 { v27.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 8f\n"
- "ld1 { v23.s }[2], [x20], #0x4\n"
+ "ld1 { v27.s }[2], [x19], #0x4\n"
"b 8f\n"
"7:" // Oddments: Load bias: Bit 1: Unset
- "ld1 { v23.s }[0], [x20], #0x4\n"
+ "tbz %x[n_channels], #0, 8f\n"
+ "ld1 { v27.s }[0], [x19], #0x4\n"
"8:" // Oddments: Load bias: Bit 1: End
+
"9:" // Oddments: Load bias: Done
- "ldr s0, [%x[params]], #0x4\n"
- "mov x21, %x[inptrs]\n"
- "ldp x10, x9, [x21], #0x10\n"
- "mov v24.16b, v23.16b\n"
- "ldp x28, x27, [x21], #0x10\n"
- "ldp x26, x25, [x21], #0x10\n"
- "mov v25.16b, v23.16b\n"
- "mov v26.16b, v23.16b\n"
- "ldp x24, x23, [x21], #0x10\n"
- "ldr x22, [x21], #0x8\n"
- "mov v27.16b, v23.16b\n"
- "mov v28.16b, v23.16b\n"
- "mov v29.16b, v23.16b\n"
- "mov v30.16b, v23.16b\n"
- "add x10, x10, x11\n"
+ "mov v26.16b, v27.16b\n"
+ "ldr s16, [%x[params]], #0x4\n"
+ "mov x20, %x[inptrs]\n"
+ "mov v25.16b, v27.16b\n"
+ "ldp x9, x28, [x20], #0x10\n"
"add x9, x9, x11\n"
- "mov v31.16b, v23.16b\n"
- "usubl v0.8h, v0.8b, v5.8b\n"
+ "mov v24.16b, v27.16b\n"
+ "ldp x27, x26, [x20], #0x10\n"
+ "mov v23.16b, v27.16b\n"
+ "ldp x25, x24, [x20], #0x10\n"
+ "mov v22.16b, v27.16b\n"
"add x28, x28, x11\n"
+ "mov v21.16b, v27.16b\n"
+ "ldp x23, x22, [x20], #0x10\n"
+ "mov v20.16b, v27.16b\n"
"add x27, x27, x11\n"
+ "mov v19.16b, v27.16b\n"
+ "ldr x21, [x20], #0x8\n"
+ "usubl v16.8h, v16.8b, v9.8b\n"
"add x26, x26, x11\n"
"add x25, x25, x11\n"
"add x24, x24, x11\n"
"add x23, x23, x11\n"
"add x22, x22, x11\n"
+ "add x21, x21, x11\n"
"tbz %x[n_channels], #1, 10f\n"
- "ldr h14, [x10], #0x2\n"
- "ldr h15, [x9], #0x2\n"
- "ldr h16, [x28], #0x2\n"
- "ldr h17, [x27], #0x2\n"
- "ldr h18, [x26], #0x2\n"
- "ldr h19, [x25], #0x2\n"
- "ldr h20, [x24], #0x2\n"
- "ldr h21, [x23], #0x2\n"
- "ldr h22, [x22], #0x2\n"
+ "ldr h4, [x9], #0x2\n"
+ "ldr h3, [x28], #0x2\n"
+ "ldr h2, [x27], #0x2\n"
+ "ldr h1, [x26], #0x2\n"
+ "ldr h0, [x25], #0x2\n"
+ "ldr h31, [x24], #0x2\n"
+ "ldr h30, [x23], #0x2\n"
+ "ldr h29, [x22], #0x2\n"
+ "ldr h28, [x21], #0x2\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v14.b }[2], [x10], #0x1\n"
- "ld1 { v15.b }[2], [x9], #0x1\n"
- "ld1 { v16.b }[2], [x28], #0x1\n"
- "ld1 { v17.b }[2], [x27], #0x1\n"
- "ld1 { v18.b }[2], [x26], #0x1\n"
- "ld1 { v19.b }[2], [x25], #0x1\n"
- "ld1 { v20.b }[2], [x24], #0x1\n"
- "ld1 { v21.b }[2], [x23], #0x1\n"
- "ld1 { v22.b }[2], [x22], #0x1\n"
+ "ld1 { v4.b }[2], [x9], #0x1\n"
+ "ld1 { v3.b }[2], [x28], #0x1\n"
+ "ld1 { v2.b }[2], [x27], #0x1\n"
+ "ld1 { v1.b }[2], [x26], #0x1\n"
+ "ld1 { v0.b }[2], [x25], #0x1\n"
+ "ld1 { v31.b }[2], [x24], #0x1\n"
+ "ld1 { v30.b }[2], [x23], #0x1\n"
+ "ld1 { v29.b }[2], [x22], #0x1\n"
+ "ld1 { v28.b }[2], [x21], #0x1\n"
"b 11f\n"
"10:" // Oddments: Load: Bit 1: Unset
- "ldr b14, [x10], #0x1\n"
- "ldr b15, [x9], #0x1\n"
- "ldr b16, [x28], #0x1\n"
- "ldr b17, [x27], #0x1\n"
- "ldr b18, [x26], #0x1\n"
- "ldr b19, [x25], #0x1\n"
- "ldr b20, [x24], #0x1\n"
- "ldr b21, [x23], #0x1\n"
- "ldr b22, [x22], #0x1\n"
+ "tbz %x[n_channels], #0, 11f\n"
+ "ldr b4, [x9], #0x1\n"
+ "ldr b3, [x28], #0x1\n"
+ "ldr b2, [x27], #0x1\n"
+ "ldr b1, [x26], #0x1\n"
+ "ldr b0, [x25], #0x1\n"
+ "ldr b31, [x24], #0x1\n"
+ "ldr b30, [x23], #0x1\n"
+ "ldr b29, [x22], #0x1\n"
+ "ldr b28, [x21], #0x1\n"
"11:" // Oddments: Load: Bit 1: End
- "subs x20, %x[n_points], #0x1\n"
- "usubl v14.8h, v14.8b, v6.8b\n"
- "usubl v15.8h, v15.8b, v6.8b\n"
- "usubl v16.8h, v16.8b, v6.8b\n"
- "usubl v17.8h, v17.8b, v6.8b\n"
- "usubl v18.8h, v18.8b, v6.8b\n"
- "usubl v19.8h, v19.8b, v6.8b\n"
- "usubl v20.8h, v20.8b, v6.8b\n"
- "usubl v21.8h, v21.8b, v6.8b\n"
- "usubl v22.8h, v22.8b, v6.8b\n"
+ "usubl v4.8h, v4.8b, v10.8b\n"
+ "subs x19, %x[n_points], #0x1\n"
+ "usubl v3.8h, v3.8b, v10.8b\n"
+ "usubl v2.8h, v2.8b, v10.8b\n"
+ "usubl v1.8h, v1.8b, v10.8b\n"
+ "usubl v0.8h, v0.8b, v10.8b\n"
+ "usubl v31.8h, v31.8b, v10.8b\n"
+ "usubl v30.8h, v30.8b, v10.8b\n"
+ "usubl v29.8h, v29.8b, v10.8b\n"
+ "usubl v28.8h, v28.8b, v10.8b\n"
"ble 15f\n"
"12:" // Oddments: Planar loop
- "ldp x10, x9, [x21], #0x10\n"
- "ldp x28, x27, [x21], #0x10\n"
- "smlal v23.4s, v14.4h, v0.4h\n"
- "smlal v24.4s, v15.4h, v0.4h\n"
- "ldp x26, x25, [x21], #0x10\n"
- "ldp x24, x23, [x21], #0x10\n"
- "smlal v25.4s, v16.4h, v0.4h\n"
- "smlal v26.4s, v17.4h, v0.4h\n"
- "smlal v27.4s, v18.4h, v0.4h\n"
- "smlal v28.4s, v19.4h, v0.4h\n"
- "ldr x22, [x21], #0x8\n"
- "add x10, x10, x11\n"
- "smlal v29.4s, v20.4h, v0.4h\n"
- "smlal v30.4s, v21.4h, v0.4h\n"
+ "smlal v27.4s, v4.4h, v16.4h\n"
+ "ldp x9, x28, [x20], #0x10\n"
"add x9, x9, x11\n"
+ "smlal v26.4s, v3.4h, v16.4h\n"
+ "ldp x27, x26, [x20], #0x10\n"
+ "smlal v25.4s, v2.4h, v16.4h\n"
+ "ldp x25, x24, [x20], #0x10\n"
+ "smlal v24.4s, v1.4h, v16.4h\n"
"add x28, x28, x11\n"
- "smlal v31.4s, v22.4h, v0.4h\n"
- "ldr s0, [%x[params]], #0x4\n"
- "usubl v0.8h, v0.8b, v5.8b\n"
+ "smlal v23.4s, v0.4h, v16.4h\n"
+ "ldp x23, x22, [x20], #0x10\n"
+ "smlal v22.4s, v31.4h, v16.4h\n"
"add x27, x27, x11\n"
+ "smlal v21.4s, v30.4h, v16.4h\n"
+ "ldr x21, [x20], #0x8\n"
+ "smlal v20.4s, v29.4h, v16.4h\n"
"add x26, x26, x11\n"
+ "smlal v19.4s, v28.4h, v16.4h\n"
+ "ldr s16, [%x[params]], #0x4\n"
"add x25, x25, x11\n"
+ "usubl v16.8h, v16.8b, v9.8b\n"
"add x24, x24, x11\n"
"add x23, x23, x11\n"
"add x22, x22, x11\n"
+ "add x21, x21, x11\n"
"tbz %x[n_channels], #1, 13f\n"
- "ldr h14, [x10], #0x2\n"
- "ldr h15, [x9], #0x2\n"
- "ldr h16, [x28], #0x2\n"
- "ldr h17, [x27], #0x2\n"
- "ldr h18, [x26], #0x2\n"
- "ldr h19, [x25], #0x2\n"
- "ldr h20, [x24], #0x2\n"
- "ldr h21, [x23], #0x2\n"
- "ldr h22, [x22], #0x2\n"
+ "ldr h4, [x9], #0x2\n"
+ "ldr h3, [x28], #0x2\n"
+ "ldr h2, [x27], #0x2\n"
+ "ldr h1, [x26], #0x2\n"
+ "ldr h0, [x25], #0x2\n"
+ "ldr h31, [x24], #0x2\n"
+ "ldr h30, [x23], #0x2\n"
+ "ldr h29, [x22], #0x2\n"
+ "ldr h28, [x21], #0x2\n"
"tbz %x[n_channels], #0, 14f\n"
- "ld1 { v14.b }[2], [x10], #0x1\n"
- "ld1 { v15.b }[2], [x9], #0x1\n"
- "ld1 { v16.b }[2], [x28], #0x1\n"
- "ld1 { v17.b }[2], [x27], #0x1\n"
- "ld1 { v18.b }[2], [x26], #0x1\n"
- "ld1 { v19.b }[2], [x25], #0x1\n"
- "ld1 { v20.b }[2], [x24], #0x1\n"
- "ld1 { v21.b }[2], [x23], #0x1\n"
- "ld1 { v22.b }[2], [x22], #0x1\n"
+ "ld1 { v4.b }[2], [x9], #0x1\n"
+ "ld1 { v3.b }[2], [x28], #0x1\n"
+ "ld1 { v2.b }[2], [x27], #0x1\n"
+ "ld1 { v1.b }[2], [x26], #0x1\n"
+ "ld1 { v0.b }[2], [x25], #0x1\n"
+ "ld1 { v31.b }[2], [x24], #0x1\n"
+ "ld1 { v30.b }[2], [x23], #0x1\n"
+ "ld1 { v29.b }[2], [x22], #0x1\n"
+ "ld1 { v28.b }[2], [x21], #0x1\n"
"b 14f\n"
"13:" // Oddments: Planar loop: Load: Bit 1: Unset
- "ldr b14, [x10], #0x1\n"
- "ldr b15, [x9], #0x1\n"
- "ldr b16, [x28], #0x1\n"
- "ldr b17, [x27], #0x1\n"
- "ldr b18, [x26], #0x1\n"
- "ldr b19, [x25], #0x1\n"
- "ldr b20, [x24], #0x1\n"
- "ldr b21, [x23], #0x1\n"
- "ldr b22, [x22], #0x1\n"
+ "tbz %x[n_channels], #0, 14f\n"
+ "ldr b4, [x9], #0x1\n"
+ "ldr b3, [x28], #0x1\n"
+ "ldr b2, [x27], #0x1\n"
+ "ldr b1, [x26], #0x1\n"
+ "ldr b0, [x25], #0x1\n"
+ "ldr b31, [x24], #0x1\n"
+ "ldr b30, [x23], #0x1\n"
+ "ldr b29, [x22], #0x1\n"
+ "ldr b28, [x21], #0x1\n"
"14:" // Oddments: Planar loop: Load: Bit 1: End
- "subs x20, x20, #0x1\n"
- "usubl v14.8h, v14.8b, v6.8b\n"
- "usubl v15.8h, v15.8b, v6.8b\n"
- "usubl v16.8h, v16.8b, v6.8b\n"
- "usubl v17.8h, v17.8b, v6.8b\n"
- "usubl v18.8h, v18.8b, v6.8b\n"
- "usubl v19.8h, v19.8b, v6.8b\n"
- "usubl v20.8h, v20.8b, v6.8b\n"
- "usubl v21.8h, v21.8b, v6.8b\n"
- "usubl v22.8h, v22.8b, v6.8b\n"
+ "usubl v4.8h, v4.8b, v10.8b\n"
+ "subs x19, x19, #0x1\n"
+ "usubl v3.8h, v3.8b, v10.8b\n"
+ "usubl v2.8h, v2.8b, v10.8b\n"
+ "usubl v1.8h, v1.8b, v10.8b\n"
+ "usubl v0.8h, v0.8b, v10.8b\n"
+ "usubl v31.8h, v31.8b, v10.8b\n"
+ "usubl v30.8h, v30.8b, v10.8b\n"
+ "usubl v29.8h, v29.8b, v10.8b\n"
+ "usubl v28.8h, v28.8b, v10.8b\n"
"bgt 12b\n"
"15:" // Oddments: Planar tail
- "smlal v23.4s, v14.4h, v0.4h\n"
- "smlal v24.4s, v15.4h, v0.4h\n"
- "smlal v25.4s, v16.4h, v0.4h\n"
- "smlal v26.4s, v17.4h, v0.4h\n"
- "smlal v27.4s, v18.4h, v0.4h\n"
- "smlal v28.4s, v19.4h, v0.4h\n"
- "smlal v29.4s, v20.4h, v0.4h\n"
- "smlal v30.4s, v21.4h, v0.4h\n"
- "smlal v31.4s, v22.4h, v0.4h\n"
+ "smlal v27.4s, v4.4h, v16.4h\n"
+ "smlal v26.4s, v3.4h, v16.4h\n"
+ "smlal v25.4s, v2.4h, v16.4h\n"
+ "smlal v24.4s, v1.4h, v16.4h\n"
+ "smlal v23.4s, v0.4h, v16.4h\n"
+ "smlal v22.4s, v31.4h, v16.4h\n"
+ "smlal v21.4s, v30.4h, v16.4h\n"
+ "smlal v20.4s, v29.4h, v16.4h\n"
+ "smlal v19.4s, v28.4h, v16.4h\n"
"cbz %x[rq_mul_ptr], 21f\n"
- "add x22, %x[rq_mul_ptr], x11, LSL #2\n"
- "add x21, %x[rq_right_shift_ptr], x11, LSL #2\n"
- "add x20, %x[rq_left_shift_ptr], x11, LSL #2\n"
+ "add x21, %x[rq_mul_ptr], x11, LSL #2\n"
+ "add x20, %x[rq_right_shift_ptr], x11, LSL #2\n"
+ "add x19, %x[rq_left_shift_ptr], x11, LSL #2\n"
"tbz %x[n_channels], #1, 18f\n"
- "ld1 { v2.d }[0], [x22], #0x8\n"
- "ld1 { v1.d }[0], [x21], #0x8\n"
+ "ld1 { v6.d }[0], [x21], #0x8\n"
+ "ld1 { v5.d }[0], [x20], #0x8\n"
"cbz %x[rq_left_shift_ptr], 16f\n"
- "ld1 { v3.d }[0], [x20], #0x8\n"
+ "ld1 { v7.d }[0], [x19], #0x8\n"
"16:" // Oddments: Load quantisation parameters: Bit 1: Load left shift: Done
"tbz %x[n_channels], #0, 20f\n"
- "ld1 { v2.s }[2], [x22], #0x4\n"
- "ld1 { v1.s }[2], [x21], #0x4\n"
+ "ld1 { v6.s }[2], [x21], #0x4\n"
+ "ld1 { v5.s }[2], [x20], #0x4\n"
"cbz %x[rq_left_shift_ptr], 17f\n"
- "ld1 { v3.s }[2], [x20], #0x4\n"
+ "ld1 { v7.s }[2], [x19], #0x4\n"
"17:" // Oddments: Load quantisation parameters: Bit 1: Bit 0: Load left shift: Done
"b 20f\n"
"18:" // Oddments: Load quantisation parameters: Bit 1: Unset
- "ld1 { v2.s }[0], [x22], #0x4\n"
- "ld1 { v1.s }[0], [x21], #0x4\n"
+ "tbz %x[n_channels], #0, 20f\n"
+ "ld1 { v6.s }[0], [x21], #0x4\n"
+ "ld1 { v5.s }[0], [x20], #0x4\n"
"cbz %x[rq_left_shift_ptr], 19f\n"
- "ld1 { v3.s }[0], [x20], #0x4\n"
+ "ld1 { v7.s }[0], [x19], #0x4\n"
"19:" // Oddments: Load quantisation parameters: Bit 1: Unset: Bit 0: Load left shift: Done
"20:" // Oddments: Load quantisation parameters: Bit 1: End
"21:" // Oddments: Load quantisation parameters: Done
- "sshl v23.4s, v23.4s, v3.4s\n"
- "sshl v24.4s, v24.4s, v3.4s\n"
- "ldp x28, x27, [%x[outptrs], #0x0]\n"
- "ldp x26, x25, [%x[outptrs], #0x10]\n"
- "sshl v25.4s, v25.4s, v3.4s\n"
- "sqrdmulh v23.4s, v23.4s, v2.4s\n"
- "ldp x24, x23, [%x[outptrs], #0x20]\n"
- "ldp x22, x21, [%x[outptrs], #0x30]\n"
- "sqrdmulh v24.4s, v24.4s, v2.4s\n"
- "sqrdmulh v25.4s, v25.4s, v2.4s\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "add x28, x28, x11\n"
- "and v21.16b, v23.16b, v1.16b\n"
- "and v20.16b, v24.16b, v1.16b\n"
+ "sshl v27.4s, v27.4s, v7.4s\n"
+ "ldp x27, x26, [%x[outptrs], #0x0]\n"
"add x27, x27, x11\n"
+ "sqrdmulh v27.4s, v27.4s, v6.4s\n"
+ "ldp x25, x24, [%x[outptrs], #0x10]\n"
+ "sshl v26.4s, v26.4s, v7.4s\n"
+ "ldp x23, x22, [%x[outptrs], #0x20]\n"
"add x26, x26, x11\n"
- "and v19.16b, v25.16b, v1.16b\n"
- "sshl v26.4s, v26.4s, v3.4s\n"
+ "sshl v25.4s, v25.4s, v7.4s\n"
+ "ldp x21, x20, [%x[outptrs], #0x30]\n"
+ "sshl v24.4s, v24.4s, v7.4s\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
"add x25, x25, x11\n"
+ "and v16.16b, v27.16b, v5.16b\n"
"add x24, x24, x11\n"
- "sshl v27.4s, v27.4s, v3.4s\n"
- "sshl v28.4s, v28.4s, v3.4s\n"
+ "sqrdmulh v26.4s, v26.4s, v6.4s\n"
"add x23, x23, x11\n"
+ "sqrdmulh v25.4s, v25.4s, v6.4s\n"
"add x22, x22, x11\n"
- "sshl v29.4s, v29.4s, v3.4s\n"
- "sshl v30.4s, v30.4s, v3.4s\n"
+ "sqrdmulh v24.4s, v24.4s, v6.4s\n"
"add x21, x21, x11\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
"add x20, x20, x11\n"
- "sshl v31.4s, v31.4s, v3.4s\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "sshr v20.4s, v20.4s, #0x1f\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v2.4s\n"
- "sqrdmulh v27.4s, v27.4s, v2.4s\n"
- "sqrdmulh v28.4s, v28.4s, v2.4s\n"
- "sqrdmulh v29.4s, v29.4s, v2.4s\n"
- "sqrdmulh v30.4s, v30.4s, v2.4s\n"
- "sqrdmulh v31.4s, v31.4s, v2.4s\n"
- "sqadd v23.4s, v23.4s, v21.4s\n"
- "sqadd v24.4s, v24.4s, v20.4s\n"
- "sqadd v25.4s, v25.4s, v19.4s\n"
- "and v18.16b, v26.16b, v1.16b\n"
- "and v17.16b, v27.16b, v1.16b\n"
- "and v16.16b, v28.16b, v1.16b\n"
- "and v21.16b, v29.16b, v1.16b\n"
- "and v20.16b, v30.16b, v1.16b\n"
- "and v19.16b, v31.16b, v1.16b\n"
+ "and v18.16b, v26.16b, v5.16b\n"
+ "add x19, x19, x11\n"
+ "and v17.16b, v25.16b, v5.16b\n"
+ "sqadd v27.4s, v27.4s, v16.4s\n"
"sshr v18.4s, v18.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "sshr v20.4s, v20.4s, #0x1f\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
+ "and v16.16b, v24.16b, v5.16b\n"
+ "srshl v27.4s, v27.4s, v5.4s\n"
"sqadd v26.4s, v26.4s, v18.4s\n"
- "sqadd v27.4s, v27.4s, v17.4s\n"
- "sqadd v28.4s, v28.4s, v16.4s\n"
- "sqadd v29.4s, v29.4s, v21.4s\n"
- "sqadd v30.4s, v30.4s, v20.4s\n"
- "sqadd v31.4s, v31.4s, v19.4s\n"
- "srshl v23.4s, v23.4s, v1.4s\n"
- "srshl v24.4s, v24.4s, v1.4s\n"
- "srshl v25.4s, v25.4s, v1.4s\n"
- "srshl v26.4s, v26.4s, v1.4s\n"
- "srshl v27.4s, v27.4s, v1.4s\n"
- "srshl v28.4s, v28.4s, v1.4s\n"
- "srshl v29.4s, v29.4s, v1.4s\n"
- "srshl v30.4s, v30.4s, v1.4s\n"
- "srshl v31.4s, v31.4s, v1.4s\n"
- "add v23.4s, v23.4s, v4.4s\n"
- "add v24.4s, v24.4s, v4.4s\n"
- "add v25.4s, v25.4s, v4.4s\n"
- "add v26.4s, v26.4s, v4.4s\n"
- "add v27.4s, v27.4s, v4.4s\n"
- "add v28.4s, v28.4s, v4.4s\n"
- "add v29.4s, v29.4s, v4.4s\n"
- "add v30.4s, v30.4s, v4.4s\n"
- "add v31.4s, v31.4s, v4.4s\n"
- "smax v23.4s, v23.4s, v8.4s\n"
- "smax v24.4s, v24.4s, v8.4s\n"
- "smax v25.4s, v25.4s, v8.4s\n"
- "smax v26.4s, v26.4s, v8.4s\n"
- "smax v27.4s, v27.4s, v8.4s\n"
- "smax v28.4s, v28.4s, v8.4s\n"
- "smax v29.4s, v29.4s, v8.4s\n"
- "smax v30.4s, v30.4s, v8.4s\n"
- "smax v31.4s, v31.4s, v8.4s\n"
- "smin v23.4s, v23.4s, v7.4s\n"
- "smin v24.4s, v24.4s, v7.4s\n"
- "smin v25.4s, v25.4s, v7.4s\n"
- "smin v26.4s, v26.4s, v7.4s\n"
- "smin v27.4s, v27.4s, v7.4s\n"
- "smin v28.4s, v28.4s, v7.4s\n"
- "smin v29.4s, v29.4s, v7.4s\n"
- "smin v30.4s, v30.4s, v7.4s\n"
- "smin v31.4s, v31.4s, v7.4s\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "sqadd v25.4s, v25.4s, v17.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "add v27.4s, v27.4s, v8.4s\n"
+ "srshl v26.4s, v26.4s, v5.4s\n"
+ "srshl v25.4s, v25.4s, v5.4s\n"
+ "sqadd v24.4s, v24.4s, v16.4s\n"
+ "smax v27.4s, v27.4s, v12.4s\n"
+ "add v26.4s, v26.4s, v8.4s\n"
+ "add v25.4s, v25.4s, v8.4s\n"
+ "srshl v24.4s, v24.4s, v5.4s\n"
+ "smin v27.4s, v27.4s, v11.4s\n"
+ "smax v26.4s, v26.4s, v12.4s\n"
+ "smax v25.4s, v25.4s, v12.4s\n"
+ "add v24.4s, v24.4s, v8.4s\n"
+ "smin v26.4s, v26.4s, v11.4s\n"
+ "smin v25.4s, v25.4s, v11.4s\n"
+ "smax v24.4s, v24.4s, v12.4s\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
+ "smin v24.4s, v24.4s, v11.4s\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "sshl v23.4s, v23.4s, v7.4s\n"
+ "sshl v22.4s, v22.4s, v7.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v6.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v6.4s\n"
+ "sshl v21.4s, v21.4s, v7.4s\n"
+ "sshl v20.4s, v20.4s, v7.4s\n"
+ "and v17.16b, v23.16b, v5.16b\n"
+ "and v16.16b, v22.16b, v5.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v6.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v20.4s, v20.4s, v6.4s\n"
+ "sqadd v23.4s, v23.4s, v17.4s\n"
+ "sqadd v22.4s, v22.4s, v16.4s\n"
+ "and v16.16b, v21.16b, v5.16b\n"
+ "and v17.16b, v20.16b, v5.16b\n"
+ "srshl v23.4s, v23.4s, v5.4s\n"
+ "srshl v22.4s, v22.4s, v5.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "add v23.4s, v23.4s, v8.4s\n"
+ "add v22.4s, v22.4s, v8.4s\n"
+ "sqadd v21.4s, v21.4s, v16.4s\n"
+ "sqadd v20.4s, v20.4s, v17.4s\n"
+ "smax v23.4s, v23.4s, v12.4s\n"
+ "smax v22.4s, v22.4s, v12.4s\n"
+ "srshl v21.4s, v21.4s, v5.4s\n"
+ "srshl v20.4s, v20.4s, v5.4s\n"
+ "smin v23.4s, v23.4s, v11.4s\n"
+ "smin v22.4s, v22.4s, v11.4s\n"
+ "add v21.4s, v21.4s, v8.4s\n"
+ "add v20.4s, v20.4s, v8.4s\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "smax v21.4s, v21.4s, v12.4s\n"
+ "smax v20.4s, v20.4s, v12.4s\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "smin v21.4s, v21.4s, v11.4s\n"
+ "smin v20.4s, v20.4s, v11.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "sshl v19.4s, v19.4s, v7.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "sqrdmulh v19.4s, v19.4s, v6.4s\n"
+ "and v16.16b, v19.16b, v5.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "srshl v19.4s, v19.4s, v5.4s\n"
+ "add v19.4s, v19.4s, v8.4s\n"
+ "smax v19.4s, v19.4s, v12.4s\n"
+ "smin v19.4s, v19.4s, v11.4s\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
"tbz %x[n_channels], #1, 22f\n"
- "st1 { v23.h }[0], [x28], #0x2\n"
- "st1 { v24.h }[0], [x27], #0x2\n"
- "st1 { v25.h }[0], [x26], #0x2\n"
- "st1 { v26.h }[0], [x25], #0x2\n"
- "st1 { v27.h }[0], [x24], #0x2\n"
- "st1 { v28.h }[0], [x23], #0x2\n"
- "st1 { v29.h }[0], [x22], #0x2\n"
- "st1 { v30.h }[0], [x21], #0x2\n"
- "st1 { v31.h }[0], [x20], #0x2\n"
+ "st1 { v27.h }[0], [x27], #0x2\n"
+ "st1 { v26.h }[0], [x26], #0x2\n"
+ "st1 { v25.h }[0], [x25], #0x2\n"
+ "st1 { v24.h }[0], [x24], #0x2\n"
+ "st1 { v23.h }[0], [x23], #0x2\n"
+ "st1 { v22.h }[0], [x22], #0x2\n"
+ "st1 { v21.h }[0], [x21], #0x2\n"
+ "st1 { v20.h }[0], [x20], #0x2\n"
+ "st1 { v19.h }[0], [x19], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "st1 { v23.b }[2], [x28], #0x1\n"
- "st1 { v24.b }[2], [x27], #0x1\n"
- "st1 { v25.b }[2], [x26], #0x1\n"
- "st1 { v26.b }[2], [x25], #0x1\n"
- "st1 { v27.b }[2], [x24], #0x1\n"
- "st1 { v28.b }[2], [x23], #0x1\n"
- "st1 { v29.b }[2], [x22], #0x1\n"
- "st1 { v30.b }[2], [x21], #0x1\n"
- "st1 { v31.b }[2], [x20], #0x1\n"
+ "st1 { v27.b }[2], [x27], #0x1\n"
+ "st1 { v26.b }[2], [x26], #0x1\n"
+ "st1 { v25.b }[2], [x25], #0x1\n"
+ "st1 { v24.b }[2], [x24], #0x1\n"
+ "st1 { v23.b }[2], [x23], #0x1\n"
+ "st1 { v22.b }[2], [x22], #0x1\n"
+ "st1 { v21.b }[2], [x21], #0x1\n"
+ "st1 { v20.b }[2], [x20], #0x1\n"
+ "st1 { v19.b }[2], [x19], #0x1\n"
"b 23f\n"
"22:" // Oddments: Store: Bit 1: Unset
- "st1 { v23.b }[0], [x28], #0x1\n"
- "st1 { v24.b }[0], [x27], #0x1\n"
- "st1 { v25.b }[0], [x26], #0x1\n"
- "st1 { v26.b }[0], [x25], #0x1\n"
- "st1 { v27.b }[0], [x24], #0x1\n"
- "st1 { v28.b }[0], [x23], #0x1\n"
- "st1 { v29.b }[0], [x22], #0x1\n"
- "st1 { v30.b }[0], [x21], #0x1\n"
- "st1 { v31.b }[0], [x20], #0x1\n"
+ "tbz %x[n_channels], #0, 23f\n"
+ "st1 { v27.b }[0], [x27], #0x1\n"
+ "st1 { v26.b }[0], [x26], #0x1\n"
+ "st1 { v25.b }[0], [x25], #0x1\n"
+ "st1 { v24.b }[0], [x24], #0x1\n"
+ "st1 { v23.b }[0], [x23], #0x1\n"
+ "st1 { v22.b }[0], [x22], #0x1\n"
+ "st1 { v21.b }[0], [x21], #0x1\n"
+ "st1 { v20.b }[0], [x20], #0x1\n"
+ "st1 { v19.b }[0], [x19], #0x1\n"
"23:" // Oddments: Store: Bit 1: End
"24:" // End
: [params] "+&r" (params)
: [bias] "r" (qp.bias), [inptrs] "r" (inptrs), [n_channels] "r" ((uint64_t) n_channels), [n_points] "r" ((uint64_t) n_points), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_layer_left_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_left_shift)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [outptrs] "r" (outptrs), [qp] "r" (&qp), [rq_left_shift_ptr] "r" (qp.per_channel_left_shifts), [rq_mul_ptr] "r" (qp.per_channel_muls), [rq_right_shift_ptr] "r" (qp.per_channel_right_shifts)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
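For reference, the requantisation tail shared by the old and new register allocations above follows the usual gemmlowp-style fixed-point scheme: sshl applies the (per-channel or per-layer) left shift, sqrdmulh performs a saturating rounding doubling high multiply by the fixed-point multiplier, the and/sshr #0x1f/sqadd triple nudges negative values down by one so that the following srshl (a rounding right shift, since the shift vector holds negative values) rounds halfway cases away from zero, and the result is offset by c_offset, clamped between minval and maxval, and narrowed with uzp1. A minimal scalar model of one lane follows; it omits the sqrdmulh saturation corner case (both operands INT32_MIN), and the function name and signature are illustrative, not part of the library's API:

    #include <algorithm>
    #include <cstdint>

    uint8_t requantize(int32_t acc, int32_t mul, int left_shift, int right_shift,
                       int32_t c_offset, int32_t minval, int32_t maxval)
    {
        // sshl: apply the left shift (plain shift, may wrap as in hardware).
        int32_t x = static_cast<int32_t>(static_cast<int64_t>(acc) << left_shift);
        // sqrdmulh: (2*x*mul + 2^31) >> 32, i.e. (x*mul + 2^30) >> 31.
        int64_t prod = static_cast<int64_t>(x) * mul;
        int32_t high = static_cast<int32_t>((prod + (1LL << 30)) >> 31);
        // and/sshr #31/sqadd: nudge negatives so the rounding right shift
        // below rounds halfway cases away from zero. When the shift is zero
        // the AND with the (zero) shift vector yields no nudge, mirrored
        // here by the right_shift > 0 test.
        int32_t y = high + ((high < 0 && right_shift > 0) ? -1 : 0);
        // srshl by a negative amount: rounding shift right.
        if (right_shift > 0)
            y = (y + (1 << (right_shift - 1))) >> right_shift;
        // add c_offset, smax/smin clamp, then uzp1 narrows to bytes.
        y = std::min(std::max(y + c_offset, minval), maxval);
        return static_cast<uint8_t>(y);
    }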
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp
index a6dba90f9e..057b1ef492 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,475 +40,487 @@ void a64_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst_impl
)
{
__asm__ __volatile__(
- "ldr q14, [%x[params], #0x0]\n"
- "ldr q5, [%x[params], #0x10]\n"
- "movi v15.16b, #0x1\n"
- "ushr v15.4s, v15.4s, #0x8\n"
- "ldr q6, [%x[params], #0x20]\n"
- "ldr q7, [%x[params], #0x30]\n"
- "movi v26.4s, #0x0\n"
- "movi v27.4s, #0x0\n"
+ "movi v5.16b, #0x1\n"
+ "ldr x22, [%x[inptrs], #0x0]\n"
+ "add SP, SP, #-0x80\n"
+ "ushr v5.4s, v5.4s, #0x8\n"
"ldr x20, [%x[inptrs], #0x8]\n"
- "ld1 { v1.16b }, [x20]\n"
- "mov v29.16b, v1.16b\n"
- "mov v16.16b, v1.16b\n"
- "ldr x20, [%x[inptrs], #0x10]\n"
- "ld1 { v2.16b }, [x20]\n"
- "mov v28.16b, v1.16b\n"
- "mov v22.16b, v2.16b\n"
- "ldr x20, [%x[inptrs], #0x20]\n"
- "ld1 { v4.16b }, [x20]\n"
- "mov v31.16b, v2.16b\n"
- "mov v30.16b, v2.16b\n"
- "ldr x20, [%x[inptrs], #0x0]\n"
- "ld1 { v0.16b }, [x20]\n"
- "mov v23.16b, v4.16b\n"
- "mov v21.16b, v4.16b\n"
+ "add x21, %x[qp], %[offsetof_Requantize32_b_offset]\n"
+ "movi v26.4s, #0x0\n"
+ "ldr x19, [%x[inptrs], #0x10]\n"
+ "mov x11, #0x0\n"
+ "movi v1.4s, #0x0\n"
+ "ld1 { v15.16b }, [x22]\n"
+ "mov x10, #0x0\n"
+ "movi v22.4s, #0x0\n"
+ "ld1 { v29.16b }, [x20]\n"
+ "add x9, %x[qp], %[offsetof_Requantize32_c_offset]\n"
+ "movi v25.4s, #0x0\n"
+ "ld1 { v0.16b }, [x19]\n"
+ "add x28, %x[qp], %[offsetof_Requantize32_minval]\n"
+ "movi v13.4s, #0x0\n"
"ldr x20, [%x[inptrs], #0x18]\n"
- "ld1 { v3.16b }, [x20]\n"
- "mov v20.16b, v4.16b\n"
- "ext v29.16b, v29.16b, v29.16b, #0x2\n"
- "ext v16.16b, v16.16b, v16.16b, #0x4\n"
- "ext v28.16b, v28.16b, v28.16b, #0x6\n"
- "add x20, %x[qp], %[offsetof_Requantize32_b_offset]\n"
- "ld1r { v13.4s }, [x20]\n"
- "ext v22.16b, v22.16b, v22.16b, #0x2\n"
- "ext v31.16b, v31.16b, v31.16b, #0x4\n"
- "add x20, %x[qp], %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v12.4s }, [x20]\n"
- "ext v30.16b, v30.16b, v30.16b, #0x6\n"
- "ext v23.16b, v23.16b, v23.16b, #0x2\n"
- "add x20, %x[qp], %[offsetof_Requantize32_minval]\n"
- "ld1r { v11.4s }, [x20]\n"
- "ext v21.16b, v21.16b, v21.16b, #0x4\n"
- "ext v20.16b, v20.16b, v20.16b, #0x6\n"
- "add x20, %x[qp], %[offsetof_Requantize32_maxval]\n"
- "ld1r { v10.4s }, [x20]\n"
- "mov v25.16b, v0.16b\n"
- "mov v19.16b, v0.16b\n"
+ "add x27, %x[qp], %[offsetof_Requantize32_maxval]\n"
+ "mov v20.16b, v15.16b\n"
+ "ldr x19, [%x[inptrs], #0x20]\n"
"cmp %x[n_channels], #0x4\n"
- "mov x9, #0x0\n"
- "mov v18.16b, v0.16b\n"
- "mov v24.16b, v3.16b\n"
- "mov x28, #0x0\n"
- "ldp x27, x26, [%x[outptrs], #0x0]\n"
- "mov v17.16b, v3.16b\n"
- "ext v25.16b, v25.16b, v25.16b, #0x2\n"
- "ldp x25, x24, [%x[outptrs], #0x10]\n"
- "ldp x23, x22, [%x[outptrs], #0x20]\n"
- "ext v19.16b, v19.16b, v19.16b, #0x4\n"
- "ext v18.16b, v18.16b, v18.16b, #0x6\n"
- "ldp x21, x20, [%x[outptrs], #0x30]\n"
+ "ext v20.16b, v20.16b, v20.16b, #0x2\n"
+ "ld1r { v4.4s }, [x21]\n"
+ "mov v17.16b, v15.16b\n"
+ "ld1 { v2.16b }, [x20]\n"
+ "ext v17.16b, v17.16b, v17.16b, #0x4\n"
+ "ld1 { v7.16b }, [x19]\n"
+ "mov v23.16b, v15.16b\n"
+ "ldp x26, x25, [%x[outptrs], #0x0]\n"
+ "ext v23.16b, v23.16b, v23.16b, #0x6\n"
+ "ldp x24, x23, [%x[outptrs], #0x10]\n"
+ "mov v18.16b, v29.16b\n"
+ "ldp x22, x21, [%x[outptrs], #0x20]\n"
+ "zip1 v15.4s, v15.4s, v17.4s\n"
+ "ldp x20, x19, [%x[outptrs], #0x30]\n"
+ "ext v18.16b, v18.16b, v18.16b, #0x2\n"
+ "ld1r { v14.4s }, [x9]\n"
+ "zip1 v20.4s, v20.4s, v23.4s\n"
+ "ld1r { v27.4s }, [x28]\n"
+ "zip1 v15.4s, v15.4s, v20.4s\n"
+ "ld1r { v23.4s }, [x27]\n"
+ "mov v17.16b, v29.16b\n"
+ "ldr q6, [%x[params], #0x0]\n"
+ "ext v17.16b, v17.16b, v17.16b, #0x4\n"
+ "ldr q8, [%x[params], #0x10]\n"
+ "mov v11.16b, v29.16b\n"
+ "ldr q9, [%x[params], #0x20]\n"
+ "ext v11.16b, v11.16b, v11.16b, #0x6\n"
+ "ldr q10, [%x[params], #0x30]\n"
"add %x[params], %x[params], #0x40\n"
- "zip1 v1.4s, v1.4s, v16.4s\n"
- "mov v16.16b, v3.16b\n"
- "zip1 v29.4s, v29.4s, v28.4s\n"
- "zip1 v2.4s, v2.4s, v31.4s\n"
- "zip1 v22.4s, v22.4s, v30.4s\n"
- "ext v24.16b, v24.16b, v24.16b, #0x2\n"
+ "zip1 v29.4s, v29.4s, v17.4s\n"
+ "mov v12.16b, v0.16b\n"
+ "ext v12.16b, v12.16b, v12.16b, #0x2\n"
+ "zip1 v18.4s, v18.4s, v11.4s\n"
+ "zip1 v29.4s, v29.4s, v18.4s\n"
+ "mov v17.16b, v0.16b\n"
"ext v17.16b, v17.16b, v17.16b, #0x4\n"
- "ext v16.16b, v16.16b, v16.16b, #0x6\n"
- "zip1 v4.4s, v4.4s, v21.4s\n"
- "zip1 v23.4s, v23.4s, v20.4s\n"
- "zip1 v0.4s, v0.4s, v19.4s\n"
- "zip1 v25.4s, v25.4s, v18.4s\n"
- "zip1 v1.4s, v1.4s, v29.4s\n"
- "zip1 v2.4s, v2.4s, v22.4s\n"
- ".inst 0x6f81e1fa // udot v26.4s, v15.16b, v1.4b[0]\n"
- "zip1 v3.4s, v3.4s, v17.4s\n"
- "zip1 v24.4s, v24.4s, v16.4s\n"
- ".inst 0x6fa1e1fb // udot v27.4s, v15.16b, v1.4b[1]\n"
- "zip1 v4.4s, v4.4s, v23.4s\n"
- "movi v23.4s, #0x0\n"
- ".inst 0x6f81e9f7 // udot v23.4s, v15.16b, v1.4b[2]\n"
- "movi v22.4s, #0x0\n"
- "movi v21.4s, #0x0\n"
- ".inst 0x6fa1e9f6 // udot v22.4s, v15.16b, v1.4b[3]\n"
- "movi v20.4s, #0x0\n"
- "movi v9.4s, #0x0\n"
- ".inst 0x6f82e1f5 // udot v21.4s, v15.16b, v2.4b[0]\n"
- "movi v8.4s, #0x0\n"
+ "mov v11.16b, v0.16b\n"
+ "ext v11.16b, v11.16b, v11.16b, #0x6\n"
+ "mov v18.16b, v2.16b\n"
+ "zip1 v0.4s, v0.4s, v17.4s\n"
+ "ext v18.16b, v18.16b, v18.16b, #0x2\n"
+ "zip1 v12.4s, v12.4s, v11.4s\n"
+ "zip1 v0.4s, v0.4s, v12.4s\n"
+ "mov v17.16b, v2.16b\n"
+ "ext v17.16b, v17.16b, v17.16b, #0x4\n"
+ "mov v19.16b, v2.16b\n"
+ "ext v19.16b, v19.16b, v19.16b, #0x6\n"
+ "mov v28.16b, v7.16b\n"
+ "zip1 v2.4s, v2.4s, v17.4s\n"
+ "ext v28.16b, v28.16b, v28.16b, #0x2\n"
+ "zip1 v18.4s, v18.4s, v19.4s\n"
+ "zip1 v2.4s, v2.4s, v18.4s\n"
+ "mov v18.16b, v7.16b\n"
+ "ext v18.16b, v18.16b, v18.16b, #0x4\n"
+ "mov v21.16b, v7.16b\n"
+ "ext v21.16b, v21.16b, v21.16b, #0x6\n"
+ "movi v30.4s, #0x0\n"
+ "zip1 v7.4s, v7.4s, v18.4s\n"
+ "movi v3.4s, #0x0\n"
+ "zip1 v28.4s, v28.4s, v21.4s\n"
+ "zip1 v7.4s, v7.4s, v28.4s\n"
+ "movi v12.4s, #0x0\n"
+ "movi v11.4s, #0x0\n"
"movi v19.4s, #0x0\n"
- ".inst 0x6fa2e1f4 // udot v20.4s, v15.16b, v2.4b[1]\n"
- "movi v18.4s, #0x0\n"
+ "movi v21.4s, #0x0\n"
"movi v17.4s, #0x0\n"
- ".inst 0x6f82e9e9 // udot v9.4s, v15.16b, v2.4b[2]\n"
"movi v16.4s, #0x0\n"
- "zip1 v0.4s, v0.4s, v25.4s\n"
- ".inst 0x6fa2e9e8 // udot v8.4s, v15.16b, v2.4b[3]\n"
- "zip1 v3.4s, v3.4s, v24.4s\n"
- ".inst 0x6f84e1f3 // udot v19.4s, v15.16b, v4.4b[0]\n"
- ".inst 0x6fa4e1f2 // udot v18.4s, v15.16b, v4.4b[1]\n"
- ".inst 0x6f84e9f1 // udot v17.4s, v15.16b, v4.4b[2]\n"
- ".inst 0x6fa4e9f0 // udot v16.4s, v15.16b, v4.4b[3]\n"
- "movi v31.4s, #0x0\n"
- "movi v30.4s, #0x0\n"
- "movi v29.4s, #0x0\n"
- ".inst 0x6f80e1ff // udot v31.4s, v15.16b, v0.4b[0]\n"
"movi v28.4s, #0x0\n"
- ".inst 0x6fa0e1fe // udot v30.4s, v15.16b, v0.4b[1]\n"
- ".inst 0x6f80e9fd // udot v29.4s, v15.16b, v0.4b[2]\n"
- ".inst 0x6fa0e9fc // udot v28.4s, v15.16b, v0.4b[3]\n"
- "add v24.4s, v26.4s, v21.4s\n"
- "add v25.4s, v27.4s, v20.4s\n"
- "add v26.4s, v23.4s, v9.4s\n"
- "add v27.4s, v22.4s, v8.4s\n"
- "add v23.4s, v19.4s, v21.4s\n"
- "movi v22.4s, #0x0\n"
- ".inst 0x6f83e1f6 // udot v22.4s, v15.16b, v3.4b[0]\n"
- "add v21.4s, v18.4s, v20.4s\n"
- "movi v20.4s, #0x0\n"
- ".inst 0x6fa3e1f4 // udot v20.4s, v15.16b, v3.4b[1]\n"
- "add v19.4s, v17.4s, v9.4s\n"
"movi v18.4s, #0x0\n"
- ".inst 0x6f83e9f2 // udot v18.4s, v15.16b, v3.4b[2]\n"
- "add v17.4s, v16.4s, v8.4s\n"
- "movi v16.4s, #0x0\n"
- ".inst 0x6fa3e9f0 // udot v16.4s, v15.16b, v3.4b[3]\n"
- "add v24.4s, v24.4s, v31.4s\n"
- "add v25.4s, v25.4s, v30.4s\n"
- "add v26.4s, v26.4s, v29.4s\n"
- "add v27.4s, v27.4s, v28.4s\n"
- "add v28.4s, v23.4s, v22.4s\n"
- "add v29.4s, v21.4s, v20.4s\n"
- "add v30.4s, v19.4s, v18.4s\n"
- "add v31.4s, v17.4s, v16.4s\n"
- "neg v13.4s, v13.4s\n"
- "mul v24.4s, v24.4s, v13.4s\n"
- "mul v25.4s, v25.4s, v13.4s\n"
- "mul v26.4s, v26.4s, v13.4s\n"
- "mul v27.4s, v27.4s, v13.4s\n"
- "mul v28.4s, v28.4s, v13.4s\n"
- "mul v29.4s, v29.4s, v13.4s\n"
- "mul v30.4s, v30.4s, v13.4s\n"
- "mul v31.4s, v31.4s, v13.4s\n"
- "zip1 v19.4s, v24.4s, v26.4s\n"
- "zip1 v18.4s, v25.4s, v27.4s\n"
- "zip1 v17.4s, v28.4s, v30.4s\n"
- "zip1 v16.4s, v29.4s, v31.4s\n"
- "zip1 v22.4s, v19.4s, v18.4s\n"
- "zip1 v23.4s, v17.4s, v16.4s\n"
- "add v24.4s, v24.4s, v14.4s\n"
- "add v25.4s, v25.4s, v14.4s\n"
- "add v26.4s, v26.4s, v14.4s\n"
- "add v27.4s, v27.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v31.4s, v31.4s, v14.4s\n"
+ "movi v20.4s, #0x0\n"
+ "movi v24.4s, #0x0\n"
+ "movi v31.4s, #0x0\n"
+ ".inst 0x6f8fe0ba // udot v26.4s, v5.16b, v15.4b[0]\n"
+ ".inst 0x6fafe0a1 // udot v1.4s, v5.16b, v15.4b[1]\n"
+ ".inst 0x6f8fe8b6 // udot v22.4s, v5.16b, v15.4b[2]\n"
+ ".inst 0x6fafe8b9 // udot v25.4s, v5.16b, v15.4b[3]\n"
+ ".inst 0x6f9de0ad // udot v13.4s, v5.16b, v29.4b[0]\n"
+ ".inst 0x6fbde0be // udot v30.4s, v5.16b, v29.4b[1]\n"
+ ".inst 0x6f9de8a3 // udot v3.4s, v5.16b, v29.4b[2]\n"
+ ".inst 0x6fbde8ac // udot v12.4s, v5.16b, v29.4b[3]\n"
+ ".inst 0x6f80e0ab // udot v11.4s, v5.16b, v0.4b[0]\n"
+ ".inst 0x6fa0e0b3 // udot v19.4s, v5.16b, v0.4b[1]\n"
+ ".inst 0x6f80e8b5 // udot v21.4s, v5.16b, v0.4b[2]\n"
+ ".inst 0x6fa0e8b1 // udot v17.4s, v5.16b, v0.4b[3]\n"
+ ".inst 0x6f82e0b0 // udot v16.4s, v5.16b, v2.4b[0]\n"
+ ".inst 0x6fa2e0bc // udot v28.4s, v5.16b, v2.4b[1]\n"
+ ".inst 0x6f82e8b2 // udot v18.4s, v5.16b, v2.4b[2]\n"
+ ".inst 0x6fa2e8b4 // udot v20.4s, v5.16b, v2.4b[3]\n"
+ ".inst 0x6f87e0b8 // udot v24.4s, v5.16b, v7.4b[0]\n"
+ ".inst 0x6fa7e0bf // udot v31.4s, v5.16b, v7.4b[1]\n"
+ "mov v26.16b, v26.16b\n"
+ "mov v1.16b, v1.16b\n"
+ "mov v22.16b, v22.16b\n"
+ "mov v25.16b, v25.16b\n"
+ "add v26.4s, v26.4s, v13.4s\n"
+ "movi v13.4s, #0x0\n"
+ ".inst 0x6f87e8ad // udot v13.4s, v5.16b, v7.4b[2]\n"
+ "add v1.4s, v1.4s, v30.4s\n"
+ "movi v30.4s, #0x0\n"
+ ".inst 0x6fa7e8be // udot v30.4s, v5.16b, v7.4b[3]\n"
+ "add v22.4s, v22.4s, v3.4s\n"
+ "add v25.4s, v25.4s, v12.4s\n"
+ "add v26.4s, v26.4s, v11.4s\n"
+ "add v1.4s, v1.4s, v19.4s\n"
+ "add v22.4s, v22.4s, v21.4s\n"
+ "add v25.4s, v25.4s, v17.4s\n"
+ "mov v11.16b, v11.16b\n"
+ "mov v3.16b, v19.16b\n"
+ "mov v19.16b, v21.16b\n"
+ "mov v21.16b, v17.16b\n"
+ "add v11.4s, v11.4s, v16.4s\n"
+ "add v3.4s, v3.4s, v28.4s\n"
+ "add v19.4s, v19.4s, v18.4s\n"
+ "add v21.4s, v21.4s, v20.4s\n"
+ "add v11.4s, v11.4s, v24.4s\n"
+ "add v3.4s, v3.4s, v31.4s\n"
+ "add v19.4s, v19.4s, v13.4s\n"
+ "add v21.4s, v21.4s, v30.4s\n"
+ "neg v4.4s, v4.4s\n"
+ "mul v26.4s, v26.4s, v4.4s\n"
+ "str q26, [SP, #0x0]\n"
+ "mul v1.4s, v1.4s, v4.4s\n"
+ "mul v22.4s, v22.4s, v4.4s\n"
+ "str q1, [SP, #0x10]\n"
+ "mul v25.4s, v25.4s, v4.4s\n"
+ "mul v11.4s, v11.4s, v4.4s\n"
+ "str q22, [SP, #0x20]\n"
+ "mul v3.4s, v3.4s, v4.4s\n"
+ "str q25, [SP, #0x30]\n"
+ "mul v19.4s, v19.4s, v4.4s\n"
+ "mul v21.4s, v21.4s, v4.4s\n"
+ "str q11, [SP, #0x40]\n"
+ "add v26.4s, v26.4s, v6.4s\n"
+ "str q3, [SP, #0x50]\n"
+ "add v1.4s, v1.4s, v6.4s\n"
+ "str q19, [SP, #0x60]\n"
+ "add v22.4s, v22.4s, v6.4s\n"
+ "add v25.4s, v25.4s, v6.4s\n"
+ "str q21, [SP, #0x70]\n"
+ "add v11.4s, v11.4s, v6.4s\n"
+ "add v3.4s, v3.4s, v6.4s\n"
+ "add v19.4s, v19.4s, v6.4s\n"
+ "add v21.4s, v21.4s, v6.4s\n"
"ble 2f\n"
"1:" // Loop
- "ldr q21, [%x[params], #0x0]\n"
- "ldr q20, [%x[params], #0x10]\n"
- ".inst 0x6f80e0b8 // udot v24.4s, v5.16b, v0.4b[0]\n"
- ".inst 0x6fa0e0b9 // udot v25.4s, v5.16b, v0.4b[1]\n"
- "ldr q14, [%x[params], #0x20]\n"
- ".inst 0x6f80e8ba // udot v26.4s, v5.16b, v0.4b[2]\n"
- ".inst 0x6fa0e8bb // udot v27.4s, v5.16b, v0.4b[3]\n"
+ ".inst 0x6f8fe11a // udot v26.4s, v8.16b, v15.4b[0]\n"
+ "ldr q20, [%x[params], #0x0]\n"
+ "add x11, x11, #0x10\n"
+ ".inst 0x6fafe101 // udot v1.4s, v8.16b, v15.4b[1]\n"
+ "ldr q4, [%x[params], #0x10]\n"
"sub %x[n_channels], %x[n_channels], #0x4\n"
- ".inst 0x6f81e0d8 // udot v24.4s, v6.16b, v1.4b[0]\n"
- ".inst 0x6fa1e0d9 // udot v25.4s, v6.16b, v1.4b[1]\n"
+ ".inst 0x6f8fe916 // udot v22.4s, v8.16b, v15.4b[2]\n"
+ "ldr q6, [%x[params], #0x20]\n"
"cmp %x[n_channels], #0x4\n"
- "add x9, x9, #0x10\n"
- ".inst 0x6f81e8da // udot v26.4s, v6.16b, v1.4b[2]\n"
- ".inst 0x6fa1e8db // udot v27.4s, v6.16b, v1.4b[3]\n"
- ".inst 0x6f82e0bc // udot v28.4s, v5.16b, v2.4b[0]\n"
- ".inst 0x6fa2e0bd // udot v29.4s, v5.16b, v2.4b[1]\n"
- ".inst 0x6f82e8be // udot v30.4s, v5.16b, v2.4b[2]\n"
- ".inst 0x6fa2e8bf // udot v31.4s, v5.16b, v2.4b[3]\n"
- "ldr q5, [%x[params], #0x30]\n"
- ".inst 0x6f82e0f8 // udot v24.4s, v7.16b, v2.4b[0]\n"
- ".inst 0x6fa2e0f9 // udot v25.4s, v7.16b, v2.4b[1]\n"
- "sqrdmulh v24.4s, v24.4s, v21.4s\n"
- ".inst 0x6f82e8fa // udot v26.4s, v7.16b, v2.4b[2]\n"
- ".inst 0x6fa2e8fb // udot v27.4s, v7.16b, v2.4b[3]\n"
- "sqrdmulh v25.4s, v25.4s, v21.4s\n"
- ".inst 0x6f83e0dc // udot v28.4s, v6.16b, v3.4b[0]\n"
- ".inst 0x6fa3e0dd // udot v29.4s, v6.16b, v3.4b[1]\n"
- "sqrdmulh v26.4s, v26.4s, v21.4s\n"
- ".inst 0x6f83e8de // udot v30.4s, v6.16b, v3.4b[2]\n"
- ".inst 0x6fa3e8df // udot v31.4s, v6.16b, v3.4b[3]\n"
- "ldr q6, [%x[params], #0x40]\n"
- "sqrdmulh v27.4s, v27.4s, v21.4s\n"
- ".inst 0x6f84e0fc // udot v28.4s, v7.16b, v4.4b[0]\n"
- ".inst 0x6fa4e0fd // udot v29.4s, v7.16b, v4.4b[1]\n"
- "and v19.16b, v24.16b, v20.16b\n"
- ".inst 0x6f84e8fe // udot v30.4s, v7.16b, v4.4b[2]\n"
- ".inst 0x6fa4e8ff // udot v31.4s, v7.16b, v4.4b[3]\n"
- "ldr q7, [%x[params], #0x50]\n"
- "and v18.16b, v25.16b, v20.16b\n"
- "and v17.16b, v26.16b, v20.16b\n"
- "and v16.16b, v27.16b, v20.16b\n"
+ ".inst 0x6fafe919 // udot v25.4s, v8.16b, v15.4b[3]\n"
+ ".inst 0x6f80e10b // udot v11.4s, v8.16b, v0.4b[0]\n"
+ ".inst 0x6fa0e103 // udot v3.4s, v8.16b, v0.4b[1]\n"
+ ".inst 0x6f80e913 // udot v19.4s, v8.16b, v0.4b[2]\n"
+ ".inst 0x6fa0e915 // udot v21.4s, v8.16b, v0.4b[3]\n"
+ "ldr q8, [%x[params], #0x30]\n"
+ ".inst 0x6f9de13a // udot v26.4s, v9.16b, v29.4b[0]\n"
+ ".inst 0x6fbde121 // udot v1.4s, v9.16b, v29.4b[1]\n"
+ ".inst 0x6f9de936 // udot v22.4s, v9.16b, v29.4b[2]\n"
+ ".inst 0x6fbde939 // udot v25.4s, v9.16b, v29.4b[3]\n"
+ ".inst 0x6f82e12b // udot v11.4s, v9.16b, v2.4b[0]\n"
+ ".inst 0x6fa2e123 // udot v3.4s, v9.16b, v2.4b[1]\n"
+ ".inst 0x6f82e933 // udot v19.4s, v9.16b, v2.4b[2]\n"
+ ".inst 0x6fa2e935 // udot v21.4s, v9.16b, v2.4b[3]\n"
+ "ldr q9, [%x[params], #0x40]\n"
+ ".inst 0x6f80e15a // udot v26.4s, v10.16b, v0.4b[0]\n"
+ ".inst 0x6fa0e141 // udot v1.4s, v10.16b, v0.4b[1]\n"
+ ".inst 0x6f80e956 // udot v22.4s, v10.16b, v0.4b[2]\n"
+ ".inst 0x6fa0e959 // udot v25.4s, v10.16b, v0.4b[3]\n"
+ ".inst 0x6f87e14b // udot v11.4s, v10.16b, v7.4b[0]\n"
+ ".inst 0x6fa7e143 // udot v3.4s, v10.16b, v7.4b[1]\n"
+ ".inst 0x6f87e953 // udot v19.4s, v10.16b, v7.4b[2]\n"
+ ".inst 0x6fa7e955 // udot v21.4s, v10.16b, v7.4b[3]\n"
+ "ldr q10, [%x[params], #0x50]\n"
"add %x[params], %x[params], #0x60\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
+ "sqrdmulh v26.4s, v26.4s, v20.4s\n"
+ "sqrdmulh v1.4s, v1.4s, v20.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v20.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v20.4s\n"
+ "sqrdmulh v11.4s, v11.4s, v20.4s\n"
+ "and v30.16b, v26.16b, v4.16b\n"
+ "and v17.16b, v1.16b, v4.16b\n"
+ "and v16.16b, v22.16b, v4.16b\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqrdmulh v28.4s, v28.4s, v21.4s\n"
- "sqrdmulh v29.4s, v29.4s, v21.4s\n"
- "sqrdmulh v30.4s, v30.4s, v21.4s\n"
- "sqrdmulh v31.4s, v31.4s, v21.4s\n"
- "sqadd v24.4s, v24.4s, v19.4s\n"
- "sqadd v25.4s, v25.4s, v18.4s\n"
- "sqadd v26.4s, v26.4s, v17.4s\n"
- "sqadd v27.4s, v27.4s, v16.4s\n"
- "and v19.16b, v28.16b, v20.16b\n"
- "and v18.16b, v29.16b, v20.16b\n"
- "and v17.16b, v30.16b, v20.16b\n"
- "and v16.16b, v31.16b, v20.16b\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v26.4s, v26.4s, v30.4s\n"
+ "sqadd v1.4s, v1.4s, v17.4s\n"
+ "sqadd v22.4s, v22.4s, v16.4s\n"
+ "and v16.16b, v25.16b, v4.16b\n"
+ "srshl v26.4s, v26.4s, v4.4s\n"
+ "srshl v1.4s, v1.4s, v4.4s\n"
+ "srshl v22.4s, v22.4s, v4.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqadd v28.4s, v28.4s, v19.4s\n"
- "sqadd v29.4s, v29.4s, v18.4s\n"
- "sqadd v30.4s, v30.4s, v17.4s\n"
- "sqadd v31.4s, v31.4s, v16.4s\n"
- "srshl v24.4s, v24.4s, v20.4s\n"
- "srshl v25.4s, v25.4s, v20.4s\n"
- "srshl v26.4s, v26.4s, v20.4s\n"
- "srshl v27.4s, v27.4s, v20.4s\n"
- "srshl v28.4s, v28.4s, v20.4s\n"
- "srshl v29.4s, v29.4s, v20.4s\n"
- "srshl v30.4s, v30.4s, v20.4s\n"
- "srshl v31.4s, v31.4s, v20.4s\n"
- "add v24.4s, v24.4s, v12.4s\n"
- "add v25.4s, v25.4s, v12.4s\n"
- "add v26.4s, v26.4s, v12.4s\n"
- "add v27.4s, v27.4s, v12.4s\n"
- "add v28.4s, v28.4s, v12.4s\n"
- "add v29.4s, v29.4s, v12.4s\n"
- "add v30.4s, v30.4s, v12.4s\n"
- "add v31.4s, v31.4s, v12.4s\n"
- "smin v24.4s, v24.4s, v10.4s\n"
- "smin v25.4s, v25.4s, v10.4s\n"
- "smin v26.4s, v26.4s, v10.4s\n"
- "smin v27.4s, v27.4s, v10.4s\n"
- "smin v28.4s, v28.4s, v10.4s\n"
- "smin v29.4s, v29.4s, v10.4s\n"
- "smin v30.4s, v30.4s, v10.4s\n"
- "smin v31.4s, v31.4s, v10.4s\n"
- "smax v24.4s, v24.4s, v11.4s\n"
- "smax v25.4s, v25.4s, v11.4s\n"
- "smax v26.4s, v26.4s, v11.4s\n"
- "smax v27.4s, v27.4s, v11.4s\n"
- "smax v28.4s, v28.4s, v11.4s\n"
- "smax v29.4s, v29.4s, v11.4s\n"
- "smax v30.4s, v30.4s, v11.4s\n"
- "smax v31.4s, v31.4s, v11.4s\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "add v26.4s, v26.4s, v14.4s\n"
+ "add v1.4s, v1.4s, v14.4s\n"
+ "add v22.4s, v22.4s, v14.4s\n"
+ "smin v26.4s, v26.4s, v23.4s\n"
+ "smin v1.4s, v1.4s, v23.4s\n"
+ "smin v22.4s, v22.4s, v23.4s\n"
+ "smax v26.4s, v26.4s, v27.4s\n"
+ "smax v1.4s, v1.4s, v27.4s\n"
+ "smax v22.4s, v22.4s, v27.4s\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "str s24, [x27, x28]\n"
+ "uzp1 v1.16b, v1.16b, v1.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "str s25, [x26, x28]\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "str s26, [x25, x28]\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
- "str s27, [x24, x28]\n"
- "str s28, [x23, x28]\n"
- "dup v24.4s, v22.s[0]\n"
- "dup v25.4s, v22.s[1]\n"
- "str s29, [x22, x28]\n"
- "dup v26.4s, v22.s[2]\n"
- "dup v27.4s, v22.s[3]\n"
- "str s30, [x21, x28]\n"
- "dup v28.4s, v23.s[0]\n"
- "dup v29.4s, v23.s[1]\n"
- "str s31, [x20, x28]\n"
- "dup v30.4s, v23.s[2]\n"
- "dup v31.4s, v23.s[3]\n"
- "add x28, x28, #0x4\n"
- "add v24.4s, v24.4s, v14.4s\n"
+ "str s26, [x26, x10]\n"
+ "uzp1 v1.16b, v1.16b, v1.16b\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "ldr q26, [SP, #0x0]\n"
+ "sqadd v25.4s, v25.4s, v16.4s\n"
+ "str s1, [x25, x10]\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "ldr q1, [SP, #0x10]\n"
+ "and v16.16b, v11.16b, v4.16b\n"
+ "str s22, [x24, x10]\n"
+ "sqrdmulh v3.4s, v3.4s, v20.4s\n"
+ "ldr q22, [SP, #0x20]\n"
+ "srshl v25.4s, v25.4s, v4.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v19.4s, v19.4s, v20.4s\n"
+ "and v17.16b, v3.16b, v4.16b\n"
"add v25.4s, v25.4s, v14.4s\n"
- "add v26.4s, v26.4s, v14.4s\n"
- "add v27.4s, v27.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v31.4s, v31.4s, v14.4s\n"
+ "sqadd v11.4s, v11.4s, v16.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "smin v25.4s, v25.4s, v23.4s\n"
+ "and v16.16b, v19.16b, v4.16b\n"
+ "srshl v11.4s, v11.4s, v4.4s\n"
+ "smax v25.4s, v25.4s, v27.4s\n"
+ "sqadd v3.4s, v3.4s, v17.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "add v11.4s, v11.4s, v14.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s25, [x23, x10]\n"
+ "smin v11.4s, v11.4s, v23.4s\n"
+ "srshl v3.4s, v3.4s, v4.4s\n"
+ "ldr q25, [SP, #0x30]\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "sqrdmulh v21.4s, v21.4s, v20.4s\n"
+ "smax v11.4s, v11.4s, v27.4s\n"
+ "add v3.4s, v3.4s, v14.4s\n"
+ "srshl v19.4s, v19.4s, v4.4s\n"
+ "uzp1 v11.16b, v11.16b, v11.16b\n"
+ "smin v3.4s, v3.4s, v23.4s\n"
+ "uzp1 v11.16b, v11.16b, v11.16b\n"
+ "str s11, [x22, x10]\n"
+ "smax v3.4s, v3.4s, v27.4s\n"
+ "add v19.4s, v19.4s, v14.4s\n"
+ "ldr q11, [SP, #0x40]\n"
+ "and v16.16b, v21.16b, v4.16b\n"
+ "add v26.4s, v26.4s, v6.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "smin v19.4s, v19.4s, v23.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "str s3, [x21, x10]\n"
+ "smax v19.4s, v19.4s, v27.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "ldr q3, [SP, #0x50]\n"
+ "add v1.4s, v1.4s, v6.4s\n"
+ "add v22.4s, v22.4s, v6.4s\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "sqadd v21.4s, v21.4s, v16.4s\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str s19, [x20, x10]\n"
+ "add v25.4s, v25.4s, v6.4s\n"
+ "add v11.4s, v11.4s, v6.4s\n"
+ "ldr q19, [SP, #0x60]\n"
+ "srshl v21.4s, v21.4s, v4.4s\n"
+ "add v3.4s, v3.4s, v6.4s\n"
+ "add v21.4s, v21.4s, v14.4s\n"
+ "add v19.4s, v19.4s, v6.4s\n"
+ "smin v21.4s, v21.4s, v23.4s\n"
+ "smax v21.4s, v21.4s, v27.4s\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "str s21, [x19, x10]\n"
+ "add x10, x10, #0x4\n"
+ "ldr q21, [SP, #0x70]\n"
+ "add v21.4s, v21.4s, v6.4s\n"
"bgt 1b\n"
"2:" // Tail
- "ldr q21, [%x[params], #0x0]\n"
- "ldr q20, [%x[params], #0x10]\n"
- ".inst 0x6f80e0b8 // udot v24.4s, v5.16b, v0.4b[0]\n"
- ".inst 0x6fa0e0b9 // udot v25.4s, v5.16b, v0.4b[1]\n"
- ".inst 0x6f80e8ba // udot v26.4s, v5.16b, v0.4b[2]\n"
- ".inst 0x6fa0e8bb // udot v27.4s, v5.16b, v0.4b[3]\n"
+ ".inst 0x6f8fe11a // udot v26.4s, v8.16b, v15.4b[0]\n"
+ "ldr q20, [%x[params], #0x0]\n"
+ "add x26, x26, x10\n"
+ ".inst 0x6fafe101 // udot v1.4s, v8.16b, v15.4b[1]\n"
+ "ldr q4, [%x[params], #0x10]\n"
+ "add x25, x25, x10\n"
+ ".inst 0x6f8fe916 // udot v22.4s, v8.16b, v15.4b[2]\n"
+ "add x24, x24, x10\n"
+ ".inst 0x6fafe919 // udot v25.4s, v8.16b, v15.4b[3]\n"
+ "add x23, x23, x10\n"
+ ".inst 0x6f80e10b // udot v11.4s, v8.16b, v0.4b[0]\n"
+ "add x22, x22, x10\n"
+ ".inst 0x6fa0e103 // udot v3.4s, v8.16b, v0.4b[1]\n"
+ "add x21, x21, x10\n"
+ ".inst 0x6f80e913 // udot v19.4s, v8.16b, v0.4b[2]\n"
+ "add x20, x20, x10\n"
+ ".inst 0x6fa0e915 // udot v21.4s, v8.16b, v0.4b[3]\n"
+ "add x19, x19, x10\n"
+ ".inst 0x6f9de13a // udot v26.4s, v9.16b, v29.4b[0]\n"
"cmp %x[n_channels], #0x4\n"
- "add x27, x27, x28\n"
- ".inst 0x6f81e0d8 // udot v24.4s, v6.16b, v1.4b[0]\n"
- ".inst 0x6fa1e0d9 // udot v25.4s, v6.16b, v1.4b[1]\n"
- "add x26, x26, x28\n"
- "add x25, x25, x28\n"
- ".inst 0x6f81e8da // udot v26.4s, v6.16b, v1.4b[2]\n"
- ".inst 0x6fa1e8db // udot v27.4s, v6.16b, v1.4b[3]\n"
- "add x24, x24, x28\n"
- "add x23, x23, x28\n"
- ".inst 0x6f82e0bc // udot v28.4s, v5.16b, v2.4b[0]\n"
- ".inst 0x6fa2e0bd // udot v29.4s, v5.16b, v2.4b[1]\n"
- "add x22, x22, x28\n"
- "add x21, x21, x28\n"
- ".inst 0x6f82e8be // udot v30.4s, v5.16b, v2.4b[2]\n"
- ".inst 0x6fa2e8bf // udot v31.4s, v5.16b, v2.4b[3]\n"
- "add x20, x20, x28\n"
+ ".inst 0x6fbde121 // udot v1.4s, v9.16b, v29.4b[1]\n"
"add %x[params], %x[params], #0x20\n"
- ".inst 0x6f82e0f8 // udot v24.4s, v7.16b, v2.4b[0]\n"
- ".inst 0x6fa2e0f9 // udot v25.4s, v7.16b, v2.4b[1]\n"
- "sqrdmulh v24.4s, v24.4s, v21.4s\n"
- ".inst 0x6f82e8fa // udot v26.4s, v7.16b, v2.4b[2]\n"
- ".inst 0x6fa2e8fb // udot v27.4s, v7.16b, v2.4b[3]\n"
- "sqrdmulh v25.4s, v25.4s, v21.4s\n"
- ".inst 0x6f83e0dc // udot v28.4s, v6.16b, v3.4b[0]\n"
- ".inst 0x6fa3e0dd // udot v29.4s, v6.16b, v3.4b[1]\n"
- "sqrdmulh v26.4s, v26.4s, v21.4s\n"
- ".inst 0x6f83e8de // udot v30.4s, v6.16b, v3.4b[2]\n"
- ".inst 0x6fa3e8df // udot v31.4s, v6.16b, v3.4b[3]\n"
- "sqrdmulh v27.4s, v27.4s, v21.4s\n"
- ".inst 0x6f84e0fc // udot v28.4s, v7.16b, v4.4b[0]\n"
- ".inst 0x6fa4e0fd // udot v29.4s, v7.16b, v4.4b[1]\n"
- "and v19.16b, v24.16b, v20.16b\n"
- ".inst 0x6f84e8fe // udot v30.4s, v7.16b, v4.4b[2]\n"
- ".inst 0x6fa4e8ff // udot v31.4s, v7.16b, v4.4b[3]\n"
- "and v18.16b, v25.16b, v20.16b\n"
- "and v17.16b, v26.16b, v20.16b\n"
- "and v16.16b, v27.16b, v20.16b\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
+ ".inst 0x6f9de936 // udot v22.4s, v9.16b, v29.4b[2]\n"
+ ".inst 0x6fbde939 // udot v25.4s, v9.16b, v29.4b[3]\n"
+ ".inst 0x6f82e12b // udot v11.4s, v9.16b, v2.4b[0]\n"
+ ".inst 0x6fa2e123 // udot v3.4s, v9.16b, v2.4b[1]\n"
+ ".inst 0x6f82e933 // udot v19.4s, v9.16b, v2.4b[2]\n"
+ ".inst 0x6fa2e935 // udot v21.4s, v9.16b, v2.4b[3]\n"
+ ".inst 0x6f80e15a // udot v26.4s, v10.16b, v0.4b[0]\n"
+ ".inst 0x6fa0e141 // udot v1.4s, v10.16b, v0.4b[1]\n"
+ ".inst 0x6f80e956 // udot v22.4s, v10.16b, v0.4b[2]\n"
+ ".inst 0x6fa0e959 // udot v25.4s, v10.16b, v0.4b[3]\n"
+ ".inst 0x6f87e14b // udot v11.4s, v10.16b, v7.4b[0]\n"
+ ".inst 0x6fa7e143 // udot v3.4s, v10.16b, v7.4b[1]\n"
+ ".inst 0x6f87e953 // udot v19.4s, v10.16b, v7.4b[2]\n"
+ ".inst 0x6fa7e955 // udot v21.4s, v10.16b, v7.4b[3]\n"
+ "sqrdmulh v26.4s, v26.4s, v20.4s\n"
+ "sqrdmulh v1.4s, v1.4s, v20.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v20.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v20.4s\n"
+ "and v30.16b, v26.16b, v4.16b\n"
+ "and v17.16b, v1.16b, v4.16b\n"
+ "and v16.16b, v22.16b, v4.16b\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqrdmulh v28.4s, v28.4s, v21.4s\n"
- "sqrdmulh v29.4s, v29.4s, v21.4s\n"
- "sqrdmulh v30.4s, v30.4s, v21.4s\n"
- "sqrdmulh v31.4s, v31.4s, v21.4s\n"
- "sqadd v24.4s, v24.4s, v19.4s\n"
- "sqadd v25.4s, v25.4s, v18.4s\n"
- "sqadd v26.4s, v26.4s, v17.4s\n"
- "sqadd v27.4s, v27.4s, v16.4s\n"
- "and v19.16b, v28.16b, v20.16b\n"
- "and v18.16b, v29.16b, v20.16b\n"
- "and v17.16b, v30.16b, v20.16b\n"
- "and v16.16b, v31.16b, v20.16b\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v26.4s, v26.4s, v30.4s\n"
+ "sqadd v1.4s, v1.4s, v17.4s\n"
+ "sqadd v22.4s, v22.4s, v16.4s\n"
+ "and v16.16b, v25.16b, v4.16b\n"
+ "srshl v26.4s, v26.4s, v4.4s\n"
+ "srshl v1.4s, v1.4s, v4.4s\n"
+ "srshl v22.4s, v22.4s, v4.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqadd v28.4s, v28.4s, v19.4s\n"
- "sqadd v29.4s, v29.4s, v18.4s\n"
- "sqadd v30.4s, v30.4s, v17.4s\n"
- "sqadd v31.4s, v31.4s, v16.4s\n"
- "srshl v24.4s, v24.4s, v20.4s\n"
- "srshl v25.4s, v25.4s, v20.4s\n"
- "srshl v26.4s, v26.4s, v20.4s\n"
- "srshl v27.4s, v27.4s, v20.4s\n"
- "srshl v28.4s, v28.4s, v20.4s\n"
- "srshl v29.4s, v29.4s, v20.4s\n"
- "srshl v30.4s, v30.4s, v20.4s\n"
- "srshl v31.4s, v31.4s, v20.4s\n"
- "add v24.4s, v24.4s, v12.4s\n"
- "add v25.4s, v25.4s, v12.4s\n"
- "add v26.4s, v26.4s, v12.4s\n"
- "add v27.4s, v27.4s, v12.4s\n"
- "add v28.4s, v28.4s, v12.4s\n"
- "add v29.4s, v29.4s, v12.4s\n"
- "add v30.4s, v30.4s, v12.4s\n"
- "add v31.4s, v31.4s, v12.4s\n"
- "smin v24.4s, v24.4s, v10.4s\n"
- "smin v25.4s, v25.4s, v10.4s\n"
- "smin v26.4s, v26.4s, v10.4s\n"
- "smin v27.4s, v27.4s, v10.4s\n"
- "smin v28.4s, v28.4s, v10.4s\n"
- "smin v29.4s, v29.4s, v10.4s\n"
- "smin v30.4s, v30.4s, v10.4s\n"
- "smin v31.4s, v31.4s, v10.4s\n"
- "smax v24.4s, v24.4s, v11.4s\n"
- "smax v25.4s, v25.4s, v11.4s\n"
- "smax v26.4s, v26.4s, v11.4s\n"
- "smax v27.4s, v27.4s, v11.4s\n"
- "smax v28.4s, v28.4s, v11.4s\n"
- "smax v29.4s, v29.4s, v11.4s\n"
- "smax v30.4s, v30.4s, v11.4s\n"
- "smax v31.4s, v31.4s, v11.4s\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "add v26.4s, v26.4s, v14.4s\n"
+ "add v1.4s, v1.4s, v14.4s\n"
+ "add v22.4s, v22.4s, v14.4s\n"
+ "smin v26.4s, v26.4s, v23.4s\n"
+ "smin v1.4s, v1.4s, v23.4s\n"
+ "smin v22.4s, v22.4s, v23.4s\n"
+ "smax v26.4s, v26.4s, v27.4s\n"
+ "smax v1.4s, v1.4s, v27.4s\n"
+ "smax v22.4s, v22.4s, v27.4s\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "uzp1 v1.16b, v1.16b, v1.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
+ "uzp1 v1.16b, v1.16b, v1.16b\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "sqadd v25.4s, v25.4s, v16.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "sqrdmulh v11.4s, v11.4s, v20.4s\n"
+ "sqrdmulh v3.4s, v3.4s, v20.4s\n"
+ "srshl v25.4s, v25.4s, v4.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v20.4s\n"
+ "and v16.16b, v11.16b, v4.16b\n"
+ "and v17.16b, v3.16b, v4.16b\n"
+ "add v25.4s, v25.4s, v14.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "smin v25.4s, v25.4s, v23.4s\n"
+ "sqadd v11.4s, v11.4s, v16.4s\n"
+ "sqadd v3.4s, v3.4s, v17.4s\n"
+ "smax v25.4s, v25.4s, v27.4s\n"
+ "and v16.16b, v19.16b, v4.16b\n"
+ "srshl v11.4s, v11.4s, v4.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "srshl v3.4s, v3.4s, v4.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "add v11.4s, v11.4s, v14.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "add v3.4s, v3.4s, v14.4s\n"
+ "smin v11.4s, v11.4s, v23.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "smin v3.4s, v3.4s, v23.4s\n"
+ "smax v11.4s, v11.4s, v27.4s\n"
+ "sqrdmulh v21.4s, v21.4s, v20.4s\n"
+ "smax v3.4s, v3.4s, v27.4s\n"
+ "uzp1 v11.16b, v11.16b, v11.16b\n"
+ "srshl v19.4s, v19.4s, v4.4s\n"
+ "uzp1 v11.16b, v11.16b, v11.16b\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "and v16.16b, v21.16b, v4.16b\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "add v19.4s, v19.4s, v14.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smin v19.4s, v19.4s, v23.4s\n"
+ "sqadd v21.4s, v21.4s, v16.4s\n"
+ "smax v19.4s, v19.4s, v27.4s\n"
+ "srshl v21.4s, v21.4s, v4.4s\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "add v21.4s, v21.4s, v14.4s\n"
+ "smin v21.4s, v21.4s, v23.4s\n"
+ "smax v21.4s, v21.4s, v27.4s\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
"blt 3f\n"
- "str s24, [x27, #0x0]\n"
- "str s25, [x26, #0x0]\n"
- "str s26, [x25, #0x0]\n"
- "str s27, [x24, #0x0]\n"
- "str s28, [x23, #0x0]\n"
- "str s29, [x22, #0x0]\n"
- "str s30, [x21, #0x0]\n"
- "str s31, [x20, #0x0]\n"
+ "str s26, [x26, #0x0]\n"
+ "str s1, [x25, #0x0]\n"
+ "str s22, [x24, #0x0]\n"
+ "str s25, [x23, #0x0]\n"
+ "str s11, [x22, #0x0]\n"
+ "str s3, [x21, #0x0]\n"
+ "str s19, [x20, #0x0]\n"
+ "str s21, [x19, #0x0]\n"
"b 4f\n"
"3:" // Tail: Oddments
+ "st1 { v26.b }[0], [x26], #0x1\n"
"subs %x[n_channels], %x[n_channels], #0x1\n"
- "st1 { v24.b }[0], [x27], #0x1\n"
- "st1 { v25.b }[0], [x26], #0x1\n"
- "st1 { v26.b }[0], [x25], #0x1\n"
- "st1 { v27.b }[0], [x24], #0x1\n"
- "st1 { v28.b }[0], [x23], #0x1\n"
- "st1 { v29.b }[0], [x22], #0x1\n"
- "st1 { v30.b }[0], [x21], #0x1\n"
- "st1 { v31.b }[0], [x20], #0x1\n"
+ "st1 { v1.b }[0], [x25], #0x1\n"
+ "st1 { v22.b }[0], [x24], #0x1\n"
+ "st1 { v25.b }[0], [x23], #0x1\n"
+ "st1 { v11.b }[0], [x22], #0x1\n"
+ "st1 { v3.b }[0], [x21], #0x1\n"
+ "st1 { v19.b }[0], [x20], #0x1\n"
+ "st1 { v21.b }[0], [x19], #0x1\n"
"beq 4f\n"
+ "st1 { v26.b }[1], [x26], #0x1\n"
"subs %x[n_channels], %x[n_channels], #0x1\n"
- "st1 { v24.b }[1], [x27], #0x1\n"
- "st1 { v25.b }[1], [x26], #0x1\n"
- "st1 { v26.b }[1], [x25], #0x1\n"
- "st1 { v27.b }[1], [x24], #0x1\n"
- "st1 { v28.b }[1], [x23], #0x1\n"
- "st1 { v29.b }[1], [x22], #0x1\n"
- "st1 { v30.b }[1], [x21], #0x1\n"
- "st1 { v31.b }[1], [x20], #0x1\n"
+ "st1 { v1.b }[1], [x25], #0x1\n"
+ "st1 { v22.b }[1], [x24], #0x1\n"
+ "st1 { v25.b }[1], [x23], #0x1\n"
+ "st1 { v11.b }[1], [x22], #0x1\n"
+ "st1 { v3.b }[1], [x21], #0x1\n"
+ "st1 { v19.b }[1], [x20], #0x1\n"
+ "st1 { v21.b }[1], [x19], #0x1\n"
"beq 4f\n"
+ "st1 { v26.b }[2], [x26], #0x1\n"
"subs %x[n_channels], %x[n_channels], #0x1\n"
- "st1 { v24.b }[2], [x27], #0x1\n"
- "st1 { v25.b }[2], [x26], #0x1\n"
- "st1 { v26.b }[2], [x25], #0x1\n"
- "st1 { v27.b }[2], [x24], #0x1\n"
- "st1 { v28.b }[2], [x23], #0x1\n"
- "st1 { v29.b }[2], [x22], #0x1\n"
- "st1 { v30.b }[2], [x21], #0x1\n"
- "st1 { v31.b }[2], [x20], #0x1\n"
+ "st1 { v1.b }[2], [x25], #0x1\n"
+ "st1 { v22.b }[2], [x24], #0x1\n"
+ "st1 { v25.b }[2], [x23], #0x1\n"
+ "st1 { v11.b }[2], [x22], #0x1\n"
+ "st1 { v3.b }[2], [x21], #0x1\n"
+ "st1 { v19.b }[2], [x20], #0x1\n"
+ "st1 { v21.b }[2], [x19], #0x1\n"
"beq 4f\n"
- "st1 { v24.b }[3], [x27], #0x1\n"
+ "st1 { v26.b }[3], [x26], #0x1\n"
"subs %x[n_channels], %x[n_channels], #0x1\n"
- "st1 { v25.b }[3], [x26], #0x1\n"
- "st1 { v26.b }[3], [x25], #0x1\n"
- "st1 { v27.b }[3], [x24], #0x1\n"
- "st1 { v28.b }[3], [x23], #0x1\n"
- "st1 { v29.b }[3], [x22], #0x1\n"
- "st1 { v30.b }[3], [x21], #0x1\n"
- "st1 { v31.b }[3], [x20], #0x1\n"
+ "st1 { v1.b }[3], [x25], #0x1\n"
+ "st1 { v22.b }[3], [x24], #0x1\n"
+ "st1 { v25.b }[3], [x23], #0x1\n"
+ "st1 { v11.b }[3], [x22], #0x1\n"
+ "st1 { v3.b }[3], [x21], #0x1\n"
+ "st1 { v19.b }[3], [x20], #0x1\n"
+ "st1 { v21.b }[3], [x19], #0x1\n"
"4:" // Tail: End
+ "add SP, SP, #0x80\n"
: [n_channels] "+&r" (n_output_channels), [params] "+&r" (params)
: [inptrs] "r" (inptrs), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
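The preamble of the dot-product kernels above and below appears to build the activation-sum correction with udot against a constant vector of byte ones: in the 3x3 kernel above, movi v5.16b, #0x1 followed by ushr v5.4s, v5.4s, #0x8 leaves each 32-bit lane holding the bytes {1, 1, 1, 0}, so a udot against an input lane sums a three-byte window, while the 5x5 kernel below pairs an all-ones vector (four taps) with a {1, 0, 0, 0} vector for the fifth tap. The resulting sums are multiplied by the negated b_offset (the neg/mul pair) and folded into the bias-initialised accumulators before the main loop, compensating for the weights being stored with a +b_offset shift. A scalar sketch of the idea for the three-tap case; the helper name is hypothetical, not part of the library:

    #include <cstdint>

    // Models "udot vacc.4s, vones.16b, vin.4b[i]" where each vones lane
    // holds the bytes {1, 1, 1, 0}: the accumulator lane receives the sum
    // of three consecutive input bytes. Scaled by -b_offset, this is the
    // correction term for weights stored with a +b_offset shift.
    int32_t input_sum_correction(const uint8_t x[3], int32_t b_offset)
    {
        int32_t sum = x[0] + x[1] + x[2]; // what the masked udot accumulates
        return -b_offset * sum;           // the neg v.4s / mul v.4s pair
    }

On the reverted (old) code path these corrections are computed once up front and spilled to the stack (the str q..., [SP, #...] sequence), then reloaded each iteration to re-seed the accumulators.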
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp
index 027cc9e5a2..40242e9718 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,596 +40,622 @@ void a64_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst_impl
)
{
__asm__ __volatile__(
- "ldr q12, [%x[params], #0x0]\n"
- "ldr q8, [%x[params], #0x10]\n"
- "movi v28.16b, #0x1\n"
- "movi v18.4s, #0x0\n"
- "ldr q9, [%x[params], #0x20]\n"
- "ldr q10, [%x[params], #0x30]\n"
- "movi v31.4s, #0x0\n"
+ "movi v15.16b, #0x1\n"
+ "ldr x21, [%x[inptrs], #0x0]\n"
+ "add SP, SP, #-0x80\n"
+ "movi v14.4s, #0x1\n"
+ "ldr x20, [%x[inptrs], #0x8]\n"
+ "add x22, %x[qp], %[offsetof_Requantize32_b_offset]\n"
+ "movi v28.4s, #0x0\n"
+ "ldr x19, [%x[inptrs], #0x10]\n"
+ "mov x11, #0x0\n"
+ "movi v27.4s, #0x0\n"
+ "ld1 { v13.16b }, [x21]\n"
+ "mov x10, #0x0\n"
+ "movi v26.4s, #0x0\n"
+ "ld1 { v12.16b }, [x20]\n"
+ "add x9, %x[qp], %[offsetof_Requantize32_c_offset]\n"
+ "movi v25.4s, #0x0\n"
+ "ld1 { v7.16b }, [x19]\n"
+ "add x28, %x[qp], %[offsetof_Requantize32_minval]\n"
"movi v24.4s, #0x0\n"
- "ldr q11, [%x[params], #0x40]\n"
- "ldr x20, [%x[inptrs], #0x18]\n"
- "movi v30.4s, #0x0\n"
- "movi v21.4s, #0x0\n"
- "ld1 { v3.16b }, [x20]\n"
+ "ldr x21, [%x[inptrs], #0x18]\n"
+ "add x27, %x[qp], %[offsetof_Requantize32_maxval]\n"
+ "mov v18.16b, v13.16b\n"
"ldr x20, [%x[inptrs], #0x20]\n"
- "mov v16.16b, v3.16b\n"
- "ext v16.16b, v16.16b, v16.16b, #0x1\n"
- "ld1 { v4.16b }, [x20]\n"
- "ldr x20, [%x[inptrs], #0x10]\n"
- "mov v15.16b, v4.16b\n"
- "ext v15.16b, v15.16b, v15.16b, #0x1\n"
- "ld1 { v2.16b }, [x20]\n"
- "ldr x20, [%x[inptrs], #0x8]\n"
- "mov v20.16b, v2.16b\n"
- "ext v20.16b, v20.16b, v20.16b, #0x1\n"
- "ld1 { v1.16b }, [x20]\n"
- "ldr x20, [%x[inptrs], #0x28]\n"
- "zip1 v3.2d, v3.2d, v16.2d\n"
- "zip1 v4.2d, v4.2d, v15.2d\n"
+ "cmp %x[n_channels], #0x4\n"
+ "ext v18.16b, v18.16b, v18.16b, #0x1\n"
+ "ldr x19, [%x[inptrs], #0x28]\n"
+ "mov v17.16b, v12.16b\n"
+ "ld1 { v6.16b }, [x21]\n"
+ "ext v17.16b, v17.16b, v17.16b, #0x1\n"
"ld1 { v5.16b }, [x20]\n"
+ "mov v16.16b, v7.16b\n"
+ "ld1 { v4.16b }, [x19]\n"
+ "ext v16.16b, v16.16b, v16.16b, #0x1\n"
"ldr x20, [%x[inptrs], #0x30]\n"
- "mov v26.16b, v1.16b\n"
- "mov v13.16b, v5.16b\n"
- "ld1 { v6.16b }, [x20]\n"
- "ldr x20, [%x[inptrs], #0x38]\n"
- "mov v19.16b, v6.16b\n"
- "ext v26.16b, v26.16b, v26.16b, #0x1\n"
- "ld1 { v7.16b }, [x20]\n"
- "ldr x20, [%x[inptrs], #0x0]\n"
- "mov v17.16b, v7.16b\n"
- "zip1 v2.2d, v2.2d, v20.2d\n"
- "ld1 { v0.16b }, [x20]\n"
- "ext v13.16b, v13.16b, v13.16b, #0x1\n"
- "ext v19.16b, v19.16b, v19.16b, #0x1\n"
- ".inst 0x6f83e392 // udot v18.4s, v28.16b, v3.4b[0]\n"
+ "zip1 v13.2d, v13.2d, v18.2d\n"
+ "ldr x19, [%x[inptrs], #0x38]\n"
+ "zip1 v12.2d, v12.2d, v17.2d\n"
+ "ld1r { v3.4s }, [x22]\n"
+ "mov v18.16b, v6.16b\n"
+ "ld1 { v2.16b }, [x20]\n"
+ "zip1 v7.2d, v7.2d, v16.2d\n"
+ "ld1 { v1.16b }, [x19]\n"
+ "ext v18.16b, v18.16b, v18.16b, #0x1\n"
+ "ldp x26, x25, [%x[outptrs], #0x0]\n"
+ "mov v17.16b, v5.16b\n"
+ "ldp x24, x23, [%x[outptrs], #0x10]\n"
"ext v17.16b, v17.16b, v17.16b, #0x1\n"
- ".inst 0x6f83eb9f // udot v31.4s, v28.16b, v3.4b[2]\n"
- ".inst 0x6f84e398 // udot v24.4s, v28.16b, v4.4b[0]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_b_offset]\n"
- "ld1r { v23.4s }, [x20]\n"
- ".inst 0x6f84eb9e // udot v30.4s, v28.16b, v4.4b[2]\n"
- "mov v16.16b, v0.16b\n"
- ".inst 0x6f82e395 // udot v21.4s, v28.16b, v2.4b[0]\n"
- "movi v20.4s, #0x0\n"
- "movi v29.4s, #0x1\n"
- ".inst 0x6f82eb94 // udot v20.4s, v28.16b, v2.4b[2]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v14.4s }, [x20]\n"
+ "ldp x22, x21, [%x[outptrs], #0x20]\n"
+ "mov v16.16b, v4.16b\n"
+ "ldp x20, x19, [%x[outptrs], #0x30]\n"
+ "zip1 v6.2d, v6.2d, v18.2d\n"
+ "ld1r { v0.4s }, [x9]\n"
"ext v16.16b, v16.16b, v16.16b, #0x1\n"
- "zip1 v1.2d, v1.2d, v26.2d\n"
- ".inst 0x6fa3e3b2 // udot v18.4s, v29.16b, v3.4b[1]\n"
- "zip1 v5.2d, v5.2d, v13.2d\n"
- "zip1 v6.2d, v6.2d, v19.2d\n"
- ".inst 0x6fa3ebbf // udot v31.4s, v29.16b, v3.4b[3]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_minval]\n"
- "ld1r { v13.4s }, [x20]\n"
- "zip1 v7.2d, v7.2d, v17.2d\n"
+ "ld1r { v31.4s }, [x28]\n"
+ "zip1 v5.2d, v5.2d, v17.2d\n"
+ "ld1r { v30.4s }, [x27]\n"
+ "mov v17.16b, v2.16b\n"
+ "ldr q29, [%x[params], #0x0]\n"
+ "ext v17.16b, v17.16b, v17.16b, #0x1\n"
+ "ldr q8, [%x[params], #0x10]\n"
+ "zip1 v4.2d, v4.2d, v16.2d\n"
+ "ldr q9, [%x[params], #0x20]\n"
+ "mov v16.16b, v1.16b\n"
+ "ldr q10, [%x[params], #0x30]\n"
+ "ext v16.16b, v16.16b, v16.16b, #0x1\n"
+ "ldr q11, [%x[params], #0x40]\n"
+ "add %x[params], %x[params], #0x50\n"
+ "zip1 v2.2d, v2.2d, v17.2d\n"
+ "movi v23.4s, #0x0\n"
"movi v22.4s, #0x0\n"
- ".inst 0x6fa4e3b8 // udot v24.4s, v29.16b, v4.4b[1]\n"
- "movi v26.4s, #0x0\n"
- ".inst 0x6fa4ebbe // udot v30.4s, v29.16b, v4.4b[3]\n"
- ".inst 0x6f81e396 // udot v22.4s, v28.16b, v1.4b[0]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_maxval]\n"
- "ld1r { v15.4s }, [x20]\n"
- "movi v25.4s, #0x0\n"
- "movi v27.4s, #0x0\n"
- ".inst 0x6f81eb9a // udot v26.4s, v28.16b, v1.4b[2]\n"
- "zip1 v0.2d, v0.2d, v16.2d\n"
- "movi v19.4s, #0x0\n"
- ".inst 0x6f85e399 // udot v25.4s, v28.16b, v5.4b[0]\n"
- "cmp %x[n_channels], #0x4\n"
- ".inst 0x6f85eb9b // udot v27.4s, v28.16b, v5.4b[2]\n"
- ".inst 0x6f86e393 // udot v19.4s, v28.16b, v6.4b[0]\n"
- "add v24.4s, v18.4s, v24.4s\n"
- "mov x9, #0x0\n"
+ "zip1 v1.2d, v1.2d, v16.2d\n"
+ "movi v21.4s, #0x0\n"
"movi v18.4s, #0x0\n"
- ".inst 0x6f86eb92 // udot v18.4s, v28.16b, v6.4b[2]\n"
- ".inst 0x6fa2e3b5 // udot v21.4s, v29.16b, v2.4b[1]\n"
- "mov x28, #0x0\n"
- ".inst 0x6fa2ebb4 // udot v20.4s, v29.16b, v2.4b[3]\n"
- "add v17.4s, v31.4s, v30.4s\n"
- ".inst 0x6fa1e3b6 // udot v22.4s, v29.16b, v1.4b[1]\n"
- "ldp x27, x26, [%x[outptrs], #0x0]\n"
- "movi v16.4s, #0x0\n"
- ".inst 0x6f87e390 // udot v16.4s, v28.16b, v7.4b[0]\n"
- ".inst 0x6fa1ebba // udot v26.4s, v29.16b, v1.4b[3]\n"
- "ldp x25, x24, [%x[outptrs], #0x10]\n"
- ".inst 0x6fa5e3b9 // udot v25.4s, v29.16b, v5.4b[1]\n"
- ".inst 0x6fa5ebbb // udot v27.4s, v29.16b, v5.4b[3]\n"
- "add v30.4s, v21.4s, v24.4s\n"
- "ldp x23, x22, [%x[outptrs], #0x20]\n"
- ".inst 0x6fa6e3b3 // udot v19.4s, v29.16b, v6.4b[1]\n"
- ".inst 0x6fa6ebb2 // udot v18.4s, v29.16b, v6.4b[3]\n"
- "add v31.4s, v20.4s, v17.4s\n"
- "ldp x21, x20, [%x[outptrs], #0x30]\n"
- ".inst 0x6fa7e3b0 // udot v16.4s, v29.16b, v7.4b[1]\n"
- "add v22.4s, v22.4s, v30.4s\n"
- "add %x[params], %x[params], #0x50\n"
- "add v21.4s, v26.4s, v31.4s\n"
- "add v20.4s, v25.4s, v19.4s\n"
- "add v19.4s, v27.4s, v18.4s\n"
- "add v18.4s, v16.4s, v24.4s\n"
+ "movi v17.4s, #0x0\n"
"movi v16.4s, #0x0\n"
- ".inst 0x6f87eb90 // udot v16.4s, v28.16b, v7.4b[2]\n"
- ".inst 0x6fa7ebb0 // udot v16.4s, v29.16b, v7.4b[3]\n"
- "add v17.4s, v16.4s, v17.4s\n"
- "movi v16.4s, #0x0\n"
- ".inst 0x6f80e390 // udot v16.4s, v28.16b, v0.4b[0]\n"
- ".inst 0x6fa0e3b0 // udot v16.4s, v29.16b, v0.4b[1]\n"
- "add v24.4s, v22.4s, v16.4s\n"
- "add v26.4s, v22.4s, v25.4s\n"
+ "movi v20.4s, #0x0\n"
+ "movi v19.4s, #0x0\n"
+ ".inst 0x6f8de1fc // udot v28.4s, v15.16b, v13.4b[0]\n"
+ ".inst 0x6f8de9fb // udot v27.4s, v15.16b, v13.4b[2]\n"
+ ".inst 0x6f8ce1fa // udot v26.4s, v15.16b, v12.4b[0]\n"
+ ".inst 0x6f8ce9f9 // udot v25.4s, v15.16b, v12.4b[2]\n"
+ ".inst 0x6fade1dc // udot v28.4s, v14.16b, v13.4b[1]\n"
+ ".inst 0x6fade9db // udot v27.4s, v14.16b, v13.4b[3]\n"
+ ".inst 0x6face1da // udot v26.4s, v14.16b, v12.4b[1]\n"
+ ".inst 0x6face9d9 // udot v25.4s, v14.16b, v12.4b[3]\n"
+ ".inst 0x6f87e1f8 // udot v24.4s, v15.16b, v7.4b[0]\n"
+ ".inst 0x6f87e9f7 // udot v23.4s, v15.16b, v7.4b[2]\n"
+ ".inst 0x6f86e1f6 // udot v22.4s, v15.16b, v6.4b[0]\n"
+ ".inst 0x6f86e9f5 // udot v21.4s, v15.16b, v6.4b[2]\n"
+ ".inst 0x6fa7e1d8 // udot v24.4s, v14.16b, v7.4b[1]\n"
+ ".inst 0x6fa7e9d7 // udot v23.4s, v14.16b, v7.4b[3]\n"
+ ".inst 0x6fa6e1d6 // udot v22.4s, v14.16b, v6.4b[1]\n"
+ ".inst 0x6fa6e9d5 // udot v21.4s, v14.16b, v6.4b[3]\n"
+ ".inst 0x6f85e1f2 // udot v18.4s, v15.16b, v5.4b[0]\n"
+ ".inst 0x6f85e9f1 // udot v17.4s, v15.16b, v5.4b[2]\n"
+ ".inst 0x6f84e1f0 // udot v16.4s, v15.16b, v4.4b[0]\n"
+ ".inst 0x6f84e9f4 // udot v20.4s, v15.16b, v4.4b[2]\n"
+ ".inst 0x6fa5e1d2 // udot v18.4s, v14.16b, v5.4b[1]\n"
+ ".inst 0x6fa5e9d1 // udot v17.4s, v14.16b, v5.4b[3]\n"
+ ".inst 0x6fa4e1d0 // udot v16.4s, v14.16b, v4.4b[1]\n"
+ ".inst 0x6fa4e9d4 // udot v20.4s, v14.16b, v4.4b[3]\n"
+ ".inst 0x6f82e1f3 // udot v19.4s, v15.16b, v2.4b[0]\n"
+ "mov v28.16b, v28.16b\n"
+ "mov v27.16b, v27.16b\n"
+ "add v28.4s, v28.4s, v26.4s\n"
+ ".inst 0x6fa2e1d3 // udot v19.4s, v14.16b, v2.4b[1]\n"
+ "add v27.4s, v27.4s, v25.4s\n"
+ "add v28.4s, v28.4s, v24.4s\n"
+ "mov v26.16b, v26.16b\n"
+ "add v27.4s, v27.4s, v23.4s\n"
+ "add v28.4s, v28.4s, v22.4s\n"
+ "mov v25.16b, v25.16b\n"
+ "add v27.4s, v27.4s, v21.4s\n"
+ "add v28.4s, v28.4s, v18.4s\n"
+ "add v26.4s, v26.4s, v24.4s\n"
+ "add v27.4s, v27.4s, v17.4s\n"
+ "add v25.4s, v25.4s, v23.4s\n"
+ "add v26.4s, v26.4s, v22.4s\n"
+ "mov v24.16b, v24.16b\n"
+ "add v25.4s, v25.4s, v21.4s\n"
+ "add v26.4s, v26.4s, v18.4s\n"
+ "mov v23.16b, v23.16b\n"
+ "add v25.4s, v25.4s, v17.4s\n"
+ "add v26.4s, v26.4s, v16.4s\n"
+ "add v24.4s, v24.4s, v22.4s\n"
+ "add v25.4s, v25.4s, v20.4s\n"
+ "add v23.4s, v23.4s, v21.4s\n"
+ "add v24.4s, v24.4s, v18.4s\n"
+ "mov v22.16b, v22.16b\n"
+ "add v23.4s, v23.4s, v17.4s\n"
+ "add v24.4s, v24.4s, v16.4s\n"
+ "mov v21.16b, v21.16b\n"
+ "add v23.4s, v23.4s, v20.4s\n"
+ "add v24.4s, v24.4s, v19.4s\n"
+ "add v22.4s, v22.4s, v18.4s\n"
+ "movi v18.4s, #0x0\n"
+ ".inst 0x6f82e9f2 // udot v18.4s, v15.16b, v2.4b[2]\n"
+ "add v21.4s, v21.4s, v17.4s\n"
+ "movi v17.4s, #0x0\n"
+ ".inst 0x6f81e1f1 // udot v17.4s, v15.16b, v1.4b[0]\n"
+ ".inst 0x6fa2e9d2 // udot v18.4s, v14.16b, v2.4b[3]\n"
+ "add v22.4s, v22.4s, v16.4s\n"
"movi v16.4s, #0x0\n"
- ".inst 0x6f80eb90 // udot v16.4s, v28.16b, v0.4b[2]\n"
- ".inst 0x6fa0ebb0 // udot v16.4s, v29.16b, v0.4b[3]\n"
- "add v25.4s, v21.4s, v16.4s\n"
- "add v27.4s, v21.4s, v27.4s\n"
- "add v28.4s, v20.4s, v30.4s\n"
- "add v29.4s, v19.4s, v31.4s\n"
- "add v30.4s, v18.4s, v20.4s\n"
- "add v31.4s, v17.4s, v19.4s\n"
- "neg v23.4s, v23.4s\n"
- "mul v24.4s, v24.4s, v23.4s\n"
- "mul v25.4s, v25.4s, v23.4s\n"
- "mul v26.4s, v26.4s, v23.4s\n"
- "mul v27.4s, v27.4s, v23.4s\n"
- "mul v28.4s, v28.4s, v23.4s\n"
- "mul v29.4s, v29.4s, v23.4s\n"
- "mul v30.4s, v30.4s, v23.4s\n"
- "mul v31.4s, v31.4s, v23.4s\n"
- "zip1 v19.4s, v24.4s, v26.4s\n"
- "zip1 v18.4s, v25.4s, v27.4s\n"
- "zip1 v17.4s, v28.4s, v30.4s\n"
- "zip1 v16.4s, v29.4s, v31.4s\n"
- "zip1 v22.4s, v19.4s, v18.4s\n"
- "zip1 v23.4s, v17.4s, v16.4s\n"
- "add v24.4s, v24.4s, v12.4s\n"
- "add v25.4s, v25.4s, v12.4s\n"
- "add v26.4s, v26.4s, v12.4s\n"
- "add v27.4s, v27.4s, v12.4s\n"
- "add v28.4s, v28.4s, v12.4s\n"
- "add v29.4s, v29.4s, v12.4s\n"
- "add v30.4s, v30.4s, v12.4s\n"
- "add v31.4s, v31.4s, v12.4s\n"
+ ".inst 0x6fa1e1d1 // udot v17.4s, v14.16b, v1.4b[1]\n"
+ ".inst 0x6f81e9f0 // udot v16.4s, v15.16b, v1.4b[2]\n"
+ "add v23.4s, v23.4s, v18.4s\n"
+ "add v21.4s, v21.4s, v20.4s\n"
+ "add v22.4s, v22.4s, v19.4s\n"
+ ".inst 0x6fa1e9d0 // udot v16.4s, v14.16b, v1.4b[3]\n"
+ "add v21.4s, v21.4s, v18.4s\n"
+ "add v22.4s, v22.4s, v17.4s\n"
+ "neg v3.4s, v3.4s\n"
+ "add v21.4s, v21.4s, v16.4s\n"
+ "mul v28.4s, v28.4s, v3.4s\n"
+ "str q28, [SP, #0x0]\n"
+ "mul v27.4s, v27.4s, v3.4s\n"
+ "mul v26.4s, v26.4s, v3.4s\n"
+ "str q27, [SP, #0x10]\n"
+ "mul v25.4s, v25.4s, v3.4s\n"
+ "mul v24.4s, v24.4s, v3.4s\n"
+ "str q26, [SP, #0x20]\n"
+ "mul v23.4s, v23.4s, v3.4s\n"
+ "str q25, [SP, #0x30]\n"
+ "mul v22.4s, v22.4s, v3.4s\n"
+ "mul v21.4s, v21.4s, v3.4s\n"
+ "str q24, [SP, #0x40]\n"
+ "add v28.4s, v28.4s, v29.4s\n"
+ "str q23, [SP, #0x50]\n"
+ "add v27.4s, v27.4s, v29.4s\n"
+ "str q22, [SP, #0x60]\n"
+ "add v26.4s, v26.4s, v29.4s\n"
+ "add v25.4s, v25.4s, v29.4s\n"
+ "str q21, [SP, #0x70]\n"
+ "add v24.4s, v24.4s, v29.4s\n"
+ "add v23.4s, v23.4s, v29.4s\n"
+ "add v22.4s, v22.4s, v29.4s\n"
+ "add v21.4s, v21.4s, v29.4s\n"
"ble 2f\n"
"1:" // Loop
- "ldr q21, [%x[params], #0x60]\n"
- "ldr q20, [%x[params], #0x70]\n"
- ".inst 0x6f80e118 // udot v24.4s, v8.16b, v0.4b[0]\n"
- ".inst 0x6f80e919 // udot v25.4s, v8.16b, v0.4b[2]\n"
- "ldr q12, [%x[params], #0x80]\n"
- ".inst 0x6f81e11a // udot v26.4s, v8.16b, v1.4b[0]\n"
- ".inst 0x6f81e91b // udot v27.4s, v8.16b, v1.4b[2]\n"
+ ".inst 0x6f8de11c // udot v28.4s, v8.16b, v13.4b[0]\n"
+ "ldr q20, [%x[params], #0x60]\n"
+ "add x11, x11, #0x10\n"
+ ".inst 0x6f8de91b // udot v27.4s, v8.16b, v13.4b[2]\n"
+ "ldr q19, [%x[params], #0x70]\n"
"sub %x[n_channels], %x[n_channels], #0x4\n"
- ".inst 0x6fa0e138 // udot v24.4s, v9.16b, v0.4b[1]\n"
- ".inst 0x6fa0e939 // udot v25.4s, v9.16b, v0.4b[3]\n"
+ ".inst 0x6f8ce11a // udot v26.4s, v8.16b, v12.4b[0]\n"
+ "ldr q29, [%x[params], #0x80]\n"
"cmp %x[n_channels], #0x4\n"
- "add x9, x9, #0x10\n"
- ".inst 0x6fa1e13a // udot v26.4s, v9.16b, v1.4b[1]\n"
- ".inst 0x6fa1e93b // udot v27.4s, v9.16b, v1.4b[3]\n"
- ".inst 0x6f82e11c // udot v28.4s, v8.16b, v2.4b[0]\n"
- ".inst 0x6f82e91d // udot v29.4s, v8.16b, v2.4b[2]\n"
- ".inst 0x6f83e11e // udot v30.4s, v8.16b, v3.4b[0]\n"
- ".inst 0x6f83e91f // udot v31.4s, v8.16b, v3.4b[2]\n"
+ ".inst 0x6f8ce919 // udot v25.4s, v8.16b, v12.4b[2]\n"
+ ".inst 0x6f87e118 // udot v24.4s, v8.16b, v7.4b[0]\n"
+ ".inst 0x6f87e917 // udot v23.4s, v8.16b, v7.4b[2]\n"
+ ".inst 0x6f86e116 // udot v22.4s, v8.16b, v6.4b[0]\n"
+ ".inst 0x6f86e915 // udot v21.4s, v8.16b, v6.4b[2]\n"
"ldr q8, [%x[params], #0x0]\n"
- ".inst 0x6f81e158 // udot v24.4s, v10.16b, v1.4b[0]\n"
- ".inst 0x6f81e959 // udot v25.4s, v10.16b, v1.4b[2]\n"
- ".inst 0x6f82e15a // udot v26.4s, v10.16b, v2.4b[0]\n"
- ".inst 0x6f82e95b // udot v27.4s, v10.16b, v2.4b[2]\n"
- ".inst 0x6fa2e13c // udot v28.4s, v9.16b, v2.4b[1]\n"
- ".inst 0x6fa2e93d // udot v29.4s, v9.16b, v2.4b[3]\n"
- ".inst 0x6fa3e13e // udot v30.4s, v9.16b, v3.4b[1]\n"
- ".inst 0x6fa3e93f // udot v31.4s, v9.16b, v3.4b[3]\n"
+ ".inst 0x6fade13c // udot v28.4s, v9.16b, v13.4b[1]\n"
+ ".inst 0x6fade93b // udot v27.4s, v9.16b, v13.4b[3]\n"
+ ".inst 0x6face13a // udot v26.4s, v9.16b, v12.4b[1]\n"
+ ".inst 0x6face939 // udot v25.4s, v9.16b, v12.4b[3]\n"
+ ".inst 0x6fa7e138 // udot v24.4s, v9.16b, v7.4b[1]\n"
+ ".inst 0x6fa7e937 // udot v23.4s, v9.16b, v7.4b[3]\n"
+ ".inst 0x6fa6e136 // udot v22.4s, v9.16b, v6.4b[1]\n"
+ ".inst 0x6fa6e935 // udot v21.4s, v9.16b, v6.4b[3]\n"
"ldr q9, [%x[params], #0x10]\n"
- ".inst 0x6fa1e178 // udot v24.4s, v11.16b, v1.4b[1]\n"
- ".inst 0x6fa1e979 // udot v25.4s, v11.16b, v1.4b[3]\n"
- ".inst 0x6fa2e17a // udot v26.4s, v11.16b, v2.4b[1]\n"
- ".inst 0x6fa2e97b // udot v27.4s, v11.16b, v2.4b[3]\n"
- ".inst 0x6f83e15c // udot v28.4s, v10.16b, v3.4b[0]\n"
- ".inst 0x6f83e95d // udot v29.4s, v10.16b, v3.4b[2]\n"
- ".inst 0x6f84e15e // udot v30.4s, v10.16b, v4.4b[0]\n"
- ".inst 0x6f84e95f // udot v31.4s, v10.16b, v4.4b[2]\n"
+ ".inst 0x6f8ce15c // udot v28.4s, v10.16b, v12.4b[0]\n"
+ ".inst 0x6f8ce95b // udot v27.4s, v10.16b, v12.4b[2]\n"
+ ".inst 0x6f87e15a // udot v26.4s, v10.16b, v7.4b[0]\n"
+ ".inst 0x6f87e959 // udot v25.4s, v10.16b, v7.4b[2]\n"
+ ".inst 0x6f86e158 // udot v24.4s, v10.16b, v6.4b[0]\n"
+ ".inst 0x6f86e957 // udot v23.4s, v10.16b, v6.4b[2]\n"
+ ".inst 0x6f85e156 // udot v22.4s, v10.16b, v5.4b[0]\n"
+ ".inst 0x6f85e955 // udot v21.4s, v10.16b, v5.4b[2]\n"
"ldr q10, [%x[params], #0x20]\n"
- ".inst 0x6f82e118 // udot v24.4s, v8.16b, v2.4b[0]\n"
- ".inst 0x6f82e919 // udot v25.4s, v8.16b, v2.4b[2]\n"
- ".inst 0x6f83e11a // udot v26.4s, v8.16b, v3.4b[0]\n"
- ".inst 0x6f83e91b // udot v27.4s, v8.16b, v3.4b[2]\n"
- ".inst 0x6fa3e17c // udot v28.4s, v11.16b, v3.4b[1]\n"
- ".inst 0x6fa3e97d // udot v29.4s, v11.16b, v3.4b[3]\n"
- ".inst 0x6fa4e17e // udot v30.4s, v11.16b, v4.4b[1]\n"
- ".inst 0x6fa4e97f // udot v31.4s, v11.16b, v4.4b[3]\n"
+ ".inst 0x6face17c // udot v28.4s, v11.16b, v12.4b[1]\n"
+ ".inst 0x6face97b // udot v27.4s, v11.16b, v12.4b[3]\n"
+ ".inst 0x6fa7e17a // udot v26.4s, v11.16b, v7.4b[1]\n"
+ ".inst 0x6fa7e979 // udot v25.4s, v11.16b, v7.4b[3]\n"
+ ".inst 0x6fa6e178 // udot v24.4s, v11.16b, v6.4b[1]\n"
+ ".inst 0x6fa6e977 // udot v23.4s, v11.16b, v6.4b[3]\n"
+ ".inst 0x6fa5e176 // udot v22.4s, v11.16b, v5.4b[1]\n"
+ ".inst 0x6fa5e975 // udot v21.4s, v11.16b, v5.4b[3]\n"
"ldr q11, [%x[params], #0x30]\n"
- ".inst 0x6fa2e138 // udot v24.4s, v9.16b, v2.4b[1]\n"
- ".inst 0x6fa2e939 // udot v25.4s, v9.16b, v2.4b[3]\n"
- ".inst 0x6fa3e13a // udot v26.4s, v9.16b, v3.4b[1]\n"
- ".inst 0x6fa3e93b // udot v27.4s, v9.16b, v3.4b[3]\n"
- ".inst 0x6f84e11c // udot v28.4s, v8.16b, v4.4b[0]\n"
- ".inst 0x6f84e91d // udot v29.4s, v8.16b, v4.4b[2]\n"
- ".inst 0x6f85e11e // udot v30.4s, v8.16b, v5.4b[0]\n"
- ".inst 0x6f85e91f // udot v31.4s, v8.16b, v5.4b[2]\n"
+ ".inst 0x6f87e11c // udot v28.4s, v8.16b, v7.4b[0]\n"
+ ".inst 0x6f87e91b // udot v27.4s, v8.16b, v7.4b[2]\n"
+ ".inst 0x6f86e11a // udot v26.4s, v8.16b, v6.4b[0]\n"
+ ".inst 0x6f86e919 // udot v25.4s, v8.16b, v6.4b[2]\n"
+ ".inst 0x6f85e118 // udot v24.4s, v8.16b, v5.4b[0]\n"
+ ".inst 0x6f85e917 // udot v23.4s, v8.16b, v5.4b[2]\n"
+ ".inst 0x6f84e116 // udot v22.4s, v8.16b, v4.4b[0]\n"
+ ".inst 0x6f84e915 // udot v21.4s, v8.16b, v4.4b[2]\n"
"ldr q8, [%x[params], #0x40]\n"
- ".inst 0x6f83e158 // udot v24.4s, v10.16b, v3.4b[0]\n"
- ".inst 0x6f83e959 // udot v25.4s, v10.16b, v3.4b[2]\n"
- ".inst 0x6f84e15a // udot v26.4s, v10.16b, v4.4b[0]\n"
- ".inst 0x6f84e95b // udot v27.4s, v10.16b, v4.4b[2]\n"
- ".inst 0x6fa4e13c // udot v28.4s, v9.16b, v4.4b[1]\n"
- ".inst 0x6fa4e93d // udot v29.4s, v9.16b, v4.4b[3]\n"
- ".inst 0x6fa5e13e // udot v30.4s, v9.16b, v5.4b[1]\n"
- ".inst 0x6fa5e93f // udot v31.4s, v9.16b, v5.4b[3]\n"
+ ".inst 0x6fa7e13c // udot v28.4s, v9.16b, v7.4b[1]\n"
+ ".inst 0x6fa7e93b // udot v27.4s, v9.16b, v7.4b[3]\n"
+ ".inst 0x6fa6e13a // udot v26.4s, v9.16b, v6.4b[1]\n"
+ ".inst 0x6fa6e939 // udot v25.4s, v9.16b, v6.4b[3]\n"
+ ".inst 0x6fa5e138 // udot v24.4s, v9.16b, v5.4b[1]\n"
+ ".inst 0x6fa5e937 // udot v23.4s, v9.16b, v5.4b[3]\n"
+ ".inst 0x6fa4e136 // udot v22.4s, v9.16b, v4.4b[1]\n"
+ ".inst 0x6fa4e935 // udot v21.4s, v9.16b, v4.4b[3]\n"
"ldr q9, [%x[params], #0x50]\n"
- ".inst 0x6fa3e178 // udot v24.4s, v11.16b, v3.4b[1]\n"
- ".inst 0x6fa3e979 // udot v25.4s, v11.16b, v3.4b[3]\n"
- ".inst 0x6fa4e17a // udot v26.4s, v11.16b, v4.4b[1]\n"
- ".inst 0x6fa4e97b // udot v27.4s, v11.16b, v4.4b[3]\n"
- ".inst 0x6f85e15c // udot v28.4s, v10.16b, v5.4b[0]\n"
- ".inst 0x6f85e95d // udot v29.4s, v10.16b, v5.4b[2]\n"
- ".inst 0x6f86e15e // udot v30.4s, v10.16b, v6.4b[0]\n"
- ".inst 0x6f86e95f // udot v31.4s, v10.16b, v6.4b[2]\n"
+ ".inst 0x6f86e15c // udot v28.4s, v10.16b, v6.4b[0]\n"
+ ".inst 0x6f86e95b // udot v27.4s, v10.16b, v6.4b[2]\n"
+ ".inst 0x6f85e15a // udot v26.4s, v10.16b, v5.4b[0]\n"
+ ".inst 0x6f85e959 // udot v25.4s, v10.16b, v5.4b[2]\n"
+ ".inst 0x6f84e158 // udot v24.4s, v10.16b, v4.4b[0]\n"
+ ".inst 0x6f84e957 // udot v23.4s, v10.16b, v4.4b[2]\n"
+ ".inst 0x6f82e156 // udot v22.4s, v10.16b, v2.4b[0]\n"
+ ".inst 0x6f82e955 // udot v21.4s, v10.16b, v2.4b[2]\n"
"ldr q10, [%x[params], #0xb0]\n"
- ".inst 0x6f84e118 // udot v24.4s, v8.16b, v4.4b[0]\n"
- ".inst 0x6f84e919 // udot v25.4s, v8.16b, v4.4b[2]\n"
- ".inst 0x6f85e11a // udot v26.4s, v8.16b, v5.4b[0]\n"
- ".inst 0x6f85e91b // udot v27.4s, v8.16b, v5.4b[2]\n"
- ".inst 0x6fa5e17c // udot v28.4s, v11.16b, v5.4b[1]\n"
- ".inst 0x6fa5e97d // udot v29.4s, v11.16b, v5.4b[3]\n"
- ".inst 0x6fa6e17e // udot v30.4s, v11.16b, v6.4b[1]\n"
- ".inst 0x6fa6e97f // udot v31.4s, v11.16b, v6.4b[3]\n"
+ ".inst 0x6fa6e17c // udot v28.4s, v11.16b, v6.4b[1]\n"
+ ".inst 0x6fa6e97b // udot v27.4s, v11.16b, v6.4b[3]\n"
+ ".inst 0x6fa5e17a // udot v26.4s, v11.16b, v5.4b[1]\n"
+ ".inst 0x6fa5e979 // udot v25.4s, v11.16b, v5.4b[3]\n"
+ ".inst 0x6fa4e178 // udot v24.4s, v11.16b, v4.4b[1]\n"
+ ".inst 0x6fa4e977 // udot v23.4s, v11.16b, v4.4b[3]\n"
+ ".inst 0x6fa2e176 // udot v22.4s, v11.16b, v2.4b[1]\n"
+ ".inst 0x6fa2e975 // udot v21.4s, v11.16b, v2.4b[3]\n"
"ldr q11, [%x[params], #0xc0]\n"
- ".inst 0x6fa4e138 // udot v24.4s, v9.16b, v4.4b[1]\n"
- ".inst 0x6fa4e939 // udot v25.4s, v9.16b, v4.4b[3]\n"
- "sqrdmulh v24.4s, v24.4s, v21.4s\n"
- ".inst 0x6fa5e13a // udot v26.4s, v9.16b, v5.4b[1]\n"
- ".inst 0x6fa5e93b // udot v27.4s, v9.16b, v5.4b[3]\n"
- "sqrdmulh v25.4s, v25.4s, v21.4s\n"
- ".inst 0x6f86e11c // udot v28.4s, v8.16b, v6.4b[0]\n"
- ".inst 0x6f86e91d // udot v29.4s, v8.16b, v6.4b[2]\n"
- "sqrdmulh v26.4s, v26.4s, v21.4s\n"
- ".inst 0x6f87e11e // udot v30.4s, v8.16b, v7.4b[0]\n"
- ".inst 0x6f87e91f // udot v31.4s, v8.16b, v7.4b[2]\n"
+ ".inst 0x6f85e11c // udot v28.4s, v8.16b, v5.4b[0]\n"
+ ".inst 0x6f85e91b // udot v27.4s, v8.16b, v5.4b[2]\n"
+ ".inst 0x6f84e11a // udot v26.4s, v8.16b, v4.4b[0]\n"
+ ".inst 0x6f84e919 // udot v25.4s, v8.16b, v4.4b[2]\n"
+ ".inst 0x6f82e118 // udot v24.4s, v8.16b, v2.4b[0]\n"
+ ".inst 0x6f82e917 // udot v23.4s, v8.16b, v2.4b[2]\n"
+ ".inst 0x6f81e116 // udot v22.4s, v8.16b, v1.4b[0]\n"
+ ".inst 0x6f81e915 // udot v21.4s, v8.16b, v1.4b[2]\n"
"ldr q8, [%x[params], #0x90]\n"
- "sqrdmulh v27.4s, v27.4s, v21.4s\n"
- ".inst 0x6fa6e13c // udot v28.4s, v9.16b, v6.4b[1]\n"
- ".inst 0x6fa6e93d // udot v29.4s, v9.16b, v6.4b[3]\n"
- "and v19.16b, v24.16b, v20.16b\n"
- ".inst 0x6fa7e13e // udot v30.4s, v9.16b, v7.4b[1]\n"
- ".inst 0x6fa7e93f // udot v31.4s, v9.16b, v7.4b[3]\n"
+ ".inst 0x6fa5e13c // udot v28.4s, v9.16b, v5.4b[1]\n"
+ ".inst 0x6fa5e93b // udot v27.4s, v9.16b, v5.4b[3]\n"
+ ".inst 0x6fa4e13a // udot v26.4s, v9.16b, v4.4b[1]\n"
+ ".inst 0x6fa4e939 // udot v25.4s, v9.16b, v4.4b[3]\n"
+ ".inst 0x6fa2e138 // udot v24.4s, v9.16b, v2.4b[1]\n"
+ ".inst 0x6fa2e937 // udot v23.4s, v9.16b, v2.4b[3]\n"
+ ".inst 0x6fa1e136 // udot v22.4s, v9.16b, v1.4b[1]\n"
+ ".inst 0x6fa1e935 // udot v21.4s, v9.16b, v1.4b[3]\n"
"ldr q9, [%x[params], #0xa0]\n"
- "and v18.16b, v25.16b, v20.16b\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
"add %x[params], %x[params], #0xd0\n"
- "sqrdmulh v28.4s, v28.4s, v21.4s\n"
- "sqrdmulh v29.4s, v29.4s, v21.4s\n"
- "sqrdmulh v30.4s, v30.4s, v21.4s\n"
- "sqrdmulh v31.4s, v31.4s, v21.4s\n"
- "and v17.16b, v26.16b, v20.16b\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "sqadd v24.4s, v24.4s, v19.4s\n"
- "and v16.16b, v27.16b, v20.16b\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "sqadd v25.4s, v25.4s, v18.4s\n"
- "sqadd v26.4s, v26.4s, v17.4s\n"
- "sqadd v27.4s, v27.4s, v16.4s\n"
- "and v19.16b, v28.16b, v20.16b\n"
- "and v18.16b, v29.16b, v20.16b\n"
- "and v17.16b, v30.16b, v20.16b\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
+ "sqrdmulh v28.4s, v28.4s, v20.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v20.4s\n"
+ "sqrdmulh v26.4s, v26.4s, v20.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v20.4s\n"
+ "sqrdmulh v24.4s, v24.4s, v20.4s\n"
+ "and v18.16b, v28.16b, v19.16b\n"
+ "and v17.16b, v27.16b, v19.16b\n"
+ "and v16.16b, v26.16b, v19.16b\n"
"sshr v18.4s, v18.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "sqadd v28.4s, v28.4s, v19.4s\n"
- "and v16.16b, v31.16b, v20.16b\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqadd v29.4s, v29.4s, v18.4s\n"
- "sqadd v30.4s, v30.4s, v17.4s\n"
- "sqadd v31.4s, v31.4s, v16.4s\n"
- "srshl v24.4s, v24.4s, v20.4s\n"
- "srshl v25.4s, v25.4s, v20.4s\n"
- "srshl v26.4s, v26.4s, v20.4s\n"
- "srshl v27.4s, v27.4s, v20.4s\n"
- "srshl v28.4s, v28.4s, v20.4s\n"
- "srshl v29.4s, v29.4s, v20.4s\n"
- "srshl v30.4s, v30.4s, v20.4s\n"
- "srshl v31.4s, v31.4s, v20.4s\n"
- "add v24.4s, v24.4s, v14.4s\n"
- "add v25.4s, v25.4s, v14.4s\n"
- "add v26.4s, v26.4s, v14.4s\n"
- "add v27.4s, v27.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "smin v24.4s, v24.4s, v15.4s\n"
- "smin v25.4s, v25.4s, v15.4s\n"
- "smin v26.4s, v26.4s, v15.4s\n"
- "smin v27.4s, v27.4s, v15.4s\n"
- "smin v28.4s, v28.4s, v15.4s\n"
- "smin v29.4s, v29.4s, v15.4s\n"
- "smin v30.4s, v30.4s, v15.4s\n"
- "smin v31.4s, v31.4s, v15.4s\n"
- "smax v24.4s, v24.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v26.4s, v26.4s, v13.4s\n"
- "smax v27.4s, v27.4s, v13.4s\n"
- "smax v28.4s, v28.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v31.4s, v31.4s, v13.4s\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "sqadd v28.4s, v28.4s, v18.4s\n"
+ "sqadd v27.4s, v27.4s, v17.4s\n"
+ "sqadd v26.4s, v26.4s, v16.4s\n"
+ "and v16.16b, v25.16b, v19.16b\n"
+ "srshl v28.4s, v28.4s, v19.4s\n"
+ "srshl v27.4s, v27.4s, v19.4s\n"
+ "srshl v26.4s, v26.4s, v19.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "add v28.4s, v28.4s, v0.4s\n"
+ "add v27.4s, v27.4s, v0.4s\n"
+ "add v26.4s, v26.4s, v0.4s\n"
+ "smin v28.4s, v28.4s, v30.4s\n"
+ "smin v27.4s, v27.4s, v30.4s\n"
+ "smin v26.4s, v26.4s, v30.4s\n"
+ "smax v28.4s, v28.4s, v31.4s\n"
+ "smax v27.4s, v27.4s, v31.4s\n"
+ "smax v26.4s, v26.4s, v31.4s\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "str s24, [x27, x28]\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
- "str s25, [x26, x28]\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "str s26, [x25, x28]\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
- "str s27, [x24, x28]\n"
- "str s28, [x23, x28]\n"
- "dup v24.4s, v22.s[0]\n"
- "dup v25.4s, v22.s[1]\n"
- "str s29, [x22, x28]\n"
- "dup v26.4s, v22.s[2]\n"
- "dup v27.4s, v22.s[3]\n"
- "str s30, [x21, x28]\n"
- "dup v28.4s, v23.s[0]\n"
- "dup v29.4s, v23.s[1]\n"
- "str s31, [x20, x28]\n"
- "dup v30.4s, v23.s[2]\n"
- "dup v31.4s, v23.s[3]\n"
- "add x28, x28, #0x4\n"
- "add v24.4s, v24.4s, v12.4s\n"
- "add v25.4s, v25.4s, v12.4s\n"
- "add v26.4s, v26.4s, v12.4s\n"
- "add v27.4s, v27.4s, v12.4s\n"
- "add v28.4s, v28.4s, v12.4s\n"
- "add v29.4s, v29.4s, v12.4s\n"
- "add v30.4s, v30.4s, v12.4s\n"
- "add v31.4s, v31.4s, v12.4s\n"
+ "str s28, [x26, x10]\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "ldr q28, [SP, #0x0]\n"
+ "sqadd v25.4s, v25.4s, v16.4s\n"
+ "str s27, [x25, x10]\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "ldr q27, [SP, #0x10]\n"
+ "and v16.16b, v24.16b, v19.16b\n"
+ "str s26, [x24, x10]\n"
+ "sqrdmulh v23.4s, v23.4s, v20.4s\n"
+ "ldr q26, [SP, #0x20]\n"
+ "srshl v25.4s, v25.4s, v19.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v22.4s, v22.4s, v20.4s\n"
+ "and v17.16b, v23.16b, v19.16b\n"
+ "add v25.4s, v25.4s, v0.4s\n"
+ "sqadd v24.4s, v24.4s, v16.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "smin v25.4s, v25.4s, v30.4s\n"
+ "and v16.16b, v22.16b, v19.16b\n"
+ "srshl v24.4s, v24.4s, v19.4s\n"
+ "smax v25.4s, v25.4s, v31.4s\n"
+ "sqadd v23.4s, v23.4s, v17.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "add v24.4s, v24.4s, v0.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s25, [x23, x10]\n"
+ "smin v24.4s, v24.4s, v30.4s\n"
+ "srshl v23.4s, v23.4s, v19.4s\n"
+ "ldr q25, [SP, #0x30]\n"
+ "sqadd v22.4s, v22.4s, v16.4s\n"
+ "sqrdmulh v21.4s, v21.4s, v20.4s\n"
+ "smax v24.4s, v24.4s, v31.4s\n"
+ "add v23.4s, v23.4s, v0.4s\n"
+ "srshl v22.4s, v22.4s, v19.4s\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "smin v23.4s, v23.4s, v30.4s\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "str s24, [x22, x10]\n"
+ "smax v23.4s, v23.4s, v31.4s\n"
+ "add v22.4s, v22.4s, v0.4s\n"
+ "ldr q24, [SP, #0x40]\n"
+ "and v16.16b, v21.16b, v19.16b\n"
+ "add v28.4s, v28.4s, v29.4s\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "smin v22.4s, v22.4s, v30.4s\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "str s23, [x21, x10]\n"
+ "smax v22.4s, v22.4s, v31.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "ldr q23, [SP, #0x50]\n"
+ "add v27.4s, v27.4s, v29.4s\n"
+ "add v26.4s, v26.4s, v29.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "sqadd v21.4s, v21.4s, v16.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str s22, [x20, x10]\n"
+ "add v25.4s, v25.4s, v29.4s\n"
+ "add v24.4s, v24.4s, v29.4s\n"
+ "ldr q22, [SP, #0x60]\n"
+ "srshl v21.4s, v21.4s, v19.4s\n"
+ "add v23.4s, v23.4s, v29.4s\n"
+ "add v21.4s, v21.4s, v0.4s\n"
+ "add v22.4s, v22.4s, v29.4s\n"
+ "smin v21.4s, v21.4s, v30.4s\n"
+ "smax v21.4s, v21.4s, v31.4s\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "str s21, [x19, x10]\n"
+ "add x10, x10, #0x4\n"
+ "ldr q21, [SP, #0x70]\n"
+ "add v21.4s, v21.4s, v29.4s\n"
"bgt 1b\n"
"2:" // Tail
- "ldr q21, [%x[params], #0x60]\n"
- "ldr q20, [%x[params], #0x70]\n"
- ".inst 0x6f80e118 // udot v24.4s, v8.16b, v0.4b[0]\n"
- ".inst 0x6f80e919 // udot v25.4s, v8.16b, v0.4b[2]\n"
- ".inst 0x6f81e11a // udot v26.4s, v8.16b, v1.4b[0]\n"
- ".inst 0x6f81e91b // udot v27.4s, v8.16b, v1.4b[2]\n"
- "cmp %x[n_channels], #0x4\n"
- "add x27, x27, x28\n"
- ".inst 0x6fa0e138 // udot v24.4s, v9.16b, v0.4b[1]\n"
- ".inst 0x6fa0e939 // udot v25.4s, v9.16b, v0.4b[3]\n"
- "add x26, x26, x28\n"
- "add x25, x25, x28\n"
- ".inst 0x6fa1e13a // udot v26.4s, v9.16b, v1.4b[1]\n"
- ".inst 0x6fa1e93b // udot v27.4s, v9.16b, v1.4b[3]\n"
- "add x24, x24, x28\n"
- "add x23, x23, x28\n"
- ".inst 0x6f82e11c // udot v28.4s, v8.16b, v2.4b[0]\n"
- ".inst 0x6f82e91d // udot v29.4s, v8.16b, v2.4b[2]\n"
- "add x22, x22, x28\n"
- "add x21, x21, x28\n"
- ".inst 0x6f83e11e // udot v30.4s, v8.16b, v3.4b[0]\n"
- ".inst 0x6f83e91f // udot v31.4s, v8.16b, v3.4b[2]\n"
+ ".inst 0x6f8de11c // udot v28.4s, v8.16b, v13.4b[0]\n"
+ "ldr q20, [%x[params], #0x60]\n"
+ "add x26, x26, x10\n"
+ ".inst 0x6f8de91b // udot v27.4s, v8.16b, v13.4b[2]\n"
+ "ldr q19, [%x[params], #0x70]\n"
+ "add x25, x25, x10\n"
+ ".inst 0x6f8ce11a // udot v26.4s, v8.16b, v12.4b[0]\n"
+ "add x24, x24, x10\n"
+ ".inst 0x6f8ce919 // udot v25.4s, v8.16b, v12.4b[2]\n"
+ "add x23, x23, x10\n"
+ ".inst 0x6f87e118 // udot v24.4s, v8.16b, v7.4b[0]\n"
+ "add x22, x22, x10\n"
+ ".inst 0x6f87e917 // udot v23.4s, v8.16b, v7.4b[2]\n"
+ "add x21, x21, x10\n"
+ ".inst 0x6f86e116 // udot v22.4s, v8.16b, v6.4b[0]\n"
+ "add x20, x20, x10\n"
+ ".inst 0x6f86e915 // udot v21.4s, v8.16b, v6.4b[2]\n"
"ldr q8, [%x[params], #0x0]\n"
- "add x20, x20, x28\n"
- ".inst 0x6f81e158 // udot v24.4s, v10.16b, v1.4b[0]\n"
- ".inst 0x6f81e959 // udot v25.4s, v10.16b, v1.4b[2]\n"
- ".inst 0x6f82e15a // udot v26.4s, v10.16b, v2.4b[0]\n"
- ".inst 0x6f82e95b // udot v27.4s, v10.16b, v2.4b[2]\n"
- ".inst 0x6fa2e13c // udot v28.4s, v9.16b, v2.4b[1]\n"
- ".inst 0x6fa2e93d // udot v29.4s, v9.16b, v2.4b[3]\n"
- ".inst 0x6fa3e13e // udot v30.4s, v9.16b, v3.4b[1]\n"
- ".inst 0x6fa3e93f // udot v31.4s, v9.16b, v3.4b[3]\n"
+ "add x19, x19, x10\n"
+ ".inst 0x6fade13c // udot v28.4s, v9.16b, v13.4b[1]\n"
+ "cmp %x[n_channels], #0x4\n"
+ ".inst 0x6fade93b // udot v27.4s, v9.16b, v13.4b[3]\n"
+ ".inst 0x6face13a // udot v26.4s, v9.16b, v12.4b[1]\n"
+ ".inst 0x6face939 // udot v25.4s, v9.16b, v12.4b[3]\n"
+ ".inst 0x6fa7e138 // udot v24.4s, v9.16b, v7.4b[1]\n"
+ ".inst 0x6fa7e937 // udot v23.4s, v9.16b, v7.4b[3]\n"
+ ".inst 0x6fa6e136 // udot v22.4s, v9.16b, v6.4b[1]\n"
+ ".inst 0x6fa6e935 // udot v21.4s, v9.16b, v6.4b[3]\n"
"ldr q9, [%x[params], #0x10]\n"
- ".inst 0x6fa1e178 // udot v24.4s, v11.16b, v1.4b[1]\n"
- ".inst 0x6fa1e979 // udot v25.4s, v11.16b, v1.4b[3]\n"
- ".inst 0x6fa2e17a // udot v26.4s, v11.16b, v2.4b[1]\n"
- ".inst 0x6fa2e97b // udot v27.4s, v11.16b, v2.4b[3]\n"
- ".inst 0x6f83e15c // udot v28.4s, v10.16b, v3.4b[0]\n"
- ".inst 0x6f83e95d // udot v29.4s, v10.16b, v3.4b[2]\n"
- ".inst 0x6f84e15e // udot v30.4s, v10.16b, v4.4b[0]\n"
- ".inst 0x6f84e95f // udot v31.4s, v10.16b, v4.4b[2]\n"
+ ".inst 0x6f8ce15c // udot v28.4s, v10.16b, v12.4b[0]\n"
+ ".inst 0x6f8ce95b // udot v27.4s, v10.16b, v12.4b[2]\n"
+ ".inst 0x6f87e15a // udot v26.4s, v10.16b, v7.4b[0]\n"
+ ".inst 0x6f87e959 // udot v25.4s, v10.16b, v7.4b[2]\n"
+ ".inst 0x6f86e158 // udot v24.4s, v10.16b, v6.4b[0]\n"
+ ".inst 0x6f86e957 // udot v23.4s, v10.16b, v6.4b[2]\n"
+ ".inst 0x6f85e156 // udot v22.4s, v10.16b, v5.4b[0]\n"
+ ".inst 0x6f85e955 // udot v21.4s, v10.16b, v5.4b[2]\n"
"ldr q10, [%x[params], #0x20]\n"
- ".inst 0x6f82e118 // udot v24.4s, v8.16b, v2.4b[0]\n"
- ".inst 0x6f82e919 // udot v25.4s, v8.16b, v2.4b[2]\n"
- ".inst 0x6f83e11a // udot v26.4s, v8.16b, v3.4b[0]\n"
- ".inst 0x6f83e91b // udot v27.4s, v8.16b, v3.4b[2]\n"
- ".inst 0x6fa3e17c // udot v28.4s, v11.16b, v3.4b[1]\n"
- ".inst 0x6fa3e97d // udot v29.4s, v11.16b, v3.4b[3]\n"
- ".inst 0x6fa4e17e // udot v30.4s, v11.16b, v4.4b[1]\n"
- ".inst 0x6fa4e97f // udot v31.4s, v11.16b, v4.4b[3]\n"
+ ".inst 0x6face17c // udot v28.4s, v11.16b, v12.4b[1]\n"
+ ".inst 0x6face97b // udot v27.4s, v11.16b, v12.4b[3]\n"
+ ".inst 0x6fa7e17a // udot v26.4s, v11.16b, v7.4b[1]\n"
+ ".inst 0x6fa7e979 // udot v25.4s, v11.16b, v7.4b[3]\n"
+ ".inst 0x6fa6e178 // udot v24.4s, v11.16b, v6.4b[1]\n"
+ ".inst 0x6fa6e977 // udot v23.4s, v11.16b, v6.4b[3]\n"
+ ".inst 0x6fa5e176 // udot v22.4s, v11.16b, v5.4b[1]\n"
+ ".inst 0x6fa5e975 // udot v21.4s, v11.16b, v5.4b[3]\n"
"ldr q11, [%x[params], #0x30]\n"
- ".inst 0x6fa2e138 // udot v24.4s, v9.16b, v2.4b[1]\n"
- ".inst 0x6fa2e939 // udot v25.4s, v9.16b, v2.4b[3]\n"
- ".inst 0x6fa3e13a // udot v26.4s, v9.16b, v3.4b[1]\n"
- ".inst 0x6fa3e93b // udot v27.4s, v9.16b, v3.4b[3]\n"
- ".inst 0x6f84e11c // udot v28.4s, v8.16b, v4.4b[0]\n"
- ".inst 0x6f84e91d // udot v29.4s, v8.16b, v4.4b[2]\n"
- ".inst 0x6f85e11e // udot v30.4s, v8.16b, v5.4b[0]\n"
- ".inst 0x6f85e91f // udot v31.4s, v8.16b, v5.4b[2]\n"
+ ".inst 0x6f87e11c // udot v28.4s, v8.16b, v7.4b[0]\n"
+ ".inst 0x6f87e91b // udot v27.4s, v8.16b, v7.4b[2]\n"
+ ".inst 0x6f86e11a // udot v26.4s, v8.16b, v6.4b[0]\n"
+ ".inst 0x6f86e919 // udot v25.4s, v8.16b, v6.4b[2]\n"
+ ".inst 0x6f85e118 // udot v24.4s, v8.16b, v5.4b[0]\n"
+ ".inst 0x6f85e917 // udot v23.4s, v8.16b, v5.4b[2]\n"
+ ".inst 0x6f84e116 // udot v22.4s, v8.16b, v4.4b[0]\n"
+ ".inst 0x6f84e915 // udot v21.4s, v8.16b, v4.4b[2]\n"
"ldr q8, [%x[params], #0x40]\n"
- ".inst 0x6f83e158 // udot v24.4s, v10.16b, v3.4b[0]\n"
- ".inst 0x6f83e959 // udot v25.4s, v10.16b, v3.4b[2]\n"
- ".inst 0x6f84e15a // udot v26.4s, v10.16b, v4.4b[0]\n"
- ".inst 0x6f84e95b // udot v27.4s, v10.16b, v4.4b[2]\n"
- ".inst 0x6fa4e13c // udot v28.4s, v9.16b, v4.4b[1]\n"
- ".inst 0x6fa4e93d // udot v29.4s, v9.16b, v4.4b[3]\n"
- ".inst 0x6fa5e13e // udot v30.4s, v9.16b, v5.4b[1]\n"
- ".inst 0x6fa5e93f // udot v31.4s, v9.16b, v5.4b[3]\n"
+ ".inst 0x6fa7e13c // udot v28.4s, v9.16b, v7.4b[1]\n"
+ ".inst 0x6fa7e93b // udot v27.4s, v9.16b, v7.4b[3]\n"
+ ".inst 0x6fa6e13a // udot v26.4s, v9.16b, v6.4b[1]\n"
+ ".inst 0x6fa6e939 // udot v25.4s, v9.16b, v6.4b[3]\n"
+ ".inst 0x6fa5e138 // udot v24.4s, v9.16b, v5.4b[1]\n"
+ ".inst 0x6fa5e937 // udot v23.4s, v9.16b, v5.4b[3]\n"
+ ".inst 0x6fa4e136 // udot v22.4s, v9.16b, v4.4b[1]\n"
+ ".inst 0x6fa4e935 // udot v21.4s, v9.16b, v4.4b[3]\n"
"ldr q9, [%x[params], #0x50]\n"
"add %x[params], %x[params], #0x80\n"
- ".inst 0x6fa3e178 // udot v24.4s, v11.16b, v3.4b[1]\n"
- ".inst 0x6fa3e979 // udot v25.4s, v11.16b, v3.4b[3]\n"
- ".inst 0x6fa4e17a // udot v26.4s, v11.16b, v4.4b[1]\n"
- ".inst 0x6fa4e97b // udot v27.4s, v11.16b, v4.4b[3]\n"
- ".inst 0x6f85e15c // udot v28.4s, v10.16b, v5.4b[0]\n"
- ".inst 0x6f85e95d // udot v29.4s, v10.16b, v5.4b[2]\n"
- ".inst 0x6f86e15e // udot v30.4s, v10.16b, v6.4b[0]\n"
- ".inst 0x6f86e95f // udot v31.4s, v10.16b, v6.4b[2]\n"
- ".inst 0x6f84e118 // udot v24.4s, v8.16b, v4.4b[0]\n"
- ".inst 0x6f84e919 // udot v25.4s, v8.16b, v4.4b[2]\n"
- ".inst 0x6f85e11a // udot v26.4s, v8.16b, v5.4b[0]\n"
+ ".inst 0x6f86e15c // udot v28.4s, v10.16b, v6.4b[0]\n"
+ ".inst 0x6f86e95b // udot v27.4s, v10.16b, v6.4b[2]\n"
+ ".inst 0x6f85e15a // udot v26.4s, v10.16b, v5.4b[0]\n"
+ ".inst 0x6f85e959 // udot v25.4s, v10.16b, v5.4b[2]\n"
+ ".inst 0x6f84e158 // udot v24.4s, v10.16b, v4.4b[0]\n"
+ ".inst 0x6f84e957 // udot v23.4s, v10.16b, v4.4b[2]\n"
+ ".inst 0x6f82e156 // udot v22.4s, v10.16b, v2.4b[0]\n"
+ ".inst 0x6f82e955 // udot v21.4s, v10.16b, v2.4b[2]\n"
+ ".inst 0x6fa6e17c // udot v28.4s, v11.16b, v6.4b[1]\n"
+ ".inst 0x6fa6e97b // udot v27.4s, v11.16b, v6.4b[3]\n"
+ ".inst 0x6fa5e17a // udot v26.4s, v11.16b, v5.4b[1]\n"
+ ".inst 0x6fa5e979 // udot v25.4s, v11.16b, v5.4b[3]\n"
+ ".inst 0x6fa4e178 // udot v24.4s, v11.16b, v4.4b[1]\n"
+ ".inst 0x6fa4e977 // udot v23.4s, v11.16b, v4.4b[3]\n"
+ ".inst 0x6fa2e176 // udot v22.4s, v11.16b, v2.4b[1]\n"
+ ".inst 0x6fa2e975 // udot v21.4s, v11.16b, v2.4b[3]\n"
+ ".inst 0x6f85e11c // udot v28.4s, v8.16b, v5.4b[0]\n"
".inst 0x6f85e91b // udot v27.4s, v8.16b, v5.4b[2]\n"
- ".inst 0x6fa5e17c // udot v28.4s, v11.16b, v5.4b[1]\n"
- ".inst 0x6fa5e97d // udot v29.4s, v11.16b, v5.4b[3]\n"
- ".inst 0x6fa6e17e // udot v30.4s, v11.16b, v6.4b[1]\n"
- ".inst 0x6fa6e97f // udot v31.4s, v11.16b, v6.4b[3]\n"
- ".inst 0x6fa4e138 // udot v24.4s, v9.16b, v4.4b[1]\n"
- ".inst 0x6fa4e939 // udot v25.4s, v9.16b, v4.4b[3]\n"
- "sqrdmulh v24.4s, v24.4s, v21.4s\n"
- ".inst 0x6fa5e13a // udot v26.4s, v9.16b, v5.4b[1]\n"
+ ".inst 0x6f84e11a // udot v26.4s, v8.16b, v4.4b[0]\n"
+ ".inst 0x6f84e919 // udot v25.4s, v8.16b, v4.4b[2]\n"
+ ".inst 0x6f82e118 // udot v24.4s, v8.16b, v2.4b[0]\n"
+ ".inst 0x6f82e917 // udot v23.4s, v8.16b, v2.4b[2]\n"
+ ".inst 0x6f81e116 // udot v22.4s, v8.16b, v1.4b[0]\n"
+ ".inst 0x6f81e915 // udot v21.4s, v8.16b, v1.4b[2]\n"
+ ".inst 0x6fa5e13c // udot v28.4s, v9.16b, v5.4b[1]\n"
".inst 0x6fa5e93b // udot v27.4s, v9.16b, v5.4b[3]\n"
- "sqrdmulh v25.4s, v25.4s, v21.4s\n"
- ".inst 0x6f86e11c // udot v28.4s, v8.16b, v6.4b[0]\n"
- ".inst 0x6f86e91d // udot v29.4s, v8.16b, v6.4b[2]\n"
- "sqrdmulh v26.4s, v26.4s, v21.4s\n"
- ".inst 0x6f87e11e // udot v30.4s, v8.16b, v7.4b[0]\n"
- ".inst 0x6f87e91f // udot v31.4s, v8.16b, v7.4b[2]\n"
- "sqrdmulh v27.4s, v27.4s, v21.4s\n"
- ".inst 0x6fa6e13c // udot v28.4s, v9.16b, v6.4b[1]\n"
- ".inst 0x6fa6e93d // udot v29.4s, v9.16b, v6.4b[3]\n"
- "and v19.16b, v24.16b, v20.16b\n"
- ".inst 0x6fa7e13e // udot v30.4s, v9.16b, v7.4b[1]\n"
- ".inst 0x6fa7e93f // udot v31.4s, v9.16b, v7.4b[3]\n"
- "and v18.16b, v25.16b, v20.16b\n"
- "and v17.16b, v26.16b, v20.16b\n"
- "and v16.16b, v27.16b, v20.16b\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
+ ".inst 0x6fa4e13a // udot v26.4s, v9.16b, v4.4b[1]\n"
+ ".inst 0x6fa4e939 // udot v25.4s, v9.16b, v4.4b[3]\n"
+ ".inst 0x6fa2e138 // udot v24.4s, v9.16b, v2.4b[1]\n"
+ ".inst 0x6fa2e937 // udot v23.4s, v9.16b, v2.4b[3]\n"
+ ".inst 0x6fa1e136 // udot v22.4s, v9.16b, v1.4b[1]\n"
+ ".inst 0x6fa1e935 // udot v21.4s, v9.16b, v1.4b[3]\n"
+ "sqrdmulh v28.4s, v28.4s, v20.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v20.4s\n"
+ "sqrdmulh v26.4s, v26.4s, v20.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v20.4s\n"
+ "and v18.16b, v28.16b, v19.16b\n"
+ "and v17.16b, v27.16b, v19.16b\n"
+ "and v16.16b, v26.16b, v19.16b\n"
"sshr v18.4s, v18.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqrdmulh v28.4s, v28.4s, v21.4s\n"
- "sqrdmulh v29.4s, v29.4s, v21.4s\n"
- "sqrdmulh v30.4s, v30.4s, v21.4s\n"
- "sqrdmulh v31.4s, v31.4s, v21.4s\n"
- "sqadd v24.4s, v24.4s, v19.4s\n"
- "sqadd v25.4s, v25.4s, v18.4s\n"
- "sqadd v26.4s, v26.4s, v17.4s\n"
- "sqadd v27.4s, v27.4s, v16.4s\n"
- "and v19.16b, v28.16b, v20.16b\n"
- "and v18.16b, v29.16b, v20.16b\n"
- "and v17.16b, v30.16b, v20.16b\n"
- "and v16.16b, v31.16b, v20.16b\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v28.4s, v28.4s, v18.4s\n"
+ "sqadd v27.4s, v27.4s, v17.4s\n"
+ "sqadd v26.4s, v26.4s, v16.4s\n"
+ "and v16.16b, v25.16b, v19.16b\n"
+ "srshl v28.4s, v28.4s, v19.4s\n"
+ "srshl v27.4s, v27.4s, v19.4s\n"
+ "srshl v26.4s, v26.4s, v19.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqadd v28.4s, v28.4s, v19.4s\n"
- "sqadd v29.4s, v29.4s, v18.4s\n"
- "sqadd v30.4s, v30.4s, v17.4s\n"
- "sqadd v31.4s, v31.4s, v16.4s\n"
- "srshl v24.4s, v24.4s, v20.4s\n"
- "srshl v25.4s, v25.4s, v20.4s\n"
- "srshl v26.4s, v26.4s, v20.4s\n"
- "srshl v27.4s, v27.4s, v20.4s\n"
- "srshl v28.4s, v28.4s, v20.4s\n"
- "srshl v29.4s, v29.4s, v20.4s\n"
- "srshl v30.4s, v30.4s, v20.4s\n"
- "srshl v31.4s, v31.4s, v20.4s\n"
- "add v24.4s, v24.4s, v14.4s\n"
- "add v25.4s, v25.4s, v14.4s\n"
- "add v26.4s, v26.4s, v14.4s\n"
- "add v27.4s, v27.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "smin v24.4s, v24.4s, v15.4s\n"
- "smin v25.4s, v25.4s, v15.4s\n"
- "smin v26.4s, v26.4s, v15.4s\n"
- "smin v27.4s, v27.4s, v15.4s\n"
- "smin v28.4s, v28.4s, v15.4s\n"
- "smin v29.4s, v29.4s, v15.4s\n"
- "smin v30.4s, v30.4s, v15.4s\n"
- "smin v31.4s, v31.4s, v15.4s\n"
- "smax v24.4s, v24.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v26.4s, v26.4s, v13.4s\n"
- "smax v27.4s, v27.4s, v13.4s\n"
- "smax v28.4s, v28.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v31.4s, v31.4s, v13.4s\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "add v28.4s, v28.4s, v0.4s\n"
+ "add v27.4s, v27.4s, v0.4s\n"
+ "add v26.4s, v26.4s, v0.4s\n"
+ "smin v28.4s, v28.4s, v30.4s\n"
+ "smin v27.4s, v27.4s, v30.4s\n"
+ "smin v26.4s, v26.4s, v30.4s\n"
+ "smax v28.4s, v28.4s, v31.4s\n"
+ "smax v27.4s, v27.4s, v31.4s\n"
+ "smax v26.4s, v26.4s, v31.4s\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "sqadd v25.4s, v25.4s, v16.4s\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "sqrdmulh v24.4s, v24.4s, v20.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v20.4s\n"
+ "srshl v25.4s, v25.4s, v19.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v20.4s\n"
+ "and v16.16b, v24.16b, v19.16b\n"
+ "and v17.16b, v23.16b, v19.16b\n"
+ "add v25.4s, v25.4s, v0.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "smin v25.4s, v25.4s, v30.4s\n"
+ "sqadd v24.4s, v24.4s, v16.4s\n"
+ "sqadd v23.4s, v23.4s, v17.4s\n"
+ "smax v25.4s, v25.4s, v31.4s\n"
+ "and v16.16b, v22.16b, v19.16b\n"
+ "srshl v24.4s, v24.4s, v19.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "srshl v23.4s, v23.4s, v19.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "add v24.4s, v24.4s, v0.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "add v23.4s, v23.4s, v0.4s\n"
+ "smin v24.4s, v24.4s, v30.4s\n"
+ "sqadd v22.4s, v22.4s, v16.4s\n"
+ "smin v23.4s, v23.4s, v30.4s\n"
+ "smax v24.4s, v24.4s, v31.4s\n"
+ "sqrdmulh v21.4s, v21.4s, v20.4s\n"
+ "smax v23.4s, v23.4s, v31.4s\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "srshl v22.4s, v22.4s, v19.4s\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "and v16.16b, v21.16b, v19.16b\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "add v22.4s, v22.4s, v0.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smin v22.4s, v22.4s, v30.4s\n"
+ "sqadd v21.4s, v21.4s, v16.4s\n"
+ "smax v22.4s, v22.4s, v31.4s\n"
+ "srshl v21.4s, v21.4s, v19.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "add v21.4s, v21.4s, v0.4s\n"
+ "smin v21.4s, v21.4s, v30.4s\n"
+ "smax v21.4s, v21.4s, v31.4s\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
"blt 3f\n"
- "str s24, [x27, #0x0]\n"
- "str s25, [x26, #0x0]\n"
- "str s26, [x25, #0x0]\n"
- "str s27, [x24, #0x0]\n"
- "str s28, [x23, #0x0]\n"
- "str s29, [x22, #0x0]\n"
- "str s30, [x21, #0x0]\n"
- "str s31, [x20, #0x0]\n"
+ "str s28, [x26, #0x0]\n"
+ "str s27, [x25, #0x0]\n"
+ "str s26, [x24, #0x0]\n"
+ "str s25, [x23, #0x0]\n"
+ "str s24, [x22, #0x0]\n"
+ "str s23, [x21, #0x0]\n"
+ "str s22, [x20, #0x0]\n"
+ "str s21, [x19, #0x0]\n"
"b 4f\n"
"3:" // Tail: Oddments
+ "st1 { v28.b }[0], [x26], #0x1\n"
"subs %x[n_channels], %x[n_channels], #0x1\n"
- "st1 { v24.b }[0], [x27], #0x1\n"
- "st1 { v25.b }[0], [x26], #0x1\n"
- "st1 { v26.b }[0], [x25], #0x1\n"
- "st1 { v27.b }[0], [x24], #0x1\n"
- "st1 { v28.b }[0], [x23], #0x1\n"
- "st1 { v29.b }[0], [x22], #0x1\n"
- "st1 { v30.b }[0], [x21], #0x1\n"
- "st1 { v31.b }[0], [x20], #0x1\n"
+ "st1 { v27.b }[0], [x25], #0x1\n"
+ "st1 { v26.b }[0], [x24], #0x1\n"
+ "st1 { v25.b }[0], [x23], #0x1\n"
+ "st1 { v24.b }[0], [x22], #0x1\n"
+ "st1 { v23.b }[0], [x21], #0x1\n"
+ "st1 { v22.b }[0], [x20], #0x1\n"
+ "st1 { v21.b }[0], [x19], #0x1\n"
"beq 4f\n"
+ "st1 { v28.b }[1], [x26], #0x1\n"
"subs %x[n_channels], %x[n_channels], #0x1\n"
- "st1 { v24.b }[1], [x27], #0x1\n"
- "st1 { v25.b }[1], [x26], #0x1\n"
- "st1 { v26.b }[1], [x25], #0x1\n"
- "st1 { v27.b }[1], [x24], #0x1\n"
- "st1 { v28.b }[1], [x23], #0x1\n"
- "st1 { v29.b }[1], [x22], #0x1\n"
- "st1 { v30.b }[1], [x21], #0x1\n"
- "st1 { v31.b }[1], [x20], #0x1\n"
+ "st1 { v27.b }[1], [x25], #0x1\n"
+ "st1 { v26.b }[1], [x24], #0x1\n"
+ "st1 { v25.b }[1], [x23], #0x1\n"
+ "st1 { v24.b }[1], [x22], #0x1\n"
+ "st1 { v23.b }[1], [x21], #0x1\n"
+ "st1 { v22.b }[1], [x20], #0x1\n"
+ "st1 { v21.b }[1], [x19], #0x1\n"
"beq 4f\n"
+ "st1 { v28.b }[2], [x26], #0x1\n"
"subs %x[n_channels], %x[n_channels], #0x1\n"
- "st1 { v24.b }[2], [x27], #0x1\n"
- "st1 { v25.b }[2], [x26], #0x1\n"
- "st1 { v26.b }[2], [x25], #0x1\n"
- "st1 { v27.b }[2], [x24], #0x1\n"
- "st1 { v28.b }[2], [x23], #0x1\n"
- "st1 { v29.b }[2], [x22], #0x1\n"
- "st1 { v30.b }[2], [x21], #0x1\n"
- "st1 { v31.b }[2], [x20], #0x1\n"
+ "st1 { v27.b }[2], [x25], #0x1\n"
+ "st1 { v26.b }[2], [x24], #0x1\n"
+ "st1 { v25.b }[2], [x23], #0x1\n"
+ "st1 { v24.b }[2], [x22], #0x1\n"
+ "st1 { v23.b }[2], [x21], #0x1\n"
+ "st1 { v22.b }[2], [x20], #0x1\n"
+ "st1 { v21.b }[2], [x19], #0x1\n"
"beq 4f\n"
- "st1 { v24.b }[3], [x27], #0x1\n"
+ "st1 { v28.b }[3], [x26], #0x1\n"
"subs %x[n_channels], %x[n_channels], #0x1\n"
- "st1 { v25.b }[3], [x26], #0x1\n"
- "st1 { v26.b }[3], [x25], #0x1\n"
- "st1 { v27.b }[3], [x24], #0x1\n"
- "st1 { v28.b }[3], [x23], #0x1\n"
- "st1 { v29.b }[3], [x22], #0x1\n"
- "st1 { v30.b }[3], [x21], #0x1\n"
- "st1 { v31.b }[3], [x20], #0x1\n"
+ "st1 { v27.b }[3], [x25], #0x1\n"
+ "st1 { v26.b }[3], [x24], #0x1\n"
+ "st1 { v25.b }[3], [x23], #0x1\n"
+ "st1 { v24.b }[3], [x22], #0x1\n"
+ "st1 { v23.b }[3], [x21], #0x1\n"
+ "st1 { v22.b }[3], [x20], #0x1\n"
+ "st1 { v21.b }[3], [x19], #0x1\n"
"4:" // Tail: End
+ "add SP, SP, #0x80\n"
: [n_channels] "+&r" (n_output_channels), [params] "+&r" (params)
: [inptrs] "r" (inptrs), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
index bbb817a883..e896304c59 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,1433 +45,1439 @@ void a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
)
{
__asm__ __volatile__(
- "lsr x10, %x[n_output_channels], #0x2\n"
- "add x20, %x[qp], %[offsetof_Requantize32_minval]\n"
- "ld1r { v13.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_maxval]\n"
- "ld1r { v11.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_a_offset]\n"
- "ld1r { v3.16b }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_b_offset]\n"
- "ld1r { v12.16b }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v14.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_per_layer_left_shift]\n"
- "ld1r { v15.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_per_layer_mul]\n"
- "ld1r { v9.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_per_layer_right_shift]\n"
- "ld1r { v10.4s }, [x20]\n"
"mov x9, #0x0\n"
- "cbz x10, 9f\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_minval]\n"
+ "ld1r { v14.4s }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_maxval]\n"
+ "ld1r { v13.4s }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_a_offset]\n"
+ "ld1r { v12.16b }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_b_offset]\n"
+ "ld1r { v11.16b }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_c_offset]\n"
+ "ld1r { v10.4s }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_per_layer_left_shift]\n"
+ "ld1r { v9.4s }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_per_layer_mul]\n"
+ "ld1r { v8.4s }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_per_layer_right_shift]\n"
+ "ld1r { v7.4s }, [x19]\n"
+ "lsr x28, %x[n_output_channels], #0x2\n"
+ "cbz x28, 9f\n"
"1:" // Output channel loop
- "movi v31.4s, #0x0\n"
+ "movi v16.4s, #0x0\n"
"cbz %x[bias], 2f\n"
- "lsl x20, x9, #0x2\n"
- "ldr q31, [%x[bias], x20]\n"
+ "lsl x19, x9, #0x2\n"
+ "ldr q16, [%x[bias], x19]\n"
"2:" // Output channel loop: Load bias: Done
- "mov v16.16b, v31.16b\n"
- "mov v17.16b, v31.16b\n"
- "mov v18.16b, v31.16b\n"
- "mov v19.16b, v31.16b\n"
- "mov v20.16b, v31.16b\n"
- "mov v21.16b, v31.16b\n"
- "mov v22.16b, v31.16b\n"
- "mov v23.16b, v31.16b\n"
- "mov v24.16b, v31.16b\n"
- "mov v25.16b, v31.16b\n"
- "mov v26.16b, v31.16b\n"
- "mov v27.16b, v31.16b\n"
- "mov v28.16b, v31.16b\n"
- "mov v29.16b, v31.16b\n"
- "mov v30.16b, v31.16b\n"
- "mov v31.16b, v31.16b\n"
+ "mov v6.16b, v16.16b\n"
+ "mov v5.16b, v16.16b\n"
+ "mov v4.16b, v16.16b\n"
+ "mov v31.16b, v16.16b\n"
+ "mov v30.16b, v16.16b\n"
+ "mov v29.16b, v16.16b\n"
+ "mov v28.16b, v16.16b\n"
+ "mov v27.16b, v16.16b\n"
+ "mov v26.16b, v16.16b\n"
+ "mov v25.16b, v16.16b\n"
+ "mov v24.16b, v16.16b\n"
+ "mov v23.16b, v16.16b\n"
+ "mov v22.16b, v16.16b\n"
+ "mov v21.16b, v16.16b\n"
+ "mov v20.16b, v16.16b\n"
+ "mov v19.16b, v16.16b\n"
"cbz %x[rq_mul_ptr], 3f\n"
- "lsl x20, x9, #0x2\n"
- "ldr q9, [%x[rq_mul_ptr], x20]\n"
- "ldr q10, [%x[rq_right_shift_ptr], x20]\n"
+ "lsl x19, x9, #0x2\n"
+ "ldr q8, [%x[rq_mul_ptr], x19]\n"
+ "ldr q7, [%x[rq_right_shift_ptr], x19]\n"
"cbz %x[rq_left_shift_ptr], 3f\n"
- "ldr q15, [%x[rq_left_shift_ptr], x20]\n"
+ "ldr q9, [%x[rq_left_shift_ptr], x19]\n"
"3:" // Output channel loop: Load quantization parameters: Done
- "ldr s8, [%x[weights]], #0x4\n"
- "mov x20, %x[inptrs]\n"
- "ldp x25, x28, [x20], #0x10\n"
- "lsr x21, %x[kernel_points], #0x1\n"
- "ldr d2, [x25, #0x0]\n"
- "ldr d7, [x28, #0x0]\n"
- "usubl v2.8h, v2.8b, v3.8b\n"
- "usubl v7.8h, v7.8b, v3.8b\n"
- "usubl v8.8h, v8.8b, v12.8b\n"
- "cbz x21, 7f\n"
- "ldr s6, [%x[weights]], #0x4\n"
- "ldp x25, x28, [x20], #0x10\n"
- "subs x21, x21, #0x1\n"
- "usubl v6.8h, v6.8b, v12.8b\n"
+ "ldr s17, [%x[weights]], #0x4\n"
+ "usubl v17.8h, v17.8b, v11.8b\n"
+ "mov x19, %x[inptrs]\n"
+ "ldp x25, x27, [x19], #0x10\n"
+ "lsr x20, %x[kernel_points], #0x1\n"
+ "ldr d3, [x25, #0x0]\n"
+ "usubl v3.8h, v3.8b, v12.8b\n"
+ "ldr d2, [x27, #0x0]\n"
+ "usubl v2.8h, v2.8b, v12.8b\n"
+ "cbz x20, 7f\n"
+ "ldp x25, x27, [x19], #0x10\n"
+ "ldr s16, [%x[weights]], #0x4\n"
+ "usubl v16.8h, v16.8b, v11.8b\n"
"ldr d1, [x25, #0x0]\n"
- "ldr d0, [x28, #0x0]\n"
- "usubl v1.8h, v1.8b, v3.8b\n"
- "usubl v0.8h, v0.8b, v3.8b\n"
+ "subs x20, x20, #0x1\n"
+ "usubl v1.8h, v1.8b, v12.8b\n"
+ "ldr d0, [x27, #0x0]\n"
+ "usubl v0.8h, v0.8b, v12.8b\n"
"beq 5f\n"
"4:" // Output channel loop: Kernel loop
- "ldp x25, x28, [x20], #0x10\n"
- "smlal v16.4s, v8.4h, v2.h[0]\n"
- "smlal v17.4s, v8.4h, v2.h[1]\n"
- "subs x21, x21, #0x1\n"
- "smlal v18.4s, v8.4h, v2.h[2]\n"
- "smlal v19.4s, v8.4h, v2.h[3]\n"
- "smlal v20.4s, v8.4h, v2.h[4]\n"
- "smlal v21.4s, v8.4h, v2.h[5]\n"
- "smlal v22.4s, v8.4h, v2.h[6]\n"
- "smlal v23.4s, v8.4h, v2.h[7]\n"
- "ldr d2, [x25, #0x0]\n"
- "usubl v2.8h, v2.8b, v3.8b\n"
- "smlal v24.4s, v8.4h, v7.h[0]\n"
- "smlal v25.4s, v8.4h, v7.h[1]\n"
- "smlal v26.4s, v8.4h, v7.h[2]\n"
- "smlal v27.4s, v8.4h, v7.h[3]\n"
- "smlal v28.4s, v8.4h, v7.h[4]\n"
- "smlal v29.4s, v8.4h, v7.h[5]\n"
- "smlal v30.4s, v8.4h, v7.h[6]\n"
- "smlal v31.4s, v8.4h, v7.h[7]\n"
- "ldr d7, [x28, #0x0]\n"
- "ldr s8, [%x[weights]], #0x4\n"
- "ldp x25, x28, [x20], #0x10\n"
- "smlal v16.4s, v6.4h, v1.h[0]\n"
- "smlal v17.4s, v6.4h, v1.h[1]\n"
- "usubl v7.8h, v7.8b, v3.8b\n"
- "smlal v18.4s, v6.4h, v1.h[2]\n"
- "smlal v19.4s, v6.4h, v1.h[3]\n"
- "usubl v8.8h, v8.8b, v12.8b\n"
- "smlal v20.4s, v6.4h, v1.h[4]\n"
- "smlal v21.4s, v6.4h, v1.h[5]\n"
- "smlal v22.4s, v6.4h, v1.h[6]\n"
- "smlal v23.4s, v6.4h, v1.h[7]\n"
+ "smlal v6.4s, v17.4h, v3.h[0]\n"
+ "ldp x25, x27, [x19], #0x10\n"
+ "subs x20, x20, #0x1\n"
+ "smlal v5.4s, v17.4h, v3.h[1]\n"
+ "smlal v4.4s, v17.4h, v3.h[2]\n"
+ "smlal v31.4s, v17.4h, v3.h[3]\n"
+ "smlal v30.4s, v17.4h, v3.h[4]\n"
+ "smlal v29.4s, v17.4h, v3.h[5]\n"
+ "smlal v28.4s, v17.4h, v3.h[6]\n"
+ "smlal v27.4s, v17.4h, v3.h[7]\n"
+ "ldr d3, [x25, #0x0]\n"
+ "smlal v26.4s, v17.4h, v2.h[0]\n"
+ "smlal v25.4s, v17.4h, v2.h[1]\n"
+ "smlal v24.4s, v17.4h, v2.h[2]\n"
+ "smlal v23.4s, v17.4h, v2.h[3]\n"
+ "smlal v22.4s, v17.4h, v2.h[4]\n"
+ "smlal v21.4s, v17.4h, v2.h[5]\n"
+ "smlal v20.4s, v17.4h, v2.h[6]\n"
+ "smlal v19.4s, v17.4h, v2.h[7]\n"
+ "ldr d2, [x27, #0x0]\n"
+ "usubl v3.8h, v3.8b, v12.8b\n"
+ "ldr s17, [%x[weights]], #0x4\n"
+ "smlal v6.4s, v16.4h, v1.h[0]\n"
+ "ldp x25, x27, [x19], #0x10\n"
+ "smlal v5.4s, v16.4h, v1.h[1]\n"
+ "smlal v4.4s, v16.4h, v1.h[2]\n"
+ "usubl v2.8h, v2.8b, v12.8b\n"
+ "usubl v17.8h, v17.8b, v11.8b\n"
+ "smlal v31.4s, v16.4h, v1.h[3]\n"
+ "smlal v30.4s, v16.4h, v1.h[4]\n"
+ "smlal v29.4s, v16.4h, v1.h[5]\n"
+ "smlal v28.4s, v16.4h, v1.h[6]\n"
+ "smlal v27.4s, v16.4h, v1.h[7]\n"
"ldr d1, [x25, #0x0]\n"
- "usubl v1.8h, v1.8b, v3.8b\n"
- "smlal v24.4s, v6.4h, v0.h[0]\n"
- "smlal v25.4s, v6.4h, v0.h[1]\n"
- "smlal v26.4s, v6.4h, v0.h[2]\n"
- "smlal v27.4s, v6.4h, v0.h[3]\n"
- "smlal v28.4s, v6.4h, v0.h[4]\n"
- "smlal v29.4s, v6.4h, v0.h[5]\n"
- "smlal v30.4s, v6.4h, v0.h[6]\n"
- "smlal v31.4s, v6.4h, v0.h[7]\n"
- "ldr d0, [x28, #0x0]\n"
- "ldr s6, [%x[weights]], #0x4\n"
- "usubl v0.8h, v0.8b, v3.8b\n"
- "usubl v6.8h, v6.8b, v12.8b\n"
+ "smlal v26.4s, v16.4h, v0.h[0]\n"
+ "smlal v25.4s, v16.4h, v0.h[1]\n"
+ "smlal v24.4s, v16.4h, v0.h[2]\n"
+ "smlal v23.4s, v16.4h, v0.h[3]\n"
+ "smlal v22.4s, v16.4h, v0.h[4]\n"
+ "smlal v21.4s, v16.4h, v0.h[5]\n"
+ "smlal v20.4s, v16.4h, v0.h[6]\n"
+ "smlal v19.4s, v16.4h, v0.h[7]\n"
+ "ldr d0, [x27, #0x0]\n"
+ "usubl v1.8h, v1.8b, v12.8b\n"
+ "ldr s16, [%x[weights]], #0x4\n"
+ "usubl v0.8h, v0.8b, v12.8b\n"
+ "usubl v16.8h, v16.8b, v11.8b\n"
"bgt 4b\n"
"5:" // Output channel loop: Kernel loop tail
"tbnz %x[kernel_points], #0, 6f\n"
- "smlal v16.4s, v8.4h, v2.h[0]\n"
- "smlal v17.4s, v8.4h, v2.h[1]\n"
- "ldr x20, [%x[outptrs], #0x0]\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
- "smlal v18.4s, v8.4h, v2.h[2]\n"
- "smlal v19.4s, v8.4h, v2.h[3]\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
- "smlal v16.4s, v6.4h, v1.h[0]\n"
- "smlal v17.4s, v6.4h, v1.h[1]\n"
- "sshl v16.4s, v16.4s, v15.4s\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "smlal v18.4s, v6.4h, v1.h[2]\n"
- "smlal v19.4s, v6.4h, v1.h[3]\n"
- "sshl v17.4s, v17.4s, v15.4s\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
- "smlal v20.4s, v8.4h, v2.h[4]\n"
- "smlal v21.4s, v8.4h, v2.h[5]\n"
- "sshl v18.4s, v18.4s, v15.4s\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "smlal v22.4s, v8.4h, v2.h[6]\n"
- "smlal v23.4s, v8.4h, v2.h[7]\n"
- "sshl v19.4s, v19.4s, v15.4s\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
- "smlal v24.4s, v8.4h, v7.h[0]\n"
- "smlal v25.4s, v8.4h, v7.h[1]\n"
- "sqrdmulh v16.4s, v16.4s, v9.4s\n"
- "smlal v20.4s, v6.4h, v1.h[4]\n"
- "smlal v21.4s, v6.4h, v1.h[5]\n"
- "sqrdmulh v17.4s, v17.4s, v9.4s\n"
- "smlal v22.4s, v6.4h, v1.h[6]\n"
- "smlal v23.4s, v6.4h, v1.h[7]\n"
- "sqrdmulh v18.4s, v18.4s, v9.4s\n"
- "smlal v24.4s, v6.4h, v0.h[0]\n"
- "smlal v25.4s, v6.4h, v0.h[1]\n"
- "sqrdmulh v19.4s, v19.4s, v9.4s\n"
- "smlal v26.4s, v8.4h, v7.h[2]\n"
- "smlal v27.4s, v8.4h, v7.h[3]\n"
- "and v5.16b, v16.16b, v10.16b\n"
- "smlal v28.4s, v8.4h, v7.h[4]\n"
- "smlal v29.4s, v8.4h, v7.h[5]\n"
- "and v4.16b, v17.16b, v10.16b\n"
- "smlal v30.4s, v8.4h, v7.h[6]\n"
- "smlal v31.4s, v8.4h, v7.h[7]\n"
- "and v2.16b, v18.16b, v10.16b\n"
- "and v1.16b, v19.16b, v10.16b\n"
- "sshl v20.4s, v20.4s, v15.4s\n"
- "smlal v26.4s, v6.4h, v0.h[2]\n"
- "sshl v21.4s, v21.4s, v15.4s\n"
- "sshl v22.4s, v22.4s, v15.4s\n"
- "smlal v27.4s, v6.4h, v0.h[3]\n"
- "sshl v23.4s, v23.4s, v15.4s\n"
- "sshl v24.4s, v24.4s, v15.4s\n"
- "smlal v28.4s, v6.4h, v0.h[4]\n"
- "sshl v25.4s, v25.4s, v15.4s\n"
- "smlal v29.4s, v6.4h, v0.h[5]\n"
- "smlal v30.4s, v6.4h, v0.h[6]\n"
- "smlal v31.4s, v6.4h, v0.h[7]\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "sqrdmulh v21.4s, v21.4s, v9.4s\n"
- "sqrdmulh v22.4s, v22.4s, v9.4s\n"
- "sqrdmulh v23.4s, v23.4s, v9.4s\n"
- "sqrdmulh v24.4s, v24.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqadd v16.4s, v16.4s, v5.4s\n"
- "sqadd v17.4s, v17.4s, v4.4s\n"
- "sqadd v18.4s, v18.4s, v2.4s\n"
- "sqadd v19.4s, v19.4s, v1.4s\n"
- "and v8.16b, v20.16b, v10.16b\n"
- "and v0.16b, v21.16b, v10.16b\n"
- "and v5.16b, v22.16b, v10.16b\n"
- "and v4.16b, v23.16b, v10.16b\n"
- "and v2.16b, v24.16b, v10.16b\n"
- "and v1.16b, v25.16b, v10.16b\n"
- "sshl v26.4s, v26.4s, v15.4s\n"
- "sshl v27.4s, v27.4s, v15.4s\n"
- "sshl v28.4s, v28.4s, v15.4s\n"
- "sshl v29.4s, v29.4s, v15.4s\n"
- "sshl v30.4s, v30.4s, v15.4s\n"
- "sshl v31.4s, v31.4s, v15.4s\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v9.4s\n"
- "sqrdmulh v27.4s, v27.4s, v9.4s\n"
- "sqrdmulh v28.4s, v28.4s, v9.4s\n"
- "sqrdmulh v29.4s, v29.4s, v9.4s\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v31.4s, v31.4s, v9.4s\n"
- "sqadd v20.4s, v20.4s, v8.4s\n"
- "sqadd v21.4s, v21.4s, v0.4s\n"
- "sqadd v22.4s, v22.4s, v5.4s\n"
- "sqadd v23.4s, v23.4s, v4.4s\n"
- "sqadd v24.4s, v24.4s, v2.4s\n"
- "sqadd v25.4s, v25.4s, v1.4s\n"
- "and v8.16b, v26.16b, v10.16b\n"
- "and v0.16b, v27.16b, v10.16b\n"
- "and v5.16b, v28.16b, v10.16b\n"
- "and v4.16b, v29.16b, v10.16b\n"
- "and v2.16b, v30.16b, v10.16b\n"
- "and v1.16b, v31.16b, v10.16b\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "srshl v16.4s, v16.4s, v10.4s\n"
- "srshl v17.4s, v17.4s, v10.4s\n"
- "srshl v18.4s, v18.4s, v10.4s\n"
- "srshl v19.4s, v19.4s, v10.4s\n"
- "srshl v20.4s, v20.4s, v10.4s\n"
- "srshl v21.4s, v21.4s, v10.4s\n"
- "srshl v22.4s, v22.4s, v10.4s\n"
- "srshl v23.4s, v23.4s, v10.4s\n"
- "sqadd v26.4s, v26.4s, v8.4s\n"
- "sqadd v27.4s, v27.4s, v0.4s\n"
- "sqadd v28.4s, v28.4s, v5.4s\n"
- "sqadd v29.4s, v29.4s, v4.4s\n"
- "sqadd v30.4s, v30.4s, v2.4s\n"
- "sqadd v31.4s, v31.4s, v1.4s\n"
- "add v16.4s, v16.4s, v14.4s\n"
- "add v17.4s, v17.4s, v14.4s\n"
- "add v18.4s, v18.4s, v14.4s\n"
- "add v19.4s, v19.4s, v14.4s\n"
- "add v20.4s, v20.4s, v14.4s\n"
- "add v21.4s, v21.4s, v14.4s\n"
- "add v22.4s, v22.4s, v14.4s\n"
- "add v23.4s, v23.4s, v14.4s\n"
- "srshl v24.4s, v24.4s, v10.4s\n"
- "srshl v25.4s, v25.4s, v10.4s\n"
- "srshl v26.4s, v26.4s, v10.4s\n"
- "srshl v27.4s, v27.4s, v10.4s\n"
- "srshl v28.4s, v28.4s, v10.4s\n"
- "srshl v29.4s, v29.4s, v10.4s\n"
- "srshl v30.4s, v30.4s, v10.4s\n"
- "srshl v31.4s, v31.4s, v10.4s\n"
- "smin v16.4s, v16.4s, v11.4s\n"
- "smin v17.4s, v17.4s, v11.4s\n"
- "smin v18.4s, v18.4s, v11.4s\n"
- "smin v19.4s, v19.4s, v11.4s\n"
- "smin v20.4s, v20.4s, v11.4s\n"
- "smin v21.4s, v21.4s, v11.4s\n"
- "smin v22.4s, v22.4s, v11.4s\n"
- "smin v23.4s, v23.4s, v11.4s\n"
- "add v24.4s, v24.4s, v14.4s\n"
- "add v25.4s, v25.4s, v14.4s\n"
- "add v26.4s, v26.4s, v14.4s\n"
- "add v27.4s, v27.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "smax v16.4s, v16.4s, v13.4s\n"
- "smax v17.4s, v17.4s, v13.4s\n"
- "smax v18.4s, v18.4s, v13.4s\n"
- "smax v19.4s, v19.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smax v21.4s, v21.4s, v13.4s\n"
- "smax v22.4s, v22.4s, v13.4s\n"
- "smax v23.4s, v23.4s, v13.4s\n"
- "smin v24.4s, v24.4s, v11.4s\n"
- "smin v25.4s, v25.4s, v11.4s\n"
- "smin v26.4s, v26.4s, v11.4s\n"
- "smin v27.4s, v27.4s, v11.4s\n"
- "smin v28.4s, v28.4s, v11.4s\n"
- "smin v29.4s, v29.4s, v11.4s\n"
- "smin v30.4s, v30.4s, v11.4s\n"
- "smin v31.4s, v31.4s, v11.4s\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "uzp1 v17.16b, v17.16b, v17.16b\n"
- "uzp1 v18.16b, v18.16b, v18.16b\n"
- "uzp1 v19.16b, v19.16b, v19.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v22.16b, v22.16b, v22.16b\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "smax v24.4s, v24.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v26.4s, v26.4s, v13.4s\n"
- "smax v27.4s, v27.4s, v13.4s\n"
- "smax v28.4s, v28.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v31.4s, v31.4s, v13.4s\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "str s16, [x20, x9]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "uzp1 v17.16b, v17.16b, v17.16b\n"
- "uzp1 v18.16b, v18.16b, v18.16b\n"
- "str s17, [x21, x9]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
- "uzp1 v19.16b, v19.16b, v19.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s18, [x22, x9]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v22.16b, v22.16b, v22.16b\n"
- "str s19, [x23, x9]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s20, [x24, x9]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s21, [x25, x9]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s22, [x26, x9]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s23, [x27, x9]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
+ "smlal v6.4s, v17.4h, v3.h[0]\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "smlal v5.4s, v17.4h, v3.h[1]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "smlal v4.4s, v17.4h, v3.h[2]\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "smlal v31.4s, v17.4h, v3.h[3]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
+ "smlal v30.4s, v17.4h, v3.h[4]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
+ "smlal v29.4s, v17.4h, v3.h[5]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
+ "smlal v28.4s, v17.4h, v3.h[6]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
+ "smlal v27.4s, v17.4h, v3.h[7]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
+ "smlal v26.4s, v17.4h, v2.h[0]\n"
+ "smlal v25.4s, v17.4h, v2.h[1]\n"
+ "smlal v24.4s, v17.4h, v2.h[2]\n"
+ "smlal v23.4s, v17.4h, v2.h[3]\n"
+ "smlal v22.4s, v17.4h, v2.h[4]\n"
+ "smlal v21.4s, v17.4h, v2.h[5]\n"
+ "smlal v20.4s, v17.4h, v2.h[6]\n"
+ "smlal v19.4s, v17.4h, v2.h[7]\n"
+ "smlal v6.4s, v16.4h, v1.h[0]\n"
+ "smlal v5.4s, v16.4h, v1.h[1]\n"
+ "smlal v4.4s, v16.4h, v1.h[2]\n"
+ "smlal v31.4s, v16.4h, v1.h[3]\n"
+ "smlal v30.4s, v16.4h, v1.h[4]\n"
+ "smlal v29.4s, v16.4h, v1.h[5]\n"
+ "smlal v28.4s, v16.4h, v1.h[6]\n"
+ "smlal v27.4s, v16.4h, v1.h[7]\n"
+ "smlal v26.4s, v16.4h, v0.h[0]\n"
+ "smlal v25.4s, v16.4h, v0.h[1]\n"
+ "smlal v24.4s, v16.4h, v0.h[2]\n"
+ "smlal v23.4s, v16.4h, v0.h[3]\n"
+ "smlal v22.4s, v16.4h, v0.h[4]\n"
+ "smlal v21.4s, v16.4h, v0.h[5]\n"
+ "smlal v20.4s, v16.4h, v0.h[6]\n"
+ "smlal v19.4s, v16.4h, v0.h[7]\n"
+ "sshl v6.4s, v6.4s, v9.4s\n"
+ "sshl v5.4s, v5.4s, v9.4s\n"
+ "sqrdmulh v6.4s, v6.4s, v8.4s\n"
+ "sqrdmulh v5.4s, v5.4s, v8.4s\n"
+ "sshl v4.4s, v4.4s, v9.4s\n"
+ "sshl v31.4s, v31.4s, v9.4s\n"
+ "and v18.16b, v6.16b, v7.16b\n"
+ "and v16.16b, v5.16b, v7.16b\n"
+ "sqrdmulh v4.4s, v4.4s, v8.4s\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v31.4s, v31.4s, v8.4s\n"
+ "sqadd v6.4s, v6.4s, v18.4s\n"
+ "sqadd v5.4s, v5.4s, v16.4s\n"
+ "and v17.16b, v4.16b, v7.16b\n"
+ "and v16.16b, v31.16b, v7.16b\n"
+ "srshl v6.4s, v6.4s, v7.4s\n"
+ "srshl v5.4s, v5.4s, v7.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "add v6.4s, v6.4s, v10.4s\n"
+ "add v5.4s, v5.4s, v10.4s\n"
+ "sqadd v4.4s, v4.4s, v17.4s\n"
+ "smin v6.4s, v6.4s, v13.4s\n"
+ "smin v5.4s, v5.4s, v13.4s\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
+ "smax v6.4s, v6.4s, v14.4s\n"
+ "smax v5.4s, v5.4s, v14.4s\n"
+ "srshl v4.4s, v4.4s, v7.4s\n"
+ "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "str s6, [x19, x9]\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "add v4.4s, v4.4s, v10.4s\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "srshl v31.4s, v31.4s, v7.4s\n"
+ "str s5, [x20, x9]\n"
+ "sshl v30.4s, v30.4s, v9.4s\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
+ "smin v4.4s, v4.4s, v13.4s\n"
+ "sqrdmulh v30.4s, v30.4s, v8.4s\n"
+ "add v31.4s, v31.4s, v10.4s\n"
+ "smax v4.4s, v4.4s, v14.4s\n"
+ "sshl v29.4s, v29.4s, v9.4s\n"
+ "smin v31.4s, v31.4s, v13.4s\n"
+ "uzp1 v4.16b, v4.16b, v4.16b\n"
+ "and v16.16b, v30.16b, v7.16b\n"
+ "uzp1 v4.16b, v4.16b, v4.16b\n"
+ "str s4, [x21, x9]\n"
+ "smax v31.4s, v31.4s, v14.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
+ "sqrdmulh v29.4s, v29.4s, v8.4s\n"
+ "sshl v28.4s, v28.4s, v9.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s24, [x20, x9]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s25, [x21, x9]\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s26, [x22, x9]\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s27, [x23, x9]\n"
+ "sqadd v30.4s, v30.4s, v16.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "str s28, [x24, x9]\n"
- "str s29, [x25, x9]\n"
- "str s30, [x26, x9]\n"
- "str s31, [x27, x9]\n"
- "b 8f\n"
- "6:" // Output channel loop: Odd tail
- "ldp x25, x28, [x20], #0x10\n"
- "smlal v16.4s, v8.4h, v2.h[0]\n"
- "smlal v17.4s, v8.4h, v2.h[1]\n"
- "ldr x20, [%x[outptrs], #0x0]\n"
- "smlal v18.4s, v8.4h, v2.h[2]\n"
- "smlal v19.4s, v8.4h, v2.h[3]\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "smlal v20.4s, v8.4h, v2.h[4]\n"
- "smlal v21.4s, v8.4h, v2.h[5]\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "smlal v22.4s, v8.4h, v2.h[6]\n"
- "smlal v23.4s, v8.4h, v2.h[7]\n"
- "ldr d2, [x25, #0x0]\n"
- "usubl v2.8h, v2.8b, v3.8b\n"
- "smlal v24.4s, v8.4h, v7.h[0]\n"
- "smlal v25.4s, v8.4h, v7.h[1]\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "smlal v26.4s, v8.4h, v7.h[2]\n"
- "smlal v27.4s, v8.4h, v7.h[3]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
- "smlal v28.4s, v8.4h, v7.h[4]\n"
- "smlal v29.4s, v8.4h, v7.h[5]\n"
- "smlal v30.4s, v8.4h, v7.h[6]\n"
- "smlal v31.4s, v8.4h, v7.h[7]\n"
- "ldr s8, [%x[weights]], #0x4\n"
- "ldr d7, [x28, #0x0]\n"
- "smlal v16.4s, v6.4h, v1.h[0]\n"
- "smlal v17.4s, v6.4h, v1.h[1]\n"
- "usubl v8.8h, v8.8b, v12.8b\n"
- "smlal v18.4s, v6.4h, v1.h[2]\n"
- "smlal v19.4s, v6.4h, v1.h[3]\n"
- "usubl v7.8h, v7.8b, v3.8b\n"
- "smlal v16.4s, v8.4h, v2.h[0]\n"
- "smlal v17.4s, v8.4h, v2.h[1]\n"
- "sshl v16.4s, v16.4s, v15.4s\n"
- "smlal v18.4s, v8.4h, v2.h[2]\n"
- "smlal v19.4s, v8.4h, v2.h[3]\n"
- "sshl v17.4s, v17.4s, v15.4s\n"
- "smlal v20.4s, v6.4h, v1.h[4]\n"
- "smlal v21.4s, v6.4h, v1.h[5]\n"
- "sshl v18.4s, v18.4s, v15.4s\n"
- "smlal v22.4s, v6.4h, v1.h[6]\n"
- "smlal v23.4s, v6.4h, v1.h[7]\n"
- "sshl v19.4s, v19.4s, v15.4s\n"
- "smlal v24.4s, v6.4h, v0.h[0]\n"
- "smlal v25.4s, v6.4h, v0.h[1]\n"
- "sqrdmulh v16.4s, v16.4s, v9.4s\n"
- "smlal v20.4s, v8.4h, v2.h[4]\n"
- "smlal v21.4s, v8.4h, v2.h[5]\n"
- "sqrdmulh v17.4s, v17.4s, v9.4s\n"
- "smlal v22.4s, v8.4h, v2.h[6]\n"
- "smlal v23.4s, v8.4h, v2.h[7]\n"
- "sqrdmulh v18.4s, v18.4s, v9.4s\n"
- "smlal v24.4s, v8.4h, v7.h[0]\n"
- "smlal v25.4s, v8.4h, v7.h[1]\n"
- "sqrdmulh v19.4s, v19.4s, v9.4s\n"
- "smlal v26.4s, v6.4h, v0.h[2]\n"
- "smlal v27.4s, v6.4h, v0.h[3]\n"
- "and v5.16b, v16.16b, v10.16b\n"
- "smlal v28.4s, v6.4h, v0.h[4]\n"
- "smlal v29.4s, v6.4h, v0.h[5]\n"
- "and v4.16b, v17.16b, v10.16b\n"
- "smlal v30.4s, v6.4h, v0.h[6]\n"
- "smlal v31.4s, v6.4h, v0.h[7]\n"
- "and v2.16b, v18.16b, v10.16b\n"
- "and v1.16b, v19.16b, v10.16b\n"
- "sshl v20.4s, v20.4s, v15.4s\n"
- "smlal v26.4s, v8.4h, v7.h[2]\n"
- "sshl v21.4s, v21.4s, v15.4s\n"
- "sshl v22.4s, v22.4s, v15.4s\n"
- "smlal v27.4s, v8.4h, v7.h[3]\n"
- "sshl v23.4s, v23.4s, v15.4s\n"
- "sshl v24.4s, v24.4s, v15.4s\n"
- "smlal v28.4s, v8.4h, v7.h[4]\n"
- "sshl v25.4s, v25.4s, v15.4s\n"
- "smlal v29.4s, v8.4h, v7.h[5]\n"
- "smlal v30.4s, v8.4h, v7.h[6]\n"
- "smlal v31.4s, v8.4h, v7.h[7]\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "sqrdmulh v21.4s, v21.4s, v9.4s\n"
- "sqrdmulh v22.4s, v22.4s, v9.4s\n"
- "sqrdmulh v23.4s, v23.4s, v9.4s\n"
- "sqrdmulh v24.4s, v24.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqadd v16.4s, v16.4s, v5.4s\n"
- "sqadd v17.4s, v17.4s, v4.4s\n"
- "sqadd v18.4s, v18.4s, v2.4s\n"
- "sqadd v19.4s, v19.4s, v1.4s\n"
- "and v8.16b, v20.16b, v10.16b\n"
- "and v0.16b, v21.16b, v10.16b\n"
- "and v5.16b, v22.16b, v10.16b\n"
- "and v4.16b, v23.16b, v10.16b\n"
- "and v2.16b, v24.16b, v10.16b\n"
- "and v1.16b, v25.16b, v10.16b\n"
- "sshl v26.4s, v26.4s, v15.4s\n"
- "sshl v27.4s, v27.4s, v15.4s\n"
- "sshl v28.4s, v28.4s, v15.4s\n"
- "sshl v29.4s, v29.4s, v15.4s\n"
- "sshl v30.4s, v30.4s, v15.4s\n"
- "sshl v31.4s, v31.4s, v15.4s\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v9.4s\n"
- "sqrdmulh v27.4s, v27.4s, v9.4s\n"
- "sqrdmulh v28.4s, v28.4s, v9.4s\n"
- "sqrdmulh v29.4s, v29.4s, v9.4s\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v31.4s, v31.4s, v9.4s\n"
- "sqadd v20.4s, v20.4s, v8.4s\n"
- "sqadd v21.4s, v21.4s, v0.4s\n"
- "sqadd v22.4s, v22.4s, v5.4s\n"
- "sqadd v23.4s, v23.4s, v4.4s\n"
- "sqadd v24.4s, v24.4s, v2.4s\n"
- "sqadd v25.4s, v25.4s, v1.4s\n"
- "and v8.16b, v26.16b, v10.16b\n"
- "and v0.16b, v27.16b, v10.16b\n"
- "and v5.16b, v28.16b, v10.16b\n"
- "and v4.16b, v29.16b, v10.16b\n"
- "and v2.16b, v30.16b, v10.16b\n"
- "and v1.16b, v31.16b, v10.16b\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "srshl v16.4s, v16.4s, v10.4s\n"
- "srshl v17.4s, v17.4s, v10.4s\n"
- "srshl v18.4s, v18.4s, v10.4s\n"
- "srshl v19.4s, v19.4s, v10.4s\n"
- "srshl v20.4s, v20.4s, v10.4s\n"
- "srshl v21.4s, v21.4s, v10.4s\n"
- "srshl v22.4s, v22.4s, v10.4s\n"
- "srshl v23.4s, v23.4s, v10.4s\n"
- "sqadd v26.4s, v26.4s, v8.4s\n"
- "sqadd v27.4s, v27.4s, v0.4s\n"
- "sqadd v28.4s, v28.4s, v5.4s\n"
- "sqadd v29.4s, v29.4s, v4.4s\n"
- "sqadd v30.4s, v30.4s, v2.4s\n"
- "sqadd v31.4s, v31.4s, v1.4s\n"
- "add v16.4s, v16.4s, v14.4s\n"
- "add v17.4s, v17.4s, v14.4s\n"
- "add v18.4s, v18.4s, v14.4s\n"
- "add v19.4s, v19.4s, v14.4s\n"
- "add v20.4s, v20.4s, v14.4s\n"
- "add v21.4s, v21.4s, v14.4s\n"
- "add v22.4s, v22.4s, v14.4s\n"
- "add v23.4s, v23.4s, v14.4s\n"
- "srshl v24.4s, v24.4s, v10.4s\n"
- "srshl v25.4s, v25.4s, v10.4s\n"
- "srshl v26.4s, v26.4s, v10.4s\n"
- "srshl v27.4s, v27.4s, v10.4s\n"
- "srshl v28.4s, v28.4s, v10.4s\n"
- "srshl v29.4s, v29.4s, v10.4s\n"
- "srshl v30.4s, v30.4s, v10.4s\n"
- "srshl v31.4s, v31.4s, v10.4s\n"
- "smin v16.4s, v16.4s, v11.4s\n"
- "smin v17.4s, v17.4s, v11.4s\n"
- "smin v18.4s, v18.4s, v11.4s\n"
- "smin v19.4s, v19.4s, v11.4s\n"
- "smin v20.4s, v20.4s, v11.4s\n"
- "smin v21.4s, v21.4s, v11.4s\n"
- "smin v22.4s, v22.4s, v11.4s\n"
- "smin v23.4s, v23.4s, v11.4s\n"
- "add v24.4s, v24.4s, v14.4s\n"
- "add v25.4s, v25.4s, v14.4s\n"
- "add v26.4s, v26.4s, v14.4s\n"
- "add v27.4s, v27.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "smax v16.4s, v16.4s, v13.4s\n"
- "smax v17.4s, v17.4s, v13.4s\n"
- "smax v18.4s, v18.4s, v13.4s\n"
- "smax v19.4s, v19.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smax v21.4s, v21.4s, v13.4s\n"
- "smax v22.4s, v22.4s, v13.4s\n"
- "smax v23.4s, v23.4s, v13.4s\n"
- "smin v24.4s, v24.4s, v11.4s\n"
- "smin v25.4s, v25.4s, v11.4s\n"
- "smin v26.4s, v26.4s, v11.4s\n"
- "smin v27.4s, v27.4s, v11.4s\n"
- "smin v28.4s, v28.4s, v11.4s\n"
- "smin v29.4s, v29.4s, v11.4s\n"
- "smin v30.4s, v30.4s, v11.4s\n"
- "smin v31.4s, v31.4s, v11.4s\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "uzp1 v17.16b, v17.16b, v17.16b\n"
- "uzp1 v18.16b, v18.16b, v18.16b\n"
- "uzp1 v19.16b, v19.16b, v19.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str s31, [x22, x9]\n"
+ "and v17.16b, v29.16b, v7.16b\n"
+ "sqrdmulh v28.4s, v28.4s, v8.4s\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
+ "srshl v30.4s, v30.4s, v7.4s\n"
+ "sshl v27.4s, v27.4s, v9.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v28.16b, v7.16b\n"
+ "add v30.4s, v30.4s, v10.4s\n"
+ "sqadd v29.4s, v29.4s, v17.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smin v30.4s, v30.4s, v13.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v8.4s\n"
+ "srshl v29.4s, v29.4s, v7.4s\n"
+ "smax v30.4s, v30.4s, v14.4s\n"
+ "sqadd v28.4s, v28.4s, v16.4s\n"
+ "and v16.16b, v27.16b, v7.16b\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "add v29.4s, v29.4s, v10.4s\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "str s30, [x23, x9]\n"
+ "smin v29.4s, v29.4s, v13.4s\n"
+ "srshl v28.4s, v28.4s, v7.4s\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshl v26.4s, v26.4s, v9.4s\n"
+ "smax v29.4s, v29.4s, v14.4s\n"
+ "add v28.4s, v28.4s, v10.4s\n"
+ "sqadd v27.4s, v27.4s, v16.4s\n"
+ "uzp1 v29.16b, v29.16b, v29.16b\n"
+ "smin v28.4s, v28.4s, v13.4s\n"
+ "uzp1 v29.16b, v29.16b, v29.16b\n"
+ "str s29, [x24, x9]\n"
+ "smax v28.4s, v28.4s, v14.4s\n"
+ "srshl v27.4s, v27.4s, v7.4s\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
+ "sqrdmulh v26.4s, v26.4s, v8.4s\n"
+ "sshl v25.4s, v25.4s, v9.4s\n"
+ "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "add v27.4s, v27.4s, v10.4s\n"
+ "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "str s28, [x25, x9]\n"
+ "smin v27.4s, v27.4s, v13.4s\n"
+ "and v17.16b, v26.16b, v7.16b\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
+ "sqrdmulh v25.4s, v25.4s, v8.4s\n"
+ "sshl v24.4s, v24.4s, v9.4s\n"
+ "smax v27.4s, v27.4s, v14.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v25.16b, v7.16b\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "sqadd v26.4s, v26.4s, v17.4s\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "str s27, [x26, x9]\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v24.4s, v24.4s, v8.4s\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
+ "srshl v26.4s, v26.4s, v7.4s\n"
+ "sshl v23.4s, v23.4s, v9.4s\n"
+ "sqadd v25.4s, v25.4s, v16.4s\n"
+ "and v17.16b, v24.16b, v7.16b\n"
+ "add v26.4s, v26.4s, v10.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v8.4s\n"
+ "srshl v25.4s, v25.4s, v7.4s\n"
+ "smin v26.4s, v26.4s, v13.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v23.16b, v7.16b\n"
+ "smax v26.4s, v26.4s, v14.4s\n"
+ "add v25.4s, v25.4s, v10.4s\n"
+ "sqadd v24.4s, v24.4s, v17.4s\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "smin v25.4s, v25.4s, v13.4s\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "str s26, [x19, x9]\n"
+ "smax v25.4s, v25.4s, v14.4s\n"
+ "srshl v24.4s, v24.4s, v7.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshl v22.4s, v22.4s, v9.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "add v24.4s, v24.4s, v10.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s25, [x20, x9]\n"
+ "smin v24.4s, v24.4s, v13.4s\n"
+ "sqadd v23.4s, v23.4s, v16.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v8.4s\n"
+ "sshl v21.4s, v21.4s, v9.4s\n"
+ "smax v24.4s, v24.4s, v14.4s\n"
+ "srshl v23.4s, v23.4s, v7.4s\n"
+ "and v17.16b, v22.16b, v7.16b\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v8.4s\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "str s24, [x21, x9]\n"
+ "add v23.4s, v23.4s, v10.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v21.16b, v7.16b\n"
+ "sshl v20.4s, v20.4s, v9.4s\n"
+ "smin v23.4s, v23.4s, v13.4s\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smax v23.4s, v23.4s, v14.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v8.4s\n"
+ "srshl v22.4s, v22.4s, v7.4s\n"
"uzp1 v23.16b, v23.16b, v23.16b\n"
- "smax v24.4s, v24.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v26.4s, v26.4s, v13.4s\n"
- "smax v27.4s, v27.4s, v13.4s\n"
- "smax v28.4s, v28.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v31.4s, v31.4s, v13.4s\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "str s16, [x20, x9]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "uzp1 v17.16b, v17.16b, v17.16b\n"
- "uzp1 v18.16b, v18.16b, v18.16b\n"
- "str s17, [x21, x9]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
- "uzp1 v19.16b, v19.16b, v19.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s18, [x22, x9]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v22.16b, v22.16b, v22.16b\n"
- "str s19, [x23, x9]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
+ "sqadd v21.4s, v21.4s, v16.4s\n"
"uzp1 v23.16b, v23.16b, v23.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s20, [x24, x9]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s21, [x25, x9]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s22, [x26, x9]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s23, [x27, x9]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
+ "str s23, [x22, x9]\n"
+ "add v22.4s, v22.4s, v10.4s\n"
+ "and v16.16b, v20.16b, v7.16b\n"
+ "srshl v21.4s, v21.4s, v7.4s\n"
+ "sshl v19.4s, v19.4s, v9.4s\n"
+ "smin v22.4s, v22.4s, v13.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "add v21.4s, v21.4s, v10.4s\n"
+ "smax v22.4s, v22.4s, v14.4s\n"
+ "sqadd v20.4s, v20.4s, v16.4s\n"
+ "smin v21.4s, v21.4s, v13.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "sqrdmulh v19.4s, v19.4s, v8.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str s22, [x23, x9]\n"
+ "smax v21.4s, v21.4s, v14.4s\n"
+ "srshl v20.4s, v20.4s, v7.4s\n"
+ "and v16.16b, v19.16b, v7.16b\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "add v20.4s, v20.4s, v10.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "str s21, [x24, x9]\n"
+ "smin v20.4s, v20.4s, v13.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "smax v20.4s, v20.4s, v14.4s\n"
+ "srshl v19.4s, v19.4s, v7.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "str s20, [x25, x9]\n"
+ "add v19.4s, v19.4s, v10.4s\n"
+ "smin v19.4s, v19.4s, v13.4s\n"
+ "smax v19.4s, v19.4s, v14.4s\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str s19, [x26, x9]\n"
+ "b 8f\n"
+ "6:" // Output channel loop: Odd tail
+ "smlal v6.4s, v17.4h, v3.h[0]\n"
+ "ldp x25, x27, [x19], #0x10\n"
+ "smlal v5.4s, v17.4h, v3.h[1]\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "smlal v4.4s, v17.4h, v3.h[2]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "smlal v31.4s, v17.4h, v3.h[3]\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "smlal v30.4s, v17.4h, v3.h[4]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
+ "smlal v29.4s, v17.4h, v3.h[5]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
+ "smlal v28.4s, v17.4h, v3.h[6]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
+ "smlal v27.4s, v17.4h, v3.h[7]\n"
+ "ldr d3, [x25, #0x0]\n"
+ "smlal v26.4s, v17.4h, v2.h[0]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
+ "smlal v25.4s, v17.4h, v2.h[1]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
+ "smlal v24.4s, v17.4h, v2.h[2]\n"
+ "smlal v23.4s, v17.4h, v2.h[3]\n"
+ "smlal v22.4s, v17.4h, v2.h[4]\n"
+ "smlal v21.4s, v17.4h, v2.h[5]\n"
+ "smlal v20.4s, v17.4h, v2.h[6]\n"
+ "smlal v19.4s, v17.4h, v2.h[7]\n"
+ "ldr d2, [x27, #0x0]\n"
+ "usubl v3.8h, v3.8b, v12.8b\n"
+ "ldr s17, [%x[weights]], #0x4\n"
+ "smlal v6.4s, v16.4h, v1.h[0]\n"
+ "smlal v5.4s, v16.4h, v1.h[1]\n"
+ "smlal v4.4s, v16.4h, v1.h[2]\n"
+ "usubl v2.8h, v2.8b, v12.8b\n"
+ "usubl v17.8h, v17.8b, v11.8b\n"
+ "smlal v31.4s, v16.4h, v1.h[3]\n"
+ "smlal v30.4s, v16.4h, v1.h[4]\n"
+ "smlal v29.4s, v16.4h, v1.h[5]\n"
+ "smlal v28.4s, v16.4h, v1.h[6]\n"
+ "smlal v27.4s, v16.4h, v1.h[7]\n"
+ "smlal v26.4s, v16.4h, v0.h[0]\n"
+ "smlal v25.4s, v16.4h, v0.h[1]\n"
+ "smlal v24.4s, v16.4h, v0.h[2]\n"
+ "smlal v23.4s, v16.4h, v0.h[3]\n"
+ "smlal v22.4s, v16.4h, v0.h[4]\n"
+ "smlal v21.4s, v16.4h, v0.h[5]\n"
+ "smlal v20.4s, v16.4h, v0.h[6]\n"
+ "smlal v19.4s, v16.4h, v0.h[7]\n"
+ "smlal v6.4s, v17.4h, v3.h[0]\n"
+ "smlal v5.4s, v17.4h, v3.h[1]\n"
+ "smlal v4.4s, v17.4h, v3.h[2]\n"
+ "smlal v31.4s, v17.4h, v3.h[3]\n"
+ "smlal v30.4s, v17.4h, v3.h[4]\n"
+ "smlal v29.4s, v17.4h, v3.h[5]\n"
+ "smlal v28.4s, v17.4h, v3.h[6]\n"
+ "smlal v27.4s, v17.4h, v3.h[7]\n"
+ "smlal v26.4s, v17.4h, v2.h[0]\n"
+ "smlal v25.4s, v17.4h, v2.h[1]\n"
+ "smlal v24.4s, v17.4h, v2.h[2]\n"
+ "smlal v23.4s, v17.4h, v2.h[3]\n"
+ "smlal v22.4s, v17.4h, v2.h[4]\n"
+ "smlal v21.4s, v17.4h, v2.h[5]\n"
+ "smlal v20.4s, v17.4h, v2.h[6]\n"
+ "smlal v19.4s, v17.4h, v2.h[7]\n"
+ "sshl v6.4s, v6.4s, v9.4s\n"
+ "sshl v5.4s, v5.4s, v9.4s\n"
+ "sqrdmulh v6.4s, v6.4s, v8.4s\n"
+ "sqrdmulh v5.4s, v5.4s, v8.4s\n"
+ "sshl v4.4s, v4.4s, v9.4s\n"
+ "sshl v31.4s, v31.4s, v9.4s\n"
+ "and v18.16b, v6.16b, v7.16b\n"
+ "and v16.16b, v5.16b, v7.16b\n"
+ "sqrdmulh v4.4s, v4.4s, v8.4s\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v31.4s, v31.4s, v8.4s\n"
+ "sqadd v6.4s, v6.4s, v18.4s\n"
+ "sqadd v5.4s, v5.4s, v16.4s\n"
+ "and v17.16b, v4.16b, v7.16b\n"
+ "and v16.16b, v31.16b, v7.16b\n"
+ "srshl v6.4s, v6.4s, v7.4s\n"
+ "srshl v5.4s, v5.4s, v7.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "add v6.4s, v6.4s, v10.4s\n"
+ "add v5.4s, v5.4s, v10.4s\n"
+ "sqadd v4.4s, v4.4s, v17.4s\n"
+ "smin v6.4s, v6.4s, v13.4s\n"
+ "smin v5.4s, v5.4s, v13.4s\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
+ "smax v6.4s, v6.4s, v14.4s\n"
+ "smax v5.4s, v5.4s, v14.4s\n"
+ "srshl v4.4s, v4.4s, v7.4s\n"
+ "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "str s6, [x19, x9]\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "add v4.4s, v4.4s, v10.4s\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "srshl v31.4s, v31.4s, v7.4s\n"
+ "str s5, [x20, x9]\n"
+ "sshl v30.4s, v30.4s, v9.4s\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
+ "smin v4.4s, v4.4s, v13.4s\n"
+ "sqrdmulh v30.4s, v30.4s, v8.4s\n"
+ "add v31.4s, v31.4s, v10.4s\n"
+ "smax v4.4s, v4.4s, v14.4s\n"
+ "sshl v29.4s, v29.4s, v9.4s\n"
+ "smin v31.4s, v31.4s, v13.4s\n"
+ "uzp1 v4.16b, v4.16b, v4.16b\n"
+ "and v16.16b, v30.16b, v7.16b\n"
+ "uzp1 v4.16b, v4.16b, v4.16b\n"
+ "str s4, [x21, x9]\n"
+ "smax v31.4s, v31.4s, v14.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
+ "sqrdmulh v29.4s, v29.4s, v8.4s\n"
+ "sshl v28.4s, v28.4s, v9.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s24, [x20, x9]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s25, [x21, x9]\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s26, [x22, x9]\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s27, [x23, x9]\n"
+ "sqadd v30.4s, v30.4s, v16.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "str s28, [x24, x9]\n"
- "str s29, [x25, x9]\n"
- "str s30, [x26, x9]\n"
- "str s31, [x27, x9]\n"
- "b 8f\n"
- "7:" // Output channel loop: Single kernel point
- "smlal v16.4s, v8.4h, v2.h[0]\n"
- "smlal v17.4s, v8.4h, v2.h[1]\n"
- "sshl v16.4s, v16.4s, v15.4s\n"
- "ldr x20, [%x[outptrs], #0x0]\n"
- "smlal v18.4s, v8.4h, v2.h[2]\n"
- "smlal v19.4s, v8.4h, v2.h[3]\n"
- "sshl v17.4s, v17.4s, v15.4s\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
- "sshl v18.4s, v18.4s, v15.4s\n"
- "sshl v19.4s, v19.4s, v15.4s\n"
- "smlal v20.4s, v8.4h, v2.h[4]\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "smlal v21.4s, v8.4h, v2.h[5]\n"
- "smlal v22.4s, v8.4h, v2.h[6]\n"
- "sqrdmulh v16.4s, v16.4s, v9.4s\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
- "smlal v23.4s, v8.4h, v2.h[7]\n"
- "smlal v24.4s, v8.4h, v7.h[0]\n"
- "sqrdmulh v17.4s, v17.4s, v9.4s\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "smlal v25.4s, v8.4h, v7.h[1]\n"
- "sqrdmulh v18.4s, v18.4s, v9.4s\n"
- "smlal v26.4s, v8.4h, v7.h[2]\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
- "sqrdmulh v19.4s, v19.4s, v9.4s\n"
- "and v5.16b, v16.16b, v10.16b\n"
- "smlal v27.4s, v8.4h, v7.h[3]\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "and v4.16b, v17.16b, v10.16b\n"
- "and v2.16b, v18.16b, v10.16b\n"
- "smlal v28.4s, v8.4h, v7.h[4]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
- "and v1.16b, v19.16b, v10.16b\n"
- "sshl v20.4s, v20.4s, v15.4s\n"
- "smlal v29.4s, v8.4h, v7.h[5]\n"
- "sshl v21.4s, v21.4s, v15.4s\n"
- "sshl v22.4s, v22.4s, v15.4s\n"
- "smlal v30.4s, v8.4h, v7.h[6]\n"
- "sshl v23.4s, v23.4s, v15.4s\n"
- "sshl v24.4s, v24.4s, v15.4s\n"
- "smlal v31.4s, v8.4h, v7.h[7]\n"
- "sshl v25.4s, v25.4s, v15.4s\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "sqrdmulh v21.4s, v21.4s, v9.4s\n"
- "sqrdmulh v22.4s, v22.4s, v9.4s\n"
- "sqrdmulh v23.4s, v23.4s, v9.4s\n"
- "sqrdmulh v24.4s, v24.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqadd v16.4s, v16.4s, v5.4s\n"
- "sqadd v17.4s, v17.4s, v4.4s\n"
- "sqadd v18.4s, v18.4s, v2.4s\n"
- "sqadd v19.4s, v19.4s, v1.4s\n"
- "and v8.16b, v20.16b, v10.16b\n"
- "and v0.16b, v21.16b, v10.16b\n"
- "and v5.16b, v22.16b, v10.16b\n"
- "and v4.16b, v23.16b, v10.16b\n"
- "and v2.16b, v24.16b, v10.16b\n"
- "and v1.16b, v25.16b, v10.16b\n"
- "sshl v26.4s, v26.4s, v15.4s\n"
- "sshl v27.4s, v27.4s, v15.4s\n"
- "sshl v28.4s, v28.4s, v15.4s\n"
- "sshl v29.4s, v29.4s, v15.4s\n"
- "sshl v30.4s, v30.4s, v15.4s\n"
- "sshl v31.4s, v31.4s, v15.4s\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v9.4s\n"
- "sqrdmulh v27.4s, v27.4s, v9.4s\n"
- "sqrdmulh v28.4s, v28.4s, v9.4s\n"
- "sqrdmulh v29.4s, v29.4s, v9.4s\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v31.4s, v31.4s, v9.4s\n"
- "sqadd v20.4s, v20.4s, v8.4s\n"
- "sqadd v21.4s, v21.4s, v0.4s\n"
- "sqadd v22.4s, v22.4s, v5.4s\n"
- "sqadd v23.4s, v23.4s, v4.4s\n"
- "sqadd v24.4s, v24.4s, v2.4s\n"
- "sqadd v25.4s, v25.4s, v1.4s\n"
- "and v8.16b, v26.16b, v10.16b\n"
- "and v0.16b, v27.16b, v10.16b\n"
- "and v5.16b, v28.16b, v10.16b\n"
- "and v4.16b, v29.16b, v10.16b\n"
- "and v2.16b, v30.16b, v10.16b\n"
- "and v1.16b, v31.16b, v10.16b\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "srshl v16.4s, v16.4s, v10.4s\n"
- "srshl v17.4s, v17.4s, v10.4s\n"
- "srshl v18.4s, v18.4s, v10.4s\n"
- "srshl v19.4s, v19.4s, v10.4s\n"
- "srshl v20.4s, v20.4s, v10.4s\n"
- "srshl v21.4s, v21.4s, v10.4s\n"
- "srshl v22.4s, v22.4s, v10.4s\n"
- "srshl v23.4s, v23.4s, v10.4s\n"
- "sqadd v26.4s, v26.4s, v8.4s\n"
- "sqadd v27.4s, v27.4s, v0.4s\n"
- "sqadd v28.4s, v28.4s, v5.4s\n"
- "sqadd v29.4s, v29.4s, v4.4s\n"
- "sqadd v30.4s, v30.4s, v2.4s\n"
- "sqadd v31.4s, v31.4s, v1.4s\n"
- "add v16.4s, v16.4s, v14.4s\n"
- "add v17.4s, v17.4s, v14.4s\n"
- "add v18.4s, v18.4s, v14.4s\n"
- "add v19.4s, v19.4s, v14.4s\n"
- "add v20.4s, v20.4s, v14.4s\n"
- "add v21.4s, v21.4s, v14.4s\n"
- "add v22.4s, v22.4s, v14.4s\n"
- "add v23.4s, v23.4s, v14.4s\n"
- "srshl v24.4s, v24.4s, v10.4s\n"
- "srshl v25.4s, v25.4s, v10.4s\n"
- "srshl v26.4s, v26.4s, v10.4s\n"
- "srshl v27.4s, v27.4s, v10.4s\n"
- "srshl v28.4s, v28.4s, v10.4s\n"
- "srshl v29.4s, v29.4s, v10.4s\n"
- "srshl v30.4s, v30.4s, v10.4s\n"
- "srshl v31.4s, v31.4s, v10.4s\n"
- "smin v16.4s, v16.4s, v11.4s\n"
- "smin v17.4s, v17.4s, v11.4s\n"
- "smin v18.4s, v18.4s, v11.4s\n"
- "smin v19.4s, v19.4s, v11.4s\n"
- "smin v20.4s, v20.4s, v11.4s\n"
- "smin v21.4s, v21.4s, v11.4s\n"
- "smin v22.4s, v22.4s, v11.4s\n"
- "smin v23.4s, v23.4s, v11.4s\n"
- "add v24.4s, v24.4s, v14.4s\n"
- "add v25.4s, v25.4s, v14.4s\n"
- "add v26.4s, v26.4s, v14.4s\n"
- "add v27.4s, v27.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "smax v16.4s, v16.4s, v13.4s\n"
- "smax v17.4s, v17.4s, v13.4s\n"
- "smax v18.4s, v18.4s, v13.4s\n"
- "smax v19.4s, v19.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smax v21.4s, v21.4s, v13.4s\n"
- "smax v22.4s, v22.4s, v13.4s\n"
- "smax v23.4s, v23.4s, v13.4s\n"
- "smin v24.4s, v24.4s, v11.4s\n"
- "smin v25.4s, v25.4s, v11.4s\n"
- "smin v26.4s, v26.4s, v11.4s\n"
- "smin v27.4s, v27.4s, v11.4s\n"
- "smin v28.4s, v28.4s, v11.4s\n"
- "smin v29.4s, v29.4s, v11.4s\n"
- "smin v30.4s, v30.4s, v11.4s\n"
- "smin v31.4s, v31.4s, v11.4s\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "uzp1 v17.16b, v17.16b, v17.16b\n"
- "uzp1 v18.16b, v18.16b, v18.16b\n"
- "uzp1 v19.16b, v19.16b, v19.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str s31, [x22, x9]\n"
+ "and v17.16b, v29.16b, v7.16b\n"
+ "sqrdmulh v28.4s, v28.4s, v8.4s\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
+ "srshl v30.4s, v30.4s, v7.4s\n"
+ "sshl v27.4s, v27.4s, v9.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v28.16b, v7.16b\n"
+ "add v30.4s, v30.4s, v10.4s\n"
+ "sqadd v29.4s, v29.4s, v17.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smin v30.4s, v30.4s, v13.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v8.4s\n"
+ "srshl v29.4s, v29.4s, v7.4s\n"
+ "smax v30.4s, v30.4s, v14.4s\n"
+ "sqadd v28.4s, v28.4s, v16.4s\n"
+ "and v16.16b, v27.16b, v7.16b\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "add v29.4s, v29.4s, v10.4s\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "str s30, [x23, x9]\n"
+ "smin v29.4s, v29.4s, v13.4s\n"
+ "srshl v28.4s, v28.4s, v7.4s\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshl v26.4s, v26.4s, v9.4s\n"
+ "smax v29.4s, v29.4s, v14.4s\n"
+ "add v28.4s, v28.4s, v10.4s\n"
+ "sqadd v27.4s, v27.4s, v16.4s\n"
+ "uzp1 v29.16b, v29.16b, v29.16b\n"
+ "smin v28.4s, v28.4s, v13.4s\n"
+ "uzp1 v29.16b, v29.16b, v29.16b\n"
+ "str s29, [x24, x9]\n"
+ "smax v28.4s, v28.4s, v14.4s\n"
+ "srshl v27.4s, v27.4s, v7.4s\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
+ "sqrdmulh v26.4s, v26.4s, v8.4s\n"
+ "sshl v25.4s, v25.4s, v9.4s\n"
+ "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "add v27.4s, v27.4s, v10.4s\n"
+ "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "str s28, [x25, x9]\n"
+ "smin v27.4s, v27.4s, v13.4s\n"
+ "and v17.16b, v26.16b, v7.16b\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
+ "sqrdmulh v25.4s, v25.4s, v8.4s\n"
+ "sshl v24.4s, v24.4s, v9.4s\n"
+ "smax v27.4s, v27.4s, v14.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v25.16b, v7.16b\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "sqadd v26.4s, v26.4s, v17.4s\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "str s27, [x26, x9]\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v24.4s, v24.4s, v8.4s\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
+ "srshl v26.4s, v26.4s, v7.4s\n"
+ "sshl v23.4s, v23.4s, v9.4s\n"
+ "sqadd v25.4s, v25.4s, v16.4s\n"
+ "and v17.16b, v24.16b, v7.16b\n"
+ "add v26.4s, v26.4s, v10.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v8.4s\n"
+ "srshl v25.4s, v25.4s, v7.4s\n"
+ "smin v26.4s, v26.4s, v13.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v23.16b, v7.16b\n"
+ "smax v26.4s, v26.4s, v14.4s\n"
+ "add v25.4s, v25.4s, v10.4s\n"
+ "sqadd v24.4s, v24.4s, v17.4s\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "smin v25.4s, v25.4s, v13.4s\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "str s26, [x19, x9]\n"
+ "smax v25.4s, v25.4s, v14.4s\n"
+ "srshl v24.4s, v24.4s, v7.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshl v22.4s, v22.4s, v9.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "add v24.4s, v24.4s, v10.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s25, [x20, x9]\n"
+ "smin v24.4s, v24.4s, v13.4s\n"
+ "sqadd v23.4s, v23.4s, v16.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v8.4s\n"
+ "sshl v21.4s, v21.4s, v9.4s\n"
+ "smax v24.4s, v24.4s, v14.4s\n"
+ "srshl v23.4s, v23.4s, v7.4s\n"
+ "and v17.16b, v22.16b, v7.16b\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v8.4s\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "str s24, [x21, x9]\n"
+ "add v23.4s, v23.4s, v10.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v21.16b, v7.16b\n"
+ "sshl v20.4s, v20.4s, v9.4s\n"
+ "smin v23.4s, v23.4s, v13.4s\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smax v23.4s, v23.4s, v14.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v8.4s\n"
+ "srshl v22.4s, v22.4s, v7.4s\n"
"uzp1 v23.16b, v23.16b, v23.16b\n"
- "smax v24.4s, v24.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v26.4s, v26.4s, v13.4s\n"
- "smax v27.4s, v27.4s, v13.4s\n"
- "smax v28.4s, v28.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v31.4s, v31.4s, v13.4s\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "str s16, [x20, x9]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "uzp1 v17.16b, v17.16b, v17.16b\n"
- "uzp1 v18.16b, v18.16b, v18.16b\n"
- "str s17, [x21, x9]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
- "uzp1 v19.16b, v19.16b, v19.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s18, [x22, x9]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v22.16b, v22.16b, v22.16b\n"
- "str s19, [x23, x9]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
+ "sqadd v21.4s, v21.4s, v16.4s\n"
"uzp1 v23.16b, v23.16b, v23.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s20, [x24, x9]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s21, [x25, x9]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s22, [x26, x9]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s23, [x27, x9]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
+ "str s23, [x22, x9]\n"
+ "add v22.4s, v22.4s, v10.4s\n"
+ "and v16.16b, v20.16b, v7.16b\n"
+ "srshl v21.4s, v21.4s, v7.4s\n"
+ "sshl v19.4s, v19.4s, v9.4s\n"
+ "smin v22.4s, v22.4s, v13.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "add v21.4s, v21.4s, v10.4s\n"
+ "smax v22.4s, v22.4s, v14.4s\n"
+ "sqadd v20.4s, v20.4s, v16.4s\n"
+ "smin v21.4s, v21.4s, v13.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "sqrdmulh v19.4s, v19.4s, v8.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str s22, [x23, x9]\n"
+ "smax v21.4s, v21.4s, v14.4s\n"
+ "srshl v20.4s, v20.4s, v7.4s\n"
+ "and v16.16b, v19.16b, v7.16b\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "add v20.4s, v20.4s, v10.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "str s21, [x24, x9]\n"
+ "smin v20.4s, v20.4s, v13.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "smax v20.4s, v20.4s, v14.4s\n"
+ "srshl v19.4s, v19.4s, v7.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "str s20, [x25, x9]\n"
+ "add v19.4s, v19.4s, v10.4s\n"
+ "smin v19.4s, v19.4s, v13.4s\n"
+ "smax v19.4s, v19.4s, v14.4s\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str s19, [x26, x9]\n"
+ "b 8f\n"
+ "7:" // Output channel loop: Single kernel point
+ "smlal v6.4s, v17.4h, v3.h[0]\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "smlal v5.4s, v17.4h, v3.h[1]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "smlal v4.4s, v17.4h, v3.h[2]\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "smlal v31.4s, v17.4h, v3.h[3]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
+ "smlal v30.4s, v17.4h, v3.h[4]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
+ "smlal v29.4s, v17.4h, v3.h[5]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
+ "smlal v28.4s, v17.4h, v3.h[6]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
+ "smlal v27.4s, v17.4h, v3.h[7]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
+ "smlal v26.4s, v17.4h, v2.h[0]\n"
+ "smlal v25.4s, v17.4h, v2.h[1]\n"
+ "smlal v24.4s, v17.4h, v2.h[2]\n"
+ "smlal v23.4s, v17.4h, v2.h[3]\n"
+ "smlal v22.4s, v17.4h, v2.h[4]\n"
+ "smlal v21.4s, v17.4h, v2.h[5]\n"
+ "smlal v20.4s, v17.4h, v2.h[6]\n"
+ "smlal v19.4s, v17.4h, v2.h[7]\n"
+ "sshl v6.4s, v6.4s, v9.4s\n"
+ "sshl v5.4s, v5.4s, v9.4s\n"
+ "sqrdmulh v6.4s, v6.4s, v8.4s\n"
+ "sqrdmulh v5.4s, v5.4s, v8.4s\n"
+ "sshl v4.4s, v4.4s, v9.4s\n"
+ "sshl v31.4s, v31.4s, v9.4s\n"
+ "and v18.16b, v6.16b, v7.16b\n"
+ "and v16.16b, v5.16b, v7.16b\n"
+ "sqrdmulh v4.4s, v4.4s, v8.4s\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v31.4s, v31.4s, v8.4s\n"
+ "sqadd v6.4s, v6.4s, v18.4s\n"
+ "sqadd v5.4s, v5.4s, v16.4s\n"
+ "and v17.16b, v4.16b, v7.16b\n"
+ "and v16.16b, v31.16b, v7.16b\n"
+ "srshl v6.4s, v6.4s, v7.4s\n"
+ "srshl v5.4s, v5.4s, v7.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "add v6.4s, v6.4s, v10.4s\n"
+ "add v5.4s, v5.4s, v10.4s\n"
+ "sqadd v4.4s, v4.4s, v17.4s\n"
+ "smin v6.4s, v6.4s, v13.4s\n"
+ "smin v5.4s, v5.4s, v13.4s\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
+ "smax v6.4s, v6.4s, v14.4s\n"
+ "smax v5.4s, v5.4s, v14.4s\n"
+ "srshl v4.4s, v4.4s, v7.4s\n"
+ "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "str s6, [x19, x9]\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "add v4.4s, v4.4s, v10.4s\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "srshl v31.4s, v31.4s, v7.4s\n"
+ "str s5, [x20, x9]\n"
+ "sshl v30.4s, v30.4s, v9.4s\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
+ "smin v4.4s, v4.4s, v13.4s\n"
+ "sqrdmulh v30.4s, v30.4s, v8.4s\n"
+ "add v31.4s, v31.4s, v10.4s\n"
+ "smax v4.4s, v4.4s, v14.4s\n"
+ "sshl v29.4s, v29.4s, v9.4s\n"
+ "smin v31.4s, v31.4s, v13.4s\n"
+ "uzp1 v4.16b, v4.16b, v4.16b\n"
+ "and v16.16b, v30.16b, v7.16b\n"
+ "uzp1 v4.16b, v4.16b, v4.16b\n"
+ "str s4, [x21, x9]\n"
+ "smax v31.4s, v31.4s, v14.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
+ "sqrdmulh v29.4s, v29.4s, v8.4s\n"
+ "sshl v28.4s, v28.4s, v9.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s24, [x20, x9]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s25, [x21, x9]\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s26, [x22, x9]\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s27, [x23, x9]\n"
+ "sqadd v30.4s, v30.4s, v16.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "str s28, [x24, x9]\n"
- "str s29, [x25, x9]\n"
- "str s30, [x26, x9]\n"
- "str s31, [x27, x9]\n"
+ "str s31, [x22, x9]\n"
+ "and v17.16b, v29.16b, v7.16b\n"
+ "sqrdmulh v28.4s, v28.4s, v8.4s\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
+ "srshl v30.4s, v30.4s, v7.4s\n"
+ "sshl v27.4s, v27.4s, v9.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v28.16b, v7.16b\n"
+ "add v30.4s, v30.4s, v10.4s\n"
+ "sqadd v29.4s, v29.4s, v17.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smin v30.4s, v30.4s, v13.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v8.4s\n"
+ "srshl v29.4s, v29.4s, v7.4s\n"
+ "smax v30.4s, v30.4s, v14.4s\n"
+ "sqadd v28.4s, v28.4s, v16.4s\n"
+ "and v16.16b, v27.16b, v7.16b\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "add v29.4s, v29.4s, v10.4s\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "str s30, [x23, x9]\n"
+ "smin v29.4s, v29.4s, v13.4s\n"
+ "srshl v28.4s, v28.4s, v7.4s\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshl v26.4s, v26.4s, v9.4s\n"
+ "smax v29.4s, v29.4s, v14.4s\n"
+ "add v28.4s, v28.4s, v10.4s\n"
+ "sqadd v27.4s, v27.4s, v16.4s\n"
+ "uzp1 v29.16b, v29.16b, v29.16b\n"
+ "smin v28.4s, v28.4s, v13.4s\n"
+ "uzp1 v29.16b, v29.16b, v29.16b\n"
+ "str s29, [x24, x9]\n"
+ "smax v28.4s, v28.4s, v14.4s\n"
+ "srshl v27.4s, v27.4s, v7.4s\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
+ "sqrdmulh v26.4s, v26.4s, v8.4s\n"
+ "sshl v25.4s, v25.4s, v9.4s\n"
+ "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "add v27.4s, v27.4s, v10.4s\n"
+ "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "str s28, [x25, x9]\n"
+ "smin v27.4s, v27.4s, v13.4s\n"
+ "and v17.16b, v26.16b, v7.16b\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
+ "sqrdmulh v25.4s, v25.4s, v8.4s\n"
+ "sshl v24.4s, v24.4s, v9.4s\n"
+ "smax v27.4s, v27.4s, v14.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v25.16b, v7.16b\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "sqadd v26.4s, v26.4s, v17.4s\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "str s27, [x26, x9]\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v24.4s, v24.4s, v8.4s\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
+ "srshl v26.4s, v26.4s, v7.4s\n"
+ "sshl v23.4s, v23.4s, v9.4s\n"
+ "sqadd v25.4s, v25.4s, v16.4s\n"
+ "and v17.16b, v24.16b, v7.16b\n"
+ "add v26.4s, v26.4s, v10.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v8.4s\n"
+ "srshl v25.4s, v25.4s, v7.4s\n"
+ "smin v26.4s, v26.4s, v13.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v23.16b, v7.16b\n"
+ "smax v26.4s, v26.4s, v14.4s\n"
+ "add v25.4s, v25.4s, v10.4s\n"
+ "sqadd v24.4s, v24.4s, v17.4s\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "smin v25.4s, v25.4s, v13.4s\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "str s26, [x19, x9]\n"
+ "smax v25.4s, v25.4s, v14.4s\n"
+ "srshl v24.4s, v24.4s, v7.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshl v22.4s, v22.4s, v9.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "add v24.4s, v24.4s, v10.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s25, [x20, x9]\n"
+ "smin v24.4s, v24.4s, v13.4s\n"
+ "sqadd v23.4s, v23.4s, v16.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v8.4s\n"
+ "sshl v21.4s, v21.4s, v9.4s\n"
+ "smax v24.4s, v24.4s, v14.4s\n"
+ "srshl v23.4s, v23.4s, v7.4s\n"
+ "and v17.16b, v22.16b, v7.16b\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v8.4s\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "str s24, [x21, x9]\n"
+ "add v23.4s, v23.4s, v10.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v21.16b, v7.16b\n"
+ "sshl v20.4s, v20.4s, v9.4s\n"
+ "smin v23.4s, v23.4s, v13.4s\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smax v23.4s, v23.4s, v14.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v8.4s\n"
+ "srshl v22.4s, v22.4s, v7.4s\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "sqadd v21.4s, v21.4s, v16.4s\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "str s23, [x22, x9]\n"
+ "add v22.4s, v22.4s, v10.4s\n"
+ "and v16.16b, v20.16b, v7.16b\n"
+ "srshl v21.4s, v21.4s, v7.4s\n"
+ "sshl v19.4s, v19.4s, v9.4s\n"
+ "smin v22.4s, v22.4s, v13.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "add v21.4s, v21.4s, v10.4s\n"
+ "smax v22.4s, v22.4s, v14.4s\n"
+ "sqadd v20.4s, v20.4s, v16.4s\n"
+ "smin v21.4s, v21.4s, v13.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "sqrdmulh v19.4s, v19.4s, v8.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str s22, [x23, x9]\n"
+ "smax v21.4s, v21.4s, v14.4s\n"
+ "srshl v20.4s, v20.4s, v7.4s\n"
+ "and v16.16b, v19.16b, v7.16b\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "add v20.4s, v20.4s, v10.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "str s21, [x24, x9]\n"
+ "smin v20.4s, v20.4s, v13.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "smax v20.4s, v20.4s, v14.4s\n"
+ "srshl v19.4s, v19.4s, v7.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "str s20, [x25, x9]\n"
+ "add v19.4s, v19.4s, v10.4s\n"
+ "smin v19.4s, v19.4s, v13.4s\n"
+ "smax v19.4s, v19.4s, v14.4s\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str s19, [x26, x9]\n"
"8:" // Output channel loop: Done
"add x9, x9, #0x4\n"
- "cmp x9, x10, LSL #2\n"
+ "cmp x9, x28, LSL #2\n"
"blt 1b\n"
"tst %x[n_output_channels], #0x3\n"
"beq 26f\n"
"9:" // Output channel oddments
- "movi v31.4s, #0x0\n"
+ "movi v16.4s, #0x0\n"
"cbz %x[bias], 12f\n"
- "add x20, %x[bias], x9, LSL #2\n"
+ "add x19, %x[bias], x9, LSL #2\n"
"tbz %x[n_output_channels], #1, 10f\n"
- "ld1 { v31.d }[0], [x20], #0x8\n"
+ "ld1 { v16.d }[0], [x19], #0x8\n"
"tbz %x[n_output_channels], #0, 11f\n"
- "ld1 { v31.s }[2], [x20]\n"
+ "ld1 { v16.s }[2], [x19]\n"
"b 11f\n"
"10:" // Output channel oddments: Load bias: Bit 1: Unset
- "ld1 { v31.s }[0], [x20]\n"
+ "tbz %x[n_output_channels], #0, 11f\n"
+ "ld1 { v16.s }[0], [x19]\n"
"11:" // Output channel oddments: Load bias: Bit 1: End
+
"12:" // Output channel oddments: Load bias: Done
- "mov v16.16b, v31.16b\n"
- "mov v17.16b, v31.16b\n"
- "mov v18.16b, v31.16b\n"
- "mov v19.16b, v31.16b\n"
- "mov v20.16b, v31.16b\n"
- "mov v21.16b, v31.16b\n"
- "mov v22.16b, v31.16b\n"
- "mov v23.16b, v31.16b\n"
- "mov v24.16b, v31.16b\n"
- "mov v25.16b, v31.16b\n"
- "mov v26.16b, v31.16b\n"
- "mov v27.16b, v31.16b\n"
- "mov v28.16b, v31.16b\n"
- "mov v29.16b, v31.16b\n"
- "mov v30.16b, v31.16b\n"
- "mov v31.16b, v31.16b\n"
+ "mov v6.16b, v16.16b\n"
+ "mov v5.16b, v16.16b\n"
+ "mov v4.16b, v16.16b\n"
+ "mov v31.16b, v16.16b\n"
+ "mov v30.16b, v16.16b\n"
+ "mov v29.16b, v16.16b\n"
+ "mov v28.16b, v16.16b\n"
+ "mov v27.16b, v16.16b\n"
+ "mov v26.16b, v16.16b\n"
+ "mov v25.16b, v16.16b\n"
+ "mov v24.16b, v16.16b\n"
+ "mov v23.16b, v16.16b\n"
+ "mov v22.16b, v16.16b\n"
+ "mov v21.16b, v16.16b\n"
+ "mov v20.16b, v16.16b\n"
+ "mov v19.16b, v16.16b\n"
"cbz %x[rq_mul_ptr], 18f\n"
- "add x22, %x[rq_mul_ptr], x9, LSL #2\n"
- "add x21, %x[rq_right_shift_ptr], x9, LSL #2\n"
- "add x20, %x[rq_left_shift_ptr], x9, LSL #2\n"
+ "add x21, %x[rq_mul_ptr], x9, LSL #2\n"
+ "add x20, %x[rq_right_shift_ptr], x9, LSL #2\n"
+ "add x19, %x[rq_left_shift_ptr], x9, LSL #2\n"
"cbz %x[rq_left_shift_ptr], 15f\n"
"tbz %x[n_output_channels], #1, 13f\n"
- "ld1 { v9.d }[0], [x22], #0x8\n"
- "ld1 { v10.d }[0], [x21], #0x8\n"
- "ld1 { v15.d }[0], [x20], #0x8\n"
+ "ld1 { v8.d }[0], [x21], #0x8\n"
+ "ld1 { v7.d }[0], [x20], #0x8\n"
+ "ld1 { v9.d }[0], [x19], #0x8\n"
"tbz %x[n_output_channels], #0, 14f\n"
- "ld1 { v9.s }[2], [x22], #0x4\n"
- "ld1 { v10.s }[2], [x21], #0x4\n"
- "ld1 { v15.s }[2], [x20], #0x4\n"
+ "ld1 { v8.s }[2], [x21], #0x4\n"
+ "ld1 { v7.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x19], #0x4\n"
"b 14f\n"
"13:" // Output channel oddments: Load quantization parameters: With left shift: Bit 1: Unset
- "ld1 { v9.s }[0], [x22], #0x4\n"
- "ld1 { v10.s }[0], [x21], #0x4\n"
- "ld1 { v15.s }[0], [x20], #0x4\n"
+ "tbz %x[n_output_channels], #0, 14f\n"
+ "ld1 { v8.s }[0], [x21], #0x4\n"
+ "ld1 { v7.s }[0], [x20], #0x4\n"
+ "ld1 { v9.s }[0], [x19], #0x4\n"
"14:" // Output channel oddments: Load quantization parameters: With left shift: Bit 1: End
"b 18f\n"
"15:" // Output channel oddments: Load quantization parameters: No left shift
"tbz %x[n_output_channels], #1, 16f\n"
- "ld1 { v9.d }[0], [x22], #0x8\n"
- "ld1 { v10.d }[0], [x21], #0x8\n"
+ "ld1 { v8.d }[0], [x21], #0x8\n"
+ "ld1 { v7.d }[0], [x20], #0x8\n"
"tbz %x[n_output_channels], #0, 17f\n"
- "ld1 { v9.s }[2], [x22], #0x4\n"
- "ld1 { v10.s }[2], [x21], #0x4\n"
+ "ld1 { v8.s }[2], [x21], #0x4\n"
+ "ld1 { v7.s }[2], [x20], #0x4\n"
"b 17f\n"
"16:" // Output channel oddments: Load quantization parameters: No left shift: Bit 1: Unset
- "ld1 { v9.s }[0], [x22], #0x4\n"
- "ld1 { v10.s }[0], [x21], #0x4\n"
+ "tbz %x[n_output_channels], #0, 17f\n"
+ "ld1 { v8.s }[0], [x21], #0x4\n"
+ "ld1 { v7.s }[0], [x20], #0x4\n"
"17:" // Output channel oddments: Load quantization parameters: No left shift: Bit 1: End
+
"18:" // Output channel oddments: Load quantization parameters: Done
- "ldr s8, [%x[weights]], #0x4\n"
- "mov x20, %x[inptrs]\n"
- "ldp x25, x28, [x20], #0x10\n"
- "lsr x21, %x[kernel_points], #0x1\n"
- "ldr d2, [x25, #0x0]\n"
- "ldr d7, [x28, #0x0]\n"
- "usubl v2.8h, v2.8b, v3.8b\n"
- "usubl v7.8h, v7.8b, v3.8b\n"
- "usubl v8.8h, v8.8b, v12.8b\n"
- "cbz x21, 22f\n"
- "ldr s6, [%x[weights]], #0x4\n"
- "ldp x25, x28, [x20], #0x10\n"
- "subs x21, x21, #0x1\n"
- "usubl v6.8h, v6.8b, v12.8b\n"
+ "ldr s17, [%x[weights]], #0x4\n"
+ "usubl v17.8h, v17.8b, v11.8b\n"
+ "mov x19, %x[inptrs]\n"
+ "ldp x25, x27, [x19], #0x10\n"
+ "lsr x20, %x[kernel_points], #0x1\n"
+ "ldr d3, [x25, #0x0]\n"
+ "usubl v3.8h, v3.8b, v12.8b\n"
+ "ldr d2, [x27, #0x0]\n"
+ "usubl v2.8h, v2.8b, v12.8b\n"
+ "cbz x20, 22f\n"
+ "ldp x25, x27, [x19], #0x10\n"
+ "ldr s16, [%x[weights]], #0x4\n"
+ "usubl v16.8h, v16.8b, v11.8b\n"
"ldr d1, [x25, #0x0]\n"
- "ldr d0, [x28, #0x0]\n"
- "usubl v1.8h, v1.8b, v3.8b\n"
- "usubl v0.8h, v0.8b, v3.8b\n"
+ "subs x20, x20, #0x1\n"
+ "usubl v1.8h, v1.8b, v12.8b\n"
+ "ldr d0, [x27, #0x0]\n"
+ "usubl v0.8h, v0.8b, v12.8b\n"
"beq 20f\n"
"19:" // Output channel oddments: Kernel loop
- "ldp x25, x28, [x20], #0x10\n"
- "smlal v16.4s, v8.4h, v2.h[0]\n"
- "smlal v17.4s, v8.4h, v2.h[1]\n"
- "subs x21, x21, #0x1\n"
- "smlal v18.4s, v8.4h, v2.h[2]\n"
- "smlal v19.4s, v8.4h, v2.h[3]\n"
- "smlal v20.4s, v8.4h, v2.h[4]\n"
- "smlal v21.4s, v8.4h, v2.h[5]\n"
- "smlal v22.4s, v8.4h, v2.h[6]\n"
- "smlal v23.4s, v8.4h, v2.h[7]\n"
- "ldr d2, [x25, #0x0]\n"
- "usubl v2.8h, v2.8b, v3.8b\n"
- "smlal v24.4s, v8.4h, v7.h[0]\n"
- "smlal v25.4s, v8.4h, v7.h[1]\n"
- "smlal v26.4s, v8.4h, v7.h[2]\n"
- "smlal v27.4s, v8.4h, v7.h[3]\n"
- "smlal v28.4s, v8.4h, v7.h[4]\n"
- "smlal v29.4s, v8.4h, v7.h[5]\n"
- "smlal v30.4s, v8.4h, v7.h[6]\n"
- "smlal v31.4s, v8.4h, v7.h[7]\n"
- "ldr d7, [x28, #0x0]\n"
- "ldr s8, [%x[weights]], #0x4\n"
- "ldp x25, x28, [x20], #0x10\n"
- "smlal v16.4s, v6.4h, v1.h[0]\n"
- "smlal v17.4s, v6.4h, v1.h[1]\n"
- "usubl v7.8h, v7.8b, v3.8b\n"
- "smlal v18.4s, v6.4h, v1.h[2]\n"
- "smlal v19.4s, v6.4h, v1.h[3]\n"
- "usubl v8.8h, v8.8b, v12.8b\n"
- "smlal v20.4s, v6.4h, v1.h[4]\n"
- "smlal v21.4s, v6.4h, v1.h[5]\n"
- "smlal v22.4s, v6.4h, v1.h[6]\n"
- "smlal v23.4s, v6.4h, v1.h[7]\n"
+ "smlal v6.4s, v17.4h, v3.h[0]\n"
+ "ldp x25, x27, [x19], #0x10\n"
+ "subs x20, x20, #0x1\n"
+ "smlal v5.4s, v17.4h, v3.h[1]\n"
+ "smlal v4.4s, v17.4h, v3.h[2]\n"
+ "smlal v31.4s, v17.4h, v3.h[3]\n"
+ "smlal v30.4s, v17.4h, v3.h[4]\n"
+ "smlal v29.4s, v17.4h, v3.h[5]\n"
+ "smlal v28.4s, v17.4h, v3.h[6]\n"
+ "smlal v27.4s, v17.4h, v3.h[7]\n"
+ "ldr d3, [x25, #0x0]\n"
+ "smlal v26.4s, v17.4h, v2.h[0]\n"
+ "smlal v25.4s, v17.4h, v2.h[1]\n"
+ "smlal v24.4s, v17.4h, v2.h[2]\n"
+ "smlal v23.4s, v17.4h, v2.h[3]\n"
+ "smlal v22.4s, v17.4h, v2.h[4]\n"
+ "smlal v21.4s, v17.4h, v2.h[5]\n"
+ "smlal v20.4s, v17.4h, v2.h[6]\n"
+ "smlal v19.4s, v17.4h, v2.h[7]\n"
+ "ldr d2, [x27, #0x0]\n"
+ "usubl v3.8h, v3.8b, v12.8b\n"
+ "ldr s17, [%x[weights]], #0x4\n"
+ "smlal v6.4s, v16.4h, v1.h[0]\n"
+ "ldp x25, x27, [x19], #0x10\n"
+ "smlal v5.4s, v16.4h, v1.h[1]\n"
+ "smlal v4.4s, v16.4h, v1.h[2]\n"
+ "usubl v2.8h, v2.8b, v12.8b\n"
+ "usubl v17.8h, v17.8b, v11.8b\n"
+ "smlal v31.4s, v16.4h, v1.h[3]\n"
+ "smlal v30.4s, v16.4h, v1.h[4]\n"
+ "smlal v29.4s, v16.4h, v1.h[5]\n"
+ "smlal v28.4s, v16.4h, v1.h[6]\n"
+ "smlal v27.4s, v16.4h, v1.h[7]\n"
"ldr d1, [x25, #0x0]\n"
- "usubl v1.8h, v1.8b, v3.8b\n"
- "smlal v24.4s, v6.4h, v0.h[0]\n"
- "smlal v25.4s, v6.4h, v0.h[1]\n"
- "smlal v26.4s, v6.4h, v0.h[2]\n"
- "smlal v27.4s, v6.4h, v0.h[3]\n"
- "smlal v28.4s, v6.4h, v0.h[4]\n"
- "smlal v29.4s, v6.4h, v0.h[5]\n"
- "smlal v30.4s, v6.4h, v0.h[6]\n"
- "smlal v31.4s, v6.4h, v0.h[7]\n"
- "ldr d0, [x28, #0x0]\n"
- "ldr s6, [%x[weights]], #0x4\n"
- "usubl v0.8h, v0.8b, v3.8b\n"
- "usubl v6.8h, v6.8b, v12.8b\n"
+ "smlal v26.4s, v16.4h, v0.h[0]\n"
+ "smlal v25.4s, v16.4h, v0.h[1]\n"
+ "smlal v24.4s, v16.4h, v0.h[2]\n"
+ "smlal v23.4s, v16.4h, v0.h[3]\n"
+ "smlal v22.4s, v16.4h, v0.h[4]\n"
+ "smlal v21.4s, v16.4h, v0.h[5]\n"
+ "smlal v20.4s, v16.4h, v0.h[6]\n"
+ "smlal v19.4s, v16.4h, v0.h[7]\n"
+ "ldr d0, [x27, #0x0]\n"
+ "usubl v1.8h, v1.8b, v12.8b\n"
+ "ldr s16, [%x[weights]], #0x4\n"
+ "usubl v0.8h, v0.8b, v12.8b\n"
+ "usubl v16.8h, v16.8b, v11.8b\n"
"bgt 19b\n"
"20:" // Output channel oddments: Kernel loop tail
"tbnz %x[kernel_points], #0, 21f\n"
- "smlal v16.4s, v8.4h, v2.h[0]\n"
- "smlal v17.4s, v8.4h, v2.h[1]\n"
- "smlal v18.4s, v8.4h, v2.h[2]\n"
- "smlal v19.4s, v8.4h, v2.h[3]\n"
- "smlal v20.4s, v8.4h, v2.h[4]\n"
- "smlal v21.4s, v8.4h, v2.h[5]\n"
- "smlal v22.4s, v8.4h, v2.h[6]\n"
- "smlal v23.4s, v8.4h, v2.h[7]\n"
- "smlal v24.4s, v8.4h, v7.h[0]\n"
- "smlal v25.4s, v8.4h, v7.h[1]\n"
- "smlal v26.4s, v8.4h, v7.h[2]\n"
- "smlal v27.4s, v8.4h, v7.h[3]\n"
- "smlal v28.4s, v8.4h, v7.h[4]\n"
- "smlal v29.4s, v8.4h, v7.h[5]\n"
- "smlal v30.4s, v8.4h, v7.h[6]\n"
- "smlal v31.4s, v8.4h, v7.h[7]\n"
- "smlal v16.4s, v6.4h, v1.h[0]\n"
- "smlal v17.4s, v6.4h, v1.h[1]\n"
- "smlal v18.4s, v6.4h, v1.h[2]\n"
- "smlal v19.4s, v6.4h, v1.h[3]\n"
- "smlal v20.4s, v6.4h, v1.h[4]\n"
- "smlal v21.4s, v6.4h, v1.h[5]\n"
- "smlal v22.4s, v6.4h, v1.h[6]\n"
- "smlal v23.4s, v6.4h, v1.h[7]\n"
- "smlal v24.4s, v6.4h, v0.h[0]\n"
- "smlal v25.4s, v6.4h, v0.h[1]\n"
- "smlal v26.4s, v6.4h, v0.h[2]\n"
- "smlal v27.4s, v6.4h, v0.h[3]\n"
- "smlal v28.4s, v6.4h, v0.h[4]\n"
- "smlal v29.4s, v6.4h, v0.h[5]\n"
- "smlal v30.4s, v6.4h, v0.h[6]\n"
- "smlal v31.4s, v6.4h, v0.h[7]\n"
+ "smlal v6.4s, v17.4h, v3.h[0]\n"
+ "smlal v5.4s, v17.4h, v3.h[1]\n"
+ "smlal v4.4s, v17.4h, v3.h[2]\n"
+ "smlal v31.4s, v17.4h, v3.h[3]\n"
+ "smlal v30.4s, v17.4h, v3.h[4]\n"
+ "smlal v29.4s, v17.4h, v3.h[5]\n"
+ "smlal v28.4s, v17.4h, v3.h[6]\n"
+ "smlal v27.4s, v17.4h, v3.h[7]\n"
+ "smlal v26.4s, v17.4h, v2.h[0]\n"
+ "smlal v25.4s, v17.4h, v2.h[1]\n"
+ "smlal v24.4s, v17.4h, v2.h[2]\n"
+ "smlal v23.4s, v17.4h, v2.h[3]\n"
+ "smlal v22.4s, v17.4h, v2.h[4]\n"
+ "smlal v21.4s, v17.4h, v2.h[5]\n"
+ "smlal v20.4s, v17.4h, v2.h[6]\n"
+ "smlal v19.4s, v17.4h, v2.h[7]\n"
+ "smlal v6.4s, v16.4h, v1.h[0]\n"
+ "smlal v5.4s, v16.4h, v1.h[1]\n"
+ "smlal v4.4s, v16.4h, v1.h[2]\n"
+ "smlal v31.4s, v16.4h, v1.h[3]\n"
+ "smlal v30.4s, v16.4h, v1.h[4]\n"
+ "smlal v29.4s, v16.4h, v1.h[5]\n"
+ "smlal v28.4s, v16.4h, v1.h[6]\n"
+ "smlal v27.4s, v16.4h, v1.h[7]\n"
+ "smlal v26.4s, v16.4h, v0.h[0]\n"
+ "smlal v25.4s, v16.4h, v0.h[1]\n"
+ "smlal v24.4s, v16.4h, v0.h[2]\n"
+ "smlal v23.4s, v16.4h, v0.h[3]\n"
+ "smlal v22.4s, v16.4h, v0.h[4]\n"
+ "smlal v21.4s, v16.4h, v0.h[5]\n"
+ "smlal v20.4s, v16.4h, v0.h[6]\n"
+ "smlal v19.4s, v16.4h, v0.h[7]\n"
"b 23f\n"
"21:" // Output channel oddments: Odd tail
- "ldp x25, x28, [x20], #0x10\n"
- "smlal v16.4s, v8.4h, v2.h[0]\n"
- "smlal v17.4s, v8.4h, v2.h[1]\n"
- "smlal v18.4s, v8.4h, v2.h[2]\n"
- "smlal v19.4s, v8.4h, v2.h[3]\n"
- "smlal v20.4s, v8.4h, v2.h[4]\n"
- "smlal v21.4s, v8.4h, v2.h[5]\n"
- "smlal v22.4s, v8.4h, v2.h[6]\n"
- "smlal v23.4s, v8.4h, v2.h[7]\n"
- "ldr d2, [x25, #0x0]\n"
- "usubl v2.8h, v2.8b, v3.8b\n"
- "smlal v24.4s, v8.4h, v7.h[0]\n"
- "smlal v25.4s, v8.4h, v7.h[1]\n"
- "smlal v26.4s, v8.4h, v7.h[2]\n"
- "smlal v27.4s, v8.4h, v7.h[3]\n"
- "smlal v28.4s, v8.4h, v7.h[4]\n"
- "smlal v29.4s, v8.4h, v7.h[5]\n"
- "smlal v30.4s, v8.4h, v7.h[6]\n"
- "smlal v31.4s, v8.4h, v7.h[7]\n"
- "ldr d7, [x28, #0x0]\n"
- "ldr s8, [%x[weights]], #0x4\n"
- "smlal v16.4s, v6.4h, v1.h[0]\n"
- "smlal v17.4s, v6.4h, v1.h[1]\n"
- "usubl v7.8h, v7.8b, v3.8b\n"
- "smlal v18.4s, v6.4h, v1.h[2]\n"
- "smlal v19.4s, v6.4h, v1.h[3]\n"
- "usubl v8.8h, v8.8b, v12.8b\n"
- "smlal v20.4s, v6.4h, v1.h[4]\n"
- "smlal v21.4s, v6.4h, v1.h[5]\n"
- "smlal v22.4s, v6.4h, v1.h[6]\n"
- "smlal v23.4s, v6.4h, v1.h[7]\n"
- "smlal v24.4s, v6.4h, v0.h[0]\n"
- "smlal v25.4s, v6.4h, v0.h[1]\n"
- "smlal v26.4s, v6.4h, v0.h[2]\n"
- "smlal v27.4s, v6.4h, v0.h[3]\n"
- "smlal v28.4s, v6.4h, v0.h[4]\n"
- "smlal v29.4s, v6.4h, v0.h[5]\n"
- "smlal v30.4s, v6.4h, v0.h[6]\n"
- "smlal v31.4s, v6.4h, v0.h[7]\n"
- "smlal v16.4s, v8.4h, v2.h[0]\n"
- "smlal v17.4s, v8.4h, v2.h[1]\n"
- "smlal v18.4s, v8.4h, v2.h[2]\n"
- "smlal v19.4s, v8.4h, v2.h[3]\n"
- "smlal v20.4s, v8.4h, v2.h[4]\n"
- "smlal v21.4s, v8.4h, v2.h[5]\n"
- "smlal v22.4s, v8.4h, v2.h[6]\n"
- "smlal v23.4s, v8.4h, v2.h[7]\n"
- "smlal v24.4s, v8.4h, v7.h[0]\n"
- "smlal v25.4s, v8.4h, v7.h[1]\n"
- "smlal v26.4s, v8.4h, v7.h[2]\n"
- "smlal v27.4s, v8.4h, v7.h[3]\n"
- "smlal v28.4s, v8.4h, v7.h[4]\n"
- "smlal v29.4s, v8.4h, v7.h[5]\n"
- "smlal v30.4s, v8.4h, v7.h[6]\n"
- "smlal v31.4s, v8.4h, v7.h[7]\n"
+ "smlal v6.4s, v17.4h, v3.h[0]\n"
+ "ldp x25, x27, [x19], #0x10\n"
+ "smlal v5.4s, v17.4h, v3.h[1]\n"
+ "smlal v4.4s, v17.4h, v3.h[2]\n"
+ "smlal v31.4s, v17.4h, v3.h[3]\n"
+ "smlal v30.4s, v17.4h, v3.h[4]\n"
+ "smlal v29.4s, v17.4h, v3.h[5]\n"
+ "smlal v28.4s, v17.4h, v3.h[6]\n"
+ "smlal v27.4s, v17.4h, v3.h[7]\n"
+ "ldr d3, [x25, #0x0]\n"
+ "smlal v26.4s, v17.4h, v2.h[0]\n"
+ "smlal v25.4s, v17.4h, v2.h[1]\n"
+ "smlal v24.4s, v17.4h, v2.h[2]\n"
+ "smlal v23.4s, v17.4h, v2.h[3]\n"
+ "smlal v22.4s, v17.4h, v2.h[4]\n"
+ "smlal v21.4s, v17.4h, v2.h[5]\n"
+ "smlal v20.4s, v17.4h, v2.h[6]\n"
+ "smlal v19.4s, v17.4h, v2.h[7]\n"
+ "ldr d2, [x27, #0x0]\n"
+ "usubl v3.8h, v3.8b, v12.8b\n"
+ "ldr s17, [%x[weights]], #0x4\n"
+ "smlal v6.4s, v16.4h, v1.h[0]\n"
+ "smlal v5.4s, v16.4h, v1.h[1]\n"
+ "smlal v4.4s, v16.4h, v1.h[2]\n"
+ "usubl v2.8h, v2.8b, v12.8b\n"
+ "usubl v17.8h, v17.8b, v11.8b\n"
+ "smlal v31.4s, v16.4h, v1.h[3]\n"
+ "smlal v30.4s, v16.4h, v1.h[4]\n"
+ "smlal v29.4s, v16.4h, v1.h[5]\n"
+ "smlal v28.4s, v16.4h, v1.h[6]\n"
+ "smlal v27.4s, v16.4h, v1.h[7]\n"
+ "smlal v26.4s, v16.4h, v0.h[0]\n"
+ "smlal v25.4s, v16.4h, v0.h[1]\n"
+ "smlal v24.4s, v16.4h, v0.h[2]\n"
+ "smlal v23.4s, v16.4h, v0.h[3]\n"
+ "smlal v22.4s, v16.4h, v0.h[4]\n"
+ "smlal v21.4s, v16.4h, v0.h[5]\n"
+ "smlal v20.4s, v16.4h, v0.h[6]\n"
+ "smlal v19.4s, v16.4h, v0.h[7]\n"
+ "smlal v6.4s, v17.4h, v3.h[0]\n"
+ "smlal v5.4s, v17.4h, v3.h[1]\n"
+ "smlal v4.4s, v17.4h, v3.h[2]\n"
+ "smlal v31.4s, v17.4h, v3.h[3]\n"
+ "smlal v30.4s, v17.4h, v3.h[4]\n"
+ "smlal v29.4s, v17.4h, v3.h[5]\n"
+ "smlal v28.4s, v17.4h, v3.h[6]\n"
+ "smlal v27.4s, v17.4h, v3.h[7]\n"
+ "smlal v26.4s, v17.4h, v2.h[0]\n"
+ "smlal v25.4s, v17.4h, v2.h[1]\n"
+ "smlal v24.4s, v17.4h, v2.h[2]\n"
+ "smlal v23.4s, v17.4h, v2.h[3]\n"
+ "smlal v22.4s, v17.4h, v2.h[4]\n"
+ "smlal v21.4s, v17.4h, v2.h[5]\n"
+ "smlal v20.4s, v17.4h, v2.h[6]\n"
+ "smlal v19.4s, v17.4h, v2.h[7]\n"
"b 23f\n"
"22:" // Output channel oddments: Single kernel point
- "smlal v16.4s, v8.4h, v2.h[0]\n"
- "smlal v17.4s, v8.4h, v2.h[1]\n"
- "smlal v18.4s, v8.4h, v2.h[2]\n"
- "smlal v19.4s, v8.4h, v2.h[3]\n"
- "smlal v20.4s, v8.4h, v2.h[4]\n"
- "smlal v21.4s, v8.4h, v2.h[5]\n"
- "smlal v22.4s, v8.4h, v2.h[6]\n"
- "smlal v23.4s, v8.4h, v2.h[7]\n"
- "smlal v24.4s, v8.4h, v7.h[0]\n"
- "smlal v25.4s, v8.4h, v7.h[1]\n"
- "smlal v26.4s, v8.4h, v7.h[2]\n"
- "smlal v27.4s, v8.4h, v7.h[3]\n"
- "smlal v28.4s, v8.4h, v7.h[4]\n"
- "smlal v29.4s, v8.4h, v7.h[5]\n"
- "smlal v30.4s, v8.4h, v7.h[6]\n"
- "smlal v31.4s, v8.4h, v7.h[7]\n"
+ "smlal v6.4s, v17.4h, v3.h[0]\n"
+ "smlal v5.4s, v17.4h, v3.h[1]\n"
+ "smlal v4.4s, v17.4h, v3.h[2]\n"
+ "smlal v31.4s, v17.4h, v3.h[3]\n"
+ "smlal v30.4s, v17.4h, v3.h[4]\n"
+ "smlal v29.4s, v17.4h, v3.h[5]\n"
+ "smlal v28.4s, v17.4h, v3.h[6]\n"
+ "smlal v27.4s, v17.4h, v3.h[7]\n"
+ "smlal v26.4s, v17.4h, v2.h[0]\n"
+ "smlal v25.4s, v17.4h, v2.h[1]\n"
+ "smlal v24.4s, v17.4h, v2.h[2]\n"
+ "smlal v23.4s, v17.4h, v2.h[3]\n"
+ "smlal v22.4s, v17.4h, v2.h[4]\n"
+ "smlal v21.4s, v17.4h, v2.h[5]\n"
+ "smlal v20.4s, v17.4h, v2.h[6]\n"
+ "smlal v19.4s, v17.4h, v2.h[7]\n"
"23:" // Output channel oddments: Done
- "sshl v16.4s, v16.4s, v15.4s\n"
- "sshl v17.4s, v17.4s, v15.4s\n"
- "sshl v18.4s, v18.4s, v15.4s\n"
- "sshl v19.4s, v19.4s, v15.4s\n"
- "sqrdmulh v16.4s, v16.4s, v9.4s\n"
- "sqrdmulh v17.4s, v17.4s, v9.4s\n"
- "sqrdmulh v18.4s, v18.4s, v9.4s\n"
- "sqrdmulh v19.4s, v19.4s, v9.4s\n"
- "and v5.16b, v16.16b, v10.16b\n"
- "and v4.16b, v17.16b, v10.16b\n"
- "and v2.16b, v18.16b, v10.16b\n"
- "and v1.16b, v19.16b, v10.16b\n"
- "sshl v20.4s, v20.4s, v15.4s\n"
- "sshl v21.4s, v21.4s, v15.4s\n"
- "sshl v22.4s, v22.4s, v15.4s\n"
- "sshl v23.4s, v23.4s, v15.4s\n"
- "sshl v24.4s, v24.4s, v15.4s\n"
- "sshl v25.4s, v25.4s, v15.4s\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "sqrdmulh v21.4s, v21.4s, v9.4s\n"
- "sqrdmulh v22.4s, v22.4s, v9.4s\n"
- "sqrdmulh v23.4s, v23.4s, v9.4s\n"
- "sqrdmulh v24.4s, v24.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqadd v16.4s, v16.4s, v5.4s\n"
- "sqadd v17.4s, v17.4s, v4.4s\n"
- "sqadd v18.4s, v18.4s, v2.4s\n"
- "sqadd v19.4s, v19.4s, v1.4s\n"
- "and v8.16b, v20.16b, v10.16b\n"
- "and v0.16b, v21.16b, v10.16b\n"
- "and v5.16b, v22.16b, v10.16b\n"
- "and v4.16b, v23.16b, v10.16b\n"
- "and v2.16b, v24.16b, v10.16b\n"
- "and v1.16b, v25.16b, v10.16b\n"
- "sshl v26.4s, v26.4s, v15.4s\n"
- "sshl v27.4s, v27.4s, v15.4s\n"
- "sshl v28.4s, v28.4s, v15.4s\n"
- "sshl v29.4s, v29.4s, v15.4s\n"
- "sshl v30.4s, v30.4s, v15.4s\n"
- "sshl v31.4s, v31.4s, v15.4s\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v9.4s\n"
- "sqrdmulh v27.4s, v27.4s, v9.4s\n"
- "sqrdmulh v28.4s, v28.4s, v9.4s\n"
- "sqrdmulh v29.4s, v29.4s, v9.4s\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v31.4s, v31.4s, v9.4s\n"
- "sqadd v20.4s, v20.4s, v8.4s\n"
- "sqadd v21.4s, v21.4s, v0.4s\n"
- "sqadd v22.4s, v22.4s, v5.4s\n"
- "sqadd v23.4s, v23.4s, v4.4s\n"
- "sqadd v24.4s, v24.4s, v2.4s\n"
- "sqadd v25.4s, v25.4s, v1.4s\n"
- "and v8.16b, v26.16b, v10.16b\n"
- "and v0.16b, v27.16b, v10.16b\n"
- "and v5.16b, v28.16b, v10.16b\n"
- "and v4.16b, v29.16b, v10.16b\n"
- "and v2.16b, v30.16b, v10.16b\n"
- "and v1.16b, v31.16b, v10.16b\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "sqadd v26.4s, v26.4s, v8.4s\n"
- "sqadd v27.4s, v27.4s, v0.4s\n"
- "sqadd v28.4s, v28.4s, v5.4s\n"
- "sqadd v29.4s, v29.4s, v4.4s\n"
- "sqadd v30.4s, v30.4s, v2.4s\n"
- "sqadd v31.4s, v31.4s, v1.4s\n"
- "srshl v16.4s, v16.4s, v10.4s\n"
- "srshl v17.4s, v17.4s, v10.4s\n"
- "srshl v18.4s, v18.4s, v10.4s\n"
- "srshl v19.4s, v19.4s, v10.4s\n"
- "srshl v20.4s, v20.4s, v10.4s\n"
- "srshl v21.4s, v21.4s, v10.4s\n"
- "srshl v22.4s, v22.4s, v10.4s\n"
- "srshl v23.4s, v23.4s, v10.4s\n"
- "srshl v24.4s, v24.4s, v10.4s\n"
- "srshl v25.4s, v25.4s, v10.4s\n"
- "srshl v26.4s, v26.4s, v10.4s\n"
- "srshl v27.4s, v27.4s, v10.4s\n"
- "srshl v28.4s, v28.4s, v10.4s\n"
- "srshl v29.4s, v29.4s, v10.4s\n"
- "srshl v30.4s, v30.4s, v10.4s\n"
- "srshl v31.4s, v31.4s, v10.4s\n"
- "add v16.4s, v16.4s, v14.4s\n"
- "add v17.4s, v17.4s, v14.4s\n"
- "add v18.4s, v18.4s, v14.4s\n"
- "add v19.4s, v19.4s, v14.4s\n"
- "add v20.4s, v20.4s, v14.4s\n"
- "add v21.4s, v21.4s, v14.4s\n"
- "add v22.4s, v22.4s, v14.4s\n"
- "add v23.4s, v23.4s, v14.4s\n"
- "add v24.4s, v24.4s, v14.4s\n"
- "add v25.4s, v25.4s, v14.4s\n"
- "add v26.4s, v26.4s, v14.4s\n"
- "add v27.4s, v27.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "smin v16.4s, v16.4s, v11.4s\n"
- "smin v17.4s, v17.4s, v11.4s\n"
- "smin v18.4s, v18.4s, v11.4s\n"
- "smin v19.4s, v19.4s, v11.4s\n"
- "smin v20.4s, v20.4s, v11.4s\n"
- "smin v21.4s, v21.4s, v11.4s\n"
- "smin v22.4s, v22.4s, v11.4s\n"
- "smin v23.4s, v23.4s, v11.4s\n"
- "smin v24.4s, v24.4s, v11.4s\n"
- "smin v25.4s, v25.4s, v11.4s\n"
- "smin v26.4s, v26.4s, v11.4s\n"
- "smin v27.4s, v27.4s, v11.4s\n"
- "smin v28.4s, v28.4s, v11.4s\n"
- "smin v29.4s, v29.4s, v11.4s\n"
- "smin v30.4s, v30.4s, v11.4s\n"
- "smin v31.4s, v31.4s, v11.4s\n"
- "smax v16.4s, v16.4s, v13.4s\n"
- "smax v17.4s, v17.4s, v13.4s\n"
- "smax v18.4s, v18.4s, v13.4s\n"
- "smax v19.4s, v19.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smax v21.4s, v21.4s, v13.4s\n"
- "smax v22.4s, v22.4s, v13.4s\n"
- "smax v23.4s, v23.4s, v13.4s\n"
- "smax v24.4s, v24.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v26.4s, v26.4s, v13.4s\n"
- "smax v27.4s, v27.4s, v13.4s\n"
- "smax v28.4s, v28.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v31.4s, v31.4s, v13.4s\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "uzp1 v17.16b, v17.16b, v17.16b\n"
- "uzp1 v18.16b, v18.16b, v18.16b\n"
- "uzp1 v19.16b, v19.16b, v19.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v22.16b, v22.16b, v22.16b\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "sshl v6.4s, v6.4s, v9.4s\n"
+ "sshl v5.4s, v5.4s, v9.4s\n"
+ "sshl v4.4s, v4.4s, v9.4s\n"
+ "sqrdmulh v6.4s, v6.4s, v8.4s\n"
+ "sqrdmulh v5.4s, v5.4s, v8.4s\n"
+ "sqrdmulh v4.4s, v4.4s, v8.4s\n"
+ "sshl v31.4s, v31.4s, v9.4s\n"
+ "and v18.16b, v6.16b, v7.16b\n"
+ "and v16.16b, v5.16b, v7.16b\n"
+ "and v17.16b, v4.16b, v7.16b\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v6.4s, v6.4s, v18.4s\n"
+ "sqadd v5.4s, v5.4s, v16.4s\n"
+ "sqadd v4.4s, v4.4s, v17.4s\n"
+ "sqrdmulh v31.4s, v31.4s, v8.4s\n"
+ "srshl v6.4s, v6.4s, v7.4s\n"
+ "srshl v5.4s, v5.4s, v7.4s\n"
+ "srshl v4.4s, v4.4s, v7.4s\n"
+ "and v16.16b, v31.16b, v7.16b\n"
+ "add v6.4s, v6.4s, v10.4s\n"
+ "add v5.4s, v5.4s, v10.4s\n"
+ "add v4.4s, v4.4s, v10.4s\n"
+ "smin v6.4s, v6.4s, v13.4s\n"
+ "smin v5.4s, v5.4s, v13.4s\n"
+ "smin v4.4s, v4.4s, v13.4s\n"
+ "smax v6.4s, v6.4s, v14.4s\n"
+ "smax v5.4s, v5.4s, v14.4s\n"
+ "smax v4.4s, v4.4s, v14.4s\n"
+ "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "uzp1 v4.16b, v4.16b, v4.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "uzp1 v4.16b, v4.16b, v4.16b\n"
+ "sshl v30.4s, v30.4s, v9.4s\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
+ "sqrdmulh v30.4s, v30.4s, v8.4s\n"
+ "sshl v29.4s, v29.4s, v9.4s\n"
+ "sshl v28.4s, v28.4s, v9.4s\n"
+ "srshl v31.4s, v31.4s, v7.4s\n"
+ "and v16.16b, v30.16b, v7.16b\n"
+ "sqrdmulh v29.4s, v29.4s, v8.4s\n"
+ "sqrdmulh v28.4s, v28.4s, v8.4s\n"
+ "add v31.4s, v31.4s, v10.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "and v17.16b, v29.16b, v7.16b\n"
+ "smin v31.4s, v31.4s, v13.4s\n"
+ "sqadd v30.4s, v30.4s, v16.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "smax v31.4s, v31.4s, v14.4s\n"
+ "and v16.16b, v28.16b, v7.16b\n"
+ "srshl v30.4s, v30.4s, v7.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "uzp1 v17.16b, v17.16b, v17.16b\n"
- "uzp1 v18.16b, v18.16b, v18.16b\n"
- "uzp1 v19.16b, v19.16b, v19.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v22.16b, v22.16b, v22.16b\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "sqadd v29.4s, v29.4s, v17.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
+ "add v30.4s, v30.4s, v10.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "srshl v29.4s, v29.4s, v7.4s\n"
+ "smin v30.4s, v30.4s, v13.4s\n"
+ "sqadd v28.4s, v28.4s, v16.4s\n"
+ "sshl v27.4s, v27.4s, v9.4s\n"
+ "smax v30.4s, v30.4s, v14.4s\n"
+ "add v29.4s, v29.4s, v10.4s\n"
+ "srshl v28.4s, v28.4s, v7.4s\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "smin v29.4s, v29.4s, v13.4s\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "add v28.4s, v28.4s, v10.4s\n"
+ "smax v29.4s, v29.4s, v14.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v8.4s\n"
+ "smin v28.4s, v28.4s, v13.4s\n"
+ "uzp1 v29.16b, v29.16b, v29.16b\n"
+ "sshl v26.4s, v26.4s, v9.4s\n"
+ "uzp1 v29.16b, v29.16b, v29.16b\n"
+ "smax v28.4s, v28.4s, v14.4s\n"
+ "and v16.16b, v27.16b, v7.16b\n"
+ "sqrdmulh v26.4s, v26.4s, v8.4s\n"
+ "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "and v17.16b, v26.16b, v7.16b\n"
+ "sqadd v27.4s, v27.4s, v16.4s\n"
+ "sshl v25.4s, v25.4s, v9.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqrdmulh v25.4s, v25.4s, v8.4s\n"
+ "srshl v27.4s, v27.4s, v7.4s\n"
+ "sqadd v26.4s, v26.4s, v17.4s\n"
+ "sshl v24.4s, v24.4s, v9.4s\n"
+ "and v16.16b, v25.16b, v7.16b\n"
+ "add v27.4s, v27.4s, v10.4s\n"
+ "srshl v26.4s, v26.4s, v7.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smin v27.4s, v27.4s, v13.4s\n"
+ "sqrdmulh v24.4s, v24.4s, v8.4s\n"
+ "add v26.4s, v26.4s, v10.4s\n"
+ "smax v27.4s, v27.4s, v14.4s\n"
+ "sqadd v25.4s, v25.4s, v16.4s\n"
+ "smin v26.4s, v26.4s, v13.4s\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "and v17.16b, v24.16b, v7.16b\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "smax v26.4s, v26.4s, v14.4s\n"
+ "srshl v25.4s, v25.4s, v7.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "sshl v23.4s, v23.4s, v9.4s\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "add v25.4s, v25.4s, v10.4s\n"
+ "sqadd v24.4s, v24.4s, v17.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v8.4s\n"
+ "smin v25.4s, v25.4s, v13.4s\n"
+ "sshl v22.4s, v22.4s, v9.4s\n"
+ "srshl v24.4s, v24.4s, v7.4s\n"
+ "smax v25.4s, v25.4s, v14.4s\n"
+ "and v16.16b, v23.16b, v7.16b\n"
+ "sqrdmulh v22.4s, v22.4s, v8.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "add v24.4s, v24.4s, v10.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smin v24.4s, v24.4s, v13.4s\n"
+ "and v17.16b, v22.16b, v7.16b\n"
+ "sqadd v23.4s, v23.4s, v16.4s\n"
+ "smax v24.4s, v24.4s, v14.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sshl v21.4s, v21.4s, v9.4s\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "srshl v23.4s, v23.4s, v7.4s\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "sqrdmulh v21.4s, v21.4s, v8.4s\n"
+ "add v23.4s, v23.4s, v10.4s\n"
+ "sshl v20.4s, v20.4s, v9.4s\n"
+ "srshl v22.4s, v22.4s, v7.4s\n"
+ "smin v23.4s, v23.4s, v13.4s\n"
+ "and v16.16b, v21.16b, v7.16b\n"
+ "sqrdmulh v20.4s, v20.4s, v8.4s\n"
+ "smax v23.4s, v23.4s, v14.4s\n"
+ "add v22.4s, v22.4s, v10.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "smin v22.4s, v22.4s, v13.4s\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "sqadd v21.4s, v21.4s, v16.4s\n"
+ "smax v22.4s, v22.4s, v14.4s\n"
+ "and v16.16b, v20.16b, v7.16b\n"
+ "sshl v19.4s, v19.4s, v9.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "srshl v21.4s, v21.4s, v7.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v19.4s, v19.4s, v8.4s\n"
+ "add v21.4s, v21.4s, v10.4s\n"
+ "sqadd v20.4s, v20.4s, v16.4s\n"
+ "smin v21.4s, v21.4s, v13.4s\n"
+ "and v16.16b, v19.16b, v7.16b\n"
+ "srshl v20.4s, v20.4s, v7.4s\n"
+ "smax v21.4s, v21.4s, v14.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "add v20.4s, v20.4s, v10.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "smin v20.4s, v20.4s, v13.4s\n"
+ "srshl v19.4s, v19.4s, v7.4s\n"
+ "smax v20.4s, v20.4s, v14.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "add v19.4s, v19.4s, v10.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "smin v19.4s, v19.4s, v13.4s\n"
+ "smax v19.4s, v19.4s, v14.4s\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
"tbz %x[n_output_channels], #1, 24f\n"
- "ldr x20, [%x[outptrs], #0x0]\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "add x19, x19, x9\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
"add x20, x20, x9\n"
+ "st1 { v6.h }[0], [x19]\n"
"add x21, x21, x9\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
+ "st1 { v5.h }[0], [x20]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
"add x22, x22, x9\n"
+ "st1 { v4.h }[0], [x21]\n"
"add x23, x23, x9\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
+ "st1 { v31.h }[0], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
"add x24, x24, x9\n"
+ "st1 { v30.h }[0], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
"add x25, x25, x9\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
+ "st1 { v29.h }[0], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
"add x26, x26, x9\n"
- "add x27, x27, x9\n"
- "st1 { v16.h }[0], [x20]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
+ "st1 { v28.h }[0], [x25]\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "add x19, x19, x9\n"
+ "st1 { v27.h }[0], [x26]\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
"add x20, x20, x9\n"
- "st1 { v17.h }[0], [x21]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
+ "st1 { v26.h }[0], [x19]\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
"add x21, x21, x9\n"
- "st1 { v18.h }[0], [x22]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
+ "st1 { v25.h }[0], [x20]\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
"add x22, x22, x9\n"
- "st1 { v19.h }[0], [x23]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
+ "st1 { v24.h }[0], [x21]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
"add x23, x23, x9\n"
- "st1 { v20.h }[0], [x24]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
+ "st1 { v23.h }[0], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
"add x24, x24, x9\n"
- "st1 { v21.h }[0], [x25]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
+ "st1 { v22.h }[0], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
"add x25, x25, x9\n"
- "st1 { v22.h }[0], [x26]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
+ "st1 { v21.h }[0], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
"add x26, x26, x9\n"
- "st1 { v23.h }[0], [x27]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
- "add x27, x27, x9\n"
+ "st1 { v20.h }[0], [x25]\n"
"add x9, x9, #0x2\n"
- "st1 { v24.h }[0], [x20]\n"
- "st1 { v25.h }[0], [x21]\n"
- "st1 { v26.h }[0], [x22]\n"
- "st1 { v27.h }[0], [x23]\n"
- "st1 { v28.h }[0], [x24]\n"
- "st1 { v29.h }[0], [x25]\n"
- "st1 { v30.h }[0], [x26]\n"
- "st1 { v31.h }[0], [x27]\n"
+ "st1 { v19.h }[0], [x26]\n"
"tbz %x[n_output_channels], #0, 25f\n"
- "ldr x20, [%x[outptrs], #0x0]\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "add x19, x19, x9\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
"add x20, x20, x9\n"
+ "st1 { v6.b }[2], [x19]\n"
"add x21, x21, x9\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
+ "st1 { v5.b }[2], [x20]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
"add x22, x22, x9\n"
+ "st1 { v4.b }[2], [x21]\n"
"add x23, x23, x9\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
+ "st1 { v31.b }[2], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
"add x24, x24, x9\n"
+ "st1 { v30.b }[2], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
"add x25, x25, x9\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
+ "st1 { v29.b }[2], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
"add x26, x26, x9\n"
- "add x27, x27, x9\n"
- "st1 { v16.b }[2], [x20]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
+ "st1 { v28.b }[2], [x25]\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "add x19, x19, x9\n"
+ "st1 { v27.b }[2], [x26]\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
"add x20, x20, x9\n"
- "st1 { v17.b }[2], [x21]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
+ "st1 { v26.b }[2], [x19]\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
"add x21, x21, x9\n"
- "st1 { v18.b }[2], [x22]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
+ "st1 { v25.b }[2], [x20]\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
"add x22, x22, x9\n"
- "st1 { v19.b }[2], [x23]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
+ "st1 { v24.b }[2], [x21]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
"add x23, x23, x9\n"
- "st1 { v20.b }[2], [x24]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
+ "st1 { v23.b }[2], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
"add x24, x24, x9\n"
- "st1 { v21.b }[2], [x25]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
+ "st1 { v22.b }[2], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
"add x25, x25, x9\n"
- "st1 { v22.b }[2], [x26]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
+ "st1 { v21.b }[2], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
"add x26, x26, x9\n"
- "st1 { v23.b }[2], [x27]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
- "add x27, x27, x9\n"
- "st1 { v24.b }[2], [x20]\n"
- "st1 { v25.b }[2], [x21]\n"
- "st1 { v26.b }[2], [x22]\n"
- "st1 { v27.b }[2], [x23]\n"
- "st1 { v28.b }[2], [x24]\n"
- "st1 { v29.b }[2], [x25]\n"
- "st1 { v30.b }[2], [x26]\n"
- "st1 { v31.b }[2], [x27]\n"
+ "st1 { v20.b }[2], [x25]\n"
+ "st1 { v19.b }[2], [x26]\n"
"b 25f\n"
"24:" // Output channel oddments: Done: Store: Bit 1: Unset
- "ldr x20, [%x[outptrs], #0x0]\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
+ "tbz %x[n_output_channels], #0, 25f\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "add x19, x19, x9\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
"add x20, x20, x9\n"
+ "st1 { v6.b }[0], [x19]\n"
"add x21, x21, x9\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
+ "st1 { v5.b }[0], [x20]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
"add x22, x22, x9\n"
+ "st1 { v4.b }[0], [x21]\n"
"add x23, x23, x9\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
+ "st1 { v31.b }[0], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
"add x24, x24, x9\n"
+ "st1 { v30.b }[0], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
"add x25, x25, x9\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
+ "st1 { v29.b }[0], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
"add x26, x26, x9\n"
- "add x27, x27, x9\n"
- "st1 { v16.b }[0], [x20]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
+ "st1 { v28.b }[0], [x25]\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "add x19, x19, x9\n"
+ "st1 { v27.b }[0], [x26]\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
"add x20, x20, x9\n"
- "st1 { v17.b }[0], [x21]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
+ "st1 { v26.b }[0], [x19]\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
"add x21, x21, x9\n"
- "st1 { v18.b }[0], [x22]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
+ "st1 { v25.b }[0], [x20]\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
"add x22, x22, x9\n"
- "st1 { v19.b }[0], [x23]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
+ "st1 { v24.b }[0], [x21]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
"add x23, x23, x9\n"
- "st1 { v20.b }[0], [x24]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
+ "st1 { v23.b }[0], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
"add x24, x24, x9\n"
- "st1 { v21.b }[0], [x25]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
+ "st1 { v22.b }[0], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
"add x25, x25, x9\n"
- "st1 { v22.b }[0], [x26]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
+ "st1 { v21.b }[0], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
"add x26, x26, x9\n"
- "st1 { v23.b }[0], [x27]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
- "add x27, x27, x9\n"
- "st1 { v24.b }[0], [x20]\n"
- "st1 { v25.b }[0], [x21]\n"
- "st1 { v26.b }[0], [x22]\n"
- "st1 { v27.b }[0], [x23]\n"
- "st1 { v28.b }[0], [x24]\n"
- "st1 { v29.b }[0], [x25]\n"
- "st1 { v30.b }[0], [x26]\n"
- "st1 { v31.b }[0], [x27]\n"
+ "st1 { v20.b }[0], [x25]\n"
+ "st1 { v19.b }[0], [x26]\n"
"25:" // Output channel oddments: Done: Store: Bit 1: End
"26:" // Done
: [weights] "+&r" (weights)
: [bias] "r" (bias), [inptrs] "r" (inptrs), [kernel_points] "r" ((uint64_t) kernel_points), [n_output_channels] "r" ((uint64_t) n_output_channels), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_layer_left_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_left_shift)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [outptrs] "r" (outptrs), [qp] "r" (&qp), [rq_left_shift_ptr] "r" (per_channel_left_shifts), [rq_mul_ptr] "r" (per_channel_muls), [rq_right_shift_ptr] "r" (per_channel_right_shifts)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
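The kernel above requantizes each 32-bit accumulator with the usual fixed-point sequence visible in the restored assembly: a per-layer left shift (sshl), a rounding doubling multiply-high against the quantized multiplier (sqrdmulh), a sign-corrected rounding right shift (the and/sshr/sqadd fixup followed by srshl), the output offset, a clamp to the quantized range (smin/smax), and a narrowing store (uzp1). A minimal scalar sketch of that arithmetic follows, assuming gemmlowp-style rounding semantics and driven by the per_layer_mul / per_layer_left_shift / per_layer_right_shift fields of arm_gemm::Requantize32 named in the operand list; the function and parameter names are illustrative, not part of this patch.

#include <algorithm>
#include <cstdint>

// Rounding doubling multiply returning the high half, as SQRDMULH does.
static int32_t sqrdmulh32(int32_t a, int32_t b)
{
    int64_t p = (int64_t)a * (int64_t)b;
    int64_t r = (p + (INT64_C(1) << 30)) >> 31;
    // Saturate the one overflowing case (a == b == INT32_MIN).
    return r > INT32_MAX ? INT32_MAX : (int32_t)r;
}

// Illustrative scalar model of the per-layer requantization path.
static int8_t requantize(int32_t acc, int32_t mul, int left_shift,
                         int right_shift, int32_t c_offset,
                         int32_t minval, int32_t maxval)
{
    int32_t v = (int32_t)((int64_t)acc << left_shift);       // sshl
    v = sqrdmulh32(v, mul);                                  // sqrdmulh
    if (right_shift > 0)
    {
        // The and/sshr/sqadd fixup nudges negative values so that
        // ties round away from zero rather than toward +infinity.
        if (v < 0) v -= 1;
        v = (v + (1 << (right_shift - 1))) >> right_shift;   // srshl
    }
    v += c_offset;                                           // add
    v = std::max(minval, std::min(maxval, v));               // smax / smin
    return (int8_t)v;                                        // uzp1 narrowing
}

For example, with mul = 0x40000000 (0.5 in Q31), left_shift = 0 and right_shift = 1, requantize(100, ...) yields 25, i.e. the accumulator scaled by 0.25 before the offset and clamp are applied.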
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
index afc6695ff1..89cb2ec380 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -91,65 +91,65 @@ void a64_u8qa_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "ldr x7, [%x[params], %[offsetof_Params_n_channels]]\n"
- "ldr x23, [%x[params], %[offsetof_Params_requant]]\n"
- "lsr x8, x7, #0x3\n"
- "add x20, x23, %[offsetof_Requantize32_b_offset]\n"
- "ld1r { v12.16b }, [x20]\n"
- "ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n"
- "add x21, x23, %[offsetof_Requantize32_c_offset]\n"
- "add x20, x23, %[offsetof_Requantize32_minval]\n"
- "ld1r { v20.8h }, [x21]\n"
- "ld1r { v15.8h }, [x20]\n"
- "add x20, x23, %[offsetof_Requantize32_maxval]\n"
- "mov x17, #0x0\n"
- "ld1r { v13.8h }, [x20]\n"
- "mov x16, #0x0\n"
- "add x15, %x[params], %[offsetof_Params_inptrs]\n"
- "ldr x14, [%x[params], %[offsetof_Params_weights]]\n"
- "ldr x13, [%x[params], %[offsetof_Params_requant_muls]]\n"
- "ldr x12, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "ldp x11, x10, [x22, #0x0]\n"
- "ldp x9, x28, [x22, #0x10]\n"
- "cbz x8, 3f\n"
- "ldr d0, [x14, #0x0]\n"
- "ldr d1, [x14, #0x8]\n"
- "subs x8, x8, #0x1\n"
- "usubl v0.8h, v0.8b, v12.8b\n"
- "ldr d2, [x14, #0x10]\n"
- "ldr d3, [x14, #0x18]\n"
- "usubl v1.8h, v1.8b, v12.8b\n"
- "usubl v2.8h, v2.8b, v12.8b\n"
- "ldr d4, [x14, #0x20]\n"
- "ldr d5, [x14, #0x28]\n"
- "usubl v3.8h, v3.8b, v12.8b\n"
- "usubl v4.8h, v4.8b, v12.8b\n"
- "ldr d6, [x14, #0x30]\n"
- "ldr d7, [x14, #0x38]\n"
- "usubl v5.8h, v5.8b, v12.8b\n"
- "usubl v6.8h, v6.8b, v12.8b\n"
- "ldr d8, [x14, #0x40]\n"
- "ldr x27, [%x[params], %[offsetof_Params_bias]]\n"
- "usubl v7.8h, v7.8b, v12.8b\n"
- "usubl v8.8h, v8.8b, v12.8b\n"
- "ldr q14, [x27, #0x0]\n"
- "ldr q11, [x27, #0x10]\n"
- "add x27, x27, #0x20\n"
- "str x27, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x24, x23, [x15, #0x0]\n"
- "ldp x22, x21, [x15, #0x10]\n"
- "mov v16.16b, v14.16b\n"
- "mov v9.16b, v11.16b\n"
- "ldr d31, [x24, x17]\n"
- "ldr d30, [x23, x17]\n"
- "mov v24.16b, v14.16b\n"
- "mov v17.16b, v11.16b\n"
- "ldr d29, [x22, x17]\n"
- "ldr d28, [x21, x17]\n"
- "mov v23.16b, v14.16b\n"
- "mov v25.16b, v11.16b\n"
- "ldr x20, [x15, #0x20]\n"
- "ldr d27, [x20, x17]\n"
+ "ldr x19, [%x[params], %[offsetof_Params_requant]]\n"
+ "ldr x8, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "add x23, x19, %[offsetof_Requantize32_b_offset]\n"
+ "add x22, x19, %[offsetof_Requantize32_c_offset]\n"
+ "ldr x21, [%x[params], %[offsetof_Params_outptrs]]\n"
+ "add x20, x19, %[offsetof_Requantize32_minval]\n"
+ "add x19, x19, %[offsetof_Requantize32_maxval]\n"
+ "ldr x17, [%x[params], %[offsetof_Params_weights]]\n"
+ "ld1r { v15.16b }, [x23]\n"
+ "ld1r { v13.8h }, [x22]\n"
+ "lsr x16, x8, #0x3\n"
+ "mov x15, #0x0\n"
+ "ld1r { v11.8h }, [x20]\n"
+ "ld1r { v25.8h }, [x19]\n"
+ "mov x14, #0x0\n"
+ "add x13, %x[params], %[offsetof_Params_inptrs]\n"
+ "ldr x12, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "ldr x11, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "ldp x10, x9, [x21, #0x0]\n"
+ "ldp x28, x27, [x21, #0x10]\n"
+ "cbz x16, 3f\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr q12, [x19, #0x0]\n"
+ "subs x16, x16, #0x1\n"
+ "mov v14.16b, v12.16b\n"
+ "ldr q17, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr d0, [x17, #0x0]\n"
+ "ldr d1, [x17, #0x8]\n"
+ "ldr d2, [x17, #0x10]\n"
+ "mov v9.16b, v17.16b\n"
+ "mov v16.16b, v12.16b\n"
+ "ldr d3, [x17, #0x18]\n"
+ "ldr d4, [x17, #0x20]\n"
+ "mov v10.16b, v17.16b\n"
+ "mov v18.16b, v12.16b\n"
+ "ldr d5, [x17, #0x28]\n"
+ "ldr d6, [x17, #0x30]\n"
+ "mov v26.16b, v17.16b\n"
+ "usubl v0.8h, v0.8b, v15.8b\n"
+ "ldr d7, [x17, #0x38]\n"
+ "ldr d8, [x17, #0x40]\n"
+ "usubl v1.8h, v1.8b, v15.8b\n"
+ "usubl v2.8h, v2.8b, v15.8b\n"
+ "ldp x23, x22, [x13, #0x0]\n"
+ "ldp x21, x20, [x13, #0x10]\n"
+ "usubl v3.8h, v3.8b, v15.8b\n"
+ "usubl v4.8h, v4.8b, v15.8b\n"
+ "ldr x19, [x13, #0x20]\n"
+ "ldr d31, [x23, x15]\n"
+ "usubl v5.8h, v5.8b, v15.8b\n"
+ "usubl v6.8h, v6.8b, v15.8b\n"
+ "ldr d30, [x22, x15]\n"
+ "ldr d29, [x21, x15]\n"
+ "usubl v7.8h, v7.8b, v15.8b\n"
+ "usubl v8.8h, v8.8b, v15.8b\n"
+ "ldr d28, [x20, x15]\n"
+ "ldr d27, [x19, x15]\n"
"ushll v31.8h, v31.8b, #0x0\n"
"ushll v30.8h, v30.8b, #0x0\n"
"ushll v29.8h, v29.8b, #0x0\n"
@@ -157,226 +157,226 @@ void a64_u8qa_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
"ushll v27.8h, v27.8b, #0x0\n"
"beq 2f\n"
"1:" // Loop
- "ldr q22, [x13, #0x0]\n"
- "ldr q10, [x12, #0x0]\n"
- "smlal v14.4s, v31.4h, v4.4h\n"
- "smlal2 v11.4s, v31.8h, v4.8h\n"
- "ldr q18, [x13, #0x10]\n"
- "ldr q26, [x12, #0x10]\n"
- "smlal v16.4s, v31.4h, v3.4h\n"
+ "smlal v12.4s, v31.4h, v4.4h\n"
+ "smlal2 v17.4s, v31.8h, v4.8h\n"
+ "ldr x21, [x13, #0x28]\n"
+ "ldr x26, [x13, #0x38]\n"
+ "smlal v14.4s, v31.4h, v3.4h\n"
"smlal2 v9.4s, v31.8h, v3.8h\n"
- "smlal v14.4s, v30.4h, v0.4h\n"
- "smlal2 v11.4s, v30.8h, v0.8h\n"
- "ldr x22, [x15, #0x28]\n"
- "ldr x21, [x15, #0x38]\n"
- "smlal v16.4s, v29.4h, v2.4h\n"
+ "ldr x20, [x13, #0x30]\n"
+ "ldr x25, [x13, #0x40]\n"
+ "smlal v12.4s, v30.4h, v0.4h\n"
+ "smlal2 v17.4s, v30.8h, v0.8h\n"
+ "ldr x19, [x13, #0x48]\n"
+ "ldr d30, [x19, x15]\n"
+ "smlal v14.4s, v29.4h, v2.4h\n"
"smlal2 v9.4s, v29.8h, v2.8h\n"
- "ldr x20, [x15, #0x30]\n"
- "ldr d29, [x20, x17]\n"
- "smlal v24.4s, v31.4h, v1.4h\n"
- "smlal2 v17.4s, v31.8h, v1.8h\n"
- "ldr x26, [x15, #0x40]\n"
- "ldr x20, [x15, #0x48]\n"
- "ldr d30, [x20, x17]\n"
- "smlal v23.4s, v31.4h, v0.4h\n"
- "smlal2 v25.4s, v31.8h, v0.8h\n"
- "ldr d31, [x22, x17]\n"
- "smlal v14.4s, v28.4h, v5.4h\n"
- "smlal2 v11.4s, v28.8h, v5.8h\n"
- "ushll v31.8h, v31.8b, #0x0\n"
- "ldr x25, [x15, #0x50]\n"
- "smlal v16.4s, v28.4h, v4.4h\n"
- "smlal2 v9.4s, v28.8h, v4.8h\n"
+ "ldr d29, [x20, x15]\n"
"ushll v29.8h, v29.8b, #0x0\n"
- "ldr x24, [x15, #0x58]\n"
- "smlal v24.4s, v28.4h, v2.4h\n"
- "smlal2 v17.4s, v28.8h, v2.8h\n"
+ "smlal v16.4s, v31.4h, v1.4h\n"
+ "smlal2 v10.4s, v31.8h, v1.8h\n"
+ "ldr x24, [x13, #0x50]\n"
+ "ldr x23, [x13, #0x58]\n"
+ "smlal v18.4s, v31.4h, v0.4h\n"
+ "smlal2 v26.4s, v31.8h, v0.8h\n"
+ "ldr d31, [x21, x15]\n"
+ "ushll v31.8h, v31.8b, #0x0\n"
+ "smlal v12.4s, v28.4h, v5.4h\n"
+ "smlal2 v17.4s, v28.8h, v5.8h\n"
"ushll v30.8h, v30.8b, #0x0\n"
- "ldr x23, [x15, #0x60]\n"
- "smlal v23.4s, v28.4h, v1.4h\n"
- "smlal2 v25.4s, v28.8h, v1.8h\n"
- "ldr d28, [x21, x17]\n"
+ "ldr x22, [x13, #0x60]\n"
+ "smlal v14.4s, v28.4h, v4.4h\n"
+ "smlal2 v9.4s, v28.8h, v4.8h\n"
+ "ldr x21, [x13, #0x68]\n"
+ "ldr x20, [x13, #0x70]\n"
+ "smlal v16.4s, v28.4h, v2.4h\n"
+ "smlal2 v10.4s, v28.8h, v2.8h\n"
+ "ldr x19, [x13, #0x78]\n"
+ "ldr q21, [x12, #0x0]\n"
+ "smlal v18.4s, v28.4h, v1.4h\n"
+ "smlal2 v26.4s, v28.8h, v1.8h\n"
+ "ldr d28, [x26, x15]\n"
"ushll v28.8h, v28.8b, #0x0\n"
- "smlal v14.4s, v27.4h, v7.4h\n"
- "smlal2 v11.4s, v27.8h, v7.8h\n"
- "ldr x22, [x15, #0x68]\n"
- "ldr x21, [x15, #0x70]\n"
- "smlal v16.4s, v27.4h, v6.4h\n"
+ "smlal v12.4s, v27.4h, v7.4h\n"
+ "smlal2 v17.4s, v27.8h, v7.8h\n"
+ "ldr q24, [x11, #0x0]\n"
+ "ldr q19, [x12, #0x10]\n"
+ "smlal v14.4s, v27.4h, v6.4h\n"
"smlal2 v9.4s, v27.8h, v6.8h\n"
- "ldr x20, [x15, #0x78]\n"
- "ldr x27, [%x[params], %[offsetof_Params_bias]]\n"
- "smlal v24.4s, v31.4h, v6.4h\n"
- "smlal2 v17.4s, v31.8h, v6.8h\n"
- "ldr d31, [x26, x17]\n"
+ "ldr q23, [x11, #0x10]\n"
+ "add x17, x17, #0x48\n"
+ "smlal v16.4s, v31.4h, v6.4h\n"
+ "smlal2 v10.4s, v31.8h, v6.8h\n"
+ "ldr d31, [x25, x15]\n"
"ushll v31.8h, v31.8b, #0x0\n"
- "smlal v23.4s, v27.4h, v3.4h\n"
- "smlal2 v25.4s, v27.8h, v3.8h\n"
- "add x14, x14, #0x48\n"
- "subs x8, x8, #0x1\n"
- "smlal v14.4s, v28.4h, v1.4h\n"
- "smlal2 v11.4s, v28.8h, v1.8h\n"
- "add x13, x13, #0x20\n"
+ "smlal v18.4s, v27.4h, v3.4h\n"
+ "smlal2 v26.4s, v27.8h, v3.8h\n"
+ "subs x16, x16, #0x1\n"
"add x12, x12, #0x20\n"
- "smlal v16.4s, v28.4h, v0.4h\n"
+ "smlal v12.4s, v28.4h, v1.4h\n"
+ "smlal2 v17.4s, v28.8h, v1.8h\n"
+ "add x11, x11, #0x20\n"
+ "smlal v14.4s, v28.4h, v0.4h\n"
"smlal2 v9.4s, v28.8h, v0.8h\n"
- "ldr d28, [x24, x17]\n"
+ "ldr d28, [x23, x15]\n"
"ushll v28.8h, v28.8b, #0x0\n"
- "smlal v24.4s, v27.4h, v4.4h\n"
- "smlal v23.4s, v29.4h, v8.4h\n"
- "smlal2 v17.4s, v27.8h, v4.8h\n"
- "smlal2 v25.4s, v29.8h, v8.8h\n"
- "ldr d29, [x25, x17]\n"
+ "smlal v16.4s, v27.4h, v4.4h\n"
+ "smlal v18.4s, v29.4h, v8.4h\n"
+ "smlal2 v10.4s, v27.8h, v4.8h\n"
+ "smlal2 v26.4s, v29.8h, v8.8h\n"
+ "ldr d29, [x24, x15]\n"
"ushll v29.8h, v29.8b, #0x0\n"
- "smlal v14.4s, v31.4h, v2.4h\n"
- "smlal2 v11.4s, v31.8h, v2.8h\n"
- "smlal v16.4s, v31.4h, v1.4h\n"
+ "smlal v12.4s, v31.4h, v2.4h\n"
+ "smlal2 v17.4s, v31.8h, v2.8h\n"
+ "smlal v14.4s, v31.4h, v1.4h\n"
"smlal2 v9.4s, v31.8h, v1.8h\n"
- "ldr d31, [x23, x17]\n"
+ "ldr d31, [x22, x15]\n"
"ushll v31.8h, v31.8b, #0x0\n"
- "smlal v24.4s, v30.4h, v5.4h\n"
- "smlal v23.4s, v30.4h, v4.4h\n"
- "smlal v14.4s, v30.4h, v8.4h\n"
- "smlal2 v11.4s, v30.8h, v8.8h\n"
- "smlal v16.4s, v30.4h, v7.4h\n"
+ "smlal v16.4s, v30.4h, v5.4h\n"
+ "smlal v18.4s, v30.4h, v4.4h\n"
+ "smlal v12.4s, v30.4h, v8.4h\n"
+ "smlal2 v17.4s, v30.8h, v8.8h\n"
+ "smlal v14.4s, v30.4h, v7.4h\n"
"smlal2 v9.4s, v30.8h, v7.8h\n"
- "smlal2 v17.4s, v30.8h, v5.8h\n"
- "smlal2 v25.4s, v30.8h, v4.8h\n"
- "ldr d30, [x22, x17]\n"
+ "smlal2 v10.4s, v30.8h, v5.8h\n"
+ "smlal2 v26.4s, v30.8h, v4.8h\n"
+ "ldr d30, [x21, x15]\n"
"ushll v30.8h, v30.8b, #0x0\n"
- "smlal v24.4s, v29.4h, v0.4h\n"
- "smlal v23.4s, v28.4h, v2.4h\n"
- "smlal v14.4s, v29.4h, v3.4h\n"
- "smlal2 v11.4s, v29.8h, v3.8h\n"
- "smlal2 v17.4s, v29.8h, v0.8h\n"
- "ldr d29, [x21, x17]\n"
- "smlal2 v25.4s, v28.8h, v2.8h\n"
+ "smlal v16.4s, v29.4h, v0.4h\n"
+ "smlal v18.4s, v28.4h, v2.4h\n"
+ "smlal v12.4s, v29.4h, v3.4h\n"
+ "smlal2 v17.4s, v29.8h, v3.8h\n"
+ "smlal2 v10.4s, v29.8h, v0.8h\n"
+ "ldr d29, [x20, x15]\n"
+ "smlal2 v26.4s, v28.8h, v2.8h\n"
"ushll v29.8h, v29.8b, #0x0\n"
- "smlal v24.4s, v31.4h, v3.4h\n"
- "smlal v23.4s, v30.4h, v5.4h\n"
- "smlal v16.4s, v28.4h, v5.4h\n"
+ "smlal v16.4s, v31.4h, v3.4h\n"
+ "smlal v18.4s, v30.4h, v5.4h\n"
+ "smlal v14.4s, v28.4h, v5.4h\n"
"smlal2 v9.4s, v28.8h, v5.8h\n"
- "ldr d28, [x20, x17]\n"
+ "ldr d28, [x19, x15]\n"
"ushll v28.8h, v28.8b, #0x0\n"
- "smlal v14.4s, v31.4h, v6.4h\n"
- "smlal2 v17.4s, v31.8h, v3.8h\n"
- "sqrdmulh v14.4s, v14.4s, v22.4s\n"
- "add x17, x17, #0x8\n"
- "smlal2 v25.4s, v30.8h, v5.8h\n"
- "smlal v24.4s, v29.4h, v7.4h\n"
- "and v21.16b, v14.16b, v10.16b\n"
- "smlal v23.4s, v29.4h, v6.4h\n"
- "smlal2 v11.4s, v31.8h, v6.8h\n"
- "sqrdmulh v11.4s, v11.4s, v18.4s\n"
- "smlal2 v17.4s, v29.8h, v7.8h\n"
- "smlal2 v25.4s, v29.8h, v6.8h\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "smlal v16.4s, v30.4h, v8.4h\n"
- "smlal v24.4s, v28.4h, v8.4h\n"
- "and v4.16b, v11.16b, v26.16b\n"
- "smlal v23.4s, v28.4h, v7.4h\n"
+ "smlal2 v10.4s, v31.8h, v3.8h\n"
+ "smlal2 v26.4s, v30.8h, v5.8h\n"
+ "add x15, x15, #0x8\n"
+ "smlal v16.4s, v29.4h, v7.4h\n"
+ "smlal v18.4s, v29.4h, v6.4h\n"
+ "smlal2 v10.4s, v29.8h, v7.8h\n"
+ "smlal2 v26.4s, v29.8h, v6.8h\n"
+ "smlal v12.4s, v31.4h, v6.4h\n"
+ "smlal v14.4s, v30.4h, v8.4h\n"
+ "sqrdmulh v12.4s, v12.4s, v21.4s\n"
+ "smlal v16.4s, v28.4h, v8.4h\n"
+ "smlal v18.4s, v28.4h, v7.4h\n"
+ "sqrdmulh v14.4s, v14.4s, v21.4s\n"
+ "smlal2 v17.4s, v31.8h, v6.8h\n"
"smlal2 v9.4s, v30.8h, v8.8h\n"
- "sqrdmulh v16.4s, v16.4s, v22.4s\n"
- "smlal2 v17.4s, v28.8h, v8.8h\n"
- "smlal2 v25.4s, v28.8h, v7.8h\n"
- "sqrdmulh v24.4s, v24.4s, v22.4s\n"
- "sqrdmulh v23.4s, v23.4s, v22.4s\n"
- "sqadd v14.4s, v14.4s, v21.4s\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "and v19.16b, v16.16b, v10.16b\n"
- "sqrdmulh v9.4s, v9.4s, v18.4s\n"
- "and v3.16b, v24.16b, v10.16b\n"
- "sqrdmulh v17.4s, v17.4s, v18.4s\n"
- "and v21.16b, v23.16b, v10.16b\n"
- "sqrdmulh v25.4s, v25.4s, v18.4s\n"
- "sqadd v11.4s, v11.4s, v4.4s\n"
+ "sqrdmulh v16.4s, v16.4s, v21.4s\n"
+ "smlal2 v10.4s, v28.8h, v8.8h\n"
+ "smlal2 v26.4s, v28.8h, v7.8h\n"
+ "sqrdmulh v18.4s, v18.4s, v21.4s\n"
+ "and v29.16b, v12.16b, v24.16b\n"
+ "sqrdmulh v17.4s, v17.4s, v19.4s\n"
+ "and v22.16b, v14.16b, v24.16b\n"
+ "sqrdmulh v9.4s, v9.4s, v19.4s\n"
+ "and v21.16b, v16.16b, v24.16b\n"
+ "sqrdmulh v10.4s, v10.4s, v19.4s\n"
+ "and v20.16b, v18.16b, v24.16b\n"
+ "sqrdmulh v26.4s, v26.4s, v19.4s\n"
+ "sshr v29.4s, v29.4s, #0x1f\n"
+ "and v19.16b, v17.16b, v23.16b\n"
+ "sshr v22.4s, v22.4s, #0x1f\n"
+ "and v30.16b, v9.16b, v23.16b\n"
+ "sshr v21.4s, v21.4s, #0x1f\n"
+ "and v3.16b, v10.16b, v23.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v28.16b, v26.16b, v23.16b\n"
+ "sqadd v12.4s, v12.4s, v29.4s\n"
"sshr v19.4s, v19.4s, #0x1f\n"
- "and v27.16b, v9.16b, v26.16b\n"
+ "sqadd v14.4s, v14.4s, v22.4s\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "sqadd v16.4s, v16.4s, v21.4s\n"
"sshr v3.4s, v3.4s, #0x1f\n"
- "and v5.16b, v17.16b, v26.16b\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "and v4.16b, v25.16b, v26.16b\n"
- "sqadd v16.4s, v16.4s, v19.4s\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "sqadd v24.4s, v24.4s, v3.4s\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sqadd v23.4s, v23.4s, v21.4s\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "srshl v14.4s, v14.4s, v10.4s\n"
- "srshl v16.4s, v16.4s, v10.4s\n"
- "sqadd v9.4s, v9.4s, v27.4s\n"
- "srshl v24.4s, v24.4s, v10.4s\n"
- "sqadd v17.4s, v17.4s, v5.4s\n"
- "srshl v23.4s, v23.4s, v10.4s\n"
- "sqadd v25.4s, v25.4s, v4.4s\n"
- "srshl v11.4s, v11.4s, v26.4s\n"
+ "sqadd v18.4s, v18.4s, v20.4s\n"
+ "sshr v28.4s, v28.4s, #0x1f\n"
+ "srshl v12.4s, v12.4s, v24.4s\n"
+ "sqadd v17.4s, v17.4s, v19.4s\n"
+ "srshl v14.4s, v14.4s, v24.4s\n"
+ "sqadd v9.4s, v9.4s, v30.4s\n"
+ "srshl v16.4s, v16.4s, v24.4s\n"
+ "sqadd v10.4s, v10.4s, v3.4s\n"
+ "srshl v18.4s, v18.4s, v24.4s\n"
+ "sqadd v26.4s, v26.4s, v28.4s\n"
+ "srshl v17.4s, v17.4s, v23.4s\n"
+ "sqxtn v12.4h, v12.4s\n"
+ "srshl v9.4s, v9.4s, v23.4s\n"
"sqxtn v14.4h, v14.4s\n"
- "srshl v9.4s, v9.4s, v26.4s\n"
+ "srshl v10.4s, v10.4s, v23.4s\n"
"sqxtn v16.4h, v16.4s\n"
- "srshl v17.4s, v17.4s, v26.4s\n"
- "sqxtn v24.4h, v24.4s\n"
- "srshl v25.4s, v25.4s, v26.4s\n"
- "sqxtn v23.4h, v23.4s\n"
- "sqxtn2 v14.8h, v11.4s\n"
- "sqxtn2 v16.8h, v9.4s\n"
- "sqxtn2 v24.8h, v17.4s\n"
- "sqxtn2 v23.8h, v25.4s\n"
- "sqadd v14.8h, v14.8h, v20.8h\n"
- "sqadd v16.8h, v16.8h, v20.8h\n"
- "sqadd v24.8h, v24.8h, v20.8h\n"
- "sqadd v23.8h, v23.8h, v20.8h\n"
- "smax v14.8h, v14.8h, v15.8h\n"
- "smax v16.8h, v16.8h, v15.8h\n"
- "smax v24.8h, v24.8h, v15.8h\n"
- "smax v23.8h, v23.8h, v15.8h\n"
- "smin v14.8h, v14.8h, v13.8h\n"
- "smin v16.8h, v16.8h, v13.8h\n"
- "smin v24.8h, v24.8h, v13.8h\n"
- "smin v23.8h, v23.8h, v13.8h\n"
+ "srshl v26.4s, v26.4s, v23.4s\n"
+ "sqxtn v18.4h, v18.4s\n"
+ "sqxtn2 v12.8h, v17.4s\n"
+ "sqxtn2 v14.8h, v9.4s\n"
+ "sqxtn2 v16.8h, v10.4s\n"
+ "sqxtn2 v18.8h, v26.4s\n"
+ "sqadd v12.8h, v12.8h, v13.8h\n"
+ "sqadd v14.8h, v14.8h, v13.8h\n"
+ "sqadd v16.8h, v16.8h, v13.8h\n"
+ "sqadd v18.8h, v18.8h, v13.8h\n"
+ "smax v12.8h, v12.8h, v11.8h\n"
+ "smax v14.8h, v14.8h, v11.8h\n"
+ "smax v16.8h, v16.8h, v11.8h\n"
+ "smax v18.8h, v18.8h, v11.8h\n"
+ "smin v12.8h, v12.8h, v25.8h\n"
+ "smin v14.8h, v14.8h, v25.8h\n"
+ "smin v16.8h, v16.8h, v25.8h\n"
+ "smin v18.8h, v18.8h, v25.8h\n"
+ "uzp1 v12.16b, v12.16b, v12.16b\n"
"uzp1 v14.16b, v14.16b, v14.16b\n"
- "str d14, [x11, x16]\n"
+ "str d12, [x10, x14]\n"
"uzp1 v16.16b, v16.16b, v16.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "str d16, [x10, x16]\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "str d24, [x9, x16]\n"
- "str d23, [x28, x16]\n"
- "ldr q14, [x27, #0x0]\n"
- "ldr q11, [x27, #0x10]\n"
- "add x27, x27, #0x20\n"
- "ldr d0, [x14, #0x0]\n"
- "ldr d1, [x14, #0x8]\n"
- "add x16, x16, #0x8\n"
- "str x27, [%x[params], %[offsetof_Params_bias]]\n"
- "ldr d2, [x14, #0x10]\n"
- "ldr d3, [x14, #0x18]\n"
- "mov v16.16b, v14.16b\n"
- "mov v9.16b, v11.16b\n"
- "ldr d4, [x14, #0x20]\n"
- "ldr d5, [x14, #0x28]\n"
- "mov v24.16b, v14.16b\n"
- "mov v17.16b, v11.16b\n"
- "ldr d6, [x14, #0x30]\n"
- "ldr d7, [x14, #0x38]\n"
- "mov v23.16b, v14.16b\n"
- "mov v25.16b, v11.16b\n"
- "ldr d8, [x14, #0x40]\n"
- "ldp x24, x23, [x15, #0x0]\n"
- "usubl v0.8h, v0.8b, v12.8b\n"
- "usubl v1.8h, v1.8b, v12.8b\n"
- "ldp x22, x21, [x15, #0x10]\n"
- "ldr d31, [x24, x17]\n"
- "usubl v2.8h, v2.8b, v12.8b\n"
- "usubl v3.8h, v3.8b, v12.8b\n"
- "ldr d30, [x23, x17]\n"
- "ldr d29, [x22, x17]\n"
- "usubl v4.8h, v4.8b, v12.8b\n"
- "usubl v5.8h, v5.8b, v12.8b\n"
- "ldr d28, [x21, x17]\n"
- "ldr x20, [x15, #0x20]\n"
- "usubl v6.8h, v6.8b, v12.8b\n"
- "usubl v7.8h, v7.8b, v12.8b\n"
- "ldr d27, [x20, x17]\n"
- "usubl v8.8h, v8.8b, v12.8b\n"
+ "uzp1 v18.16b, v18.16b, v18.16b\n"
+ "str d14, [x9, x14]\n"
+ "str d16, [x28, x14]\n"
+ "str d18, [x27, x14]\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr q12, [x19, #0x0]\n"
+ "add x14, x14, #0x8\n"
+ "ldr q17, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr d0, [x17, #0x0]\n"
+ "ldr d1, [x17, #0x8]\n"
+ "ldr d2, [x17, #0x10]\n"
+ "mov v14.16b, v12.16b\n"
+ "mov v9.16b, v17.16b\n"
+ "ldr d3, [x17, #0x18]\n"
+ "ldr d4, [x17, #0x20]\n"
+ "mov v16.16b, v12.16b\n"
+ "mov v10.16b, v17.16b\n"
+ "ldr d5, [x17, #0x28]\n"
+ "ldr d6, [x17, #0x30]\n"
+ "mov v18.16b, v12.16b\n"
+ "mov v26.16b, v17.16b\n"
+ "ldr d7, [x17, #0x38]\n"
+ "ldr d8, [x17, #0x40]\n"
+ "usubl v0.8h, v0.8b, v15.8b\n"
+ "usubl v1.8h, v1.8b, v15.8b\n"
+ "ldp x23, x22, [x13, #0x0]\n"
+ "ldp x21, x20, [x13, #0x10]\n"
+ "usubl v2.8h, v2.8b, v15.8b\n"
+ "usubl v3.8h, v3.8b, v15.8b\n"
+ "ldr x19, [x13, #0x20]\n"
+ "ldr d31, [x23, x15]\n"
+ "usubl v4.8h, v4.8b, v15.8b\n"
+ "usubl v5.8h, v5.8b, v15.8b\n"
+ "ldr d30, [x22, x15]\n"
+ "ldr d29, [x21, x15]\n"
+ "usubl v6.8h, v6.8b, v15.8b\n"
+ "usubl v7.8h, v7.8b, v15.8b\n"
+ "ldr d28, [x20, x15]\n"
+ "ldr d27, [x19, x15]\n"
+ "usubl v8.8h, v8.8b, v15.8b\n"
"ushll v31.8h, v31.8b, #0x0\n"
"ushll v30.8h, v30.8b, #0x0\n"
"ushll v29.8h, v29.8b, #0x0\n"
@@ -384,777 +384,777 @@ void a64_u8qa_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
"ushll v27.8h, v27.8b, #0x0\n"
"bgt 1b\n"
"2:" // Tail
- "ldr q22, [x13, #0x0]\n"
- "ldr q10, [x12, #0x0]\n"
- "smlal v14.4s, v31.4h, v4.4h\n"
- "smlal2 v11.4s, v31.8h, v4.8h\n"
- "ldr q18, [x13, #0x10]\n"
- "ldr q26, [x12, #0x10]\n"
- "smlal v16.4s, v31.4h, v3.4h\n"
+ "smlal v12.4s, v31.4h, v4.4h\n"
+ "smlal2 v17.4s, v31.8h, v4.8h\n"
+ "ldr x21, [x13, #0x28]\n"
+ "ldr x26, [x13, #0x38]\n"
+ "smlal v14.4s, v31.4h, v3.4h\n"
"smlal2 v9.4s, v31.8h, v3.8h\n"
- "smlal v14.4s, v30.4h, v0.4h\n"
- "smlal2 v11.4s, v30.8h, v0.8h\n"
- "ldr x22, [x15, #0x28]\n"
- "ldr x21, [x15, #0x38]\n"
- "smlal v16.4s, v29.4h, v2.4h\n"
+ "ldr x20, [x13, #0x30]\n"
+ "ldr x25, [x13, #0x40]\n"
+ "smlal v12.4s, v30.4h, v0.4h\n"
+ "smlal2 v17.4s, v30.8h, v0.8h\n"
+ "ldr x19, [x13, #0x48]\n"
+ "ldr d30, [x19, x15]\n"
+ "smlal v14.4s, v29.4h, v2.4h\n"
"smlal2 v9.4s, v29.8h, v2.8h\n"
- "ldr x20, [x15, #0x30]\n"
- "ldr d29, [x20, x17]\n"
- "smlal v24.4s, v31.4h, v1.4h\n"
- "smlal2 v17.4s, v31.8h, v1.8h\n"
- "ldr x26, [x15, #0x40]\n"
- "ldr x20, [x15, #0x48]\n"
- "ldr d30, [x20, x17]\n"
- "smlal v23.4s, v31.4h, v0.4h\n"
- "smlal2 v25.4s, v31.8h, v0.8h\n"
- "ldr d31, [x22, x17]\n"
- "smlal v14.4s, v28.4h, v5.4h\n"
- "smlal2 v11.4s, v28.8h, v5.8h\n"
- "ushll v31.8h, v31.8b, #0x0\n"
- "ldr x25, [x15, #0x50]\n"
- "smlal v16.4s, v28.4h, v4.4h\n"
- "smlal2 v9.4s, v28.8h, v4.8h\n"
+ "ldr d29, [x20, x15]\n"
"ushll v29.8h, v29.8b, #0x0\n"
- "ldr x24, [x15, #0x58]\n"
- "smlal v24.4s, v28.4h, v2.4h\n"
- "smlal2 v17.4s, v28.8h, v2.8h\n"
+ "smlal v16.4s, v31.4h, v1.4h\n"
+ "smlal2 v10.4s, v31.8h, v1.8h\n"
+ "ldr x24, [x13, #0x50]\n"
+ "ldr x23, [x13, #0x58]\n"
+ "smlal v18.4s, v31.4h, v0.4h\n"
+ "smlal2 v26.4s, v31.8h, v0.8h\n"
+ "ldr d31, [x21, x15]\n"
+ "ushll v31.8h, v31.8b, #0x0\n"
+ "smlal v12.4s, v28.4h, v5.4h\n"
+ "smlal2 v17.4s, v28.8h, v5.8h\n"
"ushll v30.8h, v30.8b, #0x0\n"
- "ldr x23, [x15, #0x60]\n"
- "smlal v23.4s, v28.4h, v1.4h\n"
- "smlal2 v25.4s, v28.8h, v1.8h\n"
- "ldr d28, [x21, x17]\n"
+ "ldr x22, [x13, #0x60]\n"
+ "smlal v14.4s, v28.4h, v4.4h\n"
+ "smlal2 v9.4s, v28.8h, v4.8h\n"
+ "ldr x21, [x13, #0x68]\n"
+ "ldr x20, [x13, #0x70]\n"
+ "smlal v16.4s, v28.4h, v2.4h\n"
+ "smlal2 v10.4s, v28.8h, v2.8h\n"
+ "ldr x19, [x13, #0x78]\n"
+ "ldr q21, [x12, #0x0]\n"
+ "smlal v18.4s, v28.4h, v1.4h\n"
+ "smlal2 v26.4s, v28.8h, v1.8h\n"
+ "ldr d28, [x26, x15]\n"
"ushll v28.8h, v28.8b, #0x0\n"
- "smlal v14.4s, v27.4h, v7.4h\n"
- "smlal2 v11.4s, v27.8h, v7.8h\n"
- "ldr x22, [x15, #0x68]\n"
- "ldr x21, [x15, #0x70]\n"
- "smlal v16.4s, v27.4h, v6.4h\n"
+ "smlal v12.4s, v27.4h, v7.4h\n"
+ "smlal2 v17.4s, v27.8h, v7.8h\n"
+ "ldr q24, [x11, #0x0]\n"
+ "ldr q19, [x12, #0x10]\n"
+ "smlal v14.4s, v27.4h, v6.4h\n"
"smlal2 v9.4s, v27.8h, v6.8h\n"
- "ldr x20, [x15, #0x78]\n"
- "tst x7, #0x7\n"
- "smlal v24.4s, v31.4h, v6.4h\n"
- "smlal2 v17.4s, v31.8h, v6.8h\n"
- "ldr d31, [x26, x17]\n"
+ "ldr q23, [x11, #0x10]\n"
+ "tst x8, #0x7\n"
+ "smlal v16.4s, v31.4h, v6.4h\n"
+ "smlal2 v10.4s, v31.8h, v6.8h\n"
+ "ldr d31, [x25, x15]\n"
"ushll v31.8h, v31.8b, #0x0\n"
- "smlal v23.4s, v27.4h, v3.4h\n"
- "smlal2 v25.4s, v27.8h, v3.8h\n"
- "add x13, x13, #0x20\n"
+ "smlal v18.4s, v27.4h, v3.4h\n"
+ "smlal2 v26.4s, v27.8h, v3.8h\n"
"add x12, x12, #0x20\n"
- "smlal v14.4s, v28.4h, v1.4h\n"
- "smlal2 v11.4s, v28.8h, v1.8h\n"
- "smlal v16.4s, v28.4h, v0.4h\n"
+ "add x11, x11, #0x20\n"
+ "smlal v12.4s, v28.4h, v1.4h\n"
+ "smlal2 v17.4s, v28.8h, v1.8h\n"
+ "smlal v14.4s, v28.4h, v0.4h\n"
"smlal2 v9.4s, v28.8h, v0.8h\n"
- "ldr d28, [x24, x17]\n"
+ "ldr d28, [x23, x15]\n"
"ushll v28.8h, v28.8b, #0x0\n"
- "smlal v24.4s, v27.4h, v4.4h\n"
- "smlal v23.4s, v29.4h, v8.4h\n"
- "smlal2 v17.4s, v27.8h, v4.8h\n"
- "smlal2 v25.4s, v29.8h, v8.8h\n"
- "ldr d29, [x25, x17]\n"
+ "smlal v16.4s, v27.4h, v4.4h\n"
+ "smlal v18.4s, v29.4h, v8.4h\n"
+ "smlal2 v10.4s, v27.8h, v4.8h\n"
+ "smlal2 v26.4s, v29.8h, v8.8h\n"
+ "ldr d29, [x24, x15]\n"
"ushll v29.8h, v29.8b, #0x0\n"
- "smlal v14.4s, v31.4h, v2.4h\n"
- "smlal2 v11.4s, v31.8h, v2.8h\n"
- "smlal v16.4s, v31.4h, v1.4h\n"
+ "smlal v12.4s, v31.4h, v2.4h\n"
+ "smlal2 v17.4s, v31.8h, v2.8h\n"
+ "smlal v14.4s, v31.4h, v1.4h\n"
"smlal2 v9.4s, v31.8h, v1.8h\n"
- "ldr d31, [x23, x17]\n"
+ "ldr d31, [x22, x15]\n"
"ushll v31.8h, v31.8b, #0x0\n"
- "smlal v24.4s, v30.4h, v5.4h\n"
- "smlal v23.4s, v30.4h, v4.4h\n"
- "smlal v14.4s, v30.4h, v8.4h\n"
- "smlal2 v11.4s, v30.8h, v8.8h\n"
- "smlal v16.4s, v30.4h, v7.4h\n"
+ "smlal v16.4s, v30.4h, v5.4h\n"
+ "smlal v18.4s, v30.4h, v4.4h\n"
+ "smlal v12.4s, v30.4h, v8.4h\n"
+ "smlal2 v17.4s, v30.8h, v8.8h\n"
+ "smlal v14.4s, v30.4h, v7.4h\n"
"smlal2 v9.4s, v30.8h, v7.8h\n"
- "smlal2 v17.4s, v30.8h, v5.8h\n"
- "smlal2 v25.4s, v30.8h, v4.8h\n"
- "ldr d30, [x22, x17]\n"
+ "smlal2 v10.4s, v30.8h, v5.8h\n"
+ "smlal2 v26.4s, v30.8h, v4.8h\n"
+ "ldr d30, [x21, x15]\n"
"ushll v30.8h, v30.8b, #0x0\n"
- "smlal v24.4s, v29.4h, v0.4h\n"
- "smlal v23.4s, v28.4h, v2.4h\n"
- "smlal v14.4s, v29.4h, v3.4h\n"
- "smlal2 v11.4s, v29.8h, v3.8h\n"
- "smlal2 v17.4s, v29.8h, v0.8h\n"
- "ldr d29, [x21, x17]\n"
- "smlal2 v25.4s, v28.8h, v2.8h\n"
+ "smlal v16.4s, v29.4h, v0.4h\n"
+ "smlal v18.4s, v28.4h, v2.4h\n"
+ "smlal v12.4s, v29.4h, v3.4h\n"
+ "smlal2 v17.4s, v29.8h, v3.8h\n"
+ "smlal2 v10.4s, v29.8h, v0.8h\n"
+ "ldr d29, [x20, x15]\n"
+ "smlal2 v26.4s, v28.8h, v2.8h\n"
"ushll v29.8h, v29.8b, #0x0\n"
- "smlal v24.4s, v31.4h, v3.4h\n"
- "smlal v23.4s, v30.4h, v5.4h\n"
- "smlal v16.4s, v28.4h, v5.4h\n"
+ "smlal v16.4s, v31.4h, v3.4h\n"
+ "smlal v18.4s, v30.4h, v5.4h\n"
+ "smlal v14.4s, v28.4h, v5.4h\n"
"smlal2 v9.4s, v28.8h, v5.8h\n"
- "ldr d28, [x20, x17]\n"
+ "ldr d28, [x19, x15]\n"
"ushll v28.8h, v28.8b, #0x0\n"
- "smlal v14.4s, v31.4h, v6.4h\n"
- "smlal2 v17.4s, v31.8h, v3.8h\n"
- "sqrdmulh v14.4s, v14.4s, v22.4s\n"
- "add x17, x17, #0x8\n"
- "smlal2 v25.4s, v30.8h, v5.8h\n"
- "smlal v24.4s, v29.4h, v7.4h\n"
- "and v21.16b, v14.16b, v10.16b\n"
- "smlal v23.4s, v29.4h, v6.4h\n"
- "smlal2 v11.4s, v31.8h, v6.8h\n"
- "sqrdmulh v11.4s, v11.4s, v18.4s\n"
- "smlal2 v17.4s, v29.8h, v7.8h\n"
- "smlal2 v25.4s, v29.8h, v6.8h\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "smlal v16.4s, v30.4h, v8.4h\n"
- "smlal v24.4s, v28.4h, v8.4h\n"
- "and v4.16b, v11.16b, v26.16b\n"
- "smlal v23.4s, v28.4h, v7.4h\n"
+ "smlal2 v10.4s, v31.8h, v3.8h\n"
+ "smlal2 v26.4s, v30.8h, v5.8h\n"
+ "add x15, x15, #0x8\n"
+ "smlal v16.4s, v29.4h, v7.4h\n"
+ "smlal v18.4s, v29.4h, v6.4h\n"
+ "smlal2 v10.4s, v29.8h, v7.8h\n"
+ "smlal2 v26.4s, v29.8h, v6.8h\n"
+ "smlal v12.4s, v31.4h, v6.4h\n"
+ "smlal v14.4s, v30.4h, v8.4h\n"
+ "sqrdmulh v12.4s, v12.4s, v21.4s\n"
+ "smlal v16.4s, v28.4h, v8.4h\n"
+ "smlal v18.4s, v28.4h, v7.4h\n"
+ "sqrdmulh v14.4s, v14.4s, v21.4s\n"
+ "smlal2 v17.4s, v31.8h, v6.8h\n"
"smlal2 v9.4s, v30.8h, v8.8h\n"
- "sqrdmulh v16.4s, v16.4s, v22.4s\n"
- "smlal2 v17.4s, v28.8h, v8.8h\n"
- "smlal2 v25.4s, v28.8h, v7.8h\n"
- "sqrdmulh v24.4s, v24.4s, v22.4s\n"
- "sqrdmulh v23.4s, v23.4s, v22.4s\n"
- "sqadd v14.4s, v14.4s, v21.4s\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "and v19.16b, v16.16b, v10.16b\n"
- "sqrdmulh v9.4s, v9.4s, v18.4s\n"
- "and v3.16b, v24.16b, v10.16b\n"
- "sqrdmulh v17.4s, v17.4s, v18.4s\n"
- "and v21.16b, v23.16b, v10.16b\n"
- "sqrdmulh v25.4s, v25.4s, v18.4s\n"
- "sqadd v11.4s, v11.4s, v4.4s\n"
+ "sqrdmulh v16.4s, v16.4s, v21.4s\n"
+ "smlal2 v10.4s, v28.8h, v8.8h\n"
+ "smlal2 v26.4s, v28.8h, v7.8h\n"
+ "sqrdmulh v18.4s, v18.4s, v21.4s\n"
+ "and v29.16b, v12.16b, v24.16b\n"
+ "sqrdmulh v17.4s, v17.4s, v19.4s\n"
+ "and v22.16b, v14.16b, v24.16b\n"
+ "sqrdmulh v9.4s, v9.4s, v19.4s\n"
+ "and v21.16b, v16.16b, v24.16b\n"
+ "sqrdmulh v10.4s, v10.4s, v19.4s\n"
+ "and v20.16b, v18.16b, v24.16b\n"
+ "sqrdmulh v26.4s, v26.4s, v19.4s\n"
+ "sshr v29.4s, v29.4s, #0x1f\n"
+ "and v19.16b, v17.16b, v23.16b\n"
+ "sshr v22.4s, v22.4s, #0x1f\n"
+ "and v30.16b, v9.16b, v23.16b\n"
+ "sshr v21.4s, v21.4s, #0x1f\n"
+ "and v3.16b, v10.16b, v23.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v28.16b, v26.16b, v23.16b\n"
+ "sqadd v12.4s, v12.4s, v29.4s\n"
"sshr v19.4s, v19.4s, #0x1f\n"
- "and v27.16b, v9.16b, v26.16b\n"
+ "sqadd v14.4s, v14.4s, v22.4s\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "sqadd v16.4s, v16.4s, v21.4s\n"
"sshr v3.4s, v3.4s, #0x1f\n"
- "and v5.16b, v17.16b, v26.16b\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "and v4.16b, v25.16b, v26.16b\n"
- "sqadd v16.4s, v16.4s, v19.4s\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "sqadd v24.4s, v24.4s, v3.4s\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sqadd v23.4s, v23.4s, v21.4s\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "srshl v14.4s, v14.4s, v10.4s\n"
- "srshl v16.4s, v16.4s, v10.4s\n"
- "sqadd v9.4s, v9.4s, v27.4s\n"
- "srshl v24.4s, v24.4s, v10.4s\n"
- "sqadd v17.4s, v17.4s, v5.4s\n"
- "srshl v23.4s, v23.4s, v10.4s\n"
- "sqadd v25.4s, v25.4s, v4.4s\n"
- "srshl v11.4s, v11.4s, v26.4s\n"
+ "sqadd v18.4s, v18.4s, v20.4s\n"
+ "sshr v28.4s, v28.4s, #0x1f\n"
+ "srshl v12.4s, v12.4s, v24.4s\n"
+ "sqadd v17.4s, v17.4s, v19.4s\n"
+ "srshl v14.4s, v14.4s, v24.4s\n"
+ "sqadd v9.4s, v9.4s, v30.4s\n"
+ "srshl v16.4s, v16.4s, v24.4s\n"
+ "sqadd v10.4s, v10.4s, v3.4s\n"
+ "srshl v18.4s, v18.4s, v24.4s\n"
+ "sqadd v26.4s, v26.4s, v28.4s\n"
+ "srshl v17.4s, v17.4s, v23.4s\n"
+ "sqxtn v12.4h, v12.4s\n"
+ "srshl v9.4s, v9.4s, v23.4s\n"
"sqxtn v14.4h, v14.4s\n"
- "srshl v9.4s, v9.4s, v26.4s\n"
+ "srshl v10.4s, v10.4s, v23.4s\n"
"sqxtn v16.4h, v16.4s\n"
- "srshl v17.4s, v17.4s, v26.4s\n"
- "sqxtn v24.4h, v24.4s\n"
- "srshl v25.4s, v25.4s, v26.4s\n"
- "sqxtn v23.4h, v23.4s\n"
- "sqxtn2 v14.8h, v11.4s\n"
- "sqxtn2 v16.8h, v9.4s\n"
- "sqxtn2 v24.8h, v17.4s\n"
- "sqxtn2 v23.8h, v25.4s\n"
- "sqadd v14.8h, v14.8h, v20.8h\n"
- "sqadd v16.8h, v16.8h, v20.8h\n"
- "sqadd v24.8h, v24.8h, v20.8h\n"
- "sqadd v23.8h, v23.8h, v20.8h\n"
- "smax v14.8h, v14.8h, v15.8h\n"
- "smax v16.8h, v16.8h, v15.8h\n"
- "smax v24.8h, v24.8h, v15.8h\n"
- "smax v23.8h, v23.8h, v15.8h\n"
- "smin v14.8h, v14.8h, v13.8h\n"
- "smin v16.8h, v16.8h, v13.8h\n"
- "smin v24.8h, v24.8h, v13.8h\n"
- "smin v23.8h, v23.8h, v13.8h\n"
+ "srshl v26.4s, v26.4s, v23.4s\n"
+ "sqxtn v18.4h, v18.4s\n"
+ "sqxtn2 v12.8h, v17.4s\n"
+ "sqxtn2 v14.8h, v9.4s\n"
+ "sqxtn2 v16.8h, v10.4s\n"
+ "sqxtn2 v18.8h, v26.4s\n"
+ "sqadd v12.8h, v12.8h, v13.8h\n"
+ "sqadd v14.8h, v14.8h, v13.8h\n"
+ "sqadd v16.8h, v16.8h, v13.8h\n"
+ "sqadd v18.8h, v18.8h, v13.8h\n"
+ "smax v12.8h, v12.8h, v11.8h\n"
+ "smax v14.8h, v14.8h, v11.8h\n"
+ "smax v16.8h, v16.8h, v11.8h\n"
+ "smax v18.8h, v18.8h, v11.8h\n"
+ "smin v12.8h, v12.8h, v25.8h\n"
+ "smin v14.8h, v14.8h, v25.8h\n"
+ "smin v16.8h, v16.8h, v25.8h\n"
+ "smin v18.8h, v18.8h, v25.8h\n"
+ "uzp1 v12.16b, v12.16b, v12.16b\n"
"uzp1 v14.16b, v14.16b, v14.16b\n"
- "str d14, [x11, x16]\n"
+ "str d12, [x10, x14]\n"
"uzp1 v16.16b, v16.16b, v16.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "str d16, [x10, x16]\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "str d24, [x9, x16]\n"
- "str d23, [x28, x16]\n"
- "add x16, x16, #0x8\n"
+ "uzp1 v18.16b, v18.16b, v18.16b\n"
+ "str d14, [x9, x14]\n"
+ "str d16, [x28, x14]\n"
+ "str d18, [x27, x14]\n"
+ "add x14, x14, #0x8\n"
"beq 64f\n"
- "add x14, x14, #0x48\n"
+ "add x17, x17, #0x48\n"
"3:" // Oddments
- "ldr x27, [%x[params], %[offsetof_Params_bias]]\n"
- "tbz x7, #2, 5f\n"
- "ld1 { v14.4s }, [x27], #0x10\n"
- "tbz x7, #1, 4f\n"
- "ld1 { v11.d }[0], [x27], #0x8\n"
- "tbz x7, #0, 7f\n"
- "ld1 { v11.s }[2], [x27]\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "tbz x8, #2, 5f\n"
+ "ld1 { v12.4s }, [x19], #0x10\n"
+ "tbz x8, #1, 4f\n"
+ "ld1 { v17.d }[0], [x19], #0x8\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v17.s }[2], [x19]\n"
"b 7f\n"
"4:" // Oddments: Load bias: Bit 2: Bit 1: Unset
- "tbz x7, #0, 7f\n"
- "ld1 { v11.s }[0], [x27]\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v17.s }[0], [x19]\n"
"b 7f\n"
"5:" // Oddments: Load bias: Bit 2: Unset
- "tbz x7, #1, 6f\n"
- "ld1 { v14.d }[0], [x27], #0x8\n"
- "tbz x7, #0, 7f\n"
- "ld1 { v14.s }[2], [x27]\n"
+ "tbz x8, #1, 6f\n"
+ "ld1 { v12.d }[0], [x19], #0x8\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v12.s }[2], [x19]\n"
"b 7f\n"
"6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 7f\n"
- "ld1 { v14.s }[0], [x27]\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v12.s }[0], [x19]\n"
"7:" // Oddments: Load bias: Bit 2: End
- "ldr d0, [x14, #0x0]\n"
- "ldr d1, [x14, #0x8]\n"
- "mov v16.16b, v14.16b\n"
- "mov v9.16b, v11.16b\n"
- "ldr d2, [x14, #0x10]\n"
- "ldr d3, [x14, #0x18]\n"
- "mov v24.16b, v14.16b\n"
- "mov v17.16b, v11.16b\n"
- "ldr d4, [x14, #0x20]\n"
- "ldr d5, [x14, #0x28]\n"
- "mov v23.16b, v14.16b\n"
- "mov v25.16b, v11.16b\n"
- "ldr d6, [x14, #0x30]\n"
- "ldr d7, [x14, #0x38]\n"
- "usubl v0.8h, v0.8b, v12.8b\n"
- "usubl v1.8h, v1.8b, v12.8b\n"
- "ldr d8, [x14, #0x40]\n"
- "ldp x24, x23, [x15, #0x0]\n"
- "usubl v2.8h, v2.8b, v12.8b\n"
- "usubl v3.8h, v3.8b, v12.8b\n"
- "ldp x22, x21, [x15, #0x10]\n"
- "ldr x20, [x15, #0x20]\n"
- "usubl v4.8h, v4.8b, v12.8b\n"
- "usubl v5.8h, v5.8b, v12.8b\n"
- "usubl v6.8h, v6.8b, v12.8b\n"
- "usubl v7.8h, v7.8b, v12.8b\n"
- "usubl v8.8h, v8.8b, v12.8b\n"
- "add x24, x24, x17\n"
- "add x23, x23, x17\n"
- "add x22, x22, x17\n"
- "add x21, x21, x17\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 9f\n"
- "ld1 { v31.s }[0], [x24], #0x4\n"
- "ld1 { v30.s }[0], [x23], #0x4\n"
- "ld1 { v29.s }[0], [x22], #0x4\n"
- "ld1 { v28.s }[0], [x21], #0x4\n"
- "ld1 { v27.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 8f\n"
- "ld1 { v31.h }[2], [x24], #0x2\n"
- "ld1 { v30.h }[2], [x23], #0x2\n"
- "ld1 { v29.h }[2], [x22], #0x2\n"
- "ld1 { v28.h }[2], [x21], #0x2\n"
- "ld1 { v27.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 11f\n"
- "ld1 { v31.b }[6], [x24]\n"
- "ld1 { v30.b }[6], [x23]\n"
- "ld1 { v29.b }[6], [x22]\n"
- "ld1 { v28.b }[6], [x21]\n"
- "ld1 { v27.b }[6], [x20]\n"
+ "ldr d0, [x17, #0x0]\n"
+ "ldr d1, [x17, #0x8]\n"
+ "mov v14.16b, v12.16b\n"
+ "mov v9.16b, v17.16b\n"
+ "ldr d2, [x17, #0x10]\n"
+ "ldr d3, [x17, #0x18]\n"
+ "mov v16.16b, v12.16b\n"
+ "mov v10.16b, v17.16b\n"
+ "ldr d4, [x17, #0x20]\n"
+ "ldr d5, [x17, #0x28]\n"
+ "mov v18.16b, v12.16b\n"
+ "mov v26.16b, v17.16b\n"
+ "ldr d6, [x17, #0x30]\n"
+ "ldr d7, [x17, #0x38]\n"
+ "usubl v0.8h, v0.8b, v15.8b\n"
+ "usubl v1.8h, v1.8b, v15.8b\n"
+ "ldr d8, [x17, #0x40]\n"
+ "ldp x23, x22, [x13, #0x0]\n"
+ "usubl v2.8h, v2.8b, v15.8b\n"
+ "usubl v3.8h, v3.8b, v15.8b\n"
+ "ldp x21, x20, [x13, #0x10]\n"
+ "ldr x19, [x13, #0x20]\n"
+ "usubl v4.8h, v4.8b, v15.8b\n"
+ "usubl v5.8h, v5.8b, v15.8b\n"
+ "usubl v6.8h, v6.8b, v15.8b\n"
+ "usubl v7.8h, v7.8b, v15.8b\n"
+ "usubl v8.8h, v8.8b, v15.8b\n"
+ "add x23, x23, x15\n"
+ "add x22, x22, x15\n"
+ "add x21, x21, x15\n"
+ "add x20, x20, x15\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 9f\n"
+ "ld1 { v31.s }[0], [x23], #0x4\n"
+ "ld1 { v30.s }[0], [x22], #0x4\n"
+ "ld1 { v29.s }[0], [x21], #0x4\n"
+ "ld1 { v28.s }[0], [x20], #0x4\n"
+ "ld1 { v27.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 8f\n"
+ "ld1 { v31.h }[2], [x23], #0x2\n"
+ "ld1 { v30.h }[2], [x22], #0x2\n"
+ "ld1 { v29.h }[2], [x21], #0x2\n"
+ "ld1 { v28.h }[2], [x20], #0x2\n"
+ "ld1 { v27.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v31.b }[6], [x23]\n"
+ "ld1 { v30.b }[6], [x22]\n"
+ "ld1 { v29.b }[6], [x21]\n"
+ "ld1 { v28.b }[6], [x20]\n"
+ "ld1 { v27.b }[6], [x19]\n"
"b 11f\n"
"8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset
- "tbz x7, #0, 11f\n"
- "ld1 { v31.b }[4], [x24]\n"
- "ld1 { v30.b }[4], [x23]\n"
- "ld1 { v29.b }[4], [x22]\n"
- "ld1 { v28.b }[4], [x21]\n"
- "ld1 { v27.b }[4], [x20]\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v31.b }[4], [x23]\n"
+ "ld1 { v30.b }[4], [x22]\n"
+ "ld1 { v29.b }[4], [x21]\n"
+ "ld1 { v28.b }[4], [x20]\n"
+ "ld1 { v27.b }[4], [x19]\n"
"b 11f\n"
"9:" // Oddments: Initial loads: Bit 2: Unset
- "tbz x7, #1, 10f\n"
- "ld1 { v31.h }[0], [x24], #0x2\n"
- "ld1 { v30.h }[0], [x23], #0x2\n"
- "ld1 { v29.h }[0], [x22], #0x2\n"
- "ld1 { v28.h }[0], [x21], #0x2\n"
- "ld1 { v27.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 11f\n"
- "ld1 { v31.b }[2], [x24]\n"
- "ld1 { v30.b }[2], [x23]\n"
- "ld1 { v29.b }[2], [x22]\n"
- "ld1 { v28.b }[2], [x21]\n"
- "ld1 { v27.b }[2], [x20]\n"
+ "tbz x8, #1, 10f\n"
+ "ld1 { v31.h }[0], [x23], #0x2\n"
+ "ld1 { v30.h }[0], [x22], #0x2\n"
+ "ld1 { v29.h }[0], [x21], #0x2\n"
+ "ld1 { v28.h }[0], [x20], #0x2\n"
+ "ld1 { v27.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v31.b }[2], [x23]\n"
+ "ld1 { v30.b }[2], [x22]\n"
+ "ld1 { v29.b }[2], [x21]\n"
+ "ld1 { v28.b }[2], [x20]\n"
+ "ld1 { v27.b }[2], [x19]\n"
"b 11f\n"
"10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 11f\n"
- "ld1 { v31.b }[0], [x24]\n"
- "ld1 { v30.b }[0], [x23]\n"
- "ld1 { v29.b }[0], [x22]\n"
- "ld1 { v28.b }[0], [x21]\n"
- "ld1 { v27.b }[0], [x20]\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v31.b }[0], [x23]\n"
+ "ld1 { v30.b }[0], [x22]\n"
+ "ld1 { v29.b }[0], [x21]\n"
+ "ld1 { v28.b }[0], [x20]\n"
+ "ld1 { v27.b }[0], [x19]\n"
"11:" // Oddments: Initial loads: Bit 2: End
"ushll v31.8h, v31.8b, #0x0\n"
- "smlal v14.4s, v31.4h, v4.4h\n"
- "smlal2 v11.4s, v31.8h, v4.8h\n"
- "ldr x22, [x15, #0x28]\n"
- "smlal v16.4s, v31.4h, v3.4h\n"
+ "smlal v12.4s, v31.4h, v4.4h\n"
+ "smlal2 v17.4s, v31.8h, v4.8h\n"
+ "ldr x21, [x13, #0x28]\n"
+ "smlal v14.4s, v31.4h, v3.4h\n"
"smlal2 v9.4s, v31.8h, v3.8h\n"
"ushll v30.8h, v30.8b, #0x0\n"
- "add x22, x22, x17\n"
+ "add x21, x21, x15\n"
"ushll v29.8h, v29.8b, #0x0\n"
- "smlal v24.4s, v31.4h, v1.4h\n"
- "smlal2 v17.4s, v31.8h, v1.8h\n"
- "smlal v23.4s, v31.4h, v0.4h\n"
- "smlal2 v25.4s, v31.8h, v0.8h\n"
+ "smlal v16.4s, v31.4h, v1.4h\n"
+ "smlal2 v10.4s, v31.8h, v1.8h\n"
+ "smlal v18.4s, v31.4h, v0.4h\n"
+ "smlal2 v26.4s, v31.8h, v0.8h\n"
"ushll v28.8h, v28.8b, #0x0\n"
- "smlal v14.4s, v30.4h, v0.4h\n"
- "smlal2 v11.4s, v30.8h, v0.8h\n"
+ "smlal v12.4s, v30.4h, v0.4h\n"
+ "smlal2 v17.4s, v30.8h, v0.8h\n"
"ushll v27.8h, v27.8b, #0x0\n"
- "smlal v16.4s, v29.4h, v2.4h\n"
+ "smlal v14.4s, v29.4h, v2.4h\n"
"smlal2 v9.4s, v29.8h, v2.8h\n"
- "smlal v14.4s, v28.4h, v5.4h\n"
- "smlal2 v11.4s, v28.8h, v5.8h\n"
- "smlal v16.4s, v28.4h, v4.4h\n"
+ "smlal v12.4s, v28.4h, v5.4h\n"
+ "smlal2 v17.4s, v28.8h, v5.8h\n"
+ "smlal v14.4s, v28.4h, v4.4h\n"
"smlal2 v9.4s, v28.8h, v4.8h\n"
- "smlal v24.4s, v28.4h, v2.4h\n"
- "smlal2 v17.4s, v28.8h, v2.8h\n"
- "smlal v23.4s, v28.4h, v1.4h\n"
- "smlal2 v25.4s, v28.8h, v1.8h\n"
- "tbz x7, #2, 13f\n"
- "ld1 { v31.s }[0], [x22], #0x4\n"
- "tbz x7, #1, 12f\n"
- "ld1 { v31.h }[2], [x22], #0x2\n"
- "tbz x7, #0, 15f\n"
- "ld1 { v31.b }[6], [x22]\n"
+ "smlal v16.4s, v28.4h, v2.4h\n"
+ "smlal2 v10.4s, v28.8h, v2.8h\n"
+ "smlal v18.4s, v28.4h, v1.4h\n"
+ "smlal2 v26.4s, v28.8h, v1.8h\n"
+ "tbz x8, #2, 13f\n"
+ "ld1 { v31.s }[0], [x21], #0x4\n"
+ "tbz x8, #1, 12f\n"
+ "ld1 { v31.h }[2], [x21], #0x2\n"
+ "tbz x8, #0, 15f\n"
+ "ld1 { v31.b }[6], [x21]\n"
"b 15f\n"
"12:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 15f\n"
- "ld1 { v31.b }[4], [x22]\n"
+ "tbz x8, #0, 15f\n"
+ "ld1 { v31.b }[4], [x21]\n"
"b 15f\n"
"13:" // Oddments: Load (3, 0): Bit 2: Unset
- "tbz x7, #1, 14f\n"
- "ld1 { v31.h }[0], [x22], #0x2\n"
- "tbz x7, #0, 15f\n"
- "ld1 { v31.b }[2], [x22]\n"
+ "tbz x8, #1, 14f\n"
+ "ld1 { v31.h }[0], [x21], #0x2\n"
+ "tbz x8, #0, 15f\n"
+ "ld1 { v31.b }[2], [x21]\n"
"b 15f\n"
"14:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 15f\n"
- "ld1 { v31.b }[0], [x22]\n"
+ "tbz x8, #0, 15f\n"
+ "ld1 { v31.b }[0], [x21]\n"
"15:" // Oddments: Load (3, 0): Bit 2: End
"ushll v31.8h, v31.8b, #0x0\n"
- "smlal v24.4s, v31.4h, v6.4h\n"
- "smlal2 v17.4s, v31.8h, v6.8h\n"
- "ldr x20, [x15, #0x30]\n"
- "smlal v14.4s, v27.4h, v7.4h\n"
- "smlal2 v11.4s, v27.8h, v7.8h\n"
- "add x20, x20, x17\n"
- "smlal v16.4s, v27.4h, v6.4h\n"
+ "smlal v16.4s, v31.4h, v6.4h\n"
+ "smlal2 v10.4s, v31.8h, v6.8h\n"
+ "ldr x20, [x13, #0x30]\n"
+ "smlal v12.4s, v27.4h, v7.4h\n"
+ "smlal2 v17.4s, v27.8h, v7.8h\n"
+ "add x20, x20, x15\n"
+ "smlal v14.4s, v27.4h, v6.4h\n"
"smlal2 v9.4s, v27.8h, v6.8h\n"
- "smlal v24.4s, v27.4h, v4.4h\n"
- "smlal2 v17.4s, v27.8h, v4.8h\n"
- "smlal v23.4s, v27.4h, v3.4h\n"
- "smlal2 v25.4s, v27.8h, v3.8h\n"
- "tbz x7, #2, 17f\n"
+ "smlal v16.4s, v27.4h, v4.4h\n"
+ "smlal2 v10.4s, v27.8h, v4.8h\n"
+ "smlal v18.4s, v27.4h, v3.4h\n"
+ "smlal2 v26.4s, v27.8h, v3.8h\n"
+ "tbz x8, #2, 17f\n"
"ld1 { v29.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 16f\n"
+ "tbz x8, #1, 16f\n"
"ld1 { v29.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 19f\n"
+ "tbz x8, #0, 19f\n"
"ld1 { v29.b }[6], [x20]\n"
"b 19f\n"
"16:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 19f\n"
+ "tbz x8, #0, 19f\n"
"ld1 { v29.b }[4], [x20]\n"
"b 19f\n"
"17:" // Oddments: Load (3, 3): Bit 2: Unset
- "tbz x7, #1, 18f\n"
+ "tbz x8, #1, 18f\n"
"ld1 { v29.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 19f\n"
+ "tbz x8, #0, 19f\n"
"ld1 { v29.b }[2], [x20]\n"
"b 19f\n"
"18:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 19f\n"
+ "tbz x8, #0, 19f\n"
"ld1 { v29.b }[0], [x20]\n"
"19:" // Oddments: Load (3, 3): Bit 2: End
"ushll v29.8h, v29.8b, #0x0\n"
- "ldr x21, [x15, #0x38]\n"
- "smlal v23.4s, v29.4h, v8.4h\n"
- "smlal2 v25.4s, v29.8h, v8.8h\n"
- "add x21, x21, x17\n"
- "tbz x7, #2, 21f\n"
- "ld1 { v28.s }[0], [x21], #0x4\n"
- "tbz x7, #1, 20f\n"
- "ld1 { v28.h }[2], [x21], #0x2\n"
- "tbz x7, #0, 23f\n"
- "ld1 { v28.b }[6], [x21]\n"
+ "ldr x26, [x13, #0x38]\n"
+ "smlal v18.4s, v29.4h, v8.4h\n"
+ "smlal2 v26.4s, v29.8h, v8.8h\n"
+ "add x26, x26, x15\n"
+ "tbz x8, #2, 21f\n"
+ "ld1 { v28.s }[0], [x26], #0x4\n"
+ "tbz x8, #1, 20f\n"
+ "ld1 { v28.h }[2], [x26], #0x2\n"
+ "tbz x8, #0, 23f\n"
+ "ld1 { v28.b }[6], [x26]\n"
"b 23f\n"
"20:" // Oddments: Load (0, 1): Bit 2: Bit 1: Unset
- "tbz x7, #0, 23f\n"
- "ld1 { v28.b }[4], [x21]\n"
+ "tbz x8, #0, 23f\n"
+ "ld1 { v28.b }[4], [x26]\n"
"b 23f\n"
"21:" // Oddments: Load (0, 1): Bit 2: Unset
- "tbz x7, #1, 22f\n"
- "ld1 { v28.h }[0], [x21], #0x2\n"
- "tbz x7, #0, 23f\n"
- "ld1 { v28.b }[2], [x21]\n"
+ "tbz x8, #1, 22f\n"
+ "ld1 { v28.h }[0], [x26], #0x2\n"
+ "tbz x8, #0, 23f\n"
+ "ld1 { v28.b }[2], [x26]\n"
"b 23f\n"
"22:" // Oddments: Load (0, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 23f\n"
- "ld1 { v28.b }[0], [x21]\n"
+ "tbz x8, #0, 23f\n"
+ "ld1 { v28.b }[0], [x26]\n"
"23:" // Oddments: Load (0, 1): Bit 2: End
"ushll v28.8h, v28.8b, #0x0\n"
- "ldr x26, [x15, #0x40]\n"
- "smlal v14.4s, v28.4h, v1.4h\n"
- "smlal2 v11.4s, v28.8h, v1.8h\n"
- "smlal v16.4s, v28.4h, v0.4h\n"
+ "ldr x25, [x13, #0x40]\n"
+ "smlal v12.4s, v28.4h, v1.4h\n"
+ "smlal2 v17.4s, v28.8h, v1.8h\n"
+ "smlal v14.4s, v28.4h, v0.4h\n"
"smlal2 v9.4s, v28.8h, v0.8h\n"
- "add x26, x26, x17\n"
- "tbz x7, #2, 25f\n"
- "ld1 { v31.s }[0], [x26], #0x4\n"
- "tbz x7, #1, 24f\n"
- "ld1 { v31.h }[2], [x26], #0x2\n"
- "tbz x7, #0, 27f\n"
- "ld1 { v31.b }[6], [x26]\n"
+ "add x25, x25, x15\n"
+ "tbz x8, #2, 25f\n"
+ "ld1 { v31.s }[0], [x25], #0x4\n"
+ "tbz x8, #1, 24f\n"
+ "ld1 { v31.h }[2], [x25], #0x2\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v31.b }[6], [x25]\n"
"b 27f\n"
"24:" // Oddments: Load (0, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 27f\n"
- "ld1 { v31.b }[4], [x26]\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v31.b }[4], [x25]\n"
"b 27f\n"
"25:" // Oddments: Load (0, 2): Bit 2: Unset
- "tbz x7, #1, 26f\n"
- "ld1 { v31.h }[0], [x26], #0x2\n"
- "tbz x7, #0, 27f\n"
- "ld1 { v31.b }[2], [x26]\n"
+ "tbz x8, #1, 26f\n"
+ "ld1 { v31.h }[0], [x25], #0x2\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v31.b }[2], [x25]\n"
"b 27f\n"
"26:" // Oddments: Load (0, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 27f\n"
- "ld1 { v31.b }[0], [x26]\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v31.b }[0], [x25]\n"
"27:" // Oddments: Load (0, 2): Bit 2: End
"ushll v31.8h, v31.8b, #0x0\n"
- "ldr x20, [x15, #0x48]\n"
- "smlal v14.4s, v31.4h, v2.4h\n"
- "smlal2 v11.4s, v31.8h, v2.8h\n"
- "smlal v16.4s, v31.4h, v1.4h\n"
+ "ldr x19, [x13, #0x48]\n"
+ "smlal v12.4s, v31.4h, v2.4h\n"
+ "smlal2 v17.4s, v31.8h, v2.8h\n"
+ "smlal v14.4s, v31.4h, v1.4h\n"
"smlal2 v9.4s, v31.8h, v1.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 29f\n"
- "ld1 { v30.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 28f\n"
- "ld1 { v30.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 31f\n"
- "ld1 { v30.b }[6], [x20]\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 29f\n"
+ "ld1 { v30.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 28f\n"
+ "ld1 { v30.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 31f\n"
+ "ld1 { v30.b }[6], [x19]\n"
"b 31f\n"
"28:" // Oddments: Load (2, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 31f\n"
- "ld1 { v30.b }[4], [x20]\n"
+ "tbz x8, #0, 31f\n"
+ "ld1 { v30.b }[4], [x19]\n"
"b 31f\n"
"29:" // Oddments: Load (2, 2): Bit 2: Unset
- "tbz x7, #1, 30f\n"
- "ld1 { v30.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 31f\n"
- "ld1 { v30.b }[2], [x20]\n"
+ "tbz x8, #1, 30f\n"
+ "ld1 { v30.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 31f\n"
+ "ld1 { v30.b }[2], [x19]\n"
"b 31f\n"
"30:" // Oddments: Load (2, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 31f\n"
- "ld1 { v30.b }[0], [x20]\n"
+ "tbz x8, #0, 31f\n"
+ "ld1 { v30.b }[0], [x19]\n"
"31:" // Oddments: Load (2, 2): Bit 2: End
"ushll v30.8h, v30.8b, #0x0\n"
- "ldr x25, [x15, #0x50]\n"
- "smlal v14.4s, v30.4h, v8.4h\n"
- "smlal2 v11.4s, v30.8h, v8.8h\n"
- "smlal v16.4s, v30.4h, v7.4h\n"
+ "ldr x24, [x13, #0x50]\n"
+ "smlal v12.4s, v30.4h, v8.4h\n"
+ "smlal2 v17.4s, v30.8h, v8.8h\n"
+ "smlal v14.4s, v30.4h, v7.4h\n"
"smlal2 v9.4s, v30.8h, v7.8h\n"
- "add x25, x25, x17\n"
- "smlal v24.4s, v30.4h, v5.4h\n"
- "smlal2 v17.4s, v30.8h, v5.8h\n"
- "smlal v23.4s, v30.4h, v4.4h\n"
- "smlal2 v25.4s, v30.8h, v4.8h\n"
- "tbz x7, #2, 33f\n"
- "ld1 { v29.s }[0], [x25], #0x4\n"
- "tbz x7, #1, 32f\n"
- "ld1 { v29.h }[2], [x25], #0x2\n"
- "tbz x7, #0, 35f\n"
- "ld1 { v29.b }[6], [x25]\n"
+ "add x24, x24, x15\n"
+ "smlal v16.4s, v30.4h, v5.4h\n"
+ "smlal2 v10.4s, v30.8h, v5.8h\n"
+ "smlal v18.4s, v30.4h, v4.4h\n"
+ "smlal2 v26.4s, v30.8h, v4.8h\n"
+ "tbz x8, #2, 33f\n"
+ "ld1 { v29.s }[0], [x24], #0x4\n"
+ "tbz x8, #1, 32f\n"
+ "ld1 { v29.h }[2], [x24], #0x2\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v29.b }[6], [x24]\n"
"b 35f\n"
"32:" // Oddments: Load (1, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 35f\n"
- "ld1 { v29.b }[4], [x25]\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v29.b }[4], [x24]\n"
"b 35f\n"
"33:" // Oddments: Load (1, 0): Bit 2: Unset
- "tbz x7, #1, 34f\n"
- "ld1 { v29.h }[0], [x25], #0x2\n"
- "tbz x7, #0, 35f\n"
- "ld1 { v29.b }[2], [x25]\n"
+ "tbz x8, #1, 34f\n"
+ "ld1 { v29.h }[0], [x24], #0x2\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v29.b }[2], [x24]\n"
"b 35f\n"
"34:" // Oddments: Load (1, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 35f\n"
- "ld1 { v29.b }[0], [x25]\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v29.b }[0], [x24]\n"
"35:" // Oddments: Load (1, 0): Bit 2: End
"ushll v29.8h, v29.8b, #0x0\n"
- "ldr x24, [x15, #0x58]\n"
- "smlal v14.4s, v29.4h, v3.4h\n"
- "smlal2 v11.4s, v29.8h, v3.8h\n"
- "smlal v24.4s, v29.4h, v0.4h\n"
- "smlal2 v17.4s, v29.8h, v0.8h\n"
- "add x24, x24, x17\n"
- "tbz x7, #2, 37f\n"
- "ld1 { v28.s }[0], [x24], #0x4\n"
- "tbz x7, #1, 36f\n"
- "ld1 { v28.h }[2], [x24], #0x2\n"
- "tbz x7, #0, 39f\n"
- "ld1 { v28.b }[6], [x24]\n"
+ "ldr x23, [x13, #0x58]\n"
+ "smlal v12.4s, v29.4h, v3.4h\n"
+ "smlal2 v17.4s, v29.8h, v3.8h\n"
+ "smlal v16.4s, v29.4h, v0.4h\n"
+ "smlal2 v10.4s, v29.8h, v0.8h\n"
+ "add x23, x23, x15\n"
+ "tbz x8, #2, 37f\n"
+ "ld1 { v28.s }[0], [x23], #0x4\n"
+ "tbz x8, #1, 36f\n"
+ "ld1 { v28.h }[2], [x23], #0x2\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v28.b }[6], [x23]\n"
"b 39f\n"
"36:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 39f\n"
- "ld1 { v28.b }[4], [x24]\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v28.b }[4], [x23]\n"
"b 39f\n"
"37:" // Oddments: Load (1, 3): Bit 2: Unset
- "tbz x7, #1, 38f\n"
- "ld1 { v28.h }[0], [x24], #0x2\n"
- "tbz x7, #0, 39f\n"
- "ld1 { v28.b }[2], [x24]\n"
+ "tbz x8, #1, 38f\n"
+ "ld1 { v28.h }[0], [x23], #0x2\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v28.b }[2], [x23]\n"
"b 39f\n"
"38:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 39f\n"
- "ld1 { v28.b }[0], [x24]\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v28.b }[0], [x23]\n"
"39:" // Oddments: Load (1, 3): Bit 2: End
"ushll v28.8h, v28.8b, #0x0\n"
- "ldr x23, [x15, #0x60]\n"
- "smlal v16.4s, v28.4h, v5.4h\n"
+ "ldr x22, [x13, #0x60]\n"
+ "smlal v14.4s, v28.4h, v5.4h\n"
"smlal2 v9.4s, v28.8h, v5.8h\n"
- "smlal v23.4s, v28.4h, v2.4h\n"
- "smlal2 v25.4s, v28.8h, v2.8h\n"
- "add x23, x23, x17\n"
- "tbz x7, #2, 41f\n"
- "ld1 { v31.s }[0], [x23], #0x4\n"
- "tbz x7, #1, 40f\n"
- "ld1 { v31.h }[2], [x23], #0x2\n"
- "tbz x7, #0, 43f\n"
- "ld1 { v31.b }[6], [x23]\n"
+ "smlal v18.4s, v28.4h, v2.4h\n"
+ "smlal2 v26.4s, v28.8h, v2.8h\n"
+ "add x22, x22, x15\n"
+ "tbz x8, #2, 41f\n"
+ "ld1 { v31.s }[0], [x22], #0x4\n"
+ "tbz x8, #1, 40f\n"
+ "ld1 { v31.h }[2], [x22], #0x2\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v31.b }[6], [x22]\n"
"b 43f\n"
"40:" // Oddments: Load (2, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 43f\n"
- "ld1 { v31.b }[4], [x23]\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v31.b }[4], [x22]\n"
"b 43f\n"
"41:" // Oddments: Load (2, 0): Bit 2: Unset
- "tbz x7, #1, 42f\n"
- "ld1 { v31.h }[0], [x23], #0x2\n"
- "tbz x7, #0, 43f\n"
- "ld1 { v31.b }[2], [x23]\n"
+ "tbz x8, #1, 42f\n"
+ "ld1 { v31.h }[0], [x22], #0x2\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v31.b }[2], [x22]\n"
"b 43f\n"
"42:" // Oddments: Load (2, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 43f\n"
- "ld1 { v31.b }[0], [x23]\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v31.b }[0], [x22]\n"
"43:" // Oddments: Load (2, 0): Bit 2: End
"ushll v31.8h, v31.8b, #0x0\n"
- "ldr x22, [x15, #0x68]\n"
- "smlal v14.4s, v31.4h, v6.4h\n"
- "smlal2 v11.4s, v31.8h, v6.8h\n"
- "smlal v24.4s, v31.4h, v3.4h\n"
- "smlal2 v17.4s, v31.8h, v3.8h\n"
- "add x22, x22, x17\n"
- "tbz x7, #2, 45f\n"
- "ld1 { v30.s }[0], [x22], #0x4\n"
- "tbz x7, #1, 44f\n"
- "ld1 { v30.h }[2], [x22], #0x2\n"
- "tbz x7, #0, 47f\n"
- "ld1 { v30.b }[6], [x22]\n"
+ "ldr x21, [x13, #0x68]\n"
+ "smlal v12.4s, v31.4h, v6.4h\n"
+ "smlal2 v17.4s, v31.8h, v6.8h\n"
+ "smlal v16.4s, v31.4h, v3.4h\n"
+ "smlal2 v10.4s, v31.8h, v3.8h\n"
+ "add x21, x21, x15\n"
+ "tbz x8, #2, 45f\n"
+ "ld1 { v30.s }[0], [x21], #0x4\n"
+ "tbz x8, #1, 44f\n"
+ "ld1 { v30.h }[2], [x21], #0x2\n"
+ "tbz x8, #0, 47f\n"
+ "ld1 { v30.b }[6], [x21]\n"
"b 47f\n"
"44:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 47f\n"
- "ld1 { v30.b }[4], [x22]\n"
+ "tbz x8, #0, 47f\n"
+ "ld1 { v30.b }[4], [x21]\n"
"b 47f\n"
"45:" // Oddments: Load (2, 3): Bit 2: Unset
- "tbz x7, #1, 46f\n"
- "ld1 { v30.h }[0], [x22], #0x2\n"
- "tbz x7, #0, 47f\n"
- "ld1 { v30.b }[2], [x22]\n"
+ "tbz x8, #1, 46f\n"
+ "ld1 { v30.h }[0], [x21], #0x2\n"
+ "tbz x8, #0, 47f\n"
+ "ld1 { v30.b }[2], [x21]\n"
"b 47f\n"
"46:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 47f\n"
- "ld1 { v30.b }[0], [x22]\n"
+ "tbz x8, #0, 47f\n"
+ "ld1 { v30.b }[0], [x21]\n"
"47:" // Oddments: Load (2, 3): Bit 2: End
"ushll v30.8h, v30.8b, #0x0\n"
- "ldr x21, [x15, #0x70]\n"
- "smlal v16.4s, v30.4h, v8.4h\n"
+ "ldr x20, [x13, #0x70]\n"
+ "smlal v14.4s, v30.4h, v8.4h\n"
"smlal2 v9.4s, v30.8h, v8.8h\n"
- "smlal v23.4s, v30.4h, v5.4h\n"
- "smlal2 v25.4s, v30.8h, v5.8h\n"
- "add x21, x21, x17\n"
- "tbz x7, #2, 49f\n"
- "ld1 { v29.s }[0], [x21], #0x4\n"
- "tbz x7, #1, 48f\n"
- "ld1 { v29.h }[2], [x21], #0x2\n"
- "tbz x7, #0, 51f\n"
- "ld1 { v29.b }[6], [x21]\n"
+ "smlal v18.4s, v30.4h, v5.4h\n"
+ "smlal2 v26.4s, v30.8h, v5.8h\n"
+ "add x20, x20, x15\n"
+ "tbz x8, #2, 49f\n"
+ "ld1 { v29.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 48f\n"
+ "ld1 { v29.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v29.b }[6], [x20]\n"
"b 51f\n"
"48:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset
- "tbz x7, #0, 51f\n"
- "ld1 { v29.b }[4], [x21]\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v29.b }[4], [x20]\n"
"b 51f\n"
"49:" // Oddments: Load (3, 1): Bit 2: Unset
- "tbz x7, #1, 50f\n"
- "ld1 { v29.h }[0], [x21], #0x2\n"
- "tbz x7, #0, 51f\n"
- "ld1 { v29.b }[2], [x21]\n"
+ "tbz x8, #1, 50f\n"
+ "ld1 { v29.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v29.b }[2], [x20]\n"
"b 51f\n"
"50:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 51f\n"
- "ld1 { v29.b }[0], [x21]\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v29.b }[0], [x20]\n"
"51:" // Oddments: Load (3, 1): Bit 2: End
"ushll v29.8h, v29.8b, #0x0\n"
- "ldr x20, [x15, #0x78]\n"
- "smlal v24.4s, v29.4h, v7.4h\n"
- "smlal2 v17.4s, v29.8h, v7.8h\n"
- "smlal v23.4s, v29.4h, v6.4h\n"
- "smlal2 v25.4s, v29.8h, v6.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 53f\n"
- "ld1 { v28.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 52f\n"
- "ld1 { v28.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 55f\n"
- "ld1 { v28.b }[6], [x20]\n"
+ "ldr x19, [x13, #0x78]\n"
+ "smlal v16.4s, v29.4h, v7.4h\n"
+ "smlal2 v10.4s, v29.8h, v7.8h\n"
+ "smlal v18.4s, v29.4h, v6.4h\n"
+ "smlal2 v26.4s, v29.8h, v6.8h\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 53f\n"
+ "ld1 { v28.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 52f\n"
+ "ld1 { v28.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v28.b }[6], [x19]\n"
"b 55f\n"
"52:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 55f\n"
- "ld1 { v28.b }[4], [x20]\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v28.b }[4], [x19]\n"
"b 55f\n"
"53:" // Oddments: Load (3, 2): Bit 2: Unset
- "tbz x7, #1, 54f\n"
- "ld1 { v28.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 55f\n"
- "ld1 { v28.b }[2], [x20]\n"
+ "tbz x8, #1, 54f\n"
+ "ld1 { v28.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v28.b }[2], [x19]\n"
"b 55f\n"
"54:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 55f\n"
- "ld1 { v28.b }[0], [x20]\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v28.b }[0], [x19]\n"
"55:" // Oddments: Load (3, 2): Bit 2: End
"ushll v28.8h, v28.8b, #0x0\n"
- "smlal v24.4s, v28.4h, v8.4h\n"
- "smlal2 v17.4s, v28.8h, v8.8h\n"
- "smlal v23.4s, v28.4h, v7.4h\n"
- "smlal2 v25.4s, v28.8h, v7.8h\n"
- "tbz x7, #2, 57f\n"
- "ld1 { v22.4s }, [x13], #0x10\n"
- "ld1 { v10.4s }, [x12], #0x10\n"
- "tbz x7, #1, 56f\n"
- "ld1 { v18.d }[0], [x13], #0x8\n"
- "ld1 { v26.d }[0], [x12], #0x8\n"
- "tbz x7, #0, 59f\n"
- "ld1 { v18.s }[2], [x13]\n"
- "ld1 { v26.s }[2], [x12]\n"
+ "smlal v16.4s, v28.4h, v8.4h\n"
+ "smlal2 v10.4s, v28.8h, v8.8h\n"
+ "smlal v18.4s, v28.4h, v7.4h\n"
+ "smlal2 v26.4s, v28.8h, v7.8h\n"
+ "tbz x8, #2, 57f\n"
+ "ld1 { v21.4s }, [x12], #0x10\n"
+ "ld1 { v24.4s }, [x11], #0x10\n"
+ "tbz x8, #1, 56f\n"
+ "ld1 { v19.d }[0], [x12], #0x8\n"
+ "ld1 { v23.d }[0], [x11], #0x8\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v19.s }[2], [x12]\n"
+ "ld1 { v23.s }[2], [x11]\n"
"b 59f\n"
"56:" // Oddments: Load requant params: Bit 2: Bit 1: Unset
- "tbz x7, #0, 59f\n"
- "ld1 { v18.s }[0], [x13]\n"
- "ld1 { v26.s }[0], [x12]\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v19.s }[0], [x12]\n"
+ "ld1 { v23.s }[0], [x11]\n"
"b 59f\n"
"57:" // Oddments: Load requant params: Bit 2: Unset
- "tbz x7, #1, 58f\n"
- "ld1 { v22.d }[0], [x13], #0x8\n"
- "ld1 { v10.d }[0], [x12], #0x8\n"
- "tbz x7, #0, 59f\n"
- "ld1 { v22.s }[2], [x13]\n"
- "ld1 { v10.s }[2], [x12]\n"
+ "tbz x8, #1, 58f\n"
+ "ld1 { v21.d }[0], [x12], #0x8\n"
+ "ld1 { v24.d }[0], [x11], #0x8\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v21.s }[2], [x12]\n"
+ "ld1 { v24.s }[2], [x11]\n"
"b 59f\n"
"58:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 59f\n"
- "ld1 { v22.s }[0], [x13]\n"
- "ld1 { v10.s }[0], [x12]\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v21.s }[0], [x12]\n"
+ "ld1 { v24.s }[0], [x11]\n"
"59:" // Oddments: Load requant params: Bit 2: End
- "sqrdmulh v14.4s, v14.4s, v22.4s\n"
- "and v21.16b, v14.16b, v10.16b\n"
- "add x11, x11, x16\n"
- "add x10, x10, x16\n"
- "sqrdmulh v11.4s, v11.4s, v18.4s\n"
+ "sqrdmulh v12.4s, v12.4s, v21.4s\n"
+ "sqrdmulh v14.4s, v14.4s, v21.4s\n"
+ "add x10, x10, x14\n"
+ "add x9, x9, x14\n"
+ "sqrdmulh v16.4s, v16.4s, v21.4s\n"
+ "sqrdmulh v18.4s, v18.4s, v21.4s\n"
+ "add x28, x28, x14\n"
+ "add x27, x27, x14\n"
+ "and v29.16b, v12.16b, v24.16b\n"
+ "sqrdmulh v17.4s, v17.4s, v19.4s\n"
+ "and v22.16b, v14.16b, v24.16b\n"
+ "sqrdmulh v9.4s, v9.4s, v19.4s\n"
+ "and v21.16b, v16.16b, v24.16b\n"
+ "sqrdmulh v10.4s, v10.4s, v19.4s\n"
+ "and v20.16b, v18.16b, v24.16b\n"
+ "sqrdmulh v26.4s, v26.4s, v19.4s\n"
+ "sshr v29.4s, v29.4s, #0x1f\n"
+ "and v19.16b, v17.16b, v23.16b\n"
+ "sshr v22.4s, v22.4s, #0x1f\n"
+ "and v30.16b, v9.16b, v23.16b\n"
"sshr v21.4s, v21.4s, #0x1f\n"
- "add x9, x9, x16\n"
- "add x28, x28, x16\n"
- "and v4.16b, v11.16b, v26.16b\n"
- "sqrdmulh v16.4s, v16.4s, v22.4s\n"
- "sqrdmulh v24.4s, v24.4s, v22.4s\n"
- "sqrdmulh v23.4s, v23.4s, v22.4s\n"
- "sqadd v14.4s, v14.4s, v21.4s\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "and v19.16b, v16.16b, v10.16b\n"
- "sqrdmulh v9.4s, v9.4s, v18.4s\n"
- "and v3.16b, v24.16b, v10.16b\n"
- "sqrdmulh v17.4s, v17.4s, v18.4s\n"
- "and v21.16b, v23.16b, v10.16b\n"
- "sqrdmulh v25.4s, v25.4s, v18.4s\n"
- "sqadd v11.4s, v11.4s, v4.4s\n"
+ "and v3.16b, v10.16b, v23.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v28.16b, v26.16b, v23.16b\n"
+ "sqadd v12.4s, v12.4s, v29.4s\n"
"sshr v19.4s, v19.4s, #0x1f\n"
- "and v27.16b, v9.16b, v26.16b\n"
+ "sqadd v14.4s, v14.4s, v22.4s\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "sqadd v16.4s, v16.4s, v21.4s\n"
"sshr v3.4s, v3.4s, #0x1f\n"
- "and v5.16b, v17.16b, v26.16b\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "and v4.16b, v25.16b, v26.16b\n"
- "sqadd v16.4s, v16.4s, v19.4s\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "sqadd v24.4s, v24.4s, v3.4s\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sqadd v23.4s, v23.4s, v21.4s\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "srshl v14.4s, v14.4s, v10.4s\n"
- "srshl v16.4s, v16.4s, v10.4s\n"
- "sqadd v9.4s, v9.4s, v27.4s\n"
- "srshl v24.4s, v24.4s, v10.4s\n"
- "sqadd v17.4s, v17.4s, v5.4s\n"
- "srshl v23.4s, v23.4s, v10.4s\n"
- "sqadd v25.4s, v25.4s, v4.4s\n"
- "srshl v11.4s, v11.4s, v26.4s\n"
+ "sqadd v18.4s, v18.4s, v20.4s\n"
+ "sshr v28.4s, v28.4s, #0x1f\n"
+ "srshl v12.4s, v12.4s, v24.4s\n"
+ "sqadd v17.4s, v17.4s, v19.4s\n"
+ "srshl v14.4s, v14.4s, v24.4s\n"
+ "sqadd v9.4s, v9.4s, v30.4s\n"
+ "srshl v16.4s, v16.4s, v24.4s\n"
+ "sqadd v10.4s, v10.4s, v3.4s\n"
+ "srshl v18.4s, v18.4s, v24.4s\n"
+ "sqadd v26.4s, v26.4s, v28.4s\n"
+ "srshl v17.4s, v17.4s, v23.4s\n"
+ "sqxtn v12.4h, v12.4s\n"
+ "srshl v9.4s, v9.4s, v23.4s\n"
"sqxtn v14.4h, v14.4s\n"
- "srshl v9.4s, v9.4s, v26.4s\n"
+ "srshl v10.4s, v10.4s, v23.4s\n"
"sqxtn v16.4h, v16.4s\n"
- "srshl v17.4s, v17.4s, v26.4s\n"
- "sqxtn v24.4h, v24.4s\n"
- "srshl v25.4s, v25.4s, v26.4s\n"
- "sqxtn v23.4h, v23.4s\n"
- "sqxtn2 v14.8h, v11.4s\n"
- "sqxtn2 v16.8h, v9.4s\n"
- "sqxtn2 v24.8h, v17.4s\n"
- "sqxtn2 v23.8h, v25.4s\n"
- "sqadd v14.8h, v14.8h, v20.8h\n"
- "sqadd v16.8h, v16.8h, v20.8h\n"
- "sqadd v24.8h, v24.8h, v20.8h\n"
- "sqadd v23.8h, v23.8h, v20.8h\n"
- "smax v14.8h, v14.8h, v15.8h\n"
- "smax v16.8h, v16.8h, v15.8h\n"
- "smax v24.8h, v24.8h, v15.8h\n"
- "smax v23.8h, v23.8h, v15.8h\n"
- "smin v14.8h, v14.8h, v13.8h\n"
- "smin v16.8h, v16.8h, v13.8h\n"
- "smin v24.8h, v24.8h, v13.8h\n"
- "smin v23.8h, v23.8h, v13.8h\n"
+ "srshl v26.4s, v26.4s, v23.4s\n"
+ "sqxtn v18.4h, v18.4s\n"
+ "sqxtn2 v12.8h, v17.4s\n"
+ "sqxtn2 v14.8h, v9.4s\n"
+ "sqxtn2 v16.8h, v10.4s\n"
+ "sqxtn2 v18.8h, v26.4s\n"
+ "sqadd v12.8h, v12.8h, v13.8h\n"
+ "sqadd v14.8h, v14.8h, v13.8h\n"
+ "sqadd v16.8h, v16.8h, v13.8h\n"
+ "sqadd v18.8h, v18.8h, v13.8h\n"
+ "smax v12.8h, v12.8h, v11.8h\n"
+ "smax v14.8h, v14.8h, v11.8h\n"
+ "smax v16.8h, v16.8h, v11.8h\n"
+ "smax v18.8h, v18.8h, v11.8h\n"
+ "smin v12.8h, v12.8h, v25.8h\n"
+ "smin v14.8h, v14.8h, v25.8h\n"
+ "smin v16.8h, v16.8h, v25.8h\n"
+ "smin v18.8h, v18.8h, v25.8h\n"
+ "uzp1 v12.16b, v12.16b, v12.16b\n"
"uzp1 v14.16b, v14.16b, v14.16b\n"
"uzp1 v16.16b, v16.16b, v16.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "tbz x7, #2, 61f\n"
- "st1 { v14.s }[0], [x11], #0x4\n"
- "st1 { v16.s }[0], [x10], #0x4\n"
- "st1 { v24.s }[0], [x9], #0x4\n"
- "st1 { v23.s }[0], [x28], #0x4\n"
- "tbz x7, #1, 60f\n"
- "st1 { v14.h }[2], [x11], #0x2\n"
- "st1 { v16.h }[2], [x10], #0x2\n"
- "st1 { v24.h }[2], [x9], #0x2\n"
- "st1 { v23.h }[2], [x28], #0x2\n"
- "tbz x7, #0, 63f\n"
- "st1 { v14.b }[6], [x11], #0x1\n"
- "st1 { v16.b }[6], [x10], #0x1\n"
- "st1 { v24.b }[6], [x9], #0x1\n"
- "st1 { v23.b }[6], [x28], #0x1\n"
+ "uzp1 v18.16b, v18.16b, v18.16b\n"
+ "tbz x8, #2, 61f\n"
+ "st1 { v12.s }[0], [x10], #0x4\n"
+ "st1 { v14.s }[0], [x9], #0x4\n"
+ "st1 { v16.s }[0], [x28], #0x4\n"
+ "st1 { v18.s }[0], [x27], #0x4\n"
+ "tbz x8, #1, 60f\n"
+ "st1 { v12.h }[2], [x10], #0x2\n"
+ "st1 { v14.h }[2], [x9], #0x2\n"
+ "st1 { v16.h }[2], [x28], #0x2\n"
+ "st1 { v18.h }[2], [x27], #0x2\n"
+ "tbz x8, #0, 63f\n"
+ "st1 { v12.b }[6], [x10], #0x1\n"
+ "st1 { v14.b }[6], [x9], #0x1\n"
+ "st1 { v16.b }[6], [x28], #0x1\n"
+ "st1 { v18.b }[6], [x27], #0x1\n"
"b 63f\n"
"60:" // Oddments: Bit 2: Bit 1: Unset
- "tbz x7, #0, 63f\n"
- "st1 { v14.b }[4], [x11], #0x1\n"
- "st1 { v16.b }[4], [x10], #0x1\n"
- "st1 { v24.b }[4], [x9], #0x1\n"
- "st1 { v23.b }[4], [x28], #0x1\n"
+ "tbz x8, #0, 63f\n"
+ "st1 { v12.b }[4], [x10], #0x1\n"
+ "st1 { v14.b }[4], [x9], #0x1\n"
+ "st1 { v16.b }[4], [x28], #0x1\n"
+ "st1 { v18.b }[4], [x27], #0x1\n"
"b 63f\n"
"61:" // Oddments: Bit 2: Unset
- "tbz x7, #1, 62f\n"
- "st1 { v14.h }[0], [x11], #0x2\n"
- "st1 { v16.h }[0], [x10], #0x2\n"
- "st1 { v24.h }[0], [x9], #0x2\n"
- "st1 { v23.h }[0], [x28], #0x2\n"
- "tbz x7, #0, 63f\n"
- "st1 { v14.b }[2], [x11], #0x1\n"
- "st1 { v16.b }[2], [x10], #0x1\n"
- "st1 { v24.b }[2], [x9], #0x1\n"
- "st1 { v23.b }[2], [x28], #0x1\n"
+ "tbz x8, #1, 62f\n"
+ "st1 { v12.h }[0], [x10], #0x2\n"
+ "st1 { v14.h }[0], [x9], #0x2\n"
+ "st1 { v16.h }[0], [x28], #0x2\n"
+ "st1 { v18.h }[0], [x27], #0x2\n"
+ "tbz x8, #0, 63f\n"
+ "st1 { v12.b }[2], [x10], #0x1\n"
+ "st1 { v14.b }[2], [x9], #0x1\n"
+ "st1 { v16.b }[2], [x28], #0x1\n"
+ "st1 { v18.b }[2], [x27], #0x1\n"
"b 63f\n"
"62:" // Oddments: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 63f\n"
- "st1 { v14.b }[0], [x11], #0x1\n"
- "st1 { v16.b }[0], [x10], #0x1\n"
- "st1 { v24.b }[0], [x9], #0x1\n"
- "st1 { v23.b }[0], [x28], #0x1\n"
+ "tbz x8, #0, 63f\n"
+ "st1 { v12.b }[0], [x10], #0x1\n"
+ "st1 { v14.b }[0], [x9], #0x1\n"
+ "st1 { v16.b }[0], [x28], #0x1\n"
+ "st1 { v18.b }[0], [x27], #0x1\n"
"63:" // Oddments: Bit 2: End
"64:" // End
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
index a1e5c669b7..42ff502b0f 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -100,324 +100,324 @@ void a64_u8qa_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "ldr x7, [%x[params], %[offsetof_Params_n_channels]]\n"
- "ldr x23, [%x[params], %[offsetof_Params_requant]]\n"
- "lsr x8, x7, #0x3\n"
- "add x20, x23, %[offsetof_Requantize32_b_offset]\n"
- "ld1r { v19.16b }, [x20]\n"
- "ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n"
- "add x21, x23, %[offsetof_Requantize32_c_offset]\n"
- "add x20, x23, %[offsetof_Requantize32_minval]\n"
- "ld1r { v12.8h }, [x21]\n"
+ "ldr x19, [%x[params], %[offsetof_Params_requant]]\n"
+ "ldr x8, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "add x23, x19, %[offsetof_Requantize32_b_offset]\n"
+ "add x22, x19, %[offsetof_Requantize32_c_offset]\n"
+ "ldr x21, [%x[params], %[offsetof_Params_outptrs]]\n"
+ "add x20, x19, %[offsetof_Requantize32_minval]\n"
+ "add x19, x19, %[offsetof_Requantize32_maxval]\n"
+ "ldr x17, [%x[params], %[offsetof_Params_weights]]\n"
+ "ld1r { v16.16b }, [x23]\n"
+ "ld1r { v12.8h }, [x22]\n"
+ "lsr x16, x8, #0x3\n"
+ "mov x15, #0x0\n"
"ld1r { v14.8h }, [x20]\n"
- "add x20, x23, %[offsetof_Requantize32_maxval]\n"
- "mov x17, #0x0\n"
- "ld1r { v23.8h }, [x20]\n"
- "mov x16, #0x0\n"
- "add x15, %x[params], %[offsetof_Params_inptrs]\n"
- "ldr x14, [%x[params], %[offsetof_Params_weights]]\n"
- "ldr x13, [%x[params], %[offsetof_Params_requant_muls]]\n"
- "ldr x12, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "ldp x11, x10, [x22, #0x0]\n"
- "ldp x9, x28, [x22, #0x10]\n"
- "cbz x8, 3f\n"
- "ldr d0, [x14, #0x0]\n"
- "ldr d1, [x14, #0x8]\n"
- "subs x8, x8, #0x1\n"
- "usubl v0.8h, v0.8b, v19.8b\n"
- "ldr d2, [x14, #0x10]\n"
- "ldr d3, [x14, #0x18]\n"
- "usubl v1.8h, v1.8b, v19.8b\n"
- "usubl v2.8h, v2.8b, v19.8b\n"
- "ldr d4, [x14, #0x20]\n"
- "ldr d5, [x14, #0x28]\n"
- "usubl v3.8h, v3.8b, v19.8b\n"
- "usubl v4.8h, v4.8b, v19.8b\n"
- "ldr d6, [x14, #0x30]\n"
- "ldr d7, [x14, #0x38]\n"
- "usubl v5.8h, v5.8b, v19.8b\n"
- "usubl v6.8h, v6.8b, v19.8b\n"
- "ldr d8, [x14, #0x40]\n"
- "ldr x22, [%x[params], %[offsetof_Params_bias]]\n"
- "usubl v7.8h, v7.8b, v19.8b\n"
- "usubl v8.8h, v8.8b, v19.8b\n"
- "ldr q15, [x22, #0x0]\n"
- "ldr q13, [x22, #0x10]\n"
- "add x22, x22, #0x20\n"
- "str x22, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "mov v17.16b, v15.16b\n"
- "mov v20.16b, v13.16b\n"
- "ldp x23, x22, [x15, #0x20]\n"
- "ldp x21, x20, [x15, #0x30]\n"
+ "ld1r { v21.8h }, [x19]\n"
+ "mov x14, #0x0\n"
+ "add x13, %x[params], %[offsetof_Params_inptrs]\n"
+ "ldr x12, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "ldr x11, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "ldp x10, x9, [x21, #0x0]\n"
+ "ldp x28, x27, [x21, #0x10]\n"
+ "cbz x16, 3f\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr q15, [x19, #0x0]\n"
+ "subs x16, x16, #0x1\n"
+ "mov v13.16b, v15.16b\n"
+ "ldr q18, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr d0, [x17, #0x0]\n"
+ "ldr d1, [x17, #0x8]\n"
+ "ldr d2, [x17, #0x10]\n"
+ "mov v17.16b, v18.16b\n"
"mov v11.16b, v15.16b\n"
- "mov v10.16b, v13.16b\n"
- "ldr d31, [x27, x17]\n"
- "ldr d30, [x26, x17]\n"
- "mov v9.16b, v15.16b\n"
- "mov v22.16b, v13.16b\n"
- "ldr d29, [x25, x17]\n"
- "ldr d28, [x24, x17]\n"
+ "ldr d3, [x17, #0x18]\n"
+ "ldr d4, [x17, #0x20]\n"
+ "mov v10.16b, v18.16b\n"
+ "mov v23.16b, v15.16b\n"
+ "ldr d5, [x17, #0x28]\n"
+ "ldr d6, [x17, #0x30]\n"
+ "mov v9.16b, v18.16b\n"
+ "usubl v0.8h, v0.8b, v16.8b\n"
+ "ldr d7, [x17, #0x38]\n"
+ "ldr d8, [x17, #0x40]\n"
+ "usubl v1.8h, v1.8b, v16.8b\n"
+ "usubl v2.8h, v2.8b, v16.8b\n"
+ "ldp x26, x25, [x13, #0x0]\n"
+ "ldp x24, x23, [x13, #0x10]\n"
+ "usubl v3.8h, v3.8b, v16.8b\n"
+ "usubl v4.8h, v4.8b, v16.8b\n"
+ "ldp x22, x21, [x13, #0x20]\n"
+ "ldp x20, x19, [x13, #0x30]\n"
+ "usubl v5.8h, v5.8b, v16.8b\n"
+ "usubl v6.8h, v6.8b, v16.8b\n"
+ "ldr d31, [x26, x15]\n"
+ "ldr d30, [x25, x15]\n"
+ "usubl v7.8h, v7.8b, v16.8b\n"
+ "usubl v8.8h, v8.8b, v16.8b\n"
+ "ldr d29, [x24, x15]\n"
+ "ldr d28, [x23, x15]\n"
"ushll v31.8h, v31.8b, #0x0\n"
"ushll v30.8h, v30.8b, #0x0\n"
- "ldr d27, [x23, x17]\n"
- "ldr d26, [x22, x17]\n"
+ "ldr d27, [x22, x15]\n"
+ "ldr d26, [x21, x15]\n"
"ushll v29.8h, v29.8b, #0x0\n"
"ushll v28.8h, v28.8b, #0x0\n"
- "ldr d25, [x21, x17]\n"
- "ldr d24, [x20, x17]\n"
+ "ldr d25, [x20, x15]\n"
+ "ldr d24, [x19, x15]\n"
"ushll v27.8h, v27.8b, #0x0\n"
"ushll v26.8h, v26.8b, #0x0\n"
"ushll v25.8h, v25.8b, #0x0\n"
"ushll v24.8h, v24.8b, #0x0\n"
"beq 2f\n"
"1:" // Loop
- "ldr q18, [x13, #0x0]\n"
"smlal v15.4s, v31.4h, v8.4h\n"
- "smlal2 v13.4s, v31.8h, v8.8h\n"
- "ldr x23, [x15, #0x40]\n"
- "smlal v17.4s, v31.4h, v6.4h\n"
- "smlal2 v20.4s, v31.8h, v6.8h\n"
- "ldr x22, [x15, #0x48]\n"
- "ldr x21, [x15, #0x50]\n"
+ "smlal2 v18.4s, v31.8h, v8.8h\n"
+ "ldr x24, [x13, #0x40]\n"
+ "ldr x23, [x13, #0x48]\n"
+ "smlal v13.4s, v31.4h, v6.4h\n"
+ "smlal2 v17.4s, v31.8h, v6.8h\n"
+ "ldr x21, [x13, #0x50]\n"
+ "ldr x19, [x13, #0x58]\n"
"smlal v15.4s, v30.4h, v0.4h\n"
- "smlal2 v13.4s, v30.8h, v0.8h\n"
- "ldr q21, [x12, #0x0]\n"
- "ldr x20, [x15, #0x58]\n"
- "smlal v17.4s, v28.4h, v1.4h\n"
- "smlal2 v20.4s, v28.8h, v1.8h\n"
- "ldr d28, [x22, x17]\n"
+ "smlal2 v18.4s, v30.8h, v0.8h\n"
+ "ldr x22, [x13, #0x78]\n"
+ "ldr x20, [x13, #0x60]\n"
+ "smlal v13.4s, v28.4h, v1.4h\n"
+ "smlal2 v17.4s, v28.8h, v1.8h\n"
+ "ldr d28, [x23, x15]\n"
"ushll v28.8h, v28.8b, #0x0\n"
"smlal v15.4s, v29.4h, v1.4h\n"
- "smlal2 v13.4s, v29.8h, v1.8h\n"
- "ldr d29, [x23, x17]\n"
+ "smlal2 v18.4s, v29.8h, v1.8h\n"
+ "ldr d29, [x24, x15]\n"
"ushll v29.8h, v29.8b, #0x0\n"
- "smlal v17.4s, v27.4h, v2.4h\n"
- "smlal2 v20.4s, v27.8h, v2.8h\n"
- "ldr d27, [x21, x17]\n"
+ "smlal v13.4s, v27.4h, v2.4h\n"
+ "smlal2 v17.4s, v27.8h, v2.8h\n"
+ "ldr d27, [x21, x15]\n"
"ushll v27.8h, v27.8b, #0x0\n"
"smlal v15.4s, v26.4h, v3.4h\n"
- "smlal2 v13.4s, v26.8h, v3.8h\n"
- "ldr d26, [x20, x17]\n"
- "ldr x23, [x15, #0x78]\n"
- "smlal v17.4s, v24.4h, v0.4h\n"
- "smlal2 v20.4s, v24.8h, v0.8h\n"
- "ldr x20, [x15, #0x60]\n"
+ "smlal2 v18.4s, v26.8h, v3.8h\n"
+ "ldr d26, [x19, x15]\n"
"ushll v26.8h, v26.8b, #0x0\n"
+ "smlal v13.4s, v24.4h, v0.4h\n"
+ "smlal2 v17.4s, v24.8h, v0.8h\n"
+ "ldr x21, [x13, #0x80]\n"
+ "ldr x19, [x13, #0x68]\n"
"smlal v15.4s, v25.4h, v4.4h\n"
- "smlal2 v13.4s, v25.8h, v4.8h\n"
- "ldr d25, [x20, x17]\n"
- "ldr x21, [x15, #0x80]\n"
- "smlal v17.4s, v29.4h, v4.4h\n"
- "smlal2 v20.4s, v29.8h, v4.8h\n"
- "ldr q30, [x13, #0x10]\n"
- "ldr x20, [x15, #0x68]\n"
- "smlal v15.4s, v24.4h, v2.4h\n"
- "smlal2 v13.4s, v24.8h, v2.8h\n"
- "ldr d29, [x20, x17]\n"
+ "smlal2 v18.4s, v25.8h, v4.8h\n"
+ "ldr d25, [x20, x15]\n"
"ushll v25.8h, v25.8b, #0x0\n"
- "smlal v17.4s, v28.4h, v5.4h\n"
- "smlal2 v20.4s, v28.8h, v5.8h\n"
- "ldr d28, [x21, x17]\n"
- "ldr x22, [x15, #0x88]\n"
+ "smlal v13.4s, v29.4h, v4.4h\n"
+ "smlal2 v17.4s, v29.8h, v4.8h\n"
+ "ldr x20, [x13, #0x88]\n"
+ "ldr d29, [x19, x15]\n"
+ "smlal v15.4s, v24.4h, v2.4h\n"
+ "smlal2 v18.4s, v24.8h, v2.8h\n"
+ "ldr x19, [x13, #0x70]\n"
+ "ushll v29.8h, v29.8b, #0x0\n"
+ "smlal v13.4s, v28.4h, v5.4h\n"
+ "smlal2 v17.4s, v28.8h, v5.8h\n"
+ "ldr d28, [x21, x15]\n"
+ "ushll v28.8h, v28.8b, #0x0\n"
"smlal v11.4s, v31.4h, v2.4h\n"
"smlal2 v10.4s, v31.8h, v2.8h\n"
- "ldr x20, [x15, #0x70]\n"
- "ushll v28.8h, v28.8b, #0x0\n"
+ "ldr x24, [x13, #0x98]\n"
+ "ldr d24, [x19, x15]\n"
"smlal v15.4s, v27.4h, v5.4h\n"
- "smlal2 v13.4s, v27.8h, v5.8h\n"
- "ushll v29.8h, v29.8b, #0x0\n"
- "ldr x24, [x15, #0x98]\n"
- "smlal v17.4s, v27.4h, v3.4h\n"
- "smlal2 v20.4s, v27.8h, v3.8h\n"
- "ldr d27, [x23, x17]\n"
+ "smlal2 v18.4s, v27.8h, v5.8h\n"
+ "ushll v24.8h, v24.8b, #0x0\n"
+ "ldr x23, [x13, #0x90]\n"
+ "smlal v13.4s, v27.4h, v3.4h\n"
+ "smlal2 v17.4s, v27.8h, v3.8h\n"
+ "ldr d27, [x22, x15]\n"
"ushll v27.8h, v27.8b, #0x0\n"
- "smlal v9.4s, v31.4h, v0.4h\n"
+ "smlal v23.4s, v31.4h, v0.4h\n"
"smlal v11.4s, v26.4h, v3.4h\n"
- "ldr x21, [x15, #0x90]\n"
- "ldr x23, [x15, #0xa8]\n"
+ "ldr x22, [x13, #0xa8]\n"
+ "ldr x19, [x13, #0xa0]\n"
"smlal2 v10.4s, v26.8h, v3.8h\n"
- "ldr d26, [x22, x17]\n"
- "smlal2 v22.4s, v31.8h, v0.8h\n"
- "ldr d24, [x20, x17]\n"
- "smlal v9.4s, v27.4h, v4.4h\n"
- "smlal v11.4s, v25.4h, v0.4h\n"
+ "smlal2 v9.4s, v31.8h, v0.8h\n"
+ "ldr d26, [x20, x15]\n"
"ushll v26.8h, v26.8b, #0x0\n"
- "ldr x20, [x15, #0xa0]\n"
+ "smlal v23.4s, v27.4h, v4.4h\n"
+ "smlal v11.4s, v25.4h, v0.4h\n"
+ "ldr x21, [x13, #0xb0]\n"
+ "ldr x20, [x13, #0xb8]\n"
"smlal2 v10.4s, v25.8h, v0.8h\n"
- "ldr q31, [x12, #0x10]\n"
- "smlal2 v22.4s, v27.8h, v4.8h\n"
- "ldr d27, [x20, x17]\n"
- "smlal v9.4s, v28.4h, v1.4h\n"
+ "smlal2 v9.4s, v27.8h, v4.8h\n"
+ "ldr d27, [x19, x15]\n"
+ "ushll v27.8h, v27.8b, #0x0\n"
+ "smlal v23.4s, v28.4h, v1.4h\n"
"smlal v15.4s, v25.4h, v6.4h\n"
- "ushll v24.8h, v24.8b, #0x0\n"
- "ldr x22, [x15, #0xb0]\n"
- "smlal2 v13.4s, v25.8h, v6.8h\n"
- "ldr d25, [x21, x17]\n"
+ "ldr x19, [x13, #0xc0]\n"
+ "ldr q22, [x12, #0x0]\n"
+ "smlal2 v18.4s, v25.8h, v6.8h\n"
"smlal v11.4s, v29.4h, v4.4h\n"
+ "ldr d25, [x23, x15]\n"
"ushll v25.8h, v25.8b, #0x0\n"
"smlal2 v10.4s, v29.8h, v4.8h\n"
- "ldr d29, [x24, x17]\n"
- "smlal2 v22.4s, v28.8h, v1.8h\n"
+ "ldr d29, [x24, x15]\n"
+ "smlal2 v9.4s, v28.8h, v1.8h\n"
"ushll v29.8h, v29.8b, #0x0\n"
- "smlal v9.4s, v26.4h, v5.4h\n"
+ "smlal v23.4s, v26.4h, v5.4h\n"
"smlal v15.4s, v24.4h, v7.4h\n"
- "ldr x21, [x15, #0xb8]\n"
- "ushll v27.8h, v27.8b, #0x0\n"
- "smlal2 v13.4s, v24.8h, v7.8h\n"
+ "ldr q31, [x11, #0x0]\n"
+ "ldr q19, [x12, #0x10]\n"
+ "smlal2 v18.4s, v24.8h, v7.8h\n"
"smlal v11.4s, v24.4h, v1.4h\n"
- "ldr x20, [x15, #0xc0]\n"
- "sqrdmulh v15.4s, v15.4s, v18.4s\n"
+ "sqrdmulh v15.4s, v15.4s, v22.4s\n"
+ "ldr q30, [x11, #0x10]\n"
"smlal2 v10.4s, v24.8h, v1.8h\n"
- "ldr d24, [x23, x17]\n"
- "smlal2 v22.4s, v26.8h, v5.8h\n"
- "ldr d26, [x22, x17]\n"
- "smlal v9.4s, v29.4h, v2.4h\n"
+ "ldr d24, [x22, x15]\n"
+ "smlal2 v9.4s, v26.8h, v5.8h\n"
"ushll v24.8h, v24.8b, #0x0\n"
- "smlal2 v22.4s, v29.8h, v2.8h\n"
- "ldr x22, [%x[params], %[offsetof_Params_bias]]\n"
- "smlal v11.4s, v25.4h, v6.4h\n"
- "smlal v9.4s, v24.4h, v3.4h\n"
+ "smlal v23.4s, v29.4h, v2.4h\n"
+ "ldr d26, [x21, x15]\n"
+ "smlal2 v9.4s, v29.8h, v2.8h\n"
"ushll v26.8h, v26.8b, #0x0\n"
- "add x14, x14, #0x48\n"
- "smlal v17.4s, v28.4h, v7.4h\n"
- "smlal2 v20.4s, v28.8h, v7.8h\n"
- "and v2.16b, v15.16b, v21.16b\n"
- "subs x8, x8, #0x1\n"
+ "smlal v11.4s, v25.4h, v6.4h\n"
+ "smlal v23.4s, v24.4h, v3.4h\n"
+ "and v4.16b, v15.16b, v31.16b\n"
+ "add x17, x17, #0x48\n"
+ "smlal v13.4s, v28.4h, v7.4h\n"
+ "smlal2 v17.4s, v28.8h, v7.8h\n"
+ "sqrdmulh v18.4s, v18.4s, v19.4s\n"
+ "subs x16, x16, #0x1\n"
"smlal2 v10.4s, v25.8h, v6.8h\n"
- "ldr d25, [x21, x17]\n"
- "smlal2 v22.4s, v24.8h, v3.8h\n"
+ "ldr d25, [x20, x15]\n"
+ "smlal2 v9.4s, v24.8h, v3.8h\n"
"ushll v25.8h, v25.8b, #0x0\n"
"smlal v11.4s, v27.4h, v7.4h\n"
- "smlal v9.4s, v26.4h, v7.4h\n"
- "sqrdmulh v13.4s, v13.4s, v30.4s\n"
- "add x13, x13, #0x20\n"
- "smlal v17.4s, v29.4h, v8.4h\n"
- "smlal2 v20.4s, v29.8h, v8.8h\n"
- "ldr d29, [x20, x17]\n"
+ "smlal v23.4s, v26.4h, v7.4h\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "add x12, x12, #0x20\n"
+ "smlal v13.4s, v29.4h, v8.4h\n"
+ "smlal2 v17.4s, v29.8h, v8.8h\n"
+ "ldr d29, [x19, x15]\n"
"ushll v29.8h, v29.8b, #0x0\n"
"smlal2 v10.4s, v27.8h, v7.8h\n"
- "smlal2 v22.4s, v26.8h, v7.8h\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "add x17, x17, #0x8\n"
+ "smlal2 v9.4s, v26.8h, v7.8h\n"
+ "sqrdmulh v13.4s, v13.4s, v22.4s\n"
+ "add x15, x15, #0x8\n"
"smlal v11.4s, v24.4h, v5.4h\n"
- "smlal v9.4s, v25.4h, v6.4h\n"
- "and v16.16b, v13.16b, v31.16b\n"
- "add x12, x12, #0x20\n"
+ "smlal v23.4s, v25.4h, v6.4h\n"
+ "and v1.16b, v13.16b, v31.16b\n"
+ "add x11, x11, #0x20\n"
"smlal2 v10.4s, v24.8h, v5.8h\n"
- "smlal2 v22.4s, v25.8h, v6.8h\n"
- "sqrdmulh v17.4s, v17.4s, v18.4s\n"
+ "smlal2 v9.4s, v25.8h, v6.8h\n"
+ "sqrdmulh v17.4s, v17.4s, v19.4s\n"
"smlal v11.4s, v25.4h, v8.4h\n"
- "smlal v9.4s, v29.4h, v8.4h\n"
- "sqrdmulh v11.4s, v11.4s, v18.4s\n"
+ "smlal v23.4s, v29.4h, v8.4h\n"
+ "sqrdmulh v11.4s, v11.4s, v22.4s\n"
"smlal2 v10.4s, v25.8h, v8.8h\n"
- "smlal2 v22.4s, v29.8h, v8.8h\n"
- "sqrdmulh v9.4s, v9.4s, v18.4s\n"
- "sqadd v15.4s, v15.4s, v2.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "and v18.16b, v17.16b, v21.16b\n"
- "sqrdmulh v20.4s, v20.4s, v30.4s\n"
- "and v28.16b, v11.16b, v21.16b\n"
- "sqrdmulh v10.4s, v10.4s, v30.4s\n"
- "and v2.16b, v9.16b, v21.16b\n"
- "sqrdmulh v22.4s, v22.4s, v30.4s\n"
- "sqadd v13.4s, v13.4s, v16.4s\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "and v4.16b, v20.16b, v31.16b\n"
- "sshr v28.4s, v28.4s, #0x1f\n"
- "and v3.16b, v10.16b, v31.16b\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "and v16.16b, v22.16b, v31.16b\n"
- "sqadd v17.4s, v17.4s, v18.4s\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sqadd v11.4s, v11.4s, v28.4s\n"
- "sshr v3.4s, v3.4s, #0x1f\n"
- "sqadd v9.4s, v9.4s, v2.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "srshl v15.4s, v15.4s, v21.4s\n"
- "srshl v17.4s, v17.4s, v21.4s\n"
- "sqadd v20.4s, v20.4s, v4.4s\n"
- "srshl v11.4s, v11.4s, v21.4s\n"
- "sqadd v10.4s, v10.4s, v3.4s\n"
- "srshl v9.4s, v9.4s, v21.4s\n"
- "sqadd v22.4s, v22.4s, v16.4s\n"
+ "smlal2 v9.4s, v29.8h, v8.8h\n"
+ "sqrdmulh v23.4s, v23.4s, v22.4s\n"
+ "and v22.16b, v11.16b, v31.16b\n"
+ "sqrdmulh v10.4s, v10.4s, v19.4s\n"
+ "and v20.16b, v23.16b, v31.16b\n"
+ "sqrdmulh v9.4s, v9.4s, v19.4s\n"
+ "and v19.16b, v18.16b, v30.16b\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
+ "and v27.16b, v17.16b, v30.16b\n"
+ "sshr v22.4s, v22.4s, #0x1f\n"
+ "and v25.16b, v10.16b, v30.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v0.16b, v9.16b, v30.16b\n"
+ "sqadd v15.4s, v15.4s, v4.4s\n"
+ "sshr v19.4s, v19.4s, #0x1f\n"
+ "sqadd v13.4s, v13.4s, v1.4s\n"
+ "sshr v27.4s, v27.4s, #0x1f\n"
+ "sqadd v11.4s, v11.4s, v22.4s\n"
+ "sshr v25.4s, v25.4s, #0x1f\n"
+ "sqadd v23.4s, v23.4s, v20.4s\n"
+ "sshr v0.4s, v0.4s, #0x1f\n"
+ "srshl v15.4s, v15.4s, v31.4s\n"
+ "sqadd v18.4s, v18.4s, v19.4s\n"
"srshl v13.4s, v13.4s, v31.4s\n"
+ "sqadd v17.4s, v17.4s, v27.4s\n"
+ "srshl v11.4s, v11.4s, v31.4s\n"
+ "sqadd v10.4s, v10.4s, v25.4s\n"
+ "srshl v23.4s, v23.4s, v31.4s\n"
+ "sqadd v9.4s, v9.4s, v0.4s\n"
+ "srshl v18.4s, v18.4s, v30.4s\n"
"sqxtn v15.4h, v15.4s\n"
- "srshl v20.4s, v20.4s, v31.4s\n"
- "sqxtn v17.4h, v17.4s\n"
- "srshl v10.4s, v10.4s, v31.4s\n"
+ "srshl v17.4s, v17.4s, v30.4s\n"
+ "sqxtn v13.4h, v13.4s\n"
+ "srshl v10.4s, v10.4s, v30.4s\n"
"sqxtn v11.4h, v11.4s\n"
- "srshl v22.4s, v22.4s, v31.4s\n"
- "sqxtn v9.4h, v9.4s\n"
- "sqxtn2 v15.8h, v13.4s\n"
- "sqxtn2 v17.8h, v20.4s\n"
+ "srshl v9.4s, v9.4s, v30.4s\n"
+ "sqxtn v23.4h, v23.4s\n"
+ "sqxtn2 v15.8h, v18.4s\n"
+ "sqxtn2 v13.8h, v17.4s\n"
"sqxtn2 v11.8h, v10.4s\n"
- "sqxtn2 v9.8h, v22.4s\n"
+ "sqxtn2 v23.8h, v9.4s\n"
"sqadd v15.8h, v15.8h, v12.8h\n"
- "sqadd v17.8h, v17.8h, v12.8h\n"
+ "sqadd v13.8h, v13.8h, v12.8h\n"
"sqadd v11.8h, v11.8h, v12.8h\n"
- "sqadd v9.8h, v9.8h, v12.8h\n"
+ "sqadd v23.8h, v23.8h, v12.8h\n"
"smax v15.8h, v15.8h, v14.8h\n"
- "smax v17.8h, v17.8h, v14.8h\n"
+ "smax v13.8h, v13.8h, v14.8h\n"
"smax v11.8h, v11.8h, v14.8h\n"
- "smax v9.8h, v9.8h, v14.8h\n"
- "smin v15.8h, v15.8h, v23.8h\n"
- "smin v17.8h, v17.8h, v23.8h\n"
- "smin v11.8h, v11.8h, v23.8h\n"
- "smin v9.8h, v9.8h, v23.8h\n"
+ "smax v23.8h, v23.8h, v14.8h\n"
+ "smin v15.8h, v15.8h, v21.8h\n"
+ "smin v13.8h, v13.8h, v21.8h\n"
+ "smin v11.8h, v11.8h, v21.8h\n"
+ "smin v23.8h, v23.8h, v21.8h\n"
"uzp1 v15.16b, v15.16b, v15.16b\n"
- "uzp1 v17.16b, v17.16b, v17.16b\n"
- "str d15, [x11, x16]\n"
+ "str d15, [x10, x14]\n"
+ "uzp1 v13.16b, v13.16b, v13.16b\n"
"uzp1 v11.16b, v11.16b, v11.16b\n"
- "uzp1 v9.16b, v9.16b, v9.16b\n"
- "str d17, [x10, x16]\n"
- "str d11, [x9, x16]\n"
- "str d9, [x28, x16]\n"
- "ldr q15, [x22, #0x0]\n"
- "ldr q13, [x22, #0x10]\n"
- "add x22, x22, #0x20\n"
- "ldr d0, [x14, #0x0]\n"
- "ldr d1, [x14, #0x8]\n"
- "add x16, x16, #0x8\n"
- "str x22, [%x[params], %[offsetof_Params_bias]]\n"
- "ldr d2, [x14, #0x10]\n"
- "ldr d3, [x14, #0x18]\n"
- "mov v17.16b, v15.16b\n"
- "mov v20.16b, v13.16b\n"
- "ldr d4, [x14, #0x20]\n"
- "ldr d5, [x14, #0x28]\n"
+ "str d13, [x9, x14]\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "str d11, [x28, x14]\n"
+ "str d23, [x27, x14]\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr q15, [x19, #0x0]\n"
+ "add x14, x14, #0x8\n"
+ "ldr q18, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr d0, [x17, #0x0]\n"
+ "ldr d1, [x17, #0x8]\n"
+ "ldr d2, [x17, #0x10]\n"
+ "mov v13.16b, v15.16b\n"
+ "mov v17.16b, v18.16b\n"
+ "ldr d3, [x17, #0x18]\n"
+ "ldr d4, [x17, #0x20]\n"
"mov v11.16b, v15.16b\n"
- "mov v10.16b, v13.16b\n"
- "ldr d6, [x14, #0x30]\n"
- "ldr d7, [x14, #0x38]\n"
- "mov v9.16b, v15.16b\n"
- "mov v22.16b, v13.16b\n"
- "ldr d8, [x14, #0x40]\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "usubl v0.8h, v0.8b, v19.8b\n"
- "usubl v1.8h, v1.8b, v19.8b\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "ldp x23, x22, [x15, #0x20]\n"
- "usubl v2.8h, v2.8b, v19.8b\n"
- "usubl v3.8h, v3.8b, v19.8b\n"
- "ldp x21, x20, [x15, #0x30]\n"
- "ldr d31, [x27, x17]\n"
- "usubl v4.8h, v4.8b, v19.8b\n"
- "usubl v5.8h, v5.8b, v19.8b\n"
- "ldr d30, [x26, x17]\n"
- "ldr d29, [x25, x17]\n"
- "usubl v6.8h, v6.8b, v19.8b\n"
- "usubl v7.8h, v7.8b, v19.8b\n"
- "ldr d28, [x24, x17]\n"
- "ldr d27, [x23, x17]\n"
- "usubl v8.8h, v8.8b, v19.8b\n"
+ "mov v10.16b, v18.16b\n"
+ "ldr d5, [x17, #0x28]\n"
+ "ldr d6, [x17, #0x30]\n"
+ "mov v23.16b, v15.16b\n"
+ "mov v9.16b, v18.16b\n"
+ "ldr d7, [x17, #0x38]\n"
+ "ldr d8, [x17, #0x40]\n"
+ "usubl v0.8h, v0.8b, v16.8b\n"
+ "usubl v1.8h, v1.8b, v16.8b\n"
+ "ldp x26, x25, [x13, #0x0]\n"
+ "ldp x24, x23, [x13, #0x10]\n"
+ "usubl v2.8h, v2.8b, v16.8b\n"
+ "usubl v3.8h, v3.8b, v16.8b\n"
+ "ldp x22, x21, [x13, #0x20]\n"
+ "ldp x20, x19, [x13, #0x30]\n"
+ "usubl v4.8h, v4.8b, v16.8b\n"
+ "usubl v5.8h, v5.8b, v16.8b\n"
+ "ldr d31, [x26, x15]\n"
+ "ldr d30, [x25, x15]\n"
+ "usubl v6.8h, v6.8b, v16.8b\n"
+ "usubl v7.8h, v7.8b, v16.8b\n"
+ "ldr d29, [x24, x15]\n"
+ "ldr d28, [x23, x15]\n"
+ "usubl v8.8h, v8.8b, v16.8b\n"
"ushll v31.8h, v31.8b, #0x0\n"
- "ldr d26, [x22, x17]\n"
- "ldr d25, [x21, x17]\n"
+ "ldr d27, [x22, x15]\n"
+ "ldr d26, [x21, x15]\n"
"ushll v30.8h, v30.8b, #0x0\n"
"ushll v29.8h, v29.8b, #0x0\n"
- "ldr d24, [x20, x17]\n"
+ "ldr d25, [x20, x15]\n"
+ "ldr d24, [x19, x15]\n"
"ushll v28.8h, v28.8b, #0x0\n"
"ushll v27.8h, v27.8b, #0x0\n"
"ushll v26.8h, v26.8b, #0x0\n"
@@ -425,967 +425,967 @@ void a64_u8qa_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(
"ushll v24.8h, v24.8b, #0x0\n"
"bgt 1b\n"
"2:" // Tail
- "ldr q18, [x13, #0x0]\n"
"smlal v15.4s, v31.4h, v8.4h\n"
- "smlal2 v13.4s, v31.8h, v8.8h\n"
- "ldr x23, [x15, #0x40]\n"
- "smlal v17.4s, v31.4h, v6.4h\n"
- "smlal2 v20.4s, v31.8h, v6.8h\n"
- "ldr x22, [x15, #0x48]\n"
- "ldr x21, [x15, #0x50]\n"
+ "smlal2 v18.4s, v31.8h, v8.8h\n"
+ "ldr x24, [x13, #0x40]\n"
+ "ldr x23, [x13, #0x48]\n"
+ "smlal v13.4s, v31.4h, v6.4h\n"
+ "smlal2 v17.4s, v31.8h, v6.8h\n"
+ "ldr x21, [x13, #0x50]\n"
+ "ldr x19, [x13, #0x58]\n"
"smlal v15.4s, v30.4h, v0.4h\n"
- "smlal2 v13.4s, v30.8h, v0.8h\n"
- "ldr q21, [x12, #0x0]\n"
- "ldr x20, [x15, #0x58]\n"
- "smlal v17.4s, v28.4h, v1.4h\n"
- "smlal2 v20.4s, v28.8h, v1.8h\n"
- "ldr d28, [x22, x17]\n"
+ "smlal2 v18.4s, v30.8h, v0.8h\n"
+ "ldr x22, [x13, #0x78]\n"
+ "ldr x20, [x13, #0x60]\n"
+ "smlal v13.4s, v28.4h, v1.4h\n"
+ "smlal2 v17.4s, v28.8h, v1.8h\n"
+ "ldr d28, [x23, x15]\n"
"ushll v28.8h, v28.8b, #0x0\n"
"smlal v15.4s, v29.4h, v1.4h\n"
- "smlal2 v13.4s, v29.8h, v1.8h\n"
- "ldr d29, [x23, x17]\n"
+ "smlal2 v18.4s, v29.8h, v1.8h\n"
+ "ldr d29, [x24, x15]\n"
"ushll v29.8h, v29.8b, #0x0\n"
- "smlal v17.4s, v27.4h, v2.4h\n"
- "smlal2 v20.4s, v27.8h, v2.8h\n"
- "ldr d27, [x21, x17]\n"
+ "smlal v13.4s, v27.4h, v2.4h\n"
+ "smlal2 v17.4s, v27.8h, v2.8h\n"
+ "ldr d27, [x21, x15]\n"
"ushll v27.8h, v27.8b, #0x0\n"
"smlal v15.4s, v26.4h, v3.4h\n"
- "smlal2 v13.4s, v26.8h, v3.8h\n"
- "ldr d26, [x20, x17]\n"
- "ldr x23, [x15, #0x78]\n"
- "smlal v17.4s, v24.4h, v0.4h\n"
- "smlal2 v20.4s, v24.8h, v0.8h\n"
- "ldr x20, [x15, #0x60]\n"
+ "smlal2 v18.4s, v26.8h, v3.8h\n"
+ "ldr d26, [x19, x15]\n"
"ushll v26.8h, v26.8b, #0x0\n"
+ "smlal v13.4s, v24.4h, v0.4h\n"
+ "smlal2 v17.4s, v24.8h, v0.8h\n"
+ "ldr x21, [x13, #0x80]\n"
+ "ldr x19, [x13, #0x68]\n"
"smlal v15.4s, v25.4h, v4.4h\n"
- "smlal2 v13.4s, v25.8h, v4.8h\n"
- "ldr d25, [x20, x17]\n"
- "ldr x21, [x15, #0x80]\n"
- "smlal v17.4s, v29.4h, v4.4h\n"
- "smlal2 v20.4s, v29.8h, v4.8h\n"
- "ldr q30, [x13, #0x10]\n"
- "ldr x20, [x15, #0x68]\n"
- "smlal v15.4s, v24.4h, v2.4h\n"
- "smlal2 v13.4s, v24.8h, v2.8h\n"
- "ldr d29, [x20, x17]\n"
+ "smlal2 v18.4s, v25.8h, v4.8h\n"
+ "ldr d25, [x20, x15]\n"
"ushll v25.8h, v25.8b, #0x0\n"
- "smlal v17.4s, v28.4h, v5.4h\n"
- "smlal2 v20.4s, v28.8h, v5.8h\n"
- "ldr d28, [x21, x17]\n"
- "ldr x22, [x15, #0x88]\n"
+ "smlal v13.4s, v29.4h, v4.4h\n"
+ "smlal2 v17.4s, v29.8h, v4.8h\n"
+ "ldr x20, [x13, #0x88]\n"
+ "ldr d29, [x19, x15]\n"
+ "smlal v15.4s, v24.4h, v2.4h\n"
+ "smlal2 v18.4s, v24.8h, v2.8h\n"
+ "ldr x19, [x13, #0x70]\n"
+ "ushll v29.8h, v29.8b, #0x0\n"
+ "smlal v13.4s, v28.4h, v5.4h\n"
+ "smlal2 v17.4s, v28.8h, v5.8h\n"
+ "ldr d28, [x21, x15]\n"
+ "ushll v28.8h, v28.8b, #0x0\n"
"smlal v11.4s, v31.4h, v2.4h\n"
"smlal2 v10.4s, v31.8h, v2.8h\n"
- "ldr x20, [x15, #0x70]\n"
- "ushll v28.8h, v28.8b, #0x0\n"
+ "ldr x24, [x13, #0x98]\n"
+ "ldr d24, [x19, x15]\n"
"smlal v15.4s, v27.4h, v5.4h\n"
- "smlal2 v13.4s, v27.8h, v5.8h\n"
- "ushll v29.8h, v29.8b, #0x0\n"
- "ldr x24, [x15, #0x98]\n"
- "smlal v17.4s, v27.4h, v3.4h\n"
- "smlal2 v20.4s, v27.8h, v3.8h\n"
- "ldr d27, [x23, x17]\n"
+ "smlal2 v18.4s, v27.8h, v5.8h\n"
+ "ushll v24.8h, v24.8b, #0x0\n"
+ "ldr x23, [x13, #0x90]\n"
+ "smlal v13.4s, v27.4h, v3.4h\n"
+ "smlal2 v17.4s, v27.8h, v3.8h\n"
+ "ldr d27, [x22, x15]\n"
"ushll v27.8h, v27.8b, #0x0\n"
- "smlal v9.4s, v31.4h, v0.4h\n"
+ "smlal v23.4s, v31.4h, v0.4h\n"
"smlal v11.4s, v26.4h, v3.4h\n"
- "ldr x21, [x15, #0x90]\n"
- "ldr x23, [x15, #0xa8]\n"
+ "ldr x22, [x13, #0xa8]\n"
+ "ldr x19, [x13, #0xa0]\n"
"smlal2 v10.4s, v26.8h, v3.8h\n"
- "ldr d26, [x22, x17]\n"
- "smlal2 v22.4s, v31.8h, v0.8h\n"
- "ldr d24, [x20, x17]\n"
- "smlal v9.4s, v27.4h, v4.4h\n"
- "smlal v11.4s, v25.4h, v0.4h\n"
+ "smlal2 v9.4s, v31.8h, v0.8h\n"
+ "ldr d26, [x20, x15]\n"
"ushll v26.8h, v26.8b, #0x0\n"
- "ldr x20, [x15, #0xa0]\n"
+ "smlal v23.4s, v27.4h, v4.4h\n"
+ "smlal v11.4s, v25.4h, v0.4h\n"
+ "ldr x21, [x13, #0xb0]\n"
+ "ldr x20, [x13, #0xb8]\n"
"smlal2 v10.4s, v25.8h, v0.8h\n"
- "ldr q31, [x12, #0x10]\n"
- "smlal2 v22.4s, v27.8h, v4.8h\n"
- "ldr d27, [x20, x17]\n"
- "smlal v9.4s, v28.4h, v1.4h\n"
+ "smlal2 v9.4s, v27.8h, v4.8h\n"
+ "ldr d27, [x19, x15]\n"
+ "ushll v27.8h, v27.8b, #0x0\n"
+ "smlal v23.4s, v28.4h, v1.4h\n"
"smlal v15.4s, v25.4h, v6.4h\n"
- "ushll v24.8h, v24.8b, #0x0\n"
- "ldr x22, [x15, #0xb0]\n"
- "smlal2 v13.4s, v25.8h, v6.8h\n"
- "ldr d25, [x21, x17]\n"
+ "ldr x19, [x13, #0xc0]\n"
+ "ldr q22, [x12, #0x0]\n"
+ "smlal2 v18.4s, v25.8h, v6.8h\n"
"smlal v11.4s, v29.4h, v4.4h\n"
+ "ldr d25, [x23, x15]\n"
"ushll v25.8h, v25.8b, #0x0\n"
"smlal2 v10.4s, v29.8h, v4.8h\n"
- "ldr d29, [x24, x17]\n"
- "smlal2 v22.4s, v28.8h, v1.8h\n"
+ "ldr d29, [x24, x15]\n"
+ "smlal2 v9.4s, v28.8h, v1.8h\n"
"ushll v29.8h, v29.8b, #0x0\n"
- "smlal v9.4s, v26.4h, v5.4h\n"
+ "smlal v23.4s, v26.4h, v5.4h\n"
"smlal v15.4s, v24.4h, v7.4h\n"
- "ldr x21, [x15, #0xb8]\n"
- "ushll v27.8h, v27.8b, #0x0\n"
- "smlal2 v13.4s, v24.8h, v7.8h\n"
+ "ldr q31, [x11, #0x0]\n"
+ "ldr q19, [x12, #0x10]\n"
+ "smlal2 v18.4s, v24.8h, v7.8h\n"
"smlal v11.4s, v24.4h, v1.4h\n"
- "ldr x20, [x15, #0xc0]\n"
- "sqrdmulh v15.4s, v15.4s, v18.4s\n"
+ "sqrdmulh v15.4s, v15.4s, v22.4s\n"
+ "ldr q30, [x11, #0x10]\n"
"smlal2 v10.4s, v24.8h, v1.8h\n"
- "ldr d24, [x23, x17]\n"
- "smlal2 v22.4s, v26.8h, v5.8h\n"
- "ldr d26, [x22, x17]\n"
- "smlal v9.4s, v29.4h, v2.4h\n"
+ "ldr d24, [x22, x15]\n"
+ "smlal2 v9.4s, v26.8h, v5.8h\n"
"ushll v24.8h, v24.8b, #0x0\n"
- "smlal2 v22.4s, v29.8h, v2.8h\n"
- "tst x7, #0x7\n"
- "smlal v11.4s, v25.4h, v6.4h\n"
- "smlal v9.4s, v24.4h, v3.4h\n"
+ "smlal v23.4s, v29.4h, v2.4h\n"
+ "ldr d26, [x21, x15]\n"
+ "smlal2 v9.4s, v29.8h, v2.8h\n"
"ushll v26.8h, v26.8b, #0x0\n"
- "add x13, x13, #0x20\n"
- "smlal v17.4s, v28.4h, v7.4h\n"
- "smlal2 v20.4s, v28.8h, v7.8h\n"
- "and v2.16b, v15.16b, v21.16b\n"
+ "smlal v11.4s, v25.4h, v6.4h\n"
+ "smlal v23.4s, v24.4h, v3.4h\n"
+ "and v4.16b, v15.16b, v31.16b\n"
+ "tst x8, #0x7\n"
+ "smlal v13.4s, v28.4h, v7.4h\n"
+ "smlal2 v17.4s, v28.8h, v7.8h\n"
+ "sqrdmulh v18.4s, v18.4s, v19.4s\n"
"add x12, x12, #0x20\n"
"smlal2 v10.4s, v25.8h, v6.8h\n"
- "ldr d25, [x21, x17]\n"
- "smlal2 v22.4s, v24.8h, v3.8h\n"
+ "ldr d25, [x20, x15]\n"
+ "smlal2 v9.4s, v24.8h, v3.8h\n"
"ushll v25.8h, v25.8b, #0x0\n"
"smlal v11.4s, v27.4h, v7.4h\n"
- "smlal v9.4s, v26.4h, v7.4h\n"
- "sqrdmulh v13.4s, v13.4s, v30.4s\n"
- "smlal v17.4s, v29.4h, v8.4h\n"
- "smlal2 v20.4s, v29.8h, v8.8h\n"
- "ldr d29, [x20, x17]\n"
+ "smlal v23.4s, v26.4h, v7.4h\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "add x11, x11, #0x20\n"
+ "smlal v13.4s, v29.4h, v8.4h\n"
+ "smlal2 v17.4s, v29.8h, v8.8h\n"
+ "ldr d29, [x19, x15]\n"
"ushll v29.8h, v29.8b, #0x0\n"
"smlal2 v10.4s, v27.8h, v7.8h\n"
- "smlal2 v22.4s, v26.8h, v7.8h\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "add x17, x17, #0x8\n"
+ "smlal2 v9.4s, v26.8h, v7.8h\n"
+ "sqrdmulh v13.4s, v13.4s, v22.4s\n"
+ "add x15, x15, #0x8\n"
"smlal v11.4s, v24.4h, v5.4h\n"
- "smlal v9.4s, v25.4h, v6.4h\n"
- "and v16.16b, v13.16b, v31.16b\n"
+ "smlal v23.4s, v25.4h, v6.4h\n"
+ "and v1.16b, v13.16b, v31.16b\n"
"smlal2 v10.4s, v24.8h, v5.8h\n"
- "smlal2 v22.4s, v25.8h, v6.8h\n"
- "sqrdmulh v17.4s, v17.4s, v18.4s\n"
+ "smlal2 v9.4s, v25.8h, v6.8h\n"
+ "sqrdmulh v17.4s, v17.4s, v19.4s\n"
"smlal v11.4s, v25.4h, v8.4h\n"
- "smlal v9.4s, v29.4h, v8.4h\n"
- "sqrdmulh v11.4s, v11.4s, v18.4s\n"
+ "smlal v23.4s, v29.4h, v8.4h\n"
+ "sqrdmulh v11.4s, v11.4s, v22.4s\n"
"smlal2 v10.4s, v25.8h, v8.8h\n"
- "smlal2 v22.4s, v29.8h, v8.8h\n"
- "sqrdmulh v9.4s, v9.4s, v18.4s\n"
- "sqadd v15.4s, v15.4s, v2.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "and v18.16b, v17.16b, v21.16b\n"
- "sqrdmulh v20.4s, v20.4s, v30.4s\n"
- "and v28.16b, v11.16b, v21.16b\n"
- "sqrdmulh v10.4s, v10.4s, v30.4s\n"
- "and v2.16b, v9.16b, v21.16b\n"
- "sqrdmulh v22.4s, v22.4s, v30.4s\n"
- "sqadd v13.4s, v13.4s, v16.4s\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "and v4.16b, v20.16b, v31.16b\n"
- "sshr v28.4s, v28.4s, #0x1f\n"
- "and v3.16b, v10.16b, v31.16b\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "and v16.16b, v22.16b, v31.16b\n"
- "sqadd v17.4s, v17.4s, v18.4s\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sqadd v11.4s, v11.4s, v28.4s\n"
- "sshr v3.4s, v3.4s, #0x1f\n"
- "sqadd v9.4s, v9.4s, v2.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "srshl v15.4s, v15.4s, v21.4s\n"
- "srshl v17.4s, v17.4s, v21.4s\n"
- "sqadd v20.4s, v20.4s, v4.4s\n"
- "srshl v11.4s, v11.4s, v21.4s\n"
- "sqadd v10.4s, v10.4s, v3.4s\n"
- "srshl v9.4s, v9.4s, v21.4s\n"
- "sqadd v22.4s, v22.4s, v16.4s\n"
+ "smlal2 v9.4s, v29.8h, v8.8h\n"
+ "sqrdmulh v23.4s, v23.4s, v22.4s\n"
+ "and v22.16b, v11.16b, v31.16b\n"
+ "sqrdmulh v10.4s, v10.4s, v19.4s\n"
+ "and v20.16b, v23.16b, v31.16b\n"
+ "sqrdmulh v9.4s, v9.4s, v19.4s\n"
+ "and v19.16b, v18.16b, v30.16b\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
+ "and v27.16b, v17.16b, v30.16b\n"
+ "sshr v22.4s, v22.4s, #0x1f\n"
+ "and v25.16b, v10.16b, v30.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v0.16b, v9.16b, v30.16b\n"
+ "sqadd v15.4s, v15.4s, v4.4s\n"
+ "sshr v19.4s, v19.4s, #0x1f\n"
+ "sqadd v13.4s, v13.4s, v1.4s\n"
+ "sshr v27.4s, v27.4s, #0x1f\n"
+ "sqadd v11.4s, v11.4s, v22.4s\n"
+ "sshr v25.4s, v25.4s, #0x1f\n"
+ "sqadd v23.4s, v23.4s, v20.4s\n"
+ "sshr v0.4s, v0.4s, #0x1f\n"
+ "srshl v15.4s, v15.4s, v31.4s\n"
+ "sqadd v18.4s, v18.4s, v19.4s\n"
"srshl v13.4s, v13.4s, v31.4s\n"
+ "sqadd v17.4s, v17.4s, v27.4s\n"
+ "srshl v11.4s, v11.4s, v31.4s\n"
+ "sqadd v10.4s, v10.4s, v25.4s\n"
+ "srshl v23.4s, v23.4s, v31.4s\n"
+ "sqadd v9.4s, v9.4s, v0.4s\n"
+ "srshl v18.4s, v18.4s, v30.4s\n"
"sqxtn v15.4h, v15.4s\n"
- "srshl v20.4s, v20.4s, v31.4s\n"
- "sqxtn v17.4h, v17.4s\n"
- "srshl v10.4s, v10.4s, v31.4s\n"
+ "srshl v17.4s, v17.4s, v30.4s\n"
+ "sqxtn v13.4h, v13.4s\n"
+ "srshl v10.4s, v10.4s, v30.4s\n"
"sqxtn v11.4h, v11.4s\n"
- "srshl v22.4s, v22.4s, v31.4s\n"
- "sqxtn v9.4h, v9.4s\n"
- "sqxtn2 v15.8h, v13.4s\n"
- "sqxtn2 v17.8h, v20.4s\n"
+ "srshl v9.4s, v9.4s, v30.4s\n"
+ "sqxtn v23.4h, v23.4s\n"
+ "sqxtn2 v15.8h, v18.4s\n"
+ "sqxtn2 v13.8h, v17.4s\n"
"sqxtn2 v11.8h, v10.4s\n"
- "sqxtn2 v9.8h, v22.4s\n"
+ "sqxtn2 v23.8h, v9.4s\n"
"sqadd v15.8h, v15.8h, v12.8h\n"
- "sqadd v17.8h, v17.8h, v12.8h\n"
+ "sqadd v13.8h, v13.8h, v12.8h\n"
"sqadd v11.8h, v11.8h, v12.8h\n"
- "sqadd v9.8h, v9.8h, v12.8h\n"
+ "sqadd v23.8h, v23.8h, v12.8h\n"
"smax v15.8h, v15.8h, v14.8h\n"
- "smax v17.8h, v17.8h, v14.8h\n"
+ "smax v13.8h, v13.8h, v14.8h\n"
"smax v11.8h, v11.8h, v14.8h\n"
- "smax v9.8h, v9.8h, v14.8h\n"
- "smin v15.8h, v15.8h, v23.8h\n"
- "smin v17.8h, v17.8h, v23.8h\n"
- "smin v11.8h, v11.8h, v23.8h\n"
- "smin v9.8h, v9.8h, v23.8h\n"
+ "smax v23.8h, v23.8h, v14.8h\n"
+ "smin v15.8h, v15.8h, v21.8h\n"
+ "smin v13.8h, v13.8h, v21.8h\n"
+ "smin v11.8h, v11.8h, v21.8h\n"
+ "smin v23.8h, v23.8h, v21.8h\n"
"uzp1 v15.16b, v15.16b, v15.16b\n"
- "uzp1 v17.16b, v17.16b, v17.16b\n"
- "str d15, [x11, x16]\n"
+ "str d15, [x10, x14]\n"
+ "uzp1 v13.16b, v13.16b, v13.16b\n"
"uzp1 v11.16b, v11.16b, v11.16b\n"
- "uzp1 v9.16b, v9.16b, v9.16b\n"
- "str d17, [x10, x16]\n"
- "str d11, [x9, x16]\n"
- "str d9, [x28, x16]\n"
- "add x16, x16, #0x8\n"
+ "str d13, [x9, x14]\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "str d11, [x28, x14]\n"
+ "str d23, [x27, x14]\n"
+ "add x14, x14, #0x8\n"
"beq 88f\n"
- "add x14, x14, #0x48\n"
+ "add x17, x17, #0x48\n"
"3:" // Oddments
- "ldr x22, [%x[params], %[offsetof_Params_bias]]\n"
- "tbz x7, #2, 5f\n"
- "ld1 { v15.4s }, [x22], #0x10\n"
- "tbz x7, #1, 4f\n"
- "ld1 { v13.d }[0], [x22], #0x8\n"
- "tbz x7, #0, 7f\n"
- "ld1 { v13.s }[2], [x22]\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "tbz x8, #2, 5f\n"
+ "ld1 { v15.4s }, [x19], #0x10\n"
+ "tbz x8, #1, 4f\n"
+ "ld1 { v18.d }[0], [x19], #0x8\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v18.s }[2], [x19]\n"
"b 7f\n"
"4:" // Oddments: Load bias: Bit 2: Bit 1: Unset
- "tbz x7, #0, 7f\n"
- "ld1 { v13.s }[0], [x22]\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v18.s }[0], [x19]\n"
"b 7f\n"
"5:" // Oddments: Load bias: Bit 2: Unset
- "tbz x7, #1, 6f\n"
- "ld1 { v15.d }[0], [x22], #0x8\n"
- "tbz x7, #0, 7f\n"
- "ld1 { v15.s }[2], [x22]\n"
+ "tbz x8, #1, 6f\n"
+ "ld1 { v15.d }[0], [x19], #0x8\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v15.s }[2], [x19]\n"
"b 7f\n"
"6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 7f\n"
- "ld1 { v15.s }[0], [x22]\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v15.s }[0], [x19]\n"
"7:" // Oddments: Load bias: Bit 2: End
- "ldr d0, [x14, #0x0]\n"
- "ldr d1, [x14, #0x8]\n"
- "mov v17.16b, v15.16b\n"
- "mov v20.16b, v13.16b\n"
- "ldr d2, [x14, #0x10]\n"
- "ldr d3, [x14, #0x18]\n"
+ "ldr d0, [x17, #0x0]\n"
+ "ldr d1, [x17, #0x8]\n"
+ "mov v13.16b, v15.16b\n"
+ "mov v17.16b, v18.16b\n"
+ "ldr d2, [x17, #0x10]\n"
+ "ldr d3, [x17, #0x18]\n"
"mov v11.16b, v15.16b\n"
- "mov v10.16b, v13.16b\n"
- "ldr d4, [x14, #0x20]\n"
- "ldr d5, [x14, #0x28]\n"
- "mov v9.16b, v15.16b\n"
- "mov v22.16b, v13.16b\n"
- "ldr d6, [x14, #0x30]\n"
- "ldr d7, [x14, #0x38]\n"
- "usubl v0.8h, v0.8b, v19.8b\n"
- "usubl v1.8h, v1.8b, v19.8b\n"
- "ldr d8, [x14, #0x40]\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "usubl v2.8h, v2.8b, v19.8b\n"
- "usubl v3.8h, v3.8b, v19.8b\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "ldp x23, x22, [x15, #0x20]\n"
- "usubl v4.8h, v4.8b, v19.8b\n"
- "usubl v5.8h, v5.8b, v19.8b\n"
- "ldp x21, x20, [x15, #0x30]\n"
- "usubl v6.8h, v6.8b, v19.8b\n"
- "usubl v7.8h, v7.8b, v19.8b\n"
- "usubl v8.8h, v8.8b, v19.8b\n"
- "add x27, x27, x17\n"
- "add x26, x26, x17\n"
- "add x25, x25, x17\n"
- "add x24, x24, x17\n"
- "add x23, x23, x17\n"
- "add x22, x22, x17\n"
- "add x21, x21, x17\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 9f\n"
- "ld1 { v31.s }[0], [x27], #0x4\n"
- "ld1 { v30.s }[0], [x26], #0x4\n"
- "ld1 { v29.s }[0], [x25], #0x4\n"
- "ld1 { v28.s }[0], [x24], #0x4\n"
- "ld1 { v27.s }[0], [x23], #0x4\n"
- "ld1 { v26.s }[0], [x22], #0x4\n"
- "ld1 { v25.s }[0], [x21], #0x4\n"
- "ld1 { v24.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 8f\n"
- "ld1 { v31.h }[2], [x27], #0x2\n"
- "ld1 { v30.h }[2], [x26], #0x2\n"
- "ld1 { v29.h }[2], [x25], #0x2\n"
- "ld1 { v28.h }[2], [x24], #0x2\n"
- "ld1 { v27.h }[2], [x23], #0x2\n"
- "ld1 { v26.h }[2], [x22], #0x2\n"
- "ld1 { v25.h }[2], [x21], #0x2\n"
- "ld1 { v24.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 11f\n"
- "ld1 { v31.b }[6], [x27]\n"
- "ld1 { v30.b }[6], [x26]\n"
- "ld1 { v29.b }[6], [x25]\n"
- "ld1 { v28.b }[6], [x24]\n"
- "ld1 { v27.b }[6], [x23]\n"
- "ld1 { v26.b }[6], [x22]\n"
- "ld1 { v25.b }[6], [x21]\n"
- "ld1 { v24.b }[6], [x20]\n"
+ "mov v10.16b, v18.16b\n"
+ "ldr d4, [x17, #0x20]\n"
+ "ldr d5, [x17, #0x28]\n"
+ "mov v23.16b, v15.16b\n"
+ "mov v9.16b, v18.16b\n"
+ "ldr d6, [x17, #0x30]\n"
+ "ldr d7, [x17, #0x38]\n"
+ "usubl v0.8h, v0.8b, v16.8b\n"
+ "usubl v1.8h, v1.8b, v16.8b\n"
+ "ldr d8, [x17, #0x40]\n"
+ "ldp x26, x25, [x13, #0x0]\n"
+ "usubl v2.8h, v2.8b, v16.8b\n"
+ "usubl v3.8h, v3.8b, v16.8b\n"
+ "ldp x24, x23, [x13, #0x10]\n"
+ "ldp x22, x21, [x13, #0x20]\n"
+ "usubl v4.8h, v4.8b, v16.8b\n"
+ "usubl v5.8h, v5.8b, v16.8b\n"
+ "ldp x20, x19, [x13, #0x30]\n"
+ "usubl v6.8h, v6.8b, v16.8b\n"
+ "usubl v7.8h, v7.8b, v16.8b\n"
+ "usubl v8.8h, v8.8b, v16.8b\n"
+ "add x26, x26, x15\n"
+ "add x25, x25, x15\n"
+ "add x24, x24, x15\n"
+ "add x23, x23, x15\n"
+ "add x22, x22, x15\n"
+ "add x21, x21, x15\n"
+ "add x20, x20, x15\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 9f\n"
+ "ld1 { v31.s }[0], [x26], #0x4\n"
+ "ld1 { v30.s }[0], [x25], #0x4\n"
+ "ld1 { v29.s }[0], [x24], #0x4\n"
+ "ld1 { v28.s }[0], [x23], #0x4\n"
+ "ld1 { v27.s }[0], [x22], #0x4\n"
+ "ld1 { v26.s }[0], [x21], #0x4\n"
+ "ld1 { v25.s }[0], [x20], #0x4\n"
+ "ld1 { v24.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 8f\n"
+ "ld1 { v31.h }[2], [x26], #0x2\n"
+ "ld1 { v30.h }[2], [x25], #0x2\n"
+ "ld1 { v29.h }[2], [x24], #0x2\n"
+ "ld1 { v28.h }[2], [x23], #0x2\n"
+ "ld1 { v27.h }[2], [x22], #0x2\n"
+ "ld1 { v26.h }[2], [x21], #0x2\n"
+ "ld1 { v25.h }[2], [x20], #0x2\n"
+ "ld1 { v24.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v31.b }[6], [x26]\n"
+ "ld1 { v30.b }[6], [x25]\n"
+ "ld1 { v29.b }[6], [x24]\n"
+ "ld1 { v28.b }[6], [x23]\n"
+ "ld1 { v27.b }[6], [x22]\n"
+ "ld1 { v26.b }[6], [x21]\n"
+ "ld1 { v25.b }[6], [x20]\n"
+ "ld1 { v24.b }[6], [x19]\n"
"b 11f\n"
"8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset
- "tbz x7, #0, 11f\n"
- "ld1 { v31.b }[4], [x27]\n"
- "ld1 { v30.b }[4], [x26]\n"
- "ld1 { v29.b }[4], [x25]\n"
- "ld1 { v28.b }[4], [x24]\n"
- "ld1 { v27.b }[4], [x23]\n"
- "ld1 { v26.b }[4], [x22]\n"
- "ld1 { v25.b }[4], [x21]\n"
- "ld1 { v24.b }[4], [x20]\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v31.b }[4], [x26]\n"
+ "ld1 { v30.b }[4], [x25]\n"
+ "ld1 { v29.b }[4], [x24]\n"
+ "ld1 { v28.b }[4], [x23]\n"
+ "ld1 { v27.b }[4], [x22]\n"
+ "ld1 { v26.b }[4], [x21]\n"
+ "ld1 { v25.b }[4], [x20]\n"
+ "ld1 { v24.b }[4], [x19]\n"
"b 11f\n"
"9:" // Oddments: Initial loads: Bit 2: Unset
- "tbz x7, #1, 10f\n"
- "ld1 { v31.h }[0], [x27], #0x2\n"
- "ld1 { v30.h }[0], [x26], #0x2\n"
- "ld1 { v29.h }[0], [x25], #0x2\n"
- "ld1 { v28.h }[0], [x24], #0x2\n"
- "ld1 { v27.h }[0], [x23], #0x2\n"
- "ld1 { v26.h }[0], [x22], #0x2\n"
- "ld1 { v25.h }[0], [x21], #0x2\n"
- "ld1 { v24.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 11f\n"
- "ld1 { v31.b }[2], [x27]\n"
- "ld1 { v30.b }[2], [x26]\n"
- "ld1 { v29.b }[2], [x25]\n"
- "ld1 { v28.b }[2], [x24]\n"
- "ld1 { v27.b }[2], [x23]\n"
- "ld1 { v26.b }[2], [x22]\n"
- "ld1 { v25.b }[2], [x21]\n"
- "ld1 { v24.b }[2], [x20]\n"
+ "tbz x8, #1, 10f\n"
+ "ld1 { v31.h }[0], [x26], #0x2\n"
+ "ld1 { v30.h }[0], [x25], #0x2\n"
+ "ld1 { v29.h }[0], [x24], #0x2\n"
+ "ld1 { v28.h }[0], [x23], #0x2\n"
+ "ld1 { v27.h }[0], [x22], #0x2\n"
+ "ld1 { v26.h }[0], [x21], #0x2\n"
+ "ld1 { v25.h }[0], [x20], #0x2\n"
+ "ld1 { v24.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v31.b }[2], [x26]\n"
+ "ld1 { v30.b }[2], [x25]\n"
+ "ld1 { v29.b }[2], [x24]\n"
+ "ld1 { v28.b }[2], [x23]\n"
+ "ld1 { v27.b }[2], [x22]\n"
+ "ld1 { v26.b }[2], [x21]\n"
+ "ld1 { v25.b }[2], [x20]\n"
+ "ld1 { v24.b }[2], [x19]\n"
"b 11f\n"
"10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 11f\n"
- "ld1 { v31.b }[0], [x27]\n"
- "ld1 { v30.b }[0], [x26]\n"
- "ld1 { v29.b }[0], [x25]\n"
- "ld1 { v28.b }[0], [x24]\n"
- "ld1 { v27.b }[0], [x23]\n"
- "ld1 { v26.b }[0], [x22]\n"
- "ld1 { v25.b }[0], [x21]\n"
- "ld1 { v24.b }[0], [x20]\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v31.b }[0], [x26]\n"
+ "ld1 { v30.b }[0], [x25]\n"
+ "ld1 { v29.b }[0], [x24]\n"
+ "ld1 { v28.b }[0], [x23]\n"
+ "ld1 { v27.b }[0], [x22]\n"
+ "ld1 { v26.b }[0], [x21]\n"
+ "ld1 { v25.b }[0], [x20]\n"
+ "ld1 { v24.b }[0], [x19]\n"
"11:" // Oddments: Initial loads: Bit 2: End
"ushll v31.8h, v31.8b, #0x0\n"
"smlal v15.4s, v31.4h, v8.4h\n"
- "smlal2 v13.4s, v31.8h, v8.8h\n"
- "ldr x23, [x15, #0x40]\n"
+ "smlal2 v18.4s, v31.8h, v8.8h\n"
+ "ldr x24, [x13, #0x40]\n"
"ushll v30.8h, v30.8b, #0x0\n"
"smlal v15.4s, v30.4h, v0.4h\n"
- "smlal2 v13.4s, v30.8h, v0.8h\n"
- "add x23, x23, x17\n"
+ "smlal2 v18.4s, v30.8h, v0.8h\n"
+ "add x24, x24, x15\n"
"ushll v29.8h, v29.8b, #0x0\n"
- "smlal v17.4s, v31.4h, v6.4h\n"
- "smlal2 v20.4s, v31.8h, v6.8h\n"
+ "smlal v13.4s, v31.4h, v6.4h\n"
+ "smlal2 v17.4s, v31.8h, v6.8h\n"
"smlal v15.4s, v29.4h, v1.4h\n"
- "smlal2 v13.4s, v29.8h, v1.8h\n"
+ "smlal2 v18.4s, v29.8h, v1.8h\n"
"ushll v28.8h, v28.8b, #0x0\n"
"ushll v26.8h, v26.8b, #0x0\n"
- "smlal v17.4s, v28.4h, v1.4h\n"
- "smlal2 v20.4s, v28.8h, v1.8h\n"
+ "smlal v13.4s, v28.4h, v1.4h\n"
+ "smlal2 v17.4s, v28.8h, v1.8h\n"
"smlal v15.4s, v26.4h, v3.4h\n"
- "smlal2 v13.4s, v26.8h, v3.8h\n"
+ "smlal2 v18.4s, v26.8h, v3.8h\n"
"ushll v27.8h, v27.8b, #0x0\n"
"ushll v25.8h, v25.8b, #0x0\n"
- "smlal v17.4s, v27.4h, v2.4h\n"
- "smlal2 v20.4s, v27.8h, v2.8h\n"
+ "smlal v13.4s, v27.4h, v2.4h\n"
+ "smlal2 v17.4s, v27.8h, v2.8h\n"
"smlal v15.4s, v25.4h, v4.4h\n"
- "smlal2 v13.4s, v25.8h, v4.8h\n"
+ "smlal2 v18.4s, v25.8h, v4.8h\n"
"ushll v24.8h, v24.8b, #0x0\n"
"smlal v11.4s, v31.4h, v2.4h\n"
"smlal2 v10.4s, v31.8h, v2.8h\n"
- "smlal v9.4s, v31.4h, v0.4h\n"
- "smlal2 v22.4s, v31.8h, v0.8h\n"
+ "smlal v23.4s, v31.4h, v0.4h\n"
+ "smlal2 v9.4s, v31.8h, v0.8h\n"
"smlal v15.4s, v24.4h, v2.4h\n"
- "smlal2 v13.4s, v24.8h, v2.8h\n"
- "smlal v17.4s, v24.4h, v0.4h\n"
- "smlal2 v20.4s, v24.8h, v0.8h\n"
- "tbz x7, #2, 13f\n"
- "ld1 { v29.s }[0], [x23], #0x4\n"
- "tbz x7, #1, 12f\n"
- "ld1 { v29.h }[2], [x23], #0x2\n"
- "tbz x7, #0, 15f\n"
- "ld1 { v29.b }[6], [x23]\n"
+ "smlal2 v18.4s, v24.8h, v2.8h\n"
+ "smlal v13.4s, v24.4h, v0.4h\n"
+ "smlal2 v17.4s, v24.8h, v0.8h\n"
+ "tbz x8, #2, 13f\n"
+ "ld1 { v29.s }[0], [x24], #0x4\n"
+ "tbz x8, #1, 12f\n"
+ "ld1 { v29.h }[2], [x24], #0x2\n"
+ "tbz x8, #0, 15f\n"
+ "ld1 { v29.b }[6], [x24]\n"
"b 15f\n"
"12:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 15f\n"
- "ld1 { v29.b }[4], [x23]\n"
+ "tbz x8, #0, 15f\n"
+ "ld1 { v29.b }[4], [x24]\n"
"b 15f\n"
"13:" // Oddments: Load (1, 3): Bit 2: Unset
- "tbz x7, #1, 14f\n"
- "ld1 { v29.h }[0], [x23], #0x2\n"
- "tbz x7, #0, 15f\n"
- "ld1 { v29.b }[2], [x23]\n"
+ "tbz x8, #1, 14f\n"
+ "ld1 { v29.h }[0], [x24], #0x2\n"
+ "tbz x8, #0, 15f\n"
+ "ld1 { v29.b }[2], [x24]\n"
"b 15f\n"
"14:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 15f\n"
- "ld1 { v29.b }[0], [x23]\n"
+ "tbz x8, #0, 15f\n"
+ "ld1 { v29.b }[0], [x24]\n"
"15:" // Oddments: Load (1, 3): Bit 2: End
"ushll v29.8h, v29.8b, #0x0\n"
- "ldr x22, [x15, #0x48]\n"
- "smlal v17.4s, v29.4h, v4.4h\n"
- "smlal2 v20.4s, v29.8h, v4.8h\n"
- "add x22, x22, x17\n"
- "tbz x7, #2, 17f\n"
- "ld1 { v28.s }[0], [x22], #0x4\n"
- "tbz x7, #1, 16f\n"
- "ld1 { v28.h }[2], [x22], #0x2\n"
- "tbz x7, #0, 19f\n"
- "ld1 { v28.b }[6], [x22]\n"
+ "ldr x23, [x13, #0x48]\n"
+ "smlal v13.4s, v29.4h, v4.4h\n"
+ "smlal2 v17.4s, v29.8h, v4.8h\n"
+ "add x23, x23, x15\n"
+ "tbz x8, #2, 17f\n"
+ "ld1 { v28.s }[0], [x23], #0x4\n"
+ "tbz x8, #1, 16f\n"
+ "ld1 { v28.h }[2], [x23], #0x2\n"
+ "tbz x8, #0, 19f\n"
+ "ld1 { v28.b }[6], [x23]\n"
"b 19f\n"
"16:" // Oddments: Load (1, 4): Bit 2: Bit 1: Unset
- "tbz x7, #0, 19f\n"
- "ld1 { v28.b }[4], [x22]\n"
+ "tbz x8, #0, 19f\n"
+ "ld1 { v28.b }[4], [x23]\n"
"b 19f\n"
"17:" // Oddments: Load (1, 4): Bit 2: Unset
- "tbz x7, #1, 18f\n"
- "ld1 { v28.h }[0], [x22], #0x2\n"
- "tbz x7, #0, 19f\n"
- "ld1 { v28.b }[2], [x22]\n"
+ "tbz x8, #1, 18f\n"
+ "ld1 { v28.h }[0], [x23], #0x2\n"
+ "tbz x8, #0, 19f\n"
+ "ld1 { v28.b }[2], [x23]\n"
"b 19f\n"
"18:" // Oddments: Load (1, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 19f\n"
- "ld1 { v28.b }[0], [x22]\n"
+ "tbz x8, #0, 19f\n"
+ "ld1 { v28.b }[0], [x23]\n"
"19:" // Oddments: Load (1, 4): Bit 2: End
"ushll v28.8h, v28.8b, #0x0\n"
- "ldr x21, [x15, #0x50]\n"
- "smlal v17.4s, v28.4h, v5.4h\n"
- "smlal2 v20.4s, v28.8h, v5.8h\n"
- "add x21, x21, x17\n"
- "tbz x7, #2, 21f\n"
+ "ldr x21, [x13, #0x50]\n"
+ "smlal v13.4s, v28.4h, v5.4h\n"
+ "smlal2 v17.4s, v28.8h, v5.8h\n"
+ "add x21, x21, x15\n"
+ "tbz x8, #2, 21f\n"
"ld1 { v27.s }[0], [x21], #0x4\n"
- "tbz x7, #1, 20f\n"
+ "tbz x8, #1, 20f\n"
"ld1 { v27.h }[2], [x21], #0x2\n"
- "tbz x7, #0, 23f\n"
+ "tbz x8, #0, 23f\n"
"ld1 { v27.b }[6], [x21]\n"
"b 23f\n"
"20:" // Oddments: Load (1, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 23f\n"
+ "tbz x8, #0, 23f\n"
"ld1 { v27.b }[4], [x21]\n"
"b 23f\n"
"21:" // Oddments: Load (1, 2): Bit 2: Unset
- "tbz x7, #1, 22f\n"
+ "tbz x8, #1, 22f\n"
"ld1 { v27.h }[0], [x21], #0x2\n"
- "tbz x7, #0, 23f\n"
+ "tbz x8, #0, 23f\n"
"ld1 { v27.b }[2], [x21]\n"
"b 23f\n"
"22:" // Oddments: Load (1, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 23f\n"
+ "tbz x8, #0, 23f\n"
"ld1 { v27.b }[0], [x21]\n"
"23:" // Oddments: Load (1, 2): Bit 2: End
"ushll v27.8h, v27.8b, #0x0\n"
- "ldr x20, [x15, #0x58]\n"
+ "ldr x19, [x13, #0x58]\n"
"smlal v15.4s, v27.4h, v5.4h\n"
- "smlal2 v13.4s, v27.8h, v5.8h\n"
- "smlal v17.4s, v27.4h, v3.4h\n"
- "smlal2 v20.4s, v27.8h, v3.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 25f\n"
- "ld1 { v26.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 24f\n"
- "ld1 { v26.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 27f\n"
- "ld1 { v26.b }[6], [x20]\n"
+ "smlal2 v18.4s, v27.8h, v5.8h\n"
+ "smlal v13.4s, v27.4h, v3.4h\n"
+ "smlal2 v17.4s, v27.8h, v3.8h\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 25f\n"
+ "ld1 { v26.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 24f\n"
+ "ld1 { v26.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v26.b }[6], [x19]\n"
"b 27f\n"
"24:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 27f\n"
- "ld1 { v26.b }[4], [x20]\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v26.b }[4], [x19]\n"
"b 27f\n"
"25:" // Oddments: Load (3, 0): Bit 2: Unset
- "tbz x7, #1, 26f\n"
- "ld1 { v26.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 27f\n"
- "ld1 { v26.b }[2], [x20]\n"
+ "tbz x8, #1, 26f\n"
+ "ld1 { v26.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v26.b }[2], [x19]\n"
"b 27f\n"
"26:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 27f\n"
- "ld1 { v26.b }[0], [x20]\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v26.b }[0], [x19]\n"
"27:" // Oddments: Load (3, 0): Bit 2: End
"ushll v26.8h, v26.8b, #0x0\n"
- "ldr x20, [x15, #0x60]\n"
+ "ldr x20, [x13, #0x60]\n"
"smlal v11.4s, v26.4h, v3.4h\n"
"smlal2 v10.4s, v26.8h, v3.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 29f\n"
+ "add x20, x20, x15\n"
+ "tbz x8, #2, 29f\n"
"ld1 { v25.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 28f\n"
+ "tbz x8, #1, 28f\n"
"ld1 { v25.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 31f\n"
+ "tbz x8, #0, 31f\n"
"ld1 { v25.b }[6], [x20]\n"
"b 31f\n"
"28:" // Oddments: Load (2, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 31f\n"
+ "tbz x8, #0, 31f\n"
"ld1 { v25.b }[4], [x20]\n"
"b 31f\n"
"29:" // Oddments: Load (2, 0): Bit 2: Unset
- "tbz x7, #1, 30f\n"
+ "tbz x8, #1, 30f\n"
"ld1 { v25.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 31f\n"
+ "tbz x8, #0, 31f\n"
"ld1 { v25.b }[2], [x20]\n"
"b 31f\n"
"30:" // Oddments: Load (2, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 31f\n"
+ "tbz x8, #0, 31f\n"
"ld1 { v25.b }[0], [x20]\n"
"31:" // Oddments: Load (2, 0): Bit 2: End
"ushll v25.8h, v25.8b, #0x0\n"
- "ldr x20, [x15, #0x68]\n"
+ "ldr x19, [x13, #0x68]\n"
"smlal v15.4s, v25.4h, v6.4h\n"
- "smlal2 v13.4s, v25.8h, v6.8h\n"
+ "smlal2 v18.4s, v25.8h, v6.8h\n"
"smlal v11.4s, v25.4h, v0.4h\n"
"smlal2 v10.4s, v25.8h, v0.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 33f\n"
- "ld1 { v29.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 32f\n"
- "ld1 { v29.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 35f\n"
- "ld1 { v29.b }[6], [x20]\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 33f\n"
+ "ld1 { v29.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 32f\n"
+ "ld1 { v29.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v29.b }[6], [x19]\n"
"b 35f\n"
"32:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset
- "tbz x7, #0, 35f\n"
- "ld1 { v29.b }[4], [x20]\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v29.b }[4], [x19]\n"
"b 35f\n"
"33:" // Oddments: Load (3, 1): Bit 2: Unset
- "tbz x7, #1, 34f\n"
- "ld1 { v29.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 35f\n"
- "ld1 { v29.b }[2], [x20]\n"
+ "tbz x8, #1, 34f\n"
+ "ld1 { v29.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v29.b }[2], [x19]\n"
"b 35f\n"
"34:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 35f\n"
- "ld1 { v29.b }[0], [x20]\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v29.b }[0], [x19]\n"
"35:" // Oddments: Load (3, 1): Bit 2: End
"ushll v29.8h, v29.8b, #0x0\n"
- "ldr x20, [x15, #0x70]\n"
+ "ldr x19, [x13, #0x70]\n"
"smlal v11.4s, v29.4h, v4.4h\n"
"smlal2 v10.4s, v29.8h, v4.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 37f\n"
- "ld1 { v24.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 36f\n"
- "ld1 { v24.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 39f\n"
- "ld1 { v24.b }[6], [x20]\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 37f\n"
+ "ld1 { v24.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 36f\n"
+ "ld1 { v24.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v24.b }[6], [x19]\n"
"b 39f\n"
"36:" // Oddments: Load (2, 1): Bit 2: Bit 1: Unset
- "tbz x7, #0, 39f\n"
- "ld1 { v24.b }[4], [x20]\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v24.b }[4], [x19]\n"
"b 39f\n"
"37:" // Oddments: Load (2, 1): Bit 2: Unset
- "tbz x7, #1, 38f\n"
- "ld1 { v24.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 39f\n"
- "ld1 { v24.b }[2], [x20]\n"
+ "tbz x8, #1, 38f\n"
+ "ld1 { v24.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v24.b }[2], [x19]\n"
"b 39f\n"
"38:" // Oddments: Load (2, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 39f\n"
- "ld1 { v24.b }[0], [x20]\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v24.b }[0], [x19]\n"
"39:" // Oddments: Load (2, 1): Bit 2: End
"ushll v24.8h, v24.8b, #0x0\n"
- "ldr x23, [x15, #0x78]\n"
+ "ldr x22, [x13, #0x78]\n"
"smlal v15.4s, v24.4h, v7.4h\n"
- "smlal2 v13.4s, v24.8h, v7.8h\n"
+ "smlal2 v18.4s, v24.8h, v7.8h\n"
"smlal v11.4s, v24.4h, v1.4h\n"
"smlal2 v10.4s, v24.8h, v1.8h\n"
- "add x23, x23, x17\n"
- "tbz x7, #2, 41f\n"
- "ld1 { v27.s }[0], [x23], #0x4\n"
- "tbz x7, #1, 40f\n"
- "ld1 { v27.h }[2], [x23], #0x2\n"
- "tbz x7, #0, 43f\n"
- "ld1 { v27.b }[6], [x23]\n"
+ "add x22, x22, x15\n"
+ "tbz x8, #2, 41f\n"
+ "ld1 { v27.s }[0], [x22], #0x4\n"
+ "tbz x8, #1, 40f\n"
+ "ld1 { v27.h }[2], [x22], #0x2\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v27.b }[6], [x22]\n"
"b 43f\n"
"40:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 43f\n"
- "ld1 { v27.b }[4], [x23]\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v27.b }[4], [x22]\n"
"b 43f\n"
"41:" // Oddments: Load (3, 3): Bit 2: Unset
- "tbz x7, #1, 42f\n"
- "ld1 { v27.h }[0], [x23], #0x2\n"
- "tbz x7, #0, 43f\n"
- "ld1 { v27.b }[2], [x23]\n"
+ "tbz x8, #1, 42f\n"
+ "ld1 { v27.h }[0], [x22], #0x2\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v27.b }[2], [x22]\n"
"b 43f\n"
"42:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 43f\n"
- "ld1 { v27.b }[0], [x23]\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v27.b }[0], [x22]\n"
"43:" // Oddments: Load (3, 3): Bit 2: End
"ushll v27.8h, v27.8b, #0x0\n"
- "ldr x21, [x15, #0x80]\n"
- "smlal v9.4s, v27.4h, v4.4h\n"
- "smlal2 v22.4s, v27.8h, v4.8h\n"
- "add x21, x21, x17\n"
- "tbz x7, #2, 45f\n"
+ "ldr x21, [x13, #0x80]\n"
+ "smlal v23.4s, v27.4h, v4.4h\n"
+ "smlal2 v9.4s, v27.8h, v4.8h\n"
+ "add x21, x21, x15\n"
+ "tbz x8, #2, 45f\n"
"ld1 { v28.s }[0], [x21], #0x4\n"
- "tbz x7, #1, 44f\n"
+ "tbz x8, #1, 44f\n"
"ld1 { v28.h }[2], [x21], #0x2\n"
- "tbz x7, #0, 47f\n"
+ "tbz x8, #0, 47f\n"
"ld1 { v28.b }[6], [x21]\n"
"b 47f\n"
"44:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 47f\n"
+ "tbz x8, #0, 47f\n"
"ld1 { v28.b }[4], [x21]\n"
"b 47f\n"
"45:" // Oddments: Load (2, 3): Bit 2: Unset
- "tbz x7, #1, 46f\n"
+ "tbz x8, #1, 46f\n"
"ld1 { v28.h }[0], [x21], #0x2\n"
- "tbz x7, #0, 47f\n"
+ "tbz x8, #0, 47f\n"
"ld1 { v28.b }[2], [x21]\n"
"b 47f\n"
"46:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 47f\n"
+ "tbz x8, #0, 47f\n"
"ld1 { v28.b }[0], [x21]\n"
"47:" // Oddments: Load (2, 3): Bit 2: End
"ushll v28.8h, v28.8b, #0x0\n"
- "ldr x22, [x15, #0x88]\n"
- "smlal v17.4s, v28.4h, v7.4h\n"
- "smlal2 v20.4s, v28.8h, v7.8h\n"
- "smlal v9.4s, v28.4h, v1.4h\n"
- "smlal2 v22.4s, v28.8h, v1.8h\n"
- "add x22, x22, x17\n"
- "tbz x7, #2, 49f\n"
- "ld1 { v26.s }[0], [x22], #0x4\n"
- "tbz x7, #1, 48f\n"
- "ld1 { v26.h }[2], [x22], #0x2\n"
- "tbz x7, #0, 51f\n"
- "ld1 { v26.b }[6], [x22]\n"
+ "ldr x20, [x13, #0x88]\n"
+ "smlal v13.4s, v28.4h, v7.4h\n"
+ "smlal2 v17.4s, v28.8h, v7.8h\n"
+ "smlal v23.4s, v28.4h, v1.4h\n"
+ "smlal2 v9.4s, v28.8h, v1.8h\n"
+ "add x20, x20, x15\n"
+ "tbz x8, #2, 49f\n"
+ "ld1 { v26.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 48f\n"
+ "ld1 { v26.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v26.b }[6], [x20]\n"
"b 51f\n"
"48:" // Oddments: Load (3, 4): Bit 2: Bit 1: Unset
- "tbz x7, #0, 51f\n"
- "ld1 { v26.b }[4], [x22]\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v26.b }[4], [x20]\n"
"b 51f\n"
"49:" // Oddments: Load (3, 4): Bit 2: Unset
- "tbz x7, #1, 50f\n"
- "ld1 { v26.h }[0], [x22], #0x2\n"
- "tbz x7, #0, 51f\n"
- "ld1 { v26.b }[2], [x22]\n"
+ "tbz x8, #1, 50f\n"
+ "ld1 { v26.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v26.b }[2], [x20]\n"
"b 51f\n"
"50:" // Oddments: Load (3, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 51f\n"
- "ld1 { v26.b }[0], [x22]\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v26.b }[0], [x20]\n"
"51:" // Oddments: Load (3, 4): Bit 2: End
"ushll v26.8h, v26.8b, #0x0\n"
- "ldr x21, [x15, #0x90]\n"
- "smlal v9.4s, v26.4h, v5.4h\n"
- "smlal2 v22.4s, v26.8h, v5.8h\n"
- "add x21, x21, x17\n"
- "tbz x7, #2, 53f\n"
- "ld1 { v25.s }[0], [x21], #0x4\n"
- "tbz x7, #1, 52f\n"
- "ld1 { v25.h }[2], [x21], #0x2\n"
- "tbz x7, #0, 55f\n"
- "ld1 { v25.b }[6], [x21]\n"
+ "ldr x23, [x13, #0x90]\n"
+ "smlal v23.4s, v26.4h, v5.4h\n"
+ "smlal2 v9.4s, v26.8h, v5.8h\n"
+ "add x23, x23, x15\n"
+ "tbz x8, #2, 53f\n"
+ "ld1 { v25.s }[0], [x23], #0x4\n"
+ "tbz x8, #1, 52f\n"
+ "ld1 { v25.h }[2], [x23], #0x2\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v25.b }[6], [x23]\n"
"b 55f\n"
"52:" // Oddments: Load (4, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 55f\n"
- "ld1 { v25.b }[4], [x21]\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v25.b }[4], [x23]\n"
"b 55f\n"
"53:" // Oddments: Load (4, 0): Bit 2: Unset
- "tbz x7, #1, 54f\n"
- "ld1 { v25.h }[0], [x21], #0x2\n"
- "tbz x7, #0, 55f\n"
- "ld1 { v25.b }[2], [x21]\n"
+ "tbz x8, #1, 54f\n"
+ "ld1 { v25.h }[0], [x23], #0x2\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v25.b }[2], [x23]\n"
"b 55f\n"
"54:" // Oddments: Load (4, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 55f\n"
- "ld1 { v25.b }[0], [x21]\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v25.b }[0], [x23]\n"
"55:" // Oddments: Load (4, 0): Bit 2: End
"ushll v25.8h, v25.8b, #0x0\n"
- "ldr x24, [x15, #0x98]\n"
+ "ldr x24, [x13, #0x98]\n"
"smlal v11.4s, v25.4h, v6.4h\n"
"smlal2 v10.4s, v25.8h, v6.8h\n"
- "add x24, x24, x17\n"
- "tbz x7, #2, 57f\n"
+ "add x24, x24, x15\n"
+ "tbz x8, #2, 57f\n"
"ld1 { v29.s }[0], [x24], #0x4\n"
- "tbz x7, #1, 56f\n"
+ "tbz x8, #1, 56f\n"
"ld1 { v29.h }[2], [x24], #0x2\n"
- "tbz x7, #0, 59f\n"
+ "tbz x8, #0, 59f\n"
"ld1 { v29.b }[6], [x24]\n"
"b 59f\n"
"56:" // Oddments: Load (2, 4): Bit 2: Bit 1: Unset
- "tbz x7, #0, 59f\n"
+ "tbz x8, #0, 59f\n"
"ld1 { v29.b }[4], [x24]\n"
"b 59f\n"
"57:" // Oddments: Load (2, 4): Bit 2: Unset
- "tbz x7, #1, 58f\n"
+ "tbz x8, #1, 58f\n"
"ld1 { v29.h }[0], [x24], #0x2\n"
- "tbz x7, #0, 59f\n"
+ "tbz x8, #0, 59f\n"
"ld1 { v29.b }[2], [x24]\n"
"b 59f\n"
"58:" // Oddments: Load (2, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 59f\n"
+ "tbz x8, #0, 59f\n"
"ld1 { v29.b }[0], [x24]\n"
"59:" // Oddments: Load (2, 4): Bit 2: End
"ushll v29.8h, v29.8b, #0x0\n"
- "ldr x20, [x15, #0xa0]\n"
- "smlal v17.4s, v29.4h, v8.4h\n"
- "smlal2 v20.4s, v29.8h, v8.8h\n"
- "smlal v9.4s, v29.4h, v2.4h\n"
- "smlal2 v22.4s, v29.8h, v2.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 61f\n"
- "ld1 { v27.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 60f\n"
- "ld1 { v27.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 63f\n"
- "ld1 { v27.b }[6], [x20]\n"
+ "ldr x19, [x13, #0xa0]\n"
+ "smlal v13.4s, v29.4h, v8.4h\n"
+ "smlal2 v17.4s, v29.8h, v8.8h\n"
+ "smlal v23.4s, v29.4h, v2.4h\n"
+ "smlal2 v9.4s, v29.8h, v2.8h\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 61f\n"
+ "ld1 { v27.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 60f\n"
+ "ld1 { v27.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 63f\n"
+ "ld1 { v27.b }[6], [x19]\n"
"b 63f\n"
"60:" // Oddments: Load (4, 1): Bit 2: Bit 1: Unset
- "tbz x7, #0, 63f\n"
- "ld1 { v27.b }[4], [x20]\n"
+ "tbz x8, #0, 63f\n"
+ "ld1 { v27.b }[4], [x19]\n"
"b 63f\n"
"61:" // Oddments: Load (4, 1): Bit 2: Unset
- "tbz x7, #1, 62f\n"
- "ld1 { v27.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 63f\n"
- "ld1 { v27.b }[2], [x20]\n"
+ "tbz x8, #1, 62f\n"
+ "ld1 { v27.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 63f\n"
+ "ld1 { v27.b }[2], [x19]\n"
"b 63f\n"
"62:" // Oddments: Load (4, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 63f\n"
- "ld1 { v27.b }[0], [x20]\n"
+ "tbz x8, #0, 63f\n"
+ "ld1 { v27.b }[0], [x19]\n"
"63:" // Oddments: Load (4, 1): Bit 2: End
"ushll v27.8h, v27.8b, #0x0\n"
- "ldr x23, [x15, #0xa8]\n"
+ "ldr x22, [x13, #0xa8]\n"
"smlal v11.4s, v27.4h, v7.4h\n"
"smlal2 v10.4s, v27.8h, v7.8h\n"
- "add x23, x23, x17\n"
- "tbz x7, #2, 65f\n"
- "ld1 { v24.s }[0], [x23], #0x4\n"
- "tbz x7, #1, 64f\n"
- "ld1 { v24.h }[2], [x23], #0x2\n"
- "tbz x7, #0, 67f\n"
- "ld1 { v24.b }[6], [x23]\n"
+ "add x22, x22, x15\n"
+ "tbz x8, #2, 65f\n"
+ "ld1 { v24.s }[0], [x22], #0x4\n"
+ "tbz x8, #1, 64f\n"
+ "ld1 { v24.h }[2], [x22], #0x2\n"
+ "tbz x8, #0, 67f\n"
+ "ld1 { v24.b }[6], [x22]\n"
"b 67f\n"
"64:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 67f\n"
- "ld1 { v24.b }[4], [x23]\n"
+ "tbz x8, #0, 67f\n"
+ "ld1 { v24.b }[4], [x22]\n"
"b 67f\n"
"65:" // Oddments: Load (3, 2): Bit 2: Unset
- "tbz x7, #1, 66f\n"
- "ld1 { v24.h }[0], [x23], #0x2\n"
- "tbz x7, #0, 67f\n"
- "ld1 { v24.b }[2], [x23]\n"
+ "tbz x8, #1, 66f\n"
+ "ld1 { v24.h }[0], [x22], #0x2\n"
+ "tbz x8, #0, 67f\n"
+ "ld1 { v24.b }[2], [x22]\n"
"b 67f\n"
"66:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 67f\n"
- "ld1 { v24.b }[0], [x23]\n"
+ "tbz x8, #0, 67f\n"
+ "ld1 { v24.b }[0], [x22]\n"
"67:" // Oddments: Load (3, 2): Bit 2: End
"ushll v24.8h, v24.8b, #0x0\n"
- "ldr x22, [x15, #0xb0]\n"
+ "ldr x21, [x13, #0xb0]\n"
"smlal v11.4s, v24.4h, v5.4h\n"
"smlal2 v10.4s, v24.8h, v5.8h\n"
- "smlal v9.4s, v24.4h, v3.4h\n"
- "smlal2 v22.4s, v24.8h, v3.8h\n"
- "add x22, x22, x17\n"
- "tbz x7, #2, 69f\n"
- "ld1 { v26.s }[0], [x22], #0x4\n"
- "tbz x7, #1, 68f\n"
- "ld1 { v26.h }[2], [x22], #0x2\n"
- "tbz x7, #0, 71f\n"
- "ld1 { v26.b }[6], [x22]\n"
+ "smlal v23.4s, v24.4h, v3.4h\n"
+ "smlal2 v9.4s, v24.8h, v3.8h\n"
+ "add x21, x21, x15\n"
+ "tbz x8, #2, 69f\n"
+ "ld1 { v26.s }[0], [x21], #0x4\n"
+ "tbz x8, #1, 68f\n"
+ "ld1 { v26.h }[2], [x21], #0x2\n"
+ "tbz x8, #0, 71f\n"
+ "ld1 { v26.b }[6], [x21]\n"
"b 71f\n"
"68:" // Oddments: Load (4, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 71f\n"
- "ld1 { v26.b }[4], [x22]\n"
+ "tbz x8, #0, 71f\n"
+ "ld1 { v26.b }[4], [x21]\n"
"b 71f\n"
"69:" // Oddments: Load (4, 3): Bit 2: Unset
- "tbz x7, #1, 70f\n"
- "ld1 { v26.h }[0], [x22], #0x2\n"
- "tbz x7, #0, 71f\n"
- "ld1 { v26.b }[2], [x22]\n"
+ "tbz x8, #1, 70f\n"
+ "ld1 { v26.h }[0], [x21], #0x2\n"
+ "tbz x8, #0, 71f\n"
+ "ld1 { v26.b }[2], [x21]\n"
"b 71f\n"
"70:" // Oddments: Load (4, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 71f\n"
- "ld1 { v26.b }[0], [x22]\n"
+ "tbz x8, #0, 71f\n"
+ "ld1 { v26.b }[0], [x21]\n"
"71:" // Oddments: Load (4, 3): Bit 2: End
"ushll v26.8h, v26.8b, #0x0\n"
- "ldr x21, [x15, #0xb8]\n"
- "smlal v9.4s, v26.4h, v7.4h\n"
- "smlal2 v22.4s, v26.8h, v7.8h\n"
- "add x21, x21, x17\n"
- "tbz x7, #2, 73f\n"
- "ld1 { v25.s }[0], [x21], #0x4\n"
- "tbz x7, #1, 72f\n"
- "ld1 { v25.h }[2], [x21], #0x2\n"
- "tbz x7, #0, 75f\n"
- "ld1 { v25.b }[6], [x21]\n"
+ "ldr x20, [x13, #0xb8]\n"
+ "smlal v23.4s, v26.4h, v7.4h\n"
+ "smlal2 v9.4s, v26.8h, v7.8h\n"
+ "add x20, x20, x15\n"
+ "tbz x8, #2, 73f\n"
+ "ld1 { v25.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 72f\n"
+ "ld1 { v25.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 75f\n"
+ "ld1 { v25.b }[6], [x20]\n"
"b 75f\n"
"72:" // Oddments: Load (4, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 75f\n"
- "ld1 { v25.b }[4], [x21]\n"
+ "tbz x8, #0, 75f\n"
+ "ld1 { v25.b }[4], [x20]\n"
"b 75f\n"
"73:" // Oddments: Load (4, 2): Bit 2: Unset
- "tbz x7, #1, 74f\n"
- "ld1 { v25.h }[0], [x21], #0x2\n"
- "tbz x7, #0, 75f\n"
- "ld1 { v25.b }[2], [x21]\n"
+ "tbz x8, #1, 74f\n"
+ "ld1 { v25.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 75f\n"
+ "ld1 { v25.b }[2], [x20]\n"
"b 75f\n"
"74:" // Oddments: Load (4, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 75f\n"
- "ld1 { v25.b }[0], [x21]\n"
+ "tbz x8, #0, 75f\n"
+ "ld1 { v25.b }[0], [x20]\n"
"75:" // Oddments: Load (4, 2): Bit 2: End
"ushll v25.8h, v25.8b, #0x0\n"
- "ldr x20, [x15, #0xc0]\n"
+ "ldr x19, [x13, #0xc0]\n"
"smlal v11.4s, v25.4h, v8.4h\n"
"smlal2 v10.4s, v25.8h, v8.8h\n"
- "smlal v9.4s, v25.4h, v6.4h\n"
- "smlal2 v22.4s, v25.8h, v6.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 77f\n"
- "ld1 { v29.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 76f\n"
- "ld1 { v29.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 79f\n"
- "ld1 { v29.b }[6], [x20]\n"
+ "smlal v23.4s, v25.4h, v6.4h\n"
+ "smlal2 v9.4s, v25.8h, v6.8h\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 77f\n"
+ "ld1 { v29.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 76f\n"
+ "ld1 { v29.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 79f\n"
+ "ld1 { v29.b }[6], [x19]\n"
"b 79f\n"
"76:" // Oddments: Load (4, 4): Bit 2: Bit 1: Unset
- "tbz x7, #0, 79f\n"
- "ld1 { v29.b }[4], [x20]\n"
+ "tbz x8, #0, 79f\n"
+ "ld1 { v29.b }[4], [x19]\n"
"b 79f\n"
"77:" // Oddments: Load (4, 4): Bit 2: Unset
- "tbz x7, #1, 78f\n"
- "ld1 { v29.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 79f\n"
- "ld1 { v29.b }[2], [x20]\n"
+ "tbz x8, #1, 78f\n"
+ "ld1 { v29.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 79f\n"
+ "ld1 { v29.b }[2], [x19]\n"
"b 79f\n"
"78:" // Oddments: Load (4, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 79f\n"
- "ld1 { v29.b }[0], [x20]\n"
+ "tbz x8, #0, 79f\n"
+ "ld1 { v29.b }[0], [x19]\n"
"79:" // Oddments: Load (4, 4): Bit 2: End
"ushll v29.8h, v29.8b, #0x0\n"
- "smlal v9.4s, v29.4h, v8.4h\n"
- "smlal2 v22.4s, v29.8h, v8.8h\n"
- "tbz x7, #2, 81f\n"
- "ld1 { v18.4s }, [x13], #0x10\n"
- "ld1 { v21.4s }, [x12], #0x10\n"
- "tbz x7, #1, 80f\n"
- "ld1 { v30.d }[0], [x13], #0x8\n"
- "ld1 { v31.d }[0], [x12], #0x8\n"
- "tbz x7, #0, 83f\n"
- "ld1 { v30.s }[2], [x13]\n"
- "ld1 { v31.s }[2], [x12]\n"
+ "smlal v23.4s, v29.4h, v8.4h\n"
+ "smlal2 v9.4s, v29.8h, v8.8h\n"
+ "tbz x8, #2, 81f\n"
+ "ld1 { v22.4s }, [x12], #0x10\n"
+ "ld1 { v31.4s }, [x11], #0x10\n"
+ "tbz x8, #1, 80f\n"
+ "ld1 { v19.d }[0], [x12], #0x8\n"
+ "ld1 { v30.d }[0], [x11], #0x8\n"
+ "tbz x8, #0, 83f\n"
+ "ld1 { v19.s }[2], [x12]\n"
+ "ld1 { v30.s }[2], [x11]\n"
"b 83f\n"
"80:" // Oddments: Load requant params: Bit 2: Bit 1: Unset
- "tbz x7, #0, 83f\n"
- "ld1 { v30.s }[0], [x13]\n"
- "ld1 { v31.s }[0], [x12]\n"
+ "tbz x8, #0, 83f\n"
+ "ld1 { v19.s }[0], [x12]\n"
+ "ld1 { v30.s }[0], [x11]\n"
"b 83f\n"
"81:" // Oddments: Load requant params: Bit 2: Unset
- "tbz x7, #1, 82f\n"
- "ld1 { v18.d }[0], [x13], #0x8\n"
- "ld1 { v21.d }[0], [x12], #0x8\n"
- "tbz x7, #0, 83f\n"
- "ld1 { v18.s }[2], [x13]\n"
- "ld1 { v21.s }[2], [x12]\n"
+ "tbz x8, #1, 82f\n"
+ "ld1 { v22.d }[0], [x12], #0x8\n"
+ "ld1 { v31.d }[0], [x11], #0x8\n"
+ "tbz x8, #0, 83f\n"
+ "ld1 { v22.s }[2], [x12]\n"
+ "ld1 { v31.s }[2], [x11]\n"
"b 83f\n"
"82:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 83f\n"
- "ld1 { v18.s }[0], [x13]\n"
- "ld1 { v21.s }[0], [x12]\n"
+ "tbz x8, #0, 83f\n"
+ "ld1 { v22.s }[0], [x12]\n"
+ "ld1 { v31.s }[0], [x11]\n"
"83:" // Oddments: Load requant params: Bit 2: End
- "sqrdmulh v15.4s, v15.4s, v18.4s\n"
- "and v2.16b, v15.16b, v21.16b\n"
- "add x11, x11, x16\n"
- "add x10, x10, x16\n"
- "sqrdmulh v13.4s, v13.4s, v30.4s\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "add x9, x9, x16\n"
- "add x28, x28, x16\n"
- "and v16.16b, v13.16b, v31.16b\n"
- "sqrdmulh v17.4s, v17.4s, v18.4s\n"
- "sqrdmulh v11.4s, v11.4s, v18.4s\n"
- "sqrdmulh v9.4s, v9.4s, v18.4s\n"
- "sqadd v15.4s, v15.4s, v2.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "and v18.16b, v17.16b, v21.16b\n"
- "sqrdmulh v20.4s, v20.4s, v30.4s\n"
- "and v28.16b, v11.16b, v21.16b\n"
- "sqrdmulh v10.4s, v10.4s, v30.4s\n"
- "and v2.16b, v9.16b, v21.16b\n"
- "sqrdmulh v22.4s, v22.4s, v30.4s\n"
- "sqadd v13.4s, v13.4s, v16.4s\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "and v4.16b, v20.16b, v31.16b\n"
- "sshr v28.4s, v28.4s, #0x1f\n"
- "and v3.16b, v10.16b, v31.16b\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "and v16.16b, v22.16b, v31.16b\n"
- "sqadd v17.4s, v17.4s, v18.4s\n"
+ "sqrdmulh v15.4s, v15.4s, v22.4s\n"
+ "sqrdmulh v13.4s, v13.4s, v22.4s\n"
+ "add x10, x10, x14\n"
+ "add x9, x9, x14\n"
+ "sqrdmulh v11.4s, v11.4s, v22.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v22.4s\n"
+ "add x28, x28, x14\n"
+ "add x27, x27, x14\n"
+ "and v4.16b, v15.16b, v31.16b\n"
+ "sqrdmulh v18.4s, v18.4s, v19.4s\n"
+ "and v1.16b, v13.16b, v31.16b\n"
+ "sqrdmulh v17.4s, v17.4s, v19.4s\n"
+ "and v22.16b, v11.16b, v31.16b\n"
+ "sqrdmulh v10.4s, v10.4s, v19.4s\n"
+ "and v20.16b, v23.16b, v31.16b\n"
+ "sqrdmulh v9.4s, v9.4s, v19.4s\n"
"sshr v4.4s, v4.4s, #0x1f\n"
- "sqadd v11.4s, v11.4s, v28.4s\n"
- "sshr v3.4s, v3.4s, #0x1f\n"
- "sqadd v9.4s, v9.4s, v2.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "srshl v15.4s, v15.4s, v21.4s\n"
- "srshl v17.4s, v17.4s, v21.4s\n"
- "sqadd v20.4s, v20.4s, v4.4s\n"
- "srshl v11.4s, v11.4s, v21.4s\n"
- "sqadd v10.4s, v10.4s, v3.4s\n"
- "srshl v9.4s, v9.4s, v21.4s\n"
- "sqadd v22.4s, v22.4s, v16.4s\n"
+ "and v19.16b, v18.16b, v30.16b\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
+ "and v27.16b, v17.16b, v30.16b\n"
+ "sshr v22.4s, v22.4s, #0x1f\n"
+ "and v25.16b, v10.16b, v30.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v0.16b, v9.16b, v30.16b\n"
+ "sqadd v15.4s, v15.4s, v4.4s\n"
+ "sshr v19.4s, v19.4s, #0x1f\n"
+ "sqadd v13.4s, v13.4s, v1.4s\n"
+ "sshr v27.4s, v27.4s, #0x1f\n"
+ "sqadd v11.4s, v11.4s, v22.4s\n"
+ "sshr v25.4s, v25.4s, #0x1f\n"
+ "sqadd v23.4s, v23.4s, v20.4s\n"
+ "sshr v0.4s, v0.4s, #0x1f\n"
+ "srshl v15.4s, v15.4s, v31.4s\n"
+ "sqadd v18.4s, v18.4s, v19.4s\n"
"srshl v13.4s, v13.4s, v31.4s\n"
+ "sqadd v17.4s, v17.4s, v27.4s\n"
+ "srshl v11.4s, v11.4s, v31.4s\n"
+ "sqadd v10.4s, v10.4s, v25.4s\n"
+ "srshl v23.4s, v23.4s, v31.4s\n"
+ "sqadd v9.4s, v9.4s, v0.4s\n"
+ "srshl v18.4s, v18.4s, v30.4s\n"
"sqxtn v15.4h, v15.4s\n"
- "srshl v20.4s, v20.4s, v31.4s\n"
- "sqxtn v17.4h, v17.4s\n"
- "srshl v10.4s, v10.4s, v31.4s\n"
+ "srshl v17.4s, v17.4s, v30.4s\n"
+ "sqxtn v13.4h, v13.4s\n"
+ "srshl v10.4s, v10.4s, v30.4s\n"
"sqxtn v11.4h, v11.4s\n"
- "srshl v22.4s, v22.4s, v31.4s\n"
- "sqxtn v9.4h, v9.4s\n"
- "sqxtn2 v15.8h, v13.4s\n"
- "sqxtn2 v17.8h, v20.4s\n"
+ "srshl v9.4s, v9.4s, v30.4s\n"
+ "sqxtn v23.4h, v23.4s\n"
+ "sqxtn2 v15.8h, v18.4s\n"
+ "sqxtn2 v13.8h, v17.4s\n"
"sqxtn2 v11.8h, v10.4s\n"
- "sqxtn2 v9.8h, v22.4s\n"
+ "sqxtn2 v23.8h, v9.4s\n"
"sqadd v15.8h, v15.8h, v12.8h\n"
- "sqadd v17.8h, v17.8h, v12.8h\n"
+ "sqadd v13.8h, v13.8h, v12.8h\n"
"sqadd v11.8h, v11.8h, v12.8h\n"
- "sqadd v9.8h, v9.8h, v12.8h\n"
+ "sqadd v23.8h, v23.8h, v12.8h\n"
"smax v15.8h, v15.8h, v14.8h\n"
- "smax v17.8h, v17.8h, v14.8h\n"
+ "smax v13.8h, v13.8h, v14.8h\n"
"smax v11.8h, v11.8h, v14.8h\n"
- "smax v9.8h, v9.8h, v14.8h\n"
- "smin v15.8h, v15.8h, v23.8h\n"
- "smin v17.8h, v17.8h, v23.8h\n"
- "smin v11.8h, v11.8h, v23.8h\n"
- "smin v9.8h, v9.8h, v23.8h\n"
+ "smax v23.8h, v23.8h, v14.8h\n"
+ "smin v15.8h, v15.8h, v21.8h\n"
+ "smin v13.8h, v13.8h, v21.8h\n"
+ "smin v11.8h, v11.8h, v21.8h\n"
+ "smin v23.8h, v23.8h, v21.8h\n"
"uzp1 v15.16b, v15.16b, v15.16b\n"
- "uzp1 v17.16b, v17.16b, v17.16b\n"
+ "uzp1 v13.16b, v13.16b, v13.16b\n"
"uzp1 v11.16b, v11.16b, v11.16b\n"
- "uzp1 v9.16b, v9.16b, v9.16b\n"
- "tbz x7, #2, 85f\n"
- "st1 { v15.s }[0], [x11], #0x4\n"
- "st1 { v17.s }[0], [x10], #0x4\n"
- "st1 { v11.s }[0], [x9], #0x4\n"
- "st1 { v9.s }[0], [x28], #0x4\n"
- "tbz x7, #1, 84f\n"
- "st1 { v15.h }[2], [x11], #0x2\n"
- "st1 { v17.h }[2], [x10], #0x2\n"
- "st1 { v11.h }[2], [x9], #0x2\n"
- "st1 { v9.h }[2], [x28], #0x2\n"
- "tbz x7, #0, 87f\n"
- "st1 { v15.b }[6], [x11], #0x1\n"
- "st1 { v17.b }[6], [x10], #0x1\n"
- "st1 { v11.b }[6], [x9], #0x1\n"
- "st1 { v9.b }[6], [x28], #0x1\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "tbz x8, #2, 85f\n"
+ "st1 { v15.s }[0], [x10], #0x4\n"
+ "st1 { v13.s }[0], [x9], #0x4\n"
+ "st1 { v11.s }[0], [x28], #0x4\n"
+ "st1 { v23.s }[0], [x27], #0x4\n"
+ "tbz x8, #1, 84f\n"
+ "st1 { v15.h }[2], [x10], #0x2\n"
+ "st1 { v13.h }[2], [x9], #0x2\n"
+ "st1 { v11.h }[2], [x28], #0x2\n"
+ "st1 { v23.h }[2], [x27], #0x2\n"
+ "tbz x8, #0, 87f\n"
+ "st1 { v15.b }[6], [x10], #0x1\n"
+ "st1 { v13.b }[6], [x9], #0x1\n"
+ "st1 { v11.b }[6], [x28], #0x1\n"
+ "st1 { v23.b }[6], [x27], #0x1\n"
"b 87f\n"
"84:" // Oddments: Bit 2: Bit 1: Unset
- "tbz x7, #0, 87f\n"
- "st1 { v15.b }[4], [x11], #0x1\n"
- "st1 { v17.b }[4], [x10], #0x1\n"
- "st1 { v11.b }[4], [x9], #0x1\n"
- "st1 { v9.b }[4], [x28], #0x1\n"
+ "tbz x8, #0, 87f\n"
+ "st1 { v15.b }[4], [x10], #0x1\n"
+ "st1 { v13.b }[4], [x9], #0x1\n"
+ "st1 { v11.b }[4], [x28], #0x1\n"
+ "st1 { v23.b }[4], [x27], #0x1\n"
"b 87f\n"
"85:" // Oddments: Bit 2: Unset
- "tbz x7, #1, 86f\n"
- "st1 { v15.h }[0], [x11], #0x2\n"
- "st1 { v17.h }[0], [x10], #0x2\n"
- "st1 { v11.h }[0], [x9], #0x2\n"
- "st1 { v9.h }[0], [x28], #0x2\n"
- "tbz x7, #0, 87f\n"
- "st1 { v15.b }[2], [x11], #0x1\n"
- "st1 { v17.b }[2], [x10], #0x1\n"
- "st1 { v11.b }[2], [x9], #0x1\n"
- "st1 { v9.b }[2], [x28], #0x1\n"
+ "tbz x8, #1, 86f\n"
+ "st1 { v15.h }[0], [x10], #0x2\n"
+ "st1 { v13.h }[0], [x9], #0x2\n"
+ "st1 { v11.h }[0], [x28], #0x2\n"
+ "st1 { v23.h }[0], [x27], #0x2\n"
+ "tbz x8, #0, 87f\n"
+ "st1 { v15.b }[2], [x10], #0x1\n"
+ "st1 { v13.b }[2], [x9], #0x1\n"
+ "st1 { v11.b }[2], [x28], #0x1\n"
+ "st1 { v23.b }[2], [x27], #0x1\n"
"b 87f\n"
"86:" // Oddments: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 87f\n"
- "st1 { v15.b }[0], [x11], #0x1\n"
- "st1 { v17.b }[0], [x10], #0x1\n"
- "st1 { v11.b }[0], [x9], #0x1\n"
- "st1 { v9.b }[0], [x28], #0x1\n"
+ "tbz x8, #0, 87f\n"
+ "st1 { v15.b }[0], [x10], #0x1\n"
+ "st1 { v13.b }[0], [x9], #0x1\n"
+ "st1 { v11.b }[0], [x28], #0x1\n"
+ "st1 { v23.b }[0], [x27], #0x1\n"
"87:" // Oddments: Bit 2: End
"88:" // End
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
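
[Editor's note between the two kernel diffs: both versions above share the same fixed-point requantization epilogue, the sqrdmulh / and / sshr / sqadd / srshl run followed by sqxtn narrowing, the c_offset add, and the smax/smin clamp. The per-channel shift is stored negated so that srshl performs a rounding right shift, and the and/sshr/sqadd trio is the usual fixup that makes halfway values on negative inputs round away from zero. As a rough scalar model of what those instructions compute, here is a minimal C++ sketch; every name in it (requantize_lane, acc, mul, neg_shift, c_offset, minval, maxval, load_tail) is an illustrative assumption, not an identifier from this file, and the intermediate 16-bit sqxtn step is collapsed into the final clamp.]

#include <algorithm>
#include <cstddef>
#include <cstdint>

// SQRDMULH: high 32 bits of 2*a*b with rounding; the only saturating
// case is INT32_MIN * INT32_MIN.
static int32_t rounding_doubling_high_mul(int32_t a, int32_t b)
{
    if (a == INT32_MIN && b == INT32_MIN) return INT32_MAX;
    const int64_t prod = static_cast<int64_t>(a) * b + (INT64_C(1) << 30);
    return static_cast<int32_t>(prod >> 31);
}

// Models the and/sshr/sqadd fixup followed by SRSHL with a negative
// (i.e. rightward) shift amount, as emitted in the epilogues above.
static int32_t rounding_right_shift(int32_t x, int32_t neg_shift)
{
    const int exponent = -neg_shift;
    if (exponent <= 0) return x;
    const int32_t fixup = (x & neg_shift) >> 31;              // and + sshr #31: -1 only for negative x
    const int64_t fixed = std::max<int64_t>(INT32_MIN,
                            static_cast<int64_t>(x) + fixup); // sqadd (saturating)
    return static_cast<int32_t>((fixed + (INT64_C(1) << (exponent - 1))) >> exponent); // srshl
}

// One output lane: per-channel multiplier and shift, then the
// Requantize32 c_offset/minval/maxval stage, then narrowing to u8.
static uint8_t requantize_lane(int32_t acc, int32_t mul, int32_t neg_shift,
                               int32_t c_offset, int32_t minval, int32_t maxval)
{
    int32_t v = rounding_doubling_high_mul(acc, mul);   // sqrdmulh
    v = rounding_right_shift(v, neg_shift);             // fixup + srshl
    v = std::clamp(v + c_offset, minval, maxval);       // sqadd / smax / smin
    return static_cast<uint8_t>(v);                     // sqxtn + uzp1 + store, collapsed
}

[The "Oddments" blocks likewise follow one fixed pattern: the low bits of the leftover channel count select a 4-, 2- and/or 1-element transfer via the tbz/ld1 (or tbz/st1) ladders. A hypothetical scalar equivalent, again with made-up names:]

// Bits 2/1/0 of the remainder pick the partial loads, mirroring the
// "tbz x8, #2/#1/#0" branches above. Hypothetical helper, not from this file.
static void load_tail(const uint8_t *src, uint8_t *dst, size_t n)
{
    size_t i = 0;
    if (n & 4) { for (int k = 0; k < 4; ++k) dst[i + k] = src[i + k]; i += 4; } // ld1 { .s }
    if (n & 2) { for (int k = 0; k < 2; ++k) dst[i + k] = src[i + k]; i += 2; } // ld1 { .h }
    if (n & 1) { dst[i] = src[i]; }                                             // ld1 { .b }
}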
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
index eec3ba5900..9ac7173b4c 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -111,1255 +111,1255 @@ void a64_u8qa_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
+ "ldr x16, [%x[params], %[offsetof_Params_requant]]\n"
"ldr x4, [%x[params], %[offsetof_Params_n_channels]]\n"
- "ldr x22, [%x[params], %[offsetof_Params_requant]]\n"
- "lsr x9, x4, #0x3\n"
- "add x24, x22, %[offsetof_Requantize32_b_offset]\n"
- "ld1r { v9.16b }, [x24]\n"
- "ldr x25, [%x[params], %[offsetof_Params_outptrs]]\n"
- "add x28, x22, %[offsetof_Requantize32_c_offset]\n"
- "add x24, x22, %[offsetof_Requantize32_minval]\n"
- "ld1r { v15.8h }, [x28]\n"
- "ld1r { v14.8h }, [x24]\n"
- "add x20, x22, %[offsetof_Requantize32_maxval]\n"
- "mov x3, #0x0\n"
- "ld1r { v12.8h }, [x20]\n"
+ "add x9, x16, %[offsetof_Requantize32_b_offset]\n"
+ "add x19, x16, %[offsetof_Requantize32_c_offset]\n"
+ "ldr x10, [%x[params], %[offsetof_Params_outptrs]]\n"
+ "add x24, x16, %[offsetof_Requantize32_minval]\n"
+ "add x2, x16, %[offsetof_Requantize32_maxval]\n"
+ "ldr x8, [%x[params], %[offsetof_Params_weights]]\n"
+ "ld1r { v15.16b }, [x9]\n"
+ "ld1r { v16.8h }, [x19]\n"
+ "lsr x3, x4, #0x3\n"
"mov x1, #0x0\n"
- "add x2, %x[params], %[offsetof_Params_inptrs]\n"
- "ldr x0, [%x[params], %[offsetof_Params_weights]]\n"
- "ldr x6, [%x[params], %[offsetof_Params_requant_muls]]\n"
- "ldr x5, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "ldp x7, x8, [x25, #0x0]\n"
- "ldp x17, x16, [x25, #0x10]\n"
- "cbz x9, 3f\n"
- "ldr d0, [x0, #0x0]\n"
- "ldr d1, [x0, #0x8]\n"
- "subs x9, x9, #0x1\n"
- "usubl v0.8h, v0.8b, v9.8b\n"
- "ldr d2, [x0, #0x10]\n"
- "ldr d3, [x0, #0x18]\n"
- "usubl v1.8h, v1.8b, v9.8b\n"
- "usubl v2.8h, v2.8b, v9.8b\n"
- "ldr d4, [x0, #0x20]\n"
- "ldr x13, [%x[params], %[offsetof_Params_bias]]\n"
- "usubl v3.8h, v3.8b, v9.8b\n"
- "usubl v4.8h, v4.8b, v9.8b\n"
- "ldr q11, [x13, #0x0]\n"
- "ldr q13, [x13, #0x10]\n"
- "add x13, x13, #0x20\n"
- "str x13, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x10, x28, [x2, #0x0]\n"
- "ldp x27, x26, [x2, #0x10]\n"
- "mov v20.16b, v11.16b\n"
- "mov v19.16b, v13.16b\n"
- "ldp x25, x24, [x2, #0x20]\n"
- "ldp x23, x22, [x2, #0x30]\n"
- "mov v8.16b, v11.16b\n"
- "mov v7.16b, v13.16b\n"
- "ldp x21, x20, [x2, #0x40]\n"
- "ldr d31, [x10, x3]\n"
- "mov v6.16b, v11.16b\n"
- "mov v5.16b, v13.16b\n"
- "ldr d30, [x28, x3]\n"
- "ldr d29, [x27, x3]\n"
+ "ld1r { v12.8h }, [x24]\n"
+ "ld1r { v13.8h }, [x2]\n"
+ "mov x2, #0x0\n"
+ "add x0, %x[params], %[offsetof_Params_inptrs]\n"
+ "ldr x5, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "ldr x6, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "ldp x21, x15, [x10, #0x0]\n"
+ "ldp x17, x16, [x10, #0x10]\n"
+ "cbz x3, 3f\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr q11, [x19, #0x0]\n"
+ "subs x3, x3, #0x1\n"
+ "mov v14.16b, v11.16b\n"
+ "ldr q21, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr d0, [x8, #0x0]\n"
+ "ldr d1, [x8, #0x8]\n"
+ "ldr d2, [x8, #0x10]\n"
+ "mov v10.16b, v21.16b\n"
+ "mov v9.16b, v11.16b\n"
+ "ldr d3, [x8, #0x18]\n"
+ "ldr d4, [x8, #0x20]\n"
+ "mov v8.16b, v21.16b\n"
+ "mov v7.16b, v11.16b\n"
+ "ldp x28, x27, [x0, #0x0]\n"
+ "ldp x10, x26, [x0, #0x10]\n"
+ "mov v6.16b, v21.16b\n"
+ "usubl v0.8h, v0.8b, v15.8b\n"
+ "ldp x24, x23, [x0, #0x20]\n"
+ "ldp x22, x25, [x0, #0x30]\n"
+ "usubl v1.8h, v1.8b, v15.8b\n"
+ "usubl v2.8h, v2.8b, v15.8b\n"
+ "ldp x20, x19, [x0, #0x40]\n"
+ "ldr d31, [x28, x1]\n"
+ "usubl v3.8h, v3.8b, v15.8b\n"
+ "usubl v4.8h, v4.8b, v15.8b\n"
+ "ldr d30, [x27, x1]\n"
+ "ldr d29, [x10, x1]\n"
"ushll v31.8h, v31.8b, #0x0\n"
"ushll v30.8h, v30.8b, #0x0\n"
- "ldr d28, [x26, x3]\n"
- "ldr d27, [x25, x3]\n"
+ "ldr d28, [x26, x1]\n"
+ "ldr d27, [x24, x1]\n"
"ushll v29.8h, v29.8b, #0x0\n"
"ushll v28.8h, v28.8b, #0x0\n"
- "ldr d23, [x24, x3]\n"
- "ldr d25, [x23, x3]\n"
+ "ldr d23, [x23, x1]\n"
+ "ldr d25, [x22, x1]\n"
"ushll v27.8h, v27.8b, #0x0\n"
"ushll v23.8h, v23.8b, #0x0\n"
- "ldr d24, [x22, x3]\n"
- "ldr d26, [x21, x3]\n"
+ "ldr d24, [x25, x1]\n"
+ "ldr d26, [x20, x1]\n"
"ushll v25.8h, v25.8b, #0x0\n"
"ushll v24.8h, v24.8b, #0x0\n"
- "ldr d22, [x20, x3]\n"
+ "ldr d22, [x19, x1]\n"
"ushll v26.8h, v26.8b, #0x0\n"
"ushll v22.8h, v22.8b, #0x0\n"
"beq 2f\n"
"1:" // Loop
- "ldr q18, [x6, #0x0]\n"
- "ldr q21, [x5, #0x0]\n"
"smlal v11.4s, v31.4h, v0.4h\n"
- "smlal2 v13.4s, v31.8h, v0.8h\n"
- "ldr q16, [x6, #0x10]\n"
- "ldr q10, [x5, #0x10]\n"
- "smlal v11.4s, v30.4h, v1.4h\n"
- "smlal v20.4s, v30.4h, v0.4h\n"
- "ldr x20, [x2, #0x50]\n"
- "smlal v8.4s, v29.4h, v0.4h\n"
- "smlal v6.4s, v28.4h, v0.4h\n"
- "ldr x22, [x2, #0x58]\n"
- "smlal2 v13.4s, v30.8h, v1.8h\n"
- "smlal2 v19.4s, v30.8h, v0.8h\n"
- "ldr d31, [x20, x3]\n"
+ "smlal2 v21.4s, v31.8h, v0.8h\n"
+ "ldr x19, [x0, #0x50]\n"
+ "ldr d31, [x19, x1]\n"
+ "smlal v14.4s, v30.4h, v0.4h\n"
+ "smlal v9.4s, v29.4h, v0.4h\n"
+ "ldr x20, [x0, #0x58]\n"
"ushll v31.8h, v31.8b, #0x0\n"
- "smlal2 v7.4s, v29.8h, v0.8h\n"
- "smlal v11.4s, v27.4h, v2.4h\n"
- "ldr x21, [x2, #0x60]\n"
- "ldr x20, [x2, #0x68]\n"
- "smlal2 v5.4s, v28.8h, v0.8h\n"
- "ldr d30, [x22, x3]\n"
- "smlal v20.4s, v27.4h, v1.4h\n"
+ "smlal v7.4s, v28.4h, v0.4h\n"
+ "smlal2 v10.4s, v30.8h, v0.8h\n"
+ "ldr x19, [x0, #0x60]\n"
+ "ldr x24, [x0, #0x68]\n"
+ "smlal2 v8.4s, v29.8h, v0.8h\n"
+ "smlal v11.4s, v30.4h, v1.4h\n"
+ "ldr x23, [x0, #0x70]\n"
+ "ldr x26, [x0, #0x78]\n"
+ "smlal2 v21.4s, v30.8h, v1.8h\n"
+ "smlal2 v6.4s, v28.8h, v0.8h\n"
+ "ldr d30, [x20, x1]\n"
"ushll v30.8h, v30.8b, #0x0\n"
- "smlal v8.4s, v28.4h, v1.4h\n"
- "smlal v6.4s, v23.4h, v1.4h\n"
- "ldr x25, [x2, #0x70]\n"
- "ldr x26, [x2, #0x78]\n"
- "smlal2 v13.4s, v27.8h, v2.8h\n"
- "smlal2 v19.4s, v27.8h, v1.8h\n"
- "ldr d0, [x0, #0x28]\n"
- "usubl v0.8h, v0.8b, v9.8b\n"
- "smlal2 v7.4s, v28.8h, v1.8h\n"
- "smlal v11.4s, v25.4h, v3.4h\n"
- "ldr x23, [x2, #0x80]\n"
- "ldr x24, [x2, #0x88]\n"
- "smlal2 v5.4s, v23.8h, v1.8h\n"
- "ldr d27, [x21, x3]\n"
- "smlal v20.4s, v25.4h, v2.4h\n"
+ "smlal v14.4s, v27.4h, v1.4h\n"
+ "smlal v9.4s, v28.4h, v1.4h\n"
+ "ldr d0, [x8, #0x28]\n"
+ "usubl v0.8h, v0.8b, v15.8b\n"
+ "smlal v7.4s, v23.4h, v1.4h\n"
+ "smlal2 v10.4s, v27.8h, v1.8h\n"
+ "ldr x7, [x0, #0x80]\n"
+ "ldr x22, [x0, #0x88]\n"
+ "smlal2 v8.4s, v28.8h, v1.8h\n"
+ "smlal v11.4s, v27.4h, v2.4h\n"
+ "ldr x20, [x0, #0x90]\n"
+ "ldr x14, [x0, #0x98]\n"
+ "smlal2 v21.4s, v27.8h, v2.8h\n"
+ "smlal2 v6.4s, v23.8h, v1.8h\n"
+ "ldr d27, [x19, x1]\n"
"ushll v27.8h, v27.8b, #0x0\n"
- "smlal v8.4s, v23.4h, v2.4h\n"
- "smlal v6.4s, v31.4h, v2.4h\n"
- "ldr x15, [x2, #0x90]\n"
- "ldr x21, [x2, #0x98]\n"
- "smlal2 v13.4s, v25.8h, v3.8h\n"
- "smlal2 v19.4s, v25.8h, v2.8h\n"
- "ldr d1, [x0, #0x30]\n"
- "usubl v1.8h, v1.8b, v9.8b\n"
- "smlal2 v7.4s, v23.8h, v2.8h\n"
- "smlal v11.4s, v24.4h, v4.4h\n"
- "ldr x14, [x2, #0xa0]\n"
- "ldr x13, [x2, #0xa8]\n"
- "smlal2 v5.4s, v31.8h, v2.8h\n"
- "ldr d25, [x20, x3]\n"
- "smlal v20.4s, v24.4h, v3.4h\n"
+ "smlal v14.4s, v25.4h, v2.4h\n"
+ "smlal v9.4s, v23.4h, v2.4h\n"
+ "ldr d1, [x8, #0x30]\n"
+ "usubl v1.8h, v1.8b, v15.8b\n"
+ "smlal v7.4s, v31.4h, v2.4h\n"
+ "smlal2 v10.4s, v25.8h, v2.8h\n"
+ "ldr x19, [x0, #0xa0]\n"
+ "ldr x13, [x0, #0xa8]\n"
+ "smlal2 v8.4s, v23.8h, v2.8h\n"
+ "smlal v11.4s, v25.4h, v3.4h\n"
+ "ldr x12, [x0, #0xb0]\n"
+ "ldr x11, [x0, #0xb8]\n"
+ "smlal2 v21.4s, v25.8h, v3.8h\n"
+ "smlal2 v6.4s, v31.8h, v2.8h\n"
+ "ldr d25, [x24, x1]\n"
"ushll v25.8h, v25.8b, #0x0\n"
- "smlal v8.4s, v31.4h, v3.4h\n"
- "smlal v6.4s, v30.4h, v3.4h\n"
- "ldr x12, [x2, #0xb0]\n"
- "ldr x20, [x2, #0xb8]\n"
- "smlal2 v13.4s, v24.8h, v4.8h\n"
- "smlal2 v19.4s, v24.8h, v3.8h\n"
- "ldr d2, [x0, #0x38]\n"
- "usubl v2.8h, v2.8b, v9.8b\n"
- "smlal2 v7.4s, v31.8h, v3.8h\n"
- "smlal v11.4s, v29.4h, v0.4h\n"
- "ldr x11, [x2, #0xc0]\n"
- "ldr x10, [x2, #0xc8]\n"
- "smlal2 v5.4s, v30.8h, v3.8h\n"
- "ldr d24, [x25, x3]\n"
- "smlal v20.4s, v27.4h, v4.4h\n"
+ "smlal v14.4s, v24.4h, v3.4h\n"
+ "smlal v9.4s, v31.4h, v3.4h\n"
+ "ldr d2, [x8, #0x38]\n"
+ "usubl v2.8h, v2.8b, v15.8b\n"
+ "smlal v7.4s, v30.4h, v3.4h\n"
+ "smlal2 v10.4s, v24.8h, v3.8h\n"
+ "ldr x10, [x0, #0xc0]\n"
+ "ldr x9, [x0, #0xc8]\n"
+ "smlal2 v8.4s, v31.8h, v3.8h\n"
+ "smlal v11.4s, v24.4h, v4.4h\n"
+ "ldr x28, [x0, #0xd0]\n"
+ "ldr x27, [x0, #0xd8]\n"
+ "smlal2 v21.4s, v24.8h, v4.8h\n"
+ "smlal2 v6.4s, v30.8h, v3.8h\n"
+ "ldr d24, [x23, x1]\n"
"ushll v24.8h, v24.8b, #0x0\n"
- "smlal v8.4s, v30.4h, v4.4h\n"
- "smlal v6.4s, v26.4h, v4.4h\n"
- "ldr x22, [x2, #0xd0]\n"
- "ldr x28, [x2, #0xd8]\n"
- "smlal2 v13.4s, v29.8h, v0.8h\n"
- "ldr d3, [x0, #0x40]\n"
- "smlal2 v19.4s, v27.8h, v4.8h\n"
- "ldr d27, [x26, x3]\n"
- "smlal2 v7.4s, v30.8h, v4.8h\n"
- "smlal v11.4s, v28.4h, v1.4h\n"
- "usubl v3.8h, v3.8b, v9.8b\n"
- "ldr x27, [x2, #0xe0]\n"
- "smlal2 v5.4s, v26.8h, v4.8h\n"
- "ldr d4, [x0, #0x48]\n"
- "smlal v20.4s, v28.4h, v0.4h\n"
+ "smlal v14.4s, v27.4h, v4.4h\n"
+ "smlal v9.4s, v30.4h, v4.4h\n"
+ "ldr d3, [x8, #0x40]\n"
+ "usubl v3.8h, v3.8b, v15.8b\n"
+ "smlal v7.4s, v26.4h, v4.4h\n"
+ "smlal2 v10.4s, v27.8h, v4.8h\n"
+ "ldr d27, [x26, x1]\n"
"ushll v27.8h, v27.8b, #0x0\n"
- "smlal v8.4s, v22.4h, v0.4h\n"
- "smlal v6.4s, v25.4h, v0.4h\n"
- "usubl v4.8h, v4.8b, v9.8b\n"
- "ldr x26, [x2, #0xe8]\n"
- "smlal2 v13.4s, v28.8h, v1.8h\n"
- "smlal2 v19.4s, v28.8h, v0.8h\n"
- "ldr d28, [x24, x3]\n"
+ "smlal2 v8.4s, v30.8h, v4.8h\n"
+ "smlal v11.4s, v29.4h, v0.4h\n"
+ "ldr x26, [x0, #0xe0]\n"
+ "ldr x25, [x0, #0xe8]\n"
+ "smlal2 v21.4s, v29.8h, v0.8h\n"
+ "smlal2 v6.4s, v26.8h, v4.8h\n"
+ "ldr d4, [x8, #0x48]\n"
+ "usubl v4.8h, v4.8b, v15.8b\n"
+ "smlal v14.4s, v28.4h, v0.4h\n"
+ "smlal v9.4s, v22.4h, v0.4h\n"
+ "ldr x24, [x0, #0xf0]\n"
+ "ldr q17, [x5, #0x0]\n"
+ "smlal v7.4s, v25.4h, v0.4h\n"
+ "smlal2 v10.4s, v28.8h, v0.8h\n"
+ "ldr q5, [x6, #0x0]\n"
+ "ldr q18, [x5, #0x10]\n"
+ "smlal2 v8.4s, v22.8h, v0.8h\n"
+ "smlal v11.4s, v28.4h, v1.4h\n"
+ "ldr q29, [x6, #0x10]\n"
+ "subs x3, x3, #0x1\n"
+ "smlal2 v21.4s, v28.8h, v1.8h\n"
+ "smlal2 v6.4s, v25.8h, v0.8h\n"
+ "ldr d28, [x22, x1]\n"
+ "ldr d0, [x8, #0x50]\n"
+ "smlal v14.4s, v23.4h, v1.4h\n"
+ "smlal v9.4s, v25.4h, v1.4h\n"
"ushll v28.8h, v28.8b, #0x0\n"
- "smlal2 v7.4s, v22.8h, v0.8h\n"
+ "ldr x23, [x0, #0xf8]\n"
+ "smlal v7.4s, v24.4h, v1.4h\n"
+ "smlal2 v10.4s, v23.8h, v1.8h\n"
+ "usubl v0.8h, v0.8b, v15.8b\n"
+ "add x5, x5, #0x20\n"
+ "smlal2 v8.4s, v25.8h, v1.8h\n"
"smlal v11.4s, v23.4h, v2.4h\n"
- "ldr x25, [x2, #0xf0]\n"
- "subs x9, x9, #0x1\n"
- "smlal2 v5.4s, v25.8h, v0.8h\n"
- "ldr d0, [x0, #0x50]\n"
- "smlal v20.4s, v23.4h, v1.4h\n"
- "usubl v0.8h, v0.8b, v9.8b\n"
- "smlal v8.4s, v25.4h, v1.4h\n"
- "smlal v6.4s, v24.4h, v1.4h\n"
"add x6, x6, #0x20\n"
- "add x5, x5, #0x20\n"
- "smlal2 v13.4s, v23.8h, v2.8h\n"
- "smlal2 v19.4s, v23.8h, v1.8h\n"
- "ldr d23, [x23, x3]\n"
+ "smlal2 v21.4s, v23.8h, v2.8h\n"
+ "ldr d23, [x7, x1]\n"
+ "smlal2 v6.4s, v24.8h, v1.8h\n"
"ushll v23.8h, v23.8b, #0x0\n"
- "smlal2 v7.4s, v25.8h, v1.8h\n"
+ "smlal v14.4s, v31.4h, v2.4h\n"
+ "smlal v9.4s, v24.4h, v2.4h\n"
+ "ldr d1, [x8, #0x58]\n"
+ "usubl v1.8h, v1.8b, v15.8b\n"
+ "smlal v7.4s, v27.4h, v2.4h\n"
+ "smlal2 v10.4s, v31.8h, v2.8h\n"
+ "ldr x22, [x0, #0x100]\n"
+ "smlal2 v8.4s, v24.8h, v2.8h\n"
"smlal v11.4s, v31.4h, v3.4h\n"
- "ldr x24, [x2, #0xf8]\n"
- "smlal2 v5.4s, v24.8h, v1.8h\n"
- "ldr d1, [x0, #0x58]\n"
- "smlal v20.4s, v31.4h, v2.4h\n"
- "usubl v1.8h, v1.8b, v9.8b\n"
- "smlal v8.4s, v24.4h, v2.4h\n"
- "smlal v6.4s, v27.4h, v2.4h\n"
- "smlal2 v13.4s, v31.8h, v3.8h\n"
- "smlal2 v19.4s, v31.8h, v2.8h\n"
- "ldr d31, [x15, x3]\n"
+ "smlal2 v21.4s, v31.8h, v3.8h\n"
+ "smlal2 v6.4s, v27.8h, v2.8h\n"
+ "ldr d31, [x20, x1]\n"
"ushll v31.8h, v31.8b, #0x0\n"
- "smlal2 v7.4s, v24.8h, v2.8h\n"
+ "smlal v14.4s, v30.4h, v3.4h\n"
+ "smlal v9.4s, v27.4h, v3.4h\n"
+ "ldr d2, [x8, #0x60]\n"
+ "usubl v2.8h, v2.8b, v15.8b\n"
+ "smlal v7.4s, v23.4h, v3.4h\n"
+ "smlal2 v10.4s, v30.8h, v3.8h\n"
+ "ldr x7, [x0, #0x108]\n"
+ "smlal2 v8.4s, v27.8h, v3.8h\n"
"smlal v11.4s, v30.4h, v4.4h\n"
- "ldr x23, [x2, #0x100]\n"
- "smlal2 v5.4s, v27.8h, v2.8h\n"
- "ldr d2, [x0, #0x60]\n"
- "smlal v20.4s, v30.4h, v3.4h\n"
- "usubl v2.8h, v2.8b, v9.8b\n"
- "smlal v8.4s, v27.4h, v3.4h\n"
- "smlal v6.4s, v23.4h, v3.4h\n"
- "smlal2 v13.4s, v30.8h, v4.8h\n"
- "smlal2 v19.4s, v30.8h, v3.8h\n"
- "ldr d30, [x21, x3]\n"
+ "smlal2 v21.4s, v30.8h, v4.8h\n"
+ "ldr d30, [x14, x1]\n"
+ "smlal2 v6.4s, v23.8h, v3.8h\n"
"ushll v30.8h, v30.8b, #0x0\n"
- "smlal2 v7.4s, v27.8h, v3.8h\n"
- "smlal v11.4s, v22.4h, v0.4h\n"
- "ldr x15, [x2, #0x108]\n"
- "smlal2 v5.4s, v23.8h, v3.8h\n"
- "ldr d3, [x0, #0x68]\n"
- "smlal v20.4s, v26.4h, v4.4h\n"
- "usubl v3.8h, v3.8b, v9.8b\n"
- "smlal v8.4s, v23.4h, v4.4h\n"
- "smlal v6.4s, v28.4h, v4.4h\n"
- "smlal2 v13.4s, v22.8h, v0.8h\n"
- "ldr d22, [x20, x3]\n"
- "smlal2 v19.4s, v26.8h, v4.8h\n"
- "ldr d26, [x14, x3]\n"
- "smlal2 v7.4s, v23.8h, v4.8h\n"
- "smlal v11.4s, v25.4h, v1.4h\n"
+ "smlal v14.4s, v26.4h, v4.4h\n"
+ "smlal v9.4s, v23.4h, v4.4h\n"
+ "ldr d3, [x8, #0x68]\n"
+ "usubl v3.8h, v3.8b, v15.8b\n"
+ "smlal v7.4s, v28.4h, v4.4h\n"
+ "smlal2 v10.4s, v26.8h, v4.8h\n"
+ "ldr d26, [x19, x1]\n"
"ushll v26.8h, v26.8b, #0x0\n"
- "ldr x21, [x2, #0x110]\n"
- "smlal2 v5.4s, v28.8h, v4.8h\n"
- "ldr d4, [x0, #0x70]\n"
- "smlal v20.4s, v25.4h, v0.4h\n"
- "usubl v4.8h, v4.8b, v9.8b\n"
- "smlal v8.4s, v31.4h, v0.4h\n"
- "smlal v6.4s, v30.4h, v0.4h\n"
+ "smlal2 v8.4s, v23.8h, v4.8h\n"
+ "smlal v11.4s, v22.4h, v0.4h\n"
+ "ldr x20, [x0, #0x110]\n"
+ "ldr x19, [x0, #0x118]\n"
+ "smlal2 v21.4s, v22.8h, v0.8h\n"
+ "smlal2 v6.4s, v28.8h, v4.8h\n"
+ "ldr d4, [x8, #0x70]\n"
+ "ldr d22, [x11, x1]\n"
+ "smlal v14.4s, v25.4h, v0.4h\n"
+ "smlal v9.4s, v31.4h, v0.4h\n"
+ "usubl v4.8h, v4.8b, v15.8b\n"
+ "smlal v7.4s, v30.4h, v0.4h\n"
+ "smlal2 v10.4s, v25.8h, v0.8h\n"
"ushll v22.8h, v22.8b, #0x0\n"
- "ldr x20, [x2, #0x118]\n"
- "smlal2 v13.4s, v25.8h, v1.8h\n"
- "smlal2 v19.4s, v25.8h, v0.8h\n"
- "ldr d25, [x13, x3]\n"
+ "smlal2 v8.4s, v31.8h, v0.8h\n"
+ "smlal v11.4s, v25.4h, v1.4h\n"
+ "smlal2 v21.4s, v25.8h, v1.8h\n"
+ "ldr d25, [x13, x1]\n"
+ "smlal2 v6.4s, v30.8h, v0.8h\n"
"ushll v25.8h, v25.8b, #0x0\n"
- "smlal2 v7.4s, v31.8h, v0.8h\n"
+ "smlal v14.4s, v24.4h, v1.4h\n"
+ "smlal v9.4s, v30.4h, v1.4h\n"
+ "ldr d0, [x8, #0x78]\n"
+ "usubl v0.8h, v0.8b, v15.8b\n"
+ "smlal v7.4s, v26.4h, v1.4h\n"
+ "smlal2 v10.4s, v24.8h, v1.8h\n"
+ "smlal2 v8.4s, v30.8h, v1.8h\n"
"smlal v11.4s, v24.4h, v2.4h\n"
- "ldr x13, [%x[params], %[offsetof_Params_bias]]\n"
- "smlal2 v5.4s, v30.8h, v0.8h\n"
- "ldr d0, [x0, #0x78]\n"
- "smlal v20.4s, v24.4h, v1.4h\n"
- "usubl v0.8h, v0.8b, v9.8b\n"
- "smlal v8.4s, v30.4h, v1.4h\n"
- "smlal v6.4s, v26.4h, v1.4h\n"
- "smlal2 v13.4s, v24.8h, v2.8h\n"
- "smlal2 v19.4s, v24.8h, v1.8h\n"
- "ldr d24, [x12, x3]\n"
+ "smlal2 v21.4s, v24.8h, v2.8h\n"
+ "ldr d24, [x12, x1]\n"
+ "smlal2 v6.4s, v26.8h, v1.8h\n"
"ushll v24.8h, v24.8b, #0x0\n"
- "smlal2 v7.4s, v30.8h, v1.8h\n"
+ "smlal v14.4s, v27.4h, v2.4h\n"
+ "smlal v9.4s, v26.4h, v2.4h\n"
+ "ldr d1, [x8, #0x80]\n"
+ "usubl v1.8h, v1.8b, v15.8b\n"
+ "smlal v7.4s, v25.4h, v2.4h\n"
+ "smlal2 v10.4s, v27.8h, v2.8h\n"
+ "smlal2 v8.4s, v26.8h, v2.8h\n"
"smlal v11.4s, v27.4h, v3.4h\n"
- "smlal2 v5.4s, v26.8h, v1.8h\n"
- "ldr d1, [x0, #0x80]\n"
- "smlal v20.4s, v27.4h, v2.4h\n"
- "usubl v1.8h, v1.8b, v9.8b\n"
- "smlal v8.4s, v26.4h, v2.4h\n"
- "smlal v6.4s, v25.4h, v2.4h\n"
- "smlal2 v13.4s, v27.8h, v3.8h\n"
- "smlal2 v19.4s, v27.8h, v2.8h\n"
- "ldr d27, [x11, x3]\n"
+ "smlal2 v21.4s, v27.8h, v3.8h\n"
+ "smlal2 v6.4s, v25.8h, v2.8h\n"
+ "ldr d27, [x10, x1]\n"
"ushll v27.8h, v27.8b, #0x0\n"
- "smlal2 v7.4s, v26.8h, v2.8h\n"
+ "smlal v14.4s, v23.4h, v3.4h\n"
+ "smlal v9.4s, v25.4h, v3.4h\n"
+ "ldr d2, [x8, #0x88]\n"
+ "usubl v2.8h, v2.8b, v15.8b\n"
+ "smlal v7.4s, v24.4h, v3.4h\n"
+ "smlal2 v10.4s, v23.8h, v3.8h\n"
+ "smlal2 v8.4s, v25.8h, v3.8h\n"
"smlal v11.4s, v23.4h, v4.4h\n"
- "smlal2 v5.4s, v25.8h, v2.8h\n"
- "ldr d2, [x0, #0x88]\n"
- "smlal v20.4s, v23.4h, v3.4h\n"
- "usubl v2.8h, v2.8b, v9.8b\n"
- "smlal v8.4s, v25.4h, v3.4h\n"
- "smlal v6.4s, v24.4h, v3.4h\n"
- "smlal2 v13.4s, v23.8h, v4.8h\n"
- "smlal2 v19.4s, v23.8h, v3.8h\n"
- "ldr d23, [x10, x3]\n"
+ "smlal2 v21.4s, v23.8h, v4.8h\n"
+ "ldr d23, [x9, x1]\n"
+ "smlal2 v6.4s, v24.8h, v3.8h\n"
"ushll v23.8h, v23.8b, #0x0\n"
- "smlal2 v7.4s, v25.8h, v3.8h\n"
+ "smlal v14.4s, v28.4h, v4.4h\n"
+ "smlal v9.4s, v24.4h, v4.4h\n"
+ "ldr d3, [x8, #0x90]\n"
+ "usubl v3.8h, v3.8b, v15.8b\n"
+ "smlal v7.4s, v22.4h, v4.4h\n"
+ "smlal2 v10.4s, v28.8h, v4.8h\n"
+ "ldr d28, [x26, x1]\n"
+ "ushll v28.8h, v28.8b, #0x0\n"
+ "smlal2 v8.4s, v24.8h, v4.8h\n"
"smlal v11.4s, v31.4h, v0.4h\n"
- "smlal2 v5.4s, v24.8h, v3.8h\n"
- "ldr d3, [x0, #0x90]\n"
- "smlal v20.4s, v28.4h, v4.4h\n"
- "usubl v3.8h, v3.8b, v9.8b\n"
- "smlal v8.4s, v24.4h, v4.4h\n"
- "smlal v6.4s, v22.4h, v4.4h\n"
- "smlal2 v13.4s, v31.8h, v0.8h\n"
- "ldr d31, [x22, x3]\n"
- "smlal2 v19.4s, v28.8h, v4.8h\n"
- "ldr d28, [x27, x3]\n"
- "smlal2 v7.4s, v24.8h, v4.8h\n"
- "smlal v11.4s, v30.4h, v1.4h\n"
+ "smlal2 v21.4s, v31.8h, v0.8h\n"
+ "ldr d31, [x28, x1]\n"
+ "smlal2 v6.4s, v22.8h, v4.8h\n"
"ushll v31.8h, v31.8b, #0x0\n"
- "smlal2 v5.4s, v22.8h, v4.8h\n"
- "ldr d4, [x0, #0x98]\n"
- "smlal v20.4s, v30.4h, v0.4h\n"
- "usubl v4.8h, v4.8b, v9.8b\n"
- "smlal v8.4s, v27.4h, v0.4h\n"
- "smlal v6.4s, v23.4h, v0.4h\n"
- "ushll v28.8h, v28.8b, #0x0\n"
- "smlal2 v13.4s, v30.8h, v1.8h\n"
- "smlal2 v19.4s, v30.8h, v0.8h\n"
- "ldr d30, [x28, x3]\n"
+ "smlal v14.4s, v30.4h, v0.4h\n"
+ "smlal v9.4s, v27.4h, v0.4h\n"
+ "ldr d4, [x8, #0x98]\n"
+ "usubl v4.8h, v4.8b, v15.8b\n"
+ "smlal v7.4s, v23.4h, v0.4h\n"
+ "smlal2 v10.4s, v30.8h, v0.8h\n"
+ "smlal2 v8.4s, v27.8h, v0.8h\n"
+ "smlal v11.4s, v30.4h, v1.4h\n"
+ "smlal2 v21.4s, v30.8h, v1.8h\n"
+ "ldr d30, [x27, x1]\n"
+ "smlal2 v6.4s, v23.8h, v0.8h\n"
"ushll v30.8h, v30.8b, #0x0\n"
- "smlal2 v7.4s, v27.8h, v0.8h\n"
+ "smlal v14.4s, v26.4h, v1.4h\n"
+ "smlal v9.4s, v23.4h, v1.4h\n"
+ "ldr d0, [x8, #0xa0]\n"
+ "usubl v0.8h, v0.8b, v15.8b\n"
+ "smlal v7.4s, v31.4h, v1.4h\n"
+ "smlal2 v10.4s, v26.8h, v1.8h\n"
+ "smlal2 v8.4s, v23.8h, v1.8h\n"
"smlal v11.4s, v26.4h, v2.4h\n"
- "smlal2 v5.4s, v23.8h, v0.8h\n"
- "ldr d0, [x0, #0xa0]\n"
- "smlal v20.4s, v26.4h, v1.4h\n"
- "usubl v0.8h, v0.8b, v9.8b\n"
- "smlal v8.4s, v23.4h, v1.4h\n"
- "smlal v6.4s, v31.4h, v1.4h\n"
- "smlal2 v13.4s, v26.8h, v2.8h\n"
- "smlal2 v19.4s, v26.8h, v1.8h\n"
- "ldr d26, [x26, x3]\n"
+ "smlal2 v21.4s, v26.8h, v2.8h\n"
+ "smlal2 v6.4s, v31.8h, v1.8h\n"
+ "ldr d26, [x25, x1]\n"
"ushll v26.8h, v26.8b, #0x0\n"
- "smlal2 v7.4s, v23.8h, v1.8h\n"
+ "smlal v14.4s, v25.4h, v2.4h\n"
+ "smlal v9.4s, v31.4h, v2.4h\n"
+ "ldr d1, [x8, #0xa8]\n"
+ "usubl v1.8h, v1.8b, v15.8b\n"
+ "smlal v7.4s, v30.4h, v2.4h\n"
+ "smlal2 v10.4s, v25.8h, v2.8h\n"
+ "smlal2 v8.4s, v31.8h, v2.8h\n"
"smlal v11.4s, v25.4h, v3.4h\n"
- "smlal2 v5.4s, v31.8h, v1.8h\n"
- "ldr d1, [x0, #0xa8]\n"
- "smlal v20.4s, v25.4h, v2.4h\n"
- "usubl v1.8h, v1.8b, v9.8b\n"
- "smlal v8.4s, v31.4h, v2.4h\n"
- "smlal v6.4s, v30.4h, v2.4h\n"
- "smlal2 v13.4s, v25.8h, v3.8h\n"
- "smlal2 v19.4s, v25.8h, v2.8h\n"
- "ldr d25, [x25, x3]\n"
+ "smlal2 v21.4s, v25.8h, v3.8h\n"
+ "smlal2 v6.4s, v30.8h, v2.8h\n"
+ "ldr d25, [x24, x1]\n"
"ushll v25.8h, v25.8b, #0x0\n"
- "smlal2 v7.4s, v31.8h, v2.8h\n"
+ "smlal v14.4s, v24.4h, v3.4h\n"
+ "smlal v9.4s, v30.4h, v3.4h\n"
+ "ldr d2, [x8, #0xb0]\n"
+ "usubl v2.8h, v2.8b, v15.8b\n"
+ "smlal v7.4s, v28.4h, v3.4h\n"
+ "smlal2 v10.4s, v24.8h, v3.8h\n"
+ "smlal2 v8.4s, v30.8h, v3.8h\n"
"smlal v11.4s, v24.4h, v4.4h\n"
- "smlal2 v5.4s, v30.8h, v2.8h\n"
- "ldr d2, [x0, #0xb0]\n"
- "smlal v20.4s, v24.4h, v3.4h\n"
- "usubl v2.8h, v2.8b, v9.8b\n"
- "smlal v8.4s, v30.4h, v3.4h\n"
- "smlal v6.4s, v28.4h, v3.4h\n"
- "smlal2 v13.4s, v24.8h, v4.8h\n"
- "smlal2 v19.4s, v24.8h, v3.8h\n"
- "ldr d24, [x24, x3]\n"
+ "smlal2 v21.4s, v24.8h, v4.8h\n"
+ "ldr d24, [x23, x1]\n"
+ "smlal2 v6.4s, v28.8h, v3.8h\n"
"ushll v24.8h, v24.8b, #0x0\n"
- "smlal2 v7.4s, v30.8h, v3.8h\n"
+ "smlal v14.4s, v22.4h, v4.4h\n"
+ "smlal v9.4s, v28.4h, v4.4h\n"
+ "ldr d3, [x8, #0xb8]\n"
+ "usubl v3.8h, v3.8b, v15.8b\n"
+ "smlal v7.4s, v26.4h, v4.4h\n"
+ "smlal2 v8.4s, v28.8h, v4.8h\n"
"smlal v11.4s, v27.4h, v0.4h\n"
- "smlal2 v5.4s, v28.8h, v3.8h\n"
- "ldr d3, [x0, #0xb8]\n"
- "smlal v20.4s, v22.4h, v4.4h\n"
- "usubl v3.8h, v3.8b, v9.8b\n"
- "smlal v8.4s, v28.4h, v4.4h\n"
- "smlal v6.4s, v26.4h, v4.4h\n"
- "smlal2 v13.4s, v27.8h, v0.8h\n"
- "ldr d27, [x23, x3]\n"
- "smlal2 v7.4s, v28.8h, v4.8h\n"
+ "smlal2 v21.4s, v27.8h, v0.8h\n"
+ "ldr d27, [x22, x1]\n"
"ushll v27.8h, v27.8b, #0x0\n"
- "smlal v11.4s, v23.4h, v1.4h\n"
- "smlal2 v19.4s, v22.8h, v4.8h\n"
- "smlal2 v5.4s, v26.8h, v4.8h\n"
- "ldr d4, [x0, #0xc0]\n"
- "smlal v20.4s, v23.4h, v0.4h\n"
- "usubl v4.8h, v4.8b, v9.8b\n"
- "smlal v8.4s, v25.4h, v0.4h\n"
- "smlal v6.4s, v24.4h, v0.4h\n"
- "add x0, x0, #0xc8\n"
- "smlal2 v13.4s, v23.8h, v1.8h\n"
- "smlal2 v7.4s, v25.8h, v0.8h\n"
- "ldr d25, [x15, x3]\n"
+ "smlal2 v10.4s, v22.8h, v4.8h\n"
+ "smlal2 v6.4s, v26.8h, v4.8h\n"
+ "ldr d4, [x8, #0xc0]\n"
+ "usubl v4.8h, v4.8b, v15.8b\n"
+ "smlal v14.4s, v23.4h, v0.4h\n"
+ "smlal v9.4s, v25.4h, v0.4h\n"
+ "add x8, x8, #0xc8\n"
+ "smlal v7.4s, v24.4h, v0.4h\n"
+ "smlal2 v8.4s, v25.8h, v0.8h\n"
+ "ldr d25, [x7, x1]\n"
"ushll v25.8h, v25.8b, #0x0\n"
- "smlal v11.4s, v31.4h, v2.4h\n"
- "smlal2 v19.4s, v23.8h, v0.8h\n"
- "smlal2 v5.4s, v24.8h, v0.8h\n"
- "smlal v20.4s, v31.4h, v1.4h\n"
- "smlal v8.4s, v24.4h, v1.4h\n"
- "smlal v6.4s, v27.4h, v1.4h\n"
- "smlal2 v13.4s, v31.8h, v2.8h\n"
- "smlal2 v7.4s, v24.8h, v1.8h\n"
- "ldr d24, [x21, x3]\n"
+ "smlal2 v10.4s, v23.8h, v0.8h\n"
+ "smlal2 v6.4s, v24.8h, v0.8h\n"
+ "smlal v11.4s, v23.4h, v1.4h\n"
+ "smlal v14.4s, v31.4h, v1.4h\n"
+ "smlal v9.4s, v24.4h, v1.4h\n"
+ "smlal v7.4s, v27.4h, v1.4h\n"
+ "smlal2 v8.4s, v24.8h, v1.8h\n"
+ "ldr d24, [x20, x1]\n"
+ "smlal2 v21.4s, v23.8h, v1.8h\n"
"ushll v24.8h, v24.8b, #0x0\n"
- "smlal v11.4s, v30.4h, v3.4h\n"
- "smlal2 v19.4s, v31.8h, v1.8h\n"
- "smlal2 v5.4s, v27.8h, v1.8h\n"
- "smlal v20.4s, v30.4h, v2.4h\n"
- "smlal v8.4s, v27.4h, v2.4h\n"
- "smlal v6.4s, v25.4h, v2.4h\n"
- "smlal2 v13.4s, v30.8h, v3.8h\n"
- "smlal2 v7.4s, v27.8h, v2.8h\n"
- "ldr d27, [x20, x3]\n"
+ "smlal2 v10.4s, v31.8h, v1.8h\n"
+ "smlal2 v6.4s, v27.8h, v1.8h\n"
+ "smlal v11.4s, v31.4h, v2.4h\n"
+ "smlal v14.4s, v30.4h, v2.4h\n"
+ "smlal v9.4s, v27.4h, v2.4h\n"
+ "smlal v7.4s, v25.4h, v2.4h\n"
+ "smlal2 v8.4s, v27.8h, v2.8h\n"
+ "ldr d27, [x19, x1]\n"
+ "smlal2 v21.4s, v31.8h, v2.8h\n"
"ushll v27.8h, v27.8b, #0x0\n"
+ "smlal2 v10.4s, v30.8h, v2.8h\n"
+ "smlal2 v6.4s, v25.8h, v2.8h\n"
+ "add x1, x1, #0x8\n"
+ "smlal v11.4s, v30.4h, v3.4h\n"
+ "smlal v14.4s, v28.4h, v3.4h\n"
+ "smlal v9.4s, v25.4h, v3.4h\n"
+ "smlal v7.4s, v24.4h, v3.4h\n"
+ "smlal2 v21.4s, v30.8h, v3.8h\n"
+ "smlal2 v10.4s, v28.8h, v3.8h\n"
+ "smlal2 v8.4s, v25.8h, v3.8h\n"
+ "smlal2 v6.4s, v24.8h, v3.8h\n"
"smlal v11.4s, v28.4h, v4.4h\n"
- "smlal2 v19.4s, v30.8h, v2.8h\n"
- "sqrdmulh v11.4s, v11.4s, v18.4s\n"
- "add x3, x3, #0x8\n"
- "smlal2 v5.4s, v25.8h, v2.8h\n"
- "smlal v20.4s, v28.4h, v3.4h\n"
- "and v31.16b, v11.16b, v21.16b\n"
- "smlal v8.4s, v25.4h, v3.4h\n"
- "smlal v6.4s, v24.4h, v3.4h\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "smlal2 v13.4s, v28.8h, v4.8h\n"
- "smlal2 v19.4s, v28.8h, v3.8h\n"
- "sqrdmulh v13.4s, v13.4s, v16.4s\n"
- "smlal2 v7.4s, v25.8h, v3.8h\n"
- "smlal2 v5.4s, v24.8h, v3.8h\n"
- "and v17.16b, v13.16b, v10.16b\n"
- "smlal v20.4s, v26.4h, v4.4h\n"
- "smlal v8.4s, v24.4h, v4.4h\n"
- "sqrdmulh v20.4s, v20.4s, v18.4s\n"
- "smlal v6.4s, v27.4h, v4.4h\n"
- "smlal2 v19.4s, v26.8h, v4.8h\n"
+ "smlal v14.4s, v26.4h, v4.4h\n"
+ "sqrdmulh v11.4s, v11.4s, v17.4s\n"
+ "smlal v9.4s, v24.4h, v4.4h\n"
+ "smlal v7.4s, v27.4h, v4.4h\n"
+ "sqrdmulh v14.4s, v14.4s, v17.4s\n"
+ "smlal2 v21.4s, v28.8h, v4.8h\n"
+ "smlal2 v10.4s, v26.8h, v4.8h\n"
+ "sqrdmulh v9.4s, v9.4s, v17.4s\n"
+ "smlal2 v8.4s, v24.8h, v4.8h\n"
+ "smlal2 v6.4s, v27.8h, v4.8h\n"
+ "sqrdmulh v7.4s, v7.4s, v17.4s\n"
+ "and v23.16b, v11.16b, v5.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v18.4s\n"
+ "and v22.16b, v14.16b, v5.16b\n"
+ "sqrdmulh v10.4s, v10.4s, v18.4s\n"
+ "and v17.16b, v9.16b, v5.16b\n"
"sqrdmulh v8.4s, v8.4s, v18.4s\n"
- "smlal2 v7.4s, v24.8h, v4.8h\n"
- "smlal2 v5.4s, v27.8h, v4.8h\n"
+ "and v20.16b, v7.16b, v5.16b\n"
"sqrdmulh v6.4s, v6.4s, v18.4s\n"
- "sqadd v11.4s, v11.4s, v31.4s\n"
+ "sshr v23.4s, v23.4s, #0x1f\n"
+ "and v19.16b, v21.16b, v29.16b\n"
+ "sshr v22.4s, v22.4s, #0x1f\n"
+ "and v18.16b, v10.16b, v29.16b\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "and v26.16b, v20.16b, v21.16b\n"
- "sqrdmulh v19.4s, v19.4s, v16.4s\n"
- "and v18.16b, v8.16b, v21.16b\n"
- "sqrdmulh v7.4s, v7.4s, v16.4s\n"
- "and v31.16b, v6.16b, v21.16b\n"
- "sqrdmulh v5.4s, v5.4s, v16.4s\n"
- "sqadd v13.4s, v13.4s, v17.4s\n"
- "sshr v26.4s, v26.4s, #0x1f\n"
- "and v27.16b, v19.16b, v10.16b\n"
+ "and v26.16b, v8.16b, v29.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v4.16b, v6.16b, v29.16b\n"
+ "sqadd v11.4s, v11.4s, v23.4s\n"
+ "sshr v19.4s, v19.4s, #0x1f\n"
+ "sqadd v14.4s, v14.4s, v22.4s\n"
"sshr v18.4s, v18.4s, #0x1f\n"
- "and v25.16b, v7.16b, v10.16b\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "and v17.16b, v5.16b, v10.16b\n"
- "sqadd v20.4s, v20.4s, v26.4s\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "sqadd v8.4s, v8.4s, v18.4s\n"
- "sshr v25.4s, v25.4s, #0x1f\n"
- "sqadd v6.4s, v6.4s, v31.4s\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "srshl v11.4s, v11.4s, v21.4s\n"
- "srshl v20.4s, v20.4s, v21.4s\n"
- "sqadd v19.4s, v19.4s, v27.4s\n"
- "srshl v8.4s, v8.4s, v21.4s\n"
- "sqadd v7.4s, v7.4s, v25.4s\n"
- "srshl v6.4s, v6.4s, v21.4s\n"
- "sqadd v5.4s, v5.4s, v17.4s\n"
- "srshl v13.4s, v13.4s, v10.4s\n"
+ "sqadd v9.4s, v9.4s, v17.4s\n"
+ "sshr v26.4s, v26.4s, #0x1f\n"
+ "sqadd v7.4s, v7.4s, v20.4s\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "srshl v11.4s, v11.4s, v5.4s\n"
+ "sqadd v21.4s, v21.4s, v19.4s\n"
+ "srshl v14.4s, v14.4s, v5.4s\n"
+ "sqadd v10.4s, v10.4s, v18.4s\n"
+ "srshl v9.4s, v9.4s, v5.4s\n"
+ "sqadd v8.4s, v8.4s, v26.4s\n"
+ "srshl v7.4s, v7.4s, v5.4s\n"
+ "sqadd v6.4s, v6.4s, v4.4s\n"
+ "srshl v21.4s, v21.4s, v29.4s\n"
"sqxtn v11.4h, v11.4s\n"
- "srshl v19.4s, v19.4s, v10.4s\n"
- "sqxtn v20.4h, v20.4s\n"
- "srshl v7.4s, v7.4s, v10.4s\n"
- "sqxtn v8.4h, v8.4s\n"
- "srshl v5.4s, v5.4s, v10.4s\n"
- "sqxtn v6.4h, v6.4s\n"
- "sqxtn2 v11.8h, v13.4s\n"
- "sqxtn2 v20.8h, v19.4s\n"
- "sqxtn2 v8.8h, v7.4s\n"
- "sqxtn2 v6.8h, v5.4s\n"
- "sqadd v11.8h, v11.8h, v15.8h\n"
- "sqadd v20.8h, v20.8h, v15.8h\n"
- "sqadd v8.8h, v8.8h, v15.8h\n"
- "sqadd v6.8h, v6.8h, v15.8h\n"
- "smax v11.8h, v11.8h, v14.8h\n"
- "smax v20.8h, v20.8h, v14.8h\n"
- "smax v8.8h, v8.8h, v14.8h\n"
- "smax v6.8h, v6.8h, v14.8h\n"
- "smin v11.8h, v11.8h, v12.8h\n"
- "smin v20.8h, v20.8h, v12.8h\n"
- "smin v8.8h, v8.8h, v12.8h\n"
- "smin v6.8h, v6.8h, v12.8h\n"
+ "srshl v10.4s, v10.4s, v29.4s\n"
+ "sqxtn v14.4h, v14.4s\n"
+ "srshl v8.4s, v8.4s, v29.4s\n"
+ "sqxtn v9.4h, v9.4s\n"
+ "srshl v6.4s, v6.4s, v29.4s\n"
+ "sqxtn v7.4h, v7.4s\n"
+ "sqxtn2 v11.8h, v21.4s\n"
+ "sqxtn2 v14.8h, v10.4s\n"
+ "sqxtn2 v9.8h, v8.4s\n"
+ "sqxtn2 v7.8h, v6.4s\n"
+ "sqadd v11.8h, v11.8h, v16.8h\n"
+ "sqadd v14.8h, v14.8h, v16.8h\n"
+ "sqadd v9.8h, v9.8h, v16.8h\n"
+ "sqadd v7.8h, v7.8h, v16.8h\n"
+ "smax v11.8h, v11.8h, v12.8h\n"
+ "smax v14.8h, v14.8h, v12.8h\n"
+ "smax v9.8h, v9.8h, v12.8h\n"
+ "smax v7.8h, v7.8h, v12.8h\n"
+ "smin v11.8h, v11.8h, v13.8h\n"
+ "smin v14.8h, v14.8h, v13.8h\n"
+ "smin v9.8h, v9.8h, v13.8h\n"
+ "smin v7.8h, v7.8h, v13.8h\n"
"uzp1 v11.16b, v11.16b, v11.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "str d11, [x7, x1]\n"
- "uzp1 v8.16b, v8.16b, v8.16b\n"
- "uzp1 v6.16b, v6.16b, v6.16b\n"
- "str d20, [x8, x1]\n"
- "str d8, [x17, x1]\n"
- "str d6, [x16, x1]\n"
- "ldr q11, [x13, #0x0]\n"
- "ldr q13, [x13, #0x10]\n"
- "add x13, x13, #0x20\n"
- "ldr d0, [x0, #0x0]\n"
- "ldr d1, [x0, #0x8]\n"
- "add x1, x1, #0x8\n"
- "str x13, [%x[params], %[offsetof_Params_bias]]\n"
- "ldr d2, [x0, #0x10]\n"
- "ldr d3, [x0, #0x18]\n"
- "mov v20.16b, v11.16b\n"
- "mov v19.16b, v13.16b\n"
- "ldr d4, [x0, #0x20]\n"
- "ldp x10, x28, [x2, #0x0]\n"
- "mov v8.16b, v11.16b\n"
- "mov v7.16b, v13.16b\n"
- "ldp x27, x26, [x2, #0x10]\n"
- "ldp x25, x24, [x2, #0x20]\n"
- "mov v6.16b, v11.16b\n"
- "mov v5.16b, v13.16b\n"
- "ldp x23, x22, [x2, #0x30]\n"
- "ldp x21, x20, [x2, #0x40]\n"
- "usubl v0.8h, v0.8b, v9.8b\n"
- "usubl v1.8h, v1.8b, v9.8b\n"
- "ldr d31, [x10, x3]\n"
- "ldr d30, [x28, x3]\n"
- "usubl v2.8h, v2.8b, v9.8b\n"
- "usubl v3.8h, v3.8b, v9.8b\n"
- "ldr d29, [x27, x3]\n"
- "ldr d28, [x26, x3]\n"
- "usubl v4.8h, v4.8b, v9.8b\n"
+ "uzp1 v14.16b, v14.16b, v14.16b\n"
+ "str d11, [x21, x2]\n"
+ "uzp1 v9.16b, v9.16b, v9.16b\n"
+ "uzp1 v7.16b, v7.16b, v7.16b\n"
+ "str d14, [x15, x2]\n"
+ "str d9, [x17, x2]\n"
+ "str d7, [x16, x2]\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr q11, [x19, #0x0]\n"
+ "add x2, x2, #0x8\n"
+ "ldr q21, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr d0, [x8, #0x0]\n"
+ "ldr d1, [x8, #0x8]\n"
+ "ldr d2, [x8, #0x10]\n"
+ "mov v14.16b, v11.16b\n"
+ "mov v10.16b, v21.16b\n"
+ "ldr d3, [x8, #0x18]\n"
+ "ldr d4, [x8, #0x20]\n"
+ "mov v9.16b, v11.16b\n"
+ "mov v8.16b, v21.16b\n"
+ "ldp x28, x27, [x0, #0x0]\n"
+ "ldp x10, x26, [x0, #0x10]\n"
+ "mov v7.16b, v11.16b\n"
+ "mov v6.16b, v21.16b\n"
+ "ldp x24, x23, [x0, #0x20]\n"
+ "ldp x22, x25, [x0, #0x30]\n"
+ "usubl v0.8h, v0.8b, v15.8b\n"
+ "usubl v1.8h, v1.8b, v15.8b\n"
+ "ldp x20, x19, [x0, #0x40]\n"
+ "ldr d31, [x28, x1]\n"
+ "usubl v2.8h, v2.8b, v15.8b\n"
+ "usubl v3.8h, v3.8b, v15.8b\n"
+ "ldr d30, [x27, x1]\n"
+ "ldr d29, [x10, x1]\n"
+ "usubl v4.8h, v4.8b, v15.8b\n"
"ushll v31.8h, v31.8b, #0x0\n"
- "ldr d27, [x25, x3]\n"
- "ldr d23, [x24, x3]\n"
+ "ldr d28, [x26, x1]\n"
+ "ldr d27, [x24, x1]\n"
"ushll v30.8h, v30.8b, #0x0\n"
"ushll v29.8h, v29.8b, #0x0\n"
- "ldr d25, [x23, x3]\n"
- "ldr d24, [x22, x3]\n"
+ "ldr d23, [x23, x1]\n"
+ "ldr d25, [x22, x1]\n"
"ushll v28.8h, v28.8b, #0x0\n"
"ushll v27.8h, v27.8b, #0x0\n"
- "ldr d26, [x21, x3]\n"
- "ldr d22, [x20, x3]\n"
+ "ldr d24, [x25, x1]\n"
+ "ldr d26, [x20, x1]\n"
"ushll v23.8h, v23.8b, #0x0\n"
"ushll v25.8h, v25.8b, #0x0\n"
+ "ldr d22, [x19, x1]\n"
"ushll v24.8h, v24.8b, #0x0\n"
"ushll v26.8h, v26.8b, #0x0\n"
"ushll v22.8h, v22.8b, #0x0\n"
"bgt 1b\n"
"2:" // Tail
- "ldr q18, [x6, #0x0]\n"
- "ldr q21, [x5, #0x0]\n"
"smlal v11.4s, v31.4h, v0.4h\n"
- "smlal2 v13.4s, v31.8h, v0.8h\n"
- "ldr q16, [x6, #0x10]\n"
- "ldr q10, [x5, #0x10]\n"
- "smlal v11.4s, v30.4h, v1.4h\n"
- "smlal v20.4s, v30.4h, v0.4h\n"
- "ldr x20, [x2, #0x50]\n"
- "smlal v8.4s, v29.4h, v0.4h\n"
- "smlal v6.4s, v28.4h, v0.4h\n"
- "ldr x22, [x2, #0x58]\n"
- "smlal2 v13.4s, v30.8h, v1.8h\n"
- "smlal2 v19.4s, v30.8h, v0.8h\n"
- "ldr d31, [x20, x3]\n"
+ "smlal2 v21.4s, v31.8h, v0.8h\n"
+ "ldr x19, [x0, #0x50]\n"
+ "ldr d31, [x19, x1]\n"
+ "smlal v14.4s, v30.4h, v0.4h\n"
+ "smlal v9.4s, v29.4h, v0.4h\n"
+ "ldr x20, [x0, #0x58]\n"
"ushll v31.8h, v31.8b, #0x0\n"
- "smlal2 v7.4s, v29.8h, v0.8h\n"
- "smlal v11.4s, v27.4h, v2.4h\n"
- "ldr x21, [x2, #0x60]\n"
- "ldr x20, [x2, #0x68]\n"
- "smlal2 v5.4s, v28.8h, v0.8h\n"
- "ldr d30, [x22, x3]\n"
- "smlal v20.4s, v27.4h, v1.4h\n"
+ "smlal v7.4s, v28.4h, v0.4h\n"
+ "smlal2 v10.4s, v30.8h, v0.8h\n"
+ "ldr x19, [x0, #0x60]\n"
+ "ldr x24, [x0, #0x68]\n"
+ "smlal2 v8.4s, v29.8h, v0.8h\n"
+ "smlal v11.4s, v30.4h, v1.4h\n"
+ "ldr x23, [x0, #0x70]\n"
+ "ldr x26, [x0, #0x78]\n"
+ "smlal2 v21.4s, v30.8h, v1.8h\n"
+ "smlal2 v6.4s, v28.8h, v0.8h\n"
+ "ldr d30, [x20, x1]\n"
"ushll v30.8h, v30.8b, #0x0\n"
- "smlal v8.4s, v28.4h, v1.4h\n"
- "smlal v6.4s, v23.4h, v1.4h\n"
- "ldr x25, [x2, #0x70]\n"
- "ldr x26, [x2, #0x78]\n"
- "smlal2 v13.4s, v27.8h, v2.8h\n"
- "smlal2 v19.4s, v27.8h, v1.8h\n"
- "ldr d0, [x0, #0x28]\n"
- "usubl v0.8h, v0.8b, v9.8b\n"
- "smlal2 v7.4s, v28.8h, v1.8h\n"
- "smlal v11.4s, v25.4h, v3.4h\n"
- "ldr x23, [x2, #0x80]\n"
- "ldr x24, [x2, #0x88]\n"
- "smlal2 v5.4s, v23.8h, v1.8h\n"
- "ldr d27, [x21, x3]\n"
- "smlal v20.4s, v25.4h, v2.4h\n"
+ "smlal v14.4s, v27.4h, v1.4h\n"
+ "smlal v9.4s, v28.4h, v1.4h\n"
+ "ldr d0, [x8, #0x28]\n"
+ "usubl v0.8h, v0.8b, v15.8b\n"
+ "smlal v7.4s, v23.4h, v1.4h\n"
+ "smlal2 v10.4s, v27.8h, v1.8h\n"
+ "ldr x7, [x0, #0x80]\n"
+ "ldr x22, [x0, #0x88]\n"
+ "smlal2 v8.4s, v28.8h, v1.8h\n"
+ "smlal v11.4s, v27.4h, v2.4h\n"
+ "ldr x20, [x0, #0x90]\n"
+ "ldr x14, [x0, #0x98]\n"
+ "smlal2 v21.4s, v27.8h, v2.8h\n"
+ "smlal2 v6.4s, v23.8h, v1.8h\n"
+ "ldr d27, [x19, x1]\n"
"ushll v27.8h, v27.8b, #0x0\n"
- "smlal v8.4s, v23.4h, v2.4h\n"
- "smlal v6.4s, v31.4h, v2.4h\n"
- "ldr x15, [x2, #0x90]\n"
- "ldr x21, [x2, #0x98]\n"
- "smlal2 v13.4s, v25.8h, v3.8h\n"
- "smlal2 v19.4s, v25.8h, v2.8h\n"
- "ldr d1, [x0, #0x30]\n"
- "usubl v1.8h, v1.8b, v9.8b\n"
- "smlal2 v7.4s, v23.8h, v2.8h\n"
- "smlal v11.4s, v24.4h, v4.4h\n"
- "ldr x14, [x2, #0xa0]\n"
- "ldr x13, [x2, #0xa8]\n"
- "smlal2 v5.4s, v31.8h, v2.8h\n"
- "ldr d25, [x20, x3]\n"
- "smlal v20.4s, v24.4h, v3.4h\n"
+ "smlal v14.4s, v25.4h, v2.4h\n"
+ "smlal v9.4s, v23.4h, v2.4h\n"
+ "ldr d1, [x8, #0x30]\n"
+ "usubl v1.8h, v1.8b, v15.8b\n"
+ "smlal v7.4s, v31.4h, v2.4h\n"
+ "smlal2 v10.4s, v25.8h, v2.8h\n"
+ "ldr x19, [x0, #0xa0]\n"
+ "ldr x13, [x0, #0xa8]\n"
+ "smlal2 v8.4s, v23.8h, v2.8h\n"
+ "smlal v11.4s, v25.4h, v3.4h\n"
+ "ldr x12, [x0, #0xb0]\n"
+ "ldr x11, [x0, #0xb8]\n"
+ "smlal2 v21.4s, v25.8h, v3.8h\n"
+ "smlal2 v6.4s, v31.8h, v2.8h\n"
+ "ldr d25, [x24, x1]\n"
"ushll v25.8h, v25.8b, #0x0\n"
- "smlal v8.4s, v31.4h, v3.4h\n"
- "smlal v6.4s, v30.4h, v3.4h\n"
- "ldr x12, [x2, #0xb0]\n"
- "ldr x20, [x2, #0xb8]\n"
- "smlal2 v13.4s, v24.8h, v4.8h\n"
- "smlal2 v19.4s, v24.8h, v3.8h\n"
- "ldr d2, [x0, #0x38]\n"
- "usubl v2.8h, v2.8b, v9.8b\n"
- "smlal2 v7.4s, v31.8h, v3.8h\n"
- "smlal v11.4s, v29.4h, v0.4h\n"
- "ldr x11, [x2, #0xc0]\n"
- "ldr x10, [x2, #0xc8]\n"
- "smlal2 v5.4s, v30.8h, v3.8h\n"
- "ldr d24, [x25, x3]\n"
- "smlal v20.4s, v27.4h, v4.4h\n"
+ "smlal v14.4s, v24.4h, v3.4h\n"
+ "smlal v9.4s, v31.4h, v3.4h\n"
+ "ldr d2, [x8, #0x38]\n"
+ "usubl v2.8h, v2.8b, v15.8b\n"
+ "smlal v7.4s, v30.4h, v3.4h\n"
+ "smlal2 v10.4s, v24.8h, v3.8h\n"
+ "ldr x10, [x0, #0xc0]\n"
+ "ldr x9, [x0, #0xc8]\n"
+ "smlal2 v8.4s, v31.8h, v3.8h\n"
+ "smlal v11.4s, v24.4h, v4.4h\n"
+ "ldr x28, [x0, #0xd0]\n"
+ "ldr x27, [x0, #0xd8]\n"
+ "smlal2 v21.4s, v24.8h, v4.8h\n"
+ "smlal2 v6.4s, v30.8h, v3.8h\n"
+ "ldr d24, [x23, x1]\n"
"ushll v24.8h, v24.8b, #0x0\n"
- "smlal v8.4s, v30.4h, v4.4h\n"
- "smlal v6.4s, v26.4h, v4.4h\n"
- "ldr x22, [x2, #0xd0]\n"
- "ldr x28, [x2, #0xd8]\n"
- "smlal2 v13.4s, v29.8h, v0.8h\n"
- "ldr d3, [x0, #0x40]\n"
- "smlal2 v19.4s, v27.8h, v4.8h\n"
- "ldr d27, [x26, x3]\n"
- "smlal2 v7.4s, v30.8h, v4.8h\n"
- "smlal v11.4s, v28.4h, v1.4h\n"
- "usubl v3.8h, v3.8b, v9.8b\n"
- "ldr x27, [x2, #0xe0]\n"
- "smlal2 v5.4s, v26.8h, v4.8h\n"
- "ldr d4, [x0, #0x48]\n"
- "smlal v20.4s, v28.4h, v0.4h\n"
+ "smlal v14.4s, v27.4h, v4.4h\n"
+ "smlal v9.4s, v30.4h, v4.4h\n"
+ "ldr d3, [x8, #0x40]\n"
+ "usubl v3.8h, v3.8b, v15.8b\n"
+ "smlal v7.4s, v26.4h, v4.4h\n"
+ "smlal2 v10.4s, v27.8h, v4.8h\n"
+ "ldr d27, [x26, x1]\n"
"ushll v27.8h, v27.8b, #0x0\n"
- "smlal v8.4s, v22.4h, v0.4h\n"
- "smlal v6.4s, v25.4h, v0.4h\n"
- "usubl v4.8h, v4.8b, v9.8b\n"
- "ldr x26, [x2, #0xe8]\n"
- "smlal2 v13.4s, v28.8h, v1.8h\n"
- "smlal2 v19.4s, v28.8h, v0.8h\n"
- "ldr d28, [x24, x3]\n"
+ "smlal2 v8.4s, v30.8h, v4.8h\n"
+ "smlal v11.4s, v29.4h, v0.4h\n"
+ "ldr x26, [x0, #0xe0]\n"
+ "ldr x25, [x0, #0xe8]\n"
+ "smlal2 v21.4s, v29.8h, v0.8h\n"
+ "smlal2 v6.4s, v26.8h, v4.8h\n"
+ "ldr d4, [x8, #0x48]\n"
+ "usubl v4.8h, v4.8b, v15.8b\n"
+ "smlal v14.4s, v28.4h, v0.4h\n"
+ "smlal v9.4s, v22.4h, v0.4h\n"
+ "ldr x24, [x0, #0xf0]\n"
+ "ldr x23, [x0, #0xf8]\n"
+ "smlal v7.4s, v25.4h, v0.4h\n"
+ "smlal2 v10.4s, v28.8h, v0.8h\n"
+ "ldr q17, [x5, #0x0]\n"
+ "ldr q5, [x6, #0x0]\n"
+ "smlal2 v8.4s, v22.8h, v0.8h\n"
+ "smlal v11.4s, v28.4h, v1.4h\n"
+ "ldr q18, [x5, #0x10]\n"
+ "ldr q29, [x6, #0x10]\n"
+ "smlal2 v21.4s, v28.8h, v1.8h\n"
+ "smlal2 v6.4s, v25.8h, v0.8h\n"
+ "ldr d28, [x22, x1]\n"
+ "ldr d0, [x8, #0x50]\n"
+ "smlal v14.4s, v23.4h, v1.4h\n"
+ "smlal v9.4s, v25.4h, v1.4h\n"
"ushll v28.8h, v28.8b, #0x0\n"
- "smlal2 v7.4s, v22.8h, v0.8h\n"
- "smlal v11.4s, v23.4h, v2.4h\n"
- "ldr x25, [x2, #0xf0]\n"
- "ldr x24, [x2, #0xf8]\n"
- "smlal2 v5.4s, v25.8h, v0.8h\n"
- "ldr d0, [x0, #0x50]\n"
- "smlal v20.4s, v23.4h, v1.4h\n"
- "usubl v0.8h, v0.8b, v9.8b\n"
- "smlal v8.4s, v25.4h, v1.4h\n"
- "smlal v6.4s, v24.4h, v1.4h\n"
+ "ldr x22, [x0, #0x100]\n"
+ "smlal v7.4s, v24.4h, v1.4h\n"
+ "smlal2 v10.4s, v23.8h, v1.8h\n"
+ "usubl v0.8h, v0.8b, v15.8b\n"
"tst x4, #0x7\n"
+ "smlal2 v8.4s, v25.8h, v1.8h\n"
+ "smlal v11.4s, v23.4h, v2.4h\n"
+ "add x5, x5, #0x20\n"
"add x6, x6, #0x20\n"
- "smlal2 v13.4s, v23.8h, v2.8h\n"
- "smlal2 v19.4s, v23.8h, v1.8h\n"
- "ldr d23, [x23, x3]\n"
+ "smlal2 v21.4s, v23.8h, v2.8h\n"
+ "ldr d23, [x7, x1]\n"
+ "smlal2 v6.4s, v24.8h, v1.8h\n"
"ushll v23.8h, v23.8b, #0x0\n"
- "smlal2 v7.4s, v25.8h, v1.8h\n"
+ "smlal v14.4s, v31.4h, v2.4h\n"
+ "smlal v9.4s, v24.4h, v2.4h\n"
+ "ldr d1, [x8, #0x58]\n"
+ "usubl v1.8h, v1.8b, v15.8b\n"
+ "smlal v7.4s, v27.4h, v2.4h\n"
+ "smlal2 v10.4s, v31.8h, v2.8h\n"
+ "ldr x7, [x0, #0x108]\n"
+ "smlal2 v8.4s, v24.8h, v2.8h\n"
"smlal v11.4s, v31.4h, v3.4h\n"
- "ldr x23, [x2, #0x100]\n"
- "add x5, x5, #0x20\n"
- "smlal2 v5.4s, v24.8h, v1.8h\n"
- "ldr d1, [x0, #0x58]\n"
- "smlal v20.4s, v31.4h, v2.4h\n"
- "usubl v1.8h, v1.8b, v9.8b\n"
- "smlal v8.4s, v24.4h, v2.4h\n"
- "smlal v6.4s, v27.4h, v2.4h\n"
- "smlal2 v13.4s, v31.8h, v3.8h\n"
- "smlal2 v19.4s, v31.8h, v2.8h\n"
- "ldr d31, [x15, x3]\n"
+ "smlal2 v21.4s, v31.8h, v3.8h\n"
+ "smlal2 v6.4s, v27.8h, v2.8h\n"
+ "ldr d31, [x20, x1]\n"
"ushll v31.8h, v31.8b, #0x0\n"
- "smlal2 v7.4s, v24.8h, v2.8h\n"
+ "smlal v14.4s, v30.4h, v3.4h\n"
+ "smlal v9.4s, v27.4h, v3.4h\n"
+ "ldr d2, [x8, #0x60]\n"
+ "usubl v2.8h, v2.8b, v15.8b\n"
+ "smlal v7.4s, v23.4h, v3.4h\n"
+ "smlal2 v10.4s, v30.8h, v3.8h\n"
+ "ldr x20, [x0, #0x110]\n"
+ "smlal2 v8.4s, v27.8h, v3.8h\n"
"smlal v11.4s, v30.4h, v4.4h\n"
- "ldr x15, [x2, #0x108]\n"
- "smlal2 v5.4s, v27.8h, v2.8h\n"
- "ldr d2, [x0, #0x60]\n"
- "smlal v20.4s, v30.4h, v3.4h\n"
- "usubl v2.8h, v2.8b, v9.8b\n"
- "smlal v8.4s, v27.4h, v3.4h\n"
- "smlal v6.4s, v23.4h, v3.4h\n"
- "smlal2 v13.4s, v30.8h, v4.8h\n"
- "smlal2 v19.4s, v30.8h, v3.8h\n"
- "ldr d30, [x21, x3]\n"
+ "smlal2 v21.4s, v30.8h, v4.8h\n"
+ "ldr d30, [x14, x1]\n"
+ "smlal2 v6.4s, v23.8h, v3.8h\n"
"ushll v30.8h, v30.8b, #0x0\n"
- "smlal2 v7.4s, v27.8h, v3.8h\n"
- "smlal v11.4s, v22.4h, v0.4h\n"
- "ldr x21, [x2, #0x110]\n"
- "smlal2 v5.4s, v23.8h, v3.8h\n"
- "ldr d3, [x0, #0x68]\n"
- "smlal v20.4s, v26.4h, v4.4h\n"
- "usubl v3.8h, v3.8b, v9.8b\n"
- "smlal v8.4s, v23.4h, v4.4h\n"
- "smlal v6.4s, v28.4h, v4.4h\n"
- "smlal2 v13.4s, v22.8h, v0.8h\n"
- "ldr d22, [x20, x3]\n"
- "smlal2 v19.4s, v26.8h, v4.8h\n"
- "ldr d26, [x14, x3]\n"
- "smlal2 v7.4s, v23.8h, v4.8h\n"
- "smlal v11.4s, v25.4h, v1.4h\n"
+ "smlal v14.4s, v26.4h, v4.4h\n"
+ "smlal v9.4s, v23.4h, v4.4h\n"
+ "ldr d3, [x8, #0x68]\n"
+ "usubl v3.8h, v3.8b, v15.8b\n"
+ "smlal v7.4s, v28.4h, v4.4h\n"
+ "smlal2 v10.4s, v26.8h, v4.8h\n"
+ "ldr d26, [x19, x1]\n"
"ushll v26.8h, v26.8b, #0x0\n"
- "ldr x20, [x2, #0x118]\n"
- "smlal2 v5.4s, v28.8h, v4.8h\n"
- "ldr d4, [x0, #0x70]\n"
- "smlal v20.4s, v25.4h, v0.4h\n"
- "usubl v4.8h, v4.8b, v9.8b\n"
- "smlal v8.4s, v31.4h, v0.4h\n"
- "smlal v6.4s, v30.4h, v0.4h\n"
+ "smlal2 v8.4s, v23.8h, v4.8h\n"
+ "smlal v11.4s, v22.4h, v0.4h\n"
+ "ldr x19, [x0, #0x118]\n"
+ "smlal2 v21.4s, v22.8h, v0.8h\n"
+ "smlal2 v6.4s, v28.8h, v4.8h\n"
+ "ldr d4, [x8, #0x70]\n"
+ "ldr d22, [x11, x1]\n"
+ "smlal v14.4s, v25.4h, v0.4h\n"
+ "smlal v9.4s, v31.4h, v0.4h\n"
+ "usubl v4.8h, v4.8b, v15.8b\n"
+ "smlal v7.4s, v30.4h, v0.4h\n"
+ "smlal2 v10.4s, v25.8h, v0.8h\n"
"ushll v22.8h, v22.8b, #0x0\n"
- "smlal2 v13.4s, v25.8h, v1.8h\n"
- "smlal2 v19.4s, v25.8h, v0.8h\n"
- "ldr d25, [x13, x3]\n"
+ "smlal2 v8.4s, v31.8h, v0.8h\n"
+ "smlal v11.4s, v25.4h, v1.4h\n"
+ "smlal2 v21.4s, v25.8h, v1.8h\n"
+ "ldr d25, [x13, x1]\n"
+ "smlal2 v6.4s, v30.8h, v0.8h\n"
"ushll v25.8h, v25.8b, #0x0\n"
- "smlal2 v7.4s, v31.8h, v0.8h\n"
+ "smlal v14.4s, v24.4h, v1.4h\n"
+ "smlal v9.4s, v30.4h, v1.4h\n"
+ "ldr d0, [x8, #0x78]\n"
+ "usubl v0.8h, v0.8b, v15.8b\n"
+ "smlal v7.4s, v26.4h, v1.4h\n"
+ "smlal2 v10.4s, v24.8h, v1.8h\n"
+ "smlal2 v8.4s, v30.8h, v1.8h\n"
"smlal v11.4s, v24.4h, v2.4h\n"
- "smlal2 v5.4s, v30.8h, v0.8h\n"
- "ldr d0, [x0, #0x78]\n"
- "smlal v20.4s, v24.4h, v1.4h\n"
- "usubl v0.8h, v0.8b, v9.8b\n"
- "smlal v8.4s, v30.4h, v1.4h\n"
- "smlal v6.4s, v26.4h, v1.4h\n"
- "smlal2 v13.4s, v24.8h, v2.8h\n"
- "smlal2 v19.4s, v24.8h, v1.8h\n"
- "ldr d24, [x12, x3]\n"
+ "smlal2 v21.4s, v24.8h, v2.8h\n"
+ "ldr d24, [x12, x1]\n"
+ "smlal2 v6.4s, v26.8h, v1.8h\n"
"ushll v24.8h, v24.8b, #0x0\n"
- "smlal2 v7.4s, v30.8h, v1.8h\n"
+ "smlal v14.4s, v27.4h, v2.4h\n"
+ "smlal v9.4s, v26.4h, v2.4h\n"
+ "ldr d1, [x8, #0x80]\n"
+ "usubl v1.8h, v1.8b, v15.8b\n"
+ "smlal v7.4s, v25.4h, v2.4h\n"
+ "smlal2 v10.4s, v27.8h, v2.8h\n"
+ "smlal2 v8.4s, v26.8h, v2.8h\n"
"smlal v11.4s, v27.4h, v3.4h\n"
- "smlal2 v5.4s, v26.8h, v1.8h\n"
- "ldr d1, [x0, #0x80]\n"
- "smlal v20.4s, v27.4h, v2.4h\n"
- "usubl v1.8h, v1.8b, v9.8b\n"
- "smlal v8.4s, v26.4h, v2.4h\n"
- "smlal v6.4s, v25.4h, v2.4h\n"
- "smlal2 v13.4s, v27.8h, v3.8h\n"
- "smlal2 v19.4s, v27.8h, v2.8h\n"
- "ldr d27, [x11, x3]\n"
+ "smlal2 v21.4s, v27.8h, v3.8h\n"
+ "smlal2 v6.4s, v25.8h, v2.8h\n"
+ "ldr d27, [x10, x1]\n"
"ushll v27.8h, v27.8b, #0x0\n"
- "smlal2 v7.4s, v26.8h, v2.8h\n"
+ "smlal v14.4s, v23.4h, v3.4h\n"
+ "smlal v9.4s, v25.4h, v3.4h\n"
+ "ldr d2, [x8, #0x88]\n"
+ "usubl v2.8h, v2.8b, v15.8b\n"
+ "smlal v7.4s, v24.4h, v3.4h\n"
+ "smlal2 v10.4s, v23.8h, v3.8h\n"
+ "smlal2 v8.4s, v25.8h, v3.8h\n"
"smlal v11.4s, v23.4h, v4.4h\n"
- "smlal2 v5.4s, v25.8h, v2.8h\n"
- "ldr d2, [x0, #0x88]\n"
- "smlal v20.4s, v23.4h, v3.4h\n"
- "usubl v2.8h, v2.8b, v9.8b\n"
- "smlal v8.4s, v25.4h, v3.4h\n"
- "smlal v6.4s, v24.4h, v3.4h\n"
- "smlal2 v13.4s, v23.8h, v4.8h\n"
- "smlal2 v19.4s, v23.8h, v3.8h\n"
- "ldr d23, [x10, x3]\n"
+ "smlal2 v21.4s, v23.8h, v4.8h\n"
+ "ldr d23, [x9, x1]\n"
+ "smlal2 v6.4s, v24.8h, v3.8h\n"
"ushll v23.8h, v23.8b, #0x0\n"
- "smlal2 v7.4s, v25.8h, v3.8h\n"
+ "smlal v14.4s, v28.4h, v4.4h\n"
+ "smlal v9.4s, v24.4h, v4.4h\n"
+ "ldr d3, [x8, #0x90]\n"
+ "usubl v3.8h, v3.8b, v15.8b\n"
+ "smlal v7.4s, v22.4h, v4.4h\n"
+ "smlal2 v10.4s, v28.8h, v4.8h\n"
+ "ldr d28, [x26, x1]\n"
+ "ushll v28.8h, v28.8b, #0x0\n"
+ "smlal2 v8.4s, v24.8h, v4.8h\n"
"smlal v11.4s, v31.4h, v0.4h\n"
- "smlal2 v5.4s, v24.8h, v3.8h\n"
- "ldr d3, [x0, #0x90]\n"
- "smlal v20.4s, v28.4h, v4.4h\n"
- "usubl v3.8h, v3.8b, v9.8b\n"
- "smlal v8.4s, v24.4h, v4.4h\n"
- "smlal v6.4s, v22.4h, v4.4h\n"
- "smlal2 v13.4s, v31.8h, v0.8h\n"
- "ldr d31, [x22, x3]\n"
- "smlal2 v19.4s, v28.8h, v4.8h\n"
- "ldr d28, [x27, x3]\n"
- "smlal2 v7.4s, v24.8h, v4.8h\n"
- "smlal v11.4s, v30.4h, v1.4h\n"
+ "smlal2 v21.4s, v31.8h, v0.8h\n"
+ "ldr d31, [x28, x1]\n"
+ "smlal2 v6.4s, v22.8h, v4.8h\n"
"ushll v31.8h, v31.8b, #0x0\n"
- "smlal2 v5.4s, v22.8h, v4.8h\n"
- "ldr d4, [x0, #0x98]\n"
- "smlal v20.4s, v30.4h, v0.4h\n"
- "usubl v4.8h, v4.8b, v9.8b\n"
- "smlal v8.4s, v27.4h, v0.4h\n"
- "smlal v6.4s, v23.4h, v0.4h\n"
- "ushll v28.8h, v28.8b, #0x0\n"
- "smlal2 v13.4s, v30.8h, v1.8h\n"
- "smlal2 v19.4s, v30.8h, v0.8h\n"
- "ldr d30, [x28, x3]\n"
+ "smlal v14.4s, v30.4h, v0.4h\n"
+ "smlal v9.4s, v27.4h, v0.4h\n"
+ "ldr d4, [x8, #0x98]\n"
+ "usubl v4.8h, v4.8b, v15.8b\n"
+ "smlal v7.4s, v23.4h, v0.4h\n"
+ "smlal2 v10.4s, v30.8h, v0.8h\n"
+ "smlal2 v8.4s, v27.8h, v0.8h\n"
+ "smlal v11.4s, v30.4h, v1.4h\n"
+ "smlal2 v21.4s, v30.8h, v1.8h\n"
+ "ldr d30, [x27, x1]\n"
+ "smlal2 v6.4s, v23.8h, v0.8h\n"
"ushll v30.8h, v30.8b, #0x0\n"
- "smlal2 v7.4s, v27.8h, v0.8h\n"
+ "smlal v14.4s, v26.4h, v1.4h\n"
+ "smlal v9.4s, v23.4h, v1.4h\n"
+ "ldr d0, [x8, #0xa0]\n"
+ "usubl v0.8h, v0.8b, v15.8b\n"
+ "smlal v7.4s, v31.4h, v1.4h\n"
+ "smlal2 v10.4s, v26.8h, v1.8h\n"
+ "smlal2 v8.4s, v23.8h, v1.8h\n"
"smlal v11.4s, v26.4h, v2.4h\n"
- "smlal2 v5.4s, v23.8h, v0.8h\n"
- "ldr d0, [x0, #0xa0]\n"
- "smlal v20.4s, v26.4h, v1.4h\n"
- "usubl v0.8h, v0.8b, v9.8b\n"
- "smlal v8.4s, v23.4h, v1.4h\n"
- "smlal v6.4s, v31.4h, v1.4h\n"
- "smlal2 v13.4s, v26.8h, v2.8h\n"
- "smlal2 v19.4s, v26.8h, v1.8h\n"
- "ldr d26, [x26, x3]\n"
+ "smlal2 v21.4s, v26.8h, v2.8h\n"
+ "smlal2 v6.4s, v31.8h, v1.8h\n"
+ "ldr d26, [x25, x1]\n"
"ushll v26.8h, v26.8b, #0x0\n"
- "smlal2 v7.4s, v23.8h, v1.8h\n"
+ "smlal v14.4s, v25.4h, v2.4h\n"
+ "smlal v9.4s, v31.4h, v2.4h\n"
+ "ldr d1, [x8, #0xa8]\n"
+ "usubl v1.8h, v1.8b, v15.8b\n"
+ "smlal v7.4s, v30.4h, v2.4h\n"
+ "smlal2 v10.4s, v25.8h, v2.8h\n"
+ "smlal2 v8.4s, v31.8h, v2.8h\n"
"smlal v11.4s, v25.4h, v3.4h\n"
- "smlal2 v5.4s, v31.8h, v1.8h\n"
- "ldr d1, [x0, #0xa8]\n"
- "smlal v20.4s, v25.4h, v2.4h\n"
- "usubl v1.8h, v1.8b, v9.8b\n"
- "smlal v8.4s, v31.4h, v2.4h\n"
- "smlal v6.4s, v30.4h, v2.4h\n"
- "smlal2 v13.4s, v25.8h, v3.8h\n"
- "smlal2 v19.4s, v25.8h, v2.8h\n"
- "ldr d25, [x25, x3]\n"
+ "smlal2 v21.4s, v25.8h, v3.8h\n"
+ "smlal2 v6.4s, v30.8h, v2.8h\n"
+ "ldr d25, [x24, x1]\n"
"ushll v25.8h, v25.8b, #0x0\n"
- "smlal2 v7.4s, v31.8h, v2.8h\n"
+ "smlal v14.4s, v24.4h, v3.4h\n"
+ "smlal v9.4s, v30.4h, v3.4h\n"
+ "ldr d2, [x8, #0xb0]\n"
+ "usubl v2.8h, v2.8b, v15.8b\n"
+ "smlal v7.4s, v28.4h, v3.4h\n"
+ "smlal2 v10.4s, v24.8h, v3.8h\n"
+ "smlal2 v8.4s, v30.8h, v3.8h\n"
"smlal v11.4s, v24.4h, v4.4h\n"
- "smlal2 v5.4s, v30.8h, v2.8h\n"
- "ldr d2, [x0, #0xb0]\n"
- "smlal v20.4s, v24.4h, v3.4h\n"
- "usubl v2.8h, v2.8b, v9.8b\n"
- "smlal v8.4s, v30.4h, v3.4h\n"
- "smlal v6.4s, v28.4h, v3.4h\n"
- "smlal2 v13.4s, v24.8h, v4.8h\n"
- "smlal2 v19.4s, v24.8h, v3.8h\n"
- "ldr d24, [x24, x3]\n"
+ "smlal2 v21.4s, v24.8h, v4.8h\n"
+ "ldr d24, [x23, x1]\n"
+ "smlal2 v6.4s, v28.8h, v3.8h\n"
"ushll v24.8h, v24.8b, #0x0\n"
- "smlal2 v7.4s, v30.8h, v3.8h\n"
+ "smlal v14.4s, v22.4h, v4.4h\n"
+ "smlal v9.4s, v28.4h, v4.4h\n"
+ "ldr d3, [x8, #0xb8]\n"
+ "usubl v3.8h, v3.8b, v15.8b\n"
+ "smlal v7.4s, v26.4h, v4.4h\n"
+ "smlal2 v8.4s, v28.8h, v4.8h\n"
"smlal v11.4s, v27.4h, v0.4h\n"
- "smlal2 v5.4s, v28.8h, v3.8h\n"
- "ldr d3, [x0, #0xb8]\n"
- "smlal v20.4s, v22.4h, v4.4h\n"
- "usubl v3.8h, v3.8b, v9.8b\n"
- "smlal v8.4s, v28.4h, v4.4h\n"
- "smlal v6.4s, v26.4h, v4.4h\n"
- "smlal2 v13.4s, v27.8h, v0.8h\n"
- "ldr d27, [x23, x3]\n"
- "smlal2 v7.4s, v28.8h, v4.8h\n"
+ "smlal2 v21.4s, v27.8h, v0.8h\n"
+ "ldr d27, [x22, x1]\n"
"ushll v27.8h, v27.8b, #0x0\n"
- "smlal v11.4s, v23.4h, v1.4h\n"
- "smlal2 v19.4s, v22.8h, v4.8h\n"
- "smlal2 v5.4s, v26.8h, v4.8h\n"
- "ldr d4, [x0, #0xc0]\n"
- "smlal v20.4s, v23.4h, v0.4h\n"
- "usubl v4.8h, v4.8b, v9.8b\n"
- "smlal v8.4s, v25.4h, v0.4h\n"
- "smlal v6.4s, v24.4h, v0.4h\n"
- "smlal2 v13.4s, v23.8h, v1.8h\n"
- "smlal2 v7.4s, v25.8h, v0.8h\n"
- "ldr d25, [x15, x3]\n"
+ "smlal2 v10.4s, v22.8h, v4.8h\n"
+ "smlal2 v6.4s, v26.8h, v4.8h\n"
+ "ldr d4, [x8, #0xc0]\n"
+ "usubl v4.8h, v4.8b, v15.8b\n"
+ "smlal v14.4s, v23.4h, v0.4h\n"
+ "smlal v9.4s, v25.4h, v0.4h\n"
+ "smlal v7.4s, v24.4h, v0.4h\n"
+ "smlal2 v8.4s, v25.8h, v0.8h\n"
+ "ldr d25, [x7, x1]\n"
"ushll v25.8h, v25.8b, #0x0\n"
- "smlal v11.4s, v31.4h, v2.4h\n"
- "smlal2 v19.4s, v23.8h, v0.8h\n"
- "smlal2 v5.4s, v24.8h, v0.8h\n"
- "smlal v20.4s, v31.4h, v1.4h\n"
- "smlal v8.4s, v24.4h, v1.4h\n"
- "smlal v6.4s, v27.4h, v1.4h\n"
- "smlal2 v13.4s, v31.8h, v2.8h\n"
- "smlal2 v7.4s, v24.8h, v1.8h\n"
- "ldr d24, [x21, x3]\n"
+ "smlal2 v10.4s, v23.8h, v0.8h\n"
+ "smlal2 v6.4s, v24.8h, v0.8h\n"
+ "smlal v11.4s, v23.4h, v1.4h\n"
+ "smlal v14.4s, v31.4h, v1.4h\n"
+ "smlal v9.4s, v24.4h, v1.4h\n"
+ "smlal v7.4s, v27.4h, v1.4h\n"
+ "smlal2 v8.4s, v24.8h, v1.8h\n"
+ "ldr d24, [x20, x1]\n"
+ "smlal2 v21.4s, v23.8h, v1.8h\n"
"ushll v24.8h, v24.8b, #0x0\n"
- "smlal v11.4s, v30.4h, v3.4h\n"
- "smlal2 v19.4s, v31.8h, v1.8h\n"
- "smlal2 v5.4s, v27.8h, v1.8h\n"
- "smlal v20.4s, v30.4h, v2.4h\n"
- "smlal v8.4s, v27.4h, v2.4h\n"
- "smlal v6.4s, v25.4h, v2.4h\n"
- "smlal2 v13.4s, v30.8h, v3.8h\n"
- "smlal2 v7.4s, v27.8h, v2.8h\n"
- "ldr d27, [x20, x3]\n"
+ "smlal2 v10.4s, v31.8h, v1.8h\n"
+ "smlal2 v6.4s, v27.8h, v1.8h\n"
+ "smlal v11.4s, v31.4h, v2.4h\n"
+ "smlal v14.4s, v30.4h, v2.4h\n"
+ "smlal v9.4s, v27.4h, v2.4h\n"
+ "smlal v7.4s, v25.4h, v2.4h\n"
+ "smlal2 v8.4s, v27.8h, v2.8h\n"
+ "ldr d27, [x19, x1]\n"
+ "smlal2 v21.4s, v31.8h, v2.8h\n"
"ushll v27.8h, v27.8b, #0x0\n"
+ "smlal2 v10.4s, v30.8h, v2.8h\n"
+ "smlal2 v6.4s, v25.8h, v2.8h\n"
+ "add x1, x1, #0x8\n"
+ "smlal v11.4s, v30.4h, v3.4h\n"
+ "smlal v14.4s, v28.4h, v3.4h\n"
+ "smlal v9.4s, v25.4h, v3.4h\n"
+ "smlal v7.4s, v24.4h, v3.4h\n"
+ "smlal2 v21.4s, v30.8h, v3.8h\n"
+ "smlal2 v10.4s, v28.8h, v3.8h\n"
+ "smlal2 v8.4s, v25.8h, v3.8h\n"
+ "smlal2 v6.4s, v24.8h, v3.8h\n"
"smlal v11.4s, v28.4h, v4.4h\n"
- "smlal2 v19.4s, v30.8h, v2.8h\n"
- "sqrdmulh v11.4s, v11.4s, v18.4s\n"
- "add x3, x3, #0x8\n"
- "smlal2 v5.4s, v25.8h, v2.8h\n"
- "smlal v20.4s, v28.4h, v3.4h\n"
- "and v31.16b, v11.16b, v21.16b\n"
- "smlal v8.4s, v25.4h, v3.4h\n"
- "smlal v6.4s, v24.4h, v3.4h\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "smlal2 v13.4s, v28.8h, v4.8h\n"
- "smlal2 v19.4s, v28.8h, v3.8h\n"
- "sqrdmulh v13.4s, v13.4s, v16.4s\n"
- "smlal2 v7.4s, v25.8h, v3.8h\n"
- "smlal2 v5.4s, v24.8h, v3.8h\n"
- "and v17.16b, v13.16b, v10.16b\n"
- "smlal v20.4s, v26.4h, v4.4h\n"
- "smlal v8.4s, v24.4h, v4.4h\n"
- "sqrdmulh v20.4s, v20.4s, v18.4s\n"
- "smlal v6.4s, v27.4h, v4.4h\n"
- "smlal2 v19.4s, v26.8h, v4.8h\n"
+ "smlal v14.4s, v26.4h, v4.4h\n"
+ "sqrdmulh v11.4s, v11.4s, v17.4s\n"
+ "smlal v9.4s, v24.4h, v4.4h\n"
+ "smlal v7.4s, v27.4h, v4.4h\n"
+ "sqrdmulh v14.4s, v14.4s, v17.4s\n"
+ "smlal2 v21.4s, v28.8h, v4.8h\n"
+ "smlal2 v10.4s, v26.8h, v4.8h\n"
+ "sqrdmulh v9.4s, v9.4s, v17.4s\n"
+ "smlal2 v8.4s, v24.8h, v4.8h\n"
+ "smlal2 v6.4s, v27.8h, v4.8h\n"
+ "sqrdmulh v7.4s, v7.4s, v17.4s\n"
+ "and v23.16b, v11.16b, v5.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v18.4s\n"
+ "and v22.16b, v14.16b, v5.16b\n"
+ "sqrdmulh v10.4s, v10.4s, v18.4s\n"
+ "and v17.16b, v9.16b, v5.16b\n"
"sqrdmulh v8.4s, v8.4s, v18.4s\n"
- "smlal2 v7.4s, v24.8h, v4.8h\n"
- "smlal2 v5.4s, v27.8h, v4.8h\n"
+ "and v20.16b, v7.16b, v5.16b\n"
"sqrdmulh v6.4s, v6.4s, v18.4s\n"
- "sqadd v11.4s, v11.4s, v31.4s\n"
+ "sshr v23.4s, v23.4s, #0x1f\n"
+ "and v19.16b, v21.16b, v29.16b\n"
+ "sshr v22.4s, v22.4s, #0x1f\n"
+ "and v18.16b, v10.16b, v29.16b\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "and v26.16b, v20.16b, v21.16b\n"
- "sqrdmulh v19.4s, v19.4s, v16.4s\n"
- "and v18.16b, v8.16b, v21.16b\n"
- "sqrdmulh v7.4s, v7.4s, v16.4s\n"
- "and v31.16b, v6.16b, v21.16b\n"
- "sqrdmulh v5.4s, v5.4s, v16.4s\n"
- "sqadd v13.4s, v13.4s, v17.4s\n"
- "sshr v26.4s, v26.4s, #0x1f\n"
- "and v27.16b, v19.16b, v10.16b\n"
+ "and v26.16b, v8.16b, v29.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v4.16b, v6.16b, v29.16b\n"
+ "sqadd v11.4s, v11.4s, v23.4s\n"
+ "sshr v19.4s, v19.4s, #0x1f\n"
+ "sqadd v14.4s, v14.4s, v22.4s\n"
"sshr v18.4s, v18.4s, #0x1f\n"
- "and v25.16b, v7.16b, v10.16b\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "and v17.16b, v5.16b, v10.16b\n"
- "sqadd v20.4s, v20.4s, v26.4s\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "sqadd v8.4s, v8.4s, v18.4s\n"
- "sshr v25.4s, v25.4s, #0x1f\n"
- "sqadd v6.4s, v6.4s, v31.4s\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "srshl v11.4s, v11.4s, v21.4s\n"
- "srshl v20.4s, v20.4s, v21.4s\n"
- "sqadd v19.4s, v19.4s, v27.4s\n"
- "srshl v8.4s, v8.4s, v21.4s\n"
- "sqadd v7.4s, v7.4s, v25.4s\n"
- "srshl v6.4s, v6.4s, v21.4s\n"
- "sqadd v5.4s, v5.4s, v17.4s\n"
- "srshl v13.4s, v13.4s, v10.4s\n"
+ "sqadd v9.4s, v9.4s, v17.4s\n"
+ "sshr v26.4s, v26.4s, #0x1f\n"
+ "sqadd v7.4s, v7.4s, v20.4s\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "srshl v11.4s, v11.4s, v5.4s\n"
+ "sqadd v21.4s, v21.4s, v19.4s\n"
+ "srshl v14.4s, v14.4s, v5.4s\n"
+ "sqadd v10.4s, v10.4s, v18.4s\n"
+ "srshl v9.4s, v9.4s, v5.4s\n"
+ "sqadd v8.4s, v8.4s, v26.4s\n"
+ "srshl v7.4s, v7.4s, v5.4s\n"
+ "sqadd v6.4s, v6.4s, v4.4s\n"
+ "srshl v21.4s, v21.4s, v29.4s\n"
"sqxtn v11.4h, v11.4s\n"
- "srshl v19.4s, v19.4s, v10.4s\n"
- "sqxtn v20.4h, v20.4s\n"
- "srshl v7.4s, v7.4s, v10.4s\n"
- "sqxtn v8.4h, v8.4s\n"
- "srshl v5.4s, v5.4s, v10.4s\n"
- "sqxtn v6.4h, v6.4s\n"
- "sqxtn2 v11.8h, v13.4s\n"
- "sqxtn2 v20.8h, v19.4s\n"
- "sqxtn2 v8.8h, v7.4s\n"
- "sqxtn2 v6.8h, v5.4s\n"
- "sqadd v11.8h, v11.8h, v15.8h\n"
- "sqadd v20.8h, v20.8h, v15.8h\n"
- "sqadd v8.8h, v8.8h, v15.8h\n"
- "sqadd v6.8h, v6.8h, v15.8h\n"
- "smax v11.8h, v11.8h, v14.8h\n"
- "smax v20.8h, v20.8h, v14.8h\n"
- "smax v8.8h, v8.8h, v14.8h\n"
- "smax v6.8h, v6.8h, v14.8h\n"
- "smin v11.8h, v11.8h, v12.8h\n"
- "smin v20.8h, v20.8h, v12.8h\n"
- "smin v8.8h, v8.8h, v12.8h\n"
- "smin v6.8h, v6.8h, v12.8h\n"
+ "srshl v10.4s, v10.4s, v29.4s\n"
+ "sqxtn v14.4h, v14.4s\n"
+ "srshl v8.4s, v8.4s, v29.4s\n"
+ "sqxtn v9.4h, v9.4s\n"
+ "srshl v6.4s, v6.4s, v29.4s\n"
+ "sqxtn v7.4h, v7.4s\n"
+ "sqxtn2 v11.8h, v21.4s\n"
+ "sqxtn2 v14.8h, v10.4s\n"
+ "sqxtn2 v9.8h, v8.4s\n"
+ "sqxtn2 v7.8h, v6.4s\n"
+ "sqadd v11.8h, v11.8h, v16.8h\n"
+ "sqadd v14.8h, v14.8h, v16.8h\n"
+ "sqadd v9.8h, v9.8h, v16.8h\n"
+ "sqadd v7.8h, v7.8h, v16.8h\n"
+ "smax v11.8h, v11.8h, v12.8h\n"
+ "smax v14.8h, v14.8h, v12.8h\n"
+ "smax v9.8h, v9.8h, v12.8h\n"
+ "smax v7.8h, v7.8h, v12.8h\n"
+ "smin v11.8h, v11.8h, v13.8h\n"
+ "smin v14.8h, v14.8h, v13.8h\n"
+ "smin v9.8h, v9.8h, v13.8h\n"
+ "smin v7.8h, v7.8h, v13.8h\n"
"uzp1 v11.16b, v11.16b, v11.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "str d11, [x7, x1]\n"
- "uzp1 v8.16b, v8.16b, v8.16b\n"
- "uzp1 v6.16b, v6.16b, v6.16b\n"
- "str d20, [x8, x1]\n"
- "str d8, [x17, x1]\n"
- "str d6, [x16, x1]\n"
- "add x1, x1, #0x8\n"
+ "uzp1 v14.16b, v14.16b, v14.16b\n"
+ "str d11, [x21, x2]\n"
+ "uzp1 v9.16b, v9.16b, v9.16b\n"
+ "uzp1 v7.16b, v7.16b, v7.16b\n"
+ "str d14, [x15, x2]\n"
+ "str d9, [x17, x2]\n"
+ "str d7, [x16, x2]\n"
+ "add x2, x2, #0x8\n"
"beq 124f\n"
- "add x0, x0, #0xc8\n"
+ "add x8, x8, #0xc8\n"
"3:" // Oddments
- "ldr x13, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
"tbz x4, #2, 5f\n"
- "ld1 { v11.4s }, [x13], #0x10\n"
+ "ld1 { v11.4s }, [x19], #0x10\n"
"tbz x4, #1, 4f\n"
- "ld1 { v13.d }[0], [x13], #0x8\n"
+ "ld1 { v21.d }[0], [x19], #0x8\n"
"tbz x4, #0, 7f\n"
- "ld1 { v13.s }[2], [x13]\n"
+ "ld1 { v21.s }[2], [x19]\n"
"b 7f\n"
"4:" // Oddments: Load bias: Bit 2: Bit 1: Unset
"tbz x4, #0, 7f\n"
- "ld1 { v13.s }[0], [x13]\n"
+ "ld1 { v21.s }[0], [x19]\n"
"b 7f\n"
"5:" // Oddments: Load bias: Bit 2: Unset
"tbz x4, #1, 6f\n"
- "ld1 { v11.d }[0], [x13], #0x8\n"
+ "ld1 { v11.d }[0], [x19], #0x8\n"
"tbz x4, #0, 7f\n"
- "ld1 { v11.s }[2], [x13]\n"
+ "ld1 { v11.s }[2], [x19]\n"
"b 7f\n"
"6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset
"tbz x4, #0, 7f\n"
- "ld1 { v11.s }[0], [x13]\n"
+ "ld1 { v11.s }[0], [x19]\n"
"7:" // Oddments: Load bias: Bit 2: End
- "ldr d0, [x0, #0x0]\n"
- "ldr d1, [x0, #0x8]\n"
- "mov v20.16b, v11.16b\n"
- "mov v19.16b, v13.16b\n"
- "ldr d2, [x0, #0x10]\n"
- "ldr d3, [x0, #0x18]\n"
- "mov v8.16b, v11.16b\n"
- "mov v7.16b, v13.16b\n"
- "ldr d4, [x0, #0x20]\n"
- "ldp x10, x28, [x2, #0x0]\n"
- "mov v6.16b, v11.16b\n"
- "mov v5.16b, v13.16b\n"
- "ldp x27, x26, [x2, #0x10]\n"
- "ldp x25, x24, [x2, #0x20]\n"
- "usubl v0.8h, v0.8b, v9.8b\n"
- "usubl v1.8h, v1.8b, v9.8b\n"
- "ldp x23, x22, [x2, #0x30]\n"
- "ldp x21, x20, [x2, #0x40]\n"
- "usubl v2.8h, v2.8b, v9.8b\n"
- "usubl v3.8h, v3.8b, v9.8b\n"
- "usubl v4.8h, v4.8b, v9.8b\n"
- "add x10, x10, x3\n"
- "add x28, x28, x3\n"
- "add x27, x27, x3\n"
- "add x26, x26, x3\n"
- "add x25, x25, x3\n"
- "add x24, x24, x3\n"
- "add x23, x23, x3\n"
- "add x22, x22, x3\n"
- "add x21, x21, x3\n"
- "add x20, x20, x3\n"
+ "ldr d0, [x8, #0x0]\n"
+ "ldr d1, [x8, #0x8]\n"
+ "mov v14.16b, v11.16b\n"
+ "mov v10.16b, v21.16b\n"
+ "ldr d2, [x8, #0x10]\n"
+ "ldr d3, [x8, #0x18]\n"
+ "mov v9.16b, v11.16b\n"
+ "mov v8.16b, v21.16b\n"
+ "ldr d4, [x8, #0x20]\n"
+ "ldp x28, x27, [x0, #0x0]\n"
+ "mov v7.16b, v11.16b\n"
+ "mov v6.16b, v21.16b\n"
+ "ldp x10, x26, [x0, #0x10]\n"
+ "ldp x24, x23, [x0, #0x20]\n"
+ "usubl v0.8h, v0.8b, v15.8b\n"
+ "usubl v1.8h, v1.8b, v15.8b\n"
+ "ldp x22, x25, [x0, #0x30]\n"
+ "ldp x20, x19, [x0, #0x40]\n"
+ "usubl v2.8h, v2.8b, v15.8b\n"
+ "usubl v3.8h, v3.8b, v15.8b\n"
+ "usubl v4.8h, v4.8b, v15.8b\n"
+ "add x28, x28, x1\n"
+ "add x27, x27, x1\n"
+ "add x10, x10, x1\n"
+ "add x26, x26, x1\n"
+ "add x24, x24, x1\n"
+ "add x23, x23, x1\n"
+ "add x22, x22, x1\n"
+ "add x25, x25, x1\n"
+ "add x20, x20, x1\n"
+ "add x19, x19, x1\n"
"tbz x4, #2, 9f\n"
- "ld1 { v31.s }[0], [x10], #0x4\n"
- "ld1 { v30.s }[0], [x28], #0x4\n"
- "ld1 { v29.s }[0], [x27], #0x4\n"
+ "ld1 { v31.s }[0], [x28], #0x4\n"
+ "ld1 { v30.s }[0], [x27], #0x4\n"
+ "ld1 { v29.s }[0], [x10], #0x4\n"
"ld1 { v28.s }[0], [x26], #0x4\n"
- "ld1 { v27.s }[0], [x25], #0x4\n"
- "ld1 { v23.s }[0], [x24], #0x4\n"
- "ld1 { v25.s }[0], [x23], #0x4\n"
- "ld1 { v24.s }[0], [x22], #0x4\n"
- "ld1 { v26.s }[0], [x21], #0x4\n"
- "ld1 { v22.s }[0], [x20], #0x4\n"
+ "ld1 { v27.s }[0], [x24], #0x4\n"
+ "ld1 { v23.s }[0], [x23], #0x4\n"
+ "ld1 { v25.s }[0], [x22], #0x4\n"
+ "ld1 { v24.s }[0], [x25], #0x4\n"
+ "ld1 { v26.s }[0], [x20], #0x4\n"
+ "ld1 { v22.s }[0], [x19], #0x4\n"
"tbz x4, #1, 8f\n"
- "ld1 { v31.h }[2], [x10], #0x2\n"
- "ld1 { v30.h }[2], [x28], #0x2\n"
- "ld1 { v29.h }[2], [x27], #0x2\n"
+ "ld1 { v31.h }[2], [x28], #0x2\n"
+ "ld1 { v30.h }[2], [x27], #0x2\n"
+ "ld1 { v29.h }[2], [x10], #0x2\n"
"ld1 { v28.h }[2], [x26], #0x2\n"
- "ld1 { v27.h }[2], [x25], #0x2\n"
- "ld1 { v23.h }[2], [x24], #0x2\n"
- "ld1 { v25.h }[2], [x23], #0x2\n"
- "ld1 { v24.h }[2], [x22], #0x2\n"
- "ld1 { v26.h }[2], [x21], #0x2\n"
- "ld1 { v22.h }[2], [x20], #0x2\n"
+ "ld1 { v27.h }[2], [x24], #0x2\n"
+ "ld1 { v23.h }[2], [x23], #0x2\n"
+ "ld1 { v25.h }[2], [x22], #0x2\n"
+ "ld1 { v24.h }[2], [x25], #0x2\n"
+ "ld1 { v26.h }[2], [x20], #0x2\n"
+ "ld1 { v22.h }[2], [x19], #0x2\n"
"tbz x4, #0, 11f\n"
- "ld1 { v31.b }[6], [x10]\n"
- "ld1 { v30.b }[6], [x28]\n"
- "ld1 { v29.b }[6], [x27]\n"
+ "ld1 { v31.b }[6], [x28]\n"
+ "ld1 { v30.b }[6], [x27]\n"
+ "ld1 { v29.b }[6], [x10]\n"
"ld1 { v28.b }[6], [x26]\n"
- "ld1 { v27.b }[6], [x25]\n"
- "ld1 { v23.b }[6], [x24]\n"
- "ld1 { v25.b }[6], [x23]\n"
- "ld1 { v24.b }[6], [x22]\n"
- "ld1 { v26.b }[6], [x21]\n"
- "ld1 { v22.b }[6], [x20]\n"
+ "ld1 { v27.b }[6], [x24]\n"
+ "ld1 { v23.b }[6], [x23]\n"
+ "ld1 { v25.b }[6], [x22]\n"
+ "ld1 { v24.b }[6], [x25]\n"
+ "ld1 { v26.b }[6], [x20]\n"
+ "ld1 { v22.b }[6], [x19]\n"
"b 11f\n"
"8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset
"tbz x4, #0, 11f\n"
- "ld1 { v31.b }[4], [x10]\n"
- "ld1 { v30.b }[4], [x28]\n"
- "ld1 { v29.b }[4], [x27]\n"
+ "ld1 { v31.b }[4], [x28]\n"
+ "ld1 { v30.b }[4], [x27]\n"
+ "ld1 { v29.b }[4], [x10]\n"
"ld1 { v28.b }[4], [x26]\n"
- "ld1 { v27.b }[4], [x25]\n"
- "ld1 { v23.b }[4], [x24]\n"
- "ld1 { v25.b }[4], [x23]\n"
- "ld1 { v24.b }[4], [x22]\n"
- "ld1 { v26.b }[4], [x21]\n"
- "ld1 { v22.b }[4], [x20]\n"
+ "ld1 { v27.b }[4], [x24]\n"
+ "ld1 { v23.b }[4], [x23]\n"
+ "ld1 { v25.b }[4], [x22]\n"
+ "ld1 { v24.b }[4], [x25]\n"
+ "ld1 { v26.b }[4], [x20]\n"
+ "ld1 { v22.b }[4], [x19]\n"
"b 11f\n"
"9:" // Oddments: Initial loads: Bit 2: Unset
"tbz x4, #1, 10f\n"
- "ld1 { v31.h }[0], [x10], #0x2\n"
- "ld1 { v30.h }[0], [x28], #0x2\n"
- "ld1 { v29.h }[0], [x27], #0x2\n"
+ "ld1 { v31.h }[0], [x28], #0x2\n"
+ "ld1 { v30.h }[0], [x27], #0x2\n"
+ "ld1 { v29.h }[0], [x10], #0x2\n"
"ld1 { v28.h }[0], [x26], #0x2\n"
- "ld1 { v27.h }[0], [x25], #0x2\n"
- "ld1 { v23.h }[0], [x24], #0x2\n"
- "ld1 { v25.h }[0], [x23], #0x2\n"
- "ld1 { v24.h }[0], [x22], #0x2\n"
- "ld1 { v26.h }[0], [x21], #0x2\n"
- "ld1 { v22.h }[0], [x20], #0x2\n"
+ "ld1 { v27.h }[0], [x24], #0x2\n"
+ "ld1 { v23.h }[0], [x23], #0x2\n"
+ "ld1 { v25.h }[0], [x22], #0x2\n"
+ "ld1 { v24.h }[0], [x25], #0x2\n"
+ "ld1 { v26.h }[0], [x20], #0x2\n"
+ "ld1 { v22.h }[0], [x19], #0x2\n"
"tbz x4, #0, 11f\n"
- "ld1 { v31.b }[2], [x10]\n"
- "ld1 { v30.b }[2], [x28]\n"
- "ld1 { v29.b }[2], [x27]\n"
+ "ld1 { v31.b }[2], [x28]\n"
+ "ld1 { v30.b }[2], [x27]\n"
+ "ld1 { v29.b }[2], [x10]\n"
"ld1 { v28.b }[2], [x26]\n"
- "ld1 { v27.b }[2], [x25]\n"
- "ld1 { v23.b }[2], [x24]\n"
- "ld1 { v25.b }[2], [x23]\n"
- "ld1 { v24.b }[2], [x22]\n"
- "ld1 { v26.b }[2], [x21]\n"
- "ld1 { v22.b }[2], [x20]\n"
+ "ld1 { v27.b }[2], [x24]\n"
+ "ld1 { v23.b }[2], [x23]\n"
+ "ld1 { v25.b }[2], [x22]\n"
+ "ld1 { v24.b }[2], [x25]\n"
+ "ld1 { v26.b }[2], [x20]\n"
+ "ld1 { v22.b }[2], [x19]\n"
"b 11f\n"
"10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset
"tbz x4, #0, 11f\n"
- "ld1 { v31.b }[0], [x10]\n"
- "ld1 { v30.b }[0], [x28]\n"
- "ld1 { v29.b }[0], [x27]\n"
+ "ld1 { v31.b }[0], [x28]\n"
+ "ld1 { v30.b }[0], [x27]\n"
+ "ld1 { v29.b }[0], [x10]\n"
"ld1 { v28.b }[0], [x26]\n"
- "ld1 { v27.b }[0], [x25]\n"
- "ld1 { v23.b }[0], [x24]\n"
- "ld1 { v25.b }[0], [x23]\n"
- "ld1 { v24.b }[0], [x22]\n"
- "ld1 { v26.b }[0], [x21]\n"
- "ld1 { v22.b }[0], [x20]\n"
+ "ld1 { v27.b }[0], [x24]\n"
+ "ld1 { v23.b }[0], [x23]\n"
+ "ld1 { v25.b }[0], [x22]\n"
+ "ld1 { v24.b }[0], [x25]\n"
+ "ld1 { v26.b }[0], [x20]\n"
+ "ld1 { v22.b }[0], [x19]\n"
"11:" // Oddments: Initial loads: Bit 2: End
"ushll v31.8h, v31.8b, #0x0\n"
"ushll v30.8h, v30.8b, #0x0\n"
"smlal v11.4s, v31.4h, v0.4h\n"
- "ldr x20, [x2, #0x50]\n"
+ "ldr x19, [x0, #0x50]\n"
"ushll v29.8h, v29.8b, #0x0\n"
- "smlal2 v13.4s, v31.8h, v0.8h\n"
- "smlal v20.4s, v30.4h, v0.4h\n"
- "smlal2 v19.4s, v30.8h, v0.8h\n"
- "smlal v8.4s, v29.4h, v0.4h\n"
+ "smlal2 v21.4s, v31.8h, v0.8h\n"
+ "smlal v14.4s, v30.4h, v0.4h\n"
+ "smlal2 v10.4s, v30.8h, v0.8h\n"
+ "smlal v9.4s, v29.4h, v0.4h\n"
"ushll v28.8h, v28.8b, #0x0\n"
- "add x20, x20, x3\n"
- "smlal2 v7.4s, v29.8h, v0.8h\n"
+ "add x19, x19, x1\n"
+ "smlal2 v8.4s, v29.8h, v0.8h\n"
"ushll v27.8h, v27.8b, #0x0\n"
- "smlal v6.4s, v28.4h, v0.4h\n"
- "smlal2 v5.4s, v28.8h, v0.8h\n"
+ "smlal v7.4s, v28.4h, v0.4h\n"
+ "smlal2 v6.4s, v28.8h, v0.8h\n"
"smlal v11.4s, v30.4h, v1.4h\n"
"ushll v23.8h, v23.8b, #0x0\n"
- "smlal2 v13.4s, v30.8h, v1.8h\n"
- "smlal v20.4s, v27.4h, v1.4h\n"
+ "smlal2 v21.4s, v30.8h, v1.8h\n"
+ "smlal v14.4s, v27.4h, v1.4h\n"
"ushll v25.8h, v25.8b, #0x0\n"
- "smlal2 v19.4s, v27.8h, v1.8h\n"
- "smlal v8.4s, v28.4h, v1.4h\n"
+ "smlal2 v10.4s, v27.8h, v1.8h\n"
+ "smlal v9.4s, v28.4h, v1.4h\n"
"ushll v24.8h, v24.8b, #0x0\n"
- "smlal2 v7.4s, v28.8h, v1.8h\n"
+ "smlal2 v8.4s, v28.8h, v1.8h\n"
"ushll v26.8h, v26.8b, #0x0\n"
- "smlal v6.4s, v23.4h, v1.4h\n"
+ "smlal v7.4s, v23.4h, v1.4h\n"
"ushll v22.8h, v22.8b, #0x0\n"
- "smlal2 v5.4s, v23.8h, v1.8h\n"
+ "smlal2 v6.4s, v23.8h, v1.8h\n"
"smlal v11.4s, v27.4h, v2.4h\n"
- "smlal2 v13.4s, v27.8h, v2.8h\n"
- "smlal v20.4s, v25.4h, v2.4h\n"
- "smlal2 v19.4s, v25.8h, v2.8h\n"
- "smlal v8.4s, v23.4h, v2.4h\n"
- "smlal2 v7.4s, v23.8h, v2.8h\n"
+ "smlal2 v21.4s, v27.8h, v2.8h\n"
+ "smlal v14.4s, v25.4h, v2.4h\n"
+ "smlal2 v10.4s, v25.8h, v2.8h\n"
+ "smlal v9.4s, v23.4h, v2.4h\n"
+ "smlal2 v8.4s, v23.8h, v2.8h\n"
"tbz x4, #2, 13f\n"
- "ld1 { v31.s }[0], [x20], #0x4\n"
+ "ld1 { v31.s }[0], [x19], #0x4\n"
"tbz x4, #1, 12f\n"
- "ld1 { v31.h }[2], [x20], #0x2\n"
+ "ld1 { v31.h }[2], [x19], #0x2\n"
"tbz x4, #0, 15f\n"
- "ld1 { v31.b }[6], [x20]\n"
+ "ld1 { v31.b }[6], [x19]\n"
"b 15f\n"
"12:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset
"tbz x4, #0, 15f\n"
- "ld1 { v31.b }[4], [x20]\n"
+ "ld1 { v31.b }[4], [x19]\n"
"b 15f\n"
"13:" // Oddments: Load (1, 3): Bit 2: Unset
"tbz x4, #1, 14f\n"
- "ld1 { v31.h }[0], [x20], #0x2\n"
+ "ld1 { v31.h }[0], [x19], #0x2\n"
"tbz x4, #0, 15f\n"
- "ld1 { v31.b }[2], [x20]\n"
+ "ld1 { v31.b }[2], [x19]\n"
"b 15f\n"
"14:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset
"tbz x4, #0, 15f\n"
- "ld1 { v31.b }[0], [x20]\n"
+ "ld1 { v31.b }[0], [x19]\n"
"15:" // Oddments: Load (1, 3): Bit 2: End
"ushll v31.8h, v31.8b, #0x0\n"
- "ldr x22, [x2, #0x58]\n"
- "smlal v6.4s, v31.4h, v2.4h\n"
- "smlal2 v5.4s, v31.8h, v2.8h\n"
+ "ldr x20, [x0, #0x58]\n"
+ "smlal v7.4s, v31.4h, v2.4h\n"
+ "smlal2 v6.4s, v31.8h, v2.8h\n"
"smlal v11.4s, v25.4h, v3.4h\n"
- "smlal2 v13.4s, v25.8h, v3.8h\n"
- "add x22, x22, x3\n"
- "smlal v20.4s, v24.4h, v3.4h\n"
- "smlal2 v19.4s, v24.8h, v3.8h\n"
- "smlal v8.4s, v31.4h, v3.4h\n"
- "smlal2 v7.4s, v31.8h, v3.8h\n"
+ "smlal2 v21.4s, v25.8h, v3.8h\n"
+ "add x20, x20, x1\n"
+ "smlal v14.4s, v24.4h, v3.4h\n"
+ "smlal2 v10.4s, v24.8h, v3.8h\n"
+ "smlal v9.4s, v31.4h, v3.4h\n"
+ "smlal2 v8.4s, v31.8h, v3.8h\n"
"tbz x4, #2, 17f\n"
- "ld1 { v30.s }[0], [x22], #0x4\n"
+ "ld1 { v30.s }[0], [x20], #0x4\n"
"tbz x4, #1, 16f\n"
- "ld1 { v30.h }[2], [x22], #0x2\n"
+ "ld1 { v30.h }[2], [x20], #0x2\n"
"tbz x4, #0, 19f\n"
- "ld1 { v30.b }[6], [x22]\n"
+ "ld1 { v30.b }[6], [x20]\n"
"b 19f\n"
"16:" // Oddments: Load (1, 4): Bit 2: Bit 1: Unset
"tbz x4, #0, 19f\n"
- "ld1 { v30.b }[4], [x22]\n"
+ "ld1 { v30.b }[4], [x20]\n"
"b 19f\n"
"17:" // Oddments: Load (1, 4): Bit 2: Unset
"tbz x4, #1, 18f\n"
- "ld1 { v30.h }[0], [x22], #0x2\n"
+ "ld1 { v30.h }[0], [x20], #0x2\n"
"tbz x4, #0, 19f\n"
- "ld1 { v30.b }[2], [x22]\n"
+ "ld1 { v30.b }[2], [x20]\n"
"b 19f\n"
"18:" // Oddments: Load (1, 4): Bit 2: Unset: Bit 1: Unset
"tbz x4, #0, 19f\n"
- "ld1 { v30.b }[0], [x22]\n"
+ "ld1 { v30.b }[0], [x20]\n"
"19:" // Oddments: Load (1, 4): Bit 2: End
"ushll v30.8h, v30.8b, #0x0\n"
- "ldr x21, [x2, #0x60]\n"
- "smlal v6.4s, v30.4h, v3.4h\n"
- "smlal2 v5.4s, v30.8h, v3.8h\n"
+ "ldr x19, [x0, #0x60]\n"
+ "smlal v7.4s, v30.4h, v3.4h\n"
+ "smlal2 v6.4s, v30.8h, v3.8h\n"
"smlal v11.4s, v24.4h, v4.4h\n"
- "smlal2 v13.4s, v24.8h, v4.8h\n"
- "add x21, x21, x3\n"
+ "smlal2 v21.4s, v24.8h, v4.8h\n"
+ "add x19, x19, x1\n"
"tbz x4, #2, 21f\n"
- "ld1 { v27.s }[0], [x21], #0x4\n"
+ "ld1 { v27.s }[0], [x19], #0x4\n"
"tbz x4, #1, 20f\n"
- "ld1 { v27.h }[2], [x21], #0x2\n"
+ "ld1 { v27.h }[2], [x19], #0x2\n"
"tbz x4, #0, 23f\n"
- "ld1 { v27.b }[6], [x21]\n"
+ "ld1 { v27.b }[6], [x19]\n"
"b 23f\n"
"20:" // Oddments: Load (0, 5): Bit 2: Bit 1: Unset
"tbz x4, #0, 23f\n"
- "ld1 { v27.b }[4], [x21]\n"
+ "ld1 { v27.b }[4], [x19]\n"
"b 23f\n"
"21:" // Oddments: Load (0, 5): Bit 2: Unset
"tbz x4, #1, 22f\n"
- "ld1 { v27.h }[0], [x21], #0x2\n"
+ "ld1 { v27.h }[0], [x19], #0x2\n"
"tbz x4, #0, 23f\n"
- "ld1 { v27.b }[2], [x21]\n"
+ "ld1 { v27.b }[2], [x19]\n"
"b 23f\n"
"22:" // Oddments: Load (0, 5): Bit 2: Unset: Bit 1: Unset
"tbz x4, #0, 23f\n"
- "ld1 { v27.b }[0], [x21]\n"
+ "ld1 { v27.b }[0], [x19]\n"
"23:" // Oddments: Load (0, 5): Bit 2: End
- "ldr d0, [x0, #0x28]\n"
"ushll v27.8h, v27.8b, #0x0\n"
- "smlal v20.4s, v27.4h, v4.4h\n"
- "smlal2 v19.4s, v27.8h, v4.8h\n"
- "smlal v8.4s, v30.4h, v4.4h\n"
- "smlal2 v7.4s, v30.8h, v4.8h\n"
- "usubl v0.8h, v0.8b, v9.8b\n"
- "ldr x20, [x2, #0x68]\n"
- "smlal v6.4s, v26.4h, v4.4h\n"
- "smlal2 v5.4s, v26.8h, v4.8h\n"
- "add x20, x20, x3\n"
+ "ldr d0, [x8, #0x28]\n"
+ "smlal v14.4s, v27.4h, v4.4h\n"
+ "smlal2 v10.4s, v27.8h, v4.8h\n"
+ "smlal v9.4s, v30.4h, v4.4h\n"
+ "smlal2 v8.4s, v30.8h, v4.8h\n"
+ "usubl v0.8h, v0.8b, v15.8b\n"
+ "ldr x24, [x0, #0x68]\n"
+ "smlal v7.4s, v26.4h, v4.4h\n"
+ "smlal2 v6.4s, v26.8h, v4.8h\n"
+ "add x24, x24, x1\n"
"smlal v11.4s, v29.4h, v0.4h\n"
- "smlal2 v13.4s, v29.8h, v0.8h\n"
- "smlal v20.4s, v28.4h, v0.4h\n"
- "smlal2 v19.4s, v28.8h, v0.8h\n"
- "smlal v8.4s, v22.4h, v0.4h\n"
- "smlal2 v7.4s, v22.8h, v0.8h\n"
+ "smlal2 v21.4s, v29.8h, v0.8h\n"
+ "smlal v14.4s, v28.4h, v0.4h\n"
+ "smlal2 v10.4s, v28.8h, v0.8h\n"
+ "smlal v9.4s, v22.4h, v0.4h\n"
+ "smlal2 v8.4s, v22.8h, v0.8h\n"
"tbz x4, #2, 25f\n"
- "ld1 { v25.s }[0], [x20], #0x4\n"
+ "ld1 { v25.s }[0], [x24], #0x4\n"
"tbz x4, #1, 24f\n"
- "ld1 { v25.h }[2], [x20], #0x2\n"
+ "ld1 { v25.h }[2], [x24], #0x2\n"
"tbz x4, #0, 27f\n"
- "ld1 { v25.b }[6], [x20]\n"
+ "ld1 { v25.b }[6], [x24]\n"
"b 27f\n"
"24:" // Oddments: Load (2, 1): Bit 2: Bit 1: Unset
"tbz x4, #0, 27f\n"
- "ld1 { v25.b }[4], [x20]\n"
+ "ld1 { v25.b }[4], [x24]\n"
"b 27f\n"
"25:" // Oddments: Load (2, 1): Bit 2: Unset
"tbz x4, #1, 26f\n"
- "ld1 { v25.h }[0], [x20], #0x2\n"
+ "ld1 { v25.h }[0], [x24], #0x2\n"
"tbz x4, #0, 27f\n"
- "ld1 { v25.b }[2], [x20]\n"
+ "ld1 { v25.b }[2], [x24]\n"
"b 27f\n"
"26:" // Oddments: Load (2, 1): Bit 2: Unset: Bit 1: Unset
"tbz x4, #0, 27f\n"
- "ld1 { v25.b }[0], [x20]\n"
+ "ld1 { v25.b }[0], [x24]\n"
"27:" // Oddments: Load (2, 1): Bit 2: End
- "ldr d1, [x0, #0x30]\n"
+ "ldr d1, [x8, #0x30]\n"
"ushll v25.8h, v25.8b, #0x0\n"
- "usubl v1.8h, v1.8b, v9.8b\n"
- "ldr x25, [x2, #0x70]\n"
- "smlal v6.4s, v25.4h, v0.4h\n"
- "smlal2 v5.4s, v25.8h, v0.8h\n"
- "add x25, x25, x3\n"
+ "usubl v1.8h, v1.8b, v15.8b\n"
+ "ldr x23, [x0, #0x70]\n"
+ "smlal v7.4s, v25.4h, v0.4h\n"
+ "smlal2 v6.4s, v25.8h, v0.8h\n"
+ "add x23, x23, x1\n"
"smlal v11.4s, v28.4h, v1.4h\n"
- "smlal2 v13.4s, v28.8h, v1.8h\n"
- "smlal v20.4s, v23.4h, v1.4h\n"
- "smlal2 v19.4s, v23.8h, v1.8h\n"
- "smlal v8.4s, v25.4h, v1.4h\n"
- "smlal2 v7.4s, v25.8h, v1.8h\n"
+ "smlal2 v21.4s, v28.8h, v1.8h\n"
+ "smlal v14.4s, v23.4h, v1.4h\n"
+ "smlal2 v10.4s, v23.8h, v1.8h\n"
+ "smlal v9.4s, v25.4h, v1.4h\n"
+ "smlal2 v8.4s, v25.8h, v1.8h\n"
"tbz x4, #2, 29f\n"
- "ld1 { v24.s }[0], [x25], #0x4\n"
+ "ld1 { v24.s }[0], [x23], #0x4\n"
"tbz x4, #1, 28f\n"
- "ld1 { v24.h }[2], [x25], #0x2\n"
+ "ld1 { v24.h }[2], [x23], #0x2\n"
"tbz x4, #0, 31f\n"
- "ld1 { v24.b }[6], [x25]\n"
+ "ld1 { v24.b }[6], [x23]\n"
"b 31f\n"
"28:" // Oddments: Load (2, 2): Bit 2: Bit 1: Unset
"tbz x4, #0, 31f\n"
- "ld1 { v24.b }[4], [x25]\n"
+ "ld1 { v24.b }[4], [x23]\n"
"b 31f\n"
"29:" // Oddments: Load (2, 2): Bit 2: Unset
"tbz x4, #1, 30f\n"
- "ld1 { v24.h }[0], [x25], #0x2\n"
+ "ld1 { v24.h }[0], [x23], #0x2\n"
"tbz x4, #0, 31f\n"
- "ld1 { v24.b }[2], [x25]\n"
+ "ld1 { v24.b }[2], [x23]\n"
"b 31f\n"
"30:" // Oddments: Load (2, 2): Bit 2: Unset: Bit 1: Unset
"tbz x4, #0, 31f\n"
- "ld1 { v24.b }[0], [x25]\n"
+ "ld1 { v24.b }[0], [x23]\n"
"31:" // Oddments: Load (2, 2): Bit 2: End
- "ldr d2, [x0, #0x38]\n"
+ "ldr d2, [x8, #0x38]\n"
"ushll v24.8h, v24.8b, #0x0\n"
- "usubl v2.8h, v2.8b, v9.8b\n"
- "ldr x26, [x2, #0x78]\n"
- "smlal v6.4s, v24.4h, v1.4h\n"
- "smlal2 v5.4s, v24.8h, v1.8h\n"
- "add x26, x26, x3\n"
+ "usubl v2.8h, v2.8b, v15.8b\n"
+ "ldr x26, [x0, #0x78]\n"
+ "smlal v7.4s, v24.4h, v1.4h\n"
+ "smlal2 v6.4s, v24.8h, v1.8h\n"
+ "add x26, x26, x1\n"
"smlal v11.4s, v23.4h, v2.4h\n"
- "smlal2 v13.4s, v23.8h, v2.8h\n"
- "smlal v20.4s, v31.4h, v2.4h\n"
- "smlal2 v19.4s, v31.8h, v2.8h\n"
- "smlal v8.4s, v24.4h, v2.4h\n"
- "smlal2 v7.4s, v24.8h, v2.8h\n"
+ "smlal2 v21.4s, v23.8h, v2.8h\n"
+ "smlal v14.4s, v31.4h, v2.4h\n"
+ "smlal2 v10.4s, v31.8h, v2.8h\n"
+ "smlal v9.4s, v24.4h, v2.4h\n"
+ "smlal2 v8.4s, v24.8h, v2.8h\n"
"tbz x4, #2, 33f\n"
"ld1 { v27.s }[0], [x26], #0x4\n"
"tbz x4, #1, 32f\n"
@@ -1381,179 +1381,179 @@ void a64_u8qa_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(
"tbz x4, #0, 35f\n"
"ld1 { v27.b }[0], [x26]\n"
"35:" // Oddments: Load (2, 3): Bit 2: End
- "ldr d3, [x0, #0x40]\n"
+ "ldr d3, [x8, #0x40]\n"
"ushll v27.8h, v27.8b, #0x0\n"
- "usubl v3.8h, v3.8b, v9.8b\n"
- "ldr x23, [x2, #0x80]\n"
- "smlal v6.4s, v27.4h, v2.4h\n"
- "smlal2 v5.4s, v27.8h, v2.8h\n"
- "add x23, x23, x3\n"
+ "usubl v3.8h, v3.8b, v15.8b\n"
+ "ldr x7, [x0, #0x80]\n"
+ "smlal v7.4s, v27.4h, v2.4h\n"
+ "smlal2 v6.4s, v27.8h, v2.8h\n"
+ "add x7, x7, x1\n"
"smlal v11.4s, v31.4h, v3.4h\n"
- "smlal2 v13.4s, v31.8h, v3.8h\n"
- "smlal v20.4s, v30.4h, v3.4h\n"
- "smlal2 v19.4s, v30.8h, v3.8h\n"
- "smlal v8.4s, v27.4h, v3.4h\n"
- "smlal2 v7.4s, v27.8h, v3.8h\n"
+ "smlal2 v21.4s, v31.8h, v3.8h\n"
+ "smlal v14.4s, v30.4h, v3.4h\n"
+ "smlal2 v10.4s, v30.8h, v3.8h\n"
+ "smlal v9.4s, v27.4h, v3.4h\n"
+ "smlal2 v8.4s, v27.8h, v3.8h\n"
"tbz x4, #2, 37f\n"
- "ld1 { v23.s }[0], [x23], #0x4\n"
+ "ld1 { v23.s }[0], [x7], #0x4\n"
"tbz x4, #1, 36f\n"
- "ld1 { v23.h }[2], [x23], #0x2\n"
+ "ld1 { v23.h }[2], [x7], #0x2\n"
"tbz x4, #0, 39f\n"
- "ld1 { v23.b }[6], [x23]\n"
+ "ld1 { v23.b }[6], [x7]\n"
"b 39f\n"
"36:" // Oddments: Load (2, 4): Bit 2: Bit 1: Unset
"tbz x4, #0, 39f\n"
- "ld1 { v23.b }[4], [x23]\n"
+ "ld1 { v23.b }[4], [x7]\n"
"b 39f\n"
"37:" // Oddments: Load (2, 4): Bit 2: Unset
"tbz x4, #1, 38f\n"
- "ld1 { v23.h }[0], [x23], #0x2\n"
+ "ld1 { v23.h }[0], [x7], #0x2\n"
"tbz x4, #0, 39f\n"
- "ld1 { v23.b }[2], [x23]\n"
+ "ld1 { v23.b }[2], [x7]\n"
"b 39f\n"
"38:" // Oddments: Load (2, 4): Bit 2: Unset: Bit 1: Unset
"tbz x4, #0, 39f\n"
- "ld1 { v23.b }[0], [x23]\n"
+ "ld1 { v23.b }[0], [x7]\n"
"39:" // Oddments: Load (2, 4): Bit 2: End
- "ldr d4, [x0, #0x48]\n"
+ "ldr d4, [x8, #0x48]\n"
"ushll v23.8h, v23.8b, #0x0\n"
- "usubl v4.8h, v4.8b, v9.8b\n"
- "ldr x24, [x2, #0x88]\n"
- "smlal v6.4s, v23.4h, v3.4h\n"
- "smlal2 v5.4s, v23.8h, v3.8h\n"
- "add x24, x24, x3\n"
+ "usubl v4.8h, v4.8b, v15.8b\n"
+ "ldr x22, [x0, #0x88]\n"
+ "smlal v7.4s, v23.4h, v3.4h\n"
+ "smlal2 v6.4s, v23.8h, v3.8h\n"
+ "add x22, x22, x1\n"
"smlal v11.4s, v30.4h, v4.4h\n"
- "smlal2 v13.4s, v30.8h, v4.8h\n"
- "smlal v20.4s, v26.4h, v4.4h\n"
- "smlal2 v19.4s, v26.8h, v4.8h\n"
- "smlal v8.4s, v23.4h, v4.4h\n"
- "smlal2 v7.4s, v23.8h, v4.8h\n"
+ "smlal2 v21.4s, v30.8h, v4.8h\n"
+ "smlal v14.4s, v26.4h, v4.4h\n"
+ "smlal2 v10.4s, v26.8h, v4.8h\n"
+ "smlal v9.4s, v23.4h, v4.4h\n"
+ "smlal2 v8.4s, v23.8h, v4.8h\n"
"tbz x4, #2, 41f\n"
- "ld1 { v28.s }[0], [x24], #0x4\n"
+ "ld1 { v28.s }[0], [x22], #0x4\n"
"tbz x4, #1, 40f\n"
- "ld1 { v28.h }[2], [x24], #0x2\n"
+ "ld1 { v28.h }[2], [x22], #0x2\n"
"tbz x4, #0, 43f\n"
- "ld1 { v28.b }[6], [x24]\n"
+ "ld1 { v28.b }[6], [x22]\n"
"b 43f\n"
"40:" // Oddments: Load (2, 5): Bit 2: Bit 1: Unset
"tbz x4, #0, 43f\n"
- "ld1 { v28.b }[4], [x24]\n"
+ "ld1 { v28.b }[4], [x22]\n"
"b 43f\n"
"41:" // Oddments: Load (2, 5): Bit 2: Unset
"tbz x4, #1, 42f\n"
- "ld1 { v28.h }[0], [x24], #0x2\n"
+ "ld1 { v28.h }[0], [x22], #0x2\n"
"tbz x4, #0, 43f\n"
- "ld1 { v28.b }[2], [x24]\n"
+ "ld1 { v28.b }[2], [x22]\n"
"b 43f\n"
"42:" // Oddments: Load (2, 5): Bit 2: Unset: Bit 1: Unset
"tbz x4, #0, 43f\n"
- "ld1 { v28.b }[0], [x24]\n"
+ "ld1 { v28.b }[0], [x22]\n"
"43:" // Oddments: Load (2, 5): Bit 2: End
- "ldr d0, [x0, #0x50]\n"
+ "ldr d0, [x8, #0x50]\n"
"ushll v28.8h, v28.8b, #0x0\n"
- "usubl v0.8h, v0.8b, v9.8b\n"
- "ldr x15, [x2, #0x90]\n"
- "smlal v6.4s, v28.4h, v4.4h\n"
- "smlal2 v5.4s, v28.8h, v4.8h\n"
- "add x15, x15, x3\n"
+ "usubl v0.8h, v0.8b, v15.8b\n"
+ "ldr x20, [x0, #0x90]\n"
+ "smlal v7.4s, v28.4h, v4.4h\n"
+ "smlal2 v6.4s, v28.8h, v4.8h\n"
+ "add x20, x20, x1\n"
"smlal v11.4s, v22.4h, v0.4h\n"
- "smlal2 v13.4s, v22.8h, v0.8h\n"
- "smlal v20.4s, v25.4h, v0.4h\n"
- "smlal2 v19.4s, v25.8h, v0.8h\n"
+ "smlal2 v21.4s, v22.8h, v0.8h\n"
+ "smlal v14.4s, v25.4h, v0.4h\n"
+ "smlal2 v10.4s, v25.8h, v0.8h\n"
"tbz x4, #2, 45f\n"
- "ld1 { v31.s }[0], [x15], #0x4\n"
+ "ld1 { v31.s }[0], [x20], #0x4\n"
"tbz x4, #1, 44f\n"
- "ld1 { v31.h }[2], [x15], #0x2\n"
+ "ld1 { v31.h }[2], [x20], #0x2\n"
"tbz x4, #0, 47f\n"
- "ld1 { v31.b }[6], [x15]\n"
+ "ld1 { v31.b }[6], [x20]\n"
"b 47f\n"
"44:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset
"tbz x4, #0, 47f\n"
- "ld1 { v31.b }[4], [x15]\n"
+ "ld1 { v31.b }[4], [x20]\n"
"b 47f\n"
"45:" // Oddments: Load (3, 0): Bit 2: Unset
"tbz x4, #1, 46f\n"
- "ld1 { v31.h }[0], [x15], #0x2\n"
+ "ld1 { v31.h }[0], [x20], #0x2\n"
"tbz x4, #0, 47f\n"
- "ld1 { v31.b }[2], [x15]\n"
+ "ld1 { v31.b }[2], [x20]\n"
"b 47f\n"
"46:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset
"tbz x4, #0, 47f\n"
- "ld1 { v31.b }[0], [x15]\n"
+ "ld1 { v31.b }[0], [x20]\n"
"47:" // Oddments: Load (3, 0): Bit 2: End
"ushll v31.8h, v31.8b, #0x0\n"
- "ldr x21, [x2, #0x98]\n"
- "smlal v8.4s, v31.4h, v0.4h\n"
- "smlal2 v7.4s, v31.8h, v0.8h\n"
- "add x21, x21, x3\n"
+ "ldr x14, [x0, #0x98]\n"
+ "smlal v9.4s, v31.4h, v0.4h\n"
+ "smlal2 v8.4s, v31.8h, v0.8h\n"
+ "add x14, x14, x1\n"
"tbz x4, #2, 49f\n"
- "ld1 { v30.s }[0], [x21], #0x4\n"
+ "ld1 { v30.s }[0], [x14], #0x4\n"
"tbz x4, #1, 48f\n"
- "ld1 { v30.h }[2], [x21], #0x2\n"
+ "ld1 { v30.h }[2], [x14], #0x2\n"
"tbz x4, #0, 51f\n"
- "ld1 { v30.b }[6], [x21]\n"
+ "ld1 { v30.b }[6], [x14]\n"
"b 51f\n"
"48:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset
"tbz x4, #0, 51f\n"
- "ld1 { v30.b }[4], [x21]\n"
+ "ld1 { v30.b }[4], [x14]\n"
"b 51f\n"
"49:" // Oddments: Load (3, 1): Bit 2: Unset
"tbz x4, #1, 50f\n"
- "ld1 { v30.h }[0], [x21], #0x2\n"
+ "ld1 { v30.h }[0], [x14], #0x2\n"
"tbz x4, #0, 51f\n"
- "ld1 { v30.b }[2], [x21]\n"
+ "ld1 { v30.b }[2], [x14]\n"
"b 51f\n"
"50:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset
"tbz x4, #0, 51f\n"
- "ld1 { v30.b }[0], [x21]\n"
+ "ld1 { v30.b }[0], [x14]\n"
"51:" // Oddments: Load (3, 1): Bit 2: End
- "ldr d1, [x0, #0x58]\n"
+ "ldr d1, [x8, #0x58]\n"
"ushll v30.8h, v30.8b, #0x0\n"
- "usubl v1.8h, v1.8b, v9.8b\n"
- "ldr x14, [x2, #0xa0]\n"
- "smlal v6.4s, v30.4h, v0.4h\n"
- "smlal2 v5.4s, v30.8h, v0.8h\n"
- "add x14, x14, x3\n"
+ "usubl v1.8h, v1.8b, v15.8b\n"
+ "ldr x19, [x0, #0xa0]\n"
+ "smlal v7.4s, v30.4h, v0.4h\n"
+ "smlal2 v6.4s, v30.8h, v0.8h\n"
+ "add x19, x19, x1\n"
"smlal v11.4s, v25.4h, v1.4h\n"
- "smlal2 v13.4s, v25.8h, v1.8h\n"
- "smlal v20.4s, v24.4h, v1.4h\n"
- "smlal2 v19.4s, v24.8h, v1.8h\n"
- "smlal v8.4s, v30.4h, v1.4h\n"
- "smlal2 v7.4s, v30.8h, v1.8h\n"
+ "smlal2 v21.4s, v25.8h, v1.8h\n"
+ "smlal v14.4s, v24.4h, v1.4h\n"
+ "smlal2 v10.4s, v24.8h, v1.8h\n"
+ "smlal v9.4s, v30.4h, v1.4h\n"
+ "smlal2 v8.4s, v30.8h, v1.8h\n"
"tbz x4, #2, 53f\n"
- "ld1 { v26.s }[0], [x14], #0x4\n"
+ "ld1 { v26.s }[0], [x19], #0x4\n"
"tbz x4, #1, 52f\n"
- "ld1 { v26.h }[2], [x14], #0x2\n"
+ "ld1 { v26.h }[2], [x19], #0x2\n"
"tbz x4, #0, 55f\n"
- "ld1 { v26.b }[6], [x14]\n"
+ "ld1 { v26.b }[6], [x19]\n"
"b 55f\n"
"52:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset
"tbz x4, #0, 55f\n"
- "ld1 { v26.b }[4], [x14]\n"
+ "ld1 { v26.b }[4], [x19]\n"
"b 55f\n"
"53:" // Oddments: Load (3, 2): Bit 2: Unset
"tbz x4, #1, 54f\n"
- "ld1 { v26.h }[0], [x14], #0x2\n"
+ "ld1 { v26.h }[0], [x19], #0x2\n"
"tbz x4, #0, 55f\n"
- "ld1 { v26.b }[2], [x14]\n"
+ "ld1 { v26.b }[2], [x19]\n"
"b 55f\n"
"54:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset
"tbz x4, #0, 55f\n"
- "ld1 { v26.b }[0], [x14]\n"
+ "ld1 { v26.b }[0], [x19]\n"
"55:" // Oddments: Load (3, 2): Bit 2: End
- "ldr d2, [x0, #0x60]\n"
+ "ldr d2, [x8, #0x60]\n"
"ushll v26.8h, v26.8b, #0x0\n"
- "usubl v2.8h, v2.8b, v9.8b\n"
- "ldr x13, [x2, #0xa8]\n"
- "smlal v6.4s, v26.4h, v1.4h\n"
- "smlal2 v5.4s, v26.8h, v1.8h\n"
- "add x13, x13, x3\n"
+ "usubl v2.8h, v2.8b, v15.8b\n"
+ "ldr x13, [x0, #0xa8]\n"
+ "smlal v7.4s, v26.4h, v1.4h\n"
+ "smlal2 v6.4s, v26.8h, v1.8h\n"
+ "add x13, x13, x1\n"
"smlal v11.4s, v24.4h, v2.4h\n"
- "smlal2 v13.4s, v24.8h, v2.8h\n"
- "smlal v20.4s, v27.4h, v2.4h\n"
- "smlal2 v19.4s, v27.8h, v2.8h\n"
- "smlal v8.4s, v26.4h, v2.4h\n"
- "smlal2 v7.4s, v26.8h, v2.8h\n"
+ "smlal2 v21.4s, v24.8h, v2.8h\n"
+ "smlal v14.4s, v27.4h, v2.4h\n"
+ "smlal2 v10.4s, v27.8h, v2.8h\n"
+ "smlal v9.4s, v26.4h, v2.4h\n"
+ "smlal2 v8.4s, v26.8h, v2.8h\n"
"tbz x4, #2, 57f\n"
"ld1 { v25.s }[0], [x13], #0x4\n"
"tbz x4, #1, 56f\n"
@@ -1575,19 +1575,19 @@ void a64_u8qa_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(
"tbz x4, #0, 59f\n"
"ld1 { v25.b }[0], [x13]\n"
"59:" // Oddments: Load (3, 3): Bit 2: End
- "ldr d3, [x0, #0x68]\n"
+ "ldr d3, [x8, #0x68]\n"
"ushll v25.8h, v25.8b, #0x0\n"
- "usubl v3.8h, v3.8b, v9.8b\n"
- "ldr x12, [x2, #0xb0]\n"
- "smlal v6.4s, v25.4h, v2.4h\n"
- "smlal2 v5.4s, v25.8h, v2.8h\n"
- "add x12, x12, x3\n"
+ "usubl v3.8h, v3.8b, v15.8b\n"
+ "ldr x12, [x0, #0xb0]\n"
+ "smlal v7.4s, v25.4h, v2.4h\n"
+ "smlal2 v6.4s, v25.8h, v2.8h\n"
+ "add x12, x12, x1\n"
"smlal v11.4s, v27.4h, v3.4h\n"
- "smlal2 v13.4s, v27.8h, v3.8h\n"
- "smlal v20.4s, v23.4h, v3.4h\n"
- "smlal2 v19.4s, v23.8h, v3.8h\n"
- "smlal v8.4s, v25.4h, v3.4h\n"
- "smlal2 v7.4s, v25.8h, v3.8h\n"
+ "smlal2 v21.4s, v27.8h, v3.8h\n"
+ "smlal v14.4s, v23.4h, v3.4h\n"
+ "smlal2 v10.4s, v23.8h, v3.8h\n"
+ "smlal v9.4s, v25.4h, v3.4h\n"
+ "smlal2 v8.4s, v25.8h, v3.8h\n"
"tbz x4, #2, 61f\n"
"ld1 { v24.s }[0], [x12], #0x4\n"
"tbz x4, #1, 60f\n"
@@ -1609,573 +1609,573 @@ void a64_u8qa_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(
"tbz x4, #0, 63f\n"
"ld1 { v24.b }[0], [x12]\n"
"63:" // Oddments: Load (3, 4): Bit 2: End
- "ldr d4, [x0, #0x70]\n"
+ "ldr d4, [x8, #0x70]\n"
"ushll v24.8h, v24.8b, #0x0\n"
- "usubl v4.8h, v4.8b, v9.8b\n"
- "ldr x20, [x2, #0xb8]\n"
- "smlal v6.4s, v24.4h, v3.4h\n"
- "smlal2 v5.4s, v24.8h, v3.8h\n"
- "add x20, x20, x3\n"
+ "usubl v4.8h, v4.8b, v15.8b\n"
+ "ldr x11, [x0, #0xb8]\n"
+ "smlal v7.4s, v24.4h, v3.4h\n"
+ "smlal2 v6.4s, v24.8h, v3.8h\n"
+ "add x11, x11, x1\n"
"smlal v11.4s, v23.4h, v4.4h\n"
- "smlal2 v13.4s, v23.8h, v4.8h\n"
- "smlal v20.4s, v28.4h, v4.4h\n"
- "smlal2 v19.4s, v28.8h, v4.8h\n"
- "smlal v8.4s, v24.4h, v4.4h\n"
- "smlal2 v7.4s, v24.8h, v4.8h\n"
+ "smlal2 v21.4s, v23.8h, v4.8h\n"
+ "smlal v14.4s, v28.4h, v4.4h\n"
+ "smlal2 v10.4s, v28.8h, v4.8h\n"
+ "smlal v9.4s, v24.4h, v4.4h\n"
+ "smlal2 v8.4s, v24.8h, v4.8h\n"
"tbz x4, #2, 65f\n"
- "ld1 { v22.s }[0], [x20], #0x4\n"
+ "ld1 { v22.s }[0], [x11], #0x4\n"
"tbz x4, #1, 64f\n"
- "ld1 { v22.h }[2], [x20], #0x2\n"
+ "ld1 { v22.h }[2], [x11], #0x2\n"
"tbz x4, #0, 67f\n"
- "ld1 { v22.b }[6], [x20]\n"
+ "ld1 { v22.b }[6], [x11]\n"
"b 67f\n"
"64:" // Oddments: Load (3, 5): Bit 2: Bit 1: Unset
"tbz x4, #0, 67f\n"
- "ld1 { v22.b }[4], [x20]\n"
+ "ld1 { v22.b }[4], [x11]\n"
"b 67f\n"
"65:" // Oddments: Load (3, 5): Bit 2: Unset
"tbz x4, #1, 66f\n"
- "ld1 { v22.h }[0], [x20], #0x2\n"
+ "ld1 { v22.h }[0], [x11], #0x2\n"
"tbz x4, #0, 67f\n"
- "ld1 { v22.b }[2], [x20]\n"
+ "ld1 { v22.b }[2], [x11]\n"
"b 67f\n"
"66:" // Oddments: Load (3, 5): Bit 2: Unset: Bit 1: Unset
"tbz x4, #0, 67f\n"
- "ld1 { v22.b }[0], [x20]\n"
+ "ld1 { v22.b }[0], [x11]\n"
"67:" // Oddments: Load (3, 5): Bit 2: End
- "ldr d0, [x0, #0x78]\n"
+ "ldr d0, [x8, #0x78]\n"
"ushll v22.8h, v22.8b, #0x0\n"
- "usubl v0.8h, v0.8b, v9.8b\n"
- "ldr x11, [x2, #0xc0]\n"
- "smlal v6.4s, v22.4h, v4.4h\n"
- "smlal2 v5.4s, v22.8h, v4.8h\n"
- "add x11, x11, x3\n"
+ "usubl v0.8h, v0.8b, v15.8b\n"
+ "ldr x10, [x0, #0xc0]\n"
+ "smlal v7.4s, v22.4h, v4.4h\n"
+ "smlal2 v6.4s, v22.8h, v4.8h\n"
+ "add x10, x10, x1\n"
"smlal v11.4s, v31.4h, v0.4h\n"
- "smlal2 v13.4s, v31.8h, v0.8h\n"
- "smlal v20.4s, v30.4h, v0.4h\n"
- "smlal2 v19.4s, v30.8h, v0.8h\n"
+ "smlal2 v21.4s, v31.8h, v0.8h\n"
+ "smlal v14.4s, v30.4h, v0.4h\n"
+ "smlal2 v10.4s, v30.8h, v0.8h\n"
"tbz x4, #2, 69f\n"
- "ld1 { v27.s }[0], [x11], #0x4\n"
+ "ld1 { v27.s }[0], [x10], #0x4\n"
"tbz x4, #1, 68f\n"
- "ld1 { v27.h }[2], [x11], #0x2\n"
+ "ld1 { v27.h }[2], [x10], #0x2\n"
"tbz x4, #0, 71f\n"
- "ld1 { v27.b }[6], [x11]\n"
+ "ld1 { v27.b }[6], [x10]\n"
"b 71f\n"
"68:" // Oddments: Load (4, 0): Bit 2: Bit 1: Unset
"tbz x4, #0, 71f\n"
- "ld1 { v27.b }[4], [x11]\n"
+ "ld1 { v27.b }[4], [x10]\n"
"b 71f\n"
"69:" // Oddments: Load (4, 0): Bit 2: Unset
"tbz x4, #1, 70f\n"
- "ld1 { v27.h }[0], [x11], #0x2\n"
+ "ld1 { v27.h }[0], [x10], #0x2\n"
"tbz x4, #0, 71f\n"
- "ld1 { v27.b }[2], [x11]\n"
+ "ld1 { v27.b }[2], [x10]\n"
"b 71f\n"
"70:" // Oddments: Load (4, 0): Bit 2: Unset: Bit 1: Unset
"tbz x4, #0, 71f\n"
- "ld1 { v27.b }[0], [x11]\n"
+ "ld1 { v27.b }[0], [x10]\n"
"71:" // Oddments: Load (4, 0): Bit 2: End
"ushll v27.8h, v27.8b, #0x0\n"
- "ldr x10, [x2, #0xc8]\n"
- "smlal v8.4s, v27.4h, v0.4h\n"
- "smlal2 v7.4s, v27.8h, v0.8h\n"
- "add x10, x10, x3\n"
+ "ldr x9, [x0, #0xc8]\n"
+ "smlal v9.4s, v27.4h, v0.4h\n"
+ "smlal2 v8.4s, v27.8h, v0.8h\n"
+ "add x9, x9, x1\n"
"tbz x4, #2, 73f\n"
- "ld1 { v23.s }[0], [x10], #0x4\n"
+ "ld1 { v23.s }[0], [x9], #0x4\n"
"tbz x4, #1, 72f\n"
- "ld1 { v23.h }[2], [x10], #0x2\n"
+ "ld1 { v23.h }[2], [x9], #0x2\n"
"tbz x4, #0, 75f\n"
- "ld1 { v23.b }[6], [x10]\n"
+ "ld1 { v23.b }[6], [x9]\n"
"b 75f\n"
"72:" // Oddments: Load (4, 1): Bit 2: Bit 1: Unset
"tbz x4, #0, 75f\n"
- "ld1 { v23.b }[4], [x10]\n"
+ "ld1 { v23.b }[4], [x9]\n"
"b 75f\n"
"73:" // Oddments: Load (4, 1): Bit 2: Unset
"tbz x4, #1, 74f\n"
- "ld1 { v23.h }[0], [x10], #0x2\n"
+ "ld1 { v23.h }[0], [x9], #0x2\n"
"tbz x4, #0, 75f\n"
- "ld1 { v23.b }[2], [x10]\n"
+ "ld1 { v23.b }[2], [x9]\n"
"b 75f\n"
"74:" // Oddments: Load (4, 1): Bit 2: Unset: Bit 1: Unset
"tbz x4, #0, 75f\n"
- "ld1 { v23.b }[0], [x10]\n"
+ "ld1 { v23.b }[0], [x9]\n"
"75:" // Oddments: Load (4, 1): Bit 2: End
- "ldr d1, [x0, #0x80]\n"
+ "ldr d1, [x8, #0x80]\n"
"ushll v23.8h, v23.8b, #0x0\n"
- "usubl v1.8h, v1.8b, v9.8b\n"
- "ldr x22, [x2, #0xd0]\n"
- "smlal v6.4s, v23.4h, v0.4h\n"
- "smlal2 v5.4s, v23.8h, v0.8h\n"
- "add x22, x22, x3\n"
+ "usubl v1.8h, v1.8b, v15.8b\n"
+ "ldr x28, [x0, #0xd0]\n"
+ "smlal v7.4s, v23.4h, v0.4h\n"
+ "smlal2 v6.4s, v23.8h, v0.8h\n"
+ "add x28, x28, x1\n"
"smlal v11.4s, v30.4h, v1.4h\n"
- "smlal2 v13.4s, v30.8h, v1.8h\n"
- "smlal v20.4s, v26.4h, v1.4h\n"
- "smlal2 v19.4s, v26.8h, v1.8h\n"
- "smlal v8.4s, v23.4h, v1.4h\n"
- "smlal2 v7.4s, v23.8h, v1.8h\n"
+ "smlal2 v21.4s, v30.8h, v1.8h\n"
+ "smlal v14.4s, v26.4h, v1.4h\n"
+ "smlal2 v10.4s, v26.8h, v1.8h\n"
+ "smlal v9.4s, v23.4h, v1.4h\n"
+ "smlal2 v8.4s, v23.8h, v1.8h\n"
"tbz x4, #2, 77f\n"
- "ld1 { v31.s }[0], [x22], #0x4\n"
+ "ld1 { v31.s }[0], [x28], #0x4\n"
"tbz x4, #1, 76f\n"
- "ld1 { v31.h }[2], [x22], #0x2\n"
+ "ld1 { v31.h }[2], [x28], #0x2\n"
"tbz x4, #0, 79f\n"
- "ld1 { v31.b }[6], [x22]\n"
+ "ld1 { v31.b }[6], [x28]\n"
"b 79f\n"
"76:" // Oddments: Load (4, 2): Bit 2: Bit 1: Unset
"tbz x4, #0, 79f\n"
- "ld1 { v31.b }[4], [x22]\n"
+ "ld1 { v31.b }[4], [x28]\n"
"b 79f\n"
"77:" // Oddments: Load (4, 2): Bit 2: Unset
"tbz x4, #1, 78f\n"
- "ld1 { v31.h }[0], [x22], #0x2\n"
+ "ld1 { v31.h }[0], [x28], #0x2\n"
"tbz x4, #0, 79f\n"
- "ld1 { v31.b }[2], [x22]\n"
+ "ld1 { v31.b }[2], [x28]\n"
"b 79f\n"
"78:" // Oddments: Load (4, 2): Bit 2: Unset: Bit 1: Unset
"tbz x4, #0, 79f\n"
- "ld1 { v31.b }[0], [x22]\n"
+ "ld1 { v31.b }[0], [x28]\n"
"79:" // Oddments: Load (4, 2): Bit 2: End
- "ldr d2, [x0, #0x88]\n"
+ "ldr d2, [x8, #0x88]\n"
"ushll v31.8h, v31.8b, #0x0\n"
- "usubl v2.8h, v2.8b, v9.8b\n"
- "ldr x28, [x2, #0xd8]\n"
- "smlal v6.4s, v31.4h, v1.4h\n"
- "smlal2 v5.4s, v31.8h, v1.8h\n"
- "add x28, x28, x3\n"
+ "usubl v2.8h, v2.8b, v15.8b\n"
+ "ldr x27, [x0, #0xd8]\n"
+ "smlal v7.4s, v31.4h, v1.4h\n"
+ "smlal2 v6.4s, v31.8h, v1.8h\n"
+ "add x27, x27, x1\n"
"smlal v11.4s, v26.4h, v2.4h\n"
- "smlal2 v13.4s, v26.8h, v2.8h\n"
- "smlal v20.4s, v25.4h, v2.4h\n"
- "smlal2 v19.4s, v25.8h, v2.8h\n"
- "smlal v8.4s, v31.4h, v2.4h\n"
- "smlal2 v7.4s, v31.8h, v2.8h\n"
+ "smlal2 v21.4s, v26.8h, v2.8h\n"
+ "smlal v14.4s, v25.4h, v2.4h\n"
+ "smlal2 v10.4s, v25.8h, v2.8h\n"
+ "smlal v9.4s, v31.4h, v2.4h\n"
+ "smlal2 v8.4s, v31.8h, v2.8h\n"
"tbz x4, #2, 81f\n"
- "ld1 { v30.s }[0], [x28], #0x4\n"
+ "ld1 { v30.s }[0], [x27], #0x4\n"
"tbz x4, #1, 80f\n"
- "ld1 { v30.h }[2], [x28], #0x2\n"
+ "ld1 { v30.h }[2], [x27], #0x2\n"
"tbz x4, #0, 83f\n"
- "ld1 { v30.b }[6], [x28]\n"
+ "ld1 { v30.b }[6], [x27]\n"
"b 83f\n"
"80:" // Oddments: Load (4, 3): Bit 2: Bit 1: Unset
"tbz x4, #0, 83f\n"
- "ld1 { v30.b }[4], [x28]\n"
+ "ld1 { v30.b }[4], [x27]\n"
"b 83f\n"
"81:" // Oddments: Load (4, 3): Bit 2: Unset
"tbz x4, #1, 82f\n"
- "ld1 { v30.h }[0], [x28], #0x2\n"
+ "ld1 { v30.h }[0], [x27], #0x2\n"
"tbz x4, #0, 83f\n"
- "ld1 { v30.b }[2], [x28]\n"
+ "ld1 { v30.b }[2], [x27]\n"
"b 83f\n"
"82:" // Oddments: Load (4, 3): Bit 2: Unset: Bit 1: Unset
"tbz x4, #0, 83f\n"
- "ld1 { v30.b }[0], [x28]\n"
+ "ld1 { v30.b }[0], [x27]\n"
"83:" // Oddments: Load (4, 3): Bit 2: End
- "ldr d3, [x0, #0x90]\n"
+ "ldr d3, [x8, #0x90]\n"
"ushll v30.8h, v30.8b, #0x0\n"
- "usubl v3.8h, v3.8b, v9.8b\n"
- "ldr x27, [x2, #0xe0]\n"
- "smlal v6.4s, v30.4h, v2.4h\n"
- "smlal2 v5.4s, v30.8h, v2.8h\n"
- "add x27, x27, x3\n"
+ "usubl v3.8h, v3.8b, v15.8b\n"
+ "ldr x26, [x0, #0xe0]\n"
+ "smlal v7.4s, v30.4h, v2.4h\n"
+ "smlal2 v6.4s, v30.8h, v2.8h\n"
+ "add x26, x26, x1\n"
"smlal v11.4s, v25.4h, v3.4h\n"
- "smlal2 v13.4s, v25.8h, v3.8h\n"
- "smlal v20.4s, v24.4h, v3.4h\n"
- "smlal2 v19.4s, v24.8h, v3.8h\n"
- "smlal v8.4s, v30.4h, v3.4h\n"
- "smlal2 v7.4s, v30.8h, v3.8h\n"
+ "smlal2 v21.4s, v25.8h, v3.8h\n"
+ "smlal v14.4s, v24.4h, v3.4h\n"
+ "smlal2 v10.4s, v24.8h, v3.8h\n"
+ "smlal v9.4s, v30.4h, v3.4h\n"
+ "smlal2 v8.4s, v30.8h, v3.8h\n"
"tbz x4, #2, 85f\n"
- "ld1 { v28.s }[0], [x27], #0x4\n"
+ "ld1 { v28.s }[0], [x26], #0x4\n"
"tbz x4, #1, 84f\n"
- "ld1 { v28.h }[2], [x27], #0x2\n"
+ "ld1 { v28.h }[2], [x26], #0x2\n"
"tbz x4, #0, 87f\n"
- "ld1 { v28.b }[6], [x27]\n"
+ "ld1 { v28.b }[6], [x26]\n"
"b 87f\n"
"84:" // Oddments: Load (4, 4): Bit 2: Bit 1: Unset
"tbz x4, #0, 87f\n"
- "ld1 { v28.b }[4], [x27]\n"
+ "ld1 { v28.b }[4], [x26]\n"
"b 87f\n"
"85:" // Oddments: Load (4, 4): Bit 2: Unset
"tbz x4, #1, 86f\n"
- "ld1 { v28.h }[0], [x27], #0x2\n"
+ "ld1 { v28.h }[0], [x26], #0x2\n"
"tbz x4, #0, 87f\n"
- "ld1 { v28.b }[2], [x27]\n"
+ "ld1 { v28.b }[2], [x26]\n"
"b 87f\n"
"86:" // Oddments: Load (4, 4): Bit 2: Unset: Bit 1: Unset
"tbz x4, #0, 87f\n"
- "ld1 { v28.b }[0], [x27]\n"
+ "ld1 { v28.b }[0], [x26]\n"
"87:" // Oddments: Load (4, 4): Bit 2: End
- "ldr d4, [x0, #0x98]\n"
+ "ldr d4, [x8, #0x98]\n"
"ushll v28.8h, v28.8b, #0x0\n"
- "usubl v4.8h, v4.8b, v9.8b\n"
- "ldr x26, [x2, #0xe8]\n"
- "smlal v6.4s, v28.4h, v3.4h\n"
- "smlal2 v5.4s, v28.8h, v3.8h\n"
- "add x26, x26, x3\n"
+ "usubl v4.8h, v4.8b, v15.8b\n"
+ "ldr x25, [x0, #0xe8]\n"
+ "smlal v7.4s, v28.4h, v3.4h\n"
+ "smlal2 v6.4s, v28.8h, v3.8h\n"
+ "add x25, x25, x1\n"
"smlal v11.4s, v24.4h, v4.4h\n"
- "smlal2 v13.4s, v24.8h, v4.8h\n"
- "smlal v20.4s, v22.4h, v4.4h\n"
- "smlal2 v19.4s, v22.8h, v4.8h\n"
- "smlal v8.4s, v28.4h, v4.4h\n"
- "smlal2 v7.4s, v28.8h, v4.8h\n"
+ "smlal2 v21.4s, v24.8h, v4.8h\n"
+ "smlal v14.4s, v22.4h, v4.4h\n"
+ "smlal2 v10.4s, v22.8h, v4.8h\n"
+ "smlal v9.4s, v28.4h, v4.4h\n"
+ "smlal2 v8.4s, v28.8h, v4.8h\n"
"tbz x4, #2, 89f\n"
- "ld1 { v26.s }[0], [x26], #0x4\n"
+ "ld1 { v26.s }[0], [x25], #0x4\n"
"tbz x4, #1, 88f\n"
- "ld1 { v26.h }[2], [x26], #0x2\n"
+ "ld1 { v26.h }[2], [x25], #0x2\n"
"tbz x4, #0, 91f\n"
- "ld1 { v26.b }[6], [x26]\n"
+ "ld1 { v26.b }[6], [x25]\n"
"b 91f\n"
"88:" // Oddments: Load (4, 5): Bit 2: Bit 1: Unset
"tbz x4, #0, 91f\n"
- "ld1 { v26.b }[4], [x26]\n"
+ "ld1 { v26.b }[4], [x25]\n"
"b 91f\n"
"89:" // Oddments: Load (4, 5): Bit 2: Unset
"tbz x4, #1, 90f\n"
- "ld1 { v26.h }[0], [x26], #0x2\n"
+ "ld1 { v26.h }[0], [x25], #0x2\n"
"tbz x4, #0, 91f\n"
- "ld1 { v26.b }[2], [x26]\n"
+ "ld1 { v26.b }[2], [x25]\n"
"b 91f\n"
"90:" // Oddments: Load (4, 5): Bit 2: Unset: Bit 1: Unset
"tbz x4, #0, 91f\n"
- "ld1 { v26.b }[0], [x26]\n"
+ "ld1 { v26.b }[0], [x25]\n"
"91:" // Oddments: Load (4, 5): Bit 2: End
- "ldr d0, [x0, #0xa0]\n"
+ "ldr d0, [x8, #0xa0]\n"
"ushll v26.8h, v26.8b, #0x0\n"
- "usubl v0.8h, v0.8b, v9.8b\n"
- "ldr x25, [x2, #0xf0]\n"
- "smlal v6.4s, v26.4h, v4.4h\n"
- "smlal2 v5.4s, v26.8h, v4.8h\n"
- "add x25, x25, x3\n"
+ "usubl v0.8h, v0.8b, v15.8b\n"
+ "ldr x24, [x0, #0xf0]\n"
+ "smlal v7.4s, v26.4h, v4.4h\n"
+ "smlal2 v6.4s, v26.8h, v4.8h\n"
+ "add x24, x24, x1\n"
"smlal v11.4s, v27.4h, v0.4h\n"
- "smlal2 v13.4s, v27.8h, v0.8h\n"
- "smlal v20.4s, v23.4h, v0.4h\n"
- "smlal2 v19.4s, v23.8h, v0.8h\n"
+ "smlal2 v21.4s, v27.8h, v0.8h\n"
+ "smlal v14.4s, v23.4h, v0.4h\n"
+ "smlal2 v10.4s, v23.8h, v0.8h\n"
"tbz x4, #2, 93f\n"
- "ld1 { v25.s }[0], [x25], #0x4\n"
+ "ld1 { v25.s }[0], [x24], #0x4\n"
"tbz x4, #1, 92f\n"
- "ld1 { v25.h }[2], [x25], #0x2\n"
+ "ld1 { v25.h }[2], [x24], #0x2\n"
"tbz x4, #0, 95f\n"
- "ld1 { v25.b }[6], [x25]\n"
+ "ld1 { v25.b }[6], [x24]\n"
"b 95f\n"
"92:" // Oddments: Load (5, 0): Bit 2: Bit 1: Unset
"tbz x4, #0, 95f\n"
- "ld1 { v25.b }[4], [x25]\n"
+ "ld1 { v25.b }[4], [x24]\n"
"b 95f\n"
"93:" // Oddments: Load (5, 0): Bit 2: Unset
"tbz x4, #1, 94f\n"
- "ld1 { v25.h }[0], [x25], #0x2\n"
+ "ld1 { v25.h }[0], [x24], #0x2\n"
"tbz x4, #0, 95f\n"
- "ld1 { v25.b }[2], [x25]\n"
+ "ld1 { v25.b }[2], [x24]\n"
"b 95f\n"
"94:" // Oddments: Load (5, 0): Bit 2: Unset: Bit 1: Unset
"tbz x4, #0, 95f\n"
- "ld1 { v25.b }[0], [x25]\n"
+ "ld1 { v25.b }[0], [x24]\n"
"95:" // Oddments: Load (5, 0): Bit 2: End
"ushll v25.8h, v25.8b, #0x0\n"
- "ldr x24, [x2, #0xf8]\n"
- "smlal v8.4s, v25.4h, v0.4h\n"
- "smlal2 v7.4s, v25.8h, v0.8h\n"
- "add x24, x24, x3\n"
+ "ldr x23, [x0, #0xf8]\n"
+ "smlal v9.4s, v25.4h, v0.4h\n"
+ "smlal2 v8.4s, v25.8h, v0.8h\n"
+ "add x23, x23, x1\n"
"tbz x4, #2, 97f\n"
- "ld1 { v24.s }[0], [x24], #0x4\n"
+ "ld1 { v24.s }[0], [x23], #0x4\n"
"tbz x4, #1, 96f\n"
- "ld1 { v24.h }[2], [x24], #0x2\n"
+ "ld1 { v24.h }[2], [x23], #0x2\n"
"tbz x4, #0, 99f\n"
- "ld1 { v24.b }[6], [x24]\n"
+ "ld1 { v24.b }[6], [x23]\n"
"b 99f\n"
"96:" // Oddments: Load (5, 1): Bit 2: Bit 1: Unset
"tbz x4, #0, 99f\n"
- "ld1 { v24.b }[4], [x24]\n"
+ "ld1 { v24.b }[4], [x23]\n"
"b 99f\n"
"97:" // Oddments: Load (5, 1): Bit 2: Unset
"tbz x4, #1, 98f\n"
- "ld1 { v24.h }[0], [x24], #0x2\n"
+ "ld1 { v24.h }[0], [x23], #0x2\n"
"tbz x4, #0, 99f\n"
- "ld1 { v24.b }[2], [x24]\n"
+ "ld1 { v24.b }[2], [x23]\n"
"b 99f\n"
"98:" // Oddments: Load (5, 1): Bit 2: Unset: Bit 1: Unset
"tbz x4, #0, 99f\n"
- "ld1 { v24.b }[0], [x24]\n"
+ "ld1 { v24.b }[0], [x23]\n"
"99:" // Oddments: Load (5, 1): Bit 2: End
- "ldr d1, [x0, #0xa8]\n"
+ "ldr d1, [x8, #0xa8]\n"
"ushll v24.8h, v24.8b, #0x0\n"
- "usubl v1.8h, v1.8b, v9.8b\n"
- "ldr x23, [x2, #0x100]\n"
- "smlal v6.4s, v24.4h, v0.4h\n"
- "smlal2 v5.4s, v24.8h, v0.8h\n"
- "add x23, x23, x3\n"
+ "usubl v1.8h, v1.8b, v15.8b\n"
+ "ldr x22, [x0, #0x100]\n"
+ "smlal v7.4s, v24.4h, v0.4h\n"
+ "smlal2 v6.4s, v24.8h, v0.8h\n"
+ "add x22, x22, x1\n"
"smlal v11.4s, v23.4h, v1.4h\n"
- "smlal2 v13.4s, v23.8h, v1.8h\n"
- "smlal v20.4s, v31.4h, v1.4h\n"
- "smlal2 v19.4s, v31.8h, v1.8h\n"
- "smlal v8.4s, v24.4h, v1.4h\n"
- "smlal2 v7.4s, v24.8h, v1.8h\n"
+ "smlal2 v21.4s, v23.8h, v1.8h\n"
+ "smlal v14.4s, v31.4h, v1.4h\n"
+ "smlal2 v10.4s, v31.8h, v1.8h\n"
+ "smlal v9.4s, v24.4h, v1.4h\n"
+ "smlal2 v8.4s, v24.8h, v1.8h\n"
"tbz x4, #2, 101f\n"
- "ld1 { v27.s }[0], [x23], #0x4\n"
+ "ld1 { v27.s }[0], [x22], #0x4\n"
"tbz x4, #1, 100f\n"
- "ld1 { v27.h }[2], [x23], #0x2\n"
+ "ld1 { v27.h }[2], [x22], #0x2\n"
"tbz x4, #0, 103f\n"
- "ld1 { v27.b }[6], [x23]\n"
+ "ld1 { v27.b }[6], [x22]\n"
"b 103f\n"
"100:" // Oddments: Load (5, 2): Bit 2: Bit 1: Unset
"tbz x4, #0, 103f\n"
- "ld1 { v27.b }[4], [x23]\n"
+ "ld1 { v27.b }[4], [x22]\n"
"b 103f\n"
"101:" // Oddments: Load (5, 2): Bit 2: Unset
"tbz x4, #1, 102f\n"
- "ld1 { v27.h }[0], [x23], #0x2\n"
+ "ld1 { v27.h }[0], [x22], #0x2\n"
"tbz x4, #0, 103f\n"
- "ld1 { v27.b }[2], [x23]\n"
+ "ld1 { v27.b }[2], [x22]\n"
"b 103f\n"
"102:" // Oddments: Load (5, 2): Bit 2: Unset: Bit 1: Unset
"tbz x4, #0, 103f\n"
- "ld1 { v27.b }[0], [x23]\n"
+ "ld1 { v27.b }[0], [x22]\n"
"103:" // Oddments: Load (5, 2): Bit 2: End
- "ldr d2, [x0, #0xb0]\n"
+ "ldr d2, [x8, #0xb0]\n"
"ushll v27.8h, v27.8b, #0x0\n"
- "usubl v2.8h, v2.8b, v9.8b\n"
- "ldr x15, [x2, #0x108]\n"
- "smlal v6.4s, v27.4h, v1.4h\n"
- "smlal2 v5.4s, v27.8h, v1.8h\n"
- "add x15, x15, x3\n"
+ "usubl v2.8h, v2.8b, v15.8b\n"
+ "ldr x7, [x0, #0x108]\n"
+ "smlal v7.4s, v27.4h, v1.4h\n"
+ "smlal2 v6.4s, v27.8h, v1.8h\n"
+ "add x7, x7, x1\n"
"smlal v11.4s, v31.4h, v2.4h\n"
- "smlal2 v13.4s, v31.8h, v2.8h\n"
- "smlal v20.4s, v30.4h, v2.4h\n"
- "smlal2 v19.4s, v30.8h, v2.8h\n"
- "smlal v8.4s, v27.4h, v2.4h\n"
- "smlal2 v7.4s, v27.8h, v2.8h\n"
+ "smlal2 v21.4s, v31.8h, v2.8h\n"
+ "smlal v14.4s, v30.4h, v2.4h\n"
+ "smlal2 v10.4s, v30.8h, v2.8h\n"
+ "smlal v9.4s, v27.4h, v2.4h\n"
+ "smlal2 v8.4s, v27.8h, v2.8h\n"
"tbz x4, #2, 105f\n"
- "ld1 { v25.s }[0], [x15], #0x4\n"
+ "ld1 { v25.s }[0], [x7], #0x4\n"
"tbz x4, #1, 104f\n"
- "ld1 { v25.h }[2], [x15], #0x2\n"
+ "ld1 { v25.h }[2], [x7], #0x2\n"
"tbz x4, #0, 107f\n"
- "ld1 { v25.b }[6], [x15]\n"
+ "ld1 { v25.b }[6], [x7]\n"
"b 107f\n"
"104:" // Oddments: Load (5, 3): Bit 2: Bit 1: Unset
"tbz x4, #0, 107f\n"
- "ld1 { v25.b }[4], [x15]\n"
+ "ld1 { v25.b }[4], [x7]\n"
"b 107f\n"
"105:" // Oddments: Load (5, 3): Bit 2: Unset
"tbz x4, #1, 106f\n"
- "ld1 { v25.h }[0], [x15], #0x2\n"
+ "ld1 { v25.h }[0], [x7], #0x2\n"
"tbz x4, #0, 107f\n"
- "ld1 { v25.b }[2], [x15]\n"
+ "ld1 { v25.b }[2], [x7]\n"
"b 107f\n"
"106:" // Oddments: Load (5, 3): Bit 2: Unset: Bit 1: Unset
"tbz x4, #0, 107f\n"
- "ld1 { v25.b }[0], [x15]\n"
+ "ld1 { v25.b }[0], [x7]\n"
"107:" // Oddments: Load (5, 3): Bit 2: End
- "ldr d3, [x0, #0xb8]\n"
+ "ldr d3, [x8, #0xb8]\n"
"ushll v25.8h, v25.8b, #0x0\n"
- "usubl v3.8h, v3.8b, v9.8b\n"
- "ldr x21, [x2, #0x110]\n"
- "smlal v6.4s, v25.4h, v2.4h\n"
- "smlal2 v5.4s, v25.8h, v2.8h\n"
- "add x21, x21, x3\n"
+ "usubl v3.8h, v3.8b, v15.8b\n"
+ "ldr x20, [x0, #0x110]\n"
+ "smlal v7.4s, v25.4h, v2.4h\n"
+ "smlal2 v6.4s, v25.8h, v2.8h\n"
+ "add x20, x20, x1\n"
"smlal v11.4s, v30.4h, v3.4h\n"
- "smlal2 v13.4s, v30.8h, v3.8h\n"
- "smlal v20.4s, v28.4h, v3.4h\n"
- "smlal2 v19.4s, v28.8h, v3.8h\n"
- "smlal v8.4s, v25.4h, v3.4h\n"
- "smlal2 v7.4s, v25.8h, v3.8h\n"
+ "smlal2 v21.4s, v30.8h, v3.8h\n"
+ "smlal v14.4s, v28.4h, v3.4h\n"
+ "smlal2 v10.4s, v28.8h, v3.8h\n"
+ "smlal v9.4s, v25.4h, v3.4h\n"
+ "smlal2 v8.4s, v25.8h, v3.8h\n"
"tbz x4, #2, 109f\n"
- "ld1 { v24.s }[0], [x21], #0x4\n"
+ "ld1 { v24.s }[0], [x20], #0x4\n"
"tbz x4, #1, 108f\n"
- "ld1 { v24.h }[2], [x21], #0x2\n"
+ "ld1 { v24.h }[2], [x20], #0x2\n"
"tbz x4, #0, 111f\n"
- "ld1 { v24.b }[6], [x21]\n"
+ "ld1 { v24.b }[6], [x20]\n"
"b 111f\n"
"108:" // Oddments: Load (5, 4): Bit 2: Bit 1: Unset
"tbz x4, #0, 111f\n"
- "ld1 { v24.b }[4], [x21]\n"
+ "ld1 { v24.b }[4], [x20]\n"
"b 111f\n"
"109:" // Oddments: Load (5, 4): Bit 2: Unset
"tbz x4, #1, 110f\n"
- "ld1 { v24.h }[0], [x21], #0x2\n"
+ "ld1 { v24.h }[0], [x20], #0x2\n"
"tbz x4, #0, 111f\n"
- "ld1 { v24.b }[2], [x21]\n"
+ "ld1 { v24.b }[2], [x20]\n"
"b 111f\n"
"110:" // Oddments: Load (5, 4): Bit 2: Unset: Bit 1: Unset
"tbz x4, #0, 111f\n"
- "ld1 { v24.b }[0], [x21]\n"
+ "ld1 { v24.b }[0], [x20]\n"
"111:" // Oddments: Load (5, 4): Bit 2: End
- "ldr d4, [x0, #0xc0]\n"
+ "ldr d4, [x8, #0xc0]\n"
"ushll v24.8h, v24.8b, #0x0\n"
- "usubl v4.8h, v4.8b, v9.8b\n"
- "ldr x20, [x2, #0x118]\n"
- "smlal v6.4s, v24.4h, v3.4h\n"
- "smlal2 v5.4s, v24.8h, v3.8h\n"
- "add x20, x20, x3\n"
+ "usubl v4.8h, v4.8b, v15.8b\n"
+ "ldr x19, [x0, #0x118]\n"
+ "smlal v7.4s, v24.4h, v3.4h\n"
+ "smlal2 v6.4s, v24.8h, v3.8h\n"
+ "add x19, x19, x1\n"
"smlal v11.4s, v28.4h, v4.4h\n"
- "smlal2 v13.4s, v28.8h, v4.8h\n"
- "smlal v20.4s, v26.4h, v4.4h\n"
- "smlal2 v19.4s, v26.8h, v4.8h\n"
- "smlal v8.4s, v24.4h, v4.4h\n"
- "smlal2 v7.4s, v24.8h, v4.8h\n"
+ "smlal2 v21.4s, v28.8h, v4.8h\n"
+ "smlal v14.4s, v26.4h, v4.4h\n"
+ "smlal2 v10.4s, v26.8h, v4.8h\n"
+ "smlal v9.4s, v24.4h, v4.4h\n"
+ "smlal2 v8.4s, v24.8h, v4.8h\n"
"tbz x4, #2, 113f\n"
- "ld1 { v27.s }[0], [x20], #0x4\n"
+ "ld1 { v27.s }[0], [x19], #0x4\n"
"tbz x4, #1, 112f\n"
- "ld1 { v27.h }[2], [x20], #0x2\n"
+ "ld1 { v27.h }[2], [x19], #0x2\n"
"tbz x4, #0, 115f\n"
- "ld1 { v27.b }[6], [x20]\n"
+ "ld1 { v27.b }[6], [x19]\n"
"b 115f\n"
"112:" // Oddments: Load (5, 5): Bit 2: Bit 1: Unset
"tbz x4, #0, 115f\n"
- "ld1 { v27.b }[4], [x20]\n"
+ "ld1 { v27.b }[4], [x19]\n"
"b 115f\n"
"113:" // Oddments: Load (5, 5): Bit 2: Unset
"tbz x4, #1, 114f\n"
- "ld1 { v27.h }[0], [x20], #0x2\n"
+ "ld1 { v27.h }[0], [x19], #0x2\n"
"tbz x4, #0, 115f\n"
- "ld1 { v27.b }[2], [x20]\n"
+ "ld1 { v27.b }[2], [x19]\n"
"b 115f\n"
"114:" // Oddments: Load (5, 5): Bit 2: Unset: Bit 1: Unset
"tbz x4, #0, 115f\n"
- "ld1 { v27.b }[0], [x20]\n"
+ "ld1 { v27.b }[0], [x19]\n"
"115:" // Oddments: Load (5, 5): Bit 2: End
"ushll v27.8h, v27.8b, #0x0\n"
- "smlal v6.4s, v27.4h, v4.4h\n"
- "smlal2 v5.4s, v27.8h, v4.8h\n"
+ "smlal v7.4s, v27.4h, v4.4h\n"
+ "smlal2 v6.4s, v27.8h, v4.8h\n"
"tbz x4, #2, 117f\n"
- "ld1 { v18.4s }, [x6], #0x10\n"
- "ld1 { v21.4s }, [x5], #0x10\n"
+ "ld1 { v17.4s }, [x5], #0x10\n"
+ "ld1 { v5.4s }, [x6], #0x10\n"
"tbz x4, #1, 116f\n"
- "ld1 { v16.d }[0], [x6], #0x8\n"
- "ld1 { v10.d }[0], [x5], #0x8\n"
+ "ld1 { v18.d }[0], [x5], #0x8\n"
+ "ld1 { v29.d }[0], [x6], #0x8\n"
"tbz x4, #0, 119f\n"
- "ld1 { v16.s }[2], [x6]\n"
- "ld1 { v10.s }[2], [x5]\n"
+ "ld1 { v18.s }[2], [x5]\n"
+ "ld1 { v29.s }[2], [x6]\n"
"b 119f\n"
"116:" // Oddments: Load requant params: Bit 2: Bit 1: Unset
"tbz x4, #0, 119f\n"
- "ld1 { v16.s }[0], [x6]\n"
- "ld1 { v10.s }[0], [x5]\n"
+ "ld1 { v18.s }[0], [x5]\n"
+ "ld1 { v29.s }[0], [x6]\n"
"b 119f\n"
"117:" // Oddments: Load requant params: Bit 2: Unset
"tbz x4, #1, 118f\n"
- "ld1 { v18.d }[0], [x6], #0x8\n"
- "ld1 { v21.d }[0], [x5], #0x8\n"
+ "ld1 { v17.d }[0], [x5], #0x8\n"
+ "ld1 { v5.d }[0], [x6], #0x8\n"
"tbz x4, #0, 119f\n"
- "ld1 { v18.s }[2], [x6]\n"
- "ld1 { v21.s }[2], [x5]\n"
+ "ld1 { v17.s }[2], [x5]\n"
+ "ld1 { v5.s }[2], [x6]\n"
"b 119f\n"
"118:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset
"tbz x4, #0, 119f\n"
- "ld1 { v18.s }[0], [x6]\n"
- "ld1 { v21.s }[0], [x5]\n"
+ "ld1 { v17.s }[0], [x5]\n"
+ "ld1 { v5.s }[0], [x6]\n"
"119:" // Oddments: Load requant params: Bit 2: End
- "sqrdmulh v11.4s, v11.4s, v18.4s\n"
- "and v31.16b, v11.16b, v21.16b\n"
- "add x7, x7, x1\n"
- "add x8, x8, x1\n"
- "sqrdmulh v13.4s, v13.4s, v16.4s\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "add x17, x17, x1\n"
- "add x16, x16, x1\n"
- "and v17.16b, v13.16b, v10.16b\n"
- "sqrdmulh v20.4s, v20.4s, v18.4s\n"
+ "sqrdmulh v11.4s, v11.4s, v17.4s\n"
+ "sqrdmulh v14.4s, v14.4s, v17.4s\n"
+ "add x21, x21, x2\n"
+ "add x15, x15, x2\n"
+ "sqrdmulh v9.4s, v9.4s, v17.4s\n"
+ "sqrdmulh v7.4s, v7.4s, v17.4s\n"
+ "add x17, x17, x2\n"
+ "add x16, x16, x2\n"
+ "and v23.16b, v11.16b, v5.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v18.4s\n"
+ "and v22.16b, v14.16b, v5.16b\n"
+ "sqrdmulh v10.4s, v10.4s, v18.4s\n"
+ "and v17.16b, v9.16b, v5.16b\n"
"sqrdmulh v8.4s, v8.4s, v18.4s\n"
+ "and v20.16b, v7.16b, v5.16b\n"
"sqrdmulh v6.4s, v6.4s, v18.4s\n"
- "sqadd v11.4s, v11.4s, v31.4s\n"
+ "sshr v23.4s, v23.4s, #0x1f\n"
+ "and v19.16b, v21.16b, v29.16b\n"
+ "sshr v22.4s, v22.4s, #0x1f\n"
+ "and v18.16b, v10.16b, v29.16b\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "and v26.16b, v20.16b, v21.16b\n"
- "sqrdmulh v19.4s, v19.4s, v16.4s\n"
- "and v18.16b, v8.16b, v21.16b\n"
- "sqrdmulh v7.4s, v7.4s, v16.4s\n"
- "and v31.16b, v6.16b, v21.16b\n"
- "sqrdmulh v5.4s, v5.4s, v16.4s\n"
- "sqadd v13.4s, v13.4s, v17.4s\n"
- "sshr v26.4s, v26.4s, #0x1f\n"
- "and v27.16b, v19.16b, v10.16b\n"
+ "and v26.16b, v8.16b, v29.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v4.16b, v6.16b, v29.16b\n"
+ "sqadd v11.4s, v11.4s, v23.4s\n"
+ "sshr v19.4s, v19.4s, #0x1f\n"
+ "sqadd v14.4s, v14.4s, v22.4s\n"
"sshr v18.4s, v18.4s, #0x1f\n"
- "and v25.16b, v7.16b, v10.16b\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "and v17.16b, v5.16b, v10.16b\n"
- "sqadd v20.4s, v20.4s, v26.4s\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "sqadd v8.4s, v8.4s, v18.4s\n"
- "sshr v25.4s, v25.4s, #0x1f\n"
- "sqadd v6.4s, v6.4s, v31.4s\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "srshl v11.4s, v11.4s, v21.4s\n"
- "srshl v20.4s, v20.4s, v21.4s\n"
- "sqadd v19.4s, v19.4s, v27.4s\n"
- "srshl v8.4s, v8.4s, v21.4s\n"
- "sqadd v7.4s, v7.4s, v25.4s\n"
- "srshl v6.4s, v6.4s, v21.4s\n"
- "sqadd v5.4s, v5.4s, v17.4s\n"
- "srshl v13.4s, v13.4s, v10.4s\n"
+ "sqadd v9.4s, v9.4s, v17.4s\n"
+ "sshr v26.4s, v26.4s, #0x1f\n"
+ "sqadd v7.4s, v7.4s, v20.4s\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "srshl v11.4s, v11.4s, v5.4s\n"
+ "sqadd v21.4s, v21.4s, v19.4s\n"
+ "srshl v14.4s, v14.4s, v5.4s\n"
+ "sqadd v10.4s, v10.4s, v18.4s\n"
+ "srshl v9.4s, v9.4s, v5.4s\n"
+ "sqadd v8.4s, v8.4s, v26.4s\n"
+ "srshl v7.4s, v7.4s, v5.4s\n"
+ "sqadd v6.4s, v6.4s, v4.4s\n"
+ "srshl v21.4s, v21.4s, v29.4s\n"
"sqxtn v11.4h, v11.4s\n"
- "srshl v19.4s, v19.4s, v10.4s\n"
- "sqxtn v20.4h, v20.4s\n"
- "srshl v7.4s, v7.4s, v10.4s\n"
- "sqxtn v8.4h, v8.4s\n"
- "srshl v5.4s, v5.4s, v10.4s\n"
- "sqxtn v6.4h, v6.4s\n"
- "sqxtn2 v11.8h, v13.4s\n"
- "sqxtn2 v20.8h, v19.4s\n"
- "sqxtn2 v8.8h, v7.4s\n"
- "sqxtn2 v6.8h, v5.4s\n"
- "sqadd v11.8h, v11.8h, v15.8h\n"
- "sqadd v20.8h, v20.8h, v15.8h\n"
- "sqadd v8.8h, v8.8h, v15.8h\n"
- "sqadd v6.8h, v6.8h, v15.8h\n"
- "smax v11.8h, v11.8h, v14.8h\n"
- "smax v20.8h, v20.8h, v14.8h\n"
- "smax v8.8h, v8.8h, v14.8h\n"
- "smax v6.8h, v6.8h, v14.8h\n"
- "smin v11.8h, v11.8h, v12.8h\n"
- "smin v20.8h, v20.8h, v12.8h\n"
- "smin v8.8h, v8.8h, v12.8h\n"
- "smin v6.8h, v6.8h, v12.8h\n"
+ "srshl v10.4s, v10.4s, v29.4s\n"
+ "sqxtn v14.4h, v14.4s\n"
+ "srshl v8.4s, v8.4s, v29.4s\n"
+ "sqxtn v9.4h, v9.4s\n"
+ "srshl v6.4s, v6.4s, v29.4s\n"
+ "sqxtn v7.4h, v7.4s\n"
+ "sqxtn2 v11.8h, v21.4s\n"
+ "sqxtn2 v14.8h, v10.4s\n"
+ "sqxtn2 v9.8h, v8.4s\n"
+ "sqxtn2 v7.8h, v6.4s\n"
+ "sqadd v11.8h, v11.8h, v16.8h\n"
+ "sqadd v14.8h, v14.8h, v16.8h\n"
+ "sqadd v9.8h, v9.8h, v16.8h\n"
+ "sqadd v7.8h, v7.8h, v16.8h\n"
+ "smax v11.8h, v11.8h, v12.8h\n"
+ "smax v14.8h, v14.8h, v12.8h\n"
+ "smax v9.8h, v9.8h, v12.8h\n"
+ "smax v7.8h, v7.8h, v12.8h\n"
+ "smin v11.8h, v11.8h, v13.8h\n"
+ "smin v14.8h, v14.8h, v13.8h\n"
+ "smin v9.8h, v9.8h, v13.8h\n"
+ "smin v7.8h, v7.8h, v13.8h\n"
"uzp1 v11.16b, v11.16b, v11.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v8.16b, v8.16b, v8.16b\n"
- "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "uzp1 v14.16b, v14.16b, v14.16b\n"
+ "uzp1 v9.16b, v9.16b, v9.16b\n"
+ "uzp1 v7.16b, v7.16b, v7.16b\n"
"tbz x4, #2, 121f\n"
- "st1 { v11.s }[0], [x7], #0x4\n"
- "st1 { v20.s }[0], [x8], #0x4\n"
- "st1 { v8.s }[0], [x17], #0x4\n"
- "st1 { v6.s }[0], [x16], #0x4\n"
+ "st1 { v11.s }[0], [x21], #0x4\n"
+ "st1 { v14.s }[0], [x15], #0x4\n"
+ "st1 { v9.s }[0], [x17], #0x4\n"
+ "st1 { v7.s }[0], [x16], #0x4\n"
"tbz x4, #1, 120f\n"
- "st1 { v11.h }[2], [x7], #0x2\n"
- "st1 { v20.h }[2], [x8], #0x2\n"
- "st1 { v8.h }[2], [x17], #0x2\n"
- "st1 { v6.h }[2], [x16], #0x2\n"
+ "st1 { v11.h }[2], [x21], #0x2\n"
+ "st1 { v14.h }[2], [x15], #0x2\n"
+ "st1 { v9.h }[2], [x17], #0x2\n"
+ "st1 { v7.h }[2], [x16], #0x2\n"
"tbz x4, #0, 123f\n"
- "st1 { v11.b }[6], [x7], #0x1\n"
- "st1 { v20.b }[6], [x8], #0x1\n"
- "st1 { v8.b }[6], [x17], #0x1\n"
- "st1 { v6.b }[6], [x16], #0x1\n"
+ "st1 { v11.b }[6], [x21], #0x1\n"
+ "st1 { v14.b }[6], [x15], #0x1\n"
+ "st1 { v9.b }[6], [x17], #0x1\n"
+ "st1 { v7.b }[6], [x16], #0x1\n"
"b 123f\n"
"120:" // Oddments: Bit 2: Bit 1: Unset
"tbz x4, #0, 123f\n"
- "st1 { v11.b }[4], [x7], #0x1\n"
- "st1 { v20.b }[4], [x8], #0x1\n"
- "st1 { v8.b }[4], [x17], #0x1\n"
- "st1 { v6.b }[4], [x16], #0x1\n"
+ "st1 { v11.b }[4], [x21], #0x1\n"
+ "st1 { v14.b }[4], [x15], #0x1\n"
+ "st1 { v9.b }[4], [x17], #0x1\n"
+ "st1 { v7.b }[4], [x16], #0x1\n"
"b 123f\n"
"121:" // Oddments: Bit 2: Unset
"tbz x4, #1, 122f\n"
- "st1 { v11.h }[0], [x7], #0x2\n"
- "st1 { v20.h }[0], [x8], #0x2\n"
- "st1 { v8.h }[0], [x17], #0x2\n"
- "st1 { v6.h }[0], [x16], #0x2\n"
+ "st1 { v11.h }[0], [x21], #0x2\n"
+ "st1 { v14.h }[0], [x15], #0x2\n"
+ "st1 { v9.h }[0], [x17], #0x2\n"
+ "st1 { v7.h }[0], [x16], #0x2\n"
"tbz x4, #0, 123f\n"
- "st1 { v11.b }[2], [x7], #0x1\n"
- "st1 { v20.b }[2], [x8], #0x1\n"
- "st1 { v8.b }[2], [x17], #0x1\n"
- "st1 { v6.b }[2], [x16], #0x1\n"
+ "st1 { v11.b }[2], [x21], #0x1\n"
+ "st1 { v14.b }[2], [x15], #0x1\n"
+ "st1 { v9.b }[2], [x17], #0x1\n"
+ "st1 { v7.b }[2], [x16], #0x1\n"
"b 123f\n"
"122:" // Oddments: Bit 2: Unset: Bit 1: Unset
"tbz x4, #0, 123f\n"
- "st1 { v11.b }[0], [x7], #0x1\n"
- "st1 { v20.b }[0], [x8], #0x1\n"
- "st1 { v8.b }[0], [x17], #0x1\n"
- "st1 { v6.b }[0], [x16], #0x1\n"
+ "st1 { v11.b }[0], [x21], #0x1\n"
+ "st1 { v14.b }[0], [x15], #0x1\n"
+ "st1 { v9.b }[0], [x17], #0x1\n"
+ "st1 { v7.b }[0], [x16], #0x1\n"
"123:" // Oddments: Bit 2: End
"124:" // End
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
index 4419048793..96cde40e04 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -91,1072 +91,1072 @@ void a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "ldr x6, [%x[params], %[offsetof_Params_n_channels]]\n"
- "ldr x23, [%x[params], %[offsetof_Params_requant]]\n"
- "lsr x7, x6, #0x3\n"
- "add x20, x23, %[offsetof_Requantize32_a_offset]\n"
- "ld1r { v24.16b }, [x20]\n"
+ "ldr x19, [%x[params], %[offsetof_Params_requant]]\n"
+ "ldr x8, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "add x24, x19, %[offsetof_Requantize32_a_offset]\n"
+ "add x23, x19, %[offsetof_Requantize32_b_offset]\n"
"ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n"
- "add x21, x23, %[offsetof_Requantize32_b_offset]\n"
- "add x20, x23, %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v15.16b }, [x21]\n"
- "ld1r { v14.8h }, [x20]\n"
- "add x21, x23, %[offsetof_Requantize32_minval]\n"
- "add x20, x23, %[offsetof_Requantize32_maxval]\n"
- "ld1r { v12.8h }, [x21]\n"
- "ld1r { v11.8h }, [x20]\n"
- "mov x8, #0x0\n"
- "mov x17, #0x0\n"
- "add x16, %x[params], %[offsetof_Params_inptrs]\n"
- "ldr x15, [%x[params], %[offsetof_Params_weights]]\n"
- "ldr x14, [%x[params], %[offsetof_Params_requant_muls]]\n"
- "ldr x13, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "ldp x12, x11, [x22, #0x0]\n"
- "ldp x10, x9, [x22, #0x10]\n"
- "cbz x7, 3f\n"
- "ldr d0, [x15, #0x0]\n"
- "ldr d1, [x15, #0x8]\n"
- "subs x7, x7, #0x1\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
- "ldr d2, [x15, #0x10]\n"
- "ldr d3, [x15, #0x18]\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
- "ldr d4, [x15, #0x20]\n"
- "ldr d5, [x15, #0x28]\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "ldr d6, [x15, #0x30]\n"
- "ldr d7, [x15, #0x38]\n"
- "ssubl v5.8h, v5.8b, v15.8b\n"
- "ssubl v6.8h, v6.8b, v15.8b\n"
- "ldr d8, [x15, #0x40]\n"
- "ldr x28, [%x[params], %[offsetof_Params_bias]]\n"
- "ssubl v7.8h, v7.8b, v15.8b\n"
- "ssubl v8.8h, v8.8b, v15.8b\n"
- "ldr q13, [x28, #0x0]\n"
- "ldr q20, [x28, #0x10]\n"
- "add x28, x28, #0x20\n"
- "str x28, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x24, x23, [x16, #0x0]\n"
- "ldp x22, x21, [x16, #0x10]\n"
+ "add x21, x19, %[offsetof_Requantize32_c_offset]\n"
+ "add x20, x19, %[offsetof_Requantize32_minval]\n"
+ "ldr x17, [%x[params], %[offsetof_Params_weights]]\n"
+ "add x19, x19, %[offsetof_Requantize32_maxval]\n"
+ "ld1r { v22.16b }, [x24]\n"
+ "ld1r { v12.16b }, [x23]\n"
+ "lsr x16, x8, #0x3\n"
+ "ld1r { v14.8h }, [x21]\n"
+ "ld1r { v17.8h }, [x20]\n"
+ "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
+ "ld1r { v15.8h }, [x19]\n"
+ "ldr x13, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "add x12, %x[params], %[offsetof_Params_inptrs]\n"
+ "ldr x11, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "ldp x10, x9, [x22, #0x0]\n"
+ "ldp x28, x27, [x22, #0x10]\n"
+ "cbz x16, 3f\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr q13, [x19, #0x0]\n"
+ "subs x16, x16, #0x1\n"
+ "mov v19.16b, v13.16b\n"
+ "ldr q26, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr d0, [x17, #0x0]\n"
+ "ldr d1, [x17, #0x8]\n"
+ "ldr d2, [x17, #0x10]\n"
+ "mov v11.16b, v26.16b\n"
+ "mov v18.16b, v13.16b\n"
+ "ldr d3, [x17, #0x18]\n"
+ "ldr d4, [x17, #0x20]\n"
+ "mov v24.16b, v26.16b\n"
"mov v9.16b, v13.16b\n"
- "mov v18.16b, v20.16b\n"
- "ldr d31, [x24, x8]\n"
- "ldr d30, [x23, x8]\n"
- "mov v16.16b, v13.16b\n"
- "mov v26.16b, v20.16b\n"
- "ldr d29, [x22, x8]\n"
- "ldr d28, [x21, x8]\n"
- "mov v25.16b, v13.16b\n"
- "mov v10.16b, v20.16b\n"
- "ldr x20, [x16, #0x20]\n"
- "ldr d27, [x20, x8]\n"
- "usubl v31.8h, v31.8b, v24.8b\n"
- "usubl v30.8h, v30.8b, v24.8b\n"
- "usubl v29.8h, v29.8b, v24.8b\n"
- "usubl v28.8h, v28.8b, v24.8b\n"
- "usubl v27.8h, v27.8b, v24.8b\n"
+ "ldr d5, [x17, #0x28]\n"
+ "ldr d6, [x17, #0x30]\n"
+ "mov v23.16b, v26.16b\n"
+ "ssubl v0.8h, v0.8b, v12.8b\n"
+ "ldr d7, [x17, #0x38]\n"
+ "ldr d8, [x17, #0x40]\n"
+ "ssubl v1.8h, v1.8b, v12.8b\n"
+ "ssubl v2.8h, v2.8b, v12.8b\n"
+ "ldp x23, x22, [x12, #0x0]\n"
+ "ldp x21, x20, [x12, #0x10]\n"
+ "ssubl v3.8h, v3.8b, v12.8b\n"
+ "ssubl v4.8h, v4.8b, v12.8b\n"
+ "ldr x19, [x12, #0x20]\n"
+ "ldr d31, [x23, x15]\n"
+ "ssubl v5.8h, v5.8b, v12.8b\n"
+ "ssubl v6.8h, v6.8b, v12.8b\n"
+ "ldr d30, [x22, x15]\n"
+ "ldr d29, [x21, x15]\n"
+ "ssubl v7.8h, v7.8b, v12.8b\n"
+ "ssubl v8.8h, v8.8b, v12.8b\n"
+ "ldr d28, [x20, x15]\n"
+ "ldr d27, [x19, x15]\n"
+ "usubl v31.8h, v31.8b, v22.8b\n"
+ "usubl v30.8h, v30.8b, v22.8b\n"
+ "usubl v29.8h, v29.8b, v22.8b\n"
+ "usubl v28.8h, v28.8b, v22.8b\n"
+ "usubl v27.8h, v27.8b, v22.8b\n"
"beq 2f\n"
"1:" // Loop
- "ldr q17, [x14, #0x0]\n"
- "ldr q22, [x13, #0x0]\n"
"smlal v13.4s, v31.4h, v4.4h\n"
- "smlal2 v20.4s, v31.8h, v4.8h\n"
- "ldr q23, [x14, #0x10]\n"
- "smlal v9.4s, v31.4h, v3.4h\n"
- "smlal2 v18.4s, v31.8h, v3.8h\n"
- "ldr x21, [x16, #0x28]\n"
+ "smlal2 v26.4s, v31.8h, v4.8h\n"
+ "ldr x21, [x12, #0x28]\n"
+ "ldr x26, [x12, #0x38]\n"
+ "smlal v19.4s, v31.4h, v3.4h\n"
+ "smlal2 v11.4s, v31.8h, v3.8h\n"
+ "ldr x20, [x12, #0x30]\n"
+ "ldr x25, [x12, #0x40]\n"
"smlal v13.4s, v30.4h, v0.4h\n"
- "smlal2 v20.4s, v30.8h, v0.8h\n"
- "ldr q19, [x13, #0x10]\n"
- "ldr x28, [x16, #0x38]\n"
- "smlal v9.4s, v29.4h, v2.4h\n"
- "smlal2 v18.4s, v29.8h, v2.8h\n"
- "ldr x20, [x16, #0x30]\n"
- "ldr d29, [x20, x8]\n"
- "smlal v16.4s, v31.4h, v1.4h\n"
- "smlal2 v26.4s, v31.8h, v1.8h\n"
- "ldr x27, [x16, #0x40]\n"
- "ldr x26, [x16, #0x48]\n"
- "smlal v25.4s, v31.4h, v0.4h\n"
- "smlal2 v10.4s, v31.8h, v0.8h\n"
- "ldr d31, [x21, x8]\n"
- "usubl v31.8h, v31.8b, v24.8b\n"
+ "smlal2 v26.4s, v30.8h, v0.8h\n"
+ "ldr x19, [x12, #0x48]\n"
+ "ldr d30, [x19, x15]\n"
+ "smlal v19.4s, v29.4h, v2.4h\n"
+ "smlal2 v11.4s, v29.8h, v2.8h\n"
+ "ldr d29, [x20, x15]\n"
+ "usubl v29.8h, v29.8b, v22.8b\n"
+ "smlal v18.4s, v31.4h, v1.4h\n"
+ "smlal2 v24.4s, v31.8h, v1.8h\n"
+ "ldr x24, [x12, #0x50]\n"
+ "ldr x23, [x12, #0x58]\n"
+ "smlal v9.4s, v31.4h, v0.4h\n"
+ "smlal2 v23.4s, v31.8h, v0.8h\n"
+ "ldr d31, [x21, x15]\n"
+ "usubl v31.8h, v31.8b, v22.8b\n"
"smlal v13.4s, v28.4h, v5.4h\n"
- "smlal2 v20.4s, v28.8h, v5.8h\n"
- "usubl v29.8h, v29.8b, v24.8b\n"
- "ldr x25, [x16, #0x50]\n"
- "smlal v9.4s, v28.4h, v4.4h\n"
- "smlal2 v18.4s, v28.8h, v4.8h\n"
- "ldr x24, [x16, #0x58]\n"
- "ldr x23, [x16, #0x60]\n"
- "smlal v16.4s, v28.4h, v2.4h\n"
- "smlal2 v26.4s, v28.8h, v2.8h\n"
- "ldr x22, [x16, #0x68]\n"
- "ldr x21, [x16, #0x70]\n"
- "smlal v25.4s, v28.4h, v1.4h\n"
- "smlal2 v10.4s, v28.8h, v1.8h\n"
- "ldr d28, [x28, x8]\n"
- "usubl v28.8h, v28.8b, v24.8b\n"
+ "smlal2 v26.4s, v28.8h, v5.8h\n"
+ "usubl v30.8h, v30.8b, v22.8b\n"
+ "ldr x22, [x12, #0x60]\n"
+ "smlal v19.4s, v28.4h, v4.4h\n"
+ "smlal2 v11.4s, v28.8h, v4.8h\n"
+ "ldr x21, [x12, #0x68]\n"
+ "ldr x20, [x12, #0x70]\n"
+ "smlal v18.4s, v28.4h, v2.4h\n"
+ "smlal2 v24.4s, v28.8h, v2.8h\n"
+ "ldr x19, [x12, #0x78]\n"
+ "ldr q21, [x13, #0x0]\n"
+ "smlal v9.4s, v28.4h, v1.4h\n"
+ "smlal2 v23.4s, v28.8h, v1.8h\n"
+ "ldr d28, [x26, x15]\n"
+ "usubl v28.8h, v28.8b, v22.8b\n"
"smlal v13.4s, v27.4h, v7.4h\n"
- "smlal2 v20.4s, v27.8h, v7.8h\n"
- "ldr x20, [x16, #0x78]\n"
- "ldr x28, [%x[params], %[offsetof_Params_bias]]\n"
- "smlal v9.4s, v27.4h, v6.4h\n"
- "smlal2 v18.4s, v27.8h, v6.8h\n"
- "add x15, x15, #0x48\n"
- "subs x7, x7, #0x1\n"
- "smlal v16.4s, v31.4h, v6.4h\n"
- "smlal2 v26.4s, v31.8h, v6.8h\n"
- "ldr d31, [x27, x8]\n"
- "usubl v31.8h, v31.8b, v24.8b\n"
- "smlal v25.4s, v27.4h, v3.4h\n"
- "smlal2 v10.4s, v27.8h, v3.8h\n"
- "add x14, x14, #0x20\n"
+ "smlal2 v26.4s, v27.8h, v7.8h\n"
+ "ldr q25, [x11, #0x0]\n"
+ "ldr q10, [x13, #0x10]\n"
+ "smlal v19.4s, v27.4h, v6.4h\n"
+ "smlal2 v11.4s, v27.8h, v6.8h\n"
+ "ldr q16, [x11, #0x10]\n"
+ "add x17, x17, #0x48\n"
+ "smlal v18.4s, v31.4h, v6.4h\n"
+ "smlal2 v24.4s, v31.8h, v6.8h\n"
+ "ldr d31, [x25, x15]\n"
+ "usubl v31.8h, v31.8b, v22.8b\n"
+ "smlal v9.4s, v27.4h, v3.4h\n"
+ "smlal2 v23.4s, v27.8h, v3.8h\n"
+ "subs x16, x16, #0x1\n"
"add x13, x13, #0x20\n"
"smlal v13.4s, v28.4h, v1.4h\n"
- "smlal2 v20.4s, v28.8h, v1.8h\n"
- "smlal v9.4s, v28.4h, v0.4h\n"
- "smlal2 v18.4s, v28.8h, v0.8h\n"
- "ldr d30, [x26, x8]\n"
- "usubl v30.8h, v30.8b, v24.8b\n"
- "smlal v16.4s, v27.4h, v4.4h\n"
- "smlal v25.4s, v29.4h, v8.4h\n"
- "smlal2 v26.4s, v27.8h, v4.8h\n"
- "ldr d28, [x24, x8]\n"
- "smlal2 v10.4s, v29.8h, v8.8h\n"
- "ldr d29, [x25, x8]\n"
+ "smlal2 v26.4s, v28.8h, v1.8h\n"
+ "add x11, x11, #0x20\n"
+ "smlal v19.4s, v28.4h, v0.4h\n"
+ "smlal2 v11.4s, v28.8h, v0.8h\n"
+ "ldr d28, [x23, x15]\n"
+ "usubl v28.8h, v28.8b, v22.8b\n"
+ "smlal v18.4s, v27.4h, v4.4h\n"
+ "smlal v9.4s, v29.4h, v8.4h\n"
+ "smlal2 v24.4s, v27.8h, v4.8h\n"
+ "smlal2 v23.4s, v29.8h, v8.8h\n"
+ "ldr d29, [x24, x15]\n"
+ "usubl v29.8h, v29.8b, v22.8b\n"
"smlal v13.4s, v31.4h, v2.4h\n"
- "smlal2 v20.4s, v31.8h, v2.8h\n"
- "usubl v29.8h, v29.8b, v24.8b\n"
- "smlal v9.4s, v31.4h, v1.4h\n"
- "smlal2 v18.4s, v31.8h, v1.8h\n"
- "ldr d31, [x23, x8]\n"
- "usubl v28.8h, v28.8b, v24.8b\n"
- "smlal v16.4s, v30.4h, v5.4h\n"
- "smlal v25.4s, v30.4h, v4.4h\n"
- "usubl v31.8h, v31.8b, v24.8b\n"
+ "smlal2 v26.4s, v31.8h, v2.8h\n"
+ "smlal v19.4s, v31.4h, v1.4h\n"
+ "smlal2 v11.4s, v31.8h, v1.8h\n"
+ "ldr d31, [x22, x15]\n"
+ "usubl v31.8h, v31.8b, v22.8b\n"
+ "smlal v18.4s, v30.4h, v5.4h\n"
+ "smlal v9.4s, v30.4h, v4.4h\n"
"smlal v13.4s, v30.4h, v8.4h\n"
- "smlal2 v20.4s, v30.8h, v8.8h\n"
- "smlal v9.4s, v30.4h, v7.4h\n"
- "smlal2 v18.4s, v30.8h, v7.8h\n"
- "smlal2 v26.4s, v30.8h, v5.8h\n"
- "smlal2 v10.4s, v30.8h, v4.8h\n"
- "ldr d30, [x22, x8]\n"
- "usubl v30.8h, v30.8b, v24.8b\n"
- "smlal v16.4s, v29.4h, v0.4h\n"
- "smlal v25.4s, v28.4h, v2.4h\n"
+ "smlal2 v26.4s, v30.8h, v8.8h\n"
+ "smlal v19.4s, v30.4h, v7.4h\n"
+ "smlal2 v11.4s, v30.8h, v7.8h\n"
+ "smlal2 v24.4s, v30.8h, v5.8h\n"
+ "smlal2 v23.4s, v30.8h, v4.8h\n"
+ "ldr d30, [x21, x15]\n"
+ "usubl v30.8h, v30.8b, v22.8b\n"
+ "smlal v18.4s, v29.4h, v0.4h\n"
+ "smlal v9.4s, v28.4h, v2.4h\n"
"smlal v13.4s, v29.4h, v3.4h\n"
- "smlal2 v20.4s, v29.8h, v3.8h\n"
- "smlal2 v26.4s, v29.8h, v0.8h\n"
- "ldr d29, [x21, x8]\n"
- "smlal2 v10.4s, v28.8h, v2.8h\n"
- "usubl v29.8h, v29.8b, v24.8b\n"
- "smlal v16.4s, v31.4h, v3.4h\n"
- "smlal v25.4s, v30.4h, v5.4h\n"
- "smlal v9.4s, v28.4h, v5.4h\n"
- "smlal2 v18.4s, v28.8h, v5.8h\n"
- "ldr d28, [x20, x8]\n"
- "usubl v28.8h, v28.8b, v24.8b\n"
+ "smlal2 v26.4s, v29.8h, v3.8h\n"
+ "smlal2 v24.4s, v29.8h, v0.8h\n"
+ "ldr d29, [x20, x15]\n"
+ "smlal2 v23.4s, v28.8h, v2.8h\n"
+ "usubl v29.8h, v29.8b, v22.8b\n"
+ "smlal v18.4s, v31.4h, v3.4h\n"
+ "smlal v9.4s, v30.4h, v5.4h\n"
+ "smlal v19.4s, v28.4h, v5.4h\n"
+ "smlal2 v11.4s, v28.8h, v5.8h\n"
+ "ldr d28, [x19, x15]\n"
+ "usubl v28.8h, v28.8b, v22.8b\n"
+ "smlal2 v24.4s, v31.8h, v3.8h\n"
+ "smlal2 v23.4s, v30.8h, v5.8h\n"
+ "add x15, x15, #0x8\n"
+ "smlal v18.4s, v29.4h, v7.4h\n"
+ "smlal v9.4s, v29.4h, v6.4h\n"
+ "smlal2 v24.4s, v29.8h, v7.8h\n"
+ "smlal2 v23.4s, v29.8h, v6.8h\n"
"smlal v13.4s, v31.4h, v6.4h\n"
- "smlal2 v26.4s, v31.8h, v3.8h\n"
- "sqrdmulh v13.4s, v13.4s, v17.4s\n"
- "add x8, x8, #0x8\n"
- "smlal2 v10.4s, v30.8h, v5.8h\n"
- "smlal v16.4s, v29.4h, v7.4h\n"
- "and v21.16b, v13.16b, v22.16b\n"
- "smlal v25.4s, v29.4h, v6.4h\n"
- "smlal2 v20.4s, v31.8h, v6.8h\n"
- "sqrdmulh v20.4s, v20.4s, v23.4s\n"
- "smlal2 v26.4s, v29.8h, v7.8h\n"
- "smlal2 v10.4s, v29.8h, v6.8h\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "smlal v9.4s, v30.4h, v8.4h\n"
- "smlal v16.4s, v28.4h, v8.4h\n"
- "and v29.16b, v20.16b, v19.16b\n"
- "smlal v25.4s, v28.4h, v7.4h\n"
- "smlal2 v18.4s, v30.8h, v8.8h\n"
- "sqrdmulh v9.4s, v9.4s, v17.4s\n"
- "smlal2 v26.4s, v28.8h, v8.8h\n"
- "smlal2 v10.4s, v28.8h, v7.8h\n"
- "sqrdmulh v16.4s, v16.4s, v17.4s\n"
- "sqrdmulh v25.4s, v25.4s, v17.4s\n"
- "sqadd v13.4s, v13.4s, v21.4s\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "and v0.16b, v9.16b, v22.16b\n"
- "sqrdmulh v18.4s, v18.4s, v23.4s\n"
- "and v27.16b, v16.16b, v22.16b\n"
- "sqrdmulh v26.4s, v26.4s, v23.4s\n"
- "and v21.16b, v25.16b, v22.16b\n"
- "sqrdmulh v10.4s, v10.4s, v23.4s\n"
- "sqadd v20.4s, v20.4s, v29.4s\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "and v17.16b, v18.16b, v19.16b\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "and v7.16b, v26.16b, v19.16b\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "and v29.16b, v10.16b, v19.16b\n"
- "sqadd v9.4s, v9.4s, v0.4s\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v27.4s\n"
+ "smlal v19.4s, v30.4h, v8.4h\n"
+ "sqrdmulh v13.4s, v13.4s, v21.4s\n"
+ "smlal v18.4s, v28.4h, v8.4h\n"
+ "smlal v9.4s, v28.4h, v7.4h\n"
+ "sqrdmulh v19.4s, v19.4s, v21.4s\n"
+ "smlal2 v26.4s, v31.8h, v6.8h\n"
+ "smlal2 v11.4s, v30.8h, v8.8h\n"
+ "sqrdmulh v18.4s, v18.4s, v21.4s\n"
+ "smlal2 v24.4s, v28.8h, v8.8h\n"
+ "smlal2 v23.4s, v28.8h, v7.8h\n"
+ "sqrdmulh v9.4s, v9.4s, v21.4s\n"
+ "and v7.16b, v13.16b, v25.16b\n"
+ "sqrdmulh v26.4s, v26.4s, v10.4s\n"
+ "and v4.16b, v19.16b, v25.16b\n"
+ "sqrdmulh v11.4s, v11.4s, v10.4s\n"
+ "and v21.16b, v18.16b, v25.16b\n"
+ "sqrdmulh v24.4s, v24.4s, v10.4s\n"
+ "and v20.16b, v9.16b, v25.16b\n"
+ "sqrdmulh v23.4s, v23.4s, v10.4s\n"
"sshr v7.4s, v7.4s, #0x1f\n"
- "sqadd v25.4s, v25.4s, v21.4s\n"
+ "and v29.16b, v26.16b, v16.16b\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "and v10.16b, v11.16b, v16.16b\n"
+ "sshr v21.4s, v21.4s, #0x1f\n"
+ "and v31.16b, v24.16b, v16.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v30.16b, v23.16b, v16.16b\n"
+ "sqadd v13.4s, v13.4s, v7.4s\n"
"sshr v29.4s, v29.4s, #0x1f\n"
- "srshl v13.4s, v13.4s, v22.4s\n"
- "srshl v9.4s, v9.4s, v22.4s\n"
- "sqadd v18.4s, v18.4s, v17.4s\n"
- "srshl v16.4s, v16.4s, v22.4s\n"
- "sqadd v26.4s, v26.4s, v7.4s\n"
- "srshl v25.4s, v25.4s, v22.4s\n"
- "sqadd v10.4s, v10.4s, v29.4s\n"
- "srshl v20.4s, v20.4s, v19.4s\n"
+ "sqadd v19.4s, v19.4s, v4.4s\n"
+ "sshr v10.4s, v10.4s, #0x1f\n"
+ "sqadd v18.4s, v18.4s, v21.4s\n"
+ "sshr v31.4s, v31.4s, #0x1f\n"
+ "sqadd v9.4s, v9.4s, v20.4s\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "srshl v13.4s, v13.4s, v25.4s\n"
+ "sqadd v26.4s, v26.4s, v29.4s\n"
+ "srshl v19.4s, v19.4s, v25.4s\n"
+ "sqadd v11.4s, v11.4s, v10.4s\n"
+ "srshl v18.4s, v18.4s, v25.4s\n"
+ "sqadd v24.4s, v24.4s, v31.4s\n"
+ "srshl v9.4s, v9.4s, v25.4s\n"
+ "sqadd v23.4s, v23.4s, v30.4s\n"
+ "srshl v26.4s, v26.4s, v16.4s\n"
"sqxtn v13.4h, v13.4s\n"
- "srshl v18.4s, v18.4s, v19.4s\n"
+ "srshl v11.4s, v11.4s, v16.4s\n"
+ "sqxtn v19.4h, v19.4s\n"
+ "srshl v24.4s, v24.4s, v16.4s\n"
+ "sqxtn v18.4h, v18.4s\n"
+ "srshl v23.4s, v23.4s, v16.4s\n"
"sqxtn v9.4h, v9.4s\n"
- "srshl v26.4s, v26.4s, v19.4s\n"
- "sqxtn v16.4h, v16.4s\n"
- "srshl v10.4s, v10.4s, v19.4s\n"
- "sqxtn v25.4h, v25.4s\n"
- "sqxtn2 v13.8h, v20.4s\n"
- "sqxtn2 v9.8h, v18.4s\n"
- "sqxtn2 v16.8h, v26.4s\n"
- "sqxtn2 v25.8h, v10.4s\n"
+ "sqxtn2 v13.8h, v26.4s\n"
+ "sqxtn2 v19.8h, v11.4s\n"
+ "sqxtn2 v18.8h, v24.4s\n"
+ "sqxtn2 v9.8h, v23.4s\n"
"sqadd v13.8h, v13.8h, v14.8h\n"
+ "sqadd v19.8h, v19.8h, v14.8h\n"
+ "sqadd v18.8h, v18.8h, v14.8h\n"
"sqadd v9.8h, v9.8h, v14.8h\n"
- "sqadd v16.8h, v16.8h, v14.8h\n"
- "sqadd v25.8h, v25.8h, v14.8h\n"
- "smax v13.8h, v13.8h, v12.8h\n"
- "smax v9.8h, v9.8h, v12.8h\n"
- "smax v16.8h, v16.8h, v12.8h\n"
- "smax v25.8h, v25.8h, v12.8h\n"
- "smin v13.8h, v13.8h, v11.8h\n"
- "smin v9.8h, v9.8h, v11.8h\n"
- "smin v16.8h, v16.8h, v11.8h\n"
- "smin v25.8h, v25.8h, v11.8h\n"
+ "smax v13.8h, v13.8h, v17.8h\n"
+ "smax v19.8h, v19.8h, v17.8h\n"
+ "smax v18.8h, v18.8h, v17.8h\n"
+ "smax v9.8h, v9.8h, v17.8h\n"
+ "smin v13.8h, v13.8h, v15.8h\n"
+ "smin v19.8h, v19.8h, v15.8h\n"
+ "smin v18.8h, v18.8h, v15.8h\n"
+ "smin v9.8h, v9.8h, v15.8h\n"
"uzp1 v13.16b, v13.16b, v13.16b\n"
- "str d13, [x12, x17]\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str d13, [x10, x14]\n"
+ "uzp1 v18.16b, v18.16b, v18.16b\n"
"uzp1 v9.16b, v9.16b, v9.16b\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "str d9, [x11, x17]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "str d16, [x10, x17]\n"
- "str d25, [x9, x17]\n"
- "ldr q13, [x28, #0x0]\n"
- "ldr q20, [x28, #0x10]\n"
- "add x28, x28, #0x20\n"
- "ldr d0, [x15, #0x0]\n"
- "ldr d1, [x15, #0x8]\n"
- "add x17, x17, #0x8\n"
- "str x28, [%x[params], %[offsetof_Params_bias]]\n"
- "ldr d2, [x15, #0x10]\n"
- "ldr d3, [x15, #0x18]\n"
+ "str d19, [x9, x14]\n"
+ "str d18, [x28, x14]\n"
+ "str d9, [x27, x14]\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr q13, [x19, #0x0]\n"
+ "add x14, x14, #0x8\n"
+ "ldr q26, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr d0, [x17, #0x0]\n"
+ "ldr d1, [x17, #0x8]\n"
+ "ldr d2, [x17, #0x10]\n"
+ "mov v19.16b, v13.16b\n"
+ "mov v11.16b, v26.16b\n"
+ "ldr d3, [x17, #0x18]\n"
+ "ldr d4, [x17, #0x20]\n"
+ "mov v18.16b, v13.16b\n"
+ "mov v24.16b, v26.16b\n"
+ "ldr d5, [x17, #0x28]\n"
+ "ldr d6, [x17, #0x30]\n"
"mov v9.16b, v13.16b\n"
- "mov v18.16b, v20.16b\n"
- "ldr d4, [x15, #0x20]\n"
- "ldr d5, [x15, #0x28]\n"
- "mov v16.16b, v13.16b\n"
- "mov v26.16b, v20.16b\n"
- "ldr d6, [x15, #0x30]\n"
- "ldr d7, [x15, #0x38]\n"
- "mov v25.16b, v13.16b\n"
- "mov v10.16b, v20.16b\n"
- "ldr d8, [x15, #0x40]\n"
- "ldp x24, x23, [x16, #0x0]\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
- "ldp x22, x21, [x16, #0x10]\n"
- "ldr d31, [x24, x8]\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "ldr d30, [x23, x8]\n"
- "ldr d29, [x22, x8]\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "ssubl v5.8h, v5.8b, v15.8b\n"
- "ldr d28, [x21, x8]\n"
- "ldr x20, [x16, #0x20]\n"
- "ssubl v6.8h, v6.8b, v15.8b\n"
- "ssubl v7.8h, v7.8b, v15.8b\n"
- "ldr d27, [x20, x8]\n"
- "ssubl v8.8h, v8.8b, v15.8b\n"
- "usubl v31.8h, v31.8b, v24.8b\n"
- "usubl v30.8h, v30.8b, v24.8b\n"
- "usubl v29.8h, v29.8b, v24.8b\n"
- "usubl v28.8h, v28.8b, v24.8b\n"
- "usubl v27.8h, v27.8b, v24.8b\n"
+ "mov v23.16b, v26.16b\n"
+ "ldr d7, [x17, #0x38]\n"
+ "ldr d8, [x17, #0x40]\n"
+ "ssubl v0.8h, v0.8b, v12.8b\n"
+ "ssubl v1.8h, v1.8b, v12.8b\n"
+ "ldp x23, x22, [x12, #0x0]\n"
+ "ldp x21, x20, [x12, #0x10]\n"
+ "ssubl v2.8h, v2.8b, v12.8b\n"
+ "ssubl v3.8h, v3.8b, v12.8b\n"
+ "ldr x19, [x12, #0x20]\n"
+ "ldr d31, [x23, x15]\n"
+ "ssubl v4.8h, v4.8b, v12.8b\n"
+ "ssubl v5.8h, v5.8b, v12.8b\n"
+ "ldr d30, [x22, x15]\n"
+ "ldr d29, [x21, x15]\n"
+ "ssubl v6.8h, v6.8b, v12.8b\n"
+ "ssubl v7.8h, v7.8b, v12.8b\n"
+ "ldr d28, [x20, x15]\n"
+ "ldr d27, [x19, x15]\n"
+ "ssubl v8.8h, v8.8b, v12.8b\n"
+ "usubl v31.8h, v31.8b, v22.8b\n"
+ "usubl v30.8h, v30.8b, v22.8b\n"
+ "usubl v29.8h, v29.8b, v22.8b\n"
+ "usubl v28.8h, v28.8b, v22.8b\n"
+ "usubl v27.8h, v27.8b, v22.8b\n"
"bgt 1b\n"
"2:" // Tail
- "ldr q17, [x14, #0x0]\n"
- "ldr q22, [x13, #0x0]\n"
"smlal v13.4s, v31.4h, v4.4h\n"
- "smlal2 v20.4s, v31.8h, v4.8h\n"
- "ldr q23, [x14, #0x10]\n"
- "smlal v9.4s, v31.4h, v3.4h\n"
- "smlal2 v18.4s, v31.8h, v3.8h\n"
- "ldr x21, [x16, #0x28]\n"
+ "smlal2 v26.4s, v31.8h, v4.8h\n"
+ "ldr x21, [x12, #0x28]\n"
+ "ldr x26, [x12, #0x38]\n"
+ "smlal v19.4s, v31.4h, v3.4h\n"
+ "smlal2 v11.4s, v31.8h, v3.8h\n"
+ "ldr x20, [x12, #0x30]\n"
+ "ldr x25, [x12, #0x40]\n"
"smlal v13.4s, v30.4h, v0.4h\n"
- "smlal2 v20.4s, v30.8h, v0.8h\n"
- "ldr q19, [x13, #0x10]\n"
- "ldr x28, [x16, #0x38]\n"
- "smlal v9.4s, v29.4h, v2.4h\n"
- "smlal2 v18.4s, v29.8h, v2.8h\n"
- "ldr x20, [x16, #0x30]\n"
- "ldr d29, [x20, x8]\n"
- "smlal v16.4s, v31.4h, v1.4h\n"
- "smlal2 v26.4s, v31.8h, v1.8h\n"
- "ldr x27, [x16, #0x40]\n"
- "ldr x26, [x16, #0x48]\n"
- "smlal v25.4s, v31.4h, v0.4h\n"
- "smlal2 v10.4s, v31.8h, v0.8h\n"
- "ldr d31, [x21, x8]\n"
- "usubl v31.8h, v31.8b, v24.8b\n"
+ "smlal2 v26.4s, v30.8h, v0.8h\n"
+ "ldr x19, [x12, #0x48]\n"
+ "ldr d30, [x19, x15]\n"
+ "smlal v19.4s, v29.4h, v2.4h\n"
+ "smlal2 v11.4s, v29.8h, v2.8h\n"
+ "ldr d29, [x20, x15]\n"
+ "usubl v29.8h, v29.8b, v22.8b\n"
+ "smlal v18.4s, v31.4h, v1.4h\n"
+ "smlal2 v24.4s, v31.8h, v1.8h\n"
+ "ldr x24, [x12, #0x50]\n"
+ "ldr x23, [x12, #0x58]\n"
+ "smlal v9.4s, v31.4h, v0.4h\n"
+ "smlal2 v23.4s, v31.8h, v0.8h\n"
+ "ldr d31, [x21, x15]\n"
+ "usubl v31.8h, v31.8b, v22.8b\n"
"smlal v13.4s, v28.4h, v5.4h\n"
- "smlal2 v20.4s, v28.8h, v5.8h\n"
- "usubl v29.8h, v29.8b, v24.8b\n"
- "ldr x25, [x16, #0x50]\n"
- "smlal v9.4s, v28.4h, v4.4h\n"
- "smlal2 v18.4s, v28.8h, v4.8h\n"
- "ldr x24, [x16, #0x58]\n"
- "ldr x23, [x16, #0x60]\n"
- "smlal v16.4s, v28.4h, v2.4h\n"
- "smlal2 v26.4s, v28.8h, v2.8h\n"
- "ldr x22, [x16, #0x68]\n"
- "ldr x21, [x16, #0x70]\n"
- "smlal v25.4s, v28.4h, v1.4h\n"
- "smlal2 v10.4s, v28.8h, v1.8h\n"
- "ldr d28, [x28, x8]\n"
- "usubl v28.8h, v28.8b, v24.8b\n"
+ "smlal2 v26.4s, v28.8h, v5.8h\n"
+ "usubl v30.8h, v30.8b, v22.8b\n"
+ "ldr x22, [x12, #0x60]\n"
+ "smlal v19.4s, v28.4h, v4.4h\n"
+ "smlal2 v11.4s, v28.8h, v4.8h\n"
+ "ldr x21, [x12, #0x68]\n"
+ "ldr x20, [x12, #0x70]\n"
+ "smlal v18.4s, v28.4h, v2.4h\n"
+ "smlal2 v24.4s, v28.8h, v2.8h\n"
+ "ldr x19, [x12, #0x78]\n"
+ "ldr q21, [x13, #0x0]\n"
+ "smlal v9.4s, v28.4h, v1.4h\n"
+ "smlal2 v23.4s, v28.8h, v1.8h\n"
+ "ldr d28, [x26, x15]\n"
+ "usubl v28.8h, v28.8b, v22.8b\n"
"smlal v13.4s, v27.4h, v7.4h\n"
- "smlal2 v20.4s, v27.8h, v7.8h\n"
- "ldr x20, [x16, #0x78]\n"
- "tst x6, #0x7\n"
- "smlal v9.4s, v27.4h, v6.4h\n"
- "smlal2 v18.4s, v27.8h, v6.8h\n"
- "add x14, x14, #0x20\n"
+ "smlal2 v26.4s, v27.8h, v7.8h\n"
+ "ldr q25, [x11, #0x0]\n"
+ "ldr q10, [x13, #0x10]\n"
+ "smlal v19.4s, v27.4h, v6.4h\n"
+ "smlal2 v11.4s, v27.8h, v6.8h\n"
+ "ldr q16, [x11, #0x10]\n"
+ "tst x8, #0x7\n"
+ "smlal v18.4s, v31.4h, v6.4h\n"
+ "smlal2 v24.4s, v31.8h, v6.8h\n"
+ "ldr d31, [x25, x15]\n"
+ "usubl v31.8h, v31.8b, v22.8b\n"
+ "smlal v9.4s, v27.4h, v3.4h\n"
+ "smlal2 v23.4s, v27.8h, v3.8h\n"
"add x13, x13, #0x20\n"
- "smlal v16.4s, v31.4h, v6.4h\n"
- "smlal2 v26.4s, v31.8h, v6.8h\n"
- "ldr d31, [x27, x8]\n"
- "usubl v31.8h, v31.8b, v24.8b\n"
- "smlal v25.4s, v27.4h, v3.4h\n"
- "smlal2 v10.4s, v27.8h, v3.8h\n"
+ "add x11, x11, #0x20\n"
"smlal v13.4s, v28.4h, v1.4h\n"
- "smlal2 v20.4s, v28.8h, v1.8h\n"
- "smlal v9.4s, v28.4h, v0.4h\n"
- "smlal2 v18.4s, v28.8h, v0.8h\n"
- "ldr d30, [x26, x8]\n"
- "usubl v30.8h, v30.8b, v24.8b\n"
- "smlal v16.4s, v27.4h, v4.4h\n"
- "smlal v25.4s, v29.4h, v8.4h\n"
- "smlal2 v26.4s, v27.8h, v4.8h\n"
- "ldr d28, [x24, x8]\n"
- "smlal2 v10.4s, v29.8h, v8.8h\n"
- "ldr d29, [x25, x8]\n"
+ "smlal2 v26.4s, v28.8h, v1.8h\n"
+ "smlal v19.4s, v28.4h, v0.4h\n"
+ "smlal2 v11.4s, v28.8h, v0.8h\n"
+ "ldr d28, [x23, x15]\n"
+ "usubl v28.8h, v28.8b, v22.8b\n"
+ "smlal v18.4s, v27.4h, v4.4h\n"
+ "smlal v9.4s, v29.4h, v8.4h\n"
+ "smlal2 v24.4s, v27.8h, v4.8h\n"
+ "smlal2 v23.4s, v29.8h, v8.8h\n"
+ "ldr d29, [x24, x15]\n"
+ "usubl v29.8h, v29.8b, v22.8b\n"
"smlal v13.4s, v31.4h, v2.4h\n"
- "smlal2 v20.4s, v31.8h, v2.8h\n"
- "usubl v29.8h, v29.8b, v24.8b\n"
- "smlal v9.4s, v31.4h, v1.4h\n"
- "smlal2 v18.4s, v31.8h, v1.8h\n"
- "ldr d31, [x23, x8]\n"
- "usubl v28.8h, v28.8b, v24.8b\n"
- "smlal v16.4s, v30.4h, v5.4h\n"
- "smlal v25.4s, v30.4h, v4.4h\n"
- "usubl v31.8h, v31.8b, v24.8b\n"
+ "smlal2 v26.4s, v31.8h, v2.8h\n"
+ "smlal v19.4s, v31.4h, v1.4h\n"
+ "smlal2 v11.4s, v31.8h, v1.8h\n"
+ "ldr d31, [x22, x15]\n"
+ "usubl v31.8h, v31.8b, v22.8b\n"
+ "smlal v18.4s, v30.4h, v5.4h\n"
+ "smlal v9.4s, v30.4h, v4.4h\n"
"smlal v13.4s, v30.4h, v8.4h\n"
- "smlal2 v20.4s, v30.8h, v8.8h\n"
- "smlal v9.4s, v30.4h, v7.4h\n"
- "smlal2 v18.4s, v30.8h, v7.8h\n"
- "smlal2 v26.4s, v30.8h, v5.8h\n"
- "smlal2 v10.4s, v30.8h, v4.8h\n"
- "ldr d30, [x22, x8]\n"
- "usubl v30.8h, v30.8b, v24.8b\n"
- "smlal v16.4s, v29.4h, v0.4h\n"
- "smlal v25.4s, v28.4h, v2.4h\n"
+ "smlal2 v26.4s, v30.8h, v8.8h\n"
+ "smlal v19.4s, v30.4h, v7.4h\n"
+ "smlal2 v11.4s, v30.8h, v7.8h\n"
+ "smlal2 v24.4s, v30.8h, v5.8h\n"
+ "smlal2 v23.4s, v30.8h, v4.8h\n"
+ "ldr d30, [x21, x15]\n"
+ "usubl v30.8h, v30.8b, v22.8b\n"
+ "smlal v18.4s, v29.4h, v0.4h\n"
+ "smlal v9.4s, v28.4h, v2.4h\n"
"smlal v13.4s, v29.4h, v3.4h\n"
- "smlal2 v20.4s, v29.8h, v3.8h\n"
- "smlal2 v26.4s, v29.8h, v0.8h\n"
- "ldr d29, [x21, x8]\n"
- "smlal2 v10.4s, v28.8h, v2.8h\n"
- "usubl v29.8h, v29.8b, v24.8b\n"
- "smlal v16.4s, v31.4h, v3.4h\n"
- "smlal v25.4s, v30.4h, v5.4h\n"
- "smlal v9.4s, v28.4h, v5.4h\n"
- "smlal2 v18.4s, v28.8h, v5.8h\n"
- "ldr d28, [x20, x8]\n"
- "usubl v28.8h, v28.8b, v24.8b\n"
+ "smlal2 v26.4s, v29.8h, v3.8h\n"
+ "smlal2 v24.4s, v29.8h, v0.8h\n"
+ "ldr d29, [x20, x15]\n"
+ "smlal2 v23.4s, v28.8h, v2.8h\n"
+ "usubl v29.8h, v29.8b, v22.8b\n"
+ "smlal v18.4s, v31.4h, v3.4h\n"
+ "smlal v9.4s, v30.4h, v5.4h\n"
+ "smlal v19.4s, v28.4h, v5.4h\n"
+ "smlal2 v11.4s, v28.8h, v5.8h\n"
+ "ldr d28, [x19, x15]\n"
+ "usubl v28.8h, v28.8b, v22.8b\n"
+ "smlal2 v24.4s, v31.8h, v3.8h\n"
+ "smlal2 v23.4s, v30.8h, v5.8h\n"
+ "add x15, x15, #0x8\n"
+ "smlal v18.4s, v29.4h, v7.4h\n"
+ "smlal v9.4s, v29.4h, v6.4h\n"
+ "smlal2 v24.4s, v29.8h, v7.8h\n"
+ "smlal2 v23.4s, v29.8h, v6.8h\n"
"smlal v13.4s, v31.4h, v6.4h\n"
- "smlal2 v26.4s, v31.8h, v3.8h\n"
- "sqrdmulh v13.4s, v13.4s, v17.4s\n"
- "add x8, x8, #0x8\n"
- "smlal2 v10.4s, v30.8h, v5.8h\n"
- "smlal v16.4s, v29.4h, v7.4h\n"
- "and v21.16b, v13.16b, v22.16b\n"
- "smlal v25.4s, v29.4h, v6.4h\n"
- "smlal2 v20.4s, v31.8h, v6.8h\n"
- "sqrdmulh v20.4s, v20.4s, v23.4s\n"
- "smlal2 v26.4s, v29.8h, v7.8h\n"
- "smlal2 v10.4s, v29.8h, v6.8h\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "smlal v9.4s, v30.4h, v8.4h\n"
- "smlal v16.4s, v28.4h, v8.4h\n"
- "and v29.16b, v20.16b, v19.16b\n"
- "smlal v25.4s, v28.4h, v7.4h\n"
- "smlal2 v18.4s, v30.8h, v8.8h\n"
- "sqrdmulh v9.4s, v9.4s, v17.4s\n"
- "smlal2 v26.4s, v28.8h, v8.8h\n"
- "smlal2 v10.4s, v28.8h, v7.8h\n"
- "sqrdmulh v16.4s, v16.4s, v17.4s\n"
- "sqrdmulh v25.4s, v25.4s, v17.4s\n"
- "sqadd v13.4s, v13.4s, v21.4s\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "and v0.16b, v9.16b, v22.16b\n"
- "sqrdmulh v18.4s, v18.4s, v23.4s\n"
- "and v27.16b, v16.16b, v22.16b\n"
- "sqrdmulh v26.4s, v26.4s, v23.4s\n"
- "and v21.16b, v25.16b, v22.16b\n"
- "sqrdmulh v10.4s, v10.4s, v23.4s\n"
- "sqadd v20.4s, v20.4s, v29.4s\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "and v17.16b, v18.16b, v19.16b\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "and v7.16b, v26.16b, v19.16b\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "and v29.16b, v10.16b, v19.16b\n"
- "sqadd v9.4s, v9.4s, v0.4s\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v27.4s\n"
+ "smlal v19.4s, v30.4h, v8.4h\n"
+ "sqrdmulh v13.4s, v13.4s, v21.4s\n"
+ "smlal v18.4s, v28.4h, v8.4h\n"
+ "smlal v9.4s, v28.4h, v7.4h\n"
+ "sqrdmulh v19.4s, v19.4s, v21.4s\n"
+ "smlal2 v26.4s, v31.8h, v6.8h\n"
+ "smlal2 v11.4s, v30.8h, v8.8h\n"
+ "sqrdmulh v18.4s, v18.4s, v21.4s\n"
+ "smlal2 v24.4s, v28.8h, v8.8h\n"
+ "smlal2 v23.4s, v28.8h, v7.8h\n"
+ "sqrdmulh v9.4s, v9.4s, v21.4s\n"
+ "and v7.16b, v13.16b, v25.16b\n"
+ "sqrdmulh v26.4s, v26.4s, v10.4s\n"
+ "and v4.16b, v19.16b, v25.16b\n"
+ "sqrdmulh v11.4s, v11.4s, v10.4s\n"
+ "and v21.16b, v18.16b, v25.16b\n"
+ "sqrdmulh v24.4s, v24.4s, v10.4s\n"
+ "and v20.16b, v9.16b, v25.16b\n"
+ "sqrdmulh v23.4s, v23.4s, v10.4s\n"
"sshr v7.4s, v7.4s, #0x1f\n"
- "sqadd v25.4s, v25.4s, v21.4s\n"
+ "and v29.16b, v26.16b, v16.16b\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "and v10.16b, v11.16b, v16.16b\n"
+ "sshr v21.4s, v21.4s, #0x1f\n"
+ "and v31.16b, v24.16b, v16.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v30.16b, v23.16b, v16.16b\n"
+ "sqadd v13.4s, v13.4s, v7.4s\n"
"sshr v29.4s, v29.4s, #0x1f\n"
- "srshl v13.4s, v13.4s, v22.4s\n"
- "srshl v9.4s, v9.4s, v22.4s\n"
- "sqadd v18.4s, v18.4s, v17.4s\n"
- "srshl v16.4s, v16.4s, v22.4s\n"
- "sqadd v26.4s, v26.4s, v7.4s\n"
- "srshl v25.4s, v25.4s, v22.4s\n"
- "sqadd v10.4s, v10.4s, v29.4s\n"
- "srshl v20.4s, v20.4s, v19.4s\n"
+ "sqadd v19.4s, v19.4s, v4.4s\n"
+ "sshr v10.4s, v10.4s, #0x1f\n"
+ "sqadd v18.4s, v18.4s, v21.4s\n"
+ "sshr v31.4s, v31.4s, #0x1f\n"
+ "sqadd v9.4s, v9.4s, v20.4s\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "srshl v13.4s, v13.4s, v25.4s\n"
+ "sqadd v26.4s, v26.4s, v29.4s\n"
+ "srshl v19.4s, v19.4s, v25.4s\n"
+ "sqadd v11.4s, v11.4s, v10.4s\n"
+ "srshl v18.4s, v18.4s, v25.4s\n"
+ "sqadd v24.4s, v24.4s, v31.4s\n"
+ "srshl v9.4s, v9.4s, v25.4s\n"
+ "sqadd v23.4s, v23.4s, v30.4s\n"
+ "srshl v26.4s, v26.4s, v16.4s\n"
"sqxtn v13.4h, v13.4s\n"
- "srshl v18.4s, v18.4s, v19.4s\n"
+ "srshl v11.4s, v11.4s, v16.4s\n"
+ "sqxtn v19.4h, v19.4s\n"
+ "srshl v24.4s, v24.4s, v16.4s\n"
+ "sqxtn v18.4h, v18.4s\n"
+ "srshl v23.4s, v23.4s, v16.4s\n"
"sqxtn v9.4h, v9.4s\n"
- "srshl v26.4s, v26.4s, v19.4s\n"
- "sqxtn v16.4h, v16.4s\n"
- "srshl v10.4s, v10.4s, v19.4s\n"
- "sqxtn v25.4h, v25.4s\n"
- "sqxtn2 v13.8h, v20.4s\n"
- "sqxtn2 v9.8h, v18.4s\n"
- "sqxtn2 v16.8h, v26.4s\n"
- "sqxtn2 v25.8h, v10.4s\n"
+ "sqxtn2 v13.8h, v26.4s\n"
+ "sqxtn2 v19.8h, v11.4s\n"
+ "sqxtn2 v18.8h, v24.4s\n"
+ "sqxtn2 v9.8h, v23.4s\n"
"sqadd v13.8h, v13.8h, v14.8h\n"
+ "sqadd v19.8h, v19.8h, v14.8h\n"
+ "sqadd v18.8h, v18.8h, v14.8h\n"
"sqadd v9.8h, v9.8h, v14.8h\n"
- "sqadd v16.8h, v16.8h, v14.8h\n"
- "sqadd v25.8h, v25.8h, v14.8h\n"
- "smax v13.8h, v13.8h, v12.8h\n"
- "smax v9.8h, v9.8h, v12.8h\n"
- "smax v16.8h, v16.8h, v12.8h\n"
- "smax v25.8h, v25.8h, v12.8h\n"
- "smin v13.8h, v13.8h, v11.8h\n"
- "smin v9.8h, v9.8h, v11.8h\n"
- "smin v16.8h, v16.8h, v11.8h\n"
- "smin v25.8h, v25.8h, v11.8h\n"
+ "smax v13.8h, v13.8h, v17.8h\n"
+ "smax v19.8h, v19.8h, v17.8h\n"
+ "smax v18.8h, v18.8h, v17.8h\n"
+ "smax v9.8h, v9.8h, v17.8h\n"
+ "smin v13.8h, v13.8h, v15.8h\n"
+ "smin v19.8h, v19.8h, v15.8h\n"
+ "smin v18.8h, v18.8h, v15.8h\n"
+ "smin v9.8h, v9.8h, v15.8h\n"
"uzp1 v13.16b, v13.16b, v13.16b\n"
- "str d13, [x12, x17]\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str d13, [x10, x14]\n"
+ "uzp1 v18.16b, v18.16b, v18.16b\n"
"uzp1 v9.16b, v9.16b, v9.16b\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "str d9, [x11, x17]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "str d16, [x10, x17]\n"
- "str d25, [x9, x17]\n"
- "add x17, x17, #0x8\n"
+ "str d19, [x9, x14]\n"
+ "str d18, [x28, x14]\n"
+ "str d9, [x27, x14]\n"
+ "add x14, x14, #0x8\n"
"beq 64f\n"
- "add x15, x15, #0x48\n"
+ "add x17, x17, #0x48\n"
"3:" // Oddments
- "ldr x28, [%x[params], %[offsetof_Params_bias]]\n"
- "tbz x6, #2, 5f\n"
- "ld1 { v13.4s }, [x28], #0x10\n"
- "tbz x6, #1, 4f\n"
- "ld1 { v20.d }[0], [x28], #0x8\n"
- "tbz x6, #0, 7f\n"
- "ld1 { v20.s }[2], [x28]\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "tbz x8, #2, 5f\n"
+ "ld1 { v13.4s }, [x19], #0x10\n"
+ "tbz x8, #1, 4f\n"
+ "ld1 { v26.d }[0], [x19], #0x8\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v26.s }[2], [x19]\n"
"b 7f\n"
"4:" // Oddments: Load bias: Bit 2: Bit 1: Unset
- "tbz x6, #0, 7f\n"
- "ld1 { v20.s }[0], [x28]\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v26.s }[0], [x19]\n"
"b 7f\n"
"5:" // Oddments: Load bias: Bit 2: Unset
- "tbz x6, #1, 6f\n"
- "ld1 { v13.d }[0], [x28], #0x8\n"
- "tbz x6, #0, 7f\n"
- "ld1 { v13.s }[2], [x28]\n"
+ "tbz x8, #1, 6f\n"
+ "ld1 { v13.d }[0], [x19], #0x8\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v13.s }[2], [x19]\n"
"b 7f\n"
"6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 7f\n"
- "ld1 { v13.s }[0], [x28]\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v13.s }[0], [x19]\n"
"7:" // Oddments: Load bias: Bit 2: End
- "ldr d0, [x15, #0x0]\n"
- "ldr d1, [x15, #0x8]\n"
+ "ldr d0, [x17, #0x0]\n"
+ "ldr d1, [x17, #0x8]\n"
+ "mov v19.16b, v13.16b\n"
+ "mov v11.16b, v26.16b\n"
+ "ldr d2, [x17, #0x10]\n"
+ "ldr d3, [x17, #0x18]\n"
+ "mov v18.16b, v13.16b\n"
+ "mov v24.16b, v26.16b\n"
+ "ldr d4, [x17, #0x20]\n"
+ "ldr d5, [x17, #0x28]\n"
"mov v9.16b, v13.16b\n"
- "mov v18.16b, v20.16b\n"
- "ldr d2, [x15, #0x10]\n"
- "ldr d3, [x15, #0x18]\n"
- "mov v16.16b, v13.16b\n"
- "mov v26.16b, v20.16b\n"
- "ldr d4, [x15, #0x20]\n"
- "ldr d5, [x15, #0x28]\n"
- "mov v25.16b, v13.16b\n"
- "mov v10.16b, v20.16b\n"
- "ldr d6, [x15, #0x30]\n"
- "ldr d7, [x15, #0x38]\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
- "ldr d8, [x15, #0x40]\n"
- "ldp x24, x23, [x16, #0x0]\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "ldp x22, x21, [x16, #0x10]\n"
- "ldr x20, [x16, #0x20]\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "ssubl v5.8h, v5.8b, v15.8b\n"
- "ssubl v6.8h, v6.8b, v15.8b\n"
- "ssubl v7.8h, v7.8b, v15.8b\n"
- "ssubl v8.8h, v8.8b, v15.8b\n"
- "add x24, x24, x8\n"
- "add x23, x23, x8\n"
- "add x22, x22, x8\n"
- "add x21, x21, x8\n"
- "add x20, x20, x8\n"
- "tbz x6, #2, 9f\n"
- "ld1 { v31.s }[0], [x24], #0x4\n"
- "ld1 { v30.s }[0], [x23], #0x4\n"
- "ld1 { v29.s }[0], [x22], #0x4\n"
- "ld1 { v28.s }[0], [x21], #0x4\n"
- "ld1 { v27.s }[0], [x20], #0x4\n"
- "tbz x6, #1, 8f\n"
- "ld1 { v31.h }[2], [x24], #0x2\n"
- "ld1 { v30.h }[2], [x23], #0x2\n"
- "ld1 { v29.h }[2], [x22], #0x2\n"
- "ld1 { v28.h }[2], [x21], #0x2\n"
- "ld1 { v27.h }[2], [x20], #0x2\n"
- "tbz x6, #0, 11f\n"
- "ld1 { v31.b }[6], [x24]\n"
- "ld1 { v30.b }[6], [x23]\n"
- "ld1 { v29.b }[6], [x22]\n"
- "ld1 { v28.b }[6], [x21]\n"
- "ld1 { v27.b }[6], [x20]\n"
+ "mov v23.16b, v26.16b\n"
+ "ldr d6, [x17, #0x30]\n"
+ "ldr d7, [x17, #0x38]\n"
+ "ssubl v0.8h, v0.8b, v12.8b\n"
+ "ssubl v1.8h, v1.8b, v12.8b\n"
+ "ldr d8, [x17, #0x40]\n"
+ "ldp x23, x22, [x12, #0x0]\n"
+ "ssubl v2.8h, v2.8b, v12.8b\n"
+ "ssubl v3.8h, v3.8b, v12.8b\n"
+ "ldp x21, x20, [x12, #0x10]\n"
+ "ldr x19, [x12, #0x20]\n"
+ "ssubl v4.8h, v4.8b, v12.8b\n"
+ "ssubl v5.8h, v5.8b, v12.8b\n"
+ "ssubl v6.8h, v6.8b, v12.8b\n"
+ "ssubl v7.8h, v7.8b, v12.8b\n"
+ "ssubl v8.8h, v8.8b, v12.8b\n"
+ "add x23, x23, x15\n"
+ "add x22, x22, x15\n"
+ "add x21, x21, x15\n"
+ "add x20, x20, x15\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 9f\n"
+ "ld1 { v31.s }[0], [x23], #0x4\n"
+ "ld1 { v30.s }[0], [x22], #0x4\n"
+ "ld1 { v29.s }[0], [x21], #0x4\n"
+ "ld1 { v28.s }[0], [x20], #0x4\n"
+ "ld1 { v27.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 8f\n"
+ "ld1 { v31.h }[2], [x23], #0x2\n"
+ "ld1 { v30.h }[2], [x22], #0x2\n"
+ "ld1 { v29.h }[2], [x21], #0x2\n"
+ "ld1 { v28.h }[2], [x20], #0x2\n"
+ "ld1 { v27.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v31.b }[6], [x23]\n"
+ "ld1 { v30.b }[6], [x22]\n"
+ "ld1 { v29.b }[6], [x21]\n"
+ "ld1 { v28.b }[6], [x20]\n"
+ "ld1 { v27.b }[6], [x19]\n"
"b 11f\n"
"8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset
- "tbz x6, #0, 11f\n"
- "ld1 { v31.b }[4], [x24]\n"
- "ld1 { v30.b }[4], [x23]\n"
- "ld1 { v29.b }[4], [x22]\n"
- "ld1 { v28.b }[4], [x21]\n"
- "ld1 { v27.b }[4], [x20]\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v31.b }[4], [x23]\n"
+ "ld1 { v30.b }[4], [x22]\n"
+ "ld1 { v29.b }[4], [x21]\n"
+ "ld1 { v28.b }[4], [x20]\n"
+ "ld1 { v27.b }[4], [x19]\n"
"b 11f\n"
"9:" // Oddments: Initial loads: Bit 2: Unset
- "tbz x6, #1, 10f\n"
- "ld1 { v31.h }[0], [x24], #0x2\n"
- "ld1 { v30.h }[0], [x23], #0x2\n"
- "ld1 { v29.h }[0], [x22], #0x2\n"
- "ld1 { v28.h }[0], [x21], #0x2\n"
- "ld1 { v27.h }[0], [x20], #0x2\n"
- "tbz x6, #0, 11f\n"
- "ld1 { v31.b }[2], [x24]\n"
- "ld1 { v30.b }[2], [x23]\n"
- "ld1 { v29.b }[2], [x22]\n"
- "ld1 { v28.b }[2], [x21]\n"
- "ld1 { v27.b }[2], [x20]\n"
+ "tbz x8, #1, 10f\n"
+ "ld1 { v31.h }[0], [x23], #0x2\n"
+ "ld1 { v30.h }[0], [x22], #0x2\n"
+ "ld1 { v29.h }[0], [x21], #0x2\n"
+ "ld1 { v28.h }[0], [x20], #0x2\n"
+ "ld1 { v27.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v31.b }[2], [x23]\n"
+ "ld1 { v30.b }[2], [x22]\n"
+ "ld1 { v29.b }[2], [x21]\n"
+ "ld1 { v28.b }[2], [x20]\n"
+ "ld1 { v27.b }[2], [x19]\n"
"b 11f\n"
"10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 11f\n"
- "ld1 { v31.b }[0], [x24]\n"
- "ld1 { v30.b }[0], [x23]\n"
- "ld1 { v29.b }[0], [x22]\n"
- "ld1 { v28.b }[0], [x21]\n"
- "ld1 { v27.b }[0], [x20]\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v31.b }[0], [x23]\n"
+ "ld1 { v30.b }[0], [x22]\n"
+ "ld1 { v29.b }[0], [x21]\n"
+ "ld1 { v28.b }[0], [x20]\n"
+ "ld1 { v27.b }[0], [x19]\n"
"11:" // Oddments: Initial loads: Bit 2: End
- "usubl v31.8h, v31.8b, v24.8b\n"
+ "usubl v31.8h, v31.8b, v22.8b\n"
"smlal v13.4s, v31.4h, v4.4h\n"
- "smlal2 v20.4s, v31.8h, v4.8h\n"
- "ldr x21, [x16, #0x28]\n"
- "smlal v9.4s, v31.4h, v3.4h\n"
- "smlal2 v18.4s, v31.8h, v3.8h\n"
- "usubl v30.8h, v30.8b, v24.8b\n"
- "add x21, x21, x8\n"
- "usubl v29.8h, v29.8b, v24.8b\n"
- "smlal v16.4s, v31.4h, v1.4h\n"
- "smlal2 v26.4s, v31.8h, v1.8h\n"
- "smlal v25.4s, v31.4h, v0.4h\n"
- "smlal2 v10.4s, v31.8h, v0.8h\n"
- "usubl v28.8h, v28.8b, v24.8b\n"
+ "smlal2 v26.4s, v31.8h, v4.8h\n"
+ "ldr x21, [x12, #0x28]\n"
+ "smlal v19.4s, v31.4h, v3.4h\n"
+ "smlal2 v11.4s, v31.8h, v3.8h\n"
+ "usubl v30.8h, v30.8b, v22.8b\n"
+ "add x21, x21, x15\n"
+ "usubl v29.8h, v29.8b, v22.8b\n"
+ "smlal v18.4s, v31.4h, v1.4h\n"
+ "smlal2 v24.4s, v31.8h, v1.8h\n"
+ "smlal v9.4s, v31.4h, v0.4h\n"
+ "smlal2 v23.4s, v31.8h, v0.8h\n"
+ "usubl v28.8h, v28.8b, v22.8b\n"
"smlal v13.4s, v30.4h, v0.4h\n"
- "smlal2 v20.4s, v30.8h, v0.8h\n"
- "usubl v27.8h, v27.8b, v24.8b\n"
- "smlal v9.4s, v29.4h, v2.4h\n"
- "smlal2 v18.4s, v29.8h, v2.8h\n"
+ "smlal2 v26.4s, v30.8h, v0.8h\n"
+ "usubl v27.8h, v27.8b, v22.8b\n"
+ "smlal v19.4s, v29.4h, v2.4h\n"
+ "smlal2 v11.4s, v29.8h, v2.8h\n"
"smlal v13.4s, v28.4h, v5.4h\n"
- "smlal2 v20.4s, v28.8h, v5.8h\n"
- "smlal v9.4s, v28.4h, v4.4h\n"
- "smlal2 v18.4s, v28.8h, v4.8h\n"
- "smlal v16.4s, v28.4h, v2.4h\n"
- "smlal2 v26.4s, v28.8h, v2.8h\n"
- "smlal v25.4s, v28.4h, v1.4h\n"
- "smlal2 v10.4s, v28.8h, v1.8h\n"
- "tbz x6, #2, 13f\n"
+ "smlal2 v26.4s, v28.8h, v5.8h\n"
+ "smlal v19.4s, v28.4h, v4.4h\n"
+ "smlal2 v11.4s, v28.8h, v4.8h\n"
+ "smlal v18.4s, v28.4h, v2.4h\n"
+ "smlal2 v24.4s, v28.8h, v2.8h\n"
+ "smlal v9.4s, v28.4h, v1.4h\n"
+ "smlal2 v23.4s, v28.8h, v1.8h\n"
+ "tbz x8, #2, 13f\n"
"ld1 { v31.s }[0], [x21], #0x4\n"
- "tbz x6, #1, 12f\n"
+ "tbz x8, #1, 12f\n"
"ld1 { v31.h }[2], [x21], #0x2\n"
- "tbz x6, #0, 15f\n"
+ "tbz x8, #0, 15f\n"
"ld1 { v31.b }[6], [x21]\n"
"b 15f\n"
"12:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset
- "tbz x6, #0, 15f\n"
+ "tbz x8, #0, 15f\n"
"ld1 { v31.b }[4], [x21]\n"
"b 15f\n"
"13:" // Oddments: Load (3, 0): Bit 2: Unset
- "tbz x6, #1, 14f\n"
+ "tbz x8, #1, 14f\n"
"ld1 { v31.h }[0], [x21], #0x2\n"
- "tbz x6, #0, 15f\n"
+ "tbz x8, #0, 15f\n"
"ld1 { v31.b }[2], [x21]\n"
"b 15f\n"
"14:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 15f\n"
+ "tbz x8, #0, 15f\n"
"ld1 { v31.b }[0], [x21]\n"
"15:" // Oddments: Load (3, 0): Bit 2: End
- "usubl v31.8h, v31.8b, v24.8b\n"
- "smlal v16.4s, v31.4h, v6.4h\n"
- "smlal2 v26.4s, v31.8h, v6.8h\n"
- "ldr x20, [x16, #0x30]\n"
+ "usubl v31.8h, v31.8b, v22.8b\n"
+ "smlal v18.4s, v31.4h, v6.4h\n"
+ "smlal2 v24.4s, v31.8h, v6.8h\n"
+ "ldr x20, [x12, #0x30]\n"
"smlal v13.4s, v27.4h, v7.4h\n"
- "smlal2 v20.4s, v27.8h, v7.8h\n"
- "add x20, x20, x8\n"
- "smlal v9.4s, v27.4h, v6.4h\n"
- "smlal2 v18.4s, v27.8h, v6.8h\n"
- "smlal v16.4s, v27.4h, v4.4h\n"
- "smlal2 v26.4s, v27.8h, v4.8h\n"
- "smlal v25.4s, v27.4h, v3.4h\n"
- "smlal2 v10.4s, v27.8h, v3.8h\n"
- "tbz x6, #2, 17f\n"
+ "smlal2 v26.4s, v27.8h, v7.8h\n"
+ "add x20, x20, x15\n"
+ "smlal v19.4s, v27.4h, v6.4h\n"
+ "smlal2 v11.4s, v27.8h, v6.8h\n"
+ "smlal v18.4s, v27.4h, v4.4h\n"
+ "smlal2 v24.4s, v27.8h, v4.8h\n"
+ "smlal v9.4s, v27.4h, v3.4h\n"
+ "smlal2 v23.4s, v27.8h, v3.8h\n"
+ "tbz x8, #2, 17f\n"
"ld1 { v29.s }[0], [x20], #0x4\n"
- "tbz x6, #1, 16f\n"
+ "tbz x8, #1, 16f\n"
"ld1 { v29.h }[2], [x20], #0x2\n"
- "tbz x6, #0, 19f\n"
+ "tbz x8, #0, 19f\n"
"ld1 { v29.b }[6], [x20]\n"
"b 19f\n"
"16:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset
- "tbz x6, #0, 19f\n"
+ "tbz x8, #0, 19f\n"
"ld1 { v29.b }[4], [x20]\n"
"b 19f\n"
"17:" // Oddments: Load (3, 3): Bit 2: Unset
- "tbz x6, #1, 18f\n"
+ "tbz x8, #1, 18f\n"
"ld1 { v29.h }[0], [x20], #0x2\n"
- "tbz x6, #0, 19f\n"
+ "tbz x8, #0, 19f\n"
"ld1 { v29.b }[2], [x20]\n"
"b 19f\n"
"18:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 19f\n"
+ "tbz x8, #0, 19f\n"
"ld1 { v29.b }[0], [x20]\n"
"19:" // Oddments: Load (3, 3): Bit 2: End
- "usubl v29.8h, v29.8b, v24.8b\n"
- "ldr x28, [x16, #0x38]\n"
- "smlal v25.4s, v29.4h, v8.4h\n"
- "smlal2 v10.4s, v29.8h, v8.8h\n"
- "add x28, x28, x8\n"
- "tbz x6, #2, 21f\n"
- "ld1 { v28.s }[0], [x28], #0x4\n"
- "tbz x6, #1, 20f\n"
- "ld1 { v28.h }[2], [x28], #0x2\n"
- "tbz x6, #0, 23f\n"
- "ld1 { v28.b }[6], [x28]\n"
+ "usubl v29.8h, v29.8b, v22.8b\n"
+ "ldr x26, [x12, #0x38]\n"
+ "smlal v9.4s, v29.4h, v8.4h\n"
+ "smlal2 v23.4s, v29.8h, v8.8h\n"
+ "add x26, x26, x15\n"
+ "tbz x8, #2, 21f\n"
+ "ld1 { v28.s }[0], [x26], #0x4\n"
+ "tbz x8, #1, 20f\n"
+ "ld1 { v28.h }[2], [x26], #0x2\n"
+ "tbz x8, #0, 23f\n"
+ "ld1 { v28.b }[6], [x26]\n"
"b 23f\n"
"20:" // Oddments: Load (0, 1): Bit 2: Bit 1: Unset
- "tbz x6, #0, 23f\n"
- "ld1 { v28.b }[4], [x28]\n"
+ "tbz x8, #0, 23f\n"
+ "ld1 { v28.b }[4], [x26]\n"
"b 23f\n"
"21:" // Oddments: Load (0, 1): Bit 2: Unset
- "tbz x6, #1, 22f\n"
- "ld1 { v28.h }[0], [x28], #0x2\n"
- "tbz x6, #0, 23f\n"
- "ld1 { v28.b }[2], [x28]\n"
+ "tbz x8, #1, 22f\n"
+ "ld1 { v28.h }[0], [x26], #0x2\n"
+ "tbz x8, #0, 23f\n"
+ "ld1 { v28.b }[2], [x26]\n"
"b 23f\n"
"22:" // Oddments: Load (0, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 23f\n"
- "ld1 { v28.b }[0], [x28]\n"
+ "tbz x8, #0, 23f\n"
+ "ld1 { v28.b }[0], [x26]\n"
"23:" // Oddments: Load (0, 1): Bit 2: End
- "usubl v28.8h, v28.8b, v24.8b\n"
- "ldr x27, [x16, #0x40]\n"
+ "usubl v28.8h, v28.8b, v22.8b\n"
+ "ldr x25, [x12, #0x40]\n"
"smlal v13.4s, v28.4h, v1.4h\n"
- "smlal2 v20.4s, v28.8h, v1.8h\n"
- "smlal v9.4s, v28.4h, v0.4h\n"
- "smlal2 v18.4s, v28.8h, v0.8h\n"
- "add x27, x27, x8\n"
- "tbz x6, #2, 25f\n"
- "ld1 { v31.s }[0], [x27], #0x4\n"
- "tbz x6, #1, 24f\n"
- "ld1 { v31.h }[2], [x27], #0x2\n"
- "tbz x6, #0, 27f\n"
- "ld1 { v31.b }[6], [x27]\n"
+ "smlal2 v26.4s, v28.8h, v1.8h\n"
+ "smlal v19.4s, v28.4h, v0.4h\n"
+ "smlal2 v11.4s, v28.8h, v0.8h\n"
+ "add x25, x25, x15\n"
+ "tbz x8, #2, 25f\n"
+ "ld1 { v31.s }[0], [x25], #0x4\n"
+ "tbz x8, #1, 24f\n"
+ "ld1 { v31.h }[2], [x25], #0x2\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v31.b }[6], [x25]\n"
"b 27f\n"
"24:" // Oddments: Load (0, 2): Bit 2: Bit 1: Unset
- "tbz x6, #0, 27f\n"
- "ld1 { v31.b }[4], [x27]\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v31.b }[4], [x25]\n"
"b 27f\n"
"25:" // Oddments: Load (0, 2): Bit 2: Unset
- "tbz x6, #1, 26f\n"
- "ld1 { v31.h }[0], [x27], #0x2\n"
- "tbz x6, #0, 27f\n"
- "ld1 { v31.b }[2], [x27]\n"
+ "tbz x8, #1, 26f\n"
+ "ld1 { v31.h }[0], [x25], #0x2\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v31.b }[2], [x25]\n"
"b 27f\n"
"26:" // Oddments: Load (0, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 27f\n"
- "ld1 { v31.b }[0], [x27]\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v31.b }[0], [x25]\n"
"27:" // Oddments: Load (0, 2): Bit 2: End
- "usubl v31.8h, v31.8b, v24.8b\n"
- "ldr x26, [x16, #0x48]\n"
+ "usubl v31.8h, v31.8b, v22.8b\n"
+ "ldr x19, [x12, #0x48]\n"
"smlal v13.4s, v31.4h, v2.4h\n"
- "smlal2 v20.4s, v31.8h, v2.8h\n"
- "smlal v9.4s, v31.4h, v1.4h\n"
- "smlal2 v18.4s, v31.8h, v1.8h\n"
- "add x26, x26, x8\n"
- "tbz x6, #2, 29f\n"
- "ld1 { v30.s }[0], [x26], #0x4\n"
- "tbz x6, #1, 28f\n"
- "ld1 { v30.h }[2], [x26], #0x2\n"
- "tbz x6, #0, 31f\n"
- "ld1 { v30.b }[6], [x26]\n"
+ "smlal2 v26.4s, v31.8h, v2.8h\n"
+ "smlal v19.4s, v31.4h, v1.4h\n"
+ "smlal2 v11.4s, v31.8h, v1.8h\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 29f\n"
+ "ld1 { v30.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 28f\n"
+ "ld1 { v30.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 31f\n"
+ "ld1 { v30.b }[6], [x19]\n"
"b 31f\n"
"28:" // Oddments: Load (2, 2): Bit 2: Bit 1: Unset
- "tbz x6, #0, 31f\n"
- "ld1 { v30.b }[4], [x26]\n"
+ "tbz x8, #0, 31f\n"
+ "ld1 { v30.b }[4], [x19]\n"
"b 31f\n"
"29:" // Oddments: Load (2, 2): Bit 2: Unset
- "tbz x6, #1, 30f\n"
- "ld1 { v30.h }[0], [x26], #0x2\n"
- "tbz x6, #0, 31f\n"
- "ld1 { v30.b }[2], [x26]\n"
+ "tbz x8, #1, 30f\n"
+ "ld1 { v30.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 31f\n"
+ "ld1 { v30.b }[2], [x19]\n"
"b 31f\n"
"30:" // Oddments: Load (2, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 31f\n"
- "ld1 { v30.b }[0], [x26]\n"
+ "tbz x8, #0, 31f\n"
+ "ld1 { v30.b }[0], [x19]\n"
"31:" // Oddments: Load (2, 2): Bit 2: End
- "usubl v30.8h, v30.8b, v24.8b\n"
- "ldr x25, [x16, #0x50]\n"
+ "usubl v30.8h, v30.8b, v22.8b\n"
+ "ldr x24, [x12, #0x50]\n"
"smlal v13.4s, v30.4h, v8.4h\n"
- "smlal2 v20.4s, v30.8h, v8.8h\n"
- "smlal v9.4s, v30.4h, v7.4h\n"
- "smlal2 v18.4s, v30.8h, v7.8h\n"
- "add x25, x25, x8\n"
- "smlal v16.4s, v30.4h, v5.4h\n"
- "smlal2 v26.4s, v30.8h, v5.8h\n"
- "smlal v25.4s, v30.4h, v4.4h\n"
- "smlal2 v10.4s, v30.8h, v4.8h\n"
- "tbz x6, #2, 33f\n"
- "ld1 { v29.s }[0], [x25], #0x4\n"
- "tbz x6, #1, 32f\n"
- "ld1 { v29.h }[2], [x25], #0x2\n"
- "tbz x6, #0, 35f\n"
- "ld1 { v29.b }[6], [x25]\n"
+ "smlal2 v26.4s, v30.8h, v8.8h\n"
+ "smlal v19.4s, v30.4h, v7.4h\n"
+ "smlal2 v11.4s, v30.8h, v7.8h\n"
+ "add x24, x24, x15\n"
+ "smlal v18.4s, v30.4h, v5.4h\n"
+ "smlal2 v24.4s, v30.8h, v5.8h\n"
+ "smlal v9.4s, v30.4h, v4.4h\n"
+ "smlal2 v23.4s, v30.8h, v4.8h\n"
+ "tbz x8, #2, 33f\n"
+ "ld1 { v29.s }[0], [x24], #0x4\n"
+ "tbz x8, #1, 32f\n"
+ "ld1 { v29.h }[2], [x24], #0x2\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v29.b }[6], [x24]\n"
"b 35f\n"
"32:" // Oddments: Load (1, 0): Bit 2: Bit 1: Unset
- "tbz x6, #0, 35f\n"
- "ld1 { v29.b }[4], [x25]\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v29.b }[4], [x24]\n"
"b 35f\n"
"33:" // Oddments: Load (1, 0): Bit 2: Unset
- "tbz x6, #1, 34f\n"
- "ld1 { v29.h }[0], [x25], #0x2\n"
- "tbz x6, #0, 35f\n"
- "ld1 { v29.b }[2], [x25]\n"
+ "tbz x8, #1, 34f\n"
+ "ld1 { v29.h }[0], [x24], #0x2\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v29.b }[2], [x24]\n"
"b 35f\n"
"34:" // Oddments: Load (1, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 35f\n"
- "ld1 { v29.b }[0], [x25]\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v29.b }[0], [x24]\n"
"35:" // Oddments: Load (1, 0): Bit 2: End
- "usubl v29.8h, v29.8b, v24.8b\n"
- "ldr x24, [x16, #0x58]\n"
+ "usubl v29.8h, v29.8b, v22.8b\n"
+ "ldr x23, [x12, #0x58]\n"
"smlal v13.4s, v29.4h, v3.4h\n"
- "smlal2 v20.4s, v29.8h, v3.8h\n"
- "smlal v16.4s, v29.4h, v0.4h\n"
- "smlal2 v26.4s, v29.8h, v0.8h\n"
- "add x24, x24, x8\n"
- "tbz x6, #2, 37f\n"
- "ld1 { v28.s }[0], [x24], #0x4\n"
- "tbz x6, #1, 36f\n"
- "ld1 { v28.h }[2], [x24], #0x2\n"
- "tbz x6, #0, 39f\n"
- "ld1 { v28.b }[6], [x24]\n"
+ "smlal2 v26.4s, v29.8h, v3.8h\n"
+ "smlal v18.4s, v29.4h, v0.4h\n"
+ "smlal2 v24.4s, v29.8h, v0.8h\n"
+ "add x23, x23, x15\n"
+ "tbz x8, #2, 37f\n"
+ "ld1 { v28.s }[0], [x23], #0x4\n"
+ "tbz x8, #1, 36f\n"
+ "ld1 { v28.h }[2], [x23], #0x2\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v28.b }[6], [x23]\n"
"b 39f\n"
"36:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset
- "tbz x6, #0, 39f\n"
- "ld1 { v28.b }[4], [x24]\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v28.b }[4], [x23]\n"
"b 39f\n"
"37:" // Oddments: Load (1, 3): Bit 2: Unset
- "tbz x6, #1, 38f\n"
- "ld1 { v28.h }[0], [x24], #0x2\n"
- "tbz x6, #0, 39f\n"
- "ld1 { v28.b }[2], [x24]\n"
+ "tbz x8, #1, 38f\n"
+ "ld1 { v28.h }[0], [x23], #0x2\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v28.b }[2], [x23]\n"
"b 39f\n"
"38:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 39f\n"
- "ld1 { v28.b }[0], [x24]\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v28.b }[0], [x23]\n"
"39:" // Oddments: Load (1, 3): Bit 2: End
- "usubl v28.8h, v28.8b, v24.8b\n"
- "ldr x23, [x16, #0x60]\n"
- "smlal v9.4s, v28.4h, v5.4h\n"
- "smlal2 v18.4s, v28.8h, v5.8h\n"
- "smlal v25.4s, v28.4h, v2.4h\n"
- "smlal2 v10.4s, v28.8h, v2.8h\n"
- "add x23, x23, x8\n"
- "tbz x6, #2, 41f\n"
- "ld1 { v31.s }[0], [x23], #0x4\n"
- "tbz x6, #1, 40f\n"
- "ld1 { v31.h }[2], [x23], #0x2\n"
- "tbz x6, #0, 43f\n"
- "ld1 { v31.b }[6], [x23]\n"
+ "usubl v28.8h, v28.8b, v22.8b\n"
+ "ldr x22, [x12, #0x60]\n"
+ "smlal v19.4s, v28.4h, v5.4h\n"
+ "smlal2 v11.4s, v28.8h, v5.8h\n"
+ "smlal v9.4s, v28.4h, v2.4h\n"
+ "smlal2 v23.4s, v28.8h, v2.8h\n"
+ "add x22, x22, x15\n"
+ "tbz x8, #2, 41f\n"
+ "ld1 { v31.s }[0], [x22], #0x4\n"
+ "tbz x8, #1, 40f\n"
+ "ld1 { v31.h }[2], [x22], #0x2\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v31.b }[6], [x22]\n"
"b 43f\n"
"40:" // Oddments: Load (2, 0): Bit 2: Bit 1: Unset
- "tbz x6, #0, 43f\n"
- "ld1 { v31.b }[4], [x23]\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v31.b }[4], [x22]\n"
"b 43f\n"
"41:" // Oddments: Load (2, 0): Bit 2: Unset
- "tbz x6, #1, 42f\n"
- "ld1 { v31.h }[0], [x23], #0x2\n"
- "tbz x6, #0, 43f\n"
- "ld1 { v31.b }[2], [x23]\n"
+ "tbz x8, #1, 42f\n"
+ "ld1 { v31.h }[0], [x22], #0x2\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v31.b }[2], [x22]\n"
"b 43f\n"
"42:" // Oddments: Load (2, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 43f\n"
- "ld1 { v31.b }[0], [x23]\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v31.b }[0], [x22]\n"
"43:" // Oddments: Load (2, 0): Bit 2: End
- "usubl v31.8h, v31.8b, v24.8b\n"
- "ldr x22, [x16, #0x68]\n"
+ "usubl v31.8h, v31.8b, v22.8b\n"
+ "ldr x21, [x12, #0x68]\n"
"smlal v13.4s, v31.4h, v6.4h\n"
- "smlal2 v20.4s, v31.8h, v6.8h\n"
- "smlal v16.4s, v31.4h, v3.4h\n"
- "smlal2 v26.4s, v31.8h, v3.8h\n"
- "add x22, x22, x8\n"
- "tbz x6, #2, 45f\n"
- "ld1 { v30.s }[0], [x22], #0x4\n"
- "tbz x6, #1, 44f\n"
- "ld1 { v30.h }[2], [x22], #0x2\n"
- "tbz x6, #0, 47f\n"
- "ld1 { v30.b }[6], [x22]\n"
+ "smlal2 v26.4s, v31.8h, v6.8h\n"
+ "smlal v18.4s, v31.4h, v3.4h\n"
+ "smlal2 v24.4s, v31.8h, v3.8h\n"
+ "add x21, x21, x15\n"
+ "tbz x8, #2, 45f\n"
+ "ld1 { v30.s }[0], [x21], #0x4\n"
+ "tbz x8, #1, 44f\n"
+ "ld1 { v30.h }[2], [x21], #0x2\n"
+ "tbz x8, #0, 47f\n"
+ "ld1 { v30.b }[6], [x21]\n"
"b 47f\n"
"44:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset
- "tbz x6, #0, 47f\n"
- "ld1 { v30.b }[4], [x22]\n"
+ "tbz x8, #0, 47f\n"
+ "ld1 { v30.b }[4], [x21]\n"
"b 47f\n"
"45:" // Oddments: Load (2, 3): Bit 2: Unset
- "tbz x6, #1, 46f\n"
- "ld1 { v30.h }[0], [x22], #0x2\n"
- "tbz x6, #0, 47f\n"
- "ld1 { v30.b }[2], [x22]\n"
+ "tbz x8, #1, 46f\n"
+ "ld1 { v30.h }[0], [x21], #0x2\n"
+ "tbz x8, #0, 47f\n"
+ "ld1 { v30.b }[2], [x21]\n"
"b 47f\n"
"46:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 47f\n"
- "ld1 { v30.b }[0], [x22]\n"
+ "tbz x8, #0, 47f\n"
+ "ld1 { v30.b }[0], [x21]\n"
"47:" // Oddments: Load (2, 3): Bit 2: End
- "usubl v30.8h, v30.8b, v24.8b\n"
- "ldr x21, [x16, #0x70]\n"
- "smlal v9.4s, v30.4h, v8.4h\n"
- "smlal2 v18.4s, v30.8h, v8.8h\n"
- "smlal v25.4s, v30.4h, v5.4h\n"
- "smlal2 v10.4s, v30.8h, v5.8h\n"
- "add x21, x21, x8\n"
- "tbz x6, #2, 49f\n"
- "ld1 { v29.s }[0], [x21], #0x4\n"
- "tbz x6, #1, 48f\n"
- "ld1 { v29.h }[2], [x21], #0x2\n"
- "tbz x6, #0, 51f\n"
- "ld1 { v29.b }[6], [x21]\n"
+ "usubl v30.8h, v30.8b, v22.8b\n"
+ "ldr x20, [x12, #0x70]\n"
+ "smlal v19.4s, v30.4h, v8.4h\n"
+ "smlal2 v11.4s, v30.8h, v8.8h\n"
+ "smlal v9.4s, v30.4h, v5.4h\n"
+ "smlal2 v23.4s, v30.8h, v5.8h\n"
+ "add x20, x20, x15\n"
+ "tbz x8, #2, 49f\n"
+ "ld1 { v29.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 48f\n"
+ "ld1 { v29.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v29.b }[6], [x20]\n"
"b 51f\n"
"48:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset
- "tbz x6, #0, 51f\n"
- "ld1 { v29.b }[4], [x21]\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v29.b }[4], [x20]\n"
"b 51f\n"
"49:" // Oddments: Load (3, 1): Bit 2: Unset
- "tbz x6, #1, 50f\n"
- "ld1 { v29.h }[0], [x21], #0x2\n"
- "tbz x6, #0, 51f\n"
- "ld1 { v29.b }[2], [x21]\n"
+ "tbz x8, #1, 50f\n"
+ "ld1 { v29.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v29.b }[2], [x20]\n"
"b 51f\n"
"50:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 51f\n"
- "ld1 { v29.b }[0], [x21]\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v29.b }[0], [x20]\n"
"51:" // Oddments: Load (3, 1): Bit 2: End
- "usubl v29.8h, v29.8b, v24.8b\n"
- "ldr x20, [x16, #0x78]\n"
- "smlal v16.4s, v29.4h, v7.4h\n"
- "smlal2 v26.4s, v29.8h, v7.8h\n"
- "smlal v25.4s, v29.4h, v6.4h\n"
- "smlal2 v10.4s, v29.8h, v6.8h\n"
- "add x20, x20, x8\n"
- "tbz x6, #2, 53f\n"
- "ld1 { v28.s }[0], [x20], #0x4\n"
- "tbz x6, #1, 52f\n"
- "ld1 { v28.h }[2], [x20], #0x2\n"
- "tbz x6, #0, 55f\n"
- "ld1 { v28.b }[6], [x20]\n"
+ "usubl v29.8h, v29.8b, v22.8b\n"
+ "ldr x19, [x12, #0x78]\n"
+ "smlal v18.4s, v29.4h, v7.4h\n"
+ "smlal2 v24.4s, v29.8h, v7.8h\n"
+ "smlal v9.4s, v29.4h, v6.4h\n"
+ "smlal2 v23.4s, v29.8h, v6.8h\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 53f\n"
+ "ld1 { v28.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 52f\n"
+ "ld1 { v28.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v28.b }[6], [x19]\n"
"b 55f\n"
"52:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset
- "tbz x6, #0, 55f\n"
- "ld1 { v28.b }[4], [x20]\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v28.b }[4], [x19]\n"
"b 55f\n"
"53:" // Oddments: Load (3, 2): Bit 2: Unset
- "tbz x6, #1, 54f\n"
- "ld1 { v28.h }[0], [x20], #0x2\n"
- "tbz x6, #0, 55f\n"
- "ld1 { v28.b }[2], [x20]\n"
+ "tbz x8, #1, 54f\n"
+ "ld1 { v28.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v28.b }[2], [x19]\n"
"b 55f\n"
"54:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 55f\n"
- "ld1 { v28.b }[0], [x20]\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v28.b }[0], [x19]\n"
"55:" // Oddments: Load (3, 2): Bit 2: End
- "usubl v28.8h, v28.8b, v24.8b\n"
- "smlal v16.4s, v28.4h, v8.4h\n"
- "smlal2 v26.4s, v28.8h, v8.8h\n"
- "smlal v25.4s, v28.4h, v7.4h\n"
- "smlal2 v10.4s, v28.8h, v7.8h\n"
- "tbz x6, #2, 57f\n"
- "ld1 { v17.4s }, [x14], #0x10\n"
- "ld1 { v22.4s }, [x13], #0x10\n"
- "tbz x6, #1, 56f\n"
- "ld1 { v23.d }[0], [x14], #0x8\n"
- "ld1 { v19.d }[0], [x13], #0x8\n"
- "tbz x6, #0, 59f\n"
- "ld1 { v23.s }[2], [x14]\n"
- "ld1 { v19.s }[2], [x13]\n"
+ "usubl v28.8h, v28.8b, v22.8b\n"
+ "smlal v18.4s, v28.4h, v8.4h\n"
+ "smlal2 v24.4s, v28.8h, v8.8h\n"
+ "smlal v9.4s, v28.4h, v7.4h\n"
+ "smlal2 v23.4s, v28.8h, v7.8h\n"
+ "tbz x8, #2, 57f\n"
+ "ld1 { v21.4s }, [x13], #0x10\n"
+ "ld1 { v25.4s }, [x11], #0x10\n"
+ "tbz x8, #1, 56f\n"
+ "ld1 { v10.d }[0], [x13], #0x8\n"
+ "ld1 { v16.d }[0], [x11], #0x8\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v10.s }[2], [x13]\n"
+ "ld1 { v16.s }[2], [x11]\n"
"b 59f\n"
"56:" // Oddments: Load requant params: Bit 2: Bit 1: Unset
- "tbz x6, #0, 59f\n"
- "ld1 { v23.s }[0], [x14]\n"
- "ld1 { v19.s }[0], [x13]\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v10.s }[0], [x13]\n"
+ "ld1 { v16.s }[0], [x11]\n"
"b 59f\n"
"57:" // Oddments: Load requant params: Bit 2: Unset
- "tbz x6, #1, 58f\n"
- "ld1 { v17.d }[0], [x14], #0x8\n"
- "ld1 { v22.d }[0], [x13], #0x8\n"
- "tbz x6, #0, 59f\n"
- "ld1 { v17.s }[2], [x14]\n"
- "ld1 { v22.s }[2], [x13]\n"
+ "tbz x8, #1, 58f\n"
+ "ld1 { v21.d }[0], [x13], #0x8\n"
+ "ld1 { v25.d }[0], [x11], #0x8\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v21.s }[2], [x13]\n"
+ "ld1 { v25.s }[2], [x11]\n"
"b 59f\n"
"58:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 59f\n"
- "ld1 { v17.s }[0], [x14]\n"
- "ld1 { v22.s }[0], [x13]\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v21.s }[0], [x13]\n"
+ "ld1 { v25.s }[0], [x11]\n"
"59:" // Oddments: Load requant params: Bit 2: End
- "sqrdmulh v13.4s, v13.4s, v17.4s\n"
- "and v21.16b, v13.16b, v22.16b\n"
- "add x12, x12, x17\n"
- "add x11, x11, x17\n"
- "sqrdmulh v20.4s, v20.4s, v23.4s\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "add x10, x10, x17\n"
- "add x9, x9, x17\n"
- "and v29.16b, v20.16b, v19.16b\n"
- "sqrdmulh v9.4s, v9.4s, v17.4s\n"
- "sqrdmulh v16.4s, v16.4s, v17.4s\n"
- "sqrdmulh v25.4s, v25.4s, v17.4s\n"
- "sqadd v13.4s, v13.4s, v21.4s\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "and v0.16b, v9.16b, v22.16b\n"
- "sqrdmulh v18.4s, v18.4s, v23.4s\n"
- "and v27.16b, v16.16b, v22.16b\n"
- "sqrdmulh v26.4s, v26.4s, v23.4s\n"
- "and v21.16b, v25.16b, v22.16b\n"
- "sqrdmulh v10.4s, v10.4s, v23.4s\n"
- "sqadd v20.4s, v20.4s, v29.4s\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "and v17.16b, v18.16b, v19.16b\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "and v7.16b, v26.16b, v19.16b\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "and v29.16b, v10.16b, v19.16b\n"
- "sqadd v9.4s, v9.4s, v0.4s\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v27.4s\n"
+ "sqrdmulh v13.4s, v13.4s, v21.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v21.4s\n"
+ "add x10, x10, x14\n"
+ "add x9, x9, x14\n"
+ "sqrdmulh v18.4s, v18.4s, v21.4s\n"
+ "sqrdmulh v9.4s, v9.4s, v21.4s\n"
+ "add x28, x28, x14\n"
+ "add x27, x27, x14\n"
+ "and v7.16b, v13.16b, v25.16b\n"
+ "sqrdmulh v26.4s, v26.4s, v10.4s\n"
+ "and v4.16b, v19.16b, v25.16b\n"
+ "sqrdmulh v11.4s, v11.4s, v10.4s\n"
+ "and v21.16b, v18.16b, v25.16b\n"
+ "sqrdmulh v24.4s, v24.4s, v10.4s\n"
+ "and v20.16b, v9.16b, v25.16b\n"
+ "sqrdmulh v23.4s, v23.4s, v10.4s\n"
"sshr v7.4s, v7.4s, #0x1f\n"
- "sqadd v25.4s, v25.4s, v21.4s\n"
+ "and v29.16b, v26.16b, v16.16b\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "and v10.16b, v11.16b, v16.16b\n"
+ "sshr v21.4s, v21.4s, #0x1f\n"
+ "and v31.16b, v24.16b, v16.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v30.16b, v23.16b, v16.16b\n"
+ "sqadd v13.4s, v13.4s, v7.4s\n"
"sshr v29.4s, v29.4s, #0x1f\n"
- "srshl v13.4s, v13.4s, v22.4s\n"
- "srshl v9.4s, v9.4s, v22.4s\n"
- "sqadd v18.4s, v18.4s, v17.4s\n"
- "srshl v16.4s, v16.4s, v22.4s\n"
- "sqadd v26.4s, v26.4s, v7.4s\n"
- "srshl v25.4s, v25.4s, v22.4s\n"
- "sqadd v10.4s, v10.4s, v29.4s\n"
- "srshl v20.4s, v20.4s, v19.4s\n"
+ "sqadd v19.4s, v19.4s, v4.4s\n"
+ "sshr v10.4s, v10.4s, #0x1f\n"
+ "sqadd v18.4s, v18.4s, v21.4s\n"
+ "sshr v31.4s, v31.4s, #0x1f\n"
+ "sqadd v9.4s, v9.4s, v20.4s\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "srshl v13.4s, v13.4s, v25.4s\n"
+ "sqadd v26.4s, v26.4s, v29.4s\n"
+ "srshl v19.4s, v19.4s, v25.4s\n"
+ "sqadd v11.4s, v11.4s, v10.4s\n"
+ "srshl v18.4s, v18.4s, v25.4s\n"
+ "sqadd v24.4s, v24.4s, v31.4s\n"
+ "srshl v9.4s, v9.4s, v25.4s\n"
+ "sqadd v23.4s, v23.4s, v30.4s\n"
+ "srshl v26.4s, v26.4s, v16.4s\n"
"sqxtn v13.4h, v13.4s\n"
- "srshl v18.4s, v18.4s, v19.4s\n"
+ "srshl v11.4s, v11.4s, v16.4s\n"
+ "sqxtn v19.4h, v19.4s\n"
+ "srshl v24.4s, v24.4s, v16.4s\n"
+ "sqxtn v18.4h, v18.4s\n"
+ "srshl v23.4s, v23.4s, v16.4s\n"
"sqxtn v9.4h, v9.4s\n"
- "srshl v26.4s, v26.4s, v19.4s\n"
- "sqxtn v16.4h, v16.4s\n"
- "srshl v10.4s, v10.4s, v19.4s\n"
- "sqxtn v25.4h, v25.4s\n"
- "sqxtn2 v13.8h, v20.4s\n"
- "sqxtn2 v9.8h, v18.4s\n"
- "sqxtn2 v16.8h, v26.4s\n"
- "sqxtn2 v25.8h, v10.4s\n"
+ "sqxtn2 v13.8h, v26.4s\n"
+ "sqxtn2 v19.8h, v11.4s\n"
+ "sqxtn2 v18.8h, v24.4s\n"
+ "sqxtn2 v9.8h, v23.4s\n"
"sqadd v13.8h, v13.8h, v14.8h\n"
+ "sqadd v19.8h, v19.8h, v14.8h\n"
+ "sqadd v18.8h, v18.8h, v14.8h\n"
"sqadd v9.8h, v9.8h, v14.8h\n"
- "sqadd v16.8h, v16.8h, v14.8h\n"
- "sqadd v25.8h, v25.8h, v14.8h\n"
- "smax v13.8h, v13.8h, v12.8h\n"
- "smax v9.8h, v9.8h, v12.8h\n"
- "smax v16.8h, v16.8h, v12.8h\n"
- "smax v25.8h, v25.8h, v12.8h\n"
- "smin v13.8h, v13.8h, v11.8h\n"
- "smin v9.8h, v9.8h, v11.8h\n"
- "smin v16.8h, v16.8h, v11.8h\n"
- "smin v25.8h, v25.8h, v11.8h\n"
+ "smax v13.8h, v13.8h, v17.8h\n"
+ "smax v19.8h, v19.8h, v17.8h\n"
+ "smax v18.8h, v18.8h, v17.8h\n"
+ "smax v9.8h, v9.8h, v17.8h\n"
+ "smin v13.8h, v13.8h, v15.8h\n"
+ "smin v19.8h, v19.8h, v15.8h\n"
+ "smin v18.8h, v18.8h, v15.8h\n"
+ "smin v9.8h, v9.8h, v15.8h\n"
"uzp1 v13.16b, v13.16b, v13.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v18.16b, v18.16b, v18.16b\n"
"uzp1 v9.16b, v9.16b, v9.16b\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "tbz x6, #2, 61f\n"
- "st1 { v13.s }[0], [x12], #0x4\n"
- "st1 { v9.s }[0], [x11], #0x4\n"
- "st1 { v16.s }[0], [x10], #0x4\n"
- "st1 { v25.s }[0], [x9], #0x4\n"
- "tbz x6, #1, 60f\n"
- "st1 { v13.h }[2], [x12], #0x2\n"
- "st1 { v9.h }[2], [x11], #0x2\n"
- "st1 { v16.h }[2], [x10], #0x2\n"
- "st1 { v25.h }[2], [x9], #0x2\n"
- "tbz x6, #0, 63f\n"
- "st1 { v13.b }[6], [x12], #0x1\n"
- "st1 { v9.b }[6], [x11], #0x1\n"
- "st1 { v16.b }[6], [x10], #0x1\n"
- "st1 { v25.b }[6], [x9], #0x1\n"
+ "tbz x8, #2, 61f\n"
+ "st1 { v13.s }[0], [x10], #0x4\n"
+ "st1 { v19.s }[0], [x9], #0x4\n"
+ "st1 { v18.s }[0], [x28], #0x4\n"
+ "st1 { v9.s }[0], [x27], #0x4\n"
+ "tbz x8, #1, 60f\n"
+ "st1 { v13.h }[2], [x10], #0x2\n"
+ "st1 { v19.h }[2], [x9], #0x2\n"
+ "st1 { v18.h }[2], [x28], #0x2\n"
+ "st1 { v9.h }[2], [x27], #0x2\n"
+ "tbz x8, #0, 63f\n"
+ "st1 { v13.b }[6], [x10], #0x1\n"
+ "st1 { v19.b }[6], [x9], #0x1\n"
+ "st1 { v18.b }[6], [x28], #0x1\n"
+ "st1 { v9.b }[6], [x27], #0x1\n"
"b 63f\n"
"60:" // Oddments: Bit 2: Bit 1: Unset
- "tbz x6, #0, 63f\n"
- "st1 { v13.b }[4], [x12], #0x1\n"
- "st1 { v9.b }[4], [x11], #0x1\n"
- "st1 { v16.b }[4], [x10], #0x1\n"
- "st1 { v25.b }[4], [x9], #0x1\n"
+ "tbz x8, #0, 63f\n"
+ "st1 { v13.b }[4], [x10], #0x1\n"
+ "st1 { v19.b }[4], [x9], #0x1\n"
+ "st1 { v18.b }[4], [x28], #0x1\n"
+ "st1 { v9.b }[4], [x27], #0x1\n"
"b 63f\n"
"61:" // Oddments: Bit 2: Unset
- "tbz x6, #1, 62f\n"
- "st1 { v13.h }[0], [x12], #0x2\n"
- "st1 { v9.h }[0], [x11], #0x2\n"
- "st1 { v16.h }[0], [x10], #0x2\n"
- "st1 { v25.h }[0], [x9], #0x2\n"
- "tbz x6, #0, 63f\n"
- "st1 { v13.b }[2], [x12], #0x1\n"
- "st1 { v9.b }[2], [x11], #0x1\n"
- "st1 { v16.b }[2], [x10], #0x1\n"
- "st1 { v25.b }[2], [x9], #0x1\n"
+ "tbz x8, #1, 62f\n"
+ "st1 { v13.h }[0], [x10], #0x2\n"
+ "st1 { v19.h }[0], [x9], #0x2\n"
+ "st1 { v18.h }[0], [x28], #0x2\n"
+ "st1 { v9.h }[0], [x27], #0x2\n"
+ "tbz x8, #0, 63f\n"
+ "st1 { v13.b }[2], [x10], #0x1\n"
+ "st1 { v19.b }[2], [x9], #0x1\n"
+ "st1 { v18.b }[2], [x28], #0x1\n"
+ "st1 { v9.b }[2], [x27], #0x1\n"
"b 63f\n"
"62:" // Oddments: Bit 2: Unset: Bit 1: Unset
- "tbz x6, #0, 63f\n"
- "st1 { v13.b }[0], [x12], #0x1\n"
- "st1 { v9.b }[0], [x11], #0x1\n"
- "st1 { v16.b }[0], [x10], #0x1\n"
- "st1 { v25.b }[0], [x9], #0x1\n"
+ "tbz x8, #0, 63f\n"
+ "st1 { v13.b }[0], [x10], #0x1\n"
+ "st1 { v19.b }[0], [x9], #0x1\n"
+ "st1 { v18.b }[0], [x28], #0x1\n"
+ "st1 { v9.b }[0], [x27], #0x1\n"
"63:" // Oddments: Bit 2: End
"64:" // End
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
index 5124b2c8f3..079b212e6c 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -100,75 +100,75 @@ void a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "ldr x7, [%x[params], %[offsetof_Params_n_channels]]\n"
- "ldr x23, [%x[params], %[offsetof_Params_requant]]\n"
- "lsr x8, x7, #0x3\n"
- "add x20, x23, %[offsetof_Requantize32_a_offset]\n"
- "ld1r { v12.16b }, [x20]\n"
+ "ldr x19, [%x[params], %[offsetof_Params_requant]]\n"
+ "ldr x8, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "add x24, x19, %[offsetof_Requantize32_a_offset]\n"
+ "add x23, x19, %[offsetof_Requantize32_b_offset]\n"
"ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n"
- "add x21, x23, %[offsetof_Requantize32_b_offset]\n"
- "add x20, x23, %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v13.16b }, [x21]\n"
- "ld1r { v11.8h }, [x20]\n"
- "add x21, x23, %[offsetof_Requantize32_minval]\n"
- "add x20, x23, %[offsetof_Requantize32_maxval]\n"
- "ld1r { v16.8h }, [x21]\n"
- "ld1r { v14.8h }, [x20]\n"
- "mov x17, #0x0\n"
- "mov x16, #0x0\n"
- "add x15, %x[params], %[offsetof_Params_inptrs]\n"
- "ldr x14, [%x[params], %[offsetof_Params_weights]]\n"
+ "add x21, x19, %[offsetof_Requantize32_c_offset]\n"
+ "add x20, x19, %[offsetof_Requantize32_minval]\n"
+ "ldr x17, [%x[params], %[offsetof_Params_weights]]\n"
+ "add x19, x19, %[offsetof_Requantize32_maxval]\n"
+ "ld1r { v12.16b }, [x24]\n"
+ "ld1r { v13.16b }, [x23]\n"
+ "lsr x16, x8, #0x3\n"
+ "ld1r { v11.8h }, [x21]\n"
+ "ld1r { v17.8h }, [x20]\n"
+ "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
+ "ld1r { v14.8h }, [x19]\n"
"ldr x13, [%x[params], %[offsetof_Params_requant_muls]]\n"
- "ldr x12, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "ldp x11, x10, [x22, #0x0]\n"
- "ldp x9, x28, [x22, #0x10]\n"
- "cbz x8, 3f\n"
- "ldr d0, [x14, #0x0]\n"
- "ldr d1, [x14, #0x8]\n"
- "subs x8, x8, #0x1\n"
+ "add x12, %x[params], %[offsetof_Params_inptrs]\n"
+ "ldr x11, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "ldp x10, x9, [x22, #0x0]\n"
+ "ldp x28, x27, [x22, #0x10]\n"
+ "cbz x16, 3f\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr q15, [x19, #0x0]\n"
+ "subs x16, x16, #0x1\n"
+ "mov v9.16b, v15.16b\n"
+ "ldr q10, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr d0, [x17, #0x0]\n"
+ "ldr d1, [x17, #0x8]\n"
+ "ldr d2, [x17, #0x10]\n"
+ "mov v16.16b, v10.16b\n"
+ "mov v22.16b, v15.16b\n"
+ "ldr d3, [x17, #0x18]\n"
+ "ldr d4, [x17, #0x20]\n"
+ "mov v21.16b, v10.16b\n"
+ "mov v23.16b, v15.16b\n"
+ "ldr d5, [x17, #0x28]\n"
+ "ldr d6, [x17, #0x30]\n"
+ "mov v18.16b, v10.16b\n"
"ssubl v0.8h, v0.8b, v13.8b\n"
- "ldr d2, [x14, #0x10]\n"
- "ldr d3, [x14, #0x18]\n"
+ "ldr d7, [x17, #0x38]\n"
+ "ldr d8, [x17, #0x40]\n"
"ssubl v1.8h, v1.8b, v13.8b\n"
"ssubl v2.8h, v2.8b, v13.8b\n"
- "ldr d4, [x14, #0x20]\n"
- "ldr d5, [x14, #0x28]\n"
+ "ldp x26, x25, [x12, #0x0]\n"
+ "ldp x24, x23, [x12, #0x10]\n"
"ssubl v3.8h, v3.8b, v13.8b\n"
"ssubl v4.8h, v4.8b, v13.8b\n"
- "ldr d6, [x14, #0x30]\n"
- "ldr d7, [x14, #0x38]\n"
+ "ldp x22, x21, [x12, #0x20]\n"
+ "ldp x20, x19, [x12, #0x30]\n"
"ssubl v5.8h, v5.8b, v13.8b\n"
"ssubl v6.8h, v6.8b, v13.8b\n"
- "ldr d8, [x14, #0x40]\n"
- "ldr x24, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr d31, [x26, x15]\n"
+ "ldr d30, [x25, x15]\n"
"ssubl v7.8h, v7.8b, v13.8b\n"
"ssubl v8.8h, v8.8b, v13.8b\n"
- "ldr q15, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "add x24, x24, #0x20\n"
- "str x24, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "mov v10.16b, v15.16b\n"
- "mov v20.16b, v17.16b\n"
- "ldp x23, x22, [x15, #0x20]\n"
- "ldp x21, x20, [x15, #0x30]\n"
- "mov v9.16b, v15.16b\n"
- "mov v23.16b, v17.16b\n"
- "ldr d31, [x27, x17]\n"
- "ldr d30, [x26, x17]\n"
- "mov v21.16b, v15.16b\n"
- "mov v22.16b, v17.16b\n"
- "ldr d29, [x25, x17]\n"
- "ldr d28, [x24, x17]\n"
+ "ldr d29, [x24, x15]\n"
+ "ldr d28, [x23, x15]\n"
"usubl v31.8h, v31.8b, v12.8b\n"
"usubl v30.8h, v30.8b, v12.8b\n"
- "ldr d27, [x23, x17]\n"
- "ldr d26, [x22, x17]\n"
+ "ldr d27, [x22, x15]\n"
+ "ldr d26, [x21, x15]\n"
"usubl v29.8h, v29.8b, v12.8b\n"
"usubl v28.8h, v28.8b, v12.8b\n"
- "ldr d25, [x21, x17]\n"
- "ldr d24, [x20, x17]\n"
+ "ldr d25, [x20, x15]\n"
+ "ldr d24, [x19, x15]\n"
"usubl v27.8h, v27.8b, v12.8b\n"
"usubl v26.8h, v26.8b, v12.8b\n"
"usubl v25.8h, v25.8b, v12.8b\n"
@@ -176,250 +176,250 @@ void a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(
"beq 2f\n"
"1:" // Loop
"smlal v15.4s, v31.4h, v8.4h\n"
- "smlal2 v17.4s, v31.8h, v8.8h\n"
- "ldr x24, [x15, #0x40]\n"
- "ldr x22, [x15, #0x48]\n"
- "smlal v10.4s, v31.4h, v6.4h\n"
- "smlal2 v20.4s, v31.8h, v6.8h\n"
- "ldr x21, [x15, #0x50]\n"
- "ldr x20, [x15, #0x58]\n"
+ "smlal2 v10.4s, v31.8h, v8.8h\n"
+ "ldr x24, [x12, #0x40]\n"
+ "ldr x23, [x12, #0x48]\n"
+ "smlal v9.4s, v31.4h, v6.4h\n"
+ "smlal2 v16.4s, v31.8h, v6.8h\n"
+ "ldr x21, [x12, #0x50]\n"
+ "ldr x19, [x12, #0x58]\n"
"smlal v15.4s, v30.4h, v0.4h\n"
- "smlal2 v17.4s, v30.8h, v0.8h\n"
- "ldr q19, [x13, #0x0]\n"
- "ldr x23, [x15, #0x78]\n"
- "smlal v10.4s, v28.4h, v1.4h\n"
- "smlal2 v20.4s, v28.8h, v1.8h\n"
- "ldr d28, [x22, x17]\n"
+ "smlal2 v10.4s, v30.8h, v0.8h\n"
+ "ldr x22, [x12, #0x78]\n"
+ "ldr x20, [x12, #0x60]\n"
+ "smlal v9.4s, v28.4h, v1.4h\n"
+ "smlal2 v16.4s, v28.8h, v1.8h\n"
+ "ldr d28, [x23, x15]\n"
"usubl v28.8h, v28.8b, v12.8b\n"
"smlal v15.4s, v29.4h, v1.4h\n"
- "smlal2 v17.4s, v29.8h, v1.8h\n"
- "ldr d29, [x24, x17]\n"
+ "smlal2 v10.4s, v29.8h, v1.8h\n"
+ "ldr d29, [x24, x15]\n"
"usubl v29.8h, v29.8b, v12.8b\n"
- "smlal v10.4s, v27.4h, v2.4h\n"
- "smlal2 v20.4s, v27.8h, v2.8h\n"
- "ldr d27, [x21, x17]\n"
+ "smlal v9.4s, v27.4h, v2.4h\n"
+ "smlal2 v16.4s, v27.8h, v2.8h\n"
+ "ldr d27, [x21, x15]\n"
"usubl v27.8h, v27.8b, v12.8b\n"
"smlal v15.4s, v26.4h, v3.4h\n"
- "smlal2 v17.4s, v26.8h, v3.8h\n"
- "ldr d26, [x20, x17]\n"
- "ldr x20, [x15, #0x60]\n"
- "smlal v10.4s, v24.4h, v0.4h\n"
- "smlal2 v20.4s, v24.8h, v0.8h\n"
+ "smlal2 v10.4s, v26.8h, v3.8h\n"
+ "ldr d26, [x19, x15]\n"
"usubl v26.8h, v26.8b, v12.8b\n"
- "ldr x21, [x15, #0x80]\n"
+ "smlal v9.4s, v24.4h, v0.4h\n"
+ "smlal2 v16.4s, v24.8h, v0.8h\n"
+ "ldr x21, [x12, #0x80]\n"
+ "ldr x19, [x12, #0x68]\n"
"smlal v15.4s, v25.4h, v4.4h\n"
- "smlal2 v17.4s, v25.8h, v4.8h\n"
- "ldr d25, [x20, x17]\n"
- "ldr x20, [x15, #0x68]\n"
- "smlal v10.4s, v29.4h, v4.4h\n"
- "smlal2 v20.4s, v29.8h, v4.8h\n"
- "ldr d29, [x20, x17]\n"
+ "smlal2 v10.4s, v25.8h, v4.8h\n"
+ "ldr d25, [x20, x15]\n"
"usubl v25.8h, v25.8b, v12.8b\n"
+ "smlal v9.4s, v29.4h, v4.4h\n"
+ "smlal2 v16.4s, v29.8h, v4.8h\n"
+ "ldr x20, [x12, #0x88]\n"
+ "ldr d29, [x19, x15]\n"
"smlal v15.4s, v24.4h, v2.4h\n"
- "smlal2 v17.4s, v24.8h, v2.8h\n"
- "ldr q18, [x12, #0x0]\n"
- "ldr x22, [x15, #0x88]\n"
- "smlal v10.4s, v28.4h, v5.4h\n"
- "smlal2 v20.4s, v28.8h, v5.8h\n"
- "ldr d28, [x21, x17]\n"
- "ldr x21, [x15, #0x70]\n"
- "smlal v9.4s, v31.4h, v2.4h\n"
- "smlal2 v23.4s, v31.8h, v2.8h\n"
+ "smlal2 v10.4s, v24.8h, v2.8h\n"
+ "ldr x19, [x12, #0x70]\n"
+ "usubl v29.8h, v29.8b, v12.8b\n"
+ "smlal v9.4s, v28.4h, v5.4h\n"
+ "smlal2 v16.4s, v28.8h, v5.8h\n"
+ "ldr d28, [x21, x15]\n"
"usubl v28.8h, v28.8b, v12.8b\n"
- "ldr x25, [x15, #0x98]\n"
+ "smlal v22.4s, v31.4h, v2.4h\n"
+ "smlal2 v21.4s, v31.8h, v2.8h\n"
+ "ldr x24, [x12, #0x98]\n"
+ "ldr d24, [x19, x15]\n"
"smlal v15.4s, v27.4h, v5.4h\n"
- "smlal2 v17.4s, v27.8h, v5.8h\n"
- "usubl v29.8h, v29.8b, v12.8b\n"
- "ldr x24, [x15, #0x90]\n"
- "smlal v10.4s, v27.4h, v3.4h\n"
- "smlal2 v20.4s, v27.8h, v3.8h\n"
- "ldr d27, [x23, x17]\n"
+ "smlal2 v10.4s, v27.8h, v5.8h\n"
+ "usubl v24.8h, v24.8b, v12.8b\n"
+ "ldr x23, [x12, #0x90]\n"
+ "smlal v9.4s, v27.4h, v3.4h\n"
+ "smlal2 v16.4s, v27.8h, v3.8h\n"
+ "ldr d27, [x22, x15]\n"
"usubl v27.8h, v27.8b, v12.8b\n"
- "smlal v21.4s, v31.4h, v0.4h\n"
- "smlal v9.4s, v26.4h, v3.4h\n"
- "ldr x23, [x15, #0xa8]\n"
- "ldr x20, [x15, #0xa0]\n"
- "smlal2 v23.4s, v26.8h, v3.8h\n"
- "ldr d26, [x22, x17]\n"
- "smlal2 v22.4s, v31.8h, v0.8h\n"
- "ldr d24, [x21, x17]\n"
- "smlal v21.4s, v27.4h, v4.4h\n"
- "smlal v9.4s, v25.4h, v0.4h\n"
+ "smlal v23.4s, v31.4h, v0.4h\n"
+ "smlal v22.4s, v26.4h, v3.4h\n"
+ "ldr x22, [x12, #0xa8]\n"
+ "ldr x19, [x12, #0xa0]\n"
+ "smlal2 v21.4s, v26.8h, v3.8h\n"
+ "smlal2 v18.4s, v31.8h, v0.8h\n"
+ "ldr d26, [x20, x15]\n"
"usubl v26.8h, v26.8b, v12.8b\n"
- "ldr x22, [x15, #0xb0]\n"
- "smlal2 v23.4s, v25.8h, v0.8h\n"
- "ldr q30, [x13, #0x10]\n"
- "smlal2 v22.4s, v27.8h, v4.8h\n"
- "ldr d27, [x20, x17]\n"
- "smlal v21.4s, v28.4h, v1.4h\n"
+ "smlal v23.4s, v27.4h, v4.4h\n"
+ "smlal v22.4s, v25.4h, v0.4h\n"
+ "ldr x21, [x12, #0xb0]\n"
+ "ldr x20, [x12, #0xb8]\n"
+ "smlal2 v21.4s, v25.8h, v0.8h\n"
+ "smlal2 v18.4s, v27.8h, v4.8h\n"
+ "ldr d27, [x19, x15]\n"
+ "usubl v27.8h, v27.8b, v12.8b\n"
+ "smlal v23.4s, v28.4h, v1.4h\n"
"smlal v15.4s, v25.4h, v6.4h\n"
- "usubl v24.8h, v24.8b, v12.8b\n"
- "ldr x21, [x15, #0xb8]\n"
- "smlal2 v17.4s, v25.8h, v6.8h\n"
- "ldr d25, [x24, x17]\n"
- "smlal v9.4s, v29.4h, v4.4h\n"
+ "ldr x19, [x12, #0xc0]\n"
+ "ldr q19, [x13, #0x0]\n"
+ "smlal2 v10.4s, v25.8h, v6.8h\n"
+ "smlal v22.4s, v29.4h, v4.4h\n"
+ "ldr d25, [x23, x15]\n"
"usubl v25.8h, v25.8b, v12.8b\n"
- "smlal2 v23.4s, v29.8h, v4.8h\n"
- "ldr d29, [x25, x17]\n"
- "ldr q31, [x12, #0x10]\n"
- "smlal2 v22.4s, v28.8h, v1.8h\n"
- "smlal v21.4s, v26.4h, v5.4h\n"
+ "smlal2 v21.4s, v29.8h, v4.8h\n"
+ "ldr d29, [x24, x15]\n"
+ "smlal2 v18.4s, v28.8h, v1.8h\n"
"usubl v29.8h, v29.8b, v12.8b\n"
+ "smlal v23.4s, v26.4h, v5.4h\n"
"smlal v15.4s, v24.4h, v7.4h\n"
- "ldr x20, [x15, #0xc0]\n"
- "smlal2 v17.4s, v24.8h, v7.8h\n"
- "smlal v9.4s, v24.4h, v1.4h\n"
- "usubl v27.8h, v27.8b, v12.8b\n"
- "ldr x24, [%x[params], %[offsetof_Params_bias]]\n"
- "smlal2 v23.4s, v24.8h, v1.8h\n"
- "ldr d24, [x23, x17]\n"
- "smlal2 v22.4s, v26.8h, v5.8h\n"
- "ldr d26, [x22, x17]\n"
- "smlal v21.4s, v29.4h, v2.4h\n"
+ "ldr q0, [x11, #0x0]\n"
+ "ldr q4, [x13, #0x10]\n"
+ "smlal2 v10.4s, v24.8h, v7.8h\n"
+ "smlal v22.4s, v24.4h, v1.4h\n"
+ "sqrdmulh v15.4s, v15.4s, v19.4s\n"
+ "ldr q31, [x11, #0x10]\n"
+ "smlal2 v21.4s, v24.8h, v1.8h\n"
+ "ldr d24, [x22, x15]\n"
+ "smlal2 v18.4s, v26.8h, v5.8h\n"
"usubl v24.8h, v24.8b, v12.8b\n"
- "smlal2 v22.4s, v29.8h, v2.8h\n"
- "add x14, x14, #0x48\n"
- "smlal v9.4s, v25.4h, v6.4h\n"
- "smlal v21.4s, v24.4h, v3.4h\n"
+ "smlal v23.4s, v29.4h, v2.4h\n"
+ "ldr d26, [x21, x15]\n"
+ "smlal2 v18.4s, v29.8h, v2.8h\n"
"usubl v26.8h, v26.8b, v12.8b\n"
- "subs x8, x8, #0x1\n"
- "smlal v10.4s, v28.4h, v7.4h\n"
- "smlal2 v20.4s, v28.8h, v7.8h\n"
- "sqrdmulh v15.4s, v15.4s, v19.4s\n"
- "add x13, x13, #0x20\n"
- "smlal2 v23.4s, v25.8h, v6.8h\n"
- "ldr d25, [x21, x17]\n"
- "smlal2 v22.4s, v24.8h, v3.8h\n"
+ "smlal v22.4s, v25.4h, v6.4h\n"
+ "smlal v23.4s, v24.4h, v3.4h\n"
+ "and v30.16b, v15.16b, v0.16b\n"
+ "add x17, x17, #0x48\n"
+ "smlal v9.4s, v28.4h, v7.4h\n"
+ "smlal2 v16.4s, v28.8h, v7.8h\n"
+ "sqrdmulh v10.4s, v10.4s, v4.4s\n"
+ "subs x16, x16, #0x1\n"
+ "smlal2 v21.4s, v25.8h, v6.8h\n"
+ "ldr d25, [x20, x15]\n"
+ "smlal2 v18.4s, v24.8h, v3.8h\n"
"usubl v25.8h, v25.8b, v12.8b\n"
- "smlal v9.4s, v27.4h, v7.4h\n"
- "smlal v21.4s, v26.4h, v7.4h\n"
- "and v0.16b, v15.16b, v18.16b\n"
- "add x12, x12, #0x20\n"
- "smlal v10.4s, v29.4h, v8.4h\n"
- "smlal2 v20.4s, v29.8h, v8.8h\n"
- "ldr d29, [x20, x17]\n"
+ "smlal v22.4s, v27.4h, v7.4h\n"
+ "smlal v23.4s, v26.4h, v7.4h\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "add x13, x13, #0x20\n"
+ "smlal v9.4s, v29.4h, v8.4h\n"
+ "smlal2 v16.4s, v29.8h, v8.8h\n"
+ "ldr d29, [x19, x15]\n"
"usubl v29.8h, v29.8b, v12.8b\n"
- "smlal2 v23.4s, v27.8h, v7.8h\n"
- "smlal2 v22.4s, v26.8h, v7.8h\n"
- "sqrdmulh v17.4s, v17.4s, v30.4s\n"
- "add x17, x17, #0x8\n"
- "smlal v9.4s, v24.4h, v5.4h\n"
- "smlal v21.4s, v25.4h, v6.4h\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "smlal2 v23.4s, v24.8h, v5.8h\n"
- "smlal2 v22.4s, v25.8h, v6.8h\n"
- "and v7.16b, v17.16b, v31.16b\n"
- "smlal v9.4s, v25.4h, v8.4h\n"
- "smlal v21.4s, v29.4h, v8.4h\n"
- "sqrdmulh v10.4s, v10.4s, v19.4s\n"
- "smlal2 v23.4s, v25.8h, v8.8h\n"
- "smlal2 v22.4s, v29.8h, v8.8h\n"
+ "smlal2 v21.4s, v27.8h, v7.8h\n"
+ "smlal2 v18.4s, v26.8h, v7.8h\n"
"sqrdmulh v9.4s, v9.4s, v19.4s\n"
- "sqrdmulh v21.4s, v21.4s, v19.4s\n"
- "sqadd v15.4s, v15.4s, v0.4s\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "and v19.16b, v10.16b, v18.16b\n"
- "sqrdmulh v20.4s, v20.4s, v30.4s\n"
- "and v27.16b, v9.16b, v18.16b\n"
- "sqrdmulh v23.4s, v23.4s, v30.4s\n"
- "and v0.16b, v21.16b, v18.16b\n"
- "sqrdmulh v22.4s, v22.4s, v30.4s\n"
- "sqadd v17.4s, v17.4s, v7.4s\n"
+ "add x15, x15, #0x8\n"
+ "smlal v22.4s, v24.4h, v5.4h\n"
+ "smlal v23.4s, v25.4h, v6.4h\n"
+ "and v28.16b, v9.16b, v0.16b\n"
+ "add x11, x11, #0x20\n"
+ "smlal2 v21.4s, v24.8h, v5.8h\n"
+ "smlal2 v18.4s, v25.8h, v6.8h\n"
+ "sqrdmulh v16.4s, v16.4s, v4.4s\n"
+ "smlal v22.4s, v25.4h, v8.4h\n"
+ "smlal v23.4s, v29.4h, v8.4h\n"
+ "sqrdmulh v22.4s, v22.4s, v19.4s\n"
+ "smlal2 v21.4s, v25.8h, v8.8h\n"
+ "smlal2 v18.4s, v29.8h, v8.8h\n"
+ "sqrdmulh v23.4s, v23.4s, v19.4s\n"
+ "and v29.16b, v22.16b, v0.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v4.4s\n"
+ "and v20.16b, v23.16b, v0.16b\n"
+ "sqrdmulh v18.4s, v18.4s, v4.4s\n"
+ "and v19.16b, v10.16b, v31.16b\n"
+ "sshr v28.4s, v28.4s, #0x1f\n"
+ "and v4.16b, v16.16b, v31.16b\n"
+ "sshr v29.4s, v29.4s, #0x1f\n"
+ "and v5.16b, v21.16b, v31.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v26.16b, v18.16b, v31.16b\n"
+ "sqadd v15.4s, v15.4s, v30.4s\n"
"sshr v19.4s, v19.4s, #0x1f\n"
- "and v5.16b, v20.16b, v31.16b\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "and v4.16b, v23.16b, v31.16b\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "and v7.16b, v22.16b, v31.16b\n"
- "sqadd v10.4s, v10.4s, v19.4s\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sqadd v9.4s, v9.4s, v27.4s\n"
+ "sqadd v9.4s, v9.4s, v28.4s\n"
"sshr v4.4s, v4.4s, #0x1f\n"
- "sqadd v21.4s, v21.4s, v0.4s\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "srshl v15.4s, v15.4s, v18.4s\n"
- "srshl v10.4s, v10.4s, v18.4s\n"
- "sqadd v20.4s, v20.4s, v5.4s\n"
- "srshl v9.4s, v9.4s, v18.4s\n"
- "sqadd v23.4s, v23.4s, v4.4s\n"
- "srshl v21.4s, v21.4s, v18.4s\n"
- "sqadd v22.4s, v22.4s, v7.4s\n"
- "srshl v17.4s, v17.4s, v31.4s\n"
+ "sqadd v22.4s, v22.4s, v29.4s\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
+ "sqadd v23.4s, v23.4s, v20.4s\n"
+ "sshr v26.4s, v26.4s, #0x1f\n"
+ "srshl v15.4s, v15.4s, v0.4s\n"
+ "sqadd v10.4s, v10.4s, v19.4s\n"
+ "srshl v9.4s, v9.4s, v0.4s\n"
+ "sqadd v16.4s, v16.4s, v4.4s\n"
+ "srshl v22.4s, v22.4s, v0.4s\n"
+ "sqadd v21.4s, v21.4s, v5.4s\n"
+ "srshl v23.4s, v23.4s, v0.4s\n"
+ "sqadd v18.4s, v18.4s, v26.4s\n"
+ "srshl v10.4s, v10.4s, v31.4s\n"
"sqxtn v15.4h, v15.4s\n"
- "srshl v20.4s, v20.4s, v31.4s\n"
- "sqxtn v10.4h, v10.4s\n"
- "srshl v23.4s, v23.4s, v31.4s\n"
+ "srshl v16.4s, v16.4s, v31.4s\n"
"sqxtn v9.4h, v9.4s\n"
- "srshl v22.4s, v22.4s, v31.4s\n"
- "sqxtn v21.4h, v21.4s\n"
- "sqxtn2 v15.8h, v17.4s\n"
- "sqxtn2 v10.8h, v20.4s\n"
- "sqxtn2 v9.8h, v23.4s\n"
- "sqxtn2 v21.8h, v22.4s\n"
+ "srshl v21.4s, v21.4s, v31.4s\n"
+ "sqxtn v22.4h, v22.4s\n"
+ "srshl v18.4s, v18.4s, v31.4s\n"
+ "sqxtn v23.4h, v23.4s\n"
+ "sqxtn2 v15.8h, v10.4s\n"
+ "sqxtn2 v9.8h, v16.4s\n"
+ "sqxtn2 v22.8h, v21.4s\n"
+ "sqxtn2 v23.8h, v18.4s\n"
"sqadd v15.8h, v15.8h, v11.8h\n"
- "sqadd v10.8h, v10.8h, v11.8h\n"
"sqadd v9.8h, v9.8h, v11.8h\n"
- "sqadd v21.8h, v21.8h, v11.8h\n"
- "smax v15.8h, v15.8h, v16.8h\n"
- "smax v10.8h, v10.8h, v16.8h\n"
- "smax v9.8h, v9.8h, v16.8h\n"
- "smax v21.8h, v21.8h, v16.8h\n"
+ "sqadd v22.8h, v22.8h, v11.8h\n"
+ "sqadd v23.8h, v23.8h, v11.8h\n"
+ "smax v15.8h, v15.8h, v17.8h\n"
+ "smax v9.8h, v9.8h, v17.8h\n"
+ "smax v22.8h, v22.8h, v17.8h\n"
+ "smax v23.8h, v23.8h, v17.8h\n"
"smin v15.8h, v15.8h, v14.8h\n"
- "smin v10.8h, v10.8h, v14.8h\n"
"smin v9.8h, v9.8h, v14.8h\n"
- "smin v21.8h, v21.8h, v14.8h\n"
+ "smin v22.8h, v22.8h, v14.8h\n"
+ "smin v23.8h, v23.8h, v14.8h\n"
"uzp1 v15.16b, v15.16b, v15.16b\n"
- "str d15, [x11, x16]\n"
- "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "str d15, [x10, x14]\n"
"uzp1 v9.16b, v9.16b, v9.16b\n"
- "str d10, [x10, x16]\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "str d9, [x9, x16]\n"
- "str d21, [x28, x16]\n"
- "ldr q15, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "add x24, x24, #0x20\n"
- "ldr d0, [x14, #0x0]\n"
- "ldr d1, [x14, #0x8]\n"
- "add x16, x16, #0x8\n"
- "str x24, [%x[params], %[offsetof_Params_bias]]\n"
- "ldr d2, [x14, #0x10]\n"
- "ldr d3, [x14, #0x18]\n"
- "mov v10.16b, v15.16b\n"
- "mov v20.16b, v17.16b\n"
- "ldr d4, [x14, #0x20]\n"
- "ldr d5, [x14, #0x28]\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str d9, [x9, x14]\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "str d22, [x28, x14]\n"
+ "str d23, [x27, x14]\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr q15, [x19, #0x0]\n"
+ "add x14, x14, #0x8\n"
+ "ldr q10, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr d0, [x17, #0x0]\n"
+ "ldr d1, [x17, #0x8]\n"
+ "ldr d2, [x17, #0x10]\n"
"mov v9.16b, v15.16b\n"
- "mov v23.16b, v17.16b\n"
- "ldr d6, [x14, #0x30]\n"
- "ldr d7, [x14, #0x38]\n"
- "mov v21.16b, v15.16b\n"
- "mov v22.16b, v17.16b\n"
- "ldr d8, [x14, #0x40]\n"
- "ldp x27, x26, [x15, #0x0]\n"
+ "mov v16.16b, v10.16b\n"
+ "ldr d3, [x17, #0x18]\n"
+ "ldr d4, [x17, #0x20]\n"
+ "mov v22.16b, v15.16b\n"
+ "mov v21.16b, v10.16b\n"
+ "ldr d5, [x17, #0x28]\n"
+ "ldr d6, [x17, #0x30]\n"
+ "mov v23.16b, v15.16b\n"
+ "mov v18.16b, v10.16b\n"
+ "ldr d7, [x17, #0x38]\n"
+ "ldr d8, [x17, #0x40]\n"
"ssubl v0.8h, v0.8b, v13.8b\n"
"ssubl v1.8h, v1.8b, v13.8b\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "ldp x23, x22, [x15, #0x20]\n"
+ "ldp x26, x25, [x12, #0x0]\n"
+ "ldp x24, x23, [x12, #0x10]\n"
"ssubl v2.8h, v2.8b, v13.8b\n"
"ssubl v3.8h, v3.8b, v13.8b\n"
- "ldp x21, x20, [x15, #0x30]\n"
- "ldr d31, [x27, x17]\n"
+ "ldp x22, x21, [x12, #0x20]\n"
+ "ldp x20, x19, [x12, #0x30]\n"
"ssubl v4.8h, v4.8b, v13.8b\n"
"ssubl v5.8h, v5.8b, v13.8b\n"
- "ldr d30, [x26, x17]\n"
- "ldr d29, [x25, x17]\n"
+ "ldr d31, [x26, x15]\n"
+ "ldr d30, [x25, x15]\n"
"ssubl v6.8h, v6.8b, v13.8b\n"
"ssubl v7.8h, v7.8b, v13.8b\n"
- "ldr d28, [x24, x17]\n"
- "ldr d27, [x23, x17]\n"
+ "ldr d29, [x24, x15]\n"
+ "ldr d28, [x23, x15]\n"
"ssubl v8.8h, v8.8b, v13.8b\n"
"usubl v31.8h, v31.8b, v12.8b\n"
- "ldr d26, [x22, x17]\n"
- "ldr d25, [x21, x17]\n"
+ "ldr d27, [x22, x15]\n"
+ "ldr d26, [x21, x15]\n"
"usubl v30.8h, v30.8b, v12.8b\n"
"usubl v29.8h, v29.8b, v12.8b\n"
- "ldr d24, [x20, x17]\n"
+ "ldr d25, [x20, x15]\n"
+ "ldr d24, [x19, x15]\n"
"usubl v28.8h, v28.8b, v12.8b\n"
"usubl v27.8h, v27.8b, v12.8b\n"
"usubl v26.8h, v26.8b, v12.8b\n"
@@ -428,966 +428,966 @@ void a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(
"bgt 1b\n"
"2:" // Tail
"smlal v15.4s, v31.4h, v8.4h\n"
- "smlal2 v17.4s, v31.8h, v8.8h\n"
- "ldr x24, [x15, #0x40]\n"
- "ldr x22, [x15, #0x48]\n"
- "smlal v10.4s, v31.4h, v6.4h\n"
- "smlal2 v20.4s, v31.8h, v6.8h\n"
- "ldr x21, [x15, #0x50]\n"
- "ldr x20, [x15, #0x58]\n"
+ "smlal2 v10.4s, v31.8h, v8.8h\n"
+ "ldr x24, [x12, #0x40]\n"
+ "ldr x23, [x12, #0x48]\n"
+ "smlal v9.4s, v31.4h, v6.4h\n"
+ "smlal2 v16.4s, v31.8h, v6.8h\n"
+ "ldr x21, [x12, #0x50]\n"
+ "ldr x19, [x12, #0x58]\n"
"smlal v15.4s, v30.4h, v0.4h\n"
- "smlal2 v17.4s, v30.8h, v0.8h\n"
- "ldr q19, [x13, #0x0]\n"
- "ldr x23, [x15, #0x78]\n"
- "smlal v10.4s, v28.4h, v1.4h\n"
- "smlal2 v20.4s, v28.8h, v1.8h\n"
- "ldr d28, [x22, x17]\n"
+ "smlal2 v10.4s, v30.8h, v0.8h\n"
+ "ldr x22, [x12, #0x78]\n"
+ "ldr x20, [x12, #0x60]\n"
+ "smlal v9.4s, v28.4h, v1.4h\n"
+ "smlal2 v16.4s, v28.8h, v1.8h\n"
+ "ldr d28, [x23, x15]\n"
"usubl v28.8h, v28.8b, v12.8b\n"
"smlal v15.4s, v29.4h, v1.4h\n"
- "smlal2 v17.4s, v29.8h, v1.8h\n"
- "ldr d29, [x24, x17]\n"
+ "smlal2 v10.4s, v29.8h, v1.8h\n"
+ "ldr d29, [x24, x15]\n"
"usubl v29.8h, v29.8b, v12.8b\n"
- "smlal v10.4s, v27.4h, v2.4h\n"
- "smlal2 v20.4s, v27.8h, v2.8h\n"
- "ldr d27, [x21, x17]\n"
+ "smlal v9.4s, v27.4h, v2.4h\n"
+ "smlal2 v16.4s, v27.8h, v2.8h\n"
+ "ldr d27, [x21, x15]\n"
"usubl v27.8h, v27.8b, v12.8b\n"
"smlal v15.4s, v26.4h, v3.4h\n"
- "smlal2 v17.4s, v26.8h, v3.8h\n"
- "ldr d26, [x20, x17]\n"
- "ldr x20, [x15, #0x60]\n"
- "smlal v10.4s, v24.4h, v0.4h\n"
- "smlal2 v20.4s, v24.8h, v0.8h\n"
+ "smlal2 v10.4s, v26.8h, v3.8h\n"
+ "ldr d26, [x19, x15]\n"
"usubl v26.8h, v26.8b, v12.8b\n"
- "ldr x21, [x15, #0x80]\n"
+ "smlal v9.4s, v24.4h, v0.4h\n"
+ "smlal2 v16.4s, v24.8h, v0.8h\n"
+ "ldr x21, [x12, #0x80]\n"
+ "ldr x19, [x12, #0x68]\n"
"smlal v15.4s, v25.4h, v4.4h\n"
- "smlal2 v17.4s, v25.8h, v4.8h\n"
- "ldr d25, [x20, x17]\n"
- "ldr x20, [x15, #0x68]\n"
- "smlal v10.4s, v29.4h, v4.4h\n"
- "smlal2 v20.4s, v29.8h, v4.8h\n"
- "ldr d29, [x20, x17]\n"
+ "smlal2 v10.4s, v25.8h, v4.8h\n"
+ "ldr d25, [x20, x15]\n"
"usubl v25.8h, v25.8b, v12.8b\n"
+ "smlal v9.4s, v29.4h, v4.4h\n"
+ "smlal2 v16.4s, v29.8h, v4.8h\n"
+ "ldr x20, [x12, #0x88]\n"
+ "ldr d29, [x19, x15]\n"
"smlal v15.4s, v24.4h, v2.4h\n"
- "smlal2 v17.4s, v24.8h, v2.8h\n"
- "ldr q18, [x12, #0x0]\n"
- "ldr x22, [x15, #0x88]\n"
- "smlal v10.4s, v28.4h, v5.4h\n"
- "smlal2 v20.4s, v28.8h, v5.8h\n"
- "ldr d28, [x21, x17]\n"
- "ldr x21, [x15, #0x70]\n"
- "smlal v9.4s, v31.4h, v2.4h\n"
- "smlal2 v23.4s, v31.8h, v2.8h\n"
+ "smlal2 v10.4s, v24.8h, v2.8h\n"
+ "ldr x19, [x12, #0x70]\n"
+ "usubl v29.8h, v29.8b, v12.8b\n"
+ "smlal v9.4s, v28.4h, v5.4h\n"
+ "smlal2 v16.4s, v28.8h, v5.8h\n"
+ "ldr d28, [x21, x15]\n"
"usubl v28.8h, v28.8b, v12.8b\n"
- "ldr x25, [x15, #0x98]\n"
+ "smlal v22.4s, v31.4h, v2.4h\n"
+ "smlal2 v21.4s, v31.8h, v2.8h\n"
+ "ldr x24, [x12, #0x98]\n"
+ "ldr d24, [x19, x15]\n"
"smlal v15.4s, v27.4h, v5.4h\n"
- "smlal2 v17.4s, v27.8h, v5.8h\n"
- "usubl v29.8h, v29.8b, v12.8b\n"
- "ldr x24, [x15, #0x90]\n"
- "smlal v10.4s, v27.4h, v3.4h\n"
- "smlal2 v20.4s, v27.8h, v3.8h\n"
- "ldr d27, [x23, x17]\n"
+ "smlal2 v10.4s, v27.8h, v5.8h\n"
+ "usubl v24.8h, v24.8b, v12.8b\n"
+ "ldr x23, [x12, #0x90]\n"
+ "smlal v9.4s, v27.4h, v3.4h\n"
+ "smlal2 v16.4s, v27.8h, v3.8h\n"
+ "ldr d27, [x22, x15]\n"
"usubl v27.8h, v27.8b, v12.8b\n"
- "smlal v21.4s, v31.4h, v0.4h\n"
- "smlal v9.4s, v26.4h, v3.4h\n"
- "ldr x23, [x15, #0xa8]\n"
- "ldr x20, [x15, #0xa0]\n"
- "smlal2 v23.4s, v26.8h, v3.8h\n"
- "ldr d26, [x22, x17]\n"
- "smlal2 v22.4s, v31.8h, v0.8h\n"
- "ldr d24, [x21, x17]\n"
- "smlal v21.4s, v27.4h, v4.4h\n"
- "smlal v9.4s, v25.4h, v0.4h\n"
+ "smlal v23.4s, v31.4h, v0.4h\n"
+ "smlal v22.4s, v26.4h, v3.4h\n"
+ "ldr x22, [x12, #0xa8]\n"
+ "ldr x19, [x12, #0xa0]\n"
+ "smlal2 v21.4s, v26.8h, v3.8h\n"
+ "smlal2 v18.4s, v31.8h, v0.8h\n"
+ "ldr d26, [x20, x15]\n"
"usubl v26.8h, v26.8b, v12.8b\n"
- "ldr x22, [x15, #0xb0]\n"
- "smlal2 v23.4s, v25.8h, v0.8h\n"
- "ldr q30, [x13, #0x10]\n"
- "smlal2 v22.4s, v27.8h, v4.8h\n"
- "ldr d27, [x20, x17]\n"
- "smlal v21.4s, v28.4h, v1.4h\n"
+ "smlal v23.4s, v27.4h, v4.4h\n"
+ "smlal v22.4s, v25.4h, v0.4h\n"
+ "ldr x21, [x12, #0xb0]\n"
+ "ldr x20, [x12, #0xb8]\n"
+ "smlal2 v21.4s, v25.8h, v0.8h\n"
+ "smlal2 v18.4s, v27.8h, v4.8h\n"
+ "ldr d27, [x19, x15]\n"
+ "usubl v27.8h, v27.8b, v12.8b\n"
+ "smlal v23.4s, v28.4h, v1.4h\n"
"smlal v15.4s, v25.4h, v6.4h\n"
- "usubl v24.8h, v24.8b, v12.8b\n"
- "ldr x21, [x15, #0xb8]\n"
- "smlal2 v17.4s, v25.8h, v6.8h\n"
- "ldr d25, [x24, x17]\n"
- "smlal v9.4s, v29.4h, v4.4h\n"
+ "ldr x19, [x12, #0xc0]\n"
+ "ldr q19, [x13, #0x0]\n"
+ "smlal2 v10.4s, v25.8h, v6.8h\n"
+ "smlal v22.4s, v29.4h, v4.4h\n"
+ "ldr d25, [x23, x15]\n"
"usubl v25.8h, v25.8b, v12.8b\n"
- "smlal2 v23.4s, v29.8h, v4.8h\n"
- "ldr d29, [x25, x17]\n"
- "ldr q31, [x12, #0x10]\n"
- "smlal2 v22.4s, v28.8h, v1.8h\n"
- "smlal v21.4s, v26.4h, v5.4h\n"
+ "smlal2 v21.4s, v29.8h, v4.8h\n"
+ "ldr d29, [x24, x15]\n"
+ "smlal2 v18.4s, v28.8h, v1.8h\n"
"usubl v29.8h, v29.8b, v12.8b\n"
+ "smlal v23.4s, v26.4h, v5.4h\n"
"smlal v15.4s, v24.4h, v7.4h\n"
- "ldr x20, [x15, #0xc0]\n"
- "smlal2 v17.4s, v24.8h, v7.8h\n"
- "smlal v9.4s, v24.4h, v1.4h\n"
- "usubl v27.8h, v27.8b, v12.8b\n"
- "tst x7, #0x7\n"
- "smlal2 v23.4s, v24.8h, v1.8h\n"
- "ldr d24, [x23, x17]\n"
- "smlal2 v22.4s, v26.8h, v5.8h\n"
- "ldr d26, [x22, x17]\n"
- "smlal v21.4s, v29.4h, v2.4h\n"
+ "ldr q0, [x11, #0x0]\n"
+ "ldr q4, [x13, #0x10]\n"
+ "smlal2 v10.4s, v24.8h, v7.8h\n"
+ "smlal v22.4s, v24.4h, v1.4h\n"
+ "sqrdmulh v15.4s, v15.4s, v19.4s\n"
+ "ldr q31, [x11, #0x10]\n"
+ "smlal2 v21.4s, v24.8h, v1.8h\n"
+ "ldr d24, [x22, x15]\n"
+ "smlal2 v18.4s, v26.8h, v5.8h\n"
"usubl v24.8h, v24.8b, v12.8b\n"
- "smlal2 v22.4s, v29.8h, v2.8h\n"
- "add x13, x13, #0x20\n"
- "smlal v9.4s, v25.4h, v6.4h\n"
- "smlal v21.4s, v24.4h, v3.4h\n"
+ "smlal v23.4s, v29.4h, v2.4h\n"
+ "ldr d26, [x21, x15]\n"
+ "smlal2 v18.4s, v29.8h, v2.8h\n"
"usubl v26.8h, v26.8b, v12.8b\n"
- "add x12, x12, #0x20\n"
- "smlal v10.4s, v28.4h, v7.4h\n"
- "smlal2 v20.4s, v28.8h, v7.8h\n"
- "sqrdmulh v15.4s, v15.4s, v19.4s\n"
- "smlal2 v23.4s, v25.8h, v6.8h\n"
- "ldr d25, [x21, x17]\n"
- "smlal2 v22.4s, v24.8h, v3.8h\n"
+ "smlal v22.4s, v25.4h, v6.4h\n"
+ "smlal v23.4s, v24.4h, v3.4h\n"
+ "and v30.16b, v15.16b, v0.16b\n"
+ "tst x8, #0x7\n"
+ "smlal v9.4s, v28.4h, v7.4h\n"
+ "smlal2 v16.4s, v28.8h, v7.8h\n"
+ "sqrdmulh v10.4s, v10.4s, v4.4s\n"
+ "add x13, x13, #0x20\n"
+ "smlal2 v21.4s, v25.8h, v6.8h\n"
+ "ldr d25, [x20, x15]\n"
+ "smlal2 v18.4s, v24.8h, v3.8h\n"
"usubl v25.8h, v25.8b, v12.8b\n"
- "smlal v9.4s, v27.4h, v7.4h\n"
- "smlal v21.4s, v26.4h, v7.4h\n"
- "and v0.16b, v15.16b, v18.16b\n"
- "smlal v10.4s, v29.4h, v8.4h\n"
- "smlal2 v20.4s, v29.8h, v8.8h\n"
- "ldr d29, [x20, x17]\n"
+ "smlal v22.4s, v27.4h, v7.4h\n"
+ "smlal v23.4s, v26.4h, v7.4h\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "add x11, x11, #0x20\n"
+ "smlal v9.4s, v29.4h, v8.4h\n"
+ "smlal2 v16.4s, v29.8h, v8.8h\n"
+ "ldr d29, [x19, x15]\n"
"usubl v29.8h, v29.8b, v12.8b\n"
- "smlal2 v23.4s, v27.8h, v7.8h\n"
- "smlal2 v22.4s, v26.8h, v7.8h\n"
- "sqrdmulh v17.4s, v17.4s, v30.4s\n"
- "add x17, x17, #0x8\n"
- "smlal v9.4s, v24.4h, v5.4h\n"
- "smlal v21.4s, v25.4h, v6.4h\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "smlal2 v23.4s, v24.8h, v5.8h\n"
- "smlal2 v22.4s, v25.8h, v6.8h\n"
- "and v7.16b, v17.16b, v31.16b\n"
- "smlal v9.4s, v25.4h, v8.4h\n"
- "smlal v21.4s, v29.4h, v8.4h\n"
- "sqrdmulh v10.4s, v10.4s, v19.4s\n"
- "smlal2 v23.4s, v25.8h, v8.8h\n"
- "smlal2 v22.4s, v29.8h, v8.8h\n"
+ "smlal2 v21.4s, v27.8h, v7.8h\n"
+ "smlal2 v18.4s, v26.8h, v7.8h\n"
"sqrdmulh v9.4s, v9.4s, v19.4s\n"
- "sqrdmulh v21.4s, v21.4s, v19.4s\n"
- "sqadd v15.4s, v15.4s, v0.4s\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "and v19.16b, v10.16b, v18.16b\n"
- "sqrdmulh v20.4s, v20.4s, v30.4s\n"
- "and v27.16b, v9.16b, v18.16b\n"
- "sqrdmulh v23.4s, v23.4s, v30.4s\n"
- "and v0.16b, v21.16b, v18.16b\n"
- "sqrdmulh v22.4s, v22.4s, v30.4s\n"
- "sqadd v17.4s, v17.4s, v7.4s\n"
+ "add x15, x15, #0x8\n"
+ "smlal v22.4s, v24.4h, v5.4h\n"
+ "smlal v23.4s, v25.4h, v6.4h\n"
+ "and v28.16b, v9.16b, v0.16b\n"
+ "smlal2 v21.4s, v24.8h, v5.8h\n"
+ "smlal2 v18.4s, v25.8h, v6.8h\n"
+ "sqrdmulh v16.4s, v16.4s, v4.4s\n"
+ "smlal v22.4s, v25.4h, v8.4h\n"
+ "smlal v23.4s, v29.4h, v8.4h\n"
+ "sqrdmulh v22.4s, v22.4s, v19.4s\n"
+ "smlal2 v21.4s, v25.8h, v8.8h\n"
+ "smlal2 v18.4s, v29.8h, v8.8h\n"
+ "sqrdmulh v23.4s, v23.4s, v19.4s\n"
+ "and v29.16b, v22.16b, v0.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v4.4s\n"
+ "and v20.16b, v23.16b, v0.16b\n"
+ "sqrdmulh v18.4s, v18.4s, v4.4s\n"
+ "and v19.16b, v10.16b, v31.16b\n"
+ "sshr v28.4s, v28.4s, #0x1f\n"
+ "and v4.16b, v16.16b, v31.16b\n"
+ "sshr v29.4s, v29.4s, #0x1f\n"
+ "and v5.16b, v21.16b, v31.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v26.16b, v18.16b, v31.16b\n"
+ "sqadd v15.4s, v15.4s, v30.4s\n"
"sshr v19.4s, v19.4s, #0x1f\n"
- "and v5.16b, v20.16b, v31.16b\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "and v4.16b, v23.16b, v31.16b\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "and v7.16b, v22.16b, v31.16b\n"
- "sqadd v10.4s, v10.4s, v19.4s\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sqadd v9.4s, v9.4s, v27.4s\n"
+ "sqadd v9.4s, v9.4s, v28.4s\n"
"sshr v4.4s, v4.4s, #0x1f\n"
- "sqadd v21.4s, v21.4s, v0.4s\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "srshl v15.4s, v15.4s, v18.4s\n"
- "srshl v10.4s, v10.4s, v18.4s\n"
- "sqadd v20.4s, v20.4s, v5.4s\n"
- "srshl v9.4s, v9.4s, v18.4s\n"
- "sqadd v23.4s, v23.4s, v4.4s\n"
- "srshl v21.4s, v21.4s, v18.4s\n"
- "sqadd v22.4s, v22.4s, v7.4s\n"
- "srshl v17.4s, v17.4s, v31.4s\n"
+ "sqadd v22.4s, v22.4s, v29.4s\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
+ "sqadd v23.4s, v23.4s, v20.4s\n"
+ "sshr v26.4s, v26.4s, #0x1f\n"
+ "srshl v15.4s, v15.4s, v0.4s\n"
+ "sqadd v10.4s, v10.4s, v19.4s\n"
+ "srshl v9.4s, v9.4s, v0.4s\n"
+ "sqadd v16.4s, v16.4s, v4.4s\n"
+ "srshl v22.4s, v22.4s, v0.4s\n"
+ "sqadd v21.4s, v21.4s, v5.4s\n"
+ "srshl v23.4s, v23.4s, v0.4s\n"
+ "sqadd v18.4s, v18.4s, v26.4s\n"
+ "srshl v10.4s, v10.4s, v31.4s\n"
"sqxtn v15.4h, v15.4s\n"
- "srshl v20.4s, v20.4s, v31.4s\n"
- "sqxtn v10.4h, v10.4s\n"
- "srshl v23.4s, v23.4s, v31.4s\n"
+ "srshl v16.4s, v16.4s, v31.4s\n"
"sqxtn v9.4h, v9.4s\n"
- "srshl v22.4s, v22.4s, v31.4s\n"
- "sqxtn v21.4h, v21.4s\n"
- "sqxtn2 v15.8h, v17.4s\n"
- "sqxtn2 v10.8h, v20.4s\n"
- "sqxtn2 v9.8h, v23.4s\n"
- "sqxtn2 v21.8h, v22.4s\n"
+ "srshl v21.4s, v21.4s, v31.4s\n"
+ "sqxtn v22.4h, v22.4s\n"
+ "srshl v18.4s, v18.4s, v31.4s\n"
+ "sqxtn v23.4h, v23.4s\n"
+ "sqxtn2 v15.8h, v10.4s\n"
+ "sqxtn2 v9.8h, v16.4s\n"
+ "sqxtn2 v22.8h, v21.4s\n"
+ "sqxtn2 v23.8h, v18.4s\n"
"sqadd v15.8h, v15.8h, v11.8h\n"
- "sqadd v10.8h, v10.8h, v11.8h\n"
"sqadd v9.8h, v9.8h, v11.8h\n"
- "sqadd v21.8h, v21.8h, v11.8h\n"
- "smax v15.8h, v15.8h, v16.8h\n"
- "smax v10.8h, v10.8h, v16.8h\n"
- "smax v9.8h, v9.8h, v16.8h\n"
- "smax v21.8h, v21.8h, v16.8h\n"
+ "sqadd v22.8h, v22.8h, v11.8h\n"
+ "sqadd v23.8h, v23.8h, v11.8h\n"
+ "smax v15.8h, v15.8h, v17.8h\n"
+ "smax v9.8h, v9.8h, v17.8h\n"
+ "smax v22.8h, v22.8h, v17.8h\n"
+ "smax v23.8h, v23.8h, v17.8h\n"
"smin v15.8h, v15.8h, v14.8h\n"
- "smin v10.8h, v10.8h, v14.8h\n"
"smin v9.8h, v9.8h, v14.8h\n"
- "smin v21.8h, v21.8h, v14.8h\n"
+ "smin v22.8h, v22.8h, v14.8h\n"
+ "smin v23.8h, v23.8h, v14.8h\n"
"uzp1 v15.16b, v15.16b, v15.16b\n"
- "str d15, [x11, x16]\n"
- "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "str d15, [x10, x14]\n"
"uzp1 v9.16b, v9.16b, v9.16b\n"
- "str d10, [x10, x16]\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "str d9, [x9, x16]\n"
- "str d21, [x28, x16]\n"
- "add x16, x16, #0x8\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str d9, [x9, x14]\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "str d22, [x28, x14]\n"
+ "str d23, [x27, x14]\n"
+ "add x14, x14, #0x8\n"
"beq 88f\n"
- "add x14, x14, #0x48\n"
+ "add x17, x17, #0x48\n"
"3:" // Oddments
- "ldr x24, [%x[params], %[offsetof_Params_bias]]\n"
- "tbz x7, #2, 5f\n"
- "ld1 { v15.4s }, [x24], #0x10\n"
- "tbz x7, #1, 4f\n"
- "ld1 { v17.d }[0], [x24], #0x8\n"
- "tbz x7, #0, 7f\n"
- "ld1 { v17.s }[2], [x24]\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "tbz x8, #2, 5f\n"
+ "ld1 { v15.4s }, [x19], #0x10\n"
+ "tbz x8, #1, 4f\n"
+ "ld1 { v10.d }[0], [x19], #0x8\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v10.s }[2], [x19]\n"
"b 7f\n"
"4:" // Oddments: Load bias: Bit 2: Bit 1: Unset
- "tbz x7, #0, 7f\n"
- "ld1 { v17.s }[0], [x24]\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v10.s }[0], [x19]\n"
"b 7f\n"
"5:" // Oddments: Load bias: Bit 2: Unset
- "tbz x7, #1, 6f\n"
- "ld1 { v15.d }[0], [x24], #0x8\n"
- "tbz x7, #0, 7f\n"
- "ld1 { v15.s }[2], [x24]\n"
+ "tbz x8, #1, 6f\n"
+ "ld1 { v15.d }[0], [x19], #0x8\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v15.s }[2], [x19]\n"
"b 7f\n"
"6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 7f\n"
- "ld1 { v15.s }[0], [x24]\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v15.s }[0], [x19]\n"
"7:" // Oddments: Load bias: Bit 2: End
- "ldr d0, [x14, #0x0]\n"
- "ldr d1, [x14, #0x8]\n"
- "mov v10.16b, v15.16b\n"
- "mov v20.16b, v17.16b\n"
- "ldr d2, [x14, #0x10]\n"
- "ldr d3, [x14, #0x18]\n"
+ "ldr d0, [x17, #0x0]\n"
+ "ldr d1, [x17, #0x8]\n"
"mov v9.16b, v15.16b\n"
- "mov v23.16b, v17.16b\n"
- "ldr d4, [x14, #0x20]\n"
- "ldr d5, [x14, #0x28]\n"
- "mov v21.16b, v15.16b\n"
- "mov v22.16b, v17.16b\n"
- "ldr d6, [x14, #0x30]\n"
- "ldr d7, [x14, #0x38]\n"
+ "mov v16.16b, v10.16b\n"
+ "ldr d2, [x17, #0x10]\n"
+ "ldr d3, [x17, #0x18]\n"
+ "mov v22.16b, v15.16b\n"
+ "mov v21.16b, v10.16b\n"
+ "ldr d4, [x17, #0x20]\n"
+ "ldr d5, [x17, #0x28]\n"
+ "mov v23.16b, v15.16b\n"
+ "mov v18.16b, v10.16b\n"
+ "ldr d6, [x17, #0x30]\n"
+ "ldr d7, [x17, #0x38]\n"
"ssubl v0.8h, v0.8b, v13.8b\n"
"ssubl v1.8h, v1.8b, v13.8b\n"
- "ldr d8, [x14, #0x40]\n"
- "ldp x27, x26, [x15, #0x0]\n"
+ "ldr d8, [x17, #0x40]\n"
+ "ldp x26, x25, [x12, #0x0]\n"
"ssubl v2.8h, v2.8b, v13.8b\n"
"ssubl v3.8h, v3.8b, v13.8b\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "ldp x23, x22, [x15, #0x20]\n"
+ "ldp x24, x23, [x12, #0x10]\n"
+ "ldp x22, x21, [x12, #0x20]\n"
"ssubl v4.8h, v4.8b, v13.8b\n"
"ssubl v5.8h, v5.8b, v13.8b\n"
- "ldp x21, x20, [x15, #0x30]\n"
+ "ldp x20, x19, [x12, #0x30]\n"
"ssubl v6.8h, v6.8b, v13.8b\n"
"ssubl v7.8h, v7.8b, v13.8b\n"
"ssubl v8.8h, v8.8b, v13.8b\n"
- "add x27, x27, x17\n"
- "add x26, x26, x17\n"
- "add x25, x25, x17\n"
- "add x24, x24, x17\n"
- "add x23, x23, x17\n"
- "add x22, x22, x17\n"
- "add x21, x21, x17\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 9f\n"
- "ld1 { v31.s }[0], [x27], #0x4\n"
- "ld1 { v30.s }[0], [x26], #0x4\n"
- "ld1 { v29.s }[0], [x25], #0x4\n"
- "ld1 { v28.s }[0], [x24], #0x4\n"
- "ld1 { v27.s }[0], [x23], #0x4\n"
- "ld1 { v26.s }[0], [x22], #0x4\n"
- "ld1 { v25.s }[0], [x21], #0x4\n"
- "ld1 { v24.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 8f\n"
- "ld1 { v31.h }[2], [x27], #0x2\n"
- "ld1 { v30.h }[2], [x26], #0x2\n"
- "ld1 { v29.h }[2], [x25], #0x2\n"
- "ld1 { v28.h }[2], [x24], #0x2\n"
- "ld1 { v27.h }[2], [x23], #0x2\n"
- "ld1 { v26.h }[2], [x22], #0x2\n"
- "ld1 { v25.h }[2], [x21], #0x2\n"
- "ld1 { v24.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 11f\n"
- "ld1 { v31.b }[6], [x27]\n"
- "ld1 { v30.b }[6], [x26]\n"
- "ld1 { v29.b }[6], [x25]\n"
- "ld1 { v28.b }[6], [x24]\n"
- "ld1 { v27.b }[6], [x23]\n"
- "ld1 { v26.b }[6], [x22]\n"
- "ld1 { v25.b }[6], [x21]\n"
- "ld1 { v24.b }[6], [x20]\n"
+ "add x26, x26, x15\n"
+ "add x25, x25, x15\n"
+ "add x24, x24, x15\n"
+ "add x23, x23, x15\n"
+ "add x22, x22, x15\n"
+ "add x21, x21, x15\n"
+ "add x20, x20, x15\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 9f\n"
+ "ld1 { v31.s }[0], [x26], #0x4\n"
+ "ld1 { v30.s }[0], [x25], #0x4\n"
+ "ld1 { v29.s }[0], [x24], #0x4\n"
+ "ld1 { v28.s }[0], [x23], #0x4\n"
+ "ld1 { v27.s }[0], [x22], #0x4\n"
+ "ld1 { v26.s }[0], [x21], #0x4\n"
+ "ld1 { v25.s }[0], [x20], #0x4\n"
+ "ld1 { v24.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 8f\n"
+ "ld1 { v31.h }[2], [x26], #0x2\n"
+ "ld1 { v30.h }[2], [x25], #0x2\n"
+ "ld1 { v29.h }[2], [x24], #0x2\n"
+ "ld1 { v28.h }[2], [x23], #0x2\n"
+ "ld1 { v27.h }[2], [x22], #0x2\n"
+ "ld1 { v26.h }[2], [x21], #0x2\n"
+ "ld1 { v25.h }[2], [x20], #0x2\n"
+ "ld1 { v24.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v31.b }[6], [x26]\n"
+ "ld1 { v30.b }[6], [x25]\n"
+ "ld1 { v29.b }[6], [x24]\n"
+ "ld1 { v28.b }[6], [x23]\n"
+ "ld1 { v27.b }[6], [x22]\n"
+ "ld1 { v26.b }[6], [x21]\n"
+ "ld1 { v25.b }[6], [x20]\n"
+ "ld1 { v24.b }[6], [x19]\n"
"b 11f\n"
"8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset
- "tbz x7, #0, 11f\n"
- "ld1 { v31.b }[4], [x27]\n"
- "ld1 { v30.b }[4], [x26]\n"
- "ld1 { v29.b }[4], [x25]\n"
- "ld1 { v28.b }[4], [x24]\n"
- "ld1 { v27.b }[4], [x23]\n"
- "ld1 { v26.b }[4], [x22]\n"
- "ld1 { v25.b }[4], [x21]\n"
- "ld1 { v24.b }[4], [x20]\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v31.b }[4], [x26]\n"
+ "ld1 { v30.b }[4], [x25]\n"
+ "ld1 { v29.b }[4], [x24]\n"
+ "ld1 { v28.b }[4], [x23]\n"
+ "ld1 { v27.b }[4], [x22]\n"
+ "ld1 { v26.b }[4], [x21]\n"
+ "ld1 { v25.b }[4], [x20]\n"
+ "ld1 { v24.b }[4], [x19]\n"
"b 11f\n"
"9:" // Oddments: Initial loads: Bit 2: Unset
- "tbz x7, #1, 10f\n"
- "ld1 { v31.h }[0], [x27], #0x2\n"
- "ld1 { v30.h }[0], [x26], #0x2\n"
- "ld1 { v29.h }[0], [x25], #0x2\n"
- "ld1 { v28.h }[0], [x24], #0x2\n"
- "ld1 { v27.h }[0], [x23], #0x2\n"
- "ld1 { v26.h }[0], [x22], #0x2\n"
- "ld1 { v25.h }[0], [x21], #0x2\n"
- "ld1 { v24.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 11f\n"
- "ld1 { v31.b }[2], [x27]\n"
- "ld1 { v30.b }[2], [x26]\n"
- "ld1 { v29.b }[2], [x25]\n"
- "ld1 { v28.b }[2], [x24]\n"
- "ld1 { v27.b }[2], [x23]\n"
- "ld1 { v26.b }[2], [x22]\n"
- "ld1 { v25.b }[2], [x21]\n"
- "ld1 { v24.b }[2], [x20]\n"
+ "tbz x8, #1, 10f\n"
+ "ld1 { v31.h }[0], [x26], #0x2\n"
+ "ld1 { v30.h }[0], [x25], #0x2\n"
+ "ld1 { v29.h }[0], [x24], #0x2\n"
+ "ld1 { v28.h }[0], [x23], #0x2\n"
+ "ld1 { v27.h }[0], [x22], #0x2\n"
+ "ld1 { v26.h }[0], [x21], #0x2\n"
+ "ld1 { v25.h }[0], [x20], #0x2\n"
+ "ld1 { v24.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v31.b }[2], [x26]\n"
+ "ld1 { v30.b }[2], [x25]\n"
+ "ld1 { v29.b }[2], [x24]\n"
+ "ld1 { v28.b }[2], [x23]\n"
+ "ld1 { v27.b }[2], [x22]\n"
+ "ld1 { v26.b }[2], [x21]\n"
+ "ld1 { v25.b }[2], [x20]\n"
+ "ld1 { v24.b }[2], [x19]\n"
"b 11f\n"
"10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 11f\n"
- "ld1 { v31.b }[0], [x27]\n"
- "ld1 { v30.b }[0], [x26]\n"
- "ld1 { v29.b }[0], [x25]\n"
- "ld1 { v28.b }[0], [x24]\n"
- "ld1 { v27.b }[0], [x23]\n"
- "ld1 { v26.b }[0], [x22]\n"
- "ld1 { v25.b }[0], [x21]\n"
- "ld1 { v24.b }[0], [x20]\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v31.b }[0], [x26]\n"
+ "ld1 { v30.b }[0], [x25]\n"
+ "ld1 { v29.b }[0], [x24]\n"
+ "ld1 { v28.b }[0], [x23]\n"
+ "ld1 { v27.b }[0], [x22]\n"
+ "ld1 { v26.b }[0], [x21]\n"
+ "ld1 { v25.b }[0], [x20]\n"
+ "ld1 { v24.b }[0], [x19]\n"
"11:" // Oddments: Initial loads: Bit 2: End
"usubl v31.8h, v31.8b, v12.8b\n"
"smlal v15.4s, v31.4h, v8.4h\n"
- "smlal2 v17.4s, v31.8h, v8.8h\n"
- "ldr x24, [x15, #0x40]\n"
+ "smlal2 v10.4s, v31.8h, v8.8h\n"
+ "ldr x24, [x12, #0x40]\n"
"usubl v30.8h, v30.8b, v12.8b\n"
"smlal v15.4s, v30.4h, v0.4h\n"
- "smlal2 v17.4s, v30.8h, v0.8h\n"
- "add x24, x24, x17\n"
+ "smlal2 v10.4s, v30.8h, v0.8h\n"
+ "add x24, x24, x15\n"
"usubl v29.8h, v29.8b, v12.8b\n"
- "smlal v10.4s, v31.4h, v6.4h\n"
- "smlal2 v20.4s, v31.8h, v6.8h\n"
+ "smlal v9.4s, v31.4h, v6.4h\n"
+ "smlal2 v16.4s, v31.8h, v6.8h\n"
"smlal v15.4s, v29.4h, v1.4h\n"
- "smlal2 v17.4s, v29.8h, v1.8h\n"
+ "smlal2 v10.4s, v29.8h, v1.8h\n"
"usubl v28.8h, v28.8b, v12.8b\n"
"usubl v26.8h, v26.8b, v12.8b\n"
- "smlal v10.4s, v28.4h, v1.4h\n"
- "smlal2 v20.4s, v28.8h, v1.8h\n"
+ "smlal v9.4s, v28.4h, v1.4h\n"
+ "smlal2 v16.4s, v28.8h, v1.8h\n"
"smlal v15.4s, v26.4h, v3.4h\n"
- "smlal2 v17.4s, v26.8h, v3.8h\n"
+ "smlal2 v10.4s, v26.8h, v3.8h\n"
"usubl v27.8h, v27.8b, v12.8b\n"
"usubl v25.8h, v25.8b, v12.8b\n"
- "smlal v10.4s, v27.4h, v2.4h\n"
- "smlal2 v20.4s, v27.8h, v2.8h\n"
+ "smlal v9.4s, v27.4h, v2.4h\n"
+ "smlal2 v16.4s, v27.8h, v2.8h\n"
"smlal v15.4s, v25.4h, v4.4h\n"
- "smlal2 v17.4s, v25.8h, v4.8h\n"
+ "smlal2 v10.4s, v25.8h, v4.8h\n"
"usubl v24.8h, v24.8b, v12.8b\n"
- "smlal v9.4s, v31.4h, v2.4h\n"
- "smlal2 v23.4s, v31.8h, v2.8h\n"
- "smlal v21.4s, v31.4h, v0.4h\n"
- "smlal2 v22.4s, v31.8h, v0.8h\n"
+ "smlal v22.4s, v31.4h, v2.4h\n"
+ "smlal2 v21.4s, v31.8h, v2.8h\n"
+ "smlal v23.4s, v31.4h, v0.4h\n"
+ "smlal2 v18.4s, v31.8h, v0.8h\n"
"smlal v15.4s, v24.4h, v2.4h\n"
- "smlal2 v17.4s, v24.8h, v2.8h\n"
- "smlal v10.4s, v24.4h, v0.4h\n"
- "smlal2 v20.4s, v24.8h, v0.8h\n"
- "tbz x7, #2, 13f\n"
+ "smlal2 v10.4s, v24.8h, v2.8h\n"
+ "smlal v9.4s, v24.4h, v0.4h\n"
+ "smlal2 v16.4s, v24.8h, v0.8h\n"
+ "tbz x8, #2, 13f\n"
"ld1 { v29.s }[0], [x24], #0x4\n"
- "tbz x7, #1, 12f\n"
+ "tbz x8, #1, 12f\n"
"ld1 { v29.h }[2], [x24], #0x2\n"
- "tbz x7, #0, 15f\n"
+ "tbz x8, #0, 15f\n"
"ld1 { v29.b }[6], [x24]\n"
"b 15f\n"
"12:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 15f\n"
+ "tbz x8, #0, 15f\n"
"ld1 { v29.b }[4], [x24]\n"
"b 15f\n"
"13:" // Oddments: Load (1, 3): Bit 2: Unset
- "tbz x7, #1, 14f\n"
+ "tbz x8, #1, 14f\n"
"ld1 { v29.h }[0], [x24], #0x2\n"
- "tbz x7, #0, 15f\n"
+ "tbz x8, #0, 15f\n"
"ld1 { v29.b }[2], [x24]\n"
"b 15f\n"
"14:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 15f\n"
+ "tbz x8, #0, 15f\n"
"ld1 { v29.b }[0], [x24]\n"
"15:" // Oddments: Load (1, 3): Bit 2: End
"usubl v29.8h, v29.8b, v12.8b\n"
- "ldr x22, [x15, #0x48]\n"
- "smlal v10.4s, v29.4h, v4.4h\n"
- "smlal2 v20.4s, v29.8h, v4.8h\n"
- "add x22, x22, x17\n"
- "tbz x7, #2, 17f\n"
- "ld1 { v28.s }[0], [x22], #0x4\n"
- "tbz x7, #1, 16f\n"
- "ld1 { v28.h }[2], [x22], #0x2\n"
- "tbz x7, #0, 19f\n"
- "ld1 { v28.b }[6], [x22]\n"
+ "ldr x23, [x12, #0x48]\n"
+ "smlal v9.4s, v29.4h, v4.4h\n"
+ "smlal2 v16.4s, v29.8h, v4.8h\n"
+ "add x23, x23, x15\n"
+ "tbz x8, #2, 17f\n"
+ "ld1 { v28.s }[0], [x23], #0x4\n"
+ "tbz x8, #1, 16f\n"
+ "ld1 { v28.h }[2], [x23], #0x2\n"
+ "tbz x8, #0, 19f\n"
+ "ld1 { v28.b }[6], [x23]\n"
"b 19f\n"
"16:" // Oddments: Load (1, 4): Bit 2: Bit 1: Unset
- "tbz x7, #0, 19f\n"
- "ld1 { v28.b }[4], [x22]\n"
+ "tbz x8, #0, 19f\n"
+ "ld1 { v28.b }[4], [x23]\n"
"b 19f\n"
"17:" // Oddments: Load (1, 4): Bit 2: Unset
- "tbz x7, #1, 18f\n"
- "ld1 { v28.h }[0], [x22], #0x2\n"
- "tbz x7, #0, 19f\n"
- "ld1 { v28.b }[2], [x22]\n"
+ "tbz x8, #1, 18f\n"
+ "ld1 { v28.h }[0], [x23], #0x2\n"
+ "tbz x8, #0, 19f\n"
+ "ld1 { v28.b }[2], [x23]\n"
"b 19f\n"
"18:" // Oddments: Load (1, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 19f\n"
- "ld1 { v28.b }[0], [x22]\n"
+ "tbz x8, #0, 19f\n"
+ "ld1 { v28.b }[0], [x23]\n"
"19:" // Oddments: Load (1, 4): Bit 2: End
"usubl v28.8h, v28.8b, v12.8b\n"
- "ldr x21, [x15, #0x50]\n"
- "smlal v10.4s, v28.4h, v5.4h\n"
- "smlal2 v20.4s, v28.8h, v5.8h\n"
- "add x21, x21, x17\n"
- "tbz x7, #2, 21f\n"
+ "ldr x21, [x12, #0x50]\n"
+ "smlal v9.4s, v28.4h, v5.4h\n"
+ "smlal2 v16.4s, v28.8h, v5.8h\n"
+ "add x21, x21, x15\n"
+ "tbz x8, #2, 21f\n"
"ld1 { v27.s }[0], [x21], #0x4\n"
- "tbz x7, #1, 20f\n"
+ "tbz x8, #1, 20f\n"
"ld1 { v27.h }[2], [x21], #0x2\n"
- "tbz x7, #0, 23f\n"
+ "tbz x8, #0, 23f\n"
"ld1 { v27.b }[6], [x21]\n"
"b 23f\n"
"20:" // Oddments: Load (1, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 23f\n"
+ "tbz x8, #0, 23f\n"
"ld1 { v27.b }[4], [x21]\n"
"b 23f\n"
"21:" // Oddments: Load (1, 2): Bit 2: Unset
- "tbz x7, #1, 22f\n"
+ "tbz x8, #1, 22f\n"
"ld1 { v27.h }[0], [x21], #0x2\n"
- "tbz x7, #0, 23f\n"
+ "tbz x8, #0, 23f\n"
"ld1 { v27.b }[2], [x21]\n"
"b 23f\n"
"22:" // Oddments: Load (1, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 23f\n"
+ "tbz x8, #0, 23f\n"
"ld1 { v27.b }[0], [x21]\n"
"23:" // Oddments: Load (1, 2): Bit 2: End
"usubl v27.8h, v27.8b, v12.8b\n"
- "ldr x20, [x15, #0x58]\n"
+ "ldr x19, [x12, #0x58]\n"
"smlal v15.4s, v27.4h, v5.4h\n"
- "smlal2 v17.4s, v27.8h, v5.8h\n"
- "smlal v10.4s, v27.4h, v3.4h\n"
- "smlal2 v20.4s, v27.8h, v3.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 25f\n"
- "ld1 { v26.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 24f\n"
- "ld1 { v26.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 27f\n"
- "ld1 { v26.b }[6], [x20]\n"
+ "smlal2 v10.4s, v27.8h, v5.8h\n"
+ "smlal v9.4s, v27.4h, v3.4h\n"
+ "smlal2 v16.4s, v27.8h, v3.8h\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 25f\n"
+ "ld1 { v26.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 24f\n"
+ "ld1 { v26.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v26.b }[6], [x19]\n"
"b 27f\n"
"24:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 27f\n"
- "ld1 { v26.b }[4], [x20]\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v26.b }[4], [x19]\n"
"b 27f\n"
"25:" // Oddments: Load (3, 0): Bit 2: Unset
- "tbz x7, #1, 26f\n"
- "ld1 { v26.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 27f\n"
- "ld1 { v26.b }[2], [x20]\n"
+ "tbz x8, #1, 26f\n"
+ "ld1 { v26.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v26.b }[2], [x19]\n"
"b 27f\n"
"26:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 27f\n"
- "ld1 { v26.b }[0], [x20]\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v26.b }[0], [x19]\n"
"27:" // Oddments: Load (3, 0): Bit 2: End
"usubl v26.8h, v26.8b, v12.8b\n"
- "ldr x20, [x15, #0x60]\n"
- "smlal v9.4s, v26.4h, v3.4h\n"
- "smlal2 v23.4s, v26.8h, v3.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 29f\n"
+ "ldr x20, [x12, #0x60]\n"
+ "smlal v22.4s, v26.4h, v3.4h\n"
+ "smlal2 v21.4s, v26.8h, v3.8h\n"
+ "add x20, x20, x15\n"
+ "tbz x8, #2, 29f\n"
"ld1 { v25.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 28f\n"
+ "tbz x8, #1, 28f\n"
"ld1 { v25.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 31f\n"
+ "tbz x8, #0, 31f\n"
"ld1 { v25.b }[6], [x20]\n"
"b 31f\n"
"28:" // Oddments: Load (2, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 31f\n"
+ "tbz x8, #0, 31f\n"
"ld1 { v25.b }[4], [x20]\n"
"b 31f\n"
"29:" // Oddments: Load (2, 0): Bit 2: Unset
- "tbz x7, #1, 30f\n"
+ "tbz x8, #1, 30f\n"
"ld1 { v25.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 31f\n"
+ "tbz x8, #0, 31f\n"
"ld1 { v25.b }[2], [x20]\n"
"b 31f\n"
"30:" // Oddments: Load (2, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 31f\n"
+ "tbz x8, #0, 31f\n"
"ld1 { v25.b }[0], [x20]\n"
"31:" // Oddments: Load (2, 0): Bit 2: End
"usubl v25.8h, v25.8b, v12.8b\n"
- "ldr x20, [x15, #0x68]\n"
+ "ldr x19, [x12, #0x68]\n"
"smlal v15.4s, v25.4h, v6.4h\n"
- "smlal2 v17.4s, v25.8h, v6.8h\n"
- "smlal v9.4s, v25.4h, v0.4h\n"
- "smlal2 v23.4s, v25.8h, v0.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 33f\n"
- "ld1 { v29.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 32f\n"
- "ld1 { v29.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 35f\n"
- "ld1 { v29.b }[6], [x20]\n"
+ "smlal2 v10.4s, v25.8h, v6.8h\n"
+ "smlal v22.4s, v25.4h, v0.4h\n"
+ "smlal2 v21.4s, v25.8h, v0.8h\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 33f\n"
+ "ld1 { v29.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 32f\n"
+ "ld1 { v29.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v29.b }[6], [x19]\n"
"b 35f\n"
"32:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset
- "tbz x7, #0, 35f\n"
- "ld1 { v29.b }[4], [x20]\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v29.b }[4], [x19]\n"
"b 35f\n"
"33:" // Oddments: Load (3, 1): Bit 2: Unset
- "tbz x7, #1, 34f\n"
- "ld1 { v29.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 35f\n"
- "ld1 { v29.b }[2], [x20]\n"
+ "tbz x8, #1, 34f\n"
+ "ld1 { v29.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v29.b }[2], [x19]\n"
"b 35f\n"
"34:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 35f\n"
- "ld1 { v29.b }[0], [x20]\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v29.b }[0], [x19]\n"
"35:" // Oddments: Load (3, 1): Bit 2: End
"usubl v29.8h, v29.8b, v12.8b\n"
- "ldr x21, [x15, #0x70]\n"
- "smlal v9.4s, v29.4h, v4.4h\n"
- "smlal2 v23.4s, v29.8h, v4.8h\n"
- "add x21, x21, x17\n"
- "tbz x7, #2, 37f\n"
- "ld1 { v24.s }[0], [x21], #0x4\n"
- "tbz x7, #1, 36f\n"
- "ld1 { v24.h }[2], [x21], #0x2\n"
- "tbz x7, #0, 39f\n"
- "ld1 { v24.b }[6], [x21]\n"
+ "ldr x19, [x12, #0x70]\n"
+ "smlal v22.4s, v29.4h, v4.4h\n"
+ "smlal2 v21.4s, v29.8h, v4.8h\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 37f\n"
+ "ld1 { v24.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 36f\n"
+ "ld1 { v24.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v24.b }[6], [x19]\n"
"b 39f\n"
"36:" // Oddments: Load (2, 1): Bit 2: Bit 1: Unset
- "tbz x7, #0, 39f\n"
- "ld1 { v24.b }[4], [x21]\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v24.b }[4], [x19]\n"
"b 39f\n"
"37:" // Oddments: Load (2, 1): Bit 2: Unset
- "tbz x7, #1, 38f\n"
- "ld1 { v24.h }[0], [x21], #0x2\n"
- "tbz x7, #0, 39f\n"
- "ld1 { v24.b }[2], [x21]\n"
+ "tbz x8, #1, 38f\n"
+ "ld1 { v24.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v24.b }[2], [x19]\n"
"b 39f\n"
"38:" // Oddments: Load (2, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 39f\n"
- "ld1 { v24.b }[0], [x21]\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v24.b }[0], [x19]\n"
"39:" // Oddments: Load (2, 1): Bit 2: End
"usubl v24.8h, v24.8b, v12.8b\n"
- "ldr x23, [x15, #0x78]\n"
+ "ldr x22, [x12, #0x78]\n"
"smlal v15.4s, v24.4h, v7.4h\n"
- "smlal2 v17.4s, v24.8h, v7.8h\n"
- "smlal v9.4s, v24.4h, v1.4h\n"
- "smlal2 v23.4s, v24.8h, v1.8h\n"
- "add x23, x23, x17\n"
- "tbz x7, #2, 41f\n"
- "ld1 { v27.s }[0], [x23], #0x4\n"
- "tbz x7, #1, 40f\n"
- "ld1 { v27.h }[2], [x23], #0x2\n"
- "tbz x7, #0, 43f\n"
- "ld1 { v27.b }[6], [x23]\n"
+ "smlal2 v10.4s, v24.8h, v7.8h\n"
+ "smlal v22.4s, v24.4h, v1.4h\n"
+ "smlal2 v21.4s, v24.8h, v1.8h\n"
+ "add x22, x22, x15\n"
+ "tbz x8, #2, 41f\n"
+ "ld1 { v27.s }[0], [x22], #0x4\n"
+ "tbz x8, #1, 40f\n"
+ "ld1 { v27.h }[2], [x22], #0x2\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v27.b }[6], [x22]\n"
"b 43f\n"
"40:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 43f\n"
- "ld1 { v27.b }[4], [x23]\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v27.b }[4], [x22]\n"
"b 43f\n"
"41:" // Oddments: Load (3, 3): Bit 2: Unset
- "tbz x7, #1, 42f\n"
- "ld1 { v27.h }[0], [x23], #0x2\n"
- "tbz x7, #0, 43f\n"
- "ld1 { v27.b }[2], [x23]\n"
+ "tbz x8, #1, 42f\n"
+ "ld1 { v27.h }[0], [x22], #0x2\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v27.b }[2], [x22]\n"
"b 43f\n"
"42:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 43f\n"
- "ld1 { v27.b }[0], [x23]\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v27.b }[0], [x22]\n"
"43:" // Oddments: Load (3, 3): Bit 2: End
"usubl v27.8h, v27.8b, v12.8b\n"
- "ldr x21, [x15, #0x80]\n"
- "smlal v21.4s, v27.4h, v4.4h\n"
- "smlal2 v22.4s, v27.8h, v4.8h\n"
- "add x21, x21, x17\n"
- "tbz x7, #2, 45f\n"
+ "ldr x21, [x12, #0x80]\n"
+ "smlal v23.4s, v27.4h, v4.4h\n"
+ "smlal2 v18.4s, v27.8h, v4.8h\n"
+ "add x21, x21, x15\n"
+ "tbz x8, #2, 45f\n"
"ld1 { v28.s }[0], [x21], #0x4\n"
- "tbz x7, #1, 44f\n"
+ "tbz x8, #1, 44f\n"
"ld1 { v28.h }[2], [x21], #0x2\n"
- "tbz x7, #0, 47f\n"
+ "tbz x8, #0, 47f\n"
"ld1 { v28.b }[6], [x21]\n"
"b 47f\n"
"44:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 47f\n"
+ "tbz x8, #0, 47f\n"
"ld1 { v28.b }[4], [x21]\n"
"b 47f\n"
"45:" // Oddments: Load (2, 3): Bit 2: Unset
- "tbz x7, #1, 46f\n"
+ "tbz x8, #1, 46f\n"
"ld1 { v28.h }[0], [x21], #0x2\n"
- "tbz x7, #0, 47f\n"
+ "tbz x8, #0, 47f\n"
"ld1 { v28.b }[2], [x21]\n"
"b 47f\n"
"46:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 47f\n"
+ "tbz x8, #0, 47f\n"
"ld1 { v28.b }[0], [x21]\n"
"47:" // Oddments: Load (2, 3): Bit 2: End
"usubl v28.8h, v28.8b, v12.8b\n"
- "ldr x22, [x15, #0x88]\n"
- "smlal v10.4s, v28.4h, v7.4h\n"
- "smlal2 v20.4s, v28.8h, v7.8h\n"
- "smlal v21.4s, v28.4h, v1.4h\n"
- "smlal2 v22.4s, v28.8h, v1.8h\n"
- "add x22, x22, x17\n"
- "tbz x7, #2, 49f\n"
- "ld1 { v26.s }[0], [x22], #0x4\n"
- "tbz x7, #1, 48f\n"
- "ld1 { v26.h }[2], [x22], #0x2\n"
- "tbz x7, #0, 51f\n"
- "ld1 { v26.b }[6], [x22]\n"
+ "ldr x20, [x12, #0x88]\n"
+ "smlal v9.4s, v28.4h, v7.4h\n"
+ "smlal2 v16.4s, v28.8h, v7.8h\n"
+ "smlal v23.4s, v28.4h, v1.4h\n"
+ "smlal2 v18.4s, v28.8h, v1.8h\n"
+ "add x20, x20, x15\n"
+ "tbz x8, #2, 49f\n"
+ "ld1 { v26.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 48f\n"
+ "ld1 { v26.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v26.b }[6], [x20]\n"
"b 51f\n"
"48:" // Oddments: Load (3, 4): Bit 2: Bit 1: Unset
- "tbz x7, #0, 51f\n"
- "ld1 { v26.b }[4], [x22]\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v26.b }[4], [x20]\n"
"b 51f\n"
"49:" // Oddments: Load (3, 4): Bit 2: Unset
- "tbz x7, #1, 50f\n"
- "ld1 { v26.h }[0], [x22], #0x2\n"
- "tbz x7, #0, 51f\n"
- "ld1 { v26.b }[2], [x22]\n"
+ "tbz x8, #1, 50f\n"
+ "ld1 { v26.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v26.b }[2], [x20]\n"
"b 51f\n"
"50:" // Oddments: Load (3, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 51f\n"
- "ld1 { v26.b }[0], [x22]\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v26.b }[0], [x20]\n"
"51:" // Oddments: Load (3, 4): Bit 2: End
"usubl v26.8h, v26.8b, v12.8b\n"
- "ldr x24, [x15, #0x90]\n"
- "smlal v21.4s, v26.4h, v5.4h\n"
- "smlal2 v22.4s, v26.8h, v5.8h\n"
- "add x24, x24, x17\n"
- "tbz x7, #2, 53f\n"
- "ld1 { v25.s }[0], [x24], #0x4\n"
- "tbz x7, #1, 52f\n"
- "ld1 { v25.h }[2], [x24], #0x2\n"
- "tbz x7, #0, 55f\n"
- "ld1 { v25.b }[6], [x24]\n"
+ "ldr x23, [x12, #0x90]\n"
+ "smlal v23.4s, v26.4h, v5.4h\n"
+ "smlal2 v18.4s, v26.8h, v5.8h\n"
+ "add x23, x23, x15\n"
+ "tbz x8, #2, 53f\n"
+ "ld1 { v25.s }[0], [x23], #0x4\n"
+ "tbz x8, #1, 52f\n"
+ "ld1 { v25.h }[2], [x23], #0x2\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v25.b }[6], [x23]\n"
"b 55f\n"
"52:" // Oddments: Load (4, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 55f\n"
- "ld1 { v25.b }[4], [x24]\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v25.b }[4], [x23]\n"
"b 55f\n"
"53:" // Oddments: Load (4, 0): Bit 2: Unset
- "tbz x7, #1, 54f\n"
- "ld1 { v25.h }[0], [x24], #0x2\n"
- "tbz x7, #0, 55f\n"
- "ld1 { v25.b }[2], [x24]\n"
+ "tbz x8, #1, 54f\n"
+ "ld1 { v25.h }[0], [x23], #0x2\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v25.b }[2], [x23]\n"
"b 55f\n"
"54:" // Oddments: Load (4, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 55f\n"
- "ld1 { v25.b }[0], [x24]\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v25.b }[0], [x23]\n"
"55:" // Oddments: Load (4, 0): Bit 2: End
"usubl v25.8h, v25.8b, v12.8b\n"
- "ldr x25, [x15, #0x98]\n"
- "smlal v9.4s, v25.4h, v6.4h\n"
- "smlal2 v23.4s, v25.8h, v6.8h\n"
- "add x25, x25, x17\n"
- "tbz x7, #2, 57f\n"
- "ld1 { v29.s }[0], [x25], #0x4\n"
- "tbz x7, #1, 56f\n"
- "ld1 { v29.h }[2], [x25], #0x2\n"
- "tbz x7, #0, 59f\n"
- "ld1 { v29.b }[6], [x25]\n"
+ "ldr x24, [x12, #0x98]\n"
+ "smlal v22.4s, v25.4h, v6.4h\n"
+ "smlal2 v21.4s, v25.8h, v6.8h\n"
+ "add x24, x24, x15\n"
+ "tbz x8, #2, 57f\n"
+ "ld1 { v29.s }[0], [x24], #0x4\n"
+ "tbz x8, #1, 56f\n"
+ "ld1 { v29.h }[2], [x24], #0x2\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v29.b }[6], [x24]\n"
"b 59f\n"
"56:" // Oddments: Load (2, 4): Bit 2: Bit 1: Unset
- "tbz x7, #0, 59f\n"
- "ld1 { v29.b }[4], [x25]\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v29.b }[4], [x24]\n"
"b 59f\n"
"57:" // Oddments: Load (2, 4): Bit 2: Unset
- "tbz x7, #1, 58f\n"
- "ld1 { v29.h }[0], [x25], #0x2\n"
- "tbz x7, #0, 59f\n"
- "ld1 { v29.b }[2], [x25]\n"
+ "tbz x8, #1, 58f\n"
+ "ld1 { v29.h }[0], [x24], #0x2\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v29.b }[2], [x24]\n"
"b 59f\n"
"58:" // Oddments: Load (2, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 59f\n"
- "ld1 { v29.b }[0], [x25]\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v29.b }[0], [x24]\n"
"59:" // Oddments: Load (2, 4): Bit 2: End
"usubl v29.8h, v29.8b, v12.8b\n"
- "ldr x20, [x15, #0xa0]\n"
- "smlal v10.4s, v29.4h, v8.4h\n"
- "smlal2 v20.4s, v29.8h, v8.8h\n"
- "smlal v21.4s, v29.4h, v2.4h\n"
- "smlal2 v22.4s, v29.8h, v2.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 61f\n"
- "ld1 { v27.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 60f\n"
- "ld1 { v27.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 63f\n"
- "ld1 { v27.b }[6], [x20]\n"
+ "ldr x19, [x12, #0xa0]\n"
+ "smlal v9.4s, v29.4h, v8.4h\n"
+ "smlal2 v16.4s, v29.8h, v8.8h\n"
+ "smlal v23.4s, v29.4h, v2.4h\n"
+ "smlal2 v18.4s, v29.8h, v2.8h\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 61f\n"
+ "ld1 { v27.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 60f\n"
+ "ld1 { v27.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 63f\n"
+ "ld1 { v27.b }[6], [x19]\n"
"b 63f\n"
"60:" // Oddments: Load (4, 1): Bit 2: Bit 1: Unset
- "tbz x7, #0, 63f\n"
- "ld1 { v27.b }[4], [x20]\n"
+ "tbz x8, #0, 63f\n"
+ "ld1 { v27.b }[4], [x19]\n"
"b 63f\n"
"61:" // Oddments: Load (4, 1): Bit 2: Unset
- "tbz x7, #1, 62f\n"
- "ld1 { v27.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 63f\n"
- "ld1 { v27.b }[2], [x20]\n"
+ "tbz x8, #1, 62f\n"
+ "ld1 { v27.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 63f\n"
+ "ld1 { v27.b }[2], [x19]\n"
"b 63f\n"
"62:" // Oddments: Load (4, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 63f\n"
- "ld1 { v27.b }[0], [x20]\n"
+ "tbz x8, #0, 63f\n"
+ "ld1 { v27.b }[0], [x19]\n"
"63:" // Oddments: Load (4, 1): Bit 2: End
"usubl v27.8h, v27.8b, v12.8b\n"
- "ldr x23, [x15, #0xa8]\n"
- "smlal v9.4s, v27.4h, v7.4h\n"
- "smlal2 v23.4s, v27.8h, v7.8h\n"
- "add x23, x23, x17\n"
- "tbz x7, #2, 65f\n"
- "ld1 { v24.s }[0], [x23], #0x4\n"
- "tbz x7, #1, 64f\n"
- "ld1 { v24.h }[2], [x23], #0x2\n"
- "tbz x7, #0, 67f\n"
- "ld1 { v24.b }[6], [x23]\n"
+ "ldr x22, [x12, #0xa8]\n"
+ "smlal v22.4s, v27.4h, v7.4h\n"
+ "smlal2 v21.4s, v27.8h, v7.8h\n"
+ "add x22, x22, x15\n"
+ "tbz x8, #2, 65f\n"
+ "ld1 { v24.s }[0], [x22], #0x4\n"
+ "tbz x8, #1, 64f\n"
+ "ld1 { v24.h }[2], [x22], #0x2\n"
+ "tbz x8, #0, 67f\n"
+ "ld1 { v24.b }[6], [x22]\n"
"b 67f\n"
"64:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 67f\n"
- "ld1 { v24.b }[4], [x23]\n"
+ "tbz x8, #0, 67f\n"
+ "ld1 { v24.b }[4], [x22]\n"
"b 67f\n"
"65:" // Oddments: Load (3, 2): Bit 2: Unset
- "tbz x7, #1, 66f\n"
- "ld1 { v24.h }[0], [x23], #0x2\n"
- "tbz x7, #0, 67f\n"
- "ld1 { v24.b }[2], [x23]\n"
+ "tbz x8, #1, 66f\n"
+ "ld1 { v24.h }[0], [x22], #0x2\n"
+ "tbz x8, #0, 67f\n"
+ "ld1 { v24.b }[2], [x22]\n"
"b 67f\n"
"66:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 67f\n"
- "ld1 { v24.b }[0], [x23]\n"
+ "tbz x8, #0, 67f\n"
+ "ld1 { v24.b }[0], [x22]\n"
"67:" // Oddments: Load (3, 2): Bit 2: End
"usubl v24.8h, v24.8b, v12.8b\n"
- "ldr x22, [x15, #0xb0]\n"
- "smlal v9.4s, v24.4h, v5.4h\n"
- "smlal2 v23.4s, v24.8h, v5.8h\n"
- "smlal v21.4s, v24.4h, v3.4h\n"
- "smlal2 v22.4s, v24.8h, v3.8h\n"
- "add x22, x22, x17\n"
- "tbz x7, #2, 69f\n"
- "ld1 { v26.s }[0], [x22], #0x4\n"
- "tbz x7, #1, 68f\n"
- "ld1 { v26.h }[2], [x22], #0x2\n"
- "tbz x7, #0, 71f\n"
- "ld1 { v26.b }[6], [x22]\n"
+ "ldr x21, [x12, #0xb0]\n"
+ "smlal v22.4s, v24.4h, v5.4h\n"
+ "smlal2 v21.4s, v24.8h, v5.8h\n"
+ "smlal v23.4s, v24.4h, v3.4h\n"
+ "smlal2 v18.4s, v24.8h, v3.8h\n"
+ "add x21, x21, x15\n"
+ "tbz x8, #2, 69f\n"
+ "ld1 { v26.s }[0], [x21], #0x4\n"
+ "tbz x8, #1, 68f\n"
+ "ld1 { v26.h }[2], [x21], #0x2\n"
+ "tbz x8, #0, 71f\n"
+ "ld1 { v26.b }[6], [x21]\n"
"b 71f\n"
"68:" // Oddments: Load (4, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 71f\n"
- "ld1 { v26.b }[4], [x22]\n"
+ "tbz x8, #0, 71f\n"
+ "ld1 { v26.b }[4], [x21]\n"
"b 71f\n"
"69:" // Oddments: Load (4, 3): Bit 2: Unset
- "tbz x7, #1, 70f\n"
- "ld1 { v26.h }[0], [x22], #0x2\n"
- "tbz x7, #0, 71f\n"
- "ld1 { v26.b }[2], [x22]\n"
+ "tbz x8, #1, 70f\n"
+ "ld1 { v26.h }[0], [x21], #0x2\n"
+ "tbz x8, #0, 71f\n"
+ "ld1 { v26.b }[2], [x21]\n"
"b 71f\n"
"70:" // Oddments: Load (4, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 71f\n"
- "ld1 { v26.b }[0], [x22]\n"
+ "tbz x8, #0, 71f\n"
+ "ld1 { v26.b }[0], [x21]\n"
"71:" // Oddments: Load (4, 3): Bit 2: End
"usubl v26.8h, v26.8b, v12.8b\n"
- "ldr x21, [x15, #0xb8]\n"
- "smlal v21.4s, v26.4h, v7.4h\n"
- "smlal2 v22.4s, v26.8h, v7.8h\n"
- "add x21, x21, x17\n"
- "tbz x7, #2, 73f\n"
- "ld1 { v25.s }[0], [x21], #0x4\n"
- "tbz x7, #1, 72f\n"
- "ld1 { v25.h }[2], [x21], #0x2\n"
- "tbz x7, #0, 75f\n"
- "ld1 { v25.b }[6], [x21]\n"
+ "ldr x20, [x12, #0xb8]\n"
+ "smlal v23.4s, v26.4h, v7.4h\n"
+ "smlal2 v18.4s, v26.8h, v7.8h\n"
+ "add x20, x20, x15\n"
+ "tbz x8, #2, 73f\n"
+ "ld1 { v25.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 72f\n"
+ "ld1 { v25.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 75f\n"
+ "ld1 { v25.b }[6], [x20]\n"
"b 75f\n"
"72:" // Oddments: Load (4, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 75f\n"
- "ld1 { v25.b }[4], [x21]\n"
+ "tbz x8, #0, 75f\n"
+ "ld1 { v25.b }[4], [x20]\n"
"b 75f\n"
"73:" // Oddments: Load (4, 2): Bit 2: Unset
- "tbz x7, #1, 74f\n"
- "ld1 { v25.h }[0], [x21], #0x2\n"
- "tbz x7, #0, 75f\n"
- "ld1 { v25.b }[2], [x21]\n"
+ "tbz x8, #1, 74f\n"
+ "ld1 { v25.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 75f\n"
+ "ld1 { v25.b }[2], [x20]\n"
"b 75f\n"
"74:" // Oddments: Load (4, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 75f\n"
- "ld1 { v25.b }[0], [x21]\n"
+ "tbz x8, #0, 75f\n"
+ "ld1 { v25.b }[0], [x20]\n"
"75:" // Oddments: Load (4, 2): Bit 2: End
"usubl v25.8h, v25.8b, v12.8b\n"
- "ldr x20, [x15, #0xc0]\n"
- "smlal v9.4s, v25.4h, v8.4h\n"
- "smlal2 v23.4s, v25.8h, v8.8h\n"
- "smlal v21.4s, v25.4h, v6.4h\n"
- "smlal2 v22.4s, v25.8h, v6.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 77f\n"
- "ld1 { v29.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 76f\n"
- "ld1 { v29.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 79f\n"
- "ld1 { v29.b }[6], [x20]\n"
+ "ldr x19, [x12, #0xc0]\n"
+ "smlal v22.4s, v25.4h, v8.4h\n"
+ "smlal2 v21.4s, v25.8h, v8.8h\n"
+ "smlal v23.4s, v25.4h, v6.4h\n"
+ "smlal2 v18.4s, v25.8h, v6.8h\n"
+ "add x19, x19, x15\n"
+ "tbz x8, #2, 77f\n"
+ "ld1 { v29.s }[0], [x19], #0x4\n"
+ "tbz x8, #1, 76f\n"
+ "ld1 { v29.h }[2], [x19], #0x2\n"
+ "tbz x8, #0, 79f\n"
+ "ld1 { v29.b }[6], [x19]\n"
"b 79f\n"
"76:" // Oddments: Load (4, 4): Bit 2: Bit 1: Unset
- "tbz x7, #0, 79f\n"
- "ld1 { v29.b }[4], [x20]\n"
+ "tbz x8, #0, 79f\n"
+ "ld1 { v29.b }[4], [x19]\n"
"b 79f\n"
"77:" // Oddments: Load (4, 4): Bit 2: Unset
- "tbz x7, #1, 78f\n"
- "ld1 { v29.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 79f\n"
- "ld1 { v29.b }[2], [x20]\n"
+ "tbz x8, #1, 78f\n"
+ "ld1 { v29.h }[0], [x19], #0x2\n"
+ "tbz x8, #0, 79f\n"
+ "ld1 { v29.b }[2], [x19]\n"
"b 79f\n"
"78:" // Oddments: Load (4, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 79f\n"
- "ld1 { v29.b }[0], [x20]\n"
+ "tbz x8, #0, 79f\n"
+ "ld1 { v29.b }[0], [x19]\n"
"79:" // Oddments: Load (4, 4): Bit 2: End
"usubl v29.8h, v29.8b, v12.8b\n"
- "smlal v21.4s, v29.4h, v8.4h\n"
- "smlal2 v22.4s, v29.8h, v8.8h\n"
- "tbz x7, #2, 81f\n"
+ "smlal v23.4s, v29.4h, v8.4h\n"
+ "smlal2 v18.4s, v29.8h, v8.8h\n"
+ "tbz x8, #2, 81f\n"
"ld1 { v19.4s }, [x13], #0x10\n"
- "ld1 { v18.4s }, [x12], #0x10\n"
- "tbz x7, #1, 80f\n"
- "ld1 { v30.d }[0], [x13], #0x8\n"
- "ld1 { v31.d }[0], [x12], #0x8\n"
- "tbz x7, #0, 83f\n"
- "ld1 { v30.s }[2], [x13]\n"
- "ld1 { v31.s }[2], [x12]\n"
+ "ld1 { v0.4s }, [x11], #0x10\n"
+ "tbz x8, #1, 80f\n"
+ "ld1 { v4.d }[0], [x13], #0x8\n"
+ "ld1 { v31.d }[0], [x11], #0x8\n"
+ "tbz x8, #0, 83f\n"
+ "ld1 { v4.s }[2], [x13]\n"
+ "ld1 { v31.s }[2], [x11]\n"
"b 83f\n"
"80:" // Oddments: Load requant params: Bit 2: Bit 1: Unset
- "tbz x7, #0, 83f\n"
- "ld1 { v30.s }[0], [x13]\n"
- "ld1 { v31.s }[0], [x12]\n"
+ "tbz x8, #0, 83f\n"
+ "ld1 { v4.s }[0], [x13]\n"
+ "ld1 { v31.s }[0], [x11]\n"
"b 83f\n"
"81:" // Oddments: Load requant params: Bit 2: Unset
- "tbz x7, #1, 82f\n"
+ "tbz x8, #1, 82f\n"
"ld1 { v19.d }[0], [x13], #0x8\n"
- "ld1 { v18.d }[0], [x12], #0x8\n"
- "tbz x7, #0, 83f\n"
+ "ld1 { v0.d }[0], [x11], #0x8\n"
+ "tbz x8, #0, 83f\n"
"ld1 { v19.s }[2], [x13]\n"
- "ld1 { v18.s }[2], [x12]\n"
+ "ld1 { v0.s }[2], [x11]\n"
"b 83f\n"
"82:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 83f\n"
+ "tbz x8, #0, 83f\n"
"ld1 { v19.s }[0], [x13]\n"
- "ld1 { v18.s }[0], [x12]\n"
+ "ld1 { v0.s }[0], [x11]\n"
"83:" // Oddments: Load requant params: Bit 2: End
"sqrdmulh v15.4s, v15.4s, v19.4s\n"
- "and v0.16b, v15.16b, v18.16b\n"
- "add x11, x11, x16\n"
- "add x10, x10, x16\n"
- "sqrdmulh v17.4s, v17.4s, v30.4s\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "add x9, x9, x16\n"
- "add x28, x28, x16\n"
- "and v7.16b, v17.16b, v31.16b\n"
- "sqrdmulh v10.4s, v10.4s, v19.4s\n"
"sqrdmulh v9.4s, v9.4s, v19.4s\n"
- "sqrdmulh v21.4s, v21.4s, v19.4s\n"
- "sqadd v15.4s, v15.4s, v0.4s\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "and v19.16b, v10.16b, v18.16b\n"
- "sqrdmulh v20.4s, v20.4s, v30.4s\n"
- "and v27.16b, v9.16b, v18.16b\n"
- "sqrdmulh v23.4s, v23.4s, v30.4s\n"
- "and v0.16b, v21.16b, v18.16b\n"
- "sqrdmulh v22.4s, v22.4s, v30.4s\n"
- "sqadd v17.4s, v17.4s, v7.4s\n"
+ "add x10, x10, x14\n"
+ "add x9, x9, x14\n"
+ "sqrdmulh v22.4s, v22.4s, v19.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v19.4s\n"
+ "add x28, x28, x14\n"
+ "add x27, x27, x14\n"
+ "and v30.16b, v15.16b, v0.16b\n"
+ "sqrdmulh v10.4s, v10.4s, v4.4s\n"
+ "and v28.16b, v9.16b, v0.16b\n"
+ "sqrdmulh v16.4s, v16.4s, v4.4s\n"
+ "and v29.16b, v22.16b, v0.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v4.4s\n"
+ "and v20.16b, v23.16b, v0.16b\n"
+ "sqrdmulh v18.4s, v18.4s, v4.4s\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "and v19.16b, v10.16b, v31.16b\n"
+ "sshr v28.4s, v28.4s, #0x1f\n"
+ "and v4.16b, v16.16b, v31.16b\n"
+ "sshr v29.4s, v29.4s, #0x1f\n"
+ "and v5.16b, v21.16b, v31.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v26.16b, v18.16b, v31.16b\n"
+ "sqadd v15.4s, v15.4s, v30.4s\n"
"sshr v19.4s, v19.4s, #0x1f\n"
- "and v5.16b, v20.16b, v31.16b\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "and v4.16b, v23.16b, v31.16b\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "and v7.16b, v22.16b, v31.16b\n"
- "sqadd v10.4s, v10.4s, v19.4s\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sqadd v9.4s, v9.4s, v27.4s\n"
+ "sqadd v9.4s, v9.4s, v28.4s\n"
"sshr v4.4s, v4.4s, #0x1f\n"
- "sqadd v21.4s, v21.4s, v0.4s\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "srshl v15.4s, v15.4s, v18.4s\n"
- "srshl v10.4s, v10.4s, v18.4s\n"
- "sqadd v20.4s, v20.4s, v5.4s\n"
- "srshl v9.4s, v9.4s, v18.4s\n"
- "sqadd v23.4s, v23.4s, v4.4s\n"
- "srshl v21.4s, v21.4s, v18.4s\n"
- "sqadd v22.4s, v22.4s, v7.4s\n"
- "srshl v17.4s, v17.4s, v31.4s\n"
+ "sqadd v22.4s, v22.4s, v29.4s\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
+ "sqadd v23.4s, v23.4s, v20.4s\n"
+ "sshr v26.4s, v26.4s, #0x1f\n"
+ "srshl v15.4s, v15.4s, v0.4s\n"
+ "sqadd v10.4s, v10.4s, v19.4s\n"
+ "srshl v9.4s, v9.4s, v0.4s\n"
+ "sqadd v16.4s, v16.4s, v4.4s\n"
+ "srshl v22.4s, v22.4s, v0.4s\n"
+ "sqadd v21.4s, v21.4s, v5.4s\n"
+ "srshl v23.4s, v23.4s, v0.4s\n"
+ "sqadd v18.4s, v18.4s, v26.4s\n"
+ "srshl v10.4s, v10.4s, v31.4s\n"
"sqxtn v15.4h, v15.4s\n"
- "srshl v20.4s, v20.4s, v31.4s\n"
- "sqxtn v10.4h, v10.4s\n"
- "srshl v23.4s, v23.4s, v31.4s\n"
+ "srshl v16.4s, v16.4s, v31.4s\n"
"sqxtn v9.4h, v9.4s\n"
- "srshl v22.4s, v22.4s, v31.4s\n"
- "sqxtn v21.4h, v21.4s\n"
- "sqxtn2 v15.8h, v17.4s\n"
- "sqxtn2 v10.8h, v20.4s\n"
- "sqxtn2 v9.8h, v23.4s\n"
- "sqxtn2 v21.8h, v22.4s\n"
+ "srshl v21.4s, v21.4s, v31.4s\n"
+ "sqxtn v22.4h, v22.4s\n"
+ "srshl v18.4s, v18.4s, v31.4s\n"
+ "sqxtn v23.4h, v23.4s\n"
+ "sqxtn2 v15.8h, v10.4s\n"
+ "sqxtn2 v9.8h, v16.4s\n"
+ "sqxtn2 v22.8h, v21.4s\n"
+ "sqxtn2 v23.8h, v18.4s\n"
"sqadd v15.8h, v15.8h, v11.8h\n"
- "sqadd v10.8h, v10.8h, v11.8h\n"
"sqadd v9.8h, v9.8h, v11.8h\n"
- "sqadd v21.8h, v21.8h, v11.8h\n"
- "smax v15.8h, v15.8h, v16.8h\n"
- "smax v10.8h, v10.8h, v16.8h\n"
- "smax v9.8h, v9.8h, v16.8h\n"
- "smax v21.8h, v21.8h, v16.8h\n"
+ "sqadd v22.8h, v22.8h, v11.8h\n"
+ "sqadd v23.8h, v23.8h, v11.8h\n"
+ "smax v15.8h, v15.8h, v17.8h\n"
+ "smax v9.8h, v9.8h, v17.8h\n"
+ "smax v22.8h, v22.8h, v17.8h\n"
+ "smax v23.8h, v23.8h, v17.8h\n"
"smin v15.8h, v15.8h, v14.8h\n"
- "smin v10.8h, v10.8h, v14.8h\n"
"smin v9.8h, v9.8h, v14.8h\n"
- "smin v21.8h, v21.8h, v14.8h\n"
+ "smin v22.8h, v22.8h, v14.8h\n"
+ "smin v23.8h, v23.8h, v14.8h\n"
"uzp1 v15.16b, v15.16b, v15.16b\n"
- "uzp1 v10.16b, v10.16b, v10.16b\n"
"uzp1 v9.16b, v9.16b, v9.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "tbz x7, #2, 85f\n"
- "st1 { v15.s }[0], [x11], #0x4\n"
- "st1 { v10.s }[0], [x10], #0x4\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "tbz x8, #2, 85f\n"
+ "st1 { v15.s }[0], [x10], #0x4\n"
"st1 { v9.s }[0], [x9], #0x4\n"
- "st1 { v21.s }[0], [x28], #0x4\n"
- "tbz x7, #1, 84f\n"
- "st1 { v15.h }[2], [x11], #0x2\n"
- "st1 { v10.h }[2], [x10], #0x2\n"
+ "st1 { v22.s }[0], [x28], #0x4\n"
+ "st1 { v23.s }[0], [x27], #0x4\n"
+ "tbz x8, #1, 84f\n"
+ "st1 { v15.h }[2], [x10], #0x2\n"
"st1 { v9.h }[2], [x9], #0x2\n"
- "st1 { v21.h }[2], [x28], #0x2\n"
- "tbz x7, #0, 87f\n"
- "st1 { v15.b }[6], [x11], #0x1\n"
- "st1 { v10.b }[6], [x10], #0x1\n"
+ "st1 { v22.h }[2], [x28], #0x2\n"
+ "st1 { v23.h }[2], [x27], #0x2\n"
+ "tbz x8, #0, 87f\n"
+ "st1 { v15.b }[6], [x10], #0x1\n"
"st1 { v9.b }[6], [x9], #0x1\n"
- "st1 { v21.b }[6], [x28], #0x1\n"
+ "st1 { v22.b }[6], [x28], #0x1\n"
+ "st1 { v23.b }[6], [x27], #0x1\n"
"b 87f\n"
"84:" // Oddments: Bit 2: Bit 1: Unset
- "tbz x7, #0, 87f\n"
- "st1 { v15.b }[4], [x11], #0x1\n"
- "st1 { v10.b }[4], [x10], #0x1\n"
+ "tbz x8, #0, 87f\n"
+ "st1 { v15.b }[4], [x10], #0x1\n"
"st1 { v9.b }[4], [x9], #0x1\n"
- "st1 { v21.b }[4], [x28], #0x1\n"
+ "st1 { v22.b }[4], [x28], #0x1\n"
+ "st1 { v23.b }[4], [x27], #0x1\n"
"b 87f\n"
"85:" // Oddments: Bit 2: Unset
- "tbz x7, #1, 86f\n"
- "st1 { v15.h }[0], [x11], #0x2\n"
- "st1 { v10.h }[0], [x10], #0x2\n"
+ "tbz x8, #1, 86f\n"
+ "st1 { v15.h }[0], [x10], #0x2\n"
"st1 { v9.h }[0], [x9], #0x2\n"
- "st1 { v21.h }[0], [x28], #0x2\n"
- "tbz x7, #0, 87f\n"
- "st1 { v15.b }[2], [x11], #0x1\n"
- "st1 { v10.b }[2], [x10], #0x1\n"
+ "st1 { v22.h }[0], [x28], #0x2\n"
+ "st1 { v23.h }[0], [x27], #0x2\n"
+ "tbz x8, #0, 87f\n"
+ "st1 { v15.b }[2], [x10], #0x1\n"
"st1 { v9.b }[2], [x9], #0x1\n"
- "st1 { v21.b }[2], [x28], #0x1\n"
+ "st1 { v22.b }[2], [x28], #0x1\n"
+ "st1 { v23.b }[2], [x27], #0x1\n"
"b 87f\n"
"86:" // Oddments: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 87f\n"
- "st1 { v15.b }[0], [x11], #0x1\n"
- "st1 { v10.b }[0], [x10], #0x1\n"
+ "tbz x8, #0, 87f\n"
+ "st1 { v15.b }[0], [x10], #0x1\n"
"st1 { v9.b }[0], [x9], #0x1\n"
- "st1 { v21.b }[0], [x28], #0x1\n"
+ "st1 { v22.b }[0], [x28], #0x1\n"
+ "st1 { v23.b }[0], [x27], #0x1\n"
"87:" // Oddments: Bit 2: End
"88:" // End
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
index 1ce037b68c..082d8dd3e1 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -111,2073 +111,2073 @@ void a64_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "ldr x1, [%x[params], %[offsetof_Params_n_channels]]\n"
- "ldr x13, [%x[params], %[offsetof_Params_requant]]\n"
- "lsr x2, x1, #0x3\n"
- "add x3, x13, %[offsetof_Requantize32_a_offset]\n"
- "ld1r { v9.16b }, [x3]\n"
- "ldr x24, [%x[params], %[offsetof_Params_outptrs]]\n"
- "add x11, x13, %[offsetof_Requantize32_b_offset]\n"
- "add x5, x13, %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v15.16b }, [x11]\n"
- "ld1r { v14.8h }, [x5]\n"
- "add x3, x13, %[offsetof_Requantize32_minval]\n"
- "add x15, x13, %[offsetof_Requantize32_maxval]\n"
- "ld1r { v12.8h }, [x3]\n"
- "ld1r { v11.8h }, [x15]\n"
- "mov x0, #0x0\n"
- "mov x10, #0x0\n"
- "add x4, %x[params], %[offsetof_Params_inptrs]\n"
- "ldr x3, [%x[params], %[offsetof_Params_weights]]\n"
- "ldr x5, [%x[params], %[offsetof_Params_requant_muls]]\n"
- "ldr x8, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "ldp x17, x6, [x24, #0x0]\n"
- "ldp x7, x16, [x24, #0x10]\n"
- "cbz x2, 3f\n"
- "ldr d0, [x3, #0x0]\n"
- "ldr d1, [x3, #0x8]\n"
- "subs x2, x2, #0x1\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
- "ldr d2, [x3, #0x10]\n"
- "ldr d3, [x3, #0x18]\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
- "ldr d4, [x3, #0x20]\n"
- "ldr x13, [%x[params], %[offsetof_Params_bias]]\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "ldr q13, [x13, #0x0]\n"
- "ldr q19, [x13, #0x10]\n"
- "add x13, x13, #0x20\n"
- "str x13, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x9, x28, [x4, #0x0]\n"
- "ldp x27, x26, [x4, #0x10]\n"
- "mov v20.16b, v13.16b\n"
- "mov v10.16b, v19.16b\n"
- "ldp x25, x24, [x4, #0x20]\n"
- "ldp x23, x22, [x4, #0x30]\n"
- "mov v8.16b, v13.16b\n"
- "mov v7.16b, v19.16b\n"
- "ldp x21, x20, [x4, #0x40]\n"
- "ldr d31, [x9, x0]\n"
- "mov v17.16b, v13.16b\n"
- "mov v21.16b, v19.16b\n"
- "ldr d30, [x28, x0]\n"
- "ldr d29, [x27, x0]\n"
+ "ldr x10, [%x[params], %[offsetof_Params_requant]]\n"
+ "ldr x0, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "add x17, x10, %[offsetof_Requantize32_a_offset]\n"
+ "add x9, x10, %[offsetof_Requantize32_b_offset]\n"
+ "ldr x25, [%x[params], %[offsetof_Params_outptrs]]\n"
+ "add x4, x10, %[offsetof_Requantize32_c_offset]\n"
+ "add x14, x10, %[offsetof_Requantize32_minval]\n"
+ "ldr x23, [%x[params], %[offsetof_Params_weights]]\n"
+ "add x5, x10, %[offsetof_Requantize32_maxval]\n"
+ "ld1r { v9.16b }, [x17]\n"
+ "ld1r { v14.16b }, [x9]\n"
+ "lsr x3, x0, #0x3\n"
+ "ld1r { v18.8h }, [x4]\n"
+ "ld1r { v11.8h }, [x14]\n"
+ "mov x24, #0x0\n"
+ "mov x22, #0x0\n"
+ "ld1r { v13.8h }, [x5]\n"
+ "ldr x10, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "add x20, %x[params], %[offsetof_Params_inptrs]\n"
+ "ldr x1, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "ldp x16, x8, [x25, #0x0]\n"
+ "ldp x4, x7, [x25, #0x10]\n"
+ "cbz x3, 3f\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr q15, [x19, #0x0]\n"
+ "subs x3, x3, #0x1\n"
+ "mov v17.16b, v15.16b\n"
+ "ldr q16, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr d0, [x23, #0x0]\n"
+ "ldr d1, [x23, #0x8]\n"
+ "ldr d2, [x23, #0x10]\n"
+ "mov v8.16b, v16.16b\n"
+ "mov v10.16b, v15.16b\n"
+ "ldr d3, [x23, #0x18]\n"
+ "ldr d4, [x23, #0x20]\n"
+ "mov v7.16b, v16.16b\n"
+ "mov v6.16b, v15.16b\n"
+ "ldp x28, x6, [x20, #0x0]\n"
+ "ldp x26, x25, [x20, #0x10]\n"
+ "mov v5.16b, v16.16b\n"
+ "ssubl v0.8h, v0.8b, v14.8b\n"
+ "ldp x5, x2, [x20, #0x20]\n"
+ "ldp x27, x21, [x20, #0x30]\n"
+ "ssubl v1.8h, v1.8b, v14.8b\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "ldp x12, x19, [x20, #0x40]\n"
+ "ldr d31, [x28, x24]\n"
+ "ssubl v3.8h, v3.8b, v14.8b\n"
+ "ssubl v4.8h, v4.8b, v14.8b\n"
+ "ldr d30, [x6, x24]\n"
+ "ldr d29, [x26, x24]\n"
"usubl v31.8h, v31.8b, v9.8b\n"
"usubl v30.8h, v30.8b, v9.8b\n"
- "ldr d28, [x26, x0]\n"
- "ldr d27, [x25, x0]\n"
+ "ldr d28, [x25, x24]\n"
+ "ldr d27, [x5, x24]\n"
"usubl v29.8h, v29.8b, v9.8b\n"
"usubl v28.8h, v28.8b, v9.8b\n"
- "ldr d23, [x24, x0]\n"
- "ldr d25, [x23, x0]\n"
+ "ldr d23, [x2, x24]\n"
+ "ldr d25, [x27, x24]\n"
"usubl v27.8h, v27.8b, v9.8b\n"
"usubl v23.8h, v23.8b, v9.8b\n"
- "ldr d24, [x22, x0]\n"
- "ldr d26, [x21, x0]\n"
+ "ldr d24, [x21, x24]\n"
+ "ldr d26, [x12, x24]\n"
"usubl v25.8h, v25.8b, v9.8b\n"
"usubl v24.8h, v24.8b, v9.8b\n"
- "ldr d22, [x20, x0]\n"
+ "ldr d22, [x19, x24]\n"
"usubl v26.8h, v26.8b, v9.8b\n"
"usubl v22.8h, v22.8b, v9.8b\n"
"beq 2f\n"
"1:" // Loop
- "ldr q18, [x5, #0x0]\n"
- "ldr q6, [x8, #0x0]\n"
- "smlal v13.4s, v31.4h, v0.4h\n"
- "smlal2 v19.4s, v31.8h, v0.8h\n"
- "ldr q5, [x5, #0x10]\n"
- "smlal v13.4s, v30.4h, v1.4h\n"
- "ldr x20, [x4, #0x50]\n"
- "smlal v20.4s, v30.4h, v0.4h\n"
- "smlal v8.4s, v29.4h, v0.4h\n"
- "smlal v17.4s, v28.4h, v0.4h\n"
- "ldr x22, [x4, #0x58]\n"
- "ldr x21, [x4, #0x60]\n"
- "smlal2 v19.4s, v30.8h, v1.8h\n"
- "smlal2 v10.4s, v30.8h, v0.8h\n"
- "ldr d31, [x20, x0]\n"
+ "smlal v15.4s, v31.4h, v0.4h\n"
+ "smlal2 v16.4s, v31.8h, v0.8h\n"
+ "ldr x19, [x20, #0x50]\n"
+ "ldr d31, [x19, x24]\n"
+ "smlal v17.4s, v30.4h, v0.4h\n"
+ "smlal v10.4s, v29.4h, v0.4h\n"
+ "ldr x15, [x20, #0x58]\n"
"usubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v6.4s, v28.4h, v0.4h\n"
+ "smlal2 v8.4s, v30.8h, v0.8h\n"
+ "ldr x19, [x20, #0x60]\n"
+ "ldr x27, [x20, #0x68]\n"
"smlal2 v7.4s, v29.8h, v0.8h\n"
- "smlal v13.4s, v27.4h, v2.4h\n"
- "ldr x20, [x4, #0x68]\n"
- "ldr x26, [x4, #0x70]\n"
- "smlal2 v21.4s, v28.8h, v0.8h\n"
- "ldr d30, [x22, x0]\n"
- "smlal v20.4s, v27.4h, v1.4h\n"
+ "smlal v15.4s, v30.4h, v1.4h\n"
+ "ldr x5, [x20, #0x70]\n"
+ "ldr x11, [x20, #0x78]\n"
+ "smlal2 v16.4s, v30.8h, v1.8h\n"
+ "smlal2 v5.4s, v28.8h, v0.8h\n"
+ "ldr d30, [x15, x24]\n"
"usubl v30.8h, v30.8b, v9.8b\n"
- "smlal v8.4s, v28.4h, v1.4h\n"
- "smlal v17.4s, v23.4h, v1.4h\n"
- "ldr x25, [x4, #0x78]\n"
- "ldr x23, [x4, #0x80]\n"
- "smlal2 v19.4s, v27.8h, v2.8h\n"
- "smlal2 v10.4s, v27.8h, v1.8h\n"
- "ldr d0, [x3, #0x28]\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
+ "smlal v17.4s, v27.4h, v1.4h\n"
+ "smlal v10.4s, v28.4h, v1.4h\n"
+ "ldr d0, [x23, #0x28]\n"
+ "ssubl v0.8h, v0.8b, v14.8b\n"
+ "smlal v6.4s, v23.4h, v1.4h\n"
+ "smlal2 v8.4s, v27.8h, v1.8h\n"
+ "ldr x12, [x20, #0x80]\n"
+ "ldr x26, [x20, #0x88]\n"
"smlal2 v7.4s, v28.8h, v1.8h\n"
- "smlal v13.4s, v25.4h, v3.4h\n"
- "ldr x24, [x4, #0x88]\n"
- "ldr x15, [x4, #0x90]\n"
- "smlal2 v21.4s, v23.8h, v1.8h\n"
- "ldr d27, [x21, x0]\n"
- "smlal v20.4s, v25.4h, v2.4h\n"
+ "smlal v15.4s, v27.4h, v2.4h\n"
+ "ldr x14, [x20, #0x90]\n"
+ "ldr x15, [x20, #0x98]\n"
+ "smlal2 v16.4s, v27.8h, v2.8h\n"
+ "smlal2 v5.4s, v23.8h, v1.8h\n"
+ "ldr d27, [x19, x24]\n"
"usubl v27.8h, v27.8b, v9.8b\n"
- "smlal v8.4s, v23.4h, v2.4h\n"
- "smlal v17.4s, v31.4h, v2.4h\n"
- "ldr x21, [x4, #0x98]\n"
- "ldr x14, [x4, #0xa0]\n"
- "smlal2 v19.4s, v25.8h, v3.8h\n"
- "smlal2 v10.4s, v25.8h, v2.8h\n"
- "ldr d1, [x3, #0x30]\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
+ "smlal v17.4s, v25.4h, v2.4h\n"
+ "smlal v10.4s, v23.4h, v2.4h\n"
+ "ldr d1, [x23, #0x30]\n"
+ "ssubl v1.8h, v1.8b, v14.8b\n"
+ "smlal v6.4s, v31.4h, v2.4h\n"
+ "smlal2 v8.4s, v25.8h, v2.8h\n"
+ "ldr x21, [x20, #0xa0]\n"
+ "ldr x2, [x20, #0xa8]\n"
"smlal2 v7.4s, v23.8h, v2.8h\n"
- "smlal v13.4s, v24.4h, v4.4h\n"
- "ldr x13, [x4, #0xa8]\n"
- "ldr x12, [x4, #0xb0]\n"
- "smlal2 v21.4s, v31.8h, v2.8h\n"
- "ldr d25, [x20, x0]\n"
- "smlal v20.4s, v24.4h, v3.4h\n"
+ "smlal v15.4s, v25.4h, v3.4h\n"
+ "ldr x13, [x20, #0xb0]\n"
+ "ldr x9, [x20, #0xb8]\n"
+ "smlal2 v16.4s, v25.8h, v3.8h\n"
+ "smlal2 v5.4s, v31.8h, v2.8h\n"
+ "ldr d25, [x27, x24]\n"
"usubl v25.8h, v25.8b, v9.8b\n"
- "smlal v8.4s, v31.4h, v3.4h\n"
- "smlal v17.4s, v30.4h, v3.4h\n"
- "ldr x20, [x4, #0xb8]\n"
- "ldr x11, [x4, #0xc0]\n"
- "smlal2 v19.4s, v24.8h, v4.8h\n"
- "smlal2 v10.4s, v24.8h, v3.8h\n"
- "ldr d2, [x3, #0x38]\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
+ "smlal v17.4s, v24.4h, v3.4h\n"
+ "smlal v10.4s, v31.4h, v3.4h\n"
+ "ldr d2, [x23, #0x38]\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "smlal v6.4s, v30.4h, v3.4h\n"
+ "smlal2 v8.4s, v24.8h, v3.8h\n"
+ "ldr x19, [x20, #0xc0]\n"
+ "ldr x28, [x20, #0xc8]\n"
"smlal2 v7.4s, v31.8h, v3.8h\n"
- "smlal v13.4s, v29.4h, v0.4h\n"
- "ldr x22, [x4, #0xc8]\n"
- "ldr x9, [x4, #0xd0]\n"
- "smlal2 v21.4s, v30.8h, v3.8h\n"
- "ldr d24, [x26, x0]\n"
- "smlal v20.4s, v27.4h, v4.4h\n"
+ "smlal v15.4s, v24.4h, v4.4h\n"
+ "ldr x6, [x20, #0xd0]\n"
+ "ldr x27, [x20, #0xd8]\n"
+ "smlal2 v16.4s, v24.8h, v4.8h\n"
+ "smlal2 v5.4s, v30.8h, v3.8h\n"
+ "ldr d24, [x5, x24]\n"
"usubl v24.8h, v24.8b, v9.8b\n"
- "smlal v8.4s, v30.4h, v4.4h\n"
- "smlal v17.4s, v26.4h, v4.4h\n"
- "ldr x28, [x4, #0xd8]\n"
- "ldr x27, [x4, #0xe0]\n"
- "smlal2 v19.4s, v29.8h, v0.8h\n"
- "ldr d3, [x3, #0x40]\n"
- "smlal2 v10.4s, v27.8h, v4.8h\n"
- "ldr d27, [x25, x0]\n"
- "smlal2 v7.4s, v30.8h, v4.8h\n"
- "smlal v13.4s, v28.4h, v1.4h\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "ldr x26, [x4, #0xe8]\n"
- "smlal2 v21.4s, v26.8h, v4.8h\n"
- "ldr d4, [x3, #0x48]\n"
- "smlal v20.4s, v28.4h, v0.4h\n"
+ "smlal v17.4s, v27.4h, v4.4h\n"
+ "smlal v10.4s, v30.4h, v4.4h\n"
+ "ldr d3, [x23, #0x40]\n"
+ "ssubl v3.8h, v3.8b, v14.8b\n"
+ "smlal v6.4s, v26.4h, v4.4h\n"
+ "smlal2 v8.4s, v27.8h, v4.8h\n"
+ "ldr d27, [x11, x24]\n"
"usubl v27.8h, v27.8b, v9.8b\n"
- "smlal v8.4s, v22.4h, v0.4h\n"
- "smlal v17.4s, v25.4h, v0.4h\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "subs x2, x2, #0x1\n"
- "smlal2 v19.4s, v28.8h, v1.8h\n"
- "smlal2 v10.4s, v28.8h, v0.8h\n"
- "ldr d28, [x24, x0]\n"
- "usubl v28.8h, v28.8b, v9.8b\n"
+ "smlal2 v7.4s, v30.8h, v4.8h\n"
+ "smlal v15.4s, v29.4h, v0.4h\n"
+ "ldr x11, [x20, #0xe0]\n"
+ "ldr x17, [x20, #0xe8]\n"
+ "smlal2 v16.4s, v29.8h, v0.8h\n"
+ "smlal2 v5.4s, v26.8h, v4.8h\n"
+ "ldr d4, [x23, #0x48]\n"
+ "ssubl v4.8h, v4.8b, v14.8b\n"
+ "smlal v17.4s, v28.4h, v0.4h\n"
+ "smlal v10.4s, v22.4h, v0.4h\n"
+ "ldr x5, [x20, #0xf0]\n"
+ "ldr q12, [x10, #0x0]\n"
+ "smlal v6.4s, v25.4h, v0.4h\n"
+ "smlal2 v8.4s, v28.8h, v0.8h\n"
+ "ldr q19, [x1, #0x0]\n"
+ "ldr q20, [x10, #0x10]\n"
"smlal2 v7.4s, v22.8h, v0.8h\n"
- "smlal v13.4s, v23.4h, v2.4h\n"
- "ldr x25, [x4, #0xf0]\n"
- "add x5, x5, #0x20\n"
- "smlal2 v21.4s, v25.8h, v0.8h\n"
- "ldr d0, [x3, #0x50]\n"
- "smlal v20.4s, v23.4h, v1.4h\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
- "smlal v8.4s, v25.4h, v1.4h\n"
- "smlal v17.4s, v24.4h, v1.4h\n"
- "smlal2 v19.4s, v23.8h, v2.8h\n"
- "smlal2 v10.4s, v23.8h, v1.8h\n"
- "ldr d23, [x23, x0]\n"
- "usubl v23.8h, v23.8b, v9.8b\n"
+ "smlal v15.4s, v28.4h, v1.4h\n"
+ "ldr q29, [x1, #0x10]\n"
+ "subs x3, x3, #0x1\n"
+ "smlal2 v16.4s, v28.8h, v1.8h\n"
+ "smlal2 v5.4s, v25.8h, v0.8h\n"
+ "ldr d28, [x26, x24]\n"
+ "ldr d0, [x23, #0x50]\n"
+ "smlal v17.4s, v23.4h, v1.4h\n"
+ "smlal v10.4s, v25.4h, v1.4h\n"
+ "usubl v28.8h, v28.8b, v9.8b\n"
+ "ldr x25, [x20, #0xf8]\n"
+ "smlal v6.4s, v24.4h, v1.4h\n"
+ "smlal2 v8.4s, v23.8h, v1.8h\n"
+ "ssubl v0.8h, v0.8b, v14.8b\n"
+ "add x10, x10, #0x20\n"
"smlal2 v7.4s, v25.8h, v1.8h\n"
- "smlal v13.4s, v31.4h, v3.4h\n"
- "ldr x24, [x4, #0xf8]\n"
- "smlal2 v21.4s, v24.8h, v1.8h\n"
- "ldr d1, [x3, #0x58]\n"
- "smlal v20.4s, v31.4h, v2.4h\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
- "smlal v8.4s, v24.4h, v2.4h\n"
- "smlal v17.4s, v27.4h, v2.4h\n"
- "smlal2 v19.4s, v31.8h, v3.8h\n"
- "smlal2 v10.4s, v31.8h, v2.8h\n"
- "ldr d31, [x15, x0]\n"
- "usubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v15.4s, v23.4h, v2.4h\n"
+ "add x1, x1, #0x20\n"
+ "smlal2 v16.4s, v23.8h, v2.8h\n"
+ "ldr d23, [x12, x24]\n"
+ "smlal2 v5.4s, v24.8h, v1.8h\n"
+ "usubl v23.8h, v23.8b, v9.8b\n"
+ "smlal v17.4s, v31.4h, v2.4h\n"
+ "smlal v10.4s, v24.4h, v2.4h\n"
+ "ldr d1, [x23, #0x58]\n"
+ "ssubl v1.8h, v1.8b, v14.8b\n"
+ "smlal v6.4s, v27.4h, v2.4h\n"
+ "smlal2 v8.4s, v31.8h, v2.8h\n"
+ "ldr x26, [x20, #0x100]\n"
"smlal2 v7.4s, v24.8h, v2.8h\n"
- "smlal v13.4s, v30.4h, v4.4h\n"
- "ldr x23, [x4, #0x100]\n"
- "smlal2 v21.4s, v27.8h, v2.8h\n"
- "ldr d2, [x3, #0x60]\n"
- "smlal v20.4s, v30.4h, v3.4h\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
- "smlal v8.4s, v27.4h, v3.4h\n"
- "smlal v17.4s, v23.4h, v3.4h\n"
- "smlal2 v19.4s, v30.8h, v4.8h\n"
- "smlal2 v10.4s, v30.8h, v3.8h\n"
- "ldr d30, [x21, x0]\n"
- "usubl v30.8h, v30.8b, v9.8b\n"
+ "smlal v15.4s, v31.4h, v3.4h\n"
+ "smlal2 v16.4s, v31.8h, v3.8h\n"
+ "smlal2 v5.4s, v27.8h, v2.8h\n"
+ "ldr d31, [x14, x24]\n"
+ "usubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v17.4s, v30.4h, v3.4h\n"
+ "smlal v10.4s, v27.4h, v3.4h\n"
+ "ldr d2, [x23, #0x60]\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "smlal v6.4s, v23.4h, v3.4h\n"
+ "smlal2 v8.4s, v30.8h, v3.8h\n"
+ "ldr x12, [x20, #0x108]\n"
"smlal2 v7.4s, v27.8h, v3.8h\n"
- "smlal v13.4s, v22.4h, v0.4h\n"
- "ldr x15, [x4, #0x108]\n"
- "smlal2 v21.4s, v23.8h, v3.8h\n"
- "ldr d3, [x3, #0x68]\n"
- "smlal v20.4s, v26.4h, v4.4h\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "smlal v8.4s, v23.4h, v4.4h\n"
- "smlal v17.4s, v28.4h, v4.4h\n"
- "smlal2 v19.4s, v22.8h, v0.8h\n"
- "ldr d22, [x20, x0]\n"
- "smlal2 v10.4s, v26.8h, v4.8h\n"
- "ldr d26, [x14, x0]\n"
- "smlal2 v7.4s, v23.8h, v4.8h\n"
- "smlal v13.4s, v25.4h, v1.4h\n"
+ "smlal v15.4s, v30.4h, v4.4h\n"
+ "smlal2 v16.4s, v30.8h, v4.8h\n"
+ "ldr d30, [x15, x24]\n"
+ "smlal2 v5.4s, v23.8h, v3.8h\n"
+ "usubl v30.8h, v30.8b, v9.8b\n"
+ "smlal v17.4s, v26.4h, v4.4h\n"
+ "smlal v10.4s, v23.4h, v4.4h\n"
+ "ldr d3, [x23, #0x68]\n"
+ "ssubl v3.8h, v3.8b, v14.8b\n"
+ "smlal v6.4s, v28.4h, v4.4h\n"
+ "smlal2 v8.4s, v26.8h, v4.8h\n"
+ "ldr d26, [x21, x24]\n"
"usubl v26.8h, v26.8b, v9.8b\n"
- "ldr x21, [x4, #0x110]\n"
- "smlal2 v21.4s, v28.8h, v4.8h\n"
- "ldr d4, [x3, #0x70]\n"
- "smlal v20.4s, v25.4h, v0.4h\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "smlal v8.4s, v31.4h, v0.4h\n"
- "smlal v17.4s, v30.4h, v0.4h\n"
+ "smlal2 v7.4s, v23.8h, v4.8h\n"
+ "smlal v15.4s, v22.4h, v0.4h\n"
+ "ldr x14, [x20, #0x110]\n"
+ "ldr x21, [x20, #0x118]\n"
+ "smlal2 v16.4s, v22.8h, v0.8h\n"
+ "smlal2 v5.4s, v28.8h, v4.8h\n"
+ "ldr d4, [x23, #0x70]\n"
+ "ldr d22, [x9, x24]\n"
+ "smlal v17.4s, v25.4h, v0.4h\n"
+ "smlal v10.4s, v31.4h, v0.4h\n"
+ "ssubl v4.8h, v4.8b, v14.8b\n"
+ "smlal v6.4s, v30.4h, v0.4h\n"
+ "smlal2 v8.4s, v25.8h, v0.8h\n"
"usubl v22.8h, v22.8b, v9.8b\n"
- "ldr x20, [x4, #0x118]\n"
- "smlal2 v19.4s, v25.8h, v1.8h\n"
- "smlal2 v10.4s, v25.8h, v0.8h\n"
- "ldr d25, [x13, x0]\n"
- "usubl v25.8h, v25.8b, v9.8b\n"
"smlal2 v7.4s, v31.8h, v0.8h\n"
- "smlal v13.4s, v24.4h, v2.4h\n"
- "ldr x13, [%x[params], %[offsetof_Params_bias]]\n"
- "smlal2 v21.4s, v30.8h, v0.8h\n"
- "ldr d0, [x3, #0x78]\n"
- "smlal v20.4s, v24.4h, v1.4h\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
- "smlal v8.4s, v30.4h, v1.4h\n"
- "smlal v17.4s, v26.4h, v1.4h\n"
- "smlal2 v19.4s, v24.8h, v2.8h\n"
- "smlal2 v10.4s, v24.8h, v1.8h\n"
- "ldr d24, [x12, x0]\n"
- "usubl v24.8h, v24.8b, v9.8b\n"
+ "smlal v15.4s, v25.4h, v1.4h\n"
+ "smlal2 v16.4s, v25.8h, v1.8h\n"
+ "ldr d25, [x2, x24]\n"
+ "smlal2 v5.4s, v30.8h, v0.8h\n"
+ "usubl v25.8h, v25.8b, v9.8b\n"
+ "smlal v17.4s, v24.4h, v1.4h\n"
+ "smlal v10.4s, v30.4h, v1.4h\n"
+ "ldr d0, [x23, #0x78]\n"
+ "ssubl v0.8h, v0.8b, v14.8b\n"
+ "smlal v6.4s, v26.4h, v1.4h\n"
+ "smlal2 v8.4s, v24.8h, v1.8h\n"
"smlal2 v7.4s, v30.8h, v1.8h\n"
- "smlal v13.4s, v27.4h, v3.4h\n"
- "smlal2 v21.4s, v26.8h, v1.8h\n"
- "ldr d1, [x3, #0x80]\n"
- "smlal v20.4s, v27.4h, v2.4h\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
- "smlal v8.4s, v26.4h, v2.4h\n"
- "smlal v17.4s, v25.4h, v2.4h\n"
- "smlal2 v19.4s, v27.8h, v3.8h\n"
- "smlal2 v10.4s, v27.8h, v2.8h\n"
- "ldr d27, [x11, x0]\n"
- "usubl v27.8h, v27.8b, v9.8b\n"
+ "smlal v15.4s, v24.4h, v2.4h\n"
+ "smlal2 v16.4s, v24.8h, v2.8h\n"
+ "ldr d24, [x13, x24]\n"
+ "smlal2 v5.4s, v26.8h, v1.8h\n"
+ "usubl v24.8h, v24.8b, v9.8b\n"
+ "smlal v17.4s, v27.4h, v2.4h\n"
+ "smlal v10.4s, v26.4h, v2.4h\n"
+ "ldr d1, [x23, #0x80]\n"
+ "ssubl v1.8h, v1.8b, v14.8b\n"
+ "smlal v6.4s, v25.4h, v2.4h\n"
+ "smlal2 v8.4s, v27.8h, v2.8h\n"
"smlal2 v7.4s, v26.8h, v2.8h\n"
- "smlal v13.4s, v23.4h, v4.4h\n"
- "smlal2 v21.4s, v25.8h, v2.8h\n"
- "ldr d2, [x3, #0x88]\n"
- "smlal v20.4s, v23.4h, v3.4h\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
- "smlal v8.4s, v25.4h, v3.4h\n"
- "smlal v17.4s, v24.4h, v3.4h\n"
- "smlal2 v19.4s, v23.8h, v4.8h\n"
- "smlal2 v10.4s, v23.8h, v3.8h\n"
- "ldr d23, [x22, x0]\n"
- "usubl v23.8h, v23.8b, v9.8b\n"
+ "smlal v15.4s, v27.4h, v3.4h\n"
+ "smlal2 v16.4s, v27.8h, v3.8h\n"
+ "smlal2 v5.4s, v25.8h, v2.8h\n"
+ "ldr d27, [x19, x24]\n"
+ "usubl v27.8h, v27.8b, v9.8b\n"
+ "smlal v17.4s, v23.4h, v3.4h\n"
+ "smlal v10.4s, v25.4h, v3.4h\n"
+ "ldr d2, [x23, #0x88]\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "smlal v6.4s, v24.4h, v3.4h\n"
+ "smlal2 v8.4s, v23.8h, v3.8h\n"
"smlal2 v7.4s, v25.8h, v3.8h\n"
- "smlal v13.4s, v31.4h, v0.4h\n"
- "smlal2 v21.4s, v24.8h, v3.8h\n"
- "ldr d3, [x3, #0x90]\n"
- "smlal v20.4s, v28.4h, v4.4h\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "smlal v8.4s, v24.4h, v4.4h\n"
- "smlal v17.4s, v22.4h, v4.4h\n"
- "smlal2 v19.4s, v31.8h, v0.8h\n"
- "ldr d31, [x9, x0]\n"
- "smlal2 v10.4s, v28.8h, v4.8h\n"
- "ldr d28, [x27, x0]\n"
+ "smlal v15.4s, v23.4h, v4.4h\n"
+ "smlal2 v16.4s, v23.8h, v4.8h\n"
+ "ldr d23, [x28, x24]\n"
+ "smlal2 v5.4s, v24.8h, v3.8h\n"
+ "usubl v23.8h, v23.8b, v9.8b\n"
+ "smlal v17.4s, v28.4h, v4.4h\n"
+ "smlal v10.4s, v24.4h, v4.4h\n"
+ "ldr d3, [x23, #0x90]\n"
+ "ssubl v3.8h, v3.8b, v14.8b\n"
+ "smlal v6.4s, v22.4h, v4.4h\n"
+ "smlal2 v8.4s, v28.8h, v4.8h\n"
+ "ldr d28, [x11, x24]\n"
+ "usubl v28.8h, v28.8b, v9.8b\n"
"smlal2 v7.4s, v24.8h, v4.8h\n"
- "smlal v13.4s, v30.4h, v1.4h\n"
+ "smlal v15.4s, v31.4h, v0.4h\n"
+ "smlal2 v16.4s, v31.8h, v0.8h\n"
+ "ldr d31, [x6, x24]\n"
+ "smlal2 v5.4s, v22.8h, v4.8h\n"
"usubl v31.8h, v31.8b, v9.8b\n"
- "smlal2 v21.4s, v22.8h, v4.8h\n"
- "ldr d4, [x3, #0x98]\n"
- "smlal v20.4s, v30.4h, v0.4h\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "smlal v8.4s, v27.4h, v0.4h\n"
- "smlal v17.4s, v23.4h, v0.4h\n"
- "usubl v28.8h, v28.8b, v9.8b\n"
- "smlal2 v19.4s, v30.8h, v1.8h\n"
- "smlal2 v10.4s, v30.8h, v0.8h\n"
- "ldr d30, [x28, x0]\n"
- "usubl v30.8h, v30.8b, v9.8b\n"
+ "smlal v17.4s, v30.4h, v0.4h\n"
+ "smlal v10.4s, v27.4h, v0.4h\n"
+ "ldr d4, [x23, #0x98]\n"
+ "ssubl v4.8h, v4.8b, v14.8b\n"
+ "smlal v6.4s, v23.4h, v0.4h\n"
+ "smlal2 v8.4s, v30.8h, v0.8h\n"
"smlal2 v7.4s, v27.8h, v0.8h\n"
- "smlal v13.4s, v26.4h, v2.4h\n"
- "smlal2 v21.4s, v23.8h, v0.8h\n"
- "ldr d0, [x3, #0xa0]\n"
- "smlal v20.4s, v26.4h, v1.4h\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
- "smlal v8.4s, v23.4h, v1.4h\n"
- "smlal v17.4s, v31.4h, v1.4h\n"
- "smlal2 v19.4s, v26.8h, v2.8h\n"
- "smlal2 v10.4s, v26.8h, v1.8h\n"
- "ldr d26, [x26, x0]\n"
- "usubl v26.8h, v26.8b, v9.8b\n"
+ "smlal v15.4s, v30.4h, v1.4h\n"
+ "smlal2 v16.4s, v30.8h, v1.8h\n"
+ "ldr d30, [x27, x24]\n"
+ "smlal2 v5.4s, v23.8h, v0.8h\n"
+ "usubl v30.8h, v30.8b, v9.8b\n"
+ "smlal v17.4s, v26.4h, v1.4h\n"
+ "smlal v10.4s, v23.4h, v1.4h\n"
+ "ldr d0, [x23, #0xa0]\n"
+ "ssubl v0.8h, v0.8b, v14.8b\n"
+ "smlal v6.4s, v31.4h, v1.4h\n"
+ "smlal2 v8.4s, v26.8h, v1.8h\n"
"smlal2 v7.4s, v23.8h, v1.8h\n"
- "smlal v13.4s, v25.4h, v3.4h\n"
- "smlal2 v21.4s, v31.8h, v1.8h\n"
- "ldr d1, [x3, #0xa8]\n"
- "smlal v20.4s, v25.4h, v2.4h\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
- "smlal v8.4s, v31.4h, v2.4h\n"
- "smlal v17.4s, v30.4h, v2.4h\n"
- "smlal2 v19.4s, v25.8h, v3.8h\n"
- "smlal2 v10.4s, v25.8h, v2.8h\n"
- "ldr d25, [x25, x0]\n"
- "usubl v25.8h, v25.8b, v9.8b\n"
+ "smlal v15.4s, v26.4h, v2.4h\n"
+ "smlal2 v16.4s, v26.8h, v2.8h\n"
+ "smlal2 v5.4s, v31.8h, v1.8h\n"
+ "ldr d26, [x17, x24]\n"
+ "usubl v26.8h, v26.8b, v9.8b\n"
+ "smlal v17.4s, v25.4h, v2.4h\n"
+ "smlal v10.4s, v31.4h, v2.4h\n"
+ "ldr d1, [x23, #0xa8]\n"
+ "ssubl v1.8h, v1.8b, v14.8b\n"
+ "smlal v6.4s, v30.4h, v2.4h\n"
+ "smlal2 v8.4s, v25.8h, v2.8h\n"
"smlal2 v7.4s, v31.8h, v2.8h\n"
- "smlal v13.4s, v24.4h, v4.4h\n"
- "smlal2 v21.4s, v30.8h, v2.8h\n"
- "ldr d2, [x3, #0xb0]\n"
- "smlal v20.4s, v24.4h, v3.4h\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
- "smlal v8.4s, v30.4h, v3.4h\n"
- "smlal v17.4s, v28.4h, v3.4h\n"
- "smlal2 v19.4s, v24.8h, v4.8h\n"
- "smlal2 v10.4s, v24.8h, v3.8h\n"
- "ldr d24, [x24, x0]\n"
- "usubl v24.8h, v24.8b, v9.8b\n"
+ "smlal v15.4s, v25.4h, v3.4h\n"
+ "smlal2 v16.4s, v25.8h, v3.8h\n"
+ "smlal2 v5.4s, v30.8h, v2.8h\n"
+ "ldr d25, [x5, x24]\n"
+ "usubl v25.8h, v25.8b, v9.8b\n"
+ "smlal v17.4s, v24.4h, v3.4h\n"
+ "smlal v10.4s, v30.4h, v3.4h\n"
+ "ldr d2, [x23, #0xb0]\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "smlal v6.4s, v28.4h, v3.4h\n"
+ "smlal2 v8.4s, v24.8h, v3.8h\n"
"smlal2 v7.4s, v30.8h, v3.8h\n"
- "smlal v13.4s, v27.4h, v0.4h\n"
- "smlal2 v21.4s, v28.8h, v3.8h\n"
- "ldr d3, [x3, #0xb8]\n"
- "smlal v20.4s, v22.4h, v4.4h\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "smlal v8.4s, v28.4h, v4.4h\n"
- "smlal v17.4s, v26.4h, v4.4h\n"
- "smlal2 v19.4s, v27.8h, v0.8h\n"
- "ldr d27, [x23, x0]\n"
+ "smlal v15.4s, v24.4h, v4.4h\n"
+ "smlal2 v16.4s, v24.8h, v4.8h\n"
+ "ldr d24, [x25, x24]\n"
+ "smlal2 v5.4s, v28.8h, v3.8h\n"
+ "usubl v24.8h, v24.8b, v9.8b\n"
+ "smlal v17.4s, v22.4h, v4.4h\n"
+ "smlal v10.4s, v28.4h, v4.4h\n"
+ "ldr d3, [x23, #0xb8]\n"
+ "ssubl v3.8h, v3.8b, v14.8b\n"
+ "smlal v6.4s, v26.4h, v4.4h\n"
"smlal2 v7.4s, v28.8h, v4.8h\n"
+ "smlal v15.4s, v27.4h, v0.4h\n"
+ "smlal2 v16.4s, v27.8h, v0.8h\n"
+ "ldr d27, [x26, x24]\n"
"usubl v27.8h, v27.8b, v9.8b\n"
- "smlal v13.4s, v23.4h, v1.4h\n"
- "smlal2 v10.4s, v22.8h, v4.8h\n"
- "ldr q22, [x8, #0x10]\n"
- "add x8, x8, #0x20\n"
- "smlal2 v21.4s, v26.8h, v4.8h\n"
- "ldr d4, [x3, #0xc0]\n"
- "smlal v20.4s, v23.4h, v0.4h\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "smlal v8.4s, v25.4h, v0.4h\n"
- "smlal v17.4s, v24.4h, v0.4h\n"
- "add x3, x3, #0xc8\n"
- "smlal2 v19.4s, v23.8h, v1.8h\n"
+ "smlal2 v8.4s, v22.8h, v4.8h\n"
+ "smlal2 v5.4s, v26.8h, v4.8h\n"
+ "ldr d4, [x23, #0xc0]\n"
+ "ssubl v4.8h, v4.8b, v14.8b\n"
+ "smlal v17.4s, v23.4h, v0.4h\n"
+ "smlal v10.4s, v25.4h, v0.4h\n"
+ "add x23, x23, #0xc8\n"
+ "smlal v6.4s, v24.4h, v0.4h\n"
"smlal2 v7.4s, v25.8h, v0.8h\n"
- "ldr d25, [x15, x0]\n"
+ "ldr d25, [x12, x24]\n"
"usubl v25.8h, v25.8b, v9.8b\n"
- "smlal v13.4s, v31.4h, v2.4h\n"
- "smlal2 v10.4s, v23.8h, v0.8h\n"
- "smlal2 v21.4s, v24.8h, v0.8h\n"
- "smlal v20.4s, v31.4h, v1.4h\n"
- "smlal v8.4s, v24.4h, v1.4h\n"
- "smlal v17.4s, v27.4h, v1.4h\n"
- "smlal2 v19.4s, v31.8h, v2.8h\n"
+ "smlal2 v8.4s, v23.8h, v0.8h\n"
+ "smlal2 v5.4s, v24.8h, v0.8h\n"
+ "smlal v15.4s, v23.4h, v1.4h\n"
+ "smlal v17.4s, v31.4h, v1.4h\n"
+ "smlal v10.4s, v24.4h, v1.4h\n"
+ "smlal v6.4s, v27.4h, v1.4h\n"
"smlal2 v7.4s, v24.8h, v1.8h\n"
- "ldr d24, [x21, x0]\n"
+ "ldr d24, [x14, x24]\n"
+ "smlal2 v16.4s, v23.8h, v1.8h\n"
"usubl v24.8h, v24.8b, v9.8b\n"
- "smlal v13.4s, v30.4h, v3.4h\n"
- "smlal2 v10.4s, v31.8h, v1.8h\n"
- "smlal2 v21.4s, v27.8h, v1.8h\n"
- "smlal v20.4s, v30.4h, v2.4h\n"
- "smlal v8.4s, v27.4h, v2.4h\n"
- "smlal v17.4s, v25.4h, v2.4h\n"
- "smlal2 v19.4s, v30.8h, v3.8h\n"
+ "smlal2 v8.4s, v31.8h, v1.8h\n"
+ "smlal2 v5.4s, v27.8h, v1.8h\n"
+ "smlal v15.4s, v31.4h, v2.4h\n"
+ "smlal v17.4s, v30.4h, v2.4h\n"
+ "smlal v10.4s, v27.4h, v2.4h\n"
+ "smlal v6.4s, v25.4h, v2.4h\n"
"smlal2 v7.4s, v27.8h, v2.8h\n"
- "ldr d27, [x20, x0]\n"
+ "ldr d27, [x21, x24]\n"
+ "smlal2 v16.4s, v31.8h, v2.8h\n"
"usubl v27.8h, v27.8b, v9.8b\n"
- "smlal v13.4s, v28.4h, v4.4h\n"
- "smlal2 v10.4s, v30.8h, v2.8h\n"
- "sqrdmulh v13.4s, v13.4s, v18.4s\n"
- "add x0, x0, #0x8\n"
- "smlal2 v21.4s, v25.8h, v2.8h\n"
- "smlal v20.4s, v28.4h, v3.4h\n"
- "and v30.16b, v13.16b, v6.16b\n"
- "smlal v8.4s, v25.4h, v3.4h\n"
- "smlal v17.4s, v24.4h, v3.4h\n"
- "sshr v30.4s, v30.4s, #0x1f\n"
- "smlal2 v19.4s, v28.8h, v4.8h\n"
- "smlal2 v10.4s, v28.8h, v3.8h\n"
- "sqrdmulh v19.4s, v19.4s, v5.4s\n"
+ "smlal2 v8.4s, v30.8h, v2.8h\n"
+ "smlal2 v5.4s, v25.8h, v2.8h\n"
+ "add x24, x24, #0x8\n"
+ "smlal v15.4s, v30.4h, v3.4h\n"
+ "smlal v17.4s, v28.4h, v3.4h\n"
+ "smlal v10.4s, v25.4h, v3.4h\n"
+ "smlal v6.4s, v24.4h, v3.4h\n"
+ "smlal2 v16.4s, v30.8h, v3.8h\n"
+ "smlal2 v8.4s, v28.8h, v3.8h\n"
"smlal2 v7.4s, v25.8h, v3.8h\n"
- "smlal2 v21.4s, v24.8h, v3.8h\n"
- "and v16.16b, v19.16b, v22.16b\n"
- "smlal v20.4s, v26.4h, v4.4h\n"
- "smlal v8.4s, v24.4h, v4.4h\n"
- "sqrdmulh v20.4s, v20.4s, v18.4s\n"
- "smlal v17.4s, v27.4h, v4.4h\n"
- "smlal2 v10.4s, v26.8h, v4.8h\n"
- "sqrdmulh v8.4s, v8.4s, v18.4s\n"
+ "smlal2 v5.4s, v24.8h, v3.8h\n"
+ "smlal v15.4s, v28.4h, v4.4h\n"
+ "smlal v17.4s, v26.4h, v4.4h\n"
+ "sqrdmulh v15.4s, v15.4s, v12.4s\n"
+ "smlal v10.4s, v24.4h, v4.4h\n"
+ "smlal v6.4s, v27.4h, v4.4h\n"
+ "sqrdmulh v17.4s, v17.4s, v12.4s\n"
+ "smlal2 v16.4s, v28.8h, v4.8h\n"
+ "smlal2 v8.4s, v26.8h, v4.8h\n"
+ "sqrdmulh v10.4s, v10.4s, v12.4s\n"
"smlal2 v7.4s, v24.8h, v4.8h\n"
- "smlal2 v21.4s, v27.8h, v4.8h\n"
- "sqrdmulh v17.4s, v17.4s, v18.4s\n"
- "sqadd v13.4s, v13.4s, v30.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "and v0.16b, v20.16b, v6.16b\n"
- "sqrdmulh v10.4s, v10.4s, v5.4s\n"
- "and v18.16b, v8.16b, v6.16b\n"
- "sqrdmulh v7.4s, v7.4s, v5.4s\n"
- "and v30.16b, v17.16b, v6.16b\n"
- "sqrdmulh v21.4s, v21.4s, v5.4s\n"
- "sqadd v19.4s, v19.4s, v16.4s\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "and v26.16b, v10.16b, v22.16b\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "and v23.16b, v7.16b, v22.16b\n"
- "sshr v30.4s, v30.4s, #0x1f\n"
- "and v16.16b, v21.16b, v22.16b\n"
- "sqadd v20.4s, v20.4s, v0.4s\n"
- "sshr v26.4s, v26.4s, #0x1f\n"
- "sqadd v8.4s, v8.4s, v18.4s\n"
+ "smlal2 v5.4s, v27.8h, v4.8h\n"
+ "sqrdmulh v6.4s, v6.4s, v12.4s\n"
+ "and v23.16b, v15.16b, v19.16b\n"
+ "sqrdmulh v16.4s, v16.4s, v20.4s\n"
+ "and v22.16b, v17.16b, v19.16b\n"
+ "sqrdmulh v8.4s, v8.4s, v20.4s\n"
+ "and v21.16b, v10.16b, v19.16b\n"
+ "sqrdmulh v7.4s, v7.4s, v20.4s\n"
+ "and v26.16b, v6.16b, v19.16b\n"
+ "sqrdmulh v5.4s, v5.4s, v20.4s\n"
"sshr v23.4s, v23.4s, #0x1f\n"
- "sqadd v17.4s, v17.4s, v30.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "srshl v13.4s, v13.4s, v6.4s\n"
- "srshl v20.4s, v20.4s, v6.4s\n"
- "sqadd v10.4s, v10.4s, v26.4s\n"
- "srshl v8.4s, v8.4s, v6.4s\n"
- "sqadd v7.4s, v7.4s, v23.4s\n"
- "srshl v17.4s, v17.4s, v6.4s\n"
- "sqadd v21.4s, v21.4s, v16.4s\n"
- "srshl v19.4s, v19.4s, v22.4s\n"
- "sqxtn v13.4h, v13.4s\n"
- "srshl v10.4s, v10.4s, v22.4s\n"
- "sqxtn v20.4h, v20.4s\n"
- "srshl v7.4s, v7.4s, v22.4s\n"
- "sqxtn v8.4h, v8.4s\n"
- "srshl v21.4s, v21.4s, v22.4s\n"
+ "and v4.16b, v16.16b, v29.16b\n"
+ "sshr v22.4s, v22.4s, #0x1f\n"
+ "and v2.16b, v8.16b, v29.16b\n"
+ "sshr v21.4s, v21.4s, #0x1f\n"
+ "and v3.16b, v7.16b, v29.16b\n"
+ "sshr v26.4s, v26.4s, #0x1f\n"
+ "and v25.16b, v5.16b, v29.16b\n"
+ "sqadd v15.4s, v15.4s, v23.4s\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "sqadd v17.4s, v17.4s, v22.4s\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sqadd v10.4s, v10.4s, v21.4s\n"
+ "sshr v3.4s, v3.4s, #0x1f\n"
+ "sqadd v6.4s, v6.4s, v26.4s\n"
+ "sshr v25.4s, v25.4s, #0x1f\n"
+ "srshl v15.4s, v15.4s, v19.4s\n"
+ "sqadd v16.4s, v16.4s, v4.4s\n"
+ "srshl v17.4s, v17.4s, v19.4s\n"
+ "sqadd v8.4s, v8.4s, v2.4s\n"
+ "srshl v10.4s, v10.4s, v19.4s\n"
+ "sqadd v7.4s, v7.4s, v3.4s\n"
+ "srshl v6.4s, v6.4s, v19.4s\n"
+ "sqadd v5.4s, v5.4s, v25.4s\n"
+ "srshl v16.4s, v16.4s, v29.4s\n"
+ "sqxtn v15.4h, v15.4s\n"
+ "srshl v8.4s, v8.4s, v29.4s\n"
"sqxtn v17.4h, v17.4s\n"
- "sqxtn2 v13.8h, v19.4s\n"
- "sqxtn2 v20.8h, v10.4s\n"
- "sqxtn2 v8.8h, v7.4s\n"
- "sqxtn2 v17.8h, v21.4s\n"
- "sqadd v13.8h, v13.8h, v14.8h\n"
- "sqadd v20.8h, v20.8h, v14.8h\n"
- "sqadd v8.8h, v8.8h, v14.8h\n"
- "sqadd v17.8h, v17.8h, v14.8h\n"
- "smax v13.8h, v13.8h, v12.8h\n"
- "smax v20.8h, v20.8h, v12.8h\n"
- "smax v8.8h, v8.8h, v12.8h\n"
- "smax v17.8h, v17.8h, v12.8h\n"
- "smin v13.8h, v13.8h, v11.8h\n"
- "smin v20.8h, v20.8h, v11.8h\n"
- "smin v8.8h, v8.8h, v11.8h\n"
- "smin v17.8h, v17.8h, v11.8h\n"
- "uzp1 v13.16b, v13.16b, v13.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "str d13, [x17, x10]\n"
- "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "srshl v7.4s, v7.4s, v29.4s\n"
+ "sqxtn v10.4h, v10.4s\n"
+ "srshl v5.4s, v5.4s, v29.4s\n"
+ "sqxtn v6.4h, v6.4s\n"
+ "sqxtn2 v15.8h, v16.4s\n"
+ "sqxtn2 v17.8h, v8.4s\n"
+ "sqxtn2 v10.8h, v7.4s\n"
+ "sqxtn2 v6.8h, v5.4s\n"
+ "sqadd v15.8h, v15.8h, v18.8h\n"
+ "sqadd v17.8h, v17.8h, v18.8h\n"
+ "sqadd v10.8h, v10.8h, v18.8h\n"
+ "sqadd v6.8h, v6.8h, v18.8h\n"
+ "smax v15.8h, v15.8h, v11.8h\n"
+ "smax v17.8h, v17.8h, v11.8h\n"
+ "smax v10.8h, v10.8h, v11.8h\n"
+ "smax v6.8h, v6.8h, v11.8h\n"
+ "smin v15.8h, v15.8h, v13.8h\n"
+ "smin v17.8h, v17.8h, v13.8h\n"
+ "smin v10.8h, v10.8h, v13.8h\n"
+ "smin v6.8h, v6.8h, v13.8h\n"
+ "uzp1 v15.16b, v15.16b, v15.16b\n"
"uzp1 v17.16b, v17.16b, v17.16b\n"
- "str d20, [x6, x10]\n"
- "str d8, [x7, x10]\n"
- "str d17, [x16, x10]\n"
- "ldr q13, [x13, #0x0]\n"
- "ldr q19, [x13, #0x10]\n"
- "add x13, x13, #0x20\n"
- "ldr d0, [x3, #0x0]\n"
- "ldr d1, [x3, #0x8]\n"
- "add x10, x10, #0x8\n"
- "str x13, [%x[params], %[offsetof_Params_bias]]\n"
- "ldr d2, [x3, #0x10]\n"
- "ldr d3, [x3, #0x18]\n"
- "mov v20.16b, v13.16b\n"
- "mov v10.16b, v19.16b\n"
- "ldr d4, [x3, #0x20]\n"
- "ldp x9, x28, [x4, #0x0]\n"
- "mov v8.16b, v13.16b\n"
- "mov v7.16b, v19.16b\n"
- "ldp x27, x26, [x4, #0x10]\n"
- "ldp x25, x24, [x4, #0x20]\n"
- "mov v17.16b, v13.16b\n"
- "mov v21.16b, v19.16b\n"
- "ldp x23, x22, [x4, #0x30]\n"
- "ldp x21, x20, [x4, #0x40]\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
- "ldr d31, [x9, x0]\n"
- "ldr d30, [x28, x0]\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "ldr d29, [x27, x0]\n"
- "ldr d28, [x26, x0]\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
+ "str d15, [x16, x22]\n"
+ "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "str d17, [x8, x22]\n"
+ "str d10, [x4, x22]\n"
+ "str d6, [x7, x22]\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr q15, [x19, #0x0]\n"
+ "add x22, x22, #0x8\n"
+ "ldr q16, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr d0, [x23, #0x0]\n"
+ "ldr d1, [x23, #0x8]\n"
+ "ldr d2, [x23, #0x10]\n"
+ "mov v17.16b, v15.16b\n"
+ "mov v8.16b, v16.16b\n"
+ "ldr d3, [x23, #0x18]\n"
+ "ldr d4, [x23, #0x20]\n"
+ "mov v10.16b, v15.16b\n"
+ "mov v7.16b, v16.16b\n"
+ "ldp x28, x6, [x20, #0x0]\n"
+ "ldp x26, x25, [x20, #0x10]\n"
+ "mov v6.16b, v15.16b\n"
+ "mov v5.16b, v16.16b\n"
+ "ldp x5, x2, [x20, #0x20]\n"
+ "ldp x27, x21, [x20, #0x30]\n"
+ "ssubl v0.8h, v0.8b, v14.8b\n"
+ "ssubl v1.8h, v1.8b, v14.8b\n"
+ "ldp x12, x19, [x20, #0x40]\n"
+ "ldr d31, [x28, x24]\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "ssubl v3.8h, v3.8b, v14.8b\n"
+ "ldr d30, [x6, x24]\n"
+ "ldr d29, [x26, x24]\n"
+ "ssubl v4.8h, v4.8b, v14.8b\n"
"usubl v31.8h, v31.8b, v9.8b\n"
- "ldr d27, [x25, x0]\n"
- "ldr d23, [x24, x0]\n"
+ "ldr d28, [x25, x24]\n"
+ "ldr d27, [x5, x24]\n"
"usubl v30.8h, v30.8b, v9.8b\n"
"usubl v29.8h, v29.8b, v9.8b\n"
- "ldr d25, [x23, x0]\n"
- "ldr d24, [x22, x0]\n"
+ "ldr d23, [x2, x24]\n"
+ "ldr d25, [x27, x24]\n"
"usubl v28.8h, v28.8b, v9.8b\n"
"usubl v27.8h, v27.8b, v9.8b\n"
- "ldr d26, [x21, x0]\n"
- "ldr d22, [x20, x0]\n"
+ "ldr d24, [x21, x24]\n"
+ "ldr d26, [x12, x24]\n"
"usubl v23.8h, v23.8b, v9.8b\n"
"usubl v25.8h, v25.8b, v9.8b\n"
+ "ldr d22, [x19, x24]\n"
"usubl v24.8h, v24.8b, v9.8b\n"
"usubl v26.8h, v26.8b, v9.8b\n"
"usubl v22.8h, v22.8b, v9.8b\n"
"bgt 1b\n"
"2:" // Tail
- "ldr q18, [x5, #0x0]\n"
- "ldr q6, [x8, #0x0]\n"
- "smlal v13.4s, v31.4h, v0.4h\n"
- "smlal2 v19.4s, v31.8h, v0.8h\n"
- "ldr q5, [x5, #0x10]\n"
- "smlal v13.4s, v30.4h, v1.4h\n"
- "ldr x20, [x4, #0x50]\n"
- "smlal v20.4s, v30.4h, v0.4h\n"
- "smlal v8.4s, v29.4h, v0.4h\n"
- "smlal v17.4s, v28.4h, v0.4h\n"
- "ldr x22, [x4, #0x58]\n"
- "ldr x21, [x4, #0x60]\n"
- "smlal2 v19.4s, v30.8h, v1.8h\n"
- "smlal2 v10.4s, v30.8h, v0.8h\n"
- "ldr d31, [x20, x0]\n"
+ "smlal v15.4s, v31.4h, v0.4h\n"
+ "smlal2 v16.4s, v31.8h, v0.8h\n"
+ "ldr x19, [x20, #0x50]\n"
+ "ldr d31, [x19, x24]\n"
+ "smlal v17.4s, v30.4h, v0.4h\n"
+ "smlal v10.4s, v29.4h, v0.4h\n"
+ "ldr x15, [x20, #0x58]\n"
"usubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v6.4s, v28.4h, v0.4h\n"
+ "smlal2 v8.4s, v30.8h, v0.8h\n"
+ "ldr x19, [x20, #0x60]\n"
+ "ldr x27, [x20, #0x68]\n"
"smlal2 v7.4s, v29.8h, v0.8h\n"
- "smlal v13.4s, v27.4h, v2.4h\n"
- "ldr x20, [x4, #0x68]\n"
- "ldr x26, [x4, #0x70]\n"
- "smlal2 v21.4s, v28.8h, v0.8h\n"
- "ldr d30, [x22, x0]\n"
- "smlal v20.4s, v27.4h, v1.4h\n"
+ "smlal v15.4s, v30.4h, v1.4h\n"
+ "ldr x5, [x20, #0x70]\n"
+ "ldr x11, [x20, #0x78]\n"
+ "smlal2 v16.4s, v30.8h, v1.8h\n"
+ "smlal2 v5.4s, v28.8h, v0.8h\n"
+ "ldr d30, [x15, x24]\n"
"usubl v30.8h, v30.8b, v9.8b\n"
- "smlal v8.4s, v28.4h, v1.4h\n"
- "smlal v17.4s, v23.4h, v1.4h\n"
- "ldr x25, [x4, #0x78]\n"
- "ldr x23, [x4, #0x80]\n"
- "smlal2 v19.4s, v27.8h, v2.8h\n"
- "smlal2 v10.4s, v27.8h, v1.8h\n"
- "ldr d0, [x3, #0x28]\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
+ "smlal v17.4s, v27.4h, v1.4h\n"
+ "smlal v10.4s, v28.4h, v1.4h\n"
+ "ldr d0, [x23, #0x28]\n"
+ "ssubl v0.8h, v0.8b, v14.8b\n"
+ "smlal v6.4s, v23.4h, v1.4h\n"
+ "smlal2 v8.4s, v27.8h, v1.8h\n"
+ "ldr x12, [x20, #0x80]\n"
+ "ldr x26, [x20, #0x88]\n"
"smlal2 v7.4s, v28.8h, v1.8h\n"
- "smlal v13.4s, v25.4h, v3.4h\n"
- "ldr x24, [x4, #0x88]\n"
- "ldr x15, [x4, #0x90]\n"
- "smlal2 v21.4s, v23.8h, v1.8h\n"
- "ldr d27, [x21, x0]\n"
- "smlal v20.4s, v25.4h, v2.4h\n"
+ "smlal v15.4s, v27.4h, v2.4h\n"
+ "ldr x14, [x20, #0x90]\n"
+ "ldr x15, [x20, #0x98]\n"
+ "smlal2 v16.4s, v27.8h, v2.8h\n"
+ "smlal2 v5.4s, v23.8h, v1.8h\n"
+ "ldr d27, [x19, x24]\n"
"usubl v27.8h, v27.8b, v9.8b\n"
- "smlal v8.4s, v23.4h, v2.4h\n"
- "smlal v17.4s, v31.4h, v2.4h\n"
- "ldr x21, [x4, #0x98]\n"
- "ldr x14, [x4, #0xa0]\n"
- "smlal2 v19.4s, v25.8h, v3.8h\n"
- "smlal2 v10.4s, v25.8h, v2.8h\n"
- "ldr d1, [x3, #0x30]\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
+ "smlal v17.4s, v25.4h, v2.4h\n"
+ "smlal v10.4s, v23.4h, v2.4h\n"
+ "ldr d1, [x23, #0x30]\n"
+ "ssubl v1.8h, v1.8b, v14.8b\n"
+ "smlal v6.4s, v31.4h, v2.4h\n"
+ "smlal2 v8.4s, v25.8h, v2.8h\n"
+ "ldr x21, [x20, #0xa0]\n"
+ "ldr x2, [x20, #0xa8]\n"
"smlal2 v7.4s, v23.8h, v2.8h\n"
- "smlal v13.4s, v24.4h, v4.4h\n"
- "ldr x13, [x4, #0xa8]\n"
- "ldr x12, [x4, #0xb0]\n"
- "smlal2 v21.4s, v31.8h, v2.8h\n"
- "ldr d25, [x20, x0]\n"
- "smlal v20.4s, v24.4h, v3.4h\n"
+ "smlal v15.4s, v25.4h, v3.4h\n"
+ "ldr x13, [x20, #0xb0]\n"
+ "ldr x9, [x20, #0xb8]\n"
+ "smlal2 v16.4s, v25.8h, v3.8h\n"
+ "smlal2 v5.4s, v31.8h, v2.8h\n"
+ "ldr d25, [x27, x24]\n"
"usubl v25.8h, v25.8b, v9.8b\n"
- "smlal v8.4s, v31.4h, v3.4h\n"
- "smlal v17.4s, v30.4h, v3.4h\n"
- "ldr x20, [x4, #0xb8]\n"
- "ldr x11, [x4, #0xc0]\n"
- "smlal2 v19.4s, v24.8h, v4.8h\n"
- "smlal2 v10.4s, v24.8h, v3.8h\n"
- "ldr d2, [x3, #0x38]\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
+ "smlal v17.4s, v24.4h, v3.4h\n"
+ "smlal v10.4s, v31.4h, v3.4h\n"
+ "ldr d2, [x23, #0x38]\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "smlal v6.4s, v30.4h, v3.4h\n"
+ "smlal2 v8.4s, v24.8h, v3.8h\n"
+ "ldr x19, [x20, #0xc0]\n"
+ "ldr x28, [x20, #0xc8]\n"
"smlal2 v7.4s, v31.8h, v3.8h\n"
- "smlal v13.4s, v29.4h, v0.4h\n"
- "ldr x22, [x4, #0xc8]\n"
- "ldr x9, [x4, #0xd0]\n"
- "smlal2 v21.4s, v30.8h, v3.8h\n"
- "ldr d24, [x26, x0]\n"
- "smlal v20.4s, v27.4h, v4.4h\n"
+ "smlal v15.4s, v24.4h, v4.4h\n"
+ "ldr x6, [x20, #0xd0]\n"
+ "ldr x27, [x20, #0xd8]\n"
+ "smlal2 v16.4s, v24.8h, v4.8h\n"
+ "smlal2 v5.4s, v30.8h, v3.8h\n"
+ "ldr d24, [x5, x24]\n"
"usubl v24.8h, v24.8b, v9.8b\n"
- "smlal v8.4s, v30.4h, v4.4h\n"
- "smlal v17.4s, v26.4h, v4.4h\n"
- "ldr x28, [x4, #0xd8]\n"
- "ldr x27, [x4, #0xe0]\n"
- "smlal2 v19.4s, v29.8h, v0.8h\n"
- "ldr d3, [x3, #0x40]\n"
- "smlal2 v10.4s, v27.8h, v4.8h\n"
- "ldr d27, [x25, x0]\n"
- "smlal2 v7.4s, v30.8h, v4.8h\n"
- "smlal v13.4s, v28.4h, v1.4h\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "ldr x26, [x4, #0xe8]\n"
- "smlal2 v21.4s, v26.8h, v4.8h\n"
- "ldr d4, [x3, #0x48]\n"
- "smlal v20.4s, v28.4h, v0.4h\n"
+ "smlal v17.4s, v27.4h, v4.4h\n"
+ "smlal v10.4s, v30.4h, v4.4h\n"
+ "ldr d3, [x23, #0x40]\n"
+ "ssubl v3.8h, v3.8b, v14.8b\n"
+ "smlal v6.4s, v26.4h, v4.4h\n"
+ "smlal2 v8.4s, v27.8h, v4.8h\n"
+ "ldr d27, [x11, x24]\n"
"usubl v27.8h, v27.8b, v9.8b\n"
- "smlal v8.4s, v22.4h, v0.4h\n"
- "smlal v17.4s, v25.4h, v0.4h\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "ldr x25, [x4, #0xf0]\n"
- "smlal2 v19.4s, v28.8h, v1.8h\n"
- "smlal2 v10.4s, v28.8h, v0.8h\n"
- "ldr d28, [x24, x0]\n"
- "usubl v28.8h, v28.8b, v9.8b\n"
+ "smlal2 v7.4s, v30.8h, v4.8h\n"
+ "smlal v15.4s, v29.4h, v0.4h\n"
+ "ldr x11, [x20, #0xe0]\n"
+ "ldr x17, [x20, #0xe8]\n"
+ "smlal2 v16.4s, v29.8h, v0.8h\n"
+ "smlal2 v5.4s, v26.8h, v4.8h\n"
+ "ldr d4, [x23, #0x48]\n"
+ "ssubl v4.8h, v4.8b, v14.8b\n"
+ "smlal v17.4s, v28.4h, v0.4h\n"
+ "smlal v10.4s, v22.4h, v0.4h\n"
+ "ldr x5, [x20, #0xf0]\n"
+ "ldr x25, [x20, #0xf8]\n"
+ "smlal v6.4s, v25.4h, v0.4h\n"
+ "smlal2 v8.4s, v28.8h, v0.8h\n"
+ "ldr q12, [x10, #0x0]\n"
+ "ldr q19, [x1, #0x0]\n"
"smlal2 v7.4s, v22.8h, v0.8h\n"
- "smlal v13.4s, v23.4h, v2.4h\n"
- "ldr x24, [x4, #0xf8]\n"
- "tst x1, #0x7\n"
- "smlal2 v21.4s, v25.8h, v0.8h\n"
- "ldr d0, [x3, #0x50]\n"
- "smlal v20.4s, v23.4h, v1.4h\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
- "smlal v8.4s, v25.4h, v1.4h\n"
- "smlal v17.4s, v24.4h, v1.4h\n"
- "add x5, x5, #0x20\n"
- "smlal2 v19.4s, v23.8h, v2.8h\n"
- "smlal2 v10.4s, v23.8h, v1.8h\n"
- "ldr d23, [x23, x0]\n"
- "usubl v23.8h, v23.8b, v9.8b\n"
+ "smlal v15.4s, v28.4h, v1.4h\n"
+ "ldr q20, [x10, #0x10]\n"
+ "ldr q29, [x1, #0x10]\n"
+ "smlal2 v16.4s, v28.8h, v1.8h\n"
+ "smlal2 v5.4s, v25.8h, v0.8h\n"
+ "ldr d28, [x26, x24]\n"
+ "ldr d0, [x23, #0x50]\n"
+ "smlal v17.4s, v23.4h, v1.4h\n"
+ "smlal v10.4s, v25.4h, v1.4h\n"
+ "usubl v28.8h, v28.8b, v9.8b\n"
+ "ldr x26, [x20, #0x100]\n"
+ "smlal v6.4s, v24.4h, v1.4h\n"
+ "smlal2 v8.4s, v23.8h, v1.8h\n"
+ "ssubl v0.8h, v0.8b, v14.8b\n"
+ "tst x0, #0x7\n"
"smlal2 v7.4s, v25.8h, v1.8h\n"
- "smlal v13.4s, v31.4h, v3.4h\n"
- "ldr x23, [x4, #0x100]\n"
- "smlal2 v21.4s, v24.8h, v1.8h\n"
- "ldr d1, [x3, #0x58]\n"
- "smlal v20.4s, v31.4h, v2.4h\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
- "smlal v8.4s, v24.4h, v2.4h\n"
- "smlal v17.4s, v27.4h, v2.4h\n"
- "smlal2 v19.4s, v31.8h, v3.8h\n"
- "smlal2 v10.4s, v31.8h, v2.8h\n"
- "ldr d31, [x15, x0]\n"
- "usubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v15.4s, v23.4h, v2.4h\n"
+ "add x10, x10, #0x20\n"
+ "add x1, x1, #0x20\n"
+ "smlal2 v16.4s, v23.8h, v2.8h\n"
+ "ldr d23, [x12, x24]\n"
+ "smlal2 v5.4s, v24.8h, v1.8h\n"
+ "usubl v23.8h, v23.8b, v9.8b\n"
+ "smlal v17.4s, v31.4h, v2.4h\n"
+ "smlal v10.4s, v24.4h, v2.4h\n"
+ "ldr d1, [x23, #0x58]\n"
+ "ssubl v1.8h, v1.8b, v14.8b\n"
+ "smlal v6.4s, v27.4h, v2.4h\n"
+ "smlal2 v8.4s, v31.8h, v2.8h\n"
+ "ldr x12, [x20, #0x108]\n"
"smlal2 v7.4s, v24.8h, v2.8h\n"
- "smlal v13.4s, v30.4h, v4.4h\n"
- "ldr x15, [x4, #0x108]\n"
- "smlal2 v21.4s, v27.8h, v2.8h\n"
- "ldr d2, [x3, #0x60]\n"
- "smlal v20.4s, v30.4h, v3.4h\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
- "smlal v8.4s, v27.4h, v3.4h\n"
- "smlal v17.4s, v23.4h, v3.4h\n"
- "smlal2 v19.4s, v30.8h, v4.8h\n"
- "smlal2 v10.4s, v30.8h, v3.8h\n"
- "ldr d30, [x21, x0]\n"
- "usubl v30.8h, v30.8b, v9.8b\n"
+ "smlal v15.4s, v31.4h, v3.4h\n"
+ "smlal2 v16.4s, v31.8h, v3.8h\n"
+ "smlal2 v5.4s, v27.8h, v2.8h\n"
+ "ldr d31, [x14, x24]\n"
+ "usubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v17.4s, v30.4h, v3.4h\n"
+ "smlal v10.4s, v27.4h, v3.4h\n"
+ "ldr d2, [x23, #0x60]\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "smlal v6.4s, v23.4h, v3.4h\n"
+ "smlal2 v8.4s, v30.8h, v3.8h\n"
+ "ldr x14, [x20, #0x110]\n"
"smlal2 v7.4s, v27.8h, v3.8h\n"
- "smlal v13.4s, v22.4h, v0.4h\n"
- "ldr x21, [x4, #0x110]\n"
- "smlal2 v21.4s, v23.8h, v3.8h\n"
- "ldr d3, [x3, #0x68]\n"
- "smlal v20.4s, v26.4h, v4.4h\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "smlal v8.4s, v23.4h, v4.4h\n"
- "smlal v17.4s, v28.4h, v4.4h\n"
- "smlal2 v19.4s, v22.8h, v0.8h\n"
- "ldr d22, [x20, x0]\n"
- "smlal2 v10.4s, v26.8h, v4.8h\n"
- "ldr d26, [x14, x0]\n"
- "smlal2 v7.4s, v23.8h, v4.8h\n"
- "smlal v13.4s, v25.4h, v1.4h\n"
+ "smlal v15.4s, v30.4h, v4.4h\n"
+ "smlal2 v16.4s, v30.8h, v4.8h\n"
+ "ldr d30, [x15, x24]\n"
+ "smlal2 v5.4s, v23.8h, v3.8h\n"
+ "usubl v30.8h, v30.8b, v9.8b\n"
+ "smlal v17.4s, v26.4h, v4.4h\n"
+ "smlal v10.4s, v23.4h, v4.4h\n"
+ "ldr d3, [x23, #0x68]\n"
+ "ssubl v3.8h, v3.8b, v14.8b\n"
+ "smlal v6.4s, v28.4h, v4.4h\n"
+ "smlal2 v8.4s, v26.8h, v4.8h\n"
+ "ldr d26, [x21, x24]\n"
"usubl v26.8h, v26.8b, v9.8b\n"
- "ldr x20, [x4, #0x118]\n"
- "smlal2 v21.4s, v28.8h, v4.8h\n"
- "ldr d4, [x3, #0x70]\n"
- "smlal v20.4s, v25.4h, v0.4h\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "smlal v8.4s, v31.4h, v0.4h\n"
- "smlal v17.4s, v30.4h, v0.4h\n"
+ "smlal2 v7.4s, v23.8h, v4.8h\n"
+ "smlal v15.4s, v22.4h, v0.4h\n"
+ "ldr x21, [x20, #0x118]\n"
+ "smlal2 v16.4s, v22.8h, v0.8h\n"
+ "smlal2 v5.4s, v28.8h, v4.8h\n"
+ "ldr d4, [x23, #0x70]\n"
+ "ldr d22, [x9, x24]\n"
+ "smlal v17.4s, v25.4h, v0.4h\n"
+ "smlal v10.4s, v31.4h, v0.4h\n"
+ "ssubl v4.8h, v4.8b, v14.8b\n"
+ "smlal v6.4s, v30.4h, v0.4h\n"
+ "smlal2 v8.4s, v25.8h, v0.8h\n"
"usubl v22.8h, v22.8b, v9.8b\n"
- "smlal2 v19.4s, v25.8h, v1.8h\n"
- "smlal2 v10.4s, v25.8h, v0.8h\n"
- "ldr d25, [x13, x0]\n"
- "usubl v25.8h, v25.8b, v9.8b\n"
"smlal2 v7.4s, v31.8h, v0.8h\n"
- "smlal v13.4s, v24.4h, v2.4h\n"
- "smlal2 v21.4s, v30.8h, v0.8h\n"
- "ldr d0, [x3, #0x78]\n"
- "smlal v20.4s, v24.4h, v1.4h\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
- "smlal v8.4s, v30.4h, v1.4h\n"
- "smlal v17.4s, v26.4h, v1.4h\n"
- "smlal2 v19.4s, v24.8h, v2.8h\n"
- "smlal2 v10.4s, v24.8h, v1.8h\n"
- "ldr d24, [x12, x0]\n"
- "usubl v24.8h, v24.8b, v9.8b\n"
+ "smlal v15.4s, v25.4h, v1.4h\n"
+ "smlal2 v16.4s, v25.8h, v1.8h\n"
+ "ldr d25, [x2, x24]\n"
+ "smlal2 v5.4s, v30.8h, v0.8h\n"
+ "usubl v25.8h, v25.8b, v9.8b\n"
+ "smlal v17.4s, v24.4h, v1.4h\n"
+ "smlal v10.4s, v30.4h, v1.4h\n"
+ "ldr d0, [x23, #0x78]\n"
+ "ssubl v0.8h, v0.8b, v14.8b\n"
+ "smlal v6.4s, v26.4h, v1.4h\n"
+ "smlal2 v8.4s, v24.8h, v1.8h\n"
"smlal2 v7.4s, v30.8h, v1.8h\n"
- "smlal v13.4s, v27.4h, v3.4h\n"
- "smlal2 v21.4s, v26.8h, v1.8h\n"
- "ldr d1, [x3, #0x80]\n"
- "smlal v20.4s, v27.4h, v2.4h\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
- "smlal v8.4s, v26.4h, v2.4h\n"
- "smlal v17.4s, v25.4h, v2.4h\n"
- "smlal2 v19.4s, v27.8h, v3.8h\n"
- "smlal2 v10.4s, v27.8h, v2.8h\n"
- "ldr d27, [x11, x0]\n"
- "usubl v27.8h, v27.8b, v9.8b\n"
+ "smlal v15.4s, v24.4h, v2.4h\n"
+ "smlal2 v16.4s, v24.8h, v2.8h\n"
+ "ldr d24, [x13, x24]\n"
+ "smlal2 v5.4s, v26.8h, v1.8h\n"
+ "usubl v24.8h, v24.8b, v9.8b\n"
+ "smlal v17.4s, v27.4h, v2.4h\n"
+ "smlal v10.4s, v26.4h, v2.4h\n"
+ "ldr d1, [x23, #0x80]\n"
+ "ssubl v1.8h, v1.8b, v14.8b\n"
+ "smlal v6.4s, v25.4h, v2.4h\n"
+ "smlal2 v8.4s, v27.8h, v2.8h\n"
"smlal2 v7.4s, v26.8h, v2.8h\n"
- "smlal v13.4s, v23.4h, v4.4h\n"
- "smlal2 v21.4s, v25.8h, v2.8h\n"
- "ldr d2, [x3, #0x88]\n"
- "smlal v20.4s, v23.4h, v3.4h\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
- "smlal v8.4s, v25.4h, v3.4h\n"
- "smlal v17.4s, v24.4h, v3.4h\n"
- "smlal2 v19.4s, v23.8h, v4.8h\n"
- "smlal2 v10.4s, v23.8h, v3.8h\n"
- "ldr d23, [x22, x0]\n"
- "usubl v23.8h, v23.8b, v9.8b\n"
+ "smlal v15.4s, v27.4h, v3.4h\n"
+ "smlal2 v16.4s, v27.8h, v3.8h\n"
+ "smlal2 v5.4s, v25.8h, v2.8h\n"
+ "ldr d27, [x19, x24]\n"
+ "usubl v27.8h, v27.8b, v9.8b\n"
+ "smlal v17.4s, v23.4h, v3.4h\n"
+ "smlal v10.4s, v25.4h, v3.4h\n"
+ "ldr d2, [x23, #0x88]\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "smlal v6.4s, v24.4h, v3.4h\n"
+ "smlal2 v8.4s, v23.8h, v3.8h\n"
"smlal2 v7.4s, v25.8h, v3.8h\n"
- "smlal v13.4s, v31.4h, v0.4h\n"
- "smlal2 v21.4s, v24.8h, v3.8h\n"
- "ldr d3, [x3, #0x90]\n"
- "smlal v20.4s, v28.4h, v4.4h\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "smlal v8.4s, v24.4h, v4.4h\n"
- "smlal v17.4s, v22.4h, v4.4h\n"
- "smlal2 v19.4s, v31.8h, v0.8h\n"
- "ldr d31, [x9, x0]\n"
- "smlal2 v10.4s, v28.8h, v4.8h\n"
- "ldr d28, [x27, x0]\n"
+ "smlal v15.4s, v23.4h, v4.4h\n"
+ "smlal2 v16.4s, v23.8h, v4.8h\n"
+ "ldr d23, [x28, x24]\n"
+ "smlal2 v5.4s, v24.8h, v3.8h\n"
+ "usubl v23.8h, v23.8b, v9.8b\n"
+ "smlal v17.4s, v28.4h, v4.4h\n"
+ "smlal v10.4s, v24.4h, v4.4h\n"
+ "ldr d3, [x23, #0x90]\n"
+ "ssubl v3.8h, v3.8b, v14.8b\n"
+ "smlal v6.4s, v22.4h, v4.4h\n"
+ "smlal2 v8.4s, v28.8h, v4.8h\n"
+ "ldr d28, [x11, x24]\n"
+ "usubl v28.8h, v28.8b, v9.8b\n"
"smlal2 v7.4s, v24.8h, v4.8h\n"
- "smlal v13.4s, v30.4h, v1.4h\n"
+ "smlal v15.4s, v31.4h, v0.4h\n"
+ "smlal2 v16.4s, v31.8h, v0.8h\n"
+ "ldr d31, [x6, x24]\n"
+ "smlal2 v5.4s, v22.8h, v4.8h\n"
"usubl v31.8h, v31.8b, v9.8b\n"
- "smlal2 v21.4s, v22.8h, v4.8h\n"
- "ldr d4, [x3, #0x98]\n"
- "smlal v20.4s, v30.4h, v0.4h\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "smlal v8.4s, v27.4h, v0.4h\n"
- "smlal v17.4s, v23.4h, v0.4h\n"
- "usubl v28.8h, v28.8b, v9.8b\n"
- "smlal2 v19.4s, v30.8h, v1.8h\n"
- "smlal2 v10.4s, v30.8h, v0.8h\n"
- "ldr d30, [x28, x0]\n"
- "usubl v30.8h, v30.8b, v9.8b\n"
+ "smlal v17.4s, v30.4h, v0.4h\n"
+ "smlal v10.4s, v27.4h, v0.4h\n"
+ "ldr d4, [x23, #0x98]\n"
+ "ssubl v4.8h, v4.8b, v14.8b\n"
+ "smlal v6.4s, v23.4h, v0.4h\n"
+ "smlal2 v8.4s, v30.8h, v0.8h\n"
"smlal2 v7.4s, v27.8h, v0.8h\n"
- "smlal v13.4s, v26.4h, v2.4h\n"
- "smlal2 v21.4s, v23.8h, v0.8h\n"
- "ldr d0, [x3, #0xa0]\n"
- "smlal v20.4s, v26.4h, v1.4h\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
- "smlal v8.4s, v23.4h, v1.4h\n"
- "smlal v17.4s, v31.4h, v1.4h\n"
- "smlal2 v19.4s, v26.8h, v2.8h\n"
- "smlal2 v10.4s, v26.8h, v1.8h\n"
- "ldr d26, [x26, x0]\n"
- "usubl v26.8h, v26.8b, v9.8b\n"
+ "smlal v15.4s, v30.4h, v1.4h\n"
+ "smlal2 v16.4s, v30.8h, v1.8h\n"
+ "ldr d30, [x27, x24]\n"
+ "smlal2 v5.4s, v23.8h, v0.8h\n"
+ "usubl v30.8h, v30.8b, v9.8b\n"
+ "smlal v17.4s, v26.4h, v1.4h\n"
+ "smlal v10.4s, v23.4h, v1.4h\n"
+ "ldr d0, [x23, #0xa0]\n"
+ "ssubl v0.8h, v0.8b, v14.8b\n"
+ "smlal v6.4s, v31.4h, v1.4h\n"
+ "smlal2 v8.4s, v26.8h, v1.8h\n"
"smlal2 v7.4s, v23.8h, v1.8h\n"
- "smlal v13.4s, v25.4h, v3.4h\n"
- "smlal2 v21.4s, v31.8h, v1.8h\n"
- "ldr d1, [x3, #0xa8]\n"
- "smlal v20.4s, v25.4h, v2.4h\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
- "smlal v8.4s, v31.4h, v2.4h\n"
- "smlal v17.4s, v30.4h, v2.4h\n"
- "smlal2 v19.4s, v25.8h, v3.8h\n"
- "smlal2 v10.4s, v25.8h, v2.8h\n"
- "ldr d25, [x25, x0]\n"
- "usubl v25.8h, v25.8b, v9.8b\n"
+ "smlal v15.4s, v26.4h, v2.4h\n"
+ "smlal2 v16.4s, v26.8h, v2.8h\n"
+ "smlal2 v5.4s, v31.8h, v1.8h\n"
+ "ldr d26, [x17, x24]\n"
+ "usubl v26.8h, v26.8b, v9.8b\n"
+ "smlal v17.4s, v25.4h, v2.4h\n"
+ "smlal v10.4s, v31.4h, v2.4h\n"
+ "ldr d1, [x23, #0xa8]\n"
+ "ssubl v1.8h, v1.8b, v14.8b\n"
+ "smlal v6.4s, v30.4h, v2.4h\n"
+ "smlal2 v8.4s, v25.8h, v2.8h\n"
"smlal2 v7.4s, v31.8h, v2.8h\n"
- "smlal v13.4s, v24.4h, v4.4h\n"
- "smlal2 v21.4s, v30.8h, v2.8h\n"
- "ldr d2, [x3, #0xb0]\n"
- "smlal v20.4s, v24.4h, v3.4h\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
- "smlal v8.4s, v30.4h, v3.4h\n"
- "smlal v17.4s, v28.4h, v3.4h\n"
- "smlal2 v19.4s, v24.8h, v4.8h\n"
- "smlal2 v10.4s, v24.8h, v3.8h\n"
- "ldr d24, [x24, x0]\n"
- "usubl v24.8h, v24.8b, v9.8b\n"
+ "smlal v15.4s, v25.4h, v3.4h\n"
+ "smlal2 v16.4s, v25.8h, v3.8h\n"
+ "smlal2 v5.4s, v30.8h, v2.8h\n"
+ "ldr d25, [x5, x24]\n"
+ "usubl v25.8h, v25.8b, v9.8b\n"
+ "smlal v17.4s, v24.4h, v3.4h\n"
+ "smlal v10.4s, v30.4h, v3.4h\n"
+ "ldr d2, [x23, #0xb0]\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "smlal v6.4s, v28.4h, v3.4h\n"
+ "smlal2 v8.4s, v24.8h, v3.8h\n"
"smlal2 v7.4s, v30.8h, v3.8h\n"
- "smlal v13.4s, v27.4h, v0.4h\n"
- "smlal2 v21.4s, v28.8h, v3.8h\n"
- "ldr d3, [x3, #0xb8]\n"
- "smlal v20.4s, v22.4h, v4.4h\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "smlal v8.4s, v28.4h, v4.4h\n"
- "smlal v17.4s, v26.4h, v4.4h\n"
- "smlal2 v19.4s, v27.8h, v0.8h\n"
- "ldr d27, [x23, x0]\n"
+ "smlal v15.4s, v24.4h, v4.4h\n"
+ "smlal2 v16.4s, v24.8h, v4.8h\n"
+ "ldr d24, [x25, x24]\n"
+ "smlal2 v5.4s, v28.8h, v3.8h\n"
+ "usubl v24.8h, v24.8b, v9.8b\n"
+ "smlal v17.4s, v22.4h, v4.4h\n"
+ "smlal v10.4s, v28.4h, v4.4h\n"
+ "ldr d3, [x23, #0xb8]\n"
+ "ssubl v3.8h, v3.8b, v14.8b\n"
+ "smlal v6.4s, v26.4h, v4.4h\n"
"smlal2 v7.4s, v28.8h, v4.8h\n"
+ "smlal v15.4s, v27.4h, v0.4h\n"
+ "smlal2 v16.4s, v27.8h, v0.8h\n"
+ "ldr d27, [x26, x24]\n"
"usubl v27.8h, v27.8b, v9.8b\n"
- "smlal v13.4s, v23.4h, v1.4h\n"
- "smlal2 v10.4s, v22.8h, v4.8h\n"
- "ldr q22, [x8, #0x10]\n"
- "add x8, x8, #0x20\n"
- "smlal2 v21.4s, v26.8h, v4.8h\n"
- "ldr d4, [x3, #0xc0]\n"
- "smlal v20.4s, v23.4h, v0.4h\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "smlal v8.4s, v25.4h, v0.4h\n"
- "smlal v17.4s, v24.4h, v0.4h\n"
- "smlal2 v19.4s, v23.8h, v1.8h\n"
+ "smlal2 v8.4s, v22.8h, v4.8h\n"
+ "smlal2 v5.4s, v26.8h, v4.8h\n"
+ "ldr d4, [x23, #0xc0]\n"
+ "ssubl v4.8h, v4.8b, v14.8b\n"
+ "smlal v17.4s, v23.4h, v0.4h\n"
+ "smlal v10.4s, v25.4h, v0.4h\n"
+ "smlal v6.4s, v24.4h, v0.4h\n"
"smlal2 v7.4s, v25.8h, v0.8h\n"
- "ldr d25, [x15, x0]\n"
+ "ldr d25, [x12, x24]\n"
"usubl v25.8h, v25.8b, v9.8b\n"
- "smlal v13.4s, v31.4h, v2.4h\n"
- "smlal2 v10.4s, v23.8h, v0.8h\n"
- "smlal2 v21.4s, v24.8h, v0.8h\n"
- "smlal v20.4s, v31.4h, v1.4h\n"
- "smlal v8.4s, v24.4h, v1.4h\n"
- "smlal v17.4s, v27.4h, v1.4h\n"
- "smlal2 v19.4s, v31.8h, v2.8h\n"
+ "smlal2 v8.4s, v23.8h, v0.8h\n"
+ "smlal2 v5.4s, v24.8h, v0.8h\n"
+ "smlal v15.4s, v23.4h, v1.4h\n"
+ "smlal v17.4s, v31.4h, v1.4h\n"
+ "smlal v10.4s, v24.4h, v1.4h\n"
+ "smlal v6.4s, v27.4h, v1.4h\n"
"smlal2 v7.4s, v24.8h, v1.8h\n"
- "ldr d24, [x21, x0]\n"
+ "ldr d24, [x14, x24]\n"
+ "smlal2 v16.4s, v23.8h, v1.8h\n"
"usubl v24.8h, v24.8b, v9.8b\n"
- "smlal v13.4s, v30.4h, v3.4h\n"
- "smlal2 v10.4s, v31.8h, v1.8h\n"
- "smlal2 v21.4s, v27.8h, v1.8h\n"
- "smlal v20.4s, v30.4h, v2.4h\n"
- "smlal v8.4s, v27.4h, v2.4h\n"
- "smlal v17.4s, v25.4h, v2.4h\n"
- "smlal2 v19.4s, v30.8h, v3.8h\n"
+ "smlal2 v8.4s, v31.8h, v1.8h\n"
+ "smlal2 v5.4s, v27.8h, v1.8h\n"
+ "smlal v15.4s, v31.4h, v2.4h\n"
+ "smlal v17.4s, v30.4h, v2.4h\n"
+ "smlal v10.4s, v27.4h, v2.4h\n"
+ "smlal v6.4s, v25.4h, v2.4h\n"
"smlal2 v7.4s, v27.8h, v2.8h\n"
- "ldr d27, [x20, x0]\n"
+ "ldr d27, [x21, x24]\n"
+ "smlal2 v16.4s, v31.8h, v2.8h\n"
"usubl v27.8h, v27.8b, v9.8b\n"
- "smlal v13.4s, v28.4h, v4.4h\n"
- "smlal2 v10.4s, v30.8h, v2.8h\n"
- "sqrdmulh v13.4s, v13.4s, v18.4s\n"
- "add x0, x0, #0x8\n"
- "smlal2 v21.4s, v25.8h, v2.8h\n"
- "smlal v20.4s, v28.4h, v3.4h\n"
- "and v30.16b, v13.16b, v6.16b\n"
- "smlal v8.4s, v25.4h, v3.4h\n"
- "smlal v17.4s, v24.4h, v3.4h\n"
- "sshr v30.4s, v30.4s, #0x1f\n"
- "smlal2 v19.4s, v28.8h, v4.8h\n"
- "smlal2 v10.4s, v28.8h, v3.8h\n"
- "sqrdmulh v19.4s, v19.4s, v5.4s\n"
+ "smlal2 v8.4s, v30.8h, v2.8h\n"
+ "smlal2 v5.4s, v25.8h, v2.8h\n"
+ "add x24, x24, #0x8\n"
+ "smlal v15.4s, v30.4h, v3.4h\n"
+ "smlal v17.4s, v28.4h, v3.4h\n"
+ "smlal v10.4s, v25.4h, v3.4h\n"
+ "smlal v6.4s, v24.4h, v3.4h\n"
+ "smlal2 v16.4s, v30.8h, v3.8h\n"
+ "smlal2 v8.4s, v28.8h, v3.8h\n"
"smlal2 v7.4s, v25.8h, v3.8h\n"
- "smlal2 v21.4s, v24.8h, v3.8h\n"
- "and v16.16b, v19.16b, v22.16b\n"
- "smlal v20.4s, v26.4h, v4.4h\n"
- "smlal v8.4s, v24.4h, v4.4h\n"
- "sqrdmulh v20.4s, v20.4s, v18.4s\n"
- "smlal v17.4s, v27.4h, v4.4h\n"
- "smlal2 v10.4s, v26.8h, v4.8h\n"
- "sqrdmulh v8.4s, v8.4s, v18.4s\n"
+ "smlal2 v5.4s, v24.8h, v3.8h\n"
+ "smlal v15.4s, v28.4h, v4.4h\n"
+ "smlal v17.4s, v26.4h, v4.4h\n"
+ "sqrdmulh v15.4s, v15.4s, v12.4s\n"
+ "smlal v10.4s, v24.4h, v4.4h\n"
+ "smlal v6.4s, v27.4h, v4.4h\n"
+ "sqrdmulh v17.4s, v17.4s, v12.4s\n"
+ "smlal2 v16.4s, v28.8h, v4.8h\n"
+ "smlal2 v8.4s, v26.8h, v4.8h\n"
+ "sqrdmulh v10.4s, v10.4s, v12.4s\n"
"smlal2 v7.4s, v24.8h, v4.8h\n"
- "smlal2 v21.4s, v27.8h, v4.8h\n"
- "sqrdmulh v17.4s, v17.4s, v18.4s\n"
- "sqadd v13.4s, v13.4s, v30.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "and v0.16b, v20.16b, v6.16b\n"
- "sqrdmulh v10.4s, v10.4s, v5.4s\n"
- "and v18.16b, v8.16b, v6.16b\n"
- "sqrdmulh v7.4s, v7.4s, v5.4s\n"
- "and v30.16b, v17.16b, v6.16b\n"
- "sqrdmulh v21.4s, v21.4s, v5.4s\n"
- "sqadd v19.4s, v19.4s, v16.4s\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "and v26.16b, v10.16b, v22.16b\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "and v23.16b, v7.16b, v22.16b\n"
- "sshr v30.4s, v30.4s, #0x1f\n"
- "and v16.16b, v21.16b, v22.16b\n"
- "sqadd v20.4s, v20.4s, v0.4s\n"
- "sshr v26.4s, v26.4s, #0x1f\n"
- "sqadd v8.4s, v8.4s, v18.4s\n"
+ "smlal2 v5.4s, v27.8h, v4.8h\n"
+ "sqrdmulh v6.4s, v6.4s, v12.4s\n"
+ "and v23.16b, v15.16b, v19.16b\n"
+ "sqrdmulh v16.4s, v16.4s, v20.4s\n"
+ "and v22.16b, v17.16b, v19.16b\n"
+ "sqrdmulh v8.4s, v8.4s, v20.4s\n"
+ "and v21.16b, v10.16b, v19.16b\n"
+ "sqrdmulh v7.4s, v7.4s, v20.4s\n"
+ "and v26.16b, v6.16b, v19.16b\n"
+ "sqrdmulh v5.4s, v5.4s, v20.4s\n"
"sshr v23.4s, v23.4s, #0x1f\n"
- "sqadd v17.4s, v17.4s, v30.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "srshl v13.4s, v13.4s, v6.4s\n"
- "srshl v20.4s, v20.4s, v6.4s\n"
- "sqadd v10.4s, v10.4s, v26.4s\n"
- "srshl v8.4s, v8.4s, v6.4s\n"
- "sqadd v7.4s, v7.4s, v23.4s\n"
- "srshl v17.4s, v17.4s, v6.4s\n"
- "sqadd v21.4s, v21.4s, v16.4s\n"
- "srshl v19.4s, v19.4s, v22.4s\n"
- "sqxtn v13.4h, v13.4s\n"
- "srshl v10.4s, v10.4s, v22.4s\n"
- "sqxtn v20.4h, v20.4s\n"
- "srshl v7.4s, v7.4s, v22.4s\n"
- "sqxtn v8.4h, v8.4s\n"
- "srshl v21.4s, v21.4s, v22.4s\n"
+ "and v4.16b, v16.16b, v29.16b\n"
+ "sshr v22.4s, v22.4s, #0x1f\n"
+ "and v2.16b, v8.16b, v29.16b\n"
+ "sshr v21.4s, v21.4s, #0x1f\n"
+ "and v3.16b, v7.16b, v29.16b\n"
+ "sshr v26.4s, v26.4s, #0x1f\n"
+ "and v25.16b, v5.16b, v29.16b\n"
+ "sqadd v15.4s, v15.4s, v23.4s\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "sqadd v17.4s, v17.4s, v22.4s\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sqadd v10.4s, v10.4s, v21.4s\n"
+ "sshr v3.4s, v3.4s, #0x1f\n"
+ "sqadd v6.4s, v6.4s, v26.4s\n"
+ "sshr v25.4s, v25.4s, #0x1f\n"
+ "srshl v15.4s, v15.4s, v19.4s\n"
+ "sqadd v16.4s, v16.4s, v4.4s\n"
+ "srshl v17.4s, v17.4s, v19.4s\n"
+ "sqadd v8.4s, v8.4s, v2.4s\n"
+ "srshl v10.4s, v10.4s, v19.4s\n"
+ "sqadd v7.4s, v7.4s, v3.4s\n"
+ "srshl v6.4s, v6.4s, v19.4s\n"
+ "sqadd v5.4s, v5.4s, v25.4s\n"
+ "srshl v16.4s, v16.4s, v29.4s\n"
+ "sqxtn v15.4h, v15.4s\n"
+ "srshl v8.4s, v8.4s, v29.4s\n"
"sqxtn v17.4h, v17.4s\n"
- "sqxtn2 v13.8h, v19.4s\n"
- "sqxtn2 v20.8h, v10.4s\n"
- "sqxtn2 v8.8h, v7.4s\n"
- "sqxtn2 v17.8h, v21.4s\n"
- "sqadd v13.8h, v13.8h, v14.8h\n"
- "sqadd v20.8h, v20.8h, v14.8h\n"
- "sqadd v8.8h, v8.8h, v14.8h\n"
- "sqadd v17.8h, v17.8h, v14.8h\n"
- "smax v13.8h, v13.8h, v12.8h\n"
- "smax v20.8h, v20.8h, v12.8h\n"
- "smax v8.8h, v8.8h, v12.8h\n"
- "smax v17.8h, v17.8h, v12.8h\n"
- "smin v13.8h, v13.8h, v11.8h\n"
- "smin v20.8h, v20.8h, v11.8h\n"
- "smin v8.8h, v8.8h, v11.8h\n"
- "smin v17.8h, v17.8h, v11.8h\n"
- "uzp1 v13.16b, v13.16b, v13.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "str d13, [x17, x10]\n"
- "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "srshl v7.4s, v7.4s, v29.4s\n"
+ "sqxtn v10.4h, v10.4s\n"
+ "srshl v5.4s, v5.4s, v29.4s\n"
+ "sqxtn v6.4h, v6.4s\n"
+ "sqxtn2 v15.8h, v16.4s\n"
+ "sqxtn2 v17.8h, v8.4s\n"
+ "sqxtn2 v10.8h, v7.4s\n"
+ "sqxtn2 v6.8h, v5.4s\n"
+ "sqadd v15.8h, v15.8h, v18.8h\n"
+ "sqadd v17.8h, v17.8h, v18.8h\n"
+ "sqadd v10.8h, v10.8h, v18.8h\n"
+ "sqadd v6.8h, v6.8h, v18.8h\n"
+ "smax v15.8h, v15.8h, v11.8h\n"
+ "smax v17.8h, v17.8h, v11.8h\n"
+ "smax v10.8h, v10.8h, v11.8h\n"
+ "smax v6.8h, v6.8h, v11.8h\n"
+ "smin v15.8h, v15.8h, v13.8h\n"
+ "smin v17.8h, v17.8h, v13.8h\n"
+ "smin v10.8h, v10.8h, v13.8h\n"
+ "smin v6.8h, v6.8h, v13.8h\n"
+ "uzp1 v15.16b, v15.16b, v15.16b\n"
"uzp1 v17.16b, v17.16b, v17.16b\n"
- "str d20, [x6, x10]\n"
- "str d8, [x7, x10]\n"
- "str d17, [x16, x10]\n"
- "add x10, x10, #0x8\n"
+ "str d15, [x16, x22]\n"
+ "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "str d17, [x8, x22]\n"
+ "str d10, [x4, x22]\n"
+ "str d6, [x7, x22]\n"
+ "add x22, x22, #0x8\n"
"beq 124f\n"
- "add x3, x3, #0xc8\n"
+ "add x23, x23, #0xc8\n"
"3:" // Oddments
- "ldr x13, [%x[params], %[offsetof_Params_bias]]\n"
- "tbz x1, #2, 5f\n"
- "ld1 { v13.4s }, [x13], #0x10\n"
- "tbz x1, #1, 4f\n"
- "ld1 { v19.d }[0], [x13], #0x8\n"
- "tbz x1, #0, 7f\n"
- "ld1 { v19.s }[2], [x13]\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "tbz x0, #2, 5f\n"
+ "ld1 { v15.4s }, [x19], #0x10\n"
+ "tbz x0, #1, 4f\n"
+ "ld1 { v16.d }[0], [x19], #0x8\n"
+ "tbz x0, #0, 7f\n"
+ "ld1 { v16.s }[2], [x19]\n"
"b 7f\n"
"4:" // Oddments: Load bias: Bit 2: Bit 1: Unset
- "tbz x1, #0, 7f\n"
- "ld1 { v19.s }[0], [x13]\n"
+ "tbz x0, #0, 7f\n"
+ "ld1 { v16.s }[0], [x19]\n"
"b 7f\n"
"5:" // Oddments: Load bias: Bit 2: Unset
- "tbz x1, #1, 6f\n"
- "ld1 { v13.d }[0], [x13], #0x8\n"
- "tbz x1, #0, 7f\n"
- "ld1 { v13.s }[2], [x13]\n"
+ "tbz x0, #1, 6f\n"
+ "ld1 { v15.d }[0], [x19], #0x8\n"
+ "tbz x0, #0, 7f\n"
+ "ld1 { v15.s }[2], [x19]\n"
"b 7f\n"
"6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 7f\n"
- "ld1 { v13.s }[0], [x13]\n"
+ "tbz x0, #0, 7f\n"
+ "ld1 { v15.s }[0], [x19]\n"
"7:" // Oddments: Load bias: Bit 2: End
- "ldr d0, [x3, #0x0]\n"
- "ldr d1, [x3, #0x8]\n"
- "mov v20.16b, v13.16b\n"
- "mov v10.16b, v19.16b\n"
- "ldr d2, [x3, #0x10]\n"
- "ldr d3, [x3, #0x18]\n"
- "mov v8.16b, v13.16b\n"
- "mov v7.16b, v19.16b\n"
- "ldr d4, [x3, #0x20]\n"
- "ldp x9, x28, [x4, #0x0]\n"
- "mov v17.16b, v13.16b\n"
- "mov v21.16b, v19.16b\n"
- "ldp x27, x26, [x4, #0x10]\n"
- "ldp x25, x24, [x4, #0x20]\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
- "ldp x23, x22, [x4, #0x30]\n"
- "ldp x21, x20, [x4, #0x40]\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "add x9, x9, x0\n"
- "add x28, x28, x0\n"
- "add x27, x27, x0\n"
- "add x26, x26, x0\n"
- "add x25, x25, x0\n"
- "add x24, x24, x0\n"
- "add x23, x23, x0\n"
- "add x22, x22, x0\n"
- "add x21, x21, x0\n"
- "add x20, x20, x0\n"
- "tbz x1, #2, 9f\n"
- "ld1 { v31.s }[0], [x9], #0x4\n"
- "ld1 { v30.s }[0], [x28], #0x4\n"
- "ld1 { v29.s }[0], [x27], #0x4\n"
- "ld1 { v28.s }[0], [x26], #0x4\n"
- "ld1 { v27.s }[0], [x25], #0x4\n"
- "ld1 { v23.s }[0], [x24], #0x4\n"
- "ld1 { v25.s }[0], [x23], #0x4\n"
- "ld1 { v24.s }[0], [x22], #0x4\n"
- "ld1 { v26.s }[0], [x21], #0x4\n"
- "ld1 { v22.s }[0], [x20], #0x4\n"
- "tbz x1, #1, 8f\n"
- "ld1 { v31.h }[2], [x9], #0x2\n"
- "ld1 { v30.h }[2], [x28], #0x2\n"
- "ld1 { v29.h }[2], [x27], #0x2\n"
- "ld1 { v28.h }[2], [x26], #0x2\n"
- "ld1 { v27.h }[2], [x25], #0x2\n"
- "ld1 { v23.h }[2], [x24], #0x2\n"
- "ld1 { v25.h }[2], [x23], #0x2\n"
- "ld1 { v24.h }[2], [x22], #0x2\n"
- "ld1 { v26.h }[2], [x21], #0x2\n"
- "ld1 { v22.h }[2], [x20], #0x2\n"
- "tbz x1, #0, 11f\n"
- "ld1 { v31.b }[6], [x9]\n"
- "ld1 { v30.b }[6], [x28]\n"
- "ld1 { v29.b }[6], [x27]\n"
- "ld1 { v28.b }[6], [x26]\n"
- "ld1 { v27.b }[6], [x25]\n"
- "ld1 { v23.b }[6], [x24]\n"
- "ld1 { v25.b }[6], [x23]\n"
- "ld1 { v24.b }[6], [x22]\n"
- "ld1 { v26.b }[6], [x21]\n"
- "ld1 { v22.b }[6], [x20]\n"
+ "ldr d0, [x23, #0x0]\n"
+ "ldr d1, [x23, #0x8]\n"
+ "mov v17.16b, v15.16b\n"
+ "mov v8.16b, v16.16b\n"
+ "ldr d2, [x23, #0x10]\n"
+ "ldr d3, [x23, #0x18]\n"
+ "mov v10.16b, v15.16b\n"
+ "mov v7.16b, v16.16b\n"
+ "ldr d4, [x23, #0x20]\n"
+ "ldp x28, x6, [x20, #0x0]\n"
+ "mov v6.16b, v15.16b\n"
+ "mov v5.16b, v16.16b\n"
+ "ldp x26, x25, [x20, #0x10]\n"
+ "ldp x5, x2, [x20, #0x20]\n"
+ "ssubl v0.8h, v0.8b, v14.8b\n"
+ "ssubl v1.8h, v1.8b, v14.8b\n"
+ "ldp x27, x21, [x20, #0x30]\n"
+ "ldp x12, x19, [x20, #0x40]\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "ssubl v3.8h, v3.8b, v14.8b\n"
+ "ssubl v4.8h, v4.8b, v14.8b\n"
+ "add x28, x28, x24\n"
+ "add x6, x6, x24\n"
+ "add x26, x26, x24\n"
+ "add x25, x25, x24\n"
+ "add x5, x5, x24\n"
+ "add x2, x2, x24\n"
+ "add x27, x27, x24\n"
+ "add x21, x21, x24\n"
+ "add x12, x12, x24\n"
+ "add x19, x19, x24\n"
+ "tbz x0, #2, 9f\n"
+ "ld1 { v31.s }[0], [x28], #0x4\n"
+ "ld1 { v30.s }[0], [x6], #0x4\n"
+ "ld1 { v29.s }[0], [x26], #0x4\n"
+ "ld1 { v28.s }[0], [x25], #0x4\n"
+ "ld1 { v27.s }[0], [x5], #0x4\n"
+ "ld1 { v23.s }[0], [x2], #0x4\n"
+ "ld1 { v25.s }[0], [x27], #0x4\n"
+ "ld1 { v24.s }[0], [x21], #0x4\n"
+ "ld1 { v26.s }[0], [x12], #0x4\n"
+ "ld1 { v22.s }[0], [x19], #0x4\n"
+ "tbz x0, #1, 8f\n"
+ "ld1 { v31.h }[2], [x28], #0x2\n"
+ "ld1 { v30.h }[2], [x6], #0x2\n"
+ "ld1 { v29.h }[2], [x26], #0x2\n"
+ "ld1 { v28.h }[2], [x25], #0x2\n"
+ "ld1 { v27.h }[2], [x5], #0x2\n"
+ "ld1 { v23.h }[2], [x2], #0x2\n"
+ "ld1 { v25.h }[2], [x27], #0x2\n"
+ "ld1 { v24.h }[2], [x21], #0x2\n"
+ "ld1 { v26.h }[2], [x12], #0x2\n"
+ "ld1 { v22.h }[2], [x19], #0x2\n"
+ "tbz x0, #0, 11f\n"
+ "ld1 { v31.b }[6], [x28]\n"
+ "ld1 { v30.b }[6], [x6]\n"
+ "ld1 { v29.b }[6], [x26]\n"
+ "ld1 { v28.b }[6], [x25]\n"
+ "ld1 { v27.b }[6], [x5]\n"
+ "ld1 { v23.b }[6], [x2]\n"
+ "ld1 { v25.b }[6], [x27]\n"
+ "ld1 { v24.b }[6], [x21]\n"
+ "ld1 { v26.b }[6], [x12]\n"
+ "ld1 { v22.b }[6], [x19]\n"
"b 11f\n"
"8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset
- "tbz x1, #0, 11f\n"
- "ld1 { v31.b }[4], [x9]\n"
- "ld1 { v30.b }[4], [x28]\n"
- "ld1 { v29.b }[4], [x27]\n"
- "ld1 { v28.b }[4], [x26]\n"
- "ld1 { v27.b }[4], [x25]\n"
- "ld1 { v23.b }[4], [x24]\n"
- "ld1 { v25.b }[4], [x23]\n"
- "ld1 { v24.b }[4], [x22]\n"
- "ld1 { v26.b }[4], [x21]\n"
- "ld1 { v22.b }[4], [x20]\n"
+ "tbz x0, #0, 11f\n"
+ "ld1 { v31.b }[4], [x28]\n"
+ "ld1 { v30.b }[4], [x6]\n"
+ "ld1 { v29.b }[4], [x26]\n"
+ "ld1 { v28.b }[4], [x25]\n"
+ "ld1 { v27.b }[4], [x5]\n"
+ "ld1 { v23.b }[4], [x2]\n"
+ "ld1 { v25.b }[4], [x27]\n"
+ "ld1 { v24.b }[4], [x21]\n"
+ "ld1 { v26.b }[4], [x12]\n"
+ "ld1 { v22.b }[4], [x19]\n"
"b 11f\n"
"9:" // Oddments: Initial loads: Bit 2: Unset
- "tbz x1, #1, 10f\n"
- "ld1 { v31.h }[0], [x9], #0x2\n"
- "ld1 { v30.h }[0], [x28], #0x2\n"
- "ld1 { v29.h }[0], [x27], #0x2\n"
- "ld1 { v28.h }[0], [x26], #0x2\n"
- "ld1 { v27.h }[0], [x25], #0x2\n"
- "ld1 { v23.h }[0], [x24], #0x2\n"
- "ld1 { v25.h }[0], [x23], #0x2\n"
- "ld1 { v24.h }[0], [x22], #0x2\n"
- "ld1 { v26.h }[0], [x21], #0x2\n"
- "ld1 { v22.h }[0], [x20], #0x2\n"
- "tbz x1, #0, 11f\n"
- "ld1 { v31.b }[2], [x9]\n"
- "ld1 { v30.b }[2], [x28]\n"
- "ld1 { v29.b }[2], [x27]\n"
- "ld1 { v28.b }[2], [x26]\n"
- "ld1 { v27.b }[2], [x25]\n"
- "ld1 { v23.b }[2], [x24]\n"
- "ld1 { v25.b }[2], [x23]\n"
- "ld1 { v24.b }[2], [x22]\n"
- "ld1 { v26.b }[2], [x21]\n"
- "ld1 { v22.b }[2], [x20]\n"
+ "tbz x0, #1, 10f\n"
+ "ld1 { v31.h }[0], [x28], #0x2\n"
+ "ld1 { v30.h }[0], [x6], #0x2\n"
+ "ld1 { v29.h }[0], [x26], #0x2\n"
+ "ld1 { v28.h }[0], [x25], #0x2\n"
+ "ld1 { v27.h }[0], [x5], #0x2\n"
+ "ld1 { v23.h }[0], [x2], #0x2\n"
+ "ld1 { v25.h }[0], [x27], #0x2\n"
+ "ld1 { v24.h }[0], [x21], #0x2\n"
+ "ld1 { v26.h }[0], [x12], #0x2\n"
+ "ld1 { v22.h }[0], [x19], #0x2\n"
+ "tbz x0, #0, 11f\n"
+ "ld1 { v31.b }[2], [x28]\n"
+ "ld1 { v30.b }[2], [x6]\n"
+ "ld1 { v29.b }[2], [x26]\n"
+ "ld1 { v28.b }[2], [x25]\n"
+ "ld1 { v27.b }[2], [x5]\n"
+ "ld1 { v23.b }[2], [x2]\n"
+ "ld1 { v25.b }[2], [x27]\n"
+ "ld1 { v24.b }[2], [x21]\n"
+ "ld1 { v26.b }[2], [x12]\n"
+ "ld1 { v22.b }[2], [x19]\n"
"b 11f\n"
"10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 11f\n"
- "ld1 { v31.b }[0], [x9]\n"
- "ld1 { v30.b }[0], [x28]\n"
- "ld1 { v29.b }[0], [x27]\n"
- "ld1 { v28.b }[0], [x26]\n"
- "ld1 { v27.b }[0], [x25]\n"
- "ld1 { v23.b }[0], [x24]\n"
- "ld1 { v25.b }[0], [x23]\n"
- "ld1 { v24.b }[0], [x22]\n"
- "ld1 { v26.b }[0], [x21]\n"
- "ld1 { v22.b }[0], [x20]\n"
+ "tbz x0, #0, 11f\n"
+ "ld1 { v31.b }[0], [x28]\n"
+ "ld1 { v30.b }[0], [x6]\n"
+ "ld1 { v29.b }[0], [x26]\n"
+ "ld1 { v28.b }[0], [x25]\n"
+ "ld1 { v27.b }[0], [x5]\n"
+ "ld1 { v23.b }[0], [x2]\n"
+ "ld1 { v25.b }[0], [x27]\n"
+ "ld1 { v24.b }[0], [x21]\n"
+ "ld1 { v26.b }[0], [x12]\n"
+ "ld1 { v22.b }[0], [x19]\n"
"11:" // Oddments: Initial loads: Bit 2: End
"usubl v31.8h, v31.8b, v9.8b\n"
"usubl v30.8h, v30.8b, v9.8b\n"
- "smlal v13.4s, v31.4h, v0.4h\n"
- "ldr x20, [x4, #0x50]\n"
+ "smlal v15.4s, v31.4h, v0.4h\n"
+ "ldr x19, [x20, #0x50]\n"
"usubl v29.8h, v29.8b, v9.8b\n"
- "smlal2 v19.4s, v31.8h, v0.8h\n"
- "smlal v20.4s, v30.4h, v0.4h\n"
- "smlal2 v10.4s, v30.8h, v0.8h\n"
- "smlal v8.4s, v29.4h, v0.4h\n"
+ "smlal2 v16.4s, v31.8h, v0.8h\n"
+ "smlal v17.4s, v30.4h, v0.4h\n"
+ "smlal2 v8.4s, v30.8h, v0.8h\n"
+ "smlal v10.4s, v29.4h, v0.4h\n"
"usubl v28.8h, v28.8b, v9.8b\n"
- "add x20, x20, x0\n"
+ "add x19, x19, x24\n"
"smlal2 v7.4s, v29.8h, v0.8h\n"
"usubl v27.8h, v27.8b, v9.8b\n"
- "smlal v17.4s, v28.4h, v0.4h\n"
- "smlal2 v21.4s, v28.8h, v0.8h\n"
- "smlal v13.4s, v30.4h, v1.4h\n"
+ "smlal v6.4s, v28.4h, v0.4h\n"
+ "smlal2 v5.4s, v28.8h, v0.8h\n"
+ "smlal v15.4s, v30.4h, v1.4h\n"
"usubl v23.8h, v23.8b, v9.8b\n"
- "smlal2 v19.4s, v30.8h, v1.8h\n"
- "smlal v20.4s, v27.4h, v1.4h\n"
+ "smlal2 v16.4s, v30.8h, v1.8h\n"
+ "smlal v17.4s, v27.4h, v1.4h\n"
"usubl v25.8h, v25.8b, v9.8b\n"
- "smlal2 v10.4s, v27.8h, v1.8h\n"
- "smlal v8.4s, v28.4h, v1.4h\n"
+ "smlal2 v8.4s, v27.8h, v1.8h\n"
+ "smlal v10.4s, v28.4h, v1.4h\n"
"usubl v24.8h, v24.8b, v9.8b\n"
"smlal2 v7.4s, v28.8h, v1.8h\n"
"usubl v26.8h, v26.8b, v9.8b\n"
- "smlal v17.4s, v23.4h, v1.4h\n"
+ "smlal v6.4s, v23.4h, v1.4h\n"
"usubl v22.8h, v22.8b, v9.8b\n"
- "smlal2 v21.4s, v23.8h, v1.8h\n"
- "smlal v13.4s, v27.4h, v2.4h\n"
- "smlal2 v19.4s, v27.8h, v2.8h\n"
- "smlal v20.4s, v25.4h, v2.4h\n"
- "smlal2 v10.4s, v25.8h, v2.8h\n"
- "smlal v8.4s, v23.4h, v2.4h\n"
+ "smlal2 v5.4s, v23.8h, v1.8h\n"
+ "smlal v15.4s, v27.4h, v2.4h\n"
+ "smlal2 v16.4s, v27.8h, v2.8h\n"
+ "smlal v17.4s, v25.4h, v2.4h\n"
+ "smlal2 v8.4s, v25.8h, v2.8h\n"
+ "smlal v10.4s, v23.4h, v2.4h\n"
"smlal2 v7.4s, v23.8h, v2.8h\n"
- "tbz x1, #2, 13f\n"
- "ld1 { v31.s }[0], [x20], #0x4\n"
- "tbz x1, #1, 12f\n"
- "ld1 { v31.h }[2], [x20], #0x2\n"
- "tbz x1, #0, 15f\n"
- "ld1 { v31.b }[6], [x20]\n"
+ "tbz x0, #2, 13f\n"
+ "ld1 { v31.s }[0], [x19], #0x4\n"
+ "tbz x0, #1, 12f\n"
+ "ld1 { v31.h }[2], [x19], #0x2\n"
+ "tbz x0, #0, 15f\n"
+ "ld1 { v31.b }[6], [x19]\n"
"b 15f\n"
"12:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset
- "tbz x1, #0, 15f\n"
- "ld1 { v31.b }[4], [x20]\n"
+ "tbz x0, #0, 15f\n"
+ "ld1 { v31.b }[4], [x19]\n"
"b 15f\n"
"13:" // Oddments: Load (1, 3): Bit 2: Unset
- "tbz x1, #1, 14f\n"
- "ld1 { v31.h }[0], [x20], #0x2\n"
- "tbz x1, #0, 15f\n"
- "ld1 { v31.b }[2], [x20]\n"
+ "tbz x0, #1, 14f\n"
+ "ld1 { v31.h }[0], [x19], #0x2\n"
+ "tbz x0, #0, 15f\n"
+ "ld1 { v31.b }[2], [x19]\n"
"b 15f\n"
"14:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 15f\n"
- "ld1 { v31.b }[0], [x20]\n"
+ "tbz x0, #0, 15f\n"
+ "ld1 { v31.b }[0], [x19]\n"
"15:" // Oddments: Load (1, 3): Bit 2: End
"usubl v31.8h, v31.8b, v9.8b\n"
- "ldr x22, [x4, #0x58]\n"
- "smlal v17.4s, v31.4h, v2.4h\n"
- "smlal2 v21.4s, v31.8h, v2.8h\n"
- "smlal v13.4s, v25.4h, v3.4h\n"
- "smlal2 v19.4s, v25.8h, v3.8h\n"
- "add x22, x22, x0\n"
- "smlal v20.4s, v24.4h, v3.4h\n"
- "smlal2 v10.4s, v24.8h, v3.8h\n"
- "smlal v8.4s, v31.4h, v3.4h\n"
+ "ldr x15, [x20, #0x58]\n"
+ "smlal v6.4s, v31.4h, v2.4h\n"
+ "smlal2 v5.4s, v31.8h, v2.8h\n"
+ "smlal v15.4s, v25.4h, v3.4h\n"
+ "smlal2 v16.4s, v25.8h, v3.8h\n"
+ "add x15, x15, x24\n"
+ "smlal v17.4s, v24.4h, v3.4h\n"
+ "smlal2 v8.4s, v24.8h, v3.8h\n"
+ "smlal v10.4s, v31.4h, v3.4h\n"
"smlal2 v7.4s, v31.8h, v3.8h\n"
- "tbz x1, #2, 17f\n"
- "ld1 { v30.s }[0], [x22], #0x4\n"
- "tbz x1, #1, 16f\n"
- "ld1 { v30.h }[2], [x22], #0x2\n"
- "tbz x1, #0, 19f\n"
- "ld1 { v30.b }[6], [x22]\n"
+ "tbz x0, #2, 17f\n"
+ "ld1 { v30.s }[0], [x15], #0x4\n"
+ "tbz x0, #1, 16f\n"
+ "ld1 { v30.h }[2], [x15], #0x2\n"
+ "tbz x0, #0, 19f\n"
+ "ld1 { v30.b }[6], [x15]\n"
"b 19f\n"
"16:" // Oddments: Load (1, 4): Bit 2: Bit 1: Unset
- "tbz x1, #0, 19f\n"
- "ld1 { v30.b }[4], [x22]\n"
+ "tbz x0, #0, 19f\n"
+ "ld1 { v30.b }[4], [x15]\n"
"b 19f\n"
"17:" // Oddments: Load (1, 4): Bit 2: Unset
- "tbz x1, #1, 18f\n"
- "ld1 { v30.h }[0], [x22], #0x2\n"
- "tbz x1, #0, 19f\n"
- "ld1 { v30.b }[2], [x22]\n"
+ "tbz x0, #1, 18f\n"
+ "ld1 { v30.h }[0], [x15], #0x2\n"
+ "tbz x0, #0, 19f\n"
+ "ld1 { v30.b }[2], [x15]\n"
"b 19f\n"
"18:" // Oddments: Load (1, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 19f\n"
- "ld1 { v30.b }[0], [x22]\n"
+ "tbz x0, #0, 19f\n"
+ "ld1 { v30.b }[0], [x15]\n"
"19:" // Oddments: Load (1, 4): Bit 2: End
"usubl v30.8h, v30.8b, v9.8b\n"
- "ldr x21, [x4, #0x60]\n"
- "smlal v17.4s, v30.4h, v3.4h\n"
- "smlal2 v21.4s, v30.8h, v3.8h\n"
- "smlal v13.4s, v24.4h, v4.4h\n"
- "smlal2 v19.4s, v24.8h, v4.8h\n"
- "add x21, x21, x0\n"
- "tbz x1, #2, 21f\n"
- "ld1 { v27.s }[0], [x21], #0x4\n"
- "tbz x1, #1, 20f\n"
- "ld1 { v27.h }[2], [x21], #0x2\n"
- "tbz x1, #0, 23f\n"
- "ld1 { v27.b }[6], [x21]\n"
+ "ldr x19, [x20, #0x60]\n"
+ "smlal v6.4s, v30.4h, v3.4h\n"
+ "smlal2 v5.4s, v30.8h, v3.8h\n"
+ "smlal v15.4s, v24.4h, v4.4h\n"
+ "smlal2 v16.4s, v24.8h, v4.8h\n"
+ "add x19, x19, x24\n"
+ "tbz x0, #2, 21f\n"
+ "ld1 { v27.s }[0], [x19], #0x4\n"
+ "tbz x0, #1, 20f\n"
+ "ld1 { v27.h }[2], [x19], #0x2\n"
+ "tbz x0, #0, 23f\n"
+ "ld1 { v27.b }[6], [x19]\n"
"b 23f\n"
"20:" // Oddments: Load (0, 5): Bit 2: Bit 1: Unset
- "tbz x1, #0, 23f\n"
- "ld1 { v27.b }[4], [x21]\n"
+ "tbz x0, #0, 23f\n"
+ "ld1 { v27.b }[4], [x19]\n"
"b 23f\n"
"21:" // Oddments: Load (0, 5): Bit 2: Unset
- "tbz x1, #1, 22f\n"
- "ld1 { v27.h }[0], [x21], #0x2\n"
- "tbz x1, #0, 23f\n"
- "ld1 { v27.b }[2], [x21]\n"
+ "tbz x0, #1, 22f\n"
+ "ld1 { v27.h }[0], [x19], #0x2\n"
+ "tbz x0, #0, 23f\n"
+ "ld1 { v27.b }[2], [x19]\n"
"b 23f\n"
"22:" // Oddments: Load (0, 5): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 23f\n"
- "ld1 { v27.b }[0], [x21]\n"
+ "tbz x0, #0, 23f\n"
+ "ld1 { v27.b }[0], [x19]\n"
"23:" // Oddments: Load (0, 5): Bit 2: End
- "ldr d0, [x3, #0x28]\n"
"usubl v27.8h, v27.8b, v9.8b\n"
- "smlal v20.4s, v27.4h, v4.4h\n"
- "smlal2 v10.4s, v27.8h, v4.8h\n"
- "smlal v8.4s, v30.4h, v4.4h\n"
+ "ldr d0, [x23, #0x28]\n"
+ "smlal v17.4s, v27.4h, v4.4h\n"
+ "smlal2 v8.4s, v27.8h, v4.8h\n"
+ "smlal v10.4s, v30.4h, v4.4h\n"
"smlal2 v7.4s, v30.8h, v4.8h\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
- "ldr x20, [x4, #0x68]\n"
- "smlal v17.4s, v26.4h, v4.4h\n"
- "smlal2 v21.4s, v26.8h, v4.8h\n"
- "add x20, x20, x0\n"
- "smlal v13.4s, v29.4h, v0.4h\n"
- "smlal2 v19.4s, v29.8h, v0.8h\n"
- "smlal v20.4s, v28.4h, v0.4h\n"
- "smlal2 v10.4s, v28.8h, v0.8h\n"
- "smlal v8.4s, v22.4h, v0.4h\n"
+ "ssubl v0.8h, v0.8b, v14.8b\n"
+ "ldr x27, [x20, #0x68]\n"
+ "smlal v6.4s, v26.4h, v4.4h\n"
+ "smlal2 v5.4s, v26.8h, v4.8h\n"
+ "add x27, x27, x24\n"
+ "smlal v15.4s, v29.4h, v0.4h\n"
+ "smlal2 v16.4s, v29.8h, v0.8h\n"
+ "smlal v17.4s, v28.4h, v0.4h\n"
+ "smlal2 v8.4s, v28.8h, v0.8h\n"
+ "smlal v10.4s, v22.4h, v0.4h\n"
"smlal2 v7.4s, v22.8h, v0.8h\n"
- "tbz x1, #2, 25f\n"
- "ld1 { v25.s }[0], [x20], #0x4\n"
- "tbz x1, #1, 24f\n"
- "ld1 { v25.h }[2], [x20], #0x2\n"
- "tbz x1, #0, 27f\n"
- "ld1 { v25.b }[6], [x20]\n"
+ "tbz x0, #2, 25f\n"
+ "ld1 { v25.s }[0], [x27], #0x4\n"
+ "tbz x0, #1, 24f\n"
+ "ld1 { v25.h }[2], [x27], #0x2\n"
+ "tbz x0, #0, 27f\n"
+ "ld1 { v25.b }[6], [x27]\n"
"b 27f\n"
"24:" // Oddments: Load (2, 1): Bit 2: Bit 1: Unset
- "tbz x1, #0, 27f\n"
- "ld1 { v25.b }[4], [x20]\n"
+ "tbz x0, #0, 27f\n"
+ "ld1 { v25.b }[4], [x27]\n"
"b 27f\n"
"25:" // Oddments: Load (2, 1): Bit 2: Unset
- "tbz x1, #1, 26f\n"
- "ld1 { v25.h }[0], [x20], #0x2\n"
- "tbz x1, #0, 27f\n"
- "ld1 { v25.b }[2], [x20]\n"
+ "tbz x0, #1, 26f\n"
+ "ld1 { v25.h }[0], [x27], #0x2\n"
+ "tbz x0, #0, 27f\n"
+ "ld1 { v25.b }[2], [x27]\n"
"b 27f\n"
"26:" // Oddments: Load (2, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 27f\n"
- "ld1 { v25.b }[0], [x20]\n"
+ "tbz x0, #0, 27f\n"
+ "ld1 { v25.b }[0], [x27]\n"
"27:" // Oddments: Load (2, 1): Bit 2: End
- "ldr d1, [x3, #0x30]\n"
+ "ldr d1, [x23, #0x30]\n"
"usubl v25.8h, v25.8b, v9.8b\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
- "ldr x26, [x4, #0x70]\n"
- "smlal v17.4s, v25.4h, v0.4h\n"
- "smlal2 v21.4s, v25.8h, v0.8h\n"
- "add x26, x26, x0\n"
- "smlal v13.4s, v28.4h, v1.4h\n"
- "smlal2 v19.4s, v28.8h, v1.8h\n"
- "smlal v20.4s, v23.4h, v1.4h\n"
- "smlal2 v10.4s, v23.8h, v1.8h\n"
- "smlal v8.4s, v25.4h, v1.4h\n"
+ "ssubl v1.8h, v1.8b, v14.8b\n"
+ "ldr x5, [x20, #0x70]\n"
+ "smlal v6.4s, v25.4h, v0.4h\n"
+ "smlal2 v5.4s, v25.8h, v0.8h\n"
+ "add x5, x5, x24\n"
+ "smlal v15.4s, v28.4h, v1.4h\n"
+ "smlal2 v16.4s, v28.8h, v1.8h\n"
+ "smlal v17.4s, v23.4h, v1.4h\n"
+ "smlal2 v8.4s, v23.8h, v1.8h\n"
+ "smlal v10.4s, v25.4h, v1.4h\n"
"smlal2 v7.4s, v25.8h, v1.8h\n"
- "tbz x1, #2, 29f\n"
- "ld1 { v24.s }[0], [x26], #0x4\n"
- "tbz x1, #1, 28f\n"
- "ld1 { v24.h }[2], [x26], #0x2\n"
- "tbz x1, #0, 31f\n"
- "ld1 { v24.b }[6], [x26]\n"
+ "tbz x0, #2, 29f\n"
+ "ld1 { v24.s }[0], [x5], #0x4\n"
+ "tbz x0, #1, 28f\n"
+ "ld1 { v24.h }[2], [x5], #0x2\n"
+ "tbz x0, #0, 31f\n"
+ "ld1 { v24.b }[6], [x5]\n"
"b 31f\n"
"28:" // Oddments: Load (2, 2): Bit 2: Bit 1: Unset
- "tbz x1, #0, 31f\n"
- "ld1 { v24.b }[4], [x26]\n"
+ "tbz x0, #0, 31f\n"
+ "ld1 { v24.b }[4], [x5]\n"
"b 31f\n"
"29:" // Oddments: Load (2, 2): Bit 2: Unset
- "tbz x1, #1, 30f\n"
- "ld1 { v24.h }[0], [x26], #0x2\n"
- "tbz x1, #0, 31f\n"
- "ld1 { v24.b }[2], [x26]\n"
+ "tbz x0, #1, 30f\n"
+ "ld1 { v24.h }[0], [x5], #0x2\n"
+ "tbz x0, #0, 31f\n"
+ "ld1 { v24.b }[2], [x5]\n"
"b 31f\n"
"30:" // Oddments: Load (2, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 31f\n"
- "ld1 { v24.b }[0], [x26]\n"
+ "tbz x0, #0, 31f\n"
+ "ld1 { v24.b }[0], [x5]\n"
"31:" // Oddments: Load (2, 2): Bit 2: End
- "ldr d2, [x3, #0x38]\n"
+ "ldr d2, [x23, #0x38]\n"
"usubl v24.8h, v24.8b, v9.8b\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
- "ldr x25, [x4, #0x78]\n"
- "smlal v17.4s, v24.4h, v1.4h\n"
- "smlal2 v21.4s, v24.8h, v1.8h\n"
- "add x25, x25, x0\n"
- "smlal v13.4s, v23.4h, v2.4h\n"
- "smlal2 v19.4s, v23.8h, v2.8h\n"
- "smlal v20.4s, v31.4h, v2.4h\n"
- "smlal2 v10.4s, v31.8h, v2.8h\n"
- "smlal v8.4s, v24.4h, v2.4h\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "ldr x11, [x20, #0x78]\n"
+ "smlal v6.4s, v24.4h, v1.4h\n"
+ "smlal2 v5.4s, v24.8h, v1.8h\n"
+ "add x11, x11, x24\n"
+ "smlal v15.4s, v23.4h, v2.4h\n"
+ "smlal2 v16.4s, v23.8h, v2.8h\n"
+ "smlal v17.4s, v31.4h, v2.4h\n"
+ "smlal2 v8.4s, v31.8h, v2.8h\n"
+ "smlal v10.4s, v24.4h, v2.4h\n"
"smlal2 v7.4s, v24.8h, v2.8h\n"
- "tbz x1, #2, 33f\n"
- "ld1 { v27.s }[0], [x25], #0x4\n"
- "tbz x1, #1, 32f\n"
- "ld1 { v27.h }[2], [x25], #0x2\n"
- "tbz x1, #0, 35f\n"
- "ld1 { v27.b }[6], [x25]\n"
+ "tbz x0, #2, 33f\n"
+ "ld1 { v27.s }[0], [x11], #0x4\n"
+ "tbz x0, #1, 32f\n"
+ "ld1 { v27.h }[2], [x11], #0x2\n"
+ "tbz x0, #0, 35f\n"
+ "ld1 { v27.b }[6], [x11]\n"
"b 35f\n"
"32:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset
- "tbz x1, #0, 35f\n"
- "ld1 { v27.b }[4], [x25]\n"
+ "tbz x0, #0, 35f\n"
+ "ld1 { v27.b }[4], [x11]\n"
"b 35f\n"
"33:" // Oddments: Load (2, 3): Bit 2: Unset
- "tbz x1, #1, 34f\n"
- "ld1 { v27.h }[0], [x25], #0x2\n"
- "tbz x1, #0, 35f\n"
- "ld1 { v27.b }[2], [x25]\n"
+ "tbz x0, #1, 34f\n"
+ "ld1 { v27.h }[0], [x11], #0x2\n"
+ "tbz x0, #0, 35f\n"
+ "ld1 { v27.b }[2], [x11]\n"
"b 35f\n"
"34:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 35f\n"
- "ld1 { v27.b }[0], [x25]\n"
+ "tbz x0, #0, 35f\n"
+ "ld1 { v27.b }[0], [x11]\n"
"35:" // Oddments: Load (2, 3): Bit 2: End
- "ldr d3, [x3, #0x40]\n"
+ "ldr d3, [x23, #0x40]\n"
"usubl v27.8h, v27.8b, v9.8b\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "ldr x23, [x4, #0x80]\n"
- "smlal v17.4s, v27.4h, v2.4h\n"
- "smlal2 v21.4s, v27.8h, v2.8h\n"
- "add x23, x23, x0\n"
- "smlal v13.4s, v31.4h, v3.4h\n"
- "smlal2 v19.4s, v31.8h, v3.8h\n"
- "smlal v20.4s, v30.4h, v3.4h\n"
- "smlal2 v10.4s, v30.8h, v3.8h\n"
- "smlal v8.4s, v27.4h, v3.4h\n"
+ "ssubl v3.8h, v3.8b, v14.8b\n"
+ "ldr x12, [x20, #0x80]\n"
+ "smlal v6.4s, v27.4h, v2.4h\n"
+ "smlal2 v5.4s, v27.8h, v2.8h\n"
+ "add x12, x12, x24\n"
+ "smlal v15.4s, v31.4h, v3.4h\n"
+ "smlal2 v16.4s, v31.8h, v3.8h\n"
+ "smlal v17.4s, v30.4h, v3.4h\n"
+ "smlal2 v8.4s, v30.8h, v3.8h\n"
+ "smlal v10.4s, v27.4h, v3.4h\n"
"smlal2 v7.4s, v27.8h, v3.8h\n"
- "tbz x1, #2, 37f\n"
- "ld1 { v23.s }[0], [x23], #0x4\n"
- "tbz x1, #1, 36f\n"
- "ld1 { v23.h }[2], [x23], #0x2\n"
- "tbz x1, #0, 39f\n"
- "ld1 { v23.b }[6], [x23]\n"
+ "tbz x0, #2, 37f\n"
+ "ld1 { v23.s }[0], [x12], #0x4\n"
+ "tbz x0, #1, 36f\n"
+ "ld1 { v23.h }[2], [x12], #0x2\n"
+ "tbz x0, #0, 39f\n"
+ "ld1 { v23.b }[6], [x12]\n"
"b 39f\n"
"36:" // Oddments: Load (2, 4): Bit 2: Bit 1: Unset
- "tbz x1, #0, 39f\n"
- "ld1 { v23.b }[4], [x23]\n"
+ "tbz x0, #0, 39f\n"
+ "ld1 { v23.b }[4], [x12]\n"
"b 39f\n"
"37:" // Oddments: Load (2, 4): Bit 2: Unset
- "tbz x1, #1, 38f\n"
- "ld1 { v23.h }[0], [x23], #0x2\n"
- "tbz x1, #0, 39f\n"
- "ld1 { v23.b }[2], [x23]\n"
+ "tbz x0, #1, 38f\n"
+ "ld1 { v23.h }[0], [x12], #0x2\n"
+ "tbz x0, #0, 39f\n"
+ "ld1 { v23.b }[2], [x12]\n"
"b 39f\n"
"38:" // Oddments: Load (2, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 39f\n"
- "ld1 { v23.b }[0], [x23]\n"
+ "tbz x0, #0, 39f\n"
+ "ld1 { v23.b }[0], [x12]\n"
"39:" // Oddments: Load (2, 4): Bit 2: End
- "ldr d4, [x3, #0x48]\n"
+ "ldr d4, [x23, #0x48]\n"
"usubl v23.8h, v23.8b, v9.8b\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "ldr x24, [x4, #0x88]\n"
- "smlal v17.4s, v23.4h, v3.4h\n"
- "smlal2 v21.4s, v23.8h, v3.8h\n"
- "add x24, x24, x0\n"
- "smlal v13.4s, v30.4h, v4.4h\n"
- "smlal2 v19.4s, v30.8h, v4.8h\n"
- "smlal v20.4s, v26.4h, v4.4h\n"
- "smlal2 v10.4s, v26.8h, v4.8h\n"
- "smlal v8.4s, v23.4h, v4.4h\n"
+ "ssubl v4.8h, v4.8b, v14.8b\n"
+ "ldr x26, [x20, #0x88]\n"
+ "smlal v6.4s, v23.4h, v3.4h\n"
+ "smlal2 v5.4s, v23.8h, v3.8h\n"
+ "add x26, x26, x24\n"
+ "smlal v15.4s, v30.4h, v4.4h\n"
+ "smlal2 v16.4s, v30.8h, v4.8h\n"
+ "smlal v17.4s, v26.4h, v4.4h\n"
+ "smlal2 v8.4s, v26.8h, v4.8h\n"
+ "smlal v10.4s, v23.4h, v4.4h\n"
"smlal2 v7.4s, v23.8h, v4.8h\n"
- "tbz x1, #2, 41f\n"
- "ld1 { v28.s }[0], [x24], #0x4\n"
- "tbz x1, #1, 40f\n"
- "ld1 { v28.h }[2], [x24], #0x2\n"
- "tbz x1, #0, 43f\n"
- "ld1 { v28.b }[6], [x24]\n"
+ "tbz x0, #2, 41f\n"
+ "ld1 { v28.s }[0], [x26], #0x4\n"
+ "tbz x0, #1, 40f\n"
+ "ld1 { v28.h }[2], [x26], #0x2\n"
+ "tbz x0, #0, 43f\n"
+ "ld1 { v28.b }[6], [x26]\n"
"b 43f\n"
"40:" // Oddments: Load (2, 5): Bit 2: Bit 1: Unset
- "tbz x1, #0, 43f\n"
- "ld1 { v28.b }[4], [x24]\n"
+ "tbz x0, #0, 43f\n"
+ "ld1 { v28.b }[4], [x26]\n"
"b 43f\n"
"41:" // Oddments: Load (2, 5): Bit 2: Unset
- "tbz x1, #1, 42f\n"
- "ld1 { v28.h }[0], [x24], #0x2\n"
- "tbz x1, #0, 43f\n"
- "ld1 { v28.b }[2], [x24]\n"
+ "tbz x0, #1, 42f\n"
+ "ld1 { v28.h }[0], [x26], #0x2\n"
+ "tbz x0, #0, 43f\n"
+ "ld1 { v28.b }[2], [x26]\n"
"b 43f\n"
"42:" // Oddments: Load (2, 5): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 43f\n"
- "ld1 { v28.b }[0], [x24]\n"
+ "tbz x0, #0, 43f\n"
+ "ld1 { v28.b }[0], [x26]\n"
"43:" // Oddments: Load (2, 5): Bit 2: End
- "ldr d0, [x3, #0x50]\n"
+ "ldr d0, [x23, #0x50]\n"
"usubl v28.8h, v28.8b, v9.8b\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
- "ldr x15, [x4, #0x90]\n"
- "smlal v17.4s, v28.4h, v4.4h\n"
- "smlal2 v21.4s, v28.8h, v4.8h\n"
- "add x15, x15, x0\n"
- "smlal v13.4s, v22.4h, v0.4h\n"
- "smlal2 v19.4s, v22.8h, v0.8h\n"
- "smlal v20.4s, v25.4h, v0.4h\n"
- "smlal2 v10.4s, v25.8h, v0.8h\n"
- "tbz x1, #2, 45f\n"
- "ld1 { v31.s }[0], [x15], #0x4\n"
- "tbz x1, #1, 44f\n"
- "ld1 { v31.h }[2], [x15], #0x2\n"
- "tbz x1, #0, 47f\n"
- "ld1 { v31.b }[6], [x15]\n"
+ "ssubl v0.8h, v0.8b, v14.8b\n"
+ "ldr x14, [x20, #0x90]\n"
+ "smlal v6.4s, v28.4h, v4.4h\n"
+ "smlal2 v5.4s, v28.8h, v4.8h\n"
+ "add x14, x14, x24\n"
+ "smlal v15.4s, v22.4h, v0.4h\n"
+ "smlal2 v16.4s, v22.8h, v0.8h\n"
+ "smlal v17.4s, v25.4h, v0.4h\n"
+ "smlal2 v8.4s, v25.8h, v0.8h\n"
+ "tbz x0, #2, 45f\n"
+ "ld1 { v31.s }[0], [x14], #0x4\n"
+ "tbz x0, #1, 44f\n"
+ "ld1 { v31.h }[2], [x14], #0x2\n"
+ "tbz x0, #0, 47f\n"
+ "ld1 { v31.b }[6], [x14]\n"
"b 47f\n"
"44:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset
- "tbz x1, #0, 47f\n"
- "ld1 { v31.b }[4], [x15]\n"
+ "tbz x0, #0, 47f\n"
+ "ld1 { v31.b }[4], [x14]\n"
"b 47f\n"
"45:" // Oddments: Load (3, 0): Bit 2: Unset
- "tbz x1, #1, 46f\n"
- "ld1 { v31.h }[0], [x15], #0x2\n"
- "tbz x1, #0, 47f\n"
- "ld1 { v31.b }[2], [x15]\n"
+ "tbz x0, #1, 46f\n"
+ "ld1 { v31.h }[0], [x14], #0x2\n"
+ "tbz x0, #0, 47f\n"
+ "ld1 { v31.b }[2], [x14]\n"
"b 47f\n"
"46:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 47f\n"
- "ld1 { v31.b }[0], [x15]\n"
+ "tbz x0, #0, 47f\n"
+ "ld1 { v31.b }[0], [x14]\n"
"47:" // Oddments: Load (3, 0): Bit 2: End
"usubl v31.8h, v31.8b, v9.8b\n"
- "ldr x21, [x4, #0x98]\n"
- "smlal v8.4s, v31.4h, v0.4h\n"
+ "ldr x15, [x20, #0x98]\n"
+ "smlal v10.4s, v31.4h, v0.4h\n"
"smlal2 v7.4s, v31.8h, v0.8h\n"
- "add x21, x21, x0\n"
- "tbz x1, #2, 49f\n"
- "ld1 { v30.s }[0], [x21], #0x4\n"
- "tbz x1, #1, 48f\n"
- "ld1 { v30.h }[2], [x21], #0x2\n"
- "tbz x1, #0, 51f\n"
- "ld1 { v30.b }[6], [x21]\n"
+ "add x15, x15, x24\n"
+ "tbz x0, #2, 49f\n"
+ "ld1 { v30.s }[0], [x15], #0x4\n"
+ "tbz x0, #1, 48f\n"
+ "ld1 { v30.h }[2], [x15], #0x2\n"
+ "tbz x0, #0, 51f\n"
+ "ld1 { v30.b }[6], [x15]\n"
"b 51f\n"
"48:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset
- "tbz x1, #0, 51f\n"
- "ld1 { v30.b }[4], [x21]\n"
+ "tbz x0, #0, 51f\n"
+ "ld1 { v30.b }[4], [x15]\n"
"b 51f\n"
"49:" // Oddments: Load (3, 1): Bit 2: Unset
- "tbz x1, #1, 50f\n"
- "ld1 { v30.h }[0], [x21], #0x2\n"
- "tbz x1, #0, 51f\n"
- "ld1 { v30.b }[2], [x21]\n"
+ "tbz x0, #1, 50f\n"
+ "ld1 { v30.h }[0], [x15], #0x2\n"
+ "tbz x0, #0, 51f\n"
+ "ld1 { v30.b }[2], [x15]\n"
"b 51f\n"
"50:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 51f\n"
- "ld1 { v30.b }[0], [x21]\n"
+ "tbz x0, #0, 51f\n"
+ "ld1 { v30.b }[0], [x15]\n"
"51:" // Oddments: Load (3, 1): Bit 2: End
- "ldr d1, [x3, #0x58]\n"
+ "ldr d1, [x23, #0x58]\n"
"usubl v30.8h, v30.8b, v9.8b\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
- "ldr x14, [x4, #0xa0]\n"
- "smlal v17.4s, v30.4h, v0.4h\n"
- "smlal2 v21.4s, v30.8h, v0.8h\n"
- "add x14, x14, x0\n"
- "smlal v13.4s, v25.4h, v1.4h\n"
- "smlal2 v19.4s, v25.8h, v1.8h\n"
- "smlal v20.4s, v24.4h, v1.4h\n"
- "smlal2 v10.4s, v24.8h, v1.8h\n"
- "smlal v8.4s, v30.4h, v1.4h\n"
+ "ssubl v1.8h, v1.8b, v14.8b\n"
+ "ldr x21, [x20, #0xa0]\n"
+ "smlal v6.4s, v30.4h, v0.4h\n"
+ "smlal2 v5.4s, v30.8h, v0.8h\n"
+ "add x21, x21, x24\n"
+ "smlal v15.4s, v25.4h, v1.4h\n"
+ "smlal2 v16.4s, v25.8h, v1.8h\n"
+ "smlal v17.4s, v24.4h, v1.4h\n"
+ "smlal2 v8.4s, v24.8h, v1.8h\n"
+ "smlal v10.4s, v30.4h, v1.4h\n"
"smlal2 v7.4s, v30.8h, v1.8h\n"
- "tbz x1, #2, 53f\n"
- "ld1 { v26.s }[0], [x14], #0x4\n"
- "tbz x1, #1, 52f\n"
- "ld1 { v26.h }[2], [x14], #0x2\n"
- "tbz x1, #0, 55f\n"
- "ld1 { v26.b }[6], [x14]\n"
+ "tbz x0, #2, 53f\n"
+ "ld1 { v26.s }[0], [x21], #0x4\n"
+ "tbz x0, #1, 52f\n"
+ "ld1 { v26.h }[2], [x21], #0x2\n"
+ "tbz x0, #0, 55f\n"
+ "ld1 { v26.b }[6], [x21]\n"
"b 55f\n"
"52:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset
- "tbz x1, #0, 55f\n"
- "ld1 { v26.b }[4], [x14]\n"
+ "tbz x0, #0, 55f\n"
+ "ld1 { v26.b }[4], [x21]\n"
"b 55f\n"
"53:" // Oddments: Load (3, 2): Bit 2: Unset
- "tbz x1, #1, 54f\n"
- "ld1 { v26.h }[0], [x14], #0x2\n"
- "tbz x1, #0, 55f\n"
- "ld1 { v26.b }[2], [x14]\n"
+ "tbz x0, #1, 54f\n"
+ "ld1 { v26.h }[0], [x21], #0x2\n"
+ "tbz x0, #0, 55f\n"
+ "ld1 { v26.b }[2], [x21]\n"
"b 55f\n"
"54:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 55f\n"
- "ld1 { v26.b }[0], [x14]\n"
+ "tbz x0, #0, 55f\n"
+ "ld1 { v26.b }[0], [x21]\n"
"55:" // Oddments: Load (3, 2): Bit 2: End
- "ldr d2, [x3, #0x60]\n"
+ "ldr d2, [x23, #0x60]\n"
"usubl v26.8h, v26.8b, v9.8b\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
- "ldr x13, [x4, #0xa8]\n"
- "smlal v17.4s, v26.4h, v1.4h\n"
- "smlal2 v21.4s, v26.8h, v1.8h\n"
- "add x13, x13, x0\n"
- "smlal v13.4s, v24.4h, v2.4h\n"
- "smlal2 v19.4s, v24.8h, v2.8h\n"
- "smlal v20.4s, v27.4h, v2.4h\n"
- "smlal2 v10.4s, v27.8h, v2.8h\n"
- "smlal v8.4s, v26.4h, v2.4h\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "ldr x2, [x20, #0xa8]\n"
+ "smlal v6.4s, v26.4h, v1.4h\n"
+ "smlal2 v5.4s, v26.8h, v1.8h\n"
+ "add x2, x2, x24\n"
+ "smlal v15.4s, v24.4h, v2.4h\n"
+ "smlal2 v16.4s, v24.8h, v2.8h\n"
+ "smlal v17.4s, v27.4h, v2.4h\n"
+ "smlal2 v8.4s, v27.8h, v2.8h\n"
+ "smlal v10.4s, v26.4h, v2.4h\n"
"smlal2 v7.4s, v26.8h, v2.8h\n"
- "tbz x1, #2, 57f\n"
- "ld1 { v25.s }[0], [x13], #0x4\n"
- "tbz x1, #1, 56f\n"
- "ld1 { v25.h }[2], [x13], #0x2\n"
- "tbz x1, #0, 59f\n"
- "ld1 { v25.b }[6], [x13]\n"
+ "tbz x0, #2, 57f\n"
+ "ld1 { v25.s }[0], [x2], #0x4\n"
+ "tbz x0, #1, 56f\n"
+ "ld1 { v25.h }[2], [x2], #0x2\n"
+ "tbz x0, #0, 59f\n"
+ "ld1 { v25.b }[6], [x2]\n"
"b 59f\n"
"56:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset
- "tbz x1, #0, 59f\n"
- "ld1 { v25.b }[4], [x13]\n"
+ "tbz x0, #0, 59f\n"
+ "ld1 { v25.b }[4], [x2]\n"
"b 59f\n"
"57:" // Oddments: Load (3, 3): Bit 2: Unset
- "tbz x1, #1, 58f\n"
- "ld1 { v25.h }[0], [x13], #0x2\n"
- "tbz x1, #0, 59f\n"
- "ld1 { v25.b }[2], [x13]\n"
+ "tbz x0, #1, 58f\n"
+ "ld1 { v25.h }[0], [x2], #0x2\n"
+ "tbz x0, #0, 59f\n"
+ "ld1 { v25.b }[2], [x2]\n"
"b 59f\n"
"58:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 59f\n"
- "ld1 { v25.b }[0], [x13]\n"
+ "tbz x0, #0, 59f\n"
+ "ld1 { v25.b }[0], [x2]\n"
"59:" // Oddments: Load (3, 3): Bit 2: End
- "ldr d3, [x3, #0x68]\n"
+ "ldr d3, [x23, #0x68]\n"
"usubl v25.8h, v25.8b, v9.8b\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "ldr x12, [x4, #0xb0]\n"
- "smlal v17.4s, v25.4h, v2.4h\n"
- "smlal2 v21.4s, v25.8h, v2.8h\n"
- "add x12, x12, x0\n"
- "smlal v13.4s, v27.4h, v3.4h\n"
- "smlal2 v19.4s, v27.8h, v3.8h\n"
- "smlal v20.4s, v23.4h, v3.4h\n"
- "smlal2 v10.4s, v23.8h, v3.8h\n"
- "smlal v8.4s, v25.4h, v3.4h\n"
+ "ssubl v3.8h, v3.8b, v14.8b\n"
+ "ldr x13, [x20, #0xb0]\n"
+ "smlal v6.4s, v25.4h, v2.4h\n"
+ "smlal2 v5.4s, v25.8h, v2.8h\n"
+ "add x13, x13, x24\n"
+ "smlal v15.4s, v27.4h, v3.4h\n"
+ "smlal2 v16.4s, v27.8h, v3.8h\n"
+ "smlal v17.4s, v23.4h, v3.4h\n"
+ "smlal2 v8.4s, v23.8h, v3.8h\n"
+ "smlal v10.4s, v25.4h, v3.4h\n"
"smlal2 v7.4s, v25.8h, v3.8h\n"
- "tbz x1, #2, 61f\n"
- "ld1 { v24.s }[0], [x12], #0x4\n"
- "tbz x1, #1, 60f\n"
- "ld1 { v24.h }[2], [x12], #0x2\n"
- "tbz x1, #0, 63f\n"
- "ld1 { v24.b }[6], [x12]\n"
+ "tbz x0, #2, 61f\n"
+ "ld1 { v24.s }[0], [x13], #0x4\n"
+ "tbz x0, #1, 60f\n"
+ "ld1 { v24.h }[2], [x13], #0x2\n"
+ "tbz x0, #0, 63f\n"
+ "ld1 { v24.b }[6], [x13]\n"
"b 63f\n"
"60:" // Oddments: Load (3, 4): Bit 2: Bit 1: Unset
- "tbz x1, #0, 63f\n"
- "ld1 { v24.b }[4], [x12]\n"
+ "tbz x0, #0, 63f\n"
+ "ld1 { v24.b }[4], [x13]\n"
"b 63f\n"
"61:" // Oddments: Load (3, 4): Bit 2: Unset
- "tbz x1, #1, 62f\n"
- "ld1 { v24.h }[0], [x12], #0x2\n"
- "tbz x1, #0, 63f\n"
- "ld1 { v24.b }[2], [x12]\n"
+ "tbz x0, #1, 62f\n"
+ "ld1 { v24.h }[0], [x13], #0x2\n"
+ "tbz x0, #0, 63f\n"
+ "ld1 { v24.b }[2], [x13]\n"
"b 63f\n"
"62:" // Oddments: Load (3, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 63f\n"
- "ld1 { v24.b }[0], [x12]\n"
+ "tbz x0, #0, 63f\n"
+ "ld1 { v24.b }[0], [x13]\n"
"63:" // Oddments: Load (3, 4): Bit 2: End
- "ldr d4, [x3, #0x70]\n"
+ "ldr d4, [x23, #0x70]\n"
"usubl v24.8h, v24.8b, v9.8b\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "ldr x20, [x4, #0xb8]\n"
- "smlal v17.4s, v24.4h, v3.4h\n"
- "smlal2 v21.4s, v24.8h, v3.8h\n"
- "add x20, x20, x0\n"
- "smlal v13.4s, v23.4h, v4.4h\n"
- "smlal2 v19.4s, v23.8h, v4.8h\n"
- "smlal v20.4s, v28.4h, v4.4h\n"
- "smlal2 v10.4s, v28.8h, v4.8h\n"
- "smlal v8.4s, v24.4h, v4.4h\n"
+ "ssubl v4.8h, v4.8b, v14.8b\n"
+ "ldr x9, [x20, #0xb8]\n"
+ "smlal v6.4s, v24.4h, v3.4h\n"
+ "smlal2 v5.4s, v24.8h, v3.8h\n"
+ "add x9, x9, x24\n"
+ "smlal v15.4s, v23.4h, v4.4h\n"
+ "smlal2 v16.4s, v23.8h, v4.8h\n"
+ "smlal v17.4s, v28.4h, v4.4h\n"
+ "smlal2 v8.4s, v28.8h, v4.8h\n"
+ "smlal v10.4s, v24.4h, v4.4h\n"
"smlal2 v7.4s, v24.8h, v4.8h\n"
- "tbz x1, #2, 65f\n"
- "ld1 { v22.s }[0], [x20], #0x4\n"
- "tbz x1, #1, 64f\n"
- "ld1 { v22.h }[2], [x20], #0x2\n"
- "tbz x1, #0, 67f\n"
- "ld1 { v22.b }[6], [x20]\n"
+ "tbz x0, #2, 65f\n"
+ "ld1 { v22.s }[0], [x9], #0x4\n"
+ "tbz x0, #1, 64f\n"
+ "ld1 { v22.h }[2], [x9], #0x2\n"
+ "tbz x0, #0, 67f\n"
+ "ld1 { v22.b }[6], [x9]\n"
"b 67f\n"
"64:" // Oddments: Load (3, 5): Bit 2: Bit 1: Unset
- "tbz x1, #0, 67f\n"
- "ld1 { v22.b }[4], [x20]\n"
+ "tbz x0, #0, 67f\n"
+ "ld1 { v22.b }[4], [x9]\n"
"b 67f\n"
"65:" // Oddments: Load (3, 5): Bit 2: Unset
- "tbz x1, #1, 66f\n"
- "ld1 { v22.h }[0], [x20], #0x2\n"
- "tbz x1, #0, 67f\n"
- "ld1 { v22.b }[2], [x20]\n"
+ "tbz x0, #1, 66f\n"
+ "ld1 { v22.h }[0], [x9], #0x2\n"
+ "tbz x0, #0, 67f\n"
+ "ld1 { v22.b }[2], [x9]\n"
"b 67f\n"
"66:" // Oddments: Load (3, 5): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 67f\n"
- "ld1 { v22.b }[0], [x20]\n"
+ "tbz x0, #0, 67f\n"
+ "ld1 { v22.b }[0], [x9]\n"
"67:" // Oddments: Load (3, 5): Bit 2: End
- "ldr d0, [x3, #0x78]\n"
+ "ldr d0, [x23, #0x78]\n"
"usubl v22.8h, v22.8b, v9.8b\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
- "ldr x11, [x4, #0xc0]\n"
- "smlal v17.4s, v22.4h, v4.4h\n"
- "smlal2 v21.4s, v22.8h, v4.8h\n"
- "add x11, x11, x0\n"
- "smlal v13.4s, v31.4h, v0.4h\n"
- "smlal2 v19.4s, v31.8h, v0.8h\n"
- "smlal v20.4s, v30.4h, v0.4h\n"
- "smlal2 v10.4s, v30.8h, v0.8h\n"
- "tbz x1, #2, 69f\n"
- "ld1 { v27.s }[0], [x11], #0x4\n"
- "tbz x1, #1, 68f\n"
- "ld1 { v27.h }[2], [x11], #0x2\n"
- "tbz x1, #0, 71f\n"
- "ld1 { v27.b }[6], [x11]\n"
+ "ssubl v0.8h, v0.8b, v14.8b\n"
+ "ldr x19, [x20, #0xc0]\n"
+ "smlal v6.4s, v22.4h, v4.4h\n"
+ "smlal2 v5.4s, v22.8h, v4.8h\n"
+ "add x19, x19, x24\n"
+ "smlal v15.4s, v31.4h, v0.4h\n"
+ "smlal2 v16.4s, v31.8h, v0.8h\n"
+ "smlal v17.4s, v30.4h, v0.4h\n"
+ "smlal2 v8.4s, v30.8h, v0.8h\n"
+ "tbz x0, #2, 69f\n"
+ "ld1 { v27.s }[0], [x19], #0x4\n"
+ "tbz x0, #1, 68f\n"
+ "ld1 { v27.h }[2], [x19], #0x2\n"
+ "tbz x0, #0, 71f\n"
+ "ld1 { v27.b }[6], [x19]\n"
"b 71f\n"
"68:" // Oddments: Load (4, 0): Bit 2: Bit 1: Unset
- "tbz x1, #0, 71f\n"
- "ld1 { v27.b }[4], [x11]\n"
+ "tbz x0, #0, 71f\n"
+ "ld1 { v27.b }[4], [x19]\n"
"b 71f\n"
"69:" // Oddments: Load (4, 0): Bit 2: Unset
- "tbz x1, #1, 70f\n"
- "ld1 { v27.h }[0], [x11], #0x2\n"
- "tbz x1, #0, 71f\n"
- "ld1 { v27.b }[2], [x11]\n"
+ "tbz x0, #1, 70f\n"
+ "ld1 { v27.h }[0], [x19], #0x2\n"
+ "tbz x0, #0, 71f\n"
+ "ld1 { v27.b }[2], [x19]\n"
"b 71f\n"
"70:" // Oddments: Load (4, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 71f\n"
- "ld1 { v27.b }[0], [x11]\n"
+ "tbz x0, #0, 71f\n"
+ "ld1 { v27.b }[0], [x19]\n"
"71:" // Oddments: Load (4, 0): Bit 2: End
"usubl v27.8h, v27.8b, v9.8b\n"
- "ldr x22, [x4, #0xc8]\n"
- "smlal v8.4s, v27.4h, v0.4h\n"
+ "ldr x28, [x20, #0xc8]\n"
+ "smlal v10.4s, v27.4h, v0.4h\n"
"smlal2 v7.4s, v27.8h, v0.8h\n"
- "add x22, x22, x0\n"
- "tbz x1, #2, 73f\n"
- "ld1 { v23.s }[0], [x22], #0x4\n"
- "tbz x1, #1, 72f\n"
- "ld1 { v23.h }[2], [x22], #0x2\n"
- "tbz x1, #0, 75f\n"
- "ld1 { v23.b }[6], [x22]\n"
+ "add x28, x28, x24\n"
+ "tbz x0, #2, 73f\n"
+ "ld1 { v23.s }[0], [x28], #0x4\n"
+ "tbz x0, #1, 72f\n"
+ "ld1 { v23.h }[2], [x28], #0x2\n"
+ "tbz x0, #0, 75f\n"
+ "ld1 { v23.b }[6], [x28]\n"
"b 75f\n"
"72:" // Oddments: Load (4, 1): Bit 2: Bit 1: Unset
- "tbz x1, #0, 75f\n"
- "ld1 { v23.b }[4], [x22]\n"
+ "tbz x0, #0, 75f\n"
+ "ld1 { v23.b }[4], [x28]\n"
"b 75f\n"
"73:" // Oddments: Load (4, 1): Bit 2: Unset
- "tbz x1, #1, 74f\n"
- "ld1 { v23.h }[0], [x22], #0x2\n"
- "tbz x1, #0, 75f\n"
- "ld1 { v23.b }[2], [x22]\n"
+ "tbz x0, #1, 74f\n"
+ "ld1 { v23.h }[0], [x28], #0x2\n"
+ "tbz x0, #0, 75f\n"
+ "ld1 { v23.b }[2], [x28]\n"
"b 75f\n"
"74:" // Oddments: Load (4, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 75f\n"
- "ld1 { v23.b }[0], [x22]\n"
+ "tbz x0, #0, 75f\n"
+ "ld1 { v23.b }[0], [x28]\n"
"75:" // Oddments: Load (4, 1): Bit 2: End
- "ldr d1, [x3, #0x80]\n"
+ "ldr d1, [x23, #0x80]\n"
"usubl v23.8h, v23.8b, v9.8b\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
- "ldr x9, [x4, #0xd0]\n"
- "smlal v17.4s, v23.4h, v0.4h\n"
- "smlal2 v21.4s, v23.8h, v0.8h\n"
- "add x9, x9, x0\n"
- "smlal v13.4s, v30.4h, v1.4h\n"
- "smlal2 v19.4s, v30.8h, v1.8h\n"
- "smlal v20.4s, v26.4h, v1.4h\n"
- "smlal2 v10.4s, v26.8h, v1.8h\n"
- "smlal v8.4s, v23.4h, v1.4h\n"
+ "ssubl v1.8h, v1.8b, v14.8b\n"
+ "ldr x6, [x20, #0xd0]\n"
+ "smlal v6.4s, v23.4h, v0.4h\n"
+ "smlal2 v5.4s, v23.8h, v0.8h\n"
+ "add x6, x6, x24\n"
+ "smlal v15.4s, v30.4h, v1.4h\n"
+ "smlal2 v16.4s, v30.8h, v1.8h\n"
+ "smlal v17.4s, v26.4h, v1.4h\n"
+ "smlal2 v8.4s, v26.8h, v1.8h\n"
+ "smlal v10.4s, v23.4h, v1.4h\n"
"smlal2 v7.4s, v23.8h, v1.8h\n"
- "tbz x1, #2, 77f\n"
- "ld1 { v31.s }[0], [x9], #0x4\n"
- "tbz x1, #1, 76f\n"
- "ld1 { v31.h }[2], [x9], #0x2\n"
- "tbz x1, #0, 79f\n"
- "ld1 { v31.b }[6], [x9]\n"
+ "tbz x0, #2, 77f\n"
+ "ld1 { v31.s }[0], [x6], #0x4\n"
+ "tbz x0, #1, 76f\n"
+ "ld1 { v31.h }[2], [x6], #0x2\n"
+ "tbz x0, #0, 79f\n"
+ "ld1 { v31.b }[6], [x6]\n"
"b 79f\n"
"76:" // Oddments: Load (4, 2): Bit 2: Bit 1: Unset
- "tbz x1, #0, 79f\n"
- "ld1 { v31.b }[4], [x9]\n"
+ "tbz x0, #0, 79f\n"
+ "ld1 { v31.b }[4], [x6]\n"
"b 79f\n"
"77:" // Oddments: Load (4, 2): Bit 2: Unset
- "tbz x1, #1, 78f\n"
- "ld1 { v31.h }[0], [x9], #0x2\n"
- "tbz x1, #0, 79f\n"
- "ld1 { v31.b }[2], [x9]\n"
+ "tbz x0, #1, 78f\n"
+ "ld1 { v31.h }[0], [x6], #0x2\n"
+ "tbz x0, #0, 79f\n"
+ "ld1 { v31.b }[2], [x6]\n"
"b 79f\n"
"78:" // Oddments: Load (4, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 79f\n"
- "ld1 { v31.b }[0], [x9]\n"
+ "tbz x0, #0, 79f\n"
+ "ld1 { v31.b }[0], [x6]\n"
"79:" // Oddments: Load (4, 2): Bit 2: End
- "ldr d2, [x3, #0x88]\n"
+ "ldr d2, [x23, #0x88]\n"
"usubl v31.8h, v31.8b, v9.8b\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
- "ldr x28, [x4, #0xd8]\n"
- "smlal v17.4s, v31.4h, v1.4h\n"
- "smlal2 v21.4s, v31.8h, v1.8h\n"
- "add x28, x28, x0\n"
- "smlal v13.4s, v26.4h, v2.4h\n"
- "smlal2 v19.4s, v26.8h, v2.8h\n"
- "smlal v20.4s, v25.4h, v2.4h\n"
- "smlal2 v10.4s, v25.8h, v2.8h\n"
- "smlal v8.4s, v31.4h, v2.4h\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "ldr x27, [x20, #0xd8]\n"
+ "smlal v6.4s, v31.4h, v1.4h\n"
+ "smlal2 v5.4s, v31.8h, v1.8h\n"
+ "add x27, x27, x24\n"
+ "smlal v15.4s, v26.4h, v2.4h\n"
+ "smlal2 v16.4s, v26.8h, v2.8h\n"
+ "smlal v17.4s, v25.4h, v2.4h\n"
+ "smlal2 v8.4s, v25.8h, v2.8h\n"
+ "smlal v10.4s, v31.4h, v2.4h\n"
"smlal2 v7.4s, v31.8h, v2.8h\n"
- "tbz x1, #2, 81f\n"
- "ld1 { v30.s }[0], [x28], #0x4\n"
- "tbz x1, #1, 80f\n"
- "ld1 { v30.h }[2], [x28], #0x2\n"
- "tbz x1, #0, 83f\n"
- "ld1 { v30.b }[6], [x28]\n"
+ "tbz x0, #2, 81f\n"
+ "ld1 { v30.s }[0], [x27], #0x4\n"
+ "tbz x0, #1, 80f\n"
+ "ld1 { v30.h }[2], [x27], #0x2\n"
+ "tbz x0, #0, 83f\n"
+ "ld1 { v30.b }[6], [x27]\n"
"b 83f\n"
"80:" // Oddments: Load (4, 3): Bit 2: Bit 1: Unset
- "tbz x1, #0, 83f\n"
- "ld1 { v30.b }[4], [x28]\n"
+ "tbz x0, #0, 83f\n"
+ "ld1 { v30.b }[4], [x27]\n"
"b 83f\n"
"81:" // Oddments: Load (4, 3): Bit 2: Unset
- "tbz x1, #1, 82f\n"
- "ld1 { v30.h }[0], [x28], #0x2\n"
- "tbz x1, #0, 83f\n"
- "ld1 { v30.b }[2], [x28]\n"
+ "tbz x0, #1, 82f\n"
+ "ld1 { v30.h }[0], [x27], #0x2\n"
+ "tbz x0, #0, 83f\n"
+ "ld1 { v30.b }[2], [x27]\n"
"b 83f\n"
"82:" // Oddments: Load (4, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 83f\n"
- "ld1 { v30.b }[0], [x28]\n"
+ "tbz x0, #0, 83f\n"
+ "ld1 { v30.b }[0], [x27]\n"
"83:" // Oddments: Load (4, 3): Bit 2: End
- "ldr d3, [x3, #0x90]\n"
+ "ldr d3, [x23, #0x90]\n"
"usubl v30.8h, v30.8b, v9.8b\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "ldr x27, [x4, #0xe0]\n"
- "smlal v17.4s, v30.4h, v2.4h\n"
- "smlal2 v21.4s, v30.8h, v2.8h\n"
- "add x27, x27, x0\n"
- "smlal v13.4s, v25.4h, v3.4h\n"
- "smlal2 v19.4s, v25.8h, v3.8h\n"
- "smlal v20.4s, v24.4h, v3.4h\n"
- "smlal2 v10.4s, v24.8h, v3.8h\n"
- "smlal v8.4s, v30.4h, v3.4h\n"
+ "ssubl v3.8h, v3.8b, v14.8b\n"
+ "ldr x11, [x20, #0xe0]\n"
+ "smlal v6.4s, v30.4h, v2.4h\n"
+ "smlal2 v5.4s, v30.8h, v2.8h\n"
+ "add x11, x11, x24\n"
+ "smlal v15.4s, v25.4h, v3.4h\n"
+ "smlal2 v16.4s, v25.8h, v3.8h\n"
+ "smlal v17.4s, v24.4h, v3.4h\n"
+ "smlal2 v8.4s, v24.8h, v3.8h\n"
+ "smlal v10.4s, v30.4h, v3.4h\n"
"smlal2 v7.4s, v30.8h, v3.8h\n"
- "tbz x1, #2, 85f\n"
- "ld1 { v28.s }[0], [x27], #0x4\n"
- "tbz x1, #1, 84f\n"
- "ld1 { v28.h }[2], [x27], #0x2\n"
- "tbz x1, #0, 87f\n"
- "ld1 { v28.b }[6], [x27]\n"
+ "tbz x0, #2, 85f\n"
+ "ld1 { v28.s }[0], [x11], #0x4\n"
+ "tbz x0, #1, 84f\n"
+ "ld1 { v28.h }[2], [x11], #0x2\n"
+ "tbz x0, #0, 87f\n"
+ "ld1 { v28.b }[6], [x11]\n"
"b 87f\n"
"84:" // Oddments: Load (4, 4): Bit 2: Bit 1: Unset
- "tbz x1, #0, 87f\n"
- "ld1 { v28.b }[4], [x27]\n"
+ "tbz x0, #0, 87f\n"
+ "ld1 { v28.b }[4], [x11]\n"
"b 87f\n"
"85:" // Oddments: Load (4, 4): Bit 2: Unset
- "tbz x1, #1, 86f\n"
- "ld1 { v28.h }[0], [x27], #0x2\n"
- "tbz x1, #0, 87f\n"
- "ld1 { v28.b }[2], [x27]\n"
+ "tbz x0, #1, 86f\n"
+ "ld1 { v28.h }[0], [x11], #0x2\n"
+ "tbz x0, #0, 87f\n"
+ "ld1 { v28.b }[2], [x11]\n"
"b 87f\n"
"86:" // Oddments: Load (4, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 87f\n"
- "ld1 { v28.b }[0], [x27]\n"
+ "tbz x0, #0, 87f\n"
+ "ld1 { v28.b }[0], [x11]\n"
"87:" // Oddments: Load (4, 4): Bit 2: End
- "ldr d4, [x3, #0x98]\n"
+ "ldr d4, [x23, #0x98]\n"
"usubl v28.8h, v28.8b, v9.8b\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "ldr x26, [x4, #0xe8]\n"
- "smlal v17.4s, v28.4h, v3.4h\n"
- "smlal2 v21.4s, v28.8h, v3.8h\n"
- "add x26, x26, x0\n"
- "smlal v13.4s, v24.4h, v4.4h\n"
- "smlal2 v19.4s, v24.8h, v4.8h\n"
- "smlal v20.4s, v22.4h, v4.4h\n"
- "smlal2 v10.4s, v22.8h, v4.8h\n"
- "smlal v8.4s, v28.4h, v4.4h\n"
+ "ssubl v4.8h, v4.8b, v14.8b\n"
+ "ldr x17, [x20, #0xe8]\n"
+ "smlal v6.4s, v28.4h, v3.4h\n"
+ "smlal2 v5.4s, v28.8h, v3.8h\n"
+ "add x17, x17, x24\n"
+ "smlal v15.4s, v24.4h, v4.4h\n"
+ "smlal2 v16.4s, v24.8h, v4.8h\n"
+ "smlal v17.4s, v22.4h, v4.4h\n"
+ "smlal2 v8.4s, v22.8h, v4.8h\n"
+ "smlal v10.4s, v28.4h, v4.4h\n"
"smlal2 v7.4s, v28.8h, v4.8h\n"
- "tbz x1, #2, 89f\n"
- "ld1 { v26.s }[0], [x26], #0x4\n"
- "tbz x1, #1, 88f\n"
- "ld1 { v26.h }[2], [x26], #0x2\n"
- "tbz x1, #0, 91f\n"
- "ld1 { v26.b }[6], [x26]\n"
+ "tbz x0, #2, 89f\n"
+ "ld1 { v26.s }[0], [x17], #0x4\n"
+ "tbz x0, #1, 88f\n"
+ "ld1 { v26.h }[2], [x17], #0x2\n"
+ "tbz x0, #0, 91f\n"
+ "ld1 { v26.b }[6], [x17]\n"
"b 91f\n"
"88:" // Oddments: Load (4, 5): Bit 2: Bit 1: Unset
- "tbz x1, #0, 91f\n"
- "ld1 { v26.b }[4], [x26]\n"
+ "tbz x0, #0, 91f\n"
+ "ld1 { v26.b }[4], [x17]\n"
"b 91f\n"
"89:" // Oddments: Load (4, 5): Bit 2: Unset
- "tbz x1, #1, 90f\n"
- "ld1 { v26.h }[0], [x26], #0x2\n"
- "tbz x1, #0, 91f\n"
- "ld1 { v26.b }[2], [x26]\n"
+ "tbz x0, #1, 90f\n"
+ "ld1 { v26.h }[0], [x17], #0x2\n"
+ "tbz x0, #0, 91f\n"
+ "ld1 { v26.b }[2], [x17]\n"
"b 91f\n"
"90:" // Oddments: Load (4, 5): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 91f\n"
- "ld1 { v26.b }[0], [x26]\n"
+ "tbz x0, #0, 91f\n"
+ "ld1 { v26.b }[0], [x17]\n"
"91:" // Oddments: Load (4, 5): Bit 2: End
- "ldr d0, [x3, #0xa0]\n"
+ "ldr d0, [x23, #0xa0]\n"
"usubl v26.8h, v26.8b, v9.8b\n"
- "ssubl v0.8h, v0.8b, v15.8b\n"
- "ldr x25, [x4, #0xf0]\n"
- "smlal v17.4s, v26.4h, v4.4h\n"
- "smlal2 v21.4s, v26.8h, v4.8h\n"
- "add x25, x25, x0\n"
- "smlal v13.4s, v27.4h, v0.4h\n"
- "smlal2 v19.4s, v27.8h, v0.8h\n"
- "smlal v20.4s, v23.4h, v0.4h\n"
- "smlal2 v10.4s, v23.8h, v0.8h\n"
- "tbz x1, #2, 93f\n"
- "ld1 { v25.s }[0], [x25], #0x4\n"
- "tbz x1, #1, 92f\n"
- "ld1 { v25.h }[2], [x25], #0x2\n"
- "tbz x1, #0, 95f\n"
- "ld1 { v25.b }[6], [x25]\n"
+ "ssubl v0.8h, v0.8b, v14.8b\n"
+ "ldr x5, [x20, #0xf0]\n"
+ "smlal v6.4s, v26.4h, v4.4h\n"
+ "smlal2 v5.4s, v26.8h, v4.8h\n"
+ "add x5, x5, x24\n"
+ "smlal v15.4s, v27.4h, v0.4h\n"
+ "smlal2 v16.4s, v27.8h, v0.8h\n"
+ "smlal v17.4s, v23.4h, v0.4h\n"
+ "smlal2 v8.4s, v23.8h, v0.8h\n"
+ "tbz x0, #2, 93f\n"
+ "ld1 { v25.s }[0], [x5], #0x4\n"
+ "tbz x0, #1, 92f\n"
+ "ld1 { v25.h }[2], [x5], #0x2\n"
+ "tbz x0, #0, 95f\n"
+ "ld1 { v25.b }[6], [x5]\n"
"b 95f\n"
"92:" // Oddments: Load (5, 0): Bit 2: Bit 1: Unset
- "tbz x1, #0, 95f\n"
- "ld1 { v25.b }[4], [x25]\n"
+ "tbz x0, #0, 95f\n"
+ "ld1 { v25.b }[4], [x5]\n"
"b 95f\n"
"93:" // Oddments: Load (5, 0): Bit 2: Unset
- "tbz x1, #1, 94f\n"
- "ld1 { v25.h }[0], [x25], #0x2\n"
- "tbz x1, #0, 95f\n"
- "ld1 { v25.b }[2], [x25]\n"
+ "tbz x0, #1, 94f\n"
+ "ld1 { v25.h }[0], [x5], #0x2\n"
+ "tbz x0, #0, 95f\n"
+ "ld1 { v25.b }[2], [x5]\n"
"b 95f\n"
"94:" // Oddments: Load (5, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 95f\n"
- "ld1 { v25.b }[0], [x25]\n"
+ "tbz x0, #0, 95f\n"
+ "ld1 { v25.b }[0], [x5]\n"
"95:" // Oddments: Load (5, 0): Bit 2: End
"usubl v25.8h, v25.8b, v9.8b\n"
- "ldr x24, [x4, #0xf8]\n"
- "smlal v8.4s, v25.4h, v0.4h\n"
+ "ldr x25, [x20, #0xf8]\n"
+ "smlal v10.4s, v25.4h, v0.4h\n"
"smlal2 v7.4s, v25.8h, v0.8h\n"
- "add x24, x24, x0\n"
- "tbz x1, #2, 97f\n"
- "ld1 { v24.s }[0], [x24], #0x4\n"
- "tbz x1, #1, 96f\n"
- "ld1 { v24.h }[2], [x24], #0x2\n"
- "tbz x1, #0, 99f\n"
- "ld1 { v24.b }[6], [x24]\n"
+ "add x25, x25, x24\n"
+ "tbz x0, #2, 97f\n"
+ "ld1 { v24.s }[0], [x25], #0x4\n"
+ "tbz x0, #1, 96f\n"
+ "ld1 { v24.h }[2], [x25], #0x2\n"
+ "tbz x0, #0, 99f\n"
+ "ld1 { v24.b }[6], [x25]\n"
"b 99f\n"
"96:" // Oddments: Load (5, 1): Bit 2: Bit 1: Unset
- "tbz x1, #0, 99f\n"
- "ld1 { v24.b }[4], [x24]\n"
+ "tbz x0, #0, 99f\n"
+ "ld1 { v24.b }[4], [x25]\n"
"b 99f\n"
"97:" // Oddments: Load (5, 1): Bit 2: Unset
- "tbz x1, #1, 98f\n"
- "ld1 { v24.h }[0], [x24], #0x2\n"
- "tbz x1, #0, 99f\n"
- "ld1 { v24.b }[2], [x24]\n"
+ "tbz x0, #1, 98f\n"
+ "ld1 { v24.h }[0], [x25], #0x2\n"
+ "tbz x0, #0, 99f\n"
+ "ld1 { v24.b }[2], [x25]\n"
"b 99f\n"
"98:" // Oddments: Load (5, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 99f\n"
- "ld1 { v24.b }[0], [x24]\n"
+ "tbz x0, #0, 99f\n"
+ "ld1 { v24.b }[0], [x25]\n"
"99:" // Oddments: Load (5, 1): Bit 2: End
- "ldr d1, [x3, #0xa8]\n"
+ "ldr d1, [x23, #0xa8]\n"
"usubl v24.8h, v24.8b, v9.8b\n"
- "ssubl v1.8h, v1.8b, v15.8b\n"
- "ldr x23, [x4, #0x100]\n"
- "smlal v17.4s, v24.4h, v0.4h\n"
- "smlal2 v21.4s, v24.8h, v0.8h\n"
- "add x23, x23, x0\n"
- "smlal v13.4s, v23.4h, v1.4h\n"
- "smlal2 v19.4s, v23.8h, v1.8h\n"
- "smlal v20.4s, v31.4h, v1.4h\n"
- "smlal2 v10.4s, v31.8h, v1.8h\n"
- "smlal v8.4s, v24.4h, v1.4h\n"
+ "ssubl v1.8h, v1.8b, v14.8b\n"
+ "ldr x26, [x20, #0x100]\n"
+ "smlal v6.4s, v24.4h, v0.4h\n"
+ "smlal2 v5.4s, v24.8h, v0.8h\n"
+ "add x26, x26, x24\n"
+ "smlal v15.4s, v23.4h, v1.4h\n"
+ "smlal2 v16.4s, v23.8h, v1.8h\n"
+ "smlal v17.4s, v31.4h, v1.4h\n"
+ "smlal2 v8.4s, v31.8h, v1.8h\n"
+ "smlal v10.4s, v24.4h, v1.4h\n"
"smlal2 v7.4s, v24.8h, v1.8h\n"
- "tbz x1, #2, 101f\n"
- "ld1 { v27.s }[0], [x23], #0x4\n"
- "tbz x1, #1, 100f\n"
- "ld1 { v27.h }[2], [x23], #0x2\n"
- "tbz x1, #0, 103f\n"
- "ld1 { v27.b }[6], [x23]\n"
+ "tbz x0, #2, 101f\n"
+ "ld1 { v27.s }[0], [x26], #0x4\n"
+ "tbz x0, #1, 100f\n"
+ "ld1 { v27.h }[2], [x26], #0x2\n"
+ "tbz x0, #0, 103f\n"
+ "ld1 { v27.b }[6], [x26]\n"
"b 103f\n"
"100:" // Oddments: Load (5, 2): Bit 2: Bit 1: Unset
- "tbz x1, #0, 103f\n"
- "ld1 { v27.b }[4], [x23]\n"
+ "tbz x0, #0, 103f\n"
+ "ld1 { v27.b }[4], [x26]\n"
"b 103f\n"
"101:" // Oddments: Load (5, 2): Bit 2: Unset
- "tbz x1, #1, 102f\n"
- "ld1 { v27.h }[0], [x23], #0x2\n"
- "tbz x1, #0, 103f\n"
- "ld1 { v27.b }[2], [x23]\n"
+ "tbz x0, #1, 102f\n"
+ "ld1 { v27.h }[0], [x26], #0x2\n"
+ "tbz x0, #0, 103f\n"
+ "ld1 { v27.b }[2], [x26]\n"
"b 103f\n"
"102:" // Oddments: Load (5, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 103f\n"
- "ld1 { v27.b }[0], [x23]\n"
+ "tbz x0, #0, 103f\n"
+ "ld1 { v27.b }[0], [x26]\n"
"103:" // Oddments: Load (5, 2): Bit 2: End
- "ldr d2, [x3, #0xb0]\n"
+ "ldr d2, [x23, #0xb0]\n"
"usubl v27.8h, v27.8b, v9.8b\n"
- "ssubl v2.8h, v2.8b, v15.8b\n"
- "ldr x15, [x4, #0x108]\n"
- "smlal v17.4s, v27.4h, v1.4h\n"
- "smlal2 v21.4s, v27.8h, v1.8h\n"
- "add x15, x15, x0\n"
- "smlal v13.4s, v31.4h, v2.4h\n"
- "smlal2 v19.4s, v31.8h, v2.8h\n"
- "smlal v20.4s, v30.4h, v2.4h\n"
- "smlal2 v10.4s, v30.8h, v2.8h\n"
- "smlal v8.4s, v27.4h, v2.4h\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "ldr x12, [x20, #0x108]\n"
+ "smlal v6.4s, v27.4h, v1.4h\n"
+ "smlal2 v5.4s, v27.8h, v1.8h\n"
+ "add x12, x12, x24\n"
+ "smlal v15.4s, v31.4h, v2.4h\n"
+ "smlal2 v16.4s, v31.8h, v2.8h\n"
+ "smlal v17.4s, v30.4h, v2.4h\n"
+ "smlal2 v8.4s, v30.8h, v2.8h\n"
+ "smlal v10.4s, v27.4h, v2.4h\n"
"smlal2 v7.4s, v27.8h, v2.8h\n"
- "tbz x1, #2, 105f\n"
- "ld1 { v25.s }[0], [x15], #0x4\n"
- "tbz x1, #1, 104f\n"
- "ld1 { v25.h }[2], [x15], #0x2\n"
- "tbz x1, #0, 107f\n"
- "ld1 { v25.b }[6], [x15]\n"
+ "tbz x0, #2, 105f\n"
+ "ld1 { v25.s }[0], [x12], #0x4\n"
+ "tbz x0, #1, 104f\n"
+ "ld1 { v25.h }[2], [x12], #0x2\n"
+ "tbz x0, #0, 107f\n"
+ "ld1 { v25.b }[6], [x12]\n"
"b 107f\n"
"104:" // Oddments: Load (5, 3): Bit 2: Bit 1: Unset
- "tbz x1, #0, 107f\n"
- "ld1 { v25.b }[4], [x15]\n"
+ "tbz x0, #0, 107f\n"
+ "ld1 { v25.b }[4], [x12]\n"
"b 107f\n"
"105:" // Oddments: Load (5, 3): Bit 2: Unset
- "tbz x1, #1, 106f\n"
- "ld1 { v25.h }[0], [x15], #0x2\n"
- "tbz x1, #0, 107f\n"
- "ld1 { v25.b }[2], [x15]\n"
+ "tbz x0, #1, 106f\n"
+ "ld1 { v25.h }[0], [x12], #0x2\n"
+ "tbz x0, #0, 107f\n"
+ "ld1 { v25.b }[2], [x12]\n"
"b 107f\n"
"106:" // Oddments: Load (5, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 107f\n"
- "ld1 { v25.b }[0], [x15]\n"
+ "tbz x0, #0, 107f\n"
+ "ld1 { v25.b }[0], [x12]\n"
"107:" // Oddments: Load (5, 3): Bit 2: End
- "ldr d3, [x3, #0xb8]\n"
+ "ldr d3, [x23, #0xb8]\n"
"usubl v25.8h, v25.8b, v9.8b\n"
- "ssubl v3.8h, v3.8b, v15.8b\n"
- "ldr x21, [x4, #0x110]\n"
- "smlal v17.4s, v25.4h, v2.4h\n"
- "smlal2 v21.4s, v25.8h, v2.8h\n"
- "add x21, x21, x0\n"
- "smlal v13.4s, v30.4h, v3.4h\n"
- "smlal2 v19.4s, v30.8h, v3.8h\n"
- "smlal v20.4s, v28.4h, v3.4h\n"
- "smlal2 v10.4s, v28.8h, v3.8h\n"
- "smlal v8.4s, v25.4h, v3.4h\n"
+ "ssubl v3.8h, v3.8b, v14.8b\n"
+ "ldr x14, [x20, #0x110]\n"
+ "smlal v6.4s, v25.4h, v2.4h\n"
+ "smlal2 v5.4s, v25.8h, v2.8h\n"
+ "add x14, x14, x24\n"
+ "smlal v15.4s, v30.4h, v3.4h\n"
+ "smlal2 v16.4s, v30.8h, v3.8h\n"
+ "smlal v17.4s, v28.4h, v3.4h\n"
+ "smlal2 v8.4s, v28.8h, v3.8h\n"
+ "smlal v10.4s, v25.4h, v3.4h\n"
"smlal2 v7.4s, v25.8h, v3.8h\n"
- "tbz x1, #2, 109f\n"
- "ld1 { v24.s }[0], [x21], #0x4\n"
- "tbz x1, #1, 108f\n"
- "ld1 { v24.h }[2], [x21], #0x2\n"
- "tbz x1, #0, 111f\n"
- "ld1 { v24.b }[6], [x21]\n"
+ "tbz x0, #2, 109f\n"
+ "ld1 { v24.s }[0], [x14], #0x4\n"
+ "tbz x0, #1, 108f\n"
+ "ld1 { v24.h }[2], [x14], #0x2\n"
+ "tbz x0, #0, 111f\n"
+ "ld1 { v24.b }[6], [x14]\n"
"b 111f\n"
"108:" // Oddments: Load (5, 4): Bit 2: Bit 1: Unset
- "tbz x1, #0, 111f\n"
- "ld1 { v24.b }[4], [x21]\n"
+ "tbz x0, #0, 111f\n"
+ "ld1 { v24.b }[4], [x14]\n"
"b 111f\n"
"109:" // Oddments: Load (5, 4): Bit 2: Unset
- "tbz x1, #1, 110f\n"
- "ld1 { v24.h }[0], [x21], #0x2\n"
- "tbz x1, #0, 111f\n"
- "ld1 { v24.b }[2], [x21]\n"
+ "tbz x0, #1, 110f\n"
+ "ld1 { v24.h }[0], [x14], #0x2\n"
+ "tbz x0, #0, 111f\n"
+ "ld1 { v24.b }[2], [x14]\n"
"b 111f\n"
"110:" // Oddments: Load (5, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 111f\n"
- "ld1 { v24.b }[0], [x21]\n"
+ "tbz x0, #0, 111f\n"
+ "ld1 { v24.b }[0], [x14]\n"
"111:" // Oddments: Load (5, 4): Bit 2: End
- "ldr d4, [x3, #0xc0]\n"
+ "ldr d4, [x23, #0xc0]\n"
"usubl v24.8h, v24.8b, v9.8b\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "ldr x20, [x4, #0x118]\n"
- "smlal v17.4s, v24.4h, v3.4h\n"
- "smlal2 v21.4s, v24.8h, v3.8h\n"
- "add x20, x20, x0\n"
- "smlal v13.4s, v28.4h, v4.4h\n"
- "smlal2 v19.4s, v28.8h, v4.8h\n"
- "smlal v20.4s, v26.4h, v4.4h\n"
- "smlal2 v10.4s, v26.8h, v4.8h\n"
- "smlal v8.4s, v24.4h, v4.4h\n"
+ "ssubl v4.8h, v4.8b, v14.8b\n"
+ "ldr x21, [x20, #0x118]\n"
+ "smlal v6.4s, v24.4h, v3.4h\n"
+ "smlal2 v5.4s, v24.8h, v3.8h\n"
+ "add x21, x21, x24\n"
+ "smlal v15.4s, v28.4h, v4.4h\n"
+ "smlal2 v16.4s, v28.8h, v4.8h\n"
+ "smlal v17.4s, v26.4h, v4.4h\n"
+ "smlal2 v8.4s, v26.8h, v4.8h\n"
+ "smlal v10.4s, v24.4h, v4.4h\n"
"smlal2 v7.4s, v24.8h, v4.8h\n"
- "tbz x1, #2, 113f\n"
- "ld1 { v27.s }[0], [x20], #0x4\n"
- "tbz x1, #1, 112f\n"
- "ld1 { v27.h }[2], [x20], #0x2\n"
- "tbz x1, #0, 115f\n"
- "ld1 { v27.b }[6], [x20]\n"
+ "tbz x0, #2, 113f\n"
+ "ld1 { v27.s }[0], [x21], #0x4\n"
+ "tbz x0, #1, 112f\n"
+ "ld1 { v27.h }[2], [x21], #0x2\n"
+ "tbz x0, #0, 115f\n"
+ "ld1 { v27.b }[6], [x21]\n"
"b 115f\n"
"112:" // Oddments: Load (5, 5): Bit 2: Bit 1: Unset
- "tbz x1, #0, 115f\n"
- "ld1 { v27.b }[4], [x20]\n"
+ "tbz x0, #0, 115f\n"
+ "ld1 { v27.b }[4], [x21]\n"
"b 115f\n"
"113:" // Oddments: Load (5, 5): Bit 2: Unset
- "tbz x1, #1, 114f\n"
- "ld1 { v27.h }[0], [x20], #0x2\n"
- "tbz x1, #0, 115f\n"
- "ld1 { v27.b }[2], [x20]\n"
+ "tbz x0, #1, 114f\n"
+ "ld1 { v27.h }[0], [x21], #0x2\n"
+ "tbz x0, #0, 115f\n"
+ "ld1 { v27.b }[2], [x21]\n"
"b 115f\n"
"114:" // Oddments: Load (5, 5): Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 115f\n"
- "ld1 { v27.b }[0], [x20]\n"
+ "tbz x0, #0, 115f\n"
+ "ld1 { v27.b }[0], [x21]\n"
"115:" // Oddments: Load (5, 5): Bit 2: End
"usubl v27.8h, v27.8b, v9.8b\n"
- "smlal v17.4s, v27.4h, v4.4h\n"
- "smlal2 v21.4s, v27.8h, v4.8h\n"
- "tbz x1, #2, 117f\n"
- "ld1 { v18.4s }, [x5], #0x10\n"
- "ld1 { v6.4s }, [x8], #0x10\n"
- "tbz x1, #1, 116f\n"
- "ld1 { v5.d }[0], [x5], #0x8\n"
- "ld1 { v22.d }[0], [x8], #0x8\n"
- "tbz x1, #0, 119f\n"
- "ld1 { v5.s }[2], [x5]\n"
- "ld1 { v22.s }[2], [x8]\n"
+ "smlal v6.4s, v27.4h, v4.4h\n"
+ "smlal2 v5.4s, v27.8h, v4.8h\n"
+ "tbz x0, #2, 117f\n"
+ "ld1 { v12.4s }, [x10], #0x10\n"
+ "ld1 { v19.4s }, [x1], #0x10\n"
+ "tbz x0, #1, 116f\n"
+ "ld1 { v20.d }[0], [x10], #0x8\n"
+ "ld1 { v29.d }[0], [x1], #0x8\n"
+ "tbz x0, #0, 119f\n"
+ "ld1 { v20.s }[2], [x10]\n"
+ "ld1 { v29.s }[2], [x1]\n"
"b 119f\n"
"116:" // Oddments: Load requant params: Bit 2: Bit 1: Unset
- "tbz x1, #0, 119f\n"
- "ld1 { v5.s }[0], [x5]\n"
- "ld1 { v22.s }[0], [x8]\n"
+ "tbz x0, #0, 119f\n"
+ "ld1 { v20.s }[0], [x10]\n"
+ "ld1 { v29.s }[0], [x1]\n"
"b 119f\n"
"117:" // Oddments: Load requant params: Bit 2: Unset
- "tbz x1, #1, 118f\n"
- "ld1 { v18.d }[0], [x5], #0x8\n"
- "ld1 { v6.d }[0], [x8], #0x8\n"
- "tbz x1, #0, 119f\n"
- "ld1 { v18.s }[2], [x5]\n"
- "ld1 { v6.s }[2], [x8]\n"
+ "tbz x0, #1, 118f\n"
+ "ld1 { v12.d }[0], [x10], #0x8\n"
+ "ld1 { v19.d }[0], [x1], #0x8\n"
+ "tbz x0, #0, 119f\n"
+ "ld1 { v12.s }[2], [x10]\n"
+ "ld1 { v19.s }[2], [x1]\n"
"b 119f\n"
"118:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 119f\n"
- "ld1 { v18.s }[0], [x5]\n"
- "ld1 { v6.s }[0], [x8]\n"
+ "tbz x0, #0, 119f\n"
+ "ld1 { v12.s }[0], [x10]\n"
+ "ld1 { v19.s }[0], [x1]\n"
"119:" // Oddments: Load requant params: Bit 2: End
- "sqrdmulh v13.4s, v13.4s, v18.4s\n"
- "and v30.16b, v13.16b, v6.16b\n"
- "add x17, x17, x10\n"
- "add x6, x6, x10\n"
- "sqrdmulh v19.4s, v19.4s, v5.4s\n"
- "sshr v30.4s, v30.4s, #0x1f\n"
- "add x7, x7, x10\n"
- "add x16, x16, x10\n"
- "and v16.16b, v19.16b, v22.16b\n"
- "sqrdmulh v20.4s, v20.4s, v18.4s\n"
- "sqrdmulh v8.4s, v8.4s, v18.4s\n"
- "sqrdmulh v17.4s, v17.4s, v18.4s\n"
- "sqadd v13.4s, v13.4s, v30.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "and v0.16b, v20.16b, v6.16b\n"
- "sqrdmulh v10.4s, v10.4s, v5.4s\n"
- "and v18.16b, v8.16b, v6.16b\n"
- "sqrdmulh v7.4s, v7.4s, v5.4s\n"
- "and v30.16b, v17.16b, v6.16b\n"
- "sqrdmulh v21.4s, v21.4s, v5.4s\n"
- "sqadd v19.4s, v19.4s, v16.4s\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "and v26.16b, v10.16b, v22.16b\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "and v23.16b, v7.16b, v22.16b\n"
- "sshr v30.4s, v30.4s, #0x1f\n"
- "and v16.16b, v21.16b, v22.16b\n"
- "sqadd v20.4s, v20.4s, v0.4s\n"
- "sshr v26.4s, v26.4s, #0x1f\n"
- "sqadd v8.4s, v8.4s, v18.4s\n"
+ "sqrdmulh v15.4s, v15.4s, v12.4s\n"
+ "sqrdmulh v17.4s, v17.4s, v12.4s\n"
+ "add x16, x16, x22\n"
+ "add x8, x8, x22\n"
+ "sqrdmulh v10.4s, v10.4s, v12.4s\n"
+ "sqrdmulh v6.4s, v6.4s, v12.4s\n"
+ "add x4, x4, x22\n"
+ "add x7, x7, x22\n"
+ "and v23.16b, v15.16b, v19.16b\n"
+ "sqrdmulh v16.4s, v16.4s, v20.4s\n"
+ "and v22.16b, v17.16b, v19.16b\n"
+ "sqrdmulh v8.4s, v8.4s, v20.4s\n"
+ "and v21.16b, v10.16b, v19.16b\n"
+ "sqrdmulh v7.4s, v7.4s, v20.4s\n"
+ "and v26.16b, v6.16b, v19.16b\n"
+ "sqrdmulh v5.4s, v5.4s, v20.4s\n"
"sshr v23.4s, v23.4s, #0x1f\n"
- "sqadd v17.4s, v17.4s, v30.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "srshl v13.4s, v13.4s, v6.4s\n"
- "srshl v20.4s, v20.4s, v6.4s\n"
- "sqadd v10.4s, v10.4s, v26.4s\n"
- "srshl v8.4s, v8.4s, v6.4s\n"
- "sqadd v7.4s, v7.4s, v23.4s\n"
- "srshl v17.4s, v17.4s, v6.4s\n"
- "sqadd v21.4s, v21.4s, v16.4s\n"
- "srshl v19.4s, v19.4s, v22.4s\n"
- "sqxtn v13.4h, v13.4s\n"
- "srshl v10.4s, v10.4s, v22.4s\n"
- "sqxtn v20.4h, v20.4s\n"
- "srshl v7.4s, v7.4s, v22.4s\n"
- "sqxtn v8.4h, v8.4s\n"
- "srshl v21.4s, v21.4s, v22.4s\n"
+ "and v4.16b, v16.16b, v29.16b\n"
+ "sshr v22.4s, v22.4s, #0x1f\n"
+ "and v2.16b, v8.16b, v29.16b\n"
+ "sshr v21.4s, v21.4s, #0x1f\n"
+ "and v3.16b, v7.16b, v29.16b\n"
+ "sshr v26.4s, v26.4s, #0x1f\n"
+ "and v25.16b, v5.16b, v29.16b\n"
+ "sqadd v15.4s, v15.4s, v23.4s\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "sqadd v17.4s, v17.4s, v22.4s\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sqadd v10.4s, v10.4s, v21.4s\n"
+ "sshr v3.4s, v3.4s, #0x1f\n"
+ "sqadd v6.4s, v6.4s, v26.4s\n"
+ "sshr v25.4s, v25.4s, #0x1f\n"
+ "srshl v15.4s, v15.4s, v19.4s\n"
+ "sqadd v16.4s, v16.4s, v4.4s\n"
+ "srshl v17.4s, v17.4s, v19.4s\n"
+ "sqadd v8.4s, v8.4s, v2.4s\n"
+ "srshl v10.4s, v10.4s, v19.4s\n"
+ "sqadd v7.4s, v7.4s, v3.4s\n"
+ "srshl v6.4s, v6.4s, v19.4s\n"
+ "sqadd v5.4s, v5.4s, v25.4s\n"
+ "srshl v16.4s, v16.4s, v29.4s\n"
+ "sqxtn v15.4h, v15.4s\n"
+ "srshl v8.4s, v8.4s, v29.4s\n"
"sqxtn v17.4h, v17.4s\n"
- "sqxtn2 v13.8h, v19.4s\n"
- "sqxtn2 v20.8h, v10.4s\n"
- "sqxtn2 v8.8h, v7.4s\n"
- "sqxtn2 v17.8h, v21.4s\n"
- "sqadd v13.8h, v13.8h, v14.8h\n"
- "sqadd v20.8h, v20.8h, v14.8h\n"
- "sqadd v8.8h, v8.8h, v14.8h\n"
- "sqadd v17.8h, v17.8h, v14.8h\n"
- "smax v13.8h, v13.8h, v12.8h\n"
- "smax v20.8h, v20.8h, v12.8h\n"
- "smax v8.8h, v8.8h, v12.8h\n"
- "smax v17.8h, v17.8h, v12.8h\n"
- "smin v13.8h, v13.8h, v11.8h\n"
- "smin v20.8h, v20.8h, v11.8h\n"
- "smin v8.8h, v8.8h, v11.8h\n"
- "smin v17.8h, v17.8h, v11.8h\n"
- "uzp1 v13.16b, v13.16b, v13.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "srshl v7.4s, v7.4s, v29.4s\n"
+ "sqxtn v10.4h, v10.4s\n"
+ "srshl v5.4s, v5.4s, v29.4s\n"
+ "sqxtn v6.4h, v6.4s\n"
+ "sqxtn2 v15.8h, v16.4s\n"
+ "sqxtn2 v17.8h, v8.4s\n"
+ "sqxtn2 v10.8h, v7.4s\n"
+ "sqxtn2 v6.8h, v5.4s\n"
+ "sqadd v15.8h, v15.8h, v18.8h\n"
+ "sqadd v17.8h, v17.8h, v18.8h\n"
+ "sqadd v10.8h, v10.8h, v18.8h\n"
+ "sqadd v6.8h, v6.8h, v18.8h\n"
+ "smax v15.8h, v15.8h, v11.8h\n"
+ "smax v17.8h, v17.8h, v11.8h\n"
+ "smax v10.8h, v10.8h, v11.8h\n"
+ "smax v6.8h, v6.8h, v11.8h\n"
+ "smin v15.8h, v15.8h, v13.8h\n"
+ "smin v17.8h, v17.8h, v13.8h\n"
+ "smin v10.8h, v10.8h, v13.8h\n"
+ "smin v6.8h, v6.8h, v13.8h\n"
+ "uzp1 v15.16b, v15.16b, v15.16b\n"
"uzp1 v17.16b, v17.16b, v17.16b\n"
- "tbz x1, #2, 121f\n"
- "st1 { v13.s }[0], [x17], #0x4\n"
- "st1 { v20.s }[0], [x6], #0x4\n"
- "st1 { v8.s }[0], [x7], #0x4\n"
- "st1 { v17.s }[0], [x16], #0x4\n"
- "tbz x1, #1, 120f\n"
- "st1 { v13.h }[2], [x17], #0x2\n"
- "st1 { v20.h }[2], [x6], #0x2\n"
- "st1 { v8.h }[2], [x7], #0x2\n"
- "st1 { v17.h }[2], [x16], #0x2\n"
- "tbz x1, #0, 123f\n"
- "st1 { v13.b }[6], [x17], #0x1\n"
- "st1 { v20.b }[6], [x6], #0x1\n"
- "st1 { v8.b }[6], [x7], #0x1\n"
- "st1 { v17.b }[6], [x16], #0x1\n"
+ "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "tbz x0, #2, 121f\n"
+ "st1 { v15.s }[0], [x16], #0x4\n"
+ "st1 { v17.s }[0], [x8], #0x4\n"
+ "st1 { v10.s }[0], [x4], #0x4\n"
+ "st1 { v6.s }[0], [x7], #0x4\n"
+ "tbz x0, #1, 120f\n"
+ "st1 { v15.h }[2], [x16], #0x2\n"
+ "st1 { v17.h }[2], [x8], #0x2\n"
+ "st1 { v10.h }[2], [x4], #0x2\n"
+ "st1 { v6.h }[2], [x7], #0x2\n"
+ "tbz x0, #0, 123f\n"
+ "st1 { v15.b }[6], [x16], #0x1\n"
+ "st1 { v17.b }[6], [x8], #0x1\n"
+ "st1 { v10.b }[6], [x4], #0x1\n"
+ "st1 { v6.b }[6], [x7], #0x1\n"
"b 123f\n"
"120:" // Oddments: Bit 2: Bit 1: Unset
- "tbz x1, #0, 123f\n"
- "st1 { v13.b }[4], [x17], #0x1\n"
- "st1 { v20.b }[4], [x6], #0x1\n"
- "st1 { v8.b }[4], [x7], #0x1\n"
- "st1 { v17.b }[4], [x16], #0x1\n"
+ "tbz x0, #0, 123f\n"
+ "st1 { v15.b }[4], [x16], #0x1\n"
+ "st1 { v17.b }[4], [x8], #0x1\n"
+ "st1 { v10.b }[4], [x4], #0x1\n"
+ "st1 { v6.b }[4], [x7], #0x1\n"
"b 123f\n"
"121:" // Oddments: Bit 2: Unset
- "tbz x1, #1, 122f\n"
- "st1 { v13.h }[0], [x17], #0x2\n"
- "st1 { v20.h }[0], [x6], #0x2\n"
- "st1 { v8.h }[0], [x7], #0x2\n"
- "st1 { v17.h }[0], [x16], #0x2\n"
- "tbz x1, #0, 123f\n"
- "st1 { v13.b }[2], [x17], #0x1\n"
- "st1 { v20.b }[2], [x6], #0x1\n"
- "st1 { v8.b }[2], [x7], #0x1\n"
- "st1 { v17.b }[2], [x16], #0x1\n"
+ "tbz x0, #1, 122f\n"
+ "st1 { v15.h }[0], [x16], #0x2\n"
+ "st1 { v17.h }[0], [x8], #0x2\n"
+ "st1 { v10.h }[0], [x4], #0x2\n"
+ "st1 { v6.h }[0], [x7], #0x2\n"
+ "tbz x0, #0, 123f\n"
+ "st1 { v15.b }[2], [x16], #0x1\n"
+ "st1 { v17.b }[2], [x8], #0x1\n"
+ "st1 { v10.b }[2], [x4], #0x1\n"
+ "st1 { v6.b }[2], [x7], #0x1\n"
"b 123f\n"
"122:" // Oddments: Bit 2: Unset: Bit 1: Unset
- "tbz x1, #0, 123f\n"
- "st1 { v13.b }[0], [x17], #0x1\n"
- "st1 { v20.b }[0], [x6], #0x1\n"
- "st1 { v8.b }[0], [x7], #0x1\n"
- "st1 { v17.b }[0], [x16], #0x1\n"
+ "tbz x0, #0, 123f\n"
+ "st1 { v15.b }[0], [x16], #0x1\n"
+ "st1 { v17.b }[0], [x8], #0x1\n"
+ "st1 { v10.b }[0], [x4], #0x1\n"
+ "st1 { v6.b }[0], [x7], #0x1\n"
"123:" // Oddments: Bit 2: End
"124:" // End
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
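The epilogue in the hunk above follows a fixed pattern: after the srshl rounding shifts, pairs of 32-bit accumulators are packed to 16 bits with sqxtn/sqxtn2, an offset vector (v18.8h in this kernel) is added with 16-bit saturation, minval/maxval are applied with smax/smin, and uzp1 keeps the low byte of each lane; the tbz ladder on x0 then stores the remaining 4, 2, or 1 byte(s) per output row. A scalar sketch of one lane, assuming those semantics (names here are illustrative, not taken from the source):

    #include <algorithm>
    #include <cstdint>

    // sqxtn: saturating narrow of a 32-bit lane to 16 bits.
    static int16_t sat16(int32_t v)
    {
        return (int16_t)std::min<int32_t>(std::max<int32_t>(v, INT16_MIN), INT16_MAX);
    }

    // One output byte of the epilogue: sqxtn -> sqadd offset -> smax/smin -> uzp1.
    uint8_t finalize_lane(int32_t requantised, int16_t offset,
                          int16_t minval, int16_t maxval)
    {
        int32_t x = sat16(requantised);                              // sqxtn / sqxtn2
        x = sat16(x + offset);                                       // sqadd v.8h, v.8h, v18.8h
        x = std::min<int32_t>(std::max<int32_t>(x, minval), maxval); // smax / smin
        return (uint8_t)x;                                           // uzp1 keeps the low byte
    }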
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_generic_output9_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_generic_output9_mla_depthfirst/generic.cpp
index 1676119bc1..08a2b7a98e 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_generic_output9_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_generic_output9_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,577 +41,583 @@ void a64_u8s8u8q_nhwc_generic_output9_mla_depthfirst_impl(
)
{
__asm__ __volatile__(
- "lsr x12, %x[n_channels], #0x2\n"
- "add x20, %x[qp], %[offsetof_Requantize32_minval]\n"
- "ld1r { v8.4s }, [x20]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_minval]\n"
+ "ld1r { v12.4s }, [x19]\n"
"add x20, %x[qp], %[offsetof_Requantize32_maxval]\n"
- "ld1r { v7.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_a_offset]\n"
- "ld1r { v6.16b }, [x20]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_a_offset]\n"
+ "ld1r { v11.4s }, [x20]\n"
+ "ld1r { v10.16b }, [x19]\n"
"add x20, %x[qp], %[offsetof_Requantize32_b_offset]\n"
- "ld1r { v5.16b }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v4.4s }, [x20]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_c_offset]\n"
+ "ld1r { v9.16b }, [x20]\n"
+ "ld1r { v8.4s }, [x19]\n"
"add x20, %x[qp], %[offsetof_Requantize32_per_layer_left_shift]\n"
- "ld1r { v3.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_per_layer_mul]\n"
- "ld1r { v2.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_per_layer_right_shift]\n"
- "ld1r { v1.4s }, [x20]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_per_layer_mul]\n"
+ "ld1r { v7.4s }, [x20]\n"
+ "ld1r { v6.4s }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_per_layer_right_shift]\n"
"mov x11, #0x0\n"
- "cbz x12, 6f\n"
+ "ld1r { v5.4s }, [x19]\n"
+ "lsr x10, %x[n_channels], #0x2\n"
+ "cbz x10, 6f\n"
"1:" // Channel loop
- "movi v23.4s, #0x0\n"
+ "movi v27.4s, #0x0\n"
"cbz %x[bias], 2f\n"
- "lsl x20, x11, #0x2\n"
- "ldr q23, [%x[bias], x20]\n"
+ "lsl x19, x11, #0x2\n"
+ "ldr q27, [%x[bias], x19]\n"
"2:" // Channel loop: Load bias: Done
- "ldr s0, [%x[params]], #0x4\n"
- "mov x21, %x[inptrs]\n"
- "ldp x10, x9, [x21], #0x10\n"
- "subs x20, %x[n_points], #0x1\n"
- "ldr s14, [x10, x11]\n"
- "ldr s15, [x9, x11]\n"
- "mov v24.16b, v23.16b\n"
- "mov v25.16b, v23.16b\n"
- "ldp x28, x27, [x21], #0x10\n"
- "ldr s16, [x28, x11]\n"
- "mov v26.16b, v23.16b\n"
- "mov v27.16b, v23.16b\n"
- "ldr s17, [x27, x11]\n"
- "ldp x26, x25, [x21], #0x10\n"
- "mov v28.16b, v23.16b\n"
- "mov v29.16b, v23.16b\n"
- "ldr s18, [x26, x11]\n"
- "ldr s19, [x25, x11]\n"
- "mov v30.16b, v23.16b\n"
- "mov v31.16b, v23.16b\n"
- "ldp x24, x23, [x21], #0x10\n"
- "ldr s20, [x24, x11]\n"
- "ssubl v0.8h, v0.8b, v5.8b\n"
- "usubl v14.8h, v14.8b, v6.8b\n"
- "ldr s21, [x23, x11]\n"
- "ldr x22, [x21], #0x8\n"
- "usubl v15.8h, v15.8b, v6.8b\n"
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr s22, [x22, x11]\n"
- "usubl v17.8h, v17.8b, v6.8b\n"
- "usubl v18.8h, v18.8b, v6.8b\n"
- "usubl v19.8h, v19.8b, v6.8b\n"
- "usubl v20.8h, v20.8b, v6.8b\n"
- "usubl v21.8h, v21.8b, v6.8b\n"
- "usubl v22.8h, v22.8b, v6.8b\n"
+ "mov v26.16b, v27.16b\n"
+ "ldr s16, [%x[params]], #0x4\n"
+ "mov x20, %x[inptrs]\n"
+ "mov v25.16b, v27.16b\n"
+ "ldp x9, x28, [x20], #0x10\n"
+ "subs x19, %x[n_points], #0x1\n"
+ "mov v24.16b, v27.16b\n"
+ "ldr s4, [x9, x11]\n"
+ "mov v23.16b, v27.16b\n"
+ "mov v22.16b, v27.16b\n"
+ "ldr s3, [x28, x11]\n"
+ "mov v21.16b, v27.16b\n"
+ "ldp x27, x26, [x20], #0x10\n"
+ "mov v20.16b, v27.16b\n"
+ "ldr s2, [x27, x11]\n"
+ "mov v19.16b, v27.16b\n"
+ "ssubl v16.8h, v16.8b, v9.8b\n"
+ "ldr s1, [x26, x11]\n"
+ "usubl v4.8h, v4.8b, v10.8b\n"
+ "ldp x25, x24, [x20], #0x10\n"
+ "usubl v3.8h, v3.8b, v10.8b\n"
+ "ldr s0, [x25, x11]\n"
+ "usubl v2.8h, v2.8b, v10.8b\n"
+ "usubl v1.8h, v1.8b, v10.8b\n"
+ "ldr s31, [x24, x11]\n"
+ "ldp x23, x22, [x20], #0x10\n"
+ "usubl v0.8h, v0.8b, v10.8b\n"
+ "ldr s30, [x23, x11]\n"
+ "ldr s29, [x22, x11]\n"
+ "usubl v31.8h, v31.8b, v10.8b\n"
+ "ldr x21, [x20], #0x8\n"
+ "usubl v30.8h, v30.8b, v10.8b\n"
+ "ldr s28, [x21, x11]\n"
+ "usubl v29.8h, v29.8b, v10.8b\n"
+ "usubl v28.8h, v28.8b, v10.8b\n"
"ble 4f\n"
"3:" // Channel loop: Planar loop
- "ldp x10, x9, [x21], #0x10\n"
- "ldp x28, x27, [x21], #0x10\n"
- "smlal v23.4s, v14.4h, v0.4h\n"
- "smlal v24.4s, v15.4h, v0.4h\n"
- "ldr s14, [x10, x11]\n"
- "ldr s15, [x9, x11]\n"
- "smlal v25.4s, v16.4h, v0.4h\n"
- "smlal v26.4s, v17.4h, v0.4h\n"
- "ldr s16, [x28, x11]\n"
- "ldr s17, [x27, x11]\n"
- "smlal v27.4s, v18.4h, v0.4h\n"
- "smlal v28.4s, v19.4h, v0.4h\n"
- "ldp x26, x25, [x21], #0x10\n"
- "ldr s18, [x26, x11]\n"
- "smlal v29.4s, v20.4h, v0.4h\n"
- "smlal v30.4s, v21.4h, v0.4h\n"
- "ldr s19, [x25, x11]\n"
- "ldp x24, x23, [x21], #0x10\n"
- "smlal v31.4s, v22.4h, v0.4h\n"
- "subs x20, x20, #0x1\n"
- "ldr s0, [%x[params]], #0x4\n"
- "ldr s20, [x24, x11]\n"
- "ssubl v0.8h, v0.8b, v5.8b\n"
- "usubl v14.8h, v14.8b, v6.8b\n"
- "ldr s21, [x23, x11]\n"
- "ldr x22, [x21], #0x8\n"
- "usubl v15.8h, v15.8b, v6.8b\n"
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr s22, [x22, x11]\n"
- "usubl v17.8h, v17.8b, v6.8b\n"
- "usubl v18.8h, v18.8b, v6.8b\n"
- "usubl v19.8h, v19.8b, v6.8b\n"
- "usubl v20.8h, v20.8b, v6.8b\n"
- "usubl v21.8h, v21.8b, v6.8b\n"
- "usubl v22.8h, v22.8b, v6.8b\n"
+ "smlal v27.4s, v4.4h, v16.4h\n"
+ "ldp x9, x28, [x20], #0x10\n"
+ "subs x19, x19, #0x1\n"
+ "smlal v26.4s, v3.4h, v16.4h\n"
+ "ldr s4, [x9, x11]\n"
+ "smlal v25.4s, v2.4h, v16.4h\n"
+ "smlal v24.4s, v1.4h, v16.4h\n"
+ "ldr s3, [x28, x11]\n"
+ "smlal v23.4s, v0.4h, v16.4h\n"
+ "ldp x27, x26, [x20], #0x10\n"
+ "smlal v22.4s, v31.4h, v16.4h\n"
+ "smlal v21.4s, v30.4h, v16.4h\n"
+ "ldr s2, [x27, x11]\n"
+ "smlal v20.4s, v29.4h, v16.4h\n"
+ "smlal v19.4s, v28.4h, v16.4h\n"
+ "ldr s16, [%x[params]], #0x4\n"
+ "usubl v4.8h, v4.8b, v10.8b\n"
+ "ldr s1, [x26, x11]\n"
+ "usubl v3.8h, v3.8b, v10.8b\n"
+ "ldp x25, x24, [x20], #0x10\n"
+ "usubl v2.8h, v2.8b, v10.8b\n"
+ "ldr s0, [x25, x11]\n"
+ "ssubl v16.8h, v16.8b, v9.8b\n"
+ "usubl v1.8h, v1.8b, v10.8b\n"
+ "ldr s31, [x24, x11]\n"
+ "ldp x23, x22, [x20], #0x10\n"
+ "usubl v0.8h, v0.8b, v10.8b\n"
+ "ldr s30, [x23, x11]\n"
+ "ldr s29, [x22, x11]\n"
+ "usubl v31.8h, v31.8b, v10.8b\n"
+ "ldr x21, [x20], #0x8\n"
+ "usubl v30.8h, v30.8b, v10.8b\n"
+ "ldr s28, [x21, x11]\n"
+ "usubl v29.8h, v29.8b, v10.8b\n"
+ "usubl v28.8h, v28.8b, v10.8b\n"
"bgt 3b\n"
"4:" // Channel loop: Planar tail
- "smlal v23.4s, v14.4h, v0.4h\n"
- "smlal v24.4s, v15.4h, v0.4h\n"
- "smlal v25.4s, v16.4h, v0.4h\n"
- "smlal v26.4s, v17.4h, v0.4h\n"
- "smlal v27.4s, v18.4h, v0.4h\n"
- "smlal v28.4s, v19.4h, v0.4h\n"
- "smlal v29.4s, v20.4h, v0.4h\n"
- "smlal v30.4s, v21.4h, v0.4h\n"
- "smlal v31.4s, v22.4h, v0.4h\n"
+ "smlal v27.4s, v4.4h, v16.4h\n"
+ "smlal v26.4s, v3.4h, v16.4h\n"
+ "smlal v25.4s, v2.4h, v16.4h\n"
+ "smlal v24.4s, v1.4h, v16.4h\n"
+ "smlal v23.4s, v0.4h, v16.4h\n"
+ "smlal v22.4s, v31.4h, v16.4h\n"
+ "smlal v21.4s, v30.4h, v16.4h\n"
+ "smlal v20.4s, v29.4h, v16.4h\n"
+ "smlal v19.4s, v28.4h, v16.4h\n"
"cbz %x[rq_mul_ptr], 5f\n"
- "lsl x20, x11, #0x2\n"
- "ldr q2, [%x[rq_mul_ptr], x20]\n"
- "ldr q1, [%x[rq_right_shift_ptr], x20]\n"
+ "lsl x19, x11, #0x2\n"
+ "ldr q6, [%x[rq_mul_ptr], x19]\n"
+ "ldr q5, [%x[rq_right_shift_ptr], x19]\n"
"cbz %x[rq_left_shift_ptr], 5f\n"
- "ldr q3, [%x[rq_left_shift_ptr], x20]\n"
+ "ldr q7, [%x[rq_left_shift_ptr], x19]\n"
"5:" // Channel loop: Load quantisation parameters: Done
- "sshl v23.4s, v23.4s, v3.4s\n"
- "sshl v24.4s, v24.4s, v3.4s\n"
- "ldp x28, x27, [%x[outptrs], #0x0]\n"
- "ldp x26, x25, [%x[outptrs], #0x10]\n"
- "sshl v25.4s, v25.4s, v3.4s\n"
- "sqrdmulh v23.4s, v23.4s, v2.4s\n"
- "ldp x24, x23, [%x[outptrs], #0x20]\n"
- "ldp x22, x21, [%x[outptrs], #0x30]\n"
- "sqrdmulh v24.4s, v24.4s, v2.4s\n"
- "sqrdmulh v25.4s, v25.4s, v2.4s\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "and v21.16b, v23.16b, v1.16b\n"
- "and v20.16b, v24.16b, v1.16b\n"
- "and v19.16b, v25.16b, v1.16b\n"
- "sshl v26.4s, v26.4s, v3.4s\n"
- "sshl v27.4s, v27.4s, v3.4s\n"
- "sshl v28.4s, v28.4s, v3.4s\n"
- "sshl v29.4s, v29.4s, v3.4s\n"
- "sshl v30.4s, v30.4s, v3.4s\n"
- "sshl v31.4s, v31.4s, v3.4s\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "sshr v20.4s, v20.4s, #0x1f\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v2.4s\n"
- "sqrdmulh v27.4s, v27.4s, v2.4s\n"
- "sqrdmulh v28.4s, v28.4s, v2.4s\n"
- "sqrdmulh v29.4s, v29.4s, v2.4s\n"
- "sqrdmulh v30.4s, v30.4s, v2.4s\n"
- "sqrdmulh v31.4s, v31.4s, v2.4s\n"
- "sqadd v23.4s, v23.4s, v21.4s\n"
- "sqadd v24.4s, v24.4s, v20.4s\n"
- "sqadd v25.4s, v25.4s, v19.4s\n"
- "and v18.16b, v26.16b, v1.16b\n"
- "and v17.16b, v27.16b, v1.16b\n"
- "and v16.16b, v28.16b, v1.16b\n"
- "and v21.16b, v29.16b, v1.16b\n"
- "and v20.16b, v30.16b, v1.16b\n"
- "and v19.16b, v31.16b, v1.16b\n"
+ "sshl v27.4s, v27.4s, v7.4s\n"
+ "ldp x27, x26, [%x[outptrs], #0x0]\n"
+ "sshl v26.4s, v26.4s, v7.4s\n"
+ "ldp x25, x24, [%x[outptrs], #0x10]\n"
+ "sshl v25.4s, v25.4s, v7.4s\n"
+ "ldp x23, x22, [%x[outptrs], #0x20]\n"
+ "sqrdmulh v27.4s, v27.4s, v6.4s\n"
+ "ldp x21, x20, [%x[outptrs], #0x30]\n"
+ "sqrdmulh v26.4s, v26.4s, v6.4s\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "sqrdmulh v25.4s, v25.4s, v6.4s\n"
+ "sshl v24.4s, v24.4s, v7.4s\n"
+ "and v16.16b, v27.16b, v5.16b\n"
+ "and v18.16b, v26.16b, v5.16b\n"
+ "and v17.16b, v25.16b, v5.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
"sshr v18.4s, v18.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "sshr v20.4s, v20.4s, #0x1f\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
+ "sqadd v27.4s, v27.4s, v16.4s\n"
"sqadd v26.4s, v26.4s, v18.4s\n"
- "sqadd v27.4s, v27.4s, v17.4s\n"
- "sqadd v28.4s, v28.4s, v16.4s\n"
- "sqadd v29.4s, v29.4s, v21.4s\n"
- "sqadd v30.4s, v30.4s, v20.4s\n"
- "sqadd v31.4s, v31.4s, v19.4s\n"
- "srshl v23.4s, v23.4s, v1.4s\n"
- "srshl v24.4s, v24.4s, v1.4s\n"
- "srshl v25.4s, v25.4s, v1.4s\n"
- "srshl v26.4s, v26.4s, v1.4s\n"
- "srshl v27.4s, v27.4s, v1.4s\n"
- "srshl v28.4s, v28.4s, v1.4s\n"
- "srshl v29.4s, v29.4s, v1.4s\n"
- "srshl v30.4s, v30.4s, v1.4s\n"
- "srshl v31.4s, v31.4s, v1.4s\n"
- "add v23.4s, v23.4s, v4.4s\n"
- "add v24.4s, v24.4s, v4.4s\n"
- "add v25.4s, v25.4s, v4.4s\n"
- "add v26.4s, v26.4s, v4.4s\n"
- "add v27.4s, v27.4s, v4.4s\n"
- "add v28.4s, v28.4s, v4.4s\n"
- "add v29.4s, v29.4s, v4.4s\n"
- "add v30.4s, v30.4s, v4.4s\n"
- "add v31.4s, v31.4s, v4.4s\n"
- "smax v23.4s, v23.4s, v8.4s\n"
- "smax v24.4s, v24.4s, v8.4s\n"
- "smax v25.4s, v25.4s, v8.4s\n"
- "smax v26.4s, v26.4s, v8.4s\n"
- "smax v27.4s, v27.4s, v8.4s\n"
- "smax v28.4s, v28.4s, v8.4s\n"
- "smax v29.4s, v29.4s, v8.4s\n"
- "smax v30.4s, v30.4s, v8.4s\n"
- "smax v31.4s, v31.4s, v8.4s\n"
- "smin v23.4s, v23.4s, v7.4s\n"
- "smin v24.4s, v24.4s, v7.4s\n"
- "smin v25.4s, v25.4s, v7.4s\n"
- "smin v26.4s, v26.4s, v7.4s\n"
- "smin v27.4s, v27.4s, v7.4s\n"
- "smin v28.4s, v28.4s, v7.4s\n"
- "smin v29.4s, v29.4s, v7.4s\n"
- "smin v30.4s, v30.4s, v7.4s\n"
- "smin v31.4s, v31.4s, v7.4s\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "sqadd v25.4s, v25.4s, v17.4s\n"
+ "sqrdmulh v24.4s, v24.4s, v6.4s\n"
+ "srshl v27.4s, v27.4s, v5.4s\n"
+ "srshl v26.4s, v26.4s, v5.4s\n"
+ "srshl v25.4s, v25.4s, v5.4s\n"
+ "and v16.16b, v24.16b, v5.16b\n"
+ "add v27.4s, v27.4s, v8.4s\n"
+ "add v26.4s, v26.4s, v8.4s\n"
+ "add v25.4s, v25.4s, v8.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smax v27.4s, v27.4s, v12.4s\n"
+ "smax v26.4s, v26.4s, v12.4s\n"
+ "sqadd v24.4s, v24.4s, v16.4s\n"
+ "smin v27.4s, v27.4s, v11.4s\n"
+ "smin v26.4s, v26.4s, v11.4s\n"
+ "smax v25.4s, v25.4s, v12.4s\n"
+ "srshl v24.4s, v24.4s, v5.4s\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s23, [x28, x11]\n"
+ "smin v25.4s, v25.4s, v11.4s\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "str s27, [x27, x11]\n"
+ "add v24.4s, v24.4s, v8.4s\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s24, [x27, x11]\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s25, [x26, x11]\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s26, [x25, x11]\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
- "str s27, [x24, x11]\n"
- "str s28, [x23, x11]\n"
- "str s29, [x22, x11]\n"
- "str s30, [x21, x11]\n"
- "str s31, [x20, x11]\n"
+ "str s26, [x26, x11]\n"
+ "smax v24.4s, v24.4s, v12.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s25, [x25, x11]\n"
+ "sshl v23.4s, v23.4s, v7.4s\n"
+ "sshl v22.4s, v22.4s, v7.4s\n"
+ "smin v24.4s, v24.4s, v11.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v6.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v6.4s\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "sshl v21.4s, v21.4s, v7.4s\n"
+ "and v17.16b, v23.16b, v5.16b\n"
+ "and v16.16b, v22.16b, v5.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v6.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "str s24, [x24, x11]\n"
+ "sqadd v23.4s, v23.4s, v17.4s\n"
+ "sqadd v22.4s, v22.4s, v16.4s\n"
+ "and v16.16b, v21.16b, v5.16b\n"
+ "sshl v20.4s, v20.4s, v7.4s\n"
+ "sshl v19.4s, v19.4s, v7.4s\n"
+ "srshl v23.4s, v23.4s, v5.4s\n"
+ "srshl v22.4s, v22.4s, v5.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v20.4s, v20.4s, v6.4s\n"
+ "add v23.4s, v23.4s, v8.4s\n"
+ "add v22.4s, v22.4s, v8.4s\n"
+ "sqadd v21.4s, v21.4s, v16.4s\n"
+ "and v17.16b, v20.16b, v5.16b\n"
+ "sqrdmulh v19.4s, v19.4s, v6.4s\n"
+ "smax v23.4s, v23.4s, v12.4s\n"
+ "srshl v21.4s, v21.4s, v5.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v19.16b, v5.16b\n"
+ "smin v23.4s, v23.4s, v11.4s\n"
+ "add v21.4s, v21.4s, v8.4s\n"
+ "sqadd v20.4s, v20.4s, v17.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smax v22.4s, v22.4s, v12.4s\n"
+ "smax v21.4s, v21.4s, v12.4s\n"
+ "srshl v20.4s, v20.4s, v5.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "smin v22.4s, v22.4s, v11.4s\n"
+ "smin v21.4s, v21.4s, v11.4s\n"
+ "add v20.4s, v20.4s, v8.4s\n"
+ "srshl v19.4s, v19.4s, v5.4s\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "smax v20.4s, v20.4s, v12.4s\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "str s23, [x23, x11]\n"
+ "add v19.4s, v19.4s, v8.4s\n"
+ "smin v20.4s, v20.4s, v11.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "smax v19.4s, v19.4s, v12.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str s22, [x22, x11]\n"
+ "smin v19.4s, v19.4s, v11.4s\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "str s21, [x21, x11]\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "str s20, [x20, x11]\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str s19, [x19, x11]\n"
"add x11, x11, #0x4\n"
- "cmp x11, x12, LSL #2\n"
+ "cmp x11, x10, LSL #2\n"
"blt 1b\n"
"6:" // Oddments
"tst %x[n_channels], #0x3\n"
"beq 24f\n"
- "movi v23.4s, #0x0\n"
+ "movi v27.4s, #0x0\n"
"cbz %x[bias], 9f\n"
- "add x20, %x[bias], x11, LSL #2\n"
+ "add x19, %x[bias], x11, LSL #2\n"
"tbz %x[n_channels], #1, 7f\n"
- "ld1 { v23.d }[0], [x20], #0x8\n"
+ "ld1 { v27.d }[0], [x19], #0x8\n"
"tbz %x[n_channels], #0, 8f\n"
- "ld1 { v23.s }[2], [x20], #0x4\n"
+ "ld1 { v27.s }[2], [x19], #0x4\n"
"b 8f\n"
"7:" // Oddments: Load bias: Bit 1: Unset
- "ld1 { v23.s }[0], [x20], #0x4\n"
+ "tbz %x[n_channels], #0, 8f\n"
+ "ld1 { v27.s }[0], [x19], #0x4\n"
"8:" // Oddments: Load bias: Bit 1: End
+
"9:" // Oddments: Load bias: Done
- "ldr s0, [%x[params]], #0x4\n"
- "mov x21, %x[inptrs]\n"
- "ldp x10, x9, [x21], #0x10\n"
- "mov v24.16b, v23.16b\n"
- "ldp x28, x27, [x21], #0x10\n"
- "ldp x26, x25, [x21], #0x10\n"
- "mov v25.16b, v23.16b\n"
- "mov v26.16b, v23.16b\n"
- "ldp x24, x23, [x21], #0x10\n"
- "ldr x22, [x21], #0x8\n"
- "mov v27.16b, v23.16b\n"
- "mov v28.16b, v23.16b\n"
- "mov v29.16b, v23.16b\n"
- "mov v30.16b, v23.16b\n"
- "add x10, x10, x11\n"
+ "mov v26.16b, v27.16b\n"
+ "ldr s16, [%x[params]], #0x4\n"
+ "mov x20, %x[inptrs]\n"
+ "mov v25.16b, v27.16b\n"
+ "ldp x9, x28, [x20], #0x10\n"
"add x9, x9, x11\n"
- "mov v31.16b, v23.16b\n"
- "ssubl v0.8h, v0.8b, v5.8b\n"
+ "mov v24.16b, v27.16b\n"
+ "ldp x27, x26, [x20], #0x10\n"
+ "mov v23.16b, v27.16b\n"
+ "ldp x25, x24, [x20], #0x10\n"
+ "mov v22.16b, v27.16b\n"
"add x28, x28, x11\n"
+ "mov v21.16b, v27.16b\n"
+ "ldp x23, x22, [x20], #0x10\n"
+ "mov v20.16b, v27.16b\n"
"add x27, x27, x11\n"
+ "mov v19.16b, v27.16b\n"
+ "ldr x21, [x20], #0x8\n"
+ "ssubl v16.8h, v16.8b, v9.8b\n"
"add x26, x26, x11\n"
"add x25, x25, x11\n"
"add x24, x24, x11\n"
"add x23, x23, x11\n"
"add x22, x22, x11\n"
+ "add x21, x21, x11\n"
"tbz %x[n_channels], #1, 10f\n"
- "ldr h14, [x10], #0x2\n"
- "ldr h15, [x9], #0x2\n"
- "ldr h16, [x28], #0x2\n"
- "ldr h17, [x27], #0x2\n"
- "ldr h18, [x26], #0x2\n"
- "ldr h19, [x25], #0x2\n"
- "ldr h20, [x24], #0x2\n"
- "ldr h21, [x23], #0x2\n"
- "ldr h22, [x22], #0x2\n"
+ "ldr h4, [x9], #0x2\n"
+ "ldr h3, [x28], #0x2\n"
+ "ldr h2, [x27], #0x2\n"
+ "ldr h1, [x26], #0x2\n"
+ "ldr h0, [x25], #0x2\n"
+ "ldr h31, [x24], #0x2\n"
+ "ldr h30, [x23], #0x2\n"
+ "ldr h29, [x22], #0x2\n"
+ "ldr h28, [x21], #0x2\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v14.b }[2], [x10], #0x1\n"
- "ld1 { v15.b }[2], [x9], #0x1\n"
- "ld1 { v16.b }[2], [x28], #0x1\n"
- "ld1 { v17.b }[2], [x27], #0x1\n"
- "ld1 { v18.b }[2], [x26], #0x1\n"
- "ld1 { v19.b }[2], [x25], #0x1\n"
- "ld1 { v20.b }[2], [x24], #0x1\n"
- "ld1 { v21.b }[2], [x23], #0x1\n"
- "ld1 { v22.b }[2], [x22], #0x1\n"
+ "ld1 { v4.b }[2], [x9], #0x1\n"
+ "ld1 { v3.b }[2], [x28], #0x1\n"
+ "ld1 { v2.b }[2], [x27], #0x1\n"
+ "ld1 { v1.b }[2], [x26], #0x1\n"
+ "ld1 { v0.b }[2], [x25], #0x1\n"
+ "ld1 { v31.b }[2], [x24], #0x1\n"
+ "ld1 { v30.b }[2], [x23], #0x1\n"
+ "ld1 { v29.b }[2], [x22], #0x1\n"
+ "ld1 { v28.b }[2], [x21], #0x1\n"
"b 11f\n"
"10:" // Oddments: Load: Bit 1: Unset
- "ldr b14, [x10], #0x1\n"
- "ldr b15, [x9], #0x1\n"
- "ldr b16, [x28], #0x1\n"
- "ldr b17, [x27], #0x1\n"
- "ldr b18, [x26], #0x1\n"
- "ldr b19, [x25], #0x1\n"
- "ldr b20, [x24], #0x1\n"
- "ldr b21, [x23], #0x1\n"
- "ldr b22, [x22], #0x1\n"
+ "tbz %x[n_channels], #0, 11f\n"
+ "ldr b4, [x9], #0x1\n"
+ "ldr b3, [x28], #0x1\n"
+ "ldr b2, [x27], #0x1\n"
+ "ldr b1, [x26], #0x1\n"
+ "ldr b0, [x25], #0x1\n"
+ "ldr b31, [x24], #0x1\n"
+ "ldr b30, [x23], #0x1\n"
+ "ldr b29, [x22], #0x1\n"
+ "ldr b28, [x21], #0x1\n"
"11:" // Oddments: Load: Bit 1: End
- "subs x20, %x[n_points], #0x1\n"
- "usubl v14.8h, v14.8b, v6.8b\n"
- "usubl v15.8h, v15.8b, v6.8b\n"
- "usubl v16.8h, v16.8b, v6.8b\n"
- "usubl v17.8h, v17.8b, v6.8b\n"
- "usubl v18.8h, v18.8b, v6.8b\n"
- "usubl v19.8h, v19.8b, v6.8b\n"
- "usubl v20.8h, v20.8b, v6.8b\n"
- "usubl v21.8h, v21.8b, v6.8b\n"
- "usubl v22.8h, v22.8b, v6.8b\n"
+ "usubl v4.8h, v4.8b, v10.8b\n"
+ "subs x19, %x[n_points], #0x1\n"
+ "usubl v3.8h, v3.8b, v10.8b\n"
+ "usubl v2.8h, v2.8b, v10.8b\n"
+ "usubl v1.8h, v1.8b, v10.8b\n"
+ "usubl v0.8h, v0.8b, v10.8b\n"
+ "usubl v31.8h, v31.8b, v10.8b\n"
+ "usubl v30.8h, v30.8b, v10.8b\n"
+ "usubl v29.8h, v29.8b, v10.8b\n"
+ "usubl v28.8h, v28.8b, v10.8b\n"
"ble 15f\n"
"12:" // Oddments: Planar loop
- "ldp x10, x9, [x21], #0x10\n"
- "ldp x28, x27, [x21], #0x10\n"
- "smlal v23.4s, v14.4h, v0.4h\n"
- "smlal v24.4s, v15.4h, v0.4h\n"
- "ldp x26, x25, [x21], #0x10\n"
- "ldp x24, x23, [x21], #0x10\n"
- "smlal v25.4s, v16.4h, v0.4h\n"
- "smlal v26.4s, v17.4h, v0.4h\n"
- "smlal v27.4s, v18.4h, v0.4h\n"
- "smlal v28.4s, v19.4h, v0.4h\n"
- "ldr x22, [x21], #0x8\n"
- "add x10, x10, x11\n"
- "smlal v29.4s, v20.4h, v0.4h\n"
- "smlal v30.4s, v21.4h, v0.4h\n"
+ "smlal v27.4s, v4.4h, v16.4h\n"
+ "ldp x9, x28, [x20], #0x10\n"
"add x9, x9, x11\n"
+ "smlal v26.4s, v3.4h, v16.4h\n"
+ "ldp x27, x26, [x20], #0x10\n"
+ "smlal v25.4s, v2.4h, v16.4h\n"
+ "ldp x25, x24, [x20], #0x10\n"
+ "smlal v24.4s, v1.4h, v16.4h\n"
"add x28, x28, x11\n"
- "smlal v31.4s, v22.4h, v0.4h\n"
- "ldr s0, [%x[params]], #0x4\n"
- "ssubl v0.8h, v0.8b, v5.8b\n"
+ "smlal v23.4s, v0.4h, v16.4h\n"
+ "ldp x23, x22, [x20], #0x10\n"
+ "smlal v22.4s, v31.4h, v16.4h\n"
"add x27, x27, x11\n"
+ "smlal v21.4s, v30.4h, v16.4h\n"
+ "ldr x21, [x20], #0x8\n"
+ "smlal v20.4s, v29.4h, v16.4h\n"
"add x26, x26, x11\n"
+ "smlal v19.4s, v28.4h, v16.4h\n"
+ "ldr s16, [%x[params]], #0x4\n"
"add x25, x25, x11\n"
+ "ssubl v16.8h, v16.8b, v9.8b\n"
"add x24, x24, x11\n"
"add x23, x23, x11\n"
"add x22, x22, x11\n"
+ "add x21, x21, x11\n"
"tbz %x[n_channels], #1, 13f\n"
- "ldr h14, [x10], #0x2\n"
- "ldr h15, [x9], #0x2\n"
- "ldr h16, [x28], #0x2\n"
- "ldr h17, [x27], #0x2\n"
- "ldr h18, [x26], #0x2\n"
- "ldr h19, [x25], #0x2\n"
- "ldr h20, [x24], #0x2\n"
- "ldr h21, [x23], #0x2\n"
- "ldr h22, [x22], #0x2\n"
+ "ldr h4, [x9], #0x2\n"
+ "ldr h3, [x28], #0x2\n"
+ "ldr h2, [x27], #0x2\n"
+ "ldr h1, [x26], #0x2\n"
+ "ldr h0, [x25], #0x2\n"
+ "ldr h31, [x24], #0x2\n"
+ "ldr h30, [x23], #0x2\n"
+ "ldr h29, [x22], #0x2\n"
+ "ldr h28, [x21], #0x2\n"
"tbz %x[n_channels], #0, 14f\n"
- "ld1 { v14.b }[2], [x10], #0x1\n"
- "ld1 { v15.b }[2], [x9], #0x1\n"
- "ld1 { v16.b }[2], [x28], #0x1\n"
- "ld1 { v17.b }[2], [x27], #0x1\n"
- "ld1 { v18.b }[2], [x26], #0x1\n"
- "ld1 { v19.b }[2], [x25], #0x1\n"
- "ld1 { v20.b }[2], [x24], #0x1\n"
- "ld1 { v21.b }[2], [x23], #0x1\n"
- "ld1 { v22.b }[2], [x22], #0x1\n"
+ "ld1 { v4.b }[2], [x9], #0x1\n"
+ "ld1 { v3.b }[2], [x28], #0x1\n"
+ "ld1 { v2.b }[2], [x27], #0x1\n"
+ "ld1 { v1.b }[2], [x26], #0x1\n"
+ "ld1 { v0.b }[2], [x25], #0x1\n"
+ "ld1 { v31.b }[2], [x24], #0x1\n"
+ "ld1 { v30.b }[2], [x23], #0x1\n"
+ "ld1 { v29.b }[2], [x22], #0x1\n"
+ "ld1 { v28.b }[2], [x21], #0x1\n"
"b 14f\n"
"13:" // Oddments: Planar loop: Load: Bit 1: Unset
- "ldr b14, [x10], #0x1\n"
- "ldr b15, [x9], #0x1\n"
- "ldr b16, [x28], #0x1\n"
- "ldr b17, [x27], #0x1\n"
- "ldr b18, [x26], #0x1\n"
- "ldr b19, [x25], #0x1\n"
- "ldr b20, [x24], #0x1\n"
- "ldr b21, [x23], #0x1\n"
- "ldr b22, [x22], #0x1\n"
+ "tbz %x[n_channels], #0, 14f\n"
+ "ldr b4, [x9], #0x1\n"
+ "ldr b3, [x28], #0x1\n"
+ "ldr b2, [x27], #0x1\n"
+ "ldr b1, [x26], #0x1\n"
+ "ldr b0, [x25], #0x1\n"
+ "ldr b31, [x24], #0x1\n"
+ "ldr b30, [x23], #0x1\n"
+ "ldr b29, [x22], #0x1\n"
+ "ldr b28, [x21], #0x1\n"
"14:" // Oddments: Planar loop: Load: Bit 1: End
- "subs x20, x20, #0x1\n"
- "usubl v14.8h, v14.8b, v6.8b\n"
- "usubl v15.8h, v15.8b, v6.8b\n"
- "usubl v16.8h, v16.8b, v6.8b\n"
- "usubl v17.8h, v17.8b, v6.8b\n"
- "usubl v18.8h, v18.8b, v6.8b\n"
- "usubl v19.8h, v19.8b, v6.8b\n"
- "usubl v20.8h, v20.8b, v6.8b\n"
- "usubl v21.8h, v21.8b, v6.8b\n"
- "usubl v22.8h, v22.8b, v6.8b\n"
+ "usubl v4.8h, v4.8b, v10.8b\n"
+ "subs x19, x19, #0x1\n"
+ "usubl v3.8h, v3.8b, v10.8b\n"
+ "usubl v2.8h, v2.8b, v10.8b\n"
+ "usubl v1.8h, v1.8b, v10.8b\n"
+ "usubl v0.8h, v0.8b, v10.8b\n"
+ "usubl v31.8h, v31.8b, v10.8b\n"
+ "usubl v30.8h, v30.8b, v10.8b\n"
+ "usubl v29.8h, v29.8b, v10.8b\n"
+ "usubl v28.8h, v28.8b, v10.8b\n"
"bgt 12b\n"
"15:" // Oddments: Planar tail
- "smlal v23.4s, v14.4h, v0.4h\n"
- "smlal v24.4s, v15.4h, v0.4h\n"
- "smlal v25.4s, v16.4h, v0.4h\n"
- "smlal v26.4s, v17.4h, v0.4h\n"
- "smlal v27.4s, v18.4h, v0.4h\n"
- "smlal v28.4s, v19.4h, v0.4h\n"
- "smlal v29.4s, v20.4h, v0.4h\n"
- "smlal v30.4s, v21.4h, v0.4h\n"
- "smlal v31.4s, v22.4h, v0.4h\n"
+ "smlal v27.4s, v4.4h, v16.4h\n"
+ "smlal v26.4s, v3.4h, v16.4h\n"
+ "smlal v25.4s, v2.4h, v16.4h\n"
+ "smlal v24.4s, v1.4h, v16.4h\n"
+ "smlal v23.4s, v0.4h, v16.4h\n"
+ "smlal v22.4s, v31.4h, v16.4h\n"
+ "smlal v21.4s, v30.4h, v16.4h\n"
+ "smlal v20.4s, v29.4h, v16.4h\n"
+ "smlal v19.4s, v28.4h, v16.4h\n"
"cbz %x[rq_mul_ptr], 21f\n"
- "add x22, %x[rq_mul_ptr], x11, LSL #2\n"
- "add x21, %x[rq_right_shift_ptr], x11, LSL #2\n"
- "add x20, %x[rq_left_shift_ptr], x11, LSL #2\n"
+ "add x21, %x[rq_mul_ptr], x11, LSL #2\n"
+ "add x20, %x[rq_right_shift_ptr], x11, LSL #2\n"
+ "add x19, %x[rq_left_shift_ptr], x11, LSL #2\n"
"tbz %x[n_channels], #1, 18f\n"
- "ld1 { v2.d }[0], [x22], #0x8\n"
- "ld1 { v1.d }[0], [x21], #0x8\n"
+ "ld1 { v6.d }[0], [x21], #0x8\n"
+ "ld1 { v5.d }[0], [x20], #0x8\n"
"cbz %x[rq_left_shift_ptr], 16f\n"
- "ld1 { v3.d }[0], [x20], #0x8\n"
+ "ld1 { v7.d }[0], [x19], #0x8\n"
"16:" // Oddments: Load quantisation parameters: Bit 1: Load left shift: Done
"tbz %x[n_channels], #0, 20f\n"
- "ld1 { v2.s }[2], [x22], #0x4\n"
- "ld1 { v1.s }[2], [x21], #0x4\n"
+ "ld1 { v6.s }[2], [x21], #0x4\n"
+ "ld1 { v5.s }[2], [x20], #0x4\n"
"cbz %x[rq_left_shift_ptr], 17f\n"
- "ld1 { v3.s }[2], [x20], #0x4\n"
+ "ld1 { v7.s }[2], [x19], #0x4\n"
"17:" // Oddments: Load quantisation parameters: Bit 1: Bit 0: Load left shift: Done
"b 20f\n"
"18:" // Oddments: Load quantisation parameters: Bit 1: Unset
- "ld1 { v2.s }[0], [x22], #0x4\n"
- "ld1 { v1.s }[0], [x21], #0x4\n"
+ "tbz %x[n_channels], #0, 20f\n"
+ "ld1 { v6.s }[0], [x21], #0x4\n"
+ "ld1 { v5.s }[0], [x20], #0x4\n"
"cbz %x[rq_left_shift_ptr], 19f\n"
- "ld1 { v3.s }[0], [x20], #0x4\n"
+ "ld1 { v7.s }[0], [x19], #0x4\n"
"19:" // Oddments: Load quantisation parameters: Bit 1: Unset: Bit 0: Load left shift: Done
"20:" // Oddments: Load quantisation parameters: Bit 1: End
"21:" // Oddments: Load quantisation parameters: Done
- "sshl v23.4s, v23.4s, v3.4s\n"
- "sshl v24.4s, v24.4s, v3.4s\n"
- "ldp x28, x27, [%x[outptrs], #0x0]\n"
- "ldp x26, x25, [%x[outptrs], #0x10]\n"
- "sshl v25.4s, v25.4s, v3.4s\n"
- "sqrdmulh v23.4s, v23.4s, v2.4s\n"
- "ldp x24, x23, [%x[outptrs], #0x20]\n"
- "ldp x22, x21, [%x[outptrs], #0x30]\n"
- "sqrdmulh v24.4s, v24.4s, v2.4s\n"
- "sqrdmulh v25.4s, v25.4s, v2.4s\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "add x28, x28, x11\n"
- "and v21.16b, v23.16b, v1.16b\n"
- "and v20.16b, v24.16b, v1.16b\n"
+ "sshl v27.4s, v27.4s, v7.4s\n"
+ "ldp x27, x26, [%x[outptrs], #0x0]\n"
"add x27, x27, x11\n"
+ "sqrdmulh v27.4s, v27.4s, v6.4s\n"
+ "ldp x25, x24, [%x[outptrs], #0x10]\n"
+ "sshl v26.4s, v26.4s, v7.4s\n"
+ "ldp x23, x22, [%x[outptrs], #0x20]\n"
"add x26, x26, x11\n"
- "and v19.16b, v25.16b, v1.16b\n"
- "sshl v26.4s, v26.4s, v3.4s\n"
+ "sshl v25.4s, v25.4s, v7.4s\n"
+ "ldp x21, x20, [%x[outptrs], #0x30]\n"
+ "sshl v24.4s, v24.4s, v7.4s\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
"add x25, x25, x11\n"
+ "and v16.16b, v27.16b, v5.16b\n"
"add x24, x24, x11\n"
- "sshl v27.4s, v27.4s, v3.4s\n"
- "sshl v28.4s, v28.4s, v3.4s\n"
+ "sqrdmulh v26.4s, v26.4s, v6.4s\n"
"add x23, x23, x11\n"
+ "sqrdmulh v25.4s, v25.4s, v6.4s\n"
"add x22, x22, x11\n"
- "sshl v29.4s, v29.4s, v3.4s\n"
- "sshl v30.4s, v30.4s, v3.4s\n"
+ "sqrdmulh v24.4s, v24.4s, v6.4s\n"
"add x21, x21, x11\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
"add x20, x20, x11\n"
- "sshl v31.4s, v31.4s, v3.4s\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "sshr v20.4s, v20.4s, #0x1f\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v2.4s\n"
- "sqrdmulh v27.4s, v27.4s, v2.4s\n"
- "sqrdmulh v28.4s, v28.4s, v2.4s\n"
- "sqrdmulh v29.4s, v29.4s, v2.4s\n"
- "sqrdmulh v30.4s, v30.4s, v2.4s\n"
- "sqrdmulh v31.4s, v31.4s, v2.4s\n"
- "sqadd v23.4s, v23.4s, v21.4s\n"
- "sqadd v24.4s, v24.4s, v20.4s\n"
- "sqadd v25.4s, v25.4s, v19.4s\n"
- "and v18.16b, v26.16b, v1.16b\n"
- "and v17.16b, v27.16b, v1.16b\n"
- "and v16.16b, v28.16b, v1.16b\n"
- "and v21.16b, v29.16b, v1.16b\n"
- "and v20.16b, v30.16b, v1.16b\n"
- "and v19.16b, v31.16b, v1.16b\n"
+ "and v18.16b, v26.16b, v5.16b\n"
+ "add x19, x19, x11\n"
+ "and v17.16b, v25.16b, v5.16b\n"
+ "sqadd v27.4s, v27.4s, v16.4s\n"
"sshr v18.4s, v18.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "sshr v20.4s, v20.4s, #0x1f\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
+ "and v16.16b, v24.16b, v5.16b\n"
+ "srshl v27.4s, v27.4s, v5.4s\n"
"sqadd v26.4s, v26.4s, v18.4s\n"
- "sqadd v27.4s, v27.4s, v17.4s\n"
- "sqadd v28.4s, v28.4s, v16.4s\n"
- "sqadd v29.4s, v29.4s, v21.4s\n"
- "sqadd v30.4s, v30.4s, v20.4s\n"
- "sqadd v31.4s, v31.4s, v19.4s\n"
- "srshl v23.4s, v23.4s, v1.4s\n"
- "srshl v24.4s, v24.4s, v1.4s\n"
- "srshl v25.4s, v25.4s, v1.4s\n"
- "srshl v26.4s, v26.4s, v1.4s\n"
- "srshl v27.4s, v27.4s, v1.4s\n"
- "srshl v28.4s, v28.4s, v1.4s\n"
- "srshl v29.4s, v29.4s, v1.4s\n"
- "srshl v30.4s, v30.4s, v1.4s\n"
- "srshl v31.4s, v31.4s, v1.4s\n"
- "add v23.4s, v23.4s, v4.4s\n"
- "add v24.4s, v24.4s, v4.4s\n"
- "add v25.4s, v25.4s, v4.4s\n"
- "add v26.4s, v26.4s, v4.4s\n"
- "add v27.4s, v27.4s, v4.4s\n"
- "add v28.4s, v28.4s, v4.4s\n"
- "add v29.4s, v29.4s, v4.4s\n"
- "add v30.4s, v30.4s, v4.4s\n"
- "add v31.4s, v31.4s, v4.4s\n"
- "smax v23.4s, v23.4s, v8.4s\n"
- "smax v24.4s, v24.4s, v8.4s\n"
- "smax v25.4s, v25.4s, v8.4s\n"
- "smax v26.4s, v26.4s, v8.4s\n"
- "smax v27.4s, v27.4s, v8.4s\n"
- "smax v28.4s, v28.4s, v8.4s\n"
- "smax v29.4s, v29.4s, v8.4s\n"
- "smax v30.4s, v30.4s, v8.4s\n"
- "smax v31.4s, v31.4s, v8.4s\n"
- "smin v23.4s, v23.4s, v7.4s\n"
- "smin v24.4s, v24.4s, v7.4s\n"
- "smin v25.4s, v25.4s, v7.4s\n"
- "smin v26.4s, v26.4s, v7.4s\n"
- "smin v27.4s, v27.4s, v7.4s\n"
- "smin v28.4s, v28.4s, v7.4s\n"
- "smin v29.4s, v29.4s, v7.4s\n"
- "smin v30.4s, v30.4s, v7.4s\n"
- "smin v31.4s, v31.4s, v7.4s\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "sqadd v25.4s, v25.4s, v17.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "add v27.4s, v27.4s, v8.4s\n"
+ "srshl v26.4s, v26.4s, v5.4s\n"
+ "srshl v25.4s, v25.4s, v5.4s\n"
+ "sqadd v24.4s, v24.4s, v16.4s\n"
+ "smax v27.4s, v27.4s, v12.4s\n"
+ "add v26.4s, v26.4s, v8.4s\n"
+ "add v25.4s, v25.4s, v8.4s\n"
+ "srshl v24.4s, v24.4s, v5.4s\n"
+ "smin v27.4s, v27.4s, v11.4s\n"
+ "smax v26.4s, v26.4s, v12.4s\n"
+ "smax v25.4s, v25.4s, v12.4s\n"
+ "add v24.4s, v24.4s, v8.4s\n"
+ "smin v26.4s, v26.4s, v11.4s\n"
+ "smin v25.4s, v25.4s, v11.4s\n"
+ "smax v24.4s, v24.4s, v12.4s\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
+ "smin v24.4s, v24.4s, v11.4s\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "sshl v23.4s, v23.4s, v7.4s\n"
+ "sshl v22.4s, v22.4s, v7.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v6.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v6.4s\n"
+ "sshl v21.4s, v21.4s, v7.4s\n"
+ "sshl v20.4s, v20.4s, v7.4s\n"
+ "and v17.16b, v23.16b, v5.16b\n"
+ "and v16.16b, v22.16b, v5.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v6.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v20.4s, v20.4s, v6.4s\n"
+ "sqadd v23.4s, v23.4s, v17.4s\n"
+ "sqadd v22.4s, v22.4s, v16.4s\n"
+ "and v16.16b, v21.16b, v5.16b\n"
+ "and v17.16b, v20.16b, v5.16b\n"
+ "srshl v23.4s, v23.4s, v5.4s\n"
+ "srshl v22.4s, v22.4s, v5.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "add v23.4s, v23.4s, v8.4s\n"
+ "add v22.4s, v22.4s, v8.4s\n"
+ "sqadd v21.4s, v21.4s, v16.4s\n"
+ "sqadd v20.4s, v20.4s, v17.4s\n"
+ "smax v23.4s, v23.4s, v12.4s\n"
+ "smax v22.4s, v22.4s, v12.4s\n"
+ "srshl v21.4s, v21.4s, v5.4s\n"
+ "srshl v20.4s, v20.4s, v5.4s\n"
+ "smin v23.4s, v23.4s, v11.4s\n"
+ "smin v22.4s, v22.4s, v11.4s\n"
+ "add v21.4s, v21.4s, v8.4s\n"
+ "add v20.4s, v20.4s, v8.4s\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "smax v21.4s, v21.4s, v12.4s\n"
+ "smax v20.4s, v20.4s, v12.4s\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "smin v21.4s, v21.4s, v11.4s\n"
+ "smin v20.4s, v20.4s, v11.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "sshl v19.4s, v19.4s, v7.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "sqrdmulh v19.4s, v19.4s, v6.4s\n"
+ "and v16.16b, v19.16b, v5.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "srshl v19.4s, v19.4s, v5.4s\n"
+ "add v19.4s, v19.4s, v8.4s\n"
+ "smax v19.4s, v19.4s, v12.4s\n"
+ "smin v19.4s, v19.4s, v11.4s\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
"tbz %x[n_channels], #1, 22f\n"
- "st1 { v23.h }[0], [x28], #0x2\n"
- "st1 { v24.h }[0], [x27], #0x2\n"
- "st1 { v25.h }[0], [x26], #0x2\n"
- "st1 { v26.h }[0], [x25], #0x2\n"
- "st1 { v27.h }[0], [x24], #0x2\n"
- "st1 { v28.h }[0], [x23], #0x2\n"
- "st1 { v29.h }[0], [x22], #0x2\n"
- "st1 { v30.h }[0], [x21], #0x2\n"
- "st1 { v31.h }[0], [x20], #0x2\n"
+ "st1 { v27.h }[0], [x27], #0x2\n"
+ "st1 { v26.h }[0], [x26], #0x2\n"
+ "st1 { v25.h }[0], [x25], #0x2\n"
+ "st1 { v24.h }[0], [x24], #0x2\n"
+ "st1 { v23.h }[0], [x23], #0x2\n"
+ "st1 { v22.h }[0], [x22], #0x2\n"
+ "st1 { v21.h }[0], [x21], #0x2\n"
+ "st1 { v20.h }[0], [x20], #0x2\n"
+ "st1 { v19.h }[0], [x19], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "st1 { v23.b }[2], [x28], #0x1\n"
- "st1 { v24.b }[2], [x27], #0x1\n"
- "st1 { v25.b }[2], [x26], #0x1\n"
- "st1 { v26.b }[2], [x25], #0x1\n"
- "st1 { v27.b }[2], [x24], #0x1\n"
- "st1 { v28.b }[2], [x23], #0x1\n"
- "st1 { v29.b }[2], [x22], #0x1\n"
- "st1 { v30.b }[2], [x21], #0x1\n"
- "st1 { v31.b }[2], [x20], #0x1\n"
+ "st1 { v27.b }[2], [x27], #0x1\n"
+ "st1 { v26.b }[2], [x26], #0x1\n"
+ "st1 { v25.b }[2], [x25], #0x1\n"
+ "st1 { v24.b }[2], [x24], #0x1\n"
+ "st1 { v23.b }[2], [x23], #0x1\n"
+ "st1 { v22.b }[2], [x22], #0x1\n"
+ "st1 { v21.b }[2], [x21], #0x1\n"
+ "st1 { v20.b }[2], [x20], #0x1\n"
+ "st1 { v19.b }[2], [x19], #0x1\n"
"b 23f\n"
"22:" // Oddments: Store: Bit 1: Unset
- "st1 { v23.b }[0], [x28], #0x1\n"
- "st1 { v24.b }[0], [x27], #0x1\n"
- "st1 { v25.b }[0], [x26], #0x1\n"
- "st1 { v26.b }[0], [x25], #0x1\n"
- "st1 { v27.b }[0], [x24], #0x1\n"
- "st1 { v28.b }[0], [x23], #0x1\n"
- "st1 { v29.b }[0], [x22], #0x1\n"
- "st1 { v30.b }[0], [x21], #0x1\n"
- "st1 { v31.b }[0], [x20], #0x1\n"
+ "tbz %x[n_channels], #0, 23f\n"
+ "st1 { v27.b }[0], [x27], #0x1\n"
+ "st1 { v26.b }[0], [x26], #0x1\n"
+ "st1 { v25.b }[0], [x25], #0x1\n"
+ "st1 { v24.b }[0], [x24], #0x1\n"
+ "st1 { v23.b }[0], [x23], #0x1\n"
+ "st1 { v22.b }[0], [x22], #0x1\n"
+ "st1 { v21.b }[0], [x21], #0x1\n"
+ "st1 { v20.b }[0], [x20], #0x1\n"
+ "st1 { v19.b }[0], [x19], #0x1\n"
"23:" // Oddments: Store: Bit 1: End
"24:" // End
: [params] "+&r" (params)
: [bias] "r" (qp.bias), [inptrs] "r" (inptrs), [n_channels] "r" ((uint64_t) n_channels), [n_points] "r" ((uint64_t) n_points), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_layer_left_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_left_shift)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [outptrs] "r" (outptrs), [qp] "r" (&qp), [rq_left_shift_ptr] "r" (qp.per_channel_left_shifts), [rq_mul_ptr] "r" (qp.per_channel_muls), [rq_right_shift_ptr] "r" (qp.per_channel_right_shifts)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
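The requantisation arithmetic itself, visible throughout the kernel above, is the usual fixed-point sequence: sshl by the left shift, sqrdmulh by the multiplier, an and/sshr/sqadd fix-up that adjusts rounding for negative values, srshl by the (negated) right shift, then offset, clamp, and narrow. A scalar model of one lane under those semantics; parameter names are illustrative, and right_shift is the positive amount that the kernel stores negated for srshl:

    #include <algorithm>
    #include <cstdint>

    // sqrdmulh: saturating rounding doubling multiply, returning the high half.
    static int32_t sqrdmulh32(int32_t a, int32_t b)
    {
        if (a == INT32_MIN && b == INT32_MIN) return INT32_MAX;  // the only saturating case
        return (int32_t)(((int64_t)a * (int64_t)b + (1LL << 30)) >> 31);
    }

    // The and/sshr/sqadd fix-up plus srshl: round-to-nearest right shift,
    // with ties on negative values pushed away from zero.
    static int32_t rounding_right_shift(int32_t x, int32_t s)
    {
        if (s == 0) return x;                 // shift-vector lane is zero: srshl is a no-op
        if (x < 0 && x != INT32_MIN) x -= 1;  // t = (x & shift) >> 31; sqadd x, x, t
        return (int32_t)(((int64_t)x + (1LL << (s - 1))) >> s);
    }

    // One lane of this kernel's epilogue (v7 = left shift, v6 = multiplier,
    // v5 = right shift, v8 = c_offset, v12 = minval, v11 = maxval).
    uint8_t requantize_lane(int32_t acc, int32_t left_shift, int32_t mul,
                            int32_t right_shift, int32_t c_offset,
                            int32_t minval, int32_t maxval)
    {
        int32_t x = (int32_t)((uint32_t)acc << left_shift);  // sshl
        x = sqrdmulh32(x, mul);                              // sqrdmulh
        x = rounding_right_shift(x, right_shift);            // and/sshr/sqadd + srshl
        x += c_offset;                                       // add v.4s, v.4s, v8.4s
        x = std::min(std::max(x, minval), maxval);           // smax v12 / smin v11
        return (uint8_t)x;                                   // uzp1 keeps the low byte
    }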
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
index 976434aa28..09b274056f 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,1433 +45,1439 @@ void a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst
)
{
__asm__ __volatile__(
- "lsr x10, %x[n_output_channels], #0x2\n"
- "add x20, %x[qp], %[offsetof_Requantize32_minval]\n"
- "ld1r { v13.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_maxval]\n"
- "ld1r { v11.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_a_offset]\n"
- "ld1r { v3.16b }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_b_offset]\n"
- "ld1r { v12.16b }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v14.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_per_layer_left_shift]\n"
- "ld1r { v15.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_per_layer_mul]\n"
- "ld1r { v9.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_per_layer_right_shift]\n"
- "ld1r { v10.4s }, [x20]\n"
"mov x9, #0x0\n"
- "cbz x10, 9f\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_minval]\n"
+ "ld1r { v14.4s }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_maxval]\n"
+ "ld1r { v13.4s }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_a_offset]\n"
+ "ld1r { v12.16b }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_b_offset]\n"
+ "ld1r { v11.16b }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_c_offset]\n"
+ "ld1r { v10.4s }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_per_layer_left_shift]\n"
+ "ld1r { v9.4s }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_per_layer_mul]\n"
+ "ld1r { v8.4s }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_Requantize32_per_layer_right_shift]\n"
+ "ld1r { v7.4s }, [x19]\n"
+ "lsr x28, %x[n_output_channels], #0x2\n"
+ "cbz x28, 9f\n"
"1:" // Output channel loop
- "movi v31.4s, #0x0\n"
+ "movi v16.4s, #0x0\n"
"cbz %x[bias], 2f\n"
- "lsl x20, x9, #0x2\n"
- "ldr q31, [%x[bias], x20]\n"
+ "lsl x19, x9, #0x2\n"
+ "ldr q16, [%x[bias], x19]\n"
"2:" // Output channel loop: Load bias: Done
- "mov v16.16b, v31.16b\n"
- "mov v17.16b, v31.16b\n"
- "mov v18.16b, v31.16b\n"
- "mov v19.16b, v31.16b\n"
- "mov v20.16b, v31.16b\n"
- "mov v21.16b, v31.16b\n"
- "mov v22.16b, v31.16b\n"
- "mov v23.16b, v31.16b\n"
- "mov v24.16b, v31.16b\n"
- "mov v25.16b, v31.16b\n"
- "mov v26.16b, v31.16b\n"
- "mov v27.16b, v31.16b\n"
- "mov v28.16b, v31.16b\n"
- "mov v29.16b, v31.16b\n"
- "mov v30.16b, v31.16b\n"
- "mov v31.16b, v31.16b\n"
+ "mov v6.16b, v16.16b\n"
+ "mov v5.16b, v16.16b\n"
+ "mov v4.16b, v16.16b\n"
+ "mov v31.16b, v16.16b\n"
+ "mov v30.16b, v16.16b\n"
+ "mov v29.16b, v16.16b\n"
+ "mov v28.16b, v16.16b\n"
+ "mov v27.16b, v16.16b\n"
+ "mov v26.16b, v16.16b\n"
+ "mov v25.16b, v16.16b\n"
+ "mov v24.16b, v16.16b\n"
+ "mov v23.16b, v16.16b\n"
+ "mov v22.16b, v16.16b\n"
+ "mov v21.16b, v16.16b\n"
+ "mov v20.16b, v16.16b\n"
+ "mov v19.16b, v16.16b\n"
"cbz %x[rq_mul_ptr], 3f\n"
- "lsl x20, x9, #0x2\n"
- "ldr q9, [%x[rq_mul_ptr], x20]\n"
- "ldr q10, [%x[rq_right_shift_ptr], x20]\n"
+ "lsl x19, x9, #0x2\n"
+ "ldr q8, [%x[rq_mul_ptr], x19]\n"
+ "ldr q7, [%x[rq_right_shift_ptr], x19]\n"
"cbz %x[rq_left_shift_ptr], 3f\n"
- "ldr q15, [%x[rq_left_shift_ptr], x20]\n"
+ "ldr q9, [%x[rq_left_shift_ptr], x19]\n"
"3:" // Output channel loop: Load quantization parameters: Done
- "ldr s8, [%x[weights]], #0x4\n"
- "mov x20, %x[inptrs]\n"
- "ldp x25, x28, [x20], #0x10\n"
- "lsr x21, %x[kernel_points], #0x1\n"
- "ldr d2, [x25, #0x0]\n"
- "ldr d7, [x28, #0x0]\n"
- "usubl v2.8h, v2.8b, v3.8b\n"
- "usubl v7.8h, v7.8b, v3.8b\n"
- "ssubl v8.8h, v8.8b, v12.8b\n"
- "cbz x21, 7f\n"
- "ldr s6, [%x[weights]], #0x4\n"
- "ldp x25, x28, [x20], #0x10\n"
- "subs x21, x21, #0x1\n"
- "ssubl v6.8h, v6.8b, v12.8b\n"
+ "ldr s17, [%x[weights]], #0x4\n"
+ "ssubl v17.8h, v17.8b, v11.8b\n"
+ "mov x19, %x[inptrs]\n"
+ "ldp x25, x27, [x19], #0x10\n"
+ "lsr x20, %x[kernel_points], #0x1\n"
+ "ldr d3, [x25, #0x0]\n"
+ "usubl v3.8h, v3.8b, v12.8b\n"
+ "ldr d2, [x27, #0x0]\n"
+ "usubl v2.8h, v2.8b, v12.8b\n"
+ "cbz x20, 7f\n"
+ "ldp x25, x27, [x19], #0x10\n"
+ "ldr s16, [%x[weights]], #0x4\n"
+ "ssubl v16.8h, v16.8b, v11.8b\n"
"ldr d1, [x25, #0x0]\n"
- "ldr d0, [x28, #0x0]\n"
- "usubl v1.8h, v1.8b, v3.8b\n"
- "usubl v0.8h, v0.8b, v3.8b\n"
+ "subs x20, x20, #0x1\n"
+ "usubl v1.8h, v1.8b, v12.8b\n"
+ "ldr d0, [x27, #0x0]\n"
+ "usubl v0.8h, v0.8b, v12.8b\n"
"beq 5f\n"
"4:" // Output channel loop: Kernel loop
- "ldp x25, x28, [x20], #0x10\n"
- "smlal v16.4s, v8.4h, v2.h[0]\n"
- "smlal v17.4s, v8.4h, v2.h[1]\n"
- "subs x21, x21, #0x1\n"
- "smlal v18.4s, v8.4h, v2.h[2]\n"
- "smlal v19.4s, v8.4h, v2.h[3]\n"
- "smlal v20.4s, v8.4h, v2.h[4]\n"
- "smlal v21.4s, v8.4h, v2.h[5]\n"
- "smlal v22.4s, v8.4h, v2.h[6]\n"
- "smlal v23.4s, v8.4h, v2.h[7]\n"
- "ldr d2, [x25, #0x0]\n"
- "usubl v2.8h, v2.8b, v3.8b\n"
- "smlal v24.4s, v8.4h, v7.h[0]\n"
- "smlal v25.4s, v8.4h, v7.h[1]\n"
- "smlal v26.4s, v8.4h, v7.h[2]\n"
- "smlal v27.4s, v8.4h, v7.h[3]\n"
- "smlal v28.4s, v8.4h, v7.h[4]\n"
- "smlal v29.4s, v8.4h, v7.h[5]\n"
- "smlal v30.4s, v8.4h, v7.h[6]\n"
- "smlal v31.4s, v8.4h, v7.h[7]\n"
- "ldr d7, [x28, #0x0]\n"
- "ldr s8, [%x[weights]], #0x4\n"
- "ldp x25, x28, [x20], #0x10\n"
- "smlal v16.4s, v6.4h, v1.h[0]\n"
- "smlal v17.4s, v6.4h, v1.h[1]\n"
- "usubl v7.8h, v7.8b, v3.8b\n"
- "smlal v18.4s, v6.4h, v1.h[2]\n"
- "smlal v19.4s, v6.4h, v1.h[3]\n"
- "ssubl v8.8h, v8.8b, v12.8b\n"
- "smlal v20.4s, v6.4h, v1.h[4]\n"
- "smlal v21.4s, v6.4h, v1.h[5]\n"
- "smlal v22.4s, v6.4h, v1.h[6]\n"
- "smlal v23.4s, v6.4h, v1.h[7]\n"
+ "smlal v6.4s, v17.4h, v3.h[0]\n"
+ "ldp x25, x27, [x19], #0x10\n"
+ "subs x20, x20, #0x1\n"
+ "smlal v5.4s, v17.4h, v3.h[1]\n"
+ "smlal v4.4s, v17.4h, v3.h[2]\n"
+ "smlal v31.4s, v17.4h, v3.h[3]\n"
+ "smlal v30.4s, v17.4h, v3.h[4]\n"
+ "smlal v29.4s, v17.4h, v3.h[5]\n"
+ "smlal v28.4s, v17.4h, v3.h[6]\n"
+ "smlal v27.4s, v17.4h, v3.h[7]\n"
+ "ldr d3, [x25, #0x0]\n"
+ "smlal v26.4s, v17.4h, v2.h[0]\n"
+ "smlal v25.4s, v17.4h, v2.h[1]\n"
+ "smlal v24.4s, v17.4h, v2.h[2]\n"
+ "smlal v23.4s, v17.4h, v2.h[3]\n"
+ "smlal v22.4s, v17.4h, v2.h[4]\n"
+ "smlal v21.4s, v17.4h, v2.h[5]\n"
+ "smlal v20.4s, v17.4h, v2.h[6]\n"
+ "smlal v19.4s, v17.4h, v2.h[7]\n"
+ "ldr d2, [x27, #0x0]\n"
+ "usubl v3.8h, v3.8b, v12.8b\n"
+ "ldr s17, [%x[weights]], #0x4\n"
+ "smlal v6.4s, v16.4h, v1.h[0]\n"
+ "ldp x25, x27, [x19], #0x10\n"
+ "smlal v5.4s, v16.4h, v1.h[1]\n"
+ "smlal v4.4s, v16.4h, v1.h[2]\n"
+ "usubl v2.8h, v2.8b, v12.8b\n"
+ "ssubl v17.8h, v17.8b, v11.8b\n"
+ "smlal v31.4s, v16.4h, v1.h[3]\n"
+ "smlal v30.4s, v16.4h, v1.h[4]\n"
+ "smlal v29.4s, v16.4h, v1.h[5]\n"
+ "smlal v28.4s, v16.4h, v1.h[6]\n"
+ "smlal v27.4s, v16.4h, v1.h[7]\n"
"ldr d1, [x25, #0x0]\n"
- "usubl v1.8h, v1.8b, v3.8b\n"
- "smlal v24.4s, v6.4h, v0.h[0]\n"
- "smlal v25.4s, v6.4h, v0.h[1]\n"
- "smlal v26.4s, v6.4h, v0.h[2]\n"
- "smlal v27.4s, v6.4h, v0.h[3]\n"
- "smlal v28.4s, v6.4h, v0.h[4]\n"
- "smlal v29.4s, v6.4h, v0.h[5]\n"
- "smlal v30.4s, v6.4h, v0.h[6]\n"
- "smlal v31.4s, v6.4h, v0.h[7]\n"
- "ldr d0, [x28, #0x0]\n"
- "ldr s6, [%x[weights]], #0x4\n"
- "usubl v0.8h, v0.8b, v3.8b\n"
- "ssubl v6.8h, v6.8b, v12.8b\n"
+ "smlal v26.4s, v16.4h, v0.h[0]\n"
+ "smlal v25.4s, v16.4h, v0.h[1]\n"
+ "smlal v24.4s, v16.4h, v0.h[2]\n"
+ "smlal v23.4s, v16.4h, v0.h[3]\n"
+ "smlal v22.4s, v16.4h, v0.h[4]\n"
+ "smlal v21.4s, v16.4h, v0.h[5]\n"
+ "smlal v20.4s, v16.4h, v0.h[6]\n"
+ "smlal v19.4s, v16.4h, v0.h[7]\n"
+ "ldr d0, [x27, #0x0]\n"
+ "usubl v1.8h, v1.8b, v12.8b\n"
+ "ldr s16, [%x[weights]], #0x4\n"
+ "usubl v0.8h, v0.8b, v12.8b\n"
+ "ssubl v16.8h, v16.8b, v11.8b\n"
"bgt 4b\n"
"5:" // Output channel loop: Kernel loop tail
"tbnz %x[kernel_points], #0, 6f\n"
- "smlal v16.4s, v8.4h, v2.h[0]\n"
- "smlal v17.4s, v8.4h, v2.h[1]\n"
- "ldr x20, [%x[outptrs], #0x0]\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
- "smlal v18.4s, v8.4h, v2.h[2]\n"
- "smlal v19.4s, v8.4h, v2.h[3]\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
- "smlal v16.4s, v6.4h, v1.h[0]\n"
- "smlal v17.4s, v6.4h, v1.h[1]\n"
- "sshl v16.4s, v16.4s, v15.4s\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "smlal v18.4s, v6.4h, v1.h[2]\n"
- "smlal v19.4s, v6.4h, v1.h[3]\n"
- "sshl v17.4s, v17.4s, v15.4s\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
- "smlal v20.4s, v8.4h, v2.h[4]\n"
- "smlal v21.4s, v8.4h, v2.h[5]\n"
- "sshl v18.4s, v18.4s, v15.4s\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "smlal v22.4s, v8.4h, v2.h[6]\n"
- "smlal v23.4s, v8.4h, v2.h[7]\n"
- "sshl v19.4s, v19.4s, v15.4s\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
- "smlal v24.4s, v8.4h, v7.h[0]\n"
- "smlal v25.4s, v8.4h, v7.h[1]\n"
- "sqrdmulh v16.4s, v16.4s, v9.4s\n"
- "smlal v20.4s, v6.4h, v1.h[4]\n"
- "smlal v21.4s, v6.4h, v1.h[5]\n"
- "sqrdmulh v17.4s, v17.4s, v9.4s\n"
- "smlal v22.4s, v6.4h, v1.h[6]\n"
- "smlal v23.4s, v6.4h, v1.h[7]\n"
- "sqrdmulh v18.4s, v18.4s, v9.4s\n"
- "smlal v24.4s, v6.4h, v0.h[0]\n"
- "smlal v25.4s, v6.4h, v0.h[1]\n"
- "sqrdmulh v19.4s, v19.4s, v9.4s\n"
- "smlal v26.4s, v8.4h, v7.h[2]\n"
- "smlal v27.4s, v8.4h, v7.h[3]\n"
- "and v5.16b, v16.16b, v10.16b\n"
- "smlal v28.4s, v8.4h, v7.h[4]\n"
- "smlal v29.4s, v8.4h, v7.h[5]\n"
- "and v4.16b, v17.16b, v10.16b\n"
- "smlal v30.4s, v8.4h, v7.h[6]\n"
- "smlal v31.4s, v8.4h, v7.h[7]\n"
- "and v2.16b, v18.16b, v10.16b\n"
- "and v1.16b, v19.16b, v10.16b\n"
- "sshl v20.4s, v20.4s, v15.4s\n"
- "smlal v26.4s, v6.4h, v0.h[2]\n"
- "sshl v21.4s, v21.4s, v15.4s\n"
- "sshl v22.4s, v22.4s, v15.4s\n"
- "smlal v27.4s, v6.4h, v0.h[3]\n"
- "sshl v23.4s, v23.4s, v15.4s\n"
- "sshl v24.4s, v24.4s, v15.4s\n"
- "smlal v28.4s, v6.4h, v0.h[4]\n"
- "sshl v25.4s, v25.4s, v15.4s\n"
- "smlal v29.4s, v6.4h, v0.h[5]\n"
- "smlal v30.4s, v6.4h, v0.h[6]\n"
- "smlal v31.4s, v6.4h, v0.h[7]\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "sqrdmulh v21.4s, v21.4s, v9.4s\n"
- "sqrdmulh v22.4s, v22.4s, v9.4s\n"
- "sqrdmulh v23.4s, v23.4s, v9.4s\n"
- "sqrdmulh v24.4s, v24.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqadd v16.4s, v16.4s, v5.4s\n"
- "sqadd v17.4s, v17.4s, v4.4s\n"
- "sqadd v18.4s, v18.4s, v2.4s\n"
- "sqadd v19.4s, v19.4s, v1.4s\n"
- "and v8.16b, v20.16b, v10.16b\n"
- "and v0.16b, v21.16b, v10.16b\n"
- "and v5.16b, v22.16b, v10.16b\n"
- "and v4.16b, v23.16b, v10.16b\n"
- "and v2.16b, v24.16b, v10.16b\n"
- "and v1.16b, v25.16b, v10.16b\n"
- "sshl v26.4s, v26.4s, v15.4s\n"
- "sshl v27.4s, v27.4s, v15.4s\n"
- "sshl v28.4s, v28.4s, v15.4s\n"
- "sshl v29.4s, v29.4s, v15.4s\n"
- "sshl v30.4s, v30.4s, v15.4s\n"
- "sshl v31.4s, v31.4s, v15.4s\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v9.4s\n"
- "sqrdmulh v27.4s, v27.4s, v9.4s\n"
- "sqrdmulh v28.4s, v28.4s, v9.4s\n"
- "sqrdmulh v29.4s, v29.4s, v9.4s\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v31.4s, v31.4s, v9.4s\n"
- "sqadd v20.4s, v20.4s, v8.4s\n"
- "sqadd v21.4s, v21.4s, v0.4s\n"
- "sqadd v22.4s, v22.4s, v5.4s\n"
- "sqadd v23.4s, v23.4s, v4.4s\n"
- "sqadd v24.4s, v24.4s, v2.4s\n"
- "sqadd v25.4s, v25.4s, v1.4s\n"
- "and v8.16b, v26.16b, v10.16b\n"
- "and v0.16b, v27.16b, v10.16b\n"
- "and v5.16b, v28.16b, v10.16b\n"
- "and v4.16b, v29.16b, v10.16b\n"
- "and v2.16b, v30.16b, v10.16b\n"
- "and v1.16b, v31.16b, v10.16b\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "srshl v16.4s, v16.4s, v10.4s\n"
- "srshl v17.4s, v17.4s, v10.4s\n"
- "srshl v18.4s, v18.4s, v10.4s\n"
- "srshl v19.4s, v19.4s, v10.4s\n"
- "srshl v20.4s, v20.4s, v10.4s\n"
- "srshl v21.4s, v21.4s, v10.4s\n"
- "srshl v22.4s, v22.4s, v10.4s\n"
- "srshl v23.4s, v23.4s, v10.4s\n"
- "sqadd v26.4s, v26.4s, v8.4s\n"
- "sqadd v27.4s, v27.4s, v0.4s\n"
- "sqadd v28.4s, v28.4s, v5.4s\n"
- "sqadd v29.4s, v29.4s, v4.4s\n"
- "sqadd v30.4s, v30.4s, v2.4s\n"
- "sqadd v31.4s, v31.4s, v1.4s\n"
- "add v16.4s, v16.4s, v14.4s\n"
- "add v17.4s, v17.4s, v14.4s\n"
- "add v18.4s, v18.4s, v14.4s\n"
- "add v19.4s, v19.4s, v14.4s\n"
- "add v20.4s, v20.4s, v14.4s\n"
- "add v21.4s, v21.4s, v14.4s\n"
- "add v22.4s, v22.4s, v14.4s\n"
- "add v23.4s, v23.4s, v14.4s\n"
- "srshl v24.4s, v24.4s, v10.4s\n"
- "srshl v25.4s, v25.4s, v10.4s\n"
- "srshl v26.4s, v26.4s, v10.4s\n"
- "srshl v27.4s, v27.4s, v10.4s\n"
- "srshl v28.4s, v28.4s, v10.4s\n"
- "srshl v29.4s, v29.4s, v10.4s\n"
- "srshl v30.4s, v30.4s, v10.4s\n"
- "srshl v31.4s, v31.4s, v10.4s\n"
- "smin v16.4s, v16.4s, v11.4s\n"
- "smin v17.4s, v17.4s, v11.4s\n"
- "smin v18.4s, v18.4s, v11.4s\n"
- "smin v19.4s, v19.4s, v11.4s\n"
- "smin v20.4s, v20.4s, v11.4s\n"
- "smin v21.4s, v21.4s, v11.4s\n"
- "smin v22.4s, v22.4s, v11.4s\n"
- "smin v23.4s, v23.4s, v11.4s\n"
- "add v24.4s, v24.4s, v14.4s\n"
- "add v25.4s, v25.4s, v14.4s\n"
- "add v26.4s, v26.4s, v14.4s\n"
- "add v27.4s, v27.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "smax v16.4s, v16.4s, v13.4s\n"
- "smax v17.4s, v17.4s, v13.4s\n"
- "smax v18.4s, v18.4s, v13.4s\n"
- "smax v19.4s, v19.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smax v21.4s, v21.4s, v13.4s\n"
- "smax v22.4s, v22.4s, v13.4s\n"
- "smax v23.4s, v23.4s, v13.4s\n"
- "smin v24.4s, v24.4s, v11.4s\n"
- "smin v25.4s, v25.4s, v11.4s\n"
- "smin v26.4s, v26.4s, v11.4s\n"
- "smin v27.4s, v27.4s, v11.4s\n"
- "smin v28.4s, v28.4s, v11.4s\n"
- "smin v29.4s, v29.4s, v11.4s\n"
- "smin v30.4s, v30.4s, v11.4s\n"
- "smin v31.4s, v31.4s, v11.4s\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "uzp1 v17.16b, v17.16b, v17.16b\n"
- "uzp1 v18.16b, v18.16b, v18.16b\n"
- "uzp1 v19.16b, v19.16b, v19.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v22.16b, v22.16b, v22.16b\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "smax v24.4s, v24.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v26.4s, v26.4s, v13.4s\n"
- "smax v27.4s, v27.4s, v13.4s\n"
- "smax v28.4s, v28.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v31.4s, v31.4s, v13.4s\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "str s16, [x20, x9]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "uzp1 v17.16b, v17.16b, v17.16b\n"
- "uzp1 v18.16b, v18.16b, v18.16b\n"
- "str s17, [x21, x9]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
- "uzp1 v19.16b, v19.16b, v19.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s18, [x22, x9]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v22.16b, v22.16b, v22.16b\n"
- "str s19, [x23, x9]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s20, [x24, x9]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s21, [x25, x9]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s22, [x26, x9]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s23, [x27, x9]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
+ "smlal v6.4s, v17.4h, v3.h[0]\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "smlal v5.4s, v17.4h, v3.h[1]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "smlal v4.4s, v17.4h, v3.h[2]\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "smlal v31.4s, v17.4h, v3.h[3]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
+ "smlal v30.4s, v17.4h, v3.h[4]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
+ "smlal v29.4s, v17.4h, v3.h[5]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
+ "smlal v28.4s, v17.4h, v3.h[6]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
+ "smlal v27.4s, v17.4h, v3.h[7]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
+ "smlal v26.4s, v17.4h, v2.h[0]\n"
+ "smlal v25.4s, v17.4h, v2.h[1]\n"
+ "smlal v24.4s, v17.4h, v2.h[2]\n"
+ "smlal v23.4s, v17.4h, v2.h[3]\n"
+ "smlal v22.4s, v17.4h, v2.h[4]\n"
+ "smlal v21.4s, v17.4h, v2.h[5]\n"
+ "smlal v20.4s, v17.4h, v2.h[6]\n"
+ "smlal v19.4s, v17.4h, v2.h[7]\n"
+ "smlal v6.4s, v16.4h, v1.h[0]\n"
+ "smlal v5.4s, v16.4h, v1.h[1]\n"
+ "smlal v4.4s, v16.4h, v1.h[2]\n"
+ "smlal v31.4s, v16.4h, v1.h[3]\n"
+ "smlal v30.4s, v16.4h, v1.h[4]\n"
+ "smlal v29.4s, v16.4h, v1.h[5]\n"
+ "smlal v28.4s, v16.4h, v1.h[6]\n"
+ "smlal v27.4s, v16.4h, v1.h[7]\n"
+ "smlal v26.4s, v16.4h, v0.h[0]\n"
+ "smlal v25.4s, v16.4h, v0.h[1]\n"
+ "smlal v24.4s, v16.4h, v0.h[2]\n"
+ "smlal v23.4s, v16.4h, v0.h[3]\n"
+ "smlal v22.4s, v16.4h, v0.h[4]\n"
+ "smlal v21.4s, v16.4h, v0.h[5]\n"
+ "smlal v20.4s, v16.4h, v0.h[6]\n"
+ "smlal v19.4s, v16.4h, v0.h[7]\n"
+ "sshl v6.4s, v6.4s, v9.4s\n"
+ "sshl v5.4s, v5.4s, v9.4s\n"
+ "sqrdmulh v6.4s, v6.4s, v8.4s\n"
+ "sqrdmulh v5.4s, v5.4s, v8.4s\n"
+ "sshl v4.4s, v4.4s, v9.4s\n"
+ "sshl v31.4s, v31.4s, v9.4s\n"
+ "and v18.16b, v6.16b, v7.16b\n"
+ "and v16.16b, v5.16b, v7.16b\n"
+ "sqrdmulh v4.4s, v4.4s, v8.4s\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v31.4s, v31.4s, v8.4s\n"
+ "sqadd v6.4s, v6.4s, v18.4s\n"
+ "sqadd v5.4s, v5.4s, v16.4s\n"
+ "and v17.16b, v4.16b, v7.16b\n"
+ "and v16.16b, v31.16b, v7.16b\n"
+ "srshl v6.4s, v6.4s, v7.4s\n"
+ "srshl v5.4s, v5.4s, v7.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "add v6.4s, v6.4s, v10.4s\n"
+ "add v5.4s, v5.4s, v10.4s\n"
+ "sqadd v4.4s, v4.4s, v17.4s\n"
+ "smin v6.4s, v6.4s, v13.4s\n"
+ "smin v5.4s, v5.4s, v13.4s\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
+ "smax v6.4s, v6.4s, v14.4s\n"
+ "smax v5.4s, v5.4s, v14.4s\n"
+ "srshl v4.4s, v4.4s, v7.4s\n"
+ "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "str s6, [x19, x9]\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "add v4.4s, v4.4s, v10.4s\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "srshl v31.4s, v31.4s, v7.4s\n"
+ "str s5, [x20, x9]\n"
+ "sshl v30.4s, v30.4s, v9.4s\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
+ "smin v4.4s, v4.4s, v13.4s\n"
+ "sqrdmulh v30.4s, v30.4s, v8.4s\n"
+ "add v31.4s, v31.4s, v10.4s\n"
+ "smax v4.4s, v4.4s, v14.4s\n"
+ "sshl v29.4s, v29.4s, v9.4s\n"
+ "smin v31.4s, v31.4s, v13.4s\n"
+ "uzp1 v4.16b, v4.16b, v4.16b\n"
+ "and v16.16b, v30.16b, v7.16b\n"
+ "uzp1 v4.16b, v4.16b, v4.16b\n"
+ "str s4, [x21, x9]\n"
+ "smax v31.4s, v31.4s, v14.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
+ "sqrdmulh v29.4s, v29.4s, v8.4s\n"
+ "sshl v28.4s, v28.4s, v9.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s24, [x20, x9]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s25, [x21, x9]\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s26, [x22, x9]\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s27, [x23, x9]\n"
+ "sqadd v30.4s, v30.4s, v16.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "str s28, [x24, x9]\n"
- "str s29, [x25, x9]\n"
- "str s30, [x26, x9]\n"
- "str s31, [x27, x9]\n"
- "b 8f\n"
- "6:" // Output channel loop: Odd tail
- "ldp x25, x28, [x20], #0x10\n"
- "smlal v16.4s, v8.4h, v2.h[0]\n"
- "smlal v17.4s, v8.4h, v2.h[1]\n"
- "ldr x20, [%x[outptrs], #0x0]\n"
- "smlal v18.4s, v8.4h, v2.h[2]\n"
- "smlal v19.4s, v8.4h, v2.h[3]\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "smlal v20.4s, v8.4h, v2.h[4]\n"
- "smlal v21.4s, v8.4h, v2.h[5]\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "smlal v22.4s, v8.4h, v2.h[6]\n"
- "smlal v23.4s, v8.4h, v2.h[7]\n"
- "ldr d2, [x25, #0x0]\n"
- "usubl v2.8h, v2.8b, v3.8b\n"
- "smlal v24.4s, v8.4h, v7.h[0]\n"
- "smlal v25.4s, v8.4h, v7.h[1]\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "smlal v26.4s, v8.4h, v7.h[2]\n"
- "smlal v27.4s, v8.4h, v7.h[3]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
- "smlal v28.4s, v8.4h, v7.h[4]\n"
- "smlal v29.4s, v8.4h, v7.h[5]\n"
- "smlal v30.4s, v8.4h, v7.h[6]\n"
- "smlal v31.4s, v8.4h, v7.h[7]\n"
- "ldr s8, [%x[weights]], #0x4\n"
- "ldr d7, [x28, #0x0]\n"
- "smlal v16.4s, v6.4h, v1.h[0]\n"
- "smlal v17.4s, v6.4h, v1.h[1]\n"
- "ssubl v8.8h, v8.8b, v12.8b\n"
- "smlal v18.4s, v6.4h, v1.h[2]\n"
- "smlal v19.4s, v6.4h, v1.h[3]\n"
- "usubl v7.8h, v7.8b, v3.8b\n"
- "smlal v16.4s, v8.4h, v2.h[0]\n"
- "smlal v17.4s, v8.4h, v2.h[1]\n"
- "sshl v16.4s, v16.4s, v15.4s\n"
- "smlal v18.4s, v8.4h, v2.h[2]\n"
- "smlal v19.4s, v8.4h, v2.h[3]\n"
- "sshl v17.4s, v17.4s, v15.4s\n"
- "smlal v20.4s, v6.4h, v1.h[4]\n"
- "smlal v21.4s, v6.4h, v1.h[5]\n"
- "sshl v18.4s, v18.4s, v15.4s\n"
- "smlal v22.4s, v6.4h, v1.h[6]\n"
- "smlal v23.4s, v6.4h, v1.h[7]\n"
- "sshl v19.4s, v19.4s, v15.4s\n"
- "smlal v24.4s, v6.4h, v0.h[0]\n"
- "smlal v25.4s, v6.4h, v0.h[1]\n"
- "sqrdmulh v16.4s, v16.4s, v9.4s\n"
- "smlal v20.4s, v8.4h, v2.h[4]\n"
- "smlal v21.4s, v8.4h, v2.h[5]\n"
- "sqrdmulh v17.4s, v17.4s, v9.4s\n"
- "smlal v22.4s, v8.4h, v2.h[6]\n"
- "smlal v23.4s, v8.4h, v2.h[7]\n"
- "sqrdmulh v18.4s, v18.4s, v9.4s\n"
- "smlal v24.4s, v8.4h, v7.h[0]\n"
- "smlal v25.4s, v8.4h, v7.h[1]\n"
- "sqrdmulh v19.4s, v19.4s, v9.4s\n"
- "smlal v26.4s, v6.4h, v0.h[2]\n"
- "smlal v27.4s, v6.4h, v0.h[3]\n"
- "and v5.16b, v16.16b, v10.16b\n"
- "smlal v28.4s, v6.4h, v0.h[4]\n"
- "smlal v29.4s, v6.4h, v0.h[5]\n"
- "and v4.16b, v17.16b, v10.16b\n"
- "smlal v30.4s, v6.4h, v0.h[6]\n"
- "smlal v31.4s, v6.4h, v0.h[7]\n"
- "and v2.16b, v18.16b, v10.16b\n"
- "and v1.16b, v19.16b, v10.16b\n"
- "sshl v20.4s, v20.4s, v15.4s\n"
- "smlal v26.4s, v8.4h, v7.h[2]\n"
- "sshl v21.4s, v21.4s, v15.4s\n"
- "sshl v22.4s, v22.4s, v15.4s\n"
- "smlal v27.4s, v8.4h, v7.h[3]\n"
- "sshl v23.4s, v23.4s, v15.4s\n"
- "sshl v24.4s, v24.4s, v15.4s\n"
- "smlal v28.4s, v8.4h, v7.h[4]\n"
- "sshl v25.4s, v25.4s, v15.4s\n"
- "smlal v29.4s, v8.4h, v7.h[5]\n"
- "smlal v30.4s, v8.4h, v7.h[6]\n"
- "smlal v31.4s, v8.4h, v7.h[7]\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "sqrdmulh v21.4s, v21.4s, v9.4s\n"
- "sqrdmulh v22.4s, v22.4s, v9.4s\n"
- "sqrdmulh v23.4s, v23.4s, v9.4s\n"
- "sqrdmulh v24.4s, v24.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqadd v16.4s, v16.4s, v5.4s\n"
- "sqadd v17.4s, v17.4s, v4.4s\n"
- "sqadd v18.4s, v18.4s, v2.4s\n"
- "sqadd v19.4s, v19.4s, v1.4s\n"
- "and v8.16b, v20.16b, v10.16b\n"
- "and v0.16b, v21.16b, v10.16b\n"
- "and v5.16b, v22.16b, v10.16b\n"
- "and v4.16b, v23.16b, v10.16b\n"
- "and v2.16b, v24.16b, v10.16b\n"
- "and v1.16b, v25.16b, v10.16b\n"
- "sshl v26.4s, v26.4s, v15.4s\n"
- "sshl v27.4s, v27.4s, v15.4s\n"
- "sshl v28.4s, v28.4s, v15.4s\n"
- "sshl v29.4s, v29.4s, v15.4s\n"
- "sshl v30.4s, v30.4s, v15.4s\n"
- "sshl v31.4s, v31.4s, v15.4s\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v9.4s\n"
- "sqrdmulh v27.4s, v27.4s, v9.4s\n"
- "sqrdmulh v28.4s, v28.4s, v9.4s\n"
- "sqrdmulh v29.4s, v29.4s, v9.4s\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v31.4s, v31.4s, v9.4s\n"
- "sqadd v20.4s, v20.4s, v8.4s\n"
- "sqadd v21.4s, v21.4s, v0.4s\n"
- "sqadd v22.4s, v22.4s, v5.4s\n"
- "sqadd v23.4s, v23.4s, v4.4s\n"
- "sqadd v24.4s, v24.4s, v2.4s\n"
- "sqadd v25.4s, v25.4s, v1.4s\n"
- "and v8.16b, v26.16b, v10.16b\n"
- "and v0.16b, v27.16b, v10.16b\n"
- "and v5.16b, v28.16b, v10.16b\n"
- "and v4.16b, v29.16b, v10.16b\n"
- "and v2.16b, v30.16b, v10.16b\n"
- "and v1.16b, v31.16b, v10.16b\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "srshl v16.4s, v16.4s, v10.4s\n"
- "srshl v17.4s, v17.4s, v10.4s\n"
- "srshl v18.4s, v18.4s, v10.4s\n"
- "srshl v19.4s, v19.4s, v10.4s\n"
- "srshl v20.4s, v20.4s, v10.4s\n"
- "srshl v21.4s, v21.4s, v10.4s\n"
- "srshl v22.4s, v22.4s, v10.4s\n"
- "srshl v23.4s, v23.4s, v10.4s\n"
- "sqadd v26.4s, v26.4s, v8.4s\n"
- "sqadd v27.4s, v27.4s, v0.4s\n"
- "sqadd v28.4s, v28.4s, v5.4s\n"
- "sqadd v29.4s, v29.4s, v4.4s\n"
- "sqadd v30.4s, v30.4s, v2.4s\n"
- "sqadd v31.4s, v31.4s, v1.4s\n"
- "add v16.4s, v16.4s, v14.4s\n"
- "add v17.4s, v17.4s, v14.4s\n"
- "add v18.4s, v18.4s, v14.4s\n"
- "add v19.4s, v19.4s, v14.4s\n"
- "add v20.4s, v20.4s, v14.4s\n"
- "add v21.4s, v21.4s, v14.4s\n"
- "add v22.4s, v22.4s, v14.4s\n"
- "add v23.4s, v23.4s, v14.4s\n"
- "srshl v24.4s, v24.4s, v10.4s\n"
- "srshl v25.4s, v25.4s, v10.4s\n"
- "srshl v26.4s, v26.4s, v10.4s\n"
- "srshl v27.4s, v27.4s, v10.4s\n"
- "srshl v28.4s, v28.4s, v10.4s\n"
- "srshl v29.4s, v29.4s, v10.4s\n"
- "srshl v30.4s, v30.4s, v10.4s\n"
- "srshl v31.4s, v31.4s, v10.4s\n"
- "smin v16.4s, v16.4s, v11.4s\n"
- "smin v17.4s, v17.4s, v11.4s\n"
- "smin v18.4s, v18.4s, v11.4s\n"
- "smin v19.4s, v19.4s, v11.4s\n"
- "smin v20.4s, v20.4s, v11.4s\n"
- "smin v21.4s, v21.4s, v11.4s\n"
- "smin v22.4s, v22.4s, v11.4s\n"
- "smin v23.4s, v23.4s, v11.4s\n"
- "add v24.4s, v24.4s, v14.4s\n"
- "add v25.4s, v25.4s, v14.4s\n"
- "add v26.4s, v26.4s, v14.4s\n"
- "add v27.4s, v27.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "smax v16.4s, v16.4s, v13.4s\n"
- "smax v17.4s, v17.4s, v13.4s\n"
- "smax v18.4s, v18.4s, v13.4s\n"
- "smax v19.4s, v19.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smax v21.4s, v21.4s, v13.4s\n"
- "smax v22.4s, v22.4s, v13.4s\n"
- "smax v23.4s, v23.4s, v13.4s\n"
- "smin v24.4s, v24.4s, v11.4s\n"
- "smin v25.4s, v25.4s, v11.4s\n"
- "smin v26.4s, v26.4s, v11.4s\n"
- "smin v27.4s, v27.4s, v11.4s\n"
- "smin v28.4s, v28.4s, v11.4s\n"
- "smin v29.4s, v29.4s, v11.4s\n"
- "smin v30.4s, v30.4s, v11.4s\n"
- "smin v31.4s, v31.4s, v11.4s\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "uzp1 v17.16b, v17.16b, v17.16b\n"
- "uzp1 v18.16b, v18.16b, v18.16b\n"
- "uzp1 v19.16b, v19.16b, v19.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str s31, [x22, x9]\n"
+ "and v17.16b, v29.16b, v7.16b\n"
+ "sqrdmulh v28.4s, v28.4s, v8.4s\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
+ "srshl v30.4s, v30.4s, v7.4s\n"
+ "sshl v27.4s, v27.4s, v9.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v28.16b, v7.16b\n"
+ "add v30.4s, v30.4s, v10.4s\n"
+ "sqadd v29.4s, v29.4s, v17.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smin v30.4s, v30.4s, v13.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v8.4s\n"
+ "srshl v29.4s, v29.4s, v7.4s\n"
+ "smax v30.4s, v30.4s, v14.4s\n"
+ "sqadd v28.4s, v28.4s, v16.4s\n"
+ "and v16.16b, v27.16b, v7.16b\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "add v29.4s, v29.4s, v10.4s\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "str s30, [x23, x9]\n"
+ "smin v29.4s, v29.4s, v13.4s\n"
+ "srshl v28.4s, v28.4s, v7.4s\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshl v26.4s, v26.4s, v9.4s\n"
+ "smax v29.4s, v29.4s, v14.4s\n"
+ "add v28.4s, v28.4s, v10.4s\n"
+ "sqadd v27.4s, v27.4s, v16.4s\n"
+ "uzp1 v29.16b, v29.16b, v29.16b\n"
+ "smin v28.4s, v28.4s, v13.4s\n"
+ "uzp1 v29.16b, v29.16b, v29.16b\n"
+ "str s29, [x24, x9]\n"
+ "smax v28.4s, v28.4s, v14.4s\n"
+ "srshl v27.4s, v27.4s, v7.4s\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
+ "sqrdmulh v26.4s, v26.4s, v8.4s\n"
+ "sshl v25.4s, v25.4s, v9.4s\n"
+ "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "add v27.4s, v27.4s, v10.4s\n"
+ "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "str s28, [x25, x9]\n"
+ "smin v27.4s, v27.4s, v13.4s\n"
+ "and v17.16b, v26.16b, v7.16b\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
+ "sqrdmulh v25.4s, v25.4s, v8.4s\n"
+ "sshl v24.4s, v24.4s, v9.4s\n"
+ "smax v27.4s, v27.4s, v14.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v25.16b, v7.16b\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "sqadd v26.4s, v26.4s, v17.4s\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "str s27, [x26, x9]\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v24.4s, v24.4s, v8.4s\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
+ "srshl v26.4s, v26.4s, v7.4s\n"
+ "sshl v23.4s, v23.4s, v9.4s\n"
+ "sqadd v25.4s, v25.4s, v16.4s\n"
+ "and v17.16b, v24.16b, v7.16b\n"
+ "add v26.4s, v26.4s, v10.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v8.4s\n"
+ "srshl v25.4s, v25.4s, v7.4s\n"
+ "smin v26.4s, v26.4s, v13.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v23.16b, v7.16b\n"
+ "smax v26.4s, v26.4s, v14.4s\n"
+ "add v25.4s, v25.4s, v10.4s\n"
+ "sqadd v24.4s, v24.4s, v17.4s\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "smin v25.4s, v25.4s, v13.4s\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "str s26, [x19, x9]\n"
+ "smax v25.4s, v25.4s, v14.4s\n"
+ "srshl v24.4s, v24.4s, v7.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshl v22.4s, v22.4s, v9.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "add v24.4s, v24.4s, v10.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s25, [x20, x9]\n"
+ "smin v24.4s, v24.4s, v13.4s\n"
+ "sqadd v23.4s, v23.4s, v16.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v8.4s\n"
+ "sshl v21.4s, v21.4s, v9.4s\n"
+ "smax v24.4s, v24.4s, v14.4s\n"
+ "srshl v23.4s, v23.4s, v7.4s\n"
+ "and v17.16b, v22.16b, v7.16b\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v8.4s\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "str s24, [x21, x9]\n"
+ "add v23.4s, v23.4s, v10.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v21.16b, v7.16b\n"
+ "sshl v20.4s, v20.4s, v9.4s\n"
+ "smin v23.4s, v23.4s, v13.4s\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smax v23.4s, v23.4s, v14.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v8.4s\n"
+ "srshl v22.4s, v22.4s, v7.4s\n"
"uzp1 v23.16b, v23.16b, v23.16b\n"
- "smax v24.4s, v24.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v26.4s, v26.4s, v13.4s\n"
- "smax v27.4s, v27.4s, v13.4s\n"
- "smax v28.4s, v28.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v31.4s, v31.4s, v13.4s\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "str s16, [x20, x9]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "uzp1 v17.16b, v17.16b, v17.16b\n"
- "uzp1 v18.16b, v18.16b, v18.16b\n"
- "str s17, [x21, x9]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
- "uzp1 v19.16b, v19.16b, v19.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s18, [x22, x9]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v22.16b, v22.16b, v22.16b\n"
- "str s19, [x23, x9]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
+ "sqadd v21.4s, v21.4s, v16.4s\n"
"uzp1 v23.16b, v23.16b, v23.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s20, [x24, x9]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s21, [x25, x9]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s22, [x26, x9]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s23, [x27, x9]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
+ "str s23, [x22, x9]\n"
+ "add v22.4s, v22.4s, v10.4s\n"
+ "and v16.16b, v20.16b, v7.16b\n"
+ "srshl v21.4s, v21.4s, v7.4s\n"
+ "sshl v19.4s, v19.4s, v9.4s\n"
+ "smin v22.4s, v22.4s, v13.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "add v21.4s, v21.4s, v10.4s\n"
+ "smax v22.4s, v22.4s, v14.4s\n"
+ "sqadd v20.4s, v20.4s, v16.4s\n"
+ "smin v21.4s, v21.4s, v13.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "sqrdmulh v19.4s, v19.4s, v8.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str s22, [x23, x9]\n"
+ "smax v21.4s, v21.4s, v14.4s\n"
+ "srshl v20.4s, v20.4s, v7.4s\n"
+ "and v16.16b, v19.16b, v7.16b\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "add v20.4s, v20.4s, v10.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "str s21, [x24, x9]\n"
+ "smin v20.4s, v20.4s, v13.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "smax v20.4s, v20.4s, v14.4s\n"
+ "srshl v19.4s, v19.4s, v7.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "str s20, [x25, x9]\n"
+ "add v19.4s, v19.4s, v10.4s\n"
+ "smin v19.4s, v19.4s, v13.4s\n"
+ "smax v19.4s, v19.4s, v14.4s\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str s19, [x26, x9]\n"
+ "b 8f\n"
+ "6:" // Output channel loop: Odd tail
+ "smlal v6.4s, v17.4h, v3.h[0]\n"
+ "ldp x25, x27, [x19], #0x10\n"
+ "smlal v5.4s, v17.4h, v3.h[1]\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "smlal v4.4s, v17.4h, v3.h[2]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "smlal v31.4s, v17.4h, v3.h[3]\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "smlal v30.4s, v17.4h, v3.h[4]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
+ "smlal v29.4s, v17.4h, v3.h[5]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
+ "smlal v28.4s, v17.4h, v3.h[6]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
+ "smlal v27.4s, v17.4h, v3.h[7]\n"
+ "ldr d3, [x25, #0x0]\n"
+ "smlal v26.4s, v17.4h, v2.h[0]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
+ "smlal v25.4s, v17.4h, v2.h[1]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
+ "smlal v24.4s, v17.4h, v2.h[2]\n"
+ "smlal v23.4s, v17.4h, v2.h[3]\n"
+ "smlal v22.4s, v17.4h, v2.h[4]\n"
+ "smlal v21.4s, v17.4h, v2.h[5]\n"
+ "smlal v20.4s, v17.4h, v2.h[6]\n"
+ "smlal v19.4s, v17.4h, v2.h[7]\n"
+ "ldr d2, [x27, #0x0]\n"
+ "usubl v3.8h, v3.8b, v12.8b\n"
+ "ldr s17, [%x[weights]], #0x4\n"
+ "smlal v6.4s, v16.4h, v1.h[0]\n"
+ "smlal v5.4s, v16.4h, v1.h[1]\n"
+ "smlal v4.4s, v16.4h, v1.h[2]\n"
+ "usubl v2.8h, v2.8b, v12.8b\n"
+ "ssubl v17.8h, v17.8b, v11.8b\n"
+ "smlal v31.4s, v16.4h, v1.h[3]\n"
+ "smlal v30.4s, v16.4h, v1.h[4]\n"
+ "smlal v29.4s, v16.4h, v1.h[5]\n"
+ "smlal v28.4s, v16.4h, v1.h[6]\n"
+ "smlal v27.4s, v16.4h, v1.h[7]\n"
+ "smlal v26.4s, v16.4h, v0.h[0]\n"
+ "smlal v25.4s, v16.4h, v0.h[1]\n"
+ "smlal v24.4s, v16.4h, v0.h[2]\n"
+ "smlal v23.4s, v16.4h, v0.h[3]\n"
+ "smlal v22.4s, v16.4h, v0.h[4]\n"
+ "smlal v21.4s, v16.4h, v0.h[5]\n"
+ "smlal v20.4s, v16.4h, v0.h[6]\n"
+ "smlal v19.4s, v16.4h, v0.h[7]\n"
+ "smlal v6.4s, v17.4h, v3.h[0]\n"
+ "smlal v5.4s, v17.4h, v3.h[1]\n"
+ "smlal v4.4s, v17.4h, v3.h[2]\n"
+ "smlal v31.4s, v17.4h, v3.h[3]\n"
+ "smlal v30.4s, v17.4h, v3.h[4]\n"
+ "smlal v29.4s, v17.4h, v3.h[5]\n"
+ "smlal v28.4s, v17.4h, v3.h[6]\n"
+ "smlal v27.4s, v17.4h, v3.h[7]\n"
+ "smlal v26.4s, v17.4h, v2.h[0]\n"
+ "smlal v25.4s, v17.4h, v2.h[1]\n"
+ "smlal v24.4s, v17.4h, v2.h[2]\n"
+ "smlal v23.4s, v17.4h, v2.h[3]\n"
+ "smlal v22.4s, v17.4h, v2.h[4]\n"
+ "smlal v21.4s, v17.4h, v2.h[5]\n"
+ "smlal v20.4s, v17.4h, v2.h[6]\n"
+ "smlal v19.4s, v17.4h, v2.h[7]\n"
+ "sshl v6.4s, v6.4s, v9.4s\n"
+ "sshl v5.4s, v5.4s, v9.4s\n"
+ "sqrdmulh v6.4s, v6.4s, v8.4s\n"
+ "sqrdmulh v5.4s, v5.4s, v8.4s\n"
+ "sshl v4.4s, v4.4s, v9.4s\n"
+ "sshl v31.4s, v31.4s, v9.4s\n"
+ "and v18.16b, v6.16b, v7.16b\n"
+ "and v16.16b, v5.16b, v7.16b\n"
+ "sqrdmulh v4.4s, v4.4s, v8.4s\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v31.4s, v31.4s, v8.4s\n"
+ "sqadd v6.4s, v6.4s, v18.4s\n"
+ "sqadd v5.4s, v5.4s, v16.4s\n"
+ "and v17.16b, v4.16b, v7.16b\n"
+ "and v16.16b, v31.16b, v7.16b\n"
+ "srshl v6.4s, v6.4s, v7.4s\n"
+ "srshl v5.4s, v5.4s, v7.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "add v6.4s, v6.4s, v10.4s\n"
+ "add v5.4s, v5.4s, v10.4s\n"
+ "sqadd v4.4s, v4.4s, v17.4s\n"
+ "smin v6.4s, v6.4s, v13.4s\n"
+ "smin v5.4s, v5.4s, v13.4s\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
+ "smax v6.4s, v6.4s, v14.4s\n"
+ "smax v5.4s, v5.4s, v14.4s\n"
+ "srshl v4.4s, v4.4s, v7.4s\n"
+ "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "str s6, [x19, x9]\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "add v4.4s, v4.4s, v10.4s\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "srshl v31.4s, v31.4s, v7.4s\n"
+ "str s5, [x20, x9]\n"
+ "sshl v30.4s, v30.4s, v9.4s\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
+ "smin v4.4s, v4.4s, v13.4s\n"
+ "sqrdmulh v30.4s, v30.4s, v8.4s\n"
+ "add v31.4s, v31.4s, v10.4s\n"
+ "smax v4.4s, v4.4s, v14.4s\n"
+ "sshl v29.4s, v29.4s, v9.4s\n"
+ "smin v31.4s, v31.4s, v13.4s\n"
+ "uzp1 v4.16b, v4.16b, v4.16b\n"
+ "and v16.16b, v30.16b, v7.16b\n"
+ "uzp1 v4.16b, v4.16b, v4.16b\n"
+ "str s4, [x21, x9]\n"
+ "smax v31.4s, v31.4s, v14.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
+ "sqrdmulh v29.4s, v29.4s, v8.4s\n"
+ "sshl v28.4s, v28.4s, v9.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s24, [x20, x9]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s25, [x21, x9]\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s26, [x22, x9]\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s27, [x23, x9]\n"
+ "sqadd v30.4s, v30.4s, v16.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "str s28, [x24, x9]\n"
- "str s29, [x25, x9]\n"
- "str s30, [x26, x9]\n"
- "str s31, [x27, x9]\n"
- "b 8f\n"
- "7:" // Output channel loop: Single kernel point
- "smlal v16.4s, v8.4h, v2.h[0]\n"
- "smlal v17.4s, v8.4h, v2.h[1]\n"
- "sshl v16.4s, v16.4s, v15.4s\n"
- "ldr x20, [%x[outptrs], #0x0]\n"
- "smlal v18.4s, v8.4h, v2.h[2]\n"
- "smlal v19.4s, v8.4h, v2.h[3]\n"
- "sshl v17.4s, v17.4s, v15.4s\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
- "sshl v18.4s, v18.4s, v15.4s\n"
- "sshl v19.4s, v19.4s, v15.4s\n"
- "smlal v20.4s, v8.4h, v2.h[4]\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "smlal v21.4s, v8.4h, v2.h[5]\n"
- "smlal v22.4s, v8.4h, v2.h[6]\n"
- "sqrdmulh v16.4s, v16.4s, v9.4s\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
- "smlal v23.4s, v8.4h, v2.h[7]\n"
- "smlal v24.4s, v8.4h, v7.h[0]\n"
- "sqrdmulh v17.4s, v17.4s, v9.4s\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "smlal v25.4s, v8.4h, v7.h[1]\n"
- "sqrdmulh v18.4s, v18.4s, v9.4s\n"
- "smlal v26.4s, v8.4h, v7.h[2]\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
- "sqrdmulh v19.4s, v19.4s, v9.4s\n"
- "and v5.16b, v16.16b, v10.16b\n"
- "smlal v27.4s, v8.4h, v7.h[3]\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "and v4.16b, v17.16b, v10.16b\n"
- "and v2.16b, v18.16b, v10.16b\n"
- "smlal v28.4s, v8.4h, v7.h[4]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
- "and v1.16b, v19.16b, v10.16b\n"
- "sshl v20.4s, v20.4s, v15.4s\n"
- "smlal v29.4s, v8.4h, v7.h[5]\n"
- "sshl v21.4s, v21.4s, v15.4s\n"
- "sshl v22.4s, v22.4s, v15.4s\n"
- "smlal v30.4s, v8.4h, v7.h[6]\n"
- "sshl v23.4s, v23.4s, v15.4s\n"
- "sshl v24.4s, v24.4s, v15.4s\n"
- "smlal v31.4s, v8.4h, v7.h[7]\n"
- "sshl v25.4s, v25.4s, v15.4s\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "sqrdmulh v21.4s, v21.4s, v9.4s\n"
- "sqrdmulh v22.4s, v22.4s, v9.4s\n"
- "sqrdmulh v23.4s, v23.4s, v9.4s\n"
- "sqrdmulh v24.4s, v24.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqadd v16.4s, v16.4s, v5.4s\n"
- "sqadd v17.4s, v17.4s, v4.4s\n"
- "sqadd v18.4s, v18.4s, v2.4s\n"
- "sqadd v19.4s, v19.4s, v1.4s\n"
- "and v8.16b, v20.16b, v10.16b\n"
- "and v0.16b, v21.16b, v10.16b\n"
- "and v5.16b, v22.16b, v10.16b\n"
- "and v4.16b, v23.16b, v10.16b\n"
- "and v2.16b, v24.16b, v10.16b\n"
- "and v1.16b, v25.16b, v10.16b\n"
- "sshl v26.4s, v26.4s, v15.4s\n"
- "sshl v27.4s, v27.4s, v15.4s\n"
- "sshl v28.4s, v28.4s, v15.4s\n"
- "sshl v29.4s, v29.4s, v15.4s\n"
- "sshl v30.4s, v30.4s, v15.4s\n"
- "sshl v31.4s, v31.4s, v15.4s\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v9.4s\n"
- "sqrdmulh v27.4s, v27.4s, v9.4s\n"
- "sqrdmulh v28.4s, v28.4s, v9.4s\n"
- "sqrdmulh v29.4s, v29.4s, v9.4s\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v31.4s, v31.4s, v9.4s\n"
- "sqadd v20.4s, v20.4s, v8.4s\n"
- "sqadd v21.4s, v21.4s, v0.4s\n"
- "sqadd v22.4s, v22.4s, v5.4s\n"
- "sqadd v23.4s, v23.4s, v4.4s\n"
- "sqadd v24.4s, v24.4s, v2.4s\n"
- "sqadd v25.4s, v25.4s, v1.4s\n"
- "and v8.16b, v26.16b, v10.16b\n"
- "and v0.16b, v27.16b, v10.16b\n"
- "and v5.16b, v28.16b, v10.16b\n"
- "and v4.16b, v29.16b, v10.16b\n"
- "and v2.16b, v30.16b, v10.16b\n"
- "and v1.16b, v31.16b, v10.16b\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "srshl v16.4s, v16.4s, v10.4s\n"
- "srshl v17.4s, v17.4s, v10.4s\n"
- "srshl v18.4s, v18.4s, v10.4s\n"
- "srshl v19.4s, v19.4s, v10.4s\n"
- "srshl v20.4s, v20.4s, v10.4s\n"
- "srshl v21.4s, v21.4s, v10.4s\n"
- "srshl v22.4s, v22.4s, v10.4s\n"
- "srshl v23.4s, v23.4s, v10.4s\n"
- "sqadd v26.4s, v26.4s, v8.4s\n"
- "sqadd v27.4s, v27.4s, v0.4s\n"
- "sqadd v28.4s, v28.4s, v5.4s\n"
- "sqadd v29.4s, v29.4s, v4.4s\n"
- "sqadd v30.4s, v30.4s, v2.4s\n"
- "sqadd v31.4s, v31.4s, v1.4s\n"
- "add v16.4s, v16.4s, v14.4s\n"
- "add v17.4s, v17.4s, v14.4s\n"
- "add v18.4s, v18.4s, v14.4s\n"
- "add v19.4s, v19.4s, v14.4s\n"
- "add v20.4s, v20.4s, v14.4s\n"
- "add v21.4s, v21.4s, v14.4s\n"
- "add v22.4s, v22.4s, v14.4s\n"
- "add v23.4s, v23.4s, v14.4s\n"
- "srshl v24.4s, v24.4s, v10.4s\n"
- "srshl v25.4s, v25.4s, v10.4s\n"
- "srshl v26.4s, v26.4s, v10.4s\n"
- "srshl v27.4s, v27.4s, v10.4s\n"
- "srshl v28.4s, v28.4s, v10.4s\n"
- "srshl v29.4s, v29.4s, v10.4s\n"
- "srshl v30.4s, v30.4s, v10.4s\n"
- "srshl v31.4s, v31.4s, v10.4s\n"
- "smin v16.4s, v16.4s, v11.4s\n"
- "smin v17.4s, v17.4s, v11.4s\n"
- "smin v18.4s, v18.4s, v11.4s\n"
- "smin v19.4s, v19.4s, v11.4s\n"
- "smin v20.4s, v20.4s, v11.4s\n"
- "smin v21.4s, v21.4s, v11.4s\n"
- "smin v22.4s, v22.4s, v11.4s\n"
- "smin v23.4s, v23.4s, v11.4s\n"
- "add v24.4s, v24.4s, v14.4s\n"
- "add v25.4s, v25.4s, v14.4s\n"
- "add v26.4s, v26.4s, v14.4s\n"
- "add v27.4s, v27.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "smax v16.4s, v16.4s, v13.4s\n"
- "smax v17.4s, v17.4s, v13.4s\n"
- "smax v18.4s, v18.4s, v13.4s\n"
- "smax v19.4s, v19.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smax v21.4s, v21.4s, v13.4s\n"
- "smax v22.4s, v22.4s, v13.4s\n"
- "smax v23.4s, v23.4s, v13.4s\n"
- "smin v24.4s, v24.4s, v11.4s\n"
- "smin v25.4s, v25.4s, v11.4s\n"
- "smin v26.4s, v26.4s, v11.4s\n"
- "smin v27.4s, v27.4s, v11.4s\n"
- "smin v28.4s, v28.4s, v11.4s\n"
- "smin v29.4s, v29.4s, v11.4s\n"
- "smin v30.4s, v30.4s, v11.4s\n"
- "smin v31.4s, v31.4s, v11.4s\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "uzp1 v17.16b, v17.16b, v17.16b\n"
- "uzp1 v18.16b, v18.16b, v18.16b\n"
- "uzp1 v19.16b, v19.16b, v19.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str s31, [x22, x9]\n"
+ "and v17.16b, v29.16b, v7.16b\n"
+ "sqrdmulh v28.4s, v28.4s, v8.4s\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
+ "srshl v30.4s, v30.4s, v7.4s\n"
+ "sshl v27.4s, v27.4s, v9.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v28.16b, v7.16b\n"
+ "add v30.4s, v30.4s, v10.4s\n"
+ "sqadd v29.4s, v29.4s, v17.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smin v30.4s, v30.4s, v13.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v8.4s\n"
+ "srshl v29.4s, v29.4s, v7.4s\n"
+ "smax v30.4s, v30.4s, v14.4s\n"
+ "sqadd v28.4s, v28.4s, v16.4s\n"
+ "and v16.16b, v27.16b, v7.16b\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "add v29.4s, v29.4s, v10.4s\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "str s30, [x23, x9]\n"
+ "smin v29.4s, v29.4s, v13.4s\n"
+ "srshl v28.4s, v28.4s, v7.4s\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshl v26.4s, v26.4s, v9.4s\n"
+ "smax v29.4s, v29.4s, v14.4s\n"
+ "add v28.4s, v28.4s, v10.4s\n"
+ "sqadd v27.4s, v27.4s, v16.4s\n"
+ "uzp1 v29.16b, v29.16b, v29.16b\n"
+ "smin v28.4s, v28.4s, v13.4s\n"
+ "uzp1 v29.16b, v29.16b, v29.16b\n"
+ "str s29, [x24, x9]\n"
+ "smax v28.4s, v28.4s, v14.4s\n"
+ "srshl v27.4s, v27.4s, v7.4s\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
+ "sqrdmulh v26.4s, v26.4s, v8.4s\n"
+ "sshl v25.4s, v25.4s, v9.4s\n"
+ "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "add v27.4s, v27.4s, v10.4s\n"
+ "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "str s28, [x25, x9]\n"
+ "smin v27.4s, v27.4s, v13.4s\n"
+ "and v17.16b, v26.16b, v7.16b\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
+ "sqrdmulh v25.4s, v25.4s, v8.4s\n"
+ "sshl v24.4s, v24.4s, v9.4s\n"
+ "smax v27.4s, v27.4s, v14.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v25.16b, v7.16b\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "sqadd v26.4s, v26.4s, v17.4s\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "str s27, [x26, x9]\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v24.4s, v24.4s, v8.4s\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
+ "srshl v26.4s, v26.4s, v7.4s\n"
+ "sshl v23.4s, v23.4s, v9.4s\n"
+ "sqadd v25.4s, v25.4s, v16.4s\n"
+ "and v17.16b, v24.16b, v7.16b\n"
+ "add v26.4s, v26.4s, v10.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v8.4s\n"
+ "srshl v25.4s, v25.4s, v7.4s\n"
+ "smin v26.4s, v26.4s, v13.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v23.16b, v7.16b\n"
+ "smax v26.4s, v26.4s, v14.4s\n"
+ "add v25.4s, v25.4s, v10.4s\n"
+ "sqadd v24.4s, v24.4s, v17.4s\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "smin v25.4s, v25.4s, v13.4s\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "str s26, [x19, x9]\n"
+ "smax v25.4s, v25.4s, v14.4s\n"
+ "srshl v24.4s, v24.4s, v7.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshl v22.4s, v22.4s, v9.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "add v24.4s, v24.4s, v10.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s25, [x20, x9]\n"
+ "smin v24.4s, v24.4s, v13.4s\n"
+ "sqadd v23.4s, v23.4s, v16.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v8.4s\n"
+ "sshl v21.4s, v21.4s, v9.4s\n"
+ "smax v24.4s, v24.4s, v14.4s\n"
+ "srshl v23.4s, v23.4s, v7.4s\n"
+ "and v17.16b, v22.16b, v7.16b\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v8.4s\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "str s24, [x21, x9]\n"
+ "add v23.4s, v23.4s, v10.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v21.16b, v7.16b\n"
+ "sshl v20.4s, v20.4s, v9.4s\n"
+ "smin v23.4s, v23.4s, v13.4s\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smax v23.4s, v23.4s, v14.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v8.4s\n"
+ "srshl v22.4s, v22.4s, v7.4s\n"
"uzp1 v23.16b, v23.16b, v23.16b\n"
- "smax v24.4s, v24.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v26.4s, v26.4s, v13.4s\n"
- "smax v27.4s, v27.4s, v13.4s\n"
- "smax v28.4s, v28.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v31.4s, v31.4s, v13.4s\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "str s16, [x20, x9]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "uzp1 v17.16b, v17.16b, v17.16b\n"
- "uzp1 v18.16b, v18.16b, v18.16b\n"
- "str s17, [x21, x9]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
- "uzp1 v19.16b, v19.16b, v19.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s18, [x22, x9]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v22.16b, v22.16b, v22.16b\n"
- "str s19, [x23, x9]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
+ "sqadd v21.4s, v21.4s, v16.4s\n"
"uzp1 v23.16b, v23.16b, v23.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s20, [x24, x9]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s21, [x25, x9]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s22, [x26, x9]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s23, [x27, x9]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
+ "str s23, [x22, x9]\n"
+ "add v22.4s, v22.4s, v10.4s\n"
+ "and v16.16b, v20.16b, v7.16b\n"
+ "srshl v21.4s, v21.4s, v7.4s\n"
+ "sshl v19.4s, v19.4s, v9.4s\n"
+ "smin v22.4s, v22.4s, v13.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "add v21.4s, v21.4s, v10.4s\n"
+ "smax v22.4s, v22.4s, v14.4s\n"
+ "sqadd v20.4s, v20.4s, v16.4s\n"
+ "smin v21.4s, v21.4s, v13.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "sqrdmulh v19.4s, v19.4s, v8.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str s22, [x23, x9]\n"
+ "smax v21.4s, v21.4s, v14.4s\n"
+ "srshl v20.4s, v20.4s, v7.4s\n"
+ "and v16.16b, v19.16b, v7.16b\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "add v20.4s, v20.4s, v10.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "str s21, [x24, x9]\n"
+ "smin v20.4s, v20.4s, v13.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "smax v20.4s, v20.4s, v14.4s\n"
+ "srshl v19.4s, v19.4s, v7.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "str s20, [x25, x9]\n"
+ "add v19.4s, v19.4s, v10.4s\n"
+ "smin v19.4s, v19.4s, v13.4s\n"
+ "smax v19.4s, v19.4s, v14.4s\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str s19, [x26, x9]\n"
+ "b 8f\n"
+ "7:" // Output channel loop: Single kernel point
+ "smlal v6.4s, v17.4h, v3.h[0]\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "smlal v5.4s, v17.4h, v3.h[1]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "smlal v4.4s, v17.4h, v3.h[2]\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "smlal v31.4s, v17.4h, v3.h[3]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
+ "smlal v30.4s, v17.4h, v3.h[4]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
+ "smlal v29.4s, v17.4h, v3.h[5]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
+ "smlal v28.4s, v17.4h, v3.h[6]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
+ "smlal v27.4s, v17.4h, v3.h[7]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
+ "smlal v26.4s, v17.4h, v2.h[0]\n"
+ "smlal v25.4s, v17.4h, v2.h[1]\n"
+ "smlal v24.4s, v17.4h, v2.h[2]\n"
+ "smlal v23.4s, v17.4h, v2.h[3]\n"
+ "smlal v22.4s, v17.4h, v2.h[4]\n"
+ "smlal v21.4s, v17.4h, v2.h[5]\n"
+ "smlal v20.4s, v17.4h, v2.h[6]\n"
+ "smlal v19.4s, v17.4h, v2.h[7]\n"
+ "sshl v6.4s, v6.4s, v9.4s\n"
+ "sshl v5.4s, v5.4s, v9.4s\n"
+ "sqrdmulh v6.4s, v6.4s, v8.4s\n"
+ "sqrdmulh v5.4s, v5.4s, v8.4s\n"
+ "sshl v4.4s, v4.4s, v9.4s\n"
+ "sshl v31.4s, v31.4s, v9.4s\n"
+ "and v18.16b, v6.16b, v7.16b\n"
+ "and v16.16b, v5.16b, v7.16b\n"
+ "sqrdmulh v4.4s, v4.4s, v8.4s\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v31.4s, v31.4s, v8.4s\n"
+ "sqadd v6.4s, v6.4s, v18.4s\n"
+ "sqadd v5.4s, v5.4s, v16.4s\n"
+ "and v17.16b, v4.16b, v7.16b\n"
+ "and v16.16b, v31.16b, v7.16b\n"
+ "srshl v6.4s, v6.4s, v7.4s\n"
+ "srshl v5.4s, v5.4s, v7.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "add v6.4s, v6.4s, v10.4s\n"
+ "add v5.4s, v5.4s, v10.4s\n"
+ "sqadd v4.4s, v4.4s, v17.4s\n"
+ "smin v6.4s, v6.4s, v13.4s\n"
+ "smin v5.4s, v5.4s, v13.4s\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
+ "smax v6.4s, v6.4s, v14.4s\n"
+ "smax v5.4s, v5.4s, v14.4s\n"
+ "srshl v4.4s, v4.4s, v7.4s\n"
+ "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "str s6, [x19, x9]\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "add v4.4s, v4.4s, v10.4s\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "srshl v31.4s, v31.4s, v7.4s\n"
+ "str s5, [x20, x9]\n"
+ "sshl v30.4s, v30.4s, v9.4s\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
+ "smin v4.4s, v4.4s, v13.4s\n"
+ "sqrdmulh v30.4s, v30.4s, v8.4s\n"
+ "add v31.4s, v31.4s, v10.4s\n"
+ "smax v4.4s, v4.4s, v14.4s\n"
+ "sshl v29.4s, v29.4s, v9.4s\n"
+ "smin v31.4s, v31.4s, v13.4s\n"
+ "uzp1 v4.16b, v4.16b, v4.16b\n"
+ "and v16.16b, v30.16b, v7.16b\n"
+ "uzp1 v4.16b, v4.16b, v4.16b\n"
+ "str s4, [x21, x9]\n"
+ "smax v31.4s, v31.4s, v14.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
+ "sqrdmulh v29.4s, v29.4s, v8.4s\n"
+ "sshl v28.4s, v28.4s, v9.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s24, [x20, x9]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s25, [x21, x9]\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s26, [x22, x9]\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s27, [x23, x9]\n"
+ "sqadd v30.4s, v30.4s, v16.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "str s28, [x24, x9]\n"
- "str s29, [x25, x9]\n"
- "str s30, [x26, x9]\n"
- "str s31, [x27, x9]\n"
+ "str s31, [x22, x9]\n"
+ "and v17.16b, v29.16b, v7.16b\n"
+ "sqrdmulh v28.4s, v28.4s, v8.4s\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
+ "srshl v30.4s, v30.4s, v7.4s\n"
+ "sshl v27.4s, v27.4s, v9.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v28.16b, v7.16b\n"
+ "add v30.4s, v30.4s, v10.4s\n"
+ "sqadd v29.4s, v29.4s, v17.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smin v30.4s, v30.4s, v13.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v8.4s\n"
+ "srshl v29.4s, v29.4s, v7.4s\n"
+ "smax v30.4s, v30.4s, v14.4s\n"
+ "sqadd v28.4s, v28.4s, v16.4s\n"
+ "and v16.16b, v27.16b, v7.16b\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "add v29.4s, v29.4s, v10.4s\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "str s30, [x23, x9]\n"
+ "smin v29.4s, v29.4s, v13.4s\n"
+ "srshl v28.4s, v28.4s, v7.4s\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshl v26.4s, v26.4s, v9.4s\n"
+ "smax v29.4s, v29.4s, v14.4s\n"
+ "add v28.4s, v28.4s, v10.4s\n"
+ "sqadd v27.4s, v27.4s, v16.4s\n"
+ "uzp1 v29.16b, v29.16b, v29.16b\n"
+ "smin v28.4s, v28.4s, v13.4s\n"
+ "uzp1 v29.16b, v29.16b, v29.16b\n"
+ "str s29, [x24, x9]\n"
+ "smax v28.4s, v28.4s, v14.4s\n"
+ "srshl v27.4s, v27.4s, v7.4s\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
+ "sqrdmulh v26.4s, v26.4s, v8.4s\n"
+ "sshl v25.4s, v25.4s, v9.4s\n"
+ "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "add v27.4s, v27.4s, v10.4s\n"
+ "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "str s28, [x25, x9]\n"
+ "smin v27.4s, v27.4s, v13.4s\n"
+ "and v17.16b, v26.16b, v7.16b\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
+ "sqrdmulh v25.4s, v25.4s, v8.4s\n"
+ "sshl v24.4s, v24.4s, v9.4s\n"
+ "smax v27.4s, v27.4s, v14.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v25.16b, v7.16b\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "sqadd v26.4s, v26.4s, v17.4s\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "str s27, [x26, x9]\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v24.4s, v24.4s, v8.4s\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
+ "srshl v26.4s, v26.4s, v7.4s\n"
+ "sshl v23.4s, v23.4s, v9.4s\n"
+ "sqadd v25.4s, v25.4s, v16.4s\n"
+ "and v17.16b, v24.16b, v7.16b\n"
+ "add v26.4s, v26.4s, v10.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v8.4s\n"
+ "srshl v25.4s, v25.4s, v7.4s\n"
+ "smin v26.4s, v26.4s, v13.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v23.16b, v7.16b\n"
+ "smax v26.4s, v26.4s, v14.4s\n"
+ "add v25.4s, v25.4s, v10.4s\n"
+ "sqadd v24.4s, v24.4s, v17.4s\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "smin v25.4s, v25.4s, v13.4s\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "str s26, [x19, x9]\n"
+ "smax v25.4s, v25.4s, v14.4s\n"
+ "srshl v24.4s, v24.4s, v7.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshl v22.4s, v22.4s, v9.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "add v24.4s, v24.4s, v10.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s25, [x20, x9]\n"
+ "smin v24.4s, v24.4s, v13.4s\n"
+ "sqadd v23.4s, v23.4s, v16.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v8.4s\n"
+ "sshl v21.4s, v21.4s, v9.4s\n"
+ "smax v24.4s, v24.4s, v14.4s\n"
+ "srshl v23.4s, v23.4s, v7.4s\n"
+ "and v17.16b, v22.16b, v7.16b\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v8.4s\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "str s24, [x21, x9]\n"
+ "add v23.4s, v23.4s, v10.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "and v16.16b, v21.16b, v7.16b\n"
+ "sshl v20.4s, v20.4s, v9.4s\n"
+ "smin v23.4s, v23.4s, v13.4s\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smax v23.4s, v23.4s, v14.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v8.4s\n"
+ "srshl v22.4s, v22.4s, v7.4s\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "sqadd v21.4s, v21.4s, v16.4s\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "str s23, [x22, x9]\n"
+ "add v22.4s, v22.4s, v10.4s\n"
+ "and v16.16b, v20.16b, v7.16b\n"
+ "srshl v21.4s, v21.4s, v7.4s\n"
+ "sshl v19.4s, v19.4s, v9.4s\n"
+ "smin v22.4s, v22.4s, v13.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "add v21.4s, v21.4s, v10.4s\n"
+ "smax v22.4s, v22.4s, v14.4s\n"
+ "sqadd v20.4s, v20.4s, v16.4s\n"
+ "smin v21.4s, v21.4s, v13.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "sqrdmulh v19.4s, v19.4s, v8.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "str s22, [x23, x9]\n"
+ "smax v21.4s, v21.4s, v14.4s\n"
+ "srshl v20.4s, v20.4s, v7.4s\n"
+ "and v16.16b, v19.16b, v7.16b\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "add v20.4s, v20.4s, v10.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "str s21, [x24, x9]\n"
+ "smin v20.4s, v20.4s, v13.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "smax v20.4s, v20.4s, v14.4s\n"
+ "srshl v19.4s, v19.4s, v7.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "str s20, [x25, x9]\n"
+ "add v19.4s, v19.4s, v10.4s\n"
+ "smin v19.4s, v19.4s, v13.4s\n"
+ "smax v19.4s, v19.4s, v14.4s\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str s19, [x26, x9]\n"
"8:" // Output channel loop: Done
"add x9, x9, #0x4\n"
- "cmp x9, x10, LSL #2\n"
+ "cmp x9, x28, LSL #2\n"
"blt 1b\n"
"tst %x[n_output_channels], #0x3\n"
"beq 26f\n"
"9:" // Output channel oddments
- "movi v31.4s, #0x0\n"
+ "movi v16.4s, #0x0\n"
"cbz %x[bias], 12f\n"
- "add x20, %x[bias], x9, LSL #2\n"
+ "add x19, %x[bias], x9, LSL #2\n"
"tbz %x[n_output_channels], #1, 10f\n"
- "ld1 { v31.d }[0], [x20], #0x8\n"
+ "ld1 { v16.d }[0], [x19], #0x8\n"
"tbz %x[n_output_channels], #0, 11f\n"
- "ld1 { v31.s }[2], [x20]\n"
+ "ld1 { v16.s }[2], [x19]\n"
"b 11f\n"
"10:" // Output channel oddments: Load bias: Bit 1: Unset
- "ld1 { v31.s }[0], [x20]\n"
+ "tbz %x[n_output_channels], #0, 11f\n"
+ "ld1 { v16.s }[0], [x19]\n"
"11:" // Output channel oddments: Load bias: Bit 1: End
+
"12:" // Output channel oddments: Load bias: Done
- "mov v16.16b, v31.16b\n"
- "mov v17.16b, v31.16b\n"
- "mov v18.16b, v31.16b\n"
- "mov v19.16b, v31.16b\n"
- "mov v20.16b, v31.16b\n"
- "mov v21.16b, v31.16b\n"
- "mov v22.16b, v31.16b\n"
- "mov v23.16b, v31.16b\n"
- "mov v24.16b, v31.16b\n"
- "mov v25.16b, v31.16b\n"
- "mov v26.16b, v31.16b\n"
- "mov v27.16b, v31.16b\n"
- "mov v28.16b, v31.16b\n"
- "mov v29.16b, v31.16b\n"
- "mov v30.16b, v31.16b\n"
- "mov v31.16b, v31.16b\n"
+ "mov v6.16b, v16.16b\n"
+ "mov v5.16b, v16.16b\n"
+ "mov v4.16b, v16.16b\n"
+ "mov v31.16b, v16.16b\n"
+ "mov v30.16b, v16.16b\n"
+ "mov v29.16b, v16.16b\n"
+ "mov v28.16b, v16.16b\n"
+ "mov v27.16b, v16.16b\n"
+ "mov v26.16b, v16.16b\n"
+ "mov v25.16b, v16.16b\n"
+ "mov v24.16b, v16.16b\n"
+ "mov v23.16b, v16.16b\n"
+ "mov v22.16b, v16.16b\n"
+ "mov v21.16b, v16.16b\n"
+ "mov v20.16b, v16.16b\n"
+ "mov v19.16b, v16.16b\n"
"cbz %x[rq_mul_ptr], 18f\n"
- "add x22, %x[rq_mul_ptr], x9, LSL #2\n"
- "add x21, %x[rq_right_shift_ptr], x9, LSL #2\n"
- "add x20, %x[rq_left_shift_ptr], x9, LSL #2\n"
+ "add x21, %x[rq_mul_ptr], x9, LSL #2\n"
+ "add x20, %x[rq_right_shift_ptr], x9, LSL #2\n"
+ "add x19, %x[rq_left_shift_ptr], x9, LSL #2\n"
"cbz %x[rq_left_shift_ptr], 15f\n"
"tbz %x[n_output_channels], #1, 13f\n"
- "ld1 { v9.d }[0], [x22], #0x8\n"
- "ld1 { v10.d }[0], [x21], #0x8\n"
- "ld1 { v15.d }[0], [x20], #0x8\n"
+ "ld1 { v8.d }[0], [x21], #0x8\n"
+ "ld1 { v7.d }[0], [x20], #0x8\n"
+ "ld1 { v9.d }[0], [x19], #0x8\n"
"tbz %x[n_output_channels], #0, 14f\n"
- "ld1 { v9.s }[2], [x22], #0x4\n"
- "ld1 { v10.s }[2], [x21], #0x4\n"
- "ld1 { v15.s }[2], [x20], #0x4\n"
+ "ld1 { v8.s }[2], [x21], #0x4\n"
+ "ld1 { v7.s }[2], [x20], #0x4\n"
+ "ld1 { v9.s }[2], [x19], #0x4\n"
"b 14f\n"
"13:" // Output channel oddments: Load quantization parameters: With left shift: Bit 1: Unset
- "ld1 { v9.s }[0], [x22], #0x4\n"
- "ld1 { v10.s }[0], [x21], #0x4\n"
- "ld1 { v15.s }[0], [x20], #0x4\n"
+ "tbz %x[n_output_channels], #0, 14f\n"
+ "ld1 { v8.s }[0], [x21], #0x4\n"
+ "ld1 { v7.s }[0], [x20], #0x4\n"
+ "ld1 { v9.s }[0], [x19], #0x4\n"
"14:" // Output channel oddments: Load quantization parameters: With left shift: Bit 1: End
"b 18f\n"
"15:" // Output channel oddments: Load quantization parameters: No left shift
"tbz %x[n_output_channels], #1, 16f\n"
- "ld1 { v9.d }[0], [x22], #0x8\n"
- "ld1 { v10.d }[0], [x21], #0x8\n"
+ "ld1 { v8.d }[0], [x21], #0x8\n"
+ "ld1 { v7.d }[0], [x20], #0x8\n"
"tbz %x[n_output_channels], #0, 17f\n"
- "ld1 { v9.s }[2], [x22], #0x4\n"
- "ld1 { v10.s }[2], [x21], #0x4\n"
+ "ld1 { v8.s }[2], [x21], #0x4\n"
+ "ld1 { v7.s }[2], [x20], #0x4\n"
"b 17f\n"
"16:" // Output channel oddments: Load quantization parameters: No left shift: Bit 1: Unset
- "ld1 { v9.s }[0], [x22], #0x4\n"
- "ld1 { v10.s }[0], [x21], #0x4\n"
+ "tbz %x[n_output_channels], #0, 17f\n"
+ "ld1 { v8.s }[0], [x21], #0x4\n"
+ "ld1 { v7.s }[0], [x20], #0x4\n"
"17:" // Output channel oddments: Load quantization parameters: No left shift: Bit 1: End
+
"18:" // Output channel oddments: Load quantization parameters: Done
- "ldr s8, [%x[weights]], #0x4\n"
- "mov x20, %x[inptrs]\n"
- "ldp x25, x28, [x20], #0x10\n"
- "lsr x21, %x[kernel_points], #0x1\n"
- "ldr d2, [x25, #0x0]\n"
- "ldr d7, [x28, #0x0]\n"
- "usubl v2.8h, v2.8b, v3.8b\n"
- "usubl v7.8h, v7.8b, v3.8b\n"
- "ssubl v8.8h, v8.8b, v12.8b\n"
- "cbz x21, 22f\n"
- "ldr s6, [%x[weights]], #0x4\n"
- "ldp x25, x28, [x20], #0x10\n"
- "subs x21, x21, #0x1\n"
- "ssubl v6.8h, v6.8b, v12.8b\n"
+ "ldr s17, [%x[weights]], #0x4\n"
+ "ssubl v17.8h, v17.8b, v11.8b\n"
+ "mov x19, %x[inptrs]\n"
+ "ldp x25, x27, [x19], #0x10\n"
+ "lsr x20, %x[kernel_points], #0x1\n"
+ "ldr d3, [x25, #0x0]\n"
+ "usubl v3.8h, v3.8b, v12.8b\n"
+ "ldr d2, [x27, #0x0]\n"
+ "usubl v2.8h, v2.8b, v12.8b\n"
+ "cbz x20, 22f\n"
+ "ldp x25, x27, [x19], #0x10\n"
+ "ldr s16, [%x[weights]], #0x4\n"
+ "ssubl v16.8h, v16.8b, v11.8b\n"
"ldr d1, [x25, #0x0]\n"
- "ldr d0, [x28, #0x0]\n"
- "usubl v1.8h, v1.8b, v3.8b\n"
- "usubl v0.8h, v0.8b, v3.8b\n"
+ "subs x20, x20, #0x1\n"
+ "usubl v1.8h, v1.8b, v12.8b\n"
+ "ldr d0, [x27, #0x0]\n"
+ "usubl v0.8h, v0.8b, v12.8b\n"
"beq 20f\n"
"19:" // Output channel oddments: Kernel loop
- "ldp x25, x28, [x20], #0x10\n"
- "smlal v16.4s, v8.4h, v2.h[0]\n"
- "smlal v17.4s, v8.4h, v2.h[1]\n"
- "subs x21, x21, #0x1\n"
- "smlal v18.4s, v8.4h, v2.h[2]\n"
- "smlal v19.4s, v8.4h, v2.h[3]\n"
- "smlal v20.4s, v8.4h, v2.h[4]\n"
- "smlal v21.4s, v8.4h, v2.h[5]\n"
- "smlal v22.4s, v8.4h, v2.h[6]\n"
- "smlal v23.4s, v8.4h, v2.h[7]\n"
- "ldr d2, [x25, #0x0]\n"
- "usubl v2.8h, v2.8b, v3.8b\n"
- "smlal v24.4s, v8.4h, v7.h[0]\n"
- "smlal v25.4s, v8.4h, v7.h[1]\n"
- "smlal v26.4s, v8.4h, v7.h[2]\n"
- "smlal v27.4s, v8.4h, v7.h[3]\n"
- "smlal v28.4s, v8.4h, v7.h[4]\n"
- "smlal v29.4s, v8.4h, v7.h[5]\n"
- "smlal v30.4s, v8.4h, v7.h[6]\n"
- "smlal v31.4s, v8.4h, v7.h[7]\n"
- "ldr d7, [x28, #0x0]\n"
- "ldr s8, [%x[weights]], #0x4\n"
- "ldp x25, x28, [x20], #0x10\n"
- "smlal v16.4s, v6.4h, v1.h[0]\n"
- "smlal v17.4s, v6.4h, v1.h[1]\n"
- "usubl v7.8h, v7.8b, v3.8b\n"
- "smlal v18.4s, v6.4h, v1.h[2]\n"
- "smlal v19.4s, v6.4h, v1.h[3]\n"
- "ssubl v8.8h, v8.8b, v12.8b\n"
- "smlal v20.4s, v6.4h, v1.h[4]\n"
- "smlal v21.4s, v6.4h, v1.h[5]\n"
- "smlal v22.4s, v6.4h, v1.h[6]\n"
- "smlal v23.4s, v6.4h, v1.h[7]\n"
+ "smlal v6.4s, v17.4h, v3.h[0]\n"
+ "ldp x25, x27, [x19], #0x10\n"
+ "subs x20, x20, #0x1\n"
+ "smlal v5.4s, v17.4h, v3.h[1]\n"
+ "smlal v4.4s, v17.4h, v3.h[2]\n"
+ "smlal v31.4s, v17.4h, v3.h[3]\n"
+ "smlal v30.4s, v17.4h, v3.h[4]\n"
+ "smlal v29.4s, v17.4h, v3.h[5]\n"
+ "smlal v28.4s, v17.4h, v3.h[6]\n"
+ "smlal v27.4s, v17.4h, v3.h[7]\n"
+ "ldr d3, [x25, #0x0]\n"
+ "smlal v26.4s, v17.4h, v2.h[0]\n"
+ "smlal v25.4s, v17.4h, v2.h[1]\n"
+ "smlal v24.4s, v17.4h, v2.h[2]\n"
+ "smlal v23.4s, v17.4h, v2.h[3]\n"
+ "smlal v22.4s, v17.4h, v2.h[4]\n"
+ "smlal v21.4s, v17.4h, v2.h[5]\n"
+ "smlal v20.4s, v17.4h, v2.h[6]\n"
+ "smlal v19.4s, v17.4h, v2.h[7]\n"
+ "ldr d2, [x27, #0x0]\n"
+ "usubl v3.8h, v3.8b, v12.8b\n"
+ "ldr s17, [%x[weights]], #0x4\n"
+ "smlal v6.4s, v16.4h, v1.h[0]\n"
+ "ldp x25, x27, [x19], #0x10\n"
+ "smlal v5.4s, v16.4h, v1.h[1]\n"
+ "smlal v4.4s, v16.4h, v1.h[2]\n"
+ "usubl v2.8h, v2.8b, v12.8b\n"
+ "ssubl v17.8h, v17.8b, v11.8b\n"
+ "smlal v31.4s, v16.4h, v1.h[3]\n"
+ "smlal v30.4s, v16.4h, v1.h[4]\n"
+ "smlal v29.4s, v16.4h, v1.h[5]\n"
+ "smlal v28.4s, v16.4h, v1.h[6]\n"
+ "smlal v27.4s, v16.4h, v1.h[7]\n"
"ldr d1, [x25, #0x0]\n"
- "usubl v1.8h, v1.8b, v3.8b\n"
- "smlal v24.4s, v6.4h, v0.h[0]\n"
- "smlal v25.4s, v6.4h, v0.h[1]\n"
- "smlal v26.4s, v6.4h, v0.h[2]\n"
- "smlal v27.4s, v6.4h, v0.h[3]\n"
- "smlal v28.4s, v6.4h, v0.h[4]\n"
- "smlal v29.4s, v6.4h, v0.h[5]\n"
- "smlal v30.4s, v6.4h, v0.h[6]\n"
- "smlal v31.4s, v6.4h, v0.h[7]\n"
- "ldr d0, [x28, #0x0]\n"
- "ldr s6, [%x[weights]], #0x4\n"
- "usubl v0.8h, v0.8b, v3.8b\n"
- "ssubl v6.8h, v6.8b, v12.8b\n"
+ "smlal v26.4s, v16.4h, v0.h[0]\n"
+ "smlal v25.4s, v16.4h, v0.h[1]\n"
+ "smlal v24.4s, v16.4h, v0.h[2]\n"
+ "smlal v23.4s, v16.4h, v0.h[3]\n"
+ "smlal v22.4s, v16.4h, v0.h[4]\n"
+ "smlal v21.4s, v16.4h, v0.h[5]\n"
+ "smlal v20.4s, v16.4h, v0.h[6]\n"
+ "smlal v19.4s, v16.4h, v0.h[7]\n"
+ "ldr d0, [x27, #0x0]\n"
+ "usubl v1.8h, v1.8b, v12.8b\n"
+ "ldr s16, [%x[weights]], #0x4\n"
+ "usubl v0.8h, v0.8b, v12.8b\n"
+ "ssubl v16.8h, v16.8b, v11.8b\n"
"bgt 19b\n"
"20:" // Output channel oddments: Kernel loop tail
"tbnz %x[kernel_points], #0, 21f\n"
- "smlal v16.4s, v8.4h, v2.h[0]\n"
- "smlal v17.4s, v8.4h, v2.h[1]\n"
- "smlal v18.4s, v8.4h, v2.h[2]\n"
- "smlal v19.4s, v8.4h, v2.h[3]\n"
- "smlal v20.4s, v8.4h, v2.h[4]\n"
- "smlal v21.4s, v8.4h, v2.h[5]\n"
- "smlal v22.4s, v8.4h, v2.h[6]\n"
- "smlal v23.4s, v8.4h, v2.h[7]\n"
- "smlal v24.4s, v8.4h, v7.h[0]\n"
- "smlal v25.4s, v8.4h, v7.h[1]\n"
- "smlal v26.4s, v8.4h, v7.h[2]\n"
- "smlal v27.4s, v8.4h, v7.h[3]\n"
- "smlal v28.4s, v8.4h, v7.h[4]\n"
- "smlal v29.4s, v8.4h, v7.h[5]\n"
- "smlal v30.4s, v8.4h, v7.h[6]\n"
- "smlal v31.4s, v8.4h, v7.h[7]\n"
- "smlal v16.4s, v6.4h, v1.h[0]\n"
- "smlal v17.4s, v6.4h, v1.h[1]\n"
- "smlal v18.4s, v6.4h, v1.h[2]\n"
- "smlal v19.4s, v6.4h, v1.h[3]\n"
- "smlal v20.4s, v6.4h, v1.h[4]\n"
- "smlal v21.4s, v6.4h, v1.h[5]\n"
- "smlal v22.4s, v6.4h, v1.h[6]\n"
- "smlal v23.4s, v6.4h, v1.h[7]\n"
- "smlal v24.4s, v6.4h, v0.h[0]\n"
- "smlal v25.4s, v6.4h, v0.h[1]\n"
- "smlal v26.4s, v6.4h, v0.h[2]\n"
- "smlal v27.4s, v6.4h, v0.h[3]\n"
- "smlal v28.4s, v6.4h, v0.h[4]\n"
- "smlal v29.4s, v6.4h, v0.h[5]\n"
- "smlal v30.4s, v6.4h, v0.h[6]\n"
- "smlal v31.4s, v6.4h, v0.h[7]\n"
+ "smlal v6.4s, v17.4h, v3.h[0]\n"
+ "smlal v5.4s, v17.4h, v3.h[1]\n"
+ "smlal v4.4s, v17.4h, v3.h[2]\n"
+ "smlal v31.4s, v17.4h, v3.h[3]\n"
+ "smlal v30.4s, v17.4h, v3.h[4]\n"
+ "smlal v29.4s, v17.4h, v3.h[5]\n"
+ "smlal v28.4s, v17.4h, v3.h[6]\n"
+ "smlal v27.4s, v17.4h, v3.h[7]\n"
+ "smlal v26.4s, v17.4h, v2.h[0]\n"
+ "smlal v25.4s, v17.4h, v2.h[1]\n"
+ "smlal v24.4s, v17.4h, v2.h[2]\n"
+ "smlal v23.4s, v17.4h, v2.h[3]\n"
+ "smlal v22.4s, v17.4h, v2.h[4]\n"
+ "smlal v21.4s, v17.4h, v2.h[5]\n"
+ "smlal v20.4s, v17.4h, v2.h[6]\n"
+ "smlal v19.4s, v17.4h, v2.h[7]\n"
+ "smlal v6.4s, v16.4h, v1.h[0]\n"
+ "smlal v5.4s, v16.4h, v1.h[1]\n"
+ "smlal v4.4s, v16.4h, v1.h[2]\n"
+ "smlal v31.4s, v16.4h, v1.h[3]\n"
+ "smlal v30.4s, v16.4h, v1.h[4]\n"
+ "smlal v29.4s, v16.4h, v1.h[5]\n"
+ "smlal v28.4s, v16.4h, v1.h[6]\n"
+ "smlal v27.4s, v16.4h, v1.h[7]\n"
+ "smlal v26.4s, v16.4h, v0.h[0]\n"
+ "smlal v25.4s, v16.4h, v0.h[1]\n"
+ "smlal v24.4s, v16.4h, v0.h[2]\n"
+ "smlal v23.4s, v16.4h, v0.h[3]\n"
+ "smlal v22.4s, v16.4h, v0.h[4]\n"
+ "smlal v21.4s, v16.4h, v0.h[5]\n"
+ "smlal v20.4s, v16.4h, v0.h[6]\n"
+ "smlal v19.4s, v16.4h, v0.h[7]\n"
"b 23f\n"
"21:" // Output channel oddments: Odd tail
- "ldp x25, x28, [x20], #0x10\n"
- "smlal v16.4s, v8.4h, v2.h[0]\n"
- "smlal v17.4s, v8.4h, v2.h[1]\n"
- "smlal v18.4s, v8.4h, v2.h[2]\n"
- "smlal v19.4s, v8.4h, v2.h[3]\n"
- "smlal v20.4s, v8.4h, v2.h[4]\n"
- "smlal v21.4s, v8.4h, v2.h[5]\n"
- "smlal v22.4s, v8.4h, v2.h[6]\n"
- "smlal v23.4s, v8.4h, v2.h[7]\n"
- "ldr d2, [x25, #0x0]\n"
- "usubl v2.8h, v2.8b, v3.8b\n"
- "smlal v24.4s, v8.4h, v7.h[0]\n"
- "smlal v25.4s, v8.4h, v7.h[1]\n"
- "smlal v26.4s, v8.4h, v7.h[2]\n"
- "smlal v27.4s, v8.4h, v7.h[3]\n"
- "smlal v28.4s, v8.4h, v7.h[4]\n"
- "smlal v29.4s, v8.4h, v7.h[5]\n"
- "smlal v30.4s, v8.4h, v7.h[6]\n"
- "smlal v31.4s, v8.4h, v7.h[7]\n"
- "ldr d7, [x28, #0x0]\n"
- "ldr s8, [%x[weights]], #0x4\n"
- "smlal v16.4s, v6.4h, v1.h[0]\n"
- "smlal v17.4s, v6.4h, v1.h[1]\n"
- "usubl v7.8h, v7.8b, v3.8b\n"
- "smlal v18.4s, v6.4h, v1.h[2]\n"
- "smlal v19.4s, v6.4h, v1.h[3]\n"
- "ssubl v8.8h, v8.8b, v12.8b\n"
- "smlal v20.4s, v6.4h, v1.h[4]\n"
- "smlal v21.4s, v6.4h, v1.h[5]\n"
- "smlal v22.4s, v6.4h, v1.h[6]\n"
- "smlal v23.4s, v6.4h, v1.h[7]\n"
- "smlal v24.4s, v6.4h, v0.h[0]\n"
- "smlal v25.4s, v6.4h, v0.h[1]\n"
- "smlal v26.4s, v6.4h, v0.h[2]\n"
- "smlal v27.4s, v6.4h, v0.h[3]\n"
- "smlal v28.4s, v6.4h, v0.h[4]\n"
- "smlal v29.4s, v6.4h, v0.h[5]\n"
- "smlal v30.4s, v6.4h, v0.h[6]\n"
- "smlal v31.4s, v6.4h, v0.h[7]\n"
- "smlal v16.4s, v8.4h, v2.h[0]\n"
- "smlal v17.4s, v8.4h, v2.h[1]\n"
- "smlal v18.4s, v8.4h, v2.h[2]\n"
- "smlal v19.4s, v8.4h, v2.h[3]\n"
- "smlal v20.4s, v8.4h, v2.h[4]\n"
- "smlal v21.4s, v8.4h, v2.h[5]\n"
- "smlal v22.4s, v8.4h, v2.h[6]\n"
- "smlal v23.4s, v8.4h, v2.h[7]\n"
- "smlal v24.4s, v8.4h, v7.h[0]\n"
- "smlal v25.4s, v8.4h, v7.h[1]\n"
- "smlal v26.4s, v8.4h, v7.h[2]\n"
- "smlal v27.4s, v8.4h, v7.h[3]\n"
- "smlal v28.4s, v8.4h, v7.h[4]\n"
- "smlal v29.4s, v8.4h, v7.h[5]\n"
- "smlal v30.4s, v8.4h, v7.h[6]\n"
- "smlal v31.4s, v8.4h, v7.h[7]\n"
+ "smlal v6.4s, v17.4h, v3.h[0]\n"
+ "ldp x25, x27, [x19], #0x10\n"
+ "smlal v5.4s, v17.4h, v3.h[1]\n"
+ "smlal v4.4s, v17.4h, v3.h[2]\n"
+ "smlal v31.4s, v17.4h, v3.h[3]\n"
+ "smlal v30.4s, v17.4h, v3.h[4]\n"
+ "smlal v29.4s, v17.4h, v3.h[5]\n"
+ "smlal v28.4s, v17.4h, v3.h[6]\n"
+ "smlal v27.4s, v17.4h, v3.h[7]\n"
+ "ldr d3, [x25, #0x0]\n"
+ "smlal v26.4s, v17.4h, v2.h[0]\n"
+ "smlal v25.4s, v17.4h, v2.h[1]\n"
+ "smlal v24.4s, v17.4h, v2.h[2]\n"
+ "smlal v23.4s, v17.4h, v2.h[3]\n"
+ "smlal v22.4s, v17.4h, v2.h[4]\n"
+ "smlal v21.4s, v17.4h, v2.h[5]\n"
+ "smlal v20.4s, v17.4h, v2.h[6]\n"
+ "smlal v19.4s, v17.4h, v2.h[7]\n"
+ "ldr d2, [x27, #0x0]\n"
+ "usubl v3.8h, v3.8b, v12.8b\n"
+ "ldr s17, [%x[weights]], #0x4\n"
+ "smlal v6.4s, v16.4h, v1.h[0]\n"
+ "smlal v5.4s, v16.4h, v1.h[1]\n"
+ "smlal v4.4s, v16.4h, v1.h[2]\n"
+ "usubl v2.8h, v2.8b, v12.8b\n"
+ "ssubl v17.8h, v17.8b, v11.8b\n"
+ "smlal v31.4s, v16.4h, v1.h[3]\n"
+ "smlal v30.4s, v16.4h, v1.h[4]\n"
+ "smlal v29.4s, v16.4h, v1.h[5]\n"
+ "smlal v28.4s, v16.4h, v1.h[6]\n"
+ "smlal v27.4s, v16.4h, v1.h[7]\n"
+ "smlal v26.4s, v16.4h, v0.h[0]\n"
+ "smlal v25.4s, v16.4h, v0.h[1]\n"
+ "smlal v24.4s, v16.4h, v0.h[2]\n"
+ "smlal v23.4s, v16.4h, v0.h[3]\n"
+ "smlal v22.4s, v16.4h, v0.h[4]\n"
+ "smlal v21.4s, v16.4h, v0.h[5]\n"
+ "smlal v20.4s, v16.4h, v0.h[6]\n"
+ "smlal v19.4s, v16.4h, v0.h[7]\n"
+ "smlal v6.4s, v17.4h, v3.h[0]\n"
+ "smlal v5.4s, v17.4h, v3.h[1]\n"
+ "smlal v4.4s, v17.4h, v3.h[2]\n"
+ "smlal v31.4s, v17.4h, v3.h[3]\n"
+ "smlal v30.4s, v17.4h, v3.h[4]\n"
+ "smlal v29.4s, v17.4h, v3.h[5]\n"
+ "smlal v28.4s, v17.4h, v3.h[6]\n"
+ "smlal v27.4s, v17.4h, v3.h[7]\n"
+ "smlal v26.4s, v17.4h, v2.h[0]\n"
+ "smlal v25.4s, v17.4h, v2.h[1]\n"
+ "smlal v24.4s, v17.4h, v2.h[2]\n"
+ "smlal v23.4s, v17.4h, v2.h[3]\n"
+ "smlal v22.4s, v17.4h, v2.h[4]\n"
+ "smlal v21.4s, v17.4h, v2.h[5]\n"
+ "smlal v20.4s, v17.4h, v2.h[6]\n"
+ "smlal v19.4s, v17.4h, v2.h[7]\n"
"b 23f\n"
"22:" // Output channel oddments: Single kernel point
- "smlal v16.4s, v8.4h, v2.h[0]\n"
- "smlal v17.4s, v8.4h, v2.h[1]\n"
- "smlal v18.4s, v8.4h, v2.h[2]\n"
- "smlal v19.4s, v8.4h, v2.h[3]\n"
- "smlal v20.4s, v8.4h, v2.h[4]\n"
- "smlal v21.4s, v8.4h, v2.h[5]\n"
- "smlal v22.4s, v8.4h, v2.h[6]\n"
- "smlal v23.4s, v8.4h, v2.h[7]\n"
- "smlal v24.4s, v8.4h, v7.h[0]\n"
- "smlal v25.4s, v8.4h, v7.h[1]\n"
- "smlal v26.4s, v8.4h, v7.h[2]\n"
- "smlal v27.4s, v8.4h, v7.h[3]\n"
- "smlal v28.4s, v8.4h, v7.h[4]\n"
- "smlal v29.4s, v8.4h, v7.h[5]\n"
- "smlal v30.4s, v8.4h, v7.h[6]\n"
- "smlal v31.4s, v8.4h, v7.h[7]\n"
+ "smlal v6.4s, v17.4h, v3.h[0]\n"
+ "smlal v5.4s, v17.4h, v3.h[1]\n"
+ "smlal v4.4s, v17.4h, v3.h[2]\n"
+ "smlal v31.4s, v17.4h, v3.h[3]\n"
+ "smlal v30.4s, v17.4h, v3.h[4]\n"
+ "smlal v29.4s, v17.4h, v3.h[5]\n"
+ "smlal v28.4s, v17.4h, v3.h[6]\n"
+ "smlal v27.4s, v17.4h, v3.h[7]\n"
+ "smlal v26.4s, v17.4h, v2.h[0]\n"
+ "smlal v25.4s, v17.4h, v2.h[1]\n"
+ "smlal v24.4s, v17.4h, v2.h[2]\n"
+ "smlal v23.4s, v17.4h, v2.h[3]\n"
+ "smlal v22.4s, v17.4h, v2.h[4]\n"
+ "smlal v21.4s, v17.4h, v2.h[5]\n"
+ "smlal v20.4s, v17.4h, v2.h[6]\n"
+ "smlal v19.4s, v17.4h, v2.h[7]\n"
"23:" // Output channel oddments: Done
- "sshl v16.4s, v16.4s, v15.4s\n"
- "sshl v17.4s, v17.4s, v15.4s\n"
- "sshl v18.4s, v18.4s, v15.4s\n"
- "sshl v19.4s, v19.4s, v15.4s\n"
- "sqrdmulh v16.4s, v16.4s, v9.4s\n"
- "sqrdmulh v17.4s, v17.4s, v9.4s\n"
- "sqrdmulh v18.4s, v18.4s, v9.4s\n"
- "sqrdmulh v19.4s, v19.4s, v9.4s\n"
- "and v5.16b, v16.16b, v10.16b\n"
- "and v4.16b, v17.16b, v10.16b\n"
- "and v2.16b, v18.16b, v10.16b\n"
- "and v1.16b, v19.16b, v10.16b\n"
- "sshl v20.4s, v20.4s, v15.4s\n"
- "sshl v21.4s, v21.4s, v15.4s\n"
- "sshl v22.4s, v22.4s, v15.4s\n"
- "sshl v23.4s, v23.4s, v15.4s\n"
- "sshl v24.4s, v24.4s, v15.4s\n"
- "sshl v25.4s, v25.4s, v15.4s\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "sqrdmulh v20.4s, v20.4s, v9.4s\n"
- "sqrdmulh v21.4s, v21.4s, v9.4s\n"
- "sqrdmulh v22.4s, v22.4s, v9.4s\n"
- "sqrdmulh v23.4s, v23.4s, v9.4s\n"
- "sqrdmulh v24.4s, v24.4s, v9.4s\n"
- "sqrdmulh v25.4s, v25.4s, v9.4s\n"
- "sqadd v16.4s, v16.4s, v5.4s\n"
- "sqadd v17.4s, v17.4s, v4.4s\n"
- "sqadd v18.4s, v18.4s, v2.4s\n"
- "sqadd v19.4s, v19.4s, v1.4s\n"
- "and v8.16b, v20.16b, v10.16b\n"
- "and v0.16b, v21.16b, v10.16b\n"
- "and v5.16b, v22.16b, v10.16b\n"
- "and v4.16b, v23.16b, v10.16b\n"
- "and v2.16b, v24.16b, v10.16b\n"
- "and v1.16b, v25.16b, v10.16b\n"
- "sshl v26.4s, v26.4s, v15.4s\n"
- "sshl v27.4s, v27.4s, v15.4s\n"
- "sshl v28.4s, v28.4s, v15.4s\n"
- "sshl v29.4s, v29.4s, v15.4s\n"
- "sshl v30.4s, v30.4s, v15.4s\n"
- "sshl v31.4s, v31.4s, v15.4s\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v9.4s\n"
- "sqrdmulh v27.4s, v27.4s, v9.4s\n"
- "sqrdmulh v28.4s, v28.4s, v9.4s\n"
- "sqrdmulh v29.4s, v29.4s, v9.4s\n"
- "sqrdmulh v30.4s, v30.4s, v9.4s\n"
- "sqrdmulh v31.4s, v31.4s, v9.4s\n"
- "sqadd v20.4s, v20.4s, v8.4s\n"
- "sqadd v21.4s, v21.4s, v0.4s\n"
- "sqadd v22.4s, v22.4s, v5.4s\n"
- "sqadd v23.4s, v23.4s, v4.4s\n"
- "sqadd v24.4s, v24.4s, v2.4s\n"
- "sqadd v25.4s, v25.4s, v1.4s\n"
- "and v8.16b, v26.16b, v10.16b\n"
- "and v0.16b, v27.16b, v10.16b\n"
- "and v5.16b, v28.16b, v10.16b\n"
- "and v4.16b, v29.16b, v10.16b\n"
- "and v2.16b, v30.16b, v10.16b\n"
- "and v1.16b, v31.16b, v10.16b\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sshr v0.4s, v0.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "sqadd v26.4s, v26.4s, v8.4s\n"
- "sqadd v27.4s, v27.4s, v0.4s\n"
- "sqadd v28.4s, v28.4s, v5.4s\n"
- "sqadd v29.4s, v29.4s, v4.4s\n"
- "sqadd v30.4s, v30.4s, v2.4s\n"
- "sqadd v31.4s, v31.4s, v1.4s\n"
- "srshl v16.4s, v16.4s, v10.4s\n"
- "srshl v17.4s, v17.4s, v10.4s\n"
- "srshl v18.4s, v18.4s, v10.4s\n"
- "srshl v19.4s, v19.4s, v10.4s\n"
- "srshl v20.4s, v20.4s, v10.4s\n"
- "srshl v21.4s, v21.4s, v10.4s\n"
- "srshl v22.4s, v22.4s, v10.4s\n"
- "srshl v23.4s, v23.4s, v10.4s\n"
- "srshl v24.4s, v24.4s, v10.4s\n"
- "srshl v25.4s, v25.4s, v10.4s\n"
- "srshl v26.4s, v26.4s, v10.4s\n"
- "srshl v27.4s, v27.4s, v10.4s\n"
- "srshl v28.4s, v28.4s, v10.4s\n"
- "srshl v29.4s, v29.4s, v10.4s\n"
- "srshl v30.4s, v30.4s, v10.4s\n"
- "srshl v31.4s, v31.4s, v10.4s\n"
- "add v16.4s, v16.4s, v14.4s\n"
- "add v17.4s, v17.4s, v14.4s\n"
- "add v18.4s, v18.4s, v14.4s\n"
- "add v19.4s, v19.4s, v14.4s\n"
- "add v20.4s, v20.4s, v14.4s\n"
- "add v21.4s, v21.4s, v14.4s\n"
- "add v22.4s, v22.4s, v14.4s\n"
- "add v23.4s, v23.4s, v14.4s\n"
- "add v24.4s, v24.4s, v14.4s\n"
- "add v25.4s, v25.4s, v14.4s\n"
- "add v26.4s, v26.4s, v14.4s\n"
- "add v27.4s, v27.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "smin v16.4s, v16.4s, v11.4s\n"
- "smin v17.4s, v17.4s, v11.4s\n"
- "smin v18.4s, v18.4s, v11.4s\n"
- "smin v19.4s, v19.4s, v11.4s\n"
- "smin v20.4s, v20.4s, v11.4s\n"
- "smin v21.4s, v21.4s, v11.4s\n"
- "smin v22.4s, v22.4s, v11.4s\n"
- "smin v23.4s, v23.4s, v11.4s\n"
- "smin v24.4s, v24.4s, v11.4s\n"
- "smin v25.4s, v25.4s, v11.4s\n"
- "smin v26.4s, v26.4s, v11.4s\n"
- "smin v27.4s, v27.4s, v11.4s\n"
- "smin v28.4s, v28.4s, v11.4s\n"
- "smin v29.4s, v29.4s, v11.4s\n"
- "smin v30.4s, v30.4s, v11.4s\n"
- "smin v31.4s, v31.4s, v11.4s\n"
- "smax v16.4s, v16.4s, v13.4s\n"
- "smax v17.4s, v17.4s, v13.4s\n"
- "smax v18.4s, v18.4s, v13.4s\n"
- "smax v19.4s, v19.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smax v21.4s, v21.4s, v13.4s\n"
- "smax v22.4s, v22.4s, v13.4s\n"
- "smax v23.4s, v23.4s, v13.4s\n"
- "smax v24.4s, v24.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v26.4s, v26.4s, v13.4s\n"
- "smax v27.4s, v27.4s, v13.4s\n"
- "smax v28.4s, v28.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v31.4s, v31.4s, v13.4s\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "uzp1 v17.16b, v17.16b, v17.16b\n"
- "uzp1 v18.16b, v18.16b, v18.16b\n"
- "uzp1 v19.16b, v19.16b, v19.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v22.16b, v22.16b, v22.16b\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "sshl v6.4s, v6.4s, v9.4s\n"
+ "sshl v5.4s, v5.4s, v9.4s\n"
+ "sshl v4.4s, v4.4s, v9.4s\n"
+ "sqrdmulh v6.4s, v6.4s, v8.4s\n"
+ "sqrdmulh v5.4s, v5.4s, v8.4s\n"
+ "sqrdmulh v4.4s, v4.4s, v8.4s\n"
+ "sshl v31.4s, v31.4s, v9.4s\n"
+ "and v18.16b, v6.16b, v7.16b\n"
+ "and v16.16b, v5.16b, v7.16b\n"
+ "and v17.16b, v4.16b, v7.16b\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v6.4s, v6.4s, v18.4s\n"
+ "sqadd v5.4s, v5.4s, v16.4s\n"
+ "sqadd v4.4s, v4.4s, v17.4s\n"
+ "sqrdmulh v31.4s, v31.4s, v8.4s\n"
+ "srshl v6.4s, v6.4s, v7.4s\n"
+ "srshl v5.4s, v5.4s, v7.4s\n"
+ "srshl v4.4s, v4.4s, v7.4s\n"
+ "and v16.16b, v31.16b, v7.16b\n"
+ "add v6.4s, v6.4s, v10.4s\n"
+ "add v5.4s, v5.4s, v10.4s\n"
+ "add v4.4s, v4.4s, v10.4s\n"
+ "smin v6.4s, v6.4s, v13.4s\n"
+ "smin v5.4s, v5.4s, v13.4s\n"
+ "smin v4.4s, v4.4s, v13.4s\n"
+ "smax v6.4s, v6.4s, v14.4s\n"
+ "smax v5.4s, v5.4s, v14.4s\n"
+ "smax v4.4s, v4.4s, v14.4s\n"
+ "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "uzp1 v6.16b, v6.16b, v6.16b\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "uzp1 v4.16b, v4.16b, v4.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "uzp1 v4.16b, v4.16b, v4.16b\n"
+ "sshl v30.4s, v30.4s, v9.4s\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
+ "sqrdmulh v30.4s, v30.4s, v8.4s\n"
+ "sshl v29.4s, v29.4s, v9.4s\n"
+ "sshl v28.4s, v28.4s, v9.4s\n"
+ "srshl v31.4s, v31.4s, v7.4s\n"
+ "and v16.16b, v30.16b, v7.16b\n"
+ "sqrdmulh v29.4s, v29.4s, v8.4s\n"
+ "sqrdmulh v28.4s, v28.4s, v8.4s\n"
+ "add v31.4s, v31.4s, v10.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "and v17.16b, v29.16b, v7.16b\n"
+ "smin v31.4s, v31.4s, v13.4s\n"
+ "sqadd v30.4s, v30.4s, v16.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "smax v31.4s, v31.4s, v14.4s\n"
+ "and v16.16b, v28.16b, v7.16b\n"
+ "srshl v30.4s, v30.4s, v7.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "uzp1 v17.16b, v17.16b, v17.16b\n"
- "uzp1 v18.16b, v18.16b, v18.16b\n"
- "uzp1 v19.16b, v19.16b, v19.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v22.16b, v22.16b, v22.16b\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "sqadd v29.4s, v29.4s, v17.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
+ "add v30.4s, v30.4s, v10.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "srshl v29.4s, v29.4s, v7.4s\n"
+ "smin v30.4s, v30.4s, v13.4s\n"
+ "sqadd v28.4s, v28.4s, v16.4s\n"
+ "sshl v27.4s, v27.4s, v9.4s\n"
+ "smax v30.4s, v30.4s, v14.4s\n"
+ "add v29.4s, v29.4s, v10.4s\n"
+ "srshl v28.4s, v28.4s, v7.4s\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "smin v29.4s, v29.4s, v13.4s\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "add v28.4s, v28.4s, v10.4s\n"
+ "smax v29.4s, v29.4s, v14.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v8.4s\n"
+ "smin v28.4s, v28.4s, v13.4s\n"
+ "uzp1 v29.16b, v29.16b, v29.16b\n"
+ "sshl v26.4s, v26.4s, v9.4s\n"
+ "uzp1 v29.16b, v29.16b, v29.16b\n"
+ "smax v28.4s, v28.4s, v14.4s\n"
+ "and v16.16b, v27.16b, v7.16b\n"
+ "sqrdmulh v26.4s, v26.4s, v8.4s\n"
+ "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "and v17.16b, v26.16b, v7.16b\n"
+ "sqadd v27.4s, v27.4s, v16.4s\n"
+ "sshl v25.4s, v25.4s, v9.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqrdmulh v25.4s, v25.4s, v8.4s\n"
+ "srshl v27.4s, v27.4s, v7.4s\n"
+ "sqadd v26.4s, v26.4s, v17.4s\n"
+ "sshl v24.4s, v24.4s, v9.4s\n"
+ "and v16.16b, v25.16b, v7.16b\n"
+ "add v27.4s, v27.4s, v10.4s\n"
+ "srshl v26.4s, v26.4s, v7.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smin v27.4s, v27.4s, v13.4s\n"
+ "sqrdmulh v24.4s, v24.4s, v8.4s\n"
+ "add v26.4s, v26.4s, v10.4s\n"
+ "smax v27.4s, v27.4s, v14.4s\n"
+ "sqadd v25.4s, v25.4s, v16.4s\n"
+ "smin v26.4s, v26.4s, v13.4s\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "and v17.16b, v24.16b, v7.16b\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "smax v26.4s, v26.4s, v14.4s\n"
+ "srshl v25.4s, v25.4s, v7.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "sshl v23.4s, v23.4s, v9.4s\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "add v25.4s, v25.4s, v10.4s\n"
+ "sqadd v24.4s, v24.4s, v17.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v8.4s\n"
+ "smin v25.4s, v25.4s, v13.4s\n"
+ "sshl v22.4s, v22.4s, v9.4s\n"
+ "srshl v24.4s, v24.4s, v7.4s\n"
+ "smax v25.4s, v25.4s, v14.4s\n"
+ "and v16.16b, v23.16b, v7.16b\n"
+ "sqrdmulh v22.4s, v22.4s, v8.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "add v24.4s, v24.4s, v10.4s\n"
+ "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "smin v24.4s, v24.4s, v13.4s\n"
+ "and v17.16b, v22.16b, v7.16b\n"
+ "sqadd v23.4s, v23.4s, v16.4s\n"
+ "smax v24.4s, v24.4s, v14.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sshl v21.4s, v21.4s, v9.4s\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "srshl v23.4s, v23.4s, v7.4s\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "sqrdmulh v21.4s, v21.4s, v8.4s\n"
+ "add v23.4s, v23.4s, v10.4s\n"
+ "sshl v20.4s, v20.4s, v9.4s\n"
+ "srshl v22.4s, v22.4s, v7.4s\n"
+ "smin v23.4s, v23.4s, v13.4s\n"
+ "and v16.16b, v21.16b, v7.16b\n"
+ "sqrdmulh v20.4s, v20.4s, v8.4s\n"
+ "smax v23.4s, v23.4s, v14.4s\n"
+ "add v22.4s, v22.4s, v10.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "smin v22.4s, v22.4s, v13.4s\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "sqadd v21.4s, v21.4s, v16.4s\n"
+ "smax v22.4s, v22.4s, v14.4s\n"
+ "and v16.16b, v20.16b, v7.16b\n"
+ "sshl v19.4s, v19.4s, v9.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "srshl v21.4s, v21.4s, v7.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v19.4s, v19.4s, v8.4s\n"
+ "add v21.4s, v21.4s, v10.4s\n"
+ "sqadd v20.4s, v20.4s, v16.4s\n"
+ "smin v21.4s, v21.4s, v13.4s\n"
+ "and v16.16b, v19.16b, v7.16b\n"
+ "srshl v20.4s, v20.4s, v7.4s\n"
+ "smax v21.4s, v21.4s, v14.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "add v20.4s, v20.4s, v10.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "smin v20.4s, v20.4s, v13.4s\n"
+ "srshl v19.4s, v19.4s, v7.4s\n"
+ "smax v20.4s, v20.4s, v14.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "add v19.4s, v19.4s, v10.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "smin v19.4s, v19.4s, v13.4s\n"
+ "smax v19.4s, v19.4s, v14.4s\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
"tbz %x[n_output_channels], #1, 24f\n"
- "ldr x20, [%x[outptrs], #0x0]\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "add x19, x19, x9\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
"add x20, x20, x9\n"
+ "st1 { v6.h }[0], [x19]\n"
"add x21, x21, x9\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
+ "st1 { v5.h }[0], [x20]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
"add x22, x22, x9\n"
+ "st1 { v4.h }[0], [x21]\n"
"add x23, x23, x9\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
+ "st1 { v31.h }[0], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
"add x24, x24, x9\n"
+ "st1 { v30.h }[0], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
"add x25, x25, x9\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
+ "st1 { v29.h }[0], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
"add x26, x26, x9\n"
- "add x27, x27, x9\n"
- "st1 { v16.h }[0], [x20]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
+ "st1 { v28.h }[0], [x25]\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "add x19, x19, x9\n"
+ "st1 { v27.h }[0], [x26]\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
"add x20, x20, x9\n"
- "st1 { v17.h }[0], [x21]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
+ "st1 { v26.h }[0], [x19]\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
"add x21, x21, x9\n"
- "st1 { v18.h }[0], [x22]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
+ "st1 { v25.h }[0], [x20]\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
"add x22, x22, x9\n"
- "st1 { v19.h }[0], [x23]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
+ "st1 { v24.h }[0], [x21]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
"add x23, x23, x9\n"
- "st1 { v20.h }[0], [x24]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
+ "st1 { v23.h }[0], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
"add x24, x24, x9\n"
- "st1 { v21.h }[0], [x25]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
+ "st1 { v22.h }[0], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
"add x25, x25, x9\n"
- "st1 { v22.h }[0], [x26]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
+ "st1 { v21.h }[0], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
"add x26, x26, x9\n"
- "st1 { v23.h }[0], [x27]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
- "add x27, x27, x9\n"
+ "st1 { v20.h }[0], [x25]\n"
"add x9, x9, #0x2\n"
- "st1 { v24.h }[0], [x20]\n"
- "st1 { v25.h }[0], [x21]\n"
- "st1 { v26.h }[0], [x22]\n"
- "st1 { v27.h }[0], [x23]\n"
- "st1 { v28.h }[0], [x24]\n"
- "st1 { v29.h }[0], [x25]\n"
- "st1 { v30.h }[0], [x26]\n"
- "st1 { v31.h }[0], [x27]\n"
+ "st1 { v19.h }[0], [x26]\n"
"tbz %x[n_output_channels], #0, 25f\n"
- "ldr x20, [%x[outptrs], #0x0]\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "add x19, x19, x9\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
"add x20, x20, x9\n"
+ "st1 { v6.b }[2], [x19]\n"
"add x21, x21, x9\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
+ "st1 { v5.b }[2], [x20]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
"add x22, x22, x9\n"
+ "st1 { v4.b }[2], [x21]\n"
"add x23, x23, x9\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
+ "st1 { v31.b }[2], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
"add x24, x24, x9\n"
+ "st1 { v30.b }[2], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
"add x25, x25, x9\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
+ "st1 { v29.b }[2], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
"add x26, x26, x9\n"
- "add x27, x27, x9\n"
- "st1 { v16.b }[2], [x20]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
+ "st1 { v28.b }[2], [x25]\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "add x19, x19, x9\n"
+ "st1 { v27.b }[2], [x26]\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
"add x20, x20, x9\n"
- "st1 { v17.b }[2], [x21]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
+ "st1 { v26.b }[2], [x19]\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
"add x21, x21, x9\n"
- "st1 { v18.b }[2], [x22]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
+ "st1 { v25.b }[2], [x20]\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
"add x22, x22, x9\n"
- "st1 { v19.b }[2], [x23]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
+ "st1 { v24.b }[2], [x21]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
"add x23, x23, x9\n"
- "st1 { v20.b }[2], [x24]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
+ "st1 { v23.b }[2], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
"add x24, x24, x9\n"
- "st1 { v21.b }[2], [x25]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
+ "st1 { v22.b }[2], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
"add x25, x25, x9\n"
- "st1 { v22.b }[2], [x26]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
+ "st1 { v21.b }[2], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
"add x26, x26, x9\n"
- "st1 { v23.b }[2], [x27]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
- "add x27, x27, x9\n"
- "st1 { v24.b }[2], [x20]\n"
- "st1 { v25.b }[2], [x21]\n"
- "st1 { v26.b }[2], [x22]\n"
- "st1 { v27.b }[2], [x23]\n"
- "st1 { v28.b }[2], [x24]\n"
- "st1 { v29.b }[2], [x25]\n"
- "st1 { v30.b }[2], [x26]\n"
- "st1 { v31.b }[2], [x27]\n"
+ "st1 { v20.b }[2], [x25]\n"
+ "st1 { v19.b }[2], [x26]\n"
"b 25f\n"
"24:" // Output channel oddments: Done: Store: Bit 1: Unset
- "ldr x20, [%x[outptrs], #0x0]\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
+ "tbz %x[n_output_channels], #0, 25f\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "add x19, x19, x9\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
"add x20, x20, x9\n"
+ "st1 { v6.b }[0], [x19]\n"
"add x21, x21, x9\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
+ "st1 { v5.b }[0], [x20]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
"add x22, x22, x9\n"
+ "st1 { v4.b }[0], [x21]\n"
"add x23, x23, x9\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
+ "st1 { v31.b }[0], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
"add x24, x24, x9\n"
+ "st1 { v30.b }[0], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
"add x25, x25, x9\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
+ "st1 { v29.b }[0], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
"add x26, x26, x9\n"
- "add x27, x27, x9\n"
- "st1 { v16.b }[0], [x20]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
+ "st1 { v28.b }[0], [x25]\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "add x19, x19, x9\n"
+ "st1 { v27.b }[0], [x26]\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
"add x20, x20, x9\n"
- "st1 { v17.b }[0], [x21]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
+ "st1 { v26.b }[0], [x19]\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
"add x21, x21, x9\n"
- "st1 { v18.b }[0], [x22]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
+ "st1 { v25.b }[0], [x20]\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
"add x22, x22, x9\n"
- "st1 { v19.b }[0], [x23]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
+ "st1 { v24.b }[0], [x21]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
"add x23, x23, x9\n"
- "st1 { v20.b }[0], [x24]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
+ "st1 { v23.b }[0], [x22]\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
"add x24, x24, x9\n"
- "st1 { v21.b }[0], [x25]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
+ "st1 { v22.b }[0], [x23]\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
"add x25, x25, x9\n"
- "st1 { v22.b }[0], [x26]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
+ "st1 { v21.b }[0], [x24]\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
"add x26, x26, x9\n"
- "st1 { v23.b }[0], [x27]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
- "add x27, x27, x9\n"
- "st1 { v24.b }[0], [x20]\n"
- "st1 { v25.b }[0], [x21]\n"
- "st1 { v26.b }[0], [x22]\n"
- "st1 { v27.b }[0], [x23]\n"
- "st1 { v28.b }[0], [x24]\n"
- "st1 { v29.b }[0], [x25]\n"
- "st1 { v30.b }[0], [x26]\n"
- "st1 { v31.b }[0], [x27]\n"
+ "st1 { v20.b }[0], [x25]\n"
+ "st1 { v19.b }[0], [x26]\n"
"25:" // Output channel oddments: Done: Store: Bit 1: End
"26:" // Done
: [weights] "+&r" (weights)
: [bias] "r" (bias), [inptrs] "r" (inptrs), [kernel_points] "r" ((uint64_t) kernel_points), [n_output_channels] "r" ((uint64_t) n_output_channels), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_layer_left_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_left_shift)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [outptrs] "r" (outptrs), [qp] "r" (&qp), [rq_left_shift_ptr] "r" (per_channel_left_shifts), [rq_mul_ptr] "r" (per_channel_muls), [rq_right_shift_ptr] "r" (per_channel_right_shifts)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
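The reverted oddment tail above ends with the standard per-layer requantisation sequence (sshl, sqrdmulh, the and/sshr/sqadd rounding fixup, srshl, add, smin/smax, uzp1). The scalar sketch below mirrors that pipeline for one accumulator lane; it is illustrative only, not library code. The register mapping is read off this tail (v9 = per-layer left shift, v8 = multiplier, v7 = right shift held negated for srshl, v10 = c_offset, v13 = maxval, v14 = minval — matching the Requantize32 fields in the operand list), `requantize` and its parameters are made-up names, `right_shift` is the positive magnitude, and saturation on the doubling multiply is omitted for brevity.

#include <algorithm>
#include <cstdint>

// Scalar sketch of the requantisation tail: sshl -> sqrdmulh ->
// and/sshr/sqadd -> srshl -> add -> smin/smax -> uzp1. Illustrative only.
int8_t requantize(int32_t acc, int32_t left_shift, int32_t multiplier,
                  int32_t right_shift, int32_t c_offset,
                  int32_t minval, int32_t maxval)
{
    int32_t v = acc << left_shift;              // sshl by v9
    int64_t prod = (int64_t)v * multiplier;     // sqrdmulh with v8:
    v = (int32_t)((prod + (1LL << 30)) >> 31);  //   doubling high half, rounded
    if (right_shift > 0)                        // srshl by a negative amount in v7
    {
        int32_t round = 1 << (right_shift - 1);
        // The and/sshr/sqadd trio nudges negative accumulators down by one
        // so that ties round away from zero instead of towards it.
        v = (v + (v >= 0 ? round : round - 1)) >> right_shift;
    }
    v += c_offset;                              // add v10
    v = std::min(std::max(v, minval), maxval);  // smin v13 / smax v14
    return (int8_t)v;                           // uzp1 narrows to bytes
}

The tbz tests on n_output_channels that follow the tail then store the surviving byte lanes: bit 1 selects a two-byte (.h) store, bit 0 a single-byte (.b) store, covering the oddment channel counts 1-3.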
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
index 2ee961db15..4c4247834c 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -90,243 +90,243 @@ void sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
".inst 0xd503477f // SMSTART ZA\n"
"ptrue p3.b\n"
".inst 0x25207810 // ptrue pn8.b\n"
- "mov x4, #0x0\n"
"mov x5, #0x0\n"
+ "mov x6, #0x0\n"
"1:" // Tile loop
- "str x4, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "mov x22, #0x2\n"
- "str x5, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x21, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "mul x20, x4, x21\n" // offset = tile_i * ld_input_row
- "ldr x6, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "madd x20, x5, x6, x20\n" // offset += tile_j * ld_input_col
- "mul x20, x20, x22\n" // offset *= kernel_stride * output_size
- "ldr x7, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "add x7, x7, x20, LSL #2\n" // inptr[0] += offset * sizeof(float)
- "add x8, x7, x21, LSL #2\n"
- "add x17, x8, x21, LSL #2\n"
- "add x16, x6, x6\n"
- "ldr x15, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x14, x17, x21, LSL #2\n"
- "add x13, x16, x6\n"
- "cbnz x5, 2f\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "sub x21, x20, x5\n"
- "sub x21, x21, #0x1\n"
- "lsl x12, %x[n_channels], #0x2\n"
- "mov x20, #0x8\n"
- "and x21, x21, #0x3fffff\n"
- "mul x20, x20, x6\n"
- "orr x12, x12, x21, LSL #22\n"
- "orr x12, x12, x20, LSL #38\n"
- "add x11, x8, x6, LSL #2\n"
- "add x10, x7, x13, LSL #2\n"
- "add x9, x8, x16, LSL #2\n"
- "add x28, x17, x6, LSL #2\n"
- "add x27, x14, x13, LSL #2\n"
- "add x26, x7, x6, LSL #2\n"
- "add x25, x7, x16, LSL #2\n"
- "add x24, x17, x16, LSL #2\n"
- "add x23, x8, x13, LSL #2\n"
- "add x22, x17, x13, LSL #2\n"
- "add x21, x14, x6, LSL #2\n"
- "add x20, x14, x16, LSL #2\n"
- ".inst 0xf8ac497a // rprfm pldonce, x12, [x11]\n"
- ".inst 0xf8ac48fa // rprfm pldonce, x12, [x7]\n"
- ".inst 0xf8ac495a // rprfm pldonce, x12, [x10]\n"
- ".inst 0xf8ac493a // rprfm pldonce, x12, [x9]\n"
- ".inst 0xf8ac4b9a // rprfm pldonce, x12, [x28]\n"
- ".inst 0xf8ac49da // rprfm pldonce, x12, [x14]\n"
- ".inst 0xf8ac4b7a // rprfm pldonce, x12, [x27]\n"
- ".inst 0xf8ac4b5a // rprfm pldonce, x12, [x26]\n"
- ".inst 0xf8ac4b3a // rprfm pldonce, x12, [x25]\n"
- ".inst 0xf8ac4b1a // rprfm pldonce, x12, [x24]\n"
- ".inst 0xf8ac491a // rprfm pldonce, x12, [x8]\n"
- ".inst 0xf8ac4afa // rprfm pldonce, x12, [x23]\n"
- ".inst 0xf8ac4a3a // rprfm pldonce, x12, [x17]\n"
- ".inst 0xf8ac4ada // rprfm pldonce, x12, [x22]\n"
- ".inst 0xf8ac4aba // rprfm pldonce, x12, [x21]\n"
- ".inst 0xf8ac4a9a // rprfm pldonce, x12, [x20]\n"
+ "str x5, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "mov x21, #0x2\n"
+ "str x6, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x20, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "mul x19, x5, x20\n" // offset = tile_i * ld_input_row
+ "ldr x7, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "madd x19, x6, x7, x19\n" // offset += tile_j * ld_input_col
+ "mul x19, x19, x21\n" // offset *= kernel_stride * output_size
+ "ldr x8, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "add x8, x8, x19, LSL #2\n" // inptr[0] += offset * sizeof(float)
+ "add x17, x8, x20, LSL #2\n"
+ "add x16, x17, x20, LSL #2\n"
+ "add x15, x7, x7\n"
+ "ldr x14, [%x[params_struct], %[offsetof_args_params]]\n"
+ "add x13, x16, x20, LSL #2\n"
+ "add x12, x15, x7\n"
+ "cbnz x6, 2f\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "sub x20, x19, x6\n"
+ "sub x20, x20, #0x1\n"
+ "lsl x11, %x[n_channels], #0x2\n"
+ "mov x19, #0x8\n"
+ "and x20, x20, #0x3fffff\n"
+ "mul x19, x19, x7\n"
+ "orr x11, x11, x20, LSL #22\n"
+ "orr x11, x11, x19, LSL #38\n"
+ "add x10, x17, x7, LSL #2\n"
+ "add x9, x8, x12, LSL #2\n"
+ "add x28, x17, x15, LSL #2\n"
+ "add x27, x16, x7, LSL #2\n"
+ "add x26, x13, x12, LSL #2\n"
+ "add x25, x8, x7, LSL #2\n"
+ "add x24, x8, x15, LSL #2\n"
+ "add x23, x16, x15, LSL #2\n"
+ "add x22, x17, x12, LSL #2\n"
+ "add x21, x16, x12, LSL #2\n"
+ "add x20, x13, x7, LSL #2\n"
+ "add x19, x13, x15, LSL #2\n"
+ ".inst 0xf8ab495a // rprfm pldonce, x10, [x11]\n"
+ ".inst 0xf8ab491a // rprfm pldonce, x8, [x11]\n"
+ ".inst 0xf8ab493a // rprfm pldonce, x9, [x11]\n"
+ ".inst 0xf8ab4b9a // rprfm pldonce, x28, [x11]\n"
+ ".inst 0xf8ab4b7a // rprfm pldonce, x27, [x11]\n"
+ ".inst 0xf8ab49ba // rprfm pldonce, x13, [x11]\n"
+ ".inst 0xf8ab4b5a // rprfm pldonce, x26, [x11]\n"
+ ".inst 0xf8ab4b3a // rprfm pldonce, x25, [x11]\n"
+ ".inst 0xf8ab4b1a // rprfm pldonce, x24, [x11]\n"
+ ".inst 0xf8ab4afa // rprfm pldonce, x23, [x11]\n"
+ ".inst 0xf8ab4a3a // rprfm pldonce, x17, [x11]\n"
+ ".inst 0xf8ab4ada // rprfm pldonce, x22, [x11]\n"
+ ".inst 0xf8ab4a1a // rprfm pldonce, x16, [x11]\n"
+ ".inst 0xf8ab4aba // rprfm pldonce, x21, [x11]\n"
+ ".inst 0xf8ab4a9a // rprfm pldonce, x20, [x11]\n"
+ ".inst 0xf8ab4a7a // rprfm pldonce, x19, [x11]\n"
"2:" // Tile loop: Prefetch input rows: End
- "ldr x22, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "mul x21, x4, x22\n" // offset = tile_i * ld_output_row
- "mov x20, #0x2\n"
- "ld1w { z18.s }, p3/Z, [x15]\n"
- "ldr x25, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "madd x21, x5, x25, x21\n" // offset += tile_j * ld_output_col
- "addvl x15, x15, #1\n"
- ".inst 0xa040c1e0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x15]\n"
- "ldr x24, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "mul x21, x21, x20\n" // offset *= output_tile_size
- "cntw x23\n"
+ "ldr x21, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "mul x20, x5, x21\n" // offset = tile_i * ld_output_row
+ "mov x19, #0x2\n"
+ "ld1w { z18.s }, p3/Z, [x14]\n"
+ "ldr x24, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+ "madd x20, x6, x24, x20\n" // offset += tile_j * ld_output_col
+ "addvl x14, x14, #1\n"
+ ".inst 0xa040c1c0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x14]\n"
+ "ldr x23, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "mul x20, x20, x19\n" // offset *= output_tile_size
+ "cntw x22\n"
"ld1rw { z17.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "addvl x15, x15, #4\n"
- "add x24, x24, x21, LSL #2\n" // outptrs[0] += offset * sizeof(float)
- ".inst 0xa040c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15]\n"
+ "addvl x14, x14, #4\n"
+ "add x23, x23, x20, LSL #2\n" // outptrs[0] += offset * sizeof(float)
+ ".inst 0xa040c1c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x14]\n"
"whilelt p2.s, XZR, %x[n_channels]\n"
- "addvl x15, x15, #4\n"
+ "addvl x14, x14, #4\n"
"ld1rw { z16.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "cmp x23, %x[n_channels]\n"
- "add x22, x24, x22, LSL #2\n"
- "ld1w { z8.s }, p3/Z, [x15]\n"
- "mov x21, #0x0\n"
- "sub x20, XZR, x23\n"
- "ld1w { z9.s }, p2/Z, [x8, x6, LSL #2]\n"
- "ld1w { z10.s }, p2/Z, [x7]\n"
- "addvl x15, x15, #1\n"
- "ld1w { z11.s }, p2/Z, [x7, x13, LSL #2]\n"
- "ld1w { z12.s }, p2/Z, [x8, x16, LSL #2]\n"
- "ld1w { z13.s }, p2/Z, [x17, x6, LSL #2]\n"
+ "cmp x22, %x[n_channels]\n"
+ "add x21, x23, x21, LSL #2\n"
+ "ld1w { z8.s }, p3/Z, [x14]\n"
+ "mov x20, #0x0\n"
+ "sub x19, XZR, x22\n"
+ "ld1w { z9.s }, p2/Z, [x17, x7, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x8]\n"
+ "addvl x14, x14, #1\n"
+ "ld1w { z11.s }, p2/Z, [x8, x12, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x17, x15, LSL #2]\n"
+ "ld1w { z13.s }, p2/Z, [x16, x7, LSL #2]\n"
"bge 4f\n"
"3:" // Tile loop: Channel loop
"movprfx z28, z18\n fmla z28.s, p3/M, z4.s, z9.s\n"
"movprfx z29, z18\n fmla z29.s, p3/M, z3.s, z9.s\n"
- "whilelt p1.s, x23, %x[n_channels]\n"
- "incw x21\n"
+ "whilelt p1.s, x22, %x[n_channels]\n"
+ "incw x20\n"
"movprfx z30, z18\n fmla z30.s, p3/M, z1.s, z9.s\n"
"movprfx z31, z18\n fmla z31.s, p3/M, z0.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x14]\n"
- "incw x23\n"
+ "ld1w { z9.s }, p2/Z, [x13]\n"
+ "incw x22\n"
"fmla z28.s, p3/M, z0.s, z10.s\n"
"fmla z29.s, p3/M, z2.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x14, x13, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x13, x12, LSL #2]\n"
"mov p0.b, p2.b\n"
"fmla z30.s, p3/M, z2.s, z12.s\n"
"fmla z31.s, p3/M, z1.s, z12.s\n"
- "ld1w { z10.s }, p2/Z, [x17, x16, LSL #2]\n"
- "incw x20\n"
+ "ld1w { z10.s }, p2/Z, [x16, x15, LSL #2]\n"
+ "incw x19\n"
"fmla z28.s, p3/M, z5.s, z12.s\n"
"fmla z29.s, p3/M, z4.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x7, x6, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x8, x7, LSL #2]\n"
"fmla z30.s, p3/M, z6.s, z9.s\n"
"fmla z31.s, p3/M, z3.s, z13.s\n"
- "ld1w { z9.s }, p2/Z, [x7, x16, LSL #2]\n"
- "addvl x7, x7, #1\n"
+ "ld1w { z9.s }, p2/Z, [x8, x15, LSL #2]\n"
+ "addvl x8, x8, #1\n"
"fmla z28.s, p3/M, z7.s, z13.s\n"
"fmla z29.s, p3/M, z6.s, z13.s\n"
- "ld1w { z18.s }, p3/Z, [x15]\n"
- "addvl x15, x15, #1\n"
+ "ld1w { z18.s }, p3/Z, [x14]\n"
+ "addvl x14, x14, #1\n"
"fmla z30.s, p3/M, z4.s, z13.s\n"
"fmla z31.s, p3/M, z8.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x8]\n"
+ "ld1w { z11.s }, p2/Z, [x17]\n"
"fmla z28.s, p3/M, z1.s, z12.s\n"
"fmla z29.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x8, x13, LSL #2]\n"
- "addvl x8, x8, #1\n"
+ "ld1w { z12.s }, p2/Z, [x17, x12, LSL #2]\n"
+ "addvl x17, x17, #1\n"
"fmla z30.s, p3/M, z5.s, z10.s\n"
"fmla z31.s, p3/M, z4.s, z10.s\n"
"fmla z28.s, p3/M, z2.s, z9.s\n"
"fmla z29.s, p3/M, z1.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x17]\n"
+ "ld1w { z9.s }, p2/Z, [x16]\n"
"fmla z30.s, p3/M, z0.s, z11.s\n"
"fmla z31.s, p3/M, z2.s, z12.s\n"
"fmla z28.s, p3/M, z8.s, z10.s\n"
"fmla z29.s, p3/M, z7.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x17, x13, LSL #2]\n"
- "addvl x17, x17, #1\n"
+ "ld1w { z10.s }, p2/Z, [x16, x12, LSL #2]\n"
+ "addvl x16, x16, #1\n"
"fmla z30.s, p3/M, z3.s, z9.s\n"
"fmla z31.s, p3/M, z5.s, z10.s\n"
- "ld1w { z13.s }, p1/Z, [x17, x6, LSL #2]\n"
+ "ld1w { z13.s }, p1/Z, [x16, x7, LSL #2]\n"
"fmla z28.s, p3/M, z3.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x14, x6, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x13, x7, LSL #2]\n"
"fmla z29.s, p3/M, z5.s, z12.s\n"
"fmla z30.s, p3/M, z7.s, z11.s\n"
"fmla z31.s, p3/M, z6.s, z11.s\n"
- "ld1w { z12.s }, p2/Z, [x14, x16, LSL #2]\n"
- "whilelt p2.s, x21, %x[n_channels]\n"
+ "ld1w { z12.s }, p2/Z, [x13, x15, LSL #2]\n"
+ "whilelt p2.s, x20, %x[n_channels]\n"
"fmla z28.s, p3/M, z6.s, z9.s\n"
"fmla z29.s, p3/M, z8.s, z10.s\n"
- ".inst 0xa040c1e0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x15]\n"
- "addvl x15, x15, #4\n"
+ ".inst 0xa040c1c0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x14]\n"
+ "addvl x14, x14, #4\n"
"fmla z30.s, p3/M, z8.s, z12.s\n"
"fmla z31.s, p3/M, z7.s, z12.s\n"
- ".inst 0xa040c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15]\n"
- "addvl x15, x15, #4\n"
- "cmp x23, %x[n_channels]\n"
+ ".inst 0xa040c1c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x14]\n"
+ "addvl x14, x14, #4\n"
+ "cmp x22, %x[n_channels]\n"
".inst 0xc1b0ca3c // fclamp { z28.s-z31.s }, z17.s, z16.s\n"
+ "addvl x13, x13, #1\n"
+ "ld1w { z9.s }, p1/Z, [x17, x7, LSL #2]\n"
+ "ld1w { z10.s }, p1/Z, [x8]\n"
+ "st1w { z28.s }, p0, [x23]\n"
+ "ld1w { z11.s }, p1/Z, [x8, x12, LSL #2]\n"
+ "st1w { z29.s }, p0, [x23, x24, LSL #2]\n"
+ "addvl x23, x23, #1\n"
+ "ld1w { z12.s }, p1/Z, [x17, x15, LSL #2]\n"
+ "st1w { z30.s }, p0, [x21]\n"
+ "st1w { z31.s }, p0, [x21, x24, LSL #2]\n"
+ "addvl x21, x21, #1\n"
+ "ld1w { z8.s }, p3/Z, [x14]\n"
"addvl x14, x14, #1\n"
- "ld1w { z9.s }, p1/Z, [x8, x6, LSL #2]\n"
- "ld1w { z10.s }, p1/Z, [x7]\n"
- "st1w { z28.s }, p0, [x24]\n"
- "ld1w { z11.s }, p1/Z, [x7, x13, LSL #2]\n"
- "st1w { z29.s }, p0, [x24, x25, LSL #2]\n"
- "addvl x24, x24, #1\n"
- "ld1w { z12.s }, p1/Z, [x8, x16, LSL #2]\n"
- "st1w { z30.s }, p0, [x22]\n"
- "st1w { z31.s }, p0, [x22, x25, LSL #2]\n"
- "addvl x22, x22, #1\n"
- "ld1w { z8.s }, p3/Z, [x15]\n"
- "addvl x15, x15, #1\n"
"blt 3b\n"
"4:" // Tile loop: Channel tail
"movprfx z28, z18\n fmla z28.s, p3/M, z4.s, z9.s\n"
"movprfx z29, z18\n fmla z29.s, p3/M, z3.s, z9.s\n"
- "ldr x5, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "add x5, x5, #0x1\n"
+ "ldr x6, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "add x6, x6, #0x1\n"
"movprfx z30, z18\n fmla z30.s, p3/M, z1.s, z9.s\n"
"movprfx z31, z18\n fmla z31.s, p3/M, z0.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x14]\n"
- "ldr x4, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "ld1w { z9.s }, p2/Z, [x13]\n"
+ "ldr x5, [%x[params_struct], %[offsetof_args_tile_i]]\n"
"fmla z28.s, p3/M, z0.s, z10.s\n"
"fmla z29.s, p3/M, z2.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x14, x13, LSL #2]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "ld1w { z11.s }, p2/Z, [x13, x12, LSL #2]\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
"fmla z30.s, p3/M, z2.s, z12.s\n"
"fmla z31.s, p3/M, z1.s, z12.s\n"
- "ld1w { z10.s }, p2/Z, [x17, x16, LSL #2]\n"
- "ldr x21, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "ld1w { z10.s }, p2/Z, [x16, x15, LSL #2]\n"
+ "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
"fmla z28.s, p3/M, z5.s, z12.s\n"
"fmla z29.s, p3/M, z4.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x7, x6, LSL #2]\n"
- "cmp x5, x20\n"
+ "ld1w { z12.s }, p2/Z, [x8, x7, LSL #2]\n"
+ "cmp x6, x19\n"
"fmla z30.s, p3/M, z6.s, z9.s\n"
"fmla z31.s, p3/M, z3.s, z13.s\n"
- "ld1w { z9.s }, p2/Z, [x7, x16, LSL #2]\n"
- "add x20, x4, #0x1\n"
+ "ld1w { z9.s }, p2/Z, [x8, x15, LSL #2]\n"
+ "add x19, x5, #0x1\n"
"fmla z28.s, p3/M, z7.s, z13.s\n"
"fmla z29.s, p3/M, z6.s, z13.s\n"
- "csel x4, x4, x20, LT\n"
+ "csel x5, x5, x19, LT\n"
"mov p0.b, p2.b\n"
"fmla z30.s, p3/M, z4.s, z13.s\n"
"fmla z31.s, p3/M, z8.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x8]\n"
- "csel x5, x5, XZR, LT\n"
+ "ld1w { z11.s }, p2/Z, [x17]\n"
+ "csel x6, x6, XZR, LT\n"
"fmla z28.s, p3/M, z1.s, z12.s\n"
"fmla z29.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x8, x13, LSL #2]\n"
- "cmp x4, x21\n"
+ "ld1w { z12.s }, p2/Z, [x17, x12, LSL #2]\n"
+ "cmp x5, x20\n"
"fmla z30.s, p3/M, z5.s, z10.s\n"
"fmla z31.s, p3/M, z4.s, z10.s\n"
"fmla z28.s, p3/M, z2.s, z9.s\n"
"fmla z29.s, p3/M, z1.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x17]\n"
+ "ld1w { z9.s }, p2/Z, [x16]\n"
"fmla z30.s, p3/M, z0.s, z11.s\n"
"fmla z31.s, p3/M, z2.s, z12.s\n"
"fmla z28.s, p3/M, z8.s, z10.s\n"
"fmla z29.s, p3/M, z7.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x17, x13, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x16, x12, LSL #2]\n"
"fmla z30.s, p3/M, z3.s, z9.s\n"
"fmla z31.s, p3/M, z5.s, z10.s\n"
"fmla z28.s, p3/M, z3.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x14, x6, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x13, x7, LSL #2]\n"
"fmla z29.s, p3/M, z5.s, z12.s\n"
"fmla z30.s, p3/M, z7.s, z11.s\n"
"fmla z31.s, p3/M, z6.s, z11.s\n"
- "ld1w { z12.s }, p2/Z, [x14, x16, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x13, x15, LSL #2]\n"
"fmla z28.s, p3/M, z6.s, z9.s\n"
"fmla z29.s, p3/M, z8.s, z10.s\n"
"fmla z30.s, p3/M, z8.s, z12.s\n"
"fmla z31.s, p3/M, z7.s, z12.s\n"
".inst 0xc1b0ca3c // fclamp { z28.s-z31.s }, z17.s, z16.s\n"
- "st1w { z28.s }, p0, [x24]\n"
- "st1w { z29.s }, p0, [x24, x25, LSL #2]\n"
- "st1w { z30.s }, p0, [x22]\n"
- "st1w { z31.s }, p0, [x22, x25, LSL #2]\n"
+ "st1w { z28.s }, p0, [x23]\n"
+ "st1w { z29.s }, p0, [x23, x24, LSL #2]\n"
+ "st1w { z30.s }, p0, [x21]\n"
+ "st1w { z31.s }, p0, [x21, x24, LSL #2]\n"
"blt 1b\n"
".inst 0xd503467f // SMSTOP\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
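The tile-loop prologue above spells out its pointer arithmetic in comments ("offset = tile_i * ld_input_row", "offset += tile_j * ld_input_col", "offset *= kernel_stride * output_size", "inptr[0] += offset * sizeof(float)"). A minimal C++ sketch of that computation, with illustrative names only:

// Sketch of the mul/madd/mul/add sequence computing the input tile base.
float *tile_input_ptr(float *inptr, long tile_i, long tile_j,
                      long ld_input_row, long ld_input_col)
{
    long offset = tile_i * ld_input_row;      // mul
    offset = tile_j * ld_input_col + offset;  // madd
    offset *= 2;                              // kernel_stride * output_size for this kernel
    return inptr + offset;                    // add ..., LSL #2 scales by sizeof(float)
}

Successive input row pointers are then formed by repeatedly adding ld_input_row to the base (the chained "add xN, xM, x20, LSL #2" instructions), and the output side repeats the same pattern with ld_output_row/ld_output_col and output_tile_size.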
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
index 079b39c5ec..5fc6602c91 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -78,196 +78,196 @@ void sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
activation_min, activation_max);
__asm__ __volatile__(
- "ldr x20, [%x[params_struct], %[offsetof_args_outptrs]]\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_outptrs]]\n"
".inst 0xd503477f // SMSTART ZA\n"
- "add x15, %x[params_struct], %[offsetof_Args_inptrs]\n"
+ "add x14, %x[params_struct], %[offsetof_Args_inptrs]\n"
"ptrue p3.b\n"
- "ldr x14, [%x[params_struct], %[offsetof_args_params]]\n"
+ "ldr x13, [%x[params_struct], %[offsetof_args_params]]\n"
".inst 0x25207810 // ptrue pn8.b\n"
- "ld1w { z18.s }, p3/Z, [x14]\n"
- "addvl x14, x14, #1\n"
- "ldp x13, x12, [x20, #0x0]\n"
- "cntw x11\n"
- ".inst 0xa040c1c0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x14]\n"
- "addvl x14, x14, #4\n"
- "ldp x10, x9, [x20, #0x10]\n"
- "mov x28, #0x0\n"
+ "ld1w { z18.s }, p3/Z, [x13]\n"
+ "addvl x13, x13, #1\n"
+ "ldp x12, x11, [x19, #0x0]\n"
+ "cntw x10\n"
+ ".inst 0xa040c1a0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x13]\n"
+ "addvl x13, x13, #4\n"
+ "ldp x9, x28, [x19, #0x10]\n"
+ "mov x27, #0x0\n"
"whilelt p2.s, XZR, %x[n_channels]\n"
- ".inst 0xa040c1c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x14]\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "addvl x14, x14, #4\n"
- "cmp x11, %x[n_channels]\n"
+ ".inst 0xa040c1a4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x13]\n"
+ "ldp x26, x25, [x14, #0x0]\n"
+ "addvl x13, x13, #4\n"
+ "cmp x10, %x[n_channels]\n"
"ld1rw { z17.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ldp x25, x22, [x15, #0x10]\n"
+ "ldp x24, x21, [x14, #0x10]\n"
"ld1rw { z16.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "sub x24, XZR, x11\n"
- "ldr x23, [x15, #0x20]\n"
- "ld1w { z8.s }, p3/Z, [x14]\n"
- "addvl x14, x14, #1\n"
- "ld1w { z9.s }, p2/Z, [x27, x28, LSL #2]\n"
- "ld1w { z10.s }, p2/Z, [x26, x28, LSL #2]\n"
- "ld1w { z11.s }, p2/Z, [x25, x28, LSL #2]\n"
- "ld1w { z12.s }, p2/Z, [x22, x28, LSL #2]\n"
- "ld1w { z13.s }, p2/Z, [x23, x28, LSL #2]\n"
+ "sub x23, XZR, x10\n"
+ "ldr x22, [x14, #0x20]\n"
+ "ld1w { z8.s }, p3/Z, [x13]\n"
+ "addvl x13, x13, #1\n"
+ "ld1w { z9.s }, p2/Z, [x26, x27, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x25, x27, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x24, x27, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x21, x27, LSL #2]\n"
+ "ld1w { z13.s }, p2/Z, [x22, x27, LSL #2]\n"
"bge 2f\n"
"1:" // Channel loop
"movprfx z28, z18\n fmla z28.s, p3/M, z4.s, z9.s\n"
"movprfx z29, z18\n fmla z29.s, p3/M, z3.s, z9.s\n"
- "ldr x22, [x15, #0x28]\n"
- "whilelt p1.s, x11, %x[n_channels]\n"
+ "ldr x21, [x14, #0x28]\n"
+ "whilelt p1.s, x10, %x[n_channels]\n"
"movprfx z30, z18\n fmla z30.s, p3/M, z1.s, z9.s\n"
"movprfx z31, z18\n fmla z31.s, p3/M, z0.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x22, x28, LSL #2]\n"
- "ldr x21, [x15, #0x30]\n"
+ "ld1w { z9.s }, p2/Z, [x21, x27, LSL #2]\n"
+ "ldr x20, [x14, #0x30]\n"
"fmla z28.s, p3/M, z0.s, z10.s\n"
"fmla z29.s, p3/M, z2.s, z11.s\n"
- "ldr x20, [x15, #0x38]\n"
- "ld1w { z11.s }, p2/Z, [x21, x28, LSL #2]\n"
+ "ldr x19, [x14, #0x38]\n"
+ "ld1w { z11.s }, p2/Z, [x20, x27, LSL #2]\n"
"fmla z30.s, p3/M, z2.s, z12.s\n"
"fmla z31.s, p3/M, z1.s, z12.s\n"
- "ldr x26, [x15, #0x48]\n"
- "ld1w { z10.s }, p2/Z, [x26, x28, LSL #2]\n"
+ "ldr x25, [x14, #0x48]\n"
+ "ld1w { z10.s }, p2/Z, [x25, x27, LSL #2]\n"
"fmla z28.s, p3/M, z5.s, z12.s\n"
"fmla z29.s, p3/M, z4.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x20, x28, LSL #2]\n"
- "ldr x27, [x15, #0x40]\n"
+ "ld1w { z12.s }, p2/Z, [x19, x27, LSL #2]\n"
+ "ldr x26, [x14, #0x40]\n"
"fmla z30.s, p3/M, z6.s, z9.s\n"
"fmla z31.s, p3/M, z3.s, z13.s\n"
- "ld1w { z9.s }, p2/Z, [x27, x28, LSL #2]\n"
- "ldr x25, [x15, #0x50]\n"
+ "ld1w { z9.s }, p2/Z, [x26, x27, LSL #2]\n"
+ "ldr x24, [x14, #0x50]\n"
"fmla z28.s, p3/M, z7.s, z13.s\n"
"fmla z29.s, p3/M, z6.s, z13.s\n"
- "ldr x22, [x15, #0x58]\n"
- "ld1w { z18.s }, p3/Z, [x14]\n"
+ "ldr x21, [x14, #0x58]\n"
+ "ld1w { z18.s }, p3/Z, [x13]\n"
"fmla z30.s, p3/M, z4.s, z13.s\n"
"fmla z31.s, p3/M, z8.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x25, x28, LSL #2]\n"
- "ldr x23, [x15, #0x60]\n"
+ "ld1w { z11.s }, p2/Z, [x24, x27, LSL #2]\n"
+ "ldr x22, [x14, #0x60]\n"
"fmla z28.s, p3/M, z1.s, z12.s\n"
"fmla z29.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x22, x28, LSL #2]\n"
- "ldr x22, [x15, #0x68]\n"
+ "ld1w { z12.s }, p2/Z, [x21, x27, LSL #2]\n"
+ "ldr x21, [x14, #0x68]\n"
"fmla z30.s, p3/M, z5.s, z10.s\n"
"fmla z31.s, p3/M, z4.s, z10.s\n"
- "ldr x21, [x15, #0x70]\n"
- "addvl x14, x14, #1\n"
+ "ldr x20, [x14, #0x70]\n"
+ "addvl x13, x13, #1\n"
"fmla z28.s, p3/M, z2.s, z9.s\n"
"fmla z29.s, p3/M, z1.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x23, x28, LSL #2]\n"
- "ldr x20, [x15, #0x78]\n"
+ "ld1w { z9.s }, p2/Z, [x22, x27, LSL #2]\n"
+ "ldr x19, [x14, #0x78]\n"
"fmla z30.s, p3/M, z0.s, z11.s\n"
"fmla z31.s, p3/M, z2.s, z12.s\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "incw x24\n"
+ "ldp x26, x25, [x14, #0x0]\n"
+ "incw x23\n"
"fmla z28.s, p3/M, z8.s, z10.s\n"
"fmla z29.s, p3/M, z7.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x22, x28, LSL #2]\n"
- "ldp x25, x22, [x15, #0x10]\n"
+ "ld1w { z10.s }, p2/Z, [x21, x27, LSL #2]\n"
+ "ldp x24, x21, [x14, #0x10]\n"
"fmla z30.s, p3/M, z3.s, z9.s\n"
"fmla z31.s, p3/M, z5.s, z10.s\n"
- "ldr x23, [x15, #0x20]\n"
- "ld1w { z13.s }, p1/Z, [x23, x11, LSL #2]\n"
+ "ldr x22, [x14, #0x20]\n"
+ "ld1w { z13.s }, p1/Z, [x22, x10, LSL #2]\n"
"fmla z28.s, p3/M, z3.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x21, x28, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x20, x27, LSL #2]\n"
"fmla z29.s, p3/M, z5.s, z12.s\n"
"mov p0.b, p2.b\n"
"fmla z30.s, p3/M, z7.s, z11.s\n"
"fmla z31.s, p3/M, z6.s, z11.s\n"
- "ld1w { z12.s }, p2/Z, [x20, x28, LSL #2]\n"
- "incw x28\n"
+ "ld1w { z12.s }, p2/Z, [x19, x27, LSL #2]\n"
+ "incw x27\n"
"fmla z28.s, p3/M, z6.s, z9.s\n"
"fmla z29.s, p3/M, z8.s, z10.s\n"
- "ld1w { z9.s }, p1/Z, [x27, x11, LSL #2]\n"
- "whilelt p2.s, x28, %x[n_channels]\n"
+ "ld1w { z9.s }, p1/Z, [x26, x10, LSL #2]\n"
+ "whilelt p2.s, x27, %x[n_channels]\n"
"fmla z30.s, p3/M, z8.s, z12.s\n"
"fmla z31.s, p3/M, z7.s, z12.s\n"
- "ld1w { z10.s }, p1/Z, [x26, x11, LSL #2]\n"
- "ld1w { z11.s }, p1/Z, [x25, x11, LSL #2]\n"
+ "ld1w { z10.s }, p1/Z, [x25, x10, LSL #2]\n"
+ "ld1w { z11.s }, p1/Z, [x24, x10, LSL #2]\n"
".inst 0xc1b0ca3c // fclamp { z28.s-z31.s }, z17.s, z16.s\n"
- "st1w { z28.s }, p0, [x13, x24, LSL #2]\n"
- "ld1w { z12.s }, p1/Z, [x22, x11, LSL #2]\n"
- "incw x11\n"
- "cmp x11, %x[n_channels]\n"
- "st1w { z29.s }, p0, [x12, x24, LSL #2]\n"
- ".inst 0xa040c1c0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x14]\n"
- "addvl x14, x14, #4\n"
- "st1w { z30.s }, p0, [x10, x24, LSL #2]\n"
- ".inst 0xa040c1c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x14]\n"
- "addvl x14, x14, #4\n"
- "st1w { z31.s }, p0, [x9, x24, LSL #2]\n"
- "ld1w { z8.s }, p3/Z, [x14]\n"
- "addvl x14, x14, #1\n"
+ "st1w { z28.s }, p0, [x12, x23, LSL #2]\n"
+ "ld1w { z12.s }, p1/Z, [x21, x10, LSL #2]\n"
+ "incw x10\n"
+ "cmp x10, %x[n_channels]\n"
+ "st1w { z29.s }, p0, [x11, x23, LSL #2]\n"
+ ".inst 0xa040c1a0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x13]\n"
+ "addvl x13, x13, #4\n"
+ "st1w { z30.s }, p0, [x9, x23, LSL #2]\n"
+ ".inst 0xa040c1a4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x13]\n"
+ "addvl x13, x13, #4\n"
+ "st1w { z31.s }, p0, [x28, x23, LSL #2]\n"
+ "ld1w { z8.s }, p3/Z, [x13]\n"
+ "addvl x13, x13, #1\n"
"blt 1b\n"
"2:" // Channel tail
"movprfx z28, z18\n fmla z28.s, p3/M, z4.s, z9.s\n"
"movprfx z29, z18\n fmla z29.s, p3/M, z3.s, z9.s\n"
- "ldr x22, [x15, #0x28]\n"
- "incw x24\n"
+ "ldr x21, [x14, #0x28]\n"
+ "incw x23\n"
"movprfx z30, z18\n fmla z30.s, p3/M, z1.s, z9.s\n"
"movprfx z31, z18\n fmla z31.s, p3/M, z0.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x22, x28, LSL #2]\n"
- "ldr x21, [x15, #0x30]\n"
+ "ld1w { z9.s }, p2/Z, [x21, x27, LSL #2]\n"
+ "ldr x20, [x14, #0x30]\n"
"fmla z28.s, p3/M, z0.s, z10.s\n"
"fmla z29.s, p3/M, z2.s, z11.s\n"
- "ldr x20, [x15, #0x38]\n"
- "ld1w { z11.s }, p2/Z, [x21, x28, LSL #2]\n"
+ "ldr x19, [x14, #0x38]\n"
+ "ld1w { z11.s }, p2/Z, [x20, x27, LSL #2]\n"
"fmla z30.s, p3/M, z2.s, z12.s\n"
"fmla z31.s, p3/M, z1.s, z12.s\n"
- "ldr x26, [x15, #0x48]\n"
- "ld1w { z10.s }, p2/Z, [x26, x28, LSL #2]\n"
+ "ldr x25, [x14, #0x48]\n"
+ "ld1w { z10.s }, p2/Z, [x25, x27, LSL #2]\n"
"fmla z28.s, p3/M, z5.s, z12.s\n"
"fmla z29.s, p3/M, z4.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x20, x28, LSL #2]\n"
- "ldr x27, [x15, #0x40]\n"
+ "ld1w { z12.s }, p2/Z, [x19, x27, LSL #2]\n"
+ "ldr x26, [x14, #0x40]\n"
"fmla z30.s, p3/M, z6.s, z9.s\n"
"fmla z31.s, p3/M, z3.s, z13.s\n"
- "ld1w { z9.s }, p2/Z, [x27, x28, LSL #2]\n"
- "ldr x25, [x15, #0x50]\n"
+ "ld1w { z9.s }, p2/Z, [x26, x27, LSL #2]\n"
+ "ldr x24, [x14, #0x50]\n"
"fmla z28.s, p3/M, z7.s, z13.s\n"
"fmla z29.s, p3/M, z6.s, z13.s\n"
- "ldr x22, [x15, #0x58]\n"
+ "ldr x21, [x14, #0x58]\n"
"mov p0.b, p2.b\n"
"fmla z30.s, p3/M, z4.s, z13.s\n"
"fmla z31.s, p3/M, z8.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x25, x28, LSL #2]\n"
- "ldr x23, [x15, #0x60]\n"
+ "ld1w { z11.s }, p2/Z, [x24, x27, LSL #2]\n"
+ "ldr x22, [x14, #0x60]\n"
"fmla z28.s, p3/M, z1.s, z12.s\n"
"fmla z29.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x22, x28, LSL #2]\n"
- "ldr x22, [x15, #0x68]\n"
+ "ld1w { z12.s }, p2/Z, [x21, x27, LSL #2]\n"
+ "ldr x21, [x14, #0x68]\n"
"fmla z30.s, p3/M, z5.s, z10.s\n"
"fmla z31.s, p3/M, z4.s, z10.s\n"
- "ldr x21, [x15, #0x70]\n"
+ "ldr x20, [x14, #0x70]\n"
"fmla z28.s, p3/M, z2.s, z9.s\n"
"fmla z29.s, p3/M, z1.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x23, x28, LSL #2]\n"
- "ldr x20, [x15, #0x78]\n"
+ "ld1w { z9.s }, p2/Z, [x22, x27, LSL #2]\n"
+ "ldr x19, [x14, #0x78]\n"
"fmla z30.s, p3/M, z0.s, z11.s\n"
"fmla z31.s, p3/M, z2.s, z12.s\n"
"fmla z28.s, p3/M, z8.s, z10.s\n"
"fmla z29.s, p3/M, z7.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x22, x28, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x21, x27, LSL #2]\n"
"fmla z30.s, p3/M, z3.s, z9.s\n"
"fmla z31.s, p3/M, z5.s, z10.s\n"
"fmla z28.s, p3/M, z3.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x21, x28, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x20, x27, LSL #2]\n"
"fmla z29.s, p3/M, z5.s, z12.s\n"
"fmla z30.s, p3/M, z7.s, z11.s\n"
"fmla z31.s, p3/M, z6.s, z11.s\n"
- "ld1w { z12.s }, p2/Z, [x20, x28, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x19, x27, LSL #2]\n"
"fmla z28.s, p3/M, z6.s, z9.s\n"
"fmla z29.s, p3/M, z8.s, z10.s\n"
"fmla z30.s, p3/M, z8.s, z12.s\n"
"fmla z31.s, p3/M, z7.s, z12.s\n"
".inst 0xc1b0ca3c // fclamp { z28.s-z31.s }, z17.s, z16.s\n"
- "st1w { z28.s }, p0, [x13, x24, LSL #2]\n"
- "st1w { z29.s }, p0, [x12, x24, LSL #2]\n"
- "st1w { z30.s }, p0, [x10, x24, LSL #2]\n"
- "st1w { z31.s }, p0, [x9, x24, LSL #2]\n"
+ "st1w { z28.s }, p0, [x12, x23, LSL #2]\n"
+ "st1w { z29.s }, p0, [x11, x23, LSL #2]\n"
+ "st1w { z30.s }, p0, [x9, x23, LSL #2]\n"
+ "st1w { z31.s }, p0, [x28, x23, LSL #2]\n"
".inst 0xd503467f // SMSTOP\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
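Unlike the direct variant above it, this indirect kernel takes no row/column strides: each input row pointer is loaded from an inptrs table (the ldp/ldr loads from [x14, #off]) and indexed by one shared running channel offset. A scalar view of that addressing, under the caveat that the real kernel loads a whole predicated SVE vector of channels per pointer (ld1w { zN.s }, p2/Z, [xP, x27, LSL #2]) and that the names here are illustrative:

// Scalar sketch of table-driven (indirect) input addressing.
void gather_channel(const float *const *inptrs, long channel_offset,
                    float *vals, int n_inputs)
{
    for (int i = 0; i < n_inputs; ++i)
        vals[i] = inptrs[i][channel_offset];  // pointer from table + shared offset
}

This is what lets the indirect implementation handle padded or non-contiguous input windows: the caller fills inptrs with whatever addresses are valid, and the kernel never derives them itself.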
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
index ce0ae29756..8ff0fe4dff 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -94,105 +94,105 @@ void sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"mov x3, #0x0\n"
"1:" // Tile loop
"str x2, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "mov x22, #0x3\n"
+ "mov x21, #0x3\n"
"str x3, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x21, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "mul x20, x2, x21\n" // offset = tile_i * ld_input_row
+ "ldr x20, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "mul x19, x2, x20\n" // offset = tile_i * ld_input_row
"ldr x4, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "madd x20, x3, x4, x20\n" // offset += tile_j * ld_input_col
- "mul x20, x20, x22\n" // offset *= kernel_stride * output_size
+ "madd x19, x3, x4, x19\n" // offset += tile_j * ld_input_col
+ "mul x19, x19, x21\n" // offset *= kernel_stride * output_size
"ldr x5, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "add x5, x5, x20, LSL #2\n" // inptr[0] += offset * sizeof(float)
- "add x6, x5, x21, LSL #2\n"
- "add x7, x6, x21, LSL #2\n"
+ "add x5, x5, x19, LSL #2\n" // inptr[0] += offset * sizeof(float)
+ "add x6, x5, x20, LSL #2\n"
+ "add x7, x6, x20, LSL #2\n"
"add x8, x4, x4\n"
"ldr x17, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x16, x7, x21, LSL #2\n"
+ "add x16, x7, x20, LSL #2\n"
"add x15, x8, x4\n"
- "add x14, x16, x21, LSL #2\n"
+ "add x14, x16, x20, LSL #2\n"
"add x13, x15, x4\n"
"cbnz x3, 2f\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "sub x21, x20, x3\n"
- "sub x21, x21, #0x1\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "sub x20, x19, x3\n"
+ "sub x20, x20, #0x1\n"
"lsl x12, %x[n_channels], #0x2\n"
- "mov x20, #0xc\n"
- "and x21, x21, #0x3fffff\n"
- "mul x20, x20, x4\n"
- "orr x12, x12, x21, LSL #22\n"
- "orr x12, x12, x20, LSL #38\n"
- "add x27, x7, x8, LSL #2\n"
- "add x26, x5, x13, LSL #2\n"
- "add x25, x6, x8, LSL #2\n"
- "add x24, x14, x13, LSL #2\n"
- "add x23, x7, x4, LSL #2\n"
- "add x22, x5, x4, LSL #2\n"
- "add x21, x5, x15, LSL #2\n"
- "add x20, x7, x15, LSL #2\n"
- "add x11, x6, x13, LSL #2\n"
- "add x10, x16, x8, LSL #2\n"
- "add x9, x16, x13, LSL #2\n"
- "add x28, x14, x4, LSL #2\n"
- ".inst 0xf8ac4b7a // rprfm pldonce, x12, [x27]\n"
- "add x27, x6, x4, LSL #2\n"
- ".inst 0xf8ac48ba // rprfm pldonce, x12, [x5]\n"
- ".inst 0xf8ac4b5a // rprfm pldonce, x12, [x26]\n"
- "add x26, x6, x15, LSL #2\n"
- ".inst 0xf8ac49da // rprfm pldonce, x12, [x14]\n"
- ".inst 0xf8ac4b3a // rprfm pldonce, x12, [x25]\n"
- "add x25, x14, x15, LSL #2\n"
- ".inst 0xf8ac4b1a // rprfm pldonce, x12, [x24]\n"
- "add x24, x16, x4, LSL #2\n"
- ".inst 0xf8ac4afa // rprfm pldonce, x12, [x23]\n"
- "add x23, x5, x8, LSL #2\n"
- ".inst 0xf8ac4ada // rprfm pldonce, x12, [x22]\n"
- "add x22, x16, x15, LSL #2\n"
- ".inst 0xf8ac4aba // rprfm pldonce, x12, [x21]\n"
- "add x21, x7, x13, LSL #2\n"
- ".inst 0xf8ac4a9a // rprfm pldonce, x12, [x20]\n"
- "add x20, x14, x8, LSL #2\n"
- ".inst 0xf8ac48da // rprfm pldonce, x12, [x6]\n"
- ".inst 0xf8ac497a // rprfm pldonce, x12, [x11]\n"
- ".inst 0xf8ac4a1a // rprfm pldonce, x12, [x16]\n"
- ".inst 0xf8ac495a // rprfm pldonce, x12, [x10]\n"
- ".inst 0xf8ac493a // rprfm pldonce, x12, [x9]\n"
- ".inst 0xf8ac4b9a // rprfm pldonce, x12, [x28]\n"
- ".inst 0xf8ac4b7a // rprfm pldonce, x12, [x27]\n"
- ".inst 0xf8ac4b5a // rprfm pldonce, x12, [x26]\n"
- ".inst 0xf8ac4b3a // rprfm pldonce, x12, [x25]\n"
- ".inst 0xf8ac4b1a // rprfm pldonce, x12, [x24]\n"
- ".inst 0xf8ac4afa // rprfm pldonce, x12, [x23]\n"
- ".inst 0xf8ac4ada // rprfm pldonce, x12, [x22]\n"
- ".inst 0xf8ac48fa // rprfm pldonce, x12, [x7]\n"
- ".inst 0xf8ac4aba // rprfm pldonce, x12, [x21]\n"
- ".inst 0xf8ac4a9a // rprfm pldonce, x12, [x20]\n"
+ "mov x19, #0xc\n"
+ "and x20, x20, #0x3fffff\n"
+ "mul x19, x19, x4\n"
+ "orr x12, x12, x20, LSL #22\n"
+ "orr x12, x12, x19, LSL #38\n"
+ "add x25, x7, x8, LSL #2\n"
+ "add x24, x5, x13, LSL #2\n"
+ "add x23, x6, x8, LSL #2\n"
+ "add x22, x14, x13, LSL #2\n"
+ "add x21, x7, x4, LSL #2\n"
+ "add x20, x5, x4, LSL #2\n"
+ "add x19, x5, x15, LSL #2\n"
+ "add x11, x7, x15, LSL #2\n"
+ "add x10, x6, x13, LSL #2\n"
+ "add x9, x16, x8, LSL #2\n"
+ "add x28, x16, x13, LSL #2\n"
+ "add x27, x14, x4, LSL #2\n"
+ "add x26, x6, x4, LSL #2\n"
+ ".inst 0xf8ac4b3a // rprfm pldonce, x25, [x12]\n"
+ "add x25, x6, x15, LSL #2\n"
+ ".inst 0xf8ac48ba // rprfm pldonce, x5, [x12]\n"
+ ".inst 0xf8ac4b1a // rprfm pldonce, x24, [x12]\n"
+ "add x24, x14, x15, LSL #2\n"
+ ".inst 0xf8ac49da // rprfm pldonce, x14, [x12]\n"
+ ".inst 0xf8ac4afa // rprfm pldonce, x23, [x12]\n"
+ "add x23, x16, x4, LSL #2\n"
+ ".inst 0xf8ac4ada // rprfm pldonce, x22, [x12]\n"
+ "add x22, x5, x8, LSL #2\n"
+ ".inst 0xf8ac4aba // rprfm pldonce, x21, [x12]\n"
+ "add x21, x16, x15, LSL #2\n"
+ ".inst 0xf8ac4a9a // rprfm pldonce, x20, [x12]\n"
+ "add x20, x7, x13, LSL #2\n"
+ ".inst 0xf8ac4a7a // rprfm pldonce, x19, [x12]\n"
+ "add x19, x14, x8, LSL #2\n"
+ ".inst 0xf8ac497a // rprfm pldonce, x11, [x12]\n"
+ ".inst 0xf8ac48da // rprfm pldonce, x6, [x12]\n"
+ ".inst 0xf8ac495a // rprfm pldonce, x10, [x12]\n"
+ ".inst 0xf8ac4a1a // rprfm pldonce, x16, [x12]\n"
+ ".inst 0xf8ac493a // rprfm pldonce, x9, [x12]\n"
+ ".inst 0xf8ac4b9a // rprfm pldonce, x28, [x12]\n"
+ ".inst 0xf8ac4b7a // rprfm pldonce, x27, [x12]\n"
+ ".inst 0xf8ac4b5a // rprfm pldonce, x26, [x12]\n"
+ ".inst 0xf8ac4b3a // rprfm pldonce, x25, [x12]\n"
+ ".inst 0xf8ac4b1a // rprfm pldonce, x24, [x12]\n"
+ ".inst 0xf8ac4afa // rprfm pldonce, x23, [x12]\n"
+ ".inst 0xf8ac4ada // rprfm pldonce, x22, [x12]\n"
+ ".inst 0xf8ac4aba // rprfm pldonce, x21, [x12]\n"
+ ".inst 0xf8ac48fa // rprfm pldonce, x7, [x12]\n"
+ ".inst 0xf8ac4a9a // rprfm pldonce, x20, [x12]\n"
+ ".inst 0xf8ac4a7a // rprfm pldonce, x19, [x12]\n"
"2:" // Tile loop: Prefetch input rows: End
- "ldr x22, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "mul x21, x2, x22\n" // offset = tile_i * ld_output_row
- "mov x20, #0x3\n"
+ "ldr x21, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "mul x20, x2, x21\n" // offset = tile_i * ld_output_row
+ "mov x19, #0x3\n"
"ld1w { z18.s }, p3/Z, [x17]\n"
- "ldr x27, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "madd x21, x3, x27, x21\n" // offset += tile_j * ld_output_col
- "mul x21, x21, x20\n" // offset *= output_tile_size
+ "ldr x26, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+ "madd x20, x3, x26, x20\n" // offset += tile_j * ld_output_col
+ "mul x20, x20, x19\n" // offset *= output_tile_size
"ld1rw { z17.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ldr x26, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "ldr x25, [%x[params_struct], %[offsetof_args_outptr]]\n"
"addvl x17, x17, #1\n"
- "add x26, x26, x21, LSL #2\n" // outptrs[0] += offset * sizeof(float)
+ "add x25, x25, x20, LSL #2\n" // outptrs[0] += offset * sizeof(float)
".inst 0xa040c220 // ld1w { z0.s-z3.s }, pn8.b/Z, [x17]\n"
- "cntw x25\n"
+ "cntw x24\n"
"addvl x17, x17, #4\n"
".inst 0xa040c224 // ld1w { z4.s-z7.s }, pn8.b/Z, [x17]\n"
- "add x24, x26, x22, LSL #2\n"
+ "add x23, x25, x21, LSL #2\n"
"whilelt p2.s, XZR, %x[n_channels]\n"
"ld1rw { z16.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
"addvl x17, x17, #4\n"
- "cmp x25, %x[n_channels]\n"
+ "cmp x24, %x[n_channels]\n"
"ld1w { z8.s }, p3/Z, [x17]\n"
- "add x23, x24, x22, LSL #2\n"
- "add x22, x27, x27\n"
+ "add x22, x23, x21, LSL #2\n"
+ "add x21, x26, x26\n"
"ld1w { z9.s }, p2/Z, [x7, x8, LSL #2]\n"
- "mov x21, #0x0\n"
- "sub x20, XZR, x25\n"
+ "mov x20, #0x0\n"
+ "sub x19, XZR, x24\n"
"ld1w { z10.s }, p2/Z, [x5]\n"
"ld1w { z11.s }, p2/Z, [x5, x13, LSL #2]\n"
"addvl x17, x17, #1\n"
@@ -202,15 +202,15 @@ void sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"3:" // Tile loop: Channel loop
"movprfx z24, z18\n fmla z24.s, p3/M, z7.s, z9.s\n"
"movprfx z23, z18\n fmla z23.s, p3/M, z8.s, z9.s\n"
- "whilelt p1.s, x25, %x[n_channels]\n"
- "incw x21\n"
+ "whilelt p1.s, x24, %x[n_channels]\n"
+ "incw x20\n"
"movprfx z25, z18\n fmla z25.s, p3/M, z6.s, z9.s\n"
"fmla z24.s, p3/M, z4.s, z13.s\n"
- "incw x25\n"
+ "incw x24\n"
"mov p0.b, p2.b\n"
"movprfx z26, z18\n fmla z26.s, p3/M, z5.s, z9.s\n"
"movprfx z27, z18\n fmla z27.s, p3/M, z4.s, z9.s\n"
- "incw x20\n"
+ "incw x19\n"
"movprfx z28, z18\n fmla z28.s, p3/M, z3.s, z9.s\n"
"fmla z23.s, p3/M, z0.s, z10.s\n"
"ld1w { z10.s }, p2/Z, [x7, x15, LSL #2]\n"
@@ -309,7 +309,7 @@ void sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"fmla z27.s, p3/M, z8.s, z13.s\n"
"ld1w { z13.s }, p2/Z, [x14, x8, LSL #2]\n"
"fmla z26.s, p3/M, z3.s, z12.s\n"
- "whilelt p2.s, x21, %x[n_channels]\n"
+ "whilelt p2.s, x20, %x[n_channels]\n"
"fmla z25.s, p3/M, z8.s, z11.s\n"
"fmla z28.s, p3/M, z5.s, z11.s\n"
".inst 0xa040c220 // ld1w { z0.s-z3.s }, pn8.b/Z, [x17]\n"
@@ -317,7 +317,7 @@ void sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"fmla z29.s, p3/M, z8.s, z13.s\n"
"fmla z30.s, p3/M, z7.s, z13.s\n"
"addvl x14, x14, #1\n"
- "cmp x25, %x[n_channels]\n"
+ "cmp x24, %x[n_channels]\n"
"fmla z31.s, p3/M, z6.s, z13.s\n"
"fmax z23.s, p3/M, z23.s, z17.s\n"
".inst 0xa040c224 // ld1w { z4.s-z7.s }, pn8.b/Z, [x17]\n"
@@ -327,21 +327,21 @@ void sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"ld1w { z11.s }, p1/Z, [x5, x13, LSL #2]\n"
".inst 0xc1b0ca3c // fclamp { z28.s-z31.s }, z17.s, z16.s\n"
"ld1w { z12.s }, p1/Z, [x14]\n"
- "st1w { z23.s }, p0, [x26]\n"
+ "st1w { z23.s }, p0, [x25]\n"
"ld1w { z13.s }, p1/Z, [x6, x8, LSL #2]\n"
- "st1w { z24.s }, p0, [x26, x27, LSL #2]\n"
- "st1w { z25.s }, p0, [x26, x22, LSL #2]\n"
- "addvl x26, x26, #1\n"
+ "st1w { z24.s }, p0, [x25, x26, LSL #2]\n"
+ "st1w { z25.s }, p0, [x25, x21, LSL #2]\n"
+ "addvl x25, x25, #1\n"
"ld1w { z8.s }, p3/Z, [x17]\n"
"addvl x17, x17, #1\n"
- "st1w { z26.s }, p0, [x24]\n"
- "st1w { z27.s }, p0, [x24, x27, LSL #2]\n"
- "st1w { z28.s }, p0, [x24, x22, LSL #2]\n"
- "addvl x24, x24, #1\n"
- "st1w { z29.s }, p0, [x23]\n"
- "st1w { z30.s }, p0, [x23, x27, LSL #2]\n"
- "st1w { z31.s }, p0, [x23, x22, LSL #2]\n"
+ "st1w { z26.s }, p0, [x23]\n"
+ "st1w { z27.s }, p0, [x23, x26, LSL #2]\n"
+ "st1w { z28.s }, p0, [x23, x21, LSL #2]\n"
"addvl x23, x23, #1\n"
+ "st1w { z29.s }, p0, [x22]\n"
+ "st1w { z30.s }, p0, [x22, x26, LSL #2]\n"
+ "st1w { z31.s }, p0, [x22, x21, LSL #2]\n"
+ "addvl x22, x22, #1\n"
"blt 3b\n"
"4:" // Tile loop: Channel tail
"movprfx z24, z18\n fmla z24.s, p3/M, z7.s, z9.s\n"
@@ -351,26 +351,26 @@ void sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"movprfx z25, z18\n fmla z25.s, p3/M, z6.s, z9.s\n"
"fmla z24.s, p3/M, z4.s, z13.s\n"
"ldr x2, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "add x21, x2, #0x1\n"
+ "add x20, x2, #0x1\n"
"movprfx z26, z18\n fmla z26.s, p3/M, z5.s, z9.s\n"
"movprfx z27, z18\n fmla z27.s, p3/M, z4.s, z9.s\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "cmp x3, x20\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "cmp x3, x19\n"
"movprfx z28, z18\n fmla z28.s, p3/M, z3.s, z9.s\n"
"fmla z23.s, p3/M, z0.s, z10.s\n"
"ld1w { z10.s }, p2/Z, [x7, x15, LSL #2]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
"fmla z25.s, p3/M, z2.s, z11.s\n"
"ld1w { z11.s }, p2/Z, [x7, x4, LSL #2]\n"
"movprfx z29, z18\n fmla z29.s, p3/M, z2.s, z9.s\n"
- "csel x2, x2, x21, LT\n"
+ "csel x2, x2, x20, LT\n"
"fmla z24.s, p3/M, z6.s, z11.s\n"
"movprfx z31, z18\n fmla z31.s, p3/M, z0.s, z9.s\n"
"mov p0.b, p2.b\n"
"csel x3, x3, XZR, LT\n"
"fmla z23.s, p3/M, z5.s, z13.s\n"
"fmla z25.s, p3/M, z3.s, z13.s\n"
- "cmp x2, x20\n"
+ "cmp x2, x19\n"
"fmla z26.s, p3/M, z2.s, z13.s\n"
"fmla z27.s, p3/M, z1.s, z13.s\n"
"fmla z28.s, p3/M, z0.s, z13.s\n"
@@ -459,21 +459,21 @@ void sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"fmax z23.s, p3/M, z23.s, z17.s\n"
"fmin z23.s, p3/M, z23.s, z16.s\n"
".inst 0xc1b0ca38 // fclamp { z24.s-z27.s }, z17.s, z16.s\n"
- "st1w { z23.s }, p0, [x26]\n"
+ "st1w { z23.s }, p0, [x25]\n"
".inst 0xc1b0ca3c // fclamp { z28.s-z31.s }, z17.s, z16.s\n"
- "st1w { z24.s }, p0, [x26, x27, LSL #2]\n"
- "st1w { z25.s }, p0, [x26, x22, LSL #2]\n"
- "st1w { z26.s }, p0, [x24]\n"
- "st1w { z27.s }, p0, [x24, x27, LSL #2]\n"
- "st1w { z28.s }, p0, [x24, x22, LSL #2]\n"
- "st1w { z29.s }, p0, [x23]\n"
- "st1w { z30.s }, p0, [x23, x27, LSL #2]\n"
- "st1w { z31.s }, p0, [x23, x22, LSL #2]\n"
+ "st1w { z24.s }, p0, [x25, x26, LSL #2]\n"
+ "st1w { z25.s }, p0, [x25, x21, LSL #2]\n"
+ "st1w { z26.s }, p0, [x23]\n"
+ "st1w { z27.s }, p0, [x23, x26, LSL #2]\n"
+ "st1w { z28.s }, p0, [x23, x21, LSL #2]\n"
+ "st1w { z29.s }, p0, [x22]\n"
+ "st1w { z30.s }, p0, [x22, x26, LSL #2]\n"
+ "st1w { z31.s }, p0, [x22, x21, LSL #2]\n"
"blt 1b\n"
".inst 0xd503467f // SMSTOP\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
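
Context for the clobber-list change restored above: under the AAPCS64 procedure-call standard, x19 is a callee-saved general-purpose register, so any inline-assembly statement that writes it must name "x19" in its clobber list for the compiler to spill and restore the caller's value around the asm block. A minimal sketch of that convention, assuming nothing beyond standard GCC/Clang extended asm; the function name and the arithmetic are illustrative, not taken from these kernels:

#include <cstdint>

// Hypothetical example: writing the callee-saved x19 inside inline asm.
// Declaring "x19" in the clobbers tells the compiler to preserve the
// caller's value; omitting it would silently corrupt surrounding code.
static inline uint64_t add_via_x19(uint64_t a, uint64_t b)
{
    uint64_t result;
    __asm__ __volatile__(
        "mov x19, %x[a]\n"            // write a callee-saved register...
        "add %x[res], x19, %x[b]\n"   // ...then consume it
        : [res] "=r" (result)
        : [a] "r" (a), [b] "r" (b)
        : "x19"                       // callee-saved: must be declared when written
    );
    return result;
}

The same rule is why each reverted kernel's clobber list grows from x20–x28 back to x19–x28 once the generated assembly uses x19 again.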
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
index fd648a392f..ab910c144d 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,354 +87,354 @@ void sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
activation_min, activation_max);
__asm__ __volatile__(
- "ldr x17, [%x[params_struct], %[offsetof_args_params]]\n"
+ "ldr x16, [%x[params_struct], %[offsetof_args_params]]\n"
".inst 0xd503477f // SMSTART ZA\n"
- "add x16, %x[params_struct], %[offsetof_Args_inptrs]\n"
+ "add x15, %x[params_struct], %[offsetof_Args_inptrs]\n"
"ptrue p3.b\n"
".inst 0x25207810 // ptrue pn8.b\n"
- "ld1w { z18.s }, p3/Z, [x17]\n"
- "addvl x17, x17, #1\n"
- "ldp x15, x14, [x16, #0x0]\n"
- "ldp x13, x12, [x16, #0x10]\n"
- "cntw x11\n"
- ".inst 0xa040c220 // ld1w { z0.s-z3.s }, pn8.b/Z, [x17]\n"
- "addvl x17, x17, #4\n"
- "ldr x10, [x16, #0x20]\n"
- "mov x9, #0x0\n"
+ "ld1w { z18.s }, p3/Z, [x16]\n"
+ "addvl x16, x16, #1\n"
+ "ldp x14, x13, [x15, #0x0]\n"
+ "ldp x12, x11, [x15, #0x10]\n"
+ "cntw x10\n"
+ ".inst 0xa040c200 // ld1w { z0.s-z3.s }, pn8.b/Z, [x16]\n"
+ "addvl x16, x16, #4\n"
+ "ldr x9, [x15, #0x20]\n"
+ "mov x28, #0x0\n"
"whilelt p2.s, XZR, %x[n_channels]\n"
- ".inst 0xa040c224 // ld1w { z4.s-z7.s }, pn8.b/Z, [x17]\n"
- "addvl x17, x17, #4\n"
- "cmp x11, %x[n_channels]\n"
- "ldr x28, [%x[params_struct], %[offsetof_args_outptrs]]\n"
+ ".inst 0xa040c204 // ld1w { z4.s-z7.s }, pn8.b/Z, [x16]\n"
+ "addvl x16, x16, #4\n"
+ "cmp x10, %x[n_channels]\n"
+ "ldr x27, [%x[params_struct], %[offsetof_args_outptrs]]\n"
"ld1rw { z17.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
"ld1rw { z16.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "sub x27, XZR, x11\n"
- "ld1w { z8.s }, p3/Z, [x17]\n"
- "addvl x17, x17, #1\n"
- "ld1w { z9.s }, p2/Z, [x15, x9, LSL #2]\n"
- "ld1w { z10.s }, p2/Z, [x14, x9, LSL #2]\n"
- "ld1w { z11.s }, p2/Z, [x13, x9, LSL #2]\n"
- "ld1w { z12.s }, p2/Z, [x12, x9, LSL #2]\n"
- "ld1w { z13.s }, p2/Z, [x10, x9, LSL #2]\n"
+ "sub x26, XZR, x10\n"
+ "ld1w { z8.s }, p3/Z, [x16]\n"
+ "addvl x16, x16, #1\n"
+ "ld1w { z9.s }, p2/Z, [x14, x28, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x13, x28, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x12, x28, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x11, x28, LSL #2]\n"
+ "ld1w { z13.s }, p2/Z, [x9, x28, LSL #2]\n"
"bge 2f\n"
"1:" // Channel loop
"movprfx z23, z18\n fmla z23.s, p3/M, z8.s, z9.s\n"
"movprfx z24, z18\n fmla z24.s, p3/M, z7.s, z9.s\n"
- "ldr x26, [x16, #0x30]\n"
- "incw x27\n"
+ "ldr x25, [x15, #0x30]\n"
+ "incw x26\n"
"movprfx z25, z18\n fmla z25.s, p3/M, z6.s, z9.s\n"
"fmla z23.s, p3/M, z0.s, z10.s\n"
- "ldr x25, [x16, #0x38]\n"
+ "ldr x24, [x15, #0x38]\n"
"mov p1.b, p2.b\n"
"fmla z24.s, p3/M, z4.s, z13.s\n"
"movprfx z26, z18\n fmla z26.s, p3/M, z5.s, z9.s\n"
- "ldr x24, [x16, #0x28]\n"
- "whilelt p0.s, x11, %x[n_channels]\n"
+ "ldr x23, [x15, #0x28]\n"
+ "whilelt p0.s, x10, %x[n_channels]\n"
"movprfx z27, z18\n fmla z27.s, p3/M, z4.s, z9.s\n"
"movprfx z28, z18\n fmla z28.s, p3/M, z3.s, z9.s\n"
- "ldr x14, [x16, #0x48]\n"
- "ld1w { z10.s }, p2/Z, [x14, x9, LSL #2]\n"
+ "ldr x13, [x15, #0x48]\n"
+ "ld1w { z10.s }, p2/Z, [x13, x28, LSL #2]\n"
"fmla z25.s, p3/M, z2.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x26, x9, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x25, x28, LSL #2]\n"
"movprfx z29, z18\n fmla z29.s, p3/M, z2.s, z9.s\n"
- "ldr x15, [x16, #0x40]\n"
+ "ldr x14, [x15, #0x40]\n"
"fmla z23.s, p3/M, z5.s, z13.s\n"
"fmla z24.s, p3/M, z6.s, z11.s\n"
- "ldr x13, [x16, #0x50]\n"
+ "ldr x12, [x15, #0x50]\n"
"movprfx z31, z18\n fmla z31.s, p3/M, z0.s, z9.s\n"
"fmla z25.s, p3/M, z3.s, z13.s\n"
- "ldr x12, [x16, #0x58]\n"
+ "ldr x11, [x15, #0x58]\n"
"fmla z26.s, p3/M, z2.s, z13.s\n"
"fmla z27.s, p3/M, z1.s, z13.s\n"
- "ldr x10, [x16, #0x60]\n"
+ "ldr x9, [x15, #0x60]\n"
"fmla z28.s, p3/M, z0.s, z13.s\n"
- "ld1w { z13.s }, p2/Z, [x25, x9, LSL #2]\n"
+ "ld1w { z13.s }, p2/Z, [x24, x28, LSL #2]\n"
"fmla z29.s, p3/M, z6.s, z12.s\n"
- "ldr x26, [x16, #0x70]\n"
- "ld1w { z12.s }, p2/Z, [x24, x9, LSL #2]\n"
+ "ldr x25, [x15, #0x70]\n"
+ "ld1w { z12.s }, p2/Z, [x23, x28, LSL #2]\n"
"movprfx z30, z18\n fmla z30.s, p3/M, z1.s, z9.s\n"
"fmla z23.s, p3/M, z7.s, z11.s\n"
- "ldr x24, [x16, #0x68]\n"
+ "ldr x23, [x15, #0x68]\n"
"fmla z24.s, p3/M, z0.s, z13.s\n"
"fmla z31.s, p3/M, z8.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x15, x9, LSL #2]\n"
- "ldr x25, [x16, #0x78]\n"
+ "ld1w { z12.s }, p2/Z, [x14, x28, LSL #2]\n"
+ "ldr x24, [x15, #0x78]\n"
"fmla z26.s, p3/M, z4.s, z11.s\n"
"fmla z27.s, p3/M, z3.s, z11.s\n"
- "ldr x15, [x16, #0x80]\n"
- "ld1w { z18.s }, p3/Z, [x17]\n"
+ "ldr x14, [x15, #0x80]\n"
+ "ld1w { z18.s }, p3/Z, [x16]\n"
"fmla z30.s, p3/M, z0.s, z11.s\n"
"fmla z28.s, p3/M, z4.s, z10.s\n"
- "ldr x14, [x16, #0x88]\n"
- "addvl x17, x17, #1\n"
+ "ldr x13, [x15, #0x88]\n"
+ "addvl x16, x16, #1\n"
"fmla z29.s, p3/M, z1.s, z11.s\n"
"fmla z23.s, p3/M, z1.s, z13.s\n"
- "ld1w { z11.s }, p2/Z, [x13, x9, LSL #2]\n"
- "ldr x13, [x16, #0x90]\n"
+ "ld1w { z11.s }, p2/Z, [x12, x28, LSL #2]\n"
+ "ldr x12, [x15, #0x90]\n"
"fmla z24.s, p3/M, z2.s, z12.s\n"
"fmla z25.s, p3/M, z1.s, z12.s\n"
- "ld1w { z13.s }, p2/Z, [x12, x9, LSL #2]\n"
- "ldr x12, [x16, #0x98]\n"
- "ld1w { z12.s }, p2/Z, [x10, x9, LSL #2]\n"
+ "ld1w { z13.s }, p2/Z, [x11, x28, LSL #2]\n"
+ "ldr x11, [x15, #0x98]\n"
+ "ld1w { z12.s }, p2/Z, [x9, x28, LSL #2]\n"
"fmla z27.s, p3/M, z5.s, z10.s\n"
"fmla z30.s, p3/M, z2.s, z10.s\n"
- "ldr x10, [x16, #0xa0]\n"
+ "ldr x9, [x15, #0xa0]\n"
"fmla z26.s, p3/M, z0.s, z11.s\n"
"fmla z28.s, p3/M, z2.s, z13.s\n"
- "ldr x23, [x28, #0x0]\n"
+ "ldr x22, [x27, #0x0]\n"
"fmla z24.s, p3/M, z8.s, z10.s\n"
"fmla z25.s, p3/M, z7.s, z10.s\n"
- "ldr x22, [x28, #0x8]\n"
+ "ldr x21, [x27, #0x8]\n"
"fmla z31.s, p3/M, z1.s, z10.s\n"
"fmla z29.s, p3/M, z3.s, z12.s\n"
- "ld1w { z10.s }, p2/Z, [x24, x9, LSL #2]\n"
- "ldr x24, [x16, #0xa8]\n"
+ "ld1w { z10.s }, p2/Z, [x23, x28, LSL #2]\n"
+ "ldr x23, [x15, #0xa8]\n"
"fmla z26.s, p3/M, z6.s, z12.s\n"
"fmla z27.s, p3/M, z7.s, z10.s\n"
- "ld1w { z12.s }, p2/Z, [x15, x9, LSL #2]\n"
- "ldr x15, [x16, #0xc0]\n"
+ "ld1w { z12.s }, p2/Z, [x14, x28, LSL #2]\n"
+ "ldr x14, [x15, #0xc0]\n"
"fmla z28.s, p3/M, z6.s, z10.s\n"
"fmla z30.s, p3/M, z4.s, z10.s\n"
- "ldr x21, [x28, #0x10]\n"
+ "ldr x20, [x27, #0x10]\n"
"fmla z23.s, p3/M, z3.s, z11.s\n"
"fmla z25.s, p3/M, z5.s, z13.s\n"
- "ld1w { z11.s }, p2/Z, [x26, x9, LSL #2]\n"
- "ldr x26, [x16, #0xb0]\n"
+ "ld1w { z11.s }, p2/Z, [x25, x28, LSL #2]\n"
+ "ldr x25, [x15, #0xb0]\n"
"fmla z29.s, p3/M, z5.s, z10.s\n"
"fmla z31.s, p3/M, z3.s, z10.s\n"
- "ld1w { z13.s }, p2/Z, [x25, x9, LSL #2]\n"
- "ldr x25, [x16, #0xb8]\n"
+ "ld1w { z13.s }, p2/Z, [x24, x28, LSL #2]\n"
+ "ldr x24, [x15, #0xb8]\n"
"fmla z26.s, p3/M, z8.s, z10.s\n"
"fmla z28.s, p3/M, z8.s, z11.s\n"
- "ldr x20, [x28, #0x18]\n"
+ "ldr x19, [x27, #0x18]\n"
"fmla z30.s, p3/M, z6.s, z13.s\n"
"fmla z24.s, p3/M, z3.s, z12.s\n"
"fmla z27.s, p3/M, z0.s, z12.s\n"
"fmla z31.s, p3/M, z5.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x14, x9, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x13, x28, LSL #2]\n"
"fmla z29.s, p3/M, z7.s, z13.s\n"
- "ld1w { z13.s }, p2/Z, [x13, x9, LSL #2]\n"
+ "ld1w { z13.s }, p2/Z, [x12, x28, LSL #2]\n"
"fmla z23.s, p3/M, z4.s, z12.s\n"
"fmla z26.s, p3/M, z1.s, z12.s\n"
"fmla z24.s, p3/M, z5.s, z11.s\n"
- "ld1w { z12.s }, p2/Z, [x12, x9, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x11, x28, LSL #2]\n"
"fmla z25.s, p3/M, z4.s, z11.s\n"
"fmla z27.s, p3/M, z2.s, z11.s\n"
"fmla z28.s, p3/M, z1.s, z11.s\n"
"fmla z30.s, p3/M, z8.s, z13.s\n"
- "ld1w { z11.s }, p2/Z, [x10, x9, LSL #2]\n"
- "ldr x10, [x16, #0x20]\n"
+ "ld1w { z11.s }, p2/Z, [x9, x28, LSL #2]\n"
+ "ldr x9, [x15, #0x20]\n"
"fmla z23.s, p3/M, z2.s, z11.s\n"
"fmla z26.s, p3/M, z7.s, z12.s\n"
"fmla z27.s, p3/M, z6.s, z12.s\n"
"fmla z29.s, p3/M, z4.s, z12.s\n"
"fmla z30.s, p3/M, z3.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x26, x9, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x25, x28, LSL #2]\n"
"fmla z31.s, p3/M, z7.s, z13.s\n"
- "ld1w { z13.s }, p2/Z, [x24, x9, LSL #2]\n"
+ "ld1w { z13.s }, p2/Z, [x23, x28, LSL #2]\n"
"fmla z23.s, p3/M, z6.s, z12.s\n"
"fmla z31.s, p3/M, z4.s, z13.s\n"
"fmla z24.s, p3/M, z1.s, z11.s\n"
"fmla z25.s, p3/M, z0.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x25, x9, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x24, x28, LSL #2]\n"
"fmax z23.s, p3/M, z23.s, z17.s\n"
"fmla z28.s, p3/M, z7.s, z13.s\n"
"fmla z30.s, p3/M, z5.s, z13.s\n"
"fmla z29.s, p3/M, z0.s, z12.s\n"
"fmla z31.s, p3/M, z2.s, z11.s\n"
"fmla z27.s, p3/M, z8.s, z13.s\n"
- "ld1w { z13.s }, p2/Z, [x15, x9, LSL #2]\n"
- "ldp x15, x14, [x16, #0x0]\n"
+ "ld1w { z13.s }, p2/Z, [x14, x28, LSL #2]\n"
+ "ldp x14, x13, [x15, #0x0]\n"
"fmla z26.s, p3/M, z3.s, z12.s\n"
"fmla z25.s, p3/M, z8.s, z11.s\n"
- "ldp x13, x12, [x16, #0x10]\n"
- "incw x9\n"
+ "ldp x12, x11, [x15, #0x10]\n"
+ "incw x28\n"
"fmin z23.s, p3/M, z23.s, z16.s\n"
- "st1w { z23.s }, p1, [x23, x27, LSL #2]\n"
- "ldr x23, [x28, #0x20]\n"
+ "st1w { z23.s }, p1, [x22, x26, LSL #2]\n"
+ "ldr x22, [x27, #0x20]\n"
"fmla z28.s, p3/M, z5.s, z11.s\n"
"fmla z29.s, p3/M, z8.s, z13.s\n"
"fmla z30.s, p3/M, z7.s, z13.s\n"
- "ld1w { z9.s }, p0/Z, [x15, x11, LSL #2]\n"
- "whilelt p2.s, x9, %x[n_channels]\n"
+ "ld1w { z9.s }, p0/Z, [x14, x10, LSL #2]\n"
+ "whilelt p2.s, x28, %x[n_channels]\n"
"fmla z31.s, p3/M, z6.s, z13.s\n"
".inst 0xc1b0ca38 // fclamp { z24.s-z27.s }, z17.s, z16.s\n"
- "st1w { z24.s }, p1, [x22, x27, LSL #2]\n"
- "ldr x22, [x28, #0x28]\n"
- "st1w { z25.s }, p1, [x21, x27, LSL #2]\n"
- "ldr x21, [x28, #0x30]\n"
- "ld1w { z10.s }, p0/Z, [x14, x11, LSL #2]\n"
+ "st1w { z24.s }, p1, [x21, x26, LSL #2]\n"
+ "ldr x21, [x27, #0x28]\n"
+ "st1w { z25.s }, p1, [x20, x26, LSL #2]\n"
+ "ldr x20, [x27, #0x30]\n"
+ "ld1w { z10.s }, p0/Z, [x13, x10, LSL #2]\n"
".inst 0xc1b0ca3c // fclamp { z28.s-z31.s }, z17.s, z16.s\n"
- "st1w { z26.s }, p1, [x20, x27, LSL #2]\n"
- "ldr x20, [x28, #0x38]\n"
- "ld1w { z11.s }, p0/Z, [x13, x11, LSL #2]\n"
- "st1w { z27.s }, p1, [x23, x27, LSL #2]\n"
- "ldr x23, [x28, #0x40]\n"
- "ld1w { z12.s }, p0/Z, [x12, x11, LSL #2]\n"
- "ld1w { z13.s }, p0/Z, [x10, x11, LSL #2]\n"
- "incw x11\n"
- "cmp x11, %x[n_channels]\n"
- "st1w { z28.s }, p1, [x22, x27, LSL #2]\n"
- ".inst 0xa040c220 // ld1w { z0.s-z3.s }, pn8.b/Z, [x17]\n"
- "addvl x17, x17, #4\n"
- "st1w { z29.s }, p1, [x21, x27, LSL #2]\n"
- ".inst 0xa040c224 // ld1w { z4.s-z7.s }, pn8.b/Z, [x17]\n"
- "addvl x17, x17, #4\n"
- "st1w { z30.s }, p1, [x20, x27, LSL #2]\n"
- "st1w { z31.s }, p1, [x23, x27, LSL #2]\n"
- "ld1w { z8.s }, p3/Z, [x17]\n"
- "addvl x17, x17, #1\n"
+ "st1w { z26.s }, p1, [x19, x26, LSL #2]\n"
+ "ldr x19, [x27, #0x38]\n"
+ "ld1w { z11.s }, p0/Z, [x12, x10, LSL #2]\n"
+ "st1w { z27.s }, p1, [x22, x26, LSL #2]\n"
+ "ldr x22, [x27, #0x40]\n"
+ "ld1w { z12.s }, p0/Z, [x11, x10, LSL #2]\n"
+ "ld1w { z13.s }, p0/Z, [x9, x10, LSL #2]\n"
+ "incw x10\n"
+ "cmp x10, %x[n_channels]\n"
+ "st1w { z28.s }, p1, [x21, x26, LSL #2]\n"
+ ".inst 0xa040c200 // ld1w { z0.s-z3.s }, pn8.b/Z, [x16]\n"
+ "addvl x16, x16, #4\n"
+ "st1w { z29.s }, p1, [x20, x26, LSL #2]\n"
+ ".inst 0xa040c204 // ld1w { z4.s-z7.s }, pn8.b/Z, [x16]\n"
+ "addvl x16, x16, #4\n"
+ "st1w { z30.s }, p1, [x19, x26, LSL #2]\n"
+ "st1w { z31.s }, p1, [x22, x26, LSL #2]\n"
+ "ld1w { z8.s }, p3/Z, [x16]\n"
+ "addvl x16, x16, #1\n"
"blt 1b\n"
"2:" // Channel tail
"movprfx z23, z18\n fmla z23.s, p3/M, z8.s, z9.s\n"
"movprfx z24, z18\n fmla z24.s, p3/M, z7.s, z9.s\n"
- "ldr x26, [x16, #0x30]\n"
- "incw x27\n"
+ "ldr x25, [x15, #0x30]\n"
+ "incw x26\n"
"movprfx z25, z18\n fmla z25.s, p3/M, z6.s, z9.s\n"
"fmla z23.s, p3/M, z0.s, z10.s\n"
- "ldr x25, [x16, #0x38]\n"
+ "ldr x24, [x15, #0x38]\n"
"mov p1.b, p2.b\n"
"fmla z24.s, p3/M, z4.s, z13.s\n"
"movprfx z26, z18\n fmla z26.s, p3/M, z5.s, z9.s\n"
- "ldr x24, [x16, #0x28]\n"
+ "ldr x23, [x15, #0x28]\n"
"movprfx z27, z18\n fmla z27.s, p3/M, z4.s, z9.s\n"
"movprfx z28, z18\n fmla z28.s, p3/M, z3.s, z9.s\n"
- "ldr x14, [x16, #0x48]\n"
- "ld1w { z10.s }, p2/Z, [x14, x9, LSL #2]\n"
+ "ldr x13, [x15, #0x48]\n"
+ "ld1w { z10.s }, p2/Z, [x13, x28, LSL #2]\n"
"fmla z25.s, p3/M, z2.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x26, x9, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x25, x28, LSL #2]\n"
"movprfx z29, z18\n fmla z29.s, p3/M, z2.s, z9.s\n"
- "ldr x15, [x16, #0x40]\n"
+ "ldr x14, [x15, #0x40]\n"
"fmla z23.s, p3/M, z5.s, z13.s\n"
"fmla z24.s, p3/M, z6.s, z11.s\n"
- "ldr x13, [x16, #0x50]\n"
+ "ldr x12, [x15, #0x50]\n"
"movprfx z31, z18\n fmla z31.s, p3/M, z0.s, z9.s\n"
"fmla z25.s, p3/M, z3.s, z13.s\n"
- "ldr x12, [x16, #0x58]\n"
+ "ldr x11, [x15, #0x58]\n"
"fmla z26.s, p3/M, z2.s, z13.s\n"
"fmla z27.s, p3/M, z1.s, z13.s\n"
- "ldr x10, [x16, #0x60]\n"
+ "ldr x9, [x15, #0x60]\n"
"fmla z28.s, p3/M, z0.s, z13.s\n"
- "ld1w { z13.s }, p2/Z, [x25, x9, LSL #2]\n"
+ "ld1w { z13.s }, p2/Z, [x24, x28, LSL #2]\n"
"fmla z29.s, p3/M, z6.s, z12.s\n"
- "ldr x26, [x16, #0x70]\n"
- "ld1w { z12.s }, p2/Z, [x24, x9, LSL #2]\n"
+ "ldr x25, [x15, #0x70]\n"
+ "ld1w { z12.s }, p2/Z, [x23, x28, LSL #2]\n"
"movprfx z30, z18\n fmla z30.s, p3/M, z1.s, z9.s\n"
"fmla z23.s, p3/M, z7.s, z11.s\n"
- "ldr x24, [x16, #0x68]\n"
+ "ldr x23, [x15, #0x68]\n"
"fmla z24.s, p3/M, z0.s, z13.s\n"
"fmla z31.s, p3/M, z8.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x15, x9, LSL #2]\n"
- "ldr x25, [x16, #0x78]\n"
+ "ld1w { z12.s }, p2/Z, [x14, x28, LSL #2]\n"
+ "ldr x24, [x15, #0x78]\n"
"fmla z26.s, p3/M, z4.s, z11.s\n"
"fmla z27.s, p3/M, z3.s, z11.s\n"
- "ldr x15, [x16, #0x80]\n"
+ "ldr x14, [x15, #0x80]\n"
"fmla z30.s, p3/M, z0.s, z11.s\n"
"fmla z28.s, p3/M, z4.s, z10.s\n"
- "ldr x14, [x16, #0x88]\n"
+ "ldr x13, [x15, #0x88]\n"
"fmla z29.s, p3/M, z1.s, z11.s\n"
"fmla z23.s, p3/M, z1.s, z13.s\n"
- "ld1w { z11.s }, p2/Z, [x13, x9, LSL #2]\n"
- "ldr x13, [x16, #0x90]\n"
+ "ld1w { z11.s }, p2/Z, [x12, x28, LSL #2]\n"
+ "ldr x12, [x15, #0x90]\n"
"fmla z24.s, p3/M, z2.s, z12.s\n"
"fmla z25.s, p3/M, z1.s, z12.s\n"
- "ld1w { z13.s }, p2/Z, [x12, x9, LSL #2]\n"
- "ldr x12, [x16, #0x98]\n"
- "ld1w { z12.s }, p2/Z, [x10, x9, LSL #2]\n"
+ "ld1w { z13.s }, p2/Z, [x11, x28, LSL #2]\n"
+ "ldr x11, [x15, #0x98]\n"
+ "ld1w { z12.s }, p2/Z, [x9, x28, LSL #2]\n"
"fmla z27.s, p3/M, z5.s, z10.s\n"
"fmla z30.s, p3/M, z2.s, z10.s\n"
- "ldr x10, [x16, #0xa0]\n"
+ "ldr x9, [x15, #0xa0]\n"
"fmla z26.s, p3/M, z0.s, z11.s\n"
"fmla z28.s, p3/M, z2.s, z13.s\n"
- "ldr x23, [x28, #0x0]\n"
+ "ldr x22, [x27, #0x0]\n"
"fmla z24.s, p3/M, z8.s, z10.s\n"
"fmla z25.s, p3/M, z7.s, z10.s\n"
- "ldr x22, [x28, #0x8]\n"
+ "ldr x21, [x27, #0x8]\n"
"fmla z31.s, p3/M, z1.s, z10.s\n"
"fmla z29.s, p3/M, z3.s, z12.s\n"
- "ld1w { z10.s }, p2/Z, [x24, x9, LSL #2]\n"
- "ldr x24, [x16, #0xa8]\n"
+ "ld1w { z10.s }, p2/Z, [x23, x28, LSL #2]\n"
+ "ldr x23, [x15, #0xa8]\n"
"fmla z26.s, p3/M, z6.s, z12.s\n"
"fmla z27.s, p3/M, z7.s, z10.s\n"
- "ld1w { z12.s }, p2/Z, [x15, x9, LSL #2]\n"
- "ldr x15, [x16, #0xc0]\n"
+ "ld1w { z12.s }, p2/Z, [x14, x28, LSL #2]\n"
+ "ldr x14, [x15, #0xc0]\n"
"fmla z28.s, p3/M, z6.s, z10.s\n"
"fmla z30.s, p3/M, z4.s, z10.s\n"
- "ldr x21, [x28, #0x10]\n"
+ "ldr x20, [x27, #0x10]\n"
"fmla z23.s, p3/M, z3.s, z11.s\n"
"fmla z25.s, p3/M, z5.s, z13.s\n"
- "ld1w { z11.s }, p2/Z, [x26, x9, LSL #2]\n"
- "ldr x26, [x16, #0xb0]\n"
+ "ld1w { z11.s }, p2/Z, [x25, x28, LSL #2]\n"
+ "ldr x25, [x15, #0xb0]\n"
"fmla z29.s, p3/M, z5.s, z10.s\n"
"fmla z31.s, p3/M, z3.s, z10.s\n"
- "ld1w { z13.s }, p2/Z, [x25, x9, LSL #2]\n"
- "ldr x25, [x16, #0xb8]\n"
+ "ld1w { z13.s }, p2/Z, [x24, x28, LSL #2]\n"
+ "ldr x24, [x15, #0xb8]\n"
"fmla z26.s, p3/M, z8.s, z10.s\n"
"fmla z28.s, p3/M, z8.s, z11.s\n"
- "ldr x20, [x28, #0x18]\n"
+ "ldr x19, [x27, #0x18]\n"
"fmla z30.s, p3/M, z6.s, z13.s\n"
"fmla z24.s, p3/M, z3.s, z12.s\n"
"fmla z27.s, p3/M, z0.s, z12.s\n"
"fmla z31.s, p3/M, z5.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x14, x9, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x13, x28, LSL #2]\n"
"fmla z29.s, p3/M, z7.s, z13.s\n"
- "ld1w { z13.s }, p2/Z, [x13, x9, LSL #2]\n"
+ "ld1w { z13.s }, p2/Z, [x12, x28, LSL #2]\n"
"fmla z23.s, p3/M, z4.s, z12.s\n"
"fmla z26.s, p3/M, z1.s, z12.s\n"
"fmla z24.s, p3/M, z5.s, z11.s\n"
- "ld1w { z12.s }, p2/Z, [x12, x9, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x11, x28, LSL #2]\n"
"fmla z25.s, p3/M, z4.s, z11.s\n"
"fmla z27.s, p3/M, z2.s, z11.s\n"
"fmla z28.s, p3/M, z1.s, z11.s\n"
"fmla z30.s, p3/M, z8.s, z13.s\n"
- "ld1w { z11.s }, p2/Z, [x10, x9, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x9, x28, LSL #2]\n"
"fmla z23.s, p3/M, z2.s, z11.s\n"
"fmla z26.s, p3/M, z7.s, z12.s\n"
"fmla z27.s, p3/M, z6.s, z12.s\n"
"fmla z29.s, p3/M, z4.s, z12.s\n"
"fmla z30.s, p3/M, z3.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x26, x9, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x25, x28, LSL #2]\n"
"fmla z31.s, p3/M, z7.s, z13.s\n"
- "ld1w { z13.s }, p2/Z, [x24, x9, LSL #2]\n"
+ "ld1w { z13.s }, p2/Z, [x23, x28, LSL #2]\n"
"fmla z23.s, p3/M, z6.s, z12.s\n"
"fmla z31.s, p3/M, z4.s, z13.s\n"
"fmla z24.s, p3/M, z1.s, z11.s\n"
"fmla z25.s, p3/M, z0.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x25, x9, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x24, x28, LSL #2]\n"
"fmax z23.s, p3/M, z23.s, z17.s\n"
"fmla z28.s, p3/M, z7.s, z13.s\n"
"fmla z30.s, p3/M, z5.s, z13.s\n"
"fmla z29.s, p3/M, z0.s, z12.s\n"
"fmla z31.s, p3/M, z2.s, z11.s\n"
"fmla z27.s, p3/M, z8.s, z13.s\n"
- "ld1w { z13.s }, p2/Z, [x15, x9, LSL #2]\n"
+ "ld1w { z13.s }, p2/Z, [x14, x28, LSL #2]\n"
"fmla z26.s, p3/M, z3.s, z12.s\n"
"fmla z25.s, p3/M, z8.s, z11.s\n"
"fmin z23.s, p3/M, z23.s, z16.s\n"
- "st1w { z23.s }, p1, [x23, x27, LSL #2]\n"
- "ldr x23, [x28, #0x20]\n"
+ "st1w { z23.s }, p1, [x22, x26, LSL #2]\n"
+ "ldr x22, [x27, #0x20]\n"
"fmla z28.s, p3/M, z5.s, z11.s\n"
"fmla z29.s, p3/M, z8.s, z13.s\n"
"fmla z30.s, p3/M, z7.s, z13.s\n"
"fmla z31.s, p3/M, z6.s, z13.s\n"
".inst 0xc1b0ca38 // fclamp { z24.s-z27.s }, z17.s, z16.s\n"
- "st1w { z24.s }, p1, [x22, x27, LSL #2]\n"
- "ldr x22, [x28, #0x28]\n"
- "st1w { z25.s }, p1, [x21, x27, LSL #2]\n"
- "ldr x21, [x28, #0x30]\n"
+ "st1w { z24.s }, p1, [x21, x26, LSL #2]\n"
+ "ldr x21, [x27, #0x28]\n"
+ "st1w { z25.s }, p1, [x20, x26, LSL #2]\n"
+ "ldr x20, [x27, #0x30]\n"
".inst 0xc1b0ca3c // fclamp { z28.s-z31.s }, z17.s, z16.s\n"
- "st1w { z26.s }, p1, [x20, x27, LSL #2]\n"
- "ldr x20, [x28, #0x38]\n"
- "st1w { z27.s }, p1, [x23, x27, LSL #2]\n"
- "ldr x23, [x28, #0x40]\n"
- "st1w { z28.s }, p1, [x22, x27, LSL #2]\n"
- "st1w { z29.s }, p1, [x21, x27, LSL #2]\n"
- "st1w { z30.s }, p1, [x20, x27, LSL #2]\n"
- "st1w { z31.s }, p1, [x23, x27, LSL #2]\n"
+ "st1w { z26.s }, p1, [x19, x26, LSL #2]\n"
+ "ldr x19, [x27, #0x38]\n"
+ "st1w { z27.s }, p1, [x22, x26, LSL #2]\n"
+ "ldr x22, [x27, #0x40]\n"
+ "st1w { z28.s }, p1, [x21, x26, LSL #2]\n"
+ "st1w { z29.s }, p1, [x20, x26, LSL #2]\n"
+ "st1w { z30.s }, p1, [x19, x26, LSL #2]\n"
+ "st1w { z31.s }, p1, [x22, x26, LSL #2]\n"
".inst 0xd503467f // SMSTOP\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
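
The indirect variant above differs from the direct one in how it finds its inputs: rather than computing addresses from row and column strides, it loads pointers from an inptrs table embedded in the Args structure, forming the table base with "add x15, %x[params_struct], %[offsetof_Args_inptrs]" and then indexing it with ldp/ldr at fixed byte offsets. A hedged C++ sketch of that pattern, with an illustrative Args layout (the real struct in the kernel headers carries more fields) and a hypothetical helper name:

#include <cstddef>

// Illustrative layout only: one pointer per sampled input point.
struct Args
{
    const float *inptrs[36];
    float min, max;
};

// Load the first pair of row pointers from the embedded table, the way the
// kernel's "ldp x14, x13, [x15, #0x0]" does after computing the table base.
// The field offset is passed as a compile-time immediate via the "I" constraint.
static inline void load_first_two_inptrs(const Args *args,
                                         const float *&p0, const float *&p1)
{
    __asm__ __volatile__(
        "add x8, %x[args], %[off]\n"       // base of the embedded pointer table
        "ldp %x[p0], %x[p1], [x8, #0x0]\n" // first pair of input pointers
        : [p0] "=r" (p0), [p1] "=r" (p1)
        : [args] "r" (args), [off] "I" (offsetof(Args, inptrs))
        : "x8", "memory"
    );
}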
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
index 5380567d36..8ec7bcca7e 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -94,131 +94,131 @@ void sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"mov x3, #0x0\n"
"1:" // Tile loop
"str x2, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "mov x22, #0x4\n"
+ "mov x21, #0x4\n"
"str x3, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x21, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "mul x20, x2, x21\n" // offset = tile_i * ld_input_row
+ "ldr x20, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "mul x19, x2, x20\n" // offset = tile_i * ld_input_row
"ldr x4, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "madd x20, x3, x4, x20\n" // offset += tile_j * ld_input_col
- "mul x20, x20, x22\n" // offset *= kernel_stride * output_size
+ "madd x19, x3, x4, x19\n" // offset += tile_j * ld_input_col
+ "mul x19, x19, x21\n" // offset *= kernel_stride * output_size
"ldr x5, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "add x5, x5, x20, LSL #2\n" // inptr[0] += offset * sizeof(float)
- "add x6, x5, x21, LSL #2\n"
- "add x7, x6, x21, LSL #2\n"
+ "add x5, x5, x19, LSL #2\n" // inptr[0] += offset * sizeof(float)
+ "add x6, x5, x20, LSL #2\n"
+ "add x7, x6, x20, LSL #2\n"
"add x8, x4, x4\n"
"ldr x17, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x16, x7, x21, LSL #2\n"
+ "add x16, x7, x20, LSL #2\n"
"add x15, x8, x4\n"
- "add x14, x16, x21, LSL #2\n"
+ "add x14, x16, x20, LSL #2\n"
"add x13, x15, x4\n"
- "add x12, x14, x21, LSL #2\n"
+ "add x12, x14, x20, LSL #2\n"
"add x11, x13, x4\n"
"cbnz x3, 2f\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "sub x21, x20, x3\n"
- "sub x21, x21, #0x1\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "sub x20, x19, x3\n"
+ "sub x20, x20, #0x1\n"
"lsl x10, %x[n_channels], #0x2\n"
- "mov x20, #0x10\n"
- "and x21, x21, #0x3fffff\n"
- "mul x20, x20, x4\n"
- "orr x10, x10, x21, LSL #22\n"
- "orr x10, x10, x20, LSL #38\n"
- "add x9, x7, x8, LSL #2\n"
- "add x28, x5, x11, LSL #2\n"
- "add x27, x7, x15, LSL #2\n"
- "add x26, x12, x11, LSL #2\n"
- "add x25, x16, x8, LSL #2\n"
- "add x24, x5, x4, LSL #2\n"
- "add x23, x5, x13, LSL #2\n"
- "add x22, x16, x15, LSL #2\n"
- "add x21, x6, x11, LSL #2\n"
- "add x20, x6, x8, LSL #2\n"
- ".inst 0xf8aa493a // rprfm pldonce, x10, [x9]\n"
- "add x9, x14, x11, LSL #2\n"
- ".inst 0xf8aa48ba // rprfm pldonce, x10, [x5]\n"
- ".inst 0xf8aa4b9a // rprfm pldonce, x10, [x28]\n"
- "add x28, x6, x15, LSL #2\n"
- ".inst 0xf8aa4b7a // rprfm pldonce, x10, [x27]\n"
- "add x27, x12, x4, LSL #2\n"
- ".inst 0xf8aa499a // rprfm pldonce, x10, [x12]\n"
- ".inst 0xf8aa4b5a // rprfm pldonce, x10, [x26]\n"
- "add x26, x7, x4, LSL #2\n"
- ".inst 0xf8aa4b3a // rprfm pldonce, x10, [x25]\n"
- "add x25, x12, x13, LSL #2\n"
- ".inst 0xf8aa4b1a // rprfm pldonce, x10, [x24]\n"
- "add x24, x7, x13, LSL #2\n"
- ".inst 0xf8aa4afa // rprfm pldonce, x10, [x23]\n"
- "add x23, x5, x8, LSL #2\n"
- ".inst 0xf8aa4ada // rprfm pldonce, x10, [x22]\n"
- "add x22, x16, x4, LSL #2\n"
- ".inst 0xf8aa48da // rprfm pldonce, x10, [x6]\n"
- ".inst 0xf8aa4aba // rprfm pldonce, x10, [x21]\n"
- "add x21, x5, x15, LSL #2\n"
- ".inst 0xf8aa49da // rprfm pldonce, x10, [x14]\n"
- ".inst 0xf8aa4a9a // rprfm pldonce, x10, [x20]\n"
- "add x20, x16, x13, LSL #2\n"
- ".inst 0xf8aa493a // rprfm pldonce, x10, [x9]\n"
- "add x9, x7, x11, LSL #2\n"
- ".inst 0xf8aa4b9a // rprfm pldonce, x10, [x28]\n"
- "add x28, x14, x8, LSL #2\n"
- ".inst 0xf8aa4b7a // rprfm pldonce, x10, [x27]\n"
- "add x27, x16, x11, LSL #2\n"
- ".inst 0xf8aa4b5a // rprfm pldonce, x10, [x26]\n"
- "add x26, x12, x8, LSL #2\n"
- ".inst 0xf8aa4b3a // rprfm pldonce, x10, [x25]\n"
- "add x25, x14, x15, LSL #2\n"
- ".inst 0xf8aa4b1a // rprfm pldonce, x10, [x24]\n"
- "add x24, x12, x15, LSL #2\n"
- ".inst 0xf8aa4afa // rprfm pldonce, x10, [x23]\n"
- "add x23, x6, x4, LSL #2\n"
- ".inst 0xf8aa4ada // rprfm pldonce, x10, [x22]\n"
- "add x22, x6, x13, LSL #2\n"
- ".inst 0xf8aa4aba // rprfm pldonce, x10, [x21]\n"
- "add x21, x14, x4, LSL #2\n"
- ".inst 0xf8aa48fa // rprfm pldonce, x10, [x7]\n"
- ".inst 0xf8aa4a9a // rprfm pldonce, x10, [x20]\n"
- "add x20, x14, x13, LSL #2\n"
- ".inst 0xf8aa493a // rprfm pldonce, x10, [x9]\n"
- ".inst 0xf8aa4a1a // rprfm pldonce, x10, [x16]\n"
- ".inst 0xf8aa4b9a // rprfm pldonce, x10, [x28]\n"
- ".inst 0xf8aa4b7a // rprfm pldonce, x10, [x27]\n"
- ".inst 0xf8aa4b5a // rprfm pldonce, x10, [x26]\n"
- ".inst 0xf8aa4b3a // rprfm pldonce, x10, [x25]\n"
- ".inst 0xf8aa4b1a // rprfm pldonce, x10, [x24]\n"
- ".inst 0xf8aa4afa // rprfm pldonce, x10, [x23]\n"
- ".inst 0xf8aa4ada // rprfm pldonce, x10, [x22]\n"
- ".inst 0xf8aa4aba // rprfm pldonce, x10, [x21]\n"
- ".inst 0xf8aa4a9a // rprfm pldonce, x10, [x20]\n"
+ "mov x19, #0x10\n"
+ "and x20, x20, #0x3fffff\n"
+ "mul x19, x19, x4\n"
+ "orr x10, x10, x20, LSL #22\n"
+ "orr x10, x10, x19, LSL #38\n"
+ "add x26, x7, x8, LSL #2\n"
+ "add x25, x5, x11, LSL #2\n"
+ "add x24, x7, x15, LSL #2\n"
+ "add x23, x12, x11, LSL #2\n"
+ "add x22, x16, x8, LSL #2\n"
+ "add x21, x5, x4, LSL #2\n"
+ "add x20, x5, x13, LSL #2\n"
+ "add x19, x16, x15, LSL #2\n"
+ "add x9, x6, x11, LSL #2\n"
+ "add x28, x6, x8, LSL #2\n"
+ "add x27, x14, x11, LSL #2\n"
+ ".inst 0xf8aa4b5a // rprfm pldonce, x26, [x10]\n"
+ "add x26, x6, x15, LSL #2\n"
+ ".inst 0xf8aa48ba // rprfm pldonce, x5, [x10]\n"
+ ".inst 0xf8aa4b3a // rprfm pldonce, x25, [x10]\n"
+ "add x25, x12, x4, LSL #2\n"
+ ".inst 0xf8aa4b1a // rprfm pldonce, x24, [x10]\n"
+ "add x24, x7, x4, LSL #2\n"
+ ".inst 0xf8aa499a // rprfm pldonce, x12, [x10]\n"
+ ".inst 0xf8aa4afa // rprfm pldonce, x23, [x10]\n"
+ "add x23, x12, x13, LSL #2\n"
+ ".inst 0xf8aa4ada // rprfm pldonce, x22, [x10]\n"
+ "add x22, x7, x13, LSL #2\n"
+ ".inst 0xf8aa4aba // rprfm pldonce, x21, [x10]\n"
+ "add x21, x5, x8, LSL #2\n"
+ ".inst 0xf8aa4a9a // rprfm pldonce, x20, [x10]\n"
+ "add x20, x16, x4, LSL #2\n"
+ ".inst 0xf8aa4a7a // rprfm pldonce, x19, [x10]\n"
+ "add x19, x5, x15, LSL #2\n"
+ ".inst 0xf8aa48da // rprfm pldonce, x6, [x10]\n"
+ ".inst 0xf8aa493a // rprfm pldonce, x9, [x10]\n"
+ "add x9, x16, x13, LSL #2\n"
+ ".inst 0xf8aa49da // rprfm pldonce, x14, [x10]\n"
+ ".inst 0xf8aa4b9a // rprfm pldonce, x28, [x10]\n"
+ "add x28, x7, x11, LSL #2\n"
+ ".inst 0xf8aa4b7a // rprfm pldonce, x27, [x10]\n"
+ "add x27, x14, x8, LSL #2\n"
+ ".inst 0xf8aa4b5a // rprfm pldonce, x26, [x10]\n"
+ "add x26, x16, x11, LSL #2\n"
+ ".inst 0xf8aa4b3a // rprfm pldonce, x25, [x10]\n"
+ "add x25, x12, x8, LSL #2\n"
+ ".inst 0xf8aa4b1a // rprfm pldonce, x24, [x10]\n"
+ "add x24, x14, x15, LSL #2\n"
+ ".inst 0xf8aa4afa // rprfm pldonce, x23, [x10]\n"
+ "add x23, x12, x15, LSL #2\n"
+ ".inst 0xf8aa4ada // rprfm pldonce, x22, [x10]\n"
+ "add x22, x6, x4, LSL #2\n"
+ ".inst 0xf8aa4aba // rprfm pldonce, x21, [x10]\n"
+ "add x21, x6, x13, LSL #2\n"
+ ".inst 0xf8aa4a9a // rprfm pldonce, x20, [x10]\n"
+ "add x20, x14, x4, LSL #2\n"
+ ".inst 0xf8aa4a7a // rprfm pldonce, x19, [x10]\n"
+ "add x19, x14, x13, LSL #2\n"
+ ".inst 0xf8aa48fa // rprfm pldonce, x7, [x10]\n"
+ ".inst 0xf8aa493a // rprfm pldonce, x9, [x10]\n"
+ ".inst 0xf8aa4b9a // rprfm pldonce, x28, [x10]\n"
+ ".inst 0xf8aa4a1a // rprfm pldonce, x16, [x10]\n"
+ ".inst 0xf8aa4b7a // rprfm pldonce, x27, [x10]\n"
+ ".inst 0xf8aa4b5a // rprfm pldonce, x26, [x10]\n"
+ ".inst 0xf8aa4b3a // rprfm pldonce, x25, [x10]\n"
+ ".inst 0xf8aa4b1a // rprfm pldonce, x24, [x10]\n"
+ ".inst 0xf8aa4afa // rprfm pldonce, x23, [x10]\n"
+ ".inst 0xf8aa4ada // rprfm pldonce, x22, [x10]\n"
+ ".inst 0xf8aa4aba // rprfm pldonce, x21, [x10]\n"
+ ".inst 0xf8aa4a9a // rprfm pldonce, x20, [x10]\n"
+ ".inst 0xf8aa4a7a // rprfm pldonce, x19, [x10]\n"
"2:" // Tile loop: Prefetch input rows: End
- "ldr x22, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "mul x21, x2, x22\n" // offset = tile_i * ld_output_row
- "mov x20, #0x4\n"
+ "ldr x21, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "mul x20, x2, x21\n" // offset = tile_i * ld_output_row
+ "mov x19, #0x4\n"
"ld1w { z15.s }, p3/Z, [x17]\n"
- "ldr x9, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "madd x21, x3, x9, x21\n" // offset += tile_j * ld_output_col
- "mul x21, x21, x20\n" // offset *= output_tile_size
+ "ldr x28, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+ "madd x20, x3, x28, x20\n" // offset += tile_j * ld_output_col
+ "mul x20, x20, x19\n" // offset *= output_tile_size
"ld1rw { z14.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ldr x28, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "add x28, x28, x21, LSL #2\n" // outptrs[0] += offset * sizeof(float)
+ "ldr x27, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "add x27, x27, x20, LSL #2\n" // outptrs[0] += offset * sizeof(float)
"addvl x17, x17, #1\n"
".inst 0xa040c220 // ld1w { z0.s-z3.s }, pn8.b/Z, [x17]\n"
- "add x27, x28, x22, LSL #2\n"
- "cntw x26\n"
+ "add x26, x27, x21, LSL #2\n"
+ "cntw x25\n"
"ld1rw { z13.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
"addvl x17, x17, #4\n"
- "add x25, x27, x22, LSL #2\n"
+ "add x24, x26, x21, LSL #2\n"
".inst 0xa040c224 // ld1w { z4.s-z7.s }, pn8.b/Z, [x17]\n"
- "add x24, x9, x9\n"
+ "add x23, x28, x28\n"
"whilelt p2.s, XZR, %x[n_channels]\n"
"ld1w { z9.s }, p2/Z, [x7, x8, LSL #2]\n"
"addvl x17, x17, #4\n"
- "cmp x26, %x[n_channels]\n"
+ "cmp x25, %x[n_channels]\n"
"ld1w { z8.s }, p3/Z, [x17]\n"
- "add x23, x25, x22, LSL #2\n"
- "add x22, x24, x9\n"
+ "add x22, x24, x21, LSL #2\n"
+ "add x21, x23, x28\n"
"ld1w { z10.s }, p2/Z, [x5]\n"
- "mov x21, #0x0\n"
- "sub x20, XZR, x26\n"
+ "mov x20, #0x0\n"
+ "sub x19, XZR, x25\n"
"ld1w { z11.s }, p2/Z, [x5, x11, LSL #2]\n"
"ld1w { z12.s }, p2/Z, [x7, x15, LSL #2]\n"
"addvl x17, x17, #1\n"
@@ -226,15 +226,15 @@ void sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"3:" // Tile loop: Channel loop
"movprfx z21, z15\n fmla z21.s, p3/M, z4.s, z9.s\n"
"movprfx z16, z15\n fmla z16.s, p3/M, z8.s, z9.s\n"
- "whilelt p1.s, x26, %x[n_channels]\n"
- "incw x21\n"
+ "whilelt p1.s, x25, %x[n_channels]\n"
+ "incw x20\n"
"movprfx z22, z15\n fmla z22.s, p3/M, z3.s, z9.s\n"
"movprfx z25, z15\n fmla z25.s, p3/M, z1.s, z9.s\n"
- "incw x26\n"
+ "incw x25\n"
"mov p0.b, p2.b\n"
"movprfx z26, z15\n fmla z26.s, p3/M, z0.s, z9.s\n"
"fmla z21.s, p3/M, z5.s, z12.s\n"
- "incw x20\n"
+ "incw x19\n"
"movprfx z17, z15\n fmla z17.s, p3/M, z7.s, z9.s\n"
"movprfx z18, z15\n fmla z18.s, p3/M, z6.s, z9.s\n"
"movprfx z20, z15\n fmla z20.s, p3/M, z5.s, z9.s\n"
@@ -261,10 +261,15 @@ void sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"ld1w { z11.s }, p2/Z, [x5, x13, LSL #2]\n"
"fmla z25.s, p3/M, z4.s, z9.s\n"
"fmla z26.s, p3/M, z3.s, z9.s\n"
+ "movprfx z29, z15\n fmla z29.s, p3/M, z1.s, z9.s\n"
+ "movprfx z30, z15\n fmla z30.s, p3/M, z0.s, z9.s\n"
+ "ld1w { z15.s }, p3/Z, [x17]\n"
+ "addvl x17, x17, #1\n"
"fmla z20.s, p3/M, z8.s, z9.s\n"
"fmla z24.s, p3/M, z5.s, z9.s\n"
"fmla z28.s, p3/M, z2.s, z9.s\n"
"fmla z21.s, p3/M, z8.s, z10.s\n"
+ "ld1w { z9.s }, p2/Z, [x6]\n"
"fmla z16.s, p3/M, z1.s, z12.s\n"
"fmla z17.s, p3/M, z0.s, z12.s\n"
"ld1w { z12.s }, p2/Z, [x6, x11, LSL #2]\n"
@@ -276,28 +281,25 @@ void sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"fmla z25.s, p3/M, z5.s, z10.s\n"
"fmla z26.s, p3/M, z4.s, z10.s\n"
"fmla z27.s, p3/M, z3.s, z10.s\n"
+ "fmla z29.s, p3/M, z2.s, z10.s\n"
+ "fmla z30.s, p3/M, z1.s, z10.s\n"
"fmla z31.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x6, x8, LSL #2]\n"
+ "fmla z20.s, p3/M, z0.s, z9.s\n"
"fmla z24.s, p3/M, z6.s, z11.s\n"
"fmla z28.s, p3/M, z3.s, z11.s\n"
+ "fmla z21.s, p3/M, z1.s, z10.s\n"
"ld1w { z11.s }, p2/Z, [x14, x11, LSL #2]\n"
+ "fmla z16.s, p3/M, z3.s, z9.s\n"
"fmla z19.s, p3/M, z5.s, z12.s\n"
"fmla z23.s, p3/M, z2.s, z12.s\n"
+ "fmla z17.s, p3/M, z4.s, z10.s\n"
"ld1w { z12.s }, p2/Z, [x6, x15, LSL #2]\n"
+ "fmla z18.s, p3/M, z3.s, z10.s\n"
+ "fmla z22.s, p3/M, z0.s, z10.s\n"
"fmla z27.s, p3/M, z8.s, z11.s\n"
"fmla z31.s, p3/M, z5.s, z11.s\n"
- "movprfx z29, z15\n fmla z29.s, p3/M, z1.s, z9.s\n"
- "movprfx z30, z15\n fmla z30.s, p3/M, z0.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x6]\n"
- "fmla z29.s, p3/M, z2.s, z10.s\n"
- "fmla z30.s, p3/M, z1.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x6, x8, LSL #2]\n"
- "fmla z20.s, p3/M, z0.s, z9.s\n"
- "fmla z21.s, p3/M, z1.s, z10.s\n"
- "fmla z16.s, p3/M, z3.s, z9.s\n"
- "fmla z17.s, p3/M, z4.s, z10.s\n"
"ld1w { z11.s }, p2/Z, [x12, x4, LSL #2]\n"
- "fmla z18.s, p3/M, z3.s, z10.s\n"
- "fmla z22.s, p3/M, z0.s, z10.s\n"
"fmla z20.s, p3/M, z2.s, z10.s\n"
"fmla z21.s, p3/M, z2.s, z12.s\n"
"fmla z16.s, p3/M, z5.s, z10.s\n"
@@ -359,6 +361,7 @@ void sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"fmla z29.s, p3/M, z4.s, z11.s\n"
"fmla z30.s, p3/M, z3.s, z11.s\n"
"fmla z19.s, p3/M, z8.s, z12.s\n"
+ "ld1w { z9.s }, p1/Z, [x7, x8, LSL #2]\n"
"fmla z23.s, p3/M, z5.s, z12.s\n"
"fmla z27.s, p3/M, z2.s, z12.s\n"
"ld1w { z12.s }, p2/Z, [x16, x11, LSL #2]\n"
@@ -398,25 +401,23 @@ void sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"fmla z20.s, p3/M, z1.s, z10.s\n"
"fmla z21.s, p3/M, z0.s, z10.s\n"
"ld1w { z10.s }, p2/Z, [x14, x13, LSL #2]\n"
- "whilelt p2.s, x21, %x[n_channels]\n"
+ "whilelt p2.s, x20, %x[n_channels]\n"
"fmla z18.s, p3/M, z5.s, z11.s\n"
"fmla z19.s, p3/M, z4.s, z11.s\n"
- "ld1w { z15.s }, p3/Z, [x17]\n"
- "addvl x17, x17, #1\n"
+ "cmp x25, %x[n_channels]\n"
+ "addvl x14, x14, #1\n"
"fmla z22.s, p3/M, z2.s, z11.s\n"
"fmla z23.s, p3/M, z1.s, z11.s\n"
- "cmp x26, %x[n_channels]\n"
- "addvl x14, x14, #1\n"
+ "ld1w { z11.s }, p1/Z, [x5, x11, LSL #2]\n"
"fmla z24.s, p3/M, z7.s, z12.s\n"
"fmla z25.s, p3/M, z6.s, z12.s\n"
- "ld1w { z9.s }, p1/Z, [x7, x8, LSL #2]\n"
"fmla z28.s, p3/M, z4.s, z12.s\n"
"fmla z29.s, p3/M, z3.s, z12.s\n"
".inst 0xa040c220 // ld1w { z0.s-z3.s }, pn8.b/Z, [x17]\n"
"addvl x17, x17, #4\n"
"fmla z26.s, p3/M, z8.s, z10.s\n"
"fmla z27.s, p3/M, z7.s, z10.s\n"
- "ld1w { z11.s }, p1/Z, [x5, x11, LSL #2]\n"
+ "ld1w { z12.s }, p1/Z, [x7, x15, LSL #2]\n"
"fmla z30.s, p3/M, z5.s, z10.s\n"
"fmla z31.s, p3/M, z4.s, z10.s\n"
".inst 0xa040c224 // ld1w { z4.s-z7.s }, pn8.b/Z, [x17]\n"
@@ -426,29 +427,28 @@ void sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"ld1w { z10.s }, p1/Z, [x5]\n"
".inst 0xc1adc9d8 // fclamp { z24.s-z27.s }, z14.s, z13.s\n"
".inst 0xc1adc9dc // fclamp { z28.s-z31.s }, z14.s, z13.s\n"
- "st1w { z16.s }, p0, [x28]\n"
- "ld1w { z12.s }, p1/Z, [x7, x15, LSL #2]\n"
- "st1w { z17.s }, p0, [x28, x9, LSL #2]\n"
- "st1w { z18.s }, p0, [x28, x24, LSL #2]\n"
+ "st1w { z16.s }, p0, [x27]\n"
+ "st1w { z17.s }, p0, [x27, x28, LSL #2]\n"
"ld1w { z8.s }, p3/Z, [x17]\n"
"addvl x17, x17, #1\n"
- "st1w { z19.s }, p0, [x28, x22, LSL #2]\n"
- "addvl x28, x28, #1\n"
- "st1w { z20.s }, p0, [x27]\n"
- "st1w { z21.s }, p0, [x27, x9, LSL #2]\n"
- "st1w { z22.s }, p0, [x27, x24, LSL #2]\n"
- "st1w { z23.s }, p0, [x27, x22, LSL #2]\n"
+ "st1w { z18.s }, p0, [x27, x23, LSL #2]\n"
+ "st1w { z19.s }, p0, [x27, x21, LSL #2]\n"
"addvl x27, x27, #1\n"
- "st1w { z24.s }, p0, [x25]\n"
- "st1w { z25.s }, p0, [x25, x9, LSL #2]\n"
- "st1w { z26.s }, p0, [x25, x24, LSL #2]\n"
- "st1w { z27.s }, p0, [x25, x22, LSL #2]\n"
- "addvl x25, x25, #1\n"
- "st1w { z28.s }, p0, [x23]\n"
- "st1w { z29.s }, p0, [x23, x9, LSL #2]\n"
- "st1w { z30.s }, p0, [x23, x24, LSL #2]\n"
- "st1w { z31.s }, p0, [x23, x22, LSL #2]\n"
- "addvl x23, x23, #1\n"
+ "st1w { z20.s }, p0, [x26]\n"
+ "st1w { z21.s }, p0, [x26, x28, LSL #2]\n"
+ "st1w { z22.s }, p0, [x26, x23, LSL #2]\n"
+ "st1w { z23.s }, p0, [x26, x21, LSL #2]\n"
+ "addvl x26, x26, #1\n"
+ "st1w { z24.s }, p0, [x24]\n"
+ "st1w { z25.s }, p0, [x24, x28, LSL #2]\n"
+ "st1w { z26.s }, p0, [x24, x23, LSL #2]\n"
+ "st1w { z27.s }, p0, [x24, x21, LSL #2]\n"
+ "addvl x24, x24, #1\n"
+ "st1w { z28.s }, p0, [x22]\n"
+ "st1w { z29.s }, p0, [x22, x28, LSL #2]\n"
+ "st1w { z30.s }, p0, [x22, x23, LSL #2]\n"
+ "st1w { z31.s }, p0, [x22, x21, LSL #2]\n"
+ "addvl x22, x22, #1\n"
"blt 3b\n"
"4:" // Tile loop: Channel tail
"movprfx z21, z15\n fmla z21.s, p3/M, z4.s, z9.s\n"
@@ -458,15 +458,15 @@ void sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"movprfx z22, z15\n fmla z22.s, p3/M, z3.s, z9.s\n"
"movprfx z25, z15\n fmla z25.s, p3/M, z1.s, z9.s\n"
"ldr x2, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "add x21, x2, #0x1\n"
+ "add x20, x2, #0x1\n"
"movprfx z26, z15\n fmla z26.s, p3/M, z0.s, z9.s\n"
"fmla z21.s, p3/M, z5.s, z12.s\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "cmp x3, x20\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "cmp x3, x19\n"
"movprfx z17, z15\n fmla z17.s, p3/M, z7.s, z9.s\n"
"movprfx z18, z15\n fmla z18.s, p3/M, z6.s, z9.s\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "csel x2, x2, x21, LT\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "csel x2, x2, x20, LT\n"
"movprfx z20, z15\n fmla z20.s, p3/M, z5.s, z9.s\n"
"movprfx z24, z15\n fmla z24.s, p3/M, z2.s, z9.s\n"
"ld1w { z9.s }, p2/Z, [x16, x8, LSL #2]\n"
@@ -478,7 +478,7 @@ void sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"fmla z22.s, p3/M, z4.s, z12.s\n"
"fmla z25.s, p3/M, z2.s, z12.s\n"
"ld1w { z11.s }, p2/Z, [x12, x11, LSL #2]\n"
- "cmp x2, x20\n"
+ "cmp x2, x19\n"
"fmla z26.s, p3/M, z1.s, z12.s\n"
"movprfx z28, z15\n fmla z28.s, p3/M, z6.s, z10.s\n"
"ld1w { z10.s }, p2/Z, [x16, x15, LSL #2]\n"
@@ -494,10 +494,13 @@ void sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"ld1w { z11.s }, p2/Z, [x5, x13, LSL #2]\n"
"fmla z25.s, p3/M, z4.s, z9.s\n"
"fmla z26.s, p3/M, z3.s, z9.s\n"
+ "movprfx z29, z15\n fmla z29.s, p3/M, z1.s, z9.s\n"
+ "movprfx z30, z15\n fmla z30.s, p3/M, z0.s, z9.s\n"
"fmla z20.s, p3/M, z8.s, z9.s\n"
"fmla z24.s, p3/M, z5.s, z9.s\n"
"fmla z28.s, p3/M, z2.s, z9.s\n"
"fmla z21.s, p3/M, z8.s, z10.s\n"
+ "ld1w { z9.s }, p2/Z, [x6]\n"
"fmla z16.s, p3/M, z1.s, z12.s\n"
"fmla z17.s, p3/M, z0.s, z12.s\n"
"ld1w { z12.s }, p2/Z, [x6, x11, LSL #2]\n"
@@ -509,28 +512,25 @@ void sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"fmla z25.s, p3/M, z5.s, z10.s\n"
"fmla z26.s, p3/M, z4.s, z10.s\n"
"fmla z27.s, p3/M, z3.s, z10.s\n"
+ "fmla z29.s, p3/M, z2.s, z10.s\n"
+ "fmla z30.s, p3/M, z1.s, z10.s\n"
"fmla z31.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x6, x8, LSL #2]\n"
+ "fmla z20.s, p3/M, z0.s, z9.s\n"
"fmla z24.s, p3/M, z6.s, z11.s\n"
"fmla z28.s, p3/M, z3.s, z11.s\n"
+ "fmla z21.s, p3/M, z1.s, z10.s\n"
"ld1w { z11.s }, p2/Z, [x14, x11, LSL #2]\n"
+ "fmla z16.s, p3/M, z3.s, z9.s\n"
"fmla z19.s, p3/M, z5.s, z12.s\n"
"fmla z23.s, p3/M, z2.s, z12.s\n"
+ "fmla z17.s, p3/M, z4.s, z10.s\n"
"ld1w { z12.s }, p2/Z, [x6, x15, LSL #2]\n"
+ "fmla z18.s, p3/M, z3.s, z10.s\n"
+ "fmla z22.s, p3/M, z0.s, z10.s\n"
"fmla z27.s, p3/M, z8.s, z11.s\n"
"fmla z31.s, p3/M, z5.s, z11.s\n"
- "movprfx z29, z15\n fmla z29.s, p3/M, z1.s, z9.s\n"
- "movprfx z30, z15\n fmla z30.s, p3/M, z0.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x6]\n"
- "fmla z29.s, p3/M, z2.s, z10.s\n"
- "fmla z30.s, p3/M, z1.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x6, x8, LSL #2]\n"
- "fmla z20.s, p3/M, z0.s, z9.s\n"
- "fmla z21.s, p3/M, z1.s, z10.s\n"
- "fmla z16.s, p3/M, z3.s, z9.s\n"
- "fmla z17.s, p3/M, z4.s, z10.s\n"
"ld1w { z11.s }, p2/Z, [x12, x4, LSL #2]\n"
- "fmla z18.s, p3/M, z3.s, z10.s\n"
- "fmla z22.s, p3/M, z0.s, z10.s\n"
"fmla z20.s, p3/M, z2.s, z10.s\n"
"fmla z21.s, p3/M, z2.s, z12.s\n"
"fmla z16.s, p3/M, z5.s, z10.s\n"
@@ -640,29 +640,29 @@ void sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"fmla z31.s, p3/M, z4.s, z10.s\n"
".inst 0xc1adc9d0 // fclamp { z16.s-z19.s }, z14.s, z13.s\n"
".inst 0xc1adc9d4 // fclamp { z20.s-z23.s }, z14.s, z13.s\n"
- "st1w { z16.s }, p0, [x28]\n"
+ "st1w { z16.s }, p0, [x27]\n"
".inst 0xc1adc9d8 // fclamp { z24.s-z27.s }, z14.s, z13.s\n"
".inst 0xc1adc9dc // fclamp { z28.s-z31.s }, z14.s, z13.s\n"
- "st1w { z17.s }, p0, [x28, x9, LSL #2]\n"
- "st1w { z18.s }, p0, [x28, x24, LSL #2]\n"
- "st1w { z19.s }, p0, [x28, x22, LSL #2]\n"
- "st1w { z20.s }, p0, [x27]\n"
- "st1w { z21.s }, p0, [x27, x9, LSL #2]\n"
- "st1w { z22.s }, p0, [x27, x24, LSL #2]\n"
- "st1w { z23.s }, p0, [x27, x22, LSL #2]\n"
- "st1w { z24.s }, p0, [x25]\n"
- "st1w { z25.s }, p0, [x25, x9, LSL #2]\n"
- "st1w { z26.s }, p0, [x25, x24, LSL #2]\n"
- "st1w { z27.s }, p0, [x25, x22, LSL #2]\n"
- "st1w { z28.s }, p0, [x23]\n"
- "st1w { z29.s }, p0, [x23, x9, LSL #2]\n"
- "st1w { z30.s }, p0, [x23, x24, LSL #2]\n"
- "st1w { z31.s }, p0, [x23, x22, LSL #2]\n"
+ "st1w { z17.s }, p0, [x27, x28, LSL #2]\n"
+ "st1w { z18.s }, p0, [x27, x23, LSL #2]\n"
+ "st1w { z19.s }, p0, [x27, x21, LSL #2]\n"
+ "st1w { z20.s }, p0, [x26]\n"
+ "st1w { z21.s }, p0, [x26, x28, LSL #2]\n"
+ "st1w { z22.s }, p0, [x26, x23, LSL #2]\n"
+ "st1w { z23.s }, p0, [x26, x21, LSL #2]\n"
+ "st1w { z24.s }, p0, [x24]\n"
+ "st1w { z25.s }, p0, [x24, x28, LSL #2]\n"
+ "st1w { z26.s }, p0, [x24, x23, LSL #2]\n"
+ "st1w { z27.s }, p0, [x24, x21, LSL #2]\n"
+ "st1w { z28.s }, p0, [x22]\n"
+ "st1w { z29.s }, p0, [x22, x28, LSL #2]\n"
+ "st1w { z30.s }, p0, [x22, x23, LSL #2]\n"
+ "st1w { z31.s }, p0, [x22, x21, LSL #2]\n"
"blt 1b\n"
".inst 0xd503467f // SMSTOP\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
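
Both direct kernels above prepare their range prefetches the same way: a packed 64-bit metadata word is built with lsl/orr (the row byte length in the low bits, a masked 22-bit count inserted at bit 22, a scaled stride at bit 38) and then paired with each precomputed address in the "rprfm pldonce" encodings. A hypothetical helper mirroring that packing; the field interpretation is inferred from the shifts in the assembly rather than stated by the source, so treat it as a sketch and check the Arm ARM's RPRFM description before relying on the layout:

#include <cstdint>

// Hypothetical helper mirroring the kernel's lsl/orr sequence: pack the
// RPRFM range-prefetch metadata into one 64-bit register value.
static inline uint64_t pack_rprfm_metadata(uint64_t row_bytes,
                                           uint64_t repeat_count,
                                           uint64_t stride_bytes)
{
    uint64_t meta = row_bytes;                 // lsl x10, %x[n_channels], #0x2
    meta |= (repeat_count & 0x3fffff) << 22;   // and x20, ...; orr x10, x10, x20, LSL #22
    meta |= stride_bytes << 38;                // mul x19, ...; orr x10, x10, x19, LSL #38
    return meta;
}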
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
index d904f68806..d99ebb2bb4 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -98,209 +98,211 @@ void sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
activation_min, activation_max);
__asm__ __volatile__(
- "ldr x17, [%x[params_struct], %[offsetof_args_params]]\n"
+ "ldr x16, [%x[params_struct], %[offsetof_args_params]]\n"
".inst 0xd503477f // SMSTART ZA\n"
- "add x16, %x[params_struct], %[offsetof_Args_inptrs]\n"
+ "add x15, %x[params_struct], %[offsetof_Args_inptrs]\n"
"ptrue p3.b\n"
".inst 0x25207810 // ptrue pn8.b\n"
- "ld1w { z15.s }, p3/Z, [x17]\n"
- "addvl x17, x17, #1\n"
- "ldp x15, x14, [x16, #0x0]\n"
- "ldp x13, x12, [x16, #0x10]\n"
- "cntw x11\n"
- ".inst 0xa040c220 // ld1w { z0.s-z3.s }, pn8.b/Z, [x17]\n"
- "addvl x17, x17, #4\n"
- "mov x10, #0x0\n"
+ "ld1w { z15.s }, p3/Z, [x16]\n"
+ "addvl x16, x16, #1\n"
+ "ldp x14, x13, [x15, #0x0]\n"
+ "ldp x12, x11, [x15, #0x10]\n"
+ "cntw x10\n"
+ ".inst 0xa040c200 // ld1w { z0.s-z3.s }, pn8.b/Z, [x16]\n"
+ "addvl x16, x16, #4\n"
+ "mov x9, #0x0\n"
"whilelt p2.s, XZR, %x[n_channels]\n"
- ".inst 0xa040c224 // ld1w { z4.s-z7.s }, pn8.b/Z, [x17]\n"
- "ldr x9, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "addvl x17, x17, #4\n"
- "cmp x11, %x[n_channels]\n"
+ ".inst 0xa040c204 // ld1w { z4.s-z7.s }, pn8.b/Z, [x16]\n"
+ "ldr x28, [%x[params_struct], %[offsetof_args_outptrs]]\n"
+ "addvl x16, x16, #4\n"
+ "cmp x10, %x[n_channels]\n"
"ld1rw { z14.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
"ld1rw { z13.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "sub x28, XZR, x11\n"
- "ld1w { z8.s }, p3/Z, [x17]\n"
- "addvl x17, x17, #1\n"
- "ld1w { z9.s }, p2/Z, [x15, x10, LSL #2]\n"
- "ld1w { z10.s }, p2/Z, [x14, x10, LSL #2]\n"
- "ld1w { z11.s }, p2/Z, [x13, x10, LSL #2]\n"
- "ld1w { z12.s }, p2/Z, [x12, x10, LSL #2]\n"
+ "sub x27, XZR, x10\n"
+ "ld1w { z8.s }, p3/Z, [x16]\n"
+ "addvl x16, x16, #1\n"
+ "ld1w { z9.s }, p2/Z, [x14, x9, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x13, x9, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x12, x9, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x11, x9, LSL #2]\n"
"bge 2f\n"
"1:" // Channel loop
"movprfx z21, z15\n fmla z21.s, p3/M, z4.s, z9.s\n"
"movprfx z16, z15\n fmla z16.s, p3/M, z8.s, z9.s\n"
- "ldr x27, [x16, #0x20]\n"
- "incw x28\n"
+ "ldr x26, [x15, #0x20]\n"
+ "incw x27\n"
"movprfx z22, z15\n fmla z22.s, p3/M, z3.s, z9.s\n"
"movprfx z25, z15\n fmla z25.s, p3/M, z1.s, z9.s\n"
- "ldr x26, [x16, #0x30]\n"
+ "ldr x25, [x15, #0x30]\n"
"mov p1.b, p2.b\n"
"movprfx z26, z15\n fmla z26.s, p3/M, z0.s, z9.s\n"
- "ldr x25, [x16, #0x28]\n"
+ "ldr x24, [x15, #0x28]\n"
"movprfx z17, z15\n fmla z17.s, p3/M, z7.s, z9.s\n"
- "whilelt p0.s, x11, %x[n_channels]\n"
+ "whilelt p0.s, x10, %x[n_channels]\n"
"movprfx z18, z15\n fmla z18.s, p3/M, z6.s, z9.s\n"
"fmla z21.s, p3/M, z5.s, z12.s\n"
- "ldr x24, [x16, #0x38]\n"
+ "ldr x23, [x15, #0x38]\n"
"movprfx z20, z15\n fmla z20.s, p3/M, z5.s, z9.s\n"
"movprfx z24, z15\n fmla z24.s, p3/M, z2.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x26, x10, LSL #2]\n"
- "ldr x15, [x16, #0x40]\n"
+ "ld1w { z9.s }, p2/Z, [x25, x9, LSL #2]\n"
+ "ldr x14, [x15, #0x40]\n"
"fmla z16.s, p3/M, z0.s, z10.s\n"
"movprfx z19, z15\n fmla z19.s, p3/M, z2.s, z11.s\n"
- "ld1w { z10.s }, p2/Z, [x27, x10, LSL #2]\n"
- "ldr x14, [x16, #0x48]\n"
+ "ld1w { z10.s }, p2/Z, [x26, x9, LSL #2]\n"
+ "ldr x13, [x15, #0x48]\n"
"fmla z22.s, p3/M, z4.s, z12.s\n"
"fmla z25.s, p3/M, z2.s, z12.s\n"
- "ld1w { z11.s }, p2/Z, [x25, x10, LSL #2]\n"
- "ldr x13, [x16, #0x50]\n"
+ "ld1w { z11.s }, p2/Z, [x24, x9, LSL #2]\n"
+ "ldr x12, [x15, #0x50]\n"
"fmla z26.s, p3/M, z1.s, z12.s\n"
"fmla z17.s, p3/M, z8.s, z12.s\n"
- "ldr x27, [x16, #0x60]\n"
+ "ldr x26, [x15, #0x60]\n"
"fmla z18.s, p3/M, z7.s, z12.s\n"
"movprfx z28, z15\n fmla z28.s, p3/M, z6.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x14, x10, LSL #2]\n"
- "ldr x25, [x16, #0x68]\n"
+ "ld1w { z10.s }, p2/Z, [x13, x9, LSL #2]\n"
+ "ldr x24, [x15, #0x68]\n"
"fmla z21.s, p3/M, z7.s, z9.s\n"
"fmla z19.s, p3/M, z6.s, z12.s\n"
- "ldr x12, [x16, #0x58]\n"
+ "ldr x11, [x15, #0x58]\n"
"movprfx z23, z15\n fmla z23.s, p3/M, z3.s, z12.s\n"
"movprfx z27, z15\n fmla z27.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x24, x10, LSL #2]\n"
- "ldr x26, [x16, #0x70]\n"
+ "ld1w { z12.s }, p2/Z, [x23, x9, LSL #2]\n"
+ "ldr x25, [x15, #0x70]\n"
"movprfx z31, z15\n fmla z31.s, p3/M, z8.s, z11.s\n"
"fmla z22.s, p3/M, z6.s, z9.s\n"
- "ld1w { z11.s }, p2/Z, [x15, x10, LSL #2]\n"
- "ldr x24, [x16, #0x78]\n"
+ "ld1w { z11.s }, p2/Z, [x14, x9, LSL #2]\n"
+ "ldr x23, [x15, #0x78]\n"
"fmla z25.s, p3/M, z4.s, z9.s\n"
"fmla z26.s, p3/M, z3.s, z9.s\n"
- "ldr x15, [x16, #0x80]\n"
+ "ldr x14, [x15, #0x80]\n"
+ "movprfx z29, z15\n fmla z29.s, p3/M, z1.s, z9.s\n"
+ "movprfx z30, z15\n fmla z30.s, p3/M, z0.s, z9.s\n"
+ "ldr x13, [x15, #0x88]\n"
+ "ld1w { z15.s }, p3/Z, [x16]\n"
"fmla z20.s, p3/M, z8.s, z9.s\n"
"fmla z24.s, p3/M, z5.s, z9.s\n"
- "ldr x14, [x16, #0x88]\n"
+ "ldr x22, [x28, #0x0]\n"
+ "addvl x16, x16, #1\n"
"fmla z28.s, p3/M, z2.s, z9.s\n"
"fmla z16.s, p3/M, z1.s, z12.s\n"
- "ldr x23, [x9, #0x0]\n"
+ "ld1w { z9.s }, p2/Z, [x12, x9, LSL #2]\n"
+ "ldr x12, [x15, #0x90]\n"
"fmla z17.s, p3/M, z0.s, z12.s\n"
- "movprfx z29, z15\n fmla z29.s, p3/M, z1.s, z9.s\n"
- "ldr x22, [x9, #0x8]\n"
- "movprfx z30, z15\n fmla z30.s, p3/M, z0.s, z9.s\n"
"fmla z18.s, p3/M, z2.s, z11.s\n"
- "ld1w { z9.s }, p2/Z, [x13, x10, LSL #2]\n"
- "ldr x13, [x16, #0x90]\n"
+ "ld1w { z12.s }, p2/Z, [x11, x9, LSL #2]\n"
+ "ldr x11, [x15, #0x98]\n"
"fmla z21.s, p3/M, z8.s, z10.s\n"
"fmla z19.s, p3/M, z1.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x27, x10, LSL #2]\n"
- "ldr x27, [x16, #0xa0]\n"
+ "ld1w { z11.s }, p2/Z, [x26, x9, LSL #2]\n"
+ "ldr x26, [x15, #0xa0]\n"
"fmla z22.s, p3/M, z7.s, z10.s\n"
"fmla z23.s, p3/M, z6.s, z10.s\n"
- "ldr x21, [x9, #0x10]\n"
+ "ldr x21, [x28, #0x8]\n"
"fmla z25.s, p3/M, z5.s, z10.s\n"
"fmla z26.s, p3/M, z4.s, z10.s\n"
- "ldr x20, [x9, #0x18]\n"
+ "ldr x20, [x28, #0x10]\n"
"fmla z27.s, p3/M, z3.s, z10.s\n"
"fmla z29.s, p3/M, z2.s, z10.s\n"
+ "ldr x19, [x28, #0x18]\n"
"fmla z30.s, p3/M, z1.s, z10.s\n"
"fmla z31.s, p3/M, z0.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x25, x10, LSL #2]\n"
- "ldr x25, [x16, #0xa8]\n"
+ "ld1w { z10.s }, p2/Z, [x24, x9, LSL #2]\n"
+ "ldr x24, [x15, #0xa8]\n"
"fmla z16.s, p3/M, z3.s, z9.s\n"
"fmla z20.s, p3/M, z0.s, z9.s\n"
- "ld1w { z12.s }, p2/Z, [x12, x10, LSL #2]\n"
- "ldr x12, [x16, #0x98]\n"
"fmla z24.s, p3/M, z6.s, z11.s\n"
"fmla z28.s, p3/M, z3.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x26, x10, LSL #2]\n"
- "ldr x26, [x16, #0xb0]\n"
+ "ld1w { z11.s }, p2/Z, [x25, x9, LSL #2]\n"
+ "ldr x25, [x15, #0xb0]\n"
"fmla z17.s, p3/M, z4.s, z10.s\n"
"fmla z18.s, p3/M, z3.s, z10.s\n"
"fmla z21.s, p3/M, z1.s, z10.s\n"
"fmla z19.s, p3/M, z5.s, z12.s\n"
"fmla z23.s, p3/M, z2.s, z12.s\n"
"fmla z22.s, p3/M, z0.s, z10.s\n"
- "ld1w { z12.s }, p2/Z, [x24, x10, LSL #2]\n"
- "ldr x24, [x16, #0xb8]\n"
+ "ld1w { z12.s }, p2/Z, [x23, x9, LSL #2]\n"
+ "ldr x23, [x15, #0xb8]\n"
"fmla z27.s, p3/M, z8.s, z11.s\n"
"fmla z31.s, p3/M, z5.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x15, x10, LSL #2]\n"
- "ldr x15, [x16, #0xc0]\n"
+ "ld1w { z11.s }, p2/Z, [x14, x9, LSL #2]\n"
+ "ldr x14, [x15, #0xc0]\n"
"fmla z16.s, p3/M, z5.s, z10.s\n"
"fmla z20.s, p3/M, z2.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x14, x10, LSL #2]\n"
- "ldr x14, [x16, #0xc8]\n"
+ "ld1w { z10.s }, p2/Z, [x13, x9, LSL #2]\n"
+ "ldr x13, [x15, #0xc8]\n"
"fmla z17.s, p3/M, z5.s, z12.s\n"
"fmla z18.s, p3/M, z4.s, z12.s\n"
"fmla z21.s, p3/M, z2.s, z12.s\n"
"fmla z19.s, p3/M, z3.s, z12.s\n"
"fmla z22.s, p3/M, z1.s, z12.s\n"
"fmla z23.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x12, x10, LSL #2]\n"
- "ldr x12, [x16, #0xd8]\n"
+ "ld1w { z12.s }, p2/Z, [x11, x9, LSL #2]\n"
+ "ldr x11, [x15, #0xd8]\n"
"fmla z28.s, p3/M, z7.s, z11.s\n"
"fmla z29.s, p3/M, z6.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x13, x10, LSL #2]\n"
- "ldr x13, [x16, #0xd0]\n"
+ "ld1w { z11.s }, p2/Z, [x12, x9, LSL #2]\n"
+ "ldr x12, [x15, #0xd0]\n"
"fmla z16.s, p3/M, z7.s, z10.s\n"
"fmla z17.s, p3/M, z6.s, z10.s\n"
"fmla z20.s, p3/M, z4.s, z10.s\n"
"fmla z21.s, p3/M, z3.s, z10.s\n"
"fmla z24.s, p3/M, z1.s, z10.s\n"
"fmla z25.s, p3/M, z0.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x27, x10, LSL #2]\n"
- "ldr x27, [x16, #0xe0]\n"
+ "ld1w { z10.s }, p2/Z, [x26, x9, LSL #2]\n"
+ "ldr x26, [x15, #0xe0]\n"
"fmla z18.s, p3/M, z8.s, z12.s\n"
"fmla z30.s, p3/M, z8.s, z11.s\n"
"fmla z31.s, p3/M, z7.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x25, x10, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x24, x9, LSL #2]\n"
"fmla z27.s, p3/M, z1.s, z12.s\n"
- "ldr x25, [x16, #0xe8]\n"
+ "ldr x24, [x15, #0xe8]\n"
"fmla z19.s, p3/M, z7.s, z12.s\n"
"fmla z22.s, p3/M, z5.s, z12.s\n"
"fmla z23.s, p3/M, z4.s, z12.s\n"
"fmla z26.s, p3/M, z2.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x26, x10, LSL #2]\n"
- "ldr x26, [x16, #0xf0]\n"
+ "ld1w { z12.s }, p2/Z, [x25, x9, LSL #2]\n"
+ "ldr x25, [x15, #0xf0]\n"
"fmla z16.s, p3/M, z2.s, z10.s\n"
"fmla z17.s, p3/M, z1.s, z10.s\n"
"fmla z18.s, p3/M, z0.s, z10.s\n"
"fmla z20.s, p3/M, z7.s, z11.s\n"
- "ld1w { z10.s }, p2/Z, [x24, x10, LSL #2]\n"
- "ldr x24, [x16, #0xf8]\n"
+ "ld1w { z10.s }, p2/Z, [x23, x9, LSL #2]\n"
+ "ldr x23, [x15, #0xf8]\n"
"fmla z21.s, p3/M, z6.s, z11.s\n"
"fmla z24.s, p3/M, z4.s, z11.s\n"
"fmla z25.s, p3/M, z3.s, z11.s\n"
"fmla z28.s, p3/M, z1.s, z11.s\n"
"fmla z29.s, p3/M, z0.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x15, x10, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x14, x9, LSL #2]\n"
"fmla z27.s, p3/M, z4.s, z11.s\n"
- "ldr x15, [x16, #0x100]\n"
+ "ldr x14, [x15, #0x100]\n"
"fmla z30.s, p3/M, z2.s, z11.s\n"
"fmla z17.s, p3/M, z2.s, z12.s\n"
"fmla z18.s, p3/M, z1.s, z12.s\n"
"fmla z19.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x14, x10, LSL #2]\n"
- "ldr x14, [x16, #0x108]\n"
+ "ld1w { z12.s }, p2/Z, [x13, x9, LSL #2]\n"
+ "ldr x13, [x15, #0x108]\n"
"fmla z16.s, p3/M, z6.s, z10.s\n"
"fmla z20.s, p3/M, z3.s, z10.s\n"
"fmla z24.s, p3/M, z0.s, z10.s\n"
"fmla z22.s, p3/M, z8.s, z11.s\n"
- "ld1w { z10.s }, p2/Z, [x13, x10, LSL #2]\n"
- "ldr x13, [x16, #0x110]\n"
+ "ld1w { z10.s }, p2/Z, [x12, x9, LSL #2]\n"
+ "ldr x12, [x15, #0x110]\n"
"fmla z23.s, p3/M, z7.s, z11.s\n"
"fmla z26.s, p3/M, z5.s, z11.s\n"
"fmla z31.s, p3/M, z1.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x12, x10, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x11, x9, LSL #2]\n"
"fmla z27.s, p3/M, z2.s, z12.s\n"
- "ldr x12, [x16, #0x118]\n"
+ "ldr x11, [x15, #0x118]\n"
"fmla z28.s, p3/M, z0.s, z10.s\n"
"fmla z29.s, p3/M, z4.s, z11.s\n"
"fmla z30.s, p3/M, z3.s, z11.s\n"
"fmla z19.s, p3/M, z8.s, z12.s\n"
"fmla z23.s, p3/M, z5.s, z12.s\n"
"fmla z20.s, p3/M, z6.s, z10.s\n"
- "ld1w { z12.s }, p2/Z, [x27, x10, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x26, x9, LSL #2]\n"
"fmla z24.s, p3/M, z3.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x25, x10, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x24, x9, LSL #2]\n"
"fmla z25.s, p3/M, z7.s, z11.s\n"
"fmla z26.s, p3/M, z6.s, z11.s\n"
"fmla z28.s, p3/M, z5.s, z11.s\n"
@@ -309,264 +311,262 @@ void sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"fmla z29.s, p3/M, z7.s, z10.s\n"
"fmla z30.s, p3/M, z6.s, z10.s\n"
"fmla z24.s, p3/M, z8.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x26, x10, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x25, x9, LSL #2]\n"
"fmla z28.s, p3/M, z8.s, z10.s\n"
"fmla z25.s, p3/M, z8.s, z11.s\n"
"fmla z26.s, p3/M, z7.s, z11.s\n"
- "ld1w { z10.s }, p2/Z, [x15, x10, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x14, x9, LSL #2]\n"
"fmla z27.s, p3/M, z6.s, z11.s\n"
"fmla z29.s, p3/M, z5.s, z11.s\n"
"fmla z30.s, p3/M, z4.s, z11.s\n"
"fmla z31.s, p3/M, z3.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x14, x10, LSL #2]\n"
- "ldp x15, x14, [x16, #0x0]\n"
+ "ld1w { z11.s }, p2/Z, [x13, x9, LSL #2]\n"
+ "ldp x14, x13, [x15, #0x0]\n"
"fmla z23.s, p3/M, z8.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x24, x10, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x23, x9, LSL #2]\n"
"fmla z16.s, p3/M, z4.s, z10.s\n"
"fmla z17.s, p3/M, z3.s, z10.s\n"
"fmla z18.s, p3/M, z5.s, z11.s\n"
+ "ld1w { z9.s }, p0/Z, [x14, x10, LSL #2]\n"
"fmla z19.s, p3/M, z4.s, z11.s\n"
"fmla z29.s, p3/M, z8.s, z12.s\n"
"fmla z30.s, p3/M, z7.s, z12.s\n"
"fmla z31.s, p3/M, z6.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x13, x10, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x12, x9, LSL #2]\n"
"fmla z20.s, p3/M, z1.s, z10.s\n"
"fmla z21.s, p3/M, z0.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x12, x10, LSL #2]\n"
- "ldp x13, x12, [x16, #0x10]\n"
+ "ld1w { z10.s }, p2/Z, [x11, x9, LSL #2]\n"
+ "ldp x12, x11, [x15, #0x10]\n"
"fmla z22.s, p3/M, z2.s, z11.s\n"
"fmla z23.s, p3/M, z1.s, z11.s\n"
- "ld1w { z15.s }, p3/Z, [x17]\n"
- "addvl x17, x17, #1\n"
+ "incw x9\n"
+ "ld1w { z11.s }, p0/Z, [x12, x10, LSL #2]\n"
".inst 0xc1adc9d0 // fclamp { z16.s-z19.s }, z14.s, z13.s\n"
- "st1w { z16.s }, p1, [x23, x28, LSL #2]\n"
- "ldr x23, [x9, #0x20]\n"
+ "st1w { z16.s }, p1, [x22, x27, LSL #2]\n"
+ "ldr x22, [x28, #0x20]\n"
"fmla z24.s, p3/M, z7.s, z12.s\n"
- "st1w { z17.s }, p1, [x22, x28, LSL #2]\n"
- "ldr x22, [x9, #0x28]\n"
+ "st1w { z17.s }, p1, [x21, x27, LSL #2]\n"
+ "ldr x21, [x28, #0x28]\n"
"fmla z25.s, p3/M, z6.s, z12.s\n"
"fmla z26.s, p3/M, z8.s, z10.s\n"
- "st1w { z18.s }, p1, [x21, x28, LSL #2]\n"
- "ldr x21, [x9, #0x30]\n"
+ "st1w { z18.s }, p1, [x20, x27, LSL #2]\n"
+ "ldr x20, [x28, #0x30]\n"
"fmla z27.s, p3/M, z7.s, z10.s\n"
".inst 0xc1adc9d4 // fclamp { z20.s-z23.s }, z14.s, z13.s\n"
- "st1w { z19.s }, p1, [x20, x28, LSL #2]\n"
- "ldr x20, [x9, #0x38]\n"
+ "st1w { z19.s }, p1, [x19, x27, LSL #2]\n"
+ "ldr x19, [x28, #0x38]\n"
"fmla z28.s, p3/M, z4.s, z12.s\n"
"fmla z29.s, p3/M, z3.s, z12.s\n"
- "st1w { z20.s }, p1, [x23, x28, LSL #2]\n"
- "ldr x23, [x9, #0x40]\n"
+ "st1w { z20.s }, p1, [x22, x27, LSL #2]\n"
+ "ldr x22, [x28, #0x40]\n"
"fmla z30.s, p3/M, z5.s, z10.s\n"
"fmla z31.s, p3/M, z4.s, z10.s\n"
- "st1w { z21.s }, p1, [x22, x28, LSL #2]\n"
- "ldr x22, [x9, #0x48]\n"
+ "st1w { z21.s }, p1, [x21, x27, LSL #2]\n"
+ "ldr x21, [x28, #0x48]\n"
".inst 0xc1adc9d8 // fclamp { z24.s-z27.s }, z14.s, z13.s\n"
+ "ld1w { z10.s }, p0/Z, [x13, x10, LSL #2]\n"
+ "st1w { z22.s }, p1, [x20, x27, LSL #2]\n"
+ "ldr x20, [x28, #0x50]\n"
+ "ld1w { z12.s }, p0/Z, [x11, x10, LSL #2]\n"
"incw x10\n"
- "st1w { z22.s }, p1, [x21, x28, LSL #2]\n"
- "ldr x21, [x9, #0x50]\n"
- "ld1w { z9.s }, p0/Z, [x15, x11, LSL #2]\n"
- "whilelt p2.s, x10, %x[n_channels]\n"
- "st1w { z23.s }, p1, [x20, x28, LSL #2]\n"
- "ldr x20, [x9, #0x58]\n"
- "ld1w { z10.s }, p0/Z, [x14, x11, LSL #2]\n"
+ "st1w { z23.s }, p1, [x19, x27, LSL #2]\n"
+ "ldr x19, [x28, #0x58]\n"
+ ".inst 0xa040c200 // ld1w { z0.s-z3.s }, pn8.b/Z, [x16]\n"
+ "addvl x16, x16, #4\n"
+ "st1w { z24.s }, p1, [x22, x27, LSL #2]\n"
+ "ldr x22, [x28, #0x60]\n"
+ "whilelt p2.s, x9, %x[n_channels]\n"
+ ".inst 0xa040c204 // ld1w { z4.s-z7.s }, pn8.b/Z, [x16]\n"
+ "st1w { z25.s }, p1, [x21, x27, LSL #2]\n"
+ "ldr x21, [x28, #0x68]\n"
+ "addvl x16, x16, #4\n"
+ "cmp x10, %x[n_channels]\n"
+ "st1w { z26.s }, p1, [x20, x27, LSL #2]\n"
+ "ldr x20, [x28, #0x70]\n"
".inst 0xc1adc9dc // fclamp { z28.s-z31.s }, z14.s, z13.s\n"
- "st1w { z24.s }, p1, [x23, x28, LSL #2]\n"
- "ldr x23, [x9, #0x60]\n"
- "ld1w { z11.s }, p0/Z, [x13, x11, LSL #2]\n"
- "st1w { z25.s }, p1, [x22, x28, LSL #2]\n"
- "ldr x22, [x9, #0x68]\n"
- "ld1w { z12.s }, p0/Z, [x12, x11, LSL #2]\n"
- "incw x11\n"
- "st1w { z26.s }, p1, [x21, x28, LSL #2]\n"
- "ldr x21, [x9, #0x70]\n"
- ".inst 0xa040c220 // ld1w { z0.s-z3.s }, pn8.b/Z, [x17]\n"
- "addvl x17, x17, #4\n"
- "st1w { z27.s }, p1, [x20, x28, LSL #2]\n"
- "ldr x20, [x9, #0x78]\n"
- ".inst 0xa040c224 // ld1w { z4.s-z7.s }, pn8.b/Z, [x17]\n"
- "addvl x17, x17, #4\n"
- "cmp x11, %x[n_channels]\n"
- "st1w { z28.s }, p1, [x23, x28, LSL #2]\n"
- "ld1w { z8.s }, p3/Z, [x17]\n"
- "addvl x17, x17, #1\n"
- "st1w { z29.s }, p1, [x22, x28, LSL #2]\n"
- "st1w { z30.s }, p1, [x21, x28, LSL #2]\n"
- "st1w { z31.s }, p1, [x20, x28, LSL #2]\n"
+ "ld1w { z8.s }, p3/Z, [x16]\n"
+ "st1w { z27.s }, p1, [x19, x27, LSL #2]\n"
+ "ldr x19, [x28, #0x78]\n"
+ "addvl x16, x16, #1\n"
+ "st1w { z28.s }, p1, [x22, x27, LSL #2]\n"
+ "st1w { z29.s }, p1, [x21, x27, LSL #2]\n"
+ "st1w { z30.s }, p1, [x20, x27, LSL #2]\n"
+ "st1w { z31.s }, p1, [x19, x27, LSL #2]\n"
"blt 1b\n"
"2:" // Channel tail
"movprfx z21, z15\n fmla z21.s, p3/M, z4.s, z9.s\n"
"movprfx z16, z15\n fmla z16.s, p3/M, z8.s, z9.s\n"
- "ldr x27, [x16, #0x20]\n"
- "incw x28\n"
+ "ldr x26, [x15, #0x20]\n"
+ "incw x27\n"
"movprfx z22, z15\n fmla z22.s, p3/M, z3.s, z9.s\n"
"movprfx z25, z15\n fmla z25.s, p3/M, z1.s, z9.s\n"
- "ldr x26, [x16, #0x30]\n"
+ "ldr x25, [x15, #0x30]\n"
"mov p1.b, p2.b\n"
"movprfx z26, z15\n fmla z26.s, p3/M, z0.s, z9.s\n"
- "ldr x25, [x16, #0x28]\n"
+ "ldr x24, [x15, #0x28]\n"
"movprfx z17, z15\n fmla z17.s, p3/M, z7.s, z9.s\n"
"movprfx z18, z15\n fmla z18.s, p3/M, z6.s, z9.s\n"
"fmla z21.s, p3/M, z5.s, z12.s\n"
- "ldr x24, [x16, #0x38]\n"
+ "ldr x23, [x15, #0x38]\n"
"movprfx z20, z15\n fmla z20.s, p3/M, z5.s, z9.s\n"
"movprfx z24, z15\n fmla z24.s, p3/M, z2.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x26, x10, LSL #2]\n"
- "ldr x15, [x16, #0x40]\n"
+ "ld1w { z9.s }, p2/Z, [x25, x9, LSL #2]\n"
+ "ldr x14, [x15, #0x40]\n"
"fmla z16.s, p3/M, z0.s, z10.s\n"
"movprfx z19, z15\n fmla z19.s, p3/M, z2.s, z11.s\n"
- "ld1w { z10.s }, p2/Z, [x27, x10, LSL #2]\n"
- "ldr x14, [x16, #0x48]\n"
+ "ld1w { z10.s }, p2/Z, [x26, x9, LSL #2]\n"
+ "ldr x13, [x15, #0x48]\n"
"fmla z22.s, p3/M, z4.s, z12.s\n"
"fmla z25.s, p3/M, z2.s, z12.s\n"
- "ld1w { z11.s }, p2/Z, [x25, x10, LSL #2]\n"
- "ldr x13, [x16, #0x50]\n"
+ "ld1w { z11.s }, p2/Z, [x24, x9, LSL #2]\n"
+ "ldr x12, [x15, #0x50]\n"
"fmla z26.s, p3/M, z1.s, z12.s\n"
"fmla z17.s, p3/M, z8.s, z12.s\n"
- "ldr x27, [x16, #0x60]\n"
+ "ldr x26, [x15, #0x60]\n"
"fmla z18.s, p3/M, z7.s, z12.s\n"
"movprfx z28, z15\n fmla z28.s, p3/M, z6.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x14, x10, LSL #2]\n"
- "ldr x25, [x16, #0x68]\n"
+ "ld1w { z10.s }, p2/Z, [x13, x9, LSL #2]\n"
+ "ldr x24, [x15, #0x68]\n"
"fmla z21.s, p3/M, z7.s, z9.s\n"
"fmla z19.s, p3/M, z6.s, z12.s\n"
- "ldr x12, [x16, #0x58]\n"
+ "ldr x11, [x15, #0x58]\n"
"movprfx z23, z15\n fmla z23.s, p3/M, z3.s, z12.s\n"
"movprfx z27, z15\n fmla z27.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x24, x10, LSL #2]\n"
- "ldr x26, [x16, #0x70]\n"
+ "ld1w { z12.s }, p2/Z, [x23, x9, LSL #2]\n"
+ "ldr x25, [x15, #0x70]\n"
"movprfx z31, z15\n fmla z31.s, p3/M, z8.s, z11.s\n"
"fmla z22.s, p3/M, z6.s, z9.s\n"
- "ld1w { z11.s }, p2/Z, [x15, x10, LSL #2]\n"
- "ldr x24, [x16, #0x78]\n"
+ "ld1w { z11.s }, p2/Z, [x14, x9, LSL #2]\n"
+ "ldr x23, [x15, #0x78]\n"
"fmla z25.s, p3/M, z4.s, z9.s\n"
"fmla z26.s, p3/M, z3.s, z9.s\n"
- "ldr x15, [x16, #0x80]\n"
+ "ldr x14, [x15, #0x80]\n"
+ "movprfx z29, z15\n fmla z29.s, p3/M, z1.s, z9.s\n"
+ "movprfx z30, z15\n fmla z30.s, p3/M, z0.s, z9.s\n"
+ "ldr x13, [x15, #0x88]\n"
"fmla z20.s, p3/M, z8.s, z9.s\n"
"fmla z24.s, p3/M, z5.s, z9.s\n"
- "ldr x14, [x16, #0x88]\n"
+ "ldr x22, [x28, #0x0]\n"
"fmla z28.s, p3/M, z2.s, z9.s\n"
"fmla z16.s, p3/M, z1.s, z12.s\n"
- "ldr x23, [x9, #0x0]\n"
+ "ld1w { z9.s }, p2/Z, [x12, x9, LSL #2]\n"
+ "ldr x12, [x15, #0x90]\n"
"fmla z17.s, p3/M, z0.s, z12.s\n"
- "movprfx z29, z15\n fmla z29.s, p3/M, z1.s, z9.s\n"
- "ldr x22, [x9, #0x8]\n"
- "movprfx z30, z15\n fmla z30.s, p3/M, z0.s, z9.s\n"
"fmla z18.s, p3/M, z2.s, z11.s\n"
- "ld1w { z9.s }, p2/Z, [x13, x10, LSL #2]\n"
- "ldr x13, [x16, #0x90]\n"
+ "ld1w { z12.s }, p2/Z, [x11, x9, LSL #2]\n"
+ "ldr x11, [x15, #0x98]\n"
"fmla z21.s, p3/M, z8.s, z10.s\n"
"fmla z19.s, p3/M, z1.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x27, x10, LSL #2]\n"
- "ldr x27, [x16, #0xa0]\n"
+ "ld1w { z11.s }, p2/Z, [x26, x9, LSL #2]\n"
+ "ldr x26, [x15, #0xa0]\n"
"fmla z22.s, p3/M, z7.s, z10.s\n"
"fmla z23.s, p3/M, z6.s, z10.s\n"
- "ldr x21, [x9, #0x10]\n"
+ "ldr x21, [x28, #0x8]\n"
"fmla z25.s, p3/M, z5.s, z10.s\n"
"fmla z26.s, p3/M, z4.s, z10.s\n"
- "ldr x20, [x9, #0x18]\n"
+ "ldr x20, [x28, #0x10]\n"
"fmla z27.s, p3/M, z3.s, z10.s\n"
"fmla z29.s, p3/M, z2.s, z10.s\n"
+ "ldr x19, [x28, #0x18]\n"
"fmla z30.s, p3/M, z1.s, z10.s\n"
"fmla z31.s, p3/M, z0.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x25, x10, LSL #2]\n"
- "ldr x25, [x16, #0xa8]\n"
+ "ld1w { z10.s }, p2/Z, [x24, x9, LSL #2]\n"
+ "ldr x24, [x15, #0xa8]\n"
"fmla z16.s, p3/M, z3.s, z9.s\n"
"fmla z20.s, p3/M, z0.s, z9.s\n"
- "ld1w { z12.s }, p2/Z, [x12, x10, LSL #2]\n"
- "ldr x12, [x16, #0x98]\n"
"fmla z24.s, p3/M, z6.s, z11.s\n"
"fmla z28.s, p3/M, z3.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x26, x10, LSL #2]\n"
- "ldr x26, [x16, #0xb0]\n"
+ "ld1w { z11.s }, p2/Z, [x25, x9, LSL #2]\n"
+ "ldr x25, [x15, #0xb0]\n"
"fmla z17.s, p3/M, z4.s, z10.s\n"
"fmla z18.s, p3/M, z3.s, z10.s\n"
"fmla z21.s, p3/M, z1.s, z10.s\n"
"fmla z19.s, p3/M, z5.s, z12.s\n"
"fmla z23.s, p3/M, z2.s, z12.s\n"
"fmla z22.s, p3/M, z0.s, z10.s\n"
- "ld1w { z12.s }, p2/Z, [x24, x10, LSL #2]\n"
- "ldr x24, [x16, #0xb8]\n"
+ "ld1w { z12.s }, p2/Z, [x23, x9, LSL #2]\n"
+ "ldr x23, [x15, #0xb8]\n"
"fmla z27.s, p3/M, z8.s, z11.s\n"
"fmla z31.s, p3/M, z5.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x15, x10, LSL #2]\n"
- "ldr x15, [x16, #0xc0]\n"
+ "ld1w { z11.s }, p2/Z, [x14, x9, LSL #2]\n"
+ "ldr x14, [x15, #0xc0]\n"
"fmla z16.s, p3/M, z5.s, z10.s\n"
"fmla z20.s, p3/M, z2.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x14, x10, LSL #2]\n"
- "ldr x14, [x16, #0xc8]\n"
+ "ld1w { z10.s }, p2/Z, [x13, x9, LSL #2]\n"
+ "ldr x13, [x15, #0xc8]\n"
"fmla z17.s, p3/M, z5.s, z12.s\n"
"fmla z18.s, p3/M, z4.s, z12.s\n"
"fmla z21.s, p3/M, z2.s, z12.s\n"
"fmla z19.s, p3/M, z3.s, z12.s\n"
"fmla z22.s, p3/M, z1.s, z12.s\n"
"fmla z23.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x12, x10, LSL #2]\n"
- "ldr x12, [x16, #0xd8]\n"
+ "ld1w { z12.s }, p2/Z, [x11, x9, LSL #2]\n"
+ "ldr x11, [x15, #0xd8]\n"
"fmla z28.s, p3/M, z7.s, z11.s\n"
"fmla z29.s, p3/M, z6.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x13, x10, LSL #2]\n"
- "ldr x13, [x16, #0xd0]\n"
+ "ld1w { z11.s }, p2/Z, [x12, x9, LSL #2]\n"
+ "ldr x12, [x15, #0xd0]\n"
"fmla z16.s, p3/M, z7.s, z10.s\n"
"fmla z17.s, p3/M, z6.s, z10.s\n"
"fmla z20.s, p3/M, z4.s, z10.s\n"
"fmla z21.s, p3/M, z3.s, z10.s\n"
"fmla z24.s, p3/M, z1.s, z10.s\n"
"fmla z25.s, p3/M, z0.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x27, x10, LSL #2]\n"
- "ldr x27, [x16, #0xe0]\n"
+ "ld1w { z10.s }, p2/Z, [x26, x9, LSL #2]\n"
+ "ldr x26, [x15, #0xe0]\n"
"fmla z18.s, p3/M, z8.s, z12.s\n"
"fmla z30.s, p3/M, z8.s, z11.s\n"
"fmla z31.s, p3/M, z7.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x25, x10, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x24, x9, LSL #2]\n"
"fmla z27.s, p3/M, z1.s, z12.s\n"
- "ldr x25, [x16, #0xe8]\n"
+ "ldr x24, [x15, #0xe8]\n"
"fmla z19.s, p3/M, z7.s, z12.s\n"
"fmla z22.s, p3/M, z5.s, z12.s\n"
"fmla z23.s, p3/M, z4.s, z12.s\n"
"fmla z26.s, p3/M, z2.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x26, x10, LSL #2]\n"
- "ldr x26, [x16, #0xf0]\n"
+ "ld1w { z12.s }, p2/Z, [x25, x9, LSL #2]\n"
+ "ldr x25, [x15, #0xf0]\n"
"fmla z16.s, p3/M, z2.s, z10.s\n"
"fmla z17.s, p3/M, z1.s, z10.s\n"
"fmla z18.s, p3/M, z0.s, z10.s\n"
"fmla z20.s, p3/M, z7.s, z11.s\n"
- "ld1w { z10.s }, p2/Z, [x24, x10, LSL #2]\n"
- "ldr x24, [x16, #0xf8]\n"
+ "ld1w { z10.s }, p2/Z, [x23, x9, LSL #2]\n"
+ "ldr x23, [x15, #0xf8]\n"
"fmla z21.s, p3/M, z6.s, z11.s\n"
"fmla z24.s, p3/M, z4.s, z11.s\n"
"fmla z25.s, p3/M, z3.s, z11.s\n"
"fmla z28.s, p3/M, z1.s, z11.s\n"
"fmla z29.s, p3/M, z0.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x15, x10, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x14, x9, LSL #2]\n"
"fmla z27.s, p3/M, z4.s, z11.s\n"
- "ldr x15, [x16, #0x100]\n"
+ "ldr x14, [x15, #0x100]\n"
"fmla z30.s, p3/M, z2.s, z11.s\n"
"fmla z17.s, p3/M, z2.s, z12.s\n"
"fmla z18.s, p3/M, z1.s, z12.s\n"
"fmla z19.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x14, x10, LSL #2]\n"
- "ldr x14, [x16, #0x108]\n"
+ "ld1w { z12.s }, p2/Z, [x13, x9, LSL #2]\n"
+ "ldr x13, [x15, #0x108]\n"
"fmla z16.s, p3/M, z6.s, z10.s\n"
"fmla z20.s, p3/M, z3.s, z10.s\n"
"fmla z24.s, p3/M, z0.s, z10.s\n"
"fmla z22.s, p3/M, z8.s, z11.s\n"
- "ld1w { z10.s }, p2/Z, [x13, x10, LSL #2]\n"
- "ldr x13, [x16, #0x110]\n"
+ "ld1w { z10.s }, p2/Z, [x12, x9, LSL #2]\n"
+ "ldr x12, [x15, #0x110]\n"
"fmla z23.s, p3/M, z7.s, z11.s\n"
"fmla z26.s, p3/M, z5.s, z11.s\n"
"fmla z31.s, p3/M, z1.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x12, x10, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x11, x9, LSL #2]\n"
"fmla z27.s, p3/M, z2.s, z12.s\n"
- "ldr x12, [x16, #0x118]\n"
+ "ldr x11, [x15, #0x118]\n"
"fmla z28.s, p3/M, z0.s, z10.s\n"
"fmla z29.s, p3/M, z4.s, z11.s\n"
"fmla z30.s, p3/M, z3.s, z11.s\n"
"fmla z19.s, p3/M, z8.s, z12.s\n"
"fmla z23.s, p3/M, z5.s, z12.s\n"
"fmla z20.s, p3/M, z6.s, z10.s\n"
- "ld1w { z12.s }, p2/Z, [x27, x10, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x26, x9, LSL #2]\n"
"fmla z24.s, p3/M, z3.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x25, x10, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x24, x9, LSL #2]\n"
"fmla z25.s, p3/M, z7.s, z11.s\n"
"fmla z26.s, p3/M, z6.s, z11.s\n"
"fmla z28.s, p3/M, z5.s, z11.s\n"
@@ -575,18 +575,18 @@ void sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"fmla z29.s, p3/M, z7.s, z10.s\n"
"fmla z30.s, p3/M, z6.s, z10.s\n"
"fmla z24.s, p3/M, z8.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x26, x10, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x25, x9, LSL #2]\n"
"fmla z28.s, p3/M, z8.s, z10.s\n"
"fmla z25.s, p3/M, z8.s, z11.s\n"
"fmla z26.s, p3/M, z7.s, z11.s\n"
- "ld1w { z10.s }, p2/Z, [x15, x10, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x14, x9, LSL #2]\n"
"fmla z27.s, p3/M, z6.s, z11.s\n"
"fmla z29.s, p3/M, z5.s, z11.s\n"
"fmla z30.s, p3/M, z4.s, z11.s\n"
"fmla z31.s, p3/M, z3.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x14, x10, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x13, x9, LSL #2]\n"
"fmla z23.s, p3/M, z8.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x24, x10, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x23, x9, LSL #2]\n"
"fmla z16.s, p3/M, z4.s, z10.s\n"
"fmla z17.s, p3/M, z3.s, z10.s\n"
"fmla z18.s, p3/M, z5.s, z11.s\n"
@@ -594,56 +594,56 @@ void sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"fmla z29.s, p3/M, z8.s, z12.s\n"
"fmla z30.s, p3/M, z7.s, z12.s\n"
"fmla z31.s, p3/M, z6.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x13, x10, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x12, x9, LSL #2]\n"
"fmla z20.s, p3/M, z1.s, z10.s\n"
"fmla z21.s, p3/M, z0.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x12, x10, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x11, x9, LSL #2]\n"
"fmla z22.s, p3/M, z2.s, z11.s\n"
"fmla z23.s, p3/M, z1.s, z11.s\n"
".inst 0xc1adc9d0 // fclamp { z16.s-z19.s }, z14.s, z13.s\n"
- "st1w { z16.s }, p1, [x23, x28, LSL #2]\n"
- "ldr x23, [x9, #0x20]\n"
+ "st1w { z16.s }, p1, [x22, x27, LSL #2]\n"
+ "ldr x22, [x28, #0x20]\n"
"fmla z24.s, p3/M, z7.s, z12.s\n"
- "st1w { z17.s }, p1, [x22, x28, LSL #2]\n"
- "ldr x22, [x9, #0x28]\n"
+ "st1w { z17.s }, p1, [x21, x27, LSL #2]\n"
+ "ldr x21, [x28, #0x28]\n"
"fmla z25.s, p3/M, z6.s, z12.s\n"
"fmla z26.s, p3/M, z8.s, z10.s\n"
- "st1w { z18.s }, p1, [x21, x28, LSL #2]\n"
- "ldr x21, [x9, #0x30]\n"
+ "st1w { z18.s }, p1, [x20, x27, LSL #2]\n"
+ "ldr x20, [x28, #0x30]\n"
"fmla z27.s, p3/M, z7.s, z10.s\n"
".inst 0xc1adc9d4 // fclamp { z20.s-z23.s }, z14.s, z13.s\n"
- "st1w { z19.s }, p1, [x20, x28, LSL #2]\n"
- "ldr x20, [x9, #0x38]\n"
+ "st1w { z19.s }, p1, [x19, x27, LSL #2]\n"
+ "ldr x19, [x28, #0x38]\n"
"fmla z28.s, p3/M, z4.s, z12.s\n"
"fmla z29.s, p3/M, z3.s, z12.s\n"
- "st1w { z20.s }, p1, [x23, x28, LSL #2]\n"
- "ldr x23, [x9, #0x40]\n"
+ "st1w { z20.s }, p1, [x22, x27, LSL #2]\n"
+ "ldr x22, [x28, #0x40]\n"
"fmla z30.s, p3/M, z5.s, z10.s\n"
"fmla z31.s, p3/M, z4.s, z10.s\n"
- "st1w { z21.s }, p1, [x22, x28, LSL #2]\n"
- "ldr x22, [x9, #0x48]\n"
+ "st1w { z21.s }, p1, [x21, x27, LSL #2]\n"
+ "ldr x21, [x28, #0x48]\n"
".inst 0xc1adc9d8 // fclamp { z24.s-z27.s }, z14.s, z13.s\n"
".inst 0xc1adc9dc // fclamp { z28.s-z31.s }, z14.s, z13.s\n"
- "st1w { z22.s }, p1, [x21, x28, LSL #2]\n"
- "ldr x21, [x9, #0x50]\n"
- "st1w { z23.s }, p1, [x20, x28, LSL #2]\n"
- "ldr x20, [x9, #0x58]\n"
- "st1w { z24.s }, p1, [x23, x28, LSL #2]\n"
- "ldr x23, [x9, #0x60]\n"
- "st1w { z25.s }, p1, [x22, x28, LSL #2]\n"
- "ldr x22, [x9, #0x68]\n"
- "st1w { z26.s }, p1, [x21, x28, LSL #2]\n"
- "ldr x21, [x9, #0x70]\n"
- "st1w { z27.s }, p1, [x20, x28, LSL #2]\n"
- "ldr x20, [x9, #0x78]\n"
- "st1w { z28.s }, p1, [x23, x28, LSL #2]\n"
- "st1w { z29.s }, p1, [x22, x28, LSL #2]\n"
- "st1w { z30.s }, p1, [x21, x28, LSL #2]\n"
- "st1w { z31.s }, p1, [x20, x28, LSL #2]\n"
+ "st1w { z22.s }, p1, [x20, x27, LSL #2]\n"
+ "ldr x20, [x28, #0x50]\n"
+ "st1w { z23.s }, p1, [x19, x27, LSL #2]\n"
+ "ldr x19, [x28, #0x58]\n"
+ "st1w { z24.s }, p1, [x22, x27, LSL #2]\n"
+ "ldr x22, [x28, #0x60]\n"
+ "st1w { z25.s }, p1, [x21, x27, LSL #2]\n"
+ "ldr x21, [x28, #0x68]\n"
+ "st1w { z26.s }, p1, [x20, x27, LSL #2]\n"
+ "ldr x20, [x28, #0x70]\n"
+ "st1w { z27.s }, p1, [x19, x27, LSL #2]\n"
+ "ldr x19, [x28, #0x78]\n"
+ "st1w { z28.s }, p1, [x22, x27, LSL #2]\n"
+ "st1w { z29.s }, p1, [x21, x27, LSL #2]\n"
+ "st1w { z30.s }, p1, [x20, x27, LSL #2]\n"
+ "st1w { z31.s }, p1, [x19, x27, LSL #2]\n"
".inst 0xd503467f // SMSTOP\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
index f7f67855c1..449df1e29a 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -94,102 +94,102 @@ void sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"mov x3, #0x0\n"
"1:" // Tile loop
"str x2, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "mov x22, #0x4\n"
+ "mov x21, #0x4\n"
"str x3, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x21, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "mul x20, x2, x21\n" // offset = tile_i * ld_input_row
+ "ldr x20, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "mul x19, x2, x20\n" // offset = tile_i * ld_input_row
"ldr x4, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "madd x20, x3, x4, x20\n" // offset += tile_j * ld_input_col
- "mul x20, x20, x22\n" // offset *= kernel_stride * output_size
+ "madd x19, x3, x4, x19\n" // offset += tile_j * ld_input_col
+ "mul x19, x19, x21\n" // offset *= kernel_stride * output_size
"ldr x5, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "add x5, x5, x20, LSL #2\n" // inptr[0] += offset * sizeof(float)
- "add x6, x5, x21, LSL #2\n"
- "add x7, x6, x21, LSL #2\n"
+ "add x5, x5, x19, LSL #2\n" // inptr[0] += offset * sizeof(float)
+ "add x6, x5, x20, LSL #2\n"
+ "add x7, x6, x20, LSL #2\n"
"add x8, x4, x4\n"
"ldr x17, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x16, x7, x21, LSL #2\n"
+ "add x16, x7, x20, LSL #2\n"
"add x15, x8, x4\n"
- "add x14, x16, x21, LSL #2\n"
+ "add x14, x16, x20, LSL #2\n"
"add x13, x15, x4\n"
"cbnz x3, 2f\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "sub x21, x20, x3\n"
- "sub x21, x21, #0x1\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "sub x20, x19, x3\n"
+ "sub x20, x20, #0x1\n"
"lsl x12, %x[n_channels], #0x2\n"
- "mov x20, #0x10\n"
- "and x21, x21, #0x3fffff\n"
- "mul x20, x20, x4\n"
- "orr x12, x12, x21, LSL #22\n"
- "orr x12, x12, x20, LSL #38\n"
- "add x27, x7, x8, LSL #2\n"
- "add x26, x5, x4, LSL #2\n"
- "add x25, x5, x15, LSL #2\n"
- "add x24, x5, x13, LSL #2\n"
- "add x23, x6, x4, LSL #2\n"
- "add x22, x5, x8, LSL #2\n"
- "add x21, x6, x15, LSL #2\n"
- "add x20, x6, x13, LSL #2\n"
- "add x11, x6, x8, LSL #2\n"
- "add x10, x16, x4, LSL #2\n"
- "add x9, x7, x4, LSL #2\n"
- "add x28, x16, x15, LSL #2\n"
- ".inst 0xf8ac4b7a // rprfm pldonce, x12, [x27]\n"
- "add x27, x7, x15, LSL #2\n"
- ".inst 0xf8ac48ba // rprfm pldonce, x12, [x5]\n"
- ".inst 0xf8ac4b5a // rprfm pldonce, x12, [x26]\n"
- "add x26, x16, x13, LSL #2\n"
- ".inst 0xf8ac4b3a // rprfm pldonce, x12, [x25]\n"
- "add x25, x7, x13, LSL #2\n"
- ".inst 0xf8ac4b1a // rprfm pldonce, x12, [x24]\n"
- "add x24, x14, x4, LSL #2\n"
- ".inst 0xf8ac48da // rprfm pldonce, x12, [x6]\n"
- ".inst 0xf8ac4afa // rprfm pldonce, x12, [x23]\n"
- "add x23, x16, x8, LSL #2\n"
- ".inst 0xf8ac4ada // rprfm pldonce, x12, [x22]\n"
- "add x22, x14, x15, LSL #2\n"
- ".inst 0xf8ac4aba // rprfm pldonce, x12, [x21]\n"
- "add x21, x14, x8, LSL #2\n"
- ".inst 0xf8ac4a9a // rprfm pldonce, x12, [x20]\n"
- "add x20, x14, x13, LSL #2\n"
- ".inst 0xf8ac497a // rprfm pldonce, x12, [x11]\n"
- ".inst 0xf8ac4a1a // rprfm pldonce, x12, [x16]\n"
- ".inst 0xf8ac48fa // rprfm pldonce, x12, [x7]\n"
- ".inst 0xf8ac495a // rprfm pldonce, x12, [x10]\n"
- ".inst 0xf8ac493a // rprfm pldonce, x12, [x9]\n"
- ".inst 0xf8ac4b9a // rprfm pldonce, x12, [x28]\n"
- ".inst 0xf8ac4b7a // rprfm pldonce, x12, [x27]\n"
- ".inst 0xf8ac4b5a // rprfm pldonce, x12, [x26]\n"
- ".inst 0xf8ac49da // rprfm pldonce, x12, [x14]\n"
- ".inst 0xf8ac4b3a // rprfm pldonce, x12, [x25]\n"
- ".inst 0xf8ac4b1a // rprfm pldonce, x12, [x24]\n"
- ".inst 0xf8ac4afa // rprfm pldonce, x12, [x23]\n"
- ".inst 0xf8ac4ada // rprfm pldonce, x12, [x22]\n"
- ".inst 0xf8ac4aba // rprfm pldonce, x12, [x21]\n"
- ".inst 0xf8ac4a9a // rprfm pldonce, x12, [x20]\n"
+ "mov x19, #0x10\n"
+ "and x20, x20, #0x3fffff\n"
+ "mul x19, x19, x4\n"
+ "orr x12, x12, x20, LSL #22\n"
+ "orr x12, x12, x19, LSL #38\n"
+ "add x25, x7, x8, LSL #2\n"
+ "add x24, x5, x4, LSL #2\n"
+ "add x23, x5, x15, LSL #2\n"
+ "add x22, x5, x13, LSL #2\n"
+ "add x21, x6, x4, LSL #2\n"
+ "add x20, x5, x8, LSL #2\n"
+ "add x19, x6, x15, LSL #2\n"
+ "add x11, x6, x13, LSL #2\n"
+ "add x10, x6, x8, LSL #2\n"
+ "add x9, x16, x4, LSL #2\n"
+ "add x28, x7, x4, LSL #2\n"
+ "add x27, x16, x15, LSL #2\n"
+ "add x26, x7, x15, LSL #2\n"
+ ".inst 0xf8ac4b3a // rprfm pldonce, x25, [x12]\n"
+ "add x25, x16, x13, LSL #2\n"
+ ".inst 0xf8ac48ba // rprfm pldonce, x5, [x12]\n"
+ ".inst 0xf8ac4b1a // rprfm pldonce, x24, [x12]\n"
+ "add x24, x7, x13, LSL #2\n"
+ ".inst 0xf8ac4afa // rprfm pldonce, x23, [x12]\n"
+ "add x23, x14, x4, LSL #2\n"
+ ".inst 0xf8ac4ada // rprfm pldonce, x22, [x12]\n"
+ "add x22, x16, x8, LSL #2\n"
+ ".inst 0xf8ac48da // rprfm pldonce, x6, [x12]\n"
+ ".inst 0xf8ac4aba // rprfm pldonce, x21, [x12]\n"
+ "add x21, x14, x15, LSL #2\n"
+ ".inst 0xf8ac4a9a // rprfm pldonce, x20, [x12]\n"
+ "add x20, x14, x8, LSL #2\n"
+ ".inst 0xf8ac4a7a // rprfm pldonce, x19, [x12]\n"
+ "add x19, x14, x13, LSL #2\n"
+ ".inst 0xf8ac497a // rprfm pldonce, x11, [x12]\n"
+ ".inst 0xf8ac495a // rprfm pldonce, x10, [x12]\n"
+ ".inst 0xf8ac4a1a // rprfm pldonce, x16, [x12]\n"
+ ".inst 0xf8ac48fa // rprfm pldonce, x7, [x12]\n"
+ ".inst 0xf8ac493a // rprfm pldonce, x9, [x12]\n"
+ ".inst 0xf8ac4b9a // rprfm pldonce, x28, [x12]\n"
+ ".inst 0xf8ac4b7a // rprfm pldonce, x27, [x12]\n"
+ ".inst 0xf8ac4b5a // rprfm pldonce, x26, [x12]\n"
+ ".inst 0xf8ac4b3a // rprfm pldonce, x25, [x12]\n"
+ ".inst 0xf8ac49da // rprfm pldonce, x14, [x12]\n"
+ ".inst 0xf8ac4b1a // rprfm pldonce, x24, [x12]\n"
+ ".inst 0xf8ac4afa // rprfm pldonce, x23, [x12]\n"
+ ".inst 0xf8ac4ada // rprfm pldonce, x22, [x12]\n"
+ ".inst 0xf8ac4aba // rprfm pldonce, x21, [x12]\n"
+ ".inst 0xf8ac4a9a // rprfm pldonce, x20, [x12]\n"
+ ".inst 0xf8ac4a7a // rprfm pldonce, x19, [x12]\n"
"2:" // Tile loop: Prefetch input rows: End
- "ldr x22, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "mul x21, x2, x22\n" // offset = tile_i * ld_output_row
- "mov x20, #0x2\n"
+ "ldr x21, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "mul x20, x2, x21\n" // offset = tile_i * ld_output_row
+ "mov x19, #0x2\n"
"ld1w { z19.s }, p3/Z, [x17]\n"
- "ldr x25, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "madd x21, x3, x25, x21\n" // offset += tile_j * ld_output_col
+ "ldr x24, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+ "madd x20, x3, x24, x20\n" // offset += tile_j * ld_output_col
"addvl x17, x17, #1\n"
".inst 0xa040c220 // ld1w { z0.s-z3.s }, pn8.b/Z, [x17]\n"
- "ldr x24, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "mul x21, x21, x20\n" // offset *= output_tile_size
- "cntw x23\n"
+ "ldr x23, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "mul x20, x20, x19\n" // offset *= output_tile_size
+ "cntw x22\n"
"ld1rw { z18.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
"addvl x17, x17, #4\n"
- "add x24, x24, x21, LSL #2\n" // outptrs[0] += offset * sizeof(float)
+ "add x23, x23, x20, LSL #2\n" // outptrs[0] += offset * sizeof(float)
".inst 0xa040c224 // ld1w { z4.s-z7.s }, pn8.b/Z, [x17]\n"
"whilelt p2.s, XZR, %x[n_channels]\n"
"addvl x17, x17, #4\n"
"ld1rw { z17.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "cmp x23, %x[n_channels]\n"
- "add x22, x24, x22, LSL #2\n"
+ "cmp x22, %x[n_channels]\n"
+ "add x21, x23, x21, LSL #2\n"
"ld1w { z8.s }, p3/Z, [x17]\n"
- "mov x21, #0x0\n"
- "sub x20, XZR, x23\n"
+ "mov x20, #0x0\n"
+ "sub x19, XZR, x22\n"
"ld1w { z9.s }, p2/Z, [x7, x8, LSL #2]\n"
"ld1w { z10.s }, p2/Z, [x5]\n"
"addvl x17, x17, #1\n"
@@ -203,12 +203,12 @@ void sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"3:" // Tile loop: Channel loop
"movprfx z28, z19\n fmla z28.s, p3/M, z8.s, z9.s\n"
"movprfx z29, z19\n fmla z29.s, p3/M, z6.s, z9.s\n"
- "whilelt p1.s, x23, %x[n_channels]\n"
- "incw x21\n"
+ "whilelt p1.s, x22, %x[n_channels]\n"
+ "incw x20\n"
"fmla z28.s, p3/M, z0.s, z10.s\n"
"fmla z29.s, p3/M, z1.s, z12.s\n"
"ld1w { z12.s }, p2/Z, [x6, x13, LSL #2]\n"
- "incw x23\n"
+ "incw x22\n"
"fmla z28.s, p3/M, z1.s, z11.s\n"
"fmla z29.s, p3/M, z2.s, z13.s\n"
"ld1w { z11.s }, p2/Z, [x6, x15, LSL #2]\n"
@@ -224,7 +224,7 @@ void sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"fmla z28.s, p3/M, z2.s, z16.s\n"
"fmla z29.s, p3/M, z5.s, z12.s\n"
"ld1w { z15.s }, p2/Z, [x7]\n"
- "incw x20\n"
+ "incw x19\n"
"movprfx z30, z19\n fmla z30.s, p3/M, z2.s, z9.s\n"
"movprfx z31, z19\n fmla z31.s, p3/M, z0.s, z9.s\n"
"ld1w { z12.s }, p2/Z, [x7, x15, LSL #2]\n"
@@ -263,24 +263,24 @@ void sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"ld1w { z11.s }, p2/Z, [x14, x13, LSL #2]\n"
"fmla z30.s, p3/M, z8.s, z15.s\n"
"fmla z31.s, p3/M, z8.s, z11.s\n"
- "whilelt p2.s, x21, %x[n_channels]\n"
+ "whilelt p2.s, x20, %x[n_channels]\n"
"ld1w { z19.s }, p3/Z, [x17]\n"
"addvl x17, x17, #1\n"
- "cmp x23, %x[n_channels]\n"
+ "cmp x22, %x[n_channels]\n"
".inst 0xc1b1ca5c // fclamp { z28.s-z31.s }, z18.s, z17.s\n"
".inst 0xa040c220 // ld1w { z0.s-z3.s }, pn8.b/Z, [x17]\n"
"addvl x17, x17, #4\n"
"addvl x14, x14, #1\n"
- "st1w { z28.s }, p0, [x24]\n"
+ "st1w { z28.s }, p0, [x23]\n"
".inst 0xa040c224 // ld1w { z4.s-z7.s }, pn8.b/Z, [x17]\n"
"addvl x17, x17, #4\n"
- "st1w { z29.s }, p0, [x24, x25, LSL #2]\n"
- "addvl x24, x24, #1\n"
+ "st1w { z29.s }, p0, [x23, x24, LSL #2]\n"
+ "addvl x23, x23, #1\n"
"ld1w { z9.s }, p1/Z, [x7, x8, LSL #2]\n"
- "st1w { z30.s }, p0, [x22]\n"
+ "st1w { z30.s }, p0, [x21]\n"
"ld1w { z10.s }, p1/Z, [x5]\n"
- "st1w { z31.s }, p0, [x22, x25, LSL #2]\n"
- "addvl x22, x22, #1\n"
+ "st1w { z31.s }, p0, [x21, x24, LSL #2]\n"
+ "addvl x21, x21, #1\n"
"ld1w { z11.s }, p1/Z, [x5, x4, LSL #2]\n"
"ld1w { z12.s }, p1/Z, [x5, x15, LSL #2]\n"
"ld1w { z13.s }, p1/Z, [x5, x13, LSL #2]\n"
@@ -302,23 +302,23 @@ void sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"fmla z28.s, p3/M, z1.s, z11.s\n"
"fmla z29.s, p3/M, z2.s, z13.s\n"
"ld1w { z11.s }, p2/Z, [x6, x15, LSL #2]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
"fmla z28.s, p3/M, z3.s, z14.s\n"
"fmla z29.s, p3/M, z0.s, z16.s\n"
"ld1w { z13.s }, p2/Z, [x6, x8, LSL #2]\n"
- "ldr x21, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
"fmla z28.s, p3/M, z4.s, z15.s\n"
"fmla z29.s, p3/M, z4.s, z11.s\n"
"ld1w { z14.s }, p2/Z, [x16]\n"
- "cmp x3, x20\n"
+ "cmp x3, x19\n"
"fmla z28.s, p3/M, z2.s, z16.s\n"
"fmla z29.s, p3/M, z5.s, z12.s\n"
"ld1w { z15.s }, p2/Z, [x7]\n"
- "add x20, x2, #0x1\n"
+ "add x19, x2, #0x1\n"
"movprfx z30, z19\n fmla z30.s, p3/M, z2.s, z9.s\n"
"movprfx z31, z19\n fmla z31.s, p3/M, z0.s, z9.s\n"
"ld1w { z12.s }, p2/Z, [x7, x15, LSL #2]\n"
- "csel x2, x2, x20, LT\n"
+ "csel x2, x2, x19, LT\n"
"fmla z28.s, p3/M, z5.s, z13.s\n"
"fmla z29.s, p3/M, z3.s, z13.s\n"
"ld1w { z13.s }, p2/Z, [x16, x15, LSL #2]\n"
@@ -330,7 +330,7 @@ void sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"fmla z30.s, p3/M, z0.s, z15.s\n"
"fmla z31.s, p3/M, z1.s, z12.s\n"
"ld1w { z14.s }, p2/Z, [x16, x13, LSL #2]\n"
- "cmp x2, x21\n"
+ "cmp x2, x20\n"
"fmla z30.s, p3/M, z4.s, z11.s\n"
"fmla z31.s, p3/M, z5.s, z14.s\n"
"ld1w { z16.s }, p2/Z, [x7, x4, LSL #2]\n"
@@ -356,15 +356,15 @@ void sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"fmla z30.s, p3/M, z8.s, z15.s\n"
"fmla z31.s, p3/M, z8.s, z11.s\n"
".inst 0xc1b1ca5c // fclamp { z28.s-z31.s }, z18.s, z17.s\n"
- "st1w { z28.s }, p0, [x24]\n"
- "st1w { z29.s }, p0, [x24, x25, LSL #2]\n"
- "st1w { z30.s }, p0, [x22]\n"
- "st1w { z31.s }, p0, [x22, x25, LSL #2]\n"
+ "st1w { z28.s }, p0, [x23]\n"
+ "st1w { z29.s }, p0, [x23, x24, LSL #2]\n"
+ "st1w { z30.s }, p0, [x21]\n"
+ "st1w { z31.s }, p0, [x21, x24, LSL #2]\n"
"blt 1b\n"
".inst 0xd503467f // SMSTOP\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
index e2ff9a214e..063084eb3c 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,228 +87,228 @@ void sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
activation_min, activation_max);
__asm__ __volatile__(
- "ldr x20, [%x[params_struct], %[offsetof_args_outptrs]]\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_outptrs]]\n"
".inst 0xd503477f // SMSTART ZA\n"
- "add x16, %x[params_struct], %[offsetof_Args_inptrs]\n"
+ "add x15, %x[params_struct], %[offsetof_Args_inptrs]\n"
"ptrue p3.b\n"
- "ldr x15, [%x[params_struct], %[offsetof_args_params]]\n"
+ "ldr x14, [%x[params_struct], %[offsetof_args_params]]\n"
".inst 0x25207810 // ptrue pn8.b\n"
- "ld1w { z19.s }, p3/Z, [x15]\n"
- "addvl x15, x15, #1\n"
- "ldp x14, x13, [x20, #0x0]\n"
- "cntw x12\n"
- ".inst 0xa040c1e0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x15]\n"
- "addvl x15, x15, #4\n"
- "ldp x11, x10, [x20, #0x10]\n"
- "mov x9, #0x0\n"
+ "ld1w { z19.s }, p3/Z, [x14]\n"
+ "addvl x14, x14, #1\n"
+ "ldp x13, x12, [x19, #0x0]\n"
+ "cntw x11\n"
+ ".inst 0xa040c1c0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x14]\n"
+ "addvl x14, x14, #4\n"
+ "ldp x10, x9, [x19, #0x10]\n"
+ "mov x28, #0x0\n"
"whilelt p2.s, XZR, %x[n_channels]\n"
- ".inst 0xa040c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15]\n"
- "ldp x28, x27, [x16, #0x0]\n"
- "addvl x15, x15, #4\n"
- "cmp x12, %x[n_channels]\n"
+ ".inst 0xa040c1c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x14]\n"
+ "ldp x27, x26, [x15, #0x0]\n"
+ "addvl x14, x14, #4\n"
+ "cmp x11, %x[n_channels]\n"
"ld1rw { z18.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ldp x26, x25, [x16, #0x10]\n"
+ "ldp x25, x24, [x15, #0x10]\n"
"ld1rw { z17.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "sub x24, XZR, x12\n"
- "ldp x23, x22, [x16, #0x20]\n"
- "ld1w { z8.s }, p3/Z, [x15]\n"
- "addvl x15, x15, #1\n"
- "ldp x21, x20, [x16, #0x30]\n"
- "ld1w { z9.s }, p2/Z, [x28, x9, LSL #2]\n"
- "ld1w { z10.s }, p2/Z, [x27, x9, LSL #2]\n"
- "ld1w { z11.s }, p2/Z, [x26, x9, LSL #2]\n"
- "ld1w { z12.s }, p2/Z, [x25, x9, LSL #2]\n"
- "ld1w { z13.s }, p2/Z, [x23, x9, LSL #2]\n"
- "ld1w { z14.s }, p2/Z, [x22, x9, LSL #2]\n"
- "ld1w { z15.s }, p2/Z, [x21, x9, LSL #2]\n"
- "ld1w { z16.s }, p2/Z, [x20, x9, LSL #2]\n"
+ "sub x23, XZR, x11\n"
+ "ldp x22, x21, [x15, #0x20]\n"
+ "ld1w { z8.s }, p3/Z, [x14]\n"
+ "addvl x14, x14, #1\n"
+ "ldp x20, x19, [x15, #0x30]\n"
+ "ld1w { z9.s }, p2/Z, [x27, x28, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x26, x28, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x25, x28, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x24, x28, LSL #2]\n"
+ "ld1w { z13.s }, p2/Z, [x22, x28, LSL #2]\n"
+ "ld1w { z14.s }, p2/Z, [x21, x28, LSL #2]\n"
+ "ld1w { z15.s }, p2/Z, [x20, x28, LSL #2]\n"
+ "ld1w { z16.s }, p2/Z, [x19, x28, LSL #2]\n"
"bge 2f\n"
"1:" // Channel loop
"movprfx z28, z19\n fmla z28.s, p3/M, z8.s, z9.s\n"
"movprfx z29, z19\n fmla z29.s, p3/M, z6.s, z9.s\n"
- "ldr x28, [x16, #0x40]\n"
- "whilelt p1.s, x12, %x[n_channels]\n"
+ "ldr x27, [x15, #0x40]\n"
+ "whilelt p1.s, x11, %x[n_channels]\n"
"fmla z28.s, p3/M, z0.s, z10.s\n"
"fmla z29.s, p3/M, z1.s, z12.s\n"
- "ldr x27, [x16, #0x48]\n"
- "ld1w { z12.s }, p2/Z, [x27, x9, LSL #2]\n"
+ "ldr x26, [x15, #0x48]\n"
+ "ld1w { z12.s }, p2/Z, [x26, x28, LSL #2]\n"
"fmla z28.s, p3/M, z1.s, z11.s\n"
"fmla z29.s, p3/M, z2.s, z13.s\n"
- "ld1w { z11.s }, p2/Z, [x28, x9, LSL #2]\n"
- "ldr x26, [x16, #0x50]\n"
+ "ld1w { z11.s }, p2/Z, [x27, x28, LSL #2]\n"
+ "ldr x25, [x15, #0x50]\n"
"fmla z28.s, p3/M, z3.s, z14.s\n"
"fmla z29.s, p3/M, z0.s, z16.s\n"
- "ld1w { z13.s }, p2/Z, [x26, x9, LSL #2]\n"
- "ldr x25, [x16, #0x58]\n"
+ "ld1w { z13.s }, p2/Z, [x25, x28, LSL #2]\n"
+ "ldr x24, [x15, #0x58]\n"
"fmla z28.s, p3/M, z4.s, z15.s\n"
"fmla z29.s, p3/M, z4.s, z11.s\n"
- "ldr x20, [x16, #0x78]\n"
- "ld1w { z14.s }, p2/Z, [x25, x9, LSL #2]\n"
+ "ldr x19, [x15, #0x78]\n"
+ "ld1w { z14.s }, p2/Z, [x24, x28, LSL #2]\n"
"fmla z28.s, p3/M, z2.s, z16.s\n"
"fmla z29.s, p3/M, z5.s, z12.s\n"
- "ldr x23, [x16, #0x60]\n"
- "ld1w { z15.s }, p2/Z, [x23, x9, LSL #2]\n"
+ "ldr x22, [x15, #0x60]\n"
+ "ld1w { z15.s }, p2/Z, [x22, x28, LSL #2]\n"
"movprfx z30, z19\n fmla z30.s, p3/M, z2.s, z9.s\n"
"movprfx z31, z19\n fmla z31.s, p3/M, z0.s, z9.s\n"
- "ldr x28, [x16, #0x80]\n"
- "ld1w { z12.s }, p2/Z, [x28, x9, LSL #2]\n"
+ "ldr x27, [x15, #0x80]\n"
+ "ld1w { z12.s }, p2/Z, [x27, x28, LSL #2]\n"
"fmla z28.s, p3/M, z5.s, z13.s\n"
"fmla z29.s, p3/M, z3.s, z13.s\n"
- "ld1w { z13.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldr x22, [x16, #0x68]\n"
+ "ld1w { z13.s }, p2/Z, [x19, x28, LSL #2]\n"
+ "ldr x21, [x15, #0x68]\n"
"fmla z30.s, p3/M, z3.s, z14.s\n"
"fmla z31.s, p3/M, z4.s, z13.s\n"
- "ldr x27, [x16, #0x88]\n"
- "ld1w { z11.s }, p2/Z, [x22, x9, LSL #2]\n"
+ "ldr x26, [x15, #0x88]\n"
+ "ld1w { z11.s }, p2/Z, [x21, x28, LSL #2]\n"
"fmla z30.s, p3/M, z0.s, z15.s\n"
"fmla z31.s, p3/M, z1.s, z12.s\n"
- "ld1w { z14.s }, p2/Z, [x27, x9, LSL #2]\n"
- "ldr x21, [x16, #0x70]\n"
- "ldr x25, [x16, #0x98]\n"
+ "ld1w { z14.s }, p2/Z, [x26, x28, LSL #2]\n"
+ "ldr x20, [x15, #0x70]\n"
+ "ldr x24, [x15, #0x98]\n"
"fmla z30.s, p3/M, z4.s, z11.s\n"
"fmla z31.s, p3/M, z5.s, z14.s\n"
- "ld1w { z16.s }, p2/Z, [x21, x9, LSL #2]\n"
+ "ld1w { z16.s }, p2/Z, [x20, x28, LSL #2]\n"
"fmla z28.s, p3/M, z6.s, z15.s\n"
- "ld1w { z11.s }, p2/Z, [x25, x9, LSL #2]\n"
- "ldr x26, [x16, #0x90]\n"
+ "ld1w { z11.s }, p2/Z, [x24, x28, LSL #2]\n"
+ "ldr x25, [x15, #0x90]\n"
"fmla z30.s, p3/M, z1.s, z16.s\n"
- "ldr x22, [x16, #0xa8]\n"
+ "ldr x21, [x15, #0xa8]\n"
"fmla z31.s, p3/M, z2.s, z11.s\n"
"fmla z28.s, p3/M, z7.s, z16.s\n"
- "ld1w { z15.s }, p2/Z, [x26, x9, LSL #2]\n"
- "ld1w { z16.s }, p2/Z, [x22, x9, LSL #2]\n"
- "ldr x23, [x16, #0xa0]\n"
+ "ld1w { z15.s }, p2/Z, [x25, x28, LSL #2]\n"
+ "ld1w { z16.s }, p2/Z, [x21, x28, LSL #2]\n"
+ "ldr x22, [x15, #0xa0]\n"
"fmla z30.s, p3/M, z6.s, z15.s\n"
"fmla z31.s, p3/M, z3.s, z16.s\n"
- "ldr x21, [x16, #0xb0]\n"
- "ld1w { z13.s }, p2/Z, [x23, x9, LSL #2]\n"
+ "ldr x20, [x15, #0xb0]\n"
+ "ld1w { z13.s }, p2/Z, [x22, x28, LSL #2]\n"
"fmla z30.s, p3/M, z7.s, z13.s\n"
"fmla z29.s, p3/M, z7.s, z12.s\n"
- "ld1w { z14.s }, p2/Z, [x21, x9, LSL #2]\n"
- "ldr x20, [x16, #0xb8]\n"
+ "ld1w { z14.s }, p2/Z, [x20, x28, LSL #2]\n"
+ "ldr x19, [x15, #0xb8]\n"
"fmla z31.s, p3/M, z7.s, z14.s\n"
"fmla z30.s, p3/M, z5.s, z16.s\n"
- "ld1w { z15.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldr x28, [x16, #0xc0]\n"
+ "ld1w { z15.s }, p2/Z, [x19, x28, LSL #2]\n"
+ "ldr x27, [x15, #0xc0]\n"
"fmla z31.s, p3/M, z6.s, z15.s\n"
"fmla z29.s, p3/M, z8.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x28, x9, LSL #2]\n"
- "ldp x28, x27, [x16, #0x0]\n"
+ "ld1w { z11.s }, p2/Z, [x27, x28, LSL #2]\n"
+ "ldp x27, x26, [x15, #0x0]\n"
"fmla z30.s, p3/M, z8.s, z15.s\n"
"fmla z31.s, p3/M, z8.s, z11.s\n"
- "ldp x26, x25, [x16, #0x10]\n"
- "ld1w { z19.s }, p3/Z, [x15]\n"
- "addvl x15, x15, #1\n"
- "incw x9\n"
- "ldp x23, x22, [x16, #0x20]\n"
- "ld1w { z9.s }, p1/Z, [x28, x12, LSL #2]\n"
- "incw x24\n"
+ "ldp x25, x24, [x15, #0x10]\n"
+ "ld1w { z19.s }, p3/Z, [x14]\n"
+ "addvl x14, x14, #1\n"
+ "incw x28\n"
+ "ldp x22, x21, [x15, #0x20]\n"
+ "ld1w { z9.s }, p1/Z, [x27, x11, LSL #2]\n"
+ "incw x23\n"
"mov p0.b, p2.b\n"
- "ldp x21, x20, [x16, #0x30]\n"
- "ld1w { z10.s }, p1/Z, [x27, x12, LSL #2]\n"
- "whilelt p2.s, x9, %x[n_channels]\n"
+ "ldp x20, x19, [x15, #0x30]\n"
+ "ld1w { z10.s }, p1/Z, [x26, x11, LSL #2]\n"
+ "whilelt p2.s, x28, %x[n_channels]\n"
".inst 0xc1b1ca5c // fclamp { z28.s-z31.s }, z18.s, z17.s\n"
- "ld1w { z11.s }, p1/Z, [x26, x12, LSL #2]\n"
- "st1w { z28.s }, p0, [x14, x24, LSL #2]\n"
- "ld1w { z12.s }, p1/Z, [x25, x12, LSL #2]\n"
- "st1w { z29.s }, p0, [x13, x24, LSL #2]\n"
- "ld1w { z13.s }, p1/Z, [x23, x12, LSL #2]\n"
- "st1w { z30.s }, p0, [x11, x24, LSL #2]\n"
- "ld1w { z14.s }, p1/Z, [x22, x12, LSL #2]\n"
- "st1w { z31.s }, p0, [x10, x24, LSL #2]\n"
- "ld1w { z15.s }, p1/Z, [x21, x12, LSL #2]\n"
- "ld1w { z16.s }, p1/Z, [x20, x12, LSL #2]\n"
- "incw x12\n"
- "cmp x12, %x[n_channels]\n"
- ".inst 0xa040c1e0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x15]\n"
- "addvl x15, x15, #4\n"
- ".inst 0xa040c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15]\n"
- "addvl x15, x15, #4\n"
- "ld1w { z8.s }, p3/Z, [x15]\n"
- "addvl x15, x15, #1\n"
+ "ld1w { z11.s }, p1/Z, [x25, x11, LSL #2]\n"
+ "st1w { z28.s }, p0, [x13, x23, LSL #2]\n"
+ "ld1w { z12.s }, p1/Z, [x24, x11, LSL #2]\n"
+ "st1w { z29.s }, p0, [x12, x23, LSL #2]\n"
+ "ld1w { z13.s }, p1/Z, [x22, x11, LSL #2]\n"
+ "st1w { z30.s }, p0, [x10, x23, LSL #2]\n"
+ "ld1w { z14.s }, p1/Z, [x21, x11, LSL #2]\n"
+ "st1w { z31.s }, p0, [x9, x23, LSL #2]\n"
+ "ld1w { z15.s }, p1/Z, [x20, x11, LSL #2]\n"
+ "ld1w { z16.s }, p1/Z, [x19, x11, LSL #2]\n"
+ "incw x11\n"
+ "cmp x11, %x[n_channels]\n"
+ ".inst 0xa040c1c0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x14]\n"
+ "addvl x14, x14, #4\n"
+ ".inst 0xa040c1c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x14]\n"
+ "addvl x14, x14, #4\n"
+ "ld1w { z8.s }, p3/Z, [x14]\n"
+ "addvl x14, x14, #1\n"
"blt 1b\n"
"2:" // Channel tail
"movprfx z28, z19\n fmla z28.s, p3/M, z8.s, z9.s\n"
"movprfx z29, z19\n fmla z29.s, p3/M, z6.s, z9.s\n"
- "ldr x28, [x16, #0x40]\n"
- "incw x24\n"
+ "ldr x27, [x15, #0x40]\n"
+ "incw x23\n"
"fmla z28.s, p3/M, z0.s, z10.s\n"
"fmla z29.s, p3/M, z1.s, z12.s\n"
- "ldr x27, [x16, #0x48]\n"
- "ld1w { z12.s }, p2/Z, [x27, x9, LSL #2]\n"
+ "ldr x26, [x15, #0x48]\n"
+ "ld1w { z12.s }, p2/Z, [x26, x28, LSL #2]\n"
"fmla z28.s, p3/M, z1.s, z11.s\n"
"fmla z29.s, p3/M, z2.s, z13.s\n"
- "ld1w { z11.s }, p2/Z, [x28, x9, LSL #2]\n"
- "ldr x26, [x16, #0x50]\n"
+ "ld1w { z11.s }, p2/Z, [x27, x28, LSL #2]\n"
+ "ldr x25, [x15, #0x50]\n"
"fmla z28.s, p3/M, z3.s, z14.s\n"
"fmla z29.s, p3/M, z0.s, z16.s\n"
- "ld1w { z13.s }, p2/Z, [x26, x9, LSL #2]\n"
- "ldr x25, [x16, #0x58]\n"
+ "ld1w { z13.s }, p2/Z, [x25, x28, LSL #2]\n"
+ "ldr x24, [x15, #0x58]\n"
"fmla z28.s, p3/M, z4.s, z15.s\n"
"fmla z29.s, p3/M, z4.s, z11.s\n"
- "ldr x20, [x16, #0x78]\n"
- "ld1w { z14.s }, p2/Z, [x25, x9, LSL #2]\n"
+ "ldr x19, [x15, #0x78]\n"
+ "ld1w { z14.s }, p2/Z, [x24, x28, LSL #2]\n"
"fmla z28.s, p3/M, z2.s, z16.s\n"
"fmla z29.s, p3/M, z5.s, z12.s\n"
- "ldr x23, [x16, #0x60]\n"
- "ld1w { z15.s }, p2/Z, [x23, x9, LSL #2]\n"
+ "ldr x22, [x15, #0x60]\n"
+ "ld1w { z15.s }, p2/Z, [x22, x28, LSL #2]\n"
"movprfx z30, z19\n fmla z30.s, p3/M, z2.s, z9.s\n"
"movprfx z31, z19\n fmla z31.s, p3/M, z0.s, z9.s\n"
- "ldr x28, [x16, #0x80]\n"
- "ld1w { z12.s }, p2/Z, [x28, x9, LSL #2]\n"
+ "ldr x27, [x15, #0x80]\n"
+ "ld1w { z12.s }, p2/Z, [x27, x28, LSL #2]\n"
"fmla z28.s, p3/M, z5.s, z13.s\n"
"fmla z29.s, p3/M, z3.s, z13.s\n"
- "ld1w { z13.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldr x22, [x16, #0x68]\n"
+ "ld1w { z13.s }, p2/Z, [x19, x28, LSL #2]\n"
+ "ldr x21, [x15, #0x68]\n"
"fmla z30.s, p3/M, z3.s, z14.s\n"
"fmla z31.s, p3/M, z4.s, z13.s\n"
- "ldr x27, [x16, #0x88]\n"
- "ld1w { z11.s }, p2/Z, [x22, x9, LSL #2]\n"
+ "ldr x26, [x15, #0x88]\n"
+ "ld1w { z11.s }, p2/Z, [x21, x28, LSL #2]\n"
"fmla z30.s, p3/M, z0.s, z15.s\n"
"fmla z31.s, p3/M, z1.s, z12.s\n"
- "ld1w { z14.s }, p2/Z, [x27, x9, LSL #2]\n"
- "ldr x21, [x16, #0x70]\n"
- "ldr x25, [x16, #0x98]\n"
+ "ld1w { z14.s }, p2/Z, [x26, x28, LSL #2]\n"
+ "ldr x20, [x15, #0x70]\n"
+ "ldr x24, [x15, #0x98]\n"
"fmla z30.s, p3/M, z4.s, z11.s\n"
"fmla z31.s, p3/M, z5.s, z14.s\n"
- "ld1w { z16.s }, p2/Z, [x21, x9, LSL #2]\n"
+ "ld1w { z16.s }, p2/Z, [x20, x28, LSL #2]\n"
"fmla z28.s, p3/M, z6.s, z15.s\n"
- "ld1w { z11.s }, p2/Z, [x25, x9, LSL #2]\n"
- "ldr x26, [x16, #0x90]\n"
+ "ld1w { z11.s }, p2/Z, [x24, x28, LSL #2]\n"
+ "ldr x25, [x15, #0x90]\n"
"fmla z30.s, p3/M, z1.s, z16.s\n"
- "ldr x22, [x16, #0xa8]\n"
+ "ldr x21, [x15, #0xa8]\n"
"fmla z31.s, p3/M, z2.s, z11.s\n"
"fmla z28.s, p3/M, z7.s, z16.s\n"
- "ld1w { z15.s }, p2/Z, [x26, x9, LSL #2]\n"
- "ld1w { z16.s }, p2/Z, [x22, x9, LSL #2]\n"
- "ldr x23, [x16, #0xa0]\n"
+ "ld1w { z15.s }, p2/Z, [x25, x28, LSL #2]\n"
+ "ld1w { z16.s }, p2/Z, [x21, x28, LSL #2]\n"
+ "ldr x22, [x15, #0xa0]\n"
"fmla z30.s, p3/M, z6.s, z15.s\n"
"fmla z31.s, p3/M, z3.s, z16.s\n"
- "ldr x21, [x16, #0xb0]\n"
- "ld1w { z13.s }, p2/Z, [x23, x9, LSL #2]\n"
+ "ldr x20, [x15, #0xb0]\n"
+ "ld1w { z13.s }, p2/Z, [x22, x28, LSL #2]\n"
"fmla z30.s, p3/M, z7.s, z13.s\n"
"fmla z29.s, p3/M, z7.s, z12.s\n"
- "ld1w { z14.s }, p2/Z, [x21, x9, LSL #2]\n"
- "ldr x20, [x16, #0xb8]\n"
+ "ld1w { z14.s }, p2/Z, [x20, x28, LSL #2]\n"
+ "ldr x19, [x15, #0xb8]\n"
"fmla z31.s, p3/M, z7.s, z14.s\n"
"fmla z30.s, p3/M, z5.s, z16.s\n"
- "ld1w { z15.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldr x28, [x16, #0xc0]\n"
+ "ld1w { z15.s }, p2/Z, [x19, x28, LSL #2]\n"
+ "ldr x27, [x15, #0xc0]\n"
"fmla z31.s, p3/M, z6.s, z15.s\n"
"fmla z29.s, p3/M, z8.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x28, x9, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x27, x28, LSL #2]\n"
"fmla z30.s, p3/M, z8.s, z15.s\n"
"fmla z31.s, p3/M, z8.s, z11.s\n"
"mov p0.b, p2.b\n"
".inst 0xc1b1ca5c // fclamp { z28.s-z31.s }, z18.s, z17.s\n"
- "st1w { z28.s }, p0, [x14, x24, LSL #2]\n"
- "st1w { z29.s }, p0, [x13, x24, LSL #2]\n"
- "st1w { z30.s }, p0, [x11, x24, LSL #2]\n"
- "st1w { z31.s }, p0, [x10, x24, LSL #2]\n"
+ "st1w { z28.s }, p0, [x13, x23, LSL #2]\n"
+ "st1w { z29.s }, p0, [x12, x23, LSL #2]\n"
+ "st1w { z30.s }, p0, [x10, x23, LSL #2]\n"
+ "st1w { z31.s }, p0, [x9, x23, LSL #2]\n"
".inst 0xd503467f // SMSTOP\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_3x3_s1_4rows_mla_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_3x3_s1_4rows_mla_za/generic.cpp
index 4d02d29e4e..493166cb19 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_3x3_s1_4rows_mla_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_3x3_s1_4rows_mla_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -69,316 +69,316 @@ void sme2_fp32_planar_3x3_s1_4rows_mla_za_impl(
Args args = { inptr, ld_in_vl, pad_top, 6u - std::min(6u, pad_top + valid_input_rows), pad_left, weights, bias, valid_input_cols, output_cols, outptrs, outlds, outvllds, start_channel, valid_channels, act_min, act_max };
__asm__ __volatile__(
- "ldr x6, [%x[args], %[offsetof_Args_pad_bottom]]\n"
- "mov x20, #0x6\n"
+ "ldr x7, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "mov x19, #0x6\n"
".inst 0xd503477f // SMSTART ZA\n"
- "sub x20, x20, x6\n"
- "ldr x7, [%x[args], %[offsetof_Args_pad_top]]\n"
+ "sub x19, x19, x7\n"
+ "ldr x17, [%x[args], %[offsetof_Args_pad_top]]\n"
"ptrue p2.b\n"
".inst 0x25207812 // ptrue pn10.b\n"
"ld1rw { z5.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_min]]\n"
- "ldr x17, [%x[args], %[offsetof_Args_n_channels]]\n"
- "whilelt p1.s, XZR, x17\n"
- "whilelt p9.s, XZR, x20\n"
+ "ldr x16, [%x[args], %[offsetof_Args_n_channels]]\n"
+ "whilelt p1.s, XZR, x16\n"
+ "whilelt p9.s, XZR, x19\n"
"ld1rw { z11.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_max]]\n"
- "whilelt p8.s, XZR, x7\n"
+ "whilelt p8.s, XZR, x17\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
- "ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ldr x15, [%x[args], %[offsetof_Args_current_channel]]\n"
"1:" // Channel loop
- "ldr x20, [%x[args], %[offsetof_Args_bias]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_bias]]\n"
"fmov z16.s, #0x0\n"
- "cbz x20, 2f\n"
- "ld1w { z16.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "cbz x19, 2f\n"
+ "ld1w { z16.s }, p1/Z, [x19, x15, LSL #2]\n"
"2:" // Load bias: Done
- "ldr x15, [%x[args], %[offsetof_Args_input_cols]]\n"
- "sub x20, x15, #0x1\n"
- "orr x24, x20, %x[ld_in_col], LSL #18\n"
+ "ldr x14, [%x[args], %[offsetof_Args_input_cols]]\n"
+ "sub x19, x14, #0x1\n"
+ "orr x23, x19, %x[ld_in_col], LSL #18\n"
"mov z17.d, z16.d\n"
- "ldr x23, [%x[args], %[offsetof_Args_weights]]\n"
- ".inst 0xa1404ae0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x23]\n"
- "orr x24, x17, x24, LSL #20\n"
- "mov x22, #0x6\n"
- "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "ld1w { z3.s }, p2/Z, [x23, #2, MUL VL]\n"
- "addvl x23, x23, #3\n"
- "add x21, x7, x6\n"
- ".inst 0xa0404ae6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x23]\n"
- "lsl x20, %x[ld_in_row], #0x2\n"
+ "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
+ ".inst 0xa1404ac0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x22]\n"
+ "orr x23, x16, x23, LSL #20\n"
+ "mov x21, #0x6\n"
+ "ldr x13, [%x[args], %[offsetof_Args_inptr]]\n"
+ "ld1w { z3.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "addvl x22, x22, #3\n"
+ "add x20, x17, x7\n"
+ ".inst 0xa0404ac6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x22]\n"
+ "lsl x19, %x[ld_in_row], #0x2\n"
"mov z18.d, z16.d\n"
"mov z19.d, z16.d\n"
- "ld1w { z9.s }, p2/Z, [x23, #2, MUL VL]\n"
- "addvl x23, x23, #3\n"
+ "ld1w { z9.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "addvl x22, x22, #3\n"
"mov x8, #0x0\n"
- "ldr x13, [%x[args], %[offsetof_Args_output_cols]]\n"
- ".inst 0xa1404ae2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x23]\n"
- "lsl x24, x24, #0x2\n"
- "sub x22, x22, x21\n"
- "ld1w { z1.s }, p2/Z, [x23, #2, MUL VL]\n"
- "madd x20, x20, x7, x14\n"
+ "ldr x11, [%x[args], %[offsetof_Args_output_cols]]\n"
+ ".inst 0xa1404ac2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x22]\n"
+ "lsl x23, x23, #0x2\n"
+ "sub x21, x21, x20\n"
+ "ld1w { z1.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "madd x19, x19, x17, x13\n"
"3:" // Issue prefetches
- "subs x22, x22, #0x1\n"
- ".inst 0xf8b84a9c // rprfm pldstrm, x24, [x20]\n"
- "add x20, x20, %x[ld_in_col], LSL #2\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xf8b74a7c // rprfm pldstrm, x23, [x19]\n"
+ "add x19, x19, %x[ld_in_col], LSL #2\n"
"bgt 3b\n"
- "ldr x11, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x2\n"
- "msub x14, x7, x20, x14\n"
+ "ldr x10, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "lsl x19, %x[ld_in_row], #0x2\n"
+ "msub x13, x17, x19, x13\n"
".inst 0xc0040e00 // mova za.d[x8, #0], { z16.d-z19.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
".inst 0xc0040e01 // mova za.d[x8, #1], { z16.d-z19.d }\n"
- "mov x10, #0x2\n"
- "ldp x9, x28, [x11], #0x10\n"
+ "mov x9, #0x2\n"
+ "ldp x28, x27, [x10], #0x10\n"
".inst 0xc0040e02 // mova za.d[x8, #2], { z16.d-z19.d }\n"
- "ldp x27, x26, [x20], #0x10\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
- "ldp x25, x24, [x11], #0x10\n"
- "ldp x23, x22, [x20], #0x10\n"
- "cbz x21, 5f\n"
- "cmp x21, x10\n"
- "csel x20, x21, x10, LT\n"
- "sub x21, x21, x20\n"
- "sub x10, x10, x20\n"
- "cbz x21, 5f\n"
+ "ldp x26, x25, [x19], #0x10\n"
+ "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+ "ldp x24, x23, [x10], #0x10\n"
+ "ldp x22, x21, [x19], #0x10\n"
+ "cbz x20, 5f\n"
+ "cmp x20, x9\n"
+ "csel x19, x20, x9, LT\n"
+ "sub x20, x20, x19\n"
+ "sub x9, x9, x19\n"
+ "cbz x20, 5f\n"
".inst 0xc0060c0c // mova { z12.d-z15.d }, za.d[x8, #0]\n"
- "sub x13, x13, x21\n"
+ "sub x11, x11, x20\n"
".inst 0xc1abc8ac // fclamp { z12.s-z15.s }, z5.s, z11.s\n"
"4:" // Left padding
- "subs x21, x21, #0x1\n"
- "st1w { z12.s }, p1, [x9]\n"
- "add x9, x9, x27, LSL #2\n"
- "st1w { z13.s }, p1, [x28]\n"
+ "subs x20, x20, #0x1\n"
+ "st1w { z12.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
- "st1w { z14.s }, p1, [x25]\n"
- "add x25, x25, x23, LSL #2\n"
- "st1w { z15.s }, p1, [x24]\n"
+ "st1w { z13.s }, p1, [x27]\n"
+ "add x27, x27, x25, LSL #2\n"
+ "st1w { z14.s }, p1, [x24]\n"
"add x24, x24, x22, LSL #2\n"
+ "st1w { z15.s }, p1, [x23]\n"
+ "add x23, x23, x21, LSL #2\n"
"bgt 4b\n"
"5:" // Left padding: End
- "adds XZR, x7, x6\n"
+ "adds XZR, x17, x7\n"
"bne 10f\n"
- "cbz x10, 8f\n"
- "cmp x10, #0x1\n"
- "sub x15, x15, x10\n"
+ "cbz x9, 8f\n"
+ "cmp x9, #0x1\n"
+ "sub x14, x14, x9\n"
"beq 7f\n"
"6:" // Unpadded: 2 priming loads
- "add x20, x14, %x[ld_in_row], LSL #2\n"
- "ld1w { z23.s }, p1/Z, [x14]\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- "ld1w { z24.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z25.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z26.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
+ "ld1w { z23.s }, p1/Z, [x13]\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "ld1w { z24.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z25.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z26.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc1301ae0 // fmla za.s[x8, 0], { z23.s-z26.s }, z0.s\n"
- "ld1w { z27.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z27.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc1361b00 // fmla za.s[x8, 0], { z24.s-z27.s }, z6.s\n"
- "ld1w { z28.s }, p1/Z, [x20]\n"
+ "ld1w { z28.s }, p1/Z, [x19]\n"
".inst 0xc1321b20 // fmla za.s[x8, 0], { z25.s-z28.s }, z2.s\n"
"7:" // Unpadded: 1 priming loads
- "add x20, x14, %x[ld_in_row], LSL #2\n"
- "ld1w { z23.s }, p1/Z, [x14]\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- "ld1w { z24.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z25.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z26.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
+ "ld1w { z23.s }, p1/Z, [x13]\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "ld1w { z24.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z25.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z26.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc1381ae0 // fmla za.s[x8, 0], { z23.s-z26.s }, z8.s\n"
".inst 0xc1301ae1 // fmla za.s[x8, 1], { z23.s-z26.s }, z0.s\n"
- "ld1w { z27.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z27.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc1371b00 // fmla za.s[x8, 0], { z24.s-z27.s }, z7.s\n"
- "ld1w { z28.s }, p1/Z, [x20]\n"
+ "ld1w { z28.s }, p1/Z, [x19]\n"
".inst 0xc1361b01 // fmla za.s[x8, 1], { z24.s-z27.s }, z6.s\n"
".inst 0xc13a1b20 // fmla za.s[x8, 0], { z25.s-z28.s }, z10.s\n"
".inst 0xc1321b21 // fmla za.s[x8, 1], { z25.s-z28.s }, z2.s\n"
"8:" // Unpadded: 0 priming loads
- "cbz x15, 16f\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
- "ld1w { z23.s }, p1/Z, [x14]\n"
- "sub x15, x15, #0x1\n"
- "ld1w { z24.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "sub x13, x13, #0x1\n"
- "ld1w { z25.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "cmp x15, x13\n"
- "ld1w { z26.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "csel x21, x15, x13, LT\n"
- "ld1w { z27.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- "ld1w { z28.s }, p1/Z, [x20]\n"
- "sub x13, x13, x21\n"
- "cbz x21, 15f\n"
+ "cbz x14, 16f\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
+ "ld1w { z23.s }, p1/Z, [x13]\n"
+ "sub x14, x14, #0x1\n"
+ "ld1w { z24.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "sub x11, x11, #0x1\n"
+ "ld1w { z25.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "cmp x14, x11\n"
+ "ld1w { z26.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "csel x20, x14, x11, LT\n"
+ "ld1w { z27.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "ld1w { z28.s }, p1/Z, [x19]\n"
+ "sub x11, x11, x20\n"
+ "cbz x20, 15f\n"
"9:" // Unpadded: Main loop
".inst 0xc1331ae0 // fmla za.s[x8, 0], { z23.s-z26.s }, z3.s\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
- "subs x21, x21, #0x1\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
+ "subs x20, x20, #0x1\n"
".inst 0xc1391b00 // fmla za.s[x8, 0], { z24.s-z27.s }, z9.s\n"
".inst 0xc1381ae1 // fmla za.s[x8, 1], { z23.s-z26.s }, z8.s\n"
".inst 0xc1301ae2 // fmla za.s[x8, 2], { z23.s-z26.s }, z0.s\n"
- "ld1w { z23.s }, p1/Z, [x14]\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
+ "ld1w { z23.s }, p1/Z, [x13]\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
".inst 0xc1311b20 // fmla za.s[x8, 0], { z25.s-z28.s }, z1.s\n"
".inst 0xc1371b01 // fmla za.s[x8, 1], { z24.s-z27.s }, z7.s\n"
".inst 0xc1361b02 // fmla za.s[x8, 2], { z24.s-z27.s }, z6.s\n"
- "ld1w { z24.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z24.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc0060c0c // mova { z12.d-z15.d }, za.d[x8, #0]\n"
".inst 0xc1abc8ac // fclamp { z12.s-z15.s }, z5.s, z11.s\n"
- "st1w { z12.s }, p1, [x9]\n"
- "add x9, x9, x27, LSL #2\n"
- ".inst 0xc13a1b21 // fmla za.s[x8, 1], { z25.s-z28.s }, z10.s\n"
- "st1w { z13.s }, p1, [x28]\n"
+ "st1w { z12.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
+ ".inst 0xc13a1b21 // fmla za.s[x8, 1], { z25.s-z28.s }, z10.s\n"
+ "st1w { z13.s }, p1, [x27]\n"
+ "add x27, x27, x25, LSL #2\n"
".inst 0xc1321b22 // fmla za.s[x8, 2], { z25.s-z28.s }, z2.s\n"
- "ld1w { z25.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z25.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
"add x8, x8, #0x1\n"
- "ld1w { z26.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "st1w { z14.s }, p1, [x25]\n"
- "add x25, x25, x23, LSL #2\n"
- "ld1w { z27.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "st1w { z15.s }, p1, [x24]\n"
+ "ld1w { z26.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "st1w { z14.s }, p1, [x24]\n"
"add x24, x24, x22, LSL #2\n"
+ "ld1w { z27.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "st1w { z15.s }, p1, [x23]\n"
+ "add x23, x23, x21, LSL #2\n"
".inst 0xc0040e02 // mova za.d[x8, #2], { z16.d-z19.d }\n"
- "ld1w { z28.s }, p1/Z, [x20]\n"
+ "ld1w { z28.s }, p1/Z, [x19]\n"
"bgt 9b\n"
"b 15f\n"
"10:" // Padded
- "cbz x10, 13f\n"
- "cmp x10, #0x1\n"
- "sub x15, x15, x10\n"
+ "cbz x9, 13f\n"
+ "cmp x9, #0x1\n"
+ "sub x14, x14, x9\n"
"beq 12f\n"
"11:" // Padded: 2 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z23.s }, p0/Z, [x14]\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
+ "ld1w { z23.s }, p0/Z, [x13]\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z24.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z24.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z25.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z25.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z26.s }, p0/Z, [x20]\n"
+ "ld1w { z26.s }, p0/Z, [x19]\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc1301ae0 // fmla za.s[x8, 0], { z23.s-z26.s }, z0.s\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z27.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z27.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc1361b00 // fmla za.s[x8, 0], { z24.s-z27.s }, z6.s\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z28.s }, p0/Z, [x20]\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
+ "ld1w { z28.s }, p0/Z, [x19]\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
".inst 0xc1321b20 // fmla za.s[x8, 0], { z25.s-z28.s }, z2.s\n"
"12:" // Padded: 1 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z23.s }, p0/Z, [x14]\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
+ "ld1w { z23.s }, p0/Z, [x13]\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z24.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z24.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z25.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z25.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z26.s }, p0/Z, [x20]\n"
+ "ld1w { z26.s }, p0/Z, [x19]\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc1381ae0 // fmla za.s[x8, 0], { z23.s-z26.s }, z8.s\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc1301ae1 // fmla za.s[x8, 1], { z23.s-z26.s }, z0.s\n"
- "ld1w { z27.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z27.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
".inst 0xc1371b00 // fmla za.s[x8, 0], { z24.s-z27.s }, z7.s\n"
- "ld1w { z28.s }, p0/Z, [x20]\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
+ "ld1w { z28.s }, p0/Z, [x19]\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
".inst 0xc1361b01 // fmla za.s[x8, 1], { z24.s-z27.s }, z6.s\n"
".inst 0xc13a1b20 // fmla za.s[x8, 0], { z25.s-z28.s }, z10.s\n"
".inst 0xc1321b21 // fmla za.s[x8, 1], { z25.s-z28.s }, z2.s\n"
"13:" // Padded: 0 priming loads
- "cbz x15, 16f\n"
+ "cbz x14, 16f\n"
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z23.s }, p0/Z, [x14]\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
+ "ld1w { z23.s }, p0/Z, [x13]\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z24.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z24.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z25.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z25.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z26.s }, p0/Z, [x20]\n"
+ "ld1w { z26.s }, p0/Z, [x19]\n"
"mov x12, #0x4\n"
- "sub x15, x15, #0x1\n"
- "sub x13, x13, #0x1\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "sub x14, x14, #0x1\n"
+ "sub x11, x11, #0x1\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "cmp x15, x13\n"
- "ld1w { z27.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "cmp x14, x11\n"
+ "ld1w { z27.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z28.s }, p0/Z, [x20]\n"
- "csel x21, x15, x13, LT\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- "sub x13, x13, x21\n"
- "cbz x21, 15f\n"
+ "ld1w { z28.s }, p0/Z, [x19]\n"
+ "csel x20, x14, x11, LT\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "sub x11, x11, x20\n"
+ "cbz x20, 15f\n"
"14:" // Padded: Main loop
".inst 0xc1331ae0 // fmla za.s[x8, 0], { z23.s-z26.s }, z3.s\n"
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc1391b00 // fmla za.s[x8, 0], { z24.s-z27.s }, z9.s\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
- "subs x21, x21, #0x1\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
+ "subs x20, x20, #0x1\n"
".inst 0xc1381ae1 // fmla za.s[x8, 1], { z23.s-z26.s }, z8.s\n"
".inst 0xc1301ae2 // fmla za.s[x8, 2], { z23.s-z26.s }, z0.s\n"
- "ld1w { z23.s }, p0/Z, [x14]\n"
+ "ld1w { z23.s }, p0/Z, [x13]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
".inst 0xc1311b20 // fmla za.s[x8, 0], { z25.s-z28.s }, z1.s\n"
".inst 0xc1371b01 // fmla za.s[x8, 1], { z24.s-z27.s }, z7.s\n"
".inst 0xc1361b02 // fmla za.s[x8, 2], { z24.s-z27.s }, z6.s\n"
- "ld1w { z24.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z24.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc0060c0c // mova { z12.d-z15.d }, za.d[x8, #0]\n"
".inst 0xc1abc8ac // fclamp { z12.s-z15.s }, z5.s, z11.s\n"
- "st1w { z12.s }, p1, [x9]\n"
- "add x9, x9, x27, LSL #2\n"
- ".inst 0xc13a1b21 // fmla za.s[x8, 1], { z25.s-z28.s }, z10.s\n"
- "st1w { z13.s }, p1, [x28]\n"
+ "st1w { z12.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
+ ".inst 0xc13a1b21 // fmla za.s[x8, 1], { z25.s-z28.s }, z10.s\n"
+ "st1w { z13.s }, p1, [x27]\n"
+ "add x27, x27, x25, LSL #2\n"
".inst 0xc1321b22 // fmla za.s[x8, 2], { z25.s-z28.s }, z2.s\n"
- "ld1w { z25.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z25.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x4\n"
- "ld1w { z26.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "st1w { z14.s }, p1, [x25]\n"
+ "ld1w { z26.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "st1w { z14.s }, p1, [x24]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"add x8, x8, #0x1\n"
- "ld1w { z27.s }, p0/Z, [x20]\n"
- "st1w { z15.s }, p1, [x24]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z27.s }, p0/Z, [x19]\n"
+ "st1w { z15.s }, p1, [x23]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
".inst 0xc0040e02 // mova za.d[x8, #2], { z16.d-z19.d }\n"
- "ld1w { z28.s }, p0/Z, [x20]\n"
- "add x25, x25, x23, LSL #2\n"
+ "ld1w { z28.s }, p0/Z, [x19]\n"
"add x24, x24, x22, LSL #2\n"
+ "add x23, x23, x21, LSL #2\n"
"bgt 14b\n"
"15:" // Main loop tail
".inst 0xc1331ae0 // fmla za.s[x8, 0], { z23.s-z26.s }, z3.s\n"
@@ -390,62 +390,62 @@ void sme2_fp32_planar_3x3_s1_4rows_mla_za_impl(
".inst 0xc1361b02 // fmla za.s[x8, 2], { z24.s-z27.s }, z6.s\n"
".inst 0xc0060c0c // mova { z12.d-z15.d }, za.d[x8, #0]\n"
".inst 0xc1abc8ac // fclamp { z12.s-z15.s }, z5.s, z11.s\n"
- "st1w { z12.s }, p1, [x9]\n"
- "add x9, x9, x27, LSL #2\n"
- ".inst 0xc13a1b21 // fmla za.s[x8, 1], { z25.s-z28.s }, z10.s\n"
- "st1w { z13.s }, p1, [x28]\n"
+ "st1w { z12.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
+ ".inst 0xc13a1b21 // fmla za.s[x8, 1], { z25.s-z28.s }, z10.s\n"
+ "st1w { z13.s }, p1, [x27]\n"
+ "add x27, x27, x25, LSL #2\n"
".inst 0xc1321b22 // fmla za.s[x8, 2], { z25.s-z28.s }, z2.s\n"
"add x8, x8, #0x1\n"
- "st1w { z14.s }, p1, [x25]\n"
- "add x25, x25, x23, LSL #2\n"
- "st1w { z15.s }, p1, [x24]\n"
+ "st1w { z14.s }, p1, [x24]\n"
"add x24, x24, x22, LSL #2\n"
+ "st1w { z15.s }, p1, [x23]\n"
+ "add x23, x23, x21, LSL #2\n"
".inst 0xc0040e02 // mova za.d[x8, #2], { z16.d-z19.d }\n"
"16:" // Main loop skip tail
- "cbz x13, 18f\n"
+ "cbz x11, 18f\n"
"17:" // Right padding loop
".inst 0xc0060c0c // mova { z12.d-z15.d }, za.d[x8, #0]\n"
"add x8, x8, #0x1\n"
- "subs x13, x13, #0x1\n"
+ "subs x11, x11, #0x1\n"
".inst 0xc1abc8ac // fclamp { z12.s-z15.s }, z5.s, z11.s\n"
- "st1w { z12.s }, p1, [x9]\n"
- "add x9, x9, x27, LSL #2\n"
- ".inst 0xc0040e02 // mova za.d[x8, #2], { z16.d-z19.d }\n"
- "st1w { z13.s }, p1, [x28]\n"
+ "st1w { z12.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
- "st1w { z14.s }, p1, [x25]\n"
- "add x25, x25, x23, LSL #2\n"
- "st1w { z15.s }, p1, [x24]\n"
+ ".inst 0xc0040e02 // mova za.d[x8, #2], { z16.d-z19.d }\n"
+ "st1w { z13.s }, p1, [x27]\n"
+ "add x27, x27, x25, LSL #2\n"
+ "st1w { z14.s }, p1, [x24]\n"
"add x24, x24, x22, LSL #2\n"
+ "st1w { z15.s }, p1, [x23]\n"
+ "add x23, x23, x21, LSL #2\n"
"bgt 17b\n"
"18:" // End
- "ldr x23, [%x[args], %[offsetof_Args_weights]]\n"
- "incb x23, ALL, MUL #9\n"
- "str x23, [%x[args], %[offsetof_Args_weights]]\n"
- "incw x16\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "whilelt p1.s, x16, x17\n"
- "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "add x14, x14, x20, LSL #2\n"
- "str x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "ldr x11, [%x[args], %[offsetof_Args_outptrs]]\n"
- "ldr x24, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
- "ldp x23, x22, [x11, #0x0]\n"
- "ldp x21, x20, [x24, #0x0]\n"
- "add x23, x23, x21, LSL #2\n"
+ "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
+ "incb x22, ALL, MUL #9\n"
+ "str x22, [%x[args], %[offsetof_Args_weights]]\n"
+ "incw x15\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+ "whilelt p1.s, x15, x16\n"
+ "ldr x13, [%x[args], %[offsetof_Args_inptr]]\n"
+ "add x13, x13, x19, LSL #2\n"
+ "str x13, [%x[args], %[offsetof_Args_inptr]]\n"
+ "ldr x10, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+ "ldp x22, x21, [x10, #0x0]\n"
+ "ldp x20, x19, [x23, #0x0]\n"
"add x22, x22, x20, LSL #2\n"
- "stp x23, x22, [x11, #0x0]\n"
- "ldp x23, x22, [x11, #0x10]\n"
- "ldp x21, x20, [x24, #0x10]\n"
- "add x23, x23, x21, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "stp x22, x21, [x10, #0x0]\n"
+ "ldp x22, x21, [x10, #0x10]\n"
+ "ldp x20, x19, [x23, #0x10]\n"
"add x22, x22, x20, LSL #2\n"
- "stp x23, x22, [x11, #0x10]\n"
+ "add x21, x21, x19, LSL #2\n"
+ "stp x22, x21, [x10, #0x10]\n"
"b.any 1b\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_bias] "I" (offsetof(Args, bias)), [offsetof_Args_clamp_max] "I" (offsetof(Args, clamp_max)), [offsetof_Args_clamp_min] "I" (offsetof(Args, clamp_min)), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_3x3_s2_4rows_mla_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_3x3_s2_4rows_mla_za/generic.cpp
index 9f6b09ef88..289803ce8c 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_3x3_s2_4rows_mla_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_3x3_s2_4rows_mla_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -69,578 +69,578 @@ void sme2_fp32_planar_3x3_s2_4rows_mla_za_impl(
Args args = { inptr, ld_in_vl, pad_top, 9u - std::min(9u, pad_top + valid_input_rows), pad_left, weights, bias, valid_input_cols, output_cols, outptrs, outlds, outvllds, start_channel, valid_channels, act_min, act_max };
__asm__ __volatile__(
- "ldr x6, [%x[args], %[offsetof_Args_pad_bottom]]\n"
- "mov x20, #0x9\n"
+ "ldr x7, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "mov x19, #0x9\n"
".inst 0xd503477f // SMSTART ZA\n"
- "sub x20, x20, x6\n"
- "ldr x7, [%x[args], %[offsetof_Args_pad_top]]\n"
+ "sub x19, x19, x7\n"
+ "ldr x17, [%x[args], %[offsetof_Args_pad_top]]\n"
"ptrue p2.b\n"
".inst 0x25207812 // ptrue pn10.b\n"
"ld1rw { z28.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_min]]\n"
- "ldr x17, [%x[args], %[offsetof_Args_n_channels]]\n"
- "whilelt p1.s, XZR, x17\n"
- "whilelt p9.s, XZR, x20\n"
+ "ldr x16, [%x[args], %[offsetof_Args_n_channels]]\n"
+ "whilelt p1.s, XZR, x16\n"
+ "whilelt p9.s, XZR, x19\n"
"ld1rw { z19.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_max]]\n"
- "whilelt p8.s, XZR, x7\n"
+ "whilelt p8.s, XZR, x17\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
- "ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ldr x15, [%x[args], %[offsetof_Args_current_channel]]\n"
"1:" // Channel loop
- "ldr x20, [%x[args], %[offsetof_Args_bias]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_bias]]\n"
"fmov z24.s, #0x0\n"
- "cbz x20, 2f\n"
- "ld1w { z24.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "cbz x19, 2f\n"
+ "ld1w { z24.s }, p1/Z, [x19, x15, LSL #2]\n"
"2:" // Load bias: Done
- "ldr x15, [%x[args], %[offsetof_Args_input_cols]]\n"
- "sub x20, x15, #0x1\n"
- "orr x24, x20, %x[ld_in_col], LSL #18\n"
+ "ldr x14, [%x[args], %[offsetof_Args_input_cols]]\n"
+ "sub x19, x14, #0x1\n"
+ "orr x23, x19, %x[ld_in_col], LSL #18\n"
"mov z25.d, z24.d\n"
- "ldr x23, [%x[args], %[offsetof_Args_weights]]\n"
- ".inst 0xa0404ae2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x23]\n"
- "orr x24, x17, x24, LSL #20\n"
- "mov x22, #0x9\n"
- "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "ld1w { z7.s }, p2/Z, [x23, #2, MUL VL]\n"
- "addvl x23, x23, #3\n"
- "add x21, x7, x6\n"
- ".inst 0xa0404ae4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x23]\n"
- "lsl x20, %x[ld_in_row], #0x2\n"
+ "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
+ ".inst 0xa0404ac2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x22]\n"
+ "orr x23, x16, x23, LSL #20\n"
+ "mov x21, #0x9\n"
+ "ldr x13, [%x[args], %[offsetof_Args_inptr]]\n"
+ "ld1w { z7.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "addvl x22, x22, #3\n"
+ "add x20, x17, x7\n"
+ ".inst 0xa0404ac4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x22]\n"
+ "lsl x19, %x[ld_in_row], #0x2\n"
"mov z26.d, z24.d\n"
"mov z27.d, z24.d\n"
- "ld1w { z6.s }, p2/Z, [x23, #2, MUL VL]\n"
- "addvl x23, x23, #3\n"
+ "ld1w { z6.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "addvl x22, x22, #3\n"
"mov x8, #0x0\n"
- "ldr x13, [%x[args], %[offsetof_Args_output_cols]]\n"
- ".inst 0xa1404ae1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x23]\n"
- "lsl x24, x24, #0x2\n"
- "sub x22, x22, x21\n"
- "ld1w { z8.s }, p2/Z, [x23, #2, MUL VL]\n"
- "madd x20, x20, x7, x14\n"
+ "ldr x11, [%x[args], %[offsetof_Args_output_cols]]\n"
+ ".inst 0xa1404ac1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x22]\n"
+ "lsl x23, x23, #0x2\n"
+ "sub x21, x21, x20\n"
+ "ld1w { z8.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "madd x19, x19, x17, x13\n"
"3:" // Issue prefetches
- "subs x22, x22, #0x1\n"
- ".inst 0xf8b84a9c // rprfm pldstrm, x24, [x20]\n"
- "add x20, x20, %x[ld_in_col], LSL #2\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xf8b74a7c // rprfm pldstrm, x23, [x19]\n"
+ "add x19, x19, %x[ld_in_col], LSL #2\n"
"bgt 3b\n"
- "ldr x11, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x2\n"
- "msub x14, x7, x20, x14\n"
+ "ldr x10, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "lsl x19, %x[ld_in_row], #0x2\n"
+ "msub x13, x17, x19, x13\n"
".inst 0xc0040f00 // mova za.d[x8, #0], { z24.d-z27.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
".inst 0xc0040f01 // mova za.d[x8, #1], { z24.d-z27.d }\n"
- "mov x22, #0x2\n"
- "ldp x10, x9, [x11], #0x10\n"
+ "mov x21, #0x2\n"
+ "ldp x9, x28, [x10], #0x10\n"
".inst 0xc0040f02 // mova za.d[x8, #2], { z24.d-z27.d }\n"
- "ldp x28, x27, [x20], #0x10\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
- "ldp x26, x25, [x11], #0x10\n"
- "ldp x24, x23, [x20], #0x10\n"
- "cbz x21, 5f\n"
- "cmp x21, x22\n"
- "csel x20, x21, x22, LT\n"
- "sub x21, x21, x20\n"
- "sub x22, x22, x20\n"
- "cbz x21, 5f\n"
+ "ldp x27, x26, [x19], #0x10\n"
+ "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+ "ldp x25, x24, [x10], #0x10\n"
+ "ldp x23, x22, [x19], #0x10\n"
+ "cbz x20, 5f\n"
+ "cmp x20, x21\n"
+ "csel x19, x20, x21, LT\n"
+ "sub x20, x20, x19\n"
+ "sub x21, x21, x19\n"
+ "cbz x20, 5f\n"
".inst 0xc0060c14 // mova { z20.d-z23.d }, za.d[x8, #0]\n"
- "and x22, x21, #0x1\n"
- "add x21, x21, #0x1\n"
+ "and x21, x20, #0x1\n"
+ "add x20, x20, #0x1\n"
".inst 0xc1b3cb94 // fclamp { z20.s-z23.s }, z28.s, z19.s\n"
- "lsr x21, x21, #0x1\n"
- "sub x13, x13, x21\n"
+ "lsr x20, x20, #0x1\n"
+ "sub x11, x11, x20\n"
"4:" // Left padding
- "subs x21, x21, #0x1\n"
- "st1w { z20.s }, p1, [x10]\n"
- "add x10, x10, x28, LSL #2\n"
- "st1w { z21.s }, p1, [x9]\n"
+ "subs x20, x20, #0x1\n"
+ "st1w { z20.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
- "st1w { z22.s }, p1, [x26]\n"
- "add x26, x26, x24, LSL #2\n"
- "st1w { z23.s }, p1, [x25]\n"
+ "st1w { z21.s }, p1, [x28]\n"
+ "add x28, x28, x26, LSL #2\n"
+ "st1w { z22.s }, p1, [x25]\n"
"add x25, x25, x23, LSL #2\n"
+ "st1w { z23.s }, p1, [x24]\n"
+ "add x24, x24, x22, LSL #2\n"
"bgt 4b\n"
"5:" // Left padding: End
- "adds XZR, x7, x6\n"
+ "adds XZR, x17, x7\n"
"bne 10f\n"
- "cbz x22, 8f\n"
- "cmp x22, #0x1\n"
- "sub x15, x15, x22\n"
+ "cbz x21, 8f\n"
+ "cmp x21, #0x1\n"
+ "sub x14, x14, x21\n"
"beq 7f\n"
"6:" // Unpadded: 2 priming loads
- "add x20, x14, %x[ld_in_row], LSL #2\n"
- "ld1w { z12.s }, p1/Z, [x14]\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- "ld1w { z29.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z13.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z30.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z14.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z31.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z15.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
+ "ld1w { z12.s }, p1/Z, [x13]\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "ld1w { z29.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z13.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z30.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z31.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z15.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc1321980 // fmla za.s[x8, 0], { z12.s-z15.s }, z2.s\n"
- "ld1w { z0.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z0.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc1341ba0 // fmla za.s[x8, 0], { z29.s-z0.s }, z4.s\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
".inst 0xc13119a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z1.s\n"
"7:" // Unpadded: 1 priming loads
- "add x20, x14, %x[ld_in_row], LSL #2\n"
- "ld1w { z12.s }, p1/Z, [x14]\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
+ "ld1w { z12.s }, p1/Z, [x13]\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "ld1w { z29.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z13.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z30.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z31.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z15.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1331980 // fmla za.s[x8, 0], { z12.s-z15.s }, z3.s\n"
+ "ld1w { z0.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1351ba0 // fmla za.s[x8, 0], { z29.s-z0.s }, z5.s\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ ".inst 0xc13919a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z9.s\n"
+ "8:" // Unpadded: 0 priming loads
+ "cmp x14, #0x2\n"
+ "blt 16f\n"
+ "add x20, x13, %x[ld_in_row], LSL #2\n"
+ "ld1w { z12.s }, p1/Z, [x13]\n"
+ "sub x14, x14, #0x2\n"
"ld1w { z29.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "sub x11, x11, #0x1\n"
"ld1w { z13.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "lsr x19, x14, #0x1\n"
"ld1w { z30.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "cmp x19, x11\n"
"ld1w { z14.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "csel x21, x19, x11, LT\n"
"ld1w { z31.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
"ld1w { z15.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1331980 // fmla za.s[x8, 0], { z12.s-z15.s }, z3.s\n"
+ "and x14, x14, #0x1\n"
"ld1w { z0.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1351ba0 // fmla za.s[x8, 0], { z29.s-z0.s }, z5.s\n"
+ "sub x11, x11, x21\n"
"ld1w { z16.s }, p1/Z, [x20]\n"
- ".inst 0xc13919a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z9.s\n"
- "8:" // Unpadded: 0 priming loads
- "cmp x15, #0x2\n"
- "blt 16f\n"
- "add x21, x14, %x[ld_in_row], LSL #2\n"
- "ld1w { z12.s }, p1/Z, [x14]\n"
- "sub x15, x15, #0x2\n"
- "ld1w { z29.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- "sub x13, x13, #0x1\n"
- "ld1w { z13.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- "lsr x20, x15, #0x1\n"
- "ld1w { z30.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- "cmp x20, x13\n"
- "ld1w { z14.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- "csel x22, x20, x13, LT\n"
- "ld1w { z31.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- "ld1w { z15.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- "and x15, x15, #0x1\n"
- "ld1w { z0.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- "sub x13, x13, x22\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
- "cbz x22, 15f\n"
+ "cbz x21, 15f\n"
"9:" // Unpadded: Main loop
".inst 0xc1371980 // fmla za.s[x8, 0], { z12.s-z15.s }, z7.s\n"
- "add x21, x14, %x[ld_in_row], LSL #2\n"
- "subs x22, x22, #0x1\n"
+ "add x20, x13, %x[ld_in_row], LSL #2\n"
+ "subs x21, x21, #0x1\n"
".inst 0xc1321981 // fmla za.s[x8, 1], { z12.s-z15.s }, z2.s\n"
- "ld1w { z12.s }, p1/Z, [x14]\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
+ "ld1w { z12.s }, p1/Z, [x13]\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
".inst 0xc1361ba0 // fmla za.s[x8, 0], { z29.s-z0.s }, z6.s\n"
".inst 0xc1341ba1 // fmla za.s[x8, 1], { z29.s-z0.s }, z4.s\n"
- "ld1w { z29.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13819a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z8.s\n"
- ".inst 0xc13119a1 // fmla za.s[x8, 1], { z13.s-z16.s }, z1.s\n"
- "ld1w { z13.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- "ld1w { z30.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0xc0060c14 // mova { z20.d-z23.d }, za.d[x8, #0]\n"
- "add x8, x8, #0x1\n"
- "ld1w { z14.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1b3cb94 // fclamp { z20.s-z23.s }, z28.s, z19.s\n"
- "st1w { z20.s }, p1, [x10]\n"
- "ld1w { z31.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- "add x10, x10, x28, LSL #2\n"
- "st1w { z21.s }, p1, [x9]\n"
- "ld1w { z15.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1331980 // fmla za.s[x8, 0], { z12.s-z15.s }, z3.s\n"
- "add x9, x9, x27, LSL #2\n"
- "ld1w { z0.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1351ba0 // fmla za.s[x8, 0], { z29.s-z0.s }, z5.s\n"
- "st1w { z22.s }, p1, [x26]\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
- ".inst 0xc13919a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z9.s\n"
- "add x26, x26, x24, LSL #2\n"
- "st1w { z23.s }, p1, [x25]\n"
- "ld1w { z12.s }, p1/Z, [x14]\n"
- "add x25, x25, x23, LSL #2\n"
- ".inst 0xc0040f02 // mova za.d[x8, #2], { z24.d-z27.d }\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
"ld1w { z29.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc13819a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z8.s\n"
+ ".inst 0xc13119a1 // fmla za.s[x8, 1], { z13.s-z16.s }, z1.s\n"
"ld1w { z13.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
"ld1w { z30.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc0060c14 // mova { z20.d-z23.d }, za.d[x8, #0]\n"
+ "add x8, x8, #0x1\n"
"ld1w { z14.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1b3cb94 // fclamp { z20.s-z23.s }, z28.s, z19.s\n"
+ "st1w { z20.s }, p1, [x9]\n"
"ld1w { z31.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x9, x9, x27, LSL #2\n"
+ "st1w { z21.s }, p1, [x28]\n"
"ld1w { z15.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1331980 // fmla za.s[x8, 0], { z12.s-z15.s }, z3.s\n"
+ "add x28, x28, x26, LSL #2\n"
"ld1w { z0.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1351ba0 // fmla za.s[x8, 0], { z29.s-z0.s }, z5.s\n"
+ "st1w { z22.s }, p1, [x25]\n"
"ld1w { z16.s }, p1/Z, [x20]\n"
+ ".inst 0xc13919a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z9.s\n"
+ "add x25, x25, x23, LSL #2\n"
+ "st1w { z23.s }, p1, [x24]\n"
+ "ld1w { z12.s }, p1/Z, [x13]\n"
+ "add x24, x24, x22, LSL #2\n"
+ ".inst 0xc0040f02 // mova za.d[x8, #2], { z24.d-z27.d }\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "ld1w { z29.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z13.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z30.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z31.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z15.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z0.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
"bgt 9b\n"
"b 15f\n"
"10:" // Padded
- "cbz x22, 13f\n"
- "cmp x22, #0x1\n"
- "sub x15, x15, x22\n"
+ "cbz x21, 13f\n"
+ "cmp x21, #0x1\n"
+ "sub x14, x14, x21\n"
"beq 12f\n"
"11:" // Padded: 2 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z12.s }, p0/Z, [x14]\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
+ "ld1w { z12.s }, p0/Z, [x13]\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z29.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z29.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z13.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z13.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z30.s }, p0/Z, [x20]\n"
+ "ld1w { z30.s }, p0/Z, [x19]\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z14.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z31.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z31.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z15.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z15.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x8\n"
".inst 0xc1321980 // fmla za.s[x8, 0], { z12.s-z15.s }, z2.s\n"
- "ld1w { z0.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z0.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc1341ba0 // fmla za.s[x8, 0], { z29.s-z0.s }, z4.s\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
".inst 0xc13119a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z1.s\n"
"12:" // Padded: 1 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z12.s }, p0/Z, [x14]\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
+ "ld1w { z12.s }, p0/Z, [x13]\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z29.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z29.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z13.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z13.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z30.s }, p0/Z, [x20]\n"
+ "ld1w { z30.s }, p0/Z, [x19]\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z14.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z31.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z31.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z15.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z15.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x8\n"
".inst 0xc1331980 // fmla za.s[x8, 0], { z12.s-z15.s }, z3.s\n"
- "ld1w { z0.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z0.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc1351ba0 // fmla za.s[x8, 0], { z29.s-z0.s }, z5.s\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
".inst 0xc13919a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z9.s\n"
"13:" // Padded: 0 priming loads
- "cmp x15, #0x2\n"
+ "cmp x14, #0x2\n"
"blt 16f\n"
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z12.s }, p0/Z, [x14]\n"
- "add x21, x14, %x[ld_in_row], LSL #2\n"
+ "ld1w { z12.s }, p0/Z, [x13]\n"
+ "add x20, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z29.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z29.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z13.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z13.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z30.s }, p0/Z, [x21]\n"
+ "ld1w { z30.s }, p0/Z, [x20]\n"
"mov x12, #0x4\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z14.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "sub x15, x15, #0x2\n"
- "ld1w { z31.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "sub x14, x14, #0x2\n"
+ "ld1w { z31.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z15.s }, p0/Z, [x21]\n"
- "sub x13, x13, #0x1\n"
- "lsr x20, x15, #0x1\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z15.s }, p0/Z, [x20]\n"
+ "sub x11, x11, #0x1\n"
+ "lsr x19, x14, #0x1\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z0.s }, p0/Z, [x21]\n"
+ "ld1w { z0.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- "cmp x20, x13\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "cmp x19, x11\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x21]\n"
- "csel x22, x20, x13, LT\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- "and x15, x15, #0x1\n"
- "sub x13, x13, x22\n"
- "cbz x22, 15f\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
+ "csel x21, x19, x11, LT\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "and x14, x14, #0x1\n"
+ "sub x11, x11, x21\n"
+ "cbz x21, 15f\n"
"14:" // Padded: Main loop
".inst 0xc1371980 // fmla za.s[x8, 0], { z12.s-z15.s }, z7.s\n"
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc1321981 // fmla za.s[x8, 1], { z12.s-z15.s }, z2.s\n"
- "ld1w { z12.s }, p0/Z, [x14]\n"
- "add x21, x14, %x[ld_in_row], LSL #2\n"
+ "ld1w { z12.s }, p0/Z, [x13]\n"
+ "add x20, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
".inst 0xc1361ba0 // fmla za.s[x8, 0], { z29.s-z0.s }, z6.s\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
".inst 0xc1341ba1 // fmla za.s[x8, 1], { z29.s-z0.s }, z4.s\n"
- "ld1w { z29.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z29.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc13819a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z8.s\n"
- "subs x22, x22, #0x1\n"
+ "subs x21, x21, #0x1\n"
".inst 0xc13119a1 // fmla za.s[x8, 1], { z13.s-z16.s }, z1.s\n"
- "ld1w { z13.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z13.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x4\n"
- "ld1w { z30.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z30.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0xc0060c14 // mova { z20.d-z23.d }, za.d[x8, #0]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z14.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0xc1b3cb94 // fclamp { z20.s-z23.s }, z28.s, z19.s\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z31.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- "st1w { z20.s }, p1, [x10]\n"
+ "ld1w { z31.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "st1w { z20.s }, p1, [x9]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z15.s }, p0/Z, [x21]\n"
+ "ld1w { z15.s }, p0/Z, [x20]\n"
"add x8, x8, #0x1\n"
- "st1w { z21.s }, p1, [x9]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "st1w { z21.s }, p1, [x28]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z0.s }, p0/Z, [x21]\n"
- "st1w { z22.s }, p1, [x26]\n"
+ "ld1w { z0.s }, p0/Z, [x20]\n"
+ "st1w { z22.s }, p1, [x25]\n"
"mov x12, #0x8\n"
".inst 0xc1331980 // fmla za.s[x8, 0], { z12.s-z15.s }, z3.s\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- "st1w { z23.s }, p1, [x25]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "st1w { z23.s }, p1, [x24]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc1351ba0 // fmla za.s[x8, 0], { z29.s-z0.s }, z5.s\n"
"mov x12, #0x0\n"
- "ld1w { z16.s }, p0/Z, [x21]\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z12.s }, p0/Z, [x14]\n"
+ "ld1w { z12.s }, p0/Z, [x13]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
".inst 0xc13919a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z9.s\n"
- "ld1w { z29.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z29.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc0040f02 // mova za.d[x8, #2], { z24.d-z27.d }\n"
- "ld1w { z13.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z13.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x4\n"
- "ld1w { z30.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z30.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z14.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z31.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z31.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z15.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z15.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x8\n"
- "ld1w { z0.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z0.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x10, x10, x28, LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
"add x9, x9, x27, LSL #2\n"
- "add x26, x26, x24, LSL #2\n"
+ "add x28, x28, x26, LSL #2\n"
"add x25, x25, x23, LSL #2\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
+ "add x24, x24, x22, LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
"bgt 14b\n"
"15:" // Main loop tail
".inst 0xc1371980 // fmla za.s[x8, 0], { z12.s-z15.s }, z7.s\n"
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc1321981 // fmla za.s[x8, 1], { z12.s-z15.s }, z2.s\n"
- "ld1w { z12.s }, p0/Z, [x14]\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
+ "ld1w { z12.s }, p0/Z, [x13]\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
".inst 0xc1361ba0 // fmla za.s[x8, 0], { z29.s-z0.s }, z6.s\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
".inst 0xc1341ba1 // fmla za.s[x8, 1], { z29.s-z0.s }, z4.s\n"
- "ld1w { z29.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z29.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc13819a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z8.s\n"
".inst 0xc13119a1 // fmla za.s[x8, 1], { z13.s-z16.s }, z1.s\n"
- "ld1w { z13.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z13.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x4\n"
- "ld1w { z30.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z30.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc0060c14 // mova { z20.d-z23.d }, za.d[x8, #0]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z14.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc1b3cb94 // fclamp { z20.s-z23.s }, z28.s, z19.s\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z31.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "st1w { z20.s }, p1, [x10]\n"
+ "ld1w { z31.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "st1w { z20.s }, p1, [x9]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z15.s }, p0/Z, [x20]\n"
+ "ld1w { z15.s }, p0/Z, [x19]\n"
"add x8, x8, #0x1\n"
- "st1w { z21.s }, p1, [x9]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "st1w { z21.s }, p1, [x28]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z0.s }, p0/Z, [x20]\n"
- "st1w { z22.s }, p1, [x26]\n"
+ "ld1w { z0.s }, p0/Z, [x19]\n"
+ "st1w { z22.s }, p1, [x25]\n"
"mov x12, #0x8\n"
".inst 0xc1331980 // fmla za.s[x8, 0], { z12.s-z15.s }, z3.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "st1w { z23.s }, p1, [x25]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "st1w { z23.s }, p1, [x24]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc1351ba0 // fmla za.s[x8, 0], { z29.s-z0.s }, z5.s\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x10, x10, x28, LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
"add x9, x9, x27, LSL #2\n"
- "add x26, x26, x24, LSL #2\n"
- ".inst 0xc0040f02 // mova za.d[x8, #2], { z24.d-z27.d }\n"
+ "add x28, x28, x26, LSL #2\n"
"add x25, x25, x23, LSL #2\n"
+ ".inst 0xc0040f02 // mova za.d[x8, #2], { z24.d-z27.d }\n"
+ "add x24, x24, x22, LSL #2\n"
".inst 0xc13919a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z9.s\n"
"16:" // Main loop skip tail
- "cbz x15, 17f\n" // Skip remainder inputs
+ "cbz x14, 17f\n" // Skip remainder inputs
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z12.s }, p0/Z, [x14]\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
+ "ld1w { z12.s }, p0/Z, [x13]\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z29.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z29.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z13.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z13.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z30.s }, p0/Z, [x20]\n"
+ "ld1w { z30.s }, p0/Z, [x19]\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z14.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z31.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z31.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z15.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z15.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x8\n"
".inst 0xc1371980 // fmla za.s[x8, 0], { z12.s-z15.s }, z7.s\n"
- "ld1w { z0.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z0.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc1361ba0 // fmla za.s[x8, 0], { z29.s-z0.s }, z6.s\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0xc1321981 // fmla za.s[x8, 1], { z12.s-z15.s }, z2.s\n"
- "sub x13, x13, #0x1\n"
+ "sub x11, x11, #0x1\n"
".inst 0xc13819a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z8.s\n"
".inst 0xc1341ba1 // fmla za.s[x8, 1], { z29.s-z0.s }, z4.s\n"
".inst 0xc0060c14 // mova { z20.d-z23.d }, za.d[x8, #0]\n"
".inst 0xc1b3cb94 // fclamp { z20.s-z23.s }, z28.s, z19.s\n"
- "st1w { z20.s }, p1, [x10]\n"
- "add x10, x10, x28, LSL #2\n"
+ "st1w { z20.s }, p1, [x9]\n"
+ "add x9, x9, x27, LSL #2\n"
".inst 0xc13119a1 // fmla za.s[x8, 1], { z13.s-z16.s }, z1.s\n"
"add x8, x8, #0x1\n"
- "st1w { z21.s }, p1, [x9]\n"
- "add x9, x9, x27, LSL #2\n"
- "st1w { z22.s }, p1, [x26]\n"
- "add x26, x26, x24, LSL #2\n"
- ".inst 0xc0040f02 // mova za.d[x8, #2], { z24.d-z27.d }\n"
- "st1w { z23.s }, p1, [x25]\n"
+ "st1w { z21.s }, p1, [x28]\n"
+ "add x28, x28, x26, LSL #2\n"
+ "st1w { z22.s }, p1, [x25]\n"
"add x25, x25, x23, LSL #2\n"
+ ".inst 0xc0040f02 // mova za.d[x8, #2], { z24.d-z27.d }\n"
+ "st1w { z23.s }, p1, [x24]\n"
+ "add x24, x24, x22, LSL #2\n"
"17:" // Tail input: End
- "cbz x13, 19f\n"
+ "cbz x11, 19f\n"
"18:" // Right padding loop
".inst 0xc0060c14 // mova { z20.d-z23.d }, za.d[x8, #0]\n"
"add x8, x8, #0x1\n"
- "subs x13, x13, #0x1\n"
+ "subs x11, x11, #0x1\n"
".inst 0xc1b3cb94 // fclamp { z20.s-z23.s }, z28.s, z19.s\n"
- "st1w { z20.s }, p1, [x10]\n"
- "add x10, x10, x28, LSL #2\n"
- ".inst 0xc0040f02 // mova za.d[x8, #2], { z24.d-z27.d }\n"
- "st1w { z21.s }, p1, [x9]\n"
+ "st1w { z20.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
- "st1w { z22.s }, p1, [x26]\n"
- "add x26, x26, x24, LSL #2\n"
- "st1w { z23.s }, p1, [x25]\n"
+ ".inst 0xc0040f02 // mova za.d[x8, #2], { z24.d-z27.d }\n"
+ "st1w { z21.s }, p1, [x28]\n"
+ "add x28, x28, x26, LSL #2\n"
+ "st1w { z22.s }, p1, [x25]\n"
"add x25, x25, x23, LSL #2\n"
+ "st1w { z23.s }, p1, [x24]\n"
+ "add x24, x24, x22, LSL #2\n"
"bgt 18b\n"
"19:" // End
- "ldr x23, [%x[args], %[offsetof_Args_weights]]\n"
- "incb x23, ALL, MUL #9\n"
- "str x23, [%x[args], %[offsetof_Args_weights]]\n"
- "incw x16\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "whilelt p1.s, x16, x17\n"
- "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "add x14, x14, x20, LSL #2\n"
- "str x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "ldr x11, [%x[args], %[offsetof_Args_outptrs]]\n"
- "ldr x24, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
- "ldp x23, x22, [x11, #0x0]\n"
- "ldp x21, x20, [x24, #0x0]\n"
- "add x23, x23, x21, LSL #2\n"
+ "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
+ "incb x22, ALL, MUL #9\n"
+ "str x22, [%x[args], %[offsetof_Args_weights]]\n"
+ "incw x15\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+ "whilelt p1.s, x15, x16\n"
+ "ldr x13, [%x[args], %[offsetof_Args_inptr]]\n"
+ "add x13, x13, x19, LSL #2\n"
+ "str x13, [%x[args], %[offsetof_Args_inptr]]\n"
+ "ldr x10, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+ "ldp x22, x21, [x10, #0x0]\n"
+ "ldp x20, x19, [x23, #0x0]\n"
"add x22, x22, x20, LSL #2\n"
- "stp x23, x22, [x11, #0x0]\n"
- "ldp x23, x22, [x11, #0x10]\n"
- "ldp x21, x20, [x24, #0x10]\n"
- "add x23, x23, x21, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "stp x22, x21, [x10, #0x0]\n"
+ "ldp x22, x21, [x10, #0x10]\n"
+ "ldp x20, x19, [x23, #0x10]\n"
"add x22, x22, x20, LSL #2\n"
- "stp x23, x22, [x11, #0x10]\n"
+ "add x21, x21, x19, LSL #2\n"
+ "stp x22, x21, [x10, #0x10]\n"
"b.any 1b\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_bias] "I" (offsetof(Args, bias)), [offsetof_Args_clamp_max] "I" (offsetof(Args, clamp_max)), [offsetof_Args_clamp_min] "I" (offsetof(Args, clamp_min)), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_5x5_s1_4rows_mla_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_5x5_s1_4rows_mla_za/generic.cpp
index bf12b42ddc..0753e2db88 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_5x5_s1_4rows_mla_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_5x5_s1_4rows_mla_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -69,745 +69,745 @@ void sme2_fp32_planar_5x5_s1_4rows_mla_za_impl(
Args args = { inptr, ld_in_vl, pad_top, 8u - std::min(8u, pad_top + valid_input_rows), pad_left, weights, bias, valid_input_cols, output_cols, outptrs, outlds, outvllds, start_channel, valid_channels, act_min, act_max };
__asm__ __volatile__(
- "ldr x5, [%x[args], %[offsetof_Args_pad_bottom]]\n"
- "mov x20, #0x8\n"
+ "ldr x6, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "mov x19, #0x8\n"
".inst 0xd503477f // SMSTART ZA\n"
- "sub x20, x20, x5\n"
- "ldr x6, [%x[args], %[offsetof_Args_pad_top]]\n"
+ "sub x19, x19, x6\n"
+ "ldr x7, [%x[args], %[offsetof_Args_pad_top]]\n"
"ptrue p2.b\n"
".inst 0x25207812 // ptrue pn10.b\n"
"ld1rw { z22.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_min]]\n"
- "ldr x7, [%x[args], %[offsetof_Args_n_channels]]\n"
- "whilelt p1.s, XZR, x7\n"
- "whilelt p9.s, XZR, x20\n"
+ "ldr x17, [%x[args], %[offsetof_Args_n_channels]]\n"
+ "whilelt p1.s, XZR, x17\n"
+ "whilelt p9.s, XZR, x19\n"
"ld1rw { z11.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_max]]\n"
- "whilelt p8.s, XZR, x6\n"
+ "whilelt p8.s, XZR, x7\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
- "ldr x17, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
"1:" // Channel loop
- "ldr x20, [%x[args], %[offsetof_Args_bias]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_bias]]\n"
"fmov z28.s, #0x0\n"
- "cbz x20, 2f\n"
- "ld1w { z28.s }, p1/Z, [x20, x17, LSL #2]\n"
+ "cbz x19, 2f\n"
+ "ld1w { z28.s }, p1/Z, [x19, x16, LSL #2]\n"
"2:" // Load bias: Done
- "ldr x16, [%x[args], %[offsetof_Args_input_cols]]\n"
- "sub x20, x16, #0x1\n"
- "orr x23, x20, %x[ld_in_col], LSL #18\n"
+ "ldr x15, [%x[args], %[offsetof_Args_input_cols]]\n"
+ "sub x19, x15, #0x1\n"
+ "orr x22, x19, %x[ld_in_col], LSL #18\n"
"mov z29.d, z28.d\n"
- "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "orr x23, x7, x23, LSL #20\n"
- "mov x22, #0x8\n"
- "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
- ".inst 0xa04149e2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "add x21, x6, x5\n"
- "lsl x20, %x[ld_in_row], #0x2\n"
- "ld1w { z10.s }, p2/Z, [x15, #4, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "orr x22, x17, x22, LSL #20\n"
+ "mov x21, #0x8\n"
+ "ldr x13, [%x[args], %[offsetof_Args_inptr]]\n"
+ ".inst 0xa04149c2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "add x20, x7, x6\n"
+ "lsl x19, %x[ld_in_row], #0x2\n"
+ "ld1w { z10.s }, p2/Z, [x14, #4, MUL VL]\n"
+ "addvl x14, x14, #5\n"
"mov z30.d, z28.d\n"
"mov z31.d, z28.d\n"
- ".inst 0xa14049e0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xa14049c0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
"mov x8, #0x0\n"
- "ldr x13, [%x[args], %[offsetof_Args_output_cols]]\n"
- "lsl x23, x23, #0x2\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x6, x14\n"
- "ld1w { z1.s }, p2/Z, [x15, #4, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ "ldr x11, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "lsl x22, x22, #0x2\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "sub x21, x21, x20\n"
+ "madd x19, x19, x7, x13\n"
+ "ld1w { z1.s }, p2/Z, [x14, #4, MUL VL]\n"
+ "addvl x14, x14, #5\n"
"3:" // Issue prefetches
- "subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col], LSL #2\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xf8b64a7c // rprfm pldstrm, x22, [x19]\n"
+ "add x19, x19, %x[ld_in_col], LSL #2\n"
"bgt 3b\n"
- "ldr x11, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x2\n"
- "msub x14, x6, x20, x14\n"
+ "ldr x10, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "lsl x19, %x[ld_in_row], #0x2\n"
+ "msub x13, x7, x19, x13\n"
".inst 0xc0040f80 // mova za.d[x8, #0], { z28.d-z31.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
".inst 0xc0040f81 // mova za.d[x8, #1], { z28.d-z31.d }\n"
- "mov x10, #0x4\n"
- "ldp x9, x28, [x11], #0x10\n"
+ "mov x9, #0x4\n"
+ "ldp x28, x27, [x10], #0x10\n"
".inst 0xc0040f82 // mova za.d[x8, #2], { z28.d-z31.d }\n"
- "ldp x27, x26, [x20], #0x10\n"
+ "ldp x26, x25, [x19], #0x10\n"
".inst 0xc0040f83 // mova za.d[x8, #3], { z28.d-z31.d }\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
+ "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
- "ldp x25, x24, [x11], #0x10\n"
- "ldp x23, x22, [x20], #0x10\n"
- "cbz x21, 5f\n"
- "cmp x21, x10\n"
- "csel x20, x21, x10, LT\n"
- "sub x21, x21, x20\n"
- "sub x10, x10, x20\n"
- "cbz x21, 5f\n"
+ "ldp x24, x23, [x10], #0x10\n"
+ "ldp x22, x21, [x19], #0x10\n"
+ "cbz x20, 5f\n"
+ "cmp x20, x9\n"
+ "csel x19, x20, x9, LT\n"
+ "sub x20, x20, x19\n"
+ "sub x9, x9, x19\n"
+ "cbz x20, 5f\n"
".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
- "sub x13, x13, x21\n"
+ "sub x11, x11, x20\n"
".inst 0xc1abcad8 // fclamp { z24.s-z27.s }, z22.s, z11.s\n"
"4:" // Left padding
- "subs x21, x21, #0x1\n"
- "st1w { z24.s }, p1, [x9]\n"
- "add x9, x9, x27, LSL #2\n"
- "st1w { z25.s }, p1, [x28]\n"
+ "subs x20, x20, #0x1\n"
+ "st1w { z24.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
- "st1w { z26.s }, p1, [x25]\n"
- "add x25, x25, x23, LSL #2\n"
- "st1w { z27.s }, p1, [x24]\n"
+ "st1w { z25.s }, p1, [x27]\n"
+ "add x27, x27, x25, LSL #2\n"
+ "st1w { z26.s }, p1, [x24]\n"
"add x24, x24, x22, LSL #2\n"
+ "st1w { z27.s }, p1, [x23]\n"
+ "add x23, x23, x21, LSL #2\n"
"bgt 4b\n"
"5:" // Left padding: End
- "adds XZR, x6, x5\n"
+ "adds XZR, x7, x6\n"
"bne 12f\n"
- "cbz x10, 10f\n"
- "cmp x10, #0x1\n"
- "sub x16, x16, x10\n"
+ "cbz x9, 10f\n"
+ "cmp x9, #0x1\n"
+ "sub x15, x15, x9\n"
"beq 9f\n"
- "cmp x10, #0x2\n"
+ "cmp x9, #0x2\n"
"beq 8f\n"
- "cmp x10, #0x3\n"
+ "cmp x9, #0x3\n"
"beq 7f\n"
"6:" // Unpadded: 4 priming loads
- "add x20, x14, %x[ld_in_row], LSL #2\n"
- "ld1w { z14.s }, p1/Z, [x14]\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- "ld1w { z15.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p1/Z, [x13]\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "ld1w { z15.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc13419c0 // fmla za.s[x8, 0], { z14.s-z17.s }, z4.s\n"
- "ld1w { z18.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z18.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc13019e0 // fmla za.s[x8, 0], { z15.s-z18.s }, z0.s\n"
- "ld1w { z19.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
+ "ld1w { z19.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc1341a00 // fmla za.s[x8, 0], { z16.s-z19.s }, z4.s\n"
- "ld1w { z20.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xa14049e0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
+ "ld1w { z20.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ ".inst 0xa14049c0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc1301a20 // fmla za.s[x8, 0], { z17.s-z20.s }, z0.s\n"
- "ld1w { z21.s }, p1/Z, [x20]\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
+ "ld1w { z21.s }, p1/Z, [x19]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
".inst 0xc1341a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z4.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
- ".inst 0xa14049e0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
+ ".inst 0xa14049c0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
"7:" // Unpadded: 3 priming loads
- "add x20, x14, %x[ld_in_row], LSL #2\n"
- "ld1w { z14.s }, p1/Z, [x14]\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- "ld1w { z15.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p1/Z, [x13]\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "ld1w { z15.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc13519c0 // fmla za.s[x8, 0], { z14.s-z17.s }, z5.s\n"
".inst 0xc13419c1 // fmla za.s[x8, 1], { z14.s-z17.s }, z4.s\n"
- "ld1w { z18.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z18.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc13819e0 // fmla za.s[x8, 0], { z15.s-z18.s }, z8.s\n"
- "ld1w { z19.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z19.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc13019e1 // fmla za.s[x8, 1], { z15.s-z18.s }, z0.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc1351a00 // fmla za.s[x8, 0], { z16.s-z19.s }, z5.s\n"
- "ld1w { z20.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z20.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc1341a01 // fmla za.s[x8, 1], { z16.s-z19.s }, z4.s\n"
- ".inst 0xa14049e0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa14049c0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc1381a20 // fmla za.s[x8, 0], { z17.s-z20.s }, z8.s\n"
- "ld1w { z21.s }, p1/Z, [x20]\n"
+ "ld1w { z21.s }, p1/Z, [x19]\n"
".inst 0xc1301a21 // fmla za.s[x8, 1], { z17.s-z20.s }, z0.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
".inst 0xc1351a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z5.s\n"
- ".inst 0xa04149e2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xa04149c2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
".inst 0xc1341a41 // fmla za.s[x8, 1], { z18.s-z21.s }, z4.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
- ".inst 0xa14049e0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
+ ".inst 0xa14049c0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
"8:" // Unpadded: 2 priming loads
- "add x20, x14, %x[ld_in_row], LSL #2\n"
- "ld1w { z14.s }, p1/Z, [x14]\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- "ld1w { z15.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p1/Z, [x13]\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "ld1w { z15.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc13219c0 // fmla za.s[x8, 0], { z14.s-z17.s }, z2.s\n"
".inst 0xc13519c1 // fmla za.s[x8, 1], { z14.s-z17.s }, z5.s\n"
- "ld1w { z18.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z18.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc13419c2 // fmla za.s[x8, 2], { z14.s-z17.s }, z4.s\n"
- "ld1w { z19.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z19.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc13619e0 // fmla za.s[x8, 0], { z15.s-z18.s }, z6.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
".inst 0xc13819e1 // fmla za.s[x8, 1], { z15.s-z18.s }, z8.s\n"
- ".inst 0xa04149e2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa04149c2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc13019e2 // fmla za.s[x8, 2], { z15.s-z18.s }, z0.s\n"
- "ld1w { z20.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z20.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc1321a00 // fmla za.s[x8, 0], { z16.s-z19.s }, z2.s\n"
- ".inst 0xa14049e0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xa14049c0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
".inst 0xc1351a01 // fmla za.s[x8, 1], { z16.s-z19.s }, z5.s\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc1341a02 // fmla za.s[x8, 2], { z16.s-z19.s }, z4.s\n"
- "ld1w { z21.s }, p1/Z, [x20]\n"
+ "ld1w { z21.s }, p1/Z, [x19]\n"
".inst 0xc1361a20 // fmla za.s[x8, 0], { z17.s-z20.s }, z6.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
".inst 0xc1381a21 // fmla za.s[x8, 1], { z17.s-z20.s }, z8.s\n"
- ".inst 0xa04149e2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
+ ".inst 0xa04149c2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
".inst 0xc1301a22 // fmla za.s[x8, 2], { z17.s-z20.s }, z0.s\n"
".inst 0xc1321a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z2.s\n"
- ".inst 0xa04149e2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xa04149c2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
".inst 0xc1351a41 // fmla za.s[x8, 1], { z18.s-z21.s }, z5.s\n"
".inst 0xc1341a42 // fmla za.s[x8, 2], { z18.s-z21.s }, z4.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
- ".inst 0xa14049e0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
+ ".inst 0xa14049c0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
"9:" // Unpadded: 1 priming loads
- "add x20, x14, %x[ld_in_row], LSL #2\n"
- "ld1w { z14.s }, p1/Z, [x14]\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- "ld1w { z15.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p1/Z, [x13]\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "ld1w { z15.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc13319c0 // fmla za.s[x8, 0], { z14.s-z17.s }, z3.s\n"
".inst 0xc13219c1 // fmla za.s[x8, 1], { z14.s-z17.s }, z2.s\n"
- "ld1w { z18.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z18.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc13519c2 // fmla za.s[x8, 2], { z14.s-z17.s }, z5.s\n"
- "ld1w { z19.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z19.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc13419c3 // fmla za.s[x8, 3], { z14.s-z17.s }, z4.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
".inst 0xc13719e0 // fmla za.s[x8, 0], { z15.s-z18.s }, z7.s\n"
- ".inst 0xa04149e2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa04149c2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc13619e1 // fmla za.s[x8, 1], { z15.s-z18.s }, z6.s\n"
- "ld1w { z20.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z20.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc13819e2 // fmla za.s[x8, 2], { z15.s-z18.s }, z8.s\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
".inst 0xc13019e3 // fmla za.s[x8, 3], { z15.s-z18.s }, z0.s\n"
- ".inst 0xa14049e0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa14049c0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc1331a00 // fmla za.s[x8, 0], { z16.s-z19.s }, z3.s\n"
- "ld1w { z21.s }, p1/Z, [x20]\n"
+ "ld1w { z21.s }, p1/Z, [x19]\n"
".inst 0xc1321a01 // fmla za.s[x8, 1], { z16.s-z19.s }, z2.s\n"
- ".inst 0xa04149e2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xa04149c2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
".inst 0xc1351a02 // fmla za.s[x8, 2], { z16.s-z19.s }, z5.s\n"
".inst 0xc1341a03 // fmla za.s[x8, 3], { z16.s-z19.s }, z4.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
".inst 0xc1371a20 // fmla za.s[x8, 0], { z17.s-z20.s }, z7.s\n"
- "ld1w { z10.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x14, #4, MUL VL]\n"
".inst 0xc1361a21 // fmla za.s[x8, 1], { z17.s-z20.s }, z6.s\n"
".inst 0xc1381a22 // fmla za.s[x8, 2], { z17.s-z20.s }, z8.s\n"
".inst 0xc1301a23 // fmla za.s[x8, 3], { z17.s-z20.s }, z0.s\n"
".inst 0xc1331a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z3.s\n"
".inst 0xc1321a41 // fmla za.s[x8, 1], { z18.s-z21.s }, z2.s\n"
- ".inst 0xa04149e2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xa04149c2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
".inst 0xc1351a42 // fmla za.s[x8, 2], { z18.s-z21.s }, z5.s\n"
".inst 0xc1341a43 // fmla za.s[x8, 3], { z18.s-z21.s }, z4.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
- ".inst 0xa14049e0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ld1w { z1.s }, p2/Z, [x15, #4, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
+ ".inst 0xa14049c0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x14, #4, MUL VL]\n"
+ "addvl x14, x14, #5\n"
"10:" // Unpadded: 0 priming loads
- "cbz x16, 20f\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
- "ld1w { z14.s }, p1/Z, [x14]\n"
- "sub x16, x16, #0x1\n"
- "ld1w { z15.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "sub x13, x13, #0x1\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "cmp x16, x13\n"
- "ld1w { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "csel x21, x16, x13, LT\n"
- "ld1w { z18.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- "ld1w { z19.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "sub x13, x13, x21\n"
- "ld1w { z20.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z21.s }, p1/Z, [x20]\n"
- "cbz x21, 19f\n"
+ "cbz x15, 20f\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p1/Z, [x13]\n"
+ "sub x15, x15, #0x1\n"
+ "ld1w { z15.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "sub x11, x11, #0x1\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "cmp x15, x11\n"
+ "ld1w { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "csel x20, x15, x11, LT\n"
+ "ld1w { z18.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "ld1w { z19.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "sub x11, x11, x20\n"
+ "ld1w { z20.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z21.s }, p1/Z, [x19]\n"
+ "cbz x20, 19f\n"
"11:" // Unpadded: Main loop
".inst 0xc13a19c0 // fmla za.s[x8, 0], { z14.s-z17.s }, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x15, #4, MUL VL]\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
- "subs x21, x21, #0x1\n"
+ "ld1w { z10.s }, p2/Z, [x14, #4, MUL VL]\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
+ "subs x20, x20, #0x1\n"
".inst 0xc13119e0 // fmla za.s[x8, 0], { z15.s-z18.s }, z1.s\n"
".inst 0xc13319c1 // fmla za.s[x8, 1], { z14.s-z17.s }, z3.s\n"
".inst 0xc13219c2 // fmla za.s[x8, 2], { z14.s-z17.s }, z2.s\n"
- ".inst 0xa04149e2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xa04149c2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
".inst 0xc13519c3 // fmla za.s[x8, 3], { z14.s-z17.s }, z5.s\n"
".inst 0xc13419c4 // fmla za.s[x8, 4], { z14.s-z17.s }, z4.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc13a1a00 // fmla za.s[x8, 0], { z16.s-z19.s }, z10.s\n"
- "ld1w { z1.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x14, #4, MUL VL]\n"
".inst 0xc13719e1 // fmla za.s[x8, 1], { z15.s-z18.s }, z7.s\n"
- "ld1w { z14.s }, p1/Z, [x14]\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
+ "ld1w { z14.s }, p1/Z, [x13]\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
".inst 0xc13619e2 // fmla za.s[x8, 2], { z15.s-z18.s }, z6.s\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
".inst 0xc13819e3 // fmla za.s[x8, 3], { z15.s-z18.s }, z8.s\n"
".inst 0xc13019e4 // fmla za.s[x8, 4], { z15.s-z18.s }, z0.s\n"
- ".inst 0xa14049e0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa14049c0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc1311a20 // fmla za.s[x8, 0], { z17.s-z20.s }, z1.s\n"
- "ld1w { z10.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x14, #4, MUL VL]\n"
".inst 0xc1331a01 // fmla za.s[x8, 1], { z16.s-z19.s }, z3.s\n"
- "ld1w { z15.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z15.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc1321a02 // fmla za.s[x8, 2], { z16.s-z19.s }, z2.s\n"
- ".inst 0xa04149e2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xa04149c2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
".inst 0xc1351a03 // fmla za.s[x8, 3], { z16.s-z19.s }, z5.s\n"
".inst 0xc1341a04 // fmla za.s[x8, 4], { z16.s-z19.s }, z4.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
".inst 0xc13a1a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z10.s\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc1371a21 // fmla za.s[x8, 1], { z17.s-z20.s }, z7.s\n"
- "ld1w { z10.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x14, #4, MUL VL]\n"
".inst 0xc1361a22 // fmla za.s[x8, 2], { z17.s-z20.s }, z6.s\n"
".inst 0xc1381a23 // fmla za.s[x8, 3], { z17.s-z20.s }, z8.s\n"
".inst 0xc1301a24 // fmla za.s[x8, 4], { z17.s-z20.s }, z0.s\n"
- "ld1w { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
".inst 0xc1abcad8 // fclamp { z24.s-z27.s }, z22.s, z11.s\n"
- "st1w { z24.s }, p1, [x9]\n"
- "add x9, x9, x27, LSL #2\n"
- ".inst 0xc1331a41 // fmla za.s[x8, 1], { z18.s-z21.s }, z3.s\n"
- "st1w { z25.s }, p1, [x28]\n"
+ "st1w { z24.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
+ ".inst 0xc1331a41 // fmla za.s[x8, 1], { z18.s-z21.s }, z3.s\n"
+ "st1w { z25.s }, p1, [x27]\n"
+ "add x27, x27, x25, LSL #2\n"
".inst 0xc1321a42 // fmla za.s[x8, 2], { z18.s-z21.s }, z2.s\n"
- ".inst 0xa04149e2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "st1w { z26.s }, p1, [x25]\n"
- "add x25, x25, x23, LSL #2\n"
- ".inst 0xc1351a43 // fmla za.s[x8, 3], { z18.s-z21.s }, z5.s\n"
- "st1w { z27.s }, p1, [x24]\n"
+ ".inst 0xa04149c2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "st1w { z26.s }, p1, [x24]\n"
"add x24, x24, x22, LSL #2\n"
+ ".inst 0xc1351a43 // fmla za.s[x8, 3], { z18.s-z21.s }, z5.s\n"
+ "st1w { z27.s }, p1, [x23]\n"
+ "add x23, x23, x21, LSL #2\n"
".inst 0xc1341a44 // fmla za.s[x8, 4], { z18.s-z21.s }, z4.s\n"
- "ld1w { z18.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z18.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
"add x8, x8, #0x1\n"
- "ld1w { z19.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z19.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
- "ld1w { z20.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xa14049e0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ld1w { z1.s }, p2/Z, [x15, #4, MUL VL]\n"
- "addvl x15, x15, #5\n"
- "ld1w { z21.s }, p1/Z, [x20]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
+ "ld1w { z20.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ ".inst 0xa14049c0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x14, #4, MUL VL]\n"
+ "addvl x14, x14, #5\n"
+ "ld1w { z21.s }, p1/Z, [x19]\n"
"bgt 11b\n"
"b 19f\n"
"12:" // Padded
- "cbz x10, 17f\n"
- "cmp x10, #0x1\n"
- "sub x16, x16, x10\n"
+ "cbz x9, 17f\n"
+ "cmp x9, #0x1\n"
+ "sub x15, x15, x9\n"
"beq 16f\n"
- "cmp x10, #0x2\n"
+ "cmp x9, #0x2\n"
"beq 15f\n"
- "cmp x10, #0x3\n"
+ "cmp x9, #0x3\n"
"beq 14f\n"
"13:" // Padded: 4 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z14.s }, p0/Z, [x14]\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p0/Z, [x13]\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z15.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z15.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z17.s }, p0/Z, [x20]\n"
+ "ld1w { z17.s }, p0/Z, [x19]\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc13419c0 // fmla za.s[x8, 0], { z14.s-z17.s }, z4.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z18.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z18.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc13019e0 // fmla za.s[x8, 0], { z15.s-z18.s }, z0.s\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z19.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z19.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc1341a00 // fmla za.s[x8, 0], { z16.s-z19.s }, z4.s\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "addvl x15, x15, #5\n"
- "ld1w { z20.s }, p0/Z, [x20]\n"
- ".inst 0xa14049e0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x15]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "addvl x14, x14, #5\n"
+ "ld1w { z20.s }, p0/Z, [x19]\n"
+ ".inst 0xa14049c0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
".inst 0xc1301a20 // fmla za.s[x8, 0], { z17.s-z20.s }, z0.s\n"
- "addvl x15, x15, #5\n"
- "ld1w { z21.s }, p0/Z, [x20]\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
+ "addvl x14, x14, #5\n"
+ "ld1w { z21.s }, p0/Z, [x19]\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
".inst 0xc1341a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z4.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
- ".inst 0xa14049e0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
+ ".inst 0xa14049c0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
"14:" // Padded: 3 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z14.s }, p0/Z, [x14]\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p0/Z, [x13]\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z15.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z15.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z17.s }, p0/Z, [x20]\n"
+ "ld1w { z17.s }, p0/Z, [x19]\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc13519c0 // fmla za.s[x8, 0], { z14.s-z17.s }, z5.s\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc13419c1 // fmla za.s[x8, 1], { z14.s-z17.s }, z4.s\n"
- "ld1w { z18.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z18.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
".inst 0xc13819e0 // fmla za.s[x8, 0], { z15.s-z18.s }, z8.s\n"
- "ld1w { z19.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z19.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc13019e1 // fmla za.s[x8, 1], { z15.s-z18.s }, z0.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "addvl x15, x15, #5\n"
+ "addvl x14, x14, #5\n"
".inst 0xc1351a00 // fmla za.s[x8, 0], { z16.s-z19.s }, z5.s\n"
- "ld1w { z20.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z20.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
".inst 0xc1341a01 // fmla za.s[x8, 1], { z16.s-z19.s }, z4.s\n"
- ".inst 0xa14049e0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
+ ".inst 0xa14049c0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
".inst 0xc1381a20 // fmla za.s[x8, 0], { z17.s-z20.s }, z8.s\n"
- "ld1w { z21.s }, p0/Z, [x20]\n"
+ "ld1w { z21.s }, p0/Z, [x19]\n"
".inst 0xc1301a21 // fmla za.s[x8, 1], { z17.s-z20.s }, z0.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
".inst 0xc1351a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z5.s\n"
- ".inst 0xa04149e2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xa04149c2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
".inst 0xc1341a41 // fmla za.s[x8, 1], { z18.s-z21.s }, z4.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
- ".inst 0xa14049e0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
+ ".inst 0xa14049c0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
"15:" // Padded: 2 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z14.s }, p0/Z, [x14]\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p0/Z, [x13]\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z15.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z15.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z17.s }, p0/Z, [x20]\n"
+ "ld1w { z17.s }, p0/Z, [x19]\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc13219c0 // fmla za.s[x8, 0], { z14.s-z17.s }, z2.s\n"
- ".inst 0xa04149e2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xa04149c2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc13519c1 // fmla za.s[x8, 1], { z14.s-z17.s }, z5.s\n"
- "ld1w { z18.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z18.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc13419c2 // fmla za.s[x8, 2], { z14.s-z17.s }, z4.s\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z19.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z19.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc13619e0 // fmla za.s[x8, 0], { z15.s-z18.s }, z6.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "addvl x15, x15, #5\n"
+ "addvl x14, x14, #5\n"
".inst 0xc13819e1 // fmla za.s[x8, 1], { z15.s-z18.s }, z8.s\n"
- "ld1w { z20.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z20.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
".inst 0xc13019e2 // fmla za.s[x8, 2], { z15.s-z18.s }, z0.s\n"
- ".inst 0xa14049e0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x15]\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
+ ".inst 0xa14049c0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
".inst 0xc1321a00 // fmla za.s[x8, 0], { z16.s-z19.s }, z2.s\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc1351a01 // fmla za.s[x8, 1], { z16.s-z19.s }, z5.s\n"
- "ld1w { z21.s }, p0/Z, [x20]\n"
+ "ld1w { z21.s }, p0/Z, [x19]\n"
".inst 0xc1341a02 // fmla za.s[x8, 2], { z16.s-z19.s }, z4.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
".inst 0xc1361a20 // fmla za.s[x8, 0], { z17.s-z20.s }, z6.s\n"
- ".inst 0xa04149e2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
+ ".inst 0xa04149c2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
".inst 0xc1381a21 // fmla za.s[x8, 1], { z17.s-z20.s }, z8.s\n"
".inst 0xc1301a22 // fmla za.s[x8, 2], { z17.s-z20.s }, z0.s\n"
".inst 0xc1321a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z2.s\n"
- ".inst 0xa04149e2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xa04149c2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
".inst 0xc1351a41 // fmla za.s[x8, 1], { z18.s-z21.s }, z5.s\n"
".inst 0xc1341a42 // fmla za.s[x8, 2], { z18.s-z21.s }, z4.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
- ".inst 0xa14049e0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
+ ".inst 0xa14049c0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
"16:" // Padded: 1 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z14.s }, p0/Z, [x14]\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p0/Z, [x13]\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z15.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z15.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z17.s }, p0/Z, [x20]\n"
+ "ld1w { z17.s }, p0/Z, [x19]\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc13319c0 // fmla za.s[x8, 0], { z14.s-z17.s }, z3.s\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc13219c1 // fmla za.s[x8, 1], { z14.s-z17.s }, z2.s\n"
- "ld1w { z18.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z18.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc13519c2 // fmla za.s[x8, 2], { z14.s-z17.s }, z5.s\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z19.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z19.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc13419c3 // fmla za.s[x8, 3], { z14.s-z17.s }, z4.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
".inst 0xc13719e0 // fmla za.s[x8, 0], { z15.s-z18.s }, z7.s\n"
- ".inst 0xa04149e2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa04149c2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc13619e1 // fmla za.s[x8, 1], { z15.s-z18.s }, z6.s\n"
- "ld1w { z20.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z20.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
".inst 0xc13819e2 // fmla za.s[x8, 2], { z15.s-z18.s }, z8.s\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
".inst 0xc13019e3 // fmla za.s[x8, 3], { z15.s-z18.s }, z0.s\n"
- ".inst 0xa14049e0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa14049c0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc1331a00 // fmla za.s[x8, 0], { z16.s-z19.s }, z3.s\n"
- "ld1w { z21.s }, p0/Z, [x20]\n"
+ "ld1w { z21.s }, p0/Z, [x19]\n"
".inst 0xc1321a01 // fmla za.s[x8, 1], { z16.s-z19.s }, z2.s\n"
- ".inst 0xa04149e2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xa04149c2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
".inst 0xc1351a02 // fmla za.s[x8, 2], { z16.s-z19.s }, z5.s\n"
".inst 0xc1341a03 // fmla za.s[x8, 3], { z16.s-z19.s }, z4.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
".inst 0xc1371a20 // fmla za.s[x8, 0], { z17.s-z20.s }, z7.s\n"
- "ld1w { z10.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x14, #4, MUL VL]\n"
".inst 0xc1361a21 // fmla za.s[x8, 1], { z17.s-z20.s }, z6.s\n"
".inst 0xc1381a22 // fmla za.s[x8, 2], { z17.s-z20.s }, z8.s\n"
".inst 0xc1301a23 // fmla za.s[x8, 3], { z17.s-z20.s }, z0.s\n"
".inst 0xc1331a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z3.s\n"
".inst 0xc1321a41 // fmla za.s[x8, 1], { z18.s-z21.s }, z2.s\n"
- ".inst 0xa04149e2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xa04149c2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
".inst 0xc1351a42 // fmla za.s[x8, 2], { z18.s-z21.s }, z5.s\n"
".inst 0xc1341a43 // fmla za.s[x8, 3], { z18.s-z21.s }, z4.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
- ".inst 0xa14049e0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ld1w { z1.s }, p2/Z, [x15, #4, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
+ ".inst 0xa14049c0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x14, #4, MUL VL]\n"
+ "addvl x14, x14, #5\n"
"17:" // Padded: 0 priming loads
- "cbz x16, 20f\n"
+ "cbz x15, 20f\n"
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z14.s }, p0/Z, [x14]\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p0/Z, [x13]\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z15.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z15.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z17.s }, p0/Z, [x20]\n"
+ "ld1w { z17.s }, p0/Z, [x19]\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z18.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z18.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "sub x16, x16, #0x1\n"
- "ld1w { z19.s }, p0/Z, [x20]\n"
- "sub x13, x13, #0x1\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "sub x15, x15, #0x1\n"
+ "ld1w { z19.s }, p0/Z, [x19]\n"
+ "sub x11, x11, #0x1\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "cmp x16, x13\n"
- "ld1w { z20.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "cmp x15, x11\n"
+ "ld1w { z20.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z21.s }, p0/Z, [x20]\n"
- "csel x21, x16, x13, LT\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- "sub x13, x13, x21\n"
- "cbz x21, 19f\n"
+ "ld1w { z21.s }, p0/Z, [x19]\n"
+ "csel x20, x15, x11, LT\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "sub x11, x11, x20\n"
+ "cbz x20, 19f\n"
"18:" // Padded: Main loop
".inst 0xc13a19c0 // fmla za.s[x8, 0], { z14.s-z17.s }, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x14, #4, MUL VL]\n"
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc13119e0 // fmla za.s[x8, 0], { z15.s-z18.s }, z1.s\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
- "subs x21, x21, #0x1\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
+ "subs x20, x20, #0x1\n"
".inst 0xc13319c1 // fmla za.s[x8, 1], { z14.s-z17.s }, z3.s\n"
".inst 0xc13219c2 // fmla za.s[x8, 2], { z14.s-z17.s }, z2.s\n"
- ".inst 0xa04149e2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xa04149c2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
".inst 0xc13519c3 // fmla za.s[x8, 3], { z14.s-z17.s }, z5.s\n"
".inst 0xc13419c4 // fmla za.s[x8, 4], { z14.s-z17.s }, z4.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc13a1a00 // fmla za.s[x8, 0], { z16.s-z19.s }, z10.s\n"
- "ld1w { z1.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x14, #4, MUL VL]\n"
".inst 0xc13719e1 // fmla za.s[x8, 1], { z15.s-z18.s }, z7.s\n"
- "ld1w { z14.s }, p0/Z, [x14]\n"
+ "ld1w { z14.s }, p0/Z, [x13]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
".inst 0xc13619e2 // fmla za.s[x8, 2], { z15.s-z18.s }, z6.s\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
".inst 0xc13819e3 // fmla za.s[x8, 3], { z15.s-z18.s }, z8.s\n"
".inst 0xc13019e4 // fmla za.s[x8, 4], { z15.s-z18.s }, z0.s\n"
- ".inst 0xa14049e0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa14049c0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc1311a20 // fmla za.s[x8, 0], { z17.s-z20.s }, z1.s\n"
- "ld1w { z10.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x14, #4, MUL VL]\n"
".inst 0xc1331a01 // fmla za.s[x8, 1], { z16.s-z19.s }, z3.s\n"
- "ld1w { z15.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z15.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc1321a02 // fmla za.s[x8, 2], { z16.s-z19.s }, z2.s\n"
- ".inst 0xa04149e2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xa04149c2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
".inst 0xc1351a03 // fmla za.s[x8, 3], { z16.s-z19.s }, z5.s\n"
".inst 0xc1341a04 // fmla za.s[x8, 4], { z16.s-z19.s }, z4.s\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x4\n"
".inst 0xc13a1a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z10.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
".inst 0xc1371a21 // fmla za.s[x8, 1], { z17.s-z20.s }, z7.s\n"
- "ld1w { z10.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x14, #4, MUL VL]\n"
".inst 0xc1361a22 // fmla za.s[x8, 2], { z17.s-z20.s }, z6.s\n"
".inst 0xc1381a23 // fmla za.s[x8, 3], { z17.s-z20.s }, z8.s\n"
".inst 0xc1301a24 // fmla za.s[x8, 4], { z17.s-z20.s }, z0.s\n"
- "ld1w { z17.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z17.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
".inst 0xc1abcad8 // fclamp { z24.s-z27.s }, z22.s, z11.s\n"
- "st1w { z24.s }, p1, [x9]\n"
- "add x9, x9, x27, LSL #2\n"
- ".inst 0xc1331a41 // fmla za.s[x8, 1], { z18.s-z21.s }, z3.s\n"
- "st1w { z25.s }, p1, [x28]\n"
+ "st1w { z24.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
+ ".inst 0xc1331a41 // fmla za.s[x8, 1], { z18.s-z21.s }, z3.s\n"
+ "st1w { z25.s }, p1, [x27]\n"
+ "add x27, x27, x25, LSL #2\n"
".inst 0xc1321a42 // fmla za.s[x8, 2], { z18.s-z21.s }, z2.s\n"
- ".inst 0xa04149e2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "st1w { z26.s }, p1, [x25]\n"
- "add x25, x25, x23, LSL #2\n"
- ".inst 0xc1351a43 // fmla za.s[x8, 3], { z18.s-z21.s }, z5.s\n"
- "st1w { z27.s }, p1, [x24]\n"
+ ".inst 0xa04149c2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "st1w { z26.s }, p1, [x24]\n"
"add x24, x24, x22, LSL #2\n"
+ ".inst 0xc1351a43 // fmla za.s[x8, 3], { z18.s-z21.s }, z5.s\n"
+ "st1w { z27.s }, p1, [x23]\n"
+ "add x23, x23, x21, LSL #2\n"
".inst 0xc1341a44 // fmla za.s[x8, 4], { z18.s-z21.s }, z4.s\n"
- "ld1w { z18.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z18.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z19.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z19.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
"add x8, x8, #0x1\n"
".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
- "ld1w { z20.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z20.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xa14049e0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ld1w { z1.s }, p2/Z, [x15, #4, MUL VL]\n"
- "addvl x15, x15, #5\n"
- "ld1w { z21.s }, p0/Z, [x20]\n"
+ ".inst 0xa14049c0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x14, #4, MUL VL]\n"
+ "addvl x14, x14, #5\n"
+ "ld1w { z21.s }, p0/Z, [x19]\n"
"bgt 18b\n"
"19:" // Main loop tail
".inst 0xc13a19c0 // fmla za.s[x8, 0], { z14.s-z17.s }, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x14, #4, MUL VL]\n"
".inst 0xc13119e0 // fmla za.s[x8, 0], { z15.s-z18.s }, z1.s\n"
".inst 0xc13319c1 // fmla za.s[x8, 1], { z14.s-z17.s }, z3.s\n"
".inst 0xc13219c2 // fmla za.s[x8, 2], { z14.s-z17.s }, z2.s\n"
- ".inst 0xa04149e2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xa04149c2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
".inst 0xc13519c3 // fmla za.s[x8, 3], { z14.s-z17.s }, z5.s\n"
".inst 0xc13419c4 // fmla za.s[x8, 4], { z14.s-z17.s }, z4.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc13a1a00 // fmla za.s[x8, 0], { z16.s-z19.s }, z10.s\n"
- "ld1w { z1.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x14, #4, MUL VL]\n"
".inst 0xc13719e1 // fmla za.s[x8, 1], { z15.s-z18.s }, z7.s\n"
".inst 0xc13619e2 // fmla za.s[x8, 2], { z15.s-z18.s }, z6.s\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
".inst 0xc13819e3 // fmla za.s[x8, 3], { z15.s-z18.s }, z8.s\n"
".inst 0xc13019e4 // fmla za.s[x8, 4], { z15.s-z18.s }, z0.s\n"
- ".inst 0xa14049e0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa14049c0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc1311a20 // fmla za.s[x8, 0], { z17.s-z20.s }, z1.s\n"
- "ld1w { z10.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x14, #4, MUL VL]\n"
".inst 0xc1331a01 // fmla za.s[x8, 1], { z16.s-z19.s }, z3.s\n"
".inst 0xc1321a02 // fmla za.s[x8, 2], { z16.s-z19.s }, z2.s\n"
- ".inst 0xa04149e2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xa04149c2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
".inst 0xc1351a03 // fmla za.s[x8, 3], { z16.s-z19.s }, z5.s\n"
".inst 0xc1341a04 // fmla za.s[x8, 4], { z16.s-z19.s }, z4.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
".inst 0xc13a1a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z10.s\n"
".inst 0xc1371a21 // fmla za.s[x8, 1], { z17.s-z20.s }, z7.s\n"
".inst 0xc1361a22 // fmla za.s[x8, 2], { z17.s-z20.s }, z6.s\n"
@@ -815,65 +815,65 @@ void sme2_fp32_planar_5x5_s1_4rows_mla_za_impl(
".inst 0xc1301a24 // fmla za.s[x8, 4], { z17.s-z20.s }, z0.s\n"
".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
".inst 0xc1abcad8 // fclamp { z24.s-z27.s }, z22.s, z11.s\n"
- "st1w { z24.s }, p1, [x9]\n"
- "add x9, x9, x27, LSL #2\n"
- ".inst 0xc1331a41 // fmla za.s[x8, 1], { z18.s-z21.s }, z3.s\n"
- "st1w { z25.s }, p1, [x28]\n"
+ "st1w { z24.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
+ ".inst 0xc1331a41 // fmla za.s[x8, 1], { z18.s-z21.s }, z3.s\n"
+ "st1w { z25.s }, p1, [x27]\n"
+ "add x27, x27, x25, LSL #2\n"
".inst 0xc1321a42 // fmla za.s[x8, 2], { z18.s-z21.s }, z2.s\n"
- "st1w { z26.s }, p1, [x25]\n"
- "add x25, x25, x23, LSL #2\n"
- ".inst 0xc1351a43 // fmla za.s[x8, 3], { z18.s-z21.s }, z5.s\n"
- "st1w { z27.s }, p1, [x24]\n"
+ "st1w { z26.s }, p1, [x24]\n"
"add x24, x24, x22, LSL #2\n"
+ ".inst 0xc1351a43 // fmla za.s[x8, 3], { z18.s-z21.s }, z5.s\n"
+ "st1w { z27.s }, p1, [x23]\n"
+ "add x23, x23, x21, LSL #2\n"
".inst 0xc1341a44 // fmla za.s[x8, 4], { z18.s-z21.s }, z4.s\n"
"add x8, x8, #0x1\n"
".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
"20:" // Main loop skip tail
- "cbz x13, 22f\n"
+ "cbz x11, 22f\n"
"21:" // Right padding loop
".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
"add x8, x8, #0x1\n"
- "subs x13, x13, #0x1\n"
+ "subs x11, x11, #0x1\n"
".inst 0xc1abcad8 // fclamp { z24.s-z27.s }, z22.s, z11.s\n"
- "st1w { z24.s }, p1, [x9]\n"
- "add x9, x9, x27, LSL #2\n"
- ".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
- "st1w { z25.s }, p1, [x28]\n"
+ "st1w { z24.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
- "st1w { z26.s }, p1, [x25]\n"
- "add x25, x25, x23, LSL #2\n"
- "st1w { z27.s }, p1, [x24]\n"
+ ".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
+ "st1w { z25.s }, p1, [x27]\n"
+ "add x27, x27, x25, LSL #2\n"
+ "st1w { z26.s }, p1, [x24]\n"
"add x24, x24, x22, LSL #2\n"
+ "st1w { z27.s }, p1, [x23]\n"
+ "add x23, x23, x21, LSL #2\n"
"bgt 21b\n"
"22:" // End
- "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
- "incb x15, ALL, MUL #16\n"
- "incb x15, ALL, MUL #9\n"
- "str x15, [%x[args], %[offsetof_Args_weights]]\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "incw x17\n"
- "whilelt p1.s, x17, x7\n"
- "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "add x14, x14, x20, LSL #2\n"
- "str x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "ldr x11, [%x[args], %[offsetof_Args_outptrs]]\n"
- "ldr x24, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
- "ldp x23, x22, [x11, #0x0]\n"
- "ldp x21, x20, [x24, #0x0]\n"
- "add x23, x23, x21, LSL #2\n"
+ "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+ "incb x14, ALL, MUL #16\n"
+ "incb x14, ALL, MUL #9\n"
+ "str x14, [%x[args], %[offsetof_Args_weights]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+ "incw x16\n"
+ "whilelt p1.s, x16, x17\n"
+ "ldr x13, [%x[args], %[offsetof_Args_inptr]]\n"
+ "add x13, x13, x19, LSL #2\n"
+ "str x13, [%x[args], %[offsetof_Args_inptr]]\n"
+ "ldr x10, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+ "ldp x22, x21, [x10, #0x0]\n"
+ "ldp x20, x19, [x23, #0x0]\n"
"add x22, x22, x20, LSL #2\n"
- "stp x23, x22, [x11, #0x0]\n"
- "ldp x23, x22, [x11, #0x10]\n"
- "ldp x21, x20, [x24, #0x10]\n"
- "add x23, x23, x21, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "stp x22, x21, [x10, #0x0]\n"
+ "ldp x22, x21, [x10, #0x10]\n"
+ "ldp x20, x19, [x23, #0x10]\n"
"add x22, x22, x20, LSL #2\n"
- "stp x23, x22, [x11, #0x10]\n"
+ "add x21, x21, x19, LSL #2\n"
+ "stp x22, x21, [x10, #0x10]\n"
"b.any 1b\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_bias] "I" (offsetof(Args, bias)), [offsetof_Args_clamp_max] "I" (offsetof(Args, clamp_max)), [offsetof_Args_clamp_min] "I" (offsetof(Args, clamp_min)), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_5x5_s2_4rows_mla_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_5x5_s2_4rows_mla_za/generic.cpp
index 755265835d..8920b3b749 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_5x5_s2_4rows_mla_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_5x5_s2_4rows_mla_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -69,1100 +69,1100 @@ void sme2_fp32_planar_5x5_s2_4rows_mla_za_impl(
Args args = { inptr, ld_in_vl, pad_top, 11u - std::min(11u, pad_top + valid_input_rows), pad_left, weights, bias, valid_input_cols, output_cols, outptrs, outlds, outvllds, start_channel, valid_channels, act_min, act_max };
__asm__ __volatile__(
- "ldr x5, [%x[args], %[offsetof_Args_pad_bottom]]\n"
- "mov x20, #0xb\n"
+ "ldr x6, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "mov x19, #0xb\n"
".inst 0xd503477f // SMSTART ZA\n"
- "sub x20, x20, x5\n"
- "ldr x6, [%x[args], %[offsetof_Args_pad_top]]\n"
+ "sub x19, x19, x6\n"
+ "ldr x7, [%x[args], %[offsetof_Args_pad_top]]\n"
"ptrue p2.b\n"
".inst 0x25207812 // ptrue pn10.b\n"
"ld1rw { z0.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_min]]\n"
- "ldr x7, [%x[args], %[offsetof_Args_n_channels]]\n"
- "whilelt p1.s, XZR, x7\n"
- "whilelt p9.s, XZR, x20\n"
+ "ldr x17, [%x[args], %[offsetof_Args_n_channels]]\n"
+ "whilelt p1.s, XZR, x17\n"
+ "whilelt p9.s, XZR, x19\n"
"ld1rw { z17.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_max]]\n"
- "whilelt p8.s, XZR, x6\n"
+ "whilelt p8.s, XZR, x7\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
- "ldr x17, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
"1:" // Channel loop
- "ldr x20, [%x[args], %[offsetof_Args_bias]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_bias]]\n"
"fmov z28.s, #0x0\n"
- "cbz x20, 2f\n"
- "ld1w { z28.s }, p1/Z, [x20, x17, LSL #2]\n"
+ "cbz x19, 2f\n"
+ "ld1w { z28.s }, p1/Z, [x19, x16, LSL #2]\n"
"2:" // Load bias: Done
- "ldr x16, [%x[args], %[offsetof_Args_input_cols]]\n"
- "sub x20, x16, #0x1\n"
- "orr x23, x20, %x[ld_in_col], LSL #18\n"
+ "ldr x15, [%x[args], %[offsetof_Args_input_cols]]\n"
+ "sub x19, x15, #0x1\n"
+ "orr x22, x19, %x[ld_in_col], LSL #18\n"
"mov z29.d, z28.d\n"
- "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "orr x23, x7, x23, LSL #20\n"
- "mov x22, #0xb\n"
- "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "add x21, x6, x5\n"
- "lsl x20, %x[ld_in_row], #0x2\n"
- "ld1w { z8.s }, p2/Z, [x15, #4, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "orr x22, x17, x22, LSL #20\n"
+ "mov x21, #0xb\n"
+ "ldr x13, [%x[args], %[offsetof_Args_inptr]]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "add x20, x7, x6\n"
+ "lsl x19, %x[ld_in_row], #0x2\n"
+ "ld1w { z8.s }, p2/Z, [x14, #4, MUL VL]\n"
+ "addvl x14, x14, #5\n"
"mov z30.d, z28.d\n"
"mov z31.d, z28.d\n"
- ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
"mov x8, #0x0\n"
- "ldr x13, [%x[args], %[offsetof_Args_output_cols]]\n"
- "lsl x23, x23, #0x2\n"
- ".inst 0xa14149e2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x6, x14\n"
- "ld1w { z3.s }, p2/Z, [x15, #4, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ "ldr x11, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "lsl x22, x22, #0x2\n"
+ ".inst 0xa14149c2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "sub x21, x21, x20\n"
+ "madd x19, x19, x7, x13\n"
+ "ld1w { z3.s }, p2/Z, [x14, #4, MUL VL]\n"
+ "addvl x14, x14, #5\n"
"3:" // Issue prefetches
- "subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col], LSL #2\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xf8b64a7c // rprfm pldstrm, x22, [x19]\n"
+ "add x19, x19, %x[ld_in_col], LSL #2\n"
"bgt 3b\n"
- "ldr x11, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x2\n"
- "msub x14, x6, x20, x14\n"
+ "ldr x10, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "lsl x19, %x[ld_in_row], #0x2\n"
+ "msub x13, x7, x19, x13\n"
".inst 0xc0040f80 // mova za.d[x8, #0], { z28.d-z31.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
".inst 0xc0040f81 // mova za.d[x8, #1], { z28.d-z31.d }\n"
- "mov x22, #0x4\n"
- "ldp x10, x9, [x11], #0x10\n"
+ "mov x21, #0x4\n"
+ "ldp x9, x28, [x10], #0x10\n"
".inst 0xc0040f82 // mova za.d[x8, #2], { z28.d-z31.d }\n"
- "ldp x28, x27, [x20], #0x10\n"
+ "ldp x27, x26, [x19], #0x10\n"
".inst 0xc0040f83 // mova za.d[x8, #3], { z28.d-z31.d }\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
+ "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
- "ldp x26, x25, [x11], #0x10\n"
- "ldp x24, x23, [x20], #0x10\n"
- "cbz x21, 5f\n"
- "cmp x21, x22\n"
- "csel x20, x21, x22, LT\n"
- "sub x21, x21, x20\n"
- "sub x22, x22, x20\n"
- "cbz x21, 5f\n"
+ "ldp x25, x24, [x10], #0x10\n"
+ "ldp x23, x22, [x19], #0x10\n"
+ "cbz x20, 5f\n"
+ "cmp x20, x21\n"
+ "csel x19, x20, x21, LT\n"
+ "sub x20, x20, x19\n"
+ "sub x21, x21, x19\n"
+ "cbz x20, 5f\n"
".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
- "and x22, x21, #0x1\n"
- "add x21, x21, #0x1\n"
+ "and x21, x20, #0x1\n"
+ "add x20, x20, #0x1\n"
".inst 0xc1b1c818 // fclamp { z24.s-z27.s }, z0.s, z17.s\n"
- "lsr x21, x21, #0x1\n"
- "sub x13, x13, x21\n"
+ "lsr x20, x20, #0x1\n"
+ "sub x11, x11, x20\n"
"4:" // Left padding
- "subs x21, x21, #0x1\n"
- "st1w { z24.s }, p1, [x10]\n"
- "add x10, x10, x28, LSL #2\n"
- "st1w { z25.s }, p1, [x9]\n"
+ "subs x20, x20, #0x1\n"
+ "st1w { z24.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
- "st1w { z26.s }, p1, [x26]\n"
- "add x26, x26, x24, LSL #2\n"
- "st1w { z27.s }, p1, [x25]\n"
+ "st1w { z25.s }, p1, [x28]\n"
+ "add x28, x28, x26, LSL #2\n"
+ "st1w { z26.s }, p1, [x25]\n"
"add x25, x25, x23, LSL #2\n"
+ "st1w { z27.s }, p1, [x24]\n"
+ "add x24, x24, x22, LSL #2\n"
"bgt 4b\n"
"5:" // Left padding: End
- "adds XZR, x6, x5\n"
+ "adds XZR, x7, x6\n"
"bne 12f\n"
- "cbz x22, 10f\n"
- "cmp x22, #0x1\n"
- "sub x16, x16, x22\n"
+ "cbz x21, 10f\n"
+ "cmp x21, #0x1\n"
+ "sub x15, x15, x21\n"
"beq 9f\n"
- "cmp x22, #0x2\n"
+ "cmp x21, #0x2\n"
"beq 8f\n"
- "cmp x22, #0x3\n"
+ "cmp x21, #0x3\n"
"beq 7f\n"
"6:" // Unpadded: 4 priming loads
- "add x20, x14, %x[ld_in_row], LSL #2\n"
- "ld1w { z11.s }, p1/Z, [x14]\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- "ld1w { z21.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z12.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z22.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z13.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z23.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z14.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
+ "ld1w { z11.s }, p1/Z, [x13]\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "ld1w { z21.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z12.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z22.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z13.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z23.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc1341960 // fmla za.s[x8, 0], { z11.s-z14.s }, z4.s\n"
- "ld1w { z24.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z24.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc1311aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z1.s\n"
- "ld1w { z15.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
+ "ld1w { z15.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc1341980 // fmla za.s[x8, 0], { z12.s-z15.s }, z4.s\n"
- "ld1w { z25.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
+ "ld1w { z25.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc1311ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z1.s\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
".inst 0xc13419a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z4.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
- ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
+ ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
"7:" // Unpadded: 3 priming loads
- "add x20, x14, %x[ld_in_row], LSL #2\n"
- "ld1w { z11.s }, p1/Z, [x14]\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- "ld1w { z21.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z12.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z22.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z13.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z23.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z14.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
+ "ld1w { z11.s }, p1/Z, [x13]\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "ld1w { z21.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z12.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z22.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z13.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z23.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc1351960 // fmla za.s[x8, 0], { z11.s-z14.s }, z5.s\n"
- "ld1w { z24.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z24.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc1391aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z9.s\n"
- "ld1w { z15.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
+ "ld1w { z15.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc1351980 // fmla za.s[x8, 0], { z12.s-z15.s }, z5.s\n"
- "ld1w { z25.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
+ "ld1w { z25.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc1391ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z9.s\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
".inst 0xc13519a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z5.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
- ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa14149e2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
+ ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xa14149c2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
"8:" // Unpadded: 2 priming loads
- "add x20, x14, %x[ld_in_row], LSL #2\n"
- "ld1w { z11.s }, p1/Z, [x14]\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- "ld1w { z21.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z12.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z22.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z13.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z23.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z14.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
+ "ld1w { z11.s }, p1/Z, [x13]\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "ld1w { z21.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z12.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z22.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z13.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z23.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc1361960 // fmla za.s[x8, 0], { z11.s-z14.s }, z6.s\n"
".inst 0xc1341961 // fmla za.s[x8, 1], { z11.s-z14.s }, z4.s\n"
- "ld1w { z24.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z24.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc1321aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z2.s\n"
- "ld1w { z15.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z15.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc1311aa1 // fmla za.s[x8, 1], { z21.s-z24.s }, z1.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc1361980 // fmla za.s[x8, 0], { z12.s-z15.s }, z6.s\n"
".inst 0xc1341981 // fmla za.s[x8, 1], { z12.s-z15.s }, z4.s\n"
- "ld1w { z25.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
+ "ld1w { z25.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
".inst 0xc1311ac1 // fmla za.s[x8, 1], { z22.s-z25.s }, z1.s\n"
- ".inst 0xa14149e2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa14149c2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc1321ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z2.s\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
".inst 0xc13419a1 // fmla za.s[x8, 1], { z13.s-z16.s }, z4.s\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
".inst 0xc13619a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z6.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
- ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa14149e2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
+ ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xa14149c2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
"9:" // Unpadded: 1 priming loads
- "add x20, x14, %x[ld_in_row], LSL #2\n"
- "ld1w { z11.s }, p1/Z, [x14]\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
+ "ld1w { z11.s }, p1/Z, [x13]\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "ld1w { z21.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z12.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z22.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z13.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z23.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1371960 // fmla za.s[x8, 0], { z11.s-z14.s }, z7.s\n"
+ ".inst 0xc1351961 // fmla za.s[x8, 1], { z11.s-z14.s }, z5.s\n"
+ "ld1w { z24.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc13a1aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z10.s\n"
+ "ld1w { z15.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1391aa1 // fmla za.s[x8, 1], { z21.s-z24.s }, z9.s\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
+ ".inst 0xc1371980 // fmla za.s[x8, 0], { z12.s-z15.s }, z7.s\n"
+ ".inst 0xc1351981 // fmla za.s[x8, 1], { z12.s-z15.s }, z5.s\n"
+ "ld1w { z25.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xc1391ac1 // fmla za.s[x8, 1], { z22.s-z25.s }, z9.s\n"
+ ".inst 0xa14149c2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
+ ".inst 0xc13a1ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z10.s\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xc13519a1 // fmla za.s[x8, 1], { z13.s-z16.s }, z5.s\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+ ".inst 0xc13719a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z7.s\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "ld1w { z8.s }, p2/Z, [x14, #4, MUL VL]\n"
+ "addvl x14, x14, #5\n"
+ ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xa14149c2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "ld1w { z3.s }, p2/Z, [x14, #4, MUL VL]\n"
+ "addvl x14, x14, #5\n"
+ "10:" // Unpadded: 0 priming loads
+ "cmp x15, #0x2\n"
+ "blt 20f\n"
+ "add x20, x13, %x[ld_in_row], LSL #2\n"
+ "ld1w { z11.s }, p1/Z, [x13]\n"
+ "sub x15, x15, #0x2\n"
"ld1w { z21.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "sub x11, x11, #0x1\n"
"ld1w { z12.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "lsr x19, x15, #0x1\n"
"ld1w { z22.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "cmp x19, x11\n"
"ld1w { z13.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "csel x21, x19, x11, LT\n"
"ld1w { z23.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
"ld1w { z14.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1371960 // fmla za.s[x8, 0], { z11.s-z14.s }, z7.s\n"
- ".inst 0xc1351961 // fmla za.s[x8, 1], { z11.s-z14.s }, z5.s\n"
+ "and x15, x15, #0x1\n"
"ld1w { z24.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13a1aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z10.s\n"
+ "sub x11, x11, x21\n"
"ld1w { z15.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1391aa1 // fmla za.s[x8, 1], { z21.s-z24.s }, z9.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
- ".inst 0xc1371980 // fmla za.s[x8, 0], { z12.s-z15.s }, z7.s\n"
- ".inst 0xc1351981 // fmla za.s[x8, 1], { z12.s-z15.s }, z5.s\n"
"ld1w { z25.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
- ".inst 0xc1391ac1 // fmla za.s[x8, 1], { z22.s-z25.s }, z9.s\n"
- ".inst 0xa14149e2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
- ".inst 0xc13a1ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z10.s\n"
"ld1w { z16.s }, p1/Z, [x20]\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- ".inst 0xc13519a1 // fmla za.s[x8, 1], { z13.s-z16.s }, z5.s\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
- ".inst 0xc13719a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z7.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ld1w { z8.s }, p2/Z, [x15, #4, MUL VL]\n"
- "addvl x15, x15, #5\n"
- ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa14149e2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ld1w { z3.s }, p2/Z, [x15, #4, MUL VL]\n"
- "addvl x15, x15, #5\n"
- "10:" // Unpadded: 0 priming loads
- "cmp x16, #0x2\n"
- "blt 20f\n"
- "add x21, x14, %x[ld_in_row], LSL #2\n"
- "ld1w { z11.s }, p1/Z, [x14]\n"
- "sub x16, x16, #0x2\n"
- "ld1w { z21.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- "sub x13, x13, #0x1\n"
- "ld1w { z12.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- "lsr x20, x16, #0x1\n"
- "ld1w { z22.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- "cmp x20, x13\n"
- "ld1w { z13.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- "csel x22, x20, x13, LT\n"
- "ld1w { z23.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- "ld1w { z14.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- "and x16, x16, #0x1\n"
- "ld1w { z24.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- "sub x13, x13, x22\n"
- "ld1w { z15.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- "ld1w { z25.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
- "cbz x22, 19f\n"
+ "cbz x21, 19f\n"
"11:" // Unpadded: Main loop
".inst 0xc1381960 // fmla za.s[x8, 0], { z11.s-z14.s }, z8.s\n"
- "ld1w { z8.s }, p2/Z, [x15, #4, MUL VL]\n"
- "add x21, x14, %x[ld_in_row], LSL #2\n"
- "subs x22, x22, #0x1\n"
+ "ld1w { z8.s }, p2/Z, [x14, #4, MUL VL]\n"
+ "add x20, x13, %x[ld_in_row], LSL #2\n"
+ "subs x21, x21, #0x1\n"
".inst 0xc1361961 // fmla za.s[x8, 1], { z11.s-z14.s }, z6.s\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
".inst 0xc1341962 // fmla za.s[x8, 2], { z11.s-z14.s }, z4.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc1331aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z3.s\n"
- "ld1w { z3.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z3.s }, p2/Z, [x14, #4, MUL VL]\n"
".inst 0xc1321aa1 // fmla za.s[x8, 1], { z21.s-z24.s }, z2.s\n"
- ".inst 0xa14149e2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xa14149c2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
".inst 0xc1311aa2 // fmla za.s[x8, 2], { z21.s-z24.s }, z1.s\n"
- ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc1381980 // fmla za.s[x8, 0], { z12.s-z15.s }, z8.s\n"
- "ld1w { z8.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z8.s }, p2/Z, [x14, #4, MUL VL]\n"
".inst 0xc1361981 // fmla za.s[x8, 1], { z12.s-z15.s }, z6.s\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
".inst 0xc1341982 // fmla za.s[x8, 2], { z12.s-z15.s }, z4.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
".inst 0xc1331ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z3.s\n"
- "ld1w { z11.s }, p1/Z, [x14]\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
+ "ld1w { z11.s }, p1/Z, [x13]\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
".inst 0xc1321ac1 // fmla za.s[x8, 1], { z22.s-z25.s }, z2.s\n"
- "ld1w { z21.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1311ac2 // fmla za.s[x8, 2], { z22.s-z25.s }, z1.s\n"
- "ld1w { z12.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- "ld1w { z22.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13819a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z8.s\n"
- ".inst 0xc13619a1 // fmla za.s[x8, 1], { z13.s-z16.s }, z6.s\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- ".inst 0xc13419a2 // fmla za.s[x8, 2], { z13.s-z16.s }, z4.s\n"
- "ld1w { z13.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- "ld1w { z23.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
- "add x8, x8, #0x1\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
- ".inst 0xc1b1c818 // fclamp { z24.s-z27.s }, z0.s, z17.s\n"
- "st1w { z24.s }, p1, [x10]\n"
- "ld1w { z14.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1371960 // fmla za.s[x8, 0], { z11.s-z14.s }, z7.s\n"
- "add x10, x10, x28, LSL #2\n"
- ".inst 0xc1351961 // fmla za.s[x8, 1], { z11.s-z14.s }, z5.s\n"
- ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
- "st1w { z25.s }, p1, [x9]\n"
- "add x9, x9, x27, LSL #2\n"
- ".inst 0xa14149e2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
- "st1w { z26.s }, p1, [x26]\n"
- "add x26, x26, x24, LSL #2\n"
- "ld1w { z24.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13a1aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z10.s\n"
- "st1w { z27.s }, p1, [x25]\n"
- ".inst 0xc1391aa1 // fmla za.s[x8, 1], { z21.s-z24.s }, z9.s\n"
- "ld1w { z15.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- "add x25, x25, x23, LSL #2\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- ".inst 0xc1351981 // fmla za.s[x8, 1], { z12.s-z15.s }, z5.s\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
- ".inst 0xc1371980 // fmla za.s[x8, 0], { z12.s-z15.s }, z7.s\n"
- "ld1w { z25.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
- ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
- ".inst 0xc1391ac1 // fmla za.s[x8, 1], { z22.s-z25.s }, z9.s\n"
- ".inst 0xa14149e2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
- ".inst 0xc13a1ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z10.s\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- ".inst 0xc13519a1 // fmla za.s[x8, 1], { z13.s-z16.s }, z5.s\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- ".inst 0xc13719a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z7.s\n"
- "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
- "ld1w { z11.s }, p1/Z, [x14]\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
"ld1w { z21.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1311ac2 // fmla za.s[x8, 2], { z22.s-z25.s }, z1.s\n"
"ld1w { z12.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
"ld1w { z22.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc13819a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z8.s\n"
+ ".inst 0xc13619a1 // fmla za.s[x8, 1], { z13.s-z16.s }, z6.s\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ ".inst 0xc13419a2 // fmla za.s[x8, 2], { z13.s-z16.s }, z4.s\n"
"ld1w { z13.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
"ld1w { z23.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
+ "add x8, x8, #0x1\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
+ ".inst 0xc1b1c818 // fclamp { z24.s-z27.s }, z0.s, z17.s\n"
+ "st1w { z24.s }, p1, [x9]\n"
"ld1w { z14.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1371960 // fmla za.s[x8, 0], { z11.s-z14.s }, z7.s\n"
+ "add x9, x9, x27, LSL #2\n"
+ ".inst 0xc1351961 // fmla za.s[x8, 1], { z11.s-z14.s }, z5.s\n"
+ ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+ "st1w { z25.s }, p1, [x28]\n"
+ "add x28, x28, x26, LSL #2\n"
+ ".inst 0xa14149c2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
+ "st1w { z26.s }, p1, [x25]\n"
+ "add x25, x25, x23, LSL #2\n"
"ld1w { z24.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc13a1aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z10.s\n"
+ "st1w { z27.s }, p1, [x24]\n"
+ ".inst 0xc1391aa1 // fmla za.s[x8, 1], { z21.s-z24.s }, z9.s\n"
"ld1w { z15.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ld1w { z8.s }, p2/Z, [x15, #4, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ "add x24, x24, x22, LSL #2\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xc1351981 // fmla za.s[x8, 1], { z12.s-z15.s }, z5.s\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
+ ".inst 0xc1371980 // fmla za.s[x8, 0], { z12.s-z15.s }, z7.s\n"
"ld1w { z25.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa14149e2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ld1w { z3.s }, p2/Z, [x15, #4, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
+ ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xc1391ac1 // fmla za.s[x8, 1], { z22.s-z25.s }, z9.s\n"
+ ".inst 0xa14149c2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
+ ".inst 0xc13a1ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z10.s\n"
"ld1w { z16.s }, p1/Z, [x20]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xc13519a1 // fmla za.s[x8, 1], { z13.s-z16.s }, z5.s\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ ".inst 0xc13719a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z7.s\n"
+ "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+ "ld1w { z11.s }, p1/Z, [x13]\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "ld1w { z21.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z12.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z22.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z13.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z23.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z24.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z15.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "ld1w { z8.s }, p2/Z, [x14, #4, MUL VL]\n"
+ "addvl x14, x14, #5\n"
+ "ld1w { z25.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xa14149c2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "ld1w { z3.s }, p2/Z, [x14, #4, MUL VL]\n"
+ "addvl x14, x14, #5\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
"bgt 11b\n"
"b 19f\n"
"12:" // Padded
- "cbz x22, 17f\n"
- "cmp x22, #0x1\n"
- "sub x16, x16, x22\n"
+ "cbz x21, 17f\n"
+ "cmp x21, #0x1\n"
+ "sub x15, x15, x21\n"
"beq 16f\n"
- "cmp x22, #0x2\n"
+ "cmp x21, #0x2\n"
"beq 15f\n"
- "cmp x22, #0x3\n"
+ "cmp x21, #0x3\n"
"beq 14f\n"
"13:" // Padded: 4 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z11.s }, p0/Z, [x14]\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
+ "ld1w { z11.s }, p0/Z, [x13]\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z21.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z21.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z12.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z12.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z22.s }, p0/Z, [x20]\n"
+ "ld1w { z22.s }, p0/Z, [x19]\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z13.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z13.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z23.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z23.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z14.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x8\n"
".inst 0xc1341960 // fmla za.s[x8, 0], { z11.s-z14.s }, z4.s\n"
- "ld1w { z24.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z24.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc1311aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z1.s\n"
- "ld1w { z15.s }, p0/Z, [x20]\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z15.s }, p0/Z, [x19]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
".inst 0xc1341980 // fmla za.s[x8, 0], { z12.s-z15.s }, z4.s\n"
- "addvl x15, x15, #5\n"
- "ld1w { z25.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
+ "addvl x14, x14, #5\n"
+ "ld1w { z25.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "addvl x15, x15, #5\n"
+ "addvl x14, x14, #5\n"
".inst 0xc1311ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z1.s\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
".inst 0xc13419a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z4.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
- ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
+ ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
"14:" // Padded: 3 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z11.s }, p0/Z, [x14]\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
+ "ld1w { z11.s }, p0/Z, [x13]\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z21.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z21.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z12.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z12.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z22.s }, p0/Z, [x20]\n"
+ "ld1w { z22.s }, p0/Z, [x19]\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z13.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z13.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z23.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z23.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z14.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x8\n"
".inst 0xc1351960 // fmla za.s[x8, 0], { z11.s-z14.s }, z5.s\n"
- "ld1w { z24.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z24.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc1391aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z9.s\n"
- "ld1w { z15.s }, p0/Z, [x20]\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z15.s }, p0/Z, [x19]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
".inst 0xc1351980 // fmla za.s[x8, 0], { z12.s-z15.s }, z5.s\n"
- "addvl x15, x15, #5\n"
- "ld1w { z25.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
+ "addvl x14, x14, #5\n"
+ "ld1w { z25.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "addvl x15, x15, #5\n"
+ "addvl x14, x14, #5\n"
".inst 0xc1391ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z9.s\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
".inst 0xc13519a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z5.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
- ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa14149e2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
+ ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xa14149c2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
"15:" // Padded: 2 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z11.s }, p0/Z, [x14]\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
+ "ld1w { z11.s }, p0/Z, [x13]\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z21.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z21.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z12.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z12.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z22.s }, p0/Z, [x20]\n"
+ "ld1w { z22.s }, p0/Z, [x19]\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z13.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z13.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z23.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z23.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z14.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x8\n"
".inst 0xc1361960 // fmla za.s[x8, 0], { z11.s-z14.s }, z6.s\n"
- "ld1w { z24.s }, p0/Z, [x20]\n"
+ "ld1w { z24.s }, p0/Z, [x19]\n"
".inst 0xc1341961 // fmla za.s[x8, 1], { z11.s-z14.s }, z4.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z15.s }, p0/Z, [x20]\n"
+ "ld1w { z15.s }, p0/Z, [x19]\n"
".inst 0xc1321aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z2.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
".inst 0xc1311aa1 // fmla za.s[x8, 1], { z21.s-z24.s }, z1.s\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
".inst 0xc1361980 // fmla za.s[x8, 0], { z12.s-z15.s }, z6.s\n"
- "ld1w { z25.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z25.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc1341981 // fmla za.s[x8, 1], { z12.s-z15.s }, z4.s\n"
- ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa14149e2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xa14149c2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc1321ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z2.s\n"
".inst 0xc1311ac1 // fmla za.s[x8, 1], { z22.s-z25.s }, z1.s\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
".inst 0xc13419a1 // fmla za.s[x8, 1], { z13.s-z16.s }, z4.s\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
".inst 0xc13619a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z6.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
- ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa14149e2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
+ ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xa14149c2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
"16:" // Padded: 1 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z11.s }, p0/Z, [x14]\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
+ "ld1w { z11.s }, p0/Z, [x13]\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z21.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z21.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z12.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z12.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z22.s }, p0/Z, [x20]\n"
+ "ld1w { z22.s }, p0/Z, [x19]\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z13.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z13.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z23.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z23.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z14.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x8\n"
".inst 0xc1371960 // fmla za.s[x8, 0], { z11.s-z14.s }, z7.s\n"
- "ld1w { z24.s }, p0/Z, [x20]\n"
+ "ld1w { z24.s }, p0/Z, [x19]\n"
".inst 0xc1351961 // fmla za.s[x8, 1], { z11.s-z14.s }, z5.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z15.s }, p0/Z, [x20]\n"
+ "ld1w { z15.s }, p0/Z, [x19]\n"
".inst 0xc13a1aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z10.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
".inst 0xc1391aa1 // fmla za.s[x8, 1], { z21.s-z24.s }, z9.s\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
".inst 0xc1371980 // fmla za.s[x8, 0], { z12.s-z15.s }, z7.s\n"
- "ld1w { z25.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z25.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc1351981 // fmla za.s[x8, 1], { z12.s-z15.s }, z5.s\n"
- ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa14149e2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xa14149c2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc13a1ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z10.s\n"
".inst 0xc1391ac1 // fmla za.s[x8, 1], { z22.s-z25.s }, z9.s\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
".inst 0xc13519a1 // fmla za.s[x8, 1], { z13.s-z16.s }, z5.s\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
".inst 0xc13719a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z7.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ld1w { z8.s }, p2/Z, [x15, #4, MUL VL]\n"
- "addvl x15, x15, #5\n"
- ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa14149e2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ld1w { z3.s }, p2/Z, [x15, #4, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "ld1w { z8.s }, p2/Z, [x14, #4, MUL VL]\n"
+ "addvl x14, x14, #5\n"
+ ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xa14149c2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "ld1w { z3.s }, p2/Z, [x14, #4, MUL VL]\n"
+ "addvl x14, x14, #5\n"
"17:" // Padded: 0 priming loads
- "cmp x16, #0x2\n"
+ "cmp x15, #0x2\n"
"blt 20f\n"
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z11.s }, p0/Z, [x14]\n"
- "add x21, x14, %x[ld_in_row], LSL #2\n"
+ "ld1w { z11.s }, p0/Z, [x13]\n"
+ "add x20, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z21.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z21.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z12.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z12.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z22.s }, p0/Z, [x21]\n"
+ "ld1w { z22.s }, p0/Z, [x20]\n"
"mov x12, #0x4\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z13.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z13.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z23.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z23.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z14.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x8\n"
- "ld1w { z24.s }, p0/Z, [x21]\n"
- "sub x16, x16, #0x2\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z24.s }, p0/Z, [x20]\n"
+ "sub x15, x15, #0x2\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "sub x13, x13, #0x1\n"
- "ld1w { z15.s }, p0/Z, [x21]\n"
- "lsr x20, x16, #0x1\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "sub x11, x11, #0x1\n"
+ "ld1w { z15.s }, p0/Z, [x20]\n"
+ "lsr x19, x15, #0x1\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "cmp x20, x13\n"
- "ld1w { z25.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "cmp x19, x11\n"
+ "ld1w { z25.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x21]\n"
- "csel x22, x20, x13, LT\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- "and x16, x16, #0x1\n"
- "sub x13, x13, x22\n"
- "cbz x22, 19f\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
+ "csel x21, x19, x11, LT\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "and x15, x15, #0x1\n"
+ "sub x11, x11, x21\n"
+ "cbz x21, 19f\n"
"18:" // Padded: Main loop
".inst 0xc1381960 // fmla za.s[x8, 0], { z11.s-z14.s }, z8.s\n"
- "ld1w { z8.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z8.s }, p2/Z, [x14, #4, MUL VL]\n"
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc1361961 // fmla za.s[x8, 1], { z11.s-z14.s }, z6.s\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "add x21, x14, %x[ld_in_row], LSL #2\n"
- "subs x22, x22, #0x1\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "add x20, x13, %x[ld_in_row], LSL #2\n"
+ "subs x21, x21, #0x1\n"
".inst 0xc1341962 // fmla za.s[x8, 2], { z11.s-z14.s }, z4.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc1331aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z3.s\n"
- "ld1w { z3.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z3.s }, p2/Z, [x14, #4, MUL VL]\n"
".inst 0xc1321aa1 // fmla za.s[x8, 1], { z21.s-z24.s }, z2.s\n"
- ".inst 0xa14149e2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xa14149c2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
".inst 0xc1311aa2 // fmla za.s[x8, 2], { z21.s-z24.s }, z1.s\n"
- ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc1381980 // fmla za.s[x8, 0], { z12.s-z15.s }, z8.s\n"
- "ld1w { z11.s }, p0/Z, [x14]\n"
+ "ld1w { z11.s }, p0/Z, [x13]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
".inst 0xc1361981 // fmla za.s[x8, 1], { z12.s-z15.s }, z6.s\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
".inst 0xc1341982 // fmla za.s[x8, 2], { z12.s-z15.s }, z4.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
".inst 0xc1331ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z3.s\n"
- "ld1w { z8.s }, p2/Z, [x15, #4, MUL VL]\n"
- "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
+ "ld1w { z8.s }, p2/Z, [x14, #4, MUL VL]\n"
+ "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
".inst 0xc1321ac1 // fmla za.s[x8, 1], { z22.s-z25.s }, z2.s\n"
- "ld1w { z21.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z21.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc1311ac2 // fmla za.s[x8, 2], { z22.s-z25.s }, z1.s\n"
- "ld1w { z12.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z12.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x4\n"
- "ld1w { z22.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z22.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0xc13819a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z8.s\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc13619a1 // fmla za.s[x8, 1], { z13.s-z16.s }, z6.s\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
".inst 0xc13419a2 // fmla za.s[x8, 2], { z13.s-z16.s }, z4.s\n"
- "ld1w { z13.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z13.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z23.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z23.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
"add x8, x8, #0x1\n"
- "addvl x15, x15, #5\n"
+ "addvl x14, x14, #5\n"
".inst 0xc1b1c818 // fclamp { z24.s-z27.s }, z0.s, z17.s\n"
- "ld1w { z14.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "st1w { z24.s }, p1, [x10]\n"
+ "st1w { z24.s }, p1, [x9]\n"
"mov x12, #0x8\n"
".inst 0xc1371960 // fmla za.s[x8, 0], { z11.s-z14.s }, z7.s\n"
- ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
- "add x10, x10, x28, LSL #2\n"
+ ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+ "add x9, x9, x27, LSL #2\n"
".inst 0xc1351961 // fmla za.s[x8, 1], { z11.s-z14.s }, z5.s\n"
- ".inst 0xa14149e2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
- "st1w { z25.s }, p1, [x9]\n"
- "ld1w { z24.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0xa14149c2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
+ "st1w { z25.s }, p1, [x28]\n"
+ "ld1w { z24.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc13a1aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z10.s\n"
".inst 0xc1391aa1 // fmla za.s[x8, 1], { z21.s-z24.s }, z9.s\n"
- "ld1w { z15.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z15.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
".inst 0xc1351981 // fmla za.s[x8, 1], { z12.s-z15.s }, z5.s\n"
- "add x9, x9, x27, LSL #2\n"
- "st1w { z26.s }, p1, [x26]\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ "add x28, x28, x26, LSL #2\n"
+ "st1w { z26.s }, p1, [x25]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc1371980 // fmla za.s[x8, 0], { z12.s-z15.s }, z7.s\n"
- "add x26, x26, x24, LSL #2\n"
- "ld1w { z25.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "add x25, x25, x23, LSL #2\n"
+ "ld1w { z25.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "st1w { z27.s }, p1, [x25]\n"
- ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
+ "st1w { z27.s }, p1, [x24]\n"
+ ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
"mov x12, #0x0\n"
".inst 0xc1391ac1 // fmla za.s[x8, 1], { z22.s-z25.s }, z9.s\n"
- "add x25, x25, x23, LSL #2\n"
- ".inst 0xa14149e2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ "add x24, x24, x22, LSL #2\n"
+ ".inst 0xa14149c2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc13a1ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z10.s\n"
- "ld1w { z16.s }, p0/Z, [x21]\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
- "ld1w { z11.s }, p0/Z, [x14]\n"
+ "ld1w { z11.s }, p0/Z, [x13]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
".inst 0xc13519a1 // fmla za.s[x8, 1], { z13.s-z16.s }, z5.s\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
".inst 0xc13719a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z7.s\n"
- "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
- "ld1w { z21.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+ "ld1w { z21.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z12.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z12.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x4\n"
- "ld1w { z22.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z22.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z13.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z13.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z23.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z23.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z14.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x8\n"
- "ld1w { z24.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z24.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z15.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z15.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ld1w { z8.s }, p2/Z, [x15, #4, MUL VL]\n"
- "addvl x15, x15, #5\n"
- "ld1w { z25.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "ld1w { z8.s }, p2/Z, [x14, #4, MUL VL]\n"
+ "addvl x14, x14, #5\n"
+ "ld1w { z25.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa14149e2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ld1w { z3.s }, p2/Z, [x15, #4, MUL VL]\n"
- "addvl x15, x15, #5\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xa14149c2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "ld1w { z3.s }, p2/Z, [x14, #4, MUL VL]\n"
+ "addvl x14, x14, #5\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
"bgt 18b\n"
"19:" // Main loop tail
".inst 0xc1381960 // fmla za.s[x8, 0], { z11.s-z14.s }, z8.s\n"
- "ld1w { z8.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z8.s }, p2/Z, [x14, #4, MUL VL]\n"
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc1361961 // fmla za.s[x8, 1], { z11.s-z14.s }, z6.s\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
".inst 0xc1341962 // fmla za.s[x8, 2], { z11.s-z14.s }, z4.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc1331aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z3.s\n"
- "ld1w { z3.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z3.s }, p2/Z, [x14, #4, MUL VL]\n"
".inst 0xc1321aa1 // fmla za.s[x8, 1], { z21.s-z24.s }, z2.s\n"
- ".inst 0xa14149e2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xa14149c2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
".inst 0xc1311aa2 // fmla za.s[x8, 2], { z21.s-z24.s }, z1.s\n"
- ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc1381980 // fmla za.s[x8, 0], { z12.s-z15.s }, z8.s\n"
- "ld1w { z11.s }, p0/Z, [x14]\n"
+ "ld1w { z11.s }, p0/Z, [x13]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
".inst 0xc1361981 // fmla za.s[x8, 1], { z12.s-z15.s }, z6.s\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
".inst 0xc1341982 // fmla za.s[x8, 2], { z12.s-z15.s }, z4.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
".inst 0xc1331ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z3.s\n"
- "ld1w { z8.s }, p2/Z, [x15, #4, MUL VL]\n"
- "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
+ "ld1w { z8.s }, p2/Z, [x14, #4, MUL VL]\n"
+ "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
".inst 0xc1321ac1 // fmla za.s[x8, 1], { z22.s-z25.s }, z2.s\n"
- "ld1w { z21.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z21.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc1311ac2 // fmla za.s[x8, 2], { z22.s-z25.s }, z1.s\n"
- "ld1w { z12.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z12.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x4\n"
- "ld1w { z22.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z22.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc13819a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z8.s\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc13619a1 // fmla za.s[x8, 1], { z13.s-z16.s }, z6.s\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
".inst 0xc13419a2 // fmla za.s[x8, 2], { z13.s-z16.s }, z4.s\n"
- "ld1w { z13.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z13.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z23.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z23.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
"add x8, x8, #0x1\n"
- "addvl x15, x15, #5\n"
+ "addvl x14, x14, #5\n"
".inst 0xc1b1c818 // fclamp { z24.s-z27.s }, z0.s, z17.s\n"
- "ld1w { z14.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "st1w { z24.s }, p1, [x10]\n"
+ "st1w { z24.s }, p1, [x9]\n"
"mov x12, #0x8\n"
".inst 0xc1371960 // fmla za.s[x8, 0], { z11.s-z14.s }, z7.s\n"
- ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
- "add x10, x10, x28, LSL #2\n"
+ ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+ "add x9, x9, x27, LSL #2\n"
".inst 0xc1351961 // fmla za.s[x8, 1], { z11.s-z14.s }, z5.s\n"
- ".inst 0xa14149e2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
- "st1w { z25.s }, p1, [x9]\n"
- "ld1w { z24.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xa14149c2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
+ "st1w { z25.s }, p1, [x28]\n"
+ "ld1w { z24.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc13a1aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z10.s\n"
".inst 0xc1391aa1 // fmla za.s[x8, 1], { z21.s-z24.s }, z9.s\n"
- "ld1w { z15.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z15.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
".inst 0xc1351981 // fmla za.s[x8, 1], { z12.s-z15.s }, z5.s\n"
- "add x9, x9, x27, LSL #2\n"
- "st1w { z26.s }, p1, [x26]\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ "add x28, x28, x26, LSL #2\n"
+ "st1w { z26.s }, p1, [x25]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc1371980 // fmla za.s[x8, 0], { z12.s-z15.s }, z7.s\n"
- "add x26, x26, x24, LSL #2\n"
- "ld1w { z25.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x25, x25, x23, LSL #2\n"
+ "ld1w { z25.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "st1w { z27.s }, p1, [x25]\n"
- ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
+ "st1w { z27.s }, p1, [x24]\n"
+ ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
".inst 0xc1391ac1 // fmla za.s[x8, 1], { z22.s-z25.s }, z9.s\n"
- "add x25, x25, x23, LSL #2\n"
- ".inst 0xa14149e2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ "add x24, x24, x22, LSL #2\n"
+ ".inst 0xa14149c2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc13a1ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z10.s\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
".inst 0xc13519a1 // fmla za.s[x8, 1], { z13.s-z16.s }, z5.s\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
".inst 0xc13719a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z7.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ld1w { z8.s }, p2/Z, [x15, #4, MUL VL]\n"
- "addvl x15, x15, #5\n"
- ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa14149e2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ld1w { z3.s }, p2/Z, [x15, #4, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "ld1w { z8.s }, p2/Z, [x14, #4, MUL VL]\n"
+ "addvl x14, x14, #5\n"
+ ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xa14149c2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "ld1w { z3.s }, p2/Z, [x14, #4, MUL VL]\n"
+ "addvl x14, x14, #5\n"
"20:" // Main loop skip tail
- "cbz x16, 21f\n" // Skip remainder inputs
+ "cbz x15, 21f\n" // Skip remainder inputs
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z11.s }, p0/Z, [x14]\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
+ "ld1w { z11.s }, p0/Z, [x13]\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z21.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z21.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z12.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z12.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z22.s }, p0/Z, [x20]\n"
+ "ld1w { z22.s }, p0/Z, [x19]\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z13.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z13.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z23.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z23.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z14.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x8\n"
".inst 0xc1381960 // fmla za.s[x8, 0], { z11.s-z14.s }, z8.s\n"
- "ld1w { z24.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z24.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc1331aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z3.s\n"
- "ld1w { z15.s }, p0/Z, [x20]\n"
- "ld1w { z8.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z15.s }, p0/Z, [x19]\n"
+ "ld1w { z8.s }, p2/Z, [x14, #4, MUL VL]\n"
".inst 0xc1361961 // fmla za.s[x8, 1], { z11.s-z14.s }, z6.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
".inst 0xc1341962 // fmla za.s[x8, 2], { z11.s-z14.s }, z4.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "sub x13, x13, #0x1\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ "sub x11, x11, #0x1\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc1381980 // fmla za.s[x8, 0], { z12.s-z15.s }, z8.s\n"
- "ld1w { z25.s }, p0/Z, [x20]\n"
+ "ld1w { z25.s }, p0/Z, [x19]\n"
".inst 0xc1321aa1 // fmla za.s[x8, 1], { z21.s-z24.s }, z2.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z3.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z3.s }, p2/Z, [x14, #4, MUL VL]\n"
".inst 0xc1311aa2 // fmla za.s[x8, 2], { z21.s-z24.s }, z1.s\n"
- ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
".inst 0xc1331ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z3.s\n"
- ".inst 0xa14149e2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa14149c2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "addvl x14, x14, #5\n"
".inst 0xc1361981 // fmla za.s[x8, 1], { z12.s-z15.s }, z6.s\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0xc1341982 // fmla za.s[x8, 2], { z12.s-z15.s }, z4.s\n"
- "ld1w { z8.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z8.s }, p2/Z, [x14, #4, MUL VL]\n"
".inst 0xc13819a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z8.s\n"
".inst 0xc1321ac1 // fmla za.s[x8, 1], { z22.s-z25.s }, z2.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
".inst 0xc1311ac2 // fmla za.s[x8, 2], { z22.s-z25.s }, z1.s\n"
- ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
".inst 0xc1b1c818 // fclamp { z24.s-z27.s }, z0.s, z17.s\n"
- "st1w { z24.s }, p1, [x10]\n"
- "add x10, x10, x28, LSL #2\n"
- ".inst 0xc13619a1 // fmla za.s[x8, 1], { z13.s-z16.s }, z6.s\n"
- "st1w { z25.s }, p1, [x9]\n"
+ "st1w { z24.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
+ ".inst 0xc13619a1 // fmla za.s[x8, 1], { z13.s-z16.s }, z6.s\n"
+ "st1w { z25.s }, p1, [x28]\n"
+ "add x28, x28, x26, LSL #2\n"
".inst 0xc13419a2 // fmla za.s[x8, 2], { z13.s-z16.s }, z4.s\n"
"add x8, x8, #0x1\n"
- "st1w { z26.s }, p1, [x26]\n"
- "add x26, x26, x24, LSL #2\n"
- "st1w { z27.s }, p1, [x25]\n"
+ "st1w { z26.s }, p1, [x25]\n"
"add x25, x25, x23, LSL #2\n"
+ "st1w { z27.s }, p1, [x24]\n"
+ "add x24, x24, x22, LSL #2\n"
".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
"21:" // Tail input: End
- "cbz x13, 23f\n"
+ "cbz x11, 23f\n"
"22:" // Right padding loop
".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
"add x8, x8, #0x1\n"
- "subs x13, x13, #0x1\n"
+ "subs x11, x11, #0x1\n"
".inst 0xc1b1c818 // fclamp { z24.s-z27.s }, z0.s, z17.s\n"
- "st1w { z24.s }, p1, [x10]\n"
- "add x10, x10, x28, LSL #2\n"
- ".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
- "st1w { z25.s }, p1, [x9]\n"
+ "st1w { z24.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
- "st1w { z26.s }, p1, [x26]\n"
- "add x26, x26, x24, LSL #2\n"
- "st1w { z27.s }, p1, [x25]\n"
+ ".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
+ "st1w { z25.s }, p1, [x28]\n"
+ "add x28, x28, x26, LSL #2\n"
+ "st1w { z26.s }, p1, [x25]\n"
"add x25, x25, x23, LSL #2\n"
+ "st1w { z27.s }, p1, [x24]\n"
+ "add x24, x24, x22, LSL #2\n"
"bgt 22b\n"
"23:" // End
- "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
- "incb x15, ALL, MUL #16\n"
- "incb x15, ALL, MUL #9\n"
- "str x15, [%x[args], %[offsetof_Args_weights]]\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "incw x17\n"
- "whilelt p1.s, x17, x7\n"
- "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "add x14, x14, x20, LSL #2\n"
- "str x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "ldr x11, [%x[args], %[offsetof_Args_outptrs]]\n"
- "ldr x24, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
- "ldp x23, x22, [x11, #0x0]\n"
- "ldp x21, x20, [x24, #0x0]\n"
- "add x23, x23, x21, LSL #2\n"
+ "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+ "incb x14, ALL, MUL #16\n"
+ "incb x14, ALL, MUL #9\n"
+ "str x14, [%x[args], %[offsetof_Args_weights]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+ "incw x16\n"
+ "whilelt p1.s, x16, x17\n"
+ "ldr x13, [%x[args], %[offsetof_Args_inptr]]\n"
+ "add x13, x13, x19, LSL #2\n"
+ "str x13, [%x[args], %[offsetof_Args_inptr]]\n"
+ "ldr x10, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+ "ldp x22, x21, [x10, #0x0]\n"
+ "ldp x20, x19, [x23, #0x0]\n"
"add x22, x22, x20, LSL #2\n"
- "stp x23, x22, [x11, #0x0]\n"
- "ldp x23, x22, [x11, #0x10]\n"
- "ldp x21, x20, [x24, #0x10]\n"
- "add x23, x23, x21, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "stp x22, x21, [x10, #0x0]\n"
+ "ldp x22, x21, [x10, #0x10]\n"
+ "ldp x20, x19, [x23, #0x10]\n"
"add x22, x22, x20, LSL #2\n"
- "stp x23, x22, [x11, #0x10]\n"
+ "add x21, x21, x19, LSL #2\n"
+ "stp x22, x21, [x10, #0x10]\n"
"b.any 1b\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_bias] "I" (offsetof(Args, bias)), [offsetof_Args_clamp_max] "I" (offsetof(Args, clamp_max)), [offsetof_Args_clamp_min] "I" (offsetof(Args, clamp_min)), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za/generic.cpp
index 5570b27644..e6c0cb7910 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -69,174 +69,174 @@ void sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za_impl(
Args args = { inptr, ld_in_vl, pad_top, 6u - std::min(6u, pad_top + valid_input_rows), pad_left, weights, bias, valid_input_cols, output_cols, outptrs, outlds, outvllds, start_channel, valid_channels, act_min, act_max };
__asm__ __volatile__(
- "ldr x6, [%x[args], %[offsetof_Args_pad_bottom]]\n"
- "mov x20, #0x6\n"
+ "ldr x7, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "mov x19, #0x6\n"
".inst 0xd503477f // SMSTART ZA\n"
- "sub x20, x20, x6\n"
- "ldr x7, [%x[args], %[offsetof_Args_pad_top]]\n"
+ "sub x19, x19, x7\n"
+ "ldr x17, [%x[args], %[offsetof_Args_pad_top]]\n"
"ptrue p2.b\n"
"ld1rw { z28.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_min]]\n"
- "ldr x17, [%x[args], %[offsetof_Args_n_channels]]\n"
- "whilelt p1.s, XZR, x17\n"
- "whilelt p9.s, XZR, x20\n"
+ "ldr x16, [%x[args], %[offsetof_Args_n_channels]]\n"
+ "whilelt p1.s, XZR, x16\n"
+ "whilelt p9.s, XZR, x19\n"
"ld1rw { z29.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_max]]\n"
- "whilelt p8.s, XZR, x7\n"
+ "whilelt p8.s, XZR, x17\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
- "ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ldr x15, [%x[args], %[offsetof_Args_current_channel]]\n"
"1:" // Channel loop
- "ldr x20, [%x[args], %[offsetof_Args_bias]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_bias]]\n"
"fmov z22.s, #0x0\n"
- "cbz x20, 2f\n"
- "ld1w { z22.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "cbz x19, 2f\n"
+ "ld1w { z22.s }, p1/Z, [x19, x15, LSL #2]\n"
"2:" // Load bias: Done
- "ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
- "mov x21, x20\n"
+ "ldr x19, [%x[args], %[offsetof_Args_weights]]\n"
+ "mov x20, x19\n"
"fmov z9.s, #0x0\n"
- "ld1w { z25.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #3\n"
- "incb x20\n"
- "ld1w { z27.s }, p2/Z, [x21]\n"
+ "ld1w { z25.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #3\n"
+ "incb x19\n"
+ "ld1w { z27.s }, p2/Z, [x20]\n"
".inst 0x648aab29 // bfcvtnt z9.h, p2/M, z25.s\n"
- "incb x21, ALL, MUL #3\n"
- "ld1w { z21.s }, p2/Z, [x21]\n"
- "mov x21, x20\n"
+ "incb x20, ALL, MUL #3\n"
+ "ld1w { z21.s }, p2/Z, [x20]\n"
+ "mov x20, x19\n"
".inst 0x658aab28 // bfcvt z8.h, p2/M, z25.s\n"
- "ld1w { z25.s }, p2/Z, [x21]\n"
+ "ld1w { z25.s }, p2/Z, [x20]\n"
".inst 0x658aab66 // bfcvt z6.h, p2/M, z27.s\n"
"fmov z2.s, #0x0\n"
- "incb x21, ALL, MUL #3\n"
+ "incb x20, ALL, MUL #3\n"
".inst 0x658aab21 // bfcvt z1.h, p2/M, z25.s\n"
".inst 0x648aab68 // bfcvtnt z8.h, p2/M, z27.s\n"
- "incb x20\n"
- "ld1w { z27.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #3\n"
+ "incb x19\n"
+ "ld1w { z27.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #3\n"
".inst 0x648aaaa6 // bfcvtnt z6.h, p2/M, z21.s\n"
".inst 0x658aaaa5 // bfcvt z5.h, p2/M, z21.s\n"
- "ld1w { z21.s }, p2/Z, [x21]\n"
- "mov x21, x20\n"
+ "ld1w { z21.s }, p2/Z, [x20]\n"
+ "mov x20, x19\n"
".inst 0x648aab22 // bfcvtnt z2.h, p2/M, z25.s\n"
- "ld1w { z25.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #3\n"
+ "ld1w { z25.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #3\n"
".inst 0x648aab61 // bfcvtnt z1.h, p2/M, z27.s\n"
".inst 0x658aab6c // bfcvt z12.h, p2/M, z27.s\n"
- "ld1w { z27.s }, p2/Z, [x21]\n"
- "ldr x15, [%x[args], %[offsetof_Args_input_cols]]\n"
- "incb x21, ALL, MUL #3\n"
+ "ld1w { z27.s }, p2/Z, [x20]\n"
+ "ldr x14, [%x[args], %[offsetof_Args_input_cols]]\n"
+ "incb x20, ALL, MUL #3\n"
"fmov z7.s, #0x0\n"
".inst 0x658aab24 // bfcvt z4.h, p2/M, z25.s\n"
- "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
+ "ldr x13, [%x[args], %[offsetof_Args_inptr]]\n"
".inst 0x658aab60 // bfcvt z0.h, p2/M, z27.s\n"
".inst 0x648aaaac // bfcvtnt z12.h, p2/M, z21.s\n"
- "sub x20, x15, #0x1\n"
- "orr x23, x20, %x[ld_in_col], LSL #18\n"
+ "sub x19, x14, #0x1\n"
+ "orr x22, x19, %x[ld_in_col], LSL #18\n"
".inst 0x658aaaaa // bfcvt z10.h, p2/M, z21.s\n"
- "ld1w { z21.s }, p2/Z, [x21]\n"
- "orr x23, x17, x23, LSL #20\n"
- "mov x22, #0x6\n"
- "add x21, x7, x6\n"
- "lsl x20, %x[ld_in_row], #0x2\n"
+ "ld1w { z21.s }, p2/Z, [x20]\n"
+ "orr x22, x16, x22, LSL #20\n"
+ "mov x21, #0x6\n"
+ "add x20, x17, x7\n"
+ "lsl x19, %x[ld_in_row], #0x2\n"
"mov z23.d, z22.d\n"
".inst 0x648aab27 // bfcvtnt z7.h, p2/M, z25.s\n"
".inst 0x648aab64 // bfcvtnt z4.h, p2/M, z27.s\n"
".inst 0x648aaaa0 // bfcvtnt z0.h, p2/M, z21.s\n"
"mov x8, #0x0\n"
- "ldr x13, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "ldr x11, [%x[args], %[offsetof_Args_output_cols]]\n"
".inst 0x658aaaa3 // bfcvt z3.h, p2/M, z21.s\n"
- "lsl x23, x23, #0x2\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x7, x14\n"
+ "lsl x22, x22, #0x2\n"
+ "sub x21, x21, x20\n"
+ "madd x19, x19, x17, x13\n"
"3:" // Issue prefetches
- "subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col], LSL #2\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xf8b64a7c // rprfm pldstrm, x22, [x19]\n"
+ "add x19, x19, %x[ld_in_col], LSL #2\n"
"bgt 3b\n"
- "ldr x11, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x2\n"
- "msub x14, x7, x20, x14\n"
+ "ldr x10, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "lsl x19, %x[ld_in_row], #0x2\n"
+ "msub x13, x17, x19, x13\n"
".inst 0xc0040ac0 // mova za.d[x8, #0], { z22.d-z23.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
".inst 0xc0040ac1 // mova za.d[x8, #1], { z22.d-z23.d }\n"
- "mov x10, #0x2\n"
- "ldp x9, x28, [x11], #0x10\n"
+ "mov x9, #0x2\n"
+ "ldp x28, x27, [x10], #0x10\n"
".inst 0xc0040ac2 // mova za.d[x8, #2], { z22.d-z23.d }\n"
- "ldp x27, x26, [x20], #0x10\n"
+ "ldp x26, x25, [x19], #0x10\n"
".inst 0xc0040ac3 // mova za.d[x8, #3], { z22.d-z23.d }\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
+ "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
".inst 0xc0040ac4 // mova za.d[x8, #4], { z22.d-z23.d }\n"
- "ldp x25, x24, [x11], #0x10\n"
+ "ldp x24, x23, [x10], #0x10\n"
".inst 0xc0040ac5 // mova za.d[x8, #5], { z22.d-z23.d }\n"
- "ldp x23, x22, [x20], #0x10\n"
- "cbz x21, 5f\n"
- "cmp x21, x10\n"
- "csel x20, x21, x10, LT\n"
- "sub x21, x21, x20\n"
- "sub x10, x10, x20\n"
- "cbz x21, 5f\n"
+ "ldp x22, x21, [x19], #0x10\n"
+ "cbz x20, 5f\n"
+ "cmp x20, x9\n"
+ "csel x19, x20, x9, LT\n"
+ "sub x20, x20, x19\n"
+ "sub x9, x9, x19\n"
+ "cbz x20, 5f\n"
".inst 0xc0060818 // mova { z24.d-z25.d }, za.d[x8, #0]\n"
- "sub x13, x13, x21\n"
+ "sub x11, x11, x20\n"
".inst 0xc006083a // mova { z26.d-z27.d }, za.d[x8, #1]\n"
".inst 0xc1bdcb98 // fclamp { z24.s-z27.s }, z28.s, z29.s\n"
"4:" // Left padding
- "subs x21, x21, #0x1\n"
- "st1w { z24.s }, p1, [x9]\n"
- "add x9, x9, x27, LSL #2\n"
- "st1w { z26.s }, p1, [x28]\n"
+ "subs x20, x20, #0x1\n"
+ "st1w { z24.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
- "st1w { z25.s }, p1, [x25]\n"
- "add x25, x25, x23, LSL #2\n"
- "st1w { z27.s }, p1, [x24]\n"
+ "st1w { z26.s }, p1, [x27]\n"
+ "add x27, x27, x25, LSL #2\n"
+ "st1w { z25.s }, p1, [x24]\n"
"add x24, x24, x22, LSL #2\n"
+ "st1w { z27.s }, p1, [x23]\n"
+ "add x23, x23, x21, LSL #2\n"
"bgt 4b\n"
"5:" // Left padding: End
- "adds XZR, x7, x6\n"
+ "adds XZR, x17, x7\n"
"bne 10f\n"
- "cbz x10, 8f\n"
- "cmp x10, #0x1\n"
- "sub x15, x15, x10\n"
+ "cbz x9, 8f\n"
+ "cmp x9, #0x1\n"
+ "sub x14, x14, x9\n"
"beq 7f\n"
"6:" // Unpadded: 2 priming loads
- "add x20, x14, %x[ld_in_row], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x14]\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x13]\n"
".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
".inst 0xc12811b0 // bfdot za.s[x8, 0], { z13.h-z14.h }, z8.h\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
".inst 0xc12911b1 // bfdot za.s[x8, 1], { z13.h-z14.h }, z9.h\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
".inst 0x648aaa0f // bfcvtnt z15.h, p2/M, z16.s\n"
".inst 0xc12511d0 // bfdot za.s[x8, 0], { z14.h-z15.h }, z5.h\n"
".inst 0xc12611d1 // bfdot za.s[x8, 1], { z14.h-z15.h }, z6.h\n"
"7:" // Unpadded: 1 priming loads
- "add x20, x14, %x[ld_in_row], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x14]\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x13]\n"
".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
".inst 0xc12111b0 // bfdot za.s[x8, 0], { z13.h-z14.h }, z1.h\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
".inst 0xc12211b1 // bfdot za.s[x8, 1], { z13.h-z14.h }, z2.h\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
".inst 0x648aaa0f // bfcvtnt z15.h, p2/M, z16.s\n"
".inst 0xc12811b2 // bfdot za.s[x8, 2], { z13.h-z14.h }, z8.h\n"
".inst 0xc12911b3 // bfdot za.s[x8, 3], { z13.h-z14.h }, z9.h\n"
@@ -245,51 +245,51 @@ void sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za_impl(
".inst 0xc12511d2 // bfdot za.s[x8, 2], { z14.h-z15.h }, z5.h\n"
".inst 0xc12611d3 // bfdot za.s[x8, 3], { z14.h-z15.h }, z6.h\n"
"8:" // Unpadded: 0 priming loads
- "cbz x15, 16f\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x14]\n"
+ "cbz x14, 16f\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x13]\n"
".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
- "sub x15, x15, #0x1\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "sub x13, x13, #0x1\n"
+ "sub x14, x14, #0x1\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "sub x11, x11, #0x1\n"
".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
- "cmp x15, x13\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "csel x21, x15, x13, LT\n"
+ "cmp x14, x11\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "csel x20, x14, x11, LT\n"
".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
".inst 0x648aaa0f // bfcvtnt z15.h, p2/M, z16.s\n"
- "sub x13, x13, x21\n"
- "cbz x21, 15f\n"
+ "sub x11, x11, x20\n"
+ "cbz x20, 15f\n"
"9:" // Unpadded: Main loop
".inst 0xc12411b0 // bfdot za.s[x8, 0], { z13.h-z14.h }, z4.h\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
- "ld1w { z21.s }, p1/Z, [x14]\n"
- "subs x21, x21, #0x1\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
+ "ld1w { z21.s }, p1/Z, [x13]\n"
+ "subs x20, x20, #0x1\n"
".inst 0xc12711b1 // bfdot za.s[x8, 1], { z13.h-z14.h }, z7.h\n"
- "ld1w { z20.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- "ld1w { z19.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z20.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "ld1w { z19.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc12311d0 // bfdot za.s[x8, 0], { z14.h-z15.h }, z3.h\n"
".inst 0xc12011d1 // bfdot za.s[x8, 1], { z14.h-z15.h }, z0.h\n"
- "ld1w { z18.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z18.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc12111b2 // bfdot za.s[x8, 2], { z13.h-z14.h }, z1.h\n"
- "ld1w { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc12211b3 // bfdot za.s[x8, 3], { z13.h-z14.h }, z2.h\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
".inst 0xc12811b4 // bfdot za.s[x8, 4], { z13.h-z14.h }, z8.h\n"
".inst 0xc12911b5 // bfdot za.s[x8, 5], { z13.h-z14.h }, z9.h\n"
".inst 0x658aaaad // bfcvt z13.h, p2/M, z21.s\n"
@@ -306,155 +306,155 @@ void sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za_impl(
".inst 0xc006083a // mova { z26.d-z27.d }, za.d[x8, #1]\n"
"add x8, x8, #0x2\n"
".inst 0xc1bdcb98 // fclamp { z24.s-z27.s }, z28.s, z29.s\n"
- "st1w { z24.s }, p1, [x9]\n"
- "add x9, x9, x27, LSL #2\n"
- "st1w { z26.s }, p1, [x28]\n"
+ "st1w { z24.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
+ "st1w { z26.s }, p1, [x27]\n"
+ "add x27, x27, x25, LSL #2\n"
".inst 0xc0040ac4 // mova za.d[x8, #4], { z22.d-z23.d }\n"
- "st1w { z25.s }, p1, [x25]\n"
- "add x25, x25, x23, LSL #2\n"
- ".inst 0xc0040ac5 // mova za.d[x8, #5], { z22.d-z23.d }\n"
- "st1w { z27.s }, p1, [x24]\n"
+ "st1w { z25.s }, p1, [x24]\n"
"add x24, x24, x22, LSL #2\n"
+ ".inst 0xc0040ac5 // mova za.d[x8, #5], { z22.d-z23.d }\n"
+ "st1w { z27.s }, p1, [x23]\n"
+ "add x23, x23, x21, LSL #2\n"
"bgt 9b\n"
"b 15f\n"
"10:" // Padded
- "cbz x10, 13f\n"
- "cmp x10, #0x1\n"
- "sub x15, x15, x10\n"
+ "cbz x9, 13f\n"
+ "cmp x9, #0x1\n"
+ "sub x14, x14, x9\n"
"beq 12f\n"
"11:" // Padded: 2 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x14]\n"
+ "ld1w { z16.s }, p0/Z, [x13]\n"
".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc12811b0 // bfdot za.s[x8, 0], { z13.h-z14.h }, z8.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x648aaa0f // bfcvtnt z15.h, p2/M, z16.s\n"
".inst 0xc12911b1 // bfdot za.s[x8, 1], { z13.h-z14.h }, z9.h\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
".inst 0xc12511d0 // bfdot za.s[x8, 0], { z14.h-z15.h }, z5.h\n"
".inst 0xc12611d1 // bfdot za.s[x8, 1], { z14.h-z15.h }, z6.h\n"
"12:" // Padded: 1 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x14]\n"
+ "ld1w { z16.s }, p0/Z, [x13]\n"
".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc12111b0 // bfdot za.s[x8, 0], { z13.h-z14.h }, z1.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x648aaa0f // bfcvtnt z15.h, p2/M, z16.s\n"
".inst 0xc12211b1 // bfdot za.s[x8, 1], { z13.h-z14.h }, z2.h\n"
".inst 0xc12811b2 // bfdot za.s[x8, 2], { z13.h-z14.h }, z8.h\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
".inst 0xc12911b3 // bfdot za.s[x8, 3], { z13.h-z14.h }, z9.h\n"
".inst 0xc12a11d0 // bfdot za.s[x8, 0], { z14.h-z15.h }, z10.h\n"
".inst 0xc12c11d1 // bfdot za.s[x8, 1], { z14.h-z15.h }, z12.h\n"
".inst 0xc12511d2 // bfdot za.s[x8, 2], { z14.h-z15.h }, z5.h\n"
".inst 0xc12611d3 // bfdot za.s[x8, 3], { z14.h-z15.h }, z6.h\n"
"13:" // Padded: 0 priming loads
- "cbz x15, 16f\n"
+ "cbz x14, 16f\n"
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x14]\n"
+ "ld1w { z16.s }, p0/Z, [x13]\n"
".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "sub x15, x15, #0x1\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "sub x14, x14, #0x1\n"
".inst 0x648aaa0f // bfcvtnt z15.h, p2/M, z16.s\n"
- "sub x13, x13, #0x1\n"
- "cmp x15, x13\n"
- "csel x21, x15, x13, LT\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- "sub x13, x13, x21\n"
- "cbz x21, 15f\n"
+ "sub x11, x11, #0x1\n"
+ "cmp x14, x11\n"
+ "csel x20, x14, x11, LT\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "sub x11, x11, x20\n"
+ "cbz x20, 15f\n"
"14:" // Padded: Main loop
"mov x12, #0x0\n"
".inst 0xc12411b0 // bfdot za.s[x8, 0], { z13.h-z14.h }, z4.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z21.s }, p0/Z, [x14]\n"
+ "ld1w { z21.s }, p0/Z, [x13]\n"
".inst 0xc12711b1 // bfdot za.s[x8, 1], { z13.h-z14.h }, z7.h\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z20.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z20.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z19.s }, p0/Z, [x20]\n"
+ "ld1w { z19.s }, p0/Z, [x19]\n"
".inst 0xc12311d0 // bfdot za.s[x8, 0], { z14.h-z15.h }, z3.h\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
".inst 0xc12011d1 // bfdot za.s[x8, 1], { z14.h-z15.h }, z0.h\n"
- "ld1w { z18.s }, p0/Z, [x20]\n"
+ "ld1w { z18.s }, p0/Z, [x19]\n"
"mov x12, #0x4\n"
".inst 0xc12111b2 // bfdot za.s[x8, 2], { z13.h-z14.h }, z1.h\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc12211b3 // bfdot za.s[x8, 3], { z13.h-z14.h }, z2.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z17.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z17.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc12811b4 // bfdot za.s[x8, 4], { z13.h-z14.h }, z8.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "subs x21, x21, #0x1\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "subs x20, x20, #0x1\n"
".inst 0xc12911b5 // bfdot za.s[x8, 5], { z13.h-z14.h }, z9.h\n"
".inst 0x658aaaad // bfcvt z13.h, p2/M, z21.s\n"
".inst 0x648aaa8d // bfcvtnt z13.h, p2/M, z20.s\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
".inst 0xc12a11d2 // bfdot za.s[x8, 2], { z14.h-z15.h }, z10.h\n"
".inst 0xc12c11d3 // bfdot za.s[x8, 3], { z14.h-z15.h }, z12.h\n"
".inst 0xc12511d4 // bfdot za.s[x8, 4], { z14.h-z15.h }, z5.h\n"
@@ -467,16 +467,16 @@ void sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za_impl(
".inst 0xc006083a // mova { z26.d-z27.d }, za.d[x8, #1]\n"
"add x8, x8, #0x2\n"
".inst 0xc1bdcb98 // fclamp { z24.s-z27.s }, z28.s, z29.s\n"
- "st1w { z24.s }, p1, [x9]\n"
- "add x9, x9, x27, LSL #2\n"
- "st1w { z26.s }, p1, [x28]\n"
+ "st1w { z24.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
+ "st1w { z26.s }, p1, [x27]\n"
+ "add x27, x27, x25, LSL #2\n"
".inst 0xc0040ac4 // mova za.d[x8, #4], { z22.d-z23.d }\n"
- "st1w { z25.s }, p1, [x25]\n"
- "add x25, x25, x23, LSL #2\n"
- ".inst 0xc0040ac5 // mova za.d[x8, #5], { z22.d-z23.d }\n"
- "st1w { z27.s }, p1, [x24]\n"
+ "st1w { z25.s }, p1, [x24]\n"
"add x24, x24, x22, LSL #2\n"
+ ".inst 0xc0040ac5 // mova za.d[x8, #5], { z22.d-z23.d }\n"
+ "st1w { z27.s }, p1, [x23]\n"
+ "add x23, x23, x21, LSL #2\n"
"bgt 14b\n"
"15:" // Main loop tail
".inst 0xc12411b0 // bfdot za.s[x8, 0], { z13.h-z14.h }, z4.h\n"
@@ -490,67 +490,67 @@ void sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za_impl(
".inst 0xc0060818 // mova { z24.d-z25.d }, za.d[x8, #0]\n"
".inst 0xc006083a // mova { z26.d-z27.d }, za.d[x8, #1]\n"
".inst 0xc1bdcb98 // fclamp { z24.s-z27.s }, z28.s, z29.s\n"
- "st1w { z24.s }, p1, [x9]\n"
- "add x9, x9, x27, LSL #2\n"
- ".inst 0xc12a11d2 // bfdot za.s[x8, 2], { z14.h-z15.h }, z10.h\n"
- "st1w { z26.s }, p1, [x28]\n"
+ "st1w { z24.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
+ ".inst 0xc12a11d2 // bfdot za.s[x8, 2], { z14.h-z15.h }, z10.h\n"
+ "st1w { z26.s }, p1, [x27]\n"
+ "add x27, x27, x25, LSL #2\n"
".inst 0xc12c11d3 // bfdot za.s[x8, 3], { z14.h-z15.h }, z12.h\n"
- "st1w { z25.s }, p1, [x25]\n"
- "add x25, x25, x23, LSL #2\n"
- ".inst 0xc12511d4 // bfdot za.s[x8, 4], { z14.h-z15.h }, z5.h\n"
- "st1w { z27.s }, p1, [x24]\n"
+ "st1w { z25.s }, p1, [x24]\n"
"add x24, x24, x22, LSL #2\n"
+ ".inst 0xc12511d4 // bfdot za.s[x8, 4], { z14.h-z15.h }, z5.h\n"
+ "st1w { z27.s }, p1, [x23]\n"
+ "add x23, x23, x21, LSL #2\n"
".inst 0xc12611d5 // bfdot za.s[x8, 5], { z14.h-z15.h }, z6.h\n"
"add x8, x8, #0x2\n"
".inst 0xc0040ac4 // mova za.d[x8, #4], { z22.d-z23.d }\n"
".inst 0xc0040ac5 // mova za.d[x8, #5], { z22.d-z23.d }\n"
"16:" // Main loop skip tail
- "cbz x13, 18f\n"
+ "cbz x11, 18f\n"
"17:" // Right padding loop
".inst 0xc0060818 // mova { z24.d-z25.d }, za.d[x8, #0]\n"
- "subs x13, x13, #0x1\n"
+ "subs x11, x11, #0x1\n"
".inst 0xc006083a // mova { z26.d-z27.d }, za.d[x8, #1]\n"
"add x8, x8, #0x2\n"
".inst 0xc1bdcb98 // fclamp { z24.s-z27.s }, z28.s, z29.s\n"
- "st1w { z24.s }, p1, [x9]\n"
- "add x9, x9, x27, LSL #2\n"
- "st1w { z26.s }, p1, [x28]\n"
+ "st1w { z24.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
+ "st1w { z26.s }, p1, [x27]\n"
+ "add x27, x27, x25, LSL #2\n"
".inst 0xc0040ac4 // mova za.d[x8, #4], { z22.d-z23.d }\n"
- "st1w { z25.s }, p1, [x25]\n"
- "add x25, x25, x23, LSL #2\n"
- ".inst 0xc0040ac5 // mova za.d[x8, #5], { z22.d-z23.d }\n"
- "st1w { z27.s }, p1, [x24]\n"
+ "st1w { z25.s }, p1, [x24]\n"
"add x24, x24, x22, LSL #2\n"
+ ".inst 0xc0040ac5 // mova za.d[x8, #5], { z22.d-z23.d }\n"
+ "st1w { z27.s }, p1, [x23]\n"
+ "add x23, x23, x21, LSL #2\n"
"bgt 17b\n"
"18:" // End
- "ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
- "incb x20, ALL, MUL #9\n"
- "str x20, [%x[args], %[offsetof_Args_weights]]\n"
- "incw x16\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "whilelt p1.s, x16, x17\n"
- "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "add x14, x14, x20, LSL #2\n"
- "str x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "ldr x11, [%x[args], %[offsetof_Args_outptrs]]\n"
- "ldr x24, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
- "ldp x23, x22, [x11, #0x0]\n"
- "ldp x21, x20, [x24, #0x0]\n"
- "add x23, x23, x21, LSL #2\n"
+ "ldr x19, [%x[args], %[offsetof_Args_weights]]\n"
+ "incb x19, ALL, MUL #9\n"
+ "str x19, [%x[args], %[offsetof_Args_weights]]\n"
+ "incw x15\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+ "whilelt p1.s, x15, x16\n"
+ "ldr x13, [%x[args], %[offsetof_Args_inptr]]\n"
+ "add x13, x13, x19, LSL #2\n"
+ "str x13, [%x[args], %[offsetof_Args_inptr]]\n"
+ "ldr x10, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+ "ldp x22, x21, [x10, #0x0]\n"
+ "ldp x20, x19, [x23, #0x0]\n"
"add x22, x22, x20, LSL #2\n"
- "stp x23, x22, [x11, #0x0]\n"
- "ldp x23, x22, [x11, #0x10]\n"
- "ldp x21, x20, [x24, #0x10]\n"
- "add x23, x23, x21, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "stp x22, x21, [x10, #0x0]\n"
+ "ldp x22, x21, [x10, #0x10]\n"
+ "ldp x20, x19, [x23, #0x10]\n"
"add x22, x22, x20, LSL #2\n"
- "stp x23, x22, [x11, #0x10]\n"
+ "add x21, x21, x19, LSL #2\n"
+ "stp x22, x21, [x10, #0x10]\n"
"b.any 1b\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_bias] "I" (offsetof(Args, bias)), [offsetof_Args_clamp_max] "I" (offsetof(Args, clamp_max)), [offsetof_Args_clamp_min] "I" (offsetof(Args, clamp_min)), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_3x3_s2_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_3x3_s2_4rows_dot_za/generic.cpp
index e8c9bfeb29..253f0dae0c 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_3x3_s2_4rows_dot_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_3x3_s2_4rows_dot_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -69,691 +69,691 @@ void sme2_fp32bf16fp32_planar_3x3_s2_4rows_dot_za_impl(
Args args = { inptr, ld_in_vl, pad_top, 9u - std::min(9u, pad_top + valid_input_rows), pad_left, weights, bias, valid_input_cols, output_cols, outptrs, outlds, outvllds, start_channel, valid_channels, act_min, act_max };
__asm__ __volatile__(
- "ldr x6, [%x[args], %[offsetof_Args_pad_bottom]]\n"
- "mov x20, #0x9\n"
+ "ldr x7, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "mov x19, #0x9\n"
".inst 0xd503477f // SMSTART ZA\n"
- "sub x20, x20, x6\n"
- "ldr x7, [%x[args], %[offsetof_Args_pad_top]]\n"
+ "sub x19, x19, x7\n"
+ "ldr x17, [%x[args], %[offsetof_Args_pad_top]]\n"
"ptrue p2.b\n"
"ld1rw { z27.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_min]]\n"
- "ldr x17, [%x[args], %[offsetof_Args_n_channels]]\n"
- "whilelt p1.s, XZR, x17\n"
- "whilelt p9.s, XZR, x20\n"
+ "ldr x16, [%x[args], %[offsetof_Args_n_channels]]\n"
+ "whilelt p1.s, XZR, x16\n"
+ "whilelt p9.s, XZR, x19\n"
"ld1rw { z23.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_max]]\n"
- "whilelt p8.s, XZR, x7\n"
+ "whilelt p8.s, XZR, x17\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
- "ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ldr x15, [%x[args], %[offsetof_Args_current_channel]]\n"
"1:" // Channel loop
- "ldr x20, [%x[args], %[offsetof_Args_bias]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_bias]]\n"
"fmov z4.s, #0x0\n"
- "cbz x20, 2f\n"
- "ld1w { z4.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "cbz x19, 2f\n"
+ "ld1w { z4.s }, p1/Z, [x19, x15, LSL #2]\n"
"2:" // Load bias: Done
- "ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
- "mov x21, x20\n"
- "ld1w { z19.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #3\n"
- "incb x20\n"
- "ld1w { z24.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #3\n"
+ "ldr x19, [%x[args], %[offsetof_Args_weights]]\n"
+ "mov x20, x19\n"
+ "ld1w { z19.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #3\n"
+ "incb x19\n"
+ "ld1w { z24.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #3\n"
".inst 0x658aaa69 // bfcvt z9.h, p2/M, z19.s\n"
- "ld1w { z12.s }, p2/Z, [x21]\n"
- "mov x21, x20\n"
+ "ld1w { z12.s }, p2/Z, [x20]\n"
+ "mov x20, x19\n"
".inst 0x648aab09 // bfcvtnt z9.h, p2/M, z24.s\n"
- "incb x20\n"
- "ld1w { z19.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #3\n"
+ "incb x19\n"
+ "ld1w { z19.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #3\n"
".inst 0x658aa983 // bfcvt z3.h, p2/M, z12.s\n"
".inst 0x658aaa62 // bfcvt z2.h, p2/M, z19.s\n"
- "ld1w { z24.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #3\n"
- "ldr x15, [%x[args], %[offsetof_Args_input_cols]]\n"
+ "ld1w { z24.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #3\n"
+ "ldr x14, [%x[args], %[offsetof_Args_input_cols]]\n"
".inst 0x648aab02 // bfcvtnt z2.h, p2/M, z24.s\n"
- "ld1w { z12.s }, p2/Z, [x21]\n"
- "mov x21, x20\n"
+ "ld1w { z12.s }, p2/Z, [x20]\n"
+ "mov x20, x19\n"
".inst 0x658aa980 // bfcvt z0.h, p2/M, z12.s\n"
- "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "ld1w { z19.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #3\n"
+ "ldr x13, [%x[args], %[offsetof_Args_inptr]]\n"
+ "ld1w { z19.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #3\n"
".inst 0x658aaa6a // bfcvt z10.h, p2/M, z19.s\n"
- "sub x20, x15, #0x1\n"
- "ld1w { z24.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #3\n"
- "orr x23, x20, %x[ld_in_col], LSL #18\n"
+ "sub x19, x14, #0x1\n"
+ "ld1w { z24.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #3\n"
+ "orr x22, x19, %x[ld_in_col], LSL #18\n"
"mov z5.d, z4.d\n"
- "ld1w { z12.s }, p2/Z, [x21]\n"
- "orr x23, x17, x23, LSL #20\n"
- "mov x22, #0x9\n"
+ "ld1w { z12.s }, p2/Z, [x20]\n"
+ "orr x22, x16, x22, LSL #20\n"
+ "mov x21, #0x9\n"
"mov z6.d, z4.d\n"
- "add x21, x7, x6\n"
- "lsl x20, %x[ld_in_row], #0x2\n"
+ "add x20, x17, x7\n"
+ "lsl x19, %x[ld_in_row], #0x2\n"
"mov z7.d, z4.d\n"
".inst 0x648aab0a // bfcvtnt z10.h, p2/M, z24.s\n"
".inst 0x658aa981 // bfcvt z1.h, p2/M, z12.s\n"
"mov x8, #0x0\n"
- "ldr x13, [%x[args], %[offsetof_Args_output_cols]]\n"
- "lsl x23, x23, #0x2\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x7, x14\n"
+ "ldr x11, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "lsl x22, x22, #0x2\n"
+ "sub x21, x21, x20\n"
+ "madd x19, x19, x17, x13\n"
"3:" // Issue prefetches
- "subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col], LSL #2\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xf8b64a7c // rprfm pldstrm, x22, [x19]\n"
+ "add x19, x19, %x[ld_in_col], LSL #2\n"
"bgt 3b\n"
- "ldr x11, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x2\n"
- "msub x14, x7, x20, x14\n"
+ "ldr x10, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "lsl x19, %x[ld_in_row], #0x2\n"
+ "msub x13, x17, x19, x13\n"
".inst 0xc0040c80 // mova za.d[x8, #0], { z4.d-z7.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
".inst 0xc0040c81 // mova za.d[x8, #1], { z4.d-z7.d }\n"
- "mov x22, #0x2\n"
- "ldp x10, x9, [x11], #0x10\n"
+ "mov x21, #0x2\n"
+ "ldp x9, x28, [x10], #0x10\n"
".inst 0xc0040c82 // mova za.d[x8, #2], { z4.d-z7.d }\n"
- "ldp x28, x27, [x20], #0x10\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
- "ldp x26, x25, [x11], #0x10\n"
- "ldp x24, x23, [x20], #0x10\n"
- "cbz x21, 5f\n"
- "cmp x21, x22\n"
- "csel x20, x21, x22, LT\n"
- "sub x21, x21, x20\n"
- "sub x22, x22, x20\n"
- "cbz x21, 5f\n"
+ "ldp x27, x26, [x19], #0x10\n"
+ "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+ "ldp x25, x24, [x10], #0x10\n"
+ "ldp x23, x22, [x19], #0x10\n"
+ "cbz x20, 5f\n"
+ "cmp x20, x21\n"
+ "csel x19, x20, x21, LT\n"
+ "sub x20, x20, x19\n"
+ "sub x21, x21, x19\n"
+ "cbz x20, 5f\n"
".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
- "and x22, x21, #0x1\n"
- "add x21, x21, #0x1\n"
+ "and x21, x20, #0x1\n"
+ "add x20, x20, #0x1\n"
".inst 0xc1b7cb7c // fclamp { z28.s-z31.s }, z27.s, z23.s\n"
- "lsr x21, x21, #0x1\n"
- "sub x13, x13, x21\n"
+ "lsr x20, x20, #0x1\n"
+ "sub x11, x11, x20\n"
"4:" // Left padding
- "subs x21, x21, #0x1\n"
- "st1w { z28.s }, p1, [x10]\n"
- "add x10, x10, x28, LSL #2\n"
- "st1w { z29.s }, p1, [x9]\n"
+ "subs x20, x20, #0x1\n"
+ "st1w { z28.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
- "st1w { z30.s }, p1, [x26]\n"
- "add x26, x26, x24, LSL #2\n"
- "st1w { z31.s }, p1, [x25]\n"
+ "st1w { z29.s }, p1, [x28]\n"
+ "add x28, x28, x26, LSL #2\n"
+ "st1w { z30.s }, p1, [x25]\n"
"add x25, x25, x23, LSL #2\n"
+ "st1w { z31.s }, p1, [x24]\n"
+ "add x24, x24, x22, LSL #2\n"
"bgt 4b\n"
"5:" // Left padding: End
- "adds XZR, x7, x6\n"
+ "adds XZR, x17, x7\n"
"bne 10f\n"
- "cbz x22, 8f\n"
- "cmp x22, #0x1\n"
- "sub x15, x15, x22\n"
+ "cbz x21, 8f\n"
+ "cmp x21, #0x1\n"
+ "sub x14, x14, x21\n"
"beq 7f\n"
"6:" // Unpadded: 2 priming loads
- "add x20, x14, %x[ld_in_row], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x14]\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x13]\n"
".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0b // bfcvtnt z11.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
- "ld1w { z22.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z22.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaace // bfcvt z14.h, p2/M, z22.s\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
".inst 0xc1391170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
".inst 0xc1331190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z3.h\n"
"7:" // Unpadded: 1 priming loads
- "add x20, x14, %x[ld_in_row], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x14]\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x13]\n"
".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa0b // bfcvtnt z11.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
+ ".inst 0xc1321170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ ".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
+ ".inst 0xc1301190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z0.h\n"
+ "8:" // Unpadded: 0 priming loads
+ "cmp x14, #0x2\n"
+ "blt 16f\n"
+ "add x20, x13, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x13]\n"
+ ".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
+ "sub x14, x14, #0x2\n"
"ld1w { z16.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "sub x11, x11, #0x1\n"
".inst 0x648aaa0b // bfcvtnt z11.h, p2/M, z16.s\n"
"ld1w { z16.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
+ "lsr x19, x14, #0x1\n"
"ld1w { z16.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "cmp x19, x11\n"
".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
"ld1w { z16.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
+ "csel x21, x19, x11, LT\n"
"ld1w { z16.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
"ld1w { z16.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
+ "and x14, x14, #0x1\n"
"ld1w { z16.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
- ".inst 0xc1321170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+ "sub x11, x11, x21\n"
"ld1w { z16.s }, p1/Z, [x20]\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
- ".inst 0xc1301190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z0.h\n"
- "8:" // Unpadded: 0 priming loads
- "cmp x15, #0x2\n"
- "blt 16f\n"
- "add x21, x14, %x[ld_in_row], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x14]\n"
- ".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
- "sub x15, x15, #0x2\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- "sub x13, x13, #0x1\n"
- ".inst 0x648aaa0b // bfcvtnt z11.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
- "lsr x20, x15, #0x1\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- "cmp x20, x13\n"
- ".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
- "csel x22, x20, x13, LT\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
- "and x15, x15, #0x1\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
- "sub x13, x13, x22\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
- ".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
- "cbz x22, 15f\n"
+ "cbz x21, 15f\n"
"9:" // Unpadded: Main loop
- "add x21, x14, %x[ld_in_row], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x14]\n"
+ "add x20, x13, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x13]\n"
".inst 0xc13a1170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z10.h\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- "ld1w { z18.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "ld1w { z18.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0xc1391171 // bfdot za.s[x8, 1], { z11.h-z14.h }, z9.h\n"
".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0xc1311190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z1.h\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
- "ld1w { z17.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
+ "ld1w { z17.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0xc1331191 // bfdot za.s[x8, 1], { z12.h-z15.h }, z3.h\n"
".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
".inst 0x648aaa4b // bfcvtnt z11.h, p2/M, z18.s\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa2c // bfcvtnt z12.h, p2/M, z17.s\n"
".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
"add x8, x8, #0x1\n"
- "ld1w { z16.s }, p1/Z, [x14]\n"
+ "ld1w { z16.s }, p1/Z, [x13]\n"
".inst 0xc1321170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
- "subs x22, x22, #0x1\n"
- "ld1w { z20.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "subs x21, x21, #0x1\n"
+ "ld1w { z20.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc1b7cb7c // fclamp { z28.s-z31.s }, z27.s, z23.s\n"
- "st1w { z28.s }, p1, [x10]\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
+ "st1w { z28.s }, p1, [x9]\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
".inst 0xc1301190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z0.h\n"
- "add x10, x10, x28, LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
- "st1w { z29.s }, p1, [x9]\n"
- "ld1w { z19.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
"add x9, x9, x27, LSL #2\n"
- "st1w { z30.s }, p1, [x26]\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
+ "st1w { z29.s }, p1, [x28]\n"
+ "ld1w { z19.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "add x28, x28, x26, LSL #2\n"
+ "st1w { z30.s }, p1, [x25]\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
- "add x26, x26, x24, LSL #2\n"
- "ld1w { z18.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "st1w { z31.s }, p1, [x25]\n"
"add x25, x25, x23, LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z18.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "st1w { z31.s }, p1, [x24]\n"
+ "add x24, x24, x22, LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
".inst 0xc0040c82 // mova za.d[x8, #2], { z4.d-z7.d }\n"
- "ld1w { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa8b // bfcvtnt z11.h, p2/M, z20.s\n"
".inst 0x648aaa6c // bfcvtnt z12.h, p2/M, z19.s\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
".inst 0x648aaa4d // bfcvtnt z13.h, p2/M, z18.s\n"
".inst 0x648aaa2e // bfcvtnt z14.h, p2/M, z17.s\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
"bgt 9b\n"
"b 15f\n"
"10:" // Padded
- "cbz x22, 13f\n"
- "cmp x22, #0x1\n"
- "sub x15, x15, x22\n"
+ "cbz x21, 13f\n"
+ "cmp x21, #0x1\n"
+ "sub x14, x14, x21\n"
"beq 12f\n"
"11:" // Padded: 2 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x14]\n"
+ "ld1w { z16.s }, p0/Z, [x13]\n"
".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x648aaa0b // bfcvtnt z11.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
".inst 0xc1391170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
".inst 0xc1331190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z3.h\n"
"12:" // Padded: 1 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x14]\n"
+ "ld1w { z16.s }, p0/Z, [x13]\n"
".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x648aaa0b // bfcvtnt z11.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
".inst 0xc1321170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
".inst 0xc1301190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z0.h\n"
"13:" // Padded: 0 priming loads
- "cmp x15, #0x2\n"
+ "cmp x14, #0x2\n"
"blt 16f\n"
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x14]\n"
+ "ld1w { z16.s }, p0/Z, [x13]\n"
".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x648aaa0b // bfcvtnt z11.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x8\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
- "sub x15, x15, #0x2\n"
- "sub x13, x13, #0x1\n"
- "lsr x20, x15, #0x1\n"
- "cmp x20, x13\n"
- "csel x21, x20, x13, LT\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- "and x15, x15, #0x1\n"
- "sub x13, x13, x21\n"
- "cbz x21, 15f\n"
+ "sub x14, x14, #0x2\n"
+ "sub x11, x11, #0x1\n"
+ "lsr x19, x14, #0x1\n"
+ "cmp x19, x11\n"
+ "csel x20, x19, x11, LT\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "and x14, x14, #0x1\n"
+ "sub x11, x11, x20\n"
+ "cbz x20, 15f\n"
"14:" // Padded: Main loop
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z18.s }, p0/Z, [x14]\n"
+ "ld1w { z18.s }, p0/Z, [x13]\n"
".inst 0xc13a1170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z10.h\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z19.s }, p0/Z, [x20]\n"
+ "ld1w { z19.s }, p0/Z, [x19]\n"
".inst 0xc1391171 // bfdot za.s[x8, 1], { z11.h-z14.h }, z9.h\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0xc1311190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z1.h\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z17.s }, p0/Z, [x20]\n"
+ "ld1w { z17.s }, p0/Z, [x19]\n"
".inst 0xc1331191 // bfdot za.s[x8, 1], { z12.h-z15.h }, z3.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa4b // bfcvt z11.h, p2/M, z18.s\n"
".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z18.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z18.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa6b // bfcvtnt z11.h, p2/M, z19.s\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x8\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x648aaa2c // bfcvtnt z12.h, p2/M, z17.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z17.s }, p0/Z, [x20]\n"
+ "ld1w { z17.s }, p0/Z, [x19]\n"
".inst 0x648aaa4d // bfcvtnt z13.h, p2/M, z18.s\n"
"mov x12, #0x0\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x14]\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x13]\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa2f // bfcvt z15.h, p2/M, z17.s\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z21.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z21.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc1b7cb7c // fclamp { z28.s-z31.s }, z27.s, z23.s\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z17.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "st1w { z28.s }, p1, [x10]\n"
+ "ld1w { z17.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "st1w { z28.s }, p1, [x9]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x4\n"
- "ld1w { z20.s }, p0/Z, [x20]\n"
- "st1w { z29.s }, p1, [x9]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z20.s }, p0/Z, [x19]\n"
+ "st1w { z29.s }, p1, [x28]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z19.s }, p0/Z, [x20]\n"
- "st1w { z30.s }, p1, [x26]\n"
+ "ld1w { z19.s }, p0/Z, [x19]\n"
+ "st1w { z30.s }, p1, [x25]\n"
"add x8, x8, #0x1\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc1321170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z18.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z18.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc1301190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z0.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa2c // bfcvt z12.h, p2/M, z17.s\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x8\n"
- "ld1w { z17.s }, p0/Z, [x20]\n"
+ "ld1w { z17.s }, p0/Z, [x19]\n"
".inst 0x658aaa6d // bfcvt z13.h, p2/M, z19.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "subs x21, x21, #0x1\n"
- "add x10, x10, x28, LSL #2\n"
- "st1w { z31.s }, p1, [x25]\n"
- ".inst 0xc0040c82 // mova za.d[x8, #2], { z4.d-z7.d }\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "subs x20, x20, #0x1\n"
"add x9, x9, x27, LSL #2\n"
- "add x26, x26, x24, LSL #2\n"
+ "st1w { z31.s }, p1, [x24]\n"
+ ".inst 0xc0040c82 // mova za.d[x8, #2], { z4.d-z7.d }\n"
+ "add x28, x28, x26, LSL #2\n"
+ "add x25, x25, x23, LSL #2\n"
".inst 0x648aaaab // bfcvtnt z11.h, p2/M, z21.s\n"
".inst 0x648aaa8c // bfcvtnt z12.h, p2/M, z20.s\n"
- "add x25, x25, x23, LSL #2\n"
+ "add x24, x24, x22, LSL #2\n"
".inst 0x648aaa4d // bfcvtnt z13.h, p2/M, z18.s\n"
".inst 0x648aaa2e // bfcvtnt z14.h, p2/M, z17.s\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
"bgt 14b\n"
"15:" // Main loop tail
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z17.s }, p0/Z, [x14]\n"
+ "ld1w { z17.s }, p0/Z, [x13]\n"
".inst 0xc13a1170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z10.h\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z19.s }, p0/Z, [x20]\n"
+ "ld1w { z19.s }, p0/Z, [x19]\n"
".inst 0xc1391171 // bfdot za.s[x8, 1], { z11.h-z14.h }, z9.h\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0xc1311190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z1.h\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z18.s }, p0/Z, [x20]\n"
+ "ld1w { z18.s }, p0/Z, [x19]\n"
".inst 0xc1331191 // bfdot za.s[x8, 1], { z12.h-z15.h }, z3.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa2b // bfcvt z11.h, p2/M, z17.s\n"
".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z17.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z17.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa6b // bfcvtnt z11.h, p2/M, z19.s\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
".inst 0x648aaa4c // bfcvtnt z12.h, p2/M, z18.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0x648aaa2d // bfcvtnt z13.h, p2/M, z17.s\n"
".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
"add x8, x8, #0x1\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
".inst 0xc1321170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
".inst 0xc1b7cb7c // fclamp { z28.s-z31.s }, z27.s, z23.s\n"
- "st1w { z28.s }, p1, [x10]\n"
- "add x10, x10, x28, LSL #2\n"
- "st1w { z29.s }, p1, [x9]\n"
+ "st1w { z28.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
+ "st1w { z29.s }, p1, [x28]\n"
+ "add x28, x28, x26, LSL #2\n"
".inst 0xc0040c82 // mova za.d[x8, #2], { z4.d-z7.d }\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- "st1w { z30.s }, p1, [x26]\n"
- "add x26, x26, x24, LSL #2\n"
- ".inst 0xc1301190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z0.h\n"
- "st1w { z31.s }, p1, [x25]\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "st1w { z30.s }, p1, [x25]\n"
"add x25, x25, x23, LSL #2\n"
+ ".inst 0xc1301190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z0.h\n"
+ "st1w { z31.s }, p1, [x24]\n"
+ "add x24, x24, x22, LSL #2\n"
"16:" // Main loop skip tail
- "cbz x15, 17f\n" // Skip remainder inputs
+ "cbz x14, 17f\n" // Skip remainder inputs
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x14]\n"
+ "ld1w { z16.s }, p0/Z, [x13]\n"
".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
+ "add x19, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x648aaa0b // bfcvtnt z11.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
".inst 0xc13a1170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z10.h\n"
- "sub x13, x13, #0x1\n"
+ "sub x11, x11, #0x1\n"
".inst 0xc1311190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z1.h\n"
".inst 0xc1391171 // bfdot za.s[x8, 1], { z11.h-z14.h }, z9.h\n"
".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
".inst 0xc1b7cb7c // fclamp { z28.s-z31.s }, z27.s, z23.s\n"
- "st1w { z28.s }, p1, [x10]\n"
- "add x10, x10, x28, LSL #2\n"
+ "st1w { z28.s }, p1, [x9]\n"
+ "add x9, x9, x27, LSL #2\n"
".inst 0xc1331191 // bfdot za.s[x8, 1], { z12.h-z15.h }, z3.h\n"
"add x8, x8, #0x1\n"
- "st1w { z29.s }, p1, [x9]\n"
- "add x9, x9, x27, LSL #2\n"
- "st1w { z30.s }, p1, [x26]\n"
- "add x26, x26, x24, LSL #2\n"
- ".inst 0xc0040c82 // mova za.d[x8, #2], { z4.d-z7.d }\n"
- "st1w { z31.s }, p1, [x25]\n"
+ "st1w { z29.s }, p1, [x28]\n"
+ "add x28, x28, x26, LSL #2\n"
+ "st1w { z30.s }, p1, [x25]\n"
"add x25, x25, x23, LSL #2\n"
+ ".inst 0xc0040c82 // mova za.d[x8, #2], { z4.d-z7.d }\n"
+ "st1w { z31.s }, p1, [x24]\n"
+ "add x24, x24, x22, LSL #2\n"
"17:" // Tail input: End
- "cbz x13, 19f\n"
+ "cbz x11, 19f\n"
"18:" // Right padding loop
".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
"add x8, x8, #0x1\n"
- "subs x13, x13, #0x1\n"
+ "subs x11, x11, #0x1\n"
".inst 0xc1b7cb7c // fclamp { z28.s-z31.s }, z27.s, z23.s\n"
- "st1w { z28.s }, p1, [x10]\n"
- "add x10, x10, x28, LSL #2\n"
- ".inst 0xc0040c82 // mova za.d[x8, #2], { z4.d-z7.d }\n"
- "st1w { z29.s }, p1, [x9]\n"
+ "st1w { z28.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
- "st1w { z30.s }, p1, [x26]\n"
- "add x26, x26, x24, LSL #2\n"
- "st1w { z31.s }, p1, [x25]\n"
+ ".inst 0xc0040c82 // mova za.d[x8, #2], { z4.d-z7.d }\n"
+ "st1w { z29.s }, p1, [x28]\n"
+ "add x28, x28, x26, LSL #2\n"
+ "st1w { z30.s }, p1, [x25]\n"
"add x25, x25, x23, LSL #2\n"
+ "st1w { z31.s }, p1, [x24]\n"
+ "add x24, x24, x22, LSL #2\n"
"bgt 18b\n"
"19:" // End
- "ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
- "incb x20, ALL, MUL #9\n"
- "str x20, [%x[args], %[offsetof_Args_weights]]\n"
- "incw x16\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "whilelt p1.s, x16, x17\n"
- "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "add x14, x14, x20, LSL #2\n"
- "str x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "ldr x11, [%x[args], %[offsetof_Args_outptrs]]\n"
- "ldr x24, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
- "ldp x23, x22, [x11, #0x0]\n"
- "ldp x21, x20, [x24, #0x0]\n"
- "add x23, x23, x21, LSL #2\n"
+ "ldr x19, [%x[args], %[offsetof_Args_weights]]\n"
+ "incb x19, ALL, MUL #9\n"
+ "str x19, [%x[args], %[offsetof_Args_weights]]\n"
+ "incw x15\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+ "whilelt p1.s, x15, x16\n"
+ "ldr x13, [%x[args], %[offsetof_Args_inptr]]\n"
+ "add x13, x13, x19, LSL #2\n"
+ "str x13, [%x[args], %[offsetof_Args_inptr]]\n"
+ "ldr x10, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+ "ldp x22, x21, [x10, #0x0]\n"
+ "ldp x20, x19, [x23, #0x0]\n"
"add x22, x22, x20, LSL #2\n"
- "stp x23, x22, [x11, #0x0]\n"
- "ldp x23, x22, [x11, #0x10]\n"
- "ldp x21, x20, [x24, #0x10]\n"
- "add x23, x23, x21, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "stp x22, x21, [x10, #0x0]\n"
+ "ldp x22, x21, [x10, #0x10]\n"
+ "ldp x20, x19, [x23, #0x10]\n"
"add x22, x22, x20, LSL #2\n"
- "stp x23, x22, [x11, #0x10]\n"
+ "add x21, x21, x19, LSL #2\n"
+ "stp x22, x21, [x10, #0x10]\n"
"b.any 1b\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_bias] "I" (offsetof(Args, bias)), [offsetof_Args_clamp_max] "I" (offsetof(Args, clamp_max)), [offsetof_Args_clamp_min] "I" (offsetof(Args, clamp_min)), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za/generic.cpp
index 2b3a247686..17f2455469 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -69,258 +69,294 @@ void sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za_impl(
Args args = { inptr, ld_in_vl, pad_top, 8u - std::min(8u, pad_top + valid_input_rows), pad_left, weights, bias, valid_input_cols, output_cols, outptrs, outlds, outvllds, start_channel, valid_channels, act_min, act_max };
__asm__ __volatile__(
- "ldr x4, [%x[args], %[offsetof_Args_pad_bottom]]\n"
- "mov x20, #0x8\n"
+ "ldr x6, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "mov x19, #0x8\n"
".inst 0xd503477f // SMSTART ZA\n"
- "sub x20, x20, x4\n"
+ "sub x19, x19, x6\n"
"ldr x5, [%x[args], %[offsetof_Args_pad_top]]\n"
"ptrue p2.b\n"
".inst 0x25207812 // ptrue pn10.b\n"
"ld1rw { z26.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_min]]\n"
- "ldr x6, [%x[args], %[offsetof_Args_n_channels]]\n"
- "whilelt p1.s, XZR, x6\n"
- "whilelt p9.s, XZR, x20\n"
+ "ldr x7, [%x[args], %[offsetof_Args_n_channels]]\n"
+ "whilelt p1.s, XZR, x7\n"
+ "whilelt p9.s, XZR, x19\n"
"ld1rw { z31.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_max]]\n"
"whilelt p8.s, XZR, x5\n"
"addvl SP, SP, #-30\n"
- "ldr x7, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ldr x17, [%x[args], %[offsetof_Args_current_channel]]\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
"1:" // Channel loop
- "ldr x20, [%x[args], %[offsetof_Args_bias]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_bias]]\n"
"fmov z24.s, #0x0\n"
- "cbz x20, 2f\n"
- "ld1w { z24.s }, p1/Z, [x20, x7, LSL #2]\n"
+ "cbz x19, 2f\n"
+ "ld1w { z24.s }, p1/Z, [x19, x17, LSL #2]\n"
"2:" // Load bias: Done
- "ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
- "mov x21, x20\n"
- "ld1w { z18.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
- "ld1w { z11.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
+ "ldr x19, [%x[args], %[offsetof_Args_weights]]\n"
+ "mov x20, x19\n"
+ "ld1w { z18.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
+ "ld1w { z11.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
"fmov z4.s, #0x0\n"
- "incb x20\n"
- "ld1w { z3.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
+ "incb x19\n"
+ "ld1w { z3.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
".inst 0x658aaa45 // bfcvt z5.h, p2/M, z18.s\n"
".inst 0x658aa966 // bfcvt z6.h, p2/M, z11.s\n"
- "ld1w { z17.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
- "addvl x24, SP, #30\n"
+ "ld1w { z17.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
+ "addvl x23, SP, #30\n"
".inst 0x648aaa44 // bfcvtnt z4.h, p2/M, z18.s\n"
- "ld1w { z16.s }, p2/Z, [x21]\n"
- "mov x21, x20\n"
+ "ld1w { z16.s }, p2/Z, [x20]\n"
+ "mov x20, x19\n"
".inst 0x658aa867 // bfcvt z7.h, p2/M, z3.s\n"
- "addvl x24, x24, #-6\n"
- "ld1w { z18.s }, p2/Z, [x21]\n"
+ "addvl x23, x23, #-6\n"
+ "ld1w { z18.s }, p2/Z, [x20]\n"
".inst 0x658aaa28 // bfcvt z8.h, p2/M, z17.s\n"
- "incb x21, ALL, MUL #5\n"
- "st1h { z4.h }, p2, [x24]\n"
+ "incb x20, ALL, MUL #5\n"
+ "st1h { z4.h }, p2, [x23]\n"
".inst 0x648aa965 // bfcvtnt z5.h, p2/M, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
+ "ld1w { z11.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
"fmov z4.s, #0x0\n"
- "st1h { z5.h }, p2, [x24, #1, MUL VL]\n"
+ "st1h { z5.h }, p2, [x23, #1, MUL VL]\n"
".inst 0x648aa866 // bfcvtnt z6.h, p2/M, z3.s\n"
- "ld1w { z3.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
+ "ld1w { z3.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
".inst 0x658aaa45 // bfcvt z5.h, p2/M, z18.s\n"
".inst 0x648aaa27 // bfcvtnt z7.h, p2/M, z17.s\n"
- "incb x20\n"
- "ld1w { z17.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
- "st1h { z6.h }, p2, [x24, #2, MUL VL]\n"
+ "incb x19\n"
+ "ld1w { z17.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
+ "st1h { z6.h }, p2, [x23, #2, MUL VL]\n"
".inst 0x648aaa08 // bfcvtnt z8.h, p2/M, z16.s\n"
".inst 0x658aaa09 // bfcvt z9.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x21]\n"
+ "ld1w { z16.s }, p2/Z, [x20]\n"
".inst 0x658aa966 // bfcvt z6.h, p2/M, z11.s\n"
- "mov x21, x20\n"
- "st1h { z7.h }, p2, [x24, #3, MUL VL]\n"
+ "mov x20, x19\n"
+ "st1h { z7.h }, p2, [x23, #3, MUL VL]\n"
".inst 0x648aaa44 // bfcvtnt z4.h, p2/M, z18.s\n"
".inst 0x658aa867 // bfcvt z7.h, p2/M, z3.s\n"
- "ld1w { z18.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
- "st1h { z8.h }, p2, [x24, #4, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
+ "st1h { z8.h }, p2, [x23, #4, MUL VL]\n"
".inst 0x648aa965 // bfcvtnt z5.h, p2/M, z11.s\n"
".inst 0x658aaa28 // bfcvt z8.h, p2/M, z17.s\n"
- "incb x20\n"
- "st1h { z9.h }, p2, [x24, #5, MUL VL]\n"
- "addvl x24, x24, #-6\n"
- "ld1w { z11.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
- "st1h { z4.h }, p2, [x24]\n"
+ "incb x19\n"
+ "st1h { z9.h }, p2, [x23, #5, MUL VL]\n"
+ "addvl x23, x23, #-6\n"
+ "ld1w { z11.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
+ "st1h { z4.h }, p2, [x23]\n"
"fmov z4.s, #0x0\n"
".inst 0x648aa866 // bfcvtnt z6.h, p2/M, z3.s\n"
- "ldr x17, [%x[args], %[offsetof_Args_input_cols]]\n"
- "st1h { z5.h }, p2, [x24, #1, MUL VL]\n"
- "ld1w { z3.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
+ "ldr x16, [%x[args], %[offsetof_Args_input_cols]]\n"
+ "st1h { z5.h }, p2, [x23, #1, MUL VL]\n"
+ "ld1w { z3.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
".inst 0x658aaa45 // bfcvt z5.h, p2/M, z18.s\n"
- "st1h { z6.h }, p2, [x24, #2, MUL VL]\n"
+ "st1h { z6.h }, p2, [x23, #2, MUL VL]\n"
".inst 0x648aaa27 // bfcvtnt z7.h, p2/M, z17.s\n"
- "ld1w { z17.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
+ "ld1w { z17.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
".inst 0x658aa966 // bfcvt z6.h, p2/M, z11.s\n"
".inst 0x648aaa08 // bfcvtnt z8.h, p2/M, z16.s\n"
- "st1h { z7.h }, p2, [x24, #3, MUL VL]\n"
- "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
+ "st1h { z7.h }, p2, [x23, #3, MUL VL]\n"
+ "ldr x15, [%x[args], %[offsetof_Args_inptr]]\n"
".inst 0x658aaa09 // bfcvt z9.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x21]\n"
- "mov x21, x20\n"
+ "ld1w { z16.s }, p2/Z, [x20]\n"
+ "mov x20, x19\n"
".inst 0x648aaa44 // bfcvtnt z4.h, p2/M, z18.s\n"
".inst 0x658aa867 // bfcvt z7.h, p2/M, z3.s\n"
- "ld1w { z18.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
- "st1h { z8.h }, p2, [x24, #4, MUL VL]\n"
- "st1h { z9.h }, p2, [x24, #5, MUL VL]\n"
- "addvl x24, x24, #-6\n"
+ "ld1w { z18.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
+ "st1h { z8.h }, p2, [x23, #4, MUL VL]\n"
+ "st1h { z9.h }, p2, [x23, #5, MUL VL]\n"
+ "addvl x23, x23, #-6\n"
".inst 0x648aa965 // bfcvtnt z5.h, p2/M, z11.s\n"
".inst 0x658aaa28 // bfcvt z8.h, p2/M, z17.s\n"
- "ld1w { z11.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
- "st1h { z4.h }, p2, [x24]\n"
+ "ld1w { z11.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
+ "st1h { z4.h }, p2, [x23]\n"
".inst 0x648aa866 // bfcvtnt z6.h, p2/M, z3.s\n"
- "ld1w { z3.s }, p2/Z, [x21]\n"
+ "ld1w { z3.s }, p2/Z, [x20]\n"
"fmov z4.s, #0x0\n"
- "st1h { z5.h }, p2, [x24, #1, MUL VL]\n"
- "incb x21, ALL, MUL #5\n"
+ "st1h { z5.h }, p2, [x23, #1, MUL VL]\n"
+ "incb x20, ALL, MUL #5\n"
".inst 0x658aaa45 // bfcvt z5.h, p2/M, z18.s\n"
- "st1h { z6.h }, p2, [x24, #2, MUL VL]\n"
+ "st1h { z6.h }, p2, [x23, #2, MUL VL]\n"
".inst 0x648aaa27 // bfcvtnt z7.h, p2/M, z17.s\n"
- "incb x20\n"
- "ld1w { z17.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
+ "incb x19\n"
+ "ld1w { z17.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
".inst 0x658aa966 // bfcvt z6.h, p2/M, z11.s\n"
- "st1h { z7.h }, p2, [x24, #3, MUL VL]\n"
+ "st1h { z7.h }, p2, [x23, #3, MUL VL]\n"
".inst 0x648aaa08 // bfcvtnt z8.h, p2/M, z16.s\n"
".inst 0x658aaa09 // bfcvt z9.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x21]\n"
- "mov x21, x20\n"
+ "ld1w { z16.s }, p2/Z, [x20]\n"
+ "mov x20, x19\n"
".inst 0x658aa867 // bfcvt z7.h, p2/M, z3.s\n"
".inst 0x648aaa44 // bfcvtnt z4.h, p2/M, z18.s\n"
- "ld1w { z18.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
+ "ld1w { z18.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
".inst 0x648aa965 // bfcvtnt z5.h, p2/M, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
- "st1h { z8.h }, p2, [x24, #4, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
+ "st1h { z8.h }, p2, [x23, #4, MUL VL]\n"
".inst 0x648aa866 // bfcvtnt z6.h, p2/M, z3.s\n"
".inst 0x658aaa28 // bfcvt z8.h, p2/M, z17.s\n"
- "ld1w { z3.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
+ "ld1w { z3.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
".inst 0x648aaa27 // bfcvtnt z7.h, p2/M, z17.s\n"
- "ld1w { z17.s }, p2/Z, [x21]\n"
- "st1h { z9.h }, p2, [x24, #5, MUL VL]\n"
- "addvl x24, x24, #-6\n"
- "st1h { z4.h }, p2, [x24]\n"
+ "ld1w { z17.s }, p2/Z, [x20]\n"
+ "st1h { z9.h }, p2, [x23, #5, MUL VL]\n"
+ "addvl x23, x23, #-6\n"
+ "st1h { z4.h }, p2, [x23]\n"
".inst 0x648aaa08 // bfcvtnt z8.h, p2/M, z16.s\n"
- "incb x21, ALL, MUL #5\n"
+ "incb x20, ALL, MUL #5\n"
"fmov z4.s, #0x0\n"
- "st1h { z5.h }, p2, [x24, #1, MUL VL]\n"
+ "st1h { z5.h }, p2, [x23, #1, MUL VL]\n"
".inst 0x658aaa45 // bfcvt z5.h, p2/M, z18.s\n"
".inst 0x658aaa09 // bfcvt z9.h, p2/M, z16.s\n"
- "sub x20, x17, #0x1\n"
- "st1h { z6.h }, p2, [x24, #2, MUL VL]\n"
+ "sub x19, x16, #0x1\n"
+ "st1h { z6.h }, p2, [x23, #2, MUL VL]\n"
".inst 0x658aa966 // bfcvt z6.h, p2/M, z11.s\n"
- "ld1w { z16.s }, p2/Z, [x21]\n"
- "orr x23, x20, %x[ld_in_col], LSL #18\n"
- "st1h { z7.h }, p2, [x24, #3, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x20]\n"
+ "orr x22, x19, %x[ld_in_col], LSL #18\n"
+ "st1h { z7.h }, p2, [x23, #3, MUL VL]\n"
".inst 0x658aa867 // bfcvt z7.h, p2/M, z3.s\n"
- "orr x23, x6, x23, LSL #20\n"
- "mov x22, #0x8\n"
- "st1h { z8.h }, p2, [x24, #4, MUL VL]\n"
+ "orr x22, x7, x22, LSL #20\n"
+ "mov x21, #0x8\n"
+ "st1h { z8.h }, p2, [x23, #4, MUL VL]\n"
".inst 0x658aaa28 // bfcvt z8.h, p2/M, z17.s\n"
- "add x21, x5, x4\n"
- "lsl x20, %x[ld_in_row], #0x2\n"
- "st1h { z9.h }, p2, [x24, #5, MUL VL]\n"
- "addvl x24, x24, #-6\n"
+ "add x20, x5, x6\n"
+ "lsl x19, %x[ld_in_row], #0x2\n"
+ "st1h { z9.h }, p2, [x23, #5, MUL VL]\n"
+ "addvl x23, x23, #-6\n"
"mov z25.d, z24.d\n"
".inst 0x648aaa44 // bfcvtnt z4.h, p2/M, z18.s\n"
- "st1h { z4.h }, p2, [x24]\n"
+ "st1h { z4.h }, p2, [x23]\n"
".inst 0x648aa965 // bfcvtnt z5.h, p2/M, z11.s\n"
".inst 0x648aa866 // bfcvtnt z6.h, p2/M, z3.s\n"
"mov x11, #0x0\n"
- "st1h { z5.h }, p2, [x24, #1, MUL VL]\n"
+ "st1h { z5.h }, p2, [x23, #1, MUL VL]\n"
".inst 0x648aaa27 // bfcvtnt z7.h, p2/M, z17.s\n"
".inst 0x648aaa08 // bfcvtnt z8.h, p2/M, z16.s\n"
"mov x8, #0x8\n"
- "st1h { z6.h }, p2, [x24, #2, MUL VL]\n"
+ "st1h { z6.h }, p2, [x23, #2, MUL VL]\n"
".inst 0x658aaa09 // bfcvt z9.h, p2/M, z16.s\n"
- "ldr x15, [%x[args], %[offsetof_Args_output_cols]]\n"
- "lsl x23, x23, #0x2\n"
- "st1h { z7.h }, p2, [x24, #3, MUL VL]\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x5, x16\n"
- "st1h { z8.h }, p2, [x24, #4, MUL VL]\n"
- "st1h { z9.h }, p2, [x24, #5, MUL VL]\n"
+ "ldr x14, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "lsl x22, x22, #0x2\n"
+ "st1h { z7.h }, p2, [x23, #3, MUL VL]\n"
+ "sub x21, x21, x20\n"
+ "madd x19, x19, x5, x15\n"
+ "st1h { z8.h }, p2, [x23, #4, MUL VL]\n"
+ "st1h { z9.h }, p2, [x23, #5, MUL VL]\n"
"3:" // Issue prefetches
- "subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col], LSL #2\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xf8b64a7c // rprfm pldstrm, x22, [x19]\n"
+ "add x19, x19, %x[ld_in_col], LSL #2\n"
"bgt 3b\n"
- "ldr x25, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x2\n"
- "msub x16, x5, x20, x16\n"
+ "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "lsl x19, %x[ld_in_row], #0x2\n"
+ "msub x15, x5, x19, x15\n"
".inst 0xc0046b00 // mova za.d[x11, #0], { z24.d-z25.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
".inst 0xc0046b01 // mova za.d[x11, #1], { z24.d-z25.d }\n"
- "mov x22, #0x4\n"
- "ldp x14, x13, [x25], #0x10\n"
+ "mov x21, #0x4\n"
+ "ldp x13, x0, [x24], #0x10\n"
".inst 0xc0046b02 // mova za.d[x11, #2], { z24.d-z25.d }\n"
- "ldp x0, x10, [x20], #0x10\n"
+ "ldp x10, x9, [x19], #0x10\n"
".inst 0xc0046b03 // mova za.d[x11, #3], { z24.d-z25.d }\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
+ "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
".inst 0xc0046b04 // mova za.d[x11, #4], { z24.d-z25.d }\n"
- "ldp x9, x28, [x25], #0x10\n"
+ "ldp x28, x27, [x24], #0x10\n"
".inst 0xc0046b05 // mova za.d[x11, #5], { z24.d-z25.d }\n"
- "ldp x27, x26, [x20], #0x10\n"
+ "ldp x26, x25, [x19], #0x10\n"
".inst 0xc0046b06 // mova za.d[x11, #6], { z24.d-z25.d }\n"
".inst 0xc0046b07 // mova za.d[x11, #7], { z24.d-z25.d }\n"
".inst 0xc0040b00 // mova za.d[x8, #0], { z24.d-z25.d }\n"
".inst 0xc0040b01 // mova za.d[x8, #1], { z24.d-z25.d }\n"
- "cbz x21, 5f\n"
- "cmp x21, x22\n"
- "csel x20, x21, x22, LT\n"
- "sub x21, x21, x20\n"
- "sub x22, x22, x20\n"
- "cbz x21, 5f\n"
+ "cbz x20, 5f\n"
+ "cmp x20, x21\n"
+ "csel x19, x20, x21, LT\n"
+ "sub x20, x20, x19\n"
+ "sub x21, x21, x19\n"
+ "cbz x20, 5f\n"
".inst 0xc0066800 // mova { z0.d-z1.d }, za.d[x11, #0]\n"
- "sub x15, x15, x21\n"
+ "sub x14, x14, x20\n"
".inst 0xc0066822 // mova { z2.d-z3.d }, za.d[x11, #1]\n"
".inst 0xc1bfcb40 // fclamp { z0.s-z3.s }, z26.s, z31.s\n"
"4:" // Left padding
- "subs x21, x21, #0x1\n"
- "st1w { z0.s }, p1, [x14]\n"
- "add x14, x14, x0, LSL #2\n"
- "st1w { z2.s }, p1, [x13]\n"
+ "subs x20, x20, #0x1\n"
+ "st1w { z0.s }, p1, [x13]\n"
"add x13, x13, x10, LSL #2\n"
- "st1w { z1.s }, p1, [x9]\n"
- "add x9, x9, x27, LSL #2\n"
- "st1w { z3.s }, p1, [x28]\n"
+ "st1w { z2.s }, p1, [x0]\n"
+ "add x0, x0, x9, LSL #2\n"
+ "st1w { z1.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
+ "st1w { z3.s }, p1, [x27]\n"
+ "add x27, x27, x25, LSL #2\n"
"bgt 4b\n"
"5:" // Left padding: End
- "adds XZR, x5, x4\n"
+ "adds XZR, x5, x6\n"
"bne 12f\n"
- "cbz x22, 10f\n"
- "cmp x22, #0x1\n"
- "sub x17, x17, x22\n"
+ "cbz x21, 10f\n"
+ "cmp x21, #0x1\n"
+ "sub x16, x16, x21\n"
"beq 9f\n"
- "cmp x22, #0x2\n"
+ "cmp x21, #0x2\n"
"beq 8f\n"
- "cmp x22, #0x3\n"
+ "cmp x21, #0x3\n"
"beq 7f\n"
"6:" // Unpadded: 4 priming loads
- "add x21, x16, %x[ld_in_row], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x16]\n"
+ "add x20, x15, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x15]\n"
".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
- "addvl x20, SP, #24\n"
+ "addvl x19, SP, #24\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
+ "add x15, x15, %x[ld_in_col], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
+ ".inst 0xa0402a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc1257190 // bfdot za.s[x11, 0], { z12.h-z13.h }, z5.h\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
+ ".inst 0xc1247191 // bfdot za.s[x11, 1], { z12.h-z13.h }, z4.h\n"
+ ".inst 0xa0412a66 // ld1h { z6.h-z7.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+ ".inst 0xc12771b0 // bfdot za.s[x11, 0], { z13.h-z14.h }, z7.h\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ ".inst 0x648aaa0f // bfcvtnt z15.h, p2/M, z16.s\n"
+ ".inst 0xc12671b1 // bfdot za.s[x11, 1], { z13.h-z14.h }, z6.h\n"
+ ".inst 0xa0422a68 // ld1h { z8.h-z9.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+ ".inst 0xc12971d0 // bfdot za.s[x11, 0], { z14.h-z15.h }, z9.h\n"
+ ".inst 0xc12871d1 // bfdot za.s[x11, 1], { z14.h-z15.h }, z8.h\n"
+ "7:" // Unpadded: 3 priming loads
+ "add x21, x15, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x15]\n"
+ ".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
+ "addvl x20, SP, #18\n"
"ld1w { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
- "add x16, x16, %x[ld_in_col], LSL #2\n"
+ "addvl x19, SP, #24\n"
"ld1w { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
+ "add x15, x15, %x[ld_in_col], LSL #2\n"
"ld1w { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
@@ -336,30 +372,40 @@ void sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za_impl(
"add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
".inst 0xc1247191 // bfdot za.s[x11, 1], { z12.h-z13.h }, z4.h\n"
+ ".inst 0xa0402a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc1257192 // bfdot za.s[x11, 2], { z12.h-z13.h }, z5.h\n"
".inst 0xa0412a86 // ld1h { z6.h-z7.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc12771b0 // bfdot za.s[x11, 0], { z13.h-z14.h }, z7.h\n"
+ ".inst 0xc1247193 // bfdot za.s[x11, 3], { z12.h-z13.h }, z4.h\n"
"ld1w { z16.s }, p1/Z, [x21]\n"
".inst 0x648aaa0f // bfcvtnt z15.h, p2/M, z16.s\n"
+ ".inst 0xc12771b0 // bfdot za.s[x11, 0], { z13.h-z14.h }, z7.h\n"
".inst 0xc12671b1 // bfdot za.s[x11, 1], { z13.h-z14.h }, z6.h\n"
+ ".inst 0xa0412a66 // ld1h { z6.h-z7.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
".inst 0xa0422a88 // ld1h { z8.h-z9.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xc12771b2 // bfdot za.s[x11, 2], { z13.h-z14.h }, z7.h\n"
+ ".inst 0xc12671b3 // bfdot za.s[x11, 3], { z13.h-z14.h }, z6.h\n"
".inst 0xc12971d0 // bfdot za.s[x11, 0], { z14.h-z15.h }, z9.h\n"
".inst 0xc12871d1 // bfdot za.s[x11, 1], { z14.h-z15.h }, z8.h\n"
- "7:" // Unpadded: 3 priming loads
- "add x22, x16, %x[ld_in_row], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x16]\n"
+ ".inst 0xa0422a68 // ld1h { z8.h-z9.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+ ".inst 0xc12971d2 // bfdot za.s[x11, 2], { z14.h-z15.h }, z9.h\n"
+ ".inst 0xc12871d3 // bfdot za.s[x11, 3], { z14.h-z15.h }, z8.h\n"
+ "8:" // Unpadded: 2 priming loads
+ "add x22, x15, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x15]\n"
".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
- "addvl x21, SP, #18\n"
+ "addvl x21, SP, #12\n"
"ld1w { z16.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
- "addvl x20, SP, #24\n"
+ "addvl x20, SP, #18\n"
"ld1w { z16.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
- "add x16, x16, %x[ld_in_col], LSL #2\n"
+ "addvl x19, SP, #24\n"
"ld1w { z16.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
+ "add x15, x15, %x[ld_in_col], LSL #2\n"
"ld1w { z16.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
@@ -379,36 +425,46 @@ void sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za_impl(
"ld1w { z16.s }, p1/Z, [x22]\n"
".inst 0x648aaa0f // bfcvtnt z15.h, p2/M, z16.s\n"
".inst 0xc12771b0 // bfdot za.s[x11, 0], { z13.h-z14.h }, z7.h\n"
+ ".inst 0xa0402a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19]\n"
".inst 0xc12671b1 // bfdot za.s[x11, 1], { z13.h-z14.h }, z6.h\n"
".inst 0xa0412a86 // ld1h { z6.h-z7.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc1257194 // bfdot za.s[x11, 4], { z12.h-z13.h }, z5.h\n"
".inst 0xa0422aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xc1247195 // bfdot za.s[x11, 5], { z12.h-z13.h }, z4.h\n"
".inst 0xc12771b2 // bfdot za.s[x11, 2], { z13.h-z14.h }, z7.h\n"
".inst 0xc12671b3 // bfdot za.s[x11, 3], { z13.h-z14.h }, z6.h\n"
+ ".inst 0xa0412a66 // ld1h { z6.h-z7.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
".inst 0xc12971d0 // bfdot za.s[x11, 0], { z14.h-z15.h }, z9.h\n"
".inst 0xc12871d1 // bfdot za.s[x11, 1], { z14.h-z15.h }, z8.h\n"
".inst 0xa0422a88 // ld1h { z8.h-z9.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xc12771b4 // bfdot za.s[x11, 4], { z13.h-z14.h }, z7.h\n"
+ ".inst 0xc12671b5 // bfdot za.s[x11, 5], { z13.h-z14.h }, z6.h\n"
".inst 0xc12971d2 // bfdot za.s[x11, 2], { z14.h-z15.h }, z9.h\n"
".inst 0xc12871d3 // bfdot za.s[x11, 3], { z14.h-z15.h }, z8.h\n"
- "8:" // Unpadded: 2 priming loads
- "add x23, x16, %x[ld_in_row], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x16]\n"
+ ".inst 0xa0422a68 // ld1h { z8.h-z9.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+ ".inst 0xc12971d4 // bfdot za.s[x11, 4], { z14.h-z15.h }, z9.h\n"
+ ".inst 0xc12871d5 // bfdot za.s[x11, 5], { z14.h-z15.h }, z8.h\n"
+ "9:" // Unpadded: 1 priming loads
+ "add x23, x15, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x15]\n"
".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
- "addvl x22, SP, #12\n"
+ "addvl x22, SP, #6\n"
"ld1w { z16.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
- "addvl x21, SP, #18\n"
+ "addvl x21, SP, #12\n"
"ld1w { z16.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
- "addvl x20, SP, #24\n"
+ "addvl x20, SP, #18\n"
"ld1w { z16.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
- "add x16, x16, %x[ld_in_col], LSL #2\n"
+ "addvl x19, SP, #24\n"
"ld1w { z16.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
+ "add x15, x15, %x[ld_in_col], LSL #2\n"
"ld1w { z16.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
@@ -431,177 +487,121 @@ void sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za_impl(
".inst 0xc1257194 // bfdot za.s[x11, 4], { z12.h-z13.h }, z5.h\n"
".inst 0xa0422ac8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0xc1247195 // bfdot za.s[x11, 5], { z12.h-z13.h }, z4.h\n"
+ ".inst 0xa0402a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19]\n"
".inst 0xc12771b2 // bfdot za.s[x11, 2], { z13.h-z14.h }, z7.h\n"
".inst 0xc12671b3 // bfdot za.s[x11, 3], { z13.h-z14.h }, z6.h\n"
".inst 0xa0412a86 // ld1h { z6.h-z7.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
".inst 0xc12971d0 // bfdot za.s[x11, 0], { z14.h-z15.h }, z9.h\n"
".inst 0xc12871d1 // bfdot za.s[x11, 1], { z14.h-z15.h }, z8.h\n"
".inst 0xa0422aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc12771b4 // bfdot za.s[x11, 4], { z13.h-z14.h }, z7.h\n"
- ".inst 0xc12671b5 // bfdot za.s[x11, 5], { z13.h-z14.h }, z6.h\n"
- ".inst 0xc12971d2 // bfdot za.s[x11, 2], { z14.h-z15.h }, z9.h\n"
- ".inst 0xc12871d3 // bfdot za.s[x11, 3], { z14.h-z15.h }, z8.h\n"
- ".inst 0xa0422a88 // ld1h { z8.h-z9.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc12971d4 // bfdot za.s[x11, 4], { z14.h-z15.h }, z9.h\n"
- ".inst 0xc12871d5 // bfdot za.s[x11, 5], { z14.h-z15.h }, z8.h\n"
- "9:" // Unpadded: 1 priming loads
- "add x24, x16, %x[ld_in_row], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x16]\n"
- ".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
- "addvl x23, SP, #6\n"
- "ld1w { z16.s }, p1/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
- "addvl x22, SP, #12\n"
- "ld1w { z16.s }, p1/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
- "addvl x21, SP, #18\n"
- "ld1w { z16.s }, p1/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
- "addvl x20, SP, #24\n"
- "ld1w { z16.s }, p1/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
- "add x16, x16, %x[ld_in_col], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
- ".inst 0xa0402ae4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x23]\n"
- ".inst 0xc1257190 // bfdot za.s[x11, 0], { z12.h-z13.h }, z5.h\n"
- "ld1w { z16.s }, p1/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
- ".inst 0xc1247191 // bfdot za.s[x11, 1], { z12.h-z13.h }, z4.h\n"
- ".inst 0xa0402ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22]\n"
- ".inst 0xc1257192 // bfdot za.s[x11, 2], { z12.h-z13.h }, z5.h\n"
- ".inst 0xa0412ae6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
- ".inst 0xc1247193 // bfdot za.s[x11, 3], { z12.h-z13.h }, z4.h\n"
- "ld1w { z16.s }, p1/Z, [x24]\n"
- ".inst 0x648aaa0f // bfcvtnt z15.h, p2/M, z16.s\n"
- ".inst 0xc12771b0 // bfdot za.s[x11, 0], { z13.h-z14.h }, z7.h\n"
- ".inst 0xa0402aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc12671b1 // bfdot za.s[x11, 1], { z13.h-z14.h }, z6.h\n"
- ".inst 0xa0412ac6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xc1257194 // bfdot za.s[x11, 4], { z12.h-z13.h }, z5.h\n"
- ".inst 0xa0422ae8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xc1247195 // bfdot za.s[x11, 5], { z12.h-z13.h }, z4.h\n"
- ".inst 0xa0402a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc12771b2 // bfdot za.s[x11, 2], { z13.h-z14.h }, z7.h\n"
- ".inst 0xc12671b3 // bfdot za.s[x11, 3], { z13.h-z14.h }, z6.h\n"
- ".inst 0xa0412aa6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc12971d0 // bfdot za.s[x11, 0], { z14.h-z15.h }, z9.h\n"
- ".inst 0xc12871d1 // bfdot za.s[x11, 1], { z14.h-z15.h }, z8.h\n"
- ".inst 0xa0422ac8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0xc1257196 // bfdot za.s[x11, 6], { z12.h-z13.h }, z5.h\n"
".inst 0xc1247197 // bfdot za.s[x11, 7], { z12.h-z13.h }, z4.h\n"
".inst 0xc12771b4 // bfdot za.s[x11, 4], { z13.h-z14.h }, z7.h\n"
".inst 0xc12671b5 // bfdot za.s[x11, 5], { z13.h-z14.h }, z6.h\n"
- ".inst 0xa0412a86 // ld1h { z6.h-z7.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa0412a66 // ld1h { z6.h-z7.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
".inst 0xc12971d2 // bfdot za.s[x11, 2], { z14.h-z15.h }, z9.h\n"
".inst 0xc12871d3 // bfdot za.s[x11, 3], { z14.h-z15.h }, z8.h\n"
- ".inst 0xa0422aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xa0422a88 // ld1h { z8.h-z9.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
".inst 0xc12771b6 // bfdot za.s[x11, 6], { z13.h-z14.h }, z7.h\n"
".inst 0xc12671b7 // bfdot za.s[x11, 7], { z13.h-z14.h }, z6.h\n"
".inst 0xc12971d4 // bfdot za.s[x11, 4], { z14.h-z15.h }, z9.h\n"
".inst 0xc12871d5 // bfdot za.s[x11, 5], { z14.h-z15.h }, z8.h\n"
- ".inst 0xa0422a88 // ld1h { z8.h-z9.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xa0422a68 // ld1h { z8.h-z9.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
".inst 0xc12971d6 // bfdot za.s[x11, 6], { z14.h-z15.h }, z9.h\n"
".inst 0xc12871d7 // bfdot za.s[x11, 7], { z14.h-z15.h }, z8.h\n"
"10:" // Unpadded: 0 priming loads
".inst 0xa0402be4 // ld1h { z4.h-z5.h }, pn10.b/Z, [SP]\n"
".inst 0xa0412be6 // ld1h { z6.h-z7.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
".inst 0xa0422be8 // ld1h { z8.h-z9.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
- "cbz x17, 20f\n"
- "add x20, x16, %x[ld_in_row], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x16]\n"
+ "cbz x16, 20f\n"
+ "add x19, x15, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x15]\n"
".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
- "sub x17, x17, #0x1\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "sub x15, x15, #0x1\n"
+ "sub x16, x16, #0x1\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "sub x14, x14, #0x1\n"
".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
- "cmp x17, x15\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "csel x25, x17, x15, LT\n"
+ "cmp x16, x14\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "csel x24, x16, x14, LT\n"
".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
- "add x16, x16, %x[ld_in_col], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x15, x15, %x[ld_in_col], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
- "sub x15, x15, x25\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "sub x14, x14, x24\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
".inst 0x648aaa0f // bfcvtnt z15.h, p2/M, z16.s\n"
- "cbz x25, 19f\n"
+ "cbz x24, 19f\n"
"11:" // Unpadded: Main loop
- "addvl x24, SP, #6\n"
+ "addvl x23, SP, #6\n"
".inst 0xc1257190 // bfdot za.s[x11, 0], { z12.h-z13.h }, z5.h\n"
- "addvl x23, SP, #12\n"
- "ld1w { z23.s }, p1/Z, [x16]\n"
+ "addvl x22, SP, #12\n"
+ "ld1w { z23.s }, p1/Z, [x15]\n"
".inst 0xc1247191 // bfdot za.s[x11, 1], { z12.h-z13.h }, z4.h\n"
- ".inst 0xa0402b04 // ld1h { z4.h-z5.h }, pn10.b/Z, [x24]\n"
- "addvl x22, SP, #18\n"
- "addvl x21, SP, #24\n"
+ ".inst 0xa0402ae4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x23]\n"
+ "addvl x21, SP, #18\n"
+ "addvl x20, SP, #24\n"
".inst 0xc1257192 // bfdot za.s[x11, 2], { z12.h-z13.h }, z5.h\n"
- "add x20, x16, %x[ld_in_row], LSL #2\n"
- "ld1w { z22.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x15, %x[ld_in_row], LSL #2\n"
+ "ld1w { z22.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc1247193 // bfdot za.s[x11, 3], { z12.h-z13.h }, z4.h\n"
- ".inst 0xa0402ae4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x23]\n"
- "subs x25, x25, #0x1\n"
- "add x16, x16, %x[ld_in_col], LSL #2\n"
+ ".inst 0xa0402ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22]\n"
+ "subs x24, x24, #0x1\n"
+ "add x15, x15, %x[ld_in_col], LSL #2\n"
".inst 0xc12771b0 // bfdot za.s[x11, 0], { z13.h-z14.h }, z7.h\n"
- "ld1w { z21.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z21.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc12671b1 // bfdot za.s[x11, 1], { z13.h-z14.h }, z6.h\n"
- ".inst 0xa0412b06 // ld1h { z6.h-z7.h }, pn10.b/Z, [x24, #0x2, MUL VL]\n"
+ ".inst 0xa0412ae6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
".inst 0xc1257194 // bfdot za.s[x11, 4], { z12.h-z13.h }, z5.h\n"
- "ld1w { z20.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z20.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc1247195 // bfdot za.s[x11, 5], { z12.h-z13.h }, z4.h\n"
- ".inst 0xa0402ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22]\n"
+ ".inst 0xa0402aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21]\n"
".inst 0xc12771b2 // bfdot za.s[x11, 2], { z13.h-z14.h }, z7.h\n"
- "ld1w { z19.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z19.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc12671b3 // bfdot za.s[x11, 3], { z13.h-z14.h }, z6.h\n"
- ".inst 0xa0412ae6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xa0412ac6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
".inst 0xc12971d0 // bfdot za.s[x11, 0], { z14.h-z15.h }, z9.h\n"
- "ld1w { z18.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z18.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc12871d1 // bfdot za.s[x11, 1], { z14.h-z15.h }, z8.h\n"
- ".inst 0xa0422b08 // ld1h { z8.h-z9.h }, pn10.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xa0422ae8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
".inst 0xc1257196 // bfdot za.s[x11, 6], { z12.h-z13.h }, z5.h\n"
- "ld1w { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc1247197 // bfdot za.s[x11, 7], { z12.h-z13.h }, z4.h\n"
- ".inst 0xa0402aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xa0402a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20]\n"
".inst 0xc12771b4 // bfdot za.s[x11, 4], { z13.h-z14.h }, z7.h\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
".inst 0xc12671b5 // bfdot za.s[x11, 5], { z13.h-z14.h }, z6.h\n"
- ".inst 0xa0412ac6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xa0412aa6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
".inst 0xc12971d2 // bfdot za.s[x11, 2], { z14.h-z15.h }, z9.h\n"
".inst 0xc12871d3 // bfdot za.s[x11, 3], { z14.h-z15.h }, z8.h\n"
- ".inst 0xa0422ae8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa0422ac8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0xc12771b6 // bfdot za.s[x11, 6], { z13.h-z14.h }, z7.h\n"
".inst 0xc12671b7 // bfdot za.s[x11, 7], { z13.h-z14.h }, z6.h\n"
- ".inst 0xa0412aa6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xa0412a86 // ld1h { z6.h-z7.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
".inst 0xc12971d4 // bfdot za.s[x11, 4], { z14.h-z15.h }, z9.h\n"
".inst 0xc12871d5 // bfdot za.s[x11, 5], { z14.h-z15.h }, z8.h\n"
- ".inst 0xa0422ac8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ ".inst 0xa0422aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
".inst 0xc12971d6 // bfdot za.s[x11, 6], { z14.h-z15.h }, z9.h\n"
".inst 0xc12871d7 // bfdot za.s[x11, 7], { z14.h-z15.h }, z8.h\n"
- ".inst 0xa0422aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xa0422a88 // ld1h { z8.h-z9.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
".inst 0xc1251190 // bfdot za.s[x8, 0], { z12.h-z13.h }, z5.h\n"
".inst 0xc1241191 // bfdot za.s[x8, 1], { z12.h-z13.h }, z4.h\n"
".inst 0x658aaaec // bfcvt z12.h, p2/M, z23.s\n"
@@ -622,35 +622,80 @@ void sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za_impl(
".inst 0x648aaa4e // bfcvtnt z14.h, p2/M, z18.s\n"
".inst 0xc0066822 // mova { z2.d-z3.d }, za.d[x11, #1]\n"
".inst 0xc1bfcb40 // fclamp { z0.s-z3.s }, z26.s, z31.s\n"
- "st1w { z0.s }, p1, [x14]\n"
- "add x14, x14, x0, LSL #2\n"
- "st1w { z2.s }, p1, [x13]\n"
+ "st1w { z0.s }, p1, [x13]\n"
"add x13, x13, x10, LSL #2\n"
+ "st1w { z2.s }, p1, [x0]\n"
+ "add x0, x0, x9, LSL #2\n"
"add x11, x11, #0x2\n"
".inst 0xc0040b00 // mova za.d[x8, #0], { z24.d-z25.d }\n"
- "st1w { z1.s }, p1, [x9]\n"
- "add x9, x9, x27, LSL #2\n"
+ "st1w { z1.s }, p1, [x28]\n"
+ "add x28, x28, x26, LSL #2\n"
".inst 0xc0040b01 // mova za.d[x8, #1], { z24.d-z25.d }\n"
".inst 0x648aaa0f // bfcvtnt z15.h, p2/M, z16.s\n"
- "st1w { z3.s }, p1, [x28]\n"
- "add x28, x28, x26, LSL #2\n"
+ "st1w { z3.s }, p1, [x27]\n"
+ "add x27, x27, x25, LSL #2\n"
"bgt 11b\n"
"b 19f\n"
"12:" // Padded
- "cbz x22, 17f\n"
- "cmp x22, #0x1\n"
- "sub x17, x17, x22\n"
+ "cbz x21, 17f\n"
+ "cmp x21, #0x1\n"
+ "sub x16, x16, x21\n"
"beq 16f\n"
- "cmp x22, #0x2\n"
+ "cmp x21, #0x2\n"
"beq 15f\n"
- "cmp x22, #0x3\n"
+ "cmp x21, #0x3\n"
"beq 14f\n"
"13:" // Padded: 4 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x16]\n"
+ "ld1w { z16.s }, p0/Z, [x15]\n"
".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
- "add x21, x16, %x[ld_in_row], LSL #2\n"
+ "add x20, x15, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
+ ".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
+ ".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
+ ".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
+ "mov x12, #0x4\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
+ ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+ "addvl x19, SP, #24\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
+ ".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+ ".inst 0xa0402a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc1257190 // bfdot za.s[x11, 0], { z12.h-z13.h }, z5.h\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+ ".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
+ ".inst 0xc1247191 // bfdot za.s[x11, 1], { z12.h-z13.h }, z4.h\n"
+ ".inst 0xa0412a66 // ld1h { z6.h-z7.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+ "add x15, x15, %x[ld_in_col], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
+ ".inst 0x648aaa0f // bfcvtnt z15.h, p2/M, z16.s\n"
+ ".inst 0xc12771b0 // bfdot za.s[x11, 0], { z13.h-z14.h }, z7.h\n"
+ ".inst 0xc12671b1 // bfdot za.s[x11, 1], { z13.h-z14.h }, z6.h\n"
+ ".inst 0xa0422a68 // ld1h { z8.h-z9.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+ ".inst 0xc12971d0 // bfdot za.s[x11, 0], { z14.h-z15.h }, z9.h\n"
+ ".inst 0xc12871d1 // bfdot za.s[x11, 1], { z14.h-z15.h }, z8.h\n"
+ "14:" // Padded: 3 priming loads
+ "mov x12, #0x0\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "ld1w { z16.s }, p0/Z, [x15]\n"
+ ".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
+ "add x21, x15, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
"ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
@@ -669,7 +714,7 @@ void sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za_impl(
"add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "addvl x20, SP, #24\n"
+ "addvl x20, SP, #18\n"
"ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
@@ -677,25 +722,35 @@ void sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za_impl(
".inst 0xa0402a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20]\n"
".inst 0xc1257190 // bfdot za.s[x11, 0], { z12.h-z13.h }, z5.h\n"
"ld1w { z16.s }, p0/Z, [x21]\n"
+ "addvl x19, SP, #24\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
+ ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
".inst 0xc1247191 // bfdot za.s[x11, 1], { z12.h-z13.h }, z4.h\n"
+ ".inst 0xa0402a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19]\n"
+ "add x15, x15, %x[ld_in_col], LSL #2\n"
".inst 0xa0412a86 // ld1h { z6.h-z7.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- "add x16, x16, %x[ld_in_col], LSL #2\n"
+ ".inst 0xc1257192 // bfdot za.s[x11, 2], { z12.h-z13.h }, z5.h\n"
"ld1w { z16.s }, p0/Z, [x21]\n"
+ ".inst 0xc1247193 // bfdot za.s[x11, 3], { z12.h-z13.h }, z4.h\n"
".inst 0x648aaa0f // bfcvtnt z15.h, p2/M, z16.s\n"
".inst 0xc12771b0 // bfdot za.s[x11, 0], { z13.h-z14.h }, z7.h\n"
- ".inst 0xc12671b1 // bfdot za.s[x11, 1], { z13.h-z14.h }, z6.h\n"
".inst 0xa0422a88 // ld1h { z8.h-z9.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xc12671b1 // bfdot za.s[x11, 1], { z13.h-z14.h }, z6.h\n"
+ ".inst 0xa0412a66 // ld1h { z6.h-z7.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+ ".inst 0xc12771b2 // bfdot za.s[x11, 2], { z13.h-z14.h }, z7.h\n"
+ ".inst 0xc12671b3 // bfdot za.s[x11, 3], { z13.h-z14.h }, z6.h\n"
".inst 0xc12971d0 // bfdot za.s[x11, 0], { z14.h-z15.h }, z9.h\n"
".inst 0xc12871d1 // bfdot za.s[x11, 1], { z14.h-z15.h }, z8.h\n"
- "14:" // Padded: 3 priming loads
+ ".inst 0xa0422a68 // ld1h { z8.h-z9.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+ ".inst 0xc12971d2 // bfdot za.s[x11, 2], { z14.h-z15.h }, z9.h\n"
+ ".inst 0xc12871d3 // bfdot za.s[x11, 3], { z14.h-z15.h }, z8.h\n"
+ "15:" // Padded: 2 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x16]\n"
+ "ld1w { z16.s }, p0/Z, [x15]\n"
".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
- "add x22, x16, %x[ld_in_row], LSL #2\n"
+ "add x22, x15, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
"ld1w { z16.s }, p0/Z, [x22]\n"
".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
@@ -714,7 +769,7 @@ void sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za_impl(
"add x22, x22, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "addvl x21, SP, #18\n"
+ "addvl x21, SP, #12\n"
"ld1w { z16.s }, p0/Z, [x22]\n"
".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
@@ -722,35 +777,45 @@ void sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za_impl(
".inst 0xa0402aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21]\n"
".inst 0xc1257190 // bfdot za.s[x11, 0], { z12.h-z13.h }, z5.h\n"
"ld1w { z16.s }, p0/Z, [x22]\n"
- "addvl x20, SP, #24\n"
+ "addvl x20, SP, #18\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
".inst 0xc1247191 // bfdot za.s[x11, 1], { z12.h-z13.h }, z4.h\n"
".inst 0xa0402a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20]\n"
- "add x16, x16, %x[ld_in_col], LSL #2\n"
+ "addvl x19, SP, #24\n"
".inst 0xa0412aa6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
".inst 0xc1257192 // bfdot za.s[x11, 2], { z12.h-z13.h }, z5.h\n"
+ "add x15, x15, %x[ld_in_col], LSL #2\n"
"ld1w { z16.s }, p0/Z, [x22]\n"
".inst 0xc1247193 // bfdot za.s[x11, 3], { z12.h-z13.h }, z4.h\n"
".inst 0x648aaa0f // bfcvtnt z15.h, p2/M, z16.s\n"
+ ".inst 0xa0402a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19]\n"
".inst 0xc12771b0 // bfdot za.s[x11, 0], { z13.h-z14.h }, z7.h\n"
- ".inst 0xa0422aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
".inst 0xc12671b1 // bfdot za.s[x11, 1], { z13.h-z14.h }, z6.h\n"
".inst 0xa0412a86 // ld1h { z6.h-z7.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa0422aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xc1257194 // bfdot za.s[x11, 4], { z12.h-z13.h }, z5.h\n"
+ ".inst 0xc1247195 // bfdot za.s[x11, 5], { z12.h-z13.h }, z4.h\n"
".inst 0xc12771b2 // bfdot za.s[x11, 2], { z13.h-z14.h }, z7.h\n"
".inst 0xc12671b3 // bfdot za.s[x11, 3], { z13.h-z14.h }, z6.h\n"
+ ".inst 0xa0412a66 // ld1h { z6.h-z7.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
".inst 0xc12971d0 // bfdot za.s[x11, 0], { z14.h-z15.h }, z9.h\n"
".inst 0xc12871d1 // bfdot za.s[x11, 1], { z14.h-z15.h }, z8.h\n"
".inst 0xa0422a88 // ld1h { z8.h-z9.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xc12771b4 // bfdot za.s[x11, 4], { z13.h-z14.h }, z7.h\n"
+ ".inst 0xc12671b5 // bfdot za.s[x11, 5], { z13.h-z14.h }, z6.h\n"
".inst 0xc12971d2 // bfdot za.s[x11, 2], { z14.h-z15.h }, z9.h\n"
".inst 0xc12871d3 // bfdot za.s[x11, 3], { z14.h-z15.h }, z8.h\n"
- "15:" // Padded: 2 priming loads
+ ".inst 0xa0422a68 // ld1h { z8.h-z9.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+ ".inst 0xc12971d4 // bfdot za.s[x11, 4], { z14.h-z15.h }, z9.h\n"
+ ".inst 0xc12871d5 // bfdot za.s[x11, 5], { z14.h-z15.h }, z8.h\n"
+ "16:" // Padded: 1 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x16]\n"
+ "ld1w { z16.s }, p0/Z, [x15]\n"
".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
- "add x23, x16, %x[ld_in_row], LSL #2\n"
+ "add x23, x15, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
"ld1w { z16.s }, p0/Z, [x23]\n"
".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
@@ -769,7 +834,7 @@ void sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za_impl(
"add x23, x23, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "addvl x22, SP, #12\n"
+ "addvl x22, SP, #6\n"
"ld1w { z16.s }, p0/Z, [x23]\n"
".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
"add x23, x23, %x[ld_in_row], LSL #2\n"
@@ -777,16 +842,17 @@ void sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za_impl(
".inst 0xa0402ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22]\n"
".inst 0xc1257190 // bfdot za.s[x11, 0], { z12.h-z13.h }, z5.h\n"
"ld1w { z16.s }, p0/Z, [x23]\n"
- "addvl x21, SP, #18\n"
+ "addvl x21, SP, #12\n"
"add x23, x23, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
".inst 0xc1247191 // bfdot za.s[x11, 1], { z12.h-z13.h }, z4.h\n"
".inst 0xa0402aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21]\n"
- "addvl x20, SP, #24\n"
+ "addvl x20, SP, #18\n"
".inst 0xa0412ac6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
".inst 0xc1257192 // bfdot za.s[x11, 2], { z12.h-z13.h }, z5.h\n"
- "add x16, x16, %x[ld_in_col], LSL #2\n"
+ "addvl x19, SP, #24\n"
+ "add x15, x15, %x[ld_in_col], LSL #2\n"
"ld1w { z16.s }, p0/Z, [x23]\n"
".inst 0xc1247193 // bfdot za.s[x11, 3], { z12.h-z13.h }, z4.h\n"
".inst 0x648aaa0f // bfcvtnt z15.h, p2/M, z16.s\n"
@@ -797,207 +863,141 @@ void sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za_impl(
".inst 0xa0422ac8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0xc1257194 // bfdot za.s[x11, 4], { z12.h-z13.h }, z5.h\n"
".inst 0xc1247195 // bfdot za.s[x11, 5], { z12.h-z13.h }, z4.h\n"
+ ".inst 0xa0402a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19]\n"
".inst 0xc12771b2 // bfdot za.s[x11, 2], { z13.h-z14.h }, z7.h\n"
".inst 0xc12671b3 // bfdot za.s[x11, 3], { z13.h-z14.h }, z6.h\n"
".inst 0xa0412a86 // ld1h { z6.h-z7.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
".inst 0xc12971d0 // bfdot za.s[x11, 0], { z14.h-z15.h }, z9.h\n"
".inst 0xc12871d1 // bfdot za.s[x11, 1], { z14.h-z15.h }, z8.h\n"
".inst 0xa0422aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc12771b4 // bfdot za.s[x11, 4], { z13.h-z14.h }, z7.h\n"
- ".inst 0xc12671b5 // bfdot za.s[x11, 5], { z13.h-z14.h }, z6.h\n"
- ".inst 0xc12971d2 // bfdot za.s[x11, 2], { z14.h-z15.h }, z9.h\n"
- ".inst 0xc12871d3 // bfdot za.s[x11, 3], { z14.h-z15.h }, z8.h\n"
- ".inst 0xa0422a88 // ld1h { z8.h-z9.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc12971d4 // bfdot za.s[x11, 4], { z14.h-z15.h }, z9.h\n"
- ".inst 0xc12871d5 // bfdot za.s[x11, 5], { z14.h-z15.h }, z8.h\n"
- "16:" // Padded: 1 priming loads
- "mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x16]\n"
- ".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
- "add x24, x16, %x[ld_in_row], LSL #2\n"
- ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x24]\n"
- ".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
- "add x24, x24, %x[ld_in_row], LSL #2\n"
- ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x24]\n"
- ".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
- "add x24, x24, %x[ld_in_row], LSL #2\n"
- ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z16.s }, p0/Z, [x24]\n"
- ".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
- "mov x12, #0x4\n"
- "add x24, x24, %x[ld_in_row], LSL #2\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
- ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "addvl x23, SP, #6\n"
- "ld1w { z16.s }, p0/Z, [x24]\n"
- ".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
- "add x24, x24, %x[ld_in_row], LSL #2\n"
- ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xa0402ae4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x23]\n"
- ".inst 0xc1257190 // bfdot za.s[x11, 0], { z12.h-z13.h }, z5.h\n"
- "ld1w { z16.s }, p0/Z, [x24]\n"
- "addvl x22, SP, #12\n"
- "add x24, x24, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
- ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xc1247191 // bfdot za.s[x11, 1], { z12.h-z13.h }, z4.h\n"
- ".inst 0xa0402ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22]\n"
- "addvl x21, SP, #18\n"
- ".inst 0xa0412ae6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
- ".inst 0xc1257192 // bfdot za.s[x11, 2], { z12.h-z13.h }, z5.h\n"
- "addvl x20, SP, #24\n"
- "add x16, x16, %x[ld_in_col], LSL #2\n"
- "ld1w { z16.s }, p0/Z, [x24]\n"
- ".inst 0xc1247193 // bfdot za.s[x11, 3], { z12.h-z13.h }, z4.h\n"
- ".inst 0x648aaa0f // bfcvtnt z15.h, p2/M, z16.s\n"
- ".inst 0xa0402aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc12771b0 // bfdot za.s[x11, 0], { z13.h-z14.h }, z7.h\n"
- ".inst 0xc12671b1 // bfdot za.s[x11, 1], { z13.h-z14.h }, z6.h\n"
- ".inst 0xa0412ac6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xa0422ae8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xc1257194 // bfdot za.s[x11, 4], { z12.h-z13.h }, z5.h\n"
- ".inst 0xc1247195 // bfdot za.s[x11, 5], { z12.h-z13.h }, z4.h\n"
- ".inst 0xa0402a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc12771b2 // bfdot za.s[x11, 2], { z13.h-z14.h }, z7.h\n"
- ".inst 0xc12671b3 // bfdot za.s[x11, 3], { z13.h-z14.h }, z6.h\n"
- ".inst 0xa0412aa6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc12971d0 // bfdot za.s[x11, 0], { z14.h-z15.h }, z9.h\n"
- ".inst 0xc12871d1 // bfdot za.s[x11, 1], { z14.h-z15.h }, z8.h\n"
- ".inst 0xa0422ac8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0xc1257196 // bfdot za.s[x11, 6], { z12.h-z13.h }, z5.h\n"
".inst 0xc1247197 // bfdot za.s[x11, 7], { z12.h-z13.h }, z4.h\n"
".inst 0xc12771b4 // bfdot za.s[x11, 4], { z13.h-z14.h }, z7.h\n"
".inst 0xc12671b5 // bfdot za.s[x11, 5], { z13.h-z14.h }, z6.h\n"
- ".inst 0xa0412a86 // ld1h { z6.h-z7.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa0412a66 // ld1h { z6.h-z7.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
".inst 0xc12971d2 // bfdot za.s[x11, 2], { z14.h-z15.h }, z9.h\n"
".inst 0xc12871d3 // bfdot za.s[x11, 3], { z14.h-z15.h }, z8.h\n"
- ".inst 0xa0422aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xa0422a88 // ld1h { z8.h-z9.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
".inst 0xc12771b6 // bfdot za.s[x11, 6], { z13.h-z14.h }, z7.h\n"
".inst 0xc12671b7 // bfdot za.s[x11, 7], { z13.h-z14.h }, z6.h\n"
".inst 0xc12971d4 // bfdot za.s[x11, 4], { z14.h-z15.h }, z9.h\n"
".inst 0xc12871d5 // bfdot za.s[x11, 5], { z14.h-z15.h }, z8.h\n"
- ".inst 0xa0422a88 // ld1h { z8.h-z9.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xa0422a68 // ld1h { z8.h-z9.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
".inst 0xc12971d6 // bfdot za.s[x11, 6], { z14.h-z15.h }, z9.h\n"
".inst 0xc12871d7 // bfdot za.s[x11, 7], { z14.h-z15.h }, z8.h\n"
"17:" // Padded: 0 priming loads
".inst 0xa0402be4 // ld1h { z4.h-z5.h }, pn10.b/Z, [SP]\n"
".inst 0xa0412be6 // ld1h { z6.h-z7.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
".inst 0xa0422be8 // ld1h { z8.h-z9.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
- "cbz x17, 20f\n"
+ "cbz x16, 20f\n"
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x16]\n"
+ "ld1w { z16.s }, p0/Z, [x15]\n"
".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
- "add x20, x16, %x[ld_in_row], LSL #2\n"
+ "add x19, x15, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "sub x17, x17, #0x1\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "sub x16, x16, #0x1\n"
".inst 0x648aaa0f // bfcvtnt z15.h, p2/M, z16.s\n"
- "sub x15, x15, #0x1\n"
- "cmp x17, x15\n"
- "csel x25, x17, x15, LT\n"
- "add x16, x16, %x[ld_in_col], LSL #2\n"
- "sub x15, x15, x25\n"
- "cbz x25, 19f\n"
+ "sub x14, x14, #0x1\n"
+ "cmp x16, x14\n"
+ "csel x24, x16, x14, LT\n"
+ "add x15, x15, %x[ld_in_col], LSL #2\n"
+ "sub x14, x14, x24\n"
+ "cbz x24, 19f\n"
"18:" // Padded: Main loop
- "addvl x24, SP, #6\n"
+ "addvl x23, SP, #6\n"
".inst 0xc1257190 // bfdot za.s[x11, 0], { z12.h-z13.h }, z5.h\n"
- "addvl x23, SP, #12\n"
+ "addvl x22, SP, #12\n"
".inst 0xc1247191 // bfdot za.s[x11, 1], { z12.h-z13.h }, z4.h\n"
- ".inst 0xa0402b04 // ld1h { z4.h-z5.h }, pn10.b/Z, [x24]\n"
+ ".inst 0xa0402ae4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x23]\n"
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc1257192 // bfdot za.s[x11, 2], { z12.h-z13.h }, z5.h\n"
- "ld1w { z23.s }, p0/Z, [x16]\n"
- "add x22, x16, %x[ld_in_row], LSL #2\n"
+ "ld1w { z23.s }, p0/Z, [x15]\n"
+ "add x21, x15, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
".inst 0xc1247193 // bfdot za.s[x11, 3], { z12.h-z13.h }, z4.h\n"
- ".inst 0xa0402ae4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x23]\n"
- "addvl x21, SP, #18\n"
- "addvl x20, SP, #24\n"
+ ".inst 0xa0402ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22]\n"
+ "addvl x20, SP, #18\n"
+ "addvl x19, SP, #24\n"
".inst 0xc12771b0 // bfdot za.s[x11, 0], { z13.h-z14.h }, z7.h\n"
- "ld1w { z22.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "ld1w { z22.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc12671b1 // bfdot za.s[x11, 1], { z13.h-z14.h }, z6.h\n"
- ".inst 0xa0412b06 // ld1h { z6.h-z7.h }, pn10.b/Z, [x24, #0x2, MUL VL]\n"
- "subs x25, x25, #0x1\n"
- "add x16, x16, %x[ld_in_col], LSL #2\n"
+ ".inst 0xa0412ae6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ "subs x24, x24, #0x1\n"
+ "add x15, x15, %x[ld_in_col], LSL #2\n"
".inst 0xc1257194 // bfdot za.s[x11, 4], { z12.h-z13.h }, z5.h\n"
- "ld1w { z21.s }, p0/Z, [x22]\n"
+ "ld1w { z21.s }, p0/Z, [x21]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x4\n"
".inst 0xc1247195 // bfdot za.s[x11, 5], { z12.h-z13.h }, z4.h\n"
- ".inst 0xa0402aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ ".inst 0xa0402a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0xc12771b2 // bfdot za.s[x11, 2], { z13.h-z14.h }, z7.h\n"
- "ld1w { z20.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "ld1w { z20.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc12671b3 // bfdot za.s[x11, 3], { z13.h-z14.h }, z6.h\n"
- ".inst 0xa0412ae6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xa0412ac6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
".inst 0xc12971d0 // bfdot za.s[x11, 0], { z14.h-z15.h }, z9.h\n"
- "ld1w { z19.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "ld1w { z19.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
".inst 0xc12871d1 // bfdot za.s[x11, 1], { z14.h-z15.h }, z8.h\n"
- ".inst 0xa0422b08 // ld1h { z8.h-z9.h }, pn10.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xa0422ae8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
".inst 0xc1257196 // bfdot za.s[x11, 6], { z12.h-z13.h }, z5.h\n"
- "ld1w { z18.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "ld1w { z18.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc1247197 // bfdot za.s[x11, 7], { z12.h-z13.h }, z4.h\n"
- ".inst 0xa0402a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa0402a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19]\n"
".inst 0xc12771b4 // bfdot za.s[x11, 4], { z13.h-z14.h }, z7.h\n"
- "ld1w { z17.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "ld1w { z17.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
".inst 0xc12671b5 // bfdot za.s[x11, 5], { z13.h-z14.h }, z6.h\n"
- ".inst 0xa0412aa6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xa0412a86 // ld1h { z6.h-z7.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
".inst 0xc12971d2 // bfdot za.s[x11, 2], { z14.h-z15.h }, z9.h\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0xc12871d3 // bfdot za.s[x11, 3], { z14.h-z15.h }, z8.h\n"
- ".inst 0xa0422ae8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa0422ac8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0xc12771b6 // bfdot za.s[x11, 6], { z13.h-z14.h }, z7.h\n"
".inst 0xc12671b7 // bfdot za.s[x11, 7], { z13.h-z14.h }, z6.h\n"
- ".inst 0xa0412a86 // ld1h { z6.h-z7.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa0412a66 // ld1h { z6.h-z7.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
".inst 0xc12971d4 // bfdot za.s[x11, 4], { z14.h-z15.h }, z9.h\n"
".inst 0xc12871d5 // bfdot za.s[x11, 5], { z14.h-z15.h }, z8.h\n"
- ".inst 0xa0422aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xa0422a88 // ld1h { z8.h-z9.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
".inst 0xc12971d6 // bfdot za.s[x11, 6], { z14.h-z15.h }, z9.h\n"
".inst 0xc12871d7 // bfdot za.s[x11, 7], { z14.h-z15.h }, z8.h\n"
- ".inst 0xa0422a88 // ld1h { z8.h-z9.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xa0422a68 // ld1h { z8.h-z9.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
".inst 0xc1251190 // bfdot za.s[x8, 0], { z12.h-z13.h }, z5.h\n"
".inst 0xc1241191 // bfdot za.s[x8, 1], { z12.h-z13.h }, z4.h\n"
".inst 0x658aaaec // bfcvt z12.h, p2/M, z23.s\n"
@@ -1018,60 +1018,60 @@ void sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za_impl(
".inst 0x648aaa4e // bfcvtnt z14.h, p2/M, z18.s\n"
".inst 0xc0066822 // mova { z2.d-z3.d }, za.d[x11, #1]\n"
".inst 0xc1bfcb40 // fclamp { z0.s-z3.s }, z26.s, z31.s\n"
- "st1w { z0.s }, p1, [x14]\n"
- "add x14, x14, x0, LSL #2\n"
- "st1w { z2.s }, p1, [x13]\n"
+ "st1w { z0.s }, p1, [x13]\n"
"add x13, x13, x10, LSL #2\n"
+ "st1w { z2.s }, p1, [x0]\n"
+ "add x0, x0, x9, LSL #2\n"
"add x11, x11, #0x2\n"
".inst 0xc0040b00 // mova za.d[x8, #0], { z24.d-z25.d }\n"
- "st1w { z1.s }, p1, [x9]\n"
- "add x9, x9, x27, LSL #2\n"
+ "st1w { z1.s }, p1, [x28]\n"
+ "add x28, x28, x26, LSL #2\n"
".inst 0xc0040b01 // mova za.d[x8, #1], { z24.d-z25.d }\n"
".inst 0x648aaa0f // bfcvtnt z15.h, p2/M, z16.s\n"
- "st1w { z3.s }, p1, [x28]\n"
- "add x28, x28, x26, LSL #2\n"
+ "st1w { z3.s }, p1, [x27]\n"
+ "add x27, x27, x25, LSL #2\n"
"bgt 18b\n"
"19:" // Main loop tail
- "addvl x23, SP, #6\n"
+ "addvl x22, SP, #6\n"
".inst 0xc1257190 // bfdot za.s[x11, 0], { z12.h-z13.h }, z5.h\n"
- "addvl x22, SP, #12\n"
+ "addvl x21, SP, #12\n"
".inst 0xc1247191 // bfdot za.s[x11, 1], { z12.h-z13.h }, z4.h\n"
- ".inst 0xa0402ae4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x23]\n"
- "addvl x21, SP, #18\n"
- "addvl x20, SP, #24\n"
+ ".inst 0xa0402ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22]\n"
+ "addvl x20, SP, #18\n"
+ "addvl x19, SP, #24\n"
".inst 0xc1257192 // bfdot za.s[x11, 2], { z12.h-z13.h }, z5.h\n"
".inst 0xc1247193 // bfdot za.s[x11, 3], { z12.h-z13.h }, z4.h\n"
- ".inst 0xa0402ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22]\n"
+ ".inst 0xa0402aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21]\n"
".inst 0xc12771b0 // bfdot za.s[x11, 0], { z13.h-z14.h }, z7.h\n"
".inst 0xc12671b1 // bfdot za.s[x11, 1], { z13.h-z14.h }, z6.h\n"
- ".inst 0xa0412ae6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xa0412ac6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
".inst 0xc1257194 // bfdot za.s[x11, 4], { z12.h-z13.h }, z5.h\n"
".inst 0xc1247195 // bfdot za.s[x11, 5], { z12.h-z13.h }, z4.h\n"
- ".inst 0xa0402aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xa0402a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20]\n"
".inst 0xc12771b2 // bfdot za.s[x11, 2], { z13.h-z14.h }, z7.h\n"
".inst 0xc12671b3 // bfdot za.s[x11, 3], { z13.h-z14.h }, z6.h\n"
- ".inst 0xa0412ac6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xa0412aa6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
".inst 0xc12971d0 // bfdot za.s[x11, 0], { z14.h-z15.h }, z9.h\n"
".inst 0xc12871d1 // bfdot za.s[x11, 1], { z14.h-z15.h }, z8.h\n"
- ".inst 0xa0422ae8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa0422ac8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0xc1257196 // bfdot za.s[x11, 6], { z12.h-z13.h }, z5.h\n"
".inst 0xc1247197 // bfdot za.s[x11, 7], { z12.h-z13.h }, z4.h\n"
- ".inst 0xa0402a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa0402a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19]\n"
".inst 0xc12771b4 // bfdot za.s[x11, 4], { z13.h-z14.h }, z7.h\n"
".inst 0xc12671b5 // bfdot za.s[x11, 5], { z13.h-z14.h }, z6.h\n"
- ".inst 0xa0412aa6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xa0412a86 // ld1h { z6.h-z7.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
".inst 0xc12971d2 // bfdot za.s[x11, 2], { z14.h-z15.h }, z9.h\n"
".inst 0xc12871d3 // bfdot za.s[x11, 3], { z14.h-z15.h }, z8.h\n"
- ".inst 0xa0422ac8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ ".inst 0xa0422aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
".inst 0xc12771b6 // bfdot za.s[x11, 6], { z13.h-z14.h }, z7.h\n"
".inst 0xc12671b7 // bfdot za.s[x11, 7], { z13.h-z14.h }, z6.h\n"
- ".inst 0xa0412a86 // ld1h { z6.h-z7.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa0412a66 // ld1h { z6.h-z7.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
".inst 0xc12971d4 // bfdot za.s[x11, 4], { z14.h-z15.h }, z9.h\n"
".inst 0xc12871d5 // bfdot za.s[x11, 5], { z14.h-z15.h }, z8.h\n"
- ".inst 0xa0422aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xa0422a88 // ld1h { z8.h-z9.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
".inst 0xc12971d6 // bfdot za.s[x11, 6], { z14.h-z15.h }, z9.h\n"
".inst 0xc12871d7 // bfdot za.s[x11, 7], { z14.h-z15.h }, z8.h\n"
- ".inst 0xa0422a88 // ld1h { z8.h-z9.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xa0422a68 // ld1h { z8.h-z9.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
".inst 0xc1251190 // bfdot za.s[x8, 0], { z12.h-z13.h }, z5.h\n"
".inst 0xc1241191 // bfdot za.s[x8, 1], { z12.h-z13.h }, z4.h\n"
".inst 0xc12711b0 // bfdot za.s[x8, 0], { z13.h-z14.h }, z7.h\n"
@@ -1082,66 +1082,66 @@ void sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za_impl(
".inst 0xc0066800 // mova { z0.d-z1.d }, za.d[x11, #0]\n"
".inst 0xc0066822 // mova { z2.d-z3.d }, za.d[x11, #1]\n"
".inst 0xc1bfcb40 // fclamp { z0.s-z3.s }, z26.s, z31.s\n"
- "st1w { z0.s }, p1, [x14]\n"
- "add x14, x14, x0, LSL #2\n"
- "st1w { z2.s }, p1, [x13]\n"
+ "st1w { z0.s }, p1, [x13]\n"
"add x13, x13, x10, LSL #2\n"
+ "st1w { z2.s }, p1, [x0]\n"
+ "add x0, x0, x9, LSL #2\n"
"add x11, x11, #0x2\n"
".inst 0xc0040b00 // mova za.d[x8, #0], { z24.d-z25.d }\n"
- "st1w { z1.s }, p1, [x9]\n"
- "add x9, x9, x27, LSL #2\n"
- ".inst 0xc0040b01 // mova za.d[x8, #1], { z24.d-z25.d }\n"
- "st1w { z3.s }, p1, [x28]\n"
+ "st1w { z1.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
+ ".inst 0xc0040b01 // mova za.d[x8, #1], { z24.d-z25.d }\n"
+ "st1w { z3.s }, p1, [x27]\n"
+ "add x27, x27, x25, LSL #2\n"
"20:" // Main loop skip tail
- "cbz x15, 22f\n"
+ "cbz x14, 22f\n"
"21:" // Right padding loop
".inst 0xc0066800 // mova { z0.d-z1.d }, za.d[x11, #0]\n"
"add x8, x8, #0x2\n"
- "subs x15, x15, #0x1\n"
+ "subs x14, x14, #0x1\n"
".inst 0xc0066822 // mova { z2.d-z3.d }, za.d[x11, #1]\n"
".inst 0xc1bfcb40 // fclamp { z0.s-z3.s }, z26.s, z31.s\n"
- "st1w { z0.s }, p1, [x14]\n"
- "add x14, x14, x0, LSL #2\n"
- "st1w { z2.s }, p1, [x13]\n"
+ "st1w { z0.s }, p1, [x13]\n"
"add x13, x13, x10, LSL #2\n"
+ "st1w { z2.s }, p1, [x0]\n"
+ "add x0, x0, x9, LSL #2\n"
"add x11, x11, #0x2\n"
".inst 0xc0040b00 // mova za.d[x8, #0], { z24.d-z25.d }\n"
- "st1w { z1.s }, p1, [x9]\n"
- "add x9, x9, x27, LSL #2\n"
- ".inst 0xc0040b01 // mova za.d[x8, #1], { z24.d-z25.d }\n"
- "st1w { z3.s }, p1, [x28]\n"
+ "st1w { z1.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
+ ".inst 0xc0040b01 // mova za.d[x8, #1], { z24.d-z25.d }\n"
+ "st1w { z3.s }, p1, [x27]\n"
+ "add x27, x27, x25, LSL #2\n"
"bgt 21b\n"
"22:" // End
- "ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
- "incb x20, ALL, MUL #16\n"
- "incb x20, ALL, MUL #9\n"
- "str x20, [%x[args], %[offsetof_Args_weights]]\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "incw x7\n"
- "whilelt p1.s, x7, x6\n"
- "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
- "add x16, x16, x20, LSL #2\n"
- "str x16, [%x[args], %[offsetof_Args_inptr]]\n"
- "ldr x25, [%x[args], %[offsetof_Args_outptrs]]\n"
- "ldr x24, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
- "ldp x23, x22, [x25, #0x0]\n"
- "ldp x21, x20, [x24, #0x0]\n"
- "add x23, x23, x21, LSL #2\n"
+ "ldr x19, [%x[args], %[offsetof_Args_weights]]\n"
+ "incb x19, ALL, MUL #16\n"
+ "incb x19, ALL, MUL #9\n"
+ "str x19, [%x[args], %[offsetof_Args_weights]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+ "incw x17\n"
+ "whilelt p1.s, x17, x7\n"
+ "ldr x15, [%x[args], %[offsetof_Args_inptr]]\n"
+ "add x15, x15, x19, LSL #2\n"
+ "str x15, [%x[args], %[offsetof_Args_inptr]]\n"
+ "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+ "ldp x22, x21, [x24, #0x0]\n"
+ "ldp x20, x19, [x23, #0x0]\n"
"add x22, x22, x20, LSL #2\n"
- "stp x23, x22, [x25, #0x0]\n"
- "ldp x23, x22, [x25, #0x10]\n"
- "ldp x21, x20, [x24, #0x10]\n"
- "add x23, x23, x21, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "stp x22, x21, [x24, #0x0]\n"
+ "ldp x22, x21, [x24, #0x10]\n"
+ "ldp x20, x19, [x23, #0x10]\n"
"add x22, x22, x20, LSL #2\n"
- "stp x23, x22, [x25, #0x10]\n"
+ "add x21, x21, x19, LSL #2\n"
+ "stp x22, x21, [x24, #0x10]\n"
"b.any 1b\n"
"addvl SP, SP, #30\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_bias] "I" (offsetof(Args, bias)), [offsetof_Args_clamp_max] "I" (offsetof(Args, clamp_max)), [offsetof_Args_clamp_min] "I" (offsetof(Args, clamp_min)), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x0", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x0", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za/generic.cpp
index 01f689a0b4..eae8994166 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -69,211 +69,292 @@ void sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za_impl(
Args args = { inptr, ld_in_vl, pad_top, 11u - std::min(11u, pad_top + valid_input_rows), pad_left, weights, bias, valid_input_cols, output_cols, outptrs, outlds, outvllds, start_channel, valid_channels, act_min, act_max };
__asm__ __volatile__(
- "ldr x3, [%x[args], %[offsetof_Args_pad_bottom]]\n"
- "mov x20, #0xb\n"
+ "ldr x4, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "mov x19, #0xb\n"
".inst 0xd503477f // SMSTART ZA\n"
- "sub x20, x20, x3\n"
- "ldr x4, [%x[args], %[offsetof_Args_pad_top]]\n"
+ "sub x19, x19, x4\n"
+ "ldr x5, [%x[args], %[offsetof_Args_pad_top]]\n"
"ptrue p2.b\n"
".inst 0x25207812 // ptrue pn10.b\n"
"ld1rw { z30.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_min]]\n"
- "ldr x5, [%x[args], %[offsetof_Args_n_channels]]\n"
- "whilelt p1.s, XZR, x5\n"
- "whilelt p9.s, XZR, x20\n"
+ "ldr x6, [%x[args], %[offsetof_Args_n_channels]]\n"
+ "whilelt p1.s, XZR, x6\n"
+ "whilelt p9.s, XZR, x19\n"
"ld1rw { z22.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_max]]\n"
- "whilelt p8.s, XZR, x4\n"
+ "whilelt p8.s, XZR, x5\n"
"addvl SP, SP, #-15\n"
- "ldr x6, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ldr x7, [%x[args], %[offsetof_Args_current_channel]]\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
"1:" // Channel loop
- "ldr x20, [%x[args], %[offsetof_Args_bias]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_bias]]\n"
"fmov z4.s, #0x0\n"
- "cbz x20, 2f\n"
- "ld1w { z4.s }, p1/Z, [x20, x6, LSL #2]\n"
+ "cbz x19, 2f\n"
+ "ld1w { z4.s }, p1/Z, [x19, x7, LSL #2]\n"
"2:" // Load bias: Done
- "ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
- "mov x21, x20\n"
- "ld1w { z31.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
- "ld1w { z16.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
+ "ldr x19, [%x[args], %[offsetof_Args_weights]]\n"
+ "mov x20, x19\n"
+ "ld1w { z31.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
+ "ld1w { z16.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
".inst 0x658aabe1 // bfcvt z1.h, p2/M, z31.s\n"
- "incb x20\n"
- "ld1w { z13.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
+ "incb x19\n"
+ "ld1w { z13.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
".inst 0x658aa9a9 // bfcvt z9.h, p2/M, z13.s\n"
- "addvl x24, SP, #15\n"
- "ld1w { z18.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
+ "addvl x23, SP, #15\n"
+ "ld1w { z18.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
".inst 0x648aaa01 // bfcvtnt z1.h, p2/M, z16.s\n"
- "addvl x24, x24, #-3\n"
- "ld1w { z15.s }, p2/Z, [x21]\n"
- "mov x21, x20\n"
- "st1h { z1.h }, p2, [x24]\n"
+ "addvl x23, x23, #-3\n"
+ "ld1w { z15.s }, p2/Z, [x20]\n"
+ "mov x20, x19\n"
+ "st1h { z1.h }, p2, [x23]\n"
".inst 0x648aaa49 // bfcvtnt z9.h, p2/M, z18.s\n"
- "ld1w { z31.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
+ "ld1w { z31.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
".inst 0x658aabe1 // bfcvt z1.h, p2/M, z31.s\n"
- "incb x20\n"
- "ld1w { z16.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
- "st1h { z9.h }, p2, [x24, #1, MUL VL]\n"
+ "incb x19\n"
+ "ld1w { z16.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
+ "st1h { z9.h }, p2, [x23, #1, MUL VL]\n"
".inst 0x658aa9e2 // bfcvt z2.h, p2/M, z15.s\n"
- "ld1w { z13.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
+ "ld1w { z13.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
".inst 0x658aa9a9 // bfcvt z9.h, p2/M, z13.s\n"
".inst 0x648aaa01 // bfcvtnt z1.h, p2/M, z16.s\n"
- "ld1w { z18.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
- "st1h { z2.h }, p2, [x24, #2, MUL VL]\n"
- "addvl x24, x24, #-3\n"
- "ld1w { z15.s }, p2/Z, [x21]\n"
- "mov x21, x20\n"
- "st1h { z1.h }, p2, [x24]\n"
+ "ld1w { z18.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
+ "st1h { z2.h }, p2, [x23, #2, MUL VL]\n"
+ "addvl x23, x23, #-3\n"
+ "ld1w { z15.s }, p2/Z, [x20]\n"
+ "mov x20, x19\n"
+ "st1h { z1.h }, p2, [x23]\n"
".inst 0x648aaa49 // bfcvtnt z9.h, p2/M, z18.s\n"
- "ld1w { z31.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
- "incb x20\n"
+ "ld1w { z31.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
+ "incb x19\n"
".inst 0x658aabe1 // bfcvt z1.h, p2/M, z31.s\n"
- "ld1w { z16.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
+ "ld1w { z16.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
".inst 0x658aa9e2 // bfcvt z2.h, p2/M, z15.s\n"
- "st1h { z9.h }, p2, [x24, #1, MUL VL]\n"
- "ld1w { z13.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
+ "st1h { z9.h }, p2, [x23, #1, MUL VL]\n"
+ "ld1w { z13.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
".inst 0x658aa9a9 // bfcvt z9.h, p2/M, z13.s\n"
- "st1h { z2.h }, p2, [x24, #2, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
- "addvl x24, x24, #-3\n"
+ "st1h { z2.h }, p2, [x23, #2, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
+ "addvl x23, x23, #-3\n"
".inst 0x648aaa01 // bfcvtnt z1.h, p2/M, z16.s\n"
- "ld1w { z15.s }, p2/Z, [x21]\n"
- "mov x21, x20\n"
- "st1h { z1.h }, p2, [x24]\n"
+ "ld1w { z15.s }, p2/Z, [x20]\n"
+ "mov x20, x19\n"
+ "st1h { z1.h }, p2, [x23]\n"
".inst 0x648aaa49 // bfcvtnt z9.h, p2/M, z18.s\n"
- "ld1w { z31.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
+ "ld1w { z31.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
".inst 0x658aabe1 // bfcvt z1.h, p2/M, z31.s\n"
- "incb x20\n"
- "ld1w { z16.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
+ "incb x19\n"
+ "ld1w { z16.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
".inst 0x658aa9e2 // bfcvt z2.h, p2/M, z15.s\n"
- "st1h { z9.h }, p2, [x24, #1, MUL VL]\n"
- "ld1w { z13.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
+ "st1h { z9.h }, p2, [x23, #1, MUL VL]\n"
+ "ld1w { z13.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
".inst 0x648aaa01 // bfcvtnt z1.h, p2/M, z16.s\n"
".inst 0x658aa9a9 // bfcvt z9.h, p2/M, z13.s\n"
- "ld1w { z18.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
- "ldr x7, [%x[args], %[offsetof_Args_input_cols]]\n"
- "st1h { z2.h }, p2, [x24, #2, MUL VL]\n"
- "ld1w { z15.s }, p2/Z, [x21]\n"
- "mov x21, x20\n"
- "addvl x24, x24, #-3\n"
- "st1h { z1.h }, p2, [x24]\n"
- "ld1w { z31.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
+ "ld1w { z18.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
+ "ldr x17, [%x[args], %[offsetof_Args_input_cols]]\n"
+ "st1h { z2.h }, p2, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p2/Z, [x20]\n"
+ "mov x20, x19\n"
+ "addvl x23, x23, #-3\n"
+ "st1h { z1.h }, p2, [x23]\n"
+ "ld1w { z31.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
".inst 0x648aaa49 // bfcvtnt z9.h, p2/M, z18.s\n"
- "st1h { z9.h }, p2, [x24, #1, MUL VL]\n"
- "ld1w { z16.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
+ "st1h { z9.h }, p2, [x23, #1, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
".inst 0x658aabe1 // bfcvt z1.h, p2/M, z31.s\n"
".inst 0x658aa9e2 // bfcvt z2.h, p2/M, z15.s\n"
- "ld1w { z13.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
+ "ld1w { z13.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
".inst 0x658aa9a9 // bfcvt z9.h, p2/M, z13.s\n"
- "ldr x17, [%x[args], %[offsetof_Args_inptr]]\n"
- "ld1w { z18.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
- "sub x20, x7, #0x1\n"
- "st1h { z2.h }, p2, [x24, #2, MUL VL]\n"
- "ld1w { z15.s }, p2/Z, [x21]\n"
- "orr x23, x20, %x[ld_in_col], LSL #18\n"
- "addvl x24, x24, #-3\n"
+ "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
+ "ld1w { z18.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
+ "sub x19, x17, #0x1\n"
+ "st1h { z2.h }, p2, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p2/Z, [x20]\n"
+ "orr x22, x19, %x[ld_in_col], LSL #18\n"
+ "addvl x23, x23, #-3\n"
"mov z5.d, z4.d\n"
- "orr x23, x5, x23, LSL #20\n"
- "mov x22, #0xb\n"
+ "orr x22, x6, x22, LSL #20\n"
+ "mov x21, #0xb\n"
"mov z6.d, z4.d\n"
"mov z7.d, z4.d\n"
- "add x21, x4, x3\n"
- "lsl x20, %x[ld_in_row], #0x2\n"
+ "add x20, x5, x4\n"
+ "lsl x19, %x[ld_in_row], #0x2\n"
".inst 0x648aaa01 // bfcvtnt z1.h, p2/M, z16.s\n"
- "st1h { z1.h }, p2, [x24]\n"
+ "st1h { z1.h }, p2, [x23]\n"
".inst 0x648aaa49 // bfcvtnt z9.h, p2/M, z18.s\n"
- "st1h { z9.h }, p2, [x24, #1, MUL VL]\n"
+ "st1h { z9.h }, p2, [x23, #1, MUL VL]\n"
".inst 0x658aa9e2 // bfcvt z2.h, p2/M, z15.s\n"
"mov x8, #0x0\n"
- "st1h { z2.h }, p2, [x24, #2, MUL VL]\n"
- "ldr x16, [%x[args], %[offsetof_Args_output_cols]]\n"
- "lsl x23, x23, #0x2\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x4, x17\n"
+ "st1h { z2.h }, p2, [x23, #2, MUL VL]\n"
+ "ldr x15, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "lsl x22, x22, #0x2\n"
+ "sub x21, x21, x20\n"
+ "madd x19, x19, x5, x16\n"
"3:" // Issue prefetches
- "subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col], LSL #2\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xf8b64a7c // rprfm pldstrm, x22, [x19]\n"
+ "add x19, x19, %x[ld_in_col], LSL #2\n"
"bgt 3b\n"
- "ldr x25, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x2\n"
- "msub x17, x4, x20, x17\n"
+ "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "lsl x19, %x[ld_in_row], #0x2\n"
+ "msub x16, x5, x19, x16\n"
".inst 0xc0040c80 // mova za.d[x8, #0], { z4.d-z7.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
".inst 0xc0040c81 // mova za.d[x8, #1], { z4.d-z7.d }\n"
- "mov x22, #0x4\n"
- "ldp x15, x14, [x25], #0x10\n"
+ "mov x21, #0x4\n"
+ "ldp x14, x13, [x24], #0x10\n"
".inst 0xc0040c82 // mova za.d[x8, #2], { z4.d-z7.d }\n"
- "ldp x13, x11, [x20], #0x10\n"
+ "ldp x11, x10, [x19], #0x10\n"
".inst 0xc0040c83 // mova za.d[x8, #3], { z4.d-z7.d }\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
+ "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
".inst 0xc0040c84 // mova za.d[x8, #4], { z4.d-z7.d }\n"
- "ldp x10, x9, [x25], #0x10\n"
- "ldp x28, x27, [x20], #0x10\n"
- "cbz x21, 5f\n"
- "cmp x21, x22\n"
- "csel x20, x21, x22, LT\n"
- "sub x21, x21, x20\n"
- "sub x22, x22, x20\n"
- "cbz x21, 5f\n"
+ "ldp x9, x28, [x24], #0x10\n"
+ "ldp x27, x26, [x19], #0x10\n"
+ "cbz x20, 5f\n"
+ "cmp x20, x21\n"
+ "csel x19, x20, x21, LT\n"
+ "sub x20, x20, x19\n"
+ "sub x21, x21, x19\n"
+ "cbz x20, 5f\n"
".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
- "and x22, x21, #0x1\n"
- "add x21, x21, #0x1\n"
+ "and x21, x20, #0x1\n"
+ "add x20, x20, #0x1\n"
".inst 0xc1b6cbd8 // fclamp { z24.s-z27.s }, z30.s, z22.s\n"
- "lsr x21, x21, #0x1\n"
- "sub x16, x16, x21\n"
+ "lsr x20, x20, #0x1\n"
+ "sub x15, x15, x20\n"
"4:" // Left padding
- "subs x21, x21, #0x1\n"
- "st1w { z24.s }, p1, [x15]\n"
- "add x15, x15, x13, LSL #2\n"
- "st1w { z25.s }, p1, [x14]\n"
+ "subs x20, x20, #0x1\n"
+ "st1w { z24.s }, p1, [x14]\n"
"add x14, x14, x11, LSL #2\n"
- "st1w { z26.s }, p1, [x10]\n"
- "add x10, x10, x28, LSL #2\n"
- "st1w { z27.s }, p1, [x9]\n"
+ "st1w { z25.s }, p1, [x13]\n"
+ "add x13, x13, x10, LSL #2\n"
+ "st1w { z26.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
+ "st1w { z27.s }, p1, [x28]\n"
+ "add x28, x28, x26, LSL #2\n"
"bgt 4b\n"
"5:" // Left padding: End
- "adds XZR, x4, x3\n"
+ "adds XZR, x5, x4\n"
"bne 12f\n"
- "cbz x22, 10f\n"
- "cmp x22, #0x1\n"
- "sub x7, x7, x22\n"
+ "cbz x21, 10f\n"
+ "cmp x21, #0x1\n"
+ "sub x17, x17, x21\n"
"beq 9f\n"
- "cmp x22, #0x2\n"
+ "cmp x21, #0x2\n"
"beq 8f\n"
- "cmp x22, #0x3\n"
+ "cmp x21, #0x3\n"
"beq 7f\n"
"6:" // Unpadded: 4 priming loads
- "add x21, x17, %x[ld_in_row], LSL #2\n"
- "ld1w { z23.s }, p1/Z, [x17]\n"
+ "add x20, x16, %x[ld_in_row], LSL #2\n"
+ "ld1w { z23.s }, p1/Z, [x16]\n"
".inst 0x658aaaea // bfcvt z10.h, p2/M, z23.s\n"
- "addvl x20, SP, #12\n"
+ "addvl x19, SP, #12\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa0a // bfcvtnt z10.h, p2/M, z16.s\n"
+ "add x16, x16, %x[ld_in_col], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa0b // bfcvtnt z11.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
+ ".inst 0xa1402a61 // ld1h { z1.h, z9.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc1311150 // bfdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
+ ".inst 0xc1391170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ ".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
+ "ld1h { z2.h }, p2/Z, [x19, #2, MUL VL]\n"
+ ".inst 0xc1321190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z2.h\n"
+ "7:" // Unpadded: 3 priming loads
+ "add x20, x16, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x16]\n"
+ ".inst 0x658aaa0a // bfcvt z10.h, p2/M, z16.s\n"
+ "addvl x19, SP, #9\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa0a // bfcvtnt z10.h, p2/M, z16.s\n"
+ "add x16, x16, %x[ld_in_col], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa0b // bfcvtnt z11.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
+ ".inst 0xa1402a61 // ld1h { z1.h, z9.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc1311150 // bfdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
+ ".inst 0xc1391170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ ".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
+ "ld1h { z2.h }, p2/Z, [x19, #2, MUL VL]\n"
+ ".inst 0xc1321190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z2.h\n"
+ "8:" // Unpadded: 2 priming loads
+ "add x21, x16, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x16]\n"
+ ".inst 0x658aaa0a // bfcvt z10.h, p2/M, z16.s\n"
+ "addvl x20, SP, #6\n"
"ld1w { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0a // bfcvtnt z10.h, p2/M, z16.s\n"
- "add x17, x17, %x[ld_in_col], LSL #2\n"
+ "addvl x19, SP, #12\n"
"ld1w { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
+ "add x16, x16, %x[ld_in_col], LSL #2\n"
"ld1w { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0b // bfcvtnt z11.h, p2/M, z16.s\n"
@@ -295,25 +376,31 @@ void sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za_impl(
".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
".inst 0xc1311150 // bfdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
"ld1w { z16.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0xc1391170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
+ ".inst 0xa1402a61 // ld1h { z1.h, z9.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc1311151 // bfdot za.s[x8, 1], { z10.h-z13.h }, z1.h\n"
"ld1w { z16.s }, p1/Z, [x21]\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
+ ".inst 0xc1391171 // bfdot za.s[x8, 1], { z11.h-z14.h }, z9.h\n"
"ld1h { z2.h }, p2/Z, [x20, #2, MUL VL]\n"
".inst 0xc1321190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z2.h\n"
- "7:" // Unpadded: 3 priming loads
- "add x21, x17, %x[ld_in_row], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x17]\n"
+ "ld1h { z2.h }, p2/Z, [x19, #2, MUL VL]\n"
+ ".inst 0xc1321191 // bfdot za.s[x8, 1], { z12.h-z15.h }, z2.h\n"
+ "9:" // Unpadded: 1 priming loads
+ "add x21, x16, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x16]\n"
".inst 0x658aaa0a // bfcvt z10.h, p2/M, z16.s\n"
- "addvl x20, SP, #9\n"
+ "addvl x20, SP, #3\n"
"ld1w { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0a // bfcvtnt z10.h, p2/M, z16.s\n"
- "add x17, x17, %x[ld_in_col], LSL #2\n"
+ "addvl x19, SP, #9\n"
"ld1w { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
+ "add x16, x16, %x[ld_in_col], LSL #2\n"
"ld1w { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0b // bfcvtnt z11.h, p2/M, z16.s\n"
@@ -335,256 +422,169 @@ void sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za_impl(
".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
".inst 0xc1311150 // bfdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
"ld1w { z16.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
- ".inst 0xc1391170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
- ".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
- "ld1h { z2.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc1321190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z2.h\n"
- "8:" // Unpadded: 2 priming loads
- "add x22, x17, %x[ld_in_row], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x17]\n"
- ".inst 0x658aaa0a // bfcvt z10.h, p2/M, z16.s\n"
- "addvl x21, SP, #6\n"
- "ld1w { z16.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaa0a // bfcvtnt z10.h, p2/M, z16.s\n"
- "addvl x20, SP, #12\n"
- "ld1w { z16.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
- "add x17, x17, %x[ld_in_col], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaa0b // bfcvtnt z11.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
- ".inst 0xa1402aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc1311150 // bfdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
- "ld1w { z16.s }, p1/Z, [x22]\n"
".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0xc1391170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
- ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a61 // ld1h { z1.h, z9.h }, pn10.b/Z, [x19]\n"
".inst 0xc1311151 // bfdot za.s[x8, 1], { z10.h-z13.h }, z1.h\n"
- "ld1w { z16.s }, p1/Z, [x22]\n"
+ "ld1w { z16.s }, p1/Z, [x21]\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
".inst 0xc1391171 // bfdot za.s[x8, 1], { z11.h-z14.h }, z9.h\n"
- "ld1h { z2.h }, p2/Z, [x21, #2, MUL VL]\n"
- ".inst 0xc1321190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z2.h\n"
"ld1h { z2.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc1321191 // bfdot za.s[x8, 1], { z12.h-z15.h }, z2.h\n"
- "9:" // Unpadded: 1 priming loads
- "add x22, x17, %x[ld_in_row], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x17]\n"
- ".inst 0x658aaa0a // bfcvt z10.h, p2/M, z16.s\n"
- "addvl x21, SP, #3\n"
- "ld1w { z16.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaa0a // bfcvtnt z10.h, p2/M, z16.s\n"
- "addvl x20, SP, #9\n"
- "ld1w { z16.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
- "add x17, x17, %x[ld_in_col], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaa0b // bfcvtnt z11.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
- ".inst 0xa1402aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc1311150 // bfdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
- "ld1w { z16.s }, p1/Z, [x22]\n"
- ".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1391170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
- ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc1311151 // bfdot za.s[x8, 1], { z10.h-z13.h }, z1.h\n"
- "ld1w { z16.s }, p1/Z, [x22]\n"
- ".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
- ".inst 0xc1391171 // bfdot za.s[x8, 1], { z11.h-z14.h }, z9.h\n"
- "ld1h { z2.h }, p2/Z, [x21, #2, MUL VL]\n"
".inst 0xc1321190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z2.h\n"
- "ld1h { z2.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z2.h }, p2/Z, [x19, #2, MUL VL]\n"
".inst 0xc1321191 // bfdot za.s[x8, 1], { z12.h-z15.h }, z2.h\n"
"10:" // Unpadded: 0 priming loads
- "cmp x7, #0x2\n"
+ "cmp x17, #0x2\n"
".inst 0xa1402be1 // ld1h { z1.h, z9.h }, pn10.b/Z, [SP]\n"
"ld1h { z2.h }, p2/Z, [SP, #2, MUL VL]\n"
"blt 20f\n"
- "add x21, x17, %x[ld_in_row], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x17]\n"
+ "add x20, x16, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x16]\n"
".inst 0x658aaa0a // bfcvt z10.h, p2/M, z16.s\n"
- "sub x7, x7, #0x2\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- "sub x16, x16, #0x1\n"
+ "sub x17, x17, #0x2\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "sub x15, x15, #0x1\n"
".inst 0x648aaa0a // bfcvtnt z10.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
- "lsr x20, x7, #0x1\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- "cmp x20, x16\n"
+ "lsr x19, x17, #0x1\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "cmp x19, x15\n"
".inst 0x648aaa0b // bfcvtnt z11.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
- "csel x26, x20, x16, LT\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "csel x25, x19, x15, LT\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
- "add x17, x17, %x[ld_in_col], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "add x16, x16, %x[ld_in_col], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
- "and x7, x7, #0x1\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "and x17, x17, #0x1\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
- "sub x16, x16, x26\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "sub x15, x15, x25\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
- "cbz x26, 19f\n"
+ "cbz x25, 19f\n"
"11:" // Unpadded: Main loop
".inst 0xc1311150 // bfdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
- "addvl x25, SP, #6\n"
- "addvl x24, SP, #12\n"
- "ld1w { z18.s }, p1/Z, [x17]\n"
+ "addvl x24, SP, #6\n"
+ "addvl x23, SP, #12\n"
+ "ld1w { z18.s }, p1/Z, [x16]\n"
".inst 0xc1391170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
- ".inst 0xa1402b21 // ld1h { z1.h, z9.h }, pn10.b/Z, [x25]\n"
- "add x23, x17, %x[ld_in_row], LSL #2\n"
- "addvl x22, SP, #3\n"
+ ".inst 0xa1402b01 // ld1h { z1.h, z9.h }, pn10.b/Z, [x24]\n"
+ "add x22, x16, %x[ld_in_row], LSL #2\n"
+ "addvl x21, SP, #3\n"
".inst 0xc1311151 // bfdot za.s[x8, 1], { z10.h-z13.h }, z1.h\n"
- "ld1w { z17.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row], LSL #2\n"
- "add x17, x17, %x[ld_in_col], LSL #2\n"
+ "ld1w { z17.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "add x16, x16, %x[ld_in_col], LSL #2\n"
".inst 0xc1391171 // bfdot za.s[x8, 1], { z11.h-z14.h }, z9.h\n"
- ".inst 0xa1402b01 // ld1h { z1.h, z9.h }, pn10.b/Z, [x24]\n"
- "addvl x21, SP, #9\n"
- "add x20, x17, %x[ld_in_row], LSL #2\n"
+ ".inst 0xa1402ae1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x23]\n"
+ "addvl x20, SP, #9\n"
+ "add x19, x16, %x[ld_in_row], LSL #2\n"
".inst 0xc1311152 // bfdot za.s[x8, 2], { z10.h-z13.h }, z1.h\n"
- "ld1w { z16.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa4a // bfcvt z10.h, p2/M, z18.s\n"
".inst 0xc1321190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z2.h\n"
- "ld1h { z2.h }, p2/Z, [x25, #2, MUL VL]\n"
+ "ld1h { z2.h }, p2/Z, [x24, #2, MUL VL]\n"
".inst 0x648aaa2a // bfcvtnt z10.h, p2/M, z17.s\n"
- "subs x26, x26, #0x1\n"
- "ld1w { z17.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row], LSL #2\n"
+ "subs x25, x25, #0x1\n"
+ "ld1w { z17.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row], LSL #2\n"
".inst 0xc1391172 // bfdot za.s[x8, 2], { z11.h-z14.h }, z9.h\n"
".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row], LSL #2\n"
".inst 0xc1321191 // bfdot za.s[x8, 1], { z12.h-z15.h }, z2.h\n"
".inst 0x648aaa2b // bfcvtnt z11.h, p2/M, z17.s\n"
- "ld1w { z17.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row], LSL #2\n"
+ "ld1w { z17.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row], LSL #2\n"
".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
".inst 0xc1b6cbd8 // fclamp { z24.s-z27.s }, z30.s, z22.s\n"
- "ld1h { z2.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z2.h }, p2/Z, [x23, #2, MUL VL]\n"
".inst 0xc1321192 // bfdot za.s[x8, 2], { z12.h-z15.h }, z2.h\n"
".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
"add x8, x8, #0x1\n"
- "ld1w { z16.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
".inst 0x648aaa2c // bfcvtnt z12.h, p2/M, z17.s\n"
- "ld1w { z16.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
- "st1w { z24.s }, p1, [x15]\n"
- "ld1w { z16.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row], LSL #2\n"
+ "st1w { z24.s }, p1, [x14]\n"
+ "ld1w { z16.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
- "add x15, x15, x13, LSL #2\n"
- ".inst 0xa1402ac1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x22]\n"
- ".inst 0xc1311150 // bfdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
- "st1w { z25.s }, p1, [x14]\n"
"add x14, x14, x11, LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x23]\n"
+ ".inst 0xa1402aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xc1311150 // bfdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
+ "st1w { z25.s }, p1, [x13]\n"
+ "add x13, x13, x10, LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x22]\n"
".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
- "add x23, x23, %x[ld_in_row], LSL #2\n"
+ "add x22, x22, %x[ld_in_row], LSL #2\n"
".inst 0xc1391170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
- ".inst 0xa1402aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
".inst 0xc1311151 // bfdot za.s[x8, 1], { z10.h-z13.h }, z1.h\n"
- "st1w { z26.s }, p1, [x10]\n"
- "add x10, x10, x28, LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x23]\n"
+ "st1w { z26.s }, p1, [x9]\n"
+ "add x9, x9, x27, LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x22]\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
".inst 0xc1391171 // bfdot za.s[x8, 1], { z11.h-z14.h }, z9.h\n"
- "ld1w { z16.s }, p1/Z, [x17]\n"
+ "ld1w { z16.s }, p1/Z, [x16]\n"
".inst 0x658aaa0a // bfcvt z10.h, p2/M, z16.s\n"
- "st1w { z27.s }, p1, [x9]\n"
- "add x9, x9, x27, LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "st1w { z27.s }, p1, [x28]\n"
+ "add x28, x28, x26, LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0xc0040c84 // mova za.d[x8, #4], { z4.d-z7.d }\n"
".inst 0x648aaa0a // bfcvtnt z10.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
- "add x17, x17, %x[ld_in_col], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x16, x16, %x[ld_in_col], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0b // bfcvtnt z11.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1h { z2.h }, p2/Z, [x22, #2, MUL VL]\n"
- ".inst 0xc1321190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z2.h\n"
- "ld1w { z19.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z18.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
"ld1h { z2.h }, p2/Z, [x21, #2, MUL VL]\n"
+ ".inst 0xc1321190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z2.h\n"
+ "ld1w { z19.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1w { z18.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "ld1h { z2.h }, p2/Z, [x20, #2, MUL VL]\n"
".inst 0xc1321191 // bfdot za.s[x8, 1], { z12.h-z15.h }, z2.h\n"
".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa2d // bfcvt z13.h, p2/M, z17.s\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
- "ld1w { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa6c // bfcvtnt z12.h, p2/M, z19.s\n"
".inst 0x648aaa4d // bfcvtnt z13.h, p2/M, z18.s\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
".inst 0x648aaa2e // bfcvtnt z14.h, p2/M, z17.s\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
".inst 0xa1402be1 // ld1h { z1.h, z9.h }, pn10.b/Z, [SP]\n"
@@ -592,434 +592,434 @@ void sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za_impl(
"bgt 11b\n"
"b 19f\n"
"12:" // Padded
- "cbz x22, 17f\n"
- "cmp x22, #0x1\n"
- "sub x7, x7, x22\n"
+ "cbz x21, 17f\n"
+ "cmp x21, #0x1\n"
+ "sub x17, x17, x21\n"
"beq 16f\n"
- "cmp x22, #0x2\n"
+ "cmp x21, #0x2\n"
"beq 15f\n"
- "cmp x22, #0x3\n"
+ "cmp x21, #0x3\n"
"beq 14f\n"
"13:" // Padded: 4 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x17]\n"
+ "ld1w { z16.s }, p0/Z, [x16]\n"
".inst 0x658aaa0a // bfcvt z10.h, p2/M, z16.s\n"
- "add x21, x17, %x[ld_in_row], LSL #2\n"
+ "add x20, x16, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x21]\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x648aaa0a // bfcvtnt z10.h, p2/M, z16.s\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x21]\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z16.s }, p0/Z, [x21]\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x648aaa0b // bfcvtnt z11.h, p2/M, z16.s\n"
"mov x12, #0x4\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x8\n"
- "ld1w { z16.s }, p0/Z, [x21]\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x21]\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
- "addvl x20, SP, #12\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
+ "addvl x19, SP, #12\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xa1402a61 // ld1h { z1.h, z9.h }, pn10.b/Z, [x19]\n"
".inst 0xc1311150 // bfdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x21]\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
".inst 0xc1391170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
- "ld1h { z2.h }, p2/Z, [x20, #2, MUL VL]\n"
- "add x17, x17, %x[ld_in_col], LSL #2\n"
+ "ld1h { z2.h }, p2/Z, [x19, #2, MUL VL]\n"
+ "add x16, x16, %x[ld_in_col], LSL #2\n"
".inst 0xc1321190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z2.h\n"
"14:" // Padded: 3 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x17]\n"
+ "ld1w { z16.s }, p0/Z, [x16]\n"
".inst 0x658aaa0a // bfcvt z10.h, p2/M, z16.s\n"
- "add x21, x17, %x[ld_in_row], LSL #2\n"
+ "add x20, x16, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x21]\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x648aaa0a // bfcvtnt z10.h, p2/M, z16.s\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x21]\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z16.s }, p0/Z, [x21]\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x648aaa0b // bfcvtnt z11.h, p2/M, z16.s\n"
"mov x12, #0x4\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x8\n"
- "ld1w { z16.s }, p0/Z, [x21]\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x21]\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
- "addvl x20, SP, #9\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
+ "addvl x19, SP, #9\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xa1402a61 // ld1h { z1.h, z9.h }, pn10.b/Z, [x19]\n"
".inst 0xc1311150 // bfdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x21]\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
".inst 0xc1391170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
- "ld1h { z2.h }, p2/Z, [x20, #2, MUL VL]\n"
- "add x17, x17, %x[ld_in_col], LSL #2\n"
+ "ld1h { z2.h }, p2/Z, [x19, #2, MUL VL]\n"
+ "add x16, x16, %x[ld_in_col], LSL #2\n"
".inst 0xc1321190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z2.h\n"
"15:" // Padded: 2 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x17]\n"
+ "ld1w { z16.s }, p0/Z, [x16]\n"
".inst 0x658aaa0a // bfcvt z10.h, p2/M, z16.s\n"
- "add x22, x17, %x[ld_in_row], LSL #2\n"
+ "add x21, x16, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x648aaa0a // bfcvtnt z10.h, p2/M, z16.s\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x648aaa0b // bfcvtnt z11.h, p2/M, z16.s\n"
"mov x12, #0x4\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x8\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
- "addvl x21, SP, #6\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0xa1402aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21]\n"
+ "addvl x20, SP, #6\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
".inst 0xc1311150 // bfdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
- "addvl x20, SP, #12\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "addvl x19, SP, #12\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc1391170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
- ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
+ ".inst 0xa1402a61 // ld1h { z1.h, z9.h }, pn10.b/Z, [x19]\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0xc1311151 // bfdot za.s[x8, 1], { z10.h-z13.h }, z1.h\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
- "add x17, x17, %x[ld_in_col], LSL #2\n"
- "ld1h { z2.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "add x16, x16, %x[ld_in_col], LSL #2\n"
+ "ld1h { z2.h }, p2/Z, [x20, #2, MUL VL]\n"
".inst 0xc1391171 // bfdot za.s[x8, 1], { z11.h-z14.h }, z9.h\n"
".inst 0xc1321190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z2.h\n"
- "ld1h { z2.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z2.h }, p2/Z, [x19, #2, MUL VL]\n"
".inst 0xc1321191 // bfdot za.s[x8, 1], { z12.h-z15.h }, z2.h\n"
"16:" // Padded: 1 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x17]\n"
+ "ld1w { z16.s }, p0/Z, [x16]\n"
".inst 0x658aaa0a // bfcvt z10.h, p2/M, z16.s\n"
- "add x22, x17, %x[ld_in_row], LSL #2\n"
+ "add x21, x16, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x648aaa0a // bfcvtnt z10.h, p2/M, z16.s\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x648aaa0b // bfcvtnt z11.h, p2/M, z16.s\n"
"mov x12, #0x4\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x8\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
- "addvl x21, SP, #3\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0xa1402aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21]\n"
+ "addvl x20, SP, #3\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
".inst 0xc1311150 // bfdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
- "addvl x20, SP, #9\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "addvl x19, SP, #9\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc1391170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
- ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
+ ".inst 0xa1402a61 // ld1h { z1.h, z9.h }, pn10.b/Z, [x19]\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0xc1311151 // bfdot za.s[x8, 1], { z10.h-z13.h }, z1.h\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
- "add x17, x17, %x[ld_in_col], LSL #2\n"
- "ld1h { z2.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "add x16, x16, %x[ld_in_col], LSL #2\n"
+ "ld1h { z2.h }, p2/Z, [x20, #2, MUL VL]\n"
".inst 0xc1391171 // bfdot za.s[x8, 1], { z11.h-z14.h }, z9.h\n"
".inst 0xc1321190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z2.h\n"
- "ld1h { z2.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z2.h }, p2/Z, [x19, #2, MUL VL]\n"
".inst 0xc1321191 // bfdot za.s[x8, 1], { z12.h-z15.h }, z2.h\n"
"17:" // Padded: 0 priming loads
- "cmp x7, #0x2\n"
+ "cmp x17, #0x2\n"
".inst 0xa1402be1 // ld1h { z1.h, z9.h }, pn10.b/Z, [SP]\n"
"ld1h { z2.h }, p2/Z, [SP, #2, MUL VL]\n"
"blt 20f\n"
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x17]\n"
+ "ld1w { z16.s }, p0/Z, [x16]\n"
".inst 0x658aaa0a // bfcvt z10.h, p2/M, z16.s\n"
- "add x20, x17, %x[ld_in_row], LSL #2\n"
+ "add x19, x16, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x648aaa0a // bfcvtnt z10.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x648aaa0b // bfcvtnt z11.h, p2/M, z16.s\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x8\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
- "sub x7, x7, #0x2\n"
- "sub x16, x16, #0x1\n"
- "lsr x20, x7, #0x1\n"
- "cmp x20, x16\n"
- "csel x24, x20, x16, LT\n"
- "add x17, x17, %x[ld_in_col], LSL #2\n"
- "and x7, x7, #0x1\n"
- "sub x16, x16, x24\n"
- "cbz x24, 19f\n"
+ "sub x17, x17, #0x2\n"
+ "sub x15, x15, #0x1\n"
+ "lsr x19, x17, #0x1\n"
+ "cmp x19, x15\n"
+ "csel x23, x19, x15, LT\n"
+ "add x16, x16, %x[ld_in_col], LSL #2\n"
+ "and x17, x17, #0x1\n"
+ "sub x15, x15, x23\n"
+ "cbz x23, 19f\n"
"18:" // Padded: Main loop
".inst 0xc1311150 // bfdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
- "addvl x23, SP, #6\n"
- "addvl x21, SP, #12\n"
+ "addvl x22, SP, #6\n"
+ "addvl x20, SP, #12\n"
".inst 0xc1391170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
- ".inst 0xa1402ae1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x23]\n"
+ ".inst 0xa1402ac1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x22]\n"
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc1311151 // bfdot za.s[x8, 1], { z10.h-z13.h }, z1.h\n"
- "ld1w { z16.s }, p0/Z, [x17]\n"
- "add x20, x17, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x16]\n"
+ "add x19, x16, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
".inst 0xc1391171 // bfdot za.s[x8, 1], { z11.h-z14.h }, z9.h\n"
- ".inst 0xa1402aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21]\n"
- "addvl x22, SP, #3\n"
- "add x17, x17, %x[ld_in_col], LSL #2\n"
+ ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
+ "addvl x21, SP, #3\n"
+ "add x16, x16, %x[ld_in_col], LSL #2\n"
".inst 0xc1311152 // bfdot za.s[x8, 2], { z10.h-z13.h }, z1.h\n"
- "ld1w { z20.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z20.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z17.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z17.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
".inst 0xc1321190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z2.h\n"
"mov x12, #0x4\n"
- "ld1h { z2.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z2.h }, p2/Z, [x22, #2, MUL VL]\n"
".inst 0xc1391172 // bfdot za.s[x8, 2], { z11.h-z14.h }, z9.h\n"
".inst 0x658aaa0a // bfcvt z10.h, p2/M, z16.s\n"
- "ld1w { z19.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z19.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc1321191 // bfdot za.s[x8, 1], { z12.h-z15.h }, z2.h\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
".inst 0x658aaa2b // bfcvt z11.h, p2/M, z17.s\n"
- "ld1w { z18.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z18.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0x648aaa8a // bfcvtnt z10.h, p2/M, z20.s\n"
- "ld1h { z2.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1h { z2.h }, p2/Z, [x20, #2, MUL VL]\n"
".inst 0xc1321192 // bfdot za.s[x8, 2], { z12.h-z15.h }, z2.h\n"
".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
- "addvl x21, SP, #9\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "addvl x20, SP, #9\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
"mov x12, #0x8\n"
- "ld1w { z17.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z17.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa6b // bfcvtnt z11.h, p2/M, z19.s\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa4c // bfcvtnt z12.h, p2/M, z18.s\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
".inst 0x648aaa2d // bfcvtnt z13.h, p2/M, z17.s\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
- ".inst 0xa1402ac1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x22]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ ".inst 0xa1402aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21]\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
"add x8, x8, #0x1\n"
".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
".inst 0xc1311150 // bfdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z17.s }, p0/Z, [x20]\n"
+ "ld1w { z17.s }, p0/Z, [x19]\n"
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc1391170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
- ".inst 0xa1402aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21]\n"
- "ld1w { z16.s }, p0/Z, [x17]\n"
- "add x20, x17, %x[ld_in_row], LSL #2\n"
+ ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x16]\n"
+ "add x19, x16, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
".inst 0xc1311151 // bfdot za.s[x8, 1], { z10.h-z13.h }, z1.h\n"
- "ld1w { z21.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z21.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0x658aaa2f // bfcvt z15.h, p2/M, z17.s\n"
- "ld1w { z17.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z17.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
".inst 0xc1391171 // bfdot za.s[x8, 1], { z11.h-z14.h }, z9.h\n"
"mov x12, #0x4\n"
- "ld1w { z20.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z20.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0a // bfcvt z10.h, p2/M, z16.s\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa2b // bfcvt z11.h, p2/M, z17.s\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1h { z2.h }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z2.h }, p2/Z, [x21, #2, MUL VL]\n"
".inst 0xc1321190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z2.h\n"
- "subs x24, x24, #0x1\n"
- "ld1w { z19.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "subs x23, x23, #0x1\n"
+ "ld1w { z19.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc1b6cbd8 // fclamp { z24.s-z27.s }, z30.s, z22.s\n"
- "ld1w { z17.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z17.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "st1w { z24.s }, p1, [x15]\n"
+ "st1w { z24.s }, p1, [x14]\n"
"mov x12, #0x8\n"
- "ld1w { z18.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "st1w { z25.s }, p1, [x14]\n"
+ "ld1w { z18.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
+ "st1w { z25.s }, p1, [x13]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1h { z2.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1h { z2.h }, p2/Z, [x20, #2, MUL VL]\n"
".inst 0xc1321191 // bfdot za.s[x8, 1], { z12.h-z15.h }, z2.h\n"
".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
".inst 0x658aaa2d // bfcvt z13.h, p2/M, z17.s\n"
- "ld1w { z17.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z17.s }, p0/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x15, x15, x13, LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
"add x14, x14, x11, LSL #2\n"
- "st1w { z26.s }, p1, [x10]\n"
- "add x10, x10, x28, LSL #2\n"
- "st1w { z27.s }, p1, [x9]\n"
+ "add x13, x13, x10, LSL #2\n"
+ "st1w { z26.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
+ "st1w { z27.s }, p1, [x28]\n"
+ "add x28, x28, x26, LSL #2\n"
".inst 0xc0040c84 // mova za.d[x8, #4], { z4.d-z7.d }\n"
".inst 0xa1402be1 // ld1h { z1.h, z9.h }, pn10.b/Z, [SP]\n"
".inst 0x648aaaaa // bfcvtnt z10.h, p2/M, z21.s\n"
".inst 0x648aaa8b // bfcvtnt z11.h, p2/M, z20.s\n"
- "add x17, x17, %x[ld_in_col], LSL #2\n"
+ "add x16, x16, %x[ld_in_col], LSL #2\n"
"ld1h { z2.h }, p2/Z, [SP, #2, MUL VL]\n"
".inst 0x648aaa6c // bfcvtnt z12.h, p2/M, z19.s\n"
".inst 0x648aaa4d // bfcvtnt z13.h, p2/M, z18.s\n"
@@ -1028,215 +1028,215 @@ void sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za_impl(
"bgt 18b\n"
"19:" // Main loop tail
".inst 0xc1311150 // bfdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
- "addvl x24, SP, #6\n"
- "addvl x23, SP, #12\n"
+ "addvl x23, SP, #6\n"
+ "addvl x22, SP, #12\n"
".inst 0xc1391170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
- ".inst 0xa1402b01 // ld1h { z1.h, z9.h }, pn10.b/Z, [x24]\n"
+ ".inst 0xa1402ae1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x23]\n"
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc1311151 // bfdot za.s[x8, 1], { z10.h-z13.h }, z1.h\n"
- "ld1w { z16.s }, p0/Z, [x17]\n"
- "add x22, x17, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x16]\n"
+ "add x21, x16, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
".inst 0xc1391171 // bfdot za.s[x8, 1], { z11.h-z14.h }, z9.h\n"
- ".inst 0xa1402ae1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x23]\n"
- "addvl x21, SP, #3\n"
- "addvl x20, SP, #9\n"
+ ".inst 0xa1402ac1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x22]\n"
+ "addvl x20, SP, #3\n"
+ "addvl x19, SP, #9\n"
".inst 0xc1311152 // bfdot za.s[x8, 2], { z10.h-z13.h }, z1.h\n"
- "ld1w { z20.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "ld1w { z20.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z17.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "ld1w { z17.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
".inst 0xc1321190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z2.h\n"
"mov x12, #0x4\n"
- "ld1h { z2.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z2.h }, p2/Z, [x23, #2, MUL VL]\n"
".inst 0xc1391172 // bfdot za.s[x8, 2], { z11.h-z14.h }, z9.h\n"
".inst 0x658aaa0a // bfcvt z10.h, p2/M, z16.s\n"
- "ld1w { z19.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "ld1w { z19.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc1321191 // bfdot za.s[x8, 1], { z12.h-z15.h }, z2.h\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
".inst 0x658aaa2b // bfcvt z11.h, p2/M, z17.s\n"
- "ld1w { z18.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "ld1w { z18.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0x648aaa8a // bfcvtnt z10.h, p2/M, z20.s\n"
- "ld1h { z2.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z2.h }, p2/Z, [x22, #2, MUL VL]\n"
".inst 0xc1321192 // bfdot za.s[x8, 2], { z12.h-z15.h }, z2.h\n"
".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
- "add x17, x17, %x[ld_in_col], LSL #2\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "add x16, x16, %x[ld_in_col], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
"mov x12, #0x8\n"
- "ld1w { z17.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "ld1w { z17.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa6b // bfcvtnt z11.h, p2/M, z19.s\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa4c // bfcvtnt z12.h, p2/M, z18.s\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
".inst 0x648aaa2d // bfcvtnt z13.h, p2/M, z17.s\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
- ".inst 0xa1402aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21]\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
+ ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
"add x8, x8, #0x1\n"
".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
".inst 0xc1311150 // bfdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0xc1391170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
- ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a61 // ld1h { z1.h, z9.h }, pn10.b/Z, [x19]\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
".inst 0xc1311151 // bfdot za.s[x8, 1], { z10.h-z13.h }, z1.h\n"
- "ld1h { z2.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1h { z2.h }, p2/Z, [x20, #2, MUL VL]\n"
".inst 0xc1b6cbd8 // fclamp { z24.s-z27.s }, z30.s, z22.s\n"
".inst 0xc1391171 // bfdot za.s[x8, 1], { z11.h-z14.h }, z9.h\n"
- "st1w { z24.s }, p1, [x15]\n"
- "add x15, x15, x13, LSL #2\n"
+ "st1w { z24.s }, p1, [x14]\n"
+ "add x14, x14, x11, LSL #2\n"
".inst 0xa1402be1 // ld1h { z1.h, z9.h }, pn10.b/Z, [SP]\n"
".inst 0xc1321190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z2.h\n"
- "ld1h { z2.h }, p2/Z, [x20, #2, MUL VL]\n"
- "st1w { z25.s }, p1, [x14]\n"
- "add x14, x14, x11, LSL #2\n"
- "st1w { z26.s }, p1, [x10]\n"
- "add x10, x10, x28, LSL #2\n"
- ".inst 0xc0040c84 // mova za.d[x8, #4], { z4.d-z7.d }\n"
- "st1w { z27.s }, p1, [x9]\n"
+ "ld1h { z2.h }, p2/Z, [x19, #2, MUL VL]\n"
+ "st1w { z25.s }, p1, [x13]\n"
+ "add x13, x13, x10, LSL #2\n"
+ "st1w { z26.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
+ ".inst 0xc0040c84 // mova za.d[x8, #4], { z4.d-z7.d }\n"
+ "st1w { z27.s }, p1, [x28]\n"
+ "add x28, x28, x26, LSL #2\n"
".inst 0xc1321191 // bfdot za.s[x8, 1], { z12.h-z15.h }, z2.h\n"
"ld1h { z2.h }, p2/Z, [SP, #2, MUL VL]\n"
"20:" // Main loop skip tail
- "cbz x7, 21f\n" // Skip remainder inputs
+ "cbz x17, 21f\n" // Skip remainder inputs
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x17]\n"
+ "ld1w { z16.s }, p0/Z, [x16]\n"
".inst 0x658aaa0a // bfcvt z10.h, p2/M, z16.s\n"
- "add x22, x17, %x[ld_in_row], LSL #2\n"
+ "add x21, x16, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x648aaa0a // bfcvtnt z10.h, p2/M, z16.s\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x648aaa0b // bfcvtnt z11.h, p2/M, z16.s\n"
"mov x12, #0x4\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x8\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x648aaa0d // bfcvtnt z13.h, p2/M, z16.s\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x648aaa0e // bfcvtnt z14.h, p2/M, z16.s\n"
".inst 0xc1311150 // bfdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
- "addvl x21, SP, #6\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "addvl x20, SP, #6\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0xc1391170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z9.h\n"
- ".inst 0xa1402aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "addvl x20, SP, #12\n"
+ "addvl x19, SP, #12\n"
".inst 0xc1311151 // bfdot za.s[x8, 1], { z10.h-z13.h }, z1.h\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
- "sub x16, x16, #0x1\n"
+ "sub x15, x15, #0x1\n"
".inst 0xc1391171 // bfdot za.s[x8, 1], { z11.h-z14.h }, z9.h\n"
- ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a61 // ld1h { z1.h, z9.h }, pn10.b/Z, [x19]\n"
".inst 0xc1311152 // bfdot za.s[x8, 2], { z10.h-z13.h }, z1.h\n"
".inst 0xc1321190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z2.h\n"
- "ld1h { z2.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1h { z2.h }, p2/Z, [x20, #2, MUL VL]\n"
".inst 0xc1391172 // bfdot za.s[x8, 2], { z11.h-z14.h }, z9.h\n"
".inst 0xc1321191 // bfdot za.s[x8, 1], { z12.h-z15.h }, z2.h\n"
- "ld1h { z2.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z2.h }, p2/Z, [x19, #2, MUL VL]\n"
".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
".inst 0xc1b6cbd8 // fclamp { z24.s-z27.s }, z30.s, z22.s\n"
- "st1w { z24.s }, p1, [x15]\n"
- "add x15, x15, x13, LSL #2\n"
+ "st1w { z24.s }, p1, [x14]\n"
+ "add x14, x14, x11, LSL #2\n"
".inst 0xc1321192 // bfdot za.s[x8, 2], { z12.h-z15.h }, z2.h\n"
"add x8, x8, #0x1\n"
- "st1w { z25.s }, p1, [x14]\n"
- "add x14, x14, x11, LSL #2\n"
- "st1w { z26.s }, p1, [x10]\n"
- "add x10, x10, x28, LSL #2\n"
- ".inst 0xc0040c84 // mova za.d[x8, #4], { z4.d-z7.d }\n"
- "st1w { z27.s }, p1, [x9]\n"
+ "st1w { z25.s }, p1, [x13]\n"
+ "add x13, x13, x10, LSL #2\n"
+ "st1w { z26.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
+ ".inst 0xc0040c84 // mova za.d[x8, #4], { z4.d-z7.d }\n"
+ "st1w { z27.s }, p1, [x28]\n"
+ "add x28, x28, x26, LSL #2\n"
"21:" // Tail input: End
- "cbz x16, 23f\n"
+ "cbz x15, 23f\n"
"22:" // Right padding loop
".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
"add x8, x8, #0x1\n"
- "subs x16, x16, #0x1\n"
+ "subs x15, x15, #0x1\n"
".inst 0xc1b6cbd8 // fclamp { z24.s-z27.s }, z30.s, z22.s\n"
- "st1w { z24.s }, p1, [x15]\n"
- "add x15, x15, x13, LSL #2\n"
- ".inst 0xc0040c84 // mova za.d[x8, #4], { z4.d-z7.d }\n"
- "st1w { z25.s }, p1, [x14]\n"
+ "st1w { z24.s }, p1, [x14]\n"
"add x14, x14, x11, LSL #2\n"
- "st1w { z26.s }, p1, [x10]\n"
- "add x10, x10, x28, LSL #2\n"
- "st1w { z27.s }, p1, [x9]\n"
+ ".inst 0xc0040c84 // mova za.d[x8, #4], { z4.d-z7.d }\n"
+ "st1w { z25.s }, p1, [x13]\n"
+ "add x13, x13, x10, LSL #2\n"
+ "st1w { z26.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
+ "st1w { z27.s }, p1, [x28]\n"
+ "add x28, x28, x26, LSL #2\n"
"bgt 22b\n"
"23:" // End
- "ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
- "incb x20, ALL, MUL #16\n"
- "incb x20, ALL, MUL #9\n"
- "str x20, [%x[args], %[offsetof_Args_weights]]\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "incw x6\n"
- "whilelt p1.s, x6, x5\n"
- "ldr x17, [%x[args], %[offsetof_Args_inptr]]\n"
- "add x17, x17, x20, LSL #2\n"
- "str x17, [%x[args], %[offsetof_Args_inptr]]\n"
- "ldr x25, [%x[args], %[offsetof_Args_outptrs]]\n"
- "ldr x24, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
- "ldp x23, x22, [x25, #0x0]\n"
- "ldp x21, x20, [x24, #0x0]\n"
- "add x23, x23, x21, LSL #2\n"
+ "ldr x19, [%x[args], %[offsetof_Args_weights]]\n"
+ "incb x19, ALL, MUL #16\n"
+ "incb x19, ALL, MUL #9\n"
+ "str x19, [%x[args], %[offsetof_Args_weights]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+ "incw x7\n"
+ "whilelt p1.s, x7, x6\n"
+ "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
+ "add x16, x16, x19, LSL #2\n"
+ "str x16, [%x[args], %[offsetof_Args_inptr]]\n"
+ "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+ "ldp x22, x21, [x24, #0x0]\n"
+ "ldp x20, x19, [x23, #0x0]\n"
"add x22, x22, x20, LSL #2\n"
- "stp x23, x22, [x25, #0x0]\n"
- "ldp x23, x22, [x25, #0x10]\n"
- "ldp x21, x20, [x24, #0x10]\n"
- "add x23, x23, x21, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "stp x22, x21, [x24, #0x0]\n"
+ "ldp x22, x21, [x24, #0x10]\n"
+ "ldp x20, x19, [x23, #0x10]\n"
"add x22, x22, x20, LSL #2\n"
- "stp x23, x22, [x25, #0x10]\n"
+ "add x21, x21, x19, LSL #2\n"
+ "stp x22, x21, [x24, #0x10]\n"
"b.any 1b\n"
"addvl SP, SP, #15\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_bias] "I" (offsetof(Args, bias)), [offsetof_Args_clamp_max] "I" (offsetof(Args, clamp_max)), [offsetof_Args_clamp_min] "I" (offsetof(Args, clamp_min)), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s1_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s1_4rows_dot_za/generic.cpp
index 6c42c76683..7fee92ba29 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s1_4rows_dot_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s1_4rows_dot_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -69,18 +69,18 @@ void sme2_s8q_planar_3x3_s1_4rows_dot_za_impl(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "ldr x6, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "ldr x7, [%x[args], %[offsetof_Args_pad_bottom]]\n"
"ptrue p2.b\n"
- "mov x20, #0x6\n"
- "ldr x7, [%x[args], %[offsetof_Args_pad_top]]\n"
+ "mov x19, #0x6\n"
+ "ldr x8, [%x[args], %[offsetof_Args_pad_top]]\n"
"ld1rh { z24.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
- "sub x20, x20, x6\n"
+ "sub x19, x19, x7\n"
".inst 0x25207812 // ptrue pn10.b\n"
"ldr x17, [%x[args], %[offsetof_Args_n_channels]]\n"
"whilelt p1.s, XZR, x17\n"
- "whilelt p9.s, XZR, x20\n"
+ "whilelt p9.s, XZR, x19\n"
"ld1rw { z12.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
- "whilelt p8.s, XZR, x7\n"
+ "whilelt p8.s, XZR, x8\n"
"addvl SP, SP, #-12\n"
"ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
"neg z24.h, p2/M, z24.h\n"
@@ -90,377 +90,377 @@ void sme2_s8q_planar_3x3_s1_4rows_dot_za_impl(
"ld1rw { z22.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
"ld1rw { z26.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
"1:" // Channel loop
- "ldr x20, [%x[qp], %[offsetof_Requantize32_bias]]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_bias]]\n"
"mov z8.s, #0x0\n"
- "cbz x20, 2f\n"
- "ld1w { z8.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "cbz x19, 2f\n"
+ "ld1w { z8.s }, p1/Z, [x19, x16, LSL #2]\n"
"2:" // Load bias: Done
- "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
- "mov x20, x22\n"
- "ld1sb { z27.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
+ "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+ "mov x19, x21\n"
+ "ld1sb { z27.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
"ld1rh { z21.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
"mov z20.h, #0x0\n"
"sub z27.h, z27.h, z21.h\n"
- "incw x22\n"
- "ld1sb { z23.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
+ "incw x21\n"
+ "ld1sb { z23.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
"sub z23.h, z23.h, z21.h\n"
"trn1 z0.h, z20.h, z27.h\n"
- "ld1sb { z16.s }, p2/Z, [x20]\n"
+ "ld1sb { z16.s }, p2/Z, [x19]\n"
"sub z16.h, z16.h, z21.h\n"
- "mov x20, x22\n"
+ "mov x19, x21\n"
"trn1 z1.h, z27.h, z23.h\n"
- "ld1sb { z27.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
+ "ld1sb { z27.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
"trn1 z2.h, z23.h, z16.h\n"
"trn1 z3.h, z16.h, z20.h\n"
- "ld1sb { z23.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
+ "ld1sb { z23.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
"sub z27.h, z27.h, z21.h\n"
"sub z23.h, z23.h, z21.h\n"
- "ld1sb { z16.s }, p2/Z, [x20]\n"
+ "ld1sb { z16.s }, p2/Z, [x19]\n"
"sub z16.h, z16.h, z21.h\n"
- "addvl x21, SP, #12\n"
- "incw x22\n"
- "addvl x21, x21, #-4\n"
- "mov x20, x22\n"
- "st1h { z0.h }, p2, [x21]\n"
+ "addvl x20, SP, #12\n"
+ "incw x21\n"
+ "addvl x20, x20, #-4\n"
+ "mov x19, x21\n"
+ "st1h { z0.h }, p2, [x20]\n"
"trn1 z0.h, z20.h, z27.h\n"
- "st1h { z1.h }, p2, [x21, #1, MUL VL]\n"
+ "st1h { z1.h }, p2, [x20, #1, MUL VL]\n"
"trn1 z1.h, z27.h, z23.h\n"
- "ld1sb { z27.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
- "st1h { z2.h }, p2, [x21, #2, MUL VL]\n"
+ "ld1sb { z27.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
+ "st1h { z2.h }, p2, [x20, #2, MUL VL]\n"
"trn1 z2.h, z23.h, z16.h\n"
- "ld1sb { z23.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
- "st1h { z3.h }, p2, [x21, #3, MUL VL]\n"
+ "ld1sb { z23.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
+ "st1h { z3.h }, p2, [x20, #3, MUL VL]\n"
"trn1 z3.h, z16.h, z20.h\n"
- "ld1sb { z16.s }, p2/Z, [x20]\n"
- "ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+ "ld1sb { z16.s }, p2/Z, [x19]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
"sub z27.h, z27.h, z21.h\n"
"sub z23.h, z23.h, z21.h\n"
- "addvl x21, x21, #-4\n"
- "st1h { z0.h }, p2, [x21]\n"
+ "addvl x20, x20, #-4\n"
+ "st1h { z0.h }, p2, [x20]\n"
"sub z16.h, z16.h, z21.h\n"
- "st1h { z1.h }, p2, [x21, #1, MUL VL]\n"
+ "st1h { z1.h }, p2, [x20, #1, MUL VL]\n"
"mov z9.d, z8.d\n"
- "st1h { z2.h }, p2, [x21, #2, MUL VL]\n"
+ "st1h { z2.h }, p2, [x20, #2, MUL VL]\n"
"trn1 z0.h, z20.h, z27.h\n"
"trn1 z1.h, z27.h, z23.h\n"
- "st1h { z3.h }, p2, [x21, #3, MUL VL]\n"
- "addvl x21, x21, #-4\n"
+ "st1h { z3.h }, p2, [x20, #3, MUL VL]\n"
+ "addvl x20, x20, #-4\n"
"trn1 z2.h, z23.h, z16.h\n"
"trn1 z3.h, z16.h, z20.h\n"
- "st1h { z0.h }, p2, [x21]\n"
- "st1h { z1.h }, p2, [x21, #1, MUL VL]\n"
- "st1h { z2.h }, p2, [x21, #2, MUL VL]\n"
- "st1h { z3.h }, p2, [x21, #3, MUL VL]\n"
- "cbz x20, 3f\n"
- "ld1w { z10.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "st1h { z0.h }, p2, [x20]\n"
+ "st1h { z1.h }, p2, [x20, #1, MUL VL]\n"
+ "st1h { z2.h }, p2, [x20, #2, MUL VL]\n"
+ "st1h { z3.h }, p2, [x20, #3, MUL VL]\n"
+ "cbz x19, 3f\n"
+ "ld1w { z10.s }, p1/Z, [x19, x16, LSL #2]\n"
"3:" // Load mul: End
- "ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
- "cbz x20, 4f\n"
- "ld1w { z11.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+ "cbz x19, 4f\n"
+ "ld1w { z11.s }, p1/Z, [x19, x16, LSL #2]\n"
"4:" // Load right_shift: End
"ldr x15, [%x[args], %[offsetof_Args_input_cols]]\n"
- "sub x20, x15, #0x1\n"
- "orr x23, x20, %x[ld_in_col], LSL #16\n"
+ "sub x19, x15, #0x1\n"
+ "orr x22, x19, %x[ld_in_col], LSL #16\n"
"ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "orr x23, x17, x23, LSL #22\n"
- "mov x22, #0x6\n"
- "add x21, x7, x6\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
+ "orr x22, x17, x22, LSL #22\n"
+ "mov x21, #0x6\n"
+ "add x20, x8, x7\n"
+ "lsl x19, %x[ld_in_row], #0x0\n"
"ldr x13, [%x[args], %[offsetof_Args_output_cols]]\n"
- "mov x8, #0x0\n"
- "lsl x23, x23, #0x0\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x7, x14\n"
+ "mov x11, #0x0\n"
+ "lsl x22, x22, #0x0\n"
+ "sub x21, x21, x20\n"
+ "madd x19, x19, x8, x14\n"
"5:" // Issue prefetches
- "subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col]\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xf8b64a7c // rprfm pldstrm, x22, [x19]\n"
+ "add x19, x19, %x[ld_in_col]\n"
"bgt 5b\n"
- "ldr x25, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "msub x14, x7, x20, x14\n"
- ".inst 0xc0040900 // mova za.d[x8, #0], { z8.d-z9.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
- ".inst 0xc0040901 // mova za.d[x8, #1], { z8.d-z9.d }\n"
- "mov x22, #0x2\n"
- "ldp x11, x10, [x25], #0x10\n"
- ".inst 0xc0040902 // mova za.d[x8, #2], { z8.d-z9.d }\n"
- "ldp x9, x28, [x20], #0x10\n"
- ".inst 0xc0040903 // mova za.d[x8, #3], { z8.d-z9.d }\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
- ".inst 0xc0040904 // mova za.d[x8, #4], { z8.d-z9.d }\n"
- "ldp x27, x26, [x25], #0x10\n"
- ".inst 0xc0040905 // mova za.d[x8, #5], { z8.d-z9.d }\n"
- "ldp x25, x24, [x20], #0x10\n"
- "cbz x21, 7f\n"
- "cmp x21, x22\n"
- "csel x20, x21, x22, LT\n"
- "sub x21, x21, x20\n"
- "sub x22, x22, x20\n"
- "cbz x21, 7f\n"
- ".inst 0xc0060804 // mova { z4.d-z5.d }, za.d[x8, #0]\n"
- "sub x13, x13, x21\n"
- ".inst 0xc0060826 // mova { z6.d-z7.d }, za.d[x8, #1]\n"
+ "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "lsl x19, %x[ld_in_row], #0x0\n"
+ "msub x14, x8, x19, x14\n"
+ ".inst 0xc0046900 // mova za.d[x11, #0], { z8.d-z9.d }\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ ".inst 0xc0046901 // mova za.d[x11, #1], { z8.d-z9.d }\n"
+ "mov x21, #0x2\n"
+ "ldp x10, x9, [x24], #0x10\n"
+ ".inst 0xc0046902 // mova za.d[x11, #2], { z8.d-z9.d }\n"
+ "ldp x28, x27, [x19], #0x10\n"
+ ".inst 0xc0046903 // mova za.d[x11, #3], { z8.d-z9.d }\n"
+ "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+ ".inst 0xc0046904 // mova za.d[x11, #4], { z8.d-z9.d }\n"
+ "ldp x26, x25, [x24], #0x10\n"
+ ".inst 0xc0046905 // mova za.d[x11, #5], { z8.d-z9.d }\n"
+ "ldp x24, x23, [x19], #0x10\n"
+ "cbz x20, 7f\n"
+ "cmp x20, x21\n"
+ "csel x19, x20, x21, LT\n"
+ "sub x20, x20, x19\n"
+ "sub x21, x21, x19\n"
+ "cbz x20, 7f\n"
+ ".inst 0xc0066804 // mova { z4.d-z5.d }, za.d[x11, #0]\n"
+ "sub x13, x13, x20\n"
+ ".inst 0xc0066826 // mova { z6.d-z7.d }, za.d[x11, #1]\n"
".inst 0xc1aaac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
".inst 0xc1abaa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
".inst 0xc1acab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z12.s\n"
".inst 0xc1bacec4 // sclamp { z4.s-z7.s }, z22.s, z26.s\n"
"6:" // Left padding
- "subs x21, x21, #0x1\n"
- "st1b { z4.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- "st1b { z6.s }, p1, [x10]\n"
+ "subs x20, x20, #0x1\n"
+ "st1b { z4.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z5.s }, p1, [x27]\n"
- "add x27, x27, x25\n"
- "st1b { z7.s }, p1, [x26]\n"
+ "st1b { z6.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
+ "st1b { z5.s }, p1, [x26]\n"
"add x26, x26, x24\n"
+ "st1b { z7.s }, p1, [x25]\n"
+ "add x25, x25, x23\n"
"bgt 6b\n"
"7:" // Left padding: End
- "adds XZR, x7, x6\n"
+ "adds XZR, x8, x7\n"
"bne 12f\n"
- "cbz x22, 10f\n"
- "cmp x22, #0x1\n"
- "sub x15, x15, x22\n"
+ "cbz x21, 10f\n"
+ "cmp x21, #0x1\n"
+ "sub x15, x15, x21\n"
"beq 9f\n"
"8:" // Unpadded: 2 priming loads
- "add x21, x14, %x[ld_in_row]\n"
+ "add x20, x14, %x[ld_in_row]\n"
"ld1sb { z17.s }, p1/Z, [x14]\n"
- "addvl x20, SP, #8\n"
- "ld1sb { z16.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "addvl x19, SP, #8\n"
+ "ld1sb { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z13.h, z17.h, z16.h\n"
"add z13.h, z13.h, z24.h\n"
- "ld1sb { z17.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1sb { z17.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1sb { z16.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1sb { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z14.h, z17.h, z16.h\n"
"add z14.h, z14.h, z24.h\n"
- "ld1sb { z17.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "ld1sb { z16.s }, p1/Z, [x21]\n"
+ "ld1sb { z17.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z16.s }, p1/Z, [x20]\n"
"trn1 z15.h, z17.h, z16.h\n"
"add z15.h, z15.h, z24.h\n"
- ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc16115a8 // sdot za.s[x8, 0], { z13.h-z14.h }, z1.h\n"
- ".inst 0xc16015a9 // sdot za.s[x8, 1], { z13.h-z14.h }, z0.h\n"
- ".inst 0xa0412a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16315c8 // sdot za.s[x8, 0], { z14.h-z15.h }, z3.h\n"
- ".inst 0xc16215c9 // sdot za.s[x8, 1], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xa0402a60 // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc16175a8 // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+ ".inst 0xc16075a9 // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xa0412a62 // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+ ".inst 0xc16375c8 // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+ ".inst 0xc16275c9 // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
"9:" // Unpadded: 1 priming loads
- "add x22, x14, %x[ld_in_row]\n"
+ "add x21, x14, %x[ld_in_row]\n"
"ld1sb { z17.s }, p1/Z, [x14]\n"
- "addvl x21, SP, #4\n"
- "ld1sb { z16.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "addvl x20, SP, #4\n"
+ "ld1sb { z16.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
"trn1 z13.h, z17.h, z16.h\n"
"add z13.h, z13.h, z24.h\n"
- "ld1sb { z17.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "addvl x20, SP, #8\n"
- "ld1sb { z16.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "ld1sb { z17.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "addvl x19, SP, #8\n"
+ "ld1sb { z16.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
"trn1 z14.h, z17.h, z16.h\n"
"add z14.h, z14.h, z24.h\n"
- "ld1sb { z17.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "ld1sb { z17.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1sb { z16.s }, p1/Z, [x22]\n"
+ "ld1sb { z16.s }, p1/Z, [x21]\n"
"trn1 z15.h, z17.h, z16.h\n"
"add z15.h, z15.h, z24.h\n"
- ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc16115a8 // sdot za.s[x8, 0], { z13.h-z14.h }, z1.h\n"
- ".inst 0xc16015a9 // sdot za.s[x8, 1], { z13.h-z14.h }, z0.h\n"
".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
- ".inst 0xa0412aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc16115aa // sdot za.s[x8, 2], { z13.h-z14.h }, z1.h\n"
- ".inst 0xc16015ab // sdot za.s[x8, 3], { z13.h-z14.h }, z0.h\n"
- ".inst 0xc16315c8 // sdot za.s[x8, 0], { z14.h-z15.h }, z3.h\n"
- ".inst 0xc16215c9 // sdot za.s[x8, 1], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xc16175a8 // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+ ".inst 0xc16075a9 // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xa0402a60 // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
".inst 0xa0412a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16315ca // sdot za.s[x8, 2], { z14.h-z15.h }, z3.h\n"
- ".inst 0xc16215cb // sdot za.s[x8, 3], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xc16175aa // sdot za.s[x11, 2], { z13.h-z14.h }, z1.h\n"
+ ".inst 0xc16075ab // sdot za.s[x11, 3], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xc16375c8 // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+ ".inst 0xc16275c9 // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xa0412a62 // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+ ".inst 0xc16375ca // sdot za.s[x11, 2], { z14.h-z15.h }, z3.h\n"
+ ".inst 0xc16275cb // sdot za.s[x11, 3], { z14.h-z15.h }, z2.h\n"
"10:" // Unpadded: 0 priming loads
".inst 0xa0402be0 // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
".inst 0xa0412be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
"cbz x15, 18f\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "add x19, x14, %x[ld_in_row]\n"
"ld1sb { z17.s }, p1/Z, [x14]\n"
"sub x15, x15, #0x1\n"
- "ld1sb { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z13.h, z17.h, z16.h\n"
"sub x13, x13, #0x1\n"
- "ld1sb { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"cmp x15, x13\n"
"add z13.h, z13.h, z24.h\n"
- "ld1sb { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z14.h, z17.h, z16.h\n"
- "csel x23, x15, x13, LT\n"
- "ld1sb { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "csel x22, x15, x13, LT\n"
+ "ld1sb { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"add z14.h, z14.h, z24.h\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1sb { z16.s }, p1/Z, [x20]\n"
+ "ld1sb { z16.s }, p1/Z, [x19]\n"
"trn1 z15.h, z17.h, z16.h\n"
"add z15.h, z15.h, z24.h\n"
- "sub x13, x13, x23\n"
- "cbz x23, 17f\n"
+ "sub x13, x13, x22\n"
+ "cbz x22, 17f\n"
"11:" // Unpadded: Main loop
- ".inst 0xc16115a8 // sdot za.s[x8, 0], { z13.h-z14.h }, z1.h\n"
- "addvl x22, SP, #4\n"
- "addvl x21, SP, #8\n"
+ ".inst 0xc16175a8 // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+ "addvl x21, SP, #4\n"
+ "addvl x20, SP, #8\n"
"ld1sb { z21.s }, p1/Z, [x14]\n"
- ".inst 0xc16015a9 // sdot za.s[x8, 1], { z13.h-z14.h }, z0.h\n"
- ".inst 0xa0402ac0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x22]\n"
- "add x20, x14, %x[ld_in_row]\n"
- "subs x23, x23, #0x1\n"
- ".inst 0xc16315c8 // sdot za.s[x8, 0], { z14.h-z15.h }, z3.h\n"
- "ld1sb { z20.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc16075a9 // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
+ "add x19, x14, %x[ld_in_row]\n"
+ "subs x22, x22, #0x1\n"
+ ".inst 0xc16375c8 // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+ "ld1sb { z20.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc16215c9 // sdot za.s[x8, 1], { z14.h-z15.h }, z2.h\n"
- ".inst 0xa0412ac2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xc0060804 // mova { z4.d-z5.d }, za.d[x8, #0]\n"
- "ld1sb { z19.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc0060826 // mova { z6.d-z7.d }, za.d[x8, #1]\n"
+ ".inst 0xc16275c9 // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xa0412aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xc0066804 // mova { z4.d-z5.d }, za.d[x11, #0]\n"
+ "ld1sb { z19.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0xc0066826 // mova { z6.d-z7.d }, za.d[x11, #1]\n"
".inst 0xc1aaac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
- ".inst 0xc16115aa // sdot za.s[x8, 2], { z13.h-z14.h }, z1.h\n"
- "ld1sb { z18.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc16015ab // sdot za.s[x8, 3], { z13.h-z14.h }, z0.h\n"
- ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xc16175aa // sdot za.s[x11, 2], { z13.h-z14.h }, z1.h\n"
+ "ld1sb { z18.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0xc16075ab // sdot za.s[x11, 3], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
".inst 0xc1abaa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
- ".inst 0xc16115ac // sdot za.s[x8, 4], { z13.h-z14.h }, z1.h\n"
- "ld1sb { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc16015ad // sdot za.s[x8, 5], { z13.h-z14.h }, z0.h\n"
- "ld1sb { z16.s }, p1/Z, [x20]\n"
+ ".inst 0xc16175ac // sdot za.s[x11, 4], { z13.h-z14.h }, z1.h\n"
+ "ld1sb { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0xc16075ad // sdot za.s[x11, 5], { z13.h-z14.h }, z0.h\n"
+ "ld1sb { z16.s }, p1/Z, [x19]\n"
".inst 0xc1acab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z12.s\n"
- ".inst 0xc16315ca // sdot za.s[x8, 2], { z14.h-z15.h }, z3.h\n"
+ ".inst 0xc16375ca // sdot za.s[x11, 2], { z14.h-z15.h }, z3.h\n"
"trn1 z13.h, z21.h, z20.h\n"
".inst 0xa0402be0 // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
- ".inst 0xc16215cb // sdot za.s[x8, 3], { z14.h-z15.h }, z2.h\n"
- ".inst 0xa0412aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xc16275cb // sdot za.s[x11, 3], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xa0412a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
".inst 0xc1bacec4 // sclamp { z4.s-z7.s }, z22.s, z26.s\n"
- ".inst 0xc16315cc // sdot za.s[x8, 4], { z14.h-z15.h }, z3.h\n"
- "st1b { z4.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
+ ".inst 0xc16375cc // sdot za.s[x11, 4], { z14.h-z15.h }, z3.h\n"
+ "st1b { z4.s }, p1, [x10]\n"
+ "add x10, x10, x28\n"
"add z13.h, z13.h, z24.h\n"
- ".inst 0xc16215cd // sdot za.s[x8, 5], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xc16275cd // sdot za.s[x11, 5], { z14.h-z15.h }, z2.h\n"
"trn1 z14.h, z19.h, z18.h\n"
"trn1 z15.h, z17.h, z16.h\n"
- "add x8, x8, #0x2\n"
+ "add x11, x11, #0x2\n"
".inst 0xa0412be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
- "st1b { z6.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
- ".inst 0xc0040904 // mova za.d[x8, #4], { z8.d-z9.d }\n"
- "st1b { z5.s }, p1, [x27]\n"
- "add x27, x27, x25\n"
- ".inst 0xc0040905 // mova za.d[x8, #5], { z8.d-z9.d }\n"
- "add z14.h, z14.h, z24.h\n"
- "st1b { z7.s }, p1, [x26]\n"
+ "st1b { z6.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
+ ".inst 0xc0046904 // mova za.d[x11, #4], { z8.d-z9.d }\n"
+ "st1b { z5.s }, p1, [x26]\n"
"add x26, x26, x24\n"
+ ".inst 0xc0046905 // mova za.d[x11, #5], { z8.d-z9.d }\n"
+ "add z14.h, z14.h, z24.h\n"
+ "st1b { z7.s }, p1, [x25]\n"
+ "add x25, x25, x23\n"
"add z15.h, z15.h, z24.h\n"
"bgt 11b\n"
"b 17f\n"
"12:" // Padded
- "cbz x22, 15f\n"
- "cmp x22, #0x1\n"
- "sub x15, x15, x22\n"
+ "cbz x21, 15f\n"
+ "cmp x21, #0x1\n"
+ "sub x15, x15, x21\n"
"beq 14f\n"
"13:" // Padded: 2 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"ld1sb { z19.s }, p0/Z, [x14]\n"
"add z19.h, p0/M, z19.h, z24.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "add x19, x14, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z18.s }, p0/Z, [x20]\n"
+ "ld1sb { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z24.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
+ "ld1sb { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z24.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z24.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z13.h, z19.h, z18.h\n"
"trn1 z14.h, z17.h, z16.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
+ "ld1sb { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z24.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
- "addvl x20, SP, #8\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
+ "addvl x19, SP, #8\n"
"add z16.h, p0/M, z16.h, z24.h\n"
- ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa0402a60 // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
"trn1 z15.h, z17.h, z16.h\n"
- ".inst 0xc16115a8 // sdot za.s[x8, 0], { z13.h-z14.h }, z1.h\n"
+ ".inst 0xc16175a8 // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
"add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc16015a9 // sdot za.s[x8, 1], { z13.h-z14.h }, z0.h\n"
- ".inst 0xa0412a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16315c8 // sdot za.s[x8, 0], { z14.h-z15.h }, z3.h\n"
- ".inst 0xc16215c9 // sdot za.s[x8, 1], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xc16075a9 // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xa0412a62 // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+ ".inst 0xc16375c8 // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+ ".inst 0xc16275c9 // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
"14:" // Padded: 1 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"ld1sb { z19.s }, p0/Z, [x14]\n"
"add z19.h, p0/M, z19.h, z24.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "add x19, x14, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z18.s }, p0/Z, [x20]\n"
+ "ld1sb { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z24.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
+ "ld1sb { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z24.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z24.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z13.h, z19.h, z18.h\n"
"trn1 z14.h, z17.h, z16.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
+ "ld1sb { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z24.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
- "addvl x21, SP, #4\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
+ "addvl x20, SP, #4\n"
"add z16.h, p0/M, z16.h, z24.h\n"
- ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
- "addvl x20, SP, #8\n"
- "trn1 z15.h, z17.h, z16.h\n"
- ".inst 0xc16115a8 // sdot za.s[x8, 0], { z13.h-z14.h }, z1.h\n"
- ".inst 0xc16015a9 // sdot za.s[x8, 1], { z13.h-z14.h }, z0.h\n"
".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ "addvl x19, SP, #8\n"
+ "trn1 z15.h, z17.h, z16.h\n"
+ ".inst 0xc16175a8 // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+ ".inst 0xc16075a9 // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xa0402a60 // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
"add x14, x14, %x[ld_in_col]\n"
- ".inst 0xa0412aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc16115aa // sdot za.s[x8, 2], { z13.h-z14.h }, z1.h\n"
- ".inst 0xc16015ab // sdot za.s[x8, 3], { z13.h-z14.h }, z0.h\n"
- ".inst 0xc16315c8 // sdot za.s[x8, 0], { z14.h-z15.h }, z3.h\n"
- ".inst 0xc16215c9 // sdot za.s[x8, 1], { z14.h-z15.h }, z2.h\n"
".inst 0xa0412a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16315ca // sdot za.s[x8, 2], { z14.h-z15.h }, z3.h\n"
- ".inst 0xc16215cb // sdot za.s[x8, 3], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xc16175aa // sdot za.s[x11, 2], { z13.h-z14.h }, z1.h\n"
+ ".inst 0xc16075ab // sdot za.s[x11, 3], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xc16375c8 // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+ ".inst 0xc16275c9 // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xa0412a62 // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+ ".inst 0xc16375ca // sdot za.s[x11, 2], { z14.h-z15.h }, z3.h\n"
+ ".inst 0xc16275cb // sdot za.s[x11, 3], { z14.h-z15.h }, z2.h\n"
"15:" // Padded: 0 priming loads
".inst 0xa0402be0 // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
".inst 0xa0412be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
@@ -469,192 +469,192 @@ void sme2_s8q_planar_3x3_s1_4rows_dot_za_impl(
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"ld1sb { z19.s }, p0/Z, [x14]\n"
"add z19.h, p0/M, z19.h, z24.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "add x19, x14, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z18.s }, p0/Z, [x20]\n"
+ "ld1sb { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z24.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
+ "ld1sb { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z24.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z24.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z13.h, z19.h, z18.h\n"
"trn1 z14.h, z17.h, z16.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
+ "ld1sb { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z24.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z24.h\n"
"sub x15, x15, #0x1\n"
"sub x13, x13, #0x1\n"
"cmp x15, x13\n"
"trn1 z15.h, z17.h, z16.h\n"
- "csel x23, x15, x13, LT\n"
+ "csel x22, x15, x13, LT\n"
"add x14, x14, %x[ld_in_col]\n"
- "sub x13, x13, x23\n"
- "cbz x23, 17f\n"
+ "sub x13, x13, x22\n"
+ "cbz x22, 17f\n"
"16:" // Padded: Main loop
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"ld1sb { z21.s }, p0/Z, [x14]\n"
- ".inst 0xc16115a8 // sdot za.s[x8, 0], { z13.h-z14.h }, z1.h\n"
- ".inst 0xc16015a9 // sdot za.s[x8, 1], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xc16175a8 // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+ ".inst 0xc16075a9 // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
"add z21.h, p0/M, z21.h, z24.h\n"
- "add x22, x14, %x[ld_in_row]\n"
+ "add x21, x14, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z20.s }, p0/Z, [x22]\n"
- ".inst 0xc16315c8 // sdot za.s[x8, 0], { z14.h-z15.h }, z3.h\n"
+ "ld1sb { z20.s }, p0/Z, [x21]\n"
+ ".inst 0xc16375c8 // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
"add z20.h, p0/M, z20.h, z24.h\n"
- "add x22, x22, %x[ld_in_row]\n"
- ".inst 0xc16215c9 // sdot za.s[x8, 1], { z14.h-z15.h }, z2.h\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ ".inst 0xc16275c9 // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z19.s }, p0/Z, [x22]\n"
+ "ld1sb { z19.s }, p0/Z, [x21]\n"
"add z19.h, p0/M, z19.h, z24.h\n"
- ".inst 0xc0060804 // mova { z4.d-z5.d }, za.d[x8, #0]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc0066804 // mova { z4.d-z5.d }, za.d[x11, #0]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z18.s }, p0/Z, [x22]\n"
- ".inst 0xc0060826 // mova { z6.d-z7.d }, za.d[x8, #1]\n"
+ "ld1sb { z18.s }, p0/Z, [x21]\n"
+ ".inst 0xc0066826 // mova { z6.d-z7.d }, za.d[x11, #1]\n"
"mov x12, #0x4\n"
- "addvl x21, SP, #4\n"
+ "addvl x20, SP, #4\n"
"add z18.h, p0/M, z18.h, z24.h\n"
".inst 0xc1aaac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
- "addvl x20, SP, #8\n"
- ".inst 0xc16115aa // sdot za.s[x8, 2], { z13.h-z14.h }, z1.h\n"
- "subs x23, x23, #0x1\n"
- "ld1sb { z17.s }, p0/Z, [x22]\n"
- ".inst 0xc16015ab // sdot za.s[x8, 3], { z13.h-z14.h }, z0.h\n"
- ".inst 0xc1abaa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ "addvl x19, SP, #8\n"
+ ".inst 0xc16175aa // sdot za.s[x11, 2], { z13.h-z14.h }, z1.h\n"
+ "subs x22, x22, #0x1\n"
+ "ld1sb { z17.s }, p0/Z, [x21]\n"
+ ".inst 0xc16075ab // sdot za.s[x11, 3], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xc1abaa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
+ ".inst 0xa0402a60 // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z24.h\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- ".inst 0xa0412aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc16115ac // sdot za.s[x8, 4], { z13.h-z14.h }, z1.h\n"
+ ".inst 0xa0412a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc16175ac // sdot za.s[x11, 4], { z13.h-z14.h }, z1.h\n"
".inst 0xc1acab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z12.s\n"
- "ld1sb { z16.s }, p0/Z, [x22]\n"
- ".inst 0xc16015ad // sdot za.s[x8, 5], { z13.h-z14.h }, z0.h\n"
+ "ld1sb { z16.s }, p0/Z, [x21]\n"
+ ".inst 0xc16075ad // sdot za.s[x11, 5], { z13.h-z14.h }, z0.h\n"
"add z16.h, p0/M, z16.h, z24.h\n"
"add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc16315ca // sdot za.s[x8, 2], { z14.h-z15.h }, z3.h\n"
+ ".inst 0xc16375ca // sdot za.s[x11, 2], { z14.h-z15.h }, z3.h\n"
".inst 0xa0402be0 // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
".inst 0xc1bacec4 // sclamp { z4.s-z7.s }, z22.s, z26.s\n"
- ".inst 0xc16215cb // sdot za.s[x8, 3], { z14.h-z15.h }, z2.h\n"
- ".inst 0xa0412a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- "st1b { z4.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- ".inst 0xc16315cc // sdot za.s[x8, 4], { z14.h-z15.h }, z3.h\n"
- "st1b { z6.s }, p1, [x10]\n"
+ ".inst 0xc16275cb // sdot za.s[x11, 3], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xa0412a62 // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+ "st1b { z4.s }, p1, [x10]\n"
"add x10, x10, x28\n"
+ ".inst 0xc16375cc // sdot za.s[x11, 4], { z14.h-z15.h }, z3.h\n"
+ "st1b { z6.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
"trn1 z13.h, z21.h, z20.h\n"
- ".inst 0xc16215cd // sdot za.s[x8, 5], { z14.h-z15.h }, z2.h\n"
- "add x8, x8, #0x2\n"
+ ".inst 0xc16275cd // sdot za.s[x11, 5], { z14.h-z15.h }, z2.h\n"
+ "add x11, x11, #0x2\n"
".inst 0xa0412be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
- "st1b { z5.s }, p1, [x27]\n"
- "add x27, x27, x25\n"
- "st1b { z7.s }, p1, [x26]\n"
+ "st1b { z5.s }, p1, [x26]\n"
"add x26, x26, x24\n"
- ".inst 0xc0040904 // mova za.d[x8, #4], { z8.d-z9.d }\n"
- ".inst 0xc0040905 // mova za.d[x8, #5], { z8.d-z9.d }\n"
+ "st1b { z7.s }, p1, [x25]\n"
+ "add x25, x25, x23\n"
+ ".inst 0xc0046904 // mova za.d[x11, #4], { z8.d-z9.d }\n"
+ ".inst 0xc0046905 // mova za.d[x11, #5], { z8.d-z9.d }\n"
"trn1 z14.h, z19.h, z18.h\n"
"trn1 z15.h, z17.h, z16.h\n"
"bgt 16b\n"
"17:" // Main loop tail
- ".inst 0xc16115a8 // sdot za.s[x8, 0], { z13.h-z14.h }, z1.h\n"
- "addvl x21, SP, #4\n"
- "addvl x20, SP, #8\n"
- ".inst 0xc16015a9 // sdot za.s[x8, 1], { z13.h-z14.h }, z0.h\n"
- ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc16315c8 // sdot za.s[x8, 0], { z14.h-z15.h }, z3.h\n"
- ".inst 0xc16215c9 // sdot za.s[x8, 1], { z14.h-z15.h }, z2.h\n"
- ".inst 0xa0412aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc0060804 // mova { z4.d-z5.d }, za.d[x8, #0]\n"
- ".inst 0xc0060826 // mova { z6.d-z7.d }, za.d[x8, #1]\n"
+ ".inst 0xc16175a8 // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+ "addvl x20, SP, #4\n"
+ "addvl x19, SP, #8\n"
+ ".inst 0xc16075a9 // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc16375c8 // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+ ".inst 0xc16275c9 // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xa0412a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc0066804 // mova { z4.d-z5.d }, za.d[x11, #0]\n"
+ ".inst 0xc0066826 // mova { z6.d-z7.d }, za.d[x11, #1]\n"
".inst 0xc1aaac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
- ".inst 0xc16115aa // sdot za.s[x8, 2], { z13.h-z14.h }, z1.h\n"
+ ".inst 0xc16175aa // sdot za.s[x11, 2], { z13.h-z14.h }, z1.h\n"
".inst 0xc1abaa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
- ".inst 0xc16015ab // sdot za.s[x8, 3], { z13.h-z14.h }, z0.h\n"
- ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc16075ab // sdot za.s[x11, 3], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xa0402a60 // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
".inst 0xc1acab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z12.s\n"
- ".inst 0xc16115ac // sdot za.s[x8, 4], { z13.h-z14.h }, z1.h\n"
+ ".inst 0xc16175ac // sdot za.s[x11, 4], { z13.h-z14.h }, z1.h\n"
".inst 0xc1bacec4 // sclamp { z4.s-z7.s }, z22.s, z26.s\n"
- "st1b { z4.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- ".inst 0xc16015ad // sdot za.s[x8, 5], { z13.h-z14.h }, z0.h\n"
- "st1b { z6.s }, p1, [x10]\n"
+ "st1b { z4.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- ".inst 0xc16315ca // sdot za.s[x8, 2], { z14.h-z15.h }, z3.h\n"
- "st1b { z5.s }, p1, [x27]\n"
- "add x27, x27, x25\n"
- ".inst 0xc16215cb // sdot za.s[x8, 3], { z14.h-z15.h }, z2.h\n"
- ".inst 0xa0412a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- "st1b { z7.s }, p1, [x26]\n"
+ ".inst 0xc16075ad // sdot za.s[x11, 5], { z13.h-z14.h }, z0.h\n"
+ "st1b { z6.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
+ ".inst 0xc16375ca // sdot za.s[x11, 2], { z14.h-z15.h }, z3.h\n"
+ "st1b { z5.s }, p1, [x26]\n"
"add x26, x26, x24\n"
- ".inst 0xc16315cc // sdot za.s[x8, 4], { z14.h-z15.h }, z3.h\n"
- ".inst 0xc16215cd // sdot za.s[x8, 5], { z14.h-z15.h }, z2.h\n"
- "add x8, x8, #0x2\n"
- ".inst 0xc0040904 // mova za.d[x8, #4], { z8.d-z9.d }\n"
- ".inst 0xc0040905 // mova za.d[x8, #5], { z8.d-z9.d }\n"
+ ".inst 0xc16275cb // sdot za.s[x11, 3], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xa0412a62 // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+ "st1b { z7.s }, p1, [x25]\n"
+ "add x25, x25, x23\n"
+ ".inst 0xc16375cc // sdot za.s[x11, 4], { z14.h-z15.h }, z3.h\n"
+ ".inst 0xc16275cd // sdot za.s[x11, 5], { z14.h-z15.h }, z2.h\n"
+ "add x11, x11, #0x2\n"
+ ".inst 0xc0046904 // mova za.d[x11, #4], { z8.d-z9.d }\n"
+ ".inst 0xc0046905 // mova za.d[x11, #5], { z8.d-z9.d }\n"
"18:" // Main loop skip tail
"cbz x13, 20f\n"
"19:" // Right padding loop
- ".inst 0xc0060804 // mova { z4.d-z5.d }, za.d[x8, #0]\n"
+ ".inst 0xc0066804 // mova { z4.d-z5.d }, za.d[x11, #0]\n"
"subs x13, x13, #0x1\n"
- ".inst 0xc0060826 // mova { z6.d-z7.d }, za.d[x8, #1]\n"
+ ".inst 0xc0066826 // mova { z6.d-z7.d }, za.d[x11, #1]\n"
".inst 0xc1aaac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
- "add x8, x8, #0x2\n"
+ "add x11, x11, #0x2\n"
".inst 0xc1abaa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
- ".inst 0xc0040904 // mova za.d[x8, #4], { z8.d-z9.d }\n"
+ ".inst 0xc0046904 // mova za.d[x11, #4], { z8.d-z9.d }\n"
".inst 0xc1acab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z12.s\n"
- ".inst 0xc0040905 // mova za.d[x8, #5], { z8.d-z9.d }\n"
+ ".inst 0xc0046905 // mova za.d[x11, #5], { z8.d-z9.d }\n"
".inst 0xc1bacec4 // sclamp { z4.s-z7.s }, z22.s, z26.s\n"
- "st1b { z4.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- "st1b { z6.s }, p1, [x10]\n"
+ "st1b { z4.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z5.s }, p1, [x27]\n"
- "add x27, x27, x25\n"
- "st1b { z7.s }, p1, [x26]\n"
+ "st1b { z6.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
+ "st1b { z5.s }, p1, [x26]\n"
"add x26, x26, x24\n"
+ "st1b { z7.s }, p1, [x25]\n"
+ "add x25, x25, x23\n"
"bgt 19b\n"
"20:" // End
- "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
- "incw x22, ALL, MUL #9\n"
- "str x22, [%x[args], %[offsetof_Args_weights]]\n"
+ "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+ "incw x21, ALL, MUL #9\n"
+ "str x21, [%x[args], %[offsetof_Args_weights]]\n"
"incw x16\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
"whilelt p1.s, x16, x17\n"
"ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "add x14, x14, x20\n"
+ "add x14, x14, x19\n"
"str x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "ldr x25, [%x[args], %[offsetof_Args_outptrs]]\n"
- "ldr x24, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
- "ldp x23, x22, [x25, #0x0]\n"
- "ldp x21, x20, [x24, #0x0]\n"
- "add x23, x23, x21\n"
+ "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+ "ldp x22, x21, [x24, #0x0]\n"
+ "ldp x20, x19, [x23, #0x0]\n"
"add x22, x22, x20\n"
- "stp x23, x22, [x25, #0x0]\n"
- "ldp x23, x22, [x25, #0x10]\n"
- "ldp x21, x20, [x24, #0x10]\n"
- "add x23, x23, x21\n"
+ "add x21, x21, x19\n"
+ "stp x22, x21, [x24, #0x0]\n"
+ "ldp x22, x21, [x24, #0x10]\n"
+ "ldp x20, x19, [x23, #0x10]\n"
"add x22, x22, x20\n"
- "stp x23, x22, [x25, #0x10]\n"
+ "add x21, x21, x19\n"
+ "stp x22, x21, [x24, #0x10]\n"
"b.any 1b\n"
"addvl SP, SP, #12\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s2_2rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s2_2rows_dot_za/generic.cpp
new file mode 100644
index 0000000000..a9538acf88
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s2_2rows_dot_za/generic.cpp
@@ -0,0 +1,592 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <algorithm>
+#include <cstddef>
+#include "arm_gemm.hpp"
+
+using arm_gemm::Requantize32;
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_s8q_planar_3x3_s2_2rows_dot_za_impl(
+ const int8_t *inptr,
+ size_t ld_in_row,
+ size_t ld_in_col,
+ unsigned int pad_top,
+ unsigned int valid_input_rows,
+ unsigned int pad_left,
+ unsigned int valid_input_cols,
+ const int8_t *weights,
+ int8_t **outptrs,
+ const size_t *outlds,
+ unsigned int output_cols,
+ unsigned int start_channel,
+ unsigned int valid_channels,
+ const arm_gemm::Requantize32 &qp
+)
+{
+ struct Args
+ {
+ const int8_t *inptr;
+ long unsigned int pad_top, pad_bottom, pad_left;
+ const int8_t *weights;
+ long unsigned int input_cols, output_cols;
+ int8_t **outptrs;
+ const size_t *ld_out_cols;
+ long unsigned int n, n_channels;
+ };
+
+ Args args = { inptr, pad_top, 5u - std::min(5u, pad_top + valid_input_rows), pad_left, weights, valid_input_cols, output_cols, outptrs, outlds, start_channel, valid_channels };
+
+ __asm__ __volatile__(
+ "ldr x11, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "mov x19, #0x5\n"
+ ".inst 0xd503477f // SMSTART ZA\n"
+ "sub x19, x19, x11\n"
+ "ldr x10, [%x[args], %[offsetof_Args_pad_top]]\n"
+ "ptrue p0.b\n"
+ "mov z12.s, #0x0\n"
+ "ldr x22, [%x[args], %[offsetof_Args_n_channels]]\n"
+ "whilelt p5.s, XZR, x22\n"
+ "whilelt p9.s, XZR, x19\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_bias]]\n"
+ "whilelt p8.s, XZR, x10\n"
+ "eor p8.b, p0/Z, p8.b, p9.b\n"
+ "ldr x21, [%x[args], %[offsetof_Args_n]]\n"
+ "cbz x19, 1f\n"
+ "ld1w { z12.s }, p5/Z, [x19, x21, LSL #2]\n"
+ "1:" // Load bias: Done
+ "ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
+ "ld1sb { z27.s }, p0/Z, [x20]\n"
+ "incw x20\n"
+ "mov z0.h, #0x0\n"
+ "ld1sb { z16.s }, p0/Z, [x20]\n"
+ "incw x20\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+ "mov z13.d, z12.d\n"
+ "ld1sb { z22.s }, p0/Z, [x20]\n"
+ "incw x20\n"
+ "ld1sb { z21.s }, p0/Z, [x20]\n"
+ "incw x20\n"
+ "ld1sb { z20.s }, p0/Z, [x20]\n"
+ "incw x20\n"
+ "ld1sb { z18.s }, p0/Z, [x20]\n"
+ "incw x20\n"
+ "ld1sb { z17.s }, p0/Z, [x20]\n"
+ "incw x20\n"
+ "ld1sb { z24.s }, p0/Z, [x20]\n"
+ "incw x20\n"
+ "ld1sb { z19.s }, p0/Z, [x20]\n"
+ "ld1rh { z28.h }, p0/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+ "sub z27.h, z27.h, z28.h\n"
+ "sub z16.h, z16.h, z28.h\n"
+ "sub z22.h, z22.h, z28.h\n"
+ "sub z21.h, z21.h, z28.h\n"
+ "trn1 z8.h, z27.h, z21.h\n"
+ "sub z20.h, z20.h, z28.h\n"
+ "sub z18.h, z18.h, z28.h\n"
+ "trn1 z7.h, z16.h, z20.h\n"
+ "sub z17.h, z17.h, z28.h\n"
+ "sub z24.h, z24.h, z28.h\n"
+ "trn1 z6.h, z17.h, z0.h\n"
+ "sub z19.h, z19.h, z28.h\n"
+ "trn1 z5.h, z24.h, z0.h\n"
+ "trn1 z4.h, z22.h, z18.h\n"
+ "trn1 z3.h, z19.h, z0.h\n"
+ "ld1rh { z21.h }, p0/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
+ "ld1rw { z2.s }, p0/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+ "ld1rw { z1.s }, p0/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
+ "cbz x19, 2f\n"
+ "ld1w { z1.s }, p5/Z, [x19, x21, LSL #2]\n"
+ "2:" // Load mul: End
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+ "ld1rw { z0.s }, p0/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+ "cbz x19, 3f\n"
+ "ld1w { z0.s }, p5/Z, [x19, x21, LSL #2]\n"
+ "3:" // Load right_shift: End
+ "ldr x28, [%x[args], %[offsetof_Args_input_cols]]\n"
+ "orr x21, x28, %x[ld_in_col], LSL #16\n"
+ "orr x21, x22, x21, LSL #22\n"
+ "ld1rw { z20.s }, p0/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
+ "ldr x27, [%x[args], %[offsetof_Args_inptr]]\n"
+ "mov x20, #0x5\n"
+ "add x19, x10, x11\n"
+ "ld1rw { z19.s }, p0/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+ "mov x9, #0x0\n"
+ "ldr x26, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "lsl x21, x21, #0x0\n"
+ "sub x20, x20, x19\n"
+ "mov x19, x27\n"
+ "4:" // Issue prefetches
+ "subs x20, x20, #0x1\n"
+ ".inst 0xf8b54a7c // rprfm pldstrm, x21, [x19]\n"
+ "add x19, x19, %x[ld_in_col]\n"
+ "bgt 4b\n"
+ "ldr x21, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "lsl x19, %x[ld_in_row], #0x0\n"
+ "msub x27, x10, x19, x27\n"
+ ".inst 0xc0042980 // mova za.d[x9, #0], { z12.d-z13.d }\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ ".inst 0xc0042981 // mova za.d[x9, #1], { z12.d-z13.d }\n"
+ "mov x25, #0x2\n"
+ "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+ ".inst 0xc0042982 // mova za.d[x9, #2], { z12.d-z13.d }\n"
+ "ldp x24, x23, [x21], #0x10\n"
+ "ldp x22, x21, [x19], #0x10\n"
+ "cbz x20, 6f\n"
+ "cmp x20, x25\n"
+ "csel x19, x20, x25, LT\n"
+ "sub x20, x20, x19\n"
+ "sub x25, x25, x19\n"
+ "cbz x20, 6f\n"
+ ".inst 0xc0062818 // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+ ".inst 0xc1a1a418 // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+ "and x25, x20, #0x1\n"
+ ".inst 0xc1a0a238 // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+ "add x20, x20, #0x1\n"
+ "lsr x20, x20, #0x1\n"
+ ".inst 0xc1a2a318 // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+ "sub x26, x26, x20\n"
+ ".inst 0xc1b3c698 // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+ "5:" // Left padding
+ "subs x20, x20, #0x1\n"
+ "st1b { z24.s }, p5, [x24]\n"
+ "add x24, x24, x22\n"
+ "st1b { z25.s }, p5, [x23]\n"
+ "add x23, x23, x21\n"
+ "bgt 5b\n"
+ "6:" // Left padding: End
+ "adds XZR, x10, x11\n"
+ "bne 11f\n"
+ "cbz x25, 9f\n"
+ "cmp x25, #0x1\n"
+ "sub x28, x28, x25\n"
+ "beq 8f\n"
+ "7:" // Unpadded: 2 priming loads
+ "add x19, x27, %x[ld_in_row]\n"
+ "ld1sb { z14.s }, p5/Z, [x27]\n"
+ "sub z14.h, z14.h, z21.h\n"
+ "add x27, x27, %x[ld_in_col]\n"
+ "ld1sb { z18.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub z18.h, z18.h, z21.h\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "ld1sb { z15.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub z15.h, z15.h, z21.h\n"
+ "ld1sb { z17.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub z17.h, z17.h, z21.h\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "ld1sb { z16.s }, p5/Z, [x19]\n"
+ "sub z16.h, z16.h, z21.h\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc16835c8 // sdot za.s[x9, 0], { z14.h-z15.h }, z8.h\n"
+ ".inst 0xc16635e8 // sdot za.s[x9, 0], { z15.h-z16.h }, z6.h\n"
+ "8:" // Unpadded: 1 priming loads
+ "add x19, x27, %x[ld_in_row]\n"
+ "ld1sb { z14.s }, p5/Z, [x27]\n"
+ "sub z14.h, z14.h, z21.h\n"
+ "add x27, x27, %x[ld_in_col]\n"
+ "ld1sb { z18.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub z18.h, z18.h, z21.h\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "ld1sb { z15.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub z15.h, z15.h, z21.h\n"
+ "ld1sb { z17.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub z17.h, z17.h, z21.h\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "ld1sb { z16.s }, p5/Z, [x19]\n"
+ "sub z16.h, z16.h, z21.h\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc16735c8 // sdot za.s[x9, 0], { z14.h-z15.h }, z7.h\n"
+ ".inst 0xc16535e8 // sdot za.s[x9, 0], { z15.h-z16.h }, z5.h\n"
+ "9:" // Unpadded: 0 priming loads
+ "add x20, x27, %x[ld_in_row]\n"
+ "ld1sb { z14.s }, p5/Z, [x27]\n"
+ "sub z14.h, z14.h, z21.h\n"
+ "sub x28, x28, #0x2\n"
+ "ld1sb { z18.s }, p5/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "sub z18.h, z18.h, z21.h\n"
+ "sub x26, x26, #0x1\n"
+ "ld1sb { z15.s }, p5/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "sub z15.h, z15.h, z21.h\n"
+ "lsr x19, x28, #0x1\n"
+ "ld1sb { z17.s }, p5/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "sub z17.h, z17.h, z21.h\n"
+ "cmp x19, x26\n"
+ "ld1sb { z16.s }, p5/Z, [x20]\n"
+ "sub z16.h, z16.h, z21.h\n"
+ "csel x20, x19, x26, LT\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "mov z16.d, z16.d\n"
+ "add x27, x27, %x[ld_in_col]\n"
+ "and x28, x28, #0x1\n"
+ "sub x26, x26, x20\n"
+ "cbz x20, 16f\n"
+ "10:" // Unpadded: Main loop
+ ".inst 0xc16435c8 // sdot za.s[x9, 0], { z14.h-z15.h }, z4.h\n"
+ "add x19, x27, %x[ld_in_row]\n"
+ "subs x20, x20, #0x1\n"
+ ".inst 0xc16835c9 // sdot za.s[x9, 1], { z14.h-z15.h }, z8.h\n"
+ "ld1sb { z14.s }, p5/Z, [x27]\n"
+ "sub z14.h, z14.h, z21.h\n"
+ "add x27, x27, %x[ld_in_col]\n"
+ "ld1sb { z18.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0xc16335e8 // sdot za.s[x9, 0], { z15.h-z16.h }, z3.h\n"
+ "sub z18.h, z18.h, z21.h\n"
+ ".inst 0xc16635e9 // sdot za.s[x9, 1], { z15.h-z16.h }, z6.h\n"
+ "ld1sb { z15.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub z15.h, z15.h, z21.h\n"
+ "ld1sb { z17.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub z17.h, z17.h, z21.h\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "ld1sb { z16.s }, p5/Z, [x19]\n"
+ "sub z16.h, z16.h, z21.h\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add x19, x27, %x[ld_in_row]\n"
+ ".inst 0xc0062818 // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+ "add x9, x9, #0x1\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc16735c8 // sdot za.s[x9, 0], { z14.h-z15.h }, z7.h\n"
+ ".inst 0xc1a1a418 // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+ "ld1sb { z14.s }, p5/Z, [x27]\n"
+ ".inst 0xc16535e8 // sdot za.s[x9, 0], { z15.h-z16.h }, z5.h\n"
+ ".inst 0xc1a0a238 // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+ "ld1sb { z18.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0xc1a2a318 // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+ "ld1sb { z15.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub z14.h, z14.h, z21.h\n"
+ "sub z18.h, z18.h, z21.h\n"
+ "ld1sb { z17.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub z15.h, z15.h, z21.h\n"
+ "sub z17.h, z17.h, z21.h\n"
+ "ld1sb { z16.s }, p5/Z, [x19]\n"
+ "sub z16.h, z16.h, z21.h\n"
+ ".inst 0xc1b3c698 // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+ "add x27, x27, %x[ld_in_col]\n"
+ "st1b { z24.s }, p5, [x24]\n"
+ "add x24, x24, x22\n"
+ ".inst 0xc0042982 // mova za.d[x9, #2], { z12.d-z13.d }\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "st1b { z25.s }, p5, [x23]\n"
+ "add x23, x23, x21\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "mov z16.d, z16.d\n"
+ "bgt 10b\n"
+ "b 16f\n"
+ "11:" // Padded
+ "cbz x25, 14f\n"
+ "cmp x25, #0x1\n"
+ "sub x28, x28, x25\n"
+ "beq 13f\n"
+ "12:" // Padded: 2 priming loads
+ "mov x12, #0x0\n"
+ ".inst 0x25305504 // psel p4.s, p5.s/Z, p8.s[w12]\n"
+ "ld1sb { z14.s }, p4/Z, [x27]\n"
+ "sub z14.h, p4/M, z14.h, z21.h\n"
+ "add x19, x27, %x[ld_in_row]\n"
+ ".inst 0x25705503 // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+ "ld1sb { z18.s }, p3/Z, [x19]\n"
+ "sub z18.h, p3/M, z18.h, z21.h\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25b05502 // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+ "ld1sb { z15.s }, p2/Z, [x19]\n"
+ "sub z15.h, p2/M, z15.h, z21.h\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25f05501 // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+ "ld1sb { z17.s }, p1/Z, [x19]\n"
+ "sub z17.h, p1/M, z17.h, z21.h\n"
+ "mov x12, #0x4\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ ".inst 0x25305500 // psel p0.s, p5.s/Z, p8.s[w12]\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
+ "sub z16.h, p0/M, z16.h, z21.h\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc16835c8 // sdot za.s[x9, 0], { z14.h-z15.h }, z8.h\n"
+ "add x27, x27, %x[ld_in_col]\n"
+ ".inst 0xc16635e8 // sdot za.s[x9, 0], { z15.h-z16.h }, z6.h\n"
+ "13:" // Padded: 1 priming loads
+ "mov x12, #0x0\n"
+ ".inst 0x25305504 // psel p4.s, p5.s/Z, p8.s[w12]\n"
+ "ld1sb { z14.s }, p4/Z, [x27]\n"
+ "sub z14.h, p4/M, z14.h, z21.h\n"
+ "add x19, x27, %x[ld_in_row]\n"
+ ".inst 0x25705503 // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+ "ld1sb { z18.s }, p3/Z, [x19]\n"
+ "sub z18.h, p3/M, z18.h, z21.h\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25b05502 // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+ "ld1sb { z15.s }, p2/Z, [x19]\n"
+ "sub z15.h, p2/M, z15.h, z21.h\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25f05501 // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+ "ld1sb { z17.s }, p1/Z, [x19]\n"
+ "sub z17.h, p1/M, z17.h, z21.h\n"
+ "mov x12, #0x4\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ ".inst 0x25305500 // psel p0.s, p5.s/Z, p8.s[w12]\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
+ "sub z16.h, p0/M, z16.h, z21.h\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc16735c8 // sdot za.s[x9, 0], { z14.h-z15.h }, z7.h\n"
+ "add x27, x27, %x[ld_in_col]\n"
+ ".inst 0xc16535e8 // sdot za.s[x9, 0], { z15.h-z16.h }, z5.h\n"
+ "14:" // Padded: 0 priming loads
+ "mov x12, #0x0\n"
+ ".inst 0x25305504 // psel p4.s, p5.s/Z, p8.s[w12]\n"
+ "ld1sb { z14.s }, p4/Z, [x27]\n"
+ "sub z14.h, p4/M, z14.h, z21.h\n"
+ "add x19, x27, %x[ld_in_row]\n"
+ ".inst 0x25705503 // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+ "ld1sb { z18.s }, p3/Z, [x19]\n"
+ "sub z18.h, p3/M, z18.h, z21.h\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25b05502 // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+ "ld1sb { z15.s }, p2/Z, [x19]\n"
+ "sub z15.h, p2/M, z15.h, z21.h\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25f05501 // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+ "ld1sb { z17.s }, p1/Z, [x19]\n"
+ "sub z17.h, p1/M, z17.h, z21.h\n"
+ "mov x12, #0x4\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ ".inst 0x25305500 // psel p0.s, p5.s/Z, p8.s[w12]\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
+ "sub z16.h, p0/M, z16.h, z21.h\n"
+ "sub x28, x28, #0x2\n"
+ "sub x26, x26, #0x1\n"
+ "lsr x19, x28, #0x1\n"
+ "mov z16.d, z16.d\n"
+ "cmp x19, x26\n"
+ "csel x20, x19, x26, LT\n"
+ "add x27, x27, %x[ld_in_col]\n"
+ "and x28, x28, #0x1\n"
+ "sub x26, x26, x20\n"
+ "cbz x20, 16f\n"
+ "15:" // Padded: Main loop
+ ".inst 0xc16435c8 // sdot za.s[x9, 0], { z14.h-z15.h }, z4.h\n"
+ "mov x12, #0x0\n"
+ ".inst 0x25305504 // psel p4.s, p5.s/Z, p8.s[w12]\n"
+ ".inst 0xc16835c9 // sdot za.s[x9, 1], { z14.h-z15.h }, z8.h\n"
+ "add x19, x27, %x[ld_in_row]\n"
+ ".inst 0x25705503 // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+ "ld1sb { z14.s }, p4/Z, [x27]\n"
+ "ld1sb { z18.s }, p3/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25b05502 // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+ ".inst 0xc16335e8 // sdot za.s[x9, 0], { z15.h-z16.h }, z3.h\n"
+ ".inst 0xc16635e9 // sdot za.s[x9, 1], { z15.h-z16.h }, z6.h\n"
+ "ld1sb { z15.s }, p2/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25f05501 // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+ "ld1sb { z17.s }, p1/Z, [x19]\n"
+ "mov x12, #0x4\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub z14.h, p4/M, z14.h, z21.h\n"
+ ".inst 0x25305500 // psel p0.s, p5.s/Z, p8.s[w12]\n"
+ "sub z18.h, p3/M, z18.h, z21.h\n"
+ "sub z15.h, p2/M, z15.h, z21.h\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
+ "sub z17.h, p1/M, z17.h, z21.h\n"
+ "sub z16.h, p0/M, z16.h, z21.h\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "add x27, x27, %x[ld_in_col]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ ".inst 0xc0062818 // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+ "add x9, x9, #0x1\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc16735c8 // sdot za.s[x9, 0], { z14.h-z15.h }, z7.h\n"
+ ".inst 0xc1a1a418 // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+ "mov x12, #0x0\n"
+ ".inst 0x25305504 // psel p4.s, p5.s/Z, p8.s[w12]\n"
+ "add x19, x27, %x[ld_in_row]\n"
+ "ld1sb { z14.s }, p4/Z, [x27]\n"
+ ".inst 0xc16535e8 // sdot za.s[x9, 0], { z15.h-z16.h }, z5.h\n"
+ ".inst 0x25705503 // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+ "ld1sb { z18.s }, p3/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0xc1a0a238 // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+ ".inst 0x25b05502 // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+ "ld1sb { z15.s }, p2/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0xc1a2a318 // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+ ".inst 0x25f05501 // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+ "mov x12, #0x4\n"
+ "ld1sb { z17.s }, p1/Z, [x19]\n"
+ "sub z14.h, p4/M, z14.h, z21.h\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25305500 // psel p0.s, p5.s/Z, p8.s[w12]\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
+ "sub z18.h, p3/M, z18.h, z21.h\n"
+ "sub z15.h, p2/M, z15.h, z21.h\n"
+ "sub z17.h, p1/M, z17.h, z21.h\n"
+ "subs x20, x20, #0x1\n"
+ ".inst 0xc0042982 // mova za.d[x9, #2], { z12.d-z13.d }\n"
+ "sub z16.h, p0/M, z16.h, z21.h\n"
+ ".inst 0xc1b3c698 // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+ "st1b { z24.s }, p5, [x24]\n"
+ "add x24, x24, x22\n"
+ "st1b { z25.s }, p5, [x23]\n"
+ "add x23, x23, x21\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "mov z16.d, z16.d\n"
+ "add x27, x27, %x[ld_in_col]\n"
+ "bgt 15b\n"
+ "16:" // Main loop tail
+ ".inst 0xc16435c8 // sdot za.s[x9, 0], { z14.h-z15.h }, z4.h\n"
+ "mov x12, #0x0\n"
+ ".inst 0x25305504 // psel p4.s, p5.s/Z, p8.s[w12]\n"
+ ".inst 0xc16335e8 // sdot za.s[x9, 0], { z15.h-z16.h }, z3.h\n"
+ "add x19, x27, %x[ld_in_row]\n"
+ ".inst 0x25705503 // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+ ".inst 0xc16835c9 // sdot za.s[x9, 1], { z14.h-z15.h }, z8.h\n"
+ "ld1sb { z14.s }, p4/Z, [x27]\n"
+ ".inst 0x25b05502 // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+ ".inst 0x25f05501 // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+ "ld1sb { z18.s }, p3/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0xc16635e9 // sdot za.s[x9, 1], { z15.h-z16.h }, z6.h\n"
+ "mov x12, #0x4\n"
+ ".inst 0xc0062818 // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+ "ld1sb { z15.s }, p2/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0xc1a1a418 // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+ "ld1sb { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25305500 // psel p0.s, p5.s/Z, p8.s[w12]\n"
+ "sub z14.h, p4/M, z14.h, z21.h\n"
+ "sub z18.h, p3/M, z18.h, z21.h\n"
+ "sub z15.h, p2/M, z15.h, z21.h\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
+ "add x9, x9, #0x1\n"
+ "sub z17.h, p1/M, z17.h, z21.h\n"
+ "sub z16.h, p0/M, z16.h, z21.h\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "add x27, x27, %x[ld_in_col]\n"
+ ".inst 0xc1a0a238 // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ ".inst 0xc1a2a318 // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+ ".inst 0xc16735c8 // sdot za.s[x9, 0], { z14.h-z15.h }, z7.h\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc1b3c698 // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+ "st1b { z24.s }, p5, [x24]\n"
+ "add x24, x24, x22\n"
+ "st1b { z25.s }, p5, [x23]\n"
+ "add x23, x23, x21\n"
+ ".inst 0xc0042982 // mova za.d[x9, #2], { z12.d-z13.d }\n"
+ ".inst 0xc16535e8 // sdot za.s[x9, 0], { z15.h-z16.h }, z5.h\n"
+ "cbz x28, 17f\n" // Skip remainder inputs
+ "mov x12, #0x0\n"
+ ".inst 0x25305504 // psel p4.s, p5.s/Z, p8.s[w12]\n"
+ "ld1sb { z14.s }, p4/Z, [x27]\n"
+ "sub z14.h, p4/M, z14.h, z21.h\n"
+ "add x19, x27, %x[ld_in_row]\n"
+ ".inst 0x25705503 // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+ "ld1sb { z18.s }, p3/Z, [x19]\n"
+ "sub z18.h, p3/M, z18.h, z21.h\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25b05502 // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+ "ld1sb { z15.s }, p2/Z, [x19]\n"
+ "sub z15.h, p2/M, z15.h, z21.h\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25f05501 // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+ "ld1sb { z17.s }, p1/Z, [x19]\n"
+ "sub z17.h, p1/M, z17.h, z21.h\n"
+ "mov x12, #0x4\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ ".inst 0x25305500 // psel p0.s, p5.s/Z, p8.s[w12]\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
+ "sub z16.h, p0/M, z16.h, z21.h\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc16435c8 // sdot za.s[x9, 0], { z14.h-z15.h }, z4.h\n"
+ "sub x26, x26, #0x1\n"
+ ".inst 0xc16335e8 // sdot za.s[x9, 0], { z15.h-z16.h }, z3.h\n"
+ ".inst 0xc0062818 // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+ ".inst 0xc1a1a418 // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+ ".inst 0xc1a0a238 // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+ ".inst 0xc16835c9 // sdot za.s[x9, 1], { z14.h-z15.h }, z8.h\n"
+ ".inst 0xc1a2a318 // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+ ".inst 0xc16635e9 // sdot za.s[x9, 1], { z15.h-z16.h }, z6.h\n"
+ "add x9, x9, #0x1\n"
+ ".inst 0xc1b3c698 // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+ "st1b { z24.s }, p5, [x24]\n"
+ "add x24, x24, x22\n"
+ ".inst 0xc0042982 // mova za.d[x9, #2], { z12.d-z13.d }\n"
+ "st1b { z25.s }, p5, [x23]\n"
+ "add x23, x23, x21\n"
+ "17:" // Tail input: End
+ "cbz x26, 19f\n"
+ "18:" // Right padding loop
+ ".inst 0xc0062818 // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+ ".inst 0xc1a1a418 // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+ "add x9, x9, #0x1\n"
+ ".inst 0xc1a0a238 // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+ "subs x26, x26, #0x1\n"
+ ".inst 0xc0042982 // mova za.d[x9, #2], { z12.d-z13.d }\n"
+ ".inst 0xc1a2a318 // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+ ".inst 0xc1b3c698 // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+ "st1b { z24.s }, p5, [x24]\n"
+ "add x24, x24, x22\n"
+ "st1b { z25.s }, p5, [x23]\n"
+ "add x23, x23, x21\n"
+ "bgt 18b\n"
+ "19:" // End
+ ".inst 0xd503467f // SMSTOP\n"
+ :
+ : [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_n] "I" (offsetof(Args, n)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+}
+
+} // namespace depthwise
+} // namespace arm_conv
+
+#endif // defined(ARM_COMPUTE_ENABLE_SME2)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s2_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s2_4rows_dot_za/generic.cpp
index 03575aa799..fd35da4010 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s2_4rows_dot_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s2_4rows_dot_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -69,18 +69,18 @@ void sme2_s8q_planar_3x3_s2_4rows_dot_za_impl(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "ldr x6, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "ldr x7, [%x[args], %[offsetof_Args_pad_bottom]]\n"
"ptrue p2.b\n"
- "mov x20, #0x9\n"
- "ldr x7, [%x[args], %[offsetof_Args_pad_top]]\n"
+ "mov x19, #0x9\n"
+ "ldr x8, [%x[args], %[offsetof_Args_pad_top]]\n"
"ld1rh { z5.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
- "sub x20, x20, x6\n"
+ "sub x19, x19, x7\n"
".inst 0x25207812 // ptrue pn10.b\n"
"ldr x17, [%x[args], %[offsetof_Args_n_channels]]\n"
"whilelt p1.s, XZR, x17\n"
- "whilelt p9.s, XZR, x20\n"
+ "whilelt p9.s, XZR, x19\n"
"ld1rw { z4.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
- "whilelt p8.s, XZR, x7\n"
+ "whilelt p8.s, XZR, x8\n"
"addvl SP, SP, #-6\n"
"ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
"neg z5.h, p2/M, z5.h\n"
@@ -90,317 +90,317 @@ void sme2_s8q_planar_3x3_s2_4rows_dot_za_impl(
"ld1rw { z27.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
"ld1rw { z23.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
"1:" // Channel loop
- "ldr x20, [%x[qp], %[offsetof_Requantize32_bias]]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_bias]]\n"
"mov z0.s, #0x0\n"
- "cbz x20, 2f\n"
- "ld1w { z0.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "cbz x19, 2f\n"
+ "ld1w { z0.s }, p1/Z, [x19, x16, LSL #2]\n"
"2:" // Load bias: Done
- "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
- "mov x20, x22\n"
- "ld1sb { z24.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
+ "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+ "mov x19, x21\n"
+ "ld1sb { z24.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
"ld1rh { z13.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
"sub z24.h, z24.h, z13.h\n"
- "incw x22\n"
+ "incw x21\n"
"mov z17.h, #0x0\n"
- "ld1sb { z25.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
+ "ld1sb { z25.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
"sub z25.h, z25.h, z13.h\n"
"trn1 z10.h, z24.h, z25.h\n"
- "ld1sb { z16.s }, p2/Z, [x20]\n"
+ "ld1sb { z16.s }, p2/Z, [x19]\n"
"sub z16.h, z16.h, z13.h\n"
- "mov x20, x22\n"
+ "mov x19, x21\n"
"trn1 z11.h, z16.h, z17.h\n"
- "ld1sb { z24.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
+ "ld1sb { z24.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
"sub z24.h, z24.h, z13.h\n"
- "addvl x21, SP, #6\n"
- "ld1sb { z25.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
+ "addvl x20, SP, #6\n"
+ "ld1sb { z25.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
"sub z25.h, z25.h, z13.h\n"
- "incw x22\n"
- "ld1sb { z16.s }, p2/Z, [x20]\n"
+ "incw x21\n"
+ "ld1sb { z16.s }, p2/Z, [x19]\n"
"sub z16.h, z16.h, z13.h\n"
- "addvl x21, x21, #-2\n"
- "mov x20, x22\n"
- "st1h { z10.h }, p2, [x21]\n"
+ "addvl x20, x20, #-2\n"
+ "mov x19, x21\n"
+ "st1h { z10.h }, p2, [x20]\n"
"trn1 z10.h, z24.h, z25.h\n"
- "ld1sb { z24.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
- "ld1sb { z25.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
- "st1h { z11.h }, p2, [x21, #1, MUL VL]\n"
+ "ld1sb { z24.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
+ "ld1sb { z25.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
+ "st1h { z11.h }, p2, [x20, #1, MUL VL]\n"
"trn1 z11.h, z16.h, z17.h\n"
- "ld1sb { z16.s }, p2/Z, [x20]\n"
+ "ld1sb { z16.s }, p2/Z, [x19]\n"
"sub z24.h, z24.h, z13.h\n"
"sub z25.h, z25.h, z13.h\n"
- "ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
"sub z16.h, z16.h, z13.h\n"
- "addvl x21, x21, #-2\n"
- "st1h { z10.h }, p2, [x21]\n"
+ "addvl x20, x20, #-2\n"
+ "st1h { z10.h }, p2, [x20]\n"
"mov z1.d, z0.d\n"
- "st1h { z11.h }, p2, [x21, #1, MUL VL]\n"
- "addvl x21, x21, #-2\n"
+ "st1h { z11.h }, p2, [x20, #1, MUL VL]\n"
+ "addvl x20, x20, #-2\n"
"mov z2.d, z0.d\n"
"mov z3.d, z0.d\n"
"trn1 z10.h, z24.h, z25.h\n"
- "st1h { z10.h }, p2, [x21]\n"
+ "st1h { z10.h }, p2, [x20]\n"
"trn1 z11.h, z16.h, z17.h\n"
- "st1h { z11.h }, p2, [x21, #1, MUL VL]\n"
- "cbz x20, 3f\n"
- "ld1w { z8.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "st1h { z11.h }, p2, [x20, #1, MUL VL]\n"
+ "cbz x19, 3f\n"
+ "ld1w { z8.s }, p1/Z, [x19, x16, LSL #2]\n"
"3:" // Load mul: End
- "ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
- "cbz x20, 4f\n"
- "ld1w { z7.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+ "cbz x19, 4f\n"
+ "ld1w { z7.s }, p1/Z, [x19, x16, LSL #2]\n"
"4:" // Load right_shift: End
"ldr x15, [%x[args], %[offsetof_Args_input_cols]]\n"
- "sub x20, x15, #0x1\n"
- "orr x23, x20, %x[ld_in_col], LSL #16\n"
+ "sub x19, x15, #0x1\n"
+ "orr x22, x19, %x[ld_in_col], LSL #16\n"
"ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "orr x23, x17, x23, LSL #22\n"
- "mov x22, #0x9\n"
- "add x21, x7, x6\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
+ "orr x22, x17, x22, LSL #22\n"
+ "mov x21, #0x9\n"
+ "add x20, x8, x7\n"
+ "lsl x19, %x[ld_in_row], #0x0\n"
"ldr x13, [%x[args], %[offsetof_Args_output_cols]]\n"
- "mov x8, #0x0\n"
- "lsl x23, x23, #0x0\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x7, x14\n"
+ "mov x11, #0x0\n"
+ "lsl x22, x22, #0x0\n"
+ "sub x21, x21, x20\n"
+ "madd x19, x19, x8, x14\n"
"5:" // Issue prefetches
- "subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col]\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xf8b64a7c // rprfm pldstrm, x22, [x19]\n"
+ "add x19, x19, %x[ld_in_col]\n"
"bgt 5b\n"
- "ldr x25, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "msub x14, x7, x20, x14\n"
- ".inst 0xc0040c00 // mova za.d[x8, #0], { z0.d-z3.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
- ".inst 0xc0040c01 // mova za.d[x8, #1], { z0.d-z3.d }\n"
- "mov x22, #0x2\n"
- "ldp x11, x10, [x25], #0x10\n"
- ".inst 0xc0040c02 // mova za.d[x8, #2], { z0.d-z3.d }\n"
- "ldp x9, x28, [x20], #0x10\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
- "ldp x27, x26, [x25], #0x10\n"
- "ldp x25, x24, [x20], #0x10\n"
- "cbz x21, 7f\n"
- "cmp x21, x22\n"
- "csel x20, x21, x22, LT\n"
- "sub x21, x21, x20\n"
- "sub x22, x22, x20\n"
- "cbz x21, 7f\n"
- ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
+ "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "lsl x19, %x[ld_in_row], #0x0\n"
+ "msub x14, x8, x19, x14\n"
+ ".inst 0xc0046c00 // mova za.d[x11, #0], { z0.d-z3.d }\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ ".inst 0xc0046c01 // mova za.d[x11, #1], { z0.d-z3.d }\n"
+ "mov x21, #0x2\n"
+ "ldp x10, x9, [x24], #0x10\n"
+ ".inst 0xc0046c02 // mova za.d[x11, #2], { z0.d-z3.d }\n"
+ "ldp x28, x27, [x19], #0x10\n"
+ "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+ "ldp x26, x25, [x24], #0x10\n"
+ "ldp x24, x23, [x19], #0x10\n"
+ "cbz x20, 7f\n"
+ "cmp x20, x21\n"
+ "csel x19, x20, x21, LT\n"
+ "sub x20, x20, x19\n"
+ "sub x21, x21, x19\n"
+ "cbz x20, 7f\n"
+ ".inst 0xc0066c1c // mova { z28.d-z31.d }, za.d[x11, #0]\n"
".inst 0xc1a8ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
- "and x22, x21, #0x1\n"
+ "and x21, x20, #0x1\n"
".inst 0xc1a7aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
- "add x21, x21, #0x1\n"
- "lsr x21, x21, #0x1\n"
+ "add x20, x20, #0x1\n"
+ "lsr x20, x20, #0x1\n"
".inst 0xc1a4ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
- "sub x13, x13, x21\n"
+ "sub x13, x13, x20\n"
".inst 0xc1b7cf7c // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
"6:" // Left padding
- "subs x21, x21, #0x1\n"
- "st1b { z28.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- "st1b { z29.s }, p1, [x10]\n"
+ "subs x20, x20, #0x1\n"
+ "st1b { z28.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z30.s }, p1, [x27]\n"
- "add x27, x27, x25\n"
- "st1b { z31.s }, p1, [x26]\n"
+ "st1b { z29.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
+ "st1b { z30.s }, p1, [x26]\n"
"add x26, x26, x24\n"
+ "st1b { z31.s }, p1, [x25]\n"
+ "add x25, x25, x23\n"
"bgt 6b\n"
"7:" // Left padding: End
- "adds XZR, x7, x6\n"
+ "adds XZR, x8, x7\n"
"bne 12f\n"
- "cbz x22, 10f\n"
- "cmp x22, #0x1\n"
- "sub x15, x15, x22\n"
+ "cbz x21, 10f\n"
+ "cmp x21, #0x1\n"
+ "sub x15, x15, x21\n"
"beq 9f\n"
"8:" // Unpadded: 2 priming loads
- "add x21, x14, %x[ld_in_row]\n"
+ "add x20, x14, %x[ld_in_row]\n"
"ld1sb { z12.s }, p1/Z, [x14]\n"
- "addvl x20, SP, #4\n"
- "ld1sb { z20.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "addvl x19, SP, #4\n"
+ "ld1sb { z20.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
"add z12.h, z12.h, z5.h\n"
- "ld1sb { z13.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1sb { z13.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1sb { z19.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1sb { z19.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z13.h, z13.h, z19.h\n"
"add z13.h, z13.h, z5.h\n"
- "ld1sb { z14.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "ld1sb { z18.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1sb { z14.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z18.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z14.h, z14.h, z18.h\n"
"add z14.h, z14.h, z5.h\n"
- "ld1sb { z15.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "ld1sb { z17.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1sb { z15.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z17.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z15.h, z15.h, z17.h\n"
"add z15.h, z15.h, z5.h\n"
- "ld1sb { z16.s }, p1/Z, [x21]\n"
+ "ld1sb { z16.s }, p1/Z, [x20]\n"
"mov z16.d, z16.d\n"
"add z16.h, z16.h, z5.h\n"
- ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
+ ".inst 0xa0402a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc17a7588 // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xc17b75a8 // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
"9:" // Unpadded: 1 priming loads
- "add x21, x14, %x[ld_in_row]\n"
+ "add x20, x14, %x[ld_in_row]\n"
"ld1sb { z12.s }, p1/Z, [x14]\n"
- "addvl x20, SP, #2\n"
- "ld1sb { z20.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "addvl x19, SP, #2\n"
+ "ld1sb { z20.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
"add z12.h, z12.h, z5.h\n"
- "ld1sb { z13.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1sb { z13.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1sb { z19.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1sb { z19.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z13.h, z13.h, z19.h\n"
"add z13.h, z13.h, z5.h\n"
- "ld1sb { z14.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "ld1sb { z18.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1sb { z14.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z18.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z14.h, z14.h, z18.h\n"
"add z14.h, z14.h, z5.h\n"
- "ld1sb { z15.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "ld1sb { z17.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1sb { z15.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z17.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z15.h, z15.h, z17.h\n"
"add z15.h, z15.h, z5.h\n"
- "ld1sb { z16.s }, p1/Z, [x21]\n"
+ "ld1sb { z16.s }, p1/Z, [x20]\n"
"mov z16.d, z16.d\n"
"add z16.h, z16.h, z5.h\n"
- ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
+ ".inst 0xa0402a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc17a7588 // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xc17b75a8 // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
"10:" // Unpadded: 0 priming loads
"cmp x15, #0x2\n"
".inst 0xa0402bea // ld1h { z10.h-z11.h }, pn10.b/Z, [SP]\n"
"blt 18f\n"
- "add x21, x14, %x[ld_in_row]\n"
+ "add x20, x14, %x[ld_in_row]\n"
"ld1sb { z12.s }, p1/Z, [x14]\n"
"sub x15, x15, #0x2\n"
- "ld1sb { z20.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1sb { z20.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
"sub x13, x13, #0x1\n"
- "ld1sb { z13.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "lsr x20, x15, #0x1\n"
+ "ld1sb { z13.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "lsr x19, x15, #0x1\n"
"add z12.h, z12.h, z5.h\n"
- "ld1sb { z19.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1sb { z19.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z13.h, z13.h, z19.h\n"
- "cmp x20, x13\n"
- "ld1sb { z14.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "csel x23, x20, x13, LT\n"
+ "cmp x19, x13\n"
+ "ld1sb { z14.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "csel x22, x19, x13, LT\n"
"add z13.h, z13.h, z5.h\n"
- "ld1sb { z18.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1sb { z18.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z14.h, z14.h, z18.h\n"
"add z14.h, z14.h, z5.h\n"
- "ld1sb { z15.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1sb { z15.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1sb { z17.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1sb { z17.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z15.h, z15.h, z17.h\n"
"add z15.h, z15.h, z5.h\n"
- "ld1sb { z16.s }, p1/Z, [x21]\n"
+ "ld1sb { z16.s }, p1/Z, [x20]\n"
"mov z16.d, z16.d\n"
"add z16.h, z16.h, z5.h\n"
"and x15, x15, #0x1\n"
- "sub x13, x13, x23\n"
- "cbz x23, 17f\n"
+ "sub x13, x13, x22\n"
+ "cbz x22, 17f\n"
"11:" // Unpadded: Main loop
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- "addvl x20, SP, #4\n"
- "add x22, x14, %x[ld_in_row]\n"
- ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
- ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
- "addvl x21, SP, #2\n"
- "subs x23, x23, #0x1\n"
- ".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xc17a7588 // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+ "addvl x19, SP, #4\n"
+ "add x21, x14, %x[ld_in_row]\n"
+ ".inst 0xc17b75a8 // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+ ".inst 0xa0402a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+ "addvl x20, SP, #2\n"
+ "subs x22, x22, #0x1\n"
+ ".inst 0xc17a7589 // sdot za.s[x11, 1], { z12.h-z15.h }, z10.h\n"
"ld1sb { z12.s }, p1/Z, [x14]\n"
"add x14, x14, %x[ld_in_col]\n"
- "add x20, x14, %x[ld_in_row]\n"
- "ld1sb { z20.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- ".inst 0xc17b15a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z11.h\n"
+ "add x19, x14, %x[ld_in_row]\n"
+ "ld1sb { z20.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ ".inst 0xc17b75a9 // sdot za.s[x11, 1], { z13.h-z16.h }, z11.h\n"
"trn1 z12.h, z12.h, z20.h\n"
- "ld1sb { z13.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "ld1sb { z13.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
"add z12.h, z12.h, z5.h\n"
- ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
- "ld1sb { z19.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc0066c1c // mova { z28.d-z31.d }, za.d[x11, #0]\n"
+ "ld1sb { z19.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
"trn1 z13.h, z13.h, z19.h\n"
"add z13.h, z13.h, z5.h\n"
- "ld1sb { z14.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "add x8, x8, #0x1\n"
- ".inst 0xc0040c02 // mova za.d[x8, #2], { z0.d-z3.d }\n"
- "ld1sb { z18.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "ld1sb { z14.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add x11, x11, #0x1\n"
+ ".inst 0xc0046c02 // mova za.d[x11, #2], { z0.d-z3.d }\n"
+ "ld1sb { z18.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
"trn1 z14.h, z14.h, z18.h\n"
"add z14.h, z14.h, z5.h\n"
- "ld1sb { z15.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "ld1sb { z15.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0xc1a8ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
- "ld1sb { z17.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "ld1sb { z17.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
"trn1 z15.h, z15.h, z17.h\n"
"add z15.h, z15.h, z5.h\n"
- "ld1sb { z16.s }, p1/Z, [x22]\n"
+ "ld1sb { z16.s }, p1/Z, [x21]\n"
"mov z16.d, z16.d\n"
"add z16.h, z16.h, z5.h\n"
- ".inst 0xa0402aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc17a7588 // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
".inst 0xc1a7aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
"ld1sb { z12.s }, p1/Z, [x14]\n"
- ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
+ ".inst 0xc17b75a8 // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
".inst 0xc1a4ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
- "ld1sb { z20.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z20.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
".inst 0xc1b7cf7c // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
- "ld1sb { z13.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- "st1b { z28.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- "ld1sb { z19.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z13.h, z13.h, z19.h\n"
- "st1b { z29.s }, p1, [x10]\n"
- "ld1sb { z14.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z13.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "st1b { z28.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z30.s }, p1, [x27]\n"
- "ld1sb { z18.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z19.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "trn1 z13.h, z13.h, z19.h\n"
+ "st1b { z29.s }, p1, [x9]\n"
+ "ld1sb { z14.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "add x9, x9, x27\n"
+ "st1b { z30.s }, p1, [x26]\n"
+ "ld1sb { z18.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z14.h, z14.h, z18.h\n"
- "add x27, x27, x25\n"
- "ld1sb { z15.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- "st1b { z31.s }, p1, [x26]\n"
"add x26, x26, x24\n"
- "ld1sb { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z15.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "st1b { z31.s }, p1, [x25]\n"
+ "add x25, x25, x23\n"
+ "ld1sb { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z15.h, z15.h, z17.h\n"
"add z12.h, z12.h, z5.h\n"
- "ld1sb { z16.s }, p1/Z, [x20]\n"
+ "ld1sb { z16.s }, p1/Z, [x19]\n"
"mov z16.d, z16.d\n"
"add z13.h, z13.h, z5.h\n"
"add x14, x14, %x[ld_in_col]\n"
@@ -411,108 +411,108 @@ void sme2_s8q_planar_3x3_s2_4rows_dot_za_impl(
"bgt 11b\n"
"b 17f\n"
"12:" // Padded
- "cbz x22, 15f\n"
- "cmp x22, #0x1\n"
- "sub x15, x15, x22\n"
+ "cbz x21, 15f\n"
+ "cmp x21, #0x1\n"
+ "sub x15, x15, x21\n"
"beq 14f\n"
"13:" // Padded: 2 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"ld1sb { z12.s }, p0/Z, [x14]\n"
"add z12.h, p0/M, z12.h, z5.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "add x19, x14, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z20.s }, p0/Z, [x20]\n"
+ "ld1sb { z20.s }, p0/Z, [x19]\n"
"add z20.h, p0/M, z20.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z13.s }, p0/Z, [x20]\n"
+ "ld1sb { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z19.s }, p0/Z, [x20]\n"
+ "ld1sb { z19.s }, p0/Z, [x19]\n"
"add z19.h, p0/M, z19.h, z5.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
"trn1 z13.h, z13.h, z19.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z14.s }, p0/Z, [x20]\n"
+ "ld1sb { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z18.s }, p0/Z, [x20]\n"
+ "ld1sb { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z15.s }, p0/Z, [x20]\n"
+ "ld1sb { z15.s }, p0/Z, [x19]\n"
"add z15.h, p0/M, z15.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
+ "ld1sb { z17.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
"add z17.h, p0/M, z17.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z5.h\n"
- "addvl x20, SP, #4\n"
+ "addvl x19, SP, #4\n"
"trn1 z14.h, z14.h, z18.h\n"
"trn1 z15.h, z15.h, z17.h\n"
- ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa0402a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
"mov z16.d, z16.d\n"
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xc17a7588 // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
"add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
+ ".inst 0xc17b75a8 // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
"14:" // Padded: 1 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"ld1sb { z12.s }, p0/Z, [x14]\n"
"add z12.h, p0/M, z12.h, z5.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "add x19, x14, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z20.s }, p0/Z, [x20]\n"
+ "ld1sb { z20.s }, p0/Z, [x19]\n"
"add z20.h, p0/M, z20.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z13.s }, p0/Z, [x20]\n"
+ "ld1sb { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z19.s }, p0/Z, [x20]\n"
+ "ld1sb { z19.s }, p0/Z, [x19]\n"
"add z19.h, p0/M, z19.h, z5.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
"trn1 z13.h, z13.h, z19.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z14.s }, p0/Z, [x20]\n"
+ "ld1sb { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z18.s }, p0/Z, [x20]\n"
+ "ld1sb { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z15.s }, p0/Z, [x20]\n"
+ "ld1sb { z15.s }, p0/Z, [x19]\n"
"add z15.h, p0/M, z15.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
+ "ld1sb { z17.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
"add z17.h, p0/M, z17.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z5.h\n"
- "addvl x20, SP, #2\n"
+ "addvl x19, SP, #2\n"
"trn1 z14.h, z14.h, z18.h\n"
"trn1 z15.h, z15.h, z17.h\n"
- ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa0402a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
"mov z16.d, z16.d\n"
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xc17a7588 // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
"add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
+ ".inst 0xc17b75a8 // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
"15:" // Padded: 0 priming loads
"cmp x15, #0x2\n"
".inst 0xa0402bea // ld1h { z10.h-z11.h }, pn10.b/Z, [SP]\n"
@@ -521,357 +521,357 @@ void sme2_s8q_planar_3x3_s2_4rows_dot_za_impl(
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"ld1sb { z12.s }, p0/Z, [x14]\n"
"add z12.h, p0/M, z12.h, z5.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "add x19, x14, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z20.s }, p0/Z, [x20]\n"
+ "ld1sb { z20.s }, p0/Z, [x19]\n"
"add z20.h, p0/M, z20.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z13.s }, p0/Z, [x20]\n"
+ "ld1sb { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z19.s }, p0/Z, [x20]\n"
+ "ld1sb { z19.s }, p0/Z, [x19]\n"
"add z19.h, p0/M, z19.h, z5.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
"trn1 z13.h, z13.h, z19.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z14.s }, p0/Z, [x20]\n"
+ "ld1sb { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z18.s }, p0/Z, [x20]\n"
+ "ld1sb { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z15.s }, p0/Z, [x20]\n"
+ "ld1sb { z15.s }, p0/Z, [x19]\n"
"add z15.h, p0/M, z15.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
+ "ld1sb { z17.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
"add z17.h, p0/M, z17.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z5.h\n"
"sub x15, x15, #0x2\n"
"sub x13, x13, #0x1\n"
"trn1 z14.h, z14.h, z18.h\n"
"trn1 z15.h, z15.h, z17.h\n"
- "lsr x20, x15, #0x1\n"
- "cmp x20, x13\n"
+ "lsr x19, x15, #0x1\n"
+ "cmp x19, x13\n"
"mov z16.d, z16.d\n"
- "csel x22, x20, x13, LT\n"
+ "csel x21, x19, x13, LT\n"
"add x14, x14, %x[ld_in_col]\n"
"and x15, x15, #0x1\n"
- "sub x13, x13, x22\n"
- "cbz x22, 17f\n"
+ "sub x13, x13, x21\n"
+ "cbz x21, 17f\n"
"16:" // Padded: Main loop
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- "addvl x20, SP, #4\n"
+ ".inst 0xc17a7588 // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+ "addvl x19, SP, #4\n"
"mov x12, #0x0\n"
- ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
- ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc17b75a8 // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+ ".inst 0xa0402a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "add x21, x14, %x[ld_in_row]\n"
- ".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+ "add x20, x14, %x[ld_in_row]\n"
+ ".inst 0xc17a7589 // sdot za.s[x11, 1], { z12.h-z15.h }, z10.h\n"
"ld1sb { z12.s }, p0/Z, [x14]\n"
"add z12.h, p0/M, z12.h, z5.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z20.s }, p0/Z, [x21]\n"
+ "ld1sb { z20.s }, p0/Z, [x20]\n"
"add z20.h, p0/M, z20.h, z5.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc17b15a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z11.h\n"
- "ld1sb { z13.s }, p0/Z, [x21]\n"
+ ".inst 0xc17b75a9 // sdot za.s[x11, 1], { z13.h-z16.h }, z11.h\n"
+ "ld1sb { z13.s }, p0/Z, [x20]\n"
"add z13.h, p0/M, z13.h, z5.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z19.s }, p0/Z, [x21]\n"
+ "ld1sb { z19.s }, p0/Z, [x20]\n"
"mov x12, #0x4\n"
"add z19.h, p0/M, z19.h, z5.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z14.s }, p0/Z, [x21]\n"
+ "ld1sb { z14.s }, p0/Z, [x20]\n"
"add z14.h, p0/M, z14.h, z5.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z18.s }, p0/Z, [x21]\n"
+ "ld1sb { z18.s }, p0/Z, [x20]\n"
"add z18.h, p0/M, z18.h, z5.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z15.s }, p0/Z, [x21]\n"
+ "ld1sb { z15.s }, p0/Z, [x20]\n"
"add z15.h, p0/M, z15.h, z5.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z17.s }, p0/Z, [x21]\n"
+ "ld1sb { z17.s }, p0/Z, [x20]\n"
"add z17.h, p0/M, z17.h, z5.h\n"
"mov x12, #0x8\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
"trn1 z13.h, z13.h, z19.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "addvl x20, SP, #2\n"
- "ld1sb { z16.s }, p0/Z, [x21]\n"
+ "addvl x19, SP, #2\n"
+ "ld1sb { z16.s }, p0/Z, [x20]\n"
"trn1 z14.h, z14.h, z18.h\n"
"trn1 z15.h, z15.h, z17.h\n"
- ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa0402a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
"mov x12, #0x0\n"
- ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
- "add x8, x8, #0x1\n"
+ ".inst 0xc0066c1c // mova { z28.d-z31.d }, za.d[x11, #0]\n"
+ "add x11, x11, #0x1\n"
"add z16.h, p0/M, z16.h, z5.h\n"
"add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xc17a7588 // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"ld1sb { z12.s }, p0/Z, [x14]\n"
"add z12.h, p0/M, z12.h, z5.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "add x19, x14, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
"mov z16.d, z16.d\n"
- "ld1sb { z20.s }, p0/Z, [x20]\n"
+ "ld1sb { z20.s }, p0/Z, [x19]\n"
"add z20.h, p0/M, z20.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
- "ld1sb { z13.s }, p0/Z, [x20]\n"
+ ".inst 0xc17b75a8 // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+ "ld1sb { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z19.s }, p0/Z, [x20]\n"
+ "ld1sb { z19.s }, p0/Z, [x19]\n"
"mov x12, #0x4\n"
"add z19.h, p0/M, z19.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc0040c02 // mova za.d[x8, #2], { z0.d-z3.d }\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0xc0046c02 // mova za.d[x11, #2], { z0.d-z3.d }\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z14.s }, p0/Z, [x20]\n"
+ "ld1sb { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z18.s }, p0/Z, [x20]\n"
+ "ld1sb { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z15.s }, p0/Z, [x20]\n"
+ "ld1sb { z15.s }, p0/Z, [x19]\n"
".inst 0xc1a8ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
"add z15.h, p0/M, z15.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
+ "ld1sb { z17.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
".inst 0xc1a7aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"add z17.h, p0/M, z17.h, z5.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z5.h\n"
".inst 0xc1a4ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
- "subs x22, x22, #0x1\n"
+ "subs x21, x21, #0x1\n"
".inst 0xa0402bea // ld1h { z10.h-z11.h }, pn10.b/Z, [SP]\n"
".inst 0xc1b7cf7c // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
- "st1b { z28.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- "trn1 z12.h, z12.h, z20.h\n"
- "st1b { z29.s }, p1, [x10]\n"
+ "st1b { z28.s }, p1, [x10]\n"
"add x10, x10, x28\n"
+ "trn1 z12.h, z12.h, z20.h\n"
+ "st1b { z29.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
"trn1 z13.h, z13.h, z19.h\n"
"trn1 z14.h, z14.h, z18.h\n"
- "st1b { z30.s }, p1, [x27]\n"
- "add x27, x27, x25\n"
+ "st1b { z30.s }, p1, [x26]\n"
+ "add x26, x26, x24\n"
"trn1 z15.h, z15.h, z17.h\n"
"mov z16.d, z16.d\n"
- "st1b { z31.s }, p1, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z31.s }, p1, [x25]\n"
+ "add x25, x25, x23\n"
"add x14, x14, %x[ld_in_col]\n"
"bgt 16b\n"
"17:" // Main loop tail
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- "addvl x20, SP, #4\n"
+ ".inst 0xc17a7588 // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+ "addvl x19, SP, #4\n"
"mov x12, #0x0\n"
- ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
- ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc17b75a8 // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+ ".inst 0xa0402a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "add x20, x14, %x[ld_in_row]\n"
- ".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+ "add x19, x14, %x[ld_in_row]\n"
+ ".inst 0xc17a7589 // sdot za.s[x11, 1], { z12.h-z15.h }, z10.h\n"
"ld1sb { z12.s }, p0/Z, [x14]\n"
"add z12.h, p0/M, z12.h, z5.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z20.s }, p0/Z, [x20]\n"
+ "ld1sb { z20.s }, p0/Z, [x19]\n"
"add z20.h, p0/M, z20.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc17b15a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z11.h\n"
- "ld1sb { z13.s }, p0/Z, [x20]\n"
+ ".inst 0xc17b75a9 // sdot za.s[x11, 1], { z13.h-z16.h }, z11.h\n"
+ "ld1sb { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z19.s }, p0/Z, [x20]\n"
+ "ld1sb { z19.s }, p0/Z, [x19]\n"
"mov x12, #0x4\n"
"add z19.h, p0/M, z19.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z14.s }, p0/Z, [x20]\n"
+ "ld1sb { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z18.s }, p0/Z, [x20]\n"
+ "ld1sb { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z15.s }, p0/Z, [x20]\n"
- ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
+ "ld1sb { z15.s }, p0/Z, [x19]\n"
+ ".inst 0xc0066c1c // mova { z28.d-z31.d }, za.d[x11, #0]\n"
"add z15.h, p0/M, z15.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
+ "ld1sb { z17.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
".inst 0xc1a8ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"add z17.h, p0/M, z17.h, z5.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
- "addvl x20, SP, #2\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
+ "addvl x19, SP, #2\n"
".inst 0xc1a7aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
"trn1 z12.h, z12.h, z20.h\n"
- "add x8, x8, #0x1\n"
+ "add x11, x11, #0x1\n"
"add z16.h, p0/M, z16.h, z5.h\n"
"trn1 z13.h, z13.h, z19.h\n"
"trn1 z14.h, z14.h, z18.h\n"
"add x14, x14, %x[ld_in_col]\n"
"trn1 z15.h, z15.h, z17.h\n"
- ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa0402a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
".inst 0xc1a4ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
"mov z16.d, z16.d\n"
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xc17a7588 // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
".inst 0xc1b7cf7c // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
- "st1b { z28.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- ".inst 0xc0040c02 // mova za.d[x8, #2], { z0.d-z3.d }\n"
- "st1b { z29.s }, p1, [x10]\n"
+ "st1b { z28.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
+ ".inst 0xc0046c02 // mova za.d[x11, #2], { z0.d-z3.d }\n"
+ "st1b { z29.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
+ ".inst 0xc17b75a8 // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
".inst 0xa0402bea // ld1h { z10.h-z11.h }, pn10.b/Z, [SP]\n"
- "st1b { z30.s }, p1, [x27]\n"
- "add x27, x27, x25\n"
- "st1b { z31.s }, p1, [x26]\n"
+ "st1b { z30.s }, p1, [x26]\n"
"add x26, x26, x24\n"
+ "st1b { z31.s }, p1, [x25]\n"
+ "add x25, x25, x23\n"
"18:" // Main loop skip tail
"cbz x15, 19f\n" // Skip remainder inputs
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"ld1sb { z12.s }, p0/Z, [x14]\n"
"add z12.h, p0/M, z12.h, z5.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "add x19, x14, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z20.s }, p0/Z, [x20]\n"
+ "ld1sb { z20.s }, p0/Z, [x19]\n"
"add z20.h, p0/M, z20.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z13.s }, p0/Z, [x20]\n"
+ "ld1sb { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z19.s }, p0/Z, [x20]\n"
+ "ld1sb { z19.s }, p0/Z, [x19]\n"
"add z19.h, p0/M, z19.h, z5.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
"trn1 z13.h, z13.h, z19.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z14.s }, p0/Z, [x20]\n"
+ "ld1sb { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z18.s }, p0/Z, [x20]\n"
+ "ld1sb { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z15.s }, p0/Z, [x20]\n"
+ "ld1sb { z15.s }, p0/Z, [x19]\n"
"add z15.h, p0/M, z15.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
+ "ld1sb { z17.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
"add z17.h, p0/M, z17.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z5.h\n"
"trn1 z14.h, z14.h, z18.h\n"
"trn1 z15.h, z15.h, z17.h\n"
"mov z16.d, z16.d\n"
- "addvl x20, SP, #4\n"
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ "addvl x19, SP, #4\n"
+ ".inst 0xc17a7588 // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
"sub x13, x13, #0x1\n"
- ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
- ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
+ ".inst 0xc17b75a8 // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+ ".inst 0xa0402a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc0066c1c // mova { z28.d-z31.d }, za.d[x11, #0]\n"
".inst 0xc1a8ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
".inst 0xc1a7aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
- ".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xc17a7589 // sdot za.s[x11, 1], { z12.h-z15.h }, z10.h\n"
".inst 0xc1a4ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
- ".inst 0xc17b15a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z11.h\n"
- "add x8, x8, #0x1\n"
+ ".inst 0xc17b75a9 // sdot za.s[x11, 1], { z13.h-z16.h }, z11.h\n"
+ "add x11, x11, #0x1\n"
".inst 0xc1b7cf7c // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
- "st1b { z28.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- ".inst 0xc0040c02 // mova za.d[x8, #2], { z0.d-z3.d }\n"
- "st1b { z29.s }, p1, [x10]\n"
+ "st1b { z28.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z30.s }, p1, [x27]\n"
- "add x27, x27, x25\n"
- "st1b { z31.s }, p1, [x26]\n"
+ ".inst 0xc0046c02 // mova za.d[x11, #2], { z0.d-z3.d }\n"
+ "st1b { z29.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
+ "st1b { z30.s }, p1, [x26]\n"
"add x26, x26, x24\n"
+ "st1b { z31.s }, p1, [x25]\n"
+ "add x25, x25, x23\n"
"19:" // Tail input: End
"cbz x13, 21f\n"
"20:" // Right padding loop
- ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
+ ".inst 0xc0066c1c // mova { z28.d-z31.d }, za.d[x11, #0]\n"
".inst 0xc1a8ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
- "add x8, x8, #0x1\n"
+ "add x11, x11, #0x1\n"
".inst 0xc1a7aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
"subs x13, x13, #0x1\n"
- ".inst 0xc0040c02 // mova za.d[x8, #2], { z0.d-z3.d }\n"
+ ".inst 0xc0046c02 // mova za.d[x11, #2], { z0.d-z3.d }\n"
".inst 0xc1a4ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
".inst 0xc1b7cf7c // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
- "st1b { z28.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- "st1b { z29.s }, p1, [x10]\n"
+ "st1b { z28.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z30.s }, p1, [x27]\n"
- "add x27, x27, x25\n"
- "st1b { z31.s }, p1, [x26]\n"
+ "st1b { z29.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
+ "st1b { z30.s }, p1, [x26]\n"
"add x26, x26, x24\n"
+ "st1b { z31.s }, p1, [x25]\n"
+ "add x25, x25, x23\n"
"bgt 20b\n"
"21:" // End
- "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
- "incw x22, ALL, MUL #9\n"
- "str x22, [%x[args], %[offsetof_Args_weights]]\n"
+ "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+ "incw x21, ALL, MUL #9\n"
+ "str x21, [%x[args], %[offsetof_Args_weights]]\n"
"incw x16\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
"whilelt p1.s, x16, x17\n"
"ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "add x14, x14, x20\n"
+ "add x14, x14, x19\n"
"str x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "ldr x25, [%x[args], %[offsetof_Args_outptrs]]\n"
- "ldr x24, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
- "ldp x23, x22, [x25, #0x0]\n"
- "ldp x21, x20, [x24, #0x0]\n"
- "add x23, x23, x21\n"
+ "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+ "ldp x22, x21, [x24, #0x0]\n"
+ "ldp x20, x19, [x23, #0x0]\n"
"add x22, x22, x20\n"
- "stp x23, x22, [x25, #0x0]\n"
- "ldp x23, x22, [x25, #0x10]\n"
- "ldp x21, x20, [x24, #0x10]\n"
- "add x23, x23, x21\n"
+ "add x21, x21, x19\n"
+ "stp x22, x21, [x24, #0x0]\n"
+ "ldp x22, x21, [x24, #0x10]\n"
+ "ldp x20, x19, [x23, #0x10]\n"
"add x22, x22, x20\n"
- "stp x23, x22, [x25, #0x10]\n"
+ "add x21, x21, x19\n"
+ "stp x22, x21, [x24, #0x10]\n"
"b.any 1b\n"
"addvl SP, SP, #6\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_5x5_s1_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_5x5_s1_4rows_dot_za/generic.cpp
index d366b3c8d5..722fd5eaad 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_5x5_s1_4rows_dot_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_5x5_s1_4rows_dot_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -69,20 +69,20 @@ void sme2_s8q_planar_5x5_s1_4rows_dot_za_impl(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "ldr x4, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "ldr x5, [%x[args], %[offsetof_Args_pad_bottom]]\n"
"ptrue p2.b\n"
- "mov x20, #0x8\n"
+ "mov x19, #0x8\n"
"ldr x6, [%x[args], %[offsetof_Args_pad_top]]\n"
"ld1rh { z25.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
- "sub x20, x20, x4\n"
+ "sub x19, x19, x5\n"
".inst 0x25207812 // ptrue pn10.b\n"
"ldr x7, [%x[args], %[offsetof_Args_n_channels]]\n"
"whilelt p1.s, XZR, x7\n"
- "whilelt p9.s, XZR, x20\n"
+ "whilelt p9.s, XZR, x19\n"
"ld1rw { z9.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
"whilelt p8.s, XZR, x6\n"
"addvl SP, SP, #-30\n"
- "ldr x5, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ldr x17, [%x[args], %[offsetof_Args_current_channel]]\n"
"neg z25.h, p2/M, z25.h\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
"ld1rw { z3.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
@@ -90,262 +90,298 @@ void sme2_s8q_planar_5x5_s1_4rows_dot_za_impl(
"ld1rw { z24.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
"ld1rw { z31.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
"1:" // Channel loop
- "ldr x20, [%x[qp], %[offsetof_Requantize32_bias]]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_bias]]\n"
"mov z6.s, #0x0\n"
- "cbz x20, 2f\n"
- "ld1w { z6.s }, p1/Z, [x20, x5, LSL #2]\n"
+ "cbz x19, 2f\n"
+ "ld1w { z6.s }, p1/Z, [x19, x17, LSL #2]\n"
"2:" // Load bias: Done
"ldr x23, [%x[args], %[offsetof_Args_weights]]\n"
- "mov x22, x23\n"
- "ld1sb { z18.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
+ "mov x21, x23\n"
+ "ld1sb { z18.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
"ld1rh { z12.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
"mov z2.h, #0x0\n"
"sub z18.h, z18.h, z12.h\n"
"incw x23\n"
- "ld1sb { z17.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
+ "ld1sb { z17.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
"sub z17.h, z17.h, z12.h\n"
"trn1 z0.h, z2.h, z18.h\n"
- "ld1sb { z21.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
+ "ld1sb { z21.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
"sub z21.h, z21.h, z12.h\n"
"trn1 z8.h, z18.h, z17.h\n"
- "ld1sb { z16.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
+ "ld1sb { z16.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
"sub z16.h, z16.h, z12.h\n"
"trn1 z4.h, z17.h, z21.h\n"
- "ld1sb { z15.s }, p2/Z, [x22]\n"
+ "ld1sb { z15.s }, p2/Z, [x21]\n"
"sub z15.h, z15.h, z12.h\n"
- "mov x22, x23\n"
+ "mov x21, x23\n"
"trn1 z5.h, z21.h, z16.h\n"
- "ld1sb { z18.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
+ "ld1sb { z18.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
"trn1 z10.h, z16.h, z15.h\n"
"trn1 z11.h, z15.h, z2.h\n"
- "ld1sb { z17.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
+ "ld1sb { z17.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
"sub z18.h, z18.h, z12.h\n"
"sub z17.h, z17.h, z12.h\n"
- "ld1sb { z21.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
+ "ld1sb { z21.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
"sub z21.h, z21.h, z12.h\n"
- "addvl x21, SP, #30\n"
- "ld1sb { z16.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
+ "addvl x20, SP, #30\n"
+ "ld1sb { z16.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
"incw x23\n"
"sub z16.h, z16.h, z12.h\n"
- "ld1sb { z15.s }, p2/Z, [x22]\n"
- "addvl x21, x21, #-6\n"
+ "ld1sb { z15.s }, p2/Z, [x21]\n"
+ "addvl x20, x20, #-6\n"
"sub z15.h, z15.h, z12.h\n"
- "mov x22, x23\n"
- "st1h { z0.h }, p2, [x21]\n"
+ "mov x21, x23\n"
+ "st1h { z0.h }, p2, [x20]\n"
"trn1 z0.h, z2.h, z18.h\n"
"incw x23\n"
- "ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
- "st1h { z8.h }, p2, [x21, #1, MUL VL]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+ "st1h { z8.h }, p2, [x20, #1, MUL VL]\n"
"trn1 z8.h, z18.h, z17.h\n"
- "ld1sb { z18.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z4.h }, p2, [x21, #2, MUL VL]\n"
+ "ld1sb { z18.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z4.h }, p2, [x20, #2, MUL VL]\n"
"trn1 z4.h, z17.h, z21.h\n"
- "ld1sb { z17.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z5.h }, p2, [x21, #3, MUL VL]\n"
+ "ld1sb { z17.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z5.h }, p2, [x20, #3, MUL VL]\n"
"trn1 z5.h, z21.h, z16.h\n"
- "ld1sb { z21.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z10.h }, p2, [x21, #4, MUL VL]\n"
+ "ld1sb { z21.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z10.h }, p2, [x20, #4, MUL VL]\n"
"trn1 z10.h, z16.h, z15.h\n"
- "ld1sb { z16.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z11.h }, p2, [x21, #5, MUL VL]\n"
+ "ld1sb { z16.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z11.h }, p2, [x20, #5, MUL VL]\n"
"trn1 z11.h, z15.h, z2.h\n"
"sub z18.h, z18.h, z12.h\n"
- "addvl x21, x21, #-6\n"
+ "addvl x20, x20, #-6\n"
"sub z17.h, z17.h, z12.h\n"
- "ld1sb { z15.s }, p2/Z, [x22]\n"
+ "ld1sb { z15.s }, p2/Z, [x21]\n"
"sub z21.h, z21.h, z12.h\n"
- "mov x22, x23\n"
+ "mov x21, x23\n"
"sub z16.h, z16.h, z12.h\n"
"sub z15.h, z15.h, z12.h\n"
- "st1h { z0.h }, p2, [x21]\n"
+ "st1h { z0.h }, p2, [x20]\n"
"incw x23\n"
- "st1h { z8.h }, p2, [x21, #1, MUL VL]\n"
+ "st1h { z8.h }, p2, [x20, #1, MUL VL]\n"
"trn1 z0.h, z2.h, z18.h\n"
"trn1 z8.h, z18.h, z17.h\n"
- "ld1sb { z18.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z4.h }, p2, [x21, #2, MUL VL]\n"
+ "ld1sb { z18.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z4.h }, p2, [x20, #2, MUL VL]\n"
"trn1 z4.h, z17.h, z21.h\n"
- "ld1sb { z17.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z5.h }, p2, [x21, #3, MUL VL]\n"
+ "ld1sb { z17.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z5.h }, p2, [x20, #3, MUL VL]\n"
"trn1 z5.h, z21.h, z16.h\n"
- "ld1sb { z21.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z10.h }, p2, [x21, #4, MUL VL]\n"
+ "ld1sb { z21.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z10.h }, p2, [x20, #4, MUL VL]\n"
"trn1 z10.h, z16.h, z15.h\n"
- "ld1sb { z16.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z11.h }, p2, [x21, #5, MUL VL]\n"
+ "ld1sb { z16.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z11.h }, p2, [x20, #5, MUL VL]\n"
"trn1 z11.h, z15.h, z2.h\n"
"sub z18.h, z18.h, z12.h\n"
"sub z17.h, z17.h, z12.h\n"
- "ld1sb { z15.s }, p2/Z, [x22]\n"
- "addvl x21, x21, #-6\n"
+ "ld1sb { z15.s }, p2/Z, [x21]\n"
+ "addvl x20, x20, #-6\n"
"sub z21.h, z21.h, z12.h\n"
"sub z16.h, z16.h, z12.h\n"
- "mov x22, x23\n"
- "st1h { z0.h }, p2, [x21]\n"
+ "mov x21, x23\n"
+ "st1h { z0.h }, p2, [x20]\n"
"sub z15.h, z15.h, z12.h\n"
- "st1h { z8.h }, p2, [x21, #1, MUL VL]\n"
+ "st1h { z8.h }, p2, [x20, #1, MUL VL]\n"
"trn1 z0.h, z2.h, z18.h\n"
"trn1 z8.h, z18.h, z17.h\n"
- "ld1sb { z18.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z4.h }, p2, [x21, #2, MUL VL]\n"
+ "ld1sb { z18.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z4.h }, p2, [x20, #2, MUL VL]\n"
"trn1 z4.h, z17.h, z21.h\n"
- "ld1sb { z17.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z5.h }, p2, [x21, #3, MUL VL]\n"
+ "ld1sb { z17.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z5.h }, p2, [x20, #3, MUL VL]\n"
"trn1 z5.h, z21.h, z16.h\n"
- "ld1sb { z21.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z10.h }, p2, [x21, #4, MUL VL]\n"
+ "ld1sb { z21.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z10.h }, p2, [x20, #4, MUL VL]\n"
"trn1 z10.h, z16.h, z15.h\n"
- "ld1sb { z16.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z11.h }, p2, [x21, #5, MUL VL]\n"
+ "ld1sb { z16.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z11.h }, p2, [x20, #5, MUL VL]\n"
"trn1 z11.h, z15.h, z2.h\n"
- "ld1sb { z15.s }, p2/Z, [x22]\n"
+ "ld1sb { z15.s }, p2/Z, [x21]\n"
"sub z18.h, z18.h, z12.h\n"
- "addvl x21, x21, #-6\n"
+ "addvl x20, x20, #-6\n"
"sub z17.h, z17.h, z12.h\n"
"sub z21.h, z21.h, z12.h\n"
- "st1h { z0.h }, p2, [x21]\n"
+ "st1h { z0.h }, p2, [x20]\n"
"sub z16.h, z16.h, z12.h\n"
"sub z15.h, z15.h, z12.h\n"
- "st1h { z8.h }, p2, [x21, #1, MUL VL]\n"
- "st1h { z4.h }, p2, [x21, #2, MUL VL]\n"
+ "st1h { z8.h }, p2, [x20, #1, MUL VL]\n"
+ "st1h { z4.h }, p2, [x20, #2, MUL VL]\n"
"mov z7.d, z6.d\n"
"trn1 z0.h, z2.h, z18.h\n"
- "st1h { z5.h }, p2, [x21, #3, MUL VL]\n"
+ "st1h { z5.h }, p2, [x20, #3, MUL VL]\n"
"trn1 z8.h, z18.h, z17.h\n"
"trn1 z4.h, z17.h, z21.h\n"
- "st1h { z10.h }, p2, [x21, #4, MUL VL]\n"
+ "st1h { z10.h }, p2, [x20, #4, MUL VL]\n"
"trn1 z5.h, z21.h, z16.h\n"
"trn1 z10.h, z16.h, z15.h\n"
- "st1h { z11.h }, p2, [x21, #5, MUL VL]\n"
- "addvl x21, x21, #-6\n"
+ "st1h { z11.h }, p2, [x20, #5, MUL VL]\n"
+ "addvl x20, x20, #-6\n"
"trn1 z11.h, z15.h, z2.h\n"
- "st1h { z0.h }, p2, [x21]\n"
- "st1h { z8.h }, p2, [x21, #1, MUL VL]\n"
- "st1h { z4.h }, p2, [x21, #2, MUL VL]\n"
- "st1h { z5.h }, p2, [x21, #3, MUL VL]\n"
- "st1h { z10.h }, p2, [x21, #4, MUL VL]\n"
- "st1h { z11.h }, p2, [x21, #5, MUL VL]\n"
- "cbz x20, 3f\n"
- "ld1w { z3.s }, p1/Z, [x20, x5, LSL #2]\n"
+ "st1h { z0.h }, p2, [x20]\n"
+ "st1h { z8.h }, p2, [x20, #1, MUL VL]\n"
+ "st1h { z4.h }, p2, [x20, #2, MUL VL]\n"
+ "st1h { z5.h }, p2, [x20, #3, MUL VL]\n"
+ "st1h { z10.h }, p2, [x20, #4, MUL VL]\n"
+ "st1h { z11.h }, p2, [x20, #5, MUL VL]\n"
+ "cbz x19, 3f\n"
+ "ld1w { z3.s }, p1/Z, [x19, x17, LSL #2]\n"
"3:" // Load mul: End
- "ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
- "cbz x20, 4f\n"
- "ld1w { z1.s }, p1/Z, [x20, x5, LSL #2]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+ "cbz x19, 4f\n"
+ "ld1w { z1.s }, p1/Z, [x19, x17, LSL #2]\n"
"4:" // Load right_shift: End
- "ldr x17, [%x[args], %[offsetof_Args_input_cols]]\n"
- "sub x20, x17, #0x1\n"
- "orr x23, x20, %x[ld_in_col], LSL #16\n"
- "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
- "orr x23, x7, x23, LSL #22\n"
- "mov x22, #0x8\n"
- "add x21, x6, x4\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "ldr x15, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "ldr x16, [%x[args], %[offsetof_Args_input_cols]]\n"
+ "sub x19, x16, #0x1\n"
+ "orr x22, x19, %x[ld_in_col], LSL #16\n"
+ "ldr x15, [%x[args], %[offsetof_Args_inptr]]\n"
+ "orr x22, x7, x22, LSL #22\n"
+ "mov x21, #0x8\n"
+ "add x20, x6, x5\n"
+ "lsl x19, %x[ld_in_row], #0x0\n"
+ "ldr x14, [%x[args], %[offsetof_Args_output_cols]]\n"
"mov x11, #0x0\n"
"mov x8, #0x8\n"
- "lsl x23, x23, #0x0\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x6, x16\n"
+ "lsl x22, x22, #0x0\n"
+ "sub x21, x21, x20\n"
+ "madd x19, x19, x6, x15\n"
"5:" // Issue prefetches
- "subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col]\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xf8b64a7c // rprfm pldstrm, x22, [x19]\n"
+ "add x19, x19, %x[ld_in_col]\n"
"bgt 5b\n"
- "ldr x25, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "msub x16, x6, x20, x16\n"
+ "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "lsl x19, %x[ld_in_row], #0x0\n"
+ "msub x15, x6, x19, x15\n"
".inst 0xc00468c0 // mova za.d[x11, #0], { z6.d-z7.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
".inst 0xc00468c1 // mova za.d[x11, #1], { z6.d-z7.d }\n"
- "mov x22, #0x4\n"
- "ldp x14, x13, [x25], #0x10\n"
+ "mov x21, #0x4\n"
+ "ldp x13, x4, [x24], #0x10\n"
".inst 0xc00468c2 // mova za.d[x11, #2], { z6.d-z7.d }\n"
- "ldp x3, x10, [x20], #0x10\n"
+ "ldp x10, x9, [x19], #0x10\n"
".inst 0xc00468c3 // mova za.d[x11, #3], { z6.d-z7.d }\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
+ "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
".inst 0xc00468c4 // mova za.d[x11, #4], { z6.d-z7.d }\n"
- "ldp x9, x28, [x25], #0x10\n"
+ "ldp x28, x27, [x24], #0x10\n"
".inst 0xc00468c5 // mova za.d[x11, #5], { z6.d-z7.d }\n"
- "ldp x27, x26, [x20], #0x10\n"
+ "ldp x26, x25, [x19], #0x10\n"
".inst 0xc00468c6 // mova za.d[x11, #6], { z6.d-z7.d }\n"
".inst 0xc00468c7 // mova za.d[x11, #7], { z6.d-z7.d }\n"
".inst 0xc00408c0 // mova za.d[x8, #0], { z6.d-z7.d }\n"
".inst 0xc00408c1 // mova za.d[x8, #1], { z6.d-z7.d }\n"
- "cbz x21, 7f\n"
- "cmp x21, x22\n"
- "csel x20, x21, x22, LT\n"
- "sub x21, x21, x20\n"
- "sub x22, x22, x20\n"
- "cbz x21, 7f\n"
+ "cbz x20, 7f\n"
+ "cmp x20, x21\n"
+ "csel x19, x20, x21, LT\n"
+ "sub x20, x20, x19\n"
+ "sub x21, x21, x19\n"
+ "cbz x20, 7f\n"
".inst 0xc006680c // mova { z12.d-z13.d }, za.d[x11, #0]\n"
- "sub x15, x15, x21\n"
+ "sub x14, x14, x20\n"
".inst 0xc006682e // mova { z14.d-z15.d }, za.d[x11, #1]\n"
".inst 0xc1a3ac0c // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z3.s\n"
".inst 0xc1a1aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z1.s\n"
".inst 0xc1a9ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z9.s\n"
".inst 0xc1bfcf0c // sclamp { z12.s-z15.s }, z24.s, z31.s\n"
"6:" // Left padding
- "subs x21, x21, #0x1\n"
- "st1b { z12.s }, p1, [x14]\n"
- "add x14, x14, x3\n"
- "st1b { z14.s }, p1, [x13]\n"
+ "subs x20, x20, #0x1\n"
+ "st1b { z12.s }, p1, [x13]\n"
"add x13, x13, x10\n"
- "st1b { z13.s }, p1, [x9]\n"
- "add x9, x9, x27\n"
- "st1b { z15.s }, p1, [x28]\n"
+ "st1b { z14.s }, p1, [x4]\n"
+ "add x4, x4, x9\n"
+ "st1b { z13.s }, p1, [x28]\n"
"add x28, x28, x26\n"
+ "st1b { z15.s }, p1, [x27]\n"
+ "add x27, x27, x25\n"
"bgt 6b\n"
"7:" // Left padding: End
- "adds XZR, x6, x4\n"
+ "adds XZR, x6, x5\n"
"bne 14f\n"
- "cbz x22, 12f\n"
- "cmp x22, #0x1\n"
- "sub x17, x17, x22\n"
+ "cbz x21, 12f\n"
+ "cmp x21, #0x1\n"
+ "sub x16, x16, x21\n"
"beq 11f\n"
- "cmp x22, #0x2\n"
+ "cmp x21, #0x2\n"
"beq 10f\n"
- "cmp x22, #0x3\n"
+ "cmp x21, #0x3\n"
"beq 9f\n"
"8:" // Unpadded: 4 priming loads
- "add x21, x16, %x[ld_in_row]\n"
- "ld1sb { z17.s }, p1/Z, [x16]\n"
- "addvl x20, SP, #24\n"
+ "add x20, x15, %x[ld_in_row]\n"
+ "ld1sb { z17.s }, p1/Z, [x15]\n"
+ "addvl x19, SP, #24\n"
+ "ld1sb { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z27.h, z17.h, z16.h\n"
+ "add z27.h, z27.h, z25.h\n"
+ "ld1sb { z17.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "add x15, x15, %x[ld_in_col]\n"
+ "ld1sb { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z28.h, z17.h, z16.h\n"
+ "add z28.h, z28.h, z25.h\n"
+ "ld1sb { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z29.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z29.h, z16.h, z29.h\n"
+ "add z29.h, z29.h, z25.h\n"
+ "ld1sb { z17.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xa1402a60 // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc1687768 // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+ "ld1sb { z16.s }, p1/Z, [x20]\n"
+ "trn1 z30.h, z17.h, z16.h\n"
+ ".inst 0xc1607769 // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+ ".inst 0xa0412a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+ "add z30.h, z30.h, z25.h\n"
+ ".inst 0xc1657788 // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+ ".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+ ".inst 0xa0422a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+ ".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+ ".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+ "9:" // Unpadded: 3 priming loads
+ "add x21, x15, %x[ld_in_row]\n"
+ "ld1sb { z17.s }, p1/Z, [x15]\n"
+ "addvl x20, SP, #18\n"
"ld1sb { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"trn1 z27.h, z17.h, z16.h\n"
"add z27.h, z27.h, z25.h\n"
"ld1sb { z17.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "add x16, x16, %x[ld_in_col]\n"
+ "addvl x19, SP, #24\n"
"ld1sb { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"trn1 z28.h, z17.h, z16.h\n"
"add z28.h, z28.h, z25.h\n"
- "ld1sb { z16.s }, p1/Z, [x21]\n"
+ "ld1sb { z17.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "ld1sb { z29.s }, p1/Z, [x21]\n"
+ "add x15, x15, %x[ld_in_col]\n"
+ "ld1sb { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z29.h, z16.h, z29.h\n"
+ "trn1 z29.h, z17.h, z16.h\n"
"add z29.h, z29.h, z25.h\n"
"ld1sb { z17.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
@@ -354,37 +390,47 @@ void sme2_s8q_planar_5x5_s1_4rows_dot_za_impl(
"ld1sb { z16.s }, p1/Z, [x21]\n"
"trn1 z30.h, z17.h, z16.h\n"
".inst 0xc1607769 // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa1402a60 // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc168776a // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
"add z30.h, z30.h, z25.h\n"
+ ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc160776b // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
".inst 0xc1657788 // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
- ".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+ ".inst 0xa0412a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+ ".inst 0xc165778a // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
+ ".inst 0xc164778b // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
- "9:" // Unpadded: 3 priming loads
- "add x22, x16, %x[ld_in_row]\n"
- "ld1sb { z17.s }, p1/Z, [x16]\n"
- "addvl x21, SP, #18\n"
+ ".inst 0xa0422a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+ ".inst 0xc16b77aa // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
+ ".inst 0xc16a77ab // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
+ "10:" // Unpadded: 2 priming loads
+ "add x22, x15, %x[ld_in_row]\n"
+ "ld1sb { z17.s }, p1/Z, [x15]\n"
+ "addvl x21, SP, #12\n"
"ld1sb { z16.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"trn1 z27.h, z17.h, z16.h\n"
"add z27.h, z27.h, z25.h\n"
"ld1sb { z17.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "addvl x20, SP, #24\n"
+ "addvl x20, SP, #18\n"
"ld1sb { z16.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"trn1 z28.h, z17.h, z16.h\n"
"add z28.h, z28.h, z25.h\n"
"ld1sb { z17.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "add x16, x16, %x[ld_in_col]\n"
+ "addvl x19, SP, #24\n"
"ld1sb { z16.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"trn1 z29.h, z17.h, z16.h\n"
"add z29.h, z29.h, z25.h\n"
"ld1sb { z17.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
+ "add x15, x15, %x[ld_in_col]\n"
".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
".inst 0xc1687768 // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
"ld1sb { z16.s }, p1/Z, [x22]\n"
@@ -395,44 +441,54 @@ void sme2_s8q_planar_5x5_s1_4rows_dot_za_impl(
"add z30.h, z30.h, z25.h\n"
".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
".inst 0xc160776b // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
+ ".inst 0xa1402a60 // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
".inst 0xc1657788 // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
- ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xc168776c // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
+ ".inst 0xc160776d // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
".inst 0xc165778a // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778b // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
+ ".inst 0xa0412a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xc165778c // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
+ ".inst 0xc164778d // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
".inst 0xc16b77aa // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ab // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
- "10:" // Unpadded: 2 priming loads
- "add x23, x16, %x[ld_in_row]\n"
- "ld1sb { z17.s }, p1/Z, [x16]\n"
- "addvl x22, SP, #12\n"
+ ".inst 0xa0422a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+ ".inst 0xc16b77ac // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
+ ".inst 0xc16a77ad // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
+ "11:" // Unpadded: 1 priming loads
+ "add x23, x15, %x[ld_in_row]\n"
+ "ld1sb { z17.s }, p1/Z, [x15]\n"
+ "addvl x22, SP, #6\n"
"ld1sb { z16.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
"trn1 z27.h, z17.h, z16.h\n"
"add z27.h, z27.h, z25.h\n"
"ld1sb { z17.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- "addvl x21, SP, #18\n"
+ "addvl x21, SP, #12\n"
"ld1sb { z16.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
"trn1 z28.h, z17.h, z16.h\n"
"add z28.h, z28.h, z25.h\n"
"ld1sb { z17.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- "addvl x20, SP, #24\n"
+ "addvl x20, SP, #18\n"
"ld1sb { z16.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
"trn1 z29.h, z17.h, z16.h\n"
"add z29.h, z29.h, z25.h\n"
"ld1sb { z17.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- "add x16, x16, %x[ld_in_col]\n"
+ "addvl x19, SP, #24\n"
".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
".inst 0xc1687768 // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+ "add x15, x15, %x[ld_in_col]\n"
"ld1sb { z16.s }, p1/Z, [x23]\n"
"trn1 z30.h, z17.h, z16.h\n"
".inst 0xc1607769 // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
@@ -448,177 +504,121 @@ void sme2_s8q_planar_5x5_s1_4rows_dot_za_impl(
".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0xc168776c // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
".inst 0xc160776d // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
+ ".inst 0xa1402a60 // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
".inst 0xc165778a // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778b // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc165778c // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
- ".inst 0xc164778d // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
- ".inst 0xc16b77aa // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
- ".inst 0xc16a77ab // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc16b77ac // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
- ".inst 0xc16a77ad // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
- "11:" // Unpadded: 1 priming loads
- "add x24, x16, %x[ld_in_row]\n"
- "ld1sb { z17.s }, p1/Z, [x16]\n"
- "addvl x23, SP, #6\n"
- "ld1sb { z16.s }, p1/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row]\n"
- "trn1 z27.h, z17.h, z16.h\n"
- "add z27.h, z27.h, z25.h\n"
- "ld1sb { z17.s }, p1/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row]\n"
- "addvl x22, SP, #12\n"
- "ld1sb { z16.s }, p1/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row]\n"
- "trn1 z28.h, z17.h, z16.h\n"
- "add z28.h, z28.h, z25.h\n"
- "ld1sb { z17.s }, p1/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row]\n"
- "addvl x21, SP, #18\n"
- "ld1sb { z16.s }, p1/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row]\n"
- "trn1 z29.h, z17.h, z16.h\n"
- "add z29.h, z29.h, z25.h\n"
- "ld1sb { z17.s }, p1/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row]\n"
- "addvl x20, SP, #24\n"
- ".inst 0xa1402ae0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x23]\n"
- ".inst 0xc1687768 // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
- "add x16, x16, %x[ld_in_col]\n"
- "ld1sb { z16.s }, p1/Z, [x24]\n"
- "trn1 z30.h, z17.h, z16.h\n"
- ".inst 0xc1607769 // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
- ".inst 0xc168776a // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
- "add z30.h, z30.h, z25.h\n"
- ".inst 0xa0412ae4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
- ".inst 0xc160776b // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc1657788 // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
- ".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xa0422aea // ld1h { z10.h-z11.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xc168776c // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
- ".inst 0xc160776d // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc165778a // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
- ".inst 0xc164778b // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
- ".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0xc168776e // sdot za.s[x11, 6], { z27.h-z28.h }, z8.h\n"
".inst 0xc160776f // sdot za.s[x11, 7], { z27.h-z28.h }, z0.h\n"
".inst 0xc165778c // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778d // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa0412a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
".inst 0xc16b77aa // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ab // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
".inst 0xc165778e // sdot za.s[x11, 6], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778f // sdot za.s[x11, 7], { z28.h-z29.h }, z4.h\n"
".inst 0xc16b77ac // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ad // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xa0422a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
".inst 0xc16b77ae // sdot za.s[x11, 6], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77af // sdot za.s[x11, 7], { z29.h-z30.h }, z10.h\n"
"12:" // Unpadded: 0 priming loads
".inst 0xa1402be0 // ld1h { z0.h, z8.h }, pn10.b/Z, [SP]\n"
".inst 0xa0412be4 // ld1h { z4.h-z5.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
".inst 0xa0422bea // ld1h { z10.h-z11.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
- "cbz x17, 22f\n"
- "add x20, x16, %x[ld_in_row]\n"
- "ld1sb { z17.s }, p1/Z, [x16]\n"
- "sub x17, x17, #0x1\n"
- "ld1sb { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "cbz x16, 22f\n"
+ "add x19, x15, %x[ld_in_row]\n"
+ "ld1sb { z17.s }, p1/Z, [x15]\n"
+ "sub x16, x16, #0x1\n"
+ "ld1sb { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z27.h, z17.h, z16.h\n"
- "sub x15, x15, #0x1\n"
- "ld1sb { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- "cmp x17, x15\n"
+ "sub x14, x14, #0x1\n"
+ "ld1sb { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "cmp x16, x14\n"
"add z27.h, z27.h, z25.h\n"
- "ld1sb { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z28.h, z17.h, z16.h\n"
- "csel x25, x17, x15, LT\n"
- "ld1sb { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "csel x24, x16, x14, LT\n"
+ "ld1sb { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"add z28.h, z28.h, z25.h\n"
- "add x16, x16, %x[ld_in_col]\n"
- "ld1sb { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x15, x15, %x[ld_in_col]\n"
+ "ld1sb { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z29.h, z17.h, z16.h\n"
"add z29.h, z29.h, z25.h\n"
- "ld1sb { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- "sub x15, x15, x25\n"
- "ld1sb { z16.s }, p1/Z, [x20]\n"
+ "ld1sb { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub x14, x14, x24\n"
+ "ld1sb { z16.s }, p1/Z, [x19]\n"
"trn1 z30.h, z17.h, z16.h\n"
"add z30.h, z30.h, z25.h\n"
- "cbz x25, 21f\n"
+ "cbz x24, 21f\n"
"13:" // Unpadded: Main loop
- "addvl x24, SP, #6\n"
+ "addvl x23, SP, #6\n"
".inst 0xc1687768 // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
- "addvl x23, SP, #12\n"
- "ld1sb { z23.s }, p1/Z, [x16]\n"
+ "addvl x22, SP, #12\n"
+ "ld1sb { z23.s }, p1/Z, [x15]\n"
".inst 0xc1607769 // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402b00 // ld1h { z0.h, z8.h }, pn10.b/Z, [x24]\n"
- "addvl x22, SP, #18\n"
- "addvl x21, SP, #24\n"
+ ".inst 0xa1402ae0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x23]\n"
+ "addvl x21, SP, #18\n"
+ "addvl x20, SP, #24\n"
".inst 0xc168776a // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
- "add x20, x16, %x[ld_in_row]\n"
- "ld1sb { z22.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x15, %x[ld_in_row]\n"
+ "ld1sb { z22.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0xc160776b // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402ae0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x23]\n"
- "subs x25, x25, #0x1\n"
- "add x16, x16, %x[ld_in_col]\n"
+ ".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+ "subs x24, x24, #0x1\n"
+ "add x15, x15, %x[ld_in_col]\n"
".inst 0xc1657788 // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
- "ld1sb { z21.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z21.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412b04 // ld1h { z4.h-z5.h }, pn10.b/Z, [x24, #0x2, MUL VL]\n"
+ ".inst 0xa0412ae4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
".inst 0xc168776c // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
- "ld1sb { z20.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z20.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0xc160776d // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+ ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
".inst 0xc165778a // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
- "ld1sb { z19.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z19.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0xc164778b // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412ae4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xa0412ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
- "ld1sb { z18.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z18.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422b0a // ld1h { z10.h-z11.h }, pn10.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xa0422aea // ld1h { z10.h-z11.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
".inst 0xc168776e // sdot za.s[x11, 6], { z27.h-z28.h }, z8.h\n"
- "ld1sb { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0xc160776f // sdot za.s[x11, 7], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
".inst 0xc165778c // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
- "ld1sb { z16.s }, p1/Z, [x20]\n"
+ "ld1sb { z16.s }, p1/Z, [x19]\n"
".inst 0xc164778d // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
".inst 0xc16b77aa // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ab // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aea // ld1h { z10.h-z11.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0xc165778e // sdot za.s[x11, 6], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778f // sdot za.s[x11, 7], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
".inst 0xc16b77ac // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ad // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
".inst 0xc16b77ae // sdot za.s[x11, 6], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77af // sdot za.s[x11, 7], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
".inst 0xc1681768 // sdot za.s[x8, 0], { z27.h-z28.h }, z8.h\n"
".inst 0xc1601769 // sdot za.s[x8, 1], { z27.h-z28.h }, z0.h\n"
"trn1 z27.h, z23.h, z22.h\n"
@@ -645,407 +645,407 @@ void sme2_s8q_planar_5x5_s1_4rows_dot_za_impl(
".inst 0xc1a9ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z9.s\n"
".inst 0xc00408c1 // mova za.d[x8, #1], { z6.d-z7.d }\n"
".inst 0xc1bfcf0c // sclamp { z12.s-z15.s }, z24.s, z31.s\n"
- "st1b { z12.s }, p1, [x14]\n"
- "add x14, x14, x3\n"
- "add z30.h, z30.h, z25.h\n"
- "st1b { z14.s }, p1, [x13]\n"
+ "st1b { z12.s }, p1, [x13]\n"
"add x13, x13, x10\n"
- "st1b { z13.s }, p1, [x9]\n"
- "add x9, x9, x27\n"
- "st1b { z15.s }, p1, [x28]\n"
+ "add z30.h, z30.h, z25.h\n"
+ "st1b { z14.s }, p1, [x4]\n"
+ "add x4, x4, x9\n"
+ "st1b { z13.s }, p1, [x28]\n"
"add x28, x28, x26\n"
+ "st1b { z15.s }, p1, [x27]\n"
+ "add x27, x27, x25\n"
"bgt 13b\n"
"b 21f\n"
"14:" // Padded
- "cbz x22, 19f\n"
- "cmp x22, #0x1\n"
- "sub x17, x17, x22\n"
+ "cbz x21, 19f\n"
+ "cmp x21, #0x1\n"
+ "sub x16, x16, x21\n"
"beq 18f\n"
- "cmp x22, #0x2\n"
+ "cmp x21, #0x2\n"
"beq 17f\n"
- "cmp x22, #0x3\n"
+ "cmp x21, #0x3\n"
"beq 16f\n"
"15:" // Padded: 4 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z19.s }, p0/Z, [x16]\n"
+ "ld1sb { z19.s }, p0/Z, [x15]\n"
"add z19.h, p0/M, z19.h, z25.h\n"
- "add x21, x16, %x[ld_in_row]\n"
+ "add x20, x15, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z18.s }, p0/Z, [x21]\n"
+ "ld1sb { z18.s }, p0/Z, [x20]\n"
"add z18.h, p0/M, z18.h, z25.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z17.s }, p0/Z, [x21]\n"
+ "ld1sb { z17.s }, p0/Z, [x20]\n"
"add z17.h, p0/M, z17.h, z25.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z16.s }, p0/Z, [x21]\n"
+ "ld1sb { z16.s }, p0/Z, [x20]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
"mov x12, #0x4\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z27.h, z19.h, z18.h\n"
"trn1 z28.h, z17.h, z16.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z18.s }, p0/Z, [x21]\n"
+ "ld1sb { z18.s }, p0/Z, [x20]\n"
"add z18.h, p0/M, z18.h, z25.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z16.s }, p0/Z, [x21]\n"
+ "ld1sb { z16.s }, p0/Z, [x20]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z17.s }, p0/Z, [x21]\n"
- "addvl x20, SP, #24\n"
+ "ld1sb { z17.s }, p0/Z, [x20]\n"
+ "addvl x19, SP, #24\n"
"add z17.h, p0/M, z17.h, z25.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a60 // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
"trn1 z29.h, z18.h, z16.h\n"
- "ld1sb { z16.s }, p0/Z, [x21]\n"
+ "ld1sb { z16.s }, p0/Z, [x20]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
".inst 0xc1687768 // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
- "add x16, x16, %x[ld_in_col]\n"
+ "add x15, x15, %x[ld_in_col]\n"
".inst 0xc1607769 // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa0412a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
"trn1 z30.h, z17.h, z16.h\n"
".inst 0xc1657788 // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xa0422a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
"16:" // Padded: 3 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z19.s }, p0/Z, [x16]\n"
+ "ld1sb { z19.s }, p0/Z, [x15]\n"
"add z19.h, p0/M, z19.h, z25.h\n"
- "add x20, x16, %x[ld_in_row]\n"
+ "add x19, x15, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z18.s }, p0/Z, [x20]\n"
+ "ld1sb { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
+ "ld1sb { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z27.h, z19.h, z18.h\n"
"trn1 z28.h, z17.h, z16.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z18.s }, p0/Z, [x20]\n"
+ "ld1sb { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
- "addvl x21, SP, #18\n"
+ "ld1sb { z17.s }, p0/Z, [x19]\n"
+ "addvl x20, SP, #18\n"
"add z17.h, p0/M, z17.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
"trn1 z29.h, z18.h, z16.h\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
- "addvl x20, SP, #24\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
+ "addvl x19, SP, #24\n"
"add z16.h, p0/M, z16.h, z25.h\n"
".inst 0xc1687768 // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
".inst 0xc1607769 // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a60 // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
"trn1 z30.h, z17.h, z16.h\n"
- "add x16, x16, %x[ld_in_col]\n"
- ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ "add x15, x15, %x[ld_in_col]\n"
+ ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
".inst 0xc168776a // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
".inst 0xc160776b // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
".inst 0xc1657788 // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa0412a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
".inst 0xc165778a // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778b // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xa0422a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
".inst 0xc16b77aa // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ab // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
"17:" // Padded: 2 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z19.s }, p0/Z, [x16]\n"
+ "ld1sb { z19.s }, p0/Z, [x15]\n"
"add z19.h, p0/M, z19.h, z25.h\n"
- "add x20, x16, %x[ld_in_row]\n"
+ "add x19, x15, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z18.s }, p0/Z, [x20]\n"
+ "ld1sb { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
+ "ld1sb { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z27.h, z19.h, z18.h\n"
"trn1 z28.h, z17.h, z16.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z18.s }, p0/Z, [x20]\n"
+ "ld1sb { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
- "addvl x22, SP, #12\n"
+ "ld1sb { z17.s }, p0/Z, [x19]\n"
+ "addvl x21, SP, #12\n"
"add z17.h, p0/M, z17.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+ ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
"trn1 z29.h, z18.h, z16.h\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
- "addvl x21, SP, #18\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
+ "addvl x20, SP, #18\n"
"add z16.h, p0/M, z16.h, z25.h\n"
".inst 0xc1687768 // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
".inst 0xc1607769 // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
- "addvl x20, SP, #24\n"
+ ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+ "addvl x19, SP, #24\n"
"trn1 z30.h, z17.h, z16.h\n"
- ".inst 0xa0412ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
".inst 0xc168776a // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
- "add x16, x16, %x[ld_in_col]\n"
+ "add x15, x15, %x[ld_in_col]\n"
".inst 0xc160776b // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a60 // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
".inst 0xc1657788 // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
- ".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
".inst 0xc168776c // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
".inst 0xc160776d // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
".inst 0xc165778a // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778b // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa0412a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
".inst 0xc165778c // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778d // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
".inst 0xc16b77aa // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ab // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xa0422a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
".inst 0xc16b77ac // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ad // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
"18:" // Padded: 1 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z19.s }, p0/Z, [x16]\n"
+ "ld1sb { z19.s }, p0/Z, [x15]\n"
"add z19.h, p0/M, z19.h, z25.h\n"
- "add x20, x16, %x[ld_in_row]\n"
+ "add x19, x15, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z18.s }, p0/Z, [x20]\n"
+ "ld1sb { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
+ "ld1sb { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z27.h, z19.h, z18.h\n"
"trn1 z28.h, z17.h, z16.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z18.s }, p0/Z, [x20]\n"
+ "ld1sb { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
- "addvl x23, SP, #6\n"
+ "ld1sb { z17.s }, p0/Z, [x19]\n"
+ "addvl x22, SP, #6\n"
"add z17.h, p0/M, z17.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xa1402ae0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x23]\n"
+ ".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
"trn1 z29.h, z18.h, z16.h\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
- "addvl x22, SP, #12\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
+ "addvl x21, SP, #12\n"
"add z16.h, p0/M, z16.h, z25.h\n"
".inst 0xc1687768 // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
".inst 0xc1607769 // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
- "addvl x21, SP, #18\n"
+ ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+ "addvl x20, SP, #18\n"
"trn1 z30.h, z17.h, z16.h\n"
- ".inst 0xa0412ae4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xa0412ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
".inst 0xc168776a // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
- "addvl x20, SP, #24\n"
- "add x16, x16, %x[ld_in_col]\n"
+ "addvl x19, SP, #24\n"
+ "add x15, x15, %x[ld_in_col]\n"
".inst 0xc160776b // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
".inst 0xc1657788 // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
- ".inst 0xa0422aea // ld1h { z10.h-z11.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
".inst 0xc168776c // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
".inst 0xc160776d // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a60 // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
".inst 0xc165778a // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778b // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
".inst 0xc168776e // sdot za.s[x11, 6], { z27.h-z28.h }, z8.h\n"
".inst 0xc160776f // sdot za.s[x11, 7], { z27.h-z28.h }, z0.h\n"
".inst 0xc165778c // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778d // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa0412a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
".inst 0xc16b77aa // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ab // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
".inst 0xc165778e // sdot za.s[x11, 6], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778f // sdot za.s[x11, 7], { z28.h-z29.h }, z4.h\n"
".inst 0xc16b77ac // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ad // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xa0422a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
".inst 0xc16b77ae // sdot za.s[x11, 6], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77af // sdot za.s[x11, 7], { z29.h-z30.h }, z10.h\n"
"19:" // Padded: 0 priming loads
".inst 0xa1402be0 // ld1h { z0.h, z8.h }, pn10.b/Z, [SP]\n"
".inst 0xa0412be4 // ld1h { z4.h-z5.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
".inst 0xa0422bea // ld1h { z10.h-z11.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
- "cbz x17, 22f\n"
+ "cbz x16, 22f\n"
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z19.s }, p0/Z, [x16]\n"
+ "ld1sb { z19.s }, p0/Z, [x15]\n"
"add z19.h, p0/M, z19.h, z25.h\n"
- "add x20, x16, %x[ld_in_row]\n"
+ "add x19, x15, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z18.s }, p0/Z, [x20]\n"
+ "ld1sb { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
+ "ld1sb { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z27.h, z19.h, z18.h\n"
"trn1 z28.h, z17.h, z16.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z19.s }, p0/Z, [x20]\n"
+ "ld1sb { z19.s }, p0/Z, [x19]\n"
"add z19.h, p0/M, z19.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z18.s }, p0/Z, [x20]\n"
+ "ld1sb { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
+ "ld1sb { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
- "sub x17, x17, #0x1\n"
- "sub x15, x15, #0x1\n"
- "cmp x17, x15\n"
+ "sub x16, x16, #0x1\n"
+ "sub x14, x14, #0x1\n"
+ "cmp x16, x14\n"
"trn1 z29.h, z19.h, z18.h\n"
"trn1 z30.h, z17.h, z16.h\n"
- "csel x25, x17, x15, LT\n"
- "add x16, x16, %x[ld_in_col]\n"
- "sub x15, x15, x25\n"
- "cbz x25, 21f\n"
+ "csel x24, x16, x14, LT\n"
+ "add x15, x15, %x[ld_in_col]\n"
+ "sub x14, x14, x24\n"
+ "cbz x24, 21f\n"
"20:" // Padded: Main loop
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z23.s }, p0/Z, [x16]\n"
+ "ld1sb { z23.s }, p0/Z, [x15]\n"
"add z23.h, p0/M, z23.h, z25.h\n"
- "add x24, x16, %x[ld_in_row]\n"
+ "add x23, x15, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z22.s }, p0/Z, [x24]\n"
+ "ld1sb { z22.s }, p0/Z, [x23]\n"
".inst 0xc1687768 // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
- "addvl x23, SP, #6\n"
+ "addvl x22, SP, #6\n"
".inst 0xc1607769 // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402ae0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x23]\n"
- "addvl x22, SP, #12\n"
+ ".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+ "addvl x21, SP, #12\n"
"add z22.h, p0/M, z22.h, z25.h\n"
- "add x24, x24, %x[ld_in_row]\n"
+ "add x23, x23, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc168776a // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
".inst 0xc160776b // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
- "addvl x21, SP, #18\n"
- "addvl x20, SP, #24\n"
- "ld1sb { z21.s }, p0/Z, [x24]\n"
+ ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+ "addvl x20, SP, #18\n"
+ "addvl x19, SP, #24\n"
+ "ld1sb { z21.s }, p0/Z, [x23]\n"
".inst 0xc1657788 // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
"add z21.h, p0/M, z21.h, z25.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412ae4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xa0412ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
"mov x12, #0x4\n"
- "add x24, x24, %x[ld_in_row]\n"
+ "add x23, x23, %x[ld_in_row]\n"
".inst 0xc168776c // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
- "ld1sb { z20.s }, p0/Z, [x24]\n"
+ "ld1sb { z20.s }, p0/Z, [x23]\n"
"add z20.h, p0/M, z20.h, z25.h\n"
- "add x24, x24, %x[ld_in_row]\n"
+ "add x23, x23, %x[ld_in_row]\n"
".inst 0xc160776d // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "subs x25, x25, #0x1\n"
+ "subs x24, x24, #0x1\n"
".inst 0xc165778a // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
- "ld1sb { z19.s }, p0/Z, [x24]\n"
+ "ld1sb { z19.s }, p0/Z, [x23]\n"
"add z19.h, p0/M, z19.h, z25.h\n"
- "add x24, x24, %x[ld_in_row]\n"
+ "add x23, x23, %x[ld_in_row]\n"
".inst 0xc164778b // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "add x16, x16, %x[ld_in_col]\n"
+ "add x15, x15, %x[ld_in_col]\n"
".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
- "ld1sb { z18.s }, p0/Z, [x24]\n"
+ "ld1sb { z18.s }, p0/Z, [x23]\n"
"add z18.h, p0/M, z18.h, z25.h\n"
- "add x24, x24, %x[ld_in_row]\n"
+ "add x23, x23, %x[ld_in_row]\n"
".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aea // ld1h { z10.h-z11.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc168776e // sdot za.s[x11, 6], { z27.h-z28.h }, z8.h\n"
- "ld1sb { z17.s }, p0/Z, [x24]\n"
+ "ld1sb { z17.s }, p0/Z, [x23]\n"
"add z17.h, p0/M, z17.h, z25.h\n"
- "add x24, x24, %x[ld_in_row]\n"
+ "add x23, x23, %x[ld_in_row]\n"
".inst 0xc160776f // sdot za.s[x11, 7], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a60 // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
".inst 0xc165778c // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
- "ld1sb { z16.s }, p0/Z, [x24]\n"
+ "ld1sb { z16.s }, p0/Z, [x23]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
".inst 0xc164778d // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
".inst 0xc16b77aa // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ab // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
".inst 0xc165778e // sdot za.s[x11, 6], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778f // sdot za.s[x11, 7], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa0412a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
".inst 0xc16b77ac // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ad // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
".inst 0xc16b77ae // sdot za.s[x11, 6], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77af // sdot za.s[x11, 7], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xa0422a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
".inst 0xc1681768 // sdot za.s[x8, 0], { z27.h-z28.h }, z8.h\n"
".inst 0xc1601769 // sdot za.s[x8, 1], { z27.h-z28.h }, z0.h\n"
".inst 0xa1402be0 // ld1h { z0.h, z8.h }, pn10.b/Z, [SP]\n"
@@ -1069,56 +1069,56 @@ void sme2_s8q_planar_5x5_s1_4rows_dot_za_impl(
".inst 0xc1a9ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z9.s\n"
".inst 0xc00408c1 // mova za.d[x8, #1], { z6.d-z7.d }\n"
".inst 0xc1bfcf0c // sclamp { z12.s-z15.s }, z24.s, z31.s\n"
- "st1b { z12.s }, p1, [x14]\n"
- "add x14, x14, x3\n"
- "st1b { z14.s }, p1, [x13]\n"
+ "st1b { z12.s }, p1, [x13]\n"
"add x13, x13, x10\n"
- "st1b { z13.s }, p1, [x9]\n"
- "add x9, x9, x27\n"
- "st1b { z15.s }, p1, [x28]\n"
+ "st1b { z14.s }, p1, [x4]\n"
+ "add x4, x4, x9\n"
+ "st1b { z13.s }, p1, [x28]\n"
"add x28, x28, x26\n"
+ "st1b { z15.s }, p1, [x27]\n"
+ "add x27, x27, x25\n"
"bgt 20b\n"
"21:" // Main loop tail
- "addvl x23, SP, #6\n"
+ "addvl x22, SP, #6\n"
".inst 0xc1687768 // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
- "addvl x22, SP, #12\n"
+ "addvl x21, SP, #12\n"
".inst 0xc1607769 // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402ae0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x23]\n"
- "addvl x21, SP, #18\n"
- "addvl x20, SP, #24\n"
+ ".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+ "addvl x20, SP, #18\n"
+ "addvl x19, SP, #24\n"
".inst 0xc168776a // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
".inst 0xc160776b // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+ ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
".inst 0xc1657788 // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412ae4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xa0412ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
".inst 0xc168776c // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
".inst 0xc160776d // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
".inst 0xc165778a // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778b // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aea // ld1h { z10.h-z11.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0xc168776e // sdot za.s[x11, 6], { z27.h-z28.h }, z8.h\n"
".inst 0xc160776f // sdot za.s[x11, 7], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a60 // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
".inst 0xc165778c // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778d // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
".inst 0xc16b77aa // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ab // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
".inst 0xc165778e // sdot za.s[x11, 6], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778f // sdot za.s[x11, 7], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa0412a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
".inst 0xc16b77ac // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ad // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
".inst 0xc16b77ae // sdot za.s[x11, 6], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77af // sdot za.s[x11, 7], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xa0422a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
".inst 0xc1681768 // sdot za.s[x8, 0], { z27.h-z28.h }, z8.h\n"
".inst 0xc1601769 // sdot za.s[x8, 1], { z27.h-z28.h }, z0.h\n"
".inst 0xc1651788 // sdot za.s[x8, 0], { z28.h-z29.h }, z5.h\n"
@@ -1135,20 +1135,20 @@ void sme2_s8q_planar_5x5_s1_4rows_dot_za_impl(
".inst 0xc1a9ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z9.s\n"
".inst 0xc00408c1 // mova za.d[x8, #1], { z6.d-z7.d }\n"
".inst 0xc1bfcf0c // sclamp { z12.s-z15.s }, z24.s, z31.s\n"
- "st1b { z12.s }, p1, [x14]\n"
- "add x14, x14, x3\n"
- "st1b { z14.s }, p1, [x13]\n"
+ "st1b { z12.s }, p1, [x13]\n"
"add x13, x13, x10\n"
- "st1b { z13.s }, p1, [x9]\n"
- "add x9, x9, x27\n"
- "st1b { z15.s }, p1, [x28]\n"
+ "st1b { z14.s }, p1, [x4]\n"
+ "add x4, x4, x9\n"
+ "st1b { z13.s }, p1, [x28]\n"
"add x28, x28, x26\n"
+ "st1b { z15.s }, p1, [x27]\n"
+ "add x27, x27, x25\n"
"22:" // Main loop skip tail
- "cbz x15, 24f\n"
+ "cbz x14, 24f\n"
"23:" // Right padding loop
".inst 0xc006680c // mova { z12.d-z13.d }, za.d[x11, #0]\n"
"add x8, x8, #0x2\n"
- "subs x15, x15, #0x1\n"
+ "subs x14, x14, #0x1\n"
".inst 0xc006682e // mova { z14.d-z15.d }, za.d[x11, #1]\n"
".inst 0xc1a3ac0c // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z3.s\n"
"add x11, x11, #0x2\n"
@@ -1157,44 +1157,44 @@ void sme2_s8q_planar_5x5_s1_4rows_dot_za_impl(
".inst 0xc1a9ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z9.s\n"
".inst 0xc00408c1 // mova za.d[x8, #1], { z6.d-z7.d }\n"
".inst 0xc1bfcf0c // sclamp { z12.s-z15.s }, z24.s, z31.s\n"
- "st1b { z12.s }, p1, [x14]\n"
- "add x14, x14, x3\n"
- "st1b { z14.s }, p1, [x13]\n"
+ "st1b { z12.s }, p1, [x13]\n"
"add x13, x13, x10\n"
- "st1b { z13.s }, p1, [x9]\n"
- "add x9, x9, x27\n"
- "st1b { z15.s }, p1, [x28]\n"
+ "st1b { z14.s }, p1, [x4]\n"
+ "add x4, x4, x9\n"
+ "st1b { z13.s }, p1, [x28]\n"
"add x28, x28, x26\n"
+ "st1b { z15.s }, p1, [x27]\n"
+ "add x27, x27, x25\n"
"bgt 23b\n"
"24:" // End
"ldr x23, [%x[args], %[offsetof_Args_weights]]\n"
"incw x23, ALL, MUL #16\n"
"incw x23, ALL, MUL #9\n"
"str x23, [%x[args], %[offsetof_Args_weights]]\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "incw x5\n"
- "whilelt p1.s, x5, x7\n"
- "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
- "add x16, x16, x20\n"
- "str x16, [%x[args], %[offsetof_Args_inptr]]\n"
- "ldr x25, [%x[args], %[offsetof_Args_outptrs]]\n"
- "ldr x24, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
- "ldp x23, x22, [x25, #0x0]\n"
- "ldp x21, x20, [x24, #0x0]\n"
- "add x23, x23, x21\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+ "incw x17\n"
+ "whilelt p1.s, x17, x7\n"
+ "ldr x15, [%x[args], %[offsetof_Args_inptr]]\n"
+ "add x15, x15, x19\n"
+ "str x15, [%x[args], %[offsetof_Args_inptr]]\n"
+ "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+ "ldp x22, x21, [x24, #0x0]\n"
+ "ldp x20, x19, [x23, #0x0]\n"
"add x22, x22, x20\n"
- "stp x23, x22, [x25, #0x0]\n"
- "ldp x23, x22, [x25, #0x10]\n"
- "ldp x21, x20, [x24, #0x10]\n"
- "add x23, x23, x21\n"
+ "add x21, x21, x19\n"
+ "stp x22, x21, [x24, #0x0]\n"
+ "ldp x22, x21, [x24, #0x10]\n"
+ "ldp x20, x19, [x23, #0x10]\n"
"add x22, x22, x20\n"
- "stp x23, x22, [x25, #0x10]\n"
+ "add x21, x21, x19\n"
+ "stp x22, x21, [x24, #0x10]\n"
"b.any 1b\n"
"addvl SP, SP, #30\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_5x5_s2_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_5x5_s2_4rows_dot_za/generic.cpp
index 3e8510392f..81829b5f4e 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_5x5_s2_4rows_dot_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_5x5_s2_4rows_dot_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -69,20 +69,20 @@ void sme2_s8q_planar_5x5_s2_4rows_dot_za_impl(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "ldr x3, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "ldr x4, [%x[args], %[offsetof_Args_pad_bottom]]\n"
"ptrue p2.b\n"
- "mov x20, #0xb\n"
- "ldr x4, [%x[args], %[offsetof_Args_pad_top]]\n"
+ "mov x19, #0xb\n"
+ "ldr x5, [%x[args], %[offsetof_Args_pad_top]]\n"
"ld1rh { z9.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
- "sub x20, x20, x3\n"
+ "sub x19, x19, x4\n"
".inst 0x25207812 // ptrue pn10.b\n"
- "ldr x5, [%x[args], %[offsetof_Args_n_channels]]\n"
- "whilelt p1.s, XZR, x5\n"
- "whilelt p9.s, XZR, x20\n"
+ "ldr x6, [%x[args], %[offsetof_Args_n_channels]]\n"
+ "whilelt p1.s, XZR, x6\n"
+ "whilelt p9.s, XZR, x19\n"
"ld1rw { z8.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
- "whilelt p8.s, XZR, x4\n"
+ "whilelt p8.s, XZR, x5\n"
"addvl SP, SP, #-15\n"
- "ldr x6, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ldr x7, [%x[args], %[offsetof_Args_current_channel]]\n"
"neg z9.h, p2/M, z9.h\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
"ld1rw { z3.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
@@ -90,227 +90,310 @@ void sme2_s8q_planar_5x5_s2_4rows_dot_za_impl(
"ld1rw { z26.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
"ld1rw { z23.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
"1:" // Channel loop
- "ldr x20, [%x[qp], %[offsetof_Requantize32_bias]]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_bias]]\n"
"mov z28.s, #0x0\n"
- "cbz x20, 2f\n"
- "ld1w { z28.s }, p1/Z, [x20, x6, LSL #2]\n"
+ "cbz x19, 2f\n"
+ "ld1w { z28.s }, p1/Z, [x19, x7, LSL #2]\n"
"2:" // Load bias: Done
- "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
- "mov x20, x22\n"
- "ld1sb { z12.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+ "mov x19, x21\n"
+ "ld1sb { z12.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"ld1rh { z18.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
"sub z12.h, z12.h, z18.h\n"
- "incw x22\n"
+ "incw x21\n"
"mov z14.h, #0x0\n"
- "ld1sb { z25.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "ld1sb { z25.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z25.h, z25.h, z18.h\n"
"trn1 z2.h, z12.h, z25.h\n"
- "ld1sb { z24.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "ld1sb { z24.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z24.h, z24.h, z18.h\n"
- "addvl x21, SP, #15\n"
- "ld1sb { z17.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "addvl x20, SP, #15\n"
+ "ld1sb { z17.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z17.h, z17.h, z18.h\n"
"trn1 z10.h, z24.h, z17.h\n"
- "ld1sb { z16.s }, p2/Z, [x20]\n"
- "mov x20, x22\n"
+ "ld1sb { z16.s }, p2/Z, [x19]\n"
+ "mov x19, x21\n"
"sub z16.h, z16.h, z18.h\n"
- "incw x22\n"
- "ld1sb { z12.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "incw x21\n"
+ "ld1sb { z12.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z12.h, z12.h, z18.h\n"
- "addvl x21, x21, #-3\n"
- "ld1sb { z25.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "addvl x20, x20, #-3\n"
+ "ld1sb { z25.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z25.h, z25.h, z18.h\n"
"trn1 z0.h, z16.h, z14.h\n"
- "ld1sb { z24.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "ld1sb { z24.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z24.h, z24.h, z18.h\n"
- "st1h { z2.h }, p2, [x21]\n"
- "ld1sb { z17.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "st1h { z2.h }, p2, [x20]\n"
+ "ld1sb { z17.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z17.h, z17.h, z18.h\n"
"trn1 z2.h, z12.h, z25.h\n"
- "ld1sb { z16.s }, p2/Z, [x20]\n"
- "mov x20, x22\n"
- "st1h { z10.h }, p2, [x21, #1, MUL VL]\n"
+ "ld1sb { z16.s }, p2/Z, [x19]\n"
+ "mov x19, x21\n"
+ "st1h { z10.h }, p2, [x20, #1, MUL VL]\n"
"sub z16.h, z16.h, z18.h\n"
- "ld1sb { z12.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "ld1sb { z12.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"trn1 z10.h, z24.h, z17.h\n"
"sub z12.h, z12.h, z18.h\n"
- "ld1sb { z25.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "ld1sb { z25.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z25.h, z25.h, z18.h\n"
- "st1h { z0.h }, p2, [x21, #2, MUL VL]\n"
- "ld1sb { z24.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "st1h { z0.h }, p2, [x20, #2, MUL VL]\n"
+ "ld1sb { z24.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"trn1 z0.h, z16.h, z14.h\n"
- "incw x22\n"
- "ld1sb { z17.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "incw x21\n"
+ "ld1sb { z17.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z24.h, z24.h, z18.h\n"
"sub z17.h, z17.h, z18.h\n"
- "ld1sb { z16.s }, p2/Z, [x20]\n"
- "addvl x21, x21, #-3\n"
- "mov x20, x22\n"
- "st1h { z2.h }, p2, [x21]\n"
+ "ld1sb { z16.s }, p2/Z, [x19]\n"
+ "addvl x20, x20, #-3\n"
+ "mov x19, x21\n"
+ "st1h { z2.h }, p2, [x20]\n"
"trn1 z2.h, z12.h, z25.h\n"
- "ld1sb { z12.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "ld1sb { z12.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z16.h, z16.h, z18.h\n"
- "ld1sb { z25.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
- "st1h { z10.h }, p2, [x21, #1, MUL VL]\n"
+ "ld1sb { z25.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
+ "st1h { z10.h }, p2, [x20, #1, MUL VL]\n"
"trn1 z10.h, z24.h, z17.h\n"
- "ld1sb { z24.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "ld1sb { z24.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z12.h, z12.h, z18.h\n"
"sub z25.h, z25.h, z18.h\n"
- "ld1sb { z17.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
- "st1h { z0.h }, p2, [x21, #2, MUL VL]\n"
+ "ld1sb { z17.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
+ "st1h { z0.h }, p2, [x20, #2, MUL VL]\n"
"trn1 z0.h, z16.h, z14.h\n"
- "ld1sb { z16.s }, p2/Z, [x20]\n"
- "incw x22\n"
+ "ld1sb { z16.s }, p2/Z, [x19]\n"
+ "incw x21\n"
"sub z24.h, z24.h, z18.h\n"
"sub z17.h, z17.h, z18.h\n"
- "addvl x21, x21, #-3\n"
- "mov x20, x22\n"
- "st1h { z2.h }, p2, [x21]\n"
+ "addvl x20, x20, #-3\n"
+ "mov x19, x21\n"
+ "st1h { z2.h }, p2, [x20]\n"
"sub z16.h, z16.h, z18.h\n"
"trn1 z2.h, z12.h, z25.h\n"
- "ld1sb { z12.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
- "st1h { z10.h }, p2, [x21, #1, MUL VL]\n"
- "ld1sb { z25.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "ld1sb { z12.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
+ "st1h { z10.h }, p2, [x20, #1, MUL VL]\n"
+ "ld1sb { z25.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"trn1 z10.h, z24.h, z17.h\n"
- "st1h { z0.h }, p2, [x21, #2, MUL VL]\n"
- "ld1sb { z24.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "st1h { z0.h }, p2, [x20, #2, MUL VL]\n"
+ "ld1sb { z24.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"trn1 z0.h, z16.h, z14.h\n"
"sub z12.h, z12.h, z18.h\n"
- "ld1sb { z17.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "ld1sb { z17.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z25.h, z25.h, z18.h\n"
"sub z24.h, z24.h, z18.h\n"
- "ld1sb { z16.s }, p2/Z, [x20]\n"
+ "ld1sb { z16.s }, p2/Z, [x19]\n"
"sub z17.h, z17.h, z18.h\n"
"sub z16.h, z16.h, z18.h\n"
- "ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
- "addvl x21, x21, #-3\n"
- "st1h { z2.h }, p2, [x21]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+ "addvl x20, x20, #-3\n"
+ "st1h { z2.h }, p2, [x20]\n"
"mov z29.d, z28.d\n"
"mov z30.d, z28.d\n"
- "st1h { z10.h }, p2, [x21, #1, MUL VL]\n"
+ "st1h { z10.h }, p2, [x20, #1, MUL VL]\n"
"mov z31.d, z28.d\n"
"trn1 z2.h, z12.h, z25.h\n"
- "st1h { z0.h }, p2, [x21, #2, MUL VL]\n"
- "addvl x21, x21, #-3\n"
+ "st1h { z0.h }, p2, [x20, #2, MUL VL]\n"
+ "addvl x20, x20, #-3\n"
"trn1 z10.h, z24.h, z17.h\n"
"trn1 z0.h, z16.h, z14.h\n"
- "st1h { z2.h }, p2, [x21]\n"
- "st1h { z10.h }, p2, [x21, #1, MUL VL]\n"
- "st1h { z0.h }, p2, [x21, #2, MUL VL]\n"
- "cbz x20, 3f\n"
- "ld1w { z3.s }, p1/Z, [x20, x6, LSL #2]\n"
+ "st1h { z2.h }, p2, [x20]\n"
+ "st1h { z10.h }, p2, [x20, #1, MUL VL]\n"
+ "st1h { z0.h }, p2, [x20, #2, MUL VL]\n"
+ "cbz x19, 3f\n"
+ "ld1w { z3.s }, p1/Z, [x19, x7, LSL #2]\n"
"3:" // Load mul: End
- "ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
- "cbz x20, 4f\n"
- "ld1w { z1.s }, p1/Z, [x20, x6, LSL #2]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+ "cbz x19, 4f\n"
+ "ld1w { z1.s }, p1/Z, [x19, x7, LSL #2]\n"
"4:" // Load right_shift: End
- "ldr x7, [%x[args], %[offsetof_Args_input_cols]]\n"
- "sub x20, x7, #0x1\n"
- "orr x23, x20, %x[ld_in_col], LSL #16\n"
- "ldr x17, [%x[args], %[offsetof_Args_inptr]]\n"
- "orr x23, x5, x23, LSL #22\n"
- "mov x22, #0xb\n"
- "add x21, x4, x3\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "ldr x16, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "ldr x17, [%x[args], %[offsetof_Args_input_cols]]\n"
+ "sub x19, x17, #0x1\n"
+ "orr x22, x19, %x[ld_in_col], LSL #16\n"
+ "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
+ "orr x22, x6, x22, LSL #22\n"
+ "mov x21, #0xb\n"
+ "add x20, x5, x4\n"
+ "lsl x19, %x[ld_in_row], #0x0\n"
+ "ldr x15, [%x[args], %[offsetof_Args_output_cols]]\n"
"mov x8, #0x0\n"
- "lsl x23, x23, #0x0\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x4, x17\n"
+ "lsl x22, x22, #0x0\n"
+ "sub x21, x21, x20\n"
+ "madd x19, x19, x5, x16\n"
"5:" // Issue prefetches
- "subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col]\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xf8b64a7c // rprfm pldstrm, x22, [x19]\n"
+ "add x19, x19, %x[ld_in_col]\n"
"bgt 5b\n"
- "ldr x25, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "msub x17, x4, x20, x17\n"
+ "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "lsl x19, %x[ld_in_row], #0x0\n"
+ "msub x16, x5, x19, x16\n"
".inst 0xc0040f80 // mova za.d[x8, #0], { z28.d-z31.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
".inst 0xc0040f81 // mova za.d[x8, #1], { z28.d-z31.d }\n"
- "mov x22, #0x4\n"
- "ldp x15, x14, [x25], #0x10\n"
+ "mov x21, #0x4\n"
+ "ldp x14, x13, [x24], #0x10\n"
".inst 0xc0040f82 // mova za.d[x8, #2], { z28.d-z31.d }\n"
- "ldp x13, x11, [x20], #0x10\n"
+ "ldp x11, x10, [x19], #0x10\n"
".inst 0xc0040f83 // mova za.d[x8, #3], { z28.d-z31.d }\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
+ "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
- "ldp x10, x9, [x25], #0x10\n"
- "ldp x28, x27, [x20], #0x10\n"
- "cbz x21, 7f\n"
- "cmp x21, x22\n"
- "csel x20, x21, x22, LT\n"
- "sub x21, x21, x20\n"
- "sub x22, x22, x20\n"
- "cbz x21, 7f\n"
+ "ldp x9, x28, [x24], #0x10\n"
+ "ldp x27, x26, [x19], #0x10\n"
+ "cbz x20, 7f\n"
+ "cmp x20, x21\n"
+ "csel x19, x20, x21, LT\n"
+ "sub x20, x20, x19\n"
+ "sub x21, x21, x19\n"
+ "cbz x20, 7f\n"
".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
".inst 0xc1a3ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
- "and x22, x21, #0x1\n"
+ "and x21, x20, #0x1\n"
".inst 0xc1a1aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
- "add x21, x21, #0x1\n"
- "lsr x21, x21, #0x1\n"
+ "add x20, x20, #0x1\n"
+ "lsr x20, x20, #0x1\n"
".inst 0xc1a8ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
- "sub x16, x16, x21\n"
+ "sub x15, x15, x20\n"
".inst 0xc1b7cf44 // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
"6:" // Left padding
- "subs x21, x21, #0x1\n"
- "st1b { z4.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- "st1b { z5.s }, p1, [x14]\n"
+ "subs x20, x20, #0x1\n"
+ "st1b { z4.s }, p1, [x14]\n"
"add x14, x14, x11\n"
- "st1b { z6.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
- "st1b { z7.s }, p1, [x9]\n"
+ "st1b { z5.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
+ "st1b { z6.s }, p1, [x9]\n"
"add x9, x9, x27\n"
+ "st1b { z7.s }, p1, [x28]\n"
+ "add x28, x28, x26\n"
"bgt 6b\n"
"7:" // Left padding: End
- "adds XZR, x4, x3\n"
+ "adds XZR, x5, x4\n"
"bne 14f\n"
- "cbz x22, 12f\n"
- "cmp x22, #0x1\n"
- "sub x7, x7, x22\n"
+ "cbz x21, 12f\n"
+ "cmp x21, #0x1\n"
+ "sub x17, x17, x21\n"
"beq 11f\n"
- "cmp x22, #0x2\n"
+ "cmp x21, #0x2\n"
"beq 10f\n"
- "cmp x22, #0x3\n"
+ "cmp x21, #0x3\n"
"beq 9f\n"
"8:" // Unpadded: 4 priming loads
- "add x21, x17, %x[ld_in_row]\n"
- "ld1sb { z11.s }, p1/Z, [x17]\n"
- "addvl x20, SP, #12\n"
+ "add x20, x16, %x[ld_in_row]\n"
+ "ld1sb { z11.s }, p1/Z, [x16]\n"
+ "addvl x19, SP, #12\n"
+ "ld1sb { z21.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z21.h\n"
+ "add z11.h, z11.h, z9.h\n"
+ "ld1sb { z12.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "ld1sb { z20.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z20.h\n"
+ "add z12.h, z12.h, z9.h\n"
+ "ld1sb { z13.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z19.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z13.h, z13.h, z19.h\n"
+ "add z13.h, z13.h, z9.h\n"
+ "ld1sb { z14.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z18.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "add z14.h, z14.h, z9.h\n"
+ "ld1sb { z15.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z17.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z15.h, z15.h, z9.h\n"
+ ".inst 0xa1402a62 // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+ "ld1sb { z16.s }, p1/Z, [x20]\n"
+ "mov z16.d, z16.d\n"
+ "add z16.h, z16.h, z9.h\n"
+ ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+ ".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+ "9:" // Unpadded: 3 priming loads
+ "add x20, x16, %x[ld_in_row]\n"
+ "ld1sb { z11.s }, p1/Z, [x16]\n"
+ "addvl x19, SP, #9\n"
+ "ld1sb { z21.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z21.h\n"
+ "add z11.h, z11.h, z9.h\n"
+ "ld1sb { z12.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "ld1sb { z20.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z20.h\n"
+ "add z12.h, z12.h, z9.h\n"
+ "ld1sb { z13.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z19.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z13.h, z13.h, z19.h\n"
+ "add z13.h, z13.h, z9.h\n"
+ "ld1sb { z14.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z18.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "add z14.h, z14.h, z9.h\n"
+ "ld1sb { z15.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z17.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z15.h, z15.h, z9.h\n"
+ ".inst 0xa1402a62 // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+ "ld1sb { z16.s }, p1/Z, [x20]\n"
+ "mov z16.d, z16.d\n"
+ "add z16.h, z16.h, z9.h\n"
+ ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+ ".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+ "10:" // Unpadded: 2 priming loads
+ "add x21, x16, %x[ld_in_row]\n"
+ "ld1sb { z11.s }, p1/Z, [x16]\n"
+ "addvl x20, SP, #6\n"
"ld1sb { z21.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"trn1 z11.h, z11.h, z21.h\n"
"add z11.h, z11.h, z9.h\n"
"ld1sb { z12.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "add x17, x17, %x[ld_in_col]\n"
+ "addvl x19, SP, #12\n"
"ld1sb { z20.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
"add z12.h, z12.h, z9.h\n"
"ld1sb { z13.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add x16, x16, %x[ld_in_col]\n"
"ld1sb { z19.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"trn1 z13.h, z13.h, z19.h\n"
@@ -324,34 +407,40 @@ void sme2_s8q_planar_5x5_s2_4rows_dot_za_impl(
"ld1sb { z15.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"ld1sb { z17.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
"trn1 z15.h, z15.h, z17.h\n"
+ "add x21, x21, %x[ld_in_row]\n"
"add z15.h, z15.h, z9.h\n"
".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
"ld1sb { z16.s }, p1/Z, [x21]\n"
"mov z16.d, z16.d\n"
- "add z16.h, z16.h, z9.h\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xa1402a62 // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+ "add z16.h, z16.h, z9.h\n"
"ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ ".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "9:" // Unpadded: 3 priming loads
- "add x21, x17, %x[ld_in_row]\n"
- "ld1sb { z11.s }, p1/Z, [x17]\n"
- "addvl x20, SP, #9\n"
+ "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+ ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+ "11:" // Unpadded: 1 priming loads
+ "add x21, x16, %x[ld_in_row]\n"
+ "ld1sb { z11.s }, p1/Z, [x16]\n"
+ "addvl x20, SP, #3\n"
"ld1sb { z21.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"trn1 z11.h, z11.h, z21.h\n"
"add z11.h, z11.h, z9.h\n"
"ld1sb { z12.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "add x17, x17, %x[ld_in_col]\n"
+ "addvl x19, SP, #9\n"
"ld1sb { z20.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
"add z12.h, z12.h, z9.h\n"
"ld1sb { z13.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add x16, x16, %x[ld_in_col]\n"
"ld1sb { z19.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"trn1 z13.h, z13.h, z19.h\n"
@@ -365,100 +454,127 @@ void sme2_s8q_planar_5x5_s2_4rows_dot_za_impl(
"ld1sb { z15.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"ld1sb { z17.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
"trn1 z15.h, z15.h, z17.h\n"
+ "add x21, x21, %x[ld_in_row]\n"
"add z15.h, z15.h, z9.h\n"
".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
"ld1sb { z16.s }, p1/Z, [x21]\n"
"mov z16.d, z16.d\n"
- "add z16.h, z16.h, z9.h\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xa1402a62 // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+ "add z16.h, z16.h, z9.h\n"
"ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ ".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "10:" // Unpadded: 2 priming loads
- "add x22, x17, %x[ld_in_row]\n"
- "ld1sb { z11.s }, p1/Z, [x17]\n"
- "addvl x21, SP, #6\n"
- "ld1sb { z21.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+ ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+ "12:" // Unpadded: 0 priming loads
+ "cmp x17, #0x2\n"
+ ".inst 0xa1402be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
+ "ld1h { z0.h }, p2/Z, [SP, #2, MUL VL]\n"
+ "blt 22f\n"
+ "add x20, x16, %x[ld_in_row]\n"
+ "ld1sb { z11.s }, p1/Z, [x16]\n"
+ "sub x17, x17, #0x2\n"
+ "ld1sb { z21.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z11.h, z11.h, z21.h\n"
+ "sub x15, x15, #0x1\n"
+ "ld1sb { z12.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "lsr x19, x17, #0x1\n"
"add z11.h, z11.h, z9.h\n"
- "ld1sb { z12.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "addvl x20, SP, #12\n"
- "ld1sb { z20.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "ld1sb { z20.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
+ "cmp x19, x15\n"
+ "ld1sb { z13.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "csel x25, x19, x15, LT\n"
"add z12.h, z12.h, z9.h\n"
- "ld1sb { z13.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1sb { z19.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "ld1sb { z19.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z13.h, z13.h, z19.h\n"
"add z13.h, z13.h, z9.h\n"
- "ld1sb { z14.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "ld1sb { z18.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "ld1sb { z14.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "ld1sb { z18.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z14.h, z14.h, z18.h\n"
"add z14.h, z14.h, z9.h\n"
- "ld1sb { z15.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "ld1sb { z17.s }, p1/Z, [x22]\n"
+ "ld1sb { z15.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "and x17, x17, #0x1\n"
+ "ld1sb { z17.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z15.h, z15.h, z17.h\n"
- "add x22, x22, %x[ld_in_row]\n"
"add z15.h, z15.h, z9.h\n"
- ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- "ld1sb { z16.s }, p1/Z, [x22]\n"
+ "ld1sb { z16.s }, p1/Z, [x20]\n"
"mov z16.d, z16.d\n"
+ "add z16.h, z16.h, z9.h\n"
+ "sub x15, x15, x25\n"
+ "cbz x25, 21f\n"
+ "13:" // Unpadded: Main loop
+ ".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+ "addvl x24, SP, #6\n"
+ "addvl x23, SP, #12\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402b02 // ld1h { z2.h, z10.h }, pn10.b/Z, [x24]\n"
+ "add x22, x16, %x[ld_in_row]\n"
+ "addvl x21, SP, #3\n"
".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
- "add z16.h, z16.h, z9.h\n"
- "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "addvl x20, SP, #9\n"
+ "subs x25, x25, #0x1\n"
".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xa1402ae2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x23]\n"
+ ".inst 0xc172156a // sdot za.s[x8, 2], { z11.h-z14.h }, z2.h\n"
+ "ld1sb { z11.s }, p1/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "add x19, x16, %x[ld_in_row]\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
- "11:" // Unpadded: 1 priming loads
- "add x22, x17, %x[ld_in_row]\n"
- "ld1sb { z11.s }, p1/Z, [x17]\n"
- "addvl x21, SP, #3\n"
+ "ld1h { z0.h }, p2/Z, [x24, #2, MUL VL]\n"
+ ".inst 0xc17a158a // sdot za.s[x8, 2], { z12.h-z15.h }, z10.h\n"
"ld1sb { z21.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"trn1 z11.h, z11.h, z21.h\n"
+ ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+ "ld1h { z0.h }, p2/Z, [x23, #2, MUL VL]\n"
"add z11.h, z11.h, z9.h\n"
"ld1sb { z12.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "addvl x20, SP, #9\n"
+ ".inst 0xc17015aa // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
"ld1sb { z20.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
"add z12.h, z12.h, z9.h\n"
"ld1sb { z13.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "add x17, x17, %x[ld_in_col]\n"
+ ".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
+ "add x8, x8, #0x1\n"
"ld1sb { z19.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"trn1 z13.h, z13.h, z19.h\n"
"add z13.h, z13.h, z9.h\n"
"ld1sb { z14.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc1a3ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
"ld1sb { z18.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"trn1 z14.h, z14.h, z18.h\n"
"add z14.h, z14.h, z9.h\n"
"ld1sb { z15.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc1a1aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
"ld1sb { z17.s }, p1/Z, [x22]\n"
"trn1 z15.h, z15.h, z17.h\n"
"add x22, x22, %x[ld_in_row]\n"
"add z15.h, z15.h, z9.h\n"
".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+ ".inst 0xc1a8ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
"ld1sb { z16.s }, p1/Z, [x22]\n"
"mov z16.d, z16.d\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
@@ -467,166 +583,50 @@ void sme2_s8q_planar_5x5_s2_4rows_dot_za_impl(
"add z16.h, z16.h, z9.h\n"
"ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
- ".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
- "12:" // Unpadded: 0 priming loads
- "cmp x7, #0x2\n"
- ".inst 0xa1402be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
- "ld1h { z0.h }, p2/Z, [SP, #2, MUL VL]\n"
- "blt 22f\n"
- "add x21, x17, %x[ld_in_row]\n"
- "ld1sb { z11.s }, p1/Z, [x17]\n"
- "sub x7, x7, #0x2\n"
- "ld1sb { z21.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "trn1 z11.h, z11.h, z21.h\n"
- "sub x16, x16, #0x1\n"
- "ld1sb { z12.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "lsr x20, x7, #0x1\n"
- "add z11.h, z11.h, z9.h\n"
- "ld1sb { z20.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "trn1 z12.h, z12.h, z20.h\n"
- "cmp x20, x16\n"
- "ld1sb { z13.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "csel x26, x20, x16, LT\n"
- "add z12.h, z12.h, z9.h\n"
- "ld1sb { z19.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "trn1 z13.h, z13.h, z19.h\n"
- "add z13.h, z13.h, z9.h\n"
- "ld1sb { z14.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1sb { z18.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "trn1 z14.h, z14.h, z18.h\n"
- "add z14.h, z14.h, z9.h\n"
- "ld1sb { z15.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "and x7, x7, #0x1\n"
- "ld1sb { z17.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "trn1 z15.h, z15.h, z17.h\n"
- "add z15.h, z15.h, z9.h\n"
- "ld1sb { z16.s }, p1/Z, [x21]\n"
- "mov z16.d, z16.d\n"
- "add z16.h, z16.h, z9.h\n"
- "sub x16, x16, x26\n"
- "cbz x26, 21f\n"
- "13:" // Unpadded: Main loop
- ".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- "addvl x25, SP, #6\n"
- "addvl x24, SP, #12\n"
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402b22 // ld1h { z2.h, z10.h }, pn10.b/Z, [x25]\n"
- "add x23, x17, %x[ld_in_row]\n"
- "addvl x22, SP, #3\n"
- ".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
- "addvl x21, SP, #9\n"
- "subs x26, x26, #0x1\n"
- ".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402b02 // ld1h { z2.h, z10.h }, pn10.b/Z, [x24]\n"
- ".inst 0xc172156a // sdot za.s[x8, 2], { z11.h-z14.h }, z2.h\n"
- "ld1sb { z11.s }, p1/Z, [x17]\n"
- "add x17, x17, %x[ld_in_col]\n"
- "add x20, x17, %x[ld_in_row]\n"
- ".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x25, #2, MUL VL]\n"
- ".inst 0xc17a158a // sdot za.s[x8, 2], { z12.h-z15.h }, z10.h\n"
- "ld1sb { z21.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row]\n"
- "trn1 z11.h, z11.h, z21.h\n"
- ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x24, #2, MUL VL]\n"
- "add z11.h, z11.h, z9.h\n"
- "ld1sb { z12.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row]\n"
- ".inst 0xc17015aa // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
- "ld1sb { z20.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row]\n"
- "trn1 z12.h, z12.h, z20.h\n"
- "add z12.h, z12.h, z9.h\n"
- "ld1sb { z13.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row]\n"
- ".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
- "add x8, x8, #0x1\n"
- "ld1sb { z19.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row]\n"
- "trn1 z13.h, z13.h, z19.h\n"
- "add z13.h, z13.h, z9.h\n"
- "ld1sb { z14.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row]\n"
- ".inst 0xc1a3ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
- "ld1sb { z18.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row]\n"
- "trn1 z14.h, z14.h, z18.h\n"
- "add z14.h, z14.h, z9.h\n"
- "ld1sb { z15.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row]\n"
- ".inst 0xc1a1aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
- "ld1sb { z17.s }, p1/Z, [x23]\n"
- "trn1 z15.h, z15.h, z17.h\n"
- "add x23, x23, %x[ld_in_row]\n"
- "add z15.h, z15.h, z9.h\n"
- ".inst 0xa1402ac2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x22]\n"
- ".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- ".inst 0xc1a8ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
- "ld1sb { z16.s }, p1/Z, [x23]\n"
- "mov z16.d, z16.d\n"
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
- "add z16.h, z16.h, z9.h\n"
- "ld1h { z0.h }, p2/Z, [x22, #2, MUL VL]\n"
- ".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
".inst 0xc1b7cf44 // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
- "st1b { z4.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- "ld1sb { z11.s }, p1/Z, [x17]\n"
- ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
- "st1b { z5.s }, p1, [x14]\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "st1b { z4.s }, p1, [x14]\n"
"add x14, x14, x11\n"
- "ld1sb { z21.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z11.s }, p1/Z, [x16]\n"
+ ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+ "st1b { z5.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
+ "ld1sb { z21.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z11.h, z11.h, z21.h\n"
- "st1b { z6.s }, p1, [x10]\n"
- "ld1sb { z12.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- "add x10, x10, x28\n"
- "st1b { z7.s }, p1, [x9]\n"
- "ld1sb { z20.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z12.h, z12.h, z20.h\n"
+ "st1b { z6.s }, p1, [x9]\n"
+ "ld1sb { z12.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"add x9, x9, x27\n"
- "ld1sb { z13.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "st1b { z7.s }, p1, [x28]\n"
+ "ld1sb { z20.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z20.h\n"
+ "add x28, x28, x26\n"
+ "ld1sb { z13.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
"add z11.h, z11.h, z9.h\n"
- "ld1sb { z19.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z19.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z13.h, z13.h, z19.h\n"
"add z12.h, z12.h, z9.h\n"
- "ld1sb { z14.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z14.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"add z13.h, z13.h, z9.h\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1sb { z18.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "ld1sb { z18.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z14.h, z14.h, z18.h\n"
"add z14.h, z14.h, z9.h\n"
- "ld1sb { z15.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- "ld1sb { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z15.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "ld1sb { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z15.h, z15.h, z17.h\n"
"add z15.h, z15.h, z9.h\n"
- "ld1sb { z16.s }, p1/Z, [x20]\n"
+ "ld1sb { z16.s }, p1/Z, [x19]\n"
"mov z16.d, z16.d\n"
"add z16.h, z16.h, z9.h\n"
".inst 0xa1402be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
@@ -634,717 +634,717 @@ void sme2_s8q_planar_5x5_s2_4rows_dot_za_impl(
"bgt 13b\n"
"b 21f\n"
"14:" // Padded
- "cbz x22, 19f\n"
- "cmp x22, #0x1\n"
- "sub x7, x7, x22\n"
+ "cbz x21, 19f\n"
+ "cmp x21, #0x1\n"
+ "sub x17, x17, x21\n"
"beq 18f\n"
- "cmp x22, #0x2\n"
+ "cmp x21, #0x2\n"
"beq 17f\n"
- "cmp x22, #0x3\n"
+ "cmp x21, #0x3\n"
"beq 16f\n"
"15:" // Padded: 4 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z11.s }, p0/Z, [x17]\n"
+ "ld1sb { z11.s }, p0/Z, [x16]\n"
"add z11.h, p0/M, z11.h, z9.h\n"
- "add x21, x17, %x[ld_in_row]\n"
+ "add x20, x16, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z21.s }, p0/Z, [x21]\n"
+ "ld1sb { z21.s }, p0/Z, [x20]\n"
"add z21.h, p0/M, z21.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z12.s }, p0/Z, [x21]\n"
+ "ld1sb { z12.s }, p0/Z, [x20]\n"
"add z12.h, p0/M, z12.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z20.s }, p0/Z, [x21]\n"
+ "ld1sb { z20.s }, p0/Z, [x20]\n"
"add z20.h, p0/M, z20.h, z9.h\n"
"mov x12, #0x4\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z11.h, z11.h, z21.h\n"
"trn1 z12.h, z12.h, z20.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z13.s }, p0/Z, [x21]\n"
+ "ld1sb { z13.s }, p0/Z, [x20]\n"
"add z13.h, p0/M, z13.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z19.s }, p0/Z, [x21]\n"
+ "ld1sb { z19.s }, p0/Z, [x20]\n"
"add z19.h, p0/M, z19.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z14.s }, p0/Z, [x21]\n"
+ "ld1sb { z14.s }, p0/Z, [x20]\n"
"add z14.h, p0/M, z14.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z18.s }, p0/Z, [x21]\n"
+ "ld1sb { z18.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
"add z18.h, p0/M, z18.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z15.s }, p0/Z, [x21]\n"
+ "ld1sb { z15.s }, p0/Z, [x20]\n"
"add z15.h, p0/M, z15.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z17.s }, p0/Z, [x21]\n"
+ "ld1sb { z17.s }, p0/Z, [x20]\n"
"add z17.h, p0/M, z17.h, z9.h\n"
- "addvl x20, SP, #12\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "addvl x19, SP, #12\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z13.h, z13.h, z19.h\n"
"trn1 z14.h, z14.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a62 // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
"trn1 z15.h, z15.h, z17.h\n"
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- "ld1sb { z16.s }, p0/Z, [x21]\n"
+ "ld1sb { z16.s }, p0/Z, [x20]\n"
"add z16.h, p0/M, z16.h, z9.h\n"
"mov z16.d, z16.d\n"
- "add x17, x17, %x[ld_in_col]\n"
+ "add x16, x16, %x[ld_in_col]\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
"16:" // Padded: 3 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z11.s }, p0/Z, [x17]\n"
+ "ld1sb { z11.s }, p0/Z, [x16]\n"
"add z11.h, p0/M, z11.h, z9.h\n"
- "add x21, x17, %x[ld_in_row]\n"
+ "add x20, x16, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z21.s }, p0/Z, [x21]\n"
+ "ld1sb { z21.s }, p0/Z, [x20]\n"
"add z21.h, p0/M, z21.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z12.s }, p0/Z, [x21]\n"
+ "ld1sb { z12.s }, p0/Z, [x20]\n"
"add z12.h, p0/M, z12.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z20.s }, p0/Z, [x21]\n"
+ "ld1sb { z20.s }, p0/Z, [x20]\n"
"add z20.h, p0/M, z20.h, z9.h\n"
"mov x12, #0x4\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z11.h, z11.h, z21.h\n"
"trn1 z12.h, z12.h, z20.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z13.s }, p0/Z, [x21]\n"
+ "ld1sb { z13.s }, p0/Z, [x20]\n"
"add z13.h, p0/M, z13.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z19.s }, p0/Z, [x21]\n"
+ "ld1sb { z19.s }, p0/Z, [x20]\n"
"add z19.h, p0/M, z19.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z14.s }, p0/Z, [x21]\n"
+ "ld1sb { z14.s }, p0/Z, [x20]\n"
"add z14.h, p0/M, z14.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z18.s }, p0/Z, [x21]\n"
+ "ld1sb { z18.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
"add z18.h, p0/M, z18.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z15.s }, p0/Z, [x21]\n"
+ "ld1sb { z15.s }, p0/Z, [x20]\n"
"add z15.h, p0/M, z15.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z17.s }, p0/Z, [x21]\n"
+ "ld1sb { z17.s }, p0/Z, [x20]\n"
"add z17.h, p0/M, z17.h, z9.h\n"
- "addvl x20, SP, #9\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "addvl x19, SP, #9\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z13.h, z13.h, z19.h\n"
"trn1 z14.h, z14.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a62 // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
"trn1 z15.h, z15.h, z17.h\n"
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- "ld1sb { z16.s }, p0/Z, [x21]\n"
+ "ld1sb { z16.s }, p0/Z, [x20]\n"
"add z16.h, p0/M, z16.h, z9.h\n"
"mov z16.d, z16.d\n"
- "add x17, x17, %x[ld_in_col]\n"
+ "add x16, x16, %x[ld_in_col]\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
"17:" // Padded: 2 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z11.s }, p0/Z, [x17]\n"
+ "ld1sb { z11.s }, p0/Z, [x16]\n"
"add z11.h, p0/M, z11.h, z9.h\n"
- "add x20, x17, %x[ld_in_row]\n"
+ "add x19, x16, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z21.s }, p0/Z, [x20]\n"
+ "ld1sb { z21.s }, p0/Z, [x19]\n"
"add z21.h, p0/M, z21.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z12.s }, p0/Z, [x20]\n"
+ "ld1sb { z12.s }, p0/Z, [x19]\n"
"add z12.h, p0/M, z12.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z20.s }, p0/Z, [x20]\n"
+ "ld1sb { z20.s }, p0/Z, [x19]\n"
"add z20.h, p0/M, z20.h, z9.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z11.h, z11.h, z21.h\n"
"trn1 z12.h, z12.h, z20.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z13.s }, p0/Z, [x20]\n"
+ "ld1sb { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z19.s }, p0/Z, [x20]\n"
+ "ld1sb { z19.s }, p0/Z, [x19]\n"
"add z19.h, p0/M, z19.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z14.s }, p0/Z, [x20]\n"
+ "ld1sb { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z18.s }, p0/Z, [x20]\n"
+ "ld1sb { z18.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
"add z18.h, p0/M, z18.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z15.s }, p0/Z, [x20]\n"
+ "ld1sb { z15.s }, p0/Z, [x19]\n"
"add z15.h, p0/M, z15.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
+ "ld1sb { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z9.h\n"
- "addvl x21, SP, #6\n"
+ "addvl x20, SP, #6\n"
"trn1 z13.h, z13.h, z19.h\n"
"trn1 z14.h, z14.h, z18.h\n"
- ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
"trn1 z15.h, z15.h, z17.h\n"
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
- "addvl x20, SP, #12\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
+ "addvl x19, SP, #12\n"
"add z16.h, p0/M, z16.h, z9.h\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a62 // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
"mov z16.d, z16.d\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
"18:" // Padded: 1 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z11.s }, p0/Z, [x17]\n"
+ "ld1sb { z11.s }, p0/Z, [x16]\n"
"add z11.h, p0/M, z11.h, z9.h\n"
- "add x20, x17, %x[ld_in_row]\n"
+ "add x19, x16, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z21.s }, p0/Z, [x20]\n"
+ "ld1sb { z21.s }, p0/Z, [x19]\n"
"add z21.h, p0/M, z21.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z12.s }, p0/Z, [x20]\n"
+ "ld1sb { z12.s }, p0/Z, [x19]\n"
"add z12.h, p0/M, z12.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z20.s }, p0/Z, [x20]\n"
+ "ld1sb { z20.s }, p0/Z, [x19]\n"
"add z20.h, p0/M, z20.h, z9.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z11.h, z11.h, z21.h\n"
"trn1 z12.h, z12.h, z20.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z13.s }, p0/Z, [x20]\n"
+ "ld1sb { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z19.s }, p0/Z, [x20]\n"
+ "ld1sb { z19.s }, p0/Z, [x19]\n"
"add z19.h, p0/M, z19.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z14.s }, p0/Z, [x20]\n"
+ "ld1sb { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z18.s }, p0/Z, [x20]\n"
+ "ld1sb { z18.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
"add z18.h, p0/M, z18.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z15.s }, p0/Z, [x20]\n"
+ "ld1sb { z15.s }, p0/Z, [x19]\n"
"add z15.h, p0/M, z15.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
+ "ld1sb { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z9.h\n"
- "addvl x21, SP, #3\n"
+ "addvl x20, SP, #3\n"
"trn1 z13.h, z13.h, z19.h\n"
"trn1 z14.h, z14.h, z18.h\n"
- ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
"trn1 z15.h, z15.h, z17.h\n"
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
- "addvl x20, SP, #9\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
+ "addvl x19, SP, #9\n"
"add z16.h, p0/M, z16.h, z9.h\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a62 // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
"mov z16.d, z16.d\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
"19:" // Padded: 0 priming loads
- "cmp x7, #0x2\n"
+ "cmp x17, #0x2\n"
".inst 0xa1402be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
"ld1h { z0.h }, p2/Z, [SP, #2, MUL VL]\n"
"blt 22f\n"
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z11.s }, p0/Z, [x17]\n"
+ "ld1sb { z11.s }, p0/Z, [x16]\n"
"add z11.h, p0/M, z11.h, z9.h\n"
- "add x20, x17, %x[ld_in_row]\n"
+ "add x19, x16, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z21.s }, p0/Z, [x20]\n"
+ "ld1sb { z21.s }, p0/Z, [x19]\n"
"add z21.h, p0/M, z21.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z12.s }, p0/Z, [x20]\n"
+ "ld1sb { z12.s }, p0/Z, [x19]\n"
"add z12.h, p0/M, z12.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z20.s }, p0/Z, [x20]\n"
+ "ld1sb { z20.s }, p0/Z, [x19]\n"
"add z20.h, p0/M, z20.h, z9.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z11.h, z11.h, z21.h\n"
"trn1 z12.h, z12.h, z20.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z13.s }, p0/Z, [x20]\n"
+ "ld1sb { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z19.s }, p0/Z, [x20]\n"
+ "ld1sb { z19.s }, p0/Z, [x19]\n"
"add z19.h, p0/M, z19.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z14.s }, p0/Z, [x20]\n"
+ "ld1sb { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z18.s }, p0/Z, [x20]\n"
+ "ld1sb { z18.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
"add z18.h, p0/M, z18.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z15.s }, p0/Z, [x20]\n"
+ "ld1sb { z15.s }, p0/Z, [x19]\n"
"add z15.h, p0/M, z15.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
+ "ld1sb { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z9.h\n"
- "sub x7, x7, #0x2\n"
- "sub x16, x16, #0x1\n"
+ "sub x17, x17, #0x2\n"
+ "sub x15, x15, #0x1\n"
"trn1 z13.h, z13.h, z19.h\n"
"trn1 z14.h, z14.h, z18.h\n"
- "lsr x20, x7, #0x1\n"
- "cmp x20, x16\n"
+ "lsr x19, x17, #0x1\n"
+ "cmp x19, x15\n"
"trn1 z15.h, z15.h, z17.h\n"
"mov z16.d, z16.d\n"
- "csel x25, x20, x16, LT\n"
- "add x17, x17, %x[ld_in_col]\n"
- "and x7, x7, #0x1\n"
- "sub x16, x16, x25\n"
- "cbz x25, 21f\n"
+ "csel x24, x19, x15, LT\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "and x17, x17, #0x1\n"
+ "sub x15, x15, x24\n"
+ "cbz x24, 21f\n"
"20:" // Padded: Main loop
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- "addvl x24, SP, #6\n"
- "addvl x23, SP, #12\n"
+ "addvl x23, SP, #6\n"
+ "addvl x22, SP, #12\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402b02 // ld1h { z2.h, z10.h }, pn10.b/Z, [x24]\n"
+ ".inst 0xa1402ae2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x23]\n"
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
- "add x20, x17, %x[ld_in_row]\n"
- "addvl x22, SP, #3\n"
+ "add x19, x16, %x[ld_in_row]\n"
+ "addvl x21, SP, #3\n"
".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402ae2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x23]\n"
- "addvl x21, SP, #9\n"
- "subs x25, x25, #0x1\n"
+ ".inst 0xa1402ac2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x22]\n"
+ "addvl x20, SP, #9\n"
+ "subs x24, x24, #0x1\n"
".inst 0xc172156a // sdot za.s[x8, 2], { z11.h-z14.h }, z2.h\n"
- "ld1sb { z11.s }, p0/Z, [x17]\n"
+ "ld1sb { z11.s }, p0/Z, [x16]\n"
"add z11.h, p0/M, z11.h, z9.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z21.s }, p0/Z, [x20]\n"
+ "ld1sb { z21.s }, p0/Z, [x19]\n"
"add z21.h, p0/M, z21.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc17a158a // sdot za.s[x8, 2], { z12.h-z15.h }, z10.h\n"
- "ld1sb { z12.s }, p0/Z, [x20]\n"
+ "ld1sb { z12.s }, p0/Z, [x19]\n"
"add z12.h, p0/M, z12.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x23, #2, MUL VL]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x4\n"
- "ld1sb { z20.s }, p0/Z, [x20]\n"
+ "ld1sb { z20.s }, p0/Z, [x19]\n"
".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
"add z20.h, p0/M, z20.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
- "ld1h { z0.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "ld1h { z0.h }, p2/Z, [x22, #2, MUL VL]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc17015aa // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
"trn1 z11.h, z11.h, z21.h\n"
- "ld1sb { z13.s }, p0/Z, [x20]\n"
+ "ld1sb { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z19.s }, p0/Z, [x20]\n"
+ "ld1sb { z19.s }, p0/Z, [x19]\n"
"add z19.h, p0/M, z19.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z14.s }, p0/Z, [x20]\n"
+ "ld1sb { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z18.s }, p0/Z, [x20]\n"
+ "ld1sb { z18.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
"add z18.h, p0/M, z18.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z15.s }, p0/Z, [x20]\n"
+ "ld1sb { z15.s }, p0/Z, [x19]\n"
"add z15.h, p0/M, z15.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
+ "ld1sb { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z9.h\n"
"trn1 z12.h, z12.h, z20.h\n"
"trn1 z13.h, z13.h, z19.h\n"
"trn1 z14.h, z14.h, z18.h\n"
- ".inst 0xa1402ac2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x22]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
"add x8, x8, #0x1\n"
"trn1 z15.h, z15.h, z17.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
"mov x12, #0x0\n"
"add z16.h, p0/M, z16.h, z9.h\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
- "add x17, x17, %x[ld_in_col]\n"
+ ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+ "add x16, x16, %x[ld_in_col]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
- "ld1sb { z11.s }, p0/Z, [x17]\n"
+ "ld1sb { z11.s }, p0/Z, [x16]\n"
"add z11.h, p0/M, z11.h, z9.h\n"
- "add x20, x17, %x[ld_in_row]\n"
+ "add x19, x16, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z21.s }, p0/Z, [x20]\n"
+ "ld1sb { z21.s }, p0/Z, [x19]\n"
"add z21.h, p0/M, z21.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
- "ld1sb { z12.s }, p0/Z, [x20]\n"
+ "ld1sb { z12.s }, p0/Z, [x19]\n"
"mov z16.d, z16.d\n"
- "ld1h { z0.h }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
"add z12.h, p0/M, z12.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z20.s }, p0/Z, [x20]\n"
+ "ld1sb { z20.s }, p0/Z, [x19]\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
"mov x12, #0x4\n"
"add z20.h, p0/M, z20.h, z9.h\n"
- "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
- "ld1sb { z13.s }, p0/Z, [x20]\n"
+ "ld1sb { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z19.s }, p0/Z, [x20]\n"
+ "ld1sb { z19.s }, p0/Z, [x19]\n"
"add z19.h, p0/M, z19.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z14.s }, p0/Z, [x20]\n"
+ "ld1sb { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z18.s }, p0/Z, [x20]\n"
+ "ld1sb { z18.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
"add z18.h, p0/M, z18.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z15.s }, p0/Z, [x20]\n"
+ "ld1sb { z15.s }, p0/Z, [x19]\n"
".inst 0xc1a3ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
"add z15.h, p0/M, z15.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
+ "ld1sb { z17.s }, p0/Z, [x19]\n"
".inst 0xc1a1aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
"add z17.h, p0/M, z17.h, z9.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z9.h\n"
".inst 0xc1a8ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
".inst 0xa1402be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
- "add x17, x17, %x[ld_in_col]\n"
+ "add x16, x16, %x[ld_in_col]\n"
".inst 0xc1b7cf44 // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
- "st1b { z4.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- "ld1h { z0.h }, p2/Z, [SP, #2, MUL VL]\n"
- "st1b { z5.s }, p1, [x14]\n"
+ "st1b { z4.s }, p1, [x14]\n"
"add x14, x14, x11\n"
+ "ld1h { z0.h }, p2/Z, [SP, #2, MUL VL]\n"
+ "st1b { z5.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
"trn1 z11.h, z11.h, z21.h\n"
"trn1 z12.h, z12.h, z20.h\n"
- "st1b { z6.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
+ "st1b { z6.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
"trn1 z13.h, z13.h, z19.h\n"
"trn1 z14.h, z14.h, z18.h\n"
- "st1b { z7.s }, p1, [x9]\n"
- "add x9, x9, x27\n"
+ "st1b { z7.s }, p1, [x28]\n"
+ "add x28, x28, x26\n"
"trn1 z15.h, z15.h, z17.h\n"
"mov z16.d, z16.d\n"
"bgt 20b\n"
"21:" // Main loop tail
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- "addvl x24, SP, #6\n"
- "addvl x23, SP, #12\n"
+ "addvl x23, SP, #6\n"
+ "addvl x22, SP, #12\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402b02 // ld1h { z2.h, z10.h }, pn10.b/Z, [x24]\n"
+ ".inst 0xa1402ae2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x23]\n"
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
- "add x22, x17, %x[ld_in_row]\n"
- "addvl x21, SP, #3\n"
+ "add x21, x16, %x[ld_in_row]\n"
+ "addvl x20, SP, #3\n"
".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402ae2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x23]\n"
- "addvl x20, SP, #9\n"
+ ".inst 0xa1402ac2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x22]\n"
+ "addvl x19, SP, #9\n"
".inst 0xc172156a // sdot za.s[x8, 2], { z11.h-z14.h }, z2.h\n"
- "ld1sb { z11.s }, p0/Z, [x17]\n"
+ "ld1sb { z11.s }, p0/Z, [x16]\n"
"add z11.h, p0/M, z11.h, z9.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z21.s }, p0/Z, [x22]\n"
+ "ld1sb { z21.s }, p0/Z, [x21]\n"
"add z21.h, p0/M, z21.h, z9.h\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc17a158a // sdot za.s[x8, 2], { z12.h-z15.h }, z10.h\n"
- "ld1sb { z12.s }, p0/Z, [x22]\n"
+ "ld1sb { z12.s }, p0/Z, [x21]\n"
"add z12.h, p0/M, z12.h, z9.h\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x23, #2, MUL VL]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x4\n"
- "ld1sb { z20.s }, p0/Z, [x22]\n"
+ "ld1sb { z20.s }, p0/Z, [x21]\n"
".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
"add z20.h, p0/M, z20.h, z9.h\n"
- "add x22, x22, %x[ld_in_row]\n"
- "ld1h { z0.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "ld1h { z0.h }, p2/Z, [x22, #2, MUL VL]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc17015aa // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
"trn1 z11.h, z11.h, z21.h\n"
- "ld1sb { z13.s }, p0/Z, [x22]\n"
+ "ld1sb { z13.s }, p0/Z, [x21]\n"
"add z13.h, p0/M, z13.h, z9.h\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z19.s }, p0/Z, [x22]\n"
+ "ld1sb { z19.s }, p0/Z, [x21]\n"
"add z19.h, p0/M, z19.h, z9.h\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z14.s }, p0/Z, [x22]\n"
+ "ld1sb { z14.s }, p0/Z, [x21]\n"
"add z14.h, p0/M, z14.h, z9.h\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z18.s }, p0/Z, [x22]\n"
+ "ld1sb { z18.s }, p0/Z, [x21]\n"
"mov x12, #0x8\n"
"add z18.h, p0/M, z18.h, z9.h\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z15.s }, p0/Z, [x22]\n"
+ "ld1sb { z15.s }, p0/Z, [x21]\n"
"add z15.h, p0/M, z15.h, z9.h\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z17.s }, p0/Z, [x22]\n"
+ "ld1sb { z17.s }, p0/Z, [x21]\n"
"add z17.h, p0/M, z17.h, z9.h\n"
"trn1 z12.h, z12.h, z20.h\n"
"trn1 z13.h, z13.h, z19.h\n"
"trn1 z14.h, z14.h, z18.h\n"
- ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
"add x8, x8, #0x1\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
"trn1 z15.h, z15.h, z17.h\n"
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- "ld1sb { z16.s }, p0/Z, [x22]\n"
+ "ld1sb { z16.s }, p0/Z, [x21]\n"
".inst 0xc1a3ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
"add z16.h, p0/M, z16.h, z9.h\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
- "add x17, x17, %x[ld_in_col]\n"
+ ".inst 0xa1402a62 // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+ "add x16, x16, %x[ld_in_col]\n"
".inst 0xc1a1aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
"mov z16.d, z16.d\n"
- "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
".inst 0xc1a8ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
".inst 0xc1b7cf44 // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
- "st1b { z4.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- "st1b { z5.s }, p1, [x14]\n"
+ "st1b { z4.s }, p1, [x14]\n"
"add x14, x14, x11\n"
+ "st1b { z5.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
".inst 0xa1402be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
- "st1b { z6.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
+ "st1b { z6.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
"ld1h { z0.h }, p2/Z, [SP, #2, MUL VL]\n"
- "st1b { z7.s }, p1, [x9]\n"
- "add x9, x9, x27\n"
+ "st1b { z7.s }, p1, [x28]\n"
+ "add x28, x28, x26\n"
"22:" // Main loop skip tail
- "cbz x7, 23f\n" // Skip remainder inputs
+ "cbz x17, 23f\n" // Skip remainder inputs
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z11.s }, p0/Z, [x17]\n"
+ "ld1sb { z11.s }, p0/Z, [x16]\n"
"add z11.h, p0/M, z11.h, z9.h\n"
- "add x20, x17, %x[ld_in_row]\n"
+ "add x19, x16, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z21.s }, p0/Z, [x20]\n"
+ "ld1sb { z21.s }, p0/Z, [x19]\n"
"add z21.h, p0/M, z21.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z12.s }, p0/Z, [x20]\n"
+ "ld1sb { z12.s }, p0/Z, [x19]\n"
"add z12.h, p0/M, z12.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z20.s }, p0/Z, [x20]\n"
+ "ld1sb { z20.s }, p0/Z, [x19]\n"
"add z20.h, p0/M, z20.h, z9.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z11.h, z11.h, z21.h\n"
"trn1 z12.h, z12.h, z20.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z13.s }, p0/Z, [x20]\n"
+ "ld1sb { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z19.s }, p0/Z, [x20]\n"
+ "ld1sb { z19.s }, p0/Z, [x19]\n"
"add z19.h, p0/M, z19.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z14.s }, p0/Z, [x20]\n"
+ "ld1sb { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z18.s }, p0/Z, [x20]\n"
+ "ld1sb { z18.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
"add z18.h, p0/M, z18.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z15.s }, p0/Z, [x20]\n"
+ "ld1sb { z15.s }, p0/Z, [x19]\n"
"add z15.h, p0/M, z15.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
+ "ld1sb { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
"trn1 z13.h, z13.h, z19.h\n"
"trn1 z14.h, z14.h, z18.h\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
+ "ld1sb { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z9.h\n"
"trn1 z15.h, z15.h, z17.h\n"
- "addvl x21, SP, #6\n"
+ "addvl x20, SP, #6\n"
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
"mov z16.d, z16.d\n"
- "addvl x20, SP, #12\n"
- "sub x16, x16, #0x1\n"
+ "addvl x19, SP, #12\n"
+ "sub x15, x15, #0x1\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
".inst 0xc1a3ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a62 // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
".inst 0xc1a1aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
".inst 0xc172156a // sdot za.s[x8, 2], { z11.h-z14.h }, z2.h\n"
".inst 0xc1a8ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
".inst 0xc17a158a // sdot za.s[x8, 2], { z12.h-z15.h }, z10.h\n"
".inst 0xc1b7cf44 // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
- "st1b { z4.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
- "st1b { z5.s }, p1, [x14]\n"
+ "st1b { z4.s }, p1, [x14]\n"
"add x14, x14, x11\n"
+ ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+ "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+ "st1b { z5.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
".inst 0xc17015aa // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
"add x8, x8, #0x1\n"
- "st1b { z6.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
- "st1b { z7.s }, p1, [x9]\n"
+ "st1b { z6.s }, p1, [x9]\n"
"add x9, x9, x27\n"
+ "st1b { z7.s }, p1, [x28]\n"
+ "add x28, x28, x26\n"
".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
"23:" // Tail input: End
- "cbz x16, 25f\n"
+ "cbz x15, 25f\n"
"24:" // Right padding loop
".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
".inst 0xc1a3ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
"add x8, x8, #0x1\n"
".inst 0xc1a1aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
- "subs x16, x16, #0x1\n"
+ "subs x15, x15, #0x1\n"
".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
".inst 0xc1a8ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
".inst 0xc1b7cf44 // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
- "st1b { z4.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- "st1b { z5.s }, p1, [x14]\n"
+ "st1b { z4.s }, p1, [x14]\n"
"add x14, x14, x11\n"
- "st1b { z6.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
- "st1b { z7.s }, p1, [x9]\n"
+ "st1b { z5.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
+ "st1b { z6.s }, p1, [x9]\n"
"add x9, x9, x27\n"
+ "st1b { z7.s }, p1, [x28]\n"
+ "add x28, x28, x26\n"
"bgt 24b\n"
"25:" // End
- "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
- "incw x22, ALL, MUL #16\n"
- "incw x22, ALL, MUL #9\n"
- "str x22, [%x[args], %[offsetof_Args_weights]]\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "incw x6\n"
- "whilelt p1.s, x6, x5\n"
- "ldr x17, [%x[args], %[offsetof_Args_inptr]]\n"
- "add x17, x17, x20\n"
- "str x17, [%x[args], %[offsetof_Args_inptr]]\n"
- "ldr x25, [%x[args], %[offsetof_Args_outptrs]]\n"
- "ldr x24, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
- "ldp x23, x22, [x25, #0x0]\n"
- "ldp x21, x20, [x24, #0x0]\n"
- "add x23, x23, x21\n"
+ "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+ "incw x21, ALL, MUL #16\n"
+ "incw x21, ALL, MUL #9\n"
+ "str x21, [%x[args], %[offsetof_Args_weights]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+ "incw x7\n"
+ "whilelt p1.s, x7, x6\n"
+ "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
+ "add x16, x16, x19\n"
+ "str x16, [%x[args], %[offsetof_Args_inptr]]\n"
+ "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+ "ldp x22, x21, [x24, #0x0]\n"
+ "ldp x20, x19, [x23, #0x0]\n"
"add x22, x22, x20\n"
- "stp x23, x22, [x25, #0x0]\n"
- "ldp x23, x22, [x25, #0x10]\n"
- "ldp x21, x20, [x24, #0x10]\n"
- "add x23, x23, x21\n"
+ "add x21, x21, x19\n"
+ "stp x22, x21, [x24, #0x0]\n"
+ "ldp x22, x21, [x24, #0x10]\n"
+ "ldp x20, x19, [x23, #0x10]\n"
"add x22, x22, x20\n"
- "stp x23, x22, [x25, #0x10]\n"
+ "add x21, x21, x19\n"
+ "stp x22, x21, [x24, #0x10]\n"
"b.any 1b\n"
"addvl SP, SP, #15\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s1_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s1_4rows_dot_za/generic.cpp
index a7ef556840..d59879b206 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s1_4rows_dot_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s1_4rows_dot_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -69,18 +69,18 @@ void sme2_u8q_planar_3x3_s1_4rows_dot_za_impl(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "ldr x6, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "ldr x7, [%x[args], %[offsetof_Args_pad_bottom]]\n"
"ptrue p2.b\n"
- "mov x20, #0x6\n"
- "ldr x7, [%x[args], %[offsetof_Args_pad_top]]\n"
+ "mov x19, #0x6\n"
+ "ldr x8, [%x[args], %[offsetof_Args_pad_top]]\n"
"ld1rh { z24.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
- "sub x20, x20, x6\n"
+ "sub x19, x19, x7\n"
".inst 0x25207812 // ptrue pn10.b\n"
"ldr x17, [%x[args], %[offsetof_Args_n_channels]]\n"
"whilelt p1.s, XZR, x17\n"
- "whilelt p9.s, XZR, x20\n"
+ "whilelt p9.s, XZR, x19\n"
"ld1rw { z12.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
- "whilelt p8.s, XZR, x7\n"
+ "whilelt p8.s, XZR, x8\n"
"addvl SP, SP, #-12\n"
"ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
"neg z24.h, p2/M, z24.h\n"
@@ -90,377 +90,377 @@ void sme2_u8q_planar_3x3_s1_4rows_dot_za_impl(
"ld1rw { z22.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
"ld1rw { z26.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
"1:" // Channel loop
- "ldr x20, [%x[qp], %[offsetof_Requantize32_bias]]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_bias]]\n"
"mov z8.s, #0x0\n"
- "cbz x20, 2f\n"
- "ld1w { z8.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "cbz x19, 2f\n"
+ "ld1w { z8.s }, p1/Z, [x19, x16, LSL #2]\n"
"2:" // Load bias: Done
- "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
- "mov x20, x22\n"
- "ld1b { z27.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
+ "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+ "mov x19, x21\n"
+ "ld1b { z27.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
"ld1rh { z21.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
"mov z20.h, #0x0\n"
"sub z27.h, z27.h, z21.h\n"
- "incw x22\n"
- "ld1b { z23.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
+ "incw x21\n"
+ "ld1b { z23.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
"sub z23.h, z23.h, z21.h\n"
"trn1 z0.h, z20.h, z27.h\n"
- "ld1b { z16.s }, p2/Z, [x20]\n"
+ "ld1b { z16.s }, p2/Z, [x19]\n"
"sub z16.h, z16.h, z21.h\n"
- "mov x20, x22\n"
+ "mov x19, x21\n"
"trn1 z1.h, z27.h, z23.h\n"
- "ld1b { z27.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
+ "ld1b { z27.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
"trn1 z2.h, z23.h, z16.h\n"
"trn1 z3.h, z16.h, z20.h\n"
- "ld1b { z23.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
+ "ld1b { z23.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
"sub z27.h, z27.h, z21.h\n"
"sub z23.h, z23.h, z21.h\n"
- "ld1b { z16.s }, p2/Z, [x20]\n"
+ "ld1b { z16.s }, p2/Z, [x19]\n"
"sub z16.h, z16.h, z21.h\n"
- "addvl x21, SP, #12\n"
- "incw x22\n"
- "addvl x21, x21, #-4\n"
- "mov x20, x22\n"
- "st1h { z0.h }, p2, [x21]\n"
+ "addvl x20, SP, #12\n"
+ "incw x21\n"
+ "addvl x20, x20, #-4\n"
+ "mov x19, x21\n"
+ "st1h { z0.h }, p2, [x20]\n"
"trn1 z0.h, z20.h, z27.h\n"
- "st1h { z1.h }, p2, [x21, #1, MUL VL]\n"
+ "st1h { z1.h }, p2, [x20, #1, MUL VL]\n"
"trn1 z1.h, z27.h, z23.h\n"
- "ld1b { z27.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
- "st1h { z2.h }, p2, [x21, #2, MUL VL]\n"
+ "ld1b { z27.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
+ "st1h { z2.h }, p2, [x20, #2, MUL VL]\n"
"trn1 z2.h, z23.h, z16.h\n"
- "ld1b { z23.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
- "st1h { z3.h }, p2, [x21, #3, MUL VL]\n"
+ "ld1b { z23.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
+ "st1h { z3.h }, p2, [x20, #3, MUL VL]\n"
"trn1 z3.h, z16.h, z20.h\n"
- "ld1b { z16.s }, p2/Z, [x20]\n"
- "ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+ "ld1b { z16.s }, p2/Z, [x19]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
"sub z27.h, z27.h, z21.h\n"
"sub z23.h, z23.h, z21.h\n"
- "addvl x21, x21, #-4\n"
- "st1h { z0.h }, p2, [x21]\n"
+ "addvl x20, x20, #-4\n"
+ "st1h { z0.h }, p2, [x20]\n"
"sub z16.h, z16.h, z21.h\n"
- "st1h { z1.h }, p2, [x21, #1, MUL VL]\n"
+ "st1h { z1.h }, p2, [x20, #1, MUL VL]\n"
"mov z9.d, z8.d\n"
- "st1h { z2.h }, p2, [x21, #2, MUL VL]\n"
+ "st1h { z2.h }, p2, [x20, #2, MUL VL]\n"
"trn1 z0.h, z20.h, z27.h\n"
"trn1 z1.h, z27.h, z23.h\n"
- "st1h { z3.h }, p2, [x21, #3, MUL VL]\n"
- "addvl x21, x21, #-4\n"
+ "st1h { z3.h }, p2, [x20, #3, MUL VL]\n"
+ "addvl x20, x20, #-4\n"
"trn1 z2.h, z23.h, z16.h\n"
"trn1 z3.h, z16.h, z20.h\n"
- "st1h { z0.h }, p2, [x21]\n"
- "st1h { z1.h }, p2, [x21, #1, MUL VL]\n"
- "st1h { z2.h }, p2, [x21, #2, MUL VL]\n"
- "st1h { z3.h }, p2, [x21, #3, MUL VL]\n"
- "cbz x20, 3f\n"
- "ld1w { z10.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "st1h { z0.h }, p2, [x20]\n"
+ "st1h { z1.h }, p2, [x20, #1, MUL VL]\n"
+ "st1h { z2.h }, p2, [x20, #2, MUL VL]\n"
+ "st1h { z3.h }, p2, [x20, #3, MUL VL]\n"
+ "cbz x19, 3f\n"
+ "ld1w { z10.s }, p1/Z, [x19, x16, LSL #2]\n"
"3:" // Load mul: End
- "ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
- "cbz x20, 4f\n"
- "ld1w { z11.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+ "cbz x19, 4f\n"
+ "ld1w { z11.s }, p1/Z, [x19, x16, LSL #2]\n"
"4:" // Load right_shift: End
"ldr x15, [%x[args], %[offsetof_Args_input_cols]]\n"
- "sub x20, x15, #0x1\n"
- "orr x23, x20, %x[ld_in_col], LSL #16\n"
+ "sub x19, x15, #0x1\n"
+ "orr x22, x19, %x[ld_in_col], LSL #16\n"
"ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "orr x23, x17, x23, LSL #22\n"
- "mov x22, #0x6\n"
- "add x21, x7, x6\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
+ "orr x22, x17, x22, LSL #22\n"
+ "mov x21, #0x6\n"
+ "add x20, x8, x7\n"
+ "lsl x19, %x[ld_in_row], #0x0\n"
"ldr x13, [%x[args], %[offsetof_Args_output_cols]]\n"
- "mov x8, #0x0\n"
- "lsl x23, x23, #0x0\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x7, x14\n"
+ "mov x11, #0x0\n"
+ "lsl x22, x22, #0x0\n"
+ "sub x21, x21, x20\n"
+ "madd x19, x19, x8, x14\n"
"5:" // Issue prefetches
- "subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col]\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xf8b64a7c // rprfm pldstrm, x22, [x19]\n"
+ "add x19, x19, %x[ld_in_col]\n"
"bgt 5b\n"
- "ldr x25, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "msub x14, x7, x20, x14\n"
- ".inst 0xc0040900 // mova za.d[x8, #0], { z8.d-z9.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
- ".inst 0xc0040901 // mova za.d[x8, #1], { z8.d-z9.d }\n"
- "mov x22, #0x2\n"
- "ldp x11, x10, [x25], #0x10\n"
- ".inst 0xc0040902 // mova za.d[x8, #2], { z8.d-z9.d }\n"
- "ldp x9, x28, [x20], #0x10\n"
- ".inst 0xc0040903 // mova za.d[x8, #3], { z8.d-z9.d }\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
- ".inst 0xc0040904 // mova za.d[x8, #4], { z8.d-z9.d }\n"
- "ldp x27, x26, [x25], #0x10\n"
- ".inst 0xc0040905 // mova za.d[x8, #5], { z8.d-z9.d }\n"
- "ldp x25, x24, [x20], #0x10\n"
- "cbz x21, 7f\n"
- "cmp x21, x22\n"
- "csel x20, x21, x22, LT\n"
- "sub x21, x21, x20\n"
- "sub x22, x22, x20\n"
- "cbz x21, 7f\n"
- ".inst 0xc0060804 // mova { z4.d-z5.d }, za.d[x8, #0]\n"
- "sub x13, x13, x21\n"
- ".inst 0xc0060826 // mova { z6.d-z7.d }, za.d[x8, #1]\n"
+ "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "lsl x19, %x[ld_in_row], #0x0\n"
+ "msub x14, x8, x19, x14\n"
+ ".inst 0xc0046900 // mova za.d[x11, #0], { z8.d-z9.d }\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ ".inst 0xc0046901 // mova za.d[x11, #1], { z8.d-z9.d }\n"
+ "mov x21, #0x2\n"
+ "ldp x10, x9, [x24], #0x10\n"
+ ".inst 0xc0046902 // mova za.d[x11, #2], { z8.d-z9.d }\n"
+ "ldp x28, x27, [x19], #0x10\n"
+ ".inst 0xc0046903 // mova za.d[x11, #3], { z8.d-z9.d }\n"
+ "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+ ".inst 0xc0046904 // mova za.d[x11, #4], { z8.d-z9.d }\n"
+ "ldp x26, x25, [x24], #0x10\n"
+ ".inst 0xc0046905 // mova za.d[x11, #5], { z8.d-z9.d }\n"
+ "ldp x24, x23, [x19], #0x10\n"
+ "cbz x20, 7f\n"
+ "cmp x20, x21\n"
+ "csel x19, x20, x21, LT\n"
+ "sub x20, x20, x19\n"
+ "sub x21, x21, x19\n"
+ "cbz x20, 7f\n"
+ ".inst 0xc0066804 // mova { z4.d-z5.d }, za.d[x11, #0]\n"
+ "sub x13, x13, x20\n"
+ ".inst 0xc0066826 // mova { z6.d-z7.d }, za.d[x11, #1]\n"
".inst 0xc1aaac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
".inst 0xc1abaa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
".inst 0xc1acab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z12.s\n"
".inst 0xc1bacec4 // sclamp { z4.s-z7.s }, z22.s, z26.s\n"
"6:" // Left padding
- "subs x21, x21, #0x1\n"
- "st1b { z4.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- "st1b { z6.s }, p1, [x10]\n"
+ "subs x20, x20, #0x1\n"
+ "st1b { z4.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z5.s }, p1, [x27]\n"
- "add x27, x27, x25\n"
- "st1b { z7.s }, p1, [x26]\n"
+ "st1b { z6.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
+ "st1b { z5.s }, p1, [x26]\n"
"add x26, x26, x24\n"
+ "st1b { z7.s }, p1, [x25]\n"
+ "add x25, x25, x23\n"
"bgt 6b\n"
"7:" // Left padding: End
- "adds XZR, x7, x6\n"
+ "adds XZR, x8, x7\n"
"bne 12f\n"
- "cbz x22, 10f\n"
- "cmp x22, #0x1\n"
- "sub x15, x15, x22\n"
+ "cbz x21, 10f\n"
+ "cmp x21, #0x1\n"
+ "sub x15, x15, x21\n"
"beq 9f\n"
"8:" // Unpadded: 2 priming loads
- "add x21, x14, %x[ld_in_row]\n"
+ "add x20, x14, %x[ld_in_row]\n"
"ld1b { z17.s }, p1/Z, [x14]\n"
- "addvl x20, SP, #8\n"
- "ld1b { z16.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "addvl x19, SP, #8\n"
+ "ld1b { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z13.h, z17.h, z16.h\n"
"add z13.h, z13.h, z24.h\n"
- "ld1b { z17.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1b { z16.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z14.h, z17.h, z16.h\n"
"add z14.h, z14.h, z24.h\n"
- "ld1b { z17.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "ld1b { z16.s }, p1/Z, [x21]\n"
+ "ld1b { z17.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p1/Z, [x20]\n"
"trn1 z15.h, z17.h, z16.h\n"
"add z15.h, z15.h, z24.h\n"
- ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc16115a8 // sdot za.s[x8, 0], { z13.h-z14.h }, z1.h\n"
- ".inst 0xc16015a9 // sdot za.s[x8, 1], { z13.h-z14.h }, z0.h\n"
- ".inst 0xa0412a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16315c8 // sdot za.s[x8, 0], { z14.h-z15.h }, z3.h\n"
- ".inst 0xc16215c9 // sdot za.s[x8, 1], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xa0402a60 // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc16175a8 // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+ ".inst 0xc16075a9 // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xa0412a62 // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+ ".inst 0xc16375c8 // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+ ".inst 0xc16275c9 // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
"9:" // Unpadded: 1 priming loads
- "add x22, x14, %x[ld_in_row]\n"
+ "add x21, x14, %x[ld_in_row]\n"
"ld1b { z17.s }, p1/Z, [x14]\n"
- "addvl x21, SP, #4\n"
- "ld1b { z16.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "addvl x20, SP, #4\n"
+ "ld1b { z16.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
"trn1 z13.h, z17.h, z16.h\n"
"add z13.h, z13.h, z24.h\n"
- "ld1b { z17.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "addvl x20, SP, #8\n"
- "ld1b { z16.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "addvl x19, SP, #8\n"
+ "ld1b { z16.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
"trn1 z14.h, z17.h, z16.h\n"
"add z14.h, z14.h, z24.h\n"
- "ld1b { z17.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1b { z16.s }, p1/Z, [x22]\n"
+ "ld1b { z16.s }, p1/Z, [x21]\n"
"trn1 z15.h, z17.h, z16.h\n"
"add z15.h, z15.h, z24.h\n"
- ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc16115a8 // sdot za.s[x8, 0], { z13.h-z14.h }, z1.h\n"
- ".inst 0xc16015a9 // sdot za.s[x8, 1], { z13.h-z14.h }, z0.h\n"
".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
- ".inst 0xa0412aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc16115aa // sdot za.s[x8, 2], { z13.h-z14.h }, z1.h\n"
- ".inst 0xc16015ab // sdot za.s[x8, 3], { z13.h-z14.h }, z0.h\n"
- ".inst 0xc16315c8 // sdot za.s[x8, 0], { z14.h-z15.h }, z3.h\n"
- ".inst 0xc16215c9 // sdot za.s[x8, 1], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xc16175a8 // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+ ".inst 0xc16075a9 // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xa0402a60 // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
".inst 0xa0412a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16315ca // sdot za.s[x8, 2], { z14.h-z15.h }, z3.h\n"
- ".inst 0xc16215cb // sdot za.s[x8, 3], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xc16175aa // sdot za.s[x11, 2], { z13.h-z14.h }, z1.h\n"
+ ".inst 0xc16075ab // sdot za.s[x11, 3], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xc16375c8 // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+ ".inst 0xc16275c9 // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xa0412a62 // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+ ".inst 0xc16375ca // sdot za.s[x11, 2], { z14.h-z15.h }, z3.h\n"
+ ".inst 0xc16275cb // sdot za.s[x11, 3], { z14.h-z15.h }, z2.h\n"
"10:" // Unpadded: 0 priming loads
".inst 0xa0402be0 // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
".inst 0xa0412be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
"cbz x15, 18f\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "add x19, x14, %x[ld_in_row]\n"
"ld1b { z17.s }, p1/Z, [x14]\n"
"sub x15, x15, #0x1\n"
- "ld1b { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z13.h, z17.h, z16.h\n"
"sub x13, x13, #0x1\n"
- "ld1b { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"cmp x15, x13\n"
"add z13.h, z13.h, z24.h\n"
- "ld1b { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z14.h, z17.h, z16.h\n"
- "csel x23, x15, x13, LT\n"
- "ld1b { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "csel x22, x15, x13, LT\n"
+ "ld1b { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"add z14.h, z14.h, z24.h\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1b { z16.s }, p1/Z, [x20]\n"
+ "ld1b { z16.s }, p1/Z, [x19]\n"
"trn1 z15.h, z17.h, z16.h\n"
"add z15.h, z15.h, z24.h\n"
- "sub x13, x13, x23\n"
- "cbz x23, 17f\n"
+ "sub x13, x13, x22\n"
+ "cbz x22, 17f\n"
"11:" // Unpadded: Main loop
- ".inst 0xc16115a8 // sdot za.s[x8, 0], { z13.h-z14.h }, z1.h\n"
- "addvl x22, SP, #4\n"
- "addvl x21, SP, #8\n"
+ ".inst 0xc16175a8 // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+ "addvl x21, SP, #4\n"
+ "addvl x20, SP, #8\n"
"ld1b { z21.s }, p1/Z, [x14]\n"
- ".inst 0xc16015a9 // sdot za.s[x8, 1], { z13.h-z14.h }, z0.h\n"
- ".inst 0xa0402ac0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x22]\n"
- "add x20, x14, %x[ld_in_row]\n"
- "subs x23, x23, #0x1\n"
- ".inst 0xc16315c8 // sdot za.s[x8, 0], { z14.h-z15.h }, z3.h\n"
- "ld1b { z20.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc16075a9 // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
+ "add x19, x14, %x[ld_in_row]\n"
+ "subs x22, x22, #0x1\n"
+ ".inst 0xc16375c8 // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+ "ld1b { z20.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc16215c9 // sdot za.s[x8, 1], { z14.h-z15.h }, z2.h\n"
- ".inst 0xa0412ac2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xc0060804 // mova { z4.d-z5.d }, za.d[x8, #0]\n"
- "ld1b { z19.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc0060826 // mova { z6.d-z7.d }, za.d[x8, #1]\n"
+ ".inst 0xc16275c9 // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xa0412aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xc0066804 // mova { z4.d-z5.d }, za.d[x11, #0]\n"
+ "ld1b { z19.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0xc0066826 // mova { z6.d-z7.d }, za.d[x11, #1]\n"
".inst 0xc1aaac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
- ".inst 0xc16115aa // sdot za.s[x8, 2], { z13.h-z14.h }, z1.h\n"
- "ld1b { z18.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc16015ab // sdot za.s[x8, 3], { z13.h-z14.h }, z0.h\n"
- ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xc16175aa // sdot za.s[x11, 2], { z13.h-z14.h }, z1.h\n"
+ "ld1b { z18.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0xc16075ab // sdot za.s[x11, 3], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
".inst 0xc1abaa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
- ".inst 0xc16115ac // sdot za.s[x8, 4], { z13.h-z14.h }, z1.h\n"
- "ld1b { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc16015ad // sdot za.s[x8, 5], { z13.h-z14.h }, z0.h\n"
- "ld1b { z16.s }, p1/Z, [x20]\n"
+ ".inst 0xc16175ac // sdot za.s[x11, 4], { z13.h-z14.h }, z1.h\n"
+ "ld1b { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0xc16075ad // sdot za.s[x11, 5], { z13.h-z14.h }, z0.h\n"
+ "ld1b { z16.s }, p1/Z, [x19]\n"
".inst 0xc1acab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z12.s\n"
- ".inst 0xc16315ca // sdot za.s[x8, 2], { z14.h-z15.h }, z3.h\n"
+ ".inst 0xc16375ca // sdot za.s[x11, 2], { z14.h-z15.h }, z3.h\n"
"trn1 z13.h, z21.h, z20.h\n"
".inst 0xa0402be0 // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
- ".inst 0xc16215cb // sdot za.s[x8, 3], { z14.h-z15.h }, z2.h\n"
- ".inst 0xa0412aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xc16275cb // sdot za.s[x11, 3], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xa0412a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
".inst 0xc1bacec4 // sclamp { z4.s-z7.s }, z22.s, z26.s\n"
- ".inst 0xc16315cc // sdot za.s[x8, 4], { z14.h-z15.h }, z3.h\n"
- "st1b { z4.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
+ ".inst 0xc16375cc // sdot za.s[x11, 4], { z14.h-z15.h }, z3.h\n"
+ "st1b { z4.s }, p1, [x10]\n"
+ "add x10, x10, x28\n"
"add z13.h, z13.h, z24.h\n"
- ".inst 0xc16215cd // sdot za.s[x8, 5], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xc16275cd // sdot za.s[x11, 5], { z14.h-z15.h }, z2.h\n"
"trn1 z14.h, z19.h, z18.h\n"
"trn1 z15.h, z17.h, z16.h\n"
- "add x8, x8, #0x2\n"
+ "add x11, x11, #0x2\n"
".inst 0xa0412be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
- "st1b { z6.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
- ".inst 0xc0040904 // mova za.d[x8, #4], { z8.d-z9.d }\n"
- "st1b { z5.s }, p1, [x27]\n"
- "add x27, x27, x25\n"
- ".inst 0xc0040905 // mova za.d[x8, #5], { z8.d-z9.d }\n"
- "add z14.h, z14.h, z24.h\n"
- "st1b { z7.s }, p1, [x26]\n"
+ "st1b { z6.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
+ ".inst 0xc0046904 // mova za.d[x11, #4], { z8.d-z9.d }\n"
+ "st1b { z5.s }, p1, [x26]\n"
"add x26, x26, x24\n"
+ ".inst 0xc0046905 // mova za.d[x11, #5], { z8.d-z9.d }\n"
+ "add z14.h, z14.h, z24.h\n"
+ "st1b { z7.s }, p1, [x25]\n"
+ "add x25, x25, x23\n"
"add z15.h, z15.h, z24.h\n"
"bgt 11b\n"
"b 17f\n"
"12:" // Padded
- "cbz x22, 15f\n"
- "cmp x22, #0x1\n"
- "sub x15, x15, x22\n"
+ "cbz x21, 15f\n"
+ "cmp x21, #0x1\n"
+ "sub x15, x15, x21\n"
"beq 14f\n"
"13:" // Padded: 2 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"ld1b { z19.s }, p0/Z, [x14]\n"
"add z19.h, p0/M, z19.h, z24.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "add x19, x14, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z24.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z24.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z24.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z13.h, z19.h, z18.h\n"
"trn1 z14.h, z17.h, z16.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z24.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "addvl x20, SP, #8\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
+ "addvl x19, SP, #8\n"
"add z16.h, p0/M, z16.h, z24.h\n"
- ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa0402a60 // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
"trn1 z15.h, z17.h, z16.h\n"
- ".inst 0xc16115a8 // sdot za.s[x8, 0], { z13.h-z14.h }, z1.h\n"
+ ".inst 0xc16175a8 // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
"add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc16015a9 // sdot za.s[x8, 1], { z13.h-z14.h }, z0.h\n"
- ".inst 0xa0412a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16315c8 // sdot za.s[x8, 0], { z14.h-z15.h }, z3.h\n"
- ".inst 0xc16215c9 // sdot za.s[x8, 1], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xc16075a9 // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xa0412a62 // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+ ".inst 0xc16375c8 // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+ ".inst 0xc16275c9 // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
"14:" // Padded: 1 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"ld1b { z19.s }, p0/Z, [x14]\n"
"add z19.h, p0/M, z19.h, z24.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "add x19, x14, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z24.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z24.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z24.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z13.h, z19.h, z18.h\n"
"trn1 z14.h, z17.h, z16.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z24.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "addvl x21, SP, #4\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
+ "addvl x20, SP, #4\n"
"add z16.h, p0/M, z16.h, z24.h\n"
- ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
- "addvl x20, SP, #8\n"
- "trn1 z15.h, z17.h, z16.h\n"
- ".inst 0xc16115a8 // sdot za.s[x8, 0], { z13.h-z14.h }, z1.h\n"
- ".inst 0xc16015a9 // sdot za.s[x8, 1], { z13.h-z14.h }, z0.h\n"
".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ "addvl x19, SP, #8\n"
+ "trn1 z15.h, z17.h, z16.h\n"
+ ".inst 0xc16175a8 // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+ ".inst 0xc16075a9 // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xa0402a60 // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
"add x14, x14, %x[ld_in_col]\n"
- ".inst 0xa0412aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc16115aa // sdot za.s[x8, 2], { z13.h-z14.h }, z1.h\n"
- ".inst 0xc16015ab // sdot za.s[x8, 3], { z13.h-z14.h }, z0.h\n"
- ".inst 0xc16315c8 // sdot za.s[x8, 0], { z14.h-z15.h }, z3.h\n"
- ".inst 0xc16215c9 // sdot za.s[x8, 1], { z14.h-z15.h }, z2.h\n"
".inst 0xa0412a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16315ca // sdot za.s[x8, 2], { z14.h-z15.h }, z3.h\n"
- ".inst 0xc16215cb // sdot za.s[x8, 3], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xc16175aa // sdot za.s[x11, 2], { z13.h-z14.h }, z1.h\n"
+ ".inst 0xc16075ab // sdot za.s[x11, 3], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xc16375c8 // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+ ".inst 0xc16275c9 // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xa0412a62 // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+ ".inst 0xc16375ca // sdot za.s[x11, 2], { z14.h-z15.h }, z3.h\n"
+ ".inst 0xc16275cb // sdot za.s[x11, 3], { z14.h-z15.h }, z2.h\n"
"15:" // Padded: 0 priming loads
".inst 0xa0402be0 // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
".inst 0xa0412be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
@@ -469,192 +469,192 @@ void sme2_u8q_planar_3x3_s1_4rows_dot_za_impl(
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"ld1b { z19.s }, p0/Z, [x14]\n"
"add z19.h, p0/M, z19.h, z24.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "add x19, x14, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z24.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z24.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z24.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z13.h, z19.h, z18.h\n"
"trn1 z14.h, z17.h, z16.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z24.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z24.h\n"
"sub x15, x15, #0x1\n"
"sub x13, x13, #0x1\n"
"cmp x15, x13\n"
"trn1 z15.h, z17.h, z16.h\n"
- "csel x23, x15, x13, LT\n"
+ "csel x22, x15, x13, LT\n"
"add x14, x14, %x[ld_in_col]\n"
- "sub x13, x13, x23\n"
- "cbz x23, 17f\n"
+ "sub x13, x13, x22\n"
+ "cbz x22, 17f\n"
"16:" // Padded: Main loop
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"ld1b { z21.s }, p0/Z, [x14]\n"
- ".inst 0xc16115a8 // sdot za.s[x8, 0], { z13.h-z14.h }, z1.h\n"
- ".inst 0xc16015a9 // sdot za.s[x8, 1], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xc16175a8 // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+ ".inst 0xc16075a9 // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
"add z21.h, p0/M, z21.h, z24.h\n"
- "add x22, x14, %x[ld_in_row]\n"
+ "add x21, x14, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z20.s }, p0/Z, [x22]\n"
- ".inst 0xc16315c8 // sdot za.s[x8, 0], { z14.h-z15.h }, z3.h\n"
+ "ld1b { z20.s }, p0/Z, [x21]\n"
+ ".inst 0xc16375c8 // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
"add z20.h, p0/M, z20.h, z24.h\n"
- "add x22, x22, %x[ld_in_row]\n"
- ".inst 0xc16215c9 // sdot za.s[x8, 1], { z14.h-z15.h }, z2.h\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ ".inst 0xc16275c9 // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z19.s }, p0/Z, [x22]\n"
+ "ld1b { z19.s }, p0/Z, [x21]\n"
"add z19.h, p0/M, z19.h, z24.h\n"
- ".inst 0xc0060804 // mova { z4.d-z5.d }, za.d[x8, #0]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc0066804 // mova { z4.d-z5.d }, za.d[x11, #0]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z18.s }, p0/Z, [x22]\n"
- ".inst 0xc0060826 // mova { z6.d-z7.d }, za.d[x8, #1]\n"
+ "ld1b { z18.s }, p0/Z, [x21]\n"
+ ".inst 0xc0066826 // mova { z6.d-z7.d }, za.d[x11, #1]\n"
"mov x12, #0x4\n"
- "addvl x21, SP, #4\n"
+ "addvl x20, SP, #4\n"
"add z18.h, p0/M, z18.h, z24.h\n"
".inst 0xc1aaac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
- "addvl x20, SP, #8\n"
- ".inst 0xc16115aa // sdot za.s[x8, 2], { z13.h-z14.h }, z1.h\n"
- "subs x23, x23, #0x1\n"
- "ld1b { z17.s }, p0/Z, [x22]\n"
- ".inst 0xc16015ab // sdot za.s[x8, 3], { z13.h-z14.h }, z0.h\n"
- ".inst 0xc1abaa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ "addvl x19, SP, #8\n"
+ ".inst 0xc16175aa // sdot za.s[x11, 2], { z13.h-z14.h }, z1.h\n"
+ "subs x22, x22, #0x1\n"
+ "ld1b { z17.s }, p0/Z, [x21]\n"
+ ".inst 0xc16075ab // sdot za.s[x11, 3], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xc1abaa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
+ ".inst 0xa0402a60 // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z24.h\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- ".inst 0xa0412aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc16115ac // sdot za.s[x8, 4], { z13.h-z14.h }, z1.h\n"
+ ".inst 0xa0412a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc16175ac // sdot za.s[x11, 4], { z13.h-z14.h }, z1.h\n"
".inst 0xc1acab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z12.s\n"
- "ld1b { z16.s }, p0/Z, [x22]\n"
- ".inst 0xc16015ad // sdot za.s[x8, 5], { z13.h-z14.h }, z0.h\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
+ ".inst 0xc16075ad // sdot za.s[x11, 5], { z13.h-z14.h }, z0.h\n"
"add z16.h, p0/M, z16.h, z24.h\n"
"add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc16315ca // sdot za.s[x8, 2], { z14.h-z15.h }, z3.h\n"
+ ".inst 0xc16375ca // sdot za.s[x11, 2], { z14.h-z15.h }, z3.h\n"
".inst 0xa0402be0 // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
".inst 0xc1bacec4 // sclamp { z4.s-z7.s }, z22.s, z26.s\n"
- ".inst 0xc16215cb // sdot za.s[x8, 3], { z14.h-z15.h }, z2.h\n"
- ".inst 0xa0412a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- "st1b { z4.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- ".inst 0xc16315cc // sdot za.s[x8, 4], { z14.h-z15.h }, z3.h\n"
- "st1b { z6.s }, p1, [x10]\n"
+ ".inst 0xc16275cb // sdot za.s[x11, 3], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xa0412a62 // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+ "st1b { z4.s }, p1, [x10]\n"
"add x10, x10, x28\n"
+ ".inst 0xc16375cc // sdot za.s[x11, 4], { z14.h-z15.h }, z3.h\n"
+ "st1b { z6.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
"trn1 z13.h, z21.h, z20.h\n"
- ".inst 0xc16215cd // sdot za.s[x8, 5], { z14.h-z15.h }, z2.h\n"
- "add x8, x8, #0x2\n"
+ ".inst 0xc16275cd // sdot za.s[x11, 5], { z14.h-z15.h }, z2.h\n"
+ "add x11, x11, #0x2\n"
".inst 0xa0412be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
- "st1b { z5.s }, p1, [x27]\n"
- "add x27, x27, x25\n"
- "st1b { z7.s }, p1, [x26]\n"
+ "st1b { z5.s }, p1, [x26]\n"
"add x26, x26, x24\n"
- ".inst 0xc0040904 // mova za.d[x8, #4], { z8.d-z9.d }\n"
- ".inst 0xc0040905 // mova za.d[x8, #5], { z8.d-z9.d }\n"
+ "st1b { z7.s }, p1, [x25]\n"
+ "add x25, x25, x23\n"
+ ".inst 0xc0046904 // mova za.d[x11, #4], { z8.d-z9.d }\n"
+ ".inst 0xc0046905 // mova za.d[x11, #5], { z8.d-z9.d }\n"
"trn1 z14.h, z19.h, z18.h\n"
"trn1 z15.h, z17.h, z16.h\n"
"bgt 16b\n"
"17:" // Main loop tail
- ".inst 0xc16115a8 // sdot za.s[x8, 0], { z13.h-z14.h }, z1.h\n"
- "addvl x21, SP, #4\n"
- "addvl x20, SP, #8\n"
- ".inst 0xc16015a9 // sdot za.s[x8, 1], { z13.h-z14.h }, z0.h\n"
- ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc16315c8 // sdot za.s[x8, 0], { z14.h-z15.h }, z3.h\n"
- ".inst 0xc16215c9 // sdot za.s[x8, 1], { z14.h-z15.h }, z2.h\n"
- ".inst 0xa0412aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc0060804 // mova { z4.d-z5.d }, za.d[x8, #0]\n"
- ".inst 0xc0060826 // mova { z6.d-z7.d }, za.d[x8, #1]\n"
+ ".inst 0xc16175a8 // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+ "addvl x20, SP, #4\n"
+ "addvl x19, SP, #8\n"
+ ".inst 0xc16075a9 // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc16375c8 // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+ ".inst 0xc16275c9 // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xa0412a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc0066804 // mova { z4.d-z5.d }, za.d[x11, #0]\n"
+ ".inst 0xc0066826 // mova { z6.d-z7.d }, za.d[x11, #1]\n"
".inst 0xc1aaac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
- ".inst 0xc16115aa // sdot za.s[x8, 2], { z13.h-z14.h }, z1.h\n"
+ ".inst 0xc16175aa // sdot za.s[x11, 2], { z13.h-z14.h }, z1.h\n"
".inst 0xc1abaa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
- ".inst 0xc16015ab // sdot za.s[x8, 3], { z13.h-z14.h }, z0.h\n"
- ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc16075ab // sdot za.s[x11, 3], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xa0402a60 // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
".inst 0xc1acab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z12.s\n"
- ".inst 0xc16115ac // sdot za.s[x8, 4], { z13.h-z14.h }, z1.h\n"
+ ".inst 0xc16175ac // sdot za.s[x11, 4], { z13.h-z14.h }, z1.h\n"
".inst 0xc1bacec4 // sclamp { z4.s-z7.s }, z22.s, z26.s\n"
- "st1b { z4.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- ".inst 0xc16015ad // sdot za.s[x8, 5], { z13.h-z14.h }, z0.h\n"
- "st1b { z6.s }, p1, [x10]\n"
+ "st1b { z4.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- ".inst 0xc16315ca // sdot za.s[x8, 2], { z14.h-z15.h }, z3.h\n"
- "st1b { z5.s }, p1, [x27]\n"
- "add x27, x27, x25\n"
- ".inst 0xc16215cb // sdot za.s[x8, 3], { z14.h-z15.h }, z2.h\n"
- ".inst 0xa0412a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- "st1b { z7.s }, p1, [x26]\n"
+ ".inst 0xc16075ad // sdot za.s[x11, 5], { z13.h-z14.h }, z0.h\n"
+ "st1b { z6.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
+ ".inst 0xc16375ca // sdot za.s[x11, 2], { z14.h-z15.h }, z3.h\n"
+ "st1b { z5.s }, p1, [x26]\n"
"add x26, x26, x24\n"
- ".inst 0xc16315cc // sdot za.s[x8, 4], { z14.h-z15.h }, z3.h\n"
- ".inst 0xc16215cd // sdot za.s[x8, 5], { z14.h-z15.h }, z2.h\n"
- "add x8, x8, #0x2\n"
- ".inst 0xc0040904 // mova za.d[x8, #4], { z8.d-z9.d }\n"
- ".inst 0xc0040905 // mova za.d[x8, #5], { z8.d-z9.d }\n"
+ ".inst 0xc16275cb // sdot za.s[x11, 3], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xa0412a62 // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+ "st1b { z7.s }, p1, [x25]\n"
+ "add x25, x25, x23\n"
+ ".inst 0xc16375cc // sdot za.s[x11, 4], { z14.h-z15.h }, z3.h\n"
+ ".inst 0xc16275cd // sdot za.s[x11, 5], { z14.h-z15.h }, z2.h\n"
+ "add x11, x11, #0x2\n"
+ ".inst 0xc0046904 // mova za.d[x11, #4], { z8.d-z9.d }\n"
+ ".inst 0xc0046905 // mova za.d[x11, #5], { z8.d-z9.d }\n"
"18:" // Main loop skip tail
"cbz x13, 20f\n"
"19:" // Right padding loop
- ".inst 0xc0060804 // mova { z4.d-z5.d }, za.d[x8, #0]\n"
+ ".inst 0xc0066804 // mova { z4.d-z5.d }, za.d[x11, #0]\n"
"subs x13, x13, #0x1\n"
- ".inst 0xc0060826 // mova { z6.d-z7.d }, za.d[x8, #1]\n"
+ ".inst 0xc0066826 // mova { z6.d-z7.d }, za.d[x11, #1]\n"
".inst 0xc1aaac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
- "add x8, x8, #0x2\n"
+ "add x11, x11, #0x2\n"
".inst 0xc1abaa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
- ".inst 0xc0040904 // mova za.d[x8, #4], { z8.d-z9.d }\n"
+ ".inst 0xc0046904 // mova za.d[x11, #4], { z8.d-z9.d }\n"
".inst 0xc1acab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z12.s\n"
- ".inst 0xc0040905 // mova za.d[x8, #5], { z8.d-z9.d }\n"
+ ".inst 0xc0046905 // mova za.d[x11, #5], { z8.d-z9.d }\n"
".inst 0xc1bacec4 // sclamp { z4.s-z7.s }, z22.s, z26.s\n"
- "st1b { z4.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- "st1b { z6.s }, p1, [x10]\n"
+ "st1b { z4.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z5.s }, p1, [x27]\n"
- "add x27, x27, x25\n"
- "st1b { z7.s }, p1, [x26]\n"
+ "st1b { z6.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
+ "st1b { z5.s }, p1, [x26]\n"
"add x26, x26, x24\n"
+ "st1b { z7.s }, p1, [x25]\n"
+ "add x25, x25, x23\n"
"bgt 19b\n"
"20:" // End
- "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
- "incw x22, ALL, MUL #9\n"
- "str x22, [%x[args], %[offsetof_Args_weights]]\n"
+ "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+ "incw x21, ALL, MUL #9\n"
+ "str x21, [%x[args], %[offsetof_Args_weights]]\n"
"incw x16\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
"whilelt p1.s, x16, x17\n"
"ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "add x14, x14, x20\n"
+ "add x14, x14, x19\n"
"str x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "ldr x25, [%x[args], %[offsetof_Args_outptrs]]\n"
- "ldr x24, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
- "ldp x23, x22, [x25, #0x0]\n"
- "ldp x21, x20, [x24, #0x0]\n"
- "add x23, x23, x21\n"
+ "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+ "ldp x22, x21, [x24, #0x0]\n"
+ "ldp x20, x19, [x23, #0x0]\n"
"add x22, x22, x20\n"
- "stp x23, x22, [x25, #0x0]\n"
- "ldp x23, x22, [x25, #0x10]\n"
- "ldp x21, x20, [x24, #0x10]\n"
- "add x23, x23, x21\n"
+ "add x21, x21, x19\n"
+ "stp x22, x21, [x24, #0x0]\n"
+ "ldp x22, x21, [x24, #0x10]\n"
+ "ldp x20, x19, [x23, #0x10]\n"
"add x22, x22, x20\n"
- "stp x23, x22, [x25, #0x10]\n"
+ "add x21, x21, x19\n"
+ "stp x22, x21, [x24, #0x10]\n"
"b.any 1b\n"
"addvl SP, SP, #12\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
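
Beyond the register renaming, each kernel passes its parameters through the operand-list pattern visible above: a pointer to an Args structure travels in a general register, and every field offset is baked in as an "I" immediate via offsetof, so the assembly addresses fields as [base, #offset]. A minimal sketch of that pattern, with hypothetical struct and function names, assuming GCC extended asm on AArch64:

#include <cstddef>
#include <cstdint>

struct Params
{
    int64_t value;
    int64_t scale;
};

int64_t load_scale(const Params &p)
{
#if defined(__aarch64__)
    int64_t out;
    __asm__ __volatile__(
        "ldr %x[out], [%x[params], %[off_scale]]\n"  // field load via base + immediate
        : [out] "=r" (out)
        : [params] "r" (&p),
          [off_scale] "I" (offsetof(Params, scale))  // compile-time constant offset
        : "memory"
    );
    return out;
#else
    return p.scale;
#endif
}

The "I" constraint requires the offset to be a valid arithmetic immediate, which is why the kernels compute every offsetof at compile time rather than loading offsets from memory.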
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s2_2rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s2_2rows_dot_za/generic.cpp
new file mode 100644
index 0000000000..9a0840cfc4
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s2_2rows_dot_za/generic.cpp
@@ -0,0 +1,592 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <algorithm>
+#include <cstddef>
+#include "arm_gemm.hpp"
+
+using arm_gemm::Requantize32;
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_u8q_planar_3x3_s2_2rows_dot_za_impl(
+ const uint8_t *inptr,
+ size_t ld_in_row,
+ size_t ld_in_col,
+ unsigned int pad_top,
+ unsigned int valid_input_rows,
+ unsigned int pad_left,
+ unsigned int valid_input_cols,
+ const uint8_t *weights,
+ uint8_t **outptrs,
+ const size_t *outlds,
+ unsigned int output_cols,
+ unsigned int start_channel,
+ unsigned int valid_channels,
+ const arm_gemm::Requantize32 &qp
+)
+{
+ struct Args
+ {
+ const uint8_t *inptr;
+ long unsigned int pad_top, pad_bottom, pad_left;
+ const uint8_t *weights;
+ long unsigned int input_cols, output_cols;
+ uint8_t **outptrs;
+ const size_t *ld_out_cols;
+ long unsigned int n, n_channels;
+ };
+
+ Args args = { inptr, pad_top, 5u - std::min(5u, pad_top + valid_input_rows), pad_left, weights, valid_input_cols, output_cols, outptrs, outlds, start_channel, valid_channels };
+
+ __asm__ __volatile__(
+ "ldr x11, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "mov x19, #0x5\n"
+ ".inst 0xd503477f // SMSTART ZA\n"
+ "sub x19, x19, x11\n"
+ "ldr x10, [%x[args], %[offsetof_Args_pad_top]]\n"
+ "ptrue p0.b\n"
+ "mov z12.s, #0x0\n"
+ "ldr x22, [%x[args], %[offsetof_Args_n_channels]]\n"
+ "whilelt p5.s, XZR, x22\n"
+ "whilelt p9.s, XZR, x19\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_bias]]\n"
+ "whilelt p8.s, XZR, x10\n"
+ "eor p8.b, p0/Z, p8.b, p9.b\n"
+ "ldr x21, [%x[args], %[offsetof_Args_n]]\n"
+ "cbz x19, 1f\n"
+ "ld1w { z12.s }, p5/Z, [x19, x21, LSL #2]\n"
+ "1:" // Load bias: Done
+ "ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
+ "ld1b { z27.s }, p0/Z, [x20]\n"
+ "incw x20\n"
+ "mov z0.h, #0x0\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
+ "incw x20\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+ "mov z13.d, z12.d\n"
+ "ld1b { z22.s }, p0/Z, [x20]\n"
+ "incw x20\n"
+ "ld1b { z21.s }, p0/Z, [x20]\n"
+ "incw x20\n"
+ "ld1b { z20.s }, p0/Z, [x20]\n"
+ "incw x20\n"
+ "ld1b { z18.s }, p0/Z, [x20]\n"
+ "incw x20\n"
+ "ld1b { z17.s }, p0/Z, [x20]\n"
+ "incw x20\n"
+ "ld1b { z24.s }, p0/Z, [x20]\n"
+ "incw x20\n"
+ "ld1b { z19.s }, p0/Z, [x20]\n"
+ "ld1rh { z28.h }, p0/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+ "sub z27.h, z27.h, z28.h\n"
+ "sub z16.h, z16.h, z28.h\n"
+ "sub z22.h, z22.h, z28.h\n"
+ "sub z21.h, z21.h, z28.h\n"
+ "trn1 z8.h, z27.h, z21.h\n"
+ "sub z20.h, z20.h, z28.h\n"
+ "sub z18.h, z18.h, z28.h\n"
+ "trn1 z7.h, z16.h, z20.h\n"
+ "sub z17.h, z17.h, z28.h\n"
+ "sub z24.h, z24.h, z28.h\n"
+ "trn1 z6.h, z17.h, z0.h\n"
+ "sub z19.h, z19.h, z28.h\n"
+ "trn1 z5.h, z24.h, z0.h\n"
+ "trn1 z4.h, z22.h, z18.h\n"
+ "trn1 z3.h, z19.h, z0.h\n"
+ "ld1rh { z21.h }, p0/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
+ "ld1rw { z2.s }, p0/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+ "ld1rw { z1.s }, p0/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
+ "cbz x19, 2f\n"
+ "ld1w { z1.s }, p5/Z, [x19, x21, LSL #2]\n"
+ "2:" // Load mul: End
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+ "ld1rw { z0.s }, p0/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+ "cbz x19, 3f\n"
+ "ld1w { z0.s }, p5/Z, [x19, x21, LSL #2]\n"
+ "3:" // Load right_shift: End
+ "ldr x28, [%x[args], %[offsetof_Args_input_cols]]\n"
+ "orr x21, x28, %x[ld_in_col], LSL #16\n"
+ "orr x21, x22, x21, LSL #22\n"
+ "ld1rw { z20.s }, p0/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
+ "ldr x27, [%x[args], %[offsetof_Args_inptr]]\n"
+ "mov x20, #0x5\n"
+ "add x19, x10, x11\n"
+ "ld1rw { z19.s }, p0/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+ "mov x9, #0x0\n"
+ "ldr x26, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "lsl x21, x21, #0x0\n"
+ "sub x20, x20, x19\n"
+ "mov x19, x27\n"
+ "4:" // Issue prefetches
+ "subs x20, x20, #0x1\n"
+ ".inst 0xf8b54a7c // rprfm pldstrm, x21, [x19]\n"
+ "add x19, x19, %x[ld_in_col]\n"
+ "bgt 4b\n"
+ "ldr x21, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "lsl x19, %x[ld_in_row], #0x0\n"
+ "msub x27, x10, x19, x27\n"
+ ".inst 0xc0042980 // mova za.d[x9, #0], { z12.d-z13.d }\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ ".inst 0xc0042981 // mova za.d[x9, #1], { z12.d-z13.d }\n"
+ "mov x25, #0x2\n"
+ "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+ ".inst 0xc0042982 // mova za.d[x9, #2], { z12.d-z13.d }\n"
+ "ldp x24, x23, [x21], #0x10\n"
+ "ldp x22, x21, [x19], #0x10\n"
+ "cbz x20, 6f\n"
+ "cmp x20, x25\n"
+ "csel x19, x20, x25, LT\n"
+ "sub x20, x20, x19\n"
+ "sub x25, x25, x19\n"
+ "cbz x20, 6f\n"
+ ".inst 0xc0062818 // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+ ".inst 0xc1a1a418 // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+ "and x25, x20, #0x1\n"
+ ".inst 0xc1a0a238 // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+ "add x20, x20, #0x1\n"
+ "lsr x20, x20, #0x1\n"
+ ".inst 0xc1a2a318 // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+ "sub x26, x26, x20\n"
+ ".inst 0xc1b3c698 // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+ "5:" // Left padding
+ "subs x20, x20, #0x1\n"
+ "st1b { z24.s }, p5, [x24]\n"
+ "add x24, x24, x22\n"
+ "st1b { z25.s }, p5, [x23]\n"
+ "add x23, x23, x21\n"
+ "bgt 5b\n"
+ "6:" // Left padding: End
+ "adds XZR, x10, x11\n"
+ "bne 11f\n"
+ "cbz x25, 9f\n"
+ "cmp x25, #0x1\n"
+ "sub x28, x28, x25\n"
+ "beq 8f\n"
+ "7:" // Unpadded: 2 priming loads
+ "add x19, x27, %x[ld_in_row]\n"
+ "ld1b { z14.s }, p5/Z, [x27]\n"
+ "sub z14.h, z14.h, z21.h\n"
+ "add x27, x27, %x[ld_in_col]\n"
+ "ld1b { z18.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub z18.h, z18.h, z21.h\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "ld1b { z15.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub z15.h, z15.h, z21.h\n"
+ "ld1b { z17.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub z17.h, z17.h, z21.h\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "ld1b { z16.s }, p5/Z, [x19]\n"
+ "sub z16.h, z16.h, z21.h\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc16835c8 // sdot za.s[x9, 0], { z14.h-z15.h }, z8.h\n"
+ ".inst 0xc16635e8 // sdot za.s[x9, 0], { z15.h-z16.h }, z6.h\n"
+ "8:" // Unpadded: 1 priming loads
+ "add x19, x27, %x[ld_in_row]\n"
+ "ld1b { z14.s }, p5/Z, [x27]\n"
+ "sub z14.h, z14.h, z21.h\n"
+ "add x27, x27, %x[ld_in_col]\n"
+ "ld1b { z18.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub z18.h, z18.h, z21.h\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "ld1b { z15.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub z15.h, z15.h, z21.h\n"
+ "ld1b { z17.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub z17.h, z17.h, z21.h\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "ld1b { z16.s }, p5/Z, [x19]\n"
+ "sub z16.h, z16.h, z21.h\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc16735c8 // sdot za.s[x9, 0], { z14.h-z15.h }, z7.h\n"
+ ".inst 0xc16535e8 // sdot za.s[x9, 0], { z15.h-z16.h }, z5.h\n"
+ "9:" // Unpadded: 0 priming loads
+ "add x20, x27, %x[ld_in_row]\n"
+ "ld1b { z14.s }, p5/Z, [x27]\n"
+ "sub z14.h, z14.h, z21.h\n"
+ "sub x28, x28, #0x2\n"
+ "ld1b { z18.s }, p5/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "sub z18.h, z18.h, z21.h\n"
+ "sub x26, x26, #0x1\n"
+ "ld1b { z15.s }, p5/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "sub z15.h, z15.h, z21.h\n"
+ "lsr x19, x28, #0x1\n"
+ "ld1b { z17.s }, p5/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "sub z17.h, z17.h, z21.h\n"
+ "cmp x19, x26\n"
+ "ld1b { z16.s }, p5/Z, [x20]\n"
+ "sub z16.h, z16.h, z21.h\n"
+ "csel x20, x19, x26, LT\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "mov z16.d, z16.d\n"
+ "add x27, x27, %x[ld_in_col]\n"
+ "and x28, x28, #0x1\n"
+ "sub x26, x26, x20\n"
+ "cbz x20, 16f\n"
+ "10:" // Unpadded: Main loop
+ ".inst 0xc16435c8 // sdot za.s[x9, 0], { z14.h-z15.h }, z4.h\n"
+ "add x19, x27, %x[ld_in_row]\n"
+ "subs x20, x20, #0x1\n"
+ ".inst 0xc16835c9 // sdot za.s[x9, 1], { z14.h-z15.h }, z8.h\n"
+ "ld1b { z14.s }, p5/Z, [x27]\n"
+ "sub z14.h, z14.h, z21.h\n"
+ "add x27, x27, %x[ld_in_col]\n"
+ "ld1b { z18.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0xc16335e8 // sdot za.s[x9, 0], { z15.h-z16.h }, z3.h\n"
+ "sub z18.h, z18.h, z21.h\n"
+ ".inst 0xc16635e9 // sdot za.s[x9, 1], { z15.h-z16.h }, z6.h\n"
+ "ld1b { z15.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub z15.h, z15.h, z21.h\n"
+ "ld1b { z17.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub z17.h, z17.h, z21.h\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "ld1b { z16.s }, p5/Z, [x19]\n"
+ "sub z16.h, z16.h, z21.h\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add x19, x27, %x[ld_in_row]\n"
+ ".inst 0xc0062818 // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+ "add x9, x9, #0x1\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc16735c8 // sdot za.s[x9, 0], { z14.h-z15.h }, z7.h\n"
+ ".inst 0xc1a1a418 // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+ "ld1b { z14.s }, p5/Z, [x27]\n"
+ ".inst 0xc16535e8 // sdot za.s[x9, 0], { z15.h-z16.h }, z5.h\n"
+ ".inst 0xc1a0a238 // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+ "ld1b { z18.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0xc1a2a318 // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+ "ld1b { z15.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub z14.h, z14.h, z21.h\n"
+ "sub z18.h, z18.h, z21.h\n"
+ "ld1b { z17.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub z15.h, z15.h, z21.h\n"
+ "sub z17.h, z17.h, z21.h\n"
+ "ld1b { z16.s }, p5/Z, [x19]\n"
+ "sub z16.h, z16.h, z21.h\n"
+ ".inst 0xc1b3c698 // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+ "add x27, x27, %x[ld_in_col]\n"
+ "st1b { z24.s }, p5, [x24]\n"
+ "add x24, x24, x22\n"
+ ".inst 0xc0042982 // mova za.d[x9, #2], { z12.d-z13.d }\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "st1b { z25.s }, p5, [x23]\n"
+ "add x23, x23, x21\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "mov z16.d, z16.d\n"
+ "bgt 10b\n"
+ "b 16f\n"
+ "11:" // Padded
+ "cbz x25, 14f\n"
+ "cmp x25, #0x1\n"
+ "sub x28, x28, x25\n"
+ "beq 13f\n"
+ "12:" // Padded: 2 priming loads
+ "mov x12, #0x0\n"
+ ".inst 0x25305504 // psel p4.s, p5.s/Z, p8.s[w12]\n"
+ "ld1b { z14.s }, p4/Z, [x27]\n"
+ "sub z14.h, p4/M, z14.h, z21.h\n"
+ "add x19, x27, %x[ld_in_row]\n"
+ ".inst 0x25705503 // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+ "ld1b { z18.s }, p3/Z, [x19]\n"
+ "sub z18.h, p3/M, z18.h, z21.h\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25b05502 // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+ "ld1b { z15.s }, p2/Z, [x19]\n"
+ "sub z15.h, p2/M, z15.h, z21.h\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25f05501 // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+ "ld1b { z17.s }, p1/Z, [x19]\n"
+ "sub z17.h, p1/M, z17.h, z21.h\n"
+ "mov x12, #0x4\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ ".inst 0x25305500 // psel p0.s, p5.s/Z, p8.s[w12]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
+ "sub z16.h, p0/M, z16.h, z21.h\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc16835c8 // sdot za.s[x9, 0], { z14.h-z15.h }, z8.h\n"
+ "add x27, x27, %x[ld_in_col]\n"
+ ".inst 0xc16635e8 // sdot za.s[x9, 0], { z15.h-z16.h }, z6.h\n"
+ "13:" // Padded: 1 priming loads
+ "mov x12, #0x0\n"
+ ".inst 0x25305504 // psel p4.s, p5.s/Z, p8.s[w12]\n"
+ "ld1b { z14.s }, p4/Z, [x27]\n"
+ "sub z14.h, p4/M, z14.h, z21.h\n"
+ "add x19, x27, %x[ld_in_row]\n"
+ ".inst 0x25705503 // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+ "ld1b { z18.s }, p3/Z, [x19]\n"
+ "sub z18.h, p3/M, z18.h, z21.h\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25b05502 // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+ "ld1b { z15.s }, p2/Z, [x19]\n"
+ "sub z15.h, p2/M, z15.h, z21.h\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25f05501 // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+ "ld1b { z17.s }, p1/Z, [x19]\n"
+ "sub z17.h, p1/M, z17.h, z21.h\n"
+ "mov x12, #0x4\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ ".inst 0x25305500 // psel p0.s, p5.s/Z, p8.s[w12]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
+ "sub z16.h, p0/M, z16.h, z21.h\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc16735c8 // sdot za.s[x9, 0], { z14.h-z15.h }, z7.h\n"
+ "add x27, x27, %x[ld_in_col]\n"
+ ".inst 0xc16535e8 // sdot za.s[x9, 0], { z15.h-z16.h }, z5.h\n"
+ "14:" // Padded: 0 priming loads
+ "mov x12, #0x0\n"
+ ".inst 0x25305504 // psel p4.s, p5.s/Z, p8.s[w12]\n"
+ "ld1b { z14.s }, p4/Z, [x27]\n"
+ "sub z14.h, p4/M, z14.h, z21.h\n"
+ "add x19, x27, %x[ld_in_row]\n"
+ ".inst 0x25705503 // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+ "ld1b { z18.s }, p3/Z, [x19]\n"
+ "sub z18.h, p3/M, z18.h, z21.h\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25b05502 // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+ "ld1b { z15.s }, p2/Z, [x19]\n"
+ "sub z15.h, p2/M, z15.h, z21.h\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25f05501 // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+ "ld1b { z17.s }, p1/Z, [x19]\n"
+ "sub z17.h, p1/M, z17.h, z21.h\n"
+ "mov x12, #0x4\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ ".inst 0x25305500 // psel p0.s, p5.s/Z, p8.s[w12]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
+ "sub z16.h, p0/M, z16.h, z21.h\n"
+ "sub x28, x28, #0x2\n"
+ "sub x26, x26, #0x1\n"
+ "lsr x19, x28, #0x1\n"
+ "mov z16.d, z16.d\n"
+ "cmp x19, x26\n"
+ "csel x20, x19, x26, LT\n"
+ "add x27, x27, %x[ld_in_col]\n"
+ "and x28, x28, #0x1\n"
+ "sub x26, x26, x20\n"
+ "cbz x20, 16f\n"
+ "15:" // Padded: Main loop
+ ".inst 0xc16435c8 // sdot za.s[x9, 0], { z14.h-z15.h }, z4.h\n"
+ "mov x12, #0x0\n"
+ ".inst 0x25305504 // psel p4.s, p5.s/Z, p8.s[w12]\n"
+ ".inst 0xc16835c9 // sdot za.s[x9, 1], { z14.h-z15.h }, z8.h\n"
+ "add x19, x27, %x[ld_in_row]\n"
+ ".inst 0x25705503 // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+ "ld1b { z14.s }, p4/Z, [x27]\n"
+ "ld1b { z18.s }, p3/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25b05502 // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+ ".inst 0xc16335e8 // sdot za.s[x9, 0], { z15.h-z16.h }, z3.h\n"
+ ".inst 0xc16635e9 // sdot za.s[x9, 1], { z15.h-z16.h }, z6.h\n"
+ "ld1b { z15.s }, p2/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25f05501 // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+ "ld1b { z17.s }, p1/Z, [x19]\n"
+ "mov x12, #0x4\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub z14.h, p4/M, z14.h, z21.h\n"
+ ".inst 0x25305500 // psel p0.s, p5.s/Z, p8.s[w12]\n"
+ "sub z18.h, p3/M, z18.h, z21.h\n"
+ "sub z15.h, p2/M, z15.h, z21.h\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
+ "sub z17.h, p1/M, z17.h, z21.h\n"
+ "sub z16.h, p0/M, z16.h, z21.h\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "add x27, x27, %x[ld_in_col]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ ".inst 0xc0062818 // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+ "add x9, x9, #0x1\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc16735c8 // sdot za.s[x9, 0], { z14.h-z15.h }, z7.h\n"
+ ".inst 0xc1a1a418 // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+ "mov x12, #0x0\n"
+ ".inst 0x25305504 // psel p4.s, p5.s/Z, p8.s[w12]\n"
+ "add x19, x27, %x[ld_in_row]\n"
+ "ld1b { z14.s }, p4/Z, [x27]\n"
+ ".inst 0xc16535e8 // sdot za.s[x9, 0], { z15.h-z16.h }, z5.h\n"
+ ".inst 0x25705503 // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+ "ld1b { z18.s }, p3/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0xc1a0a238 // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+ ".inst 0x25b05502 // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+ "ld1b { z15.s }, p2/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0xc1a2a318 // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+ ".inst 0x25f05501 // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+ "mov x12, #0x4\n"
+ "ld1b { z17.s }, p1/Z, [x19]\n"
+ "sub z14.h, p4/M, z14.h, z21.h\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25305500 // psel p0.s, p5.s/Z, p8.s[w12]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
+ "sub z18.h, p3/M, z18.h, z21.h\n"
+ "sub z15.h, p2/M, z15.h, z21.h\n"
+ "sub z17.h, p1/M, z17.h, z21.h\n"
+ "subs x20, x20, #0x1\n"
+ ".inst 0xc0042982 // mova za.d[x9, #2], { z12.d-z13.d }\n"
+ "sub z16.h, p0/M, z16.h, z21.h\n"
+ ".inst 0xc1b3c698 // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+ "st1b { z24.s }, p5, [x24]\n"
+ "add x24, x24, x22\n"
+ "st1b { z25.s }, p5, [x23]\n"
+ "add x23, x23, x21\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "mov z16.d, z16.d\n"
+ "add x27, x27, %x[ld_in_col]\n"
+ "bgt 15b\n"
+ "16:" // Main loop tail
+ ".inst 0xc16435c8 // sdot za.s[x9, 0], { z14.h-z15.h }, z4.h\n"
+ "mov x12, #0x0\n"
+ ".inst 0x25305504 // psel p4.s, p5.s/Z, p8.s[w12]\n"
+ ".inst 0xc16335e8 // sdot za.s[x9, 0], { z15.h-z16.h }, z3.h\n"
+ "add x19, x27, %x[ld_in_row]\n"
+ ".inst 0x25705503 // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+ ".inst 0xc16835c9 // sdot za.s[x9, 1], { z14.h-z15.h }, z8.h\n"
+ "ld1b { z14.s }, p4/Z, [x27]\n"
+ ".inst 0x25b05502 // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+ ".inst 0x25f05501 // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+ "ld1b { z18.s }, p3/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0xc16635e9 // sdot za.s[x9, 1], { z15.h-z16.h }, z6.h\n"
+ "mov x12, #0x4\n"
+ ".inst 0xc0062818 // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+ "ld1b { z15.s }, p2/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0xc1a1a418 // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+ "ld1b { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25305500 // psel p0.s, p5.s/Z, p8.s[w12]\n"
+ "sub z14.h, p4/M, z14.h, z21.h\n"
+ "sub z18.h, p3/M, z18.h, z21.h\n"
+ "sub z15.h, p2/M, z15.h, z21.h\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
+ "add x9, x9, #0x1\n"
+ "sub z17.h, p1/M, z17.h, z21.h\n"
+ "sub z16.h, p0/M, z16.h, z21.h\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "add x27, x27, %x[ld_in_col]\n"
+ ".inst 0xc1a0a238 // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ ".inst 0xc1a2a318 // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+ ".inst 0xc16735c8 // sdot za.s[x9, 0], { z14.h-z15.h }, z7.h\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc1b3c698 // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+ "st1b { z24.s }, p5, [x24]\n"
+ "add x24, x24, x22\n"
+ "st1b { z25.s }, p5, [x23]\n"
+ "add x23, x23, x21\n"
+ ".inst 0xc0042982 // mova za.d[x9, #2], { z12.d-z13.d }\n"
+ ".inst 0xc16535e8 // sdot za.s[x9, 0], { z15.h-z16.h }, z5.h\n"
+ "cbz x28, 17f\n" // Skip remainder inputs
+ "mov x12, #0x0\n"
+ ".inst 0x25305504 // psel p4.s, p5.s/Z, p8.s[w12]\n"
+ "ld1b { z14.s }, p4/Z, [x27]\n"
+ "sub z14.h, p4/M, z14.h, z21.h\n"
+ "add x19, x27, %x[ld_in_row]\n"
+ ".inst 0x25705503 // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+ "ld1b { z18.s }, p3/Z, [x19]\n"
+ "sub z18.h, p3/M, z18.h, z21.h\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25b05502 // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+ "ld1b { z15.s }, p2/Z, [x19]\n"
+ "sub z15.h, p2/M, z15.h, z21.h\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25f05501 // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+ "ld1b { z17.s }, p1/Z, [x19]\n"
+ "sub z17.h, p1/M, z17.h, z21.h\n"
+ "mov x12, #0x4\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ ".inst 0x25305500 // psel p0.s, p5.s/Z, p8.s[w12]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
+ "sub z16.h, p0/M, z16.h, z21.h\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc16435c8 // sdot za.s[x9, 0], { z14.h-z15.h }, z4.h\n"
+ "sub x26, x26, #0x1\n"
+ ".inst 0xc16335e8 // sdot za.s[x9, 0], { z15.h-z16.h }, z3.h\n"
+ ".inst 0xc0062818 // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+ ".inst 0xc1a1a418 // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+ ".inst 0xc1a0a238 // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+ ".inst 0xc16835c9 // sdot za.s[x9, 1], { z14.h-z15.h }, z8.h\n"
+ ".inst 0xc1a2a318 // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+ ".inst 0xc16635e9 // sdot za.s[x9, 1], { z15.h-z16.h }, z6.h\n"
+ "add x9, x9, #0x1\n"
+ ".inst 0xc1b3c698 // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+ "st1b { z24.s }, p5, [x24]\n"
+ "add x24, x24, x22\n"
+ ".inst 0xc0042982 // mova za.d[x9, #2], { z12.d-z13.d }\n"
+ "st1b { z25.s }, p5, [x23]\n"
+ "add x23, x23, x21\n"
+ "17:" // Tail input: End
+ "cbz x26, 19f\n"
+ "18:" // Right padding loop
+ ".inst 0xc0062818 // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+ ".inst 0xc1a1a418 // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+ "add x9, x9, #0x1\n"
+ ".inst 0xc1a0a238 // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+ "subs x26, x26, #0x1\n"
+ ".inst 0xc0042982 // mova za.d[x9, #2], { z12.d-z13.d }\n"
+ ".inst 0xc1a2a318 // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+ ".inst 0xc1b3c698 // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+ "st1b { z24.s }, p5, [x24]\n"
+ "add x24, x24, x22\n"
+ "st1b { z25.s }, p5, [x23]\n"
+ "add x23, x23, x21\n"
+ "bgt 18b\n"
+ "19:" // End
+ ".inst 0xd503467f // SMSTOP\n"
+ :
+ : [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_n] "I" (offsetof(Args, n)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+}
+
+} // namespace depthwise
+} // namespace arm_conv
+
+#endif // defined(ARM_COMPUTE_ENABLE_SME2)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s2_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s2_4rows_dot_za/generic.cpp
index 630d870433..bdf1ba6f9c 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s2_4rows_dot_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s2_4rows_dot_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -69,18 +69,18 @@ void sme2_u8q_planar_3x3_s2_4rows_dot_za_impl(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "ldr x6, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "ldr x7, [%x[args], %[offsetof_Args_pad_bottom]]\n"
"ptrue p2.b\n"
- "mov x20, #0x9\n"
- "ldr x7, [%x[args], %[offsetof_Args_pad_top]]\n"
+ "mov x19, #0x9\n"
+ "ldr x8, [%x[args], %[offsetof_Args_pad_top]]\n"
"ld1rh { z5.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
- "sub x20, x20, x6\n"
+ "sub x19, x19, x7\n"
".inst 0x25207812 // ptrue pn10.b\n"
"ldr x17, [%x[args], %[offsetof_Args_n_channels]]\n"
"whilelt p1.s, XZR, x17\n"
- "whilelt p9.s, XZR, x20\n"
+ "whilelt p9.s, XZR, x19\n"
"ld1rw { z4.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
- "whilelt p8.s, XZR, x7\n"
+ "whilelt p8.s, XZR, x8\n"
"addvl SP, SP, #-6\n"
"ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
"neg z5.h, p2/M, z5.h\n"
@@ -90,317 +90,317 @@ void sme2_u8q_planar_3x3_s2_4rows_dot_za_impl(
"ld1rw { z27.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
"ld1rw { z23.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
"1:" // Channel loop
- "ldr x20, [%x[qp], %[offsetof_Requantize32_bias]]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_bias]]\n"
"mov z0.s, #0x0\n"
- "cbz x20, 2f\n"
- "ld1w { z0.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "cbz x19, 2f\n"
+ "ld1w { z0.s }, p1/Z, [x19, x16, LSL #2]\n"
"2:" // Load bias: Done
- "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
- "mov x20, x22\n"
- "ld1b { z24.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
+ "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+ "mov x19, x21\n"
+ "ld1b { z24.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
"ld1rh { z13.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
"sub z24.h, z24.h, z13.h\n"
- "incw x22\n"
+ "incw x21\n"
"mov z17.h, #0x0\n"
- "ld1b { z25.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
+ "ld1b { z25.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
"sub z25.h, z25.h, z13.h\n"
"trn1 z10.h, z24.h, z25.h\n"
- "ld1b { z16.s }, p2/Z, [x20]\n"
+ "ld1b { z16.s }, p2/Z, [x19]\n"
"sub z16.h, z16.h, z13.h\n"
- "mov x20, x22\n"
+ "mov x19, x21\n"
"trn1 z11.h, z16.h, z17.h\n"
- "ld1b { z24.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
+ "ld1b { z24.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
"sub z24.h, z24.h, z13.h\n"
- "addvl x21, SP, #6\n"
- "ld1b { z25.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
+ "addvl x20, SP, #6\n"
+ "ld1b { z25.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
"sub z25.h, z25.h, z13.h\n"
- "incw x22\n"
- "ld1b { z16.s }, p2/Z, [x20]\n"
+ "incw x21\n"
+ "ld1b { z16.s }, p2/Z, [x19]\n"
"sub z16.h, z16.h, z13.h\n"
- "addvl x21, x21, #-2\n"
- "mov x20, x22\n"
- "st1h { z10.h }, p2, [x21]\n"
+ "addvl x20, x20, #-2\n"
+ "mov x19, x21\n"
+ "st1h { z10.h }, p2, [x20]\n"
"trn1 z10.h, z24.h, z25.h\n"
- "ld1b { z24.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
- "ld1b { z25.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
- "st1h { z11.h }, p2, [x21, #1, MUL VL]\n"
+ "ld1b { z24.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
+ "ld1b { z25.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
+ "st1h { z11.h }, p2, [x20, #1, MUL VL]\n"
"trn1 z11.h, z16.h, z17.h\n"
- "ld1b { z16.s }, p2/Z, [x20]\n"
+ "ld1b { z16.s }, p2/Z, [x19]\n"
"sub z24.h, z24.h, z13.h\n"
"sub z25.h, z25.h, z13.h\n"
- "ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
"sub z16.h, z16.h, z13.h\n"
- "addvl x21, x21, #-2\n"
- "st1h { z10.h }, p2, [x21]\n"
+ "addvl x20, x20, #-2\n"
+ "st1h { z10.h }, p2, [x20]\n"
"mov z1.d, z0.d\n"
- "st1h { z11.h }, p2, [x21, #1, MUL VL]\n"
- "addvl x21, x21, #-2\n"
+ "st1h { z11.h }, p2, [x20, #1, MUL VL]\n"
+ "addvl x20, x20, #-2\n"
"mov z2.d, z0.d\n"
"mov z3.d, z0.d\n"
"trn1 z10.h, z24.h, z25.h\n"
- "st1h { z10.h }, p2, [x21]\n"
+ "st1h { z10.h }, p2, [x20]\n"
"trn1 z11.h, z16.h, z17.h\n"
- "st1h { z11.h }, p2, [x21, #1, MUL VL]\n"
- "cbz x20, 3f\n"
- "ld1w { z8.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "st1h { z11.h }, p2, [x20, #1, MUL VL]\n"
+ "cbz x19, 3f\n"
+ "ld1w { z8.s }, p1/Z, [x19, x16, LSL #2]\n"
"3:" // Load mul: End
- "ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
- "cbz x20, 4f\n"
- "ld1w { z7.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+ "cbz x19, 4f\n"
+ "ld1w { z7.s }, p1/Z, [x19, x16, LSL #2]\n"
"4:" // Load right_shift: End
"ldr x15, [%x[args], %[offsetof_Args_input_cols]]\n"
- "sub x20, x15, #0x1\n"
- "orr x23, x20, %x[ld_in_col], LSL #16\n"
+ "sub x19, x15, #0x1\n"
+ "orr x22, x19, %x[ld_in_col], LSL #16\n"
"ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "orr x23, x17, x23, LSL #22\n"
- "mov x22, #0x9\n"
- "add x21, x7, x6\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
+ "orr x22, x17, x22, LSL #22\n"
+ "mov x21, #0x9\n"
+ "add x20, x8, x7\n"
+ "lsl x19, %x[ld_in_row], #0x0\n"
"ldr x13, [%x[args], %[offsetof_Args_output_cols]]\n"
- "mov x8, #0x0\n"
- "lsl x23, x23, #0x0\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x7, x14\n"
+ "mov x11, #0x0\n"
+ "lsl x22, x22, #0x0\n"
+ "sub x21, x21, x20\n"
+ "madd x19, x19, x8, x14\n"
"5:" // Issue prefetches
- "subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col]\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xf8b64a7c // rprfm pldstrm, x22, [x19]\n"
+ "add x19, x19, %x[ld_in_col]\n"
"bgt 5b\n"
- "ldr x25, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "msub x14, x7, x20, x14\n"
- ".inst 0xc0040c00 // mova za.d[x8, #0], { z0.d-z3.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
- ".inst 0xc0040c01 // mova za.d[x8, #1], { z0.d-z3.d }\n"
- "mov x22, #0x2\n"
- "ldp x11, x10, [x25], #0x10\n"
- ".inst 0xc0040c02 // mova za.d[x8, #2], { z0.d-z3.d }\n"
- "ldp x9, x28, [x20], #0x10\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
- "ldp x27, x26, [x25], #0x10\n"
- "ldp x25, x24, [x20], #0x10\n"
- "cbz x21, 7f\n"
- "cmp x21, x22\n"
- "csel x20, x21, x22, LT\n"
- "sub x21, x21, x20\n"
- "sub x22, x22, x20\n"
- "cbz x21, 7f\n"
- ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
+ "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "lsl x19, %x[ld_in_row], #0x0\n"
+ "msub x14, x8, x19, x14\n"
+ ".inst 0xc0046c00 // mova za.d[x11, #0], { z0.d-z3.d }\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ ".inst 0xc0046c01 // mova za.d[x11, #1], { z0.d-z3.d }\n"
+ "mov x21, #0x2\n"
+ "ldp x10, x9, [x24], #0x10\n"
+ ".inst 0xc0046c02 // mova za.d[x11, #2], { z0.d-z3.d }\n"
+ "ldp x28, x27, [x19], #0x10\n"
+ "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+ "ldp x26, x25, [x24], #0x10\n"
+ "ldp x24, x23, [x19], #0x10\n"
+ "cbz x20, 7f\n"
+ "cmp x20, x21\n"
+ "csel x19, x20, x21, LT\n"
+ "sub x20, x20, x19\n"
+ "sub x21, x21, x19\n"
+ "cbz x20, 7f\n"
+ ".inst 0xc0066c1c // mova { z28.d-z31.d }, za.d[x11, #0]\n"
".inst 0xc1a8ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
- "and x22, x21, #0x1\n"
+ "and x21, x20, #0x1\n"
".inst 0xc1a7aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
- "add x21, x21, #0x1\n"
- "lsr x21, x21, #0x1\n"
+ "add x20, x20, #0x1\n"
+ "lsr x20, x20, #0x1\n"
".inst 0xc1a4ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
- "sub x13, x13, x21\n"
+ "sub x13, x13, x20\n"
".inst 0xc1b7cf7c // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
"6:" // Left padding
- "subs x21, x21, #0x1\n"
- "st1b { z28.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- "st1b { z29.s }, p1, [x10]\n"
+ "subs x20, x20, #0x1\n"
+ "st1b { z28.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z30.s }, p1, [x27]\n"
- "add x27, x27, x25\n"
- "st1b { z31.s }, p1, [x26]\n"
+ "st1b { z29.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
+ "st1b { z30.s }, p1, [x26]\n"
"add x26, x26, x24\n"
+ "st1b { z31.s }, p1, [x25]\n"
+ "add x25, x25, x23\n"
"bgt 6b\n"
"7:" // Left padding: End
- "adds XZR, x7, x6\n"
+ "adds XZR, x8, x7\n"
"bne 12f\n"
- "cbz x22, 10f\n"
- "cmp x22, #0x1\n"
- "sub x15, x15, x22\n"
+ "cbz x21, 10f\n"
+ "cmp x21, #0x1\n"
+ "sub x15, x15, x21\n"
"beq 9f\n"
"8:" // Unpadded: 2 priming loads
- "add x21, x14, %x[ld_in_row]\n"
+ "add x20, x14, %x[ld_in_row]\n"
"ld1b { z12.s }, p1/Z, [x14]\n"
- "addvl x20, SP, #4\n"
- "ld1b { z20.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "addvl x19, SP, #4\n"
+ "ld1b { z20.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
"add z12.h, z12.h, z5.h\n"
- "ld1b { z13.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z13.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1b { z19.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z19.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z13.h, z13.h, z19.h\n"
"add z13.h, z13.h, z5.h\n"
- "ld1b { z14.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "ld1b { z18.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z14.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z18.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z14.h, z14.h, z18.h\n"
"add z14.h, z14.h, z5.h\n"
- "ld1b { z15.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "ld1b { z17.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z15.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z15.h, z15.h, z17.h\n"
"add z15.h, z15.h, z5.h\n"
- "ld1b { z16.s }, p1/Z, [x21]\n"
+ "ld1b { z16.s }, p1/Z, [x20]\n"
"mov z16.d, z16.d\n"
"add z16.h, z16.h, z5.h\n"
- ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
+ ".inst 0xa0402a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc17a7588 // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xc17b75a8 // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
"9:" // Unpadded: 1 priming loads
- "add x21, x14, %x[ld_in_row]\n"
+ "add x20, x14, %x[ld_in_row]\n"
"ld1b { z12.s }, p1/Z, [x14]\n"
- "addvl x20, SP, #2\n"
- "ld1b { z20.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "addvl x19, SP, #2\n"
+ "ld1b { z20.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
"add z12.h, z12.h, z5.h\n"
- "ld1b { z13.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z13.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1b { z19.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z19.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z13.h, z13.h, z19.h\n"
"add z13.h, z13.h, z5.h\n"
- "ld1b { z14.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "ld1b { z18.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z14.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z18.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z14.h, z14.h, z18.h\n"
"add z14.h, z14.h, z5.h\n"
- "ld1b { z15.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "ld1b { z17.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z15.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z15.h, z15.h, z17.h\n"
"add z15.h, z15.h, z5.h\n"
- "ld1b { z16.s }, p1/Z, [x21]\n"
+ "ld1b { z16.s }, p1/Z, [x20]\n"
"mov z16.d, z16.d\n"
"add z16.h, z16.h, z5.h\n"
- ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
+ ".inst 0xa0402a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc17a7588 // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xc17b75a8 // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
"10:" // Unpadded: 0 priming loads
"cmp x15, #0x2\n"
".inst 0xa0402bea // ld1h { z10.h-z11.h }, pn10.b/Z, [SP]\n"
"blt 18f\n"
- "add x21, x14, %x[ld_in_row]\n"
+ "add x20, x14, %x[ld_in_row]\n"
"ld1b { z12.s }, p1/Z, [x14]\n"
"sub x15, x15, #0x2\n"
- "ld1b { z20.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z20.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
"sub x13, x13, #0x1\n"
- "ld1b { z13.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "lsr x20, x15, #0x1\n"
+ "ld1b { z13.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "lsr x19, x15, #0x1\n"
"add z12.h, z12.h, z5.h\n"
- "ld1b { z19.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z19.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z13.h, z13.h, z19.h\n"
- "cmp x20, x13\n"
- "ld1b { z14.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "csel x23, x20, x13, LT\n"
+ "cmp x19, x13\n"
+ "ld1b { z14.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "csel x22, x19, x13, LT\n"
"add z13.h, z13.h, z5.h\n"
- "ld1b { z18.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z18.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z14.h, z14.h, z18.h\n"
"add z14.h, z14.h, z5.h\n"
- "ld1b { z15.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z15.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1b { z17.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z15.h, z15.h, z17.h\n"
"add z15.h, z15.h, z5.h\n"
- "ld1b { z16.s }, p1/Z, [x21]\n"
+ "ld1b { z16.s }, p1/Z, [x20]\n"
"mov z16.d, z16.d\n"
"add z16.h, z16.h, z5.h\n"
"and x15, x15, #0x1\n"
- "sub x13, x13, x23\n"
- "cbz x23, 17f\n"
+ "sub x13, x13, x22\n"
+ "cbz x22, 17f\n"
"11:" // Unpadded: Main loop
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- "addvl x20, SP, #4\n"
- "add x22, x14, %x[ld_in_row]\n"
- ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
- ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
- "addvl x21, SP, #2\n"
- "subs x23, x23, #0x1\n"
- ".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xc17a7588 // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+ "addvl x19, SP, #4\n"
+ "add x21, x14, %x[ld_in_row]\n"
+ ".inst 0xc17b75a8 // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+ ".inst 0xa0402a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+ "addvl x20, SP, #2\n"
+ "subs x22, x22, #0x1\n"
+ ".inst 0xc17a7589 // sdot za.s[x11, 1], { z12.h-z15.h }, z10.h\n"
"ld1b { z12.s }, p1/Z, [x14]\n"
"add x14, x14, %x[ld_in_col]\n"
- "add x20, x14, %x[ld_in_row]\n"
- "ld1b { z20.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- ".inst 0xc17b15a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z11.h\n"
+ "add x19, x14, %x[ld_in_row]\n"
+ "ld1b { z20.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ ".inst 0xc17b75a9 // sdot za.s[x11, 1], { z13.h-z16.h }, z11.h\n"
"trn1 z12.h, z12.h, z20.h\n"
- "ld1b { z13.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "ld1b { z13.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
"add z12.h, z12.h, z5.h\n"
- ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
- "ld1b { z19.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc0066c1c // mova { z28.d-z31.d }, za.d[x11, #0]\n"
+ "ld1b { z19.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
"trn1 z13.h, z13.h, z19.h\n"
"add z13.h, z13.h, z5.h\n"
- "ld1b { z14.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "add x8, x8, #0x1\n"
- ".inst 0xc0040c02 // mova za.d[x8, #2], { z0.d-z3.d }\n"
- "ld1b { z18.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "ld1b { z14.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add x11, x11, #0x1\n"
+ ".inst 0xc0046c02 // mova za.d[x11, #2], { z0.d-z3.d }\n"
+ "ld1b { z18.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
"trn1 z14.h, z14.h, z18.h\n"
"add z14.h, z14.h, z5.h\n"
- "ld1b { z15.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "ld1b { z15.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0xc1a8ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
- "ld1b { z17.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
"trn1 z15.h, z15.h, z17.h\n"
"add z15.h, z15.h, z5.h\n"
- "ld1b { z16.s }, p1/Z, [x22]\n"
+ "ld1b { z16.s }, p1/Z, [x21]\n"
"mov z16.d, z16.d\n"
"add z16.h, z16.h, z5.h\n"
- ".inst 0xa0402aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc17a7588 // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
".inst 0xc1a7aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
"ld1b { z12.s }, p1/Z, [x14]\n"
- ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
+ ".inst 0xc17b75a8 // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
".inst 0xc1a4ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
- "ld1b { z20.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z20.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
".inst 0xc1b7cf7c // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
- "ld1b { z13.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- "st1b { z28.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- "ld1b { z19.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z13.h, z13.h, z19.h\n"
- "st1b { z29.s }, p1, [x10]\n"
- "ld1b { z14.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z13.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "st1b { z28.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z30.s }, p1, [x27]\n"
- "ld1b { z18.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z19.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "trn1 z13.h, z13.h, z19.h\n"
+ "st1b { z29.s }, p1, [x9]\n"
+ "ld1b { z14.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "add x9, x9, x27\n"
+ "st1b { z30.s }, p1, [x26]\n"
+ "ld1b { z18.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z14.h, z14.h, z18.h\n"
- "add x27, x27, x25\n"
- "ld1b { z15.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- "st1b { z31.s }, p1, [x26]\n"
"add x26, x26, x24\n"
- "ld1b { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z15.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "st1b { z31.s }, p1, [x25]\n"
+ "add x25, x25, x23\n"
+ "ld1b { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z15.h, z15.h, z17.h\n"
"add z12.h, z12.h, z5.h\n"
- "ld1b { z16.s }, p1/Z, [x20]\n"
+ "ld1b { z16.s }, p1/Z, [x19]\n"
"mov z16.d, z16.d\n"
"add z13.h, z13.h, z5.h\n"
"add x14, x14, %x[ld_in_col]\n"
@@ -411,108 +411,108 @@ void sme2_u8q_planar_3x3_s2_4rows_dot_za_impl(
"bgt 11b\n"
"b 17f\n"
"12:" // Padded
- "cbz x22, 15f\n"
- "cmp x22, #0x1\n"
- "sub x15, x15, x22\n"
+ "cbz x21, 15f\n"
+ "cmp x21, #0x1\n"
+ "sub x15, x15, x21\n"
"beq 14f\n"
"13:" // Padded: 2 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"ld1b { z12.s }, p0/Z, [x14]\n"
"add z12.h, p0/M, z12.h, z5.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "add x19, x14, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
+ "ld1b { z20.s }, p0/Z, [x19]\n"
"add z20.h, p0/M, z20.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z13.s }, p0/Z, [x20]\n"
+ "ld1b { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z19.s }, p0/Z, [x20]\n"
+ "ld1b { z19.s }, p0/Z, [x19]\n"
"add z19.h, p0/M, z19.h, z5.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
"trn1 z13.h, z13.h, z19.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z14.s }, p0/Z, [x20]\n"
+ "ld1b { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z15.s }, p0/Z, [x20]\n"
+ "ld1b { z15.s }, p0/Z, [x19]\n"
"add z15.h, p0/M, z15.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
"add z17.h, p0/M, z17.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z5.h\n"
- "addvl x20, SP, #4\n"
+ "addvl x19, SP, #4\n"
"trn1 z14.h, z14.h, z18.h\n"
"trn1 z15.h, z15.h, z17.h\n"
- ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa0402a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
"mov z16.d, z16.d\n"
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xc17a7588 // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
"add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
+ ".inst 0xc17b75a8 // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
"14:" // Padded: 1 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"ld1b { z12.s }, p0/Z, [x14]\n"
"add z12.h, p0/M, z12.h, z5.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "add x19, x14, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
+ "ld1b { z20.s }, p0/Z, [x19]\n"
"add z20.h, p0/M, z20.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z13.s }, p0/Z, [x20]\n"
+ "ld1b { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z19.s }, p0/Z, [x20]\n"
+ "ld1b { z19.s }, p0/Z, [x19]\n"
"add z19.h, p0/M, z19.h, z5.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
"trn1 z13.h, z13.h, z19.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z14.s }, p0/Z, [x20]\n"
+ "ld1b { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z15.s }, p0/Z, [x20]\n"
+ "ld1b { z15.s }, p0/Z, [x19]\n"
"add z15.h, p0/M, z15.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
"add z17.h, p0/M, z17.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z5.h\n"
- "addvl x20, SP, #2\n"
+ "addvl x19, SP, #2\n"
"trn1 z14.h, z14.h, z18.h\n"
"trn1 z15.h, z15.h, z17.h\n"
- ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa0402a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
"mov z16.d, z16.d\n"
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xc17a7588 // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
"add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
+ ".inst 0xc17b75a8 // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
"15:" // Padded: 0 priming loads
"cmp x15, #0x2\n"
".inst 0xa0402bea // ld1h { z10.h-z11.h }, pn10.b/Z, [SP]\n"
@@ -521,357 +521,357 @@ void sme2_u8q_planar_3x3_s2_4rows_dot_za_impl(
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"ld1b { z12.s }, p0/Z, [x14]\n"
"add z12.h, p0/M, z12.h, z5.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "add x19, x14, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
+ "ld1b { z20.s }, p0/Z, [x19]\n"
"add z20.h, p0/M, z20.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z13.s }, p0/Z, [x20]\n"
+ "ld1b { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z19.s }, p0/Z, [x20]\n"
+ "ld1b { z19.s }, p0/Z, [x19]\n"
"add z19.h, p0/M, z19.h, z5.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
"trn1 z13.h, z13.h, z19.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z14.s }, p0/Z, [x20]\n"
+ "ld1b { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z15.s }, p0/Z, [x20]\n"
+ "ld1b { z15.s }, p0/Z, [x19]\n"
"add z15.h, p0/M, z15.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
"add z17.h, p0/M, z17.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z5.h\n"
"sub x15, x15, #0x2\n"
"sub x13, x13, #0x1\n"
"trn1 z14.h, z14.h, z18.h\n"
"trn1 z15.h, z15.h, z17.h\n"
- "lsr x20, x15, #0x1\n"
- "cmp x20, x13\n"
+ "lsr x19, x15, #0x1\n"
+ "cmp x19, x13\n"
"mov z16.d, z16.d\n"
- "csel x22, x20, x13, LT\n"
+ "csel x21, x19, x13, LT\n"
"add x14, x14, %x[ld_in_col]\n"
"and x15, x15, #0x1\n"
- "sub x13, x13, x22\n"
- "cbz x22, 17f\n"
+ "sub x13, x13, x21\n"
+ "cbz x21, 17f\n"
"16:" // Padded: Main loop
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- "addvl x20, SP, #4\n"
+ ".inst 0xc17a7588 // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+ "addvl x19, SP, #4\n"
"mov x12, #0x0\n"
- ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
- ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc17b75a8 // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+ ".inst 0xa0402a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "add x21, x14, %x[ld_in_row]\n"
- ".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+ "add x20, x14, %x[ld_in_row]\n"
+ ".inst 0xc17a7589 // sdot za.s[x11, 1], { z12.h-z15.h }, z10.h\n"
"ld1b { z12.s }, p0/Z, [x14]\n"
"add z12.h, p0/M, z12.h, z5.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z20.s }, p0/Z, [x21]\n"
+ "ld1b { z20.s }, p0/Z, [x20]\n"
"add z20.h, p0/M, z20.h, z5.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc17b15a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z11.h\n"
- "ld1b { z13.s }, p0/Z, [x21]\n"
+ ".inst 0xc17b75a9 // sdot za.s[x11, 1], { z13.h-z16.h }, z11.h\n"
+ "ld1b { z13.s }, p0/Z, [x20]\n"
"add z13.h, p0/M, z13.h, z5.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z19.s }, p0/Z, [x21]\n"
+ "ld1b { z19.s }, p0/Z, [x20]\n"
"mov x12, #0x4\n"
"add z19.h, p0/M, z19.h, z5.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z14.s }, p0/Z, [x21]\n"
+ "ld1b { z14.s }, p0/Z, [x20]\n"
"add z14.h, p0/M, z14.h, z5.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x21]\n"
+ "ld1b { z18.s }, p0/Z, [x20]\n"
"add z18.h, p0/M, z18.h, z5.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z15.s }, p0/Z, [x21]\n"
+ "ld1b { z15.s }, p0/Z, [x20]\n"
"add z15.h, p0/M, z15.h, z5.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z17.s }, p0/Z, [x21]\n"
+ "ld1b { z17.s }, p0/Z, [x20]\n"
"add z17.h, p0/M, z17.h, z5.h\n"
"mov x12, #0x8\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
"trn1 z13.h, z13.h, z19.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "addvl x20, SP, #2\n"
- "ld1b { z16.s }, p0/Z, [x21]\n"
+ "addvl x19, SP, #2\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
"trn1 z14.h, z14.h, z18.h\n"
"trn1 z15.h, z15.h, z17.h\n"
- ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa0402a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
"mov x12, #0x0\n"
- ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
- "add x8, x8, #0x1\n"
+ ".inst 0xc0066c1c // mova { z28.d-z31.d }, za.d[x11, #0]\n"
+ "add x11, x11, #0x1\n"
"add z16.h, p0/M, z16.h, z5.h\n"
"add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xc17a7588 // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"ld1b { z12.s }, p0/Z, [x14]\n"
"add z12.h, p0/M, z12.h, z5.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "add x19, x14, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
"mov z16.d, z16.d\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
+ "ld1b { z20.s }, p0/Z, [x19]\n"
"add z20.h, p0/M, z20.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
- "ld1b { z13.s }, p0/Z, [x20]\n"
+ ".inst 0xc17b75a8 // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+ "ld1b { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z19.s }, p0/Z, [x20]\n"
+ "ld1b { z19.s }, p0/Z, [x19]\n"
"mov x12, #0x4\n"
"add z19.h, p0/M, z19.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc0040c02 // mova za.d[x8, #2], { z0.d-z3.d }\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0xc0046c02 // mova za.d[x11, #2], { z0.d-z3.d }\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z14.s }, p0/Z, [x20]\n"
+ "ld1b { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z15.s }, p0/Z, [x20]\n"
+ "ld1b { z15.s }, p0/Z, [x19]\n"
".inst 0xc1a8ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
"add z15.h, p0/M, z15.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
".inst 0xc1a7aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"add z17.h, p0/M, z17.h, z5.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z5.h\n"
".inst 0xc1a4ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
- "subs x22, x22, #0x1\n"
+ "subs x21, x21, #0x1\n"
".inst 0xa0402bea // ld1h { z10.h-z11.h }, pn10.b/Z, [SP]\n"
".inst 0xc1b7cf7c // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
- "st1b { z28.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- "trn1 z12.h, z12.h, z20.h\n"
- "st1b { z29.s }, p1, [x10]\n"
+ "st1b { z28.s }, p1, [x10]\n"
"add x10, x10, x28\n"
+ "trn1 z12.h, z12.h, z20.h\n"
+ "st1b { z29.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
"trn1 z13.h, z13.h, z19.h\n"
"trn1 z14.h, z14.h, z18.h\n"
- "st1b { z30.s }, p1, [x27]\n"
- "add x27, x27, x25\n"
+ "st1b { z30.s }, p1, [x26]\n"
+ "add x26, x26, x24\n"
"trn1 z15.h, z15.h, z17.h\n"
"mov z16.d, z16.d\n"
- "st1b { z31.s }, p1, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z31.s }, p1, [x25]\n"
+ "add x25, x25, x23\n"
"add x14, x14, %x[ld_in_col]\n"
"bgt 16b\n"
"17:" // Main loop tail
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- "addvl x20, SP, #4\n"
+ ".inst 0xc17a7588 // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+ "addvl x19, SP, #4\n"
"mov x12, #0x0\n"
- ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
- ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc17b75a8 // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+ ".inst 0xa0402a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "add x20, x14, %x[ld_in_row]\n"
- ".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+ "add x19, x14, %x[ld_in_row]\n"
+ ".inst 0xc17a7589 // sdot za.s[x11, 1], { z12.h-z15.h }, z10.h\n"
"ld1b { z12.s }, p0/Z, [x14]\n"
"add z12.h, p0/M, z12.h, z5.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
+ "ld1b { z20.s }, p0/Z, [x19]\n"
"add z20.h, p0/M, z20.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc17b15a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z11.h\n"
- "ld1b { z13.s }, p0/Z, [x20]\n"
+ ".inst 0xc17b75a9 // sdot za.s[x11, 1], { z13.h-z16.h }, z11.h\n"
+ "ld1b { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z19.s }, p0/Z, [x20]\n"
+ "ld1b { z19.s }, p0/Z, [x19]\n"
"mov x12, #0x4\n"
"add z19.h, p0/M, z19.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z14.s }, p0/Z, [x20]\n"
+ "ld1b { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z15.s }, p0/Z, [x20]\n"
- ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
+ "ld1b { z15.s }, p0/Z, [x19]\n"
+ ".inst 0xc0066c1c // mova { z28.d-z31.d }, za.d[x11, #0]\n"
"add z15.h, p0/M, z15.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
".inst 0xc1a8ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"add z17.h, p0/M, z17.h, z5.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "addvl x20, SP, #2\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
+ "addvl x19, SP, #2\n"
".inst 0xc1a7aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
"trn1 z12.h, z12.h, z20.h\n"
- "add x8, x8, #0x1\n"
+ "add x11, x11, #0x1\n"
"add z16.h, p0/M, z16.h, z5.h\n"
"trn1 z13.h, z13.h, z19.h\n"
"trn1 z14.h, z14.h, z18.h\n"
"add x14, x14, %x[ld_in_col]\n"
"trn1 z15.h, z15.h, z17.h\n"
- ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa0402a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
".inst 0xc1a4ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
"mov z16.d, z16.d\n"
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xc17a7588 // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
".inst 0xc1b7cf7c // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
- "st1b { z28.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- ".inst 0xc0040c02 // mova za.d[x8, #2], { z0.d-z3.d }\n"
- "st1b { z29.s }, p1, [x10]\n"
+ "st1b { z28.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
+ ".inst 0xc0046c02 // mova za.d[x11, #2], { z0.d-z3.d }\n"
+ "st1b { z29.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
+ ".inst 0xc17b75a8 // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
".inst 0xa0402bea // ld1h { z10.h-z11.h }, pn10.b/Z, [SP]\n"
- "st1b { z30.s }, p1, [x27]\n"
- "add x27, x27, x25\n"
- "st1b { z31.s }, p1, [x26]\n"
+ "st1b { z30.s }, p1, [x26]\n"
"add x26, x26, x24\n"
+ "st1b { z31.s }, p1, [x25]\n"
+ "add x25, x25, x23\n"
"18:" // Main loop skip tail
"cbz x15, 19f\n" // Skip remainder inputs
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"ld1b { z12.s }, p0/Z, [x14]\n"
"add z12.h, p0/M, z12.h, z5.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "add x19, x14, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
+ "ld1b { z20.s }, p0/Z, [x19]\n"
"add z20.h, p0/M, z20.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z13.s }, p0/Z, [x20]\n"
+ "ld1b { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z19.s }, p0/Z, [x20]\n"
+ "ld1b { z19.s }, p0/Z, [x19]\n"
"add z19.h, p0/M, z19.h, z5.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
"trn1 z13.h, z13.h, z19.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z14.s }, p0/Z, [x20]\n"
+ "ld1b { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z15.s }, p0/Z, [x20]\n"
+ "ld1b { z15.s }, p0/Z, [x19]\n"
"add z15.h, p0/M, z15.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
"add z17.h, p0/M, z17.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z5.h\n"
"trn1 z14.h, z14.h, z18.h\n"
"trn1 z15.h, z15.h, z17.h\n"
"mov z16.d, z16.d\n"
- "addvl x20, SP, #4\n"
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ "addvl x19, SP, #4\n"
+ ".inst 0xc17a7588 // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
"sub x13, x13, #0x1\n"
- ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
- ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
+ ".inst 0xc17b75a8 // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+ ".inst 0xa0402a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc0066c1c // mova { z28.d-z31.d }, za.d[x11, #0]\n"
".inst 0xc1a8ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
".inst 0xc1a7aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
- ".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xc17a7589 // sdot za.s[x11, 1], { z12.h-z15.h }, z10.h\n"
".inst 0xc1a4ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
- ".inst 0xc17b15a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z11.h\n"
- "add x8, x8, #0x1\n"
+ ".inst 0xc17b75a9 // sdot za.s[x11, 1], { z13.h-z16.h }, z11.h\n"
+ "add x11, x11, #0x1\n"
".inst 0xc1b7cf7c // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
- "st1b { z28.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- ".inst 0xc0040c02 // mova za.d[x8, #2], { z0.d-z3.d }\n"
- "st1b { z29.s }, p1, [x10]\n"
+ "st1b { z28.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z30.s }, p1, [x27]\n"
- "add x27, x27, x25\n"
- "st1b { z31.s }, p1, [x26]\n"
+ ".inst 0xc0046c02 // mova za.d[x11, #2], { z0.d-z3.d }\n"
+ "st1b { z29.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
+ "st1b { z30.s }, p1, [x26]\n"
"add x26, x26, x24\n"
+ "st1b { z31.s }, p1, [x25]\n"
+ "add x25, x25, x23\n"
"19:" // Tail input: End
"cbz x13, 21f\n"
"20:" // Right padding loop
- ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
+ ".inst 0xc0066c1c // mova { z28.d-z31.d }, za.d[x11, #0]\n"
".inst 0xc1a8ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
- "add x8, x8, #0x1\n"
+ "add x11, x11, #0x1\n"
".inst 0xc1a7aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
"subs x13, x13, #0x1\n"
- ".inst 0xc0040c02 // mova za.d[x8, #2], { z0.d-z3.d }\n"
+ ".inst 0xc0046c02 // mova za.d[x11, #2], { z0.d-z3.d }\n"
".inst 0xc1a4ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
".inst 0xc1b7cf7c // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
- "st1b { z28.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- "st1b { z29.s }, p1, [x10]\n"
+ "st1b { z28.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z30.s }, p1, [x27]\n"
- "add x27, x27, x25\n"
- "st1b { z31.s }, p1, [x26]\n"
+ "st1b { z29.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
+ "st1b { z30.s }, p1, [x26]\n"
"add x26, x26, x24\n"
+ "st1b { z31.s }, p1, [x25]\n"
+ "add x25, x25, x23\n"
"bgt 20b\n"
"21:" // End
- "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
- "incw x22, ALL, MUL #9\n"
- "str x22, [%x[args], %[offsetof_Args_weights]]\n"
+ "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+ "incw x21, ALL, MUL #9\n"
+ "str x21, [%x[args], %[offsetof_Args_weights]]\n"
"incw x16\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
"whilelt p1.s, x16, x17\n"
"ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "add x14, x14, x20\n"
+ "add x14, x14, x19\n"
"str x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "ldr x25, [%x[args], %[offsetof_Args_outptrs]]\n"
- "ldr x24, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
- "ldp x23, x22, [x25, #0x0]\n"
- "ldp x21, x20, [x24, #0x0]\n"
- "add x23, x23, x21\n"
+ "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+ "ldp x22, x21, [x24, #0x0]\n"
+ "ldp x20, x19, [x23, #0x0]\n"
"add x22, x22, x20\n"
- "stp x23, x22, [x25, #0x0]\n"
- "ldp x23, x22, [x25, #0x10]\n"
- "ldp x21, x20, [x24, #0x10]\n"
- "add x23, x23, x21\n"
+ "add x21, x21, x19\n"
+ "stp x22, x21, [x24, #0x0]\n"
+ "ldp x22, x21, [x24, #0x10]\n"
+ "ldp x20, x19, [x23, #0x10]\n"
"add x22, x22, x20\n"
- "stp x23, x22, [x25, #0x10]\n"
+ "add x21, x21, x19\n"
+ "stp x22, x21, [x24, #0x10]\n"
"b.any 1b\n"
"addvl SP, SP, #6\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_5x5_s1_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_5x5_s1_4rows_dot_za/generic.cpp
index 2c19e232f8..4678e82f4e 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_5x5_s1_4rows_dot_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_5x5_s1_4rows_dot_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -69,20 +69,20 @@ void sme2_u8q_planar_5x5_s1_4rows_dot_za_impl(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "ldr x4, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "ldr x5, [%x[args], %[offsetof_Args_pad_bottom]]\n"
"ptrue p2.b\n"
- "mov x20, #0x8\n"
+ "mov x19, #0x8\n"
"ldr x6, [%x[args], %[offsetof_Args_pad_top]]\n"
"ld1rh { z25.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
- "sub x20, x20, x4\n"
+ "sub x19, x19, x5\n"
".inst 0x25207812 // ptrue pn10.b\n"
"ldr x7, [%x[args], %[offsetof_Args_n_channels]]\n"
"whilelt p1.s, XZR, x7\n"
- "whilelt p9.s, XZR, x20\n"
+ "whilelt p9.s, XZR, x19\n"
"ld1rw { z9.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
"whilelt p8.s, XZR, x6\n"
"addvl SP, SP, #-30\n"
- "ldr x5, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ldr x17, [%x[args], %[offsetof_Args_current_channel]]\n"
"neg z25.h, p2/M, z25.h\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
"ld1rw { z3.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
@@ -90,262 +90,298 @@ void sme2_u8q_planar_5x5_s1_4rows_dot_za_impl(
"ld1rw { z24.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
"ld1rw { z31.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
"1:" // Channel loop
- "ldr x20, [%x[qp], %[offsetof_Requantize32_bias]]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_bias]]\n"
"mov z6.s, #0x0\n"
- "cbz x20, 2f\n"
- "ld1w { z6.s }, p1/Z, [x20, x5, LSL #2]\n"
+ "cbz x19, 2f\n"
+ "ld1w { z6.s }, p1/Z, [x19, x17, LSL #2]\n"
"2:" // Load bias: Done
"ldr x23, [%x[args], %[offsetof_Args_weights]]\n"
- "mov x22, x23\n"
- "ld1b { z18.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
+ "mov x21, x23\n"
+ "ld1b { z18.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
"ld1rh { z12.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
"mov z2.h, #0x0\n"
"sub z18.h, z18.h, z12.h\n"
"incw x23\n"
- "ld1b { z17.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
+ "ld1b { z17.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
"sub z17.h, z17.h, z12.h\n"
"trn1 z0.h, z2.h, z18.h\n"
- "ld1b { z21.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
+ "ld1b { z21.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
"sub z21.h, z21.h, z12.h\n"
"trn1 z8.h, z18.h, z17.h\n"
- "ld1b { z16.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
+ "ld1b { z16.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
"sub z16.h, z16.h, z12.h\n"
"trn1 z4.h, z17.h, z21.h\n"
- "ld1b { z15.s }, p2/Z, [x22]\n"
+ "ld1b { z15.s }, p2/Z, [x21]\n"
"sub z15.h, z15.h, z12.h\n"
- "mov x22, x23\n"
+ "mov x21, x23\n"
"trn1 z5.h, z21.h, z16.h\n"
- "ld1b { z18.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
+ "ld1b { z18.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
"trn1 z10.h, z16.h, z15.h\n"
"trn1 z11.h, z15.h, z2.h\n"
- "ld1b { z17.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
+ "ld1b { z17.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
"sub z18.h, z18.h, z12.h\n"
"sub z17.h, z17.h, z12.h\n"
- "ld1b { z21.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
+ "ld1b { z21.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
"sub z21.h, z21.h, z12.h\n"
- "addvl x21, SP, #30\n"
- "ld1b { z16.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
+ "addvl x20, SP, #30\n"
+ "ld1b { z16.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
"incw x23\n"
"sub z16.h, z16.h, z12.h\n"
- "ld1b { z15.s }, p2/Z, [x22]\n"
- "addvl x21, x21, #-6\n"
+ "ld1b { z15.s }, p2/Z, [x21]\n"
+ "addvl x20, x20, #-6\n"
"sub z15.h, z15.h, z12.h\n"
- "mov x22, x23\n"
- "st1h { z0.h }, p2, [x21]\n"
+ "mov x21, x23\n"
+ "st1h { z0.h }, p2, [x20]\n"
"trn1 z0.h, z2.h, z18.h\n"
"incw x23\n"
- "ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
- "st1h { z8.h }, p2, [x21, #1, MUL VL]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+ "st1h { z8.h }, p2, [x20, #1, MUL VL]\n"
"trn1 z8.h, z18.h, z17.h\n"
- "ld1b { z18.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z4.h }, p2, [x21, #2, MUL VL]\n"
+ "ld1b { z18.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z4.h }, p2, [x20, #2, MUL VL]\n"
"trn1 z4.h, z17.h, z21.h\n"
- "ld1b { z17.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z5.h }, p2, [x21, #3, MUL VL]\n"
+ "ld1b { z17.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z5.h }, p2, [x20, #3, MUL VL]\n"
"trn1 z5.h, z21.h, z16.h\n"
- "ld1b { z21.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z10.h }, p2, [x21, #4, MUL VL]\n"
+ "ld1b { z21.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z10.h }, p2, [x20, #4, MUL VL]\n"
"trn1 z10.h, z16.h, z15.h\n"
- "ld1b { z16.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z11.h }, p2, [x21, #5, MUL VL]\n"
+ "ld1b { z16.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z11.h }, p2, [x20, #5, MUL VL]\n"
"trn1 z11.h, z15.h, z2.h\n"
"sub z18.h, z18.h, z12.h\n"
- "addvl x21, x21, #-6\n"
+ "addvl x20, x20, #-6\n"
"sub z17.h, z17.h, z12.h\n"
- "ld1b { z15.s }, p2/Z, [x22]\n"
+ "ld1b { z15.s }, p2/Z, [x21]\n"
"sub z21.h, z21.h, z12.h\n"
- "mov x22, x23\n"
+ "mov x21, x23\n"
"sub z16.h, z16.h, z12.h\n"
"sub z15.h, z15.h, z12.h\n"
- "st1h { z0.h }, p2, [x21]\n"
+ "st1h { z0.h }, p2, [x20]\n"
"incw x23\n"
- "st1h { z8.h }, p2, [x21, #1, MUL VL]\n"
+ "st1h { z8.h }, p2, [x20, #1, MUL VL]\n"
"trn1 z0.h, z2.h, z18.h\n"
"trn1 z8.h, z18.h, z17.h\n"
- "ld1b { z18.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z4.h }, p2, [x21, #2, MUL VL]\n"
+ "ld1b { z18.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z4.h }, p2, [x20, #2, MUL VL]\n"
"trn1 z4.h, z17.h, z21.h\n"
- "ld1b { z17.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z5.h }, p2, [x21, #3, MUL VL]\n"
+ "ld1b { z17.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z5.h }, p2, [x20, #3, MUL VL]\n"
"trn1 z5.h, z21.h, z16.h\n"
- "ld1b { z21.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z10.h }, p2, [x21, #4, MUL VL]\n"
+ "ld1b { z21.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z10.h }, p2, [x20, #4, MUL VL]\n"
"trn1 z10.h, z16.h, z15.h\n"
- "ld1b { z16.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z11.h }, p2, [x21, #5, MUL VL]\n"
+ "ld1b { z16.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z11.h }, p2, [x20, #5, MUL VL]\n"
"trn1 z11.h, z15.h, z2.h\n"
"sub z18.h, z18.h, z12.h\n"
"sub z17.h, z17.h, z12.h\n"
- "ld1b { z15.s }, p2/Z, [x22]\n"
- "addvl x21, x21, #-6\n"
+ "ld1b { z15.s }, p2/Z, [x21]\n"
+ "addvl x20, x20, #-6\n"
"sub z21.h, z21.h, z12.h\n"
"sub z16.h, z16.h, z12.h\n"
- "mov x22, x23\n"
- "st1h { z0.h }, p2, [x21]\n"
+ "mov x21, x23\n"
+ "st1h { z0.h }, p2, [x20]\n"
"sub z15.h, z15.h, z12.h\n"
- "st1h { z8.h }, p2, [x21, #1, MUL VL]\n"
+ "st1h { z8.h }, p2, [x20, #1, MUL VL]\n"
"trn1 z0.h, z2.h, z18.h\n"
"trn1 z8.h, z18.h, z17.h\n"
- "ld1b { z18.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z4.h }, p2, [x21, #2, MUL VL]\n"
+ "ld1b { z18.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z4.h }, p2, [x20, #2, MUL VL]\n"
"trn1 z4.h, z17.h, z21.h\n"
- "ld1b { z17.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z5.h }, p2, [x21, #3, MUL VL]\n"
+ "ld1b { z17.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z5.h }, p2, [x20, #3, MUL VL]\n"
"trn1 z5.h, z21.h, z16.h\n"
- "ld1b { z21.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z10.h }, p2, [x21, #4, MUL VL]\n"
+ "ld1b { z21.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z10.h }, p2, [x20, #4, MUL VL]\n"
"trn1 z10.h, z16.h, z15.h\n"
- "ld1b { z16.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z11.h }, p2, [x21, #5, MUL VL]\n"
+ "ld1b { z16.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z11.h }, p2, [x20, #5, MUL VL]\n"
"trn1 z11.h, z15.h, z2.h\n"
- "ld1b { z15.s }, p2/Z, [x22]\n"
+ "ld1b { z15.s }, p2/Z, [x21]\n"
"sub z18.h, z18.h, z12.h\n"
- "addvl x21, x21, #-6\n"
+ "addvl x20, x20, #-6\n"
"sub z17.h, z17.h, z12.h\n"
"sub z21.h, z21.h, z12.h\n"
- "st1h { z0.h }, p2, [x21]\n"
+ "st1h { z0.h }, p2, [x20]\n"
"sub z16.h, z16.h, z12.h\n"
"sub z15.h, z15.h, z12.h\n"
- "st1h { z8.h }, p2, [x21, #1, MUL VL]\n"
- "st1h { z4.h }, p2, [x21, #2, MUL VL]\n"
+ "st1h { z8.h }, p2, [x20, #1, MUL VL]\n"
+ "st1h { z4.h }, p2, [x20, #2, MUL VL]\n"
"mov z7.d, z6.d\n"
"trn1 z0.h, z2.h, z18.h\n"
- "st1h { z5.h }, p2, [x21, #3, MUL VL]\n"
+ "st1h { z5.h }, p2, [x20, #3, MUL VL]\n"
"trn1 z8.h, z18.h, z17.h\n"
"trn1 z4.h, z17.h, z21.h\n"
- "st1h { z10.h }, p2, [x21, #4, MUL VL]\n"
+ "st1h { z10.h }, p2, [x20, #4, MUL VL]\n"
"trn1 z5.h, z21.h, z16.h\n"
"trn1 z10.h, z16.h, z15.h\n"
- "st1h { z11.h }, p2, [x21, #5, MUL VL]\n"
- "addvl x21, x21, #-6\n"
+ "st1h { z11.h }, p2, [x20, #5, MUL VL]\n"
+ "addvl x20, x20, #-6\n"
"trn1 z11.h, z15.h, z2.h\n"
- "st1h { z0.h }, p2, [x21]\n"
- "st1h { z8.h }, p2, [x21, #1, MUL VL]\n"
- "st1h { z4.h }, p2, [x21, #2, MUL VL]\n"
- "st1h { z5.h }, p2, [x21, #3, MUL VL]\n"
- "st1h { z10.h }, p2, [x21, #4, MUL VL]\n"
- "st1h { z11.h }, p2, [x21, #5, MUL VL]\n"
- "cbz x20, 3f\n"
- "ld1w { z3.s }, p1/Z, [x20, x5, LSL #2]\n"
+ "st1h { z0.h }, p2, [x20]\n"
+ "st1h { z8.h }, p2, [x20, #1, MUL VL]\n"
+ "st1h { z4.h }, p2, [x20, #2, MUL VL]\n"
+ "st1h { z5.h }, p2, [x20, #3, MUL VL]\n"
+ "st1h { z10.h }, p2, [x20, #4, MUL VL]\n"
+ "st1h { z11.h }, p2, [x20, #5, MUL VL]\n"
+ "cbz x19, 3f\n"
+ "ld1w { z3.s }, p1/Z, [x19, x17, LSL #2]\n"
"3:" // Load mul: End
- "ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
- "cbz x20, 4f\n"
- "ld1w { z1.s }, p1/Z, [x20, x5, LSL #2]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+ "cbz x19, 4f\n"
+ "ld1w { z1.s }, p1/Z, [x19, x17, LSL #2]\n"
"4:" // Load right_shift: End
- "ldr x17, [%x[args], %[offsetof_Args_input_cols]]\n"
- "sub x20, x17, #0x1\n"
- "orr x23, x20, %x[ld_in_col], LSL #16\n"
- "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
- "orr x23, x7, x23, LSL #22\n"
- "mov x22, #0x8\n"
- "add x21, x6, x4\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "ldr x15, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "ldr x16, [%x[args], %[offsetof_Args_input_cols]]\n"
+ "sub x19, x16, #0x1\n"
+ "orr x22, x19, %x[ld_in_col], LSL #16\n"
+ "ldr x15, [%x[args], %[offsetof_Args_inptr]]\n"
+ "orr x22, x7, x22, LSL #22\n"
+ "mov x21, #0x8\n"
+ "add x20, x6, x5\n"
+ "lsl x19, %x[ld_in_row], #0x0\n"
+ "ldr x14, [%x[args], %[offsetof_Args_output_cols]]\n"
"mov x11, #0x0\n"
"mov x8, #0x8\n"
- "lsl x23, x23, #0x0\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x6, x16\n"
+ "lsl x22, x22, #0x0\n"
+ "sub x21, x21, x20\n"
+ "madd x19, x19, x6, x15\n"
"5:" // Issue prefetches
- "subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col]\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xf8b64a7c // rprfm pldstrm, x22, [x19]\n"
+ "add x19, x19, %x[ld_in_col]\n"
"bgt 5b\n"
- "ldr x25, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "msub x16, x6, x20, x16\n"
+ "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "lsl x19, %x[ld_in_row], #0x0\n"
+ "msub x15, x6, x19, x15\n"
".inst 0xc00468c0 // mova za.d[x11, #0], { z6.d-z7.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
".inst 0xc00468c1 // mova za.d[x11, #1], { z6.d-z7.d }\n"
- "mov x22, #0x4\n"
- "ldp x14, x13, [x25], #0x10\n"
+ "mov x21, #0x4\n"
+ "ldp x13, x4, [x24], #0x10\n"
".inst 0xc00468c2 // mova za.d[x11, #2], { z6.d-z7.d }\n"
- "ldp x3, x10, [x20], #0x10\n"
+ "ldp x10, x9, [x19], #0x10\n"
".inst 0xc00468c3 // mova za.d[x11, #3], { z6.d-z7.d }\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
+ "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
".inst 0xc00468c4 // mova za.d[x11, #4], { z6.d-z7.d }\n"
- "ldp x9, x28, [x25], #0x10\n"
+ "ldp x28, x27, [x24], #0x10\n"
".inst 0xc00468c5 // mova za.d[x11, #5], { z6.d-z7.d }\n"
- "ldp x27, x26, [x20], #0x10\n"
+ "ldp x26, x25, [x19], #0x10\n"
".inst 0xc00468c6 // mova za.d[x11, #6], { z6.d-z7.d }\n"
".inst 0xc00468c7 // mova za.d[x11, #7], { z6.d-z7.d }\n"
".inst 0xc00408c0 // mova za.d[x8, #0], { z6.d-z7.d }\n"
".inst 0xc00408c1 // mova za.d[x8, #1], { z6.d-z7.d }\n"
- "cbz x21, 7f\n"
- "cmp x21, x22\n"
- "csel x20, x21, x22, LT\n"
- "sub x21, x21, x20\n"
- "sub x22, x22, x20\n"
- "cbz x21, 7f\n"
+ "cbz x20, 7f\n"
+ "cmp x20, x21\n"
+ "csel x19, x20, x21, LT\n"
+ "sub x20, x20, x19\n"
+ "sub x21, x21, x19\n"
+ "cbz x20, 7f\n"
".inst 0xc006680c // mova { z12.d-z13.d }, za.d[x11, #0]\n"
- "sub x15, x15, x21\n"
+ "sub x14, x14, x20\n"
".inst 0xc006682e // mova { z14.d-z15.d }, za.d[x11, #1]\n"
".inst 0xc1a3ac0c // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z3.s\n"
".inst 0xc1a1aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z1.s\n"
".inst 0xc1a9ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z9.s\n"
".inst 0xc1bfcf0c // sclamp { z12.s-z15.s }, z24.s, z31.s\n"
"6:" // Left padding
- "subs x21, x21, #0x1\n"
- "st1b { z12.s }, p1, [x14]\n"
- "add x14, x14, x3\n"
- "st1b { z14.s }, p1, [x13]\n"
+ "subs x20, x20, #0x1\n"
+ "st1b { z12.s }, p1, [x13]\n"
"add x13, x13, x10\n"
- "st1b { z13.s }, p1, [x9]\n"
- "add x9, x9, x27\n"
- "st1b { z15.s }, p1, [x28]\n"
+ "st1b { z14.s }, p1, [x4]\n"
+ "add x4, x4, x9\n"
+ "st1b { z13.s }, p1, [x28]\n"
"add x28, x28, x26\n"
+ "st1b { z15.s }, p1, [x27]\n"
+ "add x27, x27, x25\n"
"bgt 6b\n"
"7:" // Left padding: End
- "adds XZR, x6, x4\n"
+ "adds XZR, x6, x5\n"
"bne 14f\n"
- "cbz x22, 12f\n"
- "cmp x22, #0x1\n"
- "sub x17, x17, x22\n"
+ "cbz x21, 12f\n"
+ "cmp x21, #0x1\n"
+ "sub x16, x16, x21\n"
"beq 11f\n"
- "cmp x22, #0x2\n"
+ "cmp x21, #0x2\n"
"beq 10f\n"
- "cmp x22, #0x3\n"
+ "cmp x21, #0x3\n"
"beq 9f\n"
"8:" // Unpadded: 4 priming loads
- "add x21, x16, %x[ld_in_row]\n"
- "ld1b { z17.s }, p1/Z, [x16]\n"
- "addvl x20, SP, #24\n"
+ "add x20, x15, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x15]\n"
+ "addvl x19, SP, #24\n"
+ "ld1b { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z27.h, z17.h, z16.h\n"
+ "add z27.h, z27.h, z25.h\n"
+ "ld1b { z17.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "add x15, x15, %x[ld_in_col]\n"
+ "ld1b { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z28.h, z17.h, z16.h\n"
+ "add z28.h, z28.h, z25.h\n"
+ "ld1b { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z29.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z29.h, z16.h, z29.h\n"
+ "add z29.h, z29.h, z25.h\n"
+ "ld1b { z17.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xa1402a60 // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc1687768 // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+ "ld1b { z16.s }, p1/Z, [x20]\n"
+ "trn1 z30.h, z17.h, z16.h\n"
+ ".inst 0xc1607769 // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+ ".inst 0xa0412a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+ "add z30.h, z30.h, z25.h\n"
+ ".inst 0xc1657788 // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+ ".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+ ".inst 0xa0422a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+ ".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+ ".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+ "9:" // Unpadded: 3 priming loads
+ "add x21, x15, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x15]\n"
+ "addvl x20, SP, #18\n"
"ld1b { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"trn1 z27.h, z17.h, z16.h\n"
"add z27.h, z27.h, z25.h\n"
"ld1b { z17.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "add x16, x16, %x[ld_in_col]\n"
+ "addvl x19, SP, #24\n"
"ld1b { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"trn1 z28.h, z17.h, z16.h\n"
"add z28.h, z28.h, z25.h\n"
- "ld1b { z16.s }, p1/Z, [x21]\n"
+ "ld1b { z17.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "ld1b { z29.s }, p1/Z, [x21]\n"
+ "add x15, x15, %x[ld_in_col]\n"
+ "ld1b { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z29.h, z16.h, z29.h\n"
+ "trn1 z29.h, z17.h, z16.h\n"
"add z29.h, z29.h, z25.h\n"
"ld1b { z17.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
@@ -354,37 +390,47 @@ void sme2_u8q_planar_5x5_s1_4rows_dot_za_impl(
"ld1b { z16.s }, p1/Z, [x21]\n"
"trn1 z30.h, z17.h, z16.h\n"
".inst 0xc1607769 // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa1402a60 // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc168776a // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
"add z30.h, z30.h, z25.h\n"
+ ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc160776b // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
".inst 0xc1657788 // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
- ".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+ ".inst 0xa0412a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+ ".inst 0xc165778a // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
+ ".inst 0xc164778b // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
- "9:" // Unpadded: 3 priming loads
- "add x22, x16, %x[ld_in_row]\n"
- "ld1b { z17.s }, p1/Z, [x16]\n"
- "addvl x21, SP, #18\n"
+ ".inst 0xa0422a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+ ".inst 0xc16b77aa // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
+ ".inst 0xc16a77ab // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
+ "10:" // Unpadded: 2 priming loads
+ "add x22, x15, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x15]\n"
+ "addvl x21, SP, #12\n"
"ld1b { z16.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"trn1 z27.h, z17.h, z16.h\n"
"add z27.h, z27.h, z25.h\n"
"ld1b { z17.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "addvl x20, SP, #24\n"
+ "addvl x20, SP, #18\n"
"ld1b { z16.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"trn1 z28.h, z17.h, z16.h\n"
"add z28.h, z28.h, z25.h\n"
"ld1b { z17.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "add x16, x16, %x[ld_in_col]\n"
+ "addvl x19, SP, #24\n"
"ld1b { z16.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"trn1 z29.h, z17.h, z16.h\n"
"add z29.h, z29.h, z25.h\n"
"ld1b { z17.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
+ "add x15, x15, %x[ld_in_col]\n"
".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
".inst 0xc1687768 // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
"ld1b { z16.s }, p1/Z, [x22]\n"
@@ -395,44 +441,54 @@ void sme2_u8q_planar_5x5_s1_4rows_dot_za_impl(
"add z30.h, z30.h, z25.h\n"
".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
".inst 0xc160776b // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
+ ".inst 0xa1402a60 // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
".inst 0xc1657788 // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
- ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xc168776c // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
+ ".inst 0xc160776d // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
".inst 0xc165778a // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778b // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
+ ".inst 0xa0412a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xc165778c // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
+ ".inst 0xc164778d // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
".inst 0xc16b77aa // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ab // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
- "10:" // Unpadded: 2 priming loads
- "add x23, x16, %x[ld_in_row]\n"
- "ld1b { z17.s }, p1/Z, [x16]\n"
- "addvl x22, SP, #12\n"
+ ".inst 0xa0422a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+ ".inst 0xc16b77ac // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
+ ".inst 0xc16a77ad // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
+ "11:" // Unpadded: 1 priming loads
+ "add x23, x15, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x15]\n"
+ "addvl x22, SP, #6\n"
"ld1b { z16.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
"trn1 z27.h, z17.h, z16.h\n"
"add z27.h, z27.h, z25.h\n"
"ld1b { z17.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- "addvl x21, SP, #18\n"
+ "addvl x21, SP, #12\n"
"ld1b { z16.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
"trn1 z28.h, z17.h, z16.h\n"
"add z28.h, z28.h, z25.h\n"
"ld1b { z17.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- "addvl x20, SP, #24\n"
+ "addvl x20, SP, #18\n"
"ld1b { z16.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
"trn1 z29.h, z17.h, z16.h\n"
"add z29.h, z29.h, z25.h\n"
"ld1b { z17.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- "add x16, x16, %x[ld_in_col]\n"
+ "addvl x19, SP, #24\n"
".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
".inst 0xc1687768 // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+ "add x15, x15, %x[ld_in_col]\n"
"ld1b { z16.s }, p1/Z, [x23]\n"
"trn1 z30.h, z17.h, z16.h\n"
".inst 0xc1607769 // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
@@ -448,177 +504,121 @@ void sme2_u8q_planar_5x5_s1_4rows_dot_za_impl(
".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0xc168776c // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
".inst 0xc160776d // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
+ ".inst 0xa1402a60 // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
".inst 0xc165778a // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778b // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc165778c // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
- ".inst 0xc164778d // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
- ".inst 0xc16b77aa // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
- ".inst 0xc16a77ab // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc16b77ac // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
- ".inst 0xc16a77ad // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
- "11:" // Unpadded: 1 priming loads
- "add x24, x16, %x[ld_in_row]\n"
- "ld1b { z17.s }, p1/Z, [x16]\n"
- "addvl x23, SP, #6\n"
- "ld1b { z16.s }, p1/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row]\n"
- "trn1 z27.h, z17.h, z16.h\n"
- "add z27.h, z27.h, z25.h\n"
- "ld1b { z17.s }, p1/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row]\n"
- "addvl x22, SP, #12\n"
- "ld1b { z16.s }, p1/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row]\n"
- "trn1 z28.h, z17.h, z16.h\n"
- "add z28.h, z28.h, z25.h\n"
- "ld1b { z17.s }, p1/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row]\n"
- "addvl x21, SP, #18\n"
- "ld1b { z16.s }, p1/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row]\n"
- "trn1 z29.h, z17.h, z16.h\n"
- "add z29.h, z29.h, z25.h\n"
- "ld1b { z17.s }, p1/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row]\n"
- "addvl x20, SP, #24\n"
- ".inst 0xa1402ae0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x23]\n"
- ".inst 0xc1687768 // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
- "add x16, x16, %x[ld_in_col]\n"
- "ld1b { z16.s }, p1/Z, [x24]\n"
- "trn1 z30.h, z17.h, z16.h\n"
- ".inst 0xc1607769 // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
- ".inst 0xc168776a // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
- "add z30.h, z30.h, z25.h\n"
- ".inst 0xa0412ae4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
- ".inst 0xc160776b // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc1657788 // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
- ".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xa0422aea // ld1h { z10.h-z11.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xc168776c // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
- ".inst 0xc160776d // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc165778a // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
- ".inst 0xc164778b // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
- ".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0xc168776e // sdot za.s[x11, 6], { z27.h-z28.h }, z8.h\n"
".inst 0xc160776f // sdot za.s[x11, 7], { z27.h-z28.h }, z0.h\n"
".inst 0xc165778c // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778d // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa0412a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
".inst 0xc16b77aa // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ab // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
".inst 0xc165778e // sdot za.s[x11, 6], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778f // sdot za.s[x11, 7], { z28.h-z29.h }, z4.h\n"
".inst 0xc16b77ac // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ad // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xa0422a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
".inst 0xc16b77ae // sdot za.s[x11, 6], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77af // sdot za.s[x11, 7], { z29.h-z30.h }, z10.h\n"
"12:" // Unpadded: 0 priming loads
".inst 0xa1402be0 // ld1h { z0.h, z8.h }, pn10.b/Z, [SP]\n"
".inst 0xa0412be4 // ld1h { z4.h-z5.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
".inst 0xa0422bea // ld1h { z10.h-z11.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
- "cbz x17, 22f\n"
- "add x20, x16, %x[ld_in_row]\n"
- "ld1b { z17.s }, p1/Z, [x16]\n"
- "sub x17, x17, #0x1\n"
- "ld1b { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "cbz x16, 22f\n"
+ "add x19, x15, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x15]\n"
+ "sub x16, x16, #0x1\n"
+ "ld1b { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z27.h, z17.h, z16.h\n"
- "sub x15, x15, #0x1\n"
- "ld1b { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- "cmp x17, x15\n"
+ "sub x14, x14, #0x1\n"
+ "ld1b { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "cmp x16, x14\n"
"add z27.h, z27.h, z25.h\n"
- "ld1b { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z28.h, z17.h, z16.h\n"
- "csel x25, x17, x15, LT\n"
- "ld1b { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "csel x24, x16, x14, LT\n"
+ "ld1b { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"add z28.h, z28.h, z25.h\n"
- "add x16, x16, %x[ld_in_col]\n"
- "ld1b { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x15, x15, %x[ld_in_col]\n"
+ "ld1b { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z29.h, z17.h, z16.h\n"
"add z29.h, z29.h, z25.h\n"
- "ld1b { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- "sub x15, x15, x25\n"
- "ld1b { z16.s }, p1/Z, [x20]\n"
+ "ld1b { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub x14, x14, x24\n"
+ "ld1b { z16.s }, p1/Z, [x19]\n"
"trn1 z30.h, z17.h, z16.h\n"
"add z30.h, z30.h, z25.h\n"
- "cbz x25, 21f\n"
+ "cbz x24, 21f\n"
"13:" // Unpadded: Main loop
- "addvl x24, SP, #6\n"
+ "addvl x23, SP, #6\n"
".inst 0xc1687768 // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
- "addvl x23, SP, #12\n"
- "ld1b { z23.s }, p1/Z, [x16]\n"
+ "addvl x22, SP, #12\n"
+ "ld1b { z23.s }, p1/Z, [x15]\n"
".inst 0xc1607769 // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402b00 // ld1h { z0.h, z8.h }, pn10.b/Z, [x24]\n"
- "addvl x22, SP, #18\n"
- "addvl x21, SP, #24\n"
+ ".inst 0xa1402ae0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x23]\n"
+ "addvl x21, SP, #18\n"
+ "addvl x20, SP, #24\n"
".inst 0xc168776a // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
- "add x20, x16, %x[ld_in_row]\n"
- "ld1b { z22.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x15, %x[ld_in_row]\n"
+ "ld1b { z22.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0xc160776b // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402ae0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x23]\n"
- "subs x25, x25, #0x1\n"
- "add x16, x16, %x[ld_in_col]\n"
+ ".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+ "subs x24, x24, #0x1\n"
+ "add x15, x15, %x[ld_in_col]\n"
".inst 0xc1657788 // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
- "ld1b { z21.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z21.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412b04 // ld1h { z4.h-z5.h }, pn10.b/Z, [x24, #0x2, MUL VL]\n"
+ ".inst 0xa0412ae4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
".inst 0xc168776c // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
- "ld1b { z20.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z20.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0xc160776d // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+ ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
".inst 0xc165778a // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
- "ld1b { z19.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z19.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0xc164778b // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412ae4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xa0412ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
- "ld1b { z18.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z18.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422b0a // ld1h { z10.h-z11.h }, pn10.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xa0422aea // ld1h { z10.h-z11.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
".inst 0xc168776e // sdot za.s[x11, 6], { z27.h-z28.h }, z8.h\n"
- "ld1b { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0xc160776f // sdot za.s[x11, 7], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
".inst 0xc165778c // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
- "ld1b { z16.s }, p1/Z, [x20]\n"
+ "ld1b { z16.s }, p1/Z, [x19]\n"
".inst 0xc164778d // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
".inst 0xc16b77aa // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ab // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aea // ld1h { z10.h-z11.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0xc165778e // sdot za.s[x11, 6], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778f // sdot za.s[x11, 7], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
".inst 0xc16b77ac // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ad // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
".inst 0xc16b77ae // sdot za.s[x11, 6], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77af // sdot za.s[x11, 7], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
".inst 0xc1681768 // sdot za.s[x8, 0], { z27.h-z28.h }, z8.h\n"
".inst 0xc1601769 // sdot za.s[x8, 1], { z27.h-z28.h }, z0.h\n"
"trn1 z27.h, z23.h, z22.h\n"
@@ -645,407 +645,407 @@ void sme2_u8q_planar_5x5_s1_4rows_dot_za_impl(
".inst 0xc1a9ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z9.s\n"
".inst 0xc00408c1 // mova za.d[x8, #1], { z6.d-z7.d }\n"
".inst 0xc1bfcf0c // sclamp { z12.s-z15.s }, z24.s, z31.s\n"
- "st1b { z12.s }, p1, [x14]\n"
- "add x14, x14, x3\n"
- "add z30.h, z30.h, z25.h\n"
- "st1b { z14.s }, p1, [x13]\n"
+ "st1b { z12.s }, p1, [x13]\n"
"add x13, x13, x10\n"
- "st1b { z13.s }, p1, [x9]\n"
- "add x9, x9, x27\n"
- "st1b { z15.s }, p1, [x28]\n"
+ "add z30.h, z30.h, z25.h\n"
+ "st1b { z14.s }, p1, [x4]\n"
+ "add x4, x4, x9\n"
+ "st1b { z13.s }, p1, [x28]\n"
"add x28, x28, x26\n"
+ "st1b { z15.s }, p1, [x27]\n"
+ "add x27, x27, x25\n"
"bgt 13b\n"
"b 21f\n"
"14:" // Padded
- "cbz x22, 19f\n"
- "cmp x22, #0x1\n"
- "sub x17, x17, x22\n"
+ "cbz x21, 19f\n"
+ "cmp x21, #0x1\n"
+ "sub x16, x16, x21\n"
"beq 18f\n"
- "cmp x22, #0x2\n"
+ "cmp x21, #0x2\n"
"beq 17f\n"
- "cmp x22, #0x3\n"
+ "cmp x21, #0x3\n"
"beq 16f\n"
"15:" // Padded: 4 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z19.s }, p0/Z, [x16]\n"
+ "ld1b { z19.s }, p0/Z, [x15]\n"
"add z19.h, p0/M, z19.h, z25.h\n"
- "add x21, x16, %x[ld_in_row]\n"
+ "add x20, x15, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x21]\n"
+ "ld1b { z18.s }, p0/Z, [x20]\n"
"add z18.h, p0/M, z18.h, z25.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z17.s }, p0/Z, [x21]\n"
+ "ld1b { z17.s }, p0/Z, [x20]\n"
"add z17.h, p0/M, z17.h, z25.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x21]\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
"mov x12, #0x4\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z27.h, z19.h, z18.h\n"
"trn1 z28.h, z17.h, z16.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z18.s }, p0/Z, [x21]\n"
+ "ld1b { z18.s }, p0/Z, [x20]\n"
"add z18.h, p0/M, z18.h, z25.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z16.s }, p0/Z, [x21]\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z17.s }, p0/Z, [x21]\n"
- "addvl x20, SP, #24\n"
+ "ld1b { z17.s }, p0/Z, [x20]\n"
+ "addvl x19, SP, #24\n"
"add z17.h, p0/M, z17.h, z25.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a60 // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
"trn1 z29.h, z18.h, z16.h\n"
- "ld1b { z16.s }, p0/Z, [x21]\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
".inst 0xc1687768 // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
- "add x16, x16, %x[ld_in_col]\n"
+ "add x15, x15, %x[ld_in_col]\n"
".inst 0xc1607769 // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa0412a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
"trn1 z30.h, z17.h, z16.h\n"
".inst 0xc1657788 // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xa0422a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
"16:" // Padded: 3 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z19.s }, p0/Z, [x16]\n"
+ "ld1b { z19.s }, p0/Z, [x15]\n"
"add z19.h, p0/M, z19.h, z25.h\n"
- "add x20, x16, %x[ld_in_row]\n"
+ "add x19, x15, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z27.h, z19.h, z18.h\n"
"trn1 z28.h, z17.h, z16.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "addvl x21, SP, #18\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
+ "addvl x20, SP, #18\n"
"add z17.h, p0/M, z17.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
"trn1 z29.h, z18.h, z16.h\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "addvl x20, SP, #24\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
+ "addvl x19, SP, #24\n"
"add z16.h, p0/M, z16.h, z25.h\n"
".inst 0xc1687768 // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
".inst 0xc1607769 // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a60 // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
"trn1 z30.h, z17.h, z16.h\n"
- "add x16, x16, %x[ld_in_col]\n"
- ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ "add x15, x15, %x[ld_in_col]\n"
+ ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
".inst 0xc168776a // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
".inst 0xc160776b // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
".inst 0xc1657788 // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa0412a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
".inst 0xc165778a // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778b // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xa0422a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
".inst 0xc16b77aa // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ab // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
"17:" // Padded: 2 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z19.s }, p0/Z, [x16]\n"
+ "ld1b { z19.s }, p0/Z, [x15]\n"
"add z19.h, p0/M, z19.h, z25.h\n"
- "add x20, x16, %x[ld_in_row]\n"
+ "add x19, x15, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z27.h, z19.h, z18.h\n"
"trn1 z28.h, z17.h, z16.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "addvl x22, SP, #12\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
+ "addvl x21, SP, #12\n"
"add z17.h, p0/M, z17.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+ ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
"trn1 z29.h, z18.h, z16.h\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "addvl x21, SP, #18\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
+ "addvl x20, SP, #18\n"
"add z16.h, p0/M, z16.h, z25.h\n"
".inst 0xc1687768 // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
".inst 0xc1607769 // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
- "addvl x20, SP, #24\n"
+ ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+ "addvl x19, SP, #24\n"
"trn1 z30.h, z17.h, z16.h\n"
- ".inst 0xa0412ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
".inst 0xc168776a // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
- "add x16, x16, %x[ld_in_col]\n"
+ "add x15, x15, %x[ld_in_col]\n"
".inst 0xc160776b // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a60 // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
".inst 0xc1657788 // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
- ".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
".inst 0xc168776c // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
".inst 0xc160776d // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
".inst 0xc165778a // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778b // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa0412a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
".inst 0xc165778c // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778d // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
".inst 0xc16b77aa // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ab // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xa0422a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
".inst 0xc16b77ac // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ad // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
"18:" // Padded: 1 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z19.s }, p0/Z, [x16]\n"
+ "ld1b { z19.s }, p0/Z, [x15]\n"
"add z19.h, p0/M, z19.h, z25.h\n"
- "add x20, x16, %x[ld_in_row]\n"
+ "add x19, x15, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z27.h, z19.h, z18.h\n"
"trn1 z28.h, z17.h, z16.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "addvl x23, SP, #6\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
+ "addvl x22, SP, #6\n"
"add z17.h, p0/M, z17.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xa1402ae0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x23]\n"
+ ".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
"trn1 z29.h, z18.h, z16.h\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "addvl x22, SP, #12\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
+ "addvl x21, SP, #12\n"
"add z16.h, p0/M, z16.h, z25.h\n"
".inst 0xc1687768 // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
".inst 0xc1607769 // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
- "addvl x21, SP, #18\n"
+ ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+ "addvl x20, SP, #18\n"
"trn1 z30.h, z17.h, z16.h\n"
- ".inst 0xa0412ae4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xa0412ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
".inst 0xc168776a // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
- "addvl x20, SP, #24\n"
- "add x16, x16, %x[ld_in_col]\n"
+ "addvl x19, SP, #24\n"
+ "add x15, x15, %x[ld_in_col]\n"
".inst 0xc160776b // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
".inst 0xc1657788 // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
- ".inst 0xa0422aea // ld1h { z10.h-z11.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
".inst 0xc168776c // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
".inst 0xc160776d // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a60 // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
".inst 0xc165778a // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778b // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
".inst 0xc168776e // sdot za.s[x11, 6], { z27.h-z28.h }, z8.h\n"
".inst 0xc160776f // sdot za.s[x11, 7], { z27.h-z28.h }, z0.h\n"
".inst 0xc165778c // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778d // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa0412a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
".inst 0xc16b77aa // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ab // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
".inst 0xc165778e // sdot za.s[x11, 6], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778f // sdot za.s[x11, 7], { z28.h-z29.h }, z4.h\n"
".inst 0xc16b77ac // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ad // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xa0422a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
".inst 0xc16b77ae // sdot za.s[x11, 6], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77af // sdot za.s[x11, 7], { z29.h-z30.h }, z10.h\n"
"19:" // Padded: 0 priming loads
".inst 0xa1402be0 // ld1h { z0.h, z8.h }, pn10.b/Z, [SP]\n"
".inst 0xa0412be4 // ld1h { z4.h-z5.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
".inst 0xa0422bea // ld1h { z10.h-z11.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
- "cbz x17, 22f\n"
+ "cbz x16, 22f\n"
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z19.s }, p0/Z, [x16]\n"
+ "ld1b { z19.s }, p0/Z, [x15]\n"
"add z19.h, p0/M, z19.h, z25.h\n"
- "add x20, x16, %x[ld_in_row]\n"
+ "add x19, x15, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z27.h, z19.h, z18.h\n"
"trn1 z28.h, z17.h, z16.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z19.s }, p0/Z, [x20]\n"
+ "ld1b { z19.s }, p0/Z, [x19]\n"
"add z19.h, p0/M, z19.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
- "sub x17, x17, #0x1\n"
- "sub x15, x15, #0x1\n"
- "cmp x17, x15\n"
+ "sub x16, x16, #0x1\n"
+ "sub x14, x14, #0x1\n"
+ "cmp x16, x14\n"
"trn1 z29.h, z19.h, z18.h\n"
"trn1 z30.h, z17.h, z16.h\n"
- "csel x25, x17, x15, LT\n"
- "add x16, x16, %x[ld_in_col]\n"
- "sub x15, x15, x25\n"
- "cbz x25, 21f\n"
+ "csel x24, x16, x14, LT\n"
+ "add x15, x15, %x[ld_in_col]\n"
+ "sub x14, x14, x24\n"
+ "cbz x24, 21f\n"
"20:" // Padded: Main loop
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z23.s }, p0/Z, [x16]\n"
+ "ld1b { z23.s }, p0/Z, [x15]\n"
"add z23.h, p0/M, z23.h, z25.h\n"
- "add x24, x16, %x[ld_in_row]\n"
+ "add x23, x15, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z22.s }, p0/Z, [x24]\n"
+ "ld1b { z22.s }, p0/Z, [x23]\n"
".inst 0xc1687768 // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
- "addvl x23, SP, #6\n"
+ "addvl x22, SP, #6\n"
".inst 0xc1607769 // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402ae0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x23]\n"
- "addvl x22, SP, #12\n"
+ ".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+ "addvl x21, SP, #12\n"
"add z22.h, p0/M, z22.h, z25.h\n"
- "add x24, x24, %x[ld_in_row]\n"
+ "add x23, x23, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc168776a // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
".inst 0xc160776b // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
- "addvl x21, SP, #18\n"
- "addvl x20, SP, #24\n"
- "ld1b { z21.s }, p0/Z, [x24]\n"
+ ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+ "addvl x20, SP, #18\n"
+ "addvl x19, SP, #24\n"
+ "ld1b { z21.s }, p0/Z, [x23]\n"
".inst 0xc1657788 // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
"add z21.h, p0/M, z21.h, z25.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412ae4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xa0412ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
"mov x12, #0x4\n"
- "add x24, x24, %x[ld_in_row]\n"
+ "add x23, x23, %x[ld_in_row]\n"
".inst 0xc168776c // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
- "ld1b { z20.s }, p0/Z, [x24]\n"
+ "ld1b { z20.s }, p0/Z, [x23]\n"
"add z20.h, p0/M, z20.h, z25.h\n"
- "add x24, x24, %x[ld_in_row]\n"
+ "add x23, x23, %x[ld_in_row]\n"
".inst 0xc160776d // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "subs x25, x25, #0x1\n"
+ "subs x24, x24, #0x1\n"
".inst 0xc165778a // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
- "ld1b { z19.s }, p0/Z, [x24]\n"
+ "ld1b { z19.s }, p0/Z, [x23]\n"
"add z19.h, p0/M, z19.h, z25.h\n"
- "add x24, x24, %x[ld_in_row]\n"
+ "add x23, x23, %x[ld_in_row]\n"
".inst 0xc164778b // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "add x16, x16, %x[ld_in_col]\n"
+ "add x15, x15, %x[ld_in_col]\n"
".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
- "ld1b { z18.s }, p0/Z, [x24]\n"
+ "ld1b { z18.s }, p0/Z, [x23]\n"
"add z18.h, p0/M, z18.h, z25.h\n"
- "add x24, x24, %x[ld_in_row]\n"
+ "add x23, x23, %x[ld_in_row]\n"
".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aea // ld1h { z10.h-z11.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc168776e // sdot za.s[x11, 6], { z27.h-z28.h }, z8.h\n"
- "ld1b { z17.s }, p0/Z, [x24]\n"
+ "ld1b { z17.s }, p0/Z, [x23]\n"
"add z17.h, p0/M, z17.h, z25.h\n"
- "add x24, x24, %x[ld_in_row]\n"
+ "add x23, x23, %x[ld_in_row]\n"
".inst 0xc160776f // sdot za.s[x11, 7], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a60 // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
".inst 0xc165778c // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
- "ld1b { z16.s }, p0/Z, [x24]\n"
+ "ld1b { z16.s }, p0/Z, [x23]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
".inst 0xc164778d // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
".inst 0xc16b77aa // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ab // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
".inst 0xc165778e // sdot za.s[x11, 6], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778f // sdot za.s[x11, 7], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa0412a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
".inst 0xc16b77ac // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ad // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
".inst 0xc16b77ae // sdot za.s[x11, 6], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77af // sdot za.s[x11, 7], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xa0422a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
".inst 0xc1681768 // sdot za.s[x8, 0], { z27.h-z28.h }, z8.h\n"
".inst 0xc1601769 // sdot za.s[x8, 1], { z27.h-z28.h }, z0.h\n"
".inst 0xa1402be0 // ld1h { z0.h, z8.h }, pn10.b/Z, [SP]\n"
@@ -1069,56 +1069,56 @@ void sme2_u8q_planar_5x5_s1_4rows_dot_za_impl(
".inst 0xc1a9ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z9.s\n"
".inst 0xc00408c1 // mova za.d[x8, #1], { z6.d-z7.d }\n"
".inst 0xc1bfcf0c // sclamp { z12.s-z15.s }, z24.s, z31.s\n"
- "st1b { z12.s }, p1, [x14]\n"
- "add x14, x14, x3\n"
- "st1b { z14.s }, p1, [x13]\n"
+ "st1b { z12.s }, p1, [x13]\n"
"add x13, x13, x10\n"
- "st1b { z13.s }, p1, [x9]\n"
- "add x9, x9, x27\n"
- "st1b { z15.s }, p1, [x28]\n"
+ "st1b { z14.s }, p1, [x4]\n"
+ "add x4, x4, x9\n"
+ "st1b { z13.s }, p1, [x28]\n"
"add x28, x28, x26\n"
+ "st1b { z15.s }, p1, [x27]\n"
+ "add x27, x27, x25\n"
"bgt 20b\n"
"21:" // Main loop tail
- "addvl x23, SP, #6\n"
+ "addvl x22, SP, #6\n"
".inst 0xc1687768 // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
- "addvl x22, SP, #12\n"
+ "addvl x21, SP, #12\n"
".inst 0xc1607769 // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402ae0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x23]\n"
- "addvl x21, SP, #18\n"
- "addvl x20, SP, #24\n"
+ ".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+ "addvl x20, SP, #18\n"
+ "addvl x19, SP, #24\n"
".inst 0xc168776a // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
".inst 0xc160776b // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+ ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
".inst 0xc1657788 // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412ae4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xa0412ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
".inst 0xc168776c // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
".inst 0xc160776d // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
".inst 0xc165778a // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778b // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aea // ld1h { z10.h-z11.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0xc168776e // sdot za.s[x11, 6], { z27.h-z28.h }, z8.h\n"
".inst 0xc160776f // sdot za.s[x11, 7], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a60 // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
".inst 0xc165778c // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778d // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
".inst 0xc16b77aa // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ab // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
".inst 0xc165778e // sdot za.s[x11, 6], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778f // sdot za.s[x11, 7], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa0412a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
".inst 0xc16b77ac // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ad // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
".inst 0xc16b77ae // sdot za.s[x11, 6], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77af // sdot za.s[x11, 7], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xa0422a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
".inst 0xc1681768 // sdot za.s[x8, 0], { z27.h-z28.h }, z8.h\n"
".inst 0xc1601769 // sdot za.s[x8, 1], { z27.h-z28.h }, z0.h\n"
".inst 0xc1651788 // sdot za.s[x8, 0], { z28.h-z29.h }, z5.h\n"
@@ -1135,20 +1135,20 @@ void sme2_u8q_planar_5x5_s1_4rows_dot_za_impl(
".inst 0xc1a9ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z9.s\n"
".inst 0xc00408c1 // mova za.d[x8, #1], { z6.d-z7.d }\n"
".inst 0xc1bfcf0c // sclamp { z12.s-z15.s }, z24.s, z31.s\n"
- "st1b { z12.s }, p1, [x14]\n"
- "add x14, x14, x3\n"
- "st1b { z14.s }, p1, [x13]\n"
+ "st1b { z12.s }, p1, [x13]\n"
"add x13, x13, x10\n"
- "st1b { z13.s }, p1, [x9]\n"
- "add x9, x9, x27\n"
- "st1b { z15.s }, p1, [x28]\n"
+ "st1b { z14.s }, p1, [x4]\n"
+ "add x4, x4, x9\n"
+ "st1b { z13.s }, p1, [x28]\n"
"add x28, x28, x26\n"
+ "st1b { z15.s }, p1, [x27]\n"
+ "add x27, x27, x25\n"
"22:" // Main loop skip tail
- "cbz x15, 24f\n"
+ "cbz x14, 24f\n"
"23:" // Right padding loop
".inst 0xc006680c // mova { z12.d-z13.d }, za.d[x11, #0]\n"
"add x8, x8, #0x2\n"
- "subs x15, x15, #0x1\n"
+ "subs x14, x14, #0x1\n"
".inst 0xc006682e // mova { z14.d-z15.d }, za.d[x11, #1]\n"
".inst 0xc1a3ac0c // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z3.s\n"
"add x11, x11, #0x2\n"
@@ -1157,44 +1157,44 @@ void sme2_u8q_planar_5x5_s1_4rows_dot_za_impl(
".inst 0xc1a9ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z9.s\n"
".inst 0xc00408c1 // mova za.d[x8, #1], { z6.d-z7.d }\n"
".inst 0xc1bfcf0c // sclamp { z12.s-z15.s }, z24.s, z31.s\n"
- "st1b { z12.s }, p1, [x14]\n"
- "add x14, x14, x3\n"
- "st1b { z14.s }, p1, [x13]\n"
+ "st1b { z12.s }, p1, [x13]\n"
"add x13, x13, x10\n"
- "st1b { z13.s }, p1, [x9]\n"
- "add x9, x9, x27\n"
- "st1b { z15.s }, p1, [x28]\n"
+ "st1b { z14.s }, p1, [x4]\n"
+ "add x4, x4, x9\n"
+ "st1b { z13.s }, p1, [x28]\n"
"add x28, x28, x26\n"
+ "st1b { z15.s }, p1, [x27]\n"
+ "add x27, x27, x25\n"
"bgt 23b\n"
"24:" // End
"ldr x23, [%x[args], %[offsetof_Args_weights]]\n"
"incw x23, ALL, MUL #16\n"
"incw x23, ALL, MUL #9\n"
"str x23, [%x[args], %[offsetof_Args_weights]]\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "incw x5\n"
- "whilelt p1.s, x5, x7\n"
- "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
- "add x16, x16, x20\n"
- "str x16, [%x[args], %[offsetof_Args_inptr]]\n"
- "ldr x25, [%x[args], %[offsetof_Args_outptrs]]\n"
- "ldr x24, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
- "ldp x23, x22, [x25, #0x0]\n"
- "ldp x21, x20, [x24, #0x0]\n"
- "add x23, x23, x21\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+ "incw x17\n"
+ "whilelt p1.s, x17, x7\n"
+ "ldr x15, [%x[args], %[offsetof_Args_inptr]]\n"
+ "add x15, x15, x19\n"
+ "str x15, [%x[args], %[offsetof_Args_inptr]]\n"
+ "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+ "ldp x22, x21, [x24, #0x0]\n"
+ "ldp x20, x19, [x23, #0x0]\n"
"add x22, x22, x20\n"
- "stp x23, x22, [x25, #0x0]\n"
- "ldp x23, x22, [x25, #0x10]\n"
- "ldp x21, x20, [x24, #0x10]\n"
- "add x23, x23, x21\n"
+ "add x21, x21, x19\n"
+ "stp x22, x21, [x24, #0x0]\n"
+ "ldp x22, x21, [x24, #0x10]\n"
+ "ldp x20, x19, [x23, #0x10]\n"
"add x22, x22, x20\n"
- "stp x23, x22, [x25, #0x10]\n"
+ "add x21, x21, x19\n"
+ "stp x22, x21, [x24, #0x10]\n"
"b.any 1b\n"
"addvl SP, SP, #30\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_5x5_s2_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_5x5_s2_4rows_dot_za/generic.cpp
index 468e6778a4..84e8c8bea8 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_5x5_s2_4rows_dot_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_5x5_s2_4rows_dot_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -69,20 +69,20 @@ void sme2_u8q_planar_5x5_s2_4rows_dot_za_impl(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "ldr x3, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "ldr x4, [%x[args], %[offsetof_Args_pad_bottom]]\n"
"ptrue p2.b\n"
- "mov x20, #0xb\n"
- "ldr x4, [%x[args], %[offsetof_Args_pad_top]]\n"
+ "mov x19, #0xb\n"
+ "ldr x5, [%x[args], %[offsetof_Args_pad_top]]\n"
"ld1rh { z9.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
- "sub x20, x20, x3\n"
+ "sub x19, x19, x4\n"
".inst 0x25207812 // ptrue pn10.b\n"
- "ldr x5, [%x[args], %[offsetof_Args_n_channels]]\n"
- "whilelt p1.s, XZR, x5\n"
- "whilelt p9.s, XZR, x20\n"
+ "ldr x6, [%x[args], %[offsetof_Args_n_channels]]\n"
+ "whilelt p1.s, XZR, x6\n"
+ "whilelt p9.s, XZR, x19\n"
"ld1rw { z8.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
- "whilelt p8.s, XZR, x4\n"
+ "whilelt p8.s, XZR, x5\n"
"addvl SP, SP, #-15\n"
- "ldr x6, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ldr x7, [%x[args], %[offsetof_Args_current_channel]]\n"
"neg z9.h, p2/M, z9.h\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
"ld1rw { z3.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
@@ -90,227 +90,310 @@ void sme2_u8q_planar_5x5_s2_4rows_dot_za_impl(
"ld1rw { z26.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
"ld1rw { z23.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
"1:" // Channel loop
- "ldr x20, [%x[qp], %[offsetof_Requantize32_bias]]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_bias]]\n"
"mov z28.s, #0x0\n"
- "cbz x20, 2f\n"
- "ld1w { z28.s }, p1/Z, [x20, x6, LSL #2]\n"
+ "cbz x19, 2f\n"
+ "ld1w { z28.s }, p1/Z, [x19, x7, LSL #2]\n"
"2:" // Load bias: Done
- "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
- "mov x20, x22\n"
- "ld1b { z12.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+ "mov x19, x21\n"
+ "ld1b { z12.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"ld1rh { z18.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
"sub z12.h, z12.h, z18.h\n"
- "incw x22\n"
+ "incw x21\n"
"mov z14.h, #0x0\n"
- "ld1b { z25.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "ld1b { z25.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z25.h, z25.h, z18.h\n"
"trn1 z2.h, z12.h, z25.h\n"
- "ld1b { z24.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "ld1b { z24.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z24.h, z24.h, z18.h\n"
- "addvl x21, SP, #15\n"
- "ld1b { z17.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "addvl x20, SP, #15\n"
+ "ld1b { z17.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z17.h, z17.h, z18.h\n"
"trn1 z10.h, z24.h, z17.h\n"
- "ld1b { z16.s }, p2/Z, [x20]\n"
- "mov x20, x22\n"
+ "ld1b { z16.s }, p2/Z, [x19]\n"
+ "mov x19, x21\n"
"sub z16.h, z16.h, z18.h\n"
- "incw x22\n"
- "ld1b { z12.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "incw x21\n"
+ "ld1b { z12.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z12.h, z12.h, z18.h\n"
- "addvl x21, x21, #-3\n"
- "ld1b { z25.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "addvl x20, x20, #-3\n"
+ "ld1b { z25.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z25.h, z25.h, z18.h\n"
"trn1 z0.h, z16.h, z14.h\n"
- "ld1b { z24.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "ld1b { z24.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z24.h, z24.h, z18.h\n"
- "st1h { z2.h }, p2, [x21]\n"
- "ld1b { z17.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "st1h { z2.h }, p2, [x20]\n"
+ "ld1b { z17.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z17.h, z17.h, z18.h\n"
"trn1 z2.h, z12.h, z25.h\n"
- "ld1b { z16.s }, p2/Z, [x20]\n"
- "mov x20, x22\n"
- "st1h { z10.h }, p2, [x21, #1, MUL VL]\n"
+ "ld1b { z16.s }, p2/Z, [x19]\n"
+ "mov x19, x21\n"
+ "st1h { z10.h }, p2, [x20, #1, MUL VL]\n"
"sub z16.h, z16.h, z18.h\n"
- "ld1b { z12.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "ld1b { z12.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"trn1 z10.h, z24.h, z17.h\n"
"sub z12.h, z12.h, z18.h\n"
- "ld1b { z25.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "ld1b { z25.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z25.h, z25.h, z18.h\n"
- "st1h { z0.h }, p2, [x21, #2, MUL VL]\n"
- "ld1b { z24.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "st1h { z0.h }, p2, [x20, #2, MUL VL]\n"
+ "ld1b { z24.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"trn1 z0.h, z16.h, z14.h\n"
- "incw x22\n"
- "ld1b { z17.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "incw x21\n"
+ "ld1b { z17.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z24.h, z24.h, z18.h\n"
"sub z17.h, z17.h, z18.h\n"
- "ld1b { z16.s }, p2/Z, [x20]\n"
- "addvl x21, x21, #-3\n"
- "mov x20, x22\n"
- "st1h { z2.h }, p2, [x21]\n"
+ "ld1b { z16.s }, p2/Z, [x19]\n"
+ "addvl x20, x20, #-3\n"
+ "mov x19, x21\n"
+ "st1h { z2.h }, p2, [x20]\n"
"trn1 z2.h, z12.h, z25.h\n"
- "ld1b { z12.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "ld1b { z12.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z16.h, z16.h, z18.h\n"
- "ld1b { z25.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
- "st1h { z10.h }, p2, [x21, #1, MUL VL]\n"
+ "ld1b { z25.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
+ "st1h { z10.h }, p2, [x20, #1, MUL VL]\n"
"trn1 z10.h, z24.h, z17.h\n"
- "ld1b { z24.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "ld1b { z24.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z12.h, z12.h, z18.h\n"
"sub z25.h, z25.h, z18.h\n"
- "ld1b { z17.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
- "st1h { z0.h }, p2, [x21, #2, MUL VL]\n"
+ "ld1b { z17.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
+ "st1h { z0.h }, p2, [x20, #2, MUL VL]\n"
"trn1 z0.h, z16.h, z14.h\n"
- "ld1b { z16.s }, p2/Z, [x20]\n"
- "incw x22\n"
+ "ld1b { z16.s }, p2/Z, [x19]\n"
+ "incw x21\n"
"sub z24.h, z24.h, z18.h\n"
"sub z17.h, z17.h, z18.h\n"
- "addvl x21, x21, #-3\n"
- "mov x20, x22\n"
- "st1h { z2.h }, p2, [x21]\n"
+ "addvl x20, x20, #-3\n"
+ "mov x19, x21\n"
+ "st1h { z2.h }, p2, [x20]\n"
"sub z16.h, z16.h, z18.h\n"
"trn1 z2.h, z12.h, z25.h\n"
- "ld1b { z12.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
- "st1h { z10.h }, p2, [x21, #1, MUL VL]\n"
- "ld1b { z25.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "ld1b { z12.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
+ "st1h { z10.h }, p2, [x20, #1, MUL VL]\n"
+ "ld1b { z25.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"trn1 z10.h, z24.h, z17.h\n"
- "st1h { z0.h }, p2, [x21, #2, MUL VL]\n"
- "ld1b { z24.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "st1h { z0.h }, p2, [x20, #2, MUL VL]\n"
+ "ld1b { z24.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"trn1 z0.h, z16.h, z14.h\n"
"sub z12.h, z12.h, z18.h\n"
- "ld1b { z17.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "ld1b { z17.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z25.h, z25.h, z18.h\n"
"sub z24.h, z24.h, z18.h\n"
- "ld1b { z16.s }, p2/Z, [x20]\n"
+ "ld1b { z16.s }, p2/Z, [x19]\n"
"sub z17.h, z17.h, z18.h\n"
"sub z16.h, z16.h, z18.h\n"
- "ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
- "addvl x21, x21, #-3\n"
- "st1h { z2.h }, p2, [x21]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+ "addvl x20, x20, #-3\n"
+ "st1h { z2.h }, p2, [x20]\n"
"mov z29.d, z28.d\n"
"mov z30.d, z28.d\n"
- "st1h { z10.h }, p2, [x21, #1, MUL VL]\n"
+ "st1h { z10.h }, p2, [x20, #1, MUL VL]\n"
"mov z31.d, z28.d\n"
"trn1 z2.h, z12.h, z25.h\n"
- "st1h { z0.h }, p2, [x21, #2, MUL VL]\n"
- "addvl x21, x21, #-3\n"
+ "st1h { z0.h }, p2, [x20, #2, MUL VL]\n"
+ "addvl x20, x20, #-3\n"
"trn1 z10.h, z24.h, z17.h\n"
"trn1 z0.h, z16.h, z14.h\n"
- "st1h { z2.h }, p2, [x21]\n"
- "st1h { z10.h }, p2, [x21, #1, MUL VL]\n"
- "st1h { z0.h }, p2, [x21, #2, MUL VL]\n"
- "cbz x20, 3f\n"
- "ld1w { z3.s }, p1/Z, [x20, x6, LSL #2]\n"
+ "st1h { z2.h }, p2, [x20]\n"
+ "st1h { z10.h }, p2, [x20, #1, MUL VL]\n"
+ "st1h { z0.h }, p2, [x20, #2, MUL VL]\n"
+ "cbz x19, 3f\n"
+ "ld1w { z3.s }, p1/Z, [x19, x7, LSL #2]\n"
"3:" // Load mul: End
- "ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
- "cbz x20, 4f\n"
- "ld1w { z1.s }, p1/Z, [x20, x6, LSL #2]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+ "cbz x19, 4f\n"
+ "ld1w { z1.s }, p1/Z, [x19, x7, LSL #2]\n"
"4:" // Load right_shift: End
- "ldr x7, [%x[args], %[offsetof_Args_input_cols]]\n"
- "sub x20, x7, #0x1\n"
- "orr x23, x20, %x[ld_in_col], LSL #16\n"
- "ldr x17, [%x[args], %[offsetof_Args_inptr]]\n"
- "orr x23, x5, x23, LSL #22\n"
- "mov x22, #0xb\n"
- "add x21, x4, x3\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "ldr x16, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "ldr x17, [%x[args], %[offsetof_Args_input_cols]]\n"
+ "sub x19, x17, #0x1\n"
+ "orr x22, x19, %x[ld_in_col], LSL #16\n"
+ "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
+ "orr x22, x6, x22, LSL #22\n"
+ "mov x21, #0xb\n"
+ "add x20, x5, x4\n"
+ "lsl x19, %x[ld_in_row], #0x0\n"
+ "ldr x15, [%x[args], %[offsetof_Args_output_cols]]\n"
"mov x8, #0x0\n"
- "lsl x23, x23, #0x0\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x4, x17\n"
+ "lsl x22, x22, #0x0\n"
+ "sub x21, x21, x20\n"
+ "madd x19, x19, x5, x16\n"
"5:" // Issue prefetches
- "subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col]\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xf8b64a7c // rprfm pldstrm, x22, [x19]\n"
+ "add x19, x19, %x[ld_in_col]\n"
"bgt 5b\n"
- "ldr x25, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "msub x17, x4, x20, x17\n"
+ "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "lsl x19, %x[ld_in_row], #0x0\n"
+ "msub x16, x5, x19, x16\n"
".inst 0xc0040f80 // mova za.d[x8, #0], { z28.d-z31.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
".inst 0xc0040f81 // mova za.d[x8, #1], { z28.d-z31.d }\n"
- "mov x22, #0x4\n"
- "ldp x15, x14, [x25], #0x10\n"
+ "mov x21, #0x4\n"
+ "ldp x14, x13, [x24], #0x10\n"
".inst 0xc0040f82 // mova za.d[x8, #2], { z28.d-z31.d }\n"
- "ldp x13, x11, [x20], #0x10\n"
+ "ldp x11, x10, [x19], #0x10\n"
".inst 0xc0040f83 // mova za.d[x8, #3], { z28.d-z31.d }\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
+ "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
- "ldp x10, x9, [x25], #0x10\n"
- "ldp x28, x27, [x20], #0x10\n"
- "cbz x21, 7f\n"
- "cmp x21, x22\n"
- "csel x20, x21, x22, LT\n"
- "sub x21, x21, x20\n"
- "sub x22, x22, x20\n"
- "cbz x21, 7f\n"
+ "ldp x9, x28, [x24], #0x10\n"
+ "ldp x27, x26, [x19], #0x10\n"
+ "cbz x20, 7f\n"
+ "cmp x20, x21\n"
+ "csel x19, x20, x21, LT\n"
+ "sub x20, x20, x19\n"
+ "sub x21, x21, x19\n"
+ "cbz x20, 7f\n"
".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
".inst 0xc1a3ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
- "and x22, x21, #0x1\n"
+ "and x21, x20, #0x1\n"
".inst 0xc1a1aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
- "add x21, x21, #0x1\n"
- "lsr x21, x21, #0x1\n"
+ "add x20, x20, #0x1\n"
+ "lsr x20, x20, #0x1\n"
".inst 0xc1a8ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
- "sub x16, x16, x21\n"
+ "sub x15, x15, x20\n"
".inst 0xc1b7cf44 // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
"6:" // Left padding
- "subs x21, x21, #0x1\n"
- "st1b { z4.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- "st1b { z5.s }, p1, [x14]\n"
+ "subs x20, x20, #0x1\n"
+ "st1b { z4.s }, p1, [x14]\n"
"add x14, x14, x11\n"
- "st1b { z6.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
- "st1b { z7.s }, p1, [x9]\n"
+ "st1b { z5.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
+ "st1b { z6.s }, p1, [x9]\n"
"add x9, x9, x27\n"
+ "st1b { z7.s }, p1, [x28]\n"
+ "add x28, x28, x26\n"
"bgt 6b\n"
"7:" // Left padding: End
- "adds XZR, x4, x3\n"
+ "adds XZR, x5, x4\n"
"bne 14f\n"
- "cbz x22, 12f\n"
- "cmp x22, #0x1\n"
- "sub x7, x7, x22\n"
+ "cbz x21, 12f\n"
+ "cmp x21, #0x1\n"
+ "sub x17, x17, x21\n"
"beq 11f\n"
- "cmp x22, #0x2\n"
+ "cmp x21, #0x2\n"
"beq 10f\n"
- "cmp x22, #0x3\n"
+ "cmp x21, #0x3\n"
"beq 9f\n"
"8:" // Unpadded: 4 priming loads
- "add x21, x17, %x[ld_in_row]\n"
- "ld1b { z11.s }, p1/Z, [x17]\n"
- "addvl x20, SP, #12\n"
+ "add x20, x16, %x[ld_in_row]\n"
+ "ld1b { z11.s }, p1/Z, [x16]\n"
+ "addvl x19, SP, #12\n"
+ "ld1b { z21.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z21.h\n"
+ "add z11.h, z11.h, z9.h\n"
+ "ld1b { z12.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "ld1b { z20.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z20.h\n"
+ "add z12.h, z12.h, z9.h\n"
+ "ld1b { z13.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z19.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z13.h, z13.h, z19.h\n"
+ "add z13.h, z13.h, z9.h\n"
+ "ld1b { z14.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z18.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "add z14.h, z14.h, z9.h\n"
+ "ld1b { z15.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z15.h, z15.h, z9.h\n"
+ ".inst 0xa1402a62 // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+ "ld1b { z16.s }, p1/Z, [x20]\n"
+ "mov z16.d, z16.d\n"
+ "add z16.h, z16.h, z9.h\n"
+ ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+ ".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+ "9:" // Unpadded: 3 priming loads
+ "add x20, x16, %x[ld_in_row]\n"
+ "ld1b { z11.s }, p1/Z, [x16]\n"
+ "addvl x19, SP, #9\n"
+ "ld1b { z21.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z21.h\n"
+ "add z11.h, z11.h, z9.h\n"
+ "ld1b { z12.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "ld1b { z20.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z20.h\n"
+ "add z12.h, z12.h, z9.h\n"
+ "ld1b { z13.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z19.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z13.h, z13.h, z19.h\n"
+ "add z13.h, z13.h, z9.h\n"
+ "ld1b { z14.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z18.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "add z14.h, z14.h, z9.h\n"
+ "ld1b { z15.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z15.h, z15.h, z9.h\n"
+ ".inst 0xa1402a62 // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+ "ld1b { z16.s }, p1/Z, [x20]\n"
+ "mov z16.d, z16.d\n"
+ "add z16.h, z16.h, z9.h\n"
+ ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+ ".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+ "10:" // Unpadded: 2 priming loads
+ "add x21, x16, %x[ld_in_row]\n"
+ "ld1b { z11.s }, p1/Z, [x16]\n"
+ "addvl x20, SP, #6\n"
"ld1b { z21.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"trn1 z11.h, z11.h, z21.h\n"
"add z11.h, z11.h, z9.h\n"
"ld1b { z12.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "add x17, x17, %x[ld_in_col]\n"
+ "addvl x19, SP, #12\n"
"ld1b { z20.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
"add z12.h, z12.h, z9.h\n"
"ld1b { z13.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add x16, x16, %x[ld_in_col]\n"
"ld1b { z19.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"trn1 z13.h, z13.h, z19.h\n"
@@ -324,34 +407,40 @@ void sme2_u8q_planar_5x5_s2_4rows_dot_za_impl(
"ld1b { z15.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"ld1b { z17.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
"trn1 z15.h, z15.h, z17.h\n"
+ "add x21, x21, %x[ld_in_row]\n"
"add z15.h, z15.h, z9.h\n"
".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
"ld1b { z16.s }, p1/Z, [x21]\n"
"mov z16.d, z16.d\n"
- "add z16.h, z16.h, z9.h\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xa1402a62 // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+ "add z16.h, z16.h, z9.h\n"
"ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ ".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "9:" // Unpadded: 3 priming loads
- "add x21, x17, %x[ld_in_row]\n"
- "ld1b { z11.s }, p1/Z, [x17]\n"
- "addvl x20, SP, #9\n"
+ "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+ ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+ "11:" // Unpadded: 1 priming loads
+ "add x21, x16, %x[ld_in_row]\n"
+ "ld1b { z11.s }, p1/Z, [x16]\n"
+ "addvl x20, SP, #3\n"
"ld1b { z21.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"trn1 z11.h, z11.h, z21.h\n"
"add z11.h, z11.h, z9.h\n"
"ld1b { z12.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "add x17, x17, %x[ld_in_col]\n"
+ "addvl x19, SP, #9\n"
"ld1b { z20.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
"add z12.h, z12.h, z9.h\n"
"ld1b { z13.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add x16, x16, %x[ld_in_col]\n"
"ld1b { z19.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"trn1 z13.h, z13.h, z19.h\n"
@@ -365,100 +454,127 @@ void sme2_u8q_planar_5x5_s2_4rows_dot_za_impl(
"ld1b { z15.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"ld1b { z17.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
"trn1 z15.h, z15.h, z17.h\n"
+ "add x21, x21, %x[ld_in_row]\n"
"add z15.h, z15.h, z9.h\n"
".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
"ld1b { z16.s }, p1/Z, [x21]\n"
"mov z16.d, z16.d\n"
- "add z16.h, z16.h, z9.h\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xa1402a62 // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+ "add z16.h, z16.h, z9.h\n"
"ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ ".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "10:" // Unpadded: 2 priming loads
- "add x22, x17, %x[ld_in_row]\n"
- "ld1b { z11.s }, p1/Z, [x17]\n"
- "addvl x21, SP, #6\n"
- "ld1b { z21.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+ ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+ "12:" // Unpadded: 0 priming loads
+ "cmp x17, #0x2\n"
+ ".inst 0xa1402be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
+ "ld1h { z0.h }, p2/Z, [SP, #2, MUL VL]\n"
+ "blt 22f\n"
+ "add x20, x16, %x[ld_in_row]\n"
+ "ld1b { z11.s }, p1/Z, [x16]\n"
+ "sub x17, x17, #0x2\n"
+ "ld1b { z21.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z11.h, z11.h, z21.h\n"
+ "sub x15, x15, #0x1\n"
+ "ld1b { z12.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "lsr x19, x17, #0x1\n"
"add z11.h, z11.h, z9.h\n"
- "ld1b { z12.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "addvl x20, SP, #12\n"
- "ld1b { z20.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "ld1b { z20.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
+ "cmp x19, x15\n"
+ "ld1b { z13.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "csel x25, x19, x15, LT\n"
"add z12.h, z12.h, z9.h\n"
- "ld1b { z13.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1b { z19.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "ld1b { z19.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z13.h, z13.h, z19.h\n"
"add z13.h, z13.h, z9.h\n"
- "ld1b { z14.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "ld1b { z18.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "ld1b { z14.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "ld1b { z18.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z14.h, z14.h, z18.h\n"
"add z14.h, z14.h, z9.h\n"
- "ld1b { z15.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "ld1b { z17.s }, p1/Z, [x22]\n"
+ "ld1b { z15.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "and x17, x17, #0x1\n"
+ "ld1b { z17.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z15.h, z15.h, z17.h\n"
- "add x22, x22, %x[ld_in_row]\n"
"add z15.h, z15.h, z9.h\n"
- ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- "ld1b { z16.s }, p1/Z, [x22]\n"
+ "ld1b { z16.s }, p1/Z, [x20]\n"
"mov z16.d, z16.d\n"
+ "add z16.h, z16.h, z9.h\n"
+ "sub x15, x15, x25\n"
+ "cbz x25, 21f\n"
+ "13:" // Unpadded: Main loop
+ ".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+ "addvl x24, SP, #6\n"
+ "addvl x23, SP, #12\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402b02 // ld1h { z2.h, z10.h }, pn10.b/Z, [x24]\n"
+ "add x22, x16, %x[ld_in_row]\n"
+ "addvl x21, SP, #3\n"
".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
- "add z16.h, z16.h, z9.h\n"
- "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "addvl x20, SP, #9\n"
+ "subs x25, x25, #0x1\n"
".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xa1402ae2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x23]\n"
+ ".inst 0xc172156a // sdot za.s[x8, 2], { z11.h-z14.h }, z2.h\n"
+ "ld1b { z11.s }, p1/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "add x19, x16, %x[ld_in_row]\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
- "11:" // Unpadded: 1 priming loads
- "add x22, x17, %x[ld_in_row]\n"
- "ld1b { z11.s }, p1/Z, [x17]\n"
- "addvl x21, SP, #3\n"
+ "ld1h { z0.h }, p2/Z, [x24, #2, MUL VL]\n"
+ ".inst 0xc17a158a // sdot za.s[x8, 2], { z12.h-z15.h }, z10.h\n"
"ld1b { z21.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"trn1 z11.h, z11.h, z21.h\n"
+ ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+ "ld1h { z0.h }, p2/Z, [x23, #2, MUL VL]\n"
"add z11.h, z11.h, z9.h\n"
"ld1b { z12.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "addvl x20, SP, #9\n"
+ ".inst 0xc17015aa // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
"ld1b { z20.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
"add z12.h, z12.h, z9.h\n"
"ld1b { z13.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "add x17, x17, %x[ld_in_col]\n"
+ ".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
+ "add x8, x8, #0x1\n"
"ld1b { z19.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"trn1 z13.h, z13.h, z19.h\n"
"add z13.h, z13.h, z9.h\n"
"ld1b { z14.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc1a3ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
"ld1b { z18.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"trn1 z14.h, z14.h, z18.h\n"
"add z14.h, z14.h, z9.h\n"
"ld1b { z15.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc1a1aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
"ld1b { z17.s }, p1/Z, [x22]\n"
"trn1 z15.h, z15.h, z17.h\n"
"add x22, x22, %x[ld_in_row]\n"
"add z15.h, z15.h, z9.h\n"
".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+ ".inst 0xc1a8ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
"ld1b { z16.s }, p1/Z, [x22]\n"
"mov z16.d, z16.d\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
@@ -467,166 +583,50 @@ void sme2_u8q_planar_5x5_s2_4rows_dot_za_impl(
"add z16.h, z16.h, z9.h\n"
"ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
- ".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
- "12:" // Unpadded: 0 priming loads
- "cmp x7, #0x2\n"
- ".inst 0xa1402be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
- "ld1h { z0.h }, p2/Z, [SP, #2, MUL VL]\n"
- "blt 22f\n"
- "add x21, x17, %x[ld_in_row]\n"
- "ld1b { z11.s }, p1/Z, [x17]\n"
- "sub x7, x7, #0x2\n"
- "ld1b { z21.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "trn1 z11.h, z11.h, z21.h\n"
- "sub x16, x16, #0x1\n"
- "ld1b { z12.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "lsr x20, x7, #0x1\n"
- "add z11.h, z11.h, z9.h\n"
- "ld1b { z20.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "trn1 z12.h, z12.h, z20.h\n"
- "cmp x20, x16\n"
- "ld1b { z13.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "csel x26, x20, x16, LT\n"
- "add z12.h, z12.h, z9.h\n"
- "ld1b { z19.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "trn1 z13.h, z13.h, z19.h\n"
- "add z13.h, z13.h, z9.h\n"
- "ld1b { z14.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1b { z18.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "trn1 z14.h, z14.h, z18.h\n"
- "add z14.h, z14.h, z9.h\n"
- "ld1b { z15.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "and x7, x7, #0x1\n"
- "ld1b { z17.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "trn1 z15.h, z15.h, z17.h\n"
- "add z15.h, z15.h, z9.h\n"
- "ld1b { z16.s }, p1/Z, [x21]\n"
- "mov z16.d, z16.d\n"
- "add z16.h, z16.h, z9.h\n"
- "sub x16, x16, x26\n"
- "cbz x26, 21f\n"
- "13:" // Unpadded: Main loop
- ".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- "addvl x25, SP, #6\n"
- "addvl x24, SP, #12\n"
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402b22 // ld1h { z2.h, z10.h }, pn10.b/Z, [x25]\n"
- "add x23, x17, %x[ld_in_row]\n"
- "addvl x22, SP, #3\n"
- ".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
- "addvl x21, SP, #9\n"
- "subs x26, x26, #0x1\n"
- ".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402b02 // ld1h { z2.h, z10.h }, pn10.b/Z, [x24]\n"
- ".inst 0xc172156a // sdot za.s[x8, 2], { z11.h-z14.h }, z2.h\n"
- "ld1b { z11.s }, p1/Z, [x17]\n"
- "add x17, x17, %x[ld_in_col]\n"
- "add x20, x17, %x[ld_in_row]\n"
- ".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x25, #2, MUL VL]\n"
- ".inst 0xc17a158a // sdot za.s[x8, 2], { z12.h-z15.h }, z10.h\n"
- "ld1b { z21.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row]\n"
- "trn1 z11.h, z11.h, z21.h\n"
- ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x24, #2, MUL VL]\n"
- "add z11.h, z11.h, z9.h\n"
- "ld1b { z12.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row]\n"
- ".inst 0xc17015aa // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
- "ld1b { z20.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row]\n"
- "trn1 z12.h, z12.h, z20.h\n"
- "add z12.h, z12.h, z9.h\n"
- "ld1b { z13.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row]\n"
- ".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
- "add x8, x8, #0x1\n"
- "ld1b { z19.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row]\n"
- "trn1 z13.h, z13.h, z19.h\n"
- "add z13.h, z13.h, z9.h\n"
- "ld1b { z14.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row]\n"
- ".inst 0xc1a3ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
- "ld1b { z18.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row]\n"
- "trn1 z14.h, z14.h, z18.h\n"
- "add z14.h, z14.h, z9.h\n"
- "ld1b { z15.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row]\n"
- ".inst 0xc1a1aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
- "ld1b { z17.s }, p1/Z, [x23]\n"
- "trn1 z15.h, z15.h, z17.h\n"
- "add x23, x23, %x[ld_in_row]\n"
- "add z15.h, z15.h, z9.h\n"
- ".inst 0xa1402ac2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x22]\n"
- ".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- ".inst 0xc1a8ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
- "ld1b { z16.s }, p1/Z, [x23]\n"
- "mov z16.d, z16.d\n"
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
- "add z16.h, z16.h, z9.h\n"
- "ld1h { z0.h }, p2/Z, [x22, #2, MUL VL]\n"
- ".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
".inst 0xc1b7cf44 // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
- "st1b { z4.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- "ld1b { z11.s }, p1/Z, [x17]\n"
- ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
- "st1b { z5.s }, p1, [x14]\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "st1b { z4.s }, p1, [x14]\n"
"add x14, x14, x11\n"
- "ld1b { z21.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z11.s }, p1/Z, [x16]\n"
+ ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+ "st1b { z5.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
+ "ld1b { z21.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z11.h, z11.h, z21.h\n"
- "st1b { z6.s }, p1, [x10]\n"
- "ld1b { z12.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- "add x10, x10, x28\n"
- "st1b { z7.s }, p1, [x9]\n"
- "ld1b { z20.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z12.h, z12.h, z20.h\n"
+ "st1b { z6.s }, p1, [x9]\n"
+ "ld1b { z12.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"add x9, x9, x27\n"
- "ld1b { z13.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "st1b { z7.s }, p1, [x28]\n"
+ "ld1b { z20.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z20.h\n"
+ "add x28, x28, x26\n"
+ "ld1b { z13.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
"add z11.h, z11.h, z9.h\n"
- "ld1b { z19.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z19.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z13.h, z13.h, z19.h\n"
"add z12.h, z12.h, z9.h\n"
- "ld1b { z14.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z14.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"add z13.h, z13.h, z9.h\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1b { z18.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "ld1b { z18.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z14.h, z14.h, z18.h\n"
"add z14.h, z14.h, z9.h\n"
- "ld1b { z15.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- "ld1b { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z15.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z15.h, z15.h, z17.h\n"
"add z15.h, z15.h, z9.h\n"
- "ld1b { z16.s }, p1/Z, [x20]\n"
+ "ld1b { z16.s }, p1/Z, [x19]\n"
"mov z16.d, z16.d\n"
"add z16.h, z16.h, z9.h\n"
".inst 0xa1402be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
@@ -634,717 +634,717 @@ void sme2_u8q_planar_5x5_s2_4rows_dot_za_impl(
"bgt 13b\n"
"b 21f\n"
"14:" // Padded
- "cbz x22, 19f\n"
- "cmp x22, #0x1\n"
- "sub x7, x7, x22\n"
+ "cbz x21, 19f\n"
+ "cmp x21, #0x1\n"
+ "sub x17, x17, x21\n"
"beq 18f\n"
- "cmp x22, #0x2\n"
+ "cmp x21, #0x2\n"
"beq 17f\n"
- "cmp x22, #0x3\n"
+ "cmp x21, #0x3\n"
"beq 16f\n"
"15:" // Padded: 4 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z11.s }, p0/Z, [x17]\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
"add z11.h, p0/M, z11.h, z9.h\n"
- "add x21, x17, %x[ld_in_row]\n"
+ "add x20, x16, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z21.s }, p0/Z, [x21]\n"
+ "ld1b { z21.s }, p0/Z, [x20]\n"
"add z21.h, p0/M, z21.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z12.s }, p0/Z, [x21]\n"
+ "ld1b { z12.s }, p0/Z, [x20]\n"
"add z12.h, p0/M, z12.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z20.s }, p0/Z, [x21]\n"
+ "ld1b { z20.s }, p0/Z, [x20]\n"
"add z20.h, p0/M, z20.h, z9.h\n"
"mov x12, #0x4\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z11.h, z11.h, z21.h\n"
"trn1 z12.h, z12.h, z20.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z13.s }, p0/Z, [x21]\n"
+ "ld1b { z13.s }, p0/Z, [x20]\n"
"add z13.h, p0/M, z13.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z19.s }, p0/Z, [x21]\n"
+ "ld1b { z19.s }, p0/Z, [x20]\n"
"add z19.h, p0/M, z19.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z14.s }, p0/Z, [x21]\n"
+ "ld1b { z14.s }, p0/Z, [x20]\n"
"add z14.h, p0/M, z14.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z18.s }, p0/Z, [x21]\n"
+ "ld1b { z18.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
"add z18.h, p0/M, z18.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z15.s }, p0/Z, [x21]\n"
+ "ld1b { z15.s }, p0/Z, [x20]\n"
"add z15.h, p0/M, z15.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x21]\n"
+ "ld1b { z17.s }, p0/Z, [x20]\n"
"add z17.h, p0/M, z17.h, z9.h\n"
- "addvl x20, SP, #12\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "addvl x19, SP, #12\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z13.h, z13.h, z19.h\n"
"trn1 z14.h, z14.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a62 // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
"trn1 z15.h, z15.h, z17.h\n"
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- "ld1b { z16.s }, p0/Z, [x21]\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
"add z16.h, p0/M, z16.h, z9.h\n"
"mov z16.d, z16.d\n"
- "add x17, x17, %x[ld_in_col]\n"
+ "add x16, x16, %x[ld_in_col]\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
"16:" // Padded: 3 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z11.s }, p0/Z, [x17]\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
"add z11.h, p0/M, z11.h, z9.h\n"
- "add x21, x17, %x[ld_in_row]\n"
+ "add x20, x16, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z21.s }, p0/Z, [x21]\n"
+ "ld1b { z21.s }, p0/Z, [x20]\n"
"add z21.h, p0/M, z21.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z12.s }, p0/Z, [x21]\n"
+ "ld1b { z12.s }, p0/Z, [x20]\n"
"add z12.h, p0/M, z12.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z20.s }, p0/Z, [x21]\n"
+ "ld1b { z20.s }, p0/Z, [x20]\n"
"add z20.h, p0/M, z20.h, z9.h\n"
"mov x12, #0x4\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z11.h, z11.h, z21.h\n"
"trn1 z12.h, z12.h, z20.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z13.s }, p0/Z, [x21]\n"
+ "ld1b { z13.s }, p0/Z, [x20]\n"
"add z13.h, p0/M, z13.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z19.s }, p0/Z, [x21]\n"
+ "ld1b { z19.s }, p0/Z, [x20]\n"
"add z19.h, p0/M, z19.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z14.s }, p0/Z, [x21]\n"
+ "ld1b { z14.s }, p0/Z, [x20]\n"
"add z14.h, p0/M, z14.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z18.s }, p0/Z, [x21]\n"
+ "ld1b { z18.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
"add z18.h, p0/M, z18.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z15.s }, p0/Z, [x21]\n"
+ "ld1b { z15.s }, p0/Z, [x20]\n"
"add z15.h, p0/M, z15.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x21]\n"
+ "ld1b { z17.s }, p0/Z, [x20]\n"
"add z17.h, p0/M, z17.h, z9.h\n"
- "addvl x20, SP, #9\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "addvl x19, SP, #9\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z13.h, z13.h, z19.h\n"
"trn1 z14.h, z14.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a62 // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
"trn1 z15.h, z15.h, z17.h\n"
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- "ld1b { z16.s }, p0/Z, [x21]\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
"add z16.h, p0/M, z16.h, z9.h\n"
"mov z16.d, z16.d\n"
- "add x17, x17, %x[ld_in_col]\n"
+ "add x16, x16, %x[ld_in_col]\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
"17:" // Padded: 2 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z11.s }, p0/Z, [x17]\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
"add z11.h, p0/M, z11.h, z9.h\n"
- "add x20, x17, %x[ld_in_row]\n"
+ "add x19, x16, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z21.s }, p0/Z, [x20]\n"
+ "ld1b { z21.s }, p0/Z, [x19]\n"
"add z21.h, p0/M, z21.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z12.s }, p0/Z, [x20]\n"
+ "ld1b { z12.s }, p0/Z, [x19]\n"
"add z12.h, p0/M, z12.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
+ "ld1b { z20.s }, p0/Z, [x19]\n"
"add z20.h, p0/M, z20.h, z9.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z11.h, z11.h, z21.h\n"
"trn1 z12.h, z12.h, z20.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z13.s }, p0/Z, [x20]\n"
+ "ld1b { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z19.s }, p0/Z, [x20]\n"
+ "ld1b { z19.s }, p0/Z, [x19]\n"
"add z19.h, p0/M, z19.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z14.s }, p0/Z, [x20]\n"
+ "ld1b { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
"add z18.h, p0/M, z18.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z15.s }, p0/Z, [x20]\n"
+ "ld1b { z15.s }, p0/Z, [x19]\n"
"add z15.h, p0/M, z15.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z9.h\n"
- "addvl x21, SP, #6\n"
+ "addvl x20, SP, #6\n"
"trn1 z13.h, z13.h, z19.h\n"
"trn1 z14.h, z14.h, z18.h\n"
- ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
"trn1 z15.h, z15.h, z17.h\n"
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "addvl x20, SP, #12\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
+ "addvl x19, SP, #12\n"
"add z16.h, p0/M, z16.h, z9.h\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a62 // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
"mov z16.d, z16.d\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
"18:" // Padded: 1 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z11.s }, p0/Z, [x17]\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
"add z11.h, p0/M, z11.h, z9.h\n"
- "add x20, x17, %x[ld_in_row]\n"
+ "add x19, x16, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z21.s }, p0/Z, [x20]\n"
+ "ld1b { z21.s }, p0/Z, [x19]\n"
"add z21.h, p0/M, z21.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z12.s }, p0/Z, [x20]\n"
+ "ld1b { z12.s }, p0/Z, [x19]\n"
"add z12.h, p0/M, z12.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
+ "ld1b { z20.s }, p0/Z, [x19]\n"
"add z20.h, p0/M, z20.h, z9.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z11.h, z11.h, z21.h\n"
"trn1 z12.h, z12.h, z20.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z13.s }, p0/Z, [x20]\n"
+ "ld1b { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z19.s }, p0/Z, [x20]\n"
+ "ld1b { z19.s }, p0/Z, [x19]\n"
"add z19.h, p0/M, z19.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z14.s }, p0/Z, [x20]\n"
+ "ld1b { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
"add z18.h, p0/M, z18.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z15.s }, p0/Z, [x20]\n"
+ "ld1b { z15.s }, p0/Z, [x19]\n"
"add z15.h, p0/M, z15.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z9.h\n"
- "addvl x21, SP, #3\n"
+ "addvl x20, SP, #3\n"
"trn1 z13.h, z13.h, z19.h\n"
"trn1 z14.h, z14.h, z18.h\n"
- ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
"trn1 z15.h, z15.h, z17.h\n"
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "addvl x20, SP, #9\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
+ "addvl x19, SP, #9\n"
"add z16.h, p0/M, z16.h, z9.h\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a62 // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
"mov z16.d, z16.d\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
"19:" // Padded: 0 priming loads
- "cmp x7, #0x2\n"
+ "cmp x17, #0x2\n"
".inst 0xa1402be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
"ld1h { z0.h }, p2/Z, [SP, #2, MUL VL]\n"
"blt 22f\n"
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z11.s }, p0/Z, [x17]\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
"add z11.h, p0/M, z11.h, z9.h\n"
- "add x20, x17, %x[ld_in_row]\n"
+ "add x19, x16, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z21.s }, p0/Z, [x20]\n"
+ "ld1b { z21.s }, p0/Z, [x19]\n"
"add z21.h, p0/M, z21.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z12.s }, p0/Z, [x20]\n"
+ "ld1b { z12.s }, p0/Z, [x19]\n"
"add z12.h, p0/M, z12.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
+ "ld1b { z20.s }, p0/Z, [x19]\n"
"add z20.h, p0/M, z20.h, z9.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z11.h, z11.h, z21.h\n"
"trn1 z12.h, z12.h, z20.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z13.s }, p0/Z, [x20]\n"
+ "ld1b { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z19.s }, p0/Z, [x20]\n"
+ "ld1b { z19.s }, p0/Z, [x19]\n"
"add z19.h, p0/M, z19.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z14.s }, p0/Z, [x20]\n"
+ "ld1b { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
"add z18.h, p0/M, z18.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z15.s }, p0/Z, [x20]\n"
+ "ld1b { z15.s }, p0/Z, [x19]\n"
"add z15.h, p0/M, z15.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z9.h\n"
- "sub x7, x7, #0x2\n"
- "sub x16, x16, #0x1\n"
+ "sub x17, x17, #0x2\n"
+ "sub x15, x15, #0x1\n"
"trn1 z13.h, z13.h, z19.h\n"
"trn1 z14.h, z14.h, z18.h\n"
- "lsr x20, x7, #0x1\n"
- "cmp x20, x16\n"
+ "lsr x19, x17, #0x1\n"
+ "cmp x19, x15\n"
"trn1 z15.h, z15.h, z17.h\n"
"mov z16.d, z16.d\n"
- "csel x25, x20, x16, LT\n"
- "add x17, x17, %x[ld_in_col]\n"
- "and x7, x7, #0x1\n"
- "sub x16, x16, x25\n"
- "cbz x25, 21f\n"
+ "csel x24, x19, x15, LT\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "and x17, x17, #0x1\n"
+ "sub x15, x15, x24\n"
+ "cbz x24, 21f\n"
"20:" // Padded: Main loop
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- "addvl x24, SP, #6\n"
- "addvl x23, SP, #12\n"
+ "addvl x23, SP, #6\n"
+ "addvl x22, SP, #12\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402b02 // ld1h { z2.h, z10.h }, pn10.b/Z, [x24]\n"
+ ".inst 0xa1402ae2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x23]\n"
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
- "add x20, x17, %x[ld_in_row]\n"
- "addvl x22, SP, #3\n"
+ "add x19, x16, %x[ld_in_row]\n"
+ "addvl x21, SP, #3\n"
".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402ae2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x23]\n"
- "addvl x21, SP, #9\n"
- "subs x25, x25, #0x1\n"
+ ".inst 0xa1402ac2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x22]\n"
+ "addvl x20, SP, #9\n"
+ "subs x24, x24, #0x1\n"
".inst 0xc172156a // sdot za.s[x8, 2], { z11.h-z14.h }, z2.h\n"
- "ld1b { z11.s }, p0/Z, [x17]\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
"add z11.h, p0/M, z11.h, z9.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z21.s }, p0/Z, [x20]\n"
+ "ld1b { z21.s }, p0/Z, [x19]\n"
"add z21.h, p0/M, z21.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc17a158a // sdot za.s[x8, 2], { z12.h-z15.h }, z10.h\n"
- "ld1b { z12.s }, p0/Z, [x20]\n"
+ "ld1b { z12.s }, p0/Z, [x19]\n"
"add z12.h, p0/M, z12.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x23, #2, MUL VL]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x4\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
+ "ld1b { z20.s }, p0/Z, [x19]\n"
".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
"add z20.h, p0/M, z20.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
- "ld1h { z0.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "ld1h { z0.h }, p2/Z, [x22, #2, MUL VL]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc17015aa // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
"trn1 z11.h, z11.h, z21.h\n"
- "ld1b { z13.s }, p0/Z, [x20]\n"
+ "ld1b { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z19.s }, p0/Z, [x20]\n"
+ "ld1b { z19.s }, p0/Z, [x19]\n"
"add z19.h, p0/M, z19.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z14.s }, p0/Z, [x20]\n"
+ "ld1b { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
"add z18.h, p0/M, z18.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z15.s }, p0/Z, [x20]\n"
+ "ld1b { z15.s }, p0/Z, [x19]\n"
"add z15.h, p0/M, z15.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z9.h\n"
"trn1 z12.h, z12.h, z20.h\n"
"trn1 z13.h, z13.h, z19.h\n"
"trn1 z14.h, z14.h, z18.h\n"
- ".inst 0xa1402ac2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x22]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
"add x8, x8, #0x1\n"
"trn1 z15.h, z15.h, z17.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"mov x12, #0x0\n"
"add z16.h, p0/M, z16.h, z9.h\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
- "add x17, x17, %x[ld_in_col]\n"
+ ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+ "add x16, x16, %x[ld_in_col]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
- "ld1b { z11.s }, p0/Z, [x17]\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
"add z11.h, p0/M, z11.h, z9.h\n"
- "add x20, x17, %x[ld_in_row]\n"
+ "add x19, x16, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z21.s }, p0/Z, [x20]\n"
+ "ld1b { z21.s }, p0/Z, [x19]\n"
"add z21.h, p0/M, z21.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
- "ld1b { z12.s }, p0/Z, [x20]\n"
+ "ld1b { z12.s }, p0/Z, [x19]\n"
"mov z16.d, z16.d\n"
- "ld1h { z0.h }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
"add z12.h, p0/M, z12.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
+ "ld1b { z20.s }, p0/Z, [x19]\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
"mov x12, #0x4\n"
"add z20.h, p0/M, z20.h, z9.h\n"
- "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
- "ld1b { z13.s }, p0/Z, [x20]\n"
+ "ld1b { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z19.s }, p0/Z, [x20]\n"
+ "ld1b { z19.s }, p0/Z, [x19]\n"
"add z19.h, p0/M, z19.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z14.s }, p0/Z, [x20]\n"
+ "ld1b { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
"add z18.h, p0/M, z18.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z15.s }, p0/Z, [x20]\n"
+ "ld1b { z15.s }, p0/Z, [x19]\n"
".inst 0xc1a3ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
"add z15.h, p0/M, z15.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
".inst 0xc1a1aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
"add z17.h, p0/M, z17.h, z9.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z9.h\n"
".inst 0xc1a8ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
".inst 0xa1402be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
- "add x17, x17, %x[ld_in_col]\n"
+ "add x16, x16, %x[ld_in_col]\n"
".inst 0xc1b7cf44 // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
- "st1b { z4.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- "ld1h { z0.h }, p2/Z, [SP, #2, MUL VL]\n"
- "st1b { z5.s }, p1, [x14]\n"
+ "st1b { z4.s }, p1, [x14]\n"
"add x14, x14, x11\n"
+ "ld1h { z0.h }, p2/Z, [SP, #2, MUL VL]\n"
+ "st1b { z5.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
"trn1 z11.h, z11.h, z21.h\n"
"trn1 z12.h, z12.h, z20.h\n"
- "st1b { z6.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
+ "st1b { z6.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
"trn1 z13.h, z13.h, z19.h\n"
"trn1 z14.h, z14.h, z18.h\n"
- "st1b { z7.s }, p1, [x9]\n"
- "add x9, x9, x27\n"
+ "st1b { z7.s }, p1, [x28]\n"
+ "add x28, x28, x26\n"
"trn1 z15.h, z15.h, z17.h\n"
"mov z16.d, z16.d\n"
"bgt 20b\n"
"21:" // Main loop tail
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- "addvl x24, SP, #6\n"
- "addvl x23, SP, #12\n"
+ "addvl x23, SP, #6\n"
+ "addvl x22, SP, #12\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402b02 // ld1h { z2.h, z10.h }, pn10.b/Z, [x24]\n"
+ ".inst 0xa1402ae2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x23]\n"
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
- "add x22, x17, %x[ld_in_row]\n"
- "addvl x21, SP, #3\n"
+ "add x21, x16, %x[ld_in_row]\n"
+ "addvl x20, SP, #3\n"
".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402ae2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x23]\n"
- "addvl x20, SP, #9\n"
+ ".inst 0xa1402ac2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x22]\n"
+ "addvl x19, SP, #9\n"
".inst 0xc172156a // sdot za.s[x8, 2], { z11.h-z14.h }, z2.h\n"
- "ld1b { z11.s }, p0/Z, [x17]\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
"add z11.h, p0/M, z11.h, z9.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z21.s }, p0/Z, [x22]\n"
+ "ld1b { z21.s }, p0/Z, [x21]\n"
"add z21.h, p0/M, z21.h, z9.h\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc17a158a // sdot za.s[x8, 2], { z12.h-z15.h }, z10.h\n"
- "ld1b { z12.s }, p0/Z, [x22]\n"
+ "ld1b { z12.s }, p0/Z, [x21]\n"
"add z12.h, p0/M, z12.h, z9.h\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x23, #2, MUL VL]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x4\n"
- "ld1b { z20.s }, p0/Z, [x22]\n"
+ "ld1b { z20.s }, p0/Z, [x21]\n"
".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
"add z20.h, p0/M, z20.h, z9.h\n"
- "add x22, x22, %x[ld_in_row]\n"
- "ld1h { z0.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "ld1h { z0.h }, p2/Z, [x22, #2, MUL VL]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc17015aa // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
"trn1 z11.h, z11.h, z21.h\n"
- "ld1b { z13.s }, p0/Z, [x22]\n"
+ "ld1b { z13.s }, p0/Z, [x21]\n"
"add z13.h, p0/M, z13.h, z9.h\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z19.s }, p0/Z, [x22]\n"
+ "ld1b { z19.s }, p0/Z, [x21]\n"
"add z19.h, p0/M, z19.h, z9.h\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z14.s }, p0/Z, [x22]\n"
+ "ld1b { z14.s }, p0/Z, [x21]\n"
"add z14.h, p0/M, z14.h, z9.h\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z18.s }, p0/Z, [x22]\n"
+ "ld1b { z18.s }, p0/Z, [x21]\n"
"mov x12, #0x8\n"
"add z18.h, p0/M, z18.h, z9.h\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z15.s }, p0/Z, [x22]\n"
+ "ld1b { z15.s }, p0/Z, [x21]\n"
"add z15.h, p0/M, z15.h, z9.h\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x22]\n"
+ "ld1b { z17.s }, p0/Z, [x21]\n"
"add z17.h, p0/M, z17.h, z9.h\n"
"trn1 z12.h, z12.h, z20.h\n"
"trn1 z13.h, z13.h, z19.h\n"
"trn1 z14.h, z14.h, z18.h\n"
- ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
"add x8, x8, #0x1\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
"trn1 z15.h, z15.h, z17.h\n"
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- "ld1b { z16.s }, p0/Z, [x22]\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
".inst 0xc1a3ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
"add z16.h, p0/M, z16.h, z9.h\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
- "add x17, x17, %x[ld_in_col]\n"
+ ".inst 0xa1402a62 // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+ "add x16, x16, %x[ld_in_col]\n"
".inst 0xc1a1aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
"mov z16.d, z16.d\n"
- "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
".inst 0xc1a8ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
".inst 0xc1b7cf44 // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
- "st1b { z4.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- "st1b { z5.s }, p1, [x14]\n"
+ "st1b { z4.s }, p1, [x14]\n"
"add x14, x14, x11\n"
+ "st1b { z5.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
".inst 0xa1402be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
- "st1b { z6.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
+ "st1b { z6.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
"ld1h { z0.h }, p2/Z, [SP, #2, MUL VL]\n"
- "st1b { z7.s }, p1, [x9]\n"
- "add x9, x9, x27\n"
+ "st1b { z7.s }, p1, [x28]\n"
+ "add x28, x28, x26\n"
"22:" // Main loop skip tail
- "cbz x7, 23f\n" // Skip remainder inputs
+ "cbz x17, 23f\n" // Skip remainder inputs
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z11.s }, p0/Z, [x17]\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
"add z11.h, p0/M, z11.h, z9.h\n"
- "add x20, x17, %x[ld_in_row]\n"
+ "add x19, x16, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z21.s }, p0/Z, [x20]\n"
+ "ld1b { z21.s }, p0/Z, [x19]\n"
"add z21.h, p0/M, z21.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z12.s }, p0/Z, [x20]\n"
+ "ld1b { z12.s }, p0/Z, [x19]\n"
"add z12.h, p0/M, z12.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
+ "ld1b { z20.s }, p0/Z, [x19]\n"
"add z20.h, p0/M, z20.h, z9.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z11.h, z11.h, z21.h\n"
"trn1 z12.h, z12.h, z20.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z13.s }, p0/Z, [x20]\n"
+ "ld1b { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z19.s }, p0/Z, [x20]\n"
+ "ld1b { z19.s }, p0/Z, [x19]\n"
"add z19.h, p0/M, z19.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z14.s }, p0/Z, [x20]\n"
+ "ld1b { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
"add z18.h, p0/M, z18.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z15.s }, p0/Z, [x20]\n"
+ "ld1b { z15.s }, p0/Z, [x19]\n"
"add z15.h, p0/M, z15.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
"trn1 z13.h, z13.h, z19.h\n"
"trn1 z14.h, z14.h, z18.h\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z9.h\n"
"trn1 z15.h, z15.h, z17.h\n"
- "addvl x21, SP, #6\n"
+ "addvl x20, SP, #6\n"
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
"mov z16.d, z16.d\n"
- "addvl x20, SP, #12\n"
- "sub x16, x16, #0x1\n"
+ "addvl x19, SP, #12\n"
+ "sub x15, x15, #0x1\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
".inst 0xc1a3ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a62 // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
".inst 0xc1a1aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
".inst 0xc172156a // sdot za.s[x8, 2], { z11.h-z14.h }, z2.h\n"
".inst 0xc1a8ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
".inst 0xc17a158a // sdot za.s[x8, 2], { z12.h-z15.h }, z10.h\n"
".inst 0xc1b7cf44 // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
- "st1b { z4.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
- "st1b { z5.s }, p1, [x14]\n"
+ "st1b { z4.s }, p1, [x14]\n"
"add x14, x14, x11\n"
+ ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+ "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+ "st1b { z5.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
".inst 0xc17015aa // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
"add x8, x8, #0x1\n"
- "st1b { z6.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
- "st1b { z7.s }, p1, [x9]\n"
+ "st1b { z6.s }, p1, [x9]\n"
"add x9, x9, x27\n"
+ "st1b { z7.s }, p1, [x28]\n"
+ "add x28, x28, x26\n"
".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
"23:" // Tail input: End
- "cbz x16, 25f\n"
+ "cbz x15, 25f\n"
"24:" // Right padding loop
".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
".inst 0xc1a3ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
"add x8, x8, #0x1\n"
".inst 0xc1a1aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
- "subs x16, x16, #0x1\n"
+ "subs x15, x15, #0x1\n"
".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
".inst 0xc1a8ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
".inst 0xc1b7cf44 // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
- "st1b { z4.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- "st1b { z5.s }, p1, [x14]\n"
+ "st1b { z4.s }, p1, [x14]\n"
"add x14, x14, x11\n"
- "st1b { z6.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
- "st1b { z7.s }, p1, [x9]\n"
+ "st1b { z5.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
+ "st1b { z6.s }, p1, [x9]\n"
"add x9, x9, x27\n"
+ "st1b { z7.s }, p1, [x28]\n"
+ "add x28, x28, x26\n"
"bgt 24b\n"
"25:" // End
- "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
- "incw x22, ALL, MUL #16\n"
- "incw x22, ALL, MUL #9\n"
- "str x22, [%x[args], %[offsetof_Args_weights]]\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "incw x6\n"
- "whilelt p1.s, x6, x5\n"
- "ldr x17, [%x[args], %[offsetof_Args_inptr]]\n"
- "add x17, x17, x20\n"
- "str x17, [%x[args], %[offsetof_Args_inptr]]\n"
- "ldr x25, [%x[args], %[offsetof_Args_outptrs]]\n"
- "ldr x24, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
- "ldp x23, x22, [x25, #0x0]\n"
- "ldp x21, x20, [x24, #0x0]\n"
- "add x23, x23, x21\n"
+ "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+ "incw x21, ALL, MUL #16\n"
+ "incw x21, ALL, MUL #9\n"
+ "str x21, [%x[args], %[offsetof_Args_weights]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+ "incw x7\n"
+ "whilelt p1.s, x7, x6\n"
+ "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
+ "add x16, x16, x19\n"
+ "str x16, [%x[args], %[offsetof_Args_inptr]]\n"
+ "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+ "ldp x22, x21, [x24, #0x0]\n"
+ "ldp x20, x19, [x23, #0x0]\n"
"add x22, x22, x20\n"
- "stp x23, x22, [x25, #0x0]\n"
- "ldp x23, x22, [x25, #0x10]\n"
- "ldp x21, x20, [x24, #0x10]\n"
- "add x23, x23, x21\n"
+ "add x21, x21, x19\n"
+ "stp x22, x21, [x24, #0x0]\n"
+ "ldp x22, x21, [x24, #0x10]\n"
+ "ldp x20, x19, [x23, #0x10]\n"
"add x22, x22, x20\n"
- "stp x23, x22, [x25, #0x10]\n"
+ "add x21, x21, x19\n"
+ "stp x22, x21, [x24, #0x10]\n"
"b.any 1b\n"
"addvl SP, SP, #15\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s1_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s1_4rows_dot_za/generic.cpp
index 1636225b31..ad765ba659 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s1_4rows_dot_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s1_4rows_dot_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -69,18 +69,18 @@ void sme2_u8s8u8q_planar_3x3_s1_4rows_dot_za_impl(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "ldr x6, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "ldr x7, [%x[args], %[offsetof_Args_pad_bottom]]\n"
"ptrue p2.b\n"
- "mov x20, #0x6\n"
- "ldr x7, [%x[args], %[offsetof_Args_pad_top]]\n"
+ "mov x19, #0x6\n"
+ "ldr x8, [%x[args], %[offsetof_Args_pad_top]]\n"
"ld1rh { z24.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
- "sub x20, x20, x6\n"
+ "sub x19, x19, x7\n"
".inst 0x25207812 // ptrue pn10.b\n"
"ldr x17, [%x[args], %[offsetof_Args_n_channels]]\n"
"whilelt p1.s, XZR, x17\n"
- "whilelt p9.s, XZR, x20\n"
+ "whilelt p9.s, XZR, x19\n"
"ld1rw { z12.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
- "whilelt p8.s, XZR, x7\n"
+ "whilelt p8.s, XZR, x8\n"
"addvl SP, SP, #-12\n"
"ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
"neg z24.h, p2/M, z24.h\n"
@@ -90,377 +90,377 @@ void sme2_u8s8u8q_planar_3x3_s1_4rows_dot_za_impl(
"ld1rw { z22.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
"ld1rw { z26.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
"1:" // Channel loop
- "ldr x20, [%x[qp], %[offsetof_Requantize32_bias]]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_bias]]\n"
"mov z8.s, #0x0\n"
- "cbz x20, 2f\n"
- "ld1w { z8.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "cbz x19, 2f\n"
+ "ld1w { z8.s }, p1/Z, [x19, x16, LSL #2]\n"
"2:" // Load bias: Done
- "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
- "mov x20, x22\n"
- "ld1sb { z27.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
+ "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+ "mov x19, x21\n"
+ "ld1sb { z27.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
"ld1rh { z21.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
"mov z20.h, #0x0\n"
"sub z27.h, z27.h, z21.h\n"
- "incw x22\n"
- "ld1sb { z23.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
+ "incw x21\n"
+ "ld1sb { z23.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
"sub z23.h, z23.h, z21.h\n"
"trn1 z0.h, z20.h, z27.h\n"
- "ld1sb { z16.s }, p2/Z, [x20]\n"
+ "ld1sb { z16.s }, p2/Z, [x19]\n"
"sub z16.h, z16.h, z21.h\n"
- "mov x20, x22\n"
+ "mov x19, x21\n"
"trn1 z1.h, z27.h, z23.h\n"
- "ld1sb { z27.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
+ "ld1sb { z27.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
"trn1 z2.h, z23.h, z16.h\n"
"trn1 z3.h, z16.h, z20.h\n"
- "ld1sb { z23.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
+ "ld1sb { z23.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
"sub z27.h, z27.h, z21.h\n"
"sub z23.h, z23.h, z21.h\n"
- "ld1sb { z16.s }, p2/Z, [x20]\n"
+ "ld1sb { z16.s }, p2/Z, [x19]\n"
"sub z16.h, z16.h, z21.h\n"
- "addvl x21, SP, #12\n"
- "incw x22\n"
- "addvl x21, x21, #-4\n"
- "mov x20, x22\n"
- "st1h { z0.h }, p2, [x21]\n"
+ "addvl x20, SP, #12\n"
+ "incw x21\n"
+ "addvl x20, x20, #-4\n"
+ "mov x19, x21\n"
+ "st1h { z0.h }, p2, [x20]\n"
"trn1 z0.h, z20.h, z27.h\n"
- "st1h { z1.h }, p2, [x21, #1, MUL VL]\n"
+ "st1h { z1.h }, p2, [x20, #1, MUL VL]\n"
"trn1 z1.h, z27.h, z23.h\n"
- "ld1sb { z27.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
- "st1h { z2.h }, p2, [x21, #2, MUL VL]\n"
+ "ld1sb { z27.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
+ "st1h { z2.h }, p2, [x20, #2, MUL VL]\n"
"trn1 z2.h, z23.h, z16.h\n"
- "ld1sb { z23.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
- "st1h { z3.h }, p2, [x21, #3, MUL VL]\n"
+ "ld1sb { z23.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
+ "st1h { z3.h }, p2, [x20, #3, MUL VL]\n"
"trn1 z3.h, z16.h, z20.h\n"
- "ld1sb { z16.s }, p2/Z, [x20]\n"
- "ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+ "ld1sb { z16.s }, p2/Z, [x19]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
"sub z27.h, z27.h, z21.h\n"
"sub z23.h, z23.h, z21.h\n"
- "addvl x21, x21, #-4\n"
- "st1h { z0.h }, p2, [x21]\n"
+ "addvl x20, x20, #-4\n"
+ "st1h { z0.h }, p2, [x20]\n"
"sub z16.h, z16.h, z21.h\n"
- "st1h { z1.h }, p2, [x21, #1, MUL VL]\n"
+ "st1h { z1.h }, p2, [x20, #1, MUL VL]\n"
"mov z9.d, z8.d\n"
- "st1h { z2.h }, p2, [x21, #2, MUL VL]\n"
+ "st1h { z2.h }, p2, [x20, #2, MUL VL]\n"
"trn1 z0.h, z20.h, z27.h\n"
"trn1 z1.h, z27.h, z23.h\n"
- "st1h { z3.h }, p2, [x21, #3, MUL VL]\n"
- "addvl x21, x21, #-4\n"
+ "st1h { z3.h }, p2, [x20, #3, MUL VL]\n"
+ "addvl x20, x20, #-4\n"
"trn1 z2.h, z23.h, z16.h\n"
"trn1 z3.h, z16.h, z20.h\n"
- "st1h { z0.h }, p2, [x21]\n"
- "st1h { z1.h }, p2, [x21, #1, MUL VL]\n"
- "st1h { z2.h }, p2, [x21, #2, MUL VL]\n"
- "st1h { z3.h }, p2, [x21, #3, MUL VL]\n"
- "cbz x20, 3f\n"
- "ld1w { z10.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "st1h { z0.h }, p2, [x20]\n"
+ "st1h { z1.h }, p2, [x20, #1, MUL VL]\n"
+ "st1h { z2.h }, p2, [x20, #2, MUL VL]\n"
+ "st1h { z3.h }, p2, [x20, #3, MUL VL]\n"
+ "cbz x19, 3f\n"
+ "ld1w { z10.s }, p1/Z, [x19, x16, LSL #2]\n"
"3:" // Load mul: End
- "ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
- "cbz x20, 4f\n"
- "ld1w { z11.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+ "cbz x19, 4f\n"
+ "ld1w { z11.s }, p1/Z, [x19, x16, LSL #2]\n"
"4:" // Load right_shift: End
"ldr x15, [%x[args], %[offsetof_Args_input_cols]]\n"
- "sub x20, x15, #0x1\n"
- "orr x23, x20, %x[ld_in_col], LSL #16\n"
+ "sub x19, x15, #0x1\n"
+ "orr x22, x19, %x[ld_in_col], LSL #16\n"
"ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "orr x23, x17, x23, LSL #22\n"
- "mov x22, #0x6\n"
- "add x21, x7, x6\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
+ "orr x22, x17, x22, LSL #22\n"
+ "mov x21, #0x6\n"
+ "add x20, x8, x7\n"
+ "lsl x19, %x[ld_in_row], #0x0\n"
"ldr x13, [%x[args], %[offsetof_Args_output_cols]]\n"
- "mov x8, #0x0\n"
- "lsl x23, x23, #0x0\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x7, x14\n"
+ "mov x11, #0x0\n"
+ "lsl x22, x22, #0x0\n"
+ "sub x21, x21, x20\n"
+ "madd x19, x19, x8, x14\n"
"5:" // Issue prefetches
- "subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col]\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xf8b64a7c // rprfm pldstrm, x22, [x19]\n"
+ "add x19, x19, %x[ld_in_col]\n"
"bgt 5b\n"
- "ldr x25, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "msub x14, x7, x20, x14\n"
- ".inst 0xc0040900 // mova za.d[x8, #0], { z8.d-z9.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
- ".inst 0xc0040901 // mova za.d[x8, #1], { z8.d-z9.d }\n"
- "mov x22, #0x2\n"
- "ldp x11, x10, [x25], #0x10\n"
- ".inst 0xc0040902 // mova za.d[x8, #2], { z8.d-z9.d }\n"
- "ldp x9, x28, [x20], #0x10\n"
- ".inst 0xc0040903 // mova za.d[x8, #3], { z8.d-z9.d }\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
- ".inst 0xc0040904 // mova za.d[x8, #4], { z8.d-z9.d }\n"
- "ldp x27, x26, [x25], #0x10\n"
- ".inst 0xc0040905 // mova za.d[x8, #5], { z8.d-z9.d }\n"
- "ldp x25, x24, [x20], #0x10\n"
- "cbz x21, 7f\n"
- "cmp x21, x22\n"
- "csel x20, x21, x22, LT\n"
- "sub x21, x21, x20\n"
- "sub x22, x22, x20\n"
- "cbz x21, 7f\n"
- ".inst 0xc0060804 // mova { z4.d-z5.d }, za.d[x8, #0]\n"
- "sub x13, x13, x21\n"
- ".inst 0xc0060826 // mova { z6.d-z7.d }, za.d[x8, #1]\n"
+ "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "lsl x19, %x[ld_in_row], #0x0\n"
+ "msub x14, x8, x19, x14\n"
+ ".inst 0xc0046900 // mova za.d[x11, #0], { z8.d-z9.d }\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ ".inst 0xc0046901 // mova za.d[x11, #1], { z8.d-z9.d }\n"
+ "mov x21, #0x2\n"
+ "ldp x10, x9, [x24], #0x10\n"
+ ".inst 0xc0046902 // mova za.d[x11, #2], { z8.d-z9.d }\n"
+ "ldp x28, x27, [x19], #0x10\n"
+ ".inst 0xc0046903 // mova za.d[x11, #3], { z8.d-z9.d }\n"
+ "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+ ".inst 0xc0046904 // mova za.d[x11, #4], { z8.d-z9.d }\n"
+ "ldp x26, x25, [x24], #0x10\n"
+ ".inst 0xc0046905 // mova za.d[x11, #5], { z8.d-z9.d }\n"
+ "ldp x24, x23, [x19], #0x10\n"
+ "cbz x20, 7f\n"
+ "cmp x20, x21\n"
+ "csel x19, x20, x21, LT\n"
+ "sub x20, x20, x19\n"
+ "sub x21, x21, x19\n"
+ "cbz x20, 7f\n"
+ ".inst 0xc0066804 // mova { z4.d-z5.d }, za.d[x11, #0]\n"
+ "sub x13, x13, x20\n"
+ ".inst 0xc0066826 // mova { z6.d-z7.d }, za.d[x11, #1]\n"
".inst 0xc1aaac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
".inst 0xc1abaa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
".inst 0xc1acab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z12.s\n"
".inst 0xc1bacec4 // sclamp { z4.s-z7.s }, z22.s, z26.s\n"
"6:" // Left padding
- "subs x21, x21, #0x1\n"
- "st1b { z4.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- "st1b { z6.s }, p1, [x10]\n"
+ "subs x20, x20, #0x1\n"
+ "st1b { z4.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z5.s }, p1, [x27]\n"
- "add x27, x27, x25\n"
- "st1b { z7.s }, p1, [x26]\n"
+ "st1b { z6.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
+ "st1b { z5.s }, p1, [x26]\n"
"add x26, x26, x24\n"
+ "st1b { z7.s }, p1, [x25]\n"
+ "add x25, x25, x23\n"
"bgt 6b\n"
"7:" // Left padding: End
- "adds XZR, x7, x6\n"
+ "adds XZR, x8, x7\n"
"bne 12f\n"
- "cbz x22, 10f\n"
- "cmp x22, #0x1\n"
- "sub x15, x15, x22\n"
+ "cbz x21, 10f\n"
+ "cmp x21, #0x1\n"
+ "sub x15, x15, x21\n"
"beq 9f\n"
"8:" // Unpadded: 2 priming loads
- "add x21, x14, %x[ld_in_row]\n"
+ "add x20, x14, %x[ld_in_row]\n"
"ld1b { z17.s }, p1/Z, [x14]\n"
- "addvl x20, SP, #8\n"
- "ld1b { z16.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "addvl x19, SP, #8\n"
+ "ld1b { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z13.h, z17.h, z16.h\n"
"add z13.h, z13.h, z24.h\n"
- "ld1b { z17.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1b { z16.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z14.h, z17.h, z16.h\n"
"add z14.h, z14.h, z24.h\n"
- "ld1b { z17.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "ld1b { z16.s }, p1/Z, [x21]\n"
+ "ld1b { z17.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p1/Z, [x20]\n"
"trn1 z15.h, z17.h, z16.h\n"
"add z15.h, z15.h, z24.h\n"
- ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc16115a8 // sdot za.s[x8, 0], { z13.h-z14.h }, z1.h\n"
- ".inst 0xc16015a9 // sdot za.s[x8, 1], { z13.h-z14.h }, z0.h\n"
- ".inst 0xa0412a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16315c8 // sdot za.s[x8, 0], { z14.h-z15.h }, z3.h\n"
- ".inst 0xc16215c9 // sdot za.s[x8, 1], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xa0402a60 // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc16175a8 // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+ ".inst 0xc16075a9 // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xa0412a62 // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+ ".inst 0xc16375c8 // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+ ".inst 0xc16275c9 // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
"9:" // Unpadded: 1 priming loads
- "add x22, x14, %x[ld_in_row]\n"
+ "add x21, x14, %x[ld_in_row]\n"
"ld1b { z17.s }, p1/Z, [x14]\n"
- "addvl x21, SP, #4\n"
- "ld1b { z16.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "addvl x20, SP, #4\n"
+ "ld1b { z16.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
"trn1 z13.h, z17.h, z16.h\n"
"add z13.h, z13.h, z24.h\n"
- "ld1b { z17.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "addvl x20, SP, #8\n"
- "ld1b { z16.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "addvl x19, SP, #8\n"
+ "ld1b { z16.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
"trn1 z14.h, z17.h, z16.h\n"
"add z14.h, z14.h, z24.h\n"
- "ld1b { z17.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1b { z16.s }, p1/Z, [x22]\n"
+ "ld1b { z16.s }, p1/Z, [x21]\n"
"trn1 z15.h, z17.h, z16.h\n"
"add z15.h, z15.h, z24.h\n"
- ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc16115a8 // sdot za.s[x8, 0], { z13.h-z14.h }, z1.h\n"
- ".inst 0xc16015a9 // sdot za.s[x8, 1], { z13.h-z14.h }, z0.h\n"
".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
- ".inst 0xa0412aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc16115aa // sdot za.s[x8, 2], { z13.h-z14.h }, z1.h\n"
- ".inst 0xc16015ab // sdot za.s[x8, 3], { z13.h-z14.h }, z0.h\n"
- ".inst 0xc16315c8 // sdot za.s[x8, 0], { z14.h-z15.h }, z3.h\n"
- ".inst 0xc16215c9 // sdot za.s[x8, 1], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xc16175a8 // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+ ".inst 0xc16075a9 // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xa0402a60 // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
".inst 0xa0412a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16315ca // sdot za.s[x8, 2], { z14.h-z15.h }, z3.h\n"
- ".inst 0xc16215cb // sdot za.s[x8, 3], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xc16175aa // sdot za.s[x11, 2], { z13.h-z14.h }, z1.h\n"
+ ".inst 0xc16075ab // sdot za.s[x11, 3], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xc16375c8 // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+ ".inst 0xc16275c9 // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xa0412a62 // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+ ".inst 0xc16375ca // sdot za.s[x11, 2], { z14.h-z15.h }, z3.h\n"
+ ".inst 0xc16275cb // sdot za.s[x11, 3], { z14.h-z15.h }, z2.h\n"
"10:" // Unpadded: 0 priming loads
".inst 0xa0402be0 // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
".inst 0xa0412be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
"cbz x15, 18f\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "add x19, x14, %x[ld_in_row]\n"
"ld1b { z17.s }, p1/Z, [x14]\n"
"sub x15, x15, #0x1\n"
- "ld1b { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z13.h, z17.h, z16.h\n"
"sub x13, x13, #0x1\n"
- "ld1b { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"cmp x15, x13\n"
"add z13.h, z13.h, z24.h\n"
- "ld1b { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z14.h, z17.h, z16.h\n"
- "csel x23, x15, x13, LT\n"
- "ld1b { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "csel x22, x15, x13, LT\n"
+ "ld1b { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"add z14.h, z14.h, z24.h\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1b { z16.s }, p1/Z, [x20]\n"
+ "ld1b { z16.s }, p1/Z, [x19]\n"
"trn1 z15.h, z17.h, z16.h\n"
"add z15.h, z15.h, z24.h\n"
- "sub x13, x13, x23\n"
- "cbz x23, 17f\n"
+ "sub x13, x13, x22\n"
+ "cbz x22, 17f\n"
"11:" // Unpadded: Main loop
- ".inst 0xc16115a8 // sdot za.s[x8, 0], { z13.h-z14.h }, z1.h\n"
- "addvl x22, SP, #4\n"
- "addvl x21, SP, #8\n"
+ ".inst 0xc16175a8 // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+ "addvl x21, SP, #4\n"
+ "addvl x20, SP, #8\n"
"ld1b { z21.s }, p1/Z, [x14]\n"
- ".inst 0xc16015a9 // sdot za.s[x8, 1], { z13.h-z14.h }, z0.h\n"
- ".inst 0xa0402ac0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x22]\n"
- "add x20, x14, %x[ld_in_row]\n"
- "subs x23, x23, #0x1\n"
- ".inst 0xc16315c8 // sdot za.s[x8, 0], { z14.h-z15.h }, z3.h\n"
- "ld1b { z20.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc16075a9 // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
+ "add x19, x14, %x[ld_in_row]\n"
+ "subs x22, x22, #0x1\n"
+ ".inst 0xc16375c8 // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+ "ld1b { z20.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc16215c9 // sdot za.s[x8, 1], { z14.h-z15.h }, z2.h\n"
- ".inst 0xa0412ac2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xc0060804 // mova { z4.d-z5.d }, za.d[x8, #0]\n"
- "ld1b { z19.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc0060826 // mova { z6.d-z7.d }, za.d[x8, #1]\n"
+ ".inst 0xc16275c9 // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xa0412aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xc0066804 // mova { z4.d-z5.d }, za.d[x11, #0]\n"
+ "ld1b { z19.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0xc0066826 // mova { z6.d-z7.d }, za.d[x11, #1]\n"
".inst 0xc1aaac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
- ".inst 0xc16115aa // sdot za.s[x8, 2], { z13.h-z14.h }, z1.h\n"
- "ld1b { z18.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc16015ab // sdot za.s[x8, 3], { z13.h-z14.h }, z0.h\n"
- ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xc16175aa // sdot za.s[x11, 2], { z13.h-z14.h }, z1.h\n"
+ "ld1b { z18.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0xc16075ab // sdot za.s[x11, 3], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
".inst 0xc1abaa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
- ".inst 0xc16115ac // sdot za.s[x8, 4], { z13.h-z14.h }, z1.h\n"
- "ld1b { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc16015ad // sdot za.s[x8, 5], { z13.h-z14.h }, z0.h\n"
- "ld1b { z16.s }, p1/Z, [x20]\n"
+ ".inst 0xc16175ac // sdot za.s[x11, 4], { z13.h-z14.h }, z1.h\n"
+ "ld1b { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0xc16075ad // sdot za.s[x11, 5], { z13.h-z14.h }, z0.h\n"
+ "ld1b { z16.s }, p1/Z, [x19]\n"
".inst 0xc1acab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z12.s\n"
- ".inst 0xc16315ca // sdot za.s[x8, 2], { z14.h-z15.h }, z3.h\n"
+ ".inst 0xc16375ca // sdot za.s[x11, 2], { z14.h-z15.h }, z3.h\n"
"trn1 z13.h, z21.h, z20.h\n"
".inst 0xa0402be0 // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
- ".inst 0xc16215cb // sdot za.s[x8, 3], { z14.h-z15.h }, z2.h\n"
- ".inst 0xa0412aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xc16275cb // sdot za.s[x11, 3], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xa0412a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
".inst 0xc1bacec4 // sclamp { z4.s-z7.s }, z22.s, z26.s\n"
- ".inst 0xc16315cc // sdot za.s[x8, 4], { z14.h-z15.h }, z3.h\n"
- "st1b { z4.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
+ ".inst 0xc16375cc // sdot za.s[x11, 4], { z14.h-z15.h }, z3.h\n"
+ "st1b { z4.s }, p1, [x10]\n"
+ "add x10, x10, x28\n"
"add z13.h, z13.h, z24.h\n"
- ".inst 0xc16215cd // sdot za.s[x8, 5], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xc16275cd // sdot za.s[x11, 5], { z14.h-z15.h }, z2.h\n"
"trn1 z14.h, z19.h, z18.h\n"
"trn1 z15.h, z17.h, z16.h\n"
- "add x8, x8, #0x2\n"
+ "add x11, x11, #0x2\n"
".inst 0xa0412be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
- "st1b { z6.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
- ".inst 0xc0040904 // mova za.d[x8, #4], { z8.d-z9.d }\n"
- "st1b { z5.s }, p1, [x27]\n"
- "add x27, x27, x25\n"
- ".inst 0xc0040905 // mova za.d[x8, #5], { z8.d-z9.d }\n"
- "add z14.h, z14.h, z24.h\n"
- "st1b { z7.s }, p1, [x26]\n"
+ "st1b { z6.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
+ ".inst 0xc0046904 // mova za.d[x11, #4], { z8.d-z9.d }\n"
+ "st1b { z5.s }, p1, [x26]\n"
"add x26, x26, x24\n"
+ ".inst 0xc0046905 // mova za.d[x11, #5], { z8.d-z9.d }\n"
+ "add z14.h, z14.h, z24.h\n"
+ "st1b { z7.s }, p1, [x25]\n"
+ "add x25, x25, x23\n"
"add z15.h, z15.h, z24.h\n"
"bgt 11b\n"
"b 17f\n"
"12:" // Padded
- "cbz x22, 15f\n"
- "cmp x22, #0x1\n"
- "sub x15, x15, x22\n"
+ "cbz x21, 15f\n"
+ "cmp x21, #0x1\n"
+ "sub x15, x15, x21\n"
"beq 14f\n"
"13:" // Padded: 2 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"ld1b { z19.s }, p0/Z, [x14]\n"
"add z19.h, p0/M, z19.h, z24.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "add x19, x14, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z24.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z24.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z24.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z13.h, z19.h, z18.h\n"
"trn1 z14.h, z17.h, z16.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z24.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "addvl x20, SP, #8\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
+ "addvl x19, SP, #8\n"
"add z16.h, p0/M, z16.h, z24.h\n"
- ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa0402a60 // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
"trn1 z15.h, z17.h, z16.h\n"
- ".inst 0xc16115a8 // sdot za.s[x8, 0], { z13.h-z14.h }, z1.h\n"
+ ".inst 0xc16175a8 // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
"add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc16015a9 // sdot za.s[x8, 1], { z13.h-z14.h }, z0.h\n"
- ".inst 0xa0412a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16315c8 // sdot za.s[x8, 0], { z14.h-z15.h }, z3.h\n"
- ".inst 0xc16215c9 // sdot za.s[x8, 1], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xc16075a9 // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xa0412a62 // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+ ".inst 0xc16375c8 // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+ ".inst 0xc16275c9 // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
"14:" // Padded: 1 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"ld1b { z19.s }, p0/Z, [x14]\n"
"add z19.h, p0/M, z19.h, z24.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "add x19, x14, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z24.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z24.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z24.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z13.h, z19.h, z18.h\n"
"trn1 z14.h, z17.h, z16.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z24.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "addvl x21, SP, #4\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
+ "addvl x20, SP, #4\n"
"add z16.h, p0/M, z16.h, z24.h\n"
- ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
- "addvl x20, SP, #8\n"
- "trn1 z15.h, z17.h, z16.h\n"
- ".inst 0xc16115a8 // sdot za.s[x8, 0], { z13.h-z14.h }, z1.h\n"
- ".inst 0xc16015a9 // sdot za.s[x8, 1], { z13.h-z14.h }, z0.h\n"
".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ "addvl x19, SP, #8\n"
+ "trn1 z15.h, z17.h, z16.h\n"
+ ".inst 0xc16175a8 // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+ ".inst 0xc16075a9 // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xa0402a60 // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
"add x14, x14, %x[ld_in_col]\n"
- ".inst 0xa0412aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc16115aa // sdot za.s[x8, 2], { z13.h-z14.h }, z1.h\n"
- ".inst 0xc16015ab // sdot za.s[x8, 3], { z13.h-z14.h }, z0.h\n"
- ".inst 0xc16315c8 // sdot za.s[x8, 0], { z14.h-z15.h }, z3.h\n"
- ".inst 0xc16215c9 // sdot za.s[x8, 1], { z14.h-z15.h }, z2.h\n"
".inst 0xa0412a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16315ca // sdot za.s[x8, 2], { z14.h-z15.h }, z3.h\n"
- ".inst 0xc16215cb // sdot za.s[x8, 3], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xc16175aa // sdot za.s[x11, 2], { z13.h-z14.h }, z1.h\n"
+ ".inst 0xc16075ab // sdot za.s[x11, 3], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xc16375c8 // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+ ".inst 0xc16275c9 // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xa0412a62 // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+ ".inst 0xc16375ca // sdot za.s[x11, 2], { z14.h-z15.h }, z3.h\n"
+ ".inst 0xc16275cb // sdot za.s[x11, 3], { z14.h-z15.h }, z2.h\n"
"15:" // Padded: 0 priming loads
".inst 0xa0402be0 // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
".inst 0xa0412be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
@@ -469,192 +469,192 @@ void sme2_u8s8u8q_planar_3x3_s1_4rows_dot_za_impl(
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"ld1b { z19.s }, p0/Z, [x14]\n"
"add z19.h, p0/M, z19.h, z24.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "add x19, x14, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z24.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z24.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z24.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z13.h, z19.h, z18.h\n"
"trn1 z14.h, z17.h, z16.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z24.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z24.h\n"
"sub x15, x15, #0x1\n"
"sub x13, x13, #0x1\n"
"cmp x15, x13\n"
"trn1 z15.h, z17.h, z16.h\n"
- "csel x23, x15, x13, LT\n"
+ "csel x22, x15, x13, LT\n"
"add x14, x14, %x[ld_in_col]\n"
- "sub x13, x13, x23\n"
- "cbz x23, 17f\n"
+ "sub x13, x13, x22\n"
+ "cbz x22, 17f\n"
"16:" // Padded: Main loop
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"ld1b { z21.s }, p0/Z, [x14]\n"
- ".inst 0xc16115a8 // sdot za.s[x8, 0], { z13.h-z14.h }, z1.h\n"
- ".inst 0xc16015a9 // sdot za.s[x8, 1], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xc16175a8 // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+ ".inst 0xc16075a9 // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
"add z21.h, p0/M, z21.h, z24.h\n"
- "add x22, x14, %x[ld_in_row]\n"
+ "add x21, x14, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z20.s }, p0/Z, [x22]\n"
- ".inst 0xc16315c8 // sdot za.s[x8, 0], { z14.h-z15.h }, z3.h\n"
+ "ld1b { z20.s }, p0/Z, [x21]\n"
+ ".inst 0xc16375c8 // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
"add z20.h, p0/M, z20.h, z24.h\n"
- "add x22, x22, %x[ld_in_row]\n"
- ".inst 0xc16215c9 // sdot za.s[x8, 1], { z14.h-z15.h }, z2.h\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ ".inst 0xc16275c9 // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z19.s }, p0/Z, [x22]\n"
+ "ld1b { z19.s }, p0/Z, [x21]\n"
"add z19.h, p0/M, z19.h, z24.h\n"
- ".inst 0xc0060804 // mova { z4.d-z5.d }, za.d[x8, #0]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc0066804 // mova { z4.d-z5.d }, za.d[x11, #0]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z18.s }, p0/Z, [x22]\n"
- ".inst 0xc0060826 // mova { z6.d-z7.d }, za.d[x8, #1]\n"
+ "ld1b { z18.s }, p0/Z, [x21]\n"
+ ".inst 0xc0066826 // mova { z6.d-z7.d }, za.d[x11, #1]\n"
"mov x12, #0x4\n"
- "addvl x21, SP, #4\n"
+ "addvl x20, SP, #4\n"
"add z18.h, p0/M, z18.h, z24.h\n"
".inst 0xc1aaac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
- "addvl x20, SP, #8\n"
- ".inst 0xc16115aa // sdot za.s[x8, 2], { z13.h-z14.h }, z1.h\n"
- "subs x23, x23, #0x1\n"
- "ld1b { z17.s }, p0/Z, [x22]\n"
- ".inst 0xc16015ab // sdot za.s[x8, 3], { z13.h-z14.h }, z0.h\n"
- ".inst 0xc1abaa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ "addvl x19, SP, #8\n"
+ ".inst 0xc16175aa // sdot za.s[x11, 2], { z13.h-z14.h }, z1.h\n"
+ "subs x22, x22, #0x1\n"
+ "ld1b { z17.s }, p0/Z, [x21]\n"
+ ".inst 0xc16075ab // sdot za.s[x11, 3], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xc1abaa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
+ ".inst 0xa0402a60 // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z24.h\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- ".inst 0xa0412aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc16115ac // sdot za.s[x8, 4], { z13.h-z14.h }, z1.h\n"
+ ".inst 0xa0412a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc16175ac // sdot za.s[x11, 4], { z13.h-z14.h }, z1.h\n"
".inst 0xc1acab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z12.s\n"
- "ld1b { z16.s }, p0/Z, [x22]\n"
- ".inst 0xc16015ad // sdot za.s[x8, 5], { z13.h-z14.h }, z0.h\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
+ ".inst 0xc16075ad // sdot za.s[x11, 5], { z13.h-z14.h }, z0.h\n"
"add z16.h, p0/M, z16.h, z24.h\n"
"add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc16315ca // sdot za.s[x8, 2], { z14.h-z15.h }, z3.h\n"
+ ".inst 0xc16375ca // sdot za.s[x11, 2], { z14.h-z15.h }, z3.h\n"
".inst 0xa0402be0 // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
".inst 0xc1bacec4 // sclamp { z4.s-z7.s }, z22.s, z26.s\n"
- ".inst 0xc16215cb // sdot za.s[x8, 3], { z14.h-z15.h }, z2.h\n"
- ".inst 0xa0412a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- "st1b { z4.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- ".inst 0xc16315cc // sdot za.s[x8, 4], { z14.h-z15.h }, z3.h\n"
- "st1b { z6.s }, p1, [x10]\n"
+ ".inst 0xc16275cb // sdot za.s[x11, 3], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xa0412a62 // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+ "st1b { z4.s }, p1, [x10]\n"
"add x10, x10, x28\n"
+ ".inst 0xc16375cc // sdot za.s[x11, 4], { z14.h-z15.h }, z3.h\n"
+ "st1b { z6.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
"trn1 z13.h, z21.h, z20.h\n"
- ".inst 0xc16215cd // sdot za.s[x8, 5], { z14.h-z15.h }, z2.h\n"
- "add x8, x8, #0x2\n"
+ ".inst 0xc16275cd // sdot za.s[x11, 5], { z14.h-z15.h }, z2.h\n"
+ "add x11, x11, #0x2\n"
".inst 0xa0412be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
- "st1b { z5.s }, p1, [x27]\n"
- "add x27, x27, x25\n"
- "st1b { z7.s }, p1, [x26]\n"
+ "st1b { z5.s }, p1, [x26]\n"
"add x26, x26, x24\n"
- ".inst 0xc0040904 // mova za.d[x8, #4], { z8.d-z9.d }\n"
- ".inst 0xc0040905 // mova za.d[x8, #5], { z8.d-z9.d }\n"
+ "st1b { z7.s }, p1, [x25]\n"
+ "add x25, x25, x23\n"
+ ".inst 0xc0046904 // mova za.d[x11, #4], { z8.d-z9.d }\n"
+ ".inst 0xc0046905 // mova za.d[x11, #5], { z8.d-z9.d }\n"
"trn1 z14.h, z19.h, z18.h\n"
"trn1 z15.h, z17.h, z16.h\n"
"bgt 16b\n"
"17:" // Main loop tail
- ".inst 0xc16115a8 // sdot za.s[x8, 0], { z13.h-z14.h }, z1.h\n"
- "addvl x21, SP, #4\n"
- "addvl x20, SP, #8\n"
- ".inst 0xc16015a9 // sdot za.s[x8, 1], { z13.h-z14.h }, z0.h\n"
- ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc16315c8 // sdot za.s[x8, 0], { z14.h-z15.h }, z3.h\n"
- ".inst 0xc16215c9 // sdot za.s[x8, 1], { z14.h-z15.h }, z2.h\n"
- ".inst 0xa0412aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc0060804 // mova { z4.d-z5.d }, za.d[x8, #0]\n"
- ".inst 0xc0060826 // mova { z6.d-z7.d }, za.d[x8, #1]\n"
+ ".inst 0xc16175a8 // sdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
+ "addvl x20, SP, #4\n"
+ "addvl x19, SP, #8\n"
+ ".inst 0xc16075a9 // sdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc16375c8 // sdot za.s[x11, 0], { z14.h-z15.h }, z3.h\n"
+ ".inst 0xc16275c9 // sdot za.s[x11, 1], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xa0412a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc0066804 // mova { z4.d-z5.d }, za.d[x11, #0]\n"
+ ".inst 0xc0066826 // mova { z6.d-z7.d }, za.d[x11, #1]\n"
".inst 0xc1aaac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
- ".inst 0xc16115aa // sdot za.s[x8, 2], { z13.h-z14.h }, z1.h\n"
+ ".inst 0xc16175aa // sdot za.s[x11, 2], { z13.h-z14.h }, z1.h\n"
".inst 0xc1abaa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
- ".inst 0xc16015ab // sdot za.s[x8, 3], { z13.h-z14.h }, z0.h\n"
- ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc16075ab // sdot za.s[x11, 3], { z13.h-z14.h }, z0.h\n"
+ ".inst 0xa0402a60 // ld1h { z0.h-z1.h }, pn10.b/Z, [x19]\n"
".inst 0xc1acab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z12.s\n"
- ".inst 0xc16115ac // sdot za.s[x8, 4], { z13.h-z14.h }, z1.h\n"
+ ".inst 0xc16175ac // sdot za.s[x11, 4], { z13.h-z14.h }, z1.h\n"
".inst 0xc1bacec4 // sclamp { z4.s-z7.s }, z22.s, z26.s\n"
- "st1b { z4.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- ".inst 0xc16015ad // sdot za.s[x8, 5], { z13.h-z14.h }, z0.h\n"
- "st1b { z6.s }, p1, [x10]\n"
+ "st1b { z4.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- ".inst 0xc16315ca // sdot za.s[x8, 2], { z14.h-z15.h }, z3.h\n"
- "st1b { z5.s }, p1, [x27]\n"
- "add x27, x27, x25\n"
- ".inst 0xc16215cb // sdot za.s[x8, 3], { z14.h-z15.h }, z2.h\n"
- ".inst 0xa0412a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- "st1b { z7.s }, p1, [x26]\n"
+ ".inst 0xc16075ad // sdot za.s[x11, 5], { z13.h-z14.h }, z0.h\n"
+ "st1b { z6.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
+ ".inst 0xc16375ca // sdot za.s[x11, 2], { z14.h-z15.h }, z3.h\n"
+ "st1b { z5.s }, p1, [x26]\n"
"add x26, x26, x24\n"
- ".inst 0xc16315cc // sdot za.s[x8, 4], { z14.h-z15.h }, z3.h\n"
- ".inst 0xc16215cd // sdot za.s[x8, 5], { z14.h-z15.h }, z2.h\n"
- "add x8, x8, #0x2\n"
- ".inst 0xc0040904 // mova za.d[x8, #4], { z8.d-z9.d }\n"
- ".inst 0xc0040905 // mova za.d[x8, #5], { z8.d-z9.d }\n"
+ ".inst 0xc16275cb // sdot za.s[x11, 3], { z14.h-z15.h }, z2.h\n"
+ ".inst 0xa0412a62 // ld1h { z2.h-z3.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+ "st1b { z7.s }, p1, [x25]\n"
+ "add x25, x25, x23\n"
+ ".inst 0xc16375cc // sdot za.s[x11, 4], { z14.h-z15.h }, z3.h\n"
+ ".inst 0xc16275cd // sdot za.s[x11, 5], { z14.h-z15.h }, z2.h\n"
+ "add x11, x11, #0x2\n"
+ ".inst 0xc0046904 // mova za.d[x11, #4], { z8.d-z9.d }\n"
+ ".inst 0xc0046905 // mova za.d[x11, #5], { z8.d-z9.d }\n"
"18:" // Main loop skip tail
"cbz x13, 20f\n"
"19:" // Right padding loop
- ".inst 0xc0060804 // mova { z4.d-z5.d }, za.d[x8, #0]\n"
+ ".inst 0xc0066804 // mova { z4.d-z5.d }, za.d[x11, #0]\n"
"subs x13, x13, #0x1\n"
- ".inst 0xc0060826 // mova { z6.d-z7.d }, za.d[x8, #1]\n"
+ ".inst 0xc0066826 // mova { z6.d-z7.d }, za.d[x11, #1]\n"
".inst 0xc1aaac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
- "add x8, x8, #0x2\n"
+ "add x11, x11, #0x2\n"
".inst 0xc1abaa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
- ".inst 0xc0040904 // mova za.d[x8, #4], { z8.d-z9.d }\n"
+ ".inst 0xc0046904 // mova za.d[x11, #4], { z8.d-z9.d }\n"
".inst 0xc1acab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z12.s\n"
- ".inst 0xc0040905 // mova za.d[x8, #5], { z8.d-z9.d }\n"
+ ".inst 0xc0046905 // mova za.d[x11, #5], { z8.d-z9.d }\n"
".inst 0xc1bacec4 // sclamp { z4.s-z7.s }, z22.s, z26.s\n"
- "st1b { z4.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- "st1b { z6.s }, p1, [x10]\n"
+ "st1b { z4.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z5.s }, p1, [x27]\n"
- "add x27, x27, x25\n"
- "st1b { z7.s }, p1, [x26]\n"
+ "st1b { z6.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
+ "st1b { z5.s }, p1, [x26]\n"
"add x26, x26, x24\n"
+ "st1b { z7.s }, p1, [x25]\n"
+ "add x25, x25, x23\n"
"bgt 19b\n"
"20:" // End
- "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
- "incw x22, ALL, MUL #9\n"
- "str x22, [%x[args], %[offsetof_Args_weights]]\n"
+ "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+ "incw x21, ALL, MUL #9\n"
+ "str x21, [%x[args], %[offsetof_Args_weights]]\n"
"incw x16\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
"whilelt p1.s, x16, x17\n"
"ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "add x14, x14, x20\n"
+ "add x14, x14, x19\n"
"str x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "ldr x25, [%x[args], %[offsetof_Args_outptrs]]\n"
- "ldr x24, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
- "ldp x23, x22, [x25, #0x0]\n"
- "ldp x21, x20, [x24, #0x0]\n"
- "add x23, x23, x21\n"
+ "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+ "ldp x22, x21, [x24, #0x0]\n"
+ "ldp x20, x19, [x23, #0x0]\n"
"add x22, x22, x20\n"
- "stp x23, x22, [x25, #0x0]\n"
- "ldp x23, x22, [x25, #0x10]\n"
- "ldp x21, x20, [x24, #0x10]\n"
- "add x23, x23, x21\n"
+ "add x21, x21, x19\n"
+ "stp x22, x21, [x24, #0x0]\n"
+ "ldp x22, x21, [x24, #0x10]\n"
+ "ldp x20, x19, [x23, #0x10]\n"
"add x22, x22, x20\n"
- "stp x23, x22, [x25, #0x10]\n"
+ "add x21, x21, x19\n"
+ "stp x22, x21, [x24, #0x10]\n"
"b.any 1b\n"
"addvl SP, SP, #12\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
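The "20:" // End epilogue above rolls the argument block forward for the next pass of the channel loop: the weights pointer advances by nine widened elements per vector length ("incw ..., ALL, MUL #9"), the input pointer by ld_in_vl, and each of the four output pointers by its own stride from ld_out_vls (the two ldp/add/stp pairs). A minimal scalar sketch of that bookkeeping, assuming the SVE ACLE intrinsic svcntw() for the per-vector word count; the struct and function names below are hypothetical stand-ins for illustration, not part of the kernel:

#include <arm_sve.h>   // svcntw(): number of 32-bit lanes per SVE vector
#include <cstddef>
#include <cstdint>

// Sketch only: scalar model of the pointer bookkeeping in the "End" block.
// Field names mirror the offsetof_Args_* operands used by the assembly.
struct EndBlockArgs {
    const int8_t  *weights;      // stepped by "incw x21, ALL, MUL #9"
    const uint8_t *inptr;        // stepped by "add x14, x14, x19"
    size_t         ld_in_vl;
    uint8_t      **outptrs;      // four output-row pointers
    const size_t  *ld_out_vls;   // four per-row strides
};

static void advance_args(EndBlockArgs &args) {
    args.weights += 9 * svcntw();        // 3x3 kernel, one byte per word lane
    args.inptr   += args.ld_in_vl;
    for (int i = 0; i < 4; i++) {        // the two ldp/add/stp pairs
        args.outptrs[i] += args.ld_out_vls[i];
    }
}

(The channel counter held in x16 is also stepped by one vector of words, via "incw x16", before the predicate for the next iteration is recomputed.)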
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s2_2rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s2_2rows_dot_za/generic.cpp
new file mode 100644
index 0000000000..328227f91a
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s2_2rows_dot_za/generic.cpp
@@ -0,0 +1,592 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include <algorithm>
+#include <cstddef>
+#include "arm_gemm.hpp"
+
+using arm_gemm::Requantize32;
+
+namespace arm_conv {
+namespace depthwise {
+
+void sme2_u8s8u8q_planar_3x3_s2_2rows_dot_za_impl(
+ const uint8_t *inptr,
+ size_t ld_in_row,
+ size_t ld_in_col,
+ unsigned int pad_top,
+ unsigned int valid_input_rows,
+ unsigned int pad_left,
+ unsigned int valid_input_cols,
+ const int8_t *weights,
+ uint8_t **outptrs,
+ const size_t *outlds,
+ unsigned int output_cols,
+ unsigned int start_channel,
+ unsigned int valid_channels,
+ const arm_gemm::Requantize32 &qp
+)
+{
+ struct Args
+ {
+ const uint8_t *inptr;
+ long unsigned int pad_top, pad_bottom, pad_left;
+ const int8_t *weights;
+ long unsigned int input_cols, output_cols;
+ uint8_t **outptrs;
+ const size_t *ld_out_cols;
+ long unsigned int n, n_channels;
+ };
+
+ Args args = { inptr, pad_top, 5u - std::min(5u, pad_top + valid_input_rows), pad_left, weights, valid_input_cols, output_cols, outptrs, outlds, start_channel, valid_channels };
+
+ __asm__ __volatile__(
+ "ldr x11, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "mov x19, #0x5\n"
+ ".inst 0xd503477f // SMSTART ZA\n"
+ "sub x19, x19, x11\n"
+ "ldr x10, [%x[args], %[offsetof_Args_pad_top]]\n"
+ "ptrue p0.b\n"
+ "mov z12.s, #0x0\n"
+ "ldr x22, [%x[args], %[offsetof_Args_n_channels]]\n"
+ "whilelt p5.s, XZR, x22\n"
+ "whilelt p9.s, XZR, x19\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_bias]]\n"
+ "whilelt p8.s, XZR, x10\n"
+ "eor p8.b, p0/Z, p8.b, p9.b\n"
+ "ldr x21, [%x[args], %[offsetof_Args_n]]\n"
+ "cbz x19, 1f\n"
+ "ld1w { z12.s }, p5/Z, [x19, x21, LSL #2]\n"
+ "1:" // Load bias: Done
+ "ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
+ "ld1sb { z27.s }, p0/Z, [x20]\n"
+ "incw x20\n"
+ "mov z0.h, #0x0\n"
+ "ld1sb { z16.s }, p0/Z, [x20]\n"
+ "incw x20\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+ "mov z13.d, z12.d\n"
+ "ld1sb { z22.s }, p0/Z, [x20]\n"
+ "incw x20\n"
+ "ld1sb { z21.s }, p0/Z, [x20]\n"
+ "incw x20\n"
+ "ld1sb { z20.s }, p0/Z, [x20]\n"
+ "incw x20\n"
+ "ld1sb { z18.s }, p0/Z, [x20]\n"
+ "incw x20\n"
+ "ld1sb { z17.s }, p0/Z, [x20]\n"
+ "incw x20\n"
+ "ld1sb { z24.s }, p0/Z, [x20]\n"
+ "incw x20\n"
+ "ld1sb { z19.s }, p0/Z, [x20]\n"
+ "ld1rh { z28.h }, p0/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+ "sub z27.h, z27.h, z28.h\n"
+ "sub z16.h, z16.h, z28.h\n"
+ "sub z22.h, z22.h, z28.h\n"
+ "sub z21.h, z21.h, z28.h\n"
+ "trn1 z8.h, z27.h, z21.h\n"
+ "sub z20.h, z20.h, z28.h\n"
+ "sub z18.h, z18.h, z28.h\n"
+ "trn1 z7.h, z16.h, z20.h\n"
+ "sub z17.h, z17.h, z28.h\n"
+ "sub z24.h, z24.h, z28.h\n"
+ "trn1 z6.h, z17.h, z0.h\n"
+ "sub z19.h, z19.h, z28.h\n"
+ "trn1 z5.h, z24.h, z0.h\n"
+ "trn1 z4.h, z22.h, z18.h\n"
+ "trn1 z3.h, z19.h, z0.h\n"
+ "ld1rh { z21.h }, p0/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
+ "ld1rw { z2.s }, p0/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+ "ld1rw { z1.s }, p0/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
+ "cbz x19, 2f\n"
+ "ld1w { z1.s }, p5/Z, [x19, x21, LSL #2]\n"
+ "2:" // Load mul: End
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+ "ld1rw { z0.s }, p0/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+ "cbz x19, 3f\n"
+ "ld1w { z0.s }, p5/Z, [x19, x21, LSL #2]\n"
+ "3:" // Load right_shift: End
+ "ldr x28, [%x[args], %[offsetof_Args_input_cols]]\n"
+ "orr x21, x28, %x[ld_in_col], LSL #16\n"
+ "orr x21, x22, x21, LSL #22\n"
+ "ld1rw { z20.s }, p0/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
+ "ldr x27, [%x[args], %[offsetof_Args_inptr]]\n"
+ "mov x20, #0x5\n"
+ "add x19, x10, x11\n"
+ "ld1rw { z19.s }, p0/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+ "mov x9, #0x0\n"
+ "ldr x26, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "lsl x21, x21, #0x0\n"
+ "sub x20, x20, x19\n"
+ "mov x19, x27\n"
+ "4:" // Issue prefetches
+ "subs x20, x20, #0x1\n"
+ ".inst 0xf8b54a7c // rprfm pldstrm, x21, [x19]\n"
+ "add x19, x19, %x[ld_in_col]\n"
+ "bgt 4b\n"
+ "ldr x21, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "lsl x19, %x[ld_in_row], #0x0\n"
+ "msub x27, x10, x19, x27\n"
+ ".inst 0xc0042980 // mova za.d[x9, #0], { z12.d-z13.d }\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ ".inst 0xc0042981 // mova za.d[x9, #1], { z12.d-z13.d }\n"
+ "mov x25, #0x2\n"
+ "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+ ".inst 0xc0042982 // mova za.d[x9, #2], { z12.d-z13.d }\n"
+ "ldp x24, x23, [x21], #0x10\n"
+ "ldp x22, x21, [x19], #0x10\n"
+ "cbz x20, 6f\n"
+ "cmp x20, x25\n"
+ "csel x19, x20, x25, LT\n"
+ "sub x20, x20, x19\n"
+ "sub x25, x25, x19\n"
+ "cbz x20, 6f\n"
+ ".inst 0xc0062818 // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+ ".inst 0xc1a1a418 // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+ "and x25, x20, #0x1\n"
+ ".inst 0xc1a0a238 // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+ "add x20, x20, #0x1\n"
+ "lsr x20, x20, #0x1\n"
+ ".inst 0xc1a2a318 // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+ "sub x26, x26, x20\n"
+ ".inst 0xc1b3c698 // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+ "5:" // Left padding
+ "subs x20, x20, #0x1\n"
+ "st1b { z24.s }, p5, [x24]\n"
+ "add x24, x24, x22\n"
+ "st1b { z25.s }, p5, [x23]\n"
+ "add x23, x23, x21\n"
+ "bgt 5b\n"
+ "6:" // Left padding: End
+ "adds XZR, x10, x11\n"
+ "bne 11f\n"
+ "cbz x25, 9f\n"
+ "cmp x25, #0x1\n"
+ "sub x28, x28, x25\n"
+ "beq 8f\n"
+ "7:" // Unpadded: 2 priming loads
+ "add x19, x27, %x[ld_in_row]\n"
+ "ld1b { z14.s }, p5/Z, [x27]\n"
+ "sub z14.h, z14.h, z21.h\n"
+ "add x27, x27, %x[ld_in_col]\n"
+ "ld1b { z18.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub z18.h, z18.h, z21.h\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "ld1b { z15.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub z15.h, z15.h, z21.h\n"
+ "ld1b { z17.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub z17.h, z17.h, z21.h\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "ld1b { z16.s }, p5/Z, [x19]\n"
+ "sub z16.h, z16.h, z21.h\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc16835c8 // sdot za.s[x9, 0], { z14.h-z15.h }, z8.h\n"
+ ".inst 0xc16635e8 // sdot za.s[x9, 0], { z15.h-z16.h }, z6.h\n"
+ "8:" // Unpadded: 1 priming loads
+ "add x19, x27, %x[ld_in_row]\n"
+ "ld1b { z14.s }, p5/Z, [x27]\n"
+ "sub z14.h, z14.h, z21.h\n"
+ "add x27, x27, %x[ld_in_col]\n"
+ "ld1b { z18.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub z18.h, z18.h, z21.h\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "ld1b { z15.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub z15.h, z15.h, z21.h\n"
+ "ld1b { z17.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub z17.h, z17.h, z21.h\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "ld1b { z16.s }, p5/Z, [x19]\n"
+ "sub z16.h, z16.h, z21.h\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc16735c8 // sdot za.s[x9, 0], { z14.h-z15.h }, z7.h\n"
+ ".inst 0xc16535e8 // sdot za.s[x9, 0], { z15.h-z16.h }, z5.h\n"
+ "9:" // Unpadded: 0 priming loads
+ "add x20, x27, %x[ld_in_row]\n"
+ "ld1b { z14.s }, p5/Z, [x27]\n"
+ "sub z14.h, z14.h, z21.h\n"
+ "sub x28, x28, #0x2\n"
+ "ld1b { z18.s }, p5/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "sub z18.h, z18.h, z21.h\n"
+ "sub x26, x26, #0x1\n"
+ "ld1b { z15.s }, p5/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "sub z15.h, z15.h, z21.h\n"
+ "lsr x19, x28, #0x1\n"
+ "ld1b { z17.s }, p5/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "sub z17.h, z17.h, z21.h\n"
+ "cmp x19, x26\n"
+ "ld1b { z16.s }, p5/Z, [x20]\n"
+ "sub z16.h, z16.h, z21.h\n"
+ "csel x20, x19, x26, LT\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "mov z16.d, z16.d\n"
+ "add x27, x27, %x[ld_in_col]\n"
+ "and x28, x28, #0x1\n"
+ "sub x26, x26, x20\n"
+ "cbz x20, 16f\n"
+ "10:" // Unpadded: Main loop
+ ".inst 0xc16435c8 // sdot za.s[x9, 0], { z14.h-z15.h }, z4.h\n"
+ "add x19, x27, %x[ld_in_row]\n"
+ "subs x20, x20, #0x1\n"
+ ".inst 0xc16835c9 // sdot za.s[x9, 1], { z14.h-z15.h }, z8.h\n"
+ "ld1b { z14.s }, p5/Z, [x27]\n"
+ "sub z14.h, z14.h, z21.h\n"
+ "add x27, x27, %x[ld_in_col]\n"
+ "ld1b { z18.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0xc16335e8 // sdot za.s[x9, 0], { z15.h-z16.h }, z3.h\n"
+ "sub z18.h, z18.h, z21.h\n"
+ ".inst 0xc16635e9 // sdot za.s[x9, 1], { z15.h-z16.h }, z6.h\n"
+ "ld1b { z15.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub z15.h, z15.h, z21.h\n"
+ "ld1b { z17.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub z17.h, z17.h, z21.h\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "ld1b { z16.s }, p5/Z, [x19]\n"
+ "sub z16.h, z16.h, z21.h\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add x19, x27, %x[ld_in_row]\n"
+ ".inst 0xc0062818 // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+ "add x9, x9, #0x1\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc16735c8 // sdot za.s[x9, 0], { z14.h-z15.h }, z7.h\n"
+ ".inst 0xc1a1a418 // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+ "ld1b { z14.s }, p5/Z, [x27]\n"
+ ".inst 0xc16535e8 // sdot za.s[x9, 0], { z15.h-z16.h }, z5.h\n"
+ ".inst 0xc1a0a238 // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+ "ld1b { z18.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0xc1a2a318 // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+ "ld1b { z15.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub z14.h, z14.h, z21.h\n"
+ "sub z18.h, z18.h, z21.h\n"
+ "ld1b { z17.s }, p5/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub z15.h, z15.h, z21.h\n"
+ "sub z17.h, z17.h, z21.h\n"
+ "ld1b { z16.s }, p5/Z, [x19]\n"
+ "sub z16.h, z16.h, z21.h\n"
+ ".inst 0xc1b3c698 // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+ "add x27, x27, %x[ld_in_col]\n"
+ "st1b { z24.s }, p5, [x24]\n"
+ "add x24, x24, x22\n"
+ ".inst 0xc0042982 // mova za.d[x9, #2], { z12.d-z13.d }\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "st1b { z25.s }, p5, [x23]\n"
+ "add x23, x23, x21\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "mov z16.d, z16.d\n"
+ "bgt 10b\n"
+ "b 16f\n"
+ "11:" // Padded
+ "cbz x25, 14f\n"
+ "cmp x25, #0x1\n"
+ "sub x28, x28, x25\n"
+ "beq 13f\n"
+ "12:" // Padded: 2 priming loads
+ "mov x12, #0x0\n"
+ ".inst 0x25305504 // psel p4.s, p5.s/Z, p8.s[w12]\n"
+ "ld1b { z14.s }, p4/Z, [x27]\n"
+ "sub z14.h, p4/M, z14.h, z21.h\n"
+ "add x19, x27, %x[ld_in_row]\n"
+ ".inst 0x25705503 // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+ "ld1b { z18.s }, p3/Z, [x19]\n"
+ "sub z18.h, p3/M, z18.h, z21.h\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25b05502 // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+ "ld1b { z15.s }, p2/Z, [x19]\n"
+ "sub z15.h, p2/M, z15.h, z21.h\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25f05501 // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+ "ld1b { z17.s }, p1/Z, [x19]\n"
+ "sub z17.h, p1/M, z17.h, z21.h\n"
+ "mov x12, #0x4\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ ".inst 0x25305500 // psel p0.s, p5.s/Z, p8.s[w12]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
+ "sub z16.h, p0/M, z16.h, z21.h\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc16835c8 // sdot za.s[x9, 0], { z14.h-z15.h }, z8.h\n"
+ "add x27, x27, %x[ld_in_col]\n"
+ ".inst 0xc16635e8 // sdot za.s[x9, 0], { z15.h-z16.h }, z6.h\n"
+ "13:" // Padded: 1 priming loads
+ "mov x12, #0x0\n"
+ ".inst 0x25305504 // psel p4.s, p5.s/Z, p8.s[w12]\n"
+ "ld1b { z14.s }, p4/Z, [x27]\n"
+ "sub z14.h, p4/M, z14.h, z21.h\n"
+ "add x19, x27, %x[ld_in_row]\n"
+ ".inst 0x25705503 // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+ "ld1b { z18.s }, p3/Z, [x19]\n"
+ "sub z18.h, p3/M, z18.h, z21.h\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25b05502 // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+ "ld1b { z15.s }, p2/Z, [x19]\n"
+ "sub z15.h, p2/M, z15.h, z21.h\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25f05501 // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+ "ld1b { z17.s }, p1/Z, [x19]\n"
+ "sub z17.h, p1/M, z17.h, z21.h\n"
+ "mov x12, #0x4\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ ".inst 0x25305500 // psel p0.s, p5.s/Z, p8.s[w12]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
+ "sub z16.h, p0/M, z16.h, z21.h\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc16735c8 // sdot za.s[x9, 0], { z14.h-z15.h }, z7.h\n"
+ "add x27, x27, %x[ld_in_col]\n"
+ ".inst 0xc16535e8 // sdot za.s[x9, 0], { z15.h-z16.h }, z5.h\n"
+ "14:" // Padded: 0 priming loads
+ "mov x12, #0x0\n"
+ ".inst 0x25305504 // psel p4.s, p5.s/Z, p8.s[w12]\n"
+ "ld1b { z14.s }, p4/Z, [x27]\n"
+ "sub z14.h, p4/M, z14.h, z21.h\n"
+ "add x19, x27, %x[ld_in_row]\n"
+ ".inst 0x25705503 // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+ "ld1b { z18.s }, p3/Z, [x19]\n"
+ "sub z18.h, p3/M, z18.h, z21.h\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25b05502 // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+ "ld1b { z15.s }, p2/Z, [x19]\n"
+ "sub z15.h, p2/M, z15.h, z21.h\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25f05501 // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+ "ld1b { z17.s }, p1/Z, [x19]\n"
+ "sub z17.h, p1/M, z17.h, z21.h\n"
+ "mov x12, #0x4\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ ".inst 0x25305500 // psel p0.s, p5.s/Z, p8.s[w12]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
+ "sub z16.h, p0/M, z16.h, z21.h\n"
+ "sub x28, x28, #0x2\n"
+ "sub x26, x26, #0x1\n"
+ "lsr x19, x28, #0x1\n"
+ "mov z16.d, z16.d\n"
+ "cmp x19, x26\n"
+ "csel x20, x19, x26, LT\n"
+ "add x27, x27, %x[ld_in_col]\n"
+ "and x28, x28, #0x1\n"
+ "sub x26, x26, x20\n"
+ "cbz x20, 16f\n"
+ "15:" // Padded: Main loop
+ ".inst 0xc16435c8 // sdot za.s[x9, 0], { z14.h-z15.h }, z4.h\n"
+ "mov x12, #0x0\n"
+ ".inst 0x25305504 // psel p4.s, p5.s/Z, p8.s[w12]\n"
+ ".inst 0xc16835c9 // sdot za.s[x9, 1], { z14.h-z15.h }, z8.h\n"
+ "add x19, x27, %x[ld_in_row]\n"
+ ".inst 0x25705503 // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+ "ld1b { z14.s }, p4/Z, [x27]\n"
+ "ld1b { z18.s }, p3/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25b05502 // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+ ".inst 0xc16335e8 // sdot za.s[x9, 0], { z15.h-z16.h }, z3.h\n"
+ ".inst 0xc16635e9 // sdot za.s[x9, 1], { z15.h-z16.h }, z6.h\n"
+ "ld1b { z15.s }, p2/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25f05501 // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+ "ld1b { z17.s }, p1/Z, [x19]\n"
+ "mov x12, #0x4\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub z14.h, p4/M, z14.h, z21.h\n"
+ ".inst 0x25305500 // psel p0.s, p5.s/Z, p8.s[w12]\n"
+ "sub z18.h, p3/M, z18.h, z21.h\n"
+ "sub z15.h, p2/M, z15.h, z21.h\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
+ "sub z17.h, p1/M, z17.h, z21.h\n"
+ "sub z16.h, p0/M, z16.h, z21.h\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "add x27, x27, %x[ld_in_col]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ ".inst 0xc0062818 // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+ "add x9, x9, #0x1\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc16735c8 // sdot za.s[x9, 0], { z14.h-z15.h }, z7.h\n"
+ ".inst 0xc1a1a418 // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+ "mov x12, #0x0\n"
+ ".inst 0x25305504 // psel p4.s, p5.s/Z, p8.s[w12]\n"
+ "add x19, x27, %x[ld_in_row]\n"
+ "ld1b { z14.s }, p4/Z, [x27]\n"
+ ".inst 0xc16535e8 // sdot za.s[x9, 0], { z15.h-z16.h }, z5.h\n"
+ ".inst 0x25705503 // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+ "ld1b { z18.s }, p3/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0xc1a0a238 // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+ ".inst 0x25b05502 // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+ "ld1b { z15.s }, p2/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0xc1a2a318 // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+ ".inst 0x25f05501 // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+ "mov x12, #0x4\n"
+ "ld1b { z17.s }, p1/Z, [x19]\n"
+ "sub z14.h, p4/M, z14.h, z21.h\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25305500 // psel p0.s, p5.s/Z, p8.s[w12]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
+ "sub z18.h, p3/M, z18.h, z21.h\n"
+ "sub z15.h, p2/M, z15.h, z21.h\n"
+ "sub z17.h, p1/M, z17.h, z21.h\n"
+ "subs x20, x20, #0x1\n"
+ ".inst 0xc0042982 // mova za.d[x9, #2], { z12.d-z13.d }\n"
+ "sub z16.h, p0/M, z16.h, z21.h\n"
+ ".inst 0xc1b3c698 // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+ "st1b { z24.s }, p5, [x24]\n"
+ "add x24, x24, x22\n"
+ "st1b { z25.s }, p5, [x23]\n"
+ "add x23, x23, x21\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "mov z16.d, z16.d\n"
+ "add x27, x27, %x[ld_in_col]\n"
+ "bgt 15b\n"
+ "16:" // Main loop tail
+ ".inst 0xc16435c8 // sdot za.s[x9, 0], { z14.h-z15.h }, z4.h\n"
+ "mov x12, #0x0\n"
+ ".inst 0x25305504 // psel p4.s, p5.s/Z, p8.s[w12]\n"
+ ".inst 0xc16335e8 // sdot za.s[x9, 0], { z15.h-z16.h }, z3.h\n"
+ "add x19, x27, %x[ld_in_row]\n"
+ ".inst 0x25705503 // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+ ".inst 0xc16835c9 // sdot za.s[x9, 1], { z14.h-z15.h }, z8.h\n"
+ "ld1b { z14.s }, p4/Z, [x27]\n"
+ ".inst 0x25b05502 // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+ ".inst 0x25f05501 // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+ "ld1b { z18.s }, p3/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0xc16635e9 // sdot za.s[x9, 1], { z15.h-z16.h }, z6.h\n"
+ "mov x12, #0x4\n"
+ ".inst 0xc0062818 // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+ "ld1b { z15.s }, p2/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0xc1a1a418 // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+ "ld1b { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25305500 // psel p0.s, p5.s/Z, p8.s[w12]\n"
+ "sub z14.h, p4/M, z14.h, z21.h\n"
+ "sub z18.h, p3/M, z18.h, z21.h\n"
+ "sub z15.h, p2/M, z15.h, z21.h\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
+ "add x9, x9, #0x1\n"
+ "sub z17.h, p1/M, z17.h, z21.h\n"
+ "sub z16.h, p0/M, z16.h, z21.h\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "add x27, x27, %x[ld_in_col]\n"
+ ".inst 0xc1a0a238 // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ ".inst 0xc1a2a318 // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+ ".inst 0xc16735c8 // sdot za.s[x9, 0], { z14.h-z15.h }, z7.h\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc1b3c698 // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+ "st1b { z24.s }, p5, [x24]\n"
+ "add x24, x24, x22\n"
+ "st1b { z25.s }, p5, [x23]\n"
+ "add x23, x23, x21\n"
+ ".inst 0xc0042982 // mova za.d[x9, #2], { z12.d-z13.d }\n"
+ ".inst 0xc16535e8 // sdot za.s[x9, 0], { z15.h-z16.h }, z5.h\n"
+ "cbz x28, 17f\n" // Skip remainder inputs
+ "mov x12, #0x0\n"
+ ".inst 0x25305504 // psel p4.s, p5.s/Z, p8.s[w12]\n"
+ "ld1b { z14.s }, p4/Z, [x27]\n"
+ "sub z14.h, p4/M, z14.h, z21.h\n"
+ "add x19, x27, %x[ld_in_row]\n"
+ ".inst 0x25705503 // psel p3.s, p5.s/Z, p8.s[w12, #1]\n"
+ "ld1b { z18.s }, p3/Z, [x19]\n"
+ "sub z18.h, p3/M, z18.h, z21.h\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25b05502 // psel p2.s, p5.s/Z, p8.s[w12, #2]\n"
+ "ld1b { z15.s }, p2/Z, [x19]\n"
+ "sub z15.h, p2/M, z15.h, z21.h\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0x25f05501 // psel p1.s, p5.s/Z, p8.s[w12, #3]\n"
+ "ld1b { z17.s }, p1/Z, [x19]\n"
+ "sub z17.h, p1/M, z17.h, z21.h\n"
+ "mov x12, #0x4\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ ".inst 0x25305500 // psel p0.s, p5.s/Z, p8.s[w12]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
+ "sub z16.h, p0/M, z16.h, z21.h\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc16435c8 // sdot za.s[x9, 0], { z14.h-z15.h }, z4.h\n"
+ "sub x26, x26, #0x1\n"
+ ".inst 0xc16335e8 // sdot za.s[x9, 0], { z15.h-z16.h }, z3.h\n"
+ ".inst 0xc0062818 // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+ ".inst 0xc1a1a418 // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+ ".inst 0xc1a0a238 // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+ ".inst 0xc16835c9 // sdot za.s[x9, 1], { z14.h-z15.h }, z8.h\n"
+ ".inst 0xc1a2a318 // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+ ".inst 0xc16635e9 // sdot za.s[x9, 1], { z15.h-z16.h }, z6.h\n"
+ "add x9, x9, #0x1\n"
+ ".inst 0xc1b3c698 // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+ "st1b { z24.s }, p5, [x24]\n"
+ "add x24, x24, x22\n"
+ ".inst 0xc0042982 // mova za.d[x9, #2], { z12.d-z13.d }\n"
+ "st1b { z25.s }, p5, [x23]\n"
+ "add x23, x23, x21\n"
+ "17:" // Tail input: End
+ "cbz x26, 19f\n"
+ "18:" // Right padding loop
+ ".inst 0xc0062818 // mova { z24.d-z25.d }, za.d[x9, #0]\n"
+ ".inst 0xc1a1a418 // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z1.s\n"
+ "add x9, x9, #0x1\n"
+ ".inst 0xc1a0a238 // srshl { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
+ "subs x26, x26, #0x1\n"
+ ".inst 0xc0042982 // mova za.d[x9, #2], { z12.d-z13.d }\n"
+ ".inst 0xc1a2a318 // add { z24.s-z25.s }, { z24.s-z25.s }, z2.s\n"
+ ".inst 0xc1b3c698 // sclamp { z24.s-z25.s }, z20.s, z19.s\n"
+ "st1b { z24.s }, p5, [x24]\n"
+ "add x24, x24, x22\n"
+ "st1b { z25.s }, p5, [x23]\n"
+ "add x23, x23, x21\n"
+ "bgt 18b\n"
+ "19:" // End
+ ".inst 0xd503467f // SMSTOP\n"
+ :
+ : [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_n] "I" (offsetof(Args, n)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+}
+
+} // namespace depthwise
+} // namespace arm_conv
+
+#endif // defined(ARM_COMPUTE_ENABLE_SME2)
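The store path of this new kernel follows the requantization chain spelled out in the instruction comments above: sqdmulh against the per-layer (or per-channel) multiplier in z1, srshl by the right-shift vector in z0, add of the c_offset in z2, then sclamp into [minval, maxval] before the narrowing st1b stores. A scalar reference model of that arithmetic is sketched below, assuming a negative shift operand encodes SRSHL's rounding right shift; the helper names are illustrative only and are not arm_gemm API:

#include <algorithm>
#include <cstdint>

// SQDMULH: saturating doubling multiply returning the high half,
// i.e. (2*a*b) >> 32 for 32-bit elements.
static int32_t sqdmulh(int32_t a, int32_t b) {
    int64_t r = ((int64_t)a * (int64_t)b) >> 31;
    if (r > INT32_MAX) r = INT32_MAX;   // only a == b == INT32_MIN saturates
    return (int32_t)r;
}

// SRSHL: left shift for a non-negative count, rounding right shift otherwise.
static int32_t srshl(int32_t a, int32_t shift) {
    if (shift >= 0) return (int32_t)((int64_t)a << shift);
    int s = -shift;
    return (int32_t)(((int64_t)a + ((int64_t)1 << (s - 1))) >> s);
}

// One accumulator through the sqdmulh -> srshl -> add -> sclamp chain.
static uint8_t requantize(int32_t acc, int32_t mul, int32_t shift,
                          int32_t c_offset, int32_t minval, int32_t maxval) {
    int32_t v = srshl(sqdmulh(acc, mul), shift);
    v = std::min(std::max(v + c_offset, minval), maxval);   // sclamp
    return (uint8_t)v;
}

Accumulators entering this chain already hold the bias plus the offset-corrected sdot products, matching the a_offset subtraction applied to the loaded activations and the b_offset subtraction applied to the weights earlier in the kernel.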
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za/generic.cpp
index 2848a015db..7a9724c667 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -69,18 +69,18 @@ void sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za_impl(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "ldr x6, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "ldr x7, [%x[args], %[offsetof_Args_pad_bottom]]\n"
"ptrue p2.b\n"
- "mov x20, #0x9\n"
- "ldr x7, [%x[args], %[offsetof_Args_pad_top]]\n"
+ "mov x19, #0x9\n"
+ "ldr x8, [%x[args], %[offsetof_Args_pad_top]]\n"
"ld1rh { z5.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
- "sub x20, x20, x6\n"
+ "sub x19, x19, x7\n"
".inst 0x25207812 // ptrue pn10.b\n"
"ldr x17, [%x[args], %[offsetof_Args_n_channels]]\n"
"whilelt p1.s, XZR, x17\n"
- "whilelt p9.s, XZR, x20\n"
+ "whilelt p9.s, XZR, x19\n"
"ld1rw { z4.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
- "whilelt p8.s, XZR, x7\n"
+ "whilelt p8.s, XZR, x8\n"
"addvl SP, SP, #-6\n"
"ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
"neg z5.h, p2/M, z5.h\n"
@@ -90,317 +90,317 @@ void sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za_impl(
"ld1rw { z27.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
"ld1rw { z23.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
"1:" // Channel loop
- "ldr x20, [%x[qp], %[offsetof_Requantize32_bias]]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_bias]]\n"
"mov z0.s, #0x0\n"
- "cbz x20, 2f\n"
- "ld1w { z0.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "cbz x19, 2f\n"
+ "ld1w { z0.s }, p1/Z, [x19, x16, LSL #2]\n"
"2:" // Load bias: Done
- "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
- "mov x20, x22\n"
- "ld1sb { z24.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
+ "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+ "mov x19, x21\n"
+ "ld1sb { z24.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
"ld1rh { z13.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
"sub z24.h, z24.h, z13.h\n"
- "incw x22\n"
+ "incw x21\n"
"mov z17.h, #0x0\n"
- "ld1sb { z25.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
+ "ld1sb { z25.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
"sub z25.h, z25.h, z13.h\n"
"trn1 z10.h, z24.h, z25.h\n"
- "ld1sb { z16.s }, p2/Z, [x20]\n"
+ "ld1sb { z16.s }, p2/Z, [x19]\n"
"sub z16.h, z16.h, z13.h\n"
- "mov x20, x22\n"
+ "mov x19, x21\n"
"trn1 z11.h, z16.h, z17.h\n"
- "ld1sb { z24.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
+ "ld1sb { z24.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
"sub z24.h, z24.h, z13.h\n"
- "addvl x21, SP, #6\n"
- "ld1sb { z25.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
+ "addvl x20, SP, #6\n"
+ "ld1sb { z25.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
"sub z25.h, z25.h, z13.h\n"
- "incw x22\n"
- "ld1sb { z16.s }, p2/Z, [x20]\n"
+ "incw x21\n"
+ "ld1sb { z16.s }, p2/Z, [x19]\n"
"sub z16.h, z16.h, z13.h\n"
- "addvl x21, x21, #-2\n"
- "mov x20, x22\n"
- "st1h { z10.h }, p2, [x21]\n"
+ "addvl x20, x20, #-2\n"
+ "mov x19, x21\n"
+ "st1h { z10.h }, p2, [x20]\n"
"trn1 z10.h, z24.h, z25.h\n"
- "ld1sb { z24.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
- "ld1sb { z25.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #3\n"
- "st1h { z11.h }, p2, [x21, #1, MUL VL]\n"
+ "ld1sb { z24.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
+ "ld1sb { z25.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #3\n"
+ "st1h { z11.h }, p2, [x20, #1, MUL VL]\n"
"trn1 z11.h, z16.h, z17.h\n"
- "ld1sb { z16.s }, p2/Z, [x20]\n"
+ "ld1sb { z16.s }, p2/Z, [x19]\n"
"sub z24.h, z24.h, z13.h\n"
"sub z25.h, z25.h, z13.h\n"
- "ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
"sub z16.h, z16.h, z13.h\n"
- "addvl x21, x21, #-2\n"
- "st1h { z10.h }, p2, [x21]\n"
+ "addvl x20, x20, #-2\n"
+ "st1h { z10.h }, p2, [x20]\n"
"mov z1.d, z0.d\n"
- "st1h { z11.h }, p2, [x21, #1, MUL VL]\n"
- "addvl x21, x21, #-2\n"
+ "st1h { z11.h }, p2, [x20, #1, MUL VL]\n"
+ "addvl x20, x20, #-2\n"
"mov z2.d, z0.d\n"
"mov z3.d, z0.d\n"
"trn1 z10.h, z24.h, z25.h\n"
- "st1h { z10.h }, p2, [x21]\n"
+ "st1h { z10.h }, p2, [x20]\n"
"trn1 z11.h, z16.h, z17.h\n"
- "st1h { z11.h }, p2, [x21, #1, MUL VL]\n"
- "cbz x20, 3f\n"
- "ld1w { z8.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "st1h { z11.h }, p2, [x20, #1, MUL VL]\n"
+ "cbz x19, 3f\n"
+ "ld1w { z8.s }, p1/Z, [x19, x16, LSL #2]\n"
"3:" // Load mul: End
- "ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
- "cbz x20, 4f\n"
- "ld1w { z7.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+ "cbz x19, 4f\n"
+ "ld1w { z7.s }, p1/Z, [x19, x16, LSL #2]\n"
"4:" // Load right_shift: End
"ldr x15, [%x[args], %[offsetof_Args_input_cols]]\n"
- "sub x20, x15, #0x1\n"
- "orr x23, x20, %x[ld_in_col], LSL #16\n"
+ "sub x19, x15, #0x1\n"
+ "orr x22, x19, %x[ld_in_col], LSL #16\n"
"ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "orr x23, x17, x23, LSL #22\n"
- "mov x22, #0x9\n"
- "add x21, x7, x6\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
+ "orr x22, x17, x22, LSL #22\n"
+ "mov x21, #0x9\n"
+ "add x20, x8, x7\n"
+ "lsl x19, %x[ld_in_row], #0x0\n"
"ldr x13, [%x[args], %[offsetof_Args_output_cols]]\n"
- "mov x8, #0x0\n"
- "lsl x23, x23, #0x0\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x7, x14\n"
+ "mov x11, #0x0\n"
+ "lsl x22, x22, #0x0\n"
+ "sub x21, x21, x20\n"
+ "madd x19, x19, x8, x14\n"
"5:" // Issue prefetches
- "subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col]\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xf8b64a7c // rprfm pldstrm, x22, [x19]\n"
+ "add x19, x19, %x[ld_in_col]\n"
"bgt 5b\n"
- "ldr x25, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "msub x14, x7, x20, x14\n"
- ".inst 0xc0040c00 // mova za.d[x8, #0], { z0.d-z3.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
- ".inst 0xc0040c01 // mova za.d[x8, #1], { z0.d-z3.d }\n"
- "mov x22, #0x2\n"
- "ldp x11, x10, [x25], #0x10\n"
- ".inst 0xc0040c02 // mova za.d[x8, #2], { z0.d-z3.d }\n"
- "ldp x9, x28, [x20], #0x10\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
- "ldp x27, x26, [x25], #0x10\n"
- "ldp x25, x24, [x20], #0x10\n"
- "cbz x21, 7f\n"
- "cmp x21, x22\n"
- "csel x20, x21, x22, LT\n"
- "sub x21, x21, x20\n"
- "sub x22, x22, x20\n"
- "cbz x21, 7f\n"
- ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
+ "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "lsl x19, %x[ld_in_row], #0x0\n"
+ "msub x14, x8, x19, x14\n"
+ ".inst 0xc0046c00 // mova za.d[x11, #0], { z0.d-z3.d }\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ ".inst 0xc0046c01 // mova za.d[x11, #1], { z0.d-z3.d }\n"
+ "mov x21, #0x2\n"
+ "ldp x10, x9, [x24], #0x10\n"
+ ".inst 0xc0046c02 // mova za.d[x11, #2], { z0.d-z3.d }\n"
+ "ldp x28, x27, [x19], #0x10\n"
+ "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
+ "ldp x26, x25, [x24], #0x10\n"
+ "ldp x24, x23, [x19], #0x10\n"
+ "cbz x20, 7f\n"
+ "cmp x20, x21\n"
+ "csel x19, x20, x21, LT\n"
+ "sub x20, x20, x19\n"
+ "sub x21, x21, x19\n"
+ "cbz x20, 7f\n"
+ ".inst 0xc0066c1c // mova { z28.d-z31.d }, za.d[x11, #0]\n"
".inst 0xc1a8ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
- "and x22, x21, #0x1\n"
+ "and x21, x20, #0x1\n"
".inst 0xc1a7aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
- "add x21, x21, #0x1\n"
- "lsr x21, x21, #0x1\n"
+ "add x20, x20, #0x1\n"
+ "lsr x20, x20, #0x1\n"
".inst 0xc1a4ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
- "sub x13, x13, x21\n"
+ "sub x13, x13, x20\n"
".inst 0xc1b7cf7c // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
"6:" // Left padding
- "subs x21, x21, #0x1\n"
- "st1b { z28.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- "st1b { z29.s }, p1, [x10]\n"
+ "subs x20, x20, #0x1\n"
+ "st1b { z28.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z30.s }, p1, [x27]\n"
- "add x27, x27, x25\n"
- "st1b { z31.s }, p1, [x26]\n"
+ "st1b { z29.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
+ "st1b { z30.s }, p1, [x26]\n"
"add x26, x26, x24\n"
+ "st1b { z31.s }, p1, [x25]\n"
+ "add x25, x25, x23\n"
"bgt 6b\n"
"7:" // Left padding: End
- "adds XZR, x7, x6\n"
+ "adds XZR, x8, x7\n"
"bne 12f\n"
- "cbz x22, 10f\n"
- "cmp x22, #0x1\n"
- "sub x15, x15, x22\n"
+ "cbz x21, 10f\n"
+ "cmp x21, #0x1\n"
+ "sub x15, x15, x21\n"
"beq 9f\n"
"8:" // Unpadded: 2 priming loads
- "add x21, x14, %x[ld_in_row]\n"
+ "add x20, x14, %x[ld_in_row]\n"
"ld1b { z12.s }, p1/Z, [x14]\n"
- "addvl x20, SP, #4\n"
- "ld1b { z20.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "addvl x19, SP, #4\n"
+ "ld1b { z20.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
"add z12.h, z12.h, z5.h\n"
- "ld1b { z13.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z13.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1b { z19.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z19.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z13.h, z13.h, z19.h\n"
"add z13.h, z13.h, z5.h\n"
- "ld1b { z14.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "ld1b { z18.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z14.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z18.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z14.h, z14.h, z18.h\n"
"add z14.h, z14.h, z5.h\n"
- "ld1b { z15.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "ld1b { z17.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z15.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z15.h, z15.h, z17.h\n"
"add z15.h, z15.h, z5.h\n"
- "ld1b { z16.s }, p1/Z, [x21]\n"
+ "ld1b { z16.s }, p1/Z, [x20]\n"
"mov z16.d, z16.d\n"
"add z16.h, z16.h, z5.h\n"
- ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
+ ".inst 0xa0402a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc17a7588 // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xc17b75a8 // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
"9:" // Unpadded: 1 priming loads
- "add x21, x14, %x[ld_in_row]\n"
+ "add x20, x14, %x[ld_in_row]\n"
"ld1b { z12.s }, p1/Z, [x14]\n"
- "addvl x20, SP, #2\n"
- "ld1b { z20.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "addvl x19, SP, #2\n"
+ "ld1b { z20.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
"add z12.h, z12.h, z5.h\n"
- "ld1b { z13.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z13.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1b { z19.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z19.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z13.h, z13.h, z19.h\n"
"add z13.h, z13.h, z5.h\n"
- "ld1b { z14.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "ld1b { z18.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z14.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z18.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z14.h, z14.h, z18.h\n"
"add z14.h, z14.h, z5.h\n"
- "ld1b { z15.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "ld1b { z17.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z15.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z15.h, z15.h, z17.h\n"
"add z15.h, z15.h, z5.h\n"
- "ld1b { z16.s }, p1/Z, [x21]\n"
+ "ld1b { z16.s }, p1/Z, [x20]\n"
"mov z16.d, z16.d\n"
"add z16.h, z16.h, z5.h\n"
- ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
+ ".inst 0xa0402a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc17a7588 // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xc17b75a8 // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
"10:" // Unpadded: 0 priming loads
"cmp x15, #0x2\n"
".inst 0xa0402bea // ld1h { z10.h-z11.h }, pn10.b/Z, [SP]\n"
"blt 18f\n"
- "add x21, x14, %x[ld_in_row]\n"
+ "add x20, x14, %x[ld_in_row]\n"
"ld1b { z12.s }, p1/Z, [x14]\n"
"sub x15, x15, #0x2\n"
- "ld1b { z20.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z20.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
"sub x13, x13, #0x1\n"
- "ld1b { z13.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "lsr x20, x15, #0x1\n"
+ "ld1b { z13.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "lsr x19, x15, #0x1\n"
"add z12.h, z12.h, z5.h\n"
- "ld1b { z19.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z19.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z13.h, z13.h, z19.h\n"
- "cmp x20, x13\n"
- "ld1b { z14.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "csel x23, x20, x13, LT\n"
+ "cmp x19, x13\n"
+ "ld1b { z14.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "csel x22, x19, x13, LT\n"
"add z13.h, z13.h, z5.h\n"
- "ld1b { z18.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z18.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z14.h, z14.h, z18.h\n"
"add z14.h, z14.h, z5.h\n"
- "ld1b { z15.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z15.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1b { z17.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z15.h, z15.h, z17.h\n"
"add z15.h, z15.h, z5.h\n"
- "ld1b { z16.s }, p1/Z, [x21]\n"
+ "ld1b { z16.s }, p1/Z, [x20]\n"
"mov z16.d, z16.d\n"
"add z16.h, z16.h, z5.h\n"
"and x15, x15, #0x1\n"
- "sub x13, x13, x23\n"
- "cbz x23, 17f\n"
+ "sub x13, x13, x22\n"
+ "cbz x22, 17f\n"
"11:" // Unpadded: Main loop
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- "addvl x20, SP, #4\n"
- "add x22, x14, %x[ld_in_row]\n"
- ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
- ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
- "addvl x21, SP, #2\n"
- "subs x23, x23, #0x1\n"
- ".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xc17a7588 // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+ "addvl x19, SP, #4\n"
+ "add x21, x14, %x[ld_in_row]\n"
+ ".inst 0xc17b75a8 // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+ ".inst 0xa0402a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+ "addvl x20, SP, #2\n"
+ "subs x22, x22, #0x1\n"
+ ".inst 0xc17a7589 // sdot za.s[x11, 1], { z12.h-z15.h }, z10.h\n"
"ld1b { z12.s }, p1/Z, [x14]\n"
"add x14, x14, %x[ld_in_col]\n"
- "add x20, x14, %x[ld_in_row]\n"
- "ld1b { z20.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- ".inst 0xc17b15a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z11.h\n"
+ "add x19, x14, %x[ld_in_row]\n"
+ "ld1b { z20.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ ".inst 0xc17b75a9 // sdot za.s[x11, 1], { z13.h-z16.h }, z11.h\n"
"trn1 z12.h, z12.h, z20.h\n"
- "ld1b { z13.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "ld1b { z13.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
"add z12.h, z12.h, z5.h\n"
- ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
- "ld1b { z19.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc0066c1c // mova { z28.d-z31.d }, za.d[x11, #0]\n"
+ "ld1b { z19.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
"trn1 z13.h, z13.h, z19.h\n"
"add z13.h, z13.h, z5.h\n"
- "ld1b { z14.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "add x8, x8, #0x1\n"
- ".inst 0xc0040c02 // mova za.d[x8, #2], { z0.d-z3.d }\n"
- "ld1b { z18.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "ld1b { z14.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add x11, x11, #0x1\n"
+ ".inst 0xc0046c02 // mova za.d[x11, #2], { z0.d-z3.d }\n"
+ "ld1b { z18.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
"trn1 z14.h, z14.h, z18.h\n"
"add z14.h, z14.h, z5.h\n"
- "ld1b { z15.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "ld1b { z15.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0xc1a8ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
- "ld1b { z17.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
"trn1 z15.h, z15.h, z17.h\n"
"add z15.h, z15.h, z5.h\n"
- "ld1b { z16.s }, p1/Z, [x22]\n"
+ "ld1b { z16.s }, p1/Z, [x21]\n"
"mov z16.d, z16.d\n"
"add z16.h, z16.h, z5.h\n"
- ".inst 0xa0402aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc17a7588 // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
".inst 0xc1a7aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
"ld1b { z12.s }, p1/Z, [x14]\n"
- ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
+ ".inst 0xc17b75a8 // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
".inst 0xc1a4ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
- "ld1b { z20.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z20.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
".inst 0xc1b7cf7c // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
- "ld1b { z13.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- "st1b { z28.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- "ld1b { z19.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z13.h, z13.h, z19.h\n"
- "st1b { z29.s }, p1, [x10]\n"
- "ld1b { z14.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z13.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "st1b { z28.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z30.s }, p1, [x27]\n"
- "ld1b { z18.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z19.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "trn1 z13.h, z13.h, z19.h\n"
+ "st1b { z29.s }, p1, [x9]\n"
+ "ld1b { z14.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "add x9, x9, x27\n"
+ "st1b { z30.s }, p1, [x26]\n"
+ "ld1b { z18.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z14.h, z14.h, z18.h\n"
- "add x27, x27, x25\n"
- "ld1b { z15.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- "st1b { z31.s }, p1, [x26]\n"
"add x26, x26, x24\n"
- "ld1b { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z15.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "st1b { z31.s }, p1, [x25]\n"
+ "add x25, x25, x23\n"
+ "ld1b { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z15.h, z15.h, z17.h\n"
"add z12.h, z12.h, z5.h\n"
- "ld1b { z16.s }, p1/Z, [x20]\n"
+ "ld1b { z16.s }, p1/Z, [x19]\n"
"mov z16.d, z16.d\n"
"add z13.h, z13.h, z5.h\n"
"add x14, x14, %x[ld_in_col]\n"
@@ -411,108 +411,108 @@ void sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za_impl(
"bgt 11b\n"
"b 17f\n"
"12:" // Padded
- "cbz x22, 15f\n"
- "cmp x22, #0x1\n"
- "sub x15, x15, x22\n"
+ "cbz x21, 15f\n"
+ "cmp x21, #0x1\n"
+ "sub x15, x15, x21\n"
"beq 14f\n"
"13:" // Padded: 2 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"ld1b { z12.s }, p0/Z, [x14]\n"
"add z12.h, p0/M, z12.h, z5.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "add x19, x14, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
+ "ld1b { z20.s }, p0/Z, [x19]\n"
"add z20.h, p0/M, z20.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z13.s }, p0/Z, [x20]\n"
+ "ld1b { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z19.s }, p0/Z, [x20]\n"
+ "ld1b { z19.s }, p0/Z, [x19]\n"
"add z19.h, p0/M, z19.h, z5.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
"trn1 z13.h, z13.h, z19.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z14.s }, p0/Z, [x20]\n"
+ "ld1b { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z15.s }, p0/Z, [x20]\n"
+ "ld1b { z15.s }, p0/Z, [x19]\n"
"add z15.h, p0/M, z15.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
"add z17.h, p0/M, z17.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z5.h\n"
- "addvl x20, SP, #4\n"
+ "addvl x19, SP, #4\n"
"trn1 z14.h, z14.h, z18.h\n"
"trn1 z15.h, z15.h, z17.h\n"
- ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa0402a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
"mov z16.d, z16.d\n"
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xc17a7588 // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
"add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
+ ".inst 0xc17b75a8 // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
"14:" // Padded: 1 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"ld1b { z12.s }, p0/Z, [x14]\n"
"add z12.h, p0/M, z12.h, z5.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "add x19, x14, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
+ "ld1b { z20.s }, p0/Z, [x19]\n"
"add z20.h, p0/M, z20.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z13.s }, p0/Z, [x20]\n"
+ "ld1b { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z19.s }, p0/Z, [x20]\n"
+ "ld1b { z19.s }, p0/Z, [x19]\n"
"add z19.h, p0/M, z19.h, z5.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
"trn1 z13.h, z13.h, z19.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z14.s }, p0/Z, [x20]\n"
+ "ld1b { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z15.s }, p0/Z, [x20]\n"
+ "ld1b { z15.s }, p0/Z, [x19]\n"
"add z15.h, p0/M, z15.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
"add z17.h, p0/M, z17.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z5.h\n"
- "addvl x20, SP, #2\n"
+ "addvl x19, SP, #2\n"
"trn1 z14.h, z14.h, z18.h\n"
"trn1 z15.h, z15.h, z17.h\n"
- ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa0402a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
"mov z16.d, z16.d\n"
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xc17a7588 // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
"add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
+ ".inst 0xc17b75a8 // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
"15:" // Padded: 0 priming loads
"cmp x15, #0x2\n"
".inst 0xa0402bea // ld1h { z10.h-z11.h }, pn10.b/Z, [SP]\n"
@@ -521,357 +521,357 @@ void sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za_impl(
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"ld1b { z12.s }, p0/Z, [x14]\n"
"add z12.h, p0/M, z12.h, z5.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "add x19, x14, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
+ "ld1b { z20.s }, p0/Z, [x19]\n"
"add z20.h, p0/M, z20.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z13.s }, p0/Z, [x20]\n"
+ "ld1b { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z19.s }, p0/Z, [x20]\n"
+ "ld1b { z19.s }, p0/Z, [x19]\n"
"add z19.h, p0/M, z19.h, z5.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
"trn1 z13.h, z13.h, z19.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z14.s }, p0/Z, [x20]\n"
+ "ld1b { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z15.s }, p0/Z, [x20]\n"
+ "ld1b { z15.s }, p0/Z, [x19]\n"
"add z15.h, p0/M, z15.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
"add z17.h, p0/M, z17.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z5.h\n"
"sub x15, x15, #0x2\n"
"sub x13, x13, #0x1\n"
"trn1 z14.h, z14.h, z18.h\n"
"trn1 z15.h, z15.h, z17.h\n"
- "lsr x20, x15, #0x1\n"
- "cmp x20, x13\n"
+ "lsr x19, x15, #0x1\n"
+ "cmp x19, x13\n"
"mov z16.d, z16.d\n"
- "csel x22, x20, x13, LT\n"
+ "csel x21, x19, x13, LT\n"
"add x14, x14, %x[ld_in_col]\n"
"and x15, x15, #0x1\n"
- "sub x13, x13, x22\n"
- "cbz x22, 17f\n"
+ "sub x13, x13, x21\n"
+ "cbz x21, 17f\n"
"16:" // Padded: Main loop
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- "addvl x20, SP, #4\n"
+ ".inst 0xc17a7588 // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+ "addvl x19, SP, #4\n"
"mov x12, #0x0\n"
- ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
- ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc17b75a8 // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+ ".inst 0xa0402a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "add x21, x14, %x[ld_in_row]\n"
- ".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+ "add x20, x14, %x[ld_in_row]\n"
+ ".inst 0xc17a7589 // sdot za.s[x11, 1], { z12.h-z15.h }, z10.h\n"
"ld1b { z12.s }, p0/Z, [x14]\n"
"add z12.h, p0/M, z12.h, z5.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z20.s }, p0/Z, [x21]\n"
+ "ld1b { z20.s }, p0/Z, [x20]\n"
"add z20.h, p0/M, z20.h, z5.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc17b15a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z11.h\n"
- "ld1b { z13.s }, p0/Z, [x21]\n"
+ ".inst 0xc17b75a9 // sdot za.s[x11, 1], { z13.h-z16.h }, z11.h\n"
+ "ld1b { z13.s }, p0/Z, [x20]\n"
"add z13.h, p0/M, z13.h, z5.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z19.s }, p0/Z, [x21]\n"
+ "ld1b { z19.s }, p0/Z, [x20]\n"
"mov x12, #0x4\n"
"add z19.h, p0/M, z19.h, z5.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z14.s }, p0/Z, [x21]\n"
+ "ld1b { z14.s }, p0/Z, [x20]\n"
"add z14.h, p0/M, z14.h, z5.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x21]\n"
+ "ld1b { z18.s }, p0/Z, [x20]\n"
"add z18.h, p0/M, z18.h, z5.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z15.s }, p0/Z, [x21]\n"
+ "ld1b { z15.s }, p0/Z, [x20]\n"
"add z15.h, p0/M, z15.h, z5.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z17.s }, p0/Z, [x21]\n"
+ "ld1b { z17.s }, p0/Z, [x20]\n"
"add z17.h, p0/M, z17.h, z5.h\n"
"mov x12, #0x8\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
"trn1 z13.h, z13.h, z19.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "addvl x20, SP, #2\n"
- "ld1b { z16.s }, p0/Z, [x21]\n"
+ "addvl x19, SP, #2\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
"trn1 z14.h, z14.h, z18.h\n"
"trn1 z15.h, z15.h, z17.h\n"
- ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa0402a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
"mov x12, #0x0\n"
- ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
- "add x8, x8, #0x1\n"
+ ".inst 0xc0066c1c // mova { z28.d-z31.d }, za.d[x11, #0]\n"
+ "add x11, x11, #0x1\n"
"add z16.h, p0/M, z16.h, z5.h\n"
"add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xc17a7588 // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"ld1b { z12.s }, p0/Z, [x14]\n"
"add z12.h, p0/M, z12.h, z5.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "add x19, x14, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
"mov z16.d, z16.d\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
+ "ld1b { z20.s }, p0/Z, [x19]\n"
"add z20.h, p0/M, z20.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
- "ld1b { z13.s }, p0/Z, [x20]\n"
+ ".inst 0xc17b75a8 // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+ "ld1b { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z19.s }, p0/Z, [x20]\n"
+ "ld1b { z19.s }, p0/Z, [x19]\n"
"mov x12, #0x4\n"
"add z19.h, p0/M, z19.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc0040c02 // mova za.d[x8, #2], { z0.d-z3.d }\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ ".inst 0xc0046c02 // mova za.d[x11, #2], { z0.d-z3.d }\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z14.s }, p0/Z, [x20]\n"
+ "ld1b { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z15.s }, p0/Z, [x20]\n"
+ "ld1b { z15.s }, p0/Z, [x19]\n"
".inst 0xc1a8ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
"add z15.h, p0/M, z15.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
".inst 0xc1a7aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"add z17.h, p0/M, z17.h, z5.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z5.h\n"
".inst 0xc1a4ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
- "subs x22, x22, #0x1\n"
+ "subs x21, x21, #0x1\n"
".inst 0xa0402bea // ld1h { z10.h-z11.h }, pn10.b/Z, [SP]\n"
".inst 0xc1b7cf7c // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
- "st1b { z28.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- "trn1 z12.h, z12.h, z20.h\n"
- "st1b { z29.s }, p1, [x10]\n"
+ "st1b { z28.s }, p1, [x10]\n"
"add x10, x10, x28\n"
+ "trn1 z12.h, z12.h, z20.h\n"
+ "st1b { z29.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
"trn1 z13.h, z13.h, z19.h\n"
"trn1 z14.h, z14.h, z18.h\n"
- "st1b { z30.s }, p1, [x27]\n"
- "add x27, x27, x25\n"
+ "st1b { z30.s }, p1, [x26]\n"
+ "add x26, x26, x24\n"
"trn1 z15.h, z15.h, z17.h\n"
"mov z16.d, z16.d\n"
- "st1b { z31.s }, p1, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z31.s }, p1, [x25]\n"
+ "add x25, x25, x23\n"
"add x14, x14, %x[ld_in_col]\n"
"bgt 16b\n"
"17:" // Main loop tail
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- "addvl x20, SP, #4\n"
+ ".inst 0xc17a7588 // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
+ "addvl x19, SP, #4\n"
"mov x12, #0x0\n"
- ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
- ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc17b75a8 // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+ ".inst 0xa0402a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "add x20, x14, %x[ld_in_row]\n"
- ".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+ "add x19, x14, %x[ld_in_row]\n"
+ ".inst 0xc17a7589 // sdot za.s[x11, 1], { z12.h-z15.h }, z10.h\n"
"ld1b { z12.s }, p0/Z, [x14]\n"
"add z12.h, p0/M, z12.h, z5.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
+ "ld1b { z20.s }, p0/Z, [x19]\n"
"add z20.h, p0/M, z20.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc17b15a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z11.h\n"
- "ld1b { z13.s }, p0/Z, [x20]\n"
+ ".inst 0xc17b75a9 // sdot za.s[x11, 1], { z13.h-z16.h }, z11.h\n"
+ "ld1b { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z19.s }, p0/Z, [x20]\n"
+ "ld1b { z19.s }, p0/Z, [x19]\n"
"mov x12, #0x4\n"
"add z19.h, p0/M, z19.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z14.s }, p0/Z, [x20]\n"
+ "ld1b { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z15.s }, p0/Z, [x20]\n"
- ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
+ "ld1b { z15.s }, p0/Z, [x19]\n"
+ ".inst 0xc0066c1c // mova { z28.d-z31.d }, za.d[x11, #0]\n"
"add z15.h, p0/M, z15.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
".inst 0xc1a8ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"add z17.h, p0/M, z17.h, z5.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "addvl x20, SP, #2\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
+ "addvl x19, SP, #2\n"
".inst 0xc1a7aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
"trn1 z12.h, z12.h, z20.h\n"
- "add x8, x8, #0x1\n"
+ "add x11, x11, #0x1\n"
"add z16.h, p0/M, z16.h, z5.h\n"
"trn1 z13.h, z13.h, z19.h\n"
"trn1 z14.h, z14.h, z18.h\n"
"add x14, x14, %x[ld_in_col]\n"
"trn1 z15.h, z15.h, z17.h\n"
- ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa0402a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
".inst 0xc1a4ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
"mov z16.d, z16.d\n"
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xc17a7588 // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
".inst 0xc1b7cf7c // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
- "st1b { z28.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- ".inst 0xc0040c02 // mova za.d[x8, #2], { z0.d-z3.d }\n"
- "st1b { z29.s }, p1, [x10]\n"
+ "st1b { z28.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
+ ".inst 0xc0046c02 // mova za.d[x11, #2], { z0.d-z3.d }\n"
+ "st1b { z29.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
+ ".inst 0xc17b75a8 // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
".inst 0xa0402bea // ld1h { z10.h-z11.h }, pn10.b/Z, [SP]\n"
- "st1b { z30.s }, p1, [x27]\n"
- "add x27, x27, x25\n"
- "st1b { z31.s }, p1, [x26]\n"
+ "st1b { z30.s }, p1, [x26]\n"
"add x26, x26, x24\n"
+ "st1b { z31.s }, p1, [x25]\n"
+ "add x25, x25, x23\n"
"18:" // Main loop skip tail
"cbz x15, 19f\n" // Skip remainder inputs
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"ld1b { z12.s }, p0/Z, [x14]\n"
"add z12.h, p0/M, z12.h, z5.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "add x19, x14, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
+ "ld1b { z20.s }, p0/Z, [x19]\n"
"add z20.h, p0/M, z20.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z13.s }, p0/Z, [x20]\n"
+ "ld1b { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z19.s }, p0/Z, [x20]\n"
+ "ld1b { z19.s }, p0/Z, [x19]\n"
"add z19.h, p0/M, z19.h, z5.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
"trn1 z13.h, z13.h, z19.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z14.s }, p0/Z, [x20]\n"
+ "ld1b { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z15.s }, p0/Z, [x20]\n"
+ "ld1b { z15.s }, p0/Z, [x19]\n"
"add z15.h, p0/M, z15.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
"add z17.h, p0/M, z17.h, z5.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z5.h\n"
"trn1 z14.h, z14.h, z18.h\n"
"trn1 z15.h, z15.h, z17.h\n"
"mov z16.d, z16.d\n"
- "addvl x20, SP, #4\n"
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ "addvl x19, SP, #4\n"
+ ".inst 0xc17a7588 // sdot za.s[x11, 0], { z12.h-z15.h }, z10.h\n"
"sub x13, x13, #0x1\n"
- ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
- ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
+ ".inst 0xc17b75a8 // sdot za.s[x11, 0], { z13.h-z16.h }, z11.h\n"
+ ".inst 0xa0402a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc0066c1c // mova { z28.d-z31.d }, za.d[x11, #0]\n"
".inst 0xc1a8ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
".inst 0xc1a7aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
- ".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xc17a7589 // sdot za.s[x11, 1], { z12.h-z15.h }, z10.h\n"
".inst 0xc1a4ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
- ".inst 0xc17b15a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z11.h\n"
- "add x8, x8, #0x1\n"
+ ".inst 0xc17b75a9 // sdot za.s[x11, 1], { z13.h-z16.h }, z11.h\n"
+ "add x11, x11, #0x1\n"
".inst 0xc1b7cf7c // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
- "st1b { z28.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- ".inst 0xc0040c02 // mova za.d[x8, #2], { z0.d-z3.d }\n"
- "st1b { z29.s }, p1, [x10]\n"
+ "st1b { z28.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z30.s }, p1, [x27]\n"
- "add x27, x27, x25\n"
- "st1b { z31.s }, p1, [x26]\n"
+ ".inst 0xc0046c02 // mova za.d[x11, #2], { z0.d-z3.d }\n"
+ "st1b { z29.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
+ "st1b { z30.s }, p1, [x26]\n"
"add x26, x26, x24\n"
+ "st1b { z31.s }, p1, [x25]\n"
+ "add x25, x25, x23\n"
"19:" // Tail input: End
"cbz x13, 21f\n"
"20:" // Right padding loop
- ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
+ ".inst 0xc0066c1c // mova { z28.d-z31.d }, za.d[x11, #0]\n"
".inst 0xc1a8ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
- "add x8, x8, #0x1\n"
+ "add x11, x11, #0x1\n"
".inst 0xc1a7aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
"subs x13, x13, #0x1\n"
- ".inst 0xc0040c02 // mova za.d[x8, #2], { z0.d-z3.d }\n"
+ ".inst 0xc0046c02 // mova za.d[x11, #2], { z0.d-z3.d }\n"
".inst 0xc1a4ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
".inst 0xc1b7cf7c // sclamp { z28.s-z31.s }, z27.s, z23.s\n"
- "st1b { z28.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- "st1b { z29.s }, p1, [x10]\n"
+ "st1b { z28.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z30.s }, p1, [x27]\n"
- "add x27, x27, x25\n"
- "st1b { z31.s }, p1, [x26]\n"
+ "st1b { z29.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
+ "st1b { z30.s }, p1, [x26]\n"
"add x26, x26, x24\n"
+ "st1b { z31.s }, p1, [x25]\n"
+ "add x25, x25, x23\n"
"bgt 20b\n"
"21:" // End
- "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
- "incw x22, ALL, MUL #9\n"
- "str x22, [%x[args], %[offsetof_Args_weights]]\n"
+ "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+ "incw x21, ALL, MUL #9\n"
+ "str x21, [%x[args], %[offsetof_Args_weights]]\n"
"incw x16\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
"whilelt p1.s, x16, x17\n"
"ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "add x14, x14, x20\n"
+ "add x14, x14, x19\n"
"str x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "ldr x25, [%x[args], %[offsetof_Args_outptrs]]\n"
- "ldr x24, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
- "ldp x23, x22, [x25, #0x0]\n"
- "ldp x21, x20, [x24, #0x0]\n"
- "add x23, x23, x21\n"
+ "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+ "ldp x22, x21, [x24, #0x0]\n"
+ "ldp x20, x19, [x23, #0x0]\n"
"add x22, x22, x20\n"
- "stp x23, x22, [x25, #0x0]\n"
- "ldp x23, x22, [x25, #0x10]\n"
- "ldp x21, x20, [x24, #0x10]\n"
- "add x23, x23, x21\n"
+ "add x21, x21, x19\n"
+ "stp x22, x21, [x24, #0x0]\n"
+ "ldp x22, x21, [x24, #0x10]\n"
+ "ldp x20, x19, [x23, #0x10]\n"
"add x22, x22, x20\n"
- "stp x23, x22, [x25, #0x10]\n"
+ "add x21, x21, x19\n"
+ "stp x22, x21, [x24, #0x10]\n"
"b.any 1b\n"
"addvl SP, SP, #6\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za/generic.cpp
index 3e77c75ad7..d6970647d2 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -69,20 +69,20 @@ void sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za_impl(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "ldr x4, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "ldr x5, [%x[args], %[offsetof_Args_pad_bottom]]\n"
"ptrue p2.b\n"
- "mov x20, #0x8\n"
+ "mov x19, #0x8\n"
"ldr x6, [%x[args], %[offsetof_Args_pad_top]]\n"
"ld1rh { z25.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
- "sub x20, x20, x4\n"
+ "sub x19, x19, x5\n"
".inst 0x25207812 // ptrue pn10.b\n"
"ldr x7, [%x[args], %[offsetof_Args_n_channels]]\n"
"whilelt p1.s, XZR, x7\n"
- "whilelt p9.s, XZR, x20\n"
+ "whilelt p9.s, XZR, x19\n"
"ld1rw { z9.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
"whilelt p8.s, XZR, x6\n"
"addvl SP, SP, #-30\n"
- "ldr x5, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ldr x17, [%x[args], %[offsetof_Args_current_channel]]\n"
"neg z25.h, p2/M, z25.h\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
"ld1rw { z3.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
@@ -90,262 +90,298 @@ void sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za_impl(
"ld1rw { z24.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
"ld1rw { z31.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
"1:" // Channel loop
- "ldr x20, [%x[qp], %[offsetof_Requantize32_bias]]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_bias]]\n"
"mov z6.s, #0x0\n"
- "cbz x20, 2f\n"
- "ld1w { z6.s }, p1/Z, [x20, x5, LSL #2]\n"
+ "cbz x19, 2f\n"
+ "ld1w { z6.s }, p1/Z, [x19, x17, LSL #2]\n"
"2:" // Load bias: Done
"ldr x23, [%x[args], %[offsetof_Args_weights]]\n"
- "mov x22, x23\n"
- "ld1sb { z18.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
+ "mov x21, x23\n"
+ "ld1sb { z18.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
"ld1rh { z12.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
"mov z2.h, #0x0\n"
"sub z18.h, z18.h, z12.h\n"
"incw x23\n"
- "ld1sb { z17.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
+ "ld1sb { z17.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
"sub z17.h, z17.h, z12.h\n"
"trn1 z0.h, z2.h, z18.h\n"
- "ld1sb { z21.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
+ "ld1sb { z21.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
"sub z21.h, z21.h, z12.h\n"
"trn1 z8.h, z18.h, z17.h\n"
- "ld1sb { z16.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
+ "ld1sb { z16.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
"sub z16.h, z16.h, z12.h\n"
"trn1 z4.h, z17.h, z21.h\n"
- "ld1sb { z15.s }, p2/Z, [x22]\n"
+ "ld1sb { z15.s }, p2/Z, [x21]\n"
"sub z15.h, z15.h, z12.h\n"
- "mov x22, x23\n"
+ "mov x21, x23\n"
"trn1 z5.h, z21.h, z16.h\n"
- "ld1sb { z18.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
+ "ld1sb { z18.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
"trn1 z10.h, z16.h, z15.h\n"
"trn1 z11.h, z15.h, z2.h\n"
- "ld1sb { z17.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
+ "ld1sb { z17.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
"sub z18.h, z18.h, z12.h\n"
"sub z17.h, z17.h, z12.h\n"
- "ld1sb { z21.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
+ "ld1sb { z21.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
"sub z21.h, z21.h, z12.h\n"
- "addvl x21, SP, #30\n"
- "ld1sb { z16.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
+ "addvl x20, SP, #30\n"
+ "ld1sb { z16.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
"incw x23\n"
"sub z16.h, z16.h, z12.h\n"
- "ld1sb { z15.s }, p2/Z, [x22]\n"
- "addvl x21, x21, #-6\n"
+ "ld1sb { z15.s }, p2/Z, [x21]\n"
+ "addvl x20, x20, #-6\n"
"sub z15.h, z15.h, z12.h\n"
- "mov x22, x23\n"
- "st1h { z0.h }, p2, [x21]\n"
+ "mov x21, x23\n"
+ "st1h { z0.h }, p2, [x20]\n"
"trn1 z0.h, z2.h, z18.h\n"
"incw x23\n"
- "ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
- "st1h { z8.h }, p2, [x21, #1, MUL VL]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+ "st1h { z8.h }, p2, [x20, #1, MUL VL]\n"
"trn1 z8.h, z18.h, z17.h\n"
- "ld1sb { z18.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z4.h }, p2, [x21, #2, MUL VL]\n"
+ "ld1sb { z18.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z4.h }, p2, [x20, #2, MUL VL]\n"
"trn1 z4.h, z17.h, z21.h\n"
- "ld1sb { z17.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z5.h }, p2, [x21, #3, MUL VL]\n"
+ "ld1sb { z17.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z5.h }, p2, [x20, #3, MUL VL]\n"
"trn1 z5.h, z21.h, z16.h\n"
- "ld1sb { z21.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z10.h }, p2, [x21, #4, MUL VL]\n"
+ "ld1sb { z21.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z10.h }, p2, [x20, #4, MUL VL]\n"
"trn1 z10.h, z16.h, z15.h\n"
- "ld1sb { z16.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z11.h }, p2, [x21, #5, MUL VL]\n"
+ "ld1sb { z16.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z11.h }, p2, [x20, #5, MUL VL]\n"
"trn1 z11.h, z15.h, z2.h\n"
"sub z18.h, z18.h, z12.h\n"
- "addvl x21, x21, #-6\n"
+ "addvl x20, x20, #-6\n"
"sub z17.h, z17.h, z12.h\n"
- "ld1sb { z15.s }, p2/Z, [x22]\n"
+ "ld1sb { z15.s }, p2/Z, [x21]\n"
"sub z21.h, z21.h, z12.h\n"
- "mov x22, x23\n"
+ "mov x21, x23\n"
"sub z16.h, z16.h, z12.h\n"
"sub z15.h, z15.h, z12.h\n"
- "st1h { z0.h }, p2, [x21]\n"
+ "st1h { z0.h }, p2, [x20]\n"
"incw x23\n"
- "st1h { z8.h }, p2, [x21, #1, MUL VL]\n"
+ "st1h { z8.h }, p2, [x20, #1, MUL VL]\n"
"trn1 z0.h, z2.h, z18.h\n"
"trn1 z8.h, z18.h, z17.h\n"
- "ld1sb { z18.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z4.h }, p2, [x21, #2, MUL VL]\n"
+ "ld1sb { z18.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z4.h }, p2, [x20, #2, MUL VL]\n"
"trn1 z4.h, z17.h, z21.h\n"
- "ld1sb { z17.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z5.h }, p2, [x21, #3, MUL VL]\n"
+ "ld1sb { z17.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z5.h }, p2, [x20, #3, MUL VL]\n"
"trn1 z5.h, z21.h, z16.h\n"
- "ld1sb { z21.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z10.h }, p2, [x21, #4, MUL VL]\n"
+ "ld1sb { z21.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z10.h }, p2, [x20, #4, MUL VL]\n"
"trn1 z10.h, z16.h, z15.h\n"
- "ld1sb { z16.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z11.h }, p2, [x21, #5, MUL VL]\n"
+ "ld1sb { z16.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z11.h }, p2, [x20, #5, MUL VL]\n"
"trn1 z11.h, z15.h, z2.h\n"
"sub z18.h, z18.h, z12.h\n"
"sub z17.h, z17.h, z12.h\n"
- "ld1sb { z15.s }, p2/Z, [x22]\n"
- "addvl x21, x21, #-6\n"
+ "ld1sb { z15.s }, p2/Z, [x21]\n"
+ "addvl x20, x20, #-6\n"
"sub z21.h, z21.h, z12.h\n"
"sub z16.h, z16.h, z12.h\n"
- "mov x22, x23\n"
- "st1h { z0.h }, p2, [x21]\n"
+ "mov x21, x23\n"
+ "st1h { z0.h }, p2, [x20]\n"
"sub z15.h, z15.h, z12.h\n"
- "st1h { z8.h }, p2, [x21, #1, MUL VL]\n"
+ "st1h { z8.h }, p2, [x20, #1, MUL VL]\n"
"trn1 z0.h, z2.h, z18.h\n"
"trn1 z8.h, z18.h, z17.h\n"
- "ld1sb { z18.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z4.h }, p2, [x21, #2, MUL VL]\n"
+ "ld1sb { z18.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z4.h }, p2, [x20, #2, MUL VL]\n"
"trn1 z4.h, z17.h, z21.h\n"
- "ld1sb { z17.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z5.h }, p2, [x21, #3, MUL VL]\n"
+ "ld1sb { z17.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z5.h }, p2, [x20, #3, MUL VL]\n"
"trn1 z5.h, z21.h, z16.h\n"
- "ld1sb { z21.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z10.h }, p2, [x21, #4, MUL VL]\n"
+ "ld1sb { z21.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z10.h }, p2, [x20, #4, MUL VL]\n"
"trn1 z10.h, z16.h, z15.h\n"
- "ld1sb { z16.s }, p2/Z, [x22]\n"
- "incw x22, ALL, MUL #5\n"
- "st1h { z11.h }, p2, [x21, #5, MUL VL]\n"
+ "ld1sb { z16.s }, p2/Z, [x21]\n"
+ "incw x21, ALL, MUL #5\n"
+ "st1h { z11.h }, p2, [x20, #5, MUL VL]\n"
"trn1 z11.h, z15.h, z2.h\n"
- "ld1sb { z15.s }, p2/Z, [x22]\n"
+ "ld1sb { z15.s }, p2/Z, [x21]\n"
"sub z18.h, z18.h, z12.h\n"
- "addvl x21, x21, #-6\n"
+ "addvl x20, x20, #-6\n"
"sub z17.h, z17.h, z12.h\n"
"sub z21.h, z21.h, z12.h\n"
- "st1h { z0.h }, p2, [x21]\n"
+ "st1h { z0.h }, p2, [x20]\n"
"sub z16.h, z16.h, z12.h\n"
"sub z15.h, z15.h, z12.h\n"
- "st1h { z8.h }, p2, [x21, #1, MUL VL]\n"
- "st1h { z4.h }, p2, [x21, #2, MUL VL]\n"
+ "st1h { z8.h }, p2, [x20, #1, MUL VL]\n"
+ "st1h { z4.h }, p2, [x20, #2, MUL VL]\n"
"mov z7.d, z6.d\n"
"trn1 z0.h, z2.h, z18.h\n"
- "st1h { z5.h }, p2, [x21, #3, MUL VL]\n"
+ "st1h { z5.h }, p2, [x20, #3, MUL VL]\n"
"trn1 z8.h, z18.h, z17.h\n"
"trn1 z4.h, z17.h, z21.h\n"
- "st1h { z10.h }, p2, [x21, #4, MUL VL]\n"
+ "st1h { z10.h }, p2, [x20, #4, MUL VL]\n"
"trn1 z5.h, z21.h, z16.h\n"
"trn1 z10.h, z16.h, z15.h\n"
- "st1h { z11.h }, p2, [x21, #5, MUL VL]\n"
- "addvl x21, x21, #-6\n"
+ "st1h { z11.h }, p2, [x20, #5, MUL VL]\n"
+ "addvl x20, x20, #-6\n"
"trn1 z11.h, z15.h, z2.h\n"
- "st1h { z0.h }, p2, [x21]\n"
- "st1h { z8.h }, p2, [x21, #1, MUL VL]\n"
- "st1h { z4.h }, p2, [x21, #2, MUL VL]\n"
- "st1h { z5.h }, p2, [x21, #3, MUL VL]\n"
- "st1h { z10.h }, p2, [x21, #4, MUL VL]\n"
- "st1h { z11.h }, p2, [x21, #5, MUL VL]\n"
- "cbz x20, 3f\n"
- "ld1w { z3.s }, p1/Z, [x20, x5, LSL #2]\n"
+ "st1h { z0.h }, p2, [x20]\n"
+ "st1h { z8.h }, p2, [x20, #1, MUL VL]\n"
+ "st1h { z4.h }, p2, [x20, #2, MUL VL]\n"
+ "st1h { z5.h }, p2, [x20, #3, MUL VL]\n"
+ "st1h { z10.h }, p2, [x20, #4, MUL VL]\n"
+ "st1h { z11.h }, p2, [x20, #5, MUL VL]\n"
+ "cbz x19, 3f\n"
+ "ld1w { z3.s }, p1/Z, [x19, x17, LSL #2]\n"
"3:" // Load mul: End
- "ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
- "cbz x20, 4f\n"
- "ld1w { z1.s }, p1/Z, [x20, x5, LSL #2]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+ "cbz x19, 4f\n"
+ "ld1w { z1.s }, p1/Z, [x19, x17, LSL #2]\n"
"4:" // Load right_shift: End
- "ldr x17, [%x[args], %[offsetof_Args_input_cols]]\n"
- "sub x20, x17, #0x1\n"
- "orr x23, x20, %x[ld_in_col], LSL #16\n"
- "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
- "orr x23, x7, x23, LSL #22\n"
- "mov x22, #0x8\n"
- "add x21, x6, x4\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "ldr x15, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "ldr x16, [%x[args], %[offsetof_Args_input_cols]]\n"
+ "sub x19, x16, #0x1\n"
+ "orr x22, x19, %x[ld_in_col], LSL #16\n"
+ "ldr x15, [%x[args], %[offsetof_Args_inptr]]\n"
+ "orr x22, x7, x22, LSL #22\n"
+ "mov x21, #0x8\n"
+ "add x20, x6, x5\n"
+ "lsl x19, %x[ld_in_row], #0x0\n"
+ "ldr x14, [%x[args], %[offsetof_Args_output_cols]]\n"
"mov x11, #0x0\n"
"mov x8, #0x8\n"
- "lsl x23, x23, #0x0\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x6, x16\n"
+ "lsl x22, x22, #0x0\n"
+ "sub x21, x21, x20\n"
+ "madd x19, x19, x6, x15\n"
"5:" // Issue prefetches
- "subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col]\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xf8b64a7c // rprfm pldstrm, x22, [x19]\n"
+ "add x19, x19, %x[ld_in_col]\n"
"bgt 5b\n"
- "ldr x25, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "msub x16, x6, x20, x16\n"
+ "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "lsl x19, %x[ld_in_row], #0x0\n"
+ "msub x15, x6, x19, x15\n"
".inst 0xc00468c0 // mova za.d[x11, #0], { z6.d-z7.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
".inst 0xc00468c1 // mova za.d[x11, #1], { z6.d-z7.d }\n"
- "mov x22, #0x4\n"
- "ldp x14, x13, [x25], #0x10\n"
+ "mov x21, #0x4\n"
+ "ldp x13, x4, [x24], #0x10\n"
".inst 0xc00468c2 // mova za.d[x11, #2], { z6.d-z7.d }\n"
- "ldp x3, x10, [x20], #0x10\n"
+ "ldp x10, x9, [x19], #0x10\n"
".inst 0xc00468c3 // mova za.d[x11, #3], { z6.d-z7.d }\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
+ "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
".inst 0xc00468c4 // mova za.d[x11, #4], { z6.d-z7.d }\n"
- "ldp x9, x28, [x25], #0x10\n"
+ "ldp x28, x27, [x24], #0x10\n"
".inst 0xc00468c5 // mova za.d[x11, #5], { z6.d-z7.d }\n"
- "ldp x27, x26, [x20], #0x10\n"
+ "ldp x26, x25, [x19], #0x10\n"
".inst 0xc00468c6 // mova za.d[x11, #6], { z6.d-z7.d }\n"
".inst 0xc00468c7 // mova za.d[x11, #7], { z6.d-z7.d }\n"
".inst 0xc00408c0 // mova za.d[x8, #0], { z6.d-z7.d }\n"
".inst 0xc00408c1 // mova za.d[x8, #1], { z6.d-z7.d }\n"
- "cbz x21, 7f\n"
- "cmp x21, x22\n"
- "csel x20, x21, x22, LT\n"
- "sub x21, x21, x20\n"
- "sub x22, x22, x20\n"
- "cbz x21, 7f\n"
+ "cbz x20, 7f\n"
+ "cmp x20, x21\n"
+ "csel x19, x20, x21, LT\n"
+ "sub x20, x20, x19\n"
+ "sub x21, x21, x19\n"
+ "cbz x20, 7f\n"
".inst 0xc006680c // mova { z12.d-z13.d }, za.d[x11, #0]\n"
- "sub x15, x15, x21\n"
+ "sub x14, x14, x20\n"
".inst 0xc006682e // mova { z14.d-z15.d }, za.d[x11, #1]\n"
".inst 0xc1a3ac0c // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z3.s\n"
".inst 0xc1a1aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z1.s\n"
".inst 0xc1a9ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z9.s\n"
".inst 0xc1bfcf0c // sclamp { z12.s-z15.s }, z24.s, z31.s\n"
"6:" // Left padding
- "subs x21, x21, #0x1\n"
- "st1b { z12.s }, p1, [x14]\n"
- "add x14, x14, x3\n"
- "st1b { z14.s }, p1, [x13]\n"
+ "subs x20, x20, #0x1\n"
+ "st1b { z12.s }, p1, [x13]\n"
"add x13, x13, x10\n"
- "st1b { z13.s }, p1, [x9]\n"
- "add x9, x9, x27\n"
- "st1b { z15.s }, p1, [x28]\n"
+ "st1b { z14.s }, p1, [x4]\n"
+ "add x4, x4, x9\n"
+ "st1b { z13.s }, p1, [x28]\n"
"add x28, x28, x26\n"
+ "st1b { z15.s }, p1, [x27]\n"
+ "add x27, x27, x25\n"
"bgt 6b\n"
"7:" // Left padding: End
- "adds XZR, x6, x4\n"
+ "adds XZR, x6, x5\n"
"bne 14f\n"
- "cbz x22, 12f\n"
- "cmp x22, #0x1\n"
- "sub x17, x17, x22\n"
+ "cbz x21, 12f\n"
+ "cmp x21, #0x1\n"
+ "sub x16, x16, x21\n"
"beq 11f\n"
- "cmp x22, #0x2\n"
+ "cmp x21, #0x2\n"
"beq 10f\n"
- "cmp x22, #0x3\n"
+ "cmp x21, #0x3\n"
"beq 9f\n"
"8:" // Unpadded: 4 priming loads
- "add x21, x16, %x[ld_in_row]\n"
- "ld1b { z17.s }, p1/Z, [x16]\n"
- "addvl x20, SP, #24\n"
+ "add x20, x15, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x15]\n"
+ "addvl x19, SP, #24\n"
+ "ld1b { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z27.h, z17.h, z16.h\n"
+ "add z27.h, z27.h, z25.h\n"
+ "ld1b { z17.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "add x15, x15, %x[ld_in_col]\n"
+ "ld1b { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z28.h, z17.h, z16.h\n"
+ "add z28.h, z28.h, z25.h\n"
+ "ld1b { z16.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z29.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z29.h, z16.h, z29.h\n"
+ "add z29.h, z29.h, z25.h\n"
+ "ld1b { z17.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xa1402a60 // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc1687768 // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+ "ld1b { z16.s }, p1/Z, [x20]\n"
+ "trn1 z30.h, z17.h, z16.h\n"
+ ".inst 0xc1607769 // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
+ ".inst 0xa0412a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+ "add z30.h, z30.h, z25.h\n"
+ ".inst 0xc1657788 // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
+ ".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+ ".inst 0xa0422a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+ ".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
+ ".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+ "9:" // Unpadded: 3 priming loads
+ "add x21, x15, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x15]\n"
+ "addvl x20, SP, #18\n"
"ld1b { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"trn1 z27.h, z17.h, z16.h\n"
"add z27.h, z27.h, z25.h\n"
"ld1b { z17.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "add x16, x16, %x[ld_in_col]\n"
+ "addvl x19, SP, #24\n"
"ld1b { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"trn1 z28.h, z17.h, z16.h\n"
"add z28.h, z28.h, z25.h\n"
- "ld1b { z16.s }, p1/Z, [x21]\n"
+ "ld1b { z17.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "ld1b { z29.s }, p1/Z, [x21]\n"
+ "add x15, x15, %x[ld_in_col]\n"
+ "ld1b { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z29.h, z16.h, z29.h\n"
+ "trn1 z29.h, z17.h, z16.h\n"
"add z29.h, z29.h, z25.h\n"
"ld1b { z17.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
@@ -354,37 +390,47 @@ void sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za_impl(
"ld1b { z16.s }, p1/Z, [x21]\n"
"trn1 z30.h, z17.h, z16.h\n"
".inst 0xc1607769 // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa1402a60 // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc168776a // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
"add z30.h, z30.h, z25.h\n"
+ ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc160776b // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
".inst 0xc1657788 // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
- ".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
+ ".inst 0xa0412a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
+ ".inst 0xc165778a // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
+ ".inst 0xc164778b // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
- "9:" // Unpadded: 3 priming loads
- "add x22, x16, %x[ld_in_row]\n"
- "ld1b { z17.s }, p1/Z, [x16]\n"
- "addvl x21, SP, #18\n"
+ ".inst 0xa0422a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+ ".inst 0xc16b77aa // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
+ ".inst 0xc16a77ab // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
+ "10:" // Unpadded: 2 priming loads
+ "add x22, x15, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x15]\n"
+ "addvl x21, SP, #12\n"
"ld1b { z16.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"trn1 z27.h, z17.h, z16.h\n"
"add z27.h, z27.h, z25.h\n"
"ld1b { z17.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "addvl x20, SP, #24\n"
+ "addvl x20, SP, #18\n"
"ld1b { z16.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"trn1 z28.h, z17.h, z16.h\n"
"add z28.h, z28.h, z25.h\n"
"ld1b { z17.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "add x16, x16, %x[ld_in_col]\n"
+ "addvl x19, SP, #24\n"
"ld1b { z16.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"trn1 z29.h, z17.h, z16.h\n"
"add z29.h, z29.h, z25.h\n"
"ld1b { z17.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
+ "add x15, x15, %x[ld_in_col]\n"
".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
".inst 0xc1687768 // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
"ld1b { z16.s }, p1/Z, [x22]\n"
@@ -395,44 +441,54 @@ void sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za_impl(
"add z30.h, z30.h, z25.h\n"
".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
".inst 0xc160776b // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
+ ".inst 0xa1402a60 // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
".inst 0xc1657788 // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
- ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xc168776c // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
+ ".inst 0xc160776d // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
".inst 0xc165778a // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778b // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
+ ".inst 0xa0412a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xc165778c // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
+ ".inst 0xc164778d // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
".inst 0xc16b77aa // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ab // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
- "10:" // Unpadded: 2 priming loads
- "add x23, x16, %x[ld_in_row]\n"
- "ld1b { z17.s }, p1/Z, [x16]\n"
- "addvl x22, SP, #12\n"
+ ".inst 0xa0422a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
+ ".inst 0xc16b77ac // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
+ ".inst 0xc16a77ad // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
+ "11:" // Unpadded: 1 priming loads
+ "add x23, x15, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x15]\n"
+ "addvl x22, SP, #6\n"
"ld1b { z16.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
"trn1 z27.h, z17.h, z16.h\n"
"add z27.h, z27.h, z25.h\n"
"ld1b { z17.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- "addvl x21, SP, #18\n"
+ "addvl x21, SP, #12\n"
"ld1b { z16.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
"trn1 z28.h, z17.h, z16.h\n"
"add z28.h, z28.h, z25.h\n"
"ld1b { z17.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- "addvl x20, SP, #24\n"
+ "addvl x20, SP, #18\n"
"ld1b { z16.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
"trn1 z29.h, z17.h, z16.h\n"
"add z29.h, z29.h, z25.h\n"
"ld1b { z17.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- "add x16, x16, %x[ld_in_col]\n"
+ "addvl x19, SP, #24\n"
".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
".inst 0xc1687768 // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
+ "add x15, x15, %x[ld_in_col]\n"
"ld1b { z16.s }, p1/Z, [x23]\n"
"trn1 z30.h, z17.h, z16.h\n"
".inst 0xc1607769 // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
@@ -448,177 +504,121 @@ void sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za_impl(
".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0xc168776c // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
".inst 0xc160776d // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
+ ".inst 0xa1402a60 // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
".inst 0xc165778a // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778b // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc165778c // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
- ".inst 0xc164778d // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
- ".inst 0xc16b77aa // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
- ".inst 0xc16a77ab // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc16b77ac // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
- ".inst 0xc16a77ad // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
- "11:" // Unpadded: 1 priming loads
- "add x24, x16, %x[ld_in_row]\n"
- "ld1b { z17.s }, p1/Z, [x16]\n"
- "addvl x23, SP, #6\n"
- "ld1b { z16.s }, p1/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row]\n"
- "trn1 z27.h, z17.h, z16.h\n"
- "add z27.h, z27.h, z25.h\n"
- "ld1b { z17.s }, p1/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row]\n"
- "addvl x22, SP, #12\n"
- "ld1b { z16.s }, p1/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row]\n"
- "trn1 z28.h, z17.h, z16.h\n"
- "add z28.h, z28.h, z25.h\n"
- "ld1b { z17.s }, p1/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row]\n"
- "addvl x21, SP, #18\n"
- "ld1b { z16.s }, p1/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row]\n"
- "trn1 z29.h, z17.h, z16.h\n"
- "add z29.h, z29.h, z25.h\n"
- "ld1b { z17.s }, p1/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row]\n"
- "addvl x20, SP, #24\n"
- ".inst 0xa1402ae0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x23]\n"
- ".inst 0xc1687768 // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
- "add x16, x16, %x[ld_in_col]\n"
- "ld1b { z16.s }, p1/Z, [x24]\n"
- "trn1 z30.h, z17.h, z16.h\n"
- ".inst 0xc1607769 // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
- ".inst 0xc168776a // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
- "add z30.h, z30.h, z25.h\n"
- ".inst 0xa0412ae4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
- ".inst 0xc160776b // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc1657788 // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
- ".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xa0422aea // ld1h { z10.h-z11.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xc168776c // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
- ".inst 0xc160776d // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc165778a // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
- ".inst 0xc164778b // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
- ".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0xc168776e // sdot za.s[x11, 6], { z27.h-z28.h }, z8.h\n"
".inst 0xc160776f // sdot za.s[x11, 7], { z27.h-z28.h }, z0.h\n"
".inst 0xc165778c // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778d // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa0412a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
".inst 0xc16b77aa // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ab // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
".inst 0xc165778e // sdot za.s[x11, 6], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778f // sdot za.s[x11, 7], { z28.h-z29.h }, z4.h\n"
".inst 0xc16b77ac // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ad // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xa0422a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
".inst 0xc16b77ae // sdot za.s[x11, 6], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77af // sdot za.s[x11, 7], { z29.h-z30.h }, z10.h\n"
"12:" // Unpadded: 0 priming loads
".inst 0xa1402be0 // ld1h { z0.h, z8.h }, pn10.b/Z, [SP]\n"
".inst 0xa0412be4 // ld1h { z4.h-z5.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
".inst 0xa0422bea // ld1h { z10.h-z11.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
- "cbz x17, 22f\n"
- "add x20, x16, %x[ld_in_row]\n"
- "ld1b { z17.s }, p1/Z, [x16]\n"
- "sub x17, x17, #0x1\n"
- "ld1b { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "cbz x16, 22f\n"
+ "add x19, x15, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x15]\n"
+ "sub x16, x16, #0x1\n"
+ "ld1b { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z27.h, z17.h, z16.h\n"
- "sub x15, x15, #0x1\n"
- "ld1b { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- "cmp x17, x15\n"
+ "sub x14, x14, #0x1\n"
+ "ld1b { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "cmp x16, x14\n"
"add z27.h, z27.h, z25.h\n"
- "ld1b { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z28.h, z17.h, z16.h\n"
- "csel x25, x17, x15, LT\n"
- "ld1b { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "csel x24, x16, x14, LT\n"
+ "ld1b { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"add z28.h, z28.h, z25.h\n"
- "add x16, x16, %x[ld_in_col]\n"
- "ld1b { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x15, x15, %x[ld_in_col]\n"
+ "ld1b { z16.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z29.h, z17.h, z16.h\n"
"add z29.h, z29.h, z25.h\n"
- "ld1b { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- "sub x15, x15, x25\n"
- "ld1b { z16.s }, p1/Z, [x20]\n"
+ "ld1b { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "sub x14, x14, x24\n"
+ "ld1b { z16.s }, p1/Z, [x19]\n"
"trn1 z30.h, z17.h, z16.h\n"
"add z30.h, z30.h, z25.h\n"
- "cbz x25, 21f\n"
+ "cbz x24, 21f\n"
"13:" // Unpadded: Main loop
- "addvl x24, SP, #6\n"
+ "addvl x23, SP, #6\n"
".inst 0xc1687768 // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
- "addvl x23, SP, #12\n"
- "ld1b { z23.s }, p1/Z, [x16]\n"
+ "addvl x22, SP, #12\n"
+ "ld1b { z23.s }, p1/Z, [x15]\n"
".inst 0xc1607769 // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402b00 // ld1h { z0.h, z8.h }, pn10.b/Z, [x24]\n"
- "addvl x22, SP, #18\n"
- "addvl x21, SP, #24\n"
+ ".inst 0xa1402ae0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x23]\n"
+ "addvl x21, SP, #18\n"
+ "addvl x20, SP, #24\n"
".inst 0xc168776a // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
- "add x20, x16, %x[ld_in_row]\n"
- "ld1b { z22.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x15, %x[ld_in_row]\n"
+ "ld1b { z22.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0xc160776b // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402ae0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x23]\n"
- "subs x25, x25, #0x1\n"
- "add x16, x16, %x[ld_in_col]\n"
+ ".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+ "subs x24, x24, #0x1\n"
+ "add x15, x15, %x[ld_in_col]\n"
".inst 0xc1657788 // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
- "ld1b { z21.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z21.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412b04 // ld1h { z4.h-z5.h }, pn10.b/Z, [x24, #0x2, MUL VL]\n"
+ ".inst 0xa0412ae4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
".inst 0xc168776c // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
- "ld1b { z20.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z20.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0xc160776d // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+ ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
".inst 0xc165778a // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
- "ld1b { z19.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z19.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0xc164778b // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412ae4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xa0412ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
- "ld1b { z18.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z18.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422b0a // ld1h { z10.h-z11.h }, pn10.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xa0422aea // ld1h { z10.h-z11.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
".inst 0xc168776e // sdot za.s[x11, 6], { z27.h-z28.h }, z8.h\n"
- "ld1b { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0xc160776f // sdot za.s[x11, 7], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
".inst 0xc165778c // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
- "ld1b { z16.s }, p1/Z, [x20]\n"
+ "ld1b { z16.s }, p1/Z, [x19]\n"
".inst 0xc164778d // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
".inst 0xc16b77aa // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ab // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aea // ld1h { z10.h-z11.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0xc165778e // sdot za.s[x11, 6], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778f // sdot za.s[x11, 7], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
".inst 0xc16b77ac // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ad // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
".inst 0xc16b77ae // sdot za.s[x11, 6], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77af // sdot za.s[x11, 7], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
".inst 0xc1681768 // sdot za.s[x8, 0], { z27.h-z28.h }, z8.h\n"
".inst 0xc1601769 // sdot za.s[x8, 1], { z27.h-z28.h }, z0.h\n"
"trn1 z27.h, z23.h, z22.h\n"
@@ -645,407 +645,407 @@ void sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za_impl(
".inst 0xc1a9ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z9.s\n"
".inst 0xc00408c1 // mova za.d[x8, #1], { z6.d-z7.d }\n"
".inst 0xc1bfcf0c // sclamp { z12.s-z15.s }, z24.s, z31.s\n"
- "st1b { z12.s }, p1, [x14]\n"
- "add x14, x14, x3\n"
- "add z30.h, z30.h, z25.h\n"
- "st1b { z14.s }, p1, [x13]\n"
+ "st1b { z12.s }, p1, [x13]\n"
"add x13, x13, x10\n"
- "st1b { z13.s }, p1, [x9]\n"
- "add x9, x9, x27\n"
- "st1b { z15.s }, p1, [x28]\n"
+ "add z30.h, z30.h, z25.h\n"
+ "st1b { z14.s }, p1, [x4]\n"
+ "add x4, x4, x9\n"
+ "st1b { z13.s }, p1, [x28]\n"
"add x28, x28, x26\n"
+ "st1b { z15.s }, p1, [x27]\n"
+ "add x27, x27, x25\n"
"bgt 13b\n"
"b 21f\n"
"14:" // Padded
- "cbz x22, 19f\n"
- "cmp x22, #0x1\n"
- "sub x17, x17, x22\n"
+ "cbz x21, 19f\n"
+ "cmp x21, #0x1\n"
+ "sub x16, x16, x21\n"
"beq 18f\n"
- "cmp x22, #0x2\n"
+ "cmp x21, #0x2\n"
"beq 17f\n"
- "cmp x22, #0x3\n"
+ "cmp x21, #0x3\n"
"beq 16f\n"
"15:" // Padded: 4 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z19.s }, p0/Z, [x16]\n"
+ "ld1b { z19.s }, p0/Z, [x15]\n"
"add z19.h, p0/M, z19.h, z25.h\n"
- "add x21, x16, %x[ld_in_row]\n"
+ "add x20, x15, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x21]\n"
+ "ld1b { z18.s }, p0/Z, [x20]\n"
"add z18.h, p0/M, z18.h, z25.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z17.s }, p0/Z, [x21]\n"
+ "ld1b { z17.s }, p0/Z, [x20]\n"
"add z17.h, p0/M, z17.h, z25.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x21]\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
"mov x12, #0x4\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z27.h, z19.h, z18.h\n"
"trn1 z28.h, z17.h, z16.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z18.s }, p0/Z, [x21]\n"
+ "ld1b { z18.s }, p0/Z, [x20]\n"
"add z18.h, p0/M, z18.h, z25.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z16.s }, p0/Z, [x21]\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z17.s }, p0/Z, [x21]\n"
- "addvl x20, SP, #24\n"
+ "ld1b { z17.s }, p0/Z, [x20]\n"
+ "addvl x19, SP, #24\n"
"add z17.h, p0/M, z17.h, z25.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a60 // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
"trn1 z29.h, z18.h, z16.h\n"
- "ld1b { z16.s }, p0/Z, [x21]\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
".inst 0xc1687768 // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
- "add x16, x16, %x[ld_in_col]\n"
+ "add x15, x15, %x[ld_in_col]\n"
".inst 0xc1607769 // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa0412a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
"trn1 z30.h, z17.h, z16.h\n"
".inst 0xc1657788 // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xa0422a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
"16:" // Padded: 3 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z19.s }, p0/Z, [x16]\n"
+ "ld1b { z19.s }, p0/Z, [x15]\n"
"add z19.h, p0/M, z19.h, z25.h\n"
- "add x20, x16, %x[ld_in_row]\n"
+ "add x19, x15, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z27.h, z19.h, z18.h\n"
"trn1 z28.h, z17.h, z16.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "addvl x21, SP, #18\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
+ "addvl x20, SP, #18\n"
"add z17.h, p0/M, z17.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
"trn1 z29.h, z18.h, z16.h\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "addvl x20, SP, #24\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
+ "addvl x19, SP, #24\n"
"add z16.h, p0/M, z16.h, z25.h\n"
".inst 0xc1687768 // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
".inst 0xc1607769 // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a60 // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
"trn1 z30.h, z17.h, z16.h\n"
- "add x16, x16, %x[ld_in_col]\n"
- ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ "add x15, x15, %x[ld_in_col]\n"
+ ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
".inst 0xc168776a // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
".inst 0xc160776b // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
".inst 0xc1657788 // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa0412a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
".inst 0xc165778a // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778b // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xa0422a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
".inst 0xc16b77aa // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ab // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
"17:" // Padded: 2 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z19.s }, p0/Z, [x16]\n"
+ "ld1b { z19.s }, p0/Z, [x15]\n"
"add z19.h, p0/M, z19.h, z25.h\n"
- "add x20, x16, %x[ld_in_row]\n"
+ "add x19, x15, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z27.h, z19.h, z18.h\n"
"trn1 z28.h, z17.h, z16.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "addvl x22, SP, #12\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
+ "addvl x21, SP, #12\n"
"add z17.h, p0/M, z17.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+ ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
"trn1 z29.h, z18.h, z16.h\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "addvl x21, SP, #18\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
+ "addvl x20, SP, #18\n"
"add z16.h, p0/M, z16.h, z25.h\n"
".inst 0xc1687768 // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
".inst 0xc1607769 // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
- "addvl x20, SP, #24\n"
+ ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+ "addvl x19, SP, #24\n"
"trn1 z30.h, z17.h, z16.h\n"
- ".inst 0xa0412ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
".inst 0xc168776a // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
- "add x16, x16, %x[ld_in_col]\n"
+ "add x15, x15, %x[ld_in_col]\n"
".inst 0xc160776b // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a60 // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
".inst 0xc1657788 // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
- ".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
".inst 0xc168776c // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
".inst 0xc160776d // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
".inst 0xc165778a // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778b // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa0412a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
".inst 0xc165778c // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778d // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
".inst 0xc16b77aa // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ab // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xa0422a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
".inst 0xc16b77ac // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ad // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
"18:" // Padded: 1 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z19.s }, p0/Z, [x16]\n"
+ "ld1b { z19.s }, p0/Z, [x15]\n"
"add z19.h, p0/M, z19.h, z25.h\n"
- "add x20, x16, %x[ld_in_row]\n"
+ "add x19, x15, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z27.h, z19.h, z18.h\n"
"trn1 z28.h, z17.h, z16.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "addvl x23, SP, #6\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
+ "addvl x22, SP, #6\n"
"add z17.h, p0/M, z17.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xa1402ae0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x23]\n"
+ ".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
"trn1 z29.h, z18.h, z16.h\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "addvl x22, SP, #12\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
+ "addvl x21, SP, #12\n"
"add z16.h, p0/M, z16.h, z25.h\n"
".inst 0xc1687768 // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
".inst 0xc1607769 // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
- "addvl x21, SP, #18\n"
+ ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+ "addvl x20, SP, #18\n"
"trn1 z30.h, z17.h, z16.h\n"
- ".inst 0xa0412ae4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xa0412ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
".inst 0xc168776a // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
- "addvl x20, SP, #24\n"
- "add x16, x16, %x[ld_in_col]\n"
+ "addvl x19, SP, #24\n"
+ "add x15, x15, %x[ld_in_col]\n"
".inst 0xc160776b // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
".inst 0xc1657788 // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
- ".inst 0xa0422aea // ld1h { z10.h-z11.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
".inst 0xc168776c // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
".inst 0xc160776d // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a60 // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
".inst 0xc165778a // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778b // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
".inst 0xc168776e // sdot za.s[x11, 6], { z27.h-z28.h }, z8.h\n"
".inst 0xc160776f // sdot za.s[x11, 7], { z27.h-z28.h }, z0.h\n"
".inst 0xc165778c // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778d // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa0412a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
".inst 0xc16b77aa // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ab // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
".inst 0xc165778e // sdot za.s[x11, 6], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778f // sdot za.s[x11, 7], { z28.h-z29.h }, z4.h\n"
".inst 0xc16b77ac // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ad // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xa0422a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
".inst 0xc16b77ae // sdot za.s[x11, 6], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77af // sdot za.s[x11, 7], { z29.h-z30.h }, z10.h\n"
"19:" // Padded: 0 priming loads
".inst 0xa1402be0 // ld1h { z0.h, z8.h }, pn10.b/Z, [SP]\n"
".inst 0xa0412be4 // ld1h { z4.h-z5.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
".inst 0xa0422bea // ld1h { z10.h-z11.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
- "cbz x17, 22f\n"
+ "cbz x16, 22f\n"
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z19.s }, p0/Z, [x16]\n"
+ "ld1b { z19.s }, p0/Z, [x15]\n"
"add z19.h, p0/M, z19.h, z25.h\n"
- "add x20, x16, %x[ld_in_row]\n"
+ "add x19, x15, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z27.h, z19.h, z18.h\n"
"trn1 z28.h, z17.h, z16.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z19.s }, p0/Z, [x20]\n"
+ "ld1b { z19.s }, p0/Z, [x19]\n"
"add z19.h, p0/M, z19.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"add z18.h, p0/M, z18.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z25.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
- "sub x17, x17, #0x1\n"
- "sub x15, x15, #0x1\n"
- "cmp x17, x15\n"
+ "sub x16, x16, #0x1\n"
+ "sub x14, x14, #0x1\n"
+ "cmp x16, x14\n"
"trn1 z29.h, z19.h, z18.h\n"
"trn1 z30.h, z17.h, z16.h\n"
- "csel x25, x17, x15, LT\n"
- "add x16, x16, %x[ld_in_col]\n"
- "sub x15, x15, x25\n"
- "cbz x25, 21f\n"
+ "csel x24, x16, x14, LT\n"
+ "add x15, x15, %x[ld_in_col]\n"
+ "sub x14, x14, x24\n"
+ "cbz x24, 21f\n"
"20:" // Padded: Main loop
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z23.s }, p0/Z, [x16]\n"
+ "ld1b { z23.s }, p0/Z, [x15]\n"
"add z23.h, p0/M, z23.h, z25.h\n"
- "add x24, x16, %x[ld_in_row]\n"
+ "add x23, x15, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z22.s }, p0/Z, [x24]\n"
+ "ld1b { z22.s }, p0/Z, [x23]\n"
".inst 0xc1687768 // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
- "addvl x23, SP, #6\n"
+ "addvl x22, SP, #6\n"
".inst 0xc1607769 // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402ae0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x23]\n"
- "addvl x22, SP, #12\n"
+ ".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+ "addvl x21, SP, #12\n"
"add z22.h, p0/M, z22.h, z25.h\n"
- "add x24, x24, %x[ld_in_row]\n"
+ "add x23, x23, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc168776a // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
".inst 0xc160776b // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
- "addvl x21, SP, #18\n"
- "addvl x20, SP, #24\n"
- "ld1b { z21.s }, p0/Z, [x24]\n"
+ ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+ "addvl x20, SP, #18\n"
+ "addvl x19, SP, #24\n"
+ "ld1b { z21.s }, p0/Z, [x23]\n"
".inst 0xc1657788 // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
"add z21.h, p0/M, z21.h, z25.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412ae4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xa0412ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
"mov x12, #0x4\n"
- "add x24, x24, %x[ld_in_row]\n"
+ "add x23, x23, %x[ld_in_row]\n"
".inst 0xc168776c // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
- "ld1b { z20.s }, p0/Z, [x24]\n"
+ "ld1b { z20.s }, p0/Z, [x23]\n"
"add z20.h, p0/M, z20.h, z25.h\n"
- "add x24, x24, %x[ld_in_row]\n"
+ "add x23, x23, %x[ld_in_row]\n"
".inst 0xc160776d // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "subs x25, x25, #0x1\n"
+ "subs x24, x24, #0x1\n"
".inst 0xc165778a // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
- "ld1b { z19.s }, p0/Z, [x24]\n"
+ "ld1b { z19.s }, p0/Z, [x23]\n"
"add z19.h, p0/M, z19.h, z25.h\n"
- "add x24, x24, %x[ld_in_row]\n"
+ "add x23, x23, %x[ld_in_row]\n"
".inst 0xc164778b // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "add x16, x16, %x[ld_in_col]\n"
+ "add x15, x15, %x[ld_in_col]\n"
".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
- "ld1b { z18.s }, p0/Z, [x24]\n"
+ "ld1b { z18.s }, p0/Z, [x23]\n"
"add z18.h, p0/M, z18.h, z25.h\n"
- "add x24, x24, %x[ld_in_row]\n"
+ "add x23, x23, %x[ld_in_row]\n"
".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aea // ld1h { z10.h-z11.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc168776e // sdot za.s[x11, 6], { z27.h-z28.h }, z8.h\n"
- "ld1b { z17.s }, p0/Z, [x24]\n"
+ "ld1b { z17.s }, p0/Z, [x23]\n"
"add z17.h, p0/M, z17.h, z25.h\n"
- "add x24, x24, %x[ld_in_row]\n"
+ "add x23, x23, %x[ld_in_row]\n"
".inst 0xc160776f // sdot za.s[x11, 7], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a60 // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
".inst 0xc165778c // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
- "ld1b { z16.s }, p0/Z, [x24]\n"
+ "ld1b { z16.s }, p0/Z, [x23]\n"
"add z16.h, p0/M, z16.h, z25.h\n"
".inst 0xc164778d // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
".inst 0xc16b77aa // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ab // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
".inst 0xc165778e // sdot za.s[x11, 6], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778f // sdot za.s[x11, 7], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa0412a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
".inst 0xc16b77ac // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ad // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
".inst 0xc16b77ae // sdot za.s[x11, 6], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77af // sdot za.s[x11, 7], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xa0422a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
".inst 0xc1681768 // sdot za.s[x8, 0], { z27.h-z28.h }, z8.h\n"
".inst 0xc1601769 // sdot za.s[x8, 1], { z27.h-z28.h }, z0.h\n"
".inst 0xa1402be0 // ld1h { z0.h, z8.h }, pn10.b/Z, [SP]\n"
@@ -1069,56 +1069,56 @@ void sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za_impl(
".inst 0xc1a9ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z9.s\n"
".inst 0xc00408c1 // mova za.d[x8, #1], { z6.d-z7.d }\n"
".inst 0xc1bfcf0c // sclamp { z12.s-z15.s }, z24.s, z31.s\n"
- "st1b { z12.s }, p1, [x14]\n"
- "add x14, x14, x3\n"
- "st1b { z14.s }, p1, [x13]\n"
+ "st1b { z12.s }, p1, [x13]\n"
"add x13, x13, x10\n"
- "st1b { z13.s }, p1, [x9]\n"
- "add x9, x9, x27\n"
- "st1b { z15.s }, p1, [x28]\n"
+ "st1b { z14.s }, p1, [x4]\n"
+ "add x4, x4, x9\n"
+ "st1b { z13.s }, p1, [x28]\n"
"add x28, x28, x26\n"
+ "st1b { z15.s }, p1, [x27]\n"
+ "add x27, x27, x25\n"
"bgt 20b\n"
"21:" // Main loop tail
- "addvl x23, SP, #6\n"
+ "addvl x22, SP, #6\n"
".inst 0xc1687768 // sdot za.s[x11, 0], { z27.h-z28.h }, z8.h\n"
- "addvl x22, SP, #12\n"
+ "addvl x21, SP, #12\n"
".inst 0xc1607769 // sdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402ae0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x23]\n"
- "addvl x21, SP, #18\n"
- "addvl x20, SP, #24\n"
+ ".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+ "addvl x20, SP, #18\n"
+ "addvl x19, SP, #24\n"
".inst 0xc168776a // sdot za.s[x11, 2], { z27.h-z28.h }, z8.h\n"
".inst 0xc160776b // sdot za.s[x11, 3], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+ ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
".inst 0xc1657788 // sdot za.s[x11, 0], { z28.h-z29.h }, z5.h\n"
".inst 0xc1647789 // sdot za.s[x11, 1], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412ae4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xa0412ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
".inst 0xc168776c // sdot za.s[x11, 4], { z27.h-z28.h }, z8.h\n"
".inst 0xc160776d // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
".inst 0xc165778a // sdot za.s[x11, 2], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778b // sdot za.s[x11, 3], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aea // ld1h { z10.h-z11.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0xc168776e // sdot za.s[x11, 6], { z27.h-z28.h }, z8.h\n"
".inst 0xc160776f // sdot za.s[x11, 7], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a60 // ld1h { z0.h, z8.h }, pn10.b/Z, [x19]\n"
".inst 0xc165778c // sdot za.s[x11, 4], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778d // sdot za.s[x11, 5], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
".inst 0xc16b77aa // sdot za.s[x11, 2], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ab // sdot za.s[x11, 3], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
".inst 0xc165778e // sdot za.s[x11, 6], { z28.h-z29.h }, z5.h\n"
".inst 0xc164778f // sdot za.s[x11, 7], { z28.h-z29.h }, z4.h\n"
- ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa0412a64 // ld1h { z4.h-z5.h }, pn10.b/Z, [x19, #0x2, MUL VL]\n"
".inst 0xc16b77ac // sdot za.s[x11, 4], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77ad // sdot za.s[x11, 5], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
".inst 0xc16b77ae // sdot za.s[x11, 6], { z29.h-z30.h }, z11.h\n"
".inst 0xc16a77af // sdot za.s[x11, 7], { z29.h-z30.h }, z10.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xa0422a6a // ld1h { z10.h-z11.h }, pn10.b/Z, [x19, #0x4, MUL VL]\n"
".inst 0xc1681768 // sdot za.s[x8, 0], { z27.h-z28.h }, z8.h\n"
".inst 0xc1601769 // sdot za.s[x8, 1], { z27.h-z28.h }, z0.h\n"
".inst 0xc1651788 // sdot za.s[x8, 0], { z28.h-z29.h }, z5.h\n"
@@ -1135,20 +1135,20 @@ void sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za_impl(
".inst 0xc1a9ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z9.s\n"
".inst 0xc00408c1 // mova za.d[x8, #1], { z6.d-z7.d }\n"
".inst 0xc1bfcf0c // sclamp { z12.s-z15.s }, z24.s, z31.s\n"
- "st1b { z12.s }, p1, [x14]\n"
- "add x14, x14, x3\n"
- "st1b { z14.s }, p1, [x13]\n"
+ "st1b { z12.s }, p1, [x13]\n"
"add x13, x13, x10\n"
- "st1b { z13.s }, p1, [x9]\n"
- "add x9, x9, x27\n"
- "st1b { z15.s }, p1, [x28]\n"
+ "st1b { z14.s }, p1, [x4]\n"
+ "add x4, x4, x9\n"
+ "st1b { z13.s }, p1, [x28]\n"
"add x28, x28, x26\n"
+ "st1b { z15.s }, p1, [x27]\n"
+ "add x27, x27, x25\n"
"22:" // Main loop skip tail
- "cbz x15, 24f\n"
+ "cbz x14, 24f\n"
"23:" // Right padding loop
".inst 0xc006680c // mova { z12.d-z13.d }, za.d[x11, #0]\n"
"add x8, x8, #0x2\n"
- "subs x15, x15, #0x1\n"
+ "subs x14, x14, #0x1\n"
".inst 0xc006682e // mova { z14.d-z15.d }, za.d[x11, #1]\n"
".inst 0xc1a3ac0c // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z3.s\n"
"add x11, x11, #0x2\n"
@@ -1157,44 +1157,44 @@ void sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za_impl(
".inst 0xc1a9ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z9.s\n"
".inst 0xc00408c1 // mova za.d[x8, #1], { z6.d-z7.d }\n"
".inst 0xc1bfcf0c // sclamp { z12.s-z15.s }, z24.s, z31.s\n"
- "st1b { z12.s }, p1, [x14]\n"
- "add x14, x14, x3\n"
- "st1b { z14.s }, p1, [x13]\n"
+ "st1b { z12.s }, p1, [x13]\n"
"add x13, x13, x10\n"
- "st1b { z13.s }, p1, [x9]\n"
- "add x9, x9, x27\n"
- "st1b { z15.s }, p1, [x28]\n"
+ "st1b { z14.s }, p1, [x4]\n"
+ "add x4, x4, x9\n"
+ "st1b { z13.s }, p1, [x28]\n"
"add x28, x28, x26\n"
+ "st1b { z15.s }, p1, [x27]\n"
+ "add x27, x27, x25\n"
"bgt 23b\n"
"24:" // End
"ldr x23, [%x[args], %[offsetof_Args_weights]]\n"
"incw x23, ALL, MUL #16\n"
"incw x23, ALL, MUL #9\n"
"str x23, [%x[args], %[offsetof_Args_weights]]\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "incw x5\n"
- "whilelt p1.s, x5, x7\n"
- "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
- "add x16, x16, x20\n"
- "str x16, [%x[args], %[offsetof_Args_inptr]]\n"
- "ldr x25, [%x[args], %[offsetof_Args_outptrs]]\n"
- "ldr x24, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
- "ldp x23, x22, [x25, #0x0]\n"
- "ldp x21, x20, [x24, #0x0]\n"
- "add x23, x23, x21\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+ "incw x17\n"
+ "whilelt p1.s, x17, x7\n"
+ "ldr x15, [%x[args], %[offsetof_Args_inptr]]\n"
+ "add x15, x15, x19\n"
+ "str x15, [%x[args], %[offsetof_Args_inptr]]\n"
+ "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+ "ldp x22, x21, [x24, #0x0]\n"
+ "ldp x20, x19, [x23, #0x0]\n"
"add x22, x22, x20\n"
- "stp x23, x22, [x25, #0x0]\n"
- "ldp x23, x22, [x25, #0x10]\n"
- "ldp x21, x20, [x24, #0x10]\n"
- "add x23, x23, x21\n"
+ "add x21, x21, x19\n"
+ "stp x22, x21, [x24, #0x0]\n"
+ "ldp x22, x21, [x24, #0x10]\n"
+ "ldp x20, x19, [x23, #0x10]\n"
"add x22, x22, x20\n"
- "stp x23, x22, [x25, #0x10]\n"
+ "add x21, x21, x19\n"
+ "stp x22, x21, [x24, #0x10]\n"
"b.any 1b\n"
"addvl SP, SP, #30\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
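For reference, the epilogue repeated after each loop above (sqdmulh, srshl, add, sclamp, st1b) is the standard fixed-point requantization of the ZA accumulators. As a minimal scalar sketch, assuming the Requantize32 fields named in the operand list (per_layer_mul, per_layer_right_shift, c_offset, minval, maxval) and a negative encoding for right shifts, one lane reduces to roughly:

    #include <algorithm>
    #include <cstdint>

    // Hedged scalar model of the vectorised epilogue; not the library's API.
    static inline uint8_t requantize_lane(int32_t acc, int32_t per_layer_mul,
                                          int32_t per_layer_right_shift,
                                          int32_t c_offset, int32_t minval,
                                          int32_t maxval)
    {
        // SQDMULH: doubling multiply, keep the high 32 bits (the saturating
        // corner case at INT32_MIN * INT32_MIN is omitted for brevity).
        int32_t hi = (int32_t)(((int64_t)acc * per_layer_mul * 2) >> 32);
        // SRSHL: a negative shift operand means a rounding right shift.
        int32_t out;
        if (per_layer_right_shift < 0) {
            int32_t rs = -per_layer_right_shift;
            out = (int32_t)(((int64_t)hi + (1LL << (rs - 1))) >> rs);
        } else {
            out = hi << per_layer_right_shift;
        }
        // ADD the output offset, SCLAMP into [minval, maxval]; ST1B then
        // stores the low byte of each lane.
        out = std::min(std::max(out + c_offset, minval), maxval);
        return (uint8_t)out;
    }

SRSHL is a rounding shift, so the negative-shift branch models the round-to-nearest right shift performed by the asm; per-channel kernels substitute the per_channel_muls and per_channel_right_shifts arrays for the per-layer scalars.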
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za/generic.cpp
index 33bb4eb8ec..8cdc94d0e9 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -69,20 +69,20 @@ void sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za_impl(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "ldr x3, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "ldr x4, [%x[args], %[offsetof_Args_pad_bottom]]\n"
"ptrue p2.b\n"
- "mov x20, #0xb\n"
- "ldr x4, [%x[args], %[offsetof_Args_pad_top]]\n"
+ "mov x19, #0xb\n"
+ "ldr x5, [%x[args], %[offsetof_Args_pad_top]]\n"
"ld1rh { z9.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
- "sub x20, x20, x3\n"
+ "sub x19, x19, x4\n"
".inst 0x25207812 // ptrue pn10.b\n"
- "ldr x5, [%x[args], %[offsetof_Args_n_channels]]\n"
- "whilelt p1.s, XZR, x5\n"
- "whilelt p9.s, XZR, x20\n"
+ "ldr x6, [%x[args], %[offsetof_Args_n_channels]]\n"
+ "whilelt p1.s, XZR, x6\n"
+ "whilelt p9.s, XZR, x19\n"
"ld1rw { z8.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
- "whilelt p8.s, XZR, x4\n"
+ "whilelt p8.s, XZR, x5\n"
"addvl SP, SP, #-15\n"
- "ldr x6, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ldr x7, [%x[args], %[offsetof_Args_current_channel]]\n"
"neg z9.h, p2/M, z9.h\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
"ld1rw { z3.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
@@ -90,227 +90,310 @@ void sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za_impl(
"ld1rw { z26.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
"ld1rw { z23.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
"1:" // Channel loop
- "ldr x20, [%x[qp], %[offsetof_Requantize32_bias]]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_bias]]\n"
"mov z28.s, #0x0\n"
- "cbz x20, 2f\n"
- "ld1w { z28.s }, p1/Z, [x20, x6, LSL #2]\n"
+ "cbz x19, 2f\n"
+ "ld1w { z28.s }, p1/Z, [x19, x7, LSL #2]\n"
"2:" // Load bias: Done
- "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
- "mov x20, x22\n"
- "ld1sb { z12.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+ "mov x19, x21\n"
+ "ld1sb { z12.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"ld1rh { z18.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
"sub z12.h, z12.h, z18.h\n"
- "incw x22\n"
+ "incw x21\n"
"mov z14.h, #0x0\n"
- "ld1sb { z25.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "ld1sb { z25.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z25.h, z25.h, z18.h\n"
"trn1 z2.h, z12.h, z25.h\n"
- "ld1sb { z24.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "ld1sb { z24.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z24.h, z24.h, z18.h\n"
- "addvl x21, SP, #15\n"
- "ld1sb { z17.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "addvl x20, SP, #15\n"
+ "ld1sb { z17.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z17.h, z17.h, z18.h\n"
"trn1 z10.h, z24.h, z17.h\n"
- "ld1sb { z16.s }, p2/Z, [x20]\n"
- "mov x20, x22\n"
+ "ld1sb { z16.s }, p2/Z, [x19]\n"
+ "mov x19, x21\n"
"sub z16.h, z16.h, z18.h\n"
- "incw x22\n"
- "ld1sb { z12.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "incw x21\n"
+ "ld1sb { z12.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z12.h, z12.h, z18.h\n"
- "addvl x21, x21, #-3\n"
- "ld1sb { z25.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "addvl x20, x20, #-3\n"
+ "ld1sb { z25.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z25.h, z25.h, z18.h\n"
"trn1 z0.h, z16.h, z14.h\n"
- "ld1sb { z24.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "ld1sb { z24.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z24.h, z24.h, z18.h\n"
- "st1h { z2.h }, p2, [x21]\n"
- "ld1sb { z17.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "st1h { z2.h }, p2, [x20]\n"
+ "ld1sb { z17.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z17.h, z17.h, z18.h\n"
"trn1 z2.h, z12.h, z25.h\n"
- "ld1sb { z16.s }, p2/Z, [x20]\n"
- "mov x20, x22\n"
- "st1h { z10.h }, p2, [x21, #1, MUL VL]\n"
+ "ld1sb { z16.s }, p2/Z, [x19]\n"
+ "mov x19, x21\n"
+ "st1h { z10.h }, p2, [x20, #1, MUL VL]\n"
"sub z16.h, z16.h, z18.h\n"
- "ld1sb { z12.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "ld1sb { z12.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"trn1 z10.h, z24.h, z17.h\n"
"sub z12.h, z12.h, z18.h\n"
- "ld1sb { z25.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "ld1sb { z25.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z25.h, z25.h, z18.h\n"
- "st1h { z0.h }, p2, [x21, #2, MUL VL]\n"
- "ld1sb { z24.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "st1h { z0.h }, p2, [x20, #2, MUL VL]\n"
+ "ld1sb { z24.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"trn1 z0.h, z16.h, z14.h\n"
- "incw x22\n"
- "ld1sb { z17.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "incw x21\n"
+ "ld1sb { z17.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z24.h, z24.h, z18.h\n"
"sub z17.h, z17.h, z18.h\n"
- "ld1sb { z16.s }, p2/Z, [x20]\n"
- "addvl x21, x21, #-3\n"
- "mov x20, x22\n"
- "st1h { z2.h }, p2, [x21]\n"
+ "ld1sb { z16.s }, p2/Z, [x19]\n"
+ "addvl x20, x20, #-3\n"
+ "mov x19, x21\n"
+ "st1h { z2.h }, p2, [x20]\n"
"trn1 z2.h, z12.h, z25.h\n"
- "ld1sb { z12.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "ld1sb { z12.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z16.h, z16.h, z18.h\n"
- "ld1sb { z25.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
- "st1h { z10.h }, p2, [x21, #1, MUL VL]\n"
+ "ld1sb { z25.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
+ "st1h { z10.h }, p2, [x20, #1, MUL VL]\n"
"trn1 z10.h, z24.h, z17.h\n"
- "ld1sb { z24.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "ld1sb { z24.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z12.h, z12.h, z18.h\n"
"sub z25.h, z25.h, z18.h\n"
- "ld1sb { z17.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
- "st1h { z0.h }, p2, [x21, #2, MUL VL]\n"
+ "ld1sb { z17.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
+ "st1h { z0.h }, p2, [x20, #2, MUL VL]\n"
"trn1 z0.h, z16.h, z14.h\n"
- "ld1sb { z16.s }, p2/Z, [x20]\n"
- "incw x22\n"
+ "ld1sb { z16.s }, p2/Z, [x19]\n"
+ "incw x21\n"
"sub z24.h, z24.h, z18.h\n"
"sub z17.h, z17.h, z18.h\n"
- "addvl x21, x21, #-3\n"
- "mov x20, x22\n"
- "st1h { z2.h }, p2, [x21]\n"
+ "addvl x20, x20, #-3\n"
+ "mov x19, x21\n"
+ "st1h { z2.h }, p2, [x20]\n"
"sub z16.h, z16.h, z18.h\n"
"trn1 z2.h, z12.h, z25.h\n"
- "ld1sb { z12.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
- "st1h { z10.h }, p2, [x21, #1, MUL VL]\n"
- "ld1sb { z25.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "ld1sb { z12.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
+ "st1h { z10.h }, p2, [x20, #1, MUL VL]\n"
+ "ld1sb { z25.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"trn1 z10.h, z24.h, z17.h\n"
- "st1h { z0.h }, p2, [x21, #2, MUL VL]\n"
- "ld1sb { z24.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "st1h { z0.h }, p2, [x20, #2, MUL VL]\n"
+ "ld1sb { z24.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"trn1 z0.h, z16.h, z14.h\n"
"sub z12.h, z12.h, z18.h\n"
- "ld1sb { z17.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
+ "ld1sb { z17.s }, p2/Z, [x19]\n"
+ "incw x19, ALL, MUL #5\n"
"sub z25.h, z25.h, z18.h\n"
"sub z24.h, z24.h, z18.h\n"
- "ld1sb { z16.s }, p2/Z, [x20]\n"
+ "ld1sb { z16.s }, p2/Z, [x19]\n"
"sub z17.h, z17.h, z18.h\n"
"sub z16.h, z16.h, z18.h\n"
- "ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
- "addvl x21, x21, #-3\n"
- "st1h { z2.h }, p2, [x21]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+ "addvl x20, x20, #-3\n"
+ "st1h { z2.h }, p2, [x20]\n"
"mov z29.d, z28.d\n"
"mov z30.d, z28.d\n"
- "st1h { z10.h }, p2, [x21, #1, MUL VL]\n"
+ "st1h { z10.h }, p2, [x20, #1, MUL VL]\n"
"mov z31.d, z28.d\n"
"trn1 z2.h, z12.h, z25.h\n"
- "st1h { z0.h }, p2, [x21, #2, MUL VL]\n"
- "addvl x21, x21, #-3\n"
+ "st1h { z0.h }, p2, [x20, #2, MUL VL]\n"
+ "addvl x20, x20, #-3\n"
"trn1 z10.h, z24.h, z17.h\n"
"trn1 z0.h, z16.h, z14.h\n"
- "st1h { z2.h }, p2, [x21]\n"
- "st1h { z10.h }, p2, [x21, #1, MUL VL]\n"
- "st1h { z0.h }, p2, [x21, #2, MUL VL]\n"
- "cbz x20, 3f\n"
- "ld1w { z3.s }, p1/Z, [x20, x6, LSL #2]\n"
+ "st1h { z2.h }, p2, [x20]\n"
+ "st1h { z10.h }, p2, [x20, #1, MUL VL]\n"
+ "st1h { z0.h }, p2, [x20, #2, MUL VL]\n"
+ "cbz x19, 3f\n"
+ "ld1w { z3.s }, p1/Z, [x19, x7, LSL #2]\n"
"3:" // Load mul: End
- "ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
- "cbz x20, 4f\n"
- "ld1w { z1.s }, p1/Z, [x20, x6, LSL #2]\n"
+ "ldr x19, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+ "cbz x19, 4f\n"
+ "ld1w { z1.s }, p1/Z, [x19, x7, LSL #2]\n"
"4:" // Load right_shift: End
- "ldr x7, [%x[args], %[offsetof_Args_input_cols]]\n"
- "sub x20, x7, #0x1\n"
- "orr x23, x20, %x[ld_in_col], LSL #16\n"
- "ldr x17, [%x[args], %[offsetof_Args_inptr]]\n"
- "orr x23, x5, x23, LSL #22\n"
- "mov x22, #0xb\n"
- "add x21, x4, x3\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "ldr x16, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "ldr x17, [%x[args], %[offsetof_Args_input_cols]]\n"
+ "sub x19, x17, #0x1\n"
+ "orr x22, x19, %x[ld_in_col], LSL #16\n"
+ "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
+ "orr x22, x6, x22, LSL #22\n"
+ "mov x21, #0xb\n"
+ "add x20, x5, x4\n"
+ "lsl x19, %x[ld_in_row], #0x0\n"
+ "ldr x15, [%x[args], %[offsetof_Args_output_cols]]\n"
"mov x8, #0x0\n"
- "lsl x23, x23, #0x0\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x4, x17\n"
+ "lsl x22, x22, #0x0\n"
+ "sub x21, x21, x20\n"
+ "madd x19, x19, x5, x16\n"
"5:" // Issue prefetches
- "subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col]\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xf8b64a7c // rprfm pldstrm, x22, [x19]\n"
+ "add x19, x19, %x[ld_in_col]\n"
"bgt 5b\n"
- "ldr x25, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "msub x17, x4, x20, x17\n"
+ "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "lsl x19, %x[ld_in_row], #0x0\n"
+ "msub x16, x5, x19, x16\n"
".inst 0xc0040f80 // mova za.d[x8, #0], { z28.d-z31.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
".inst 0xc0040f81 // mova za.d[x8, #1], { z28.d-z31.d }\n"
- "mov x22, #0x4\n"
- "ldp x15, x14, [x25], #0x10\n"
+ "mov x21, #0x4\n"
+ "ldp x14, x13, [x24], #0x10\n"
".inst 0xc0040f82 // mova za.d[x8, #2], { z28.d-z31.d }\n"
- "ldp x13, x11, [x20], #0x10\n"
+ "ldp x11, x10, [x19], #0x10\n"
".inst 0xc0040f83 // mova za.d[x8, #3], { z28.d-z31.d }\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
+ "ldr x20, [%x[args], %[offsetof_Args_pad_left]]\n"
".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
- "ldp x10, x9, [x25], #0x10\n"
- "ldp x28, x27, [x20], #0x10\n"
- "cbz x21, 7f\n"
- "cmp x21, x22\n"
- "csel x20, x21, x22, LT\n"
- "sub x21, x21, x20\n"
- "sub x22, x22, x20\n"
- "cbz x21, 7f\n"
+ "ldp x9, x28, [x24], #0x10\n"
+ "ldp x27, x26, [x19], #0x10\n"
+ "cbz x20, 7f\n"
+ "cmp x20, x21\n"
+ "csel x19, x20, x21, LT\n"
+ "sub x20, x20, x19\n"
+ "sub x21, x21, x19\n"
+ "cbz x20, 7f\n"
".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
".inst 0xc1a3ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
- "and x22, x21, #0x1\n"
+ "and x21, x20, #0x1\n"
".inst 0xc1a1aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
- "add x21, x21, #0x1\n"
- "lsr x21, x21, #0x1\n"
+ "add x20, x20, #0x1\n"
+ "lsr x20, x20, #0x1\n"
".inst 0xc1a8ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
- "sub x16, x16, x21\n"
+ "sub x15, x15, x20\n"
".inst 0xc1b7cf44 // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
"6:" // Left padding
- "subs x21, x21, #0x1\n"
- "st1b { z4.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- "st1b { z5.s }, p1, [x14]\n"
+ "subs x20, x20, #0x1\n"
+ "st1b { z4.s }, p1, [x14]\n"
"add x14, x14, x11\n"
- "st1b { z6.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
- "st1b { z7.s }, p1, [x9]\n"
+ "st1b { z5.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
+ "st1b { z6.s }, p1, [x9]\n"
"add x9, x9, x27\n"
+ "st1b { z7.s }, p1, [x28]\n"
+ "add x28, x28, x26\n"
"bgt 6b\n"
"7:" // Left padding: End
- "adds XZR, x4, x3\n"
+ "adds XZR, x5, x4\n"
"bne 14f\n"
- "cbz x22, 12f\n"
- "cmp x22, #0x1\n"
- "sub x7, x7, x22\n"
+ "cbz x21, 12f\n"
+ "cmp x21, #0x1\n"
+ "sub x17, x17, x21\n"
"beq 11f\n"
- "cmp x22, #0x2\n"
+ "cmp x21, #0x2\n"
"beq 10f\n"
- "cmp x22, #0x3\n"
+ "cmp x21, #0x3\n"
"beq 9f\n"
"8:" // Unpadded: 4 priming loads
- "add x21, x17, %x[ld_in_row]\n"
- "ld1b { z11.s }, p1/Z, [x17]\n"
- "addvl x20, SP, #12\n"
+ "add x20, x16, %x[ld_in_row]\n"
+ "ld1b { z11.s }, p1/Z, [x16]\n"
+ "addvl x19, SP, #12\n"
+ "ld1b { z21.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z21.h\n"
+ "add z11.h, z11.h, z9.h\n"
+ "ld1b { z12.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "ld1b { z20.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z20.h\n"
+ "add z12.h, z12.h, z9.h\n"
+ "ld1b { z13.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z19.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z13.h, z13.h, z19.h\n"
+ "add z13.h, z13.h, z9.h\n"
+ "ld1b { z14.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z18.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "add z14.h, z14.h, z9.h\n"
+ "ld1b { z15.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z15.h, z15.h, z9.h\n"
+ ".inst 0xa1402a62 // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+ "ld1b { z16.s }, p1/Z, [x20]\n"
+ "mov z16.d, z16.d\n"
+ "add z16.h, z16.h, z9.h\n"
+ ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+ ".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+ "9:" // Unpadded: 3 priming loads
+ "add x20, x16, %x[ld_in_row]\n"
+ "ld1b { z11.s }, p1/Z, [x16]\n"
+ "addvl x19, SP, #9\n"
+ "ld1b { z21.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z21.h\n"
+ "add z11.h, z11.h, z9.h\n"
+ "ld1b { z12.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "ld1b { z20.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z20.h\n"
+ "add z12.h, z12.h, z9.h\n"
+ "ld1b { z13.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z19.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z13.h, z13.h, z19.h\n"
+ "add z13.h, z13.h, z9.h\n"
+ "ld1b { z14.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z18.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z18.h\n"
+ "add z14.h, z14.h, z9.h\n"
+ "ld1b { z15.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z15.h, z15.h, z9.h\n"
+ ".inst 0xa1402a62 // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+ "ld1b { z16.s }, p1/Z, [x20]\n"
+ "mov z16.d, z16.d\n"
+ "add z16.h, z16.h, z9.h\n"
+ ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+ ".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
+ "10:" // Unpadded: 2 priming loads
+ "add x21, x16, %x[ld_in_row]\n"
+ "ld1b { z11.s }, p1/Z, [x16]\n"
+ "addvl x20, SP, #6\n"
"ld1b { z21.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"trn1 z11.h, z11.h, z21.h\n"
"add z11.h, z11.h, z9.h\n"
"ld1b { z12.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "add x17, x17, %x[ld_in_col]\n"
+ "addvl x19, SP, #12\n"
"ld1b { z20.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
"add z12.h, z12.h, z9.h\n"
"ld1b { z13.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add x16, x16, %x[ld_in_col]\n"
"ld1b { z19.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"trn1 z13.h, z13.h, z19.h\n"
@@ -324,34 +407,40 @@ void sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za_impl(
"ld1b { z15.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"ld1b { z17.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
"trn1 z15.h, z15.h, z17.h\n"
+ "add x21, x21, %x[ld_in_row]\n"
"add z15.h, z15.h, z9.h\n"
".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
"ld1b { z16.s }, p1/Z, [x21]\n"
"mov z16.d, z16.d\n"
- "add z16.h, z16.h, z9.h\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xa1402a62 // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+ "add z16.h, z16.h, z9.h\n"
"ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ ".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "9:" // Unpadded: 3 priming loads
- "add x21, x17, %x[ld_in_row]\n"
- "ld1b { z11.s }, p1/Z, [x17]\n"
- "addvl x20, SP, #9\n"
+ "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+ ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+ "11:" // Unpadded: 1 priming loads
+ "add x21, x16, %x[ld_in_row]\n"
+ "ld1b { z11.s }, p1/Z, [x16]\n"
+ "addvl x20, SP, #3\n"
"ld1b { z21.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"trn1 z11.h, z11.h, z21.h\n"
"add z11.h, z11.h, z9.h\n"
"ld1b { z12.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "add x17, x17, %x[ld_in_col]\n"
+ "addvl x19, SP, #12\n"
"ld1b { z20.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
"add z12.h, z12.h, z9.h\n"
"ld1b { z13.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add x16, x16, %x[ld_in_col]\n"
"ld1b { z19.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"trn1 z13.h, z13.h, z19.h\n"
@@ -365,100 +454,127 @@ void sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za_impl(
"ld1b { z15.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"ld1b { z17.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
"trn1 z15.h, z15.h, z17.h\n"
+ "add x21, x21, %x[ld_in_row]\n"
"add z15.h, z15.h, z9.h\n"
".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
"ld1b { z16.s }, p1/Z, [x21]\n"
"mov z16.d, z16.d\n"
- "add z16.h, z16.h, z9.h\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xa1402a62 // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+ ".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+ "add z16.h, z16.h, z9.h\n"
"ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ ".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "10:" // Unpadded: 2 priming loads
- "add x22, x17, %x[ld_in_row]\n"
- "ld1b { z11.s }, p1/Z, [x17]\n"
- "addvl x21, SP, #6\n"
- "ld1b { z21.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+ ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+ "12:" // Unpadded: 0 priming loads
+ "cmp x17, #0x2\n"
+ ".inst 0xa1402be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
+ "ld1h { z0.h }, p2/Z, [SP, #2, MUL VL]\n"
+ "blt 22f\n"
+ "add x20, x16, %x[ld_in_row]\n"
+ "ld1b { z11.s }, p1/Z, [x16]\n"
+ "sub x17, x17, #0x2\n"
+ "ld1b { z21.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z11.h, z11.h, z21.h\n"
+ "sub x15, x15, #0x1\n"
+ "ld1b { z12.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "lsr x19, x17, #0x1\n"
"add z11.h, z11.h, z9.h\n"
- "ld1b { z12.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "addvl x20, SP, #12\n"
- "ld1b { z20.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "ld1b { z20.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
+ "cmp x19, x15\n"
+ "ld1b { z13.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "csel x25, x19, x15, LT\n"
"add z12.h, z12.h, z9.h\n"
- "ld1b { z13.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1b { z19.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "ld1b { z19.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z13.h, z13.h, z19.h\n"
"add z13.h, z13.h, z9.h\n"
- "ld1b { z14.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "ld1b { z18.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "ld1b { z14.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "ld1b { z18.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z14.h, z14.h, z18.h\n"
"add z14.h, z14.h, z9.h\n"
- "ld1b { z15.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "ld1b { z17.s }, p1/Z, [x22]\n"
+ "ld1b { z15.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "and x17, x17, #0x1\n"
+ "ld1b { z17.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z15.h, z15.h, z17.h\n"
- "add x22, x22, %x[ld_in_row]\n"
"add z15.h, z15.h, z9.h\n"
- ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- "ld1b { z16.s }, p1/Z, [x22]\n"
+ "ld1b { z16.s }, p1/Z, [x20]\n"
"mov z16.d, z16.d\n"
+ "add z16.h, z16.h, z9.h\n"
+ "sub x15, x15, x25\n"
+ "cbz x25, 21f\n"
+ "13:" // Unpadded: Main loop
+ ".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+ "addvl x24, SP, #6\n"
+ "addvl x23, SP, #12\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402b02 // ld1h { z2.h, z10.h }, pn10.b/Z, [x24]\n"
+ "add x22, x16, %x[ld_in_row]\n"
+ "addvl x21, SP, #3\n"
".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
- "add z16.h, z16.h, z9.h\n"
- "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "addvl x20, SP, #9\n"
+ "subs x25, x25, #0x1\n"
".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xa1402ae2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x23]\n"
+ ".inst 0xc172156a // sdot za.s[x8, 2], { z11.h-z14.h }, z2.h\n"
+ "ld1b { z11.s }, p1/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "add x19, x16, %x[ld_in_row]\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
- "11:" // Unpadded: 1 priming loads
- "add x22, x17, %x[ld_in_row]\n"
- "ld1b { z11.s }, p1/Z, [x17]\n"
- "addvl x21, SP, #3\n"
+ "ld1h { z0.h }, p2/Z, [x24, #2, MUL VL]\n"
+ ".inst 0xc17a158a // sdot za.s[x8, 2], { z12.h-z15.h }, z10.h\n"
"ld1b { z21.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"trn1 z11.h, z11.h, z21.h\n"
+ ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+ "ld1h { z0.h }, p2/Z, [x23, #2, MUL VL]\n"
"add z11.h, z11.h, z9.h\n"
"ld1b { z12.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "addvl x20, SP, #9\n"
+ ".inst 0xc17015aa // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
"ld1b { z20.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"trn1 z12.h, z12.h, z20.h\n"
"add z12.h, z12.h, z9.h\n"
"ld1b { z13.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "add x17, x17, %x[ld_in_col]\n"
+ ".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
+ "add x8, x8, #0x1\n"
"ld1b { z19.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"trn1 z13.h, z13.h, z19.h\n"
"add z13.h, z13.h, z9.h\n"
"ld1b { z14.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc1a3ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
"ld1b { z18.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"trn1 z14.h, z14.h, z18.h\n"
"add z14.h, z14.h, z9.h\n"
"ld1b { z15.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc1a1aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
"ld1b { z17.s }, p1/Z, [x22]\n"
"trn1 z15.h, z15.h, z17.h\n"
"add x22, x22, %x[ld_in_row]\n"
"add z15.h, z15.h, z9.h\n"
".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+ ".inst 0xc1a8ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
"ld1b { z16.s }, p1/Z, [x22]\n"
"mov z16.d, z16.d\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
@@ -467,166 +583,50 @@ void sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za_impl(
"add z16.h, z16.h, z9.h\n"
"ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
- ".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
- "12:" // Unpadded: 0 priming loads
- "cmp x7, #0x2\n"
- ".inst 0xa1402be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
- "ld1h { z0.h }, p2/Z, [SP, #2, MUL VL]\n"
- "blt 22f\n"
- "add x21, x17, %x[ld_in_row]\n"
- "ld1b { z11.s }, p1/Z, [x17]\n"
- "sub x7, x7, #0x2\n"
- "ld1b { z21.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "trn1 z11.h, z11.h, z21.h\n"
- "sub x16, x16, #0x1\n"
- "ld1b { z12.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "lsr x20, x7, #0x1\n"
- "add z11.h, z11.h, z9.h\n"
- "ld1b { z20.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "trn1 z12.h, z12.h, z20.h\n"
- "cmp x20, x16\n"
- "ld1b { z13.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "csel x26, x20, x16, LT\n"
- "add z12.h, z12.h, z9.h\n"
- "ld1b { z19.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "trn1 z13.h, z13.h, z19.h\n"
- "add z13.h, z13.h, z9.h\n"
- "ld1b { z14.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1b { z18.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "trn1 z14.h, z14.h, z18.h\n"
- "add z14.h, z14.h, z9.h\n"
- "ld1b { z15.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "and x7, x7, #0x1\n"
- "ld1b { z17.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "trn1 z15.h, z15.h, z17.h\n"
- "add z15.h, z15.h, z9.h\n"
- "ld1b { z16.s }, p1/Z, [x21]\n"
- "mov z16.d, z16.d\n"
- "add z16.h, z16.h, z9.h\n"
- "sub x16, x16, x26\n"
- "cbz x26, 21f\n"
- "13:" // Unpadded: Main loop
- ".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- "addvl x25, SP, #6\n"
- "addvl x24, SP, #12\n"
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402b22 // ld1h { z2.h, z10.h }, pn10.b/Z, [x25]\n"
- "add x23, x17, %x[ld_in_row]\n"
- "addvl x22, SP, #3\n"
- ".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
- "addvl x21, SP, #9\n"
- "subs x26, x26, #0x1\n"
- ".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402b02 // ld1h { z2.h, z10.h }, pn10.b/Z, [x24]\n"
- ".inst 0xc172156a // sdot za.s[x8, 2], { z11.h-z14.h }, z2.h\n"
- "ld1b { z11.s }, p1/Z, [x17]\n"
- "add x17, x17, %x[ld_in_col]\n"
- "add x20, x17, %x[ld_in_row]\n"
- ".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x25, #2, MUL VL]\n"
- ".inst 0xc17a158a // sdot za.s[x8, 2], { z12.h-z15.h }, z10.h\n"
- "ld1b { z21.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row]\n"
- "trn1 z11.h, z11.h, z21.h\n"
- ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x24, #2, MUL VL]\n"
- "add z11.h, z11.h, z9.h\n"
- "ld1b { z12.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row]\n"
- ".inst 0xc17015aa // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
- "ld1b { z20.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row]\n"
- "trn1 z12.h, z12.h, z20.h\n"
- "add z12.h, z12.h, z9.h\n"
- "ld1b { z13.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row]\n"
- ".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
- "add x8, x8, #0x1\n"
- "ld1b { z19.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row]\n"
- "trn1 z13.h, z13.h, z19.h\n"
- "add z13.h, z13.h, z9.h\n"
- "ld1b { z14.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row]\n"
- ".inst 0xc1a3ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
- "ld1b { z18.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row]\n"
- "trn1 z14.h, z14.h, z18.h\n"
- "add z14.h, z14.h, z9.h\n"
- "ld1b { z15.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row]\n"
- ".inst 0xc1a1aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
- "ld1b { z17.s }, p1/Z, [x23]\n"
- "trn1 z15.h, z15.h, z17.h\n"
- "add x23, x23, %x[ld_in_row]\n"
- "add z15.h, z15.h, z9.h\n"
- ".inst 0xa1402ac2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x22]\n"
- ".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- ".inst 0xc1a8ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
- "ld1b { z16.s }, p1/Z, [x23]\n"
- "mov z16.d, z16.d\n"
- ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
- "add z16.h, z16.h, z9.h\n"
- "ld1h { z0.h }, p2/Z, [x22, #2, MUL VL]\n"
- ".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
".inst 0xc1b7cf44 // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
- "st1b { z4.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- "ld1b { z11.s }, p1/Z, [x17]\n"
- ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
- "st1b { z5.s }, p1, [x14]\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "st1b { z4.s }, p1, [x14]\n"
"add x14, x14, x11\n"
- "ld1b { z21.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z11.s }, p1/Z, [x16]\n"
+ ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+ "st1b { z5.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
+ "ld1b { z21.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z11.h, z11.h, z21.h\n"
- "st1b { z6.s }, p1, [x10]\n"
- "ld1b { z12.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- "add x10, x10, x28\n"
- "st1b { z7.s }, p1, [x9]\n"
- "ld1b { z20.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z12.h, z12.h, z20.h\n"
+ "st1b { z6.s }, p1, [x9]\n"
+ "ld1b { z12.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"add x9, x9, x27\n"
- "ld1b { z13.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "st1b { z7.s }, p1, [x28]\n"
+ "ld1b { z20.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z20.h\n"
+ "add x28, x28, x26\n"
+ "ld1b { z13.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
"add z11.h, z11.h, z9.h\n"
- "ld1b { z19.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z19.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z13.h, z13.h, z19.h\n"
"add z12.h, z12.h, z9.h\n"
- "ld1b { z14.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z14.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"add z13.h, z13.h, z9.h\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1b { z18.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "ld1b { z18.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z14.h, z14.h, z18.h\n"
"add z14.h, z14.h, z9.h\n"
- "ld1b { z15.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- "ld1b { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z15.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x19]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z15.h, z15.h, z17.h\n"
"add z15.h, z15.h, z9.h\n"
- "ld1b { z16.s }, p1/Z, [x20]\n"
+ "ld1b { z16.s }, p1/Z, [x19]\n"
"mov z16.d, z16.d\n"
"add z16.h, z16.h, z9.h\n"
".inst 0xa1402be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
@@ -634,717 +634,717 @@ void sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za_impl(
"bgt 13b\n"
"b 21f\n"
"14:" // Padded
- "cbz x22, 19f\n"
- "cmp x22, #0x1\n"
- "sub x7, x7, x22\n"
+ "cbz x21, 19f\n"
+ "cmp x21, #0x1\n"
+ "sub x17, x17, x21\n"
"beq 18f\n"
- "cmp x22, #0x2\n"
+ "cmp x21, #0x2\n"
"beq 17f\n"
- "cmp x22, #0x3\n"
+ "cmp x21, #0x3\n"
"beq 16f\n"
"15:" // Padded: 4 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z11.s }, p0/Z, [x17]\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
"add z11.h, p0/M, z11.h, z9.h\n"
- "add x21, x17, %x[ld_in_row]\n"
+ "add x20, x16, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z21.s }, p0/Z, [x21]\n"
+ "ld1b { z21.s }, p0/Z, [x20]\n"
"add z21.h, p0/M, z21.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z12.s }, p0/Z, [x21]\n"
+ "ld1b { z12.s }, p0/Z, [x20]\n"
"add z12.h, p0/M, z12.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z20.s }, p0/Z, [x21]\n"
+ "ld1b { z20.s }, p0/Z, [x20]\n"
"add z20.h, p0/M, z20.h, z9.h\n"
"mov x12, #0x4\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z11.h, z11.h, z21.h\n"
"trn1 z12.h, z12.h, z20.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z13.s }, p0/Z, [x21]\n"
+ "ld1b { z13.s }, p0/Z, [x20]\n"
"add z13.h, p0/M, z13.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z19.s }, p0/Z, [x21]\n"
+ "ld1b { z19.s }, p0/Z, [x20]\n"
"add z19.h, p0/M, z19.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z14.s }, p0/Z, [x21]\n"
+ "ld1b { z14.s }, p0/Z, [x20]\n"
"add z14.h, p0/M, z14.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z18.s }, p0/Z, [x21]\n"
+ "ld1b { z18.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
"add z18.h, p0/M, z18.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z15.s }, p0/Z, [x21]\n"
+ "ld1b { z15.s }, p0/Z, [x20]\n"
"add z15.h, p0/M, z15.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x21]\n"
+ "ld1b { z17.s }, p0/Z, [x20]\n"
"add z17.h, p0/M, z17.h, z9.h\n"
- "addvl x20, SP, #12\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "addvl x19, SP, #12\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z13.h, z13.h, z19.h\n"
"trn1 z14.h, z14.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a62 // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
"trn1 z15.h, z15.h, z17.h\n"
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- "ld1b { z16.s }, p0/Z, [x21]\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
"add z16.h, p0/M, z16.h, z9.h\n"
"mov z16.d, z16.d\n"
- "add x17, x17, %x[ld_in_col]\n"
+ "add x16, x16, %x[ld_in_col]\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
"16:" // Padded: 3 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z11.s }, p0/Z, [x17]\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
"add z11.h, p0/M, z11.h, z9.h\n"
- "add x21, x17, %x[ld_in_row]\n"
+ "add x20, x16, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z21.s }, p0/Z, [x21]\n"
+ "ld1b { z21.s }, p0/Z, [x20]\n"
"add z21.h, p0/M, z21.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z12.s }, p0/Z, [x21]\n"
+ "ld1b { z12.s }, p0/Z, [x20]\n"
"add z12.h, p0/M, z12.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z20.s }, p0/Z, [x21]\n"
+ "ld1b { z20.s }, p0/Z, [x20]\n"
"add z20.h, p0/M, z20.h, z9.h\n"
"mov x12, #0x4\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z11.h, z11.h, z21.h\n"
"trn1 z12.h, z12.h, z20.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z13.s }, p0/Z, [x21]\n"
+ "ld1b { z13.s }, p0/Z, [x20]\n"
"add z13.h, p0/M, z13.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z19.s }, p0/Z, [x21]\n"
+ "ld1b { z19.s }, p0/Z, [x20]\n"
"add z19.h, p0/M, z19.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z14.s }, p0/Z, [x21]\n"
+ "ld1b { z14.s }, p0/Z, [x20]\n"
"add z14.h, p0/M, z14.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z18.s }, p0/Z, [x21]\n"
+ "ld1b { z18.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
"add z18.h, p0/M, z18.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z15.s }, p0/Z, [x21]\n"
+ "ld1b { z15.s }, p0/Z, [x20]\n"
"add z15.h, p0/M, z15.h, z9.h\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "add x20, x20, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x21]\n"
+ "ld1b { z17.s }, p0/Z, [x20]\n"
"add z17.h, p0/M, z17.h, z9.h\n"
- "addvl x20, SP, #9\n"
- "add x21, x21, %x[ld_in_row]\n"
+ "addvl x19, SP, #9\n"
+ "add x20, x20, %x[ld_in_row]\n"
"trn1 z13.h, z13.h, z19.h\n"
"trn1 z14.h, z14.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a62 // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
"trn1 z15.h, z15.h, z17.h\n"
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- "ld1b { z16.s }, p0/Z, [x21]\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
"add z16.h, p0/M, z16.h, z9.h\n"
"mov z16.d, z16.d\n"
- "add x17, x17, %x[ld_in_col]\n"
+ "add x16, x16, %x[ld_in_col]\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
"17:" // Padded: 2 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z11.s }, p0/Z, [x17]\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
"add z11.h, p0/M, z11.h, z9.h\n"
- "add x20, x17, %x[ld_in_row]\n"
+ "add x19, x16, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z21.s }, p0/Z, [x20]\n"
+ "ld1b { z21.s }, p0/Z, [x19]\n"
"add z21.h, p0/M, z21.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z12.s }, p0/Z, [x20]\n"
+ "ld1b { z12.s }, p0/Z, [x19]\n"
"add z12.h, p0/M, z12.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
+ "ld1b { z20.s }, p0/Z, [x19]\n"
"add z20.h, p0/M, z20.h, z9.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z11.h, z11.h, z21.h\n"
"trn1 z12.h, z12.h, z20.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z13.s }, p0/Z, [x20]\n"
+ "ld1b { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z19.s }, p0/Z, [x20]\n"
+ "ld1b { z19.s }, p0/Z, [x19]\n"
"add z19.h, p0/M, z19.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z14.s }, p0/Z, [x20]\n"
+ "ld1b { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
"add z18.h, p0/M, z18.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z15.s }, p0/Z, [x20]\n"
+ "ld1b { z15.s }, p0/Z, [x19]\n"
"add z15.h, p0/M, z15.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z9.h\n"
- "addvl x21, SP, #6\n"
+ "addvl x20, SP, #6\n"
"trn1 z13.h, z13.h, z19.h\n"
"trn1 z14.h, z14.h, z18.h\n"
- ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
"trn1 z15.h, z15.h, z17.h\n"
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "addvl x20, SP, #12\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
+ "addvl x19, SP, #12\n"
"add z16.h, p0/M, z16.h, z9.h\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a62 // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
"mov z16.d, z16.d\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
"18:" // Padded: 1 priming loads
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z11.s }, p0/Z, [x17]\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
"add z11.h, p0/M, z11.h, z9.h\n"
- "add x20, x17, %x[ld_in_row]\n"
+ "add x19, x16, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z21.s }, p0/Z, [x20]\n"
+ "ld1b { z21.s }, p0/Z, [x19]\n"
"add z21.h, p0/M, z21.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z12.s }, p0/Z, [x20]\n"
+ "ld1b { z12.s }, p0/Z, [x19]\n"
"add z12.h, p0/M, z12.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
+ "ld1b { z20.s }, p0/Z, [x19]\n"
"add z20.h, p0/M, z20.h, z9.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z11.h, z11.h, z21.h\n"
"trn1 z12.h, z12.h, z20.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z13.s }, p0/Z, [x20]\n"
+ "ld1b { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z19.s }, p0/Z, [x20]\n"
+ "ld1b { z19.s }, p0/Z, [x19]\n"
"add z19.h, p0/M, z19.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z14.s }, p0/Z, [x20]\n"
+ "ld1b { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
"add z18.h, p0/M, z18.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z15.s }, p0/Z, [x20]\n"
+ "ld1b { z15.s }, p0/Z, [x19]\n"
"add z15.h, p0/M, z15.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z9.h\n"
- "addvl x21, SP, #3\n"
+ "addvl x20, SP, #3\n"
"trn1 z13.h, z13.h, z19.h\n"
"trn1 z14.h, z14.h, z18.h\n"
- ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
"trn1 z15.h, z15.h, z17.h\n"
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "addvl x20, SP, #9\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
+ "addvl x19, SP, #12\n"
"add z16.h, p0/M, z16.h, z9.h\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a62 // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
"mov z16.d, z16.d\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
"19:" // Padded: 0 priming loads
- "cmp x7, #0x2\n"
+ "cmp x17, #0x2\n"
".inst 0xa1402be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
"ld1h { z0.h }, p2/Z, [SP, #2, MUL VL]\n"
"blt 22f\n"
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z11.s }, p0/Z, [x17]\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
"add z11.h, p0/M, z11.h, z9.h\n"
- "add x20, x17, %x[ld_in_row]\n"
+ "add x19, x16, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z21.s }, p0/Z, [x20]\n"
+ "ld1b { z21.s }, p0/Z, [x19]\n"
"add z21.h, p0/M, z21.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z12.s }, p0/Z, [x20]\n"
+ "ld1b { z12.s }, p0/Z, [x19]\n"
"add z12.h, p0/M, z12.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
+ "ld1b { z20.s }, p0/Z, [x19]\n"
"add z20.h, p0/M, z20.h, z9.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z11.h, z11.h, z21.h\n"
"trn1 z12.h, z12.h, z20.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z13.s }, p0/Z, [x20]\n"
+ "ld1b { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z19.s }, p0/Z, [x20]\n"
+ "ld1b { z19.s }, p0/Z, [x19]\n"
"add z19.h, p0/M, z19.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z14.s }, p0/Z, [x20]\n"
+ "ld1b { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
"add z18.h, p0/M, z18.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z15.s }, p0/Z, [x20]\n"
+ "ld1b { z15.s }, p0/Z, [x19]\n"
"add z15.h, p0/M, z15.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z9.h\n"
- "sub x7, x7, #0x2\n"
- "sub x16, x16, #0x1\n"
+ "sub x17, x17, #0x2\n"
+ "sub x15, x15, #0x1\n"
"trn1 z13.h, z13.h, z19.h\n"
"trn1 z14.h, z14.h, z18.h\n"
- "lsr x20, x7, #0x1\n"
- "cmp x20, x16\n"
+ "lsr x19, x17, #0x1\n"
+ "cmp x19, x15\n"
"trn1 z15.h, z15.h, z17.h\n"
"mov z16.d, z16.d\n"
- "csel x25, x20, x16, LT\n"
- "add x17, x17, %x[ld_in_col]\n"
- "and x7, x7, #0x1\n"
- "sub x16, x16, x25\n"
- "cbz x25, 21f\n"
+ "csel x24, x19, x15, LT\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "and x17, x17, #0x1\n"
+ "sub x15, x15, x24\n"
+ "cbz x24, 21f\n"
"20:" // Padded: Main loop
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- "addvl x24, SP, #6\n"
- "addvl x23, SP, #12\n"
+ "addvl x23, SP, #6\n"
+ "addvl x22, SP, #12\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402b02 // ld1h { z2.h, z10.h }, pn10.b/Z, [x24]\n"
+ ".inst 0xa1402ae2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x23]\n"
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
- "add x20, x17, %x[ld_in_row]\n"
- "addvl x22, SP, #3\n"
+ "add x19, x16, %x[ld_in_row]\n"
+ "addvl x21, SP, #3\n"
".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402ae2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x23]\n"
- "addvl x21, SP, #9\n"
- "subs x25, x25, #0x1\n"
+ ".inst 0xa1402ac2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x22]\n"
+ "addvl x20, SP, #9\n"
+ "subs x24, x24, #0x1\n"
".inst 0xc172156a // sdot za.s[x8, 2], { z11.h-z14.h }, z2.h\n"
- "ld1b { z11.s }, p0/Z, [x17]\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
"add z11.h, p0/M, z11.h, z9.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z21.s }, p0/Z, [x20]\n"
+ "ld1b { z21.s }, p0/Z, [x19]\n"
"add z21.h, p0/M, z21.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc17a158a // sdot za.s[x8, 2], { z12.h-z15.h }, z10.h\n"
- "ld1b { z12.s }, p0/Z, [x20]\n"
+ "ld1b { z12.s }, p0/Z, [x19]\n"
"add z12.h, p0/M, z12.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x23, #2, MUL VL]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x4\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
+ "ld1b { z20.s }, p0/Z, [x19]\n"
".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
"add z20.h, p0/M, z20.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
- "ld1h { z0.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "add x19, x19, %x[ld_in_row]\n"
+ "ld1h { z0.h }, p2/Z, [x22, #2, MUL VL]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc17015aa // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
"trn1 z11.h, z11.h, z21.h\n"
- "ld1b { z13.s }, p0/Z, [x20]\n"
+ "ld1b { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z19.s }, p0/Z, [x20]\n"
+ "ld1b { z19.s }, p0/Z, [x19]\n"
"add z19.h, p0/M, z19.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z14.s }, p0/Z, [x20]\n"
+ "ld1b { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
"add z18.h, p0/M, z18.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z15.s }, p0/Z, [x20]\n"
+ "ld1b { z15.s }, p0/Z, [x19]\n"
"add z15.h, p0/M, z15.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z9.h\n"
"trn1 z12.h, z12.h, z20.h\n"
"trn1 z13.h, z13.h, z19.h\n"
"trn1 z14.h, z14.h, z18.h\n"
- ".inst 0xa1402ac2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x22]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
"add x8, x8, #0x1\n"
"trn1 z15.h, z15.h, z17.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"mov x12, #0x0\n"
"add z16.h, p0/M, z16.h, z9.h\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
- "add x17, x17, %x[ld_in_col]\n"
+ ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+ "add x16, x16, %x[ld_in_col]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
- "ld1b { z11.s }, p0/Z, [x17]\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
"add z11.h, p0/M, z11.h, z9.h\n"
- "add x20, x17, %x[ld_in_row]\n"
+ "add x19, x16, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z21.s }, p0/Z, [x20]\n"
+ "ld1b { z21.s }, p0/Z, [x19]\n"
"add z21.h, p0/M, z21.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
- "ld1b { z12.s }, p0/Z, [x20]\n"
+ "ld1b { z12.s }, p0/Z, [x19]\n"
"mov z16.d, z16.d\n"
- "ld1h { z0.h }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
"add z12.h, p0/M, z12.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
+ "ld1b { z20.s }, p0/Z, [x19]\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
"mov x12, #0x4\n"
"add z20.h, p0/M, z20.h, z9.h\n"
- "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
- "ld1b { z13.s }, p0/Z, [x20]\n"
+ "ld1b { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z19.s }, p0/Z, [x20]\n"
+ "ld1b { z19.s }, p0/Z, [x19]\n"
"add z19.h, p0/M, z19.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z14.s }, p0/Z, [x20]\n"
+ "ld1b { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
"add z18.h, p0/M, z18.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z15.s }, p0/Z, [x20]\n"
+ "ld1b { z15.s }, p0/Z, [x19]\n"
".inst 0xc1a3ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
"add z15.h, p0/M, z15.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
".inst 0xc1a1aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
"add z17.h, p0/M, z17.h, z9.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z9.h\n"
".inst 0xc1a8ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
".inst 0xa1402be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
- "add x17, x17, %x[ld_in_col]\n"
+ "add x16, x16, %x[ld_in_col]\n"
".inst 0xc1b7cf44 // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
- "st1b { z4.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- "ld1h { z0.h }, p2/Z, [SP, #2, MUL VL]\n"
- "st1b { z5.s }, p1, [x14]\n"
+ "st1b { z4.s }, p1, [x14]\n"
"add x14, x14, x11\n"
+ "ld1h { z0.h }, p2/Z, [SP, #2, MUL VL]\n"
+ "st1b { z5.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
"trn1 z11.h, z11.h, z21.h\n"
"trn1 z12.h, z12.h, z20.h\n"
- "st1b { z6.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
+ "st1b { z6.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
"trn1 z13.h, z13.h, z19.h\n"
"trn1 z14.h, z14.h, z18.h\n"
- "st1b { z7.s }, p1, [x9]\n"
- "add x9, x9, x27\n"
+ "st1b { z7.s }, p1, [x28]\n"
+ "add x28, x28, x26\n"
"trn1 z15.h, z15.h, z17.h\n"
"mov z16.d, z16.d\n"
"bgt 20b\n"
"21:" // Main loop tail
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- "addvl x24, SP, #6\n"
- "addvl x23, SP, #12\n"
+ "addvl x23, SP, #6\n"
+ "addvl x22, SP, #12\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402b02 // ld1h { z2.h, z10.h }, pn10.b/Z, [x24]\n"
+ ".inst 0xa1402ae2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x23]\n"
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
- "add x22, x17, %x[ld_in_row]\n"
- "addvl x21, SP, #3\n"
+ "add x21, x16, %x[ld_in_row]\n"
+ "addvl x20, SP, #3\n"
".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402ae2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x23]\n"
- "addvl x20, SP, #9\n"
+ ".inst 0xa1402ac2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x22]\n"
+ "addvl x19, SP, #9\n"
".inst 0xc172156a // sdot za.s[x8, 2], { z11.h-z14.h }, z2.h\n"
- "ld1b { z11.s }, p0/Z, [x17]\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
"add z11.h, p0/M, z11.h, z9.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z21.s }, p0/Z, [x22]\n"
+ "ld1b { z21.s }, p0/Z, [x21]\n"
"add z21.h, p0/M, z21.h, z9.h\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
".inst 0xc17a158a // sdot za.s[x8, 2], { z12.h-z15.h }, z10.h\n"
- "ld1b { z12.s }, p0/Z, [x22]\n"
+ "ld1b { z12.s }, p0/Z, [x21]\n"
"add z12.h, p0/M, z12.h, z9.h\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x23, #2, MUL VL]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x4\n"
- "ld1b { z20.s }, p0/Z, [x22]\n"
+ "ld1b { z20.s }, p0/Z, [x21]\n"
".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
"add z20.h, p0/M, z20.h, z9.h\n"
- "add x22, x22, %x[ld_in_row]\n"
- "ld1h { z0.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "ld1h { z0.h }, p2/Z, [x22, #2, MUL VL]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
".inst 0xc17015aa // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
"trn1 z11.h, z11.h, z21.h\n"
- "ld1b { z13.s }, p0/Z, [x22]\n"
+ "ld1b { z13.s }, p0/Z, [x21]\n"
"add z13.h, p0/M, z13.h, z9.h\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z19.s }, p0/Z, [x22]\n"
+ "ld1b { z19.s }, p0/Z, [x21]\n"
"add z19.h, p0/M, z19.h, z9.h\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z14.s }, p0/Z, [x22]\n"
+ "ld1b { z14.s }, p0/Z, [x21]\n"
"add z14.h, p0/M, z14.h, z9.h\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z18.s }, p0/Z, [x22]\n"
+ "ld1b { z18.s }, p0/Z, [x21]\n"
"mov x12, #0x8\n"
"add z18.h, p0/M, z18.h, z9.h\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z15.s }, p0/Z, [x22]\n"
+ "ld1b { z15.s }, p0/Z, [x21]\n"
"add z15.h, p0/M, z15.h, z9.h\n"
- "add x22, x22, %x[ld_in_row]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x22]\n"
+ "ld1b { z17.s }, p0/Z, [x21]\n"
"add z17.h, p0/M, z17.h, z9.h\n"
"trn1 z12.h, z12.h, z20.h\n"
"trn1 z13.h, z13.h, z19.h\n"
"trn1 z14.h, z14.h, z18.h\n"
- ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
- "add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+ "add x21, x21, %x[ld_in_row]\n"
".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
"add x8, x8, #0x1\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
"trn1 z15.h, z15.h, z17.h\n"
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
- "ld1b { z16.s }, p0/Z, [x22]\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
".inst 0xc1a3ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
"add z16.h, p0/M, z16.h, z9.h\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
- "add x17, x17, %x[ld_in_col]\n"
+ ".inst 0xa1402a62 // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
+ "add x16, x16, %x[ld_in_col]\n"
".inst 0xc1a1aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
"mov z16.d, z16.d\n"
- "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
".inst 0xc1a8ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
".inst 0xc1b7cf44 // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
- "st1b { z4.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- "st1b { z5.s }, p1, [x14]\n"
+ "st1b { z4.s }, p1, [x14]\n"
"add x14, x14, x11\n"
+ "st1b { z5.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
".inst 0xa1402be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
- "st1b { z6.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
+ "st1b { z6.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
"ld1h { z0.h }, p2/Z, [SP, #2, MUL VL]\n"
- "st1b { z7.s }, p1, [x9]\n"
- "add x9, x9, x27\n"
+ "st1b { z7.s }, p1, [x28]\n"
+ "add x28, x28, x26\n"
"22:" // Main loop skip tail
- "cbz x7, 23f\n" // Skip remainder inputs
+ "cbz x17, 23f\n" // Skip remainder inputs
"mov x12, #0x0\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z11.s }, p0/Z, [x17]\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
"add z11.h, p0/M, z11.h, z9.h\n"
- "add x20, x17, %x[ld_in_row]\n"
+ "add x19, x16, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z21.s }, p0/Z, [x20]\n"
+ "ld1b { z21.s }, p0/Z, [x19]\n"
"add z21.h, p0/M, z21.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z12.s }, p0/Z, [x20]\n"
+ "ld1b { z12.s }, p0/Z, [x19]\n"
"add z12.h, p0/M, z12.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
+ "ld1b { z20.s }, p0/Z, [x19]\n"
"add z20.h, p0/M, z20.h, z9.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
"trn1 z11.h, z11.h, z21.h\n"
"trn1 z12.h, z12.h, z20.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z13.s }, p0/Z, [x20]\n"
+ "ld1b { z13.s }, p0/Z, [x19]\n"
"add z13.h, p0/M, z13.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z19.s }, p0/Z, [x20]\n"
+ "ld1b { z19.s }, p0/Z, [x19]\n"
"add z19.h, p0/M, z19.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z14.s }, p0/Z, [x20]\n"
+ "ld1b { z14.s }, p0/Z, [x19]\n"
"add z14.h, p0/M, z14.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
+ "ld1b { z18.s }, p0/Z, [x19]\n"
"mov x12, #0x8\n"
"add z18.h, p0/M, z18.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z15.s }, p0/Z, [x20]\n"
+ "ld1b { z15.s }, p0/Z, [x19]\n"
"add z15.h, p0/M, z15.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x19]\n"
"add z17.h, p0/M, z17.h, z9.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "add x19, x19, %x[ld_in_row]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
"trn1 z13.h, z13.h, z19.h\n"
"trn1 z14.h, z14.h, z18.h\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
+ "ld1b { z16.s }, p0/Z, [x19]\n"
"add z16.h, p0/M, z16.h, z9.h\n"
"trn1 z15.h, z15.h, z17.h\n"
- "addvl x21, SP, #6\n"
+ "addvl x20, SP, #6\n"
".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
"mov z16.d, z16.d\n"
- "addvl x20, SP, #12\n"
- "sub x16, x16, #0x1\n"
+ "addvl x19, SP, #12\n"
+ "sub x15, x15, #0x1\n"
".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
".inst 0xc1a3ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1402a62 // ld1h { z2.h, z10.h }, pn10.b/Z, [x19]\n"
".inst 0xc1a1aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
".inst 0xc172156a // sdot za.s[x8, 2], { z11.h-z14.h }, z2.h\n"
".inst 0xc1a8ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
".inst 0xc17a158a // sdot za.s[x8, 2], { z12.h-z15.h }, z10.h\n"
".inst 0xc1b7cf44 // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
- "st1b { z4.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
- "st1b { z5.s }, p1, [x14]\n"
+ "st1b { z4.s }, p1, [x14]\n"
"add x14, x14, x11\n"
+ ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+ "ld1h { z0.h }, p2/Z, [x19, #2, MUL VL]\n"
+ "st1b { z5.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
".inst 0xc17015aa // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
"add x8, x8, #0x1\n"
- "st1b { z6.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
- "st1b { z7.s }, p1, [x9]\n"
+ "st1b { z6.s }, p1, [x9]\n"
"add x9, x9, x27\n"
+ "st1b { z7.s }, p1, [x28]\n"
+ "add x28, x28, x26\n"
".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
"23:" // Tail input: End
- "cbz x16, 25f\n"
+ "cbz x15, 25f\n"
"24:" // Right padding loop
".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
".inst 0xc1a3ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
"add x8, x8, #0x1\n"
".inst 0xc1a1aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
- "subs x16, x16, #0x1\n"
+ "subs x15, x15, #0x1\n"
".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
".inst 0xc1a8ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
".inst 0xc1b7cf44 // sclamp { z4.s-z7.s }, z26.s, z23.s\n"
- "st1b { z4.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- "st1b { z5.s }, p1, [x14]\n"
+ "st1b { z4.s }, p1, [x14]\n"
"add x14, x14, x11\n"
- "st1b { z6.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
- "st1b { z7.s }, p1, [x9]\n"
+ "st1b { z5.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
+ "st1b { z6.s }, p1, [x9]\n"
"add x9, x9, x27\n"
+ "st1b { z7.s }, p1, [x28]\n"
+ "add x28, x28, x26\n"
"bgt 24b\n"
"25:" // End
- "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
- "incw x22, ALL, MUL #16\n"
- "incw x22, ALL, MUL #9\n"
- "str x22, [%x[args], %[offsetof_Args_weights]]\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "incw x6\n"
- "whilelt p1.s, x6, x5\n"
- "ldr x17, [%x[args], %[offsetof_Args_inptr]]\n"
- "add x17, x17, x20\n"
- "str x17, [%x[args], %[offsetof_Args_inptr]]\n"
- "ldr x25, [%x[args], %[offsetof_Args_outptrs]]\n"
- "ldr x24, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
- "ldp x23, x22, [x25, #0x0]\n"
- "ldp x21, x20, [x24, #0x0]\n"
- "add x23, x23, x21\n"
+ "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
+ "incw x21, ALL, MUL #16\n"
+ "incw x21, ALL, MUL #9\n"
+ "str x21, [%x[args], %[offsetof_Args_weights]]\n"
+ "ldr x19, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
+ "incw x7\n"
+ "whilelt p1.s, x7, x6\n"
+ "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
+ "add x16, x16, x19\n"
+ "str x16, [%x[args], %[offsetof_Args_inptr]]\n"
+ "ldr x24, [%x[args], %[offsetof_Args_outptrs]]\n"
+ "ldr x23, [%x[args], %[offsetof_Args_ld_out_vls]]\n"
+ "ldp x22, x21, [x24, #0x0]\n"
+ "ldp x20, x19, [x23, #0x0]\n"
"add x22, x22, x20\n"
- "stp x23, x22, [x25, #0x0]\n"
- "ldp x23, x22, [x25, #0x10]\n"
- "ldp x21, x20, [x24, #0x10]\n"
- "add x23, x23, x21\n"
+ "add x21, x21, x19\n"
+ "stp x22, x21, [x24, #0x0]\n"
+ "ldp x22, x21, [x24, #0x10]\n"
+ "ldp x20, x19, [x23, #0x10]\n"
"add x22, x22, x20\n"
- "stp x23, x22, [x25, #0x10]\n"
+ "add x21, x21, x19\n"
+ "stp x22, x21, [x24, #0x10]\n"
"b.any 1b\n"
"addvl SP, SP, #15\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
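The clobber-list change at the end of this kernel is the crux of the whole revert: the restored lists name "x19" (and no longer "x3"), matching the re-introduction of x19 as a scratch register throughout the assembly above. Under AAPCS64, x19-x28 are callee-saved, so any inline-asm statement that scratches x19 must declare it clobbered or the compiler is entitled to assume x19 still holds a live value across the statement. A minimal sketch of that requirement follows; it is not part of this patch, and the function and values in it are hypothetical.

// Minimal sketch (assumption: aarch64 target, GCC/Clang extended asm).
// Shows why an inline-asm block that uses the callee-saved x19 must
// list it as clobbered, as the restored kernels above do.
#include <cstdint>

int64_t scaled_sum(int64_t a, int64_t b)
{
    int64_t result;
    __asm__ __volatile__(
        "mov x19, %x[a]\n"        // use x19 as a scratch register
        "add x19, x19, %x[b]\n"
        "lsl %x[res], x19, #1\n"  // result = (a + b) * 2
        : [res] "=r" (result)
        : [a] "r" (a), [b] "r" (b)
        : "x19"                   // declare x19 clobbered, so the compiler
    );                            // saves/restores it around the statement
    return result;
}

Without the "x19" clobber the code may still appear to work in isolated tests, since the surrounding function might not keep anything in x19; the failure only shows up when register pressure changes, which is why the clobber lists in these generated kernels track the register pool exactly.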
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
index 9fd220abf8..955a02de57 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -88,225 +88,225 @@ void sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
__asm__ __volatile__(
"ptrue p3.b\n"
- "mov x10, #0x0\n"
- "mov x14, #0x0\n"
+ "mov x17, #0x0\n"
+ "mov x16, #0x0\n"
"1:" // Tile loop
- "str x10, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "mov x25, #0x2\n"
- "mov x24, #0x2\n"
- "str x14, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x23, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x22, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "mul x21, x10, x23\n" // offset = tile_i * ld_input_row
- "ldr x13, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "ldr x12, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "mul x20, x10, x22\n" // offset = tile_i * ld_output_row
- "cnth x11\n"
- "madd x21, x14, x13, x21\n" // offset += tile_j * ld_input_col
- "ldr x10, [%x[params_struct], %[offsetof_args_params]]\n"
- "ldr x9, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "whilelt p2.h, XZR, %x[n_channels]\n"
- "madd x20, x14, x12, x20\n" // offset += tile_j * ld_output_col
+ "str x17, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "mov x23, #0x2\n"
+ "str x16, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "mov x15, #0x2\n"
+ "ldr x14, [%x[params_struct], %[offsetof_args_params]]\n"
+ "mov x13, #0x0\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "cnth x12\n"
+ "ldr x11, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "sub x21, XZR, x12\n"
+ "ldr x10, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "mul x19, x17, x22\n" // offset = tile_i * ld_input_row
+ "ldr x20, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "madd x19, x16, x11, x19\n" // offset += tile_j * ld_input_col
+ "ldr x9, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+ "mul x19, x19, x23\n" // offset *= kernel_stride * output_size
"ldr x28, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "ld1h { z18.h }, p3/Z, [x10]\n"
- "add x27, x13, x13\n"
- "mul x21, x21, x25\n" // offset *= kernel_stride * output_size
- "add x9, x9, x21, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
- "ld1h { z0.h }, p3/Z, [x10, #1, MUL VL]\n"
- "ld1h { z1.h }, p3/Z, [x10, #2, MUL VL]\n"
- "mul x20, x20, x24\n" // offset *= output_tile_size
- "ld1h { z2.h }, p3/Z, [x10, #3, MUL VL]\n"
- "ld1h { z3.h }, p3/Z, [x10, #4, MUL VL]\n"
- "add x26, x9, x23, LSL #1\n"
- "ld1h { z4.h }, p3/Z, [x10, #5, MUL VL]\n"
- "ld1h { z5.h }, p3/Z, [x10, #6, MUL VL]\n"
- "add x25, x26, x23, LSL #1\n"
- "add x24, x27, x13\n"
- "ld1h { z6.h }, p3/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
- "add x28, x28, x20, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
- "ld1rh { z17.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "cmp x11, %x[n_channels]\n"
- "add x23, x25, x23, LSL #1\n"
- "ld1rh { z16.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "ld1h { z7.h }, p3/Z, [x10, #-8, MUL VL]\n"
- "add x22, x28, x22, LSL #1\n"
- "mov x21, #0x0\n"
- "ld1h { z8.h }, p3/Z, [x10, #-7, MUL VL]\n"
- "ld1h { z9.h }, p2/Z, [x26, x13, LSL #1]\n"
- "sub x20, XZR, x11\n"
- "ld1h { z10.h }, p2/Z, [x9]\n"
- "ld1h { z11.h }, p2/Z, [x9, x24, LSL #1]\n"
- "addvl x10, x10, #-6\n"
- "ld1h { z12.h }, p2/Z, [x26, x27, LSL #1]\n"
- "ld1h { z13.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "add x10, x10, x19, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
+ "ld1rh { z18.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "add x27, x10, x22, LSL #1\n"
+ "ld1rh { z17.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "add x26, x27, x22, LSL #1\n"
+ "ld1h { z16.h }, p3/Z, [x14]\n"
+ "add x25, x26, x22, LSL #1\n"
+ "ld1h { z0.h }, p3/Z, [x14, #1, MUL VL]\n"
+ "add x24, x11, x11\n"
+ "ld1h { z1.h }, p3/Z, [x14, #2, MUL VL]\n"
+ "add x23, x24, x11\n"
+ "ld1h { z2.h }, p3/Z, [x14, #3, MUL VL]\n"
+ "mul x19, x17, x20\n" // offset = tile_i * ld_output_row
+ "ld1h { z3.h }, p3/Z, [x14, #4, MUL VL]\n"
+ "madd x19, x16, x9, x19\n" // offset += tile_j * ld_output_col
+ "ld1h { z4.h }, p3/Z, [x14, #5, MUL VL]\n"
+ "mul x19, x19, x15\n" // offset *= output_tile_size
+ "ld1h { z5.h }, p3/Z, [x14, #6, MUL VL]\n"
+ "add x28, x28, x19, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
+ "ld1h { z6.h }, p3/Z, [x14, #7, MUL VL]\n"
+ "add x22, x28, x20, LSL #1\n"
+ "whilelt p2.h, XZR, %x[n_channels]\n"
+ "ld1h { z9.h }, p2/Z, [x27, x11, LSL #1]\n"
+ "ld1h { z10.h }, p2/Z, [x10]\n"
+ "addvl x14, x14, #16\n"
+ "ld1h { z11.h }, p2/Z, [x10, x23, LSL #1]\n"
+ "cmp x12, %x[n_channels]\n"
+ "ld1h { z7.h }, p3/Z, [x14, #-8, MUL VL]\n"
+ "ld1h { z8.h }, p3/Z, [x14, #-7, MUL VL]\n"
+ "addvl x14, x14, #-6\n"
+ "ld1h { z12.h }, p2/Z, [x27, x24, LSL #1]\n"
+ "ld1h { z13.h }, p2/Z, [x26, x11, LSL #1]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
- "movprfx z28, z18\n fmla z28.h, p3/M, z4.h, z9.h\n"
- "movprfx z29, z18\n fmla z29.h, p3/M, z3.h, z9.h\n"
- "whilelt p1.h, x11, %x[n_channels]\n"
+ "movprfx z31, z16\n fmla z31.h, p3/M, z4.h, z9.h\n"
+ "whilelt p1.h, x12, %x[n_channels]\n"
+ "movprfx z30, z16\n fmla z30.h, p3/M, z3.h, z9.h\n"
"inch x21\n"
- "movprfx z30, z18\n fmla z30.h, p3/M, z1.h, z9.h\n"
- "movprfx z31, z18\n fmla z31.h, p3/M, z0.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x23]\n"
- "inch x11\n"
- "fmla z28.h, p3/M, z0.h, z10.h\n"
- "fmla z29.h, p3/M, z2.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x23, x24, LSL #1]\n"
- "ld1h { z10.h }, p2/Z, [x25, x27, LSL #1]\n"
- "fmla z30.h, p3/M, z2.h, z12.h\n"
- "fmla z31.h, p3/M, z1.h, z12.h\n"
+ "movprfx z29, z16\n fmla z29.h, p3/M, z1.h, z9.h\n"
"mov p0.b, p2.b\n"
- "ld1h { z18.h }, p3/Z, [x10]\n"
- "fmla z28.h, p3/M, z5.h, z12.h\n"
- "fmla z29.h, p3/M, z4.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x9, x13, LSL #1]\n"
- "inch x20\n"
- "fmla z30.h, p3/M, z6.h, z9.h\n"
- "fmla z31.h, p3/M, z3.h, z13.h\n"
- "ld1h { z9.h }, p2/Z, [x9, x27, LSL #1]\n"
- "addvl x9, x9, #1\n"
- "fmla z28.h, p3/M, z7.h, z13.h\n"
- "fmla z29.h, p3/M, z6.h, z13.h\n"
- "fmla z30.h, p3/M, z4.h, z13.h\n"
- "fmla z31.h, p3/M, z8.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x26]\n"
+ "movprfx z28, z16\n fmla z28.h, p3/M, z0.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x25]\n"
+ "inch x13\n"
+ "fmla z31.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x26, x24, LSL #1]\n"
+ "inch x12\n"
+ "fmla z30.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x25, x23, LSL #1]\n"
+ "fmla z29.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z16.h }, p3/Z, [x14]\n"
"fmla z28.h, p3/M, z1.h, z12.h\n"
- "fmla z29.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x26, x24, LSL #1]\n"
+ "fmla z31.h, p3/M, z5.h, z12.h\n"
+ "fmla z30.h, p3/M, z4.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x10, x11, LSL #1]\n"
+ "fmla z29.h, p3/M, z6.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x10, x24, LSL #1]\n"
+ "addvl x10, x10, #1\n"
+ "fmla z28.h, p3/M, z3.h, z13.h\n"
+ "fmla z31.h, p3/M, z7.h, z13.h\n"
+ "fmla z30.h, p3/M, z6.h, z13.h\n"
+ "fmla z29.h, p3/M, z4.h, z13.h\n"
+ "fmla z28.h, p3/M, z8.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x27]\n"
+ "fmla z31.h, p3/M, z1.h, z12.h\n"
+ "fmla z30.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x27, x23, LSL #1]\n"
+ "addvl x27, x27, #1\n"
+ "fmla z29.h, p3/M, z5.h, z10.h\n"
+ "fmla z28.h, p3/M, z4.h, z10.h\n"
+ "ld1h { z4.h }, p3/Z, [x14, #5, MUL VL]\n"
+ "fmla z31.h, p3/M, z2.h, z9.h\n"
+ "fmla z30.h, p3/M, z1.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x26]\n"
+ "ld1h { z1.h }, p3/Z, [x14, #2, MUL VL]\n"
+ "fmla z29.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z0.h }, p3/Z, [x14, #1, MUL VL]\n"
+ "fmla z28.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z2.h }, p3/Z, [x14, #3, MUL VL]\n"
+ "fmla z31.h, p3/M, z8.h, z10.h\n"
+ "fmla z30.h, p3/M, z7.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x26, x23, LSL #1]\n"
"addvl x26, x26, #1\n"
- "fmla z30.h, p3/M, z5.h, z10.h\n"
- "fmla z31.h, p3/M, z4.h, z10.h\n"
- "ld1h { z4.h }, p3/Z, [x10, #5, MUL VL]\n"
- "fmla z28.h, p3/M, z2.h, z9.h\n"
- "fmla z29.h, p3/M, z1.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x25]\n"
- "ld1h { z1.h }, p3/Z, [x10, #2, MUL VL]\n"
- "fmla z30.h, p3/M, z0.h, z11.h\n"
- "fmla z31.h, p3/M, z2.h, z12.h\n"
- "ld1h { z0.h }, p3/Z, [x10, #1, MUL VL]\n"
- "ld1h { z2.h }, p3/Z, [x10, #3, MUL VL]\n"
- "fmla z28.h, p3/M, z8.h, z10.h\n"
- "fmla z29.h, p3/M, z7.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x25, x24, LSL #1]\n"
+ "fmla z29.h, p3/M, z3.h, z9.h\n"
+ "ld1h { z13.h }, p1/Z, [x26, x11, LSL #1]\n"
+ "fmla z31.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x25, x11, LSL #1]\n"
+ "fmla z28.h, p3/M, z5.h, z10.h\n"
+ "ld1h { z3.h }, p3/Z, [x14, #4, MUL VL]\n"
+ "fmla z30.h, p3/M, z5.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x25, x24, LSL #1]\n"
+ "whilelt p2.h, x13, %x[n_channels]\n"
+ "fmla z29.h, p3/M, z7.h, z11.h\n"
+ "ld1h { z5.h }, p3/Z, [x14, #6, MUL VL]\n"
"addvl x25, x25, #1\n"
- "fmla z30.h, p3/M, z3.h, z9.h\n"
- "fmla z31.h, p3/M, z5.h, z10.h\n"
- "ld1h { z13.h }, p1/Z, [x25, x13, LSL #1]\n"
- "fmla z28.h, p3/M, z3.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x23, x13, LSL #1]\n"
- "fmla z29.h, p3/M, z5.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x23, x27, LSL #1]\n"
- "fmla z30.h, p3/M, z7.h, z11.h\n"
- "fmla z31.h, p3/M, z6.h, z11.h\n"
- "ld1h { z3.h }, p3/Z, [x10, #4, MUL VL]\n"
- "ld1h { z5.h }, p3/Z, [x10, #6, MUL VL]\n"
- "fmla z28.h, p3/M, z6.h, z9.h\n"
- "fmla z29.h, p3/M, z8.h, z10.h\n"
- "fmax z28.h, p3/M, z28.h, z17.h\n"
- "fmax z29.h, p3/M, z29.h, z17.h\n"
- "fmla z30.h, p3/M, z8.h, z12.h\n"
- "fmla z31.h, p3/M, z7.h, z12.h\n"
- "fmax z30.h, p3/M, z30.h, z17.h\n"
- "fmax z31.h, p3/M, z31.h, z17.h\n"
- "ld1h { z6.h }, p3/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
- "whilelt p2.h, x21, %x[n_channels]\n"
- "ld1h { z9.h }, p1/Z, [x26, x13, LSL #1]\n"
- "cmp x11, %x[n_channels]\n"
- "fmin z28.h, p3/M, z28.h, z16.h\n"
- "ld1h { z10.h }, p1/Z, [x9]\n"
- "ld1h { z11.h }, p1/Z, [x9, x24, LSL #1]\n"
- "fmin z29.h, p3/M, z29.h, z16.h\n"
- "fmin z30.h, p3/M, z30.h, z16.h\n"
- "ld1h { z12.h }, p1/Z, [x26, x27, LSL #1]\n"
- "st1h { z28.h }, p0, [x28]\n"
- "fmin z31.h, p3/M, z31.h, z16.h\n"
- "addvl x23, x23, #1\n"
- "st1h { z29.h }, p0, [x28, x12, LSL #1]\n"
- "ld1h { z7.h }, p3/Z, [x10, #-8, MUL VL]\n"
- "st1h { z30.h }, p0, [x22]\n"
+ "fmla z31.h, p3/M, z6.h, z9.h\n"
+ "ld1h { z9.h }, p1/Z, [x27, x11, LSL #1]\n"
+ "cmp x12, %x[n_channels]\n"
+ "fmla z30.h, p3/M, z8.h, z10.h\n"
+ "ld1h { z10.h }, p1/Z, [x10]\n"
+ "fmla z28.h, p3/M, z6.h, z11.h\n"
+ "ld1h { z11.h }, p1/Z, [x10, x23, LSL #1]\n"
+ "ld1h { z6.h }, p3/Z, [x14, #7, MUL VL]\n"
+ "fmla z29.h, p3/M, z8.h, z12.h\n"
+ "addvl x14, x14, #16\n"
+ "fmax z31.h, p3/M, z31.h, z18.h\n"
+ "ld1h { z8.h }, p3/Z, [x14, #-7, MUL VL]\n"
+ "fmla z28.h, p3/M, z7.h, z12.h\n"
+ "ld1h { z12.h }, p1/Z, [x27, x24, LSL #1]\n"
+ "fmax z30.h, p3/M, z30.h, z18.h\n"
+ "ld1h { z7.h }, p3/Z, [x14, #-8, MUL VL]\n"
+ "addvl x14, x14, #-6\n"
+ "fmax z29.h, p3/M, z29.h, z18.h\n"
+ "fmin z31.h, p3/M, z31.h, z17.h\n"
+ "st1h { z31.h }, p0, [x28]\n"
+ "fmin z30.h, p3/M, z30.h, z17.h\n"
+ "fmin z29.h, p3/M, z29.h, z17.h\n"
+ "st1h { z30.h }, p0, [x28, x9, LSL #1]\n"
+ "fmax z28.h, p3/M, z28.h, z18.h\n"
"addvl x28, x28, #1\n"
- "ld1h { z8.h }, p3/Z, [x10, #-7, MUL VL]\n"
- "addvl x10, x10, #-6\n"
- "st1h { z31.h }, p0, [x22, x12, LSL #1]\n"
+ "fmin z28.h, p3/M, z28.h, z17.h\n"
+ "st1h { z29.h }, p0, [x22]\n"
+ "st1h { z28.h }, p0, [x22, x9, LSL #1]\n"
"addvl x22, x22, #1\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
- "movprfx z28, z18\n fmla z28.h, p3/M, z4.h, z9.h\n"
- "movprfx z29, z18\n fmla z29.h, p3/M, z3.h, z9.h\n"
- "ldr x14, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x10, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "movprfx z30, z18\n fmla z30.h, p3/M, z1.h, z9.h\n"
- "movprfx z31, z18\n fmla z31.h, p3/M, z0.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x23]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "fmla z28.h, p3/M, z0.h, z10.h\n"
- "fmla z29.h, p3/M, z2.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x23, x24, LSL #1]\n"
- "ld1h { z10.h }, p2/Z, [x25, x27, LSL #1]\n"
- "fmla z30.h, p3/M, z2.h, z12.h\n"
- "fmla z31.h, p3/M, z1.h, z12.h\n"
- "add x14, x14, #0x1\n"
- "cmp x14, x20\n"
- "fmla z28.h, p3/M, z5.h, z12.h\n"
- "fmla z29.h, p3/M, z4.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x9, x13, LSL #1]\n"
- "add x21, x10, #0x1\n"
- "fmla z30.h, p3/M, z6.h, z9.h\n"
- "fmla z31.h, p3/M, z3.h, z13.h\n"
- "ld1h { z9.h }, p2/Z, [x9, x27, LSL #1]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "fmla z28.h, p3/M, z7.h, z13.h\n"
- "fmla z29.h, p3/M, z6.h, z13.h\n"
- "csel x10, x10, x21, LT\n"
+ "movprfx z31, z16\n fmla z31.h, p3/M, z4.h, z9.h\n"
+ "ldr x17, [%x[params_struct], %[offsetof_args_tile_i]]\n"
"mov p0.b, p2.b\n"
- "fmla z30.h, p3/M, z4.h, z13.h\n"
- "fmla z31.h, p3/M, z8.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x26]\n"
- "csel x14, x14, XZR, LT\n"
- "fmla z28.h, p3/M, z1.h, z12.h\n"
- "fmla z29.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x26, x24, LSL #1]\n"
- "cmp x10, x20\n"
- "fmla z30.h, p3/M, z5.h, z10.h\n"
- "fmla z31.h, p3/M, z4.h, z10.h\n"
- "fmla z28.h, p3/M, z2.h, z9.h\n"
- "fmla z29.h, p3/M, z1.h, z9.h\n"
+ "movprfx z30, z16\n fmla z30.h, p3/M, z3.h, z9.h\n"
+ "ldr x16, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "add x21, x17, #0x1\n"
+ "movprfx z29, z16\n fmla z29.h, p3/M, z1.h, z9.h\n"
+ "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "movprfx z28, z16\n fmla z28.h, p3/M, z0.h, z9.h\n"
"ld1h { z9.h }, p2/Z, [x25]\n"
- "fmla z30.h, p3/M, z0.h, z11.h\n"
- "fmla z31.h, p3/M, z2.h, z12.h\n"
- "fmla z28.h, p3/M, z8.h, z10.h\n"
- "fmla z29.h, p3/M, z7.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x25, x24, LSL #1]\n"
- "fmla z30.h, p3/M, z3.h, z9.h\n"
- "fmla z31.h, p3/M, z5.h, z10.h\n"
- "fmla z28.h, p3/M, z3.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x23, x13, LSL #1]\n"
- "fmla z29.h, p3/M, z5.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x23, x27, LSL #1]\n"
- "fmla z30.h, p3/M, z7.h, z11.h\n"
- "fmla z31.h, p3/M, z6.h, z11.h\n"
- "fmla z28.h, p3/M, z6.h, z9.h\n"
- "fmla z29.h, p3/M, z8.h, z10.h\n"
- "fmax z28.h, p3/M, z28.h, z17.h\n"
- "fmax z29.h, p3/M, z29.h, z17.h\n"
- "fmla z30.h, p3/M, z8.h, z12.h\n"
- "fmla z31.h, p3/M, z7.h, z12.h\n"
- "fmax z30.h, p3/M, z30.h, z17.h\n"
- "fmax z31.h, p3/M, z31.h, z17.h\n"
- "fmin z28.h, p3/M, z28.h, z16.h\n"
- "fmin z29.h, p3/M, z29.h, z16.h\n"
- "st1h { z28.h }, p0, [x28]\n"
- "fmin z30.h, p3/M, z30.h, z16.h\n"
- "fmin z31.h, p3/M, z31.h, z16.h\n"
- "st1h { z29.h }, p0, [x28, x12, LSL #1]\n"
- "st1h { z30.h }, p0, [x22]\n"
- "st1h { z31.h }, p0, [x22, x12, LSL #1]\n"
+ "add x16, x16, #0x1\n"
+ "fmla z31.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x26, x24, LSL #1]\n"
+ "fmla z30.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x25, x23, LSL #1]\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "fmla z29.h, p3/M, z2.h, z12.h\n"
+ "cmp x16, x19\n"
+ "fmla z31.h, p3/M, z5.h, z12.h\n"
+ "fmla z30.h, p3/M, z4.h, z12.h\n"
+ "csel x16, x16, XZR, LT\n"
+ "fmla z28.h, p3/M, z1.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x10, x11, LSL #1]\n"
+ "csel x17, x17, x21, LT\n"
+ "fmla z29.h, p3/M, z6.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x10, x24, LSL #1]\n"
+ "cmp x17, x20\n"
+ "fmla z31.h, p3/M, z7.h, z13.h\n"
+ "fmla z30.h, p3/M, z6.h, z13.h\n"
+ "fmla z28.h, p3/M, z3.h, z13.h\n"
+ "fmla z29.h, p3/M, z4.h, z13.h\n"
+ "fmla z31.h, p3/M, z1.h, z12.h\n"
+ "fmla z30.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x27, x23, LSL #1]\n"
+ "fmla z28.h, p3/M, z8.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x27]\n"
+ "fmla z29.h, p3/M, z5.h, z10.h\n"
+ "fmla z31.h, p3/M, z2.h, z9.h\n"
+ "fmla z30.h, p3/M, z1.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x26]\n"
+ "fmla z28.h, p3/M, z4.h, z10.h\n"
+ "fmla z29.h, p3/M, z0.h, z11.h\n"
+ "fmla z31.h, p3/M, z8.h, z10.h\n"
+ "fmla z30.h, p3/M, z7.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x26, x23, LSL #1]\n"
+ "fmla z28.h, p3/M, z2.h, z12.h\n"
+ "fmla z29.h, p3/M, z3.h, z9.h\n"
+ "fmla z31.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x25, x11, LSL #1]\n"
+ "fmla z30.h, p3/M, z5.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x25, x24, LSL #1]\n"
+ "fmla z28.h, p3/M, z5.h, z10.h\n"
+ "fmla z29.h, p3/M, z7.h, z11.h\n"
+ "fmla z31.h, p3/M, z6.h, z9.h\n"
+ "fmla z30.h, p3/M, z8.h, z10.h\n"
+ "fmla z28.h, p3/M, z6.h, z11.h\n"
+ "fmla z29.h, p3/M, z8.h, z12.h\n"
+ "fmax z31.h, p3/M, z31.h, z18.h\n"
+ "fmax z30.h, p3/M, z30.h, z18.h\n"
+ "fmla z28.h, p3/M, z7.h, z12.h\n"
+ "fmax z29.h, p3/M, z29.h, z18.h\n"
+ "fmin z31.h, p3/M, z31.h, z17.h\n"
+ "st1h { z31.h }, p0, [x28]\n"
+ "fmin z30.h, p3/M, z30.h, z17.h\n"
+ "fmin z29.h, p3/M, z29.h, z17.h\n"
+ "st1h { z30.h }, p0, [x28, x9, LSL #1]\n"
+ "fmax z28.h, p3/M, z28.h, z18.h\n"
+ "st1h { z29.h }, p0, [x22]\n"
+ "fmin z28.h, p3/M, z28.h, z17.h\n"
+ "st1h { z28.h }, p0, [x22, x9, LSL #1]\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z16", "z17", "z18", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z16", "z17", "z18", "z28", "z29", "z30", "z31"
);
}
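For readability: both sides of the tile-loop hunks above compute the same input/output base addresses; only the register assignment differs. The mul/madd/add sequence, rewritten as scalar C++ following the asm comments (a sketch for reference, not part of the patch; __fp16 assumes an AArch64 target):

    #include <cstddef>

    // offset  = tile_i * ld_input_row          ("mul  x19, x17, x22")
    // offset += tile_j * ld_input_col          ("madd x19, x16, x11, x19")
    // offset *= kernel_stride * output_size    ("mul  x19, x19, x23")
    // inptr  += offset * sizeof(__fp16)        ("add  x10, x10, x19, LSL #1")
    inline const __fp16* tile_input_ptr(const __fp16* inptr,
                                        size_t tile_i, size_t tile_j,
                                        size_t ld_input_row, size_t ld_input_col,
                                        size_t stride_scale)
    {
        size_t offset = tile_i * ld_input_row;
        offset += tile_j * ld_input_col;
        offset *= stride_scale;   // here #0x2: stride-1 kernel, 2x2 output tile
        return inptr + offset;    // pointer arithmetic supplies the "LSL #1" scaling
    }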
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
index 9242b470c3..7cca6fbcbf 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -78,215 +78,215 @@ void sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
activation_min, activation_max);
__asm__ __volatile__(
+ "ldr x19, [%x[params_struct], %[offsetof_args_outptrs]]\n"
"ptrue p3.b\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "ldr x16, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x15, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "cnth x14\n"
- "ldp x13, x12, [x20, #0x0]\n"
- "ldp x11, x10, [x20, #0x10]\n"
- "mov x9, #0x0\n"
+ "ldr x15, [%x[params_struct], %[offsetof_args_params]]\n"
+ "add x14, %x[params_struct], %[offsetof_Args_inptrs]\n"
+ "ld1rh { z18.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "mov x13, #0x0\n"
+ "ld1rh { z17.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "cnth x12\n"
+ "ldp x11, x10, [x19, #0x0]\n"
+ "sub x9, XZR, x12\n"
+ "ldp x28, x27, [x19, #0x10]\n"
"whilelt p2.h, XZR, %x[n_channels]\n"
- "ld1h { z18.h }, p3/Z, [x16]\n"
- "ld1h { z0.h }, p3/Z, [x16, #1, MUL VL]\n"
- "cmp x14, %x[n_channels]\n"
- "ld1h { z1.h }, p3/Z, [x16, #2, MUL VL]\n"
- "ld1h { z2.h }, p3/Z, [x16, #3, MUL VL]\n"
- "sub x28, XZR, x14\n"
- "ld1h { z3.h }, p3/Z, [x16, #4, MUL VL]\n"
- "ld1h { z4.h }, p3/Z, [x16, #5, MUL VL]\n"
- "ld1h { z5.h }, p3/Z, [x16, #6, MUL VL]\n"
- "ld1h { z6.h }, p3/Z, [x16, #7, MUL VL]\n"
- "addvl x16, x16, #16\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "ldr x23, [x15, #0x20]\n"
- "ld1rh { z17.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ld1rh { z16.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "ld1h { z7.h }, p3/Z, [x16, #-8, MUL VL]\n"
- "ld1h { z8.h }, p3/Z, [x16, #-7, MUL VL]\n"
- "ld1h { z9.h }, p2/Z, [x27, x9, LSL #1]\n"
- "addvl x16, x16, #-6\n"
- "ld1h { z10.h }, p2/Z, [x26, x9, LSL #1]\n"
- "ld1h { z11.h }, p2/Z, [x25, x9, LSL #1]\n"
- "ld1h { z12.h }, p2/Z, [x24, x9, LSL #1]\n"
- "ld1h { z13.h }, p2/Z, [x23, x9, LSL #1]\n"
+ "ld1h { z16.h }, p3/Z, [x15]\n"
+ "cmp x12, %x[n_channels]\n"
+ "ld1h { z0.h }, p3/Z, [x15, #1, MUL VL]\n"
+ "ld1h { z1.h }, p3/Z, [x15, #2, MUL VL]\n"
+ "ld1h { z2.h }, p3/Z, [x15, #3, MUL VL]\n"
+ "ld1h { z3.h }, p3/Z, [x15, #4, MUL VL]\n"
+ "ld1h { z4.h }, p3/Z, [x15, #5, MUL VL]\n"
+ "ld1h { z5.h }, p3/Z, [x15, #6, MUL VL]\n"
+ "ld1h { z6.h }, p3/Z, [x15, #7, MUL VL]\n"
+ "addvl x15, x15, #16\n"
+ "ldp x26, x25, [x14, #0x0]\n"
+ "ld1h { z7.h }, p3/Z, [x15, #-8, MUL VL]\n"
+ "ld1h { z8.h }, p3/Z, [x15, #-7, MUL VL]\n"
+ "addvl x15, x15, #-6\n"
+ "ld1h { z9.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "ld1h { z10.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "ldp x24, x23, [x14, #0x10]\n"
+ "ldr x22, [x14, #0x20]\n"
+ "ld1h { z11.h }, p2/Z, [x24, x13, LSL #1]\n"
+ "ld1h { z12.h }, p2/Z, [x23, x13, LSL #1]\n"
+ "ld1h { z13.h }, p2/Z, [x22, x13, LSL #1]\n"
"bge 2f\n"
"1:" // Channel loop
- "movprfx z28, z18\n fmla z28.h, p3/M, z4.h, z9.h\n"
- "movprfx z29, z18\n fmla z29.h, p3/M, z3.h, z9.h\n"
- "ldr x22, [x15, #0x28]\n"
- "ldr x21, [x15, #0x30]\n"
- "movprfx z30, z18\n fmla z30.h, p3/M, z1.h, z9.h\n"
- "movprfx z31, z18\n fmla z31.h, p3/M, z0.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x22, x9, LSL #1]\n"
- "ldr x20, [x15, #0x38]\n"
- "fmla z28.h, p3/M, z0.h, z10.h\n"
- "fmla z29.h, p3/M, z2.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x21, x9, LSL #1]\n"
- "ldr x26, [x15, #0x48]\n"
- "fmla z30.h, p3/M, z2.h, z12.h\n"
- "fmla z31.h, p3/M, z1.h, z12.h\n"
- "ldr x27, [x15, #0x40]\n"
- "ld1h { z10.h }, p2/Z, [x26, x9, LSL #1]\n"
- "fmla z28.h, p3/M, z5.h, z12.h\n"
- "fmla z29.h, p3/M, z4.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x20, x9, LSL #1]\n"
- "ldr x25, [x15, #0x50]\n"
- "fmla z30.h, p3/M, z6.h, z9.h\n"
- "fmla z31.h, p3/M, z3.h, z13.h\n"
- "ld1h { z9.h }, p2/Z, [x27, x9, LSL #1]\n"
- "ldr x24, [x15, #0x58]\n"
- "fmla z28.h, p3/M, z7.h, z13.h\n"
- "fmla z29.h, p3/M, z6.h, z13.h\n"
- "ldr x23, [x15, #0x60]\n"
- "ldr x22, [x15, #0x68]\n"
- "fmla z30.h, p3/M, z4.h, z13.h\n"
- "fmla z31.h, p3/M, z8.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x25, x9, LSL #1]\n"
- "ldr x21, [x15, #0x70]\n"
- "fmla z28.h, p3/M, z1.h, z12.h\n"
- "fmla z29.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x24, x9, LSL #1]\n"
- "ldr x20, [x15, #0x78]\n"
- "fmla z30.h, p3/M, z5.h, z10.h\n"
- "fmla z31.h, p3/M, z4.h, z10.h\n"
- "whilelt p1.h, x14, %x[n_channels]\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "fmla z28.h, p3/M, z2.h, z9.h\n"
- "fmla z29.h, p3/M, z1.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x23, x9, LSL #1]\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "fmla z30.h, p3/M, z0.h, z11.h\n"
- "fmla z31.h, p3/M, z2.h, z12.h\n"
- "ldr x23, [x15, #0x20]\n"
- "ld1h { z13.h }, p1/Z, [x23, x14, LSL #1]\n"
- "fmla z28.h, p3/M, z8.h, z10.h\n"
- "fmla z29.h, p3/M, z7.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x22, x9, LSL #1]\n"
- "inch x28\n"
- "fmla z30.h, p3/M, z3.h, z9.h\n"
- "fmla z31.h, p3/M, z5.h, z10.h\n"
- "mov p0.b, p2.b\n"
- "ld1h { z18.h }, p3/Z, [x16]\n"
- "fmla z28.h, p3/M, z3.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x21, x9, LSL #1]\n"
- "fmla z29.h, p3/M, z5.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x20, x9, LSL #1]\n"
- "fmla z30.h, p3/M, z7.h, z11.h\n"
- "fmla z31.h, p3/M, z6.h, z11.h\n"
+ "movprfx z31, z16\n fmla z31.h, p3/M, z4.h, z9.h\n"
+ "ldr x21, [x14, #0x28]\n"
+ "whilelt p1.h, x12, %x[n_channels]\n"
+ "movprfx z30, z16\n fmla z30.h, p3/M, z3.h, z9.h\n"
+ "ldr x20, [x14, #0x30]\n"
"inch x9\n"
- "ld1h { z11.h }, p1/Z, [x25, x14, LSL #1]\n"
- "fmla z28.h, p3/M, z6.h, z9.h\n"
- "fmla z29.h, p3/M, z8.h, z10.h\n"
- "ld1h { z9.h }, p1/Z, [x27, x14, LSL #1]\n"
- "ld1h { z10.h }, p1/Z, [x26, x14, LSL #1]\n"
- "fmla z30.h, p3/M, z8.h, z12.h\n"
- "fmla z31.h, p3/M, z7.h, z12.h\n"
- "ld1h { z12.h }, p1/Z, [x24, x14, LSL #1]\n"
- "inch x14\n"
- "fmax z28.h, p3/M, z28.h, z17.h\n"
- "fmax z29.h, p3/M, z29.h, z17.h\n"
- "ld1h { z0.h }, p3/Z, [x16, #1, MUL VL]\n"
- "ld1h { z1.h }, p3/Z, [x16, #2, MUL VL]\n"
- "fmax z30.h, p3/M, z30.h, z17.h\n"
- "fmax z31.h, p3/M, z31.h, z17.h\n"
- "ld1h { z2.h }, p3/Z, [x16, #3, MUL VL]\n"
- "ld1h { z3.h }, p3/Z, [x16, #4, MUL VL]\n"
- "ld1h { z4.h }, p3/Z, [x16, #5, MUL VL]\n"
- "ld1h { z5.h }, p3/Z, [x16, #6, MUL VL]\n"
- "whilelt p2.h, x9, %x[n_channels]\n"
- "cmp x14, %x[n_channels]\n"
- "ld1h { z6.h }, p3/Z, [x16, #7, MUL VL]\n"
- "addvl x16, x16, #16\n"
- "fmin z28.h, p3/M, z28.h, z16.h\n"
- "st1h { z28.h }, p0, [x13, x28, LSL #1]\n"
- "fmin z29.h, p3/M, z29.h, z16.h\n"
- "fmin z30.h, p3/M, z30.h, z16.h\n"
- "st1h { z29.h }, p0, [x12, x28, LSL #1]\n"
- "ld1h { z7.h }, p3/Z, [x16, #-8, MUL VL]\n"
- "fmin z31.h, p3/M, z31.h, z16.h\n"
- "st1h { z30.h }, p0, [x11, x28, LSL #1]\n"
- "ld1h { z8.h }, p3/Z, [x16, #-7, MUL VL]\n"
- "addvl x16, x16, #-6\n"
- "st1h { z31.h }, p0, [x10, x28, LSL #1]\n"
+ "movprfx z29, z16\n fmla z29.h, p3/M, z1.h, z9.h\n"
+ "ldr x19, [x14, #0x38]\n"
+ "mov p0.b, p2.b\n"
+ "movprfx z28, z16\n fmla z28.h, p3/M, z0.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x21, x13, LSL #1]\n"
+ "ldr x26, [x14, #0x40]\n"
+ "fmla z31.h, p3/M, z0.h, z10.h\n"
+ "ldr x25, [x14, #0x48]\n"
+ "fmla z30.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x20, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z2.h, z12.h\n"
+ "ldr x24, [x14, #0x50]\n"
+ "fmla z28.h, p3/M, z1.h, z12.h\n"
+ "ld1h { z10.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "fmla z31.h, p3/M, z5.h, z12.h\n"
+ "ldr x23, [x14, #0x58]\n"
+ "fmla z30.h, p3/M, z4.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x19, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z6.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "fmla z28.h, p3/M, z3.h, z13.h\n"
+ "ldr x22, [x14, #0x60]\n"
+ "fmla z31.h, p3/M, z7.h, z13.h\n"
+ "ldr x21, [x14, #0x68]\n"
+ "fmla z30.h, p3/M, z6.h, z13.h\n"
+ "ldr x20, [x14, #0x70]\n"
+ "fmla z29.h, p3/M, z4.h, z13.h\n"
+ "ldr x19, [x14, #0x78]\n"
+ "fmla z28.h, p3/M, z8.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x24, x13, LSL #1]\n"
+ "fmla z31.h, p3/M, z1.h, z12.h\n"
+ "ldp x26, x25, [x14, #0x0]\n"
+ "fmla z30.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x23, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z5.h, z10.h\n"
+ "ldp x24, x23, [x14, #0x10]\n"
+ "fmla z28.h, p3/M, z4.h, z10.h\n"
+ "ld1h { z16.h }, p3/Z, [x15]\n"
+ "fmla z31.h, p3/M, z2.h, z9.h\n"
+ "ld1h { z4.h }, p3/Z, [x15, #5, MUL VL]\n"
+ "fmla z30.h, p3/M, z1.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x22, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z0.h, z11.h\n"
+ "ldr x22, [x14, #0x20]\n"
+ "fmla z28.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z0.h }, p3/Z, [x15, #1, MUL VL]\n"
+ "fmla z31.h, p3/M, z8.h, z10.h\n"
+ "ld1h { z1.h }, p3/Z, [x15, #2, MUL VL]\n"
+ "fmla z30.h, p3/M, z7.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x21, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z3.h, z9.h\n"
+ "ld1h { z13.h }, p1/Z, [x22, x12, LSL #1]\n"
+ "fmla z31.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x20, x13, LSL #1]\n"
+ "fmla z28.h, p3/M, z5.h, z10.h\n"
+ "ld1h { z2.h }, p3/Z, [x15, #3, MUL VL]\n"
+ "fmla z30.h, p3/M, z5.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x19, x13, LSL #1]\n"
+ "inch x13\n"
+ "fmla z29.h, p3/M, z7.h, z11.h\n"
+ "ld1h { z3.h }, p3/Z, [x15, #4, MUL VL]\n"
+ "whilelt p2.h, x13, %x[n_channels]\n"
+ "fmla z31.h, p3/M, z6.h, z9.h\n"
+ "ld1h { z9.h }, p1/Z, [x26, x12, LSL #1]\n"
+ "fmla z28.h, p3/M, z6.h, z11.h\n"
+ "ld1h { z11.h }, p1/Z, [x24, x12, LSL #1]\n"
+ "fmla z30.h, p3/M, z8.h, z10.h\n"
+ "ld1h { z10.h }, p1/Z, [x25, x12, LSL #1]\n"
+ "ld1h { z5.h }, p3/Z, [x15, #6, MUL VL]\n"
+ "fmla z29.h, p3/M, z8.h, z12.h\n"
+ "ld1h { z6.h }, p3/Z, [x15, #7, MUL VL]\n"
+ "fmla z28.h, p3/M, z7.h, z12.h\n"
+ "addvl x15, x15, #16\n"
+ "fmax z31.h, p3/M, z31.h, z18.h\n"
+ "ld1h { z12.h }, p1/Z, [x23, x12, LSL #1]\n"
+ "inch x12\n"
+ "fmax z30.h, p3/M, z30.h, z18.h\n"
+ "ld1h { z7.h }, p3/Z, [x15, #-8, MUL VL]\n"
+ "cmp x12, %x[n_channels]\n"
+ "fmax z29.h, p3/M, z29.h, z18.h\n"
+ "ld1h { z8.h }, p3/Z, [x15, #-7, MUL VL]\n"
+ "addvl x15, x15, #-6\n"
+ "fmax z28.h, p3/M, z28.h, z18.h\n"
+ "fmin z31.h, p3/M, z31.h, z17.h\n"
+ "st1h { z31.h }, p0, [x11, x9, LSL #1]\n"
+ "fmin z30.h, p3/M, z30.h, z17.h\n"
+ "fmin z29.h, p3/M, z29.h, z17.h\n"
+ "st1h { z30.h }, p0, [x10, x9, LSL #1]\n"
+ "fmin z28.h, p3/M, z28.h, z17.h\n"
+ "st1h { z29.h }, p0, [x28, x9, LSL #1]\n"
+ "st1h { z28.h }, p0, [x27, x9, LSL #1]\n"
"blt 1b\n"
"2:" // Channel tail
- "movprfx z28, z18\n fmla z28.h, p3/M, z4.h, z9.h\n"
- "movprfx z29, z18\n fmla z29.h, p3/M, z3.h, z9.h\n"
- "ldr x22, [x15, #0x28]\n"
- "ldr x21, [x15, #0x30]\n"
- "movprfx z30, z18\n fmla z30.h, p3/M, z1.h, z9.h\n"
- "movprfx z31, z18\n fmla z31.h, p3/M, z0.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x22, x9, LSL #1]\n"
- "ldr x20, [x15, #0x38]\n"
- "fmla z28.h, p3/M, z0.h, z10.h\n"
- "fmla z29.h, p3/M, z2.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x21, x9, LSL #1]\n"
- "ldr x26, [x15, #0x48]\n"
- "fmla z30.h, p3/M, z2.h, z12.h\n"
- "fmla z31.h, p3/M, z1.h, z12.h\n"
- "ldr x27, [x15, #0x40]\n"
- "ld1h { z10.h }, p2/Z, [x26, x9, LSL #1]\n"
- "fmla z28.h, p3/M, z5.h, z12.h\n"
- "fmla z29.h, p3/M, z4.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x20, x9, LSL #1]\n"
- "ldr x25, [x15, #0x50]\n"
- "fmla z30.h, p3/M, z6.h, z9.h\n"
- "fmla z31.h, p3/M, z3.h, z13.h\n"
- "ld1h { z9.h }, p2/Z, [x27, x9, LSL #1]\n"
- "ldr x24, [x15, #0x58]\n"
- "fmla z28.h, p3/M, z7.h, z13.h\n"
- "fmla z29.h, p3/M, z6.h, z13.h\n"
- "ldr x23, [x15, #0x60]\n"
- "ldr x22, [x15, #0x68]\n"
- "fmla z30.h, p3/M, z4.h, z13.h\n"
- "fmla z31.h, p3/M, z8.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x25, x9, LSL #1]\n"
- "ldr x21, [x15, #0x70]\n"
- "fmla z28.h, p3/M, z1.h, z12.h\n"
- "fmla z29.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x24, x9, LSL #1]\n"
- "ldr x20, [x15, #0x78]\n"
- "fmla z30.h, p3/M, z5.h, z10.h\n"
- "fmla z31.h, p3/M, z4.h, z10.h\n"
- "inch x28\n"
+ "movprfx z31, z16\n fmla z31.h, p3/M, z4.h, z9.h\n"
+ "ldr x21, [x14, #0x28]\n"
+ "inch x9\n"
+ "movprfx z30, z16\n fmla z30.h, p3/M, z3.h, z9.h\n"
+ "ldr x20, [x14, #0x30]\n"
"mov p0.b, p2.b\n"
- "fmla z28.h, p3/M, z2.h, z9.h\n"
- "fmla z29.h, p3/M, z1.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x23, x9, LSL #1]\n"
- "fmla z30.h, p3/M, z0.h, z11.h\n"
- "fmla z31.h, p3/M, z2.h, z12.h\n"
- "fmla z28.h, p3/M, z8.h, z10.h\n"
- "fmla z29.h, p3/M, z7.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x22, x9, LSL #1]\n"
- "fmla z30.h, p3/M, z3.h, z9.h\n"
- "fmla z31.h, p3/M, z5.h, z10.h\n"
- "fmla z28.h, p3/M, z3.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x21, x9, LSL #1]\n"
- "fmla z29.h, p3/M, z5.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x20, x9, LSL #1]\n"
- "fmla z30.h, p3/M, z7.h, z11.h\n"
- "fmla z31.h, p3/M, z6.h, z11.h\n"
- "fmla z28.h, p3/M, z6.h, z9.h\n"
- "fmla z29.h, p3/M, z8.h, z10.h\n"
- "fmax z28.h, p3/M, z28.h, z17.h\n"
- "fmax z29.h, p3/M, z29.h, z17.h\n"
- "fmla z30.h, p3/M, z8.h, z12.h\n"
- "fmla z31.h, p3/M, z7.h, z12.h\n"
- "fmax z30.h, p3/M, z30.h, z17.h\n"
- "fmax z31.h, p3/M, z31.h, z17.h\n"
- "fmin z28.h, p3/M, z28.h, z16.h\n"
- "fmin z29.h, p3/M, z29.h, z16.h\n"
- "st1h { z28.h }, p0, [x13, x28, LSL #1]\n"
- "fmin z30.h, p3/M, z30.h, z16.h\n"
- "fmin z31.h, p3/M, z31.h, z16.h\n"
- "st1h { z29.h }, p0, [x12, x28, LSL #1]\n"
- "st1h { z30.h }, p0, [x11, x28, LSL #1]\n"
- "st1h { z31.h }, p0, [x10, x28, LSL #1]\n"
+ "movprfx z29, z16\n fmla z29.h, p3/M, z1.h, z9.h\n"
+ "ldr x19, [x14, #0x38]\n"
+ "movprfx z28, z16\n fmla z28.h, p3/M, z0.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x21, x13, LSL #1]\n"
+ "ldr x26, [x14, #0x40]\n"
+ "fmla z31.h, p3/M, z0.h, z10.h\n"
+ "ldr x25, [x14, #0x48]\n"
+ "fmla z30.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x20, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z2.h, z12.h\n"
+ "fmla z28.h, p3/M, z1.h, z12.h\n"
+ "ld1h { z10.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "ldr x24, [x14, #0x50]\n"
+ "fmla z31.h, p3/M, z5.h, z12.h\n"
+ "ldr x23, [x14, #0x58]\n"
+ "fmla z30.h, p3/M, z4.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x19, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z6.h, z9.h\n"
+ "fmla z28.h, p3/M, z3.h, z13.h\n"
+ "ld1h { z9.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "ldr x22, [x14, #0x60]\n"
+ "fmla z31.h, p3/M, z7.h, z13.h\n"
+ "ldr x21, [x14, #0x68]\n"
+ "fmla z30.h, p3/M, z6.h, z13.h\n"
+ "ldr x20, [x14, #0x70]\n"
+ "fmla z29.h, p3/M, z4.h, z13.h\n"
+ "fmla z28.h, p3/M, z8.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x24, x13, LSL #1]\n"
+ "ldr x19, [x14, #0x78]\n"
+ "fmla z31.h, p3/M, z1.h, z12.h\n"
+ "fmla z30.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x23, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z5.h, z10.h\n"
+ "fmla z28.h, p3/M, z4.h, z10.h\n"
+ "fmla z31.h, p3/M, z2.h, z9.h\n"
+ "fmla z30.h, p3/M, z1.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x22, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z0.h, z11.h\n"
+ "fmla z28.h, p3/M, z2.h, z12.h\n"
+ "fmla z31.h, p3/M, z8.h, z10.h\n"
+ "fmla z30.h, p3/M, z7.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x21, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z3.h, z9.h\n"
+ "fmla z31.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x20, x13, LSL #1]\n"
+ "fmla z28.h, p3/M, z5.h, z10.h\n"
+ "fmla z30.h, p3/M, z5.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x19, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z7.h, z11.h\n"
+ "fmla z31.h, p3/M, z6.h, z9.h\n"
+ "fmla z28.h, p3/M, z6.h, z11.h\n"
+ "fmla z30.h, p3/M, z8.h, z10.h\n"
+ "fmla z29.h, p3/M, z8.h, z12.h\n"
+ "fmla z28.h, p3/M, z7.h, z12.h\n"
+ "fmax z31.h, p3/M, z31.h, z18.h\n"
+ "fmax z30.h, p3/M, z30.h, z18.h\n"
+ "fmax z29.h, p3/M, z29.h, z18.h\n"
+ "fmin z31.h, p3/M, z31.h, z17.h\n"
+ "st1h { z31.h }, p0, [x11, x9, LSL #1]\n"
+ "fmin z30.h, p3/M, z30.h, z17.h\n"
+ "fmin z29.h, p3/M, z29.h, z17.h\n"
+ "st1h { z30.h }, p0, [x10, x9, LSL #1]\n"
+ "fmax z28.h, p3/M, z28.h, z18.h\n"
+ "st1h { z29.h }, p0, [x28, x9, LSL #1]\n"
+ "fmin z28.h, p3/M, z28.h, z17.h\n"
+ "st1h { z28.h }, p0, [x27, x9, LSL #1]\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z16", "z17", "z18", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z16", "z17", "z18", "z28", "z29", "z30", "z31"
);
}
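Note: the _indirect_impl variant above differs from _direct_impl in how it locates its data. Rather than deriving addresses from row/column strides, it walks an array of pre-computed input pointers embedded in the Args block ("add x14, %x[params_struct], %[offsetof_Args_inptrs]", then ldr/ldp at offsets 0x0 through 0x78, i.e. sixteen row pointers) and stores through four output pointers reached via offsetof(Args, outptrs). A hypothetical sketch of that layout, inferred from the offsetof operands in the asm (field order and the exact struct in the library may differ):

    struct Args
    {
        const __fp16* inptrs[16];   // inline array; the asm takes its address with ADD
        __fp16* const* outptrs;     // points at 4 row pointers for the 2x2 output tile
        const void* params;         // packed weights/bias, walked with "MUL VL" offsets
        __fp16 min, max;            // activation clamp bounds, splatted with ld1rh
    };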
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
index d2dae84089..4126cefa34 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -88,369 +88,369 @@ void sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
__asm__ __volatile__(
"ptrue p3.b\n"
- "mov x13, #0x0\n"
- "mov x8, #0x0\n"
+ "mov x6, #0x0\n"
+ "mov x7, #0x0\n"
"1:" // Tile loop
- "str x13, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "mov x25, #0x3\n"
+ "str x6, [%x[params_struct], %[offsetof_args_tile_i]]\n"
"mov x24, #0x3\n"
- "str x8, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x23, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x17, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "mul x22, x13, x23\n" // offset = tile_i * ld_input_row
- "ldr x21, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "madd x22, x8, x17, x22\n" // offset += tile_j * ld_input_col
- "ldr x16, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "cnth x15\n"
- "mul x20, x13, x21\n" // offset = tile_i * ld_output_row
+ "str x7, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "mov x23, #0x3\n"
+ "ldr x8, [%x[params_struct], %[offsetof_args_params]]\n"
+ "mov x17, #0x0\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "cnth x16\n"
+ "ldr x15, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "sub x21, XZR, x16\n"
"ldr x14, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "ldr x13, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x12, x17, x17\n"
- "mul x22, x22, x25\n" // offset *= kernel_stride * output_size
- "add x14, x14, x22, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
- "ldr x11, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "add x10, x14, x23, LSL #1\n"
- "madd x20, x8, x16, x20\n" // offset += tile_j * ld_output_col
- "add x9, x10, x23, LSL #1\n"
+ "mul x19, x6, x22\n" // offset = tile_i * ld_input_row
+ "ldr x20, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "madd x19, x7, x15, x19\n" // offset += tile_j * ld_input_col
+ "ldr x13, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+ "mul x19, x19, x24\n" // offset *= kernel_stride * output_size
+ "ldr x12, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "add x14, x14, x19, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
+ "ld1rh { z18.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "add x11, x14, x22, LSL #1\n"
+ "ld1rh { z17.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "add x10, x11, x22, LSL #1\n"
+ "ld1h { z16.h }, p3/Z, [x8]\n"
+ "add x9, x10, x22, LSL #1\n"
+ "ld1h { z0.h }, p3/Z, [x8, #1, MUL VL]\n"
+ "add x28, x9, x22, LSL #1\n"
+ "ld1h { z1.h }, p3/Z, [x8, #2, MUL VL]\n"
+ "add x27, x15, x15\n"
+ "ld1h { z2.h }, p3/Z, [x8, #3, MUL VL]\n"
+ "add x26, x27, x15\n"
+ "ld1h { z3.h }, p3/Z, [x8, #4, MUL VL]\n"
+ "add x25, x26, x15\n"
+ "ld1h { z4.h }, p3/Z, [x8, #5, MUL VL]\n"
+ "mul x19, x6, x20\n" // offset = tile_i * ld_output_row
+ "ld1h { z5.h }, p3/Z, [x8, #6, MUL VL]\n"
+ "madd x19, x7, x13, x19\n" // offset += tile_j * ld_output_col
+ "ld1h { z6.h }, p3/Z, [x8, #7, MUL VL]\n"
+ "mul x19, x19, x23\n" // offset *= output_tile_size
+ "add x24, x13, x13\n"
+ "add x12, x12, x19, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
+ "add x23, x12, x20, LSL #1\n"
+ "add x22, x23, x20, LSL #1\n"
"whilelt p2.h, XZR, %x[n_channels]\n"
- "ld1h { z18.h }, p3/Z, [x13]\n"
- "mul x20, x20, x24\n" // offset *= output_tile_size
- "ld1h { z0.h }, p3/Z, [x13, #1, MUL VL]\n"
- "ld1h { z1.h }, p3/Z, [x13, #2, MUL VL]\n"
- "add x28, x9, x23, LSL #1\n"
- "ld1h { z2.h }, p3/Z, [x13, #3, MUL VL]\n"
- "ld1h { z3.h }, p3/Z, [x13, #4, MUL VL]\n"
- "add x27, x12, x17\n"
- "add x11, x11, x20, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
- "ld1h { z4.h }, p3/Z, [x13, #5, MUL VL]\n"
- "ld1h { z5.h }, p3/Z, [x13, #6, MUL VL]\n"
- "add x26, x28, x23, LSL #1\n"
- "add x25, x27, x17\n"
- "ld1h { z6.h }, p3/Z, [x13, #7, MUL VL]\n"
- "addvl x13, x13, #16\n"
- "add x24, x11, x21, LSL #1\n"
- "ld1rh { z17.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "cmp x15, %x[n_channels]\n"
- "add x23, x24, x21, LSL #1\n"
- "ld1rh { z16.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "ld1h { z7.h }, p3/Z, [x13, #-8, MUL VL]\n"
- "add x22, x16, x16\n"
- "mov x21, #0x0\n"
- "ld1h { z8.h }, p3/Z, [x13, #-7, MUL VL]\n"
- "ld1h { z9.h }, p2/Z, [x9, x12, LSL #1]\n"
- "sub x20, XZR, x15\n"
+ "ld1h { z9.h }, p2/Z, [x10, x27, LSL #1]\n"
"ld1h { z10.h }, p2/Z, [x14]\n"
+ "addvl x8, x8, #16\n"
"ld1h { z11.h }, p2/Z, [x14, x25, LSL #1]\n"
- "addvl x13, x13, #-6\n"
- "ld1h { z12.h }, p2/Z, [x26]\n"
- "ld1h { z13.h }, p2/Z, [x10, x12, LSL #1]\n"
+ "cmp x16, %x[n_channels]\n"
+ "ld1h { z7.h }, p3/Z, [x8, #-8, MUL VL]\n"
+ "ld1h { z8.h }, p3/Z, [x8, #-7, MUL VL]\n"
+ "addvl x8, x8, #-6\n"
+ "ld1h { z12.h }, p2/Z, [x28]\n"
+ "ld1h { z13.h }, p2/Z, [x11, x27, LSL #1]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
- "movprfx z24, z18\n fmla z24.h, p3/M, z7.h, z9.h\n"
- "movprfx z23, z18\n fmla z23.h, p3/M, z8.h, z9.h\n"
- "whilelt p1.h, x15, %x[n_channels]\n"
+ "movprfx z31, z16\n fmla z31.h, p3/M, z8.h, z9.h\n"
+ "whilelt p1.h, x16, %x[n_channels]\n"
+ "movprfx z30, z16\n fmla z30.h, p3/M, z7.h, z9.h\n"
"inch x21\n"
- "movprfx z25, z18\n fmla z25.h, p3/M, z6.h, z9.h\n"
- "fmla z24.h, p3/M, z4.h, z13.h\n"
- "inch x15\n"
+ "movprfx z29, z16\n fmla z29.h, p3/M, z6.h, z9.h\n"
"mov p0.b, p2.b\n"
- "movprfx z26, z18\n fmla z26.h, p3/M, z5.h, z9.h\n"
- "movprfx z27, z18\n fmla z27.h, p3/M, z4.h, z9.h\n"
- "inch x20\n"
- "movprfx z28, z18\n fmla z28.h, p3/M, z3.h, z9.h\n"
- "fmla z23.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x9, x27, LSL #1]\n"
- "fmla z25.h, p3/M, z2.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x9, x17, LSL #1]\n"
- "movprfx z29, z18\n fmla z29.h, p3/M, z2.h, z9.h\n"
- "fmla z24.h, p3/M, z6.h, z11.h\n"
- "movprfx z31, z18\n fmla z31.h, p3/M, z0.h, z9.h\n"
- "fmla z23.h, p3/M, z5.h, z13.h\n"
- "fmla z25.h, p3/M, z3.h, z13.h\n"
- "fmla z26.h, p3/M, z2.h, z13.h\n"
+ "movprfx z28, z16\n fmla z28.h, p3/M, z5.h, z9.h\n"
+ "inch x17\n"
+ "movprfx z27, z16\n fmla z27.h, p3/M, z4.h, z9.h\n"
+ "inch x16\n"
+ "movprfx z26, z16\n fmla z26.h, p3/M, z3.h, z9.h\n"
+ "movprfx z25, z16\n fmla z25.h, p3/M, z2.h, z9.h\n"
+ "movprfx z24, z16\n fmla z24.h, p3/M, z1.h, z9.h\n"
+ "movprfx z23, z16\n fmla z23.h, p3/M, z0.h, z9.h\n"
+ "ld1h { z16.h }, p3/Z, [x8]\n"
+ "fmla z31.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x10, x26, LSL #1]\n"
+ "fmla z29.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x10, x15, LSL #1]\n"
+ "fmla z25.h, p3/M, z6.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x28, x25, LSL #1]\n"
+ "fmla z30.h, p3/M, z4.h, z13.h\n"
+ "fmla z31.h, p3/M, z5.h, z13.h\n"
+ "fmla z29.h, p3/M, z3.h, z13.h\n"
+ "fmla z28.h, p3/M, z2.h, z13.h\n"
"fmla z27.h, p3/M, z1.h, z13.h\n"
- "fmla z28.h, p3/M, z0.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x14, x17, LSL #1]\n"
- "fmla z29.h, p3/M, z6.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x26, x25, LSL #1]\n"
- "movprfx z30, z18\n fmla z30.h, p3/M, z1.h, z9.h\n"
- "fmla z24.h, p3/M, z0.h, z13.h\n"
- "ld1h { z18.h }, p3/Z, [x13]\n"
- "fmla z31.h, p3/M, z8.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x14, x27, LSL #1]\n"
- "fmla z23.h, p3/M, z7.h, z11.h\n"
- "fmla z30.h, p3/M, z0.h, z11.h\n"
- "fmla z26.h, p3/M, z4.h, z11.h\n"
+ "fmla z26.h, p3/M, z0.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x14, x15, LSL #1]\n"
+ "fmla z23.h, p3/M, z8.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x14, x26, LSL #1]\n"
+ "fmla z31.h, p3/M, z7.h, z11.h\n"
+ "fmla z30.h, p3/M, z6.h, z11.h\n"
+ "fmla z28.h, p3/M, z4.h, z11.h\n"
"fmla z27.h, p3/M, z3.h, z11.h\n"
- "fmla z29.h, p3/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x10]\n"
- "fmla z24.h, p3/M, z2.h, z12.h\n"
- "fmla z25.h, p3/M, z1.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x28]\n"
- "fmla z28.h, p3/M, z4.h, z10.h\n"
- "fmla z23.h, p3/M, z1.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x10, x25, LSL #1]\n"
- "fmla z30.h, p3/M, z2.h, z10.h\n"
- "fmla z31.h, p3/M, z1.h, z10.h\n"
- "fmla z24.h, p3/M, z8.h, z10.h\n"
- "fmla z25.h, p3/M, z7.h, z10.h\n"
+ "fmla z25.h, p3/M, z1.h, z11.h\n"
+ "fmla z24.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x11]\n"
+ "fmla z31.h, p3/M, z1.h, z13.h\n"
+ "fmla z30.h, p3/M, z0.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x11, x25, LSL #1]\n"
+ "fmla z29.h, p3/M, z1.h, z12.h\n"
"fmla z27.h, p3/M, z5.h, z10.h\n"
- "fmla z26.h, p3/M, z0.h, z11.h\n"
- "ld1h { z10.h }, p2/Z, [x28, x12, LSL #1]\n"
- "fmla z29.h, p3/M, z3.h, z12.h\n"
- "fmla z28.h, p3/M, z2.h, z13.h\n"
- "fmla z30.h, p3/M, z4.h, z10.h\n"
- "fmla z31.h, p3/M, z3.h, z10.h\n"
- "fmla z23.h, p3/M, z3.h, z11.h\n"
- "fmla z25.h, p3/M, z5.h, z13.h\n"
- "ld1h { z11.h }, p2/Z, [x28, x25, LSL #1]\n"
- "ld1h { z13.h }, p2/Z, [x26, x17, LSL #1]\n"
- "fmla z26.h, p3/M, z6.h, z12.h\n"
+ "fmla z26.h, p3/M, z4.h, z10.h\n"
+ "fmla z30.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x9]\n"
+ "fmla z29.h, p3/M, z7.h, z10.h\n"
+ "fmla z24.h, p3/M, z2.h, z10.h\n"
+ "fmla z23.h, p3/M, z1.h, z10.h\n"
+ "fmla z30.h, p3/M, z8.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x9, x27, LSL #1]\n"
+ "fmla z31.h, p3/M, z3.h, z11.h\n"
+ "fmla z28.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x9, x25, LSL #1]\n"
+ "fmla z29.h, p3/M, z5.h, z13.h\n"
+ "fmla z26.h, p3/M, z2.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x28, x15, LSL #1]\n"
+ "fmla z25.h, p3/M, z3.h, z12.h\n"
+ "fmla z28.h, p3/M, z6.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x11, x15, LSL #1]\n"
"fmla z27.h, p3/M, z7.h, z10.h\n"
- "ld1h { z12.h }, p2/Z, [x10, x17, LSL #1]\n"
- "fmla z29.h, p3/M, z5.h, z10.h\n"
- "fmla z28.h, p3/M, z6.h, z10.h\n"
- "fmla z31.h, p3/M, z5.h, z11.h\n"
- "fmla z30.h, p3/M, z6.h, z13.h\n"
- "fmla z26.h, p3/M, z8.h, z10.h\n"
- "fmla z29.h, p3/M, z7.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x26, x27, LSL #1]\n"
- "fmla z24.h, p3/M, z3.h, z12.h\n"
+ "fmla z26.h, p3/M, z6.h, z10.h\n"
+ "fmla z25.h, p3/M, z5.h, z10.h\n"
+ "fmla z28.h, p3/M, z8.h, z10.h\n"
+ "fmla z24.h, p3/M, z4.h, z10.h\n"
+ "fmla z23.h, p3/M, z3.h, z10.h\n"
+ "fmla z26.h, p3/M, z8.h, z11.h\n"
+ "fmla z25.h, p3/M, z7.h, z13.h\n"
+ "fmla z24.h, p3/M, z6.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x28, x26, LSL #1]\n"
+ "fmla z23.h, p3/M, z5.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x11, x26, LSL #1]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z31.h, p3/M, z4.h, z12.h\n"
+ "fmla z30.h, p3/M, z3.h, z12.h\n"
+ "fmla z28.h, p3/M, z1.h, z12.h\n"
"fmla z27.h, p3/M, z0.h, z12.h\n"
- "fmla z28.h, p3/M, z8.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x10, x27, LSL #1]\n"
- "fmla z30.h, p3/M, z8.h, z13.h\n"
- "addvl x10, x10, #1\n"
- "fmla z31.h, p3/M, z7.h, z13.h\n"
- "fmla z23.h, p3/M, z4.h, z12.h\n"
- "ld1h { z13.h }, p2/Z, [x28, x27, LSL #1]\n"
- "fmla z26.h, p3/M, z1.h, z12.h\n"
- "fmla z24.h, p3/M, z5.h, z11.h\n"
- "ld1h { z12.h }, p2/Z, [x28, x17, LSL #1]\n"
- "addvl x28, x28, #1\n"
- "fmla z25.h, p3/M, z4.h, z11.h\n"
+ "ld1h { z12.h }, p2/Z, [x9, x15, LSL #1]\n"
+ "fmla z29.h, p3/M, z4.h, z11.h\n"
+ "fmla z30.h, p3/M, z5.h, z11.h\n"
+ "fmla z26.h, p3/M, z1.h, z11.h\n"
"fmla z27.h, p3/M, z2.h, z11.h\n"
- "fmla z28.h, p3/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x14, x12, LSL #1]\n"
- "fmla z29.h, p3/M, z4.h, z12.h\n"
+ "ld1h { z11.h }, p2/Z, [x14, x27, LSL #1]\n"
"addvl x14, x14, #1\n"
- "fmla z30.h, p3/M, z3.h, z12.h\n"
- "fmla z31.h, p3/M, z4.h, z13.h\n"
- "ld1h { z4.h }, p3/Z, [x13, #5, MUL VL]\n"
+ "fmla z24.h, p3/M, z8.h, z13.h\n"
"ld1h { z10.h }, p1/Z, [x14]\n"
- "fmla z26.h, p3/M, z7.h, z12.h\n"
- "fmla z27.h, p3/M, z6.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x9]\n"
- "fmla z23.h, p3/M, z2.h, z11.h\n"
- "fmla z24.h, p3/M, z1.h, z11.h\n"
- "fmax z24.h, p3/M, z24.h, z17.h\n"
- "ld1h { z1.h }, p3/Z, [x13, #2, MUL VL]\n"
- "fmla z25.h, p3/M, z0.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x9, x25, LSL #1]\n"
- "fmla z28.h, p3/M, z7.h, z13.h\n"
+ "fmla z23.h, p3/M, z7.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x9, x26, LSL #1]\n"
"addvl x9, x9, #1\n"
- "fmla z30.h, p3/M, z5.h, z13.h\n"
- "fmla z29.h, p3/M, z0.h, z12.h\n"
- "ld1h { z0.h }, p3/Z, [x13, #1, MUL VL]\n"
- "fmin z24.h, p3/M, z24.h, z16.h\n"
+ "fmla z28.h, p3/M, z7.h, z12.h\n"
+ "fmla z27.h, p3/M, z6.h, z12.h\n"
+ "fmla z25.h, p3/M, z4.h, z12.h\n"
+ "fmla z24.h, p3/M, z3.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x10]\n"
"fmla z31.h, p3/M, z2.h, z11.h\n"
+ "fmla z30.h, p3/M, z1.h, z11.h\n"
+ "ld1h { z1.h }, p3/Z, [x8, #2, MUL VL]\n"
+ "fmla z29.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x10, x25, LSL #1]\n"
+ "addvl x10, x10, #1\n"
"fmla z27.h, p3/M, z8.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x26, x12, LSL #1]\n"
- "fmax z27.h, p3/M, z27.h, z17.h\n"
- "fmla z23.h, p3/M, z6.h, z12.h\n"
- "fmla z26.h, p3/M, z3.h, z12.h\n"
- "fmax z23.h, p3/M, z23.h, z17.h\n"
- "fmax z26.h, p3/M, z26.h, z17.h\n"
- "fmla z25.h, p3/M, z8.h, z11.h\n"
- "fmla z28.h, p3/M, z5.h, z11.h\n"
- "fmax z25.h, p3/M, z25.h, z17.h\n"
- "fmax z28.h, p3/M, z28.h, z17.h\n"
- "fmla z29.h, p3/M, z8.h, z13.h\n"
- "fmla z30.h, p3/M, z7.h, z13.h\n"
- "fmax z29.h, p3/M, z29.h, z17.h\n"
- "fmax z30.h, p3/M, z30.h, z17.h\n"
- "fmla z31.h, p3/M, z6.h, z13.h\n"
- "fmax z31.h, p3/M, z31.h, z17.h\n"
- "addvl x26, x26, #1\n"
- "ld1h { z2.h }, p3/Z, [x13, #3, MUL VL]\n"
- "ld1h { z3.h }, p3/Z, [x13, #4, MUL VL]\n"
- "ld1h { z5.h }, p3/Z, [x13, #6, MUL VL]\n"
- "whilelt p2.h, x21, %x[n_channels]\n"
- "cmp x15, %x[n_channels]\n"
- "ld1h { z6.h }, p3/Z, [x13, #7, MUL VL]\n"
- "addvl x13, x13, #16\n"
- "fmin z23.h, p3/M, z23.h, z16.h\n"
- "ld1h { z9.h }, p1/Z, [x9, x12, LSL #1]\n"
- "fmin z25.h, p3/M, z25.h, z16.h\n"
- "fmin z26.h, p3/M, z26.h, z16.h\n"
+ "ld1h { z9.h }, p1/Z, [x10, x27, LSL #1]\n"
+ "fmla z26.h, p3/M, z7.h, z13.h\n"
+ "fmla z24.h, p3/M, z5.h, z13.h\n"
+ "fmla z23.h, p3/M, z4.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x28, x27, LSL #1]\n"
+ "whilelt p2.h, x17, %x[n_channels]\n"
+ "fmla z31.h, p3/M, z6.h, z12.h\n"
+ "ld1h { z4.h }, p3/Z, [x8, #5, MUL VL]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z28.h, p3/M, z3.h, z12.h\n"
+ "ld1h { z3.h }, p3/Z, [x8, #4, MUL VL]\n"
+ "cmp x16, %x[n_channels]\n"
+ "fmla z25.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z12.h }, p1/Z, [x28]\n"
+ "fmla z29.h, p3/M, z8.h, z11.h\n"
+ "ld1h { z0.h }, p3/Z, [x8, #1, MUL VL]\n"
+ "fmla z26.h, p3/M, z5.h, z11.h\n"
+ "ld1h { z5.h }, p3/Z, [x8, #6, MUL VL]\n"
+ "fmla z23.h, p3/M, z2.h, z11.h\n"
"ld1h { z11.h }, p1/Z, [x14, x25, LSL #1]\n"
- "ld1h { z12.h }, p1/Z, [x26]\n"
- "fmin z27.h, p3/M, z27.h, z16.h\n"
- "fmin z28.h, p3/M, z28.h, z16.h\n"
- "ld1h { z13.h }, p1/Z, [x10, x12, LSL #1]\n"
- "st1h { z23.h }, p0, [x11]\n"
- "fmin z29.h, p3/M, z29.h, z16.h\n"
- "fmin z30.h, p3/M, z30.h, z16.h\n"
- "st1h { z24.h }, p0, [x11, x16, LSL #1]\n"
- "ld1h { z7.h }, p3/Z, [x13, #-8, MUL VL]\n"
- "fmin z31.h, p3/M, z31.h, z16.h\n"
- "st1h { z25.h }, p0, [x11, x22, LSL #1]\n"
- "addvl x11, x11, #1\n"
- "ld1h { z8.h }, p3/Z, [x13, #-7, MUL VL]\n"
- "st1h { z26.h }, p0, [x24]\n"
- "addvl x13, x13, #-6\n"
- "st1h { z27.h }, p0, [x24, x16, LSL #1]\n"
- "st1h { z28.h }, p0, [x24, x22, LSL #1]\n"
- "addvl x24, x24, #1\n"
- "st1h { z29.h }, p0, [x23]\n"
- "st1h { z30.h }, p0, [x23, x16, LSL #1]\n"
- "st1h { z31.h }, p0, [x23, x22, LSL #1]\n"
+ "fmla z24.h, p3/M, z7.h, z13.h\n"
+ "ld1h { z2.h }, p3/Z, [x8, #3, MUL VL]\n"
+ "fmla z25.h, p3/M, z8.h, z13.h\n"
+ "fmax z31.h, p3/M, z31.h, z18.h\n"
+ "fmla z23.h, p3/M, z6.h, z13.h\n"
+ "ld1h { z13.h }, p1/Z, [x11, x27, LSL #1]\n"
+ "fmax z30.h, p3/M, z30.h, z18.h\n"
+ "ld1h { z6.h }, p3/Z, [x8, #7, MUL VL]\n"
+ "addvl x8, x8, #16\n"
+ "fmin z31.h, p3/M, z31.h, z17.h\n"
+ "ld1h { z7.h }, p3/Z, [x8, #-8, MUL VL]\n"
+ "fmax z29.h, p3/M, z29.h, z18.h\n"
+ "ld1h { z8.h }, p3/Z, [x8, #-7, MUL VL]\n"
+ "addvl x8, x8, #-6\n"
+ "fmin z30.h, p3/M, z30.h, z17.h\n"
+ "st1h { z31.h }, p0, [x12]\n"
+ "fmin z29.h, p3/M, z29.h, z17.h\n"
+ "fmax z28.h, p3/M, z28.h, z18.h\n"
+ "st1h { z30.h }, p0, [x12, x13, LSL #1]\n"
+ "fmax z27.h, p3/M, z27.h, z18.h\n"
+ "fmax z26.h, p3/M, z26.h, z18.h\n"
+ "st1h { z29.h }, p0, [x12, x24, LSL #1]\n"
+ "fmin z28.h, p3/M, z28.h, z17.h\n"
+ "addvl x12, x12, #1\n"
+ "fmax z25.h, p3/M, z25.h, z18.h\n"
+ "st1h { z28.h }, p0, [x23]\n"
+ "fmin z27.h, p3/M, z27.h, z17.h\n"
+ "fmin z26.h, p3/M, z26.h, z17.h\n"
+ "st1h { z27.h }, p0, [x23, x13, LSL #1]\n"
+ "fmin z25.h, p3/M, z25.h, z17.h\n"
+ "fmax z24.h, p3/M, z24.h, z18.h\n"
+ "st1h { z26.h }, p0, [x23, x24, LSL #1]\n"
"addvl x23, x23, #1\n"
+ "fmax z23.h, p3/M, z23.h, z18.h\n"
+ "st1h { z25.h }, p0, [x22]\n"
+ "fmin z24.h, p3/M, z24.h, z17.h\n"
+ "fmin z23.h, p3/M, z23.h, z17.h\n"
+ "st1h { z24.h }, p0, [x22, x13, LSL #1]\n"
+ "st1h { z23.h }, p0, [x22, x24, LSL #1]\n"
+ "addvl x22, x22, #1\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
- "movprfx z24, z18\n fmla z24.h, p3/M, z7.h, z9.h\n"
- "movprfx z23, z18\n fmla z23.h, p3/M, z8.h, z9.h\n"
- "ldr x8, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x13, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "movprfx z25, z18\n fmla z25.h, p3/M, z6.h, z9.h\n"
- "fmla z24.h, p3/M, z4.h, z13.h\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "add x8, x8, #0x1\n"
- "movprfx z26, z18\n fmla z26.h, p3/M, z5.h, z9.h\n"
- "movprfx z27, z18\n fmla z27.h, p3/M, z4.h, z9.h\n"
- "cmp x8, x20\n"
- "add x21, x13, #0x1\n"
- "movprfx z28, z18\n fmla z28.h, p3/M, z3.h, z9.h\n"
- "fmla z23.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x9, x27, LSL #1]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "fmla z25.h, p3/M, z2.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x9, x17, LSL #1]\n"
- "movprfx z29, z18\n fmla z29.h, p3/M, z2.h, z9.h\n"
- "csel x13, x13, x21, LT\n"
- "fmla z24.h, p3/M, z6.h, z11.h\n"
- "movprfx z31, z18\n fmla z31.h, p3/M, z0.h, z9.h\n"
+ "movprfx z31, z16\n fmla z31.h, p3/M, z8.h, z9.h\n"
+ "ldr x6, [%x[params_struct], %[offsetof_args_tile_i]]\n"
"mov p0.b, p2.b\n"
- "csel x8, x8, XZR, LT\n"
- "fmla z23.h, p3/M, z5.h, z13.h\n"
- "fmla z25.h, p3/M, z3.h, z13.h\n"
- "cmp x13, x20\n"
- "fmla z26.h, p3/M, z2.h, z13.h\n"
+ "movprfx z30, z16\n fmla z30.h, p3/M, z7.h, z9.h\n"
+ "ldr x7, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "add x21, x6, #0x1\n"
+ "movprfx z29, z16\n fmla z29.h, p3/M, z6.h, z9.h\n"
+ "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "movprfx z28, z16\n fmla z28.h, p3/M, z5.h, z9.h\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "add x7, x7, #0x1\n"
+ "movprfx z27, z16\n fmla z27.h, p3/M, z4.h, z9.h\n"
+ "cmp x7, x19\n"
+ "movprfx z26, z16\n fmla z26.h, p3/M, z3.h, z9.h\n"
+ "movprfx z25, z16\n fmla z25.h, p3/M, z2.h, z9.h\n"
+ "csel x7, x7, XZR, LT\n"
+ "movprfx z24, z16\n fmla z24.h, p3/M, z1.h, z9.h\n"
+ "csel x6, x6, x21, LT\n"
+ "movprfx z23, z16\n fmla z23.h, p3/M, z0.h, z9.h\n"
+ "cmp x6, x20\n"
+ "fmla z31.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x10, x26, LSL #1]\n"
+ "fmla z29.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x10, x15, LSL #1]\n"
+ "fmla z25.h, p3/M, z6.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x28, x25, LSL #1]\n"
+ "fmla z30.h, p3/M, z4.h, z13.h\n"
+ "fmla z31.h, p3/M, z5.h, z13.h\n"
+ "fmla z29.h, p3/M, z3.h, z13.h\n"
+ "fmla z28.h, p3/M, z2.h, z13.h\n"
"fmla z27.h, p3/M, z1.h, z13.h\n"
- "fmla z28.h, p3/M, z0.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x14, x17, LSL #1]\n"
- "fmla z29.h, p3/M, z6.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x26, x25, LSL #1]\n"
- "movprfx z30, z18\n fmla z30.h, p3/M, z1.h, z9.h\n"
- "fmla z24.h, p3/M, z0.h, z13.h\n"
- "fmla z31.h, p3/M, z8.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x14, x27, LSL #1]\n"
- "fmla z23.h, p3/M, z7.h, z11.h\n"
- "fmla z30.h, p3/M, z0.h, z11.h\n"
- "fmla z26.h, p3/M, z4.h, z11.h\n"
+ "fmla z26.h, p3/M, z0.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x14, x15, LSL #1]\n"
+ "fmla z23.h, p3/M, z8.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x14, x26, LSL #1]\n"
+ "fmla z31.h, p3/M, z7.h, z11.h\n"
+ "fmla z30.h, p3/M, z6.h, z11.h\n"
+ "fmla z28.h, p3/M, z4.h, z11.h\n"
"fmla z27.h, p3/M, z3.h, z11.h\n"
- "fmla z29.h, p3/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x10]\n"
- "fmla z24.h, p3/M, z2.h, z12.h\n"
- "fmla z25.h, p3/M, z1.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x28]\n"
- "fmla z28.h, p3/M, z4.h, z10.h\n"
- "fmla z23.h, p3/M, z1.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x10, x25, LSL #1]\n"
- "fmla z30.h, p3/M, z2.h, z10.h\n"
- "fmla z31.h, p3/M, z1.h, z10.h\n"
- "fmla z24.h, p3/M, z8.h, z10.h\n"
- "fmla z25.h, p3/M, z7.h, z10.h\n"
+ "fmla z25.h, p3/M, z1.h, z11.h\n"
+ "fmla z24.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x11]\n"
+ "fmla z31.h, p3/M, z1.h, z13.h\n"
+ "fmla z30.h, p3/M, z0.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x11, x25, LSL #1]\n"
+ "fmla z29.h, p3/M, z1.h, z12.h\n"
"fmla z27.h, p3/M, z5.h, z10.h\n"
- "fmla z26.h, p3/M, z0.h, z11.h\n"
- "ld1h { z10.h }, p2/Z, [x28, x12, LSL #1]\n"
- "fmla z29.h, p3/M, z3.h, z12.h\n"
- "fmla z28.h, p3/M, z2.h, z13.h\n"
- "fmla z30.h, p3/M, z4.h, z10.h\n"
- "fmla z31.h, p3/M, z3.h, z10.h\n"
- "fmla z23.h, p3/M, z3.h, z11.h\n"
- "fmla z25.h, p3/M, z5.h, z13.h\n"
- "ld1h { z11.h }, p2/Z, [x28, x25, LSL #1]\n"
- "ld1h { z13.h }, p2/Z, [x26, x17, LSL #1]\n"
- "fmla z26.h, p3/M, z6.h, z12.h\n"
+ "fmla z26.h, p3/M, z4.h, z10.h\n"
+ "fmla z30.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x9]\n"
+ "fmla z29.h, p3/M, z7.h, z10.h\n"
+ "fmla z24.h, p3/M, z2.h, z10.h\n"
+ "fmla z23.h, p3/M, z1.h, z10.h\n"
+ "fmla z30.h, p3/M, z8.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x9, x27, LSL #1]\n"
+ "fmla z31.h, p3/M, z3.h, z11.h\n"
+ "fmla z28.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x9, x25, LSL #1]\n"
+ "fmla z29.h, p3/M, z5.h, z13.h\n"
+ "fmla z26.h, p3/M, z2.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x28, x15, LSL #1]\n"
+ "fmla z25.h, p3/M, z3.h, z12.h\n"
+ "fmla z28.h, p3/M, z6.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x11, x15, LSL #1]\n"
"fmla z27.h, p3/M, z7.h, z10.h\n"
- "ld1h { z12.h }, p2/Z, [x10, x17, LSL #1]\n"
- "fmla z29.h, p3/M, z5.h, z10.h\n"
- "fmla z28.h, p3/M, z6.h, z10.h\n"
- "fmla z31.h, p3/M, z5.h, z11.h\n"
- "fmla z30.h, p3/M, z6.h, z13.h\n"
- "fmla z26.h, p3/M, z8.h, z10.h\n"
- "fmla z29.h, p3/M, z7.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x26, x27, LSL #1]\n"
- "fmla z24.h, p3/M, z3.h, z12.h\n"
+ "fmla z26.h, p3/M, z6.h, z10.h\n"
+ "fmla z25.h, p3/M, z5.h, z10.h\n"
+ "fmla z28.h, p3/M, z8.h, z10.h\n"
+ "fmla z24.h, p3/M, z4.h, z10.h\n"
+ "fmla z23.h, p3/M, z3.h, z10.h\n"
+ "fmla z26.h, p3/M, z8.h, z11.h\n"
+ "fmla z25.h, p3/M, z7.h, z13.h\n"
+ "fmla z24.h, p3/M, z6.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x28, x26, LSL #1]\n"
+ "fmla z23.h, p3/M, z5.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x11, x26, LSL #1]\n"
+ "fmla z31.h, p3/M, z4.h, z12.h\n"
+ "fmla z30.h, p3/M, z3.h, z12.h\n"
+ "fmla z28.h, p3/M, z1.h, z12.h\n"
"fmla z27.h, p3/M, z0.h, z12.h\n"
- "fmla z28.h, p3/M, z8.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x10, x27, LSL #1]\n"
- "fmla z30.h, p3/M, z8.h, z13.h\n"
- "fmla z31.h, p3/M, z7.h, z13.h\n"
- "fmla z23.h, p3/M, z4.h, z12.h\n"
- "ld1h { z13.h }, p2/Z, [x28, x27, LSL #1]\n"
- "fmla z26.h, p3/M, z1.h, z12.h\n"
- "fmla z24.h, p3/M, z5.h, z11.h\n"
- "ld1h { z12.h }, p2/Z, [x28, x17, LSL #1]\n"
- "fmla z25.h, p3/M, z4.h, z11.h\n"
+ "ld1h { z12.h }, p2/Z, [x9, x15, LSL #1]\n"
+ "fmla z29.h, p3/M, z4.h, z11.h\n"
+ "fmla z30.h, p3/M, z5.h, z11.h\n"
+ "fmla z26.h, p3/M, z1.h, z11.h\n"
"fmla z27.h, p3/M, z2.h, z11.h\n"
- "fmla z28.h, p3/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x14, x12, LSL #1]\n"
- "fmla z29.h, p3/M, z4.h, z12.h\n"
- "fmla z30.h, p3/M, z3.h, z12.h\n"
- "fmla z31.h, p3/M, z4.h, z13.h\n"
- "fmla z26.h, p3/M, z7.h, z12.h\n"
+ "ld1h { z11.h }, p2/Z, [x14, x27, LSL #1]\n"
+ "fmla z24.h, p3/M, z8.h, z13.h\n"
+ "fmla z23.h, p3/M, z7.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x9, x26, LSL #1]\n"
+ "fmla z28.h, p3/M, z7.h, z12.h\n"
"fmla z27.h, p3/M, z6.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x9]\n"
- "fmla z23.h, p3/M, z2.h, z11.h\n"
- "fmla z24.h, p3/M, z1.h, z11.h\n"
- "fmax z24.h, p3/M, z24.h, z17.h\n"
- "fmin z24.h, p3/M, z24.h, z16.h\n"
- "fmla z25.h, p3/M, z0.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x9, x25, LSL #1]\n"
- "fmla z28.h, p3/M, z7.h, z13.h\n"
- "fmla z30.h, p3/M, z5.h, z13.h\n"
- "fmla z29.h, p3/M, z0.h, z12.h\n"
+ "fmla z25.h, p3/M, z4.h, z12.h\n"
+ "fmla z24.h, p3/M, z3.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x10]\n"
"fmla z31.h, p3/M, z2.h, z11.h\n"
+ "fmla z30.h, p3/M, z1.h, z11.h\n"
+ "fmla z29.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x10, x25, LSL #1]\n"
"fmla z27.h, p3/M, z8.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x26, x12, LSL #1]\n"
- "fmax z27.h, p3/M, z27.h, z17.h\n"
- "fmla z23.h, p3/M, z6.h, z12.h\n"
- "fmla z26.h, p3/M, z3.h, z12.h\n"
- "fmax z23.h, p3/M, z23.h, z17.h\n"
- "fmax z26.h, p3/M, z26.h, z17.h\n"
- "fmla z25.h, p3/M, z8.h, z11.h\n"
- "fmla z28.h, p3/M, z5.h, z11.h\n"
- "fmax z25.h, p3/M, z25.h, z17.h\n"
- "fmax z28.h, p3/M, z28.h, z17.h\n"
- "fmla z29.h, p3/M, z8.h, z13.h\n"
- "fmla z30.h, p3/M, z7.h, z13.h\n"
- "fmax z29.h, p3/M, z29.h, z17.h\n"
- "fmax z30.h, p3/M, z30.h, z17.h\n"
- "fmla z31.h, p3/M, z6.h, z13.h\n"
- "fmax z31.h, p3/M, z31.h, z17.h\n"
- "fmin z23.h, p3/M, z23.h, z16.h\n"
- "st1h { z23.h }, p0, [x11]\n"
- "fmin z25.h, p3/M, z25.h, z16.h\n"
- "fmin z26.h, p3/M, z26.h, z16.h\n"
- "st1h { z24.h }, p0, [x11, x16, LSL #1]\n"
- "fmin z27.h, p3/M, z27.h, z16.h\n"
- "fmin z28.h, p3/M, z28.h, z16.h\n"
- "st1h { z25.h }, p0, [x11, x22, LSL #1]\n"
- "fmin z29.h, p3/M, z29.h, z16.h\n"
- "fmin z30.h, p3/M, z30.h, z16.h\n"
- "st1h { z26.h }, p0, [x24]\n"
- "fmin z31.h, p3/M, z31.h, z16.h\n"
- "st1h { z27.h }, p0, [x24, x16, LSL #1]\n"
- "st1h { z28.h }, p0, [x24, x22, LSL #1]\n"
- "st1h { z29.h }, p0, [x23]\n"
- "st1h { z30.h }, p0, [x23, x16, LSL #1]\n"
- "st1h { z31.h }, p0, [x23, x22, LSL #1]\n"
+ "fmla z26.h, p3/M, z7.h, z13.h\n"
+ "fmla z24.h, p3/M, z5.h, z13.h\n"
+ "fmla z23.h, p3/M, z4.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x28, x27, LSL #1]\n"
+ "fmla z31.h, p3/M, z6.h, z12.h\n"
+ "fmla z28.h, p3/M, z3.h, z12.h\n"
+ "fmla z25.h, p3/M, z0.h, z12.h\n"
+ "fmla z29.h, p3/M, z8.h, z11.h\n"
+ "fmla z26.h, p3/M, z5.h, z11.h\n"
+ "fmla z23.h, p3/M, z2.h, z11.h\n"
+ "fmla z25.h, p3/M, z8.h, z13.h\n"
+ "fmla z24.h, p3/M, z7.h, z13.h\n"
+ "fmax z31.h, p3/M, z31.h, z18.h\n"
+ "fmla z23.h, p3/M, z6.h, z13.h\n"
+ "fmax z30.h, p3/M, z30.h, z18.h\n"
+ "fmax z29.h, p3/M, z29.h, z18.h\n"
+ "fmin z31.h, p3/M, z31.h, z17.h\n"
+ "st1h { z31.h }, p0, [x12]\n"
+ "fmin z30.h, p3/M, z30.h, z17.h\n"
+ "fmin z29.h, p3/M, z29.h, z17.h\n"
+ "st1h { z30.h }, p0, [x12, x13, LSL #1]\n"
+ "fmax z28.h, p3/M, z28.h, z18.h\n"
+ "fmax z27.h, p3/M, z27.h, z18.h\n"
+ "st1h { z29.h }, p0, [x12, x24, LSL #1]\n"
+ "fmax z26.h, p3/M, z26.h, z18.h\n"
+ "fmax z25.h, p3/M, z25.h, z18.h\n"
+ "fmax z24.h, p3/M, z24.h, z18.h\n"
+ "fmin z28.h, p3/M, z28.h, z17.h\n"
+ "st1h { z28.h }, p0, [x23]\n"
+ "fmin z27.h, p3/M, z27.h, z17.h\n"
+ "fmin z26.h, p3/M, z26.h, z17.h\n"
+ "st1h { z27.h }, p0, [x23, x13, LSL #1]\n"
+ "fmin z25.h, p3/M, z25.h, z17.h\n"
+ "fmin z24.h, p3/M, z24.h, z17.h\n"
+ "st1h { z26.h }, p0, [x23, x24, LSL #1]\n"
+ "fmax z23.h, p3/M, z23.h, z18.h\n"
+ "st1h { z25.h }, p0, [x22]\n"
+ "fmin z23.h, p3/M, z23.h, z17.h\n"
+ "st1h { z24.h }, p0, [x22, x13, LSL #1]\n"
+ "st1h { z23.h }, p0, [x22, x24, LSL #1]\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z16", "z17", "z18", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z16", "z17", "z18", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
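Note on the clobber-list hunk closing the asm block above: the revert swaps the clobber list so that the statement once again declares "x19" (and shifts which other x registers the generated code uses). Listing a callee-saved register such as x19 as a clobber tells the compiler the asm block overwrites it, so the compiler saves and restores it around the statement and the kernel may use it freely as scratch. A minimal sketch of that mechanism, assuming a GCC/Clang AArch64 toolchain; this is illustrative only and is not part of the kernel above:

#include <cstdint>

// Illustrative only: shows why "x19" appears in the clobber list.
// Declaring the callee-saved register as clobbered means the compiler
// preserves its value across the asm statement automatically.
uint64_t clobber_x19_demo(uint64_t v)
{
    uint64_t out;
    __asm__ __volatile__(
        "mov x19, %x[in]\n"        // use x19 as scratch inside the block
        "add %x[out], x19, #1\n"
        : [out] "=r" (out)
        : [in] "r" (v)
        : "x19"                    // compiler now saves/restores x19
    );
    return out;
}

Without the "x19" clobber entry, the compiler would be free to keep a live value in x19 across the statement, which the kernel's scratch use would silently corrupt.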
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
index 59c0e0cf0b..f79a36b2a3 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,387 +87,387 @@ void sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
activation_min, activation_max);
__asm__ __volatile__(
+ "ldr x16, [%x[params_struct], %[offsetof_args_outptrs]]\n"
"ptrue p3.b\n"
- "ldr x17, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x16, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "ld1h { z18.h }, p3/Z, [x17]\n"
- "cnth x15\n"
- "mov x14, #0x0\n"
- "ld1h { z0.h }, p3/Z, [x17, #1, MUL VL]\n"
- "ld1h { z1.h }, p3/Z, [x17, #2, MUL VL]\n"
+ "ldr x15, [%x[params_struct], %[offsetof_args_params]]\n"
+ "add x14, %x[params_struct], %[offsetof_Args_inptrs]\n"
+ "ld1rh { z18.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "mov x13, #0x0\n"
+ "ld1rh { z17.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "cnth x12\n"
+ "ld1h { z16.h }, p3/Z, [x15]\n"
+ "sub x11, XZR, x12\n"
+ "ld1h { z0.h }, p3/Z, [x15, #1, MUL VL]\n"
"whilelt p2.h, XZR, %x[n_channels]\n"
- "ld1h { z2.h }, p3/Z, [x17, #3, MUL VL]\n"
- "ld1h { z3.h }, p3/Z, [x17, #4, MUL VL]\n"
- "cmp x15, %x[n_channels]\n"
- "ld1h { z4.h }, p3/Z, [x17, #5, MUL VL]\n"
- "ld1h { z5.h }, p3/Z, [x17, #6, MUL VL]\n"
- "sub x13, XZR, x15\n"
- "ld1h { z6.h }, p3/Z, [x17, #7, MUL VL]\n"
- "addvl x17, x17, #16\n"
- "ldp x12, x11, [x16, #0x0]\n"
- "ldp x10, x9, [x16, #0x10]\n"
- "ldr x28, [x16, #0x20]\n"
- "ldr x27, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "ld1rh { z17.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ld1rh { z16.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "ld1h { z7.h }, p3/Z, [x17, #-8, MUL VL]\n"
- "ld1h { z8.h }, p3/Z, [x17, #-7, MUL VL]\n"
- "ld1h { z9.h }, p2/Z, [x12, x14, LSL #1]\n"
- "addvl x17, x17, #-6\n"
- "ld1h { z10.h }, p2/Z, [x11, x14, LSL #1]\n"
- "ld1h { z11.h }, p2/Z, [x10, x14, LSL #1]\n"
- "ld1h { z12.h }, p2/Z, [x9, x14, LSL #1]\n"
- "ld1h { z13.h }, p2/Z, [x28, x14, LSL #1]\n"
+ "ld1h { z1.h }, p3/Z, [x15, #2, MUL VL]\n"
+ "cmp x12, %x[n_channels]\n"
+ "ld1h { z2.h }, p3/Z, [x15, #3, MUL VL]\n"
+ "ld1h { z3.h }, p3/Z, [x15, #4, MUL VL]\n"
+ "ld1h { z4.h }, p3/Z, [x15, #5, MUL VL]\n"
+ "ld1h { z5.h }, p3/Z, [x15, #6, MUL VL]\n"
+ "ld1h { z6.h }, p3/Z, [x15, #7, MUL VL]\n"
+ "addvl x15, x15, #16\n"
+ "ldp x10, x9, [x14, #0x0]\n"
+ "ld1h { z7.h }, p3/Z, [x15, #-8, MUL VL]\n"
+ "ld1h { z8.h }, p3/Z, [x15, #-7, MUL VL]\n"
+ "addvl x15, x15, #-6\n"
+ "ld1h { z9.h }, p2/Z, [x10, x13, LSL #1]\n"
+ "ld1h { z10.h }, p2/Z, [x9, x13, LSL #1]\n"
+ "ldp x28, x27, [x14, #0x10]\n"
+ "ldr x26, [x14, #0x20]\n"
+ "ld1h { z11.h }, p2/Z, [x28, x13, LSL #1]\n"
+ "ld1h { z12.h }, p2/Z, [x27, x13, LSL #1]\n"
+ "ld1h { z13.h }, p2/Z, [x26, x13, LSL #1]\n"
"bge 2f\n"
"1:" // Channel loop
- "movprfx z23, z18\n fmla z23.h, p3/M, z8.h, z9.h\n"
- "movprfx z24, z18\n fmla z24.h, p3/M, z7.h, z9.h\n"
- "ldr x26, [x16, #0x30]\n"
- "ldr x25, [x16, #0x38]\n"
- "movprfx z25, z18\n fmla z25.h, p3/M, z6.h, z9.h\n"
- "fmla z23.h, p3/M, z0.h, z10.h\n"
- "ldr x24, [x16, #0x28]\n"
- "ldr x11, [x16, #0x48]\n"
- "fmla z24.h, p3/M, z4.h, z13.h\n"
- "movprfx z26, z18\n fmla z26.h, p3/M, z5.h, z9.h\n"
- "ldr x12, [x16, #0x40]\n"
- "ld1h { z10.h }, p2/Z, [x11, x14, LSL #1]\n"
- "movprfx z27, z18\n fmla z27.h, p3/M, z4.h, z9.h\n"
- "movprfx z28, z18\n fmla z28.h, p3/M, z3.h, z9.h\n"
- "ldr x10, [x16, #0x50]\n"
- "ldr x9, [x16, #0x58]\n"
- "fmla z25.h, p3/M, z2.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x26, x14, LSL #1]\n"
- "movprfx z29, z18\n fmla z29.h, p3/M, z2.h, z9.h\n"
- "ldr x28, [x16, #0x60]\n"
- "fmla z23.h, p3/M, z5.h, z13.h\n"
- "fmla z24.h, p3/M, z6.h, z11.h\n"
- "ldr x26, [x16, #0x70]\n"
- "ldr x11, [x16, #0x88]\n"
- "movprfx z31, z18\n fmla z31.h, p3/M, z0.h, z9.h\n"
- "fmla z25.h, p3/M, z3.h, z13.h\n"
- "inch x13\n"
- "mov p1.b, p2.b\n"
- "fmla z26.h, p3/M, z2.h, z13.h\n"
+ "movprfx z31, z16\n fmla z31.h, p3/M, z8.h, z9.h\n"
+ "ldr x25, [x14, #0x28]\n"
+ "whilelt p1.h, x12, %x[n_channels]\n"
+ "movprfx z30, z16\n fmla z30.h, p3/M, z7.h, z9.h\n"
+ "ldr x24, [x14, #0x30]\n"
+ "inch x11\n"
+ "movprfx z29, z16\n fmla z29.h, p3/M, z6.h, z9.h\n"
+ "ldr x23, [x14, #0x38]\n"
+ "mov p0.b, p2.b\n"
+ "movprfx z28, z16\n fmla z28.h, p3/M, z5.h, z9.h\n"
+ "ldr x10, [x14, #0x40]\n"
+ "movprfx z27, z16\n fmla z27.h, p3/M, z4.h, z9.h\n"
+ "ldr x9, [x14, #0x48]\n"
+ "movprfx z26, z16\n fmla z26.h, p3/M, z3.h, z9.h\n"
+ "ldr x28, [x14, #0x50]\n"
+ "movprfx z25, z16\n fmla z25.h, p3/M, z2.h, z9.h\n"
+ "ldr x27, [x14, #0x58]\n"
+ "movprfx z24, z16\n fmla z24.h, p3/M, z1.h, z9.h\n"
+ "ldr x26, [x14, #0x60]\n"
+ "movprfx z23, z16\n fmla z23.h, p3/M, z0.h, z9.h\n"
+ "ldr x22, [x16, #0x0]\n"
+ "fmla z31.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x9, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x24, x13, LSL #1]\n"
+ "fmla z25.h, p3/M, z6.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z4.h, z13.h\n"
+ "ldr x25, [x14, #0x68]\n"
+ "fmla z31.h, p3/M, z5.h, z13.h\n"
+ "ldr x24, [x14, #0x70]\n"
+ "fmla z29.h, p3/M, z3.h, z13.h\n"
+ "ldr x9, [x14, #0x88]\n"
+ "fmla z28.h, p3/M, z2.h, z13.h\n"
+ "ldr x21, [x16, #0x8]\n"
"fmla z27.h, p3/M, z1.h, z13.h\n"
- "ldr x23, [x27, #0x0]\n"
- "whilelt p0.h, x15, %x[n_channels]\n"
- "fmla z28.h, p3/M, z0.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x25, x14, LSL #1]\n"
- "fmla z29.h, p3/M, z6.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x24, x14, LSL #1]\n"
- "movprfx z30, z18\n fmla z30.h, p3/M, z1.h, z9.h\n"
- "fmla z23.h, p3/M, z7.h, z11.h\n"
- "ldr x24, [x16, #0x68]\n"
- "ldr x25, [x16, #0x78]\n"
- "fmla z24.h, p3/M, z0.h, z13.h\n"
- "fmla z31.h, p3/M, z8.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x12, x14, LSL #1]\n"
- "ldr x12, [x16, #0x80]\n"
- "fmla z26.h, p3/M, z4.h, z11.h\n"
+ "ldr x20, [x16, #0x10]\n"
+ "fmla z26.h, p3/M, z0.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x23, x13, LSL #1]\n"
+ "fmla z23.h, p3/M, z8.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x10, x13, LSL #1]\n"
+ "fmla z31.h, p3/M, z7.h, z11.h\n"
+ "ldr x23, [x14, #0x78]\n"
+ "fmla z30.h, p3/M, z6.h, z11.h\n"
+ "ldr x10, [x14, #0x80]\n"
+ "fmla z28.h, p3/M, z4.h, z11.h\n"
+ "ldr x19, [x16, #0x18]\n"
"fmla z27.h, p3/M, z3.h, z11.h\n"
- "ldr x22, [x27, #0x8]\n"
- "ldr x21, [x27, #0x10]\n"
- "fmla z30.h, p3/M, z0.h, z11.h\n"
- "fmla z28.h, p3/M, z4.h, z10.h\n"
- "ldr x20, [x27, #0x18]\n"
- "ld1h { z18.h }, p3/Z, [x17]\n"
- "fmla z29.h, p3/M, z1.h, z11.h\n"
- "fmla z23.h, p3/M, z1.h, z13.h\n"
- "ld1h { z11.h }, p2/Z, [x10, x14, LSL #1]\n"
- "ld1h { z13.h }, p2/Z, [x9, x14, LSL #1]\n"
- "fmla z24.h, p3/M, z2.h, z12.h\n"
- "fmla z25.h, p3/M, z1.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x28, x14, LSL #1]\n"
- "ldr x10, [x16, #0x90]\n"
+ "ld1h { z16.h }, p3/Z, [x15]\n"
+ "fmla z25.h, p3/M, z1.h, z11.h\n"
+ "fmla z24.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x28, x13, LSL #1]\n"
+ "fmla z31.h, p3/M, z1.h, z13.h\n"
+ "ldr x28, [x14, #0x90]\n"
+ "fmla z30.h, p3/M, z0.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x27, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z1.h, z12.h\n"
+ "ldr x27, [x14, #0x98]\n"
"fmla z27.h, p3/M, z5.h, z10.h\n"
- "fmla z30.h, p3/M, z2.h, z10.h\n"
- "ldr x28, [x16, #0xa0]\n"
- "ldr x9, [x16, #0x98]\n"
- "fmla z26.h, p3/M, z0.h, z11.h\n"
- "fmla z28.h, p3/M, z2.h, z13.h\n"
- "fmla z24.h, p3/M, z8.h, z10.h\n"
- "fmla z25.h, p3/M, z7.h, z10.h\n"
- "fmla z31.h, p3/M, z1.h, z10.h\n"
- "fmla z29.h, p3/M, z3.h, z12.h\n"
- "ld1h { z10.h }, p2/Z, [x24, x14, LSL #1]\n"
- "ldr x24, [x16, #0xa8]\n"
- "fmla z26.h, p3/M, z6.h, z12.h\n"
+ "fmla z26.h, p3/M, z4.h, z10.h\n"
+ "fmla z30.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z7.h, z10.h\n"
+ "ldr x26, [x14, #0xa0]\n"
+ "fmla z24.h, p3/M, z2.h, z10.h\n"
+ "fmla z23.h, p3/M, z1.h, z10.h\n"
+ "fmla z30.h, p3/M, z8.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "fmla z31.h, p3/M, z3.h, z11.h\n"
+ "ldr x25, [x14, #0xa8]\n"
+ "fmla z28.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x24, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z5.h, z13.h\n"
+ "ldr x24, [x14, #0xb0]\n"
+ "fmla z26.h, p3/M, z2.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x23, x13, LSL #1]\n"
+ "fmla z25.h, p3/M, z3.h, z12.h\n"
+ "ldr x23, [x14, #0xb8]\n"
+ "fmla z28.h, p3/M, z6.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x10, x13, LSL #1]\n"
"fmla z27.h, p3/M, z7.h, z10.h\n"
- "ld1h { z12.h }, p2/Z, [x12, x14, LSL #1]\n"
- "ldr x12, [x16, #0xc0]\n"
- "fmla z28.h, p3/M, z6.h, z10.h\n"
- "fmla z30.h, p3/M, z4.h, z10.h\n"
- "fmla z23.h, p3/M, z3.h, z11.h\n"
- "fmla z25.h, p3/M, z5.h, z13.h\n"
- "ld1h { z11.h }, p2/Z, [x26, x14, LSL #1]\n"
- "ld1h { z13.h }, p2/Z, [x25, x14, LSL #1]\n"
- "fmla z29.h, p3/M, z5.h, z10.h\n"
- "fmla z31.h, p3/M, z3.h, z10.h\n"
- "ldr x26, [x16, #0xb0]\n"
- "ldr x25, [x16, #0xb8]\n"
- "fmla z26.h, p3/M, z8.h, z10.h\n"
- "fmla z28.h, p3/M, z8.h, z11.h\n"
- "fmla z30.h, p3/M, z6.h, z13.h\n"
- "fmla z24.h, p3/M, z3.h, z12.h\n"
+ "ldr x10, [x14, #0xc0]\n"
+ "fmla z26.h, p3/M, z6.h, z10.h\n"
+ "fmla z25.h, p3/M, z5.h, z10.h\n"
+ "fmla z28.h, p3/M, z8.h, z10.h\n"
+ "fmla z24.h, p3/M, z4.h, z10.h\n"
+ "fmla z23.h, p3/M, z3.h, z10.h\n"
+ "fmla z26.h, p3/M, z8.h, z11.h\n"
+ "fmla z25.h, p3/M, z7.h, z13.h\n"
+ "fmla z24.h, p3/M, z6.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x28, x13, LSL #1]\n"
+ "fmla z23.h, p3/M, z5.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x9, x13, LSL #1]\n"
+ "fmla z31.h, p3/M, z4.h, z12.h\n"
+ "fmla z30.h, p3/M, z3.h, z12.h\n"
+ "fmla z28.h, p3/M, z1.h, z12.h\n"
"fmla z27.h, p3/M, z0.h, z12.h\n"
- "fmla z31.h, p3/M, z5.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x11, x14, LSL #1]\n"
- "fmla z29.h, p3/M, z7.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x10, x14, LSL #1]\n"
- "fmla z23.h, p3/M, z4.h, z12.h\n"
- "fmla z26.h, p3/M, z1.h, z12.h\n"
- "fmla z24.h, p3/M, z5.h, z11.h\n"
- "ld1h { z12.h }, p2/Z, [x9, x14, LSL #1]\n"
- "fmla z25.h, p3/M, z4.h, z11.h\n"
+ "ld1h { z12.h }, p2/Z, [x27, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z4.h, z11.h\n"
+ "fmla z30.h, p3/M, z5.h, z11.h\n"
+ "fmla z26.h, p3/M, z1.h, z11.h\n"
"fmla z27.h, p3/M, z2.h, z11.h\n"
- "fmla z28.h, p3/M, z1.h, z11.h\n"
- "fmla z30.h, p3/M, z8.h, z13.h\n"
- "ld1h { z11.h }, p2/Z, [x28, x14, LSL #1]\n"
- "ldr x28, [x16, #0x20]\n"
- "fmla z31.h, p3/M, z7.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x24, x14, LSL #1]\n"
- "fmla z23.h, p3/M, z2.h, z11.h\n"
- "fmla z26.h, p3/M, z7.h, z12.h\n"
+ "ld1h { z11.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "fmla z24.h, p3/M, z8.h, z13.h\n"
+ "ldr x26, [x14, #0x20]\n"
+ "fmla z23.h, p3/M, z7.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "fmla z28.h, p3/M, z7.h, z12.h\n"
"fmla z27.h, p3/M, z6.h, z12.h\n"
- "fmla z29.h, p3/M, z4.h, z12.h\n"
- "fmla z30.h, p3/M, z3.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x26, x14, LSL #1]\n"
- "fmla z31.h, p3/M, z4.h, z13.h\n"
- "fmla z24.h, p3/M, z1.h, z11.h\n"
- "fmax z24.h, p3/M, z24.h, z17.h\n"
- "fmin z24.h, p3/M, z24.h, z16.h\n"
- "fmla z25.h, p3/M, z0.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x25, x14, LSL #1]\n"
- "fmla z23.h, p3/M, z6.h, z12.h\n"
- "fmax z23.h, p3/M, z23.h, z17.h\n"
- "fmla z28.h, p3/M, z7.h, z13.h\n"
- "fmla z30.h, p3/M, z5.h, z13.h\n"
- "fmin z23.h, p3/M, z23.h, z16.h\n"
- "st1h { z23.h }, p1, [x23, x13, LSL #1]\n"
- "fmla z29.h, p3/M, z0.h, z12.h\n"
+ "fmla z25.h, p3/M, z4.h, z12.h\n"
+ "fmla z24.h, p3/M, z3.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x24, x13, LSL #1]\n"
"fmla z31.h, p3/M, z2.h, z11.h\n"
- "ldr x23, [x27, #0x20]\n"
- "st1h { z24.h }, p1, [x22, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z1.h, z11.h\n"
+ "ld1h { z1.h }, p3/Z, [x15, #2, MUL VL]\n"
+ "fmla z29.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x23, x13, LSL #1]\n"
"fmla z27.h, p3/M, z8.h, z13.h\n"
- "fmla z26.h, p3/M, z3.h, z12.h\n"
- "ld1h { z13.h }, p2/Z, [x12, x14, LSL #1]\n"
- "ldp x12, x11, [x16, #0x0]\n"
- "fmla z25.h, p3/M, z8.h, z11.h\n"
- "fmla z28.h, p3/M, z5.h, z11.h\n"
- "ldp x10, x9, [x16, #0x10]\n"
- "fmax z25.h, p3/M, z25.h, z17.h\n"
- "fmla z29.h, p3/M, z8.h, z13.h\n"
- "fmla z30.h, p3/M, z7.h, z13.h\n"
- "fmax z26.h, p3/M, z26.h, z17.h\n"
- "fmax z27.h, p3/M, z27.h, z17.h\n"
- "fmla z31.h, p3/M, z6.h, z13.h\n"
- "inch x14\n"
- "ld1h { z9.h }, p0/Z, [x12, x15, LSL #1]\n"
- "ld1h { z10.h }, p0/Z, [x11, x15, LSL #1]\n"
- "ld1h { z11.h }, p0/Z, [x10, x15, LSL #1]\n"
- "ld1h { z12.h }, p0/Z, [x9, x15, LSL #1]\n"
- "fmin z25.h, p3/M, z25.h, z16.h\n"
- "fmin z26.h, p3/M, z26.h, z16.h\n"
- "ld1h { z13.h }, p0/Z, [x28, x15, LSL #1]\n"
- "inch x15\n"
- "fmin z27.h, p3/M, z27.h, z16.h\n"
- "st1h { z25.h }, p1, [x21, x13, LSL #1]\n"
- "fmax z28.h, p3/M, z28.h, z17.h\n"
- "fmax z29.h, p3/M, z29.h, z17.h\n"
- "st1h { z26.h }, p1, [x20, x13, LSL #1]\n"
- "ldr x22, [x27, #0x28]\n"
- "fmax z30.h, p3/M, z30.h, z17.h\n"
- "fmax z31.h, p3/M, z31.h, z17.h\n"
- "st1h { z27.h }, p1, [x23, x13, LSL #1]\n"
- "ldr x21, [x27, #0x30]\n"
- "ldr x20, [x27, #0x38]\n"
- "ldr x23, [x27, #0x40]\n"
- "whilelt p2.h, x14, %x[n_channels]\n"
- "cmp x15, %x[n_channels]\n"
- "ld1h { z0.h }, p3/Z, [x17, #1, MUL VL]\n"
- "ld1h { z1.h }, p3/Z, [x17, #2, MUL VL]\n"
- "fmin z28.h, p3/M, z28.h, z16.h\n"
- "fmin z29.h, p3/M, z29.h, z16.h\n"
- "ld1h { z2.h }, p3/Z, [x17, #3, MUL VL]\n"
- "ld1h { z3.h }, p3/Z, [x17, #4, MUL VL]\n"
- "fmin z30.h, p3/M, z30.h, z16.h\n"
- "fmin z31.h, p3/M, z31.h, z16.h\n"
- "ld1h { z4.h }, p3/Z, [x17, #5, MUL VL]\n"
- "ld1h { z5.h }, p3/Z, [x17, #6, MUL VL]\n"
- "st1h { z28.h }, p1, [x22, x13, LSL #1]\n"
- "ld1h { z6.h }, p3/Z, [x17, #7, MUL VL]\n"
- "addvl x17, x17, #16\n"
- "st1h { z29.h }, p1, [x21, x13, LSL #1]\n"
- "ld1h { z7.h }, p3/Z, [x17, #-8, MUL VL]\n"
- "st1h { z30.h }, p1, [x20, x13, LSL #1]\n"
- "ld1h { z8.h }, p3/Z, [x17, #-7, MUL VL]\n"
- "addvl x17, x17, #-6\n"
- "st1h { z31.h }, p1, [x23, x13, LSL #1]\n"
+ "fmla z26.h, p3/M, z7.h, z13.h\n"
+ "fmla z24.h, p3/M, z5.h, z13.h\n"
+ "fmla z23.h, p3/M, z4.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x10, x13, LSL #1]\n"
+ "inch x13\n"
+ "fmla z31.h, p3/M, z6.h, z12.h\n"
+ "ldp x10, x9, [x14, #0x0]\n"
+ "whilelt p2.h, x13, %x[n_channels]\n"
+ "fmla z28.h, p3/M, z3.h, z12.h\n"
+ "ldp x28, x27, [x14, #0x10]\n"
+ "fmla z25.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z0.h }, p3/Z, [x15, #1, MUL VL]\n"
+ "fmla z29.h, p3/M, z8.h, z11.h\n"
+ "ld1h { z9.h }, p1/Z, [x10, x12, LSL #1]\n"
+ "fmla z26.h, p3/M, z5.h, z11.h\n"
+ "ld1h { z10.h }, p1/Z, [x9, x12, LSL #1]\n"
+ "fmla z23.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z11.h }, p1/Z, [x28, x12, LSL #1]\n"
+ "fmla z25.h, p3/M, z8.h, z13.h\n"
+ "ld1h { z12.h }, p1/Z, [x27, x12, LSL #1]\n"
+ "fmla z24.h, p3/M, z7.h, z13.h\n"
+ "ld1h { z2.h }, p3/Z, [x15, #3, MUL VL]\n"
+ "fmax z31.h, p3/M, z31.h, z18.h\n"
+ "ld1h { z3.h }, p3/Z, [x15, #4, MUL VL]\n"
+ "fmla z23.h, p3/M, z6.h, z13.h\n"
+ "ld1h { z13.h }, p1/Z, [x26, x12, LSL #1]\n"
+ "inch x12\n"
+ "fmax z30.h, p3/M, z30.h, z18.h\n"
+ "ld1h { z4.h }, p3/Z, [x15, #5, MUL VL]\n"
+ "cmp x12, %x[n_channels]\n"
+ "fmin z31.h, p3/M, z31.h, z17.h\n"
+ "ld1h { z5.h }, p3/Z, [x15, #6, MUL VL]\n"
+ "fmax z29.h, p3/M, z29.h, z18.h\n"
+ "ld1h { z6.h }, p3/Z, [x15, #7, MUL VL]\n"
+ "addvl x15, x15, #16\n"
+ "fmax z28.h, p3/M, z28.h, z18.h\n"
+ "ld1h { z7.h }, p3/Z, [x15, #-8, MUL VL]\n"
+ "fmax z27.h, p3/M, z27.h, z18.h\n"
+ "ld1h { z8.h }, p3/Z, [x15, #-7, MUL VL]\n"
+ "addvl x15, x15, #-6\n"
+ "fmin z30.h, p3/M, z30.h, z17.h\n"
+ "st1h { z31.h }, p0, [x22, x11, LSL #1]\n"
+ "fmin z29.h, p3/M, z29.h, z17.h\n"
+ "ldr x22, [x16, #0x20]\n"
+ "fmax z26.h, p3/M, z26.h, z18.h\n"
+ "st1h { z30.h }, p0, [x21, x11, LSL #1]\n"
+ "fmin z28.h, p3/M, z28.h, z17.h\n"
+ "fmin z27.h, p3/M, z27.h, z17.h\n"
+ "st1h { z29.h }, p0, [x20, x11, LSL #1]\n"
+ "fmin z26.h, p3/M, z26.h, z17.h\n"
+ "ldr x21, [x16, #0x28]\n"
+ "fmax z25.h, p3/M, z25.h, z18.h\n"
+ "ldr x20, [x16, #0x30]\n"
+ "fmax z24.h, p3/M, z24.h, z18.h\n"
+ "st1h { z28.h }, p0, [x19, x11, LSL #1]\n"
+ "fmax z23.h, p3/M, z23.h, z18.h\n"
+ "st1h { z27.h }, p0, [x22, x11, LSL #1]\n"
+ "st1h { z26.h }, p0, [x21, x11, LSL #1]\n"
+ "fmin z25.h, p3/M, z25.h, z17.h\n"
+ "ldr x19, [x16, #0x38]\n"
+ "fmin z24.h, p3/M, z24.h, z17.h\n"
+ "ldr x22, [x16, #0x40]\n"
+ "fmin z23.h, p3/M, z23.h, z17.h\n"
+ "st1h { z25.h }, p0, [x20, x11, LSL #1]\n"
+ "st1h { z24.h }, p0, [x19, x11, LSL #1]\n"
+ "st1h { z23.h }, p0, [x22, x11, LSL #1]\n"
"blt 1b\n"
"2:" // Channel tail
- "movprfx z23, z18\n fmla z23.h, p3/M, z8.h, z9.h\n"
- "movprfx z24, z18\n fmla z24.h, p3/M, z7.h, z9.h\n"
- "ldr x26, [x16, #0x30]\n"
- "ldr x25, [x16, #0x38]\n"
- "movprfx z25, z18\n fmla z25.h, p3/M, z6.h, z9.h\n"
- "fmla z23.h, p3/M, z0.h, z10.h\n"
- "ldr x24, [x16, #0x28]\n"
- "ldr x11, [x16, #0x48]\n"
- "fmla z24.h, p3/M, z4.h, z13.h\n"
- "movprfx z26, z18\n fmla z26.h, p3/M, z5.h, z9.h\n"
- "ldr x12, [x16, #0x40]\n"
- "ld1h { z10.h }, p2/Z, [x11, x14, LSL #1]\n"
- "movprfx z27, z18\n fmla z27.h, p3/M, z4.h, z9.h\n"
- "movprfx z28, z18\n fmla z28.h, p3/M, z3.h, z9.h\n"
- "ldr x10, [x16, #0x50]\n"
- "ldr x9, [x16, #0x58]\n"
- "fmla z25.h, p3/M, z2.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x26, x14, LSL #1]\n"
- "movprfx z29, z18\n fmla z29.h, p3/M, z2.h, z9.h\n"
- "ldr x28, [x16, #0x60]\n"
- "fmla z23.h, p3/M, z5.h, z13.h\n"
- "fmla z24.h, p3/M, z6.h, z11.h\n"
- "ldr x26, [x16, #0x70]\n"
- "ldr x11, [x16, #0x88]\n"
- "movprfx z31, z18\n fmla z31.h, p3/M, z0.h, z9.h\n"
- "fmla z25.h, p3/M, z3.h, z13.h\n"
- "inch x13\n"
- "mov p1.b, p2.b\n"
- "fmla z26.h, p3/M, z2.h, z13.h\n"
+ "movprfx z31, z16\n fmla z31.h, p3/M, z8.h, z9.h\n"
+ "ldr x25, [x14, #0x28]\n"
+ "inch x11\n"
+ "movprfx z30, z16\n fmla z30.h, p3/M, z7.h, z9.h\n"
+ "ldr x24, [x14, #0x30]\n"
+ "mov p0.b, p2.b\n"
+ "movprfx z29, z16\n fmla z29.h, p3/M, z6.h, z9.h\n"
+ "ldr x23, [x14, #0x38]\n"
+ "movprfx z28, z16\n fmla z28.h, p3/M, z5.h, z9.h\n"
+ "ldr x10, [x14, #0x40]\n"
+ "movprfx z27, z16\n fmla z27.h, p3/M, z4.h, z9.h\n"
+ "ldr x9, [x14, #0x48]\n"
+ "movprfx z26, z16\n fmla z26.h, p3/M, z3.h, z9.h\n"
+ "ldr x28, [x14, #0x50]\n"
+ "movprfx z25, z16\n fmla z25.h, p3/M, z2.h, z9.h\n"
+ "ldr x27, [x14, #0x58]\n"
+ "movprfx z24, z16\n fmla z24.h, p3/M, z1.h, z9.h\n"
+ "ldr x26, [x14, #0x60]\n"
+ "movprfx z23, z16\n fmla z23.h, p3/M, z0.h, z9.h\n"
+ "ldr x22, [x16, #0x0]\n"
+ "fmla z31.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x9, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x24, x13, LSL #1]\n"
+ "fmla z25.h, p3/M, z6.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z4.h, z13.h\n"
+ "ldr x25, [x14, #0x68]\n"
+ "fmla z31.h, p3/M, z5.h, z13.h\n"
+ "ldr x24, [x14, #0x70]\n"
+ "fmla z29.h, p3/M, z3.h, z13.h\n"
+ "ldr x9, [x14, #0x88]\n"
+ "fmla z28.h, p3/M, z2.h, z13.h\n"
+ "ldr x21, [x16, #0x8]\n"
"fmla z27.h, p3/M, z1.h, z13.h\n"
- "ldr x23, [x27, #0x0]\n"
- "ldr x22, [x27, #0x8]\n"
- "fmla z28.h, p3/M, z0.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x25, x14, LSL #1]\n"
- "fmla z29.h, p3/M, z6.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x24, x14, LSL #1]\n"
- "movprfx z30, z18\n fmla z30.h, p3/M, z1.h, z9.h\n"
- "fmla z23.h, p3/M, z7.h, z11.h\n"
- "ldr x24, [x16, #0x68]\n"
- "ldr x25, [x16, #0x78]\n"
- "fmla z24.h, p3/M, z0.h, z13.h\n"
- "fmla z31.h, p3/M, z8.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x12, x14, LSL #1]\n"
- "ldr x12, [x16, #0x80]\n"
- "fmla z26.h, p3/M, z4.h, z11.h\n"
+ "ldr x20, [x16, #0x10]\n"
+ "fmla z26.h, p3/M, z0.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x23, x13, LSL #1]\n"
+ "fmla z23.h, p3/M, z8.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x10, x13, LSL #1]\n"
+ "fmla z31.h, p3/M, z7.h, z11.h\n"
+ "ldr x23, [x14, #0x78]\n"
+ "fmla z30.h, p3/M, z6.h, z11.h\n"
+ "ldr x10, [x14, #0x80]\n"
+ "fmla z28.h, p3/M, z4.h, z11.h\n"
+ "ldr x19, [x16, #0x18]\n"
"fmla z27.h, p3/M, z3.h, z11.h\n"
- "ldr x21, [x27, #0x10]\n"
- "ldr x20, [x27, #0x18]\n"
- "fmla z30.h, p3/M, z0.h, z11.h\n"
- "fmla z28.h, p3/M, z4.h, z10.h\n"
- "fmla z29.h, p3/M, z1.h, z11.h\n"
- "fmla z23.h, p3/M, z1.h, z13.h\n"
- "ld1h { z11.h }, p2/Z, [x10, x14, LSL #1]\n"
- "ld1h { z13.h }, p2/Z, [x9, x14, LSL #1]\n"
- "fmla z24.h, p3/M, z2.h, z12.h\n"
- "fmla z25.h, p3/M, z1.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x28, x14, LSL #1]\n"
- "ldr x10, [x16, #0x90]\n"
+ "fmla z25.h, p3/M, z1.h, z11.h\n"
+ "fmla z24.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x28, x13, LSL #1]\n"
+ "fmla z31.h, p3/M, z1.h, z13.h\n"
+ "ldr x28, [x14, #0x90]\n"
+ "fmla z30.h, p3/M, z0.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x27, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z1.h, z12.h\n"
+ "ldr x27, [x14, #0x98]\n"
"fmla z27.h, p3/M, z5.h, z10.h\n"
- "fmla z30.h, p3/M, z2.h, z10.h\n"
- "ldr x28, [x16, #0xa0]\n"
- "ldr x9, [x16, #0x98]\n"
- "fmla z26.h, p3/M, z0.h, z11.h\n"
- "fmla z28.h, p3/M, z2.h, z13.h\n"
- "fmla z24.h, p3/M, z8.h, z10.h\n"
- "fmla z25.h, p3/M, z7.h, z10.h\n"
- "fmla z31.h, p3/M, z1.h, z10.h\n"
- "fmla z29.h, p3/M, z3.h, z12.h\n"
- "ld1h { z10.h }, p2/Z, [x24, x14, LSL #1]\n"
- "ldr x24, [x16, #0xa8]\n"
- "fmla z26.h, p3/M, z6.h, z12.h\n"
+ "fmla z26.h, p3/M, z4.h, z10.h\n"
+ "fmla z30.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z7.h, z10.h\n"
+ "ldr x26, [x14, #0xa0]\n"
+ "fmla z24.h, p3/M, z2.h, z10.h\n"
+ "fmla z23.h, p3/M, z1.h, z10.h\n"
+ "fmla z30.h, p3/M, z8.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "fmla z31.h, p3/M, z3.h, z11.h\n"
+ "ldr x25, [x14, #0xa8]\n"
+ "fmla z28.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x24, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z5.h, z13.h\n"
+ "ldr x24, [x14, #0xb0]\n"
+ "fmla z26.h, p3/M, z2.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x23, x13, LSL #1]\n"
+ "fmla z25.h, p3/M, z3.h, z12.h\n"
+ "ldr x23, [x14, #0xb8]\n"
+ "fmla z28.h, p3/M, z6.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x10, x13, LSL #1]\n"
"fmla z27.h, p3/M, z7.h, z10.h\n"
- "ld1h { z12.h }, p2/Z, [x12, x14, LSL #1]\n"
- "ldr x12, [x16, #0xc0]\n"
- "fmla z28.h, p3/M, z6.h, z10.h\n"
- "fmla z30.h, p3/M, z4.h, z10.h\n"
- "fmla z23.h, p3/M, z3.h, z11.h\n"
- "fmla z25.h, p3/M, z5.h, z13.h\n"
- "ld1h { z11.h }, p2/Z, [x26, x14, LSL #1]\n"
- "ld1h { z13.h }, p2/Z, [x25, x14, LSL #1]\n"
- "fmla z29.h, p3/M, z5.h, z10.h\n"
- "fmla z31.h, p3/M, z3.h, z10.h\n"
- "ldr x26, [x16, #0xb0]\n"
- "ldr x25, [x16, #0xb8]\n"
- "fmla z26.h, p3/M, z8.h, z10.h\n"
- "fmla z28.h, p3/M, z8.h, z11.h\n"
- "fmla z30.h, p3/M, z6.h, z13.h\n"
- "fmla z24.h, p3/M, z3.h, z12.h\n"
+ "ldr x10, [x14, #0xc0]\n"
+ "fmla z26.h, p3/M, z6.h, z10.h\n"
+ "fmla z25.h, p3/M, z5.h, z10.h\n"
+ "fmla z28.h, p3/M, z8.h, z10.h\n"
+ "fmla z24.h, p3/M, z4.h, z10.h\n"
+ "fmla z23.h, p3/M, z3.h, z10.h\n"
+ "fmla z26.h, p3/M, z8.h, z11.h\n"
+ "fmla z25.h, p3/M, z7.h, z13.h\n"
+ "fmla z24.h, p3/M, z6.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x28, x13, LSL #1]\n"
+ "fmla z23.h, p3/M, z5.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x9, x13, LSL #1]\n"
+ "fmla z31.h, p3/M, z4.h, z12.h\n"
+ "fmla z30.h, p3/M, z3.h, z12.h\n"
+ "fmla z28.h, p3/M, z1.h, z12.h\n"
"fmla z27.h, p3/M, z0.h, z12.h\n"
- "fmla z31.h, p3/M, z5.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x11, x14, LSL #1]\n"
- "fmla z29.h, p3/M, z7.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x10, x14, LSL #1]\n"
- "fmla z23.h, p3/M, z4.h, z12.h\n"
- "fmla z26.h, p3/M, z1.h, z12.h\n"
- "fmla z24.h, p3/M, z5.h, z11.h\n"
- "ld1h { z12.h }, p2/Z, [x9, x14, LSL #1]\n"
- "fmla z25.h, p3/M, z4.h, z11.h\n"
+ "ld1h { z12.h }, p2/Z, [x27, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z4.h, z11.h\n"
+ "fmla z30.h, p3/M, z5.h, z11.h\n"
+ "fmla z26.h, p3/M, z1.h, z11.h\n"
"fmla z27.h, p3/M, z2.h, z11.h\n"
- "fmla z28.h, p3/M, z1.h, z11.h\n"
- "fmla z30.h, p3/M, z8.h, z13.h\n"
- "ld1h { z11.h }, p2/Z, [x28, x14, LSL #1]\n"
- "fmla z31.h, p3/M, z7.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x24, x14, LSL #1]\n"
- "fmla z23.h, p3/M, z2.h, z11.h\n"
- "fmla z26.h, p3/M, z7.h, z12.h\n"
+ "ld1h { z11.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "fmla z24.h, p3/M, z8.h, z13.h\n"
+ "fmla z23.h, p3/M, z7.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "fmla z28.h, p3/M, z7.h, z12.h\n"
"fmla z27.h, p3/M, z6.h, z12.h\n"
- "fmla z29.h, p3/M, z4.h, z12.h\n"
- "fmla z30.h, p3/M, z3.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x26, x14, LSL #1]\n"
- "fmla z31.h, p3/M, z4.h, z13.h\n"
- "fmla z24.h, p3/M, z1.h, z11.h\n"
- "fmax z24.h, p3/M, z24.h, z17.h\n"
- "fmin z24.h, p3/M, z24.h, z16.h\n"
- "fmla z25.h, p3/M, z0.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x25, x14, LSL #1]\n"
- "fmla z23.h, p3/M, z6.h, z12.h\n"
- "fmax z23.h, p3/M, z23.h, z17.h\n"
- "fmla z28.h, p3/M, z7.h, z13.h\n"
- "fmla z30.h, p3/M, z5.h, z13.h\n"
- "fmin z23.h, p3/M, z23.h, z16.h\n"
- "st1h { z23.h }, p1, [x23, x13, LSL #1]\n"
- "fmla z29.h, p3/M, z0.h, z12.h\n"
+ "fmla z25.h, p3/M, z4.h, z12.h\n"
+ "fmla z24.h, p3/M, z3.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x24, x13, LSL #1]\n"
"fmla z31.h, p3/M, z2.h, z11.h\n"
- "ldr x23, [x27, #0x20]\n"
- "st1h { z24.h }, p1, [x22, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z1.h, z11.h\n"
+ "fmla z29.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x23, x13, LSL #1]\n"
"fmla z27.h, p3/M, z8.h, z13.h\n"
- "fmla z26.h, p3/M, z3.h, z12.h\n"
- "ld1h { z13.h }, p2/Z, [x12, x14, LSL #1]\n"
- "fmax z26.h, p3/M, z26.h, z17.h\n"
- "fmla z25.h, p3/M, z8.h, z11.h\n"
- "fmla z28.h, p3/M, z5.h, z11.h\n"
- "fmax z25.h, p3/M, z25.h, z17.h\n"
- "fmax z27.h, p3/M, z27.h, z17.h\n"
- "fmla z29.h, p3/M, z8.h, z13.h\n"
- "fmla z30.h, p3/M, z7.h, z13.h\n"
- "fmin z25.h, p3/M, z25.h, z16.h\n"
- "fmin z26.h, p3/M, z26.h, z16.h\n"
- "fmla z31.h, p3/M, z6.h, z13.h\n"
- "fmin z27.h, p3/M, z27.h, z16.h\n"
- "fmax z28.h, p3/M, z28.h, z17.h\n"
- "st1h { z25.h }, p1, [x21, x13, LSL #1]\n"
- "fmax z29.h, p3/M, z29.h, z17.h\n"
- "fmax z30.h, p3/M, z30.h, z17.h\n"
- "st1h { z26.h }, p1, [x20, x13, LSL #1]\n"
- "ldr x22, [x27, #0x28]\n"
- "fmax z31.h, p3/M, z31.h, z17.h\n"
- "st1h { z27.h }, p1, [x23, x13, LSL #1]\n"
- "ldr x21, [x27, #0x30]\n"
- "ldr x20, [x27, #0x38]\n"
- "ldr x23, [x27, #0x40]\n"
- "fmin z28.h, p3/M, z28.h, z16.h\n"
- "fmin z29.h, p3/M, z29.h, z16.h\n"
- "st1h { z28.h }, p1, [x22, x13, LSL #1]\n"
- "fmin z30.h, p3/M, z30.h, z16.h\n"
- "fmin z31.h, p3/M, z31.h, z16.h\n"
- "st1h { z29.h }, p1, [x21, x13, LSL #1]\n"
- "st1h { z30.h }, p1, [x20, x13, LSL #1]\n"
- "st1h { z31.h }, p1, [x23, x13, LSL #1]\n"
+ "fmla z26.h, p3/M, z7.h, z13.h\n"
+ "fmla z24.h, p3/M, z5.h, z13.h\n"
+ "fmla z23.h, p3/M, z4.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x10, x13, LSL #1]\n"
+ "fmla z31.h, p3/M, z6.h, z12.h\n"
+ "fmla z28.h, p3/M, z3.h, z12.h\n"
+ "fmla z25.h, p3/M, z0.h, z12.h\n"
+ "fmla z29.h, p3/M, z8.h, z11.h\n"
+ "fmla z26.h, p3/M, z5.h, z11.h\n"
+ "fmla z23.h, p3/M, z2.h, z11.h\n"
+ "fmla z25.h, p3/M, z8.h, z13.h\n"
+ "fmla z24.h, p3/M, z7.h, z13.h\n"
+ "fmax z31.h, p3/M, z31.h, z18.h\n"
+ "fmla z23.h, p3/M, z6.h, z13.h\n"
+ "fmax z30.h, p3/M, z30.h, z18.h\n"
+ "fmax z29.h, p3/M, z29.h, z18.h\n"
+ "fmin z31.h, p3/M, z31.h, z17.h\n"
+ "st1h { z31.h }, p0, [x22, x11, LSL #1]\n"
+ "fmin z30.h, p3/M, z30.h, z17.h\n"
+ "fmin z29.h, p3/M, z29.h, z17.h\n"
+ "ldr x22, [x16, #0x20]\n"
+ "fmax z28.h, p3/M, z28.h, z18.h\n"
+ "st1h { z30.h }, p0, [x21, x11, LSL #1]\n"
+ "fmax z27.h, p3/M, z27.h, z18.h\n"
+ "fmax z26.h, p3/M, z26.h, z18.h\n"
+ "st1h { z29.h }, p0, [x20, x11, LSL #1]\n"
+ "fmin z28.h, p3/M, z28.h, z17.h\n"
+ "ldr x21, [x16, #0x28]\n"
+ "fmax z25.h, p3/M, z25.h, z18.h\n"
+ "ldr x20, [x16, #0x30]\n"
+ "fmax z24.h, p3/M, z24.h, z18.h\n"
+ "st1h { z28.h }, p0, [x19, x11, LSL #1]\n"
+ "fmin z27.h, p3/M, z27.h, z17.h\n"
+ "fmin z26.h, p3/M, z26.h, z17.h\n"
+ "ldr x19, [x16, #0x38]\n"
+ "fmin z25.h, p3/M, z25.h, z17.h\n"
+ "st1h { z27.h }, p0, [x22, x11, LSL #1]\n"
+ "fmin z24.h, p3/M, z24.h, z17.h\n"
+ "fmax z23.h, p3/M, z23.h, z18.h\n"
+ "st1h { z26.h }, p0, [x21, x11, LSL #1]\n"
+ "st1h { z25.h }, p0, [x20, x11, LSL #1]\n"
+ "fmin z23.h, p3/M, z23.h, z17.h\n"
+ "st1h { z24.h }, p0, [x19, x11, LSL #1]\n"
+ "ldr x22, [x16, #0x40]\n"
+ "st1h { z23.h }, p0, [x22, x11, LSL #1]\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z16", "z17", "z18", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z16", "z17", "z18", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
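Note on the fmax/fmin tail repeated in every hunk above: before each st1h store, the output accumulators are clamped to the activation bounds that ld1rh broadcast from Args::min and Args::max (z18/z17 in the new code of this hunk, z17/z16 in the old). A scalar sketch of that per-lane clamp, using float for portability while the kernel itself operates on fp16 (.h) lanes; the function and parameter names here are illustrative, not from the library:

#include <algorithm>

// Scalar equivalent of the per-lane clamp done by the SVE code above.
static inline float clamp_activation(float acc, float act_min, float act_max)
{
    acc = std::max(acc, act_min);  // matches "fmax z.., p3/M, z.., z_min"
    acc = std::min(acc, act_max);  // matches "fmin z.., p3/M, z.., z_max"
    return acc;
}

Feeding the broadcast min/max through predicated fmax/fmin is how these kernels fuse a bounded activation (e.g. ReLU with an upper bound) into the convolution epilogue instead of making a second pass over the output.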
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
index c0b9137f6b..3cfac06449 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -88,565 +88,565 @@ void sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
__asm__ __volatile__(
"ptrue p3.b\n"
- "mov x16, #0x0\n"
- "mov x4, #0x0\n"
+ "mov x2, #0x0\n"
+ "mov x3, #0x0\n"
"1:" // Tile loop
- "str x16, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "mov x25, #0x4\n"
+ "str x2, [%x[params_struct], %[offsetof_args_tile_i]]\n"
"mov x24, #0x4\n"
- "str x4, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x23, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x22, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "mul x21, x16, x23\n" // offset = tile_i * ld_input_row
- "ldr x5, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "ldr x6, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "mul x20, x16, x22\n" // offset = tile_i * ld_output_row
- "add x7, x5, x5\n"
- "madd x21, x4, x5, x21\n" // offset += tile_j * ld_input_col
+ "str x3, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "mov x23, #0x4\n"
+ "ldr x4, [%x[params_struct], %[offsetof_args_params]]\n"
+ "mov x5, #0x0\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "cnth x6\n"
+ "ldr x7, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "sub x21, XZR, x6\n"
"ldr x8, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "ldr x17, [%x[params_struct], %[offsetof_args_params]]\n"
- "cnth x16\n"
- "madd x20, x4, x6, x20\n" // offset += tile_j * ld_output_col
- "ldr x15, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "add x14, x7, x5\n"
+ "mul x19, x2, x22\n" // offset = tile_i * ld_input_row
+ "ldr x20, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "madd x19, x3, x7, x19\n" // offset += tile_j * ld_input_col
+ "ldr x17, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+ "mul x19, x19, x24\n" // offset *= kernel_stride * output_size
+ "ldr x16, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "add x8, x8, x19, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
+ "ld1rh { z15.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "add x15, x8, x22, LSL #1\n"
+ "ld1rh { z14.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "add x14, x15, x22, LSL #1\n"
+ "ld1h { z13.h }, p3/Z, [x4]\n"
+ "add x13, x14, x22, LSL #1\n"
+ "ld1h { z0.h }, p3/Z, [x4, #1, MUL VL]\n"
+ "add x12, x13, x22, LSL #1\n"
+ "ld1h { z1.h }, p3/Z, [x4, #2, MUL VL]\n"
+ "add x11, x12, x22, LSL #1\n"
+ "ld1h { z2.h }, p3/Z, [x4, #3, MUL VL]\n"
+ "add x10, x7, x7\n"
+ "ld1h { z3.h }, p3/Z, [x4, #4, MUL VL]\n"
+ "add x9, x10, x7\n"
+ "ld1h { z4.h }, p3/Z, [x4, #5, MUL VL]\n"
+ "add x28, x9, x7\n"
+ "ld1h { z5.h }, p3/Z, [x4, #6, MUL VL]\n"
+ "add x27, x28, x7\n"
+ "ld1h { z6.h }, p3/Z, [x4, #7, MUL VL]\n"
+ "mul x19, x2, x20\n" // offset = tile_i * ld_output_row
+ "add x26, x17, x17\n"
+ "madd x19, x3, x17, x19\n" // offset += tile_j * ld_output_col
+ "mul x19, x19, x23\n" // offset *= output_tile_size
+ "add x16, x16, x19, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
+ "add x25, x16, x20, LSL #1\n"
+ "add x24, x25, x20, LSL #1\n"
+ "add x23, x24, x20, LSL #1\n"
+ "add x22, x26, x17\n"
"whilelt p2.h, XZR, %x[n_channels]\n"
- "mul x21, x21, x25\n" // offset *= kernel_stride * output_size
- "add x8, x8, x21, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
- "add x13, x8, x23, LSL #1\n"
- "ld1h { z15.h }, p3/Z, [x17]\n"
- "mul x20, x20, x24\n" // offset *= output_tile_size
- "add x12, x13, x23, LSL #1\n"
- "add x15, x15, x20, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
- "ld1h { z0.h }, p3/Z, [x17, #1, MUL VL]\n"
- "ld1h { z1.h }, p3/Z, [x17, #2, MUL VL]\n"
- "ld1h { z2.h }, p3/Z, [x17, #3, MUL VL]\n"
- "add x11, x12, x23, LSL #1\n"
- "add x10, x14, x5\n"
- "ld1h { z3.h }, p3/Z, [x17, #4, MUL VL]\n"
- "ld1h { z4.h }, p3/Z, [x17, #5, MUL VL]\n"
- "add x9, x15, x22, LSL #1\n"
- "add x28, x11, x23, LSL #1\n"
- "ld1h { z5.h }, p3/Z, [x17, #6, MUL VL]\n"
- "ld1h { z6.h }, p3/Z, [x17, #7, MUL VL]\n"
- "addvl x17, x17, #16\n"
- "add x27, x10, x5\n"
- "add x26, x9, x22, LSL #1\n"
- "add x25, x6, x6\n"
- "ld1rh { z14.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ld1rh { z13.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "cmp x16, %x[n_channels]\n"
- "add x24, x28, x23, LSL #1\n"
- "ld1h { z7.h }, p3/Z, [x17, #-8, MUL VL]\n"
- "ld1h { z8.h }, p3/Z, [x17, #-7, MUL VL]\n"
- "add x23, x26, x22, LSL #1\n"
- "add x22, x25, x6\n"
- "ld1h { z9.h }, p2/Z, [x12, x7, LSL #1]\n"
+ "ld1h { z9.h }, p2/Z, [x14, x10, LSL #1]\n"
"ld1h { z10.h }, p2/Z, [x8]\n"
- "mov x21, #0x0\n"
- "sub x20, XZR, x16\n"
+ "addvl x4, x4, #16\n"
"ld1h { z11.h }, p2/Z, [x8, x27, LSL #1]\n"
- "ld1h { z12.h }, p2/Z, [x12, x14, LSL #1]\n"
- "addvl x17, x17, #-6\n"
+ "cmp x6, %x[n_channels]\n"
+ "ld1h { z7.h }, p3/Z, [x4, #-8, MUL VL]\n"
+ "ld1h { z8.h }, p3/Z, [x4, #-7, MUL VL]\n"
+ "addvl x4, x4, #-6\n"
+ "ld1h { z12.h }, p2/Z, [x14, x9, LSL #1]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
- "movprfx z21, z15\n fmla z21.h, p3/M, z4.h, z9.h\n"
- "movprfx z16, z15\n fmla z16.h, p3/M, z8.h, z9.h\n"
- "whilelt p1.h, x16, %x[n_channels]\n"
+ "movprfx z31, z13\n fmla z31.h, p3/M, z8.h, z9.h\n"
+ "whilelt p1.h, x6, %x[n_channels]\n"
+ "movprfx z30, z13\n fmla z30.h, p3/M, z7.h, z9.h\n"
"inch x21\n"
- "movprfx z22, z15\n fmla z22.h, p3/M, z3.h, z9.h\n"
- "movprfx z25, z15\n fmla z25.h, p3/M, z1.h, z9.h\n"
- "inch x16\n"
+ "movprfx z29, z13\n fmla z29.h, p3/M, z6.h, z9.h\n"
"mov p0.b, p2.b\n"
- "movprfx z26, z15\n fmla z26.h, p3/M, z0.h, z9.h\n"
- "fmla z21.h, p3/M, z5.h, z12.h\n"
- "inch x20\n"
- "movprfx z17, z15\n fmla z17.h, p3/M, z7.h, z9.h\n"
- "movprfx z18, z15\n fmla z18.h, p3/M, z6.h, z9.h\n"
- "movprfx z20, z15\n fmla z20.h, p3/M, z5.h, z9.h\n"
- "movprfx z24, z15\n fmla z24.h, p3/M, z2.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x11, x7, LSL #1]\n"
- "fmla z16.h, p3/M, z0.h, z10.h\n"
- "movprfx z19, z15\n fmla z19.h, p3/M, z2.h, z11.h\n"
- "ld1h { z10.h }, p2/Z, [x24]\n"
- "ld1h { z11.h }, p2/Z, [x24, x27, LSL #1]\n"
- "fmla z22.h, p3/M, z4.h, z12.h\n"
- "fmla z25.h, p3/M, z2.h, z12.h\n"
- "fmla z26.h, p3/M, z1.h, z12.h\n"
- "movprfx z28, z15\n fmla z28.h, p3/M, z6.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x11, x14, LSL #1]\n"
- "fmla z21.h, p3/M, z7.h, z9.h\n"
- "fmla z17.h, p3/M, z8.h, z12.h\n"
- "fmla z18.h, p3/M, z7.h, z12.h\n"
- "fmla z19.h, p3/M, z6.h, z12.h\n"
- "movprfx z23, z15\n fmla z23.h, p3/M, z3.h, z12.h\n"
- "movprfx z27, z15\n fmla z27.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x8, x5, LSL #1]\n"
- "movprfx z31, z15\n fmla z31.h, p3/M, z8.h, z11.h\n"
- "fmla z22.h, p3/M, z6.h, z9.h\n"
- "ld1h { z11.h }, p2/Z, [x8, x10, LSL #1]\n"
- "fmla z25.h, p3/M, z4.h, z9.h\n"
- "fmla z26.h, p3/M, z3.h, z9.h\n"
- "fmla z20.h, p3/M, z8.h, z9.h\n"
- "fmla z24.h, p3/M, z5.h, z9.h\n"
- "fmla z28.h, p3/M, z2.h, z9.h\n"
- "fmla z21.h, p3/M, z8.h, z10.h\n"
- "fmla z16.h, p3/M, z1.h, z12.h\n"
- "fmla z17.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x13, x27, LSL #1]\n"
- "fmla z18.h, p3/M, z2.h, z11.h\n"
- "fmla z19.h, p3/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x28]\n"
- "fmla z22.h, p3/M, z7.h, z10.h\n"
- "fmla z23.h, p3/M, z6.h, z10.h\n"
- "fmla z25.h, p3/M, z5.h, z10.h\n"
- "fmla z26.h, p3/M, z4.h, z10.h\n"
- "fmla z27.h, p3/M, z3.h, z10.h\n"
+ "movprfx z27, z13\n fmla z27.h, p3/M, z5.h, z9.h\n"
+ "inch x5\n"
+ "movprfx z26, z13\n fmla z26.h, p3/M, z4.h, z9.h\n"
+ "inch x6\n"
+ "movprfx z25, z13\n fmla z25.h, p3/M, z3.h, z9.h\n"
+ "movprfx z23, z13\n fmla z23.h, p3/M, z2.h, z9.h\n"
+ "movprfx z22, z13\n fmla z22.h, p3/M, z1.h, z9.h\n"
+ "movprfx z21, z13\n fmla z21.h, p3/M, z0.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x13, x10, LSL #1]\n"
"fmla z31.h, p3/M, z0.h, z10.h\n"
- "fmla z24.h, p3/M, z6.h, z11.h\n"
- "fmla z28.h, p3/M, z3.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x28, x27, LSL #1]\n"
- "fmla z19.h, p3/M, z5.h, z12.h\n"
- "fmla z23.h, p3/M, z2.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x13, x14, LSL #1]\n"
- "fmla z27.h, p3/M, z8.h, z11.h\n"
- "fmla z31.h, p3/M, z5.h, z11.h\n"
- "movprfx z29, z15\n fmla z29.h, p3/M, z1.h, z9.h\n"
- "movprfx z30, z15\n fmla z30.h, p3/M, z0.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x13]\n"
- "fmla z29.h, p3/M, z2.h, z10.h\n"
- "fmla z30.h, p3/M, z1.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x13, x7, LSL #1]\n"
- "fmla z20.h, p3/M, z0.h, z9.h\n"
- "fmla z21.h, p3/M, z1.h, z10.h\n"
- "fmla z16.h, p3/M, z3.h, z9.h\n"
- "fmla z17.h, p3/M, z4.h, z10.h\n"
- "ld1h { z11.h }, p2/Z, [x24, x5, LSL #1]\n"
- "fmla z18.h, p3/M, z3.h, z10.h\n"
- "fmla z22.h, p3/M, z0.h, z10.h\n"
- "fmla z20.h, p3/M, z2.h, z10.h\n"
- "fmla z21.h, p3/M, z2.h, z12.h\n"
- "fmla z16.h, p3/M, z5.h, z10.h\n"
- "fmla z17.h, p3/M, z5.h, z12.h\n"
- "ld1h { z10.h }, p2/Z, [x12, x5, LSL #1]\n"
- "fmla z18.h, p3/M, z4.h, z12.h\n"
- "fmla z19.h, p3/M, z3.h, z12.h\n"
- "fmla z22.h, p3/M, z1.h, z12.h\n"
- "fmla z23.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x12, x10, LSL #1]\n"
- "fmla z28.h, p3/M, z7.h, z11.h\n"
- "fmla z29.h, p3/M, z6.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x24, x10, LSL #1]\n"
- "fmla z20.h, p3/M, z4.h, z10.h\n"
- "fmla z21.h, p3/M, z3.h, z10.h\n"
- "fmla z24.h, p3/M, z1.h, z10.h\n"
- "fmla z25.h, p3/M, z0.h, z10.h\n"
- "fmla z16.h, p3/M, z7.h, z10.h\n"
- "fmla z17.h, p3/M, z6.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x8, x7, LSL #1]\n"
- "fmla z30.h, p3/M, z8.h, z11.h\n"
- "fmla z31.h, p3/M, z7.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x11, x5, LSL #1]\n"
- "fmla z18.h, p3/M, z8.h, z12.h\n"
- "fmla z19.h, p3/M, z7.h, z12.h\n"
- "fmla z22.h, p3/M, z5.h, z12.h\n"
- "fmla z23.h, p3/M, z4.h, z12.h\n"
- "fmla z26.h, p3/M, z2.h, z12.h\n"
- "fmla z27.h, p3/M, z1.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x8, x14, LSL #1]\n"
- "addvl x8, x8, #1\n"
- "fmla z20.h, p3/M, z7.h, z11.h\n"
- "fmla z21.h, p3/M, z6.h, z11.h\n"
- "fmla z24.h, p3/M, z4.h, z11.h\n"
- "fmla z25.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z10.h }, p2/Z, [x11]\n"
+ "movprfx z28, z13\n fmla z28.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x11, x27, LSL #1]\n"
+ "fmla z30.h, p3/M, z8.h, z12.h\n"
+ "fmla z29.h, p3/M, z7.h, z12.h\n"
+ "fmla z26.h, p3/M, z5.h, z12.h\n"
+ "fmla z28.h, p3/M, z6.h, z12.h\n"
+ "fmla z25.h, p3/M, z4.h, z12.h\n"
+ "movprfx z24, z13\n fmla z24.h, p3/M, z3.h, z12.h\n"
+ "fmla z22.h, p3/M, z2.h, z12.h\n"
+ "fmla z21.h, p3/M, z1.h, z12.h\n"
+ "movprfx z20, z13\n fmla z20.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x8, x7, LSL #1]\n"
+ "movprfx z19, z13\n fmla z19.h, p3/M, z6.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x13, x9, LSL #1]\n"
+ "movprfx z16, z13\n fmla z16.h, p3/M, z8.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x8, x28, LSL #1]\n"
+ "fmla z27.h, p3/M, z8.h, z9.h\n"
+ "fmla z26.h, p3/M, z7.h, z9.h\n"
+ "fmla z25.h, p3/M, z6.h, z9.h\n"
+ "fmla z23.h, p3/M, z5.h, z9.h\n"
+ "fmla z22.h, p3/M, z4.h, z9.h\n"
+ "fmla z21.h, p3/M, z3.h, z9.h\n"
+ "fmla z19.h, p3/M, z2.h, z9.h\n"
+ "movprfx z18, z13\n fmla z18.h, p3/M, z1.h, z9.h\n"
+ "movprfx z17, z13\n fmla z17.h, p3/M, z0.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x15]\n"
+ "fmla z31.h, p3/M, z1.h, z12.h\n"
+ "ld1h { z13.h }, p3/Z, [x4]\n"
+ "fmla z30.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x15, x27, LSL #1]\n"
+ "fmla z29.h, p3/M, z2.h, z11.h\n"
"fmla z28.h, p3/M, z1.h, z11.h\n"
- "fmla z29.h, p3/M, z0.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x11, x10, LSL #1]\n"
- "fmla z16.h, p3/M, z2.h, z10.h\n"
- "fmla z17.h, p3/M, z1.h, z10.h\n"
- "fmla z18.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x12]\n"
- "fmla z30.h, p3/M, z2.h, z11.h\n"
- "fmla z19.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z11.h }, p2/Z, [x12]\n"
+ "fmla z26.h, p3/M, z8.h, z10.h\n"
+ "fmla z25.h, p3/M, z7.h, z10.h\n"
+ "fmla z24.h, p3/M, z6.h, z10.h\n"
+ "fmla z22.h, p3/M, z5.h, z10.h\n"
+ "fmla z21.h, p3/M, z4.h, z10.h\n"
"fmla z20.h, p3/M, z3.h, z10.h\n"
- "fmla z24.h, p3/M, z0.h, z10.h\n"
- "fmla z22.h, p3/M, z8.h, z11.h\n"
- "fmla z23.h, p3/M, z7.h, z11.h\n"
- "fmla z26.h, p3/M, z5.h, z11.h\n"
- "fmla z27.h, p3/M, z4.h, z11.h\n"
- "fmla z31.h, p3/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x28, x7, LSL #1]\n"
- "fmla z17.h, p3/M, z2.h, z12.h\n"
- "fmla z18.h, p3/M, z1.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x12, x27, LSL #1]\n"
- "addvl x12, x12, #1\n"
- "fmla z16.h, p3/M, z6.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x11]\n"
- "fmla z29.h, p3/M, z4.h, z11.h\n"
- "fmla z30.h, p3/M, z3.h, z11.h\n"
- "fmla z19.h, p3/M, z8.h, z12.h\n"
- "fmla z23.h, p3/M, z5.h, z12.h\n"
- "fmla z27.h, p3/M, z2.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x11, x27, LSL #1]\n"
- "addvl x11, x11, #1\n"
- "fmla z20.h, p3/M, z6.h, z10.h\n"
- "fmla z24.h, p3/M, z3.h, z10.h\n"
- "fmla z28.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x24, x7, LSL #1]\n"
- "fmla z31.h, p3/M, z2.h, z12.h\n"
- "fmla z29.h, p3/M, z7.h, z10.h\n"
+ "fmla z18.h, p3/M, z2.h, z10.h\n"
+ "fmla z17.h, p3/M, z1.h, z10.h\n"
+ "fmla z16.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x15, x10, LSL #1]\n"
+ "fmla z31.h, p3/M, z3.h, z9.h\n"
+ "fmla z27.h, p3/M, z0.h, z9.h\n"
+ "fmla z28.h, p3/M, z5.h, z12.h\n"
+ "fmla z24.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x15, x9, LSL #1]\n"
+ "fmla z23.h, p3/M, z6.h, z11.h\n"
+ "fmla z19.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x12, x27, LSL #1]\n"
+ "fmla z31.h, p3/M, z5.h, z10.h\n"
+ "fmla z30.h, p3/M, z4.h, z10.h\n"
+ "fmla z29.h, p3/M, z3.h, z10.h\n"
+ "fmla z27.h, p3/M, z2.h, z10.h\n"
+ "fmla z26.h, p3/M, z1.h, z10.h\n"
+ "fmla z25.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x14, x7, LSL #1]\n"
+ "fmla z20.h, p3/M, z8.h, z11.h\n"
+ "fmla z16.h, p3/M, z5.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x11, x7, LSL #1]\n"
+ "fmla z30.h, p3/M, z5.h, z12.h\n"
+ "fmla z29.h, p3/M, z4.h, z12.h\n"
+ "fmla z28.h, p3/M, z3.h, z12.h\n"
+ "fmla z26.h, p3/M, z2.h, z12.h\n"
+ "fmla z25.h, p3/M, z1.h, z12.h\n"
+ "fmla z24.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x14, x28, LSL #1]\n"
+ "fmla z19.h, p3/M, z7.h, z11.h\n"
+ "fmla z18.h, p3/M, z6.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x11, x28, LSL #1]\n"
+ "fmla z31.h, p3/M, z7.h, z10.h\n"
"fmla z30.h, p3/M, z6.h, z10.h\n"
- "fmla z24.h, p3/M, z8.h, z11.h\n"
- "fmla z25.h, p3/M, z7.h, z11.h\n"
+ "fmla z27.h, p3/M, z4.h, z10.h\n"
+ "fmla z26.h, p3/M, z3.h, z10.h\n"
+ "fmla z23.h, p3/M, z1.h, z10.h\n"
+ "fmla z22.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x8, x10, LSL #1]\n"
+ "fmla z17.h, p3/M, z8.h, z11.h\n"
+ "fmla z16.h, p3/M, z7.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x13, x7, LSL #1]\n"
+ "fmla z29.h, p3/M, z8.h, z12.h\n"
+ "fmla z28.h, p3/M, z7.h, z12.h\n"
+ "fmla z25.h, p3/M, z5.h, z12.h\n"
+ "fmla z24.h, p3/M, z4.h, z12.h\n"
+ "fmla z21.h, p3/M, z2.h, z12.h\n"
+ "fmla z20.h, p3/M, z1.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x8, x9, LSL #1]\n"
+ "addvl x8, x8, #1\n"
+ "fmla z31.h, p3/M, z2.h, z10.h\n"
+ "fmla z30.h, p3/M, z1.h, z10.h\n"
+ "fmla z29.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x14]\n"
+ "fmla z27.h, p3/M, z7.h, z11.h\n"
"fmla z26.h, p3/M, z6.h, z11.h\n"
- "fmla z28.h, p3/M, z5.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x28, x14, LSL #1]\n"
- "fmla z27.h, p3/M, z5.h, z12.h\n"
- "fmla z29.h, p3/M, z5.h, z11.h\n"
- "fmla z30.h, p3/M, z4.h, z11.h\n"
- "fmla z31.h, p3/M, z3.h, z11.h\n"
- "fmla z23.h, p3/M, z8.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x24, x14, LSL #1]\n"
- "fmla z28.h, p3/M, z8.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x13, x5, LSL #1]\n"
+ "fmla z23.h, p3/M, z4.h, z11.h\n"
+ "fmla z22.h, p3/M, z3.h, z11.h\n"
+ "fmla z19.h, p3/M, z1.h, z11.h\n"
+ "fmla z18.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x13, x28, LSL #1]\n"
+ "fmla z30.h, p3/M, z2.h, z12.h\n"
+ "fmla z29.h, p3/M, z1.h, z12.h\n"
+ "fmla z28.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x14, x27, LSL #1]\n"
+ "addvl x14, x14, #1\n"
+ "fmla z31.h, p3/M, z6.h, z10.h\n"
+ "ld1h { z9.h }, p1/Z, [x14, x10, LSL #1]\n"
+ "fmla z27.h, p3/M, z3.h, z10.h\n"
+ "fmla z23.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x13]\n"
"fmla z25.h, p3/M, z8.h, z11.h\n"
- "fmla z26.h, p3/M, z7.h, z11.h\n"
- "addvl x24, x24, #1\n"
- "fmla z27.h, p3/M, z6.h, z11.h\n"
- "fmla z29.h, p3/M, z8.h, z12.h\n"
- "ld1h { z11.h }, p2/Z, [x13, x10, LSL #1]\n"
+ "fmla z24.h, p3/M, z7.h, z11.h\n"
+ "fmla z21.h, p3/M, z5.h, z11.h\n"
+ "fmla z20.h, p3/M, z4.h, z11.h\n"
+ "fmla z17.h, p3/M, z2.h, z11.h\n"
+ "fmla z16.h, p3/M, z1.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x12, x10, LSL #1]\n"
+ "fmla z28.h, p3/M, z8.h, z12.h\n"
+ "fmla z24.h, p3/M, z5.h, z12.h\n"
+ "fmla z20.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x13, x27, LSL #1]\n"
"addvl x13, x13, #1\n"
- "fmla z30.h, p3/M, z7.h, z12.h\n"
- "fmla z31.h, p3/M, z6.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x28, x5, LSL #1]\n"
- "fmla z16.h, p3/M, z4.h, z10.h\n"
- "fmla z17.h, p3/M, z3.h, z10.h\n"
- "fmax z16.h, p3/M, z16.h, z14.h\n"
- "fmax z17.h, p3/M, z17.h, z14.h\n"
- "fmla z20.h, p3/M, z1.h, z10.h\n"
- "fmla z21.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x28, x10, LSL #1]\n"
- "fmax z20.h, p3/M, z20.h, z14.h\n"
+ "fmla z27.h, p3/M, z6.h, z10.h\n"
+ "fmla z23.h, p3/M, z3.h, z10.h\n"
+ "fmla z19.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x11, x10, LSL #1]\n"
+ "fmla z22.h, p3/M, z7.h, z11.h\n"
+ "fmla z21.h, p3/M, z6.h, z11.h\n"
+ "fmla z23.h, p3/M, z8.h, z11.h\n"
+ "fmla z19.h, p3/M, z5.h, z11.h\n"
+ "fmla z18.h, p3/M, z4.h, z11.h\n"
+ "fmla z17.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x12, x9, LSL #1]\n"
+ "fmla z24.h, p3/M, z8.h, z12.h\n"
+ "fmla z20.h, p3/M, z5.h, z12.h\n"
+ "fmla z16.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x11, x9, LSL #1]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z19.h, p3/M, z8.h, z10.h\n"
+ "fmla z18.h, p3/M, z7.h, z10.h\n"
+ "fmla z17.h, p3/M, z6.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x15, x7, LSL #1]\n"
+ "fmla z22.h, p3/M, z8.h, z11.h\n"
+ "fmla z21.h, p3/M, z7.h, z11.h\n"
+ "fmla z20.h, p3/M, z6.h, z11.h\n"
"fmla z18.h, p3/M, z5.h, z11.h\n"
- "fmla z19.h, p3/M, z4.h, z11.h\n"
- "fmax z18.h, p3/M, z18.h, z14.h\n"
- "fmax z19.h, p3/M, z19.h, z14.h\n"
- "fmla z22.h, p3/M, z2.h, z11.h\n"
- "fmla z23.h, p3/M, z1.h, z11.h\n"
- "fmax z21.h, p3/M, z21.h, z14.h\n"
- "fmax z22.h, p3/M, z22.h, z14.h\n"
- "fmla z24.h, p3/M, z7.h, z12.h\n"
- "fmla z25.h, p3/M, z6.h, z12.h\n"
- "fmax z23.h, p3/M, z23.h, z14.h\n"
- "fmax z24.h, p3/M, z24.h, z14.h\n"
- "fmla z28.h, p3/M, z4.h, z12.h\n"
- "fmla z29.h, p3/M, z3.h, z12.h\n"
- "fmax z25.h, p3/M, z25.h, z14.h\n"
- "fmax z28.h, p3/M, z28.h, z14.h\n"
- "fmla z26.h, p3/M, z8.h, z10.h\n"
- "fmla z27.h, p3/M, z7.h, z10.h\n"
- "fmax z26.h, p3/M, z26.h, z14.h\n"
- "fmax z27.h, p3/M, z27.h, z14.h\n"
- "fmla z30.h, p3/M, z5.h, z10.h\n"
+ "fmla z17.h, p3/M, z4.h, z11.h\n"
+ "fmla z16.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x15, x28, LSL #1]\n"
+ "addvl x15, x15, #1\n"
+ "fmla z18.h, p3/M, z8.h, z12.h\n"
"fmla z31.h, p3/M, z4.h, z10.h\n"
- "fmax z29.h, p3/M, z29.h, z14.h\n"
- "fmax z30.h, p3/M, z30.h, z14.h\n"
- "fmax z31.h, p3/M, z31.h, z14.h\n"
- "ld1h { z15.h }, p3/Z, [x17]\n"
- "ld1h { z0.h }, p3/Z, [x17, #1, MUL VL]\n"
- "whilelt p2.h, x21, %x[n_channels]\n"
- "ld1h { z1.h }, p3/Z, [x17, #2, MUL VL]\n"
- "ld1h { z2.h }, p3/Z, [x17, #3, MUL VL]\n"
- "cmp x16, %x[n_channels]\n"
- "fmin z16.h, p3/M, z16.h, z13.h\n"
- "ld1h { z3.h }, p3/Z, [x17, #4, MUL VL]\n"
- "ld1h { z4.h }, p3/Z, [x17, #5, MUL VL]\n"
- "fmin z17.h, p3/M, z17.h, z13.h\n"
- "fmin z18.h, p3/M, z18.h, z13.h\n"
- "ld1h { z5.h }, p3/Z, [x17, #6, MUL VL]\n"
- "ld1h { z6.h }, p3/Z, [x17, #7, MUL VL]\n"
- "addvl x17, x17, #16\n"
- "fmin z19.h, p3/M, z19.h, z13.h\n"
- "fmin z20.h, p3/M, z20.h, z13.h\n"
- "fmin z21.h, p3/M, z21.h, z13.h\n"
- "ld1h { z9.h }, p1/Z, [x12, x7, LSL #1]\n"
- "ld1h { z10.h }, p1/Z, [x8]\n"
- "fmin z22.h, p3/M, z22.h, z13.h\n"
- "fmin z23.h, p3/M, z23.h, z13.h\n"
+ "fmla z17.h, p3/M, z7.h, z12.h\n"
+ "fmla z16.h, p3/M, z6.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x12, x7, LSL #1]\n"
+ "fmla z30.h, p3/M, z3.h, z10.h\n"
+ "fmla z27.h, p3/M, z1.h, z10.h\n"
+ "fmla z26.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x12, x28, LSL #1]\n"
+ "whilelt p2.h, x5, %x[n_channels]\n"
+ "fmla z29.h, p3/M, z5.h, z11.h\n"
+ "ld1h { z0.h }, p3/Z, [x4, #1, MUL VL]\n"
+ "addvl x12, x12, #1\n"
+ "fmla z28.h, p3/M, z4.h, z11.h\n"
+ "cmp x6, %x[n_channels]\n"
+ "fmla z25.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z2.h }, p3/Z, [x4, #3, MUL VL]\n"
+ "fmla z24.h, p3/M, z1.h, z11.h\n"
"ld1h { z11.h }, p1/Z, [x8, x27, LSL #1]\n"
- "ld1h { z12.h }, p1/Z, [x12, x14, LSL #1]\n"
- "fmin z24.h, p3/M, z24.h, z13.h\n"
- "fmin z25.h, p3/M, z25.h, z13.h\n"
- "st1h { z16.h }, p0, [x15]\n"
- "ld1h { z7.h }, p3/Z, [x17, #-8, MUL VL]\n"
- "fmin z26.h, p3/M, z26.h, z13.h\n"
- "fmin z27.h, p3/M, z27.h, z13.h\n"
- "st1h { z17.h }, p0, [x15, x6, LSL #1]\n"
- "ld1h { z8.h }, p3/Z, [x17, #-7, MUL VL]\n"
- "fmin z28.h, p3/M, z28.h, z13.h\n"
- "fmin z29.h, p3/M, z29.h, z13.h\n"
- "st1h { z18.h }, p0, [x15, x25, LSL #1]\n"
- "fmin z30.h, p3/M, z30.h, z13.h\n"
- "fmin z31.h, p3/M, z31.h, z13.h\n"
- "st1h { z19.h }, p0, [x15, x22, LSL #1]\n"
- "addvl x28, x28, #1\n"
- "st1h { z20.h }, p0, [x9]\n"
- "addvl x15, x15, #1\n"
- "st1h { z21.h }, p0, [x9, x6, LSL #1]\n"
- "addvl x17, x17, #-6\n"
- "st1h { z22.h }, p0, [x9, x25, LSL #1]\n"
- "st1h { z23.h }, p0, [x9, x22, LSL #1]\n"
- "addvl x9, x9, #1\n"
- "st1h { z24.h }, p0, [x26]\n"
- "st1h { z25.h }, p0, [x26, x6, LSL #1]\n"
- "st1h { z26.h }, p0, [x26, x25, LSL #1]\n"
- "st1h { z27.h }, p0, [x26, x22, LSL #1]\n"
- "addvl x26, x26, #1\n"
- "st1h { z28.h }, p0, [x23]\n"
- "st1h { z29.h }, p0, [x23, x6, LSL #1]\n"
- "st1h { z30.h }, p0, [x23, x25, LSL #1]\n"
- "st1h { z31.h }, p0, [x23, x22, LSL #1]\n"
+ "fmla z23.h, p3/M, z7.h, z12.h\n"
+ "ld1h { z1.h }, p3/Z, [x4, #2, MUL VL]\n"
+ "fmla z22.h, p3/M, z6.h, z12.h\n"
+ "ld1h { z6.h }, p3/Z, [x4, #7, MUL VL]\n"
+ "fmla z19.h, p3/M, z4.h, z12.h\n"
+ "fmla z18.h, p3/M, z3.h, z12.h\n"
+ "ld1h { z12.h }, p1/Z, [x14, x9, LSL #1]\n"
+ "fmla z21.h, p3/M, z8.h, z10.h\n"
+ "ld1h { z3.h }, p3/Z, [x4, #4, MUL VL]\n"
+ "fmla z20.h, p3/M, z7.h, z10.h\n"
+ "fmla z17.h, p3/M, z5.h, z10.h\n"
+ "ld1h { z5.h }, p3/Z, [x4, #6, MUL VL]\n"
+ "fmla z16.h, p3/M, z4.h, z10.h\n"
+ "ld1h { z10.h }, p1/Z, [x8]\n"
+ "fmax z31.h, p3/M, z31.h, z15.h\n"
+ "ld1h { z4.h }, p3/Z, [x4, #5, MUL VL]\n"
+ "addvl x4, x4, #16\n"
+ "fmax z30.h, p3/M, z30.h, z15.h\n"
+ "ld1h { z7.h }, p3/Z, [x4, #-8, MUL VL]\n"
+ "fmax z29.h, p3/M, z29.h, z15.h\n"
+ "ld1h { z8.h }, p3/Z, [x4, #-7, MUL VL]\n"
+ "addvl x4, x4, #-6\n"
+ "fmin z31.h, p3/M, z31.h, z14.h\n"
+ "st1h { z31.h }, p0, [x16]\n"
+ "fmin z30.h, p3/M, z30.h, z14.h\n"
+ "fmin z29.h, p3/M, z29.h, z14.h\n"
+ "st1h { z30.h }, p0, [x16, x17, LSL #1]\n"
+ "fmax z28.h, p3/M, z28.h, z15.h\n"
+ "fmax z27.h, p3/M, z27.h, z15.h\n"
+ "st1h { z29.h }, p0, [x16, x26, LSL #1]\n"
+ "fmax z26.h, p3/M, z26.h, z15.h\n"
+ "fmax z25.h, p3/M, z25.h, z15.h\n"
+ "fmax z24.h, p3/M, z24.h, z15.h\n"
+ "fmin z28.h, p3/M, z28.h, z14.h\n"
+ "st1h { z28.h }, p0, [x16, x22, LSL #1]\n"
+ "fmin z27.h, p3/M, z27.h, z14.h\n"
+ "addvl x16, x16, #1\n"
+ "fmin z26.h, p3/M, z26.h, z14.h\n"
+ "st1h { z27.h }, p0, [x25]\n"
+ "fmin z25.h, p3/M, z25.h, z14.h\n"
+ "fmin z24.h, p3/M, z24.h, z14.h\n"
+ "st1h { z26.h }, p0, [x25, x17, LSL #1]\n"
+ "fmax z23.h, p3/M, z23.h, z15.h\n"
+ "st1h { z25.h }, p0, [x25, x26, LSL #1]\n"
+ "fmax z22.h, p3/M, z22.h, z15.h\n"
+ "fmax z21.h, p3/M, z21.h, z15.h\n"
+ "st1h { z24.h }, p0, [x25, x22, LSL #1]\n"
+ "addvl x25, x25, #1\n"
+ "fmin z23.h, p3/M, z23.h, z14.h\n"
+ "st1h { z23.h }, p0, [x24]\n"
+ "fmin z22.h, p3/M, z22.h, z14.h\n"
+ "fmin z21.h, p3/M, z21.h, z14.h\n"
+ "st1h { z22.h }, p0, [x24, x17, LSL #1]\n"
+ "fmax z20.h, p3/M, z20.h, z15.h\n"
+ "fmax z19.h, p3/M, z19.h, z15.h\n"
+ "st1h { z21.h }, p0, [x24, x26, LSL #1]\n"
+ "fmax z18.h, p3/M, z18.h, z15.h\n"
+ "fmax z17.h, p3/M, z17.h, z15.h\n"
+ "fmax z16.h, p3/M, z16.h, z15.h\n"
+ "fmin z20.h, p3/M, z20.h, z14.h\n"
+ "st1h { z20.h }, p0, [x24, x22, LSL #1]\n"
+ "fmin z19.h, p3/M, z19.h, z14.h\n"
+ "addvl x24, x24, #1\n"
+ "fmin z18.h, p3/M, z18.h, z14.h\n"
+ "st1h { z19.h }, p0, [x23]\n"
+ "fmin z17.h, p3/M, z17.h, z14.h\n"
+ "fmin z16.h, p3/M, z16.h, z14.h\n"
+ "st1h { z18.h }, p0, [x23, x17, LSL #1]\n"
+ "st1h { z17.h }, p0, [x23, x26, LSL #1]\n"
+ "st1h { z16.h }, p0, [x23, x22, LSL #1]\n"
"addvl x23, x23, #1\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
- "movprfx z21, z15\n fmla z21.h, p3/M, z4.h, z9.h\n"
- "movprfx z16, z15\n fmla z16.h, p3/M, z8.h, z9.h\n"
- "ldr x4, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x16, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "movprfx z22, z15\n fmla z22.h, p3/M, z3.h, z9.h\n"
- "movprfx z25, z15\n fmla z25.h, p3/M, z1.h, z9.h\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "add x4, x4, #0x1\n"
- "movprfx z26, z15\n fmla z26.h, p3/M, z0.h, z9.h\n"
- "fmla z21.h, p3/M, z5.h, z12.h\n"
- "cmp x4, x20\n"
- "add x21, x16, #0x1\n"
- "movprfx z17, z15\n fmla z17.h, p3/M, z7.h, z9.h\n"
- "movprfx z18, z15\n fmla z18.h, p3/M, z6.h, z9.h\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "csel x16, x16, x21, LT\n"
- "movprfx z20, z15\n fmla z20.h, p3/M, z5.h, z9.h\n"
- "movprfx z24, z15\n fmla z24.h, p3/M, z2.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x11, x7, LSL #1]\n"
+ "movprfx z31, z13\n fmla z31.h, p3/M, z8.h, z9.h\n"
+ "ldr x2, [%x[params_struct], %[offsetof_args_tile_i]]\n"
"mov p0.b, p2.b\n"
- "fmla z16.h, p3/M, z0.h, z10.h\n"
- "movprfx z19, z15\n fmla z19.h, p3/M, z2.h, z11.h\n"
- "ld1h { z10.h }, p2/Z, [x24]\n"
- "ld1h { z11.h }, p2/Z, [x24, x27, LSL #1]\n"
- "fmla z22.h, p3/M, z4.h, z12.h\n"
- "fmla z25.h, p3/M, z2.h, z12.h\n"
- "csel x4, x4, XZR, LT\n"
- "cmp x16, x20\n"
- "fmla z26.h, p3/M, z1.h, z12.h\n"
- "movprfx z28, z15\n fmla z28.h, p3/M, z6.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x11, x14, LSL #1]\n"
- "fmla z21.h, p3/M, z7.h, z9.h\n"
- "fmla z17.h, p3/M, z8.h, z12.h\n"
- "fmla z18.h, p3/M, z7.h, z12.h\n"
- "fmla z19.h, p3/M, z6.h, z12.h\n"
- "movprfx z23, z15\n fmla z23.h, p3/M, z3.h, z12.h\n"
- "movprfx z27, z15\n fmla z27.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x8, x5, LSL #1]\n"
- "movprfx z31, z15\n fmla z31.h, p3/M, z8.h, z11.h\n"
- "fmla z22.h, p3/M, z6.h, z9.h\n"
- "ld1h { z11.h }, p2/Z, [x8, x10, LSL #1]\n"
- "fmla z25.h, p3/M, z4.h, z9.h\n"
- "fmla z26.h, p3/M, z3.h, z9.h\n"
- "fmla z20.h, p3/M, z8.h, z9.h\n"
- "fmla z24.h, p3/M, z5.h, z9.h\n"
- "fmla z28.h, p3/M, z2.h, z9.h\n"
- "fmla z21.h, p3/M, z8.h, z10.h\n"
- "fmla z16.h, p3/M, z1.h, z12.h\n"
- "fmla z17.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x13, x27, LSL #1]\n"
- "fmla z18.h, p3/M, z2.h, z11.h\n"
- "fmla z19.h, p3/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x28]\n"
- "fmla z22.h, p3/M, z7.h, z10.h\n"
- "fmla z23.h, p3/M, z6.h, z10.h\n"
- "fmla z25.h, p3/M, z5.h, z10.h\n"
- "fmla z26.h, p3/M, z4.h, z10.h\n"
- "fmla z27.h, p3/M, z3.h, z10.h\n"
+ "movprfx z30, z13\n fmla z30.h, p3/M, z7.h, z9.h\n"
+ "ldr x3, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "add x21, x2, #0x1\n"
+ "movprfx z29, z13\n fmla z29.h, p3/M, z6.h, z9.h\n"
+ "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "movprfx z27, z13\n fmla z27.h, p3/M, z5.h, z9.h\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "add x3, x3, #0x1\n"
+ "movprfx z26, z13\n fmla z26.h, p3/M, z4.h, z9.h\n"
+ "cmp x3, x19\n"
+ "movprfx z25, z13\n fmla z25.h, p3/M, z3.h, z9.h\n"
+ "movprfx z23, z13\n fmla z23.h, p3/M, z2.h, z9.h\n"
+ "csel x3, x3, XZR, LT\n"
+ "movprfx z22, z13\n fmla z22.h, p3/M, z1.h, z9.h\n"
+ "csel x2, x2, x21, LT\n"
+ "movprfx z21, z13\n fmla z21.h, p3/M, z0.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x13, x10, LSL #1]\n"
+ "cmp x2, x20\n"
"fmla z31.h, p3/M, z0.h, z10.h\n"
- "fmla z24.h, p3/M, z6.h, z11.h\n"
- "fmla z28.h, p3/M, z3.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x28, x27, LSL #1]\n"
- "fmla z19.h, p3/M, z5.h, z12.h\n"
- "fmla z23.h, p3/M, z2.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x13, x14, LSL #1]\n"
- "fmla z27.h, p3/M, z8.h, z11.h\n"
- "fmla z31.h, p3/M, z5.h, z11.h\n"
- "movprfx z29, z15\n fmla z29.h, p3/M, z1.h, z9.h\n"
- "movprfx z30, z15\n fmla z30.h, p3/M, z0.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x13]\n"
- "fmla z29.h, p3/M, z2.h, z10.h\n"
- "fmla z30.h, p3/M, z1.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x13, x7, LSL #1]\n"
- "fmla z20.h, p3/M, z0.h, z9.h\n"
- "fmla z21.h, p3/M, z1.h, z10.h\n"
- "fmla z16.h, p3/M, z3.h, z9.h\n"
- "fmla z17.h, p3/M, z4.h, z10.h\n"
- "ld1h { z11.h }, p2/Z, [x24, x5, LSL #1]\n"
- "fmla z18.h, p3/M, z3.h, z10.h\n"
- "fmla z22.h, p3/M, z0.h, z10.h\n"
- "fmla z20.h, p3/M, z2.h, z10.h\n"
- "fmla z21.h, p3/M, z2.h, z12.h\n"
- "fmla z16.h, p3/M, z5.h, z10.h\n"
- "fmla z17.h, p3/M, z5.h, z12.h\n"
- "ld1h { z10.h }, p2/Z, [x12, x5, LSL #1]\n"
- "fmla z18.h, p3/M, z4.h, z12.h\n"
- "fmla z19.h, p3/M, z3.h, z12.h\n"
- "fmla z22.h, p3/M, z1.h, z12.h\n"
- "fmla z23.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x12, x10, LSL #1]\n"
- "fmla z28.h, p3/M, z7.h, z11.h\n"
- "fmla z29.h, p3/M, z6.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x24, x10, LSL #1]\n"
- "fmla z20.h, p3/M, z4.h, z10.h\n"
- "fmla z21.h, p3/M, z3.h, z10.h\n"
- "fmla z24.h, p3/M, z1.h, z10.h\n"
- "fmla z25.h, p3/M, z0.h, z10.h\n"
- "fmla z16.h, p3/M, z7.h, z10.h\n"
- "fmla z17.h, p3/M, z6.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x8, x7, LSL #1]\n"
- "fmla z30.h, p3/M, z8.h, z11.h\n"
- "fmla z31.h, p3/M, z7.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x11, x5, LSL #1]\n"
- "fmla z18.h, p3/M, z8.h, z12.h\n"
- "fmla z19.h, p3/M, z7.h, z12.h\n"
- "fmla z22.h, p3/M, z5.h, z12.h\n"
- "fmla z23.h, p3/M, z4.h, z12.h\n"
- "fmla z26.h, p3/M, z2.h, z12.h\n"
- "fmla z27.h, p3/M, z1.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x8, x14, LSL #1]\n"
- "fmla z20.h, p3/M, z7.h, z11.h\n"
- "fmla z21.h, p3/M, z6.h, z11.h\n"
- "fmla z24.h, p3/M, z4.h, z11.h\n"
- "fmla z25.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z10.h }, p2/Z, [x11]\n"
+ "movprfx z28, z13\n fmla z28.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x11, x27, LSL #1]\n"
+ "fmla z30.h, p3/M, z8.h, z12.h\n"
+ "fmla z29.h, p3/M, z7.h, z12.h\n"
+ "fmla z26.h, p3/M, z5.h, z12.h\n"
+ "fmla z28.h, p3/M, z6.h, z12.h\n"
+ "fmla z25.h, p3/M, z4.h, z12.h\n"
+ "movprfx z24, z13\n fmla z24.h, p3/M, z3.h, z12.h\n"
+ "fmla z22.h, p3/M, z2.h, z12.h\n"
+ "fmla z21.h, p3/M, z1.h, z12.h\n"
+ "movprfx z20, z13\n fmla z20.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x8, x7, LSL #1]\n"
+ "movprfx z19, z13\n fmla z19.h, p3/M, z6.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x13, x9, LSL #1]\n"
+ "movprfx z16, z13\n fmla z16.h, p3/M, z8.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x8, x28, LSL #1]\n"
+ "fmla z27.h, p3/M, z8.h, z9.h\n"
+ "fmla z26.h, p3/M, z7.h, z9.h\n"
+ "fmla z25.h, p3/M, z6.h, z9.h\n"
+ "fmla z23.h, p3/M, z5.h, z9.h\n"
+ "fmla z22.h, p3/M, z4.h, z9.h\n"
+ "fmla z21.h, p3/M, z3.h, z9.h\n"
+ "fmla z19.h, p3/M, z2.h, z9.h\n"
+ "movprfx z18, z13\n fmla z18.h, p3/M, z1.h, z9.h\n"
+ "movprfx z17, z13\n fmla z17.h, p3/M, z0.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x15]\n"
+ "fmla z31.h, p3/M, z1.h, z12.h\n"
+ "fmla z30.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x15, x27, LSL #1]\n"
+ "fmla z29.h, p3/M, z2.h, z11.h\n"
"fmla z28.h, p3/M, z1.h, z11.h\n"
- "fmla z29.h, p3/M, z0.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x11, x10, LSL #1]\n"
- "fmla z16.h, p3/M, z2.h, z10.h\n"
- "fmla z17.h, p3/M, z1.h, z10.h\n"
- "fmla z18.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x12]\n"
- "fmla z30.h, p3/M, z2.h, z11.h\n"
- "fmla z19.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z11.h }, p2/Z, [x12]\n"
+ "fmla z26.h, p3/M, z8.h, z10.h\n"
+ "fmla z25.h, p3/M, z7.h, z10.h\n"
+ "fmla z24.h, p3/M, z6.h, z10.h\n"
+ "fmla z22.h, p3/M, z5.h, z10.h\n"
+ "fmla z21.h, p3/M, z4.h, z10.h\n"
"fmla z20.h, p3/M, z3.h, z10.h\n"
- "fmla z24.h, p3/M, z0.h, z10.h\n"
- "fmla z22.h, p3/M, z8.h, z11.h\n"
- "fmla z23.h, p3/M, z7.h, z11.h\n"
- "fmla z26.h, p3/M, z5.h, z11.h\n"
- "fmla z27.h, p3/M, z4.h, z11.h\n"
- "fmla z31.h, p3/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x28, x7, LSL #1]\n"
- "fmla z17.h, p3/M, z2.h, z12.h\n"
- "fmla z18.h, p3/M, z1.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x12, x27, LSL #1]\n"
- "fmla z16.h, p3/M, z6.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x11]\n"
- "fmla z29.h, p3/M, z4.h, z11.h\n"
- "fmla z30.h, p3/M, z3.h, z11.h\n"
- "fmla z19.h, p3/M, z8.h, z12.h\n"
- "fmla z23.h, p3/M, z5.h, z12.h\n"
- "fmla z27.h, p3/M, z2.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x11, x27, LSL #1]\n"
- "fmla z20.h, p3/M, z6.h, z10.h\n"
- "fmla z24.h, p3/M, z3.h, z10.h\n"
- "fmla z28.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x24, x7, LSL #1]\n"
- "fmla z31.h, p3/M, z2.h, z12.h\n"
- "fmla z29.h, p3/M, z7.h, z10.h\n"
+ "fmla z18.h, p3/M, z2.h, z10.h\n"
+ "fmla z17.h, p3/M, z1.h, z10.h\n"
+ "fmla z16.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x15, x10, LSL #1]\n"
+ "fmla z31.h, p3/M, z3.h, z9.h\n"
+ "fmla z27.h, p3/M, z0.h, z9.h\n"
+ "fmla z28.h, p3/M, z5.h, z12.h\n"
+ "fmla z24.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x15, x9, LSL #1]\n"
+ "fmla z23.h, p3/M, z6.h, z11.h\n"
+ "fmla z19.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x12, x27, LSL #1]\n"
+ "fmla z31.h, p3/M, z5.h, z10.h\n"
+ "fmla z30.h, p3/M, z4.h, z10.h\n"
+ "fmla z29.h, p3/M, z3.h, z10.h\n"
+ "fmla z27.h, p3/M, z2.h, z10.h\n"
+ "fmla z26.h, p3/M, z1.h, z10.h\n"
+ "fmla z25.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x14, x7, LSL #1]\n"
+ "fmla z20.h, p3/M, z8.h, z11.h\n"
+ "fmla z16.h, p3/M, z5.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x11, x7, LSL #1]\n"
+ "fmla z30.h, p3/M, z5.h, z12.h\n"
+ "fmla z29.h, p3/M, z4.h, z12.h\n"
+ "fmla z28.h, p3/M, z3.h, z12.h\n"
+ "fmla z26.h, p3/M, z2.h, z12.h\n"
+ "fmla z25.h, p3/M, z1.h, z12.h\n"
+ "fmla z24.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x14, x28, LSL #1]\n"
+ "fmla z19.h, p3/M, z7.h, z11.h\n"
+ "fmla z18.h, p3/M, z6.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x11, x28, LSL #1]\n"
+ "fmla z31.h, p3/M, z7.h, z10.h\n"
"fmla z30.h, p3/M, z6.h, z10.h\n"
- "fmla z24.h, p3/M, z8.h, z11.h\n"
- "fmla z25.h, p3/M, z7.h, z11.h\n"
+ "fmla z27.h, p3/M, z4.h, z10.h\n"
+ "fmla z26.h, p3/M, z3.h, z10.h\n"
+ "fmla z23.h, p3/M, z1.h, z10.h\n"
+ "fmla z22.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x8, x10, LSL #1]\n"
+ "fmla z17.h, p3/M, z8.h, z11.h\n"
+ "fmla z16.h, p3/M, z7.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x13, x7, LSL #1]\n"
+ "fmla z29.h, p3/M, z8.h, z12.h\n"
+ "fmla z28.h, p3/M, z7.h, z12.h\n"
+ "fmla z25.h, p3/M, z5.h, z12.h\n"
+ "fmla z24.h, p3/M, z4.h, z12.h\n"
+ "fmla z21.h, p3/M, z2.h, z12.h\n"
+ "fmla z20.h, p3/M, z1.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x8, x9, LSL #1]\n"
+ "fmla z31.h, p3/M, z2.h, z10.h\n"
+ "fmla z30.h, p3/M, z1.h, z10.h\n"
+ "fmla z29.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x14]\n"
+ "fmla z27.h, p3/M, z7.h, z11.h\n"
"fmla z26.h, p3/M, z6.h, z11.h\n"
- "fmla z28.h, p3/M, z5.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x28, x14, LSL #1]\n"
- "fmla z27.h, p3/M, z5.h, z12.h\n"
- "fmla z29.h, p3/M, z5.h, z11.h\n"
- "fmla z30.h, p3/M, z4.h, z11.h\n"
- "fmla z31.h, p3/M, z3.h, z11.h\n"
- "fmla z23.h, p3/M, z8.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x24, x14, LSL #1]\n"
- "fmla z28.h, p3/M, z8.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x13, x5, LSL #1]\n"
+ "fmla z23.h, p3/M, z4.h, z11.h\n"
+ "fmla z22.h, p3/M, z3.h, z11.h\n"
+ "fmla z19.h, p3/M, z1.h, z11.h\n"
+ "fmla z18.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x13, x28, LSL #1]\n"
+ "fmla z30.h, p3/M, z2.h, z12.h\n"
+ "fmla z29.h, p3/M, z1.h, z12.h\n"
+ "fmla z28.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x14, x27, LSL #1]\n"
+ "fmla z31.h, p3/M, z6.h, z10.h\n"
+ "fmla z27.h, p3/M, z3.h, z10.h\n"
+ "fmla z23.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x13]\n"
"fmla z25.h, p3/M, z8.h, z11.h\n"
- "fmla z26.h, p3/M, z7.h, z11.h\n"
- "fmla z27.h, p3/M, z6.h, z11.h\n"
- "fmla z29.h, p3/M, z8.h, z12.h\n"
- "ld1h { z11.h }, p2/Z, [x13, x10, LSL #1]\n"
- "fmla z30.h, p3/M, z7.h, z12.h\n"
- "fmla z31.h, p3/M, z6.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x28, x5, LSL #1]\n"
- "fmla z16.h, p3/M, z4.h, z10.h\n"
- "fmla z17.h, p3/M, z3.h, z10.h\n"
- "fmax z16.h, p3/M, z16.h, z14.h\n"
- "fmax z17.h, p3/M, z17.h, z14.h\n"
- "fmla z20.h, p3/M, z1.h, z10.h\n"
- "fmla z21.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x28, x10, LSL #1]\n"
- "fmax z20.h, p3/M, z20.h, z14.h\n"
+ "fmla z24.h, p3/M, z7.h, z11.h\n"
+ "fmla z21.h, p3/M, z5.h, z11.h\n"
+ "fmla z20.h, p3/M, z4.h, z11.h\n"
+ "fmla z17.h, p3/M, z2.h, z11.h\n"
+ "fmla z16.h, p3/M, z1.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x12, x10, LSL #1]\n"
+ "fmla z28.h, p3/M, z8.h, z12.h\n"
+ "fmla z24.h, p3/M, z5.h, z12.h\n"
+ "fmla z20.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x13, x27, LSL #1]\n"
+ "fmla z27.h, p3/M, z6.h, z10.h\n"
+ "fmla z23.h, p3/M, z3.h, z10.h\n"
+ "fmla z19.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x11, x10, LSL #1]\n"
+ "fmla z22.h, p3/M, z7.h, z11.h\n"
+ "fmla z21.h, p3/M, z6.h, z11.h\n"
+ "fmla z23.h, p3/M, z8.h, z11.h\n"
+ "fmla z19.h, p3/M, z5.h, z11.h\n"
+ "fmla z18.h, p3/M, z4.h, z11.h\n"
+ "fmla z17.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x12, x9, LSL #1]\n"
+ "fmla z24.h, p3/M, z8.h, z12.h\n"
+ "fmla z20.h, p3/M, z5.h, z12.h\n"
+ "fmla z16.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x11, x9, LSL #1]\n"
+ "fmla z19.h, p3/M, z8.h, z10.h\n"
+ "fmla z18.h, p3/M, z7.h, z10.h\n"
+ "fmla z17.h, p3/M, z6.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x15, x7, LSL #1]\n"
+ "fmla z22.h, p3/M, z8.h, z11.h\n"
+ "fmla z21.h, p3/M, z7.h, z11.h\n"
+ "fmla z20.h, p3/M, z6.h, z11.h\n"
"fmla z18.h, p3/M, z5.h, z11.h\n"
- "fmla z19.h, p3/M, z4.h, z11.h\n"
- "fmax z18.h, p3/M, z18.h, z14.h\n"
- "fmax z19.h, p3/M, z19.h, z14.h\n"
- "fmla z22.h, p3/M, z2.h, z11.h\n"
- "fmla z23.h, p3/M, z1.h, z11.h\n"
- "fmax z21.h, p3/M, z21.h, z14.h\n"
- "fmax z22.h, p3/M, z22.h, z14.h\n"
- "fmla z24.h, p3/M, z7.h, z12.h\n"
- "fmla z25.h, p3/M, z6.h, z12.h\n"
- "fmax z23.h, p3/M, z23.h, z14.h\n"
- "fmax z24.h, p3/M, z24.h, z14.h\n"
- "fmla z28.h, p3/M, z4.h, z12.h\n"
- "fmla z29.h, p3/M, z3.h, z12.h\n"
- "fmax z25.h, p3/M, z25.h, z14.h\n"
- "fmax z28.h, p3/M, z28.h, z14.h\n"
- "fmla z26.h, p3/M, z8.h, z10.h\n"
- "fmla z27.h, p3/M, z7.h, z10.h\n"
- "fmax z26.h, p3/M, z26.h, z14.h\n"
- "fmax z27.h, p3/M, z27.h, z14.h\n"
- "fmla z30.h, p3/M, z5.h, z10.h\n"
+ "fmla z17.h, p3/M, z4.h, z11.h\n"
+ "fmla z16.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x15, x28, LSL #1]\n"
"fmla z31.h, p3/M, z4.h, z10.h\n"
- "fmax z29.h, p3/M, z29.h, z14.h\n"
- "fmax z30.h, p3/M, z30.h, z14.h\n"
- "fmax z31.h, p3/M, z31.h, z14.h\n"
- "fmin z16.h, p3/M, z16.h, z13.h\n"
- "st1h { z16.h }, p0, [x15]\n"
- "fmin z17.h, p3/M, z17.h, z13.h\n"
- "fmin z18.h, p3/M, z18.h, z13.h\n"
- "st1h { z17.h }, p0, [x15, x6, LSL #1]\n"
- "fmin z19.h, p3/M, z19.h, z13.h\n"
- "fmin z20.h, p3/M, z20.h, z13.h\n"
- "st1h { z18.h }, p0, [x15, x25, LSL #1]\n"
- "fmin z21.h, p3/M, z21.h, z13.h\n"
- "fmin z22.h, p3/M, z22.h, z13.h\n"
- "st1h { z19.h }, p0, [x15, x22, LSL #1]\n"
- "fmin z23.h, p3/M, z23.h, z13.h\n"
- "fmin z24.h, p3/M, z24.h, z13.h\n"
- "st1h { z20.h }, p0, [x9]\n"
- "fmin z25.h, p3/M, z25.h, z13.h\n"
- "fmin z26.h, p3/M, z26.h, z13.h\n"
- "st1h { z21.h }, p0, [x9, x6, LSL #1]\n"
- "fmin z27.h, p3/M, z27.h, z13.h\n"
- "fmin z28.h, p3/M, z28.h, z13.h\n"
- "st1h { z22.h }, p0, [x9, x25, LSL #1]\n"
- "fmin z29.h, p3/M, z29.h, z13.h\n"
- "fmin z30.h, p3/M, z30.h, z13.h\n"
- "st1h { z23.h }, p0, [x9, x22, LSL #1]\n"
- "fmin z31.h, p3/M, z31.h, z13.h\n"
- "st1h { z24.h }, p0, [x26]\n"
- "st1h { z25.h }, p0, [x26, x6, LSL #1]\n"
- "st1h { z26.h }, p0, [x26, x25, LSL #1]\n"
- "st1h { z27.h }, p0, [x26, x22, LSL #1]\n"
- "st1h { z28.h }, p0, [x23]\n"
- "st1h { z29.h }, p0, [x23, x6, LSL #1]\n"
- "st1h { z30.h }, p0, [x23, x25, LSL #1]\n"
- "st1h { z31.h }, p0, [x23, x22, LSL #1]\n"
+ "fmla z18.h, p3/M, z8.h, z12.h\n"
+ "fmla z17.h, p3/M, z7.h, z12.h\n"
+ "fmla z16.h, p3/M, z6.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x12, x7, LSL #1]\n"
+ "fmla z30.h, p3/M, z3.h, z10.h\n"
+ "fmla z27.h, p3/M, z1.h, z10.h\n"
+ "fmla z26.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x12, x28, LSL #1]\n"
+ "fmla z29.h, p3/M, z5.h, z11.h\n"
+ "fmla z28.h, p3/M, z4.h, z11.h\n"
+ "fmla z25.h, p3/M, z2.h, z11.h\n"
+ "fmla z24.h, p3/M, z1.h, z11.h\n"
+ "fmla z23.h, p3/M, z7.h, z12.h\n"
+ "fmla z22.h, p3/M, z6.h, z12.h\n"
+ "fmla z19.h, p3/M, z4.h, z12.h\n"
+ "fmla z18.h, p3/M, z3.h, z12.h\n"
+ "fmla z21.h, p3/M, z8.h, z10.h\n"
+ "fmla z20.h, p3/M, z7.h, z10.h\n"
+ "fmla z17.h, p3/M, z5.h, z10.h\n"
+ "fmla z16.h, p3/M, z4.h, z10.h\n"
+ "fmax z31.h, p3/M, z31.h, z15.h\n"
+ "fmax z30.h, p3/M, z30.h, z15.h\n"
+ "fmax z29.h, p3/M, z29.h, z15.h\n"
+ "fmax z28.h, p3/M, z28.h, z15.h\n"
+ "fmin z31.h, p3/M, z31.h, z14.h\n"
+ "st1h { z31.h }, p0, [x16]\n"
+ "fmin z30.h, p3/M, z30.h, z14.h\n"
+ "fmin z29.h, p3/M, z29.h, z14.h\n"
+ "st1h { z30.h }, p0, [x16, x17, LSL #1]\n"
+ "fmin z28.h, p3/M, z28.h, z14.h\n"
+ "fmax z27.h, p3/M, z27.h, z15.h\n"
+ "st1h { z29.h }, p0, [x16, x26, LSL #1]\n"
+ "fmax z26.h, p3/M, z26.h, z15.h\n"
+ "st1h { z28.h }, p0, [x16, x22, LSL #1]\n"
+ "fmin z27.h, p3/M, z27.h, z14.h\n"
+ "fmax z25.h, p3/M, z25.h, z15.h\n"
+ "st1h { z27.h }, p0, [x25]\n"
+ "fmin z26.h, p3/M, z26.h, z14.h\n"
+ "fmin z25.h, p3/M, z25.h, z14.h\n"
+ "st1h { z26.h }, p0, [x25, x17, LSL #1]\n"
+ "fmax z24.h, p3/M, z24.h, z15.h\n"
+ "fmax z23.h, p3/M, z23.h, z15.h\n"
+ "st1h { z25.h }, p0, [x25, x26, LSL #1]\n"
+ "fmax z22.h, p3/M, z22.h, z15.h\n"
+ "fmax z21.h, p3/M, z21.h, z15.h\n"
+ "fmax z20.h, p3/M, z20.h, z15.h\n"
+ "fmin z24.h, p3/M, z24.h, z14.h\n"
+ "st1h { z24.h }, p0, [x25, x22, LSL #1]\n"
+ "fmin z23.h, p3/M, z23.h, z14.h\n"
+ "fmin z22.h, p3/M, z22.h, z14.h\n"
+ "st1h { z23.h }, p0, [x24]\n"
+ "fmin z21.h, p3/M, z21.h, z14.h\n"
+ "fmin z20.h, p3/M, z20.h, z14.h\n"
+ "st1h { z22.h }, p0, [x24, x17, LSL #1]\n"
+ "fmax z19.h, p3/M, z19.h, z15.h\n"
+ "st1h { z21.h }, p0, [x24, x26, LSL #1]\n"
+ "fmax z18.h, p3/M, z18.h, z15.h\n"
+ "fmax z17.h, p3/M, z17.h, z15.h\n"
+ "st1h { z20.h }, p0, [x24, x22, LSL #1]\n"
+ "fmin z19.h, p3/M, z19.h, z14.h\n"
+ "st1h { z19.h }, p0, [x23]\n"
+ "fmin z18.h, p3/M, z18.h, z14.h\n"
+ "fmin z17.h, p3/M, z17.h, z14.h\n"
+ "st1h { z18.h }, p0, [x23, x17, LSL #1]\n"
+ "fmax z16.h, p3/M, z16.h, z15.h\n"
+ "st1h { z17.h }, p0, [x23, x26, LSL #1]\n"
+ "fmin z16.h, p3/M, z16.h, z14.h\n"
+ "st1h { z16.h }, p0, [x23, x22, LSL #1]\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
index 972b78b6d5..66f6c3bb7a 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -98,613 +98,613 @@ void sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
activation_min, activation_max);
__asm__ __volatile__(
+ "ldr x16, [%x[params_struct], %[offsetof_args_outptrs]]\n"
"ptrue p3.b\n"
- "ldr x17, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x16, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "ld1h { z15.h }, p3/Z, [x17]\n"
- "cnth x15\n"
- "mov x14, #0x0\n"
- "ld1h { z0.h }, p3/Z, [x17, #1, MUL VL]\n"
- "ld1h { z1.h }, p3/Z, [x17, #2, MUL VL]\n"
+ "ldr x15, [%x[params_struct], %[offsetof_args_params]]\n"
+ "add x14, %x[params_struct], %[offsetof_Args_inptrs]\n"
+ "ld1rh { z15.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "mov x13, #0x0\n"
+ "ld1rh { z14.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "cnth x12\n"
+ "ld1h { z13.h }, p3/Z, [x15]\n"
+ "sub x11, XZR, x12\n"
+ "ld1h { z0.h }, p3/Z, [x15, #1, MUL VL]\n"
"whilelt p2.h, XZR, %x[n_channels]\n"
- "ld1h { z2.h }, p3/Z, [x17, #3, MUL VL]\n"
- "ld1h { z3.h }, p3/Z, [x17, #4, MUL VL]\n"
- "cmp x15, %x[n_channels]\n"
- "ld1h { z4.h }, p3/Z, [x17, #5, MUL VL]\n"
- "ld1h { z5.h }, p3/Z, [x17, #6, MUL VL]\n"
- "sub x13, XZR, x15\n"
- "ld1h { z6.h }, p3/Z, [x17, #7, MUL VL]\n"
- "addvl x17, x17, #16\n"
- "ldp x12, x11, [x16, #0x0]\n"
- "ldp x10, x9, [x16, #0x10]\n"
- "ldr x28, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "ld1rh { z14.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ld1rh { z13.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "ld1h { z7.h }, p3/Z, [x17, #-8, MUL VL]\n"
- "ld1h { z8.h }, p3/Z, [x17, #-7, MUL VL]\n"
- "addvl x17, x17, #-6\n"
- "ld1h { z9.h }, p2/Z, [x12, x14, LSL #1]\n"
- "ld1h { z10.h }, p2/Z, [x11, x14, LSL #1]\n"
- "ld1h { z11.h }, p2/Z, [x10, x14, LSL #1]\n"
- "ld1h { z12.h }, p2/Z, [x9, x14, LSL #1]\n"
+ "ld1h { z1.h }, p3/Z, [x15, #2, MUL VL]\n"
+ "cmp x12, %x[n_channels]\n"
+ "ld1h { z2.h }, p3/Z, [x15, #3, MUL VL]\n"
+ "ld1h { z3.h }, p3/Z, [x15, #4, MUL VL]\n"
+ "ld1h { z4.h }, p3/Z, [x15, #5, MUL VL]\n"
+ "ld1h { z5.h }, p3/Z, [x15, #6, MUL VL]\n"
+ "ld1h { z6.h }, p3/Z, [x15, #7, MUL VL]\n"
+ "addvl x15, x15, #16\n"
+ "ldp x10, x9, [x14, #0x0]\n"
+ "ld1h { z7.h }, p3/Z, [x15, #-8, MUL VL]\n"
+ "ld1h { z8.h }, p3/Z, [x15, #-7, MUL VL]\n"
+ "addvl x15, x15, #-6\n"
+ "ld1h { z9.h }, p2/Z, [x10, x13, LSL #1]\n"
+ "ld1h { z10.h }, p2/Z, [x9, x13, LSL #1]\n"
+ "ldp x28, x27, [x14, #0x10]\n"
+ "ld1h { z11.h }, p2/Z, [x28, x13, LSL #1]\n"
+ "ld1h { z12.h }, p2/Z, [x27, x13, LSL #1]\n"
"bge 2f\n"
"1:" // Channel loop
- "movprfx z21, z15\n fmla z21.h, p3/M, z4.h, z9.h\n"
- "movprfx z16, z15\n fmla z16.h, p3/M, z8.h, z9.h\n"
- "ldr x27, [x16, #0x20]\n"
- "ldr x26, [x16, #0x30]\n"
- "movprfx z22, z15\n fmla z22.h, p3/M, z3.h, z9.h\n"
- "movprfx z25, z15\n fmla z25.h, p3/M, z1.h, z9.h\n"
- "ldr x25, [x16, #0x28]\n"
- "ldr x24, [x16, #0x38]\n"
- "movprfx z26, z15\n fmla z26.h, p3/M, z0.h, z9.h\n"
- "movprfx z17, z15\n fmla z17.h, p3/M, z7.h, z9.h\n"
- "ldr x12, [x16, #0x40]\n"
- "ldr x11, [x16, #0x48]\n"
- "movprfx z18, z15\n fmla z18.h, p3/M, z6.h, z9.h\n"
- "fmla z21.h, p3/M, z5.h, z12.h\n"
- "ldr x10, [x16, #0x50]\n"
- "ldr x9, [x16, #0x58]\n"
- "movprfx z20, z15\n fmla z20.h, p3/M, z5.h, z9.h\n"
- "movprfx z24, z15\n fmla z24.h, p3/M, z2.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x26, x14, LSL #1]\n"
- "ldr x26, [x16, #0x70]\n"
- "fmla z16.h, p3/M, z0.h, z10.h\n"
- "movprfx z19, z15\n fmla z19.h, p3/M, z2.h, z11.h\n"
- "ld1h { z10.h }, p2/Z, [x27, x14, LSL #1]\n"
- "ld1h { z11.h }, p2/Z, [x25, x14, LSL #1]\n"
- "fmla z22.h, p3/M, z4.h, z12.h\n"
- "fmla z25.h, p3/M, z2.h, z12.h\n"
- "ldr x27, [x16, #0x60]\n"
- "ldr x25, [x16, #0x68]\n"
- "fmla z26.h, p3/M, z1.h, z12.h\n"
- "fmla z17.h, p3/M, z8.h, z12.h\n"
- "inch x13\n"
- "mov p1.b, p2.b\n"
- "fmla z18.h, p3/M, z7.h, z12.h\n"
- "movprfx z28, z15\n fmla z28.h, p3/M, z6.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x11, x14, LSL #1]\n"
- "ldr x11, [x16, #0x88]\n"
- "fmla z21.h, p3/M, z7.h, z9.h\n"
- "fmla z19.h, p3/M, z6.h, z12.h\n"
- "ldr x23, [x28, #0x0]\n"
- "ldr x22, [x28, #0x8]\n"
- "movprfx z23, z15\n fmla z23.h, p3/M, z3.h, z12.h\n"
- "movprfx z27, z15\n fmla z27.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x24, x14, LSL #1]\n"
- "ldr x24, [x16, #0x78]\n"
- "movprfx z31, z15\n fmla z31.h, p3/M, z8.h, z11.h\n"
- "fmla z22.h, p3/M, z6.h, z9.h\n"
- "ld1h { z11.h }, p2/Z, [x12, x14, LSL #1]\n"
- "ldr x12, [x16, #0x80]\n"
- "fmla z25.h, p3/M, z4.h, z9.h\n"
- "fmla z26.h, p3/M, z3.h, z9.h\n"
- "ldr x21, [x28, #0x10]\n"
- "ldr x20, [x28, #0x18]\n"
- "fmla z20.h, p3/M, z8.h, z9.h\n"
- "fmla z24.h, p3/M, z5.h, z9.h\n"
- "whilelt p0.h, x15, %x[n_channels]\n"
- "fmla z28.h, p3/M, z2.h, z9.h\n"
- "fmla z16.h, p3/M, z1.h, z12.h\n"
- "fmla z17.h, p3/M, z0.h, z12.h\n"
- "movprfx z29, z15\n fmla z29.h, p3/M, z1.h, z9.h\n"
- "movprfx z30, z15\n fmla z30.h, p3/M, z0.h, z9.h\n"
- "fmla z18.h, p3/M, z2.h, z11.h\n"
- "ld1h { z9.h }, p2/Z, [x10, x14, LSL #1]\n"
- "ldr x10, [x16, #0x90]\n"
- "fmla z21.h, p3/M, z8.h, z10.h\n"
- "fmla z19.h, p3/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x27, x14, LSL #1]\n"
- "ldr x27, [x16, #0xa0]\n"
- "fmla z22.h, p3/M, z7.h, z10.h\n"
- "fmla z23.h, p3/M, z6.h, z10.h\n"
- "fmla z25.h, p3/M, z5.h, z10.h\n"
- "fmla z26.h, p3/M, z4.h, z10.h\n"
- "fmla z27.h, p3/M, z3.h, z10.h\n"
- "fmla z29.h, p3/M, z2.h, z10.h\n"
- "fmla z30.h, p3/M, z1.h, z10.h\n"
+ "movprfx z31, z13\n fmla z31.h, p3/M, z8.h, z9.h\n"
+ "ldr x26, [x14, #0x20]\n"
+ "whilelt p1.h, x12, %x[n_channels]\n"
+ "movprfx z30, z13\n fmla z30.h, p3/M, z7.h, z9.h\n"
+ "ldr x25, [x14, #0x28]\n"
+ "inch x11\n"
+ "movprfx z29, z13\n fmla z29.h, p3/M, z6.h, z9.h\n"
+ "ldr x24, [x14, #0x30]\n"
+ "mov p0.b, p2.b\n"
+ "movprfx z27, z13\n fmla z27.h, p3/M, z5.h, z9.h\n"
+ "ldr x23, [x14, #0x38]\n"
+ "movprfx z26, z13\n fmla z26.h, p3/M, z4.h, z9.h\n"
+ "ldr x10, [x14, #0x40]\n"
+ "movprfx z25, z13\n fmla z25.h, p3/M, z3.h, z9.h\n"
+ "ldr x9, [x14, #0x48]\n"
+ "movprfx z23, z13\n fmla z23.h, p3/M, z2.h, z9.h\n"
+ "ldr x28, [x14, #0x50]\n"
+ "movprfx z22, z13\n fmla z22.h, p3/M, z1.h, z9.h\n"
+ "ldr x27, [x14, #0x58]\n"
+ "movprfx z21, z13\n fmla z21.h, p3/M, z0.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x24, x13, LSL #1]\n"
"fmla z31.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x25, x14, LSL #1]\n"
- "ldr x25, [x16, #0xa8]\n"
- "fmla z16.h, p3/M, z3.h, z9.h\n"
- "fmla z20.h, p3/M, z0.h, z9.h\n"
- "ld1h { z12.h }, p2/Z, [x9, x14, LSL #1]\n"
- "ldr x9, [x16, #0x98]\n"
- "fmla z24.h, p3/M, z6.h, z11.h\n"
- "fmla z28.h, p3/M, z3.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x26, x14, LSL #1]\n"
- "ldr x26, [x16, #0xb0]\n"
- "fmla z17.h, p3/M, z4.h, z10.h\n"
- "fmla z18.h, p3/M, z3.h, z10.h\n"
- "fmla z21.h, p3/M, z1.h, z10.h\n"
- "fmla z19.h, p3/M, z5.h, z12.h\n"
- "fmla z23.h, p3/M, z2.h, z12.h\n"
- "fmla z22.h, p3/M, z0.h, z10.h\n"
- "ld1h { z12.h }, p2/Z, [x24, x14, LSL #1]\n"
- "ldr x24, [x16, #0xb8]\n"
- "fmla z27.h, p3/M, z8.h, z11.h\n"
- "fmla z31.h, p3/M, z5.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x12, x14, LSL #1]\n"
- "ldr x12, [x16, #0xc0]\n"
- "fmla z16.h, p3/M, z5.h, z10.h\n"
- "fmla z20.h, p3/M, z2.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x11, x14, LSL #1]\n"
- "ldr x11, [x16, #0xc8]\n"
- "fmla z17.h, p3/M, z5.h, z12.h\n"
- "fmla z18.h, p3/M, z4.h, z12.h\n"
- "fmla z21.h, p3/M, z2.h, z12.h\n"
- "fmla z19.h, p3/M, z3.h, z12.h\n"
- "fmla z22.h, p3/M, z1.h, z12.h\n"
- "fmla z23.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x9, x14, LSL #1]\n"
- "ldr x9, [x16, #0xd8]\n"
- "fmla z28.h, p3/M, z7.h, z11.h\n"
- "fmla z29.h, p3/M, z6.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x10, x14, LSL #1]\n"
- "ldr x10, [x16, #0xd0]\n"
- "fmla z16.h, p3/M, z7.h, z10.h\n"
- "fmla z17.h, p3/M, z6.h, z10.h\n"
- "fmla z20.h, p3/M, z4.h, z10.h\n"
- "fmla z21.h, p3/M, z3.h, z10.h\n"
- "fmla z24.h, p3/M, z1.h, z10.h\n"
- "fmla z25.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x27, x14, LSL #1]\n"
- "ldr x27, [x16, #0xe0]\n"
- "fmla z18.h, p3/M, z8.h, z12.h\n"
- "fmla z30.h, p3/M, z8.h, z11.h\n"
- "fmla z31.h, p3/M, z7.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x25, x14, LSL #1]\n"
- "fmla z27.h, p3/M, z1.h, z12.h\n"
- "ldr x25, [x16, #0xe8]\n"
- "fmla z19.h, p3/M, z7.h, z12.h\n"
- "fmla z22.h, p3/M, z5.h, z12.h\n"
- "fmla z23.h, p3/M, z4.h, z12.h\n"
- "fmla z26.h, p3/M, z2.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x26, x14, LSL #1]\n"
- "ldr x26, [x16, #0xf0]\n"
- "fmla z16.h, p3/M, z2.h, z10.h\n"
- "fmla z17.h, p3/M, z1.h, z10.h\n"
- "fmla z18.h, p3/M, z0.h, z10.h\n"
- "fmla z20.h, p3/M, z7.h, z11.h\n"
- "ld1h { z10.h }, p2/Z, [x24, x14, LSL #1]\n"
- "ldr x24, [x16, #0xf8]\n"
- "fmla z21.h, p3/M, z6.h, z11.h\n"
- "fmla z24.h, p3/M, z4.h, z11.h\n"
- "fmla z25.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z10.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "movprfx z28, z13\n fmla z28.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z8.h, z12.h\n"
+ "ldr x26, [x14, #0x60]\n"
+ "fmla z29.h, p3/M, z7.h, z12.h\n"
+ "ldr x25, [x14, #0x68]\n"
+ "fmla z26.h, p3/M, z5.h, z12.h\n"
+ "ldr x24, [x14, #0x70]\n"
+ "fmla z28.h, p3/M, z6.h, z12.h\n"
+ "ldr x22, [x16, #0x0]\n"
+ "fmla z25.h, p3/M, z4.h, z12.h\n"
+ "ldr x21, [x16, #0x8]\n"
+ "movprfx z24, z13\n fmla z24.h, p3/M, z3.h, z12.h\n"
+ "ldr x20, [x16, #0x10]\n"
+ "fmla z22.h, p3/M, z2.h, z12.h\n"
+ "ldr x19, [x16, #0x18]\n"
+ "fmla z21.h, p3/M, z1.h, z12.h\n"
+ "movprfx z20, z13\n fmla z20.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x23, x13, LSL #1]\n"
+ "movprfx z19, z13\n fmla z19.h, p3/M, z6.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x9, x13, LSL #1]\n"
+ "movprfx z16, z13\n fmla z16.h, p3/M, z8.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x10, x13, LSL #1]\n"
+ "fmla z27.h, p3/M, z8.h, z9.h\n"
+ "ldr x23, [x14, #0x78]\n"
+ "fmla z26.h, p3/M, z7.h, z9.h\n"
+ "ldr x10, [x14, #0x80]\n"
+ "fmla z25.h, p3/M, z6.h, z9.h\n"
+ "ldr x9, [x14, #0x88]\n"
+ "fmla z23.h, p3/M, z5.h, z9.h\n"
+ "fmla z22.h, p3/M, z4.h, z9.h\n"
+ "fmla z21.h, p3/M, z3.h, z9.h\n"
+ "fmla z19.h, p3/M, z2.h, z9.h\n"
+ "movprfx z18, z13\n fmla z18.h, p3/M, z1.h, z9.h\n"
+ "movprfx z17, z13\n fmla z17.h, p3/M, z0.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x28, x13, LSL #1]\n"
+ "fmla z31.h, p3/M, z1.h, z12.h\n"
+ "ldr x28, [x14, #0x90]\n"
+ "fmla z30.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x27, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z2.h, z11.h\n"
+ "ldr x27, [x14, #0x98]\n"
"fmla z28.h, p3/M, z1.h, z11.h\n"
- "fmla z29.h, p3/M, z0.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x12, x14, LSL #1]\n"
- "fmla z27.h, p3/M, z4.h, z11.h\n"
- "ldr x12, [x16, #0x100]\n"
- "fmla z30.h, p3/M, z2.h, z11.h\n"
- "fmla z17.h, p3/M, z2.h, z12.h\n"
- "fmla z18.h, p3/M, z1.h, z12.h\n"
- "fmla z19.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x11, x14, LSL #1]\n"
- "ldr x11, [x16, #0x108]\n"
- "fmla z16.h, p3/M, z6.h, z10.h\n"
+ "ld1h { z11.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "fmla z26.h, p3/M, z8.h, z10.h\n"
+ "ldr x26, [x14, #0xa0]\n"
+ "fmla z25.h, p3/M, z7.h, z10.h\n"
+ "ld1h { z13.h }, p3/Z, [x15]\n"
+ "fmla z24.h, p3/M, z6.h, z10.h\n"
+ "fmla z22.h, p3/M, z5.h, z10.h\n"
+ "fmla z21.h, p3/M, z4.h, z10.h\n"
"fmla z20.h, p3/M, z3.h, z10.h\n"
- "fmla z24.h, p3/M, z0.h, z10.h\n"
- "fmla z22.h, p3/M, z8.h, z11.h\n"
- "ld1h { z10.h }, p2/Z, [x10, x14, LSL #1]\n"
- "ldr x10, [x16, #0x110]\n"
- "fmla z23.h, p3/M, z7.h, z11.h\n"
- "fmla z26.h, p3/M, z5.h, z11.h\n"
- "fmla z31.h, p3/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x9, x14, LSL #1]\n"
- "fmla z27.h, p3/M, z2.h, z12.h\n"
- "ldr x9, [x16, #0x118]\n"
- "fmla z28.h, p3/M, z0.h, z10.h\n"
- "fmla z29.h, p3/M, z4.h, z11.h\n"
- "fmla z30.h, p3/M, z3.h, z11.h\n"
- "fmla z19.h, p3/M, z8.h, z12.h\n"
- "fmla z23.h, p3/M, z5.h, z12.h\n"
- "fmla z20.h, p3/M, z6.h, z10.h\n"
- "ld1h { z12.h }, p2/Z, [x27, x14, LSL #1]\n"
- "fmla z24.h, p3/M, z3.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x25, x14, LSL #1]\n"
- "fmla z25.h, p3/M, z7.h, z11.h\n"
- "fmla z26.h, p3/M, z6.h, z11.h\n"
- "fmla z28.h, p3/M, z5.h, z11.h\n"
- "fmla z27.h, p3/M, z5.h, z12.h\n"
- "fmla z31.h, p3/M, z2.h, z12.h\n"
- "fmla z29.h, p3/M, z7.h, z10.h\n"
+ "fmla z18.h, p3/M, z2.h, z10.h\n"
+ "fmla z17.h, p3/M, z1.h, z10.h\n"
+ "fmla z16.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "fmla z31.h, p3/M, z3.h, z9.h\n"
+ "ldr x25, [x14, #0xa8]\n"
+ "fmla z27.h, p3/M, z0.h, z9.h\n"
+ "fmla z28.h, p3/M, z5.h, z12.h\n"
+ "fmla z24.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x23, x13, LSL #1]\n"
+ "fmla z23.h, p3/M, z6.h, z11.h\n"
+ "ldr x23, [x14, #0xb8]\n"
+ "fmla z19.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x24, x13, LSL #1]\n"
+ "fmla z31.h, p3/M, z5.h, z10.h\n"
+ "ldr x24, [x14, #0xb0]\n"
+ "fmla z30.h, p3/M, z4.h, z10.h\n"
+ "fmla z29.h, p3/M, z3.h, z10.h\n"
+ "fmla z27.h, p3/M, z2.h, z10.h\n"
+ "fmla z26.h, p3/M, z1.h, z10.h\n"
+ "fmla z25.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x9, x13, LSL #1]\n"
+ "fmla z20.h, p3/M, z8.h, z11.h\n"
+ "ldr x9, [x14, #0xc8]\n"
+ "fmla z16.h, p3/M, z5.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x10, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z5.h, z12.h\n"
+ "ldr x10, [x14, #0xc0]\n"
+ "fmla z29.h, p3/M, z4.h, z12.h\n"
+ "fmla z28.h, p3/M, z3.h, z12.h\n"
+ "fmla z26.h, p3/M, z2.h, z12.h\n"
+ "fmla z25.h, p3/M, z1.h, z12.h\n"
+ "fmla z24.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x27, x13, LSL #1]\n"
+ "fmla z19.h, p3/M, z7.h, z11.h\n"
+ "ldr x27, [x14, #0xd8]\n"
+ "fmla z18.h, p3/M, z6.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x28, x13, LSL #1]\n"
+ "fmla z31.h, p3/M, z7.h, z10.h\n"
+ "ldr x28, [x14, #0xd0]\n"
"fmla z30.h, p3/M, z6.h, z10.h\n"
- "fmla z24.h, p3/M, z8.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x26, x14, LSL #1]\n"
- "fmla z28.h, p3/M, z8.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x12, x14, LSL #1]\n"
+ "fmla z27.h, p3/M, z4.h, z10.h\n"
+ "fmla z26.h, p3/M, z3.h, z10.h\n"
+ "fmla z23.h, p3/M, z1.h, z10.h\n"
+ "fmla z22.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "fmla z17.h, p3/M, z8.h, z11.h\n"
+ "ldr x26, [x14, #0xe0]\n"
+ "fmla z16.h, p3/M, z7.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z8.h, z12.h\n"
+ "ldr x25, [x14, #0xe8]\n"
+ "fmla z28.h, p3/M, z7.h, z12.h\n"
+ "fmla z25.h, p3/M, z5.h, z12.h\n"
+ "fmla z24.h, p3/M, z4.h, z12.h\n"
+ "fmla z21.h, p3/M, z2.h, z12.h\n"
+ "fmla z20.h, p3/M, z1.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x24, x13, LSL #1]\n"
+ "fmla z31.h, p3/M, z2.h, z10.h\n"
+ "ldr x24, [x14, #0xf0]\n"
+ "fmla z30.h, p3/M, z1.h, z10.h\n"
+ "fmla z29.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x23, x13, LSL #1]\n"
+ "fmla z27.h, p3/M, z7.h, z11.h\n"
+ "ldr x23, [x14, #0xf8]\n"
+ "fmla z26.h, p3/M, z6.h, z11.h\n"
+ "fmla z23.h, p3/M, z4.h, z11.h\n"
+ "fmla z22.h, p3/M, z3.h, z11.h\n"
+ "fmla z19.h, p3/M, z1.h, z11.h\n"
+ "fmla z18.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x10, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z2.h, z12.h\n"
+ "ldr x10, [x14, #0x100]\n"
+ "fmla z29.h, p3/M, z1.h, z12.h\n"
+ "fmla z28.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x9, x13, LSL #1]\n"
+ "fmla z31.h, p3/M, z6.h, z10.h\n"
+ "ldr x9, [x14, #0x108]\n"
+ "fmla z27.h, p3/M, z3.h, z10.h\n"
+ "fmla z23.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x28, x13, LSL #1]\n"
"fmla z25.h, p3/M, z8.h, z11.h\n"
- "fmla z26.h, p3/M, z7.h, z11.h\n"
- "fmla z27.h, p3/M, z6.h, z11.h\n"
- "fmla z29.h, p3/M, z5.h, z11.h\n"
- "fmla z30.h, p3/M, z4.h, z11.h\n"
- "fmla z31.h, p3/M, z3.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x11, x14, LSL #1]\n"
- "ldp x12, x11, [x16, #0x0]\n"
- "fmla z23.h, p3/M, z8.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x24, x14, LSL #1]\n"
- "fmla z16.h, p3/M, z4.h, z10.h\n"
- "fmax z16.h, p3/M, z16.h, z14.h\n"
- "fmla z17.h, p3/M, z3.h, z10.h\n"
+ "ldr x28, [x14, #0x110]\n"
+ "fmla z24.h, p3/M, z7.h, z11.h\n"
+ "fmla z21.h, p3/M, z5.h, z11.h\n"
+ "fmla z20.h, p3/M, z4.h, z11.h\n"
+ "fmla z17.h, p3/M, z2.h, z11.h\n"
+ "fmla z16.h, p3/M, z1.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x27, x13, LSL #1]\n"
+ "fmla z28.h, p3/M, z8.h, z12.h\n"
+ "ldr x27, [x14, #0x118]\n"
+ "fmla z24.h, p3/M, z5.h, z12.h\n"
+ "fmla z20.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "fmla z27.h, p3/M, z6.h, z10.h\n"
+ "fmla z23.h, p3/M, z3.h, z10.h\n"
+ "fmla z19.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "fmla z22.h, p3/M, z7.h, z11.h\n"
+ "fmla z21.h, p3/M, z6.h, z11.h\n"
+ "fmla z23.h, p3/M, z8.h, z11.h\n"
+ "fmla z19.h, p3/M, z5.h, z11.h\n"
+ "fmla z18.h, p3/M, z4.h, z11.h\n"
+ "fmla z17.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x24, x13, LSL #1]\n"
+ "fmla z24.h, p3/M, z8.h, z12.h\n"
+ "fmla z20.h, p3/M, z5.h, z12.h\n"
+ "fmla z16.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x23, x13, LSL #1]\n"
+ "fmla z19.h, p3/M, z8.h, z10.h\n"
+ "fmla z18.h, p3/M, z7.h, z10.h\n"
+ "fmla z17.h, p3/M, z6.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x10, x13, LSL #1]\n"
+ "fmla z22.h, p3/M, z8.h, z11.h\n"
+ "fmla z21.h, p3/M, z7.h, z11.h\n"
+ "fmla z20.h, p3/M, z6.h, z11.h\n"
"fmla z18.h, p3/M, z5.h, z11.h\n"
- "fmax z17.h, p3/M, z17.h, z14.h\n"
- "fmax z18.h, p3/M, z18.h, z14.h\n"
- "fmla z19.h, p3/M, z4.h, z11.h\n"
- "fmla z29.h, p3/M, z8.h, z12.h\n"
- "fmax z19.h, p3/M, z19.h, z14.h\n"
- "fmin z16.h, p3/M, z16.h, z13.h\n"
- "fmla z30.h, p3/M, z7.h, z12.h\n"
- "fmla z31.h, p3/M, z6.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x10, x14, LSL #1]\n"
- "fmin z17.h, p3/M, z17.h, z13.h\n"
- "fmla z20.h, p3/M, z1.h, z10.h\n"
- "fmla z21.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x9, x14, LSL #1]\n"
- "fmin z18.h, p3/M, z18.h, z13.h\n"
- "fmla z22.h, p3/M, z2.h, z11.h\n"
- "fmla z23.h, p3/M, z1.h, z11.h\n"
- "fmin z19.h, p3/M, z19.h, z13.h\n"
- "fmax z20.h, p3/M, z20.h, z14.h\n"
- "fmla z24.h, p3/M, z7.h, z12.h\n"
- "fmla z25.h, p3/M, z6.h, z12.h\n"
- "fmax z21.h, p3/M, z21.h, z14.h\n"
- "fmax z22.h, p3/M, z22.h, z14.h\n"
- "fmla z26.h, p3/M, z8.h, z10.h\n"
- "fmla z27.h, p3/M, z7.h, z10.h\n"
- "fmax z23.h, p3/M, z23.h, z14.h\n"
- "st1h { z16.h }, p1, [x23, x13, LSL #1]\n"
- "st1h { z17.h }, p1, [x22, x13, LSL #1]\n"
- "ldr x23, [x28, #0x20]\n"
- "ldr x22, [x28, #0x28]\n"
- "fmla z28.h, p3/M, z4.h, z12.h\n"
- "st1h { z18.h }, p1, [x21, x13, LSL #1]\n"
- "ldr x21, [x28, #0x30]\n"
- "fmla z29.h, p3/M, z3.h, z12.h\n"
- "fmla z30.h, p3/M, z5.h, z10.h\n"
- "st1h { z19.h }, p1, [x20, x13, LSL #1]\n"
- "ldr x20, [x28, #0x38]\n"
+ "fmla z17.h, p3/M, z4.h, z11.h\n"
+ "fmla z16.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x9, x13, LSL #1]\n"
"fmla z31.h, p3/M, z4.h, z10.h\n"
- "ldp x10, x9, [x16, #0x10]\n"
- "fmin z20.h, p3/M, z20.h, z13.h\n"
- "fmin z21.h, p3/M, z21.h, z13.h\n"
- "st1h { z20.h }, p1, [x23, x13, LSL #1]\n"
- "ldr x23, [x28, #0x40]\n"
- "fmin z22.h, p3/M, z22.h, z13.h\n"
- "fmin z23.h, p3/M, z23.h, z13.h\n"
- "st1h { z21.h }, p1, [x22, x13, LSL #1]\n"
- "ldr x22, [x28, #0x48]\n"
- "fmax z24.h, p3/M, z24.h, z14.h\n"
- "fmax z25.h, p3/M, z25.h, z14.h\n"
- "st1h { z22.h }, p1, [x21, x13, LSL #1]\n"
- "ldr x21, [x28, #0x50]\n"
- "fmax z26.h, p3/M, z26.h, z14.h\n"
- "fmax z27.h, p3/M, z27.h, z14.h\n"
- "st1h { z23.h }, p1, [x20, x13, LSL #1]\n"
- "ldr x20, [x28, #0x58]\n"
- "inch x14\n"
- "ld1h { z9.h }, p0/Z, [x12, x15, LSL #1]\n"
- "ld1h { z10.h }, p0/Z, [x11, x15, LSL #1]\n"
- "fmin z24.h, p3/M, z24.h, z13.h\n"
- "ld1h { z11.h }, p0/Z, [x10, x15, LSL #1]\n"
- "ld1h { z12.h }, p0/Z, [x9, x15, LSL #1]\n"
- "inch x15\n"
- "fmin z25.h, p3/M, z25.h, z13.h\n"
- "fmin z26.h, p3/M, z26.h, z13.h\n"
- "fmin z27.h, p3/M, z27.h, z13.h\n"
- "st1h { z24.h }, p1, [x23, x13, LSL #1]\n"
- "ldr x23, [x28, #0x60]\n"
- "fmax z28.h, p3/M, z28.h, z14.h\n"
- "fmax z29.h, p3/M, z29.h, z14.h\n"
- "st1h { z25.h }, p1, [x22, x13, LSL #1]\n"
- "ldr x22, [x28, #0x68]\n"
- "fmax z30.h, p3/M, z30.h, z14.h\n"
- "fmax z31.h, p3/M, z31.h, z14.h\n"
- "st1h { z26.h }, p1, [x21, x13, LSL #1]\n"
- "ldr x21, [x28, #0x70]\n"
- "st1h { z27.h }, p1, [x20, x13, LSL #1]\n"
- "ldr x20, [x28, #0x78]\n"
- "ld1h { z15.h }, p3/Z, [x17]\n"
- "whilelt p2.h, x14, %x[n_channels]\n"
- "ld1h { z0.h }, p3/Z, [x17, #1, MUL VL]\n"
- "ld1h { z1.h }, p3/Z, [x17, #2, MUL VL]\n"
- "cmp x15, %x[n_channels]\n"
- "fmin z28.h, p3/M, z28.h, z13.h\n"
- "ld1h { z2.h }, p3/Z, [x17, #3, MUL VL]\n"
- "ld1h { z3.h }, p3/Z, [x17, #4, MUL VL]\n"
- "fmin z29.h, p3/M, z29.h, z13.h\n"
- "fmin z30.h, p3/M, z30.h, z13.h\n"
- "ld1h { z4.h }, p3/Z, [x17, #5, MUL VL]\n"
- "ld1h { z5.h }, p3/Z, [x17, #6, MUL VL]\n"
- "fmin z31.h, p3/M, z31.h, z13.h\n"
- "st1h { z28.h }, p1, [x23, x13, LSL #1]\n"
- "ld1h { z6.h }, p3/Z, [x17, #7, MUL VL]\n"
- "addvl x17, x17, #16\n"
- "st1h { z29.h }, p1, [x22, x13, LSL #1]\n"
- "ld1h { z7.h }, p3/Z, [x17, #-8, MUL VL]\n"
- "st1h { z30.h }, p1, [x21, x13, LSL #1]\n"
- "ld1h { z8.h }, p3/Z, [x17, #-7, MUL VL]\n"
- "addvl x17, x17, #-6\n"
- "st1h { z31.h }, p1, [x20, x13, LSL #1]\n"
- "blt 1b\n"
- "2:" // Channel tail
- "movprfx z21, z15\n fmla z21.h, p3/M, z4.h, z9.h\n"
- "movprfx z16, z15\n fmla z16.h, p3/M, z8.h, z9.h\n"
- "ldr x27, [x16, #0x20]\n"
- "ldr x26, [x16, #0x30]\n"
- "movprfx z22, z15\n fmla z22.h, p3/M, z3.h, z9.h\n"
- "movprfx z25, z15\n fmla z25.h, p3/M, z1.h, z9.h\n"
- "ldr x25, [x16, #0x28]\n"
- "ldr x24, [x16, #0x38]\n"
- "movprfx z26, z15\n fmla z26.h, p3/M, z0.h, z9.h\n"
- "movprfx z17, z15\n fmla z17.h, p3/M, z7.h, z9.h\n"
- "ldr x12, [x16, #0x40]\n"
- "ldr x11, [x16, #0x48]\n"
- "movprfx z18, z15\n fmla z18.h, p3/M, z6.h, z9.h\n"
- "fmla z21.h, p3/M, z5.h, z12.h\n"
- "ldr x10, [x16, #0x50]\n"
- "ldr x9, [x16, #0x58]\n"
- "movprfx z20, z15\n fmla z20.h, p3/M, z5.h, z9.h\n"
- "movprfx z24, z15\n fmla z24.h, p3/M, z2.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x26, x14, LSL #1]\n"
- "ldr x26, [x16, #0x70]\n"
- "fmla z16.h, p3/M, z0.h, z10.h\n"
- "movprfx z19, z15\n fmla z19.h, p3/M, z2.h, z11.h\n"
- "ld1h { z10.h }, p2/Z, [x27, x14, LSL #1]\n"
- "ld1h { z11.h }, p2/Z, [x25, x14, LSL #1]\n"
- "fmla z22.h, p3/M, z4.h, z12.h\n"
- "fmla z25.h, p3/M, z2.h, z12.h\n"
- "ldr x27, [x16, #0x60]\n"
- "ldr x25, [x16, #0x68]\n"
- "fmla z26.h, p3/M, z1.h, z12.h\n"
- "fmla z17.h, p3/M, z8.h, z12.h\n"
+ "ldp x10, x9, [x14, #0x0]\n"
+ "fmla z18.h, p3/M, z8.h, z12.h\n"
+ "ld1h { z9.h }, p1/Z, [x10, x12, LSL #1]\n"
+ "fmla z17.h, p3/M, z7.h, z12.h\n"
+ "fmla z16.h, p3/M, z6.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x28, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z3.h, z10.h\n"
+ "fmla z27.h, p3/M, z1.h, z10.h\n"
+ "fmla z26.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x27, x13, LSL #1]\n"
"inch x13\n"
- "mov p1.b, p2.b\n"
- "fmla z18.h, p3/M, z7.h, z12.h\n"
- "movprfx z28, z15\n fmla z28.h, p3/M, z6.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x11, x14, LSL #1]\n"
- "ldr x11, [x16, #0x88]\n"
- "fmla z21.h, p3/M, z7.h, z9.h\n"
- "fmla z19.h, p3/M, z6.h, z12.h\n"
- "ldr x23, [x28, #0x0]\n"
- "ldr x22, [x28, #0x8]\n"
- "movprfx z23, z15\n fmla z23.h, p3/M, z3.h, z12.h\n"
- "movprfx z27, z15\n fmla z27.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x24, x14, LSL #1]\n"
- "ldr x24, [x16, #0x78]\n"
- "movprfx z31, z15\n fmla z31.h, p3/M, z8.h, z11.h\n"
- "fmla z22.h, p3/M, z6.h, z9.h\n"
- "ld1h { z11.h }, p2/Z, [x12, x14, LSL #1]\n"
- "ldr x12, [x16, #0x80]\n"
- "fmla z25.h, p3/M, z4.h, z9.h\n"
- "fmla z26.h, p3/M, z3.h, z9.h\n"
- "ldr x21, [x28, #0x10]\n"
- "ldr x20, [x28, #0x18]\n"
- "fmla z20.h, p3/M, z8.h, z9.h\n"
- "fmla z24.h, p3/M, z5.h, z9.h\n"
- "fmla z28.h, p3/M, z2.h, z9.h\n"
- "fmla z16.h, p3/M, z1.h, z12.h\n"
- "fmla z17.h, p3/M, z0.h, z12.h\n"
- "movprfx z29, z15\n fmla z29.h, p3/M, z1.h, z9.h\n"
- "movprfx z30, z15\n fmla z30.h, p3/M, z0.h, z9.h\n"
- "fmla z18.h, p3/M, z2.h, z11.h\n"
- "ld1h { z9.h }, p2/Z, [x10, x14, LSL #1]\n"
- "ldr x10, [x16, #0x90]\n"
+ "fmla z29.h, p3/M, z5.h, z11.h\n"
+ "ldp x28, x27, [x14, #0x10]\n"
+ "whilelt p2.h, x13, %x[n_channels]\n"
+ "fmla z28.h, p3/M, z4.h, z11.h\n"
+ "ld1h { z0.h }, p3/Z, [x15, #1, MUL VL]\n"
+ "fmla z25.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z2.h }, p3/Z, [x15, #3, MUL VL]\n"
+ "fmla z24.h, p3/M, z1.h, z11.h\n"
+ "ld1h { z11.h }, p1/Z, [x28, x12, LSL #1]\n"
+ "fmla z23.h, p3/M, z7.h, z12.h\n"
+ "ld1h { z1.h }, p3/Z, [x15, #2, MUL VL]\n"
+ "fmla z22.h, p3/M, z6.h, z12.h\n"
+ "ld1h { z6.h }, p3/Z, [x15, #7, MUL VL]\n"
+ "fmla z19.h, p3/M, z4.h, z12.h\n"
+ "fmla z18.h, p3/M, z3.h, z12.h\n"
+ "ld1h { z12.h }, p1/Z, [x27, x12, LSL #1]\n"
"fmla z21.h, p3/M, z8.h, z10.h\n"
- "fmla z19.h, p3/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x27, x14, LSL #1]\n"
- "ldr x27, [x16, #0xa0]\n"
- "fmla z22.h, p3/M, z7.h, z10.h\n"
- "fmla z23.h, p3/M, z6.h, z10.h\n"
- "fmla z25.h, p3/M, z5.h, z10.h\n"
- "fmla z26.h, p3/M, z4.h, z10.h\n"
- "fmla z27.h, p3/M, z3.h, z10.h\n"
- "fmla z29.h, p3/M, z2.h, z10.h\n"
- "fmla z30.h, p3/M, z1.h, z10.h\n"
+ "ld1h { z3.h }, p3/Z, [x15, #4, MUL VL]\n"
+ "fmla z20.h, p3/M, z7.h, z10.h\n"
+ "fmla z17.h, p3/M, z5.h, z10.h\n"
+ "ld1h { z5.h }, p3/Z, [x15, #6, MUL VL]\n"
+ "fmla z16.h, p3/M, z4.h, z10.h\n"
+ "ld1h { z10.h }, p1/Z, [x9, x12, LSL #1]\n"
+ "inch x12\n"
+ "fmax z31.h, p3/M, z31.h, z15.h\n"
+ "ld1h { z4.h }, p3/Z, [x15, #5, MUL VL]\n"
+ "addvl x15, x15, #16\n"
+ "fmax z30.h, p3/M, z30.h, z15.h\n"
+ "ld1h { z7.h }, p3/Z, [x15, #-8, MUL VL]\n"
+ "cmp x12, %x[n_channels]\n"
+ "fmax z29.h, p3/M, z29.h, z15.h\n"
+ "ld1h { z8.h }, p3/Z, [x15, #-7, MUL VL]\n"
+ "addvl x15, x15, #-6\n"
+ "fmax z28.h, p3/M, z28.h, z15.h\n"
+ "fmax z27.h, p3/M, z27.h, z15.h\n"
+ "fmin z31.h, p3/M, z31.h, z14.h\n"
+ "st1h { z31.h }, p0, [x22, x11, LSL #1]\n"
+ "fmin z30.h, p3/M, z30.h, z14.h\n"
+ "fmin z29.h, p3/M, z29.h, z14.h\n"
+ "ldr x22, [x16, #0x20]\n"
+ "fmin z28.h, p3/M, z28.h, z14.h\n"
+ "st1h { z30.h }, p0, [x21, x11, LSL #1]\n"
+ "fmin z27.h, p3/M, z27.h, z14.h\n"
+ "fmax z26.h, p3/M, z26.h, z15.h\n"
+ "st1h { z29.h }, p0, [x20, x11, LSL #1]\n"
+ "fmax z25.h, p3/M, z25.h, z15.h\n"
+ "st1h { z28.h }, p0, [x19, x11, LSL #1]\n"
+ "fmax z24.h, p3/M, z24.h, z15.h\n"
+ "ldr x21, [x16, #0x28]\n"
+ "fmax z23.h, p3/M, z23.h, z15.h\n"
+ "st1h { z27.h }, p0, [x22, x11, LSL #1]\n"
+ "fmin z26.h, p3/M, z26.h, z14.h\n"
+ "ldr x20, [x16, #0x30]\n"
+ "fmin z25.h, p3/M, z25.h, z14.h\n"
+ "ldr x19, [x16, #0x38]\n"
+ "fmin z24.h, p3/M, z24.h, z14.h\n"
+ "ldr x22, [x16, #0x40]\n"
+ "fmin z23.h, p3/M, z23.h, z14.h\n"
+ "st1h { z26.h }, p0, [x21, x11, LSL #1]\n"
+ "fmax z22.h, p3/M, z22.h, z15.h\n"
+ "st1h { z25.h }, p0, [x20, x11, LSL #1]\n"
+ "fmax z21.h, p3/M, z21.h, z15.h\n"
+ "st1h { z24.h }, p0, [x19, x11, LSL #1]\n"
+ "fmax z20.h, p3/M, z20.h, z15.h\n"
+ "st1h { z23.h }, p0, [x22, x11, LSL #1]\n"
+ "fmax z19.h, p3/M, z19.h, z15.h\n"
+ "ldr x21, [x16, #0x48]\n"
+ "fmin z22.h, p3/M, z22.h, z14.h\n"
+ "ldr x20, [x16, #0x50]\n"
+ "fmin z21.h, p3/M, z21.h, z14.h\n"
+ "ldr x19, [x16, #0x58]\n"
+ "fmin z20.h, p3/M, z20.h, z14.h\n"
+ "ldr x22, [x16, #0x60]\n"
+ "fmin z19.h, p3/M, z19.h, z14.h\n"
+ "st1h { z22.h }, p0, [x21, x11, LSL #1]\n"
+ "fmax z18.h, p3/M, z18.h, z15.h\n"
+ "st1h { z21.h }, p0, [x20, x11, LSL #1]\n"
+ "fmax z17.h, p3/M, z17.h, z15.h\n"
+ "st1h { z20.h }, p0, [x19, x11, LSL #1]\n"
+ "fmax z16.h, p3/M, z16.h, z15.h\n"
+ "st1h { z19.h }, p0, [x22, x11, LSL #1]\n"
+ "ldr x21, [x16, #0x68]\n"
+ "fmin z18.h, p3/M, z18.h, z14.h\n"
+ "ldr x20, [x16, #0x70]\n"
+ "fmin z17.h, p3/M, z17.h, z14.h\n"
+ "ldr x19, [x16, #0x78]\n"
+ "fmin z16.h, p3/M, z16.h, z14.h\n"
+ "st1h { z18.h }, p0, [x21, x11, LSL #1]\n"
+ "st1h { z17.h }, p0, [x20, x11, LSL #1]\n"
+ "st1h { z16.h }, p0, [x19, x11, LSL #1]\n"
+ "blt 1b\n"
+ "2:" // Channel tail
+ "movprfx z31, z13\n fmla z31.h, p3/M, z8.h, z9.h\n"
+ "ldr x26, [x14, #0x20]\n"
+ "inch x11\n"
+ "movprfx z30, z13\n fmla z30.h, p3/M, z7.h, z9.h\n"
+ "ldr x25, [x14, #0x28]\n"
+ "mov p0.b, p2.b\n"
+ "movprfx z29, z13\n fmla z29.h, p3/M, z6.h, z9.h\n"
+ "ldr x24, [x14, #0x30]\n"
+ "movprfx z27, z13\n fmla z27.h, p3/M, z5.h, z9.h\n"
+ "ldr x23, [x14, #0x38]\n"
+ "movprfx z26, z13\n fmla z26.h, p3/M, z4.h, z9.h\n"
+ "ldr x10, [x14, #0x40]\n"
+ "movprfx z25, z13\n fmla z25.h, p3/M, z3.h, z9.h\n"
+ "ldr x9, [x14, #0x48]\n"
+ "movprfx z23, z13\n fmla z23.h, p3/M, z2.h, z9.h\n"
+ "ldr x28, [x14, #0x50]\n"
+ "movprfx z22, z13\n fmla z22.h, p3/M, z1.h, z9.h\n"
+ "ldr x27, [x14, #0x58]\n"
+ "movprfx z21, z13\n fmla z21.h, p3/M, z0.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x24, x13, LSL #1]\n"
"fmla z31.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x25, x14, LSL #1]\n"
- "ldr x25, [x16, #0xa8]\n"
- "fmla z16.h, p3/M, z3.h, z9.h\n"
- "fmla z20.h, p3/M, z0.h, z9.h\n"
- "ld1h { z12.h }, p2/Z, [x9, x14, LSL #1]\n"
- "ldr x9, [x16, #0x98]\n"
- "fmla z24.h, p3/M, z6.h, z11.h\n"
- "fmla z28.h, p3/M, z3.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x26, x14, LSL #1]\n"
- "ldr x26, [x16, #0xb0]\n"
- "fmla z17.h, p3/M, z4.h, z10.h\n"
- "fmla z18.h, p3/M, z3.h, z10.h\n"
- "fmla z21.h, p3/M, z1.h, z10.h\n"
- "fmla z19.h, p3/M, z5.h, z12.h\n"
- "fmla z23.h, p3/M, z2.h, z12.h\n"
- "fmla z22.h, p3/M, z0.h, z10.h\n"
- "ld1h { z12.h }, p2/Z, [x24, x14, LSL #1]\n"
- "ldr x24, [x16, #0xb8]\n"
- "fmla z27.h, p3/M, z8.h, z11.h\n"
- "fmla z31.h, p3/M, z5.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x12, x14, LSL #1]\n"
- "ldr x12, [x16, #0xc0]\n"
- "fmla z16.h, p3/M, z5.h, z10.h\n"
- "fmla z20.h, p3/M, z2.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x11, x14, LSL #1]\n"
- "ldr x11, [x16, #0xc8]\n"
- "fmla z17.h, p3/M, z5.h, z12.h\n"
- "fmla z18.h, p3/M, z4.h, z12.h\n"
- "fmla z21.h, p3/M, z2.h, z12.h\n"
- "fmla z19.h, p3/M, z3.h, z12.h\n"
- "fmla z22.h, p3/M, z1.h, z12.h\n"
- "fmla z23.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x9, x14, LSL #1]\n"
- "ldr x9, [x16, #0xd8]\n"
- "fmla z28.h, p3/M, z7.h, z11.h\n"
- "fmla z29.h, p3/M, z6.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x10, x14, LSL #1]\n"
- "ldr x10, [x16, #0xd0]\n"
- "fmla z16.h, p3/M, z7.h, z10.h\n"
- "fmla z17.h, p3/M, z6.h, z10.h\n"
- "fmla z20.h, p3/M, z4.h, z10.h\n"
- "fmla z21.h, p3/M, z3.h, z10.h\n"
- "fmla z24.h, p3/M, z1.h, z10.h\n"
- "fmla z25.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x27, x14, LSL #1]\n"
- "ldr x27, [x16, #0xe0]\n"
- "fmla z18.h, p3/M, z8.h, z12.h\n"
- "fmla z30.h, p3/M, z8.h, z11.h\n"
- "fmla z31.h, p3/M, z7.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x25, x14, LSL #1]\n"
- "fmla z27.h, p3/M, z1.h, z12.h\n"
- "ldr x25, [x16, #0xe8]\n"
- "fmla z19.h, p3/M, z7.h, z12.h\n"
- "fmla z22.h, p3/M, z5.h, z12.h\n"
- "fmla z23.h, p3/M, z4.h, z12.h\n"
- "fmla z26.h, p3/M, z2.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x26, x14, LSL #1]\n"
- "ldr x26, [x16, #0xf0]\n"
- "fmla z16.h, p3/M, z2.h, z10.h\n"
- "fmla z17.h, p3/M, z1.h, z10.h\n"
- "fmla z18.h, p3/M, z0.h, z10.h\n"
- "fmla z20.h, p3/M, z7.h, z11.h\n"
- "ld1h { z10.h }, p2/Z, [x24, x14, LSL #1]\n"
- "ldr x24, [x16, #0xf8]\n"
- "fmla z21.h, p3/M, z6.h, z11.h\n"
- "fmla z24.h, p3/M, z4.h, z11.h\n"
- "fmla z25.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z10.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "movprfx z28, z13\n fmla z28.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z8.h, z12.h\n"
+ "ldr x26, [x14, #0x60]\n"
+ "fmla z29.h, p3/M, z7.h, z12.h\n"
+ "ldr x25, [x14, #0x68]\n"
+ "fmla z26.h, p3/M, z5.h, z12.h\n"
+ "ldr x24, [x14, #0x70]\n"
+ "fmla z28.h, p3/M, z6.h, z12.h\n"
+ "ldr x22, [x16, #0x0]\n"
+ "fmla z25.h, p3/M, z4.h, z12.h\n"
+ "ldr x21, [x16, #0x8]\n"
+ "movprfx z24, z13\n fmla z24.h, p3/M, z3.h, z12.h\n"
+ "ldr x20, [x16, #0x10]\n"
+ "fmla z22.h, p3/M, z2.h, z12.h\n"
+ "ldr x19, [x16, #0x18]\n"
+ "fmla z21.h, p3/M, z1.h, z12.h\n"
+ "movprfx z20, z13\n fmla z20.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x23, x13, LSL #1]\n"
+ "movprfx z19, z13\n fmla z19.h, p3/M, z6.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x9, x13, LSL #1]\n"
+ "movprfx z16, z13\n fmla z16.h, p3/M, z8.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x10, x13, LSL #1]\n"
+ "fmla z27.h, p3/M, z8.h, z9.h\n"
+ "ldr x23, [x14, #0x78]\n"
+ "fmla z26.h, p3/M, z7.h, z9.h\n"
+ "ldr x10, [x14, #0x80]\n"
+ "fmla z25.h, p3/M, z6.h, z9.h\n"
+ "ldr x9, [x14, #0x88]\n"
+ "fmla z23.h, p3/M, z5.h, z9.h\n"
+ "fmla z22.h, p3/M, z4.h, z9.h\n"
+ "fmla z21.h, p3/M, z3.h, z9.h\n"
+ "fmla z19.h, p3/M, z2.h, z9.h\n"
+ "movprfx z18, z13\n fmla z18.h, p3/M, z1.h, z9.h\n"
+ "movprfx z17, z13\n fmla z17.h, p3/M, z0.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x28, x13, LSL #1]\n"
+ "fmla z31.h, p3/M, z1.h, z12.h\n"
+ "ldr x28, [x14, #0x90]\n"
+ "fmla z30.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x27, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z2.h, z11.h\n"
+ "ldr x27, [x14, #0x98]\n"
"fmla z28.h, p3/M, z1.h, z11.h\n"
- "fmla z29.h, p3/M, z0.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x12, x14, LSL #1]\n"
- "fmla z27.h, p3/M, z4.h, z11.h\n"
- "ldr x12, [x16, #0x100]\n"
- "fmla z30.h, p3/M, z2.h, z11.h\n"
- "fmla z17.h, p3/M, z2.h, z12.h\n"
- "fmla z18.h, p3/M, z1.h, z12.h\n"
- "fmla z19.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x11, x14, LSL #1]\n"
- "ldr x11, [x16, #0x108]\n"
- "fmla z16.h, p3/M, z6.h, z10.h\n"
+ "ld1h { z11.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "fmla z26.h, p3/M, z8.h, z10.h\n"
+ "ldr x26, [x14, #0xa0]\n"
+ "fmla z25.h, p3/M, z7.h, z10.h\n"
+ "fmla z24.h, p3/M, z6.h, z10.h\n"
+ "fmla z22.h, p3/M, z5.h, z10.h\n"
+ "fmla z21.h, p3/M, z4.h, z10.h\n"
"fmla z20.h, p3/M, z3.h, z10.h\n"
- "fmla z24.h, p3/M, z0.h, z10.h\n"
- "fmla z22.h, p3/M, z8.h, z11.h\n"
- "ld1h { z10.h }, p2/Z, [x10, x14, LSL #1]\n"
- "ldr x10, [x16, #0x110]\n"
- "fmla z23.h, p3/M, z7.h, z11.h\n"
- "fmla z26.h, p3/M, z5.h, z11.h\n"
- "fmla z31.h, p3/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x9, x14, LSL #1]\n"
- "fmla z27.h, p3/M, z2.h, z12.h\n"
- "ldr x9, [x16, #0x118]\n"
- "fmla z28.h, p3/M, z0.h, z10.h\n"
- "fmla z29.h, p3/M, z4.h, z11.h\n"
- "fmla z30.h, p3/M, z3.h, z11.h\n"
- "fmla z19.h, p3/M, z8.h, z12.h\n"
- "fmla z23.h, p3/M, z5.h, z12.h\n"
- "fmla z20.h, p3/M, z6.h, z10.h\n"
- "ld1h { z12.h }, p2/Z, [x27, x14, LSL #1]\n"
- "fmla z24.h, p3/M, z3.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x25, x14, LSL #1]\n"
- "fmla z25.h, p3/M, z7.h, z11.h\n"
- "fmla z26.h, p3/M, z6.h, z11.h\n"
- "fmla z28.h, p3/M, z5.h, z11.h\n"
- "fmla z27.h, p3/M, z5.h, z12.h\n"
- "fmla z31.h, p3/M, z2.h, z12.h\n"
- "fmla z29.h, p3/M, z7.h, z10.h\n"
+ "fmla z18.h, p3/M, z2.h, z10.h\n"
+ "fmla z17.h, p3/M, z1.h, z10.h\n"
+ "fmla z16.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "fmla z31.h, p3/M, z3.h, z9.h\n"
+ "ldr x25, [x14, #0xa8]\n"
+ "fmla z27.h, p3/M, z0.h, z9.h\n"
+ "fmla z28.h, p3/M, z5.h, z12.h\n"
+ "fmla z24.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x23, x13, LSL #1]\n"
+ "fmla z23.h, p3/M, z6.h, z11.h\n"
+ "ldr x23, [x14, #0xb8]\n"
+ "fmla z19.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x24, x13, LSL #1]\n"
+ "fmla z31.h, p3/M, z5.h, z10.h\n"
+ "ldr x24, [x14, #0xb0]\n"
+ "fmla z30.h, p3/M, z4.h, z10.h\n"
+ "fmla z29.h, p3/M, z3.h, z10.h\n"
+ "fmla z27.h, p3/M, z2.h, z10.h\n"
+ "fmla z26.h, p3/M, z1.h, z10.h\n"
+ "fmla z25.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x9, x13, LSL #1]\n"
+ "fmla z20.h, p3/M, z8.h, z11.h\n"
+ "ldr x9, [x14, #0xc8]\n"
+ "fmla z16.h, p3/M, z5.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x10, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z5.h, z12.h\n"
+ "ldr x10, [x14, #0xc0]\n"
+ "fmla z29.h, p3/M, z4.h, z12.h\n"
+ "fmla z28.h, p3/M, z3.h, z12.h\n"
+ "fmla z26.h, p3/M, z2.h, z12.h\n"
+ "fmla z25.h, p3/M, z1.h, z12.h\n"
+ "fmla z24.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x27, x13, LSL #1]\n"
+ "fmla z19.h, p3/M, z7.h, z11.h\n"
+ "ldr x27, [x14, #0xd8]\n"
+ "fmla z18.h, p3/M, z6.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x28, x13, LSL #1]\n"
+ "fmla z31.h, p3/M, z7.h, z10.h\n"
+ "ldr x28, [x14, #0xd0]\n"
"fmla z30.h, p3/M, z6.h, z10.h\n"
- "fmla z24.h, p3/M, z8.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x26, x14, LSL #1]\n"
- "fmla z28.h, p3/M, z8.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x12, x14, LSL #1]\n"
+ "fmla z27.h, p3/M, z4.h, z10.h\n"
+ "fmla z26.h, p3/M, z3.h, z10.h\n"
+ "fmla z23.h, p3/M, z1.h, z10.h\n"
+ "fmla z22.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "fmla z17.h, p3/M, z8.h, z11.h\n"
+ "ldr x26, [x14, #0xe0]\n"
+ "fmla z16.h, p3/M, z7.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z8.h, z12.h\n"
+ "ldr x25, [x14, #0xe8]\n"
+ "fmla z28.h, p3/M, z7.h, z12.h\n"
+ "fmla z25.h, p3/M, z5.h, z12.h\n"
+ "fmla z24.h, p3/M, z4.h, z12.h\n"
+ "fmla z21.h, p3/M, z2.h, z12.h\n"
+ "fmla z20.h, p3/M, z1.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x24, x13, LSL #1]\n"
+ "fmla z31.h, p3/M, z2.h, z10.h\n"
+ "ldr x24, [x14, #0xf0]\n"
+ "fmla z30.h, p3/M, z1.h, z10.h\n"
+ "fmla z29.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x23, x13, LSL #1]\n"
+ "fmla z27.h, p3/M, z7.h, z11.h\n"
+ "ldr x23, [x14, #0xf8]\n"
+ "fmla z26.h, p3/M, z6.h, z11.h\n"
+ "fmla z23.h, p3/M, z4.h, z11.h\n"
+ "fmla z22.h, p3/M, z3.h, z11.h\n"
+ "fmla z19.h, p3/M, z1.h, z11.h\n"
+ "fmla z18.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x10, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z2.h, z12.h\n"
+ "ldr x10, [x14, #0x100]\n"
+ "fmla z29.h, p3/M, z1.h, z12.h\n"
+ "fmla z28.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x9, x13, LSL #1]\n"
+ "fmla z31.h, p3/M, z6.h, z10.h\n"
+ "ldr x9, [x14, #0x108]\n"
+ "fmla z27.h, p3/M, z3.h, z10.h\n"
+ "fmla z23.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x28, x13, LSL #1]\n"
"fmla z25.h, p3/M, z8.h, z11.h\n"
- "fmla z26.h, p3/M, z7.h, z11.h\n"
- "fmla z27.h, p3/M, z6.h, z11.h\n"
- "fmla z29.h, p3/M, z5.h, z11.h\n"
- "fmla z30.h, p3/M, z4.h, z11.h\n"
- "fmla z31.h, p3/M, z3.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x11, x14, LSL #1]\n"
- "fmla z23.h, p3/M, z8.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x24, x14, LSL #1]\n"
- "fmla z16.h, p3/M, z4.h, z10.h\n"
- "fmax z16.h, p3/M, z16.h, z14.h\n"
- "fmla z17.h, p3/M, z3.h, z10.h\n"
+ "ldr x28, [x14, #0x110]\n"
+ "fmla z24.h, p3/M, z7.h, z11.h\n"
+ "fmla z21.h, p3/M, z5.h, z11.h\n"
+ "fmla z20.h, p3/M, z4.h, z11.h\n"
+ "fmla z17.h, p3/M, z2.h, z11.h\n"
+ "fmla z16.h, p3/M, z1.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x27, x13, LSL #1]\n"
+ "fmla z28.h, p3/M, z8.h, z12.h\n"
+ "ldr x27, [x14, #0x118]\n"
+ "fmla z24.h, p3/M, z5.h, z12.h\n"
+ "fmla z20.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "fmla z27.h, p3/M, z6.h, z10.h\n"
+ "fmla z23.h, p3/M, z3.h, z10.h\n"
+ "fmla z19.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "fmla z22.h, p3/M, z7.h, z11.h\n"
+ "fmla z21.h, p3/M, z6.h, z11.h\n"
+ "fmla z23.h, p3/M, z8.h, z11.h\n"
+ "fmla z19.h, p3/M, z5.h, z11.h\n"
+ "fmla z18.h, p3/M, z4.h, z11.h\n"
+ "fmla z17.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x24, x13, LSL #1]\n"
+ "fmla z24.h, p3/M, z8.h, z12.h\n"
+ "fmla z20.h, p3/M, z5.h, z12.h\n"
+ "fmla z16.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x23, x13, LSL #1]\n"
+ "fmla z19.h, p3/M, z8.h, z10.h\n"
+ "fmla z18.h, p3/M, z7.h, z10.h\n"
+ "fmla z17.h, p3/M, z6.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x10, x13, LSL #1]\n"
+ "fmla z22.h, p3/M, z8.h, z11.h\n"
+ "fmla z21.h, p3/M, z7.h, z11.h\n"
+ "fmla z20.h, p3/M, z6.h, z11.h\n"
"fmla z18.h, p3/M, z5.h, z11.h\n"
- "fmax z17.h, p3/M, z17.h, z14.h\n"
- "fmax z18.h, p3/M, z18.h, z14.h\n"
- "fmla z19.h, p3/M, z4.h, z11.h\n"
- "fmla z29.h, p3/M, z8.h, z12.h\n"
- "fmax z19.h, p3/M, z19.h, z14.h\n"
- "fmin z16.h, p3/M, z16.h, z13.h\n"
- "fmla z30.h, p3/M, z7.h, z12.h\n"
- "fmla z31.h, p3/M, z6.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x10, x14, LSL #1]\n"
- "fmin z17.h, p3/M, z17.h, z13.h\n"
- "fmla z20.h, p3/M, z1.h, z10.h\n"
- "fmla z21.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x9, x14, LSL #1]\n"
- "fmin z18.h, p3/M, z18.h, z13.h\n"
- "fmla z22.h, p3/M, z2.h, z11.h\n"
- "fmla z23.h, p3/M, z1.h, z11.h\n"
- "fmin z19.h, p3/M, z19.h, z13.h\n"
- "fmax z20.h, p3/M, z20.h, z14.h\n"
- "fmla z24.h, p3/M, z7.h, z12.h\n"
- "fmla z25.h, p3/M, z6.h, z12.h\n"
- "fmax z21.h, p3/M, z21.h, z14.h\n"
- "fmax z22.h, p3/M, z22.h, z14.h\n"
- "fmla z26.h, p3/M, z8.h, z10.h\n"
- "fmla z27.h, p3/M, z7.h, z10.h\n"
- "fmax z23.h, p3/M, z23.h, z14.h\n"
- "st1h { z16.h }, p1, [x23, x13, LSL #1]\n"
- "st1h { z17.h }, p1, [x22, x13, LSL #1]\n"
- "ldr x23, [x28, #0x20]\n"
- "ldr x22, [x28, #0x28]\n"
- "fmla z28.h, p3/M, z4.h, z12.h\n"
- "st1h { z18.h }, p1, [x21, x13, LSL #1]\n"
- "ldr x21, [x28, #0x30]\n"
- "fmla z29.h, p3/M, z3.h, z12.h\n"
- "fmla z30.h, p3/M, z5.h, z10.h\n"
- "st1h { z19.h }, p1, [x20, x13, LSL #1]\n"
- "ldr x20, [x28, #0x38]\n"
+ "fmla z17.h, p3/M, z4.h, z11.h\n"
+ "fmla z16.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x9, x13, LSL #1]\n"
"fmla z31.h, p3/M, z4.h, z10.h\n"
- "fmin z20.h, p3/M, z20.h, z13.h\n"
- "fmin z21.h, p3/M, z21.h, z13.h\n"
- "fmin z22.h, p3/M, z22.h, z13.h\n"
- "st1h { z20.h }, p1, [x23, x13, LSL #1]\n"
- "ldr x23, [x28, #0x40]\n"
- "fmin z23.h, p3/M, z23.h, z13.h\n"
- "fmax z24.h, p3/M, z24.h, z14.h\n"
- "st1h { z21.h }, p1, [x22, x13, LSL #1]\n"
- "ldr x22, [x28, #0x48]\n"
- "fmax z25.h, p3/M, z25.h, z14.h\n"
- "fmax z26.h, p3/M, z26.h, z14.h\n"
- "st1h { z22.h }, p1, [x21, x13, LSL #1]\n"
- "ldr x21, [x28, #0x50]\n"
- "fmax z27.h, p3/M, z27.h, z14.h\n"
- "st1h { z23.h }, p1, [x20, x13, LSL #1]\n"
- "ldr x20, [x28, #0x58]\n"
- "fmin z24.h, p3/M, z24.h, z13.h\n"
- "fmin z25.h, p3/M, z25.h, z13.h\n"
- "fmin z26.h, p3/M, z26.h, z13.h\n"
- "st1h { z24.h }, p1, [x23, x13, LSL #1]\n"
- "ldr x23, [x28, #0x60]\n"
- "fmin z27.h, p3/M, z27.h, z13.h\n"
- "fmax z28.h, p3/M, z28.h, z14.h\n"
- "st1h { z25.h }, p1, [x22, x13, LSL #1]\n"
- "ldr x22, [x28, #0x68]\n"
- "fmax z29.h, p3/M, z29.h, z14.h\n"
- "fmax z30.h, p3/M, z30.h, z14.h\n"
- "st1h { z26.h }, p1, [x21, x13, LSL #1]\n"
- "ldr x21, [x28, #0x70]\n"
- "fmax z31.h, p3/M, z31.h, z14.h\n"
- "st1h { z27.h }, p1, [x20, x13, LSL #1]\n"
- "ldr x20, [x28, #0x78]\n"
- "fmin z28.h, p3/M, z28.h, z13.h\n"
- "fmin z29.h, p3/M, z29.h, z13.h\n"
- "fmin z30.h, p3/M, z30.h, z13.h\n"
- "st1h { z28.h }, p1, [x23, x13, LSL #1]\n"
- "fmin z31.h, p3/M, z31.h, z13.h\n"
- "st1h { z29.h }, p1, [x22, x13, LSL #1]\n"
- "st1h { z30.h }, p1, [x21, x13, LSL #1]\n"
- "st1h { z31.h }, p1, [x20, x13, LSL #1]\n"
+ "fmla z18.h, p3/M, z8.h, z12.h\n"
+ "fmla z17.h, p3/M, z7.h, z12.h\n"
+ "fmla z16.h, p3/M, z6.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x28, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z3.h, z10.h\n"
+ "fmla z27.h, p3/M, z1.h, z10.h\n"
+ "fmla z26.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x27, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z5.h, z11.h\n"
+ "fmla z28.h, p3/M, z4.h, z11.h\n"
+ "fmla z25.h, p3/M, z2.h, z11.h\n"
+ "fmla z24.h, p3/M, z1.h, z11.h\n"
+ "fmla z23.h, p3/M, z7.h, z12.h\n"
+ "fmla z22.h, p3/M, z6.h, z12.h\n"
+ "fmla z19.h, p3/M, z4.h, z12.h\n"
+ "fmla z18.h, p3/M, z3.h, z12.h\n"
+ "fmla z21.h, p3/M, z8.h, z10.h\n"
+ "fmla z20.h, p3/M, z7.h, z10.h\n"
+ "fmla z17.h, p3/M, z5.h, z10.h\n"
+ "fmla z16.h, p3/M, z4.h, z10.h\n"
+ "fmax z31.h, p3/M, z31.h, z15.h\n"
+ "fmax z30.h, p3/M, z30.h, z15.h\n"
+ "fmax z29.h, p3/M, z29.h, z15.h\n"
+ "fmax z28.h, p3/M, z28.h, z15.h\n"
+ "fmin z31.h, p3/M, z31.h, z14.h\n"
+ "st1h { z31.h }, p0, [x22, x11, LSL #1]\n"
+ "fmin z30.h, p3/M, z30.h, z14.h\n"
+ "fmin z29.h, p3/M, z29.h, z14.h\n"
+ "ldr x22, [x16, #0x20]\n"
+ "fmin z28.h, p3/M, z28.h, z14.h\n"
+ "st1h { z30.h }, p0, [x21, x11, LSL #1]\n"
+ "fmax z27.h, p3/M, z27.h, z15.h\n"
+ "fmax z26.h, p3/M, z26.h, z15.h\n"
+ "st1h { z29.h }, p0, [x20, x11, LSL #1]\n"
+ "fmax z25.h, p3/M, z25.h, z15.h\n"
+ "st1h { z28.h }, p0, [x19, x11, LSL #1]\n"
+ "fmax z24.h, p3/M, z24.h, z15.h\n"
+ "ldr x21, [x16, #0x28]\n"
+ "fmax z23.h, p3/M, z23.h, z15.h\n"
+ "ldr x20, [x16, #0x30]\n"
+ "fmin z27.h, p3/M, z27.h, z14.h\n"
+ "ldr x19, [x16, #0x38]\n"
+ "fmin z26.h, p3/M, z26.h, z14.h\n"
+ "st1h { z27.h }, p0, [x22, x11, LSL #1]\n"
+ "fmin z25.h, p3/M, z25.h, z14.h\n"
+ "fmin z24.h, p3/M, z24.h, z14.h\n"
+ "st1h { z26.h }, p0, [x21, x11, LSL #1]\n"
+ "fmin z23.h, p3/M, z23.h, z14.h\n"
+ "ldr x22, [x16, #0x40]\n"
+ "fmax z22.h, p3/M, z22.h, z15.h\n"
+ "ldr x21, [x16, #0x48]\n"
+ "fmax z21.h, p3/M, z21.h, z15.h\n"
+ "st1h { z25.h }, p0, [x20, x11, LSL #1]\n"
+ "fmax z20.h, p3/M, z20.h, z15.h\n"
+ "st1h { z24.h }, p0, [x19, x11, LSL #1]\n"
+ "fmax z19.h, p3/M, z19.h, z15.h\n"
+ "st1h { z23.h }, p0, [x22, x11, LSL #1]\n"
+ "fmin z22.h, p3/M, z22.h, z14.h\n"
+ "ldr x20, [x16, #0x50]\n"
+ "fmin z21.h, p3/M, z21.h, z14.h\n"
+ "ldr x19, [x16, #0x58]\n"
+ "fmin z20.h, p3/M, z20.h, z14.h\n"
+ "ldr x22, [x16, #0x60]\n"
+ "fmin z19.h, p3/M, z19.h, z14.h\n"
+ "st1h { z22.h }, p0, [x21, x11, LSL #1]\n"
+ "fmax z18.h, p3/M, z18.h, z15.h\n"
+ "st1h { z21.h }, p0, [x20, x11, LSL #1]\n"
+ "fmax z17.h, p3/M, z17.h, z15.h\n"
+ "st1h { z20.h }, p0, [x19, x11, LSL #1]\n"
+ "fmax z16.h, p3/M, z16.h, z15.h\n"
+ "st1h { z19.h }, p0, [x22, x11, LSL #1]\n"
+ "ldr x21, [x16, #0x68]\n"
+ "fmin z18.h, p3/M, z18.h, z14.h\n"
+ "ldr x20, [x16, #0x70]\n"
+ "fmin z17.h, p3/M, z17.h, z14.h\n"
+ "ldr x19, [x16, #0x78]\n"
+ "fmin z16.h, p3/M, z16.h, z14.h\n"
+ "st1h { z18.h }, p0, [x21, x11, LSL #1]\n"
+ "st1h { z17.h }, p0, [x20, x11, LSL #1]\n"
+ "st1h { z16.h }, p0, [x19, x11, LSL #1]\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
index 6a9b354c02..d20e9913ae 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -88,246 +88,246 @@ void sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
__asm__ __volatile__(
"ptrue p3.b\n"
- "mov x11, #0x0\n"
- "mov x16, #0x0\n"
+ "mov x7, #0x0\n"
+ "mov x8, #0x0\n"
"1:" // Tile loop
- "str x11, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "mov x25, #0x4\n"
- "mov x24, #0x2\n"
- "str x16, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x23, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x15, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "mul x22, x11, x23\n" // offset = tile_i * ld_input_row
- "ldr x21, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "madd x22, x16, x15, x22\n" // offset += tile_j * ld_input_col
- "ldr x14, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "cnth x13\n"
- "mul x20, x11, x21\n" // offset = tile_i * ld_output_row
- "ldr x12, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "ldr x11, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x10, x15, x15\n"
- "mul x22, x22, x25\n" // offset *= kernel_stride * output_size
- "add x12, x12, x22, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
- "ldr x9, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "add x28, x12, x23, LSL #1\n"
- "madd x20, x16, x14, x20\n" // offset += tile_j * ld_output_col
+ "str x7, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "mov x23, #0x4\n"
+ "str x8, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "mov x17, #0x2\n"
+ "ldr x16, [%x[params_struct], %[offsetof_args_params]]\n"
+ "mov x15, #0x0\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "cnth x14\n"
+ "ldr x13, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "sub x12, XZR, x14\n"
+ "ldr x21, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "mul x19, x7, x22\n" // offset = tile_i * ld_input_row
+ "ldr x20, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "madd x19, x8, x13, x19\n" // offset += tile_j * ld_input_col
+ "ldr x11, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+ "mul x19, x19, x23\n" // offset *= kernel_stride * output_size
+ "ldr x10, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "add x21, x21, x19, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
+ "ld1rh { z19.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "add x9, x21, x22, LSL #1\n"
+ "ld1rh { z18.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "add x28, x9, x22, LSL #1\n"
+ "ld1h { z17.h }, p3/Z, [x16]\n"
+ "add x27, x28, x22, LSL #1\n"
+ "ld1h { z0.h }, p3/Z, [x16, #1, MUL VL]\n"
+ "add x26, x27, x22, LSL #1\n"
+ "ld1h { z1.h }, p3/Z, [x16, #2, MUL VL]\n"
+ "add x25, x13, x13\n"
+ "ld1h { z2.h }, p3/Z, [x16, #3, MUL VL]\n"
+ "add x24, x25, x13\n"
+ "ld1h { z3.h }, p3/Z, [x16, #4, MUL VL]\n"
+ "add x23, x24, x13\n"
+ "ld1h { z4.h }, p3/Z, [x16, #5, MUL VL]\n"
+ "mul x19, x7, x20\n" // offset = tile_i * ld_output_row
+ "ld1h { z5.h }, p3/Z, [x16, #6, MUL VL]\n"
+ "madd x19, x8, x11, x19\n" // offset += tile_j * ld_output_col
+ "ld1h { z6.h }, p3/Z, [x16, #7, MUL VL]\n"
+ "mul x19, x19, x17\n" // offset *= output_tile_size
"whilelt p2.h, XZR, %x[n_channels]\n"
- "ld1h { z19.h }, p3/Z, [x11]\n"
- "ld1h { z0.h }, p3/Z, [x11, #1, MUL VL]\n"
- "mul x20, x20, x24\n" // offset *= output_tile_size
- "ld1h { z1.h }, p3/Z, [x11, #2, MUL VL]\n"
- "ld1h { z2.h }, p3/Z, [x11, #3, MUL VL]\n"
- "add x27, x28, x23, LSL #1\n"
- "ld1h { z3.h }, p3/Z, [x11, #4, MUL VL]\n"
- "ld1h { z4.h }, p3/Z, [x11, #5, MUL VL]\n"
- "add x26, x10, x15\n"
- "add x25, x27, x23, LSL #1\n"
- "ld1h { z5.h }, p3/Z, [x11, #6, MUL VL]\n"
- "ld1h { z6.h }, p3/Z, [x11, #7, MUL VL]\n"
- "addvl x11, x11, #16\n"
- "add x24, x26, x15\n"
- "add x9, x9, x20, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
- "cmp x13, %x[n_channels]\n"
- "ld1rh { z18.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ld1rh { z17.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "add x23, x25, x23, LSL #1\n"
- "add x22, x9, x21, LSL #1\n"
- "ld1h { z7.h }, p3/Z, [x11, #-8, MUL VL]\n"
- "ld1h { z8.h }, p3/Z, [x11, #-7, MUL VL]\n"
- "mov x21, #0x0\n"
- "sub x20, XZR, x13\n"
- "ld1h { z9.h }, p2/Z, [x27, x10, LSL #1]\n"
- "ld1h { z10.h }, p2/Z, [x12]\n"
- "ld1h { z11.h }, p2/Z, [x12, x15, LSL #1]\n"
- "ld1h { z12.h }, p2/Z, [x12, x26, LSL #1]\n"
- "addvl x11, x11, #-6\n"
- "ld1h { z13.h }, p2/Z, [x12, x24, LSL #1]\n"
- "ld1h { z14.h }, p2/Z, [x28]\n"
- "ld1h { z15.h }, p2/Z, [x28, x15, LSL #1]\n"
- "ld1h { z16.h }, p2/Z, [x12, x10, LSL #1]\n"
+ "ld1h { z9.h }, p2/Z, [x28, x25, LSL #1]\n"
+ "ld1h { z10.h }, p2/Z, [x21]\n"
+ "add x10, x10, x19, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
+ "ld1h { z11.h }, p2/Z, [x21, x13, LSL #1]\n"
+ "add x22, x10, x20, LSL #1\n"
+ "ld1h { z12.h }, p2/Z, [x21, x24, LSL #1]\n"
+ "addvl x16, x16, #16\n"
+ "ld1h { z13.h }, p2/Z, [x21, x23, LSL #1]\n"
+ "cmp x14, %x[n_channels]\n"
+ "ld1h { z7.h }, p3/Z, [x16, #-8, MUL VL]\n"
+ "ld1h { z8.h }, p3/Z, [x16, #-7, MUL VL]\n"
+ "addvl x16, x16, #-6\n"
+ "ld1h { z14.h }, p2/Z, [x9]\n"
+ "ld1h { z15.h }, p2/Z, [x9, x13, LSL #1]\n"
+ "ld1h { z16.h }, p2/Z, [x21, x25, LSL #1]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
- "movprfx z28, z19\n fmla z28.h, p3/M, z8.h, z9.h\n"
- "movprfx z29, z19\n fmla z29.h, p3/M, z6.h, z9.h\n"
- "whilelt p1.h, x13, %x[n_channels]\n"
- "inch x21\n"
- "fmla z28.h, p3/M, z0.h, z10.h\n"
- "fmla z29.h, p3/M, z1.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x28, x24, LSL #1]\n"
- "inch x13\n"
- "fmla z28.h, p3/M, z1.h, z11.h\n"
- "fmla z29.h, p3/M, z2.h, z13.h\n"
- "ld1h { z11.h }, p2/Z, [x28, x26, LSL #1]\n"
- "ld1h { z13.h }, p2/Z, [x28, x10, LSL #1]\n"
- "fmla z28.h, p3/M, z3.h, z14.h\n"
- "fmla z29.h, p3/M, z0.h, z16.h\n"
- "ld1h { z14.h }, p2/Z, [x25]\n"
+ "movprfx z31, z17\n fmla z31.h, p3/M, z8.h, z9.h\n"
+ "whilelt p1.h, x14, %x[n_channels]\n"
+ "movprfx z30, z17\n fmla z30.h, p3/M, z6.h, z9.h\n"
+ "inch x12\n"
+ "movprfx z29, z17\n fmla z29.h, p3/M, z2.h, z9.h\n"
"mov p0.b, p2.b\n"
- "fmla z28.h, p3/M, z4.h, z15.h\n"
+ "movprfx z28, z17\n fmla z28.h, p3/M, z0.h, z9.h\n"
+ "ld1h { z17.h }, p3/Z, [x16]\n"
+ "inch x15\n"
+ "fmla z31.h, p3/M, z0.h, z10.h\n"
+ "addvl x21, x21, #1\n"
+ "ld1h { z10.h }, p1/Z, [x21]\n"
+ "fmla z30.h, p3/M, z1.h, z12.h\n"
+ "inch x14\n"
+ "fmla z31.h, p3/M, z1.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x9, x24, LSL #1]\n"
+ "ld1h { z12.h }, p2/Z, [x9, x23, LSL #1]\n"
+ "fmla z30.h, p3/M, z2.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x9, x25, LSL #1]\n"
+ "fmla z31.h, p3/M, z3.h, z14.h\n"
+ "ld1h { z14.h }, p2/Z, [x27]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z30.h, p3/M, z0.h, z16.h\n"
+ "fmla z29.h, p3/M, z3.h, z14.h\n"
+ "ld1h { z14.h }, p2/Z, [x27, x23, LSL #1]\n"
+ "fmla z31.h, p3/M, z4.h, z15.h\n"
+ "ld1h { z15.h }, p2/Z, [x28]\n"
+ "fmla z30.h, p3/M, z4.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x27, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z0.h, z15.h\n"
+ "ld1h { z0.h }, p3/Z, [x16, #1, MUL VL]\n"
+ "fmla z31.h, p3/M, z2.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x28, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z5.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x28, x24, LSL #1]\n"
"fmla z29.h, p3/M, z4.h, z11.h\n"
- "ld1h { z15.h }, p2/Z, [x27]\n"
- "ld1h { z11.h }, p2/Z, [x25, x15, LSL #1]\n"
- "fmla z28.h, p3/M, z2.h, z16.h\n"
- "fmla z29.h, p3/M, z5.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x27, x26, LSL #1]\n"
- "ld1h { z16.h }, p2/Z, [x27, x15, LSL #1]\n"
- "movprfx z30, z19\n fmla z30.h, p3/M, z2.h, z9.h\n"
- "movprfx z31, z19\n fmla z31.h, p3/M, z0.h, z9.h\n"
- "addvl x12, x12, #1\n"
+ "ld1h { z11.h }, p2/Z, [x28, x23, LSL #1]\n"
"addvl x28, x28, #1\n"
- "fmla z28.h, p3/M, z5.h, z13.h\n"
- "fmla z29.h, p3/M, z3.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x25, x26, LSL #1]\n"
- "ld1h { z19.h }, p3/Z, [x11]\n"
- "fmla z30.h, p3/M, z3.h, z14.h\n"
- "fmla z31.h, p3/M, z4.h, z13.h\n"
- "ld1h { z14.h }, p2/Z, [x25, x24, LSL #1]\n"
- "ld1h { z13.h }, p2/Z, [x23, x15, LSL #1]\n"
- "fmla z30.h, p3/M, z0.h, z15.h\n"
- "fmla z31.h, p3/M, z1.h, z12.h\n"
- "ld1h { z0.h }, p3/Z, [x11, #1, MUL VL]\n"
- "inch x20\n"
- "fmla z30.h, p3/M, z4.h, z11.h\n"
- "fmla z31.h, p3/M, z5.h, z14.h\n"
- "ld1h { z11.h }, p2/Z, [x27, x24, LSL #1]\n"
- "ld1h { z14.h }, p2/Z, [x23, x26, LSL #1]\n"
- "fmla z28.h, p3/M, z6.h, z15.h\n"
- "fmla z30.h, p3/M, z1.h, z16.h\n"
- "ld1h { z15.h }, p2/Z, [x23]\n"
- "addvl x27, x27, #1\n"
- "fmla z31.h, p3/M, z2.h, z11.h\n"
- "fmla z28.h, p3/M, z7.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x25, x10, LSL #1]\n"
- "fmax z28.h, p3/M, z28.h, z18.h\n"
- "fmla z30.h, p3/M, z6.h, z15.h\n"
- "fmla z31.h, p3/M, z3.h, z16.h\n"
- "ld1h { z15.h }, p2/Z, [x23, x10, LSL #1]\n"
- "ld1h { z1.h }, p3/Z, [x11, #2, MUL VL]\n"
- "fmla z30.h, p3/M, z7.h, z13.h\n"
- "fmla z31.h, p3/M, z7.h, z14.h\n"
- "ld1h { z2.h }, p3/Z, [x11, #3, MUL VL]\n"
- "ld1h { z3.h }, p3/Z, [x11, #4, MUL VL]\n"
- "fmla z29.h, p3/M, z7.h, z12.h\n"
- "fmla z30.h, p3/M, z5.h, z16.h\n"
- "ld1h { z4.h }, p3/Z, [x11, #5, MUL VL]\n"
- "ld1h { z5.h }, p3/Z, [x11, #6, MUL VL]\n"
+ "fmla z31.h, p3/M, z5.h, z13.h\n"
+ "ld1h { z9.h }, p1/Z, [x28, x25, LSL #1]\n"
+ "fmla z30.h, p3/M, z3.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x27, x24, LSL #1]\n"
+ "fmla z29.h, p3/M, z1.h, z16.h\n"
"fmla z31.h, p3/M, z6.h, z15.h\n"
- "fmla z29.h, p3/M, z8.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x23, x24, LSL #1]\n"
- "fmax z29.h, p3/M, z29.h, z18.h\n"
- "fmla z30.h, p3/M, z8.h, z15.h\n"
- "fmla z31.h, p3/M, z8.h, z11.h\n"
- "fmax z30.h, p3/M, z30.h, z18.h\n"
- "fmax z31.h, p3/M, z31.h, z18.h\n"
- "ld1h { z6.h }, p3/Z, [x11, #7, MUL VL]\n"
- "addvl x11, x11, #16\n"
- "whilelt p2.h, x21, %x[n_channels]\n"
- "ld1h { z9.h }, p1/Z, [x27, x10, LSL #1]\n"
- "cmp x13, %x[n_channels]\n"
- "fmin z28.h, p3/M, z28.h, z17.h\n"
- "ld1h { z10.h }, p1/Z, [x12]\n"
- "ld1h { z11.h }, p1/Z, [x12, x15, LSL #1]\n"
- "fmin z29.h, p3/M, z29.h, z17.h\n"
- "fmin z30.h, p3/M, z30.h, z17.h\n"
- "ld1h { z12.h }, p1/Z, [x12, x26, LSL #1]\n"
- "ld1h { z13.h }, p1/Z, [x12, x24, LSL #1]\n"
- "fmin z31.h, p3/M, z31.h, z17.h\n"
- "addvl x25, x25, #1\n"
- "ld1h { z14.h }, p1/Z, [x28]\n"
- "ld1h { z15.h }, p1/Z, [x28, x15, LSL #1]\n"
- "addvl x23, x23, #1\n"
- "ld1h { z16.h }, p1/Z, [x12, x10, LSL #1]\n"
- "st1h { z28.h }, p0, [x9]\n"
- "ld1h { z7.h }, p3/Z, [x11, #-8, MUL VL]\n"
- "st1h { z29.h }, p0, [x9, x14, LSL #1]\n"
- "addvl x9, x9, #1\n"
- "ld1h { z8.h }, p3/Z, [x11, #-7, MUL VL]\n"
- "addvl x11, x11, #-6\n"
- "st1h { z30.h }, p0, [x22]\n"
- "st1h { z31.h }, p0, [x22, x14, LSL #1]\n"
+ "ld1h { z15.h }, p2/Z, [x26]\n"
+ "fmla z28.h, p3/M, z4.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z7.h, z12.h\n"
+ "ld1h { z4.h }, p3/Z, [x16, #5, MUL VL]\n"
+ "fmla z29.h, p3/M, z6.h, z15.h\n"
+ "ld1h { z15.h }, p2/Z, [x26, x25, LSL #1]\n"
+ "fmla z31.h, p3/M, z7.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x27, x25, LSL #1]\n"
+ "addvl x27, x27, #1\n"
+ "fmla z28.h, p3/M, z1.h, z12.h\n"
+ "ld1h { z12.h }, p1/Z, [x21, x24, LSL #1]\n"
+ "fmla z30.h, p3/M, z8.h, z11.h\n"
+ "ld1h { z1.h }, p3/Z, [x16, #2, MUL VL]\n"
+ "fmla z29.h, p3/M, z7.h, z13.h\n"
+ "ld1h { z13.h }, p1/Z, [x21, x23, LSL #1]\n"
+ "fmax z31.h, p3/M, z31.h, z19.h\n"
+ "fmla z28.h, p3/M, z5.h, z14.h\n"
+ "ld1h { z14.h }, p2/Z, [x26, x24, LSL #1]\n"
+ "fmax z30.h, p3/M, z30.h, z19.h\n"
+ "fmla z29.h, p3/M, z5.h, z16.h\n"
+ "ld1h { z5.h }, p3/Z, [x16, #6, MUL VL]\n"
+ "fmin z31.h, p3/M, z31.h, z18.h\n"
+ "st1h { z31.h }, p0, [x10]\n"
+ "fmla z28.h, p3/M, z2.h, z11.h\n"
+ "fmla z29.h, p3/M, z8.h, z15.h\n"
+ "ld1h { z11.h }, p2/Z, [x26, x23, LSL #1]\n"
+ "whilelt p2.h, x15, %x[n_channels]\n"
+ "fmin z30.h, p3/M, z30.h, z18.h\n"
+ "ld1h { z2.h }, p3/Z, [x16, #3, MUL VL]\n"
+ "addvl x26, x26, #1\n"
+ "fmla z28.h, p3/M, z3.h, z16.h\n"
+ "ld1h { z16.h }, p1/Z, [x21, x25, LSL #1]\n"
+ "cmp x14, %x[n_channels]\n"
+ "fmax z29.h, p3/M, z29.h, z19.h\n"
+ "ld1h { z3.h }, p3/Z, [x16, #4, MUL VL]\n"
+ "st1h { z30.h }, p0, [x10, x11, LSL #1]\n"
+ "fmla z28.h, p3/M, z7.h, z14.h\n"
+ "ld1h { z14.h }, p1/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
+ "fmin z29.h, p3/M, z29.h, z18.h\n"
+ "st1h { z29.h }, p0, [x22]\n"
+ "fmla z28.h, p3/M, z6.h, z15.h\n"
+ "ld1h { z15.h }, p1/Z, [x9, x13, LSL #1]\n"
+ "fmla z28.h, p3/M, z8.h, z11.h\n"
+ "ld1h { z11.h }, p1/Z, [x21, x13, LSL #1]\n"
+ "ld1h { z6.h }, p3/Z, [x16, #7, MUL VL]\n"
+ "fmax z28.h, p3/M, z28.h, z19.h\n"
+ "addvl x16, x16, #16\n"
+ "ld1h { z7.h }, p3/Z, [x16, #-8, MUL VL]\n"
+ "fmin z28.h, p3/M, z28.h, z18.h\n"
+ "ld1h { z8.h }, p3/Z, [x16, #-7, MUL VL]\n"
+ "addvl x16, x16, #-6\n"
+ "st1h { z28.h }, p0, [x22, x11, LSL #1]\n"
"addvl x22, x22, #1\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
- "movprfx z28, z19\n fmla z28.h, p3/M, z8.h, z9.h\n"
- "movprfx z29, z19\n fmla z29.h, p3/M, z6.h, z9.h\n"
- "ldr x16, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x11, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "fmla z28.h, p3/M, z0.h, z10.h\n"
- "fmla z29.h, p3/M, z1.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x28, x24, LSL #1]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "fmla z28.h, p3/M, z1.h, z11.h\n"
- "fmla z29.h, p3/M, z2.h, z13.h\n"
- "ld1h { z11.h }, p2/Z, [x28, x26, LSL #1]\n"
- "ld1h { z13.h }, p2/Z, [x28, x10, LSL #1]\n"
- "fmla z28.h, p3/M, z3.h, z14.h\n"
- "fmla z29.h, p3/M, z0.h, z16.h\n"
- "ld1h { z14.h }, p2/Z, [x25]\n"
- "add x16, x16, #0x1\n"
- "fmla z28.h, p3/M, z4.h, z15.h\n"
- "fmla z29.h, p3/M, z4.h, z11.h\n"
- "ld1h { z15.h }, p2/Z, [x27]\n"
- "ld1h { z11.h }, p2/Z, [x25, x15, LSL #1]\n"
- "fmla z28.h, p3/M, z2.h, z16.h\n"
- "fmla z29.h, p3/M, z5.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x27, x26, LSL #1]\n"
- "ld1h { z16.h }, p2/Z, [x27, x15, LSL #1]\n"
- "movprfx z30, z19\n fmla z30.h, p3/M, z2.h, z9.h\n"
- "movprfx z31, z19\n fmla z31.h, p3/M, z0.h, z9.h\n"
- "cmp x16, x20\n"
- "add x21, x11, #0x1\n"
- "fmla z28.h, p3/M, z5.h, z13.h\n"
- "fmla z29.h, p3/M, z3.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x25, x26, LSL #1]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "fmla z30.h, p3/M, z3.h, z14.h\n"
- "fmla z31.h, p3/M, z4.h, z13.h\n"
- "ld1h { z14.h }, p2/Z, [x25, x24, LSL #1]\n"
- "ld1h { z13.h }, p2/Z, [x23, x15, LSL #1]\n"
- "fmla z30.h, p3/M, z0.h, z15.h\n"
- "fmla z31.h, p3/M, z1.h, z12.h\n"
- "csel x11, x11, x21, LT\n"
+ "movprfx z31, z17\n fmla z31.h, p3/M, z8.h, z9.h\n"
+ "ldr x7, [%x[params_struct], %[offsetof_args_tile_i]]\n"
"mov p0.b, p2.b\n"
+ "movprfx z30, z17\n fmla z30.h, p3/M, z6.h, z9.h\n"
+ "ldr x8, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "add x21, x7, #0x1\n"
+ "movprfx z29, z17\n fmla z29.h, p3/M, z2.h, z9.h\n"
+ "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "movprfx z28, z17\n fmla z28.h, p3/M, z0.h, z9.h\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "add x8, x8, #0x1\n"
+ "fmla z31.h, p3/M, z0.h, z10.h\n"
+ "cmp x8, x19\n"
+ "fmla z30.h, p3/M, z1.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x9, x23, LSL #1]\n"
+ "fmla z31.h, p3/M, z1.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x9, x24, LSL #1]\n"
+ "csel x8, x8, XZR, LT\n"
+ "fmla z30.h, p3/M, z2.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x9, x25, LSL #1]\n"
+ "csel x7, x7, x21, LT\n"
+ "fmla z31.h, p3/M, z3.h, z14.h\n"
+ "ld1h { z14.h }, p2/Z, [x27]\n"
+ "cmp x7, x20\n"
+ "fmla z30.h, p3/M, z0.h, z16.h\n"
+ "fmla z29.h, p3/M, z3.h, z14.h\n"
+ "ld1h { z14.h }, p2/Z, [x27, x23, LSL #1]\n"
+ "fmla z31.h, p3/M, z4.h, z15.h\n"
+ "ld1h { z15.h }, p2/Z, [x28]\n"
"fmla z30.h, p3/M, z4.h, z11.h\n"
- "fmla z31.h, p3/M, z5.h, z14.h\n"
- "ld1h { z11.h }, p2/Z, [x27, x24, LSL #1]\n"
- "ld1h { z14.h }, p2/Z, [x23, x26, LSL #1]\n"
- "fmla z28.h, p3/M, z6.h, z15.h\n"
- "fmla z30.h, p3/M, z1.h, z16.h\n"
- "ld1h { z15.h }, p2/Z, [x23]\n"
- "csel x16, x16, XZR, LT\n"
- "fmla z31.h, p3/M, z2.h, z11.h\n"
- "fmla z28.h, p3/M, z7.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x25, x10, LSL #1]\n"
- "fmax z28.h, p3/M, z28.h, z18.h\n"
- "fmla z30.h, p3/M, z6.h, z15.h\n"
- "fmla z31.h, p3/M, z3.h, z16.h\n"
- "ld1h { z15.h }, p2/Z, [x23, x10, LSL #1]\n"
- "cmp x11, x20\n"
- "fmla z30.h, p3/M, z7.h, z13.h\n"
- "fmla z31.h, p3/M, z7.h, z14.h\n"
- "fmin z28.h, p3/M, z28.h, z17.h\n"
- "st1h { z28.h }, p0, [x9]\n"
- "fmla z29.h, p3/M, z7.h, z12.h\n"
- "fmla z30.h, p3/M, z5.h, z16.h\n"
+ "ld1h { z11.h }, p2/Z, [x27, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z0.h, z15.h\n"
+ "fmla z31.h, p3/M, z2.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x28, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z5.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x28, x24, LSL #1]\n"
+ "fmla z29.h, p3/M, z4.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x28, x23, LSL #1]\n"
+ "fmla z31.h, p3/M, z5.h, z13.h\n"
+ "fmla z30.h, p3/M, z3.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x27, x24, LSL #1]\n"
+ "fmla z29.h, p3/M, z1.h, z16.h\n"
"fmla z31.h, p3/M, z6.h, z15.h\n"
- "fmla z29.h, p3/M, z8.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x23, x24, LSL #1]\n"
- "fmax z29.h, p3/M, z29.h, z18.h\n"
- "fmla z30.h, p3/M, z8.h, z15.h\n"
- "fmla z31.h, p3/M, z8.h, z11.h\n"
- "fmax z30.h, p3/M, z30.h, z18.h\n"
- "fmax z31.h, p3/M, z31.h, z18.h\n"
- "fmin z29.h, p3/M, z29.h, z17.h\n"
- "fmin z30.h, p3/M, z30.h, z17.h\n"
- "st1h { z29.h }, p0, [x9, x14, LSL #1]\n"
- "fmin z31.h, p3/M, z31.h, z17.h\n"
- "st1h { z30.h }, p0, [x22]\n"
- "st1h { z31.h }, p0, [x22, x14, LSL #1]\n"
+ "ld1h { z15.h }, p2/Z, [x26]\n"
+ "fmla z28.h, p3/M, z4.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z7.h, z12.h\n"
+ "fmla z29.h, p3/M, z6.h, z15.h\n"
+ "ld1h { z15.h }, p2/Z, [x26, x25, LSL #1]\n"
+ "fmla z31.h, p3/M, z7.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x27, x25, LSL #1]\n"
+ "fmla z28.h, p3/M, z1.h, z12.h\n"
+ "fmla z30.h, p3/M, z8.h, z11.h\n"
+ "fmla z29.h, p3/M, z7.h, z13.h\n"
+ "fmax z31.h, p3/M, z31.h, z19.h\n"
+ "fmla z28.h, p3/M, z5.h, z14.h\n"
+ "ld1h { z14.h }, p2/Z, [x26, x24, LSL #1]\n"
+ "fmax z30.h, p3/M, z30.h, z19.h\n"
+ "fmla z29.h, p3/M, z5.h, z16.h\n"
+ "fmin z31.h, p3/M, z31.h, z18.h\n"
+ "st1h { z31.h }, p0, [x10]\n"
+ "fmla z28.h, p3/M, z2.h, z11.h\n"
+ "fmla z29.h, p3/M, z8.h, z15.h\n"
+ "ld1h { z11.h }, p2/Z, [x26, x23, LSL #1]\n"
+ "fmin z30.h, p3/M, z30.h, z18.h\n"
+ "st1h { z30.h }, p0, [x10, x11, LSL #1]\n"
+ "fmla z28.h, p3/M, z3.h, z16.h\n"
+ "fmax z29.h, p3/M, z29.h, z19.h\n"
+ "fmla z28.h, p3/M, z7.h, z14.h\n"
+ "fmin z29.h, p3/M, z29.h, z18.h\n"
+ "st1h { z29.h }, p0, [x22]\n"
+ "fmla z28.h, p3/M, z6.h, z15.h\n"
+ "fmla z28.h, p3/M, z8.h, z11.h\n"
+ "fmax z28.h, p3/M, z28.h, z19.h\n"
+ "fmin z28.h, p3/M, z28.h, z18.h\n"
+ "st1h { z28.h }, p0, [x22, x11, LSL #1]\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
index ff97b51e28..ceba36d897 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,247 +87,247 @@ void sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
activation_min, activation_max);
__asm__ __volatile__(
+ "ldr x19, [%x[params_struct], %[offsetof_args_outptrs]]\n"
"ptrue p3.b\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "ldr x16, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x15, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "cnth x14\n"
- "ldp x13, x12, [x20, #0x0]\n"
- "ldp x11, x10, [x20, #0x10]\n"
- "mov x9, #0x0\n"
+ "ldr x15, [%x[params_struct], %[offsetof_args_params]]\n"
+ "add x14, %x[params_struct], %[offsetof_Args_inptrs]\n"
+ "ld1rh { z19.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "mov x13, #0x0\n"
+ "ld1rh { z18.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "cnth x12\n"
+ "ldp x11, x10, [x19, #0x0]\n"
+ "sub x9, XZR, x12\n"
+ "ldp x28, x27, [x19, #0x10]\n"
"whilelt p2.h, XZR, %x[n_channels]\n"
- "ld1h { z19.h }, p3/Z, [x16]\n"
- "ld1h { z0.h }, p3/Z, [x16, #1, MUL VL]\n"
- "cmp x14, %x[n_channels]\n"
- "ld1h { z1.h }, p3/Z, [x16, #2, MUL VL]\n"
- "ld1h { z2.h }, p3/Z, [x16, #3, MUL VL]\n"
- "sub x28, XZR, x14\n"
- "ld1h { z3.h }, p3/Z, [x16, #4, MUL VL]\n"
- "ld1h { z4.h }, p3/Z, [x16, #5, MUL VL]\n"
- "ld1h { z5.h }, p3/Z, [x16, #6, MUL VL]\n"
- "ld1h { z6.h }, p3/Z, [x16, #7, MUL VL]\n"
- "addvl x16, x16, #16\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "ldp x23, x22, [x15, #0x20]\n"
- "ldp x21, x20, [x15, #0x30]\n"
- "ld1rh { z18.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ld1rh { z17.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "ld1h { z7.h }, p3/Z, [x16, #-8, MUL VL]\n"
- "ld1h { z8.h }, p3/Z, [x16, #-7, MUL VL]\n"
- "addvl x16, x16, #-6\n"
- "ld1h { z9.h }, p2/Z, [x27, x9, LSL #1]\n"
- "ld1h { z10.h }, p2/Z, [x26, x9, LSL #1]\n"
- "ld1h { z11.h }, p2/Z, [x25, x9, LSL #1]\n"
- "ld1h { z12.h }, p2/Z, [x24, x9, LSL #1]\n"
- "ld1h { z13.h }, p2/Z, [x23, x9, LSL #1]\n"
- "ld1h { z14.h }, p2/Z, [x22, x9, LSL #1]\n"
- "ld1h { z15.h }, p2/Z, [x21, x9, LSL #1]\n"
- "ld1h { z16.h }, p2/Z, [x20, x9, LSL #1]\n"
+ "ld1h { z17.h }, p3/Z, [x15]\n"
+ "cmp x12, %x[n_channels]\n"
+ "ld1h { z0.h }, p3/Z, [x15, #1, MUL VL]\n"
+ "ld1h { z1.h }, p3/Z, [x15, #2, MUL VL]\n"
+ "ld1h { z2.h }, p3/Z, [x15, #3, MUL VL]\n"
+ "ld1h { z3.h }, p3/Z, [x15, #4, MUL VL]\n"
+ "ld1h { z4.h }, p3/Z, [x15, #5, MUL VL]\n"
+ "ld1h { z5.h }, p3/Z, [x15, #6, MUL VL]\n"
+ "ld1h { z6.h }, p3/Z, [x15, #7, MUL VL]\n"
+ "addvl x15, x15, #16\n"
+ "ldp x26, x25, [x14, #0x0]\n"
+ "ld1h { z7.h }, p3/Z, [x15, #-8, MUL VL]\n"
+ "ld1h { z8.h }, p3/Z, [x15, #-7, MUL VL]\n"
+ "addvl x15, x15, #-6\n"
+ "ld1h { z9.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "ld1h { z10.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "ldp x24, x23, [x14, #0x10]\n"
+ "ldp x22, x21, [x14, #0x20]\n"
+ "ldp x20, x19, [x14, #0x30]\n"
+ "ld1h { z11.h }, p2/Z, [x24, x13, LSL #1]\n"
+ "ld1h { z12.h }, p2/Z, [x23, x13, LSL #1]\n"
+ "ld1h { z13.h }, p2/Z, [x22, x13, LSL #1]\n"
+ "ld1h { z14.h }, p2/Z, [x21, x13, LSL #1]\n"
+ "ld1h { z15.h }, p2/Z, [x20, x13, LSL #1]\n"
+ "ld1h { z16.h }, p2/Z, [x19, x13, LSL #1]\n"
"bge 2f\n"
"1:" // Channel loop
- "movprfx z28, z19\n fmla z28.h, p3/M, z8.h, z9.h\n"
- "movprfx z29, z19\n fmla z29.h, p3/M, z6.h, z9.h\n"
- "ldr x27, [x15, #0x40]\n"
- "ldr x26, [x15, #0x48]\n"
- "fmla z28.h, p3/M, z0.h, z10.h\n"
- "fmla z29.h, p3/M, z1.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x26, x9, LSL #1]\n"
- "ldr x25, [x15, #0x50]\n"
- "fmla z28.h, p3/M, z1.h, z11.h\n"
- "fmla z29.h, p3/M, z2.h, z13.h\n"
- "ld1h { z11.h }, p2/Z, [x27, x9, LSL #1]\n"
- "ld1h { z13.h }, p2/Z, [x25, x9, LSL #1]\n"
- "fmla z28.h, p3/M, z3.h, z14.h\n"
- "fmla z29.h, p3/M, z0.h, z16.h\n"
- "ldr x24, [x15, #0x58]\n"
- "ldr x20, [x15, #0x78]\n"
- "fmla z28.h, p3/M, z4.h, z15.h\n"
- "fmla z29.h, p3/M, z4.h, z11.h\n"
- "ld1h { z14.h }, p2/Z, [x24, x9, LSL #1]\n"
- "ldr x23, [x15, #0x60]\n"
- "fmla z28.h, p3/M, z2.h, z16.h\n"
- "fmla z29.h, p3/M, z5.h, z12.h\n"
- "ldr x27, [x15, #0x80]\n"
- "ld1h { z15.h }, p2/Z, [x23, x9, LSL #1]\n"
- "movprfx z30, z19\n fmla z30.h, p3/M, z2.h, z9.h\n"
- "movprfx z31, z19\n fmla z31.h, p3/M, z0.h, z9.h\n"
- "ld1h { z12.h }, p2/Z, [x27, x9, LSL #1]\n"
- "ldr x22, [x15, #0x68]\n"
- "fmla z28.h, p3/M, z5.h, z13.h\n"
- "fmla z29.h, p3/M, z3.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x20, x9, LSL #1]\n"
- "ldr x26, [x15, #0x88]\n"
- "fmla z30.h, p3/M, z3.h, z14.h\n"
- "fmla z31.h, p3/M, z4.h, z13.h\n"
- "ld1h { z11.h }, p2/Z, [x22, x9, LSL #1]\n"
- "ld1h { z14.h }, p2/Z, [x26, x9, LSL #1]\n"
- "fmla z30.h, p3/M, z0.h, z15.h\n"
- "fmla z31.h, p3/M, z1.h, z12.h\n"
- "ldr x21, [x15, #0x70]\n"
- "ldr x24, [x15, #0x98]\n"
- "fmla z30.h, p3/M, z4.h, z11.h\n"
- "fmla z31.h, p3/M, z5.h, z14.h\n"
- "ld1h { z16.h }, p2/Z, [x21, x9, LSL #1]\n"
- "ld1h { z11.h }, p2/Z, [x24, x9, LSL #1]\n"
- "fmla z28.h, p3/M, z6.h, z15.h\n"
- "ldr x25, [x15, #0x90]\n"
- "ldr x22, [x15, #0xa8]\n"
- "fmla z30.h, p3/M, z1.h, z16.h\n"
- "fmla z31.h, p3/M, z2.h, z11.h\n"
- "fmla z28.h, p3/M, z7.h, z16.h\n"
- "ld1h { z15.h }, p2/Z, [x25, x9, LSL #1]\n"
- "ld1h { z16.h }, p2/Z, [x22, x9, LSL #1]\n"
- "ldr x23, [x15, #0xa0]\n"
- "ldr x21, [x15, #0xb0]\n"
- "fmla z30.h, p3/M, z6.h, z15.h\n"
- "fmla z31.h, p3/M, z3.h, z16.h\n"
- "ld1h { z13.h }, p2/Z, [x23, x9, LSL #1]\n"
- "ld1h { z14.h }, p2/Z, [x21, x9, LSL #1]\n"
- "fmla z30.h, p3/M, z7.h, z13.h\n"
- "fmla z31.h, p3/M, z7.h, z14.h\n"
- "ldr x20, [x15, #0xb8]\n"
- "fmla z29.h, p3/M, z7.h, z12.h\n"
- "ld1h { z15.h }, p2/Z, [x20, x9, LSL #1]\n"
- "fmla z30.h, p3/M, z5.h, z16.h\n"
- "ldr x27, [x15, #0xc0]\n"
- "fmla z31.h, p3/M, z6.h, z15.h\n"
- "fmla z29.h, p3/M, z8.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x27, x9, LSL #1]\n"
- "fmla z30.h, p3/M, z8.h, z15.h\n"
- "fmla z31.h, p3/M, z8.h, z11.h\n"
- "whilelt p1.h, x14, %x[n_channels]\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "ldp x23, x22, [x15, #0x20]\n"
+ "movprfx z31, z17\n fmla z31.h, p3/M, z8.h, z9.h\n"
+ "ldr x26, [x14, #0x40]\n"
+ "whilelt p1.h, x12, %x[n_channels]\n"
+ "movprfx z30, z17\n fmla z30.h, p3/M, z6.h, z9.h\n"
+ "ldr x25, [x14, #0x48]\n"
"inch x9\n"
- "fmax z28.h, p3/M, z28.h, z18.h\n"
- "ldp x21, x20, [x15, #0x30]\n"
- "ld1h { z9.h }, p1/Z, [x27, x14, LSL #1]\n"
- "fmax z29.h, p3/M, z29.h, z18.h\n"
- "fmax z30.h, p3/M, z30.h, z18.h\n"
- "ld1h { z10.h }, p1/Z, [x26, x14, LSL #1]\n"
- "ld1h { z11.h }, p1/Z, [x25, x14, LSL #1]\n"
- "fmax z31.h, p3/M, z31.h, z18.h\n"
- "inch x28\n"
- "ld1h { z12.h }, p1/Z, [x24, x14, LSL #1]\n"
- "ld1h { z13.h }, p1/Z, [x23, x14, LSL #1]\n"
+ "movprfx z29, z17\n fmla z29.h, p3/M, z2.h, z9.h\n"
+ "ldr x24, [x14, #0x50]\n"
"mov p0.b, p2.b\n"
- "whilelt p2.h, x9, %x[n_channels]\n"
- "ld1h { z14.h }, p1/Z, [x22, x14, LSL #1]\n"
- "ld1h { z15.h }, p1/Z, [x21, x14, LSL #1]\n"
- "fmin z28.h, p3/M, z28.h, z17.h\n"
- "fmin z29.h, p3/M, z29.h, z17.h\n"
- "ld1h { z16.h }, p1/Z, [x20, x14, LSL #1]\n"
- "inch x14\n"
- "ld1h { z19.h }, p3/Z, [x16]\n"
- "cmp x14, %x[n_channels]\n"
- "ld1h { z0.h }, p3/Z, [x16, #1, MUL VL]\n"
- "ld1h { z1.h }, p3/Z, [x16, #2, MUL VL]\n"
- "fmin z30.h, p3/M, z30.h, z17.h\n"
- "fmin z31.h, p3/M, z31.h, z17.h\n"
- "ld1h { z2.h }, p3/Z, [x16, #3, MUL VL]\n"
- "ld1h { z3.h }, p3/Z, [x16, #4, MUL VL]\n"
- "st1h { z28.h }, p0, [x13, x28, LSL #1]\n"
- "ld1h { z4.h }, p3/Z, [x16, #5, MUL VL]\n"
- "ld1h { z5.h }, p3/Z, [x16, #6, MUL VL]\n"
- "st1h { z29.h }, p0, [x12, x28, LSL #1]\n"
- "ld1h { z6.h }, p3/Z, [x16, #7, MUL VL]\n"
- "addvl x16, x16, #16\n"
- "st1h { z30.h }, p0, [x11, x28, LSL #1]\n"
- "ld1h { z7.h }, p3/Z, [x16, #-8, MUL VL]\n"
- "st1h { z31.h }, p0, [x10, x28, LSL #1]\n"
- "ld1h { z8.h }, p3/Z, [x16, #-7, MUL VL]\n"
- "addvl x16, x16, #-6\n"
+ "movprfx z28, z17\n fmla z28.h, p3/M, z0.h, z9.h\n"
+ "ldr x23, [x14, #0x58]\n"
+ "ldr x22, [x14, #0x60]\n"
+ "fmla z31.h, p3/M, z0.h, z10.h\n"
+ "ldr x21, [x14, #0x68]\n"
+ "fmla z30.h, p3/M, z1.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "fmla z31.h, p3/M, z1.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "ldr x20, [x14, #0x70]\n"
+ "fmla z30.h, p3/M, z2.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x24, x13, LSL #1]\n"
+ "fmla z31.h, p3/M, z3.h, z14.h\n"
+ "ld1h { z14.h }, p2/Z, [x23, x13, LSL #1]\n"
+ "ldr x19, [x14, #0x78]\n"
+ "fmla z30.h, p3/M, z0.h, z16.h\n"
+ "ldr x26, [x14, #0x80]\n"
+ "fmla z29.h, p3/M, z3.h, z14.h\n"
+ "ldr x25, [x14, #0x88]\n"
+ "ldr x24, [x14, #0x90]\n"
+ "fmla z31.h, p3/M, z4.h, z15.h\n"
+ "ld1h { z15.h }, p2/Z, [x22, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z4.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x21, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z0.h, z15.h\n"
+ "ld1h { z14.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "ldr x23, [x14, #0x98]\n"
+ "fmla z31.h, p3/M, z2.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x20, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z5.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z4.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x23, x13, LSL #1]\n"
+ "ldr x22, [x14, #0xa0]\n"
+ "fmla z31.h, p3/M, z5.h, z13.h\n"
+ "ldr x21, [x14, #0xa8]\n"
+ "fmla z30.h, p3/M, z3.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x19, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z1.h, z16.h\n"
+ "ldr x20, [x14, #0xb0]\n"
+ "ldr x19, [x14, #0xb8]\n"
+ "fmla z31.h, p3/M, z6.h, z15.h\n"
+ "fmla z28.h, p3/M, z4.h, z13.h\n"
+ "ld1h { z15.h }, p2/Z, [x24, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z7.h, z12.h\n"
+ "ld1h { z13.h }, p2/Z, [x22, x13, LSL #1]\n"
+ "ldr x26, [x14, #0xc0]\n"
+ "fmla z31.h, p3/M, z7.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x21, x13, LSL #1]\n"
+ "fmla z28.h, p3/M, z1.h, z12.h\n"
+ "ld1h { z17.h }, p3/Z, [x15]\n"
+ "fmla z29.h, p3/M, z6.h, z15.h\n"
+ "ld1h { z15.h }, p2/Z, [x19, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z8.h, z11.h\n"
+ "ld1h { z0.h }, p3/Z, [x15, #1, MUL VL]\n"
+ "ld1h { z1.h }, p3/Z, [x15, #2, MUL VL]\n"
+ "fmla z28.h, p3/M, z5.h, z14.h\n"
+ "fmax z31.h, p3/M, z31.h, z19.h\n"
+ "ld1h { z14.h }, p2/Z, [x20, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z7.h, z13.h\n"
+ "ld1h { z4.h }, p3/Z, [x15, #5, MUL VL]\n"
+ "fmax z30.h, p3/M, z30.h, z19.h\n"
+ "fmla z28.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "inch x13\n"
+ "fmla z29.h, p3/M, z5.h, z16.h\n"
+ "ldp x26, x25, [x14, #0x0]\n"
+ "whilelt p2.h, x13, %x[n_channels]\n"
+ "fmin z31.h, p3/M, z31.h, z18.h\n"
+ "ldp x24, x23, [x14, #0x10]\n"
+ "fmin z30.h, p3/M, z30.h, z18.h\n"
+ "ldp x22, x21, [x14, #0x20]\n"
+ "ldp x20, x19, [x14, #0x30]\n"
+ "fmla z28.h, p3/M, z3.h, z16.h\n"
+ "ld1h { z9.h }, p1/Z, [x26, x12, LSL #1]\n"
+ "fmla z29.h, p3/M, z8.h, z15.h\n"
+ "ld1h { z10.h }, p1/Z, [x25, x12, LSL #1]\n"
+ "fmla z28.h, p3/M, z7.h, z14.h\n"
+ "ld1h { z12.h }, p1/Z, [x23, x12, LSL #1]\n"
+ "ld1h { z13.h }, p1/Z, [x22, x12, LSL #1]\n"
+ "fmax z29.h, p3/M, z29.h, z19.h\n"
+ "ld1h { z14.h }, p1/Z, [x21, x12, LSL #1]\n"
+ "fmla z28.h, p3/M, z6.h, z15.h\n"
+ "ld1h { z15.h }, p1/Z, [x20, x12, LSL #1]\n"
+ "ld1h { z16.h }, p1/Z, [x19, x12, LSL #1]\n"
+ "fmin z29.h, p3/M, z29.h, z18.h\n"
+ "st1h { z31.h }, p0, [x11, x9, LSL #1]\n"
+ "fmla z28.h, p3/M, z8.h, z11.h\n"
+ "ld1h { z11.h }, p1/Z, [x24, x12, LSL #1]\n"
+ "inch x12\n"
+ "fmax z28.h, p3/M, z28.h, z19.h\n"
+ "st1h { z30.h }, p0, [x10, x9, LSL #1]\n"
+ "cmp x12, %x[n_channels]\n"
+ "fmin z28.h, p3/M, z28.h, z18.h\n"
+ "st1h { z29.h }, p0, [x28, x9, LSL #1]\n"
+ "ld1h { z2.h }, p3/Z, [x15, #3, MUL VL]\n"
+ "ld1h { z3.h }, p3/Z, [x15, #4, MUL VL]\n"
+ "ld1h { z5.h }, p3/Z, [x15, #6, MUL VL]\n"
+ "ld1h { z6.h }, p3/Z, [x15, #7, MUL VL]\n"
+ "addvl x15, x15, #16\n"
+ "st1h { z28.h }, p0, [x27, x9, LSL #1]\n"
+ "ld1h { z7.h }, p3/Z, [x15, #-8, MUL VL]\n"
+ "ld1h { z8.h }, p3/Z, [x15, #-7, MUL VL]\n"
+ "addvl x15, x15, #-6\n"
"blt 1b\n"
"2:" // Channel tail
- "movprfx z28, z19\n fmla z28.h, p3/M, z8.h, z9.h\n"
- "movprfx z29, z19\n fmla z29.h, p3/M, z6.h, z9.h\n"
- "ldr x27, [x15, #0x40]\n"
- "ldr x26, [x15, #0x48]\n"
- "fmla z28.h, p3/M, z0.h, z10.h\n"
- "fmla z29.h, p3/M, z1.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x26, x9, LSL #1]\n"
- "ldr x25, [x15, #0x50]\n"
- "fmla z28.h, p3/M, z1.h, z11.h\n"
- "fmla z29.h, p3/M, z2.h, z13.h\n"
- "ld1h { z11.h }, p2/Z, [x27, x9, LSL #1]\n"
- "ld1h { z13.h }, p2/Z, [x25, x9, LSL #1]\n"
- "fmla z28.h, p3/M, z3.h, z14.h\n"
- "fmla z29.h, p3/M, z0.h, z16.h\n"
- "ldr x24, [x15, #0x58]\n"
- "ldr x20, [x15, #0x78]\n"
- "fmla z28.h, p3/M, z4.h, z15.h\n"
- "fmla z29.h, p3/M, z4.h, z11.h\n"
- "ld1h { z14.h }, p2/Z, [x24, x9, LSL #1]\n"
- "ldr x23, [x15, #0x60]\n"
- "fmla z28.h, p3/M, z2.h, z16.h\n"
- "fmla z29.h, p3/M, z5.h, z12.h\n"
- "ldr x27, [x15, #0x80]\n"
- "ld1h { z15.h }, p2/Z, [x23, x9, LSL #1]\n"
- "movprfx z30, z19\n fmla z30.h, p3/M, z2.h, z9.h\n"
- "movprfx z31, z19\n fmla z31.h, p3/M, z0.h, z9.h\n"
- "ld1h { z12.h }, p2/Z, [x27, x9, LSL #1]\n"
- "ldr x22, [x15, #0x68]\n"
- "fmla z28.h, p3/M, z5.h, z13.h\n"
- "fmla z29.h, p3/M, z3.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x20, x9, LSL #1]\n"
- "ldr x26, [x15, #0x88]\n"
- "fmla z30.h, p3/M, z3.h, z14.h\n"
- "fmla z31.h, p3/M, z4.h, z13.h\n"
- "ld1h { z11.h }, p2/Z, [x22, x9, LSL #1]\n"
- "ld1h { z14.h }, p2/Z, [x26, x9, LSL #1]\n"
- "fmla z30.h, p3/M, z0.h, z15.h\n"
- "fmla z31.h, p3/M, z1.h, z12.h\n"
- "ldr x21, [x15, #0x70]\n"
- "ldr x24, [x15, #0x98]\n"
+ "movprfx z31, z17\n fmla z31.h, p3/M, z8.h, z9.h\n"
+ "ldr x26, [x14, #0x40]\n"
+ "inch x9\n"
+ "movprfx z30, z17\n fmla z30.h, p3/M, z6.h, z9.h\n"
+ "ldr x25, [x14, #0x48]\n"
+ "mov p0.b, p2.b\n"
+ "movprfx z29, z17\n fmla z29.h, p3/M, z2.h, z9.h\n"
+ "ldr x24, [x14, #0x50]\n"
+ "movprfx z28, z17\n fmla z28.h, p3/M, z0.h, z9.h\n"
+ "ldr x23, [x14, #0x58]\n"
+ "ldr x22, [x14, #0x60]\n"
+ "fmla z31.h, p3/M, z0.h, z10.h\n"
+ "ldr x21, [x14, #0x68]\n"
+ "fmla z30.h, p3/M, z1.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "fmla z31.h, p3/M, z1.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "ldr x20, [x14, #0x70]\n"
+ "fmla z30.h, p3/M, z2.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x24, x13, LSL #1]\n"
+ "fmla z31.h, p3/M, z3.h, z14.h\n"
+ "ld1h { z14.h }, p2/Z, [x23, x13, LSL #1]\n"
+ "ldr x19, [x14, #0x78]\n"
+ "fmla z30.h, p3/M, z0.h, z16.h\n"
+ "ldr x26, [x14, #0x80]\n"
+ "fmla z29.h, p3/M, z3.h, z14.h\n"
+ "ldr x25, [x14, #0x88]\n"
+ "ldr x24, [x14, #0x90]\n"
+ "fmla z31.h, p3/M, z4.h, z15.h\n"
+ "ld1h { z15.h }, p2/Z, [x22, x13, LSL #1]\n"
"fmla z30.h, p3/M, z4.h, z11.h\n"
- "fmla z31.h, p3/M, z5.h, z14.h\n"
- "ld1h { z16.h }, p2/Z, [x21, x9, LSL #1]\n"
- "ld1h { z11.h }, p2/Z, [x24, x9, LSL #1]\n"
- "fmla z28.h, p3/M, z6.h, z15.h\n"
- "ldr x25, [x15, #0x90]\n"
- "ldr x22, [x15, #0xa8]\n"
- "fmla z30.h, p3/M, z1.h, z16.h\n"
- "fmla z31.h, p3/M, z2.h, z11.h\n"
- "fmla z28.h, p3/M, z7.h, z16.h\n"
- "ld1h { z15.h }, p2/Z, [x25, x9, LSL #1]\n"
- "ld1h { z16.h }, p2/Z, [x22, x9, LSL #1]\n"
- "ldr x23, [x15, #0xa0]\n"
- "ldr x21, [x15, #0xb0]\n"
- "fmla z30.h, p3/M, z6.h, z15.h\n"
- "fmla z31.h, p3/M, z3.h, z16.h\n"
- "ld1h { z13.h }, p2/Z, [x23, x9, LSL #1]\n"
- "ld1h { z14.h }, p2/Z, [x21, x9, LSL #1]\n"
- "fmla z30.h, p3/M, z7.h, z13.h\n"
- "fmla z31.h, p3/M, z7.h, z14.h\n"
- "ldr x20, [x15, #0xb8]\n"
- "fmla z29.h, p3/M, z7.h, z12.h\n"
- "ld1h { z15.h }, p2/Z, [x20, x9, LSL #1]\n"
- "fmla z30.h, p3/M, z5.h, z16.h\n"
- "ldr x27, [x15, #0xc0]\n"
+ "ld1h { z11.h }, p2/Z, [x21, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z0.h, z15.h\n"
+ "ld1h { z14.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "ldr x23, [x14, #0x98]\n"
+ "fmla z31.h, p3/M, z2.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x20, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z5.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z4.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x23, x13, LSL #1]\n"
+ "ldr x22, [x14, #0xa0]\n"
+ "fmla z31.h, p3/M, z5.h, z13.h\n"
+ "ldr x21, [x14, #0xa8]\n"
+ "fmla z30.h, p3/M, z3.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x19, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z1.h, z16.h\n"
+ "ldr x20, [x14, #0xb0]\n"
+ "ldr x19, [x14, #0xb8]\n"
"fmla z31.h, p3/M, z6.h, z15.h\n"
- "fmla z29.h, p3/M, z8.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x27, x9, LSL #1]\n"
- "fmla z30.h, p3/M, z8.h, z15.h\n"
- "fmla z31.h, p3/M, z8.h, z11.h\n"
- "inch x28\n"
- "mov p0.b, p2.b\n"
- "fmax z28.h, p3/M, z28.h, z18.h\n"
- "fmax z29.h, p3/M, z29.h, z18.h\n"
- "fmax z30.h, p3/M, z30.h, z18.h\n"
- "fmax z31.h, p3/M, z31.h, z18.h\n"
- "fmin z28.h, p3/M, z28.h, z17.h\n"
- "fmin z29.h, p3/M, z29.h, z17.h\n"
- "st1h { z28.h }, p0, [x13, x28, LSL #1]\n"
- "fmin z30.h, p3/M, z30.h, z17.h\n"
- "fmin z31.h, p3/M, z31.h, z17.h\n"
- "st1h { z29.h }, p0, [x12, x28, LSL #1]\n"
- "st1h { z30.h }, p0, [x11, x28, LSL #1]\n"
- "st1h { z31.h }, p0, [x10, x28, LSL #1]\n"
+ "fmla z28.h, p3/M, z4.h, z13.h\n"
+ "ld1h { z15.h }, p2/Z, [x24, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z7.h, z12.h\n"
+ "ld1h { z13.h }, p2/Z, [x22, x13, LSL #1]\n"
+ "ldr x26, [x14, #0xc0]\n"
+ "fmla z31.h, p3/M, z7.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x21, x13, LSL #1]\n"
+ "fmla z28.h, p3/M, z1.h, z12.h\n"
+ "fmla z29.h, p3/M, z6.h, z15.h\n"
+ "ld1h { z15.h }, p2/Z, [x19, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z8.h, z11.h\n"
+ "fmla z28.h, p3/M, z5.h, z14.h\n"
+ "ld1h { z14.h }, p2/Z, [x20, x13, LSL #1]\n"
+ "fmax z31.h, p3/M, z31.h, z19.h\n"
+ "fmla z29.h, p3/M, z7.h, z13.h\n"
+ "fmax z30.h, p3/M, z30.h, z19.h\n"
+ "fmla z28.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "fmin z31.h, p3/M, z31.h, z18.h\n"
+ "st1h { z31.h }, p0, [x11, x9, LSL #1]\n"
+ "fmla z29.h, p3/M, z5.h, z16.h\n"
+ "fmla z28.h, p3/M, z3.h, z16.h\n"
+ "fmin z30.h, p3/M, z30.h, z18.h\n"
+ "st1h { z30.h }, p0, [x10, x9, LSL #1]\n"
+ "fmla z28.h, p3/M, z7.h, z14.h\n"
+ "fmla z29.h, p3/M, z8.h, z15.h\n"
+ "fmla z28.h, p3/M, z6.h, z15.h\n"
+ "fmax z29.h, p3/M, z29.h, z19.h\n"
+ "fmla z28.h, p3/M, z8.h, z11.h\n"
+ "fmin z29.h, p3/M, z29.h, z18.h\n"
+ "st1h { z29.h }, p0, [x28, x9, LSL #1]\n"
+ "fmax z28.h, p3/M, z28.h, z19.h\n"
+ "fmin z28.h, p3/M, z28.h, z18.h\n"
+ "st1h { z28.h }, p0, [x27, x9, LSL #1]\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp
index e6bfea1790..1c2e1e27ad 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -88,432 +88,432 @@ void sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
__asm__ __volatile__(
"ptrue p3.b\n"
- "mov x12, #0x0\n"
- "mov x8, #0x0\n"
+ "mov x5, #0x0\n"
+ "mov x6, #0x0\n"
"1:" // Tile loop
- "str x12, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "mov x25, #0x2\n"
- "mov x24, #0x2\n"
- "str x8, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x23, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x17, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "mul x22, x12, x23\n" // offset = tile_i * ld_input_row
+ "str x5, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "mov x20, #0x2\n"
+ "str x6, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "mov x7, #0x2\n"
+ "ldr x8, [%x[params_struct], %[offsetof_args_params]]\n"
+ "mov x17, #0x0\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "cnth x16\n"
+ "ldr x15, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "sub x14, XZR, x16\n"
+ "ldr x13, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "mul x19, x5, x22\n" // offset = tile_i * ld_input_row
"ldr x21, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "madd x22, x8, x17, x22\n" // offset += tile_j * ld_input_col
- "ldr x16, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "add x15, x17, x17\n"
- "mul x20, x12, x21\n" // offset = tile_i * ld_output_row
- "ldr x14, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "ldr x13, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "cnth x12\n"
- "mul x22, x22, x25\n" // offset *= kernel_stride * output_size
- "add x14, x14, x22, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
- "add x11, x14, x23, LSL #1\n"
- "ldr x10, [%x[params_struct], %[offsetof_args_params]]\n"
- "madd x20, x8, x16, x20\n" // offset += tile_j * ld_output_col
- "add x9, x11, x23, LSL #1\n"
- "add x28, x15, x17\n"
+ "madd x19, x6, x15, x19\n" // offset += tile_j * ld_input_col
+ "ldr x12, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+ "mul x19, x19, x20\n" // offset *= kernel_stride * output_size
+ "ldr x11, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "add x13, x13, x19, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
"ld1rh { z18.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "mul x20, x20, x24\n" // offset *= output_tile_size
- "whilelt p2.h, XZR, %x[n_channels]\n"
- "add x27, x9, x23, LSL #1\n"
+ "add x20, x13, x22, LSL #1\n"
"ld1rh { z17.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "add x26, x28, x17\n"
- "add x25, x27, x23, LSL #1\n"
- "ld1h { z16.h }, p3/Z, [x10]\n"
- "ld1h { z0.h }, p3/Z, [x10, #1, MUL VL]\n"
- "add x24, x26, x17\n"
- "add x13, x13, x20, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
- "ld1h { z1.h }, p3/Z, [x10, #2, MUL VL]\n"
- "ld1h { z2.h }, p3/Z, [x10, #3, MUL VL]\n"
- "cmp x12, %x[n_channels]\n"
- "add x23, x25, x23, LSL #1\n"
- "ld1h { z3.h }, p3/Z, [x10, #4, MUL VL]\n"
- "ld1h { z4.h }, p3/Z, [x10, #5, MUL VL]\n"
- "add x22, x13, x21, LSL #1\n"
- "mov x21, #0x0\n"
- "ld1h { z5.h }, p2/Z, [x14]\n"
- "ld1h { z6.h }, p2/Z, [x14, x17, LSL #1]\n"
- "sub x20, XZR, x12\n"
- "ld1h { z7.h }, p2/Z, [x11]\n"
- "ld1h { z8.h }, p2/Z, [x11, x17, LSL #1]\n"
- "addvl x10, x10, #6\n"
- "ld1h { z9.h }, p2/Z, [x14, x15, LSL #1]\n"
- "ld1h { z13.h }, p2/Z, [x11, x15, LSL #1]\n"
- "ld1h { z11.h }, p2/Z, [x14, x28, LSL #1]\n"
- "ld1h { z12.h }, p2/Z, [x14, x26, LSL #1]\n"
- "ld1h { z10.h }, p2/Z, [x11, x24, LSL #1]\n"
- "ld1h { z14.h }, p2/Z, [x9]\n"
+ "add x10, x20, x22, LSL #1\n"
+ "ld1h { z16.h }, p3/Z, [x8]\n"
+ "add x9, x10, x22, LSL #1\n"
+ "ld1h { z0.h }, p3/Z, [x8, #1, MUL VL]\n"
+ "add x28, x9, x22, LSL #1\n"
+ "ld1h { z1.h }, p3/Z, [x8, #2, MUL VL]\n"
+ "add x27, x28, x22, LSL #1\n"
+ "ld1h { z2.h }, p3/Z, [x8, #3, MUL VL]\n"
+ "add x26, x15, x15\n"
+ "ld1h { z3.h }, p3/Z, [x8, #4, MUL VL]\n"
+ "add x25, x26, x15\n"
+ "ld1h { z4.h }, p3/Z, [x8, #5, MUL VL]\n"
+ "add x24, x25, x15\n"
+ "mul x19, x5, x21\n" // offset = tile_i * ld_output_row
+ "add x23, x24, x15\n"
+ "madd x19, x6, x12, x19\n" // offset += tile_j * ld_output_col
+ "mul x19, x19, x7\n" // offset *= output_tile_size
+ "add x11, x11, x19, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
+ "add x22, x11, x21, LSL #1\n"
+ "whilelt p2.h, XZR, %x[n_channels]\n"
+ "ld1h { z5.h }, p2/Z, [x13]\n"
+ "ld1h { z6.h }, p2/Z, [x13, x15, LSL #1]\n"
+ "cmp x16, %x[n_channels]\n"
+ "ld1h { z7.h }, p2/Z, [x20]\n"
+ "addvl x8, x8, #6\n"
+ "ld1h { z8.h }, p2/Z, [x20, x15, LSL #1]\n"
+ "ld1h { z9.h }, p2/Z, [x13, x26, LSL #1]\n"
+ "ld1h { z13.h }, p2/Z, [x20, x26, LSL #1]\n"
+ "ld1h { z11.h }, p2/Z, [x13, x25, LSL #1]\n"
+ "ld1h { z12.h }, p2/Z, [x13, x24, LSL #1]\n"
+ "ld1h { z10.h }, p2/Z, [x20, x23, LSL #1]\n"
+ "ld1h { z14.h }, p2/Z, [x10]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
- "movprfx z28, z16\n fmla z28.h, p3/M, z0.h, z5.h\n"
- "movprfx z29, z16\n fmla z29.h, p3/M, z0.h, z6.h\n"
- "ld1h { z5.h }, p2/Z, [x11, x28, LSL #1]\n"
- "whilelt p1.h, x12, %x[n_channels]\n"
- "movprfx z30, z16\n fmla z30.h, p3/M, z0.h, z7.h\n"
- "movprfx z31, z16\n fmla z31.h, p3/M, z0.h, z8.h\n"
- "ld1h { z0.h }, p3/Z, [x10]\n"
- "inch x21\n"
- "fmla z28.h, p3/M, z1.h, z6.h\n"
- "fmla z29.h, p3/M, z1.h, z9.h\n"
- "ld1h { z6.h }, p2/Z, [x11, x26, LSL #1]\n"
- "inch x12\n"
- "fmla z30.h, p3/M, z1.h, z8.h\n"
- "fmla z31.h, p3/M, z1.h, z13.h\n"
- "ld1h { z1.h }, p3/Z, [x10, #1, MUL VL]\n"
+ "movprfx z31, z16\n fmla z31.h, p3/M, z0.h, z5.h\n"
+ "ld1h { z5.h }, p2/Z, [x20, x25, LSL #1]\n"
+ "whilelt p1.h, x16, %x[n_channels]\n"
+ "movprfx z30, z16\n fmla z30.h, p3/M, z0.h, z6.h\n"
+ "inch x14\n"
+ "movprfx z29, z16\n fmla z29.h, p3/M, z0.h, z7.h\n"
"mov p0.b, p2.b\n"
+ "movprfx z28, z16\n fmla z28.h, p3/M, z0.h, z8.h\n"
+ "ld1h { z0.h }, p3/Z, [x8]\n"
+ "inch x17\n"
+ "fmla z31.h, p3/M, z1.h, z6.h\n"
+ "ld1h { z6.h }, p2/Z, [x20, x24, LSL #1]\n"
+ "addvl x20, x20, #1\n"
+ "fmla z30.h, p3/M, z1.h, z9.h\n"
+ "inch x16\n"
+ "fmla z29.h, p3/M, z1.h, z8.h\n"
+ "fmla z28.h, p3/M, z1.h, z13.h\n"
+ "ld1h { z1.h }, p3/Z, [x8, #1, MUL VL]\n"
+ "fmla z31.h, p3/M, z2.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x13, x23, LSL #1]\n"
+ "addvl x13, x13, #1\n"
+ "fmla z30.h, p3/M, z2.h, z11.h\n"
+ "fmla z29.h, p3/M, z2.h, z13.h\n"
+ "fmla z28.h, p3/M, z2.h, z5.h\n"
+ "ld1h { z2.h }, p3/Z, [x8, #2, MUL VL]\n"
+ "fmla z31.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x10, x15, LSL #1]\n"
+ "fmla z30.h, p3/M, z3.h, z12.h\n"
+ "fmla z29.h, p3/M, z3.h, z5.h\n"
+ "fmla z28.h, p3/M, z3.h, z6.h\n"
+ "ld1h { z3.h }, p3/Z, [x8, #3, MUL VL]\n"
+ "fmla z31.h, p3/M, z4.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x10, x26, LSL #1]\n"
+ "fmla z30.h, p3/M, z4.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x10, x25, LSL #1]\n"
+ "fmla z29.h, p3/M, z4.h, z6.h\n"
+ "fmla z28.h, p3/M, z4.h, z10.h\n"
+ "ld1h { z4.h }, p3/Z, [x8, #4, MUL VL]\n"
+ "fmla z31.h, p3/M, z0.h, z7.h\n"
+ "ld1h { z7.h }, p1/Z, [x20]\n"
+ "fmla z30.h, p3/M, z0.h, z8.h\n"
+ "fmla z29.h, p3/M, z0.h, z14.h\n"
+ "fmla z28.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z0.h }, p3/Z, [x8, #5, MUL VL]\n"
+ "fmla z31.h, p3/M, z1.h, z8.h\n"
+ "ld1h { z8.h }, p2/Z, [x10, x23, LSL #1]\n"
+ "fmla z30.h, p3/M, z1.h, z13.h\n"
+ "fmla z29.h, p3/M, z1.h, z11.h\n"
+ "fmla z28.h, p3/M, z1.h, z12.h\n"
+ "ld1h { z1.h }, p3/Z, [x8, #6, MUL VL]\n"
+ "fmla z31.h, p3/M, z2.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x10, x24, LSL #1]\n"
+ "addvl x10, x10, #1\n"
+ "fmla z30.h, p3/M, z2.h, z5.h\n"
+ "fmla z29.h, p3/M, z2.h, z12.h\n"
"fmla z28.h, p3/M, z2.h, z9.h\n"
- "fmla z29.h, p3/M, z2.h, z11.h\n"
- "ld1h { z9.h }, p2/Z, [x14, x24, LSL #1]\n"
- "addvl x14, x14, #1\n"
- "fmla z30.h, p3/M, z2.h, z13.h\n"
- "fmla z31.h, p3/M, z2.h, z5.h\n"
- "ld1h { z2.h }, p3/Z, [x10, #2, MUL VL]\n"
- "addvl x11, x11, #1\n"
- "fmla z28.h, p3/M, z3.h, z11.h\n"
- "fmla z29.h, p3/M, z3.h, z12.h\n"
- "ld1h { z11.h }, p2/Z, [x9, x17, LSL #1]\n"
- "inch x20\n"
- "fmla z30.h, p3/M, z3.h, z5.h\n"
- "fmla z31.h, p3/M, z3.h, z6.h\n"
- "ld1h { z3.h }, p3/Z, [x10, #3, MUL VL]\n"
- "fmla z28.h, p3/M, z4.h, z12.h\n"
- "fmla z29.h, p3/M, z4.h, z9.h\n"
- "ld1h { z12.h }, p2/Z, [x9, x15, LSL #1]\n"
- "ld1h { z9.h }, p2/Z, [x9, x28, LSL #1]\n"
- "fmla z30.h, p3/M, z4.h, z6.h\n"
- "fmla z31.h, p3/M, z4.h, z10.h\n"
- "ld1h { z4.h }, p3/Z, [x10, #4, MUL VL]\n"
- "fmla z28.h, p3/M, z0.h, z7.h\n"
- "fmla z29.h, p3/M, z0.h, z8.h\n"
- "ld1h { z7.h }, p1/Z, [x11]\n"
- "fmla z30.h, p3/M, z0.h, z14.h\n"
- "fmla z31.h, p3/M, z0.h, z11.h\n"
- "ld1h { z0.h }, p3/Z, [x10, #5, MUL VL]\n"
- "fmla z28.h, p3/M, z1.h, z8.h\n"
+ "ld1h { z2.h }, p3/Z, [x8, #7, MUL VL]\n"
+ "addvl x8, x8, #16\n"
+ "fmla z31.h, p3/M, z3.h, z5.h\n"
+ "ld1h { z5.h }, p2/Z, [x9]\n"
+ "ld1h { z16.h }, p3/Z, [x8, #4, MUL VL]\n"
+ "fmla z30.h, p3/M, z3.h, z6.h\n"
+ "fmla z29.h, p3/M, z3.h, z9.h\n"
+ "fmla z28.h, p3/M, z3.h, z13.h\n"
+ "ld1h { z3.h }, p3/Z, [x8, #-8, MUL VL]\n"
+ "fmla z31.h, p3/M, z4.h, z6.h\n"
+ "ld1h { z6.h }, p2/Z, [x9, x15, LSL #1]\n"
+ "fmla z30.h, p3/M, z4.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x9, x26, LSL #1]\n"
+ "fmla z29.h, p3/M, z4.h, z13.h\n"
+ "fmla z28.h, p3/M, z4.h, z8.h\n"
+ "ld1h { z4.h }, p3/Z, [x8, #-7, MUL VL]\n"
+ "fmla z31.h, p3/M, z0.h, z14.h\n"
+ "ld1h { z14.h }, p2/Z, [x9, x23, LSL #1]\n"
+ "fmla z30.h, p3/M, z0.h, z11.h\n"
+ "fmla z29.h, p3/M, z0.h, z5.h\n"
+ "fmla z28.h, p3/M, z0.h, z6.h\n"
+ "ld1h { z0.h }, p3/Z, [x8, #-6, MUL VL]\n"
+ "fmla z31.h, p3/M, z1.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x9, x25, LSL #1]\n"
+ "fmla z30.h, p3/M, z1.h, z12.h\n"
+ "fmla z29.h, p3/M, z1.h, z6.h\n"
+ "fmla z28.h, p3/M, z1.h, z10.h\n"
+ "ld1h { z1.h }, p3/Z, [x8, #-5, MUL VL]\n"
+ "fmla z31.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x9, x24, LSL #1]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z30.h, p3/M, z2.h, z9.h\n"
+ "fmla z29.h, p3/M, z2.h, z10.h\n"
+ "fmla z28.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z2.h }, p3/Z, [x8, #-4, MUL VL]\n"
+ "fmla z31.h, p3/M, z3.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x28]\n"
+ "fmla z30.h, p3/M, z3.h, z13.h\n"
+ "fmla z29.h, p3/M, z3.h, z11.h\n"
+ "fmla z28.h, p3/M, z3.h, z12.h\n"
+ "ld1h { z3.h }, p3/Z, [x8, #-3, MUL VL]\n"
+ "fmla z31.h, p3/M, z4.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x28, x15, LSL #1]\n"
+ "fmla z30.h, p3/M, z4.h, z8.h\n"
+ "ld1h { z8.h }, p2/Z, [x28, x24, LSL #1]\n"
+ "fmla z29.h, p3/M, z4.h, z12.h\n"
+ "fmla z28.h, p3/M, z4.h, z14.h\n"
+ "ld1h { z4.h }, p3/Z, [x8, #-2, MUL VL]\n"
+ "fmla z31.h, p3/M, z0.h, z5.h\n"
+ "ld1h { z5.h }, p2/Z, [x28, x26, LSL #1]\n"
+ "fmla z30.h, p3/M, z0.h, z6.h\n"
+ "fmla z29.h, p3/M, z0.h, z9.h\n"
+ "fmla z28.h, p3/M, z0.h, z13.h\n"
+ "ld1h { z0.h }, p3/Z, [x8, #-1, MUL VL]\n"
+ "fmla z31.h, p3/M, z1.h, z6.h\n"
+ "ld1h { z6.h }, p2/Z, [x28, x25, LSL #1]\n"
+ "fmla z30.h, p3/M, z1.h, z10.h\n"
"fmla z29.h, p3/M, z1.h, z13.h\n"
- "ld1h { z8.h }, p2/Z, [x9, x24, LSL #1]\n"
- "fmla z30.h, p3/M, z1.h, z11.h\n"
- "fmla z31.h, p3/M, z1.h, z12.h\n"
- "ld1h { z1.h }, p3/Z, [x10, #6, MUL VL]\n"
- "fmla z28.h, p3/M, z2.h, z13.h\n"
+ "fmla z28.h, p3/M, z1.h, z5.h\n"
+ "ld1h { z1.h }, p3/Z, [x8]\n"
+ "fmla z31.h, p3/M, z2.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x28, x23, LSL #1]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z30.h, p3/M, z2.h, z11.h\n"
"fmla z29.h, p3/M, z2.h, z5.h\n"
- "ld1h { z13.h }, p2/Z, [x9, x26, LSL #1]\n"
- "addvl x9, x9, #1\n"
- "fmla z30.h, p3/M, z2.h, z12.h\n"
- "fmla z31.h, p3/M, z2.h, z9.h\n"
- "ld1h { z2.h }, p3/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
- "fmla z28.h, p3/M, z3.h, z5.h\n"
+ "fmla z28.h, p3/M, z2.h, z6.h\n"
+ "ld1h { z2.h }, p3/Z, [x8, #1, MUL VL]\n"
+ "fmla z31.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x27]\n"
+ "fmla z30.h, p3/M, z3.h, z12.h\n"
"fmla z29.h, p3/M, z3.h, z6.h\n"
- "ld1h { z5.h }, p2/Z, [x27]\n"
- "ld1h { z16.h }, p3/Z, [x10, #4, MUL VL]\n"
- "fmla z30.h, p3/M, z3.h, z9.h\n"
- "fmla z31.h, p3/M, z3.h, z13.h\n"
- "ld1h { z3.h }, p3/Z, [x10, #-8, MUL VL]\n"
- "fmla z28.h, p3/M, z4.h, z6.h\n"
- "fmla z29.h, p3/M, z4.h, z10.h\n"
- "ld1h { z6.h }, p2/Z, [x27, x17, LSL #1]\n"
- "ld1h { z10.h }, p2/Z, [x27, x15, LSL #1]\n"
- "fmla z30.h, p3/M, z4.h, z13.h\n"
- "fmla z31.h, p3/M, z4.h, z8.h\n"
- "ld1h { z4.h }, p3/Z, [x10, #-7, MUL VL]\n"
- "fmla z28.h, p3/M, z0.h, z14.h\n"
+ "fmla z28.h, p3/M, z3.h, z8.h\n"
+ "ld1h { z3.h }, p3/Z, [x8, #2, MUL VL]\n"
+ "fmla z31.h, p3/M, z4.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x27, x15, LSL #1]\n"
+ "fmla z30.h, p3/M, z4.h, z14.h\n"
+ "ld1h { z14.h }, p1/Z, [x10]\n"
+ "fmla z29.h, p3/M, z4.h, z8.h\n"
+ "fmla z28.h, p3/M, z4.h, z10.h\n"
+ "ld1h { z4.h }, p3/Z, [x8, #3, MUL VL]\n"
+ "fmla z31.h, p3/M, z0.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x27, x26, LSL #1]\n"
+ "fmla z30.h, p3/M, z0.h, z13.h\n"
"fmla z29.h, p3/M, z0.h, z11.h\n"
- "ld1h { z14.h }, p2/Z, [x27, x24, LSL #1]\n"
- "fmla z30.h, p3/M, z0.h, z5.h\n"
- "fmla z31.h, p3/M, z0.h, z6.h\n"
- "ld1h { z0.h }, p3/Z, [x10, #-6, MUL VL]\n"
- "fmla z28.h, p3/M, z1.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x27, x25, LSL #1]\n"
+ "fmla z28.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z0.h }, p3/Z, [x8, #5, MUL VL]\n"
+ "fmla z31.h, p3/M, z1.h, z13.h\n"
+ "ld1h { z13.h }, p1/Z, [x20, x26, LSL #1]\n"
+ "fmla z30.h, p3/M, z1.h, z5.h\n"
"fmla z29.h, p3/M, z1.h, z12.h\n"
- "ld1h { z11.h }, p2/Z, [x27, x28, LSL #1]\n"
- "fmla z30.h, p3/M, z1.h, z6.h\n"
- "fmla z31.h, p3/M, z1.h, z10.h\n"
- "ld1h { z1.h }, p3/Z, [x10, #-5, MUL VL]\n"
- "fmla z28.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x27, x24, LSL #1]\n"
+ "fmla z28.h, p3/M, z1.h, z9.h\n"
+ "ld1h { z1.h }, p3/Z, [x8, #6, MUL VL]\n"
+ "fmla z31.h, p3/M, z2.h, z5.h\n"
+ "ld1h { z5.h }, p1/Z, [x13]\n"
+ "fmla z30.h, p3/M, z2.h, z6.h\n"
"fmla z29.h, p3/M, z2.h, z9.h\n"
- "ld1h { z12.h }, p2/Z, [x27, x26, LSL #1]\n"
+ "ld1h { z9.h }, p2/Z, [x27, x23, LSL #1]\n"
+ "whilelt p2.h, x17, %x[n_channels]\n"
+ "fmla z28.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z2.h }, p3/Z, [x8, #7, MUL VL]\n"
"addvl x27, x27, #1\n"
- "fmla z30.h, p3/M, z2.h, z10.h\n"
- "fmla z31.h, p3/M, z2.h, z11.h\n"
- "ld1h { z2.h }, p3/Z, [x10, #-4, MUL VL]\n"
- "fmla z28.h, p3/M, z3.h, z9.h\n"
- "fmla z29.h, p3/M, z3.h, z13.h\n"
- "ld1h { z9.h }, p2/Z, [x25]\n"
- "fmla z30.h, p3/M, z3.h, z11.h\n"
- "fmla z31.h, p3/M, z3.h, z12.h\n"
- "ld1h { z3.h }, p3/Z, [x10, #-3, MUL VL]\n"
- "fmla z28.h, p3/M, z4.h, z13.h\n"
- "fmla z29.h, p3/M, z4.h, z8.h\n"
- "ld1h { z13.h }, p2/Z, [x25, x17, LSL #1]\n"
- "ld1h { z8.h }, p2/Z, [x25, x26, LSL #1]\n"
- "fmla z30.h, p3/M, z4.h, z12.h\n"
- "fmla z31.h, p3/M, z4.h, z14.h\n"
- "ld1h { z4.h }, p3/Z, [x10, #-2, MUL VL]\n"
- "fmla z28.h, p3/M, z0.h, z5.h\n"
- "fmla z29.h, p3/M, z0.h, z6.h\n"
- "ld1h { z5.h }, p2/Z, [x25, x15, LSL #1]\n"
- "fmla z30.h, p3/M, z0.h, z9.h\n"
- "fmla z31.h, p3/M, z0.h, z13.h\n"
- "ld1h { z0.h }, p3/Z, [x10, #-1, MUL VL]\n"
- "fmla z28.h, p3/M, z1.h, z6.h\n"
- "fmla z29.h, p3/M, z1.h, z10.h\n"
- "ld1h { z6.h }, p2/Z, [x25, x28, LSL #1]\n"
- "fmla z30.h, p3/M, z1.h, z13.h\n"
- "fmla z31.h, p3/M, z1.h, z5.h\n"
- "ld1h { z1.h }, p3/Z, [x10]\n"
- "fmla z28.h, p3/M, z2.h, z10.h\n"
- "fmla z29.h, p3/M, z2.h, z11.h\n"
- "ld1h { z10.h }, p2/Z, [x25, x24, LSL #1]\n"
- "addvl x25, x25, #1\n"
- "fmla z30.h, p3/M, z2.h, z5.h\n"
- "fmla z31.h, p3/M, z2.h, z6.h\n"
- "ld1h { z2.h }, p3/Z, [x10, #1, MUL VL]\n"
- "fmla z28.h, p3/M, z3.h, z11.h\n"
- "fmla z29.h, p3/M, z3.h, z12.h\n"
- "ld1h { z11.h }, p2/Z, [x23]\n"
- "fmla z30.h, p3/M, z3.h, z6.h\n"
- "fmla z31.h, p3/M, z3.h, z8.h\n"
- "ld1h { z3.h }, p3/Z, [x10, #2, MUL VL]\n"
- "fmla z28.h, p3/M, z4.h, z12.h\n"
- "fmla z29.h, p3/M, z4.h, z14.h\n"
- "ld1h { z12.h }, p2/Z, [x23, x17, LSL #1]\n"
- "ld1h { z14.h }, p1/Z, [x9]\n"
- "fmla z30.h, p3/M, z4.h, z8.h\n"
- "fmla z31.h, p3/M, z4.h, z10.h\n"
- "ld1h { z4.h }, p3/Z, [x10, #3, MUL VL]\n"
- "fmla z28.h, p3/M, z0.h, z9.h\n"
- "fmla z29.h, p3/M, z0.h, z13.h\n"
- "ld1h { z9.h }, p2/Z, [x23, x15, LSL #1]\n"
- "fmla z30.h, p3/M, z0.h, z11.h\n"
- "fmla z31.h, p3/M, z0.h, z12.h\n"
- "ld1h { z11.h }, p2/Z, [x23, x28, LSL #1]\n"
- "ld1h { z0.h }, p3/Z, [x10, #5, MUL VL]\n"
- "fmla z28.h, p3/M, z1.h, z13.h\n"
- "fmla z29.h, p3/M, z1.h, z5.h\n"
- "ld1h { z13.h }, p1/Z, [x11, x15, LSL #1]\n"
- "fmla z30.h, p3/M, z1.h, z12.h\n"
- "fmla z31.h, p3/M, z1.h, z9.h\n"
- "ld1h { z12.h }, p2/Z, [x23, x26, LSL #1]\n"
- "ld1h { z1.h }, p3/Z, [x10, #6, MUL VL]\n"
- "fmla z28.h, p3/M, z2.h, z5.h\n"
- "fmla z29.h, p3/M, z2.h, z6.h\n"
- "ld1h { z5.h }, p1/Z, [x14]\n"
- "fmla z30.h, p3/M, z2.h, z9.h\n"
- "fmla z31.h, p3/M, z2.h, z11.h\n"
- "ld1h { z9.h }, p2/Z, [x23, x24, LSL #1]\n"
- "ld1h { z2.h }, p3/Z, [x10, #7, MUL VL]\n"
- "fmla z28.h, p3/M, z3.h, z6.h\n"
- "fmla z29.h, p3/M, z3.h, z8.h\n"
- "addvl x10, x10, #16\n"
- "whilelt p2.h, x21, %x[n_channels]\n"
- "fmla z30.h, p3/M, z3.h, z11.h\n"
- "fmla z31.h, p3/M, z3.h, z12.h\n"
- "cmp x12, %x[n_channels]\n"
- "addvl x23, x23, #1\n"
- "fmla z28.h, p3/M, z4.h, z8.h\n"
- "fmla z29.h, p3/M, z4.h, z10.h\n"
- "fmax z28.h, p3/M, z28.h, z18.h\n"
- "fmax z29.h, p3/M, z29.h, z18.h\n"
- "fmla z30.h, p3/M, z4.h, z12.h\n"
- "fmla z31.h, p3/M, z4.h, z9.h\n"
- "fmax z30.h, p3/M, z30.h, z18.h\n"
+ "fmla z31.h, p3/M, z3.h, z6.h\n"
+ "ld1h { z6.h }, p1/Z, [x13, x15, LSL #1]\n"
+ "addvl x8, x8, #16\n"
+ "fmla z30.h, p3/M, z3.h, z8.h\n"
+ "cmp x16, %x[n_channels]\n"
+ "fmla z29.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z11.h }, p1/Z, [x13, x25, LSL #1]\n"
+ "fmla z28.h, p3/M, z3.h, z12.h\n"
+ "ld1h { z3.h }, p3/Z, [x8, #-8, MUL VL]\n"
+ "fmla z31.h, p3/M, z4.h, z8.h\n"
+ "ld1h { z8.h }, p1/Z, [x20, x15, LSL #1]\n"
+ "fmla z30.h, p3/M, z4.h, z10.h\n"
+ "ld1h { z10.h }, p1/Z, [x20, x23, LSL #1]\n"
+ "fmla z29.h, p3/M, z4.h, z12.h\n"
+ "ld1h { z12.h }, p1/Z, [x13, x24, LSL #1]\n"
+ "fmla z28.h, p3/M, z4.h, z9.h\n"
+ "ld1h { z9.h }, p1/Z, [x13, x26, LSL #1]\n"
+ "ld1h { z4.h }, p3/Z, [x8, #-7, MUL VL]\n"
"fmax z31.h, p3/M, z31.h, z18.h\n"
- "fmin z28.h, p3/M, z28.h, z17.h\n"
- "fmin z29.h, p3/M, z29.h, z17.h\n"
- "ld1h { z6.h }, p1/Z, [x14, x17, LSL #1]\n"
- "ld1h { z8.h }, p1/Z, [x11, x17, LSL #1]\n"
- "fmin z30.h, p3/M, z30.h, z17.h\n"
+ "addvl x8, x8, #-6\n"
+ "fmax z30.h, p3/M, z30.h, z18.h\n"
+ "fmax z29.h, p3/M, z29.h, z18.h\n"
+ "fmax z28.h, p3/M, z28.h, z18.h\n"
"fmin z31.h, p3/M, z31.h, z17.h\n"
- "ld1h { z9.h }, p1/Z, [x14, x15, LSL #1]\n"
- "ld1h { z11.h }, p1/Z, [x14, x28, LSL #1]\n"
- "ld1h { z12.h }, p1/Z, [x14, x26, LSL #1]\n"
- "ld1h { z10.h }, p1/Z, [x11, x24, LSL #1]\n"
- "st1h { z28.h }, p0, [x13]\n"
- "st1h { z29.h }, p0, [x13, x16, LSL #1]\n"
- "addvl x13, x13, #1\n"
- "ld1h { z3.h }, p3/Z, [x10, #-8, MUL VL]\n"
- "ld1h { z4.h }, p3/Z, [x10, #-7, MUL VL]\n"
- "st1h { z30.h }, p0, [x22]\n"
- "addvl x10, x10, #-6\n"
- "st1h { z31.h }, p0, [x22, x16, LSL #1]\n"
+ "st1h { z31.h }, p0, [x11]\n"
+ "fmin z30.h, p3/M, z30.h, z17.h\n"
+ "fmin z29.h, p3/M, z29.h, z17.h\n"
+ "st1h { z30.h }, p0, [x11, x12, LSL #1]\n"
+ "fmin z28.h, p3/M, z28.h, z17.h\n"
+ "addvl x11, x11, #1\n"
+ "st1h { z29.h }, p0, [x22]\n"
+ "st1h { z28.h }, p0, [x22, x12, LSL #1]\n"
"addvl x22, x22, #1\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
- "movprfx z28, z16\n fmla z28.h, p3/M, z0.h, z5.h\n"
- "movprfx z29, z16\n fmla z29.h, p3/M, z0.h, z6.h\n"
- "ld1h { z5.h }, p2/Z, [x11, x28, LSL #1]\n"
- "ldr x8, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "movprfx z30, z16\n fmla z30.h, p3/M, z0.h, z7.h\n"
- "movprfx z31, z16\n fmla z31.h, p3/M, z0.h, z8.h\n"
- "ld1h { z0.h }, p3/Z, [x10]\n"
- "ldr x12, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "fmla z28.h, p3/M, z1.h, z6.h\n"
- "fmla z29.h, p3/M, z1.h, z9.h\n"
- "ld1h { z6.h }, p2/Z, [x11, x26, LSL #1]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "fmla z30.h, p3/M, z1.h, z8.h\n"
- "fmla z31.h, p3/M, z1.h, z13.h\n"
- "ld1h { z1.h }, p3/Z, [x10, #1, MUL VL]\n"
- "add x8, x8, #0x1\n"
- "fmla z28.h, p3/M, z2.h, z9.h\n"
- "fmla z29.h, p3/M, z2.h, z11.h\n"
- "ld1h { z9.h }, p2/Z, [x14, x24, LSL #1]\n"
- "cmp x8, x20\n"
- "fmla z30.h, p3/M, z2.h, z13.h\n"
- "fmla z31.h, p3/M, z2.h, z5.h\n"
- "ld1h { z2.h }, p3/Z, [x10, #2, MUL VL]\n"
- "add x21, x12, #0x1\n"
- "fmla z28.h, p3/M, z3.h, z11.h\n"
- "fmla z29.h, p3/M, z3.h, z12.h\n"
- "ld1h { z11.h }, p2/Z, [x9, x17, LSL #1]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "fmla z30.h, p3/M, z3.h, z5.h\n"
- "fmla z31.h, p3/M, z3.h, z6.h\n"
- "ld1h { z3.h }, p3/Z, [x10, #3, MUL VL]\n"
- "csel x12, x12, x21, LT\n"
- "fmla z28.h, p3/M, z4.h, z12.h\n"
- "fmla z29.h, p3/M, z4.h, z9.h\n"
- "ld1h { z12.h }, p2/Z, [x9, x15, LSL #1]\n"
- "ld1h { z9.h }, p2/Z, [x9, x28, LSL #1]\n"
- "fmla z30.h, p3/M, z4.h, z6.h\n"
- "fmla z31.h, p3/M, z4.h, z10.h\n"
- "ld1h { z4.h }, p3/Z, [x10, #4, MUL VL]\n"
+ "movprfx z31, z16\n fmla z31.h, p3/M, z0.h, z5.h\n"
+ "ld1h { z5.h }, p2/Z, [x20, x25, LSL #1]\n"
"mov p0.b, p2.b\n"
- "fmla z28.h, p3/M, z0.h, z7.h\n"
- "fmla z29.h, p3/M, z0.h, z8.h\n"
- "csel x8, x8, XZR, LT\n"
- "cmp x12, x20\n"
- "fmla z30.h, p3/M, z0.h, z14.h\n"
- "fmla z31.h, p3/M, z0.h, z11.h\n"
- "ld1h { z0.h }, p3/Z, [x10, #5, MUL VL]\n"
- "fmla z28.h, p3/M, z1.h, z8.h\n"
- "fmla z29.h, p3/M, z1.h, z13.h\n"
- "ld1h { z8.h }, p2/Z, [x9, x24, LSL #1]\n"
- "fmla z30.h, p3/M, z1.h, z11.h\n"
- "fmla z31.h, p3/M, z1.h, z12.h\n"
- "ld1h { z1.h }, p3/Z, [x10, #6, MUL VL]\n"
- "fmla z28.h, p3/M, z2.h, z13.h\n"
- "fmla z29.h, p3/M, z2.h, z5.h\n"
- "ld1h { z13.h }, p2/Z, [x9, x26, LSL #1]\n"
- "fmla z30.h, p3/M, z2.h, z12.h\n"
+ "movprfx z30, z16\n fmla z30.h, p3/M, z0.h, z6.h\n"
+ "ldr x5, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "add x21, x5, #0x1\n"
+ "movprfx z29, z16\n fmla z29.h, p3/M, z0.h, z7.h\n"
+ "ldr x6, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "movprfx z28, z16\n fmla z28.h, p3/M, z0.h, z8.h\n"
+ "ld1h { z0.h }, p3/Z, [x8]\n"
+ "add x6, x6, #0x1\n"
+ "fmla z31.h, p3/M, z1.h, z6.h\n"
+ "ld1h { z6.h }, p2/Z, [x20, x24, LSL #1]\n"
+ "fmla z30.h, p3/M, z1.h, z9.h\n"
+ "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "fmla z29.h, p3/M, z1.h, z8.h\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "cmp x6, x19\n"
+ "fmla z28.h, p3/M, z1.h, z13.h\n"
+ "ld1h { z1.h }, p3/Z, [x8, #1, MUL VL]\n"
"fmla z31.h, p3/M, z2.h, z9.h\n"
- "ld1h { z2.h }, p3/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
- "fmla z28.h, p3/M, z3.h, z5.h\n"
- "fmla z29.h, p3/M, z3.h, z6.h\n"
- "ld1h { z5.h }, p2/Z, [x27]\n"
- "fmla z30.h, p3/M, z3.h, z9.h\n"
- "fmla z31.h, p3/M, z3.h, z13.h\n"
- "ld1h { z3.h }, p3/Z, [x10, #-8, MUL VL]\n"
- "fmla z28.h, p3/M, z4.h, z6.h\n"
- "fmla z29.h, p3/M, z4.h, z10.h\n"
- "ld1h { z6.h }, p2/Z, [x27, x17, LSL #1]\n"
- "ld1h { z10.h }, p2/Z, [x27, x15, LSL #1]\n"
- "fmla z30.h, p3/M, z4.h, z13.h\n"
- "fmla z31.h, p3/M, z4.h, z8.h\n"
- "ld1h { z4.h }, p3/Z, [x10, #-7, MUL VL]\n"
- "fmla z28.h, p3/M, z0.h, z14.h\n"
- "fmla z29.h, p3/M, z0.h, z11.h\n"
- "ld1h { z14.h }, p2/Z, [x27, x24, LSL #1]\n"
- "fmla z30.h, p3/M, z0.h, z5.h\n"
- "fmla z31.h, p3/M, z0.h, z6.h\n"
- "ld1h { z0.h }, p3/Z, [x10, #-6, MUL VL]\n"
- "fmla z28.h, p3/M, z1.h, z11.h\n"
- "fmla z29.h, p3/M, z1.h, z12.h\n"
- "ld1h { z11.h }, p2/Z, [x27, x28, LSL #1]\n"
- "fmla z30.h, p3/M, z1.h, z6.h\n"
- "fmla z31.h, p3/M, z1.h, z10.h\n"
- "ld1h { z1.h }, p3/Z, [x10, #-5, MUL VL]\n"
- "fmla z28.h, p3/M, z2.h, z12.h\n"
- "fmla z29.h, p3/M, z2.h, z9.h\n"
- "ld1h { z12.h }, p2/Z, [x27, x26, LSL #1]\n"
- "fmla z30.h, p3/M, z2.h, z10.h\n"
- "fmla z31.h, p3/M, z2.h, z11.h\n"
- "ld1h { z2.h }, p3/Z, [x10, #-4, MUL VL]\n"
- "fmla z28.h, p3/M, z3.h, z9.h\n"
- "fmla z29.h, p3/M, z3.h, z13.h\n"
- "ld1h { z9.h }, p2/Z, [x25]\n"
- "fmla z30.h, p3/M, z3.h, z11.h\n"
- "fmla z31.h, p3/M, z3.h, z12.h\n"
- "ld1h { z3.h }, p3/Z, [x10, #-3, MUL VL]\n"
- "fmla z28.h, p3/M, z4.h, z13.h\n"
- "fmla z29.h, p3/M, z4.h, z8.h\n"
- "ld1h { z13.h }, p2/Z, [x25, x17, LSL #1]\n"
- "ld1h { z8.h }, p2/Z, [x25, x26, LSL #1]\n"
- "fmla z30.h, p3/M, z4.h, z12.h\n"
- "fmla z31.h, p3/M, z4.h, z14.h\n"
- "ld1h { z4.h }, p3/Z, [x10, #-2, MUL VL]\n"
- "fmla z28.h, p3/M, z0.h, z5.h\n"
- "fmla z29.h, p3/M, z0.h, z6.h\n"
- "ld1h { z5.h }, p2/Z, [x25, x15, LSL #1]\n"
- "fmla z30.h, p3/M, z0.h, z9.h\n"
- "fmla z31.h, p3/M, z0.h, z13.h\n"
- "ld1h { z0.h }, p3/Z, [x10, #-1, MUL VL]\n"
- "fmla z28.h, p3/M, z1.h, z6.h\n"
- "fmla z29.h, p3/M, z1.h, z10.h\n"
- "ld1h { z6.h }, p2/Z, [x25, x28, LSL #1]\n"
+ "ld1h { z9.h }, p2/Z, [x13, x23, LSL #1]\n"
+ "csel x6, x6, XZR, LT\n"
+ "fmla z30.h, p3/M, z2.h, z11.h\n"
+ "csel x5, x5, x21, LT\n"
+ "fmla z29.h, p3/M, z2.h, z13.h\n"
+ "cmp x5, x20\n"
+ "fmla z28.h, p3/M, z2.h, z5.h\n"
+ "ld1h { z2.h }, p3/Z, [x8, #2, MUL VL]\n"
+ "fmla z31.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x10, x15, LSL #1]\n"
+ "fmla z30.h, p3/M, z3.h, z12.h\n"
+ "fmla z29.h, p3/M, z3.h, z5.h\n"
+ "fmla z28.h, p3/M, z3.h, z6.h\n"
+ "ld1h { z3.h }, p3/Z, [x8, #3, MUL VL]\n"
+ "fmla z31.h, p3/M, z4.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x10, x26, LSL #1]\n"
+ "fmla z30.h, p3/M, z4.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x10, x25, LSL #1]\n"
+ "fmla z29.h, p3/M, z4.h, z6.h\n"
+ "fmla z28.h, p3/M, z4.h, z10.h\n"
+ "ld1h { z4.h }, p3/Z, [x8, #4, MUL VL]\n"
+ "fmla z31.h, p3/M, z0.h, z7.h\n"
+ "fmla z30.h, p3/M, z0.h, z8.h\n"
+ "fmla z29.h, p3/M, z0.h, z14.h\n"
+ "fmla z28.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z0.h }, p3/Z, [x8, #5, MUL VL]\n"
+ "fmla z31.h, p3/M, z1.h, z8.h\n"
+ "ld1h { z8.h }, p2/Z, [x10, x23, LSL #1]\n"
"fmla z30.h, p3/M, z1.h, z13.h\n"
- "fmla z31.h, p3/M, z1.h, z5.h\n"
- "ld1h { z1.h }, p3/Z, [x10]\n"
- "fmla z28.h, p3/M, z2.h, z10.h\n"
- "fmla z29.h, p3/M, z2.h, z11.h\n"
- "ld1h { z10.h }, p2/Z, [x25, x24, LSL #1]\n"
+ "fmla z29.h, p3/M, z1.h, z11.h\n"
+ "fmla z28.h, p3/M, z1.h, z12.h\n"
+ "ld1h { z1.h }, p3/Z, [x8, #6, MUL VL]\n"
+ "fmla z31.h, p3/M, z2.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x10, x24, LSL #1]\n"
"fmla z30.h, p3/M, z2.h, z5.h\n"
- "fmla z31.h, p3/M, z2.h, z6.h\n"
- "ld1h { z2.h }, p3/Z, [x10, #1, MUL VL]\n"
- "fmla z28.h, p3/M, z3.h, z11.h\n"
- "fmla z29.h, p3/M, z3.h, z12.h\n"
- "ld1h { z11.h }, p2/Z, [x23]\n"
+ "fmla z29.h, p3/M, z2.h, z12.h\n"
+ "fmla z28.h, p3/M, z2.h, z9.h\n"
+ "ld1h { z2.h }, p3/Z, [x8, #7, MUL VL]\n"
+ "addvl x8, x8, #16\n"
+ "fmla z31.h, p3/M, z3.h, z5.h\n"
+ "ld1h { z5.h }, p2/Z, [x9]\n"
"fmla z30.h, p3/M, z3.h, z6.h\n"
- "fmla z31.h, p3/M, z3.h, z8.h\n"
- "ld1h { z3.h }, p3/Z, [x10, #2, MUL VL]\n"
- "fmla z28.h, p3/M, z4.h, z12.h\n"
- "fmla z29.h, p3/M, z4.h, z14.h\n"
- "ld1h { z12.h }, p2/Z, [x23, x17, LSL #1]\n"
- "fmla z30.h, p3/M, z4.h, z8.h\n"
- "fmla z31.h, p3/M, z4.h, z10.h\n"
- "ld1h { z4.h }, p3/Z, [x10, #3, MUL VL]\n"
- "fmla z28.h, p3/M, z0.h, z9.h\n"
- "fmla z29.h, p3/M, z0.h, z13.h\n"
- "ld1h { z9.h }, p2/Z, [x23, x15, LSL #1]\n"
+ "fmla z29.h, p3/M, z3.h, z9.h\n"
+ "fmla z28.h, p3/M, z3.h, z13.h\n"
+ "ld1h { z3.h }, p3/Z, [x8, #-8, MUL VL]\n"
+ "fmla z31.h, p3/M, z4.h, z6.h\n"
+ "ld1h { z6.h }, p2/Z, [x9, x15, LSL #1]\n"
+ "fmla z30.h, p3/M, z4.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x9, x26, LSL #1]\n"
+ "fmla z29.h, p3/M, z4.h, z13.h\n"
+ "fmla z28.h, p3/M, z4.h, z8.h\n"
+ "ld1h { z4.h }, p3/Z, [x8, #-7, MUL VL]\n"
+ "fmla z31.h, p3/M, z0.h, z14.h\n"
+ "ld1h { z14.h }, p2/Z, [x9, x23, LSL #1]\n"
"fmla z30.h, p3/M, z0.h, z11.h\n"
- "fmla z31.h, p3/M, z0.h, z12.h\n"
- "ld1h { z11.h }, p2/Z, [x23, x28, LSL #1]\n"
- "fmla z28.h, p3/M, z1.h, z13.h\n"
- "fmla z29.h, p3/M, z1.h, z5.h\n"
+ "fmla z29.h, p3/M, z0.h, z5.h\n"
+ "fmla z28.h, p3/M, z0.h, z6.h\n"
+ "ld1h { z0.h }, p3/Z, [x8, #-6, MUL VL]\n"
+ "fmla z31.h, p3/M, z1.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x9, x25, LSL #1]\n"
"fmla z30.h, p3/M, z1.h, z12.h\n"
- "fmla z31.h, p3/M, z1.h, z9.h\n"
- "ld1h { z12.h }, p2/Z, [x23, x26, LSL #1]\n"
- "fmla z28.h, p3/M, z2.h, z5.h\n"
- "fmla z29.h, p3/M, z2.h, z6.h\n"
+ "fmla z29.h, p3/M, z1.h, z6.h\n"
+ "fmla z28.h, p3/M, z1.h, z10.h\n"
+ "ld1h { z1.h }, p3/Z, [x8, #-5, MUL VL]\n"
+ "fmla z31.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x9, x24, LSL #1]\n"
"fmla z30.h, p3/M, z2.h, z9.h\n"
- "fmla z31.h, p3/M, z2.h, z11.h\n"
- "ld1h { z9.h }, p2/Z, [x23, x24, LSL #1]\n"
- "fmla z28.h, p3/M, z3.h, z6.h\n"
- "fmla z29.h, p3/M, z3.h, z8.h\n"
- "fmla z30.h, p3/M, z3.h, z11.h\n"
- "fmla z31.h, p3/M, z3.h, z12.h\n"
- "fmla z28.h, p3/M, z4.h, z8.h\n"
- "fmla z29.h, p3/M, z4.h, z10.h\n"
- "fmax z28.h, p3/M, z28.h, z18.h\n"
- "fmax z29.h, p3/M, z29.h, z18.h\n"
- "fmla z30.h, p3/M, z4.h, z12.h\n"
- "fmla z31.h, p3/M, z4.h, z9.h\n"
- "fmax z30.h, p3/M, z30.h, z18.h\n"
+ "fmla z29.h, p3/M, z2.h, z10.h\n"
+ "fmla z28.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z2.h }, p3/Z, [x8, #-4, MUL VL]\n"
+ "fmla z31.h, p3/M, z3.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x28]\n"
+ "fmla z30.h, p3/M, z3.h, z13.h\n"
+ "fmla z29.h, p3/M, z3.h, z11.h\n"
+ "fmla z28.h, p3/M, z3.h, z12.h\n"
+ "ld1h { z3.h }, p3/Z, [x8, #-3, MUL VL]\n"
+ "fmla z31.h, p3/M, z4.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x28, x15, LSL #1]\n"
+ "fmla z30.h, p3/M, z4.h, z8.h\n"
+ "ld1h { z8.h }, p2/Z, [x28, x24, LSL #1]\n"
+ "fmla z29.h, p3/M, z4.h, z12.h\n"
+ "fmla z28.h, p3/M, z4.h, z14.h\n"
+ "ld1h { z4.h }, p3/Z, [x8, #-2, MUL VL]\n"
+ "fmla z31.h, p3/M, z0.h, z5.h\n"
+ "ld1h { z5.h }, p2/Z, [x28, x26, LSL #1]\n"
+ "fmla z30.h, p3/M, z0.h, z6.h\n"
+ "fmla z29.h, p3/M, z0.h, z9.h\n"
+ "fmla z28.h, p3/M, z0.h, z13.h\n"
+ "ld1h { z0.h }, p3/Z, [x8, #-1, MUL VL]\n"
+ "fmla z31.h, p3/M, z1.h, z6.h\n"
+ "ld1h { z6.h }, p2/Z, [x28, x25, LSL #1]\n"
+ "fmla z30.h, p3/M, z1.h, z10.h\n"
+ "fmla z29.h, p3/M, z1.h, z13.h\n"
+ "fmla z28.h, p3/M, z1.h, z5.h\n"
+ "ld1h { z1.h }, p3/Z, [x8]\n"
+ "fmla z31.h, p3/M, z2.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x28, x23, LSL #1]\n"
+ "fmla z30.h, p3/M, z2.h, z11.h\n"
+ "fmla z29.h, p3/M, z2.h, z5.h\n"
+ "fmla z28.h, p3/M, z2.h, z6.h\n"
+ "ld1h { z2.h }, p3/Z, [x8, #1, MUL VL]\n"
+ "fmla z31.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x27]\n"
+ "fmla z30.h, p3/M, z3.h, z12.h\n"
+ "fmla z29.h, p3/M, z3.h, z6.h\n"
+ "fmla z28.h, p3/M, z3.h, z8.h\n"
+ "ld1h { z3.h }, p3/Z, [x8, #2, MUL VL]\n"
+ "fmla z31.h, p3/M, z4.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x27, x15, LSL #1]\n"
+ "fmla z30.h, p3/M, z4.h, z14.h\n"
+ "fmla z29.h, p3/M, z4.h, z8.h\n"
+ "fmla z28.h, p3/M, z4.h, z10.h\n"
+ "ld1h { z4.h }, p3/Z, [x8, #3, MUL VL]\n"
+ "fmla z31.h, p3/M, z0.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x27, x26, LSL #1]\n"
+ "fmla z30.h, p3/M, z0.h, z13.h\n"
+ "fmla z29.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x27, x25, LSL #1]\n"
+ "fmla z28.h, p3/M, z0.h, z12.h\n"
+ "fmla z31.h, p3/M, z1.h, z13.h\n"
+ "fmla z30.h, p3/M, z1.h, z5.h\n"
+ "fmla z29.h, p3/M, z1.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x27, x24, LSL #1]\n"
+ "fmla z28.h, p3/M, z1.h, z9.h\n"
+ "fmla z31.h, p3/M, z2.h, z5.h\n"
+ "fmla z30.h, p3/M, z2.h, z6.h\n"
+ "fmla z29.h, p3/M, z2.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x27, x23, LSL #1]\n"
+ "fmla z28.h, p3/M, z2.h, z11.h\n"
+ "fmla z31.h, p3/M, z3.h, z6.h\n"
+ "fmla z30.h, p3/M, z3.h, z8.h\n"
+ "fmla z29.h, p3/M, z3.h, z11.h\n"
+ "fmla z28.h, p3/M, z3.h, z12.h\n"
+ "fmla z31.h, p3/M, z4.h, z8.h\n"
+ "fmla z30.h, p3/M, z4.h, z10.h\n"
+ "fmla z29.h, p3/M, z4.h, z12.h\n"
+ "fmla z28.h, p3/M, z4.h, z9.h\n"
"fmax z31.h, p3/M, z31.h, z18.h\n"
- "fmin z28.h, p3/M, z28.h, z17.h\n"
- "fmin z29.h, p3/M, z29.h, z17.h\n"
- "st1h { z28.h }, p0, [x13]\n"
- "fmin z30.h, p3/M, z30.h, z17.h\n"
+ "fmax z30.h, p3/M, z30.h, z18.h\n"
+ "fmax z29.h, p3/M, z29.h, z18.h\n"
+ "fmax z28.h, p3/M, z28.h, z18.h\n"
"fmin z31.h, p3/M, z31.h, z17.h\n"
- "st1h { z29.h }, p0, [x13, x16, LSL #1]\n"
- "st1h { z30.h }, p0, [x22]\n"
- "st1h { z31.h }, p0, [x22, x16, LSL #1]\n"
+ "st1h { z31.h }, p0, [x11]\n"
+ "fmin z30.h, p3/M, z30.h, z17.h\n"
+ "fmin z29.h, p3/M, z29.h, z17.h\n"
+ "st1h { z30.h }, p0, [x11, x12, LSL #1]\n"
+ "fmin z28.h, p3/M, z28.h, z17.h\n"
+ "st1h { z29.h }, p0, [x22]\n"
+ "st1h { z28.h }, p0, [x22, x12, LSL #1]\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z16", "z17", "z18", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z16", "z17", "z18", "z28", "z29", "z30", "z31"
);
}
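(For orientation: the reverted register scheduling above recomputes each tile's input pointer from the arithmetic spelled out in the comments embedded in the assembly. Below is a minimal C++ sketch of that arithmetic only, not code from the library; the helper name inptr_for_tile and its parameter list are hypothetical, while tile_i, tile_j, ld_input_row, ld_input_col, kernel_stride and output_size are the quantities named in the asm comments.)

// Hypothetical sketch of the input-pointer computation annotated in the
// assembly above. The asm scales the byte offset with "LSL #1" because
// __fp16 is two bytes wide; element-typed pointer arithmetic makes that
// scaling implicit here.
static inline const __fp16 *inptr_for_tile(
    const __fp16 *inptr,
    unsigned long tile_i, unsigned long tile_j,
    unsigned long ld_input_row, unsigned long ld_input_col,
    unsigned long kernel_stride, unsigned long output_size)
{
    unsigned long offset = tile_i * ld_input_row;  // offset  = tile_i * ld_input_row
    offset += tile_j * ld_input_col;               // offset += tile_j * ld_input_col
    offset *= kernel_stride * output_size;         // offset *= kernel_stride * output_size
    return inptr + offset;                         // inptr[0] += offset * sizeof(__fp16)
}

The output pointer follows the same pattern with ld_output_row, ld_output_col and output_tile_size, per the corresponding "offset = tile_i * ld_output_row" comments in the hunk above.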
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp
index 2e20b524d8..a0640daeca 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -98,450 +98,450 @@ void sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
activation_min, activation_max);
__asm__ __volatile__(
- "ldr x20, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "add x16, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "ldp x15, x14, [x20, #0x0]\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_outptrs]]\n"
+ "ptrue p3.b\n"
+ "ldr x15, [%x[params_struct], %[offsetof_args_params]]\n"
+ "add x14, %x[params_struct], %[offsetof_Args_inptrs]\n"
+ "ld1rh { z18.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
"mov x13, #0x0\n"
- "ldp x12, x11, [x20, #0x10]\n"
- "whilelt p3.h, XZR, %x[n_channels]\n"
- "ldp x10, x9, [x16, #0x0]\n"
- "cnth x28\n"
- "ptrue p2.b\n"
- "ldr x27, [%x[params_struct], %[offsetof_args_params]]\n"
- "ld1h { z5.h }, p3/Z, [x10, x13, LSL #1]\n"
- "cmp x28, %x[n_channels]\n"
- "ld1h { z6.h }, p3/Z, [x9, x13, LSL #1]\n"
- "ldp x26, x25, [x16, #0x10]\n"
- "sub x24, XZR, x28\n"
- "ldp x23, x22, [x16, #0x20]\n"
- "ldp x21, x20, [x16, #0x30]\n"
- "ldp x10, x9, [x16, #0x40]\n"
- "ld1rh { z18.h }, p2/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ld1rh { z17.h }, p2/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "ld1h { z16.h }, p2/Z, [x27]\n"
- "ld1h { z0.h }, p2/Z, [x27, #1, MUL VL]\n"
- "ld1h { z1.h }, p2/Z, [x27, #2, MUL VL]\n"
- "ld1h { z2.h }, p2/Z, [x27, #3, MUL VL]\n"
- "ld1h { z3.h }, p2/Z, [x27, #4, MUL VL]\n"
- "ld1h { z4.h }, p2/Z, [x27, #5, MUL VL]\n"
- "ld1h { z7.h }, p3/Z, [x26, x13, LSL #1]\n"
- "addvl x27, x27, #6\n"
- "ld1h { z8.h }, p3/Z, [x25, x13, LSL #1]\n"
- "ld1h { z9.h }, p3/Z, [x23, x13, LSL #1]\n"
- "ld1h { z13.h }, p3/Z, [x22, x13, LSL #1]\n"
- "ld1h { z11.h }, p3/Z, [x21, x13, LSL #1]\n"
- "ld1h { z12.h }, p3/Z, [x20, x13, LSL #1]\n"
- "ld1h { z10.h }, p3/Z, [x10, x13, LSL #1]\n"
- "ld1h { z14.h }, p3/Z, [x9, x13, LSL #1]\n"
+ "ld1rh { z17.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "cnth x12\n"
+ "ldp x11, x10, [x19, #0x0]\n"
+ "sub x9, XZR, x12\n"
+ "ldp x28, x27, [x19, #0x10]\n"
+ "whilelt p2.h, XZR, %x[n_channels]\n"
+ "ld1h { z16.h }, p3/Z, [x15]\n"
+ "cmp x12, %x[n_channels]\n"
+ "ld1h { z0.h }, p3/Z, [x15, #1, MUL VL]\n"
+ "ld1h { z1.h }, p3/Z, [x15, #2, MUL VL]\n"
+ "ld1h { z2.h }, p3/Z, [x15, #3, MUL VL]\n"
+ "ld1h { z3.h }, p3/Z, [x15, #4, MUL VL]\n"
+ "ld1h { z4.h }, p3/Z, [x15, #5, MUL VL]\n"
+ "addvl x15, x15, #6\n"
+ "ldp x26, x25, [x14, #0x0]\n"
+ "ldp x24, x23, [x14, #0x10]\n"
+ "ldp x22, x21, [x14, #0x20]\n"
+ "ld1h { z5.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "ld1h { z6.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "ld1h { z7.h }, p2/Z, [x24, x13, LSL #1]\n"
+ "ld1h { z8.h }, p2/Z, [x23, x13, LSL #1]\n"
+ "ld1h { z9.h }, p2/Z, [x22, x13, LSL #1]\n"
+ "ld1h { z13.h }, p2/Z, [x21, x13, LSL #1]\n"
+ "ldp x20, x19, [x14, #0x30]\n"
+ "ldp x26, x25, [x14, #0x40]\n"
+ "ld1h { z11.h }, p2/Z, [x20, x13, LSL #1]\n"
+ "ld1h { z12.h }, p2/Z, [x19, x13, LSL #1]\n"
+ "ld1h { z10.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "ld1h { z14.h }, p2/Z, [x25, x13, LSL #1]\n"
"bge 2f\n"
"1:" // Channel loop
- "movprfx z28, z16\n fmla z28.h, p2/M, z0.h, z5.h\n"
- "movprfx z29, z16\n fmla z29.h, p2/M, z0.h, z6.h\n"
- "ldr x26, [x16, #0x50]\n"
- "ld1h { z5.h }, p3/Z, [x26, x13, LSL #1]\n"
- "movprfx z30, z16\n fmla z30.h, p2/M, z0.h, z7.h\n"
- "movprfx z31, z16\n fmla z31.h, p2/M, z0.h, z8.h\n"
- "ldr x25, [x16, #0x58]\n"
- "ldr x23, [x16, #0x60]\n"
- "fmla z28.h, p2/M, z1.h, z6.h\n"
- "fmla z29.h, p2/M, z1.h, z9.h\n"
- "ld1h { z6.h }, p3/Z, [x25, x13, LSL #1]\n"
- "ldr x22, [x16, #0x68]\n"
- "fmla z30.h, p2/M, z1.h, z8.h\n"
- "fmla z31.h, p2/M, z1.h, z13.h\n"
- "ld1h { z0.h }, p2/Z, [x27]\n"
- "ldr x21, [x16, #0x70]\n"
- "fmla z28.h, p2/M, z2.h, z9.h\n"
- "fmla z29.h, p2/M, z2.h, z11.h\n"
- "ld1h { z9.h }, p3/Z, [x23, x13, LSL #1]\n"
- "ld1h { z1.h }, p2/Z, [x27, #1, MUL VL]\n"
- "fmla z30.h, p2/M, z2.h, z13.h\n"
- "fmla z31.h, p2/M, z2.h, z5.h\n"
- "ldr x20, [x16, #0x78]\n"
- "ld1h { z2.h }, p2/Z, [x27, #2, MUL VL]\n"
- "fmla z28.h, p2/M, z3.h, z11.h\n"
- "fmla z29.h, p2/M, z3.h, z12.h\n"
- "ld1h { z11.h }, p3/Z, [x22, x13, LSL #1]\n"
- "ldr x10, [x16, #0x80]\n"
- "fmla z30.h, p2/M, z3.h, z5.h\n"
- "fmla z31.h, p2/M, z3.h, z6.h\n"
- "ld1h { z3.h }, p2/Z, [x27, #3, MUL VL]\n"
- "ldr x9, [x16, #0x88]\n"
- "fmla z28.h, p2/M, z4.h, z12.h\n"
- "fmla z29.h, p2/M, z4.h, z9.h\n"
- "ld1h { z12.h }, p3/Z, [x21, x13, LSL #1]\n"
- "ld1h { z9.h }, p3/Z, [x20, x13, LSL #1]\n"
- "fmla z30.h, p2/M, z4.h, z6.h\n"
- "fmla z31.h, p2/M, z4.h, z10.h\n"
- "ld1h { z4.h }, p2/Z, [x27, #4, MUL VL]\n"
- "ldr x26, [x16, #0x90]\n"
- "fmla z28.h, p2/M, z0.h, z7.h\n"
- "fmla z29.h, p2/M, z0.h, z8.h\n"
- "ldr x25, [x16, #0x98]\n"
- "ldr x23, [x16, #0xa0]\n"
- "fmla z30.h, p2/M, z0.h, z14.h\n"
- "fmla z31.h, p2/M, z0.h, z11.h\n"
- "ld1h { z0.h }, p2/Z, [x27, #5, MUL VL]\n"
- "ldr x22, [x16, #0xa8]\n"
- "fmla z28.h, p2/M, z1.h, z8.h\n"
- "fmla z29.h, p2/M, z1.h, z13.h\n"
- "ld1h { z8.h }, p3/Z, [x9, x13, LSL #1]\n"
- "ldr x21, [x16, #0xb0]\n"
- "fmla z30.h, p2/M, z1.h, z11.h\n"
- "fmla z31.h, p2/M, z1.h, z12.h\n"
- "ld1h { z1.h }, p2/Z, [x27, #6, MUL VL]\n"
- "ldr x20, [x16, #0xb8]\n"
- "fmla z28.h, p2/M, z2.h, z13.h\n"
- "fmla z29.h, p2/M, z2.h, z5.h\n"
- "ld1h { z13.h }, p3/Z, [x10, x13, LSL #1]\n"
- "ldr x10, [x16, #0xc0]\n"
- "fmla z30.h, p2/M, z2.h, z12.h\n"
- "fmla z31.h, p2/M, z2.h, z9.h\n"
- "ld1h { z2.h }, p2/Z, [x27, #7, MUL VL]\n"
- "addvl x27, x27, #16\n"
- "fmla z28.h, p2/M, z3.h, z5.h\n"
- "fmla z29.h, p2/M, z3.h, z6.h\n"
- "ld1h { z5.h }, p3/Z, [x26, x13, LSL #1]\n"
- "ldr x9, [x16, #0xc8]\n"
- "fmla z30.h, p2/M, z3.h, z9.h\n"
- "fmla z31.h, p2/M, z3.h, z13.h\n"
- "ld1h { z3.h }, p2/Z, [x27, #-8, MUL VL]\n"
- "ldr x26, [x16, #0xd0]\n"
- "fmla z28.h, p2/M, z4.h, z6.h\n"
- "fmla z29.h, p2/M, z4.h, z10.h\n"
- "ld1h { z6.h }, p3/Z, [x25, x13, LSL #1]\n"
- "ld1h { z10.h }, p3/Z, [x23, x13, LSL #1]\n"
- "fmla z30.h, p2/M, z4.h, z13.h\n"
- "fmla z31.h, p2/M, z4.h, z8.h\n"
- "ld1h { z4.h }, p2/Z, [x27, #-7, MUL VL]\n"
- "ldr x25, [x16, #0xd8]\n"
- "fmla z28.h, p2/M, z0.h, z14.h\n"
- "fmla z29.h, p2/M, z0.h, z11.h\n"
- "ld1h { z14.h }, p3/Z, [x20, x13, LSL #1]\n"
- "ldr x23, [x16, #0xe0]\n"
- "fmla z30.h, p2/M, z0.h, z5.h\n"
- "fmla z31.h, p2/M, z0.h, z6.h\n"
- "ld1h { z0.h }, p2/Z, [x27, #-6, MUL VL]\n"
- "ldr x20, [x16, #0xf8]\n"
- "fmla z28.h, p2/M, z1.h, z11.h\n"
- "fmla z29.h, p2/M, z1.h, z12.h\n"
- "ld1h { z11.h }, p3/Z, [x22, x13, LSL #1]\n"
- "ldr x22, [x16, #0xe8]\n"
- "fmla z30.h, p2/M, z1.h, z6.h\n"
- "fmla z31.h, p2/M, z1.h, z10.h\n"
- "ld1h { z1.h }, p2/Z, [x27, #-5, MUL VL]\n"
- "whilelt p1.h, x28, %x[n_channels]\n"
- "fmla z28.h, p2/M, z2.h, z12.h\n"
- "fmla z29.h, p2/M, z2.h, z9.h\n"
- "ld1h { z12.h }, p3/Z, [x21, x13, LSL #1]\n"
- "ldr x21, [x16, #0xf0]\n"
- "fmla z30.h, p2/M, z2.h, z10.h\n"
- "fmla z31.h, p2/M, z2.h, z11.h\n"
- "ld1h { z2.h }, p2/Z, [x27, #-4, MUL VL]\n"
- "inch x24\n"
- "fmla z28.h, p2/M, z3.h, z9.h\n"
- "fmla z29.h, p2/M, z3.h, z13.h\n"
- "ld1h { z9.h }, p3/Z, [x10, x13, LSL #1]\n"
- "ldr x10, [x16, #0x100]\n"
- "fmla z30.h, p2/M, z3.h, z11.h\n"
- "fmla z31.h, p2/M, z3.h, z12.h\n"
- "ld1h { z3.h }, p2/Z, [x27, #-3, MUL VL]\n"
- "mov p0.b, p3.b\n"
- "fmla z28.h, p2/M, z4.h, z13.h\n"
- "fmla z29.h, p2/M, z4.h, z8.h\n"
- "ld1h { z13.h }, p3/Z, [x9, x13, LSL #1]\n"
- "ld1h { z8.h }, p3/Z, [x23, x13, LSL #1]\n"
- "fmla z30.h, p2/M, z4.h, z12.h\n"
- "fmla z31.h, p2/M, z4.h, z14.h\n"
- "ld1h { z4.h }, p2/Z, [x27, #-2, MUL VL]\n"
- "ldr x9, [x16, #0x108]\n"
- "fmla z28.h, p2/M, z0.h, z5.h\n"
- "fmla z29.h, p2/M, z0.h, z6.h\n"
- "ld1h { z5.h }, p3/Z, [x26, x13, LSL #1]\n"
- "ldr x26, [x16, #0x110]\n"
- "fmla z30.h, p2/M, z0.h, z9.h\n"
- "fmla z31.h, p2/M, z0.h, z13.h\n"
- "ld1h { z0.h }, p2/Z, [x27, #-1, MUL VL]\n"
- "ld1h { z16.h }, p2/Z, [x27, #4, MUL VL]\n"
- "fmla z28.h, p2/M, z1.h, z6.h\n"
- "fmla z29.h, p2/M, z1.h, z10.h\n"
- "ld1h { z6.h }, p3/Z, [x25, x13, LSL #1]\n"
- "ldr x25, [x16, #0x118]\n"
- "fmla z30.h, p2/M, z1.h, z13.h\n"
- "fmla z31.h, p2/M, z1.h, z5.h\n"
- "ld1h { z1.h }, p2/Z, [x27]\n"
- "fmla z28.h, p2/M, z2.h, z10.h\n"
- "fmla z29.h, p2/M, z2.h, z11.h\n"
- "ld1h { z10.h }, p3/Z, [x22, x13, LSL #1]\n"
- "fmla z30.h, p2/M, z2.h, z5.h\n"
- "fmla z31.h, p2/M, z2.h, z6.h\n"
- "ld1h { z2.h }, p2/Z, [x27, #1, MUL VL]\n"
- "fmla z28.h, p2/M, z3.h, z11.h\n"
- "fmla z29.h, p2/M, z3.h, z12.h\n"
- "ld1h { z11.h }, p3/Z, [x21, x13, LSL #1]\n"
- "fmla z30.h, p2/M, z3.h, z6.h\n"
- "fmla z31.h, p2/M, z3.h, z8.h\n"
- "ld1h { z3.h }, p2/Z, [x27, #2, MUL VL]\n"
- "fmla z28.h, p2/M, z4.h, z12.h\n"
- "fmla z29.h, p2/M, z4.h, z14.h\n"
- "ld1h { z12.h }, p3/Z, [x20, x13, LSL #1]\n"
- "fmla z30.h, p2/M, z4.h, z8.h\n"
- "fmla z31.h, p2/M, z4.h, z10.h\n"
- "ld1h { z4.h }, p2/Z, [x27, #3, MUL VL]\n"
- "fmla z28.h, p2/M, z0.h, z9.h\n"
- "fmla z29.h, p2/M, z0.h, z13.h\n"
- "ld1h { z9.h }, p3/Z, [x10, x13, LSL #1]\n"
- "fmla z30.h, p2/M, z0.h, z11.h\n"
- "fmla z31.h, p2/M, z0.h, z12.h\n"
- "ld1h { z11.h }, p3/Z, [x9, x13, LSL #1]\n"
- "ldp x10, x9, [x16, #0x0]\n"
- "fmla z28.h, p2/M, z1.h, z13.h\n"
- "fmla z29.h, p2/M, z1.h, z5.h\n"
- "ld1h { z0.h }, p2/Z, [x27, #5, MUL VL]\n"
- "fmla z30.h, p2/M, z1.h, z12.h\n"
- "fmla z31.h, p2/M, z1.h, z9.h\n"
- "ld1h { z12.h }, p3/Z, [x26, x13, LSL #1]\n"
- "ld1h { z1.h }, p2/Z, [x27, #6, MUL VL]\n"
- "fmla z28.h, p2/M, z2.h, z5.h\n"
- "fmla z29.h, p2/M, z2.h, z6.h\n"
- "ld1h { z5.h }, p1/Z, [x10, x28, LSL #1]\n"
- "fmla z30.h, p2/M, z2.h, z9.h\n"
- "fmla z31.h, p2/M, z2.h, z11.h\n"
- "ld1h { z9.h }, p3/Z, [x25, x13, LSL #1]\n"
- "ldp x26, x25, [x16, #0x10]\n"
- "fmla z28.h, p2/M, z3.h, z6.h\n"
- "fmla z29.h, p2/M, z3.h, z8.h\n"
- "ld1h { z6.h }, p1/Z, [x9, x28, LSL #1]\n"
- "ldp x23, x22, [x16, #0x20]\n"
- "fmla z30.h, p2/M, z3.h, z11.h\n"
- "fmla z31.h, p2/M, z3.h, z12.h\n"
- "ldp x21, x20, [x16, #0x30]\n"
- "ldp x10, x9, [x16, #0x40]\n"
- "fmla z28.h, p2/M, z4.h, z8.h\n"
- "fmla z29.h, p2/M, z4.h, z10.h\n"
+ "movprfx z31, z16\n fmla z31.h, p3/M, z0.h, z5.h\n"
+ "ldr x24, [x14, #0x50]\n"
+ "whilelt p1.h, x12, %x[n_channels]\n"
+ "movprfx z30, z16\n fmla z30.h, p3/M, z0.h, z6.h\n"
+ "ldr x23, [x14, #0x58]\n"
+ "inch x9\n"
+ "movprfx z29, z16\n fmla z29.h, p3/M, z0.h, z7.h\n"
+ "ldr x22, [x14, #0x60]\n"
+ "mov p0.b, p2.b\n"
+ "movprfx z28, z16\n fmla z28.h, p3/M, z0.h, z8.h\n"
+ "ld1h { z5.h }, p2/Z, [x24, x13, LSL #1]\n"
+ "ld1h { z0.h }, p3/Z, [x15]\n"
+ "fmla z31.h, p3/M, z1.h, z6.h\n"
+ "ld1h { z6.h }, p2/Z, [x23, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z1.h, z9.h\n"
+ "ldr x21, [x14, #0x68]\n"
+ "fmla z29.h, p3/M, z1.h, z8.h\n"
+ "ldr x20, [x14, #0x70]\n"
+ "fmla z28.h, p3/M, z1.h, z13.h\n"
+ "ld1h { z1.h }, p3/Z, [x15, #1, MUL VL]\n"
+ "fmla z31.h, p3/M, z2.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x22, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z2.h, z11.h\n"
+ "ldr x19, [x14, #0x78]\n"
+ "fmla z29.h, p3/M, z2.h, z13.h\n"
+ "ldr x26, [x14, #0x80]\n"
+ "fmla z28.h, p3/M, z2.h, z5.h\n"
+ "ld1h { z2.h }, p3/Z, [x15, #2, MUL VL]\n"
+ "fmla z31.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x21, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z3.h, z12.h\n"
+ "ldr x25, [x14, #0x88]\n"
+ "fmla z29.h, p3/M, z3.h, z5.h\n"
+ "ldr x24, [x14, #0x90]\n"
+ "fmla z28.h, p3/M, z3.h, z6.h\n"
+ "ld1h { z3.h }, p3/Z, [x15, #3, MUL VL]\n"
+ "fmla z31.h, p3/M, z4.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x20, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z4.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x19, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z4.h, z6.h\n"
+ "ldr x23, [x14, #0x98]\n"
+ "fmla z28.h, p3/M, z4.h, z10.h\n"
+ "ld1h { z4.h }, p3/Z, [x15, #4, MUL VL]\n"
+ "fmla z31.h, p3/M, z0.h, z7.h\n"
+ "ldr x22, [x14, #0xa0]\n"
+ "fmla z30.h, p3/M, z0.h, z8.h\n"
+ "ldr x21, [x14, #0xa8]\n"
+ "fmla z29.h, p3/M, z0.h, z14.h\n"
+ "ldr x20, [x14, #0xb0]\n"
+ "fmla z28.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z0.h }, p3/Z, [x15, #5, MUL VL]\n"
+ "fmla z31.h, p3/M, z1.h, z8.h\n"
+ "ld1h { z8.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z1.h, z13.h\n"
+ "ldr x19, [x14, #0xb8]\n"
+ "fmla z29.h, p3/M, z1.h, z11.h\n"
+ "ldr x25, [x14, #0xc8]\n"
+ "fmla z28.h, p3/M, z1.h, z12.h\n"
+ "ld1h { z1.h }, p3/Z, [x15, #6, MUL VL]\n"
+ "fmla z31.h, p3/M, z2.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z2.h, z5.h\n"
+ "ldr x26, [x14, #0xc0]\n"
+ "fmla z29.h, p3/M, z2.h, z12.h\n"
+ "fmla z28.h, p3/M, z2.h, z9.h\n"
+ "ld1h { z2.h }, p3/Z, [x15, #7, MUL VL]\n"
+ "addvl x15, x15, #16\n"
+ "fmla z31.h, p3/M, z3.h, z5.h\n"
+ "ld1h { z5.h }, p2/Z, [x24, x13, LSL #1]\n"
+ "ldr x24, [x14, #0xd0]\n"
+ "fmla z30.h, p3/M, z3.h, z6.h\n"
+ "ld1h { z16.h }, p3/Z, [x15, #4, MUL VL]\n"
+ "fmla z29.h, p3/M, z3.h, z9.h\n"
+ "fmla z28.h, p3/M, z3.h, z13.h\n"
+ "ld1h { z3.h }, p3/Z, [x15, #-8, MUL VL]\n"
+ "fmla z31.h, p3/M, z4.h, z6.h\n"
+ "ld1h { z6.h }, p2/Z, [x23, x13, LSL #1]\n"
+ "ldr x23, [x14, #0xd8]\n"
+ "fmla z30.h, p3/M, z4.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x22, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z4.h, z13.h\n"
+ "ldr x22, [x14, #0xe0]\n"
+ "fmla z28.h, p3/M, z4.h, z8.h\n"
+ "ld1h { z4.h }, p3/Z, [x15, #-7, MUL VL]\n"
+ "fmla z31.h, p3/M, z0.h, z14.h\n"
+ "ld1h { z14.h }, p2/Z, [x19, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z0.h, z11.h\n"
+ "ldr x19, [x14, #0xf8]\n"
+ "fmla z29.h, p3/M, z0.h, z5.h\n"
+ "fmla z28.h, p3/M, z0.h, z6.h\n"
+ "ld1h { z0.h }, p3/Z, [x15, #-6, MUL VL]\n"
+ "fmla z31.h, p3/M, z1.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x21, x13, LSL #1]\n"
+ "ldr x21, [x14, #0xe8]\n"
+ "fmla z30.h, p3/M, z1.h, z12.h\n"
+ "fmla z29.h, p3/M, z1.h, z6.h\n"
+ "fmla z28.h, p3/M, z1.h, z10.h\n"
+ "ld1h { z1.h }, p3/Z, [x15, #-5, MUL VL]\n"
+ "fmla z31.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x20, x13, LSL #1]\n"
+ "ldr x20, [x14, #0xf0]\n"
+ "fmla z30.h, p3/M, z2.h, z9.h\n"
+ "fmla z29.h, p3/M, z2.h, z10.h\n"
+ "fmla z28.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z2.h }, p3/Z, [x15, #-4, MUL VL]\n"
+ "fmla z31.h, p3/M, z3.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "ldr x26, [x14, #0x100]\n"
+ "fmla z30.h, p3/M, z3.h, z13.h\n"
+ "fmla z29.h, p3/M, z3.h, z11.h\n"
+ "fmla z28.h, p3/M, z3.h, z12.h\n"
+ "ld1h { z3.h }, p3/Z, [x15, #-3, MUL VL]\n"
+ "fmla z31.h, p3/M, z4.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "ldr x25, [x14, #0x108]\n"
+ "fmla z30.h, p3/M, z4.h, z8.h\n"
+ "ld1h { z8.h }, p2/Z, [x22, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z4.h, z12.h\n"
+ "fmla z28.h, p3/M, z4.h, z14.h\n"
+ "ld1h { z4.h }, p3/Z, [x15, #-2, MUL VL]\n"
+ "fmla z31.h, p3/M, z0.h, z5.h\n"
+ "ld1h { z5.h }, p2/Z, [x24, x13, LSL #1]\n"
+ "ldr x24, [x14, #0x110]\n"
+ "fmla z30.h, p3/M, z0.h, z6.h\n"
+ "fmla z29.h, p3/M, z0.h, z9.h\n"
+ "fmla z28.h, p3/M, z0.h, z13.h\n"
+ "ld1h { z0.h }, p3/Z, [x15, #-1, MUL VL]\n"
+ "fmla z31.h, p3/M, z1.h, z6.h\n"
+ "ld1h { z6.h }, p2/Z, [x23, x13, LSL #1]\n"
+ "ldr x23, [x14, #0x118]\n"
+ "fmla z30.h, p3/M, z1.h, z10.h\n"
+ "fmla z29.h, p3/M, z1.h, z13.h\n"
+ "fmla z28.h, p3/M, z1.h, z5.h\n"
+ "ld1h { z1.h }, p3/Z, [x15]\n"
+ "fmla z31.h, p3/M, z2.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x21, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z2.h, z11.h\n"
+ "fmla z29.h, p3/M, z2.h, z5.h\n"
+ "fmla z28.h, p3/M, z2.h, z6.h\n"
+ "ld1h { z2.h }, p3/Z, [x15, #1, MUL VL]\n"
+ "fmla z31.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x20, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z3.h, z12.h\n"
+ "fmla z29.h, p3/M, z3.h, z6.h\n"
+ "fmla z28.h, p3/M, z3.h, z8.h\n"
+ "ld1h { z3.h }, p3/Z, [x15, #2, MUL VL]\n"
+ "fmla z31.h, p3/M, z4.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x19, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z4.h, z14.h\n"
+ "fmla z29.h, p3/M, z4.h, z8.h\n"
+ "fmla z28.h, p3/M, z4.h, z10.h\n"
+ "ld1h { z4.h }, p3/Z, [x15, #3, MUL VL]\n"
+ "fmla z31.h, p3/M, z0.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z0.h, z13.h\n"
+ "fmla z29.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "ldp x26, x25, [x14, #0x0]\n"
+ "fmla z28.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z0.h }, p3/Z, [x15, #5, MUL VL]\n"
+ "fmla z31.h, p3/M, z1.h, z13.h\n"
+ "fmla z30.h, p3/M, z1.h, z5.h\n"
+ "fmla z29.h, p3/M, z1.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x24, x13, LSL #1]\n"
+ "fmla z28.h, p3/M, z1.h, z9.h\n"
+ "ld1h { z1.h }, p3/Z, [x15, #6, MUL VL]\n"
+ "fmla z31.h, p3/M, z2.h, z5.h\n"
+ "ld1h { z5.h }, p1/Z, [x26, x12, LSL #1]\n"
+ "fmla z30.h, p3/M, z2.h, z6.h\n"
+ "fmla z29.h, p3/M, z2.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x23, x13, LSL #1]\n"
"inch x13\n"
- "ld1h { z7.h }, p1/Z, [x26, x28, LSL #1]\n"
- "fmla z30.h, p2/M, z4.h, z12.h\n"
- "fmla z31.h, p2/M, z4.h, z9.h\n"
- "ld1h { z8.h }, p1/Z, [x25, x28, LSL #1]\n"
- "ld1h { z9.h }, p1/Z, [x23, x28, LSL #1]\n"
- "ld1h { z13.h }, p1/Z, [x22, x28, LSL #1]\n"
- "ld1h { z11.h }, p1/Z, [x21, x28, LSL #1]\n"
- "fmax z28.h, p2/M, z28.h, z18.h\n"
- "fmax z29.h, p2/M, z29.h, z18.h\n"
- "ld1h { z12.h }, p1/Z, [x20, x28, LSL #1]\n"
- "ld1h { z10.h }, p1/Z, [x10, x28, LSL #1]\n"
- "fmax z30.h, p2/M, z30.h, z18.h\n"
- "fmax z31.h, p2/M, z31.h, z18.h\n"
- "ld1h { z14.h }, p1/Z, [x9, x28, LSL #1]\n"
- "inch x28\n"
- "ld1h { z2.h }, p2/Z, [x27, #7, MUL VL]\n"
- "addvl x27, x27, #16\n"
- "whilelt p3.h, x13, %x[n_channels]\n"
- "cmp x28, %x[n_channels]\n"
- "ld1h { z3.h }, p2/Z, [x27, #-8, MUL VL]\n"
- "ld1h { z4.h }, p2/Z, [x27, #-7, MUL VL]\n"
- "fmin z28.h, p2/M, z28.h, z17.h\n"
- "fmin z29.h, p2/M, z29.h, z17.h\n"
- "st1h { z28.h }, p0, [x15, x24, LSL #1]\n"
- "fmin z30.h, p2/M, z30.h, z17.h\n"
- "fmin z31.h, p2/M, z31.h, z17.h\n"
- "st1h { z29.h }, p0, [x14, x24, LSL #1]\n"
- "st1h { z30.h }, p0, [x12, x24, LSL #1]\n"
- "addvl x27, x27, #-6\n"
- "st1h { z31.h }, p0, [x11, x24, LSL #1]\n"
+ "fmla z28.h, p3/M, z2.h, z11.h\n"
+ "ldp x24, x23, [x14, #0x10]\n"
+ "whilelt p2.h, x13, %x[n_channels]\n"
+ "fmla z31.h, p3/M, z3.h, z6.h\n"
+ "ld1h { z6.h }, p1/Z, [x25, x12, LSL #1]\n"
+ "ldp x22, x21, [x14, #0x20]\n"
+ "fmla z30.h, p3/M, z3.h, z8.h\n"
+ "ldp x20, x19, [x14, #0x30]\n"
+ "fmla z29.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z7.h }, p1/Z, [x24, x12, LSL #1]\n"
+ "fmla z28.h, p3/M, z3.h, z12.h\n"
+ "ld1h { z13.h }, p1/Z, [x21, x12, LSL #1]\n"
+ "fmla z31.h, p3/M, z4.h, z8.h\n"
+ "ld1h { z8.h }, p1/Z, [x23, x12, LSL #1]\n"
+ "fmla z30.h, p3/M, z4.h, z10.h\n"
+ "ld1h { z11.h }, p1/Z, [x20, x12, LSL #1]\n"
+ "fmla z29.h, p3/M, z4.h, z12.h\n"
+ "ld1h { z12.h }, p1/Z, [x19, x12, LSL #1]\n"
+ "fmla z28.h, p3/M, z4.h, z9.h\n"
+ "ld1h { z9.h }, p1/Z, [x22, x12, LSL #1]\n"
+ "fmax z31.h, p3/M, z31.h, z18.h\n"
+ "ldp x26, x25, [x14, #0x40]\n"
+ "fmax z30.h, p3/M, z30.h, z18.h\n"
+ "ld1h { z2.h }, p3/Z, [x15, #7, MUL VL]\n"
+ "fmax z29.h, p3/M, z29.h, z18.h\n"
+ "addvl x15, x15, #16\n"
+ "fmax z28.h, p3/M, z28.h, z18.h\n"
+ "ld1h { z10.h }, p1/Z, [x26, x12, LSL #1]\n"
+ "ld1h { z14.h }, p1/Z, [x25, x12, LSL #1]\n"
+ "fmin z31.h, p3/M, z31.h, z17.h\n"
+ "inch x12\n"
+ "fmin z30.h, p3/M, z30.h, z17.h\n"
+ "ld1h { z3.h }, p3/Z, [x15, #-8, MUL VL]\n"
+ "cmp x12, %x[n_channels]\n"
+ "fmin z29.h, p3/M, z29.h, z17.h\n"
+ "ld1h { z4.h }, p3/Z, [x15, #-7, MUL VL]\n"
+ "addvl x15, x15, #-6\n"
+ "fmin z28.h, p3/M, z28.h, z17.h\n"
+ "st1h { z31.h }, p0, [x11, x9, LSL #1]\n"
+ "st1h { z30.h }, p0, [x10, x9, LSL #1]\n"
+ "st1h { z29.h }, p0, [x28, x9, LSL #1]\n"
+ "st1h { z28.h }, p0, [x27, x9, LSL #1]\n"
"blt 1b\n"
"2:" // Channel tail
- "movprfx z28, z16\n fmla z28.h, p2/M, z0.h, z5.h\n"
- "movprfx z29, z16\n fmla z29.h, p2/M, z0.h, z6.h\n"
- "ldr x26, [x16, #0x50]\n"
- "ld1h { z5.h }, p3/Z, [x26, x13, LSL #1]\n"
- "movprfx z30, z16\n fmla z30.h, p2/M, z0.h, z7.h\n"
- "movprfx z31, z16\n fmla z31.h, p2/M, z0.h, z8.h\n"
- "ldr x25, [x16, #0x58]\n"
- "ldr x23, [x16, #0x60]\n"
- "fmla z28.h, p2/M, z1.h, z6.h\n"
- "fmla z29.h, p2/M, z1.h, z9.h\n"
- "ld1h { z6.h }, p3/Z, [x25, x13, LSL #1]\n"
- "ldr x22, [x16, #0x68]\n"
- "fmla z30.h, p2/M, z1.h, z8.h\n"
- "fmla z31.h, p2/M, z1.h, z13.h\n"
- "ld1h { z0.h }, p2/Z, [x27]\n"
- "ldr x21, [x16, #0x70]\n"
- "fmla z28.h, p2/M, z2.h, z9.h\n"
- "fmla z29.h, p2/M, z2.h, z11.h\n"
- "ld1h { z9.h }, p3/Z, [x23, x13, LSL #1]\n"
- "ld1h { z1.h }, p2/Z, [x27, #1, MUL VL]\n"
- "fmla z30.h, p2/M, z2.h, z13.h\n"
- "fmla z31.h, p2/M, z2.h, z5.h\n"
- "ldr x20, [x16, #0x78]\n"
- "ld1h { z2.h }, p2/Z, [x27, #2, MUL VL]\n"
- "fmla z28.h, p2/M, z3.h, z11.h\n"
- "fmla z29.h, p2/M, z3.h, z12.h\n"
- "ld1h { z11.h }, p3/Z, [x22, x13, LSL #1]\n"
- "ldr x10, [x16, #0x80]\n"
- "fmla z30.h, p2/M, z3.h, z5.h\n"
- "fmla z31.h, p2/M, z3.h, z6.h\n"
- "ld1h { z3.h }, p2/Z, [x27, #3, MUL VL]\n"
- "ldr x9, [x16, #0x88]\n"
- "fmla z28.h, p2/M, z4.h, z12.h\n"
- "fmla z29.h, p2/M, z4.h, z9.h\n"
- "ld1h { z12.h }, p3/Z, [x21, x13, LSL #1]\n"
- "ld1h { z9.h }, p3/Z, [x20, x13, LSL #1]\n"
- "fmla z30.h, p2/M, z4.h, z6.h\n"
- "fmla z31.h, p2/M, z4.h, z10.h\n"
- "ld1h { z4.h }, p2/Z, [x27, #4, MUL VL]\n"
- "ldr x26, [x16, #0x90]\n"
- "fmla z28.h, p2/M, z0.h, z7.h\n"
- "fmla z29.h, p2/M, z0.h, z8.h\n"
- "ldr x25, [x16, #0x98]\n"
- "ldr x23, [x16, #0xa0]\n"
- "fmla z30.h, p2/M, z0.h, z14.h\n"
- "fmla z31.h, p2/M, z0.h, z11.h\n"
- "ld1h { z0.h }, p2/Z, [x27, #5, MUL VL]\n"
- "ldr x22, [x16, #0xa8]\n"
- "fmla z28.h, p2/M, z1.h, z8.h\n"
- "fmla z29.h, p2/M, z1.h, z13.h\n"
- "ld1h { z8.h }, p3/Z, [x9, x13, LSL #1]\n"
- "ldr x21, [x16, #0xb0]\n"
- "fmla z30.h, p2/M, z1.h, z11.h\n"
- "fmla z31.h, p2/M, z1.h, z12.h\n"
- "ld1h { z1.h }, p2/Z, [x27, #6, MUL VL]\n"
- "ldr x20, [x16, #0xb8]\n"
- "fmla z28.h, p2/M, z2.h, z13.h\n"
- "fmla z29.h, p2/M, z2.h, z5.h\n"
- "ld1h { z13.h }, p3/Z, [x10, x13, LSL #1]\n"
- "ldr x10, [x16, #0xc0]\n"
- "fmla z30.h, p2/M, z2.h, z12.h\n"
- "fmla z31.h, p2/M, z2.h, z9.h\n"
- "ld1h { z2.h }, p2/Z, [x27, #7, MUL VL]\n"
- "addvl x27, x27, #16\n"
- "fmla z28.h, p2/M, z3.h, z5.h\n"
- "fmla z29.h, p2/M, z3.h, z6.h\n"
- "ld1h { z5.h }, p3/Z, [x26, x13, LSL #1]\n"
- "ldr x9, [x16, #0xc8]\n"
- "fmla z30.h, p2/M, z3.h, z9.h\n"
- "fmla z31.h, p2/M, z3.h, z13.h\n"
- "ld1h { z3.h }, p2/Z, [x27, #-8, MUL VL]\n"
- "ldr x26, [x16, #0xd0]\n"
- "fmla z28.h, p2/M, z4.h, z6.h\n"
- "fmla z29.h, p2/M, z4.h, z10.h\n"
- "ld1h { z6.h }, p3/Z, [x25, x13, LSL #1]\n"
- "ld1h { z10.h }, p3/Z, [x23, x13, LSL #1]\n"
- "fmla z30.h, p2/M, z4.h, z13.h\n"
- "fmla z31.h, p2/M, z4.h, z8.h\n"
- "ld1h { z4.h }, p2/Z, [x27, #-7, MUL VL]\n"
- "ldr x25, [x16, #0xd8]\n"
- "fmla z28.h, p2/M, z0.h, z14.h\n"
- "fmla z29.h, p2/M, z0.h, z11.h\n"
- "ld1h { z14.h }, p3/Z, [x20, x13, LSL #1]\n"
- "ldr x23, [x16, #0xe0]\n"
- "fmla z30.h, p2/M, z0.h, z5.h\n"
- "fmla z31.h, p2/M, z0.h, z6.h\n"
- "ld1h { z0.h }, p2/Z, [x27, #-6, MUL VL]\n"
- "ldr x20, [x16, #0xf8]\n"
- "fmla z28.h, p2/M, z1.h, z11.h\n"
- "fmla z29.h, p2/M, z1.h, z12.h\n"
- "ld1h { z11.h }, p3/Z, [x22, x13, LSL #1]\n"
- "ldr x22, [x16, #0xe8]\n"
- "fmla z30.h, p2/M, z1.h, z6.h\n"
- "fmla z31.h, p2/M, z1.h, z10.h\n"
- "ld1h { z1.h }, p2/Z, [x27, #-5, MUL VL]\n"
- "inch x24\n"
- "fmla z28.h, p2/M, z2.h, z12.h\n"
- "fmla z29.h, p2/M, z2.h, z9.h\n"
- "ld1h { z12.h }, p3/Z, [x21, x13, LSL #1]\n"
- "ldr x21, [x16, #0xf0]\n"
- "fmla z30.h, p2/M, z2.h, z10.h\n"
- "fmla z31.h, p2/M, z2.h, z11.h\n"
- "ld1h { z2.h }, p2/Z, [x27, #-4, MUL VL]\n"
- "mov p0.b, p3.b\n"
- "fmla z28.h, p2/M, z3.h, z9.h\n"
- "fmla z29.h, p2/M, z3.h, z13.h\n"
- "ld1h { z9.h }, p3/Z, [x10, x13, LSL #1]\n"
- "ldr x10, [x16, #0x100]\n"
- "fmla z30.h, p2/M, z3.h, z11.h\n"
- "fmla z31.h, p2/M, z3.h, z12.h\n"
- "ld1h { z3.h }, p2/Z, [x27, #-3, MUL VL]\n"
- "fmla z28.h, p2/M, z4.h, z13.h\n"
- "fmla z29.h, p2/M, z4.h, z8.h\n"
- "ld1h { z13.h }, p3/Z, [x9, x13, LSL #1]\n"
- "ld1h { z8.h }, p3/Z, [x23, x13, LSL #1]\n"
- "fmla z30.h, p2/M, z4.h, z12.h\n"
- "fmla z31.h, p2/M, z4.h, z14.h\n"
- "ld1h { z4.h }, p2/Z, [x27, #-2, MUL VL]\n"
- "ldr x9, [x16, #0x108]\n"
- "fmla z28.h, p2/M, z0.h, z5.h\n"
- "fmla z29.h, p2/M, z0.h, z6.h\n"
- "ld1h { z5.h }, p3/Z, [x26, x13, LSL #1]\n"
- "ldr x26, [x16, #0x110]\n"
- "fmla z30.h, p2/M, z0.h, z9.h\n"
- "fmla z31.h, p2/M, z0.h, z13.h\n"
- "ld1h { z0.h }, p2/Z, [x27, #-1, MUL VL]\n"
- "fmla z28.h, p2/M, z1.h, z6.h\n"
- "fmla z29.h, p2/M, z1.h, z10.h\n"
- "ld1h { z6.h }, p3/Z, [x25, x13, LSL #1]\n"
- "ldr x25, [x16, #0x118]\n"
- "fmla z30.h, p2/M, z1.h, z13.h\n"
- "fmla z31.h, p2/M, z1.h, z5.h\n"
- "ld1h { z1.h }, p2/Z, [x27]\n"
- "fmla z28.h, p2/M, z2.h, z10.h\n"
- "fmla z29.h, p2/M, z2.h, z11.h\n"
- "ld1h { z10.h }, p3/Z, [x22, x13, LSL #1]\n"
- "fmla z30.h, p2/M, z2.h, z5.h\n"
- "fmla z31.h, p2/M, z2.h, z6.h\n"
- "ld1h { z2.h }, p2/Z, [x27, #1, MUL VL]\n"
- "fmla z28.h, p2/M, z3.h, z11.h\n"
- "fmla z29.h, p2/M, z3.h, z12.h\n"
- "ld1h { z11.h }, p3/Z, [x21, x13, LSL #1]\n"
- "fmla z30.h, p2/M, z3.h, z6.h\n"
- "fmla z31.h, p2/M, z3.h, z8.h\n"
- "ld1h { z3.h }, p2/Z, [x27, #2, MUL VL]\n"
- "fmla z28.h, p2/M, z4.h, z12.h\n"
- "fmla z29.h, p2/M, z4.h, z14.h\n"
- "ld1h { z12.h }, p3/Z, [x20, x13, LSL #1]\n"
- "fmla z30.h, p2/M, z4.h, z8.h\n"
- "fmla z31.h, p2/M, z4.h, z10.h\n"
- "ld1h { z4.h }, p2/Z, [x27, #3, MUL VL]\n"
- "fmla z28.h, p2/M, z0.h, z9.h\n"
- "fmla z29.h, p2/M, z0.h, z13.h\n"
- "ld1h { z9.h }, p3/Z, [x10, x13, LSL #1]\n"
- "fmla z30.h, p2/M, z0.h, z11.h\n"
- "fmla z31.h, p2/M, z0.h, z12.h\n"
- "ld1h { z11.h }, p3/Z, [x9, x13, LSL #1]\n"
- "fmla z28.h, p2/M, z1.h, z13.h\n"
- "fmla z29.h, p2/M, z1.h, z5.h\n"
- "fmla z30.h, p2/M, z1.h, z12.h\n"
- "fmla z31.h, p2/M, z1.h, z9.h\n"
- "ld1h { z12.h }, p3/Z, [x26, x13, LSL #1]\n"
- "fmla z28.h, p2/M, z2.h, z5.h\n"
- "fmla z29.h, p2/M, z2.h, z6.h\n"
- "fmla z30.h, p2/M, z2.h, z9.h\n"
- "fmla z31.h, p2/M, z2.h, z11.h\n"
- "ld1h { z9.h }, p3/Z, [x25, x13, LSL #1]\n"
- "fmla z28.h, p2/M, z3.h, z6.h\n"
- "fmla z29.h, p2/M, z3.h, z8.h\n"
- "fmla z30.h, p2/M, z3.h, z11.h\n"
- "fmla z31.h, p2/M, z3.h, z12.h\n"
- "fmla z28.h, p2/M, z4.h, z8.h\n"
- "fmla z29.h, p2/M, z4.h, z10.h\n"
- "fmax z28.h, p2/M, z28.h, z18.h\n"
- "fmax z29.h, p2/M, z29.h, z18.h\n"
- "fmla z30.h, p2/M, z4.h, z12.h\n"
- "fmla z31.h, p2/M, z4.h, z9.h\n"
- "fmax z30.h, p2/M, z30.h, z18.h\n"
- "fmax z31.h, p2/M, z31.h, z18.h\n"
- "fmin z28.h, p2/M, z28.h, z17.h\n"
- "fmin z29.h, p2/M, z29.h, z17.h\n"
- "st1h { z28.h }, p0, [x15, x24, LSL #1]\n"
- "fmin z30.h, p2/M, z30.h, z17.h\n"
- "fmin z31.h, p2/M, z31.h, z17.h\n"
- "st1h { z29.h }, p0, [x14, x24, LSL #1]\n"
- "st1h { z30.h }, p0, [x12, x24, LSL #1]\n"
- "st1h { z31.h }, p0, [x11, x24, LSL #1]\n"
+ "movprfx z31, z16\n fmla z31.h, p3/M, z0.h, z5.h\n"
+ "ldr x24, [x14, #0x50]\n"
+ "inch x9\n"
+ "movprfx z30, z16\n fmla z30.h, p3/M, z0.h, z6.h\n"
+ "ldr x23, [x14, #0x58]\n"
+ "mov p0.b, p2.b\n"
+ "movprfx z29, z16\n fmla z29.h, p3/M, z0.h, z7.h\n"
+ "ldr x22, [x14, #0x60]\n"
+ "movprfx z28, z16\n fmla z28.h, p3/M, z0.h, z8.h\n"
+ "ld1h { z5.h }, p2/Z, [x24, x13, LSL #1]\n"
+ "ld1h { z0.h }, p3/Z, [x15]\n"
+ "fmla z31.h, p3/M, z1.h, z6.h\n"
+ "ld1h { z6.h }, p2/Z, [x23, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z1.h, z9.h\n"
+ "ldr x21, [x14, #0x68]\n"
+ "fmla z29.h, p3/M, z1.h, z8.h\n"
+ "fmla z28.h, p3/M, z1.h, z13.h\n"
+ "ld1h { z1.h }, p3/Z, [x15, #1, MUL VL]\n"
+ "ldr x20, [x14, #0x70]\n"
+ "fmla z31.h, p3/M, z2.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x22, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z2.h, z11.h\n"
+ "ldr x19, [x14, #0x78]\n"
+ "fmla z29.h, p3/M, z2.h, z13.h\n"
+ "fmla z28.h, p3/M, z2.h, z5.h\n"
+ "ld1h { z2.h }, p3/Z, [x15, #2, MUL VL]\n"
+ "ldr x26, [x14, #0x80]\n"
+ "fmla z31.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x21, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z3.h, z12.h\n"
+ "ldr x25, [x14, #0x88]\n"
+ "fmla z29.h, p3/M, z3.h, z5.h\n"
+ "fmla z28.h, p3/M, z3.h, z6.h\n"
+ "ld1h { z3.h }, p3/Z, [x15, #3, MUL VL]\n"
+ "ldr x24, [x14, #0x90]\n"
+ "fmla z31.h, p3/M, z4.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x20, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z4.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x19, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z4.h, z6.h\n"
+ "fmla z28.h, p3/M, z4.h, z10.h\n"
+ "ld1h { z4.h }, p3/Z, [x15, #4, MUL VL]\n"
+ "ldr x23, [x14, #0x98]\n"
+ "fmla z31.h, p3/M, z0.h, z7.h\n"
+ "ldr x22, [x14, #0xa0]\n"
+ "fmla z30.h, p3/M, z0.h, z8.h\n"
+ "ldr x21, [x14, #0xa8]\n"
+ "fmla z29.h, p3/M, z0.h, z14.h\n"
+ "fmla z28.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z0.h }, p3/Z, [x15, #5, MUL VL]\n"
+ "ldr x20, [x14, #0xb0]\n"
+ "fmla z31.h, p3/M, z1.h, z8.h\n"
+ "ld1h { z8.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z1.h, z13.h\n"
+ "ldr x19, [x14, #0xb8]\n"
+ "fmla z29.h, p3/M, z1.h, z11.h\n"
+ "fmla z28.h, p3/M, z1.h, z12.h\n"
+ "ld1h { z1.h }, p3/Z, [x15, #6, MUL VL]\n"
+ "ldr x25, [x14, #0xc8]\n"
+ "fmla z31.h, p3/M, z2.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z2.h, z5.h\n"
+ "ldr x26, [x14, #0xc0]\n"
+ "fmla z29.h, p3/M, z2.h, z12.h\n"
+ "fmla z28.h, p3/M, z2.h, z9.h\n"
+ "ld1h { z2.h }, p3/Z, [x15, #7, MUL VL]\n"
+ "addvl x15, x15, #16\n"
+ "fmla z31.h, p3/M, z3.h, z5.h\n"
+ "ld1h { z5.h }, p2/Z, [x24, x13, LSL #1]\n"
+ "ldr x24, [x14, #0xd0]\n"
+ "fmla z30.h, p3/M, z3.h, z6.h\n"
+ "fmla z29.h, p3/M, z3.h, z9.h\n"
+ "fmla z28.h, p3/M, z3.h, z13.h\n"
+ "ld1h { z3.h }, p3/Z, [x15, #-8, MUL VL]\n"
+ "fmla z31.h, p3/M, z4.h, z6.h\n"
+ "ld1h { z6.h }, p2/Z, [x23, x13, LSL #1]\n"
+ "ldr x23, [x14, #0xd8]\n"
+ "fmla z30.h, p3/M, z4.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x22, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z4.h, z13.h\n"
+ "fmla z28.h, p3/M, z4.h, z8.h\n"
+ "ld1h { z4.h }, p3/Z, [x15, #-7, MUL VL]\n"
+ "ldr x22, [x14, #0xe0]\n"
+ "fmla z31.h, p3/M, z0.h, z14.h\n"
+ "ld1h { z14.h }, p2/Z, [x19, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z0.h, z11.h\n"
+ "ldr x19, [x14, #0xf8]\n"
+ "fmla z29.h, p3/M, z0.h, z5.h\n"
+ "fmla z28.h, p3/M, z0.h, z6.h\n"
+ "ld1h { z0.h }, p3/Z, [x15, #-6, MUL VL]\n"
+ "fmla z31.h, p3/M, z1.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x21, x13, LSL #1]\n"
+ "ldr x21, [x14, #0xe8]\n"
+ "fmla z30.h, p3/M, z1.h, z12.h\n"
+ "fmla z29.h, p3/M, z1.h, z6.h\n"
+ "fmla z28.h, p3/M, z1.h, z10.h\n"
+ "ld1h { z1.h }, p3/Z, [x15, #-5, MUL VL]\n"
+ "fmla z31.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x20, x13, LSL #1]\n"
+ "ldr x20, [x14, #0xf0]\n"
+ "fmla z30.h, p3/M, z2.h, z9.h\n"
+ "fmla z29.h, p3/M, z2.h, z10.h\n"
+ "fmla z28.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z2.h }, p3/Z, [x15, #-4, MUL VL]\n"
+ "fmla z31.h, p3/M, z3.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "ldr x26, [x14, #0x100]\n"
+ "fmla z30.h, p3/M, z3.h, z13.h\n"
+ "fmla z29.h, p3/M, z3.h, z11.h\n"
+ "fmla z28.h, p3/M, z3.h, z12.h\n"
+ "ld1h { z3.h }, p3/Z, [x15, #-3, MUL VL]\n"
+ "fmla z31.h, p3/M, z4.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "ldr x25, [x14, #0x108]\n"
+ "fmla z30.h, p3/M, z4.h, z8.h\n"
+ "ld1h { z8.h }, p2/Z, [x22, x13, LSL #1]\n"
+ "fmla z29.h, p3/M, z4.h, z12.h\n"
+ "fmla z28.h, p3/M, z4.h, z14.h\n"
+ "ld1h { z4.h }, p3/Z, [x15, #-2, MUL VL]\n"
+ "fmla z31.h, p3/M, z0.h, z5.h\n"
+ "ld1h { z5.h }, p2/Z, [x24, x13, LSL #1]\n"
+ "ldr x24, [x14, #0x110]\n"
+ "fmla z30.h, p3/M, z0.h, z6.h\n"
+ "fmla z29.h, p3/M, z0.h, z9.h\n"
+ "fmla z28.h, p3/M, z0.h, z13.h\n"
+ "ld1h { z0.h }, p3/Z, [x15, #-1, MUL VL]\n"
+ "fmla z31.h, p3/M, z1.h, z6.h\n"
+ "ld1h { z6.h }, p2/Z, [x23, x13, LSL #1]\n"
+ "ldr x23, [x14, #0x118]\n"
+ "fmla z30.h, p3/M, z1.h, z10.h\n"
+ "fmla z29.h, p3/M, z1.h, z13.h\n"
+ "fmla z28.h, p3/M, z1.h, z5.h\n"
+ "ld1h { z1.h }, p3/Z, [x15]\n"
+ "fmla z31.h, p3/M, z2.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x21, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z2.h, z11.h\n"
+ "fmla z29.h, p3/M, z2.h, z5.h\n"
+ "fmla z28.h, p3/M, z2.h, z6.h\n"
+ "ld1h { z2.h }, p3/Z, [x15, #1, MUL VL]\n"
+ "fmla z31.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x20, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z3.h, z12.h\n"
+ "fmla z29.h, p3/M, z3.h, z6.h\n"
+ "fmla z28.h, p3/M, z3.h, z8.h\n"
+ "ld1h { z3.h }, p3/Z, [x15, #2, MUL VL]\n"
+ "fmla z31.h, p3/M, z4.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x19, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z4.h, z14.h\n"
+ "fmla z29.h, p3/M, z4.h, z8.h\n"
+ "fmla z28.h, p3/M, z4.h, z10.h\n"
+ "ld1h { z4.h }, p3/Z, [x15, #3, MUL VL]\n"
+ "fmla z31.h, p3/M, z0.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "fmla z30.h, p3/M, z0.h, z13.h\n"
+ "fmla z29.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "fmla z28.h, p3/M, z0.h, z12.h\n"
+ "fmla z31.h, p3/M, z1.h, z13.h\n"
+ "fmla z30.h, p3/M, z1.h, z5.h\n"
+ "fmla z29.h, p3/M, z1.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x24, x13, LSL #1]\n"
+ "fmla z28.h, p3/M, z1.h, z9.h\n"
+ "fmla z31.h, p3/M, z2.h, z5.h\n"
+ "fmla z30.h, p3/M, z2.h, z6.h\n"
+ "fmla z29.h, p3/M, z2.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x23, x13, LSL #1]\n"
+ "fmla z28.h, p3/M, z2.h, z11.h\n"
+ "fmla z31.h, p3/M, z3.h, z6.h\n"
+ "fmla z30.h, p3/M, z3.h, z8.h\n"
+ "fmla z29.h, p3/M, z3.h, z11.h\n"
+ "fmla z28.h, p3/M, z3.h, z12.h\n"
+ "fmla z31.h, p3/M, z4.h, z8.h\n"
+ "fmla z30.h, p3/M, z4.h, z10.h\n"
+ "fmla z29.h, p3/M, z4.h, z12.h\n"
+ "fmla z28.h, p3/M, z4.h, z9.h\n"
+ "fmax z31.h, p3/M, z31.h, z18.h\n"
+ "fmax z30.h, p3/M, z30.h, z18.h\n"
+ "fmax z29.h, p3/M, z29.h, z18.h\n"
+ "fmax z28.h, p3/M, z28.h, z18.h\n"
+ "fmin z31.h, p3/M, z31.h, z17.h\n"
+ "st1h { z31.h }, p0, [x11, x9, LSL #1]\n"
+ "fmin z30.h, p3/M, z30.h, z17.h\n"
+ "fmin z29.h, p3/M, z29.h, z17.h\n"
+ "st1h { z30.h }, p0, [x10, x9, LSL #1]\n"
+ "fmin z28.h, p3/M, z28.h, z17.h\n"
+ "st1h { z29.h }, p0, [x28, x9, LSL #1]\n"
+ "st1h { z28.h }, p0, [x27, x9, LSL #1]\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z16", "z17", "z18", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z16", "z17", "z18", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
new file mode 100644
index 0000000000..eddcffc196
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstddef>
+#include <cstdint>
+
+#if defined(ARM_COMPUTE_ENABLE_SVE)
+
+namespace arm_conv {
+namespace depthwise {
+
+void sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
+ const float *const *const input_ptrs,
+ float *const *const outptrs,
+ const void *params,
+ unsigned int n_channels,
+ const float activation_min,
+ const float activation_max
+)
+{
+ const float *const inptrs[16] = {
+ input_ptrs[0], input_ptrs[1], input_ptrs[4], input_ptrs[5], input_ptrs[2], input_ptrs[6], input_ptrs[3], input_ptrs[7], input_ptrs[8], input_ptrs[9], input_ptrs[10], input_ptrs[11], input_ptrs[12], input_ptrs[13], input_ptrs[14], input_ptrs[15],
+ };
+ const float minmax_vals[2] = { activation_min, activation_max };
+
+ __asm__ __volatile__(
+ "ldp x26, x23, [%x[inptrs], #0x0]\n"
+ "ptrue p2.b\n"
+ "ldp x25, x16, [%x[inptrs], #0x10]\n"
+ "mov x15, #0x0\n"
+ "ld1w { z15.s }, p2/Z, [%x[params]]\n"
+ "mov z14.d, z15.d\n"
+ "ld1w { z13.s }, p2/Z, [%x[params], #1, MUL VL]\n"
+ "cntw x14\n"
+ "mov z12.d, z15.d\n"
+ "ld1w { z11.s }, p2/Z, [%x[params], #2, MUL VL]\n"
+ "sub x13, XZR, x14\n"
+ "mov z10.d, z15.d\n"
+ "ld1w { z9.s }, p2/Z, [%x[params], #3, MUL VL]\n"
+ "whilelt p1.s, XZR, %x[n_channels]\n"
+ "mov z8.d, z15.d\n"
+ "ld1w { z7.s }, p2/Z, [%x[params], #4, MUL VL]\n"
+ "cmp x14, %x[n_channels]\n"
+ "ld1w { z6.s }, p2/Z, [%x[params], #5, MUL VL]\n"
+ "ld1w { z5.s }, p2/Z, [%x[params], #6, MUL VL]\n"
+ "ld1w { z4.s }, p2/Z, [%x[params], #7, MUL VL]\n"
+ "addvl %x[params], %x[params], #16\n"
+ "ld1w { z3.s }, p1/Z, [x26, x15, LSL #2]\n"
+ "ld1w { z2.s }, p2/Z, [%x[params], #-8, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [%x[params], #-7, MUL VL]\n"
+ "addvl %x[params], %x[params], #-6\n"
+ "ld1w { z0.s }, p1/Z, [x23, x15, LSL #2]\n"
+ "ld1w { z31.s }, p1/Z, [x25, x15, LSL #2]\n"
+ "ld1w { z30.s }, p1/Z, [x16, x15, LSL #2]\n"
+ "ldp x24, x12, [%x[inptrs], #0x20]\n"
+ "ldp x23, x11, [%x[inptrs], #0x30]\n"
+ "ldp x10, x9, [%x[inptrs], #0x40]\n"
+ "ld1w { z29.s }, p1/Z, [x24, x15, LSL #2]\n"
+ "ld1w { z28.s }, p1/Z, [x12, x15, LSL #2]\n"
+ "ld1w { z27.s }, p1/Z, [x23, x15, LSL #2]\n"
+ "ld1w { z26.s }, p1/Z, [x11, x15, LSL #2]\n"
+ "ld1w { z25.s }, p1/Z, [x10, x15, LSL #2]\n"
+ "ld1w { z24.s }, p1/Z, [x9, x15, LSL #2]\n"
+ "ldp x28, x27, [%x[inptrs], #0x50]\n"
+ "ldp x26, x25, [%x[inptrs], #0x60]\n"
+ "ldp x24, x23, [%x[inptrs], #0x70]\n"
+ "ld1w { z23.s }, p1/Z, [x28, x15, LSL #2]\n"
+ "ld1w { z22.s }, p1/Z, [x27, x15, LSL #2]\n"
+ "ld1w { z21.s }, p1/Z, [x26, x15, LSL #2]\n"
+ "ld1w { z20.s }, p1/Z, [x25, x15, LSL #2]\n"
+ "ld1w { z19.s }, p1/Z, [x24, x15, LSL #2]\n"
+ "ld1w { z18.s }, p1/Z, [x23, x15, LSL #2]\n"
+ "ldp x22, x21, [%x[outptrs], #0x0]\n"
+ "ldp x20, x19, [%x[outptrs], #0x10]\n"
+ "ld1rw { z17.s }, p2/Z, [%x[minmax_vals]]\n"
+ "ld1rw { z16.s }, p2/Z, [%x[minmax_vals], #4]\n"
+ "bge 1f\n"
+ "1:" // Loop
+ "fmla z14.s, p2/M, z13.s, z3.s\n"
+ "ld1w { z15.s }, p2/Z, [%x[params]]\n"
+ "incw x13\n"
+ "fmla z12.s, p2/M, z13.s, z0.s\n"
+ "ldp x26, x23, [%x[inptrs], #0x0]\n"
+ "mov p0.b, p1.b\n"
+ "fmla z10.s, p2/M, z13.s, z31.s\n"
+ "ldp x25, x16, [%x[inptrs], #0x10]\n"
+ "mov x15, x14\n"
+ "fmla z8.s, p2/M, z13.s, z30.s\n"
+ "ld1w { z13.s }, p2/Z, [%x[params], #1, MUL VL]\n"
+ "incw x14\n"
+ "fmla z14.s, p2/M, z11.s, z0.s\n"
+ "ldp x24, x12, [%x[inptrs], #0x20]\n"
+ "whilelt p1.s, x15, %x[n_channels]\n"
+ "fmla z12.s, p2/M, z11.s, z29.s\n"
+ "ld1w { z3.s }, p1/Z, [x26, x15, LSL #2]\n"
+ "cmp x14, %x[n_channels]\n"
+ "fmla z10.s, p2/M, z11.s, z30.s\n"
+ "ld1w { z0.s }, p1/Z, [x23, x15, LSL #2]\n"
+ "ldp x23, x11, [%x[inptrs], #0x30]\n"
+ "fmla z8.s, p2/M, z11.s, z28.s\n"
+ "ld1w { z11.s }, p2/Z, [%x[params], #2, MUL VL]\n"
+ "fmla z14.s, p2/M, z9.s, z29.s\n"
+ "ld1w { z29.s }, p1/Z, [x24, x15, LSL #2]\n"
+ "fmla z12.s, p2/M, z9.s, z27.s\n"
+ "ld1w { z27.s }, p1/Z, [x23, x15, LSL #2]\n"
+ "fmla z10.s, p2/M, z9.s, z28.s\n"
+ "ldp x10, x9, [%x[inptrs], #0x40]\n"
+ "fmla z8.s, p2/M, z9.s, z26.s\n"
+ "ld1w { z9.s }, p2/Z, [%x[params], #3, MUL VL]\n"
+ "fmla z14.s, p2/M, z7.s, z31.s\n"
+ "ld1w { z31.s }, p1/Z, [x25, x15, LSL #2]\n"
+ "fmla z12.s, p2/M, z7.s, z30.s\n"
+ "ldp x28, x27, [%x[inptrs], #0x50]\n"
+ "fmla z10.s, p2/M, z7.s, z25.s\n"
+ "ldp x26, x25, [%x[inptrs], #0x60]\n"
+ "fmla z8.s, p2/M, z7.s, z24.s\n"
+ "ld1w { z7.s }, p2/Z, [%x[params], #4, MUL VL]\n"
+ "fmla z14.s, p2/M, z6.s, z30.s\n"
+ "ld1w { z30.s }, p1/Z, [x16, x15, LSL #2]\n"
+ "fmla z12.s, p2/M, z6.s, z28.s\n"
+ "ldp x24, x23, [%x[inptrs], #0x70]\n"
+ "fmla z10.s, p2/M, z6.s, z24.s\n"
+ "fmla z8.s, p2/M, z6.s, z23.s\n"
+ "ld1w { z6.s }, p2/Z, [%x[params], #5, MUL VL]\n"
+ "fmla z14.s, p2/M, z5.s, z28.s\n"
+ "ld1w { z28.s }, p1/Z, [x12, x15, LSL #2]\n"
+ "fmla z12.s, p2/M, z5.s, z26.s\n"
+ "ld1w { z26.s }, p1/Z, [x11, x15, LSL #2]\n"
+ "fmla z10.s, p2/M, z5.s, z23.s\n"
+ "fmla z8.s, p2/M, z5.s, z22.s\n"
+ "ld1w { z5.s }, p2/Z, [%x[params], #6, MUL VL]\n"
+ "fmla z14.s, p2/M, z4.s, z25.s\n"
+ "ld1w { z25.s }, p1/Z, [x10, x15, LSL #2]\n"
+ "fmla z12.s, p2/M, z4.s, z24.s\n"
+ "fmla z10.s, p2/M, z4.s, z21.s\n"
+ "ld1w { z21.s }, p1/Z, [x26, x15, LSL #2]\n"
+ "fmla z8.s, p2/M, z4.s, z20.s\n"
+ "ld1w { z4.s }, p2/Z, [%x[params], #7, MUL VL]\n"
+ "addvl %x[params], %x[params], #16\n"
+ "fmla z14.s, p2/M, z2.s, z24.s\n"
+ "ld1w { z24.s }, p1/Z, [x9, x15, LSL #2]\n"
+ "fmla z12.s, p2/M, z2.s, z23.s\n"
+ "fmla z10.s, p2/M, z2.s, z20.s\n"
+ "ld1w { z20.s }, p1/Z, [x25, x15, LSL #2]\n"
+ "fmla z8.s, p2/M, z2.s, z19.s\n"
+ "ld1w { z2.s }, p2/Z, [%x[params], #-8, MUL VL]\n"
+ "fmla z14.s, p2/M, z1.s, z23.s\n"
+ "ld1w { z23.s }, p1/Z, [x28, x15, LSL #2]\n"
+ "fmla z12.s, p2/M, z1.s, z22.s\n"
+ "ld1w { z22.s }, p1/Z, [x27, x15, LSL #2]\n"
+ "fmla z10.s, p2/M, z1.s, z19.s\n"
+ "ld1w { z19.s }, p1/Z, [x24, x15, LSL #2]\n"
+ "fmla z8.s, p2/M, z1.s, z18.s\n"
+ "ld1w { z1.s }, p2/Z, [%x[params], #-7, MUL VL]\n"
+ "addvl %x[params], %x[params], #-6\n"
+ "fmax z14.s, p2/M, z14.s, z17.s\n"
+ "ld1w { z18.s }, p1/Z, [x23, x15, LSL #2]\n"
+ "fmax z12.s, p2/M, z12.s, z17.s\n"
+ "fmax z10.s, p2/M, z10.s, z17.s\n"
+ "fmax z8.s, p2/M, z8.s, z17.s\n"
+ "fmin z14.s, p2/M, z14.s, z16.s\n"
+ "st1w { z14.s }, p0, [x22, x13, LSL #2]\n"
+ "mov z14.d, z15.d\n"
+ "fmin z12.s, p2/M, z12.s, z16.s\n"
+ "st1w { z12.s }, p0, [x21, x13, LSL #2]\n"
+ "mov z12.d, z15.d\n"
+ "fmin z10.s, p2/M, z10.s, z16.s\n"
+ "st1w { z10.s }, p0, [x20, x13, LSL #2]\n"
+ "mov z10.d, z15.d\n"
+ "fmin z8.s, p2/M, z8.s, z16.s\n"
+ "st1w { z8.s }, p0, [x19, x13, LSL #2]\n"
+ "mov z8.d, z15.d\n"
+ "blt 1b\n"
+ "2:" // Tail
+ "fmla z14.s, p2/M, z13.s, z3.s\n"
+ "incw x13\n"
+ "fmla z12.s, p2/M, z13.s, z0.s\n"
+ "mov p0.b, p1.b\n"
+ "fmla z10.s, p2/M, z13.s, z31.s\n"
+ "fmla z8.s, p2/M, z13.s, z30.s\n"
+ "fmla z14.s, p2/M, z11.s, z0.s\n"
+ "fmla z12.s, p2/M, z11.s, z29.s\n"
+ "fmla z10.s, p2/M, z11.s, z30.s\n"
+ "fmla z8.s, p2/M, z11.s, z28.s\n"
+ "fmla z14.s, p2/M, z9.s, z29.s\n"
+ "fmla z12.s, p2/M, z9.s, z27.s\n"
+ "fmla z10.s, p2/M, z9.s, z28.s\n"
+ "fmla z8.s, p2/M, z9.s, z26.s\n"
+ "fmla z14.s, p2/M, z7.s, z31.s\n"
+ "fmla z12.s, p2/M, z7.s, z30.s\n"
+ "fmla z10.s, p2/M, z7.s, z25.s\n"
+ "fmla z8.s, p2/M, z7.s, z24.s\n"
+ "fmla z14.s, p2/M, z6.s, z30.s\n"
+ "fmla z12.s, p2/M, z6.s, z28.s\n"
+ "fmla z10.s, p2/M, z6.s, z24.s\n"
+ "fmla z8.s, p2/M, z6.s, z23.s\n"
+ "fmla z14.s, p2/M, z5.s, z28.s\n"
+ "fmla z12.s, p2/M, z5.s, z26.s\n"
+ "fmla z10.s, p2/M, z5.s, z23.s\n"
+ "fmla z8.s, p2/M, z5.s, z22.s\n"
+ "fmla z14.s, p2/M, z4.s, z25.s\n"
+ "fmla z12.s, p2/M, z4.s, z24.s\n"
+ "fmla z10.s, p2/M, z4.s, z21.s\n"
+ "fmla z8.s, p2/M, z4.s, z20.s\n"
+ "fmla z14.s, p2/M, z2.s, z24.s\n"
+ "fmla z12.s, p2/M, z2.s, z23.s\n"
+ "fmla z10.s, p2/M, z2.s, z20.s\n"
+ "fmla z8.s, p2/M, z2.s, z19.s\n"
+ "fmla z14.s, p2/M, z1.s, z23.s\n"
+ "fmla z12.s, p2/M, z1.s, z22.s\n"
+ "fmla z10.s, p2/M, z1.s, z19.s\n"
+ "fmla z8.s, p2/M, z1.s, z18.s\n"
+ "fmax z14.s, p2/M, z14.s, z17.s\n"
+ "fmax z12.s, p2/M, z12.s, z17.s\n"
+ "fmax z10.s, p2/M, z10.s, z17.s\n"
+ "fmax z8.s, p2/M, z8.s, z17.s\n"
+ "fmin z14.s, p2/M, z14.s, z16.s\n"
+ "st1w { z14.s }, p0, [x22, x13, LSL #2]\n"
+ "fmin z12.s, p2/M, z12.s, z16.s\n"
+ "fmin z10.s, p2/M, z10.s, z16.s\n"
+ "st1w { z12.s }, p0, [x21, x13, LSL #2]\n"
+ "fmin z8.s, p2/M, z8.s, z16.s\n"
+ "st1w { z10.s }, p0, [x20, x13, LSL #2]\n"
+ "st1w { z8.s }, p0, [x19, x13, LSL #2]\n"
+ : [params] "+r" (params)
+ : [inptrs] "r" (inptrs), [minmax_vals] "r" (minmax_vals), [n_channels] "r" ((unsigned long) n_channels), [outptrs] "r" (outptrs)
+ : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+}
+
+} // namespace depthwise
+} // namespace arm_conv
+
+#endif // defined(ARM_COMPUTE_ENABLE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
index a570c5aa6a..571246be3e 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -88,225 +88,225 @@ void sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
__asm__ __volatile__(
"ptrue p3.b\n"
- "mov x10, #0x0\n"
- "mov x14, #0x0\n"
+ "mov x17, #0x0\n"
+ "mov x16, #0x0\n"
"1:" // Tile loop
- "str x10, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "mov x25, #0x2\n"
- "mov x24, #0x2\n"
- "str x14, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x23, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x22, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "mul x21, x10, x23\n" // offset = tile_i * ld_input_row
- "ldr x13, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "ldr x12, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "mul x20, x10, x22\n" // offset = tile_i * ld_output_row
- "cntw x11\n"
- "madd x21, x14, x13, x21\n" // offset += tile_j * ld_input_col
- "ldr x10, [%x[params_struct], %[offsetof_args_params]]\n"
- "ldr x9, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "whilelt p2.s, XZR, %x[n_channels]\n"
- "madd x20, x14, x12, x20\n" // offset += tile_j * ld_output_col
+ "str x17, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "mov x23, #0x2\n"
+ "str x16, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "mov x15, #0x2\n"
+ "ldr x14, [%x[params_struct], %[offsetof_args_params]]\n"
+ "mov x13, #0x0\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "cntw x12\n"
+ "ldr x11, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "sub x21, XZR, x12\n"
+ "ldr x10, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "mul x19, x17, x22\n" // offset = tile_i * ld_input_row
+ "ldr x20, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "madd x19, x16, x11, x19\n" // offset += tile_j * ld_input_col
+ "ldr x9, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+ "mul x19, x19, x23\n" // offset *= kernel_stride * output_size
"ldr x28, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "ld1w { z18.s }, p3/Z, [x10]\n"
- "add x27, x13, x13\n"
- "mul x21, x21, x25\n" // offset *= kernel_stride * output_size
- "add x9, x9, x21, LSL #2\n" // inptr[0] += offset * sizeof(float)
- "ld1w { z0.s }, p3/Z, [x10, #1, MUL VL]\n"
- "ld1w { z1.s }, p3/Z, [x10, #2, MUL VL]\n"
- "mul x20, x20, x24\n" // offset *= output_tile_size
- "ld1w { z2.s }, p3/Z, [x10, #3, MUL VL]\n"
- "ld1w { z3.s }, p3/Z, [x10, #4, MUL VL]\n"
- "add x26, x9, x23, LSL #2\n"
- "ld1w { z4.s }, p3/Z, [x10, #5, MUL VL]\n"
- "ld1w { z5.s }, p3/Z, [x10, #6, MUL VL]\n"
- "add x25, x26, x23, LSL #2\n"
- "add x24, x27, x13\n"
- "ld1w { z6.s }, p3/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
- "add x28, x28, x20, LSL #2\n" // outptrs[0] += offset * sizeof(float)
- "ld1rw { z17.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "cmp x11, %x[n_channels]\n"
- "add x23, x25, x23, LSL #2\n"
- "ld1rw { z16.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "ld1w { z7.s }, p3/Z, [x10, #-8, MUL VL]\n"
- "add x22, x28, x22, LSL #2\n"
- "mov x21, #0x0\n"
- "ld1w { z8.s }, p3/Z, [x10, #-7, MUL VL]\n"
- "ld1w { z9.s }, p2/Z, [x26, x13, LSL #2]\n"
- "sub x20, XZR, x11\n"
- "ld1w { z10.s }, p2/Z, [x9]\n"
- "ld1w { z11.s }, p2/Z, [x9, x24, LSL #2]\n"
- "addvl x10, x10, #-6\n"
- "ld1w { z12.s }, p2/Z, [x26, x27, LSL #2]\n"
- "ld1w { z13.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "add x10, x10, x19, LSL #2\n" // inptr[0] += offset * sizeof(float)
+ "ld1rw { z18.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "add x27, x10, x22, LSL #2\n"
+ "ld1rw { z17.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "add x26, x27, x22, LSL #2\n"
+ "ld1w { z16.s }, p3/Z, [x14]\n"
+ "add x25, x26, x22, LSL #2\n"
+ "ld1w { z0.s }, p3/Z, [x14, #1, MUL VL]\n"
+ "add x24, x11, x11\n"
+ "ld1w { z1.s }, p3/Z, [x14, #2, MUL VL]\n"
+ "add x23, x24, x11\n"
+ "ld1w { z2.s }, p3/Z, [x14, #3, MUL VL]\n"
+ "mul x19, x17, x20\n" // offset = tile_i * ld_output_row
+ "ld1w { z3.s }, p3/Z, [x14, #4, MUL VL]\n"
+ "madd x19, x16, x9, x19\n" // offset += tile_j * ld_output_col
+ "ld1w { z4.s }, p3/Z, [x14, #5, MUL VL]\n"
+ "mul x19, x19, x15\n" // offset *= output_tile_size
+ "ld1w { z5.s }, p3/Z, [x14, #6, MUL VL]\n"
+ "add x28, x28, x19, LSL #2\n" // outptrs[0] += offset * sizeof(float)
+ "ld1w { z6.s }, p3/Z, [x14, #7, MUL VL]\n"
+ "add x22, x28, x20, LSL #2\n"
+ "whilelt p2.s, XZR, %x[n_channels]\n"
+ "ld1w { z9.s }, p2/Z, [x27, x11, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x10]\n"
+ "addvl x14, x14, #16\n"
+ "ld1w { z11.s }, p2/Z, [x10, x23, LSL #2]\n"
+ "cmp x12, %x[n_channels]\n"
+ "ld1w { z7.s }, p3/Z, [x14, #-8, MUL VL]\n"
+ "ld1w { z8.s }, p3/Z, [x14, #-7, MUL VL]\n"
+ "addvl x14, x14, #-6\n"
+ "ld1w { z12.s }, p2/Z, [x27, x24, LSL #2]\n"
+ "ld1w { z13.s }, p2/Z, [x26, x11, LSL #2]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
- "movprfx z28, z18\n fmla z28.s, p3/M, z4.s, z9.s\n"
- "movprfx z29, z18\n fmla z29.s, p3/M, z3.s, z9.s\n"
- "whilelt p1.s, x11, %x[n_channels]\n"
+ "movprfx z31, z16\n fmla z31.s, p3/M, z4.s, z9.s\n"
+ "whilelt p1.s, x12, %x[n_channels]\n"
+ "movprfx z30, z16\n fmla z30.s, p3/M, z3.s, z9.s\n"
"incw x21\n"
- "movprfx z30, z18\n fmla z30.s, p3/M, z1.s, z9.s\n"
- "movprfx z31, z18\n fmla z31.s, p3/M, z0.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x23]\n"
- "incw x11\n"
- "fmla z28.s, p3/M, z0.s, z10.s\n"
- "fmla z29.s, p3/M, z2.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x23, x24, LSL #2]\n"
- "ld1w { z10.s }, p2/Z, [x25, x27, LSL #2]\n"
- "fmla z30.s, p3/M, z2.s, z12.s\n"
- "fmla z31.s, p3/M, z1.s, z12.s\n"
+ "movprfx z29, z16\n fmla z29.s, p3/M, z1.s, z9.s\n"
"mov p0.b, p2.b\n"
- "ld1w { z18.s }, p3/Z, [x10]\n"
- "fmla z28.s, p3/M, z5.s, z12.s\n"
- "fmla z29.s, p3/M, z4.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x9, x13, LSL #2]\n"
- "incw x20\n"
- "fmla z30.s, p3/M, z6.s, z9.s\n"
- "fmla z31.s, p3/M, z3.s, z13.s\n"
- "ld1w { z9.s }, p2/Z, [x9, x27, LSL #2]\n"
- "addvl x9, x9, #1\n"
- "fmla z28.s, p3/M, z7.s, z13.s\n"
- "fmla z29.s, p3/M, z6.s, z13.s\n"
- "fmla z30.s, p3/M, z4.s, z13.s\n"
- "fmla z31.s, p3/M, z8.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x26]\n"
+ "movprfx z28, z16\n fmla z28.s, p3/M, z0.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x25]\n"
+ "incw x13\n"
+ "fmla z31.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x26, x24, LSL #2]\n"
+ "incw x12\n"
+ "fmla z30.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x25, x23, LSL #2]\n"
+ "fmla z29.s, p3/M, z2.s, z12.s\n"
+ "ld1w { z16.s }, p3/Z, [x14]\n"
"fmla z28.s, p3/M, z1.s, z12.s\n"
- "fmla z29.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x26, x24, LSL #2]\n"
+ "fmla z31.s, p3/M, z5.s, z12.s\n"
+ "fmla z30.s, p3/M, z4.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x10, x11, LSL #2]\n"
+ "fmla z29.s, p3/M, z6.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x10, x24, LSL #2]\n"
+ "addvl x10, x10, #1\n"
+ "fmla z28.s, p3/M, z3.s, z13.s\n"
+ "fmla z31.s, p3/M, z7.s, z13.s\n"
+ "fmla z30.s, p3/M, z6.s, z13.s\n"
+ "fmla z29.s, p3/M, z4.s, z13.s\n"
+ "fmla z28.s, p3/M, z8.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x27]\n"
+ "fmla z31.s, p3/M, z1.s, z12.s\n"
+ "fmla z30.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x27, x23, LSL #2]\n"
+ "addvl x27, x27, #1\n"
+ "fmla z29.s, p3/M, z5.s, z10.s\n"
+ "fmla z28.s, p3/M, z4.s, z10.s\n"
+ "ld1w { z4.s }, p3/Z, [x14, #5, MUL VL]\n"
+ "fmla z31.s, p3/M, z2.s, z9.s\n"
+ "fmla z30.s, p3/M, z1.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x26]\n"
+ "ld1w { z1.s }, p3/Z, [x14, #2, MUL VL]\n"
+ "fmla z29.s, p3/M, z0.s, z11.s\n"
+ "ld1w { z0.s }, p3/Z, [x14, #1, MUL VL]\n"
+ "fmla z28.s, p3/M, z2.s, z12.s\n"
+ "ld1w { z2.s }, p3/Z, [x14, #3, MUL VL]\n"
+ "fmla z31.s, p3/M, z8.s, z10.s\n"
+ "fmla z30.s, p3/M, z7.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x26, x23, LSL #2]\n"
"addvl x26, x26, #1\n"
- "fmla z30.s, p3/M, z5.s, z10.s\n"
- "fmla z31.s, p3/M, z4.s, z10.s\n"
- "ld1w { z4.s }, p3/Z, [x10, #5, MUL VL]\n"
- "fmla z28.s, p3/M, z2.s, z9.s\n"
- "fmla z29.s, p3/M, z1.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x25]\n"
- "ld1w { z1.s }, p3/Z, [x10, #2, MUL VL]\n"
- "fmla z30.s, p3/M, z0.s, z11.s\n"
- "fmla z31.s, p3/M, z2.s, z12.s\n"
- "ld1w { z0.s }, p3/Z, [x10, #1, MUL VL]\n"
- "ld1w { z2.s }, p3/Z, [x10, #3, MUL VL]\n"
- "fmla z28.s, p3/M, z8.s, z10.s\n"
- "fmla z29.s, p3/M, z7.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x25, x24, LSL #2]\n"
+ "fmla z29.s, p3/M, z3.s, z9.s\n"
+ "ld1w { z13.s }, p1/Z, [x26, x11, LSL #2]\n"
+ "fmla z31.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x25, x11, LSL #2]\n"
+ "fmla z28.s, p3/M, z5.s, z10.s\n"
+ "ld1w { z3.s }, p3/Z, [x14, #4, MUL VL]\n"
+ "fmla z30.s, p3/M, z5.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x25, x24, LSL #2]\n"
+ "whilelt p2.s, x13, %x[n_channels]\n"
+ "fmla z29.s, p3/M, z7.s, z11.s\n"
+ "ld1w { z5.s }, p3/Z, [x14, #6, MUL VL]\n"
"addvl x25, x25, #1\n"
- "fmla z30.s, p3/M, z3.s, z9.s\n"
- "fmla z31.s, p3/M, z5.s, z10.s\n"
- "ld1w { z13.s }, p1/Z, [x25, x13, LSL #2]\n"
- "fmla z28.s, p3/M, z3.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x23, x13, LSL #2]\n"
- "fmla z29.s, p3/M, z5.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x23, x27, LSL #2]\n"
- "fmla z30.s, p3/M, z7.s, z11.s\n"
- "fmla z31.s, p3/M, z6.s, z11.s\n"
- "ld1w { z3.s }, p3/Z, [x10, #4, MUL VL]\n"
- "ld1w { z5.s }, p3/Z, [x10, #6, MUL VL]\n"
- "fmla z28.s, p3/M, z6.s, z9.s\n"
- "fmla z29.s, p3/M, z8.s, z10.s\n"
- "fmax z28.s, p3/M, z28.s, z17.s\n"
- "fmax z29.s, p3/M, z29.s, z17.s\n"
- "fmla z30.s, p3/M, z8.s, z12.s\n"
- "fmla z31.s, p3/M, z7.s, z12.s\n"
- "fmax z30.s, p3/M, z30.s, z17.s\n"
- "fmax z31.s, p3/M, z31.s, z17.s\n"
- "ld1w { z6.s }, p3/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
- "whilelt p2.s, x21, %x[n_channels]\n"
- "ld1w { z9.s }, p1/Z, [x26, x13, LSL #2]\n"
- "cmp x11, %x[n_channels]\n"
- "fmin z28.s, p3/M, z28.s, z16.s\n"
- "ld1w { z10.s }, p1/Z, [x9]\n"
- "ld1w { z11.s }, p1/Z, [x9, x24, LSL #2]\n"
- "fmin z29.s, p3/M, z29.s, z16.s\n"
- "fmin z30.s, p3/M, z30.s, z16.s\n"
- "ld1w { z12.s }, p1/Z, [x26, x27, LSL #2]\n"
- "st1w { z28.s }, p0, [x28]\n"
- "fmin z31.s, p3/M, z31.s, z16.s\n"
- "addvl x23, x23, #1\n"
- "st1w { z29.s }, p0, [x28, x12, LSL #2]\n"
- "ld1w { z7.s }, p3/Z, [x10, #-8, MUL VL]\n"
- "st1w { z30.s }, p0, [x22]\n"
+ "fmla z31.s, p3/M, z6.s, z9.s\n"
+ "ld1w { z9.s }, p1/Z, [x27, x11, LSL #2]\n"
+ "cmp x12, %x[n_channels]\n"
+ "fmla z30.s, p3/M, z8.s, z10.s\n"
+ "ld1w { z10.s }, p1/Z, [x10]\n"
+ "fmla z28.s, p3/M, z6.s, z11.s\n"
+ "ld1w { z11.s }, p1/Z, [x10, x23, LSL #2]\n"
+ "ld1w { z6.s }, p3/Z, [x14, #7, MUL VL]\n"
+ "fmla z29.s, p3/M, z8.s, z12.s\n"
+ "addvl x14, x14, #16\n"
+ "fmax z31.s, p3/M, z31.s, z18.s\n"
+ "ld1w { z8.s }, p3/Z, [x14, #-7, MUL VL]\n"
+ "fmla z28.s, p3/M, z7.s, z12.s\n"
+ "ld1w { z12.s }, p1/Z, [x27, x24, LSL #2]\n"
+ "fmax z30.s, p3/M, z30.s, z18.s\n"
+ "ld1w { z7.s }, p3/Z, [x14, #-8, MUL VL]\n"
+ "addvl x14, x14, #-6\n"
+ "fmax z29.s, p3/M, z29.s, z18.s\n"
+ "fmin z31.s, p3/M, z31.s, z17.s\n"
+ "st1w { z31.s }, p0, [x28]\n"
+ "fmin z30.s, p3/M, z30.s, z17.s\n"
+ "fmin z29.s, p3/M, z29.s, z17.s\n"
+ "st1w { z30.s }, p0, [x28, x9, LSL #2]\n"
+ "fmax z28.s, p3/M, z28.s, z18.s\n"
"addvl x28, x28, #1\n"
- "ld1w { z8.s }, p3/Z, [x10, #-7, MUL VL]\n"
- "addvl x10, x10, #-6\n"
- "st1w { z31.s }, p0, [x22, x12, LSL #2]\n"
+ "fmin z28.s, p3/M, z28.s, z17.s\n"
+ "st1w { z29.s }, p0, [x22]\n"
+ "st1w { z28.s }, p0, [x22, x9, LSL #2]\n"
"addvl x22, x22, #1\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
- "movprfx z28, z18\n fmla z28.s, p3/M, z4.s, z9.s\n"
- "movprfx z29, z18\n fmla z29.s, p3/M, z3.s, z9.s\n"
- "ldr x14, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x10, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "movprfx z30, z18\n fmla z30.s, p3/M, z1.s, z9.s\n"
- "movprfx z31, z18\n fmla z31.s, p3/M, z0.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x23]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "fmla z28.s, p3/M, z0.s, z10.s\n"
- "fmla z29.s, p3/M, z2.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x23, x24, LSL #2]\n"
- "ld1w { z10.s }, p2/Z, [x25, x27, LSL #2]\n"
- "fmla z30.s, p3/M, z2.s, z12.s\n"
- "fmla z31.s, p3/M, z1.s, z12.s\n"
- "add x14, x14, #0x1\n"
- "cmp x14, x20\n"
- "fmla z28.s, p3/M, z5.s, z12.s\n"
- "fmla z29.s, p3/M, z4.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x9, x13, LSL #2]\n"
- "add x21, x10, #0x1\n"
- "fmla z30.s, p3/M, z6.s, z9.s\n"
- "fmla z31.s, p3/M, z3.s, z13.s\n"
- "ld1w { z9.s }, p2/Z, [x9, x27, LSL #2]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "fmla z28.s, p3/M, z7.s, z13.s\n"
- "fmla z29.s, p3/M, z6.s, z13.s\n"
- "csel x10, x10, x21, LT\n"
+ "movprfx z31, z16\n fmla z31.s, p3/M, z4.s, z9.s\n"
+ "ldr x17, [%x[params_struct], %[offsetof_args_tile_i]]\n"
"mov p0.b, p2.b\n"
- "fmla z30.s, p3/M, z4.s, z13.s\n"
- "fmla z31.s, p3/M, z8.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x26]\n"
- "csel x14, x14, XZR, LT\n"
- "fmla z28.s, p3/M, z1.s, z12.s\n"
- "fmla z29.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x26, x24, LSL #2]\n"
- "cmp x10, x20\n"
- "fmla z30.s, p3/M, z5.s, z10.s\n"
- "fmla z31.s, p3/M, z4.s, z10.s\n"
- "fmla z28.s, p3/M, z2.s, z9.s\n"
- "fmla z29.s, p3/M, z1.s, z9.s\n"
+ "movprfx z30, z16\n fmla z30.s, p3/M, z3.s, z9.s\n"
+ "ldr x16, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "add x21, x17, #0x1\n"
+ "movprfx z29, z16\n fmla z29.s, p3/M, z1.s, z9.s\n"
+ "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "movprfx z28, z16\n fmla z28.s, p3/M, z0.s, z9.s\n"
"ld1w { z9.s }, p2/Z, [x25]\n"
- "fmla z30.s, p3/M, z0.s, z11.s\n"
- "fmla z31.s, p3/M, z2.s, z12.s\n"
- "fmla z28.s, p3/M, z8.s, z10.s\n"
- "fmla z29.s, p3/M, z7.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x25, x24, LSL #2]\n"
- "fmla z30.s, p3/M, z3.s, z9.s\n"
- "fmla z31.s, p3/M, z5.s, z10.s\n"
- "fmla z28.s, p3/M, z3.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x23, x13, LSL #2]\n"
- "fmla z29.s, p3/M, z5.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x23, x27, LSL #2]\n"
- "fmla z30.s, p3/M, z7.s, z11.s\n"
- "fmla z31.s, p3/M, z6.s, z11.s\n"
- "fmla z28.s, p3/M, z6.s, z9.s\n"
- "fmla z29.s, p3/M, z8.s, z10.s\n"
- "fmax z28.s, p3/M, z28.s, z17.s\n"
- "fmax z29.s, p3/M, z29.s, z17.s\n"
- "fmla z30.s, p3/M, z8.s, z12.s\n"
- "fmla z31.s, p3/M, z7.s, z12.s\n"
- "fmax z30.s, p3/M, z30.s, z17.s\n"
- "fmax z31.s, p3/M, z31.s, z17.s\n"
- "fmin z28.s, p3/M, z28.s, z16.s\n"
- "fmin z29.s, p3/M, z29.s, z16.s\n"
- "st1w { z28.s }, p0, [x28]\n"
- "fmin z30.s, p3/M, z30.s, z16.s\n"
- "fmin z31.s, p3/M, z31.s, z16.s\n"
- "st1w { z29.s }, p0, [x28, x12, LSL #2]\n"
- "st1w { z30.s }, p0, [x22]\n"
- "st1w { z31.s }, p0, [x22, x12, LSL #2]\n"
+ "add x16, x16, #0x1\n"
+ "fmla z31.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x26, x24, LSL #2]\n"
+ "fmla z30.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x25, x23, LSL #2]\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "fmla z29.s, p3/M, z2.s, z12.s\n"
+ "cmp x16, x19\n"
+ "fmla z31.s, p3/M, z5.s, z12.s\n"
+ "fmla z30.s, p3/M, z4.s, z12.s\n"
+ "csel x16, x16, XZR, LT\n"
+ "fmla z28.s, p3/M, z1.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x10, x11, LSL #2]\n"
+ "csel x17, x17, x21, LT\n"
+ "fmla z29.s, p3/M, z6.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x10, x24, LSL #2]\n"
+ "cmp x17, x20\n"
+ "fmla z31.s, p3/M, z7.s, z13.s\n"
+ "fmla z30.s, p3/M, z6.s, z13.s\n"
+ "fmla z28.s, p3/M, z3.s, z13.s\n"
+ "fmla z29.s, p3/M, z4.s, z13.s\n"
+ "fmla z31.s, p3/M, z1.s, z12.s\n"
+ "fmla z30.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x27, x23, LSL #2]\n"
+ "fmla z28.s, p3/M, z8.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x27]\n"
+ "fmla z29.s, p3/M, z5.s, z10.s\n"
+ "fmla z31.s, p3/M, z2.s, z9.s\n"
+ "fmla z30.s, p3/M, z1.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x26]\n"
+ "fmla z28.s, p3/M, z4.s, z10.s\n"
+ "fmla z29.s, p3/M, z0.s, z11.s\n"
+ "fmla z31.s, p3/M, z8.s, z10.s\n"
+ "fmla z30.s, p3/M, z7.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x26, x23, LSL #2]\n"
+ "fmla z28.s, p3/M, z2.s, z12.s\n"
+ "fmla z29.s, p3/M, z3.s, z9.s\n"
+ "fmla z31.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x25, x11, LSL #2]\n"
+ "fmla z30.s, p3/M, z5.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x25, x24, LSL #2]\n"
+ "fmla z28.s, p3/M, z5.s, z10.s\n"
+ "fmla z29.s, p3/M, z7.s, z11.s\n"
+ "fmla z31.s, p3/M, z6.s, z9.s\n"
+ "fmla z30.s, p3/M, z8.s, z10.s\n"
+ "fmla z28.s, p3/M, z6.s, z11.s\n"
+ "fmla z29.s, p3/M, z8.s, z12.s\n"
+ "fmax z31.s, p3/M, z31.s, z18.s\n"
+ "fmax z30.s, p3/M, z30.s, z18.s\n"
+ "fmla z28.s, p3/M, z7.s, z12.s\n"
+ "fmax z29.s, p3/M, z29.s, z18.s\n"
+ "fmin z31.s, p3/M, z31.s, z17.s\n"
+ "st1w { z31.s }, p0, [x28]\n"
+ "fmin z30.s, p3/M, z30.s, z17.s\n"
+ "fmin z29.s, p3/M, z29.s, z17.s\n"
+ "st1w { z30.s }, p0, [x28, x9, LSL #2]\n"
+ "fmax z28.s, p3/M, z28.s, z18.s\n"
+ "st1w { z29.s }, p0, [x22]\n"
+ "fmin z28.s, p3/M, z28.s, z17.s\n"
+ "st1w { z28.s }, p0, [x22, x9, LSL #2]\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z16", "z17", "z18", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z16", "z17", "z18", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
index 903de0d309..77a6c683b0 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -78,215 +78,215 @@ void sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
activation_min, activation_max);
__asm__ __volatile__(
+ "ldr x19, [%x[params_struct], %[offsetof_args_outptrs]]\n"
"ptrue p3.b\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "ldr x16, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x15, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "cntw x14\n"
- "ldp x13, x12, [x20, #0x0]\n"
- "ldp x11, x10, [x20, #0x10]\n"
- "mov x9, #0x0\n"
+ "ldr x15, [%x[params_struct], %[offsetof_args_params]]\n"
+ "add x14, %x[params_struct], %[offsetof_Args_inptrs]\n"
+ "ld1rw { z18.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "mov x13, #0x0\n"
+ "ld1rw { z17.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "cntw x12\n"
+ "ldp x11, x10, [x19, #0x0]\n"
+ "sub x9, XZR, x12\n"
+ "ldp x28, x27, [x19, #0x10]\n"
"whilelt p2.s, XZR, %x[n_channels]\n"
- "ld1w { z18.s }, p3/Z, [x16]\n"
- "ld1w { z0.s }, p3/Z, [x16, #1, MUL VL]\n"
- "cmp x14, %x[n_channels]\n"
- "ld1w { z1.s }, p3/Z, [x16, #2, MUL VL]\n"
- "ld1w { z2.s }, p3/Z, [x16, #3, MUL VL]\n"
- "sub x28, XZR, x14\n"
- "ld1w { z3.s }, p3/Z, [x16, #4, MUL VL]\n"
- "ld1w { z4.s }, p3/Z, [x16, #5, MUL VL]\n"
- "ld1w { z5.s }, p3/Z, [x16, #6, MUL VL]\n"
- "ld1w { z6.s }, p3/Z, [x16, #7, MUL VL]\n"
- "addvl x16, x16, #16\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "ldr x23, [x15, #0x20]\n"
- "ld1rw { z17.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ld1rw { z16.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "ld1w { z7.s }, p3/Z, [x16, #-8, MUL VL]\n"
- "ld1w { z8.s }, p3/Z, [x16, #-7, MUL VL]\n"
- "ld1w { z9.s }, p2/Z, [x27, x9, LSL #2]\n"
- "addvl x16, x16, #-6\n"
- "ld1w { z10.s }, p2/Z, [x26, x9, LSL #2]\n"
- "ld1w { z11.s }, p2/Z, [x25, x9, LSL #2]\n"
- "ld1w { z12.s }, p2/Z, [x24, x9, LSL #2]\n"
- "ld1w { z13.s }, p2/Z, [x23, x9, LSL #2]\n"
+ "ld1w { z16.s }, p3/Z, [x15]\n"
+ "cmp x12, %x[n_channels]\n"
+ "ld1w { z0.s }, p3/Z, [x15, #1, MUL VL]\n"
+ "ld1w { z1.s }, p3/Z, [x15, #2, MUL VL]\n"
+ "ld1w { z2.s }, p3/Z, [x15, #3, MUL VL]\n"
+ "ld1w { z3.s }, p3/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z4.s }, p3/Z, [x15, #5, MUL VL]\n"
+ "ld1w { z5.s }, p3/Z, [x15, #6, MUL VL]\n"
+ "ld1w { z6.s }, p3/Z, [x15, #7, MUL VL]\n"
+ "addvl x15, x15, #16\n"
+ "ldp x26, x25, [x14, #0x0]\n"
+ "ld1w { z7.s }, p3/Z, [x15, #-8, MUL VL]\n"
+ "ld1w { z8.s }, p3/Z, [x15, #-7, MUL VL]\n"
+ "addvl x15, x15, #-6\n"
+ "ld1w { z9.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "ldp x24, x23, [x14, #0x10]\n"
+ "ldr x22, [x14, #0x20]\n"
+ "ld1w { z11.s }, p2/Z, [x24, x13, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x23, x13, LSL #2]\n"
+ "ld1w { z13.s }, p2/Z, [x22, x13, LSL #2]\n"
"bge 2f\n"
"1:" // Channel loop
- "movprfx z28, z18\n fmla z28.s, p3/M, z4.s, z9.s\n"
- "movprfx z29, z18\n fmla z29.s, p3/M, z3.s, z9.s\n"
- "ldr x22, [x15, #0x28]\n"
- "ldr x21, [x15, #0x30]\n"
- "movprfx z30, z18\n fmla z30.s, p3/M, z1.s, z9.s\n"
- "movprfx z31, z18\n fmla z31.s, p3/M, z0.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x22, x9, LSL #2]\n"
- "ldr x20, [x15, #0x38]\n"
- "fmla z28.s, p3/M, z0.s, z10.s\n"
- "fmla z29.s, p3/M, z2.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x21, x9, LSL #2]\n"
- "ldr x26, [x15, #0x48]\n"
- "fmla z30.s, p3/M, z2.s, z12.s\n"
- "fmla z31.s, p3/M, z1.s, z12.s\n"
- "ldr x27, [x15, #0x40]\n"
- "ld1w { z10.s }, p2/Z, [x26, x9, LSL #2]\n"
- "fmla z28.s, p3/M, z5.s, z12.s\n"
- "fmla z29.s, p3/M, z4.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldr x25, [x15, #0x50]\n"
- "fmla z30.s, p3/M, z6.s, z9.s\n"
- "fmla z31.s, p3/M, z3.s, z13.s\n"
- "ld1w { z9.s }, p2/Z, [x27, x9, LSL #2]\n"
- "ldr x24, [x15, #0x58]\n"
- "fmla z28.s, p3/M, z7.s, z13.s\n"
- "fmla z29.s, p3/M, z6.s, z13.s\n"
- "ldr x23, [x15, #0x60]\n"
- "ldr x22, [x15, #0x68]\n"
- "fmla z30.s, p3/M, z4.s, z13.s\n"
- "fmla z31.s, p3/M, z8.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x25, x9, LSL #2]\n"
- "ldr x21, [x15, #0x70]\n"
- "fmla z28.s, p3/M, z1.s, z12.s\n"
- "fmla z29.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x24, x9, LSL #2]\n"
- "ldr x20, [x15, #0x78]\n"
- "fmla z30.s, p3/M, z5.s, z10.s\n"
- "fmla z31.s, p3/M, z4.s, z10.s\n"
- "whilelt p1.s, x14, %x[n_channels]\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "fmla z28.s, p3/M, z2.s, z9.s\n"
- "fmla z29.s, p3/M, z1.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x23, x9, LSL #2]\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "fmla z30.s, p3/M, z0.s, z11.s\n"
- "fmla z31.s, p3/M, z2.s, z12.s\n"
- "ldr x23, [x15, #0x20]\n"
- "ld1w { z13.s }, p1/Z, [x23, x14, LSL #2]\n"
- "fmla z28.s, p3/M, z8.s, z10.s\n"
- "fmla z29.s, p3/M, z7.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x22, x9, LSL #2]\n"
- "incw x28\n"
- "fmla z30.s, p3/M, z3.s, z9.s\n"
- "fmla z31.s, p3/M, z5.s, z10.s\n"
- "mov p0.b, p2.b\n"
- "ld1w { z18.s }, p3/Z, [x16]\n"
- "fmla z28.s, p3/M, z3.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x21, x9, LSL #2]\n"
- "fmla z29.s, p3/M, z5.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x20, x9, LSL #2]\n"
- "fmla z30.s, p3/M, z7.s, z11.s\n"
- "fmla z31.s, p3/M, z6.s, z11.s\n"
+ "movprfx z31, z16\n fmla z31.s, p3/M, z4.s, z9.s\n"
+ "ldr x21, [x14, #0x28]\n"
+ "whilelt p1.s, x12, %x[n_channels]\n"
+ "movprfx z30, z16\n fmla z30.s, p3/M, z3.s, z9.s\n"
+ "ldr x20, [x14, #0x30]\n"
"incw x9\n"
- "ld1w { z11.s }, p1/Z, [x25, x14, LSL #2]\n"
- "fmla z28.s, p3/M, z6.s, z9.s\n"
- "fmla z29.s, p3/M, z8.s, z10.s\n"
- "ld1w { z9.s }, p1/Z, [x27, x14, LSL #2]\n"
- "ld1w { z10.s }, p1/Z, [x26, x14, LSL #2]\n"
- "fmla z30.s, p3/M, z8.s, z12.s\n"
- "fmla z31.s, p3/M, z7.s, z12.s\n"
- "ld1w { z12.s }, p1/Z, [x24, x14, LSL #2]\n"
- "incw x14\n"
- "fmax z28.s, p3/M, z28.s, z17.s\n"
- "fmax z29.s, p3/M, z29.s, z17.s\n"
- "ld1w { z0.s }, p3/Z, [x16, #1, MUL VL]\n"
- "ld1w { z1.s }, p3/Z, [x16, #2, MUL VL]\n"
- "fmax z30.s, p3/M, z30.s, z17.s\n"
- "fmax z31.s, p3/M, z31.s, z17.s\n"
- "ld1w { z2.s }, p3/Z, [x16, #3, MUL VL]\n"
- "ld1w { z3.s }, p3/Z, [x16, #4, MUL VL]\n"
- "ld1w { z4.s }, p3/Z, [x16, #5, MUL VL]\n"
- "ld1w { z5.s }, p3/Z, [x16, #6, MUL VL]\n"
- "whilelt p2.s, x9, %x[n_channels]\n"
- "cmp x14, %x[n_channels]\n"
- "ld1w { z6.s }, p3/Z, [x16, #7, MUL VL]\n"
- "addvl x16, x16, #16\n"
- "fmin z28.s, p3/M, z28.s, z16.s\n"
- "st1w { z28.s }, p0, [x13, x28, LSL #2]\n"
- "fmin z29.s, p3/M, z29.s, z16.s\n"
- "fmin z30.s, p3/M, z30.s, z16.s\n"
- "st1w { z29.s }, p0, [x12, x28, LSL #2]\n"
- "ld1w { z7.s }, p3/Z, [x16, #-8, MUL VL]\n"
- "fmin z31.s, p3/M, z31.s, z16.s\n"
- "st1w { z30.s }, p0, [x11, x28, LSL #2]\n"
- "ld1w { z8.s }, p3/Z, [x16, #-7, MUL VL]\n"
- "addvl x16, x16, #-6\n"
- "st1w { z31.s }, p0, [x10, x28, LSL #2]\n"
+ "movprfx z29, z16\n fmla z29.s, p3/M, z1.s, z9.s\n"
+ "ldr x19, [x14, #0x38]\n"
+ "mov p0.b, p2.b\n"
+ "movprfx z28, z16\n fmla z28.s, p3/M, z0.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x21, x13, LSL #2]\n"
+ "ldr x26, [x14, #0x40]\n"
+ "fmla z31.s, p3/M, z0.s, z10.s\n"
+ "ldr x25, [x14, #0x48]\n"
+ "fmla z30.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x20, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z2.s, z12.s\n"
+ "ldr x24, [x14, #0x50]\n"
+ "fmla z28.s, p3/M, z1.s, z12.s\n"
+ "ld1w { z10.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "fmla z31.s, p3/M, z5.s, z12.s\n"
+ "ldr x23, [x14, #0x58]\n"
+ "fmla z30.s, p3/M, z4.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x19, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z6.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "fmla z28.s, p3/M, z3.s, z13.s\n"
+ "ldr x22, [x14, #0x60]\n"
+ "fmla z31.s, p3/M, z7.s, z13.s\n"
+ "ldr x21, [x14, #0x68]\n"
+ "fmla z30.s, p3/M, z6.s, z13.s\n"
+ "ldr x20, [x14, #0x70]\n"
+ "fmla z29.s, p3/M, z4.s, z13.s\n"
+ "ldr x19, [x14, #0x78]\n"
+ "fmla z28.s, p3/M, z8.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x24, x13, LSL #2]\n"
+ "fmla z31.s, p3/M, z1.s, z12.s\n"
+ "ldp x26, x25, [x14, #0x0]\n"
+ "fmla z30.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x23, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z5.s, z10.s\n"
+ "ldp x24, x23, [x14, #0x10]\n"
+ "fmla z28.s, p3/M, z4.s, z10.s\n"
+ "ld1w { z16.s }, p3/Z, [x15]\n"
+ "fmla z31.s, p3/M, z2.s, z9.s\n"
+ "ld1w { z4.s }, p3/Z, [x15, #5, MUL VL]\n"
+ "fmla z30.s, p3/M, z1.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x22, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z0.s, z11.s\n"
+ "ldr x22, [x14, #0x20]\n"
+ "fmla z28.s, p3/M, z2.s, z12.s\n"
+ "ld1w { z0.s }, p3/Z, [x15, #1, MUL VL]\n"
+ "fmla z31.s, p3/M, z8.s, z10.s\n"
+ "ld1w { z1.s }, p3/Z, [x15, #2, MUL VL]\n"
+ "fmla z30.s, p3/M, z7.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x21, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z3.s, z9.s\n"
+ "ld1w { z13.s }, p1/Z, [x22, x12, LSL #2]\n"
+ "fmla z31.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x20, x13, LSL #2]\n"
+ "fmla z28.s, p3/M, z5.s, z10.s\n"
+ "ld1w { z2.s }, p3/Z, [x15, #3, MUL VL]\n"
+ "fmla z30.s, p3/M, z5.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x19, x13, LSL #2]\n"
+ "incw x13\n"
+ "fmla z29.s, p3/M, z7.s, z11.s\n"
+ "ld1w { z3.s }, p3/Z, [x15, #4, MUL VL]\n"
+ "whilelt p2.s, x13, %x[n_channels]\n"
+ "fmla z31.s, p3/M, z6.s, z9.s\n"
+ "ld1w { z9.s }, p1/Z, [x26, x12, LSL #2]\n"
+ "fmla z28.s, p3/M, z6.s, z11.s\n"
+ "ld1w { z11.s }, p1/Z, [x24, x12, LSL #2]\n"
+ "fmla z30.s, p3/M, z8.s, z10.s\n"
+ "ld1w { z10.s }, p1/Z, [x25, x12, LSL #2]\n"
+ "ld1w { z5.s }, p3/Z, [x15, #6, MUL VL]\n"
+ "fmla z29.s, p3/M, z8.s, z12.s\n"
+ "ld1w { z6.s }, p3/Z, [x15, #7, MUL VL]\n"
+ "fmla z28.s, p3/M, z7.s, z12.s\n"
+ "addvl x15, x15, #16\n"
+ "fmax z31.s, p3/M, z31.s, z18.s\n"
+ "ld1w { z12.s }, p1/Z, [x23, x12, LSL #2]\n"
+ "incw x12\n"
+ "fmax z30.s, p3/M, z30.s, z18.s\n"
+ "ld1w { z7.s }, p3/Z, [x15, #-8, MUL VL]\n"
+ "cmp x12, %x[n_channels]\n"
+ "fmax z29.s, p3/M, z29.s, z18.s\n"
+ "ld1w { z8.s }, p3/Z, [x15, #-7, MUL VL]\n"
+ "addvl x15, x15, #-6\n"
+ "fmax z28.s, p3/M, z28.s, z18.s\n"
+ "fmin z31.s, p3/M, z31.s, z17.s\n"
+ "st1w { z31.s }, p0, [x11, x9, LSL #2]\n"
+ "fmin z30.s, p3/M, z30.s, z17.s\n"
+ "fmin z29.s, p3/M, z29.s, z17.s\n"
+ "st1w { z30.s }, p0, [x10, x9, LSL #2]\n"
+ "fmin z28.s, p3/M, z28.s, z17.s\n"
+ "st1w { z29.s }, p0, [x28, x9, LSL #2]\n"
+ "st1w { z28.s }, p0, [x27, x9, LSL #2]\n"
"blt 1b\n"
"2:" // Channel tail
- "movprfx z28, z18\n fmla z28.s, p3/M, z4.s, z9.s\n"
- "movprfx z29, z18\n fmla z29.s, p3/M, z3.s, z9.s\n"
- "ldr x22, [x15, #0x28]\n"
- "ldr x21, [x15, #0x30]\n"
- "movprfx z30, z18\n fmla z30.s, p3/M, z1.s, z9.s\n"
- "movprfx z31, z18\n fmla z31.s, p3/M, z0.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x22, x9, LSL #2]\n"
- "ldr x20, [x15, #0x38]\n"
- "fmla z28.s, p3/M, z0.s, z10.s\n"
- "fmla z29.s, p3/M, z2.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x21, x9, LSL #2]\n"
- "ldr x26, [x15, #0x48]\n"
- "fmla z30.s, p3/M, z2.s, z12.s\n"
- "fmla z31.s, p3/M, z1.s, z12.s\n"
- "ldr x27, [x15, #0x40]\n"
- "ld1w { z10.s }, p2/Z, [x26, x9, LSL #2]\n"
- "fmla z28.s, p3/M, z5.s, z12.s\n"
- "fmla z29.s, p3/M, z4.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldr x25, [x15, #0x50]\n"
- "fmla z30.s, p3/M, z6.s, z9.s\n"
- "fmla z31.s, p3/M, z3.s, z13.s\n"
- "ld1w { z9.s }, p2/Z, [x27, x9, LSL #2]\n"
- "ldr x24, [x15, #0x58]\n"
- "fmla z28.s, p3/M, z7.s, z13.s\n"
- "fmla z29.s, p3/M, z6.s, z13.s\n"
- "ldr x23, [x15, #0x60]\n"
- "ldr x22, [x15, #0x68]\n"
- "fmla z30.s, p3/M, z4.s, z13.s\n"
- "fmla z31.s, p3/M, z8.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x25, x9, LSL #2]\n"
- "ldr x21, [x15, #0x70]\n"
- "fmla z28.s, p3/M, z1.s, z12.s\n"
- "fmla z29.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x24, x9, LSL #2]\n"
- "ldr x20, [x15, #0x78]\n"
- "fmla z30.s, p3/M, z5.s, z10.s\n"
- "fmla z31.s, p3/M, z4.s, z10.s\n"
- "incw x28\n"
+ "movprfx z31, z16\n fmla z31.s, p3/M, z4.s, z9.s\n"
+ "ldr x21, [x14, #0x28]\n"
+ "incw x9\n"
+ "movprfx z30, z16\n fmla z30.s, p3/M, z3.s, z9.s\n"
+ "ldr x20, [x14, #0x30]\n"
"mov p0.b, p2.b\n"
- "fmla z28.s, p3/M, z2.s, z9.s\n"
- "fmla z29.s, p3/M, z1.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x23, x9, LSL #2]\n"
- "fmla z30.s, p3/M, z0.s, z11.s\n"
- "fmla z31.s, p3/M, z2.s, z12.s\n"
- "fmla z28.s, p3/M, z8.s, z10.s\n"
- "fmla z29.s, p3/M, z7.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x22, x9, LSL #2]\n"
- "fmla z30.s, p3/M, z3.s, z9.s\n"
- "fmla z31.s, p3/M, z5.s, z10.s\n"
- "fmla z28.s, p3/M, z3.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x21, x9, LSL #2]\n"
- "fmla z29.s, p3/M, z5.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x20, x9, LSL #2]\n"
- "fmla z30.s, p3/M, z7.s, z11.s\n"
- "fmla z31.s, p3/M, z6.s, z11.s\n"
- "fmla z28.s, p3/M, z6.s, z9.s\n"
- "fmla z29.s, p3/M, z8.s, z10.s\n"
- "fmax z28.s, p3/M, z28.s, z17.s\n"
- "fmax z29.s, p3/M, z29.s, z17.s\n"
- "fmla z30.s, p3/M, z8.s, z12.s\n"
- "fmla z31.s, p3/M, z7.s, z12.s\n"
- "fmax z30.s, p3/M, z30.s, z17.s\n"
- "fmax z31.s, p3/M, z31.s, z17.s\n"
- "fmin z28.s, p3/M, z28.s, z16.s\n"
- "fmin z29.s, p3/M, z29.s, z16.s\n"
- "st1w { z28.s }, p0, [x13, x28, LSL #2]\n"
- "fmin z30.s, p3/M, z30.s, z16.s\n"
- "fmin z31.s, p3/M, z31.s, z16.s\n"
- "st1w { z29.s }, p0, [x12, x28, LSL #2]\n"
- "st1w { z30.s }, p0, [x11, x28, LSL #2]\n"
- "st1w { z31.s }, p0, [x10, x28, LSL #2]\n"
+ "movprfx z29, z16\n fmla z29.s, p3/M, z1.s, z9.s\n"
+ "ldr x19, [x14, #0x38]\n"
+ "movprfx z28, z16\n fmla z28.s, p3/M, z0.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x21, x13, LSL #2]\n"
+ "ldr x26, [x14, #0x40]\n"
+ "fmla z31.s, p3/M, z0.s, z10.s\n"
+ "ldr x25, [x14, #0x48]\n"
+ "fmla z30.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x20, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z2.s, z12.s\n"
+ "fmla z28.s, p3/M, z1.s, z12.s\n"
+ "ld1w { z10.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "ldr x24, [x14, #0x50]\n"
+ "fmla z31.s, p3/M, z5.s, z12.s\n"
+ "ldr x23, [x14, #0x58]\n"
+ "fmla z30.s, p3/M, z4.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x19, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z6.s, z9.s\n"
+ "fmla z28.s, p3/M, z3.s, z13.s\n"
+ "ld1w { z9.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "ldr x22, [x14, #0x60]\n"
+ "fmla z31.s, p3/M, z7.s, z13.s\n"
+ "ldr x21, [x14, #0x68]\n"
+ "fmla z30.s, p3/M, z6.s, z13.s\n"
+ "ldr x20, [x14, #0x70]\n"
+ "fmla z29.s, p3/M, z4.s, z13.s\n"
+ "fmla z28.s, p3/M, z8.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x24, x13, LSL #2]\n"
+ "ldr x19, [x14, #0x78]\n"
+ "fmla z31.s, p3/M, z1.s, z12.s\n"
+ "fmla z30.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x23, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z5.s, z10.s\n"
+ "fmla z28.s, p3/M, z4.s, z10.s\n"
+ "fmla z31.s, p3/M, z2.s, z9.s\n"
+ "fmla z30.s, p3/M, z1.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x22, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z0.s, z11.s\n"
+ "fmla z28.s, p3/M, z2.s, z12.s\n"
+ "fmla z31.s, p3/M, z8.s, z10.s\n"
+ "fmla z30.s, p3/M, z7.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x21, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z3.s, z9.s\n"
+ "fmla z31.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x20, x13, LSL #2]\n"
+ "fmla z28.s, p3/M, z5.s, z10.s\n"
+ "fmla z30.s, p3/M, z5.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x19, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z7.s, z11.s\n"
+ "fmla z31.s, p3/M, z6.s, z9.s\n"
+ "fmla z28.s, p3/M, z6.s, z11.s\n"
+ "fmla z30.s, p3/M, z8.s, z10.s\n"
+ "fmla z29.s, p3/M, z8.s, z12.s\n"
+ "fmla z28.s, p3/M, z7.s, z12.s\n"
+ "fmax z31.s, p3/M, z31.s, z18.s\n"
+ "fmax z30.s, p3/M, z30.s, z18.s\n"
+ "fmax z29.s, p3/M, z29.s, z18.s\n"
+ "fmin z31.s, p3/M, z31.s, z17.s\n"
+ "st1w { z31.s }, p0, [x11, x9, LSL #2]\n"
+ "fmin z30.s, p3/M, z30.s, z17.s\n"
+ "fmin z29.s, p3/M, z29.s, z17.s\n"
+ "st1w { z30.s }, p0, [x10, x9, LSL #2]\n"
+ "fmax z28.s, p3/M, z28.s, z18.s\n"
+ "st1w { z29.s }, p0, [x28, x9, LSL #2]\n"
+ "fmin z28.s, p3/M, z28.s, z17.s\n"
+ "st1w { z28.s }, p0, [x27, x9, LSL #2]\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z16", "z17", "z18", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z16", "z17", "z18", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_strided.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_strided.hpp
new file mode 100644
index 0000000000..65cb735bde
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_strided.hpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstdint>
+
+#pragma once
+
+#if defined(ARM_COMPUTE_ENABLE_SVE)
+
+namespace arm_conv {
+namespace depthwise {
+
+void sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_strided_impl(const float *const, const size_t, const size_t, float *const, const size_t, const size_t, const void *, unsigned long, const float, const float);
+
+struct sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_strided
+{
+ typedef float bias_type;
+ typedef float operand_type;
+ typedef float return_type;
+
+ typedef void (*kern_type)(const float *const, const size_t, const size_t, float *const, const size_t, const size_t, const void *, unsigned long, const float, const float);
+
+ constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::SVE;
+
+ constexpr static unsigned int kernel_rows = 3;
+ constexpr static unsigned int kernel_cols = 3;
+
+ constexpr static unsigned int stride_rows = 1;
+ constexpr static unsigned int stride_cols = 1;
+
+ constexpr static unsigned int output_rows = 2;
+ constexpr static unsigned int output_cols = 2;
+
+ constexpr static unsigned int input_rows = 4;
+ constexpr static unsigned int input_cols = 4;
+
+ kern_type kernel = sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_strided_impl;
+
+ sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_strided(const CPUInfo *) {}
+};
+
+} // namespace depthwise
+} // namespace arm_conv
+
+#endif // defined(ARM_COMPUTE_ENABLE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_strided/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_strided/generic.cpp
new file mode 100644
index 0000000000..97c4d88119
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_strided/generic.cpp
@@ -0,0 +1,247 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <cstddef>
+#include <cstdint>
+
+#if defined(ARM_COMPUTE_ENABLE_SVE)
+
+namespace arm_conv {
+namespace depthwise {
+
+void sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_strided_impl(
+ const float *const inptr,
+ const size_t in_row_stride,
+ const size_t in_col_stride,
+ float *const outptr,
+ const size_t out_row_stride,
+ const size_t out_col_stride,
+ const void *params,
+ unsigned long n_channels,
+ const float activation_min,
+ const float activation_max
+)
+{
+ const float minmax_vals[2] = { activation_min, activation_max };
+
+ __asm__ __volatile__(
+ "ptrue p2.b\n"
+ "ld1w { z15.s }, p2/Z, [%x[params]]\n"
+ "mov z14.d, z15.d\n"
+ "ld1w { z13.s }, p2/Z, [%x[params], #1, MUL VL]\n"
+ "whilelt p1.s, XZR, %x[n_channels]\n"
+ "mov z12.d, z15.d\n"
+ "ld1w { z11.s }, p2/Z, [%x[params], #2, MUL VL]\n"
+ "mov x26, %x[inptr]\n"
+ "mov z10.d, z15.d\n"
+ "ld1w { z9.s }, p2/Z, [%x[params], #3, MUL VL]\n"
+ "add x25, x26, %x[in_row_stride], LSL #2\n"
+ "mov z8.d, z15.d\n"
+ "ld1w { z7.s }, p2/Z, [%x[params], #4, MUL VL]\n"
+ "add x24, x25, %x[in_row_stride], LSL #2\n"
+ "ld1w { z6.s }, p2/Z, [%x[params], #5, MUL VL]\n"
+ "add x23, x24, %x[in_row_stride], LSL #2\n"
+ "ld1w { z5.s }, p2/Z, [%x[params], #6, MUL VL]\n"
+ "mov x22, %x[outptr]\n"
+ "ld1w { z4.s }, p2/Z, [%x[params], #7, MUL VL]\n"
+ "add x21, x22, %x[out_row_stride], LSL #2\n"
+ "ld1w { z3.s }, p1/Z, [x26]\n"
+ "add x20, %x[in_col_stride], %x[in_col_stride]\n"
+ "ld1w { z2.s }, p1/Z, [x26, %x[in_col_stride], LSL #2]\n"
+ "add x19, x20, %x[in_col_stride]\n"
+ "ld1w { z1.s }, p1/Z, [x25]\n"
+ "addvl %x[params], %x[params], #16\n"
+ "ld1w { z0.s }, p1/Z, [x25, %x[in_col_stride], LSL #2]\n"
+ "decw %x[n_channels]\n"
+ "ld1w { z31.s }, p2/Z, [%x[params], #-8, MUL VL]\n"
+ "cmp %x[n_channels], XZR\n"
+ "ld1w { z30.s }, p2/Z, [%x[params], #-7, MUL VL]\n"
+ "addvl %x[params], %x[params], #-6\n"
+ "ld1w { z29.s }, p1/Z, [x26, x20, LSL #2]\n"
+ "ld1w { z28.s }, p1/Z, [x25, x20, LSL #2]\n"
+ "ld1w { z27.s }, p1/Z, [x26, x19, LSL #2]\n"
+ "ld1w { z26.s }, p1/Z, [x25, x19, LSL #2]\n"
+ "ld1w { z25.s }, p1/Z, [x24]\n"
+ "ld1w { z24.s }, p1/Z, [x24, %x[in_col_stride], LSL #2]\n"
+ "ld1w { z23.s }, p1/Z, [x24, x20, LSL #2]\n"
+ "ld1w { z22.s }, p1/Z, [x24, x19, LSL #2]\n"
+ "ld1w { z21.s }, p1/Z, [x23]\n"
+ "ld1w { z20.s }, p1/Z, [x23, %x[in_col_stride], LSL #2]\n"
+ "ld1w { z19.s }, p1/Z, [x23, x20, LSL #2]\n"
+ "ld1w { z18.s }, p1/Z, [x23, x19, LSL #2]\n"
+ "ld1rw { z17.s }, p2/Z, [%x[minmax_vals]]\n"
+ "ld1rw { z16.s }, p2/Z, [%x[minmax_vals], #4]\n"
+ "ble 2f\n"
+ "1:" // Loop
+ "fmla z14.s, p2/M, z13.s, z3.s\n"
+ "ld1w { z15.s }, p2/Z, [%x[params]]\n"
+ "addvl x26, x26, #1\n"
+ "fmla z12.s, p2/M, z13.s, z2.s\n"
+ "addvl x25, x25, #1\n"
+ "fmla z10.s, p2/M, z13.s, z1.s\n"
+ "addvl x24, x24, #1\n"
+ "fmla z8.s, p2/M, z13.s, z0.s\n"
+ "ld1w { z13.s }, p2/Z, [%x[params], #1, MUL VL]\n"
+ "addvl x23, x23, #1\n"
+ "fmla z14.s, p2/M, z11.s, z2.s\n"
+ "decw %x[n_channels]\n"
+ "mov p0.b, p1.b\n"
+ "fmla z12.s, p2/M, z11.s, z29.s\n"
+ "fmla z10.s, p2/M, z11.s, z0.s\n"
+ "whilelt p1.s, XZR, %x[n_channels]\n"
+ "ld1w { z3.s }, p1/Z, [x26]\n"
+ "fmla z8.s, p2/M, z11.s, z28.s\n"
+ "cmp %x[n_channels], XZR\n"
+ "fmla z14.s, p2/M, z9.s, z29.s\n"
+ "ld1w { z11.s }, p2/Z, [%x[params], #2, MUL VL]\n"
+ "ld1w { z2.s }, p1/Z, [x26, %x[in_col_stride], LSL #2]\n"
+ "fmla z12.s, p2/M, z9.s, z27.s\n"
+ "fmla z10.s, p2/M, z9.s, z28.s\n"
+ "ld1w { z29.s }, p1/Z, [x26, x20, LSL #2]\n"
+ "ld1w { z27.s }, p1/Z, [x26, x19, LSL #2]\n"
+ "fmla z8.s, p2/M, z9.s, z26.s\n"
+ "ld1w { z9.s }, p2/Z, [%x[params], #3, MUL VL]\n"
+ "fmla z14.s, p2/M, z7.s, z1.s\n"
+ "ld1w { z1.s }, p1/Z, [x25]\n"
+ "fmla z12.s, p2/M, z7.s, z0.s\n"
+ "fmla z10.s, p2/M, z7.s, z25.s\n"
+ "fmla z8.s, p2/M, z7.s, z24.s\n"
+ "ld1w { z7.s }, p2/Z, [%x[params], #4, MUL VL]\n"
+ "fmla z14.s, p2/M, z6.s, z0.s\n"
+ "ld1w { z0.s }, p1/Z, [x25, %x[in_col_stride], LSL #2]\n"
+ "fmla z12.s, p2/M, z6.s, z28.s\n"
+ "fmla z10.s, p2/M, z6.s, z24.s\n"
+ "fmla z8.s, p2/M, z6.s, z23.s\n"
+ "ld1w { z6.s }, p2/Z, [%x[params], #5, MUL VL]\n"
+ "fmla z14.s, p2/M, z5.s, z28.s\n"
+ "ld1w { z28.s }, p1/Z, [x25, x20, LSL #2]\n"
+ "fmla z12.s, p2/M, z5.s, z26.s\n"
+ "ld1w { z26.s }, p1/Z, [x25, x19, LSL #2]\n"
+ "fmla z10.s, p2/M, z5.s, z23.s\n"
+ "fmla z8.s, p2/M, z5.s, z22.s\n"
+ "ld1w { z5.s }, p2/Z, [%x[params], #6, MUL VL]\n"
+ "fmla z14.s, p2/M, z4.s, z25.s\n"
+ "ld1w { z25.s }, p1/Z, [x24]\n"
+ "fmla z12.s, p2/M, z4.s, z24.s\n"
+ "fmla z10.s, p2/M, z4.s, z21.s\n"
+ "ld1w { z21.s }, p1/Z, [x23]\n"
+ "fmla z8.s, p2/M, z4.s, z20.s\n"
+ "ld1w { z4.s }, p2/Z, [%x[params], #7, MUL VL]\n"
+ "addvl %x[params], %x[params], #16\n"
+ "fmla z14.s, p2/M, z31.s, z24.s\n"
+ "ld1w { z24.s }, p1/Z, [x24, %x[in_col_stride], LSL #2]\n"
+ "fmla z12.s, p2/M, z31.s, z23.s\n"
+ "fmla z10.s, p2/M, z31.s, z20.s\n"
+ "ld1w { z20.s }, p1/Z, [x23, %x[in_col_stride], LSL #2]\n"
+ "fmla z8.s, p2/M, z31.s, z19.s\n"
+ "ld1w { z31.s }, p2/Z, [%x[params], #-8, MUL VL]\n"
+ "fmla z14.s, p2/M, z30.s, z23.s\n"
+ "ld1w { z23.s }, p1/Z, [x24, x20, LSL #2]\n"
+ "fmla z12.s, p2/M, z30.s, z22.s\n"
+ "ld1w { z22.s }, p1/Z, [x24, x19, LSL #2]\n"
+ "fmla z10.s, p2/M, z30.s, z19.s\n"
+ "ld1w { z19.s }, p1/Z, [x23, x20, LSL #2]\n"
+ "fmla z8.s, p2/M, z30.s, z18.s\n"
+ "ld1w { z30.s }, p2/Z, [%x[params], #-7, MUL VL]\n"
+ "addvl %x[params], %x[params], #-6\n"
+ "fmax z14.s, p2/M, z14.s, z17.s\n"
+ "ld1w { z18.s }, p1/Z, [x23, x19, LSL #2]\n"
+ "fmax z12.s, p2/M, z12.s, z17.s\n"
+ "fmax z10.s, p2/M, z10.s, z17.s\n"
+ "fmax z8.s, p2/M, z8.s, z17.s\n"
+ "fmin z14.s, p2/M, z14.s, z16.s\n"
+ "st1w { z14.s }, p0, [x22]\n"
+ "mov z14.d, z15.d\n"
+ "fmin z12.s, p2/M, z12.s, z16.s\n"
+ "st1w { z12.s }, p0, [x22, %x[out_col_stride], LSL #2]\n"
+ "mov z12.d, z15.d\n"
+ "addvl x22, x22, #1\n"
+ "fmin z10.s, p2/M, z10.s, z16.s\n"
+ "st1w { z10.s }, p0, [x21]\n"
+ "mov z10.d, z15.d\n"
+ "fmin z8.s, p2/M, z8.s, z16.s\n"
+ "st1w { z8.s }, p0, [x21, %x[out_col_stride], LSL #2]\n"
+ "mov z8.d, z15.d\n"
+ "addvl x21, x21, #1\n"
+ "bgt 1b\n"
+ "2:" // Tail
+ "fmla z14.s, p2/M, z13.s, z3.s\n"
+ "mov p0.b, p1.b\n"
+ "fmla z12.s, p2/M, z13.s, z2.s\n"
+ "fmla z10.s, p2/M, z13.s, z1.s\n"
+ "fmla z8.s, p2/M, z13.s, z0.s\n"
+ "fmla z14.s, p2/M, z11.s, z2.s\n"
+ "fmla z12.s, p2/M, z11.s, z29.s\n"
+ "fmla z10.s, p2/M, z11.s, z0.s\n"
+ "fmla z8.s, p2/M, z11.s, z28.s\n"
+ "fmla z14.s, p2/M, z9.s, z29.s\n"
+ "fmla z12.s, p2/M, z9.s, z27.s\n"
+ "fmla z10.s, p2/M, z9.s, z28.s\n"
+ "fmla z8.s, p2/M, z9.s, z26.s\n"
+ "fmla z14.s, p2/M, z7.s, z1.s\n"
+ "fmla z12.s, p2/M, z7.s, z0.s\n"
+ "fmla z10.s, p2/M, z7.s, z25.s\n"
+ "fmla z8.s, p2/M, z7.s, z24.s\n"
+ "fmla z14.s, p2/M, z6.s, z0.s\n"
+ "fmla z12.s, p2/M, z6.s, z28.s\n"
+ "fmla z10.s, p2/M, z6.s, z24.s\n"
+ "fmla z8.s, p2/M, z6.s, z23.s\n"
+ "fmla z14.s, p2/M, z5.s, z28.s\n"
+ "fmla z12.s, p2/M, z5.s, z26.s\n"
+ "fmla z10.s, p2/M, z5.s, z23.s\n"
+ "fmla z8.s, p2/M, z5.s, z22.s\n"
+ "fmla z14.s, p2/M, z4.s, z25.s\n"
+ "fmla z12.s, p2/M, z4.s, z24.s\n"
+ "fmla z10.s, p2/M, z4.s, z21.s\n"
+ "fmla z8.s, p2/M, z4.s, z20.s\n"
+ "fmla z14.s, p2/M, z31.s, z24.s\n"
+ "fmla z12.s, p2/M, z31.s, z23.s\n"
+ "fmla z10.s, p2/M, z31.s, z20.s\n"
+ "fmla z8.s, p2/M, z31.s, z19.s\n"
+ "fmla z14.s, p2/M, z30.s, z23.s\n"
+ "fmla z12.s, p2/M, z30.s, z22.s\n"
+ "fmla z10.s, p2/M, z30.s, z19.s\n"
+ "fmla z8.s, p2/M, z30.s, z18.s\n"
+ "fmax z14.s, p2/M, z14.s, z17.s\n"
+ "fmax z12.s, p2/M, z12.s, z17.s\n"
+ "fmax z10.s, p2/M, z10.s, z17.s\n"
+ "fmax z8.s, p2/M, z8.s, z17.s\n"
+ "fmin z14.s, p2/M, z14.s, z16.s\n"
+ "st1w { z14.s }, p0, [x22]\n"
+ "fmin z12.s, p2/M, z12.s, z16.s\n"
+ "fmin z10.s, p2/M, z10.s, z16.s\n"
+ "st1w { z12.s }, p0, [x22, %x[out_col_stride], LSL #2]\n"
+ "fmin z8.s, p2/M, z8.s, z16.s\n"
+ "st1w { z10.s }, p0, [x21]\n"
+ "st1w { z8.s }, p0, [x21, %x[out_col_stride], LSL #2]\n"
+ : [n_channels] "+r" (n_channels), [params] "+r" (params)
+ : [in_col_stride] "r" (in_col_stride), [in_row_stride] "r" (in_row_stride), [inptr] "r" (inptr), [minmax_vals] "r" (minmax_vals), [out_col_stride] "r" (out_col_stride), [out_row_stride] "r" (out_row_stride), [outptr] "r" (outptr)
+ : "cc", "memory", "p0", "p1", "p2", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+}
+
+} // namespace depthwise
+} // namespace arm_conv
+
+#endif // defined(ARM_COMPUTE_ENABLE_SVE)
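
(Reading note: per SVE vector of channels, the kernel above performs a plain 3x3 depthwise multiply-accumulate over a 4x4 NHWC window, then clamps with fmax/fmin against the broadcast minmax_vals pair; the split between the "Loop" and "Tail" labels is software pipelining of the same arithmetic across whilelt-predicated channel vectors. A scalar model of that arithmetic is sketched below; the bias-then-nine-weights `params` layout mirrors the order the asm loads into z15 and the weight registers, but is an assumption for illustration.)

#include <algorithm>
#include <cstddef>

// Scalar reference for one tile: out[oi][oj][c] =
//   clamp(bias[c] + sum_{ki,kj} w[ki][kj][c] * in[oi+ki][oj+kj][c], min, max)
void reference_tile(const float *in, std::size_t in_row_stride, std::size_t in_col_stride,
                    float *out, std::size_t out_row_stride, std::size_t out_col_stride,
                    const float *bias,
                    const float *weights,  // weights[(ki*3+kj)*n_channels + c], assumed layout
                    unsigned long n_channels, float act_min, float act_max)
{
    for (unsigned long c = 0; c < n_channels; c++)
        for (int oi = 0; oi < 2; oi++)
            for (int oj = 0; oj < 2; oj++)
            {
                float acc = bias[c];
                for (int ki = 0; ki < 3; ki++)
                    for (int kj = 0; kj < 3; kj++)
                        acc += weights[(ki * 3 + kj) * n_channels + c]
                             * in[(oi + ki) * in_row_stride + (oj + kj) * in_col_stride + c];
                out[oi * out_row_stride + oj * out_col_stride + c] =
                    std::min(std::max(acc, act_min), act_max);
            }
}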
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
index cda34358f5..c485b7dde0 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -88,369 +88,369 @@ void sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
__asm__ __volatile__(
"ptrue p3.b\n"
- "mov x13, #0x0\n"
- "mov x8, #0x0\n"
+ "mov x6, #0x0\n"
+ "mov x7, #0x0\n"
"1:" // Tile loop
- "str x13, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "mov x25, #0x3\n"
+ "str x6, [%x[params_struct], %[offsetof_args_tile_i]]\n"
"mov x24, #0x3\n"
- "str x8, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x23, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x17, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "mul x22, x13, x23\n" // offset = tile_i * ld_input_row
- "ldr x21, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "madd x22, x8, x17, x22\n" // offset += tile_j * ld_input_col
- "ldr x16, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "cntw x15\n"
- "mul x20, x13, x21\n" // offset = tile_i * ld_output_row
+ "str x7, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "mov x23, #0x3\n"
+ "ldr x8, [%x[params_struct], %[offsetof_args_params]]\n"
+ "mov x17, #0x0\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "cntw x16\n"
+ "ldr x15, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "sub x21, XZR, x16\n"
"ldr x14, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "ldr x13, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x12, x17, x17\n"
- "mul x22, x22, x25\n" // offset *= kernel_stride * output_size
- "add x14, x14, x22, LSL #2\n" // inptr[0] += offset * sizeof(float)
- "ldr x11, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "add x10, x14, x23, LSL #2\n"
- "madd x20, x8, x16, x20\n" // offset += tile_j * ld_output_col
- "add x9, x10, x23, LSL #2\n"
+ "mul x19, x6, x22\n" // offset = tile_i * ld_input_row
+ "ldr x20, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "madd x19, x7, x15, x19\n" // offset += tile_j * ld_input_col
+ "ldr x13, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+ "mul x19, x19, x24\n" // offset *= kernel_stride * output_size
+ "ldr x12, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "add x14, x14, x19, LSL #2\n" // inptr[0] += offset * sizeof(float)
+ "ld1rw { z18.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "add x11, x14, x22, LSL #2\n"
+ "ld1rw { z17.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "add x10, x11, x22, LSL #2\n"
+ "ld1w { z16.s }, p3/Z, [x8]\n"
+ "add x9, x10, x22, LSL #2\n"
+ "ld1w { z0.s }, p3/Z, [x8, #1, MUL VL]\n"
+ "add x28, x9, x22, LSL #2\n"
+ "ld1w { z1.s }, p3/Z, [x8, #2, MUL VL]\n"
+ "add x27, x15, x15\n"
+ "ld1w { z2.s }, p3/Z, [x8, #3, MUL VL]\n"
+ "add x26, x27, x15\n"
+ "ld1w { z3.s }, p3/Z, [x8, #4, MUL VL]\n"
+ "add x25, x26, x15\n"
+ "ld1w { z4.s }, p3/Z, [x8, #5, MUL VL]\n"
+ "mul x19, x6, x20\n" // offset = tile_i * ld_output_row
+ "ld1w { z5.s }, p3/Z, [x8, #6, MUL VL]\n"
+ "madd x19, x7, x13, x19\n" // offset += tile_j * ld_output_col
+ "ld1w { z6.s }, p3/Z, [x8, #7, MUL VL]\n"
+ "mul x19, x19, x23\n" // offset *= output_tile_size
+ "add x24, x13, x13\n"
+ "add x12, x12, x19, LSL #2\n" // outptrs[0] += offset * sizeof(float)
+ "add x23, x12, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
"whilelt p2.s, XZR, %x[n_channels]\n"
- "ld1w { z18.s }, p3/Z, [x13]\n"
- "mul x20, x20, x24\n" // offset *= output_tile_size
- "ld1w { z0.s }, p3/Z, [x13, #1, MUL VL]\n"
- "ld1w { z1.s }, p3/Z, [x13, #2, MUL VL]\n"
- "add x28, x9, x23, LSL #2\n"
- "ld1w { z2.s }, p3/Z, [x13, #3, MUL VL]\n"
- "ld1w { z3.s }, p3/Z, [x13, #4, MUL VL]\n"
- "add x27, x12, x17\n"
- "add x11, x11, x20, LSL #2\n" // outptrs[0] += offset * sizeof(float)
- "ld1w { z4.s }, p3/Z, [x13, #5, MUL VL]\n"
- "ld1w { z5.s }, p3/Z, [x13, #6, MUL VL]\n"
- "add x26, x28, x23, LSL #2\n"
- "add x25, x27, x17\n"
- "ld1w { z6.s }, p3/Z, [x13, #7, MUL VL]\n"
- "addvl x13, x13, #16\n"
- "add x24, x11, x21, LSL #2\n"
- "ld1rw { z17.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "cmp x15, %x[n_channels]\n"
- "add x23, x24, x21, LSL #2\n"
- "ld1rw { z16.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "ld1w { z7.s }, p3/Z, [x13, #-8, MUL VL]\n"
- "add x22, x16, x16\n"
- "mov x21, #0x0\n"
- "ld1w { z8.s }, p3/Z, [x13, #-7, MUL VL]\n"
- "ld1w { z9.s }, p2/Z, [x9, x12, LSL #2]\n"
- "sub x20, XZR, x15\n"
+ "ld1w { z9.s }, p2/Z, [x10, x27, LSL #2]\n"
"ld1w { z10.s }, p2/Z, [x14]\n"
+ "addvl x8, x8, #16\n"
"ld1w { z11.s }, p2/Z, [x14, x25, LSL #2]\n"
- "addvl x13, x13, #-6\n"
- "ld1w { z12.s }, p2/Z, [x26]\n"
- "ld1w { z13.s }, p2/Z, [x10, x12, LSL #2]\n"
+ "cmp x16, %x[n_channels]\n"
+ "ld1w { z7.s }, p3/Z, [x8, #-8, MUL VL]\n"
+ "ld1w { z8.s }, p3/Z, [x8, #-7, MUL VL]\n"
+ "addvl x8, x8, #-6\n"
+ "ld1w { z12.s }, p2/Z, [x28]\n"
+ "ld1w { z13.s }, p2/Z, [x11, x27, LSL #2]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
- "movprfx z24, z18\n fmla z24.s, p3/M, z7.s, z9.s\n"
- "movprfx z23, z18\n fmla z23.s, p3/M, z8.s, z9.s\n"
- "whilelt p1.s, x15, %x[n_channels]\n"
+ "movprfx z31, z16\n fmla z31.s, p3/M, z8.s, z9.s\n"
+ "whilelt p1.s, x16, %x[n_channels]\n"
+ "movprfx z30, z16\n fmla z30.s, p3/M, z7.s, z9.s\n"
"incw x21\n"
- "movprfx z25, z18\n fmla z25.s, p3/M, z6.s, z9.s\n"
- "fmla z24.s, p3/M, z4.s, z13.s\n"
- "incw x15\n"
+ "movprfx z29, z16\n fmla z29.s, p3/M, z6.s, z9.s\n"
"mov p0.b, p2.b\n"
- "movprfx z26, z18\n fmla z26.s, p3/M, z5.s, z9.s\n"
- "movprfx z27, z18\n fmla z27.s, p3/M, z4.s, z9.s\n"
- "incw x20\n"
- "movprfx z28, z18\n fmla z28.s, p3/M, z3.s, z9.s\n"
- "fmla z23.s, p3/M, z0.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x9, x27, LSL #2]\n"
- "fmla z25.s, p3/M, z2.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x9, x17, LSL #2]\n"
- "movprfx z29, z18\n fmla z29.s, p3/M, z2.s, z9.s\n"
- "fmla z24.s, p3/M, z6.s, z11.s\n"
- "movprfx z31, z18\n fmla z31.s, p3/M, z0.s, z9.s\n"
- "fmla z23.s, p3/M, z5.s, z13.s\n"
- "fmla z25.s, p3/M, z3.s, z13.s\n"
- "fmla z26.s, p3/M, z2.s, z13.s\n"
+ "movprfx z28, z16\n fmla z28.s, p3/M, z5.s, z9.s\n"
+ "incw x17\n"
+ "movprfx z27, z16\n fmla z27.s, p3/M, z4.s, z9.s\n"
+ "incw x16\n"
+ "movprfx z26, z16\n fmla z26.s, p3/M, z3.s, z9.s\n"
+ "movprfx z25, z16\n fmla z25.s, p3/M, z2.s, z9.s\n"
+ "movprfx z24, z16\n fmla z24.s, p3/M, z1.s, z9.s\n"
+ "movprfx z23, z16\n fmla z23.s, p3/M, z0.s, z9.s\n"
+ "ld1w { z16.s }, p3/Z, [x8]\n"
+ "fmla z31.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x10, x26, LSL #2]\n"
+ "fmla z29.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x10, x15, LSL #2]\n"
+ "fmla z25.s, p3/M, z6.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x28, x25, LSL #2]\n"
+ "fmla z30.s, p3/M, z4.s, z13.s\n"
+ "fmla z31.s, p3/M, z5.s, z13.s\n"
+ "fmla z29.s, p3/M, z3.s, z13.s\n"
+ "fmla z28.s, p3/M, z2.s, z13.s\n"
"fmla z27.s, p3/M, z1.s, z13.s\n"
- "fmla z28.s, p3/M, z0.s, z13.s\n"
- "ld1w { z13.s }, p2/Z, [x14, x17, LSL #2]\n"
- "fmla z29.s, p3/M, z6.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x26, x25, LSL #2]\n"
- "movprfx z30, z18\n fmla z30.s, p3/M, z1.s, z9.s\n"
- "fmla z24.s, p3/M, z0.s, z13.s\n"
- "ld1w { z18.s }, p3/Z, [x13]\n"
- "fmla z31.s, p3/M, z8.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x14, x27, LSL #2]\n"
- "fmla z23.s, p3/M, z7.s, z11.s\n"
- "fmla z30.s, p3/M, z0.s, z11.s\n"
- "fmla z26.s, p3/M, z4.s, z11.s\n"
+ "fmla z26.s, p3/M, z0.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x14, x15, LSL #2]\n"
+ "fmla z23.s, p3/M, z8.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x14, x26, LSL #2]\n"
+ "fmla z31.s, p3/M, z7.s, z11.s\n"
+ "fmla z30.s, p3/M, z6.s, z11.s\n"
+ "fmla z28.s, p3/M, z4.s, z11.s\n"
"fmla z27.s, p3/M, z3.s, z11.s\n"
- "fmla z29.s, p3/M, z1.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x10]\n"
- "fmla z24.s, p3/M, z2.s, z12.s\n"
- "fmla z25.s, p3/M, z1.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x28]\n"
- "fmla z28.s, p3/M, z4.s, z10.s\n"
- "fmla z23.s, p3/M, z1.s, z13.s\n"
- "ld1w { z13.s }, p2/Z, [x10, x25, LSL #2]\n"
- "fmla z30.s, p3/M, z2.s, z10.s\n"
- "fmla z31.s, p3/M, z1.s, z10.s\n"
- "fmla z24.s, p3/M, z8.s, z10.s\n"
- "fmla z25.s, p3/M, z7.s, z10.s\n"
+ "fmla z25.s, p3/M, z1.s, z11.s\n"
+ "fmla z24.s, p3/M, z0.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x11]\n"
+ "fmla z31.s, p3/M, z1.s, z13.s\n"
+ "fmla z30.s, p3/M, z0.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x11, x25, LSL #2]\n"
+ "fmla z29.s, p3/M, z1.s, z12.s\n"
"fmla z27.s, p3/M, z5.s, z10.s\n"
- "fmla z26.s, p3/M, z0.s, z11.s\n"
- "ld1w { z10.s }, p2/Z, [x28, x12, LSL #2]\n"
- "fmla z29.s, p3/M, z3.s, z12.s\n"
- "fmla z28.s, p3/M, z2.s, z13.s\n"
- "fmla z30.s, p3/M, z4.s, z10.s\n"
- "fmla z31.s, p3/M, z3.s, z10.s\n"
- "fmla z23.s, p3/M, z3.s, z11.s\n"
- "fmla z25.s, p3/M, z5.s, z13.s\n"
- "ld1w { z11.s }, p2/Z, [x28, x25, LSL #2]\n"
- "ld1w { z13.s }, p2/Z, [x26, x17, LSL #2]\n"
- "fmla z26.s, p3/M, z6.s, z12.s\n"
+ "fmla z26.s, p3/M, z4.s, z10.s\n"
+ "fmla z30.s, p3/M, z2.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x9]\n"
+ "fmla z29.s, p3/M, z7.s, z10.s\n"
+ "fmla z24.s, p3/M, z2.s, z10.s\n"
+ "fmla z23.s, p3/M, z1.s, z10.s\n"
+ "fmla z30.s, p3/M, z8.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x9, x27, LSL #2]\n"
+ "fmla z31.s, p3/M, z3.s, z11.s\n"
+ "fmla z28.s, p3/M, z0.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x9, x25, LSL #2]\n"
+ "fmla z29.s, p3/M, z5.s, z13.s\n"
+ "fmla z26.s, p3/M, z2.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x28, x15, LSL #2]\n"
+ "fmla z25.s, p3/M, z3.s, z12.s\n"
+ "fmla z28.s, p3/M, z6.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x11, x15, LSL #2]\n"
"fmla z27.s, p3/M, z7.s, z10.s\n"
- "ld1w { z12.s }, p2/Z, [x10, x17, LSL #2]\n"
- "fmla z29.s, p3/M, z5.s, z10.s\n"
- "fmla z28.s, p3/M, z6.s, z10.s\n"
- "fmla z31.s, p3/M, z5.s, z11.s\n"
- "fmla z30.s, p3/M, z6.s, z13.s\n"
- "fmla z26.s, p3/M, z8.s, z10.s\n"
- "fmla z29.s, p3/M, z7.s, z13.s\n"
- "ld1w { z13.s }, p2/Z, [x26, x27, LSL #2]\n"
- "fmla z24.s, p3/M, z3.s, z12.s\n"
+ "fmla z26.s, p3/M, z6.s, z10.s\n"
+ "fmla z25.s, p3/M, z5.s, z10.s\n"
+ "fmla z28.s, p3/M, z8.s, z10.s\n"
+ "fmla z24.s, p3/M, z4.s, z10.s\n"
+ "fmla z23.s, p3/M, z3.s, z10.s\n"
+ "fmla z26.s, p3/M, z8.s, z11.s\n"
+ "fmla z25.s, p3/M, z7.s, z13.s\n"
+ "fmla z24.s, p3/M, z6.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x28, x26, LSL #2]\n"
+ "fmla z23.s, p3/M, z5.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x11, x26, LSL #2]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z31.s, p3/M, z4.s, z12.s\n"
+ "fmla z30.s, p3/M, z3.s, z12.s\n"
+ "fmla z28.s, p3/M, z1.s, z12.s\n"
"fmla z27.s, p3/M, z0.s, z12.s\n"
- "fmla z28.s, p3/M, z8.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x10, x27, LSL #2]\n"
- "fmla z30.s, p3/M, z8.s, z13.s\n"
- "addvl x10, x10, #1\n"
- "fmla z31.s, p3/M, z7.s, z13.s\n"
- "fmla z23.s, p3/M, z4.s, z12.s\n"
- "ld1w { z13.s }, p2/Z, [x28, x27, LSL #2]\n"
- "fmla z26.s, p3/M, z1.s, z12.s\n"
- "fmla z24.s, p3/M, z5.s, z11.s\n"
- "ld1w { z12.s }, p2/Z, [x28, x17, LSL #2]\n"
- "addvl x28, x28, #1\n"
- "fmla z25.s, p3/M, z4.s, z11.s\n"
+ "ld1w { z12.s }, p2/Z, [x9, x15, LSL #2]\n"
+ "fmla z29.s, p3/M, z4.s, z11.s\n"
+ "fmla z30.s, p3/M, z5.s, z11.s\n"
+ "fmla z26.s, p3/M, z1.s, z11.s\n"
"fmla z27.s, p3/M, z2.s, z11.s\n"
- "fmla z28.s, p3/M, z1.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x14, x12, LSL #2]\n"
- "fmla z29.s, p3/M, z4.s, z12.s\n"
+ "ld1w { z11.s }, p2/Z, [x14, x27, LSL #2]\n"
"addvl x14, x14, #1\n"
- "fmla z30.s, p3/M, z3.s, z12.s\n"
- "fmla z31.s, p3/M, z4.s, z13.s\n"
- "ld1w { z4.s }, p3/Z, [x13, #5, MUL VL]\n"
+ "fmla z24.s, p3/M, z8.s, z13.s\n"
"ld1w { z10.s }, p1/Z, [x14]\n"
- "fmla z26.s, p3/M, z7.s, z12.s\n"
- "fmla z27.s, p3/M, z6.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x9]\n"
- "fmla z23.s, p3/M, z2.s, z11.s\n"
- "fmla z24.s, p3/M, z1.s, z11.s\n"
- "fmax z24.s, p3/M, z24.s, z17.s\n"
- "ld1w { z1.s }, p3/Z, [x13, #2, MUL VL]\n"
- "fmla z25.s, p3/M, z0.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x9, x25, LSL #2]\n"
- "fmla z28.s, p3/M, z7.s, z13.s\n"
+ "fmla z23.s, p3/M, z7.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x9, x26, LSL #2]\n"
"addvl x9, x9, #1\n"
- "fmla z30.s, p3/M, z5.s, z13.s\n"
- "fmla z29.s, p3/M, z0.s, z12.s\n"
- "ld1w { z0.s }, p3/Z, [x13, #1, MUL VL]\n"
- "fmin z24.s, p3/M, z24.s, z16.s\n"
+ "fmla z28.s, p3/M, z7.s, z12.s\n"
+ "fmla z27.s, p3/M, z6.s, z12.s\n"
+ "fmla z25.s, p3/M, z4.s, z12.s\n"
+ "fmla z24.s, p3/M, z3.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x10]\n"
"fmla z31.s, p3/M, z2.s, z11.s\n"
+ "fmla z30.s, p3/M, z1.s, z11.s\n"
+ "ld1w { z1.s }, p3/Z, [x8, #2, MUL VL]\n"
+ "fmla z29.s, p3/M, z0.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x10, x25, LSL #2]\n"
+ "addvl x10, x10, #1\n"
"fmla z27.s, p3/M, z8.s, z13.s\n"
- "ld1w { z13.s }, p2/Z, [x26, x12, LSL #2]\n"
- "fmax z27.s, p3/M, z27.s, z17.s\n"
- "fmla z23.s, p3/M, z6.s, z12.s\n"
- "fmla z26.s, p3/M, z3.s, z12.s\n"
- "fmax z23.s, p3/M, z23.s, z17.s\n"
- "fmax z26.s, p3/M, z26.s, z17.s\n"
- "fmla z25.s, p3/M, z8.s, z11.s\n"
- "fmla z28.s, p3/M, z5.s, z11.s\n"
- "fmax z25.s, p3/M, z25.s, z17.s\n"
- "fmax z28.s, p3/M, z28.s, z17.s\n"
- "fmla z29.s, p3/M, z8.s, z13.s\n"
- "fmla z30.s, p3/M, z7.s, z13.s\n"
- "fmax z29.s, p3/M, z29.s, z17.s\n"
- "fmax z30.s, p3/M, z30.s, z17.s\n"
- "fmla z31.s, p3/M, z6.s, z13.s\n"
- "fmax z31.s, p3/M, z31.s, z17.s\n"
- "addvl x26, x26, #1\n"
- "ld1w { z2.s }, p3/Z, [x13, #3, MUL VL]\n"
- "ld1w { z3.s }, p3/Z, [x13, #4, MUL VL]\n"
- "ld1w { z5.s }, p3/Z, [x13, #6, MUL VL]\n"
- "whilelt p2.s, x21, %x[n_channels]\n"
- "cmp x15, %x[n_channels]\n"
- "ld1w { z6.s }, p3/Z, [x13, #7, MUL VL]\n"
- "addvl x13, x13, #16\n"
- "fmin z23.s, p3/M, z23.s, z16.s\n"
- "ld1w { z9.s }, p1/Z, [x9, x12, LSL #2]\n"
- "fmin z25.s, p3/M, z25.s, z16.s\n"
- "fmin z26.s, p3/M, z26.s, z16.s\n"
+ "ld1w { z9.s }, p1/Z, [x10, x27, LSL #2]\n"
+ "fmla z26.s, p3/M, z7.s, z13.s\n"
+ "fmla z24.s, p3/M, z5.s, z13.s\n"
+ "fmla z23.s, p3/M, z4.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x28, x27, LSL #2]\n"
+ "whilelt p2.s, x17, %x[n_channels]\n"
+ "fmla z31.s, p3/M, z6.s, z12.s\n"
+ "ld1w { z4.s }, p3/Z, [x8, #5, MUL VL]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z28.s, p3/M, z3.s, z12.s\n"
+ "ld1w { z3.s }, p3/Z, [x8, #4, MUL VL]\n"
+ "cmp x16, %x[n_channels]\n"
+ "fmla z25.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z12.s }, p1/Z, [x28]\n"
+ "fmla z29.s, p3/M, z8.s, z11.s\n"
+ "ld1w { z0.s }, p3/Z, [x8, #1, MUL VL]\n"
+ "fmla z26.s, p3/M, z5.s, z11.s\n"
+ "ld1w { z5.s }, p3/Z, [x8, #6, MUL VL]\n"
+ "fmla z23.s, p3/M, z2.s, z11.s\n"
"ld1w { z11.s }, p1/Z, [x14, x25, LSL #2]\n"
- "ld1w { z12.s }, p1/Z, [x26]\n"
- "fmin z27.s, p3/M, z27.s, z16.s\n"
- "fmin z28.s, p3/M, z28.s, z16.s\n"
- "ld1w { z13.s }, p1/Z, [x10, x12, LSL #2]\n"
- "st1w { z23.s }, p0, [x11]\n"
- "fmin z29.s, p3/M, z29.s, z16.s\n"
- "fmin z30.s, p3/M, z30.s, z16.s\n"
- "st1w { z24.s }, p0, [x11, x16, LSL #2]\n"
- "ld1w { z7.s }, p3/Z, [x13, #-8, MUL VL]\n"
- "fmin z31.s, p3/M, z31.s, z16.s\n"
- "st1w { z25.s }, p0, [x11, x22, LSL #2]\n"
- "addvl x11, x11, #1\n"
- "ld1w { z8.s }, p3/Z, [x13, #-7, MUL VL]\n"
- "st1w { z26.s }, p0, [x24]\n"
- "addvl x13, x13, #-6\n"
- "st1w { z27.s }, p0, [x24, x16, LSL #2]\n"
- "st1w { z28.s }, p0, [x24, x22, LSL #2]\n"
- "addvl x24, x24, #1\n"
- "st1w { z29.s }, p0, [x23]\n"
- "st1w { z30.s }, p0, [x23, x16, LSL #2]\n"
- "st1w { z31.s }, p0, [x23, x22, LSL #2]\n"
+ "fmla z24.s, p3/M, z7.s, z13.s\n"
+ "ld1w { z2.s }, p3/Z, [x8, #3, MUL VL]\n"
+ "fmla z25.s, p3/M, z8.s, z13.s\n"
+ "fmax z31.s, p3/M, z31.s, z18.s\n"
+ "fmla z23.s, p3/M, z6.s, z13.s\n"
+ "ld1w { z13.s }, p1/Z, [x11, x27, LSL #2]\n"
+ "fmax z30.s, p3/M, z30.s, z18.s\n"
+ "ld1w { z6.s }, p3/Z, [x8, #7, MUL VL]\n"
+ "addvl x8, x8, #16\n"
+ "fmin z31.s, p3/M, z31.s, z17.s\n"
+ "ld1w { z7.s }, p3/Z, [x8, #-8, MUL VL]\n"
+ "fmax z29.s, p3/M, z29.s, z18.s\n"
+ "ld1w { z8.s }, p3/Z, [x8, #-7, MUL VL]\n"
+ "addvl x8, x8, #-6\n"
+ "fmin z30.s, p3/M, z30.s, z17.s\n"
+ "st1w { z31.s }, p0, [x12]\n"
+ "fmin z29.s, p3/M, z29.s, z17.s\n"
+ "fmax z28.s, p3/M, z28.s, z18.s\n"
+ "st1w { z30.s }, p0, [x12, x13, LSL #2]\n"
+ "fmax z27.s, p3/M, z27.s, z18.s\n"
+ "fmax z26.s, p3/M, z26.s, z18.s\n"
+ "st1w { z29.s }, p0, [x12, x24, LSL #2]\n"
+ "fmin z28.s, p3/M, z28.s, z17.s\n"
+ "addvl x12, x12, #1\n"
+ "fmax z25.s, p3/M, z25.s, z18.s\n"
+ "st1w { z28.s }, p0, [x23]\n"
+ "fmin z27.s, p3/M, z27.s, z17.s\n"
+ "fmin z26.s, p3/M, z26.s, z17.s\n"
+ "st1w { z27.s }, p0, [x23, x13, LSL #2]\n"
+ "fmin z25.s, p3/M, z25.s, z17.s\n"
+ "fmax z24.s, p3/M, z24.s, z18.s\n"
+ "st1w { z26.s }, p0, [x23, x24, LSL #2]\n"
"addvl x23, x23, #1\n"
+ "fmax z23.s, p3/M, z23.s, z18.s\n"
+ "st1w { z25.s }, p0, [x22]\n"
+ "fmin z24.s, p3/M, z24.s, z17.s\n"
+ "fmin z23.s, p3/M, z23.s, z17.s\n"
+ "st1w { z24.s }, p0, [x22, x13, LSL #2]\n"
+ "st1w { z23.s }, p0, [x22, x24, LSL #2]\n"
+ "addvl x22, x22, #1\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
- "movprfx z24, z18\n fmla z24.s, p3/M, z7.s, z9.s\n"
- "movprfx z23, z18\n fmla z23.s, p3/M, z8.s, z9.s\n"
- "ldr x8, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x13, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "movprfx z25, z18\n fmla z25.s, p3/M, z6.s, z9.s\n"
- "fmla z24.s, p3/M, z4.s, z13.s\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "add x8, x8, #0x1\n"
- "movprfx z26, z18\n fmla z26.s, p3/M, z5.s, z9.s\n"
- "movprfx z27, z18\n fmla z27.s, p3/M, z4.s, z9.s\n"
- "cmp x8, x20\n"
- "add x21, x13, #0x1\n"
- "movprfx z28, z18\n fmla z28.s, p3/M, z3.s, z9.s\n"
- "fmla z23.s, p3/M, z0.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x9, x27, LSL #2]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "fmla z25.s, p3/M, z2.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x9, x17, LSL #2]\n"
- "movprfx z29, z18\n fmla z29.s, p3/M, z2.s, z9.s\n"
- "csel x13, x13, x21, LT\n"
- "fmla z24.s, p3/M, z6.s, z11.s\n"
- "movprfx z31, z18\n fmla z31.s, p3/M, z0.s, z9.s\n"
+ "movprfx z31, z16\n fmla z31.s, p3/M, z8.s, z9.s\n"
+ "ldr x6, [%x[params_struct], %[offsetof_args_tile_i]]\n"
"mov p0.b, p2.b\n"
- "csel x8, x8, XZR, LT\n"
- "fmla z23.s, p3/M, z5.s, z13.s\n"
- "fmla z25.s, p3/M, z3.s, z13.s\n"
- "cmp x13, x20\n"
- "fmla z26.s, p3/M, z2.s, z13.s\n"
+ "movprfx z30, z16\n fmla z30.s, p3/M, z7.s, z9.s\n"
+ "ldr x7, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "add x21, x6, #0x1\n"
+ "movprfx z29, z16\n fmla z29.s, p3/M, z6.s, z9.s\n"
+ "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "movprfx z28, z16\n fmla z28.s, p3/M, z5.s, z9.s\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "add x7, x7, #0x1\n"
+ "movprfx z27, z16\n fmla z27.s, p3/M, z4.s, z9.s\n"
+ "cmp x7, x19\n"
+ "movprfx z26, z16\n fmla z26.s, p3/M, z3.s, z9.s\n"
+ "movprfx z25, z16\n fmla z25.s, p3/M, z2.s, z9.s\n"
+ "csel x7, x7, XZR, LT\n"
+ "movprfx z24, z16\n fmla z24.s, p3/M, z1.s, z9.s\n"
+ "csel x6, x6, x21, LT\n"
+ "movprfx z23, z16\n fmla z23.s, p3/M, z0.s, z9.s\n"
+ "cmp x6, x20\n"
+ "fmla z31.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x10, x26, LSL #2]\n"
+ "fmla z29.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x10, x15, LSL #2]\n"
+ "fmla z25.s, p3/M, z6.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x28, x25, LSL #2]\n"
+ "fmla z30.s, p3/M, z4.s, z13.s\n"
+ "fmla z31.s, p3/M, z5.s, z13.s\n"
+ "fmla z29.s, p3/M, z3.s, z13.s\n"
+ "fmla z28.s, p3/M, z2.s, z13.s\n"
"fmla z27.s, p3/M, z1.s, z13.s\n"
- "fmla z28.s, p3/M, z0.s, z13.s\n"
- "ld1w { z13.s }, p2/Z, [x14, x17, LSL #2]\n"
- "fmla z29.s, p3/M, z6.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x26, x25, LSL #2]\n"
- "movprfx z30, z18\n fmla z30.s, p3/M, z1.s, z9.s\n"
- "fmla z24.s, p3/M, z0.s, z13.s\n"
- "fmla z31.s, p3/M, z8.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x14, x27, LSL #2]\n"
- "fmla z23.s, p3/M, z7.s, z11.s\n"
- "fmla z30.s, p3/M, z0.s, z11.s\n"
- "fmla z26.s, p3/M, z4.s, z11.s\n"
+ "fmla z26.s, p3/M, z0.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x14, x15, LSL #2]\n"
+ "fmla z23.s, p3/M, z8.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x14, x26, LSL #2]\n"
+ "fmla z31.s, p3/M, z7.s, z11.s\n"
+ "fmla z30.s, p3/M, z6.s, z11.s\n"
+ "fmla z28.s, p3/M, z4.s, z11.s\n"
"fmla z27.s, p3/M, z3.s, z11.s\n"
- "fmla z29.s, p3/M, z1.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x10]\n"
- "fmla z24.s, p3/M, z2.s, z12.s\n"
- "fmla z25.s, p3/M, z1.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x28]\n"
- "fmla z28.s, p3/M, z4.s, z10.s\n"
- "fmla z23.s, p3/M, z1.s, z13.s\n"
- "ld1w { z13.s }, p2/Z, [x10, x25, LSL #2]\n"
- "fmla z30.s, p3/M, z2.s, z10.s\n"
- "fmla z31.s, p3/M, z1.s, z10.s\n"
- "fmla z24.s, p3/M, z8.s, z10.s\n"
- "fmla z25.s, p3/M, z7.s, z10.s\n"
+ "fmla z25.s, p3/M, z1.s, z11.s\n"
+ "fmla z24.s, p3/M, z0.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x11]\n"
+ "fmla z31.s, p3/M, z1.s, z13.s\n"
+ "fmla z30.s, p3/M, z0.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x11, x25, LSL #2]\n"
+ "fmla z29.s, p3/M, z1.s, z12.s\n"
"fmla z27.s, p3/M, z5.s, z10.s\n"
- "fmla z26.s, p3/M, z0.s, z11.s\n"
- "ld1w { z10.s }, p2/Z, [x28, x12, LSL #2]\n"
- "fmla z29.s, p3/M, z3.s, z12.s\n"
- "fmla z28.s, p3/M, z2.s, z13.s\n"
- "fmla z30.s, p3/M, z4.s, z10.s\n"
- "fmla z31.s, p3/M, z3.s, z10.s\n"
- "fmla z23.s, p3/M, z3.s, z11.s\n"
- "fmla z25.s, p3/M, z5.s, z13.s\n"
- "ld1w { z11.s }, p2/Z, [x28, x25, LSL #2]\n"
- "ld1w { z13.s }, p2/Z, [x26, x17, LSL #2]\n"
- "fmla z26.s, p3/M, z6.s, z12.s\n"
+ "fmla z26.s, p3/M, z4.s, z10.s\n"
+ "fmla z30.s, p3/M, z2.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x9]\n"
+ "fmla z29.s, p3/M, z7.s, z10.s\n"
+ "fmla z24.s, p3/M, z2.s, z10.s\n"
+ "fmla z23.s, p3/M, z1.s, z10.s\n"
+ "fmla z30.s, p3/M, z8.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x9, x27, LSL #2]\n"
+ "fmla z31.s, p3/M, z3.s, z11.s\n"
+ "fmla z28.s, p3/M, z0.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x9, x25, LSL #2]\n"
+ "fmla z29.s, p3/M, z5.s, z13.s\n"
+ "fmla z26.s, p3/M, z2.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x28, x15, LSL #2]\n"
+ "fmla z25.s, p3/M, z3.s, z12.s\n"
+ "fmla z28.s, p3/M, z6.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x11, x15, LSL #2]\n"
"fmla z27.s, p3/M, z7.s, z10.s\n"
- "ld1w { z12.s }, p2/Z, [x10, x17, LSL #2]\n"
- "fmla z29.s, p3/M, z5.s, z10.s\n"
- "fmla z28.s, p3/M, z6.s, z10.s\n"
- "fmla z31.s, p3/M, z5.s, z11.s\n"
- "fmla z30.s, p3/M, z6.s, z13.s\n"
- "fmla z26.s, p3/M, z8.s, z10.s\n"
- "fmla z29.s, p3/M, z7.s, z13.s\n"
- "ld1w { z13.s }, p2/Z, [x26, x27, LSL #2]\n"
- "fmla z24.s, p3/M, z3.s, z12.s\n"
+ "fmla z26.s, p3/M, z6.s, z10.s\n"
+ "fmla z25.s, p3/M, z5.s, z10.s\n"
+ "fmla z28.s, p3/M, z8.s, z10.s\n"
+ "fmla z24.s, p3/M, z4.s, z10.s\n"
+ "fmla z23.s, p3/M, z3.s, z10.s\n"
+ "fmla z26.s, p3/M, z8.s, z11.s\n"
+ "fmla z25.s, p3/M, z7.s, z13.s\n"
+ "fmla z24.s, p3/M, z6.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x28, x26, LSL #2]\n"
+ "fmla z23.s, p3/M, z5.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x11, x26, LSL #2]\n"
+ "fmla z31.s, p3/M, z4.s, z12.s\n"
+ "fmla z30.s, p3/M, z3.s, z12.s\n"
+ "fmla z28.s, p3/M, z1.s, z12.s\n"
"fmla z27.s, p3/M, z0.s, z12.s\n"
- "fmla z28.s, p3/M, z8.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x10, x27, LSL #2]\n"
- "fmla z30.s, p3/M, z8.s, z13.s\n"
- "fmla z31.s, p3/M, z7.s, z13.s\n"
- "fmla z23.s, p3/M, z4.s, z12.s\n"
- "ld1w { z13.s }, p2/Z, [x28, x27, LSL #2]\n"
- "fmla z26.s, p3/M, z1.s, z12.s\n"
- "fmla z24.s, p3/M, z5.s, z11.s\n"
- "ld1w { z12.s }, p2/Z, [x28, x17, LSL #2]\n"
- "fmla z25.s, p3/M, z4.s, z11.s\n"
+ "ld1w { z12.s }, p2/Z, [x9, x15, LSL #2]\n"
+ "fmla z29.s, p3/M, z4.s, z11.s\n"
+ "fmla z30.s, p3/M, z5.s, z11.s\n"
+ "fmla z26.s, p3/M, z1.s, z11.s\n"
"fmla z27.s, p3/M, z2.s, z11.s\n"
- "fmla z28.s, p3/M, z1.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x14, x12, LSL #2]\n"
- "fmla z29.s, p3/M, z4.s, z12.s\n"
- "fmla z30.s, p3/M, z3.s, z12.s\n"
- "fmla z31.s, p3/M, z4.s, z13.s\n"
- "fmla z26.s, p3/M, z7.s, z12.s\n"
+ "ld1w { z11.s }, p2/Z, [x14, x27, LSL #2]\n"
+ "fmla z24.s, p3/M, z8.s, z13.s\n"
+ "fmla z23.s, p3/M, z7.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x9, x26, LSL #2]\n"
+ "fmla z28.s, p3/M, z7.s, z12.s\n"
"fmla z27.s, p3/M, z6.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x9]\n"
- "fmla z23.s, p3/M, z2.s, z11.s\n"
- "fmla z24.s, p3/M, z1.s, z11.s\n"
- "fmax z24.s, p3/M, z24.s, z17.s\n"
- "fmin z24.s, p3/M, z24.s, z16.s\n"
- "fmla z25.s, p3/M, z0.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x9, x25, LSL #2]\n"
- "fmla z28.s, p3/M, z7.s, z13.s\n"
- "fmla z30.s, p3/M, z5.s, z13.s\n"
- "fmla z29.s, p3/M, z0.s, z12.s\n"
+ "fmla z25.s, p3/M, z4.s, z12.s\n"
+ "fmla z24.s, p3/M, z3.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x10]\n"
"fmla z31.s, p3/M, z2.s, z11.s\n"
+ "fmla z30.s, p3/M, z1.s, z11.s\n"
+ "fmla z29.s, p3/M, z0.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x10, x25, LSL #2]\n"
"fmla z27.s, p3/M, z8.s, z13.s\n"
- "ld1w { z13.s }, p2/Z, [x26, x12, LSL #2]\n"
- "fmax z27.s, p3/M, z27.s, z17.s\n"
- "fmla z23.s, p3/M, z6.s, z12.s\n"
- "fmla z26.s, p3/M, z3.s, z12.s\n"
- "fmax z23.s, p3/M, z23.s, z17.s\n"
- "fmax z26.s, p3/M, z26.s, z17.s\n"
- "fmla z25.s, p3/M, z8.s, z11.s\n"
- "fmla z28.s, p3/M, z5.s, z11.s\n"
- "fmax z25.s, p3/M, z25.s, z17.s\n"
- "fmax z28.s, p3/M, z28.s, z17.s\n"
- "fmla z29.s, p3/M, z8.s, z13.s\n"
- "fmla z30.s, p3/M, z7.s, z13.s\n"
- "fmax z29.s, p3/M, z29.s, z17.s\n"
- "fmax z30.s, p3/M, z30.s, z17.s\n"
- "fmla z31.s, p3/M, z6.s, z13.s\n"
- "fmax z31.s, p3/M, z31.s, z17.s\n"
- "fmin z23.s, p3/M, z23.s, z16.s\n"
- "st1w { z23.s }, p0, [x11]\n"
- "fmin z25.s, p3/M, z25.s, z16.s\n"
- "fmin z26.s, p3/M, z26.s, z16.s\n"
- "st1w { z24.s }, p0, [x11, x16, LSL #2]\n"
- "fmin z27.s, p3/M, z27.s, z16.s\n"
- "fmin z28.s, p3/M, z28.s, z16.s\n"
- "st1w { z25.s }, p0, [x11, x22, LSL #2]\n"
- "fmin z29.s, p3/M, z29.s, z16.s\n"
- "fmin z30.s, p3/M, z30.s, z16.s\n"
- "st1w { z26.s }, p0, [x24]\n"
- "fmin z31.s, p3/M, z31.s, z16.s\n"
- "st1w { z27.s }, p0, [x24, x16, LSL #2]\n"
- "st1w { z28.s }, p0, [x24, x22, LSL #2]\n"
- "st1w { z29.s }, p0, [x23]\n"
- "st1w { z30.s }, p0, [x23, x16, LSL #2]\n"
- "st1w { z31.s }, p0, [x23, x22, LSL #2]\n"
+ "fmla z26.s, p3/M, z7.s, z13.s\n"
+ "fmla z24.s, p3/M, z5.s, z13.s\n"
+ "fmla z23.s, p3/M, z4.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x28, x27, LSL #2]\n"
+ "fmla z31.s, p3/M, z6.s, z12.s\n"
+ "fmla z28.s, p3/M, z3.s, z12.s\n"
+ "fmla z25.s, p3/M, z0.s, z12.s\n"
+ "fmla z29.s, p3/M, z8.s, z11.s\n"
+ "fmla z26.s, p3/M, z5.s, z11.s\n"
+ "fmla z23.s, p3/M, z2.s, z11.s\n"
+ "fmla z25.s, p3/M, z8.s, z13.s\n"
+ "fmla z24.s, p3/M, z7.s, z13.s\n"
+ "fmax z31.s, p3/M, z31.s, z18.s\n"
+ "fmla z23.s, p3/M, z6.s, z13.s\n"
+ "fmax z30.s, p3/M, z30.s, z18.s\n"
+ "fmax z29.s, p3/M, z29.s, z18.s\n"
+ "fmin z31.s, p3/M, z31.s, z17.s\n"
+ "st1w { z31.s }, p0, [x12]\n"
+ "fmin z30.s, p3/M, z30.s, z17.s\n"
+ "fmin z29.s, p3/M, z29.s, z17.s\n"
+ "st1w { z30.s }, p0, [x12, x13, LSL #2]\n"
+ "fmax z28.s, p3/M, z28.s, z18.s\n"
+ "fmax z27.s, p3/M, z27.s, z18.s\n"
+ "st1w { z29.s }, p0, [x12, x24, LSL #2]\n"
+ "fmax z26.s, p3/M, z26.s, z18.s\n"
+ "fmax z25.s, p3/M, z25.s, z18.s\n"
+ "fmax z24.s, p3/M, z24.s, z18.s\n"
+ "fmin z28.s, p3/M, z28.s, z17.s\n"
+ "st1w { z28.s }, p0, [x23]\n"
+ "fmin z27.s, p3/M, z27.s, z17.s\n"
+ "fmin z26.s, p3/M, z26.s, z17.s\n"
+ "st1w { z27.s }, p0, [x23, x13, LSL #2]\n"
+ "fmin z25.s, p3/M, z25.s, z17.s\n"
+ "fmin z24.s, p3/M, z24.s, z17.s\n"
+ "st1w { z26.s }, p0, [x23, x24, LSL #2]\n"
+ "fmax z23.s, p3/M, z23.s, z18.s\n"
+ "st1w { z25.s }, p0, [x22]\n"
+ "fmin z23.s, p3/M, z23.s, z17.s\n"
+ "st1w { z24.s }, p0, [x22, x13, LSL #2]\n"
+ "st1w { z23.s }, p0, [x22, x24, LSL #2]\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z16", "z17", "z18", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z16", "z17", "z18", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
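
(Reading note: the churn in this hunk is register renaming only — the revert restores the x19-based allocation, as the added "x19" in the clobber list shows — while the tile-offset arithmetic described by the inline comments is unchanged. That arithmetic, in plain C++ and again assuming strides in float elements, is simply:)

#include <cstddef>

// Tile base address for the 3x3/stride-1/3x3-output direct kernel:
// consecutive tiles sit 3 rows/cols apart (stride 1 x output tile size 3).
const float *tile_inptr(const float *inptr, std::size_t tile_i, std::size_t tile_j,
                        std::size_t ld_input_row, std::size_t ld_input_col)
{
    std::size_t offset = tile_i * ld_input_row;  // offset = tile_i * ld_input_row
    offset += tile_j * ld_input_col;             // offset += tile_j * ld_input_col
    offset *= 3;                                 // offset *= kernel_stride * output_size
    return inptr + offset;                       // pointer math scales by sizeof(float)
}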
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
index 2eed8cb0c4..72b182679d 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,387 +87,387 @@ void sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
activation_min, activation_max);
__asm__ __volatile__(
+ "ldr x16, [%x[params_struct], %[offsetof_args_outptrs]]\n"
"ptrue p3.b\n"
- "ldr x17, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x16, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "ld1w { z18.s }, p3/Z, [x17]\n"
- "cntw x15\n"
- "mov x14, #0x0\n"
- "ld1w { z0.s }, p3/Z, [x17, #1, MUL VL]\n"
- "ld1w { z1.s }, p3/Z, [x17, #2, MUL VL]\n"
+ "ldr x15, [%x[params_struct], %[offsetof_args_params]]\n"
+ "add x14, %x[params_struct], %[offsetof_Args_inptrs]\n"
+ "ld1rw { z18.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "mov x13, #0x0\n"
+ "ld1rw { z17.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "cntw x12\n"
+ "ld1w { z16.s }, p3/Z, [x15]\n"
+ "sub x11, XZR, x12\n"
+ "ld1w { z0.s }, p3/Z, [x15, #1, MUL VL]\n"
"whilelt p2.s, XZR, %x[n_channels]\n"
- "ld1w { z2.s }, p3/Z, [x17, #3, MUL VL]\n"
- "ld1w { z3.s }, p3/Z, [x17, #4, MUL VL]\n"
- "cmp x15, %x[n_channels]\n"
- "ld1w { z4.s }, p3/Z, [x17, #5, MUL VL]\n"
- "ld1w { z5.s }, p3/Z, [x17, #6, MUL VL]\n"
- "sub x13, XZR, x15\n"
- "ld1w { z6.s }, p3/Z, [x17, #7, MUL VL]\n"
- "addvl x17, x17, #16\n"
- "ldp x12, x11, [x16, #0x0]\n"
- "ldp x10, x9, [x16, #0x10]\n"
- "ldr x28, [x16, #0x20]\n"
- "ldr x27, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "ld1rw { z17.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ld1rw { z16.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "ld1w { z7.s }, p3/Z, [x17, #-8, MUL VL]\n"
- "ld1w { z8.s }, p3/Z, [x17, #-7, MUL VL]\n"
- "ld1w { z9.s }, p2/Z, [x12, x14, LSL #2]\n"
- "addvl x17, x17, #-6\n"
- "ld1w { z10.s }, p2/Z, [x11, x14, LSL #2]\n"
- "ld1w { z11.s }, p2/Z, [x10, x14, LSL #2]\n"
- "ld1w { z12.s }, p2/Z, [x9, x14, LSL #2]\n"
- "ld1w { z13.s }, p2/Z, [x28, x14, LSL #2]\n"
+ "ld1w { z1.s }, p3/Z, [x15, #2, MUL VL]\n"
+ "cmp x12, %x[n_channels]\n"
+ "ld1w { z2.s }, p3/Z, [x15, #3, MUL VL]\n"
+ "ld1w { z3.s }, p3/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z4.s }, p3/Z, [x15, #5, MUL VL]\n"
+ "ld1w { z5.s }, p3/Z, [x15, #6, MUL VL]\n"
+ "ld1w { z6.s }, p3/Z, [x15, #7, MUL VL]\n"
+ "addvl x15, x15, #16\n"
+ "ldp x10, x9, [x14, #0x0]\n"
+ "ld1w { z7.s }, p3/Z, [x15, #-8, MUL VL]\n"
+ "ld1w { z8.s }, p3/Z, [x15, #-7, MUL VL]\n"
+ "addvl x15, x15, #-6\n"
+ "ld1w { z9.s }, p2/Z, [x10, x13, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x9, x13, LSL #2]\n"
+ "ldp x28, x27, [x14, #0x10]\n"
+ "ldr x26, [x14, #0x20]\n"
+ "ld1w { z11.s }, p2/Z, [x28, x13, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x27, x13, LSL #2]\n"
+ "ld1w { z13.s }, p2/Z, [x26, x13, LSL #2]\n"
"bge 2f\n"
"1:" // Channel loop
- "movprfx z23, z18\n fmla z23.s, p3/M, z8.s, z9.s\n"
- "movprfx z24, z18\n fmla z24.s, p3/M, z7.s, z9.s\n"
- "ldr x26, [x16, #0x30]\n"
- "ldr x25, [x16, #0x38]\n"
- "movprfx z25, z18\n fmla z25.s, p3/M, z6.s, z9.s\n"
- "fmla z23.s, p3/M, z0.s, z10.s\n"
- "ldr x24, [x16, #0x28]\n"
- "ldr x11, [x16, #0x48]\n"
- "fmla z24.s, p3/M, z4.s, z13.s\n"
- "movprfx z26, z18\n fmla z26.s, p3/M, z5.s, z9.s\n"
- "ldr x12, [x16, #0x40]\n"
- "ld1w { z10.s }, p2/Z, [x11, x14, LSL #2]\n"
- "movprfx z27, z18\n fmla z27.s, p3/M, z4.s, z9.s\n"
- "movprfx z28, z18\n fmla z28.s, p3/M, z3.s, z9.s\n"
- "ldr x10, [x16, #0x50]\n"
- "ldr x9, [x16, #0x58]\n"
- "fmla z25.s, p3/M, z2.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x26, x14, LSL #2]\n"
- "movprfx z29, z18\n fmla z29.s, p3/M, z2.s, z9.s\n"
- "ldr x28, [x16, #0x60]\n"
- "fmla z23.s, p3/M, z5.s, z13.s\n"
- "fmla z24.s, p3/M, z6.s, z11.s\n"
- "ldr x26, [x16, #0x70]\n"
- "ldr x11, [x16, #0x88]\n"
- "movprfx z31, z18\n fmla z31.s, p3/M, z0.s, z9.s\n"
- "fmla z25.s, p3/M, z3.s, z13.s\n"
- "incw x13\n"
- "mov p1.b, p2.b\n"
- "fmla z26.s, p3/M, z2.s, z13.s\n"
+ "movprfx z31, z16\n fmla z31.s, p3/M, z8.s, z9.s\n"
+ "ldr x25, [x14, #0x28]\n"
+ "whilelt p1.s, x12, %x[n_channels]\n"
+ "movprfx z30, z16\n fmla z30.s, p3/M, z7.s, z9.s\n"
+ "ldr x24, [x14, #0x30]\n"
+ "incw x11\n"
+ "movprfx z29, z16\n fmla z29.s, p3/M, z6.s, z9.s\n"
+ "ldr x23, [x14, #0x38]\n"
+ "mov p0.b, p2.b\n"
+ "movprfx z28, z16\n fmla z28.s, p3/M, z5.s, z9.s\n"
+ "ldr x10, [x14, #0x40]\n"
+ "movprfx z27, z16\n fmla z27.s, p3/M, z4.s, z9.s\n"
+ "ldr x9, [x14, #0x48]\n"
+ "movprfx z26, z16\n fmla z26.s, p3/M, z3.s, z9.s\n"
+ "ldr x28, [x14, #0x50]\n"
+ "movprfx z25, z16\n fmla z25.s, p3/M, z2.s, z9.s\n"
+ "ldr x27, [x14, #0x58]\n"
+ "movprfx z24, z16\n fmla z24.s, p3/M, z1.s, z9.s\n"
+ "ldr x26, [x14, #0x60]\n"
+ "movprfx z23, z16\n fmla z23.s, p3/M, z0.s, z9.s\n"
+ "ldr x22, [x16, #0x0]\n"
+ "fmla z31.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x9, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x24, x13, LSL #2]\n"
+ "fmla z25.s, p3/M, z6.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z4.s, z13.s\n"
+ "ldr x25, [x14, #0x68]\n"
+ "fmla z31.s, p3/M, z5.s, z13.s\n"
+ "ldr x24, [x14, #0x70]\n"
+ "fmla z29.s, p3/M, z3.s, z13.s\n"
+ "ldr x9, [x14, #0x88]\n"
+ "fmla z28.s, p3/M, z2.s, z13.s\n"
+ "ldr x21, [x16, #0x8]\n"
"fmla z27.s, p3/M, z1.s, z13.s\n"
- "ldr x23, [x27, #0x0]\n"
- "whilelt p0.s, x15, %x[n_channels]\n"
- "fmla z28.s, p3/M, z0.s, z13.s\n"
- "ld1w { z13.s }, p2/Z, [x25, x14, LSL #2]\n"
- "fmla z29.s, p3/M, z6.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x24, x14, LSL #2]\n"
- "movprfx z30, z18\n fmla z30.s, p3/M, z1.s, z9.s\n"
- "fmla z23.s, p3/M, z7.s, z11.s\n"
- "ldr x24, [x16, #0x68]\n"
- "ldr x25, [x16, #0x78]\n"
- "fmla z24.s, p3/M, z0.s, z13.s\n"
- "fmla z31.s, p3/M, z8.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x12, x14, LSL #2]\n"
- "ldr x12, [x16, #0x80]\n"
- "fmla z26.s, p3/M, z4.s, z11.s\n"
+ "ldr x20, [x16, #0x10]\n"
+ "fmla z26.s, p3/M, z0.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x23, x13, LSL #2]\n"
+ "fmla z23.s, p3/M, z8.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x10, x13, LSL #2]\n"
+ "fmla z31.s, p3/M, z7.s, z11.s\n"
+ "ldr x23, [x14, #0x78]\n"
+ "fmla z30.s, p3/M, z6.s, z11.s\n"
+ "ldr x10, [x14, #0x80]\n"
+ "fmla z28.s, p3/M, z4.s, z11.s\n"
+ "ldr x19, [x16, #0x18]\n"
"fmla z27.s, p3/M, z3.s, z11.s\n"
- "ldr x22, [x27, #0x8]\n"
- "ldr x21, [x27, #0x10]\n"
- "fmla z30.s, p3/M, z0.s, z11.s\n"
- "fmla z28.s, p3/M, z4.s, z10.s\n"
- "ldr x20, [x27, #0x18]\n"
- "ld1w { z18.s }, p3/Z, [x17]\n"
- "fmla z29.s, p3/M, z1.s, z11.s\n"
- "fmla z23.s, p3/M, z1.s, z13.s\n"
- "ld1w { z11.s }, p2/Z, [x10, x14, LSL #2]\n"
- "ld1w { z13.s }, p2/Z, [x9, x14, LSL #2]\n"
- "fmla z24.s, p3/M, z2.s, z12.s\n"
- "fmla z25.s, p3/M, z1.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x28, x14, LSL #2]\n"
- "ldr x10, [x16, #0x90]\n"
+ "ld1w { z16.s }, p3/Z, [x15]\n"
+ "fmla z25.s, p3/M, z1.s, z11.s\n"
+ "fmla z24.s, p3/M, z0.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x28, x13, LSL #2]\n"
+ "fmla z31.s, p3/M, z1.s, z13.s\n"
+ "ldr x28, [x14, #0x90]\n"
+ "fmla z30.s, p3/M, z0.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x27, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z1.s, z12.s\n"
+ "ldr x27, [x14, #0x98]\n"
"fmla z27.s, p3/M, z5.s, z10.s\n"
- "fmla z30.s, p3/M, z2.s, z10.s\n"
- "ldr x28, [x16, #0xa0]\n"
- "ldr x9, [x16, #0x98]\n"
- "fmla z26.s, p3/M, z0.s, z11.s\n"
- "fmla z28.s, p3/M, z2.s, z13.s\n"
- "fmla z24.s, p3/M, z8.s, z10.s\n"
- "fmla z25.s, p3/M, z7.s, z10.s\n"
- "fmla z31.s, p3/M, z1.s, z10.s\n"
- "fmla z29.s, p3/M, z3.s, z12.s\n"
- "ld1w { z10.s }, p2/Z, [x24, x14, LSL #2]\n"
- "ldr x24, [x16, #0xa8]\n"
- "fmla z26.s, p3/M, z6.s, z12.s\n"
+ "fmla z26.s, p3/M, z4.s, z10.s\n"
+ "fmla z30.s, p3/M, z2.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z7.s, z10.s\n"
+ "ldr x26, [x14, #0xa0]\n"
+ "fmla z24.s, p3/M, z2.s, z10.s\n"
+ "fmla z23.s, p3/M, z1.s, z10.s\n"
+ "fmla z30.s, p3/M, z8.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "fmla z31.s, p3/M, z3.s, z11.s\n"
+ "ldr x25, [x14, #0xa8]\n"
+ "fmla z28.s, p3/M, z0.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x24, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z5.s, z13.s\n"
+ "ldr x24, [x14, #0xb0]\n"
+ "fmla z26.s, p3/M, z2.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x23, x13, LSL #2]\n"
+ "fmla z25.s, p3/M, z3.s, z12.s\n"
+ "ldr x23, [x14, #0xb8]\n"
+ "fmla z28.s, p3/M, z6.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x10, x13, LSL #2]\n"
"fmla z27.s, p3/M, z7.s, z10.s\n"
- "ld1w { z12.s }, p2/Z, [x12, x14, LSL #2]\n"
- "ldr x12, [x16, #0xc0]\n"
- "fmla z28.s, p3/M, z6.s, z10.s\n"
- "fmla z30.s, p3/M, z4.s, z10.s\n"
- "fmla z23.s, p3/M, z3.s, z11.s\n"
- "fmla z25.s, p3/M, z5.s, z13.s\n"
- "ld1w { z11.s }, p2/Z, [x26, x14, LSL #2]\n"
- "ld1w { z13.s }, p2/Z, [x25, x14, LSL #2]\n"
- "fmla z29.s, p3/M, z5.s, z10.s\n"
- "fmla z31.s, p3/M, z3.s, z10.s\n"
- "ldr x26, [x16, #0xb0]\n"
- "ldr x25, [x16, #0xb8]\n"
- "fmla z26.s, p3/M, z8.s, z10.s\n"
- "fmla z28.s, p3/M, z8.s, z11.s\n"
- "fmla z30.s, p3/M, z6.s, z13.s\n"
- "fmla z24.s, p3/M, z3.s, z12.s\n"
+ "ldr x10, [x14, #0xc0]\n"
+ "fmla z26.s, p3/M, z6.s, z10.s\n"
+ "fmla z25.s, p3/M, z5.s, z10.s\n"
+ "fmla z28.s, p3/M, z8.s, z10.s\n"
+ "fmla z24.s, p3/M, z4.s, z10.s\n"
+ "fmla z23.s, p3/M, z3.s, z10.s\n"
+ "fmla z26.s, p3/M, z8.s, z11.s\n"
+ "fmla z25.s, p3/M, z7.s, z13.s\n"
+ "fmla z24.s, p3/M, z6.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x28, x13, LSL #2]\n"
+ "fmla z23.s, p3/M, z5.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x9, x13, LSL #2]\n"
+ "fmla z31.s, p3/M, z4.s, z12.s\n"
+ "fmla z30.s, p3/M, z3.s, z12.s\n"
+ "fmla z28.s, p3/M, z1.s, z12.s\n"
"fmla z27.s, p3/M, z0.s, z12.s\n"
- "fmla z31.s, p3/M, z5.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x11, x14, LSL #2]\n"
- "fmla z29.s, p3/M, z7.s, z13.s\n"
- "ld1w { z13.s }, p2/Z, [x10, x14, LSL #2]\n"
- "fmla z23.s, p3/M, z4.s, z12.s\n"
- "fmla z26.s, p3/M, z1.s, z12.s\n"
- "fmla z24.s, p3/M, z5.s, z11.s\n"
- "ld1w { z12.s }, p2/Z, [x9, x14, LSL #2]\n"
- "fmla z25.s, p3/M, z4.s, z11.s\n"
+ "ld1w { z12.s }, p2/Z, [x27, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z4.s, z11.s\n"
+ "fmla z30.s, p3/M, z5.s, z11.s\n"
+ "fmla z26.s, p3/M, z1.s, z11.s\n"
"fmla z27.s, p3/M, z2.s, z11.s\n"
- "fmla z28.s, p3/M, z1.s, z11.s\n"
- "fmla z30.s, p3/M, z8.s, z13.s\n"
- "ld1w { z11.s }, p2/Z, [x28, x14, LSL #2]\n"
- "ldr x28, [x16, #0x20]\n"
- "fmla z31.s, p3/M, z7.s, z13.s\n"
- "ld1w { z13.s }, p2/Z, [x24, x14, LSL #2]\n"
- "fmla z23.s, p3/M, z2.s, z11.s\n"
- "fmla z26.s, p3/M, z7.s, z12.s\n"
+ "ld1w { z11.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "fmla z24.s, p3/M, z8.s, z13.s\n"
+ "ldr x26, [x14, #0x20]\n"
+ "fmla z23.s, p3/M, z7.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "fmla z28.s, p3/M, z7.s, z12.s\n"
"fmla z27.s, p3/M, z6.s, z12.s\n"
- "fmla z29.s, p3/M, z4.s, z12.s\n"
- "fmla z30.s, p3/M, z3.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x26, x14, LSL #2]\n"
- "fmla z31.s, p3/M, z4.s, z13.s\n"
- "fmla z24.s, p3/M, z1.s, z11.s\n"
- "fmax z24.s, p3/M, z24.s, z17.s\n"
- "fmin z24.s, p3/M, z24.s, z16.s\n"
- "fmla z25.s, p3/M, z0.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x25, x14, LSL #2]\n"
- "fmla z23.s, p3/M, z6.s, z12.s\n"
- "fmax z23.s, p3/M, z23.s, z17.s\n"
- "fmla z28.s, p3/M, z7.s, z13.s\n"
- "fmla z30.s, p3/M, z5.s, z13.s\n"
- "fmin z23.s, p3/M, z23.s, z16.s\n"
- "st1w { z23.s }, p1, [x23, x13, LSL #2]\n"
- "fmla z29.s, p3/M, z0.s, z12.s\n"
+ "fmla z25.s, p3/M, z4.s, z12.s\n"
+ "fmla z24.s, p3/M, z3.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x24, x13, LSL #2]\n"
"fmla z31.s, p3/M, z2.s, z11.s\n"
- "ldr x23, [x27, #0x20]\n"
- "st1w { z24.s }, p1, [x22, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z1.s, z11.s\n"
+ "ld1w { z1.s }, p3/Z, [x15, #2, MUL VL]\n"
+ "fmla z29.s, p3/M, z0.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x23, x13, LSL #2]\n"
"fmla z27.s, p3/M, z8.s, z13.s\n"
- "fmla z26.s, p3/M, z3.s, z12.s\n"
- "ld1w { z13.s }, p2/Z, [x12, x14, LSL #2]\n"
- "ldp x12, x11, [x16, #0x0]\n"
- "fmla z25.s, p3/M, z8.s, z11.s\n"
- "fmla z28.s, p3/M, z5.s, z11.s\n"
- "ldp x10, x9, [x16, #0x10]\n"
- "fmax z25.s, p3/M, z25.s, z17.s\n"
- "fmla z29.s, p3/M, z8.s, z13.s\n"
- "fmla z30.s, p3/M, z7.s, z13.s\n"
- "fmax z26.s, p3/M, z26.s, z17.s\n"
- "fmax z27.s, p3/M, z27.s, z17.s\n"
- "fmla z31.s, p3/M, z6.s, z13.s\n"
- "incw x14\n"
- "ld1w { z9.s }, p0/Z, [x12, x15, LSL #2]\n"
- "ld1w { z10.s }, p0/Z, [x11, x15, LSL #2]\n"
- "ld1w { z11.s }, p0/Z, [x10, x15, LSL #2]\n"
- "ld1w { z12.s }, p0/Z, [x9, x15, LSL #2]\n"
- "fmin z25.s, p3/M, z25.s, z16.s\n"
- "fmin z26.s, p3/M, z26.s, z16.s\n"
- "ld1w { z13.s }, p0/Z, [x28, x15, LSL #2]\n"
- "incw x15\n"
- "fmin z27.s, p3/M, z27.s, z16.s\n"
- "st1w { z25.s }, p1, [x21, x13, LSL #2]\n"
- "fmax z28.s, p3/M, z28.s, z17.s\n"
- "fmax z29.s, p3/M, z29.s, z17.s\n"
- "st1w { z26.s }, p1, [x20, x13, LSL #2]\n"
- "ldr x22, [x27, #0x28]\n"
- "fmax z30.s, p3/M, z30.s, z17.s\n"
- "fmax z31.s, p3/M, z31.s, z17.s\n"
- "st1w { z27.s }, p1, [x23, x13, LSL #2]\n"
- "ldr x21, [x27, #0x30]\n"
- "ldr x20, [x27, #0x38]\n"
- "ldr x23, [x27, #0x40]\n"
- "whilelt p2.s, x14, %x[n_channels]\n"
- "cmp x15, %x[n_channels]\n"
- "ld1w { z0.s }, p3/Z, [x17, #1, MUL VL]\n"
- "ld1w { z1.s }, p3/Z, [x17, #2, MUL VL]\n"
- "fmin z28.s, p3/M, z28.s, z16.s\n"
- "fmin z29.s, p3/M, z29.s, z16.s\n"
- "ld1w { z2.s }, p3/Z, [x17, #3, MUL VL]\n"
- "ld1w { z3.s }, p3/Z, [x17, #4, MUL VL]\n"
- "fmin z30.s, p3/M, z30.s, z16.s\n"
- "fmin z31.s, p3/M, z31.s, z16.s\n"
- "ld1w { z4.s }, p3/Z, [x17, #5, MUL VL]\n"
- "ld1w { z5.s }, p3/Z, [x17, #6, MUL VL]\n"
- "st1w { z28.s }, p1, [x22, x13, LSL #2]\n"
- "ld1w { z6.s }, p3/Z, [x17, #7, MUL VL]\n"
- "addvl x17, x17, #16\n"
- "st1w { z29.s }, p1, [x21, x13, LSL #2]\n"
- "ld1w { z7.s }, p3/Z, [x17, #-8, MUL VL]\n"
- "st1w { z30.s }, p1, [x20, x13, LSL #2]\n"
- "ld1w { z8.s }, p3/Z, [x17, #-7, MUL VL]\n"
- "addvl x17, x17, #-6\n"
- "st1w { z31.s }, p1, [x23, x13, LSL #2]\n"
+ "fmla z26.s, p3/M, z7.s, z13.s\n"
+ "fmla z24.s, p3/M, z5.s, z13.s\n"
+ "fmla z23.s, p3/M, z4.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x10, x13, LSL #2]\n"
+ "incw x13\n"
+ "fmla z31.s, p3/M, z6.s, z12.s\n"
+ "ldp x10, x9, [x14, #0x0]\n"
+ "whilelt p2.s, x13, %x[n_channels]\n"
+ "fmla z28.s, p3/M, z3.s, z12.s\n"
+ "ldp x28, x27, [x14, #0x10]\n"
+ "fmla z25.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z0.s }, p3/Z, [x15, #1, MUL VL]\n"
+ "fmla z29.s, p3/M, z8.s, z11.s\n"
+ "ld1w { z9.s }, p1/Z, [x10, x12, LSL #2]\n"
+ "fmla z26.s, p3/M, z5.s, z11.s\n"
+ "ld1w { z10.s }, p1/Z, [x9, x12, LSL #2]\n"
+ "fmla z23.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z11.s }, p1/Z, [x28, x12, LSL #2]\n"
+ "fmla z25.s, p3/M, z8.s, z13.s\n"
+ "ld1w { z12.s }, p1/Z, [x27, x12, LSL #2]\n"
+ "fmla z24.s, p3/M, z7.s, z13.s\n"
+ "ld1w { z2.s }, p3/Z, [x15, #3, MUL VL]\n"
+ "fmax z31.s, p3/M, z31.s, z18.s\n"
+ "ld1w { z3.s }, p3/Z, [x15, #4, MUL VL]\n"
+ "fmla z23.s, p3/M, z6.s, z13.s\n"
+ "ld1w { z13.s }, p1/Z, [x26, x12, LSL #2]\n"
+ "incw x12\n"
+ "fmax z30.s, p3/M, z30.s, z18.s\n"
+ "ld1w { z4.s }, p3/Z, [x15, #5, MUL VL]\n"
+ "cmp x12, %x[n_channels]\n"
+ "fmin z31.s, p3/M, z31.s, z17.s\n"
+ "ld1w { z5.s }, p3/Z, [x15, #6, MUL VL]\n"
+ "fmax z29.s, p3/M, z29.s, z18.s\n"
+ "ld1w { z6.s }, p3/Z, [x15, #7, MUL VL]\n"
+ "addvl x15, x15, #16\n"
+ "fmax z28.s, p3/M, z28.s, z18.s\n"
+ "ld1w { z7.s }, p3/Z, [x15, #-8, MUL VL]\n"
+ "fmax z27.s, p3/M, z27.s, z18.s\n"
+ "ld1w { z8.s }, p3/Z, [x15, #-7, MUL VL]\n"
+ "addvl x15, x15, #-6\n"
+ "fmin z30.s, p3/M, z30.s, z17.s\n"
+ "st1w { z31.s }, p0, [x22, x11, LSL #2]\n"
+ "fmin z29.s, p3/M, z29.s, z17.s\n"
+ "ldr x22, [x16, #0x20]\n"
+ "fmax z26.s, p3/M, z26.s, z18.s\n"
+ "st1w { z30.s }, p0, [x21, x11, LSL #2]\n"
+ "fmin z28.s, p3/M, z28.s, z17.s\n"
+ "fmin z27.s, p3/M, z27.s, z17.s\n"
+ "st1w { z29.s }, p0, [x20, x11, LSL #2]\n"
+ "fmin z26.s, p3/M, z26.s, z17.s\n"
+ "ldr x21, [x16, #0x28]\n"
+ "fmax z25.s, p3/M, z25.s, z18.s\n"
+ "ldr x20, [x16, #0x30]\n"
+ "fmax z24.s, p3/M, z24.s, z18.s\n"
+ "st1w { z28.s }, p0, [x19, x11, LSL #2]\n"
+ "fmax z23.s, p3/M, z23.s, z18.s\n"
+ "st1w { z27.s }, p0, [x22, x11, LSL #2]\n"
+ "st1w { z26.s }, p0, [x21, x11, LSL #2]\n"
+ "fmin z25.s, p3/M, z25.s, z17.s\n"
+ "ldr x19, [x16, #0x38]\n"
+ "fmin z24.s, p3/M, z24.s, z17.s\n"
+ "ldr x22, [x16, #0x40]\n"
+ "fmin z23.s, p3/M, z23.s, z17.s\n"
+ "st1w { z25.s }, p0, [x20, x11, LSL #2]\n"
+ "st1w { z24.s }, p0, [x19, x11, LSL #2]\n"
+ "st1w { z23.s }, p0, [x22, x11, LSL #2]\n"
"blt 1b\n"
"2:" // Channel tail
- "movprfx z23, z18\n fmla z23.s, p3/M, z8.s, z9.s\n"
- "movprfx z24, z18\n fmla z24.s, p3/M, z7.s, z9.s\n"
- "ldr x26, [x16, #0x30]\n"
- "ldr x25, [x16, #0x38]\n"
- "movprfx z25, z18\n fmla z25.s, p3/M, z6.s, z9.s\n"
- "fmla z23.s, p3/M, z0.s, z10.s\n"
- "ldr x24, [x16, #0x28]\n"
- "ldr x11, [x16, #0x48]\n"
- "fmla z24.s, p3/M, z4.s, z13.s\n"
- "movprfx z26, z18\n fmla z26.s, p3/M, z5.s, z9.s\n"
- "ldr x12, [x16, #0x40]\n"
- "ld1w { z10.s }, p2/Z, [x11, x14, LSL #2]\n"
- "movprfx z27, z18\n fmla z27.s, p3/M, z4.s, z9.s\n"
- "movprfx z28, z18\n fmla z28.s, p3/M, z3.s, z9.s\n"
- "ldr x10, [x16, #0x50]\n"
- "ldr x9, [x16, #0x58]\n"
- "fmla z25.s, p3/M, z2.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x26, x14, LSL #2]\n"
- "movprfx z29, z18\n fmla z29.s, p3/M, z2.s, z9.s\n"
- "ldr x28, [x16, #0x60]\n"
- "fmla z23.s, p3/M, z5.s, z13.s\n"
- "fmla z24.s, p3/M, z6.s, z11.s\n"
- "ldr x26, [x16, #0x70]\n"
- "ldr x11, [x16, #0x88]\n"
- "movprfx z31, z18\n fmla z31.s, p3/M, z0.s, z9.s\n"
- "fmla z25.s, p3/M, z3.s, z13.s\n"
- "incw x13\n"
- "mov p1.b, p2.b\n"
- "fmla z26.s, p3/M, z2.s, z13.s\n"
+ "movprfx z31, z16\n fmla z31.s, p3/M, z8.s, z9.s\n"
+ "ldr x25, [x14, #0x28]\n"
+ "incw x11\n"
+ "movprfx z30, z16\n fmla z30.s, p3/M, z7.s, z9.s\n"
+ "ldr x24, [x14, #0x30]\n"
+ "mov p0.b, p2.b\n"
+ "movprfx z29, z16\n fmla z29.s, p3/M, z6.s, z9.s\n"
+ "ldr x23, [x14, #0x38]\n"
+ "movprfx z28, z16\n fmla z28.s, p3/M, z5.s, z9.s\n"
+ "ldr x10, [x14, #0x40]\n"
+ "movprfx z27, z16\n fmla z27.s, p3/M, z4.s, z9.s\n"
+ "ldr x9, [x14, #0x48]\n"
+ "movprfx z26, z16\n fmla z26.s, p3/M, z3.s, z9.s\n"
+ "ldr x28, [x14, #0x50]\n"
+ "movprfx z25, z16\n fmla z25.s, p3/M, z2.s, z9.s\n"
+ "ldr x27, [x14, #0x58]\n"
+ "movprfx z24, z16\n fmla z24.s, p3/M, z1.s, z9.s\n"
+ "ldr x26, [x14, #0x60]\n"
+ "movprfx z23, z16\n fmla z23.s, p3/M, z0.s, z9.s\n"
+ "ldr x22, [x16, #0x0]\n"
+ "fmla z31.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x9, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x24, x13, LSL #2]\n"
+ "fmla z25.s, p3/M, z6.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z4.s, z13.s\n"
+ "ldr x25, [x14, #0x68]\n"
+ "fmla z31.s, p3/M, z5.s, z13.s\n"
+ "ldr x24, [x14, #0x70]\n"
+ "fmla z29.s, p3/M, z3.s, z13.s\n"
+ "ldr x9, [x14, #0x88]\n"
+ "fmla z28.s, p3/M, z2.s, z13.s\n"
+ "ldr x21, [x16, #0x8]\n"
"fmla z27.s, p3/M, z1.s, z13.s\n"
- "ldr x23, [x27, #0x0]\n"
- "ldr x22, [x27, #0x8]\n"
- "fmla z28.s, p3/M, z0.s, z13.s\n"
- "ld1w { z13.s }, p2/Z, [x25, x14, LSL #2]\n"
- "fmla z29.s, p3/M, z6.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x24, x14, LSL #2]\n"
- "movprfx z30, z18\n fmla z30.s, p3/M, z1.s, z9.s\n"
- "fmla z23.s, p3/M, z7.s, z11.s\n"
- "ldr x24, [x16, #0x68]\n"
- "ldr x25, [x16, #0x78]\n"
- "fmla z24.s, p3/M, z0.s, z13.s\n"
- "fmla z31.s, p3/M, z8.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x12, x14, LSL #2]\n"
- "ldr x12, [x16, #0x80]\n"
- "fmla z26.s, p3/M, z4.s, z11.s\n"
+ "ldr x20, [x16, #0x10]\n"
+ "fmla z26.s, p3/M, z0.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x23, x13, LSL #2]\n"
+ "fmla z23.s, p3/M, z8.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x10, x13, LSL #2]\n"
+ "fmla z31.s, p3/M, z7.s, z11.s\n"
+ "ldr x23, [x14, #0x78]\n"
+ "fmla z30.s, p3/M, z6.s, z11.s\n"
+ "ldr x10, [x14, #0x80]\n"
+ "fmla z28.s, p3/M, z4.s, z11.s\n"
+ "ldr x19, [x16, #0x18]\n"
"fmla z27.s, p3/M, z3.s, z11.s\n"
- "ldr x21, [x27, #0x10]\n"
- "ldr x20, [x27, #0x18]\n"
- "fmla z30.s, p3/M, z0.s, z11.s\n"
- "fmla z28.s, p3/M, z4.s, z10.s\n"
- "fmla z29.s, p3/M, z1.s, z11.s\n"
- "fmla z23.s, p3/M, z1.s, z13.s\n"
- "ld1w { z11.s }, p2/Z, [x10, x14, LSL #2]\n"
- "ld1w { z13.s }, p2/Z, [x9, x14, LSL #2]\n"
- "fmla z24.s, p3/M, z2.s, z12.s\n"
- "fmla z25.s, p3/M, z1.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x28, x14, LSL #2]\n"
- "ldr x10, [x16, #0x90]\n"
+ "fmla z25.s, p3/M, z1.s, z11.s\n"
+ "fmla z24.s, p3/M, z0.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x28, x13, LSL #2]\n"
+ "fmla z31.s, p3/M, z1.s, z13.s\n"
+ "ldr x28, [x14, #0x90]\n"
+ "fmla z30.s, p3/M, z0.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x27, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z1.s, z12.s\n"
+ "ldr x27, [x14, #0x98]\n"
"fmla z27.s, p3/M, z5.s, z10.s\n"
- "fmla z30.s, p3/M, z2.s, z10.s\n"
- "ldr x28, [x16, #0xa0]\n"
- "ldr x9, [x16, #0x98]\n"
- "fmla z26.s, p3/M, z0.s, z11.s\n"
- "fmla z28.s, p3/M, z2.s, z13.s\n"
- "fmla z24.s, p3/M, z8.s, z10.s\n"
- "fmla z25.s, p3/M, z7.s, z10.s\n"
- "fmla z31.s, p3/M, z1.s, z10.s\n"
- "fmla z29.s, p3/M, z3.s, z12.s\n"
- "ld1w { z10.s }, p2/Z, [x24, x14, LSL #2]\n"
- "ldr x24, [x16, #0xa8]\n"
- "fmla z26.s, p3/M, z6.s, z12.s\n"
+ "fmla z26.s, p3/M, z4.s, z10.s\n"
+ "fmla z30.s, p3/M, z2.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z7.s, z10.s\n"
+ "ldr x26, [x14, #0xa0]\n"
+ "fmla z24.s, p3/M, z2.s, z10.s\n"
+ "fmla z23.s, p3/M, z1.s, z10.s\n"
+ "fmla z30.s, p3/M, z8.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "fmla z31.s, p3/M, z3.s, z11.s\n"
+ "ldr x25, [x14, #0xa8]\n"
+ "fmla z28.s, p3/M, z0.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x24, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z5.s, z13.s\n"
+ "ldr x24, [x14, #0xb0]\n"
+ "fmla z26.s, p3/M, z2.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x23, x13, LSL #2]\n"
+ "fmla z25.s, p3/M, z3.s, z12.s\n"
+ "ldr x23, [x14, #0xb8]\n"
+ "fmla z28.s, p3/M, z6.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x10, x13, LSL #2]\n"
"fmla z27.s, p3/M, z7.s, z10.s\n"
- "ld1w { z12.s }, p2/Z, [x12, x14, LSL #2]\n"
- "ldr x12, [x16, #0xc0]\n"
- "fmla z28.s, p3/M, z6.s, z10.s\n"
- "fmla z30.s, p3/M, z4.s, z10.s\n"
- "fmla z23.s, p3/M, z3.s, z11.s\n"
- "fmla z25.s, p3/M, z5.s, z13.s\n"
- "ld1w { z11.s }, p2/Z, [x26, x14, LSL #2]\n"
- "ld1w { z13.s }, p2/Z, [x25, x14, LSL #2]\n"
- "fmla z29.s, p3/M, z5.s, z10.s\n"
- "fmla z31.s, p3/M, z3.s, z10.s\n"
- "ldr x26, [x16, #0xb0]\n"
- "ldr x25, [x16, #0xb8]\n"
- "fmla z26.s, p3/M, z8.s, z10.s\n"
- "fmla z28.s, p3/M, z8.s, z11.s\n"
- "fmla z30.s, p3/M, z6.s, z13.s\n"
- "fmla z24.s, p3/M, z3.s, z12.s\n"
+ "ldr x10, [x14, #0xc0]\n"
+ "fmla z26.s, p3/M, z6.s, z10.s\n"
+ "fmla z25.s, p3/M, z5.s, z10.s\n"
+ "fmla z28.s, p3/M, z8.s, z10.s\n"
+ "fmla z24.s, p3/M, z4.s, z10.s\n"
+ "fmla z23.s, p3/M, z3.s, z10.s\n"
+ "fmla z26.s, p3/M, z8.s, z11.s\n"
+ "fmla z25.s, p3/M, z7.s, z13.s\n"
+ "fmla z24.s, p3/M, z6.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x28, x13, LSL #2]\n"
+ "fmla z23.s, p3/M, z5.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x9, x13, LSL #2]\n"
+ "fmla z31.s, p3/M, z4.s, z12.s\n"
+ "fmla z30.s, p3/M, z3.s, z12.s\n"
+ "fmla z28.s, p3/M, z1.s, z12.s\n"
"fmla z27.s, p3/M, z0.s, z12.s\n"
- "fmla z31.s, p3/M, z5.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x11, x14, LSL #2]\n"
- "fmla z29.s, p3/M, z7.s, z13.s\n"
- "ld1w { z13.s }, p2/Z, [x10, x14, LSL #2]\n"
- "fmla z23.s, p3/M, z4.s, z12.s\n"
- "fmla z26.s, p3/M, z1.s, z12.s\n"
- "fmla z24.s, p3/M, z5.s, z11.s\n"
- "ld1w { z12.s }, p2/Z, [x9, x14, LSL #2]\n"
- "fmla z25.s, p3/M, z4.s, z11.s\n"
+ "ld1w { z12.s }, p2/Z, [x27, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z4.s, z11.s\n"
+ "fmla z30.s, p3/M, z5.s, z11.s\n"
+ "fmla z26.s, p3/M, z1.s, z11.s\n"
"fmla z27.s, p3/M, z2.s, z11.s\n"
- "fmla z28.s, p3/M, z1.s, z11.s\n"
- "fmla z30.s, p3/M, z8.s, z13.s\n"
- "ld1w { z11.s }, p2/Z, [x28, x14, LSL #2]\n"
- "fmla z31.s, p3/M, z7.s, z13.s\n"
- "ld1w { z13.s }, p2/Z, [x24, x14, LSL #2]\n"
- "fmla z23.s, p3/M, z2.s, z11.s\n"
- "fmla z26.s, p3/M, z7.s, z12.s\n"
+ "ld1w { z11.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "fmla z24.s, p3/M, z8.s, z13.s\n"
+ "fmla z23.s, p3/M, z7.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "fmla z28.s, p3/M, z7.s, z12.s\n"
"fmla z27.s, p3/M, z6.s, z12.s\n"
- "fmla z29.s, p3/M, z4.s, z12.s\n"
- "fmla z30.s, p3/M, z3.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x26, x14, LSL #2]\n"
- "fmla z31.s, p3/M, z4.s, z13.s\n"
- "fmla z24.s, p3/M, z1.s, z11.s\n"
- "fmax z24.s, p3/M, z24.s, z17.s\n"
- "fmin z24.s, p3/M, z24.s, z16.s\n"
- "fmla z25.s, p3/M, z0.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x25, x14, LSL #2]\n"
- "fmla z23.s, p3/M, z6.s, z12.s\n"
- "fmax z23.s, p3/M, z23.s, z17.s\n"
- "fmla z28.s, p3/M, z7.s, z13.s\n"
- "fmla z30.s, p3/M, z5.s, z13.s\n"
- "fmin z23.s, p3/M, z23.s, z16.s\n"
- "st1w { z23.s }, p1, [x23, x13, LSL #2]\n"
- "fmla z29.s, p3/M, z0.s, z12.s\n"
+ "fmla z25.s, p3/M, z4.s, z12.s\n"
+ "fmla z24.s, p3/M, z3.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x24, x13, LSL #2]\n"
"fmla z31.s, p3/M, z2.s, z11.s\n"
- "ldr x23, [x27, #0x20]\n"
- "st1w { z24.s }, p1, [x22, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z1.s, z11.s\n"
+ "fmla z29.s, p3/M, z0.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x23, x13, LSL #2]\n"
"fmla z27.s, p3/M, z8.s, z13.s\n"
- "fmla z26.s, p3/M, z3.s, z12.s\n"
- "ld1w { z13.s }, p2/Z, [x12, x14, LSL #2]\n"
- "fmax z26.s, p3/M, z26.s, z17.s\n"
- "fmla z25.s, p3/M, z8.s, z11.s\n"
- "fmla z28.s, p3/M, z5.s, z11.s\n"
- "fmax z25.s, p3/M, z25.s, z17.s\n"
- "fmax z27.s, p3/M, z27.s, z17.s\n"
- "fmla z29.s, p3/M, z8.s, z13.s\n"
- "fmla z30.s, p3/M, z7.s, z13.s\n"
- "fmin z25.s, p3/M, z25.s, z16.s\n"
- "fmin z26.s, p3/M, z26.s, z16.s\n"
- "fmla z31.s, p3/M, z6.s, z13.s\n"
- "fmin z27.s, p3/M, z27.s, z16.s\n"
- "fmax z28.s, p3/M, z28.s, z17.s\n"
- "st1w { z25.s }, p1, [x21, x13, LSL #2]\n"
- "fmax z29.s, p3/M, z29.s, z17.s\n"
- "fmax z30.s, p3/M, z30.s, z17.s\n"
- "st1w { z26.s }, p1, [x20, x13, LSL #2]\n"
- "ldr x22, [x27, #0x28]\n"
- "fmax z31.s, p3/M, z31.s, z17.s\n"
- "st1w { z27.s }, p1, [x23, x13, LSL #2]\n"
- "ldr x21, [x27, #0x30]\n"
- "ldr x20, [x27, #0x38]\n"
- "ldr x23, [x27, #0x40]\n"
- "fmin z28.s, p3/M, z28.s, z16.s\n"
- "fmin z29.s, p3/M, z29.s, z16.s\n"
- "st1w { z28.s }, p1, [x22, x13, LSL #2]\n"
- "fmin z30.s, p3/M, z30.s, z16.s\n"
- "fmin z31.s, p3/M, z31.s, z16.s\n"
- "st1w { z29.s }, p1, [x21, x13, LSL #2]\n"
- "st1w { z30.s }, p1, [x20, x13, LSL #2]\n"
- "st1w { z31.s }, p1, [x23, x13, LSL #2]\n"
+ "fmla z26.s, p3/M, z7.s, z13.s\n"
+ "fmla z24.s, p3/M, z5.s, z13.s\n"
+ "fmla z23.s, p3/M, z4.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x10, x13, LSL #2]\n"
+ "fmla z31.s, p3/M, z6.s, z12.s\n"
+ "fmla z28.s, p3/M, z3.s, z12.s\n"
+ "fmla z25.s, p3/M, z0.s, z12.s\n"
+ "fmla z29.s, p3/M, z8.s, z11.s\n"
+ "fmla z26.s, p3/M, z5.s, z11.s\n"
+ "fmla z23.s, p3/M, z2.s, z11.s\n"
+ "fmla z25.s, p3/M, z8.s, z13.s\n"
+ "fmla z24.s, p3/M, z7.s, z13.s\n"
+ "fmax z31.s, p3/M, z31.s, z18.s\n"
+ "fmla z23.s, p3/M, z6.s, z13.s\n"
+ "fmax z30.s, p3/M, z30.s, z18.s\n"
+ "fmax z29.s, p3/M, z29.s, z18.s\n"
+ "fmin z31.s, p3/M, z31.s, z17.s\n"
+ "st1w { z31.s }, p0, [x22, x11, LSL #2]\n"
+ "fmin z30.s, p3/M, z30.s, z17.s\n"
+ "fmin z29.s, p3/M, z29.s, z17.s\n"
+ "ldr x22, [x16, #0x20]\n"
+ "fmax z28.s, p3/M, z28.s, z18.s\n"
+ "st1w { z30.s }, p0, [x21, x11, LSL #2]\n"
+ "fmax z27.s, p3/M, z27.s, z18.s\n"
+ "fmax z26.s, p3/M, z26.s, z18.s\n"
+ "st1w { z29.s }, p0, [x20, x11, LSL #2]\n"
+ "fmin z28.s, p3/M, z28.s, z17.s\n"
+ "ldr x21, [x16, #0x28]\n"
+ "fmax z25.s, p3/M, z25.s, z18.s\n"
+ "ldr x20, [x16, #0x30]\n"
+ "fmax z24.s, p3/M, z24.s, z18.s\n"
+ "st1w { z28.s }, p0, [x19, x11, LSL #2]\n"
+ "fmin z27.s, p3/M, z27.s, z17.s\n"
+ "fmin z26.s, p3/M, z26.s, z17.s\n"
+ "ldr x19, [x16, #0x38]\n"
+ "fmin z25.s, p3/M, z25.s, z17.s\n"
+ "st1w { z27.s }, p0, [x22, x11, LSL #2]\n"
+ "fmin z24.s, p3/M, z24.s, z17.s\n"
+ "fmax z23.s, p3/M, z23.s, z18.s\n"
+ "st1w { z26.s }, p0, [x21, x11, LSL #2]\n"
+ "st1w { z25.s }, p0, [x20, x11, LSL #2]\n"
+ "fmin z23.s, p3/M, z23.s, z17.s\n"
+ "st1w { z24.s }, p0, [x19, x11, LSL #2]\n"
+ "ldr x22, [x16, #0x40]\n"
+ "st1w { z23.s }, p0, [x22, x11, LSL #2]\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z16", "z17", "z18", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z16", "z17", "z18", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
index cdf77a1cf0..84b4b3b72b 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -88,565 +88,565 @@ void sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
__asm__ __volatile__(
"ptrue p3.b\n"
- "mov x16, #0x0\n"
- "mov x4, #0x0\n"
+ "mov x2, #0x0\n"
+ "mov x3, #0x0\n"
"1:" // Tile loop
- "str x16, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "mov x25, #0x4\n"
+ "str x2, [%x[params_struct], %[offsetof_args_tile_i]]\n"
"mov x24, #0x4\n"
- "str x4, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x23, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x22, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "mul x21, x16, x23\n" // offset = tile_i * ld_input_row
- "ldr x5, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "ldr x6, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "mul x20, x16, x22\n" // offset = tile_i * ld_output_row
- "add x7, x5, x5\n"
- "madd x21, x4, x5, x21\n" // offset += tile_j * ld_input_col
+ "str x3, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "mov x23, #0x4\n"
+ "ldr x4, [%x[params_struct], %[offsetof_args_params]]\n"
+ "mov x5, #0x0\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "cntw x6\n"
+ "ldr x7, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "sub x21, XZR, x6\n"
"ldr x8, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "ldr x17, [%x[params_struct], %[offsetof_args_params]]\n"
- "cntw x16\n"
- "madd x20, x4, x6, x20\n" // offset += tile_j * ld_output_col
- "ldr x15, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "add x14, x7, x5\n"
+ "mul x19, x2, x22\n" // offset = tile_i * ld_input_row
+ "ldr x20, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "madd x19, x3, x7, x19\n" // offset += tile_j * ld_input_col
+ "ldr x17, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+ "mul x19, x19, x24\n" // offset *= kernel_stride * output_size
+ "ldr x16, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "add x8, x8, x19, LSL #2\n" // inptr[0] += offset * sizeof(float)
+ "ld1rw { z15.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "add x15, x8, x22, LSL #2\n"
+ "ld1rw { z14.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "add x14, x15, x22, LSL #2\n"
+ "ld1w { z13.s }, p3/Z, [x4]\n"
+ "add x13, x14, x22, LSL #2\n"
+ "ld1w { z0.s }, p3/Z, [x4, #1, MUL VL]\n"
+ "add x12, x13, x22, LSL #2\n"
+ "ld1w { z1.s }, p3/Z, [x4, #2, MUL VL]\n"
+ "add x11, x12, x22, LSL #2\n"
+ "ld1w { z2.s }, p3/Z, [x4, #3, MUL VL]\n"
+ "add x10, x7, x7\n"
+ "ld1w { z3.s }, p3/Z, [x4, #4, MUL VL]\n"
+ "add x9, x10, x7\n"
+ "ld1w { z4.s }, p3/Z, [x4, #5, MUL VL]\n"
+ "add x28, x9, x7\n"
+ "ld1w { z5.s }, p3/Z, [x4, #6, MUL VL]\n"
+ "add x27, x28, x7\n"
+ "ld1w { z6.s }, p3/Z, [x4, #7, MUL VL]\n"
+ "mul x19, x2, x20\n" // offset = tile_i * ld_output_row
+ "add x26, x17, x17\n"
+ "madd x19, x3, x17, x19\n" // offset += tile_j * ld_output_col
+ "mul x19, x19, x23\n" // offset *= output_tile_size
+ "add x16, x16, x19, LSL #2\n" // outptrs[0] += offset * sizeof(float)
+ "add x25, x16, x20, LSL #2\n"
+ "add x24, x25, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x26, x17\n"
"whilelt p2.s, XZR, %x[n_channels]\n"
- "mul x21, x21, x25\n" // offset *= kernel_stride * output_size
- "add x8, x8, x21, LSL #2\n" // inptr[0] += offset * sizeof(float)
- "add x13, x8, x23, LSL #2\n"
- "ld1w { z15.s }, p3/Z, [x17]\n"
- "mul x20, x20, x24\n" // offset *= output_tile_size
- "add x12, x13, x23, LSL #2\n"
- "add x15, x15, x20, LSL #2\n" // outptrs[0] += offset * sizeof(float)
- "ld1w { z0.s }, p3/Z, [x17, #1, MUL VL]\n"
- "ld1w { z1.s }, p3/Z, [x17, #2, MUL VL]\n"
- "ld1w { z2.s }, p3/Z, [x17, #3, MUL VL]\n"
- "add x11, x12, x23, LSL #2\n"
- "add x10, x14, x5\n"
- "ld1w { z3.s }, p3/Z, [x17, #4, MUL VL]\n"
- "ld1w { z4.s }, p3/Z, [x17, #5, MUL VL]\n"
- "add x9, x15, x22, LSL #2\n"
- "add x28, x11, x23, LSL #2\n"
- "ld1w { z5.s }, p3/Z, [x17, #6, MUL VL]\n"
- "ld1w { z6.s }, p3/Z, [x17, #7, MUL VL]\n"
- "addvl x17, x17, #16\n"
- "add x27, x10, x5\n"
- "add x26, x9, x22, LSL #2\n"
- "add x25, x6, x6\n"
- "ld1rw { z14.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ld1rw { z13.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "cmp x16, %x[n_channels]\n"
- "add x24, x28, x23, LSL #2\n"
- "ld1w { z7.s }, p3/Z, [x17, #-8, MUL VL]\n"
- "ld1w { z8.s }, p3/Z, [x17, #-7, MUL VL]\n"
- "add x23, x26, x22, LSL #2\n"
- "add x22, x25, x6\n"
- "ld1w { z9.s }, p2/Z, [x12, x7, LSL #2]\n"
+ "ld1w { z9.s }, p2/Z, [x14, x10, LSL #2]\n"
"ld1w { z10.s }, p2/Z, [x8]\n"
- "mov x21, #0x0\n"
- "sub x20, XZR, x16\n"
+ "addvl x4, x4, #16\n"
"ld1w { z11.s }, p2/Z, [x8, x27, LSL #2]\n"
- "ld1w { z12.s }, p2/Z, [x12, x14, LSL #2]\n"
- "addvl x17, x17, #-6\n"
+ "cmp x6, %x[n_channels]\n"
+ "ld1w { z7.s }, p3/Z, [x4, #-8, MUL VL]\n"
+ "ld1w { z8.s }, p3/Z, [x4, #-7, MUL VL]\n"
+ "addvl x4, x4, #-6\n"
+ "ld1w { z12.s }, p2/Z, [x14, x9, LSL #2]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
- "movprfx z21, z15\n fmla z21.s, p3/M, z4.s, z9.s\n"
- "movprfx z16, z15\n fmla z16.s, p3/M, z8.s, z9.s\n"
- "whilelt p1.s, x16, %x[n_channels]\n"
+ "movprfx z31, z13\n fmla z31.s, p3/M, z8.s, z9.s\n"
+ "whilelt p1.s, x6, %x[n_channels]\n"
+ "movprfx z30, z13\n fmla z30.s, p3/M, z7.s, z9.s\n"
"incw x21\n"
- "movprfx z22, z15\n fmla z22.s, p3/M, z3.s, z9.s\n"
- "movprfx z25, z15\n fmla z25.s, p3/M, z1.s, z9.s\n"
- "incw x16\n"
+ "movprfx z29, z13\n fmla z29.s, p3/M, z6.s, z9.s\n"
"mov p0.b, p2.b\n"
- "movprfx z26, z15\n fmla z26.s, p3/M, z0.s, z9.s\n"
- "fmla z21.s, p3/M, z5.s, z12.s\n"
- "incw x20\n"
- "movprfx z17, z15\n fmla z17.s, p3/M, z7.s, z9.s\n"
- "movprfx z18, z15\n fmla z18.s, p3/M, z6.s, z9.s\n"
- "movprfx z20, z15\n fmla z20.s, p3/M, z5.s, z9.s\n"
- "movprfx z24, z15\n fmla z24.s, p3/M, z2.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x11, x7, LSL #2]\n"
- "fmla z16.s, p3/M, z0.s, z10.s\n"
- "movprfx z19, z15\n fmla z19.s, p3/M, z2.s, z11.s\n"
- "ld1w { z10.s }, p2/Z, [x24]\n"
- "ld1w { z11.s }, p2/Z, [x24, x27, LSL #2]\n"
- "fmla z22.s, p3/M, z4.s, z12.s\n"
- "fmla z25.s, p3/M, z2.s, z12.s\n"
- "fmla z26.s, p3/M, z1.s, z12.s\n"
- "movprfx z28, z15\n fmla z28.s, p3/M, z6.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x11, x14, LSL #2]\n"
- "fmla z21.s, p3/M, z7.s, z9.s\n"
- "fmla z17.s, p3/M, z8.s, z12.s\n"
- "fmla z18.s, p3/M, z7.s, z12.s\n"
- "fmla z19.s, p3/M, z6.s, z12.s\n"
- "movprfx z23, z15\n fmla z23.s, p3/M, z3.s, z12.s\n"
- "movprfx z27, z15\n fmla z27.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x8, x5, LSL #2]\n"
- "movprfx z31, z15\n fmla z31.s, p3/M, z8.s, z11.s\n"
- "fmla z22.s, p3/M, z6.s, z9.s\n"
- "ld1w { z11.s }, p2/Z, [x8, x10, LSL #2]\n"
- "fmla z25.s, p3/M, z4.s, z9.s\n"
- "fmla z26.s, p3/M, z3.s, z9.s\n"
- "fmla z20.s, p3/M, z8.s, z9.s\n"
- "fmla z24.s, p3/M, z5.s, z9.s\n"
- "fmla z28.s, p3/M, z2.s, z9.s\n"
- "fmla z21.s, p3/M, z8.s, z10.s\n"
- "fmla z16.s, p3/M, z1.s, z12.s\n"
- "fmla z17.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x13, x27, LSL #2]\n"
- "fmla z18.s, p3/M, z2.s, z11.s\n"
- "fmla z19.s, p3/M, z1.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x28]\n"
- "fmla z22.s, p3/M, z7.s, z10.s\n"
- "fmla z23.s, p3/M, z6.s, z10.s\n"
- "fmla z25.s, p3/M, z5.s, z10.s\n"
- "fmla z26.s, p3/M, z4.s, z10.s\n"
- "fmla z27.s, p3/M, z3.s, z10.s\n"
+ "movprfx z27, z13\n fmla z27.s, p3/M, z5.s, z9.s\n"
+ "incw x5\n"
+ "movprfx z26, z13\n fmla z26.s, p3/M, z4.s, z9.s\n"
+ "incw x6\n"
+ "movprfx z25, z13\n fmla z25.s, p3/M, z3.s, z9.s\n"
+ "movprfx z23, z13\n fmla z23.s, p3/M, z2.s, z9.s\n"
+ "movprfx z22, z13\n fmla z22.s, p3/M, z1.s, z9.s\n"
+ "movprfx z21, z13\n fmla z21.s, p3/M, z0.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x13, x10, LSL #2]\n"
"fmla z31.s, p3/M, z0.s, z10.s\n"
- "fmla z24.s, p3/M, z6.s, z11.s\n"
- "fmla z28.s, p3/M, z3.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x28, x27, LSL #2]\n"
- "fmla z19.s, p3/M, z5.s, z12.s\n"
- "fmla z23.s, p3/M, z2.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x13, x14, LSL #2]\n"
- "fmla z27.s, p3/M, z8.s, z11.s\n"
- "fmla z31.s, p3/M, z5.s, z11.s\n"
- "movprfx z29, z15\n fmla z29.s, p3/M, z1.s, z9.s\n"
- "movprfx z30, z15\n fmla z30.s, p3/M, z0.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x13]\n"
- "fmla z29.s, p3/M, z2.s, z10.s\n"
- "fmla z30.s, p3/M, z1.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x13, x7, LSL #2]\n"
- "fmla z20.s, p3/M, z0.s, z9.s\n"
- "fmla z21.s, p3/M, z1.s, z10.s\n"
- "fmla z16.s, p3/M, z3.s, z9.s\n"
- "fmla z17.s, p3/M, z4.s, z10.s\n"
- "ld1w { z11.s }, p2/Z, [x24, x5, LSL #2]\n"
- "fmla z18.s, p3/M, z3.s, z10.s\n"
- "fmla z22.s, p3/M, z0.s, z10.s\n"
- "fmla z20.s, p3/M, z2.s, z10.s\n"
- "fmla z21.s, p3/M, z2.s, z12.s\n"
- "fmla z16.s, p3/M, z5.s, z10.s\n"
- "fmla z17.s, p3/M, z5.s, z12.s\n"
- "ld1w { z10.s }, p2/Z, [x12, x5, LSL #2]\n"
- "fmla z18.s, p3/M, z4.s, z12.s\n"
- "fmla z19.s, p3/M, z3.s, z12.s\n"
- "fmla z22.s, p3/M, z1.s, z12.s\n"
- "fmla z23.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x12, x10, LSL #2]\n"
- "fmla z28.s, p3/M, z7.s, z11.s\n"
- "fmla z29.s, p3/M, z6.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x24, x10, LSL #2]\n"
- "fmla z20.s, p3/M, z4.s, z10.s\n"
- "fmla z21.s, p3/M, z3.s, z10.s\n"
- "fmla z24.s, p3/M, z1.s, z10.s\n"
- "fmla z25.s, p3/M, z0.s, z10.s\n"
- "fmla z16.s, p3/M, z7.s, z10.s\n"
- "fmla z17.s, p3/M, z6.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x8, x7, LSL #2]\n"
- "fmla z30.s, p3/M, z8.s, z11.s\n"
- "fmla z31.s, p3/M, z7.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x11, x5, LSL #2]\n"
- "fmla z18.s, p3/M, z8.s, z12.s\n"
- "fmla z19.s, p3/M, z7.s, z12.s\n"
- "fmla z22.s, p3/M, z5.s, z12.s\n"
- "fmla z23.s, p3/M, z4.s, z12.s\n"
- "fmla z26.s, p3/M, z2.s, z12.s\n"
- "fmla z27.s, p3/M, z1.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x8, x14, LSL #2]\n"
- "addvl x8, x8, #1\n"
- "fmla z20.s, p3/M, z7.s, z11.s\n"
- "fmla z21.s, p3/M, z6.s, z11.s\n"
- "fmla z24.s, p3/M, z4.s, z11.s\n"
- "fmla z25.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z10.s }, p2/Z, [x11]\n"
+ "movprfx z28, z13\n fmla z28.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x11, x27, LSL #2]\n"
+ "fmla z30.s, p3/M, z8.s, z12.s\n"
+ "fmla z29.s, p3/M, z7.s, z12.s\n"
+ "fmla z26.s, p3/M, z5.s, z12.s\n"
+ "fmla z28.s, p3/M, z6.s, z12.s\n"
+ "fmla z25.s, p3/M, z4.s, z12.s\n"
+ "movprfx z24, z13\n fmla z24.s, p3/M, z3.s, z12.s\n"
+ "fmla z22.s, p3/M, z2.s, z12.s\n"
+ "fmla z21.s, p3/M, z1.s, z12.s\n"
+ "movprfx z20, z13\n fmla z20.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x8, x7, LSL #2]\n"
+ "movprfx z19, z13\n fmla z19.s, p3/M, z6.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x13, x9, LSL #2]\n"
+ "movprfx z16, z13\n fmla z16.s, p3/M, z8.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x8, x28, LSL #2]\n"
+ "fmla z27.s, p3/M, z8.s, z9.s\n"
+ "fmla z26.s, p3/M, z7.s, z9.s\n"
+ "fmla z25.s, p3/M, z6.s, z9.s\n"
+ "fmla z23.s, p3/M, z5.s, z9.s\n"
+ "fmla z22.s, p3/M, z4.s, z9.s\n"
+ "fmla z21.s, p3/M, z3.s, z9.s\n"
+ "fmla z19.s, p3/M, z2.s, z9.s\n"
+ "movprfx z18, z13\n fmla z18.s, p3/M, z1.s, z9.s\n"
+ "movprfx z17, z13\n fmla z17.s, p3/M, z0.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x15]\n"
+ "fmla z31.s, p3/M, z1.s, z12.s\n"
+ "ld1w { z13.s }, p3/Z, [x4]\n"
+ "fmla z30.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x15, x27, LSL #2]\n"
+ "fmla z29.s, p3/M, z2.s, z11.s\n"
"fmla z28.s, p3/M, z1.s, z11.s\n"
- "fmla z29.s, p3/M, z0.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x11, x10, LSL #2]\n"
- "fmla z16.s, p3/M, z2.s, z10.s\n"
- "fmla z17.s, p3/M, z1.s, z10.s\n"
- "fmla z18.s, p3/M, z0.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x12]\n"
- "fmla z30.s, p3/M, z2.s, z11.s\n"
- "fmla z19.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z11.s }, p2/Z, [x12]\n"
+ "fmla z26.s, p3/M, z8.s, z10.s\n"
+ "fmla z25.s, p3/M, z7.s, z10.s\n"
+ "fmla z24.s, p3/M, z6.s, z10.s\n"
+ "fmla z22.s, p3/M, z5.s, z10.s\n"
+ "fmla z21.s, p3/M, z4.s, z10.s\n"
"fmla z20.s, p3/M, z3.s, z10.s\n"
- "fmla z24.s, p3/M, z0.s, z10.s\n"
- "fmla z22.s, p3/M, z8.s, z11.s\n"
- "fmla z23.s, p3/M, z7.s, z11.s\n"
- "fmla z26.s, p3/M, z5.s, z11.s\n"
- "fmla z27.s, p3/M, z4.s, z11.s\n"
- "fmla z31.s, p3/M, z1.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x28, x7, LSL #2]\n"
- "fmla z17.s, p3/M, z2.s, z12.s\n"
- "fmla z18.s, p3/M, z1.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x12, x27, LSL #2]\n"
- "addvl x12, x12, #1\n"
- "fmla z16.s, p3/M, z6.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x11]\n"
- "fmla z29.s, p3/M, z4.s, z11.s\n"
- "fmla z30.s, p3/M, z3.s, z11.s\n"
- "fmla z19.s, p3/M, z8.s, z12.s\n"
- "fmla z23.s, p3/M, z5.s, z12.s\n"
- "fmla z27.s, p3/M, z2.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x11, x27, LSL #2]\n"
- "addvl x11, x11, #1\n"
- "fmla z20.s, p3/M, z6.s, z10.s\n"
- "fmla z24.s, p3/M, z3.s, z10.s\n"
- "fmla z28.s, p3/M, z0.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x24, x7, LSL #2]\n"
- "fmla z31.s, p3/M, z2.s, z12.s\n"
- "fmla z29.s, p3/M, z7.s, z10.s\n"
+ "fmla z18.s, p3/M, z2.s, z10.s\n"
+ "fmla z17.s, p3/M, z1.s, z10.s\n"
+ "fmla z16.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x15, x10, LSL #2]\n"
+ "fmla z31.s, p3/M, z3.s, z9.s\n"
+ "fmla z27.s, p3/M, z0.s, z9.s\n"
+ "fmla z28.s, p3/M, z5.s, z12.s\n"
+ "fmla z24.s, p3/M, z2.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x15, x9, LSL #2]\n"
+ "fmla z23.s, p3/M, z6.s, z11.s\n"
+ "fmla z19.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x12, x27, LSL #2]\n"
+ "fmla z31.s, p3/M, z5.s, z10.s\n"
+ "fmla z30.s, p3/M, z4.s, z10.s\n"
+ "fmla z29.s, p3/M, z3.s, z10.s\n"
+ "fmla z27.s, p3/M, z2.s, z10.s\n"
+ "fmla z26.s, p3/M, z1.s, z10.s\n"
+ "fmla z25.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x14, x7, LSL #2]\n"
+ "fmla z20.s, p3/M, z8.s, z11.s\n"
+ "fmla z16.s, p3/M, z5.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x11, x7, LSL #2]\n"
+ "fmla z30.s, p3/M, z5.s, z12.s\n"
+ "fmla z29.s, p3/M, z4.s, z12.s\n"
+ "fmla z28.s, p3/M, z3.s, z12.s\n"
+ "fmla z26.s, p3/M, z2.s, z12.s\n"
+ "fmla z25.s, p3/M, z1.s, z12.s\n"
+ "fmla z24.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x14, x28, LSL #2]\n"
+ "fmla z19.s, p3/M, z7.s, z11.s\n"
+ "fmla z18.s, p3/M, z6.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x11, x28, LSL #2]\n"
+ "fmla z31.s, p3/M, z7.s, z10.s\n"
"fmla z30.s, p3/M, z6.s, z10.s\n"
- "fmla z24.s, p3/M, z8.s, z11.s\n"
- "fmla z25.s, p3/M, z7.s, z11.s\n"
+ "fmla z27.s, p3/M, z4.s, z10.s\n"
+ "fmla z26.s, p3/M, z3.s, z10.s\n"
+ "fmla z23.s, p3/M, z1.s, z10.s\n"
+ "fmla z22.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x8, x10, LSL #2]\n"
+ "fmla z17.s, p3/M, z8.s, z11.s\n"
+ "fmla z16.s, p3/M, z7.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x13, x7, LSL #2]\n"
+ "fmla z29.s, p3/M, z8.s, z12.s\n"
+ "fmla z28.s, p3/M, z7.s, z12.s\n"
+ "fmla z25.s, p3/M, z5.s, z12.s\n"
+ "fmla z24.s, p3/M, z4.s, z12.s\n"
+ "fmla z21.s, p3/M, z2.s, z12.s\n"
+ "fmla z20.s, p3/M, z1.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x8, x9, LSL #2]\n"
+ "addvl x8, x8, #1\n"
+ "fmla z31.s, p3/M, z2.s, z10.s\n"
+ "fmla z30.s, p3/M, z1.s, z10.s\n"
+ "fmla z29.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x14]\n"
+ "fmla z27.s, p3/M, z7.s, z11.s\n"
"fmla z26.s, p3/M, z6.s, z11.s\n"
- "fmla z28.s, p3/M, z5.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x28, x14, LSL #2]\n"
- "fmla z27.s, p3/M, z5.s, z12.s\n"
- "fmla z29.s, p3/M, z5.s, z11.s\n"
- "fmla z30.s, p3/M, z4.s, z11.s\n"
- "fmla z31.s, p3/M, z3.s, z11.s\n"
- "fmla z23.s, p3/M, z8.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x24, x14, LSL #2]\n"
- "fmla z28.s, p3/M, z8.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x13, x5, LSL #2]\n"
+ "fmla z23.s, p3/M, z4.s, z11.s\n"
+ "fmla z22.s, p3/M, z3.s, z11.s\n"
+ "fmla z19.s, p3/M, z1.s, z11.s\n"
+ "fmla z18.s, p3/M, z0.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x13, x28, LSL #2]\n"
+ "fmla z30.s, p3/M, z2.s, z12.s\n"
+ "fmla z29.s, p3/M, z1.s, z12.s\n"
+ "fmla z28.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x14, x27, LSL #2]\n"
+ "addvl x14, x14, #1\n"
+ "fmla z31.s, p3/M, z6.s, z10.s\n"
+ "ld1w { z9.s }, p1/Z, [x14, x10, LSL #2]\n"
+ "fmla z27.s, p3/M, z3.s, z10.s\n"
+ "fmla z23.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x13]\n"
"fmla z25.s, p3/M, z8.s, z11.s\n"
- "fmla z26.s, p3/M, z7.s, z11.s\n"
- "addvl x24, x24, #1\n"
- "fmla z27.s, p3/M, z6.s, z11.s\n"
- "fmla z29.s, p3/M, z8.s, z12.s\n"
- "ld1w { z11.s }, p2/Z, [x13, x10, LSL #2]\n"
+ "fmla z24.s, p3/M, z7.s, z11.s\n"
+ "fmla z21.s, p3/M, z5.s, z11.s\n"
+ "fmla z20.s, p3/M, z4.s, z11.s\n"
+ "fmla z17.s, p3/M, z2.s, z11.s\n"
+ "fmla z16.s, p3/M, z1.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x12, x10, LSL #2]\n"
+ "fmla z28.s, p3/M, z8.s, z12.s\n"
+ "fmla z24.s, p3/M, z5.s, z12.s\n"
+ "fmla z20.s, p3/M, z2.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x13, x27, LSL #2]\n"
"addvl x13, x13, #1\n"
- "fmla z30.s, p3/M, z7.s, z12.s\n"
- "fmla z31.s, p3/M, z6.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x28, x5, LSL #2]\n"
- "fmla z16.s, p3/M, z4.s, z10.s\n"
- "fmla z17.s, p3/M, z3.s, z10.s\n"
- "fmax z16.s, p3/M, z16.s, z14.s\n"
- "fmax z17.s, p3/M, z17.s, z14.s\n"
- "fmla z20.s, p3/M, z1.s, z10.s\n"
- "fmla z21.s, p3/M, z0.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x28, x10, LSL #2]\n"
- "fmax z20.s, p3/M, z20.s, z14.s\n"
+ "fmla z27.s, p3/M, z6.s, z10.s\n"
+ "fmla z23.s, p3/M, z3.s, z10.s\n"
+ "fmla z19.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x11, x10, LSL #2]\n"
+ "fmla z22.s, p3/M, z7.s, z11.s\n"
+ "fmla z21.s, p3/M, z6.s, z11.s\n"
+ "fmla z23.s, p3/M, z8.s, z11.s\n"
+ "fmla z19.s, p3/M, z5.s, z11.s\n"
+ "fmla z18.s, p3/M, z4.s, z11.s\n"
+ "fmla z17.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x12, x9, LSL #2]\n"
+ "fmla z24.s, p3/M, z8.s, z12.s\n"
+ "fmla z20.s, p3/M, z5.s, z12.s\n"
+ "fmla z16.s, p3/M, z2.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x11, x9, LSL #2]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z19.s, p3/M, z8.s, z10.s\n"
+ "fmla z18.s, p3/M, z7.s, z10.s\n"
+ "fmla z17.s, p3/M, z6.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x15, x7, LSL #2]\n"
+ "fmla z22.s, p3/M, z8.s, z11.s\n"
+ "fmla z21.s, p3/M, z7.s, z11.s\n"
+ "fmla z20.s, p3/M, z6.s, z11.s\n"
"fmla z18.s, p3/M, z5.s, z11.s\n"
- "fmla z19.s, p3/M, z4.s, z11.s\n"
- "fmax z18.s, p3/M, z18.s, z14.s\n"
- "fmax z19.s, p3/M, z19.s, z14.s\n"
- "fmla z22.s, p3/M, z2.s, z11.s\n"
- "fmla z23.s, p3/M, z1.s, z11.s\n"
- "fmax z21.s, p3/M, z21.s, z14.s\n"
- "fmax z22.s, p3/M, z22.s, z14.s\n"
- "fmla z24.s, p3/M, z7.s, z12.s\n"
- "fmla z25.s, p3/M, z6.s, z12.s\n"
- "fmax z23.s, p3/M, z23.s, z14.s\n"
- "fmax z24.s, p3/M, z24.s, z14.s\n"
- "fmla z28.s, p3/M, z4.s, z12.s\n"
- "fmla z29.s, p3/M, z3.s, z12.s\n"
- "fmax z25.s, p3/M, z25.s, z14.s\n"
- "fmax z28.s, p3/M, z28.s, z14.s\n"
- "fmla z26.s, p3/M, z8.s, z10.s\n"
- "fmla z27.s, p3/M, z7.s, z10.s\n"
- "fmax z26.s, p3/M, z26.s, z14.s\n"
- "fmax z27.s, p3/M, z27.s, z14.s\n"
- "fmla z30.s, p3/M, z5.s, z10.s\n"
+ "fmla z17.s, p3/M, z4.s, z11.s\n"
+ "fmla z16.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x15, x28, LSL #2]\n"
+ "addvl x15, x15, #1\n"
+ "fmla z18.s, p3/M, z8.s, z12.s\n"
"fmla z31.s, p3/M, z4.s, z10.s\n"
- "fmax z29.s, p3/M, z29.s, z14.s\n"
- "fmax z30.s, p3/M, z30.s, z14.s\n"
- "fmax z31.s, p3/M, z31.s, z14.s\n"
- "ld1w { z15.s }, p3/Z, [x17]\n"
- "ld1w { z0.s }, p3/Z, [x17, #1, MUL VL]\n"
- "whilelt p2.s, x21, %x[n_channels]\n"
- "ld1w { z1.s }, p3/Z, [x17, #2, MUL VL]\n"
- "ld1w { z2.s }, p3/Z, [x17, #3, MUL VL]\n"
- "cmp x16, %x[n_channels]\n"
- "fmin z16.s, p3/M, z16.s, z13.s\n"
- "ld1w { z3.s }, p3/Z, [x17, #4, MUL VL]\n"
- "ld1w { z4.s }, p3/Z, [x17, #5, MUL VL]\n"
- "fmin z17.s, p3/M, z17.s, z13.s\n"
- "fmin z18.s, p3/M, z18.s, z13.s\n"
- "ld1w { z5.s }, p3/Z, [x17, #6, MUL VL]\n"
- "ld1w { z6.s }, p3/Z, [x17, #7, MUL VL]\n"
- "addvl x17, x17, #16\n"
- "fmin z19.s, p3/M, z19.s, z13.s\n"
- "fmin z20.s, p3/M, z20.s, z13.s\n"
- "fmin z21.s, p3/M, z21.s, z13.s\n"
- "ld1w { z9.s }, p1/Z, [x12, x7, LSL #2]\n"
- "ld1w { z10.s }, p1/Z, [x8]\n"
- "fmin z22.s, p3/M, z22.s, z13.s\n"
- "fmin z23.s, p3/M, z23.s, z13.s\n"
+ "fmla z17.s, p3/M, z7.s, z12.s\n"
+ "fmla z16.s, p3/M, z6.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x12, x7, LSL #2]\n"
+ "fmla z30.s, p3/M, z3.s, z10.s\n"
+ "fmla z27.s, p3/M, z1.s, z10.s\n"
+ "fmla z26.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x12, x28, LSL #2]\n"
+ "whilelt p2.s, x5, %x[n_channels]\n"
+ "fmla z29.s, p3/M, z5.s, z11.s\n"
+ "ld1w { z0.s }, p3/Z, [x4, #1, MUL VL]\n"
+ "addvl x12, x12, #1\n"
+ "fmla z28.s, p3/M, z4.s, z11.s\n"
+ "cmp x6, %x[n_channels]\n"
+ "fmla z25.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z2.s }, p3/Z, [x4, #3, MUL VL]\n"
+ "fmla z24.s, p3/M, z1.s, z11.s\n"
"ld1w { z11.s }, p1/Z, [x8, x27, LSL #2]\n"
- "ld1w { z12.s }, p1/Z, [x12, x14, LSL #2]\n"
- "fmin z24.s, p3/M, z24.s, z13.s\n"
- "fmin z25.s, p3/M, z25.s, z13.s\n"
- "st1w { z16.s }, p0, [x15]\n"
- "ld1w { z7.s }, p3/Z, [x17, #-8, MUL VL]\n"
- "fmin z26.s, p3/M, z26.s, z13.s\n"
- "fmin z27.s, p3/M, z27.s, z13.s\n"
- "st1w { z17.s }, p0, [x15, x6, LSL #2]\n"
- "ld1w { z8.s }, p3/Z, [x17, #-7, MUL VL]\n"
- "fmin z28.s, p3/M, z28.s, z13.s\n"
- "fmin z29.s, p3/M, z29.s, z13.s\n"
- "st1w { z18.s }, p0, [x15, x25, LSL #2]\n"
- "fmin z30.s, p3/M, z30.s, z13.s\n"
- "fmin z31.s, p3/M, z31.s, z13.s\n"
- "st1w { z19.s }, p0, [x15, x22, LSL #2]\n"
- "addvl x28, x28, #1\n"
- "st1w { z20.s }, p0, [x9]\n"
- "addvl x15, x15, #1\n"
- "st1w { z21.s }, p0, [x9, x6, LSL #2]\n"
- "addvl x17, x17, #-6\n"
- "st1w { z22.s }, p0, [x9, x25, LSL #2]\n"
- "st1w { z23.s }, p0, [x9, x22, LSL #2]\n"
- "addvl x9, x9, #1\n"
- "st1w { z24.s }, p0, [x26]\n"
- "st1w { z25.s }, p0, [x26, x6, LSL #2]\n"
- "st1w { z26.s }, p0, [x26, x25, LSL #2]\n"
- "st1w { z27.s }, p0, [x26, x22, LSL #2]\n"
- "addvl x26, x26, #1\n"
- "st1w { z28.s }, p0, [x23]\n"
- "st1w { z29.s }, p0, [x23, x6, LSL #2]\n"
- "st1w { z30.s }, p0, [x23, x25, LSL #2]\n"
- "st1w { z31.s }, p0, [x23, x22, LSL #2]\n"
+ "fmla z23.s, p3/M, z7.s, z12.s\n"
+ "ld1w { z1.s }, p3/Z, [x4, #2, MUL VL]\n"
+ "fmla z22.s, p3/M, z6.s, z12.s\n"
+ "ld1w { z6.s }, p3/Z, [x4, #7, MUL VL]\n"
+ "fmla z19.s, p3/M, z4.s, z12.s\n"
+ "fmla z18.s, p3/M, z3.s, z12.s\n"
+ "ld1w { z12.s }, p1/Z, [x14, x9, LSL #2]\n"
+ "fmla z21.s, p3/M, z8.s, z10.s\n"
+ "ld1w { z3.s }, p3/Z, [x4, #4, MUL VL]\n"
+ "fmla z20.s, p3/M, z7.s, z10.s\n"
+ "fmla z17.s, p3/M, z5.s, z10.s\n"
+ "ld1w { z5.s }, p3/Z, [x4, #6, MUL VL]\n"
+ "fmla z16.s, p3/M, z4.s, z10.s\n"
+ "ld1w { z10.s }, p1/Z, [x8]\n"
+ "fmax z31.s, p3/M, z31.s, z15.s\n"
+ "ld1w { z4.s }, p3/Z, [x4, #5, MUL VL]\n"
+ "addvl x4, x4, #16\n"
+ "fmax z30.s, p3/M, z30.s, z15.s\n"
+ "ld1w { z7.s }, p3/Z, [x4, #-8, MUL VL]\n"
+ "fmax z29.s, p3/M, z29.s, z15.s\n"
+ "ld1w { z8.s }, p3/Z, [x4, #-7, MUL VL]\n"
+ "addvl x4, x4, #-6\n"
+ "fmin z31.s, p3/M, z31.s, z14.s\n"
+ "st1w { z31.s }, p0, [x16]\n"
+ "fmin z30.s, p3/M, z30.s, z14.s\n"
+ "fmin z29.s, p3/M, z29.s, z14.s\n"
+ "st1w { z30.s }, p0, [x16, x17, LSL #2]\n"
+ "fmax z28.s, p3/M, z28.s, z15.s\n"
+ "fmax z27.s, p3/M, z27.s, z15.s\n"
+ "st1w { z29.s }, p0, [x16, x26, LSL #2]\n"
+ "fmax z26.s, p3/M, z26.s, z15.s\n"
+ "fmax z25.s, p3/M, z25.s, z15.s\n"
+ "fmax z24.s, p3/M, z24.s, z15.s\n"
+ "fmin z28.s, p3/M, z28.s, z14.s\n"
+ "st1w { z28.s }, p0, [x16, x22, LSL #2]\n"
+ "fmin z27.s, p3/M, z27.s, z14.s\n"
+ "addvl x16, x16, #1\n"
+ "fmin z26.s, p3/M, z26.s, z14.s\n"
+ "st1w { z27.s }, p0, [x25]\n"
+ "fmin z25.s, p3/M, z25.s, z14.s\n"
+ "fmin z24.s, p3/M, z24.s, z14.s\n"
+ "st1w { z26.s }, p0, [x25, x17, LSL #2]\n"
+ "fmax z23.s, p3/M, z23.s, z15.s\n"
+ "st1w { z25.s }, p0, [x25, x26, LSL #2]\n"
+ "fmax z22.s, p3/M, z22.s, z15.s\n"
+ "fmax z21.s, p3/M, z21.s, z15.s\n"
+ "st1w { z24.s }, p0, [x25, x22, LSL #2]\n"
+ "addvl x25, x25, #1\n"
+ "fmin z23.s, p3/M, z23.s, z14.s\n"
+ "st1w { z23.s }, p0, [x24]\n"
+ "fmin z22.s, p3/M, z22.s, z14.s\n"
+ "fmin z21.s, p3/M, z21.s, z14.s\n"
+ "st1w { z22.s }, p0, [x24, x17, LSL #2]\n"
+ "fmax z20.s, p3/M, z20.s, z15.s\n"
+ "fmax z19.s, p3/M, z19.s, z15.s\n"
+ "st1w { z21.s }, p0, [x24, x26, LSL #2]\n"
+ "fmax z18.s, p3/M, z18.s, z15.s\n"
+ "fmax z17.s, p3/M, z17.s, z15.s\n"
+ "fmax z16.s, p3/M, z16.s, z15.s\n"
+ "fmin z20.s, p3/M, z20.s, z14.s\n"
+ "st1w { z20.s }, p0, [x24, x22, LSL #2]\n"
+ "fmin z19.s, p3/M, z19.s, z14.s\n"
+ "addvl x24, x24, #1\n"
+ "fmin z18.s, p3/M, z18.s, z14.s\n"
+ "st1w { z19.s }, p0, [x23]\n"
+ "fmin z17.s, p3/M, z17.s, z14.s\n"
+ "fmin z16.s, p3/M, z16.s, z14.s\n"
+ "st1w { z18.s }, p0, [x23, x17, LSL #2]\n"
+ "st1w { z17.s }, p0, [x23, x26, LSL #2]\n"
+ "st1w { z16.s }, p0, [x23, x22, LSL #2]\n"
"addvl x23, x23, #1\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
- "movprfx z21, z15\n fmla z21.s, p3/M, z4.s, z9.s\n"
- "movprfx z16, z15\n fmla z16.s, p3/M, z8.s, z9.s\n"
- "ldr x4, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x16, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "movprfx z22, z15\n fmla z22.s, p3/M, z3.s, z9.s\n"
- "movprfx z25, z15\n fmla z25.s, p3/M, z1.s, z9.s\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "add x4, x4, #0x1\n"
- "movprfx z26, z15\n fmla z26.s, p3/M, z0.s, z9.s\n"
- "fmla z21.s, p3/M, z5.s, z12.s\n"
- "cmp x4, x20\n"
- "add x21, x16, #0x1\n"
- "movprfx z17, z15\n fmla z17.s, p3/M, z7.s, z9.s\n"
- "movprfx z18, z15\n fmla z18.s, p3/M, z6.s, z9.s\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "csel x16, x16, x21, LT\n"
- "movprfx z20, z15\n fmla z20.s, p3/M, z5.s, z9.s\n"
- "movprfx z24, z15\n fmla z24.s, p3/M, z2.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x11, x7, LSL #2]\n"
+ "movprfx z31, z13\n fmla z31.s, p3/M, z8.s, z9.s\n"
+ "ldr x2, [%x[params_struct], %[offsetof_args_tile_i]]\n"
"mov p0.b, p2.b\n"
- "fmla z16.s, p3/M, z0.s, z10.s\n"
- "movprfx z19, z15\n fmla z19.s, p3/M, z2.s, z11.s\n"
- "ld1w { z10.s }, p2/Z, [x24]\n"
- "ld1w { z11.s }, p2/Z, [x24, x27, LSL #2]\n"
- "fmla z22.s, p3/M, z4.s, z12.s\n"
- "fmla z25.s, p3/M, z2.s, z12.s\n"
- "csel x4, x4, XZR, LT\n"
- "cmp x16, x20\n"
- "fmla z26.s, p3/M, z1.s, z12.s\n"
- "movprfx z28, z15\n fmla z28.s, p3/M, z6.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x11, x14, LSL #2]\n"
- "fmla z21.s, p3/M, z7.s, z9.s\n"
- "fmla z17.s, p3/M, z8.s, z12.s\n"
- "fmla z18.s, p3/M, z7.s, z12.s\n"
- "fmla z19.s, p3/M, z6.s, z12.s\n"
- "movprfx z23, z15\n fmla z23.s, p3/M, z3.s, z12.s\n"
- "movprfx z27, z15\n fmla z27.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x8, x5, LSL #2]\n"
- "movprfx z31, z15\n fmla z31.s, p3/M, z8.s, z11.s\n"
- "fmla z22.s, p3/M, z6.s, z9.s\n"
- "ld1w { z11.s }, p2/Z, [x8, x10, LSL #2]\n"
- "fmla z25.s, p3/M, z4.s, z9.s\n"
- "fmla z26.s, p3/M, z3.s, z9.s\n"
- "fmla z20.s, p3/M, z8.s, z9.s\n"
- "fmla z24.s, p3/M, z5.s, z9.s\n"
- "fmla z28.s, p3/M, z2.s, z9.s\n"
- "fmla z21.s, p3/M, z8.s, z10.s\n"
- "fmla z16.s, p3/M, z1.s, z12.s\n"
- "fmla z17.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x13, x27, LSL #2]\n"
- "fmla z18.s, p3/M, z2.s, z11.s\n"
- "fmla z19.s, p3/M, z1.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x28]\n"
- "fmla z22.s, p3/M, z7.s, z10.s\n"
- "fmla z23.s, p3/M, z6.s, z10.s\n"
- "fmla z25.s, p3/M, z5.s, z10.s\n"
- "fmla z26.s, p3/M, z4.s, z10.s\n"
- "fmla z27.s, p3/M, z3.s, z10.s\n"
+ "movprfx z30, z13\n fmla z30.s, p3/M, z7.s, z9.s\n"
+ "ldr x3, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "add x21, x2, #0x1\n"
+ "movprfx z29, z13\n fmla z29.s, p3/M, z6.s, z9.s\n"
+ "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "movprfx z27, z13\n fmla z27.s, p3/M, z5.s, z9.s\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "add x3, x3, #0x1\n"
+ "movprfx z26, z13\n fmla z26.s, p3/M, z4.s, z9.s\n"
+ "cmp x3, x19\n"
+ "movprfx z25, z13\n fmla z25.s, p3/M, z3.s, z9.s\n"
+ "movprfx z23, z13\n fmla z23.s, p3/M, z2.s, z9.s\n"
+ "csel x3, x3, XZR, LT\n"
+ "movprfx z22, z13\n fmla z22.s, p3/M, z1.s, z9.s\n"
+ "csel x2, x2, x21, LT\n"
+ "movprfx z21, z13\n fmla z21.s, p3/M, z0.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x13, x10, LSL #2]\n"
+ "cmp x2, x20\n"
"fmla z31.s, p3/M, z0.s, z10.s\n"
- "fmla z24.s, p3/M, z6.s, z11.s\n"
- "fmla z28.s, p3/M, z3.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x28, x27, LSL #2]\n"
- "fmla z19.s, p3/M, z5.s, z12.s\n"
- "fmla z23.s, p3/M, z2.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x13, x14, LSL #2]\n"
- "fmla z27.s, p3/M, z8.s, z11.s\n"
- "fmla z31.s, p3/M, z5.s, z11.s\n"
- "movprfx z29, z15\n fmla z29.s, p3/M, z1.s, z9.s\n"
- "movprfx z30, z15\n fmla z30.s, p3/M, z0.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x13]\n"
- "fmla z29.s, p3/M, z2.s, z10.s\n"
- "fmla z30.s, p3/M, z1.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x13, x7, LSL #2]\n"
- "fmla z20.s, p3/M, z0.s, z9.s\n"
- "fmla z21.s, p3/M, z1.s, z10.s\n"
- "fmla z16.s, p3/M, z3.s, z9.s\n"
- "fmla z17.s, p3/M, z4.s, z10.s\n"
- "ld1w { z11.s }, p2/Z, [x24, x5, LSL #2]\n"
- "fmla z18.s, p3/M, z3.s, z10.s\n"
- "fmla z22.s, p3/M, z0.s, z10.s\n"
- "fmla z20.s, p3/M, z2.s, z10.s\n"
- "fmla z21.s, p3/M, z2.s, z12.s\n"
- "fmla z16.s, p3/M, z5.s, z10.s\n"
- "fmla z17.s, p3/M, z5.s, z12.s\n"
- "ld1w { z10.s }, p2/Z, [x12, x5, LSL #2]\n"
- "fmla z18.s, p3/M, z4.s, z12.s\n"
- "fmla z19.s, p3/M, z3.s, z12.s\n"
- "fmla z22.s, p3/M, z1.s, z12.s\n"
- "fmla z23.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x12, x10, LSL #2]\n"
- "fmla z28.s, p3/M, z7.s, z11.s\n"
- "fmla z29.s, p3/M, z6.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x24, x10, LSL #2]\n"
- "fmla z20.s, p3/M, z4.s, z10.s\n"
- "fmla z21.s, p3/M, z3.s, z10.s\n"
- "fmla z24.s, p3/M, z1.s, z10.s\n"
- "fmla z25.s, p3/M, z0.s, z10.s\n"
- "fmla z16.s, p3/M, z7.s, z10.s\n"
- "fmla z17.s, p3/M, z6.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x8, x7, LSL #2]\n"
- "fmla z30.s, p3/M, z8.s, z11.s\n"
- "fmla z31.s, p3/M, z7.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x11, x5, LSL #2]\n"
- "fmla z18.s, p3/M, z8.s, z12.s\n"
- "fmla z19.s, p3/M, z7.s, z12.s\n"
- "fmla z22.s, p3/M, z5.s, z12.s\n"
- "fmla z23.s, p3/M, z4.s, z12.s\n"
- "fmla z26.s, p3/M, z2.s, z12.s\n"
- "fmla z27.s, p3/M, z1.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x8, x14, LSL #2]\n"
- "fmla z20.s, p3/M, z7.s, z11.s\n"
- "fmla z21.s, p3/M, z6.s, z11.s\n"
- "fmla z24.s, p3/M, z4.s, z11.s\n"
- "fmla z25.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z10.s }, p2/Z, [x11]\n"
+ "movprfx z28, z13\n fmla z28.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x11, x27, LSL #2]\n"
+ "fmla z30.s, p3/M, z8.s, z12.s\n"
+ "fmla z29.s, p3/M, z7.s, z12.s\n"
+ "fmla z26.s, p3/M, z5.s, z12.s\n"
+ "fmla z28.s, p3/M, z6.s, z12.s\n"
+ "fmla z25.s, p3/M, z4.s, z12.s\n"
+ "movprfx z24, z13\n fmla z24.s, p3/M, z3.s, z12.s\n"
+ "fmla z22.s, p3/M, z2.s, z12.s\n"
+ "fmla z21.s, p3/M, z1.s, z12.s\n"
+ "movprfx z20, z13\n fmla z20.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x8, x7, LSL #2]\n"
+ "movprfx z19, z13\n fmla z19.s, p3/M, z6.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x13, x9, LSL #2]\n"
+ "movprfx z16, z13\n fmla z16.s, p3/M, z8.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x8, x28, LSL #2]\n"
+ "fmla z27.s, p3/M, z8.s, z9.s\n"
+ "fmla z26.s, p3/M, z7.s, z9.s\n"
+ "fmla z25.s, p3/M, z6.s, z9.s\n"
+ "fmla z23.s, p3/M, z5.s, z9.s\n"
+ "fmla z22.s, p3/M, z4.s, z9.s\n"
+ "fmla z21.s, p3/M, z3.s, z9.s\n"
+ "fmla z19.s, p3/M, z2.s, z9.s\n"
+ "movprfx z18, z13\n fmla z18.s, p3/M, z1.s, z9.s\n"
+ "movprfx z17, z13\n fmla z17.s, p3/M, z0.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x15]\n"
+ "fmla z31.s, p3/M, z1.s, z12.s\n"
+ "fmla z30.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x15, x27, LSL #2]\n"
+ "fmla z29.s, p3/M, z2.s, z11.s\n"
"fmla z28.s, p3/M, z1.s, z11.s\n"
- "fmla z29.s, p3/M, z0.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x11, x10, LSL #2]\n"
- "fmla z16.s, p3/M, z2.s, z10.s\n"
- "fmla z17.s, p3/M, z1.s, z10.s\n"
- "fmla z18.s, p3/M, z0.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x12]\n"
- "fmla z30.s, p3/M, z2.s, z11.s\n"
- "fmla z19.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z11.s }, p2/Z, [x12]\n"
+ "fmla z26.s, p3/M, z8.s, z10.s\n"
+ "fmla z25.s, p3/M, z7.s, z10.s\n"
+ "fmla z24.s, p3/M, z6.s, z10.s\n"
+ "fmla z22.s, p3/M, z5.s, z10.s\n"
+ "fmla z21.s, p3/M, z4.s, z10.s\n"
"fmla z20.s, p3/M, z3.s, z10.s\n"
- "fmla z24.s, p3/M, z0.s, z10.s\n"
- "fmla z22.s, p3/M, z8.s, z11.s\n"
- "fmla z23.s, p3/M, z7.s, z11.s\n"
- "fmla z26.s, p3/M, z5.s, z11.s\n"
- "fmla z27.s, p3/M, z4.s, z11.s\n"
- "fmla z31.s, p3/M, z1.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x28, x7, LSL #2]\n"
- "fmla z17.s, p3/M, z2.s, z12.s\n"
- "fmla z18.s, p3/M, z1.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x12, x27, LSL #2]\n"
- "fmla z16.s, p3/M, z6.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x11]\n"
- "fmla z29.s, p3/M, z4.s, z11.s\n"
- "fmla z30.s, p3/M, z3.s, z11.s\n"
- "fmla z19.s, p3/M, z8.s, z12.s\n"
- "fmla z23.s, p3/M, z5.s, z12.s\n"
- "fmla z27.s, p3/M, z2.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x11, x27, LSL #2]\n"
- "fmla z20.s, p3/M, z6.s, z10.s\n"
- "fmla z24.s, p3/M, z3.s, z10.s\n"
- "fmla z28.s, p3/M, z0.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x24, x7, LSL #2]\n"
- "fmla z31.s, p3/M, z2.s, z12.s\n"
- "fmla z29.s, p3/M, z7.s, z10.s\n"
+ "fmla z18.s, p3/M, z2.s, z10.s\n"
+ "fmla z17.s, p3/M, z1.s, z10.s\n"
+ "fmla z16.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x15, x10, LSL #2]\n"
+ "fmla z31.s, p3/M, z3.s, z9.s\n"
+ "fmla z27.s, p3/M, z0.s, z9.s\n"
+ "fmla z28.s, p3/M, z5.s, z12.s\n"
+ "fmla z24.s, p3/M, z2.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x15, x9, LSL #2]\n"
+ "fmla z23.s, p3/M, z6.s, z11.s\n"
+ "fmla z19.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x12, x27, LSL #2]\n"
+ "fmla z31.s, p3/M, z5.s, z10.s\n"
+ "fmla z30.s, p3/M, z4.s, z10.s\n"
+ "fmla z29.s, p3/M, z3.s, z10.s\n"
+ "fmla z27.s, p3/M, z2.s, z10.s\n"
+ "fmla z26.s, p3/M, z1.s, z10.s\n"
+ "fmla z25.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x14, x7, LSL #2]\n"
+ "fmla z20.s, p3/M, z8.s, z11.s\n"
+ "fmla z16.s, p3/M, z5.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x11, x7, LSL #2]\n"
+ "fmla z30.s, p3/M, z5.s, z12.s\n"
+ "fmla z29.s, p3/M, z4.s, z12.s\n"
+ "fmla z28.s, p3/M, z3.s, z12.s\n"
+ "fmla z26.s, p3/M, z2.s, z12.s\n"
+ "fmla z25.s, p3/M, z1.s, z12.s\n"
+ "fmla z24.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x14, x28, LSL #2]\n"
+ "fmla z19.s, p3/M, z7.s, z11.s\n"
+ "fmla z18.s, p3/M, z6.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x11, x28, LSL #2]\n"
+ "fmla z31.s, p3/M, z7.s, z10.s\n"
"fmla z30.s, p3/M, z6.s, z10.s\n"
- "fmla z24.s, p3/M, z8.s, z11.s\n"
- "fmla z25.s, p3/M, z7.s, z11.s\n"
+ "fmla z27.s, p3/M, z4.s, z10.s\n"
+ "fmla z26.s, p3/M, z3.s, z10.s\n"
+ "fmla z23.s, p3/M, z1.s, z10.s\n"
+ "fmla z22.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x8, x10, LSL #2]\n"
+ "fmla z17.s, p3/M, z8.s, z11.s\n"
+ "fmla z16.s, p3/M, z7.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x13, x7, LSL #2]\n"
+ "fmla z29.s, p3/M, z8.s, z12.s\n"
+ "fmla z28.s, p3/M, z7.s, z12.s\n"
+ "fmla z25.s, p3/M, z5.s, z12.s\n"
+ "fmla z24.s, p3/M, z4.s, z12.s\n"
+ "fmla z21.s, p3/M, z2.s, z12.s\n"
+ "fmla z20.s, p3/M, z1.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x8, x9, LSL #2]\n"
+ "fmla z31.s, p3/M, z2.s, z10.s\n"
+ "fmla z30.s, p3/M, z1.s, z10.s\n"
+ "fmla z29.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x14]\n"
+ "fmla z27.s, p3/M, z7.s, z11.s\n"
"fmla z26.s, p3/M, z6.s, z11.s\n"
- "fmla z28.s, p3/M, z5.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x28, x14, LSL #2]\n"
- "fmla z27.s, p3/M, z5.s, z12.s\n"
- "fmla z29.s, p3/M, z5.s, z11.s\n"
- "fmla z30.s, p3/M, z4.s, z11.s\n"
- "fmla z31.s, p3/M, z3.s, z11.s\n"
- "fmla z23.s, p3/M, z8.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x24, x14, LSL #2]\n"
- "fmla z28.s, p3/M, z8.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x13, x5, LSL #2]\n"
+ "fmla z23.s, p3/M, z4.s, z11.s\n"
+ "fmla z22.s, p3/M, z3.s, z11.s\n"
+ "fmla z19.s, p3/M, z1.s, z11.s\n"
+ "fmla z18.s, p3/M, z0.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x13, x28, LSL #2]\n"
+ "fmla z30.s, p3/M, z2.s, z12.s\n"
+ "fmla z29.s, p3/M, z1.s, z12.s\n"
+ "fmla z28.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x14, x27, LSL #2]\n"
+ "fmla z31.s, p3/M, z6.s, z10.s\n"
+ "fmla z27.s, p3/M, z3.s, z10.s\n"
+ "fmla z23.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x13]\n"
"fmla z25.s, p3/M, z8.s, z11.s\n"
- "fmla z26.s, p3/M, z7.s, z11.s\n"
- "fmla z27.s, p3/M, z6.s, z11.s\n"
- "fmla z29.s, p3/M, z8.s, z12.s\n"
- "ld1w { z11.s }, p2/Z, [x13, x10, LSL #2]\n"
- "fmla z30.s, p3/M, z7.s, z12.s\n"
- "fmla z31.s, p3/M, z6.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x28, x5, LSL #2]\n"
- "fmla z16.s, p3/M, z4.s, z10.s\n"
- "fmla z17.s, p3/M, z3.s, z10.s\n"
- "fmax z16.s, p3/M, z16.s, z14.s\n"
- "fmax z17.s, p3/M, z17.s, z14.s\n"
- "fmla z20.s, p3/M, z1.s, z10.s\n"
- "fmla z21.s, p3/M, z0.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x28, x10, LSL #2]\n"
- "fmax z20.s, p3/M, z20.s, z14.s\n"
+ "fmla z24.s, p3/M, z7.s, z11.s\n"
+ "fmla z21.s, p3/M, z5.s, z11.s\n"
+ "fmla z20.s, p3/M, z4.s, z11.s\n"
+ "fmla z17.s, p3/M, z2.s, z11.s\n"
+ "fmla z16.s, p3/M, z1.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x12, x10, LSL #2]\n"
+ "fmla z28.s, p3/M, z8.s, z12.s\n"
+ "fmla z24.s, p3/M, z5.s, z12.s\n"
+ "fmla z20.s, p3/M, z2.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x13, x27, LSL #2]\n"
+ "fmla z27.s, p3/M, z6.s, z10.s\n"
+ "fmla z23.s, p3/M, z3.s, z10.s\n"
+ "fmla z19.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x11, x10, LSL #2]\n"
+ "fmla z22.s, p3/M, z7.s, z11.s\n"
+ "fmla z21.s, p3/M, z6.s, z11.s\n"
+ "fmla z23.s, p3/M, z8.s, z11.s\n"
+ "fmla z19.s, p3/M, z5.s, z11.s\n"
+ "fmla z18.s, p3/M, z4.s, z11.s\n"
+ "fmla z17.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x12, x9, LSL #2]\n"
+ "fmla z24.s, p3/M, z8.s, z12.s\n"
+ "fmla z20.s, p3/M, z5.s, z12.s\n"
+ "fmla z16.s, p3/M, z2.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x11, x9, LSL #2]\n"
+ "fmla z19.s, p3/M, z8.s, z10.s\n"
+ "fmla z18.s, p3/M, z7.s, z10.s\n"
+ "fmla z17.s, p3/M, z6.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x15, x7, LSL #2]\n"
+ "fmla z22.s, p3/M, z8.s, z11.s\n"
+ "fmla z21.s, p3/M, z7.s, z11.s\n"
+ "fmla z20.s, p3/M, z6.s, z11.s\n"
"fmla z18.s, p3/M, z5.s, z11.s\n"
- "fmla z19.s, p3/M, z4.s, z11.s\n"
- "fmax z18.s, p3/M, z18.s, z14.s\n"
- "fmax z19.s, p3/M, z19.s, z14.s\n"
- "fmla z22.s, p3/M, z2.s, z11.s\n"
- "fmla z23.s, p3/M, z1.s, z11.s\n"
- "fmax z21.s, p3/M, z21.s, z14.s\n"
- "fmax z22.s, p3/M, z22.s, z14.s\n"
- "fmla z24.s, p3/M, z7.s, z12.s\n"
- "fmla z25.s, p3/M, z6.s, z12.s\n"
- "fmax z23.s, p3/M, z23.s, z14.s\n"
- "fmax z24.s, p3/M, z24.s, z14.s\n"
- "fmla z28.s, p3/M, z4.s, z12.s\n"
- "fmla z29.s, p3/M, z3.s, z12.s\n"
- "fmax z25.s, p3/M, z25.s, z14.s\n"
- "fmax z28.s, p3/M, z28.s, z14.s\n"
- "fmla z26.s, p3/M, z8.s, z10.s\n"
- "fmla z27.s, p3/M, z7.s, z10.s\n"
- "fmax z26.s, p3/M, z26.s, z14.s\n"
- "fmax z27.s, p3/M, z27.s, z14.s\n"
- "fmla z30.s, p3/M, z5.s, z10.s\n"
+ "fmla z17.s, p3/M, z4.s, z11.s\n"
+ "fmla z16.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x15, x28, LSL #2]\n"
"fmla z31.s, p3/M, z4.s, z10.s\n"
- "fmax z29.s, p3/M, z29.s, z14.s\n"
- "fmax z30.s, p3/M, z30.s, z14.s\n"
- "fmax z31.s, p3/M, z31.s, z14.s\n"
- "fmin z16.s, p3/M, z16.s, z13.s\n"
- "st1w { z16.s }, p0, [x15]\n"
- "fmin z17.s, p3/M, z17.s, z13.s\n"
- "fmin z18.s, p3/M, z18.s, z13.s\n"
- "st1w { z17.s }, p0, [x15, x6, LSL #2]\n"
- "fmin z19.s, p3/M, z19.s, z13.s\n"
- "fmin z20.s, p3/M, z20.s, z13.s\n"
- "st1w { z18.s }, p0, [x15, x25, LSL #2]\n"
- "fmin z21.s, p3/M, z21.s, z13.s\n"
- "fmin z22.s, p3/M, z22.s, z13.s\n"
- "st1w { z19.s }, p0, [x15, x22, LSL #2]\n"
- "fmin z23.s, p3/M, z23.s, z13.s\n"
- "fmin z24.s, p3/M, z24.s, z13.s\n"
- "st1w { z20.s }, p0, [x9]\n"
- "fmin z25.s, p3/M, z25.s, z13.s\n"
- "fmin z26.s, p3/M, z26.s, z13.s\n"
- "st1w { z21.s }, p0, [x9, x6, LSL #2]\n"
- "fmin z27.s, p3/M, z27.s, z13.s\n"
- "fmin z28.s, p3/M, z28.s, z13.s\n"
- "st1w { z22.s }, p0, [x9, x25, LSL #2]\n"
- "fmin z29.s, p3/M, z29.s, z13.s\n"
- "fmin z30.s, p3/M, z30.s, z13.s\n"
- "st1w { z23.s }, p0, [x9, x22, LSL #2]\n"
- "fmin z31.s, p3/M, z31.s, z13.s\n"
- "st1w { z24.s }, p0, [x26]\n"
- "st1w { z25.s }, p0, [x26, x6, LSL #2]\n"
- "st1w { z26.s }, p0, [x26, x25, LSL #2]\n"
- "st1w { z27.s }, p0, [x26, x22, LSL #2]\n"
- "st1w { z28.s }, p0, [x23]\n"
- "st1w { z29.s }, p0, [x23, x6, LSL #2]\n"
- "st1w { z30.s }, p0, [x23, x25, LSL #2]\n"
- "st1w { z31.s }, p0, [x23, x22, LSL #2]\n"
+ "fmla z18.s, p3/M, z8.s, z12.s\n"
+ "fmla z17.s, p3/M, z7.s, z12.s\n"
+ "fmla z16.s, p3/M, z6.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x12, x7, LSL #2]\n"
+ "fmla z30.s, p3/M, z3.s, z10.s\n"
+ "fmla z27.s, p3/M, z1.s, z10.s\n"
+ "fmla z26.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x12, x28, LSL #2]\n"
+ "fmla z29.s, p3/M, z5.s, z11.s\n"
+ "fmla z28.s, p3/M, z4.s, z11.s\n"
+ "fmla z25.s, p3/M, z2.s, z11.s\n"
+ "fmla z24.s, p3/M, z1.s, z11.s\n"
+ "fmla z23.s, p3/M, z7.s, z12.s\n"
+ "fmla z22.s, p3/M, z6.s, z12.s\n"
+ "fmla z19.s, p3/M, z4.s, z12.s\n"
+ "fmla z18.s, p3/M, z3.s, z12.s\n"
+ "fmla z21.s, p3/M, z8.s, z10.s\n"
+ "fmla z20.s, p3/M, z7.s, z10.s\n"
+ "fmla z17.s, p3/M, z5.s, z10.s\n"
+ "fmla z16.s, p3/M, z4.s, z10.s\n"
+ "fmax z31.s, p3/M, z31.s, z15.s\n"
+ "fmax z30.s, p3/M, z30.s, z15.s\n"
+ "fmax z29.s, p3/M, z29.s, z15.s\n"
+ "fmax z28.s, p3/M, z28.s, z15.s\n"
+ "fmin z31.s, p3/M, z31.s, z14.s\n"
+ "st1w { z31.s }, p0, [x16]\n"
+ "fmin z30.s, p3/M, z30.s, z14.s\n"
+ "fmin z29.s, p3/M, z29.s, z14.s\n"
+ "st1w { z30.s }, p0, [x16, x17, LSL #2]\n"
+ "fmin z28.s, p3/M, z28.s, z14.s\n"
+ "fmax z27.s, p3/M, z27.s, z15.s\n"
+ "st1w { z29.s }, p0, [x16, x26, LSL #2]\n"
+ "fmax z26.s, p3/M, z26.s, z15.s\n"
+ "st1w { z28.s }, p0, [x16, x22, LSL #2]\n"
+ "fmin z27.s, p3/M, z27.s, z14.s\n"
+ "fmax z25.s, p3/M, z25.s, z15.s\n"
+ "st1w { z27.s }, p0, [x25]\n"
+ "fmin z26.s, p3/M, z26.s, z14.s\n"
+ "fmin z25.s, p3/M, z25.s, z14.s\n"
+ "st1w { z26.s }, p0, [x25, x17, LSL #2]\n"
+ "fmax z24.s, p3/M, z24.s, z15.s\n"
+ "fmax z23.s, p3/M, z23.s, z15.s\n"
+ "st1w { z25.s }, p0, [x25, x26, LSL #2]\n"
+ "fmax z22.s, p3/M, z22.s, z15.s\n"
+ "fmax z21.s, p3/M, z21.s, z15.s\n"
+ "fmax z20.s, p3/M, z20.s, z15.s\n"
+ "fmin z24.s, p3/M, z24.s, z14.s\n"
+ "st1w { z24.s }, p0, [x25, x22, LSL #2]\n"
+ "fmin z23.s, p3/M, z23.s, z14.s\n"
+ "fmin z22.s, p3/M, z22.s, z14.s\n"
+ "st1w { z23.s }, p0, [x24]\n"
+ "fmin z21.s, p3/M, z21.s, z14.s\n"
+ "fmin z20.s, p3/M, z20.s, z14.s\n"
+ "st1w { z22.s }, p0, [x24, x17, LSL #2]\n"
+ "fmax z19.s, p3/M, z19.s, z15.s\n"
+ "st1w { z21.s }, p0, [x24, x26, LSL #2]\n"
+ "fmax z18.s, p3/M, z18.s, z15.s\n"
+ "fmax z17.s, p3/M, z17.s, z15.s\n"
+ "st1w { z20.s }, p0, [x24, x22, LSL #2]\n"
+ "fmin z19.s, p3/M, z19.s, z14.s\n"
+ "st1w { z19.s }, p0, [x23]\n"
+ "fmin z18.s, p3/M, z18.s, z14.s\n"
+ "fmin z17.s, p3/M, z17.s, z14.s\n"
+ "st1w { z18.s }, p0, [x23, x17, LSL #2]\n"
+ "fmax z16.s, p3/M, z16.s, z15.s\n"
+ "st1w { z17.s }, p0, [x23, x26, LSL #2]\n"
+ "fmin z16.s, p3/M, z16.s, z14.s\n"
+ "st1w { z16.s }, p0, [x23, x22, LSL #2]\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
index 0b04ae064d..158d44046c 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -98,613 +98,613 @@ void sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
activation_min, activation_max);
__asm__ __volatile__(
+ "ldr x16, [%x[params_struct], %[offsetof_args_outptrs]]\n"
"ptrue p3.b\n"
- "ldr x17, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x16, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "ld1w { z15.s }, p3/Z, [x17]\n"
- "cntw x15\n"
- "mov x14, #0x0\n"
- "ld1w { z0.s }, p3/Z, [x17, #1, MUL VL]\n"
- "ld1w { z1.s }, p3/Z, [x17, #2, MUL VL]\n"
+ "ldr x15, [%x[params_struct], %[offsetof_args_params]]\n"
+ "add x14, %x[params_struct], %[offsetof_Args_inptrs]\n"
+ "ld1rw { z15.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "mov x13, #0x0\n"
+ "ld1rw { z14.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "cntw x12\n"
+ "ld1w { z13.s }, p3/Z, [x15]\n"
+ "sub x11, XZR, x12\n"
+ "ld1w { z0.s }, p3/Z, [x15, #1, MUL VL]\n"
"whilelt p2.s, XZR, %x[n_channels]\n"
- "ld1w { z2.s }, p3/Z, [x17, #3, MUL VL]\n"
- "ld1w { z3.s }, p3/Z, [x17, #4, MUL VL]\n"
- "cmp x15, %x[n_channels]\n"
- "ld1w { z4.s }, p3/Z, [x17, #5, MUL VL]\n"
- "ld1w { z5.s }, p3/Z, [x17, #6, MUL VL]\n"
- "sub x13, XZR, x15\n"
- "ld1w { z6.s }, p3/Z, [x17, #7, MUL VL]\n"
- "addvl x17, x17, #16\n"
- "ldp x12, x11, [x16, #0x0]\n"
- "ldp x10, x9, [x16, #0x10]\n"
- "ldr x28, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "ld1rw { z14.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ld1rw { z13.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "ld1w { z7.s }, p3/Z, [x17, #-8, MUL VL]\n"
- "ld1w { z8.s }, p3/Z, [x17, #-7, MUL VL]\n"
- "addvl x17, x17, #-6\n"
- "ld1w { z9.s }, p2/Z, [x12, x14, LSL #2]\n"
- "ld1w { z10.s }, p2/Z, [x11, x14, LSL #2]\n"
- "ld1w { z11.s }, p2/Z, [x10, x14, LSL #2]\n"
- "ld1w { z12.s }, p2/Z, [x9, x14, LSL #2]\n"
+ "ld1w { z1.s }, p3/Z, [x15, #2, MUL VL]\n"
+ "cmp x12, %x[n_channels]\n"
+ "ld1w { z2.s }, p3/Z, [x15, #3, MUL VL]\n"
+ "ld1w { z3.s }, p3/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z4.s }, p3/Z, [x15, #5, MUL VL]\n"
+ "ld1w { z5.s }, p3/Z, [x15, #6, MUL VL]\n"
+ "ld1w { z6.s }, p3/Z, [x15, #7, MUL VL]\n"
+ "addvl x15, x15, #16\n"
+ "ldp x10, x9, [x14, #0x0]\n"
+ "ld1w { z7.s }, p3/Z, [x15, #-8, MUL VL]\n"
+ "ld1w { z8.s }, p3/Z, [x15, #-7, MUL VL]\n"
+ "addvl x15, x15, #-6\n"
+ "ld1w { z9.s }, p2/Z, [x10, x13, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x9, x13, LSL #2]\n"
+ "ldp x28, x27, [x14, #0x10]\n"
+ "ld1w { z11.s }, p2/Z, [x28, x13, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x27, x13, LSL #2]\n"
"bge 2f\n"
"1:" // Channel loop
- "movprfx z21, z15\n fmla z21.s, p3/M, z4.s, z9.s\n"
- "movprfx z16, z15\n fmla z16.s, p3/M, z8.s, z9.s\n"
- "ldr x27, [x16, #0x20]\n"
- "ldr x26, [x16, #0x30]\n"
- "movprfx z22, z15\n fmla z22.s, p3/M, z3.s, z9.s\n"
- "movprfx z25, z15\n fmla z25.s, p3/M, z1.s, z9.s\n"
- "ldr x25, [x16, #0x28]\n"
- "ldr x24, [x16, #0x38]\n"
- "movprfx z26, z15\n fmla z26.s, p3/M, z0.s, z9.s\n"
- "movprfx z17, z15\n fmla z17.s, p3/M, z7.s, z9.s\n"
- "ldr x12, [x16, #0x40]\n"
- "ldr x11, [x16, #0x48]\n"
- "movprfx z18, z15\n fmla z18.s, p3/M, z6.s, z9.s\n"
- "fmla z21.s, p3/M, z5.s, z12.s\n"
- "ldr x10, [x16, #0x50]\n"
- "ldr x9, [x16, #0x58]\n"
- "movprfx z20, z15\n fmla z20.s, p3/M, z5.s, z9.s\n"
- "movprfx z24, z15\n fmla z24.s, p3/M, z2.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x26, x14, LSL #2]\n"
- "ldr x26, [x16, #0x70]\n"
- "fmla z16.s, p3/M, z0.s, z10.s\n"
- "movprfx z19, z15\n fmla z19.s, p3/M, z2.s, z11.s\n"
- "ld1w { z10.s }, p2/Z, [x27, x14, LSL #2]\n"
- "ld1w { z11.s }, p2/Z, [x25, x14, LSL #2]\n"
- "fmla z22.s, p3/M, z4.s, z12.s\n"
- "fmla z25.s, p3/M, z2.s, z12.s\n"
- "ldr x27, [x16, #0x60]\n"
- "ldr x25, [x16, #0x68]\n"
- "fmla z26.s, p3/M, z1.s, z12.s\n"
- "fmla z17.s, p3/M, z8.s, z12.s\n"
- "incw x13\n"
- "mov p1.b, p2.b\n"
- "fmla z18.s, p3/M, z7.s, z12.s\n"
- "movprfx z28, z15\n fmla z28.s, p3/M, z6.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x11, x14, LSL #2]\n"
- "ldr x11, [x16, #0x88]\n"
- "fmla z21.s, p3/M, z7.s, z9.s\n"
- "fmla z19.s, p3/M, z6.s, z12.s\n"
- "ldr x23, [x28, #0x0]\n"
- "ldr x22, [x28, #0x8]\n"
- "movprfx z23, z15\n fmla z23.s, p3/M, z3.s, z12.s\n"
- "movprfx z27, z15\n fmla z27.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x24, x14, LSL #2]\n"
- "ldr x24, [x16, #0x78]\n"
- "movprfx z31, z15\n fmla z31.s, p3/M, z8.s, z11.s\n"
- "fmla z22.s, p3/M, z6.s, z9.s\n"
- "ld1w { z11.s }, p2/Z, [x12, x14, LSL #2]\n"
- "ldr x12, [x16, #0x80]\n"
- "fmla z25.s, p3/M, z4.s, z9.s\n"
- "fmla z26.s, p3/M, z3.s, z9.s\n"
- "ldr x21, [x28, #0x10]\n"
- "ldr x20, [x28, #0x18]\n"
- "fmla z20.s, p3/M, z8.s, z9.s\n"
- "fmla z24.s, p3/M, z5.s, z9.s\n"
- "whilelt p0.s, x15, %x[n_channels]\n"
- "fmla z28.s, p3/M, z2.s, z9.s\n"
- "fmla z16.s, p3/M, z1.s, z12.s\n"
- "fmla z17.s, p3/M, z0.s, z12.s\n"
- "movprfx z29, z15\n fmla z29.s, p3/M, z1.s, z9.s\n"
- "movprfx z30, z15\n fmla z30.s, p3/M, z0.s, z9.s\n"
- "fmla z18.s, p3/M, z2.s, z11.s\n"
- "ld1w { z9.s }, p2/Z, [x10, x14, LSL #2]\n"
- "ldr x10, [x16, #0x90]\n"
- "fmla z21.s, p3/M, z8.s, z10.s\n"
- "fmla z19.s, p3/M, z1.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x27, x14, LSL #2]\n"
- "ldr x27, [x16, #0xa0]\n"
- "fmla z22.s, p3/M, z7.s, z10.s\n"
- "fmla z23.s, p3/M, z6.s, z10.s\n"
- "fmla z25.s, p3/M, z5.s, z10.s\n"
- "fmla z26.s, p3/M, z4.s, z10.s\n"
- "fmla z27.s, p3/M, z3.s, z10.s\n"
- "fmla z29.s, p3/M, z2.s, z10.s\n"
- "fmla z30.s, p3/M, z1.s, z10.s\n"
+ "movprfx z31, z13\n fmla z31.s, p3/M, z8.s, z9.s\n"
+ "ldr x26, [x14, #0x20]\n"
+ "whilelt p1.s, x12, %x[n_channels]\n"
+ "movprfx z30, z13\n fmla z30.s, p3/M, z7.s, z9.s\n"
+ "ldr x25, [x14, #0x28]\n"
+ "incw x11\n"
+ "movprfx z29, z13\n fmla z29.s, p3/M, z6.s, z9.s\n"
+ "ldr x24, [x14, #0x30]\n"
+ "mov p0.b, p2.b\n"
+ "movprfx z27, z13\n fmla z27.s, p3/M, z5.s, z9.s\n"
+ "ldr x23, [x14, #0x38]\n"
+ "movprfx z26, z13\n fmla z26.s, p3/M, z4.s, z9.s\n"
+ "ldr x10, [x14, #0x40]\n"
+ "movprfx z25, z13\n fmla z25.s, p3/M, z3.s, z9.s\n"
+ "ldr x9, [x14, #0x48]\n"
+ "movprfx z23, z13\n fmla z23.s, p3/M, z2.s, z9.s\n"
+ "ldr x28, [x14, #0x50]\n"
+ "movprfx z22, z13\n fmla z22.s, p3/M, z1.s, z9.s\n"
+ "ldr x27, [x14, #0x58]\n"
+ "movprfx z21, z13\n fmla z21.s, p3/M, z0.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x24, x13, LSL #2]\n"
"fmla z31.s, p3/M, z0.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x25, x14, LSL #2]\n"
- "ldr x25, [x16, #0xa8]\n"
- "fmla z16.s, p3/M, z3.s, z9.s\n"
- "fmla z20.s, p3/M, z0.s, z9.s\n"
- "ld1w { z12.s }, p2/Z, [x9, x14, LSL #2]\n"
- "ldr x9, [x16, #0x98]\n"
- "fmla z24.s, p3/M, z6.s, z11.s\n"
- "fmla z28.s, p3/M, z3.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x26, x14, LSL #2]\n"
- "ldr x26, [x16, #0xb0]\n"
- "fmla z17.s, p3/M, z4.s, z10.s\n"
- "fmla z18.s, p3/M, z3.s, z10.s\n"
- "fmla z21.s, p3/M, z1.s, z10.s\n"
- "fmla z19.s, p3/M, z5.s, z12.s\n"
- "fmla z23.s, p3/M, z2.s, z12.s\n"
- "fmla z22.s, p3/M, z0.s, z10.s\n"
- "ld1w { z12.s }, p2/Z, [x24, x14, LSL #2]\n"
- "ldr x24, [x16, #0xb8]\n"
- "fmla z27.s, p3/M, z8.s, z11.s\n"
- "fmla z31.s, p3/M, z5.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x12, x14, LSL #2]\n"
- "ldr x12, [x16, #0xc0]\n"
- "fmla z16.s, p3/M, z5.s, z10.s\n"
- "fmla z20.s, p3/M, z2.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x11, x14, LSL #2]\n"
- "ldr x11, [x16, #0xc8]\n"
- "fmla z17.s, p3/M, z5.s, z12.s\n"
- "fmla z18.s, p3/M, z4.s, z12.s\n"
- "fmla z21.s, p3/M, z2.s, z12.s\n"
- "fmla z19.s, p3/M, z3.s, z12.s\n"
- "fmla z22.s, p3/M, z1.s, z12.s\n"
- "fmla z23.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x9, x14, LSL #2]\n"
- "ldr x9, [x16, #0xd8]\n"
- "fmla z28.s, p3/M, z7.s, z11.s\n"
- "fmla z29.s, p3/M, z6.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x10, x14, LSL #2]\n"
- "ldr x10, [x16, #0xd0]\n"
- "fmla z16.s, p3/M, z7.s, z10.s\n"
- "fmla z17.s, p3/M, z6.s, z10.s\n"
- "fmla z20.s, p3/M, z4.s, z10.s\n"
- "fmla z21.s, p3/M, z3.s, z10.s\n"
- "fmla z24.s, p3/M, z1.s, z10.s\n"
- "fmla z25.s, p3/M, z0.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x27, x14, LSL #2]\n"
- "ldr x27, [x16, #0xe0]\n"
- "fmla z18.s, p3/M, z8.s, z12.s\n"
- "fmla z30.s, p3/M, z8.s, z11.s\n"
- "fmla z31.s, p3/M, z7.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x25, x14, LSL #2]\n"
- "fmla z27.s, p3/M, z1.s, z12.s\n"
- "ldr x25, [x16, #0xe8]\n"
- "fmla z19.s, p3/M, z7.s, z12.s\n"
- "fmla z22.s, p3/M, z5.s, z12.s\n"
- "fmla z23.s, p3/M, z4.s, z12.s\n"
- "fmla z26.s, p3/M, z2.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x26, x14, LSL #2]\n"
- "ldr x26, [x16, #0xf0]\n"
- "fmla z16.s, p3/M, z2.s, z10.s\n"
- "fmla z17.s, p3/M, z1.s, z10.s\n"
- "fmla z18.s, p3/M, z0.s, z10.s\n"
- "fmla z20.s, p3/M, z7.s, z11.s\n"
- "ld1w { z10.s }, p2/Z, [x24, x14, LSL #2]\n"
- "ldr x24, [x16, #0xf8]\n"
- "fmla z21.s, p3/M, z6.s, z11.s\n"
- "fmla z24.s, p3/M, z4.s, z11.s\n"
- "fmla z25.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z10.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "movprfx z28, z13\n fmla z28.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z8.s, z12.s\n"
+ "ldr x26, [x14, #0x60]\n"
+ "fmla z29.s, p3/M, z7.s, z12.s\n"
+ "ldr x25, [x14, #0x68]\n"
+ "fmla z26.s, p3/M, z5.s, z12.s\n"
+ "ldr x24, [x14, #0x70]\n"
+ "fmla z28.s, p3/M, z6.s, z12.s\n"
+ "ldr x22, [x16, #0x0]\n"
+ "fmla z25.s, p3/M, z4.s, z12.s\n"
+ "ldr x21, [x16, #0x8]\n"
+ "movprfx z24, z13\n fmla z24.s, p3/M, z3.s, z12.s\n"
+ "ldr x20, [x16, #0x10]\n"
+ "fmla z22.s, p3/M, z2.s, z12.s\n"
+ "ldr x19, [x16, #0x18]\n"
+ "fmla z21.s, p3/M, z1.s, z12.s\n"
+ "movprfx z20, z13\n fmla z20.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x23, x13, LSL #2]\n"
+ "movprfx z19, z13\n fmla z19.s, p3/M, z6.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x9, x13, LSL #2]\n"
+ "movprfx z16, z13\n fmla z16.s, p3/M, z8.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x10, x13, LSL #2]\n"
+ "fmla z27.s, p3/M, z8.s, z9.s\n"
+ "ldr x23, [x14, #0x78]\n"
+ "fmla z26.s, p3/M, z7.s, z9.s\n"
+ "ldr x10, [x14, #0x80]\n"
+ "fmla z25.s, p3/M, z6.s, z9.s\n"
+ "ldr x9, [x14, #0x88]\n"
+ "fmla z23.s, p3/M, z5.s, z9.s\n"
+ "fmla z22.s, p3/M, z4.s, z9.s\n"
+ "fmla z21.s, p3/M, z3.s, z9.s\n"
+ "fmla z19.s, p3/M, z2.s, z9.s\n"
+ "movprfx z18, z13\n fmla z18.s, p3/M, z1.s, z9.s\n"
+ "movprfx z17, z13\n fmla z17.s, p3/M, z0.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x28, x13, LSL #2]\n"
+ "fmla z31.s, p3/M, z1.s, z12.s\n"
+ "ldr x28, [x14, #0x90]\n"
+ "fmla z30.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x27, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z2.s, z11.s\n"
+ "ldr x27, [x14, #0x98]\n"
"fmla z28.s, p3/M, z1.s, z11.s\n"
- "fmla z29.s, p3/M, z0.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x12, x14, LSL #2]\n"
- "fmla z27.s, p3/M, z4.s, z11.s\n"
- "ldr x12, [x16, #0x100]\n"
- "fmla z30.s, p3/M, z2.s, z11.s\n"
- "fmla z17.s, p3/M, z2.s, z12.s\n"
- "fmla z18.s, p3/M, z1.s, z12.s\n"
- "fmla z19.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x11, x14, LSL #2]\n"
- "ldr x11, [x16, #0x108]\n"
- "fmla z16.s, p3/M, z6.s, z10.s\n"
+ "ld1w { z11.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "fmla z26.s, p3/M, z8.s, z10.s\n"
+ "ldr x26, [x14, #0xa0]\n"
+ "fmla z25.s, p3/M, z7.s, z10.s\n"
+ "ld1w { z13.s }, p3/Z, [x15]\n"
+ "fmla z24.s, p3/M, z6.s, z10.s\n"
+ "fmla z22.s, p3/M, z5.s, z10.s\n"
+ "fmla z21.s, p3/M, z4.s, z10.s\n"
"fmla z20.s, p3/M, z3.s, z10.s\n"
- "fmla z24.s, p3/M, z0.s, z10.s\n"
- "fmla z22.s, p3/M, z8.s, z11.s\n"
- "ld1w { z10.s }, p2/Z, [x10, x14, LSL #2]\n"
- "ldr x10, [x16, #0x110]\n"
- "fmla z23.s, p3/M, z7.s, z11.s\n"
- "fmla z26.s, p3/M, z5.s, z11.s\n"
- "fmla z31.s, p3/M, z1.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x9, x14, LSL #2]\n"
- "fmla z27.s, p3/M, z2.s, z12.s\n"
- "ldr x9, [x16, #0x118]\n"
- "fmla z28.s, p3/M, z0.s, z10.s\n"
- "fmla z29.s, p3/M, z4.s, z11.s\n"
- "fmla z30.s, p3/M, z3.s, z11.s\n"
- "fmla z19.s, p3/M, z8.s, z12.s\n"
- "fmla z23.s, p3/M, z5.s, z12.s\n"
- "fmla z20.s, p3/M, z6.s, z10.s\n"
- "ld1w { z12.s }, p2/Z, [x27, x14, LSL #2]\n"
- "fmla z24.s, p3/M, z3.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x25, x14, LSL #2]\n"
- "fmla z25.s, p3/M, z7.s, z11.s\n"
- "fmla z26.s, p3/M, z6.s, z11.s\n"
- "fmla z28.s, p3/M, z5.s, z11.s\n"
- "fmla z27.s, p3/M, z5.s, z12.s\n"
- "fmla z31.s, p3/M, z2.s, z12.s\n"
- "fmla z29.s, p3/M, z7.s, z10.s\n"
+ "fmla z18.s, p3/M, z2.s, z10.s\n"
+ "fmla z17.s, p3/M, z1.s, z10.s\n"
+ "fmla z16.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "fmla z31.s, p3/M, z3.s, z9.s\n"
+ "ldr x25, [x14, #0xa8]\n"
+ "fmla z27.s, p3/M, z0.s, z9.s\n"
+ "fmla z28.s, p3/M, z5.s, z12.s\n"
+ "fmla z24.s, p3/M, z2.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x23, x13, LSL #2]\n"
+ "fmla z23.s, p3/M, z6.s, z11.s\n"
+ "ldr x23, [x14, #0xb8]\n"
+ "fmla z19.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x24, x13, LSL #2]\n"
+ "fmla z31.s, p3/M, z5.s, z10.s\n"
+ "ldr x24, [x14, #0xb0]\n"
+ "fmla z30.s, p3/M, z4.s, z10.s\n"
+ "fmla z29.s, p3/M, z3.s, z10.s\n"
+ "fmla z27.s, p3/M, z2.s, z10.s\n"
+ "fmla z26.s, p3/M, z1.s, z10.s\n"
+ "fmla z25.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x9, x13, LSL #2]\n"
+ "fmla z20.s, p3/M, z8.s, z11.s\n"
+ "ldr x9, [x14, #0xc8]\n"
+ "fmla z16.s, p3/M, z5.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x10, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z5.s, z12.s\n"
+ "ldr x10, [x14, #0xc0]\n"
+ "fmla z29.s, p3/M, z4.s, z12.s\n"
+ "fmla z28.s, p3/M, z3.s, z12.s\n"
+ "fmla z26.s, p3/M, z2.s, z12.s\n"
+ "fmla z25.s, p3/M, z1.s, z12.s\n"
+ "fmla z24.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x27, x13, LSL #2]\n"
+ "fmla z19.s, p3/M, z7.s, z11.s\n"
+ "ldr x27, [x14, #0xd8]\n"
+ "fmla z18.s, p3/M, z6.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x28, x13, LSL #2]\n"
+ "fmla z31.s, p3/M, z7.s, z10.s\n"
+ "ldr x28, [x14, #0xd0]\n"
"fmla z30.s, p3/M, z6.s, z10.s\n"
- "fmla z24.s, p3/M, z8.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x26, x14, LSL #2]\n"
- "fmla z28.s, p3/M, z8.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x12, x14, LSL #2]\n"
+ "fmla z27.s, p3/M, z4.s, z10.s\n"
+ "fmla z26.s, p3/M, z3.s, z10.s\n"
+ "fmla z23.s, p3/M, z1.s, z10.s\n"
+ "fmla z22.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "fmla z17.s, p3/M, z8.s, z11.s\n"
+ "ldr x26, [x14, #0xe0]\n"
+ "fmla z16.s, p3/M, z7.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z8.s, z12.s\n"
+ "ldr x25, [x14, #0xe8]\n"
+ "fmla z28.s, p3/M, z7.s, z12.s\n"
+ "fmla z25.s, p3/M, z5.s, z12.s\n"
+ "fmla z24.s, p3/M, z4.s, z12.s\n"
+ "fmla z21.s, p3/M, z2.s, z12.s\n"
+ "fmla z20.s, p3/M, z1.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x24, x13, LSL #2]\n"
+ "fmla z31.s, p3/M, z2.s, z10.s\n"
+ "ldr x24, [x14, #0xf0]\n"
+ "fmla z30.s, p3/M, z1.s, z10.s\n"
+ "fmla z29.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x23, x13, LSL #2]\n"
+ "fmla z27.s, p3/M, z7.s, z11.s\n"
+ "ldr x23, [x14, #0xf8]\n"
+ "fmla z26.s, p3/M, z6.s, z11.s\n"
+ "fmla z23.s, p3/M, z4.s, z11.s\n"
+ "fmla z22.s, p3/M, z3.s, z11.s\n"
+ "fmla z19.s, p3/M, z1.s, z11.s\n"
+ "fmla z18.s, p3/M, z0.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x10, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z2.s, z12.s\n"
+ "ldr x10, [x14, #0x100]\n"
+ "fmla z29.s, p3/M, z1.s, z12.s\n"
+ "fmla z28.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x9, x13, LSL #2]\n"
+ "fmla z31.s, p3/M, z6.s, z10.s\n"
+ "ldr x9, [x14, #0x108]\n"
+ "fmla z27.s, p3/M, z3.s, z10.s\n"
+ "fmla z23.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x28, x13, LSL #2]\n"
"fmla z25.s, p3/M, z8.s, z11.s\n"
- "fmla z26.s, p3/M, z7.s, z11.s\n"
- "fmla z27.s, p3/M, z6.s, z11.s\n"
- "fmla z29.s, p3/M, z5.s, z11.s\n"
- "fmla z30.s, p3/M, z4.s, z11.s\n"
- "fmla z31.s, p3/M, z3.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x11, x14, LSL #2]\n"
- "ldp x12, x11, [x16, #0x0]\n"
- "fmla z23.s, p3/M, z8.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x24, x14, LSL #2]\n"
- "fmla z16.s, p3/M, z4.s, z10.s\n"
- "fmax z16.s, p3/M, z16.s, z14.s\n"
- "fmla z17.s, p3/M, z3.s, z10.s\n"
+ "ldr x28, [x14, #0x110]\n"
+ "fmla z24.s, p3/M, z7.s, z11.s\n"
+ "fmla z21.s, p3/M, z5.s, z11.s\n"
+ "fmla z20.s, p3/M, z4.s, z11.s\n"
+ "fmla z17.s, p3/M, z2.s, z11.s\n"
+ "fmla z16.s, p3/M, z1.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x27, x13, LSL #2]\n"
+ "fmla z28.s, p3/M, z8.s, z12.s\n"
+ "ldr x27, [x14, #0x118]\n"
+ "fmla z24.s, p3/M, z5.s, z12.s\n"
+ "fmla z20.s, p3/M, z2.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "fmla z27.s, p3/M, z6.s, z10.s\n"
+ "fmla z23.s, p3/M, z3.s, z10.s\n"
+ "fmla z19.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "fmla z22.s, p3/M, z7.s, z11.s\n"
+ "fmla z21.s, p3/M, z6.s, z11.s\n"
+ "fmla z23.s, p3/M, z8.s, z11.s\n"
+ "fmla z19.s, p3/M, z5.s, z11.s\n"
+ "fmla z18.s, p3/M, z4.s, z11.s\n"
+ "fmla z17.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x24, x13, LSL #2]\n"
+ "fmla z24.s, p3/M, z8.s, z12.s\n"
+ "fmla z20.s, p3/M, z5.s, z12.s\n"
+ "fmla z16.s, p3/M, z2.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x23, x13, LSL #2]\n"
+ "fmla z19.s, p3/M, z8.s, z10.s\n"
+ "fmla z18.s, p3/M, z7.s, z10.s\n"
+ "fmla z17.s, p3/M, z6.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x10, x13, LSL #2]\n"
+ "fmla z22.s, p3/M, z8.s, z11.s\n"
+ "fmla z21.s, p3/M, z7.s, z11.s\n"
+ "fmla z20.s, p3/M, z6.s, z11.s\n"
"fmla z18.s, p3/M, z5.s, z11.s\n"
- "fmax z17.s, p3/M, z17.s, z14.s\n"
- "fmax z18.s, p3/M, z18.s, z14.s\n"
- "fmla z19.s, p3/M, z4.s, z11.s\n"
- "fmla z29.s, p3/M, z8.s, z12.s\n"
- "fmax z19.s, p3/M, z19.s, z14.s\n"
- "fmin z16.s, p3/M, z16.s, z13.s\n"
- "fmla z30.s, p3/M, z7.s, z12.s\n"
- "fmla z31.s, p3/M, z6.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x10, x14, LSL #2]\n"
- "fmin z17.s, p3/M, z17.s, z13.s\n"
- "fmla z20.s, p3/M, z1.s, z10.s\n"
- "fmla z21.s, p3/M, z0.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x9, x14, LSL #2]\n"
- "fmin z18.s, p3/M, z18.s, z13.s\n"
- "fmla z22.s, p3/M, z2.s, z11.s\n"
- "fmla z23.s, p3/M, z1.s, z11.s\n"
- "fmin z19.s, p3/M, z19.s, z13.s\n"
- "fmax z20.s, p3/M, z20.s, z14.s\n"
- "fmla z24.s, p3/M, z7.s, z12.s\n"
- "fmla z25.s, p3/M, z6.s, z12.s\n"
- "fmax z21.s, p3/M, z21.s, z14.s\n"
- "fmax z22.s, p3/M, z22.s, z14.s\n"
- "fmla z26.s, p3/M, z8.s, z10.s\n"
- "fmla z27.s, p3/M, z7.s, z10.s\n"
- "fmax z23.s, p3/M, z23.s, z14.s\n"
- "st1w { z16.s }, p1, [x23, x13, LSL #2]\n"
- "st1w { z17.s }, p1, [x22, x13, LSL #2]\n"
- "ldr x23, [x28, #0x20]\n"
- "ldr x22, [x28, #0x28]\n"
- "fmla z28.s, p3/M, z4.s, z12.s\n"
- "st1w { z18.s }, p1, [x21, x13, LSL #2]\n"
- "ldr x21, [x28, #0x30]\n"
- "fmla z29.s, p3/M, z3.s, z12.s\n"
- "fmla z30.s, p3/M, z5.s, z10.s\n"
- "st1w { z19.s }, p1, [x20, x13, LSL #2]\n"
- "ldr x20, [x28, #0x38]\n"
+ "fmla z17.s, p3/M, z4.s, z11.s\n"
+ "fmla z16.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x9, x13, LSL #2]\n"
"fmla z31.s, p3/M, z4.s, z10.s\n"
- "ldp x10, x9, [x16, #0x10]\n"
- "fmin z20.s, p3/M, z20.s, z13.s\n"
- "fmin z21.s, p3/M, z21.s, z13.s\n"
- "st1w { z20.s }, p1, [x23, x13, LSL #2]\n"
- "ldr x23, [x28, #0x40]\n"
- "fmin z22.s, p3/M, z22.s, z13.s\n"
- "fmin z23.s, p3/M, z23.s, z13.s\n"
- "st1w { z21.s }, p1, [x22, x13, LSL #2]\n"
- "ldr x22, [x28, #0x48]\n"
- "fmax z24.s, p3/M, z24.s, z14.s\n"
- "fmax z25.s, p3/M, z25.s, z14.s\n"
- "st1w { z22.s }, p1, [x21, x13, LSL #2]\n"
- "ldr x21, [x28, #0x50]\n"
- "fmax z26.s, p3/M, z26.s, z14.s\n"
- "fmax z27.s, p3/M, z27.s, z14.s\n"
- "st1w { z23.s }, p1, [x20, x13, LSL #2]\n"
- "ldr x20, [x28, #0x58]\n"
- "incw x14\n"
- "ld1w { z9.s }, p0/Z, [x12, x15, LSL #2]\n"
- "ld1w { z10.s }, p0/Z, [x11, x15, LSL #2]\n"
- "fmin z24.s, p3/M, z24.s, z13.s\n"
- "ld1w { z11.s }, p0/Z, [x10, x15, LSL #2]\n"
- "ld1w { z12.s }, p0/Z, [x9, x15, LSL #2]\n"
- "incw x15\n"
- "fmin z25.s, p3/M, z25.s, z13.s\n"
- "fmin z26.s, p3/M, z26.s, z13.s\n"
- "fmin z27.s, p3/M, z27.s, z13.s\n"
- "st1w { z24.s }, p1, [x23, x13, LSL #2]\n"
- "ldr x23, [x28, #0x60]\n"
- "fmax z28.s, p3/M, z28.s, z14.s\n"
- "fmax z29.s, p3/M, z29.s, z14.s\n"
- "st1w { z25.s }, p1, [x22, x13, LSL #2]\n"
- "ldr x22, [x28, #0x68]\n"
- "fmax z30.s, p3/M, z30.s, z14.s\n"
- "fmax z31.s, p3/M, z31.s, z14.s\n"
- "st1w { z26.s }, p1, [x21, x13, LSL #2]\n"
- "ldr x21, [x28, #0x70]\n"
- "st1w { z27.s }, p1, [x20, x13, LSL #2]\n"
- "ldr x20, [x28, #0x78]\n"
- "ld1w { z15.s }, p3/Z, [x17]\n"
- "whilelt p2.s, x14, %x[n_channels]\n"
- "ld1w { z0.s }, p3/Z, [x17, #1, MUL VL]\n"
- "ld1w { z1.s }, p3/Z, [x17, #2, MUL VL]\n"
- "cmp x15, %x[n_channels]\n"
- "fmin z28.s, p3/M, z28.s, z13.s\n"
- "ld1w { z2.s }, p3/Z, [x17, #3, MUL VL]\n"
- "ld1w { z3.s }, p3/Z, [x17, #4, MUL VL]\n"
- "fmin z29.s, p3/M, z29.s, z13.s\n"
- "fmin z30.s, p3/M, z30.s, z13.s\n"
- "ld1w { z4.s }, p3/Z, [x17, #5, MUL VL]\n"
- "ld1w { z5.s }, p3/Z, [x17, #6, MUL VL]\n"
- "fmin z31.s, p3/M, z31.s, z13.s\n"
- "st1w { z28.s }, p1, [x23, x13, LSL #2]\n"
- "ld1w { z6.s }, p3/Z, [x17, #7, MUL VL]\n"
- "addvl x17, x17, #16\n"
- "st1w { z29.s }, p1, [x22, x13, LSL #2]\n"
- "ld1w { z7.s }, p3/Z, [x17, #-8, MUL VL]\n"
- "st1w { z30.s }, p1, [x21, x13, LSL #2]\n"
- "ld1w { z8.s }, p3/Z, [x17, #-7, MUL VL]\n"
- "addvl x17, x17, #-6\n"
- "st1w { z31.s }, p1, [x20, x13, LSL #2]\n"
- "blt 1b\n"
- "2:" // Channel tail
- "movprfx z21, z15\n fmla z21.s, p3/M, z4.s, z9.s\n"
- "movprfx z16, z15\n fmla z16.s, p3/M, z8.s, z9.s\n"
- "ldr x27, [x16, #0x20]\n"
- "ldr x26, [x16, #0x30]\n"
- "movprfx z22, z15\n fmla z22.s, p3/M, z3.s, z9.s\n"
- "movprfx z25, z15\n fmla z25.s, p3/M, z1.s, z9.s\n"
- "ldr x25, [x16, #0x28]\n"
- "ldr x24, [x16, #0x38]\n"
- "movprfx z26, z15\n fmla z26.s, p3/M, z0.s, z9.s\n"
- "movprfx z17, z15\n fmla z17.s, p3/M, z7.s, z9.s\n"
- "ldr x12, [x16, #0x40]\n"
- "ldr x11, [x16, #0x48]\n"
- "movprfx z18, z15\n fmla z18.s, p3/M, z6.s, z9.s\n"
- "fmla z21.s, p3/M, z5.s, z12.s\n"
- "ldr x10, [x16, #0x50]\n"
- "ldr x9, [x16, #0x58]\n"
- "movprfx z20, z15\n fmla z20.s, p3/M, z5.s, z9.s\n"
- "movprfx z24, z15\n fmla z24.s, p3/M, z2.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x26, x14, LSL #2]\n"
- "ldr x26, [x16, #0x70]\n"
- "fmla z16.s, p3/M, z0.s, z10.s\n"
- "movprfx z19, z15\n fmla z19.s, p3/M, z2.s, z11.s\n"
- "ld1w { z10.s }, p2/Z, [x27, x14, LSL #2]\n"
- "ld1w { z11.s }, p2/Z, [x25, x14, LSL #2]\n"
- "fmla z22.s, p3/M, z4.s, z12.s\n"
- "fmla z25.s, p3/M, z2.s, z12.s\n"
- "ldr x27, [x16, #0x60]\n"
- "ldr x25, [x16, #0x68]\n"
- "fmla z26.s, p3/M, z1.s, z12.s\n"
- "fmla z17.s, p3/M, z8.s, z12.s\n"
+ "ldp x10, x9, [x14, #0x0]\n"
+ "fmla z18.s, p3/M, z8.s, z12.s\n"
+ "ld1w { z9.s }, p1/Z, [x10, x12, LSL #2]\n"
+ "fmla z17.s, p3/M, z7.s, z12.s\n"
+ "fmla z16.s, p3/M, z6.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x28, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z3.s, z10.s\n"
+ "fmla z27.s, p3/M, z1.s, z10.s\n"
+ "fmla z26.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x27, x13, LSL #2]\n"
"incw x13\n"
- "mov p1.b, p2.b\n"
- "fmla z18.s, p3/M, z7.s, z12.s\n"
- "movprfx z28, z15\n fmla z28.s, p3/M, z6.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x11, x14, LSL #2]\n"
- "ldr x11, [x16, #0x88]\n"
- "fmla z21.s, p3/M, z7.s, z9.s\n"
- "fmla z19.s, p3/M, z6.s, z12.s\n"
- "ldr x23, [x28, #0x0]\n"
- "ldr x22, [x28, #0x8]\n"
- "movprfx z23, z15\n fmla z23.s, p3/M, z3.s, z12.s\n"
- "movprfx z27, z15\n fmla z27.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x24, x14, LSL #2]\n"
- "ldr x24, [x16, #0x78]\n"
- "movprfx z31, z15\n fmla z31.s, p3/M, z8.s, z11.s\n"
- "fmla z22.s, p3/M, z6.s, z9.s\n"
- "ld1w { z11.s }, p2/Z, [x12, x14, LSL #2]\n"
- "ldr x12, [x16, #0x80]\n"
- "fmla z25.s, p3/M, z4.s, z9.s\n"
- "fmla z26.s, p3/M, z3.s, z9.s\n"
- "ldr x21, [x28, #0x10]\n"
- "ldr x20, [x28, #0x18]\n"
- "fmla z20.s, p3/M, z8.s, z9.s\n"
- "fmla z24.s, p3/M, z5.s, z9.s\n"
- "fmla z28.s, p3/M, z2.s, z9.s\n"
- "fmla z16.s, p3/M, z1.s, z12.s\n"
- "fmla z17.s, p3/M, z0.s, z12.s\n"
- "movprfx z29, z15\n fmla z29.s, p3/M, z1.s, z9.s\n"
- "movprfx z30, z15\n fmla z30.s, p3/M, z0.s, z9.s\n"
- "fmla z18.s, p3/M, z2.s, z11.s\n"
- "ld1w { z9.s }, p2/Z, [x10, x14, LSL #2]\n"
- "ldr x10, [x16, #0x90]\n"
+ "fmla z29.s, p3/M, z5.s, z11.s\n"
+ "ldp x28, x27, [x14, #0x10]\n"
+ "whilelt p2.s, x13, %x[n_channels]\n"
+ "fmla z28.s, p3/M, z4.s, z11.s\n"
+ "ld1w { z0.s }, p3/Z, [x15, #1, MUL VL]\n"
+ "fmla z25.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z2.s }, p3/Z, [x15, #3, MUL VL]\n"
+ "fmla z24.s, p3/M, z1.s, z11.s\n"
+ "ld1w { z11.s }, p1/Z, [x28, x12, LSL #2]\n"
+ "fmla z23.s, p3/M, z7.s, z12.s\n"
+ "ld1w { z1.s }, p3/Z, [x15, #2, MUL VL]\n"
+ "fmla z22.s, p3/M, z6.s, z12.s\n"
+ "ld1w { z6.s }, p3/Z, [x15, #7, MUL VL]\n"
+ "fmla z19.s, p3/M, z4.s, z12.s\n"
+ "fmla z18.s, p3/M, z3.s, z12.s\n"
+ "ld1w { z12.s }, p1/Z, [x27, x12, LSL #2]\n"
"fmla z21.s, p3/M, z8.s, z10.s\n"
- "fmla z19.s, p3/M, z1.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x27, x14, LSL #2]\n"
- "ldr x27, [x16, #0xa0]\n"
- "fmla z22.s, p3/M, z7.s, z10.s\n"
- "fmla z23.s, p3/M, z6.s, z10.s\n"
- "fmla z25.s, p3/M, z5.s, z10.s\n"
- "fmla z26.s, p3/M, z4.s, z10.s\n"
- "fmla z27.s, p3/M, z3.s, z10.s\n"
- "fmla z29.s, p3/M, z2.s, z10.s\n"
- "fmla z30.s, p3/M, z1.s, z10.s\n"
+ "ld1w { z3.s }, p3/Z, [x15, #4, MUL VL]\n"
+ "fmla z20.s, p3/M, z7.s, z10.s\n"
+ "fmla z17.s, p3/M, z5.s, z10.s\n"
+ "ld1w { z5.s }, p3/Z, [x15, #6, MUL VL]\n"
+ "fmla z16.s, p3/M, z4.s, z10.s\n"
+ "ld1w { z10.s }, p1/Z, [x9, x12, LSL #2]\n"
+ "incw x12\n"
+ "fmax z31.s, p3/M, z31.s, z15.s\n"
+ "ld1w { z4.s }, p3/Z, [x15, #5, MUL VL]\n"
+ "addvl x15, x15, #16\n"
+ "fmax z30.s, p3/M, z30.s, z15.s\n"
+ "ld1w { z7.s }, p3/Z, [x15, #-8, MUL VL]\n"
+ "cmp x12, %x[n_channels]\n"
+ "fmax z29.s, p3/M, z29.s, z15.s\n"
+ "ld1w { z8.s }, p3/Z, [x15, #-7, MUL VL]\n"
+ "addvl x15, x15, #-6\n"
+ "fmax z28.s, p3/M, z28.s, z15.s\n"
+ "fmax z27.s, p3/M, z27.s, z15.s\n"
+ "fmin z31.s, p3/M, z31.s, z14.s\n"
+ "st1w { z31.s }, p0, [x22, x11, LSL #2]\n"
+ "fmin z30.s, p3/M, z30.s, z14.s\n"
+ "fmin z29.s, p3/M, z29.s, z14.s\n"
+ "ldr x22, [x16, #0x20]\n"
+ "fmin z28.s, p3/M, z28.s, z14.s\n"
+ "st1w { z30.s }, p0, [x21, x11, LSL #2]\n"
+ "fmin z27.s, p3/M, z27.s, z14.s\n"
+ "fmax z26.s, p3/M, z26.s, z15.s\n"
+ "st1w { z29.s }, p0, [x20, x11, LSL #2]\n"
+ "fmax z25.s, p3/M, z25.s, z15.s\n"
+ "st1w { z28.s }, p0, [x19, x11, LSL #2]\n"
+ "fmax z24.s, p3/M, z24.s, z15.s\n"
+ "ldr x21, [x16, #0x28]\n"
+ "fmax z23.s, p3/M, z23.s, z15.s\n"
+ "st1w { z27.s }, p0, [x22, x11, LSL #2]\n"
+ "fmin z26.s, p3/M, z26.s, z14.s\n"
+ "ldr x20, [x16, #0x30]\n"
+ "fmin z25.s, p3/M, z25.s, z14.s\n"
+ "ldr x19, [x16, #0x38]\n"
+ "fmin z24.s, p3/M, z24.s, z14.s\n"
+ "ldr x22, [x16, #0x40]\n"
+ "fmin z23.s, p3/M, z23.s, z14.s\n"
+ "st1w { z26.s }, p0, [x21, x11, LSL #2]\n"
+ "fmax z22.s, p3/M, z22.s, z15.s\n"
+ "st1w { z25.s }, p0, [x20, x11, LSL #2]\n"
+ "fmax z21.s, p3/M, z21.s, z15.s\n"
+ "st1w { z24.s }, p0, [x19, x11, LSL #2]\n"
+ "fmax z20.s, p3/M, z20.s, z15.s\n"
+ "st1w { z23.s }, p0, [x22, x11, LSL #2]\n"
+ "fmax z19.s, p3/M, z19.s, z15.s\n"
+ "ldr x21, [x16, #0x48]\n"
+ "fmin z22.s, p3/M, z22.s, z14.s\n"
+ "ldr x20, [x16, #0x50]\n"
+ "fmin z21.s, p3/M, z21.s, z14.s\n"
+ "ldr x19, [x16, #0x58]\n"
+ "fmin z20.s, p3/M, z20.s, z14.s\n"
+ "ldr x22, [x16, #0x60]\n"
+ "fmin z19.s, p3/M, z19.s, z14.s\n"
+ "st1w { z22.s }, p0, [x21, x11, LSL #2]\n"
+ "fmax z18.s, p3/M, z18.s, z15.s\n"
+ "st1w { z21.s }, p0, [x20, x11, LSL #2]\n"
+ "fmax z17.s, p3/M, z17.s, z15.s\n"
+ "st1w { z20.s }, p0, [x19, x11, LSL #2]\n"
+ "fmax z16.s, p3/M, z16.s, z15.s\n"
+ "st1w { z19.s }, p0, [x22, x11, LSL #2]\n"
+ "ldr x21, [x16, #0x68]\n"
+ "fmin z18.s, p3/M, z18.s, z14.s\n"
+ "ldr x20, [x16, #0x70]\n"
+ "fmin z17.s, p3/M, z17.s, z14.s\n"
+ "ldr x19, [x16, #0x78]\n"
+ "fmin z16.s, p3/M, z16.s, z14.s\n"
+ "st1w { z18.s }, p0, [x21, x11, LSL #2]\n"
+ "st1w { z17.s }, p0, [x20, x11, LSL #2]\n"
+ "st1w { z16.s }, p0, [x19, x11, LSL #2]\n"
+ "blt 1b\n"
+ "2:" // Channel tail
+ "movprfx z31, z13\n fmla z31.s, p3/M, z8.s, z9.s\n"
+ "ldr x26, [x14, #0x20]\n"
+ "incw x11\n"
+ "movprfx z30, z13\n fmla z30.s, p3/M, z7.s, z9.s\n"
+ "ldr x25, [x14, #0x28]\n"
+ "mov p0.b, p2.b\n"
+ "movprfx z29, z13\n fmla z29.s, p3/M, z6.s, z9.s\n"
+ "ldr x24, [x14, #0x30]\n"
+ "movprfx z27, z13\n fmla z27.s, p3/M, z5.s, z9.s\n"
+ "ldr x23, [x14, #0x38]\n"
+ "movprfx z26, z13\n fmla z26.s, p3/M, z4.s, z9.s\n"
+ "ldr x10, [x14, #0x40]\n"
+ "movprfx z25, z13\n fmla z25.s, p3/M, z3.s, z9.s\n"
+ "ldr x9, [x14, #0x48]\n"
+ "movprfx z23, z13\n fmla z23.s, p3/M, z2.s, z9.s\n"
+ "ldr x28, [x14, #0x50]\n"
+ "movprfx z22, z13\n fmla z22.s, p3/M, z1.s, z9.s\n"
+ "ldr x27, [x14, #0x58]\n"
+ "movprfx z21, z13\n fmla z21.s, p3/M, z0.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x24, x13, LSL #2]\n"
"fmla z31.s, p3/M, z0.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x25, x14, LSL #2]\n"
- "ldr x25, [x16, #0xa8]\n"
- "fmla z16.s, p3/M, z3.s, z9.s\n"
- "fmla z20.s, p3/M, z0.s, z9.s\n"
- "ld1w { z12.s }, p2/Z, [x9, x14, LSL #2]\n"
- "ldr x9, [x16, #0x98]\n"
- "fmla z24.s, p3/M, z6.s, z11.s\n"
- "fmla z28.s, p3/M, z3.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x26, x14, LSL #2]\n"
- "ldr x26, [x16, #0xb0]\n"
- "fmla z17.s, p3/M, z4.s, z10.s\n"
- "fmla z18.s, p3/M, z3.s, z10.s\n"
- "fmla z21.s, p3/M, z1.s, z10.s\n"
- "fmla z19.s, p3/M, z5.s, z12.s\n"
- "fmla z23.s, p3/M, z2.s, z12.s\n"
- "fmla z22.s, p3/M, z0.s, z10.s\n"
- "ld1w { z12.s }, p2/Z, [x24, x14, LSL #2]\n"
- "ldr x24, [x16, #0xb8]\n"
- "fmla z27.s, p3/M, z8.s, z11.s\n"
- "fmla z31.s, p3/M, z5.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x12, x14, LSL #2]\n"
- "ldr x12, [x16, #0xc0]\n"
- "fmla z16.s, p3/M, z5.s, z10.s\n"
- "fmla z20.s, p3/M, z2.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x11, x14, LSL #2]\n"
- "ldr x11, [x16, #0xc8]\n"
- "fmla z17.s, p3/M, z5.s, z12.s\n"
- "fmla z18.s, p3/M, z4.s, z12.s\n"
- "fmla z21.s, p3/M, z2.s, z12.s\n"
- "fmla z19.s, p3/M, z3.s, z12.s\n"
- "fmla z22.s, p3/M, z1.s, z12.s\n"
- "fmla z23.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x9, x14, LSL #2]\n"
- "ldr x9, [x16, #0xd8]\n"
- "fmla z28.s, p3/M, z7.s, z11.s\n"
- "fmla z29.s, p3/M, z6.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x10, x14, LSL #2]\n"
- "ldr x10, [x16, #0xd0]\n"
- "fmla z16.s, p3/M, z7.s, z10.s\n"
- "fmla z17.s, p3/M, z6.s, z10.s\n"
- "fmla z20.s, p3/M, z4.s, z10.s\n"
- "fmla z21.s, p3/M, z3.s, z10.s\n"
- "fmla z24.s, p3/M, z1.s, z10.s\n"
- "fmla z25.s, p3/M, z0.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x27, x14, LSL #2]\n"
- "ldr x27, [x16, #0xe0]\n"
- "fmla z18.s, p3/M, z8.s, z12.s\n"
- "fmla z30.s, p3/M, z8.s, z11.s\n"
- "fmla z31.s, p3/M, z7.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x25, x14, LSL #2]\n"
- "fmla z27.s, p3/M, z1.s, z12.s\n"
- "ldr x25, [x16, #0xe8]\n"
- "fmla z19.s, p3/M, z7.s, z12.s\n"
- "fmla z22.s, p3/M, z5.s, z12.s\n"
- "fmla z23.s, p3/M, z4.s, z12.s\n"
- "fmla z26.s, p3/M, z2.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x26, x14, LSL #2]\n"
- "ldr x26, [x16, #0xf0]\n"
- "fmla z16.s, p3/M, z2.s, z10.s\n"
- "fmla z17.s, p3/M, z1.s, z10.s\n"
- "fmla z18.s, p3/M, z0.s, z10.s\n"
- "fmla z20.s, p3/M, z7.s, z11.s\n"
- "ld1w { z10.s }, p2/Z, [x24, x14, LSL #2]\n"
- "ldr x24, [x16, #0xf8]\n"
- "fmla z21.s, p3/M, z6.s, z11.s\n"
- "fmla z24.s, p3/M, z4.s, z11.s\n"
- "fmla z25.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z10.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "movprfx z28, z13\n fmla z28.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z8.s, z12.s\n"
+ "ldr x26, [x14, #0x60]\n"
+ "fmla z29.s, p3/M, z7.s, z12.s\n"
+ "ldr x25, [x14, #0x68]\n"
+ "fmla z26.s, p3/M, z5.s, z12.s\n"
+ "ldr x24, [x14, #0x70]\n"
+ "fmla z28.s, p3/M, z6.s, z12.s\n"
+ "ldr x22, [x16, #0x0]\n"
+ "fmla z25.s, p3/M, z4.s, z12.s\n"
+ "ldr x21, [x16, #0x8]\n"
+ "movprfx z24, z13\n fmla z24.s, p3/M, z3.s, z12.s\n"
+ "ldr x20, [x16, #0x10]\n"
+ "fmla z22.s, p3/M, z2.s, z12.s\n"
+ "ldr x19, [x16, #0x18]\n"
+ "fmla z21.s, p3/M, z1.s, z12.s\n"
+ "movprfx z20, z13\n fmla z20.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x23, x13, LSL #2]\n"
+ "movprfx z19, z13\n fmla z19.s, p3/M, z6.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x9, x13, LSL #2]\n"
+ "movprfx z16, z13\n fmla z16.s, p3/M, z8.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x10, x13, LSL #2]\n"
+ "fmla z27.s, p3/M, z8.s, z9.s\n"
+ "ldr x23, [x14, #0x78]\n"
+ "fmla z26.s, p3/M, z7.s, z9.s\n"
+ "ldr x10, [x14, #0x80]\n"
+ "fmla z25.s, p3/M, z6.s, z9.s\n"
+ "ldr x9, [x14, #0x88]\n"
+ "fmla z23.s, p3/M, z5.s, z9.s\n"
+ "fmla z22.s, p3/M, z4.s, z9.s\n"
+ "fmla z21.s, p3/M, z3.s, z9.s\n"
+ "fmla z19.s, p3/M, z2.s, z9.s\n"
+ "movprfx z18, z13\n fmla z18.s, p3/M, z1.s, z9.s\n"
+ "movprfx z17, z13\n fmla z17.s, p3/M, z0.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x28, x13, LSL #2]\n"
+ "fmla z31.s, p3/M, z1.s, z12.s\n"
+ "ldr x28, [x14, #0x90]\n"
+ "fmla z30.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x27, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z2.s, z11.s\n"
+ "ldr x27, [x14, #0x98]\n"
"fmla z28.s, p3/M, z1.s, z11.s\n"
- "fmla z29.s, p3/M, z0.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x12, x14, LSL #2]\n"
- "fmla z27.s, p3/M, z4.s, z11.s\n"
- "ldr x12, [x16, #0x100]\n"
- "fmla z30.s, p3/M, z2.s, z11.s\n"
- "fmla z17.s, p3/M, z2.s, z12.s\n"
- "fmla z18.s, p3/M, z1.s, z12.s\n"
- "fmla z19.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x11, x14, LSL #2]\n"
- "ldr x11, [x16, #0x108]\n"
- "fmla z16.s, p3/M, z6.s, z10.s\n"
+ "ld1w { z11.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "fmla z26.s, p3/M, z8.s, z10.s\n"
+ "ldr x26, [x14, #0xa0]\n"
+ "fmla z25.s, p3/M, z7.s, z10.s\n"
+ "fmla z24.s, p3/M, z6.s, z10.s\n"
+ "fmla z22.s, p3/M, z5.s, z10.s\n"
+ "fmla z21.s, p3/M, z4.s, z10.s\n"
"fmla z20.s, p3/M, z3.s, z10.s\n"
- "fmla z24.s, p3/M, z0.s, z10.s\n"
- "fmla z22.s, p3/M, z8.s, z11.s\n"
- "ld1w { z10.s }, p2/Z, [x10, x14, LSL #2]\n"
- "ldr x10, [x16, #0x110]\n"
- "fmla z23.s, p3/M, z7.s, z11.s\n"
- "fmla z26.s, p3/M, z5.s, z11.s\n"
- "fmla z31.s, p3/M, z1.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x9, x14, LSL #2]\n"
- "fmla z27.s, p3/M, z2.s, z12.s\n"
- "ldr x9, [x16, #0x118]\n"
- "fmla z28.s, p3/M, z0.s, z10.s\n"
- "fmla z29.s, p3/M, z4.s, z11.s\n"
- "fmla z30.s, p3/M, z3.s, z11.s\n"
- "fmla z19.s, p3/M, z8.s, z12.s\n"
- "fmla z23.s, p3/M, z5.s, z12.s\n"
- "fmla z20.s, p3/M, z6.s, z10.s\n"
- "ld1w { z12.s }, p2/Z, [x27, x14, LSL #2]\n"
- "fmla z24.s, p3/M, z3.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x25, x14, LSL #2]\n"
- "fmla z25.s, p3/M, z7.s, z11.s\n"
- "fmla z26.s, p3/M, z6.s, z11.s\n"
- "fmla z28.s, p3/M, z5.s, z11.s\n"
- "fmla z27.s, p3/M, z5.s, z12.s\n"
- "fmla z31.s, p3/M, z2.s, z12.s\n"
- "fmla z29.s, p3/M, z7.s, z10.s\n"
+ "fmla z18.s, p3/M, z2.s, z10.s\n"
+ "fmla z17.s, p3/M, z1.s, z10.s\n"
+ "fmla z16.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "fmla z31.s, p3/M, z3.s, z9.s\n"
+ "ldr x25, [x14, #0xa8]\n"
+ "fmla z27.s, p3/M, z0.s, z9.s\n"
+ "fmla z28.s, p3/M, z5.s, z12.s\n"
+ "fmla z24.s, p3/M, z2.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x23, x13, LSL #2]\n"
+ "fmla z23.s, p3/M, z6.s, z11.s\n"
+ "ldr x23, [x14, #0xb8]\n"
+ "fmla z19.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x24, x13, LSL #2]\n"
+ "fmla z31.s, p3/M, z5.s, z10.s\n"
+ "ldr x24, [x14, #0xb0]\n"
+ "fmla z30.s, p3/M, z4.s, z10.s\n"
+ "fmla z29.s, p3/M, z3.s, z10.s\n"
+ "fmla z27.s, p3/M, z2.s, z10.s\n"
+ "fmla z26.s, p3/M, z1.s, z10.s\n"
+ "fmla z25.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x9, x13, LSL #2]\n"
+ "fmla z20.s, p3/M, z8.s, z11.s\n"
+ "ldr x9, [x14, #0xc8]\n"
+ "fmla z16.s, p3/M, z5.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x10, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z5.s, z12.s\n"
+ "ldr x10, [x14, #0xc0]\n"
+ "fmla z29.s, p3/M, z4.s, z12.s\n"
+ "fmla z28.s, p3/M, z3.s, z12.s\n"
+ "fmla z26.s, p3/M, z2.s, z12.s\n"
+ "fmla z25.s, p3/M, z1.s, z12.s\n"
+ "fmla z24.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x27, x13, LSL #2]\n"
+ "fmla z19.s, p3/M, z7.s, z11.s\n"
+ "ldr x27, [x14, #0xd8]\n"
+ "fmla z18.s, p3/M, z6.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x28, x13, LSL #2]\n"
+ "fmla z31.s, p3/M, z7.s, z10.s\n"
+ "ldr x28, [x14, #0xd0]\n"
"fmla z30.s, p3/M, z6.s, z10.s\n"
- "fmla z24.s, p3/M, z8.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x26, x14, LSL #2]\n"
- "fmla z28.s, p3/M, z8.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x12, x14, LSL #2]\n"
+ "fmla z27.s, p3/M, z4.s, z10.s\n"
+ "fmla z26.s, p3/M, z3.s, z10.s\n"
+ "fmla z23.s, p3/M, z1.s, z10.s\n"
+ "fmla z22.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "fmla z17.s, p3/M, z8.s, z11.s\n"
+ "ldr x26, [x14, #0xe0]\n"
+ "fmla z16.s, p3/M, z7.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z8.s, z12.s\n"
+ "ldr x25, [x14, #0xe8]\n"
+ "fmla z28.s, p3/M, z7.s, z12.s\n"
+ "fmla z25.s, p3/M, z5.s, z12.s\n"
+ "fmla z24.s, p3/M, z4.s, z12.s\n"
+ "fmla z21.s, p3/M, z2.s, z12.s\n"
+ "fmla z20.s, p3/M, z1.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x24, x13, LSL #2]\n"
+ "fmla z31.s, p3/M, z2.s, z10.s\n"
+ "ldr x24, [x14, #0xf0]\n"
+ "fmla z30.s, p3/M, z1.s, z10.s\n"
+ "fmla z29.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x23, x13, LSL #2]\n"
+ "fmla z27.s, p3/M, z7.s, z11.s\n"
+ "ldr x23, [x14, #0xf8]\n"
+ "fmla z26.s, p3/M, z6.s, z11.s\n"
+ "fmla z23.s, p3/M, z4.s, z11.s\n"
+ "fmla z22.s, p3/M, z3.s, z11.s\n"
+ "fmla z19.s, p3/M, z1.s, z11.s\n"
+ "fmla z18.s, p3/M, z0.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x10, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z2.s, z12.s\n"
+ "ldr x10, [x14, #0x100]\n"
+ "fmla z29.s, p3/M, z1.s, z12.s\n"
+ "fmla z28.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x9, x13, LSL #2]\n"
+ "fmla z31.s, p3/M, z6.s, z10.s\n"
+ "ldr x9, [x14, #0x108]\n"
+ "fmla z27.s, p3/M, z3.s, z10.s\n"
+ "fmla z23.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x28, x13, LSL #2]\n"
"fmla z25.s, p3/M, z8.s, z11.s\n"
- "fmla z26.s, p3/M, z7.s, z11.s\n"
- "fmla z27.s, p3/M, z6.s, z11.s\n"
- "fmla z29.s, p3/M, z5.s, z11.s\n"
- "fmla z30.s, p3/M, z4.s, z11.s\n"
- "fmla z31.s, p3/M, z3.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x11, x14, LSL #2]\n"
- "fmla z23.s, p3/M, z8.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x24, x14, LSL #2]\n"
- "fmla z16.s, p3/M, z4.s, z10.s\n"
- "fmax z16.s, p3/M, z16.s, z14.s\n"
- "fmla z17.s, p3/M, z3.s, z10.s\n"
+ "ldr x28, [x14, #0x110]\n"
+ "fmla z24.s, p3/M, z7.s, z11.s\n"
+ "fmla z21.s, p3/M, z5.s, z11.s\n"
+ "fmla z20.s, p3/M, z4.s, z11.s\n"
+ "fmla z17.s, p3/M, z2.s, z11.s\n"
+ "fmla z16.s, p3/M, z1.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x27, x13, LSL #2]\n"
+ "fmla z28.s, p3/M, z8.s, z12.s\n"
+ "ldr x27, [x14, #0x118]\n"
+ "fmla z24.s, p3/M, z5.s, z12.s\n"
+ "fmla z20.s, p3/M, z2.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "fmla z27.s, p3/M, z6.s, z10.s\n"
+ "fmla z23.s, p3/M, z3.s, z10.s\n"
+ "fmla z19.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "fmla z22.s, p3/M, z7.s, z11.s\n"
+ "fmla z21.s, p3/M, z6.s, z11.s\n"
+ "fmla z23.s, p3/M, z8.s, z11.s\n"
+ "fmla z19.s, p3/M, z5.s, z11.s\n"
+ "fmla z18.s, p3/M, z4.s, z11.s\n"
+ "fmla z17.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x24, x13, LSL #2]\n"
+ "fmla z24.s, p3/M, z8.s, z12.s\n"
+ "fmla z20.s, p3/M, z5.s, z12.s\n"
+ "fmla z16.s, p3/M, z2.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x23, x13, LSL #2]\n"
+ "fmla z19.s, p3/M, z8.s, z10.s\n"
+ "fmla z18.s, p3/M, z7.s, z10.s\n"
+ "fmla z17.s, p3/M, z6.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x10, x13, LSL #2]\n"
+ "fmla z22.s, p3/M, z8.s, z11.s\n"
+ "fmla z21.s, p3/M, z7.s, z11.s\n"
+ "fmla z20.s, p3/M, z6.s, z11.s\n"
"fmla z18.s, p3/M, z5.s, z11.s\n"
- "fmax z17.s, p3/M, z17.s, z14.s\n"
- "fmax z18.s, p3/M, z18.s, z14.s\n"
- "fmla z19.s, p3/M, z4.s, z11.s\n"
- "fmla z29.s, p3/M, z8.s, z12.s\n"
- "fmax z19.s, p3/M, z19.s, z14.s\n"
- "fmin z16.s, p3/M, z16.s, z13.s\n"
- "fmla z30.s, p3/M, z7.s, z12.s\n"
- "fmla z31.s, p3/M, z6.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x10, x14, LSL #2]\n"
- "fmin z17.s, p3/M, z17.s, z13.s\n"
- "fmla z20.s, p3/M, z1.s, z10.s\n"
- "fmla z21.s, p3/M, z0.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x9, x14, LSL #2]\n"
- "fmin z18.s, p3/M, z18.s, z13.s\n"
- "fmla z22.s, p3/M, z2.s, z11.s\n"
- "fmla z23.s, p3/M, z1.s, z11.s\n"
- "fmin z19.s, p3/M, z19.s, z13.s\n"
- "fmax z20.s, p3/M, z20.s, z14.s\n"
- "fmla z24.s, p3/M, z7.s, z12.s\n"
- "fmla z25.s, p3/M, z6.s, z12.s\n"
- "fmax z21.s, p3/M, z21.s, z14.s\n"
- "fmax z22.s, p3/M, z22.s, z14.s\n"
- "fmla z26.s, p3/M, z8.s, z10.s\n"
- "fmla z27.s, p3/M, z7.s, z10.s\n"
- "fmax z23.s, p3/M, z23.s, z14.s\n"
- "st1w { z16.s }, p1, [x23, x13, LSL #2]\n"
- "st1w { z17.s }, p1, [x22, x13, LSL #2]\n"
- "ldr x23, [x28, #0x20]\n"
- "ldr x22, [x28, #0x28]\n"
- "fmla z28.s, p3/M, z4.s, z12.s\n"
- "st1w { z18.s }, p1, [x21, x13, LSL #2]\n"
- "ldr x21, [x28, #0x30]\n"
- "fmla z29.s, p3/M, z3.s, z12.s\n"
- "fmla z30.s, p3/M, z5.s, z10.s\n"
- "st1w { z19.s }, p1, [x20, x13, LSL #2]\n"
- "ldr x20, [x28, #0x38]\n"
+ "fmla z17.s, p3/M, z4.s, z11.s\n"
+ "fmla z16.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x9, x13, LSL #2]\n"
"fmla z31.s, p3/M, z4.s, z10.s\n"
- "fmin z20.s, p3/M, z20.s, z13.s\n"
- "fmin z21.s, p3/M, z21.s, z13.s\n"
- "fmin z22.s, p3/M, z22.s, z13.s\n"
- "st1w { z20.s }, p1, [x23, x13, LSL #2]\n"
- "ldr x23, [x28, #0x40]\n"
- "fmin z23.s, p3/M, z23.s, z13.s\n"
- "fmax z24.s, p3/M, z24.s, z14.s\n"
- "st1w { z21.s }, p1, [x22, x13, LSL #2]\n"
- "ldr x22, [x28, #0x48]\n"
- "fmax z25.s, p3/M, z25.s, z14.s\n"
- "fmax z26.s, p3/M, z26.s, z14.s\n"
- "st1w { z22.s }, p1, [x21, x13, LSL #2]\n"
- "ldr x21, [x28, #0x50]\n"
- "fmax z27.s, p3/M, z27.s, z14.s\n"
- "st1w { z23.s }, p1, [x20, x13, LSL #2]\n"
- "ldr x20, [x28, #0x58]\n"
- "fmin z24.s, p3/M, z24.s, z13.s\n"
- "fmin z25.s, p3/M, z25.s, z13.s\n"
- "fmin z26.s, p3/M, z26.s, z13.s\n"
- "st1w { z24.s }, p1, [x23, x13, LSL #2]\n"
- "ldr x23, [x28, #0x60]\n"
- "fmin z27.s, p3/M, z27.s, z13.s\n"
- "fmax z28.s, p3/M, z28.s, z14.s\n"
- "st1w { z25.s }, p1, [x22, x13, LSL #2]\n"
- "ldr x22, [x28, #0x68]\n"
- "fmax z29.s, p3/M, z29.s, z14.s\n"
- "fmax z30.s, p3/M, z30.s, z14.s\n"
- "st1w { z26.s }, p1, [x21, x13, LSL #2]\n"
- "ldr x21, [x28, #0x70]\n"
- "fmax z31.s, p3/M, z31.s, z14.s\n"
- "st1w { z27.s }, p1, [x20, x13, LSL #2]\n"
- "ldr x20, [x28, #0x78]\n"
- "fmin z28.s, p3/M, z28.s, z13.s\n"
- "fmin z29.s, p3/M, z29.s, z13.s\n"
- "fmin z30.s, p3/M, z30.s, z13.s\n"
- "st1w { z28.s }, p1, [x23, x13, LSL #2]\n"
- "fmin z31.s, p3/M, z31.s, z13.s\n"
- "st1w { z29.s }, p1, [x22, x13, LSL #2]\n"
- "st1w { z30.s }, p1, [x21, x13, LSL #2]\n"
- "st1w { z31.s }, p1, [x20, x13, LSL #2]\n"
+ "fmla z18.s, p3/M, z8.s, z12.s\n"
+ "fmla z17.s, p3/M, z7.s, z12.s\n"
+ "fmla z16.s, p3/M, z6.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x28, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z3.s, z10.s\n"
+ "fmla z27.s, p3/M, z1.s, z10.s\n"
+ "fmla z26.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x27, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z5.s, z11.s\n"
+ "fmla z28.s, p3/M, z4.s, z11.s\n"
+ "fmla z25.s, p3/M, z2.s, z11.s\n"
+ "fmla z24.s, p3/M, z1.s, z11.s\n"
+ "fmla z23.s, p3/M, z7.s, z12.s\n"
+ "fmla z22.s, p3/M, z6.s, z12.s\n"
+ "fmla z19.s, p3/M, z4.s, z12.s\n"
+ "fmla z18.s, p3/M, z3.s, z12.s\n"
+ "fmla z21.s, p3/M, z8.s, z10.s\n"
+ "fmla z20.s, p3/M, z7.s, z10.s\n"
+ "fmla z17.s, p3/M, z5.s, z10.s\n"
+ "fmla z16.s, p3/M, z4.s, z10.s\n"
+ "fmax z31.s, p3/M, z31.s, z15.s\n"
+ "fmax z30.s, p3/M, z30.s, z15.s\n"
+ "fmax z29.s, p3/M, z29.s, z15.s\n"
+ "fmax z28.s, p3/M, z28.s, z15.s\n"
+ "fmin z31.s, p3/M, z31.s, z14.s\n"
+ "st1w { z31.s }, p0, [x22, x11, LSL #2]\n"
+ "fmin z30.s, p3/M, z30.s, z14.s\n"
+ "fmin z29.s, p3/M, z29.s, z14.s\n"
+ "ldr x22, [x16, #0x20]\n"
+ "fmin z28.s, p3/M, z28.s, z14.s\n"
+ "st1w { z30.s }, p0, [x21, x11, LSL #2]\n"
+ "fmax z27.s, p3/M, z27.s, z15.s\n"
+ "fmax z26.s, p3/M, z26.s, z15.s\n"
+ "st1w { z29.s }, p0, [x20, x11, LSL #2]\n"
+ "fmax z25.s, p3/M, z25.s, z15.s\n"
+ "st1w { z28.s }, p0, [x19, x11, LSL #2]\n"
+ "fmax z24.s, p3/M, z24.s, z15.s\n"
+ "ldr x21, [x16, #0x28]\n"
+ "fmax z23.s, p3/M, z23.s, z15.s\n"
+ "ldr x20, [x16, #0x30]\n"
+ "fmin z27.s, p3/M, z27.s, z14.s\n"
+ "ldr x19, [x16, #0x38]\n"
+ "fmin z26.s, p3/M, z26.s, z14.s\n"
+ "st1w { z27.s }, p0, [x22, x11, LSL #2]\n"
+ "fmin z25.s, p3/M, z25.s, z14.s\n"
+ "fmin z24.s, p3/M, z24.s, z14.s\n"
+ "st1w { z26.s }, p0, [x21, x11, LSL #2]\n"
+ "fmin z23.s, p3/M, z23.s, z14.s\n"
+ "ldr x22, [x16, #0x40]\n"
+ "fmax z22.s, p3/M, z22.s, z15.s\n"
+ "ldr x21, [x16, #0x48]\n"
+ "fmax z21.s, p3/M, z21.s, z15.s\n"
+ "st1w { z25.s }, p0, [x20, x11, LSL #2]\n"
+ "fmax z20.s, p3/M, z20.s, z15.s\n"
+ "st1w { z24.s }, p0, [x19, x11, LSL #2]\n"
+ "fmax z19.s, p3/M, z19.s, z15.s\n"
+ "st1w { z23.s }, p0, [x22, x11, LSL #2]\n"
+ "fmin z22.s, p3/M, z22.s, z14.s\n"
+ "ldr x20, [x16, #0x50]\n"
+ "fmin z21.s, p3/M, z21.s, z14.s\n"
+ "ldr x19, [x16, #0x58]\n"
+ "fmin z20.s, p3/M, z20.s, z14.s\n"
+ "ldr x22, [x16, #0x60]\n"
+ "fmin z19.s, p3/M, z19.s, z14.s\n"
+ "st1w { z22.s }, p0, [x21, x11, LSL #2]\n"
+ "fmax z18.s, p3/M, z18.s, z15.s\n"
+ "st1w { z21.s }, p0, [x20, x11, LSL #2]\n"
+ "fmax z17.s, p3/M, z17.s, z15.s\n"
+ "st1w { z20.s }, p0, [x19, x11, LSL #2]\n"
+ "fmax z16.s, p3/M, z16.s, z15.s\n"
+ "st1w { z19.s }, p0, [x22, x11, LSL #2]\n"
+ "ldr x21, [x16, #0x68]\n"
+ "fmin z18.s, p3/M, z18.s, z14.s\n"
+ "ldr x20, [x16, #0x70]\n"
+ "fmin z17.s, p3/M, z17.s, z14.s\n"
+ "ldr x19, [x16, #0x78]\n"
+ "fmin z16.s, p3/M, z16.s, z14.s\n"
+ "st1w { z18.s }, p0, [x21, x11, LSL #2]\n"
+ "st1w { z17.s }, p0, [x20, x11, LSL #2]\n"
+ "st1w { z16.s }, p0, [x19, x11, LSL #2]\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
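
Both the "Channel loop" and "Channel tail" blocks above follow the same vector-length-agnostic pattern: process svcntw() channels per iteration under a whilelt predicate, clamp each accumulator between the activation min and max, then store through the output pointers. A minimal sketch of that predication pattern in ACLE SVE intrinsics, reduced to just the final fmax/fmin clamp, is shown below; clamp_channels and its parameters are illustrative, not the kernel's actual interface.

#include <arm_sve.h>
#include <cstdint>

// Sketch only: vector-length-agnostic clamp over n_channels floats,
// mirroring the whilelt-driven loop structure of the assembly above.
void clamp_channels(float *out, const float *in, uint64_t n_channels,
                    float act_min, float act_max)
{
    svfloat32_t vmin = svdup_f32(act_min);        // z15 analogue (min)
    svfloat32_t vmax = svdup_f32(act_max);        // z14 analogue (max)
    uint64_t i = 0;
    svbool_t pg = svwhilelt_b32(i, n_channels);   // active-lane predicate
    while (svptest_any(svptrue_b32(), pg))
    {
        svfloat32_t v = svld1_f32(pg, in + i);    // ld1w under predicate
        v = svmax_f32_m(pg, v, vmin);             // fmax vs activation_min
        v = svmin_f32_m(pg, v, vmax);             // fmin vs activation_max
        svst1_f32(pg, out + i, v);                // st1w under predicate
        i += svcntw();                            // advance by vector length
        pg = svwhilelt_b32(i, n_channels);        // tail lanes masked off
    }
}
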
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
index 5a1f309b88..ac33dcbce5 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -88,246 +88,246 @@ void sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
__asm__ __volatile__(
"ptrue p3.b\n"
- "mov x11, #0x0\n"
- "mov x16, #0x0\n"
+ "mov x7, #0x0\n"
+ "mov x8, #0x0\n"
"1:" // Tile loop
- "str x11, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "mov x25, #0x4\n"
- "mov x24, #0x2\n"
- "str x16, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x23, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x15, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "mul x22, x11, x23\n" // offset = tile_i * ld_input_row
- "ldr x21, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "madd x22, x16, x15, x22\n" // offset += tile_j * ld_input_col
- "ldr x14, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "cntw x13\n"
- "mul x20, x11, x21\n" // offset = tile_i * ld_output_row
- "ldr x12, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "ldr x11, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x10, x15, x15\n"
- "mul x22, x22, x25\n" // offset *= kernel_stride * output_size
- "add x12, x12, x22, LSL #2\n" // inptr[0] += offset * sizeof(float)
- "ldr x9, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "add x28, x12, x23, LSL #2\n"
- "madd x20, x16, x14, x20\n" // offset += tile_j * ld_output_col
+ "str x7, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "mov x23, #0x4\n"
+ "str x8, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "mov x17, #0x2\n"
+ "ldr x16, [%x[params_struct], %[offsetof_args_params]]\n"
+ "mov x15, #0x0\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "cntw x14\n"
+ "ldr x13, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "sub x12, XZR, x14\n"
+ "ldr x21, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "mul x19, x7, x22\n" // offset = tile_i * ld_input_row
+ "ldr x20, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "madd x19, x8, x13, x19\n" // offset += tile_j * ld_input_col
+ "ldr x11, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+ "mul x19, x19, x23\n" // offset *= kernel_stride * output_size
+ "ldr x10, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "add x21, x21, x19, LSL #2\n" // inptr[0] += offset * sizeof(float)
+ "ld1rw { z19.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "add x9, x21, x22, LSL #2\n"
+ "ld1rw { z18.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "add x28, x9, x22, LSL #2\n"
+ "ld1w { z17.s }, p3/Z, [x16]\n"
+ "add x27, x28, x22, LSL #2\n"
+ "ld1w { z0.s }, p3/Z, [x16, #1, MUL VL]\n"
+ "add x26, x27, x22, LSL #2\n"
+ "ld1w { z1.s }, p3/Z, [x16, #2, MUL VL]\n"
+ "add x25, x13, x13\n"
+ "ld1w { z2.s }, p3/Z, [x16, #3, MUL VL]\n"
+ "add x24, x25, x13\n"
+ "ld1w { z3.s }, p3/Z, [x16, #4, MUL VL]\n"
+ "add x23, x24, x13\n"
+ "ld1w { z4.s }, p3/Z, [x16, #5, MUL VL]\n"
+ "mul x19, x7, x20\n" // offset = tile_i * ld_output_row
+ "ld1w { z5.s }, p3/Z, [x16, #6, MUL VL]\n"
+ "madd x19, x8, x11, x19\n" // offset += tile_j * ld_output_col
+ "ld1w { z6.s }, p3/Z, [x16, #7, MUL VL]\n"
+ "mul x19, x19, x17\n" // offset *= output_tile_size
"whilelt p2.s, XZR, %x[n_channels]\n"
- "ld1w { z19.s }, p3/Z, [x11]\n"
- "ld1w { z0.s }, p3/Z, [x11, #1, MUL VL]\n"
- "mul x20, x20, x24\n" // offset *= output_tile_size
- "ld1w { z1.s }, p3/Z, [x11, #2, MUL VL]\n"
- "ld1w { z2.s }, p3/Z, [x11, #3, MUL VL]\n"
- "add x27, x28, x23, LSL #2\n"
- "ld1w { z3.s }, p3/Z, [x11, #4, MUL VL]\n"
- "ld1w { z4.s }, p3/Z, [x11, #5, MUL VL]\n"
- "add x26, x10, x15\n"
- "add x25, x27, x23, LSL #2\n"
- "ld1w { z5.s }, p3/Z, [x11, #6, MUL VL]\n"
- "ld1w { z6.s }, p3/Z, [x11, #7, MUL VL]\n"
- "addvl x11, x11, #16\n"
- "add x24, x26, x15\n"
- "add x9, x9, x20, LSL #2\n" // outptrs[0] += offset * sizeof(float)
- "cmp x13, %x[n_channels]\n"
- "ld1rw { z18.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ld1rw { z17.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "add x23, x25, x23, LSL #2\n"
- "add x22, x9, x21, LSL #2\n"
- "ld1w { z7.s }, p3/Z, [x11, #-8, MUL VL]\n"
- "ld1w { z8.s }, p3/Z, [x11, #-7, MUL VL]\n"
- "mov x21, #0x0\n"
- "sub x20, XZR, x13\n"
- "ld1w { z9.s }, p2/Z, [x27, x10, LSL #2]\n"
- "ld1w { z10.s }, p2/Z, [x12]\n"
- "ld1w { z11.s }, p2/Z, [x12, x15, LSL #2]\n"
- "ld1w { z12.s }, p2/Z, [x12, x26, LSL #2]\n"
- "addvl x11, x11, #-6\n"
- "ld1w { z13.s }, p2/Z, [x12, x24, LSL #2]\n"
- "ld1w { z14.s }, p2/Z, [x28]\n"
- "ld1w { z15.s }, p2/Z, [x28, x15, LSL #2]\n"
- "ld1w { z16.s }, p2/Z, [x12, x10, LSL #2]\n"
+ "ld1w { z9.s }, p2/Z, [x28, x25, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x21]\n"
+ "add x10, x10, x19, LSL #2\n" // outptrs[0] += offset * sizeof(float)
+ "ld1w { z11.s }, p2/Z, [x21, x13, LSL #2]\n"
+ "add x22, x10, x20, LSL #2\n"
+ "ld1w { z12.s }, p2/Z, [x21, x24, LSL #2]\n"
+ "addvl x16, x16, #16\n"
+ "ld1w { z13.s }, p2/Z, [x21, x23, LSL #2]\n"
+ "cmp x14, %x[n_channels]\n"
+ "ld1w { z7.s }, p3/Z, [x16, #-8, MUL VL]\n"
+ "ld1w { z8.s }, p3/Z, [x16, #-7, MUL VL]\n"
+ "addvl x16, x16, #-6\n"
+ "ld1w { z14.s }, p2/Z, [x9]\n"
+ "ld1w { z15.s }, p2/Z, [x9, x13, LSL #2]\n"
+ "ld1w { z16.s }, p2/Z, [x21, x25, LSL #2]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
- "movprfx z28, z19\n fmla z28.s, p3/M, z8.s, z9.s\n"
- "movprfx z29, z19\n fmla z29.s, p3/M, z6.s, z9.s\n"
- "whilelt p1.s, x13, %x[n_channels]\n"
- "incw x21\n"
- "fmla z28.s, p3/M, z0.s, z10.s\n"
- "fmla z29.s, p3/M, z1.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x28, x24, LSL #2]\n"
- "incw x13\n"
- "fmla z28.s, p3/M, z1.s, z11.s\n"
- "fmla z29.s, p3/M, z2.s, z13.s\n"
- "ld1w { z11.s }, p2/Z, [x28, x26, LSL #2]\n"
- "ld1w { z13.s }, p2/Z, [x28, x10, LSL #2]\n"
- "fmla z28.s, p3/M, z3.s, z14.s\n"
- "fmla z29.s, p3/M, z0.s, z16.s\n"
- "ld1w { z14.s }, p2/Z, [x25]\n"
+ "movprfx z31, z17\n fmla z31.s, p3/M, z8.s, z9.s\n"
+ "whilelt p1.s, x14, %x[n_channels]\n"
+ "movprfx z30, z17\n fmla z30.s, p3/M, z6.s, z9.s\n"
+ "incw x12\n"
+ "movprfx z29, z17\n fmla z29.s, p3/M, z2.s, z9.s\n"
"mov p0.b, p2.b\n"
- "fmla z28.s, p3/M, z4.s, z15.s\n"
+ "movprfx z28, z17\n fmla z28.s, p3/M, z0.s, z9.s\n"
+ "ld1w { z17.s }, p3/Z, [x16]\n"
+ "incw x15\n"
+ "fmla z31.s, p3/M, z0.s, z10.s\n"
+ "addvl x21, x21, #1\n"
+ "ld1w { z10.s }, p1/Z, [x21]\n"
+ "fmla z30.s, p3/M, z1.s, z12.s\n"
+ "incw x14\n"
+ "fmla z31.s, p3/M, z1.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x9, x24, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x9, x23, LSL #2]\n"
+ "fmla z30.s, p3/M, z2.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x9, x25, LSL #2]\n"
+ "fmla z31.s, p3/M, z3.s, z14.s\n"
+ "ld1w { z14.s }, p2/Z, [x27]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z30.s, p3/M, z0.s, z16.s\n"
+ "fmla z29.s, p3/M, z3.s, z14.s\n"
+ "ld1w { z14.s }, p2/Z, [x27, x23, LSL #2]\n"
+ "fmla z31.s, p3/M, z4.s, z15.s\n"
+ "ld1w { z15.s }, p2/Z, [x28]\n"
+ "fmla z30.s, p3/M, z4.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x27, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z0.s, z15.s\n"
+ "ld1w { z0.s }, p3/Z, [x16, #1, MUL VL]\n"
+ "fmla z31.s, p3/M, z2.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x28, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z5.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x28, x24, LSL #2]\n"
"fmla z29.s, p3/M, z4.s, z11.s\n"
- "ld1w { z15.s }, p2/Z, [x27]\n"
- "ld1w { z11.s }, p2/Z, [x25, x15, LSL #2]\n"
- "fmla z28.s, p3/M, z2.s, z16.s\n"
- "fmla z29.s, p3/M, z5.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x27, x26, LSL #2]\n"
- "ld1w { z16.s }, p2/Z, [x27, x15, LSL #2]\n"
- "movprfx z30, z19\n fmla z30.s, p3/M, z2.s, z9.s\n"
- "movprfx z31, z19\n fmla z31.s, p3/M, z0.s, z9.s\n"
- "addvl x12, x12, #1\n"
+ "ld1w { z11.s }, p2/Z, [x28, x23, LSL #2]\n"
"addvl x28, x28, #1\n"
- "fmla z28.s, p3/M, z5.s, z13.s\n"
- "fmla z29.s, p3/M, z3.s, z13.s\n"
- "ld1w { z13.s }, p2/Z, [x25, x26, LSL #2]\n"
- "ld1w { z19.s }, p3/Z, [x11]\n"
- "fmla z30.s, p3/M, z3.s, z14.s\n"
- "fmla z31.s, p3/M, z4.s, z13.s\n"
- "ld1w { z14.s }, p2/Z, [x25, x24, LSL #2]\n"
- "ld1w { z13.s }, p2/Z, [x23, x15, LSL #2]\n"
- "fmla z30.s, p3/M, z0.s, z15.s\n"
- "fmla z31.s, p3/M, z1.s, z12.s\n"
- "ld1w { z0.s }, p3/Z, [x11, #1, MUL VL]\n"
- "incw x20\n"
- "fmla z30.s, p3/M, z4.s, z11.s\n"
- "fmla z31.s, p3/M, z5.s, z14.s\n"
- "ld1w { z11.s }, p2/Z, [x27, x24, LSL #2]\n"
- "ld1w { z14.s }, p2/Z, [x23, x26, LSL #2]\n"
- "fmla z28.s, p3/M, z6.s, z15.s\n"
- "fmla z30.s, p3/M, z1.s, z16.s\n"
- "ld1w { z15.s }, p2/Z, [x23]\n"
- "addvl x27, x27, #1\n"
- "fmla z31.s, p3/M, z2.s, z11.s\n"
- "fmla z28.s, p3/M, z7.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x25, x10, LSL #2]\n"
- "fmax z28.s, p3/M, z28.s, z18.s\n"
- "fmla z30.s, p3/M, z6.s, z15.s\n"
- "fmla z31.s, p3/M, z3.s, z16.s\n"
- "ld1w { z15.s }, p2/Z, [x23, x10, LSL #2]\n"
- "ld1w { z1.s }, p3/Z, [x11, #2, MUL VL]\n"
- "fmla z30.s, p3/M, z7.s, z13.s\n"
- "fmla z31.s, p3/M, z7.s, z14.s\n"
- "ld1w { z2.s }, p3/Z, [x11, #3, MUL VL]\n"
- "ld1w { z3.s }, p3/Z, [x11, #4, MUL VL]\n"
- "fmla z29.s, p3/M, z7.s, z12.s\n"
- "fmla z30.s, p3/M, z5.s, z16.s\n"
- "ld1w { z4.s }, p3/Z, [x11, #5, MUL VL]\n"
- "ld1w { z5.s }, p3/Z, [x11, #6, MUL VL]\n"
+ "fmla z31.s, p3/M, z5.s, z13.s\n"
+ "ld1w { z9.s }, p1/Z, [x28, x25, LSL #2]\n"
+ "fmla z30.s, p3/M, z3.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x27, x24, LSL #2]\n"
+ "fmla z29.s, p3/M, z1.s, z16.s\n"
"fmla z31.s, p3/M, z6.s, z15.s\n"
- "fmla z29.s, p3/M, z8.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x23, x24, LSL #2]\n"
- "fmax z29.s, p3/M, z29.s, z18.s\n"
- "fmla z30.s, p3/M, z8.s, z15.s\n"
- "fmla z31.s, p3/M, z8.s, z11.s\n"
- "fmax z30.s, p3/M, z30.s, z18.s\n"
- "fmax z31.s, p3/M, z31.s, z18.s\n"
- "ld1w { z6.s }, p3/Z, [x11, #7, MUL VL]\n"
- "addvl x11, x11, #16\n"
- "whilelt p2.s, x21, %x[n_channels]\n"
- "ld1w { z9.s }, p1/Z, [x27, x10, LSL #2]\n"
- "cmp x13, %x[n_channels]\n"
- "fmin z28.s, p3/M, z28.s, z17.s\n"
- "ld1w { z10.s }, p1/Z, [x12]\n"
- "ld1w { z11.s }, p1/Z, [x12, x15, LSL #2]\n"
- "fmin z29.s, p3/M, z29.s, z17.s\n"
- "fmin z30.s, p3/M, z30.s, z17.s\n"
- "ld1w { z12.s }, p1/Z, [x12, x26, LSL #2]\n"
- "ld1w { z13.s }, p1/Z, [x12, x24, LSL #2]\n"
- "fmin z31.s, p3/M, z31.s, z17.s\n"
- "addvl x25, x25, #1\n"
- "ld1w { z14.s }, p1/Z, [x28]\n"
- "ld1w { z15.s }, p1/Z, [x28, x15, LSL #2]\n"
- "addvl x23, x23, #1\n"
- "ld1w { z16.s }, p1/Z, [x12, x10, LSL #2]\n"
- "st1w { z28.s }, p0, [x9]\n"
- "ld1w { z7.s }, p3/Z, [x11, #-8, MUL VL]\n"
- "st1w { z29.s }, p0, [x9, x14, LSL #2]\n"
- "addvl x9, x9, #1\n"
- "ld1w { z8.s }, p3/Z, [x11, #-7, MUL VL]\n"
- "addvl x11, x11, #-6\n"
- "st1w { z30.s }, p0, [x22]\n"
- "st1w { z31.s }, p0, [x22, x14, LSL #2]\n"
+ "ld1w { z15.s }, p2/Z, [x26]\n"
+ "fmla z28.s, p3/M, z4.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z7.s, z12.s\n"
+ "ld1w { z4.s }, p3/Z, [x16, #5, MUL VL]\n"
+ "fmla z29.s, p3/M, z6.s, z15.s\n"
+ "ld1w { z15.s }, p2/Z, [x26, x25, LSL #2]\n"
+ "fmla z31.s, p3/M, z7.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x27, x25, LSL #2]\n"
+ "addvl x27, x27, #1\n"
+ "fmla z28.s, p3/M, z1.s, z12.s\n"
+ "ld1w { z12.s }, p1/Z, [x21, x24, LSL #2]\n"
+ "fmla z30.s, p3/M, z8.s, z11.s\n"
+ "ld1w { z1.s }, p3/Z, [x16, #2, MUL VL]\n"
+ "fmla z29.s, p3/M, z7.s, z13.s\n"
+ "ld1w { z13.s }, p1/Z, [x21, x23, LSL #2]\n"
+ "fmax z31.s, p3/M, z31.s, z19.s\n"
+ "fmla z28.s, p3/M, z5.s, z14.s\n"
+ "ld1w { z14.s }, p2/Z, [x26, x24, LSL #2]\n"
+ "fmax z30.s, p3/M, z30.s, z19.s\n"
+ "fmla z29.s, p3/M, z5.s, z16.s\n"
+ "ld1w { z5.s }, p3/Z, [x16, #6, MUL VL]\n"
+ "fmin z31.s, p3/M, z31.s, z18.s\n"
+ "st1w { z31.s }, p0, [x10]\n"
+ "fmla z28.s, p3/M, z2.s, z11.s\n"
+ "fmla z29.s, p3/M, z8.s, z15.s\n"
+ "ld1w { z11.s }, p2/Z, [x26, x23, LSL #2]\n"
+ "whilelt p2.s, x15, %x[n_channels]\n"
+ "fmin z30.s, p3/M, z30.s, z18.s\n"
+ "ld1w { z2.s }, p3/Z, [x16, #3, MUL VL]\n"
+ "addvl x26, x26, #1\n"
+ "fmla z28.s, p3/M, z3.s, z16.s\n"
+ "ld1w { z16.s }, p1/Z, [x21, x25, LSL #2]\n"
+ "cmp x14, %x[n_channels]\n"
+ "fmax z29.s, p3/M, z29.s, z19.s\n"
+ "ld1w { z3.s }, p3/Z, [x16, #4, MUL VL]\n"
+ "st1w { z30.s }, p0, [x10, x11, LSL #2]\n"
+ "fmla z28.s, p3/M, z7.s, z14.s\n"
+ "ld1w { z14.s }, p1/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
+ "fmin z29.s, p3/M, z29.s, z18.s\n"
+ "st1w { z29.s }, p0, [x22]\n"
+ "fmla z28.s, p3/M, z6.s, z15.s\n"
+ "ld1w { z15.s }, p1/Z, [x9, x13, LSL #2]\n"
+ "fmla z28.s, p3/M, z8.s, z11.s\n"
+ "ld1w { z11.s }, p1/Z, [x21, x13, LSL #2]\n"
+ "ld1w { z6.s }, p3/Z, [x16, #7, MUL VL]\n"
+ "fmax z28.s, p3/M, z28.s, z19.s\n"
+ "addvl x16, x16, #16\n"
+ "ld1w { z7.s }, p3/Z, [x16, #-8, MUL VL]\n"
+ "fmin z28.s, p3/M, z28.s, z18.s\n"
+ "ld1w { z8.s }, p3/Z, [x16, #-7, MUL VL]\n"
+ "addvl x16, x16, #-6\n"
+ "st1w { z28.s }, p0, [x22, x11, LSL #2]\n"
"addvl x22, x22, #1\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
- "movprfx z28, z19\n fmla z28.s, p3/M, z8.s, z9.s\n"
- "movprfx z29, z19\n fmla z29.s, p3/M, z6.s, z9.s\n"
- "ldr x16, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x11, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "fmla z28.s, p3/M, z0.s, z10.s\n"
- "fmla z29.s, p3/M, z1.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x28, x24, LSL #2]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "fmla z28.s, p3/M, z1.s, z11.s\n"
- "fmla z29.s, p3/M, z2.s, z13.s\n"
- "ld1w { z11.s }, p2/Z, [x28, x26, LSL #2]\n"
- "ld1w { z13.s }, p2/Z, [x28, x10, LSL #2]\n"
- "fmla z28.s, p3/M, z3.s, z14.s\n"
- "fmla z29.s, p3/M, z0.s, z16.s\n"
- "ld1w { z14.s }, p2/Z, [x25]\n"
- "add x16, x16, #0x1\n"
- "fmla z28.s, p3/M, z4.s, z15.s\n"
- "fmla z29.s, p3/M, z4.s, z11.s\n"
- "ld1w { z15.s }, p2/Z, [x27]\n"
- "ld1w { z11.s }, p2/Z, [x25, x15, LSL #2]\n"
- "fmla z28.s, p3/M, z2.s, z16.s\n"
- "fmla z29.s, p3/M, z5.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x27, x26, LSL #2]\n"
- "ld1w { z16.s }, p2/Z, [x27, x15, LSL #2]\n"
- "movprfx z30, z19\n fmla z30.s, p3/M, z2.s, z9.s\n"
- "movprfx z31, z19\n fmla z31.s, p3/M, z0.s, z9.s\n"
- "cmp x16, x20\n"
- "add x21, x11, #0x1\n"
- "fmla z28.s, p3/M, z5.s, z13.s\n"
- "fmla z29.s, p3/M, z3.s, z13.s\n"
- "ld1w { z13.s }, p2/Z, [x25, x26, LSL #2]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "fmla z30.s, p3/M, z3.s, z14.s\n"
- "fmla z31.s, p3/M, z4.s, z13.s\n"
- "ld1w { z14.s }, p2/Z, [x25, x24, LSL #2]\n"
- "ld1w { z13.s }, p2/Z, [x23, x15, LSL #2]\n"
- "fmla z30.s, p3/M, z0.s, z15.s\n"
- "fmla z31.s, p3/M, z1.s, z12.s\n"
- "csel x11, x11, x21, LT\n"
+ "movprfx z31, z17\n fmla z31.s, p3/M, z8.s, z9.s\n"
+ "ldr x7, [%x[params_struct], %[offsetof_args_tile_i]]\n"
"mov p0.b, p2.b\n"
+ "movprfx z30, z17\n fmla z30.s, p3/M, z6.s, z9.s\n"
+ "ldr x8, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "add x21, x7, #0x1\n"
+ "movprfx z29, z17\n fmla z29.s, p3/M, z2.s, z9.s\n"
+ "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "movprfx z28, z17\n fmla z28.s, p3/M, z0.s, z9.s\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "add x8, x8, #0x1\n"
+ "fmla z31.s, p3/M, z0.s, z10.s\n"
+ "cmp x8, x19\n"
+ "fmla z30.s, p3/M, z1.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x9, x23, LSL #2]\n"
+ "fmla z31.s, p3/M, z1.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x9, x24, LSL #2]\n"
+ "csel x8, x8, XZR, LT\n"
+ "fmla z30.s, p3/M, z2.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x9, x25, LSL #2]\n"
+ "csel x7, x7, x21, LT\n"
+ "fmla z31.s, p3/M, z3.s, z14.s\n"
+ "ld1w { z14.s }, p2/Z, [x27]\n"
+ "cmp x7, x20\n"
+ "fmla z30.s, p3/M, z0.s, z16.s\n"
+ "fmla z29.s, p3/M, z3.s, z14.s\n"
+ "ld1w { z14.s }, p2/Z, [x27, x23, LSL #2]\n"
+ "fmla z31.s, p3/M, z4.s, z15.s\n"
+ "ld1w { z15.s }, p2/Z, [x28]\n"
"fmla z30.s, p3/M, z4.s, z11.s\n"
- "fmla z31.s, p3/M, z5.s, z14.s\n"
- "ld1w { z11.s }, p2/Z, [x27, x24, LSL #2]\n"
- "ld1w { z14.s }, p2/Z, [x23, x26, LSL #2]\n"
- "fmla z28.s, p3/M, z6.s, z15.s\n"
- "fmla z30.s, p3/M, z1.s, z16.s\n"
- "ld1w { z15.s }, p2/Z, [x23]\n"
- "csel x16, x16, XZR, LT\n"
- "fmla z31.s, p3/M, z2.s, z11.s\n"
- "fmla z28.s, p3/M, z7.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x25, x10, LSL #2]\n"
- "fmax z28.s, p3/M, z28.s, z18.s\n"
- "fmla z30.s, p3/M, z6.s, z15.s\n"
- "fmla z31.s, p3/M, z3.s, z16.s\n"
- "ld1w { z15.s }, p2/Z, [x23, x10, LSL #2]\n"
- "cmp x11, x20\n"
- "fmla z30.s, p3/M, z7.s, z13.s\n"
- "fmla z31.s, p3/M, z7.s, z14.s\n"
- "fmin z28.s, p3/M, z28.s, z17.s\n"
- "st1w { z28.s }, p0, [x9]\n"
- "fmla z29.s, p3/M, z7.s, z12.s\n"
- "fmla z30.s, p3/M, z5.s, z16.s\n"
+ "ld1w { z11.s }, p2/Z, [x27, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z0.s, z15.s\n"
+ "fmla z31.s, p3/M, z2.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x28, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z5.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x28, x24, LSL #2]\n"
+ "fmla z29.s, p3/M, z4.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x28, x23, LSL #2]\n"
+ "fmla z31.s, p3/M, z5.s, z13.s\n"
+ "fmla z30.s, p3/M, z3.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x27, x24, LSL #2]\n"
+ "fmla z29.s, p3/M, z1.s, z16.s\n"
"fmla z31.s, p3/M, z6.s, z15.s\n"
- "fmla z29.s, p3/M, z8.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x23, x24, LSL #2]\n"
- "fmax z29.s, p3/M, z29.s, z18.s\n"
- "fmla z30.s, p3/M, z8.s, z15.s\n"
- "fmla z31.s, p3/M, z8.s, z11.s\n"
- "fmax z30.s, p3/M, z30.s, z18.s\n"
- "fmax z31.s, p3/M, z31.s, z18.s\n"
- "fmin z29.s, p3/M, z29.s, z17.s\n"
- "fmin z30.s, p3/M, z30.s, z17.s\n"
- "st1w { z29.s }, p0, [x9, x14, LSL #2]\n"
- "fmin z31.s, p3/M, z31.s, z17.s\n"
- "st1w { z30.s }, p0, [x22]\n"
- "st1w { z31.s }, p0, [x22, x14, LSL #2]\n"
+ "ld1w { z15.s }, p2/Z, [x26]\n"
+ "fmla z28.s, p3/M, z4.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z7.s, z12.s\n"
+ "fmla z29.s, p3/M, z6.s, z15.s\n"
+ "ld1w { z15.s }, p2/Z, [x26, x25, LSL #2]\n"
+ "fmla z31.s, p3/M, z7.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x27, x25, LSL #2]\n"
+ "fmla z28.s, p3/M, z1.s, z12.s\n"
+ "fmla z30.s, p3/M, z8.s, z11.s\n"
+ "fmla z29.s, p3/M, z7.s, z13.s\n"
+ "fmax z31.s, p3/M, z31.s, z19.s\n"
+ "fmla z28.s, p3/M, z5.s, z14.s\n"
+ "ld1w { z14.s }, p2/Z, [x26, x24, LSL #2]\n"
+ "fmax z30.s, p3/M, z30.s, z19.s\n"
+ "fmla z29.s, p3/M, z5.s, z16.s\n"
+ "fmin z31.s, p3/M, z31.s, z18.s\n"
+ "st1w { z31.s }, p0, [x10]\n"
+ "fmla z28.s, p3/M, z2.s, z11.s\n"
+ "fmla z29.s, p3/M, z8.s, z15.s\n"
+ "ld1w { z11.s }, p2/Z, [x26, x23, LSL #2]\n"
+ "fmin z30.s, p3/M, z30.s, z18.s\n"
+ "st1w { z30.s }, p0, [x10, x11, LSL #2]\n"
+ "fmla z28.s, p3/M, z3.s, z16.s\n"
+ "fmax z29.s, p3/M, z29.s, z19.s\n"
+ "fmla z28.s, p3/M, z7.s, z14.s\n"
+ "fmin z29.s, p3/M, z29.s, z18.s\n"
+ "st1w { z29.s }, p0, [x22]\n"
+ "fmla z28.s, p3/M, z6.s, z15.s\n"
+ "fmla z28.s, p3/M, z8.s, z11.s\n"
+ "fmax z28.s, p3/M, z28.s, z19.s\n"
+ "fmin z28.s, p3/M, z28.s, z18.s\n"
+ "st1w { z28.s }, p0, [x22, x11, LSL #2]\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z28", "z29", "z30", "z31"
);
}
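The clobber-list change at the end of each hunk above is the visible contract of this revert: the restored kernels again use x7, x8, x17 and x19 as scratch and address registers, so those names must reappear in the asm statement's clobber list. As a minimal sketch of why (hypothetical function, not from this patch): in GCC/Clang extended inline assembly, every register the template writes must either be an operand or be declared clobbered, otherwise the compiler may keep a live value there across the asm block.

// Hypothetical example, not part of the library: scratch-register clobbers.
#include <cstdint>

#if defined(__aarch64__)
int64_t add_via_x19(int64_t a, int64_t b)
{
    int64_t out;
    __asm__ __volatile__(
        "mov x19, %x[a]\n"           // x19 used as scratch, as the restored
        "add %x[out], x19, %x[b]\n"  // kernels do with their address registers
        : [out] "=r" (out)
        : [a] "r" (a), [b] "r" (b)
        : "x19"                      // declare the scratch register clobbered
    );
    return out;
}
#endif

Since x19 is callee-saved under AAPCS64, listing it as clobbered makes the compiler spill and restore it around the asm statement; toolchains that reserve x19 outright reject such code, which is plausibly what the reverted change had been trying to avoid.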
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
index eb6c2daa97..829b0ff2c7 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,247 +87,247 @@ void sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
activation_min, activation_max);
__asm__ __volatile__(
+ "ldr x19, [%x[params_struct], %[offsetof_args_outptrs]]\n"
"ptrue p3.b\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "ldr x16, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x15, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "cntw x14\n"
- "ldp x13, x12, [x20, #0x0]\n"
- "ldp x11, x10, [x20, #0x10]\n"
- "mov x9, #0x0\n"
+ "ldr x15, [%x[params_struct], %[offsetof_args_params]]\n"
+ "add x14, %x[params_struct], %[offsetof_Args_inptrs]\n"
+ "ld1rw { z19.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "mov x13, #0x0\n"
+ "ld1rw { z18.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "cntw x12\n"
+ "ldp x11, x10, [x19, #0x0]\n"
+ "sub x9, XZR, x12\n"
+ "ldp x28, x27, [x19, #0x10]\n"
"whilelt p2.s, XZR, %x[n_channels]\n"
- "ld1w { z19.s }, p3/Z, [x16]\n"
- "ld1w { z0.s }, p3/Z, [x16, #1, MUL VL]\n"
- "cmp x14, %x[n_channels]\n"
- "ld1w { z1.s }, p3/Z, [x16, #2, MUL VL]\n"
- "ld1w { z2.s }, p3/Z, [x16, #3, MUL VL]\n"
- "sub x28, XZR, x14\n"
- "ld1w { z3.s }, p3/Z, [x16, #4, MUL VL]\n"
- "ld1w { z4.s }, p3/Z, [x16, #5, MUL VL]\n"
- "ld1w { z5.s }, p3/Z, [x16, #6, MUL VL]\n"
- "ld1w { z6.s }, p3/Z, [x16, #7, MUL VL]\n"
- "addvl x16, x16, #16\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "ldp x23, x22, [x15, #0x20]\n"
- "ldp x21, x20, [x15, #0x30]\n"
- "ld1rw { z18.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ld1rw { z17.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "ld1w { z7.s }, p3/Z, [x16, #-8, MUL VL]\n"
- "ld1w { z8.s }, p3/Z, [x16, #-7, MUL VL]\n"
- "addvl x16, x16, #-6\n"
- "ld1w { z9.s }, p2/Z, [x27, x9, LSL #2]\n"
- "ld1w { z10.s }, p2/Z, [x26, x9, LSL #2]\n"
- "ld1w { z11.s }, p2/Z, [x25, x9, LSL #2]\n"
- "ld1w { z12.s }, p2/Z, [x24, x9, LSL #2]\n"
- "ld1w { z13.s }, p2/Z, [x23, x9, LSL #2]\n"
- "ld1w { z14.s }, p2/Z, [x22, x9, LSL #2]\n"
- "ld1w { z15.s }, p2/Z, [x21, x9, LSL #2]\n"
- "ld1w { z16.s }, p2/Z, [x20, x9, LSL #2]\n"
+ "ld1w { z17.s }, p3/Z, [x15]\n"
+ "cmp x12, %x[n_channels]\n"
+ "ld1w { z0.s }, p3/Z, [x15, #1, MUL VL]\n"
+ "ld1w { z1.s }, p3/Z, [x15, #2, MUL VL]\n"
+ "ld1w { z2.s }, p3/Z, [x15, #3, MUL VL]\n"
+ "ld1w { z3.s }, p3/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z4.s }, p3/Z, [x15, #5, MUL VL]\n"
+ "ld1w { z5.s }, p3/Z, [x15, #6, MUL VL]\n"
+ "ld1w { z6.s }, p3/Z, [x15, #7, MUL VL]\n"
+ "addvl x15, x15, #16\n"
+ "ldp x26, x25, [x14, #0x0]\n"
+ "ld1w { z7.s }, p3/Z, [x15, #-8, MUL VL]\n"
+ "ld1w { z8.s }, p3/Z, [x15, #-7, MUL VL]\n"
+ "addvl x15, x15, #-6\n"
+ "ld1w { z9.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "ldp x24, x23, [x14, #0x10]\n"
+ "ldp x22, x21, [x14, #0x20]\n"
+ "ldp x20, x19, [x14, #0x30]\n"
+ "ld1w { z11.s }, p2/Z, [x24, x13, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x23, x13, LSL #2]\n"
+ "ld1w { z13.s }, p2/Z, [x22, x13, LSL #2]\n"
+ "ld1w { z14.s }, p2/Z, [x21, x13, LSL #2]\n"
+ "ld1w { z15.s }, p2/Z, [x20, x13, LSL #2]\n"
+ "ld1w { z16.s }, p2/Z, [x19, x13, LSL #2]\n"
"bge 2f\n"
"1:" // Channel loop
- "movprfx z28, z19\n fmla z28.s, p3/M, z8.s, z9.s\n"
- "movprfx z29, z19\n fmla z29.s, p3/M, z6.s, z9.s\n"
- "ldr x27, [x15, #0x40]\n"
- "ldr x26, [x15, #0x48]\n"
- "fmla z28.s, p3/M, z0.s, z10.s\n"
- "fmla z29.s, p3/M, z1.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x26, x9, LSL #2]\n"
- "ldr x25, [x15, #0x50]\n"
- "fmla z28.s, p3/M, z1.s, z11.s\n"
- "fmla z29.s, p3/M, z2.s, z13.s\n"
- "ld1w { z11.s }, p2/Z, [x27, x9, LSL #2]\n"
- "ld1w { z13.s }, p2/Z, [x25, x9, LSL #2]\n"
- "fmla z28.s, p3/M, z3.s, z14.s\n"
- "fmla z29.s, p3/M, z0.s, z16.s\n"
- "ldr x24, [x15, #0x58]\n"
- "ldr x20, [x15, #0x78]\n"
- "fmla z28.s, p3/M, z4.s, z15.s\n"
- "fmla z29.s, p3/M, z4.s, z11.s\n"
- "ld1w { z14.s }, p2/Z, [x24, x9, LSL #2]\n"
- "ldr x23, [x15, #0x60]\n"
- "fmla z28.s, p3/M, z2.s, z16.s\n"
- "fmla z29.s, p3/M, z5.s, z12.s\n"
- "ldr x27, [x15, #0x80]\n"
- "ld1w { z15.s }, p2/Z, [x23, x9, LSL #2]\n"
- "movprfx z30, z19\n fmla z30.s, p3/M, z2.s, z9.s\n"
- "movprfx z31, z19\n fmla z31.s, p3/M, z0.s, z9.s\n"
- "ld1w { z12.s }, p2/Z, [x27, x9, LSL #2]\n"
- "ldr x22, [x15, #0x68]\n"
- "fmla z28.s, p3/M, z5.s, z13.s\n"
- "fmla z29.s, p3/M, z3.s, z13.s\n"
- "ld1w { z13.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldr x26, [x15, #0x88]\n"
- "fmla z30.s, p3/M, z3.s, z14.s\n"
- "fmla z31.s, p3/M, z4.s, z13.s\n"
- "ld1w { z11.s }, p2/Z, [x22, x9, LSL #2]\n"
- "ld1w { z14.s }, p2/Z, [x26, x9, LSL #2]\n"
- "fmla z30.s, p3/M, z0.s, z15.s\n"
- "fmla z31.s, p3/M, z1.s, z12.s\n"
- "ldr x21, [x15, #0x70]\n"
- "ldr x24, [x15, #0x98]\n"
- "fmla z30.s, p3/M, z4.s, z11.s\n"
- "fmla z31.s, p3/M, z5.s, z14.s\n"
- "ld1w { z16.s }, p2/Z, [x21, x9, LSL #2]\n"
- "ld1w { z11.s }, p2/Z, [x24, x9, LSL #2]\n"
- "fmla z28.s, p3/M, z6.s, z15.s\n"
- "ldr x25, [x15, #0x90]\n"
- "ldr x22, [x15, #0xa8]\n"
- "fmla z30.s, p3/M, z1.s, z16.s\n"
- "fmla z31.s, p3/M, z2.s, z11.s\n"
- "fmla z28.s, p3/M, z7.s, z16.s\n"
- "ld1w { z15.s }, p2/Z, [x25, x9, LSL #2]\n"
- "ld1w { z16.s }, p2/Z, [x22, x9, LSL #2]\n"
- "ldr x23, [x15, #0xa0]\n"
- "ldr x21, [x15, #0xb0]\n"
- "fmla z30.s, p3/M, z6.s, z15.s\n"
- "fmla z31.s, p3/M, z3.s, z16.s\n"
- "ld1w { z13.s }, p2/Z, [x23, x9, LSL #2]\n"
- "ld1w { z14.s }, p2/Z, [x21, x9, LSL #2]\n"
- "fmla z30.s, p3/M, z7.s, z13.s\n"
- "fmla z31.s, p3/M, z7.s, z14.s\n"
- "ldr x20, [x15, #0xb8]\n"
- "fmla z29.s, p3/M, z7.s, z12.s\n"
- "ld1w { z15.s }, p2/Z, [x20, x9, LSL #2]\n"
- "fmla z30.s, p3/M, z5.s, z16.s\n"
- "ldr x27, [x15, #0xc0]\n"
- "fmla z31.s, p3/M, z6.s, z15.s\n"
- "fmla z29.s, p3/M, z8.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x27, x9, LSL #2]\n"
- "fmla z30.s, p3/M, z8.s, z15.s\n"
- "fmla z31.s, p3/M, z8.s, z11.s\n"
- "whilelt p1.s, x14, %x[n_channels]\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "ldp x23, x22, [x15, #0x20]\n"
+ "movprfx z31, z17\n fmla z31.s, p3/M, z8.s, z9.s\n"
+ "ldr x26, [x14, #0x40]\n"
+ "whilelt p1.s, x12, %x[n_channels]\n"
+ "movprfx z30, z17\n fmla z30.s, p3/M, z6.s, z9.s\n"
+ "ldr x25, [x14, #0x48]\n"
"incw x9\n"
- "fmax z28.s, p3/M, z28.s, z18.s\n"
- "ldp x21, x20, [x15, #0x30]\n"
- "ld1w { z9.s }, p1/Z, [x27, x14, LSL #2]\n"
- "fmax z29.s, p3/M, z29.s, z18.s\n"
- "fmax z30.s, p3/M, z30.s, z18.s\n"
- "ld1w { z10.s }, p1/Z, [x26, x14, LSL #2]\n"
- "ld1w { z11.s }, p1/Z, [x25, x14, LSL #2]\n"
- "fmax z31.s, p3/M, z31.s, z18.s\n"
- "incw x28\n"
- "ld1w { z12.s }, p1/Z, [x24, x14, LSL #2]\n"
- "ld1w { z13.s }, p1/Z, [x23, x14, LSL #2]\n"
+ "movprfx z29, z17\n fmla z29.s, p3/M, z2.s, z9.s\n"
+ "ldr x24, [x14, #0x50]\n"
"mov p0.b, p2.b\n"
- "whilelt p2.s, x9, %x[n_channels]\n"
- "ld1w { z14.s }, p1/Z, [x22, x14, LSL #2]\n"
- "ld1w { z15.s }, p1/Z, [x21, x14, LSL #2]\n"
- "fmin z28.s, p3/M, z28.s, z17.s\n"
- "fmin z29.s, p3/M, z29.s, z17.s\n"
- "ld1w { z16.s }, p1/Z, [x20, x14, LSL #2]\n"
- "incw x14\n"
- "ld1w { z19.s }, p3/Z, [x16]\n"
- "cmp x14, %x[n_channels]\n"
- "ld1w { z0.s }, p3/Z, [x16, #1, MUL VL]\n"
- "ld1w { z1.s }, p3/Z, [x16, #2, MUL VL]\n"
- "fmin z30.s, p3/M, z30.s, z17.s\n"
- "fmin z31.s, p3/M, z31.s, z17.s\n"
- "ld1w { z2.s }, p3/Z, [x16, #3, MUL VL]\n"
- "ld1w { z3.s }, p3/Z, [x16, #4, MUL VL]\n"
- "st1w { z28.s }, p0, [x13, x28, LSL #2]\n"
- "ld1w { z4.s }, p3/Z, [x16, #5, MUL VL]\n"
- "ld1w { z5.s }, p3/Z, [x16, #6, MUL VL]\n"
- "st1w { z29.s }, p0, [x12, x28, LSL #2]\n"
- "ld1w { z6.s }, p3/Z, [x16, #7, MUL VL]\n"
- "addvl x16, x16, #16\n"
- "st1w { z30.s }, p0, [x11, x28, LSL #2]\n"
- "ld1w { z7.s }, p3/Z, [x16, #-8, MUL VL]\n"
- "st1w { z31.s }, p0, [x10, x28, LSL #2]\n"
- "ld1w { z8.s }, p3/Z, [x16, #-7, MUL VL]\n"
- "addvl x16, x16, #-6\n"
+ "movprfx z28, z17\n fmla z28.s, p3/M, z0.s, z9.s\n"
+ "ldr x23, [x14, #0x58]\n"
+ "ldr x22, [x14, #0x60]\n"
+ "fmla z31.s, p3/M, z0.s, z10.s\n"
+ "ldr x21, [x14, #0x68]\n"
+ "fmla z30.s, p3/M, z1.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "fmla z31.s, p3/M, z1.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "ldr x20, [x14, #0x70]\n"
+ "fmla z30.s, p3/M, z2.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x24, x13, LSL #2]\n"
+ "fmla z31.s, p3/M, z3.s, z14.s\n"
+ "ld1w { z14.s }, p2/Z, [x23, x13, LSL #2]\n"
+ "ldr x19, [x14, #0x78]\n"
+ "fmla z30.s, p3/M, z0.s, z16.s\n"
+ "ldr x26, [x14, #0x80]\n"
+ "fmla z29.s, p3/M, z3.s, z14.s\n"
+ "ldr x25, [x14, #0x88]\n"
+ "ldr x24, [x14, #0x90]\n"
+ "fmla z31.s, p3/M, z4.s, z15.s\n"
+ "ld1w { z15.s }, p2/Z, [x22, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z4.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x21, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z0.s, z15.s\n"
+ "ld1w { z14.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "ldr x23, [x14, #0x98]\n"
+ "fmla z31.s, p3/M, z2.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x20, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z5.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z4.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x23, x13, LSL #2]\n"
+ "ldr x22, [x14, #0xa0]\n"
+ "fmla z31.s, p3/M, z5.s, z13.s\n"
+ "ldr x21, [x14, #0xa8]\n"
+ "fmla z30.s, p3/M, z3.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x19, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z1.s, z16.s\n"
+ "ldr x20, [x14, #0xb0]\n"
+ "ldr x19, [x14, #0xb8]\n"
+ "fmla z31.s, p3/M, z6.s, z15.s\n"
+ "fmla z28.s, p3/M, z4.s, z13.s\n"
+ "ld1w { z15.s }, p2/Z, [x24, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z7.s, z12.s\n"
+ "ld1w { z13.s }, p2/Z, [x22, x13, LSL #2]\n"
+ "ldr x26, [x14, #0xc0]\n"
+ "fmla z31.s, p3/M, z7.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x21, x13, LSL #2]\n"
+ "fmla z28.s, p3/M, z1.s, z12.s\n"
+ "ld1w { z17.s }, p3/Z, [x15]\n"
+ "fmla z29.s, p3/M, z6.s, z15.s\n"
+ "ld1w { z15.s }, p2/Z, [x19, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z8.s, z11.s\n"
+ "ld1w { z0.s }, p3/Z, [x15, #1, MUL VL]\n"
+ "ld1w { z1.s }, p3/Z, [x15, #2, MUL VL]\n"
+ "fmla z28.s, p3/M, z5.s, z14.s\n"
+ "fmax z31.s, p3/M, z31.s, z19.s\n"
+ "ld1w { z14.s }, p2/Z, [x20, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z7.s, z13.s\n"
+ "ld1w { z4.s }, p3/Z, [x15, #5, MUL VL]\n"
+ "fmax z30.s, p3/M, z30.s, z19.s\n"
+ "fmla z28.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "incw x13\n"
+ "fmla z29.s, p3/M, z5.s, z16.s\n"
+ "ldp x26, x25, [x14, #0x0]\n"
+ "whilelt p2.s, x13, %x[n_channels]\n"
+ "fmin z31.s, p3/M, z31.s, z18.s\n"
+ "ldp x24, x23, [x14, #0x10]\n"
+ "fmin z30.s, p3/M, z30.s, z18.s\n"
+ "ldp x22, x21, [x14, #0x20]\n"
+ "ldp x20, x19, [x14, #0x30]\n"
+ "fmla z28.s, p3/M, z3.s, z16.s\n"
+ "ld1w { z9.s }, p1/Z, [x26, x12, LSL #2]\n"
+ "fmla z29.s, p3/M, z8.s, z15.s\n"
+ "ld1w { z10.s }, p1/Z, [x25, x12, LSL #2]\n"
+ "fmla z28.s, p3/M, z7.s, z14.s\n"
+ "ld1w { z12.s }, p1/Z, [x23, x12, LSL #2]\n"
+ "ld1w { z13.s }, p1/Z, [x22, x12, LSL #2]\n"
+ "fmax z29.s, p3/M, z29.s, z19.s\n"
+ "ld1w { z14.s }, p1/Z, [x21, x12, LSL #2]\n"
+ "fmla z28.s, p3/M, z6.s, z15.s\n"
+ "ld1w { z15.s }, p1/Z, [x20, x12, LSL #2]\n"
+ "ld1w { z16.s }, p1/Z, [x19, x12, LSL #2]\n"
+ "fmin z29.s, p3/M, z29.s, z18.s\n"
+ "st1w { z31.s }, p0, [x11, x9, LSL #2]\n"
+ "fmla z28.s, p3/M, z8.s, z11.s\n"
+ "ld1w { z11.s }, p1/Z, [x24, x12, LSL #2]\n"
+ "incw x12\n"
+ "fmax z28.s, p3/M, z28.s, z19.s\n"
+ "st1w { z30.s }, p0, [x10, x9, LSL #2]\n"
+ "cmp x12, %x[n_channels]\n"
+ "fmin z28.s, p3/M, z28.s, z18.s\n"
+ "st1w { z29.s }, p0, [x28, x9, LSL #2]\n"
+ "ld1w { z2.s }, p3/Z, [x15, #3, MUL VL]\n"
+ "ld1w { z3.s }, p3/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z5.s }, p3/Z, [x15, #6, MUL VL]\n"
+ "ld1w { z6.s }, p3/Z, [x15, #7, MUL VL]\n"
+ "addvl x15, x15, #16\n"
+ "st1w { z28.s }, p0, [x27, x9, LSL #2]\n"
+ "ld1w { z7.s }, p3/Z, [x15, #-8, MUL VL]\n"
+ "ld1w { z8.s }, p3/Z, [x15, #-7, MUL VL]\n"
+ "addvl x15, x15, #-6\n"
"blt 1b\n"
"2:" // Channel tail
- "movprfx z28, z19\n fmla z28.s, p3/M, z8.s, z9.s\n"
- "movprfx z29, z19\n fmla z29.s, p3/M, z6.s, z9.s\n"
- "ldr x27, [x15, #0x40]\n"
- "ldr x26, [x15, #0x48]\n"
- "fmla z28.s, p3/M, z0.s, z10.s\n"
- "fmla z29.s, p3/M, z1.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x26, x9, LSL #2]\n"
- "ldr x25, [x15, #0x50]\n"
- "fmla z28.s, p3/M, z1.s, z11.s\n"
- "fmla z29.s, p3/M, z2.s, z13.s\n"
- "ld1w { z11.s }, p2/Z, [x27, x9, LSL #2]\n"
- "ld1w { z13.s }, p2/Z, [x25, x9, LSL #2]\n"
- "fmla z28.s, p3/M, z3.s, z14.s\n"
- "fmla z29.s, p3/M, z0.s, z16.s\n"
- "ldr x24, [x15, #0x58]\n"
- "ldr x20, [x15, #0x78]\n"
- "fmla z28.s, p3/M, z4.s, z15.s\n"
- "fmla z29.s, p3/M, z4.s, z11.s\n"
- "ld1w { z14.s }, p2/Z, [x24, x9, LSL #2]\n"
- "ldr x23, [x15, #0x60]\n"
- "fmla z28.s, p3/M, z2.s, z16.s\n"
- "fmla z29.s, p3/M, z5.s, z12.s\n"
- "ldr x27, [x15, #0x80]\n"
- "ld1w { z15.s }, p2/Z, [x23, x9, LSL #2]\n"
- "movprfx z30, z19\n fmla z30.s, p3/M, z2.s, z9.s\n"
- "movprfx z31, z19\n fmla z31.s, p3/M, z0.s, z9.s\n"
- "ld1w { z12.s }, p2/Z, [x27, x9, LSL #2]\n"
- "ldr x22, [x15, #0x68]\n"
- "fmla z28.s, p3/M, z5.s, z13.s\n"
- "fmla z29.s, p3/M, z3.s, z13.s\n"
- "ld1w { z13.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldr x26, [x15, #0x88]\n"
- "fmla z30.s, p3/M, z3.s, z14.s\n"
- "fmla z31.s, p3/M, z4.s, z13.s\n"
- "ld1w { z11.s }, p2/Z, [x22, x9, LSL #2]\n"
- "ld1w { z14.s }, p2/Z, [x26, x9, LSL #2]\n"
- "fmla z30.s, p3/M, z0.s, z15.s\n"
- "fmla z31.s, p3/M, z1.s, z12.s\n"
- "ldr x21, [x15, #0x70]\n"
- "ldr x24, [x15, #0x98]\n"
+ "movprfx z31, z17\n fmla z31.s, p3/M, z8.s, z9.s\n"
+ "ldr x26, [x14, #0x40]\n"
+ "incw x9\n"
+ "movprfx z30, z17\n fmla z30.s, p3/M, z6.s, z9.s\n"
+ "ldr x25, [x14, #0x48]\n"
+ "mov p0.b, p2.b\n"
+ "movprfx z29, z17\n fmla z29.s, p3/M, z2.s, z9.s\n"
+ "ldr x24, [x14, #0x50]\n"
+ "movprfx z28, z17\n fmla z28.s, p3/M, z0.s, z9.s\n"
+ "ldr x23, [x14, #0x58]\n"
+ "ldr x22, [x14, #0x60]\n"
+ "fmla z31.s, p3/M, z0.s, z10.s\n"
+ "ldr x21, [x14, #0x68]\n"
+ "fmla z30.s, p3/M, z1.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "fmla z31.s, p3/M, z1.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "ldr x20, [x14, #0x70]\n"
+ "fmla z30.s, p3/M, z2.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x24, x13, LSL #2]\n"
+ "fmla z31.s, p3/M, z3.s, z14.s\n"
+ "ld1w { z14.s }, p2/Z, [x23, x13, LSL #2]\n"
+ "ldr x19, [x14, #0x78]\n"
+ "fmla z30.s, p3/M, z0.s, z16.s\n"
+ "ldr x26, [x14, #0x80]\n"
+ "fmla z29.s, p3/M, z3.s, z14.s\n"
+ "ldr x25, [x14, #0x88]\n"
+ "ldr x24, [x14, #0x90]\n"
+ "fmla z31.s, p3/M, z4.s, z15.s\n"
+ "ld1w { z15.s }, p2/Z, [x22, x13, LSL #2]\n"
"fmla z30.s, p3/M, z4.s, z11.s\n"
- "fmla z31.s, p3/M, z5.s, z14.s\n"
- "ld1w { z16.s }, p2/Z, [x21, x9, LSL #2]\n"
- "ld1w { z11.s }, p2/Z, [x24, x9, LSL #2]\n"
- "fmla z28.s, p3/M, z6.s, z15.s\n"
- "ldr x25, [x15, #0x90]\n"
- "ldr x22, [x15, #0xa8]\n"
- "fmla z30.s, p3/M, z1.s, z16.s\n"
- "fmla z31.s, p3/M, z2.s, z11.s\n"
- "fmla z28.s, p3/M, z7.s, z16.s\n"
- "ld1w { z15.s }, p2/Z, [x25, x9, LSL #2]\n"
- "ld1w { z16.s }, p2/Z, [x22, x9, LSL #2]\n"
- "ldr x23, [x15, #0xa0]\n"
- "ldr x21, [x15, #0xb0]\n"
- "fmla z30.s, p3/M, z6.s, z15.s\n"
- "fmla z31.s, p3/M, z3.s, z16.s\n"
- "ld1w { z13.s }, p2/Z, [x23, x9, LSL #2]\n"
- "ld1w { z14.s }, p2/Z, [x21, x9, LSL #2]\n"
- "fmla z30.s, p3/M, z7.s, z13.s\n"
- "fmla z31.s, p3/M, z7.s, z14.s\n"
- "ldr x20, [x15, #0xb8]\n"
- "fmla z29.s, p3/M, z7.s, z12.s\n"
- "ld1w { z15.s }, p2/Z, [x20, x9, LSL #2]\n"
- "fmla z30.s, p3/M, z5.s, z16.s\n"
- "ldr x27, [x15, #0xc0]\n"
+ "ld1w { z11.s }, p2/Z, [x21, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z0.s, z15.s\n"
+ "ld1w { z14.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "ldr x23, [x14, #0x98]\n"
+ "fmla z31.s, p3/M, z2.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x20, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z5.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z4.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x23, x13, LSL #2]\n"
+ "ldr x22, [x14, #0xa0]\n"
+ "fmla z31.s, p3/M, z5.s, z13.s\n"
+ "ldr x21, [x14, #0xa8]\n"
+ "fmla z30.s, p3/M, z3.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x19, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z1.s, z16.s\n"
+ "ldr x20, [x14, #0xb0]\n"
+ "ldr x19, [x14, #0xb8]\n"
"fmla z31.s, p3/M, z6.s, z15.s\n"
- "fmla z29.s, p3/M, z8.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x27, x9, LSL #2]\n"
- "fmla z30.s, p3/M, z8.s, z15.s\n"
- "fmla z31.s, p3/M, z8.s, z11.s\n"
- "incw x28\n"
- "mov p0.b, p2.b\n"
- "fmax z28.s, p3/M, z28.s, z18.s\n"
- "fmax z29.s, p3/M, z29.s, z18.s\n"
- "fmax z30.s, p3/M, z30.s, z18.s\n"
- "fmax z31.s, p3/M, z31.s, z18.s\n"
- "fmin z28.s, p3/M, z28.s, z17.s\n"
- "fmin z29.s, p3/M, z29.s, z17.s\n"
- "st1w { z28.s }, p0, [x13, x28, LSL #2]\n"
- "fmin z30.s, p3/M, z30.s, z17.s\n"
- "fmin z31.s, p3/M, z31.s, z17.s\n"
- "st1w { z29.s }, p0, [x12, x28, LSL #2]\n"
- "st1w { z30.s }, p0, [x11, x28, LSL #2]\n"
- "st1w { z31.s }, p0, [x10, x28, LSL #2]\n"
+ "fmla z28.s, p3/M, z4.s, z13.s\n"
+ "ld1w { z15.s }, p2/Z, [x24, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z7.s, z12.s\n"
+ "ld1w { z13.s }, p2/Z, [x22, x13, LSL #2]\n"
+ "ldr x26, [x14, #0xc0]\n"
+ "fmla z31.s, p3/M, z7.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x21, x13, LSL #2]\n"
+ "fmla z28.s, p3/M, z1.s, z12.s\n"
+ "fmla z29.s, p3/M, z6.s, z15.s\n"
+ "ld1w { z15.s }, p2/Z, [x19, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z8.s, z11.s\n"
+ "fmla z28.s, p3/M, z5.s, z14.s\n"
+ "ld1w { z14.s }, p2/Z, [x20, x13, LSL #2]\n"
+ "fmax z31.s, p3/M, z31.s, z19.s\n"
+ "fmla z29.s, p3/M, z7.s, z13.s\n"
+ "fmax z30.s, p3/M, z30.s, z19.s\n"
+ "fmla z28.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "fmin z31.s, p3/M, z31.s, z18.s\n"
+ "st1w { z31.s }, p0, [x11, x9, LSL #2]\n"
+ "fmla z29.s, p3/M, z5.s, z16.s\n"
+ "fmla z28.s, p3/M, z3.s, z16.s\n"
+ "fmin z30.s, p3/M, z30.s, z18.s\n"
+ "st1w { z30.s }, p0, [x10, x9, LSL #2]\n"
+ "fmla z28.s, p3/M, z7.s, z14.s\n"
+ "fmla z29.s, p3/M, z8.s, z15.s\n"
+ "fmla z28.s, p3/M, z6.s, z15.s\n"
+ "fmax z29.s, p3/M, z29.s, z19.s\n"
+ "fmla z28.s, p3/M, z8.s, z11.s\n"
+ "fmin z29.s, p3/M, z29.s, z18.s\n"
+ "st1w { z29.s }, p0, [x28, x9, LSL #2]\n"
+ "fmax z28.s, p3/M, z28.s, z19.s\n"
+ "fmin z28.s, p3/M, z28.s, z18.s\n"
+ "st1w { z28.s }, p0, [x27, x9, LSL #2]\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z28", "z29", "z30", "z31"
);
}
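Both the removed and restored versions of these kernels share the same predicated-loop skeleton: whilelt builds a per-lane predicate from the channel counter against n_channels, the body issues predicated ld1w/fmla, the activation clamp is a predicated fmax/fmin against the broadcast min/max (the ld1rw loads near the top of each hunk), and st1w writes back under the same predicate, so no scalar tail loop is needed. The following is a minimal sketch of that pattern in SVE ACLE intrinsics; mla_clamp is a hypothetical function, not the library's API, and it assumes arm_sve.h with an SVE-enabled target.

// Hypothetical sketch of the whilelt/incw channel loop the assembly implements.
#include <arm_sve.h>   // requires e.g. -march=armv8-a+sve
#include <cstdint>

void mla_clamp(const float *in, const float *weights, float *out,
               int64_t n_channels, float act_min, float act_max)
{
    for (int64_t c = 0; c < n_channels; c += (int64_t) svcntw())
    {
        svbool_t    pg  = svwhilelt_b32_s64(c, n_channels); // like "whilelt p2.s, ..."
        svfloat32_t x   = svld1_f32(pg, in + c);            // like "ld1w { z9.s }, p2/Z, ..."
        svfloat32_t w   = svld1_f32(pg, weights + c);
        svfloat32_t acc = svdup_n_f32(0.0f);
        acc = svmla_f32_m(pg, acc, x, w);                   // like "fmla z28.s, p3/M, ..."
        acc = svmax_n_f32_m(pg, acc, act_min);              // clamp low, like "fmax ... z18.s"
        acc = svmin_n_f32_m(pg, acc, act_max);              // clamp high, like "fmin ... z17.s"
        svst1_f32(pg, out + c, acc);                        // like "st1w { z28.s }, p0, ..."
    }
}

On the final iteration the predicate simply masks off the lanes past n_channels, which is why the generated kernels can run a single vector loop plus a predicated "Channel tail" rather than a scalar remainder loop.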
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp
index b4cf6c8582..ea8bbbd7e8 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -88,432 +88,432 @@ void sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
__asm__ __volatile__(
"ptrue p3.b\n"
- "mov x12, #0x0\n"
- "mov x8, #0x0\n"
+ "mov x5, #0x0\n"
+ "mov x6, #0x0\n"
"1:" // Tile loop
- "str x12, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "mov x25, #0x2\n"
- "mov x24, #0x2\n"
- "str x8, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x23, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x17, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "mul x22, x12, x23\n" // offset = tile_i * ld_input_row
+ "str x5, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "mov x20, #0x2\n"
+ "str x6, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "mov x7, #0x2\n"
+ "ldr x8, [%x[params_struct], %[offsetof_args_params]]\n"
+ "mov x17, #0x0\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "cntw x16\n"
+ "ldr x15, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "sub x14, XZR, x16\n"
+ "ldr x13, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "mul x19, x5, x22\n" // offset = tile_i * ld_input_row
"ldr x21, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "madd x22, x8, x17, x22\n" // offset += tile_j * ld_input_col
- "ldr x16, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "add x15, x17, x17\n"
- "mul x20, x12, x21\n" // offset = tile_i * ld_output_row
- "ldr x14, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "ldr x13, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "cntw x12\n"
- "mul x22, x22, x25\n" // offset *= kernel_stride * output_size
- "add x14, x14, x22, LSL #2\n" // inptr[0] += offset * sizeof(float)
- "add x11, x14, x23, LSL #2\n"
- "ldr x10, [%x[params_struct], %[offsetof_args_params]]\n"
- "madd x20, x8, x16, x20\n" // offset += tile_j * ld_output_col
- "add x9, x11, x23, LSL #2\n"
- "add x28, x15, x17\n"
+ "madd x19, x6, x15, x19\n" // offset += tile_j * ld_input_col
+ "ldr x12, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+ "mul x19, x19, x20\n" // offset *= kernel_stride * output_size
+ "ldr x11, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "add x13, x13, x19, LSL #2\n" // inptr[0] += offset * sizeof(float)
"ld1rw { z18.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "mul x20, x20, x24\n" // offset *= output_tile_size
- "whilelt p2.s, XZR, %x[n_channels]\n"
- "add x27, x9, x23, LSL #2\n"
+ "add x20, x13, x22, LSL #2\n"
"ld1rw { z17.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "add x26, x28, x17\n"
- "add x25, x27, x23, LSL #2\n"
- "ld1w { z16.s }, p3/Z, [x10]\n"
- "ld1w { z0.s }, p3/Z, [x10, #1, MUL VL]\n"
- "add x24, x26, x17\n"
- "add x13, x13, x20, LSL #2\n" // outptrs[0] += offset * sizeof(float)
- "ld1w { z1.s }, p3/Z, [x10, #2, MUL VL]\n"
- "ld1w { z2.s }, p3/Z, [x10, #3, MUL VL]\n"
- "cmp x12, %x[n_channels]\n"
- "add x23, x25, x23, LSL #2\n"
- "ld1w { z3.s }, p3/Z, [x10, #4, MUL VL]\n"
- "ld1w { z4.s }, p3/Z, [x10, #5, MUL VL]\n"
- "add x22, x13, x21, LSL #2\n"
- "mov x21, #0x0\n"
- "ld1w { z5.s }, p2/Z, [x14]\n"
- "ld1w { z6.s }, p2/Z, [x14, x17, LSL #2]\n"
- "sub x20, XZR, x12\n"
- "ld1w { z7.s }, p2/Z, [x11]\n"
- "ld1w { z8.s }, p2/Z, [x11, x17, LSL #2]\n"
- "addvl x10, x10, #6\n"
- "ld1w { z9.s }, p2/Z, [x14, x15, LSL #2]\n"
- "ld1w { z13.s }, p2/Z, [x11, x15, LSL #2]\n"
- "ld1w { z11.s }, p2/Z, [x14, x28, LSL #2]\n"
- "ld1w { z12.s }, p2/Z, [x14, x26, LSL #2]\n"
- "ld1w { z10.s }, p2/Z, [x11, x24, LSL #2]\n"
- "ld1w { z14.s }, p2/Z, [x9]\n"
+ "add x10, x20, x22, LSL #2\n"
+ "ld1w { z16.s }, p3/Z, [x8]\n"
+ "add x9, x10, x22, LSL #2\n"
+ "ld1w { z0.s }, p3/Z, [x8, #1, MUL VL]\n"
+ "add x28, x9, x22, LSL #2\n"
+ "ld1w { z1.s }, p3/Z, [x8, #2, MUL VL]\n"
+ "add x27, x28, x22, LSL #2\n"
+ "ld1w { z2.s }, p3/Z, [x8, #3, MUL VL]\n"
+ "add x26, x15, x15\n"
+ "ld1w { z3.s }, p3/Z, [x8, #4, MUL VL]\n"
+ "add x25, x26, x15\n"
+ "ld1w { z4.s }, p3/Z, [x8, #5, MUL VL]\n"
+ "add x24, x25, x15\n"
+ "mul x19, x5, x21\n" // offset = tile_i * ld_output_row
+ "add x23, x24, x15\n"
+ "madd x19, x6, x12, x19\n" // offset += tile_j * ld_output_col
+ "mul x19, x19, x7\n" // offset *= output_tile_size
+ "add x11, x11, x19, LSL #2\n" // outptrs[0] += offset * sizeof(float)
+ "add x22, x11, x21, LSL #2\n"
+ "whilelt p2.s, XZR, %x[n_channels]\n"
+ "ld1w { z5.s }, p2/Z, [x13]\n"
+ "ld1w { z6.s }, p2/Z, [x13, x15, LSL #2]\n"
+ "cmp x16, %x[n_channels]\n"
+ "ld1w { z7.s }, p2/Z, [x20]\n"
+ "addvl x8, x8, #6\n"
+ "ld1w { z8.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "ld1w { z9.s }, p2/Z, [x13, x26, LSL #2]\n"
+ "ld1w { z13.s }, p2/Z, [x20, x26, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x13, x25, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x13, x24, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x20, x23, LSL #2]\n"
+ "ld1w { z14.s }, p2/Z, [x10]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
- "movprfx z28, z16\n fmla z28.s, p3/M, z0.s, z5.s\n"
- "movprfx z29, z16\n fmla z29.s, p3/M, z0.s, z6.s\n"
- "ld1w { z5.s }, p2/Z, [x11, x28, LSL #2]\n"
- "whilelt p1.s, x12, %x[n_channels]\n"
- "movprfx z30, z16\n fmla z30.s, p3/M, z0.s, z7.s\n"
- "movprfx z31, z16\n fmla z31.s, p3/M, z0.s, z8.s\n"
- "ld1w { z0.s }, p3/Z, [x10]\n"
- "incw x21\n"
- "fmla z28.s, p3/M, z1.s, z6.s\n"
- "fmla z29.s, p3/M, z1.s, z9.s\n"
- "ld1w { z6.s }, p2/Z, [x11, x26, LSL #2]\n"
- "incw x12\n"
- "fmla z30.s, p3/M, z1.s, z8.s\n"
- "fmla z31.s, p3/M, z1.s, z13.s\n"
- "ld1w { z1.s }, p3/Z, [x10, #1, MUL VL]\n"
+ "movprfx z31, z16\n fmla z31.s, p3/M, z0.s, z5.s\n"
+ "ld1w { z5.s }, p2/Z, [x20, x25, LSL #2]\n"
+ "whilelt p1.s, x16, %x[n_channels]\n"
+ "movprfx z30, z16\n fmla z30.s, p3/M, z0.s, z6.s\n"
+ "incw x14\n"
+ "movprfx z29, z16\n fmla z29.s, p3/M, z0.s, z7.s\n"
"mov p0.b, p2.b\n"
+ "movprfx z28, z16\n fmla z28.s, p3/M, z0.s, z8.s\n"
+ "ld1w { z0.s }, p3/Z, [x8]\n"
+ "incw x17\n"
+ "fmla z31.s, p3/M, z1.s, z6.s\n"
+ "ld1w { z6.s }, p2/Z, [x20, x24, LSL #2]\n"
+ "addvl x20, x20, #1\n"
+ "fmla z30.s, p3/M, z1.s, z9.s\n"
+ "incw x16\n"
+ "fmla z29.s, p3/M, z1.s, z8.s\n"
+ "fmla z28.s, p3/M, z1.s, z13.s\n"
+ "ld1w { z1.s }, p3/Z, [x8, #1, MUL VL]\n"
+ "fmla z31.s, p3/M, z2.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x13, x23, LSL #2]\n"
+ "addvl x13, x13, #1\n"
+ "fmla z30.s, p3/M, z2.s, z11.s\n"
+ "fmla z29.s, p3/M, z2.s, z13.s\n"
+ "fmla z28.s, p3/M, z2.s, z5.s\n"
+ "ld1w { z2.s }, p3/Z, [x8, #2, MUL VL]\n"
+ "fmla z31.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x10, x15, LSL #2]\n"
+ "fmla z30.s, p3/M, z3.s, z12.s\n"
+ "fmla z29.s, p3/M, z3.s, z5.s\n"
+ "fmla z28.s, p3/M, z3.s, z6.s\n"
+ "ld1w { z3.s }, p3/Z, [x8, #3, MUL VL]\n"
+ "fmla z31.s, p3/M, z4.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x10, x26, LSL #2]\n"
+ "fmla z30.s, p3/M, z4.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x10, x25, LSL #2]\n"
+ "fmla z29.s, p3/M, z4.s, z6.s\n"
+ "fmla z28.s, p3/M, z4.s, z10.s\n"
+ "ld1w { z4.s }, p3/Z, [x8, #4, MUL VL]\n"
+ "fmla z31.s, p3/M, z0.s, z7.s\n"
+ "ld1w { z7.s }, p1/Z, [x20]\n"
+ "fmla z30.s, p3/M, z0.s, z8.s\n"
+ "fmla z29.s, p3/M, z0.s, z14.s\n"
+ "fmla z28.s, p3/M, z0.s, z11.s\n"
+ "ld1w { z0.s }, p3/Z, [x8, #5, MUL VL]\n"
+ "fmla z31.s, p3/M, z1.s, z8.s\n"
+ "ld1w { z8.s }, p2/Z, [x10, x23, LSL #2]\n"
+ "fmla z30.s, p3/M, z1.s, z13.s\n"
+ "fmla z29.s, p3/M, z1.s, z11.s\n"
+ "fmla z28.s, p3/M, z1.s, z12.s\n"
+ "ld1w { z1.s }, p3/Z, [x8, #6, MUL VL]\n"
+ "fmla z31.s, p3/M, z2.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x10, x24, LSL #2]\n"
+ "addvl x10, x10, #1\n"
+ "fmla z30.s, p3/M, z2.s, z5.s\n"
+ "fmla z29.s, p3/M, z2.s, z12.s\n"
"fmla z28.s, p3/M, z2.s, z9.s\n"
- "fmla z29.s, p3/M, z2.s, z11.s\n"
- "ld1w { z9.s }, p2/Z, [x14, x24, LSL #2]\n"
- "addvl x14, x14, #1\n"
- "fmla z30.s, p3/M, z2.s, z13.s\n"
- "fmla z31.s, p3/M, z2.s, z5.s\n"
- "ld1w { z2.s }, p3/Z, [x10, #2, MUL VL]\n"
- "addvl x11, x11, #1\n"
- "fmla z28.s, p3/M, z3.s, z11.s\n"
- "fmla z29.s, p3/M, z3.s, z12.s\n"
- "ld1w { z11.s }, p2/Z, [x9, x17, LSL #2]\n"
- "incw x20\n"
- "fmla z30.s, p3/M, z3.s, z5.s\n"
- "fmla z31.s, p3/M, z3.s, z6.s\n"
- "ld1w { z3.s }, p3/Z, [x10, #3, MUL VL]\n"
- "fmla z28.s, p3/M, z4.s, z12.s\n"
- "fmla z29.s, p3/M, z4.s, z9.s\n"
- "ld1w { z12.s }, p2/Z, [x9, x15, LSL #2]\n"
- "ld1w { z9.s }, p2/Z, [x9, x28, LSL #2]\n"
- "fmla z30.s, p3/M, z4.s, z6.s\n"
- "fmla z31.s, p3/M, z4.s, z10.s\n"
- "ld1w { z4.s }, p3/Z, [x10, #4, MUL VL]\n"
- "fmla z28.s, p3/M, z0.s, z7.s\n"
- "fmla z29.s, p3/M, z0.s, z8.s\n"
- "ld1w { z7.s }, p1/Z, [x11]\n"
- "fmla z30.s, p3/M, z0.s, z14.s\n"
- "fmla z31.s, p3/M, z0.s, z11.s\n"
- "ld1w { z0.s }, p3/Z, [x10, #5, MUL VL]\n"
- "fmla z28.s, p3/M, z1.s, z8.s\n"
+ "ld1w { z2.s }, p3/Z, [x8, #7, MUL VL]\n"
+ "addvl x8, x8, #16\n"
+ "fmla z31.s, p3/M, z3.s, z5.s\n"
+ "ld1w { z5.s }, p2/Z, [x9]\n"
+ "ld1w { z16.s }, p3/Z, [x8, #4, MUL VL]\n"
+ "fmla z30.s, p3/M, z3.s, z6.s\n"
+ "fmla z29.s, p3/M, z3.s, z9.s\n"
+ "fmla z28.s, p3/M, z3.s, z13.s\n"
+ "ld1w { z3.s }, p3/Z, [x8, #-8, MUL VL]\n"
+ "fmla z31.s, p3/M, z4.s, z6.s\n"
+ "ld1w { z6.s }, p2/Z, [x9, x15, LSL #2]\n"
+ "fmla z30.s, p3/M, z4.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x9, x26, LSL #2]\n"
+ "fmla z29.s, p3/M, z4.s, z13.s\n"
+ "fmla z28.s, p3/M, z4.s, z8.s\n"
+ "ld1w { z4.s }, p3/Z, [x8, #-7, MUL VL]\n"
+ "fmla z31.s, p3/M, z0.s, z14.s\n"
+ "ld1w { z14.s }, p2/Z, [x9, x23, LSL #2]\n"
+ "fmla z30.s, p3/M, z0.s, z11.s\n"
+ "fmla z29.s, p3/M, z0.s, z5.s\n"
+ "fmla z28.s, p3/M, z0.s, z6.s\n"
+ "ld1w { z0.s }, p3/Z, [x8, #-6, MUL VL]\n"
+ "fmla z31.s, p3/M, z1.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x9, x25, LSL #2]\n"
+ "fmla z30.s, p3/M, z1.s, z12.s\n"
+ "fmla z29.s, p3/M, z1.s, z6.s\n"
+ "fmla z28.s, p3/M, z1.s, z10.s\n"
+ "ld1w { z1.s }, p3/Z, [x8, #-5, MUL VL]\n"
+ "fmla z31.s, p3/M, z2.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x9, x24, LSL #2]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z30.s, p3/M, z2.s, z9.s\n"
+ "fmla z29.s, p3/M, z2.s, z10.s\n"
+ "fmla z28.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z2.s }, p3/Z, [x8, #-4, MUL VL]\n"
+ "fmla z31.s, p3/M, z3.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x28]\n"
+ "fmla z30.s, p3/M, z3.s, z13.s\n"
+ "fmla z29.s, p3/M, z3.s, z11.s\n"
+ "fmla z28.s, p3/M, z3.s, z12.s\n"
+ "ld1w { z3.s }, p3/Z, [x8, #-3, MUL VL]\n"
+ "fmla z31.s, p3/M, z4.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x28, x15, LSL #2]\n"
+ "fmla z30.s, p3/M, z4.s, z8.s\n"
+ "ld1w { z8.s }, p2/Z, [x28, x24, LSL #2]\n"
+ "fmla z29.s, p3/M, z4.s, z12.s\n"
+ "fmla z28.s, p3/M, z4.s, z14.s\n"
+ "ld1w { z4.s }, p3/Z, [x8, #-2, MUL VL]\n"
+ "fmla z31.s, p3/M, z0.s, z5.s\n"
+ "ld1w { z5.s }, p2/Z, [x28, x26, LSL #2]\n"
+ "fmla z30.s, p3/M, z0.s, z6.s\n"
+ "fmla z29.s, p3/M, z0.s, z9.s\n"
+ "fmla z28.s, p3/M, z0.s, z13.s\n"
+ "ld1w { z0.s }, p3/Z, [x8, #-1, MUL VL]\n"
+ "fmla z31.s, p3/M, z1.s, z6.s\n"
+ "ld1w { z6.s }, p2/Z, [x28, x25, LSL #2]\n"
+ "fmla z30.s, p3/M, z1.s, z10.s\n"
"fmla z29.s, p3/M, z1.s, z13.s\n"
- "ld1w { z8.s }, p2/Z, [x9, x24, LSL #2]\n"
- "fmla z30.s, p3/M, z1.s, z11.s\n"
- "fmla z31.s, p3/M, z1.s, z12.s\n"
- "ld1w { z1.s }, p3/Z, [x10, #6, MUL VL]\n"
- "fmla z28.s, p3/M, z2.s, z13.s\n"
+ "fmla z28.s, p3/M, z1.s, z5.s\n"
+ "ld1w { z1.s }, p3/Z, [x8]\n"
+ "fmla z31.s, p3/M, z2.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x28, x23, LSL #2]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z30.s, p3/M, z2.s, z11.s\n"
"fmla z29.s, p3/M, z2.s, z5.s\n"
- "ld1w { z13.s }, p2/Z, [x9, x26, LSL #2]\n"
- "addvl x9, x9, #1\n"
- "fmla z30.s, p3/M, z2.s, z12.s\n"
- "fmla z31.s, p3/M, z2.s, z9.s\n"
- "ld1w { z2.s }, p3/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
- "fmla z28.s, p3/M, z3.s, z5.s\n"
+ "fmla z28.s, p3/M, z2.s, z6.s\n"
+ "ld1w { z2.s }, p3/Z, [x8, #1, MUL VL]\n"
+ "fmla z31.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x27]\n"
+ "fmla z30.s, p3/M, z3.s, z12.s\n"
"fmla z29.s, p3/M, z3.s, z6.s\n"
- "ld1w { z5.s }, p2/Z, [x27]\n"
- "ld1w { z16.s }, p3/Z, [x10, #4, MUL VL]\n"
- "fmla z30.s, p3/M, z3.s, z9.s\n"
- "fmla z31.s, p3/M, z3.s, z13.s\n"
- "ld1w { z3.s }, p3/Z, [x10, #-8, MUL VL]\n"
- "fmla z28.s, p3/M, z4.s, z6.s\n"
- "fmla z29.s, p3/M, z4.s, z10.s\n"
- "ld1w { z6.s }, p2/Z, [x27, x17, LSL #2]\n"
- "ld1w { z10.s }, p2/Z, [x27, x15, LSL #2]\n"
- "fmla z30.s, p3/M, z4.s, z13.s\n"
- "fmla z31.s, p3/M, z4.s, z8.s\n"
- "ld1w { z4.s }, p3/Z, [x10, #-7, MUL VL]\n"
- "fmla z28.s, p3/M, z0.s, z14.s\n"
+ "fmla z28.s, p3/M, z3.s, z8.s\n"
+ "ld1w { z3.s }, p3/Z, [x8, #2, MUL VL]\n"
+ "fmla z31.s, p3/M, z4.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x27, x15, LSL #2]\n"
+ "fmla z30.s, p3/M, z4.s, z14.s\n"
+ "ld1w { z14.s }, p1/Z, [x10]\n"
+ "fmla z29.s, p3/M, z4.s, z8.s\n"
+ "fmla z28.s, p3/M, z4.s, z10.s\n"
+ "ld1w { z4.s }, p3/Z, [x8, #3, MUL VL]\n"
+ "fmla z31.s, p3/M, z0.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x27, x26, LSL #2]\n"
+ "fmla z30.s, p3/M, z0.s, z13.s\n"
"fmla z29.s, p3/M, z0.s, z11.s\n"
- "ld1w { z14.s }, p2/Z, [x27, x24, LSL #2]\n"
- "fmla z30.s, p3/M, z0.s, z5.s\n"
- "fmla z31.s, p3/M, z0.s, z6.s\n"
- "ld1w { z0.s }, p3/Z, [x10, #-6, MUL VL]\n"
- "fmla z28.s, p3/M, z1.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x27, x25, LSL #2]\n"
+ "fmla z28.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z0.s }, p3/Z, [x8, #5, MUL VL]\n"
+ "fmla z31.s, p3/M, z1.s, z13.s\n"
+ "ld1w { z13.s }, p1/Z, [x20, x26, LSL #2]\n"
+ "fmla z30.s, p3/M, z1.s, z5.s\n"
"fmla z29.s, p3/M, z1.s, z12.s\n"
- "ld1w { z11.s }, p2/Z, [x27, x28, LSL #2]\n"
- "fmla z30.s, p3/M, z1.s, z6.s\n"
- "fmla z31.s, p3/M, z1.s, z10.s\n"
- "ld1w { z1.s }, p3/Z, [x10, #-5, MUL VL]\n"
- "fmla z28.s, p3/M, z2.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x27, x24, LSL #2]\n"
+ "fmla z28.s, p3/M, z1.s, z9.s\n"
+ "ld1w { z1.s }, p3/Z, [x8, #6, MUL VL]\n"
+ "fmla z31.s, p3/M, z2.s, z5.s\n"
+ "ld1w { z5.s }, p1/Z, [x13]\n"
+ "fmla z30.s, p3/M, z2.s, z6.s\n"
"fmla z29.s, p3/M, z2.s, z9.s\n"
- "ld1w { z12.s }, p2/Z, [x27, x26, LSL #2]\n"
+ "ld1w { z9.s }, p2/Z, [x27, x23, LSL #2]\n"
+ "whilelt p2.s, x17, %x[n_channels]\n"
+ "fmla z28.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z2.s }, p3/Z, [x8, #7, MUL VL]\n"
"addvl x27, x27, #1\n"
- "fmla z30.s, p3/M, z2.s, z10.s\n"
- "fmla z31.s, p3/M, z2.s, z11.s\n"
- "ld1w { z2.s }, p3/Z, [x10, #-4, MUL VL]\n"
- "fmla z28.s, p3/M, z3.s, z9.s\n"
- "fmla z29.s, p3/M, z3.s, z13.s\n"
- "ld1w { z9.s }, p2/Z, [x25]\n"
- "fmla z30.s, p3/M, z3.s, z11.s\n"
- "fmla z31.s, p3/M, z3.s, z12.s\n"
- "ld1w { z3.s }, p3/Z, [x10, #-3, MUL VL]\n"
- "fmla z28.s, p3/M, z4.s, z13.s\n"
- "fmla z29.s, p3/M, z4.s, z8.s\n"
- "ld1w { z13.s }, p2/Z, [x25, x17, LSL #2]\n"
- "ld1w { z8.s }, p2/Z, [x25, x26, LSL #2]\n"
- "fmla z30.s, p3/M, z4.s, z12.s\n"
- "fmla z31.s, p3/M, z4.s, z14.s\n"
- "ld1w { z4.s }, p3/Z, [x10, #-2, MUL VL]\n"
- "fmla z28.s, p3/M, z0.s, z5.s\n"
- "fmla z29.s, p3/M, z0.s, z6.s\n"
- "ld1w { z5.s }, p2/Z, [x25, x15, LSL #2]\n"
- "fmla z30.s, p3/M, z0.s, z9.s\n"
- "fmla z31.s, p3/M, z0.s, z13.s\n"
- "ld1w { z0.s }, p3/Z, [x10, #-1, MUL VL]\n"
- "fmla z28.s, p3/M, z1.s, z6.s\n"
- "fmla z29.s, p3/M, z1.s, z10.s\n"
- "ld1w { z6.s }, p2/Z, [x25, x28, LSL #2]\n"
- "fmla z30.s, p3/M, z1.s, z13.s\n"
- "fmla z31.s, p3/M, z1.s, z5.s\n"
- "ld1w { z1.s }, p3/Z, [x10]\n"
- "fmla z28.s, p3/M, z2.s, z10.s\n"
- "fmla z29.s, p3/M, z2.s, z11.s\n"
- "ld1w { z10.s }, p2/Z, [x25, x24, LSL #2]\n"
- "addvl x25, x25, #1\n"
- "fmla z30.s, p3/M, z2.s, z5.s\n"
- "fmla z31.s, p3/M, z2.s, z6.s\n"
- "ld1w { z2.s }, p3/Z, [x10, #1, MUL VL]\n"
- "fmla z28.s, p3/M, z3.s, z11.s\n"
- "fmla z29.s, p3/M, z3.s, z12.s\n"
- "ld1w { z11.s }, p2/Z, [x23]\n"
- "fmla z30.s, p3/M, z3.s, z6.s\n"
- "fmla z31.s, p3/M, z3.s, z8.s\n"
- "ld1w { z3.s }, p3/Z, [x10, #2, MUL VL]\n"
- "fmla z28.s, p3/M, z4.s, z12.s\n"
- "fmla z29.s, p3/M, z4.s, z14.s\n"
- "ld1w { z12.s }, p2/Z, [x23, x17, LSL #2]\n"
- "ld1w { z14.s }, p1/Z, [x9]\n"
- "fmla z30.s, p3/M, z4.s, z8.s\n"
- "fmla z31.s, p3/M, z4.s, z10.s\n"
- "ld1w { z4.s }, p3/Z, [x10, #3, MUL VL]\n"
- "fmla z28.s, p3/M, z0.s, z9.s\n"
- "fmla z29.s, p3/M, z0.s, z13.s\n"
- "ld1w { z9.s }, p2/Z, [x23, x15, LSL #2]\n"
- "fmla z30.s, p3/M, z0.s, z11.s\n"
- "fmla z31.s, p3/M, z0.s, z12.s\n"
- "ld1w { z11.s }, p2/Z, [x23, x28, LSL #2]\n"
- "ld1w { z0.s }, p3/Z, [x10, #5, MUL VL]\n"
- "fmla z28.s, p3/M, z1.s, z13.s\n"
- "fmla z29.s, p3/M, z1.s, z5.s\n"
- "ld1w { z13.s }, p1/Z, [x11, x15, LSL #2]\n"
- "fmla z30.s, p3/M, z1.s, z12.s\n"
- "fmla z31.s, p3/M, z1.s, z9.s\n"
- "ld1w { z12.s }, p2/Z, [x23, x26, LSL #2]\n"
- "ld1w { z1.s }, p3/Z, [x10, #6, MUL VL]\n"
- "fmla z28.s, p3/M, z2.s, z5.s\n"
- "fmla z29.s, p3/M, z2.s, z6.s\n"
- "ld1w { z5.s }, p1/Z, [x14]\n"
- "fmla z30.s, p3/M, z2.s, z9.s\n"
- "fmla z31.s, p3/M, z2.s, z11.s\n"
- "ld1w { z9.s }, p2/Z, [x23, x24, LSL #2]\n"
- "ld1w { z2.s }, p3/Z, [x10, #7, MUL VL]\n"
- "fmla z28.s, p3/M, z3.s, z6.s\n"
- "fmla z29.s, p3/M, z3.s, z8.s\n"
- "addvl x10, x10, #16\n"
- "whilelt p2.s, x21, %x[n_channels]\n"
- "fmla z30.s, p3/M, z3.s, z11.s\n"
- "fmla z31.s, p3/M, z3.s, z12.s\n"
- "cmp x12, %x[n_channels]\n"
- "addvl x23, x23, #1\n"
- "fmla z28.s, p3/M, z4.s, z8.s\n"
- "fmla z29.s, p3/M, z4.s, z10.s\n"
- "fmax z28.s, p3/M, z28.s, z18.s\n"
- "fmax z29.s, p3/M, z29.s, z18.s\n"
- "fmla z30.s, p3/M, z4.s, z12.s\n"
- "fmla z31.s, p3/M, z4.s, z9.s\n"
- "fmax z30.s, p3/M, z30.s, z18.s\n"
+ "fmla z31.s, p3/M, z3.s, z6.s\n"
+ "ld1w { z6.s }, p1/Z, [x13, x15, LSL #2]\n"
+ "addvl x8, x8, #16\n"
+ "fmla z30.s, p3/M, z3.s, z8.s\n"
+ "cmp x16, %x[n_channels]\n"
+ "fmla z29.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z11.s }, p1/Z, [x13, x25, LSL #2]\n"
+ "fmla z28.s, p3/M, z3.s, z12.s\n"
+ "ld1w { z3.s }, p3/Z, [x8, #-8, MUL VL]\n"
+ "fmla z31.s, p3/M, z4.s, z8.s\n"
+ "ld1w { z8.s }, p1/Z, [x20, x15, LSL #2]\n"
+ "fmla z30.s, p3/M, z4.s, z10.s\n"
+ "ld1w { z10.s }, p1/Z, [x20, x23, LSL #2]\n"
+ "fmla z29.s, p3/M, z4.s, z12.s\n"
+ "ld1w { z12.s }, p1/Z, [x13, x24, LSL #2]\n"
+ "fmla z28.s, p3/M, z4.s, z9.s\n"
+ "ld1w { z9.s }, p1/Z, [x13, x26, LSL #2]\n"
+ "ld1w { z4.s }, p3/Z, [x8, #-7, MUL VL]\n"
"fmax z31.s, p3/M, z31.s, z18.s\n"
- "fmin z28.s, p3/M, z28.s, z17.s\n"
- "fmin z29.s, p3/M, z29.s, z17.s\n"
- "ld1w { z6.s }, p1/Z, [x14, x17, LSL #2]\n"
- "ld1w { z8.s }, p1/Z, [x11, x17, LSL #2]\n"
- "fmin z30.s, p3/M, z30.s, z17.s\n"
+ "addvl x8, x8, #-6\n"
+ "fmax z30.s, p3/M, z30.s, z18.s\n"
+ "fmax z29.s, p3/M, z29.s, z18.s\n"
+ "fmax z28.s, p3/M, z28.s, z18.s\n"
"fmin z31.s, p3/M, z31.s, z17.s\n"
- "ld1w { z9.s }, p1/Z, [x14, x15, LSL #2]\n"
- "ld1w { z11.s }, p1/Z, [x14, x28, LSL #2]\n"
- "ld1w { z12.s }, p1/Z, [x14, x26, LSL #2]\n"
- "ld1w { z10.s }, p1/Z, [x11, x24, LSL #2]\n"
- "st1w { z28.s }, p0, [x13]\n"
- "st1w { z29.s }, p0, [x13, x16, LSL #2]\n"
- "addvl x13, x13, #1\n"
- "ld1w { z3.s }, p3/Z, [x10, #-8, MUL VL]\n"
- "ld1w { z4.s }, p3/Z, [x10, #-7, MUL VL]\n"
- "st1w { z30.s }, p0, [x22]\n"
- "addvl x10, x10, #-6\n"
- "st1w { z31.s }, p0, [x22, x16, LSL #2]\n"
+ "st1w { z31.s }, p0, [x11]\n"
+ "fmin z30.s, p3/M, z30.s, z17.s\n"
+ "fmin z29.s, p3/M, z29.s, z17.s\n"
+ "st1w { z30.s }, p0, [x11, x12, LSL #2]\n"
+ "fmin z28.s, p3/M, z28.s, z17.s\n"
+ "addvl x11, x11, #1\n"
+ "st1w { z29.s }, p0, [x22]\n"
+ "st1w { z28.s }, p0, [x22, x12, LSL #2]\n"
"addvl x22, x22, #1\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
- "movprfx z28, z16\n fmla z28.s, p3/M, z0.s, z5.s\n"
- "movprfx z29, z16\n fmla z29.s, p3/M, z0.s, z6.s\n"
- "ld1w { z5.s }, p2/Z, [x11, x28, LSL #2]\n"
- "ldr x8, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "movprfx z30, z16\n fmla z30.s, p3/M, z0.s, z7.s\n"
- "movprfx z31, z16\n fmla z31.s, p3/M, z0.s, z8.s\n"
- "ld1w { z0.s }, p3/Z, [x10]\n"
- "ldr x12, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "fmla z28.s, p3/M, z1.s, z6.s\n"
- "fmla z29.s, p3/M, z1.s, z9.s\n"
- "ld1w { z6.s }, p2/Z, [x11, x26, LSL #2]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "fmla z30.s, p3/M, z1.s, z8.s\n"
- "fmla z31.s, p3/M, z1.s, z13.s\n"
- "ld1w { z1.s }, p3/Z, [x10, #1, MUL VL]\n"
- "add x8, x8, #0x1\n"
- "fmla z28.s, p3/M, z2.s, z9.s\n"
- "fmla z29.s, p3/M, z2.s, z11.s\n"
- "ld1w { z9.s }, p2/Z, [x14, x24, LSL #2]\n"
- "cmp x8, x20\n"
- "fmla z30.s, p3/M, z2.s, z13.s\n"
- "fmla z31.s, p3/M, z2.s, z5.s\n"
- "ld1w { z2.s }, p3/Z, [x10, #2, MUL VL]\n"
- "add x21, x12, #0x1\n"
- "fmla z28.s, p3/M, z3.s, z11.s\n"
- "fmla z29.s, p3/M, z3.s, z12.s\n"
- "ld1w { z11.s }, p2/Z, [x9, x17, LSL #2]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "fmla z30.s, p3/M, z3.s, z5.s\n"
- "fmla z31.s, p3/M, z3.s, z6.s\n"
- "ld1w { z3.s }, p3/Z, [x10, #3, MUL VL]\n"
- "csel x12, x12, x21, LT\n"
- "fmla z28.s, p3/M, z4.s, z12.s\n"
- "fmla z29.s, p3/M, z4.s, z9.s\n"
- "ld1w { z12.s }, p2/Z, [x9, x15, LSL #2]\n"
- "ld1w { z9.s }, p2/Z, [x9, x28, LSL #2]\n"
- "fmla z30.s, p3/M, z4.s, z6.s\n"
- "fmla z31.s, p3/M, z4.s, z10.s\n"
- "ld1w { z4.s }, p3/Z, [x10, #4, MUL VL]\n"
+ "movprfx z31, z16\n fmla z31.s, p3/M, z0.s, z5.s\n"
+ "ld1w { z5.s }, p2/Z, [x20, x25, LSL #2]\n"
"mov p0.b, p2.b\n"
- "fmla z28.s, p3/M, z0.s, z7.s\n"
- "fmla z29.s, p3/M, z0.s, z8.s\n"
- "csel x8, x8, XZR, LT\n"
- "cmp x12, x20\n"
- "fmla z30.s, p3/M, z0.s, z14.s\n"
- "fmla z31.s, p3/M, z0.s, z11.s\n"
- "ld1w { z0.s }, p3/Z, [x10, #5, MUL VL]\n"
- "fmla z28.s, p3/M, z1.s, z8.s\n"
- "fmla z29.s, p3/M, z1.s, z13.s\n"
- "ld1w { z8.s }, p2/Z, [x9, x24, LSL #2]\n"
- "fmla z30.s, p3/M, z1.s, z11.s\n"
- "fmla z31.s, p3/M, z1.s, z12.s\n"
- "ld1w { z1.s }, p3/Z, [x10, #6, MUL VL]\n"
- "fmla z28.s, p3/M, z2.s, z13.s\n"
- "fmla z29.s, p3/M, z2.s, z5.s\n"
- "ld1w { z13.s }, p2/Z, [x9, x26, LSL #2]\n"
- "fmla z30.s, p3/M, z2.s, z12.s\n"
+ "movprfx z30, z16\n fmla z30.s, p3/M, z0.s, z6.s\n"
+ "ldr x5, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "add x21, x5, #0x1\n"
+ "movprfx z29, z16\n fmla z29.s, p3/M, z0.s, z7.s\n"
+ "ldr x6, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "movprfx z28, z16\n fmla z28.s, p3/M, z0.s, z8.s\n"
+ "ld1w { z0.s }, p3/Z, [x8]\n"
+ "add x6, x6, #0x1\n"
+ "fmla z31.s, p3/M, z1.s, z6.s\n"
+ "ld1w { z6.s }, p2/Z, [x20, x24, LSL #2]\n"
+ "fmla z30.s, p3/M, z1.s, z9.s\n"
+ "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "fmla z29.s, p3/M, z1.s, z8.s\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "cmp x6, x19\n"
+ "fmla z28.s, p3/M, z1.s, z13.s\n"
+ "ld1w { z1.s }, p3/Z, [x8, #1, MUL VL]\n"
"fmla z31.s, p3/M, z2.s, z9.s\n"
- "ld1w { z2.s }, p3/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
- "fmla z28.s, p3/M, z3.s, z5.s\n"
- "fmla z29.s, p3/M, z3.s, z6.s\n"
- "ld1w { z5.s }, p2/Z, [x27]\n"
- "fmla z30.s, p3/M, z3.s, z9.s\n"
- "fmla z31.s, p3/M, z3.s, z13.s\n"
- "ld1w { z3.s }, p3/Z, [x10, #-8, MUL VL]\n"
- "fmla z28.s, p3/M, z4.s, z6.s\n"
- "fmla z29.s, p3/M, z4.s, z10.s\n"
- "ld1w { z6.s }, p2/Z, [x27, x17, LSL #2]\n"
- "ld1w { z10.s }, p2/Z, [x27, x15, LSL #2]\n"
- "fmla z30.s, p3/M, z4.s, z13.s\n"
- "fmla z31.s, p3/M, z4.s, z8.s\n"
- "ld1w { z4.s }, p3/Z, [x10, #-7, MUL VL]\n"
- "fmla z28.s, p3/M, z0.s, z14.s\n"
- "fmla z29.s, p3/M, z0.s, z11.s\n"
- "ld1w { z14.s }, p2/Z, [x27, x24, LSL #2]\n"
- "fmla z30.s, p3/M, z0.s, z5.s\n"
- "fmla z31.s, p3/M, z0.s, z6.s\n"
- "ld1w { z0.s }, p3/Z, [x10, #-6, MUL VL]\n"
- "fmla z28.s, p3/M, z1.s, z11.s\n"
- "fmla z29.s, p3/M, z1.s, z12.s\n"
- "ld1w { z11.s }, p2/Z, [x27, x28, LSL #2]\n"
- "fmla z30.s, p3/M, z1.s, z6.s\n"
- "fmla z31.s, p3/M, z1.s, z10.s\n"
- "ld1w { z1.s }, p3/Z, [x10, #-5, MUL VL]\n"
- "fmla z28.s, p3/M, z2.s, z12.s\n"
- "fmla z29.s, p3/M, z2.s, z9.s\n"
- "ld1w { z12.s }, p2/Z, [x27, x26, LSL #2]\n"
- "fmla z30.s, p3/M, z2.s, z10.s\n"
- "fmla z31.s, p3/M, z2.s, z11.s\n"
- "ld1w { z2.s }, p3/Z, [x10, #-4, MUL VL]\n"
- "fmla z28.s, p3/M, z3.s, z9.s\n"
- "fmla z29.s, p3/M, z3.s, z13.s\n"
- "ld1w { z9.s }, p2/Z, [x25]\n"
- "fmla z30.s, p3/M, z3.s, z11.s\n"
- "fmla z31.s, p3/M, z3.s, z12.s\n"
- "ld1w { z3.s }, p3/Z, [x10, #-3, MUL VL]\n"
- "fmla z28.s, p3/M, z4.s, z13.s\n"
- "fmla z29.s, p3/M, z4.s, z8.s\n"
- "ld1w { z13.s }, p2/Z, [x25, x17, LSL #2]\n"
- "ld1w { z8.s }, p2/Z, [x25, x26, LSL #2]\n"
- "fmla z30.s, p3/M, z4.s, z12.s\n"
- "fmla z31.s, p3/M, z4.s, z14.s\n"
- "ld1w { z4.s }, p3/Z, [x10, #-2, MUL VL]\n"
- "fmla z28.s, p3/M, z0.s, z5.s\n"
- "fmla z29.s, p3/M, z0.s, z6.s\n"
- "ld1w { z5.s }, p2/Z, [x25, x15, LSL #2]\n"
- "fmla z30.s, p3/M, z0.s, z9.s\n"
- "fmla z31.s, p3/M, z0.s, z13.s\n"
- "ld1w { z0.s }, p3/Z, [x10, #-1, MUL VL]\n"
- "fmla z28.s, p3/M, z1.s, z6.s\n"
- "fmla z29.s, p3/M, z1.s, z10.s\n"
- "ld1w { z6.s }, p2/Z, [x25, x28, LSL #2]\n"
+ "ld1w { z9.s }, p2/Z, [x13, x23, LSL #2]\n"
+ "csel x6, x6, XZR, LT\n"
+ "fmla z30.s, p3/M, z2.s, z11.s\n"
+ "csel x5, x5, x21, LT\n"
+ "fmla z29.s, p3/M, z2.s, z13.s\n"
+ "cmp x5, x20\n"
+ "fmla z28.s, p3/M, z2.s, z5.s\n"
+ "ld1w { z2.s }, p3/Z, [x8, #2, MUL VL]\n"
+ "fmla z31.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x10, x15, LSL #2]\n"
+ "fmla z30.s, p3/M, z3.s, z12.s\n"
+ "fmla z29.s, p3/M, z3.s, z5.s\n"
+ "fmla z28.s, p3/M, z3.s, z6.s\n"
+ "ld1w { z3.s }, p3/Z, [x8, #3, MUL VL]\n"
+ "fmla z31.s, p3/M, z4.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x10, x26, LSL #2]\n"
+ "fmla z30.s, p3/M, z4.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x10, x25, LSL #2]\n"
+ "fmla z29.s, p3/M, z4.s, z6.s\n"
+ "fmla z28.s, p3/M, z4.s, z10.s\n"
+ "ld1w { z4.s }, p3/Z, [x8, #4, MUL VL]\n"
+ "fmla z31.s, p3/M, z0.s, z7.s\n"
+ "fmla z30.s, p3/M, z0.s, z8.s\n"
+ "fmla z29.s, p3/M, z0.s, z14.s\n"
+ "fmla z28.s, p3/M, z0.s, z11.s\n"
+ "ld1w { z0.s }, p3/Z, [x8, #5, MUL VL]\n"
+ "fmla z31.s, p3/M, z1.s, z8.s\n"
+ "ld1w { z8.s }, p2/Z, [x10, x23, LSL #2]\n"
"fmla z30.s, p3/M, z1.s, z13.s\n"
- "fmla z31.s, p3/M, z1.s, z5.s\n"
- "ld1w { z1.s }, p3/Z, [x10]\n"
- "fmla z28.s, p3/M, z2.s, z10.s\n"
- "fmla z29.s, p3/M, z2.s, z11.s\n"
- "ld1w { z10.s }, p2/Z, [x25, x24, LSL #2]\n"
+ "fmla z29.s, p3/M, z1.s, z11.s\n"
+ "fmla z28.s, p3/M, z1.s, z12.s\n"
+ "ld1w { z1.s }, p3/Z, [x8, #6, MUL VL]\n"
+ "fmla z31.s, p3/M, z2.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x10, x24, LSL #2]\n"
"fmla z30.s, p3/M, z2.s, z5.s\n"
- "fmla z31.s, p3/M, z2.s, z6.s\n"
- "ld1w { z2.s }, p3/Z, [x10, #1, MUL VL]\n"
- "fmla z28.s, p3/M, z3.s, z11.s\n"
- "fmla z29.s, p3/M, z3.s, z12.s\n"
- "ld1w { z11.s }, p2/Z, [x23]\n"
+ "fmla z29.s, p3/M, z2.s, z12.s\n"
+ "fmla z28.s, p3/M, z2.s, z9.s\n"
+ "ld1w { z2.s }, p3/Z, [x8, #7, MUL VL]\n"
+ "addvl x8, x8, #16\n"
+ "fmla z31.s, p3/M, z3.s, z5.s\n"
+ "ld1w { z5.s }, p2/Z, [x9]\n"
"fmla z30.s, p3/M, z3.s, z6.s\n"
- "fmla z31.s, p3/M, z3.s, z8.s\n"
- "ld1w { z3.s }, p3/Z, [x10, #2, MUL VL]\n"
- "fmla z28.s, p3/M, z4.s, z12.s\n"
- "fmla z29.s, p3/M, z4.s, z14.s\n"
- "ld1w { z12.s }, p2/Z, [x23, x17, LSL #2]\n"
- "fmla z30.s, p3/M, z4.s, z8.s\n"
- "fmla z31.s, p3/M, z4.s, z10.s\n"
- "ld1w { z4.s }, p3/Z, [x10, #3, MUL VL]\n"
- "fmla z28.s, p3/M, z0.s, z9.s\n"
- "fmla z29.s, p3/M, z0.s, z13.s\n"
- "ld1w { z9.s }, p2/Z, [x23, x15, LSL #2]\n"
+ "fmla z29.s, p3/M, z3.s, z9.s\n"
+ "fmla z28.s, p3/M, z3.s, z13.s\n"
+ "ld1w { z3.s }, p3/Z, [x8, #-8, MUL VL]\n"
+ "fmla z31.s, p3/M, z4.s, z6.s\n"
+ "ld1w { z6.s }, p2/Z, [x9, x15, LSL #2]\n"
+ "fmla z30.s, p3/M, z4.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x9, x26, LSL #2]\n"
+ "fmla z29.s, p3/M, z4.s, z13.s\n"
+ "fmla z28.s, p3/M, z4.s, z8.s\n"
+ "ld1w { z4.s }, p3/Z, [x8, #-7, MUL VL]\n"
+ "fmla z31.s, p3/M, z0.s, z14.s\n"
+ "ld1w { z14.s }, p2/Z, [x9, x23, LSL #2]\n"
"fmla z30.s, p3/M, z0.s, z11.s\n"
- "fmla z31.s, p3/M, z0.s, z12.s\n"
- "ld1w { z11.s }, p2/Z, [x23, x28, LSL #2]\n"
- "fmla z28.s, p3/M, z1.s, z13.s\n"
- "fmla z29.s, p3/M, z1.s, z5.s\n"
+ "fmla z29.s, p3/M, z0.s, z5.s\n"
+ "fmla z28.s, p3/M, z0.s, z6.s\n"
+ "ld1w { z0.s }, p3/Z, [x8, #-6, MUL VL]\n"
+ "fmla z31.s, p3/M, z1.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x9, x25, LSL #2]\n"
"fmla z30.s, p3/M, z1.s, z12.s\n"
- "fmla z31.s, p3/M, z1.s, z9.s\n"
- "ld1w { z12.s }, p2/Z, [x23, x26, LSL #2]\n"
- "fmla z28.s, p3/M, z2.s, z5.s\n"
- "fmla z29.s, p3/M, z2.s, z6.s\n"
+ "fmla z29.s, p3/M, z1.s, z6.s\n"
+ "fmla z28.s, p3/M, z1.s, z10.s\n"
+ "ld1w { z1.s }, p3/Z, [x8, #-5, MUL VL]\n"
+ "fmla z31.s, p3/M, z2.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x9, x24, LSL #2]\n"
"fmla z30.s, p3/M, z2.s, z9.s\n"
- "fmla z31.s, p3/M, z2.s, z11.s\n"
- "ld1w { z9.s }, p2/Z, [x23, x24, LSL #2]\n"
- "fmla z28.s, p3/M, z3.s, z6.s\n"
- "fmla z29.s, p3/M, z3.s, z8.s\n"
- "fmla z30.s, p3/M, z3.s, z11.s\n"
- "fmla z31.s, p3/M, z3.s, z12.s\n"
- "fmla z28.s, p3/M, z4.s, z8.s\n"
- "fmla z29.s, p3/M, z4.s, z10.s\n"
- "fmax z28.s, p3/M, z28.s, z18.s\n"
- "fmax z29.s, p3/M, z29.s, z18.s\n"
- "fmla z30.s, p3/M, z4.s, z12.s\n"
- "fmla z31.s, p3/M, z4.s, z9.s\n"
- "fmax z30.s, p3/M, z30.s, z18.s\n"
+ "fmla z29.s, p3/M, z2.s, z10.s\n"
+ "fmla z28.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z2.s }, p3/Z, [x8, #-4, MUL VL]\n"
+ "fmla z31.s, p3/M, z3.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x28]\n"
+ "fmla z30.s, p3/M, z3.s, z13.s\n"
+ "fmla z29.s, p3/M, z3.s, z11.s\n"
+ "fmla z28.s, p3/M, z3.s, z12.s\n"
+ "ld1w { z3.s }, p3/Z, [x8, #-3, MUL VL]\n"
+ "fmla z31.s, p3/M, z4.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x28, x15, LSL #2]\n"
+ "fmla z30.s, p3/M, z4.s, z8.s\n"
+ "ld1w { z8.s }, p2/Z, [x28, x24, LSL #2]\n"
+ "fmla z29.s, p3/M, z4.s, z12.s\n"
+ "fmla z28.s, p3/M, z4.s, z14.s\n"
+ "ld1w { z4.s }, p3/Z, [x8, #-2, MUL VL]\n"
+ "fmla z31.s, p3/M, z0.s, z5.s\n"
+ "ld1w { z5.s }, p2/Z, [x28, x26, LSL #2]\n"
+ "fmla z30.s, p3/M, z0.s, z6.s\n"
+ "fmla z29.s, p3/M, z0.s, z9.s\n"
+ "fmla z28.s, p3/M, z0.s, z13.s\n"
+ "ld1w { z0.s }, p3/Z, [x8, #-1, MUL VL]\n"
+ "fmla z31.s, p3/M, z1.s, z6.s\n"
+ "ld1w { z6.s }, p2/Z, [x28, x25, LSL #2]\n"
+ "fmla z30.s, p3/M, z1.s, z10.s\n"
+ "fmla z29.s, p3/M, z1.s, z13.s\n"
+ "fmla z28.s, p3/M, z1.s, z5.s\n"
+ "ld1w { z1.s }, p3/Z, [x8]\n"
+ "fmla z31.s, p3/M, z2.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x28, x23, LSL #2]\n"
+ "fmla z30.s, p3/M, z2.s, z11.s\n"
+ "fmla z29.s, p3/M, z2.s, z5.s\n"
+ "fmla z28.s, p3/M, z2.s, z6.s\n"
+ "ld1w { z2.s }, p3/Z, [x8, #1, MUL VL]\n"
+ "fmla z31.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x27]\n"
+ "fmla z30.s, p3/M, z3.s, z12.s\n"
+ "fmla z29.s, p3/M, z3.s, z6.s\n"
+ "fmla z28.s, p3/M, z3.s, z8.s\n"
+ "ld1w { z3.s }, p3/Z, [x8, #2, MUL VL]\n"
+ "fmla z31.s, p3/M, z4.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x27, x15, LSL #2]\n"
+ "fmla z30.s, p3/M, z4.s, z14.s\n"
+ "fmla z29.s, p3/M, z4.s, z8.s\n"
+ "fmla z28.s, p3/M, z4.s, z10.s\n"
+ "ld1w { z4.s }, p3/Z, [x8, #3, MUL VL]\n"
+ "fmla z31.s, p3/M, z0.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x27, x26, LSL #2]\n"
+ "fmla z30.s, p3/M, z0.s, z13.s\n"
+ "fmla z29.s, p3/M, z0.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x27, x25, LSL #2]\n"
+ "fmla z28.s, p3/M, z0.s, z12.s\n"
+ "fmla z31.s, p3/M, z1.s, z13.s\n"
+ "fmla z30.s, p3/M, z1.s, z5.s\n"
+ "fmla z29.s, p3/M, z1.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x27, x24, LSL #2]\n"
+ "fmla z28.s, p3/M, z1.s, z9.s\n"
+ "fmla z31.s, p3/M, z2.s, z5.s\n"
+ "fmla z30.s, p3/M, z2.s, z6.s\n"
+ "fmla z29.s, p3/M, z2.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x27, x23, LSL #2]\n"
+ "fmla z28.s, p3/M, z2.s, z11.s\n"
+ "fmla z31.s, p3/M, z3.s, z6.s\n"
+ "fmla z30.s, p3/M, z3.s, z8.s\n"
+ "fmla z29.s, p3/M, z3.s, z11.s\n"
+ "fmla z28.s, p3/M, z3.s, z12.s\n"
+ "fmla z31.s, p3/M, z4.s, z8.s\n"
+ "fmla z30.s, p3/M, z4.s, z10.s\n"
+ "fmla z29.s, p3/M, z4.s, z12.s\n"
+ "fmla z28.s, p3/M, z4.s, z9.s\n"
"fmax z31.s, p3/M, z31.s, z18.s\n"
- "fmin z28.s, p3/M, z28.s, z17.s\n"
- "fmin z29.s, p3/M, z29.s, z17.s\n"
- "st1w { z28.s }, p0, [x13]\n"
- "fmin z30.s, p3/M, z30.s, z17.s\n"
+ "fmax z30.s, p3/M, z30.s, z18.s\n"
+ "fmax z29.s, p3/M, z29.s, z18.s\n"
+ "fmax z28.s, p3/M, z28.s, z18.s\n"
"fmin z31.s, p3/M, z31.s, z17.s\n"
- "st1w { z29.s }, p0, [x13, x16, LSL #2]\n"
- "st1w { z30.s }, p0, [x22]\n"
- "st1w { z31.s }, p0, [x22, x16, LSL #2]\n"
+ "st1w { z31.s }, p0, [x11]\n"
+ "fmin z30.s, p3/M, z30.s, z17.s\n"
+ "fmin z29.s, p3/M, z29.s, z17.s\n"
+ "st1w { z30.s }, p0, [x11, x12, LSL #2]\n"
+ "fmin z28.s, p3/M, z28.s, z17.s\n"
+ "st1w { z29.s }, p0, [x22]\n"
+ "st1w { z28.s }, p0, [x22, x12, LSL #2]\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z16", "z17", "z18", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z16", "z17", "z18", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp
index cb70bd2b6f..070270764c 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -98,450 +98,450 @@ void sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
activation_min, activation_max);
__asm__ __volatile__(
- "ldr x20, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "add x16, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "ldp x15, x14, [x20, #0x0]\n"
+ "ldr x19, [%x[params_struct], %[offsetof_args_outptrs]]\n"
+ "ptrue p3.b\n"
+ "ldr x15, [%x[params_struct], %[offsetof_args_params]]\n"
+ "add x14, %x[params_struct], %[offsetof_Args_inptrs]\n"
+ "ld1rw { z18.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
"mov x13, #0x0\n"
- "ldp x12, x11, [x20, #0x10]\n"
- "whilelt p3.s, XZR, %x[n_channels]\n"
- "ldp x10, x9, [x16, #0x0]\n"
- "cntw x28\n"
- "ptrue p2.b\n"
- "ldr x27, [%x[params_struct], %[offsetof_args_params]]\n"
- "ld1w { z5.s }, p3/Z, [x10, x13, LSL #2]\n"
- "cmp x28, %x[n_channels]\n"
- "ld1w { z6.s }, p3/Z, [x9, x13, LSL #2]\n"
- "ldp x26, x25, [x16, #0x10]\n"
- "sub x24, XZR, x28\n"
- "ldp x23, x22, [x16, #0x20]\n"
- "ldp x21, x20, [x16, #0x30]\n"
- "ldp x10, x9, [x16, #0x40]\n"
- "ld1rw { z18.s }, p2/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ld1rw { z17.s }, p2/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "ld1w { z16.s }, p2/Z, [x27]\n"
- "ld1w { z0.s }, p2/Z, [x27, #1, MUL VL]\n"
- "ld1w { z1.s }, p2/Z, [x27, #2, MUL VL]\n"
- "ld1w { z2.s }, p2/Z, [x27, #3, MUL VL]\n"
- "ld1w { z3.s }, p2/Z, [x27, #4, MUL VL]\n"
- "ld1w { z4.s }, p2/Z, [x27, #5, MUL VL]\n"
- "ld1w { z7.s }, p3/Z, [x26, x13, LSL #2]\n"
- "addvl x27, x27, #6\n"
- "ld1w { z8.s }, p3/Z, [x25, x13, LSL #2]\n"
- "ld1w { z9.s }, p3/Z, [x23, x13, LSL #2]\n"
- "ld1w { z13.s }, p3/Z, [x22, x13, LSL #2]\n"
- "ld1w { z11.s }, p3/Z, [x21, x13, LSL #2]\n"
- "ld1w { z12.s }, p3/Z, [x20, x13, LSL #2]\n"
- "ld1w { z10.s }, p3/Z, [x10, x13, LSL #2]\n"
- "ld1w { z14.s }, p3/Z, [x9, x13, LSL #2]\n"
+ "ld1rw { z17.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "cntw x12\n"
+ "ldp x11, x10, [x19, #0x0]\n"
+ "sub x9, XZR, x12\n"
+ "ldp x28, x27, [x19, #0x10]\n"
+ "whilelt p2.s, XZR, %x[n_channels]\n"
+ "ld1w { z16.s }, p3/Z, [x15]\n"
+ "cmp x12, %x[n_channels]\n"
+ "ld1w { z0.s }, p3/Z, [x15, #1, MUL VL]\n"
+ "ld1w { z1.s }, p3/Z, [x15, #2, MUL VL]\n"
+ "ld1w { z2.s }, p3/Z, [x15, #3, MUL VL]\n"
+ "ld1w { z3.s }, p3/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z4.s }, p3/Z, [x15, #5, MUL VL]\n"
+ "addvl x15, x15, #6\n"
+ "ldp x26, x25, [x14, #0x0]\n"
+ "ldp x24, x23, [x14, #0x10]\n"
+ "ldp x22, x21, [x14, #0x20]\n"
+ "ld1w { z5.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "ld1w { z6.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "ld1w { z7.s }, p2/Z, [x24, x13, LSL #2]\n"
+ "ld1w { z8.s }, p2/Z, [x23, x13, LSL #2]\n"
+ "ld1w { z9.s }, p2/Z, [x22, x13, LSL #2]\n"
+ "ld1w { z13.s }, p2/Z, [x21, x13, LSL #2]\n"
+ "ldp x20, x19, [x14, #0x30]\n"
+ "ldp x26, x25, [x14, #0x40]\n"
+ "ld1w { z11.s }, p2/Z, [x20, x13, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x19, x13, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "ld1w { z14.s }, p2/Z, [x25, x13, LSL #2]\n"
"bge 2f\n"
"1:" // Channel loop
- "movprfx z28, z16\n fmla z28.s, p2/M, z0.s, z5.s\n"
- "movprfx z29, z16\n fmla z29.s, p2/M, z0.s, z6.s\n"
- "ldr x26, [x16, #0x50]\n"
- "ld1w { z5.s }, p3/Z, [x26, x13, LSL #2]\n"
- "movprfx z30, z16\n fmla z30.s, p2/M, z0.s, z7.s\n"
- "movprfx z31, z16\n fmla z31.s, p2/M, z0.s, z8.s\n"
- "ldr x25, [x16, #0x58]\n"
- "ldr x23, [x16, #0x60]\n"
- "fmla z28.s, p2/M, z1.s, z6.s\n"
- "fmla z29.s, p2/M, z1.s, z9.s\n"
- "ld1w { z6.s }, p3/Z, [x25, x13, LSL #2]\n"
- "ldr x22, [x16, #0x68]\n"
- "fmla z30.s, p2/M, z1.s, z8.s\n"
- "fmla z31.s, p2/M, z1.s, z13.s\n"
- "ld1w { z0.s }, p2/Z, [x27]\n"
- "ldr x21, [x16, #0x70]\n"
- "fmla z28.s, p2/M, z2.s, z9.s\n"
- "fmla z29.s, p2/M, z2.s, z11.s\n"
- "ld1w { z9.s }, p3/Z, [x23, x13, LSL #2]\n"
- "ld1w { z1.s }, p2/Z, [x27, #1, MUL VL]\n"
- "fmla z30.s, p2/M, z2.s, z13.s\n"
- "fmla z31.s, p2/M, z2.s, z5.s\n"
- "ldr x20, [x16, #0x78]\n"
- "ld1w { z2.s }, p2/Z, [x27, #2, MUL VL]\n"
- "fmla z28.s, p2/M, z3.s, z11.s\n"
- "fmla z29.s, p2/M, z3.s, z12.s\n"
- "ld1w { z11.s }, p3/Z, [x22, x13, LSL #2]\n"
- "ldr x10, [x16, #0x80]\n"
- "fmla z30.s, p2/M, z3.s, z5.s\n"
- "fmla z31.s, p2/M, z3.s, z6.s\n"
- "ld1w { z3.s }, p2/Z, [x27, #3, MUL VL]\n"
- "ldr x9, [x16, #0x88]\n"
- "fmla z28.s, p2/M, z4.s, z12.s\n"
- "fmla z29.s, p2/M, z4.s, z9.s\n"
- "ld1w { z12.s }, p3/Z, [x21, x13, LSL #2]\n"
- "ld1w { z9.s }, p3/Z, [x20, x13, LSL #2]\n"
- "fmla z30.s, p2/M, z4.s, z6.s\n"
- "fmla z31.s, p2/M, z4.s, z10.s\n"
- "ld1w { z4.s }, p2/Z, [x27, #4, MUL VL]\n"
- "ldr x26, [x16, #0x90]\n"
- "fmla z28.s, p2/M, z0.s, z7.s\n"
- "fmla z29.s, p2/M, z0.s, z8.s\n"
- "ldr x25, [x16, #0x98]\n"
- "ldr x23, [x16, #0xa0]\n"
- "fmla z30.s, p2/M, z0.s, z14.s\n"
- "fmla z31.s, p2/M, z0.s, z11.s\n"
- "ld1w { z0.s }, p2/Z, [x27, #5, MUL VL]\n"
- "ldr x22, [x16, #0xa8]\n"
- "fmla z28.s, p2/M, z1.s, z8.s\n"
- "fmla z29.s, p2/M, z1.s, z13.s\n"
- "ld1w { z8.s }, p3/Z, [x9, x13, LSL #2]\n"
- "ldr x21, [x16, #0xb0]\n"
- "fmla z30.s, p2/M, z1.s, z11.s\n"
- "fmla z31.s, p2/M, z1.s, z12.s\n"
- "ld1w { z1.s }, p2/Z, [x27, #6, MUL VL]\n"
- "ldr x20, [x16, #0xb8]\n"
- "fmla z28.s, p2/M, z2.s, z13.s\n"
- "fmla z29.s, p2/M, z2.s, z5.s\n"
- "ld1w { z13.s }, p3/Z, [x10, x13, LSL #2]\n"
- "ldr x10, [x16, #0xc0]\n"
- "fmla z30.s, p2/M, z2.s, z12.s\n"
- "fmla z31.s, p2/M, z2.s, z9.s\n"
- "ld1w { z2.s }, p2/Z, [x27, #7, MUL VL]\n"
- "addvl x27, x27, #16\n"
- "fmla z28.s, p2/M, z3.s, z5.s\n"
- "fmla z29.s, p2/M, z3.s, z6.s\n"
- "ld1w { z5.s }, p3/Z, [x26, x13, LSL #2]\n"
- "ldr x9, [x16, #0xc8]\n"
- "fmla z30.s, p2/M, z3.s, z9.s\n"
- "fmla z31.s, p2/M, z3.s, z13.s\n"
- "ld1w { z3.s }, p2/Z, [x27, #-8, MUL VL]\n"
- "ldr x26, [x16, #0xd0]\n"
- "fmla z28.s, p2/M, z4.s, z6.s\n"
- "fmla z29.s, p2/M, z4.s, z10.s\n"
- "ld1w { z6.s }, p3/Z, [x25, x13, LSL #2]\n"
- "ld1w { z10.s }, p3/Z, [x23, x13, LSL #2]\n"
- "fmla z30.s, p2/M, z4.s, z13.s\n"
- "fmla z31.s, p2/M, z4.s, z8.s\n"
- "ld1w { z4.s }, p2/Z, [x27, #-7, MUL VL]\n"
- "ldr x25, [x16, #0xd8]\n"
- "fmla z28.s, p2/M, z0.s, z14.s\n"
- "fmla z29.s, p2/M, z0.s, z11.s\n"
- "ld1w { z14.s }, p3/Z, [x20, x13, LSL #2]\n"
- "ldr x23, [x16, #0xe0]\n"
- "fmla z30.s, p2/M, z0.s, z5.s\n"
- "fmla z31.s, p2/M, z0.s, z6.s\n"
- "ld1w { z0.s }, p2/Z, [x27, #-6, MUL VL]\n"
- "ldr x20, [x16, #0xf8]\n"
- "fmla z28.s, p2/M, z1.s, z11.s\n"
- "fmla z29.s, p2/M, z1.s, z12.s\n"
- "ld1w { z11.s }, p3/Z, [x22, x13, LSL #2]\n"
- "ldr x22, [x16, #0xe8]\n"
- "fmla z30.s, p2/M, z1.s, z6.s\n"
- "fmla z31.s, p2/M, z1.s, z10.s\n"
- "ld1w { z1.s }, p2/Z, [x27, #-5, MUL VL]\n"
- "whilelt p1.s, x28, %x[n_channels]\n"
- "fmla z28.s, p2/M, z2.s, z12.s\n"
- "fmla z29.s, p2/M, z2.s, z9.s\n"
- "ld1w { z12.s }, p3/Z, [x21, x13, LSL #2]\n"
- "ldr x21, [x16, #0xf0]\n"
- "fmla z30.s, p2/M, z2.s, z10.s\n"
- "fmla z31.s, p2/M, z2.s, z11.s\n"
- "ld1w { z2.s }, p2/Z, [x27, #-4, MUL VL]\n"
- "incw x24\n"
- "fmla z28.s, p2/M, z3.s, z9.s\n"
- "fmla z29.s, p2/M, z3.s, z13.s\n"
- "ld1w { z9.s }, p3/Z, [x10, x13, LSL #2]\n"
- "ldr x10, [x16, #0x100]\n"
- "fmla z30.s, p2/M, z3.s, z11.s\n"
- "fmla z31.s, p2/M, z3.s, z12.s\n"
- "ld1w { z3.s }, p2/Z, [x27, #-3, MUL VL]\n"
- "mov p0.b, p3.b\n"
- "fmla z28.s, p2/M, z4.s, z13.s\n"
- "fmla z29.s, p2/M, z4.s, z8.s\n"
- "ld1w { z13.s }, p3/Z, [x9, x13, LSL #2]\n"
- "ld1w { z8.s }, p3/Z, [x23, x13, LSL #2]\n"
- "fmla z30.s, p2/M, z4.s, z12.s\n"
- "fmla z31.s, p2/M, z4.s, z14.s\n"
- "ld1w { z4.s }, p2/Z, [x27, #-2, MUL VL]\n"
- "ldr x9, [x16, #0x108]\n"
- "fmla z28.s, p2/M, z0.s, z5.s\n"
- "fmla z29.s, p2/M, z0.s, z6.s\n"
- "ld1w { z5.s }, p3/Z, [x26, x13, LSL #2]\n"
- "ldr x26, [x16, #0x110]\n"
- "fmla z30.s, p2/M, z0.s, z9.s\n"
- "fmla z31.s, p2/M, z0.s, z13.s\n"
- "ld1w { z0.s }, p2/Z, [x27, #-1, MUL VL]\n"
- "ld1w { z16.s }, p2/Z, [x27, #4, MUL VL]\n"
- "fmla z28.s, p2/M, z1.s, z6.s\n"
- "fmla z29.s, p2/M, z1.s, z10.s\n"
- "ld1w { z6.s }, p3/Z, [x25, x13, LSL #2]\n"
- "ldr x25, [x16, #0x118]\n"
- "fmla z30.s, p2/M, z1.s, z13.s\n"
- "fmla z31.s, p2/M, z1.s, z5.s\n"
- "ld1w { z1.s }, p2/Z, [x27]\n"
- "fmla z28.s, p2/M, z2.s, z10.s\n"
- "fmla z29.s, p2/M, z2.s, z11.s\n"
- "ld1w { z10.s }, p3/Z, [x22, x13, LSL #2]\n"
- "fmla z30.s, p2/M, z2.s, z5.s\n"
- "fmla z31.s, p2/M, z2.s, z6.s\n"
- "ld1w { z2.s }, p2/Z, [x27, #1, MUL VL]\n"
- "fmla z28.s, p2/M, z3.s, z11.s\n"
- "fmla z29.s, p2/M, z3.s, z12.s\n"
- "ld1w { z11.s }, p3/Z, [x21, x13, LSL #2]\n"
- "fmla z30.s, p2/M, z3.s, z6.s\n"
- "fmla z31.s, p2/M, z3.s, z8.s\n"
- "ld1w { z3.s }, p2/Z, [x27, #2, MUL VL]\n"
- "fmla z28.s, p2/M, z4.s, z12.s\n"
- "fmla z29.s, p2/M, z4.s, z14.s\n"
- "ld1w { z12.s }, p3/Z, [x20, x13, LSL #2]\n"
- "fmla z30.s, p2/M, z4.s, z8.s\n"
- "fmla z31.s, p2/M, z4.s, z10.s\n"
- "ld1w { z4.s }, p2/Z, [x27, #3, MUL VL]\n"
- "fmla z28.s, p2/M, z0.s, z9.s\n"
- "fmla z29.s, p2/M, z0.s, z13.s\n"
- "ld1w { z9.s }, p3/Z, [x10, x13, LSL #2]\n"
- "fmla z30.s, p2/M, z0.s, z11.s\n"
- "fmla z31.s, p2/M, z0.s, z12.s\n"
- "ld1w { z11.s }, p3/Z, [x9, x13, LSL #2]\n"
- "ldp x10, x9, [x16, #0x0]\n"
- "fmla z28.s, p2/M, z1.s, z13.s\n"
- "fmla z29.s, p2/M, z1.s, z5.s\n"
- "ld1w { z0.s }, p2/Z, [x27, #5, MUL VL]\n"
- "fmla z30.s, p2/M, z1.s, z12.s\n"
- "fmla z31.s, p2/M, z1.s, z9.s\n"
- "ld1w { z12.s }, p3/Z, [x26, x13, LSL #2]\n"
- "ld1w { z1.s }, p2/Z, [x27, #6, MUL VL]\n"
- "fmla z28.s, p2/M, z2.s, z5.s\n"
- "fmla z29.s, p2/M, z2.s, z6.s\n"
- "ld1w { z5.s }, p1/Z, [x10, x28, LSL #2]\n"
- "fmla z30.s, p2/M, z2.s, z9.s\n"
- "fmla z31.s, p2/M, z2.s, z11.s\n"
- "ld1w { z9.s }, p3/Z, [x25, x13, LSL #2]\n"
- "ldp x26, x25, [x16, #0x10]\n"
- "fmla z28.s, p2/M, z3.s, z6.s\n"
- "fmla z29.s, p2/M, z3.s, z8.s\n"
- "ld1w { z6.s }, p1/Z, [x9, x28, LSL #2]\n"
- "ldp x23, x22, [x16, #0x20]\n"
- "fmla z30.s, p2/M, z3.s, z11.s\n"
- "fmla z31.s, p2/M, z3.s, z12.s\n"
- "ldp x21, x20, [x16, #0x30]\n"
- "ldp x10, x9, [x16, #0x40]\n"
- "fmla z28.s, p2/M, z4.s, z8.s\n"
- "fmla z29.s, p2/M, z4.s, z10.s\n"
+ "movprfx z31, z16\n fmla z31.s, p3/M, z0.s, z5.s\n"
+ "ldr x24, [x14, #0x50]\n"
+ "whilelt p1.s, x12, %x[n_channels]\n"
+ "movprfx z30, z16\n fmla z30.s, p3/M, z0.s, z6.s\n"
+ "ldr x23, [x14, #0x58]\n"
+ "incw x9\n"
+ "movprfx z29, z16\n fmla z29.s, p3/M, z0.s, z7.s\n"
+ "ldr x22, [x14, #0x60]\n"
+ "mov p0.b, p2.b\n"
+ "movprfx z28, z16\n fmla z28.s, p3/M, z0.s, z8.s\n"
+ "ld1w { z5.s }, p2/Z, [x24, x13, LSL #2]\n"
+ "ld1w { z0.s }, p3/Z, [x15]\n"
+ "fmla z31.s, p3/M, z1.s, z6.s\n"
+ "ld1w { z6.s }, p2/Z, [x23, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z1.s, z9.s\n"
+ "ldr x21, [x14, #0x68]\n"
+ "fmla z29.s, p3/M, z1.s, z8.s\n"
+ "ldr x20, [x14, #0x70]\n"
+ "fmla z28.s, p3/M, z1.s, z13.s\n"
+ "ld1w { z1.s }, p3/Z, [x15, #1, MUL VL]\n"
+ "fmla z31.s, p3/M, z2.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x22, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z2.s, z11.s\n"
+ "ldr x19, [x14, #0x78]\n"
+ "fmla z29.s, p3/M, z2.s, z13.s\n"
+ "ldr x26, [x14, #0x80]\n"
+ "fmla z28.s, p3/M, z2.s, z5.s\n"
+ "ld1w { z2.s }, p3/Z, [x15, #2, MUL VL]\n"
+ "fmla z31.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x21, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z3.s, z12.s\n"
+ "ldr x25, [x14, #0x88]\n"
+ "fmla z29.s, p3/M, z3.s, z5.s\n"
+ "ldr x24, [x14, #0x90]\n"
+ "fmla z28.s, p3/M, z3.s, z6.s\n"
+ "ld1w { z3.s }, p3/Z, [x15, #3, MUL VL]\n"
+ "fmla z31.s, p3/M, z4.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x20, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z4.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x19, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z4.s, z6.s\n"
+ "ldr x23, [x14, #0x98]\n"
+ "fmla z28.s, p3/M, z4.s, z10.s\n"
+ "ld1w { z4.s }, p3/Z, [x15, #4, MUL VL]\n"
+ "fmla z31.s, p3/M, z0.s, z7.s\n"
+ "ldr x22, [x14, #0xa0]\n"
+ "fmla z30.s, p3/M, z0.s, z8.s\n"
+ "ldr x21, [x14, #0xa8]\n"
+ "fmla z29.s, p3/M, z0.s, z14.s\n"
+ "ldr x20, [x14, #0xb0]\n"
+ "fmla z28.s, p3/M, z0.s, z11.s\n"
+ "ld1w { z0.s }, p3/Z, [x15, #5, MUL VL]\n"
+ "fmla z31.s, p3/M, z1.s, z8.s\n"
+ "ld1w { z8.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z1.s, z13.s\n"
+ "ldr x19, [x14, #0xb8]\n"
+ "fmla z29.s, p3/M, z1.s, z11.s\n"
+ "ldr x25, [x14, #0xc8]\n"
+ "fmla z28.s, p3/M, z1.s, z12.s\n"
+ "ld1w { z1.s }, p3/Z, [x15, #6, MUL VL]\n"
+ "fmla z31.s, p3/M, z2.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z2.s, z5.s\n"
+ "ldr x26, [x14, #0xc0]\n"
+ "fmla z29.s, p3/M, z2.s, z12.s\n"
+ "fmla z28.s, p3/M, z2.s, z9.s\n"
+ "ld1w { z2.s }, p3/Z, [x15, #7, MUL VL]\n"
+ "addvl x15, x15, #16\n"
+ "fmla z31.s, p3/M, z3.s, z5.s\n"
+ "ld1w { z5.s }, p2/Z, [x24, x13, LSL #2]\n"
+ "ldr x24, [x14, #0xd0]\n"
+ "fmla z30.s, p3/M, z3.s, z6.s\n"
+ "ld1w { z16.s }, p3/Z, [x15, #4, MUL VL]\n"
+ "fmla z29.s, p3/M, z3.s, z9.s\n"
+ "fmla z28.s, p3/M, z3.s, z13.s\n"
+ "ld1w { z3.s }, p3/Z, [x15, #-8, MUL VL]\n"
+ "fmla z31.s, p3/M, z4.s, z6.s\n"
+ "ld1w { z6.s }, p2/Z, [x23, x13, LSL #2]\n"
+ "ldr x23, [x14, #0xd8]\n"
+ "fmla z30.s, p3/M, z4.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x22, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z4.s, z13.s\n"
+ "ldr x22, [x14, #0xe0]\n"
+ "fmla z28.s, p3/M, z4.s, z8.s\n"
+ "ld1w { z4.s }, p3/Z, [x15, #-7, MUL VL]\n"
+ "fmla z31.s, p3/M, z0.s, z14.s\n"
+ "ld1w { z14.s }, p2/Z, [x19, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z0.s, z11.s\n"
+ "ldr x19, [x14, #0xf8]\n"
+ "fmla z29.s, p3/M, z0.s, z5.s\n"
+ "fmla z28.s, p3/M, z0.s, z6.s\n"
+ "ld1w { z0.s }, p3/Z, [x15, #-6, MUL VL]\n"
+ "fmla z31.s, p3/M, z1.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x21, x13, LSL #2]\n"
+ "ldr x21, [x14, #0xe8]\n"
+ "fmla z30.s, p3/M, z1.s, z12.s\n"
+ "fmla z29.s, p3/M, z1.s, z6.s\n"
+ "fmla z28.s, p3/M, z1.s, z10.s\n"
+ "ld1w { z1.s }, p3/Z, [x15, #-5, MUL VL]\n"
+ "fmla z31.s, p3/M, z2.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x20, x13, LSL #2]\n"
+ "ldr x20, [x14, #0xf0]\n"
+ "fmla z30.s, p3/M, z2.s, z9.s\n"
+ "fmla z29.s, p3/M, z2.s, z10.s\n"
+ "fmla z28.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z2.s }, p3/Z, [x15, #-4, MUL VL]\n"
+ "fmla z31.s, p3/M, z3.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "ldr x26, [x14, #0x100]\n"
+ "fmla z30.s, p3/M, z3.s, z13.s\n"
+ "fmla z29.s, p3/M, z3.s, z11.s\n"
+ "fmla z28.s, p3/M, z3.s, z12.s\n"
+ "ld1w { z3.s }, p3/Z, [x15, #-3, MUL VL]\n"
+ "fmla z31.s, p3/M, z4.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "ldr x25, [x14, #0x108]\n"
+ "fmla z30.s, p3/M, z4.s, z8.s\n"
+ "ld1w { z8.s }, p2/Z, [x22, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z4.s, z12.s\n"
+ "fmla z28.s, p3/M, z4.s, z14.s\n"
+ "ld1w { z4.s }, p3/Z, [x15, #-2, MUL VL]\n"
+ "fmla z31.s, p3/M, z0.s, z5.s\n"
+ "ld1w { z5.s }, p2/Z, [x24, x13, LSL #2]\n"
+ "ldr x24, [x14, #0x110]\n"
+ "fmla z30.s, p3/M, z0.s, z6.s\n"
+ "fmla z29.s, p3/M, z0.s, z9.s\n"
+ "fmla z28.s, p3/M, z0.s, z13.s\n"
+ "ld1w { z0.s }, p3/Z, [x15, #-1, MUL VL]\n"
+ "fmla z31.s, p3/M, z1.s, z6.s\n"
+ "ld1w { z6.s }, p2/Z, [x23, x13, LSL #2]\n"
+ "ldr x23, [x14, #0x118]\n"
+ "fmla z30.s, p3/M, z1.s, z10.s\n"
+ "fmla z29.s, p3/M, z1.s, z13.s\n"
+ "fmla z28.s, p3/M, z1.s, z5.s\n"
+ "ld1w { z1.s }, p3/Z, [x15]\n"
+ "fmla z31.s, p3/M, z2.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x21, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z2.s, z11.s\n"
+ "fmla z29.s, p3/M, z2.s, z5.s\n"
+ "fmla z28.s, p3/M, z2.s, z6.s\n"
+ "ld1w { z2.s }, p3/Z, [x15, #1, MUL VL]\n"
+ "fmla z31.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x20, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z3.s, z12.s\n"
+ "fmla z29.s, p3/M, z3.s, z6.s\n"
+ "fmla z28.s, p3/M, z3.s, z8.s\n"
+ "ld1w { z3.s }, p3/Z, [x15, #2, MUL VL]\n"
+ "fmla z31.s, p3/M, z4.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x19, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z4.s, z14.s\n"
+ "fmla z29.s, p3/M, z4.s, z8.s\n"
+ "fmla z28.s, p3/M, z4.s, z10.s\n"
+ "ld1w { z4.s }, p3/Z, [x15, #3, MUL VL]\n"
+ "fmla z31.s, p3/M, z0.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z0.s, z13.s\n"
+ "fmla z29.s, p3/M, z0.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "ldp x26, x25, [x14, #0x0]\n"
+ "fmla z28.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z0.s }, p3/Z, [x15, #5, MUL VL]\n"
+ "fmla z31.s, p3/M, z1.s, z13.s\n"
+ "fmla z30.s, p3/M, z1.s, z5.s\n"
+ "fmla z29.s, p3/M, z1.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x24, x13, LSL #2]\n"
+ "fmla z28.s, p3/M, z1.s, z9.s\n"
+ "ld1w { z1.s }, p3/Z, [x15, #6, MUL VL]\n"
+ "fmla z31.s, p3/M, z2.s, z5.s\n"
+ "ld1w { z5.s }, p1/Z, [x26, x12, LSL #2]\n"
+ "fmla z30.s, p3/M, z2.s, z6.s\n"
+ "fmla z29.s, p3/M, z2.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x23, x13, LSL #2]\n"
"incw x13\n"
- "ld1w { z7.s }, p1/Z, [x26, x28, LSL #2]\n"
- "fmla z30.s, p2/M, z4.s, z12.s\n"
- "fmla z31.s, p2/M, z4.s, z9.s\n"
- "ld1w { z8.s }, p1/Z, [x25, x28, LSL #2]\n"
- "ld1w { z9.s }, p1/Z, [x23, x28, LSL #2]\n"
- "ld1w { z13.s }, p1/Z, [x22, x28, LSL #2]\n"
- "ld1w { z11.s }, p1/Z, [x21, x28, LSL #2]\n"
- "fmax z28.s, p2/M, z28.s, z18.s\n"
- "fmax z29.s, p2/M, z29.s, z18.s\n"
- "ld1w { z12.s }, p1/Z, [x20, x28, LSL #2]\n"
- "ld1w { z10.s }, p1/Z, [x10, x28, LSL #2]\n"
- "fmax z30.s, p2/M, z30.s, z18.s\n"
- "fmax z31.s, p2/M, z31.s, z18.s\n"
- "ld1w { z14.s }, p1/Z, [x9, x28, LSL #2]\n"
- "incw x28\n"
- "ld1w { z2.s }, p2/Z, [x27, #7, MUL VL]\n"
- "addvl x27, x27, #16\n"
- "whilelt p3.s, x13, %x[n_channels]\n"
- "cmp x28, %x[n_channels]\n"
- "ld1w { z3.s }, p2/Z, [x27, #-8, MUL VL]\n"
- "ld1w { z4.s }, p2/Z, [x27, #-7, MUL VL]\n"
- "fmin z28.s, p2/M, z28.s, z17.s\n"
- "fmin z29.s, p2/M, z29.s, z17.s\n"
- "st1w { z28.s }, p0, [x15, x24, LSL #2]\n"
- "fmin z30.s, p2/M, z30.s, z17.s\n"
- "fmin z31.s, p2/M, z31.s, z17.s\n"
- "st1w { z29.s }, p0, [x14, x24, LSL #2]\n"
- "st1w { z30.s }, p0, [x12, x24, LSL #2]\n"
- "addvl x27, x27, #-6\n"
- "st1w { z31.s }, p0, [x11, x24, LSL #2]\n"
+ "fmla z28.s, p3/M, z2.s, z11.s\n"
+ "ldp x24, x23, [x14, #0x10]\n"
+ "whilelt p2.s, x13, %x[n_channels]\n"
+ "fmla z31.s, p3/M, z3.s, z6.s\n"
+ "ld1w { z6.s }, p1/Z, [x25, x12, LSL #2]\n"
+ "ldp x22, x21, [x14, #0x20]\n"
+ "fmla z30.s, p3/M, z3.s, z8.s\n"
+ "ldp x20, x19, [x14, #0x30]\n"
+ "fmla z29.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z7.s }, p1/Z, [x24, x12, LSL #2]\n"
+ "fmla z28.s, p3/M, z3.s, z12.s\n"
+ "ld1w { z13.s }, p1/Z, [x21, x12, LSL #2]\n"
+ "fmla z31.s, p3/M, z4.s, z8.s\n"
+ "ld1w { z8.s }, p1/Z, [x23, x12, LSL #2]\n"
+ "fmla z30.s, p3/M, z4.s, z10.s\n"
+ "ld1w { z11.s }, p1/Z, [x20, x12, LSL #2]\n"
+ "fmla z29.s, p3/M, z4.s, z12.s\n"
+ "ld1w { z12.s }, p1/Z, [x19, x12, LSL #2]\n"
+ "fmla z28.s, p3/M, z4.s, z9.s\n"
+ "ld1w { z9.s }, p1/Z, [x22, x12, LSL #2]\n"
+ "fmax z31.s, p3/M, z31.s, z18.s\n"
+ "ldp x26, x25, [x14, #0x40]\n"
+ "fmax z30.s, p3/M, z30.s, z18.s\n"
+ "ld1w { z2.s }, p3/Z, [x15, #7, MUL VL]\n"
+ "fmax z29.s, p3/M, z29.s, z18.s\n"
+ "addvl x15, x15, #16\n"
+ "fmax z28.s, p3/M, z28.s, z18.s\n"
+ "ld1w { z10.s }, p1/Z, [x26, x12, LSL #2]\n"
+ "ld1w { z14.s }, p1/Z, [x25, x12, LSL #2]\n"
+ "fmin z31.s, p3/M, z31.s, z17.s\n"
+ "incw x12\n"
+ "fmin z30.s, p3/M, z30.s, z17.s\n"
+ "ld1w { z3.s }, p3/Z, [x15, #-8, MUL VL]\n"
+ "cmp x12, %x[n_channels]\n"
+ "fmin z29.s, p3/M, z29.s, z17.s\n"
+ "ld1w { z4.s }, p3/Z, [x15, #-7, MUL VL]\n"
+ "addvl x15, x15, #-6\n"
+ "fmin z28.s, p3/M, z28.s, z17.s\n"
+ "st1w { z31.s }, p0, [x11, x9, LSL #2]\n"
+ "st1w { z30.s }, p0, [x10, x9, LSL #2]\n"
+ "st1w { z29.s }, p0, [x28, x9, LSL #2]\n"
+ "st1w { z28.s }, p0, [x27, x9, LSL #2]\n"
"blt 1b\n"
"2:" // Channel tail
- "movprfx z28, z16\n fmla z28.s, p2/M, z0.s, z5.s\n"
- "movprfx z29, z16\n fmla z29.s, p2/M, z0.s, z6.s\n"
- "ldr x26, [x16, #0x50]\n"
- "ld1w { z5.s }, p3/Z, [x26, x13, LSL #2]\n"
- "movprfx z30, z16\n fmla z30.s, p2/M, z0.s, z7.s\n"
- "movprfx z31, z16\n fmla z31.s, p2/M, z0.s, z8.s\n"
- "ldr x25, [x16, #0x58]\n"
- "ldr x23, [x16, #0x60]\n"
- "fmla z28.s, p2/M, z1.s, z6.s\n"
- "fmla z29.s, p2/M, z1.s, z9.s\n"
- "ld1w { z6.s }, p3/Z, [x25, x13, LSL #2]\n"
- "ldr x22, [x16, #0x68]\n"
- "fmla z30.s, p2/M, z1.s, z8.s\n"
- "fmla z31.s, p2/M, z1.s, z13.s\n"
- "ld1w { z0.s }, p2/Z, [x27]\n"
- "ldr x21, [x16, #0x70]\n"
- "fmla z28.s, p2/M, z2.s, z9.s\n"
- "fmla z29.s, p2/M, z2.s, z11.s\n"
- "ld1w { z9.s }, p3/Z, [x23, x13, LSL #2]\n"
- "ld1w { z1.s }, p2/Z, [x27, #1, MUL VL]\n"
- "fmla z30.s, p2/M, z2.s, z13.s\n"
- "fmla z31.s, p2/M, z2.s, z5.s\n"
- "ldr x20, [x16, #0x78]\n"
- "ld1w { z2.s }, p2/Z, [x27, #2, MUL VL]\n"
- "fmla z28.s, p2/M, z3.s, z11.s\n"
- "fmla z29.s, p2/M, z3.s, z12.s\n"
- "ld1w { z11.s }, p3/Z, [x22, x13, LSL #2]\n"
- "ldr x10, [x16, #0x80]\n"
- "fmla z30.s, p2/M, z3.s, z5.s\n"
- "fmla z31.s, p2/M, z3.s, z6.s\n"
- "ld1w { z3.s }, p2/Z, [x27, #3, MUL VL]\n"
- "ldr x9, [x16, #0x88]\n"
- "fmla z28.s, p2/M, z4.s, z12.s\n"
- "fmla z29.s, p2/M, z4.s, z9.s\n"
- "ld1w { z12.s }, p3/Z, [x21, x13, LSL #2]\n"
- "ld1w { z9.s }, p3/Z, [x20, x13, LSL #2]\n"
- "fmla z30.s, p2/M, z4.s, z6.s\n"
- "fmla z31.s, p2/M, z4.s, z10.s\n"
- "ld1w { z4.s }, p2/Z, [x27, #4, MUL VL]\n"
- "ldr x26, [x16, #0x90]\n"
- "fmla z28.s, p2/M, z0.s, z7.s\n"
- "fmla z29.s, p2/M, z0.s, z8.s\n"
- "ldr x25, [x16, #0x98]\n"
- "ldr x23, [x16, #0xa0]\n"
- "fmla z30.s, p2/M, z0.s, z14.s\n"
- "fmla z31.s, p2/M, z0.s, z11.s\n"
- "ld1w { z0.s }, p2/Z, [x27, #5, MUL VL]\n"
- "ldr x22, [x16, #0xa8]\n"
- "fmla z28.s, p2/M, z1.s, z8.s\n"
- "fmla z29.s, p2/M, z1.s, z13.s\n"
- "ld1w { z8.s }, p3/Z, [x9, x13, LSL #2]\n"
- "ldr x21, [x16, #0xb0]\n"
- "fmla z30.s, p2/M, z1.s, z11.s\n"
- "fmla z31.s, p2/M, z1.s, z12.s\n"
- "ld1w { z1.s }, p2/Z, [x27, #6, MUL VL]\n"
- "ldr x20, [x16, #0xb8]\n"
- "fmla z28.s, p2/M, z2.s, z13.s\n"
- "fmla z29.s, p2/M, z2.s, z5.s\n"
- "ld1w { z13.s }, p3/Z, [x10, x13, LSL #2]\n"
- "ldr x10, [x16, #0xc0]\n"
- "fmla z30.s, p2/M, z2.s, z12.s\n"
- "fmla z31.s, p2/M, z2.s, z9.s\n"
- "ld1w { z2.s }, p2/Z, [x27, #7, MUL VL]\n"
- "addvl x27, x27, #16\n"
- "fmla z28.s, p2/M, z3.s, z5.s\n"
- "fmla z29.s, p2/M, z3.s, z6.s\n"
- "ld1w { z5.s }, p3/Z, [x26, x13, LSL #2]\n"
- "ldr x9, [x16, #0xc8]\n"
- "fmla z30.s, p2/M, z3.s, z9.s\n"
- "fmla z31.s, p2/M, z3.s, z13.s\n"
- "ld1w { z3.s }, p2/Z, [x27, #-8, MUL VL]\n"
- "ldr x26, [x16, #0xd0]\n"
- "fmla z28.s, p2/M, z4.s, z6.s\n"
- "fmla z29.s, p2/M, z4.s, z10.s\n"
- "ld1w { z6.s }, p3/Z, [x25, x13, LSL #2]\n"
- "ld1w { z10.s }, p3/Z, [x23, x13, LSL #2]\n"
- "fmla z30.s, p2/M, z4.s, z13.s\n"
- "fmla z31.s, p2/M, z4.s, z8.s\n"
- "ld1w { z4.s }, p2/Z, [x27, #-7, MUL VL]\n"
- "ldr x25, [x16, #0xd8]\n"
- "fmla z28.s, p2/M, z0.s, z14.s\n"
- "fmla z29.s, p2/M, z0.s, z11.s\n"
- "ld1w { z14.s }, p3/Z, [x20, x13, LSL #2]\n"
- "ldr x23, [x16, #0xe0]\n"
- "fmla z30.s, p2/M, z0.s, z5.s\n"
- "fmla z31.s, p2/M, z0.s, z6.s\n"
- "ld1w { z0.s }, p2/Z, [x27, #-6, MUL VL]\n"
- "ldr x20, [x16, #0xf8]\n"
- "fmla z28.s, p2/M, z1.s, z11.s\n"
- "fmla z29.s, p2/M, z1.s, z12.s\n"
- "ld1w { z11.s }, p3/Z, [x22, x13, LSL #2]\n"
- "ldr x22, [x16, #0xe8]\n"
- "fmla z30.s, p2/M, z1.s, z6.s\n"
- "fmla z31.s, p2/M, z1.s, z10.s\n"
- "ld1w { z1.s }, p2/Z, [x27, #-5, MUL VL]\n"
- "incw x24\n"
- "fmla z28.s, p2/M, z2.s, z12.s\n"
- "fmla z29.s, p2/M, z2.s, z9.s\n"
- "ld1w { z12.s }, p3/Z, [x21, x13, LSL #2]\n"
- "ldr x21, [x16, #0xf0]\n"
- "fmla z30.s, p2/M, z2.s, z10.s\n"
- "fmla z31.s, p2/M, z2.s, z11.s\n"
- "ld1w { z2.s }, p2/Z, [x27, #-4, MUL VL]\n"
- "mov p0.b, p3.b\n"
- "fmla z28.s, p2/M, z3.s, z9.s\n"
- "fmla z29.s, p2/M, z3.s, z13.s\n"
- "ld1w { z9.s }, p3/Z, [x10, x13, LSL #2]\n"
- "ldr x10, [x16, #0x100]\n"
- "fmla z30.s, p2/M, z3.s, z11.s\n"
- "fmla z31.s, p2/M, z3.s, z12.s\n"
- "ld1w { z3.s }, p2/Z, [x27, #-3, MUL VL]\n"
- "fmla z28.s, p2/M, z4.s, z13.s\n"
- "fmla z29.s, p2/M, z4.s, z8.s\n"
- "ld1w { z13.s }, p3/Z, [x9, x13, LSL #2]\n"
- "ld1w { z8.s }, p3/Z, [x23, x13, LSL #2]\n"
- "fmla z30.s, p2/M, z4.s, z12.s\n"
- "fmla z31.s, p2/M, z4.s, z14.s\n"
- "ld1w { z4.s }, p2/Z, [x27, #-2, MUL VL]\n"
- "ldr x9, [x16, #0x108]\n"
- "fmla z28.s, p2/M, z0.s, z5.s\n"
- "fmla z29.s, p2/M, z0.s, z6.s\n"
- "ld1w { z5.s }, p3/Z, [x26, x13, LSL #2]\n"
- "ldr x26, [x16, #0x110]\n"
- "fmla z30.s, p2/M, z0.s, z9.s\n"
- "fmla z31.s, p2/M, z0.s, z13.s\n"
- "ld1w { z0.s }, p2/Z, [x27, #-1, MUL VL]\n"
- "fmla z28.s, p2/M, z1.s, z6.s\n"
- "fmla z29.s, p2/M, z1.s, z10.s\n"
- "ld1w { z6.s }, p3/Z, [x25, x13, LSL #2]\n"
- "ldr x25, [x16, #0x118]\n"
- "fmla z30.s, p2/M, z1.s, z13.s\n"
- "fmla z31.s, p2/M, z1.s, z5.s\n"
- "ld1w { z1.s }, p2/Z, [x27]\n"
- "fmla z28.s, p2/M, z2.s, z10.s\n"
- "fmla z29.s, p2/M, z2.s, z11.s\n"
- "ld1w { z10.s }, p3/Z, [x22, x13, LSL #2]\n"
- "fmla z30.s, p2/M, z2.s, z5.s\n"
- "fmla z31.s, p2/M, z2.s, z6.s\n"
- "ld1w { z2.s }, p2/Z, [x27, #1, MUL VL]\n"
- "fmla z28.s, p2/M, z3.s, z11.s\n"
- "fmla z29.s, p2/M, z3.s, z12.s\n"
- "ld1w { z11.s }, p3/Z, [x21, x13, LSL #2]\n"
- "fmla z30.s, p2/M, z3.s, z6.s\n"
- "fmla z31.s, p2/M, z3.s, z8.s\n"
- "ld1w { z3.s }, p2/Z, [x27, #2, MUL VL]\n"
- "fmla z28.s, p2/M, z4.s, z12.s\n"
- "fmla z29.s, p2/M, z4.s, z14.s\n"
- "ld1w { z12.s }, p3/Z, [x20, x13, LSL #2]\n"
- "fmla z30.s, p2/M, z4.s, z8.s\n"
- "fmla z31.s, p2/M, z4.s, z10.s\n"
- "ld1w { z4.s }, p2/Z, [x27, #3, MUL VL]\n"
- "fmla z28.s, p2/M, z0.s, z9.s\n"
- "fmla z29.s, p2/M, z0.s, z13.s\n"
- "ld1w { z9.s }, p3/Z, [x10, x13, LSL #2]\n"
- "fmla z30.s, p2/M, z0.s, z11.s\n"
- "fmla z31.s, p2/M, z0.s, z12.s\n"
- "ld1w { z11.s }, p3/Z, [x9, x13, LSL #2]\n"
- "fmla z28.s, p2/M, z1.s, z13.s\n"
- "fmla z29.s, p2/M, z1.s, z5.s\n"
- "fmla z30.s, p2/M, z1.s, z12.s\n"
- "fmla z31.s, p2/M, z1.s, z9.s\n"
- "ld1w { z12.s }, p3/Z, [x26, x13, LSL #2]\n"
- "fmla z28.s, p2/M, z2.s, z5.s\n"
- "fmla z29.s, p2/M, z2.s, z6.s\n"
- "fmla z30.s, p2/M, z2.s, z9.s\n"
- "fmla z31.s, p2/M, z2.s, z11.s\n"
- "ld1w { z9.s }, p3/Z, [x25, x13, LSL #2]\n"
- "fmla z28.s, p2/M, z3.s, z6.s\n"
- "fmla z29.s, p2/M, z3.s, z8.s\n"
- "fmla z30.s, p2/M, z3.s, z11.s\n"
- "fmla z31.s, p2/M, z3.s, z12.s\n"
- "fmla z28.s, p2/M, z4.s, z8.s\n"
- "fmla z29.s, p2/M, z4.s, z10.s\n"
- "fmax z28.s, p2/M, z28.s, z18.s\n"
- "fmax z29.s, p2/M, z29.s, z18.s\n"
- "fmla z30.s, p2/M, z4.s, z12.s\n"
- "fmla z31.s, p2/M, z4.s, z9.s\n"
- "fmax z30.s, p2/M, z30.s, z18.s\n"
- "fmax z31.s, p2/M, z31.s, z18.s\n"
- "fmin z28.s, p2/M, z28.s, z17.s\n"
- "fmin z29.s, p2/M, z29.s, z17.s\n"
- "st1w { z28.s }, p0, [x15, x24, LSL #2]\n"
- "fmin z30.s, p2/M, z30.s, z17.s\n"
- "fmin z31.s, p2/M, z31.s, z17.s\n"
- "st1w { z29.s }, p0, [x14, x24, LSL #2]\n"
- "st1w { z30.s }, p0, [x12, x24, LSL #2]\n"
- "st1w { z31.s }, p0, [x11, x24, LSL #2]\n"
+ "movprfx z31, z16\n fmla z31.s, p3/M, z0.s, z5.s\n"
+ "ldr x24, [x14, #0x50]\n"
+ "incw x9\n"
+ "movprfx z30, z16\n fmla z30.s, p3/M, z0.s, z6.s\n"
+ "ldr x23, [x14, #0x58]\n"
+ "mov p0.b, p2.b\n"
+ "movprfx z29, z16\n fmla z29.s, p3/M, z0.s, z7.s\n"
+ "ldr x22, [x14, #0x60]\n"
+ "movprfx z28, z16\n fmla z28.s, p3/M, z0.s, z8.s\n"
+ "ld1w { z5.s }, p2/Z, [x24, x13, LSL #2]\n"
+ "ld1w { z0.s }, p3/Z, [x15]\n"
+ "fmla z31.s, p3/M, z1.s, z6.s\n"
+ "ld1w { z6.s }, p2/Z, [x23, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z1.s, z9.s\n"
+ "ldr x21, [x14, #0x68]\n"
+ "fmla z29.s, p3/M, z1.s, z8.s\n"
+ "fmla z28.s, p3/M, z1.s, z13.s\n"
+ "ld1w { z1.s }, p3/Z, [x15, #1, MUL VL]\n"
+ "ldr x20, [x14, #0x70]\n"
+ "fmla z31.s, p3/M, z2.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x22, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z2.s, z11.s\n"
+ "ldr x19, [x14, #0x78]\n"
+ "fmla z29.s, p3/M, z2.s, z13.s\n"
+ "fmla z28.s, p3/M, z2.s, z5.s\n"
+ "ld1w { z2.s }, p3/Z, [x15, #2, MUL VL]\n"
+ "ldr x26, [x14, #0x80]\n"
+ "fmla z31.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x21, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z3.s, z12.s\n"
+ "ldr x25, [x14, #0x88]\n"
+ "fmla z29.s, p3/M, z3.s, z5.s\n"
+ "fmla z28.s, p3/M, z3.s, z6.s\n"
+ "ld1w { z3.s }, p3/Z, [x15, #3, MUL VL]\n"
+ "ldr x24, [x14, #0x90]\n"
+ "fmla z31.s, p3/M, z4.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x20, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z4.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x19, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z4.s, z6.s\n"
+ "fmla z28.s, p3/M, z4.s, z10.s\n"
+ "ld1w { z4.s }, p3/Z, [x15, #4, MUL VL]\n"
+ "ldr x23, [x14, #0x98]\n"
+ "fmla z31.s, p3/M, z0.s, z7.s\n"
+ "ldr x22, [x14, #0xa0]\n"
+ "fmla z30.s, p3/M, z0.s, z8.s\n"
+ "ldr x21, [x14, #0xa8]\n"
+ "fmla z29.s, p3/M, z0.s, z14.s\n"
+ "fmla z28.s, p3/M, z0.s, z11.s\n"
+ "ld1w { z0.s }, p3/Z, [x15, #5, MUL VL]\n"
+ "ldr x20, [x14, #0xb0]\n"
+ "fmla z31.s, p3/M, z1.s, z8.s\n"
+ "ld1w { z8.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z1.s, z13.s\n"
+ "ldr x19, [x14, #0xb8]\n"
+ "fmla z29.s, p3/M, z1.s, z11.s\n"
+ "fmla z28.s, p3/M, z1.s, z12.s\n"
+ "ld1w { z1.s }, p3/Z, [x15, #6, MUL VL]\n"
+ "ldr x25, [x14, #0xc8]\n"
+ "fmla z31.s, p3/M, z2.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z2.s, z5.s\n"
+ "ldr x26, [x14, #0xc0]\n"
+ "fmla z29.s, p3/M, z2.s, z12.s\n"
+ "fmla z28.s, p3/M, z2.s, z9.s\n"
+ "ld1w { z2.s }, p3/Z, [x15, #7, MUL VL]\n"
+ "addvl x15, x15, #16\n"
+ "fmla z31.s, p3/M, z3.s, z5.s\n"
+ "ld1w { z5.s }, p2/Z, [x24, x13, LSL #2]\n"
+ "ldr x24, [x14, #0xd0]\n"
+ "fmla z30.s, p3/M, z3.s, z6.s\n"
+ "fmla z29.s, p3/M, z3.s, z9.s\n"
+ "fmla z28.s, p3/M, z3.s, z13.s\n"
+ "ld1w { z3.s }, p3/Z, [x15, #-8, MUL VL]\n"
+ "fmla z31.s, p3/M, z4.s, z6.s\n"
+ "ld1w { z6.s }, p2/Z, [x23, x13, LSL #2]\n"
+ "ldr x23, [x14, #0xd8]\n"
+ "fmla z30.s, p3/M, z4.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x22, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z4.s, z13.s\n"
+ "fmla z28.s, p3/M, z4.s, z8.s\n"
+ "ld1w { z4.s }, p3/Z, [x15, #-7, MUL VL]\n"
+ "ldr x22, [x14, #0xe0]\n"
+ "fmla z31.s, p3/M, z0.s, z14.s\n"
+ "ld1w { z14.s }, p2/Z, [x19, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z0.s, z11.s\n"
+ "ldr x19, [x14, #0xf8]\n"
+ "fmla z29.s, p3/M, z0.s, z5.s\n"
+ "fmla z28.s, p3/M, z0.s, z6.s\n"
+ "ld1w { z0.s }, p3/Z, [x15, #-6, MUL VL]\n"
+ "fmla z31.s, p3/M, z1.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x21, x13, LSL #2]\n"
+ "ldr x21, [x14, #0xe8]\n"
+ "fmla z30.s, p3/M, z1.s, z12.s\n"
+ "fmla z29.s, p3/M, z1.s, z6.s\n"
+ "fmla z28.s, p3/M, z1.s, z10.s\n"
+ "ld1w { z1.s }, p3/Z, [x15, #-5, MUL VL]\n"
+ "fmla z31.s, p3/M, z2.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x20, x13, LSL #2]\n"
+ "ldr x20, [x14, #0xf0]\n"
+ "fmla z30.s, p3/M, z2.s, z9.s\n"
+ "fmla z29.s, p3/M, z2.s, z10.s\n"
+ "fmla z28.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z2.s }, p3/Z, [x15, #-4, MUL VL]\n"
+ "fmla z31.s, p3/M, z3.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "ldr x26, [x14, #0x100]\n"
+ "fmla z30.s, p3/M, z3.s, z13.s\n"
+ "fmla z29.s, p3/M, z3.s, z11.s\n"
+ "fmla z28.s, p3/M, z3.s, z12.s\n"
+ "ld1w { z3.s }, p3/Z, [x15, #-3, MUL VL]\n"
+ "fmla z31.s, p3/M, z4.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "ldr x25, [x14, #0x108]\n"
+ "fmla z30.s, p3/M, z4.s, z8.s\n"
+ "ld1w { z8.s }, p2/Z, [x22, x13, LSL #2]\n"
+ "fmla z29.s, p3/M, z4.s, z12.s\n"
+ "fmla z28.s, p3/M, z4.s, z14.s\n"
+ "ld1w { z4.s }, p3/Z, [x15, #-2, MUL VL]\n"
+ "fmla z31.s, p3/M, z0.s, z5.s\n"
+ "ld1w { z5.s }, p2/Z, [x24, x13, LSL #2]\n"
+ "ldr x24, [x14, #0x110]\n"
+ "fmla z30.s, p3/M, z0.s, z6.s\n"
+ "fmla z29.s, p3/M, z0.s, z9.s\n"
+ "fmla z28.s, p3/M, z0.s, z13.s\n"
+ "ld1w { z0.s }, p3/Z, [x15, #-1, MUL VL]\n"
+ "fmla z31.s, p3/M, z1.s, z6.s\n"
+ "ld1w { z6.s }, p2/Z, [x23, x13, LSL #2]\n"
+ "ldr x23, [x14, #0x118]\n"
+ "fmla z30.s, p3/M, z1.s, z10.s\n"
+ "fmla z29.s, p3/M, z1.s, z13.s\n"
+ "fmla z28.s, p3/M, z1.s, z5.s\n"
+ "ld1w { z1.s }, p3/Z, [x15]\n"
+ "fmla z31.s, p3/M, z2.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x21, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z2.s, z11.s\n"
+ "fmla z29.s, p3/M, z2.s, z5.s\n"
+ "fmla z28.s, p3/M, z2.s, z6.s\n"
+ "ld1w { z2.s }, p3/Z, [x15, #1, MUL VL]\n"
+ "fmla z31.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x20, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z3.s, z12.s\n"
+ "fmla z29.s, p3/M, z3.s, z6.s\n"
+ "fmla z28.s, p3/M, z3.s, z8.s\n"
+ "ld1w { z3.s }, p3/Z, [x15, #2, MUL VL]\n"
+ "fmla z31.s, p3/M, z4.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x19, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z4.s, z14.s\n"
+ "fmla z29.s, p3/M, z4.s, z8.s\n"
+ "fmla z28.s, p3/M, z4.s, z10.s\n"
+ "ld1w { z4.s }, p3/Z, [x15, #3, MUL VL]\n"
+ "fmla z31.s, p3/M, z0.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "fmla z30.s, p3/M, z0.s, z13.s\n"
+ "fmla z29.s, p3/M, z0.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "fmla z28.s, p3/M, z0.s, z12.s\n"
+ "fmla z31.s, p3/M, z1.s, z13.s\n"
+ "fmla z30.s, p3/M, z1.s, z5.s\n"
+ "fmla z29.s, p3/M, z1.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x24, x13, LSL #2]\n"
+ "fmla z28.s, p3/M, z1.s, z9.s\n"
+ "fmla z31.s, p3/M, z2.s, z5.s\n"
+ "fmla z30.s, p3/M, z2.s, z6.s\n"
+ "fmla z29.s, p3/M, z2.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x23, x13, LSL #2]\n"
+ "fmla z28.s, p3/M, z2.s, z11.s\n"
+ "fmla z31.s, p3/M, z3.s, z6.s\n"
+ "fmla z30.s, p3/M, z3.s, z8.s\n"
+ "fmla z29.s, p3/M, z3.s, z11.s\n"
+ "fmla z28.s, p3/M, z3.s, z12.s\n"
+ "fmla z31.s, p3/M, z4.s, z8.s\n"
+ "fmla z30.s, p3/M, z4.s, z10.s\n"
+ "fmla z29.s, p3/M, z4.s, z12.s\n"
+ "fmla z28.s, p3/M, z4.s, z9.s\n"
+ "fmax z31.s, p3/M, z31.s, z18.s\n"
+ "fmax z30.s, p3/M, z30.s, z18.s\n"
+ "fmax z29.s, p3/M, z29.s, z18.s\n"
+ "fmax z28.s, p3/M, z28.s, z18.s\n"
+ "fmin z31.s, p3/M, z31.s, z17.s\n"
+ "st1w { z31.s }, p0, [x11, x9, LSL #2]\n"
+ "fmin z30.s, p3/M, z30.s, z17.s\n"
+ "fmin z29.s, p3/M, z29.s, z17.s\n"
+ "st1w { z30.s }, p0, [x10, x9, LSL #2]\n"
+ "fmin z28.s, p3/M, z28.s, z17.s\n"
+ "st1w { z29.s }, p0, [x28, x9, LSL #2]\n"
+ "st1w { z28.s }, p0, [x27, x9, LSL #2]\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z16", "z17", "z18", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z16", "z17", "z18", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_generic_output9_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_generic_output9_mla_depthfirst/generic.cpp
index 204f36edca..eac77516c2 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_generic_output9_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_generic_output9_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,118 +45,118 @@ void sve_fp32_nhwc_generic_output9_mla_depthfirst_impl(
__asm__ __volatile__(
"ptrue p1.b\n"
- "mov x11, #0x0\n"
- "ld1rw { z2.s }, p1/Z, [%x[minmax_vals]]\n"
- "ld1rw { z1.s }, p1/Z, [%x[minmax_vals], #4]\n"
- "whilelt p0.s, x11, %x[n_channels]\n"
+ "ld1rw { z4.s }, p1/Z, [%x[minmax_vals]]\n"
+ "mov x28, #0x0\n"
+ "ld1rw { z3.s }, p1/Z, [%x[minmax_vals], #4]\n"
+ "whilelt p0.s, x28, %x[n_channels]\n"
"1:" // Channel loop
- "mov z23.b, #0x0\n"
+ "mov z2.b, #0x0\n"
"cbz %x[bias], 2f\n"
- "ld1w { z23.s }, p0/Z, [%x[bias], x11, LSL #2]\n"
+ "ld1w { z2.s }, p0/Z, [%x[bias], x28, LSL #2]\n"
"2:" // Channel loop: Load bias: Done
- "mov x10, %x[inptrs]\n"
- "ldp x9, x28, [x10], #0x10\n"
- "ldp x27, x26, [x10], #0x10\n"
- "subs x25, %x[n_points], #0x1\n"
- "ldp x24, x23, [x10], #0x10\n"
- "ldp x22, x21, [x10], #0x10\n"
- "mov z24.d, z23.d\n"
- "mov z25.d, z23.d\n"
- "ldr x20, [x10], #0x8\n"
- "mov z26.d, z23.d\n"
- "mov z27.d, z23.d\n"
+ "mov z1.d, z2.d\n"
"ld1w { z0.s }, p1/Z, [%x[params]]\n"
- "mov z28.d, z23.d\n"
- "mov z29.d, z23.d\n"
- "ld1w { z14.s }, p0/Z, [x9, x11, LSL #2]\n"
- "ld1w { z15.s }, p0/Z, [x28, x11, LSL #2]\n"
- "mov z30.d, z23.d\n"
- "mov z31.d, z23.d\n"
- "ld1w { z16.s }, p0/Z, [x27, x11, LSL #2]\n"
- "ld1w { z17.s }, p0/Z, [x26, x11, LSL #2]\n"
- "ld1w { z18.s }, p0/Z, [x24, x11, LSL #2]\n"
- "ld1w { z19.s }, p0/Z, [x23, x11, LSL #2]\n"
+ "mov x22, %x[inptrs]\n"
+ "mov z31.d, z2.d\n"
+ "ldp x20, x19, [x22], #0x10\n"
+ "subs x21, %x[n_points], #0x1\n"
+ "mov z30.d, z2.d\n"
+ "ld1w { z29.s }, p0/Z, [x20, x28, LSL #2]\n"
+ "mov z28.d, z2.d\n"
"addvl %x[params], %x[params], #1\n"
- "ld1w { z20.s }, p0/Z, [x22, x11, LSL #2]\n"
- "ld1w { z21.s }, p0/Z, [x21, x11, LSL #2]\n"
- "ld1w { z22.s }, p0/Z, [x20, x11, LSL #2]\n"
+ "mov z27.d, z2.d\n"
+ "ld1w { z26.s }, p0/Z, [x19, x28, LSL #2]\n"
+ "mov z25.d, z2.d\n"
+ "ldp x20, x19, [x22], #0x10\n"
+ "mov z24.d, z2.d\n"
+ "ld1w { z23.s }, p0/Z, [x20, x28, LSL #2]\n"
+ "mov z22.d, z2.d\n"
+ "ld1w { z21.s }, p0/Z, [x19, x28, LSL #2]\n"
+ "ldp x20, x19, [x22], #0x10\n"
+ "ld1w { z20.s }, p0/Z, [x20, x28, LSL #2]\n"
+ "ld1w { z19.s }, p0/Z, [x19, x28, LSL #2]\n"
+ "ldp x20, x19, [x22], #0x10\n"
+ "ld1w { z18.s }, p0/Z, [x20, x28, LSL #2]\n"
+ "ld1w { z17.s }, p0/Z, [x19, x28, LSL #2]\n"
+ "ldr x19, [x22], #0x8\n"
+ "ld1w { z16.s }, p0/Z, [x19, x28, LSL #2]\n"
"ble 4f\n"
"3:" // Channel loop: Planar loop
- "ldp x9, x28, [x10], #0x10\n"
- "ldp x27, x26, [x10], #0x10\n"
- "subs x25, x25, #0x1\n"
- "fmla z23.s, p1/M, z14.s, z0.s\n"
- "ldp x24, x23, [x10], #0x10\n"
- "ldp x22, x21, [x10], #0x10\n"
- "fmla z24.s, p1/M, z15.s, z0.s\n"
- "fmla z25.s, p1/M, z16.s, z0.s\n"
- "ldr x20, [x10], #0x8\n"
- "fmla z26.s, p1/M, z17.s, z0.s\n"
- "fmla z27.s, p1/M, z18.s, z0.s\n"
- "ld1w { z14.s }, p0/Z, [x9, x11, LSL #2]\n"
- "fmla z28.s, p1/M, z19.s, z0.s\n"
- "fmla z29.s, p1/M, z20.s, z0.s\n"
- "ld1w { z15.s }, p0/Z, [x28, x11, LSL #2]\n"
- "ld1w { z16.s }, p0/Z, [x27, x11, LSL #2]\n"
+ "fmla z2.s, p1/M, z29.s, z0.s\n"
+ "ldp x20, x19, [x22], #0x10\n"
+ "subs x21, x21, #0x1\n"
+ "fmla z1.s, p1/M, z26.s, z0.s\n"
+ "ld1w { z29.s }, p0/Z, [x20, x28, LSL #2]\n"
+ "fmla z31.s, p1/M, z23.s, z0.s\n"
"fmla z30.s, p1/M, z21.s, z0.s\n"
- "fmla z31.s, p1/M, z22.s, z0.s\n"
+ "ld1w { z26.s }, p0/Z, [x19, x28, LSL #2]\n"
+ "fmla z28.s, p1/M, z20.s, z0.s\n"
+ "ldp x20, x19, [x22], #0x10\n"
+ "fmla z27.s, p1/M, z19.s, z0.s\n"
+ "ld1w { z23.s }, p0/Z, [x20, x28, LSL #2]\n"
+ "fmla z25.s, p1/M, z18.s, z0.s\n"
+ "fmla z24.s, p1/M, z17.s, z0.s\n"
+ "ld1w { z21.s }, p0/Z, [x19, x28, LSL #2]\n"
+ "fmla z22.s, p1/M, z16.s, z0.s\n"
"ld1w { z0.s }, p1/Z, [%x[params]]\n"
- "ld1w { z17.s }, p0/Z, [x26, x11, LSL #2]\n"
- "ld1w { z18.s }, p0/Z, [x24, x11, LSL #2]\n"
- "ld1w { z19.s }, p0/Z, [x23, x11, LSL #2]\n"
"addvl %x[params], %x[params], #1\n"
- "ld1w { z20.s }, p0/Z, [x22, x11, LSL #2]\n"
- "ld1w { z21.s }, p0/Z, [x21, x11, LSL #2]\n"
- "ld1w { z22.s }, p0/Z, [x20, x11, LSL #2]\n"
+ "ldp x20, x19, [x22], #0x10\n"
+ "ld1w { z20.s }, p0/Z, [x20, x28, LSL #2]\n"
+ "ld1w { z19.s }, p0/Z, [x19, x28, LSL #2]\n"
+ "ldp x20, x19, [x22], #0x10\n"
+ "ld1w { z18.s }, p0/Z, [x20, x28, LSL #2]\n"
+ "ld1w { z17.s }, p0/Z, [x19, x28, LSL #2]\n"
+ "ldr x19, [x22], #0x8\n"
+ "ld1w { z16.s }, p0/Z, [x19, x28, LSL #2]\n"
"bgt 3b\n"
"4:" // Channel loop: Planar tail
- "fmla z23.s, p1/M, z14.s, z0.s\n"
- "fmla z24.s, p1/M, z15.s, z0.s\n"
- "fmax z23.s, p1/M, z23.s, z2.s\n"
- "fmax z24.s, p1/M, z24.s, z2.s\n"
- "fmla z25.s, p1/M, z16.s, z0.s\n"
- "fmla z26.s, p1/M, z17.s, z0.s\n"
- "fmax z25.s, p1/M, z25.s, z2.s\n"
- "fmax z26.s, p1/M, z26.s, z2.s\n"
- "fmla z27.s, p1/M, z18.s, z0.s\n"
- "fmla z28.s, p1/M, z19.s, z0.s\n"
- "fmax z27.s, p1/M, z27.s, z2.s\n"
- "fmax z28.s, p1/M, z28.s, z2.s\n"
- "fmla z29.s, p1/M, z20.s, z0.s\n"
+ "fmla z2.s, p1/M, z29.s, z0.s\n"
+ "ldp x27, x26, [%x[outptrs], #0x0]\n"
+ "fmla z1.s, p1/M, z26.s, z0.s\n"
+ "ldp x25, x24, [%x[outptrs], #0x10]\n"
+ "fmla z31.s, p1/M, z23.s, z0.s\n"
+ "ldp x23, x22, [%x[outptrs], #0x20]\n"
"fmla z30.s, p1/M, z21.s, z0.s\n"
- "fmax z29.s, p1/M, z29.s, z2.s\n"
- "fmax z30.s, p1/M, z30.s, z2.s\n"
- "fmla z31.s, p1/M, z22.s, z0.s\n"
- "fmax z31.s, p1/M, z31.s, z2.s\n"
- "ldp x28, x27, [%x[outptrs], #0x0]\n"
- "ldp x26, x25, [%x[outptrs], #0x10]\n"
- "ldp x24, x23, [%x[outptrs], #0x20]\n"
- "ldp x22, x21, [%x[outptrs], #0x30]\n"
- "fmin z23.s, p1/M, z23.s, z1.s\n"
- "fmin z24.s, p1/M, z24.s, z1.s\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "fmin z25.s, p1/M, z25.s, z1.s\n"
- "fmin z26.s, p1/M, z26.s, z1.s\n"
- "st1w { z23.s }, p0, [x28, x11, LSL #2]\n"
- "fmin z27.s, p1/M, z27.s, z1.s\n"
- "fmin z28.s, p1/M, z28.s, z1.s\n"
- "st1w { z24.s }, p0, [x27, x11, LSL #2]\n"
- "fmin z29.s, p1/M, z29.s, z1.s\n"
- "fmin z30.s, p1/M, z30.s, z1.s\n"
- "st1w { z25.s }, p0, [x26, x11, LSL #2]\n"
- "fmin z31.s, p1/M, z31.s, z1.s\n"
- "st1w { z26.s }, p0, [x25, x11, LSL #2]\n"
- "st1w { z27.s }, p0, [x24, x11, LSL #2]\n"
- "st1w { z28.s }, p0, [x23, x11, LSL #2]\n"
- "st1w { z29.s }, p0, [x22, x11, LSL #2]\n"
- "st1w { z30.s }, p0, [x21, x11, LSL #2]\n"
- "st1w { z31.s }, p0, [x20, x11, LSL #2]\n"
- "incw x11\n"
- "whilelt p0.s, x11, %x[n_channels]\n"
+ "ldp x21, x20, [%x[outptrs], #0x30]\n"
+ "fmla z28.s, p1/M, z20.s, z0.s\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "fmla z27.s, p1/M, z19.s, z0.s\n"
+ "fmla z25.s, p1/M, z18.s, z0.s\n"
+ "fmla z24.s, p1/M, z17.s, z0.s\n"
+ "fmla z22.s, p1/M, z16.s, z0.s\n"
+ "fmax z2.s, p1/M, z2.s, z4.s\n"
+ "fmax z1.s, p1/M, z1.s, z4.s\n"
+ "fmax z31.s, p1/M, z31.s, z4.s\n"
+ "fmax z30.s, p1/M, z30.s, z4.s\n"
+ "fmin z2.s, p1/M, z2.s, z3.s\n"
+ "st1w { z2.s }, p0, [x27, x28, LSL #2]\n"
+ "fmin z1.s, p1/M, z1.s, z3.s\n"
+ "fmin z31.s, p1/M, z31.s, z3.s\n"
+ "st1w { z1.s }, p0, [x26, x28, LSL #2]\n"
+ "fmin z30.s, p1/M, z30.s, z3.s\n"
+ "fmax z28.s, p1/M, z28.s, z4.s\n"
+ "st1w { z31.s }, p0, [x25, x28, LSL #2]\n"
+ "fmax z27.s, p1/M, z27.s, z4.s\n"
+ "st1w { z30.s }, p0, [x24, x28, LSL #2]\n"
+ "fmin z28.s, p1/M, z28.s, z3.s\n"
+ "fmax z25.s, p1/M, z25.s, z4.s\n"
+ "st1w { z28.s }, p0, [x23, x28, LSL #2]\n"
+ "fmin z27.s, p1/M, z27.s, z3.s\n"
+ "fmin z25.s, p1/M, z25.s, z3.s\n"
+ "st1w { z27.s }, p0, [x22, x28, LSL #2]\n"
+ "fmax z24.s, p1/M, z24.s, z4.s\n"
+ "fmax z22.s, p1/M, z22.s, z4.s\n"
+ "st1w { z25.s }, p0, [x21, x28, LSL #2]\n"
+ "fmin z24.s, p1/M, z24.s, z3.s\n"
+ "st1w { z24.s }, p0, [x20, x28, LSL #2]\n"
+ "fmin z22.s, p1/M, z22.s, z3.s\n"
+ "st1w { z22.s }, p0, [x19, x28, LSL #2]\n"
+ "incw x28\n"
+ "whilelt p0.s, x28, %x[n_channels]\n"
"b.any 1b\n"
: [params] "+&r" (params)
: [bias] "r" (bias), [inptrs] "r" (inptrs), [minmax_vals] "r" (minmax_vals), [n_channels] "r" ((uint64_t) n_channels), [n_points] "r" ((uint64_t) n_points), [outptrs] "r" (outptrs)
- : "cc", "memory", "p0", "p1", "x9", "x10", "x11", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst/generic.cpp
index 7ba0edd991..395b112460 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,214 +42,214 @@ void sve_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst_imp
const float minmax_vals[2] = { activation_min, activation_max };
__asm__ __volatile__(
- "mov x17, #0x0\n"
- "whilelt p2.s, x17, %x[channel_multiplier]\n"
- "ldr x16, [%x[inptrs], #0x0]\n"
- "ldr x15, [%x[inptrs], #0x8]\n"
- "ptrue p1.b\n"
- "ldr x14, [%x[inptrs], #0x10]\n"
- "ldr x13, [%x[inptrs], #0x18]\n"
- "mov x12, #0x0\n"
- "ldr x11, [%x[inptrs], #0x20]\n"
- "ldr x10, [%x[inptrs], #0x28]\n"
- "ldr x9, [%x[inptrs], #0x30]\n"
- "ld1w { z26.s }, p2/Z, [%x[params]]\n"
- "mov z25.d, z26.d\n"
- "mov z24.d, z26.d\n"
- "ldp x28, x27, [%x[outptrs], #0x0]\n"
- "ldp x26, x25, [%x[outptrs], #0x10]\n"
- "mov z23.d, z26.d\n"
- "mov z22.d, z26.d\n"
- "ldp x24, x23, [%x[outptrs], #0x20]\n"
- "ldp x22, x21, [%x[outptrs], #0x30]\n"
- "mov z21.d, z26.d\n"
- "mov z20.d, z26.d\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "ld1rqw { z2.s }, p1/Z, [x16]\n"
- "mov z19.d, z26.d\n"
- "mov z18.d, z26.d\n"
- "ld1rqw { z3.s }, p1/Z, [x16, #16]\n"
- "ld1rqw { z4.s }, p1/Z, [x15]\n"
- "ld1rqw { z5.s }, p1/Z, [x15, #16]\n"
- "ld1rqw { z6.s }, p1/Z, [x14]\n"
- "ld1rqw { z7.s }, p1/Z, [x14, #16]\n"
- "ld1rqw { z8.s }, p1/Z, [x13]\n"
- "ld1rqw { z9.s }, p1/Z, [x13, #16]\n"
- "ld1rqw { z10.s }, p1/Z, [x11]\n"
- "ld1rqw { z11.s }, p1/Z, [x11, #16]\n"
- "ld1rqw { z12.s }, p1/Z, [x10]\n"
- "ld1rqw { z13.s }, p1/Z, [x10, #16]\n"
- "ld1rqw { z14.s }, p1/Z, [x9]\n"
- "ld1rqw { z15.s }, p1/Z, [x9, #16]\n"
- "ld1rw { z17.s }, p1/Z, [%x[clamps]]\n"
- "ld1rw { z16.s }, p1/Z, [%x[clamps], #4]\n"
- "ld1w { z31.s }, p2/Z, [%x[params], #1, MUL VL]\n"
- "ld1w { z30.s }, p2/Z, [%x[params], #2, MUL VL]\n"
- "ld1w { z29.s }, p2/Z, [%x[params], #3, MUL VL]\n"
+ "ldp x12, x11, [%x[outptrs], #0x0]\n"
+ "ptrue p2.b\n"
+ "ldp x10, x9, [%x[outptrs], #0x10]\n"
+ "mov x28, #0x0\n"
+ "ldp x27, x26, [%x[outptrs], #0x20]\n"
+ "mov x25, #0x0\n"
+ "ldp x24, x23, [%x[outptrs], #0x30]\n"
+ "whilelt p1.s, x28, %x[channel_multiplier]\n"
+ "ldr x22, [%x[outptrs], #0x40]\n"
+ "ldr x21, [%x[inptrs], #0x0]\n"
+ "ldr x20, [%x[inptrs], #0x8]\n"
+ "ldr x19, [%x[inptrs], #0x10]\n"
+ "ld1rqw { z2.s }, p2/Z, [x21]\n"
+ "ld1rqw { z3.s }, p2/Z, [x21, #16]\n"
+ "ld1rqw { z4.s }, p2/Z, [x20]\n"
+ "ld1rqw { z5.s }, p2/Z, [x20, #16]\n"
+ "ld1rqw { z6.s }, p2/Z, [x19]\n"
+ "ld1rqw { z7.s }, p2/Z, [x19, #16]\n"
+ "ldr x21, [%x[inptrs], #0x18]\n"
+ "ldr x20, [%x[inptrs], #0x20]\n"
+ "ldr x19, [%x[inptrs], #0x28]\n"
+ "ld1rqw { z8.s }, p2/Z, [x21]\n"
+ "ld1rqw { z9.s }, p2/Z, [x21, #16]\n"
+ "ld1rqw { z10.s }, p2/Z, [x20]\n"
+ "ld1rqw { z11.s }, p2/Z, [x20, #16]\n"
+ "ld1rqw { z12.s }, p2/Z, [x19]\n"
+ "ld1rqw { z13.s }, p2/Z, [x19, #16]\n"
+ "ldr x19, [%x[inptrs], #0x30]\n"
+ "ld1rw { z26.s }, p2/Z, [%x[clamps]]\n"
+ "ld1rw { z25.s }, p2/Z, [%x[clamps], #4]\n"
+ "ld1rqw { z14.s }, p2/Z, [x19]\n"
+ "ld1rqw { z15.s }, p2/Z, [x19, #16]\n"
+ "ld1w { z24.s }, p1/Z, [%x[params]]\n"
+ "mov z23.d, z24.d\n"
+ "ld1w { z31.s }, p1/Z, [%x[params], #1, MUL VL]\n"
+ "mov z22.d, z24.d\n"
+ "ld1w { z30.s }, p1/Z, [%x[params], #2, MUL VL]\n"
+ "mov z21.d, z24.d\n"
+ "ld1w { z29.s }, p1/Z, [%x[params], #3, MUL VL]\n"
"addvl %x[params], %x[params], #4\n"
+ "mov z20.d, z24.d\n"
+ "mov z19.d, z24.d\n"
+ "mov z18.d, z24.d\n"
+ "mov z17.d, z24.d\n"
+ "mov z16.d, z24.d\n"
"1:" // Output channel complete vector loop
- "fmla z26.s, z31.s, z2.s[0]\n"
- "fmla z23.s, z31.s, z6.s[0]\n"
"mov z0.d, z10.d\n"
- "incw x17\n"
- "fmla z22.s, z31.s, z6.s[2]\n"
- "fmla z21.s, z31.s, z7.s[0]\n"
+ "mov p0.b, p1.b\n"
"mov z1.d, z11.d\n"
- "mov p0.b, p2.b\n"
- "fmla z25.s, z31.s, z2.s[2]\n"
- "fmla z24.s, z31.s, z3.s[0]\n"
- "whilelt p2.s, x17, %x[channel_multiplier]\n"
- "fmla z20.s, z31.s, z0.s[0]\n"
- "fmla z19.s, z31.s, z0.s[2]\n"
- "fmla z18.s, z31.s, z1.s[0]\n"
- "fmla z26.s, z30.s, z2.s[1]\n"
- "ld1w { z31.s }, p1/Z, [%x[params]]\n"
- "fmla z23.s, z30.s, z6.s[1]\n"
- "fmla z22.s, z30.s, z6.s[3]\n"
- "fmla z21.s, z30.s, z7.s[1]\n"
- "fmla z25.s, z30.s, z2.s[3]\n"
- "fmla z24.s, z30.s, z3.s[1]\n"
- "fmla z20.s, z30.s, z0.s[1]\n"
- "fmla z19.s, z30.s, z0.s[3]\n"
- "fmla z18.s, z30.s, z1.s[1]\n"
- "ld1w { z30.s }, p1/Z, [%x[params], #1, MUL VL]\n"
- "fmla z26.s, z29.s, z2.s[2]\n"
- "fmla z23.s, z29.s, z6.s[2]\n"
- "fmla z22.s, z29.s, z7.s[0]\n"
- "fmla z21.s, z29.s, z7.s[2]\n"
- "fmla z25.s, z29.s, z3.s[0]\n"
- "fmla z24.s, z29.s, z3.s[2]\n"
- "fmla z20.s, z29.s, z0.s[2]\n"
- "fmla z19.s, z29.s, z1.s[0]\n"
+ "incw x28\n"
+ "fmla z24.s, z31.s, z2.s[0]\n"
+ "whilelt p1.s, x28, %x[channel_multiplier]\n"
+ "fmla z23.s, z31.s, z2.s[2]\n"
+ "fmla z22.s, z31.s, z3.s[0]\n"
+ "fmla z21.s, z31.s, z6.s[0]\n"
+ "fmla z20.s, z31.s, z6.s[2]\n"
+ "fmla z19.s, z31.s, z7.s[0]\n"
+ "fmla z18.s, z31.s, z0.s[0]\n"
+ "fmla z17.s, z31.s, z0.s[2]\n"
+ "fmla z16.s, z31.s, z1.s[0]\n"
+ "ld1w { z31.s }, p2/Z, [%x[params]]\n"
+ "fmla z24.s, z30.s, z2.s[1]\n"
+ "fmla z23.s, z30.s, z2.s[3]\n"
+ "fmla z22.s, z30.s, z3.s[1]\n"
+ "fmla z21.s, z30.s, z6.s[1]\n"
+ "fmla z20.s, z30.s, z6.s[3]\n"
+ "fmla z19.s, z30.s, z7.s[1]\n"
+ "fmla z18.s, z30.s, z0.s[1]\n"
+ "fmla z17.s, z30.s, z0.s[3]\n"
+ "fmla z16.s, z30.s, z1.s[1]\n"
+ "ld1w { z30.s }, p2/Z, [%x[params], #1, MUL VL]\n"
+ "fmla z24.s, z29.s, z2.s[2]\n"
+ "fmla z23.s, z29.s, z3.s[0]\n"
+ "fmla z22.s, z29.s, z3.s[2]\n"
+ "fmla z21.s, z29.s, z6.s[2]\n"
+ "fmla z20.s, z29.s, z7.s[0]\n"
+ "fmla z19.s, z29.s, z7.s[2]\n"
+ "fmla z18.s, z29.s, z0.s[2]\n"
"mov z0.d, z8.d\n"
- "fmla z18.s, z29.s, z1.s[2]\n"
+ "fmla z17.s, z29.s, z1.s[0]\n"
+ "fmla z16.s, z29.s, z1.s[2]\n"
+ "ld1w { z29.s }, p2/Z, [%x[params], #2, MUL VL]\n"
"mov z1.d, z9.d\n"
- "fmla z26.s, z31.s, z4.s[0]\n"
- "ld1w { z29.s }, p1/Z, [%x[params], #2, MUL VL]\n"
- "fmla z23.s, z31.s, z0.s[0]\n"
- "fmla z22.s, z31.s, z0.s[2]\n"
+ "fmla z24.s, z31.s, z4.s[0]\n"
+ "fmla z23.s, z31.s, z4.s[2]\n"
+ "fmla z22.s, z31.s, z5.s[0]\n"
+ "fmla z21.s, z31.s, z0.s[0]\n"
+ "fmla z20.s, z31.s, z0.s[2]\n"
"mov z0.d, z12.d\n"
- "fmla z21.s, z31.s, z1.s[0]\n"
+ "fmla z19.s, z31.s, z1.s[0]\n"
"mov z1.d, z13.d\n"
- "fmla z25.s, z31.s, z4.s[2]\n"
- "fmla z24.s, z31.s, z5.s[0]\n"
- "fmla z20.s, z31.s, z0.s[0]\n"
- "fmla z19.s, z31.s, z0.s[2]\n"
- "fmla z18.s, z31.s, z1.s[0]\n"
+ "fmla z18.s, z31.s, z0.s[0]\n"
+ "fmla z17.s, z31.s, z0.s[2]\n"
"mov z0.d, z8.d\n"
- "ld1w { z31.s }, p1/Z, [%x[params], #3, MUL VL]\n"
+ "fmla z16.s, z31.s, z1.s[0]\n"
+ "ld1w { z31.s }, p2/Z, [%x[params], #3, MUL VL]\n"
"mov z1.d, z9.d\n"
- "fmla z26.s, z30.s, z4.s[1]\n"
- "fmla z23.s, z30.s, z0.s[1]\n"
- "fmla z22.s, z30.s, z0.s[3]\n"
- "fmla z21.s, z30.s, z1.s[1]\n"
+ "fmla z24.s, z30.s, z4.s[1]\n"
+ "fmla z23.s, z30.s, z4.s[3]\n"
+ "fmla z22.s, z30.s, z5.s[1]\n"
+ "fmla z21.s, z30.s, z0.s[1]\n"
+ "fmla z20.s, z30.s, z0.s[3]\n"
"mov z0.d, z12.d\n"
+ "fmla z19.s, z30.s, z1.s[1]\n"
"mov z1.d, z13.d\n"
- "fmla z25.s, z30.s, z4.s[3]\n"
- "fmla z24.s, z30.s, z5.s[1]\n"
- "fmla z20.s, z30.s, z0.s[1]\n"
- "fmla z19.s, z30.s, z0.s[3]\n"
+ "fmla z18.s, z30.s, z0.s[1]\n"
+ "fmla z17.s, z30.s, z0.s[3]\n"
"mov z0.d, z8.d\n"
- "fmla z18.s, z30.s, z1.s[1]\n"
+ "fmla z16.s, z30.s, z1.s[1]\n"
+ "ld1w { z30.s }, p2/Z, [%x[params], #4, MUL VL]\n"
"mov z1.d, z9.d\n"
- "fmla z26.s, z29.s, z4.s[2]\n"
- "ld1w { z30.s }, p1/Z, [%x[params], #4, MUL VL]\n"
- "fmla z23.s, z29.s, z0.s[2]\n"
- "fmla z22.s, z29.s, z1.s[0]\n"
+ "fmla z24.s, z29.s, z4.s[2]\n"
+ "fmla z23.s, z29.s, z5.s[0]\n"
+ "fmla z22.s, z29.s, z5.s[2]\n"
+ "fmla z21.s, z29.s, z0.s[2]\n"
"mov z0.d, z12.d\n"
- "fmla z21.s, z29.s, z1.s[2]\n"
+ "fmla z20.s, z29.s, z1.s[0]\n"
+ "fmla z19.s, z29.s, z1.s[2]\n"
"mov z1.d, z13.d\n"
- "fmla z25.s, z29.s, z5.s[0]\n"
- "fmla z24.s, z29.s, z5.s[2]\n"
- "fmla z20.s, z29.s, z0.s[2]\n"
+ "fmla z18.s, z29.s, z0.s[2]\n"
"mov z0.d, z10.d\n"
- "fmla z19.s, z29.s, z1.s[0]\n"
- "fmla z18.s, z29.s, z1.s[2]\n"
+ "fmla z17.s, z29.s, z1.s[0]\n"
+ "fmla z16.s, z29.s, z1.s[2]\n"
+ "ld1w { z29.s }, p2/Z, [%x[params], #5, MUL VL]\n"
"mov z1.d, z11.d\n"
- "ld1w { z29.s }, p1/Z, [%x[params], #5, MUL VL]\n"
- "fmla z26.s, z31.s, z6.s[0]\n"
- "fmla z23.s, z31.s, z0.s[0]\n"
- "fmla z22.s, z31.s, z0.s[2]\n"
- "fmla z21.s, z31.s, z1.s[0]\n"
+ "fmla z24.s, z31.s, z6.s[0]\n"
+ "fmla z23.s, z31.s, z6.s[2]\n"
+ "fmla z22.s, z31.s, z7.s[0]\n"
+ "fmla z21.s, z31.s, z0.s[0]\n"
+ "fmla z20.s, z31.s, z0.s[2]\n"
"mov z0.d, z14.d\n"
+ "fmla z19.s, z31.s, z1.s[0]\n"
"mov z1.d, z15.d\n"
- "fmla z25.s, z31.s, z6.s[2]\n"
- "fmla z24.s, z31.s, z7.s[0]\n"
- "fmla z20.s, z31.s, z0.s[0]\n"
- "fmla z19.s, z31.s, z0.s[2]\n"
+ "fmla z18.s, z31.s, z0.s[0]\n"
+ "fmla z17.s, z31.s, z0.s[2]\n"
"mov z0.d, z10.d\n"
- "fmla z18.s, z31.s, z1.s[0]\n"
+ "fmla z16.s, z31.s, z1.s[0]\n"
+ "ld1w { z31.s }, p1/Z, [%x[params], #7, MUL VL]\n"
"mov z1.d, z11.d\n"
- "fmla z26.s, z30.s, z6.s[1]\n"
- "ld1w { z31.s }, p2/Z, [%x[params], #7, MUL VL]\n"
- "fmla z23.s, z30.s, z0.s[1]\n"
- "fmla z22.s, z30.s, z0.s[3]\n"
+ "fmla z24.s, z30.s, z6.s[1]\n"
+ "fmla z23.s, z30.s, z6.s[3]\n"
+ "fmla z22.s, z30.s, z7.s[1]\n"
+ "fmla z21.s, z30.s, z0.s[1]\n"
+ "fmla z20.s, z30.s, z0.s[3]\n"
"mov z0.d, z14.d\n"
- "fmla z21.s, z30.s, z1.s[1]\n"
+ "fmla z19.s, z30.s, z1.s[1]\n"
"mov z1.d, z15.d\n"
- "fmla z25.s, z30.s, z6.s[3]\n"
- "fmla z24.s, z30.s, z7.s[1]\n"
- "fmla z20.s, z30.s, z0.s[1]\n"
- "fmla z19.s, z30.s, z0.s[3]\n"
- "fmla z18.s, z30.s, z1.s[1]\n"
+ "fmla z18.s, z30.s, z0.s[1]\n"
+ "fmla z17.s, z30.s, z0.s[3]\n"
"mov z0.d, z10.d\n"
+ "fmla z16.s, z30.s, z1.s[1]\n"
"mov z1.d, z11.d\n"
- "fmla z26.s, z29.s, z6.s[2]\n"
- "fmla z23.s, z29.s, z0.s[2]\n"
- "fmin z26.s, p1/M, z26.s, z16.s\n"
- "fmla z22.s, z29.s, z1.s[0]\n"
- "fmla z21.s, z29.s, z1.s[2]\n"
+ "fmla z24.s, z29.s, z6.s[2]\n"
+ "fmla z23.s, z29.s, z7.s[0]\n"
+ "fmla z22.s, z29.s, z7.s[2]\n"
+ "fmla z21.s, z29.s, z0.s[2]\n"
"mov z0.d, z14.d\n"
- "fmax z26.s, p1/M, z26.s, z17.s\n"
+ "fmla z20.s, z29.s, z1.s[0]\n"
+ "fmla z19.s, z29.s, z1.s[2]\n"
"mov z1.d, z15.d\n"
- "fmla z25.s, z29.s, z7.s[0]\n"
- "fmla z24.s, z29.s, z7.s[2]\n"
- "fmin z25.s, p1/M, z25.s, z16.s\n"
- "fmla z20.s, z29.s, z0.s[2]\n"
- "fmla z19.s, z29.s, z1.s[0]\n"
- "fmin z24.s, p1/M, z24.s, z16.s\n"
- "fmin z23.s, p1/M, z23.s, z16.s\n"
- "fmla z18.s, z29.s, z1.s[2]\n"
- "fmin z22.s, p1/M, z22.s, z16.s\n"
- "fmin z21.s, p1/M, z21.s, z16.s\n"
- "st1w { z26.s }, p0, [x28, x12, LSL #2]\n"
- "fmin z20.s, p1/M, z20.s, z16.s\n"
- "fmin z19.s, p1/M, z19.s, z16.s\n"
- "ld1w { z26.s }, p2/Z, [%x[params], #6, MUL VL]\n"
- "fmin z18.s, p1/M, z18.s, z16.s\n"
+ "fmla z18.s, z29.s, z0.s[2]\n"
+ "fmla z17.s, z29.s, z1.s[0]\n"
+ "fmla z16.s, z29.s, z1.s[2]\n"
+ "fmin z24.s, p2/M, z24.s, z25.s\n"
+ "fmin z23.s, p2/M, z23.s, z25.s\n"
+ "fmin z22.s, p2/M, z22.s, z25.s\n"
+ "fmin z21.s, p2/M, z21.s, z25.s\n"
+ "fmax z24.s, p2/M, z24.s, z26.s\n"
+ "st1w { z24.s }, p0, [x12, x25, LSL #2]\n"
+ "fmax z23.s, p2/M, z23.s, z26.s\n"
+ "fmax z22.s, p2/M, z22.s, z26.s\n"
+ "ld1w { z24.s }, p1/Z, [%x[params], #6, MUL VL]\n"
"addvl %x[params], %x[params], #16\n"
- "ld1w { z30.s }, p2/Z, [%x[params], #-8, MUL VL]\n"
- "ld1w { z29.s }, p2/Z, [%x[params], #-7, MUL VL]\n"
- "fmax z25.s, p1/M, z25.s, z17.s\n"
- "fmax z24.s, p1/M, z24.s, z17.s\n"
- "st1w { z25.s }, p0, [x27, x12, LSL #2]\n"
- "mov z25.d, z26.d\n"
- "fmax z23.s, p1/M, z23.s, z17.s\n"
- "fmax z22.s, p1/M, z22.s, z17.s\n"
- "st1w { z24.s }, p0, [x26, x12, LSL #2]\n"
- "mov z24.d, z26.d\n"
- "fmax z21.s, p1/M, z21.s, z17.s\n"
- "fmax z20.s, p1/M, z20.s, z17.s\n"
- "st1w { z23.s }, p0, [x25, x12, LSL #2]\n"
- "mov z23.d, z26.d\n"
- "fmax z19.s, p1/M, z19.s, z17.s\n"
- "fmax z18.s, p1/M, z18.s, z17.s\n"
- "st1w { z22.s }, p0, [x24, x12, LSL #2]\n"
- "mov z22.d, z26.d\n"
- "st1w { z21.s }, p0, [x23, x12, LSL #2]\n"
- "mov z21.d, z26.d\n"
+ "fmax z21.s, p2/M, z21.s, z26.s\n"
+ "ld1w { z30.s }, p1/Z, [%x[params], #-8, MUL VL]\n"
+ "fmin z20.s, p2/M, z20.s, z25.s\n"
+ "ld1w { z29.s }, p1/Z, [%x[params], #-7, MUL VL]\n"
"addvl %x[params], %x[params], #-6\n"
- "st1w { z20.s }, p0, [x22, x12, LSL #2]\n"
- "mov z20.d, z26.d\n"
- "st1w { z19.s }, p0, [x21, x12, LSL #2]\n"
- "mov z19.d, z26.d\n"
- "st1w { z18.s }, p0, [x20, x12, LSL #2]\n"
- "incw x12\n"
- "mov z18.d, z26.d\n"
+ "fmin z19.s, p2/M, z19.s, z25.s\n"
+ "st1w { z23.s }, p0, [x11, x25, LSL #2]\n"
+ "mov z23.d, z24.d\n"
+ "st1w { z22.s }, p0, [x10, x25, LSL #2]\n"
+ "mov z22.d, z24.d\n"
+ "st1w { z21.s }, p0, [x9, x25, LSL #2]\n"
+ "mov z21.d, z24.d\n"
+ "fmax z20.s, p2/M, z20.s, z26.s\n"
+ "st1w { z20.s }, p0, [x27, x25, LSL #2]\n"
+ "mov z20.d, z24.d\n"
+ "fmax z19.s, p2/M, z19.s, z26.s\n"
+ "st1w { z19.s }, p0, [x26, x25, LSL #2]\n"
+ "mov z19.d, z24.d\n"
+ "fmin z18.s, p2/M, z18.s, z25.s\n"
+ "fmin z17.s, p2/M, z17.s, z25.s\n"
+ "fmin z16.s, p2/M, z16.s, z25.s\n"
+ "fmax z18.s, p2/M, z18.s, z26.s\n"
+ "st1w { z18.s }, p0, [x24, x25, LSL #2]\n"
+ "mov z18.d, z24.d\n"
+ "fmax z17.s, p2/M, z17.s, z26.s\n"
+ "st1w { z17.s }, p0, [x23, x25, LSL #2]\n"
+ "mov z17.d, z24.d\n"
+ "fmax z16.s, p2/M, z16.s, z26.s\n"
+ "st1w { z16.s }, p0, [x22, x25, LSL #2]\n"
+ "mov z16.d, z24.d\n"
+ "incw x25\n"
"b.any 1b\n"
: [params] "+&r" (params)
: [channel_multiplier] "r" (n_output_channels), [clamps] "r" (minmax_vals), [inptrs] "r" (inptrs), [outptrs] "r" (outptrs)
- : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z29", "z30", "z31"
);
}
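
Editor's note on the hunk above: the reverted code swaps the roles of the predicate registers (p2 returns to being the all-true predicate and p1 the channel-tail predicate), shifts the accumulators down to z16–z24, and restores x19 to the clobber list. The hand-rolled `whilelt`/`incw`/`b.any 1b` sequence is the standard SVE predicated channel loop; the sketch below shows the same control flow in ACLE intrinsics. It is illustrative only — the function name and parameters are invented, not ComputeLibrary code, and the per-kernel-point fmla work is elided.

#include <arm_sve.h>
#include <cstdint>

// Hypothetical sketch (names invented): the predicated channel loop
// that "whilelt p1.s / incw / b.any 1b" implements by hand above.
void channel_loop(const float *params, const float *clamps, float *out,
                  uint64_t n_channels)
{
  const svbool_t all = svptrue_b32();
  for (uint64_t c = 0; c < n_channels; c += svcntw())
  {
    // Tail predicate: lanes are active only for the channels that
    // remain, so the final partial vector needs no scalar epilogue.
    const svbool_t pg = svwhilelt_b32_u64(c, n_channels);
    svfloat32_t acc = svld1_f32(pg, params + c);   // bias / seed value
    // ... per-kernel-point fmla accumulation elided ...
    acc = svmin_n_f32_x(all, acc, clamps[1]);      // activation max
    acc = svmax_n_f32_x(all, acc, clamps[0]);      // activation min
    svst1_f32(pg, out + c, acc);                   // masked store
  }
}
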
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst/generic.cpp
index 2ea116fc9e..e7193d625f 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,347 +42,347 @@ void sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
const float minmax_vals[2] = { activation_min, activation_max };
__asm__ __volatile__(
- "mov x15, #0x0\n"
- "whilelt p2.s, x15, %x[channel_multiplier]\n"
- "ldr x14, [%x[inptrs], #0x0]\n"
- "ldr x13, [%x[inptrs], #0x8]\n"
- "ptrue p1.b\n"
- "ldr x12, [%x[inptrs], #0x10]\n"
- "ldr x11, [%x[inptrs], #0x18]\n"
- "mov x10, #0x0\n"
- "ldr x9, [%x[inptrs], #0x20]\n"
- "ldr x28, [%x[inptrs], #0x28]\n"
- "ld1w { z25.s }, p2/Z, [%x[params]]\n"
- "ldp x27, x26, [%x[outptrs], #0x0]\n"
- "mov z24.d, z25.d\n"
- "mov z23.d, z25.d\n"
- "ldp x25, x24, [%x[outptrs], #0x10]\n"
- "ldp x23, x22, [%x[outptrs], #0x20]\n"
- "mov z22.d, z25.d\n"
- "mov z21.d, z25.d\n"
- "ldp x21, x20, [%x[outptrs], #0x30]\n"
- "ld1rqw { z2.s }, p1/Z, [x14]\n"
- "mov z20.d, z25.d\n"
- "mov z19.d, z25.d\n"
- "ld1rqw { z3.s }, p1/Z, [x14, #16]\n"
- "ld1rqw { z4.s }, p1/Z, [x13]\n"
- "mov z18.d, z25.d\n"
- "ld1rqw { z5.s }, p1/Z, [x13, #16]\n"
- "ld1rqw { z6.s }, p1/Z, [x12]\n"
- "ld1rqw { z7.s }, p1/Z, [x12, #16]\n"
- "ld1rqw { z8.s }, p1/Z, [x11]\n"
- "ld1rqw { z9.s }, p1/Z, [x11, #16]\n"
- "ld1rqw { z10.s }, p1/Z, [x9]\n"
- "ld1rqw { z11.s }, p1/Z, [x9, #16]\n"
- "ld1rqw { z12.s }, p1/Z, [x28]\n"
- "ld1rqw { z13.s }, p1/Z, [x28, #16]\n"
- "ld1rw { z17.s }, p1/Z, [%x[clamps]]\n"
- "ld1rw { z16.s }, p1/Z, [%x[clamps], #4]\n"
- "ld1w { z31.s }, p2/Z, [%x[params], #1, MUL VL]\n"
- "ld1w { z30.s }, p2/Z, [%x[params], #2, MUL VL]\n"
- "ld1w { z29.s }, p2/Z, [%x[params], #3, MUL VL]\n"
- "ld1w { z28.s }, p2/Z, [%x[params], #4, MUL VL]\n"
- "ld1w { z27.s }, p2/Z, [%x[params], #5, MUL VL]\n"
+ "ldp x11, x10, [%x[outptrs], #0x0]\n"
+ "ptrue p2.b\n"
+ "ldp x9, x28, [%x[outptrs], #0x10]\n"
+ "mov x27, #0x0\n"
+ "ldp x26, x25, [%x[outptrs], #0x20]\n"
+ "mov x24, #0x0\n"
+ "ldp x23, x22, [%x[outptrs], #0x30]\n"
+ "whilelt p1.s, x27, %x[channel_multiplier]\n"
+ "ldr x21, [%x[inptrs], #0x0]\n"
+ "ldr x20, [%x[inptrs], #0x8]\n"
+ "ldr x19, [%x[inptrs], #0x10]\n"
+ "ld1rqw { z2.s }, p2/Z, [x21]\n"
+ "ld1rqw { z3.s }, p2/Z, [x21, #16]\n"
+ "ld1rqw { z4.s }, p2/Z, [x20]\n"
+ "ld1rqw { z5.s }, p2/Z, [x20, #16]\n"
+ "ld1rqw { z6.s }, p2/Z, [x19]\n"
+ "ld1rqw { z7.s }, p2/Z, [x19, #16]\n"
+ "ldr x21, [%x[inptrs], #0x18]\n"
+ "ldr x20, [%x[inptrs], #0x20]\n"
+ "ldr x19, [%x[inptrs], #0x28]\n"
+ "ld1rqw { z8.s }, p2/Z, [x21]\n"
+ "ld1rqw { z9.s }, p2/Z, [x21, #16]\n"
+ "ld1rqw { z10.s }, p2/Z, [x20]\n"
+ "ld1rqw { z11.s }, p2/Z, [x20, #16]\n"
+ "ld1rqw { z12.s }, p2/Z, [x19]\n"
+ "ld1rqw { z13.s }, p2/Z, [x19, #16]\n"
+ "ld1rw { z25.s }, p2/Z, [%x[clamps]]\n"
+ "ld1rw { z24.s }, p2/Z, [%x[clamps], #4]\n"
+ "ld1w { z23.s }, p1/Z, [%x[params]]\n"
+ "mov z22.d, z23.d\n"
+ "ld1w { z31.s }, p1/Z, [%x[params], #1, MUL VL]\n"
+ "mov z21.d, z23.d\n"
+ "ld1w { z30.s }, p1/Z, [%x[params], #2, MUL VL]\n"
+ "mov z20.d, z23.d\n"
+ "ld1w { z29.s }, p1/Z, [%x[params], #3, MUL VL]\n"
+ "mov z19.d, z23.d\n"
+ "ld1w { z28.s }, p1/Z, [%x[params], #4, MUL VL]\n"
+ "mov z18.d, z23.d\n"
+ "ld1w { z27.s }, p1/Z, [%x[params], #5, MUL VL]\n"
"addvl %x[params], %x[params], #6\n"
+ "mov z17.d, z23.d\n"
+ "mov z16.d, z23.d\n"
"1:" // Output channel complete vector loop
- "fmla z25.s, z31.s, z2.s[0]\n"
- "fmla z24.s, z31.s, z2.s[1]\n"
"mov z0.d, z8.d\n"
- "incw x15\n"
- "fmla z23.s, z31.s, z2.s[2]\n"
- "fmla z22.s, z31.s, z2.s[3]\n"
+ "mov p0.b, p1.b\n"
"mov z1.d, z9.d\n"
- "mov p0.b, p2.b\n"
- "fmla z21.s, z31.s, z4.s[0]\n"
- "fmla z20.s, z31.s, z4.s[1]\n"
- "whilelt p2.s, x15, %x[channel_multiplier]\n"
- "fmla z19.s, z31.s, z4.s[2]\n"
- "fmla z18.s, z31.s, z4.s[3]\n"
- "ld1w { z31.s }, p1/Z, [%x[params]]\n"
- "fmla z25.s, z30.s, z2.s[1]\n"
- "fmla z24.s, z30.s, z2.s[2]\n"
- "fmla z23.s, z30.s, z2.s[3]\n"
- "fmla z22.s, z30.s, z3.s[0]\n"
- "fmla z21.s, z30.s, z4.s[1]\n"
- "fmla z20.s, z30.s, z4.s[2]\n"
- "fmla z19.s, z30.s, z4.s[3]\n"
- "fmla z18.s, z30.s, z5.s[0]\n"
- "ld1w { z30.s }, p1/Z, [%x[params], #1, MUL VL]\n"
- "fmla z25.s, z29.s, z2.s[2]\n"
- "fmla z24.s, z29.s, z2.s[3]\n"
- "fmla z23.s, z29.s, z3.s[0]\n"
- "fmla z22.s, z29.s, z3.s[1]\n"
- "fmla z21.s, z29.s, z4.s[2]\n"
- "fmla z20.s, z29.s, z4.s[3]\n"
- "fmla z19.s, z29.s, z5.s[0]\n"
- "fmla z18.s, z29.s, z5.s[1]\n"
- "ld1w { z29.s }, p1/Z, [%x[params], #2, MUL VL]\n"
- "fmla z25.s, z28.s, z2.s[3]\n"
- "fmla z24.s, z28.s, z3.s[0]\n"
- "fmla z23.s, z28.s, z3.s[1]\n"
- "fmla z22.s, z28.s, z3.s[2]\n"
- "fmla z21.s, z28.s, z4.s[3]\n"
- "fmla z20.s, z28.s, z5.s[0]\n"
- "fmla z19.s, z28.s, z5.s[1]\n"
- "fmla z18.s, z28.s, z5.s[2]\n"
- "ld1w { z28.s }, p1/Z, [%x[params], #3, MUL VL]\n"
- "fmla z25.s, z27.s, z3.s[0]\n"
- "fmla z24.s, z27.s, z3.s[1]\n"
- "fmla z23.s, z27.s, z3.s[2]\n"
- "fmla z22.s, z27.s, z3.s[3]\n"
- "fmla z21.s, z27.s, z5.s[0]\n"
- "fmla z20.s, z27.s, z5.s[1]\n"
- "fmla z19.s, z27.s, z5.s[2]\n"
- "fmla z18.s, z27.s, z5.s[3]\n"
- "ld1w { z27.s }, p1/Z, [%x[params], #4, MUL VL]\n"
- "fmla z25.s, z31.s, z4.s[0]\n"
- "fmla z24.s, z31.s, z4.s[1]\n"
- "fmla z23.s, z31.s, z4.s[2]\n"
- "fmla z22.s, z31.s, z4.s[3]\n"
- "fmla z21.s, z31.s, z6.s[0]\n"
- "fmla z20.s, z31.s, z6.s[1]\n"
- "fmla z19.s, z31.s, z6.s[2]\n"
- "fmla z18.s, z31.s, z6.s[3]\n"
- "ld1w { z31.s }, p1/Z, [%x[params], #5, MUL VL]\n"
- "fmla z25.s, z30.s, z4.s[1]\n"
- "fmla z24.s, z30.s, z4.s[2]\n"
- "fmla z23.s, z30.s, z4.s[3]\n"
- "fmla z22.s, z30.s, z5.s[0]\n"
- "fmla z21.s, z30.s, z6.s[1]\n"
- "fmla z20.s, z30.s, z6.s[2]\n"
- "fmla z19.s, z30.s, z6.s[3]\n"
- "fmla z18.s, z30.s, z7.s[0]\n"
- "ld1w { z30.s }, p1/Z, [%x[params], #6, MUL VL]\n"
- "fmla z25.s, z29.s, z4.s[2]\n"
- "fmla z24.s, z29.s, z4.s[3]\n"
- "fmla z23.s, z29.s, z5.s[0]\n"
- "fmla z22.s, z29.s, z5.s[1]\n"
- "fmla z21.s, z29.s, z6.s[2]\n"
- "fmla z20.s, z29.s, z6.s[3]\n"
- "fmla z19.s, z29.s, z7.s[0]\n"
- "fmla z18.s, z29.s, z7.s[1]\n"
- "ld1w { z29.s }, p1/Z, [%x[params], #7, MUL VL]\n"
+ "incw x27\n"
+ "fmla z23.s, z31.s, z2.s[0]\n"
+ "whilelt p1.s, x27, %x[channel_multiplier]\n"
+ "fmla z22.s, z31.s, z2.s[1]\n"
+ "fmla z21.s, z31.s, z2.s[2]\n"
+ "fmla z20.s, z31.s, z2.s[3]\n"
+ "fmla z19.s, z31.s, z4.s[0]\n"
+ "fmla z18.s, z31.s, z4.s[1]\n"
+ "fmla z17.s, z31.s, z4.s[2]\n"
+ "fmla z16.s, z31.s, z4.s[3]\n"
+ "ld1w { z31.s }, p2/Z, [%x[params]]\n"
+ "fmla z23.s, z30.s, z2.s[1]\n"
+ "fmla z22.s, z30.s, z2.s[2]\n"
+ "fmla z21.s, z30.s, z2.s[3]\n"
+ "fmla z20.s, z30.s, z3.s[0]\n"
+ "fmla z19.s, z30.s, z4.s[1]\n"
+ "fmla z18.s, z30.s, z4.s[2]\n"
+ "fmla z17.s, z30.s, z4.s[3]\n"
+ "fmla z16.s, z30.s, z5.s[0]\n"
+ "ld1w { z30.s }, p2/Z, [%x[params], #1, MUL VL]\n"
+ "fmla z23.s, z29.s, z2.s[2]\n"
+ "fmla z22.s, z29.s, z2.s[3]\n"
+ "fmla z21.s, z29.s, z3.s[0]\n"
+ "fmla z20.s, z29.s, z3.s[1]\n"
+ "fmla z19.s, z29.s, z4.s[2]\n"
+ "fmla z18.s, z29.s, z4.s[3]\n"
+ "fmla z17.s, z29.s, z5.s[0]\n"
+ "fmla z16.s, z29.s, z5.s[1]\n"
+ "ld1w { z29.s }, p2/Z, [%x[params], #2, MUL VL]\n"
+ "fmla z23.s, z28.s, z2.s[3]\n"
+ "fmla z22.s, z28.s, z3.s[0]\n"
+ "fmla z21.s, z28.s, z3.s[1]\n"
+ "fmla z20.s, z28.s, z3.s[2]\n"
+ "fmla z19.s, z28.s, z4.s[3]\n"
+ "fmla z18.s, z28.s, z5.s[0]\n"
+ "fmla z17.s, z28.s, z5.s[1]\n"
+ "fmla z16.s, z28.s, z5.s[2]\n"
+ "ld1w { z28.s }, p2/Z, [%x[params], #3, MUL VL]\n"
+ "fmla z23.s, z27.s, z3.s[0]\n"
+ "fmla z22.s, z27.s, z3.s[1]\n"
+ "fmla z21.s, z27.s, z3.s[2]\n"
+ "fmla z20.s, z27.s, z3.s[3]\n"
+ "fmla z19.s, z27.s, z5.s[0]\n"
+ "fmla z18.s, z27.s, z5.s[1]\n"
+ "fmla z17.s, z27.s, z5.s[2]\n"
+ "fmla z16.s, z27.s, z5.s[3]\n"
+ "ld1w { z27.s }, p2/Z, [%x[params], #4, MUL VL]\n"
+ "fmla z23.s, z31.s, z4.s[0]\n"
+ "fmla z22.s, z31.s, z4.s[1]\n"
+ "fmla z21.s, z31.s, z4.s[2]\n"
+ "fmla z20.s, z31.s, z4.s[3]\n"
+ "fmla z19.s, z31.s, z6.s[0]\n"
+ "fmla z18.s, z31.s, z6.s[1]\n"
+ "fmla z17.s, z31.s, z6.s[2]\n"
+ "fmla z16.s, z31.s, z6.s[3]\n"
+ "ld1w { z31.s }, p2/Z, [%x[params], #5, MUL VL]\n"
+ "fmla z23.s, z30.s, z4.s[1]\n"
+ "fmla z22.s, z30.s, z4.s[2]\n"
+ "fmla z21.s, z30.s, z4.s[3]\n"
+ "fmla z20.s, z30.s, z5.s[0]\n"
+ "fmla z19.s, z30.s, z6.s[1]\n"
+ "fmla z18.s, z30.s, z6.s[2]\n"
+ "fmla z17.s, z30.s, z6.s[3]\n"
+ "fmla z16.s, z30.s, z7.s[0]\n"
+ "ld1w { z30.s }, p2/Z, [%x[params], #6, MUL VL]\n"
+ "fmla z23.s, z29.s, z4.s[2]\n"
+ "fmla z22.s, z29.s, z4.s[3]\n"
+ "fmla z21.s, z29.s, z5.s[0]\n"
+ "fmla z20.s, z29.s, z5.s[1]\n"
+ "fmla z19.s, z29.s, z6.s[2]\n"
+ "fmla z18.s, z29.s, z6.s[3]\n"
+ "fmla z17.s, z29.s, z7.s[0]\n"
+ "fmla z16.s, z29.s, z7.s[1]\n"
+ "ld1w { z29.s }, p2/Z, [%x[params], #7, MUL VL]\n"
"addvl %x[params], %x[params], #16\n"
- "fmla z25.s, z28.s, z4.s[3]\n"
- "fmla z24.s, z28.s, z5.s[0]\n"
- "fmla z23.s, z28.s, z5.s[1]\n"
- "fmla z22.s, z28.s, z5.s[2]\n"
- "fmla z21.s, z28.s, z6.s[3]\n"
- "fmla z20.s, z28.s, z7.s[0]\n"
- "fmla z19.s, z28.s, z7.s[1]\n"
- "fmla z18.s, z28.s, z7.s[2]\n"
- "ld1w { z28.s }, p1/Z, [%x[params], #-8, MUL VL]\n"
- "fmla z25.s, z27.s, z5.s[0]\n"
- "fmla z24.s, z27.s, z5.s[1]\n"
- "fmla z23.s, z27.s, z5.s[2]\n"
- "fmla z22.s, z27.s, z5.s[3]\n"
- "fmla z21.s, z27.s, z7.s[0]\n"
- "fmla z20.s, z27.s, z7.s[1]\n"
- "fmla z19.s, z27.s, z7.s[2]\n"
- "fmla z18.s, z27.s, z7.s[3]\n"
- "ld1w { z27.s }, p1/Z, [%x[params], #-7, MUL VL]\n"
- "fmla z25.s, z31.s, z6.s[0]\n"
- "fmla z24.s, z31.s, z6.s[1]\n"
- "fmla z23.s, z31.s, z6.s[2]\n"
- "fmla z22.s, z31.s, z6.s[3]\n"
- "fmla z21.s, z31.s, z0.s[0]\n"
- "fmla z20.s, z31.s, z0.s[1]\n"
- "fmla z19.s, z31.s, z0.s[2]\n"
- "fmla z18.s, z31.s, z0.s[3]\n"
- "ld1w { z31.s }, p1/Z, [%x[params], #-6, MUL VL]\n"
- "fmla z25.s, z30.s, z6.s[1]\n"
- "fmla z24.s, z30.s, z6.s[2]\n"
- "fmla z23.s, z30.s, z6.s[3]\n"
- "fmla z22.s, z30.s, z7.s[0]\n"
- "fmla z21.s, z30.s, z0.s[1]\n"
- "fmla z20.s, z30.s, z0.s[2]\n"
- "fmla z19.s, z30.s, z0.s[3]\n"
- "fmla z18.s, z30.s, z1.s[0]\n"
- "ld1w { z30.s }, p1/Z, [%x[params], #-5, MUL VL]\n"
- "fmla z25.s, z29.s, z6.s[2]\n"
- "fmla z24.s, z29.s, z6.s[3]\n"
- "fmla z23.s, z29.s, z7.s[0]\n"
- "fmla z22.s, z29.s, z7.s[1]\n"
- "fmla z21.s, z29.s, z0.s[2]\n"
- "fmla z20.s, z29.s, z0.s[3]\n"
- "fmla z19.s, z29.s, z1.s[0]\n"
- "fmla z18.s, z29.s, z1.s[1]\n"
- "ld1w { z29.s }, p1/Z, [%x[params], #-4, MUL VL]\n"
- "fmla z25.s, z28.s, z6.s[3]\n"
- "fmla z24.s, z28.s, z7.s[0]\n"
- "fmla z23.s, z28.s, z7.s[1]\n"
- "fmla z22.s, z28.s, z7.s[2]\n"
- "fmla z21.s, z28.s, z0.s[3]\n"
- "fmla z20.s, z28.s, z1.s[0]\n"
- "fmla z19.s, z28.s, z1.s[1]\n"
- "fmla z18.s, z28.s, z1.s[2]\n"
- "ld1w { z28.s }, p1/Z, [%x[params], #-3, MUL VL]\n"
- "fmla z25.s, z27.s, z7.s[0]\n"
- "fmla z24.s, z27.s, z7.s[1]\n"
- "fmla z23.s, z27.s, z7.s[2]\n"
- "fmla z22.s, z27.s, z7.s[3]\n"
- "fmla z21.s, z27.s, z1.s[0]\n"
- "fmla z20.s, z27.s, z1.s[1]\n"
- "fmla z19.s, z27.s, z1.s[2]\n"
- "fmla z18.s, z27.s, z1.s[3]\n"
- "ld1w { z27.s }, p1/Z, [%x[params], #-2, MUL VL]\n"
- "fmla z25.s, z31.s, z0.s[0]\n"
- "fmla z24.s, z31.s, z0.s[1]\n"
- "fmla z23.s, z31.s, z0.s[2]\n"
- "fmla z22.s, z31.s, z0.s[3]\n"
+ "fmla z23.s, z28.s, z4.s[3]\n"
+ "fmla z22.s, z28.s, z5.s[0]\n"
+ "fmla z21.s, z28.s, z5.s[1]\n"
+ "fmla z20.s, z28.s, z5.s[2]\n"
+ "fmla z19.s, z28.s, z6.s[3]\n"
+ "fmla z18.s, z28.s, z7.s[0]\n"
+ "fmla z17.s, z28.s, z7.s[1]\n"
+ "fmla z16.s, z28.s, z7.s[2]\n"
+ "ld1w { z28.s }, p2/Z, [%x[params], #-8, MUL VL]\n"
+ "fmla z23.s, z27.s, z5.s[0]\n"
+ "fmla z22.s, z27.s, z5.s[1]\n"
+ "fmla z21.s, z27.s, z5.s[2]\n"
+ "fmla z20.s, z27.s, z5.s[3]\n"
+ "fmla z19.s, z27.s, z7.s[0]\n"
+ "fmla z18.s, z27.s, z7.s[1]\n"
+ "fmla z17.s, z27.s, z7.s[2]\n"
+ "fmla z16.s, z27.s, z7.s[3]\n"
+ "ld1w { z27.s }, p2/Z, [%x[params], #-7, MUL VL]\n"
+ "fmla z23.s, z31.s, z6.s[0]\n"
+ "fmla z22.s, z31.s, z6.s[1]\n"
+ "fmla z21.s, z31.s, z6.s[2]\n"
+ "fmla z20.s, z31.s, z6.s[3]\n"
+ "fmla z19.s, z31.s, z0.s[0]\n"
+ "fmla z18.s, z31.s, z0.s[1]\n"
+ "fmla z17.s, z31.s, z0.s[2]\n"
+ "fmla z16.s, z31.s, z0.s[3]\n"
+ "ld1w { z31.s }, p2/Z, [%x[params], #-6, MUL VL]\n"
+ "fmla z23.s, z30.s, z6.s[1]\n"
+ "fmla z22.s, z30.s, z6.s[2]\n"
+ "fmla z21.s, z30.s, z6.s[3]\n"
+ "fmla z20.s, z30.s, z7.s[0]\n"
+ "fmla z19.s, z30.s, z0.s[1]\n"
+ "fmla z18.s, z30.s, z0.s[2]\n"
+ "fmla z17.s, z30.s, z0.s[3]\n"
+ "fmla z16.s, z30.s, z1.s[0]\n"
+ "ld1w { z30.s }, p2/Z, [%x[params], #-5, MUL VL]\n"
+ "fmla z23.s, z29.s, z6.s[2]\n"
+ "fmla z22.s, z29.s, z6.s[3]\n"
+ "fmla z21.s, z29.s, z7.s[0]\n"
+ "fmla z20.s, z29.s, z7.s[1]\n"
+ "fmla z19.s, z29.s, z0.s[2]\n"
+ "fmla z18.s, z29.s, z0.s[3]\n"
+ "fmla z17.s, z29.s, z1.s[0]\n"
+ "fmla z16.s, z29.s, z1.s[1]\n"
+ "ld1w { z29.s }, p2/Z, [%x[params], #-4, MUL VL]\n"
+ "fmla z23.s, z28.s, z6.s[3]\n"
+ "fmla z22.s, z28.s, z7.s[0]\n"
+ "fmla z21.s, z28.s, z7.s[1]\n"
+ "fmla z20.s, z28.s, z7.s[2]\n"
+ "fmla z19.s, z28.s, z0.s[3]\n"
+ "fmla z18.s, z28.s, z1.s[0]\n"
+ "fmla z17.s, z28.s, z1.s[1]\n"
+ "fmla z16.s, z28.s, z1.s[2]\n"
+ "ld1w { z28.s }, p2/Z, [%x[params], #-3, MUL VL]\n"
+ "fmla z23.s, z27.s, z7.s[0]\n"
+ "fmla z22.s, z27.s, z7.s[1]\n"
+ "fmla z21.s, z27.s, z7.s[2]\n"
+ "fmla z20.s, z27.s, z7.s[3]\n"
+ "fmla z19.s, z27.s, z1.s[0]\n"
+ "fmla z18.s, z27.s, z1.s[1]\n"
+ "fmla z17.s, z27.s, z1.s[2]\n"
+ "fmla z16.s, z27.s, z1.s[3]\n"
+ "ld1w { z27.s }, p2/Z, [%x[params], #-2, MUL VL]\n"
+ "fmla z23.s, z31.s, z0.s[0]\n"
+ "fmla z22.s, z31.s, z0.s[1]\n"
+ "fmla z21.s, z31.s, z0.s[2]\n"
+ "fmla z20.s, z31.s, z0.s[3]\n"
"mov z0.d, z10.d\n"
- "fmla z21.s, z31.s, z0.s[0]\n"
- "fmla z20.s, z31.s, z0.s[1]\n"
- "fmla z19.s, z31.s, z0.s[2]\n"
- "fmla z18.s, z31.s, z0.s[3]\n"
+ "fmla z19.s, z31.s, z0.s[0]\n"
+ "fmla z18.s, z31.s, z0.s[1]\n"
+ "fmla z17.s, z31.s, z0.s[2]\n"
+ "fmla z16.s, z31.s, z0.s[3]\n"
+ "ld1w { z31.s }, p2/Z, [%x[params], #-1, MUL VL]\n"
"mov z0.d, z8.d\n"
- "ld1w { z31.s }, p1/Z, [%x[params], #-1, MUL VL]\n"
- "fmla z25.s, z30.s, z0.s[1]\n"
- "fmla z24.s, z30.s, z0.s[2]\n"
- "fmla z23.s, z30.s, z0.s[3]\n"
- "fmla z22.s, z30.s, z1.s[0]\n"
+ "fmla z23.s, z30.s, z0.s[1]\n"
+ "fmla z22.s, z30.s, z0.s[2]\n"
+ "fmla z21.s, z30.s, z0.s[3]\n"
"mov z0.d, z10.d\n"
+ "fmla z20.s, z30.s, z1.s[0]\n"
"mov z1.d, z11.d\n"
- "fmla z21.s, z30.s, z0.s[1]\n"
- "fmla z20.s, z30.s, z0.s[2]\n"
- "fmla z19.s, z30.s, z0.s[3]\n"
- "fmla z18.s, z30.s, z1.s[0]\n"
+ "fmla z19.s, z30.s, z0.s[1]\n"
+ "fmla z18.s, z30.s, z0.s[2]\n"
+ "fmla z17.s, z30.s, z0.s[3]\n"
"mov z0.d, z8.d\n"
- "ld1w { z30.s }, p1/Z, [%x[params]]\n"
+ "fmla z16.s, z30.s, z1.s[0]\n"
+ "ld1w { z30.s }, p2/Z, [%x[params]]\n"
"mov z1.d, z9.d\n"
- "fmla z25.s, z29.s, z0.s[2]\n"
- "fmla z24.s, z29.s, z0.s[3]\n"
- "fmla z23.s, z29.s, z1.s[0]\n"
- "fmla z22.s, z29.s, z1.s[1]\n"
+ "fmla z23.s, z29.s, z0.s[2]\n"
+ "fmla z22.s, z29.s, z0.s[3]\n"
"mov z0.d, z10.d\n"
+ "fmla z21.s, z29.s, z1.s[0]\n"
+ "fmla z20.s, z29.s, z1.s[1]\n"
"mov z1.d, z11.d\n"
- "fmla z21.s, z29.s, z0.s[2]\n"
- "fmla z20.s, z29.s, z0.s[3]\n"
- "fmla z19.s, z29.s, z1.s[0]\n"
- "fmla z18.s, z29.s, z1.s[1]\n"
+ "fmla z19.s, z29.s, z0.s[2]\n"
+ "fmla z18.s, z29.s, z0.s[3]\n"
"mov z0.d, z8.d\n"
- "ld1w { z29.s }, p1/Z, [%x[params], #1, MUL VL]\n"
+ "fmla z17.s, z29.s, z1.s[0]\n"
+ "fmla z16.s, z29.s, z1.s[1]\n"
+ "ld1w { z29.s }, p2/Z, [%x[params], #1, MUL VL]\n"
"mov z1.d, z9.d\n"
- "fmla z25.s, z28.s, z0.s[3]\n"
- "fmla z24.s, z28.s, z1.s[0]\n"
- "fmla z23.s, z28.s, z1.s[1]\n"
- "fmla z22.s, z28.s, z1.s[2]\n"
+ "fmla z23.s, z28.s, z0.s[3]\n"
"mov z0.d, z10.d\n"
+ "fmla z22.s, z28.s, z1.s[0]\n"
+ "fmla z21.s, z28.s, z1.s[1]\n"
+ "fmla z20.s, z28.s, z1.s[2]\n"
"mov z1.d, z11.d\n"
- "fmla z21.s, z28.s, z0.s[3]\n"
- "fmla z20.s, z28.s, z1.s[0]\n"
- "fmla z19.s, z28.s, z1.s[1]\n"
- "fmla z18.s, z28.s, z1.s[2]\n"
+ "fmla z19.s, z28.s, z0.s[3]\n"
+ "fmla z18.s, z28.s, z1.s[0]\n"
+ "fmla z17.s, z28.s, z1.s[1]\n"
+ "fmla z16.s, z28.s, z1.s[2]\n"
+ "ld1w { z28.s }, p2/Z, [%x[params], #2, MUL VL]\n"
"mov z1.d, z9.d\n"
- "ld1w { z28.s }, p1/Z, [%x[params], #2, MUL VL]\n"
- "fmla z25.s, z27.s, z1.s[0]\n"
- "fmla z24.s, z27.s, z1.s[1]\n"
- "fmla z23.s, z27.s, z1.s[2]\n"
- "fmla z22.s, z27.s, z1.s[3]\n"
+ "fmla z23.s, z27.s, z1.s[0]\n"
+ "fmla z22.s, z27.s, z1.s[1]\n"
+ "fmla z21.s, z27.s, z1.s[2]\n"
+ "fmla z20.s, z27.s, z1.s[3]\n"
"mov z1.d, z11.d\n"
- "fmla z21.s, z27.s, z1.s[0]\n"
- "fmla z20.s, z27.s, z1.s[1]\n"
- "fmla z19.s, z27.s, z1.s[2]\n"
- "fmla z18.s, z27.s, z1.s[3]\n"
- "ld1w { z27.s }, p1/Z, [%x[params], #3, MUL VL]\n"
- "fmla z25.s, z31.s, z0.s[0]\n"
- "fmla z24.s, z31.s, z0.s[1]\n"
- "fmla z23.s, z31.s, z0.s[2]\n"
- "fmla z22.s, z31.s, z0.s[3]\n"
+ "fmla z19.s, z27.s, z1.s[0]\n"
+ "fmla z18.s, z27.s, z1.s[1]\n"
+ "fmla z17.s, z27.s, z1.s[2]\n"
+ "fmla z16.s, z27.s, z1.s[3]\n"
+ "ld1w { z27.s }, p2/Z, [%x[params], #3, MUL VL]\n"
+ "fmla z23.s, z31.s, z0.s[0]\n"
+ "fmla z22.s, z31.s, z0.s[1]\n"
+ "fmla z21.s, z31.s, z0.s[2]\n"
+ "fmla z20.s, z31.s, z0.s[3]\n"
"mov z0.d, z12.d\n"
- "fmla z21.s, z31.s, z0.s[0]\n"
- "fmla z20.s, z31.s, z0.s[1]\n"
- "fmla z19.s, z31.s, z0.s[2]\n"
- "fmla z18.s, z31.s, z0.s[3]\n"
+ "fmla z19.s, z31.s, z0.s[0]\n"
+ "fmla z18.s, z31.s, z0.s[1]\n"
+ "fmla z17.s, z31.s, z0.s[2]\n"
+ "fmla z16.s, z31.s, z0.s[3]\n"
+ "ld1w { z31.s }, p1/Z, [%x[params], #5, MUL VL]\n"
"mov z0.d, z10.d\n"
- "ld1w { z31.s }, p2/Z, [%x[params], #5, MUL VL]\n"
- "fmla z25.s, z30.s, z0.s[1]\n"
- "fmla z24.s, z30.s, z0.s[2]\n"
- "fmla z23.s, z30.s, z0.s[3]\n"
- "fmla z22.s, z30.s, z1.s[0]\n"
+ "fmla z23.s, z30.s, z0.s[1]\n"
+ "fmla z22.s, z30.s, z0.s[2]\n"
+ "fmla z21.s, z30.s, z0.s[3]\n"
"mov z0.d, z12.d\n"
+ "fmla z20.s, z30.s, z1.s[0]\n"
"mov z1.d, z13.d\n"
- "fmla z21.s, z30.s, z0.s[1]\n"
- "fmla z20.s, z30.s, z0.s[2]\n"
- "fmla z19.s, z30.s, z0.s[3]\n"
- "fmla z18.s, z30.s, z1.s[0]\n"
+ "fmla z19.s, z30.s, z0.s[1]\n"
+ "fmla z18.s, z30.s, z0.s[2]\n"
+ "fmla z17.s, z30.s, z0.s[3]\n"
"mov z0.d, z10.d\n"
- "ld1w { z30.s }, p2/Z, [%x[params], #6, MUL VL]\n"
+ "fmla z16.s, z30.s, z1.s[0]\n"
+ "ld1w { z30.s }, p1/Z, [%x[params], #6, MUL VL]\n"
"mov z1.d, z11.d\n"
- "fmla z25.s, z29.s, z0.s[2]\n"
- "fmla z24.s, z29.s, z0.s[3]\n"
- "fmla z23.s, z29.s, z1.s[0]\n"
- "fmla z22.s, z29.s, z1.s[1]\n"
+ "fmla z23.s, z29.s, z0.s[2]\n"
+ "fmla z22.s, z29.s, z0.s[3]\n"
"mov z0.d, z12.d\n"
+ "fmla z21.s, z29.s, z1.s[0]\n"
+ "fmla z20.s, z29.s, z1.s[1]\n"
"mov z1.d, z13.d\n"
- "fmla z21.s, z29.s, z0.s[2]\n"
- "fmla z20.s, z29.s, z0.s[3]\n"
- "fmla z19.s, z29.s, z1.s[0]\n"
- "fmla z18.s, z29.s, z1.s[1]\n"
+ "fmla z19.s, z29.s, z0.s[2]\n"
+ "fmla z18.s, z29.s, z0.s[3]\n"
"mov z0.d, z10.d\n"
- "ld1w { z29.s }, p2/Z, [%x[params], #7, MUL VL]\n"
+ "fmla z17.s, z29.s, z1.s[0]\n"
+ "fmla z16.s, z29.s, z1.s[1]\n"
+ "ld1w { z29.s }, p1/Z, [%x[params], #7, MUL VL]\n"
"mov z1.d, z11.d\n"
- "fmla z25.s, z28.s, z0.s[3]\n"
- "fmla z24.s, z28.s, z1.s[0]\n"
- "fmla z23.s, z28.s, z1.s[1]\n"
- "fmla z22.s, z28.s, z1.s[2]\n"
- "mov z1.d, z13.d\n"
+ "fmla z23.s, z28.s, z0.s[3]\n"
"mov z0.d, z12.d\n"
- "fmla z20.s, z28.s, z1.s[0]\n"
- "fmla z19.s, z28.s, z1.s[1]\n"
- "fmla z18.s, z28.s, z1.s[2]\n"
+ "fmla z22.s, z28.s, z1.s[0]\n"
+ "fmla z21.s, z28.s, z1.s[1]\n"
+ "fmla z20.s, z28.s, z1.s[2]\n"
+ "mov z1.d, z13.d\n"
+ "fmla z19.s, z28.s, z0.s[3]\n"
+ "fmla z18.s, z28.s, z1.s[0]\n"
+ "fmla z17.s, z28.s, z1.s[1]\n"
+ "fmla z16.s, z28.s, z1.s[2]\n"
"mov z1.d, z11.d\n"
- "fmla z21.s, z28.s, z0.s[3]\n"
- "fmla z25.s, z27.s, z1.s[0]\n"
- "fmla z24.s, z27.s, z1.s[1]\n"
- "fmin z25.s, p1/M, z25.s, z16.s\n"
- "fmax z25.s, p1/M, z25.s, z17.s\n"
- "fmla z23.s, z27.s, z1.s[2]\n"
- "fmla z22.s, z27.s, z1.s[3]\n"
+ "fmla z23.s, z27.s, z1.s[0]\n"
+ "fmla z22.s, z27.s, z1.s[1]\n"
+ "fmla z21.s, z27.s, z1.s[2]\n"
+ "fmla z20.s, z27.s, z1.s[3]\n"
"mov z1.d, z13.d\n"
- "fmin z24.s, p1/M, z24.s, z16.s\n"
- "fmla z21.s, z27.s, z1.s[0]\n"
- "fmla z20.s, z27.s, z1.s[1]\n"
- "fmin z23.s, p1/M, z23.s, z16.s\n"
- "fmin z22.s, p1/M, z22.s, z16.s\n"
- "fmla z19.s, z27.s, z1.s[2]\n"
- "fmla z18.s, z27.s, z1.s[3]\n"
- "fmin z21.s, p1/M, z21.s, z16.s\n"
- "fmin z20.s, p1/M, z20.s, z16.s\n"
- "fmin z19.s, p1/M, z19.s, z16.s\n"
- "fmin z18.s, p1/M, z18.s, z16.s\n"
- "st1w { z25.s }, p0, [x27, x10, LSL #2]\n"
- "ld1w { z25.s }, p2/Z, [%x[params], #4, MUL VL]\n"
+ "fmla z19.s, z27.s, z1.s[0]\n"
+ "fmla z18.s, z27.s, z1.s[1]\n"
+ "fmla z17.s, z27.s, z1.s[2]\n"
+ "fmla z16.s, z27.s, z1.s[3]\n"
+ "fmin z23.s, p2/M, z23.s, z24.s\n"
+ "fmin z22.s, p2/M, z22.s, z24.s\n"
+ "fmin z21.s, p2/M, z21.s, z24.s\n"
+ "fmin z20.s, p2/M, z20.s, z24.s\n"
+ "fmax z23.s, p2/M, z23.s, z25.s\n"
+ "st1w { z23.s }, p0, [x11, x24, LSL #2]\n"
+ "fmax z22.s, p2/M, z22.s, z25.s\n"
+ "fmax z21.s, p2/M, z21.s, z25.s\n"
+ "ld1w { z23.s }, p1/Z, [%x[params], #4, MUL VL]\n"
"addvl %x[params], %x[params], #16\n"
- "fmax z24.s, p1/M, z24.s, z17.s\n"
- "st1w { z24.s }, p0, [x26, x10, LSL #2]\n"
- "mov z24.d, z25.d\n"
- "fmax z23.s, p1/M, z23.s, z17.s\n"
- "fmax z22.s, p1/M, z22.s, z17.s\n"
- "st1w { z23.s }, p0, [x25, x10, LSL #2]\n"
- "mov z23.d, z25.d\n"
- "fmax z21.s, p1/M, z21.s, z17.s\n"
- "fmax z20.s, p1/M, z20.s, z17.s\n"
- "st1w { z22.s }, p0, [x24, x10, LSL #2]\n"
- "mov z22.d, z25.d\n"
- "fmax z19.s, p1/M, z19.s, z17.s\n"
- "fmax z18.s, p1/M, z18.s, z17.s\n"
- "st1w { z21.s }, p0, [x23, x10, LSL #2]\n"
- "mov z21.d, z25.d\n"
- "st1w { z20.s }, p0, [x22, x10, LSL #2]\n"
- "mov z20.d, z25.d\n"
- "ld1w { z28.s }, p2/Z, [%x[params], #-8, MUL VL]\n"
- "ld1w { z27.s }, p2/Z, [%x[params], #-7, MUL VL]\n"
- "st1w { z19.s }, p0, [x21, x10, LSL #2]\n"
- "mov z19.d, z25.d\n"
+ "fmax z20.s, p2/M, z20.s, z25.s\n"
+ "ld1w { z28.s }, p1/Z, [%x[params], #-8, MUL VL]\n"
+ "fmin z19.s, p2/M, z19.s, z24.s\n"
+ "ld1w { z27.s }, p1/Z, [%x[params], #-7, MUL VL]\n"
"addvl %x[params], %x[params], #-6\n"
- "st1w { z18.s }, p0, [x20, x10, LSL #2]\n"
- "incw x10\n"
- "mov z18.d, z25.d\n"
+ "fmin z18.s, p2/M, z18.s, z24.s\n"
+ "st1w { z22.s }, p0, [x10, x24, LSL #2]\n"
+ "mov z22.d, z23.d\n"
+ "st1w { z21.s }, p0, [x9, x24, LSL #2]\n"
+ "mov z21.d, z23.d\n"
+ "st1w { z20.s }, p0, [x28, x24, LSL #2]\n"
+ "mov z20.d, z23.d\n"
+ "fmax z19.s, p2/M, z19.s, z25.s\n"
+ "st1w { z19.s }, p0, [x26, x24, LSL #2]\n"
+ "mov z19.d, z23.d\n"
+ "fmax z18.s, p2/M, z18.s, z25.s\n"
+ "st1w { z18.s }, p0, [x25, x24, LSL #2]\n"
+ "mov z18.d, z23.d\n"
+ "fmin z17.s, p2/M, z17.s, z24.s\n"
+ "fmin z16.s, p2/M, z16.s, z24.s\n"
+ "fmax z17.s, p2/M, z17.s, z25.s\n"
+ "st1w { z17.s }, p0, [x23, x24, LSL #2]\n"
+ "mov z17.d, z23.d\n"
+ "fmax z16.s, p2/M, z16.s, z25.s\n"
+ "st1w { z16.s }, p0, [x22, x24, LSL #2]\n"
+ "mov z16.d, z23.d\n"
+ "incw x24\n"
"b.any 1b\n"
: [params] "+&r" (params)
: [channel_multiplier] "r" (n_output_channels), [clamps] "r" (minmax_vals), [inptrs] "r" (inptrs), [outptrs] "r" (outptrs)
- : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z27", "z28", "z29", "z30", "z31"
);
}
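
A pattern worth noting in both hunks above: inputs are loaded with `ld1rqw` (load-and-replicate 128-bit quadword), so lane i of each input register is identical in every 128-bit segment, and the indexed `fmla z.s, z.s, z.s[i]` — which selects a lane per segment — therefore acts as a vector-wide broadcast of one input pixel. The helper below is an invented illustration of that pattern, not the library's API.

#include <arm_sve.h>

// Illustrative only: one weight vector accumulated against four input
// lanes, mirroring the "fmla z.s, z.s, z.s[i]" runs in the hunks above.
// Because the inputs were loaded with ld1rqw, lane i holds the same
// value in every 128-bit segment, so each lane select broadcasts one
// input pixel across the whole vector.
static inline void accumulate_four(svfloat32_t &a0, svfloat32_t &a1,
                                   svfloat32_t &a2, svfloat32_t &a3,
                                   svfloat32_t w, svfloat32_t in)
{
  a0 = svmla_lane_f32(a0, w, in, 0);  // output column 0
  a1 = svmla_lane_f32(a1, w, in, 1);  // output column 1
  a2 = svmla_lane_f32(a2, w, in, 2);  // output column 2
  a3 = svmla_lane_f32(a3, w, in, 3);  // output column 3
}
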
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
index a43b81d7e8..b23cec8593 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,406 +45,406 @@ void sve_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
__asm__ __volatile__(
"ptrue p1.b\n"
- "mov x9, #0x0\n"
- "ld1rw { z10.s }, p1/Z, [%x[minmax_vals]]\n"
- "ld1rw { z13.s }, p1/Z, [%x[minmax_vals], #4]\n"
- "whilelt p0.s, x9, %x[n_output_channels]\n"
+ "ld1rw { z11.s }, p1/Z, [%x[minmax_vals]]\n"
+ "mov x28, #0x0\n"
+ "ld1rw { z10.s }, p1/Z, [%x[minmax_vals], #4]\n"
+ "whilelt p0.s, x28, %x[n_output_channels]\n"
"1:" // Output channel loop
- "mov z5.b, #0x0\n"
+ "mov z16.b, #0x0\n"
"cbz %x[bias], 2f\n"
- "ld1w { z5.s }, p0/Z, [%x[bias], x9, LSL #2]\n"
+ "ld1w { z16.s }, p0/Z, [%x[bias], x28, LSL #2]\n"
"2:" // Output channel loop: Load bias: Done
- "mov x21, %x[inptrs]\n"
- "ldp x24, x28, [x21], #0x10\n"
- "lsr x20, %x[kernel_points], #0x1\n"
- "mov z16.d, z5.d\n"
- "mov z17.d, z5.d\n"
- "mov z18.d, z5.d\n"
- "ld1rqw { z1.s }, p1/Z, [x24]\n"
- "ld1rqw { z6.s }, p1/Z, [x24, #16]\n"
- "mov z19.d, z5.d\n"
- "mov z20.d, z5.d\n"
- "ld1rqw { z0.s }, p1/Z, [x28]\n"
- "ld1rqw { z4.s }, p1/Z, [x28, #16]\n"
- "mov z21.d, z5.d\n"
- "mov z22.d, z5.d\n"
+ "mov z9.d, z16.d\n"
"ld1w { z8.s }, p1/Z, [%x[weights]]\n"
+ "mov x20, %x[inptrs]\n"
+ "mov z31.d, z16.d\n"
+ "ldp x24, x27, [x20], #0x10\n"
+ "lsr x19, %x[kernel_points], #0x1\n"
+ "mov z30.d, z16.d\n"
+ "ld1rqw { z7.s }, p1/Z, [x24]\n"
+ "mov z29.d, z16.d\n"
"addvl %x[weights], %x[weights], #1\n"
- "mov z23.d, z5.d\n"
- "mov z24.d, z5.d\n"
- "mov z25.d, z5.d\n"
- "mov z26.d, z5.d\n"
- "mov z27.d, z5.d\n"
- "mov z28.d, z5.d\n"
- "mov z29.d, z5.d\n"
- "mov z30.d, z5.d\n"
- "mov z31.d, z5.d\n"
- "cbz x20, 6f\n"
- "ldp x24, x28, [x21], #0x10\n"
- "subs x20, x20, #0x1\n"
- "ld1rqw { z5.s }, p1/Z, [x24]\n"
- "ld1rqw { z7.s }, p1/Z, [x24, #16]\n"
- "ld1rqw { z3.s }, p1/Z, [x28]\n"
- "ld1rqw { z2.s }, p1/Z, [x28, #16]\n"
- "ld1w { z11.s }, p1/Z, [%x[weights]]\n"
+ "mov z28.d, z16.d\n"
+ "ld1rqw { z6.s }, p1/Z, [x24, #16]\n"
+ "mov z27.d, z16.d\n"
+ "ld1rqw { z5.s }, p1/Z, [x27]\n"
+ "mov z26.d, z16.d\n"
+ "ld1rqw { z4.s }, p1/Z, [x27, #16]\n"
+ "mov z25.d, z16.d\n"
+ "mov z24.d, z16.d\n"
+ "mov z23.d, z16.d\n"
+ "mov z22.d, z16.d\n"
+ "mov z21.d, z16.d\n"
+ "mov z20.d, z16.d\n"
+ "mov z19.d, z16.d\n"
+ "mov z18.d, z16.d\n"
+ "mov z17.d, z16.d\n"
+ "cbz x19, 6f\n"
+ "ldp x24, x27, [x20], #0x10\n"
+ "ld1w { z16.s }, p1/Z, [%x[weights]]\n"
+ "subs x19, x19, #0x1\n"
"addvl %x[weights], %x[weights], #1\n"
+ "ld1rqw { z3.s }, p1/Z, [x24]\n"
+ "ld1rqw { z2.s }, p1/Z, [x24, #16]\n"
+ "ld1rqw { z1.s }, p1/Z, [x27]\n"
+ "ld1rqw { z0.s }, p1/Z, [x27, #16]\n"
"beq 4f\n"
"3:" // Output channel loop: Kernel loop
- "ldp x24, x28, [x21], #0x10\n"
- "fmla z16.s, z8.s, z1.s[0]\n"
- "fmla z17.s, z8.s, z1.s[1]\n"
- "subs x20, x20, #0x1\n"
- "fmla z18.s, z8.s, z1.s[2]\n"
- "fmla z19.s, z8.s, z1.s[3]\n"
- "ld1rqw { z1.s }, p1/Z, [x24]\n"
- "fmla z20.s, z8.s, z6.s[0]\n"
- "fmla z21.s, z8.s, z6.s[1]\n"
- "fmla z22.s, z8.s, z6.s[2]\n"
- "fmla z23.s, z8.s, z6.s[3]\n"
+ "fmla z9.s, z8.s, z7.s[0]\n"
+ "ldp x24, x27, [x20], #0x10\n"
+ "subs x19, x19, #0x1\n"
+ "fmla z31.s, z8.s, z7.s[1]\n"
+ "fmla z30.s, z8.s, z7.s[2]\n"
+ "fmla z29.s, z8.s, z7.s[3]\n"
+ "ld1rqw { z7.s }, p1/Z, [x24]\n"
+ "fmla z28.s, z8.s, z6.s[0]\n"
+ "fmla z27.s, z8.s, z6.s[1]\n"
+ "fmla z26.s, z8.s, z6.s[2]\n"
+ "fmla z25.s, z8.s, z6.s[3]\n"
"ld1rqw { z6.s }, p1/Z, [x24, #16]\n"
- "fmla z24.s, z8.s, z0.s[0]\n"
- "fmla z25.s, z8.s, z0.s[1]\n"
- "fmla z26.s, z8.s, z0.s[2]\n"
- "fmla z27.s, z8.s, z0.s[3]\n"
- "ld1rqw { z0.s }, p1/Z, [x28]\n"
- "fmla z28.s, z8.s, z4.s[0]\n"
- "fmla z29.s, z8.s, z4.s[1]\n"
- "fmla z30.s, z8.s, z4.s[2]\n"
- "fmla z31.s, z8.s, z4.s[3]\n"
- "ld1rqw { z4.s }, p1/Z, [x28, #16]\n"
- "ldp x24, x28, [x21], #0x10\n"
+ "fmla z24.s, z8.s, z5.s[0]\n"
+ "fmla z23.s, z8.s, z5.s[1]\n"
+ "fmla z22.s, z8.s, z5.s[2]\n"
+ "fmla z21.s, z8.s, z5.s[3]\n"
+ "ld1rqw { z5.s }, p1/Z, [x27]\n"
+ "fmla z20.s, z8.s, z4.s[0]\n"
+ "fmla z19.s, z8.s, z4.s[1]\n"
+ "fmla z18.s, z8.s, z4.s[2]\n"
+ "fmla z17.s, z8.s, z4.s[3]\n"
+ "ld1rqw { z4.s }, p1/Z, [x27, #16]\n"
+ "fmla z9.s, z16.s, z3.s[0]\n"
"ld1w { z8.s }, p1/Z, [%x[weights]]\n"
- "fmla z16.s, z11.s, z5.s[0]\n"
- "fmla z17.s, z11.s, z5.s[1]\n"
- "fmla z18.s, z11.s, z5.s[2]\n"
- "fmla z19.s, z11.s, z5.s[3]\n"
- "ld1rqw { z5.s }, p1/Z, [x24]\n"
- "fmla z20.s, z11.s, z7.s[0]\n"
- "fmla z21.s, z11.s, z7.s[1]\n"
- "fmla z22.s, z11.s, z7.s[2]\n"
- "fmla z23.s, z11.s, z7.s[3]\n"
- "ld1rqw { z7.s }, p1/Z, [x24, #16]\n"
- "fmla z24.s, z11.s, z3.s[0]\n"
- "fmla z25.s, z11.s, z3.s[1]\n"
- "fmla z26.s, z11.s, z3.s[2]\n"
- "fmla z27.s, z11.s, z3.s[3]\n"
- "ld1rqw { z3.s }, p1/Z, [x28]\n"
- "fmla z28.s, z11.s, z2.s[0]\n"
- "fmla z29.s, z11.s, z2.s[1]\n"
- "fmla z30.s, z11.s, z2.s[2]\n"
- "fmla z31.s, z11.s, z2.s[3]\n"
- "ld1rqw { z2.s }, p1/Z, [x28, #16]\n"
- "ld1w { z11.s }, p1/Z, [%x[weights], #1, MUL VL]\n"
+ "fmla z31.s, z16.s, z3.s[1]\n"
+ "ldp x24, x27, [x20], #0x10\n"
+ "fmla z30.s, z16.s, z3.s[2]\n"
+ "fmla z29.s, z16.s, z3.s[3]\n"
+ "ld1rqw { z3.s }, p1/Z, [x24]\n"
+ "fmla z28.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z2.s[1]\n"
+ "fmla z26.s, z16.s, z2.s[2]\n"
+ "fmla z25.s, z16.s, z2.s[3]\n"
+ "ld1rqw { z2.s }, p1/Z, [x24, #16]\n"
+ "fmla z24.s, z16.s, z1.s[0]\n"
+ "fmla z23.s, z16.s, z1.s[1]\n"
+ "fmla z22.s, z16.s, z1.s[2]\n"
+ "fmla z21.s, z16.s, z1.s[3]\n"
+ "ld1rqw { z1.s }, p1/Z, [x27]\n"
+ "fmla z20.s, z16.s, z0.s[0]\n"
+ "fmla z19.s, z16.s, z0.s[1]\n"
+ "fmla z18.s, z16.s, z0.s[2]\n"
+ "fmla z17.s, z16.s, z0.s[3]\n"
+ "ld1rqw { z0.s }, p1/Z, [x27, #16]\n"
+ "ld1w { z16.s }, p1/Z, [%x[weights], #1, MUL VL]\n"
"addvl %x[weights], %x[weights], #2\n"
"bgt 3b\n"
"4:" // Output channel loop: Kernel loop tail
"tbnz %x[kernel_points], #0, 5f\n"
- "fmla z16.s, z8.s, z1.s[0]\n"
- "fmla z17.s, z8.s, z1.s[1]\n"
- "ldr x20, [%x[outptrs], #0x0]\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
- "fmla z18.s, z8.s, z1.s[2]\n"
- "fmla z19.s, z8.s, z1.s[3]\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
- "fmla z20.s, z8.s, z6.s[0]\n"
- "fmla z21.s, z8.s, z6.s[1]\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
- "fmla z22.s, z8.s, z6.s[2]\n"
- "fmla z23.s, z8.s, z6.s[3]\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
- "fmla z24.s, z8.s, z0.s[0]\n"
- "fmla z25.s, z8.s, z0.s[1]\n"
- "fmla z26.s, z8.s, z0.s[2]\n"
- "fmla z27.s, z8.s, z0.s[3]\n"
- "fmla z28.s, z8.s, z4.s[0]\n"
- "fmla z29.s, z8.s, z4.s[1]\n"
- "fmla z30.s, z8.s, z4.s[2]\n"
- "fmla z31.s, z8.s, z4.s[3]\n"
- "fmla z16.s, z11.s, z5.s[0]\n"
- "fmla z17.s, z11.s, z5.s[1]\n"
- "fmin z16.s, p1/M, z16.s, z13.s\n"
- "fmin z17.s, p1/M, z17.s, z13.s\n"
- "fmla z18.s, z11.s, z5.s[2]\n"
- "fmla z19.s, z11.s, z5.s[3]\n"
- "fmin z18.s, p1/M, z18.s, z13.s\n"
- "fmin z19.s, p1/M, z19.s, z13.s\n"
- "fmla z20.s, z11.s, z7.s[0]\n"
- "fmla z21.s, z11.s, z7.s[1]\n"
- "fmin z20.s, p1/M, z20.s, z13.s\n"
- "fmin z21.s, p1/M, z21.s, z13.s\n"
- "fmla z22.s, z11.s, z7.s[2]\n"
- "fmla z23.s, z11.s, z7.s[3]\n"
- "fmin z22.s, p1/M, z22.s, z13.s\n"
- "fmin z23.s, p1/M, z23.s, z13.s\n"
- "fmla z24.s, z11.s, z3.s[0]\n"
- "fmla z25.s, z11.s, z3.s[1]\n"
- "fmax z16.s, p1/M, z16.s, z10.s\n"
- "fmax z17.s, p1/M, z17.s, z10.s\n"
- "fmla z26.s, z11.s, z3.s[2]\n"
- "fmla z27.s, z11.s, z3.s[3]\n"
- "fmax z18.s, p1/M, z18.s, z10.s\n"
- "fmax z19.s, p1/M, z19.s, z10.s\n"
- "fmla z28.s, z11.s, z2.s[0]\n"
- "fmla z29.s, z11.s, z2.s[1]\n"
- "fmax z20.s, p1/M, z20.s, z10.s\n"
- "fmax z21.s, p1/M, z21.s, z10.s\n"
- "fmla z30.s, z11.s, z2.s[2]\n"
- "fmla z31.s, z11.s, z2.s[3]\n"
- "fmax z22.s, p1/M, z22.s, z10.s\n"
- "fmax z23.s, p1/M, z23.s, z10.s\n"
- "fmin z24.s, p1/M, z24.s, z13.s\n"
- "fmin z25.s, p1/M, z25.s, z13.s\n"
- "st1w { z16.s }, p0, [x20, x9, LSL #2]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "fmin z26.s, p1/M, z26.s, z13.s\n"
- "fmin z27.s, p1/M, z27.s, z13.s\n"
- "st1w { z17.s }, p0, [x21, x9, LSL #2]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
- "fmin z28.s, p1/M, z28.s, z13.s\n"
- "fmin z29.s, p1/M, z29.s, z13.s\n"
- "st1w { z18.s }, p0, [x22, x9, LSL #2]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
- "fmin z30.s, p1/M, z30.s, z13.s\n"
- "fmin z31.s, p1/M, z31.s, z13.s\n"
- "st1w { z19.s }, p0, [x23, x9, LSL #2]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
- "st1w { z20.s }, p0, [x24, x9, LSL #2]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
- "fmax z24.s, p1/M, z24.s, z10.s\n"
- "fmax z25.s, p1/M, z25.s, z10.s\n"
- "st1w { z21.s }, p0, [x25, x9, LSL #2]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
- "fmax z26.s, p1/M, z26.s, z10.s\n"
- "fmax z27.s, p1/M, z27.s, z10.s\n"
- "st1w { z22.s }, p0, [x26, x9, LSL #2]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
- "fmax z28.s, p1/M, z28.s, z10.s\n"
- "fmax z29.s, p1/M, z29.s, z10.s\n"
- "st1w { z23.s }, p0, [x27, x9, LSL #2]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
- "fmax z30.s, p1/M, z30.s, z10.s\n"
- "fmax z31.s, p1/M, z31.s, z10.s\n"
- "st1w { z24.s }, p0, [x20, x9, LSL #2]\n"
- "st1w { z25.s }, p0, [x21, x9, LSL #2]\n"
- "st1w { z26.s }, p0, [x22, x9, LSL #2]\n"
- "st1w { z27.s }, p0, [x23, x9, LSL #2]\n"
- "st1w { z28.s }, p0, [x24, x9, LSL #2]\n"
- "st1w { z29.s }, p0, [x25, x9, LSL #2]\n"
- "st1w { z30.s }, p0, [x26, x9, LSL #2]\n"
- "st1w { z31.s }, p0, [x27, x9, LSL #2]\n"
+ "fmla z9.s, z8.s, z7.s[0]\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "fmla z31.s, z8.s, z7.s[1]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "fmla z30.s, z8.s, z7.s[2]\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "fmla z29.s, z8.s, z7.s[3]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
+ "fmla z28.s, z8.s, z6.s[0]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
+ "fmla z27.s, z8.s, z6.s[1]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
+ "fmla z26.s, z8.s, z6.s[2]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
+ "fmla z25.s, z8.s, z6.s[3]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
+ "fmla z24.s, z8.s, z5.s[0]\n"
+ "fmla z23.s, z8.s, z5.s[1]\n"
+ "fmla z22.s, z8.s, z5.s[2]\n"
+ "fmla z21.s, z8.s, z5.s[3]\n"
+ "fmla z20.s, z8.s, z4.s[0]\n"
+ "fmla z19.s, z8.s, z4.s[1]\n"
+ "fmla z18.s, z8.s, z4.s[2]\n"
+ "fmla z17.s, z8.s, z4.s[3]\n"
+ "fmla z9.s, z16.s, z3.s[0]\n"
+ "fmla z31.s, z16.s, z3.s[1]\n"
+ "fmla z30.s, z16.s, z3.s[2]\n"
+ "fmla z29.s, z16.s, z3.s[3]\n"
+ "fmla z28.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z2.s[1]\n"
+ "fmla z26.s, z16.s, z2.s[2]\n"
+ "fmla z25.s, z16.s, z2.s[3]\n"
+ "fmla z24.s, z16.s, z1.s[0]\n"
+ "fmla z23.s, z16.s, z1.s[1]\n"
+ "fmla z22.s, z16.s, z1.s[2]\n"
+ "fmla z21.s, z16.s, z1.s[3]\n"
+ "fmla z20.s, z16.s, z0.s[0]\n"
+ "fmla z19.s, z16.s, z0.s[1]\n"
+ "fmla z18.s, z16.s, z0.s[2]\n"
+ "fmla z17.s, z16.s, z0.s[3]\n"
+ "fmin z9.s, p1/M, z9.s, z10.s\n"
+ "fmin z31.s, p1/M, z31.s, z10.s\n"
+ "fmin z30.s, p1/M, z30.s, z10.s\n"
+ "fmin z29.s, p1/M, z29.s, z10.s\n"
+ "fmax z9.s, p1/M, z9.s, z11.s\n"
+ "st1w { z9.s }, p0, [x19, x28, LSL #2]\n"
+ "fmax z31.s, p1/M, z31.s, z11.s\n"
+ "fmax z30.s, p1/M, z30.s, z11.s\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "fmax z29.s, p1/M, z29.s, z11.s\n"
+ "st1w { z31.s }, p0, [x20, x28, LSL #2]\n"
+ "fmin z28.s, p1/M, z28.s, z10.s\n"
+ "fmin z27.s, p1/M, z27.s, z10.s\n"
+ "st1w { z30.s }, p0, [x21, x28, LSL #2]\n"
+ "fmin z26.s, p1/M, z26.s, z10.s\n"
+ "st1w { z29.s }, p0, [x22, x28, LSL #2]\n"
+ "fmin z25.s, p1/M, z25.s, z10.s\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
+ "fmin z24.s, p1/M, z24.s, z10.s\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
+ "fmax z28.s, p1/M, z28.s, z11.s\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
+ "fmax z27.s, p1/M, z27.s, z11.s\n"
+ "st1w { z28.s }, p0, [x23, x28, LSL #2]\n"
+ "fmax z26.s, p1/M, z26.s, z11.s\n"
+ "fmax z25.s, p1/M, z25.s, z11.s\n"
+ "st1w { z27.s }, p0, [x24, x28, LSL #2]\n"
+ "fmax z24.s, p1/M, z24.s, z11.s\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
+ "fmin z23.s, p1/M, z23.s, z10.s\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
+ "fmin z22.s, p1/M, z22.s, z10.s\n"
+ "st1w { z26.s }, p0, [x25, x28, LSL #2]\n"
+ "fmin z21.s, p1/M, z21.s, z10.s\n"
+ "st1w { z25.s }, p0, [x26, x28, LSL #2]\n"
+ "fmin z20.s, p1/M, z20.s, z10.s\n"
+ "st1w { z24.s }, p0, [x19, x28, LSL #2]\n"
+ "fmax z23.s, p1/M, z23.s, z11.s\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
+ "fmax z22.s, p1/M, z22.s, z11.s\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
+ "fmax z21.s, p1/M, z21.s, z11.s\n"
+ "st1w { z23.s }, p0, [x20, x28, LSL #2]\n"
+ "fmax z20.s, p1/M, z20.s, z11.s\n"
+ "fmin z19.s, p1/M, z19.s, z10.s\n"
+ "st1w { z22.s }, p0, [x21, x28, LSL #2]\n"
+ "fmin z18.s, p1/M, z18.s, z10.s\n"
+ "st1w { z21.s }, p0, [x22, x28, LSL #2]\n"
+ "fmin z17.s, p1/M, z17.s, z10.s\n"
+ "st1w { z20.s }, p0, [x23, x28, LSL #2]\n"
+ "fmax z19.s, p1/M, z19.s, z11.s\n"
+ "fmax z18.s, p1/M, z18.s, z11.s\n"
+ "st1w { z19.s }, p0, [x24, x28, LSL #2]\n"
+ "fmax z17.s, p1/M, z17.s, z11.s\n"
+ "st1w { z18.s }, p0, [x25, x28, LSL #2]\n"
+ "st1w { z17.s }, p0, [x26, x28, LSL #2]\n"
"b 7f\n"
"5:" // Output channel loop: Odd tail
- "fmla z16.s, z8.s, z1.s[0]\n"
- "fmla z17.s, z8.s, z1.s[1]\n"
- "ldp x24, x28, [x21], #0x10\n"
- "ldr x20, [%x[outptrs], #0x0]\n"
- "fmla z18.s, z8.s, z1.s[2]\n"
- "fmla z19.s, z8.s, z1.s[3]\n"
- "ld1rqw { z1.s }, p1/Z, [x24]\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
- "fmla z20.s, z8.s, z6.s[0]\n"
- "fmla z21.s, z8.s, z6.s[1]\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
- "fmla z22.s, z8.s, z6.s[2]\n"
- "fmla z23.s, z8.s, z6.s[3]\n"
+ "fmla z9.s, z8.s, z7.s[0]\n"
+ "ldp x24, x27, [x20], #0x10\n"
+ "fmla z31.s, z8.s, z7.s[1]\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "fmla z30.s, z8.s, z7.s[2]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "fmla z29.s, z8.s, z7.s[3]\n"
+ "ld1rqw { z7.s }, p1/Z, [x24]\n"
+ "fmla z28.s, z8.s, z6.s[0]\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "fmla z27.s, z8.s, z6.s[1]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
+ "fmla z26.s, z8.s, z6.s[2]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
+ "fmla z25.s, z8.s, z6.s[3]\n"
"ld1rqw { z6.s }, p1/Z, [x24, #16]\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "fmla z24.s, z8.s, z0.s[0]\n"
- "fmla z25.s, z8.s, z0.s[1]\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "fmla z26.s, z8.s, z0.s[2]\n"
- "fmla z27.s, z8.s, z0.s[3]\n"
- "ld1rqw { z0.s }, p1/Z, [x28]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
- "fmla z28.s, z8.s, z4.s[0]\n"
- "fmla z29.s, z8.s, z4.s[1]\n"
- "fmla z30.s, z8.s, z4.s[2]\n"
- "fmla z31.s, z8.s, z4.s[3]\n"
+ "fmla z24.s, z8.s, z5.s[0]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
+ "fmla z23.s, z8.s, z5.s[1]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
+ "fmla z22.s, z8.s, z5.s[2]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
+ "fmla z21.s, z8.s, z5.s[3]\n"
+ "ld1rqw { z5.s }, p1/Z, [x27]\n"
+ "fmla z20.s, z8.s, z4.s[0]\n"
+ "fmla z19.s, z8.s, z4.s[1]\n"
+ "fmla z18.s, z8.s, z4.s[2]\n"
+ "fmla z17.s, z8.s, z4.s[3]\n"
+ "ld1rqw { z4.s }, p1/Z, [x27, #16]\n"
+ "fmla z9.s, z16.s, z3.s[0]\n"
"ld1w { z8.s }, p1/Z, [%x[weights]]\n"
- "ld1rqw { z4.s }, p1/Z, [x28, #16]\n"
- "fmla z16.s, z11.s, z5.s[0]\n"
- "fmla z17.s, z11.s, z5.s[1]\n"
"addvl %x[weights], %x[weights], #1\n"
- "fmla z18.s, z11.s, z5.s[2]\n"
- "fmla z19.s, z11.s, z5.s[3]\n"
- "fmla z20.s, z11.s, z7.s[0]\n"
- "fmla z21.s, z11.s, z7.s[1]\n"
- "fmla z22.s, z11.s, z7.s[2]\n"
- "fmla z23.s, z11.s, z7.s[3]\n"
- "fmla z24.s, z11.s, z3.s[0]\n"
- "fmla z25.s, z11.s, z3.s[1]\n"
- "fmla z26.s, z11.s, z3.s[2]\n"
- "fmla z27.s, z11.s, z3.s[3]\n"
- "fmla z28.s, z11.s, z2.s[0]\n"
- "fmla z29.s, z11.s, z2.s[1]\n"
- "fmla z30.s, z11.s, z2.s[2]\n"
- "fmla z31.s, z11.s, z2.s[3]\n"
- "fmla z16.s, z8.s, z1.s[0]\n"
- "fmla z17.s, z8.s, z1.s[1]\n"
- "fmin z16.s, p1/M, z16.s, z13.s\n"
- "fmin z17.s, p1/M, z17.s, z13.s\n"
- "fmla z18.s, z8.s, z1.s[2]\n"
- "fmla z19.s, z8.s, z1.s[3]\n"
- "fmin z18.s, p1/M, z18.s, z13.s\n"
- "fmin z19.s, p1/M, z19.s, z13.s\n"
- "fmla z20.s, z8.s, z6.s[0]\n"
- "fmla z21.s, z8.s, z6.s[1]\n"
- "fmin z20.s, p1/M, z20.s, z13.s\n"
- "fmin z21.s, p1/M, z21.s, z13.s\n"
- "fmla z22.s, z8.s, z6.s[2]\n"
- "fmla z23.s, z8.s, z6.s[3]\n"
- "fmin z22.s, p1/M, z22.s, z13.s\n"
- "fmin z23.s, p1/M, z23.s, z13.s\n"
- "fmla z24.s, z8.s, z0.s[0]\n"
- "fmla z25.s, z8.s, z0.s[1]\n"
- "fmax z16.s, p1/M, z16.s, z10.s\n"
- "fmax z17.s, p1/M, z17.s, z10.s\n"
- "fmla z26.s, z8.s, z0.s[2]\n"
- "fmla z27.s, z8.s, z0.s[3]\n"
- "fmax z18.s, p1/M, z18.s, z10.s\n"
- "fmax z19.s, p1/M, z19.s, z10.s\n"
- "fmla z28.s, z8.s, z4.s[0]\n"
- "fmla z29.s, z8.s, z4.s[1]\n"
- "fmax z20.s, p1/M, z20.s, z10.s\n"
- "fmax z21.s, p1/M, z21.s, z10.s\n"
- "fmla z30.s, z8.s, z4.s[2]\n"
- "fmla z31.s, z8.s, z4.s[3]\n"
- "fmax z22.s, p1/M, z22.s, z10.s\n"
- "fmax z23.s, p1/M, z23.s, z10.s\n"
- "fmin z24.s, p1/M, z24.s, z13.s\n"
- "fmin z25.s, p1/M, z25.s, z13.s\n"
- "st1w { z16.s }, p0, [x20, x9, LSL #2]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "fmin z26.s, p1/M, z26.s, z13.s\n"
- "fmin z27.s, p1/M, z27.s, z13.s\n"
- "st1w { z17.s }, p0, [x21, x9, LSL #2]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
- "fmin z28.s, p1/M, z28.s, z13.s\n"
- "fmin z29.s, p1/M, z29.s, z13.s\n"
- "st1w { z18.s }, p0, [x22, x9, LSL #2]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
- "fmin z30.s, p1/M, z30.s, z13.s\n"
- "fmin z31.s, p1/M, z31.s, z13.s\n"
- "st1w { z19.s }, p0, [x23, x9, LSL #2]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
- "st1w { z20.s }, p0, [x24, x9, LSL #2]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
- "fmax z24.s, p1/M, z24.s, z10.s\n"
- "fmax z25.s, p1/M, z25.s, z10.s\n"
- "st1w { z21.s }, p0, [x25, x9, LSL #2]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
- "fmax z26.s, p1/M, z26.s, z10.s\n"
- "fmax z27.s, p1/M, z27.s, z10.s\n"
- "st1w { z22.s }, p0, [x26, x9, LSL #2]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
- "fmax z28.s, p1/M, z28.s, z10.s\n"
- "fmax z29.s, p1/M, z29.s, z10.s\n"
- "st1w { z23.s }, p0, [x27, x9, LSL #2]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
- "fmax z30.s, p1/M, z30.s, z10.s\n"
- "fmax z31.s, p1/M, z31.s, z10.s\n"
- "st1w { z24.s }, p0, [x20, x9, LSL #2]\n"
- "st1w { z25.s }, p0, [x21, x9, LSL #2]\n"
- "st1w { z26.s }, p0, [x22, x9, LSL #2]\n"
- "st1w { z27.s }, p0, [x23, x9, LSL #2]\n"
- "st1w { z28.s }, p0, [x24, x9, LSL #2]\n"
- "st1w { z29.s }, p0, [x25, x9, LSL #2]\n"
- "st1w { z30.s }, p0, [x26, x9, LSL #2]\n"
- "st1w { z31.s }, p0, [x27, x9, LSL #2]\n"
+ "fmla z31.s, z16.s, z3.s[1]\n"
+ "fmla z30.s, z16.s, z3.s[2]\n"
+ "fmla z29.s, z16.s, z3.s[3]\n"
+ "fmla z28.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z2.s[1]\n"
+ "fmla z26.s, z16.s, z2.s[2]\n"
+ "fmla z25.s, z16.s, z2.s[3]\n"
+ "fmla z24.s, z16.s, z1.s[0]\n"
+ "fmla z23.s, z16.s, z1.s[1]\n"
+ "fmla z22.s, z16.s, z1.s[2]\n"
+ "fmla z21.s, z16.s, z1.s[3]\n"
+ "fmla z20.s, z16.s, z0.s[0]\n"
+ "fmla z19.s, z16.s, z0.s[1]\n"
+ "fmla z18.s, z16.s, z0.s[2]\n"
+ "fmla z17.s, z16.s, z0.s[3]\n"
+ "fmla z9.s, z8.s, z7.s[0]\n"
+ "fmla z31.s, z8.s, z7.s[1]\n"
+ "fmla z30.s, z8.s, z7.s[2]\n"
+ "fmla z29.s, z8.s, z7.s[3]\n"
+ "fmla z28.s, z8.s, z6.s[0]\n"
+ "fmla z27.s, z8.s, z6.s[1]\n"
+ "fmla z26.s, z8.s, z6.s[2]\n"
+ "fmla z25.s, z8.s, z6.s[3]\n"
+ "fmla z24.s, z8.s, z5.s[0]\n"
+ "fmla z23.s, z8.s, z5.s[1]\n"
+ "fmla z22.s, z8.s, z5.s[2]\n"
+ "fmla z21.s, z8.s, z5.s[3]\n"
+ "fmla z20.s, z8.s, z4.s[0]\n"
+ "fmla z19.s, z8.s, z4.s[1]\n"
+ "fmla z18.s, z8.s, z4.s[2]\n"
+ "fmla z17.s, z8.s, z4.s[3]\n"
+ "fmin z9.s, p1/M, z9.s, z10.s\n"
+ "fmin z31.s, p1/M, z31.s, z10.s\n"
+ "fmin z30.s, p1/M, z30.s, z10.s\n"
+ "fmin z29.s, p1/M, z29.s, z10.s\n"
+ "fmax z9.s, p1/M, z9.s, z11.s\n"
+ "st1w { z9.s }, p0, [x19, x28, LSL #2]\n"
+ "fmax z31.s, p1/M, z31.s, z11.s\n"
+ "fmax z30.s, p1/M, z30.s, z11.s\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "fmax z29.s, p1/M, z29.s, z11.s\n"
+ "st1w { z31.s }, p0, [x20, x28, LSL #2]\n"
+ "fmin z28.s, p1/M, z28.s, z10.s\n"
+ "fmin z27.s, p1/M, z27.s, z10.s\n"
+ "st1w { z30.s }, p0, [x21, x28, LSL #2]\n"
+ "fmin z26.s, p1/M, z26.s, z10.s\n"
+ "st1w { z29.s }, p0, [x22, x28, LSL #2]\n"
+ "fmin z25.s, p1/M, z25.s, z10.s\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
+ "fmin z24.s, p1/M, z24.s, z10.s\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
+ "fmax z28.s, p1/M, z28.s, z11.s\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
+ "fmax z27.s, p1/M, z27.s, z11.s\n"
+ "st1w { z28.s }, p0, [x23, x28, LSL #2]\n"
+ "fmax z26.s, p1/M, z26.s, z11.s\n"
+ "fmax z25.s, p1/M, z25.s, z11.s\n"
+ "st1w { z27.s }, p0, [x24, x28, LSL #2]\n"
+ "fmax z24.s, p1/M, z24.s, z11.s\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
+ "fmin z23.s, p1/M, z23.s, z10.s\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
+ "fmin z22.s, p1/M, z22.s, z10.s\n"
+ "st1w { z26.s }, p0, [x25, x28, LSL #2]\n"
+ "fmin z21.s, p1/M, z21.s, z10.s\n"
+ "st1w { z25.s }, p0, [x26, x28, LSL #2]\n"
+ "fmin z20.s, p1/M, z20.s, z10.s\n"
+ "st1w { z24.s }, p0, [x19, x28, LSL #2]\n"
+ "fmax z23.s, p1/M, z23.s, z11.s\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
+ "fmax z22.s, p1/M, z22.s, z11.s\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
+ "fmax z21.s, p1/M, z21.s, z11.s\n"
+ "st1w { z23.s }, p0, [x20, x28, LSL #2]\n"
+ "fmax z20.s, p1/M, z20.s, z11.s\n"
+ "fmin z19.s, p1/M, z19.s, z10.s\n"
+ "st1w { z22.s }, p0, [x21, x28, LSL #2]\n"
+ "fmin z18.s, p1/M, z18.s, z10.s\n"
+ "st1w { z21.s }, p0, [x22, x28, LSL #2]\n"
+ "fmin z17.s, p1/M, z17.s, z10.s\n"
+ "st1w { z20.s }, p0, [x23, x28, LSL #2]\n"
+ "fmax z19.s, p1/M, z19.s, z11.s\n"
+ "fmax z18.s, p1/M, z18.s, z11.s\n"
+ "st1w { z19.s }, p0, [x24, x28, LSL #2]\n"
+ "fmax z17.s, p1/M, z17.s, z11.s\n"
+ "st1w { z18.s }, p0, [x25, x28, LSL #2]\n"
+ "st1w { z17.s }, p0, [x26, x28, LSL #2]\n"
"b 7f\n"
"6:" // Output channel loop: Single kernel point
- "fmla z16.s, z8.s, z1.s[0]\n"
- "fmla z17.s, z8.s, z1.s[1]\n"
- "fmin z16.s, p1/M, z16.s, z13.s\n"
- "fmin z17.s, p1/M, z17.s, z13.s\n"
- "fmla z18.s, z8.s, z1.s[2]\n"
- "fmla z19.s, z8.s, z1.s[3]\n"
- "fmin z18.s, p1/M, z18.s, z13.s\n"
- "fmin z19.s, p1/M, z19.s, z13.s\n"
- "fmla z20.s, z8.s, z6.s[0]\n"
- "fmla z21.s, z8.s, z6.s[1]\n"
- "fmin z20.s, p1/M, z20.s, z13.s\n"
- "fmin z21.s, p1/M, z21.s, z13.s\n"
- "fmla z22.s, z8.s, z6.s[2]\n"
- "fmla z23.s, z8.s, z6.s[3]\n"
- "fmin z22.s, p1/M, z22.s, z13.s\n"
- "fmin z23.s, p1/M, z23.s, z13.s\n"
- "fmla z24.s, z8.s, z0.s[0]\n"
- "fmla z25.s, z8.s, z0.s[1]\n"
- "ldr x20, [%x[outptrs], #0x0]\n"
- "ldr x21, [%x[outptrs], #0x8]\n"
- "fmla z26.s, z8.s, z0.s[2]\n"
- "fmla z27.s, z8.s, z0.s[3]\n"
- "ldr x22, [%x[outptrs], #0x10]\n"
- "ldr x23, [%x[outptrs], #0x18]\n"
- "fmla z28.s, z8.s, z4.s[0]\n"
- "fmla z29.s, z8.s, z4.s[1]\n"
- "ldr x24, [%x[outptrs], #0x20]\n"
- "ldr x25, [%x[outptrs], #0x28]\n"
- "fmla z30.s, z8.s, z4.s[2]\n"
- "fmla z31.s, z8.s, z4.s[3]\n"
- "ldr x26, [%x[outptrs], #0x30]\n"
- "ldr x27, [%x[outptrs], #0x38]\n"
- "fmax z16.s, p1/M, z16.s, z10.s\n"
- "fmax z17.s, p1/M, z17.s, z10.s\n"
- "st1w { z16.s }, p0, [x20, x9, LSL #2]\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "fmax z18.s, p1/M, z18.s, z10.s\n"
- "fmax z19.s, p1/M, z19.s, z10.s\n"
- "st1w { z17.s }, p0, [x21, x9, LSL #2]\n"
- "ldr x21, [%x[outptrs], #0x48]\n"
- "fmax z20.s, p1/M, z20.s, z10.s\n"
- "fmax z21.s, p1/M, z21.s, z10.s\n"
- "st1w { z18.s }, p0, [x22, x9, LSL #2]\n"
- "ldr x22, [%x[outptrs], #0x50]\n"
- "fmax z22.s, p1/M, z22.s, z10.s\n"
- "fmax z23.s, p1/M, z23.s, z10.s\n"
- "st1w { z19.s }, p0, [x23, x9, LSL #2]\n"
- "ldr x23, [%x[outptrs], #0x58]\n"
- "fmin z24.s, p1/M, z24.s, z13.s\n"
- "fmin z25.s, p1/M, z25.s, z13.s\n"
- "st1w { z20.s }, p0, [x24, x9, LSL #2]\n"
- "ldr x24, [%x[outptrs], #0x60]\n"
- "fmin z26.s, p1/M, z26.s, z13.s\n"
- "fmin z27.s, p1/M, z27.s, z13.s\n"
- "st1w { z21.s }, p0, [x25, x9, LSL #2]\n"
- "ldr x25, [%x[outptrs], #0x68]\n"
- "fmin z28.s, p1/M, z28.s, z13.s\n"
- "fmin z29.s, p1/M, z29.s, z13.s\n"
- "st1w { z22.s }, p0, [x26, x9, LSL #2]\n"
- "ldr x26, [%x[outptrs], #0x70]\n"
- "fmin z30.s, p1/M, z30.s, z13.s\n"
- "fmin z31.s, p1/M, z31.s, z13.s\n"
- "st1w { z23.s }, p0, [x27, x9, LSL #2]\n"
- "ldr x27, [%x[outptrs], #0x78]\n"
- "fmax z24.s, p1/M, z24.s, z10.s\n"
- "fmax z25.s, p1/M, z25.s, z10.s\n"
- "st1w { z24.s }, p0, [x20, x9, LSL #2]\n"
- "fmax z26.s, p1/M, z26.s, z10.s\n"
- "fmax z27.s, p1/M, z27.s, z10.s\n"
- "st1w { z25.s }, p0, [x21, x9, LSL #2]\n"
- "fmax z28.s, p1/M, z28.s, z10.s\n"
- "fmax z29.s, p1/M, z29.s, z10.s\n"
- "st1w { z26.s }, p0, [x22, x9, LSL #2]\n"
- "fmax z30.s, p1/M, z30.s, z10.s\n"
- "fmax z31.s, p1/M, z31.s, z10.s\n"
- "st1w { z27.s }, p0, [x23, x9, LSL #2]\n"
- "st1w { z28.s }, p0, [x24, x9, LSL #2]\n"
- "st1w { z29.s }, p0, [x25, x9, LSL #2]\n"
- "st1w { z30.s }, p0, [x26, x9, LSL #2]\n"
- "st1w { z31.s }, p0, [x27, x9, LSL #2]\n"
+ "fmla z9.s, z8.s, z7.s[0]\n"
+ "ldr x19, [%x[outptrs], #0x0]\n"
+ "fmla z31.s, z8.s, z7.s[1]\n"
+ "ldr x20, [%x[outptrs], #0x8]\n"
+ "fmla z30.s, z8.s, z7.s[2]\n"
+ "ldr x21, [%x[outptrs], #0x10]\n"
+ "fmla z29.s, z8.s, z7.s[3]\n"
+ "ldr x22, [%x[outptrs], #0x18]\n"
+ "fmla z28.s, z8.s, z6.s[0]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
+ "fmla z27.s, z8.s, z6.s[1]\n"
+ "ldr x24, [%x[outptrs], #0x28]\n"
+ "fmla z26.s, z8.s, z6.s[2]\n"
+ "ldr x25, [%x[outptrs], #0x30]\n"
+ "fmla z25.s, z8.s, z6.s[3]\n"
+ "ldr x26, [%x[outptrs], #0x38]\n"
+ "fmla z24.s, z8.s, z5.s[0]\n"
+ "fmla z23.s, z8.s, z5.s[1]\n"
+ "fmla z22.s, z8.s, z5.s[2]\n"
+ "fmla z21.s, z8.s, z5.s[3]\n"
+ "fmla z20.s, z8.s, z4.s[0]\n"
+ "fmla z19.s, z8.s, z4.s[1]\n"
+ "fmla z18.s, z8.s, z4.s[2]\n"
+ "fmla z17.s, z8.s, z4.s[3]\n"
+ "fmin z9.s, p1/M, z9.s, z10.s\n"
+ "fmin z31.s, p1/M, z31.s, z10.s\n"
+ "fmin z30.s, p1/M, z30.s, z10.s\n"
+ "fmin z29.s, p1/M, z29.s, z10.s\n"
+ "fmax z9.s, p1/M, z9.s, z11.s\n"
+ "st1w { z9.s }, p0, [x19, x28, LSL #2]\n"
+ "fmax z31.s, p1/M, z31.s, z11.s\n"
+ "fmax z30.s, p1/M, z30.s, z11.s\n"
+ "ldr x19, [%x[outptrs], #0x40]\n"
+ "fmax z29.s, p1/M, z29.s, z11.s\n"
+ "st1w { z31.s }, p0, [x20, x28, LSL #2]\n"
+ "fmin z28.s, p1/M, z28.s, z10.s\n"
+ "fmin z27.s, p1/M, z27.s, z10.s\n"
+ "st1w { z30.s }, p0, [x21, x28, LSL #2]\n"
+ "fmin z26.s, p1/M, z26.s, z10.s\n"
+ "st1w { z29.s }, p0, [x22, x28, LSL #2]\n"
+ "fmin z25.s, p1/M, z25.s, z10.s\n"
+ "ldr x20, [%x[outptrs], #0x48]\n"
+ "fmin z24.s, p1/M, z24.s, z10.s\n"
+ "ldr x21, [%x[outptrs], #0x50]\n"
+ "fmax z28.s, p1/M, z28.s, z11.s\n"
+ "ldr x22, [%x[outptrs], #0x58]\n"
+ "fmax z27.s, p1/M, z27.s, z11.s\n"
+ "st1w { z28.s }, p0, [x23, x28, LSL #2]\n"
+ "fmax z26.s, p1/M, z26.s, z11.s\n"
+ "fmax z25.s, p1/M, z25.s, z11.s\n"
+ "st1w { z27.s }, p0, [x24, x28, LSL #2]\n"
+ "fmax z24.s, p1/M, z24.s, z11.s\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
+ "fmin z23.s, p1/M, z23.s, z10.s\n"
+ "ldr x24, [%x[outptrs], #0x68]\n"
+ "fmin z22.s, p1/M, z22.s, z10.s\n"
+ "st1w { z26.s }, p0, [x25, x28, LSL #2]\n"
+ "fmin z21.s, p1/M, z21.s, z10.s\n"
+ "st1w { z25.s }, p0, [x26, x28, LSL #2]\n"
+ "fmin z20.s, p1/M, z20.s, z10.s\n"
+ "st1w { z24.s }, p0, [x19, x28, LSL #2]\n"
+ "fmax z23.s, p1/M, z23.s, z11.s\n"
+ "ldr x25, [%x[outptrs], #0x70]\n"
+ "fmax z22.s, p1/M, z22.s, z11.s\n"
+ "ldr x26, [%x[outptrs], #0x78]\n"
+ "fmax z21.s, p1/M, z21.s, z11.s\n"
+ "st1w { z23.s }, p0, [x20, x28, LSL #2]\n"
+ "fmax z20.s, p1/M, z20.s, z11.s\n"
+ "fmin z19.s, p1/M, z19.s, z10.s\n"
+ "st1w { z22.s }, p0, [x21, x28, LSL #2]\n"
+ "fmin z18.s, p1/M, z18.s, z10.s\n"
+ "st1w { z21.s }, p0, [x22, x28, LSL #2]\n"
+ "fmin z17.s, p1/M, z17.s, z10.s\n"
+ "st1w { z20.s }, p0, [x23, x28, LSL #2]\n"
+ "fmax z19.s, p1/M, z19.s, z11.s\n"
+ "fmax z18.s, p1/M, z18.s, z11.s\n"
+ "st1w { z19.s }, p0, [x24, x28, LSL #2]\n"
+ "fmax z17.s, p1/M, z17.s, z11.s\n"
+ "st1w { z18.s }, p0, [x25, x28, LSL #2]\n"
+ "st1w { z17.s }, p0, [x26, x28, LSL #2]\n"
"7:" // Output channel loop: Done
- "incw x9\n"
- "whilelt p0.s, x9, %x[n_output_channels]\n"
+ "incw x28\n"
+ "whilelt p0.s, x28, %x[n_output_channels]\n"
"b.any 1b\n"
: [weights] "+&r" (weights)
: [bias] "r" (bias), [inptrs] "r" (inptrs), [kernel_points] "r" ((uint64_t) kernel_points), [minmax_vals] "r" (minmax_vals), [n_output_channels] "r" ((uint64_t) n_output_channels), [outptrs] "r" (outptrs)
- : "cc", "memory", "p0", "p1", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z10", "z11", "z13", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
index 4eae5961a0..800803770a 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,461 +41,421 @@ void sve_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(
)
{
__asm__ __volatile__(
- "mov x13, #0x0\n"
- "whilelt p2.b, x13, %x[n_channels]\n"
- "ldp x12, x11, [%x[inptrs], #0x0]\n"
- "ldp x10, x9, [%x[inptrs], #0x10]\n"
- "ldp x28, x27, [%x[inptrs], #0x20]\n"
- "ldp x26, x25, [%x[inptrs], #0x30]\n"
- "mov x20, #0x1\n"
- "ptrue p1.b\n"
- "ldp x24, x23, [%x[outptrs], #0x0]\n"
- "ldp x22, x21, [%x[outptrs], #0x10]\n"
- "orr x20, x20, #0x100\n"
- "orr x20, x20, #0x10000\n"
- "ld1b { z14.b }, p2/Z, [x12, x13]\n"
- "ld1b { z13.b }, p2/Z, [x11, x13]\n"
- "dup z12.s, w20\n"
- "mov x20, #0x0\n"
- "ldp x12, x11, [%x[inptrs], #0x40]\n"
- "ld1b { z11.b }, p2/Z, [x10, x13]\n"
- "zip2 z10.b, z14.b, z11.b\n"
- "zip1 z14.b, z14.b, z11.b\n"
- "ld1b { z9.b }, p2/Z, [x9, x13]\n"
- "ldp x10, x9, [%x[inptrs], #0x50]\n"
- "zip1 z11.b, z13.b, z9.b\n"
- "zip2 z9.b, z13.b, z9.b\n"
- "ld1b { z8.b }, p2/Z, [x28, x13]\n"
- "ld1b { z7.b }, p2/Z, [x27, x13]\n"
- "zip2 z13.b, z14.b, z11.b\n"
- "zip1 z14.b, z14.b, z11.b\n"
- "ldp x28, x27, [%x[inptrs], #0x60]\n"
- "ld1b { z6.b }, p2/Z, [x26, x13]\n"
- "zip1 z11.b, z10.b, z9.b\n"
- "zip2 z9.b, z10.b, z9.b\n"
- "ld1b { z5.b }, p2/Z, [x25, x13]\n"
- "ldp x26, x25, [%x[inptrs], #0x70]\n"
- "zip2 z4.b, z8.b, z6.b\n"
- "zip1 z8.b, z8.b, z6.b\n"
- "ld1b { z3.b }, p2/Z, [x12, x13]\n"
- "ld1b { z2.b }, p2/Z, [x11, x13]\n"
- "zip1 z6.b, z7.b, z5.b\n"
- "zip2 z5.b, z7.b, z5.b\n"
- "ld1b { z1.b }, p2/Z, [x10, x13]\n"
- "ld1b { z0.b }, p2/Z, [x9, x13]\n"
- "zip2 z31.b, z3.b, z1.b\n"
- "zip1 z3.b, z3.b, z1.b\n"
- "ld1b { z30.b }, p2/Z, [x28, x13]\n"
- "ld1b { z29.b }, p2/Z, [x27, x13]\n"
- "zip1 z1.b, z2.b, z0.b\n"
- "zip2 z0.b, z2.b, z0.b\n"
- "ld1b { z28.b }, p2/Z, [x26, x13]\n"
- "ld1b { z27.b }, p2/Z, [x25, x13]\n"
- "zip2 z26.b, z30.b, z28.b\n"
- "zip1 z30.b, z30.b, z28.b\n"
- "zip1 z28.b, z29.b, z27.b\n"
- "zip2 z27.b, z29.b, z27.b\n"
- "ld1w { z10.s }, p1/Z, [%x[params]]\n"
- "ld1rw { z25.s }, p1/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
- "ld1rw { z24.s }, p1/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
- "ld1rw { z23.s }, p1/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
- "zip2 z7.b, z8.b, z6.b\n"
- "zip1 z8.b, z8.b, z6.b\n"
- "ld1rw { z22.s }, p1/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
- "ldp x12, x11, [%x[inptrs], #0x0]\n"
- "zip1 z6.b, z4.b, z5.b\n"
- "zip2 z5.b, z4.b, z5.b\n"
- "ldp x10, x9, [%x[inptrs], #0x10]\n"
- "ldp x28, x27, [%x[inptrs], #0x20]\n"
- "zip2 z2.b, z3.b, z1.b\n"
- "zip1 z3.b, z3.b, z1.b\n"
- "ldp x26, x25, [%x[inptrs], #0x30]\n"
- "zip1 z1.b, z31.b, z0.b\n"
- "zip2 z0.b, z31.b, z0.b\n"
- "ld1b { z21.b }, p1/Z, [%x[params], #1, MUL VL]\n"
- "zip2 z29.b, z30.b, z28.b\n"
- "zip1 z30.b, z30.b, z28.b\n"
- "ld1b { z16.b }, p1/Z, [%x[params], #2, MUL VL]\n"
- "ld1b { z20.b }, p1/Z, [%x[params], #3, MUL VL]\n"
- "zip1 z28.b, z26.b, z27.b\n"
- "zip2 z27.b, z26.b, z27.b\n"
- "addvl %x[params], %x[params], #4\n"
- "mov z4.d, z10.d\n"
- "mov z31.d, z10.d\n"
- "mov z26.d, z10.d\n"
+ "ldp x11, x10, [%x[inptrs], #0x0]\n"
+ "ptrue p2.b\n"
+ "ldp x9, x28, [%x[inptrs], #0x10]\n"
+ "addvl SP, SP, #-8\n"
+ "ldp x27, x26, [%x[inptrs], #0x20]\n"
+ "mov x19, #0x1\n"
+ "ldp x25, x24, [%x[inptrs], #0x30]\n"
+ "orr x19, x19, #0x100\n"
+ "ldp x23, x22, [%x[outptrs], #0x0]\n"
+ "orr x19, x19, #0x10000\n"
+ "dup z12.s, w19\n"
+ "ldp x21, x20, [%x[outptrs], #0x10]\n"
+ "mov x19, #0x0\n"
+ "ld1rw { z11.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
+ "whilelt p1.b, x19, %x[n_channels]\n"
+ "ld1rw { z10.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+ "ld1rw { z9.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+ "ld1rw { z8.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
"1:" // Loop
- "mov z19.s, #0x0\n"
- "sdot z19.s, z12.b, z8.b\n"
- "sdot z10.s, z21.b, z14.b\n"
- "whilelt p0.s, x20, %x[n_channels]\n"
- "sdot z19.s, z12.b, z3.b\n"
- "sdot z31.s, z21.b, z8.b\n"
- "incw x13, ALL, MUL #4\n"
- "sdot z10.s, z16.b, z8.b\n"
- "ext z8.b, z8.b, z8.b, #0x1\n"
- "movprfx z18, z19\n sdot z18.s, z12.b, z30.b\n"
- "sdot z19.s, z12.b, z14.b\n"
- "ext z14.b, z14.b, z14.b, #0x1\n"
- "sdot z31.s, z16.b, z3.b\n"
- "sdot z10.s, z20.b, z3.b\n"
+ "mov z7.s, #0x0\n"
+ "ld1b { z19.b }, p1/Z, [x11, x19]\n"
+ "whilelt p0.s, x19, %x[n_channels]\n"
+ "mov z6.s, #0x0\n"
+ "ld1b { z18.b }, p1/Z, [x10, x19]\n"
+ "ldp x11, x10, [%x[inptrs], #0x40]\n"
+ "ld1b { z16.b }, p1/Z, [x9, x19]\n"
+ "zip1 z21.b, z19.b, z16.b\n"
+ "ld1b { z17.b }, p1/Z, [x28, x19]\n"
+ "zip2 z19.b, z19.b, z16.b\n"
+ "ldp x9, x28, [%x[inptrs], #0x50]\n"
+ "ld1b { z23.b }, p1/Z, [x27, x19]\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "ld1b { z20.b }, p1/Z, [x26, x19]\n"
+ "zip2 z18.b, z18.b, z17.b\n"
+ "ldp x27, x26, [%x[inptrs], #0x60]\n"
+ "zip1 z5.b, z21.b, z16.b\n"
+ "ld1b { z17.b }, p1/Z, [x25, x19]\n"
+ "zip2 z4.b, z21.b, z16.b\n"
+ "ld1b { z16.b }, p1/Z, [x24, x19]\n"
+ "zip1 z29.b, z19.b, z18.b\n"
+ "ldp x25, x24, [%x[inptrs], #0x70]\n"
+ "zip2 z28.b, z19.b, z18.b\n"
+ "ld1b { z22.b }, p1/Z, [x11, x19]\n"
+ "zip1 z19.b, z23.b, z17.b\n"
+ "ld1b { z21.b }, p1/Z, [x10, x19]\n"
+ "zip2 z27.b, z23.b, z17.b\n"
+ "ldp x11, x10, [%x[inptrs], #0x0]\n"
+ "zip1 z18.b, z20.b, z16.b\n"
+ "ld1b { z17.b }, p1/Z, [x9, x19]\n"
+ "zip2 z20.b, z20.b, z16.b\n"
+ "ld1b { z16.b }, p1/Z, [x28, x19]\n"
+ "zip1 z3.b, z19.b, z18.b\n"
+ "ldp x9, x28, [%x[inptrs], #0x10]\n"
+ "zip2 z2.b, z19.b, z18.b\n"
+ "ld1b { z19.b }, p1/Z, [x27, x19]\n"
+ "zip1 z26.b, z22.b, z17.b\n"
+ "ld1b { z25.b }, p1/Z, [x26, x19]\n"
+ "zip2 z24.b, z22.b, z17.b\n"
+ "ldp x27, x26, [%x[inptrs], #0x20]\n"
+ "zip1 z23.b, z21.b, z16.b\n"
+ "ld1b { z18.b }, p1/Z, [x25, x19]\n"
+ "zip2 z22.b, z21.b, z16.b\n"
+ "ld1b { z21.b }, p1/Z, [x24, x19]\n"
+ "zip1 z17.b, z27.b, z20.b\n"
+ "ldp x25, x24, [%x[inptrs], #0x30]\n"
+ "zip2 z16.b, z27.b, z20.b\n"
+ "st1b { z29.b }, p2, [SP]\n"
+ "zip1 z20.b, z19.b, z18.b\n"
+ "st1b { z28.b }, p2, [SP, #1, MUL VL]\n"
+ "zip2 z19.b, z19.b, z18.b\n"
+ "st1b { z17.b }, p2, [SP, #2, MUL VL]\n"
+ "zip1 z18.b, z25.b, z21.b\n"
+ "st1b { z16.b }, p2, [SP, #3, MUL VL]\n"
+ "zip2 z17.b, z25.b, z21.b\n"
+ "ld1w { z1.s }, p2/Z, [%x[params]]\n"
+ "zip1 z0.b, z26.b, z23.b\n"
+ "ld1b { z31.b }, p2/Z, [%x[params], #1, MUL VL]\n"
+ "zip2 z30.b, z26.b, z23.b\n"
+ "ld1b { z29.b }, p2/Z, [%x[params], #2, MUL VL]\n"
+ "zip1 z16.b, z24.b, z22.b\n"
+ "st1b { z16.b }, p2, [SP, #4, MUL VL]\n"
+ "zip2 z16.b, z24.b, z22.b\n"
+ "st1b { z16.b }, p2, [SP, #5, MUL VL]\n"
+ "zip1 z28.b, z20.b, z18.b\n"
+ "ld1b { z27.b }, p2/Z, [%x[params], #3, MUL VL]\n"
+ "zip2 z26.b, z20.b, z18.b\n"
+ "ld1w { z25.s }, p2/Z, [%x[params], #4, MUL VL]\n"
+ "zip1 z16.b, z19.b, z17.b\n"
+ "st1b { z16.b }, p2, [SP, #6, MUL VL]\n"
+ "zip2 z16.b, z19.b, z17.b\n"
+ "st1b { z16.b }, p2, [SP, #7, MUL VL]\n"
+ "mov z24.d, z1.d\n"
+ "ld1w { z23.s }, p2/Z, [%x[params], #5, MUL VL]\n"
+ "mov z22.d, z1.d\n"
+ "mov z21.d, z1.d\n"
+ "sdot z1.s, z31.b, z5.b\n"
+ "sdot z22.s, z31.b, z3.b\n"
+ "sdot z7.s, z12.b, z3.b\n"
+ "sdot z1.s, z29.b, z3.b\n"
"ext z3.b, z3.b, z3.b, #0x1\n"
- "sdot z4.s, z21.b, z14.b\n"
- "sdot z26.s, z21.b, z8.b\n"
- "mov z17.s, #0x0\n"
- "sdot z17.s, z12.b, z8.b\n"
- "sdot z17.s, z12.b, z3.b\n"
- "sdot z31.s, z20.b, z30.b\n"
- "ext z30.b, z30.b, z30.b, #0x1\n"
- "sdot z4.s, z16.b, z8.b\n"
- "sdot z26.s, z16.b, z3.b\n"
- "ld1w { z8.s }, p1/Z, [%x[params], #1, MUL VL]\n"
- "mls z10.s, p1/M, z19.s, z23.s\n"
- "movprfx z16, z17\n sdot z16.s, z12.b, z30.b\n"
- "mov z19.s, #0x0\n"
- "sdot z17.s, z12.b, z14.b\n"
- "ld1w { z14.s }, p1/Z, [%x[params]]\n"
- "sdot z4.s, z20.b, z3.b\n"
- ".inst 0x04ae754a // sqrdmulh z10.s, z10.s, z14.s\n"
- "sdot z26.s, z20.b, z30.b\n"
- "mls z4.s, p1/M, z17.s, z23.s\n"
- "and z21.d, z10.d, z8.d\n"
- "mls z31.s, p1/M, z18.s, z23.s\n"
- "mls z26.s, p1/M, z16.s, z23.s\n"
- "asr z21.s, z21.s, #0x1f\n"
- ".inst 0x04ae7484 // sqrdmulh z4.s, z4.s, z14.s\n"
- ".inst 0x04ae77ff // sqrdmulh z31.s, z31.s, z14.s\n"
- "sdot z19.s, z12.b, z7.b\n"
- ".inst 0x04ae775a // sqrdmulh z26.s, z26.s, z14.s\n"
- "sqadd z10.s, z10.s, z21.s\n"
- ".inst 0x4482850a // srshl z10.s, p1/M, z10.s, z8.s\n"
- "sdot z19.s, z12.b, z2.b\n"
- "and z16.d, z4.d, z8.d\n"
- "and z20.d, z31.d, z8.d\n"
- "movprfx z18, z19\n sdot z18.s, z12.b, z29.b\n"
- "ld1w { z14.s }, p1/Z, [%x[params], #6, MUL VL]\n"
- "and z21.d, z26.d, z8.d\n"
+ "sdot z22.s, z29.b, z0.b\n"
+ "sdot z7.s, z12.b, z0.b\n"
+ "sdot z1.s, z27.b, z0.b\n"
+ "ext z0.b, z0.b, z0.b, #0x1\n"
+ "sdot z22.s, z27.b, z28.b\n"
+ "mov z20.d, z7.d\n"
+ "sdot z7.s, z12.b, z5.b\n"
+ "sdot z20.s, z12.b, z28.b\n"
+ "ext z5.b, z5.b, z5.b, #0x1\n"
+ "ext z28.b, z28.b, z28.b, #0x1\n"
+ "sdot z21.s, z31.b, z3.b\n"
+ "sdot z6.s, z12.b, z3.b\n"
+ "sdot z24.s, z31.b, z5.b\n"
+ "ld1b { z31.b }, p2/Z, [%x[params], #7, MUL VL]\n"
+ "mls z1.s, p2/M, z7.s, z9.s\n"
+ "sdot z21.s, z29.b, z0.b\n"
+ "sdot z6.s, z12.b, z0.b\n"
+ "sdot z24.s, z29.b, z3.b\n"
+ "ld1b { z3.b }, p2/Z, [SP, #2, MUL VL]\n"
+ ".inst 0x04b97421 // sqrdmulh z1.s, z1.s, z25.s\n"
+ "sdot z21.s, z27.b, z28.b\n"
+ "mov z19.d, z6.d\n"
+ "sdot z24.s, z27.b, z0.b\n"
+ "ld1b { z0.b }, p2/Z, [SP, #4, MUL VL]\n"
+ "sdot z6.s, z12.b, z5.b\n"
+ "ld1b { z5.b }, p2/Z, [SP]\n"
+ "sdot z19.s, z12.b, z28.b\n"
+ "ld1b { z28.b }, p2/Z, [SP, #6, MUL VL]\n"
+ "and z16.d, z1.d, z23.d\n"
"asr z16.s, z16.s, #0x1f\n"
- "sdot z19.s, z12.b, z13.b\n"
- "asr z20.s, z20.s, #0x1f\n"
- "asr z21.s, z21.s, #0x1f\n"
- "sqadd z4.s, z4.s, z16.s\n"
- "sqadd z31.s, z31.s, z20.s\n"
- ".inst 0x44828504 // srshl z4.s, p1/M, z4.s, z8.s\n"
- ".inst 0x4482851f // srshl z31.s, p1/M, z31.s, z8.s\n"
- "sqadd z26.s, z26.s, z21.s\n"
- "add z10.s, z10.s, z22.s\n"
- ".inst 0x4482851a // srshl z26.s, p1/M, z26.s, z8.s\n"
- "smax z10.s, p1/M, z10.s, z25.s\n"
- "add z4.s, z4.s, z22.s\n"
- "add z31.s, z31.s, z22.s\n"
- "smin z10.s, p1/M, z10.s, z24.s\n"
- "smax z4.s, p1/M, z4.s, z25.s\n"
- "add z26.s, z26.s, z22.s\n"
- "smax z31.s, p1/M, z31.s, z25.s\n"
- "smax z26.s, p1/M, z26.s, z25.s\n"
- "st1b { z10.s }, p0, [x24, x20]\n"
- "ld1w { z10.s }, p1/Z, [%x[params], #2, MUL VL]\n"
- "ld1b { z21.b }, p1/Z, [%x[params], #3, MUL VL]\n"
- "smin z4.s, p1/M, z4.s, z24.s\n"
- "smin z31.s, p1/M, z31.s, z24.s\n"
- "smin z26.s, p1/M, z26.s, z24.s\n"
- "st1b { z4.s }, p0, [x23, x20]\n"
- "mov z4.d, z10.d\n"
- "ld1b { z16.b }, p1/Z, [%x[params], #4, MUL VL]\n"
- "st1b { z31.s }, p0, [x22, x20]\n"
- "mov z31.d, z10.d\n"
- "sdot z31.s, z21.b, z7.b\n"
- "ld1b { z20.b }, p1/Z, [%x[params], #5, MUL VL]\n"
- "st1b { z26.s }, p0, [x21, x20]\n"
- "mov z26.d, z10.d\n"
- "sdot z10.s, z21.b, z13.b\n"
- "sdot z10.s, z16.b, z7.b\n"
- "ext z13.b, z13.b, z13.b, #0x1\n"
- "ext z7.b, z7.b, z7.b, #0x1\n"
- "sdot z4.s, z21.b, z13.b\n"
- "ld1w { z8.s }, p1/Z, [%x[params], #7, MUL VL]\n"
- "mov z17.s, #0x0\n"
- "sdot z26.s, z21.b, z7.b\n"
- "sdot z17.s, z12.b, z7.b\n"
- "incw x20\n"
- "sdot z31.s, z16.b, z2.b\n"
- "sdot z10.s, z20.b, z2.b\n"
- "ext z2.b, z2.b, z2.b, #0x1\n"
- "whilelt p0.s, x20, %x[n_channels]\n"
- "sdot z4.s, z16.b, z7.b\n"
- "sdot z26.s, z16.b, z2.b\n"
+ "mov z7.s, #0x0\n"
+ "mls z24.s, p2/M, z6.s, z9.s\n"
+ "sdot z7.s, z12.b, z2.b\n"
+ "mov z6.s, #0x0\n"
+ "mls z22.s, p2/M, z20.s, z9.s\n"
+ ".inst 0x04b97718 // sqrdmulh z24.s, z24.s, z25.s\n"
+ "sqadd z1.s, z1.s, z16.s\n"
+ "sdot z7.s, z12.b, z30.b\n"
+ ".inst 0x04b976d6 // sqrdmulh z22.s, z22.s, z25.s\n"
+ "and z18.d, z24.d, z23.d\n"
+ "asr z18.s, z18.s, #0x1f\n"
+ "and z17.d, z22.d, z23.d\n"
+ "mov z20.d, z7.d\n"
+ "asr z17.s, z17.s, #0x1f\n"
+ "sdot z7.s, z12.b, z4.b\n"
+ "sdot z20.s, z12.b, z26.b\n"
+ "mls z21.s, p2/M, z19.s, z9.s\n"
+ "sqadd z24.s, z24.s, z18.s\n"
+ ".inst 0x44828ae1 // srshl z1.s, p2/M, z1.s, z23.s\n"
+ "sqadd z22.s, z22.s, z17.s\n"
+ ".inst 0x04b976b5 // sqrdmulh z21.s, z21.s, z25.s\n"
+ ".inst 0x44828af8 // srshl z24.s, p2/M, z24.s, z23.s\n"
+ "add z1.s, z1.s, z8.s\n"
+ "and z16.d, z21.d, z23.d\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ "add z24.s, z24.s, z8.s\n"
+ "smax z1.s, p2/M, z1.s, z11.s\n"
+ ".inst 0x44828af6 // srshl z22.s, p2/M, z22.s, z23.s\n"
+ "smax z24.s, p2/M, z24.s, z11.s\n"
+ "smin z1.s, p2/M, z1.s, z10.s\n"
+ "st1b { z1.s }, p0, [x23, x19]\n"
+ "add z22.s, z22.s, z8.s\n"
+ "sqadd z21.s, z21.s, z16.s\n"
+ "ld1w { z1.s }, p2/Z, [%x[params], #6, MUL VL]\n"
"addvl %x[params], %x[params], #16\n"
- "sdot z17.s, z12.b, z2.b\n"
- "sdot z31.s, z20.b, z29.b\n"
- "ext z29.b, z29.b, z29.b, #0x1\n"
- "mls z10.s, p1/M, z19.s, z23.s\n"
- "sdot z4.s, z20.b, z2.b\n"
- ".inst 0x04ae754a // sqrdmulh z10.s, z10.s, z14.s\n"
- "sdot z26.s, z20.b, z29.b\n"
- "movprfx z16, z17\n sdot z16.s, z12.b, z29.b\n"
- "and z21.d, z10.d, z8.d\n"
- "sdot z17.s, z12.b, z13.b\n"
- "mls z4.s, p1/M, z17.s, z23.s\n"
- "asr z21.s, z21.s, #0x1f\n"
- "mls z31.s, p1/M, z18.s, z23.s\n"
- "mls z26.s, p1/M, z16.s, z23.s\n"
- ".inst 0x04ae7484 // sqrdmulh z4.s, z4.s, z14.s\n"
- ".inst 0x04ae77ff // sqrdmulh z31.s, z31.s, z14.s\n"
- ".inst 0x04ae775a // sqrdmulh z26.s, z26.s, z14.s\n"
- "ld1w { z14.s }, p1/Z, [%x[params], #-4, MUL VL]\n"
- "sqadd z10.s, z10.s, z21.s\n"
- "and z16.d, z4.d, z8.d\n"
- ".inst 0x4482850a // srshl z10.s, p1/M, z10.s, z8.s\n"
- "and z20.d, z31.d, z8.d\n"
- "and z21.d, z26.d, z8.d\n"
+ "smin z24.s, p2/M, z24.s, z10.s\n"
+ "ld1b { z29.b }, p2/Z, [%x[params], #-8, MUL VL]\n"
+ "ld1b { z27.b }, p2/Z, [%x[params], #-7, MUL VL]\n"
+ "smax z22.s, p2/M, z22.s, z11.s\n"
+ "ld1w { z25.s }, p2/Z, [%x[params], #-6, MUL VL]\n"
+ ".inst 0x44828af5 // srshl z21.s, p2/M, z21.s, z23.s\n"
+ "ld1w { z23.s }, p2/Z, [%x[params], #-5, MUL VL]\n"
+ "smin z22.s, p2/M, z22.s, z10.s\n"
+ "st1b { z24.s }, p0, [x22, x19]\n"
+ "mov z24.d, z1.d\n"
+ "st1b { z22.s }, p0, [x21, x19]\n"
+ "add z21.s, z21.s, z8.s\n"
+ "mov z22.d, z1.d\n"
+ "sdot z22.s, z31.b, z2.b\n"
+ "smax z21.s, p2/M, z21.s, z11.s\n"
+ "sdot z22.s, z29.b, z30.b\n"
+ "smin z21.s, p2/M, z21.s, z10.s\n"
+ "st1b { z21.s }, p0, [x20, x19]\n"
+ "mov z21.d, z1.d\n"
+ "incw x19\n"
+ "sdot z1.s, z31.b, z4.b\n"
+ "whilelt p0.s, x19, %x[n_channels]\n"
+ "sdot z22.s, z27.b, z26.b\n"
+ "ext z4.b, z4.b, z4.b, #0x1\n"
+ "ext z26.b, z26.b, z26.b, #0x1\n"
+ "sdot z1.s, z29.b, z2.b\n"
+ "ext z2.b, z2.b, z2.b, #0x1\n"
+ "sdot z24.s, z31.b, z4.b\n"
+ "mls z22.s, p2/M, z20.s, z9.s\n"
+ "sdot z1.s, z27.b, z30.b\n"
+ "ext z30.b, z30.b, z30.b, #0x1\n"
+ "sdot z21.s, z31.b, z2.b\n"
+ "ld1b { z31.b }, p2/Z, [%x[params], #-3, MUL VL]\n"
+ "sdot z24.s, z29.b, z2.b\n"
+ "sdot z6.s, z12.b, z2.b\n"
+ "ld1b { z2.b }, p2/Z, [SP, #3, MUL VL]\n"
+ ".inst 0x04b976d6 // sqrdmulh z22.s, z22.s, z25.s\n"
+ "sdot z21.s, z29.b, z30.b\n"
+ "ld1b { z29.b }, p2/Z, [%x[params], #-2, MUL VL]\n"
+ "sdot z24.s, z27.b, z30.b\n"
+ "sdot z6.s, z12.b, z30.b\n"
+ "ld1b { z30.b }, p2/Z, [SP, #5, MUL VL]\n"
+ "and z17.d, z22.d, z23.d\n"
+ "asr z17.s, z17.s, #0x1f\n"
+ "sdot z21.s, z27.b, z26.b\n"
+ "ld1b { z27.b }, p2/Z, [%x[params], #-1, MUL VL]\n"
+ "mov z19.d, z6.d\n"
+ "sdot z6.s, z12.b, z4.b\n"
+ "ld1b { z4.b }, p2/Z, [SP, #1, MUL VL]\n"
+ "sdot z19.s, z12.b, z26.b\n"
+ "ld1b { z26.b }, p2/Z, [SP, #7, MUL VL]\n"
+ "mls z1.s, p2/M, z7.s, z9.s\n"
+ "mov z7.s, #0x0\n"
+ "sqadd z22.s, z22.s, z17.s\n"
+ "sdot z7.s, z12.b, z3.b\n"
+ ".inst 0x04b97421 // sqrdmulh z1.s, z1.s, z25.s\n"
+ "mls z24.s, p2/M, z6.s, z9.s\n"
+ "mov z6.s, #0x0\n"
+ "sdot z7.s, z12.b, z0.b\n"
+ "and z16.d, z1.d, z23.d\n"
"asr z16.s, z16.s, #0x1f\n"
- "asr z20.s, z20.s, #0x1f\n"
- "asr z21.s, z21.s, #0x1f\n"
- "sqadd z4.s, z4.s, z16.s\n"
- ".inst 0x44828504 // srshl z4.s, p1/M, z4.s, z8.s\n"
- "ld1b { z16.b }, p1/Z, [%x[params], #-6, MUL VL]\n"
- "sqadd z31.s, z31.s, z20.s\n"
- "sqadd z26.s, z26.s, z21.s\n"
- ".inst 0x4482851f // srshl z31.s, p1/M, z31.s, z8.s\n"
- ".inst 0x4482851a // srshl z26.s, p1/M, z26.s, z8.s\n"
- "add z10.s, z10.s, z22.s\n"
- "smax z10.s, p1/M, z10.s, z25.s\n"
- "add z4.s, z4.s, z22.s\n"
- "smin z10.s, p1/M, z10.s, z24.s\n"
- "add z31.s, z31.s, z22.s\n"
- "add z26.s, z26.s, z22.s\n"
- "smax z4.s, p1/M, z4.s, z25.s\n"
- "smax z31.s, p1/M, z31.s, z25.s\n"
- "mov z19.s, #0x0\n"
- "sdot z19.s, z12.b, z6.b\n"
- "smax z26.s, p1/M, z26.s, z25.s\n"
- "st1b { z10.s }, p0, [x24, x20]\n"
- "ld1w { z10.s }, p1/Z, [%x[params], #-8, MUL VL]\n"
- "ld1b { z21.b }, p1/Z, [%x[params], #-7, MUL VL]\n"
- "smin z4.s, p1/M, z4.s, z24.s\n"
- "smin z31.s, p1/M, z31.s, z24.s\n"
- "smin z26.s, p1/M, z26.s, z24.s\n"
- "st1b { z4.s }, p0, [x23, x20]\n"
- "mov z4.d, z10.d\n"
- "sdot z19.s, z12.b, z1.b\n"
- "st1b { z31.s }, p0, [x22, x20]\n"
- "mov z31.d, z10.d\n"
- "sdot z31.s, z21.b, z6.b\n"
- "movprfx z18, z19\n sdot z18.s, z12.b, z28.b\n"
- "st1b { z26.s }, p0, [x21, x20]\n"
- "mov z26.d, z10.d\n"
- "sdot z10.s, z21.b, z11.b\n"
- "sdot z10.s, z16.b, z6.b\n"
- "sdot z19.s, z12.b, z11.b\n"
- "ext z11.b, z11.b, z11.b, #0x1\n"
- "ld1b { z20.b }, p1/Z, [%x[params], #-5, MUL VL]\n"
- "sdot z4.s, z21.b, z11.b\n"
- "ext z6.b, z6.b, z6.b, #0x1\n"
- "mov z17.s, #0x0\n"
- "sdot z26.s, z21.b, z6.b\n"
- "ld1w { z8.s }, p1/Z, [%x[params], #-3, MUL VL]\n"
- "sdot z17.s, z12.b, z6.b\n"
- "sdot z31.s, z16.b, z1.b\n"
- "incw x20\n"
- "whilelt p0.s, x20, %x[n_channels]\n"
- "sdot z10.s, z20.b, z1.b\n"
- "ext z1.b, z1.b, z1.b, #0x1\n"
- "sdot z4.s, z16.b, z6.b\n"
- "sdot z26.s, z16.b, z1.b\n"
- "sdot z17.s, z12.b, z1.b\n"
- "sdot z31.s, z20.b, z28.b\n"
- "ext z28.b, z28.b, z28.b, #0x1\n"
- "mls z10.s, p1/M, z19.s, z23.s\n"
- "sdot z4.s, z20.b, z1.b\n"
- "sdot z26.s, z20.b, z28.b\n"
- ".inst 0x04ae754a // sqrdmulh z10.s, z10.s, z14.s\n"
- "movprfx z16, z17\n sdot z16.s, z12.b, z28.b\n"
- "sdot z17.s, z12.b, z11.b\n"
- "and z21.d, z10.d, z8.d\n"
- "mls z4.s, p1/M, z17.s, z23.s\n"
- "mls z31.s, p1/M, z18.s, z23.s\n"
- "asr z21.s, z21.s, #0x1f\n"
- "mls z26.s, p1/M, z16.s, z23.s\n"
- ".inst 0x04ae7484 // sqrdmulh z4.s, z4.s, z14.s\n"
- ".inst 0x04ae77ff // sqrdmulh z31.s, z31.s, z14.s\n"
- ".inst 0x04ae775a // sqrdmulh z26.s, z26.s, z14.s\n"
- "ld1w { z14.s }, p1/Z, [%x[params], #2, MUL VL]\n"
- "sqadd z10.s, z10.s, z21.s\n"
- "and z16.d, z4.d, z8.d\n"
- ".inst 0x4482850a // srshl z10.s, p1/M, z10.s, z8.s\n"
- "and z20.d, z31.d, z8.d\n"
- "and z21.d, z26.d, z8.d\n"
+ ".inst 0x04b97718 // sqrdmulh z24.s, z24.s, z25.s\n"
+ "mov z20.d, z7.d\n"
+ "sdot z7.s, z12.b, z5.b\n"
+ "sdot z20.s, z12.b, z28.b\n"
+ "mls z21.s, p2/M, z19.s, z9.s\n"
+ "and z18.d, z24.d, z23.d\n"
+ "asr z18.s, z18.s, #0x1f\n"
+ "sqadd z1.s, z1.s, z16.s\n"
+ ".inst 0x04b976b5 // sqrdmulh z21.s, z21.s, z25.s\n"
+ "ld1w { z25.s }, p2/Z, [%x[params]]\n"
+ ".inst 0x44828af6 // srshl z22.s, p2/M, z22.s, z23.s\n"
+ "and z16.d, z21.d, z23.d\n"
"asr z16.s, z16.s, #0x1f\n"
- "asr z20.s, z20.s, #0x1f\n"
- "asr z21.s, z21.s, #0x1f\n"
- "sqadd z4.s, z4.s, z16.s\n"
- ".inst 0x44828504 // srshl z4.s, p1/M, z4.s, z8.s\n"
- "ld1b { z16.b }, p1/Z, [%x[params]]\n"
- "sqadd z31.s, z31.s, z20.s\n"
- "sqadd z26.s, z26.s, z21.s\n"
- ".inst 0x4482851f // srshl z31.s, p1/M, z31.s, z8.s\n"
- ".inst 0x4482851a // srshl z26.s, p1/M, z26.s, z8.s\n"
- "add z10.s, z10.s, z22.s\n"
- "smax z10.s, p1/M, z10.s, z25.s\n"
- "add z4.s, z4.s, z22.s\n"
- "smin z10.s, p1/M, z10.s, z24.s\n"
- "add z31.s, z31.s, z22.s\n"
- "add z26.s, z26.s, z22.s\n"
- "smax z4.s, p1/M, z4.s, z25.s\n"
- "smax z31.s, p1/M, z31.s, z25.s\n"
- "mov z19.s, #0x0\n"
- "sdot z19.s, z12.b, z5.b\n"
- "smax z26.s, p1/M, z26.s, z25.s\n"
- "st1b { z10.s }, p0, [x24, x20]\n"
- "ld1w { z10.s }, p1/Z, [%x[params], #-2, MUL VL]\n"
- "ld1b { z21.b }, p1/Z, [%x[params], #-1, MUL VL]\n"
- "smin z4.s, p1/M, z4.s, z24.s\n"
- "smin z31.s, p1/M, z31.s, z24.s\n"
- "smin z26.s, p1/M, z26.s, z24.s\n"
- "st1b { z4.s }, p0, [x23, x20]\n"
- "mov z4.d, z10.d\n"
- "sdot z19.s, z12.b, z0.b\n"
- "st1b { z31.s }, p0, [x22, x20]\n"
- "mov z31.d, z10.d\n"
- "sdot z31.s, z21.b, z5.b\n"
- "movprfx z18, z19\n sdot z18.s, z12.b, z27.b\n"
- "st1b { z26.s }, p0, [x21, x20]\n"
- "mov z26.d, z10.d\n"
- "sdot z10.s, z21.b, z9.b\n"
- "sdot z10.s, z16.b, z5.b\n"
- "sdot z19.s, z12.b, z9.b\n"
- "ext z9.b, z9.b, z9.b, #0x1\n"
- "ld1b { z20.b }, p1/Z, [%x[params], #1, MUL VL]\n"
- "sdot z4.s, z21.b, z9.b\n"
+ "sqadd z24.s, z24.s, z18.s\n"
+ "add z22.s, z22.s, z8.s\n"
+ ".inst 0x44828ae1 // srshl z1.s, p2/M, z1.s, z23.s\n"
+ "smax z22.s, p2/M, z22.s, z11.s\n"
+ ".inst 0x44828af8 // srshl z24.s, p2/M, z24.s, z23.s\n"
+ "add z1.s, z1.s, z8.s\n"
+ "sqadd z21.s, z21.s, z16.s\n"
+ "smin z22.s, p2/M, z22.s, z10.s\n"
+ "st1b { z22.s }, p0, [x21, x19]\n"
+ "add z24.s, z24.s, z8.s\n"
+ "smax z1.s, p2/M, z1.s, z11.s\n"
+ ".inst 0x44828af5 // srshl z21.s, p2/M, z21.s, z23.s\n"
+ "ld1w { z23.s }, p2/Z, [%x[params], #1, MUL VL]\n"
+ "smax z24.s, p2/M, z24.s, z11.s\n"
+ "smin z1.s, p2/M, z1.s, z10.s\n"
+ "st1b { z1.s }, p0, [x23, x19]\n"
+ "add z21.s, z21.s, z8.s\n"
+ "smin z24.s, p2/M, z24.s, z10.s\n"
+ "ld1w { z1.s }, p2/Z, [%x[params], #-4, MUL VL]\n"
+ "smax z21.s, p2/M, z21.s, z11.s\n"
+ "st1b { z24.s }, p0, [x22, x19]\n"
+ "mov z24.d, z1.d\n"
+ "mov z22.d, z1.d\n"
+ "sdot z22.s, z31.b, z3.b\n"
+ "smin z21.s, p2/M, z21.s, z10.s\n"
+ "st1b { z21.s }, p0, [x20, x19]\n"
+ "mov z21.d, z1.d\n"
+ "incw x19\n"
+ "sdot z1.s, z31.b, z5.b\n"
+ "whilelt p0.s, x19, %x[n_channels]\n"
+ "sdot z22.s, z29.b, z0.b\n"
"ext z5.b, z5.b, z5.b, #0x1\n"
- "mov z17.s, #0x0\n"
- "sdot z26.s, z21.b, z5.b\n"
- "ld1w { z8.s }, p1/Z, [%x[params], #3, MUL VL]\n"
- "sdot z17.s, z12.b, z5.b\n"
- "sdot z31.s, z16.b, z0.b\n"
- "incw x20\n"
- "whilelt p0.s, x20, %x[n_channels]\n"
- "sdot z10.s, z20.b, z0.b\n"
+ "sdot z1.s, z29.b, z3.b\n"
+ "sdot z22.s, z27.b, z28.b\n"
+ "ext z3.b, z3.b, z3.b, #0x1\n"
+ "ext z28.b, z28.b, z28.b, #0x1\n"
+ "sdot z24.s, z31.b, z5.b\n"
+ "sdot z1.s, z27.b, z0.b\n"
"ext z0.b, z0.b, z0.b, #0x1\n"
- "sdot z4.s, z16.b, z5.b\n"
- "whilelt p2.b, x13, %x[n_channels]\n"
- "sdot z26.s, z16.b, z0.b\n"
- "sdot z17.s, z12.b, z0.b\n"
- "ld1b { z13.b }, p2/Z, [x11, x13]\n"
- "ld1b { z11.b }, p2/Z, [x10, x13]\n"
- "sdot z31.s, z20.b, z27.b\n"
- "ext z27.b, z27.b, z27.b, #0x1\n"
- "mls z10.s, p1/M, z19.s, z23.s\n"
- "ld1b { z7.b }, p2/Z, [x27, x13]\n"
- "sdot z4.s, z20.b, z0.b\n"
- "sdot z26.s, z20.b, z27.b\n"
- ".inst 0x04ae754a // sqrdmulh z10.s, z10.s, z14.s\n"
- "ld1b { z6.b }, p2/Z, [x26, x13]\n"
- "movprfx z16, z17\n sdot z16.s, z12.b, z27.b\n"
- "sdot z17.s, z12.b, z9.b\n"
- "and z21.d, z10.d, z8.d\n"
- "ld1b { z9.b }, p2/Z, [x9, x13]\n"
- "mls z4.s, p1/M, z17.s, z23.s\n"
- "mls z31.s, p1/M, z18.s, z23.s\n"
- "asr z21.s, z21.s, #0x1f\n"
- "ld1b { z5.b }, p2/Z, [x25, x13]\n"
- "mls z26.s, p1/M, z16.s, z23.s\n"
- ".inst 0x04ae7484 // sqrdmulh z4.s, z4.s, z14.s\n"
- ".inst 0x04ae77ff // sqrdmulh z31.s, z31.s, z14.s\n"
- ".inst 0x04ae775a // sqrdmulh z26.s, z26.s, z14.s\n"
- "ld1b { z14.b }, p2/Z, [x12, x13]\n"
- "ldp x12, x11, [%x[inptrs], #0x40]\n"
- "sqadd z10.s, z10.s, z21.s\n"
- "and z16.d, z4.d, z8.d\n"
- ".inst 0x4482850a // srshl z10.s, p1/M, z10.s, z8.s\n"
- "ldp x10, x9, [%x[inptrs], #0x50]\n"
- "and z20.d, z31.d, z8.d\n"
- "and z21.d, z26.d, z8.d\n"
- "ld1b { z3.b }, p2/Z, [x12, x13]\n"
- "ld1b { z2.b }, p2/Z, [x11, x13]\n"
+ "sdot z21.s, z31.b, z3.b\n"
+ "ld1b { z31.b }, p2/Z, [%x[params], #3, MUL VL]\n"
+ "sdot z24.s, z29.b, z3.b\n"
+ "sdot z6.s, z12.b, z3.b\n"
+ "mls z1.s, p2/M, z7.s, z9.s\n"
+ "sdot z21.s, z29.b, z0.b\n"
+ "ld1b { z29.b }, p2/Z, [%x[params], #4, MUL VL]\n"
+ "sdot z24.s, z27.b, z0.b\n"
+ "sdot z6.s, z12.b, z0.b\n"
+ ".inst 0x04b97421 // sqrdmulh z1.s, z1.s, z25.s\n"
+ "sdot z21.s, z27.b, z28.b\n"
+ "ld1b { z27.b }, p2/Z, [%x[params], #5, MUL VL]\n"
+ "mov z7.s, #0x0\n"
+ "mov z19.d, z6.d\n"
+ "sdot z6.s, z12.b, z5.b\n"
+ "sdot z19.s, z12.b, z28.b\n"
+ "and z16.d, z1.d, z23.d\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ "sdot z7.s, z12.b, z2.b\n"
+ "mls z24.s, p2/M, z6.s, z9.s\n"
+ "mov z6.s, #0x0\n"
+ "mls z22.s, p2/M, z20.s, z9.s\n"
+ "mls z21.s, p2/M, z19.s, z9.s\n"
+ ".inst 0x04b97718 // sqrdmulh z24.s, z24.s, z25.s\n"
+ "sqadd z1.s, z1.s, z16.s\n"
+ ".inst 0x04b976d6 // sqrdmulh z22.s, z22.s, z25.s\n"
+ ".inst 0x04b976b5 // sqrdmulh z21.s, z21.s, z25.s\n"
+ "ld1w { z25.s }, p2/Z, [%x[params], #6, MUL VL]\n"
+ "and z18.d, z24.d, z23.d\n"
+ "asr z18.s, z18.s, #0x1f\n"
+ "and z17.d, z22.d, z23.d\n"
+ "and z16.d, z21.d, z23.d\n"
+ "asr z17.s, z17.s, #0x1f\n"
+ "sdot z7.s, z12.b, z30.b\n"
+ ".inst 0x44828ae1 // srshl z1.s, p2/M, z1.s, z23.s\n"
"asr z16.s, z16.s, #0x1f\n"
- "asr z20.s, z20.s, #0x1f\n"
- "ld1b { z1.b }, p2/Z, [x10, x13]\n"
- "ld1b { z0.b }, p2/Z, [x9, x13]\n"
- "asr z21.s, z21.s, #0x1f\n"
- "sqadd z4.s, z4.s, z16.s\n"
- ".inst 0x44828504 // srshl z4.s, p1/M, z4.s, z8.s\n"
- "ld1b { z16.b }, p1/Z, [%x[params], #6, MUL VL]\n"
- "sqadd z31.s, z31.s, z20.s\n"
- "sqadd z26.s, z26.s, z21.s\n"
- ".inst 0x4482851f // srshl z31.s, p1/M, z31.s, z8.s\n"
- ".inst 0x4482851a // srshl z26.s, p1/M, z26.s, z8.s\n"
- "add z10.s, z10.s, z22.s\n"
- "smax z10.s, p1/M, z10.s, z25.s\n"
- "add z4.s, z4.s, z22.s\n"
- "ld1b { z8.b }, p2/Z, [x28, x13]\n"
- "add z31.s, z31.s, z22.s\n"
- "add z26.s, z26.s, z22.s\n"
- "ldp x28, x27, [%x[inptrs], #0x60]\n"
- "ldp x26, x25, [%x[inptrs], #0x70]\n"
- "smin z10.s, p1/M, z10.s, z24.s\n"
- "smax z4.s, p1/M, z4.s, z25.s\n"
- "st1b { z10.s }, p0, [x24, x20]\n"
- "ld1b { z30.b }, p2/Z, [x28, x13]\n"
- "smax z31.s, p1/M, z31.s, z25.s\n"
- "smax z26.s, p1/M, z26.s, z25.s\n"
- "ld1b { z29.b }, p2/Z, [x27, x13]\n"
- "ld1b { z28.b }, p2/Z, [x26, x13]\n"
- "ld1b { z27.b }, p2/Z, [x25, x13]\n"
- "zip2 z10.b, z14.b, z11.b\n"
- "zip1 z14.b, z14.b, z11.b\n"
- "smin z4.s, p1/M, z4.s, z24.s\n"
- "zip1 z11.b, z13.b, z9.b\n"
- "zip2 z9.b, z13.b, z9.b\n"
- "smin z31.s, p1/M, z31.s, z24.s\n"
- "smin z26.s, p1/M, z26.s, z24.s\n"
- "st1b { z4.s }, p0, [x23, x20]\n"
- "zip2 z13.b, z14.b, z11.b\n"
- "zip1 z14.b, z14.b, z11.b\n"
- "ldp x12, x11, [%x[inptrs], #0x0]\n"
- "st1b { z31.s }, p0, [x22, x20]\n"
- "zip1 z11.b, z10.b, z9.b\n"
- "zip2 z9.b, z10.b, z9.b\n"
- "ld1w { z10.s }, p1/Z, [%x[params], #4, MUL VL]\n"
- "st1b { z26.s }, p0, [x21, x20]\n"
- "zip2 z4.b, z8.b, z6.b\n"
- "zip1 z8.b, z8.b, z6.b\n"
- "incw x20\n"
- "zip1 z6.b, z7.b, z5.b\n"
- "zip2 z5.b, z7.b, z5.b\n"
- "ldp x10, x9, [%x[inptrs], #0x10]\n"
- "ldp x28, x27, [%x[inptrs], #0x20]\n"
- "zip2 z31.b, z3.b, z1.b\n"
- "zip1 z3.b, z3.b, z1.b\n"
- "ldp x26, x25, [%x[inptrs], #0x30]\n"
- "ld1b { z21.b }, p1/Z, [%x[params], #5, MUL VL]\n"
- "zip1 z1.b, z2.b, z0.b\n"
- "zip2 z0.b, z2.b, z0.b\n"
- "ld1b { z20.b }, p1/Z, [%x[params], #7, MUL VL]\n"
+ "sqadd z24.s, z24.s, z18.s\n"
+ "add z1.s, z1.s, z8.s\n"
+ "mov z20.d, z7.d\n"
+ "sqadd z22.s, z22.s, z17.s\n"
+ "sqadd z21.s, z21.s, z16.s\n"
+ "sdot z7.s, z12.b, z4.b\n"
+ "sdot z20.s, z12.b, z26.b\n"
+ "smax z1.s, p2/M, z1.s, z11.s\n"
+ ".inst 0x44828af8 // srshl z24.s, p2/M, z24.s, z23.s\n"
+ ".inst 0x44828af6 // srshl z22.s, p2/M, z22.s, z23.s\n"
+ ".inst 0x44828af5 // srshl z21.s, p2/M, z21.s, z23.s\n"
+ "ld1w { z23.s }, p2/Z, [%x[params], #7, MUL VL]\n"
+ "smin z1.s, p2/M, z1.s, z10.s\n"
+ "st1b { z1.s }, p0, [x23, x19]\n"
+ "add z24.s, z24.s, z8.s\n"
+ "add z22.s, z22.s, z8.s\n"
+ "ld1w { z1.s }, p2/Z, [%x[params], #2, MUL VL]\n"
"addvl %x[params], %x[params], #8\n"
- "zip2 z26.b, z30.b, z28.b\n"
- "zip1 z30.b, z30.b, z28.b\n"
- "zip1 z28.b, z29.b, z27.b\n"
- "zip2 z27.b, z29.b, z27.b\n"
- "zip2 z7.b, z8.b, z6.b\n"
- "zip1 z8.b, z8.b, z6.b\n"
- "zip1 z6.b, z4.b, z5.b\n"
- "zip2 z5.b, z4.b, z5.b\n"
- "zip2 z2.b, z3.b, z1.b\n"
- "zip1 z3.b, z3.b, z1.b\n"
- "zip1 z1.b, z31.b, z0.b\n"
- "zip2 z0.b, z31.b, z0.b\n"
- "zip2 z29.b, z30.b, z28.b\n"
- "zip1 z30.b, z30.b, z28.b\n"
- "zip1 z28.b, z26.b, z27.b\n"
- "zip2 z27.b, z26.b, z27.b\n"
- "mov z4.d, z10.d\n"
- "mov z31.d, z10.d\n"
- "mov z26.d, z10.d\n"
+ "add z21.s, z21.s, z8.s\n"
+ "smax z24.s, p2/M, z24.s, z11.s\n"
+ "smax z22.s, p2/M, z22.s, z11.s\n"
+ "smax z21.s, p2/M, z21.s, z11.s\n"
+ "smin z24.s, p2/M, z24.s, z10.s\n"
+ "st1b { z24.s }, p0, [x22, x19]\n"
+ "mov z24.d, z1.d\n"
+ "smin z22.s, p2/M, z22.s, z10.s\n"
+ "st1b { z22.s }, p0, [x21, x19]\n"
+ "mov z22.d, z1.d\n"
+ "smin z21.s, p2/M, z21.s, z10.s\n"
+ "st1b { z21.s }, p0, [x20, x19]\n"
+ "mov z21.d, z1.d\n"
+ "incw x19\n"
+ "sdot z1.s, z31.b, z4.b\n"
+ "whilelt p0.s, x19, %x[n_channels]\n"
+ "sdot z22.s, z31.b, z2.b\n"
+ "ext z4.b, z4.b, z4.b, #0x1\n"
+ "sdot z1.s, z29.b, z2.b\n"
+ "sdot z22.s, z29.b, z30.b\n"
+ "ext z2.b, z2.b, z2.b, #0x1\n"
+ "sdot z24.s, z31.b, z4.b\n"
+ "sdot z1.s, z27.b, z30.b\n"
+ "sdot z22.s, z27.b, z26.b\n"
+ "ext z30.b, z30.b, z30.b, #0x1\n"
+ "ext z26.b, z26.b, z26.b, #0x1\n"
+ "sdot z21.s, z31.b, z2.b\n"
+ "sdot z24.s, z29.b, z2.b\n"
+ "sdot z6.s, z12.b, z2.b\n"
+ "mls z1.s, p2/M, z7.s, z9.s\n"
+ "sdot z21.s, z29.b, z30.b\n"
+ "sdot z24.s, z27.b, z30.b\n"
+ "sdot z6.s, z12.b, z30.b\n"
+ ".inst 0x04b97421 // sqrdmulh z1.s, z1.s, z25.s\n"
+ "sdot z21.s, z27.b, z26.b\n"
+ "mls z22.s, p2/M, z20.s, z9.s\n"
+ "mov z19.d, z6.d\n"
+ "sdot z6.s, z12.b, z4.b\n"
+ "sdot z19.s, z12.b, z26.b\n"
+ "and z16.d, z1.d, z23.d\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ ".inst 0x04b976d6 // sqrdmulh z22.s, z22.s, z25.s\n"
+ "mls z24.s, p2/M, z6.s, z9.s\n"
+ "mls z21.s, p2/M, z19.s, z9.s\n"
+ ".inst 0x04b97718 // sqrdmulh z24.s, z24.s, z25.s\n"
+ "and z17.d, z22.d, z23.d\n"
+ "asr z17.s, z17.s, #0x1f\n"
+ "sqadd z1.s, z1.s, z16.s\n"
+ ".inst 0x04b976b5 // sqrdmulh z21.s, z21.s, z25.s\n"
+ "and z18.d, z24.d, z23.d\n"
+ "asr z18.s, z18.s, #0x1f\n"
+ "and z16.d, z21.d, z23.d\n"
+ ".inst 0x44828ae1 // srshl z1.s, p2/M, z1.s, z23.s\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ "sqadd z22.s, z22.s, z17.s\n"
+ "add z1.s, z1.s, z8.s\n"
+ "sqadd z24.s, z24.s, z18.s\n"
+ "smax z1.s, p2/M, z1.s, z11.s\n"
+ ".inst 0x44828af6 // srshl z22.s, p2/M, z22.s, z23.s\n"
+ "sqadd z21.s, z21.s, z16.s\n"
+ ".inst 0x44828af8 // srshl z24.s, p2/M, z24.s, z23.s\n"
+ "add z22.s, z22.s, z8.s\n"
+ "smin z1.s, p2/M, z1.s, z10.s\n"
+ "st1b { z1.s }, p0, [x23, x19]\n"
+ "add z24.s, z24.s, z8.s\n"
+ "smax z22.s, p2/M, z22.s, z11.s\n"
+ ".inst 0x44828af5 // srshl z21.s, p2/M, z21.s, z23.s\n"
+ "smax z24.s, p2/M, z24.s, z11.s\n"
+ "smin z22.s, p2/M, z22.s, z10.s\n"
+ "st1b { z22.s }, p0, [x21, x19]\n"
+ "add z21.s, z21.s, z8.s\n"
+ "smin z24.s, p2/M, z24.s, z10.s\n"
+ "st1b { z24.s }, p0, [x22, x19]\n"
+ "smax z21.s, p2/M, z21.s, z11.s\n"
+ "smin z21.s, p2/M, z21.s, z10.s\n"
+ "st1b { z21.s }, p0, [x20, x19]\n"
+ "incw x19\n"
+ "whilelt p1.b, x19, %x[n_channels]\n"
"b.any 1b\n"
+ "addvl SP, SP, #8\n"
: [params] "+&r" (params)
- : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp)
- : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : [inptrs] "r" (inptrs), [n_channels] "r" ((long unsigned int) n_channels), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp)
+ : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
index 4ebf5be285..3583308357 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -91,316 +91,324 @@ void sve_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "mov x8, #0x0\n"
- "ldr x25, [%x[params], %[offsetof_Params_requant]]\n"
- "ptrue p4.b\n"
- "ldr x24, [%x[params], %[offsetof_Params_outptrs]]\n"
- "mov x23, x8\n"
- "add x21, x25, %[offsetof_Requantize32_a_offset]\n"
"ldr x17, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "ptrue p4.b\n"
"ldr x16, [%x[params], %[offsetof_Params_weights]]\n"
- "add x20, x25, %[offsetof_Requantize32_b_offset]\n"
- "add x22, x25, %[offsetof_Requantize32_c_offset]\n"
- "ld1rb { z23.b }, p4/Z, [x21]\n"
- "ld1rb { z15.b }, p4/Z, [x20]\n"
- "add x21, x25, %[offsetof_Requantize32_minval]\n"
- "add x20, x25, %[offsetof_Requantize32_maxval]\n"
- "ld1rh { z14.h }, p4/Z, [x22]\n"
- "ld1rh { z12.h }, p4/Z, [x21]\n"
- "ld1rh { z11.h }, p4/Z, [x20]\n"
- "ldp x15, x14, [x24, #0x0]\n"
- "incw x23\n"
- "whilelt p3.h, x8, x17\n"
- "ldp x13, x12, [x24, #0x10]\n"
- "whilelt p2.s, x8, x17\n"
- "whilelt p1.s, x23, x17\n"
- "ldr x26, [%x[params], %[offsetof_Params_bias]]\n"
+ "mov x15, #0x0\n"
+ "ldr x22, [%x[params], %[offsetof_Params_requant]]\n"
+ "mov x14, #0x0\n"
+ "ldr x13, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "add x12, %x[params], %[offsetof_Params_inptrs]\n"
+ "ldr x11, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "add x19, x22, %[offsetof_Requantize32_a_offset]\n"
+ "ldr x21, [%x[params], %[offsetof_Params_outptrs]]\n"
+ "add x20, x22, %[offsetof_Requantize32_b_offset]\n"
+ "ld1rb { z12.b }, p4/Z, [x19]\n"
+ "add x19, x22, %[offsetof_Requantize32_c_offset]\n"
+ "ld1rb { z18.b }, p4/Z, [x20]\n"
+ "add x20, x22, %[offsetof_Requantize32_minval]\n"
+ "ld1rw { z15.s }, p4/Z, [x19]\n"
+ "add x19, x22, %[offsetof_Requantize32_maxval]\n"
+ "ld1rw { z13.s }, p4/Z, [x20]\n"
+ "whilelt p3.h, x15, x17\n"
+ "ld1rw { z14.s }, p4/Z, [x19]\n"
+ "whilelt p2.s, x15, x17\n"
+ "ldp x10, x9, [x21, #0x0]\n"
+ "mov x19, x15\n"
+ "incw x19\n"
+ "ldp x28, x27, [x21, #0x10]\n"
+ "whilelt p1.s, x19, x17\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ld1w { z17.s }, p2/Z, [x19]\n"
+ "ld1w { z16.s }, p1/Z, [x19, #1, MUL VL]\n"
+ "uzp1 z11.s, z17.s, z16.s\n"
+ "addvl x19, x19, #2\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "uzp2 z17.s, z17.s, z16.s\n"
+ "mov z9.d, z11.d\n"
"ld1sb { z0.h }, p4/Z, [x16]\n"
+ ".inst 0x45521000 // ssublb z0.h, z0.b, z18.b\n"
+ "mov z20.d, z17.d\n"
"ld1sb { z1.h }, p4/Z, [x16, #1, MUL VL]\n"
- "add x11, %x[params], %[offsetof_Params_inptrs]\n"
- "mov x10, #0x0\n"
+ "mov z24.d, z11.d\n"
"ld1sb { z2.h }, p4/Z, [x16, #2, MUL VL]\n"
+ ".inst 0x45521021 // ssublb z1.h, z1.b, z18.b\n"
+ "mov z19.d, z17.d\n"
"ld1sb { z3.h }, p4/Z, [x16, #3, MUL VL]\n"
- ".inst 0x454f1000 // ssublb z0.h, z0.b, z15.b\n"
- ".inst 0x454f1021 // ssublb z1.h, z1.b, z15.b\n"
+ "mov z26.d, z11.d\n"
"ld1sb { z4.h }, p4/Z, [x16, #4, MUL VL]\n"
+ ".inst 0x45521042 // ssublb z2.h, z2.b, z18.b\n"
+ "mov z23.d, z17.d\n"
"ld1sb { z5.h }, p4/Z, [x16, #5, MUL VL]\n"
- ".inst 0x454f1042 // ssublb z2.h, z2.b, z15.b\n"
- ".inst 0x454f1063 // ssublb z3.h, z3.b, z15.b\n"
+ ".inst 0x45521063 // ssublb z3.h, z3.b, z18.b\n"
"ld1sb { z6.h }, p4/Z, [x16, #6, MUL VL]\n"
"ld1sb { z7.h }, p4/Z, [x16, #7, MUL VL]\n"
+ ".inst 0x45521084 // ssublb z4.h, z4.b, z18.b\n"
"inch x16, ALL, MUL #8\n"
- ".inst 0x454f1084 // ssublb z4.h, z4.b, z15.b\n"
- "ld1w { z17.s }, p2/Z, [x26]\n"
- "ld1w { z16.s }, p1/Z, [x26, #1, MUL VL]\n"
- "uzp1 z13.s, z17.s, z16.s\n"
- "uzp2 z17.s, z17.s, z16.s\n"
"ld1sb { z8.h }, p4/Z, [x16]\n"
- "ldp x24, x23, [x11, #0x0]\n"
- "addvl x26, x26, #2\n"
- "mov z26.d, z13.d\n"
- "ldp x22, x21, [x11, #0x10]\n"
- "ldr x20, [x11, #0x20]\n"
- "mov z10.d, z17.d\n"
- "mov z24.d, z13.d\n"
- "ld1sb { z31.h }, p3/Z, [x24, x8]\n"
- "ld1sb { z30.h }, p3/Z, [x23, x8]\n"
- "mov z16.d, z17.d\n"
- "mov z25.d, z13.d\n"
- "ld1sb { z29.h }, p3/Z, [x22, x8]\n"
- "ld1sb { z28.h }, p3/Z, [x21, x8]\n"
- "mov z9.d, z17.d\n"
- ".inst 0x454f10a5 // ssublb z5.h, z5.b, z15.b\n"
- "ld1sb { z27.h }, p3/Z, [x20, x8]\n"
- "ldr x9, [%x[params], %[offsetof_Params_requant_muls]]\n"
- ".inst 0x454f10c6 // ssublb z6.h, z6.b, z15.b\n"
- ".inst 0x454f10e7 // ssublb z7.h, z7.b, z15.b\n"
- "ldr x28, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "str x26, [%x[params], %[offsetof_Params_bias]]\n"
- ".inst 0x454f1108 // ssublb z8.h, z8.b, z15.b\n"
- ".inst 0x455713ff // ssublb z31.h, z31.b, z23.b\n"
- ".inst 0x455713de // ssublb z30.h, z30.b, z23.b\n"
- ".inst 0x455713bd // ssublb z29.h, z29.b, z23.b\n"
- ".inst 0x4557139c // ssublb z28.h, z28.b, z23.b\n"
- ".inst 0x4557137b // ssublb z27.h, z27.b, z23.b\n"
+ "ldp x23, x22, [x12, #0x0]\n"
+ ".inst 0x455210a5 // ssublb z5.h, z5.b, z18.b\n"
+ ".inst 0x455210c6 // ssublb z6.h, z6.b, z18.b\n"
+ "ldp x21, x20, [x12, #0x10]\n"
+ ".inst 0x455210e7 // ssublb z7.h, z7.b, z18.b\n"
+ ".inst 0x45521108 // ssublb z8.h, z8.b, z18.b\n"
+ "ldr x19, [x12, #0x20]\n"
+ "ld1sb { z31.h }, p3/Z, [x23, x15]\n"
+ ".inst 0x454c13ff // ssublb z31.h, z31.b, z12.b\n"
+ "ld1sb { z30.h }, p3/Z, [x22, x15]\n"
+ "ld1sb { z29.h }, p3/Z, [x21, x15]\n"
+ ".inst 0x454c13de // ssublb z30.h, z30.b, z12.b\n"
+ "ld1sb { z28.h }, p3/Z, [x20, x15]\n"
+ "ld1sb { z27.h }, p3/Z, [x19, x15]\n"
+ ".inst 0x454c13bd // ssublb z29.h, z29.b, z12.b\n"
+ ".inst 0x454c139c // ssublb z28.h, z28.b, z12.b\n"
+ ".inst 0x454c137b // ssublb z27.h, z27.b, z12.b\n"
"1:" // Loop
- ".inst 0x448443ed // smlalb z13.s, p4/M, z31.h, z4.h\n"
+ ".inst 0x448443eb // smlalb z11.s, p4/M, z31.h, z4.h\n"
+ "ldr x21, [x12, #0x28]\n"
+ "whilelt p0.h, x14, x17\n"
".inst 0x448447f1 // smlalt z17.s, p4/M, z31.h, z4.h\n"
- "ldr x22, [x11, #0x28]\n"
- "ldr x27, [x11, #0x38]\n"
- ".inst 0x448343fa // smlalb z26.s, p4/M, z31.h, z3.h\n"
- ".inst 0x448347ea // smlalt z10.s, p4/M, z31.h, z3.h\n"
- "ldr x21, [x11, #0x30]\n"
- "ldr x26, [x11, #0x40]\n"
- ".inst 0x448043cd // smlalb z13.s, p4/M, z30.h, z0.h\n"
- ".inst 0x448047d1 // smlalt z17.s, p4/M, z30.h, z0.h\n"
- "ldr x20, [x11, #0x48]\n"
- "ld1sb { z30.h }, p3/Z, [x20, x8]\n"
- ".inst 0x448243ba // smlalb z26.s, p4/M, z29.h, z2.h\n"
- ".inst 0x448247aa // smlalt z10.s, p4/M, z29.h, z2.h\n"
- "ld1sb { z29.h }, p3/Z, [x21, x8]\n"
- ".inst 0x455713bd // ssublb z29.h, z29.b, z23.b\n"
+ "ldr x20, [x12, #0x30]\n"
+ "inch x16\n"
+ ".inst 0x448343e9 // smlalb z9.s, p4/M, z31.h, z3.h\n"
+ "ldr x26, [x12, #0x38]\n"
+ ".inst 0x448347f4 // smlalt z20.s, p4/M, z31.h, z3.h\n"
+ "ldr x25, [x12, #0x40]\n"
".inst 0x448143f8 // smlalb z24.s, p4/M, z31.h, z1.h\n"
- ".inst 0x448147f0 // smlalt z16.s, p4/M, z31.h, z1.h\n"
- "ldr x25, [x11, #0x50]\n"
- "ldr x24, [x11, #0x58]\n"
- ".inst 0x448043f9 // smlalb z25.s, p4/M, z31.h, z0.h\n"
- ".inst 0x448047e9 // smlalt z9.s, p4/M, z31.h, z0.h\n"
- "ld1sb { z31.h }, p3/Z, [x22, x8]\n"
- ".inst 0x455713ff // ssublb z31.h, z31.b, z23.b\n"
- ".inst 0x4485438d // smlalb z13.s, p4/M, z28.h, z5.h\n"
+ "ldr x19, [x12, #0x48]\n"
+ ".inst 0x448147f3 // smlalt z19.s, p4/M, z31.h, z1.h\n"
+ "ldr x24, [x12, #0x50]\n"
+ ".inst 0x448043fa // smlalb z26.s, p4/M, z31.h, z0.h\n"
+ "ldr x23, [x12, #0x58]\n"
+ ".inst 0x448047f7 // smlalt z23.s, p4/M, z31.h, z0.h\n"
+ "ld1sb { z31.h }, p3/Z, [x21, x15]\n"
+ ".inst 0x454c13ff // ssublb z31.h, z31.b, z12.b\n"
+ ".inst 0x448043cb // smlalb z11.s, p4/M, z30.h, z0.h\n"
+ "ldr x22, [x12, #0x60]\n"
+ ".inst 0x448047d1 // smlalt z17.s, p4/M, z30.h, z0.h\n"
+ "ld1sb { z30.h }, p3/Z, [x19, x15]\n"
+ ".inst 0x454c13de // ssublb z30.h, z30.b, z12.b\n"
+ ".inst 0x448243a9 // smlalb z9.s, p4/M, z29.h, z2.h\n"
+ "ldr x21, [x12, #0x68]\n"
+ ".inst 0x448247b4 // smlalt z20.s, p4/M, z29.h, z2.h\n"
+ "ld1sb { z29.h }, p3/Z, [x20, x15]\n"
+ ".inst 0x454c13bd // ssublb z29.h, z29.b, z12.b\n"
+ ".inst 0x4485438b // smlalb z11.s, p4/M, z28.h, z5.h\n"
+ "ldr x20, [x12, #0x70]\n"
".inst 0x44854791 // smlalt z17.s, p4/M, z28.h, z5.h\n"
- ".inst 0x455713de // ssublb z30.h, z30.b, z23.b\n"
- "ldr x23, [x11, #0x60]\n"
- ".inst 0x4484439a // smlalb z26.s, p4/M, z28.h, z4.h\n"
- ".inst 0x4484478a // smlalt z10.s, p4/M, z28.h, z4.h\n"
- "ldr x22, [x11, #0x68]\n"
- "ldr x21, [x11, #0x70]\n"
+ "ldr x19, [x12, #0x78]\n"
+ ".inst 0x44844389 // smlalb z9.s, p4/M, z28.h, z4.h\n"
+ "ld1w { z25.s }, p2/Z, [x13]\n"
+ ".inst 0x44844794 // smlalt z20.s, p4/M, z28.h, z4.h\n"
+ "ld1w { z16.s }, p1/Z, [x13, #1, MUL VL]\n"
+ "addvl x13, x13, #2\n"
".inst 0x44824398 // smlalb z24.s, p4/M, z28.h, z2.h\n"
- ".inst 0x44824790 // smlalt z16.s, p4/M, z28.h, z2.h\n"
- "ldr x20, [x11, #0x78]\n"
- "ld1w { z20.s }, p2/Z, [x9]\n"
- ".inst 0x44814399 // smlalb z25.s, p4/M, z28.h, z1.h\n"
- ".inst 0x44814789 // smlalt z9.s, p4/M, z28.h, z1.h\n"
- "ld1sb { z28.h }, p3/Z, [x27, x8]\n"
- ".inst 0x4557139c // ssublb z28.h, z28.b, z23.b\n"
- ".inst 0x4487436d // smlalb z13.s, p4/M, z27.h, z7.h\n"
- ".inst 0x44874771 // smlalt z17.s, p4/M, z27.h, z7.h\n"
- "ld1w { z18.s }, p1/Z, [x9, #1, MUL VL]\n"
- "uzp1 z19.s, z20.s, z18.s\n"
- ".inst 0x4486437a // smlalb z26.s, p4/M, z27.h, z6.h\n"
- ".inst 0x4486476a // smlalt z10.s, p4/M, z27.h, z6.h\n"
- "uzp2 z22.s, z20.s, z18.s\n"
- "ld1w { z20.s }, p2/Z, [x28]\n"
+ ".inst 0x44824793 // smlalt z19.s, p4/M, z28.h, z2.h\n"
+ ".inst 0x4481439a // smlalb z26.s, p4/M, z28.h, z1.h\n"
+ "uzp1 z10.s, z25.s, z16.s\n"
+ "uzp2 z22.s, z25.s, z16.s\n"
+ "ld1w { z25.s }, p2/Z, [x11]\n"
+ ".inst 0x44814797 // smlalt z23.s, p4/M, z28.h, z1.h\n"
+ "ld1sb { z28.h }, p3/Z, [x26, x15]\n"
+ ".inst 0x454c139c // ssublb z28.h, z28.b, z12.b\n"
".inst 0x448643f8 // smlalb z24.s, p4/M, z31.h, z6.h\n"
- ".inst 0x448647f0 // smlalt z16.s, p4/M, z31.h, z6.h\n"
- "ld1sb { z31.h }, p3/Z, [x26, x8]\n"
- ".inst 0x455713ff // ssublb z31.h, z31.b, z23.b\n"
- ".inst 0x44834379 // smlalb z25.s, p4/M, z27.h, z3.h\n"
- ".inst 0x44834769 // smlalt z9.s, p4/M, z27.h, z3.h\n"
- "whilelt p0.h, x10, x17\n"
- "inch x16\n"
- ".inst 0x4481438d // smlalb z13.s, p4/M, z28.h, z1.h\n"
- ".inst 0x44814791 // smlalt z17.s, p4/M, z28.h, z1.h\n"
- "ldr x26, [%x[params], %[offsetof_Params_bias]]\n"
- "addvl x9, x9, #2\n"
- ".inst 0x4480439a // smlalb z26.s, p4/M, z28.h, z0.h\n"
- ".inst 0x4480478a // smlalt z10.s, p4/M, z28.h, z0.h\n"
- "ld1sb { z28.h }, p3/Z, [x24, x8]\n"
- ".inst 0x4557139c // ssublb z28.h, z28.b, z23.b\n"
+ "ld1w { z16.s }, p1/Z, [x11, #1, MUL VL]\n"
+ ".inst 0x448647f3 // smlalt z19.s, p4/M, z31.h, z6.h\n"
+ "ld1sb { z31.h }, p3/Z, [x25, x15]\n"
+ "addvl x11, x11, #2\n"
+ ".inst 0x4487436b // smlalb z11.s, p4/M, z27.h, z7.h\n"
+ ".inst 0x454c13ff // ssublb z31.h, z31.b, z12.b\n"
+ "uzp1 z21.s, z25.s, z16.s\n"
+ "uzp2 z25.s, z25.s, z16.s\n"
+ ".inst 0x44874771 // smlalt z17.s, p4/M, z27.h, z7.h\n"
+ ".inst 0x44864369 // smlalb z9.s, p4/M, z27.h, z6.h\n"
+ ".inst 0x44864774 // smlalt z20.s, p4/M, z27.h, z6.h\n"
".inst 0x44844378 // smlalb z24.s, p4/M, z27.h, z4.h\n"
- ".inst 0x448843b9 // smlalb z25.s, p4/M, z29.h, z8.h\n"
- ".inst 0x44844770 // smlalt z16.s, p4/M, z27.h, z4.h\n"
- ".inst 0x448847a9 // smlalt z9.s, p4/M, z29.h, z8.h\n"
- "ld1sb { z29.h }, p3/Z, [x25, x8]\n"
- ".inst 0x455713bd // ssublb z29.h, z29.b, z23.b\n"
- ".inst 0x448243ed // smlalb z13.s, p4/M, z31.h, z2.h\n"
+ ".inst 0x44844773 // smlalt z19.s, p4/M, z27.h, z4.h\n"
+ ".inst 0x4483437a // smlalb z26.s, p4/M, z27.h, z3.h\n"
+ ".inst 0x44834777 // smlalt z23.s, p4/M, z27.h, z3.h\n"
+ ".inst 0x4481438b // smlalb z11.s, p4/M, z28.h, z1.h\n"
+ ".inst 0x44814791 // smlalt z17.s, p4/M, z28.h, z1.h\n"
+ ".inst 0x448843ba // smlalb z26.s, p4/M, z29.h, z8.h\n"
+ ".inst 0x448847b7 // smlalt z23.s, p4/M, z29.h, z8.h\n"
+ "ld1sb { z29.h }, p3/Z, [x24, x15]\n"
+ ".inst 0x454c13bd // ssublb z29.h, z29.b, z12.b\n"
+ ".inst 0x44804389 // smlalb z9.s, p4/M, z28.h, z0.h\n"
+ ".inst 0x44804794 // smlalt z20.s, p4/M, z28.h, z0.h\n"
+ "ld1sb { z28.h }, p3/Z, [x23, x15]\n"
+ ".inst 0x454c139c // ssublb z28.h, z28.b, z12.b\n"
+ ".inst 0x448243eb // smlalb z11.s, p4/M, z31.h, z2.h\n"
".inst 0x448247f1 // smlalt z17.s, p4/M, z31.h, z2.h\n"
- "ld1w { z18.s }, p1/Z, [x28, #1, MUL VL]\n"
- "addvl x28, x28, #2\n"
- ".inst 0x448143fa // smlalb z26.s, p4/M, z31.h, z1.h\n"
- ".inst 0x448147ea // smlalt z10.s, p4/M, z31.h, z1.h\n"
- "ld1sb { z31.h }, p3/Z, [x23, x8]\n"
- ".inst 0x455713ff // ssublb z31.h, z31.b, z23.b\n"
- ".inst 0x448543d8 // smlalb z24.s, p4/M, z30.h, z5.h\n"
- ".inst 0x448443d9 // smlalb z25.s, p4/M, z30.h, z4.h\n"
- "uzp1 z1.s, z20.s, z18.s\n"
- ".inst 0x448843cd // smlalb z13.s, p4/M, z30.h, z8.h\n"
+ ".inst 0x448143e9 // smlalb z9.s, p4/M, z31.h, z1.h\n"
+ ".inst 0x448147f4 // smlalt z20.s, p4/M, z31.h, z1.h\n"
+ "ld1sb { z31.h }, p3/Z, [x22, x15]\n"
+ ".inst 0x454c13ff // ssublb z31.h, z31.b, z12.b\n"
+ ".inst 0x448843cb // smlalb z11.s, p4/M, z30.h, z8.h\n"
".inst 0x448847d1 // smlalt z17.s, p4/M, z30.h, z8.h\n"
- "uzp2 z27.s, z20.s, z18.s\n"
- ".inst 0x448743da // smlalb z26.s, p4/M, z30.h, z7.h\n"
- ".inst 0x448747ca // smlalt z10.s, p4/M, z30.h, z7.h\n"
- ".inst 0x448547d0 // smlalt z16.s, p4/M, z30.h, z5.h\n"
- ".inst 0x448447c9 // smlalt z9.s, p4/M, z30.h, z4.h\n"
- "ld1sb { z30.h }, p3/Z, [x22, x8]\n"
- ".inst 0x455713de // ssublb z30.h, z30.b, z23.b\n"
- ".inst 0x448043b8 // smlalb z24.s, p4/M, z29.h, z0.h\n"
- ".inst 0x44824399 // smlalb z25.s, p4/M, z28.h, z2.h\n"
- ".inst 0x448343ad // smlalb z13.s, p4/M, z29.h, z3.h\n"
+ ".inst 0x448743c9 // smlalb z9.s, p4/M, z30.h, z7.h\n"
+ ".inst 0x448747d4 // smlalt z20.s, p4/M, z30.h, z7.h\n"
+ ".inst 0x448543d8 // smlalb z24.s, p4/M, z30.h, z5.h\n"
+ ".inst 0x448547d3 // smlalt z19.s, p4/M, z30.h, z5.h\n"
+ ".inst 0x448443da // smlalb z26.s, p4/M, z30.h, z4.h\n"
+ ".inst 0x448447d7 // smlalt z23.s, p4/M, z30.h, z4.h\n"
+ "ld1sb { z30.h }, p3/Z, [x21, x15]\n"
+ ".inst 0x454c13de // ssublb z30.h, z30.b, z12.b\n"
+ ".inst 0x448343ab // smlalb z11.s, p4/M, z29.h, z3.h\n"
".inst 0x448347b1 // smlalt z17.s, p4/M, z29.h, z3.h\n"
- ".inst 0x448047b0 // smlalt z16.s, p4/M, z29.h, z0.h\n"
- "ld1sb { z29.h }, p3/Z, [x21, x8]\n"
- ".inst 0x44824789 // smlalt z9.s, p4/M, z28.h, z2.h\n"
- ".inst 0x455713bd // ssublb z29.h, z29.b, z23.b\n"
- ".inst 0x448343f8 // smlalb z24.s, p4/M, z31.h, z3.h\n"
- ".inst 0x448543d9 // smlalb z25.s, p4/M, z30.h, z5.h\n"
- ".inst 0x4485439a // smlalb z26.s, p4/M, z28.h, z5.h\n"
- ".inst 0x4485478a // smlalt z10.s, p4/M, z28.h, z5.h\n"
- "ld1sb { z28.h }, p3/Z, [x20, x8]\n"
- ".inst 0x4557139c // ssublb z28.h, z28.b, z23.b\n"
- ".inst 0x448643ed // smlalb z13.s, p4/M, z31.h, z6.h\n"
- ".inst 0x448347f0 // smlalt z16.s, p4/M, z31.h, z3.h\n"
- ".inst 0x04b375ad // sqrdmulh z13.s, z13.s, z19.s\n"
- "inch x8\n"
- ".inst 0x448547c9 // smlalt z9.s, p4/M, z30.h, z5.h\n"
- ".inst 0x448743b8 // smlalb z24.s, p4/M, z29.h, z7.h\n"
- "and z21.d, z13.d, z1.d\n"
- "mov x20, x8\n"
- ".inst 0x448643b9 // smlalb z25.s, p4/M, z29.h, z6.h\n"
+ ".inst 0x448043b8 // smlalb z24.s, p4/M, z29.h, z0.h\n"
+ ".inst 0x448047b3 // smlalt z19.s, p4/M, z29.h, z0.h\n"
+ "ld1sb { z29.h }, p3/Z, [x20, x15]\n"
+ ".inst 0x454c13bd // ssublb z29.h, z29.b, z12.b\n"
+ ".inst 0x44854389 // smlalb z9.s, p4/M, z28.h, z5.h\n"
+ ".inst 0x44854794 // smlalt z20.s, p4/M, z28.h, z5.h\n"
+ ".inst 0x4482439a // smlalb z26.s, p4/M, z28.h, z2.h\n"
+ ".inst 0x44824797 // smlalt z23.s, p4/M, z28.h, z2.h\n"
+ "ld1sb { z28.h }, p3/Z, [x19, x15]\n"
+ "inch x15\n"
+ ".inst 0x448643eb // smlalb z11.s, p4/M, z31.h, z6.h\n"
+ "whilelt p2.s, x15, x17\n"
".inst 0x448647f1 // smlalt z17.s, p4/M, z31.h, z6.h\n"
+ "mov x19, x15\n"
+ ".inst 0x448343f8 // smlalb z24.s, p4/M, z31.h, z3.h\n"
+ ".inst 0x454c139c // ssublb z28.h, z28.b, z12.b\n"
+ ".inst 0x448347f3 // smlalt z19.s, p4/M, z31.h, z3.h\n"
+ "incw x19\n"
+ ".inst 0x448843c9 // smlalb z9.s, p4/M, z30.h, z8.h\n"
+ "whilelt p1.s, x19, x17\n"
+ ".inst 0x04aa756b // sqrdmulh z11.s, z11.s, z10.s\n"
+ "whilelt p3.h, x15, x17\n"
".inst 0x04b67631 // sqrdmulh z17.s, z17.s, z22.s\n"
- "incw x20\n"
- ".inst 0x448747b0 // smlalt z16.s, p4/M, z29.h, z7.h\n"
- ".inst 0x448647a9 // smlalt z9.s, p4/M, z29.h, z6.h\n"
- "asr z21.s, z21.s, #0x1f\n"
- "whilelt p2.s, x8, x17\n"
- ".inst 0x448843da // smlalb z26.s, p4/M, z30.h, z8.h\n"
+ ".inst 0x448847d4 // smlalt z20.s, p4/M, z30.h, z8.h\n"
+ ".inst 0x04aa7529 // sqrdmulh z9.s, z9.s, z10.s\n"
+ "and z16.d, z11.d, z21.d\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ "and z1.d, z17.d, z25.d\n"
+ "and z27.d, z9.d, z21.d\n"
+ "asr z1.s, z1.s, #0x1f\n"
+ ".inst 0x04b67694 // sqrdmulh z20.s, z20.s, z22.s\n"
+ ".inst 0x448543da // smlalb z26.s, p4/M, z30.h, z5.h\n"
+ "asr z27.s, z27.s, #0x1f\n"
+ ".inst 0x448547d7 // smlalt z23.s, p4/M, z30.h, z5.h\n"
+ "sqadd z11.s, z11.s, z16.s\n"
+ ".inst 0x448743b8 // smlalb z24.s, p4/M, z29.h, z7.h\n"
+ "and z16.d, z20.d, z25.d\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ "sqadd z17.s, z17.s, z1.s\n"
+ "sqadd z9.s, z9.s, z27.s\n"
+ ".inst 0x448747b3 // smlalt z19.s, p4/M, z29.h, z7.h\n"
+ ".inst 0x448643ba // smlalb z26.s, p4/M, z29.h, z6.h\n"
+ ".inst 0x448647b7 // smlalt z23.s, p4/M, z29.h, z6.h\n"
".inst 0x44884398 // smlalb z24.s, p4/M, z28.h, z8.h\n"
- "and z20.d, z17.d, z27.d\n"
- "whilelt p1.s, x20, x17\n"
- ".inst 0x44874399 // smlalb z25.s, p4/M, z28.h, z7.h\n"
- ".inst 0x448847ca // smlalt z10.s, p4/M, z30.h, z8.h\n"
- ".inst 0x04b3775a // sqrdmulh z26.s, z26.s, z19.s\n"
- "whilelt p3.h, x8, x17\n"
- ".inst 0x44884790 // smlalt z16.s, p4/M, z28.h, z8.h\n"
- ".inst 0x44874789 // smlalt z9.s, p4/M, z28.h, z7.h\n"
- ".inst 0x04b37718 // sqrdmulh z24.s, z24.s, z19.s\n"
- ".inst 0x04b37739 // sqrdmulh z25.s, z25.s, z19.s\n"
- "sqadd z13.s, z13.s, z21.s\n"
- ".inst 0x4482902d // srshl z13.s, p4/M, z13.s, z1.s\n"
- "asr z20.s, z20.s, #0x1f\n"
- "and z19.d, z26.d, z1.d\n"
- ".inst 0x04b6754a // sqrdmulh z10.s, z10.s, z22.s\n"
- "and z18.d, z24.d, z1.d\n"
- ".inst 0x04b67610 // sqrdmulh z16.s, z16.s, z22.s\n"
- "and z21.d, z25.d, z1.d\n"
- ".inst 0x04b67529 // sqrdmulh z9.s, z9.s, z22.s\n"
- "sqadd z17.s, z17.s, z20.s\n"
- ".inst 0x44829371 // srshl z17.s, p4/M, z17.s, z27.s\n"
- "asr z19.s, z19.s, #0x1f\n"
- "and z2.d, z10.d, z27.d\n"
- "asr z18.s, z18.s, #0x1f\n"
- "and z22.d, z16.d, z27.d\n"
- "asr z21.s, z21.s, #0x1f\n"
- "and z20.d, z9.d, z27.d\n"
- "sqadd z26.s, z26.s, z19.s\n"
- "asr z2.s, z2.s, #0x1f\n"
- ".inst 0x4482903a // srshl z26.s, p4/M, z26.s, z1.s\n"
- "sqadd z24.s, z24.s, z18.s\n"
- "asr z22.s, z22.s, #0x1f\n"
- ".inst 0x44829038 // srshl z24.s, p4/M, z24.s, z1.s\n"
- "sqadd z25.s, z25.s, z21.s\n"
- "asr z20.s, z20.s, #0x1f\n"
- ".inst 0x44829039 // srshl z25.s, p4/M, z25.s, z1.s\n"
- "sqadd z10.s, z10.s, z2.s\n"
- "sqadd z16.s, z16.s, z22.s\n"
- ".inst 0x4482936a // srshl z10.s, p4/M, z10.s, z27.s\n"
- ".inst 0x44829370 // srshl z16.s, p4/M, z16.s, z27.s\n"
- "sqadd z9.s, z9.s, z20.s\n"
- ".inst 0x453041ad // sqxtnb z13.h, z13.s\n"
- ".inst 0x44829369 // srshl z9.s, p4/M, z9.s, z27.s\n"
- ".inst 0x4530435a // sqxtnb z26.h, z26.s\n"
- ".inst 0x45304318 // sqxtnb z24.h, z24.s\n"
- ".inst 0x45304339 // sqxtnb z25.h, z25.s\n"
- ".inst 0x4530462d // sqxtnt z13.h, z17.s\n"
- ".inst 0x4530455a // sqxtnt z26.h, z10.s\n"
- ".inst 0x45304618 // sqxtnt z24.h, z16.s\n"
- ".inst 0x45304539 // sqxtnt z25.h, z9.s\n"
- "sqadd z13.h, z13.h, z14.h\n"
- "smax z13.h, p4/M, z13.h, z12.h\n"
- "smin z13.h, p4/M, z13.h, z11.h\n"
- "sqadd z26.h, z26.h, z14.h\n"
- "sqadd z24.h, z24.h, z14.h\n"
- "smax z26.h, p4/M, z26.h, z12.h\n"
- "smax z24.h, p4/M, z24.h, z12.h\n"
- "sqadd z25.h, z25.h, z14.h\n"
- "smax z25.h, p4/M, z25.h, z12.h\n"
- "smin z26.h, p4/M, z26.h, z11.h\n"
- "st1b { z13.h }, p0, [x15, x10]\n"
- "smin z24.h, p4/M, z24.h, z11.h\n"
- "smin z25.h, p4/M, z25.h, z11.h\n"
- "st1b { z26.h }, p0, [x14, x10]\n"
- "st1b { z24.h }, p0, [x13, x10]\n"
- "st1b { z25.h }, p0, [x12, x10]\n"
+ "sqadd z20.s, z20.s, z16.s\n"
+ ".inst 0x44884793 // smlalt z19.s, p4/M, z28.h, z8.h\n"
+ ".inst 0x4487439a // smlalb z26.s, p4/M, z28.h, z7.h\n"
+ ".inst 0x04aa7718 // sqrdmulh z24.s, z24.s, z10.s\n"
+ ".inst 0x44874797 // smlalt z23.s, p4/M, z28.h, z7.h\n"
+ ".inst 0x04b67673 // sqrdmulh z19.s, z19.s, z22.s\n"
+ ".inst 0x04aa775a // sqrdmulh z26.s, z26.s, z10.s\n"
+ "and z16.d, z24.d, z21.d\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ "and z7.d, z19.d, z25.d\n"
+ "and z3.d, z26.d, z21.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ ".inst 0x04b676f7 // sqrdmulh z23.s, z23.s, z22.s\n"
+ ".inst 0x448292ab // srshl z11.s, p4/M, z11.s, z21.s\n"
+ "asr z3.s, z3.s, #0x1f\n"
+ ".inst 0x44829331 // srshl z17.s, p4/M, z17.s, z25.s\n"
+ "sqadd z24.s, z24.s, z16.s\n"
+ ".inst 0x448292a9 // srshl z9.s, p4/M, z9.s, z21.s\n"
+ "add z11.s, z11.s, z15.s\n"
+ "add z17.s, z17.s, z15.s\n"
+ "sqadd z19.s, z19.s, z7.s\n"
+ "add z9.s, z9.s, z15.s\n"
+ "sqadd z26.s, z26.s, z3.s\n"
+ "and z16.d, z23.d, z25.d\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ "smin z11.s, p4/M, z11.s, z14.s\n"
+ "smin z17.s, p4/M, z17.s, z14.s\n"
+ "smin z9.s, p4/M, z9.s, z14.s\n"
+ ".inst 0x44829334 // srshl z20.s, p4/M, z20.s, z25.s\n"
+ ".inst 0x448292b8 // srshl z24.s, p4/M, z24.s, z21.s\n"
+ "smax z11.s, p4/M, z11.s, z13.s\n"
+ "sqadd z23.s, z23.s, z16.s\n"
+ "add z20.s, z20.s, z15.s\n"
+ "add z24.s, z24.s, z15.s\n"
+ "smax z17.s, p4/M, z17.s, z13.s\n"
+ "smax z9.s, p4/M, z9.s, z13.s\n"
+ "smin z20.s, p4/M, z20.s, z14.s\n"
+ "smin z24.s, p4/M, z24.s, z14.s\n"
+ "trn1 z11.h, z11.h, z17.h\n"
+ "st1b { z11.h }, p0, [x10, x14]\n"
+ "smax z20.s, p4/M, z20.s, z13.s\n"
+ ".inst 0x44829333 // srshl z19.s, p4/M, z19.s, z25.s\n"
+ "smax z24.s, p4/M, z24.s, z13.s\n"
+ ".inst 0x448292ba // srshl z26.s, p4/M, z26.s, z21.s\n"
+ ".inst 0x44829337 // srshl z23.s, p4/M, z23.s, z25.s\n"
+ "trn1 z9.h, z9.h, z20.h\n"
+ "st1b { z9.h }, p0, [x9, x14]\n"
+ "add z19.s, z19.s, z15.s\n"
+ "add z26.s, z26.s, z15.s\n"
+ "add z23.s, z23.s, z15.s\n"
+ "smin z19.s, p4/M, z19.s, z14.s\n"
+ "smin z26.s, p4/M, z26.s, z14.s\n"
+ "smin z23.s, p4/M, z23.s, z14.s\n"
+ "smax z19.s, p4/M, z19.s, z13.s\n"
+ "smax z26.s, p4/M, z26.s, z13.s\n"
+ "smax z23.s, p4/M, z23.s, z13.s\n"
+ "trn1 z24.h, z24.h, z19.h\n"
+ "st1b { z24.h }, p0, [x28, x14]\n"
+ "trn1 z26.h, z26.h, z23.h\n"
+ "st1b { z26.h }, p0, [x27, x14]\n"
+ "inch x14\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ld1w { z17.s }, p2/Z, [x19]\n"
+ "ld1w { z16.s }, p1/Z, [x19, #1, MUL VL]\n"
+ "uzp1 z11.s, z17.s, z16.s\n"
+ "addvl x19, x19, #2\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "uzp2 z17.s, z17.s, z16.s\n"
+ "mov z9.d, z11.d\n"
"ld1sb { z0.h }, p4/Z, [x16]\n"
+ ".inst 0x45521000 // ssublb z0.h, z0.b, z18.b\n"
+ "mov z20.d, z17.d\n"
"ld1sb { z1.h }, p4/Z, [x16, #1, MUL VL]\n"
- "inch x10\n"
+ "mov z24.d, z11.d\n"
"ld1sb { z2.h }, p4/Z, [x16, #2, MUL VL]\n"
+ ".inst 0x45521021 // ssublb z1.h, z1.b, z18.b\n"
+ "mov z19.d, z17.d\n"
"ld1sb { z3.h }, p4/Z, [x16, #3, MUL VL]\n"
- ".inst 0x454f1000 // ssublb z0.h, z0.b, z15.b\n"
- ".inst 0x454f1021 // ssublb z1.h, z1.b, z15.b\n"
+ "mov z26.d, z11.d\n"
"ld1sb { z4.h }, p4/Z, [x16, #4, MUL VL]\n"
+ ".inst 0x45521042 // ssublb z2.h, z2.b, z18.b\n"
+ "mov z23.d, z17.d\n"
"ld1sb { z5.h }, p4/Z, [x16, #5, MUL VL]\n"
- ".inst 0x454f1042 // ssublb z2.h, z2.b, z15.b\n"
- ".inst 0x454f1063 // ssublb z3.h, z3.b, z15.b\n"
+ ".inst 0x45521063 // ssublb z3.h, z3.b, z18.b\n"
"ld1sb { z6.h }, p4/Z, [x16, #6, MUL VL]\n"
"ld1sb { z7.h }, p4/Z, [x16, #7, MUL VL]\n"
+ ".inst 0x45521084 // ssublb z4.h, z4.b, z18.b\n"
"inch x16, ALL, MUL #8\n"
- ".inst 0x454f1084 // ssublb z4.h, z4.b, z15.b\n"
- "ld1w { z17.s }, p2/Z, [x26]\n"
- "ld1w { z16.s }, p1/Z, [x26, #1, MUL VL]\n"
- "uzp1 z13.s, z17.s, z16.s\n"
- "uzp2 z17.s, z17.s, z16.s\n"
"ld1sb { z8.h }, p4/Z, [x16]\n"
- "ldp x24, x23, [x11, #0x0]\n"
- "addvl x26, x26, #2\n"
- "str x26, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x22, x21, [x11, #0x10]\n"
- "ldr x20, [x11, #0x20]\n"
- "mov z26.d, z13.d\n"
- "mov z10.d, z17.d\n"
- "ld1sb { z31.h }, p3/Z, [x24, x8]\n"
- "ld1sb { z30.h }, p3/Z, [x23, x8]\n"
- "mov z24.d, z13.d\n"
- "mov z16.d, z17.d\n"
- "ld1sb { z29.h }, p3/Z, [x22, x8]\n"
- "ld1sb { z28.h }, p3/Z, [x21, x8]\n"
- "mov z25.d, z13.d\n"
- "mov z9.d, z17.d\n"
- "ld1sb { z27.h }, p3/Z, [x20, x8]\n"
- ".inst 0x454f10a5 // ssublb z5.h, z5.b, z15.b\n"
- ".inst 0x454f10c6 // ssublb z6.h, z6.b, z15.b\n"
- ".inst 0x454f10e7 // ssublb z7.h, z7.b, z15.b\n"
- ".inst 0x454f1108 // ssublb z8.h, z8.b, z15.b\n"
- ".inst 0x455713ff // ssublb z31.h, z31.b, z23.b\n"
- ".inst 0x455713de // ssublb z30.h, z30.b, z23.b\n"
- ".inst 0x455713bd // ssublb z29.h, z29.b, z23.b\n"
- ".inst 0x4557139c // ssublb z28.h, z28.b, z23.b\n"
- ".inst 0x4557137b // ssublb z27.h, z27.b, z23.b\n"
+ "ldp x23, x22, [x12, #0x0]\n"
+ ".inst 0x455210a5 // ssublb z5.h, z5.b, z18.b\n"
+ ".inst 0x455210c6 // ssublb z6.h, z6.b, z18.b\n"
+ "ldp x21, x20, [x12, #0x10]\n"
+ ".inst 0x455210e7 // ssublb z7.h, z7.b, z18.b\n"
+ ".inst 0x45521108 // ssublb z8.h, z8.b, z18.b\n"
+ "ldr x19, [x12, #0x20]\n"
+ "ld1sb { z31.h }, p3/Z, [x23, x15]\n"
+ ".inst 0x454c13ff // ssublb z31.h, z31.b, z12.b\n"
+ "ld1sb { z30.h }, p3/Z, [x22, x15]\n"
+ "ld1sb { z29.h }, p3/Z, [x21, x15]\n"
+ ".inst 0x454c13de // ssublb z30.h, z30.b, z12.b\n"
+ "ld1sb { z28.h }, p3/Z, [x20, x15]\n"
+ "ld1sb { z27.h }, p3/Z, [x19, x15]\n"
+ ".inst 0x454c13bd // ssublb z29.h, z29.b, z12.b\n"
+ ".inst 0x454c139c // ssublb z28.h, z28.b, z12.b\n"
+ ".inst 0x454c137b // ssublb z27.h, z27.b, z12.b\n"
"b.any 1b\n"
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
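The bulk of both the removed and the reinstated instruction streams above is the fixed-point requantization tail: sqrdmulh against the per-channel multiplier, an and/asr/sqadd fixup, srshl by the per-channel shift, addition of the output offset, smin/smax clamping, and a narrow-and-store. As a minimal scalar sketch only — assuming gemmlowp-style Requantize32 semantics, with illustrative names (`requantize_lane`, `right_shift`) that do not appear in the kernels, and ignoring the saturating corner case of sqrdmulh — one lane of that sequence behaves roughly like:

#include <algorithm>
#include <cstdint>

static int8_t requantize_lane(int32_t acc, int32_t multiplier, int32_t right_shift,
                              int32_t c_offset, int32_t minval, int32_t maxval)
{
    // sqrdmulh: rounding doubling multiply, keeping the high 32 bits.
    int64_t prod = (int64_t)acc * (int64_t)multiplier;
    int32_t high = (int32_t)((prod + (INT64_C(1) << 30)) >> 31);

    // The and/asr/sqadd trio in the assembly nudges `high` before the
    // rounding shift so negative values are not biased downward; here the
    // rounding shift (srshl by a negative amount) is modelled directly.
    int32_t res = right_shift > 0
                ? (high + (1 << (right_shift - 1))) >> right_shift
                : high;

    // Add the output offset vector loaded in the preamble, then clamp to
    // [minval, maxval] (smax/smin) before the narrow-and-store (st1b).
    res += c_offset;
    return (int8_t)std::max(minval, std::min(maxval, res));
}

The kernels fuse these steps across whole vectors per output row, which is why the diff shows the same sqrdmulh/srshl/clamp pattern repeated once per accumulator register.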
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
index 357c9f8399..ba8c1fdb8d 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -100,348 +100,356 @@ void sve_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "mov x7, #0x0\n"
- "ldr x25, [%x[params], %[offsetof_Params_requant]]\n"
+ "ldr x5, [%x[params], %[offsetof_Params_n_channels]]\n"
"ptrue p4.b\n"
- "ldr x24, [%x[params], %[offsetof_Params_outptrs]]\n"
- "mov x23, x7\n"
- "add x21, x25, %[offsetof_Requantize32_a_offset]\n"
- "ldr x8, [%x[params], %[offsetof_Params_n_channels]]\n"
- "ldr x17, [%x[params], %[offsetof_Params_weights]]\n"
- "add x20, x25, %[offsetof_Requantize32_b_offset]\n"
- "add x22, x25, %[offsetof_Requantize32_c_offset]\n"
- "ld1rb { z23.b }, p4/Z, [x21]\n"
+ "ldr x6, [%x[params], %[offsetof_Params_weights]]\n"
+ "mov x7, #0x0\n"
+ "ldr x22, [%x[params], %[offsetof_Params_requant]]\n"
+ "mov x8, #0x0\n"
+ "ldr x17, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "add x16, %x[params], %[offsetof_Params_inptrs]\n"
+ "ldr x15, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "add x19, x22, %[offsetof_Requantize32_a_offset]\n"
+ "ldr x21, [%x[params], %[offsetof_Params_outptrs]]\n"
+ "add x20, x22, %[offsetof_Requantize32_b_offset]\n"
+ "ld1rb { z19.b }, p4/Z, [x19]\n"
+ "add x19, x22, %[offsetof_Requantize32_c_offset]\n"
"ld1rb { z12.b }, p4/Z, [x20]\n"
- "add x21, x25, %[offsetof_Requantize32_minval]\n"
- "add x20, x25, %[offsetof_Requantize32_maxval]\n"
- "ld1rh { z14.h }, p4/Z, [x22]\n"
- "ld1rh { z16.h }, p4/Z, [x21]\n"
- "ld1rh { z15.h }, p4/Z, [x20]\n"
- "ldp x16, x15, [x24, #0x0]\n"
- "incw x23\n"
- "whilelt p3.h, x7, x8\n"
- "ldp x14, x13, [x24, #0x10]\n"
- "whilelt p2.s, x7, x8\n"
- "whilelt p1.s, x23, x8\n"
- "ldr x12, [%x[params], %[offsetof_Params_bias]]\n"
- "ld1sb { z0.h }, p4/Z, [x17]\n"
- "ld1sb { z1.h }, p4/Z, [x17, #1, MUL VL]\n"
- "add x11, %x[params], %[offsetof_Params_inptrs]\n"
- "mov x10, #0x0\n"
- "ld1sb { z2.h }, p4/Z, [x17, #2, MUL VL]\n"
- "ld1sb { z3.h }, p4/Z, [x17, #3, MUL VL]\n"
+ "add x20, x22, %[offsetof_Requantize32_minval]\n"
+ "ld1rw { z14.s }, p4/Z, [x19]\n"
+ "add x19, x22, %[offsetof_Requantize32_maxval]\n"
+ "ld1rw { z20.s }, p4/Z, [x20]\n"
+ "whilelt p3.h, x7, x5\n"
+ "ld1rw { z15.s }, p4/Z, [x19]\n"
+ "whilelt p2.s, x7, x5\n"
+ "ldp x14, x13, [x21, #0x0]\n"
+ "mov x19, x7\n"
+ "incw x19\n"
+ "ldp x12, x11, [x21, #0x10]\n"
+ "whilelt p1.s, x19, x5\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ld1w { z18.s }, p2/Z, [x19]\n"
+ "ld1w { z16.s }, p1/Z, [x19, #1, MUL VL]\n"
+ "uzp1 z13.s, z18.s, z16.s\n"
+ "addvl x19, x19, #2\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "uzp2 z16.s, z18.s, z16.s\n"
+ "mov z11.d, z13.d\n"
+ "ld1sb { z0.h }, p4/Z, [x6]\n"
".inst 0x454c1000 // ssublb z0.h, z0.b, z12.b\n"
+ "mov z9.d, z16.d\n"
+ "ld1sb { z1.h }, p4/Z, [x6, #1, MUL VL]\n"
+ "mov z18.d, z13.d\n"
+ "ld1sb { z2.h }, p4/Z, [x6, #2, MUL VL]\n"
".inst 0x454c1021 // ssublb z1.h, z1.b, z12.b\n"
- "ld1sb { z4.h }, p4/Z, [x17, #4, MUL VL]\n"
- "ld1sb { z5.h }, p4/Z, [x17, #5, MUL VL]\n"
+ "mov z10.d, z16.d\n"
+ "ld1sb { z3.h }, p4/Z, [x6, #3, MUL VL]\n"
+ "mov z22.d, z13.d\n"
+ "ld1sb { z4.h }, p4/Z, [x6, #4, MUL VL]\n"
".inst 0x454c1042 // ssublb z2.h, z2.b, z12.b\n"
+ "mov z23.d, z16.d\n"
+ "ld1sb { z5.h }, p4/Z, [x6, #5, MUL VL]\n"
".inst 0x454c1063 // ssublb z3.h, z3.b, z12.b\n"
- "ld1sb { z6.h }, p4/Z, [x17, #6, MUL VL]\n"
- "ld1sb { z7.h }, p4/Z, [x17, #7, MUL VL]\n"
- "inch x17, ALL, MUL #8\n"
+ "ld1sb { z6.h }, p4/Z, [x6, #6, MUL VL]\n"
+ "ld1sb { z7.h }, p4/Z, [x6, #7, MUL VL]\n"
".inst 0x454c1084 // ssublb z4.h, z4.b, z12.b\n"
- "ld1w { z18.s }, p2/Z, [x12]\n"
- "ld1w { z8.s }, p1/Z, [x12, #1, MUL VL]\n"
- "uzp1 z13.s, z18.s, z8.s\n"
- "uzp2 z17.s, z18.s, z8.s\n"
- "ld1sb { z8.h }, p4/Z, [x17]\n"
- "ldp x9, x28, [x11, #0x0]\n"
- "addvl x12, x12, #2\n"
- "mov z9.d, z13.d\n"
- "ldp x25, x24, [x11, #0x10]\n"
- "ldp x23, x22, [x11, #0x20]\n"
- "mov z10.d, z17.d\n"
- "mov z11.d, z13.d\n"
- "ldp x21, x20, [x11, #0x30]\n"
- "ld1sb { z31.h }, p3/Z, [x9, x7]\n"
- "mov z22.d, z17.d\n"
- "mov z21.d, z13.d\n"
- "ld1sb { z30.h }, p3/Z, [x28, x7]\n"
- "ld1sb { z29.h }, p3/Z, [x25, x7]\n"
- "mov z18.d, z17.d\n"
+ "inch x6, ALL, MUL #8\n"
+ "ld1sb { z8.h }, p4/Z, [x6]\n"
+ "ldp x26, x25, [x16, #0x0]\n"
".inst 0x454c10a5 // ssublb z5.h, z5.b, z12.b\n"
- "ld1sb { z28.h }, p3/Z, [x24, x7]\n"
- "ld1sb { z27.h }, p3/Z, [x23, x7]\n"
".inst 0x454c10c6 // ssublb z6.h, z6.b, z12.b\n"
+ "ldp x24, x23, [x16, #0x10]\n"
".inst 0x454c10e7 // ssublb z7.h, z7.b, z12.b\n"
- "ld1sb { z26.h }, p3/Z, [x22, x7]\n"
- "ld1sb { z25.h }, p3/Z, [x21, x7]\n"
".inst 0x454c1108 // ssublb z8.h, z8.b, z12.b\n"
- ".inst 0x455713ff // ssublb z31.h, z31.b, z23.b\n"
- "ld1sb { z24.h }, p3/Z, [x20, x7]\n"
- "ldr x27, [%x[params], %[offsetof_Params_requant_muls]]\n"
- ".inst 0x455713de // ssublb z30.h, z30.b, z23.b\n"
- ".inst 0x455713bd // ssublb z29.h, z29.b, z23.b\n"
- "ldr x26, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "str x12, [%x[params], %[offsetof_Params_bias]]\n"
- ".inst 0x4557139c // ssublb z28.h, z28.b, z23.b\n"
- ".inst 0x4557137b // ssublb z27.h, z27.b, z23.b\n"
- ".inst 0x4557135a // ssublb z26.h, z26.b, z23.b\n"
- ".inst 0x45571339 // ssublb z25.h, z25.b, z23.b\n"
- ".inst 0x45571318 // ssublb z24.h, z24.b, z23.b\n"
+ "ldp x22, x21, [x16, #0x20]\n"
+ "ldp x20, x19, [x16, #0x30]\n"
+ "ld1sb { z31.h }, p3/Z, [x26, x7]\n"
+ ".inst 0x455313ff // ssublb z31.h, z31.b, z19.b\n"
+ "ld1sb { z30.h }, p3/Z, [x25, x7]\n"
+ "ld1sb { z29.h }, p3/Z, [x24, x7]\n"
+ ".inst 0x455313de // ssublb z30.h, z30.b, z19.b\n"
+ "ld1sb { z28.h }, p3/Z, [x23, x7]\n"
+ "ld1sb { z27.h }, p3/Z, [x22, x7]\n"
+ ".inst 0x455313bd // ssublb z29.h, z29.b, z19.b\n"
+ "ld1sb { z26.h }, p3/Z, [x21, x7]\n"
+ ".inst 0x4553139c // ssublb z28.h, z28.b, z19.b\n"
+ "ld1sb { z25.h }, p3/Z, [x20, x7]\n"
+ "ld1sb { z24.h }, p3/Z, [x19, x7]\n"
+ ".inst 0x4553137b // ssublb z27.h, z27.b, z19.b\n"
+ ".inst 0x4553135a // ssublb z26.h, z26.b, z19.b\n"
+ ".inst 0x45531339 // ssublb z25.h, z25.b, z19.b\n"
+ ".inst 0x45531318 // ssublb z24.h, z24.b, z19.b\n"
"1:" // Loop
".inst 0x448843ed // smlalb z13.s, p4/M, z31.h, z8.h\n"
- ".inst 0x448847f1 // smlalt z17.s, p4/M, z31.h, z8.h\n"
- "ldr x25, [x11, #0x40]\n"
- "ldr x24, [x11, #0x48]\n"
- ".inst 0x448643e9 // smlalb z9.s, p4/M, z31.h, z6.h\n"
- ".inst 0x448647ea // smlalt z10.s, p4/M, z31.h, z6.h\n"
- "ldr x22, [x11, #0x50]\n"
- "ldr x20, [x11, #0x58]\n"
+ "ldr x23, [x16, #0x40]\n"
+ "whilelt p0.h, x8, x5\n"
+ ".inst 0x448847f0 // smlalt z16.s, p4/M, z31.h, z8.h\n"
+ "ldr x22, [x16, #0x48]\n"
+ "inch x6\n"
+ ".inst 0x448643eb // smlalb z11.s, p4/M, z31.h, z6.h\n"
+ "ldr x21, [x16, #0x50]\n"
+ ".inst 0x448647e9 // smlalt z9.s, p4/M, z31.h, z6.h\n"
+ "ldr x20, [x16, #0x58]\n"
+ ".inst 0x448243f2 // smlalb z18.s, p4/M, z31.h, z2.h\n"
+ "ldr x19, [x16, #0x60]\n"
+ ".inst 0x448247ea // smlalt z10.s, p4/M, z31.h, z2.h\n"
+ "ldr x10, [x16, #0x68]\n"
+ ".inst 0x448043f6 // smlalb z22.s, p4/M, z31.h, z0.h\n"
+ "ldr x9, [x16, #0x70]\n"
+ ".inst 0x448047f7 // smlalt z23.s, p4/M, z31.h, z0.h\n"
+ "ldr x28, [x16, #0x78]\n"
".inst 0x448043cd // smlalb z13.s, p4/M, z30.h, z0.h\n"
- ".inst 0x448047d1 // smlalt z17.s, p4/M, z30.h, z0.h\n"
- "ldr x23, [x11, #0x78]\n"
- "ldr x21, [x11, #0x60]\n"
- ".inst 0x44814389 // smlalb z9.s, p4/M, z28.h, z1.h\n"
- ".inst 0x4481478a // smlalt z10.s, p4/M, z28.h, z1.h\n"
- "ld1sb { z28.h }, p3/Z, [x24, x7]\n"
- ".inst 0x4557139c // ssublb z28.h, z28.b, z23.b\n"
+ "ldr x27, [x16, #0x80]\n"
+ ".inst 0x448047d0 // smlalt z16.s, p4/M, z30.h, z0.h\n"
+ "ldr x26, [x16, #0x88]\n"
+ ".inst 0x4481438b // smlalb z11.s, p4/M, z28.h, z1.h\n"
+ "ldr x25, [x16, #0x90]\n"
+ ".inst 0x44814789 // smlalt z9.s, p4/M, z28.h, z1.h\n"
+ "ld1sb { z28.h }, p3/Z, [x22, x7]\n"
+ ".inst 0x4553139c // ssublb z28.h, z28.b, z19.b\n"
".inst 0x448143ad // smlalb z13.s, p4/M, z29.h, z1.h\n"
- ".inst 0x448147b1 // smlalt z17.s, p4/M, z29.h, z1.h\n"
- "ld1sb { z29.h }, p3/Z, [x25, x7]\n"
- ".inst 0x455713bd // ssublb z29.h, z29.b, z23.b\n"
- ".inst 0x44824369 // smlalb z9.s, p4/M, z27.h, z2.h\n"
- ".inst 0x4482476a // smlalt z10.s, p4/M, z27.h, z2.h\n"
- "ld1sb { z27.h }, p3/Z, [x22, x7]\n"
- ".inst 0x4557137b // ssublb z27.h, z27.b, z23.b\n"
+ "ldr x24, [x16, #0x98]\n"
+ ".inst 0x448147b0 // smlalt z16.s, p4/M, z29.h, z1.h\n"
+ "ld1sb { z29.h }, p3/Z, [x23, x7]\n"
+ ".inst 0x455313bd // ssublb z29.h, z29.b, z19.b\n"
+ ".inst 0x4482436b // smlalb z11.s, p4/M, z27.h, z2.h\n"
+ "ldr x23, [x16, #0xa0]\n"
+ ".inst 0x44824769 // smlalt z9.s, p4/M, z27.h, z2.h\n"
+ "ld1sb { z27.h }, p3/Z, [x21, x7]\n"
+ ".inst 0x4553137b // ssublb z27.h, z27.b, z19.b\n"
".inst 0x4483434d // smlalb z13.s, p4/M, z26.h, z3.h\n"
- ".inst 0x44834751 // smlalt z17.s, p4/M, z26.h, z3.h\n"
+ "ldr x22, [x16, #0xa8]\n"
+ ".inst 0x44834750 // smlalt z16.s, p4/M, z26.h, z3.h\n"
"ld1sb { z26.h }, p3/Z, [x20, x7]\n"
- ".inst 0x4557135a // ssublb z26.h, z26.b, z23.b\n"
- ".inst 0x44804309 // smlalb z9.s, p4/M, z24.h, z0.h\n"
- ".inst 0x4480470a // smlalt z10.s, p4/M, z24.h, z0.h\n"
- "ldr x22, [x11, #0x80]\n"
- "ldr x20, [x11, #0x68]\n"
+ ".inst 0x4553135a // ssublb z26.h, z26.b, z19.b\n"
".inst 0x4484432d // smlalb z13.s, p4/M, z25.h, z4.h\n"
- ".inst 0x44844731 // smlalt z17.s, p4/M, z25.h, z4.h\n"
- "ld1sb { z25.h }, p3/Z, [x21, x7]\n"
- ".inst 0x45571339 // ssublb z25.h, z25.b, z23.b\n"
- ".inst 0x448443a9 // smlalb z9.s, p4/M, z29.h, z4.h\n"
- ".inst 0x448447aa // smlalt z10.s, p4/M, z29.h, z4.h\n"
- "ldr x21, [x11, #0x88]\n"
- "ld1sb { z29.h }, p3/Z, [x20, x7]\n"
+ "ldr x21, [x16, #0xb0]\n"
+ ".inst 0x44844730 // smlalt z16.s, p4/M, z25.h, z4.h\n"
+ "ld1sb { z25.h }, p3/Z, [x19, x7]\n"
+ ".inst 0x45531339 // ssublb z25.h, z25.b, z19.b\n"
".inst 0x4482430d // smlalb z13.s, p4/M, z24.h, z2.h\n"
- ".inst 0x44824711 // smlalt z17.s, p4/M, z24.h, z2.h\n"
- "ldr x20, [x11, #0x70]\n"
- ".inst 0x455713bd // ssublb z29.h, z29.b, z23.b\n"
- ".inst 0x44854389 // smlalb z9.s, p4/M, z28.h, z5.h\n"
- ".inst 0x4485478a // smlalt z10.s, p4/M, z28.h, z5.h\n"
- "ld1sb { z28.h }, p3/Z, [x22, x7]\n"
- ".inst 0x4557139c // ssublb z28.h, z28.b, z23.b\n"
- ".inst 0x448243eb // smlalb z11.s, p4/M, z31.h, z2.h\n"
- ".inst 0x448247f6 // smlalt z22.s, p4/M, z31.h, z2.h\n"
- "ldr x25, [x11, #0x98]\n"
- "ld1sb { z24.h }, p3/Z, [x20, x7]\n"
+ "ldr x20, [x16, #0xb8]\n"
+ ".inst 0x44824710 // smlalt z16.s, p4/M, z24.h, z2.h\n"
+ "ldr x19, [x16, #0xc0]\n"
+ ".inst 0x4480430b // smlalb z11.s, p4/M, z24.h, z0.h\n"
+ "ld1w { z21.s }, p2/Z, [x17]\n"
+ ".inst 0x44804709 // smlalt z9.s, p4/M, z24.h, z0.h\n"
+ "ld1sb { z24.h }, p3/Z, [x9, x7]\n"
+ ".inst 0x45531318 // ssublb z24.h, z24.b, z19.b\n"
+ ".inst 0x448443ab // smlalb z11.s, p4/M, z29.h, z4.h\n"
+ "ld1w { z17.s }, p1/Z, [x17, #1, MUL VL]\n"
+ ".inst 0x448447a9 // smlalt z9.s, p4/M, z29.h, z4.h\n"
+ "ld1sb { z29.h }, p3/Z, [x10, x7]\n"
+ "addvl x17, x17, #2\n"
".inst 0x4485436d // smlalb z13.s, p4/M, z27.h, z5.h\n"
- ".inst 0x44854771 // smlalt z17.s, p4/M, z27.h, z5.h\n"
- ".inst 0x45571318 // ssublb z24.h, z24.b, z23.b\n"
- "ldr x24, [x11, #0x90]\n"
- ".inst 0x44834369 // smlalb z9.s, p4/M, z27.h, z3.h\n"
- ".inst 0x4483476a // smlalt z10.s, p4/M, z27.h, z3.h\n"
- "ld1sb { z27.h }, p3/Z, [x23, x7]\n"
- ".inst 0x4557137b // ssublb z27.h, z27.b, z23.b\n"
- ".inst 0x448043f5 // smlalb z21.s, p4/M, z31.h, z0.h\n"
- ".inst 0x4483434b // smlalb z11.s, p4/M, z26.h, z3.h\n"
- "ldr x23, [x11, #0xa8]\n"
- "ldr x20, [x11, #0xa0]\n"
- ".inst 0x44834756 // smlalt z22.s, p4/M, z26.h, z3.h\n"
- ".inst 0x448047f2 // smlalt z18.s, p4/M, z31.h, z0.h\n"
- "ld1sb { z26.h }, p3/Z, [x21, x7]\n"
- ".inst 0x4557135a // ssublb z26.h, z26.b, z23.b\n"
- ".inst 0x44844375 // smlalb z21.s, p4/M, z27.h, z4.h\n"
- ".inst 0x4480432b // smlalb z11.s, p4/M, z25.h, z0.h\n"
- "ldr x22, [x11, #0xb0]\n"
- "ldr x21, [x11, #0xb8]\n"
- ".inst 0x44804736 // smlalt z22.s, p4/M, z25.h, z0.h\n"
- ".inst 0x44844772 // smlalt z18.s, p4/M, z27.h, z4.h\n"
- "ld1sb { z27.h }, p3/Z, [x20, x7]\n"
- ".inst 0x4557137b // ssublb z27.h, z27.b, z23.b\n"
- ".inst 0x44814395 // smlalb z21.s, p4/M, z28.h, z1.h\n"
+ ".inst 0x455313bd // ssublb z29.h, z29.b, z19.b\n"
+ "uzp1 z30.s, z21.s, z17.s\n"
+ "uzp2 z31.s, z21.s, z17.s\n"
+ "ld1w { z21.s }, p2/Z, [x15]\n"
+ ".inst 0x4485438b // smlalb z11.s, p4/M, z28.h, z5.h\n"
+ "ld1w { z17.s }, p1/Z, [x15, #1, MUL VL]\n"
+ "addvl x15, x15, #2\n"
+ ".inst 0x44854789 // smlalt z9.s, p4/M, z28.h, z5.h\n"
+ "ld1sb { z28.h }, p3/Z, [x27, x7]\n"
+ ".inst 0x4553139c // ssublb z28.h, z28.b, z19.b\n"
+ ".inst 0x44854770 // smlalt z16.s, p4/M, z27.h, z5.h\n"
+ ".inst 0x4483436b // smlalb z11.s, p4/M, z27.h, z3.h\n"
+ ".inst 0x44834769 // smlalt z9.s, p4/M, z27.h, z3.h\n"
+ "ld1sb { z27.h }, p3/Z, [x28, x7]\n"
+ ".inst 0x4553137b // ssublb z27.h, z27.b, z19.b\n"
+ ".inst 0x44834352 // smlalb z18.s, p4/M, z26.h, z3.h\n"
+ ".inst 0x4483474a // smlalt z10.s, p4/M, z26.h, z3.h\n"
+ "ld1sb { z26.h }, p3/Z, [x26, x7]\n"
+ ".inst 0x4553135a // ssublb z26.h, z26.b, z19.b\n"
".inst 0x4486432d // smlalb z13.s, p4/M, z25.h, z6.h\n"
- "ldr x20, [x11, #0xc0]\n"
- "ld1w { z31.s }, p2/Z, [x27]\n"
- ".inst 0x44864731 // smlalt z17.s, p4/M, z25.h, z6.h\n"
- ".inst 0x448443ab // smlalb z11.s, p4/M, z29.h, z4.h\n"
- "ld1sb { z25.h }, p3/Z, [x24, x7]\n"
- ".inst 0x45571339 // ssublb z25.h, z25.b, z23.b\n"
- ".inst 0x448447b6 // smlalt z22.s, p4/M, z29.h, z4.h\n"
- "ld1sb { z29.h }, p3/Z, [x25, x7]\n"
- ".inst 0x44814792 // smlalt z18.s, p4/M, z28.h, z1.h\n"
- ".inst 0x455713bd // ssublb z29.h, z29.b, z23.b\n"
- ".inst 0x44854355 // smlalb z21.s, p4/M, z26.h, z5.h\n"
+ ".inst 0x44864730 // smlalt z16.s, p4/M, z25.h, z6.h\n"
+ ".inst 0x44804332 // smlalb z18.s, p4/M, z25.h, z0.h\n"
+ ".inst 0x4480472a // smlalt z10.s, p4/M, z25.h, z0.h\n"
+ "ld1sb { z25.h }, p3/Z, [x25, x7]\n"
+ ".inst 0x45531339 // ssublb z25.h, z25.b, z19.b\n"
+ "uzp1 z0.s, z21.s, z17.s\n"
+ "uzp2 z21.s, z21.s, z17.s\n"
+ ".inst 0x448443b2 // smlalb z18.s, p4/M, z29.h, z4.h\n"
+ ".inst 0x448447aa // smlalt z10.s, p4/M, z29.h, z4.h\n"
+ "ld1sb { z29.h }, p3/Z, [x24, x7]\n"
+ ".inst 0x455313bd // ssublb z29.h, z29.b, z19.b\n"
".inst 0x4487430d // smlalb z13.s, p4/M, z24.h, z7.h\n"
- "ld1w { z20.s }, p1/Z, [x27, #1, MUL VL]\n"
- "uzp1 z19.s, z31.s, z20.s\n"
- ".inst 0x44874711 // smlalt z17.s, p4/M, z24.h, z7.h\n"
- ".inst 0x4481430b // smlalb z11.s, p4/M, z24.h, z1.h\n"
- "uzp2 z30.s, z31.s, z20.s\n"
- "ld1w { z31.s }, p2/Z, [x26]\n"
- ".inst 0x44814716 // smlalt z22.s, p4/M, z24.h, z1.h\n"
- "ld1sb { z24.h }, p3/Z, [x23, x7]\n"
- ".inst 0x44854752 // smlalt z18.s, p4/M, z26.h, z5.h\n"
- ".inst 0x45571318 // ssublb z24.h, z24.b, z23.b\n"
- ".inst 0x448243b5 // smlalb z21.s, p4/M, z29.h, z2.h\n"
- "ld1sb { z26.h }, p3/Z, [x22, x7]\n"
- ".inst 0x448247b2 // smlalt z18.s, p4/M, z29.h, z2.h\n"
- ".inst 0x4557135a // ssublb z26.h, z26.b, z23.b\n"
- ".inst 0x4486432b // smlalb z11.s, p4/M, z25.h, z6.h\n"
- ".inst 0x44834315 // smlalb z21.s, p4/M, z24.h, z3.h\n"
- "ld1w { z20.s }, p1/Z, [x26, #1, MUL VL]\n"
- "uzp1 z1.s, z31.s, z20.s\n"
- ".inst 0x44874389 // smlalb z9.s, p4/M, z28.h, z7.h\n"
- ".inst 0x4487478a // smlalt z10.s, p4/M, z28.h, z7.h\n"
- ".inst 0x04b375ad // sqrdmulh z13.s, z13.s, z19.s\n"
- "whilelt p0.h, x10, x8\n"
- ".inst 0x44864736 // smlalt z22.s, p4/M, z25.h, z6.h\n"
- "ld1sb { z25.h }, p3/Z, [x21, x7]\n"
- ".inst 0x44834712 // smlalt z18.s, p4/M, z24.h, z3.h\n"
- ".inst 0x45571339 // ssublb z25.h, z25.b, z23.b\n"
- ".inst 0x4487436b // smlalb z11.s, p4/M, z27.h, z7.h\n"
- ".inst 0x44874355 // smlalb z21.s, p4/M, z26.h, z7.h\n"
- "uzp2 z31.s, z31.s, z20.s\n"
- "inch x17\n"
- ".inst 0x448843a9 // smlalb z9.s, p4/M, z29.h, z8.h\n"
- ".inst 0x448847aa // smlalt z10.s, p4/M, z29.h, z8.h\n"
- "ld1sb { z29.h }, p3/Z, [x20, x7]\n"
- ".inst 0x455713bd // ssublb z29.h, z29.b, z23.b\n"
- ".inst 0x44874776 // smlalt z22.s, p4/M, z27.h, z7.h\n"
- ".inst 0x44874752 // smlalt z18.s, p4/M, z26.h, z7.h\n"
- "and z0.d, z13.d, z1.d\n"
+ ".inst 0x44874710 // smlalt z16.s, p4/M, z24.h, z7.h\n"
+ ".inst 0x44814312 // smlalb z18.s, p4/M, z24.h, z1.h\n"
+ ".inst 0x4481470a // smlalt z10.s, p4/M, z24.h, z1.h\n"
+ "ld1sb { z24.h }, p3/Z, [x22, x7]\n"
+ ".inst 0x45531318 // ssublb z24.h, z24.b, z19.b\n"
+ ".inst 0x04be75ad // sqrdmulh z13.s, z13.s, z30.s\n"
+ ".inst 0x04bf7610 // sqrdmulh z16.s, z16.s, z31.s\n"
+ ".inst 0x44844376 // smlalb z22.s, p4/M, z27.h, z4.h\n"
+ ".inst 0x44844777 // smlalt z23.s, p4/M, z27.h, z4.h\n"
+ "ld1sb { z27.h }, p3/Z, [x23, x7]\n"
+ ".inst 0x4553137b // ssublb z27.h, z27.b, z19.b\n"
+ "and z4.d, z13.d, z0.d\n"
+ "and z17.d, z16.d, z21.d\n"
+ "asr z4.s, z4.s, #0x1f\n"
+ ".inst 0x4487438b // smlalb z11.s, p4/M, z28.h, z7.h\n"
+ ".inst 0x44874789 // smlalt z9.s, p4/M, z28.h, z7.h\n"
+ "asr z17.s, z17.s, #0x1f\n"
+ ".inst 0x44814396 // smlalb z22.s, p4/M, z28.h, z1.h\n"
+ ".inst 0x44814797 // smlalt z23.s, p4/M, z28.h, z1.h\n"
+ ".inst 0x44864332 // smlalb z18.s, p4/M, z25.h, z6.h\n"
+ ".inst 0x4486472a // smlalt z10.s, p4/M, z25.h, z6.h\n"
+ "ld1sb { z25.h }, p3/Z, [x20, x7]\n"
+ ".inst 0x45531339 // ssublb z25.h, z25.b, z19.b\n"
+ "sqadd z13.s, z13.s, z4.s\n"
+ "sqadd z16.s, z16.s, z17.s\n"
+ ".inst 0x44854356 // smlalb z22.s, p4/M, z26.h, z5.h\n"
+ ".inst 0x44854757 // smlalt z23.s, p4/M, z26.h, z5.h\n"
+ "ld1sb { z26.h }, p3/Z, [x21, x7]\n"
+ ".inst 0x4553135a // ssublb z26.h, z26.b, z19.b\n"
+ ".inst 0x448843ab // smlalb z11.s, p4/M, z29.h, z8.h\n"
+ ".inst 0x448847a9 // smlalt z9.s, p4/M, z29.h, z8.h\n"
+ ".inst 0x448243b6 // smlalb z22.s, p4/M, z29.h, z2.h\n"
+ ".inst 0x448247b7 // smlalt z23.s, p4/M, z29.h, z2.h\n"
+ "ld1sb { z29.h }, p3/Z, [x19, x7]\n"
"inch x7\n"
- ".inst 0x4485430b // smlalb z11.s, p4/M, z24.h, z5.h\n"
- ".inst 0x44864335 // smlalb z21.s, p4/M, z25.h, z6.h\n"
- ".inst 0x04be7631 // sqrdmulh z17.s, z17.s, z30.s\n"
- "mov x20, x7\n"
- ".inst 0x44854716 // smlalt z22.s, p4/M, z24.h, z5.h\n"
- ".inst 0x44864732 // smlalt z18.s, p4/M, z25.h, z6.h\n"
- "asr z0.s, z0.s, #0x1f\n"
- "incw x20\n"
- ".inst 0x4488432b // smlalb z11.s, p4/M, z25.h, z8.h\n"
- ".inst 0x448843b5 // smlalb z21.s, p4/M, z29.h, z8.h\n"
- "and z20.d, z17.d, z31.d\n"
- "whilelt p2.s, x7, x8\n"
- ".inst 0x44884736 // smlalt z22.s, p4/M, z25.h, z8.h\n"
- ".inst 0x448847b2 // smlalt z18.s, p4/M, z29.h, z8.h\n"
- ".inst 0x04b37529 // sqrdmulh z9.s, z9.s, z19.s\n"
- "whilelt p1.s, x20, x8\n"
- ".inst 0x04b3756b // sqrdmulh z11.s, z11.s, z19.s\n"
- ".inst 0x04b376b5 // sqrdmulh z21.s, z21.s, z19.s\n"
- "ldr x12, [%x[params], %[offsetof_Params_bias]]\n"
- "whilelt p3.h, x7, x8\n"
- "sqadd z13.s, z13.s, z0.s\n"
- "asr z20.s, z20.s, #0x1f\n"
- ".inst 0x4482902d // srshl z13.s, p4/M, z13.s, z1.s\n"
- "addvl x27, x27, #2\n"
- "and z19.d, z9.d, z1.d\n"
- ".inst 0x04be754a // sqrdmulh z10.s, z10.s, z30.s\n"
- "addvl x26, x26, #2\n"
- "and z2.d, z11.d, z1.d\n"
- ".inst 0x04be76d6 // sqrdmulh z22.s, z22.s, z30.s\n"
- "and z0.d, z21.d, z1.d\n"
+ ".inst 0x04be756b // sqrdmulh z11.s, z11.s, z30.s\n"
+ "whilelt p2.s, x7, x5\n"
+ ".inst 0x04bf7529 // sqrdmulh z9.s, z9.s, z31.s\n"
+ "mov x19, x7\n"
+ ".inst 0x44874372 // smlalb z18.s, p4/M, z27.h, z7.h\n"
+ ".inst 0x455313bd // ssublb z29.h, z29.b, z19.b\n"
+ ".inst 0x4487476a // smlalt z10.s, p4/M, z27.h, z7.h\n"
+ "incw x19\n"
+ ".inst 0x44834316 // smlalb z22.s, p4/M, z24.h, z3.h\n"
+ "whilelt p1.s, x19, x5\n"
+ "and z1.d, z11.d, z0.d\n"
+ "whilelt p3.h, x7, x5\n"
+ "and z17.d, z9.d, z21.d\n"
+ "asr z1.s, z1.s, #0x1f\n"
+ ".inst 0x44854312 // smlalb z18.s, p4/M, z24.h, z5.h\n"
+ ".inst 0x4485470a // smlalt z10.s, p4/M, z24.h, z5.h\n"
+ "asr z17.s, z17.s, #0x1f\n"
+ ".inst 0x44834717 // smlalt z23.s, p4/M, z24.h, z3.h\n"
+ ".inst 0x44874356 // smlalb z22.s, p4/M, z26.h, z7.h\n"
+ ".inst 0x4482900d // srshl z13.s, p4/M, z13.s, z0.s\n"
+ ".inst 0x44884332 // smlalb z18.s, p4/M, z25.h, z8.h\n"
+ "sqadd z11.s, z11.s, z1.s\n"
+ "sqadd z9.s, z9.s, z17.s\n"
+ "add z13.s, z13.s, z14.s\n"
".inst 0x04be7652 // sqrdmulh z18.s, z18.s, z30.s\n"
- "sqadd z17.s, z17.s, z20.s\n"
- "asr z19.s, z19.s, #0x1f\n"
- ".inst 0x448293f1 // srshl z17.s, p4/M, z17.s, z31.s\n"
- "and z3.d, z10.d, z31.d\n"
+ ".inst 0x44874757 // smlalt z23.s, p4/M, z26.h, z7.h\n"
+ ".inst 0x4488472a // smlalt z10.s, p4/M, z25.h, z8.h\n"
+ ".inst 0x44864336 // smlalb z22.s, p4/M, z25.h, z6.h\n"
+ "and z17.d, z18.d, z0.d\n"
+ "asr z17.s, z17.s, #0x1f\n"
+ ".inst 0x04bf754a // sqrdmulh z10.s, z10.s, z31.s\n"
+ ".inst 0x44864737 // smlalt z23.s, p4/M, z25.h, z6.h\n"
+ ".inst 0x448843b6 // smlalb z22.s, p4/M, z29.h, z8.h\n"
+ "smin z13.s, p4/M, z13.s, z15.s\n"
+ ".inst 0x448292b0 // srshl z16.s, p4/M, z16.s, z21.s\n"
+ "and z1.d, z10.d, z21.d\n"
+ "asr z1.s, z1.s, #0x1f\n"
+ "add z16.s, z16.s, z14.s\n"
+ "sqadd z18.s, z18.s, z17.s\n"
+ ".inst 0x04be76d6 // sqrdmulh z22.s, z22.s, z30.s\n"
+ ".inst 0x448847b7 // smlalt z23.s, p4/M, z29.h, z8.h\n"
+ "smax z13.s, p4/M, z13.s, z20.s\n"
+ "smin z16.s, p4/M, z16.s, z15.s\n"
+ "sqadd z10.s, z10.s, z1.s\n"
+ "and z2.d, z22.d, z0.d\n"
"asr z2.s, z2.s, #0x1f\n"
- "and z26.d, z22.d, z31.d\n"
- "asr z0.s, z0.s, #0x1f\n"
- "and z20.d, z18.d, z31.d\n"
- "sqadd z9.s, z9.s, z19.s\n"
- ".inst 0x44829029 // srshl z9.s, p4/M, z9.s, z1.s\n"
- "asr z3.s, z3.s, #0x1f\n"
- "sqadd z11.s, z11.s, z2.s\n"
- ".inst 0x4482902b // srshl z11.s, p4/M, z11.s, z1.s\n"
- "asr z26.s, z26.s, #0x1f\n"
- "sqadd z21.s, z21.s, z0.s\n"
- ".inst 0x44829035 // srshl z21.s, p4/M, z21.s, z1.s\n"
- "asr z20.s, z20.s, #0x1f\n"
- "sqadd z10.s, z10.s, z3.s\n"
- ".inst 0x448293ea // srshl z10.s, p4/M, z10.s, z31.s\n"
- "sqadd z22.s, z22.s, z26.s\n"
- "sqadd z18.s, z18.s, z20.s\n"
- ".inst 0x448293f6 // srshl z22.s, p4/M, z22.s, z31.s\n"
- ".inst 0x448293f2 // srshl z18.s, p4/M, z18.s, z31.s\n"
- ".inst 0x453041ad // sqxtnb z13.h, z13.s\n"
- ".inst 0x45304129 // sqxtnb z9.h, z9.s\n"
- ".inst 0x4530416b // sqxtnb z11.h, z11.s\n"
- ".inst 0x453042b5 // sqxtnb z21.h, z21.s\n"
- ".inst 0x4530462d // sqxtnt z13.h, z17.s\n"
- ".inst 0x45304549 // sqxtnt z9.h, z10.s\n"
- ".inst 0x453046cb // sqxtnt z11.h, z22.s\n"
- ".inst 0x45304655 // sqxtnt z21.h, z18.s\n"
- "sqadd z13.h, z13.h, z14.h\n"
- "sqadd z9.h, z9.h, z14.h\n"
- "smax z13.h, p4/M, z13.h, z16.h\n"
- "smax z9.h, p4/M, z9.h, z16.h\n"
- "sqadd z11.h, z11.h, z14.h\n"
- "sqadd z21.h, z21.h, z14.h\n"
- "smax z11.h, p4/M, z11.h, z16.h\n"
- "smax z21.h, p4/M, z21.h, z16.h\n"
- "smin z13.h, p4/M, z13.h, z15.h\n"
- "smin z9.h, p4/M, z9.h, z15.h\n"
- "st1b { z13.h }, p0, [x16, x10]\n"
- "smin z11.h, p4/M, z11.h, z15.h\n"
- "smin z21.h, p4/M, z21.h, z15.h\n"
- "st1b { z9.h }, p0, [x15, x10]\n"
- "st1b { z11.h }, p0, [x14, x10]\n"
- "st1b { z21.h }, p0, [x13, x10]\n"
- "ld1sb { z0.h }, p4/Z, [x17]\n"
- "ld1sb { z1.h }, p4/Z, [x17, #1, MUL VL]\n"
- "inch x10\n"
- "ld1sb { z2.h }, p4/Z, [x17, #2, MUL VL]\n"
- "ld1sb { z3.h }, p4/Z, [x17, #3, MUL VL]\n"
+ ".inst 0x04bf76f7 // sqrdmulh z23.s, z23.s, z31.s\n"
+ "smax z16.s, p4/M, z16.s, z20.s\n"
+ ".inst 0x4482900b // srshl z11.s, p4/M, z11.s, z0.s\n"
+ ".inst 0x448292a9 // srshl z9.s, p4/M, z9.s, z21.s\n"
+ ".inst 0x44829012 // srshl z18.s, p4/M, z18.s, z0.s\n"
+ "trn1 z13.h, z13.h, z16.h\n"
+ "st1b { z13.h }, p0, [x14, x8]\n"
+ "add z11.s, z11.s, z14.s\n"
+ "add z9.s, z9.s, z14.s\n"
+ "add z18.s, z18.s, z14.s\n"
+ "sqadd z22.s, z22.s, z2.s\n"
+ "and z16.d, z23.d, z21.d\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ "smin z11.s, p4/M, z11.s, z15.s\n"
+ "smin z9.s, p4/M, z9.s, z15.s\n"
+ "smin z18.s, p4/M, z18.s, z15.s\n"
+ ".inst 0x448292aa // srshl z10.s, p4/M, z10.s, z21.s\n"
+ ".inst 0x44829016 // srshl z22.s, p4/M, z22.s, z0.s\n"
+ "smax z11.s, p4/M, z11.s, z20.s\n"
+ "sqadd z23.s, z23.s, z16.s\n"
+ "add z10.s, z10.s, z14.s\n"
+ "add z22.s, z22.s, z14.s\n"
+ "smax z9.s, p4/M, z9.s, z20.s\n"
+ "smax z18.s, p4/M, z18.s, z20.s\n"
+ "smin z10.s, p4/M, z10.s, z15.s\n"
+ "smin z22.s, p4/M, z22.s, z15.s\n"
+ "trn1 z11.h, z11.h, z9.h\n"
+ "st1b { z11.h }, p0, [x13, x8]\n"
+ "smax z10.s, p4/M, z10.s, z20.s\n"
+ ".inst 0x448292b7 // srshl z23.s, p4/M, z23.s, z21.s\n"
+ "smax z22.s, p4/M, z22.s, z20.s\n"
+ "trn1 z18.h, z18.h, z10.h\n"
+ "st1b { z18.h }, p0, [x12, x8]\n"
+ "add z23.s, z23.s, z14.s\n"
+ "smin z23.s, p4/M, z23.s, z15.s\n"
+ "smax z23.s, p4/M, z23.s, z20.s\n"
+ "trn1 z22.h, z22.h, z23.h\n"
+ "st1b { z22.h }, p0, [x11, x8]\n"
+ "inch x8\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ld1w { z18.s }, p2/Z, [x19]\n"
+ "ld1w { z16.s }, p1/Z, [x19, #1, MUL VL]\n"
+ "uzp1 z13.s, z18.s, z16.s\n"
+ "addvl x19, x19, #2\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "uzp2 z16.s, z18.s, z16.s\n"
+ "mov z11.d, z13.d\n"
+ "ld1sb { z0.h }, p4/Z, [x6]\n"
".inst 0x454c1000 // ssublb z0.h, z0.b, z12.b\n"
+ "mov z9.d, z16.d\n"
+ "ld1sb { z1.h }, p4/Z, [x6, #1, MUL VL]\n"
+ "mov z18.d, z13.d\n"
+ "ld1sb { z2.h }, p4/Z, [x6, #2, MUL VL]\n"
".inst 0x454c1021 // ssublb z1.h, z1.b, z12.b\n"
- "ld1sb { z4.h }, p4/Z, [x17, #4, MUL VL]\n"
- "ld1sb { z5.h }, p4/Z, [x17, #5, MUL VL]\n"
+ "mov z10.d, z16.d\n"
+ "ld1sb { z3.h }, p4/Z, [x6, #3, MUL VL]\n"
+ "mov z22.d, z13.d\n"
+ "ld1sb { z4.h }, p4/Z, [x6, #4, MUL VL]\n"
".inst 0x454c1042 // ssublb z2.h, z2.b, z12.b\n"
+ "mov z23.d, z16.d\n"
+ "ld1sb { z5.h }, p4/Z, [x6, #5, MUL VL]\n"
".inst 0x454c1063 // ssublb z3.h, z3.b, z12.b\n"
- "ld1sb { z6.h }, p4/Z, [x17, #6, MUL VL]\n"
- "ld1sb { z7.h }, p4/Z, [x17, #7, MUL VL]\n"
- "inch x17, ALL, MUL #8\n"
+ "ld1sb { z6.h }, p4/Z, [x6, #6, MUL VL]\n"
+ "ld1sb { z7.h }, p4/Z, [x6, #7, MUL VL]\n"
".inst 0x454c1084 // ssublb z4.h, z4.b, z12.b\n"
- "ld1w { z18.s }, p2/Z, [x12]\n"
- "ld1w { z8.s }, p1/Z, [x12, #1, MUL VL]\n"
- "uzp1 z13.s, z18.s, z8.s\n"
- "uzp2 z17.s, z18.s, z8.s\n"
- "ld1sb { z8.h }, p4/Z, [x17]\n"
- "ldp x9, x28, [x11, #0x0]\n"
- "addvl x12, x12, #2\n"
- "str x12, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x25, x24, [x11, #0x10]\n"
- "ldp x23, x22, [x11, #0x20]\n"
- "mov z9.d, z13.d\n"
- "mov z10.d, z17.d\n"
- "ldp x21, x20, [x11, #0x30]\n"
- "ld1sb { z31.h }, p3/Z, [x9, x7]\n"
- "mov z11.d, z13.d\n"
- "mov z22.d, z17.d\n"
- "ld1sb { z30.h }, p3/Z, [x28, x7]\n"
- "ld1sb { z29.h }, p3/Z, [x25, x7]\n"
- "mov z21.d, z13.d\n"
- "mov z18.d, z17.d\n"
- "ld1sb { z28.h }, p3/Z, [x24, x7]\n"
- "ld1sb { z27.h }, p3/Z, [x23, x7]\n"
+ "inch x6, ALL, MUL #8\n"
+ "ld1sb { z8.h }, p4/Z, [x6]\n"
+ "ldp x26, x25, [x16, #0x0]\n"
".inst 0x454c10a5 // ssublb z5.h, z5.b, z12.b\n"
".inst 0x454c10c6 // ssublb z6.h, z6.b, z12.b\n"
- "ld1sb { z26.h }, p3/Z, [x22, x7]\n"
- "ld1sb { z25.h }, p3/Z, [x21, x7]\n"
+ "ldp x24, x23, [x16, #0x10]\n"
".inst 0x454c10e7 // ssublb z7.h, z7.b, z12.b\n"
".inst 0x454c1108 // ssublb z8.h, z8.b, z12.b\n"
- "ld1sb { z24.h }, p3/Z, [x20, x7]\n"
- ".inst 0x455713ff // ssublb z31.h, z31.b, z23.b\n"
- ".inst 0x455713de // ssublb z30.h, z30.b, z23.b\n"
- ".inst 0x455713bd // ssublb z29.h, z29.b, z23.b\n"
- ".inst 0x4557139c // ssublb z28.h, z28.b, z23.b\n"
- ".inst 0x4557137b // ssublb z27.h, z27.b, z23.b\n"
- ".inst 0x4557135a // ssublb z26.h, z26.b, z23.b\n"
- ".inst 0x45571339 // ssublb z25.h, z25.b, z23.b\n"
- ".inst 0x45571318 // ssublb z24.h, z24.b, z23.b\n"
+ "ldp x22, x21, [x16, #0x20]\n"
+ "ldp x20, x19, [x16, #0x30]\n"
+ "ld1sb { z31.h }, p3/Z, [x26, x7]\n"
+ ".inst 0x455313ff // ssublb z31.h, z31.b, z19.b\n"
+ "ld1sb { z30.h }, p3/Z, [x25, x7]\n"
+ "ld1sb { z29.h }, p3/Z, [x24, x7]\n"
+ ".inst 0x455313de // ssublb z30.h, z30.b, z19.b\n"
+ "ld1sb { z28.h }, p3/Z, [x23, x7]\n"
+ "ld1sb { z27.h }, p3/Z, [x22, x7]\n"
+ ".inst 0x455313bd // ssublb z29.h, z29.b, z19.b\n"
+ "ld1sb { z26.h }, p3/Z, [x21, x7]\n"
+ ".inst 0x4553139c // ssublb z28.h, z28.b, z19.b\n"
+ "ld1sb { z25.h }, p3/Z, [x20, x7]\n"
+ "ld1sb { z24.h }, p3/Z, [x19, x7]\n"
+ ".inst 0x4553137b // ssublb z27.h, z27.b, z19.b\n"
+ ".inst 0x4553135a // ssublb z26.h, z26.b, z19.b\n"
+ ".inst 0x45531339 // ssublb z25.h, z25.b, z19.b\n"
+ ".inst 0x45531318 // ssublb z24.h, z24.b, z19.b\n"
"b.any 1b\n"
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
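Both sides of this hunk share the same predicated outer structure: whilelt builds the active-lane predicate from the channel counter, inch/incw advance it, and b.any loops while any lane remains. A rough C++ sketch of that control flow — with hypothetical names (`for_each_channel_block`, `vl`) standing in for the x5/x7 registers, and the vector body elided — would be:

#include <algorithm>
#include <cstddef>

void for_each_channel_block(std::size_t n_channels, std::size_t vl /* lanes per vector */)
{
    for (std::size_t c = 0; c < n_channels; c += vl)        // "inch"/"incw", "b.any 1b"
    {
        std::size_t active = std::min(vl, n_channels - c);  // "whilelt p3.h, ..."
        // Loads (ld1sb), widening multiply-accumulates (smlalb/smlalt) and the
        // final stores (st1b) are all governed by the active-lane predicate,
        // so the ragged final block needs no separate scalar tail loop.
        (void)active;
    }
}

The revert itself does not change this structure; it only returns to the older register allocation (reintroducing x19 and renumbering the scratch x/z registers), which is why the clobber lists at the end of each asm block are the substantive change.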
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
index d8f4d8d199..4733c89199 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -111,538 +111,546 @@ void sve_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "mov x0, #0x0\n"
- "mov x24, x0\n"
- "ldr x23, [%x[params], %[offsetof_Params_requant]]\n"
- "ldr x1, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "ldr x0, [%x[params], %[offsetof_Params_n_channels]]\n"
"ptrue p4.b\n"
- "ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n"
- "incw x24\n"
- "ldr x2, [%x[params], %[offsetof_Params_weights]]\n"
- "add x21, x23, %[offsetof_Requantize32_a_offset]\n"
- "add x20, x23, %[offsetof_Requantize32_b_offset]\n"
- "ld1rb { z15.b }, p4/Z, [x21]\n"
- "ld1rb { z17.b }, p4/Z, [x20]\n"
- "add x21, x23, %[offsetof_Requantize32_c_offset]\n"
- "add x20, x23, %[offsetof_Requantize32_minval]\n"
- "ld1rh { z12.h }, p4/Z, [x21]\n"
- "ld1rh { z13.h }, p4/Z, [x20]\n"
- "add x20, x23, %[offsetof_Requantize32_maxval]\n"
- "ld1rh { z11.h }, p4/Z, [x20]\n"
- "ldp x3, x4, [x22, #0x0]\n"
- "whilelt p3.h, x0, x1\n"
- "ldp x5, x6, [x22, #0x10]\n"
- "whilelt p2.s, x0, x1\n"
- "whilelt p1.s, x24, x1\n"
- "ldr x14, [%x[params], %[offsetof_Params_bias]]\n"
- "add x7, %x[params], %[offsetof_Params_inptrs]\n"
- "ld1w { z30.s }, p2/Z, [x14]\n"
- "ld1w { z16.s }, p1/Z, [x14, #1, MUL VL]\n"
- "uzp1 z14.s, z30.s, z16.s\n"
- "ld1sb { z0.h }, p4/Z, [x2]\n"
- "ld1sb { z1.h }, p4/Z, [x2, #1, MUL VL]\n"
- "uzp2 z10.s, z30.s, z16.s\n"
- "addvl x14, x14, #2\n"
- "ld1sb { z2.h }, p4/Z, [x2, #2, MUL VL]\n"
- "ld1sb { z3.h }, p4/Z, [x2, #3, MUL VL]\n"
- "mov x8, #0x0\n"
- "mov z20.d, z14.d\n"
- "ld1sb { z4.h }, p4/Z, [x2, #4, MUL VL]\n"
- "ldp x9, x28, [x7, #0x0]\n"
- "mov z7.d, z10.d\n"
- "mov z8.d, z14.d\n"
- "ldp x27, x26, [x7, #0x10]\n"
- "ldp x25, x24, [x7, #0x20]\n"
- "mov z16.d, z10.d\n"
- "mov z6.d, z14.d\n"
- "ldp x23, x22, [x7, #0x30]\n"
- "ldp x21, x20, [x7, #0x40]\n"
- "mov z5.d, z10.d\n"
- ".inst 0x45511000 // ssublb z0.h, z0.b, z17.b\n"
- "ld1sb { z31.h }, p3/Z, [x9, x0]\n"
- "ld1sb { z30.h }, p3/Z, [x28, x0]\n"
- ".inst 0x45511021 // ssublb z1.h, z1.b, z17.b\n"
- ".inst 0x45511042 // ssublb z2.h, z2.b, z17.b\n"
- "ld1sb { z29.h }, p3/Z, [x27, x0]\n"
- "ld1sb { z28.h }, p3/Z, [x26, x0]\n"
- ".inst 0x45511063 // ssublb z3.h, z3.b, z17.b\n"
- ".inst 0x45511084 // ssublb z4.h, z4.b, z17.b\n"
- "ld1sb { z27.h }, p3/Z, [x25, x0]\n"
- "ld1sb { z23.h }, p3/Z, [x24, x0]\n"
- ".inst 0x454f13ff // ssublb z31.h, z31.b, z15.b\n"
- ".inst 0x454f13de // ssublb z30.h, z30.b, z15.b\n"
- "ld1sb { z25.h }, p3/Z, [x23, x0]\n"
- "ld1sb { z24.h }, p3/Z, [x22, x0]\n"
- ".inst 0x454f13bd // ssublb z29.h, z29.b, z15.b\n"
- ".inst 0x454f139c // ssublb z28.h, z28.b, z15.b\n"
- "ld1sb { z26.h }, p3/Z, [x21, x0]\n"
- "ld1sb { z22.h }, p3/Z, [x20, x0]\n"
- ".inst 0x454f137b // ssublb z27.h, z27.b, z15.b\n"
- ".inst 0x454f12f7 // ssublb z23.h, z23.b, z15.b\n"
- "ldr x17, [%x[params], %[offsetof_Params_requant_muls]]\n"
- "ldr x16, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "str x14, [%x[params], %[offsetof_Params_bias]]\n"
- ".inst 0x454f1339 // ssublb z25.h, z25.b, z15.b\n"
- ".inst 0x454f1318 // ssublb z24.h, z24.b, z15.b\n"
- ".inst 0x454f135a // ssublb z26.h, z26.b, z15.b\n"
- ".inst 0x454f12d6 // ssublb z22.h, z22.b, z15.b\n"
+ "ldr x1, [%x[params], %[offsetof_Params_weights]]\n"
+ "mov x2, #0x0\n"
+ "ldr x22, [%x[params], %[offsetof_Params_requant]]\n"
+ "mov x3, #0x0\n"
+ "ldr x4, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "add x5, %x[params], %[offsetof_Params_inptrs]\n"
+ "ldr x6, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "add x19, x22, %[offsetof_Requantize32_a_offset]\n"
+ "ldr x21, [%x[params], %[offsetof_Params_outptrs]]\n"
+ "add x20, x22, %[offsetof_Requantize32_b_offset]\n"
+ "ld1rb { z17.b }, p4/Z, [x19]\n"
+ "add x19, x22, %[offsetof_Requantize32_c_offset]\n"
+ "ld1rb { z13.b }, p4/Z, [x20]\n"
+ "add x20, x22, %[offsetof_Requantize32_minval]\n"
+ "ld1rw { z14.s }, p4/Z, [x19]\n"
+ "add x19, x22, %[offsetof_Requantize32_maxval]\n"
+ "ld1rw { z5.s }, p4/Z, [x20]\n"
+ "whilelt p3.h, x2, x0\n"
+ "ld1rw { z15.s }, p4/Z, [x19]\n"
+ "whilelt p2.s, x2, x0\n"
+ "ldp x7, x8, [x21, #0x0]\n"
+ "mov x19, x2\n"
+ "incw x19\n"
+ "ldp x17, x16, [x21, #0x10]\n"
+ "whilelt p1.s, x19, x0\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ld1w { z19.s }, p2/Z, [x19]\n"
+ "ld1w { z6.s }, p1/Z, [x19, #1, MUL VL]\n"
+ "uzp1 z11.s, z19.s, z6.s\n"
+ "addvl x19, x19, #2\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "uzp2 z16.s, z19.s, z6.s\n"
+ "mov z19.d, z11.d\n"
+ "ld1sb { z0.h }, p4/Z, [x1]\n"
+ ".inst 0x454d1000 // ssublb z0.h, z0.b, z13.b\n"
+ "mov z9.d, z16.d\n"
+ "ld1sb { z1.h }, p4/Z, [x1, #1, MUL VL]\n"
+ "mov z7.d, z11.d\n"
+ "ld1sb { z2.h }, p4/Z, [x1, #2, MUL VL]\n"
+ ".inst 0x454d1021 // ssublb z1.h, z1.b, z13.b\n"
+ "mov z6.d, z16.d\n"
+ "ld1sb { z3.h }, p4/Z, [x1, #3, MUL VL]\n"
+ "mov z12.d, z11.d\n"
+ "ld1sb { z4.h }, p4/Z, [x1, #4, MUL VL]\n"
+ ".inst 0x454d1042 // ssublb z2.h, z2.b, z13.b\n"
+ "mov z8.d, z16.d\n"
+ "ldp x28, x27, [x5, #0x0]\n"
+ ".inst 0x454d1063 // ssublb z3.h, z3.b, z13.b\n"
+ "ldp x26, x25, [x5, #0x10]\n"
+ ".inst 0x454d1084 // ssublb z4.h, z4.b, z13.b\n"
+ "ldp x24, x23, [x5, #0x20]\n"
+ "ldp x22, x21, [x5, #0x30]\n"
+ "ldp x20, x19, [x5, #0x40]\n"
+ "ld1sb { z31.h }, p3/Z, [x28, x2]\n"
+ ".inst 0x455113ff // ssublb z31.h, z31.b, z17.b\n"
+ "ld1sb { z30.h }, p3/Z, [x27, x2]\n"
+ "ld1sb { z29.h }, p3/Z, [x26, x2]\n"
+ ".inst 0x455113de // ssublb z30.h, z30.b, z17.b\n"
+ "ld1sb { z28.h }, p3/Z, [x25, x2]\n"
+ "ld1sb { z27.h }, p3/Z, [x24, x2]\n"
+ ".inst 0x455113bd // ssublb z29.h, z29.b, z17.b\n"
+ "ld1sb { z23.h }, p3/Z, [x23, x2]\n"
+ ".inst 0x4551139c // ssublb z28.h, z28.b, z17.b\n"
+ "ld1sb { z25.h }, p3/Z, [x22, x2]\n"
+ "ld1sb { z24.h }, p3/Z, [x21, x2]\n"
+ ".inst 0x4551137b // ssublb z27.h, z27.b, z17.b\n"
+ "ld1sb { z26.h }, p3/Z, [x20, x2]\n"
+ ".inst 0x455112f7 // ssublb z23.h, z23.b, z17.b\n"
+ "ld1sb { z22.h }, p3/Z, [x19, x2]\n"
+ ".inst 0x45511339 // ssublb z25.h, z25.b, z17.b\n"
+ ".inst 0x45511318 // ssublb z24.h, z24.b, z17.b\n"
+ ".inst 0x4551135a // ssublb z26.h, z26.b, z17.b\n"
+ ".inst 0x455112d6 // ssublb z22.h, z22.b, z17.b\n"
"1:" // Loop
- ".inst 0x448043ee // smlalb z14.s, p4/M, z31.h, z0.h\n"
- ".inst 0x448047ea // smlalt z10.s, p4/M, z31.h, z0.h\n"
- "ldr x20, [x7, #0x50]\n"
- "ld1sb { z31.h }, p3/Z, [x20, x0]\n"
- ".inst 0x448143ce // smlalb z14.s, p4/M, z30.h, z1.h\n"
- ".inst 0x448043d4 // smlalb z20.s, p4/M, z30.h, z0.h\n"
- "ldr x22, [x7, #0x58]\n"
- ".inst 0x454f13ff // ssublb z31.h, z31.b, z15.b\n"
- ".inst 0x448043a8 // smlalb z8.s, p4/M, z29.h, z0.h\n"
- ".inst 0x44804386 // smlalb z6.s, p4/M, z28.h, z0.h\n"
- "ldr x21, [x7, #0x60]\n"
- "ldr x20, [x7, #0x68]\n"
- ".inst 0x448147ca // smlalt z10.s, p4/M, z30.h, z1.h\n"
- ".inst 0x448047c7 // smlalt z7.s, p4/M, z30.h, z0.h\n"
- "ld1sb { z30.h }, p3/Z, [x22, x0]\n"
- ".inst 0x454f13de // ssublb z30.h, z30.b, z15.b\n"
+ ".inst 0x448043eb // smlalb z11.s, p4/M, z31.h, z0.h\n"
+ "ldr x20, [x5, #0x50]\n"
+ "whilelt p0.h, x3, x0\n"
+ ".inst 0x448047f0 // smlalt z16.s, p4/M, z31.h, z0.h\n"
+ "ldr x19, [x5, #0x58]\n"
+ ".inst 0x448043d3 // smlalb z19.s, p4/M, z30.h, z0.h\n"
+ "ldr x25, [x5, #0x60]\n"
+ ".inst 0x448047c9 // smlalt z9.s, p4/M, z30.h, z0.h\n"
+ "ld1sb { z31.h }, p3/Z, [x20, x2]\n"
+ ".inst 0x455113ff // ssublb z31.h, z31.b, z17.b\n"
+ ".inst 0x448043a7 // smlalb z7.s, p4/M, z29.h, z0.h\n"
+ "ldr x24, [x5, #0x68]\n"
+ ".inst 0x448047a6 // smlalt z6.s, p4/M, z29.h, z0.h\n"
+ "ldr x23, [x5, #0x70]\n"
+ ".inst 0x4480438c // smlalb z12.s, p4/M, z28.h, z0.h\n"
+ "ldr x22, [x5, #0x78]\n"
+ ".inst 0x44804788 // smlalt z8.s, p4/M, z28.h, z0.h\n"
+ "ld1sb { z0.h }, p4/Z, [x1, #5, MUL VL]\n"
+ ".inst 0x454d1000 // ssublb z0.h, z0.b, z13.b\n"
+ ".inst 0x448143cb // smlalb z11.s, p4/M, z30.h, z1.h\n"
+ "ldr x15, [x5, #0x80]\n"
+ ".inst 0x448147d0 // smlalt z16.s, p4/M, z30.h, z1.h\n"
+ "ld1sb { z30.h }, p3/Z, [x19, x2]\n"
+ ".inst 0x455113de // ssublb z30.h, z30.b, z17.b\n"
+ ".inst 0x44814373 // smlalb z19.s, p4/M, z27.h, z1.h\n"
+ "ldr x21, [x5, #0x88]\n"
+ ".inst 0x44814769 // smlalt z9.s, p4/M, z27.h, z1.h\n"
+ "ldr x20, [x5, #0x90]\n"
+ ".inst 0x44814387 // smlalb z7.s, p4/M, z28.h, z1.h\n"
+ "ldr x19, [x5, #0x98]\n"
+ ".inst 0x44814786 // smlalt z6.s, p4/M, z28.h, z1.h\n"
+ "ldr x14, [x5, #0xa0]\n"
+ ".inst 0x448142ec // smlalb z12.s, p4/M, z23.h, z1.h\n"
+ "ldr x13, [x5, #0xa8]\n"
+ ".inst 0x448146e8 // smlalt z8.s, p4/M, z23.h, z1.h\n"
+ "ld1sb { z1.h }, p4/Z, [x1, #6, MUL VL]\n"
+ ".inst 0x454d1021 // ssublb z1.h, z1.b, z13.b\n"
+ ".inst 0x4482436b // smlalb z11.s, p4/M, z27.h, z2.h\n"
+ "ldr x12, [x5, #0xb0]\n"
+ ".inst 0x44824770 // smlalt z16.s, p4/M, z27.h, z2.h\n"
+ "ld1sb { z27.h }, p3/Z, [x25, x2]\n"
+ ".inst 0x4551137b // ssublb z27.h, z27.b, z17.b\n"
+ ".inst 0x44824333 // smlalb z19.s, p4/M, z25.h, z2.h\n"
+ "ldr x11, [x5, #0xb8]\n"
+ ".inst 0x44824729 // smlalt z9.s, p4/M, z25.h, z2.h\n"
+ "ldr x10, [x5, #0xc0]\n"
+ ".inst 0x448242e7 // smlalb z7.s, p4/M, z23.h, z2.h\n"
+ "ldr x9, [x5, #0xc8]\n"
+ ".inst 0x448246e6 // smlalt z6.s, p4/M, z23.h, z2.h\n"
+ "ldr x28, [x5, #0xd0]\n"
+ ".inst 0x448243ec // smlalb z12.s, p4/M, z31.h, z2.h\n"
+ "ldr x27, [x5, #0xd8]\n"
+ ".inst 0x448247e8 // smlalt z8.s, p4/M, z31.h, z2.h\n"
+ "ld1sb { z2.h }, p4/Z, [x1, #7, MUL VL]\n"
+ "inch x1, ALL, MUL #8\n"
+ ".inst 0x4483432b // smlalb z11.s, p4/M, z25.h, z3.h\n"
+ "ldr x26, [x5, #0xe0]\n"
+ ".inst 0x454d1042 // ssublb z2.h, z2.b, z13.b\n"
+ ".inst 0x44834730 // smlalt z16.s, p4/M, z25.h, z3.h\n"
+ "ld1sb { z25.h }, p3/Z, [x24, x2]\n"
+ ".inst 0x44834313 // smlalb z19.s, p4/M, z24.h, z3.h\n"
+ "ldr x25, [x5, #0xe8]\n"
+ ".inst 0x45511339 // ssublb z25.h, z25.b, z17.b\n"
+ ".inst 0x44834709 // smlalt z9.s, p4/M, z24.h, z3.h\n"
+ "ld1w { z18.s }, p2/Z, [x4]\n"
+ ".inst 0x448343e7 // smlalb z7.s, p4/M, z31.h, z3.h\n"
+ "ld1w { z20.s }, p1/Z, [x4, #1, MUL VL]\n"
+ "addvl x4, x4, #2\n"
+ ".inst 0x448347e6 // smlalt z6.s, p4/M, z31.h, z3.h\n"
+ ".inst 0x448343cc // smlalb z12.s, p4/M, z30.h, z3.h\n"
+ ".inst 0x448347c8 // smlalt z8.s, p4/M, z30.h, z3.h\n"
+ "ld1sb { z3.h }, p4/Z, [x1]\n"
+ ".inst 0x454d1063 // ssublb z3.h, z3.b, z13.b\n"
+ "uzp1 z21.s, z18.s, z20.s\n"
+ "uzp2 z10.s, z18.s, z20.s\n"
+ "ld1w { z18.s }, p2/Z, [x6]\n"
+ ".inst 0x4484430b // smlalb z11.s, p4/M, z24.h, z4.h\n"
+ "ld1w { z20.s }, p1/Z, [x6, #1, MUL VL]\n"
+ "addvl x6, x6, #2\n"
+ ".inst 0x44844710 // smlalt z16.s, p4/M, z24.h, z4.h\n"
+ "ld1sb { z24.h }, p3/Z, [x23, x2]\n"
+ ".inst 0x45511318 // ssublb z24.h, z24.b, z17.b\n"
+ ".inst 0x44844373 // smlalb z19.s, p4/M, z27.h, z4.h\n"
+ "ldr x24, [x5, #0xf0]\n"
+ ".inst 0x44844769 // smlalt z9.s, p4/M, z27.h, z4.h\n"
+ "ld1sb { z27.h }, p3/Z, [x22, x2]\n"
+ ".inst 0x4551137b // ssublb z27.h, z27.b, z17.b\n"
+ ".inst 0x448443c7 // smlalb z7.s, p4/M, z30.h, z4.h\n"
+ "ldr x23, [x5, #0xf8]\n"
+ ".inst 0x448447c6 // smlalt z6.s, p4/M, z30.h, z4.h\n"
+ ".inst 0x4484434c // smlalb z12.s, p4/M, z26.h, z4.h\n"
+ ".inst 0x44844748 // smlalt z8.s, p4/M, z26.h, z4.h\n"
+ "ld1sb { z4.h }, p4/Z, [x1, #1, MUL VL]\n"
+ ".inst 0x454d1084 // ssublb z4.h, z4.b, z13.b\n"
+ ".inst 0x448043ab // smlalb z11.s, p4/M, z29.h, z0.h\n"
".inst 0x448047b0 // smlalt z16.s, p4/M, z29.h, z0.h\n"
- ".inst 0x4482436e // smlalb z14.s, p4/M, z27.h, z2.h\n"
- "ldr x25, [x7, #0x70]\n"
- "ldr x24, [x7, #0x78]\n"
- ".inst 0x44804785 // smlalt z5.s, p4/M, z28.h, z0.h\n"
- ".inst 0x44814374 // smlalb z20.s, p4/M, z27.h, z1.h\n"
- "ld1sb { z0.h }, p4/Z, [x2, #5, MUL VL]\n"
- ".inst 0x45511000 // ssublb z0.h, z0.b, z17.b\n"
- ".inst 0x44814388 // smlalb z8.s, p4/M, z28.h, z1.h\n"
- ".inst 0x448142e6 // smlalb z6.s, p4/M, z23.h, z1.h\n"
- "ldr x15, [x7, #0x80]\n"
- "ldr x23, [x7, #0x88]\n"
- ".inst 0x4482476a // smlalt z10.s, p4/M, z27.h, z2.h\n"
- ".inst 0x44814767 // smlalt z7.s, p4/M, z27.h, z1.h\n"
- "ld1sb { z27.h }, p3/Z, [x21, x0]\n"
- ".inst 0x454f137b // ssublb z27.h, z27.b, z15.b\n"
+ "uzp1 z29.s, z18.s, z20.s\n"
+ "uzp2 z20.s, z18.s, z20.s\n"
+ ".inst 0x44804393 // smlalb z19.s, p4/M, z28.h, z0.h\n"
+ ".inst 0x44804789 // smlalt z9.s, p4/M, z28.h, z0.h\n"
+ ".inst 0x448042c7 // smlalb z7.s, p4/M, z22.h, z0.h\n"
+ ".inst 0x448046c6 // smlalt z6.s, p4/M, z22.h, z0.h\n"
+ ".inst 0x4480432c // smlalb z12.s, p4/M, z25.h, z0.h\n"
+ ".inst 0x44804728 // smlalt z8.s, p4/M, z25.h, z0.h\n"
+ "ld1sb { z0.h }, p4/Z, [x1, #2, MUL VL]\n"
+ ".inst 0x454d1000 // ssublb z0.h, z0.b, z13.b\n"
+ ".inst 0x4481438b // smlalb z11.s, p4/M, z28.h, z1.h\n"
".inst 0x44814790 // smlalt z16.s, p4/M, z28.h, z1.h\n"
- ".inst 0x4483432e // smlalb z14.s, p4/M, z25.h, z3.h\n"
- "ldr x22, [x7, #0x90]\n"
- "ldr x21, [x7, #0x98]\n"
- ".inst 0x448146e5 // smlalt z5.s, p4/M, z23.h, z1.h\n"
- ".inst 0x44824334 // smlalb z20.s, p4/M, z25.h, z2.h\n"
- "ld1sb { z1.h }, p4/Z, [x2, #6, MUL VL]\n"
- ".inst 0x45511021 // ssublb z1.h, z1.b, z17.b\n"
- ".inst 0x448242e8 // smlalb z8.s, p4/M, z23.h, z2.h\n"
- ".inst 0x448243e6 // smlalb z6.s, p4/M, z31.h, z2.h\n"
- "ldr x14, [x7, #0xa0]\n"
- "ldr x13, [x7, #0xa8]\n"
- ".inst 0x4483472a // smlalt z10.s, p4/M, z25.h, z3.h\n"
- ".inst 0x44824727 // smlalt z7.s, p4/M, z25.h, z2.h\n"
- "ld1sb { z25.h }, p3/Z, [x20, x0]\n"
- ".inst 0x454f1339 // ssublb z25.h, z25.b, z15.b\n"
+ "ld1sb { z28.h }, p3/Z, [x21, x2]\n"
+ ".inst 0x4551139c // ssublb z28.h, z28.b, z17.b\n"
+ ".inst 0x448142f3 // smlalb z19.s, p4/M, z23.h, z1.h\n"
+ "ldr x22, [x5, #0x100]\n"
+ ".inst 0x448146e9 // smlalt z9.s, p4/M, z23.h, z1.h\n"
+ ".inst 0x44814327 // smlalb z7.s, p4/M, z25.h, z1.h\n"
+ ".inst 0x44814726 // smlalt z6.s, p4/M, z25.h, z1.h\n"
+ ".inst 0x4481430c // smlalb z12.s, p4/M, z24.h, z1.h\n"
+ ".inst 0x44814708 // smlalt z8.s, p4/M, z24.h, z1.h\n"
+ "ld1sb { z1.h }, p4/Z, [x1, #3, MUL VL]\n"
+ ".inst 0x454d1021 // ssublb z1.h, z1.b, z13.b\n"
+ ".inst 0x448242eb // smlalb z11.s, p4/M, z23.h, z2.h\n"
".inst 0x448246f0 // smlalt z16.s, p4/M, z23.h, z2.h\n"
- ".inst 0x4484430e // smlalb z14.s, p4/M, z24.h, z4.h\n"
- "ldr x12, [x7, #0xb0]\n"
- "ldr x20, [x7, #0xb8]\n"
- ".inst 0x448247e5 // smlalt z5.s, p4/M, z31.h, z2.h\n"
- ".inst 0x44834314 // smlalb z20.s, p4/M, z24.h, z3.h\n"
- "ld1sb { z2.h }, p4/Z, [x2, #7, MUL VL]\n"
- "inch x2, ALL, MUL #8\n"
- ".inst 0x448343e8 // smlalb z8.s, p4/M, z31.h, z3.h\n"
- ".inst 0x448343c6 // smlalb z6.s, p4/M, z30.h, z3.h\n"
- ".inst 0x45511042 // ssublb z2.h, z2.b, z17.b\n"
- "ldr x11, [x7, #0xc0]\n"
- ".inst 0x4484470a // smlalt z10.s, p4/M, z24.h, z4.h\n"
- ".inst 0x44834707 // smlalt z7.s, p4/M, z24.h, z3.h\n"
- "ld1sb { z24.h }, p3/Z, [x25, x0]\n"
- ".inst 0x454f1318 // ssublb z24.h, z24.b, z15.b\n"
+ "ld1sb { z23.h }, p3/Z, [x15, x2]\n"
+ ".inst 0x455112f7 // ssublb z23.h, z23.b, z17.b\n"
+ ".inst 0x448243f3 // smlalb z19.s, p4/M, z31.h, z2.h\n"
+ "ldr x21, [x5, #0x108]\n"
+ ".inst 0x448247e9 // smlalt z9.s, p4/M, z31.h, z2.h\n"
+ ".inst 0x44824307 // smlalb z7.s, p4/M, z24.h, z2.h\n"
+ ".inst 0x44824706 // smlalt z6.s, p4/M, z24.h, z2.h\n"
+ ".inst 0x4482436c // smlalb z12.s, p4/M, z27.h, z2.h\n"
+ ".inst 0x44824768 // smlalt z8.s, p4/M, z27.h, z2.h\n"
+ "ld1sb { z2.h }, p4/Z, [x1, #4, MUL VL]\n"
+ ".inst 0x454d1042 // ssublb z2.h, z2.b, z13.b\n"
+ ".inst 0x448343eb // smlalb z11.s, p4/M, z31.h, z3.h\n"
".inst 0x448347f0 // smlalt z16.s, p4/M, z31.h, z3.h\n"
- ".inst 0x448043ae // smlalb z14.s, p4/M, z29.h, z0.h\n"
- "ldr x10, [x7, #0xc8]\n"
- "ldr x9, [x7, #0xd0]\n"
- ".inst 0x448347c5 // smlalt z5.s, p4/M, z30.h, z3.h\n"
- ".inst 0x44844374 // smlalb z20.s, p4/M, z27.h, z4.h\n"
- "ld1sb { z3.h }, p4/Z, [x2]\n"
- ".inst 0x45511063 // ssublb z3.h, z3.b, z17.b\n"
- ".inst 0x448443c8 // smlalb z8.s, p4/M, z30.h, z4.h\n"
- ".inst 0x44844346 // smlalb z6.s, p4/M, z26.h, z4.h\n"
- "ldr x28, [x7, #0xd8]\n"
- "ldr x27, [x7, #0xe0]\n"
- ".inst 0x448047aa // smlalt z10.s, p4/M, z29.h, z0.h\n"
- ".inst 0x44844767 // smlalt z7.s, p4/M, z27.h, z4.h\n"
- "ld1sb { z27.h }, p3/Z, [x24, x0]\n"
- ".inst 0x454f137b // ssublb z27.h, z27.b, z15.b\n"
+ "ld1sb { z31.h }, p3/Z, [x20, x2]\n"
+ ".inst 0x455113ff // ssublb z31.h, z31.b, z17.b\n"
+ ".inst 0x448343d3 // smlalb z19.s, p4/M, z30.h, z3.h\n"
+ "ldr x20, [x5, #0x110]\n"
+ ".inst 0x448347c9 // smlalt z9.s, p4/M, z30.h, z3.h\n"
+ ".inst 0x44834367 // smlalb z7.s, p4/M, z27.h, z3.h\n"
+ ".inst 0x44834766 // smlalt z6.s, p4/M, z27.h, z3.h\n"
+ ".inst 0x448342ec // smlalb z12.s, p4/M, z23.h, z3.h\n"
+ ".inst 0x448346e8 // smlalt z8.s, p4/M, z23.h, z3.h\n"
+ "ld1sb { z3.h }, p4/Z, [x1, #5, MUL VL]\n"
+ ".inst 0x454d1063 // ssublb z3.h, z3.b, z13.b\n"
+ ".inst 0x448443cb // smlalb z11.s, p4/M, z30.h, z4.h\n"
".inst 0x448447d0 // smlalt z16.s, p4/M, z30.h, z4.h\n"
- ".inst 0x4481438e // smlalb z14.s, p4/M, z28.h, z1.h\n"
- "ldr x26, [x7, #0xe8]\n"
- "ldr x25, [x7, #0xf0]\n"
- ".inst 0x44844745 // smlalt z5.s, p4/M, z26.h, z4.h\n"
- ".inst 0x44804394 // smlalb z20.s, p4/M, z28.h, z0.h\n"
- "ld1sb { z4.h }, p4/Z, [x2, #1, MUL VL]\n"
- ".inst 0x45511084 // ssublb z4.h, z4.b, z17.b\n"
- ".inst 0x448042c8 // smlalb z8.s, p4/M, z22.h, z0.h\n"
- ".inst 0x44804326 // smlalb z6.s, p4/M, z25.h, z0.h\n"
- "ld1w { z19.s }, p2/Z, [x17]\n"
- "ld1w { z18.s }, p1/Z, [x17, #1, MUL VL]\n"
- ".inst 0x4481478a // smlalt z10.s, p4/M, z28.h, z1.h\n"
- ".inst 0x44804787 // smlalt z7.s, p4/M, z28.h, z0.h\n"
- "ld1sb { z28.h }, p3/Z, [x23, x0]\n"
- ".inst 0x454f139c // ssublb z28.h, z28.b, z15.b\n"
+ "ld1sb { z30.h }, p3/Z, [x19, x2]\n"
+ ".inst 0x455113de // ssublb z30.h, z30.b, z17.b\n"
+ ".inst 0x44844353 // smlalb z19.s, p4/M, z26.h, z4.h\n"
+ "ldr x19, [x5, #0x118]\n"
+ ".inst 0x44844749 // smlalt z9.s, p4/M, z26.h, z4.h\n"
+ "ld1sb { z26.h }, p3/Z, [x14, x2]\n"
+ ".inst 0x4551135a // ssublb z26.h, z26.b, z17.b\n"
+ ".inst 0x448442e7 // smlalb z7.s, p4/M, z23.h, z4.h\n"
+ ".inst 0x448446e6 // smlalt z6.s, p4/M, z23.h, z4.h\n"
+ ".inst 0x4484438c // smlalb z12.s, p4/M, z28.h, z4.h\n"
+ ".inst 0x44844788 // smlalt z8.s, p4/M, z28.h, z4.h\n"
+ "ld1sb { z4.h }, p4/Z, [x1, #6, MUL VL]\n"
+ ".inst 0x454d1084 // ssublb z4.h, z4.b, z13.b\n"
+ ".inst 0x448042cb // smlalb z11.s, p4/M, z22.h, z0.h\n"
".inst 0x448046d0 // smlalt z16.s, p4/M, z22.h, z0.h\n"
- ".inst 0x448242ee // smlalb z14.s, p4/M, z23.h, z2.h\n"
- "ldr x24, [x7, #0xf8]\n"
- "uzp1 z9.s, z19.s, z18.s\n"
- ".inst 0x44804725 // smlalt z5.s, p4/M, z25.h, z0.h\n"
- ".inst 0x448142f4 // smlalb z20.s, p4/M, z23.h, z1.h\n"
- "ld1sb { z0.h }, p4/Z, [x2, #2, MUL VL]\n"
- ".inst 0x45511000 // ssublb z0.h, z0.b, z17.b\n"
- ".inst 0x44814328 // smlalb z8.s, p4/M, z25.h, z1.h\n"
- ".inst 0x44814306 // smlalb z6.s, p4/M, z24.h, z1.h\n"
- "uzp2 z29.s, z19.s, z18.s\n"
- "ld1w { z19.s }, p2/Z, [x16]\n"
- ".inst 0x448246ea // smlalt z10.s, p4/M, z23.h, z2.h\n"
- ".inst 0x448146e7 // smlalt z7.s, p4/M, z23.h, z1.h\n"
- "ld1sb { z23.h }, p3/Z, [x15, x0]\n"
- ".inst 0x454f12f7 // ssublb z23.h, z23.b, z15.b\n"
+ "ld1sb { z22.h }, p3/Z, [x11, x2]\n"
+ ".inst 0x455112d6 // ssublb z22.h, z22.b, z17.b\n"
+ ".inst 0x44804333 // smlalb z19.s, p4/M, z25.h, z0.h\n"
+ ".inst 0x44804729 // smlalt z9.s, p4/M, z25.h, z0.h\n"
+ ".inst 0x448043e7 // smlalb z7.s, p4/M, z31.h, z0.h\n"
+ ".inst 0x448047e6 // smlalt z6.s, p4/M, z31.h, z0.h\n"
+ ".inst 0x448043cc // smlalb z12.s, p4/M, z30.h, z0.h\n"
+ ".inst 0x448047c8 // smlalt z8.s, p4/M, z30.h, z0.h\n"
+ "ld1sb { z0.h }, p4/Z, [x1, #7, MUL VL]\n"
+ "inch x1, ALL, MUL #8\n"
+ ".inst 0x4481432b // smlalb z11.s, p4/M, z25.h, z1.h\n"
+ ".inst 0x454d1000 // ssublb z0.h, z0.b, z13.b\n"
".inst 0x44814730 // smlalt z16.s, p4/M, z25.h, z1.h\n"
- ".inst 0x448343ee // smlalb z14.s, p4/M, z31.h, z3.h\n"
- "ldr x23, [x7, #0x100]\n"
- "whilelt p0.h, x8, x1\n"
- ".inst 0x44814705 // smlalt z5.s, p4/M, z24.h, z1.h\n"
- ".inst 0x448243f4 // smlalb z20.s, p4/M, z31.h, z2.h\n"
- "ld1sb { z1.h }, p4/Z, [x2, #3, MUL VL]\n"
- ".inst 0x45511021 // ssublb z1.h, z1.b, z17.b\n"
- ".inst 0x44824308 // smlalb z8.s, p4/M, z24.h, z2.h\n"
- ".inst 0x44824366 // smlalb z6.s, p4/M, z27.h, z2.h\n"
- "addvl x17, x17, #2\n"
- ".inst 0x448347ea // smlalt z10.s, p4/M, z31.h, z3.h\n"
- ".inst 0x448247e7 // smlalt z7.s, p4/M, z31.h, z2.h\n"
- "ld1sb { z31.h }, p3/Z, [x22, x0]\n"
- ".inst 0x454f13ff // ssublb z31.h, z31.b, z15.b\n"
+ "ld1sb { z25.h }, p3/Z, [x13, x2]\n"
+ ".inst 0x44814313 // smlalb z19.s, p4/M, z24.h, z1.h\n"
+ ".inst 0x45511339 // ssublb z25.h, z25.b, z17.b\n"
+ ".inst 0x44814709 // smlalt z9.s, p4/M, z24.h, z1.h\n"
+ ".inst 0x448143c7 // smlalb z7.s, p4/M, z30.h, z1.h\n"
+ ".inst 0x448147c6 // smlalt z6.s, p4/M, z30.h, z1.h\n"
+ ".inst 0x4481434c // smlalb z12.s, p4/M, z26.h, z1.h\n"
+ ".inst 0x44814748 // smlalt z8.s, p4/M, z26.h, z1.h\n"
+ "ld1sb { z1.h }, p4/Z, [x1]\n"
+ ".inst 0x454d1021 // ssublb z1.h, z1.b, z13.b\n"
+ ".inst 0x4482430b // smlalb z11.s, p4/M, z24.h, z2.h\n"
".inst 0x44824710 // smlalt z16.s, p4/M, z24.h, z2.h\n"
- ".inst 0x448443ce // smlalb z14.s, p4/M, z30.h, z4.h\n"
- "ldr x22, [x7, #0x108]\n"
- ".inst 0x44824765 // smlalt z5.s, p4/M, z27.h, z2.h\n"
- ".inst 0x448343d4 // smlalb z20.s, p4/M, z30.h, z3.h\n"
- "ld1sb { z2.h }, p4/Z, [x2, #4, MUL VL]\n"
- ".inst 0x45511042 // ssublb z2.h, z2.b, z17.b\n"
- ".inst 0x44834368 // smlalb z8.s, p4/M, z27.h, z3.h\n"
- ".inst 0x448342e6 // smlalb z6.s, p4/M, z23.h, z3.h\n"
- ".inst 0x448447ca // smlalt z10.s, p4/M, z30.h, z4.h\n"
- ".inst 0x448347c7 // smlalt z7.s, p4/M, z30.h, z3.h\n"
- "ld1sb { z30.h }, p3/Z, [x21, x0]\n"
- ".inst 0x454f13de // ssublb z30.h, z30.b, z15.b\n"
+ "ld1sb { z24.h }, p3/Z, [x12, x2]\n"
+ ".inst 0x45511318 // ssublb z24.h, z24.b, z17.b\n"
+ ".inst 0x44824373 // smlalb z19.s, p4/M, z27.h, z2.h\n"
+ ".inst 0x44824769 // smlalt z9.s, p4/M, z27.h, z2.h\n"
+ ".inst 0x44824347 // smlalb z7.s, p4/M, z26.h, z2.h\n"
+ ".inst 0x44824746 // smlalt z6.s, p4/M, z26.h, z2.h\n"
+ ".inst 0x4482432c // smlalb z12.s, p4/M, z25.h, z2.h\n"
+ ".inst 0x44824728 // smlalt z8.s, p4/M, z25.h, z2.h\n"
+ "ld1sb { z2.h }, p4/Z, [x1, #1, MUL VL]\n"
+ ".inst 0x454d1042 // ssublb z2.h, z2.b, z13.b\n"
+ ".inst 0x4483436b // smlalb z11.s, p4/M, z27.h, z3.h\n"
".inst 0x44834770 // smlalt z16.s, p4/M, z27.h, z3.h\n"
- ".inst 0x448042ce // smlalb z14.s, p4/M, z22.h, z0.h\n"
- "ldr x21, [x7, #0x110]\n"
- ".inst 0x448346e5 // smlalt z5.s, p4/M, z23.h, z3.h\n"
- ".inst 0x44844354 // smlalb z20.s, p4/M, z26.h, z4.h\n"
- "ld1sb { z3.h }, p4/Z, [x2, #5, MUL VL]\n"
- ".inst 0x45511063 // ssublb z3.h, z3.b, z17.b\n"
- ".inst 0x448442e8 // smlalb z8.s, p4/M, z23.h, z4.h\n"
- ".inst 0x44844386 // smlalb z6.s, p4/M, z28.h, z4.h\n"
- ".inst 0x448046ca // smlalt z10.s, p4/M, z22.h, z0.h\n"
- ".inst 0x44844747 // smlalt z7.s, p4/M, z26.h, z4.h\n"
- "ld1sb { z26.h }, p3/Z, [x14, x0]\n"
- ".inst 0x454f135a // ssublb z26.h, z26.b, z15.b\n"
+ "ld1sb { z27.h }, p3/Z, [x10, x2]\n"
+ ".inst 0x4551137b // ssublb z27.h, z27.b, z17.b\n"
+ ".inst 0x448342f3 // smlalb z19.s, p4/M, z23.h, z3.h\n"
+ ".inst 0x448346e9 // smlalt z9.s, p4/M, z23.h, z3.h\n"
+ ".inst 0x44834327 // smlalb z7.s, p4/M, z25.h, z3.h\n"
+ ".inst 0x44834726 // smlalt z6.s, p4/M, z25.h, z3.h\n"
+ ".inst 0x4483430c // smlalb z12.s, p4/M, z24.h, z3.h\n"
+ ".inst 0x44834708 // smlalt z8.s, p4/M, z24.h, z3.h\n"
+ "ld1sb { z3.h }, p4/Z, [x1, #2, MUL VL]\n"
+ ".inst 0x454d1063 // ssublb z3.h, z3.b, z13.b\n"
+ ".inst 0x448442eb // smlalb z11.s, p4/M, z23.h, z4.h\n"
".inst 0x448446f0 // smlalt z16.s, p4/M, z23.h, z4.h\n"
- ".inst 0x4481432e // smlalb z14.s, p4/M, z25.h, z1.h\n"
- "ld1sb { z22.h }, p3/Z, [x20, x0]\n"
- ".inst 0x454f12d6 // ssublb z22.h, z22.b, z15.b\n"
- ".inst 0x44844785 // smlalt z5.s, p4/M, z28.h, z4.h\n"
- ".inst 0x44804334 // smlalb z20.s, p4/M, z25.h, z0.h\n"
- "ld1sb { z4.h }, p4/Z, [x2, #6, MUL VL]\n"
- ".inst 0x45511084 // ssublb z4.h, z4.b, z17.b\n"
- ".inst 0x448043e8 // smlalb z8.s, p4/M, z31.h, z0.h\n"
- ".inst 0x448043c6 // smlalb z6.s, p4/M, z30.h, z0.h\n"
- "ldr x20, [x7, #0x118]\n"
- "ldr x14, [%x[params], %[offsetof_Params_bias]]\n"
- ".inst 0x4481472a // smlalt z10.s, p4/M, z25.h, z1.h\n"
- ".inst 0x44804727 // smlalt z7.s, p4/M, z25.h, z0.h\n"
- "ld1sb { z25.h }, p3/Z, [x13, x0]\n"
- ".inst 0x454f1339 // ssublb z25.h, z25.b, z15.b\n"
+ "ld1sb { z23.h }, p3/Z, [x9, x2]\n"
+ ".inst 0x455112f7 // ssublb z23.h, z23.b, z17.b\n"
+ ".inst 0x44844393 // smlalb z19.s, p4/M, z28.h, z4.h\n"
+ ".inst 0x44844789 // smlalt z9.s, p4/M, z28.h, z4.h\n"
+ "ld1sb { z28.h }, p3/Z, [x26, x2]\n"
+ ".inst 0x4551139c // ssublb z28.h, z28.b, z17.b\n"
+ ".inst 0x44844307 // smlalb z7.s, p4/M, z24.h, z4.h\n"
+ ".inst 0x44844706 // smlalt z6.s, p4/M, z24.h, z4.h\n"
+ ".inst 0x448442cc // smlalb z12.s, p4/M, z22.h, z4.h\n"
+ ".inst 0x448446c8 // smlalt z8.s, p4/M, z22.h, z4.h\n"
+ "ld1sb { z4.h }, p4/Z, [x1, #3, MUL VL]\n"
+ ".inst 0x454d1084 // ssublb z4.h, z4.b, z13.b\n"
+ ".inst 0x448043eb // smlalb z11.s, p4/M, z31.h, z0.h\n"
".inst 0x448047f0 // smlalt z16.s, p4/M, z31.h, z0.h\n"
- ".inst 0x4482430e // smlalb z14.s, p4/M, z24.h, z2.h\n"
- ".inst 0x448047c5 // smlalt z5.s, p4/M, z30.h, z0.h\n"
- ".inst 0x44814314 // smlalb z20.s, p4/M, z24.h, z1.h\n"
- "ld1sb { z0.h }, p4/Z, [x2, #7, MUL VL]\n"
- "inch x2, ALL, MUL #8\n"
- ".inst 0x448143c8 // smlalb z8.s, p4/M, z30.h, z1.h\n"
- ".inst 0x44814346 // smlalb z6.s, p4/M, z26.h, z1.h\n"
- ".inst 0x45511000 // ssublb z0.h, z0.b, z17.b\n"
- ".inst 0x4482470a // smlalt z10.s, p4/M, z24.h, z2.h\n"
- ".inst 0x44814707 // smlalt z7.s, p4/M, z24.h, z1.h\n"
- "ld1sb { z24.h }, p3/Z, [x12, x0]\n"
- ".inst 0x454f1318 // ssublb z24.h, z24.b, z15.b\n"
+ "ld1sb { z31.h }, p3/Z, [x28, x2]\n"
+ ".inst 0x455113ff // ssublb z31.h, z31.b, z17.b\n"
+ ".inst 0x448043d3 // smlalb z19.s, p4/M, z30.h, z0.h\n"
+ ".inst 0x448047c9 // smlalt z9.s, p4/M, z30.h, z0.h\n"
+ ".inst 0x44804367 // smlalb z7.s, p4/M, z27.h, z0.h\n"
+ ".inst 0x44804766 // smlalt z6.s, p4/M, z27.h, z0.h\n"
+ ".inst 0x448042ec // smlalb z12.s, p4/M, z23.h, z0.h\n"
+ ".inst 0x448046e8 // smlalt z8.s, p4/M, z23.h, z0.h\n"
+ "ld1sb { z0.h }, p4/Z, [x1, #4, MUL VL]\n"
+ ".inst 0x454d1000 // ssublb z0.h, z0.b, z13.b\n"
+ ".inst 0x448143cb // smlalb z11.s, p4/M, z30.h, z1.h\n"
".inst 0x448147d0 // smlalt z16.s, p4/M, z30.h, z1.h\n"
- ".inst 0x4483436e // smlalb z14.s, p4/M, z27.h, z3.h\n"
- ".inst 0x44814745 // smlalt z5.s, p4/M, z26.h, z1.h\n"
- ".inst 0x44824374 // smlalb z20.s, p4/M, z27.h, z2.h\n"
- "ld1sb { z1.h }, p4/Z, [x2]\n"
- ".inst 0x45511021 // ssublb z1.h, z1.b, z17.b\n"
- ".inst 0x44824348 // smlalb z8.s, p4/M, z26.h, z2.h\n"
- ".inst 0x44824326 // smlalb z6.s, p4/M, z25.h, z2.h\n"
- ".inst 0x4483476a // smlalt z10.s, p4/M, z27.h, z3.h\n"
- ".inst 0x44824767 // smlalt z7.s, p4/M, z27.h, z2.h\n"
- "ld1sb { z27.h }, p3/Z, [x11, x0]\n"
- ".inst 0x454f137b // ssublb z27.h, z27.b, z15.b\n"
+ "ld1sb { z30.h }, p3/Z, [x27, x2]\n"
+ ".inst 0x455113de // ssublb z30.h, z30.b, z17.b\n"
+ ".inst 0x44814353 // smlalb z19.s, p4/M, z26.h, z1.h\n"
+ ".inst 0x44814749 // smlalt z9.s, p4/M, z26.h, z1.h\n"
+ ".inst 0x448142e7 // smlalb z7.s, p4/M, z23.h, z1.h\n"
+ ".inst 0x448146e6 // smlalt z6.s, p4/M, z23.h, z1.h\n"
+ ".inst 0x448143ec // smlalb z12.s, p4/M, z31.h, z1.h\n"
+ ".inst 0x448147e8 // smlalt z8.s, p4/M, z31.h, z1.h\n"
+ "ld1sb { z1.h }, p4/Z, [x1, #5, MUL VL]\n"
+ ".inst 0x454d1021 // ssublb z1.h, z1.b, z13.b\n"
+ ".inst 0x4482434b // smlalb z11.s, p4/M, z26.h, z2.h\n"
".inst 0x44824750 // smlalt z16.s, p4/M, z26.h, z2.h\n"
- ".inst 0x448442ee // smlalb z14.s, p4/M, z23.h, z4.h\n"
- ".inst 0x44824725 // smlalt z5.s, p4/M, z25.h, z2.h\n"
- ".inst 0x448342f4 // smlalb z20.s, p4/M, z23.h, z3.h\n"
- "ld1sb { z2.h }, p4/Z, [x2, #1, MUL VL]\n"
- ".inst 0x45511042 // ssublb z2.h, z2.b, z17.b\n"
- ".inst 0x44834328 // smlalb z8.s, p4/M, z25.h, z3.h\n"
- ".inst 0x44834306 // smlalb z6.s, p4/M, z24.h, z3.h\n"
- ".inst 0x448446ea // smlalt z10.s, p4/M, z23.h, z4.h\n"
- ".inst 0x448346e7 // smlalt z7.s, p4/M, z23.h, z3.h\n"
- "ld1sb { z23.h }, p3/Z, [x10, x0]\n"
- ".inst 0x454f12f7 // ssublb z23.h, z23.b, z15.b\n"
+ "ld1sb { z26.h }, p3/Z, [x25, x2]\n"
+ ".inst 0x4551135a // ssublb z26.h, z26.b, z17.b\n"
+ ".inst 0x44824333 // smlalb z19.s, p4/M, z25.h, z2.h\n"
+ ".inst 0x44824729 // smlalt z9.s, p4/M, z25.h, z2.h\n"
+ ".inst 0x448243e7 // smlalb z7.s, p4/M, z31.h, z2.h\n"
+ ".inst 0x448247e6 // smlalt z6.s, p4/M, z31.h, z2.h\n"
+ ".inst 0x448243cc // smlalb z12.s, p4/M, z30.h, z2.h\n"
+ ".inst 0x448247c8 // smlalt z8.s, p4/M, z30.h, z2.h\n"
+ "ld1sb { z2.h }, p4/Z, [x1, #6, MUL VL]\n"
+ ".inst 0x454d1042 // ssublb z2.h, z2.b, z13.b\n"
+ ".inst 0x4483432b // smlalb z11.s, p4/M, z25.h, z3.h\n"
".inst 0x44834730 // smlalt z16.s, p4/M, z25.h, z3.h\n"
- ".inst 0x448043ee // smlalb z14.s, p4/M, z31.h, z0.h\n"
- ".inst 0x44834705 // smlalt z5.s, p4/M, z24.h, z3.h\n"
- ".inst 0x44844394 // smlalb z20.s, p4/M, z28.h, z4.h\n"
- "ld1sb { z3.h }, p4/Z, [x2, #2, MUL VL]\n"
- ".inst 0x45511063 // ssublb z3.h, z3.b, z17.b\n"
- ".inst 0x44844308 // smlalb z8.s, p4/M, z24.h, z4.h\n"
- ".inst 0x448442c6 // smlalb z6.s, p4/M, z22.h, z4.h\n"
- ".inst 0x448047ea // smlalt z10.s, p4/M, z31.h, z0.h\n"
- ".inst 0x44844787 // smlalt z7.s, p4/M, z28.h, z4.h\n"
- "ld1sb { z31.h }, p3/Z, [x9, x0]\n"
- ".inst 0x454f13ff // ssublb z31.h, z31.b, z15.b\n"
+ "ld1sb { z25.h }, p3/Z, [x24, x2]\n"
+ ".inst 0x45511339 // ssublb z25.h, z25.b, z17.b\n"
+ ".inst 0x44834313 // smlalb z19.s, p4/M, z24.h, z3.h\n"
+ ".inst 0x44834709 // smlalt z9.s, p4/M, z24.h, z3.h\n"
+ ".inst 0x448343c7 // smlalb z7.s, p4/M, z30.h, z3.h\n"
+ ".inst 0x448347c6 // smlalt z6.s, p4/M, z30.h, z3.h\n"
+ ".inst 0x4483438c // smlalb z12.s, p4/M, z28.h, z3.h\n"
+ ".inst 0x44834788 // smlalt z8.s, p4/M, z28.h, z3.h\n"
+ "ld1sb { z3.h }, p4/Z, [x1, #7, MUL VL]\n"
+ "inch x1, ALL, MUL #8\n"
+ ".inst 0x4484430b // smlalb z11.s, p4/M, z24.h, z4.h\n"
+ ".inst 0x454d1063 // ssublb z3.h, z3.b, z13.b\n"
".inst 0x44844710 // smlalt z16.s, p4/M, z24.h, z4.h\n"
- ".inst 0x448143ce // smlalb z14.s, p4/M, z30.h, z1.h\n"
- "ld1sb { z28.h }, p3/Z, [x27, x0]\n"
- ".inst 0x454f139c // ssublb z28.h, z28.b, z15.b\n"
- ".inst 0x448446c5 // smlalt z5.s, p4/M, z22.h, z4.h\n"
- ".inst 0x448043d4 // smlalb z20.s, p4/M, z30.h, z0.h\n"
- "ld1sb { z4.h }, p4/Z, [x2, #3, MUL VL]\n"
- ".inst 0x45511084 // ssublb z4.h, z4.b, z17.b\n"
- ".inst 0x44804368 // smlalb z8.s, p4/M, z27.h, z0.h\n"
- ".inst 0x448042e6 // smlalb z6.s, p4/M, z23.h, z0.h\n"
- ".inst 0x448147ca // smlalt z10.s, p4/M, z30.h, z1.h\n"
- ".inst 0x448047c7 // smlalt z7.s, p4/M, z30.h, z0.h\n"
- "ld1sb { z30.h }, p3/Z, [x28, x0]\n"
- ".inst 0x454f13de // ssublb z30.h, z30.b, z15.b\n"
+ "ld1sb { z24.h }, p3/Z, [x23, x2]\n"
+ ".inst 0x448442d3 // smlalb z19.s, p4/M, z22.h, z4.h\n"
+ ".inst 0x45511318 // ssublb z24.h, z24.b, z17.b\n"
+ ".inst 0x448446c9 // smlalt z9.s, p4/M, z22.h, z4.h\n"
+ ".inst 0x44844387 // smlalb z7.s, p4/M, z28.h, z4.h\n"
+ ".inst 0x44844786 // smlalt z6.s, p4/M, z28.h, z4.h\n"
+ ".inst 0x4484434c // smlalb z12.s, p4/M, z26.h, z4.h\n"
+ ".inst 0x44844748 // smlalt z8.s, p4/M, z26.h, z4.h\n"
+ "ld1sb { z4.h }, p4/Z, [x1]\n"
+ "inch x1\n"
+ ".inst 0x4480436b // smlalb z11.s, p4/M, z27.h, z0.h\n"
+ ".inst 0x454d1084 // ssublb z4.h, z4.b, z13.b\n"
".inst 0x44804770 // smlalt z16.s, p4/M, z27.h, z0.h\n"
- ".inst 0x4482434e // smlalb z14.s, p4/M, z26.h, z2.h\n"
- ".inst 0x448046e5 // smlalt z5.s, p4/M, z23.h, z0.h\n"
- ".inst 0x44814354 // smlalb z20.s, p4/M, z26.h, z1.h\n"
- "ld1sb { z0.h }, p4/Z, [x2, #4, MUL VL]\n"
- ".inst 0x45511000 // ssublb z0.h, z0.b, z17.b\n"
- ".inst 0x448142e8 // smlalb z8.s, p4/M, z23.h, z1.h\n"
- ".inst 0x448143e6 // smlalb z6.s, p4/M, z31.h, z1.h\n"
- ".inst 0x4482474a // smlalt z10.s, p4/M, z26.h, z2.h\n"
- ".inst 0x44814747 // smlalt z7.s, p4/M, z26.h, z1.h\n"
- "ld1sb { z26.h }, p3/Z, [x26, x0]\n"
- ".inst 0x454f135a // ssublb z26.h, z26.b, z15.b\n"
+ "ld1sb { z27.h }, p3/Z, [x22, x2]\n"
+ ".inst 0x448042f3 // smlalb z19.s, p4/M, z23.h, z0.h\n"
+ ".inst 0x4551137b // ssublb z27.h, z27.b, z17.b\n"
+ ".inst 0x448046e9 // smlalt z9.s, p4/M, z23.h, z0.h\n"
+ ".inst 0x44804327 // smlalb z7.s, p4/M, z25.h, z0.h\n"
+ ".inst 0x44804726 // smlalt z6.s, p4/M, z25.h, z0.h\n"
+ "ld1sb { z25.h }, p3/Z, [x21, x2]\n"
+ ".inst 0x45511339 // ssublb z25.h, z25.b, z17.b\n"
+ ".inst 0x4480430c // smlalb z12.s, p4/M, z24.h, z0.h\n"
+ ".inst 0x44804708 // smlalt z8.s, p4/M, z24.h, z0.h\n"
+ ".inst 0x448142eb // smlalb z11.s, p4/M, z23.h, z1.h\n"
".inst 0x448146f0 // smlalt z16.s, p4/M, z23.h, z1.h\n"
- ".inst 0x4483432e // smlalb z14.s, p4/M, z25.h, z3.h\n"
- ".inst 0x448147e5 // smlalt z5.s, p4/M, z31.h, z1.h\n"
- ".inst 0x44824334 // smlalb z20.s, p4/M, z25.h, z2.h\n"
- "ld1sb { z1.h }, p4/Z, [x2, #5, MUL VL]\n"
- ".inst 0x45511021 // ssublb z1.h, z1.b, z17.b\n"
- ".inst 0x448243e8 // smlalb z8.s, p4/M, z31.h, z2.h\n"
- ".inst 0x448243c6 // smlalb z6.s, p4/M, z30.h, z2.h\n"
- ".inst 0x4483472a // smlalt z10.s, p4/M, z25.h, z3.h\n"
- ".inst 0x44824727 // smlalt z7.s, p4/M, z25.h, z2.h\n"
- "ld1sb { z25.h }, p3/Z, [x25, x0]\n"
- ".inst 0x454f1339 // ssublb z25.h, z25.b, z15.b\n"
+ ".inst 0x448143f3 // smlalb z19.s, p4/M, z31.h, z1.h\n"
+ ".inst 0x448147e9 // smlalt z9.s, p4/M, z31.h, z1.h\n"
+ ".inst 0x44814307 // smlalb z7.s, p4/M, z24.h, z1.h\n"
+ ".inst 0x44814706 // smlalt z6.s, p4/M, z24.h, z1.h\n"
+ "ld1sb { z24.h }, p3/Z, [x20, x2]\n"
+ ".inst 0x45511318 // ssublb z24.h, z24.b, z17.b\n"
+ ".inst 0x4481436c // smlalb z12.s, p4/M, z27.h, z1.h\n"
+ ".inst 0x44814768 // smlalt z8.s, p4/M, z27.h, z1.h\n"
+ ".inst 0x448243eb // smlalb z11.s, p4/M, z31.h, z2.h\n"
".inst 0x448247f0 // smlalt z16.s, p4/M, z31.h, z2.h\n"
- ".inst 0x4484430e // smlalb z14.s, p4/M, z24.h, z4.h\n"
- ".inst 0x448247c5 // smlalt z5.s, p4/M, z30.h, z2.h\n"
- ".inst 0x44834314 // smlalb z20.s, p4/M, z24.h, z3.h\n"
- "ld1sb { z2.h }, p4/Z, [x2, #6, MUL VL]\n"
- ".inst 0x45511042 // ssublb z2.h, z2.b, z17.b\n"
- ".inst 0x448343c8 // smlalb z8.s, p4/M, z30.h, z3.h\n"
- ".inst 0x44834386 // smlalb z6.s, p4/M, z28.h, z3.h\n"
- ".inst 0x4484470a // smlalt z10.s, p4/M, z24.h, z4.h\n"
- ".inst 0x44834707 // smlalt z7.s, p4/M, z24.h, z3.h\n"
- "ld1sb { z24.h }, p3/Z, [x24, x0]\n"
- ".inst 0x454f1318 // ssublb z24.h, z24.b, z15.b\n"
+ ".inst 0x448243d3 // smlalb z19.s, p4/M, z30.h, z2.h\n"
+ ".inst 0x448247c9 // smlalt z9.s, p4/M, z30.h, z2.h\n"
+ ".inst 0x44824367 // smlalb z7.s, p4/M, z27.h, z2.h\n"
+ ".inst 0x44824766 // smlalt z6.s, p4/M, z27.h, z2.h\n"
+ "ld1sb { z27.h }, p3/Z, [x19, x2]\n"
+ "inch x2\n"
+ ".inst 0x4482432c // smlalb z12.s, p4/M, z25.h, z2.h\n"
+ "whilelt p2.s, x2, x0\n"
+ ".inst 0x44824728 // smlalt z8.s, p4/M, z25.h, z2.h\n"
+ "mov x19, x2\n"
+ ".inst 0x448343cb // smlalb z11.s, p4/M, z30.h, z3.h\n"
+ ".inst 0x4551137b // ssublb z27.h, z27.b, z17.b\n"
".inst 0x448347d0 // smlalt z16.s, p4/M, z30.h, z3.h\n"
- ".inst 0x4480436e // smlalb z14.s, p4/M, z27.h, z0.h\n"
- ".inst 0x44834785 // smlalt z5.s, p4/M, z28.h, z3.h\n"
- ".inst 0x448442d4 // smlalb z20.s, p4/M, z22.h, z4.h\n"
- "ld1sb { z3.h }, p4/Z, [x2, #7, MUL VL]\n"
- "inch x2, ALL, MUL #8\n"
- ".inst 0x44844388 // smlalb z8.s, p4/M, z28.h, z4.h\n"
- ".inst 0x44844346 // smlalb z6.s, p4/M, z26.h, z4.h\n"
- ".inst 0x45511063 // ssublb z3.h, z3.b, z17.b\n"
- ".inst 0x4480476a // smlalt z10.s, p4/M, z27.h, z0.h\n"
+ "incw x19\n"
+ ".inst 0x44834393 // smlalb z19.s, p4/M, z28.h, z3.h\n"
+ "whilelt p1.s, x19, x0\n"
+ ".inst 0x44834789 // smlalt z9.s, p4/M, z28.h, z3.h\n"
+ "whilelt p3.h, x2, x0\n"
+ ".inst 0x44834327 // smlalb z7.s, p4/M, z25.h, z3.h\n"
+ ".inst 0x44834726 // smlalt z6.s, p4/M, z25.h, z3.h\n"
+ ".inst 0x4483430c // smlalb z12.s, p4/M, z24.h, z3.h\n"
+ ".inst 0x44834708 // smlalt z8.s, p4/M, z24.h, z3.h\n"
+ ".inst 0x4484438b // smlalb z11.s, p4/M, z28.h, z4.h\n"
".inst 0x44844790 // smlalt z16.s, p4/M, z28.h, z4.h\n"
- "ld1sb { z27.h }, p3/Z, [x23, x0]\n"
- ".inst 0x454f137b // ssublb z27.h, z27.b, z15.b\n"
- ".inst 0x448142ee // smlalb z14.s, p4/M, z23.h, z1.h\n"
- ".inst 0x448446c7 // smlalt z7.s, p4/M, z22.h, z4.h\n"
- "ld1w { z18.s }, p1/Z, [x16, #1, MUL VL]\n"
- "addvl x16, x16, #2\n"
- ".inst 0x44844745 // smlalt z5.s, p4/M, z26.h, z4.h\n"
- ".inst 0x448042f4 // smlalb z20.s, p4/M, z23.h, z0.h\n"
- "ld1sb { z4.h }, p4/Z, [x2]\n"
- ".inst 0x45511084 // ssublb z4.h, z4.b, z17.b\n"
- ".inst 0x44804328 // smlalb z8.s, p4/M, z25.h, z0.h\n"
- ".inst 0x44804306 // smlalb z6.s, p4/M, z24.h, z0.h\n"
- "inch x2\n"
- ".inst 0x448146ea // smlalt z10.s, p4/M, z23.h, z1.h\n"
- ".inst 0x44804730 // smlalt z16.s, p4/M, z25.h, z0.h\n"
- "ld1sb { z25.h }, p3/Z, [x22, x0]\n"
- ".inst 0x454f1339 // ssublb z25.h, z25.b, z15.b\n"
- ".inst 0x448243ee // smlalb z14.s, p4/M, z31.h, z2.h\n"
- ".inst 0x448046e7 // smlalt z7.s, p4/M, z23.h, z0.h\n"
- "uzp1 z23.s, z19.s, z18.s\n"
- ".inst 0x44804705 // smlalt z5.s, p4/M, z24.h, z0.h\n"
- ".inst 0x448143f4 // smlalb z20.s, p4/M, z31.h, z1.h\n"
- "uzp2 z22.s, z19.s, z18.s\n"
- ".inst 0x44814308 // smlalb z8.s, p4/M, z24.h, z1.h\n"
- ".inst 0x44814366 // smlalb z6.s, p4/M, z27.h, z1.h\n"
- ".inst 0x448247ea // smlalt z10.s, p4/M, z31.h, z2.h\n"
- ".inst 0x44814710 // smlalt z16.s, p4/M, z24.h, z1.h\n"
- "ld1sb { z24.h }, p3/Z, [x21, x0]\n"
- ".inst 0x454f1318 // ssublb z24.h, z24.b, z15.b\n"
- ".inst 0x448343ce // smlalb z14.s, p4/M, z30.h, z3.h\n"
- ".inst 0x448147e7 // smlalt z7.s, p4/M, z31.h, z1.h\n"
- ".inst 0x44814765 // smlalt z5.s, p4/M, z27.h, z1.h\n"
- ".inst 0x448243d4 // smlalb z20.s, p4/M, z30.h, z2.h\n"
- ".inst 0x44824368 // smlalb z8.s, p4/M, z27.h, z2.h\n"
- ".inst 0x44824326 // smlalb z6.s, p4/M, z25.h, z2.h\n"
- ".inst 0x448347ca // smlalt z10.s, p4/M, z30.h, z3.h\n"
- ".inst 0x44824770 // smlalt z16.s, p4/M, z27.h, z2.h\n"
- "ld1sb { z27.h }, p3/Z, [x20, x0]\n"
- ".inst 0x454f137b // ssublb z27.h, z27.b, z15.b\n"
- ".inst 0x4484438e // smlalb z14.s, p4/M, z28.h, z4.h\n"
- ".inst 0x448247c7 // smlalt z7.s, p4/M, z30.h, z2.h\n"
- ".inst 0x04a975ce // sqrdmulh z14.s, z14.s, z9.s\n"
- "inch x0\n"
- ".inst 0x44824725 // smlalt z5.s, p4/M, z25.h, z2.h\n"
- ".inst 0x44834394 // smlalb z20.s, p4/M, z28.h, z3.h\n"
- "and z21.d, z14.d, z23.d\n"
- "mov x20, x0\n"
- ".inst 0x44834328 // smlalb z8.s, p4/M, z25.h, z3.h\n"
- ".inst 0x44834306 // smlalb z6.s, p4/M, z24.h, z3.h\n"
- "asr z21.s, z21.s, #0x1f\n"
- "incw x20\n"
- ".inst 0x4484478a // smlalt z10.s, p4/M, z28.h, z4.h\n"
- ".inst 0x44834787 // smlalt z7.s, p4/M, z28.h, z3.h\n"
- ".inst 0x04bd754a // sqrdmulh z10.s, z10.s, z29.s\n"
- "whilelt p2.s, x0, x1\n"
- ".inst 0x44834730 // smlalt z16.s, p4/M, z25.h, z3.h\n"
- ".inst 0x44834705 // smlalt z5.s, p4/M, z24.h, z3.h\n"
- "and z3.d, z10.d, z22.d\n"
- "whilelt p1.s, x20, x1\n"
- ".inst 0x44844354 // smlalb z20.s, p4/M, z26.h, z4.h\n"
- ".inst 0x44844308 // smlalb z8.s, p4/M, z24.h, z4.h\n"
- ".inst 0x04a97694 // sqrdmulh z20.s, z20.s, z9.s\n"
- "whilelt p3.h, x0, x1\n"
- ".inst 0x44844366 // smlalb z6.s, p4/M, z27.h, z4.h\n"
- ".inst 0x44844747 // smlalt z7.s, p4/M, z26.h, z4.h\n"
- ".inst 0x04a97508 // sqrdmulh z8.s, z8.s, z9.s\n"
- ".inst 0x44844710 // smlalt z16.s, p4/M, z24.h, z4.h\n"
- ".inst 0x44844765 // smlalt z5.s, p4/M, z27.h, z4.h\n"
- ".inst 0x04a974c6 // sqrdmulh z6.s, z6.s, z9.s\n"
- "sqadd z14.s, z14.s, z21.s\n"
- "asr z3.s, z3.s, #0x1f\n"
- ".inst 0x448292ee // srshl z14.s, p4/M, z14.s, z23.s\n"
- "and z19.d, z20.d, z23.d\n"
- ".inst 0x04bd74e7 // sqrdmulh z7.s, z7.s, z29.s\n"
- "and z18.d, z8.d, z23.d\n"
- ".inst 0x04bd7610 // sqrdmulh z16.s, z16.s, z29.s\n"
- "and z21.d, z6.d, z23.d\n"
- ".inst 0x04bd74a5 // sqrdmulh z5.s, z5.s, z29.s\n"
- "sqadd z10.s, z10.s, z3.s\n"
- "asr z19.s, z19.s, #0x1f\n"
- ".inst 0x448292ca // srshl z10.s, p4/M, z10.s, z22.s\n"
- "and z1.d, z7.d, z22.d\n"
+ ".inst 0x44844353 // smlalb z19.s, p4/M, z26.h, z4.h\n"
+ ".inst 0x44844749 // smlalt z9.s, p4/M, z26.h, z4.h\n"
+ ".inst 0x04b5756b // sqrdmulh z11.s, z11.s, z21.s\n"
+ ".inst 0x04aa7610 // sqrdmulh z16.s, z16.s, z10.s\n"
+ ".inst 0x04b57673 // sqrdmulh z19.s, z19.s, z21.s\n"
+ ".inst 0x04aa7529 // sqrdmulh z9.s, z9.s, z10.s\n"
+ "and z31.d, z11.d, z29.d\n"
+ "asr z31.s, z31.s, #0x1f\n"
+ "and z23.d, z16.d, z20.d\n"
+ "and z25.d, z19.d, z29.d\n"
+ "asr z23.s, z23.s, #0x1f\n"
+ "and z18.d, z9.d, z20.d\n"
+ ".inst 0x44844307 // smlalb z7.s, p4/M, z24.h, z4.h\n"
+ "asr z25.s, z25.s, #0x1f\n"
+ ".inst 0x44844706 // smlalt z6.s, p4/M, z24.h, z4.h\n"
"asr z18.s, z18.s, #0x1f\n"
- "and z2.d, z16.d, z22.d\n"
- "asr z21.s, z21.s, #0x1f\n"
- "and z3.d, z5.d, z22.d\n"
- "sqadd z20.s, z20.s, z19.s\n"
- ".inst 0x448292f4 // srshl z20.s, p4/M, z20.s, z23.s\n"
+ "sqadd z11.s, z11.s, z31.s\n"
+ ".inst 0x4484436c // smlalb z12.s, p4/M, z27.h, z4.h\n"
+ ".inst 0x04b574e7 // sqrdmulh z7.s, z7.s, z21.s\n"
+ "sqadd z16.s, z16.s, z23.s\n"
+ "sqadd z19.s, z19.s, z25.s\n"
+ ".inst 0x04aa74c6 // sqrdmulh z6.s, z6.s, z10.s\n"
+ "sqadd z9.s, z9.s, z18.s\n"
+ "and z1.d, z7.d, z29.d\n"
"asr z1.s, z1.s, #0x1f\n"
- "sqadd z8.s, z8.s, z18.s\n"
- ".inst 0x448292e8 // srshl z8.s, p4/M, z8.s, z23.s\n"
- "asr z2.s, z2.s, #0x1f\n"
- "sqadd z6.s, z6.s, z21.s\n"
- ".inst 0x448292e6 // srshl z6.s, p4/M, z6.s, z23.s\n"
- "asr z3.s, z3.s, #0x1f\n"
+ "and z18.d, z6.d, z20.d\n"
+ ".inst 0x04b5758c // sqrdmulh z12.s, z12.s, z21.s\n"
+ "asr z18.s, z18.s, #0x1f\n"
+ ".inst 0x44844768 // smlalt z8.s, p4/M, z27.h, z4.h\n"
+ ".inst 0x448293ab // srshl z11.s, p4/M, z11.s, z29.s\n"
+ "and z30.d, z12.d, z29.d\n"
+ "asr z30.s, z30.s, #0x1f\n"
+ "add z11.s, z11.s, z14.s\n"
"sqadd z7.s, z7.s, z1.s\n"
- ".inst 0x448292c7 // srshl z7.s, p4/M, z7.s, z22.s\n"
- "sqadd z16.s, z16.s, z2.s\n"
- "sqadd z5.s, z5.s, z3.s\n"
- ".inst 0x448292d0 // srshl z16.s, p4/M, z16.s, z22.s\n"
- ".inst 0x448292c5 // srshl z5.s, p4/M, z5.s, z22.s\n"
- ".inst 0x453041ce // sqxtnb z14.h, z14.s\n"
- ".inst 0x45304294 // sqxtnb z20.h, z20.s\n"
- ".inst 0x45304108 // sqxtnb z8.h, z8.s\n"
- ".inst 0x453040c6 // sqxtnb z6.h, z6.s\n"
- ".inst 0x4530454e // sqxtnt z14.h, z10.s\n"
- ".inst 0x453044f4 // sqxtnt z20.h, z7.s\n"
- ".inst 0x45304608 // sqxtnt z8.h, z16.s\n"
- ".inst 0x453044a6 // sqxtnt z6.h, z5.s\n"
- "sqadd z14.h, z14.h, z12.h\n"
- "sqadd z20.h, z20.h, z12.h\n"
- "smax z14.h, p4/M, z14.h, z13.h\n"
- "smax z20.h, p4/M, z20.h, z13.h\n"
- "sqadd z8.h, z8.h, z12.h\n"
- "sqadd z6.h, z6.h, z12.h\n"
- "smax z8.h, p4/M, z8.h, z13.h\n"
- "smax z6.h, p4/M, z6.h, z13.h\n"
- "smin z14.h, p4/M, z14.h, z11.h\n"
- "smin z20.h, p4/M, z20.h, z11.h\n"
- "st1b { z14.h }, p0, [x3, x8]\n"
- "smin z8.h, p4/M, z8.h, z11.h\n"
- "smin z6.h, p4/M, z6.h, z11.h\n"
- "st1b { z20.h }, p0, [x4, x8]\n"
- "st1b { z8.h }, p0, [x5, x8]\n"
- "st1b { z6.h }, p0, [x6, x8]\n"
- "ld1w { z30.s }, p2/Z, [x14]\n"
- "ld1w { z16.s }, p1/Z, [x14, #1, MUL VL]\n"
- "uzp1 z14.s, z30.s, z16.s\n"
- "ld1sb { z0.h }, p4/Z, [x2]\n"
- "ld1sb { z1.h }, p4/Z, [x2, #1, MUL VL]\n"
- "uzp2 z10.s, z30.s, z16.s\n"
- "addvl x14, x14, #2\n"
- "ld1sb { z2.h }, p4/Z, [x2, #2, MUL VL]\n"
- "ld1sb { z3.h }, p4/Z, [x2, #3, MUL VL]\n"
- "inch x8\n"
- "str x14, [%x[params], %[offsetof_Params_bias]]\n"
- "ld1sb { z4.h }, p4/Z, [x2, #4, MUL VL]\n"
- "ldp x9, x28, [x7, #0x0]\n"
- "mov z20.d, z14.d\n"
- "mov z7.d, z10.d\n"
- "ldp x27, x26, [x7, #0x10]\n"
- "ldp x25, x24, [x7, #0x20]\n"
- "mov z8.d, z14.d\n"
- "mov z16.d, z10.d\n"
- "ldp x23, x22, [x7, #0x30]\n"
- "ldp x21, x20, [x7, #0x40]\n"
- "mov z6.d, z14.d\n"
- "mov z5.d, z10.d\n"
- "ld1sb { z31.h }, p3/Z, [x9, x0]\n"
- "ld1sb { z30.h }, p3/Z, [x28, x0]\n"
- ".inst 0x45511000 // ssublb z0.h, z0.b, z17.b\n"
- ".inst 0x45511021 // ssublb z1.h, z1.b, z17.b\n"
- "ld1sb { z29.h }, p3/Z, [x27, x0]\n"
- "ld1sb { z28.h }, p3/Z, [x26, x0]\n"
- ".inst 0x45511042 // ssublb z2.h, z2.b, z17.b\n"
- ".inst 0x45511063 // ssublb z3.h, z3.b, z17.b\n"
- "ld1sb { z27.h }, p3/Z, [x25, x0]\n"
- "ld1sb { z23.h }, p3/Z, [x24, x0]\n"
- ".inst 0x45511084 // ssublb z4.h, z4.b, z17.b\n"
- ".inst 0x454f13ff // ssublb z31.h, z31.b, z15.b\n"
- "ld1sb { z25.h }, p3/Z, [x23, x0]\n"
- "ld1sb { z24.h }, p3/Z, [x22, x0]\n"
- ".inst 0x454f13de // ssublb z30.h, z30.b, z15.b\n"
- ".inst 0x454f13bd // ssublb z29.h, z29.b, z15.b\n"
- "ld1sb { z26.h }, p3/Z, [x21, x0]\n"
- "ld1sb { z22.h }, p3/Z, [x20, x0]\n"
- ".inst 0x454f139c // ssublb z28.h, z28.b, z15.b\n"
- ".inst 0x454f137b // ssublb z27.h, z27.b, z15.b\n"
- ".inst 0x454f12f7 // ssublb z23.h, z23.b, z15.b\n"
- ".inst 0x454f1339 // ssublb z25.h, z25.b, z15.b\n"
- ".inst 0x454f1318 // ssublb z24.h, z24.b, z15.b\n"
- ".inst 0x454f135a // ssublb z26.h, z26.b, z15.b\n"
- ".inst 0x454f12d6 // ssublb z22.h, z22.b, z15.b\n"
+ "sqadd z6.s, z6.s, z18.s\n"
+ ".inst 0x04aa7508 // sqrdmulh z8.s, z8.s, z10.s\n"
+ "smin z11.s, p4/M, z11.s, z15.s\n"
+ ".inst 0x44829290 // srshl z16.s, p4/M, z16.s, z20.s\n"
+ "sqadd z12.s, z12.s, z30.s\n"
+ "and z3.d, z8.d, z20.d\n"
+ "asr z3.s, z3.s, #0x1f\n"
+ "add z16.s, z16.s, z14.s\n"
+ "smax z11.s, p4/M, z11.s, z5.s\n"
+ ".inst 0x448293b3 // srshl z19.s, p4/M, z19.s, z29.s\n"
+ ".inst 0x44829289 // srshl z9.s, p4/M, z9.s, z20.s\n"
+ "smin z16.s, p4/M, z16.s, z15.s\n"
+ ".inst 0x448293a7 // srshl z7.s, p4/M, z7.s, z29.s\n"
+ "add z19.s, z19.s, z14.s\n"
+ "add z9.s, z9.s, z14.s\n"
+ "sqadd z8.s, z8.s, z3.s\n"
+ "add z7.s, z7.s, z14.s\n"
+ "smax z16.s, p4/M, z16.s, z5.s\n"
+ "smin z19.s, p4/M, z19.s, z15.s\n"
+ "smin z9.s, p4/M, z9.s, z15.s\n"
+ "smin z7.s, p4/M, z7.s, z15.s\n"
+ "trn1 z11.h, z11.h, z16.h\n"
+ "st1b { z11.h }, p0, [x7, x3]\n"
+ "smax z19.s, p4/M, z19.s, z5.s\n"
+ "smax z9.s, p4/M, z9.s, z5.s\n"
+ "smax z7.s, p4/M, z7.s, z5.s\n"
+ ".inst 0x44829286 // srshl z6.s, p4/M, z6.s, z20.s\n"
+ ".inst 0x448293ac // srshl z12.s, p4/M, z12.s, z29.s\n"
+ "trn1 z19.h, z19.h, z9.h\n"
+ "st1b { z19.h }, p0, [x8, x3]\n"
+ "add z6.s, z6.s, z14.s\n"
+ ".inst 0x44829288 // srshl z8.s, p4/M, z8.s, z20.s\n"
+ "add z12.s, z12.s, z14.s\n"
+ "smin z6.s, p4/M, z6.s, z15.s\n"
+ "add z8.s, z8.s, z14.s\n"
+ "smin z12.s, p4/M, z12.s, z15.s\n"
+ "smax z6.s, p4/M, z6.s, z5.s\n"
+ "smin z8.s, p4/M, z8.s, z15.s\n"
+ "smax z12.s, p4/M, z12.s, z5.s\n"
+ "trn1 z7.h, z7.h, z6.h\n"
+ "st1b { z7.h }, p0, [x17, x3]\n"
+ "smax z8.s, p4/M, z8.s, z5.s\n"
+ "trn1 z12.h, z12.h, z8.h\n"
+ "st1b { z12.h }, p0, [x16, x3]\n"
+ "inch x3\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ld1w { z19.s }, p2/Z, [x19]\n"
+ "ld1w { z6.s }, p1/Z, [x19, #1, MUL VL]\n"
+ "uzp1 z11.s, z19.s, z6.s\n"
+ "addvl x19, x19, #2\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "uzp2 z16.s, z19.s, z6.s\n"
+ "mov z19.d, z11.d\n"
+ "ld1sb { z0.h }, p4/Z, [x1]\n"
+ ".inst 0x454d1000 // ssublb z0.h, z0.b, z13.b\n"
+ "mov z9.d, z16.d\n"
+ "ld1sb { z1.h }, p4/Z, [x1, #1, MUL VL]\n"
+ "mov z7.d, z11.d\n"
+ "ld1sb { z2.h }, p4/Z, [x1, #2, MUL VL]\n"
+ ".inst 0x454d1021 // ssublb z1.h, z1.b, z13.b\n"
+ "mov z6.d, z16.d\n"
+ "ld1sb { z3.h }, p4/Z, [x1, #3, MUL VL]\n"
+ "mov z12.d, z11.d\n"
+ "ld1sb { z4.h }, p4/Z, [x1, #4, MUL VL]\n"
+ ".inst 0x454d1042 // ssublb z2.h, z2.b, z13.b\n"
+ "mov z8.d, z16.d\n"
+ "ldp x28, x27, [x5, #0x0]\n"
+ ".inst 0x454d1063 // ssublb z3.h, z3.b, z13.b\n"
+ "ldp x26, x25, [x5, #0x10]\n"
+ ".inst 0x454d1084 // ssublb z4.h, z4.b, z13.b\n"
+ "ldp x24, x23, [x5, #0x20]\n"
+ "ldp x22, x21, [x5, #0x30]\n"
+ "ldp x20, x19, [x5, #0x40]\n"
+ "ld1sb { z31.h }, p3/Z, [x28, x2]\n"
+ ".inst 0x455113ff // ssublb z31.h, z31.b, z17.b\n"
+ "ld1sb { z30.h }, p3/Z, [x27, x2]\n"
+ "ld1sb { z29.h }, p3/Z, [x26, x2]\n"
+ ".inst 0x455113de // ssublb z30.h, z30.b, z17.b\n"
+ "ld1sb { z28.h }, p3/Z, [x25, x2]\n"
+ "ld1sb { z27.h }, p3/Z, [x24, x2]\n"
+ ".inst 0x455113bd // ssublb z29.h, z29.b, z17.b\n"
+ "ld1sb { z23.h }, p3/Z, [x23, x2]\n"
+ ".inst 0x4551139c // ssublb z28.h, z28.b, z17.b\n"
+ "ld1sb { z25.h }, p3/Z, [x22, x2]\n"
+ "ld1sb { z24.h }, p3/Z, [x21, x2]\n"
+ ".inst 0x4551137b // ssublb z27.h, z27.b, z17.b\n"
+ "ld1sb { z26.h }, p3/Z, [x20, x2]\n"
+ ".inst 0x455112f7 // ssublb z23.h, z23.b, z17.b\n"
+ "ld1sb { z22.h }, p3/Z, [x19, x2]\n"
+ ".inst 0x45511339 // ssublb z25.h, z25.b, z17.b\n"
+ ".inst 0x45511318 // ssublb z24.h, z24.b, z17.b\n"
+ ".inst 0x4551135a // ssublb z26.h, z26.b, z17.b\n"
+ ".inst 0x455112d6 // ssublb z22.h, z22.b, z17.b\n"
"b.any 1b\n"
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp
index 6fba4d47d2..ea7acf5b6e 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,295 +41,309 @@ void sve_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst_impl
)
{
__asm__ __volatile__(
- "mov x20, #0x9\n"
- "whilelt p0.b, XZR, x20\n"
- "ldr x23, [%x[inptrs], #0x8]\n"
- "ldr x20, [%x[inptrs], #0x10]\n"
- "ldr x22, [%x[inptrs], #0x20]\n"
- "ldr x21, [%x[inptrs], #0x0]\n"
- "mov z15.b, #0x1\n"
- "lsr z15.s, z15.s, #0x8\n"
- "ld1b { z1.b }, p0/Z, [x23]\n"
- "ld1b { z2.b }, p0/Z, [x20]\n"
- "mov z30.d, z1.d\n"
- "mov z29.d, z1.d\n"
- "ldr x20, [%x[inptrs], #0x18]\n"
- "ld1b { z4.b }, p0/Z, [x22]\n"
- "mov z28.d, z1.d\n"
- "mov z27.d, z2.d\n"
- "ld1b { z0.b }, p0/Z, [x21]\n"
- "mov z26.d, z2.d\n"
- "mov z25.d, z2.d\n"
- "ld1b { z3.b }, p0/Z, [x20]\n"
- "mov z24.d, z4.d\n"
- "mov z23.d, z4.d\n"
+ "mov z31.s, #0x0\n"
+ "ldr x24, [%x[inptrs], #0x0]\n"
"ptrue p2.b\n"
- "ld1rw { z14.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
- "mov z22.d, z4.d\n"
- "ext z30.b, z30.b, z30.b, #0x2\n"
- "lsl x10, %x[n_channels], #0x2\n"
- "neg z14.s, p2/M, z14.s\n"
- "ext z29.b, z29.b, z29.b, #0x4\n"
- "ext z28.b, z28.b, z28.b, #0x6\n"
- "mov x9, #0x0\n"
- "whilelt p1.b, x9, x10\n"
- "ext z27.b, z27.b, z27.b, #0x2\n"
- "ext z26.b, z26.b, z26.b, #0x4\n"
- "ld1w { z13.s }, p1/Z, [%x[params]]\n"
+ "mov z18.s, #0x0\n"
+ "ldr x23, [%x[inptrs], #0x8]\n"
+ "lsl x9, %x[n_channels], #0x2\n"
+ "mov z29.s, #0x0\n"
+ "ldr x22, [%x[inptrs], #0x10]\n"
+ "addvl SP, SP, #-8\n"
+ "mov z28.s, #0x0\n"
+ "ldr x21, [%x[inptrs], #0x18]\n"
+ "mov x19, #0x9\n"
+ "mov z13.s, #0x0\n"
+ "ldr x20, [%x[inptrs], #0x20]\n"
+ "whilelt p1.b, XZR, x19\n"
+ "mov z14.s, #0x0\n"
+ "ld1b { z7.b }, p1/Z, [x24]\n"
+ "mov x19, #0x3\n"
+ "mov z15.s, #0x0\n"
+ "ld1b { z3.b }, p1/Z, [x23]\n"
+ "whilelt p0.b, XZR, x19\n"
+ "mov z11.b, p0/z, #0x1\n"
+ "ld1b { z4.b }, p1/Z, [x22]\n"
"mov x28, #0x0\n"
- "ext z25.b, z25.b, z25.b, #0x6\n"
- "ext z24.b, z24.b, z24.b, #0x2\n"
- "ldp x27, x26, [%x[outptrs], #0x0]\n"
- "ldp x25, x24, [%x[outptrs], #0x10]\n"
- "ext z23.b, z23.b, z23.b, #0x4\n"
- "ext z22.b, z22.b, z22.b, #0x6\n"
- "ldp x23, x22, [%x[outptrs], #0x20]\n"
- "ldp x21, x20, [%x[outptrs], #0x30]\n"
- "mov z21.d, z0.d\n"
- "mov z20.d, z0.d\n"
+ "mov z10.d, z7.d\n"
+ "ld1b { z6.b }, p1/Z, [x21]\n"
+ "mov x27, #0x0\n"
+ "ext z10.b, z10.b, z10.b, #0x2\n"
+ "ld1b { z5.b }, p1/Z, [x20]\n"
+ "whilelt p1.b, x28, x9\n"
+ "mov z17.d, z7.d\n"
+ "ld1rw { z30.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+ "mov z26.d, z7.d\n"
+ "ldp x26, x25, [%x[outptrs], #0x0]\n"
+ "ext z17.b, z17.b, z17.b, #0x4\n"
+ "ldp x24, x23, [%x[outptrs], #0x10]\n"
+ "ext z26.b, z26.b, z26.b, #0x6\n"
+ "ldp x22, x21, [%x[outptrs], #0x20]\n"
+ "mov z19.d, z3.d\n"
+ "ldp x20, x19, [%x[outptrs], #0x30]\n"
+ "ext z19.b, z19.b, z19.b, #0x2\n"
"ld1rw { z12.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
- "ld1rw { z11.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
- "mov z19.d, z0.d\n"
- "mov z18.d, z3.d\n"
- "ld1rw { z10.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
- "ld1b { z5.b }, p1/Z, [%x[params], #1, MUL VL]\n"
+ "zip1 z7.s, z7.s, z17.s\n"
+ "ld1rw { z16.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
+ "zip1 z10.s, z10.s, z26.s\n"
+ "ld1rw { z0.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+ "zip1 z7.s, z7.s, z10.s\n"
+ "ld1w { z1.s }, p1/Z, [%x[params]]\n"
+ "mov z7.q, z7.q[0]\n"
+ "ld1b { z8.b }, p1/Z, [%x[params], #1, MUL VL]\n"
"mov z17.d, z3.d\n"
- "mov z16.d, z3.d\n"
- "ld1b { z6.b }, p1/Z, [%x[params], #2, MUL VL]\n"
- "ld1b { z7.b }, p1/Z, [%x[params], #3, MUL VL]\n"
- "ext z21.b, z21.b, z21.b, #0x2\n"
- "ext z20.b, z20.b, z20.b, #0x4\n"
- "addvl %x[params], %x[params], #4\n"
- "ext z19.b, z19.b, z19.b, #0x6\n"
- "zip1 z1.s, z1.s, z29.s\n"
- "zip1 z30.s, z30.s, z28.s\n"
- "zip1 z2.s, z2.s, z26.s\n"
- "zip1 z27.s, z27.s, z25.s\n"
- "ext z18.b, z18.b, z18.b, #0x2\n"
+ "ld1b { z9.b }, p1/Z, [%x[params], #2, MUL VL]\n"
"ext z17.b, z17.b, z17.b, #0x4\n"
- "ext z16.b, z16.b, z16.b, #0x6\n"
- "zip1 z4.s, z4.s, z23.s\n"
- "zip1 z24.s, z24.s, z22.s\n"
- "zip1 z0.s, z0.s, z20.s\n"
- "zip1 z21.s, z21.s, z19.s\n"
- "zip1 z1.s, z1.s, z30.s\n"
- "zip1 z2.s, z2.s, z27.s\n"
+ "ld1b { z10.b }, p1/Z, [%x[params], #3, MUL VL]\n"
+ "addvl %x[params], %x[params], #4\n"
+ "mov z2.d, z3.d\n"
+ "mov z20.d, z4.d\n"
+ "ext z2.b, z2.b, z2.b, #0x6\n"
"zip1 z3.s, z3.s, z17.s\n"
- "zip1 z18.s, z18.s, z16.s\n"
- "zip1 z4.s, z4.s, z24.s\n"
- "zip1 z0.s, z0.s, z21.s\n"
- "mov z1.q, z1.q[0]\n"
- "mov z2.q, z2.q[0]\n"
- "zip1 z3.s, z3.s, z18.s\n"
+ "ext z20.b, z20.b, z20.b, #0x2\n"
+ "mov z17.d, z4.d\n"
+ "zip1 z19.s, z19.s, z2.s\n"
+ "zip1 z3.s, z3.s, z19.s\n"
+ "mov z3.q, z3.q[0]\n"
+ "ext z17.b, z17.b, z17.b, #0x4\n"
+ "mov z26.d, z4.d\n"
+ "ext z26.b, z26.b, z26.b, #0x6\n"
+ "mov z21.d, z6.d\n"
+ "zip1 z4.s, z4.s, z17.s\n"
+ "ext z21.b, z21.b, z21.b, #0x2\n"
+ "zip1 z20.s, z20.s, z26.s\n"
+ "zip1 z4.s, z4.s, z20.s\n"
"mov z4.q, z4.q[0]\n"
- "mov z24.s, #0x0\n"
+ "mov z17.d, z6.d\n"
+ "ext z17.b, z17.b, z17.b, #0x4\n"
+ "mov z20.d, z6.d\n"
+ "ext z20.b, z20.b, z20.b, #0x6\n"
+ "mov z19.d, z5.d\n"
+ "zip1 z6.s, z6.s, z17.s\n"
+ "ext z19.b, z19.b, z19.b, #0x2\n"
+ "zip1 z21.s, z21.s, z20.s\n"
+ "zip1 z6.s, z6.s, z21.s\n"
+ "mov z6.q, z6.q[0]\n"
+ "mov z17.d, z5.d\n"
+ "ext z17.b, z17.b, z17.b, #0x4\n"
+ "mov z20.d, z5.d\n"
+ "ext z20.b, z20.b, z20.b, #0x6\n"
+ "mov z11.s, z11.s[0]\n"
+ "zip1 z5.s, z5.s, z17.s\n"
"mov z25.s, #0x0\n"
- "sdot z24.s, z15.b, z1.b[0]\n"
+ "zip1 z19.s, z19.s, z20.s\n"
+ "zip1 z5.s, z5.s, z19.s\n"
+ "mov z5.q, z5.q[0]\n"
+ "mov z26.s, #0x0\n"
+ "mov z27.s, #0x0\n"
+ "mov z24.s, #0x0\n"
"mov z23.s, #0x0\n"
"mov z22.s, #0x0\n"
- "sdot z25.s, z15.b, z1.b[1]\n"
"mov z21.s, #0x0\n"
- "mov z20.s, #0x0\n"
- "sdot z23.s, z15.b, z1.b[2]\n"
- "mov z9.s, #0x0\n"
- "mov z8.s, #0x0\n"
- "sdot z22.s, z15.b, z1.b[3]\n"
- "mov z19.s, #0x0\n"
- "mov z18.s, #0x0\n"
- "sdot z21.s, z15.b, z2.b[0]\n"
"mov z17.s, #0x0\n"
- "mov z16.s, #0x0\n"
- "sdot z20.s, z15.b, z2.b[1]\n"
- "sdot z9.s, z15.b, z2.b[2]\n"
- "sdot z8.s, z15.b, z2.b[3]\n"
- "mov z0.q, z0.q[0]\n"
- "sdot z19.s, z15.b, z4.b[0]\n"
- "sdot z18.s, z15.b, z4.b[1]\n"
- "mov z3.q, z3.q[0]\n"
- "sdot z17.s, z15.b, z4.b[2]\n"
- "sdot z16.s, z15.b, z4.b[3]\n"
- "mov z31.s, #0x0\n"
- "mov z30.s, #0x0\n"
- "mov z29.s, #0x0\n"
- "sdot z31.s, z15.b, z0.b[0]\n"
- "mov z28.s, #0x0\n"
- "sdot z30.s, z15.b, z0.b[1]\n"
- "sdot z29.s, z15.b, z0.b[2]\n"
- "sdot z28.s, z15.b, z0.b[3]\n"
- "add z24.s, z24.s, z21.s\n"
- "add z25.s, z25.s, z20.s\n"
- "add z26.s, z23.s, z9.s\n"
- "add z27.s, z22.s, z8.s\n"
- "add z23.s, z19.s, z21.s\n"
- "mov z22.s, #0x0\n"
- "sdot z22.s, z15.b, z3.b[0]\n"
- "add z21.s, z18.s, z20.s\n"
"mov z20.s, #0x0\n"
- "sdot z20.s, z15.b, z3.b[1]\n"
- "add z19.s, z17.s, z9.s\n"
- "mov z18.s, #0x0\n"
- "sdot z18.s, z15.b, z3.b[2]\n"
- "add z17.s, z16.s, z8.s\n"
- "mov z16.s, #0x0\n"
- "sdot z16.s, z15.b, z3.b[3]\n"
- "add z24.s, z24.s, z31.s\n"
- "add z25.s, z25.s, z30.s\n"
- "mul z24.s, p2/M, z24.s, z14.s\n"
- "mul z25.s, p2/M, z25.s, z14.s\n"
- "add z26.s, z26.s, z29.s\n"
- "add z27.s, z27.s, z28.s\n"
- "mul z26.s, p2/M, z26.s, z14.s\n"
- "mul z27.s, p2/M, z27.s, z14.s\n"
- "add z28.s, z23.s, z22.s\n"
- "add z29.s, z21.s, z20.s\n"
- "mul z28.s, p2/M, z28.s, z14.s\n"
- "mul z29.s, p2/M, z29.s, z14.s\n"
- "add z30.s, z19.s, z18.s\n"
- "add z31.s, z17.s, z16.s\n"
- "mul z30.s, p2/M, z30.s, z14.s\n"
- "mul z31.s, p2/M, z31.s, z14.s\n"
- "zip1 z19.s, z24.s, z26.s\n"
- "zip1 z18.s, z25.s, z27.s\n"
- "zip1 z17.s, z28.s, z30.s\n"
- "zip1 z16.s, z29.s, z31.s\n"
- "zip1 z22.s, z19.s, z18.s\n"
- "zip1 z23.s, z17.s, z16.s\n"
- "add z24.s, z24.s, z13.s\n"
- "add z25.s, z25.s, z13.s\n"
- "add z26.s, z26.s, z13.s\n"
- "add z27.s, z27.s, z13.s\n"
- "add z28.s, z28.s, z13.s\n"
- "add z29.s, z29.s, z13.s\n"
- "add z30.s, z30.s, z13.s\n"
+ "mov z2.s, #0x0\n"
+ "mov z19.s, #0x0\n"
+ "sdot z31.s, z11.b, z7.b[0]\n"
+ "sdot z18.s, z11.b, z7.b[1]\n"
+ "sdot z29.s, z11.b, z7.b[2]\n"
+ "sdot z28.s, z11.b, z7.b[3]\n"
+ "sdot z13.s, z11.b, z3.b[0]\n"
+ "sdot z14.s, z11.b, z3.b[1]\n"
+ "sdot z15.s, z11.b, z3.b[2]\n"
+ "sdot z25.s, z11.b, z3.b[3]\n"
+ "sdot z26.s, z11.b, z4.b[0]\n"
+ "sdot z27.s, z11.b, z4.b[1]\n"
+ "sdot z24.s, z11.b, z4.b[2]\n"
+ "sdot z23.s, z11.b, z4.b[3]\n"
+ "sdot z22.s, z11.b, z6.b[0]\n"
+ "sdot z21.s, z11.b, z6.b[1]\n"
+ "sdot z17.s, z11.b, z6.b[2]\n"
+ "sdot z20.s, z11.b, z6.b[3]\n"
+ "sdot z2.s, z11.b, z5.b[0]\n"
+ "sdot z19.s, z11.b, z5.b[1]\n"
+ "mov z31.d, z31.d\n"
+ "mov z18.d, z18.d\n"
+ "mov z29.d, z29.d\n"
+ "mov z28.d, z28.d\n"
"add z31.s, z31.s, z13.s\n"
+ "mov z13.s, #0x0\n"
+ "sdot z13.s, z11.b, z5.b[2]\n"
+ "add z18.s, z18.s, z14.s\n"
+ "mov z14.s, #0x0\n"
+ "sdot z14.s, z11.b, z5.b[3]\n"
+ "add z29.s, z29.s, z15.s\n"
+ "add z28.s, z28.s, z25.s\n"
+ "add z31.s, z31.s, z26.s\n"
+ "add z18.s, z18.s, z27.s\n"
+ "add z29.s, z29.s, z24.s\n"
+ "add z28.s, z28.s, z23.s\n"
+ "mov z26.d, z26.d\n"
+ "mov z25.d, z27.d\n"
+ "mov z24.d, z24.d\n"
+ "mov z23.d, z23.d\n"
+ "add z26.s, z26.s, z22.s\n"
+ "add z25.s, z25.s, z21.s\n"
+ "add z24.s, z24.s, z17.s\n"
+ "add z23.s, z23.s, z20.s\n"
+ "add z26.s, z26.s, z2.s\n"
+ "add z25.s, z25.s, z19.s\n"
+ "add z24.s, z24.s, z13.s\n"
+ "add z23.s, z23.s, z14.s\n"
+ "neg z30.s, p2/M, z30.s\n"
+ "mul z31.s, p2/M, z31.s, z30.s\n"
+ "st1w { z31.s }, p2, [SP]\n"
+ "add z31.s, z31.s, z1.s\n"
+ "mul z18.s, p2/M, z18.s, z30.s\n"
+ "st1w { z18.s }, p2, [SP, #1, MUL VL]\n"
+ "add z18.s, z18.s, z1.s\n"
+ "mul z29.s, p2/M, z29.s, z30.s\n"
+ "st1w { z29.s }, p2, [SP, #2, MUL VL]\n"
+ "add z29.s, z29.s, z1.s\n"
+ "mul z28.s, p2/M, z28.s, z30.s\n"
+ "st1w { z28.s }, p2, [SP, #3, MUL VL]\n"
+ "add z28.s, z28.s, z1.s\n"
+ "mul z26.s, p2/M, z26.s, z30.s\n"
+ "st1w { z26.s }, p2, [SP, #4, MUL VL]\n"
+ "add z26.s, z26.s, z1.s\n"
+ "mul z25.s, p2/M, z25.s, z30.s\n"
+ "st1w { z25.s }, p2, [SP, #5, MUL VL]\n"
+ "add z25.s, z25.s, z1.s\n"
+ "mul z24.s, p2/M, z24.s, z30.s\n"
+ "st1w { z24.s }, p2, [SP, #6, MUL VL]\n"
+ "add z24.s, z24.s, z1.s\n"
+ "mul z23.s, p2/M, z23.s, z30.s\n"
+ "st1w { z23.s }, p2, [SP, #7, MUL VL]\n"
+ "add z23.s, z23.s, z1.s\n"
"1:" // Loop
- "sdot z24.s, z5.b, z0.b[0]\n"
- "sdot z25.s, z5.b, z0.b[1]\n"
- "ld1w { z21.s }, p2/Z, [%x[params]]\n"
- "ld1w { z20.s }, p2/Z, [%x[params], #1, MUL VL]\n"
- "sdot z26.s, z5.b, z0.b[2]\n"
- "sdot z27.s, z5.b, z0.b[3]\n"
- "incb x9\n"
- "whilelt p0.s, x28, %x[n_channels]\n"
- "sdot z24.s, z6.b, z1.b[0]\n"
- "sdot z25.s, z6.b, z1.b[1]\n"
- "whilelt p1.b, x9, x10\n"
- "ld1w { z13.s }, p1/Z, [%x[params], #2, MUL VL]\n"
- "sdot z26.s, z6.b, z1.b[2]\n"
- "sdot z27.s, z6.b, z1.b[3]\n"
- "sdot z28.s, z5.b, z2.b[0]\n"
- "sdot z29.s, z5.b, z2.b[1]\n"
- "sdot z30.s, z5.b, z2.b[2]\n"
- "sdot z31.s, z5.b, z2.b[3]\n"
- "ld1b { z5.b }, p1/Z, [%x[params], #3, MUL VL]\n"
- "sdot z24.s, z7.b, z2.b[0]\n"
- "sdot z25.s, z7.b, z2.b[1]\n"
- ".inst 0x04b57718 // sqrdmulh z24.s, z24.s, z21.s\n"
- "sdot z26.s, z7.b, z2.b[2]\n"
- "sdot z27.s, z7.b, z2.b[3]\n"
- ".inst 0x04b57739 // sqrdmulh z25.s, z25.s, z21.s\n"
- "sdot z28.s, z6.b, z3.b[0]\n"
- "sdot z29.s, z6.b, z3.b[1]\n"
- ".inst 0x04b5775a // sqrdmulh z26.s, z26.s, z21.s\n"
- "sdot z30.s, z6.b, z3.b[2]\n"
- "sdot z31.s, z6.b, z3.b[3]\n"
- ".inst 0x04b5777b // sqrdmulh z27.s, z27.s, z21.s\n"
- "ld1b { z6.b }, p1/Z, [%x[params], #4, MUL VL]\n"
- "sdot z28.s, z7.b, z4.b[0]\n"
- "sdot z29.s, z7.b, z4.b[1]\n"
- "and z19.d, z24.d, z20.d\n"
- "sdot z30.s, z7.b, z4.b[2]\n"
- "sdot z31.s, z7.b, z4.b[3]\n"
- "and z18.d, z25.d, z20.d\n"
- "ld1b { z7.b }, p1/Z, [%x[params], #5, MUL VL]\n"
- "and z17.d, z26.d, z20.d\n"
- "and z16.d, z27.d, z20.d\n"
+ "sdot z31.s, z8.b, z7.b[0]\n"
+ "ld1w { z22.s }, p2/Z, [%x[params]]\n"
+ "incb x28\n"
+ "sdot z18.s, z8.b, z7.b[1]\n"
+ "ld1w { z21.s }, p2/Z, [%x[params], #1, MUL VL]\n"
+ "whilelt p0.s, x27, %x[n_channels]\n"
+ "sdot z29.s, z8.b, z7.b[2]\n"
+ "whilelt p1.b, x28, x9\n"
+ "ld1w { z1.s }, p1/Z, [%x[params], #2, MUL VL]\n"
+ "sdot z28.s, z8.b, z7.b[3]\n"
+ "sdot z26.s, z8.b, z4.b[0]\n"
+ "sdot z25.s, z8.b, z4.b[1]\n"
+ "sdot z24.s, z8.b, z4.b[2]\n"
+ "sdot z23.s, z8.b, z4.b[3]\n"
+ "ld1b { z8.b }, p1/Z, [%x[params], #3, MUL VL]\n"
+ "sdot z31.s, z9.b, z3.b[0]\n"
+ "sdot z18.s, z9.b, z3.b[1]\n"
+ "sdot z29.s, z9.b, z3.b[2]\n"
+ "sdot z28.s, z9.b, z3.b[3]\n"
+ "sdot z26.s, z9.b, z6.b[0]\n"
+ "sdot z25.s, z9.b, z6.b[1]\n"
+ "sdot z24.s, z9.b, z6.b[2]\n"
+ "sdot z23.s, z9.b, z6.b[3]\n"
+ "ld1b { z9.b }, p1/Z, [%x[params], #4, MUL VL]\n"
+ "sdot z31.s, z10.b, z4.b[0]\n"
+ "sdot z18.s, z10.b, z4.b[1]\n"
+ "sdot z29.s, z10.b, z4.b[2]\n"
+ "sdot z28.s, z10.b, z4.b[3]\n"
+ "sdot z26.s, z10.b, z5.b[0]\n"
+ "sdot z25.s, z10.b, z5.b[1]\n"
+ "sdot z24.s, z10.b, z5.b[2]\n"
+ "sdot z23.s, z10.b, z5.b[3]\n"
+ "ld1b { z10.b }, p1/Z, [%x[params], #5, MUL VL]\n"
"addvl %x[params], %x[params], #6\n"
+ ".inst 0x04b677ff // sqrdmulh z31.s, z31.s, z22.s\n"
+ ".inst 0x04b67652 // sqrdmulh z18.s, z18.s, z22.s\n"
+ ".inst 0x04b677bd // sqrdmulh z29.s, z29.s, z22.s\n"
+ ".inst 0x04b6779c // sqrdmulh z28.s, z28.s, z22.s\n"
+ ".inst 0x04b6775a // sqrdmulh z26.s, z26.s, z22.s\n"
+ "and z20.d, z31.d, z21.d\n"
+ "asr z20.s, z20.s, #0x1f\n"
+ "and z19.d, z18.d, z21.d\n"
+ "and z14.d, z29.d, z21.d\n"
"asr z19.s, z19.s, #0x1f\n"
- "asr z18.s, z18.s, #0x1f\n"
+ "and z17.d, z28.d, z21.d\n"
+ "and z2.d, z26.d, z21.d\n"
+ "asr z14.s, z14.s, #0x1f\n"
+ ".inst 0x04b67739 // sqrdmulh z25.s, z25.s, z22.s\n"
"asr z17.s, z17.s, #0x1f\n"
- "asr z16.s, z16.s, #0x1f\n"
- ".inst 0x04b5779c // sqrdmulh z28.s, z28.s, z21.s\n"
- ".inst 0x04b577bd // sqrdmulh z29.s, z29.s, z21.s\n"
- ".inst 0x04b577de // sqrdmulh z30.s, z30.s, z21.s\n"
- ".inst 0x04b577ff // sqrdmulh z31.s, z31.s, z21.s\n"
- "sqadd z24.s, z24.s, z19.s\n"
- "sqadd z25.s, z25.s, z18.s\n"
- ".inst 0x44828a98 // srshl z24.s, p2/M, z24.s, z20.s\n"
- ".inst 0x44828a99 // srshl z25.s, p2/M, z25.s, z20.s\n"
- "sqadd z26.s, z26.s, z17.s\n"
- "sqadd z27.s, z27.s, z16.s\n"
- ".inst 0x44828a9a // srshl z26.s, p2/M, z26.s, z20.s\n"
- ".inst 0x44828a9b // srshl z27.s, p2/M, z27.s, z20.s\n"
- "and z19.d, z28.d, z20.d\n"
- "and z18.d, z29.d, z20.d\n"
- "and z17.d, z30.d, z20.d\n"
- "and z16.d, z31.d, z20.d\n"
- "asr z19.s, z19.s, #0x1f\n"
- "asr z18.s, z18.s, #0x1f\n"
+ "sqadd z31.s, z31.s, z20.s\n"
+ ".inst 0x04b67718 // sqrdmulh z24.s, z24.s, z22.s\n"
+ "asr z2.s, z2.s, #0x1f\n"
+ ".inst 0x04b676f7 // sqrdmulh z23.s, z23.s, z22.s\n"
+ "sqadd z18.s, z18.s, z19.s\n"
+ "sqadd z29.s, z29.s, z14.s\n"
+ "and z27.d, z25.d, z21.d\n"
+ "asr z27.s, z27.s, #0x1f\n"
+ "sqadd z28.s, z28.s, z17.s\n"
+ "sqadd z26.s, z26.s, z2.s\n"
+ "and z17.d, z24.d, z21.d\n"
"asr z17.s, z17.s, #0x1f\n"
- "asr z16.s, z16.s, #0x1f\n"
- "sqadd z28.s, z28.s, z19.s\n"
- "sqadd z29.s, z29.s, z18.s\n"
- ".inst 0x44828a9c // srshl z28.s, p2/M, z28.s, z20.s\n"
- ".inst 0x44828a9d // srshl z29.s, p2/M, z29.s, z20.s\n"
- "sqadd z30.s, z30.s, z17.s\n"
- "sqadd z31.s, z31.s, z16.s\n"
- ".inst 0x44828a9e // srshl z30.s, p2/M, z30.s, z20.s\n"
- ".inst 0x44828a9f // srshl z31.s, p2/M, z31.s, z20.s\n"
- "add z24.s, z24.s, z12.s\n"
- "add z25.s, z25.s, z12.s\n"
- "smin z24.s, p2/M, z24.s, z10.s\n"
- "smin z25.s, p2/M, z25.s, z10.s\n"
- "add z26.s, z26.s, z12.s\n"
- "add z27.s, z27.s, z12.s\n"
- "smin z26.s, p2/M, z26.s, z10.s\n"
- "smin z27.s, p2/M, z27.s, z10.s\n"
- "add z28.s, z28.s, z12.s\n"
- "add z29.s, z29.s, z12.s\n"
- "smin z28.s, p2/M, z28.s, z10.s\n"
- "smin z29.s, p2/M, z29.s, z10.s\n"
- "add z30.s, z30.s, z12.s\n"
+ "and z15.d, z23.d, z21.d\n"
+ ".inst 0x44828abf // srshl z31.s, p2/M, z31.s, z21.s\n"
+ "asr z15.s, z15.s, #0x1f\n"
+ "sqadd z25.s, z25.s, z27.s\n"
+ ".inst 0x44828ab2 // srshl z18.s, p2/M, z18.s, z21.s\n"
"add z31.s, z31.s, z12.s\n"
- "smin z30.s, p2/M, z30.s, z10.s\n"
- "smin z31.s, p2/M, z31.s, z10.s\n"
- "smax z24.s, p2/M, z24.s, z11.s\n"
- "smax z25.s, p2/M, z25.s, z11.s\n"
- "st1b { z24.s }, p0, [x27, x28]\n"
- "mov z24.s, z22.s[0]\n"
- "smax z26.s, p2/M, z26.s, z11.s\n"
- "smax z27.s, p2/M, z27.s, z11.s\n"
- "st1b { z25.s }, p0, [x26, x28]\n"
- "mov z25.s, z22.s[1]\n"
- "smax z28.s, p2/M, z28.s, z11.s\n"
- "smax z29.s, p2/M, z29.s, z11.s\n"
- "st1b { z26.s }, p0, [x25, x28]\n"
- "mov z26.s, z22.s[2]\n"
- "smax z30.s, p2/M, z30.s, z11.s\n"
- "smax z31.s, p2/M, z31.s, z11.s\n"
- "st1b { z27.s }, p0, [x24, x28]\n"
- "mov z27.s, z22.s[3]\n"
- "st1b { z28.s }, p0, [x23, x28]\n"
- "mov z28.s, z23.s[0]\n"
- "add z24.s, z24.s, z13.s\n"
- "st1b { z29.s }, p0, [x22, x28]\n"
- "mov z29.s, z23.s[1]\n"
- "add z25.s, z25.s, z13.s\n"
- "st1b { z30.s }, p0, [x21, x28]\n"
- "mov z30.s, z23.s[2]\n"
- "add z26.s, z26.s, z13.s\n"
- "st1b { z31.s }, p0, [x20, x28]\n"
- "mov z31.s, z23.s[3]\n"
- "incw x28\n"
- "add z27.s, z27.s, z13.s\n"
- "add z28.s, z28.s, z13.s\n"
- "add z29.s, z29.s, z13.s\n"
- "add z30.s, z30.s, z13.s\n"
- "add z31.s, z31.s, z13.s\n"
+ "sqadd z24.s, z24.s, z17.s\n"
+ ".inst 0x44828abd // srshl z29.s, p2/M, z29.s, z21.s\n"
+ "add z18.s, z18.s, z12.s\n"
+ "sqadd z23.s, z23.s, z15.s\n"
+ "smin z31.s, p2/M, z31.s, z0.s\n"
+ "add z29.s, z29.s, z12.s\n"
+ "smin z18.s, p2/M, z18.s, z0.s\n"
+ ".inst 0x44828abc // srshl z28.s, p2/M, z28.s, z21.s\n"
+ "smax z31.s, p2/M, z31.s, z16.s\n"
+ "st1b { z31.s }, p0, [x26, x27]\n"
+ "add z28.s, z28.s, z12.s\n"
+ "smax z18.s, p2/M, z18.s, z16.s\n"
+ "ld1w { z31.s }, p2/Z, [SP]\n"
+ "smin z29.s, p2/M, z29.s, z0.s\n"
+ "st1b { z18.s }, p0, [x25, x27]\n"
+ "add z31.s, z31.s, z1.s\n"
+ "smin z28.s, p2/M, z28.s, z0.s\n"
+ "ld1w { z18.s }, p2/Z, [SP, #1, MUL VL]\n"
+ "smax z29.s, p2/M, z29.s, z16.s\n"
+ "st1b { z29.s }, p0, [x24, x27]\n"
+ "add z18.s, z18.s, z1.s\n"
+ "smax z28.s, p2/M, z28.s, z16.s\n"
+ "ld1w { z29.s }, p2/Z, [SP, #2, MUL VL]\n"
+ ".inst 0x44828aba // srshl z26.s, p2/M, z26.s, z21.s\n"
+ "st1b { z28.s }, p0, [x23, x27]\n"
+ "add z29.s, z29.s, z1.s\n"
+ ".inst 0x44828ab9 // srshl z25.s, p2/M, z25.s, z21.s\n"
+ "ld1w { z28.s }, p2/Z, [SP, #3, MUL VL]\n"
+ "add z26.s, z26.s, z12.s\n"
+ ".inst 0x44828ab8 // srshl z24.s, p2/M, z24.s, z21.s\n"
+ ".inst 0x44828ab7 // srshl z23.s, p2/M, z23.s, z21.s\n"
+ "add z25.s, z25.s, z12.s\n"
+ "add z28.s, z28.s, z1.s\n"
+ "add z24.s, z24.s, z12.s\n"
+ "add z23.s, z23.s, z12.s\n"
+ "smin z26.s, p2/M, z26.s, z0.s\n"
+ "smin z25.s, p2/M, z25.s, z0.s\n"
+ "smin z24.s, p2/M, z24.s, z0.s\n"
+ "smin z23.s, p2/M, z23.s, z0.s\n"
+ "smax z26.s, p2/M, z26.s, z16.s\n"
+ "st1b { z26.s }, p0, [x22, x27]\n"
+ "smax z25.s, p2/M, z25.s, z16.s\n"
+ "smax z24.s, p2/M, z24.s, z16.s\n"
+ "ld1w { z26.s }, p2/Z, [SP, #4, MUL VL]\n"
+ "smax z23.s, p2/M, z23.s, z16.s\n"
+ "st1b { z25.s }, p0, [x21, x27]\n"
+ "add z26.s, z26.s, z1.s\n"
+ "st1b { z24.s }, p0, [x20, x27]\n"
+ "st1b { z23.s }, p0, [x19, x27]\n"
+ "incw x27\n"
+ "ld1w { z25.s }, p2/Z, [SP, #5, MUL VL]\n"
+ "add z25.s, z25.s, z1.s\n"
+ "ld1w { z24.s }, p2/Z, [SP, #6, MUL VL]\n"
+ "ld1w { z23.s }, p2/Z, [SP, #7, MUL VL]\n"
+ "add z24.s, z24.s, z1.s\n"
+ "add z23.s, z23.s, z1.s\n"
"b.any 1b\n"
+ "addvl SP, SP, #8\n"
: [params] "+&r" (params)
: [inptrs] "r" (inptrs), [n_channels] "r" (n_output_channels), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp)
- : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp
index 2ed7cfc815..6bc5935348 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,358 +41,384 @@ void sve_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst_impl
)
{
__asm__ __volatile__(
+ "mov z20.b, #0x1\n"
+ "ldr x24, [%x[inptrs], #0x0]\n"
+ "ptrue p2.b\n"
+ "mov z22.s, #0x1\n"
+ "ldr x23, [%x[inptrs], #0x8]\n"
+ "lsl x9, %x[n_channels], #0x2\n"
+ "mov z30.s, #0x0\n"
+ "ldr x22, [%x[inptrs], #0x10]\n"
+ "addvl SP, SP, #-8\n"
+ "mov z28.s, #0x0\n"
+ "ldr x21, [%x[inptrs], #0x18]\n"
"mov x20, #0x6\n"
+ "mov z29.s, #0x0\n"
+ "ldr x19, [%x[inptrs], #0x20]\n"
"whilelt p0.b, XZR, x20\n"
- "ldr x22, [%x[inptrs], #0x18]\n"
- "ldr x21, [%x[inptrs], #0x20]\n"
- "ldr x20, [%x[inptrs], #0x10]\n"
- "ld1b { z3.b }, p0/Z, [x22]\n"
- "mov z20.d, z3.d\n"
- "ext z20.b, z20.b, z20.b, #0x1\n"
+ "mov z27.s, #0x0\n"
+ "ld1b { z0.b }, p0/Z, [x24]\n"
+ "mov x28, #0x0\n"
+ "mov z26.s, #0x0\n"
+ "ld1b { z3.b }, p0/Z, [x23]\n"
+ "mov x27, #0x0\n"
+ "mov z25.s, #0x0\n"
+ "ld1b { z5.b }, p0/Z, [x22]\n"
+ "whilelt p1.b, x28, x9\n"
+ "mov z15.d, z0.d\n"
"ld1b { z4.b }, p0/Z, [x21]\n"
- "ldr x24, [%x[inptrs], #0x8]\n"
- "mov z18.d, z4.d\n"
- "ext z18.b, z18.b, z18.b, #0x1\n"
- "ld1b { z2.b }, p0/Z, [x20]\n"
- "ldr x23, [%x[inptrs], #0x28]\n"
- "mov z15.d, z2.d\n"
+ "mov z24.s, #0x0\n"
+ "ld1b { z6.b }, p0/Z, [x19]\n"
"ext z15.b, z15.b, z15.b, #0x1\n"
- "ldr x22, [%x[inptrs], #0x30]\n"
- "ldr x21, [%x[inptrs], #0x38]\n"
- "zip1 z3.d, z3.d, z20.d\n"
- "zip1 z4.d, z4.d, z18.d\n"
- "ldr x20, [%x[inptrs], #0x0]\n"
- "ld1b { z1.b }, p0/Z, [x24]\n"
- "mov z20.d, z1.d\n"
- "ext z20.b, z20.b, z20.b, #0x1\n"
- "ld1b { z5.b }, p0/Z, [x23]\n"
- "ld1b { z6.b }, p0/Z, [x22]\n"
- "mov z13.d, z5.d\n"
- "mov z19.d, z6.d\n"
+ "ldr x21, [%x[inptrs], #0x28]\n"
+ "mov z16.d, z3.d\n"
+ "ldr x20, [%x[inptrs], #0x30]\n"
+ "ext z16.b, z16.b, z16.b, #0x1\n"
+ "ldr x19, [%x[inptrs], #0x38]\n"
+ "mov z18.d, z5.d\n"
"ld1b { z7.b }, p0/Z, [x21]\n"
- "ld1b { z0.b }, p0/Z, [x20]\n"
- "mov z25.d, z7.d\n"
- "zip1 z2.d, z2.d, z15.d\n"
+ "zip1 z0.d, z0.d, z15.d\n"
+ "ld1b { z1.b }, p0/Z, [x20]\n"
+ "mov z0.q, z0.q[0]\n"
+ "ld1b { z2.b }, p0/Z, [x19]\n"
+ "zip1 z3.d, z3.d, z16.d\n"
+ "ld1rw { z15.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
"mov z3.q, z3.q[0]\n"
- "mov z4.q, z4.q[0]\n"
- "ptrue p2.b\n"
- "ld1rw { z23.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
- "ext z13.b, z13.b, z13.b, #0x1\n"
- "ext z19.b, z19.b, z19.b, #0x1\n"
- "lsl x10, %x[n_channels], #0x2\n"
- "neg z23.s, p2/M, z23.s\n"
- "ext z25.b, z25.b, z25.b, #0x1\n"
- "mov z30.b, #0x1\n"
- "mov x9, #0x0\n"
- "whilelt p1.b, x9, x10\n"
- "mov z24.s, #0x0\n"
- "mov z28.s, #0x0\n"
- "sdot z24.s, z30.b, z3.b[0]\n"
- "ld1w { z12.s }, p1/Z, [%x[params]]\n"
- "mov z18.s, #0x0\n"
- "mov z17.s, #0x0\n"
- "sdot z28.s, z30.b, z3.b[2]\n"
- "mov x28, #0x0\n"
- "mov z16.d, z0.d\n"
- "sdot z18.s, z30.b, z4.b[0]\n"
- "sdot z17.s, z30.b, z4.b[2]\n"
- "ldp x27, x26, [%x[outptrs], #0x0]\n"
+ "ldp x26, x25, [%x[outptrs], #0x0]\n"
+ "ext z18.b, z18.b, z18.b, #0x1\n"
+ "ldp x24, x23, [%x[outptrs], #0x10]\n"
+ "mov z16.d, z4.d\n"
+ "ldp x22, x21, [%x[outptrs], #0x20]\n"
"ext z16.b, z16.b, z16.b, #0x1\n"
- "zip1 z1.d, z1.d, z20.d\n"
- "ldp x25, x24, [%x[outptrs], #0x10]\n"
- "ldp x23, x22, [%x[outptrs], #0x20]\n"
- "mov z2.q, z2.q[0]\n"
- "zip1 z5.d, z5.d, z13.d\n"
- "ldp x21, x20, [%x[outptrs], #0x30]\n"
+ "ldp x20, x19, [%x[outptrs], #0x30]\n"
+ "mov z17.d, z6.d\n"
"ld1rw { z14.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
- "zip1 z6.d, z6.d, z19.d\n"
- "zip1 z7.d, z7.d, z25.d\n"
- "ld1rw { z13.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
- "ld1rw { z15.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
- "mov z26.s, #0x0\n"
- "mov z22.s, #0x0\n"
- "sdot z26.s, z30.b, z2.b[0]\n"
+ "zip1 z5.d, z5.d, z18.d\n"
+ "ld1rw { z31.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
+ "mov z5.q, z5.q[0]\n"
+ "ld1rw { z12.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+ "zip1 z4.d, z4.d, z16.d\n"
+ "ld1w { z13.s }, p1/Z, [%x[params]]\n"
+ "mov z4.q, z4.q[0]\n"
"ld1b { z8.b }, p1/Z, [%x[params], #1, MUL VL]\n"
- "mov z29.s, #0x1\n"
- "sdot z22.s, z30.b, z2.b[2]\n"
- "sdot z24.s, z29.b, z3.b[1]\n"
+ "ext z17.b, z17.b, z17.b, #0x1\n"
"ld1b { z9.b }, p1/Z, [%x[params], #2, MUL VL]\n"
- "zip1 z0.d, z0.d, z16.d\n"
- "mov z1.q, z1.q[0]\n"
- "sdot z28.s, z29.b, z3.b[3]\n"
+ "mov z16.d, z7.d\n"
"ld1b { z10.b }, p1/Z, [%x[params], #3, MUL VL]\n"
- "mov z5.q, z5.q[0]\n"
- "mov z6.q, z6.q[0]\n"
- "sdot z18.s, z29.b, z4.b[1]\n"
+ "ext z16.b, z16.b, z16.b, #0x1\n"
"ld1b { z11.b }, p1/Z, [%x[params], #4, MUL VL]\n"
+ "addvl %x[params], %x[params], #5\n"
+ "zip1 z6.d, z6.d, z17.d\n"
+ "mov z17.d, z1.d\n"
+ "mov z6.q, z6.q[0]\n"
+ "zip1 z7.d, z7.d, z16.d\n"
"mov z7.q, z7.q[0]\n"
+ "ext z17.b, z17.b, z17.b, #0x1\n"
+ "mov z16.d, z2.d\n"
+ "ext z16.b, z16.b, z16.b, #0x1\n"
+ "mov z23.s, #0x0\n"
+ "zip1 z1.d, z1.d, z17.d\n"
+ "mov z1.q, z1.q[0]\n"
+ "zip1 z2.d, z2.d, z16.d\n"
+ "mov z2.q, z2.q[0]\n"
+ "mov z18.s, #0x0\n"
+ "mov z17.s, #0x0\n"
+ "mov z16.s, #0x0\n"
"mov z21.s, #0x0\n"
- "sdot z17.s, z29.b, z4.b[3]\n"
- "addvl %x[params], %x[params], #5\n"
- "mov z20.s, #0x0\n"
- "mov z25.s, #0x0\n"
- "sdot z21.s, z30.b, z1.b[0]\n"
- "mov z27.s, #0x0\n"
"mov z19.s, #0x0\n"
- "sdot z20.s, z30.b, z1.b[2]\n"
- "sdot z25.s, z30.b, z5.b[0]\n"
- "sdot z27.s, z30.b, z5.b[2]\n"
- "mov z0.q, z0.q[0]\n"
- "sdot z19.s, z30.b, z6.b[0]\n"
- "sdot z26.s, z29.b, z2.b[1]\n"
+ "sdot z30.s, z20.b, z0.b[0]\n"
+ "sdot z28.s, z20.b, z0.b[2]\n"
+ "sdot z29.s, z20.b, z3.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[2]\n"
+ "sdot z30.s, z22.b, z0.b[1]\n"
+ "sdot z28.s, z22.b, z0.b[3]\n"
+ "sdot z29.s, z22.b, z3.b[1]\n"
+ "sdot z27.s, z22.b, z3.b[3]\n"
+ "sdot z26.s, z20.b, z5.b[0]\n"
+ "sdot z25.s, z20.b, z5.b[2]\n"
+ "sdot z24.s, z20.b, z4.b[0]\n"
+ "sdot z23.s, z20.b, z4.b[2]\n"
+ "sdot z26.s, z22.b, z5.b[1]\n"
+ "sdot z25.s, z22.b, z5.b[3]\n"
+ "sdot z24.s, z22.b, z4.b[1]\n"
+ "sdot z23.s, z22.b, z4.b[3]\n"
+ "sdot z18.s, z20.b, z6.b[0]\n"
+ "sdot z17.s, z20.b, z6.b[2]\n"
+ "sdot z16.s, z20.b, z7.b[0]\n"
+ "sdot z21.s, z20.b, z7.b[2]\n"
+ "sdot z18.s, z22.b, z6.b[1]\n"
+ "sdot z17.s, z22.b, z6.b[3]\n"
+ "sdot z16.s, z22.b, z7.b[1]\n"
+ "sdot z21.s, z22.b, z7.b[3]\n"
+ "sdot z19.s, z20.b, z1.b[0]\n"
+ "mov z30.d, z30.d\n"
+ "mov z28.d, z28.d\n"
+ "add z30.s, z30.s, z29.s\n"
+ "sdot z19.s, z22.b, z1.b[1]\n"
+ "add z28.s, z28.s, z27.s\n"
+ "add z30.s, z30.s, z26.s\n"
+ "mov z29.d, z29.d\n"
+ "add z28.s, z28.s, z25.s\n"
+ "add z30.s, z30.s, z24.s\n"
+ "mov z27.d, z27.d\n"
+ "add z28.s, z28.s, z23.s\n"
+ "add z30.s, z30.s, z18.s\n"
+ "add z29.s, z29.s, z26.s\n"
+ "add z28.s, z28.s, z17.s\n"
+ "add z27.s, z27.s, z25.s\n"
+ "add z29.s, z29.s, z24.s\n"
+ "mov z26.d, z26.d\n"
+ "add z27.s, z27.s, z23.s\n"
+ "add z29.s, z29.s, z18.s\n"
+ "mov z25.d, z25.d\n"
+ "add z27.s, z27.s, z17.s\n"
+ "add z29.s, z29.s, z16.s\n"
+ "add z26.s, z26.s, z24.s\n"
+ "add z27.s, z27.s, z21.s\n"
+ "add z25.s, z25.s, z23.s\n"
+ "add z26.s, z26.s, z18.s\n"
+ "mov z24.d, z24.d\n"
+ "add z25.s, z25.s, z17.s\n"
+ "add z26.s, z26.s, z16.s\n"
+ "mov z23.d, z23.d\n"
+ "add z25.s, z25.s, z21.s\n"
+ "add z26.s, z26.s, z19.s\n"
"add z24.s, z24.s, z18.s\n"
"mov z18.s, #0x0\n"
- "sdot z18.s, z30.b, z6.b[2]\n"
- "sdot z22.s, z29.b, z2.b[3]\n"
- "add z17.s, z28.s, z17.s\n"
- "mov z16.s, #0x0\n"
- "sdot z16.s, z30.b, z7.b[0]\n"
- "sdot z21.s, z29.b, z1.b[1]\n"
- "sdot z20.s, z29.b, z1.b[3]\n"
- "add z28.s, z26.s, z24.s\n"
- "sdot z25.s, z29.b, z5.b[1]\n"
- "sdot z27.s, z29.b, z5.b[3]\n"
- "add z31.s, z22.s, z17.s\n"
- "sdot z19.s, z29.b, z6.b[1]\n"
- "sdot z18.s, z29.b, z6.b[3]\n"
- "add z22.s, z21.s, z28.s\n"
- "sdot z16.s, z29.b, z7.b[1]\n"
- "add z21.s, z20.s, z31.s\n"
- "add z20.s, z25.s, z19.s\n"
- "add z19.s, z27.s, z18.s\n"
- "add z18.s, z16.s, z24.s\n"
- "mov z16.s, #0x0\n"
- "sdot z16.s, z30.b, z7.b[2]\n"
- "sdot z16.s, z29.b, z7.b[3]\n"
- "add z17.s, z16.s, z17.s\n"
- "mov z16.s, #0x0\n"
- "sdot z16.s, z30.b, z0.b[0]\n"
- "sdot z16.s, z29.b, z0.b[1]\n"
- "add z24.s, z22.s, z16.s\n"
- "add z26.s, z22.s, z25.s\n"
- "mul z24.s, p2/M, z24.s, z23.s\n"
- "mul z26.s, p2/M, z26.s, z23.s\n"
+ "sdot z18.s, z20.b, z1.b[2]\n"
+ "add z23.s, z23.s, z17.s\n"
+ "mov z17.s, #0x0\n"
+ "sdot z17.s, z20.b, z2.b[0]\n"
+ "sdot z18.s, z22.b, z1.b[3]\n"
+ "add z24.s, z24.s, z16.s\n"
"mov z16.s, #0x0\n"
- "sdot z16.s, z30.b, z0.b[2]\n"
- "sdot z16.s, z29.b, z0.b[3]\n"
- "add z25.s, z21.s, z16.s\n"
- "add z27.s, z21.s, z27.s\n"
- "mul z25.s, p2/M, z25.s, z23.s\n"
- "mul z27.s, p2/M, z27.s, z23.s\n"
- "add z28.s, z20.s, z28.s\n"
- "add z29.s, z19.s, z31.s\n"
- "mul z28.s, p2/M, z28.s, z23.s\n"
- "mul z29.s, p2/M, z29.s, z23.s\n"
- "add z30.s, z18.s, z20.s\n"
- "add z31.s, z17.s, z19.s\n"
- "mul z30.s, p2/M, z30.s, z23.s\n"
- "mul z31.s, p2/M, z31.s, z23.s\n"
- "zip1 z19.s, z24.s, z26.s\n"
- "zip1 z18.s, z25.s, z27.s\n"
- "zip1 z17.s, z28.s, z30.s\n"
- "zip1 z16.s, z29.s, z31.s\n"
- "zip1 z22.s, z19.s, z18.s\n"
- "zip1 z23.s, z17.s, z16.s\n"
- "add z24.s, z24.s, z12.s\n"
- "add z25.s, z25.s, z12.s\n"
- "add z26.s, z26.s, z12.s\n"
- "add z27.s, z27.s, z12.s\n"
- "add z28.s, z28.s, z12.s\n"
- "add z29.s, z29.s, z12.s\n"
- "add z30.s, z30.s, z12.s\n"
- "add z31.s, z31.s, z12.s\n"
+ "sdot z17.s, z22.b, z2.b[1]\n"
+ "sdot z16.s, z20.b, z2.b[2]\n"
+ "add z25.s, z25.s, z18.s\n"
+ "add z23.s, z23.s, z21.s\n"
+ "add z24.s, z24.s, z19.s\n"
+ "sdot z16.s, z22.b, z2.b[3]\n"
+ "add z23.s, z23.s, z18.s\n"
+ "add z24.s, z24.s, z17.s\n"
+ "neg z15.s, p2/M, z15.s\n"
+ "add z23.s, z23.s, z16.s\n"
+ "mul z30.s, p2/M, z30.s, z15.s\n"
+ "st1w { z30.s }, p2, [SP]\n"
+ "add z30.s, z30.s, z13.s\n"
+ "mul z28.s, p2/M, z28.s, z15.s\n"
+ "st1w { z28.s }, p2, [SP, #1, MUL VL]\n"
+ "add z28.s, z28.s, z13.s\n"
+ "mul z29.s, p2/M, z29.s, z15.s\n"
+ "st1w { z29.s }, p2, [SP, #2, MUL VL]\n"
+ "add z29.s, z29.s, z13.s\n"
+ "mul z27.s, p2/M, z27.s, z15.s\n"
+ "st1w { z27.s }, p2, [SP, #3, MUL VL]\n"
+ "add z27.s, z27.s, z13.s\n"
+ "mul z26.s, p2/M, z26.s, z15.s\n"
+ "st1w { z26.s }, p2, [SP, #4, MUL VL]\n"
+ "add z26.s, z26.s, z13.s\n"
+ "mul z25.s, p2/M, z25.s, z15.s\n"
+ "st1w { z25.s }, p2, [SP, #5, MUL VL]\n"
+ "add z25.s, z25.s, z13.s\n"
+ "mul z24.s, p2/M, z24.s, z15.s\n"
+ "st1w { z24.s }, p2, [SP, #6, MUL VL]\n"
+ "add z24.s, z24.s, z13.s\n"
+ "mul z23.s, p2/M, z23.s, z15.s\n"
+ "st1w { z23.s }, p2, [SP, #7, MUL VL]\n"
+ "add z23.s, z23.s, z13.s\n"
"1:" // Loop
- "sdot z24.s, z8.b, z0.b[0]\n"
- "sdot z25.s, z8.b, z0.b[2]\n"
- "ld1w { z17.s }, p2/Z, [%x[params], #6, MUL VL]\n"
- "ld1w { z19.s }, p2/Z, [%x[params], #7, MUL VL]\n"
- "sdot z26.s, z8.b, z1.b[0]\n"
- "sdot z27.s, z8.b, z1.b[2]\n"
- "incb x9\n"
- "whilelt p0.s, x28, %x[n_channels]\n"
- "sdot z24.s, z9.b, z0.b[1]\n"
- "sdot z25.s, z9.b, z0.b[3]\n"
- "whilelt p1.b, x9, x10\n"
- "sdot z26.s, z9.b, z1.b[1]\n"
- "sdot z27.s, z9.b, z1.b[3]\n"
- "sdot z28.s, z8.b, z2.b[0]\n"
- "sdot z29.s, z8.b, z2.b[2]\n"
- "sdot z30.s, z8.b, z3.b[0]\n"
- "sdot z31.s, z8.b, z3.b[2]\n"
+ "sdot z30.s, z8.b, z0.b[0]\n"
+ "ld1w { z22.s }, p2/Z, [%x[params], #6, MUL VL]\n"
+ "incb x28\n"
+ "sdot z28.s, z8.b, z0.b[2]\n"
+ "ld1w { z21.s }, p2/Z, [%x[params], #7, MUL VL]\n"
+ "whilelt p0.s, x27, %x[n_channels]\n"
+ "sdot z29.s, z8.b, z3.b[0]\n"
+ "whilelt p1.b, x28, x9\n"
+ "sdot z27.s, z8.b, z3.b[2]\n"
+ "sdot z26.s, z8.b, z5.b[0]\n"
+ "sdot z25.s, z8.b, z5.b[2]\n"
+ "sdot z24.s, z8.b, z4.b[0]\n"
+ "sdot z23.s, z8.b, z4.b[2]\n"
"ld1b { z8.b }, p2/Z, [%x[params]]\n"
- "sdot z24.s, z10.b, z1.b[0]\n"
- "sdot z25.s, z10.b, z1.b[2]\n"
- "sdot z26.s, z10.b, z2.b[0]\n"
- "sdot z27.s, z10.b, z2.b[2]\n"
- "sdot z28.s, z9.b, z2.b[1]\n"
- "sdot z29.s, z9.b, z2.b[3]\n"
- "sdot z30.s, z9.b, z3.b[1]\n"
- "sdot z31.s, z9.b, z3.b[3]\n"
+ "sdot z30.s, z9.b, z0.b[1]\n"
+ "sdot z28.s, z9.b, z0.b[3]\n"
+ "sdot z29.s, z9.b, z3.b[1]\n"
+ "sdot z27.s, z9.b, z3.b[3]\n"
+ "sdot z26.s, z9.b, z5.b[1]\n"
+ "sdot z25.s, z9.b, z5.b[3]\n"
+ "sdot z24.s, z9.b, z4.b[1]\n"
+ "sdot z23.s, z9.b, z4.b[3]\n"
"ld1b { z9.b }, p2/Z, [%x[params], #1, MUL VL]\n"
- "sdot z24.s, z11.b, z1.b[1]\n"
- "sdot z25.s, z11.b, z1.b[3]\n"
- "sdot z26.s, z11.b, z2.b[1]\n"
- "sdot z27.s, z11.b, z2.b[3]\n"
- "sdot z28.s, z10.b, z3.b[0]\n"
- "sdot z29.s, z10.b, z3.b[2]\n"
- "sdot z30.s, z10.b, z4.b[0]\n"
- "sdot z31.s, z10.b, z4.b[2]\n"
+ "sdot z30.s, z10.b, z3.b[0]\n"
+ "sdot z28.s, z10.b, z3.b[2]\n"
+ "sdot z29.s, z10.b, z5.b[0]\n"
+ "sdot z27.s, z10.b, z5.b[2]\n"
+ "sdot z26.s, z10.b, z4.b[0]\n"
+ "sdot z25.s, z10.b, z4.b[2]\n"
+ "sdot z24.s, z10.b, z6.b[0]\n"
+ "sdot z23.s, z10.b, z6.b[2]\n"
"ld1b { z10.b }, p2/Z, [%x[params], #2, MUL VL]\n"
- "sdot z24.s, z8.b, z2.b[0]\n"
- "sdot z25.s, z8.b, z2.b[2]\n"
- "sdot z26.s, z8.b, z3.b[0]\n"
- "sdot z27.s, z8.b, z3.b[2]\n"
- "sdot z28.s, z11.b, z3.b[1]\n"
- "sdot z29.s, z11.b, z3.b[3]\n"
- "sdot z30.s, z11.b, z4.b[1]\n"
- "sdot z31.s, z11.b, z4.b[3]\n"
+ "sdot z30.s, z11.b, z3.b[1]\n"
+ "sdot z28.s, z11.b, z3.b[3]\n"
+ "sdot z29.s, z11.b, z5.b[1]\n"
+ "sdot z27.s, z11.b, z5.b[3]\n"
+ "sdot z26.s, z11.b, z4.b[1]\n"
+ "sdot z25.s, z11.b, z4.b[3]\n"
+ "sdot z24.s, z11.b, z6.b[1]\n"
+ "sdot z23.s, z11.b, z6.b[3]\n"
"ld1b { z11.b }, p2/Z, [%x[params], #3, MUL VL]\n"
- "sdot z24.s, z9.b, z2.b[1]\n"
- "sdot z25.s, z9.b, z2.b[3]\n"
- "sdot z26.s, z9.b, z3.b[1]\n"
- "sdot z27.s, z9.b, z3.b[3]\n"
- "sdot z28.s, z8.b, z4.b[0]\n"
- "sdot z29.s, z8.b, z4.b[2]\n"
"sdot z30.s, z8.b, z5.b[0]\n"
- "sdot z31.s, z8.b, z5.b[2]\n"
+ "sdot z28.s, z8.b, z5.b[2]\n"
+ "sdot z29.s, z8.b, z4.b[0]\n"
+ "sdot z27.s, z8.b, z4.b[2]\n"
+ "sdot z26.s, z8.b, z6.b[0]\n"
+ "sdot z25.s, z8.b, z6.b[2]\n"
+ "sdot z24.s, z8.b, z7.b[0]\n"
+ "sdot z23.s, z8.b, z7.b[2]\n"
"ld1b { z8.b }, p2/Z, [%x[params], #4, MUL VL]\n"
- "sdot z24.s, z10.b, z3.b[0]\n"
- "sdot z25.s, z10.b, z3.b[2]\n"
- "sdot z26.s, z10.b, z4.b[0]\n"
- "sdot z27.s, z10.b, z4.b[2]\n"
- "sdot z28.s, z9.b, z4.b[1]\n"
- "sdot z29.s, z9.b, z4.b[3]\n"
"sdot z30.s, z9.b, z5.b[1]\n"
- "sdot z31.s, z9.b, z5.b[3]\n"
+ "sdot z28.s, z9.b, z5.b[3]\n"
+ "sdot z29.s, z9.b, z4.b[1]\n"
+ "sdot z27.s, z9.b, z4.b[3]\n"
+ "sdot z26.s, z9.b, z6.b[1]\n"
+ "sdot z25.s, z9.b, z6.b[3]\n"
+ "sdot z24.s, z9.b, z7.b[1]\n"
+ "sdot z23.s, z9.b, z7.b[3]\n"
"ld1b { z9.b }, p2/Z, [%x[params], #5, MUL VL]\n"
"addvl %x[params], %x[params], #16\n"
- "sdot z24.s, z11.b, z3.b[1]\n"
- "sdot z25.s, z11.b, z3.b[3]\n"
- "ld1w { z12.s }, p1/Z, [%x[params], #-8, MUL VL]\n"
- "sdot z26.s, z11.b, z4.b[1]\n"
- "sdot z27.s, z11.b, z4.b[3]\n"
- "sdot z28.s, z10.b, z5.b[0]\n"
- "sdot z29.s, z10.b, z5.b[2]\n"
- "sdot z30.s, z10.b, z6.b[0]\n"
- "sdot z31.s, z10.b, z6.b[2]\n"
+ "sdot z30.s, z10.b, z4.b[0]\n"
+ "ld1w { z13.s }, p1/Z, [%x[params], #-8, MUL VL]\n"
+ "sdot z28.s, z10.b, z4.b[2]\n"
+ "sdot z29.s, z10.b, z6.b[0]\n"
+ "sdot z27.s, z10.b, z6.b[2]\n"
+ "sdot z26.s, z10.b, z7.b[0]\n"
+ "sdot z25.s, z10.b, z7.b[2]\n"
+ "sdot z24.s, z10.b, z1.b[0]\n"
+ "sdot z23.s, z10.b, z1.b[2]\n"
"ld1b { z10.b }, p1/Z, [%x[params], #-5, MUL VL]\n"
- "sdot z24.s, z8.b, z4.b[0]\n"
- "sdot z25.s, z8.b, z4.b[2]\n"
- "sdot z26.s, z8.b, z5.b[0]\n"
- "sdot z27.s, z8.b, z5.b[2]\n"
- "sdot z28.s, z11.b, z5.b[1]\n"
- "sdot z29.s, z11.b, z5.b[3]\n"
- "sdot z30.s, z11.b, z6.b[1]\n"
- "sdot z31.s, z11.b, z6.b[3]\n"
+ "sdot z30.s, z11.b, z4.b[1]\n"
+ "sdot z28.s, z11.b, z4.b[3]\n"
+ "sdot z29.s, z11.b, z6.b[1]\n"
+ "sdot z27.s, z11.b, z6.b[3]\n"
+ "sdot z26.s, z11.b, z7.b[1]\n"
+ "sdot z25.s, z11.b, z7.b[3]\n"
+ "sdot z24.s, z11.b, z1.b[1]\n"
+ "sdot z23.s, z11.b, z1.b[3]\n"
"ld1b { z11.b }, p1/Z, [%x[params], #-4, MUL VL]\n"
- "sdot z24.s, z9.b, z4.b[1]\n"
- "sdot z25.s, z9.b, z4.b[3]\n"
- ".inst 0x04b17718 // sqrdmulh z24.s, z24.s, z17.s\n"
- "sdot z26.s, z9.b, z5.b[1]\n"
- "sdot z27.s, z9.b, z5.b[3]\n"
- ".inst 0x04b17739 // sqrdmulh z25.s, z25.s, z17.s\n"
- "sdot z28.s, z8.b, z6.b[0]\n"
- "sdot z29.s, z8.b, z6.b[2]\n"
- ".inst 0x04b1775a // sqrdmulh z26.s, z26.s, z17.s\n"
- "sdot z30.s, z8.b, z7.b[0]\n"
- "sdot z31.s, z8.b, z7.b[2]\n"
- ".inst 0x04b1777b // sqrdmulh z27.s, z27.s, z17.s\n"
+ "sdot z30.s, z8.b, z6.b[0]\n"
+ "sdot z28.s, z8.b, z6.b[2]\n"
+ "sdot z29.s, z8.b, z7.b[0]\n"
+ "sdot z27.s, z8.b, z7.b[2]\n"
+ "sdot z26.s, z8.b, z1.b[0]\n"
+ "sdot z25.s, z8.b, z1.b[2]\n"
+ "sdot z24.s, z8.b, z2.b[0]\n"
+ "sdot z23.s, z8.b, z2.b[2]\n"
"ld1b { z8.b }, p1/Z, [%x[params], #-7, MUL VL]\n"
- "sdot z28.s, z9.b, z6.b[1]\n"
- "sdot z29.s, z9.b, z6.b[3]\n"
- "and z16.d, z24.d, z19.d\n"
- "sdot z30.s, z9.b, z7.b[1]\n"
- "sdot z31.s, z9.b, z7.b[3]\n"
- "and z18.d, z25.d, z19.d\n"
+ "sdot z30.s, z9.b, z6.b[1]\n"
+ "sdot z28.s, z9.b, z6.b[3]\n"
+ "sdot z29.s, z9.b, z7.b[1]\n"
+ "sdot z27.s, z9.b, z7.b[3]\n"
+ "sdot z26.s, z9.b, z1.b[1]\n"
+ "sdot z25.s, z9.b, z1.b[3]\n"
+ "sdot z24.s, z9.b, z2.b[1]\n"
+ "sdot z23.s, z9.b, z2.b[3]\n"
"ld1b { z9.b }, p1/Z, [%x[params], #-6, MUL VL]\n"
- "asr z16.s, z16.s, #0x1f\n"
- "asr z18.s, z18.s, #0x1f\n"
"addvl %x[params], %x[params], #-3\n"
- ".inst 0x04b1779c // sqrdmulh z28.s, z28.s, z17.s\n"
- ".inst 0x04b177bd // sqrdmulh z29.s, z29.s, z17.s\n"
- ".inst 0x04b177de // sqrdmulh z30.s, z30.s, z17.s\n"
- ".inst 0x04b177ff // sqrdmulh z31.s, z31.s, z17.s\n"
- "and z17.d, z26.d, z19.d\n"
+ ".inst 0x04b677de // sqrdmulh z30.s, z30.s, z22.s\n"
+ ".inst 0x04b6779c // sqrdmulh z28.s, z28.s, z22.s\n"
+ ".inst 0x04b677bd // sqrdmulh z29.s, z29.s, z22.s\n"
+ ".inst 0x04b6777b // sqrdmulh z27.s, z27.s, z22.s\n"
+ ".inst 0x04b6775a // sqrdmulh z26.s, z26.s, z22.s\n"
+ "and z20.d, z30.d, z21.d\n"
+ "asr z20.s, z20.s, #0x1f\n"
+ "and z19.d, z28.d, z21.d\n"
+ "and z18.d, z29.d, z21.d\n"
+ "asr z19.s, z19.s, #0x1f\n"
+ "and z17.d, z27.d, z21.d\n"
+ "and z16.d, z26.d, z21.d\n"
+ "asr z18.s, z18.s, #0x1f\n"
+ ".inst 0x04b67739 // sqrdmulh z25.s, z25.s, z22.s\n"
"asr z17.s, z17.s, #0x1f\n"
- "sqadd z24.s, z24.s, z16.s\n"
- "and z16.d, z27.d, z19.d\n"
- ".inst 0x44828a78 // srshl z24.s, p2/M, z24.s, z19.s\n"
- "asr z16.s, z16.s, #0x1f\n"
- "sqadd z25.s, z25.s, z18.s\n"
- ".inst 0x44828a79 // srshl z25.s, p2/M, z25.s, z19.s\n"
- "sqadd z26.s, z26.s, z17.s\n"
- "sqadd z27.s, z27.s, z16.s\n"
- ".inst 0x44828a7a // srshl z26.s, p2/M, z26.s, z19.s\n"
- ".inst 0x44828a7b // srshl z27.s, p2/M, z27.s, z19.s\n"
- "and z16.d, z28.d, z19.d\n"
- "and z18.d, z29.d, z19.d\n"
- "and z17.d, z30.d, z19.d\n"
+ "sqadd z30.s, z30.s, z20.s\n"
+ ".inst 0x04b67718 // sqrdmulh z24.s, z24.s, z22.s\n"
"asr z16.s, z16.s, #0x1f\n"
+ ".inst 0x04b676f7 // sqrdmulh z23.s, z23.s, z22.s\n"
+ "sqadd z28.s, z28.s, z19.s\n"
+ "sqadd z29.s, z29.s, z18.s\n"
+ "and z18.d, z25.d, z21.d\n"
"asr z18.s, z18.s, #0x1f\n"
+ "sqadd z27.s, z27.s, z17.s\n"
+ "sqadd z26.s, z26.s, z16.s\n"
+ "and z17.d, z24.d, z21.d\n"
"asr z17.s, z17.s, #0x1f\n"
- "sqadd z28.s, z28.s, z16.s\n"
- "and z16.d, z31.d, z19.d\n"
- ".inst 0x44828a7c // srshl z28.s, p2/M, z28.s, z19.s\n"
+ "and z16.d, z23.d, z21.d\n"
+ ".inst 0x44828abe // srshl z30.s, p2/M, z30.s, z21.s\n"
"asr z16.s, z16.s, #0x1f\n"
- "sqadd z29.s, z29.s, z18.s\n"
- ".inst 0x44828a7d // srshl z29.s, p2/M, z29.s, z19.s\n"
- "sqadd z30.s, z30.s, z17.s\n"
- "sqadd z31.s, z31.s, z16.s\n"
- ".inst 0x44828a7e // srshl z30.s, p2/M, z30.s, z19.s\n"
- ".inst 0x44828a7f // srshl z31.s, p2/M, z31.s, z19.s\n"
- "add z24.s, z24.s, z14.s\n"
- "add z25.s, z25.s, z14.s\n"
- "smin z24.s, p2/M, z24.s, z15.s\n"
- "smin z25.s, p2/M, z25.s, z15.s\n"
- "add z26.s, z26.s, z14.s\n"
- "add z27.s, z27.s, z14.s\n"
- "smin z26.s, p2/M, z26.s, z15.s\n"
- "smin z27.s, p2/M, z27.s, z15.s\n"
+ "sqadd z25.s, z25.s, z18.s\n"
+ ".inst 0x44828abc // srshl z28.s, p2/M, z28.s, z21.s\n"
+ "add z30.s, z30.s, z14.s\n"
+ "sqadd z24.s, z24.s, z17.s\n"
+ ".inst 0x44828abd // srshl z29.s, p2/M, z29.s, z21.s\n"
"add z28.s, z28.s, z14.s\n"
+ "sqadd z23.s, z23.s, z16.s\n"
+ "smin z30.s, p2/M, z30.s, z12.s\n"
"add z29.s, z29.s, z14.s\n"
- "smin z28.s, p2/M, z28.s, z15.s\n"
- "smin z29.s, p2/M, z29.s, z15.s\n"
- "add z30.s, z30.s, z14.s\n"
- "add z31.s, z31.s, z14.s\n"
- "smin z30.s, p2/M, z30.s, z15.s\n"
- "smin z31.s, p2/M, z31.s, z15.s\n"
- "smax z24.s, p2/M, z24.s, z13.s\n"
- "smax z25.s, p2/M, z25.s, z13.s\n"
- "st1b { z24.s }, p0, [x27, x28]\n"
- "mov z24.s, z22.s[0]\n"
- "smax z26.s, p2/M, z26.s, z13.s\n"
- "smax z27.s, p2/M, z27.s, z13.s\n"
- "st1b { z25.s }, p0, [x26, x28]\n"
- "mov z25.s, z22.s[1]\n"
- "smax z28.s, p2/M, z28.s, z13.s\n"
- "smax z29.s, p2/M, z29.s, z13.s\n"
- "st1b { z26.s }, p0, [x25, x28]\n"
- "mov z26.s, z22.s[2]\n"
- "smax z30.s, p2/M, z30.s, z13.s\n"
- "smax z31.s, p2/M, z31.s, z13.s\n"
- "st1b { z27.s }, p0, [x24, x28]\n"
- "mov z27.s, z22.s[3]\n"
- "st1b { z28.s }, p0, [x23, x28]\n"
- "mov z28.s, z23.s[0]\n"
- "add z24.s, z24.s, z12.s\n"
- "st1b { z29.s }, p0, [x22, x28]\n"
- "mov z29.s, z23.s[1]\n"
- "add z25.s, z25.s, z12.s\n"
- "st1b { z30.s }, p0, [x21, x28]\n"
- "mov z30.s, z23.s[2]\n"
- "add z26.s, z26.s, z12.s\n"
- "st1b { z31.s }, p0, [x20, x28]\n"
- "mov z31.s, z23.s[3]\n"
- "incw x28\n"
- "add z27.s, z27.s, z12.s\n"
- "add z28.s, z28.s, z12.s\n"
- "add z29.s, z29.s, z12.s\n"
- "add z30.s, z30.s, z12.s\n"
- "add z31.s, z31.s, z12.s\n"
+ "smin z28.s, p2/M, z28.s, z12.s\n"
+ ".inst 0x44828abb // srshl z27.s, p2/M, z27.s, z21.s\n"
+ "smax z30.s, p2/M, z30.s, z31.s\n"
+ "st1b { z30.s }, p0, [x26, x27]\n"
+ "add z27.s, z27.s, z14.s\n"
+ "smax z28.s, p2/M, z28.s, z31.s\n"
+ "ld1w { z30.s }, p2/Z, [SP]\n"
+ "smin z29.s, p2/M, z29.s, z12.s\n"
+ "st1b { z28.s }, p0, [x25, x27]\n"
+ "add z30.s, z30.s, z13.s\n"
+ "smin z27.s, p2/M, z27.s, z12.s\n"
+ "ld1w { z28.s }, p2/Z, [SP, #1, MUL VL]\n"
+ "smax z29.s, p2/M, z29.s, z31.s\n"
+ "st1b { z29.s }, p0, [x24, x27]\n"
+ "add z28.s, z28.s, z13.s\n"
+ "smax z27.s, p2/M, z27.s, z31.s\n"
+ "ld1w { z29.s }, p2/Z, [SP, #2, MUL VL]\n"
+ ".inst 0x44828aba // srshl z26.s, p2/M, z26.s, z21.s\n"
+ "st1b { z27.s }, p0, [x23, x27]\n"
+ "add z29.s, z29.s, z13.s\n"
+ ".inst 0x44828ab9 // srshl z25.s, p2/M, z25.s, z21.s\n"
+ "ld1w { z27.s }, p2/Z, [SP, #3, MUL VL]\n"
+ "add z26.s, z26.s, z14.s\n"
+ ".inst 0x44828ab8 // srshl z24.s, p2/M, z24.s, z21.s\n"
+ ".inst 0x44828ab7 // srshl z23.s, p2/M, z23.s, z21.s\n"
+ "add z25.s, z25.s, z14.s\n"
+ "add z27.s, z27.s, z13.s\n"
+ "add z24.s, z24.s, z14.s\n"
+ "add z23.s, z23.s, z14.s\n"
+ "smin z26.s, p2/M, z26.s, z12.s\n"
+ "smin z25.s, p2/M, z25.s, z12.s\n"
+ "smin z24.s, p2/M, z24.s, z12.s\n"
+ "smin z23.s, p2/M, z23.s, z12.s\n"
+ "smax z26.s, p2/M, z26.s, z31.s\n"
+ "st1b { z26.s }, p0, [x22, x27]\n"
+ "smax z25.s, p2/M, z25.s, z31.s\n"
+ "smax z24.s, p2/M, z24.s, z31.s\n"
+ "ld1w { z26.s }, p2/Z, [SP, #4, MUL VL]\n"
+ "smax z23.s, p2/M, z23.s, z31.s\n"
+ "st1b { z25.s }, p0, [x21, x27]\n"
+ "add z26.s, z26.s, z13.s\n"
+ "st1b { z24.s }, p0, [x20, x27]\n"
+ "st1b { z23.s }, p0, [x19, x27]\n"
+ "incw x27\n"
+ "ld1w { z25.s }, p2/Z, [SP, #5, MUL VL]\n"
+ "add z25.s, z25.s, z13.s\n"
+ "ld1w { z24.s }, p2/Z, [SP, #6, MUL VL]\n"
+ "ld1w { z23.s }, p2/Z, [SP, #7, MUL VL]\n"
+ "add z24.s, z24.s, z13.s\n"
+ "add z23.s, z23.s, z13.s\n"
"b.any 1b\n"
+ "addvl SP, SP, #8\n"
: [params] "+&r" (params)
: [inptrs] "r" (inptrs), [n_channels] "r" (n_output_channels), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp)
- : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
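
Note on the hunk above: the tail of the loop (sqrdmulh, the and/asr/sqadd sign fix-up, srshl, then the add/smin/smax against the Requantize32 c_offset, maxval and minval values) is the usual fixed-point requantization of the int32 accumulators before they are narrowed and stored. A minimal scalar sketch of one lane follows, assuming per-tensor `multiplier` and `right_shift` parameters; the function names are illustrative, not the library's API.

#include <algorithm>
#include <cstdint>
#include <limits>

// Scalar analogue of SQRDMULH: saturating rounding doubling multiply,
// keeping the high 32 bits of the product.
static int32_t sqrdmulh32(int32_t a, int32_t b)
{
    if (a == std::numeric_limits<int32_t>::min() && a == b)
        return std::numeric_limits<int32_t>::max(); // the one saturating case
    const int64_t ab2 = 2 * static_cast<int64_t>(a) * static_cast<int64_t>(b);
    return static_cast<int32_t>((ab2 + (INT64_C(1) << 31)) >> 32);
}

// Rounding arithmetic shift right (SRSHL with a negative shift operand).
static int32_t srshr32(int32_t x, int shift)
{
    if (shift <= 0) return x;
    return static_cast<int32_t>(
        (static_cast<int64_t>(x) + (INT64_C(1) << (shift - 1))) >> shift);
}

// One output value: rescale the accumulator, add the output offset,
// clamp to the quantized range.
int32_t requantize(int32_t acc, int32_t multiplier, int right_shift,
                   int32_t c_offset, int32_t minval, int32_t maxval)
{
    return std::clamp(srshr32(sqrdmulh32(acc, multiplier), right_shift) + c_offset,
                      minval, maxval);
}

The vector code loads the (negative) per-layer shift alongside the multiplier and applies it with srshl; the and/asr/sqadd triple beforehand is a sign-dependent rounding fix-up, so the simple rounding shift above is close but not necessarily bit-exact on every negative tie.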
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
index 6a432e1961..391e98b561 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,400 +41,352 @@ void sve_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(
)
{
__asm__ __volatile__(
- "mov x13, #0x0\n"
- "whilelt p2.b, x13, %x[n_channels]\n"
- "ldp x12, x11, [%x[inptrs], #0x0]\n"
- "ldp x10, x9, [%x[inptrs], #0x10]\n"
- "ldp x28, x27, [%x[inptrs], #0x20]\n"
- "ldp x26, x25, [%x[inptrs], #0x30]\n"
- "ptrue p1.b\n"
- "mov x24, #0x0\n"
- "ldp x23, x22, [%x[outptrs], #0x0]\n"
- "ldp x21, x20, [%x[outptrs], #0x10]\n"
- "ld1b { z9.b }, p2/Z, [x12, x13]\n"
- "ld1b { z8.b }, p2/Z, [x11, x13]\n"
- "ldp x12, x11, [%x[inptrs], #0x40]\n"
- "ld1b { z7.b }, p2/Z, [x10, x13]\n"
- "zip2 z6.b, z9.b, z7.b\n"
- "zip1 z9.b, z9.b, z7.b\n"
- "ld1b { z5.b }, p2/Z, [x9, x13]\n"
- "ldp x10, x9, [%x[inptrs], #0x50]\n"
- "zip1 z7.b, z8.b, z5.b\n"
- "zip2 z5.b, z8.b, z5.b\n"
- "ld1b { z4.b }, p2/Z, [x28, x13]\n"
- "ld1b { z3.b }, p2/Z, [x27, x13]\n"
- "zip2 z8.b, z9.b, z7.b\n"
- "zip1 z9.b, z9.b, z7.b\n"
- "ldp x28, x27, [%x[inptrs], #0x60]\n"
- "ld1b { z2.b }, p2/Z, [x26, x13]\n"
- "zip1 z7.b, z6.b, z5.b\n"
- "zip2 z5.b, z6.b, z5.b\n"
- "ld1b { z1.b }, p2/Z, [x25, x13]\n"
- "ldp x26, x25, [%x[inptrs], #0x70]\n"
- "zip2 z0.b, z4.b, z2.b\n"
- "zip1 z4.b, z4.b, z2.b\n"
- "ld1b { z31.b }, p2/Z, [x12, x13]\n"
- "ld1b { z30.b }, p2/Z, [x11, x13]\n"
- "zip1 z2.b, z3.b, z1.b\n"
- "zip2 z1.b, z3.b, z1.b\n"
- "ld1b { z29.b }, p2/Z, [x10, x13]\n"
- "ld1b { z28.b }, p2/Z, [x9, x13]\n"
- "zip2 z27.b, z31.b, z29.b\n"
- "zip1 z31.b, z31.b, z29.b\n"
- "ld1b { z26.b }, p2/Z, [x28, x13]\n"
- "ld1b { z25.b }, p2/Z, [x27, x13]\n"
- "zip1 z29.b, z30.b, z28.b\n"
- "zip2 z28.b, z30.b, z28.b\n"
- "ld1b { z24.b }, p2/Z, [x26, x13]\n"
- "ld1b { z23.b }, p2/Z, [x25, x13]\n"
- "zip2 z22.b, z26.b, z24.b\n"
- "zip1 z26.b, z26.b, z24.b\n"
- "zip1 z24.b, z25.b, z23.b\n"
- "zip2 z23.b, z25.b, z23.b\n"
- "ld1w { z6.s }, p1/Z, [%x[params]]\n"
- "ld1rw { z21.s }, p1/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
- "ld1rw { z20.s }, p1/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
- "ld1rw { z19.s }, p1/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
- "zip2 z3.b, z4.b, z2.b\n"
- "zip1 z4.b, z4.b, z2.b\n"
- "ldp x12, x11, [%x[inptrs], #0x0]\n"
- "ldp x10, x9, [%x[inptrs], #0x10]\n"
- "zip1 z2.b, z0.b, z1.b\n"
- "zip2 z1.b, z0.b, z1.b\n"
- "ldp x28, x27, [%x[inptrs], #0x20]\n"
- "ldp x26, x25, [%x[inptrs], #0x30]\n"
- "zip2 z30.b, z31.b, z29.b\n"
- "zip1 z31.b, z31.b, z29.b\n"
- "zip1 z29.b, z27.b, z28.b\n"
- "zip2 z28.b, z27.b, z28.b\n"
- "ld1b { z18.b }, p1/Z, [%x[params], #1, MUL VL]\n"
- "ld1b { z17.b }, p1/Z, [%x[params], #2, MUL VL]\n"
- "zip2 z25.b, z26.b, z24.b\n"
- "zip1 z26.b, z26.b, z24.b\n"
- "ld1b { z16.b }, p1/Z, [%x[params], #3, MUL VL]\n"
- "addvl %x[params], %x[params], #4\n"
- "zip1 z24.b, z22.b, z23.b\n"
- "zip2 z23.b, z22.b, z23.b\n"
- "mov z0.d, z6.d\n"
- "mov z27.d, z6.d\n"
- "mov z22.d, z6.d\n"
+ "ldp x11, x10, [%x[inptrs], #0x0]\n"
+ "ptrue p2.b\n"
+ "ldp x9, x28, [%x[inptrs], #0x10]\n"
+ "addvl SP, SP, #-8\n"
+ "ldp x27, x26, [%x[inptrs], #0x20]\n"
+ "mov x25, #0x0\n"
+ "ldp x24, x23, [%x[inptrs], #0x30]\n"
+ "whilelt p1.b, x25, %x[n_channels]\n"
+ "ldp x22, x21, [%x[outptrs], #0x0]\n"
+ "ldp x20, x19, [%x[outptrs], #0x10]\n"
+ "ld1rw { z6.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
+ "ld1rw { z5.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+ "ld1rw { z4.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
"1:" // Loop
- "sdot z6.s, z18.b, z9.b\n"
- "sdot z27.s, z18.b, z4.b\n"
- "ext z9.b, z9.b, z9.b, #0x1\n"
- "whilelt p0.s, x24, %x[n_channels]\n"
- "sdot z6.s, z17.b, z4.b\n"
- "ext z4.b, z4.b, z4.b, #0x1\n"
- "sdot z0.s, z18.b, z9.b\n"
- "ld1w { z9.s }, p1/Z, [%x[params]]\n"
- "sdot z22.s, z18.b, z4.b\n"
- "sdot z27.s, z17.b, z31.b\n"
- "incw x13, ALL, MUL #4\n"
- "sdot z6.s, z16.b, z31.b\n"
- "ext z31.b, z31.b, z31.b, #0x1\n"
- "sdot z0.s, z17.b, z4.b\n"
- "ld1w { z4.s }, p1/Z, [%x[params], #1, MUL VL]\n"
- "sdot z22.s, z17.b, z31.b\n"
- "sdot z27.s, z16.b, z26.b\n"
+ "ld1b { z19.b }, p1/Z, [x11, x25]\n"
+ "whilelt p0.s, x25, %x[n_channels]\n"
+ "ld1b { z18.b }, p1/Z, [x10, x25]\n"
+ "ldp x11, x10, [%x[inptrs], #0x40]\n"
+ "ld1b { z16.b }, p1/Z, [x9, x25]\n"
+ "zip1 z21.b, z19.b, z16.b\n"
+ "ld1b { z17.b }, p1/Z, [x28, x25]\n"
+ "zip2 z19.b, z19.b, z16.b\n"
+ "ldp x9, x28, [%x[inptrs], #0x50]\n"
+ "ld1b { z23.b }, p1/Z, [x27, x25]\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "ld1b { z20.b }, p1/Z, [x26, x25]\n"
+ "zip2 z18.b, z18.b, z17.b\n"
+ "ldp x27, x26, [%x[inptrs], #0x60]\n"
+ "zip1 z3.b, z21.b, z16.b\n"
+ "ld1b { z17.b }, p1/Z, [x24, x25]\n"
+ "zip2 z2.b, z21.b, z16.b\n"
+ "ld1b { z16.b }, p1/Z, [x23, x25]\n"
+ "zip1 z29.b, z19.b, z18.b\n"
+ "ldp x24, x23, [%x[inptrs], #0x70]\n"
+ "zip2 z28.b, z19.b, z18.b\n"
+ "ld1b { z22.b }, p1/Z, [x11, x25]\n"
+ "zip1 z19.b, z23.b, z17.b\n"
+ "ld1b { z21.b }, p1/Z, [x10, x25]\n"
+ "zip2 z27.b, z23.b, z17.b\n"
+ "ldp x11, x10, [%x[inptrs], #0x0]\n"
+ "zip1 z18.b, z20.b, z16.b\n"
+ "ld1b { z17.b }, p1/Z, [x9, x25]\n"
+ "zip2 z20.b, z20.b, z16.b\n"
+ "ld1b { z16.b }, p1/Z, [x28, x25]\n"
+ "zip1 z1.b, z19.b, z18.b\n"
+ "ldp x9, x28, [%x[inptrs], #0x10]\n"
+ "zip2 z0.b, z19.b, z18.b\n"
+ "ld1b { z19.b }, p1/Z, [x27, x25]\n"
+ "zip1 z26.b, z22.b, z17.b\n"
+ "ld1b { z25.b }, p1/Z, [x26, x25]\n"
+ "zip2 z24.b, z22.b, z17.b\n"
+ "ldp x27, x26, [%x[inptrs], #0x20]\n"
+ "zip1 z23.b, z21.b, z16.b\n"
+ "ld1b { z18.b }, p1/Z, [x24, x25]\n"
+ "zip2 z22.b, z21.b, z16.b\n"
+ "ld1b { z21.b }, p1/Z, [x23, x25]\n"
+ "zip1 z17.b, z27.b, z20.b\n"
+ "ldp x24, x23, [%x[inptrs], #0x30]\n"
+ "zip2 z16.b, z27.b, z20.b\n"
+ "st1b { z29.b }, p2, [SP]\n"
+ "zip1 z20.b, z19.b, z18.b\n"
+ "st1b { z28.b }, p2, [SP, #1, MUL VL]\n"
+ "zip2 z19.b, z19.b, z18.b\n"
+ "st1b { z17.b }, p2, [SP, #2, MUL VL]\n"
+ "zip1 z18.b, z25.b, z21.b\n"
+ "st1b { z16.b }, p2, [SP, #3, MUL VL]\n"
+ "zip2 z17.b, z25.b, z21.b\n"
+ "ld1w { z31.s }, p2/Z, [%x[params]]\n"
+ "zip1 z30.b, z26.b, z23.b\n"
+ "ld1b { z29.b }, p2/Z, [%x[params], #1, MUL VL]\n"
+ "zip2 z28.b, z26.b, z23.b\n"
+ "ld1b { z27.b }, p2/Z, [%x[params], #2, MUL VL]\n"
+ "zip1 z16.b, z24.b, z22.b\n"
+ "st1b { z16.b }, p2, [SP, #4, MUL VL]\n"
+ "zip2 z16.b, z24.b, z22.b\n"
+ "st1b { z16.b }, p2, [SP, #5, MUL VL]\n"
+ "zip1 z26.b, z20.b, z18.b\n"
+ "ld1b { z25.b }, p2/Z, [%x[params], #3, MUL VL]\n"
+ "zip2 z24.b, z20.b, z18.b\n"
+ "ld1w { z23.s }, p2/Z, [%x[params], #4, MUL VL]\n"
+ "zip1 z16.b, z19.b, z17.b\n"
+ "st1b { z16.b }, p2, [SP, #6, MUL VL]\n"
+ "zip2 z16.b, z19.b, z17.b\n"
+ "st1b { z16.b }, p2, [SP, #7, MUL VL]\n"
+ "mov z22.d, z31.d\n"
+ "ld1w { z21.s }, p2/Z, [%x[params], #5, MUL VL]\n"
+ "mov z20.d, z31.d\n"
+ "mov z19.d, z31.d\n"
+ "sdot z31.s, z29.b, z3.b\n"
+ "sdot z20.s, z29.b, z1.b\n"
+ "ext z3.b, z3.b, z3.b, #0x1\n"
+ "sdot z31.s, z27.b, z1.b\n"
+ "ext z1.b, z1.b, z1.b, #0x1\n"
+ "sdot z20.s, z27.b, z30.b\n"
+ "sdot z22.s, z29.b, z3.b\n"
+ "ld1b { z3.b }, p2/Z, [SP]\n"
+ "sdot z31.s, z25.b, z30.b\n"
+ "ext z30.b, z30.b, z30.b, #0x1\n"
+ "sdot z20.s, z25.b, z26.b\n"
"ext z26.b, z26.b, z26.b, #0x1\n"
- ".inst 0x04a974c6 // sqrdmulh z6.s, z6.s, z9.s\n"
- "sdot z0.s, z16.b, z31.b\n"
- "sdot z22.s, z16.b, z26.b\n"
- "and z18.d, z6.d, z4.d\n"
+ "sdot z19.s, z29.b, z1.b\n"
+ "ld1b { z29.b }, p2/Z, [%x[params], #7, MUL VL]\n"
+ "sdot z22.s, z27.b, z1.b\n"
+ "ld1b { z1.b }, p2/Z, [SP, #2, MUL VL]\n"
+ ".inst 0x04b777ff // sqrdmulh z31.s, z31.s, z23.s\n"
+ ".inst 0x04b77694 // sqrdmulh z20.s, z20.s, z23.s\n"
+ "sdot z19.s, z27.b, z30.b\n"
+ "sdot z22.s, z25.b, z30.b\n"
+ "ld1b { z30.b }, p2/Z, [SP, #4, MUL VL]\n"
+ "and z16.d, z31.d, z21.d\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ "sdot z19.s, z25.b, z26.b\n"
+ "ld1b { z26.b }, p2/Z, [SP, #6, MUL VL]\n"
+ ".inst 0x04b776d6 // sqrdmulh z22.s, z22.s, z23.s\n"
+ "and z18.d, z20.d, z21.d\n"
"asr z18.s, z18.s, #0x1f\n"
- ".inst 0x04a97400 // sqrdmulh z0.s, z0.s, z9.s\n"
- ".inst 0x04a9777b // sqrdmulh z27.s, z27.s, z9.s\n"
- ".inst 0x04a976d6 // sqrdmulh z22.s, z22.s, z9.s\n"
- "sqadd z6.s, z6.s, z18.s\n"
- ".inst 0x44828486 // srshl z6.s, p1/M, z6.s, z4.s\n"
- "ld1w { z9.s }, p1/Z, [%x[params], #6, MUL VL]\n"
- "and z17.d, z0.d, z4.d\n"
- "and z16.d, z27.d, z4.d\n"
- "and z18.d, z22.d, z4.d\n"
+ ".inst 0x04b77673 // sqrdmulh z19.s, z19.s, z23.s\n"
+ "sqadd z31.s, z31.s, z16.s\n"
+ "and z17.d, z22.d, z21.d\n"
"asr z17.s, z17.s, #0x1f\n"
+ "and z16.d, z19.d, z21.d\n"
+ "sqadd z20.s, z20.s, z18.s\n"
"asr z16.s, z16.s, #0x1f\n"
+ ".inst 0x44828abf // srshl z31.s, p2/M, z31.s, z21.s\n"
+ "sqadd z22.s, z22.s, z17.s\n"
+ ".inst 0x44828ab4 // srshl z20.s, p2/M, z20.s, z21.s\n"
+ "add z31.s, z31.s, z4.s\n"
+ "sqadd z19.s, z19.s, z16.s\n"
+ "add z20.s, z20.s, z4.s\n"
+ ".inst 0x44828ab6 // srshl z22.s, p2/M, z22.s, z21.s\n"
+ "smax z31.s, p2/M, z31.s, z6.s\n"
+ "smax z20.s, p2/M, z20.s, z6.s\n"
+ ".inst 0x44828ab3 // srshl z19.s, p2/M, z19.s, z21.s\n"
+ "add z22.s, z22.s, z4.s\n"
+ "smin z31.s, p2/M, z31.s, z5.s\n"
+ "st1b { z31.s }, p0, [x22, x25]\n"
+ "add z19.s, z19.s, z4.s\n"
+ "smax z22.s, p2/M, z22.s, z6.s\n"
+ "ld1w { z31.s }, p2/Z, [%x[params], #6, MUL VL]\n"
+ "addvl %x[params], %x[params], #16\n"
+ "smin z20.s, p2/M, z20.s, z5.s\n"
+ "ld1b { z27.b }, p2/Z, [%x[params], #-8, MUL VL]\n"
+ "ld1b { z25.b }, p2/Z, [%x[params], #-7, MUL VL]\n"
+ "smax z19.s, p2/M, z19.s, z6.s\n"
+ "ld1w { z23.s }, p2/Z, [%x[params], #-6, MUL VL]\n"
+ "smin z22.s, p2/M, z22.s, z5.s\n"
+ "ld1w { z21.s }, p2/Z, [%x[params], #-5, MUL VL]\n"
+ "smin z19.s, p2/M, z19.s, z5.s\n"
+ "st1b { z20.s }, p0, [x20, x25]\n"
+ "mov z20.d, z31.d\n"
+ "st1b { z22.s }, p0, [x21, x25]\n"
+ "mov z22.d, z31.d\n"
+ "st1b { z19.s }, p0, [x19, x25]\n"
+ "mov z19.d, z31.d\n"
+ "incw x25\n"
+ "sdot z31.s, z29.b, z2.b\n"
+ "whilelt p0.s, x25, %x[n_channels]\n"
+ "sdot z20.s, z29.b, z0.b\n"
+ "ext z2.b, z2.b, z2.b, #0x1\n"
+ "sdot z31.s, z27.b, z0.b\n"
+ "sdot z20.s, z27.b, z28.b\n"
+ "ext z0.b, z0.b, z0.b, #0x1\n"
+ "sdot z22.s, z29.b, z2.b\n"
+ "ld1b { z2.b }, p2/Z, [SP, #1, MUL VL]\n"
+ "sdot z31.s, z25.b, z28.b\n"
+ "sdot z20.s, z25.b, z24.b\n"
+ "ext z28.b, z28.b, z28.b, #0x1\n"
+ "ext z24.b, z24.b, z24.b, #0x1\n"
+ "sdot z19.s, z29.b, z0.b\n"
+ "ld1b { z29.b }, p2/Z, [%x[params], #-3, MUL VL]\n"
+ "sdot z22.s, z27.b, z0.b\n"
+ "ld1b { z0.b }, p2/Z, [SP, #3, MUL VL]\n"
+ ".inst 0x04b777ff // sqrdmulh z31.s, z31.s, z23.s\n"
+ ".inst 0x04b77694 // sqrdmulh z20.s, z20.s, z23.s\n"
+ "sdot z19.s, z27.b, z28.b\n"
+ "ld1b { z27.b }, p2/Z, [%x[params], #-2, MUL VL]\n"
+ "sdot z22.s, z25.b, z28.b\n"
+ "ld1b { z28.b }, p2/Z, [SP, #5, MUL VL]\n"
+ "and z16.d, z31.d, z21.d\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ "sdot z19.s, z25.b, z24.b\n"
+ "ld1b { z25.b }, p2/Z, [%x[params], #-1, MUL VL]\n"
+ ".inst 0x04b776d6 // sqrdmulh z22.s, z22.s, z23.s\n"
+ "ld1b { z24.b }, p2/Z, [SP, #7, MUL VL]\n"
+ "and z18.d, z20.d, z21.d\n"
"asr z18.s, z18.s, #0x1f\n"
- "sqadd z0.s, z0.s, z17.s\n"
- "sqadd z27.s, z27.s, z16.s\n"
- ".inst 0x44828480 // srshl z0.s, p1/M, z0.s, z4.s\n"
- ".inst 0x4482849b // srshl z27.s, p1/M, z27.s, z4.s\n"
- "sqadd z22.s, z22.s, z18.s\n"
- "add z6.s, z6.s, z19.s\n"
- ".inst 0x44828496 // srshl z22.s, p1/M, z22.s, z4.s\n"
- "smax z6.s, p1/M, z6.s, z21.s\n"
- "add z0.s, z0.s, z19.s\n"
- "add z27.s, z27.s, z19.s\n"
- "smin z6.s, p1/M, z6.s, z20.s\n"
- "smax z0.s, p1/M, z0.s, z21.s\n"
- "add z22.s, z22.s, z19.s\n"
- "smax z27.s, p1/M, z27.s, z21.s\n"
- "smax z22.s, p1/M, z22.s, z21.s\n"
- "st1b { z6.s }, p0, [x23, x24]\n"
- "ld1w { z6.s }, p1/Z, [%x[params], #2, MUL VL]\n"
- "ld1b { z18.b }, p1/Z, [%x[params], #3, MUL VL]\n"
- "smin z0.s, p1/M, z0.s, z20.s\n"
- "smin z27.s, p1/M, z27.s, z20.s\n"
- "smin z22.s, p1/M, z22.s, z20.s\n"
- "st1b { z0.s }, p0, [x22, x24]\n"
- "mov z0.d, z6.d\n"
- "ld1b { z17.b }, p1/Z, [%x[params], #4, MUL VL]\n"
- "st1b { z27.s }, p0, [x21, x24]\n"
- "mov z27.d, z6.d\n"
- "sdot z27.s, z18.b, z3.b\n"
- "ld1b { z16.b }, p1/Z, [%x[params], #5, MUL VL]\n"
- "st1b { z22.s }, p0, [x20, x24]\n"
- "mov z22.d, z6.d\n"
- "sdot z6.s, z18.b, z8.b\n"
- "sdot z6.s, z17.b, z3.b\n"
- "ext z8.b, z8.b, z8.b, #0x1\n"
+ ".inst 0x04b77673 // sqrdmulh z19.s, z19.s, z23.s\n"
+ "ld1w { z23.s }, p2/Z, [%x[params]]\n"
+ "sqadd z31.s, z31.s, z16.s\n"
+ "and z17.d, z22.d, z21.d\n"
+ "asr z17.s, z17.s, #0x1f\n"
+ "and z16.d, z19.d, z21.d\n"
+ "sqadd z20.s, z20.s, z18.s\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ ".inst 0x44828abf // srshl z31.s, p2/M, z31.s, z21.s\n"
+ "sqadd z22.s, z22.s, z17.s\n"
+ ".inst 0x44828ab4 // srshl z20.s, p2/M, z20.s, z21.s\n"
+ "add z31.s, z31.s, z4.s\n"
+ "sqadd z19.s, z19.s, z16.s\n"
+ "add z20.s, z20.s, z4.s\n"
+ ".inst 0x44828ab6 // srshl z22.s, p2/M, z22.s, z21.s\n"
+ "smax z31.s, p2/M, z31.s, z6.s\n"
+ "smax z20.s, p2/M, z20.s, z6.s\n"
+ ".inst 0x44828ab3 // srshl z19.s, p2/M, z19.s, z21.s\n"
+ "ld1w { z21.s }, p2/Z, [%x[params], #1, MUL VL]\n"
+ "add z22.s, z22.s, z4.s\n"
+ "smin z31.s, p2/M, z31.s, z5.s\n"
+ "st1b { z31.s }, p0, [x22, x25]\n"
+ "add z19.s, z19.s, z4.s\n"
+ "smax z22.s, p2/M, z22.s, z6.s\n"
+ "ld1w { z31.s }, p2/Z, [%x[params], #-4, MUL VL]\n"
+ "smin z20.s, p2/M, z20.s, z5.s\n"
+ "st1b { z20.s }, p0, [x20, x25]\n"
+ "mov z20.d, z31.d\n"
+ "smin z22.s, p2/M, z22.s, z5.s\n"
+ "st1b { z22.s }, p0, [x21, x25]\n"
+ "mov z22.d, z31.d\n"
+ "sdot z20.s, z29.b, z1.b\n"
+ "smax z19.s, p2/M, z19.s, z6.s\n"
+ "sdot z20.s, z27.b, z30.b\n"
+ "smin z19.s, p2/M, z19.s, z5.s\n"
+ "st1b { z19.s }, p0, [x19, x25]\n"
+ "mov z19.d, z31.d\n"
+ "incw x25\n"
+ "sdot z31.s, z29.b, z3.b\n"
+ "whilelt p0.s, x25, %x[n_channels]\n"
+ "sdot z20.s, z25.b, z26.b\n"
"ext z3.b, z3.b, z3.b, #0x1\n"
- "sdot z0.s, z18.b, z8.b\n"
- "ld1w { z4.s }, p1/Z, [%x[params], #7, MUL VL]\n"
- "sdot z22.s, z18.b, z3.b\n"
- "sdot z27.s, z17.b, z30.b\n"
- "incw x24\n"
- "whilelt p0.s, x24, %x[n_channels]\n"
- "sdot z6.s, z16.b, z30.b\n"
+ "ext z26.b, z26.b, z26.b, #0x1\n"
+ "sdot z31.s, z27.b, z1.b\n"
+ "ext z1.b, z1.b, z1.b, #0x1\n"
+ "sdot z22.s, z29.b, z3.b\n"
+ ".inst 0x04b77694 // sqrdmulh z20.s, z20.s, z23.s\n"
+ "sdot z31.s, z25.b, z30.b\n"
"ext z30.b, z30.b, z30.b, #0x1\n"
- "sdot z0.s, z17.b, z3.b\n"
- "addvl %x[params], %x[params], #16\n"
- "sdot z22.s, z17.b, z30.b\n"
- "sdot z27.s, z16.b, z25.b\n"
- "ext z25.b, z25.b, z25.b, #0x1\n"
- ".inst 0x04a974c6 // sqrdmulh z6.s, z6.s, z9.s\n"
- "sdot z0.s, z16.b, z30.b\n"
- "sdot z22.s, z16.b, z25.b\n"
- "and z18.d, z6.d, z4.d\n"
+ "sdot z19.s, z29.b, z1.b\n"
+ "ld1b { z29.b }, p2/Z, [%x[params], #3, MUL VL]\n"
+ "sdot z22.s, z27.b, z1.b\n"
+ "and z18.d, z20.d, z21.d\n"
"asr z18.s, z18.s, #0x1f\n"
- ".inst 0x04a97400 // sqrdmulh z0.s, z0.s, z9.s\n"
- ".inst 0x04a9777b // sqrdmulh z27.s, z27.s, z9.s\n"
- ".inst 0x04a976d6 // sqrdmulh z22.s, z22.s, z9.s\n"
- "sqadd z6.s, z6.s, z18.s\n"
- ".inst 0x44828486 // srshl z6.s, p1/M, z6.s, z4.s\n"
- "ld1w { z9.s }, p1/Z, [%x[params], #-4, MUL VL]\n"
- "and z17.d, z0.d, z4.d\n"
- "and z16.d, z27.d, z4.d\n"
- "and z18.d, z22.d, z4.d\n"
+ "sdot z19.s, z27.b, z30.b\n"
+ "ld1b { z27.b }, p2/Z, [%x[params], #4, MUL VL]\n"
+ "sdot z22.s, z25.b, z30.b\n"
+ ".inst 0x04b777ff // sqrdmulh z31.s, z31.s, z23.s\n"
+ "sdot z19.s, z25.b, z26.b\n"
+ "ld1b { z25.b }, p2/Z, [%x[params], #5, MUL VL]\n"
+ "and z16.d, z31.d, z21.d\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ ".inst 0x04b776d6 // sqrdmulh z22.s, z22.s, z23.s\n"
+ "sqadd z20.s, z20.s, z18.s\n"
+ ".inst 0x04b77673 // sqrdmulh z19.s, z19.s, z23.s\n"
+ "ld1w { z23.s }, p2/Z, [%x[params], #6, MUL VL]\n"
+ "and z17.d, z22.d, z21.d\n"
"asr z17.s, z17.s, #0x1f\n"
+ "sqadd z31.s, z31.s, z16.s\n"
+ "and z16.d, z19.d, z21.d\n"
"asr z16.s, z16.s, #0x1f\n"
- "asr z18.s, z18.s, #0x1f\n"
- "sqadd z0.s, z0.s, z17.s\n"
- "sqadd z27.s, z27.s, z16.s\n"
- ".inst 0x44828480 // srshl z0.s, p1/M, z0.s, z4.s\n"
- ".inst 0x4482849b // srshl z27.s, p1/M, z27.s, z4.s\n"
- "sqadd z22.s, z22.s, z18.s\n"
- "add z6.s, z6.s, z19.s\n"
- ".inst 0x44828496 // srshl z22.s, p1/M, z22.s, z4.s\n"
- "smax z6.s, p1/M, z6.s, z21.s\n"
- "add z0.s, z0.s, z19.s\n"
- "add z27.s, z27.s, z19.s\n"
- "smin z6.s, p1/M, z6.s, z20.s\n"
- "smax z0.s, p1/M, z0.s, z21.s\n"
- "add z22.s, z22.s, z19.s\n"
- "smax z27.s, p1/M, z27.s, z21.s\n"
- "smax z22.s, p1/M, z22.s, z21.s\n"
- "st1b { z6.s }, p0, [x23, x24]\n"
- "ld1w { z6.s }, p1/Z, [%x[params], #-8, MUL VL]\n"
- "ld1b { z18.b }, p1/Z, [%x[params], #-7, MUL VL]\n"
- "smin z0.s, p1/M, z0.s, z20.s\n"
- "smin z27.s, p1/M, z27.s, z20.s\n"
- "smin z22.s, p1/M, z22.s, z20.s\n"
- "st1b { z0.s }, p0, [x22, x24]\n"
- "mov z0.d, z6.d\n"
- "ld1b { z17.b }, p1/Z, [%x[params], #-6, MUL VL]\n"
- "st1b { z27.s }, p0, [x21, x24]\n"
- "mov z27.d, z6.d\n"
- "sdot z27.s, z18.b, z2.b\n"
- "ld1b { z16.b }, p1/Z, [%x[params], #-5, MUL VL]\n"
- "st1b { z22.s }, p0, [x20, x24]\n"
- "mov z22.d, z6.d\n"
- "sdot z6.s, z18.b, z7.b\n"
- "sdot z6.s, z17.b, z2.b\n"
- "ext z7.b, z7.b, z7.b, #0x1\n"
+ ".inst 0x44828ab4 // srshl z20.s, p2/M, z20.s, z21.s\n"
+ ".inst 0x44828abf // srshl z31.s, p2/M, z31.s, z21.s\n"
+ "sqadd z22.s, z22.s, z17.s\n"
+ "add z20.s, z20.s, z4.s\n"
+ "add z31.s, z31.s, z4.s\n"
+ "sqadd z19.s, z19.s, z16.s\n"
+ ".inst 0x44828ab6 // srshl z22.s, p2/M, z22.s, z21.s\n"
+ "smax z20.s, p2/M, z20.s, z6.s\n"
+ "smax z31.s, p2/M, z31.s, z6.s\n"
+ ".inst 0x44828ab3 // srshl z19.s, p2/M, z19.s, z21.s\n"
+ "ld1w { z21.s }, p2/Z, [%x[params], #7, MUL VL]\n"
+ "add z22.s, z22.s, z4.s\n"
+ "smin z20.s, p2/M, z20.s, z5.s\n"
+ "st1b { z20.s }, p0, [x20, x25]\n"
+ "add z19.s, z19.s, z4.s\n"
+ "smin z31.s, p2/M, z31.s, z5.s\n"
+ "st1b { z31.s }, p0, [x22, x25]\n"
+ "smax z22.s, p2/M, z22.s, z6.s\n"
+ "smax z19.s, p2/M, z19.s, z6.s\n"
+ "ld1w { z31.s }, p2/Z, [%x[params], #2, MUL VL]\n"
+ "addvl %x[params], %x[params], #8\n"
+ "mov z20.d, z31.d\n"
+ "smin z22.s, p2/M, z22.s, z5.s\n"
+ "st1b { z22.s }, p0, [x21, x25]\n"
+ "mov z22.d, z31.d\n"
+ "sdot z20.s, z29.b, z0.b\n"
+ "smin z19.s, p2/M, z19.s, z5.s\n"
+ "st1b { z19.s }, p0, [x19, x25]\n"
+ "mov z19.d, z31.d\n"
+ "incw x25\n"
+ "sdot z31.s, z29.b, z2.b\n"
+ "whilelt p0.s, x25, %x[n_channels]\n"
+ "sdot z20.s, z27.b, z28.b\n"
"ext z2.b, z2.b, z2.b, #0x1\n"
- "sdot z0.s, z18.b, z7.b\n"
- "ld1w { z4.s }, p1/Z, [%x[params], #-3, MUL VL]\n"
- "sdot z22.s, z18.b, z2.b\n"
- "sdot z27.s, z17.b, z29.b\n"
- "incw x24\n"
- "whilelt p0.s, x24, %x[n_channels]\n"
- "sdot z6.s, z16.b, z29.b\n"
- "ext z29.b, z29.b, z29.b, #0x1\n"
- "sdot z0.s, z17.b, z2.b\n"
- "sdot z22.s, z17.b, z29.b\n"
- "sdot z27.s, z16.b, z24.b\n"
+ "sdot z31.s, z27.b, z0.b\n"
+ "sdot z20.s, z25.b, z24.b\n"
+ "ext z0.b, z0.b, z0.b, #0x1\n"
"ext z24.b, z24.b, z24.b, #0x1\n"
- ".inst 0x04a974c6 // sqrdmulh z6.s, z6.s, z9.s\n"
- "sdot z0.s, z16.b, z29.b\n"
- "sdot z22.s, z16.b, z24.b\n"
- "and z18.d, z6.d, z4.d\n"
- "asr z18.s, z18.s, #0x1f\n"
- ".inst 0x04a97400 // sqrdmulh z0.s, z0.s, z9.s\n"
- ".inst 0x04a9777b // sqrdmulh z27.s, z27.s, z9.s\n"
- ".inst 0x04a976d6 // sqrdmulh z22.s, z22.s, z9.s\n"
- "sqadd z6.s, z6.s, z18.s\n"
- ".inst 0x44828486 // srshl z6.s, p1/M, z6.s, z4.s\n"
- "ld1w { z9.s }, p1/Z, [%x[params], #2, MUL VL]\n"
- "and z17.d, z0.d, z4.d\n"
- "and z16.d, z27.d, z4.d\n"
- "and z18.d, z22.d, z4.d\n"
- "asr z17.s, z17.s, #0x1f\n"
- "asr z16.s, z16.s, #0x1f\n"
- "asr z18.s, z18.s, #0x1f\n"
- "sqadd z0.s, z0.s, z17.s\n"
- "sqadd z27.s, z27.s, z16.s\n"
- ".inst 0x44828480 // srshl z0.s, p1/M, z0.s, z4.s\n"
- ".inst 0x4482849b // srshl z27.s, p1/M, z27.s, z4.s\n"
- "sqadd z22.s, z22.s, z18.s\n"
- "add z6.s, z6.s, z19.s\n"
- ".inst 0x44828496 // srshl z22.s, p1/M, z22.s, z4.s\n"
- "smax z6.s, p1/M, z6.s, z21.s\n"
- "add z0.s, z0.s, z19.s\n"
- "add z27.s, z27.s, z19.s\n"
- "smin z6.s, p1/M, z6.s, z20.s\n"
- "smax z0.s, p1/M, z0.s, z21.s\n"
- "add z22.s, z22.s, z19.s\n"
- "smax z27.s, p1/M, z27.s, z21.s\n"
- "smax z22.s, p1/M, z22.s, z21.s\n"
- "st1b { z6.s }, p0, [x23, x24]\n"
- "ld1w { z6.s }, p1/Z, [%x[params], #-2, MUL VL]\n"
- "ld1b { z18.b }, p1/Z, [%x[params], #-1, MUL VL]\n"
- "smin z0.s, p1/M, z0.s, z20.s\n"
- "smin z27.s, p1/M, z27.s, z20.s\n"
- "smin z22.s, p1/M, z22.s, z20.s\n"
- "st1b { z0.s }, p0, [x22, x24]\n"
- "mov z0.d, z6.d\n"
- "ld1b { z17.b }, p1/Z, [%x[params]]\n"
- "st1b { z27.s }, p0, [x21, x24]\n"
- "mov z27.d, z6.d\n"
- "sdot z27.s, z18.b, z1.b\n"
- "ld1b { z16.b }, p1/Z, [%x[params], #1, MUL VL]\n"
- "st1b { z22.s }, p0, [x20, x24]\n"
- "mov z22.d, z6.d\n"
- "sdot z6.s, z18.b, z5.b\n"
- "sdot z6.s, z17.b, z1.b\n"
- "ext z5.b, z5.b, z5.b, #0x1\n"
- "ext z1.b, z1.b, z1.b, #0x1\n"
- "sdot z0.s, z18.b, z5.b\n"
- "ld1w { z4.s }, p1/Z, [%x[params], #3, MUL VL]\n"
- "sdot z22.s, z18.b, z1.b\n"
- "sdot z27.s, z17.b, z28.b\n"
- "incw x24\n"
- "whilelt p0.s, x24, %x[n_channels]\n"
- "sdot z6.s, z16.b, z28.b\n"
+ "sdot z22.s, z29.b, z2.b\n"
+ "sdot z31.s, z25.b, z28.b\n"
"ext z28.b, z28.b, z28.b, #0x1\n"
- "sdot z0.s, z17.b, z1.b\n"
- "whilelt p2.b, x13, %x[n_channels]\n"
- "sdot z22.s, z17.b, z28.b\n"
- "sdot z27.s, z16.b, z23.b\n"
- "ext z23.b, z23.b, z23.b, #0x1\n"
- "ld1b { z8.b }, p2/Z, [x11, x13]\n"
- ".inst 0x04a974c6 // sqrdmulh z6.s, z6.s, z9.s\n"
- "sdot z0.s, z16.b, z28.b\n"
- "sdot z22.s, z16.b, z23.b\n"
- "ld1b { z7.b }, p2/Z, [x10, x13]\n"
- "and z18.d, z6.d, z4.d\n"
+ "sdot z19.s, z29.b, z0.b\n"
+ "sdot z22.s, z27.b, z0.b\n"
+ ".inst 0x04b777ff // sqrdmulh z31.s, z31.s, z23.s\n"
+ ".inst 0x04b77694 // sqrdmulh z20.s, z20.s, z23.s\n"
+ "sdot z19.s, z27.b, z28.b\n"
+ "sdot z22.s, z25.b, z28.b\n"
+ "and z16.d, z31.d, z21.d\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ "sdot z19.s, z25.b, z24.b\n"
+ ".inst 0x04b776d6 // sqrdmulh z22.s, z22.s, z23.s\n"
+ "and z18.d, z20.d, z21.d\n"
"asr z18.s, z18.s, #0x1f\n"
- "ld1b { z5.b }, p2/Z, [x9, x13]\n"
- "ld1b { z3.b }, p2/Z, [x27, x13]\n"
- ".inst 0x04a97400 // sqrdmulh z0.s, z0.s, z9.s\n"
- ".inst 0x04a9777b // sqrdmulh z27.s, z27.s, z9.s\n"
- "ld1b { z2.b }, p2/Z, [x26, x13]\n"
- "ld1b { z1.b }, p2/Z, [x25, x13]\n"
- ".inst 0x04a976d6 // sqrdmulh z22.s, z22.s, z9.s\n"
- "sqadd z6.s, z6.s, z18.s\n"
- ".inst 0x44828486 // srshl z6.s, p1/M, z6.s, z4.s\n"
- "ld1b { z9.b }, p2/Z, [x12, x13]\n"
- "and z17.d, z0.d, z4.d\n"
- "and z16.d, z27.d, z4.d\n"
- "ldp x12, x11, [%x[inptrs], #0x40]\n"
- "ldp x10, x9, [%x[inptrs], #0x50]\n"
- "and z18.d, z22.d, z4.d\n"
+ "and z17.d, z22.d, z21.d\n"
+ ".inst 0x04b77673 // sqrdmulh z19.s, z19.s, z23.s\n"
"asr z17.s, z17.s, #0x1f\n"
- "ld1b { z31.b }, p2/Z, [x12, x13]\n"
- "ld1b { z30.b }, p2/Z, [x11, x13]\n"
+ "sqadd z31.s, z31.s, z16.s\n"
+ "and z16.d, z19.d, z21.d\n"
"asr z16.s, z16.s, #0x1f\n"
- "asr z18.s, z18.s, #0x1f\n"
- "ld1b { z29.b }, p2/Z, [x10, x13]\n"
- "ld1b { z28.b }, p2/Z, [x9, x13]\n"
- "sqadd z0.s, z0.s, z17.s\n"
- "sqadd z27.s, z27.s, z16.s\n"
- ".inst 0x44828480 // srshl z0.s, p1/M, z0.s, z4.s\n"
- ".inst 0x4482849b // srshl z27.s, p1/M, z27.s, z4.s\n"
- "sqadd z22.s, z22.s, z18.s\n"
- "add z6.s, z6.s, z19.s\n"
- ".inst 0x44828496 // srshl z22.s, p1/M, z22.s, z4.s\n"
- "smax z6.s, p1/M, z6.s, z21.s\n"
- "add z0.s, z0.s, z19.s\n"
- "add z27.s, z27.s, z19.s\n"
- "ld1b { z4.b }, p2/Z, [x28, x13]\n"
- "ldp x28, x27, [%x[inptrs], #0x60]\n"
- "add z22.s, z22.s, z19.s\n"
- "ldp x26, x25, [%x[inptrs], #0x70]\n"
- "smin z6.s, p1/M, z6.s, z20.s\n"
- "smax z0.s, p1/M, z0.s, z21.s\n"
- "smax z27.s, p1/M, z27.s, z21.s\n"
- "smax z22.s, p1/M, z22.s, z21.s\n"
- "st1b { z6.s }, p0, [x23, x24]\n"
- "ld1b { z26.b }, p2/Z, [x28, x13]\n"
- "ld1b { z25.b }, p2/Z, [x27, x13]\n"
- "ld1b { z24.b }, p2/Z, [x26, x13]\n"
- "zip2 z6.b, z9.b, z7.b\n"
- "zip1 z9.b, z9.b, z7.b\n"
- "ld1b { z23.b }, p2/Z, [x25, x13]\n"
- "zip1 z7.b, z8.b, z5.b\n"
- "zip2 z5.b, z8.b, z5.b\n"
- "smin z0.s, p1/M, z0.s, z20.s\n"
- "smin z27.s, p1/M, z27.s, z20.s\n"
- "smin z22.s, p1/M, z22.s, z20.s\n"
- "st1b { z0.s }, p0, [x22, x24]\n"
- "zip2 z8.b, z9.b, z7.b\n"
- "st1b { z27.s }, p0, [x21, x24]\n"
- "zip1 z9.b, z9.b, z7.b\n"
- "zip1 z7.b, z6.b, z5.b\n"
- "ldp x12, x11, [%x[inptrs], #0x0]\n"
- "st1b { z22.s }, p0, [x20, x24]\n"
- "zip2 z5.b, z6.b, z5.b\n"
- "zip2 z0.b, z4.b, z2.b\n"
- "ld1w { z6.s }, p1/Z, [%x[params], #4, MUL VL]\n"
- "zip1 z4.b, z4.b, z2.b\n"
- "zip1 z2.b, z3.b, z1.b\n"
- "incw x24\n"
- "ldp x10, x9, [%x[inptrs], #0x10]\n"
- "zip2 z1.b, z3.b, z1.b\n"
- "zip2 z27.b, z31.b, z29.b\n"
- "ldp x28, x27, [%x[inptrs], #0x20]\n"
- "ldp x26, x25, [%x[inptrs], #0x30]\n"
- "zip1 z31.b, z31.b, z29.b\n"
- "zip1 z29.b, z30.b, z28.b\n"
- "ld1b { z18.b }, p1/Z, [%x[params], #5, MUL VL]\n"
- "ld1b { z17.b }, p1/Z, [%x[params], #6, MUL VL]\n"
- "zip2 z28.b, z30.b, z28.b\n"
- "zip2 z22.b, z26.b, z24.b\n"
- "ld1b { z16.b }, p1/Z, [%x[params], #7, MUL VL]\n"
- "addvl %x[params], %x[params], #8\n"
- "zip1 z26.b, z26.b, z24.b\n"
- "zip1 z24.b, z25.b, z23.b\n"
- "zip2 z23.b, z25.b, z23.b\n"
- "zip2 z3.b, z4.b, z2.b\n"
- "zip1 z4.b, z4.b, z2.b\n"
- "zip1 z2.b, z0.b, z1.b\n"
- "zip2 z1.b, z0.b, z1.b\n"
- "zip2 z30.b, z31.b, z29.b\n"
- "zip1 z31.b, z31.b, z29.b\n"
- "zip1 z29.b, z27.b, z28.b\n"
- "zip2 z28.b, z27.b, z28.b\n"
- "zip2 z25.b, z26.b, z24.b\n"
- "zip1 z26.b, z26.b, z24.b\n"
- "zip1 z24.b, z22.b, z23.b\n"
- "zip2 z23.b, z22.b, z23.b\n"
- "mov z0.d, z6.d\n"
- "mov z27.d, z6.d\n"
- "mov z22.d, z6.d\n"
+ "sqadd z20.s, z20.s, z18.s\n"
+ ".inst 0x44828abf // srshl z31.s, p2/M, z31.s, z21.s\n"
+ "sqadd z22.s, z22.s, z17.s\n"
+ "add z31.s, z31.s, z4.s\n"
+ ".inst 0x44828ab4 // srshl z20.s, p2/M, z20.s, z21.s\n"
+ "sqadd z19.s, z19.s, z16.s\n"
+ ".inst 0x44828ab6 // srshl z22.s, p2/M, z22.s, z21.s\n"
+ "smax z31.s, p2/M, z31.s, z6.s\n"
+ "add z20.s, z20.s, z4.s\n"
+ ".inst 0x44828ab3 // srshl z19.s, p2/M, z19.s, z21.s\n"
+ "add z22.s, z22.s, z4.s\n"
+ "smin z31.s, p2/M, z31.s, z5.s\n"
+ "st1b { z31.s }, p0, [x22, x25]\n"
+ "add z19.s, z19.s, z4.s\n"
+ "smax z22.s, p2/M, z22.s, z6.s\n"
+ "smax z20.s, p2/M, z20.s, z6.s\n"
+ "smax z19.s, p2/M, z19.s, z6.s\n"
+ "smin z22.s, p2/M, z22.s, z5.s\n"
+ "st1b { z22.s }, p0, [x21, x25]\n"
+ "smin z20.s, p2/M, z20.s, z5.s\n"
+ "smin z19.s, p2/M, z19.s, z5.s\n"
+ "st1b { z20.s }, p0, [x20, x25]\n"
+ "st1b { z19.s }, p0, [x19, x25]\n"
+ "incw x25\n"
+ "whilelt p1.b, x25, %x[n_channels]\n"
"b.any 1b\n"
+ "addvl SP, SP, #8\n"
: [params] "+&r" (params)
- : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp)
- : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : [inptrs] "r" (inptrs), [n_channels] "r" ((long unsigned int) n_channels), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp)
+ : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
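
The sve_u8q variant in the next file differs from the symmetric s8qs kernel above (whose operand list carries no b_offset) in that it must also cancel the weight zero point: it builds a constant of byte-ones (mov x19, #0x1; orr #0x100; orr #0x10000; dup z12.s — i.e. 0x00010101, three active bytes per 32-bit lane, matching, it appears, the three taps consumed per dot), udots it against the inputs to obtain per-output input sums, and mls-subtracts sum * b_offset from each accumulator. A scalar sketch of one lane, under that reading of the code and with illustrative function names:

#include <cstdint>

// One 32-bit lane of "udot acc, ones, input": a dot product of four
// unsigned bytes. With the 0x00010101 constant the zero high byte masks
// the unused fourth position, so the lane sums three input bytes.
static int32_t udot_lane(int32_t acc, uint32_t ones, uint32_t input)
{
    for (int b = 0; b < 4; ++b)
        acc += static_cast<int32_t>((ones  >> (8 * b)) & 0xff)
             * static_cast<int32_t>((input >> (8 * b)) & 0xff);
    return acc;
}

// The correction then applied with mls: acc -= b_offset * sum_of_inputs,
// removing the contribution of the weight zero point.
static int32_t apply_b_offset(int32_t acc, int32_t input_sum, int32_t b_offset)
{
    return acc - b_offset * input_sum;
}

// Tiny check: bytes 1+2+3 under the three-ones mask sum to 6.
int main() { return apply_b_offset(udot_lane(0, 0x00010101u, 0x04030201u), 0, 0) != 6; }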
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
index 257c4d44dc..440f57ed00 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,461 +41,421 @@ void sve_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(
)
{
__asm__ __volatile__(
- "mov x13, #0x0\n"
- "whilelt p2.b, x13, %x[n_channels]\n"
- "ldp x12, x11, [%x[inptrs], #0x0]\n"
- "ldp x10, x9, [%x[inptrs], #0x10]\n"
- "ldp x28, x27, [%x[inptrs], #0x20]\n"
- "ldp x26, x25, [%x[inptrs], #0x30]\n"
- "mov x20, #0x1\n"
- "ptrue p1.b\n"
- "ldp x24, x23, [%x[outptrs], #0x0]\n"
- "ldp x22, x21, [%x[outptrs], #0x10]\n"
- "orr x20, x20, #0x100\n"
- "orr x20, x20, #0x10000\n"
- "ld1b { z14.b }, p2/Z, [x12, x13]\n"
- "ld1b { z13.b }, p2/Z, [x11, x13]\n"
- "dup z12.s, w20\n"
- "mov x20, #0x0\n"
- "ldp x12, x11, [%x[inptrs], #0x40]\n"
- "ld1b { z11.b }, p2/Z, [x10, x13]\n"
- "zip2 z10.b, z14.b, z11.b\n"
- "zip1 z14.b, z14.b, z11.b\n"
- "ld1b { z9.b }, p2/Z, [x9, x13]\n"
- "ldp x10, x9, [%x[inptrs], #0x50]\n"
- "zip1 z11.b, z13.b, z9.b\n"
- "zip2 z9.b, z13.b, z9.b\n"
- "ld1b { z8.b }, p2/Z, [x28, x13]\n"
- "ld1b { z7.b }, p2/Z, [x27, x13]\n"
- "zip2 z13.b, z14.b, z11.b\n"
- "zip1 z14.b, z14.b, z11.b\n"
- "ldp x28, x27, [%x[inptrs], #0x60]\n"
- "ld1b { z6.b }, p2/Z, [x26, x13]\n"
- "zip1 z11.b, z10.b, z9.b\n"
- "zip2 z9.b, z10.b, z9.b\n"
- "ld1b { z5.b }, p2/Z, [x25, x13]\n"
- "ldp x26, x25, [%x[inptrs], #0x70]\n"
- "zip2 z4.b, z8.b, z6.b\n"
- "zip1 z8.b, z8.b, z6.b\n"
- "ld1b { z3.b }, p2/Z, [x12, x13]\n"
- "ld1b { z2.b }, p2/Z, [x11, x13]\n"
- "zip1 z6.b, z7.b, z5.b\n"
- "zip2 z5.b, z7.b, z5.b\n"
- "ld1b { z1.b }, p2/Z, [x10, x13]\n"
- "ld1b { z0.b }, p2/Z, [x9, x13]\n"
- "zip2 z31.b, z3.b, z1.b\n"
- "zip1 z3.b, z3.b, z1.b\n"
- "ld1b { z30.b }, p2/Z, [x28, x13]\n"
- "ld1b { z29.b }, p2/Z, [x27, x13]\n"
- "zip1 z1.b, z2.b, z0.b\n"
- "zip2 z0.b, z2.b, z0.b\n"
- "ld1b { z28.b }, p2/Z, [x26, x13]\n"
- "ld1b { z27.b }, p2/Z, [x25, x13]\n"
- "zip2 z26.b, z30.b, z28.b\n"
- "zip1 z30.b, z30.b, z28.b\n"
- "zip1 z28.b, z29.b, z27.b\n"
- "zip2 z27.b, z29.b, z27.b\n"
- "ld1w { z10.s }, p1/Z, [%x[params]]\n"
- "ld1rw { z25.s }, p1/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
- "ld1rw { z24.s }, p1/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
- "ld1rw { z23.s }, p1/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
- "zip2 z7.b, z8.b, z6.b\n"
- "zip1 z8.b, z8.b, z6.b\n"
- "ld1rw { z22.s }, p1/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
- "ldp x12, x11, [%x[inptrs], #0x0]\n"
- "zip1 z6.b, z4.b, z5.b\n"
- "zip2 z5.b, z4.b, z5.b\n"
- "ldp x10, x9, [%x[inptrs], #0x10]\n"
- "ldp x28, x27, [%x[inptrs], #0x20]\n"
- "zip2 z2.b, z3.b, z1.b\n"
- "zip1 z3.b, z3.b, z1.b\n"
- "ldp x26, x25, [%x[inptrs], #0x30]\n"
- "zip1 z1.b, z31.b, z0.b\n"
- "zip2 z0.b, z31.b, z0.b\n"
- "ld1b { z21.b }, p1/Z, [%x[params], #1, MUL VL]\n"
- "zip2 z29.b, z30.b, z28.b\n"
- "zip1 z30.b, z30.b, z28.b\n"
- "ld1b { z16.b }, p1/Z, [%x[params], #2, MUL VL]\n"
- "ld1b { z20.b }, p1/Z, [%x[params], #3, MUL VL]\n"
- "zip1 z28.b, z26.b, z27.b\n"
- "zip2 z27.b, z26.b, z27.b\n"
- "addvl %x[params], %x[params], #4\n"
- "mov z4.d, z10.d\n"
- "mov z31.d, z10.d\n"
- "mov z26.d, z10.d\n"
+ "ldp x11, x10, [%x[inptrs], #0x0]\n"
+ "ptrue p2.b\n"
+ "ldp x9, x28, [%x[inptrs], #0x10]\n"
+ "addvl SP, SP, #-8\n"
+ "ldp x27, x26, [%x[inptrs], #0x20]\n"
+ "mov x19, #0x1\n"
+ "ldp x25, x24, [%x[inptrs], #0x30]\n"
+ "orr x19, x19, #0x100\n"
+ "ldp x23, x22, [%x[outptrs], #0x0]\n"
+ "orr x19, x19, #0x10000\n"
+ "dup z12.s, w19\n"
+ "ldp x21, x20, [%x[outptrs], #0x10]\n"
+ "mov x19, #0x0\n"
+ "ld1rw { z11.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
+ "whilelt p1.b, x19, %x[n_channels]\n"
+ "ld1rw { z10.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+ "ld1rw { z9.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+ "ld1rw { z8.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
"1:" // Loop
- "mov z19.s, #0x0\n"
- "udot z19.s, z12.b, z8.b\n"
- "udot z10.s, z21.b, z14.b\n"
- "whilelt p0.s, x20, %x[n_channels]\n"
- "udot z19.s, z12.b, z3.b\n"
- "udot z31.s, z21.b, z8.b\n"
- "incw x13, ALL, MUL #4\n"
- "udot z10.s, z16.b, z8.b\n"
- "ext z8.b, z8.b, z8.b, #0x1\n"
- "movprfx z18, z19\n udot z18.s, z12.b, z30.b\n"
- "udot z19.s, z12.b, z14.b\n"
- "ext z14.b, z14.b, z14.b, #0x1\n"
- "udot z31.s, z16.b, z3.b\n"
- "udot z10.s, z20.b, z3.b\n"
+ "mov z7.s, #0x0\n"
+ "ld1b { z19.b }, p1/Z, [x11, x19]\n"
+ "whilelt p0.s, x19, %x[n_channels]\n"
+ "mov z6.s, #0x0\n"
+ "ld1b { z18.b }, p1/Z, [x10, x19]\n"
+ "ldp x11, x10, [%x[inptrs], #0x40]\n"
+ "ld1b { z16.b }, p1/Z, [x9, x19]\n"
+ "zip1 z21.b, z19.b, z16.b\n"
+ "ld1b { z17.b }, p1/Z, [x28, x19]\n"
+ "zip2 z19.b, z19.b, z16.b\n"
+ "ldp x9, x28, [%x[inptrs], #0x50]\n"
+ "ld1b { z23.b }, p1/Z, [x27, x19]\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "ld1b { z20.b }, p1/Z, [x26, x19]\n"
+ "zip2 z18.b, z18.b, z17.b\n"
+ "ldp x27, x26, [%x[inptrs], #0x60]\n"
+ "zip1 z5.b, z21.b, z16.b\n"
+ "ld1b { z17.b }, p1/Z, [x25, x19]\n"
+ "zip2 z4.b, z21.b, z16.b\n"
+ "ld1b { z16.b }, p1/Z, [x24, x19]\n"
+ "zip1 z29.b, z19.b, z18.b\n"
+ "ldp x25, x24, [%x[inptrs], #0x70]\n"
+ "zip2 z28.b, z19.b, z18.b\n"
+ "ld1b { z22.b }, p1/Z, [x11, x19]\n"
+ "zip1 z19.b, z23.b, z17.b\n"
+ "ld1b { z21.b }, p1/Z, [x10, x19]\n"
+ "zip2 z27.b, z23.b, z17.b\n"
+ "ldp x11, x10, [%x[inptrs], #0x0]\n"
+ "zip1 z18.b, z20.b, z16.b\n"
+ "ld1b { z17.b }, p1/Z, [x9, x19]\n"
+ "zip2 z20.b, z20.b, z16.b\n"
+ "ld1b { z16.b }, p1/Z, [x28, x19]\n"
+ "zip1 z3.b, z19.b, z18.b\n"
+ "ldp x9, x28, [%x[inptrs], #0x10]\n"
+ "zip2 z2.b, z19.b, z18.b\n"
+ "ld1b { z19.b }, p1/Z, [x27, x19]\n"
+ "zip1 z26.b, z22.b, z17.b\n"
+ "ld1b { z25.b }, p1/Z, [x26, x19]\n"
+ "zip2 z24.b, z22.b, z17.b\n"
+ "ldp x27, x26, [%x[inptrs], #0x20]\n"
+ "zip1 z23.b, z21.b, z16.b\n"
+ "ld1b { z18.b }, p1/Z, [x25, x19]\n"
+ "zip2 z22.b, z21.b, z16.b\n"
+ "ld1b { z21.b }, p1/Z, [x24, x19]\n"
+ "zip1 z17.b, z27.b, z20.b\n"
+ "ldp x25, x24, [%x[inptrs], #0x30]\n"
+ "zip2 z16.b, z27.b, z20.b\n"
+ "st1b { z29.b }, p2, [SP]\n"
+ "zip1 z20.b, z19.b, z18.b\n"
+ "st1b { z28.b }, p2, [SP, #1, MUL VL]\n"
+ "zip2 z19.b, z19.b, z18.b\n"
+ "st1b { z17.b }, p2, [SP, #2, MUL VL]\n"
+ "zip1 z18.b, z25.b, z21.b\n"
+ "st1b { z16.b }, p2, [SP, #3, MUL VL]\n"
+ "zip2 z17.b, z25.b, z21.b\n"
+ "ld1w { z1.s }, p2/Z, [%x[params]]\n"
+ "zip1 z0.b, z26.b, z23.b\n"
+ "ld1b { z31.b }, p2/Z, [%x[params], #1, MUL VL]\n"
+ "zip2 z30.b, z26.b, z23.b\n"
+ "ld1b { z29.b }, p2/Z, [%x[params], #2, MUL VL]\n"
+ "zip1 z16.b, z24.b, z22.b\n"
+ "st1b { z16.b }, p2, [SP, #4, MUL VL]\n"
+ "zip2 z16.b, z24.b, z22.b\n"
+ "st1b { z16.b }, p2, [SP, #5, MUL VL]\n"
+ "zip1 z28.b, z20.b, z18.b\n"
+ "ld1b { z27.b }, p2/Z, [%x[params], #3, MUL VL]\n"
+ "zip2 z26.b, z20.b, z18.b\n"
+ "ld1w { z25.s }, p2/Z, [%x[params], #4, MUL VL]\n"
+ "zip1 z16.b, z19.b, z17.b\n"
+ "st1b { z16.b }, p2, [SP, #6, MUL VL]\n"
+ "zip2 z16.b, z19.b, z17.b\n"
+ "st1b { z16.b }, p2, [SP, #7, MUL VL]\n"
+ "mov z24.d, z1.d\n"
+ "ld1w { z23.s }, p2/Z, [%x[params], #5, MUL VL]\n"
+ "mov z22.d, z1.d\n"
+ "mov z21.d, z1.d\n"
+ "udot z1.s, z31.b, z5.b\n"
+ "udot z22.s, z31.b, z3.b\n"
+ "udot z7.s, z12.b, z3.b\n"
+ "udot z1.s, z29.b, z3.b\n"
"ext z3.b, z3.b, z3.b, #0x1\n"
- "udot z4.s, z21.b, z14.b\n"
- "udot z26.s, z21.b, z8.b\n"
- "mov z17.s, #0x0\n"
- "udot z17.s, z12.b, z8.b\n"
- "udot z17.s, z12.b, z3.b\n"
- "udot z31.s, z20.b, z30.b\n"
- "ext z30.b, z30.b, z30.b, #0x1\n"
- "udot z4.s, z16.b, z8.b\n"
- "udot z26.s, z16.b, z3.b\n"
- "ld1w { z8.s }, p1/Z, [%x[params], #1, MUL VL]\n"
- "mls z10.s, p1/M, z19.s, z23.s\n"
- "movprfx z16, z17\n udot z16.s, z12.b, z30.b\n"
- "mov z19.s, #0x0\n"
- "udot z17.s, z12.b, z14.b\n"
- "ld1w { z14.s }, p1/Z, [%x[params]]\n"
- "udot z4.s, z20.b, z3.b\n"
- ".inst 0x04ae754a // sqrdmulh z10.s, z10.s, z14.s\n"
- "udot z26.s, z20.b, z30.b\n"
- "mls z4.s, p1/M, z17.s, z23.s\n"
- "and z21.d, z10.d, z8.d\n"
- "mls z31.s, p1/M, z18.s, z23.s\n"
- "mls z26.s, p1/M, z16.s, z23.s\n"
- "asr z21.s, z21.s, #0x1f\n"
- ".inst 0x04ae7484 // sqrdmulh z4.s, z4.s, z14.s\n"
- ".inst 0x04ae77ff // sqrdmulh z31.s, z31.s, z14.s\n"
- "udot z19.s, z12.b, z7.b\n"
- ".inst 0x04ae775a // sqrdmulh z26.s, z26.s, z14.s\n"
- "sqadd z10.s, z10.s, z21.s\n"
- ".inst 0x4482850a // srshl z10.s, p1/M, z10.s, z8.s\n"
- "udot z19.s, z12.b, z2.b\n"
- "and z16.d, z4.d, z8.d\n"
- "and z20.d, z31.d, z8.d\n"
- "movprfx z18, z19\n udot z18.s, z12.b, z29.b\n"
- "ld1w { z14.s }, p1/Z, [%x[params], #6, MUL VL]\n"
- "and z21.d, z26.d, z8.d\n"
+ "udot z22.s, z29.b, z0.b\n"
+ "udot z7.s, z12.b, z0.b\n"
+ "udot z1.s, z27.b, z0.b\n"
+ "ext z0.b, z0.b, z0.b, #0x1\n"
+ "udot z22.s, z27.b, z28.b\n"
+ "mov z20.d, z7.d\n"
+ "udot z7.s, z12.b, z5.b\n"
+ "udot z20.s, z12.b, z28.b\n"
+ "ext z5.b, z5.b, z5.b, #0x1\n"
+ "ext z28.b, z28.b, z28.b, #0x1\n"
+ "udot z21.s, z31.b, z3.b\n"
+ "udot z6.s, z12.b, z3.b\n"
+ "udot z24.s, z31.b, z5.b\n"
+ "ld1b { z31.b }, p2/Z, [%x[params], #7, MUL VL]\n"
+ "mls z1.s, p2/M, z7.s, z9.s\n"
+ "udot z21.s, z29.b, z0.b\n"
+ "udot z6.s, z12.b, z0.b\n"
+ "udot z24.s, z29.b, z3.b\n"
+ "ld1b { z3.b }, p2/Z, [SP, #2, MUL VL]\n"
+ ".inst 0x04b97421 // sqrdmulh z1.s, z1.s, z25.s\n"
+ "udot z21.s, z27.b, z28.b\n"
+ "mov z19.d, z6.d\n"
+ "udot z24.s, z27.b, z0.b\n"
+ "ld1b { z0.b }, p2/Z, [SP, #4, MUL VL]\n"
+ "udot z6.s, z12.b, z5.b\n"
+ "ld1b { z5.b }, p2/Z, [SP]\n"
+ "udot z19.s, z12.b, z28.b\n"
+ "ld1b { z28.b }, p2/Z, [SP, #6, MUL VL]\n"
+ "and z16.d, z1.d, z23.d\n"
"asr z16.s, z16.s, #0x1f\n"
- "udot z19.s, z12.b, z13.b\n"
- "asr z20.s, z20.s, #0x1f\n"
- "asr z21.s, z21.s, #0x1f\n"
- "sqadd z4.s, z4.s, z16.s\n"
- "sqadd z31.s, z31.s, z20.s\n"
- ".inst 0x44828504 // srshl z4.s, p1/M, z4.s, z8.s\n"
- ".inst 0x4482851f // srshl z31.s, p1/M, z31.s, z8.s\n"
- "sqadd z26.s, z26.s, z21.s\n"
- "add z10.s, z10.s, z22.s\n"
- ".inst 0x4482851a // srshl z26.s, p1/M, z26.s, z8.s\n"
- "smax z10.s, p1/M, z10.s, z25.s\n"
- "add z4.s, z4.s, z22.s\n"
- "add z31.s, z31.s, z22.s\n"
- "smin z10.s, p1/M, z10.s, z24.s\n"
- "smax z4.s, p1/M, z4.s, z25.s\n"
- "add z26.s, z26.s, z22.s\n"
- "smax z31.s, p1/M, z31.s, z25.s\n"
- "smax z26.s, p1/M, z26.s, z25.s\n"
- "st1b { z10.s }, p0, [x24, x20]\n"
- "ld1w { z10.s }, p1/Z, [%x[params], #2, MUL VL]\n"
- "ld1b { z21.b }, p1/Z, [%x[params], #3, MUL VL]\n"
- "smin z4.s, p1/M, z4.s, z24.s\n"
- "smin z31.s, p1/M, z31.s, z24.s\n"
- "smin z26.s, p1/M, z26.s, z24.s\n"
- "st1b { z4.s }, p0, [x23, x20]\n"
- "mov z4.d, z10.d\n"
- "ld1b { z16.b }, p1/Z, [%x[params], #4, MUL VL]\n"
- "st1b { z31.s }, p0, [x22, x20]\n"
- "mov z31.d, z10.d\n"
- "udot z31.s, z21.b, z7.b\n"
- "ld1b { z20.b }, p1/Z, [%x[params], #5, MUL VL]\n"
- "st1b { z26.s }, p0, [x21, x20]\n"
- "mov z26.d, z10.d\n"
- "udot z10.s, z21.b, z13.b\n"
- "udot z10.s, z16.b, z7.b\n"
- "ext z13.b, z13.b, z13.b, #0x1\n"
- "ext z7.b, z7.b, z7.b, #0x1\n"
- "udot z4.s, z21.b, z13.b\n"
- "ld1w { z8.s }, p1/Z, [%x[params], #7, MUL VL]\n"
- "mov z17.s, #0x0\n"
- "udot z26.s, z21.b, z7.b\n"
- "udot z17.s, z12.b, z7.b\n"
- "incw x20\n"
- "udot z31.s, z16.b, z2.b\n"
- "udot z10.s, z20.b, z2.b\n"
- "ext z2.b, z2.b, z2.b, #0x1\n"
- "whilelt p0.s, x20, %x[n_channels]\n"
- "udot z4.s, z16.b, z7.b\n"
- "udot z26.s, z16.b, z2.b\n"
+ "mov z7.s, #0x0\n"
+ "mls z24.s, p2/M, z6.s, z9.s\n"
+ "udot z7.s, z12.b, z2.b\n"
+ "mov z6.s, #0x0\n"
+ "mls z22.s, p2/M, z20.s, z9.s\n"
+ ".inst 0x04b97718 // sqrdmulh z24.s, z24.s, z25.s\n"
+ "sqadd z1.s, z1.s, z16.s\n"
+ "udot z7.s, z12.b, z30.b\n"
+ ".inst 0x04b976d6 // sqrdmulh z22.s, z22.s, z25.s\n"
+ "and z18.d, z24.d, z23.d\n"
+ "asr z18.s, z18.s, #0x1f\n"
+ "and z17.d, z22.d, z23.d\n"
+ "mov z20.d, z7.d\n"
+ "asr z17.s, z17.s, #0x1f\n"
+ "udot z7.s, z12.b, z4.b\n"
+ "udot z20.s, z12.b, z26.b\n"
+ "mls z21.s, p2/M, z19.s, z9.s\n"
+ "sqadd z24.s, z24.s, z18.s\n"
+ ".inst 0x44828ae1 // srshl z1.s, p2/M, z1.s, z23.s\n"
+ "sqadd z22.s, z22.s, z17.s\n"
+ ".inst 0x04b976b5 // sqrdmulh z21.s, z21.s, z25.s\n"
+ ".inst 0x44828af8 // srshl z24.s, p2/M, z24.s, z23.s\n"
+ "add z1.s, z1.s, z8.s\n"
+ "and z16.d, z21.d, z23.d\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ "add z24.s, z24.s, z8.s\n"
+ "smax z1.s, p2/M, z1.s, z11.s\n"
+ ".inst 0x44828af6 // srshl z22.s, p2/M, z22.s, z23.s\n"
+ "smax z24.s, p2/M, z24.s, z11.s\n"
+ "smin z1.s, p2/M, z1.s, z10.s\n"
+ "st1b { z1.s }, p0, [x23, x19]\n"
+ "add z22.s, z22.s, z8.s\n"
+ "sqadd z21.s, z21.s, z16.s\n"
+ "ld1w { z1.s }, p2/Z, [%x[params], #6, MUL VL]\n"
"addvl %x[params], %x[params], #16\n"
- "udot z17.s, z12.b, z2.b\n"
- "udot z31.s, z20.b, z29.b\n"
- "ext z29.b, z29.b, z29.b, #0x1\n"
- "mls z10.s, p1/M, z19.s, z23.s\n"
- "udot z4.s, z20.b, z2.b\n"
- ".inst 0x04ae754a // sqrdmulh z10.s, z10.s, z14.s\n"
- "udot z26.s, z20.b, z29.b\n"
- "movprfx z16, z17\n udot z16.s, z12.b, z29.b\n"
- "and z21.d, z10.d, z8.d\n"
- "udot z17.s, z12.b, z13.b\n"
- "mls z4.s, p1/M, z17.s, z23.s\n"
- "asr z21.s, z21.s, #0x1f\n"
- "mls z31.s, p1/M, z18.s, z23.s\n"
- "mls z26.s, p1/M, z16.s, z23.s\n"
- ".inst 0x04ae7484 // sqrdmulh z4.s, z4.s, z14.s\n"
- ".inst 0x04ae77ff // sqrdmulh z31.s, z31.s, z14.s\n"
- ".inst 0x04ae775a // sqrdmulh z26.s, z26.s, z14.s\n"
- "ld1w { z14.s }, p1/Z, [%x[params], #-4, MUL VL]\n"
- "sqadd z10.s, z10.s, z21.s\n"
- "and z16.d, z4.d, z8.d\n"
- ".inst 0x4482850a // srshl z10.s, p1/M, z10.s, z8.s\n"
- "and z20.d, z31.d, z8.d\n"
- "and z21.d, z26.d, z8.d\n"
+ "smin z24.s, p2/M, z24.s, z10.s\n"
+ "ld1b { z29.b }, p2/Z, [%x[params], #-8, MUL VL]\n"
+ "ld1b { z27.b }, p2/Z, [%x[params], #-7, MUL VL]\n"
+ "smax z22.s, p2/M, z22.s, z11.s\n"
+ "ld1w { z25.s }, p2/Z, [%x[params], #-6, MUL VL]\n"
+ ".inst 0x44828af5 // srshl z21.s, p2/M, z21.s, z23.s\n"
+ "ld1w { z23.s }, p2/Z, [%x[params], #-5, MUL VL]\n"
+ "smin z22.s, p2/M, z22.s, z10.s\n"
+ "st1b { z24.s }, p0, [x22, x19]\n"
+ "mov z24.d, z1.d\n"
+ "st1b { z22.s }, p0, [x21, x19]\n"
+ "add z21.s, z21.s, z8.s\n"
+ "mov z22.d, z1.d\n"
+ "udot z22.s, z31.b, z2.b\n"
+ "smax z21.s, p2/M, z21.s, z11.s\n"
+ "udot z22.s, z29.b, z30.b\n"
+ "smin z21.s, p2/M, z21.s, z10.s\n"
+ "st1b { z21.s }, p0, [x20, x19]\n"
+ "mov z21.d, z1.d\n"
+ "incw x19\n"
+ "udot z1.s, z31.b, z4.b\n"
+ "whilelt p0.s, x19, %x[n_channels]\n"
+ "udot z22.s, z27.b, z26.b\n"
+ "ext z4.b, z4.b, z4.b, #0x1\n"
+ "ext z26.b, z26.b, z26.b, #0x1\n"
+ "udot z1.s, z29.b, z2.b\n"
+ "ext z2.b, z2.b, z2.b, #0x1\n"
+ "udot z24.s, z31.b, z4.b\n"
+ "mls z22.s, p2/M, z20.s, z9.s\n"
+ "udot z1.s, z27.b, z30.b\n"
+ "ext z30.b, z30.b, z30.b, #0x1\n"
+ "udot z21.s, z31.b, z2.b\n"
+ "ld1b { z31.b }, p2/Z, [%x[params], #-3, MUL VL]\n"
+ "udot z24.s, z29.b, z2.b\n"
+ "udot z6.s, z12.b, z2.b\n"
+ "ld1b { z2.b }, p2/Z, [SP, #3, MUL VL]\n"
+ ".inst 0x04b976d6 // sqrdmulh z22.s, z22.s, z25.s\n"
+ "udot z21.s, z29.b, z30.b\n"
+ "ld1b { z29.b }, p2/Z, [%x[params], #-2, MUL VL]\n"
+ "udot z24.s, z27.b, z30.b\n"
+ "udot z6.s, z12.b, z30.b\n"
+ "ld1b { z30.b }, p2/Z, [SP, #5, MUL VL]\n"
+ "and z17.d, z22.d, z23.d\n"
+ "asr z17.s, z17.s, #0x1f\n"
+ "udot z21.s, z27.b, z26.b\n"
+ "ld1b { z27.b }, p2/Z, [%x[params], #-1, MUL VL]\n"
+ "mov z19.d, z6.d\n"
+ "udot z6.s, z12.b, z4.b\n"
+ "ld1b { z4.b }, p2/Z, [SP, #1, MUL VL]\n"
+ "udot z19.s, z12.b, z26.b\n"
+ "ld1b { z26.b }, p2/Z, [SP, #7, MUL VL]\n"
+ "mls z1.s, p2/M, z7.s, z9.s\n"
+ "mov z7.s, #0x0\n"
+ "sqadd z22.s, z22.s, z17.s\n"
+ "udot z7.s, z12.b, z3.b\n"
+ ".inst 0x04b97421 // sqrdmulh z1.s, z1.s, z25.s\n"
+ "mls z24.s, p2/M, z6.s, z9.s\n"
+ "mov z6.s, #0x0\n"
+ "udot z7.s, z12.b, z0.b\n"
+ "and z16.d, z1.d, z23.d\n"
"asr z16.s, z16.s, #0x1f\n"
- "asr z20.s, z20.s, #0x1f\n"
- "asr z21.s, z21.s, #0x1f\n"
- "sqadd z4.s, z4.s, z16.s\n"
- ".inst 0x44828504 // srshl z4.s, p1/M, z4.s, z8.s\n"
- "ld1b { z16.b }, p1/Z, [%x[params], #-6, MUL VL]\n"
- "sqadd z31.s, z31.s, z20.s\n"
- "sqadd z26.s, z26.s, z21.s\n"
- ".inst 0x4482851f // srshl z31.s, p1/M, z31.s, z8.s\n"
- ".inst 0x4482851a // srshl z26.s, p1/M, z26.s, z8.s\n"
- "add z10.s, z10.s, z22.s\n"
- "smax z10.s, p1/M, z10.s, z25.s\n"
- "add z4.s, z4.s, z22.s\n"
- "smin z10.s, p1/M, z10.s, z24.s\n"
- "add z31.s, z31.s, z22.s\n"
- "add z26.s, z26.s, z22.s\n"
- "smax z4.s, p1/M, z4.s, z25.s\n"
- "smax z31.s, p1/M, z31.s, z25.s\n"
- "mov z19.s, #0x0\n"
- "udot z19.s, z12.b, z6.b\n"
- "smax z26.s, p1/M, z26.s, z25.s\n"
- "st1b { z10.s }, p0, [x24, x20]\n"
- "ld1w { z10.s }, p1/Z, [%x[params], #-8, MUL VL]\n"
- "ld1b { z21.b }, p1/Z, [%x[params], #-7, MUL VL]\n"
- "smin z4.s, p1/M, z4.s, z24.s\n"
- "smin z31.s, p1/M, z31.s, z24.s\n"
- "smin z26.s, p1/M, z26.s, z24.s\n"
- "st1b { z4.s }, p0, [x23, x20]\n"
- "mov z4.d, z10.d\n"
- "udot z19.s, z12.b, z1.b\n"
- "st1b { z31.s }, p0, [x22, x20]\n"
- "mov z31.d, z10.d\n"
- "udot z31.s, z21.b, z6.b\n"
- "movprfx z18, z19\n udot z18.s, z12.b, z28.b\n"
- "st1b { z26.s }, p0, [x21, x20]\n"
- "mov z26.d, z10.d\n"
- "udot z10.s, z21.b, z11.b\n"
- "udot z10.s, z16.b, z6.b\n"
- "udot z19.s, z12.b, z11.b\n"
- "ext z11.b, z11.b, z11.b, #0x1\n"
- "ld1b { z20.b }, p1/Z, [%x[params], #-5, MUL VL]\n"
- "udot z4.s, z21.b, z11.b\n"
- "ext z6.b, z6.b, z6.b, #0x1\n"
- "mov z17.s, #0x0\n"
- "udot z26.s, z21.b, z6.b\n"
- "ld1w { z8.s }, p1/Z, [%x[params], #-3, MUL VL]\n"
- "udot z17.s, z12.b, z6.b\n"
- "udot z31.s, z16.b, z1.b\n"
- "incw x20\n"
- "whilelt p0.s, x20, %x[n_channels]\n"
- "udot z10.s, z20.b, z1.b\n"
- "ext z1.b, z1.b, z1.b, #0x1\n"
- "udot z4.s, z16.b, z6.b\n"
- "udot z26.s, z16.b, z1.b\n"
- "udot z17.s, z12.b, z1.b\n"
- "udot z31.s, z20.b, z28.b\n"
- "ext z28.b, z28.b, z28.b, #0x1\n"
- "mls z10.s, p1/M, z19.s, z23.s\n"
- "udot z4.s, z20.b, z1.b\n"
- "udot z26.s, z20.b, z28.b\n"
- ".inst 0x04ae754a // sqrdmulh z10.s, z10.s, z14.s\n"
- "movprfx z16, z17\n udot z16.s, z12.b, z28.b\n"
- "udot z17.s, z12.b, z11.b\n"
- "and z21.d, z10.d, z8.d\n"
- "mls z4.s, p1/M, z17.s, z23.s\n"
- "mls z31.s, p1/M, z18.s, z23.s\n"
- "asr z21.s, z21.s, #0x1f\n"
- "mls z26.s, p1/M, z16.s, z23.s\n"
- ".inst 0x04ae7484 // sqrdmulh z4.s, z4.s, z14.s\n"
- ".inst 0x04ae77ff // sqrdmulh z31.s, z31.s, z14.s\n"
- ".inst 0x04ae775a // sqrdmulh z26.s, z26.s, z14.s\n"
- "ld1w { z14.s }, p1/Z, [%x[params], #2, MUL VL]\n"
- "sqadd z10.s, z10.s, z21.s\n"
- "and z16.d, z4.d, z8.d\n"
- ".inst 0x4482850a // srshl z10.s, p1/M, z10.s, z8.s\n"
- "and z20.d, z31.d, z8.d\n"
- "and z21.d, z26.d, z8.d\n"
+ ".inst 0x04b97718 // sqrdmulh z24.s, z24.s, z25.s\n"
+ "mov z20.d, z7.d\n"
+ "udot z7.s, z12.b, z5.b\n"
+ "udot z20.s, z12.b, z28.b\n"
+ "mls z21.s, p2/M, z19.s, z9.s\n"
+ "and z18.d, z24.d, z23.d\n"
+ "asr z18.s, z18.s, #0x1f\n"
+ "sqadd z1.s, z1.s, z16.s\n"
+ ".inst 0x04b976b5 // sqrdmulh z21.s, z21.s, z25.s\n"
+ "ld1w { z25.s }, p2/Z, [%x[params]]\n"
+ ".inst 0x44828af6 // srshl z22.s, p2/M, z22.s, z23.s\n"
+ "and z16.d, z21.d, z23.d\n"
"asr z16.s, z16.s, #0x1f\n"
- "asr z20.s, z20.s, #0x1f\n"
- "asr z21.s, z21.s, #0x1f\n"
- "sqadd z4.s, z4.s, z16.s\n"
- ".inst 0x44828504 // srshl z4.s, p1/M, z4.s, z8.s\n"
- "ld1b { z16.b }, p1/Z, [%x[params]]\n"
- "sqadd z31.s, z31.s, z20.s\n"
- "sqadd z26.s, z26.s, z21.s\n"
- ".inst 0x4482851f // srshl z31.s, p1/M, z31.s, z8.s\n"
- ".inst 0x4482851a // srshl z26.s, p1/M, z26.s, z8.s\n"
- "add z10.s, z10.s, z22.s\n"
- "smax z10.s, p1/M, z10.s, z25.s\n"
- "add z4.s, z4.s, z22.s\n"
- "smin z10.s, p1/M, z10.s, z24.s\n"
- "add z31.s, z31.s, z22.s\n"
- "add z26.s, z26.s, z22.s\n"
- "smax z4.s, p1/M, z4.s, z25.s\n"
- "smax z31.s, p1/M, z31.s, z25.s\n"
- "mov z19.s, #0x0\n"
- "udot z19.s, z12.b, z5.b\n"
- "smax z26.s, p1/M, z26.s, z25.s\n"
- "st1b { z10.s }, p0, [x24, x20]\n"
- "ld1w { z10.s }, p1/Z, [%x[params], #-2, MUL VL]\n"
- "ld1b { z21.b }, p1/Z, [%x[params], #-1, MUL VL]\n"
- "smin z4.s, p1/M, z4.s, z24.s\n"
- "smin z31.s, p1/M, z31.s, z24.s\n"
- "smin z26.s, p1/M, z26.s, z24.s\n"
- "st1b { z4.s }, p0, [x23, x20]\n"
- "mov z4.d, z10.d\n"
- "udot z19.s, z12.b, z0.b\n"
- "st1b { z31.s }, p0, [x22, x20]\n"
- "mov z31.d, z10.d\n"
- "udot z31.s, z21.b, z5.b\n"
- "movprfx z18, z19\n udot z18.s, z12.b, z27.b\n"
- "st1b { z26.s }, p0, [x21, x20]\n"
- "mov z26.d, z10.d\n"
- "udot z10.s, z21.b, z9.b\n"
- "udot z10.s, z16.b, z5.b\n"
- "udot z19.s, z12.b, z9.b\n"
- "ext z9.b, z9.b, z9.b, #0x1\n"
- "ld1b { z20.b }, p1/Z, [%x[params], #1, MUL VL]\n"
- "udot z4.s, z21.b, z9.b\n"
+ "sqadd z24.s, z24.s, z18.s\n"
+ "add z22.s, z22.s, z8.s\n"
+ ".inst 0x44828ae1 // srshl z1.s, p2/M, z1.s, z23.s\n"
+ "smax z22.s, p2/M, z22.s, z11.s\n"
+ ".inst 0x44828af8 // srshl z24.s, p2/M, z24.s, z23.s\n"
+ "add z1.s, z1.s, z8.s\n"
+ "sqadd z21.s, z21.s, z16.s\n"
+ "smin z22.s, p2/M, z22.s, z10.s\n"
+ "st1b { z22.s }, p0, [x21, x19]\n"
+ "add z24.s, z24.s, z8.s\n"
+ "smax z1.s, p2/M, z1.s, z11.s\n"
+ ".inst 0x44828af5 // srshl z21.s, p2/M, z21.s, z23.s\n"
+ "ld1w { z23.s }, p2/Z, [%x[params], #1, MUL VL]\n"
+ "smax z24.s, p2/M, z24.s, z11.s\n"
+ "smin z1.s, p2/M, z1.s, z10.s\n"
+ "st1b { z1.s }, p0, [x23, x19]\n"
+ "add z21.s, z21.s, z8.s\n"
+ "smin z24.s, p2/M, z24.s, z10.s\n"
+ "ld1w { z1.s }, p2/Z, [%x[params], #-4, MUL VL]\n"
+ "smax z21.s, p2/M, z21.s, z11.s\n"
+ "st1b { z24.s }, p0, [x22, x19]\n"
+ "mov z24.d, z1.d\n"
+ "mov z22.d, z1.d\n"
+ "udot z22.s, z31.b, z3.b\n"
+ "smin z21.s, p2/M, z21.s, z10.s\n"
+ "st1b { z21.s }, p0, [x20, x19]\n"
+ "mov z21.d, z1.d\n"
+ "incw x19\n"
+ "udot z1.s, z31.b, z5.b\n"
+ "whilelt p0.s, x19, %x[n_channels]\n"
+ "udot z22.s, z29.b, z0.b\n"
"ext z5.b, z5.b, z5.b, #0x1\n"
- "mov z17.s, #0x0\n"
- "udot z26.s, z21.b, z5.b\n"
- "ld1w { z8.s }, p1/Z, [%x[params], #3, MUL VL]\n"
- "udot z17.s, z12.b, z5.b\n"
- "udot z31.s, z16.b, z0.b\n"
- "incw x20\n"
- "whilelt p0.s, x20, %x[n_channels]\n"
- "udot z10.s, z20.b, z0.b\n"
+ "udot z1.s, z29.b, z3.b\n"
+ "udot z22.s, z27.b, z28.b\n"
+ "ext z3.b, z3.b, z3.b, #0x1\n"
+ "ext z28.b, z28.b, z28.b, #0x1\n"
+ "udot z24.s, z31.b, z5.b\n"
+ "udot z1.s, z27.b, z0.b\n"
"ext z0.b, z0.b, z0.b, #0x1\n"
- "udot z4.s, z16.b, z5.b\n"
- "whilelt p2.b, x13, %x[n_channels]\n"
- "udot z26.s, z16.b, z0.b\n"
- "udot z17.s, z12.b, z0.b\n"
- "ld1b { z13.b }, p2/Z, [x11, x13]\n"
- "ld1b { z11.b }, p2/Z, [x10, x13]\n"
- "udot z31.s, z20.b, z27.b\n"
- "ext z27.b, z27.b, z27.b, #0x1\n"
- "mls z10.s, p1/M, z19.s, z23.s\n"
- "ld1b { z7.b }, p2/Z, [x27, x13]\n"
- "udot z4.s, z20.b, z0.b\n"
- "udot z26.s, z20.b, z27.b\n"
- ".inst 0x04ae754a // sqrdmulh z10.s, z10.s, z14.s\n"
- "ld1b { z6.b }, p2/Z, [x26, x13]\n"
- "movprfx z16, z17\n udot z16.s, z12.b, z27.b\n"
- "udot z17.s, z12.b, z9.b\n"
- "and z21.d, z10.d, z8.d\n"
- "ld1b { z9.b }, p2/Z, [x9, x13]\n"
- "mls z4.s, p1/M, z17.s, z23.s\n"
- "mls z31.s, p1/M, z18.s, z23.s\n"
- "asr z21.s, z21.s, #0x1f\n"
- "ld1b { z5.b }, p2/Z, [x25, x13]\n"
- "mls z26.s, p1/M, z16.s, z23.s\n"
- ".inst 0x04ae7484 // sqrdmulh z4.s, z4.s, z14.s\n"
- ".inst 0x04ae77ff // sqrdmulh z31.s, z31.s, z14.s\n"
- ".inst 0x04ae775a // sqrdmulh z26.s, z26.s, z14.s\n"
- "ld1b { z14.b }, p2/Z, [x12, x13]\n"
- "ldp x12, x11, [%x[inptrs], #0x40]\n"
- "sqadd z10.s, z10.s, z21.s\n"
- "and z16.d, z4.d, z8.d\n"
- ".inst 0x4482850a // srshl z10.s, p1/M, z10.s, z8.s\n"
- "ldp x10, x9, [%x[inptrs], #0x50]\n"
- "and z20.d, z31.d, z8.d\n"
- "and z21.d, z26.d, z8.d\n"
- "ld1b { z3.b }, p2/Z, [x12, x13]\n"
- "ld1b { z2.b }, p2/Z, [x11, x13]\n"
+ "udot z21.s, z31.b, z3.b\n"
+ "ld1b { z31.b }, p2/Z, [%x[params], #3, MUL VL]\n"
+ "udot z24.s, z29.b, z3.b\n"
+ "udot z6.s, z12.b, z3.b\n"
+ "mls z1.s, p2/M, z7.s, z9.s\n"
+ "udot z21.s, z29.b, z0.b\n"
+ "ld1b { z29.b }, p2/Z, [%x[params], #4, MUL VL]\n"
+ "udot z24.s, z27.b, z0.b\n"
+ "udot z6.s, z12.b, z0.b\n"
+ ".inst 0x04b97421 // sqrdmulh z1.s, z1.s, z25.s\n"
+ "udot z21.s, z27.b, z28.b\n"
+ "ld1b { z27.b }, p2/Z, [%x[params], #5, MUL VL]\n"
+ "mov z7.s, #0x0\n"
+ "mov z19.d, z6.d\n"
+ "udot z6.s, z12.b, z5.b\n"
+ "udot z19.s, z12.b, z28.b\n"
+ "and z16.d, z1.d, z23.d\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ "udot z7.s, z12.b, z2.b\n"
+ "mls z24.s, p2/M, z6.s, z9.s\n"
+ "mov z6.s, #0x0\n"
+ "mls z22.s, p2/M, z20.s, z9.s\n"
+ "mls z21.s, p2/M, z19.s, z9.s\n"
+ ".inst 0x04b97718 // sqrdmulh z24.s, z24.s, z25.s\n"
+ "sqadd z1.s, z1.s, z16.s\n"
+ ".inst 0x04b976d6 // sqrdmulh z22.s, z22.s, z25.s\n"
+ ".inst 0x04b976b5 // sqrdmulh z21.s, z21.s, z25.s\n"
+ "ld1w { z25.s }, p2/Z, [%x[params], #6, MUL VL]\n"
+ "and z18.d, z24.d, z23.d\n"
+ "asr z18.s, z18.s, #0x1f\n"
+ "and z17.d, z22.d, z23.d\n"
+ "and z16.d, z21.d, z23.d\n"
+ "asr z17.s, z17.s, #0x1f\n"
+ "udot z7.s, z12.b, z30.b\n"
+ ".inst 0x44828ae1 // srshl z1.s, p2/M, z1.s, z23.s\n"
"asr z16.s, z16.s, #0x1f\n"
- "asr z20.s, z20.s, #0x1f\n"
- "ld1b { z1.b }, p2/Z, [x10, x13]\n"
- "ld1b { z0.b }, p2/Z, [x9, x13]\n"
- "asr z21.s, z21.s, #0x1f\n"
- "sqadd z4.s, z4.s, z16.s\n"
- ".inst 0x44828504 // srshl z4.s, p1/M, z4.s, z8.s\n"
- "ld1b { z16.b }, p1/Z, [%x[params], #6, MUL VL]\n"
- "sqadd z31.s, z31.s, z20.s\n"
- "sqadd z26.s, z26.s, z21.s\n"
- ".inst 0x4482851f // srshl z31.s, p1/M, z31.s, z8.s\n"
- ".inst 0x4482851a // srshl z26.s, p1/M, z26.s, z8.s\n"
- "add z10.s, z10.s, z22.s\n"
- "smax z10.s, p1/M, z10.s, z25.s\n"
- "add z4.s, z4.s, z22.s\n"
- "ld1b { z8.b }, p2/Z, [x28, x13]\n"
- "add z31.s, z31.s, z22.s\n"
- "add z26.s, z26.s, z22.s\n"
- "ldp x28, x27, [%x[inptrs], #0x60]\n"
- "ldp x26, x25, [%x[inptrs], #0x70]\n"
- "smin z10.s, p1/M, z10.s, z24.s\n"
- "smax z4.s, p1/M, z4.s, z25.s\n"
- "st1b { z10.s }, p0, [x24, x20]\n"
- "ld1b { z30.b }, p2/Z, [x28, x13]\n"
- "smax z31.s, p1/M, z31.s, z25.s\n"
- "smax z26.s, p1/M, z26.s, z25.s\n"
- "ld1b { z29.b }, p2/Z, [x27, x13]\n"
- "ld1b { z28.b }, p2/Z, [x26, x13]\n"
- "ld1b { z27.b }, p2/Z, [x25, x13]\n"
- "zip2 z10.b, z14.b, z11.b\n"
- "zip1 z14.b, z14.b, z11.b\n"
- "smin z4.s, p1/M, z4.s, z24.s\n"
- "zip1 z11.b, z13.b, z9.b\n"
- "zip2 z9.b, z13.b, z9.b\n"
- "smin z31.s, p1/M, z31.s, z24.s\n"
- "smin z26.s, p1/M, z26.s, z24.s\n"
- "st1b { z4.s }, p0, [x23, x20]\n"
- "zip2 z13.b, z14.b, z11.b\n"
- "zip1 z14.b, z14.b, z11.b\n"
- "ldp x12, x11, [%x[inptrs], #0x0]\n"
- "st1b { z31.s }, p0, [x22, x20]\n"
- "zip1 z11.b, z10.b, z9.b\n"
- "zip2 z9.b, z10.b, z9.b\n"
- "ld1w { z10.s }, p1/Z, [%x[params], #4, MUL VL]\n"
- "st1b { z26.s }, p0, [x21, x20]\n"
- "zip2 z4.b, z8.b, z6.b\n"
- "zip1 z8.b, z8.b, z6.b\n"
- "incw x20\n"
- "zip1 z6.b, z7.b, z5.b\n"
- "zip2 z5.b, z7.b, z5.b\n"
- "ldp x10, x9, [%x[inptrs], #0x10]\n"
- "ldp x28, x27, [%x[inptrs], #0x20]\n"
- "zip2 z31.b, z3.b, z1.b\n"
- "zip1 z3.b, z3.b, z1.b\n"
- "ldp x26, x25, [%x[inptrs], #0x30]\n"
- "ld1b { z21.b }, p1/Z, [%x[params], #5, MUL VL]\n"
- "zip1 z1.b, z2.b, z0.b\n"
- "zip2 z0.b, z2.b, z0.b\n"
- "ld1b { z20.b }, p1/Z, [%x[params], #7, MUL VL]\n"
+ "sqadd z24.s, z24.s, z18.s\n"
+ "add z1.s, z1.s, z8.s\n"
+ "mov z20.d, z7.d\n"
+ "sqadd z22.s, z22.s, z17.s\n"
+ "sqadd z21.s, z21.s, z16.s\n"
+ "udot z7.s, z12.b, z4.b\n"
+ "udot z20.s, z12.b, z26.b\n"
+ "smax z1.s, p2/M, z1.s, z11.s\n"
+ ".inst 0x44828af8 // srshl z24.s, p2/M, z24.s, z23.s\n"
+ ".inst 0x44828af6 // srshl z22.s, p2/M, z22.s, z23.s\n"
+ ".inst 0x44828af5 // srshl z21.s, p2/M, z21.s, z23.s\n"
+ "ld1w { z23.s }, p2/Z, [%x[params], #7, MUL VL]\n"
+ "smin z1.s, p2/M, z1.s, z10.s\n"
+ "st1b { z1.s }, p0, [x23, x19]\n"
+ "add z24.s, z24.s, z8.s\n"
+ "add z22.s, z22.s, z8.s\n"
+ "ld1w { z1.s }, p2/Z, [%x[params], #2, MUL VL]\n"
"addvl %x[params], %x[params], #8\n"
- "zip2 z26.b, z30.b, z28.b\n"
- "zip1 z30.b, z30.b, z28.b\n"
- "zip1 z28.b, z29.b, z27.b\n"
- "zip2 z27.b, z29.b, z27.b\n"
- "zip2 z7.b, z8.b, z6.b\n"
- "zip1 z8.b, z8.b, z6.b\n"
- "zip1 z6.b, z4.b, z5.b\n"
- "zip2 z5.b, z4.b, z5.b\n"
- "zip2 z2.b, z3.b, z1.b\n"
- "zip1 z3.b, z3.b, z1.b\n"
- "zip1 z1.b, z31.b, z0.b\n"
- "zip2 z0.b, z31.b, z0.b\n"
- "zip2 z29.b, z30.b, z28.b\n"
- "zip1 z30.b, z30.b, z28.b\n"
- "zip1 z28.b, z26.b, z27.b\n"
- "zip2 z27.b, z26.b, z27.b\n"
- "mov z4.d, z10.d\n"
- "mov z31.d, z10.d\n"
- "mov z26.d, z10.d\n"
+ "add z21.s, z21.s, z8.s\n"
+ "smax z24.s, p2/M, z24.s, z11.s\n"
+ "smax z22.s, p2/M, z22.s, z11.s\n"
+ "smax z21.s, p2/M, z21.s, z11.s\n"
+ "smin z24.s, p2/M, z24.s, z10.s\n"
+ "st1b { z24.s }, p0, [x22, x19]\n"
+ "mov z24.d, z1.d\n"
+ "smin z22.s, p2/M, z22.s, z10.s\n"
+ "st1b { z22.s }, p0, [x21, x19]\n"
+ "mov z22.d, z1.d\n"
+ "smin z21.s, p2/M, z21.s, z10.s\n"
+ "st1b { z21.s }, p0, [x20, x19]\n"
+ "mov z21.d, z1.d\n"
+ "incw x19\n"
+ "udot z1.s, z31.b, z4.b\n"
+ "whilelt p0.s, x19, %x[n_channels]\n"
+ "udot z22.s, z31.b, z2.b\n"
+ "ext z4.b, z4.b, z4.b, #0x1\n"
+ "udot z1.s, z29.b, z2.b\n"
+ "udot z22.s, z29.b, z30.b\n"
+ "ext z2.b, z2.b, z2.b, #0x1\n"
+ "udot z24.s, z31.b, z4.b\n"
+ "udot z1.s, z27.b, z30.b\n"
+ "udot z22.s, z27.b, z26.b\n"
+ "ext z30.b, z30.b, z30.b, #0x1\n"
+ "ext z26.b, z26.b, z26.b, #0x1\n"
+ "udot z21.s, z31.b, z2.b\n"
+ "udot z24.s, z29.b, z2.b\n"
+ "udot z6.s, z12.b, z2.b\n"
+ "mls z1.s, p2/M, z7.s, z9.s\n"
+ "udot z21.s, z29.b, z30.b\n"
+ "udot z24.s, z27.b, z30.b\n"
+ "udot z6.s, z12.b, z30.b\n"
+ ".inst 0x04b97421 // sqrdmulh z1.s, z1.s, z25.s\n"
+ "udot z21.s, z27.b, z26.b\n"
+ "mls z22.s, p2/M, z20.s, z9.s\n"
+ "mov z19.d, z6.d\n"
+ "udot z6.s, z12.b, z4.b\n"
+ "udot z19.s, z12.b, z26.b\n"
+ "and z16.d, z1.d, z23.d\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ ".inst 0x04b976d6 // sqrdmulh z22.s, z22.s, z25.s\n"
+ "mls z24.s, p2/M, z6.s, z9.s\n"
+ "mls z21.s, p2/M, z19.s, z9.s\n"
+ ".inst 0x04b97718 // sqrdmulh z24.s, z24.s, z25.s\n"
+ "and z17.d, z22.d, z23.d\n"
+ "asr z17.s, z17.s, #0x1f\n"
+ "sqadd z1.s, z1.s, z16.s\n"
+ ".inst 0x04b976b5 // sqrdmulh z21.s, z21.s, z25.s\n"
+ "and z18.d, z24.d, z23.d\n"
+ "asr z18.s, z18.s, #0x1f\n"
+ "and z16.d, z21.d, z23.d\n"
+ ".inst 0x44828ae1 // srshl z1.s, p2/M, z1.s, z23.s\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ "sqadd z22.s, z22.s, z17.s\n"
+ "add z1.s, z1.s, z8.s\n"
+ "sqadd z24.s, z24.s, z18.s\n"
+ "smax z1.s, p2/M, z1.s, z11.s\n"
+ ".inst 0x44828af6 // srshl z22.s, p2/M, z22.s, z23.s\n"
+ "sqadd z21.s, z21.s, z16.s\n"
+ ".inst 0x44828af8 // srshl z24.s, p2/M, z24.s, z23.s\n"
+ "add z22.s, z22.s, z8.s\n"
+ "smin z1.s, p2/M, z1.s, z10.s\n"
+ "st1b { z1.s }, p0, [x23, x19]\n"
+ "add z24.s, z24.s, z8.s\n"
+ "smax z22.s, p2/M, z22.s, z11.s\n"
+ ".inst 0x44828af5 // srshl z21.s, p2/M, z21.s, z23.s\n"
+ "smax z24.s, p2/M, z24.s, z11.s\n"
+ "smin z22.s, p2/M, z22.s, z10.s\n"
+ "st1b { z22.s }, p0, [x21, x19]\n"
+ "add z21.s, z21.s, z8.s\n"
+ "smin z24.s, p2/M, z24.s, z10.s\n"
+ "st1b { z24.s }, p0, [x22, x19]\n"
+ "smax z21.s, p2/M, z21.s, z11.s\n"
+ "smin z21.s, p2/M, z21.s, z10.s\n"
+ "st1b { z21.s }, p0, [x20, x19]\n"
+ "incw x19\n"
+ "whilelt p1.b, x19, %x[n_channels]\n"
"b.any 1b\n"
+ "addvl SP, SP, #8\n"
: [params] "+&r" (params)
- : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp)
- : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : [inptrs] "r" (inptrs), [n_channels] "r" ((long unsigned int) n_channels), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp)
+ : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
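Note on the asm scaffolding restored by this revert: the reverted bodies index the per-row output pointers through x19 (advanced with "incw x19" and re-predicated with "whilelt p0.s, x19, %x[n_channels]"), spill the interleaved input vectors to SP-relative slots ("st1b { z16.b }, p2, [SP, #6, MUL VL]", unwound by the trailing "addvl SP, SP, #8"), and the clobber list swaps x12/x13 for x19 accordingly. A minimal sketch of the extended-asm operand/clobber shape these kernels share is below; the function name is hypothetical and an SVE-enabled toolchain (e.g. -march=armv8.2-a+sve) is assumed. It is illustrative only, not the generated kernel.

#include <cstdint>

void operand_shape_sketch(const void *params, uint64_t n_channels)
{
    __asm__ __volatile__(
        // Stand-in body: build a predicate over the channel tail, as the
        // kernel's "whilelt p0.s, x19, %x[n_channels]" lines do.
        "whilelt p0.s, xzr, %x[n_channels]\n"
        : [params] "+&r" (params)                            // read-write, early-clobber
        : [n_channels] "r" ((long unsigned int) n_channels)  // same cast as the kernels use
        : "cc", "memory", "p0", "x19"                        // registers the body may touch
    );
}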
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
index 386eb96cff..7bfa5fc4c7 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -91,316 +91,324 @@ void sve_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "mov x8, #0x0\n"
- "ldr x25, [%x[params], %[offsetof_Params_requant]]\n"
- "ptrue p4.b\n"
- "ldr x24, [%x[params], %[offsetof_Params_outptrs]]\n"
- "mov x23, x8\n"
- "add x21, x25, %[offsetof_Requantize32_a_offset]\n"
"ldr x17, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "ptrue p4.b\n"
"ldr x16, [%x[params], %[offsetof_Params_weights]]\n"
- "add x20, x25, %[offsetof_Requantize32_b_offset]\n"
- "add x22, x25, %[offsetof_Requantize32_c_offset]\n"
- "ld1rb { z23.b }, p4/Z, [x21]\n"
- "ld1rb { z15.b }, p4/Z, [x20]\n"
- "add x21, x25, %[offsetof_Requantize32_minval]\n"
- "add x20, x25, %[offsetof_Requantize32_maxval]\n"
- "ld1rh { z14.h }, p4/Z, [x22]\n"
- "ld1rh { z12.h }, p4/Z, [x21]\n"
- "ld1rh { z11.h }, p4/Z, [x20]\n"
- "ldp x15, x14, [x24, #0x0]\n"
- "incw x23\n"
- "whilelt p3.h, x8, x17\n"
- "ldp x13, x12, [x24, #0x10]\n"
- "whilelt p2.s, x8, x17\n"
- "whilelt p1.s, x23, x17\n"
- "ldr x26, [%x[params], %[offsetof_Params_bias]]\n"
+ "mov x15, #0x0\n"
+ "ldr x22, [%x[params], %[offsetof_Params_requant]]\n"
+ "mov x14, #0x0\n"
+ "ldr x13, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "add x12, %x[params], %[offsetof_Params_inptrs]\n"
+ "ldr x11, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "add x19, x22, %[offsetof_Requantize32_a_offset]\n"
+ "ldr x21, [%x[params], %[offsetof_Params_outptrs]]\n"
+ "add x20, x22, %[offsetof_Requantize32_b_offset]\n"
+ "ld1rb { z12.b }, p4/Z, [x19]\n"
+ "add x19, x22, %[offsetof_Requantize32_c_offset]\n"
+ "ld1rb { z18.b }, p4/Z, [x20]\n"
+ "add x20, x22, %[offsetof_Requantize32_minval]\n"
+ "ld1rw { z15.s }, p4/Z, [x19]\n"
+ "add x19, x22, %[offsetof_Requantize32_maxval]\n"
+ "ld1rw { z13.s }, p4/Z, [x20]\n"
+ "whilelt p3.h, x15, x17\n"
+ "ld1rw { z14.s }, p4/Z, [x19]\n"
+ "whilelt p2.s, x15, x17\n"
+ "ldp x10, x9, [x21, #0x0]\n"
+ "mov x19, x15\n"
+ "incw x19\n"
+ "ldp x28, x27, [x21, #0x10]\n"
+ "whilelt p1.s, x19, x17\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ld1w { z17.s }, p2/Z, [x19]\n"
+ "ld1w { z16.s }, p1/Z, [x19, #1, MUL VL]\n"
+ "uzp1 z11.s, z17.s, z16.s\n"
+ "addvl x19, x19, #2\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "uzp2 z17.s, z17.s, z16.s\n"
+ "mov z9.d, z11.d\n"
"ld1b { z0.h }, p4/Z, [x16]\n"
+ ".inst 0x45521800 // usublb z0.h, z0.b, z18.b\n"
+ "mov z20.d, z17.d\n"
"ld1b { z1.h }, p4/Z, [x16, #1, MUL VL]\n"
- "add x11, %x[params], %[offsetof_Params_inptrs]\n"
- "mov x10, #0x0\n"
+ "mov z24.d, z11.d\n"
"ld1b { z2.h }, p4/Z, [x16, #2, MUL VL]\n"
+ ".inst 0x45521821 // usublb z1.h, z1.b, z18.b\n"
+ "mov z19.d, z17.d\n"
"ld1b { z3.h }, p4/Z, [x16, #3, MUL VL]\n"
- ".inst 0x454f1800 // usublb z0.h, z0.b, z15.b\n"
- ".inst 0x454f1821 // usublb z1.h, z1.b, z15.b\n"
+ "mov z26.d, z11.d\n"
"ld1b { z4.h }, p4/Z, [x16, #4, MUL VL]\n"
+ ".inst 0x45521842 // usublb z2.h, z2.b, z18.b\n"
+ "mov z23.d, z17.d\n"
"ld1b { z5.h }, p4/Z, [x16, #5, MUL VL]\n"
- ".inst 0x454f1842 // usublb z2.h, z2.b, z15.b\n"
- ".inst 0x454f1863 // usublb z3.h, z3.b, z15.b\n"
+ ".inst 0x45521863 // usublb z3.h, z3.b, z18.b\n"
"ld1b { z6.h }, p4/Z, [x16, #6, MUL VL]\n"
"ld1b { z7.h }, p4/Z, [x16, #7, MUL VL]\n"
+ ".inst 0x45521884 // usublb z4.h, z4.b, z18.b\n"
"inch x16, ALL, MUL #8\n"
- ".inst 0x454f1884 // usublb z4.h, z4.b, z15.b\n"
- "ld1w { z17.s }, p2/Z, [x26]\n"
- "ld1w { z16.s }, p1/Z, [x26, #1, MUL VL]\n"
- "uzp1 z13.s, z17.s, z16.s\n"
- "uzp2 z17.s, z17.s, z16.s\n"
"ld1b { z8.h }, p4/Z, [x16]\n"
- "ldp x24, x23, [x11, #0x0]\n"
- "addvl x26, x26, #2\n"
- "mov z26.d, z13.d\n"
- "ldp x22, x21, [x11, #0x10]\n"
- "ldr x20, [x11, #0x20]\n"
- "mov z10.d, z17.d\n"
- "mov z24.d, z13.d\n"
- "ld1b { z31.h }, p3/Z, [x24, x8]\n"
- "ld1b { z30.h }, p3/Z, [x23, x8]\n"
- "mov z16.d, z17.d\n"
- "mov z25.d, z13.d\n"
- "ld1b { z29.h }, p3/Z, [x22, x8]\n"
- "ld1b { z28.h }, p3/Z, [x21, x8]\n"
- "mov z9.d, z17.d\n"
- ".inst 0x454f18a5 // usublb z5.h, z5.b, z15.b\n"
- "ld1b { z27.h }, p3/Z, [x20, x8]\n"
- "ldr x9, [%x[params], %[offsetof_Params_requant_muls]]\n"
- ".inst 0x454f18c6 // usublb z6.h, z6.b, z15.b\n"
- ".inst 0x454f18e7 // usublb z7.h, z7.b, z15.b\n"
- "ldr x28, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "str x26, [%x[params], %[offsetof_Params_bias]]\n"
- ".inst 0x454f1908 // usublb z8.h, z8.b, z15.b\n"
- ".inst 0x45571bff // usublb z31.h, z31.b, z23.b\n"
- ".inst 0x45571bde // usublb z30.h, z30.b, z23.b\n"
- ".inst 0x45571bbd // usublb z29.h, z29.b, z23.b\n"
- ".inst 0x45571b9c // usublb z28.h, z28.b, z23.b\n"
- ".inst 0x45571b7b // usublb z27.h, z27.b, z23.b\n"
+ "ldp x23, x22, [x12, #0x0]\n"
+ ".inst 0x455218a5 // usublb z5.h, z5.b, z18.b\n"
+ ".inst 0x455218c6 // usublb z6.h, z6.b, z18.b\n"
+ "ldp x21, x20, [x12, #0x10]\n"
+ ".inst 0x455218e7 // usublb z7.h, z7.b, z18.b\n"
+ ".inst 0x45521908 // usublb z8.h, z8.b, z18.b\n"
+ "ldr x19, [x12, #0x20]\n"
+ "ld1b { z31.h }, p3/Z, [x23, x15]\n"
+ ".inst 0x454c1bff // usublb z31.h, z31.b, z12.b\n"
+ "ld1b { z30.h }, p3/Z, [x22, x15]\n"
+ "ld1b { z29.h }, p3/Z, [x21, x15]\n"
+ ".inst 0x454c1bde // usublb z30.h, z30.b, z12.b\n"
+ "ld1b { z28.h }, p3/Z, [x20, x15]\n"
+ "ld1b { z27.h }, p3/Z, [x19, x15]\n"
+ ".inst 0x454c1bbd // usublb z29.h, z29.b, z12.b\n"
+ ".inst 0x454c1b9c // usublb z28.h, z28.b, z12.b\n"
+ ".inst 0x454c1b7b // usublb z27.h, z27.b, z12.b\n"
"1:" // Loop
- ".inst 0x448443ed // smlalb z13.s, p4/M, z31.h, z4.h\n"
+ ".inst 0x448443eb // smlalb z11.s, p4/M, z31.h, z4.h\n"
+ "ldr x21, [x12, #0x28]\n"
+ "whilelt p0.h, x14, x17\n"
".inst 0x448447f1 // smlalt z17.s, p4/M, z31.h, z4.h\n"
- "ldr x22, [x11, #0x28]\n"
- "ldr x27, [x11, #0x38]\n"
- ".inst 0x448343fa // smlalb z26.s, p4/M, z31.h, z3.h\n"
- ".inst 0x448347ea // smlalt z10.s, p4/M, z31.h, z3.h\n"
- "ldr x21, [x11, #0x30]\n"
- "ldr x26, [x11, #0x40]\n"
- ".inst 0x448043cd // smlalb z13.s, p4/M, z30.h, z0.h\n"
- ".inst 0x448047d1 // smlalt z17.s, p4/M, z30.h, z0.h\n"
- "ldr x20, [x11, #0x48]\n"
- "ld1b { z30.h }, p3/Z, [x20, x8]\n"
- ".inst 0x448243ba // smlalb z26.s, p4/M, z29.h, z2.h\n"
- ".inst 0x448247aa // smlalt z10.s, p4/M, z29.h, z2.h\n"
- "ld1b { z29.h }, p3/Z, [x21, x8]\n"
- ".inst 0x45571bbd // usublb z29.h, z29.b, z23.b\n"
+ "ldr x20, [x12, #0x30]\n"
+ "inch x16\n"
+ ".inst 0x448343e9 // smlalb z9.s, p4/M, z31.h, z3.h\n"
+ "ldr x26, [x12, #0x38]\n"
+ ".inst 0x448347f4 // smlalt z20.s, p4/M, z31.h, z3.h\n"
+ "ldr x25, [x12, #0x40]\n"
".inst 0x448143f8 // smlalb z24.s, p4/M, z31.h, z1.h\n"
- ".inst 0x448147f0 // smlalt z16.s, p4/M, z31.h, z1.h\n"
- "ldr x25, [x11, #0x50]\n"
- "ldr x24, [x11, #0x58]\n"
- ".inst 0x448043f9 // smlalb z25.s, p4/M, z31.h, z0.h\n"
- ".inst 0x448047e9 // smlalt z9.s, p4/M, z31.h, z0.h\n"
- "ld1b { z31.h }, p3/Z, [x22, x8]\n"
- ".inst 0x45571bff // usublb z31.h, z31.b, z23.b\n"
- ".inst 0x4485438d // smlalb z13.s, p4/M, z28.h, z5.h\n"
+ "ldr x19, [x12, #0x48]\n"
+ ".inst 0x448147f3 // smlalt z19.s, p4/M, z31.h, z1.h\n"
+ "ldr x24, [x12, #0x50]\n"
+ ".inst 0x448043fa // smlalb z26.s, p4/M, z31.h, z0.h\n"
+ "ldr x23, [x12, #0x58]\n"
+ ".inst 0x448047f7 // smlalt z23.s, p4/M, z31.h, z0.h\n"
+ "ld1b { z31.h }, p3/Z, [x21, x15]\n"
+ ".inst 0x454c1bff // usublb z31.h, z31.b, z12.b\n"
+ ".inst 0x448043cb // smlalb z11.s, p4/M, z30.h, z0.h\n"
+ "ldr x22, [x12, #0x60]\n"
+ ".inst 0x448047d1 // smlalt z17.s, p4/M, z30.h, z0.h\n"
+ "ld1b { z30.h }, p3/Z, [x19, x15]\n"
+ ".inst 0x454c1bde // usublb z30.h, z30.b, z12.b\n"
+ ".inst 0x448243a9 // smlalb z9.s, p4/M, z29.h, z2.h\n"
+ "ldr x21, [x12, #0x68]\n"
+ ".inst 0x448247b4 // smlalt z20.s, p4/M, z29.h, z2.h\n"
+ "ld1b { z29.h }, p3/Z, [x20, x15]\n"
+ ".inst 0x454c1bbd // usublb z29.h, z29.b, z12.b\n"
+ ".inst 0x4485438b // smlalb z11.s, p4/M, z28.h, z5.h\n"
+ "ldr x20, [x12, #0x70]\n"
".inst 0x44854791 // smlalt z17.s, p4/M, z28.h, z5.h\n"
- ".inst 0x45571bde // usublb z30.h, z30.b, z23.b\n"
- "ldr x23, [x11, #0x60]\n"
- ".inst 0x4484439a // smlalb z26.s, p4/M, z28.h, z4.h\n"
- ".inst 0x4484478a // smlalt z10.s, p4/M, z28.h, z4.h\n"
- "ldr x22, [x11, #0x68]\n"
- "ldr x21, [x11, #0x70]\n"
+ "ldr x19, [x12, #0x78]\n"
+ ".inst 0x44844389 // smlalb z9.s, p4/M, z28.h, z4.h\n"
+ "ld1w { z25.s }, p2/Z, [x13]\n"
+ ".inst 0x44844794 // smlalt z20.s, p4/M, z28.h, z4.h\n"
+ "ld1w { z16.s }, p1/Z, [x13, #1, MUL VL]\n"
+ "addvl x13, x13, #2\n"
".inst 0x44824398 // smlalb z24.s, p4/M, z28.h, z2.h\n"
- ".inst 0x44824790 // smlalt z16.s, p4/M, z28.h, z2.h\n"
- "ldr x20, [x11, #0x78]\n"
- "ld1w { z20.s }, p2/Z, [x9]\n"
- ".inst 0x44814399 // smlalb z25.s, p4/M, z28.h, z1.h\n"
- ".inst 0x44814789 // smlalt z9.s, p4/M, z28.h, z1.h\n"
- "ld1b { z28.h }, p3/Z, [x27, x8]\n"
- ".inst 0x45571b9c // usublb z28.h, z28.b, z23.b\n"
- ".inst 0x4487436d // smlalb z13.s, p4/M, z27.h, z7.h\n"
- ".inst 0x44874771 // smlalt z17.s, p4/M, z27.h, z7.h\n"
- "ld1w { z18.s }, p1/Z, [x9, #1, MUL VL]\n"
- "uzp1 z19.s, z20.s, z18.s\n"
- ".inst 0x4486437a // smlalb z26.s, p4/M, z27.h, z6.h\n"
- ".inst 0x4486476a // smlalt z10.s, p4/M, z27.h, z6.h\n"
- "uzp2 z22.s, z20.s, z18.s\n"
- "ld1w { z20.s }, p2/Z, [x28]\n"
+ ".inst 0x44824793 // smlalt z19.s, p4/M, z28.h, z2.h\n"
+ ".inst 0x4481439a // smlalb z26.s, p4/M, z28.h, z1.h\n"
+ "uzp1 z10.s, z25.s, z16.s\n"
+ "uzp2 z22.s, z25.s, z16.s\n"
+ "ld1w { z25.s }, p2/Z, [x11]\n"
+ ".inst 0x44814797 // smlalt z23.s, p4/M, z28.h, z1.h\n"
+ "ld1b { z28.h }, p3/Z, [x26, x15]\n"
+ ".inst 0x454c1b9c // usublb z28.h, z28.b, z12.b\n"
".inst 0x448643f8 // smlalb z24.s, p4/M, z31.h, z6.h\n"
- ".inst 0x448647f0 // smlalt z16.s, p4/M, z31.h, z6.h\n"
- "ld1b { z31.h }, p3/Z, [x26, x8]\n"
- ".inst 0x45571bff // usublb z31.h, z31.b, z23.b\n"
- ".inst 0x44834379 // smlalb z25.s, p4/M, z27.h, z3.h\n"
- ".inst 0x44834769 // smlalt z9.s, p4/M, z27.h, z3.h\n"
- "whilelt p0.h, x10, x17\n"
- "inch x16\n"
- ".inst 0x4481438d // smlalb z13.s, p4/M, z28.h, z1.h\n"
- ".inst 0x44814791 // smlalt z17.s, p4/M, z28.h, z1.h\n"
- "ldr x26, [%x[params], %[offsetof_Params_bias]]\n"
- "addvl x9, x9, #2\n"
- ".inst 0x4480439a // smlalb z26.s, p4/M, z28.h, z0.h\n"
- ".inst 0x4480478a // smlalt z10.s, p4/M, z28.h, z0.h\n"
- "ld1b { z28.h }, p3/Z, [x24, x8]\n"
- ".inst 0x45571b9c // usublb z28.h, z28.b, z23.b\n"
+ "ld1w { z16.s }, p1/Z, [x11, #1, MUL VL]\n"
+ ".inst 0x448647f3 // smlalt z19.s, p4/M, z31.h, z6.h\n"
+ "ld1b { z31.h }, p3/Z, [x25, x15]\n"
+ "addvl x11, x11, #2\n"
+ ".inst 0x4487436b // smlalb z11.s, p4/M, z27.h, z7.h\n"
+ ".inst 0x454c1bff // usublb z31.h, z31.b, z12.b\n"
+ "uzp1 z21.s, z25.s, z16.s\n"
+ "uzp2 z25.s, z25.s, z16.s\n"
+ ".inst 0x44874771 // smlalt z17.s, p4/M, z27.h, z7.h\n"
+ ".inst 0x44864369 // smlalb z9.s, p4/M, z27.h, z6.h\n"
+ ".inst 0x44864774 // smlalt z20.s, p4/M, z27.h, z6.h\n"
".inst 0x44844378 // smlalb z24.s, p4/M, z27.h, z4.h\n"
- ".inst 0x448843b9 // smlalb z25.s, p4/M, z29.h, z8.h\n"
- ".inst 0x44844770 // smlalt z16.s, p4/M, z27.h, z4.h\n"
- ".inst 0x448847a9 // smlalt z9.s, p4/M, z29.h, z8.h\n"
- "ld1b { z29.h }, p3/Z, [x25, x8]\n"
- ".inst 0x45571bbd // usublb z29.h, z29.b, z23.b\n"
- ".inst 0x448243ed // smlalb z13.s, p4/M, z31.h, z2.h\n"
+ ".inst 0x44844773 // smlalt z19.s, p4/M, z27.h, z4.h\n"
+ ".inst 0x4483437a // smlalb z26.s, p4/M, z27.h, z3.h\n"
+ ".inst 0x44834777 // smlalt z23.s, p4/M, z27.h, z3.h\n"
+ ".inst 0x4481438b // smlalb z11.s, p4/M, z28.h, z1.h\n"
+ ".inst 0x44814791 // smlalt z17.s, p4/M, z28.h, z1.h\n"
+ ".inst 0x448843ba // smlalb z26.s, p4/M, z29.h, z8.h\n"
+ ".inst 0x448847b7 // smlalt z23.s, p4/M, z29.h, z8.h\n"
+ "ld1b { z29.h }, p3/Z, [x24, x15]\n"
+ ".inst 0x454c1bbd // usublb z29.h, z29.b, z12.b\n"
+ ".inst 0x44804389 // smlalb z9.s, p4/M, z28.h, z0.h\n"
+ ".inst 0x44804794 // smlalt z20.s, p4/M, z28.h, z0.h\n"
+ "ld1b { z28.h }, p3/Z, [x23, x15]\n"
+ ".inst 0x454c1b9c // usublb z28.h, z28.b, z12.b\n"
+ ".inst 0x448243eb // smlalb z11.s, p4/M, z31.h, z2.h\n"
".inst 0x448247f1 // smlalt z17.s, p4/M, z31.h, z2.h\n"
- "ld1w { z18.s }, p1/Z, [x28, #1, MUL VL]\n"
- "addvl x28, x28, #2\n"
- ".inst 0x448143fa // smlalb z26.s, p4/M, z31.h, z1.h\n"
- ".inst 0x448147ea // smlalt z10.s, p4/M, z31.h, z1.h\n"
- "ld1b { z31.h }, p3/Z, [x23, x8]\n"
- ".inst 0x45571bff // usublb z31.h, z31.b, z23.b\n"
- ".inst 0x448543d8 // smlalb z24.s, p4/M, z30.h, z5.h\n"
- ".inst 0x448443d9 // smlalb z25.s, p4/M, z30.h, z4.h\n"
- "uzp1 z1.s, z20.s, z18.s\n"
- ".inst 0x448843cd // smlalb z13.s, p4/M, z30.h, z8.h\n"
+ ".inst 0x448143e9 // smlalb z9.s, p4/M, z31.h, z1.h\n"
+ ".inst 0x448147f4 // smlalt z20.s, p4/M, z31.h, z1.h\n"
+ "ld1b { z31.h }, p3/Z, [x22, x15]\n"
+ ".inst 0x454c1bff // usublb z31.h, z31.b, z12.b\n"
+ ".inst 0x448843cb // smlalb z11.s, p4/M, z30.h, z8.h\n"
".inst 0x448847d1 // smlalt z17.s, p4/M, z30.h, z8.h\n"
- "uzp2 z27.s, z20.s, z18.s\n"
- ".inst 0x448743da // smlalb z26.s, p4/M, z30.h, z7.h\n"
- ".inst 0x448747ca // smlalt z10.s, p4/M, z30.h, z7.h\n"
- ".inst 0x448547d0 // smlalt z16.s, p4/M, z30.h, z5.h\n"
- ".inst 0x448447c9 // smlalt z9.s, p4/M, z30.h, z4.h\n"
- "ld1b { z30.h }, p3/Z, [x22, x8]\n"
- ".inst 0x45571bde // usublb z30.h, z30.b, z23.b\n"
- ".inst 0x448043b8 // smlalb z24.s, p4/M, z29.h, z0.h\n"
- ".inst 0x44824399 // smlalb z25.s, p4/M, z28.h, z2.h\n"
- ".inst 0x448343ad // smlalb z13.s, p4/M, z29.h, z3.h\n"
+ ".inst 0x448743c9 // smlalb z9.s, p4/M, z30.h, z7.h\n"
+ ".inst 0x448747d4 // smlalt z20.s, p4/M, z30.h, z7.h\n"
+ ".inst 0x448543d8 // smlalb z24.s, p4/M, z30.h, z5.h\n"
+ ".inst 0x448547d3 // smlalt z19.s, p4/M, z30.h, z5.h\n"
+ ".inst 0x448443da // smlalb z26.s, p4/M, z30.h, z4.h\n"
+ ".inst 0x448447d7 // smlalt z23.s, p4/M, z30.h, z4.h\n"
+ "ld1b { z30.h }, p3/Z, [x21, x15]\n"
+ ".inst 0x454c1bde // usublb z30.h, z30.b, z12.b\n"
+ ".inst 0x448343ab // smlalb z11.s, p4/M, z29.h, z3.h\n"
".inst 0x448347b1 // smlalt z17.s, p4/M, z29.h, z3.h\n"
- ".inst 0x448047b0 // smlalt z16.s, p4/M, z29.h, z0.h\n"
- "ld1b { z29.h }, p3/Z, [x21, x8]\n"
- ".inst 0x44824789 // smlalt z9.s, p4/M, z28.h, z2.h\n"
- ".inst 0x45571bbd // usublb z29.h, z29.b, z23.b\n"
- ".inst 0x448343f8 // smlalb z24.s, p4/M, z31.h, z3.h\n"
- ".inst 0x448543d9 // smlalb z25.s, p4/M, z30.h, z5.h\n"
- ".inst 0x4485439a // smlalb z26.s, p4/M, z28.h, z5.h\n"
- ".inst 0x4485478a // smlalt z10.s, p4/M, z28.h, z5.h\n"
- "ld1b { z28.h }, p3/Z, [x20, x8]\n"
- ".inst 0x45571b9c // usublb z28.h, z28.b, z23.b\n"
- ".inst 0x448643ed // smlalb z13.s, p4/M, z31.h, z6.h\n"
- ".inst 0x448347f0 // smlalt z16.s, p4/M, z31.h, z3.h\n"
- ".inst 0x04b375ad // sqrdmulh z13.s, z13.s, z19.s\n"
- "inch x8\n"
- ".inst 0x448547c9 // smlalt z9.s, p4/M, z30.h, z5.h\n"
- ".inst 0x448743b8 // smlalb z24.s, p4/M, z29.h, z7.h\n"
- "and z21.d, z13.d, z1.d\n"
- "mov x20, x8\n"
- ".inst 0x448643b9 // smlalb z25.s, p4/M, z29.h, z6.h\n"
+ ".inst 0x448043b8 // smlalb z24.s, p4/M, z29.h, z0.h\n"
+ ".inst 0x448047b3 // smlalt z19.s, p4/M, z29.h, z0.h\n"
+ "ld1b { z29.h }, p3/Z, [x20, x15]\n"
+ ".inst 0x454c1bbd // usublb z29.h, z29.b, z12.b\n"
+ ".inst 0x44854389 // smlalb z9.s, p4/M, z28.h, z5.h\n"
+ ".inst 0x44854794 // smlalt z20.s, p4/M, z28.h, z5.h\n"
+ ".inst 0x4482439a // smlalb z26.s, p4/M, z28.h, z2.h\n"
+ ".inst 0x44824797 // smlalt z23.s, p4/M, z28.h, z2.h\n"
+ "ld1b { z28.h }, p3/Z, [x19, x15]\n"
+ "inch x15\n"
+ ".inst 0x448643eb // smlalb z11.s, p4/M, z31.h, z6.h\n"
+ "whilelt p2.s, x15, x17\n"
".inst 0x448647f1 // smlalt z17.s, p4/M, z31.h, z6.h\n"
+ "mov x19, x15\n"
+ ".inst 0x448343f8 // smlalb z24.s, p4/M, z31.h, z3.h\n"
+ ".inst 0x454c1b9c // usublb z28.h, z28.b, z12.b\n"
+ ".inst 0x448347f3 // smlalt z19.s, p4/M, z31.h, z3.h\n"
+ "incw x19\n"
+ ".inst 0x448843c9 // smlalb z9.s, p4/M, z30.h, z8.h\n"
+ "whilelt p1.s, x19, x17\n"
+ ".inst 0x04aa756b // sqrdmulh z11.s, z11.s, z10.s\n"
+ "whilelt p3.h, x15, x17\n"
".inst 0x04b67631 // sqrdmulh z17.s, z17.s, z22.s\n"
- "incw x20\n"
- ".inst 0x448747b0 // smlalt z16.s, p4/M, z29.h, z7.h\n"
- ".inst 0x448647a9 // smlalt z9.s, p4/M, z29.h, z6.h\n"
- "asr z21.s, z21.s, #0x1f\n"
- "whilelt p2.s, x8, x17\n"
- ".inst 0x448843da // smlalb z26.s, p4/M, z30.h, z8.h\n"
+ ".inst 0x448847d4 // smlalt z20.s, p4/M, z30.h, z8.h\n"
+ ".inst 0x04aa7529 // sqrdmulh z9.s, z9.s, z10.s\n"
+ "and z16.d, z11.d, z21.d\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ "and z1.d, z17.d, z25.d\n"
+ "and z27.d, z9.d, z21.d\n"
+ "asr z1.s, z1.s, #0x1f\n"
+ ".inst 0x04b67694 // sqrdmulh z20.s, z20.s, z22.s\n"
+ ".inst 0x448543da // smlalb z26.s, p4/M, z30.h, z5.h\n"
+ "asr z27.s, z27.s, #0x1f\n"
+ ".inst 0x448547d7 // smlalt z23.s, p4/M, z30.h, z5.h\n"
+ "sqadd z11.s, z11.s, z16.s\n"
+ ".inst 0x448743b8 // smlalb z24.s, p4/M, z29.h, z7.h\n"
+ "and z16.d, z20.d, z25.d\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ "sqadd z17.s, z17.s, z1.s\n"
+ "sqadd z9.s, z9.s, z27.s\n"
+ ".inst 0x448747b3 // smlalt z19.s, p4/M, z29.h, z7.h\n"
+ ".inst 0x448643ba // smlalb z26.s, p4/M, z29.h, z6.h\n"
+ ".inst 0x448647b7 // smlalt z23.s, p4/M, z29.h, z6.h\n"
".inst 0x44884398 // smlalb z24.s, p4/M, z28.h, z8.h\n"
- "and z20.d, z17.d, z27.d\n"
- "whilelt p1.s, x20, x17\n"
- ".inst 0x44874399 // smlalb z25.s, p4/M, z28.h, z7.h\n"
- ".inst 0x448847ca // smlalt z10.s, p4/M, z30.h, z8.h\n"
- ".inst 0x04b3775a // sqrdmulh z26.s, z26.s, z19.s\n"
- "whilelt p3.h, x8, x17\n"
- ".inst 0x44884790 // smlalt z16.s, p4/M, z28.h, z8.h\n"
- ".inst 0x44874789 // smlalt z9.s, p4/M, z28.h, z7.h\n"
- ".inst 0x04b37718 // sqrdmulh z24.s, z24.s, z19.s\n"
- ".inst 0x04b37739 // sqrdmulh z25.s, z25.s, z19.s\n"
- "sqadd z13.s, z13.s, z21.s\n"
- ".inst 0x4482902d // srshl z13.s, p4/M, z13.s, z1.s\n"
- "asr z20.s, z20.s, #0x1f\n"
- "and z19.d, z26.d, z1.d\n"
- ".inst 0x04b6754a // sqrdmulh z10.s, z10.s, z22.s\n"
- "and z18.d, z24.d, z1.d\n"
- ".inst 0x04b67610 // sqrdmulh z16.s, z16.s, z22.s\n"
- "and z21.d, z25.d, z1.d\n"
- ".inst 0x04b67529 // sqrdmulh z9.s, z9.s, z22.s\n"
- "sqadd z17.s, z17.s, z20.s\n"
- ".inst 0x44829371 // srshl z17.s, p4/M, z17.s, z27.s\n"
- "asr z19.s, z19.s, #0x1f\n"
- "and z2.d, z10.d, z27.d\n"
- "asr z18.s, z18.s, #0x1f\n"
- "and z22.d, z16.d, z27.d\n"
- "asr z21.s, z21.s, #0x1f\n"
- "and z20.d, z9.d, z27.d\n"
- "sqadd z26.s, z26.s, z19.s\n"
- "asr z2.s, z2.s, #0x1f\n"
- ".inst 0x4482903a // srshl z26.s, p4/M, z26.s, z1.s\n"
- "sqadd z24.s, z24.s, z18.s\n"
- "asr z22.s, z22.s, #0x1f\n"
- ".inst 0x44829038 // srshl z24.s, p4/M, z24.s, z1.s\n"
- "sqadd z25.s, z25.s, z21.s\n"
- "asr z20.s, z20.s, #0x1f\n"
- ".inst 0x44829039 // srshl z25.s, p4/M, z25.s, z1.s\n"
- "sqadd z10.s, z10.s, z2.s\n"
- "sqadd z16.s, z16.s, z22.s\n"
- ".inst 0x4482936a // srshl z10.s, p4/M, z10.s, z27.s\n"
- ".inst 0x44829370 // srshl z16.s, p4/M, z16.s, z27.s\n"
- "sqadd z9.s, z9.s, z20.s\n"
- ".inst 0x453041ad // sqxtnb z13.h, z13.s\n"
- ".inst 0x44829369 // srshl z9.s, p4/M, z9.s, z27.s\n"
- ".inst 0x4530435a // sqxtnb z26.h, z26.s\n"
- ".inst 0x45304318 // sqxtnb z24.h, z24.s\n"
- ".inst 0x45304339 // sqxtnb z25.h, z25.s\n"
- ".inst 0x4530462d // sqxtnt z13.h, z17.s\n"
- ".inst 0x4530455a // sqxtnt z26.h, z10.s\n"
- ".inst 0x45304618 // sqxtnt z24.h, z16.s\n"
- ".inst 0x45304539 // sqxtnt z25.h, z9.s\n"
- "sqadd z13.h, z13.h, z14.h\n"
- "smax z13.h, p4/M, z13.h, z12.h\n"
- "smin z13.h, p4/M, z13.h, z11.h\n"
- "sqadd z26.h, z26.h, z14.h\n"
- "sqadd z24.h, z24.h, z14.h\n"
- "smax z26.h, p4/M, z26.h, z12.h\n"
- "smax z24.h, p4/M, z24.h, z12.h\n"
- "sqadd z25.h, z25.h, z14.h\n"
- "smax z25.h, p4/M, z25.h, z12.h\n"
- "smin z26.h, p4/M, z26.h, z11.h\n"
- "st1b { z13.h }, p0, [x15, x10]\n"
- "smin z24.h, p4/M, z24.h, z11.h\n"
- "smin z25.h, p4/M, z25.h, z11.h\n"
- "st1b { z26.h }, p0, [x14, x10]\n"
- "st1b { z24.h }, p0, [x13, x10]\n"
- "st1b { z25.h }, p0, [x12, x10]\n"
+ "sqadd z20.s, z20.s, z16.s\n"
+ ".inst 0x44884793 // smlalt z19.s, p4/M, z28.h, z8.h\n"
+ ".inst 0x4487439a // smlalb z26.s, p4/M, z28.h, z7.h\n"
+ ".inst 0x04aa7718 // sqrdmulh z24.s, z24.s, z10.s\n"
+ ".inst 0x44874797 // smlalt z23.s, p4/M, z28.h, z7.h\n"
+ ".inst 0x04b67673 // sqrdmulh z19.s, z19.s, z22.s\n"
+ ".inst 0x04aa775a // sqrdmulh z26.s, z26.s, z10.s\n"
+ "and z16.d, z24.d, z21.d\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ "and z7.d, z19.d, z25.d\n"
+ "and z3.d, z26.d, z21.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ ".inst 0x04b676f7 // sqrdmulh z23.s, z23.s, z22.s\n"
+ ".inst 0x448292ab // srshl z11.s, p4/M, z11.s, z21.s\n"
+ "asr z3.s, z3.s, #0x1f\n"
+ ".inst 0x44829331 // srshl z17.s, p4/M, z17.s, z25.s\n"
+ "sqadd z24.s, z24.s, z16.s\n"
+ ".inst 0x448292a9 // srshl z9.s, p4/M, z9.s, z21.s\n"
+ "add z11.s, z11.s, z15.s\n"
+ "add z17.s, z17.s, z15.s\n"
+ "sqadd z19.s, z19.s, z7.s\n"
+ "add z9.s, z9.s, z15.s\n"
+ "sqadd z26.s, z26.s, z3.s\n"
+ "and z16.d, z23.d, z25.d\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ "smin z11.s, p4/M, z11.s, z14.s\n"
+ "smin z17.s, p4/M, z17.s, z14.s\n"
+ "smin z9.s, p4/M, z9.s, z14.s\n"
+ ".inst 0x44829334 // srshl z20.s, p4/M, z20.s, z25.s\n"
+ ".inst 0x448292b8 // srshl z24.s, p4/M, z24.s, z21.s\n"
+ "smax z11.s, p4/M, z11.s, z13.s\n"
+ "sqadd z23.s, z23.s, z16.s\n"
+ "add z20.s, z20.s, z15.s\n"
+ "add z24.s, z24.s, z15.s\n"
+ "smax z17.s, p4/M, z17.s, z13.s\n"
+ "smax z9.s, p4/M, z9.s, z13.s\n"
+ "smin z20.s, p4/M, z20.s, z14.s\n"
+ "smin z24.s, p4/M, z24.s, z14.s\n"
+ "trn1 z11.h, z11.h, z17.h\n"
+ "st1b { z11.h }, p0, [x10, x14]\n"
+ "smax z20.s, p4/M, z20.s, z13.s\n"
+ ".inst 0x44829333 // srshl z19.s, p4/M, z19.s, z25.s\n"
+ "smax z24.s, p4/M, z24.s, z13.s\n"
+ ".inst 0x448292ba // srshl z26.s, p4/M, z26.s, z21.s\n"
+ ".inst 0x44829337 // srshl z23.s, p4/M, z23.s, z25.s\n"
+ "trn1 z9.h, z9.h, z20.h\n"
+ "st1b { z9.h }, p0, [x9, x14]\n"
+ "add z19.s, z19.s, z15.s\n"
+ "add z26.s, z26.s, z15.s\n"
+ "add z23.s, z23.s, z15.s\n"
+ "smin z19.s, p4/M, z19.s, z14.s\n"
+ "smin z26.s, p4/M, z26.s, z14.s\n"
+ "smin z23.s, p4/M, z23.s, z14.s\n"
+ "smax z19.s, p4/M, z19.s, z13.s\n"
+ "smax z26.s, p4/M, z26.s, z13.s\n"
+ "smax z23.s, p4/M, z23.s, z13.s\n"
+ "trn1 z24.h, z24.h, z19.h\n"
+ "st1b { z24.h }, p0, [x28, x14]\n"
+ "trn1 z26.h, z26.h, z23.h\n"
+ "st1b { z26.h }, p0, [x27, x14]\n"
+ "inch x14\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ld1w { z17.s }, p2/Z, [x19]\n"
+ "ld1w { z16.s }, p1/Z, [x19, #1, MUL VL]\n"
+ "uzp1 z11.s, z17.s, z16.s\n"
+ "addvl x19, x19, #2\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "uzp2 z17.s, z17.s, z16.s\n"
+ "mov z9.d, z11.d\n"
"ld1b { z0.h }, p4/Z, [x16]\n"
+ ".inst 0x45521800 // usublb z0.h, z0.b, z18.b\n"
+ "mov z20.d, z17.d\n"
"ld1b { z1.h }, p4/Z, [x16, #1, MUL VL]\n"
- "inch x10\n"
+ "mov z24.d, z11.d\n"
"ld1b { z2.h }, p4/Z, [x16, #2, MUL VL]\n"
+ ".inst 0x45521821 // usublb z1.h, z1.b, z18.b\n"
+ "mov z19.d, z17.d\n"
"ld1b { z3.h }, p4/Z, [x16, #3, MUL VL]\n"
- ".inst 0x454f1800 // usublb z0.h, z0.b, z15.b\n"
- ".inst 0x454f1821 // usublb z1.h, z1.b, z15.b\n"
+ "mov z26.d, z11.d\n"
"ld1b { z4.h }, p4/Z, [x16, #4, MUL VL]\n"
+ ".inst 0x45521842 // usublb z2.h, z2.b, z18.b\n"
+ "mov z23.d, z17.d\n"
"ld1b { z5.h }, p4/Z, [x16, #5, MUL VL]\n"
- ".inst 0x454f1842 // usublb z2.h, z2.b, z15.b\n"
- ".inst 0x454f1863 // usublb z3.h, z3.b, z15.b\n"
+ ".inst 0x45521863 // usublb z3.h, z3.b, z18.b\n"
"ld1b { z6.h }, p4/Z, [x16, #6, MUL VL]\n"
"ld1b { z7.h }, p4/Z, [x16, #7, MUL VL]\n"
+ ".inst 0x45521884 // usublb z4.h, z4.b, z18.b\n"
"inch x16, ALL, MUL #8\n"
- ".inst 0x454f1884 // usublb z4.h, z4.b, z15.b\n"
- "ld1w { z17.s }, p2/Z, [x26]\n"
- "ld1w { z16.s }, p1/Z, [x26, #1, MUL VL]\n"
- "uzp1 z13.s, z17.s, z16.s\n"
- "uzp2 z17.s, z17.s, z16.s\n"
"ld1b { z8.h }, p4/Z, [x16]\n"
- "ldp x24, x23, [x11, #0x0]\n"
- "addvl x26, x26, #2\n"
- "str x26, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x22, x21, [x11, #0x10]\n"
- "ldr x20, [x11, #0x20]\n"
- "mov z26.d, z13.d\n"
- "mov z10.d, z17.d\n"
- "ld1b { z31.h }, p3/Z, [x24, x8]\n"
- "ld1b { z30.h }, p3/Z, [x23, x8]\n"
- "mov z24.d, z13.d\n"
- "mov z16.d, z17.d\n"
- "ld1b { z29.h }, p3/Z, [x22, x8]\n"
- "ld1b { z28.h }, p3/Z, [x21, x8]\n"
- "mov z25.d, z13.d\n"
- "mov z9.d, z17.d\n"
- "ld1b { z27.h }, p3/Z, [x20, x8]\n"
- ".inst 0x454f18a5 // usublb z5.h, z5.b, z15.b\n"
- ".inst 0x454f18c6 // usublb z6.h, z6.b, z15.b\n"
- ".inst 0x454f18e7 // usublb z7.h, z7.b, z15.b\n"
- ".inst 0x454f1908 // usublb z8.h, z8.b, z15.b\n"
- ".inst 0x45571bff // usublb z31.h, z31.b, z23.b\n"
- ".inst 0x45571bde // usublb z30.h, z30.b, z23.b\n"
- ".inst 0x45571bbd // usublb z29.h, z29.b, z23.b\n"
- ".inst 0x45571b9c // usublb z28.h, z28.b, z23.b\n"
- ".inst 0x45571b7b // usublb z27.h, z27.b, z23.b\n"
+ "ldp x23, x22, [x12, #0x0]\n"
+ ".inst 0x455218a5 // usublb z5.h, z5.b, z18.b\n"
+ ".inst 0x455218c6 // usublb z6.h, z6.b, z18.b\n"
+ "ldp x21, x20, [x12, #0x10]\n"
+ ".inst 0x455218e7 // usublb z7.h, z7.b, z18.b\n"
+ ".inst 0x45521908 // usublb z8.h, z8.b, z18.b\n"
+ "ldr x19, [x12, #0x20]\n"
+ "ld1b { z31.h }, p3/Z, [x23, x15]\n"
+ ".inst 0x454c1bff // usublb z31.h, z31.b, z12.b\n"
+ "ld1b { z30.h }, p3/Z, [x22, x15]\n"
+ "ld1b { z29.h }, p3/Z, [x21, x15]\n"
+ ".inst 0x454c1bde // usublb z30.h, z30.b, z12.b\n"
+ "ld1b { z28.h }, p3/Z, [x20, x15]\n"
+ "ld1b { z27.h }, p3/Z, [x19, x15]\n"
+ ".inst 0x454c1bbd // usublb z29.h, z29.b, z12.b\n"
+ ".inst 0x454c1b9c // usublb z28.h, z28.b, z12.b\n"
+ ".inst 0x454c1b7b // usublb z27.h, z27.b, z12.b\n"
"b.any 1b\n"
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
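The store-side epilogue in both hunks above is the standard fixed-point requantization: sqrdmulh by the per-channel requantization multiplier, a rounding right shift via srshl with the per-channel (negative) shift value, addition of the Requantize32 c_offset, then clamping between minval and maxval before the narrowing st1b. A scalar model of one lane is sketched below; it is an illustration, not the library's implementation, and it omits the sqrdmulh saturation corner case and the and/asr/sqadd sign fix-up the assembly applies before srshl.

#include <algorithm>
#include <cstdint>

int32_t requantize_lane(int32_t acc, int32_t mul, int32_t shift,
                        int32_t c_offset, int32_t minval, int32_t maxval)
{
    // sqrdmulh: rounding doubling multiply-high, (2*acc*mul + 2^31) >> 32
    int64_t prod = (int64_t) acc * (int64_t) mul;
    int32_t hi = (int32_t) ((prod + (1LL << 30)) >> 31);
    // srshl with a negative shift operand acts as a rounding shift right
    if (shift < 0) {
        const int s = -shift;
        hi = (int32_t) (((int64_t) hi + (1LL << (s - 1))) >> s);
    }
    // add the requantization output offset and clamp to the activation range
    return std::clamp(hi + c_offset, minval, maxval);
}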
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
index 9f21401840..e1b2d257b0 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -100,348 +100,356 @@ void sve_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "mov x7, #0x0\n"
- "ldr x25, [%x[params], %[offsetof_Params_requant]]\n"
+ "ldr x5, [%x[params], %[offsetof_Params_n_channels]]\n"
"ptrue p4.b\n"
- "ldr x24, [%x[params], %[offsetof_Params_outptrs]]\n"
- "mov x23, x7\n"
- "add x21, x25, %[offsetof_Requantize32_a_offset]\n"
- "ldr x8, [%x[params], %[offsetof_Params_n_channels]]\n"
- "ldr x17, [%x[params], %[offsetof_Params_weights]]\n"
- "add x20, x25, %[offsetof_Requantize32_b_offset]\n"
- "add x22, x25, %[offsetof_Requantize32_c_offset]\n"
- "ld1rb { z23.b }, p4/Z, [x21]\n"
+ "ldr x6, [%x[params], %[offsetof_Params_weights]]\n"
+ "mov x7, #0x0\n"
+ "ldr x22, [%x[params], %[offsetof_Params_requant]]\n"
+ "mov x8, #0x0\n"
+ "ldr x17, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "add x16, %x[params], %[offsetof_Params_inptrs]\n"
+ "ldr x15, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "add x19, x22, %[offsetof_Requantize32_a_offset]\n"
+ "ldr x21, [%x[params], %[offsetof_Params_outptrs]]\n"
+ "add x20, x22, %[offsetof_Requantize32_b_offset]\n"
+ "ld1rb { z19.b }, p4/Z, [x19]\n"
+ "add x19, x22, %[offsetof_Requantize32_c_offset]\n"
"ld1rb { z12.b }, p4/Z, [x20]\n"
- "add x21, x25, %[offsetof_Requantize32_minval]\n"
- "add x20, x25, %[offsetof_Requantize32_maxval]\n"
- "ld1rh { z14.h }, p4/Z, [x22]\n"
- "ld1rh { z16.h }, p4/Z, [x21]\n"
- "ld1rh { z15.h }, p4/Z, [x20]\n"
- "ldp x16, x15, [x24, #0x0]\n"
- "incw x23\n"
- "whilelt p3.h, x7, x8\n"
- "ldp x14, x13, [x24, #0x10]\n"
- "whilelt p2.s, x7, x8\n"
- "whilelt p1.s, x23, x8\n"
- "ldr x12, [%x[params], %[offsetof_Params_bias]]\n"
- "ld1b { z0.h }, p4/Z, [x17]\n"
- "ld1b { z1.h }, p4/Z, [x17, #1, MUL VL]\n"
- "add x11, %x[params], %[offsetof_Params_inptrs]\n"
- "mov x10, #0x0\n"
- "ld1b { z2.h }, p4/Z, [x17, #2, MUL VL]\n"
- "ld1b { z3.h }, p4/Z, [x17, #3, MUL VL]\n"
+ "add x20, x22, %[offsetof_Requantize32_minval]\n"
+ "ld1rw { z14.s }, p4/Z, [x19]\n"
+ "add x19, x22, %[offsetof_Requantize32_maxval]\n"
+ "ld1rw { z20.s }, p4/Z, [x20]\n"
+ "whilelt p3.h, x7, x5\n"
+ "ld1rw { z15.s }, p4/Z, [x19]\n"
+ "whilelt p2.s, x7, x5\n"
+ "ldp x14, x13, [x21, #0x0]\n"
+ "mov x19, x7\n"
+ "incw x19\n"
+ "ldp x12, x11, [x21, #0x10]\n"
+ "whilelt p1.s, x19, x5\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ld1w { z18.s }, p2/Z, [x19]\n"
+ "ld1w { z16.s }, p1/Z, [x19, #1, MUL VL]\n"
+ "uzp1 z13.s, z18.s, z16.s\n"
+ "addvl x19, x19, #2\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "uzp2 z16.s, z18.s, z16.s\n"
+ "mov z11.d, z13.d\n"
+ "ld1b { z0.h }, p4/Z, [x6]\n"
".inst 0x454c1800 // usublb z0.h, z0.b, z12.b\n"
+ "mov z9.d, z16.d\n"
+ "ld1b { z1.h }, p4/Z, [x6, #1, MUL VL]\n"
+ "mov z18.d, z13.d\n"
+ "ld1b { z2.h }, p4/Z, [x6, #2, MUL VL]\n"
".inst 0x454c1821 // usublb z1.h, z1.b, z12.b\n"
- "ld1b { z4.h }, p4/Z, [x17, #4, MUL VL]\n"
- "ld1b { z5.h }, p4/Z, [x17, #5, MUL VL]\n"
+ "mov z10.d, z16.d\n"
+ "ld1b { z3.h }, p4/Z, [x6, #3, MUL VL]\n"
+ "mov z22.d, z13.d\n"
+ "ld1b { z4.h }, p4/Z, [x6, #4, MUL VL]\n"
".inst 0x454c1842 // usublb z2.h, z2.b, z12.b\n"
+ "mov z23.d, z16.d\n"
+ "ld1b { z5.h }, p4/Z, [x6, #5, MUL VL]\n"
".inst 0x454c1863 // usublb z3.h, z3.b, z12.b\n"
- "ld1b { z6.h }, p4/Z, [x17, #6, MUL VL]\n"
- "ld1b { z7.h }, p4/Z, [x17, #7, MUL VL]\n"
- "inch x17, ALL, MUL #8\n"
+ "ld1b { z6.h }, p4/Z, [x6, #6, MUL VL]\n"
+ "ld1b { z7.h }, p4/Z, [x6, #7, MUL VL]\n"
".inst 0x454c1884 // usublb z4.h, z4.b, z12.b\n"
- "ld1w { z18.s }, p2/Z, [x12]\n"
- "ld1w { z8.s }, p1/Z, [x12, #1, MUL VL]\n"
- "uzp1 z13.s, z18.s, z8.s\n"
- "uzp2 z17.s, z18.s, z8.s\n"
- "ld1b { z8.h }, p4/Z, [x17]\n"
- "ldp x9, x28, [x11, #0x0]\n"
- "addvl x12, x12, #2\n"
- "mov z9.d, z13.d\n"
- "ldp x25, x24, [x11, #0x10]\n"
- "ldp x23, x22, [x11, #0x20]\n"
- "mov z10.d, z17.d\n"
- "mov z11.d, z13.d\n"
- "ldp x21, x20, [x11, #0x30]\n"
- "ld1b { z31.h }, p3/Z, [x9, x7]\n"
- "mov z22.d, z17.d\n"
- "mov z21.d, z13.d\n"
- "ld1b { z30.h }, p3/Z, [x28, x7]\n"
- "ld1b { z29.h }, p3/Z, [x25, x7]\n"
- "mov z18.d, z17.d\n"
+ "inch x6, ALL, MUL #8\n"
+ "ld1b { z8.h }, p4/Z, [x6]\n"
+ "ldp x26, x25, [x16, #0x0]\n"
".inst 0x454c18a5 // usublb z5.h, z5.b, z12.b\n"
- "ld1b { z28.h }, p3/Z, [x24, x7]\n"
- "ld1b { z27.h }, p3/Z, [x23, x7]\n"
".inst 0x454c18c6 // usublb z6.h, z6.b, z12.b\n"
+ "ldp x24, x23, [x16, #0x10]\n"
".inst 0x454c18e7 // usublb z7.h, z7.b, z12.b\n"
- "ld1b { z26.h }, p3/Z, [x22, x7]\n"
- "ld1b { z25.h }, p3/Z, [x21, x7]\n"
".inst 0x454c1908 // usublb z8.h, z8.b, z12.b\n"
- ".inst 0x45571bff // usublb z31.h, z31.b, z23.b\n"
- "ld1b { z24.h }, p3/Z, [x20, x7]\n"
- "ldr x27, [%x[params], %[offsetof_Params_requant_muls]]\n"
- ".inst 0x45571bde // usublb z30.h, z30.b, z23.b\n"
- ".inst 0x45571bbd // usublb z29.h, z29.b, z23.b\n"
- "ldr x26, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "str x12, [%x[params], %[offsetof_Params_bias]]\n"
- ".inst 0x45571b9c // usublb z28.h, z28.b, z23.b\n"
- ".inst 0x45571b7b // usublb z27.h, z27.b, z23.b\n"
- ".inst 0x45571b5a // usublb z26.h, z26.b, z23.b\n"
- ".inst 0x45571b39 // usublb z25.h, z25.b, z23.b\n"
- ".inst 0x45571b18 // usublb z24.h, z24.b, z23.b\n"
+ "ldp x22, x21, [x16, #0x20]\n"
+ "ldp x20, x19, [x16, #0x30]\n"
+ "ld1b { z31.h }, p3/Z, [x26, x7]\n"
+ ".inst 0x45531bff // usublb z31.h, z31.b, z19.b\n"
+ "ld1b { z30.h }, p3/Z, [x25, x7]\n"
+ "ld1b { z29.h }, p3/Z, [x24, x7]\n"
+ ".inst 0x45531bde // usublb z30.h, z30.b, z19.b\n"
+ "ld1b { z28.h }, p3/Z, [x23, x7]\n"
+ "ld1b { z27.h }, p3/Z, [x22, x7]\n"
+ ".inst 0x45531bbd // usublb z29.h, z29.b, z19.b\n"
+ "ld1b { z26.h }, p3/Z, [x21, x7]\n"
+ ".inst 0x45531b9c // usublb z28.h, z28.b, z19.b\n"
+ "ld1b { z25.h }, p3/Z, [x20, x7]\n"
+ "ld1b { z24.h }, p3/Z, [x19, x7]\n"
+ ".inst 0x45531b7b // usublb z27.h, z27.b, z19.b\n"
+ ".inst 0x45531b5a // usublb z26.h, z26.b, z19.b\n"
+ ".inst 0x45531b39 // usublb z25.h, z25.b, z19.b\n"
+ ".inst 0x45531b18 // usublb z24.h, z24.b, z19.b\n"
"1:" // Loop
".inst 0x448843ed // smlalb z13.s, p4/M, z31.h, z8.h\n"
- ".inst 0x448847f1 // smlalt z17.s, p4/M, z31.h, z8.h\n"
- "ldr x25, [x11, #0x40]\n"
- "ldr x24, [x11, #0x48]\n"
- ".inst 0x448643e9 // smlalb z9.s, p4/M, z31.h, z6.h\n"
- ".inst 0x448647ea // smlalt z10.s, p4/M, z31.h, z6.h\n"
- "ldr x22, [x11, #0x50]\n"
- "ldr x20, [x11, #0x58]\n"
+ "ldr x23, [x16, #0x40]\n"
+ "whilelt p0.h, x8, x5\n"
+ ".inst 0x448847f0 // smlalt z16.s, p4/M, z31.h, z8.h\n"
+ "ldr x22, [x16, #0x48]\n"
+ "inch x6\n"
+ ".inst 0x448643eb // smlalb z11.s, p4/M, z31.h, z6.h\n"
+ "ldr x21, [x16, #0x50]\n"
+ ".inst 0x448647e9 // smlalt z9.s, p4/M, z31.h, z6.h\n"
+ "ldr x20, [x16, #0x58]\n"
+ ".inst 0x448243f2 // smlalb z18.s, p4/M, z31.h, z2.h\n"
+ "ldr x19, [x16, #0x60]\n"
+ ".inst 0x448247ea // smlalt z10.s, p4/M, z31.h, z2.h\n"
+ "ldr x10, [x16, #0x68]\n"
+ ".inst 0x448043f6 // smlalb z22.s, p4/M, z31.h, z0.h\n"
+ "ldr x9, [x16, #0x70]\n"
+ ".inst 0x448047f7 // smlalt z23.s, p4/M, z31.h, z0.h\n"
+ "ldr x28, [x16, #0x78]\n"
".inst 0x448043cd // smlalb z13.s, p4/M, z30.h, z0.h\n"
- ".inst 0x448047d1 // smlalt z17.s, p4/M, z30.h, z0.h\n"
- "ldr x23, [x11, #0x78]\n"
- "ldr x21, [x11, #0x60]\n"
- ".inst 0x44814389 // smlalb z9.s, p4/M, z28.h, z1.h\n"
- ".inst 0x4481478a // smlalt z10.s, p4/M, z28.h, z1.h\n"
- "ld1b { z28.h }, p3/Z, [x24, x7]\n"
- ".inst 0x45571b9c // usublb z28.h, z28.b, z23.b\n"
+ "ldr x27, [x16, #0x80]\n"
+ ".inst 0x448047d0 // smlalt z16.s, p4/M, z30.h, z0.h\n"
+ "ldr x26, [x16, #0x88]\n"
+ ".inst 0x4481438b // smlalb z11.s, p4/M, z28.h, z1.h\n"
+ "ldr x25, [x16, #0x90]\n"
+ ".inst 0x44814789 // smlalt z9.s, p4/M, z28.h, z1.h\n"
+ "ld1b { z28.h }, p3/Z, [x22, x7]\n"
+ ".inst 0x45531b9c // usublb z28.h, z28.b, z19.b\n"
".inst 0x448143ad // smlalb z13.s, p4/M, z29.h, z1.h\n"
- ".inst 0x448147b1 // smlalt z17.s, p4/M, z29.h, z1.h\n"
- "ld1b { z29.h }, p3/Z, [x25, x7]\n"
- ".inst 0x45571bbd // usublb z29.h, z29.b, z23.b\n"
- ".inst 0x44824369 // smlalb z9.s, p4/M, z27.h, z2.h\n"
- ".inst 0x4482476a // smlalt z10.s, p4/M, z27.h, z2.h\n"
- "ld1b { z27.h }, p3/Z, [x22, x7]\n"
- ".inst 0x45571b7b // usublb z27.h, z27.b, z23.b\n"
+ "ldr x24, [x16, #0x98]\n"
+ ".inst 0x448147b0 // smlalt z16.s, p4/M, z29.h, z1.h\n"
+ "ld1b { z29.h }, p3/Z, [x23, x7]\n"
+ ".inst 0x45531bbd // usublb z29.h, z29.b, z19.b\n"
+ ".inst 0x4482436b // smlalb z11.s, p4/M, z27.h, z2.h\n"
+ "ldr x23, [x16, #0xa0]\n"
+ ".inst 0x44824769 // smlalt z9.s, p4/M, z27.h, z2.h\n"
+ "ld1b { z27.h }, p3/Z, [x21, x7]\n"
+ ".inst 0x45531b7b // usublb z27.h, z27.b, z19.b\n"
".inst 0x4483434d // smlalb z13.s, p4/M, z26.h, z3.h\n"
- ".inst 0x44834751 // smlalt z17.s, p4/M, z26.h, z3.h\n"
+ "ldr x22, [x16, #0xa8]\n"
+ ".inst 0x44834750 // smlalt z16.s, p4/M, z26.h, z3.h\n"
"ld1b { z26.h }, p3/Z, [x20, x7]\n"
- ".inst 0x45571b5a // usublb z26.h, z26.b, z23.b\n"
- ".inst 0x44804309 // smlalb z9.s, p4/M, z24.h, z0.h\n"
- ".inst 0x4480470a // smlalt z10.s, p4/M, z24.h, z0.h\n"
- "ldr x22, [x11, #0x80]\n"
- "ldr x20, [x11, #0x68]\n"
+ ".inst 0x45531b5a // usublb z26.h, z26.b, z19.b\n"
".inst 0x4484432d // smlalb z13.s, p4/M, z25.h, z4.h\n"
- ".inst 0x44844731 // smlalt z17.s, p4/M, z25.h, z4.h\n"
- "ld1b { z25.h }, p3/Z, [x21, x7]\n"
- ".inst 0x45571b39 // usublb z25.h, z25.b, z23.b\n"
- ".inst 0x448443a9 // smlalb z9.s, p4/M, z29.h, z4.h\n"
- ".inst 0x448447aa // smlalt z10.s, p4/M, z29.h, z4.h\n"
- "ldr x21, [x11, #0x88]\n"
- "ld1b { z29.h }, p3/Z, [x20, x7]\n"
+ "ldr x21, [x16, #0xb0]\n"
+ ".inst 0x44844730 // smlalt z16.s, p4/M, z25.h, z4.h\n"
+ "ld1b { z25.h }, p3/Z, [x19, x7]\n"
+ ".inst 0x45531b39 // usublb z25.h, z25.b, z19.b\n"
".inst 0x4482430d // smlalb z13.s, p4/M, z24.h, z2.h\n"
- ".inst 0x44824711 // smlalt z17.s, p4/M, z24.h, z2.h\n"
- "ldr x20, [x11, #0x70]\n"
- ".inst 0x45571bbd // usublb z29.h, z29.b, z23.b\n"
- ".inst 0x44854389 // smlalb z9.s, p4/M, z28.h, z5.h\n"
- ".inst 0x4485478a // smlalt z10.s, p4/M, z28.h, z5.h\n"
- "ld1b { z28.h }, p3/Z, [x22, x7]\n"
- ".inst 0x45571b9c // usublb z28.h, z28.b, z23.b\n"
- ".inst 0x448243eb // smlalb z11.s, p4/M, z31.h, z2.h\n"
- ".inst 0x448247f6 // smlalt z22.s, p4/M, z31.h, z2.h\n"
- "ldr x25, [x11, #0x98]\n"
- "ld1b { z24.h }, p3/Z, [x20, x7]\n"
+ "ldr x20, [x16, #0xb8]\n"
+ ".inst 0x44824710 // smlalt z16.s, p4/M, z24.h, z2.h\n"
+ "ldr x19, [x16, #0xc0]\n"
+ ".inst 0x4480430b // smlalb z11.s, p4/M, z24.h, z0.h\n"
+ "ld1w { z21.s }, p2/Z, [x17]\n"
+ ".inst 0x44804709 // smlalt z9.s, p4/M, z24.h, z0.h\n"
+ "ld1b { z24.h }, p3/Z, [x9, x7]\n"
+ ".inst 0x45531b18 // usublb z24.h, z24.b, z19.b\n"
+ ".inst 0x448443ab // smlalb z11.s, p4/M, z29.h, z4.h\n"
+ "ld1w { z17.s }, p1/Z, [x17, #1, MUL VL]\n"
+ ".inst 0x448447a9 // smlalt z9.s, p4/M, z29.h, z4.h\n"
+ "ld1b { z29.h }, p3/Z, [x10, x7]\n"
+ "addvl x17, x17, #2\n"
".inst 0x4485436d // smlalb z13.s, p4/M, z27.h, z5.h\n"
- ".inst 0x44854771 // smlalt z17.s, p4/M, z27.h, z5.h\n"
- ".inst 0x45571b18 // usublb z24.h, z24.b, z23.b\n"
- "ldr x24, [x11, #0x90]\n"
- ".inst 0x44834369 // smlalb z9.s, p4/M, z27.h, z3.h\n"
- ".inst 0x4483476a // smlalt z10.s, p4/M, z27.h, z3.h\n"
- "ld1b { z27.h }, p3/Z, [x23, x7]\n"
- ".inst 0x45571b7b // usublb z27.h, z27.b, z23.b\n"
- ".inst 0x448043f5 // smlalb z21.s, p4/M, z31.h, z0.h\n"
- ".inst 0x4483434b // smlalb z11.s, p4/M, z26.h, z3.h\n"
- "ldr x23, [x11, #0xa8]\n"
- "ldr x20, [x11, #0xa0]\n"
- ".inst 0x44834756 // smlalt z22.s, p4/M, z26.h, z3.h\n"
- ".inst 0x448047f2 // smlalt z18.s, p4/M, z31.h, z0.h\n"
- "ld1b { z26.h }, p3/Z, [x21, x7]\n"
- ".inst 0x45571b5a // usublb z26.h, z26.b, z23.b\n"
- ".inst 0x44844375 // smlalb z21.s, p4/M, z27.h, z4.h\n"
- ".inst 0x4480432b // smlalb z11.s, p4/M, z25.h, z0.h\n"
- "ldr x22, [x11, #0xb0]\n"
- "ldr x21, [x11, #0xb8]\n"
- ".inst 0x44804736 // smlalt z22.s, p4/M, z25.h, z0.h\n"
- ".inst 0x44844772 // smlalt z18.s, p4/M, z27.h, z4.h\n"
- "ld1b { z27.h }, p3/Z, [x20, x7]\n"
- ".inst 0x45571b7b // usublb z27.h, z27.b, z23.b\n"
- ".inst 0x44814395 // smlalb z21.s, p4/M, z28.h, z1.h\n"
+ ".inst 0x45531bbd // usublb z29.h, z29.b, z19.b\n"
+ "uzp1 z30.s, z21.s, z17.s\n"
+ "uzp2 z31.s, z21.s, z17.s\n"
+ "ld1w { z21.s }, p2/Z, [x15]\n"
+ ".inst 0x4485438b // smlalb z11.s, p4/M, z28.h, z5.h\n"
+ "ld1w { z17.s }, p1/Z, [x15, #1, MUL VL]\n"
+ "addvl x15, x15, #2\n"
+ ".inst 0x44854789 // smlalt z9.s, p4/M, z28.h, z5.h\n"
+ "ld1b { z28.h }, p3/Z, [x27, x7]\n"
+ ".inst 0x45531b9c // usublb z28.h, z28.b, z19.b\n"
+ ".inst 0x44854770 // smlalt z16.s, p4/M, z27.h, z5.h\n"
+ ".inst 0x4483436b // smlalb z11.s, p4/M, z27.h, z3.h\n"
+ ".inst 0x44834769 // smlalt z9.s, p4/M, z27.h, z3.h\n"
+ "ld1b { z27.h }, p3/Z, [x28, x7]\n"
+ ".inst 0x45531b7b // usublb z27.h, z27.b, z19.b\n"
+ ".inst 0x44834352 // smlalb z18.s, p4/M, z26.h, z3.h\n"
+ ".inst 0x4483474a // smlalt z10.s, p4/M, z26.h, z3.h\n"
+ "ld1b { z26.h }, p3/Z, [x26, x7]\n"
+ ".inst 0x45531b5a // usublb z26.h, z26.b, z19.b\n"
".inst 0x4486432d // smlalb z13.s, p4/M, z25.h, z6.h\n"
- "ldr x20, [x11, #0xc0]\n"
- "ld1w { z31.s }, p2/Z, [x27]\n"
- ".inst 0x44864731 // smlalt z17.s, p4/M, z25.h, z6.h\n"
- ".inst 0x448443ab // smlalb z11.s, p4/M, z29.h, z4.h\n"
- "ld1b { z25.h }, p3/Z, [x24, x7]\n"
- ".inst 0x45571b39 // usublb z25.h, z25.b, z23.b\n"
- ".inst 0x448447b6 // smlalt z22.s, p4/M, z29.h, z4.h\n"
- "ld1b { z29.h }, p3/Z, [x25, x7]\n"
- ".inst 0x44814792 // smlalt z18.s, p4/M, z28.h, z1.h\n"
- ".inst 0x45571bbd // usublb z29.h, z29.b, z23.b\n"
- ".inst 0x44854355 // smlalb z21.s, p4/M, z26.h, z5.h\n"
+ ".inst 0x44864730 // smlalt z16.s, p4/M, z25.h, z6.h\n"
+ ".inst 0x44804332 // smlalb z18.s, p4/M, z25.h, z0.h\n"
+ ".inst 0x4480472a // smlalt z10.s, p4/M, z25.h, z0.h\n"
+ "ld1b { z25.h }, p3/Z, [x25, x7]\n"
+ ".inst 0x45531b39 // usublb z25.h, z25.b, z19.b\n"
+ "uzp1 z0.s, z21.s, z17.s\n"
+ "uzp2 z21.s, z21.s, z17.s\n"
+ ".inst 0x448443b2 // smlalb z18.s, p4/M, z29.h, z4.h\n"
+ ".inst 0x448447aa // smlalt z10.s, p4/M, z29.h, z4.h\n"
+ "ld1b { z29.h }, p3/Z, [x24, x7]\n"
+ ".inst 0x45531bbd // usublb z29.h, z29.b, z19.b\n"
".inst 0x4487430d // smlalb z13.s, p4/M, z24.h, z7.h\n"
- "ld1w { z20.s }, p1/Z, [x27, #1, MUL VL]\n"
- "uzp1 z19.s, z31.s, z20.s\n"
- ".inst 0x44874711 // smlalt z17.s, p4/M, z24.h, z7.h\n"
- ".inst 0x4481430b // smlalb z11.s, p4/M, z24.h, z1.h\n"
- "uzp2 z30.s, z31.s, z20.s\n"
- "ld1w { z31.s }, p2/Z, [x26]\n"
- ".inst 0x44814716 // smlalt z22.s, p4/M, z24.h, z1.h\n"
- "ld1b { z24.h }, p3/Z, [x23, x7]\n"
- ".inst 0x44854752 // smlalt z18.s, p4/M, z26.h, z5.h\n"
- ".inst 0x45571b18 // usublb z24.h, z24.b, z23.b\n"
- ".inst 0x448243b5 // smlalb z21.s, p4/M, z29.h, z2.h\n"
- "ld1b { z26.h }, p3/Z, [x22, x7]\n"
- ".inst 0x448247b2 // smlalt z18.s, p4/M, z29.h, z2.h\n"
- ".inst 0x45571b5a // usublb z26.h, z26.b, z23.b\n"
- ".inst 0x4486432b // smlalb z11.s, p4/M, z25.h, z6.h\n"
- ".inst 0x44834315 // smlalb z21.s, p4/M, z24.h, z3.h\n"
- "ld1w { z20.s }, p1/Z, [x26, #1, MUL VL]\n"
- "uzp1 z1.s, z31.s, z20.s\n"
- ".inst 0x44874389 // smlalb z9.s, p4/M, z28.h, z7.h\n"
- ".inst 0x4487478a // smlalt z10.s, p4/M, z28.h, z7.h\n"
- ".inst 0x04b375ad // sqrdmulh z13.s, z13.s, z19.s\n"
- "whilelt p0.h, x10, x8\n"
- ".inst 0x44864736 // smlalt z22.s, p4/M, z25.h, z6.h\n"
- "ld1b { z25.h }, p3/Z, [x21, x7]\n"
- ".inst 0x44834712 // smlalt z18.s, p4/M, z24.h, z3.h\n"
- ".inst 0x45571b39 // usublb z25.h, z25.b, z23.b\n"
- ".inst 0x4487436b // smlalb z11.s, p4/M, z27.h, z7.h\n"
- ".inst 0x44874355 // smlalb z21.s, p4/M, z26.h, z7.h\n"
- "uzp2 z31.s, z31.s, z20.s\n"
- "inch x17\n"
- ".inst 0x448843a9 // smlalb z9.s, p4/M, z29.h, z8.h\n"
- ".inst 0x448847aa // smlalt z10.s, p4/M, z29.h, z8.h\n"
- "ld1b { z29.h }, p3/Z, [x20, x7]\n"
- ".inst 0x45571bbd // usublb z29.h, z29.b, z23.b\n"
- ".inst 0x44874776 // smlalt z22.s, p4/M, z27.h, z7.h\n"
- ".inst 0x44874752 // smlalt z18.s, p4/M, z26.h, z7.h\n"
- "and z0.d, z13.d, z1.d\n"
+ ".inst 0x44874710 // smlalt z16.s, p4/M, z24.h, z7.h\n"
+ ".inst 0x44814312 // smlalb z18.s, p4/M, z24.h, z1.h\n"
+ ".inst 0x4481470a // smlalt z10.s, p4/M, z24.h, z1.h\n"
+ "ld1b { z24.h }, p3/Z, [x22, x7]\n"
+ ".inst 0x45531b18 // usublb z24.h, z24.b, z19.b\n"
+ ".inst 0x04be75ad // sqrdmulh z13.s, z13.s, z30.s\n"
+ ".inst 0x04bf7610 // sqrdmulh z16.s, z16.s, z31.s\n"
+ ".inst 0x44844376 // smlalb z22.s, p4/M, z27.h, z4.h\n"
+ ".inst 0x44844777 // smlalt z23.s, p4/M, z27.h, z4.h\n"
+ "ld1b { z27.h }, p3/Z, [x23, x7]\n"
+ ".inst 0x45531b7b // usublb z27.h, z27.b, z19.b\n"
+ "and z4.d, z13.d, z0.d\n"
+ "and z17.d, z16.d, z21.d\n"
+ "asr z4.s, z4.s, #0x1f\n"
+ ".inst 0x4487438b // smlalb z11.s, p4/M, z28.h, z7.h\n"
+ ".inst 0x44874789 // smlalt z9.s, p4/M, z28.h, z7.h\n"
+ "asr z17.s, z17.s, #0x1f\n"
+ ".inst 0x44814396 // smlalb z22.s, p4/M, z28.h, z1.h\n"
+ ".inst 0x44814797 // smlalt z23.s, p4/M, z28.h, z1.h\n"
+ ".inst 0x44864332 // smlalb z18.s, p4/M, z25.h, z6.h\n"
+ ".inst 0x4486472a // smlalt z10.s, p4/M, z25.h, z6.h\n"
+ "ld1b { z25.h }, p3/Z, [x20, x7]\n"
+ ".inst 0x45531b39 // usublb z25.h, z25.b, z19.b\n"
+ "sqadd z13.s, z13.s, z4.s\n"
+ "sqadd z16.s, z16.s, z17.s\n"
+ ".inst 0x44854356 // smlalb z22.s, p4/M, z26.h, z5.h\n"
+ ".inst 0x44854757 // smlalt z23.s, p4/M, z26.h, z5.h\n"
+ "ld1b { z26.h }, p3/Z, [x21, x7]\n"
+ ".inst 0x45531b5a // usublb z26.h, z26.b, z19.b\n"
+ ".inst 0x448843ab // smlalb z11.s, p4/M, z29.h, z8.h\n"
+ ".inst 0x448847a9 // smlalt z9.s, p4/M, z29.h, z8.h\n"
+ ".inst 0x448243b6 // smlalb z22.s, p4/M, z29.h, z2.h\n"
+ ".inst 0x448247b7 // smlalt z23.s, p4/M, z29.h, z2.h\n"
+ "ld1b { z29.h }, p3/Z, [x19, x7]\n"
"inch x7\n"
- ".inst 0x4485430b // smlalb z11.s, p4/M, z24.h, z5.h\n"
- ".inst 0x44864335 // smlalb z21.s, p4/M, z25.h, z6.h\n"
- ".inst 0x04be7631 // sqrdmulh z17.s, z17.s, z30.s\n"
- "mov x20, x7\n"
- ".inst 0x44854716 // smlalt z22.s, p4/M, z24.h, z5.h\n"
- ".inst 0x44864732 // smlalt z18.s, p4/M, z25.h, z6.h\n"
- "asr z0.s, z0.s, #0x1f\n"
- "incw x20\n"
- ".inst 0x4488432b // smlalb z11.s, p4/M, z25.h, z8.h\n"
- ".inst 0x448843b5 // smlalb z21.s, p4/M, z29.h, z8.h\n"
- "and z20.d, z17.d, z31.d\n"
- "whilelt p2.s, x7, x8\n"
- ".inst 0x44884736 // smlalt z22.s, p4/M, z25.h, z8.h\n"
- ".inst 0x448847b2 // smlalt z18.s, p4/M, z29.h, z8.h\n"
- ".inst 0x04b37529 // sqrdmulh z9.s, z9.s, z19.s\n"
- "whilelt p1.s, x20, x8\n"
- ".inst 0x04b3756b // sqrdmulh z11.s, z11.s, z19.s\n"
- ".inst 0x04b376b5 // sqrdmulh z21.s, z21.s, z19.s\n"
- "ldr x12, [%x[params], %[offsetof_Params_bias]]\n"
- "whilelt p3.h, x7, x8\n"
- "sqadd z13.s, z13.s, z0.s\n"
- "asr z20.s, z20.s, #0x1f\n"
- ".inst 0x4482902d // srshl z13.s, p4/M, z13.s, z1.s\n"
- "addvl x27, x27, #2\n"
- "and z19.d, z9.d, z1.d\n"
- ".inst 0x04be754a // sqrdmulh z10.s, z10.s, z30.s\n"
- "addvl x26, x26, #2\n"
- "and z2.d, z11.d, z1.d\n"
- ".inst 0x04be76d6 // sqrdmulh z22.s, z22.s, z30.s\n"
- "and z0.d, z21.d, z1.d\n"
+ ".inst 0x04be756b // sqrdmulh z11.s, z11.s, z30.s\n"
+ "whilelt p2.s, x7, x5\n"
+ ".inst 0x04bf7529 // sqrdmulh z9.s, z9.s, z31.s\n"
+ "mov x19, x7\n"
+ ".inst 0x44874372 // smlalb z18.s, p4/M, z27.h, z7.h\n"
+ ".inst 0x45531bbd // usublb z29.h, z29.b, z19.b\n"
+ ".inst 0x4487476a // smlalt z10.s, p4/M, z27.h, z7.h\n"
+ "incw x19\n"
+ ".inst 0x44834316 // smlalb z22.s, p4/M, z24.h, z3.h\n"
+ "whilelt p1.s, x19, x5\n"
+ "and z1.d, z11.d, z0.d\n"
+ "whilelt p3.h, x7, x5\n"
+ "and z17.d, z9.d, z21.d\n"
+ "asr z1.s, z1.s, #0x1f\n"
+ ".inst 0x44854312 // smlalb z18.s, p4/M, z24.h, z5.h\n"
+ ".inst 0x4485470a // smlalt z10.s, p4/M, z24.h, z5.h\n"
+ "asr z17.s, z17.s, #0x1f\n"
+ ".inst 0x44834717 // smlalt z23.s, p4/M, z24.h, z3.h\n"
+ ".inst 0x44874356 // smlalb z22.s, p4/M, z26.h, z7.h\n"
+ ".inst 0x4482900d // srshl z13.s, p4/M, z13.s, z0.s\n"
+ ".inst 0x44884332 // smlalb z18.s, p4/M, z25.h, z8.h\n"
+ "sqadd z11.s, z11.s, z1.s\n"
+ "sqadd z9.s, z9.s, z17.s\n"
+ "add z13.s, z13.s, z14.s\n"
".inst 0x04be7652 // sqrdmulh z18.s, z18.s, z30.s\n"
- "sqadd z17.s, z17.s, z20.s\n"
- "asr z19.s, z19.s, #0x1f\n"
- ".inst 0x448293f1 // srshl z17.s, p4/M, z17.s, z31.s\n"
- "and z3.d, z10.d, z31.d\n"
+ ".inst 0x44874757 // smlalt z23.s, p4/M, z26.h, z7.h\n"
+ ".inst 0x4488472a // smlalt z10.s, p4/M, z25.h, z8.h\n"
+ ".inst 0x44864336 // smlalb z22.s, p4/M, z25.h, z6.h\n"
+ "and z17.d, z18.d, z0.d\n"
+ "asr z17.s, z17.s, #0x1f\n"
+ ".inst 0x04bf754a // sqrdmulh z10.s, z10.s, z31.s\n"
+ ".inst 0x44864737 // smlalt z23.s, p4/M, z25.h, z6.h\n"
+ ".inst 0x448843b6 // smlalb z22.s, p4/M, z29.h, z8.h\n"
+ "smin z13.s, p4/M, z13.s, z15.s\n"
+ ".inst 0x448292b0 // srshl z16.s, p4/M, z16.s, z21.s\n"
+ "and z1.d, z10.d, z21.d\n"
+ "asr z1.s, z1.s, #0x1f\n"
+ "add z16.s, z16.s, z14.s\n"
+ "sqadd z18.s, z18.s, z17.s\n"
+ ".inst 0x04be76d6 // sqrdmulh z22.s, z22.s, z30.s\n"
+ ".inst 0x448847b7 // smlalt z23.s, p4/M, z29.h, z8.h\n"
+ "smax z13.s, p4/M, z13.s, z20.s\n"
+ "smin z16.s, p4/M, z16.s, z15.s\n"
+ "sqadd z10.s, z10.s, z1.s\n"
+ "and z2.d, z22.d, z0.d\n"
"asr z2.s, z2.s, #0x1f\n"
- "and z26.d, z22.d, z31.d\n"
- "asr z0.s, z0.s, #0x1f\n"
- "and z20.d, z18.d, z31.d\n"
- "sqadd z9.s, z9.s, z19.s\n"
- ".inst 0x44829029 // srshl z9.s, p4/M, z9.s, z1.s\n"
- "asr z3.s, z3.s, #0x1f\n"
- "sqadd z11.s, z11.s, z2.s\n"
- ".inst 0x4482902b // srshl z11.s, p4/M, z11.s, z1.s\n"
- "asr z26.s, z26.s, #0x1f\n"
- "sqadd z21.s, z21.s, z0.s\n"
- ".inst 0x44829035 // srshl z21.s, p4/M, z21.s, z1.s\n"
- "asr z20.s, z20.s, #0x1f\n"
- "sqadd z10.s, z10.s, z3.s\n"
- ".inst 0x448293ea // srshl z10.s, p4/M, z10.s, z31.s\n"
- "sqadd z22.s, z22.s, z26.s\n"
- "sqadd z18.s, z18.s, z20.s\n"
- ".inst 0x448293f6 // srshl z22.s, p4/M, z22.s, z31.s\n"
- ".inst 0x448293f2 // srshl z18.s, p4/M, z18.s, z31.s\n"
- ".inst 0x453041ad // sqxtnb z13.h, z13.s\n"
- ".inst 0x45304129 // sqxtnb z9.h, z9.s\n"
- ".inst 0x4530416b // sqxtnb z11.h, z11.s\n"
- ".inst 0x453042b5 // sqxtnb z21.h, z21.s\n"
- ".inst 0x4530462d // sqxtnt z13.h, z17.s\n"
- ".inst 0x45304549 // sqxtnt z9.h, z10.s\n"
- ".inst 0x453046cb // sqxtnt z11.h, z22.s\n"
- ".inst 0x45304655 // sqxtnt z21.h, z18.s\n"
- "sqadd z13.h, z13.h, z14.h\n"
- "sqadd z9.h, z9.h, z14.h\n"
- "smax z13.h, p4/M, z13.h, z16.h\n"
- "smax z9.h, p4/M, z9.h, z16.h\n"
- "sqadd z11.h, z11.h, z14.h\n"
- "sqadd z21.h, z21.h, z14.h\n"
- "smax z11.h, p4/M, z11.h, z16.h\n"
- "smax z21.h, p4/M, z21.h, z16.h\n"
- "smin z13.h, p4/M, z13.h, z15.h\n"
- "smin z9.h, p4/M, z9.h, z15.h\n"
- "st1b { z13.h }, p0, [x16, x10]\n"
- "smin z11.h, p4/M, z11.h, z15.h\n"
- "smin z21.h, p4/M, z21.h, z15.h\n"
- "st1b { z9.h }, p0, [x15, x10]\n"
- "st1b { z11.h }, p0, [x14, x10]\n"
- "st1b { z21.h }, p0, [x13, x10]\n"
- "ld1b { z0.h }, p4/Z, [x17]\n"
- "ld1b { z1.h }, p4/Z, [x17, #1, MUL VL]\n"
- "inch x10\n"
- "ld1b { z2.h }, p4/Z, [x17, #2, MUL VL]\n"
- "ld1b { z3.h }, p4/Z, [x17, #3, MUL VL]\n"
+ ".inst 0x04bf76f7 // sqrdmulh z23.s, z23.s, z31.s\n"
+ "smax z16.s, p4/M, z16.s, z20.s\n"
+ ".inst 0x4482900b // srshl z11.s, p4/M, z11.s, z0.s\n"
+ ".inst 0x448292a9 // srshl z9.s, p4/M, z9.s, z21.s\n"
+ ".inst 0x44829012 // srshl z18.s, p4/M, z18.s, z0.s\n"
+ "trn1 z13.h, z13.h, z16.h\n"
+ "st1b { z13.h }, p0, [x14, x8]\n"
+ "add z11.s, z11.s, z14.s\n"
+ "add z9.s, z9.s, z14.s\n"
+ "add z18.s, z18.s, z14.s\n"
+ "sqadd z22.s, z22.s, z2.s\n"
+ "and z16.d, z23.d, z21.d\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ "smin z11.s, p4/M, z11.s, z15.s\n"
+ "smin z9.s, p4/M, z9.s, z15.s\n"
+ "smin z18.s, p4/M, z18.s, z15.s\n"
+ ".inst 0x448292aa // srshl z10.s, p4/M, z10.s, z21.s\n"
+ ".inst 0x44829016 // srshl z22.s, p4/M, z22.s, z0.s\n"
+ "smax z11.s, p4/M, z11.s, z20.s\n"
+ "sqadd z23.s, z23.s, z16.s\n"
+ "add z10.s, z10.s, z14.s\n"
+ "add z22.s, z22.s, z14.s\n"
+ "smax z9.s, p4/M, z9.s, z20.s\n"
+ "smax z18.s, p4/M, z18.s, z20.s\n"
+ "smin z10.s, p4/M, z10.s, z15.s\n"
+ "smin z22.s, p4/M, z22.s, z15.s\n"
+ "trn1 z11.h, z11.h, z9.h\n"
+ "st1b { z11.h }, p0, [x13, x8]\n"
+ "smax z10.s, p4/M, z10.s, z20.s\n"
+ ".inst 0x448292b7 // srshl z23.s, p4/M, z23.s, z21.s\n"
+ "smax z22.s, p4/M, z22.s, z20.s\n"
+ "trn1 z18.h, z18.h, z10.h\n"
+ "st1b { z18.h }, p0, [x12, x8]\n"
+ "add z23.s, z23.s, z14.s\n"
+ "smin z23.s, p4/M, z23.s, z15.s\n"
+ "smax z23.s, p4/M, z23.s, z20.s\n"
+ "trn1 z22.h, z22.h, z23.h\n"
+ "st1b { z22.h }, p0, [x11, x8]\n"
+ "inch x8\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ld1w { z18.s }, p2/Z, [x19]\n"
+ "ld1w { z16.s }, p1/Z, [x19, #1, MUL VL]\n"
+ "uzp1 z13.s, z18.s, z16.s\n"
+ "addvl x19, x19, #2\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "uzp2 z16.s, z18.s, z16.s\n"
+ "mov z11.d, z13.d\n"
+ "ld1b { z0.h }, p4/Z, [x6]\n"
".inst 0x454c1800 // usublb z0.h, z0.b, z12.b\n"
+ "mov z9.d, z16.d\n"
+ "ld1b { z1.h }, p4/Z, [x6, #1, MUL VL]\n"
+ "mov z18.d, z13.d\n"
+ "ld1b { z2.h }, p4/Z, [x6, #2, MUL VL]\n"
".inst 0x454c1821 // usublb z1.h, z1.b, z12.b\n"
- "ld1b { z4.h }, p4/Z, [x17, #4, MUL VL]\n"
- "ld1b { z5.h }, p4/Z, [x17, #5, MUL VL]\n"
+ "mov z10.d, z16.d\n"
+ "ld1b { z3.h }, p4/Z, [x6, #3, MUL VL]\n"
+ "mov z22.d, z13.d\n"
+ "ld1b { z4.h }, p4/Z, [x6, #4, MUL VL]\n"
".inst 0x454c1842 // usublb z2.h, z2.b, z12.b\n"
+ "mov z23.d, z16.d\n"
+ "ld1b { z5.h }, p4/Z, [x6, #5, MUL VL]\n"
".inst 0x454c1863 // usublb z3.h, z3.b, z12.b\n"
- "ld1b { z6.h }, p4/Z, [x17, #6, MUL VL]\n"
- "ld1b { z7.h }, p4/Z, [x17, #7, MUL VL]\n"
- "inch x17, ALL, MUL #8\n"
+ "ld1b { z6.h }, p4/Z, [x6, #6, MUL VL]\n"
+ "ld1b { z7.h }, p4/Z, [x6, #7, MUL VL]\n"
".inst 0x454c1884 // usublb z4.h, z4.b, z12.b\n"
- "ld1w { z18.s }, p2/Z, [x12]\n"
- "ld1w { z8.s }, p1/Z, [x12, #1, MUL VL]\n"
- "uzp1 z13.s, z18.s, z8.s\n"
- "uzp2 z17.s, z18.s, z8.s\n"
- "ld1b { z8.h }, p4/Z, [x17]\n"
- "ldp x9, x28, [x11, #0x0]\n"
- "addvl x12, x12, #2\n"
- "str x12, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x25, x24, [x11, #0x10]\n"
- "ldp x23, x22, [x11, #0x20]\n"
- "mov z9.d, z13.d\n"
- "mov z10.d, z17.d\n"
- "ldp x21, x20, [x11, #0x30]\n"
- "ld1b { z31.h }, p3/Z, [x9, x7]\n"
- "mov z11.d, z13.d\n"
- "mov z22.d, z17.d\n"
- "ld1b { z30.h }, p3/Z, [x28, x7]\n"
- "ld1b { z29.h }, p3/Z, [x25, x7]\n"
- "mov z21.d, z13.d\n"
- "mov z18.d, z17.d\n"
- "ld1b { z28.h }, p3/Z, [x24, x7]\n"
- "ld1b { z27.h }, p3/Z, [x23, x7]\n"
+ "inch x6, ALL, MUL #8\n"
+ "ld1b { z8.h }, p4/Z, [x6]\n"
+ "ldp x26, x25, [x16, #0x0]\n"
".inst 0x454c18a5 // usublb z5.h, z5.b, z12.b\n"
".inst 0x454c18c6 // usublb z6.h, z6.b, z12.b\n"
- "ld1b { z26.h }, p3/Z, [x22, x7]\n"
- "ld1b { z25.h }, p3/Z, [x21, x7]\n"
+ "ldp x24, x23, [x16, #0x10]\n"
".inst 0x454c18e7 // usublb z7.h, z7.b, z12.b\n"
".inst 0x454c1908 // usublb z8.h, z8.b, z12.b\n"
- "ld1b { z24.h }, p3/Z, [x20, x7]\n"
- ".inst 0x45571bff // usublb z31.h, z31.b, z23.b\n"
- ".inst 0x45571bde // usublb z30.h, z30.b, z23.b\n"
- ".inst 0x45571bbd // usublb z29.h, z29.b, z23.b\n"
- ".inst 0x45571b9c // usublb z28.h, z28.b, z23.b\n"
- ".inst 0x45571b7b // usublb z27.h, z27.b, z23.b\n"
- ".inst 0x45571b5a // usublb z26.h, z26.b, z23.b\n"
- ".inst 0x45571b39 // usublb z25.h, z25.b, z23.b\n"
- ".inst 0x45571b18 // usublb z24.h, z24.b, z23.b\n"
+ "ldp x22, x21, [x16, #0x20]\n"
+ "ldp x20, x19, [x16, #0x30]\n"
+ "ld1b { z31.h }, p3/Z, [x26, x7]\n"
+ ".inst 0x45531bff // usublb z31.h, z31.b, z19.b\n"
+ "ld1b { z30.h }, p3/Z, [x25, x7]\n"
+ "ld1b { z29.h }, p3/Z, [x24, x7]\n"
+ ".inst 0x45531bde // usublb z30.h, z30.b, z19.b\n"
+ "ld1b { z28.h }, p3/Z, [x23, x7]\n"
+ "ld1b { z27.h }, p3/Z, [x22, x7]\n"
+ ".inst 0x45531bbd // usublb z29.h, z29.b, z19.b\n"
+ "ld1b { z26.h }, p3/Z, [x21, x7]\n"
+ ".inst 0x45531b9c // usublb z28.h, z28.b, z19.b\n"
+ "ld1b { z25.h }, p3/Z, [x20, x7]\n"
+ "ld1b { z24.h }, p3/Z, [x19, x7]\n"
+ ".inst 0x45531b7b // usublb z27.h, z27.b, z19.b\n"
+ ".inst 0x45531b5a // usublb z26.h, z26.b, z19.b\n"
+ ".inst 0x45531b39 // usublb z25.h, z25.b, z19.b\n"
+ ".inst 0x45531b18 // usublb z24.h, z24.b, z19.b\n"
"b.any 1b\n"
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
index 40e2f5df25..0b2182f995 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -111,538 +111,546 @@ void sve_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "mov x0, #0x0\n"
- "mov x24, x0\n"
- "ldr x23, [%x[params], %[offsetof_Params_requant]]\n"
- "ldr x1, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "ldr x0, [%x[params], %[offsetof_Params_n_channels]]\n"
"ptrue p4.b\n"
- "ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n"
- "incw x24\n"
- "ldr x2, [%x[params], %[offsetof_Params_weights]]\n"
- "add x21, x23, %[offsetof_Requantize32_a_offset]\n"
- "add x20, x23, %[offsetof_Requantize32_b_offset]\n"
- "ld1rb { z15.b }, p4/Z, [x21]\n"
- "ld1rb { z17.b }, p4/Z, [x20]\n"
- "add x21, x23, %[offsetof_Requantize32_c_offset]\n"
- "add x20, x23, %[offsetof_Requantize32_minval]\n"
- "ld1rh { z12.h }, p4/Z, [x21]\n"
- "ld1rh { z13.h }, p4/Z, [x20]\n"
- "add x20, x23, %[offsetof_Requantize32_maxval]\n"
- "ld1rh { z11.h }, p4/Z, [x20]\n"
- "ldp x3, x4, [x22, #0x0]\n"
- "whilelt p3.h, x0, x1\n"
- "ldp x5, x6, [x22, #0x10]\n"
- "whilelt p2.s, x0, x1\n"
- "whilelt p1.s, x24, x1\n"
- "ldr x14, [%x[params], %[offsetof_Params_bias]]\n"
- "add x7, %x[params], %[offsetof_Params_inptrs]\n"
- "ld1w { z30.s }, p2/Z, [x14]\n"
- "ld1w { z16.s }, p1/Z, [x14, #1, MUL VL]\n"
- "uzp1 z14.s, z30.s, z16.s\n"
- "ld1b { z0.h }, p4/Z, [x2]\n"
- "ld1b { z1.h }, p4/Z, [x2, #1, MUL VL]\n"
- "uzp2 z10.s, z30.s, z16.s\n"
- "addvl x14, x14, #2\n"
- "ld1b { z2.h }, p4/Z, [x2, #2, MUL VL]\n"
- "ld1b { z3.h }, p4/Z, [x2, #3, MUL VL]\n"
- "mov x8, #0x0\n"
- "mov z20.d, z14.d\n"
- "ld1b { z4.h }, p4/Z, [x2, #4, MUL VL]\n"
- "ldp x9, x28, [x7, #0x0]\n"
- "mov z7.d, z10.d\n"
- "mov z8.d, z14.d\n"
- "ldp x27, x26, [x7, #0x10]\n"
- "ldp x25, x24, [x7, #0x20]\n"
- "mov z16.d, z10.d\n"
- "mov z6.d, z14.d\n"
- "ldp x23, x22, [x7, #0x30]\n"
- "ldp x21, x20, [x7, #0x40]\n"
- "mov z5.d, z10.d\n"
- ".inst 0x45511800 // usublb z0.h, z0.b, z17.b\n"
- "ld1b { z31.h }, p3/Z, [x9, x0]\n"
- "ld1b { z30.h }, p3/Z, [x28, x0]\n"
- ".inst 0x45511821 // usublb z1.h, z1.b, z17.b\n"
- ".inst 0x45511842 // usublb z2.h, z2.b, z17.b\n"
- "ld1b { z29.h }, p3/Z, [x27, x0]\n"
- "ld1b { z28.h }, p3/Z, [x26, x0]\n"
- ".inst 0x45511863 // usublb z3.h, z3.b, z17.b\n"
- ".inst 0x45511884 // usublb z4.h, z4.b, z17.b\n"
- "ld1b { z27.h }, p3/Z, [x25, x0]\n"
- "ld1b { z23.h }, p3/Z, [x24, x0]\n"
- ".inst 0x454f1bff // usublb z31.h, z31.b, z15.b\n"
- ".inst 0x454f1bde // usublb z30.h, z30.b, z15.b\n"
- "ld1b { z25.h }, p3/Z, [x23, x0]\n"
- "ld1b { z24.h }, p3/Z, [x22, x0]\n"
- ".inst 0x454f1bbd // usublb z29.h, z29.b, z15.b\n"
- ".inst 0x454f1b9c // usublb z28.h, z28.b, z15.b\n"
- "ld1b { z26.h }, p3/Z, [x21, x0]\n"
- "ld1b { z22.h }, p3/Z, [x20, x0]\n"
- ".inst 0x454f1b7b // usublb z27.h, z27.b, z15.b\n"
- ".inst 0x454f1af7 // usublb z23.h, z23.b, z15.b\n"
- "ldr x17, [%x[params], %[offsetof_Params_requant_muls]]\n"
- "ldr x16, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "str x14, [%x[params], %[offsetof_Params_bias]]\n"
- ".inst 0x454f1b39 // usublb z25.h, z25.b, z15.b\n"
- ".inst 0x454f1b18 // usublb z24.h, z24.b, z15.b\n"
- ".inst 0x454f1b5a // usublb z26.h, z26.b, z15.b\n"
- ".inst 0x454f1ad6 // usublb z22.h, z22.b, z15.b\n"
+ "ldr x1, [%x[params], %[offsetof_Params_weights]]\n"
+ "mov x2, #0x0\n"
+ "ldr x22, [%x[params], %[offsetof_Params_requant]]\n"
+ "mov x3, #0x0\n"
+ "ldr x4, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "add x5, %x[params], %[offsetof_Params_inptrs]\n"
+ "ldr x6, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "add x19, x22, %[offsetof_Requantize32_a_offset]\n"
+ "ldr x21, [%x[params], %[offsetof_Params_outptrs]]\n"
+ "add x20, x22, %[offsetof_Requantize32_b_offset]\n"
+ "ld1rb { z17.b }, p4/Z, [x19]\n"
+ "add x19, x22, %[offsetof_Requantize32_c_offset]\n"
+ "ld1rb { z13.b }, p4/Z, [x20]\n"
+ "add x20, x22, %[offsetof_Requantize32_minval]\n"
+ "ld1rw { z14.s }, p4/Z, [x19]\n"
+ "add x19, x22, %[offsetof_Requantize32_maxval]\n"
+ "ld1rw { z5.s }, p4/Z, [x20]\n"
+ "whilelt p3.h, x2, x0\n"
+ "ld1rw { z15.s }, p4/Z, [x19]\n"
+ "whilelt p2.s, x2, x0\n"
+ "ldp x7, x8, [x21, #0x0]\n"
+ "mov x19, x2\n"
+ "incw x19\n"
+ "ldp x17, x16, [x21, #0x10]\n"
+ "whilelt p1.s, x19, x0\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ld1w { z19.s }, p2/Z, [x19]\n"
+ "ld1w { z6.s }, p1/Z, [x19, #1, MUL VL]\n"
+ "uzp1 z11.s, z19.s, z6.s\n"
+ "addvl x19, x19, #2\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "uzp2 z16.s, z19.s, z6.s\n"
+ "mov z19.d, z11.d\n"
+ "ld1b { z0.h }, p4/Z, [x1]\n"
+ ".inst 0x454d1800 // usublb z0.h, z0.b, z13.b\n"
+ "mov z9.d, z16.d\n"
+ "ld1b { z1.h }, p4/Z, [x1, #1, MUL VL]\n"
+ "mov z7.d, z11.d\n"
+ "ld1b { z2.h }, p4/Z, [x1, #2, MUL VL]\n"
+ ".inst 0x454d1821 // usublb z1.h, z1.b, z13.b\n"
+ "mov z6.d, z16.d\n"
+ "ld1b { z3.h }, p4/Z, [x1, #3, MUL VL]\n"
+ "mov z12.d, z11.d\n"
+ "ld1b { z4.h }, p4/Z, [x1, #4, MUL VL]\n"
+ ".inst 0x454d1842 // usublb z2.h, z2.b, z13.b\n"
+ "mov z8.d, z16.d\n"
+ "ldp x28, x27, [x5, #0x0]\n"
+ ".inst 0x454d1863 // usublb z3.h, z3.b, z13.b\n"
+ "ldp x26, x25, [x5, #0x10]\n"
+ ".inst 0x454d1884 // usublb z4.h, z4.b, z13.b\n"
+ "ldp x24, x23, [x5, #0x20]\n"
+ "ldp x22, x21, [x5, #0x30]\n"
+ "ldp x20, x19, [x5, #0x40]\n"
+ "ld1b { z31.h }, p3/Z, [x28, x2]\n"
+ ".inst 0x45511bff // usublb z31.h, z31.b, z17.b\n"
+ "ld1b { z30.h }, p3/Z, [x27, x2]\n"
+ "ld1b { z29.h }, p3/Z, [x26, x2]\n"
+ ".inst 0x45511bde // usublb z30.h, z30.b, z17.b\n"
+ "ld1b { z28.h }, p3/Z, [x25, x2]\n"
+ "ld1b { z27.h }, p3/Z, [x24, x2]\n"
+ ".inst 0x45511bbd // usublb z29.h, z29.b, z17.b\n"
+ "ld1b { z23.h }, p3/Z, [x23, x2]\n"
+ ".inst 0x45511b9c // usublb z28.h, z28.b, z17.b\n"
+ "ld1b { z25.h }, p3/Z, [x22, x2]\n"
+ "ld1b { z24.h }, p3/Z, [x21, x2]\n"
+ ".inst 0x45511b7b // usublb z27.h, z27.b, z17.b\n"
+ "ld1b { z26.h }, p3/Z, [x20, x2]\n"
+ ".inst 0x45511af7 // usublb z23.h, z23.b, z17.b\n"
+ "ld1b { z22.h }, p3/Z, [x19, x2]\n"
+ ".inst 0x45511b39 // usublb z25.h, z25.b, z17.b\n"
+ ".inst 0x45511b18 // usublb z24.h, z24.b, z17.b\n"
+ ".inst 0x45511b5a // usublb z26.h, z26.b, z17.b\n"
+ ".inst 0x45511ad6 // usublb z22.h, z22.b, z17.b\n"
"1:" // Loop
- ".inst 0x448043ee // smlalb z14.s, p4/M, z31.h, z0.h\n"
- ".inst 0x448047ea // smlalt z10.s, p4/M, z31.h, z0.h\n"
- "ldr x20, [x7, #0x50]\n"
- "ld1b { z31.h }, p3/Z, [x20, x0]\n"
- ".inst 0x448143ce // smlalb z14.s, p4/M, z30.h, z1.h\n"
- ".inst 0x448043d4 // smlalb z20.s, p4/M, z30.h, z0.h\n"
- "ldr x22, [x7, #0x58]\n"
- ".inst 0x454f1bff // usublb z31.h, z31.b, z15.b\n"
- ".inst 0x448043a8 // smlalb z8.s, p4/M, z29.h, z0.h\n"
- ".inst 0x44804386 // smlalb z6.s, p4/M, z28.h, z0.h\n"
- "ldr x21, [x7, #0x60]\n"
- "ldr x20, [x7, #0x68]\n"
- ".inst 0x448147ca // smlalt z10.s, p4/M, z30.h, z1.h\n"
- ".inst 0x448047c7 // smlalt z7.s, p4/M, z30.h, z0.h\n"
- "ld1b { z30.h }, p3/Z, [x22, x0]\n"
- ".inst 0x454f1bde // usublb z30.h, z30.b, z15.b\n"
+ ".inst 0x448043eb // smlalb z11.s, p4/M, z31.h, z0.h\n"
+ "ldr x20, [x5, #0x50]\n"
+ "whilelt p0.h, x3, x0\n"
+ ".inst 0x448047f0 // smlalt z16.s, p4/M, z31.h, z0.h\n"
+ "ldr x19, [x5, #0x58]\n"
+ ".inst 0x448043d3 // smlalb z19.s, p4/M, z30.h, z0.h\n"
+ "ldr x25, [x5, #0x60]\n"
+ ".inst 0x448047c9 // smlalt z9.s, p4/M, z30.h, z0.h\n"
+ "ld1b { z31.h }, p3/Z, [x20, x2]\n"
+ ".inst 0x45511bff // usublb z31.h, z31.b, z17.b\n"
+ ".inst 0x448043a7 // smlalb z7.s, p4/M, z29.h, z0.h\n"
+ "ldr x24, [x5, #0x68]\n"
+ ".inst 0x448047a6 // smlalt z6.s, p4/M, z29.h, z0.h\n"
+ "ldr x23, [x5, #0x70]\n"
+ ".inst 0x4480438c // smlalb z12.s, p4/M, z28.h, z0.h\n"
+ "ldr x22, [x5, #0x78]\n"
+ ".inst 0x44804788 // smlalt z8.s, p4/M, z28.h, z0.h\n"
+ "ld1b { z0.h }, p4/Z, [x1, #5, MUL VL]\n"
+ ".inst 0x454d1800 // usublb z0.h, z0.b, z13.b\n"
+ ".inst 0x448143cb // smlalb z11.s, p4/M, z30.h, z1.h\n"
+ "ldr x15, [x5, #0x80]\n"
+ ".inst 0x448147d0 // smlalt z16.s, p4/M, z30.h, z1.h\n"
+ "ld1b { z30.h }, p3/Z, [x19, x2]\n"
+ ".inst 0x45511bde // usublb z30.h, z30.b, z17.b\n"
+ ".inst 0x44814373 // smlalb z19.s, p4/M, z27.h, z1.h\n"
+ "ldr x21, [x5, #0x88]\n"
+ ".inst 0x44814769 // smlalt z9.s, p4/M, z27.h, z1.h\n"
+ "ldr x20, [x5, #0x90]\n"
+ ".inst 0x44814387 // smlalb z7.s, p4/M, z28.h, z1.h\n"
+ "ldr x19, [x5, #0x98]\n"
+ ".inst 0x44814786 // smlalt z6.s, p4/M, z28.h, z1.h\n"
+ "ldr x14, [x5, #0xa0]\n"
+ ".inst 0x448142ec // smlalb z12.s, p4/M, z23.h, z1.h\n"
+ "ldr x13, [x5, #0xa8]\n"
+ ".inst 0x448146e8 // smlalt z8.s, p4/M, z23.h, z1.h\n"
+ "ld1b { z1.h }, p4/Z, [x1, #6, MUL VL]\n"
+ ".inst 0x454d1821 // usublb z1.h, z1.b, z13.b\n"
+ ".inst 0x4482436b // smlalb z11.s, p4/M, z27.h, z2.h\n"
+ "ldr x12, [x5, #0xb0]\n"
+ ".inst 0x44824770 // smlalt z16.s, p4/M, z27.h, z2.h\n"
+ "ld1b { z27.h }, p3/Z, [x25, x2]\n"
+ ".inst 0x45511b7b // usublb z27.h, z27.b, z17.b\n"
+ ".inst 0x44824333 // smlalb z19.s, p4/M, z25.h, z2.h\n"
+ "ldr x11, [x5, #0xb8]\n"
+ ".inst 0x44824729 // smlalt z9.s, p4/M, z25.h, z2.h\n"
+ "ldr x10, [x5, #0xc0]\n"
+ ".inst 0x448242e7 // smlalb z7.s, p4/M, z23.h, z2.h\n"
+ "ldr x9, [x5, #0xc8]\n"
+ ".inst 0x448246e6 // smlalt z6.s, p4/M, z23.h, z2.h\n"
+ "ldr x28, [x5, #0xd0]\n"
+ ".inst 0x448243ec // smlalb z12.s, p4/M, z31.h, z2.h\n"
+ "ldr x27, [x5, #0xd8]\n"
+ ".inst 0x448247e8 // smlalt z8.s, p4/M, z31.h, z2.h\n"
+ "ld1b { z2.h }, p4/Z, [x1, #7, MUL VL]\n"
+ "inch x1, ALL, MUL #8\n"
+ ".inst 0x4483432b // smlalb z11.s, p4/M, z25.h, z3.h\n"
+ "ldr x26, [x5, #0xe0]\n"
+ ".inst 0x454d1842 // usublb z2.h, z2.b, z13.b\n"
+ ".inst 0x44834730 // smlalt z16.s, p4/M, z25.h, z3.h\n"
+ "ld1b { z25.h }, p3/Z, [x24, x2]\n"
+ ".inst 0x44834313 // smlalb z19.s, p4/M, z24.h, z3.h\n"
+ "ldr x25, [x5, #0xe8]\n"
+ ".inst 0x45511b39 // usublb z25.h, z25.b, z17.b\n"
+ ".inst 0x44834709 // smlalt z9.s, p4/M, z24.h, z3.h\n"
+ "ld1w { z18.s }, p2/Z, [x4]\n"
+ ".inst 0x448343e7 // smlalb z7.s, p4/M, z31.h, z3.h\n"
+ "ld1w { z20.s }, p1/Z, [x4, #1, MUL VL]\n"
+ "addvl x4, x4, #2\n"
+ ".inst 0x448347e6 // smlalt z6.s, p4/M, z31.h, z3.h\n"
+ ".inst 0x448343cc // smlalb z12.s, p4/M, z30.h, z3.h\n"
+ ".inst 0x448347c8 // smlalt z8.s, p4/M, z30.h, z3.h\n"
+ "ld1b { z3.h }, p4/Z, [x1]\n"
+ ".inst 0x454d1863 // usublb z3.h, z3.b, z13.b\n"
+ "uzp1 z21.s, z18.s, z20.s\n"
+ "uzp2 z10.s, z18.s, z20.s\n"
+ "ld1w { z18.s }, p2/Z, [x6]\n"
+ ".inst 0x4484430b // smlalb z11.s, p4/M, z24.h, z4.h\n"
+ "ld1w { z20.s }, p1/Z, [x6, #1, MUL VL]\n"
+ "addvl x6, x6, #2\n"
+ ".inst 0x44844710 // smlalt z16.s, p4/M, z24.h, z4.h\n"
+ "ld1b { z24.h }, p3/Z, [x23, x2]\n"
+ ".inst 0x45511b18 // usublb z24.h, z24.b, z17.b\n"
+ ".inst 0x44844373 // smlalb z19.s, p4/M, z27.h, z4.h\n"
+ "ldr x24, [x5, #0xf0]\n"
+ ".inst 0x44844769 // smlalt z9.s, p4/M, z27.h, z4.h\n"
+ "ld1b { z27.h }, p3/Z, [x22, x2]\n"
+ ".inst 0x45511b7b // usublb z27.h, z27.b, z17.b\n"
+ ".inst 0x448443c7 // smlalb z7.s, p4/M, z30.h, z4.h\n"
+ "ldr x23, [x5, #0xf8]\n"
+ ".inst 0x448447c6 // smlalt z6.s, p4/M, z30.h, z4.h\n"
+ ".inst 0x4484434c // smlalb z12.s, p4/M, z26.h, z4.h\n"
+ ".inst 0x44844748 // smlalt z8.s, p4/M, z26.h, z4.h\n"
+ "ld1b { z4.h }, p4/Z, [x1, #1, MUL VL]\n"
+ ".inst 0x454d1884 // usublb z4.h, z4.b, z13.b\n"
+ ".inst 0x448043ab // smlalb z11.s, p4/M, z29.h, z0.h\n"
".inst 0x448047b0 // smlalt z16.s, p4/M, z29.h, z0.h\n"
- ".inst 0x4482436e // smlalb z14.s, p4/M, z27.h, z2.h\n"
- "ldr x25, [x7, #0x70]\n"
- "ldr x24, [x7, #0x78]\n"
- ".inst 0x44804785 // smlalt z5.s, p4/M, z28.h, z0.h\n"
- ".inst 0x44814374 // smlalb z20.s, p4/M, z27.h, z1.h\n"
- "ld1b { z0.h }, p4/Z, [x2, #5, MUL VL]\n"
- ".inst 0x45511800 // usublb z0.h, z0.b, z17.b\n"
- ".inst 0x44814388 // smlalb z8.s, p4/M, z28.h, z1.h\n"
- ".inst 0x448142e6 // smlalb z6.s, p4/M, z23.h, z1.h\n"
- "ldr x15, [x7, #0x80]\n"
- "ldr x23, [x7, #0x88]\n"
- ".inst 0x4482476a // smlalt z10.s, p4/M, z27.h, z2.h\n"
- ".inst 0x44814767 // smlalt z7.s, p4/M, z27.h, z1.h\n"
- "ld1b { z27.h }, p3/Z, [x21, x0]\n"
- ".inst 0x454f1b7b // usublb z27.h, z27.b, z15.b\n"
+ "uzp1 z29.s, z18.s, z20.s\n"
+ "uzp2 z20.s, z18.s, z20.s\n"
+ ".inst 0x44804393 // smlalb z19.s, p4/M, z28.h, z0.h\n"
+ ".inst 0x44804789 // smlalt z9.s, p4/M, z28.h, z0.h\n"
+ ".inst 0x448042c7 // smlalb z7.s, p4/M, z22.h, z0.h\n"
+ ".inst 0x448046c6 // smlalt z6.s, p4/M, z22.h, z0.h\n"
+ ".inst 0x4480432c // smlalb z12.s, p4/M, z25.h, z0.h\n"
+ ".inst 0x44804728 // smlalt z8.s, p4/M, z25.h, z0.h\n"
+ "ld1b { z0.h }, p4/Z, [x1, #2, MUL VL]\n"
+ ".inst 0x454d1800 // usublb z0.h, z0.b, z13.b\n"
+ ".inst 0x4481438b // smlalb z11.s, p4/M, z28.h, z1.h\n"
".inst 0x44814790 // smlalt z16.s, p4/M, z28.h, z1.h\n"
- ".inst 0x4483432e // smlalb z14.s, p4/M, z25.h, z3.h\n"
- "ldr x22, [x7, #0x90]\n"
- "ldr x21, [x7, #0x98]\n"
- ".inst 0x448146e5 // smlalt z5.s, p4/M, z23.h, z1.h\n"
- ".inst 0x44824334 // smlalb z20.s, p4/M, z25.h, z2.h\n"
- "ld1b { z1.h }, p4/Z, [x2, #6, MUL VL]\n"
- ".inst 0x45511821 // usublb z1.h, z1.b, z17.b\n"
- ".inst 0x448242e8 // smlalb z8.s, p4/M, z23.h, z2.h\n"
- ".inst 0x448243e6 // smlalb z6.s, p4/M, z31.h, z2.h\n"
- "ldr x14, [x7, #0xa0]\n"
- "ldr x13, [x7, #0xa8]\n"
- ".inst 0x4483472a // smlalt z10.s, p4/M, z25.h, z3.h\n"
- ".inst 0x44824727 // smlalt z7.s, p4/M, z25.h, z2.h\n"
- "ld1b { z25.h }, p3/Z, [x20, x0]\n"
- ".inst 0x454f1b39 // usublb z25.h, z25.b, z15.b\n"
+ "ld1b { z28.h }, p3/Z, [x21, x2]\n"
+ ".inst 0x45511b9c // usublb z28.h, z28.b, z17.b\n"
+ ".inst 0x448142f3 // smlalb z19.s, p4/M, z23.h, z1.h\n"
+ "ldr x22, [x5, #0x100]\n"
+ ".inst 0x448146e9 // smlalt z9.s, p4/M, z23.h, z1.h\n"
+ ".inst 0x44814327 // smlalb z7.s, p4/M, z25.h, z1.h\n"
+ ".inst 0x44814726 // smlalt z6.s, p4/M, z25.h, z1.h\n"
+ ".inst 0x4481430c // smlalb z12.s, p4/M, z24.h, z1.h\n"
+ ".inst 0x44814708 // smlalt z8.s, p4/M, z24.h, z1.h\n"
+ "ld1b { z1.h }, p4/Z, [x1, #3, MUL VL]\n"
+ ".inst 0x454d1821 // usublb z1.h, z1.b, z13.b\n"
+ ".inst 0x448242eb // smlalb z11.s, p4/M, z23.h, z2.h\n"
".inst 0x448246f0 // smlalt z16.s, p4/M, z23.h, z2.h\n"
- ".inst 0x4484430e // smlalb z14.s, p4/M, z24.h, z4.h\n"
- "ldr x12, [x7, #0xb0]\n"
- "ldr x20, [x7, #0xb8]\n"
- ".inst 0x448247e5 // smlalt z5.s, p4/M, z31.h, z2.h\n"
- ".inst 0x44834314 // smlalb z20.s, p4/M, z24.h, z3.h\n"
- "ld1b { z2.h }, p4/Z, [x2, #7, MUL VL]\n"
- "inch x2, ALL, MUL #8\n"
- ".inst 0x448343e8 // smlalb z8.s, p4/M, z31.h, z3.h\n"
- ".inst 0x448343c6 // smlalb z6.s, p4/M, z30.h, z3.h\n"
- ".inst 0x45511842 // usublb z2.h, z2.b, z17.b\n"
- "ldr x11, [x7, #0xc0]\n"
- ".inst 0x4484470a // smlalt z10.s, p4/M, z24.h, z4.h\n"
- ".inst 0x44834707 // smlalt z7.s, p4/M, z24.h, z3.h\n"
- "ld1b { z24.h }, p3/Z, [x25, x0]\n"
- ".inst 0x454f1b18 // usublb z24.h, z24.b, z15.b\n"
+ "ld1b { z23.h }, p3/Z, [x15, x2]\n"
+ ".inst 0x45511af7 // usublb z23.h, z23.b, z17.b\n"
+ ".inst 0x448243f3 // smlalb z19.s, p4/M, z31.h, z2.h\n"
+ "ldr x21, [x5, #0x108]\n"
+ ".inst 0x448247e9 // smlalt z9.s, p4/M, z31.h, z2.h\n"
+ ".inst 0x44824307 // smlalb z7.s, p4/M, z24.h, z2.h\n"
+ ".inst 0x44824706 // smlalt z6.s, p4/M, z24.h, z2.h\n"
+ ".inst 0x4482436c // smlalb z12.s, p4/M, z27.h, z2.h\n"
+ ".inst 0x44824768 // smlalt z8.s, p4/M, z27.h, z2.h\n"
+ "ld1b { z2.h }, p4/Z, [x1, #4, MUL VL]\n"
+ ".inst 0x454d1842 // usublb z2.h, z2.b, z13.b\n"
+ ".inst 0x448343eb // smlalb z11.s, p4/M, z31.h, z3.h\n"
".inst 0x448347f0 // smlalt z16.s, p4/M, z31.h, z3.h\n"
- ".inst 0x448043ae // smlalb z14.s, p4/M, z29.h, z0.h\n"
- "ldr x10, [x7, #0xc8]\n"
- "ldr x9, [x7, #0xd0]\n"
- ".inst 0x448347c5 // smlalt z5.s, p4/M, z30.h, z3.h\n"
- ".inst 0x44844374 // smlalb z20.s, p4/M, z27.h, z4.h\n"
- "ld1b { z3.h }, p4/Z, [x2]\n"
- ".inst 0x45511863 // usublb z3.h, z3.b, z17.b\n"
- ".inst 0x448443c8 // smlalb z8.s, p4/M, z30.h, z4.h\n"
- ".inst 0x44844346 // smlalb z6.s, p4/M, z26.h, z4.h\n"
- "ldr x28, [x7, #0xd8]\n"
- "ldr x27, [x7, #0xe0]\n"
- ".inst 0x448047aa // smlalt z10.s, p4/M, z29.h, z0.h\n"
- ".inst 0x44844767 // smlalt z7.s, p4/M, z27.h, z4.h\n"
- "ld1b { z27.h }, p3/Z, [x24, x0]\n"
- ".inst 0x454f1b7b // usublb z27.h, z27.b, z15.b\n"
+ "ld1b { z31.h }, p3/Z, [x20, x2]\n"
+ ".inst 0x45511bff // usublb z31.h, z31.b, z17.b\n"
+ ".inst 0x448343d3 // smlalb z19.s, p4/M, z30.h, z3.h\n"
+ "ldr x20, [x5, #0x110]\n"
+ ".inst 0x448347c9 // smlalt z9.s, p4/M, z30.h, z3.h\n"
+ ".inst 0x44834367 // smlalb z7.s, p4/M, z27.h, z3.h\n"
+ ".inst 0x44834766 // smlalt z6.s, p4/M, z27.h, z3.h\n"
+ ".inst 0x448342ec // smlalb z12.s, p4/M, z23.h, z3.h\n"
+ ".inst 0x448346e8 // smlalt z8.s, p4/M, z23.h, z3.h\n"
+ "ld1b { z3.h }, p4/Z, [x1, #5, MUL VL]\n"
+ ".inst 0x454d1863 // usublb z3.h, z3.b, z13.b\n"
+ ".inst 0x448443cb // smlalb z11.s, p4/M, z30.h, z4.h\n"
".inst 0x448447d0 // smlalt z16.s, p4/M, z30.h, z4.h\n"
- ".inst 0x4481438e // smlalb z14.s, p4/M, z28.h, z1.h\n"
- "ldr x26, [x7, #0xe8]\n"
- "ldr x25, [x7, #0xf0]\n"
- ".inst 0x44844745 // smlalt z5.s, p4/M, z26.h, z4.h\n"
- ".inst 0x44804394 // smlalb z20.s, p4/M, z28.h, z0.h\n"
- "ld1b { z4.h }, p4/Z, [x2, #1, MUL VL]\n"
- ".inst 0x45511884 // usublb z4.h, z4.b, z17.b\n"
- ".inst 0x448042c8 // smlalb z8.s, p4/M, z22.h, z0.h\n"
- ".inst 0x44804326 // smlalb z6.s, p4/M, z25.h, z0.h\n"
- "ld1w { z19.s }, p2/Z, [x17]\n"
- "ld1w { z18.s }, p1/Z, [x17, #1, MUL VL]\n"
- ".inst 0x4481478a // smlalt z10.s, p4/M, z28.h, z1.h\n"
- ".inst 0x44804787 // smlalt z7.s, p4/M, z28.h, z0.h\n"
- "ld1b { z28.h }, p3/Z, [x23, x0]\n"
- ".inst 0x454f1b9c // usublb z28.h, z28.b, z15.b\n"
+ "ld1b { z30.h }, p3/Z, [x19, x2]\n"
+ ".inst 0x45511bde // usublb z30.h, z30.b, z17.b\n"
+ ".inst 0x44844353 // smlalb z19.s, p4/M, z26.h, z4.h\n"
+ "ldr x19, [x5, #0x118]\n"
+ ".inst 0x44844749 // smlalt z9.s, p4/M, z26.h, z4.h\n"
+ "ld1b { z26.h }, p3/Z, [x14, x2]\n"
+ ".inst 0x45511b5a // usublb z26.h, z26.b, z17.b\n"
+ ".inst 0x448442e7 // smlalb z7.s, p4/M, z23.h, z4.h\n"
+ ".inst 0x448446e6 // smlalt z6.s, p4/M, z23.h, z4.h\n"
+ ".inst 0x4484438c // smlalb z12.s, p4/M, z28.h, z4.h\n"
+ ".inst 0x44844788 // smlalt z8.s, p4/M, z28.h, z4.h\n"
+ "ld1b { z4.h }, p4/Z, [x1, #6, MUL VL]\n"
+ ".inst 0x454d1884 // usublb z4.h, z4.b, z13.b\n"
+ ".inst 0x448042cb // smlalb z11.s, p4/M, z22.h, z0.h\n"
".inst 0x448046d0 // smlalt z16.s, p4/M, z22.h, z0.h\n"
- ".inst 0x448242ee // smlalb z14.s, p4/M, z23.h, z2.h\n"
- "ldr x24, [x7, #0xf8]\n"
- "uzp1 z9.s, z19.s, z18.s\n"
- ".inst 0x44804725 // smlalt z5.s, p4/M, z25.h, z0.h\n"
- ".inst 0x448142f4 // smlalb z20.s, p4/M, z23.h, z1.h\n"
- "ld1b { z0.h }, p4/Z, [x2, #2, MUL VL]\n"
- ".inst 0x45511800 // usublb z0.h, z0.b, z17.b\n"
- ".inst 0x44814328 // smlalb z8.s, p4/M, z25.h, z1.h\n"
- ".inst 0x44814306 // smlalb z6.s, p4/M, z24.h, z1.h\n"
- "uzp2 z29.s, z19.s, z18.s\n"
- "ld1w { z19.s }, p2/Z, [x16]\n"
- ".inst 0x448246ea // smlalt z10.s, p4/M, z23.h, z2.h\n"
- ".inst 0x448146e7 // smlalt z7.s, p4/M, z23.h, z1.h\n"
- "ld1b { z23.h }, p3/Z, [x15, x0]\n"
- ".inst 0x454f1af7 // usublb z23.h, z23.b, z15.b\n"
+ "ld1b { z22.h }, p3/Z, [x11, x2]\n"
+ ".inst 0x45511ad6 // usublb z22.h, z22.b, z17.b\n"
+ ".inst 0x44804333 // smlalb z19.s, p4/M, z25.h, z0.h\n"
+ ".inst 0x44804729 // smlalt z9.s, p4/M, z25.h, z0.h\n"
+ ".inst 0x448043e7 // smlalb z7.s, p4/M, z31.h, z0.h\n"
+ ".inst 0x448047e6 // smlalt z6.s, p4/M, z31.h, z0.h\n"
+ ".inst 0x448043cc // smlalb z12.s, p4/M, z30.h, z0.h\n"
+ ".inst 0x448047c8 // smlalt z8.s, p4/M, z30.h, z0.h\n"
+ "ld1b { z0.h }, p4/Z, [x1, #7, MUL VL]\n"
+ "inch x1, ALL, MUL #8\n"
+ ".inst 0x4481432b // smlalb z11.s, p4/M, z25.h, z1.h\n"
+ ".inst 0x454d1800 // usublb z0.h, z0.b, z13.b\n"
".inst 0x44814730 // smlalt z16.s, p4/M, z25.h, z1.h\n"
- ".inst 0x448343ee // smlalb z14.s, p4/M, z31.h, z3.h\n"
- "ldr x23, [x7, #0x100]\n"
- "whilelt p0.h, x8, x1\n"
- ".inst 0x44814705 // smlalt z5.s, p4/M, z24.h, z1.h\n"
- ".inst 0x448243f4 // smlalb z20.s, p4/M, z31.h, z2.h\n"
- "ld1b { z1.h }, p4/Z, [x2, #3, MUL VL]\n"
- ".inst 0x45511821 // usublb z1.h, z1.b, z17.b\n"
- ".inst 0x44824308 // smlalb z8.s, p4/M, z24.h, z2.h\n"
- ".inst 0x44824366 // smlalb z6.s, p4/M, z27.h, z2.h\n"
- "addvl x17, x17, #2\n"
- ".inst 0x448347ea // smlalt z10.s, p4/M, z31.h, z3.h\n"
- ".inst 0x448247e7 // smlalt z7.s, p4/M, z31.h, z2.h\n"
- "ld1b { z31.h }, p3/Z, [x22, x0]\n"
- ".inst 0x454f1bff // usublb z31.h, z31.b, z15.b\n"
+ "ld1b { z25.h }, p3/Z, [x13, x2]\n"
+ ".inst 0x44814313 // smlalb z19.s, p4/M, z24.h, z1.h\n"
+ ".inst 0x45511b39 // usublb z25.h, z25.b, z17.b\n"
+ ".inst 0x44814709 // smlalt z9.s, p4/M, z24.h, z1.h\n"
+ ".inst 0x448143c7 // smlalb z7.s, p4/M, z30.h, z1.h\n"
+ ".inst 0x448147c6 // smlalt z6.s, p4/M, z30.h, z1.h\n"
+ ".inst 0x4481434c // smlalb z12.s, p4/M, z26.h, z1.h\n"
+ ".inst 0x44814748 // smlalt z8.s, p4/M, z26.h, z1.h\n"
+ "ld1b { z1.h }, p4/Z, [x1]\n"
+ ".inst 0x454d1821 // usublb z1.h, z1.b, z13.b\n"
+ ".inst 0x4482430b // smlalb z11.s, p4/M, z24.h, z2.h\n"
".inst 0x44824710 // smlalt z16.s, p4/M, z24.h, z2.h\n"
- ".inst 0x448443ce // smlalb z14.s, p4/M, z30.h, z4.h\n"
- "ldr x22, [x7, #0x108]\n"
- ".inst 0x44824765 // smlalt z5.s, p4/M, z27.h, z2.h\n"
- ".inst 0x448343d4 // smlalb z20.s, p4/M, z30.h, z3.h\n"
- "ld1b { z2.h }, p4/Z, [x2, #4, MUL VL]\n"
- ".inst 0x45511842 // usublb z2.h, z2.b, z17.b\n"
- ".inst 0x44834368 // smlalb z8.s, p4/M, z27.h, z3.h\n"
- ".inst 0x448342e6 // smlalb z6.s, p4/M, z23.h, z3.h\n"
- ".inst 0x448447ca // smlalt z10.s, p4/M, z30.h, z4.h\n"
- ".inst 0x448347c7 // smlalt z7.s, p4/M, z30.h, z3.h\n"
- "ld1b { z30.h }, p3/Z, [x21, x0]\n"
- ".inst 0x454f1bde // usublb z30.h, z30.b, z15.b\n"
+ "ld1b { z24.h }, p3/Z, [x12, x2]\n"
+ ".inst 0x45511b18 // usublb z24.h, z24.b, z17.b\n"
+ ".inst 0x44824373 // smlalb z19.s, p4/M, z27.h, z2.h\n"
+ ".inst 0x44824769 // smlalt z9.s, p4/M, z27.h, z2.h\n"
+ ".inst 0x44824347 // smlalb z7.s, p4/M, z26.h, z2.h\n"
+ ".inst 0x44824746 // smlalt z6.s, p4/M, z26.h, z2.h\n"
+ ".inst 0x4482432c // smlalb z12.s, p4/M, z25.h, z2.h\n"
+ ".inst 0x44824728 // smlalt z8.s, p4/M, z25.h, z2.h\n"
+ "ld1b { z2.h }, p4/Z, [x1, #1, MUL VL]\n"
+ ".inst 0x454d1842 // usublb z2.h, z2.b, z13.b\n"
+ ".inst 0x4483436b // smlalb z11.s, p4/M, z27.h, z3.h\n"
".inst 0x44834770 // smlalt z16.s, p4/M, z27.h, z3.h\n"
- ".inst 0x448042ce // smlalb z14.s, p4/M, z22.h, z0.h\n"
- "ldr x21, [x7, #0x110]\n"
- ".inst 0x448346e5 // smlalt z5.s, p4/M, z23.h, z3.h\n"
- ".inst 0x44844354 // smlalb z20.s, p4/M, z26.h, z4.h\n"
- "ld1b { z3.h }, p4/Z, [x2, #5, MUL VL]\n"
- ".inst 0x45511863 // usublb z3.h, z3.b, z17.b\n"
- ".inst 0x448442e8 // smlalb z8.s, p4/M, z23.h, z4.h\n"
- ".inst 0x44844386 // smlalb z6.s, p4/M, z28.h, z4.h\n"
- ".inst 0x448046ca // smlalt z10.s, p4/M, z22.h, z0.h\n"
- ".inst 0x44844747 // smlalt z7.s, p4/M, z26.h, z4.h\n"
- "ld1b { z26.h }, p3/Z, [x14, x0]\n"
- ".inst 0x454f1b5a // usublb z26.h, z26.b, z15.b\n"
+ "ld1b { z27.h }, p3/Z, [x10, x2]\n"
+ ".inst 0x45511b7b // usublb z27.h, z27.b, z17.b\n"
+ ".inst 0x448342f3 // smlalb z19.s, p4/M, z23.h, z3.h\n"
+ ".inst 0x448346e9 // smlalt z9.s, p4/M, z23.h, z3.h\n"
+ ".inst 0x44834327 // smlalb z7.s, p4/M, z25.h, z3.h\n"
+ ".inst 0x44834726 // smlalt z6.s, p4/M, z25.h, z3.h\n"
+ ".inst 0x4483430c // smlalb z12.s, p4/M, z24.h, z3.h\n"
+ ".inst 0x44834708 // smlalt z8.s, p4/M, z24.h, z3.h\n"
+ "ld1b { z3.h }, p4/Z, [x1, #2, MUL VL]\n"
+ ".inst 0x454d1863 // usublb z3.h, z3.b, z13.b\n"
+ ".inst 0x448442eb // smlalb z11.s, p4/M, z23.h, z4.h\n"
".inst 0x448446f0 // smlalt z16.s, p4/M, z23.h, z4.h\n"
- ".inst 0x4481432e // smlalb z14.s, p4/M, z25.h, z1.h\n"
- "ld1b { z22.h }, p3/Z, [x20, x0]\n"
- ".inst 0x454f1ad6 // usublb z22.h, z22.b, z15.b\n"
- ".inst 0x44844785 // smlalt z5.s, p4/M, z28.h, z4.h\n"
- ".inst 0x44804334 // smlalb z20.s, p4/M, z25.h, z0.h\n"
- "ld1b { z4.h }, p4/Z, [x2, #6, MUL VL]\n"
- ".inst 0x45511884 // usublb z4.h, z4.b, z17.b\n"
- ".inst 0x448043e8 // smlalb z8.s, p4/M, z31.h, z0.h\n"
- ".inst 0x448043c6 // smlalb z6.s, p4/M, z30.h, z0.h\n"
- "ldr x20, [x7, #0x118]\n"
- "ldr x14, [%x[params], %[offsetof_Params_bias]]\n"
- ".inst 0x4481472a // smlalt z10.s, p4/M, z25.h, z1.h\n"
- ".inst 0x44804727 // smlalt z7.s, p4/M, z25.h, z0.h\n"
- "ld1b { z25.h }, p3/Z, [x13, x0]\n"
- ".inst 0x454f1b39 // usublb z25.h, z25.b, z15.b\n"
+ "ld1b { z23.h }, p3/Z, [x9, x2]\n"
+ ".inst 0x45511af7 // usublb z23.h, z23.b, z17.b\n"
+ ".inst 0x44844393 // smlalb z19.s, p4/M, z28.h, z4.h\n"
+ ".inst 0x44844789 // smlalt z9.s, p4/M, z28.h, z4.h\n"
+ "ld1b { z28.h }, p3/Z, [x26, x2]\n"
+ ".inst 0x45511b9c // usublb z28.h, z28.b, z17.b\n"
+ ".inst 0x44844307 // smlalb z7.s, p4/M, z24.h, z4.h\n"
+ ".inst 0x44844706 // smlalt z6.s, p4/M, z24.h, z4.h\n"
+ ".inst 0x448442cc // smlalb z12.s, p4/M, z22.h, z4.h\n"
+ ".inst 0x448446c8 // smlalt z8.s, p4/M, z22.h, z4.h\n"
+ "ld1b { z4.h }, p4/Z, [x1, #3, MUL VL]\n"
+ ".inst 0x454d1884 // usublb z4.h, z4.b, z13.b\n"
+ ".inst 0x448043eb // smlalb z11.s, p4/M, z31.h, z0.h\n"
".inst 0x448047f0 // smlalt z16.s, p4/M, z31.h, z0.h\n"
- ".inst 0x4482430e // smlalb z14.s, p4/M, z24.h, z2.h\n"
- ".inst 0x448047c5 // smlalt z5.s, p4/M, z30.h, z0.h\n"
- ".inst 0x44814314 // smlalb z20.s, p4/M, z24.h, z1.h\n"
- "ld1b { z0.h }, p4/Z, [x2, #7, MUL VL]\n"
- "inch x2, ALL, MUL #8\n"
- ".inst 0x448143c8 // smlalb z8.s, p4/M, z30.h, z1.h\n"
- ".inst 0x44814346 // smlalb z6.s, p4/M, z26.h, z1.h\n"
- ".inst 0x45511800 // usublb z0.h, z0.b, z17.b\n"
- ".inst 0x4482470a // smlalt z10.s, p4/M, z24.h, z2.h\n"
- ".inst 0x44814707 // smlalt z7.s, p4/M, z24.h, z1.h\n"
- "ld1b { z24.h }, p3/Z, [x12, x0]\n"
- ".inst 0x454f1b18 // usublb z24.h, z24.b, z15.b\n"
+ "ld1b { z31.h }, p3/Z, [x28, x2]\n"
+ ".inst 0x45511bff // usublb z31.h, z31.b, z17.b\n"
+ ".inst 0x448043d3 // smlalb z19.s, p4/M, z30.h, z0.h\n"
+ ".inst 0x448047c9 // smlalt z9.s, p4/M, z30.h, z0.h\n"
+ ".inst 0x44804367 // smlalb z7.s, p4/M, z27.h, z0.h\n"
+ ".inst 0x44804766 // smlalt z6.s, p4/M, z27.h, z0.h\n"
+ ".inst 0x448042ec // smlalb z12.s, p4/M, z23.h, z0.h\n"
+ ".inst 0x448046e8 // smlalt z8.s, p4/M, z23.h, z0.h\n"
+ "ld1b { z0.h }, p4/Z, [x1, #4, MUL VL]\n"
+ ".inst 0x454d1800 // usublb z0.h, z0.b, z13.b\n"
+ ".inst 0x448143cb // smlalb z11.s, p4/M, z30.h, z1.h\n"
".inst 0x448147d0 // smlalt z16.s, p4/M, z30.h, z1.h\n"
- ".inst 0x4483436e // smlalb z14.s, p4/M, z27.h, z3.h\n"
- ".inst 0x44814745 // smlalt z5.s, p4/M, z26.h, z1.h\n"
- ".inst 0x44824374 // smlalb z20.s, p4/M, z27.h, z2.h\n"
- "ld1b { z1.h }, p4/Z, [x2]\n"
- ".inst 0x45511821 // usublb z1.h, z1.b, z17.b\n"
- ".inst 0x44824348 // smlalb z8.s, p4/M, z26.h, z2.h\n"
- ".inst 0x44824326 // smlalb z6.s, p4/M, z25.h, z2.h\n"
- ".inst 0x4483476a // smlalt z10.s, p4/M, z27.h, z3.h\n"
- ".inst 0x44824767 // smlalt z7.s, p4/M, z27.h, z2.h\n"
- "ld1b { z27.h }, p3/Z, [x11, x0]\n"
- ".inst 0x454f1b7b // usublb z27.h, z27.b, z15.b\n"
+ "ld1b { z30.h }, p3/Z, [x27, x2]\n"
+ ".inst 0x45511bde // usublb z30.h, z30.b, z17.b\n"
+ ".inst 0x44814353 // smlalb z19.s, p4/M, z26.h, z1.h\n"
+ ".inst 0x44814749 // smlalt z9.s, p4/M, z26.h, z1.h\n"
+ ".inst 0x448142e7 // smlalb z7.s, p4/M, z23.h, z1.h\n"
+ ".inst 0x448146e6 // smlalt z6.s, p4/M, z23.h, z1.h\n"
+ ".inst 0x448143ec // smlalb z12.s, p4/M, z31.h, z1.h\n"
+ ".inst 0x448147e8 // smlalt z8.s, p4/M, z31.h, z1.h\n"
+ "ld1b { z1.h }, p4/Z, [x1, #5, MUL VL]\n"
+ ".inst 0x454d1821 // usublb z1.h, z1.b, z13.b\n"
+ ".inst 0x4482434b // smlalb z11.s, p4/M, z26.h, z2.h\n"
".inst 0x44824750 // smlalt z16.s, p4/M, z26.h, z2.h\n"
- ".inst 0x448442ee // smlalb z14.s, p4/M, z23.h, z4.h\n"
- ".inst 0x44824725 // smlalt z5.s, p4/M, z25.h, z2.h\n"
- ".inst 0x448342f4 // smlalb z20.s, p4/M, z23.h, z3.h\n"
- "ld1b { z2.h }, p4/Z, [x2, #1, MUL VL]\n"
- ".inst 0x45511842 // usublb z2.h, z2.b, z17.b\n"
- ".inst 0x44834328 // smlalb z8.s, p4/M, z25.h, z3.h\n"
- ".inst 0x44834306 // smlalb z6.s, p4/M, z24.h, z3.h\n"
- ".inst 0x448446ea // smlalt z10.s, p4/M, z23.h, z4.h\n"
- ".inst 0x448346e7 // smlalt z7.s, p4/M, z23.h, z3.h\n"
- "ld1b { z23.h }, p3/Z, [x10, x0]\n"
- ".inst 0x454f1af7 // usublb z23.h, z23.b, z15.b\n"
+ "ld1b { z26.h }, p3/Z, [x25, x2]\n"
+ ".inst 0x45511b5a // usublb z26.h, z26.b, z17.b\n"
+ ".inst 0x44824333 // smlalb z19.s, p4/M, z25.h, z2.h\n"
+ ".inst 0x44824729 // smlalt z9.s, p4/M, z25.h, z2.h\n"
+ ".inst 0x448243e7 // smlalb z7.s, p4/M, z31.h, z2.h\n"
+ ".inst 0x448247e6 // smlalt z6.s, p4/M, z31.h, z2.h\n"
+ ".inst 0x448243cc // smlalb z12.s, p4/M, z30.h, z2.h\n"
+ ".inst 0x448247c8 // smlalt z8.s, p4/M, z30.h, z2.h\n"
+ "ld1b { z2.h }, p4/Z, [x1, #6, MUL VL]\n"
+ ".inst 0x454d1842 // usublb z2.h, z2.b, z13.b\n"
+ ".inst 0x4483432b // smlalb z11.s, p4/M, z25.h, z3.h\n"
".inst 0x44834730 // smlalt z16.s, p4/M, z25.h, z3.h\n"
- ".inst 0x448043ee // smlalb z14.s, p4/M, z31.h, z0.h\n"
- ".inst 0x44834705 // smlalt z5.s, p4/M, z24.h, z3.h\n"
- ".inst 0x44844394 // smlalb z20.s, p4/M, z28.h, z4.h\n"
- "ld1b { z3.h }, p4/Z, [x2, #2, MUL VL]\n"
- ".inst 0x45511863 // usublb z3.h, z3.b, z17.b\n"
- ".inst 0x44844308 // smlalb z8.s, p4/M, z24.h, z4.h\n"
- ".inst 0x448442c6 // smlalb z6.s, p4/M, z22.h, z4.h\n"
- ".inst 0x448047ea // smlalt z10.s, p4/M, z31.h, z0.h\n"
- ".inst 0x44844787 // smlalt z7.s, p4/M, z28.h, z4.h\n"
- "ld1b { z31.h }, p3/Z, [x9, x0]\n"
- ".inst 0x454f1bff // usublb z31.h, z31.b, z15.b\n"
+ "ld1b { z25.h }, p3/Z, [x24, x2]\n"
+ ".inst 0x45511b39 // usublb z25.h, z25.b, z17.b\n"
+ ".inst 0x44834313 // smlalb z19.s, p4/M, z24.h, z3.h\n"
+ ".inst 0x44834709 // smlalt z9.s, p4/M, z24.h, z3.h\n"
+ ".inst 0x448343c7 // smlalb z7.s, p4/M, z30.h, z3.h\n"
+ ".inst 0x448347c6 // smlalt z6.s, p4/M, z30.h, z3.h\n"
+ ".inst 0x4483438c // smlalb z12.s, p4/M, z28.h, z3.h\n"
+ ".inst 0x44834788 // smlalt z8.s, p4/M, z28.h, z3.h\n"
+ "ld1b { z3.h }, p4/Z, [x1, #7, MUL VL]\n"
+ "inch x1, ALL, MUL #8\n"
+ ".inst 0x4484430b // smlalb z11.s, p4/M, z24.h, z4.h\n"
+ ".inst 0x454d1863 // usublb z3.h, z3.b, z13.b\n"
".inst 0x44844710 // smlalt z16.s, p4/M, z24.h, z4.h\n"
- ".inst 0x448143ce // smlalb z14.s, p4/M, z30.h, z1.h\n"
- "ld1b { z28.h }, p3/Z, [x27, x0]\n"
- ".inst 0x454f1b9c // usublb z28.h, z28.b, z15.b\n"
- ".inst 0x448446c5 // smlalt z5.s, p4/M, z22.h, z4.h\n"
- ".inst 0x448043d4 // smlalb z20.s, p4/M, z30.h, z0.h\n"
- "ld1b { z4.h }, p4/Z, [x2, #3, MUL VL]\n"
- ".inst 0x45511884 // usublb z4.h, z4.b, z17.b\n"
- ".inst 0x44804368 // smlalb z8.s, p4/M, z27.h, z0.h\n"
- ".inst 0x448042e6 // smlalb z6.s, p4/M, z23.h, z0.h\n"
- ".inst 0x448147ca // smlalt z10.s, p4/M, z30.h, z1.h\n"
- ".inst 0x448047c7 // smlalt z7.s, p4/M, z30.h, z0.h\n"
- "ld1b { z30.h }, p3/Z, [x28, x0]\n"
- ".inst 0x454f1bde // usublb z30.h, z30.b, z15.b\n"
+ "ld1b { z24.h }, p3/Z, [x23, x2]\n"
+ ".inst 0x448442d3 // smlalb z19.s, p4/M, z22.h, z4.h\n"
+ ".inst 0x45511b18 // usublb z24.h, z24.b, z17.b\n"
+ ".inst 0x448446c9 // smlalt z9.s, p4/M, z22.h, z4.h\n"
+ ".inst 0x44844387 // smlalb z7.s, p4/M, z28.h, z4.h\n"
+ ".inst 0x44844786 // smlalt z6.s, p4/M, z28.h, z4.h\n"
+ ".inst 0x4484434c // smlalb z12.s, p4/M, z26.h, z4.h\n"
+ ".inst 0x44844748 // smlalt z8.s, p4/M, z26.h, z4.h\n"
+ "ld1b { z4.h }, p4/Z, [x1]\n"
+ "inch x1\n"
+ ".inst 0x4480436b // smlalb z11.s, p4/M, z27.h, z0.h\n"
+ ".inst 0x454d1884 // usublb z4.h, z4.b, z13.b\n"
".inst 0x44804770 // smlalt z16.s, p4/M, z27.h, z0.h\n"
- ".inst 0x4482434e // smlalb z14.s, p4/M, z26.h, z2.h\n"
- ".inst 0x448046e5 // smlalt z5.s, p4/M, z23.h, z0.h\n"
- ".inst 0x44814354 // smlalb z20.s, p4/M, z26.h, z1.h\n"
- "ld1b { z0.h }, p4/Z, [x2, #4, MUL VL]\n"
- ".inst 0x45511800 // usublb z0.h, z0.b, z17.b\n"
- ".inst 0x448142e8 // smlalb z8.s, p4/M, z23.h, z1.h\n"
- ".inst 0x448143e6 // smlalb z6.s, p4/M, z31.h, z1.h\n"
- ".inst 0x4482474a // smlalt z10.s, p4/M, z26.h, z2.h\n"
- ".inst 0x44814747 // smlalt z7.s, p4/M, z26.h, z1.h\n"
- "ld1b { z26.h }, p3/Z, [x26, x0]\n"
- ".inst 0x454f1b5a // usublb z26.h, z26.b, z15.b\n"
+ "ld1b { z27.h }, p3/Z, [x22, x2]\n"
+ ".inst 0x448042f3 // smlalb z19.s, p4/M, z23.h, z0.h\n"
+ ".inst 0x45511b7b // usublb z27.h, z27.b, z17.b\n"
+ ".inst 0x448046e9 // smlalt z9.s, p4/M, z23.h, z0.h\n"
+ ".inst 0x44804327 // smlalb z7.s, p4/M, z25.h, z0.h\n"
+ ".inst 0x44804726 // smlalt z6.s, p4/M, z25.h, z0.h\n"
+ "ld1b { z25.h }, p3/Z, [x21, x2]\n"
+ ".inst 0x45511b39 // usublb z25.h, z25.b, z17.b\n"
+ ".inst 0x4480430c // smlalb z12.s, p4/M, z24.h, z0.h\n"
+ ".inst 0x44804708 // smlalt z8.s, p4/M, z24.h, z0.h\n"
+ ".inst 0x448142eb // smlalb z11.s, p4/M, z23.h, z1.h\n"
".inst 0x448146f0 // smlalt z16.s, p4/M, z23.h, z1.h\n"
- ".inst 0x4483432e // smlalb z14.s, p4/M, z25.h, z3.h\n"
- ".inst 0x448147e5 // smlalt z5.s, p4/M, z31.h, z1.h\n"
- ".inst 0x44824334 // smlalb z20.s, p4/M, z25.h, z2.h\n"
- "ld1b { z1.h }, p4/Z, [x2, #5, MUL VL]\n"
- ".inst 0x45511821 // usublb z1.h, z1.b, z17.b\n"
- ".inst 0x448243e8 // smlalb z8.s, p4/M, z31.h, z2.h\n"
- ".inst 0x448243c6 // smlalb z6.s, p4/M, z30.h, z2.h\n"
- ".inst 0x4483472a // smlalt z10.s, p4/M, z25.h, z3.h\n"
- ".inst 0x44824727 // smlalt z7.s, p4/M, z25.h, z2.h\n"
- "ld1b { z25.h }, p3/Z, [x25, x0]\n"
- ".inst 0x454f1b39 // usublb z25.h, z25.b, z15.b\n"
+ ".inst 0x448143f3 // smlalb z19.s, p4/M, z31.h, z1.h\n"
+ ".inst 0x448147e9 // smlalt z9.s, p4/M, z31.h, z1.h\n"
+ ".inst 0x44814307 // smlalb z7.s, p4/M, z24.h, z1.h\n"
+ ".inst 0x44814706 // smlalt z6.s, p4/M, z24.h, z1.h\n"
+ "ld1b { z24.h }, p3/Z, [x20, x2]\n"
+ ".inst 0x45511b18 // usublb z24.h, z24.b, z17.b\n"
+ ".inst 0x4481436c // smlalb z12.s, p4/M, z27.h, z1.h\n"
+ ".inst 0x44814768 // smlalt z8.s, p4/M, z27.h, z1.h\n"
+ ".inst 0x448243eb // smlalb z11.s, p4/M, z31.h, z2.h\n"
".inst 0x448247f0 // smlalt z16.s, p4/M, z31.h, z2.h\n"
- ".inst 0x4484430e // smlalb z14.s, p4/M, z24.h, z4.h\n"
- ".inst 0x448247c5 // smlalt z5.s, p4/M, z30.h, z2.h\n"
- ".inst 0x44834314 // smlalb z20.s, p4/M, z24.h, z3.h\n"
- "ld1b { z2.h }, p4/Z, [x2, #6, MUL VL]\n"
- ".inst 0x45511842 // usublb z2.h, z2.b, z17.b\n"
- ".inst 0x448343c8 // smlalb z8.s, p4/M, z30.h, z3.h\n"
- ".inst 0x44834386 // smlalb z6.s, p4/M, z28.h, z3.h\n"
- ".inst 0x4484470a // smlalt z10.s, p4/M, z24.h, z4.h\n"
- ".inst 0x44834707 // smlalt z7.s, p4/M, z24.h, z3.h\n"
- "ld1b { z24.h }, p3/Z, [x24, x0]\n"
- ".inst 0x454f1b18 // usublb z24.h, z24.b, z15.b\n"
+ ".inst 0x448243d3 // smlalb z19.s, p4/M, z30.h, z2.h\n"
+ ".inst 0x448247c9 // smlalt z9.s, p4/M, z30.h, z2.h\n"
+ ".inst 0x44824367 // smlalb z7.s, p4/M, z27.h, z2.h\n"
+ ".inst 0x44824766 // smlalt z6.s, p4/M, z27.h, z2.h\n"
+ "ld1b { z27.h }, p3/Z, [x19, x2]\n"
+ "inch x2\n"
+ ".inst 0x4482432c // smlalb z12.s, p4/M, z25.h, z2.h\n"
+ "whilelt p2.s, x2, x0\n"
+ ".inst 0x44824728 // smlalt z8.s, p4/M, z25.h, z2.h\n"
+ "mov x19, x2\n"
+ ".inst 0x448343cb // smlalb z11.s, p4/M, z30.h, z3.h\n"
+ ".inst 0x45511b7b // usublb z27.h, z27.b, z17.b\n"
".inst 0x448347d0 // smlalt z16.s, p4/M, z30.h, z3.h\n"
- ".inst 0x4480436e // smlalb z14.s, p4/M, z27.h, z0.h\n"
- ".inst 0x44834785 // smlalt z5.s, p4/M, z28.h, z3.h\n"
- ".inst 0x448442d4 // smlalb z20.s, p4/M, z22.h, z4.h\n"
- "ld1b { z3.h }, p4/Z, [x2, #7, MUL VL]\n"
- "inch x2, ALL, MUL #8\n"
- ".inst 0x44844388 // smlalb z8.s, p4/M, z28.h, z4.h\n"
- ".inst 0x44844346 // smlalb z6.s, p4/M, z26.h, z4.h\n"
- ".inst 0x45511863 // usublb z3.h, z3.b, z17.b\n"
- ".inst 0x4480476a // smlalt z10.s, p4/M, z27.h, z0.h\n"
+ "incw x19\n"
+ ".inst 0x44834393 // smlalb z19.s, p4/M, z28.h, z3.h\n"
+ "whilelt p1.s, x19, x0\n"
+ ".inst 0x44834789 // smlalt z9.s, p4/M, z28.h, z3.h\n"
+ "whilelt p3.h, x2, x0\n"
+ ".inst 0x44834327 // smlalb z7.s, p4/M, z25.h, z3.h\n"
+ ".inst 0x44834726 // smlalt z6.s, p4/M, z25.h, z3.h\n"
+ ".inst 0x4483430c // smlalb z12.s, p4/M, z24.h, z3.h\n"
+ ".inst 0x44834708 // smlalt z8.s, p4/M, z24.h, z3.h\n"
+ ".inst 0x4484438b // smlalb z11.s, p4/M, z28.h, z4.h\n"
".inst 0x44844790 // smlalt z16.s, p4/M, z28.h, z4.h\n"
- "ld1b { z27.h }, p3/Z, [x23, x0]\n"
- ".inst 0x454f1b7b // usublb z27.h, z27.b, z15.b\n"
- ".inst 0x448142ee // smlalb z14.s, p4/M, z23.h, z1.h\n"
- ".inst 0x448446c7 // smlalt z7.s, p4/M, z22.h, z4.h\n"
- "ld1w { z18.s }, p1/Z, [x16, #1, MUL VL]\n"
- "addvl x16, x16, #2\n"
- ".inst 0x44844745 // smlalt z5.s, p4/M, z26.h, z4.h\n"
- ".inst 0x448042f4 // smlalb z20.s, p4/M, z23.h, z0.h\n"
- "ld1b { z4.h }, p4/Z, [x2]\n"
- ".inst 0x45511884 // usublb z4.h, z4.b, z17.b\n"
- ".inst 0x44804328 // smlalb z8.s, p4/M, z25.h, z0.h\n"
- ".inst 0x44804306 // smlalb z6.s, p4/M, z24.h, z0.h\n"
- "inch x2\n"
- ".inst 0x448146ea // smlalt z10.s, p4/M, z23.h, z1.h\n"
- ".inst 0x44804730 // smlalt z16.s, p4/M, z25.h, z0.h\n"
- "ld1b { z25.h }, p3/Z, [x22, x0]\n"
- ".inst 0x454f1b39 // usublb z25.h, z25.b, z15.b\n"
- ".inst 0x448243ee // smlalb z14.s, p4/M, z31.h, z2.h\n"
- ".inst 0x448046e7 // smlalt z7.s, p4/M, z23.h, z0.h\n"
- "uzp1 z23.s, z19.s, z18.s\n"
- ".inst 0x44804705 // smlalt z5.s, p4/M, z24.h, z0.h\n"
- ".inst 0x448143f4 // smlalb z20.s, p4/M, z31.h, z1.h\n"
- "uzp2 z22.s, z19.s, z18.s\n"
- ".inst 0x44814308 // smlalb z8.s, p4/M, z24.h, z1.h\n"
- ".inst 0x44814366 // smlalb z6.s, p4/M, z27.h, z1.h\n"
- ".inst 0x448247ea // smlalt z10.s, p4/M, z31.h, z2.h\n"
- ".inst 0x44814710 // smlalt z16.s, p4/M, z24.h, z1.h\n"
- "ld1b { z24.h }, p3/Z, [x21, x0]\n"
- ".inst 0x454f1b18 // usublb z24.h, z24.b, z15.b\n"
- ".inst 0x448343ce // smlalb z14.s, p4/M, z30.h, z3.h\n"
- ".inst 0x448147e7 // smlalt z7.s, p4/M, z31.h, z1.h\n"
- ".inst 0x44814765 // smlalt z5.s, p4/M, z27.h, z1.h\n"
- ".inst 0x448243d4 // smlalb z20.s, p4/M, z30.h, z2.h\n"
- ".inst 0x44824368 // smlalb z8.s, p4/M, z27.h, z2.h\n"
- ".inst 0x44824326 // smlalb z6.s, p4/M, z25.h, z2.h\n"
- ".inst 0x448347ca // smlalt z10.s, p4/M, z30.h, z3.h\n"
- ".inst 0x44824770 // smlalt z16.s, p4/M, z27.h, z2.h\n"
- "ld1b { z27.h }, p3/Z, [x20, x0]\n"
- ".inst 0x454f1b7b // usublb z27.h, z27.b, z15.b\n"
- ".inst 0x4484438e // smlalb z14.s, p4/M, z28.h, z4.h\n"
- ".inst 0x448247c7 // smlalt z7.s, p4/M, z30.h, z2.h\n"
- ".inst 0x04a975ce // sqrdmulh z14.s, z14.s, z9.s\n"
- "inch x0\n"
- ".inst 0x44824725 // smlalt z5.s, p4/M, z25.h, z2.h\n"
- ".inst 0x44834394 // smlalb z20.s, p4/M, z28.h, z3.h\n"
- "and z21.d, z14.d, z23.d\n"
- "mov x20, x0\n"
- ".inst 0x44834328 // smlalb z8.s, p4/M, z25.h, z3.h\n"
- ".inst 0x44834306 // smlalb z6.s, p4/M, z24.h, z3.h\n"
- "asr z21.s, z21.s, #0x1f\n"
- "incw x20\n"
- ".inst 0x4484478a // smlalt z10.s, p4/M, z28.h, z4.h\n"
- ".inst 0x44834787 // smlalt z7.s, p4/M, z28.h, z3.h\n"
- ".inst 0x04bd754a // sqrdmulh z10.s, z10.s, z29.s\n"
- "whilelt p2.s, x0, x1\n"
- ".inst 0x44834730 // smlalt z16.s, p4/M, z25.h, z3.h\n"
- ".inst 0x44834705 // smlalt z5.s, p4/M, z24.h, z3.h\n"
- "and z3.d, z10.d, z22.d\n"
- "whilelt p1.s, x20, x1\n"
- ".inst 0x44844354 // smlalb z20.s, p4/M, z26.h, z4.h\n"
- ".inst 0x44844308 // smlalb z8.s, p4/M, z24.h, z4.h\n"
- ".inst 0x04a97694 // sqrdmulh z20.s, z20.s, z9.s\n"
- "whilelt p3.h, x0, x1\n"
- ".inst 0x44844366 // smlalb z6.s, p4/M, z27.h, z4.h\n"
- ".inst 0x44844747 // smlalt z7.s, p4/M, z26.h, z4.h\n"
- ".inst 0x04a97508 // sqrdmulh z8.s, z8.s, z9.s\n"
- ".inst 0x44844710 // smlalt z16.s, p4/M, z24.h, z4.h\n"
- ".inst 0x44844765 // smlalt z5.s, p4/M, z27.h, z4.h\n"
- ".inst 0x04a974c6 // sqrdmulh z6.s, z6.s, z9.s\n"
- "sqadd z14.s, z14.s, z21.s\n"
- "asr z3.s, z3.s, #0x1f\n"
- ".inst 0x448292ee // srshl z14.s, p4/M, z14.s, z23.s\n"
- "and z19.d, z20.d, z23.d\n"
- ".inst 0x04bd74e7 // sqrdmulh z7.s, z7.s, z29.s\n"
- "and z18.d, z8.d, z23.d\n"
- ".inst 0x04bd7610 // sqrdmulh z16.s, z16.s, z29.s\n"
- "and z21.d, z6.d, z23.d\n"
- ".inst 0x04bd74a5 // sqrdmulh z5.s, z5.s, z29.s\n"
- "sqadd z10.s, z10.s, z3.s\n"
- "asr z19.s, z19.s, #0x1f\n"
- ".inst 0x448292ca // srshl z10.s, p4/M, z10.s, z22.s\n"
- "and z1.d, z7.d, z22.d\n"
+ ".inst 0x44844353 // smlalb z19.s, p4/M, z26.h, z4.h\n"
+ ".inst 0x44844749 // smlalt z9.s, p4/M, z26.h, z4.h\n"
+ ".inst 0x04b5756b // sqrdmulh z11.s, z11.s, z21.s\n"
+ ".inst 0x04aa7610 // sqrdmulh z16.s, z16.s, z10.s\n"
+ ".inst 0x04b57673 // sqrdmulh z19.s, z19.s, z21.s\n"
+ ".inst 0x04aa7529 // sqrdmulh z9.s, z9.s, z10.s\n"
+ "and z31.d, z11.d, z29.d\n"
+ "asr z31.s, z31.s, #0x1f\n"
+ "and z23.d, z16.d, z20.d\n"
+ "and z25.d, z19.d, z29.d\n"
+ "asr z23.s, z23.s, #0x1f\n"
+ "and z18.d, z9.d, z20.d\n"
+ ".inst 0x44844307 // smlalb z7.s, p4/M, z24.h, z4.h\n"
+ "asr z25.s, z25.s, #0x1f\n"
+ ".inst 0x44844706 // smlalt z6.s, p4/M, z24.h, z4.h\n"
"asr z18.s, z18.s, #0x1f\n"
- "and z2.d, z16.d, z22.d\n"
- "asr z21.s, z21.s, #0x1f\n"
- "and z3.d, z5.d, z22.d\n"
- "sqadd z20.s, z20.s, z19.s\n"
- ".inst 0x448292f4 // srshl z20.s, p4/M, z20.s, z23.s\n"
+ "sqadd z11.s, z11.s, z31.s\n"
+ ".inst 0x4484436c // smlalb z12.s, p4/M, z27.h, z4.h\n"
+ ".inst 0x04b574e7 // sqrdmulh z7.s, z7.s, z21.s\n"
+ "sqadd z16.s, z16.s, z23.s\n"
+ "sqadd z19.s, z19.s, z25.s\n"
+ ".inst 0x04aa74c6 // sqrdmulh z6.s, z6.s, z10.s\n"
+ "sqadd z9.s, z9.s, z18.s\n"
+ "and z1.d, z7.d, z29.d\n"
"asr z1.s, z1.s, #0x1f\n"
- "sqadd z8.s, z8.s, z18.s\n"
- ".inst 0x448292e8 // srshl z8.s, p4/M, z8.s, z23.s\n"
- "asr z2.s, z2.s, #0x1f\n"
- "sqadd z6.s, z6.s, z21.s\n"
- ".inst 0x448292e6 // srshl z6.s, p4/M, z6.s, z23.s\n"
- "asr z3.s, z3.s, #0x1f\n"
+ "and z18.d, z6.d, z20.d\n"
+ ".inst 0x04b5758c // sqrdmulh z12.s, z12.s, z21.s\n"
+ "asr z18.s, z18.s, #0x1f\n"
+ ".inst 0x44844768 // smlalt z8.s, p4/M, z27.h, z4.h\n"
+ ".inst 0x448293ab // srshl z11.s, p4/M, z11.s, z29.s\n"
+ "and z30.d, z12.d, z29.d\n"
+ "asr z30.s, z30.s, #0x1f\n"
+ "add z11.s, z11.s, z14.s\n"
"sqadd z7.s, z7.s, z1.s\n"
- ".inst 0x448292c7 // srshl z7.s, p4/M, z7.s, z22.s\n"
- "sqadd z16.s, z16.s, z2.s\n"
- "sqadd z5.s, z5.s, z3.s\n"
- ".inst 0x448292d0 // srshl z16.s, p4/M, z16.s, z22.s\n"
- ".inst 0x448292c5 // srshl z5.s, p4/M, z5.s, z22.s\n"
- ".inst 0x453041ce // sqxtnb z14.h, z14.s\n"
- ".inst 0x45304294 // sqxtnb z20.h, z20.s\n"
- ".inst 0x45304108 // sqxtnb z8.h, z8.s\n"
- ".inst 0x453040c6 // sqxtnb z6.h, z6.s\n"
- ".inst 0x4530454e // sqxtnt z14.h, z10.s\n"
- ".inst 0x453044f4 // sqxtnt z20.h, z7.s\n"
- ".inst 0x45304608 // sqxtnt z8.h, z16.s\n"
- ".inst 0x453044a6 // sqxtnt z6.h, z5.s\n"
- "sqadd z14.h, z14.h, z12.h\n"
- "sqadd z20.h, z20.h, z12.h\n"
- "smax z14.h, p4/M, z14.h, z13.h\n"
- "smax z20.h, p4/M, z20.h, z13.h\n"
- "sqadd z8.h, z8.h, z12.h\n"
- "sqadd z6.h, z6.h, z12.h\n"
- "smax z8.h, p4/M, z8.h, z13.h\n"
- "smax z6.h, p4/M, z6.h, z13.h\n"
- "smin z14.h, p4/M, z14.h, z11.h\n"
- "smin z20.h, p4/M, z20.h, z11.h\n"
- "st1b { z14.h }, p0, [x3, x8]\n"
- "smin z8.h, p4/M, z8.h, z11.h\n"
- "smin z6.h, p4/M, z6.h, z11.h\n"
- "st1b { z20.h }, p0, [x4, x8]\n"
- "st1b { z8.h }, p0, [x5, x8]\n"
- "st1b { z6.h }, p0, [x6, x8]\n"
- "ld1w { z30.s }, p2/Z, [x14]\n"
- "ld1w { z16.s }, p1/Z, [x14, #1, MUL VL]\n"
- "uzp1 z14.s, z30.s, z16.s\n"
- "ld1b { z0.h }, p4/Z, [x2]\n"
- "ld1b { z1.h }, p4/Z, [x2, #1, MUL VL]\n"
- "uzp2 z10.s, z30.s, z16.s\n"
- "addvl x14, x14, #2\n"
- "ld1b { z2.h }, p4/Z, [x2, #2, MUL VL]\n"
- "ld1b { z3.h }, p4/Z, [x2, #3, MUL VL]\n"
- "inch x8\n"
- "str x14, [%x[params], %[offsetof_Params_bias]]\n"
- "ld1b { z4.h }, p4/Z, [x2, #4, MUL VL]\n"
- "ldp x9, x28, [x7, #0x0]\n"
- "mov z20.d, z14.d\n"
- "mov z7.d, z10.d\n"
- "ldp x27, x26, [x7, #0x10]\n"
- "ldp x25, x24, [x7, #0x20]\n"
- "mov z8.d, z14.d\n"
- "mov z16.d, z10.d\n"
- "ldp x23, x22, [x7, #0x30]\n"
- "ldp x21, x20, [x7, #0x40]\n"
- "mov z6.d, z14.d\n"
- "mov z5.d, z10.d\n"
- "ld1b { z31.h }, p3/Z, [x9, x0]\n"
- "ld1b { z30.h }, p3/Z, [x28, x0]\n"
- ".inst 0x45511800 // usublb z0.h, z0.b, z17.b\n"
- ".inst 0x45511821 // usublb z1.h, z1.b, z17.b\n"
- "ld1b { z29.h }, p3/Z, [x27, x0]\n"
- "ld1b { z28.h }, p3/Z, [x26, x0]\n"
- ".inst 0x45511842 // usublb z2.h, z2.b, z17.b\n"
- ".inst 0x45511863 // usublb z3.h, z3.b, z17.b\n"
- "ld1b { z27.h }, p3/Z, [x25, x0]\n"
- "ld1b { z23.h }, p3/Z, [x24, x0]\n"
- ".inst 0x45511884 // usublb z4.h, z4.b, z17.b\n"
- ".inst 0x454f1bff // usublb z31.h, z31.b, z15.b\n"
- "ld1b { z25.h }, p3/Z, [x23, x0]\n"
- "ld1b { z24.h }, p3/Z, [x22, x0]\n"
- ".inst 0x454f1bde // usublb z30.h, z30.b, z15.b\n"
- ".inst 0x454f1bbd // usublb z29.h, z29.b, z15.b\n"
- "ld1b { z26.h }, p3/Z, [x21, x0]\n"
- "ld1b { z22.h }, p3/Z, [x20, x0]\n"
- ".inst 0x454f1b9c // usublb z28.h, z28.b, z15.b\n"
- ".inst 0x454f1b7b // usublb z27.h, z27.b, z15.b\n"
- ".inst 0x454f1af7 // usublb z23.h, z23.b, z15.b\n"
- ".inst 0x454f1b39 // usublb z25.h, z25.b, z15.b\n"
- ".inst 0x454f1b18 // usublb z24.h, z24.b, z15.b\n"
- ".inst 0x454f1b5a // usublb z26.h, z26.b, z15.b\n"
- ".inst 0x454f1ad6 // usublb z22.h, z22.b, z15.b\n"
+ "sqadd z6.s, z6.s, z18.s\n"
+ ".inst 0x04aa7508 // sqrdmulh z8.s, z8.s, z10.s\n"
+ "smin z11.s, p4/M, z11.s, z15.s\n"
+ ".inst 0x44829290 // srshl z16.s, p4/M, z16.s, z20.s\n"
+ "sqadd z12.s, z12.s, z30.s\n"
+ "and z3.d, z8.d, z20.d\n"
+ "asr z3.s, z3.s, #0x1f\n"
+ "add z16.s, z16.s, z14.s\n"
+ "smax z11.s, p4/M, z11.s, z5.s\n"
+ ".inst 0x448293b3 // srshl z19.s, p4/M, z19.s, z29.s\n"
+ ".inst 0x44829289 // srshl z9.s, p4/M, z9.s, z20.s\n"
+ "smin z16.s, p4/M, z16.s, z15.s\n"
+ ".inst 0x448293a7 // srshl z7.s, p4/M, z7.s, z29.s\n"
+ "add z19.s, z19.s, z14.s\n"
+ "add z9.s, z9.s, z14.s\n"
+ "sqadd z8.s, z8.s, z3.s\n"
+ "add z7.s, z7.s, z14.s\n"
+ "smax z16.s, p4/M, z16.s, z5.s\n"
+ "smin z19.s, p4/M, z19.s, z15.s\n"
+ "smin z9.s, p4/M, z9.s, z15.s\n"
+ "smin z7.s, p4/M, z7.s, z15.s\n"
+ "trn1 z11.h, z11.h, z16.h\n"
+ "st1b { z11.h }, p0, [x7, x3]\n"
+ "smax z19.s, p4/M, z19.s, z5.s\n"
+ "smax z9.s, p4/M, z9.s, z5.s\n"
+ "smax z7.s, p4/M, z7.s, z5.s\n"
+ ".inst 0x44829286 // srshl z6.s, p4/M, z6.s, z20.s\n"
+ ".inst 0x448293ac // srshl z12.s, p4/M, z12.s, z29.s\n"
+ "trn1 z19.h, z19.h, z9.h\n"
+ "st1b { z19.h }, p0, [x8, x3]\n"
+ "add z6.s, z6.s, z14.s\n"
+ ".inst 0x44829288 // srshl z8.s, p4/M, z8.s, z20.s\n"
+ "add z12.s, z12.s, z14.s\n"
+ "smin z6.s, p4/M, z6.s, z15.s\n"
+ "add z8.s, z8.s, z14.s\n"
+ "smin z12.s, p4/M, z12.s, z15.s\n"
+ "smax z6.s, p4/M, z6.s, z5.s\n"
+ "smin z8.s, p4/M, z8.s, z15.s\n"
+ "smax z12.s, p4/M, z12.s, z5.s\n"
+ "trn1 z7.h, z7.h, z6.h\n"
+ "st1b { z7.h }, p0, [x17, x3]\n"
+ "smax z8.s, p4/M, z8.s, z5.s\n"
+ "trn1 z12.h, z12.h, z8.h\n"
+ "st1b { z12.h }, p0, [x16, x3]\n"
+ "inch x3\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ld1w { z19.s }, p2/Z, [x19]\n"
+ "ld1w { z6.s }, p1/Z, [x19, #1, MUL VL]\n"
+ "uzp1 z11.s, z19.s, z6.s\n"
+ "addvl x19, x19, #2\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "uzp2 z16.s, z19.s, z6.s\n"
+ "mov z19.d, z11.d\n"
+ "ld1b { z0.h }, p4/Z, [x1]\n"
+ ".inst 0x454d1800 // usublb z0.h, z0.b, z13.b\n"
+ "mov z9.d, z16.d\n"
+ "ld1b { z1.h }, p4/Z, [x1, #1, MUL VL]\n"
+ "mov z7.d, z11.d\n"
+ "ld1b { z2.h }, p4/Z, [x1, #2, MUL VL]\n"
+ ".inst 0x454d1821 // usublb z1.h, z1.b, z13.b\n"
+ "mov z6.d, z16.d\n"
+ "ld1b { z3.h }, p4/Z, [x1, #3, MUL VL]\n"
+ "mov z12.d, z11.d\n"
+ "ld1b { z4.h }, p4/Z, [x1, #4, MUL VL]\n"
+ ".inst 0x454d1842 // usublb z2.h, z2.b, z13.b\n"
+ "mov z8.d, z16.d\n"
+ "ldp x28, x27, [x5, #0x0]\n"
+ ".inst 0x454d1863 // usublb z3.h, z3.b, z13.b\n"
+ "ldp x26, x25, [x5, #0x10]\n"
+ ".inst 0x454d1884 // usublb z4.h, z4.b, z13.b\n"
+ "ldp x24, x23, [x5, #0x20]\n"
+ "ldp x22, x21, [x5, #0x30]\n"
+ "ldp x20, x19, [x5, #0x40]\n"
+ "ld1b { z31.h }, p3/Z, [x28, x2]\n"
+ ".inst 0x45511bff // usublb z31.h, z31.b, z17.b\n"
+ "ld1b { z30.h }, p3/Z, [x27, x2]\n"
+ "ld1b { z29.h }, p3/Z, [x26, x2]\n"
+ ".inst 0x45511bde // usublb z30.h, z30.b, z17.b\n"
+ "ld1b { z28.h }, p3/Z, [x25, x2]\n"
+ "ld1b { z27.h }, p3/Z, [x24, x2]\n"
+ ".inst 0x45511bbd // usublb z29.h, z29.b, z17.b\n"
+ "ld1b { z23.h }, p3/Z, [x23, x2]\n"
+ ".inst 0x45511b9c // usublb z28.h, z28.b, z17.b\n"
+ "ld1b { z25.h }, p3/Z, [x22, x2]\n"
+ "ld1b { z24.h }, p3/Z, [x21, x2]\n"
+ ".inst 0x45511b7b // usublb z27.h, z27.b, z17.b\n"
+ "ld1b { z26.h }, p3/Z, [x20, x2]\n"
+ ".inst 0x45511af7 // usublb z23.h, z23.b, z17.b\n"
+ "ld1b { z22.h }, p3/Z, [x19, x2]\n"
+ ".inst 0x45511b39 // usublb z25.h, z25.b, z17.b\n"
+ ".inst 0x45511b18 // usublb z24.h, z24.b, z17.b\n"
+ ".inst 0x45511b5a // usublb z26.h, z26.b, z17.b\n"
+ ".inst 0x45511ad6 // usublb z22.h, z22.b, z17.b\n"
"b.any 1b\n"
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp
index 66c24c34b5..1c8b8f9d19 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,295 +41,309 @@ void sve_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst_impl
)
{
__asm__ __volatile__(
- "mov x20, #0x9\n"
- "whilelt p0.b, XZR, x20\n"
- "ldr x23, [%x[inptrs], #0x8]\n"
- "ldr x20, [%x[inptrs], #0x10]\n"
- "ldr x22, [%x[inptrs], #0x20]\n"
- "ldr x21, [%x[inptrs], #0x0]\n"
- "mov z15.b, #0x1\n"
- "lsr z15.s, z15.s, #0x8\n"
- "ld1b { z1.b }, p0/Z, [x23]\n"
- "ld1b { z2.b }, p0/Z, [x20]\n"
- "mov z30.d, z1.d\n"
- "mov z29.d, z1.d\n"
- "ldr x20, [%x[inptrs], #0x18]\n"
- "ld1b { z4.b }, p0/Z, [x22]\n"
- "mov z28.d, z1.d\n"
- "mov z27.d, z2.d\n"
- "ld1b { z0.b }, p0/Z, [x21]\n"
- "mov z26.d, z2.d\n"
- "mov z25.d, z2.d\n"
- "ld1b { z3.b }, p0/Z, [x20]\n"
- "mov z24.d, z4.d\n"
- "mov z23.d, z4.d\n"
+ "mov z31.s, #0x0\n"
+ "ldr x24, [%x[inptrs], #0x0]\n"
"ptrue p2.b\n"
- "ld1rw { z14.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
- "mov z22.d, z4.d\n"
- "ext z30.b, z30.b, z30.b, #0x2\n"
- "lsl x10, %x[n_channels], #0x2\n"
- "neg z14.s, p2/M, z14.s\n"
- "ext z29.b, z29.b, z29.b, #0x4\n"
- "ext z28.b, z28.b, z28.b, #0x6\n"
- "mov x9, #0x0\n"
- "whilelt p1.b, x9, x10\n"
- "ext z27.b, z27.b, z27.b, #0x2\n"
- "ext z26.b, z26.b, z26.b, #0x4\n"
- "ld1w { z13.s }, p1/Z, [%x[params]]\n"
+ "mov z18.s, #0x0\n"
+ "ldr x23, [%x[inptrs], #0x8]\n"
+ "lsl x9, %x[n_channels], #0x2\n"
+ "mov z29.s, #0x0\n"
+ "ldr x22, [%x[inptrs], #0x10]\n"
+ "addvl SP, SP, #-8\n"
+ "mov z28.s, #0x0\n"
+ "ldr x21, [%x[inptrs], #0x18]\n"
+ "mov x19, #0x9\n"
+ "mov z13.s, #0x0\n"
+ "ldr x20, [%x[inptrs], #0x20]\n"
+ "whilelt p1.b, XZR, x19\n"
+ "mov z14.s, #0x0\n"
+ "ld1b { z7.b }, p1/Z, [x24]\n"
+ "mov x19, #0x3\n"
+ "mov z15.s, #0x0\n"
+ "ld1b { z3.b }, p1/Z, [x23]\n"
+ "whilelt p0.b, XZR, x19\n"
+ "mov z11.b, p0/z, #0x1\n"
+ "ld1b { z4.b }, p1/Z, [x22]\n"
"mov x28, #0x0\n"
- "ext z25.b, z25.b, z25.b, #0x6\n"
- "ext z24.b, z24.b, z24.b, #0x2\n"
- "ldp x27, x26, [%x[outptrs], #0x0]\n"
- "ldp x25, x24, [%x[outptrs], #0x10]\n"
- "ext z23.b, z23.b, z23.b, #0x4\n"
- "ext z22.b, z22.b, z22.b, #0x6\n"
- "ldp x23, x22, [%x[outptrs], #0x20]\n"
- "ldp x21, x20, [%x[outptrs], #0x30]\n"
- "mov z21.d, z0.d\n"
- "mov z20.d, z0.d\n"
+ "mov z10.d, z7.d\n"
+ "ld1b { z6.b }, p1/Z, [x21]\n"
+ "mov x27, #0x0\n"
+ "ext z10.b, z10.b, z10.b, #0x2\n"
+ "ld1b { z5.b }, p1/Z, [x20]\n"
+ "whilelt p1.b, x28, x9\n"
+ "mov z17.d, z7.d\n"
+ "ld1rw { z30.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+ "mov z26.d, z7.d\n"
+ "ldp x26, x25, [%x[outptrs], #0x0]\n"
+ "ext z17.b, z17.b, z17.b, #0x4\n"
+ "ldp x24, x23, [%x[outptrs], #0x10]\n"
+ "ext z26.b, z26.b, z26.b, #0x6\n"
+ "ldp x22, x21, [%x[outptrs], #0x20]\n"
+ "mov z19.d, z3.d\n"
+ "ldp x20, x19, [%x[outptrs], #0x30]\n"
+ "ext z19.b, z19.b, z19.b, #0x2\n"
"ld1rw { z12.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
- "ld1rw { z11.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
- "mov z19.d, z0.d\n"
- "mov z18.d, z3.d\n"
- "ld1rw { z10.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
- "ld1b { z5.b }, p1/Z, [%x[params], #1, MUL VL]\n"
+ "zip1 z7.s, z7.s, z17.s\n"
+ "ld1rw { z16.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
+ "zip1 z10.s, z10.s, z26.s\n"
+ "ld1rw { z0.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+ "zip1 z7.s, z7.s, z10.s\n"
+ "ld1w { z1.s }, p1/Z, [%x[params]]\n"
+ "mov z7.q, z7.q[0]\n"
+ "ld1b { z8.b }, p1/Z, [%x[params], #1, MUL VL]\n"
"mov z17.d, z3.d\n"
- "mov z16.d, z3.d\n"
- "ld1b { z6.b }, p1/Z, [%x[params], #2, MUL VL]\n"
- "ld1b { z7.b }, p1/Z, [%x[params], #3, MUL VL]\n"
- "ext z21.b, z21.b, z21.b, #0x2\n"
- "ext z20.b, z20.b, z20.b, #0x4\n"
- "addvl %x[params], %x[params], #4\n"
- "ext z19.b, z19.b, z19.b, #0x6\n"
- "zip1 z1.s, z1.s, z29.s\n"
- "zip1 z30.s, z30.s, z28.s\n"
- "zip1 z2.s, z2.s, z26.s\n"
- "zip1 z27.s, z27.s, z25.s\n"
- "ext z18.b, z18.b, z18.b, #0x2\n"
+ "ld1b { z9.b }, p1/Z, [%x[params], #2, MUL VL]\n"
"ext z17.b, z17.b, z17.b, #0x4\n"
- "ext z16.b, z16.b, z16.b, #0x6\n"
- "zip1 z4.s, z4.s, z23.s\n"
- "zip1 z24.s, z24.s, z22.s\n"
- "zip1 z0.s, z0.s, z20.s\n"
- "zip1 z21.s, z21.s, z19.s\n"
- "zip1 z1.s, z1.s, z30.s\n"
- "zip1 z2.s, z2.s, z27.s\n"
+ "ld1b { z10.b }, p1/Z, [%x[params], #3, MUL VL]\n"
+ "addvl %x[params], %x[params], #4\n"
+ "mov z2.d, z3.d\n"
+ "mov z20.d, z4.d\n"
+ "ext z2.b, z2.b, z2.b, #0x6\n"
"zip1 z3.s, z3.s, z17.s\n"
- "zip1 z18.s, z18.s, z16.s\n"
- "zip1 z4.s, z4.s, z24.s\n"
- "zip1 z0.s, z0.s, z21.s\n"
- "mov z1.q, z1.q[0]\n"
- "mov z2.q, z2.q[0]\n"
- "zip1 z3.s, z3.s, z18.s\n"
+ "ext z20.b, z20.b, z20.b, #0x2\n"
+ "mov z17.d, z4.d\n"
+ "zip1 z19.s, z19.s, z2.s\n"
+ "zip1 z3.s, z3.s, z19.s\n"
+ "mov z3.q, z3.q[0]\n"
+ "ext z17.b, z17.b, z17.b, #0x4\n"
+ "mov z26.d, z4.d\n"
+ "ext z26.b, z26.b, z26.b, #0x6\n"
+ "mov z21.d, z6.d\n"
+ "zip1 z4.s, z4.s, z17.s\n"
+ "ext z21.b, z21.b, z21.b, #0x2\n"
+ "zip1 z20.s, z20.s, z26.s\n"
+ "zip1 z4.s, z4.s, z20.s\n"
"mov z4.q, z4.q[0]\n"
- "mov z24.s, #0x0\n"
+ "mov z17.d, z6.d\n"
+ "ext z17.b, z17.b, z17.b, #0x4\n"
+ "mov z20.d, z6.d\n"
+ "ext z20.b, z20.b, z20.b, #0x6\n"
+ "mov z19.d, z5.d\n"
+ "zip1 z6.s, z6.s, z17.s\n"
+ "ext z19.b, z19.b, z19.b, #0x2\n"
+ "zip1 z21.s, z21.s, z20.s\n"
+ "zip1 z6.s, z6.s, z21.s\n"
+ "mov z6.q, z6.q[0]\n"
+ "mov z17.d, z5.d\n"
+ "ext z17.b, z17.b, z17.b, #0x4\n"
+ "mov z20.d, z5.d\n"
+ "ext z20.b, z20.b, z20.b, #0x6\n"
+ "mov z11.s, z11.s[0]\n"
+ "zip1 z5.s, z5.s, z17.s\n"
"mov z25.s, #0x0\n"
- "udot z24.s, z15.b, z1.b[0]\n"
+ "zip1 z19.s, z19.s, z20.s\n"
+ "zip1 z5.s, z5.s, z19.s\n"
+ "mov z5.q, z5.q[0]\n"
+ "mov z26.s, #0x0\n"
+ "mov z27.s, #0x0\n"
+ "mov z24.s, #0x0\n"
"mov z23.s, #0x0\n"
"mov z22.s, #0x0\n"
- "udot z25.s, z15.b, z1.b[1]\n"
"mov z21.s, #0x0\n"
- "mov z20.s, #0x0\n"
- "udot z23.s, z15.b, z1.b[2]\n"
- "mov z9.s, #0x0\n"
- "mov z8.s, #0x0\n"
- "udot z22.s, z15.b, z1.b[3]\n"
- "mov z19.s, #0x0\n"
- "mov z18.s, #0x0\n"
- "udot z21.s, z15.b, z2.b[0]\n"
"mov z17.s, #0x0\n"
- "mov z16.s, #0x0\n"
- "udot z20.s, z15.b, z2.b[1]\n"
- "udot z9.s, z15.b, z2.b[2]\n"
- "udot z8.s, z15.b, z2.b[3]\n"
- "mov z0.q, z0.q[0]\n"
- "udot z19.s, z15.b, z4.b[0]\n"
- "udot z18.s, z15.b, z4.b[1]\n"
- "mov z3.q, z3.q[0]\n"
- "udot z17.s, z15.b, z4.b[2]\n"
- "udot z16.s, z15.b, z4.b[3]\n"
- "mov z31.s, #0x0\n"
- "mov z30.s, #0x0\n"
- "mov z29.s, #0x0\n"
- "udot z31.s, z15.b, z0.b[0]\n"
- "mov z28.s, #0x0\n"
- "udot z30.s, z15.b, z0.b[1]\n"
- "udot z29.s, z15.b, z0.b[2]\n"
- "udot z28.s, z15.b, z0.b[3]\n"
- "add z24.s, z24.s, z21.s\n"
- "add z25.s, z25.s, z20.s\n"
- "add z26.s, z23.s, z9.s\n"
- "add z27.s, z22.s, z8.s\n"
- "add z23.s, z19.s, z21.s\n"
- "mov z22.s, #0x0\n"
- "udot z22.s, z15.b, z3.b[0]\n"
- "add z21.s, z18.s, z20.s\n"
"mov z20.s, #0x0\n"
- "udot z20.s, z15.b, z3.b[1]\n"
- "add z19.s, z17.s, z9.s\n"
- "mov z18.s, #0x0\n"
- "udot z18.s, z15.b, z3.b[2]\n"
- "add z17.s, z16.s, z8.s\n"
- "mov z16.s, #0x0\n"
- "udot z16.s, z15.b, z3.b[3]\n"
- "add z24.s, z24.s, z31.s\n"
- "add z25.s, z25.s, z30.s\n"
- "mul z24.s, p2/M, z24.s, z14.s\n"
- "mul z25.s, p2/M, z25.s, z14.s\n"
- "add z26.s, z26.s, z29.s\n"
- "add z27.s, z27.s, z28.s\n"
- "mul z26.s, p2/M, z26.s, z14.s\n"
- "mul z27.s, p2/M, z27.s, z14.s\n"
- "add z28.s, z23.s, z22.s\n"
- "add z29.s, z21.s, z20.s\n"
- "mul z28.s, p2/M, z28.s, z14.s\n"
- "mul z29.s, p2/M, z29.s, z14.s\n"
- "add z30.s, z19.s, z18.s\n"
- "add z31.s, z17.s, z16.s\n"
- "mul z30.s, p2/M, z30.s, z14.s\n"
- "mul z31.s, p2/M, z31.s, z14.s\n"
- "zip1 z19.s, z24.s, z26.s\n"
- "zip1 z18.s, z25.s, z27.s\n"
- "zip1 z17.s, z28.s, z30.s\n"
- "zip1 z16.s, z29.s, z31.s\n"
- "zip1 z22.s, z19.s, z18.s\n"
- "zip1 z23.s, z17.s, z16.s\n"
- "add z24.s, z24.s, z13.s\n"
- "add z25.s, z25.s, z13.s\n"
- "add z26.s, z26.s, z13.s\n"
- "add z27.s, z27.s, z13.s\n"
- "add z28.s, z28.s, z13.s\n"
- "add z29.s, z29.s, z13.s\n"
- "add z30.s, z30.s, z13.s\n"
+ "mov z2.s, #0x0\n"
+ "mov z19.s, #0x0\n"
+ "udot z31.s, z11.b, z7.b[0]\n"
+ "udot z18.s, z11.b, z7.b[1]\n"
+ "udot z29.s, z11.b, z7.b[2]\n"
+ "udot z28.s, z11.b, z7.b[3]\n"
+ "udot z13.s, z11.b, z3.b[0]\n"
+ "udot z14.s, z11.b, z3.b[1]\n"
+ "udot z15.s, z11.b, z3.b[2]\n"
+ "udot z25.s, z11.b, z3.b[3]\n"
+ "udot z26.s, z11.b, z4.b[0]\n"
+ "udot z27.s, z11.b, z4.b[1]\n"
+ "udot z24.s, z11.b, z4.b[2]\n"
+ "udot z23.s, z11.b, z4.b[3]\n"
+ "udot z22.s, z11.b, z6.b[0]\n"
+ "udot z21.s, z11.b, z6.b[1]\n"
+ "udot z17.s, z11.b, z6.b[2]\n"
+ "udot z20.s, z11.b, z6.b[3]\n"
+ "udot z2.s, z11.b, z5.b[0]\n"
+ "udot z19.s, z11.b, z5.b[1]\n"
+ "mov z31.d, z31.d\n"
+ "mov z18.d, z18.d\n"
+ "mov z29.d, z29.d\n"
+ "mov z28.d, z28.d\n"
"add z31.s, z31.s, z13.s\n"
+ "mov z13.s, #0x0\n"
+ "udot z13.s, z11.b, z5.b[2]\n"
+ "add z18.s, z18.s, z14.s\n"
+ "mov z14.s, #0x0\n"
+ "udot z14.s, z11.b, z5.b[3]\n"
+ "add z29.s, z29.s, z15.s\n"
+ "add z28.s, z28.s, z25.s\n"
+ "add z31.s, z31.s, z26.s\n"
+ "add z18.s, z18.s, z27.s\n"
+ "add z29.s, z29.s, z24.s\n"
+ "add z28.s, z28.s, z23.s\n"
+ "mov z26.d, z26.d\n"
+ "mov z25.d, z27.d\n"
+ "mov z24.d, z24.d\n"
+ "mov z23.d, z23.d\n"
+ "add z26.s, z26.s, z22.s\n"
+ "add z25.s, z25.s, z21.s\n"
+ "add z24.s, z24.s, z17.s\n"
+ "add z23.s, z23.s, z20.s\n"
+ "add z26.s, z26.s, z2.s\n"
+ "add z25.s, z25.s, z19.s\n"
+ "add z24.s, z24.s, z13.s\n"
+ "add z23.s, z23.s, z14.s\n"
+ "neg z30.s, p2/M, z30.s\n"
+ "mul z31.s, p2/M, z31.s, z30.s\n"
+ "st1w { z31.s }, p2, [SP]\n"
+ "add z31.s, z31.s, z1.s\n"
+ "mul z18.s, p2/M, z18.s, z30.s\n"
+ "st1w { z18.s }, p2, [SP, #1, MUL VL]\n"
+ "add z18.s, z18.s, z1.s\n"
+ "mul z29.s, p2/M, z29.s, z30.s\n"
+ "st1w { z29.s }, p2, [SP, #2, MUL VL]\n"
+ "add z29.s, z29.s, z1.s\n"
+ "mul z28.s, p2/M, z28.s, z30.s\n"
+ "st1w { z28.s }, p2, [SP, #3, MUL VL]\n"
+ "add z28.s, z28.s, z1.s\n"
+ "mul z26.s, p2/M, z26.s, z30.s\n"
+ "st1w { z26.s }, p2, [SP, #4, MUL VL]\n"
+ "add z26.s, z26.s, z1.s\n"
+ "mul z25.s, p2/M, z25.s, z30.s\n"
+ "st1w { z25.s }, p2, [SP, #5, MUL VL]\n"
+ "add z25.s, z25.s, z1.s\n"
+ "mul z24.s, p2/M, z24.s, z30.s\n"
+ "st1w { z24.s }, p2, [SP, #6, MUL VL]\n"
+ "add z24.s, z24.s, z1.s\n"
+ "mul z23.s, p2/M, z23.s, z30.s\n"
+ "st1w { z23.s }, p2, [SP, #7, MUL VL]\n"
+ "add z23.s, z23.s, z1.s\n"
"1:" // Loop
- "udot z24.s, z5.b, z0.b[0]\n"
- "udot z25.s, z5.b, z0.b[1]\n"
- "ld1w { z21.s }, p2/Z, [%x[params]]\n"
- "ld1w { z20.s }, p2/Z, [%x[params], #1, MUL VL]\n"
- "udot z26.s, z5.b, z0.b[2]\n"
- "udot z27.s, z5.b, z0.b[3]\n"
- "incb x9\n"
- "whilelt p0.s, x28, %x[n_channels]\n"
- "udot z24.s, z6.b, z1.b[0]\n"
- "udot z25.s, z6.b, z1.b[1]\n"
- "whilelt p1.b, x9, x10\n"
- "ld1w { z13.s }, p1/Z, [%x[params], #2, MUL VL]\n"
- "udot z26.s, z6.b, z1.b[2]\n"
- "udot z27.s, z6.b, z1.b[3]\n"
- "udot z28.s, z5.b, z2.b[0]\n"
- "udot z29.s, z5.b, z2.b[1]\n"
- "udot z30.s, z5.b, z2.b[2]\n"
- "udot z31.s, z5.b, z2.b[3]\n"
- "ld1b { z5.b }, p1/Z, [%x[params], #3, MUL VL]\n"
- "udot z24.s, z7.b, z2.b[0]\n"
- "udot z25.s, z7.b, z2.b[1]\n"
- ".inst 0x04b57718 // sqrdmulh z24.s, z24.s, z21.s\n"
- "udot z26.s, z7.b, z2.b[2]\n"
- "udot z27.s, z7.b, z2.b[3]\n"
- ".inst 0x04b57739 // sqrdmulh z25.s, z25.s, z21.s\n"
- "udot z28.s, z6.b, z3.b[0]\n"
- "udot z29.s, z6.b, z3.b[1]\n"
- ".inst 0x04b5775a // sqrdmulh z26.s, z26.s, z21.s\n"
- "udot z30.s, z6.b, z3.b[2]\n"
- "udot z31.s, z6.b, z3.b[3]\n"
- ".inst 0x04b5777b // sqrdmulh z27.s, z27.s, z21.s\n"
- "ld1b { z6.b }, p1/Z, [%x[params], #4, MUL VL]\n"
- "udot z28.s, z7.b, z4.b[0]\n"
- "udot z29.s, z7.b, z4.b[1]\n"
- "and z19.d, z24.d, z20.d\n"
- "udot z30.s, z7.b, z4.b[2]\n"
- "udot z31.s, z7.b, z4.b[3]\n"
- "and z18.d, z25.d, z20.d\n"
- "ld1b { z7.b }, p1/Z, [%x[params], #5, MUL VL]\n"
- "and z17.d, z26.d, z20.d\n"
- "and z16.d, z27.d, z20.d\n"
+ "udot z31.s, z8.b, z7.b[0]\n"
+ "ld1w { z22.s }, p2/Z, [%x[params]]\n"
+ "incb x28\n"
+ "udot z18.s, z8.b, z7.b[1]\n"
+ "ld1w { z21.s }, p2/Z, [%x[params], #1, MUL VL]\n"
+ "whilelt p0.s, x27, %x[n_channels]\n"
+ "udot z29.s, z8.b, z7.b[2]\n"
+ "whilelt p1.b, x28, x9\n"
+ "ld1w { z1.s }, p1/Z, [%x[params], #2, MUL VL]\n"
+ "udot z28.s, z8.b, z7.b[3]\n"
+ "udot z26.s, z8.b, z4.b[0]\n"
+ "udot z25.s, z8.b, z4.b[1]\n"
+ "udot z24.s, z8.b, z4.b[2]\n"
+ "udot z23.s, z8.b, z4.b[3]\n"
+ "ld1b { z8.b }, p1/Z, [%x[params], #3, MUL VL]\n"
+ "udot z31.s, z9.b, z3.b[0]\n"
+ "udot z18.s, z9.b, z3.b[1]\n"
+ "udot z29.s, z9.b, z3.b[2]\n"
+ "udot z28.s, z9.b, z3.b[3]\n"
+ "udot z26.s, z9.b, z6.b[0]\n"
+ "udot z25.s, z9.b, z6.b[1]\n"
+ "udot z24.s, z9.b, z6.b[2]\n"
+ "udot z23.s, z9.b, z6.b[3]\n"
+ "ld1b { z9.b }, p1/Z, [%x[params], #4, MUL VL]\n"
+ "udot z31.s, z10.b, z4.b[0]\n"
+ "udot z18.s, z10.b, z4.b[1]\n"
+ "udot z29.s, z10.b, z4.b[2]\n"
+ "udot z28.s, z10.b, z4.b[3]\n"
+ "udot z26.s, z10.b, z5.b[0]\n"
+ "udot z25.s, z10.b, z5.b[1]\n"
+ "udot z24.s, z10.b, z5.b[2]\n"
+ "udot z23.s, z10.b, z5.b[3]\n"
+ "ld1b { z10.b }, p1/Z, [%x[params], #5, MUL VL]\n"
"addvl %x[params], %x[params], #6\n"
+ ".inst 0x04b677ff // sqrdmulh z31.s, z31.s, z22.s\n"
+ ".inst 0x04b67652 // sqrdmulh z18.s, z18.s, z22.s\n"
+ ".inst 0x04b677bd // sqrdmulh z29.s, z29.s, z22.s\n"
+ ".inst 0x04b6779c // sqrdmulh z28.s, z28.s, z22.s\n"
+ ".inst 0x04b6775a // sqrdmulh z26.s, z26.s, z22.s\n"
+ "and z20.d, z31.d, z21.d\n"
+ "asr z20.s, z20.s, #0x1f\n"
+ "and z19.d, z18.d, z21.d\n"
+ "and z14.d, z29.d, z21.d\n"
"asr z19.s, z19.s, #0x1f\n"
- "asr z18.s, z18.s, #0x1f\n"
+ "and z17.d, z28.d, z21.d\n"
+ "and z2.d, z26.d, z21.d\n"
+ "asr z14.s, z14.s, #0x1f\n"
+ ".inst 0x04b67739 // sqrdmulh z25.s, z25.s, z22.s\n"
"asr z17.s, z17.s, #0x1f\n"
- "asr z16.s, z16.s, #0x1f\n"
- ".inst 0x04b5779c // sqrdmulh z28.s, z28.s, z21.s\n"
- ".inst 0x04b577bd // sqrdmulh z29.s, z29.s, z21.s\n"
- ".inst 0x04b577de // sqrdmulh z30.s, z30.s, z21.s\n"
- ".inst 0x04b577ff // sqrdmulh z31.s, z31.s, z21.s\n"
- "sqadd z24.s, z24.s, z19.s\n"
- "sqadd z25.s, z25.s, z18.s\n"
- ".inst 0x44828a98 // srshl z24.s, p2/M, z24.s, z20.s\n"
- ".inst 0x44828a99 // srshl z25.s, p2/M, z25.s, z20.s\n"
- "sqadd z26.s, z26.s, z17.s\n"
- "sqadd z27.s, z27.s, z16.s\n"
- ".inst 0x44828a9a // srshl z26.s, p2/M, z26.s, z20.s\n"
- ".inst 0x44828a9b // srshl z27.s, p2/M, z27.s, z20.s\n"
- "and z19.d, z28.d, z20.d\n"
- "and z18.d, z29.d, z20.d\n"
- "and z17.d, z30.d, z20.d\n"
- "and z16.d, z31.d, z20.d\n"
- "asr z19.s, z19.s, #0x1f\n"
- "asr z18.s, z18.s, #0x1f\n"
+ "sqadd z31.s, z31.s, z20.s\n"
+ ".inst 0x04b67718 // sqrdmulh z24.s, z24.s, z22.s\n"
+ "asr z2.s, z2.s, #0x1f\n"
+ ".inst 0x04b676f7 // sqrdmulh z23.s, z23.s, z22.s\n"
+ "sqadd z18.s, z18.s, z19.s\n"
+ "sqadd z29.s, z29.s, z14.s\n"
+ "and z27.d, z25.d, z21.d\n"
+ "asr z27.s, z27.s, #0x1f\n"
+ "sqadd z28.s, z28.s, z17.s\n"
+ "sqadd z26.s, z26.s, z2.s\n"
+ "and z17.d, z24.d, z21.d\n"
"asr z17.s, z17.s, #0x1f\n"
- "asr z16.s, z16.s, #0x1f\n"
- "sqadd z28.s, z28.s, z19.s\n"
- "sqadd z29.s, z29.s, z18.s\n"
- ".inst 0x44828a9c // srshl z28.s, p2/M, z28.s, z20.s\n"
- ".inst 0x44828a9d // srshl z29.s, p2/M, z29.s, z20.s\n"
- "sqadd z30.s, z30.s, z17.s\n"
- "sqadd z31.s, z31.s, z16.s\n"
- ".inst 0x44828a9e // srshl z30.s, p2/M, z30.s, z20.s\n"
- ".inst 0x44828a9f // srshl z31.s, p2/M, z31.s, z20.s\n"
- "add z24.s, z24.s, z12.s\n"
- "add z25.s, z25.s, z12.s\n"
- "smin z24.s, p2/M, z24.s, z10.s\n"
- "smin z25.s, p2/M, z25.s, z10.s\n"
- "add z26.s, z26.s, z12.s\n"
- "add z27.s, z27.s, z12.s\n"
- "smin z26.s, p2/M, z26.s, z10.s\n"
- "smin z27.s, p2/M, z27.s, z10.s\n"
- "add z28.s, z28.s, z12.s\n"
- "add z29.s, z29.s, z12.s\n"
- "smin z28.s, p2/M, z28.s, z10.s\n"
- "smin z29.s, p2/M, z29.s, z10.s\n"
- "add z30.s, z30.s, z12.s\n"
+ "and z15.d, z23.d, z21.d\n"
+ ".inst 0x44828abf // srshl z31.s, p2/M, z31.s, z21.s\n"
+ "asr z15.s, z15.s, #0x1f\n"
+ "sqadd z25.s, z25.s, z27.s\n"
+ ".inst 0x44828ab2 // srshl z18.s, p2/M, z18.s, z21.s\n"
"add z31.s, z31.s, z12.s\n"
- "smin z30.s, p2/M, z30.s, z10.s\n"
- "smin z31.s, p2/M, z31.s, z10.s\n"
- "smax z24.s, p2/M, z24.s, z11.s\n"
- "smax z25.s, p2/M, z25.s, z11.s\n"
- "st1b { z24.s }, p0, [x27, x28]\n"
- "mov z24.s, z22.s[0]\n"
- "smax z26.s, p2/M, z26.s, z11.s\n"
- "smax z27.s, p2/M, z27.s, z11.s\n"
- "st1b { z25.s }, p0, [x26, x28]\n"
- "mov z25.s, z22.s[1]\n"
- "smax z28.s, p2/M, z28.s, z11.s\n"
- "smax z29.s, p2/M, z29.s, z11.s\n"
- "st1b { z26.s }, p0, [x25, x28]\n"
- "mov z26.s, z22.s[2]\n"
- "smax z30.s, p2/M, z30.s, z11.s\n"
- "smax z31.s, p2/M, z31.s, z11.s\n"
- "st1b { z27.s }, p0, [x24, x28]\n"
- "mov z27.s, z22.s[3]\n"
- "st1b { z28.s }, p0, [x23, x28]\n"
- "mov z28.s, z23.s[0]\n"
- "add z24.s, z24.s, z13.s\n"
- "st1b { z29.s }, p0, [x22, x28]\n"
- "mov z29.s, z23.s[1]\n"
- "add z25.s, z25.s, z13.s\n"
- "st1b { z30.s }, p0, [x21, x28]\n"
- "mov z30.s, z23.s[2]\n"
- "add z26.s, z26.s, z13.s\n"
- "st1b { z31.s }, p0, [x20, x28]\n"
- "mov z31.s, z23.s[3]\n"
- "incw x28\n"
- "add z27.s, z27.s, z13.s\n"
- "add z28.s, z28.s, z13.s\n"
- "add z29.s, z29.s, z13.s\n"
- "add z30.s, z30.s, z13.s\n"
- "add z31.s, z31.s, z13.s\n"
+ "sqadd z24.s, z24.s, z17.s\n"
+ ".inst 0x44828abd // srshl z29.s, p2/M, z29.s, z21.s\n"
+ "add z18.s, z18.s, z12.s\n"
+ "sqadd z23.s, z23.s, z15.s\n"
+ "smin z31.s, p2/M, z31.s, z0.s\n"
+ "add z29.s, z29.s, z12.s\n"
+ "smin z18.s, p2/M, z18.s, z0.s\n"
+ ".inst 0x44828abc // srshl z28.s, p2/M, z28.s, z21.s\n"
+ "smax z31.s, p2/M, z31.s, z16.s\n"
+ "st1b { z31.s }, p0, [x26, x27]\n"
+ "add z28.s, z28.s, z12.s\n"
+ "smax z18.s, p2/M, z18.s, z16.s\n"
+ "ld1w { z31.s }, p2/Z, [SP]\n"
+ "smin z29.s, p2/M, z29.s, z0.s\n"
+ "st1b { z18.s }, p0, [x25, x27]\n"
+ "add z31.s, z31.s, z1.s\n"
+ "smin z28.s, p2/M, z28.s, z0.s\n"
+ "ld1w { z18.s }, p2/Z, [SP, #1, MUL VL]\n"
+ "smax z29.s, p2/M, z29.s, z16.s\n"
+ "st1b { z29.s }, p0, [x24, x27]\n"
+ "add z18.s, z18.s, z1.s\n"
+ "smax z28.s, p2/M, z28.s, z16.s\n"
+ "ld1w { z29.s }, p2/Z, [SP, #2, MUL VL]\n"
+ ".inst 0x44828aba // srshl z26.s, p2/M, z26.s, z21.s\n"
+ "st1b { z28.s }, p0, [x23, x27]\n"
+ "add z29.s, z29.s, z1.s\n"
+ ".inst 0x44828ab9 // srshl z25.s, p2/M, z25.s, z21.s\n"
+ "ld1w { z28.s }, p2/Z, [SP, #3, MUL VL]\n"
+ "add z26.s, z26.s, z12.s\n"
+ ".inst 0x44828ab8 // srshl z24.s, p2/M, z24.s, z21.s\n"
+ ".inst 0x44828ab7 // srshl z23.s, p2/M, z23.s, z21.s\n"
+ "add z25.s, z25.s, z12.s\n"
+ "add z28.s, z28.s, z1.s\n"
+ "add z24.s, z24.s, z12.s\n"
+ "add z23.s, z23.s, z12.s\n"
+ "smin z26.s, p2/M, z26.s, z0.s\n"
+ "smin z25.s, p2/M, z25.s, z0.s\n"
+ "smin z24.s, p2/M, z24.s, z0.s\n"
+ "smin z23.s, p2/M, z23.s, z0.s\n"
+ "smax z26.s, p2/M, z26.s, z16.s\n"
+ "st1b { z26.s }, p0, [x22, x27]\n"
+ "smax z25.s, p2/M, z25.s, z16.s\n"
+ "smax z24.s, p2/M, z24.s, z16.s\n"
+ "ld1w { z26.s }, p2/Z, [SP, #4, MUL VL]\n"
+ "smax z23.s, p2/M, z23.s, z16.s\n"
+ "st1b { z25.s }, p0, [x21, x27]\n"
+ "add z26.s, z26.s, z1.s\n"
+ "st1b { z24.s }, p0, [x20, x27]\n"
+ "st1b { z23.s }, p0, [x19, x27]\n"
+ "incw x27\n"
+ "ld1w { z25.s }, p2/Z, [SP, #5, MUL VL]\n"
+ "add z25.s, z25.s, z1.s\n"
+ "ld1w { z24.s }, p2/Z, [SP, #6, MUL VL]\n"
+ "ld1w { z23.s }, p2/Z, [SP, #7, MUL VL]\n"
+ "add z24.s, z24.s, z1.s\n"
+ "add z23.s, z23.s, z1.s\n"
"b.any 1b\n"
+ "addvl SP, SP, #8\n"
: [params] "+&r" (params)
: [inptrs] "r" (inptrs), [n_channels] "r" (n_output_channels), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp)
- : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp
index debaa8c296..0085bbc6bc 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,358 +41,384 @@ void sve_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst_impl
)
{
__asm__ __volatile__(
+ "mov z20.b, #0x1\n"
+ "ldr x24, [%x[inptrs], #0x0]\n"
+ "ptrue p2.b\n"
+ "mov z22.s, #0x1\n"
+ "ldr x23, [%x[inptrs], #0x8]\n"
+ "lsl x9, %x[n_channels], #0x2\n"
+ "mov z30.s, #0x0\n"
+ "ldr x22, [%x[inptrs], #0x10]\n"
+ "addvl SP, SP, #-8\n"
+ "mov z28.s, #0x0\n"
+ "ldr x21, [%x[inptrs], #0x18]\n"
"mov x20, #0x6\n"
+ "mov z29.s, #0x0\n"
+ "ldr x19, [%x[inptrs], #0x20]\n"
"whilelt p0.b, XZR, x20\n"
- "ldr x22, [%x[inptrs], #0x18]\n"
- "ldr x21, [%x[inptrs], #0x20]\n"
- "ldr x20, [%x[inptrs], #0x10]\n"
- "ld1b { z3.b }, p0/Z, [x22]\n"
- "mov z20.d, z3.d\n"
- "ext z20.b, z20.b, z20.b, #0x1\n"
+ "mov z27.s, #0x0\n"
+ "ld1b { z0.b }, p0/Z, [x24]\n"
+ "mov x28, #0x0\n"
+ "mov z26.s, #0x0\n"
+ "ld1b { z3.b }, p0/Z, [x23]\n"
+ "mov x27, #0x0\n"
+ "mov z25.s, #0x0\n"
+ "ld1b { z5.b }, p0/Z, [x22]\n"
+ "whilelt p1.b, x28, x9\n"
+ "mov z15.d, z0.d\n"
"ld1b { z4.b }, p0/Z, [x21]\n"
- "ldr x24, [%x[inptrs], #0x8]\n"
- "mov z18.d, z4.d\n"
- "ext z18.b, z18.b, z18.b, #0x1\n"
- "ld1b { z2.b }, p0/Z, [x20]\n"
- "ldr x23, [%x[inptrs], #0x28]\n"
- "mov z15.d, z2.d\n"
+ "mov z24.s, #0x0\n"
+ "ld1b { z6.b }, p0/Z, [x19]\n"
"ext z15.b, z15.b, z15.b, #0x1\n"
- "ldr x22, [%x[inptrs], #0x30]\n"
- "ldr x21, [%x[inptrs], #0x38]\n"
- "zip1 z3.d, z3.d, z20.d\n"
- "zip1 z4.d, z4.d, z18.d\n"
- "ldr x20, [%x[inptrs], #0x0]\n"
- "ld1b { z1.b }, p0/Z, [x24]\n"
- "mov z20.d, z1.d\n"
- "ext z20.b, z20.b, z20.b, #0x1\n"
- "ld1b { z5.b }, p0/Z, [x23]\n"
- "ld1b { z6.b }, p0/Z, [x22]\n"
- "mov z13.d, z5.d\n"
- "mov z19.d, z6.d\n"
+ "ldr x21, [%x[inptrs], #0x28]\n"
+ "mov z16.d, z3.d\n"
+ "ldr x20, [%x[inptrs], #0x30]\n"
+ "ext z16.b, z16.b, z16.b, #0x1\n"
+ "ldr x19, [%x[inptrs], #0x38]\n"
+ "mov z18.d, z5.d\n"
"ld1b { z7.b }, p0/Z, [x21]\n"
- "ld1b { z0.b }, p0/Z, [x20]\n"
- "mov z25.d, z7.d\n"
- "zip1 z2.d, z2.d, z15.d\n"
+ "zip1 z0.d, z0.d, z15.d\n"
+ "ld1b { z1.b }, p0/Z, [x20]\n"
+ "mov z0.q, z0.q[0]\n"
+ "ld1b { z2.b }, p0/Z, [x19]\n"
+ "zip1 z3.d, z3.d, z16.d\n"
+ "ld1rw { z15.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
"mov z3.q, z3.q[0]\n"
- "mov z4.q, z4.q[0]\n"
- "ptrue p2.b\n"
- "ld1rw { z23.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
- "ext z13.b, z13.b, z13.b, #0x1\n"
- "ext z19.b, z19.b, z19.b, #0x1\n"
- "lsl x10, %x[n_channels], #0x2\n"
- "neg z23.s, p2/M, z23.s\n"
- "ext z25.b, z25.b, z25.b, #0x1\n"
- "mov z30.b, #0x1\n"
- "mov x9, #0x0\n"
- "whilelt p1.b, x9, x10\n"
- "mov z24.s, #0x0\n"
- "mov z28.s, #0x0\n"
- "udot z24.s, z30.b, z3.b[0]\n"
- "ld1w { z12.s }, p1/Z, [%x[params]]\n"
- "mov z18.s, #0x0\n"
- "mov z17.s, #0x0\n"
- "udot z28.s, z30.b, z3.b[2]\n"
- "mov x28, #0x0\n"
- "mov z16.d, z0.d\n"
- "udot z18.s, z30.b, z4.b[0]\n"
- "udot z17.s, z30.b, z4.b[2]\n"
- "ldp x27, x26, [%x[outptrs], #0x0]\n"
+ "ldp x26, x25, [%x[outptrs], #0x0]\n"
+ "ext z18.b, z18.b, z18.b, #0x1\n"
+ "ldp x24, x23, [%x[outptrs], #0x10]\n"
+ "mov z16.d, z4.d\n"
+ "ldp x22, x21, [%x[outptrs], #0x20]\n"
"ext z16.b, z16.b, z16.b, #0x1\n"
- "zip1 z1.d, z1.d, z20.d\n"
- "ldp x25, x24, [%x[outptrs], #0x10]\n"
- "ldp x23, x22, [%x[outptrs], #0x20]\n"
- "mov z2.q, z2.q[0]\n"
- "zip1 z5.d, z5.d, z13.d\n"
- "ldp x21, x20, [%x[outptrs], #0x30]\n"
+ "ldp x20, x19, [%x[outptrs], #0x30]\n"
+ "mov z17.d, z6.d\n"
"ld1rw { z14.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
- "zip1 z6.d, z6.d, z19.d\n"
- "zip1 z7.d, z7.d, z25.d\n"
- "ld1rw { z13.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
- "ld1rw { z15.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
- "mov z26.s, #0x0\n"
- "mov z22.s, #0x0\n"
- "udot z26.s, z30.b, z2.b[0]\n"
+ "zip1 z5.d, z5.d, z18.d\n"
+ "ld1rw { z31.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
+ "mov z5.q, z5.q[0]\n"
+ "ld1rw { z12.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+ "zip1 z4.d, z4.d, z16.d\n"
+ "ld1w { z13.s }, p1/Z, [%x[params]]\n"
+ "mov z4.q, z4.q[0]\n"
"ld1b { z8.b }, p1/Z, [%x[params], #1, MUL VL]\n"
- "mov z29.s, #0x1\n"
- "udot z22.s, z30.b, z2.b[2]\n"
- "udot z24.s, z29.b, z3.b[1]\n"
+ "ext z17.b, z17.b, z17.b, #0x1\n"
"ld1b { z9.b }, p1/Z, [%x[params], #2, MUL VL]\n"
- "zip1 z0.d, z0.d, z16.d\n"
- "mov z1.q, z1.q[0]\n"
- "udot z28.s, z29.b, z3.b[3]\n"
+ "mov z16.d, z7.d\n"
"ld1b { z10.b }, p1/Z, [%x[params], #3, MUL VL]\n"
- "mov z5.q, z5.q[0]\n"
- "mov z6.q, z6.q[0]\n"
- "udot z18.s, z29.b, z4.b[1]\n"
+ "ext z16.b, z16.b, z16.b, #0x1\n"
"ld1b { z11.b }, p1/Z, [%x[params], #4, MUL VL]\n"
+ "addvl %x[params], %x[params], #5\n"
+ "zip1 z6.d, z6.d, z17.d\n"
+ "mov z17.d, z1.d\n"
+ "mov z6.q, z6.q[0]\n"
+ "zip1 z7.d, z7.d, z16.d\n"
"mov z7.q, z7.q[0]\n"
+ "ext z17.b, z17.b, z17.b, #0x1\n"
+ "mov z16.d, z2.d\n"
+ "ext z16.b, z16.b, z16.b, #0x1\n"
+ "mov z23.s, #0x0\n"
+ "zip1 z1.d, z1.d, z17.d\n"
+ "mov z1.q, z1.q[0]\n"
+ "zip1 z2.d, z2.d, z16.d\n"
+ "mov z2.q, z2.q[0]\n"
+ "mov z18.s, #0x0\n"
+ "mov z17.s, #0x0\n"
+ "mov z16.s, #0x0\n"
"mov z21.s, #0x0\n"
- "udot z17.s, z29.b, z4.b[3]\n"
- "addvl %x[params], %x[params], #5\n"
- "mov z20.s, #0x0\n"
- "mov z25.s, #0x0\n"
- "udot z21.s, z30.b, z1.b[0]\n"
- "mov z27.s, #0x0\n"
"mov z19.s, #0x0\n"
- "udot z20.s, z30.b, z1.b[2]\n"
- "udot z25.s, z30.b, z5.b[0]\n"
- "udot z27.s, z30.b, z5.b[2]\n"
- "mov z0.q, z0.q[0]\n"
- "udot z19.s, z30.b, z6.b[0]\n"
- "udot z26.s, z29.b, z2.b[1]\n"
+ "udot z30.s, z20.b, z0.b[0]\n"
+ "udot z28.s, z20.b, z0.b[2]\n"
+ "udot z29.s, z20.b, z3.b[0]\n"
+ "udot z27.s, z20.b, z3.b[2]\n"
+ "udot z30.s, z22.b, z0.b[1]\n"
+ "udot z28.s, z22.b, z0.b[3]\n"
+ "udot z29.s, z22.b, z3.b[1]\n"
+ "udot z27.s, z22.b, z3.b[3]\n"
+ "udot z26.s, z20.b, z5.b[0]\n"
+ "udot z25.s, z20.b, z5.b[2]\n"
+ "udot z24.s, z20.b, z4.b[0]\n"
+ "udot z23.s, z20.b, z4.b[2]\n"
+ "udot z26.s, z22.b, z5.b[1]\n"
+ "udot z25.s, z22.b, z5.b[3]\n"
+ "udot z24.s, z22.b, z4.b[1]\n"
+ "udot z23.s, z22.b, z4.b[3]\n"
+ "udot z18.s, z20.b, z6.b[0]\n"
+ "udot z17.s, z20.b, z6.b[2]\n"
+ "udot z16.s, z20.b, z7.b[0]\n"
+ "udot z21.s, z20.b, z7.b[2]\n"
+ "udot z18.s, z22.b, z6.b[1]\n"
+ "udot z17.s, z22.b, z6.b[3]\n"
+ "udot z16.s, z22.b, z7.b[1]\n"
+ "udot z21.s, z22.b, z7.b[3]\n"
+ "udot z19.s, z20.b, z1.b[0]\n"
+ "mov z30.d, z30.d\n"
+ "mov z28.d, z28.d\n"
+ "add z30.s, z30.s, z29.s\n"
+ "udot z19.s, z22.b, z1.b[1]\n"
+ "add z28.s, z28.s, z27.s\n"
+ "add z30.s, z30.s, z26.s\n"
+ "mov z29.d, z29.d\n"
+ "add z28.s, z28.s, z25.s\n"
+ "add z30.s, z30.s, z24.s\n"
+ "mov z27.d, z27.d\n"
+ "add z28.s, z28.s, z23.s\n"
+ "add z30.s, z30.s, z18.s\n"
+ "add z29.s, z29.s, z26.s\n"
+ "add z28.s, z28.s, z17.s\n"
+ "add z27.s, z27.s, z25.s\n"
+ "add z29.s, z29.s, z24.s\n"
+ "mov z26.d, z26.d\n"
+ "add z27.s, z27.s, z23.s\n"
+ "add z29.s, z29.s, z18.s\n"
+ "mov z25.d, z25.d\n"
+ "add z27.s, z27.s, z17.s\n"
+ "add z29.s, z29.s, z16.s\n"
+ "add z26.s, z26.s, z24.s\n"
+ "add z27.s, z27.s, z21.s\n"
+ "add z25.s, z25.s, z23.s\n"
+ "add z26.s, z26.s, z18.s\n"
+ "mov z24.d, z24.d\n"
+ "add z25.s, z25.s, z17.s\n"
+ "add z26.s, z26.s, z16.s\n"
+ "mov z23.d, z23.d\n"
+ "add z25.s, z25.s, z21.s\n"
+ "add z26.s, z26.s, z19.s\n"
"add z24.s, z24.s, z18.s\n"
"mov z18.s, #0x0\n"
- "udot z18.s, z30.b, z6.b[2]\n"
- "udot z22.s, z29.b, z2.b[3]\n"
- "add z17.s, z28.s, z17.s\n"
- "mov z16.s, #0x0\n"
- "udot z16.s, z30.b, z7.b[0]\n"
- "udot z21.s, z29.b, z1.b[1]\n"
- "udot z20.s, z29.b, z1.b[3]\n"
- "add z28.s, z26.s, z24.s\n"
- "udot z25.s, z29.b, z5.b[1]\n"
- "udot z27.s, z29.b, z5.b[3]\n"
- "add z31.s, z22.s, z17.s\n"
- "udot z19.s, z29.b, z6.b[1]\n"
- "udot z18.s, z29.b, z6.b[3]\n"
- "add z22.s, z21.s, z28.s\n"
- "udot z16.s, z29.b, z7.b[1]\n"
- "add z21.s, z20.s, z31.s\n"
- "add z20.s, z25.s, z19.s\n"
- "add z19.s, z27.s, z18.s\n"
- "add z18.s, z16.s, z24.s\n"
- "mov z16.s, #0x0\n"
- "udot z16.s, z30.b, z7.b[2]\n"
- "udot z16.s, z29.b, z7.b[3]\n"
- "add z17.s, z16.s, z17.s\n"
- "mov z16.s, #0x0\n"
- "udot z16.s, z30.b, z0.b[0]\n"
- "udot z16.s, z29.b, z0.b[1]\n"
- "add z24.s, z22.s, z16.s\n"
- "add z26.s, z22.s, z25.s\n"
- "mul z24.s, p2/M, z24.s, z23.s\n"
- "mul z26.s, p2/M, z26.s, z23.s\n"
+ "udot z18.s, z20.b, z1.b[2]\n"
+ "add z23.s, z23.s, z17.s\n"
+ "mov z17.s, #0x0\n"
+ "udot z17.s, z20.b, z2.b[0]\n"
+ "udot z18.s, z22.b, z1.b[3]\n"
+ "add z24.s, z24.s, z16.s\n"
"mov z16.s, #0x0\n"
- "udot z16.s, z30.b, z0.b[2]\n"
- "udot z16.s, z29.b, z0.b[3]\n"
- "add z25.s, z21.s, z16.s\n"
- "add z27.s, z21.s, z27.s\n"
- "mul z25.s, p2/M, z25.s, z23.s\n"
- "mul z27.s, p2/M, z27.s, z23.s\n"
- "add z28.s, z20.s, z28.s\n"
- "add z29.s, z19.s, z31.s\n"
- "mul z28.s, p2/M, z28.s, z23.s\n"
- "mul z29.s, p2/M, z29.s, z23.s\n"
- "add z30.s, z18.s, z20.s\n"
- "add z31.s, z17.s, z19.s\n"
- "mul z30.s, p2/M, z30.s, z23.s\n"
- "mul z31.s, p2/M, z31.s, z23.s\n"
- "zip1 z19.s, z24.s, z26.s\n"
- "zip1 z18.s, z25.s, z27.s\n"
- "zip1 z17.s, z28.s, z30.s\n"
- "zip1 z16.s, z29.s, z31.s\n"
- "zip1 z22.s, z19.s, z18.s\n"
- "zip1 z23.s, z17.s, z16.s\n"
- "add z24.s, z24.s, z12.s\n"
- "add z25.s, z25.s, z12.s\n"
- "add z26.s, z26.s, z12.s\n"
- "add z27.s, z27.s, z12.s\n"
- "add z28.s, z28.s, z12.s\n"
- "add z29.s, z29.s, z12.s\n"
- "add z30.s, z30.s, z12.s\n"
- "add z31.s, z31.s, z12.s\n"
+ "udot z17.s, z22.b, z2.b[1]\n"
+ "udot z16.s, z20.b, z2.b[2]\n"
+ "add z25.s, z25.s, z18.s\n"
+ "add z23.s, z23.s, z21.s\n"
+ "add z24.s, z24.s, z19.s\n"
+ "udot z16.s, z22.b, z2.b[3]\n"
+ "add z23.s, z23.s, z18.s\n"
+ "add z24.s, z24.s, z17.s\n"
+ "neg z15.s, p2/M, z15.s\n"
+ "add z23.s, z23.s, z16.s\n"
+ "mul z30.s, p2/M, z30.s, z15.s\n"
+ "st1w { z30.s }, p2, [SP]\n"
+ "add z30.s, z30.s, z13.s\n"
+ "mul z28.s, p2/M, z28.s, z15.s\n"
+ "st1w { z28.s }, p2, [SP, #1, MUL VL]\n"
+ "add z28.s, z28.s, z13.s\n"
+ "mul z29.s, p2/M, z29.s, z15.s\n"
+ "st1w { z29.s }, p2, [SP, #2, MUL VL]\n"
+ "add z29.s, z29.s, z13.s\n"
+ "mul z27.s, p2/M, z27.s, z15.s\n"
+ "st1w { z27.s }, p2, [SP, #3, MUL VL]\n"
+ "add z27.s, z27.s, z13.s\n"
+ "mul z26.s, p2/M, z26.s, z15.s\n"
+ "st1w { z26.s }, p2, [SP, #4, MUL VL]\n"
+ "add z26.s, z26.s, z13.s\n"
+ "mul z25.s, p2/M, z25.s, z15.s\n"
+ "st1w { z25.s }, p2, [SP, #5, MUL VL]\n"
+ "add z25.s, z25.s, z13.s\n"
+ "mul z24.s, p2/M, z24.s, z15.s\n"
+ "st1w { z24.s }, p2, [SP, #6, MUL VL]\n"
+ "add z24.s, z24.s, z13.s\n"
+ "mul z23.s, p2/M, z23.s, z15.s\n"
+ "st1w { z23.s }, p2, [SP, #7, MUL VL]\n"
+ "add z23.s, z23.s, z13.s\n"
"1:" // Loop
- "udot z24.s, z8.b, z0.b[0]\n"
- "udot z25.s, z8.b, z0.b[2]\n"
- "ld1w { z17.s }, p2/Z, [%x[params], #6, MUL VL]\n"
- "ld1w { z19.s }, p2/Z, [%x[params], #7, MUL VL]\n"
- "udot z26.s, z8.b, z1.b[0]\n"
- "udot z27.s, z8.b, z1.b[2]\n"
- "incb x9\n"
- "whilelt p0.s, x28, %x[n_channels]\n"
- "udot z24.s, z9.b, z0.b[1]\n"
- "udot z25.s, z9.b, z0.b[3]\n"
- "whilelt p1.b, x9, x10\n"
- "udot z26.s, z9.b, z1.b[1]\n"
- "udot z27.s, z9.b, z1.b[3]\n"
- "udot z28.s, z8.b, z2.b[0]\n"
- "udot z29.s, z8.b, z2.b[2]\n"
- "udot z30.s, z8.b, z3.b[0]\n"
- "udot z31.s, z8.b, z3.b[2]\n"
+ "udot z30.s, z8.b, z0.b[0]\n"
+ "ld1w { z22.s }, p2/Z, [%x[params], #6, MUL VL]\n"
+ "incb x28\n"
+ "udot z28.s, z8.b, z0.b[2]\n"
+ "ld1w { z21.s }, p2/Z, [%x[params], #7, MUL VL]\n"
+ "whilelt p0.s, x27, %x[n_channels]\n"
+ "udot z29.s, z8.b, z3.b[0]\n"
+ "whilelt p1.b, x28, x9\n"
+ "udot z27.s, z8.b, z3.b[2]\n"
+ "udot z26.s, z8.b, z5.b[0]\n"
+ "udot z25.s, z8.b, z5.b[2]\n"
+ "udot z24.s, z8.b, z4.b[0]\n"
+ "udot z23.s, z8.b, z4.b[2]\n"
"ld1b { z8.b }, p2/Z, [%x[params]]\n"
- "udot z24.s, z10.b, z1.b[0]\n"
- "udot z25.s, z10.b, z1.b[2]\n"
- "udot z26.s, z10.b, z2.b[0]\n"
- "udot z27.s, z10.b, z2.b[2]\n"
- "udot z28.s, z9.b, z2.b[1]\n"
- "udot z29.s, z9.b, z2.b[3]\n"
- "udot z30.s, z9.b, z3.b[1]\n"
- "udot z31.s, z9.b, z3.b[3]\n"
+ "udot z30.s, z9.b, z0.b[1]\n"
+ "udot z28.s, z9.b, z0.b[3]\n"
+ "udot z29.s, z9.b, z3.b[1]\n"
+ "udot z27.s, z9.b, z3.b[3]\n"
+ "udot z26.s, z9.b, z5.b[1]\n"
+ "udot z25.s, z9.b, z5.b[3]\n"
+ "udot z24.s, z9.b, z4.b[1]\n"
+ "udot z23.s, z9.b, z4.b[3]\n"
"ld1b { z9.b }, p2/Z, [%x[params], #1, MUL VL]\n"
- "udot z24.s, z11.b, z1.b[1]\n"
- "udot z25.s, z11.b, z1.b[3]\n"
- "udot z26.s, z11.b, z2.b[1]\n"
- "udot z27.s, z11.b, z2.b[3]\n"
- "udot z28.s, z10.b, z3.b[0]\n"
- "udot z29.s, z10.b, z3.b[2]\n"
- "udot z30.s, z10.b, z4.b[0]\n"
- "udot z31.s, z10.b, z4.b[2]\n"
+ "udot z30.s, z10.b, z3.b[0]\n"
+ "udot z28.s, z10.b, z3.b[2]\n"
+ "udot z29.s, z10.b, z5.b[0]\n"
+ "udot z27.s, z10.b, z5.b[2]\n"
+ "udot z26.s, z10.b, z4.b[0]\n"
+ "udot z25.s, z10.b, z4.b[2]\n"
+ "udot z24.s, z10.b, z6.b[0]\n"
+ "udot z23.s, z10.b, z6.b[2]\n"
"ld1b { z10.b }, p2/Z, [%x[params], #2, MUL VL]\n"
- "udot z24.s, z8.b, z2.b[0]\n"
- "udot z25.s, z8.b, z2.b[2]\n"
- "udot z26.s, z8.b, z3.b[0]\n"
- "udot z27.s, z8.b, z3.b[2]\n"
- "udot z28.s, z11.b, z3.b[1]\n"
- "udot z29.s, z11.b, z3.b[3]\n"
- "udot z30.s, z11.b, z4.b[1]\n"
- "udot z31.s, z11.b, z4.b[3]\n"
+ "udot z30.s, z11.b, z3.b[1]\n"
+ "udot z28.s, z11.b, z3.b[3]\n"
+ "udot z29.s, z11.b, z5.b[1]\n"
+ "udot z27.s, z11.b, z5.b[3]\n"
+ "udot z26.s, z11.b, z4.b[1]\n"
+ "udot z25.s, z11.b, z4.b[3]\n"
+ "udot z24.s, z11.b, z6.b[1]\n"
+ "udot z23.s, z11.b, z6.b[3]\n"
"ld1b { z11.b }, p2/Z, [%x[params], #3, MUL VL]\n"
- "udot z24.s, z9.b, z2.b[1]\n"
- "udot z25.s, z9.b, z2.b[3]\n"
- "udot z26.s, z9.b, z3.b[1]\n"
- "udot z27.s, z9.b, z3.b[3]\n"
- "udot z28.s, z8.b, z4.b[0]\n"
- "udot z29.s, z8.b, z4.b[2]\n"
"udot z30.s, z8.b, z5.b[0]\n"
- "udot z31.s, z8.b, z5.b[2]\n"
+ "udot z28.s, z8.b, z5.b[2]\n"
+ "udot z29.s, z8.b, z4.b[0]\n"
+ "udot z27.s, z8.b, z4.b[2]\n"
+ "udot z26.s, z8.b, z6.b[0]\n"
+ "udot z25.s, z8.b, z6.b[2]\n"
+ "udot z24.s, z8.b, z7.b[0]\n"
+ "udot z23.s, z8.b, z7.b[2]\n"
"ld1b { z8.b }, p2/Z, [%x[params], #4, MUL VL]\n"
- "udot z24.s, z10.b, z3.b[0]\n"
- "udot z25.s, z10.b, z3.b[2]\n"
- "udot z26.s, z10.b, z4.b[0]\n"
- "udot z27.s, z10.b, z4.b[2]\n"
- "udot z28.s, z9.b, z4.b[1]\n"
- "udot z29.s, z9.b, z4.b[3]\n"
"udot z30.s, z9.b, z5.b[1]\n"
- "udot z31.s, z9.b, z5.b[3]\n"
+ "udot z28.s, z9.b, z5.b[3]\n"
+ "udot z29.s, z9.b, z4.b[1]\n"
+ "udot z27.s, z9.b, z4.b[3]\n"
+ "udot z26.s, z9.b, z6.b[1]\n"
+ "udot z25.s, z9.b, z6.b[3]\n"
+ "udot z24.s, z9.b, z7.b[1]\n"
+ "udot z23.s, z9.b, z7.b[3]\n"
"ld1b { z9.b }, p2/Z, [%x[params], #5, MUL VL]\n"
"addvl %x[params], %x[params], #16\n"
- "udot z24.s, z11.b, z3.b[1]\n"
- "udot z25.s, z11.b, z3.b[3]\n"
- "ld1w { z12.s }, p1/Z, [%x[params], #-8, MUL VL]\n"
- "udot z26.s, z11.b, z4.b[1]\n"
- "udot z27.s, z11.b, z4.b[3]\n"
- "udot z28.s, z10.b, z5.b[0]\n"
- "udot z29.s, z10.b, z5.b[2]\n"
- "udot z30.s, z10.b, z6.b[0]\n"
- "udot z31.s, z10.b, z6.b[2]\n"
+ "udot z30.s, z10.b, z4.b[0]\n"
+ "ld1w { z13.s }, p1/Z, [%x[params], #-8, MUL VL]\n"
+ "udot z28.s, z10.b, z4.b[2]\n"
+ "udot z29.s, z10.b, z6.b[0]\n"
+ "udot z27.s, z10.b, z6.b[2]\n"
+ "udot z26.s, z10.b, z7.b[0]\n"
+ "udot z25.s, z10.b, z7.b[2]\n"
+ "udot z24.s, z10.b, z1.b[0]\n"
+ "udot z23.s, z10.b, z1.b[2]\n"
"ld1b { z10.b }, p1/Z, [%x[params], #-5, MUL VL]\n"
- "udot z24.s, z8.b, z4.b[0]\n"
- "udot z25.s, z8.b, z4.b[2]\n"
- "udot z26.s, z8.b, z5.b[0]\n"
- "udot z27.s, z8.b, z5.b[2]\n"
- "udot z28.s, z11.b, z5.b[1]\n"
- "udot z29.s, z11.b, z5.b[3]\n"
- "udot z30.s, z11.b, z6.b[1]\n"
- "udot z31.s, z11.b, z6.b[3]\n"
+ "udot z30.s, z11.b, z4.b[1]\n"
+ "udot z28.s, z11.b, z4.b[3]\n"
+ "udot z29.s, z11.b, z6.b[1]\n"
+ "udot z27.s, z11.b, z6.b[3]\n"
+ "udot z26.s, z11.b, z7.b[1]\n"
+ "udot z25.s, z11.b, z7.b[3]\n"
+ "udot z24.s, z11.b, z1.b[1]\n"
+ "udot z23.s, z11.b, z1.b[3]\n"
"ld1b { z11.b }, p1/Z, [%x[params], #-4, MUL VL]\n"
- "udot z24.s, z9.b, z4.b[1]\n"
- "udot z25.s, z9.b, z4.b[3]\n"
- ".inst 0x04b17718 // sqrdmulh z24.s, z24.s, z17.s\n"
- "udot z26.s, z9.b, z5.b[1]\n"
- "udot z27.s, z9.b, z5.b[3]\n"
- ".inst 0x04b17739 // sqrdmulh z25.s, z25.s, z17.s\n"
- "udot z28.s, z8.b, z6.b[0]\n"
- "udot z29.s, z8.b, z6.b[2]\n"
- ".inst 0x04b1775a // sqrdmulh z26.s, z26.s, z17.s\n"
- "udot z30.s, z8.b, z7.b[0]\n"
- "udot z31.s, z8.b, z7.b[2]\n"
- ".inst 0x04b1777b // sqrdmulh z27.s, z27.s, z17.s\n"
+ "udot z30.s, z8.b, z6.b[0]\n"
+ "udot z28.s, z8.b, z6.b[2]\n"
+ "udot z29.s, z8.b, z7.b[0]\n"
+ "udot z27.s, z8.b, z7.b[2]\n"
+ "udot z26.s, z8.b, z1.b[0]\n"
+ "udot z25.s, z8.b, z1.b[2]\n"
+ "udot z24.s, z8.b, z2.b[0]\n"
+ "udot z23.s, z8.b, z2.b[2]\n"
"ld1b { z8.b }, p1/Z, [%x[params], #-7, MUL VL]\n"
- "udot z28.s, z9.b, z6.b[1]\n"
- "udot z29.s, z9.b, z6.b[3]\n"
- "and z16.d, z24.d, z19.d\n"
- "udot z30.s, z9.b, z7.b[1]\n"
- "udot z31.s, z9.b, z7.b[3]\n"
- "and z18.d, z25.d, z19.d\n"
+ "udot z30.s, z9.b, z6.b[1]\n"
+ "udot z28.s, z9.b, z6.b[3]\n"
+ "udot z29.s, z9.b, z7.b[1]\n"
+ "udot z27.s, z9.b, z7.b[3]\n"
+ "udot z26.s, z9.b, z1.b[1]\n"
+ "udot z25.s, z9.b, z1.b[3]\n"
+ "udot z24.s, z9.b, z2.b[1]\n"
+ "udot z23.s, z9.b, z2.b[3]\n"
"ld1b { z9.b }, p1/Z, [%x[params], #-6, MUL VL]\n"
- "asr z16.s, z16.s, #0x1f\n"
- "asr z18.s, z18.s, #0x1f\n"
"addvl %x[params], %x[params], #-3\n"
- ".inst 0x04b1779c // sqrdmulh z28.s, z28.s, z17.s\n"
- ".inst 0x04b177bd // sqrdmulh z29.s, z29.s, z17.s\n"
- ".inst 0x04b177de // sqrdmulh z30.s, z30.s, z17.s\n"
- ".inst 0x04b177ff // sqrdmulh z31.s, z31.s, z17.s\n"
- "and z17.d, z26.d, z19.d\n"
+ ".inst 0x04b677de // sqrdmulh z30.s, z30.s, z22.s\n"
+ ".inst 0x04b6779c // sqrdmulh z28.s, z28.s, z22.s\n"
+ ".inst 0x04b677bd // sqrdmulh z29.s, z29.s, z22.s\n"
+ ".inst 0x04b6777b // sqrdmulh z27.s, z27.s, z22.s\n"
+ ".inst 0x04b6775a // sqrdmulh z26.s, z26.s, z22.s\n"
+ "and z20.d, z30.d, z21.d\n"
+ "asr z20.s, z20.s, #0x1f\n"
+ "and z19.d, z28.d, z21.d\n"
+ "and z18.d, z29.d, z21.d\n"
+ "asr z19.s, z19.s, #0x1f\n"
+ "and z17.d, z27.d, z21.d\n"
+ "and z16.d, z26.d, z21.d\n"
+ "asr z18.s, z18.s, #0x1f\n"
+ ".inst 0x04b67739 // sqrdmulh z25.s, z25.s, z22.s\n"
"asr z17.s, z17.s, #0x1f\n"
- "sqadd z24.s, z24.s, z16.s\n"
- "and z16.d, z27.d, z19.d\n"
- ".inst 0x44828a78 // srshl z24.s, p2/M, z24.s, z19.s\n"
- "asr z16.s, z16.s, #0x1f\n"
- "sqadd z25.s, z25.s, z18.s\n"
- ".inst 0x44828a79 // srshl z25.s, p2/M, z25.s, z19.s\n"
- "sqadd z26.s, z26.s, z17.s\n"
- "sqadd z27.s, z27.s, z16.s\n"
- ".inst 0x44828a7a // srshl z26.s, p2/M, z26.s, z19.s\n"
- ".inst 0x44828a7b // srshl z27.s, p2/M, z27.s, z19.s\n"
- "and z16.d, z28.d, z19.d\n"
- "and z18.d, z29.d, z19.d\n"
- "and z17.d, z30.d, z19.d\n"
+ "sqadd z30.s, z30.s, z20.s\n"
+ ".inst 0x04b67718 // sqrdmulh z24.s, z24.s, z22.s\n"
"asr z16.s, z16.s, #0x1f\n"
+ ".inst 0x04b676f7 // sqrdmulh z23.s, z23.s, z22.s\n"
+ "sqadd z28.s, z28.s, z19.s\n"
+ "sqadd z29.s, z29.s, z18.s\n"
+ "and z18.d, z25.d, z21.d\n"
"asr z18.s, z18.s, #0x1f\n"
+ "sqadd z27.s, z27.s, z17.s\n"
+ "sqadd z26.s, z26.s, z16.s\n"
+ "and z17.d, z24.d, z21.d\n"
"asr z17.s, z17.s, #0x1f\n"
- "sqadd z28.s, z28.s, z16.s\n"
- "and z16.d, z31.d, z19.d\n"
- ".inst 0x44828a7c // srshl z28.s, p2/M, z28.s, z19.s\n"
+ "and z16.d, z23.d, z21.d\n"
+ ".inst 0x44828abe // srshl z30.s, p2/M, z30.s, z21.s\n"
"asr z16.s, z16.s, #0x1f\n"
- "sqadd z29.s, z29.s, z18.s\n"
- ".inst 0x44828a7d // srshl z29.s, p2/M, z29.s, z19.s\n"
- "sqadd z30.s, z30.s, z17.s\n"
- "sqadd z31.s, z31.s, z16.s\n"
- ".inst 0x44828a7e // srshl z30.s, p2/M, z30.s, z19.s\n"
- ".inst 0x44828a7f // srshl z31.s, p2/M, z31.s, z19.s\n"
- "add z24.s, z24.s, z14.s\n"
- "add z25.s, z25.s, z14.s\n"
- "smin z24.s, p2/M, z24.s, z15.s\n"
- "smin z25.s, p2/M, z25.s, z15.s\n"
- "add z26.s, z26.s, z14.s\n"
- "add z27.s, z27.s, z14.s\n"
- "smin z26.s, p2/M, z26.s, z15.s\n"
- "smin z27.s, p2/M, z27.s, z15.s\n"
+ "sqadd z25.s, z25.s, z18.s\n"
+ ".inst 0x44828abc // srshl z28.s, p2/M, z28.s, z21.s\n"
+ "add z30.s, z30.s, z14.s\n"
+ "sqadd z24.s, z24.s, z17.s\n"
+ ".inst 0x44828abd // srshl z29.s, p2/M, z29.s, z21.s\n"
"add z28.s, z28.s, z14.s\n"
+ "sqadd z23.s, z23.s, z16.s\n"
+ "smin z30.s, p2/M, z30.s, z12.s\n"
"add z29.s, z29.s, z14.s\n"
- "smin z28.s, p2/M, z28.s, z15.s\n"
- "smin z29.s, p2/M, z29.s, z15.s\n"
- "add z30.s, z30.s, z14.s\n"
- "add z31.s, z31.s, z14.s\n"
- "smin z30.s, p2/M, z30.s, z15.s\n"
- "smin z31.s, p2/M, z31.s, z15.s\n"
- "smax z24.s, p2/M, z24.s, z13.s\n"
- "smax z25.s, p2/M, z25.s, z13.s\n"
- "st1b { z24.s }, p0, [x27, x28]\n"
- "mov z24.s, z22.s[0]\n"
- "smax z26.s, p2/M, z26.s, z13.s\n"
- "smax z27.s, p2/M, z27.s, z13.s\n"
- "st1b { z25.s }, p0, [x26, x28]\n"
- "mov z25.s, z22.s[1]\n"
- "smax z28.s, p2/M, z28.s, z13.s\n"
- "smax z29.s, p2/M, z29.s, z13.s\n"
- "st1b { z26.s }, p0, [x25, x28]\n"
- "mov z26.s, z22.s[2]\n"
- "smax z30.s, p2/M, z30.s, z13.s\n"
- "smax z31.s, p2/M, z31.s, z13.s\n"
- "st1b { z27.s }, p0, [x24, x28]\n"
- "mov z27.s, z22.s[3]\n"
- "st1b { z28.s }, p0, [x23, x28]\n"
- "mov z28.s, z23.s[0]\n"
- "add z24.s, z24.s, z12.s\n"
- "st1b { z29.s }, p0, [x22, x28]\n"
- "mov z29.s, z23.s[1]\n"
- "add z25.s, z25.s, z12.s\n"
- "st1b { z30.s }, p0, [x21, x28]\n"
- "mov z30.s, z23.s[2]\n"
- "add z26.s, z26.s, z12.s\n"
- "st1b { z31.s }, p0, [x20, x28]\n"
- "mov z31.s, z23.s[3]\n"
- "incw x28\n"
- "add z27.s, z27.s, z12.s\n"
- "add z28.s, z28.s, z12.s\n"
- "add z29.s, z29.s, z12.s\n"
- "add z30.s, z30.s, z12.s\n"
- "add z31.s, z31.s, z12.s\n"
+ "smin z28.s, p2/M, z28.s, z12.s\n"
+ ".inst 0x44828abb // srshl z27.s, p2/M, z27.s, z21.s\n"
+ "smax z30.s, p2/M, z30.s, z31.s\n"
+ "st1b { z30.s }, p0, [x26, x27]\n"
+ "add z27.s, z27.s, z14.s\n"
+ "smax z28.s, p2/M, z28.s, z31.s\n"
+ "ld1w { z30.s }, p2/Z, [SP]\n"
+ "smin z29.s, p2/M, z29.s, z12.s\n"
+ "st1b { z28.s }, p0, [x25, x27]\n"
+ "add z30.s, z30.s, z13.s\n"
+ "smin z27.s, p2/M, z27.s, z12.s\n"
+ "ld1w { z28.s }, p2/Z, [SP, #1, MUL VL]\n"
+ "smax z29.s, p2/M, z29.s, z31.s\n"
+ "st1b { z29.s }, p0, [x24, x27]\n"
+ "add z28.s, z28.s, z13.s\n"
+ "smax z27.s, p2/M, z27.s, z31.s\n"
+ "ld1w { z29.s }, p2/Z, [SP, #2, MUL VL]\n"
+ ".inst 0x44828aba // srshl z26.s, p2/M, z26.s, z21.s\n"
+ "st1b { z27.s }, p0, [x23, x27]\n"
+ "add z29.s, z29.s, z13.s\n"
+ ".inst 0x44828ab9 // srshl z25.s, p2/M, z25.s, z21.s\n"
+ "ld1w { z27.s }, p2/Z, [SP, #3, MUL VL]\n"
+ "add z26.s, z26.s, z14.s\n"
+ ".inst 0x44828ab8 // srshl z24.s, p2/M, z24.s, z21.s\n"
+ ".inst 0x44828ab7 // srshl z23.s, p2/M, z23.s, z21.s\n"
+ "add z25.s, z25.s, z14.s\n"
+ "add z27.s, z27.s, z13.s\n"
+ "add z24.s, z24.s, z14.s\n"
+ "add z23.s, z23.s, z14.s\n"
+ "smin z26.s, p2/M, z26.s, z12.s\n"
+ "smin z25.s, p2/M, z25.s, z12.s\n"
+ "smin z24.s, p2/M, z24.s, z12.s\n"
+ "smin z23.s, p2/M, z23.s, z12.s\n"
+ "smax z26.s, p2/M, z26.s, z31.s\n"
+ "st1b { z26.s }, p0, [x22, x27]\n"
+ "smax z25.s, p2/M, z25.s, z31.s\n"
+ "smax z24.s, p2/M, z24.s, z31.s\n"
+ "ld1w { z26.s }, p2/Z, [SP, #4, MUL VL]\n"
+ "smax z23.s, p2/M, z23.s, z31.s\n"
+ "st1b { z25.s }, p0, [x21, x27]\n"
+ "add z26.s, z26.s, z13.s\n"
+ "st1b { z24.s }, p0, [x20, x27]\n"
+ "st1b { z23.s }, p0, [x19, x27]\n"
+ "incw x27\n"
+ "ld1w { z25.s }, p2/Z, [SP, #5, MUL VL]\n"
+ "add z25.s, z25.s, z13.s\n"
+ "ld1w { z24.s }, p2/Z, [SP, #6, MUL VL]\n"
+ "ld1w { z23.s }, p2/Z, [SP, #7, MUL VL]\n"
+ "add z24.s, z24.s, z13.s\n"
+ "add z23.s, z23.s, z13.s\n"
"b.any 1b\n"
+ "addvl SP, SP, #8\n"
: [params] "+&r" (params)
: [inptrs] "r" (inptrs), [n_channels] "r" (n_output_channels), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp)
- : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
index ff3ec0ba48..dc8fad95fa 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -91,316 +91,324 @@ void sve_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "mov x8, #0x0\n"
- "ldr x25, [%x[params], %[offsetof_Params_requant]]\n"
+ "ldr x8, [%x[params], %[offsetof_Params_n_channels]]\n"
"ptrue p4.b\n"
- "ldr x24, [%x[params], %[offsetof_Params_outptrs]]\n"
- "mov x23, x8\n"
- "add x21, x25, %[offsetof_Requantize32_a_offset]\n"
- "ldr x17, [%x[params], %[offsetof_Params_n_channels]]\n"
- "ldr x16, [%x[params], %[offsetof_Params_weights]]\n"
- "add x20, x25, %[offsetof_Requantize32_b_offset]\n"
- "add x22, x25, %[offsetof_Requantize32_c_offset]\n"
- "ld1rb { z23.b }, p4/Z, [x21]\n"
- "ld1rb { z15.b }, p4/Z, [x20]\n"
- "add x21, x25, %[offsetof_Requantize32_minval]\n"
- "add x20, x25, %[offsetof_Requantize32_maxval]\n"
- "ld1rh { z14.h }, p4/Z, [x22]\n"
- "ld1rh { z12.h }, p4/Z, [x21]\n"
- "ld1rh { z11.h }, p4/Z, [x20]\n"
- "ldp x15, x14, [x24, #0x0]\n"
- "incw x23\n"
- "whilelt p3.h, x8, x17\n"
- "ldp x13, x12, [x24, #0x10]\n"
- "whilelt p2.s, x8, x17\n"
- "whilelt p1.s, x23, x17\n"
- "ldr x26, [%x[params], %[offsetof_Params_bias]]\n"
- "ld1sb { z0.h }, p4/Z, [x16]\n"
- "ld1sb { z1.h }, p4/Z, [x16, #1, MUL VL]\n"
- "add x11, %x[params], %[offsetof_Params_inptrs]\n"
- "mov x10, #0x0\n"
- "ld1sb { z2.h }, p4/Z, [x16, #2, MUL VL]\n"
- "ld1sb { z3.h }, p4/Z, [x16, #3, MUL VL]\n"
- ".inst 0x454f1000 // ssublb z0.h, z0.b, z15.b\n"
- ".inst 0x454f1021 // ssublb z1.h, z1.b, z15.b\n"
- "ld1sb { z4.h }, p4/Z, [x16, #4, MUL VL]\n"
- "ld1sb { z5.h }, p4/Z, [x16, #5, MUL VL]\n"
- ".inst 0x454f1042 // ssublb z2.h, z2.b, z15.b\n"
- ".inst 0x454f1063 // ssublb z3.h, z3.b, z15.b\n"
- "ld1sb { z6.h }, p4/Z, [x16, #6, MUL VL]\n"
- "ld1sb { z7.h }, p4/Z, [x16, #7, MUL VL]\n"
- "inch x16, ALL, MUL #8\n"
- ".inst 0x454f1084 // ssublb z4.h, z4.b, z15.b\n"
- "ld1w { z17.s }, p2/Z, [x26]\n"
- "ld1w { z16.s }, p1/Z, [x26, #1, MUL VL]\n"
- "uzp1 z13.s, z17.s, z16.s\n"
- "uzp2 z17.s, z17.s, z16.s\n"
- "ld1sb { z8.h }, p4/Z, [x16]\n"
- "ldp x24, x23, [x11, #0x0]\n"
- "addvl x26, x26, #2\n"
- "mov z26.d, z13.d\n"
- "ldp x22, x21, [x11, #0x10]\n"
- "ldr x20, [x11, #0x20]\n"
- "mov z10.d, z17.d\n"
- "mov z24.d, z13.d\n"
- "ld1b { z31.h }, p3/Z, [x24, x8]\n"
- "ld1b { z30.h }, p3/Z, [x23, x8]\n"
- "mov z16.d, z17.d\n"
+ "ldr x17, [%x[params], %[offsetof_Params_weights]]\n"
+ "mov x16, #0x0\n"
+ "ldr x22, [%x[params], %[offsetof_Params_requant]]\n"
+ "mov x15, #0x0\n"
+ "ldr x14, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "add x13, %x[params], %[offsetof_Params_inptrs]\n"
+ "ldr x12, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "add x19, x22, %[offsetof_Requantize32_a_offset]\n"
+ "ldr x21, [%x[params], %[offsetof_Params_outptrs]]\n"
+ "add x20, x22, %[offsetof_Requantize32_b_offset]\n"
+ "ld1rb { z11.b }, p4/Z, [x19]\n"
+ "add x19, x22, %[offsetof_Requantize32_c_offset]\n"
+ "ld1rb { z26.b }, p4/Z, [x20]\n"
+ "add x20, x22, %[offsetof_Requantize32_minval]\n"
+ "ld1rw { z12.s }, p4/Z, [x19]\n"
+ "add x19, x22, %[offsetof_Requantize32_maxval]\n"
+ "ld1rw { z14.s }, p4/Z, [x20]\n"
+ "whilelt p3.h, x16, x8\n"
+ "ld1rw { z17.s }, p4/Z, [x19]\n"
+ "whilelt p2.s, x16, x8\n"
+ "ldp x11, x10, [x21, #0x0]\n"
+ "mov x19, x16\n"
+ "incw x19\n"
+ "ldp x9, x28, [x21, #0x10]\n"
+ "whilelt p1.s, x19, x8\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ld1w { z10.s }, p2/Z, [x19]\n"
+ "ld1w { z16.s }, p1/Z, [x19, #1, MUL VL]\n"
+ "uzp1 z13.s, z10.s, z16.s\n"
+ "addvl x19, x19, #2\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "uzp2 z15.s, z10.s, z16.s\n"
"mov z25.d, z13.d\n"
- "ld1b { z29.h }, p3/Z, [x22, x8]\n"
- "ld1b { z28.h }, p3/Z, [x21, x8]\n"
- "mov z9.d, z17.d\n"
- ".inst 0x454f10a5 // ssublb z5.h, z5.b, z15.b\n"
- "ld1b { z27.h }, p3/Z, [x20, x8]\n"
- "ldr x9, [%x[params], %[offsetof_Params_requant_muls]]\n"
- ".inst 0x454f10c6 // ssublb z6.h, z6.b, z15.b\n"
- ".inst 0x454f10e7 // ssublb z7.h, z7.b, z15.b\n"
- "ldr x28, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "str x26, [%x[params], %[offsetof_Params_bias]]\n"
- ".inst 0x454f1108 // ssublb z8.h, z8.b, z15.b\n"
- ".inst 0x45571bff // usublb z31.h, z31.b, z23.b\n"
- ".inst 0x45571bde // usublb z30.h, z30.b, z23.b\n"
- ".inst 0x45571bbd // usublb z29.h, z29.b, z23.b\n"
- ".inst 0x45571b9c // usublb z28.h, z28.b, z23.b\n"
- ".inst 0x45571b7b // usublb z27.h, z27.b, z23.b\n"
+ "ld1sb { z0.h }, p4/Z, [x17]\n"
+ "mov z23.d, z13.d\n"
+ "ld1sb { z1.h }, p4/Z, [x17, #1, MUL VL]\n"
+ "mov z9.d, z15.d\n"
+ "ld1sb { z2.h }, p4/Z, [x17, #2, MUL VL]\n"
+ "mov z22.d, z15.d\n"
+ "ld1sb { z3.h }, p4/Z, [x17, #3, MUL VL]\n"
+ "mov z10.d, z13.d\n"
+ "ld1sb { z4.h }, p4/Z, [x17, #4, MUL VL]\n"
+ "mov z24.d, z15.d\n"
+ "ld1sb { z5.h }, p4/Z, [x17, #5, MUL VL]\n"
+ ".inst 0x455a1000 // ssublb z0.h, z0.b, z26.b\n"
+ "ld1sb { z6.h }, p4/Z, [x17, #6, MUL VL]\n"
+ ".inst 0x455a1021 // ssublb z1.h, z1.b, z26.b\n"
+ "ld1sb { z7.h }, p4/Z, [x17, #7, MUL VL]\n"
+ "inch x17, ALL, MUL #8\n"
+ ".inst 0x455a1042 // ssublb z2.h, z2.b, z26.b\n"
+ "ld1sb { z8.h }, p4/Z, [x17]\n"
+ ".inst 0x455a1063 // ssublb z3.h, z3.b, z26.b\n"
+ "ldp x23, x22, [x13, #0x0]\n"
+ ".inst 0x455a1084 // ssublb z4.h, z4.b, z26.b\n"
+ "ldp x21, x20, [x13, #0x10]\n"
+ ".inst 0x455a10a5 // ssublb z5.h, z5.b, z26.b\n"
+ ".inst 0x455a10c6 // ssublb z6.h, z6.b, z26.b\n"
+ "ldr x19, [x13, #0x20]\n"
+ ".inst 0x455a10e7 // ssublb z7.h, z7.b, z26.b\n"
+ ".inst 0x455a1108 // ssublb z8.h, z8.b, z26.b\n"
+ "ld1b { z31.h }, p3/Z, [x23, x16]\n"
+ "ld1b { z30.h }, p3/Z, [x22, x16]\n"
+ ".inst 0x454b1bff // usublb z31.h, z31.b, z11.b\n"
+ "ld1b { z29.h }, p3/Z, [x21, x16]\n"
+ ".inst 0x454b1bde // usublb z30.h, z30.b, z11.b\n"
+ "ld1b { z28.h }, p3/Z, [x20, x16]\n"
+ "ld1b { z27.h }, p3/Z, [x19, x16]\n"
+ ".inst 0x454b1bbd // usublb z29.h, z29.b, z11.b\n"
+ ".inst 0x454b1b9c // usublb z28.h, z28.b, z11.b\n"
+ ".inst 0x454b1b7b // usublb z27.h, z27.b, z11.b\n"
"1:" // Loop
".inst 0x448443ed // smlalb z13.s, p4/M, z31.h, z4.h\n"
- ".inst 0x448447f1 // smlalt z17.s, p4/M, z31.h, z4.h\n"
- "ldr x22, [x11, #0x28]\n"
- "ldr x27, [x11, #0x38]\n"
- ".inst 0x448343fa // smlalb z26.s, p4/M, z31.h, z3.h\n"
- ".inst 0x448347ea // smlalt z10.s, p4/M, z31.h, z3.h\n"
- "ldr x21, [x11, #0x30]\n"
- "ldr x26, [x11, #0x40]\n"
+ "ldr x20, [x13, #0x28]\n"
+ "whilelt p0.h, x15, x8\n"
+ ".inst 0x448447ef // smlalt z15.s, p4/M, z31.h, z4.h\n"
+ "ldr x27, [x13, #0x30]\n"
+ "inch x17\n"
+ ".inst 0x448343f9 // smlalb z25.s, p4/M, z31.h, z3.h\n"
+ "ldr x26, [x13, #0x38]\n"
+ ".inst 0x448347e9 // smlalt z9.s, p4/M, z31.h, z3.h\n"
+ "ldr x25, [x13, #0x40]\n"
+ ".inst 0x448143f7 // smlalb z23.s, p4/M, z31.h, z1.h\n"
+ "ldr x19, [x13, #0x48]\n"
+ ".inst 0x448147f6 // smlalt z22.s, p4/M, z31.h, z1.h\n"
+ "ldr x24, [x13, #0x50]\n"
+ ".inst 0x448043ea // smlalb z10.s, p4/M, z31.h, z0.h\n"
+ "ldr x23, [x13, #0x58]\n"
+ ".inst 0x448047f8 // smlalt z24.s, p4/M, z31.h, z0.h\n"
+ "ld1b { z31.h }, p3/Z, [x20, x16]\n"
".inst 0x448043cd // smlalb z13.s, p4/M, z30.h, z0.h\n"
- ".inst 0x448047d1 // smlalt z17.s, p4/M, z30.h, z0.h\n"
- "ldr x20, [x11, #0x48]\n"
- "ld1b { z30.h }, p3/Z, [x20, x8]\n"
- ".inst 0x448243ba // smlalb z26.s, p4/M, z29.h, z2.h\n"
- ".inst 0x448247aa // smlalt z10.s, p4/M, z29.h, z2.h\n"
- "ld1b { z29.h }, p3/Z, [x21, x8]\n"
- ".inst 0x45571bbd // usublb z29.h, z29.b, z23.b\n"
- ".inst 0x448143f8 // smlalb z24.s, p4/M, z31.h, z1.h\n"
- ".inst 0x448147f0 // smlalt z16.s, p4/M, z31.h, z1.h\n"
- "ldr x25, [x11, #0x50]\n"
- "ldr x24, [x11, #0x58]\n"
- ".inst 0x448043f9 // smlalb z25.s, p4/M, z31.h, z0.h\n"
- ".inst 0x448047e9 // smlalt z9.s, p4/M, z31.h, z0.h\n"
- "ld1b { z31.h }, p3/Z, [x22, x8]\n"
- ".inst 0x45571bff // usublb z31.h, z31.b, z23.b\n"
+ "ldr x22, [x13, #0x60]\n"
+ ".inst 0x448047cf // smlalt z15.s, p4/M, z30.h, z0.h\n"
+ "ld1b { z30.h }, p3/Z, [x19, x16]\n"
+ ".inst 0x448243b9 // smlalb z25.s, p4/M, z29.h, z2.h\n"
+ "ldr x21, [x13, #0x68]\n"
+ ".inst 0x454b1bff // usublb z31.h, z31.b, z11.b\n"
+ "ldr x20, [x13, #0x70]\n"
+ ".inst 0x448247a9 // smlalt z9.s, p4/M, z29.h, z2.h\n"
+ "ld1b { z29.h }, p3/Z, [x27, x16]\n"
+ ".inst 0x454b1bde // usublb z30.h, z30.b, z11.b\n"
+ "ldr x19, [x13, #0x78]\n"
".inst 0x4485438d // smlalb z13.s, p4/M, z28.h, z5.h\n"
- ".inst 0x44854791 // smlalt z17.s, p4/M, z28.h, z5.h\n"
- ".inst 0x45571bde // usublb z30.h, z30.b, z23.b\n"
- "ldr x23, [x11, #0x60]\n"
- ".inst 0x4484439a // smlalb z26.s, p4/M, z28.h, z4.h\n"
- ".inst 0x4484478a // smlalt z10.s, p4/M, z28.h, z4.h\n"
- "ldr x22, [x11, #0x68]\n"
- "ldr x21, [x11, #0x70]\n"
- ".inst 0x44824398 // smlalb z24.s, p4/M, z28.h, z2.h\n"
- ".inst 0x44824790 // smlalt z16.s, p4/M, z28.h, z2.h\n"
- "ldr x20, [x11, #0x78]\n"
- "ld1w { z20.s }, p2/Z, [x9]\n"
- ".inst 0x44814399 // smlalb z25.s, p4/M, z28.h, z1.h\n"
- ".inst 0x44814789 // smlalt z9.s, p4/M, z28.h, z1.h\n"
- "ld1b { z28.h }, p3/Z, [x27, x8]\n"
- ".inst 0x45571b9c // usublb z28.h, z28.b, z23.b\n"
+ "ld1w { z19.s }, p2/Z, [x14]\n"
+ ".inst 0x4485478f // smlalt z15.s, p4/M, z28.h, z5.h\n"
+ "ld1w { z16.s }, p1/Z, [x14, #1, MUL VL]\n"
+ "addvl x14, x14, #2\n"
+ ".inst 0x454b1bbd // usublb z29.h, z29.b, z11.b\n"
+ ".inst 0x44844399 // smlalb z25.s, p4/M, z28.h, z4.h\n"
+ ".inst 0x44844789 // smlalt z9.s, p4/M, z28.h, z4.h\n"
+ "uzp1 z21.s, z19.s, z16.s\n"
+ "uzp2 z18.s, z19.s, z16.s\n"
+ "ld1w { z19.s }, p2/Z, [x12]\n"
+ ".inst 0x44824397 // smlalb z23.s, p4/M, z28.h, z2.h\n"
+ "ld1w { z16.s }, p1/Z, [x12, #1, MUL VL]\n"
+ "addvl x12, x12, #2\n"
+ ".inst 0x44824796 // smlalt z22.s, p4/M, z28.h, z2.h\n"
+ ".inst 0x4481438a // smlalb z10.s, p4/M, z28.h, z1.h\n"
+ ".inst 0x44814798 // smlalt z24.s, p4/M, z28.h, z1.h\n"
+ "ld1b { z28.h }, p3/Z, [x26, x16]\n"
+ "uzp1 z20.s, z19.s, z16.s\n"
+ "uzp2 z19.s, z19.s, z16.s\n"
+ ".inst 0x448643f7 // smlalb z23.s, p4/M, z31.h, z6.h\n"
+ ".inst 0x454b1b9c // usublb z28.h, z28.b, z11.b\n"
+ ".inst 0x448647f6 // smlalt z22.s, p4/M, z31.h, z6.h\n"
+ "ld1b { z31.h }, p3/Z, [x25, x16]\n"
".inst 0x4487436d // smlalb z13.s, p4/M, z27.h, z7.h\n"
- ".inst 0x44874771 // smlalt z17.s, p4/M, z27.h, z7.h\n"
- "ld1w { z18.s }, p1/Z, [x9, #1, MUL VL]\n"
- "uzp1 z19.s, z20.s, z18.s\n"
- ".inst 0x4486437a // smlalb z26.s, p4/M, z27.h, z6.h\n"
- ".inst 0x4486476a // smlalt z10.s, p4/M, z27.h, z6.h\n"
- "uzp2 z22.s, z20.s, z18.s\n"
- "ld1w { z20.s }, p2/Z, [x28]\n"
- ".inst 0x448643f8 // smlalb z24.s, p4/M, z31.h, z6.h\n"
- ".inst 0x448647f0 // smlalt z16.s, p4/M, z31.h, z6.h\n"
- "ld1b { z31.h }, p3/Z, [x26, x8]\n"
- ".inst 0x45571bff // usublb z31.h, z31.b, z23.b\n"
- ".inst 0x44834379 // smlalb z25.s, p4/M, z27.h, z3.h\n"
- ".inst 0x44834769 // smlalt z9.s, p4/M, z27.h, z3.h\n"
- "whilelt p0.h, x10, x17\n"
- "inch x16\n"
+ ".inst 0x4487476f // smlalt z15.s, p4/M, z27.h, z7.h\n"
+ ".inst 0x44864379 // smlalb z25.s, p4/M, z27.h, z6.h\n"
+ ".inst 0x454b1bff // usublb z31.h, z31.b, z11.b\n"
+ ".inst 0x44864769 // smlalt z9.s, p4/M, z27.h, z6.h\n"
+ ".inst 0x44844377 // smlalb z23.s, p4/M, z27.h, z4.h\n"
+ ".inst 0x44844776 // smlalt z22.s, p4/M, z27.h, z4.h\n"
+ ".inst 0x4483436a // smlalb z10.s, p4/M, z27.h, z3.h\n"
+ ".inst 0x44834778 // smlalt z24.s, p4/M, z27.h, z3.h\n"
".inst 0x4481438d // smlalb z13.s, p4/M, z28.h, z1.h\n"
- ".inst 0x44814791 // smlalt z17.s, p4/M, z28.h, z1.h\n"
- "ldr x26, [%x[params], %[offsetof_Params_bias]]\n"
- "addvl x9, x9, #2\n"
- ".inst 0x4480439a // smlalb z26.s, p4/M, z28.h, z0.h\n"
- ".inst 0x4480478a // smlalt z10.s, p4/M, z28.h, z0.h\n"
- "ld1b { z28.h }, p3/Z, [x24, x8]\n"
- ".inst 0x45571b9c // usublb z28.h, z28.b, z23.b\n"
- ".inst 0x44844378 // smlalb z24.s, p4/M, z27.h, z4.h\n"
- ".inst 0x448843b9 // smlalb z25.s, p4/M, z29.h, z8.h\n"
- ".inst 0x44844770 // smlalt z16.s, p4/M, z27.h, z4.h\n"
- ".inst 0x448847a9 // smlalt z9.s, p4/M, z29.h, z8.h\n"
- "ld1b { z29.h }, p3/Z, [x25, x8]\n"
- ".inst 0x45571bbd // usublb z29.h, z29.b, z23.b\n"
+ ".inst 0x4481478f // smlalt z15.s, p4/M, z28.h, z1.h\n"
+ ".inst 0x448843aa // smlalb z10.s, p4/M, z29.h, z8.h\n"
+ ".inst 0x448847b8 // smlalt z24.s, p4/M, z29.h, z8.h\n"
+ "ld1b { z29.h }, p3/Z, [x24, x16]\n"
+ ".inst 0x44804399 // smlalb z25.s, p4/M, z28.h, z0.h\n"
+ ".inst 0x44804789 // smlalt z9.s, p4/M, z28.h, z0.h\n"
+ "ld1b { z28.h }, p3/Z, [x23, x16]\n"
".inst 0x448243ed // smlalb z13.s, p4/M, z31.h, z2.h\n"
- ".inst 0x448247f1 // smlalt z17.s, p4/M, z31.h, z2.h\n"
- "ld1w { z18.s }, p1/Z, [x28, #1, MUL VL]\n"
- "addvl x28, x28, #2\n"
- ".inst 0x448143fa // smlalb z26.s, p4/M, z31.h, z1.h\n"
- ".inst 0x448147ea // smlalt z10.s, p4/M, z31.h, z1.h\n"
- "ld1b { z31.h }, p3/Z, [x23, x8]\n"
- ".inst 0x45571bff // usublb z31.h, z31.b, z23.b\n"
- ".inst 0x448543d8 // smlalb z24.s, p4/M, z30.h, z5.h\n"
- ".inst 0x448443d9 // smlalb z25.s, p4/M, z30.h, z4.h\n"
- "uzp1 z1.s, z20.s, z18.s\n"
+ ".inst 0x454b1bbd // usublb z29.h, z29.b, z11.b\n"
+ ".inst 0x448247ef // smlalt z15.s, p4/M, z31.h, z2.h\n"
+ ".inst 0x454b1b9c // usublb z28.h, z28.b, z11.b\n"
+ ".inst 0x448143f9 // smlalb z25.s, p4/M, z31.h, z1.h\n"
+ ".inst 0x448147e9 // smlalt z9.s, p4/M, z31.h, z1.h\n"
+ "ld1b { z31.h }, p3/Z, [x22, x16]\n"
".inst 0x448843cd // smlalb z13.s, p4/M, z30.h, z8.h\n"
- ".inst 0x448847d1 // smlalt z17.s, p4/M, z30.h, z8.h\n"
- "uzp2 z27.s, z20.s, z18.s\n"
- ".inst 0x448743da // smlalb z26.s, p4/M, z30.h, z7.h\n"
- ".inst 0x448747ca // smlalt z10.s, p4/M, z30.h, z7.h\n"
- ".inst 0x448547d0 // smlalt z16.s, p4/M, z30.h, z5.h\n"
- ".inst 0x448447c9 // smlalt z9.s, p4/M, z30.h, z4.h\n"
- "ld1b { z30.h }, p3/Z, [x22, x8]\n"
- ".inst 0x45571bde // usublb z30.h, z30.b, z23.b\n"
- ".inst 0x448043b8 // smlalb z24.s, p4/M, z29.h, z0.h\n"
- ".inst 0x44824399 // smlalb z25.s, p4/M, z28.h, z2.h\n"
+ ".inst 0x448847cf // smlalt z15.s, p4/M, z30.h, z8.h\n"
+ ".inst 0x448743d9 // smlalb z25.s, p4/M, z30.h, z7.h\n"
+ ".inst 0x454b1bff // usublb z31.h, z31.b, z11.b\n"
+ ".inst 0x448747c9 // smlalt z9.s, p4/M, z30.h, z7.h\n"
+ ".inst 0x448543d7 // smlalb z23.s, p4/M, z30.h, z5.h\n"
+ ".inst 0x448547d6 // smlalt z22.s, p4/M, z30.h, z5.h\n"
+ ".inst 0x448443ca // smlalb z10.s, p4/M, z30.h, z4.h\n"
+ ".inst 0x448447d8 // smlalt z24.s, p4/M, z30.h, z4.h\n"
+ "ld1b { z30.h }, p3/Z, [x21, x16]\n"
".inst 0x448343ad // smlalb z13.s, p4/M, z29.h, z3.h\n"
- ".inst 0x448347b1 // smlalt z17.s, p4/M, z29.h, z3.h\n"
- ".inst 0x448047b0 // smlalt z16.s, p4/M, z29.h, z0.h\n"
- "ld1b { z29.h }, p3/Z, [x21, x8]\n"
- ".inst 0x44824789 // smlalt z9.s, p4/M, z28.h, z2.h\n"
- ".inst 0x45571bbd // usublb z29.h, z29.b, z23.b\n"
- ".inst 0x448343f8 // smlalb z24.s, p4/M, z31.h, z3.h\n"
- ".inst 0x448543d9 // smlalb z25.s, p4/M, z30.h, z5.h\n"
- ".inst 0x4485439a // smlalb z26.s, p4/M, z28.h, z5.h\n"
- ".inst 0x4485478a // smlalt z10.s, p4/M, z28.h, z5.h\n"
- "ld1b { z28.h }, p3/Z, [x20, x8]\n"
- ".inst 0x45571b9c // usublb z28.h, z28.b, z23.b\n"
+ ".inst 0x448347af // smlalt z15.s, p4/M, z29.h, z3.h\n"
+ ".inst 0x448043b7 // smlalb z23.s, p4/M, z29.h, z0.h\n"
+ ".inst 0x454b1bde // usublb z30.h, z30.b, z11.b\n"
+ ".inst 0x448047b6 // smlalt z22.s, p4/M, z29.h, z0.h\n"
+ "ld1b { z29.h }, p3/Z, [x20, x16]\n"
+ ".inst 0x44854399 // smlalb z25.s, p4/M, z28.h, z5.h\n"
+ ".inst 0x44854789 // smlalt z9.s, p4/M, z28.h, z5.h\n"
+ ".inst 0x4482438a // smlalb z10.s, p4/M, z28.h, z2.h\n"
+ ".inst 0x454b1bbd // usublb z29.h, z29.b, z11.b\n"
+ ".inst 0x44824798 // smlalt z24.s, p4/M, z28.h, z2.h\n"
+ "ld1b { z28.h }, p3/Z, [x19, x16]\n"
+ "inch x16\n"
".inst 0x448643ed // smlalb z13.s, p4/M, z31.h, z6.h\n"
- ".inst 0x448347f0 // smlalt z16.s, p4/M, z31.h, z3.h\n"
- ".inst 0x04b375ad // sqrdmulh z13.s, z13.s, z19.s\n"
- "inch x8\n"
- ".inst 0x448547c9 // smlalt z9.s, p4/M, z30.h, z5.h\n"
- ".inst 0x448743b8 // smlalb z24.s, p4/M, z29.h, z7.h\n"
- "and z21.d, z13.d, z1.d\n"
- "mov x20, x8\n"
- ".inst 0x448643b9 // smlalb z25.s, p4/M, z29.h, z6.h\n"
- ".inst 0x448647f1 // smlalt z17.s, p4/M, z31.h, z6.h\n"
- ".inst 0x04b67631 // sqrdmulh z17.s, z17.s, z22.s\n"
- "incw x20\n"
- ".inst 0x448747b0 // smlalt z16.s, p4/M, z29.h, z7.h\n"
- ".inst 0x448647a9 // smlalt z9.s, p4/M, z29.h, z6.h\n"
- "asr z21.s, z21.s, #0x1f\n"
- "whilelt p2.s, x8, x17\n"
- ".inst 0x448843da // smlalb z26.s, p4/M, z30.h, z8.h\n"
- ".inst 0x44884398 // smlalb z24.s, p4/M, z28.h, z8.h\n"
- "and z20.d, z17.d, z27.d\n"
- "whilelt p1.s, x20, x17\n"
- ".inst 0x44874399 // smlalb z25.s, p4/M, z28.h, z7.h\n"
- ".inst 0x448847ca // smlalt z10.s, p4/M, z30.h, z8.h\n"
- ".inst 0x04b3775a // sqrdmulh z26.s, z26.s, z19.s\n"
- "whilelt p3.h, x8, x17\n"
- ".inst 0x44884790 // smlalt z16.s, p4/M, z28.h, z8.h\n"
- ".inst 0x44874789 // smlalt z9.s, p4/M, z28.h, z7.h\n"
- ".inst 0x04b37718 // sqrdmulh z24.s, z24.s, z19.s\n"
- ".inst 0x04b37739 // sqrdmulh z25.s, z25.s, z19.s\n"
- "sqadd z13.s, z13.s, z21.s\n"
- ".inst 0x4482902d // srshl z13.s, p4/M, z13.s, z1.s\n"
- "asr z20.s, z20.s, #0x1f\n"
- "and z19.d, z26.d, z1.d\n"
- ".inst 0x04b6754a // sqrdmulh z10.s, z10.s, z22.s\n"
- "and z18.d, z24.d, z1.d\n"
- ".inst 0x04b67610 // sqrdmulh z16.s, z16.s, z22.s\n"
- "and z21.d, z25.d, z1.d\n"
- ".inst 0x04b67529 // sqrdmulh z9.s, z9.s, z22.s\n"
- "sqadd z17.s, z17.s, z20.s\n"
- ".inst 0x44829371 // srshl z17.s, p4/M, z17.s, z27.s\n"
- "asr z19.s, z19.s, #0x1f\n"
- "and z2.d, z10.d, z27.d\n"
- "asr z18.s, z18.s, #0x1f\n"
- "and z22.d, z16.d, z27.d\n"
- "asr z21.s, z21.s, #0x1f\n"
- "and z20.d, z9.d, z27.d\n"
- "sqadd z26.s, z26.s, z19.s\n"
+ "whilelt p2.s, x16, x8\n"
+ ".inst 0x448647ef // smlalt z15.s, p4/M, z31.h, z6.h\n"
+ "mov x19, x16\n"
+ ".inst 0x448343f7 // smlalb z23.s, p4/M, z31.h, z3.h\n"
+ "incw x19\n"
+ ".inst 0x454b1b9c // usublb z28.h, z28.b, z11.b\n"
+ "whilelt p1.s, x19, x8\n"
+ ".inst 0x448347f6 // smlalt z22.s, p4/M, z31.h, z3.h\n"
+ "whilelt p3.h, x16, x8\n"
+ ".inst 0x04b575ad // sqrdmulh z13.s, z13.s, z21.s\n"
+ ".inst 0x04b275ef // sqrdmulh z15.s, z15.s, z18.s\n"
+ ".inst 0x448843d9 // smlalb z25.s, p4/M, z30.h, z8.h\n"
+ ".inst 0x448847c9 // smlalt z9.s, p4/M, z30.h, z8.h\n"
+ "and z4.d, z13.d, z20.d\n"
+ "and z16.d, z15.d, z19.d\n"
+ ".inst 0x04b57739 // sqrdmulh z25.s, z25.s, z21.s\n"
+ "asr z4.s, z4.s, #0x1f\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ ".inst 0x04b27529 // sqrdmulh z9.s, z9.s, z18.s\n"
+ "sqadd z13.s, z13.s, z4.s\n"
+ "sqadd z15.s, z15.s, z16.s\n"
+ "and z2.d, z25.d, z20.d\n"
+ "and z16.d, z9.d, z19.d\n"
+ ".inst 0x448543ca // smlalb z10.s, p4/M, z30.h, z5.h\n"
"asr z2.s, z2.s, #0x1f\n"
- ".inst 0x4482903a // srshl z26.s, p4/M, z26.s, z1.s\n"
- "sqadd z24.s, z24.s, z18.s\n"
- "asr z22.s, z22.s, #0x1f\n"
- ".inst 0x44829038 // srshl z24.s, p4/M, z24.s, z1.s\n"
- "sqadd z25.s, z25.s, z21.s\n"
- "asr z20.s, z20.s, #0x1f\n"
- ".inst 0x44829039 // srshl z25.s, p4/M, z25.s, z1.s\n"
- "sqadd z10.s, z10.s, z2.s\n"
- "sqadd z16.s, z16.s, z22.s\n"
- ".inst 0x4482936a // srshl z10.s, p4/M, z10.s, z27.s\n"
- ".inst 0x44829370 // srshl z16.s, p4/M, z16.s, z27.s\n"
- "sqadd z9.s, z9.s, z20.s\n"
- ".inst 0x453041ad // sqxtnb z13.h, z13.s\n"
- ".inst 0x44829369 // srshl z9.s, p4/M, z9.s, z27.s\n"
- ".inst 0x4530435a // sqxtnb z26.h, z26.s\n"
- ".inst 0x45304318 // sqxtnb z24.h, z24.s\n"
- ".inst 0x45304339 // sqxtnb z25.h, z25.s\n"
- ".inst 0x4530462d // sqxtnt z13.h, z17.s\n"
- ".inst 0x4530455a // sqxtnt z26.h, z10.s\n"
- ".inst 0x45304618 // sqxtnt z24.h, z16.s\n"
- ".inst 0x45304539 // sqxtnt z25.h, z9.s\n"
- "sqadd z13.h, z13.h, z14.h\n"
- "smax z13.h, p4/M, z13.h, z12.h\n"
- "smin z13.h, p4/M, z13.h, z11.h\n"
- "sqadd z26.h, z26.h, z14.h\n"
- "sqadd z24.h, z24.h, z14.h\n"
- "smax z26.h, p4/M, z26.h, z12.h\n"
- "smax z24.h, p4/M, z24.h, z12.h\n"
- "sqadd z25.h, z25.h, z14.h\n"
- "smax z25.h, p4/M, z25.h, z12.h\n"
- "smin z26.h, p4/M, z26.h, z11.h\n"
- "st1b { z13.h }, p0, [x15, x10]\n"
- "smin z24.h, p4/M, z24.h, z11.h\n"
- "smin z25.h, p4/M, z25.h, z11.h\n"
- "st1b { z26.h }, p0, [x14, x10]\n"
- "st1b { z24.h }, p0, [x13, x10]\n"
- "st1b { z25.h }, p0, [x12, x10]\n"
- "ld1sb { z0.h }, p4/Z, [x16]\n"
- "ld1sb { z1.h }, p4/Z, [x16, #1, MUL VL]\n"
- "inch x10\n"
- "ld1sb { z2.h }, p4/Z, [x16, #2, MUL VL]\n"
- "ld1sb { z3.h }, p4/Z, [x16, #3, MUL VL]\n"
- ".inst 0x454f1000 // ssublb z0.h, z0.b, z15.b\n"
- ".inst 0x454f1021 // ssublb z1.h, z1.b, z15.b\n"
- "ld1sb { z4.h }, p4/Z, [x16, #4, MUL VL]\n"
- "ld1sb { z5.h }, p4/Z, [x16, #5, MUL VL]\n"
- ".inst 0x454f1042 // ssublb z2.h, z2.b, z15.b\n"
- ".inst 0x454f1063 // ssublb z3.h, z3.b, z15.b\n"
- "ld1sb { z6.h }, p4/Z, [x16, #6, MUL VL]\n"
- "ld1sb { z7.h }, p4/Z, [x16, #7, MUL VL]\n"
- "inch x16, ALL, MUL #8\n"
- ".inst 0x454f1084 // ssublb z4.h, z4.b, z15.b\n"
- "ld1w { z17.s }, p2/Z, [x26]\n"
- "ld1w { z16.s }, p1/Z, [x26, #1, MUL VL]\n"
- "uzp1 z13.s, z17.s, z16.s\n"
- "uzp2 z17.s, z17.s, z16.s\n"
- "ld1sb { z8.h }, p4/Z, [x16]\n"
- "ldp x24, x23, [x11, #0x0]\n"
- "addvl x26, x26, #2\n"
- "str x26, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x22, x21, [x11, #0x10]\n"
- "ldr x20, [x11, #0x20]\n"
- "mov z26.d, z13.d\n"
- "mov z10.d, z17.d\n"
- "ld1b { z31.h }, p3/Z, [x24, x8]\n"
- "ld1b { z30.h }, p3/Z, [x23, x8]\n"
- "mov z24.d, z13.d\n"
- "mov z16.d, z17.d\n"
- "ld1b { z29.h }, p3/Z, [x22, x8]\n"
- "ld1b { z28.h }, p3/Z, [x21, x8]\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ ".inst 0x448547d8 // smlalt z24.s, p4/M, z30.h, z5.h\n"
+ "sqadd z25.s, z25.s, z2.s\n"
+ "sqadd z9.s, z9.s, z16.s\n"
+ ".inst 0x448743b7 // smlalb z23.s, p4/M, z29.h, z7.h\n"
+ ".inst 0x448747b6 // smlalt z22.s, p4/M, z29.h, z7.h\n"
+ ".inst 0x448643aa // smlalb z10.s, p4/M, z29.h, z6.h\n"
+ ".inst 0x448647b8 // smlalt z24.s, p4/M, z29.h, z6.h\n"
+ ".inst 0x44884397 // smlalb z23.s, p4/M, z28.h, z8.h\n"
+ ".inst 0x44884796 // smlalt z22.s, p4/M, z28.h, z8.h\n"
+ ".inst 0x4487438a // smlalb z10.s, p4/M, z28.h, z7.h\n"
+ ".inst 0x44874798 // smlalt z24.s, p4/M, z28.h, z7.h\n"
+ ".inst 0x04b576f7 // sqrdmulh z23.s, z23.s, z21.s\n"
+ ".inst 0x04b276d6 // sqrdmulh z22.s, z22.s, z18.s\n"
+ ".inst 0x04b5754a // sqrdmulh z10.s, z10.s, z21.s\n"
+ ".inst 0x04b27718 // sqrdmulh z24.s, z24.s, z18.s\n"
+ "and z18.d, z23.d, z20.d\n"
+ "and z0.d, z22.d, z19.d\n"
+ "and z16.d, z10.d, z20.d\n"
+ "asr z18.s, z18.s, #0x1f\n"
+ "asr z0.s, z0.s, #0x1f\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ "sqadd z23.s, z23.s, z18.s\n"
+ "sqadd z22.s, z22.s, z0.s\n"
+ "sqadd z10.s, z10.s, z16.s\n"
+ "and z16.d, z24.d, z19.d\n"
+ ".inst 0x4482928d // srshl z13.s, p4/M, z13.s, z20.s\n"
+ ".inst 0x4482926f // srshl z15.s, p4/M, z15.s, z19.s\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ ".inst 0x44829299 // srshl z25.s, p4/M, z25.s, z20.s\n"
+ "add z13.s, z13.s, z12.s\n"
+ "add z15.s, z15.s, z12.s\n"
+ "sqadd z24.s, z24.s, z16.s\n"
+ "add z25.s, z25.s, z12.s\n"
+ "smin z13.s, p4/M, z13.s, z17.s\n"
+ "smin z15.s, p4/M, z15.s, z17.s\n"
+ "smin z25.s, p4/M, z25.s, z17.s\n"
+ ".inst 0x44829269 // srshl z9.s, p4/M, z9.s, z19.s\n"
+ "smax z13.s, p4/M, z13.s, z14.s\n"
+ "smax z15.s, p4/M, z15.s, z14.s\n"
+ "smax z25.s, p4/M, z25.s, z14.s\n"
+ "add z9.s, z9.s, z12.s\n"
+ ".inst 0x44829297 // srshl z23.s, p4/M, z23.s, z20.s\n"
+ "trn1 z13.h, z13.h, z15.h\n"
+ "st1b { z13.h }, p0, [x11, x15]\n"
+ "smin z9.s, p4/M, z9.s, z17.s\n"
+ ".inst 0x44829276 // srshl z22.s, p4/M, z22.s, z19.s\n"
+ "add z23.s, z23.s, z12.s\n"
+ ".inst 0x4482928a // srshl z10.s, p4/M, z10.s, z20.s\n"
+ ".inst 0x44829278 // srshl z24.s, p4/M, z24.s, z19.s\n"
+ "add z22.s, z22.s, z12.s\n"
+ "smax z9.s, p4/M, z9.s, z14.s\n"
+ "add z10.s, z10.s, z12.s\n"
+ "add z24.s, z24.s, z12.s\n"
+ "smin z23.s, p4/M, z23.s, z17.s\n"
+ "trn1 z25.h, z25.h, z9.h\n"
+ "st1b { z25.h }, p0, [x10, x15]\n"
+ "smin z22.s, p4/M, z22.s, z17.s\n"
+ "smin z10.s, p4/M, z10.s, z17.s\n"
+ "smax z23.s, p4/M, z23.s, z14.s\n"
+ "smin z24.s, p4/M, z24.s, z17.s\n"
+ "smax z22.s, p4/M, z22.s, z14.s\n"
+ "smax z10.s, p4/M, z10.s, z14.s\n"
+ "smax z24.s, p4/M, z24.s, z14.s\n"
+ "trn1 z23.h, z23.h, z22.h\n"
+ "st1b { z23.h }, p0, [x9, x15]\n"
+ "trn1 z10.h, z10.h, z24.h\n"
+ "st1b { z10.h }, p0, [x28, x15]\n"
+ "inch x15\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ld1w { z10.s }, p2/Z, [x19]\n"
+ "ld1w { z16.s }, p1/Z, [x19, #1, MUL VL]\n"
+ "uzp1 z13.s, z10.s, z16.s\n"
+ "addvl x19, x19, #2\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "uzp2 z15.s, z10.s, z16.s\n"
"mov z25.d, z13.d\n"
- "mov z9.d, z17.d\n"
- "ld1b { z27.h }, p3/Z, [x20, x8]\n"
- ".inst 0x454f10a5 // ssublb z5.h, z5.b, z15.b\n"
- ".inst 0x454f10c6 // ssublb z6.h, z6.b, z15.b\n"
- ".inst 0x454f10e7 // ssublb z7.h, z7.b, z15.b\n"
- ".inst 0x454f1108 // ssublb z8.h, z8.b, z15.b\n"
- ".inst 0x45571bff // usublb z31.h, z31.b, z23.b\n"
- ".inst 0x45571bde // usublb z30.h, z30.b, z23.b\n"
- ".inst 0x45571bbd // usublb z29.h, z29.b, z23.b\n"
- ".inst 0x45571b9c // usublb z28.h, z28.b, z23.b\n"
- ".inst 0x45571b7b // usublb z27.h, z27.b, z23.b\n"
+ "ld1sb { z0.h }, p4/Z, [x17]\n"
+ "mov z23.d, z13.d\n"
+ "ld1sb { z1.h }, p4/Z, [x17, #1, MUL VL]\n"
+ "mov z9.d, z15.d\n"
+ "ld1sb { z2.h }, p4/Z, [x17, #2, MUL VL]\n"
+ "mov z22.d, z15.d\n"
+ "ld1sb { z3.h }, p4/Z, [x17, #3, MUL VL]\n"
+ "mov z10.d, z13.d\n"
+ "ld1sb { z4.h }, p4/Z, [x17, #4, MUL VL]\n"
+ "mov z24.d, z15.d\n"
+ "ld1sb { z5.h }, p4/Z, [x17, #5, MUL VL]\n"
+ ".inst 0x455a1000 // ssublb z0.h, z0.b, z26.b\n"
+ "ld1sb { z6.h }, p4/Z, [x17, #6, MUL VL]\n"
+ ".inst 0x455a1021 // ssublb z1.h, z1.b, z26.b\n"
+ "ld1sb { z7.h }, p4/Z, [x17, #7, MUL VL]\n"
+ "inch x17, ALL, MUL #8\n"
+ ".inst 0x455a1042 // ssublb z2.h, z2.b, z26.b\n"
+ "ld1sb { z8.h }, p4/Z, [x17]\n"
+ ".inst 0x455a1063 // ssublb z3.h, z3.b, z26.b\n"
+ "ldp x23, x22, [x13, #0x0]\n"
+ ".inst 0x455a1084 // ssublb z4.h, z4.b, z26.b\n"
+ "ldp x21, x20, [x13, #0x10]\n"
+ ".inst 0x455a10a5 // ssublb z5.h, z5.b, z26.b\n"
+ ".inst 0x455a10c6 // ssublb z6.h, z6.b, z26.b\n"
+ "ldr x19, [x13, #0x20]\n"
+ ".inst 0x455a10e7 // ssublb z7.h, z7.b, z26.b\n"
+ ".inst 0x455a1108 // ssublb z8.h, z8.b, z26.b\n"
+ "ld1b { z31.h }, p3/Z, [x23, x16]\n"
+ "ld1b { z30.h }, p3/Z, [x22, x16]\n"
+ ".inst 0x454b1bff // usublb z31.h, z31.b, z11.b\n"
+ "ld1b { z29.h }, p3/Z, [x21, x16]\n"
+ ".inst 0x454b1bde // usublb z30.h, z30.b, z11.b\n"
+ "ld1b { z28.h }, p3/Z, [x20, x16]\n"
+ "ld1b { z27.h }, p3/Z, [x19, x16]\n"
+ ".inst 0x454b1bbd // usublb z29.h, z29.b, z11.b\n"
+ ".inst 0x454b1b9c // usublb z28.h, z28.b, z11.b\n"
+ ".inst 0x454b1b7b // usublb z27.h, z27.b, z11.b\n"
"b.any 1b\n"
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
index 24c4bf713d..9adf100a0f 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -100,348 +100,356 @@ void sve_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "mov x7, #0x0\n"
- "ldr x25, [%x[params], %[offsetof_Params_requant]]\n"
+ "ldr x4, [%x[params], %[offsetof_Params_n_channels]]\n"
"ptrue p4.b\n"
- "ldr x24, [%x[params], %[offsetof_Params_outptrs]]\n"
- "mov x23, x7\n"
- "add x21, x25, %[offsetof_Requantize32_a_offset]\n"
- "ldr x8, [%x[params], %[offsetof_Params_n_channels]]\n"
- "ldr x17, [%x[params], %[offsetof_Params_weights]]\n"
- "add x20, x25, %[offsetof_Requantize32_b_offset]\n"
- "add x22, x25, %[offsetof_Requantize32_c_offset]\n"
- "ld1rb { z23.b }, p4/Z, [x21]\n"
+ "ldr x5, [%x[params], %[offsetof_Params_weights]]\n"
+ "mov x6, #0x0\n"
+ "ldr x22, [%x[params], %[offsetof_Params_requant]]\n"
+ "mov x7, #0x0\n"
+ "ldr x8, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "add x17, %x[params], %[offsetof_Params_inptrs]\n"
+ "ldr x16, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "add x19, x22, %[offsetof_Requantize32_a_offset]\n"
+ "ldr x21, [%x[params], %[offsetof_Params_outptrs]]\n"
+ "add x20, x22, %[offsetof_Requantize32_b_offset]\n"
+ "ld1rb { z16.b }, p4/Z, [x19]\n"
+ "add x19, x22, %[offsetof_Requantize32_c_offset]\n"
"ld1rb { z12.b }, p4/Z, [x20]\n"
- "add x21, x25, %[offsetof_Requantize32_minval]\n"
- "add x20, x25, %[offsetof_Requantize32_maxval]\n"
- "ld1rh { z14.h }, p4/Z, [x22]\n"
- "ld1rh { z16.h }, p4/Z, [x21]\n"
- "ld1rh { z15.h }, p4/Z, [x20]\n"
- "ldp x16, x15, [x24, #0x0]\n"
- "incw x23\n"
- "whilelt p3.h, x7, x8\n"
- "ldp x14, x13, [x24, #0x10]\n"
- "whilelt p2.s, x7, x8\n"
- "whilelt p1.s, x23, x8\n"
- "ldr x12, [%x[params], %[offsetof_Params_bias]]\n"
- "ld1sb { z0.h }, p4/Z, [x17]\n"
- "ld1sb { z1.h }, p4/Z, [x17, #1, MUL VL]\n"
- "add x11, %x[params], %[offsetof_Params_inptrs]\n"
- "mov x10, #0x0\n"
- "ld1sb { z2.h }, p4/Z, [x17, #2, MUL VL]\n"
- "ld1sb { z3.h }, p4/Z, [x17, #3, MUL VL]\n"
+ "add x20, x22, %[offsetof_Requantize32_minval]\n"
+ "ld1rw { z14.s }, p4/Z, [x19]\n"
+ "add x19, x22, %[offsetof_Requantize32_maxval]\n"
+ "ld1rw { z17.s }, p4/Z, [x20]\n"
+ "whilelt p3.h, x6, x4\n"
+ "ld1rw { z15.s }, p4/Z, [x19]\n"
+ "whilelt p2.s, x6, x4\n"
+ "ldp x15, x14, [x21, #0x0]\n"
+ "mov x19, x6\n"
+ "incw x19\n"
+ "ldp x13, x12, [x21, #0x10]\n"
+ "whilelt p1.s, x19, x4\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ld1w { z20.s }, p2/Z, [x19]\n"
+ "ld1w { z10.s }, p1/Z, [x19, #1, MUL VL]\n"
+ "uzp1 z13.s, z20.s, z10.s\n"
+ "addvl x19, x19, #2\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "uzp2 z20.s, z20.s, z10.s\n"
+ "mov z11.d, z13.d\n"
+ "ld1sb { z0.h }, p4/Z, [x5]\n"
+ "mov z9.d, z13.d\n"
+ "ld1sb { z1.h }, p4/Z, [x5, #1, MUL VL]\n"
+ "mov z18.d, z20.d\n"
+ "ld1sb { z2.h }, p4/Z, [x5, #2, MUL VL]\n"
+ "mov z19.d, z20.d\n"
+ "ld1sb { z3.h }, p4/Z, [x5, #3, MUL VL]\n"
+ "mov z23.d, z13.d\n"
+ "ld1sb { z4.h }, p4/Z, [x5, #4, MUL VL]\n"
+ "mov z21.d, z20.d\n"
+ "ld1sb { z5.h }, p4/Z, [x5, #5, MUL VL]\n"
".inst 0x454c1000 // ssublb z0.h, z0.b, z12.b\n"
+ "ld1sb { z6.h }, p4/Z, [x5, #6, MUL VL]\n"
".inst 0x454c1021 // ssublb z1.h, z1.b, z12.b\n"
- "ld1sb { z4.h }, p4/Z, [x17, #4, MUL VL]\n"
- "ld1sb { z5.h }, p4/Z, [x17, #5, MUL VL]\n"
+ "ld1sb { z7.h }, p4/Z, [x5, #7, MUL VL]\n"
+ "inch x5, ALL, MUL #8\n"
".inst 0x454c1042 // ssublb z2.h, z2.b, z12.b\n"
+ "ld1sb { z8.h }, p4/Z, [x5]\n"
".inst 0x454c1063 // ssublb z3.h, z3.b, z12.b\n"
- "ld1sb { z6.h }, p4/Z, [x17, #6, MUL VL]\n"
- "ld1sb { z7.h }, p4/Z, [x17, #7, MUL VL]\n"
- "inch x17, ALL, MUL #8\n"
+ "ldp x26, x25, [x17, #0x0]\n"
".inst 0x454c1084 // ssublb z4.h, z4.b, z12.b\n"
- "ld1w { z18.s }, p2/Z, [x12]\n"
- "ld1w { z8.s }, p1/Z, [x12, #1, MUL VL]\n"
- "uzp1 z13.s, z18.s, z8.s\n"
- "uzp2 z17.s, z18.s, z8.s\n"
- "ld1sb { z8.h }, p4/Z, [x17]\n"
- "ldp x9, x28, [x11, #0x0]\n"
- "addvl x12, x12, #2\n"
- "mov z9.d, z13.d\n"
- "ldp x25, x24, [x11, #0x10]\n"
- "ldp x23, x22, [x11, #0x20]\n"
- "mov z10.d, z17.d\n"
- "mov z11.d, z13.d\n"
- "ldp x21, x20, [x11, #0x30]\n"
- "ld1b { z31.h }, p3/Z, [x9, x7]\n"
- "mov z22.d, z17.d\n"
- "mov z21.d, z13.d\n"
- "ld1b { z30.h }, p3/Z, [x28, x7]\n"
- "ld1b { z29.h }, p3/Z, [x25, x7]\n"
- "mov z18.d, z17.d\n"
+ "ldp x24, x23, [x17, #0x10]\n"
".inst 0x454c10a5 // ssublb z5.h, z5.b, z12.b\n"
- "ld1b { z28.h }, p3/Z, [x24, x7]\n"
- "ld1b { z27.h }, p3/Z, [x23, x7]\n"
".inst 0x454c10c6 // ssublb z6.h, z6.b, z12.b\n"
+ "ldp x22, x21, [x17, #0x20]\n"
".inst 0x454c10e7 // ssublb z7.h, z7.b, z12.b\n"
- "ld1b { z26.h }, p3/Z, [x22, x7]\n"
- "ld1b { z25.h }, p3/Z, [x21, x7]\n"
".inst 0x454c1108 // ssublb z8.h, z8.b, z12.b\n"
- ".inst 0x45571bff // usublb z31.h, z31.b, z23.b\n"
- "ld1b { z24.h }, p3/Z, [x20, x7]\n"
- "ldr x27, [%x[params], %[offsetof_Params_requant_muls]]\n"
- ".inst 0x45571bde // usublb z30.h, z30.b, z23.b\n"
- ".inst 0x45571bbd // usublb z29.h, z29.b, z23.b\n"
- "ldr x26, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "str x12, [%x[params], %[offsetof_Params_bias]]\n"
- ".inst 0x45571b9c // usublb z28.h, z28.b, z23.b\n"
- ".inst 0x45571b7b // usublb z27.h, z27.b, z23.b\n"
- ".inst 0x45571b5a // usublb z26.h, z26.b, z23.b\n"
- ".inst 0x45571b39 // usublb z25.h, z25.b, z23.b\n"
- ".inst 0x45571b18 // usublb z24.h, z24.b, z23.b\n"
+ "ldp x20, x19, [x17, #0x30]\n"
+ "ld1b { z31.h }, p3/Z, [x26, x6]\n"
+ ".inst 0x45501bff // usublb z31.h, z31.b, z16.b\n"
+ "ld1b { z30.h }, p3/Z, [x25, x6]\n"
+ "ld1b { z29.h }, p3/Z, [x24, x6]\n"
+ ".inst 0x45501bde // usublb z30.h, z30.b, z16.b\n"
+ "ld1b { z28.h }, p3/Z, [x23, x6]\n"
+ ".inst 0x45501bbd // usublb z29.h, z29.b, z16.b\n"
+ "ld1b { z27.h }, p3/Z, [x22, x6]\n"
+ "ld1b { z26.h }, p3/Z, [x21, x6]\n"
+ ".inst 0x45501b9c // usublb z28.h, z28.b, z16.b\n"
+ "ld1b { z25.h }, p3/Z, [x20, x6]\n"
+ "ld1b { z24.h }, p3/Z, [x19, x6]\n"
+ ".inst 0x45501b7b // usublb z27.h, z27.b, z16.b\n"
+ ".inst 0x45501b5a // usublb z26.h, z26.b, z16.b\n"
+ ".inst 0x45501b39 // usublb z25.h, z25.b, z16.b\n"
+ ".inst 0x45501b18 // usublb z24.h, z24.b, z16.b\n"
"1:" // Loop
".inst 0x448843ed // smlalb z13.s, p4/M, z31.h, z8.h\n"
- ".inst 0x448847f1 // smlalt z17.s, p4/M, z31.h, z8.h\n"
- "ldr x25, [x11, #0x40]\n"
- "ldr x24, [x11, #0x48]\n"
- ".inst 0x448643e9 // smlalb z9.s, p4/M, z31.h, z6.h\n"
- ".inst 0x448647ea // smlalt z10.s, p4/M, z31.h, z6.h\n"
- "ldr x22, [x11, #0x50]\n"
- "ldr x20, [x11, #0x58]\n"
+ "ldr x22, [x17, #0x40]\n"
+ "whilelt p0.h, x7, x4\n"
+ ".inst 0x448847f4 // smlalt z20.s, p4/M, z31.h, z8.h\n"
+ "ldr x21, [x17, #0x48]\n"
+ "inch x5\n"
+ ".inst 0x448643eb // smlalb z11.s, p4/M, z31.h, z6.h\n"
+ "ldr x20, [x17, #0x50]\n"
+ ".inst 0x448647f2 // smlalt z18.s, p4/M, z31.h, z6.h\n"
+ "ldr x19, [x17, #0x58]\n"
+ ".inst 0x448243e9 // smlalb z9.s, p4/M, z31.h, z2.h\n"
+ "ldr x11, [x17, #0x60]\n"
+ ".inst 0x448247f3 // smlalt z19.s, p4/M, z31.h, z2.h\n"
+ "ldr x10, [x17, #0x68]\n"
+ ".inst 0x448043f7 // smlalb z23.s, p4/M, z31.h, z0.h\n"
+ "ldr x9, [x17, #0x70]\n"
+ ".inst 0x448047f5 // smlalt z21.s, p4/M, z31.h, z0.h\n"
+ "ldr x28, [x17, #0x78]\n"
".inst 0x448043cd // smlalb z13.s, p4/M, z30.h, z0.h\n"
- ".inst 0x448047d1 // smlalt z17.s, p4/M, z30.h, z0.h\n"
- "ldr x23, [x11, #0x78]\n"
- "ldr x21, [x11, #0x60]\n"
- ".inst 0x44814389 // smlalb z9.s, p4/M, z28.h, z1.h\n"
- ".inst 0x4481478a // smlalt z10.s, p4/M, z28.h, z1.h\n"
- "ld1b { z28.h }, p3/Z, [x24, x7]\n"
- ".inst 0x45571b9c // usublb z28.h, z28.b, z23.b\n"
+ "ldr x27, [x17, #0x80]\n"
+ ".inst 0x448047d4 // smlalt z20.s, p4/M, z30.h, z0.h\n"
+ "ldr x26, [x17, #0x88]\n"
+ ".inst 0x4481438b // smlalb z11.s, p4/M, z28.h, z1.h\n"
+ "ldr x25, [x17, #0x90]\n"
+ ".inst 0x44814792 // smlalt z18.s, p4/M, z28.h, z1.h\n"
+ "ld1b { z28.h }, p3/Z, [x21, x6]\n"
".inst 0x448143ad // smlalb z13.s, p4/M, z29.h, z1.h\n"
- ".inst 0x448147b1 // smlalt z17.s, p4/M, z29.h, z1.h\n"
- "ld1b { z29.h }, p3/Z, [x25, x7]\n"
- ".inst 0x45571bbd // usublb z29.h, z29.b, z23.b\n"
- ".inst 0x44824369 // smlalb z9.s, p4/M, z27.h, z2.h\n"
- ".inst 0x4482476a // smlalt z10.s, p4/M, z27.h, z2.h\n"
- "ld1b { z27.h }, p3/Z, [x22, x7]\n"
- ".inst 0x45571b7b // usublb z27.h, z27.b, z23.b\n"
+ "ldr x24, [x17, #0x98]\n"
+ ".inst 0x448147b4 // smlalt z20.s, p4/M, z29.h, z1.h\n"
+ "ld1b { z29.h }, p3/Z, [x22, x6]\n"
+ ".inst 0x4482436b // smlalb z11.s, p4/M, z27.h, z2.h\n"
+ "ldr x23, [x17, #0xa0]\n"
+ ".inst 0x45501b9c // usublb z28.h, z28.b, z16.b\n"
+ "ldr x22, [x17, #0xa8]\n"
+ ".inst 0x44824772 // smlalt z18.s, p4/M, z27.h, z2.h\n"
+ "ld1b { z27.h }, p3/Z, [x20, x6]\n"
+ ".inst 0x45501bbd // usublb z29.h, z29.b, z16.b\n"
+ "ldr x21, [x17, #0xb0]\n"
".inst 0x4483434d // smlalb z13.s, p4/M, z26.h, z3.h\n"
- ".inst 0x44834751 // smlalt z17.s, p4/M, z26.h, z3.h\n"
- "ld1b { z26.h }, p3/Z, [x20, x7]\n"
- ".inst 0x45571b5a // usublb z26.h, z26.b, z23.b\n"
- ".inst 0x44804309 // smlalb z9.s, p4/M, z24.h, z0.h\n"
- ".inst 0x4480470a // smlalt z10.s, p4/M, z24.h, z0.h\n"
- "ldr x22, [x11, #0x80]\n"
- "ldr x20, [x11, #0x68]\n"
+ "ldr x20, [x17, #0xb8]\n"
+ ".inst 0x44834754 // smlalt z20.s, p4/M, z26.h, z3.h\n"
+ "ld1b { z26.h }, p3/Z, [x19, x6]\n"
+ ".inst 0x45501b7b // usublb z27.h, z27.b, z16.b\n"
+ "ldr x19, [x17, #0xc0]\n"
+ ".inst 0x4480430b // smlalb z11.s, p4/M, z24.h, z0.h\n"
+ "ld1w { z10.s }, p2/Z, [x8]\n"
".inst 0x4484432d // smlalb z13.s, p4/M, z25.h, z4.h\n"
- ".inst 0x44844731 // smlalt z17.s, p4/M, z25.h, z4.h\n"
- "ld1b { z25.h }, p3/Z, [x21, x7]\n"
- ".inst 0x45571b39 // usublb z25.h, z25.b, z23.b\n"
- ".inst 0x448443a9 // smlalb z9.s, p4/M, z29.h, z4.h\n"
- ".inst 0x448447aa // smlalt z10.s, p4/M, z29.h, z4.h\n"
- "ldr x21, [x11, #0x88]\n"
- "ld1b { z29.h }, p3/Z, [x20, x7]\n"
+ "ld1w { z22.s }, p1/Z, [x8, #1, MUL VL]\n"
+ "addvl x8, x8, #2\n"
+ ".inst 0x45501b5a // usublb z26.h, z26.b, z16.b\n"
+ ".inst 0x44844734 // smlalt z20.s, p4/M, z25.h, z4.h\n"
+ "ld1b { z25.h }, p3/Z, [x11, x6]\n"
+ ".inst 0x44804712 // smlalt z18.s, p4/M, z24.h, z0.h\n"
+ "uzp1 z31.s, z10.s, z22.s\n"
+ "uzp2 z30.s, z10.s, z22.s\n"
+ "ld1w { z10.s }, p2/Z, [x16]\n"
+ ".inst 0x45501b39 // usublb z25.h, z25.b, z16.b\n"
+ "ld1w { z22.s }, p1/Z, [x16, #1, MUL VL]\n"
+ "addvl x16, x16, #2\n"
".inst 0x4482430d // smlalb z13.s, p4/M, z24.h, z2.h\n"
- ".inst 0x44824711 // smlalt z17.s, p4/M, z24.h, z2.h\n"
- "ldr x20, [x11, #0x70]\n"
- ".inst 0x45571bbd // usublb z29.h, z29.b, z23.b\n"
- ".inst 0x44854389 // smlalb z9.s, p4/M, z28.h, z5.h\n"
- ".inst 0x4485478a // smlalt z10.s, p4/M, z28.h, z5.h\n"
- "ld1b { z28.h }, p3/Z, [x22, x7]\n"
- ".inst 0x45571b9c // usublb z28.h, z28.b, z23.b\n"
- ".inst 0x448243eb // smlalb z11.s, p4/M, z31.h, z2.h\n"
- ".inst 0x448247f6 // smlalt z22.s, p4/M, z31.h, z2.h\n"
- "ldr x25, [x11, #0x98]\n"
- "ld1b { z24.h }, p3/Z, [x20, x7]\n"
+ ".inst 0x44824714 // smlalt z20.s, p4/M, z24.h, z2.h\n"
+ "ld1b { z24.h }, p3/Z, [x9, x6]\n"
+ ".inst 0x448443ab // smlalb z11.s, p4/M, z29.h, z4.h\n"
+ ".inst 0x448447b2 // smlalt z18.s, p4/M, z29.h, z4.h\n"
+ "ld1b { z29.h }, p3/Z, [x10, x6]\n"
+ ".inst 0x44834349 // smlalb z9.s, p4/M, z26.h, z3.h\n"
+ ".inst 0x45501b18 // usublb z24.h, z24.b, z16.b\n"
+ ".inst 0x4485438b // smlalb z11.s, p4/M, z28.h, z5.h\n"
+ ".inst 0x45501bbd // usublb z29.h, z29.b, z16.b\n"
+ ".inst 0x44854792 // smlalt z18.s, p4/M, z28.h, z5.h\n"
+ "ld1b { z28.h }, p3/Z, [x27, x6]\n"
".inst 0x4485436d // smlalb z13.s, p4/M, z27.h, z5.h\n"
- ".inst 0x44854771 // smlalt z17.s, p4/M, z27.h, z5.h\n"
- ".inst 0x45571b18 // usublb z24.h, z24.b, z23.b\n"
- "ldr x24, [x11, #0x90]\n"
- ".inst 0x44834369 // smlalb z9.s, p4/M, z27.h, z3.h\n"
- ".inst 0x4483476a // smlalt z10.s, p4/M, z27.h, z3.h\n"
- "ld1b { z27.h }, p3/Z, [x23, x7]\n"
- ".inst 0x45571b7b // usublb z27.h, z27.b, z23.b\n"
- ".inst 0x448043f5 // smlalb z21.s, p4/M, z31.h, z0.h\n"
- ".inst 0x4483434b // smlalb z11.s, p4/M, z26.h, z3.h\n"
- "ldr x23, [x11, #0xa8]\n"
- "ldr x20, [x11, #0xa0]\n"
- ".inst 0x44834756 // smlalt z22.s, p4/M, z26.h, z3.h\n"
- ".inst 0x448047f2 // smlalt z18.s, p4/M, z31.h, z0.h\n"
- "ld1b { z26.h }, p3/Z, [x21, x7]\n"
- ".inst 0x45571b5a // usublb z26.h, z26.b, z23.b\n"
- ".inst 0x44844375 // smlalb z21.s, p4/M, z27.h, z4.h\n"
- ".inst 0x4480432b // smlalb z11.s, p4/M, z25.h, z0.h\n"
- "ldr x22, [x11, #0xb0]\n"
- "ldr x21, [x11, #0xb8]\n"
- ".inst 0x44804736 // smlalt z22.s, p4/M, z25.h, z0.h\n"
- ".inst 0x44844772 // smlalt z18.s, p4/M, z27.h, z4.h\n"
- "ld1b { z27.h }, p3/Z, [x20, x7]\n"
- ".inst 0x45571b7b // usublb z27.h, z27.b, z23.b\n"
- ".inst 0x44814395 // smlalb z21.s, p4/M, z28.h, z1.h\n"
+ ".inst 0x44854774 // smlalt z20.s, p4/M, z27.h, z5.h\n"
+ ".inst 0x4483436b // smlalb z11.s, p4/M, z27.h, z3.h\n"
+ ".inst 0x45501b9c // usublb z28.h, z28.b, z16.b\n"
+ ".inst 0x44834772 // smlalt z18.s, p4/M, z27.h, z3.h\n"
+ "ld1b { z27.h }, p3/Z, [x28, x6]\n"
+ ".inst 0x44834753 // smlalt z19.s, p4/M, z26.h, z3.h\n"
+ "ld1b { z26.h }, p3/Z, [x26, x6]\n"
".inst 0x4486432d // smlalb z13.s, p4/M, z25.h, z6.h\n"
- "ldr x20, [x11, #0xc0]\n"
- "ld1w { z31.s }, p2/Z, [x27]\n"
- ".inst 0x44864731 // smlalt z17.s, p4/M, z25.h, z6.h\n"
- ".inst 0x448443ab // smlalb z11.s, p4/M, z29.h, z4.h\n"
- "ld1b { z25.h }, p3/Z, [x24, x7]\n"
- ".inst 0x45571b39 // usublb z25.h, z25.b, z23.b\n"
- ".inst 0x448447b6 // smlalt z22.s, p4/M, z29.h, z4.h\n"
- "ld1b { z29.h }, p3/Z, [x25, x7]\n"
- ".inst 0x44814792 // smlalt z18.s, p4/M, z28.h, z1.h\n"
- ".inst 0x45571bbd // usublb z29.h, z29.b, z23.b\n"
- ".inst 0x44854355 // smlalb z21.s, p4/M, z26.h, z5.h\n"
+ ".inst 0x44864734 // smlalt z20.s, p4/M, z25.h, z6.h\n"
+ ".inst 0x45501b7b // usublb z27.h, z27.b, z16.b\n"
+ ".inst 0x45501b5a // usublb z26.h, z26.b, z16.b\n"
+ ".inst 0x44804329 // smlalb z9.s, p4/M, z25.h, z0.h\n"
+ ".inst 0x44804733 // smlalt z19.s, p4/M, z25.h, z0.h\n"
+ "ld1b { z25.h }, p3/Z, [x25, x6]\n"
+ "uzp1 z0.s, z10.s, z22.s\n"
+ "uzp2 z22.s, z10.s, z22.s\n"
+ ".inst 0x448443a9 // smlalb z9.s, p4/M, z29.h, z4.h\n"
+ ".inst 0x45501b39 // usublb z25.h, z25.b, z16.b\n"
+ ".inst 0x448447b3 // smlalt z19.s, p4/M, z29.h, z4.h\n"
+ "ld1b { z29.h }, p3/Z, [x24, x6]\n"
".inst 0x4487430d // smlalb z13.s, p4/M, z24.h, z7.h\n"
- "ld1w { z20.s }, p1/Z, [x27, #1, MUL VL]\n"
- "uzp1 z19.s, z31.s, z20.s\n"
- ".inst 0x44874711 // smlalt z17.s, p4/M, z24.h, z7.h\n"
- ".inst 0x4481430b // smlalb z11.s, p4/M, z24.h, z1.h\n"
- "uzp2 z30.s, z31.s, z20.s\n"
- "ld1w { z31.s }, p2/Z, [x26]\n"
- ".inst 0x44814716 // smlalt z22.s, p4/M, z24.h, z1.h\n"
- "ld1b { z24.h }, p3/Z, [x23, x7]\n"
- ".inst 0x44854752 // smlalt z18.s, p4/M, z26.h, z5.h\n"
- ".inst 0x45571b18 // usublb z24.h, z24.b, z23.b\n"
- ".inst 0x448243b5 // smlalb z21.s, p4/M, z29.h, z2.h\n"
- "ld1b { z26.h }, p3/Z, [x22, x7]\n"
- ".inst 0x448247b2 // smlalt z18.s, p4/M, z29.h, z2.h\n"
- ".inst 0x45571b5a // usublb z26.h, z26.b, z23.b\n"
- ".inst 0x4486432b // smlalb z11.s, p4/M, z25.h, z6.h\n"
- ".inst 0x44834315 // smlalb z21.s, p4/M, z24.h, z3.h\n"
- "ld1w { z20.s }, p1/Z, [x26, #1, MUL VL]\n"
- "uzp1 z1.s, z31.s, z20.s\n"
- ".inst 0x44874389 // smlalb z9.s, p4/M, z28.h, z7.h\n"
- ".inst 0x4487478a // smlalt z10.s, p4/M, z28.h, z7.h\n"
- ".inst 0x04b375ad // sqrdmulh z13.s, z13.s, z19.s\n"
- "whilelt p0.h, x10, x8\n"
- ".inst 0x44864736 // smlalt z22.s, p4/M, z25.h, z6.h\n"
- "ld1b { z25.h }, p3/Z, [x21, x7]\n"
- ".inst 0x44834712 // smlalt z18.s, p4/M, z24.h, z3.h\n"
- ".inst 0x45571b39 // usublb z25.h, z25.b, z23.b\n"
- ".inst 0x4487436b // smlalb z11.s, p4/M, z27.h, z7.h\n"
- ".inst 0x44874355 // smlalb z21.s, p4/M, z26.h, z7.h\n"
- "uzp2 z31.s, z31.s, z20.s\n"
- "inch x17\n"
- ".inst 0x448843a9 // smlalb z9.s, p4/M, z29.h, z8.h\n"
- ".inst 0x448847aa // smlalt z10.s, p4/M, z29.h, z8.h\n"
- "ld1b { z29.h }, p3/Z, [x20, x7]\n"
- ".inst 0x45571bbd // usublb z29.h, z29.b, z23.b\n"
- ".inst 0x44874776 // smlalt z22.s, p4/M, z27.h, z7.h\n"
- ".inst 0x44874752 // smlalt z18.s, p4/M, z26.h, z7.h\n"
- "and z0.d, z13.d, z1.d\n"
- "inch x7\n"
- ".inst 0x4485430b // smlalb z11.s, p4/M, z24.h, z5.h\n"
- ".inst 0x44864335 // smlalb z21.s, p4/M, z25.h, z6.h\n"
- ".inst 0x04be7631 // sqrdmulh z17.s, z17.s, z30.s\n"
- "mov x20, x7\n"
- ".inst 0x44854716 // smlalt z22.s, p4/M, z24.h, z5.h\n"
- ".inst 0x44864732 // smlalt z18.s, p4/M, z25.h, z6.h\n"
- "asr z0.s, z0.s, #0x1f\n"
- "incw x20\n"
- ".inst 0x4488432b // smlalb z11.s, p4/M, z25.h, z8.h\n"
- ".inst 0x448843b5 // smlalb z21.s, p4/M, z29.h, z8.h\n"
- "and z20.d, z17.d, z31.d\n"
- "whilelt p2.s, x7, x8\n"
- ".inst 0x44884736 // smlalt z22.s, p4/M, z25.h, z8.h\n"
+ ".inst 0x44874714 // smlalt z20.s, p4/M, z24.h, z7.h\n"
+ ".inst 0x44814309 // smlalb z9.s, p4/M, z24.h, z1.h\n"
+ ".inst 0x45501bbd // usublb z29.h, z29.b, z16.b\n"
+ ".inst 0x04bf75ad // sqrdmulh z13.s, z13.s, z31.s\n"
+ ".inst 0x04be7694 // sqrdmulh z20.s, z20.s, z30.s\n"
+ ".inst 0x44814713 // smlalt z19.s, p4/M, z24.h, z1.h\n"
+ "ld1b { z24.h }, p3/Z, [x22, x6]\n"
+ ".inst 0x44844377 // smlalb z23.s, p4/M, z27.h, z4.h\n"
+ "and z10.d, z13.d, z0.d\n"
+ ".inst 0x44844775 // smlalt z21.s, p4/M, z27.h, z4.h\n"
+ "ld1b { z27.h }, p3/Z, [x23, x6]\n"
+ ".inst 0x45501b18 // usublb z24.h, z24.b, z16.b\n"
+ "asr z10.s, z10.s, #0x1f\n"
+ "and z4.d, z20.d, z22.d\n"
+ ".inst 0x45501b7b // usublb z27.h, z27.b, z16.b\n"
+ "sqadd z13.s, z13.s, z10.s\n"
+ "asr z4.s, z4.s, #0x1f\n"
+ ".inst 0x4487438b // smlalb z11.s, p4/M, z28.h, z7.h\n"
+ ".inst 0x44874792 // smlalt z18.s, p4/M, z28.h, z7.h\n"
+ "sqadd z20.s, z20.s, z4.s\n"
+ ".inst 0x44814397 // smlalb z23.s, p4/M, z28.h, z1.h\n"
+ ".inst 0x44814795 // smlalt z21.s, p4/M, z28.h, z1.h\n"
+ ".inst 0x44864329 // smlalb z9.s, p4/M, z25.h, z6.h\n"
+ ".inst 0x44864733 // smlalt z19.s, p4/M, z25.h, z6.h\n"
+ "ld1b { z25.h }, p3/Z, [x20, x6]\n"
+ ".inst 0x44854357 // smlalb z23.s, p4/M, z26.h, z5.h\n"
+ ".inst 0x44854755 // smlalt z21.s, p4/M, z26.h, z5.h\n"
+ "ld1b { z26.h }, p3/Z, [x21, x6]\n"
+ ".inst 0x448843ab // smlalb z11.s, p4/M, z29.h, z8.h\n"
+ ".inst 0x45501b39 // usublb z25.h, z25.b, z16.b\n"
".inst 0x448847b2 // smlalt z18.s, p4/M, z29.h, z8.h\n"
- ".inst 0x04b37529 // sqrdmulh z9.s, z9.s, z19.s\n"
- "whilelt p1.s, x20, x8\n"
- ".inst 0x04b3756b // sqrdmulh z11.s, z11.s, z19.s\n"
- ".inst 0x04b376b5 // sqrdmulh z21.s, z21.s, z19.s\n"
- "ldr x12, [%x[params], %[offsetof_Params_bias]]\n"
- "whilelt p3.h, x7, x8\n"
- "sqadd z13.s, z13.s, z0.s\n"
- "asr z20.s, z20.s, #0x1f\n"
- ".inst 0x4482902d // srshl z13.s, p4/M, z13.s, z1.s\n"
- "addvl x27, x27, #2\n"
- "and z19.d, z9.d, z1.d\n"
- ".inst 0x04be754a // sqrdmulh z10.s, z10.s, z30.s\n"
- "addvl x26, x26, #2\n"
- "and z2.d, z11.d, z1.d\n"
- ".inst 0x04be76d6 // sqrdmulh z22.s, z22.s, z30.s\n"
- "and z0.d, z21.d, z1.d\n"
+ ".inst 0x45501b5a // usublb z26.h, z26.b, z16.b\n"
+ ".inst 0x04bf756b // sqrdmulh z11.s, z11.s, z31.s\n"
+ ".inst 0x448243b7 // smlalb z23.s, p4/M, z29.h, z2.h\n"
".inst 0x04be7652 // sqrdmulh z18.s, z18.s, z30.s\n"
- "sqadd z17.s, z17.s, z20.s\n"
- "asr z19.s, z19.s, #0x1f\n"
- ".inst 0x448293f1 // srshl z17.s, p4/M, z17.s, z31.s\n"
- "and z3.d, z10.d, z31.d\n"
+ ".inst 0x448247b5 // smlalt z21.s, p4/M, z29.h, z2.h\n"
+ "ld1b { z29.h }, p3/Z, [x19, x6]\n"
+ "inch x6\n"
+ "and z2.d, z11.d, z0.d\n"
+ "whilelt p2.s, x6, x4\n"
+ ".inst 0x44874369 // smlalb z9.s, p4/M, z27.h, z7.h\n"
+ "mov x19, x6\n"
+ "and z10.d, z18.d, z22.d\n"
+ "incw x19\n"
+ ".inst 0x45501bbd // usublb z29.h, z29.b, z16.b\n"
+ "whilelt p1.s, x19, x4\n"
"asr z2.s, z2.s, #0x1f\n"
- "and z26.d, z22.d, z31.d\n"
- "asr z0.s, z0.s, #0x1f\n"
- "and z20.d, z18.d, z31.d\n"
- "sqadd z9.s, z9.s, z19.s\n"
- ".inst 0x44829029 // srshl z9.s, p4/M, z9.s, z1.s\n"
- "asr z3.s, z3.s, #0x1f\n"
+ "whilelt p3.h, x6, x4\n"
+ "asr z10.s, z10.s, #0x1f\n"
+ ".inst 0x44874773 // smlalt z19.s, p4/M, z27.h, z7.h\n"
"sqadd z11.s, z11.s, z2.s\n"
- ".inst 0x4482902b // srshl z11.s, p4/M, z11.s, z1.s\n"
- "asr z26.s, z26.s, #0x1f\n"
- "sqadd z21.s, z21.s, z0.s\n"
- ".inst 0x44829035 // srshl z21.s, p4/M, z21.s, z1.s\n"
- "asr z20.s, z20.s, #0x1f\n"
- "sqadd z10.s, z10.s, z3.s\n"
- ".inst 0x448293ea // srshl z10.s, p4/M, z10.s, z31.s\n"
- "sqadd z22.s, z22.s, z26.s\n"
- "sqadd z18.s, z18.s, z20.s\n"
- ".inst 0x448293f6 // srshl z22.s, p4/M, z22.s, z31.s\n"
- ".inst 0x448293f2 // srshl z18.s, p4/M, z18.s, z31.s\n"
- ".inst 0x453041ad // sqxtnb z13.h, z13.s\n"
- ".inst 0x45304129 // sqxtnb z9.h, z9.s\n"
- ".inst 0x4530416b // sqxtnb z11.h, z11.s\n"
- ".inst 0x453042b5 // sqxtnb z21.h, z21.s\n"
- ".inst 0x4530462d // sqxtnt z13.h, z17.s\n"
- ".inst 0x45304549 // sqxtnt z9.h, z10.s\n"
- ".inst 0x453046cb // sqxtnt z11.h, z22.s\n"
- ".inst 0x45304655 // sqxtnt z21.h, z18.s\n"
- "sqadd z13.h, z13.h, z14.h\n"
- "sqadd z9.h, z9.h, z14.h\n"
- "smax z13.h, p4/M, z13.h, z16.h\n"
- "smax z9.h, p4/M, z9.h, z16.h\n"
- "sqadd z11.h, z11.h, z14.h\n"
- "sqadd z21.h, z21.h, z14.h\n"
- "smax z11.h, p4/M, z11.h, z16.h\n"
- "smax z21.h, p4/M, z21.h, z16.h\n"
- "smin z13.h, p4/M, z13.h, z15.h\n"
- "smin z9.h, p4/M, z9.h, z15.h\n"
- "st1b { z13.h }, p0, [x16, x10]\n"
- "smin z11.h, p4/M, z11.h, z15.h\n"
- "smin z21.h, p4/M, z21.h, z15.h\n"
- "st1b { z9.h }, p0, [x15, x10]\n"
- "st1b { z11.h }, p0, [x14, x10]\n"
- "st1b { z21.h }, p0, [x13, x10]\n"
- "ld1sb { z0.h }, p4/Z, [x17]\n"
- "ld1sb { z1.h }, p4/Z, [x17, #1, MUL VL]\n"
- "inch x10\n"
- "ld1sb { z2.h }, p4/Z, [x17, #2, MUL VL]\n"
- "ld1sb { z3.h }, p4/Z, [x17, #3, MUL VL]\n"
+ "sqadd z18.s, z18.s, z10.s\n"
+ ".inst 0x44854309 // smlalb z9.s, p4/M, z24.h, z5.h\n"
+ ".inst 0x44854713 // smlalt z19.s, p4/M, z24.h, z5.h\n"
+ ".inst 0x44834317 // smlalb z23.s, p4/M, z24.h, z3.h\n"
+ ".inst 0x44834715 // smlalt z21.s, p4/M, z24.h, z3.h\n"
+ ".inst 0x44884329 // smlalb z9.s, p4/M, z25.h, z8.h\n"
+ ".inst 0x44884733 // smlalt z19.s, p4/M, z25.h, z8.h\n"
+ ".inst 0x44874357 // smlalb z23.s, p4/M, z26.h, z7.h\n"
+ ".inst 0x44874755 // smlalt z21.s, p4/M, z26.h, z7.h\n"
+ ".inst 0x04bf7529 // sqrdmulh z9.s, z9.s, z31.s\n"
+ ".inst 0x04be7673 // sqrdmulh z19.s, z19.s, z30.s\n"
+ ".inst 0x44864337 // smlalb z23.s, p4/M, z25.h, z6.h\n"
+ ".inst 0x44864735 // smlalt z21.s, p4/M, z25.h, z6.h\n"
+ "and z10.d, z9.d, z0.d\n"
+ "and z24.d, z19.d, z22.d\n"
+ ".inst 0x448843b7 // smlalb z23.s, p4/M, z29.h, z8.h\n"
+ "asr z10.s, z10.s, #0x1f\n"
+ "asr z24.s, z24.s, #0x1f\n"
+ ".inst 0x448847b5 // smlalt z21.s, p4/M, z29.h, z8.h\n"
+ "sqadd z9.s, z9.s, z10.s\n"
+ "sqadd z19.s, z19.s, z24.s\n"
+ ".inst 0x04bf76f7 // sqrdmulh z23.s, z23.s, z31.s\n"
+ ".inst 0x04be76b5 // sqrdmulh z21.s, z21.s, z30.s\n"
+ ".inst 0x4482900d // srshl z13.s, p4/M, z13.s, z0.s\n"
+ ".inst 0x448292d4 // srshl z20.s, p4/M, z20.s, z22.s\n"
+ "and z30.d, z23.d, z0.d\n"
+ "and z28.d, z21.d, z22.d\n"
+ "add z13.s, z13.s, z14.s\n"
+ "add z20.s, z20.s, z14.s\n"
+ "asr z30.s, z30.s, #0x1f\n"
+ "asr z28.s, z28.s, #0x1f\n"
+ "smin z13.s, p4/M, z13.s, z15.s\n"
+ "sqadd z23.s, z23.s, z30.s\n"
+ "sqadd z21.s, z21.s, z28.s\n"
+ "smin z20.s, p4/M, z20.s, z15.s\n"
+ "smax z13.s, p4/M, z13.s, z17.s\n"
+ ".inst 0x4482900b // srshl z11.s, p4/M, z11.s, z0.s\n"
+ ".inst 0x448292d2 // srshl z18.s, p4/M, z18.s, z22.s\n"
+ "smax z20.s, p4/M, z20.s, z17.s\n"
+ ".inst 0x44829009 // srshl z9.s, p4/M, z9.s, z0.s\n"
+ "add z11.s, z11.s, z14.s\n"
+ "add z18.s, z18.s, z14.s\n"
+ "trn1 z13.h, z13.h, z20.h\n"
+ "st1b { z13.h }, p0, [x15, x7]\n"
+ "add z9.s, z9.s, z14.s\n"
+ "smin z11.s, p4/M, z11.s, z15.s\n"
+ "smin z18.s, p4/M, z18.s, z15.s\n"
+ ".inst 0x448292d3 // srshl z19.s, p4/M, z19.s, z22.s\n"
+ "smin z9.s, p4/M, z9.s, z15.s\n"
+ "smax z11.s, p4/M, z11.s, z17.s\n"
+ "smax z18.s, p4/M, z18.s, z17.s\n"
+ "add z19.s, z19.s, z14.s\n"
+ "smax z9.s, p4/M, z9.s, z17.s\n"
+ ".inst 0x44829017 // srshl z23.s, p4/M, z23.s, z0.s\n"
+ "trn1 z11.h, z11.h, z18.h\n"
+ "st1b { z11.h }, p0, [x14, x7]\n"
+ "smin z19.s, p4/M, z19.s, z15.s\n"
+ ".inst 0x448292d5 // srshl z21.s, p4/M, z21.s, z22.s\n"
+ "add z23.s, z23.s, z14.s\n"
+ "add z21.s, z21.s, z14.s\n"
+ "smax z19.s, p4/M, z19.s, z17.s\n"
+ "smin z23.s, p4/M, z23.s, z15.s\n"
+ "smin z21.s, p4/M, z21.s, z15.s\n"
+ "trn1 z9.h, z9.h, z19.h\n"
+ "st1b { z9.h }, p0, [x13, x7]\n"
+ "smax z23.s, p4/M, z23.s, z17.s\n"
+ "smax z21.s, p4/M, z21.s, z17.s\n"
+ "trn1 z23.h, z23.h, z21.h\n"
+ "st1b { z23.h }, p0, [x12, x7]\n"
+ "inch x7\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ld1w { z20.s }, p2/Z, [x19]\n"
+ "ld1w { z10.s }, p1/Z, [x19, #1, MUL VL]\n"
+ "uzp1 z13.s, z20.s, z10.s\n"
+ "addvl x19, x19, #2\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "uzp2 z20.s, z20.s, z10.s\n"
+ "mov z11.d, z13.d\n"
+ "ld1sb { z0.h }, p4/Z, [x5]\n"
+ "mov z9.d, z13.d\n"
+ "ld1sb { z1.h }, p4/Z, [x5, #1, MUL VL]\n"
+ "mov z18.d, z20.d\n"
+ "ld1sb { z2.h }, p4/Z, [x5, #2, MUL VL]\n"
+ "mov z19.d, z20.d\n"
+ "ld1sb { z3.h }, p4/Z, [x5, #3, MUL VL]\n"
+ "mov z23.d, z13.d\n"
+ "ld1sb { z4.h }, p4/Z, [x5, #4, MUL VL]\n"
+ "mov z21.d, z20.d\n"
+ "ld1sb { z5.h }, p4/Z, [x5, #5, MUL VL]\n"
".inst 0x454c1000 // ssublb z0.h, z0.b, z12.b\n"
+ "ld1sb { z6.h }, p4/Z, [x5, #6, MUL VL]\n"
".inst 0x454c1021 // ssublb z1.h, z1.b, z12.b\n"
- "ld1sb { z4.h }, p4/Z, [x17, #4, MUL VL]\n"
- "ld1sb { z5.h }, p4/Z, [x17, #5, MUL VL]\n"
+ "ld1sb { z7.h }, p4/Z, [x5, #7, MUL VL]\n"
+ "inch x5, ALL, MUL #8\n"
".inst 0x454c1042 // ssublb z2.h, z2.b, z12.b\n"
+ "ld1sb { z8.h }, p4/Z, [x5]\n"
".inst 0x454c1063 // ssublb z3.h, z3.b, z12.b\n"
- "ld1sb { z6.h }, p4/Z, [x17, #6, MUL VL]\n"
- "ld1sb { z7.h }, p4/Z, [x17, #7, MUL VL]\n"
- "inch x17, ALL, MUL #8\n"
+ "ldp x26, x25, [x17, #0x0]\n"
".inst 0x454c1084 // ssublb z4.h, z4.b, z12.b\n"
- "ld1w { z18.s }, p2/Z, [x12]\n"
- "ld1w { z8.s }, p1/Z, [x12, #1, MUL VL]\n"
- "uzp1 z13.s, z18.s, z8.s\n"
- "uzp2 z17.s, z18.s, z8.s\n"
- "ld1sb { z8.h }, p4/Z, [x17]\n"
- "ldp x9, x28, [x11, #0x0]\n"
- "addvl x12, x12, #2\n"
- "str x12, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x25, x24, [x11, #0x10]\n"
- "ldp x23, x22, [x11, #0x20]\n"
- "mov z9.d, z13.d\n"
- "mov z10.d, z17.d\n"
- "ldp x21, x20, [x11, #0x30]\n"
- "ld1b { z31.h }, p3/Z, [x9, x7]\n"
- "mov z11.d, z13.d\n"
- "mov z22.d, z17.d\n"
- "ld1b { z30.h }, p3/Z, [x28, x7]\n"
- "ld1b { z29.h }, p3/Z, [x25, x7]\n"
- "mov z21.d, z13.d\n"
- "mov z18.d, z17.d\n"
- "ld1b { z28.h }, p3/Z, [x24, x7]\n"
- "ld1b { z27.h }, p3/Z, [x23, x7]\n"
+ "ldp x24, x23, [x17, #0x10]\n"
".inst 0x454c10a5 // ssublb z5.h, z5.b, z12.b\n"
".inst 0x454c10c6 // ssublb z6.h, z6.b, z12.b\n"
- "ld1b { z26.h }, p3/Z, [x22, x7]\n"
- "ld1b { z25.h }, p3/Z, [x21, x7]\n"
+ "ldp x22, x21, [x17, #0x20]\n"
".inst 0x454c10e7 // ssublb z7.h, z7.b, z12.b\n"
".inst 0x454c1108 // ssublb z8.h, z8.b, z12.b\n"
- "ld1b { z24.h }, p3/Z, [x20, x7]\n"
- ".inst 0x45571bff // usublb z31.h, z31.b, z23.b\n"
- ".inst 0x45571bde // usublb z30.h, z30.b, z23.b\n"
- ".inst 0x45571bbd // usublb z29.h, z29.b, z23.b\n"
- ".inst 0x45571b9c // usublb z28.h, z28.b, z23.b\n"
- ".inst 0x45571b7b // usublb z27.h, z27.b, z23.b\n"
- ".inst 0x45571b5a // usublb z26.h, z26.b, z23.b\n"
- ".inst 0x45571b39 // usublb z25.h, z25.b, z23.b\n"
- ".inst 0x45571b18 // usublb z24.h, z24.b, z23.b\n"
+ "ldp x20, x19, [x17, #0x30]\n"
+ "ld1b { z31.h }, p3/Z, [x26, x6]\n"
+ ".inst 0x45501bff // usublb z31.h, z31.b, z16.b\n"
+ "ld1b { z30.h }, p3/Z, [x25, x6]\n"
+ "ld1b { z29.h }, p3/Z, [x24, x6]\n"
+ ".inst 0x45501bde // usublb z30.h, z30.b, z16.b\n"
+ "ld1b { z28.h }, p3/Z, [x23, x6]\n"
+ ".inst 0x45501bbd // usublb z29.h, z29.b, z16.b\n"
+ "ld1b { z27.h }, p3/Z, [x22, x6]\n"
+ "ld1b { z26.h }, p3/Z, [x21, x6]\n"
+ ".inst 0x45501b9c // usublb z28.h, z28.b, z16.b\n"
+ "ld1b { z25.h }, p3/Z, [x20, x6]\n"
+ "ld1b { z24.h }, p3/Z, [x19, x6]\n"
+ ".inst 0x45501b7b // usublb z27.h, z27.b, z16.b\n"
+ ".inst 0x45501b5a // usublb z26.h, z26.b, z16.b\n"
+ ".inst 0x45501b39 // usublb z25.h, z25.b, z16.b\n"
+ ".inst 0x45501b18 // usublb z24.h, z24.b, z16.b\n"
"b.any 1b\n"
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
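For readers tracing the loop epilogue above: after the smlalb/smlalt multiply-accumulates, each 32-bit accumulator is requantised with sqrdmulh (fixed-point multiply), srshl (rounding right shift, preceded by the and/asr/sqadd sign fix-up), an add of the c_offset, and an smin/smax clamp before the trn1/st1b narrowing store. What follows is only a minimal scalar C++ sketch of that step, not part of the patch; the function and parameter names are invented here, and the vector fix-up is folded into the rounding shift.

#include <algorithm>
#include <cstdint>

// Hypothetical scalar reference for one lane of the requantise sequence.
static inline uint8_t requantize_lane(int32_t acc, int32_t multiplier,
                                      int right_shift, int32_t c_offset,
                                      int32_t minval, int32_t maxval)
{
    // sqrdmulh: saturating rounding doubling multiply, keeping the high
    // half: (2*a*b + 2^31) >> 32 == (a*b + 2^30) >> 31. The only input
    // pair that saturates is INT32_MIN * INT32_MIN, handled explicitly.
    int32_t high;
    if (acc == INT32_MIN && multiplier == INT32_MIN)
        high = INT32_MAX;
    else
        high = (int32_t) (((int64_t) acc * multiplier + (INT64_C(1) << 30)) >> 31);

    // srshl with a negative per-channel shift amount behaves as a
    // rounding arithmetic shift right by that magnitude.
    if (right_shift > 0)
        high = (int32_t) (((int64_t) high + (INT64_C(1) << (right_shift - 1))) >> right_shift);

    // Add the output zero point (z14 in the loop above) and clamp to
    // [minval, maxval] (z17 and z15) before the 8-bit narrowing store.
    return (uint8_t) std::clamp(high + c_offset, minval, maxval);
}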
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
index 9c291ae186..9cf95e9588 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -111,538 +111,546 @@ void sve_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "mov x0, #0x0\n"
- "mov x24, x0\n"
- "ldr x23, [%x[params], %[offsetof_Params_requant]]\n"
- "ldr x1, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "ldr x0, [%x[params], %[offsetof_Params_n_channels]]\n"
"ptrue p4.b\n"
- "ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n"
- "incw x24\n"
- "ldr x2, [%x[params], %[offsetof_Params_weights]]\n"
- "add x21, x23, %[offsetof_Requantize32_a_offset]\n"
- "add x20, x23, %[offsetof_Requantize32_b_offset]\n"
- "ld1rb { z15.b }, p4/Z, [x21]\n"
- "ld1rb { z17.b }, p4/Z, [x20]\n"
- "add x21, x23, %[offsetof_Requantize32_c_offset]\n"
- "add x20, x23, %[offsetof_Requantize32_minval]\n"
- "ld1rh { z12.h }, p4/Z, [x21]\n"
- "ld1rh { z13.h }, p4/Z, [x20]\n"
- "add x20, x23, %[offsetof_Requantize32_maxval]\n"
- "ld1rh { z11.h }, p4/Z, [x20]\n"
- "ldp x3, x4, [x22, #0x0]\n"
- "whilelt p3.h, x0, x1\n"
- "ldp x5, x6, [x22, #0x10]\n"
- "whilelt p2.s, x0, x1\n"
- "whilelt p1.s, x24, x1\n"
- "ldr x14, [%x[params], %[offsetof_Params_bias]]\n"
- "add x7, %x[params], %[offsetof_Params_inptrs]\n"
- "ld1w { z30.s }, p2/Z, [x14]\n"
- "ld1w { z16.s }, p1/Z, [x14, #1, MUL VL]\n"
- "uzp1 z14.s, z30.s, z16.s\n"
- "ld1sb { z0.h }, p4/Z, [x2]\n"
- "ld1sb { z1.h }, p4/Z, [x2, #1, MUL VL]\n"
- "uzp2 z10.s, z30.s, z16.s\n"
- "addvl x14, x14, #2\n"
- "ld1sb { z2.h }, p4/Z, [x2, #2, MUL VL]\n"
- "ld1sb { z3.h }, p4/Z, [x2, #3, MUL VL]\n"
- "mov x8, #0x0\n"
- "mov z20.d, z14.d\n"
- "ld1sb { z4.h }, p4/Z, [x2, #4, MUL VL]\n"
- "ldp x9, x28, [x7, #0x0]\n"
- "mov z7.d, z10.d\n"
- "mov z8.d, z14.d\n"
- "ldp x27, x26, [x7, #0x10]\n"
- "ldp x25, x24, [x7, #0x20]\n"
- "mov z16.d, z10.d\n"
- "mov z6.d, z14.d\n"
- "ldp x23, x22, [x7, #0x30]\n"
- "ldp x21, x20, [x7, #0x40]\n"
- "mov z5.d, z10.d\n"
- ".inst 0x45511000 // ssublb z0.h, z0.b, z17.b\n"
- "ld1b { z31.h }, p3/Z, [x9, x0]\n"
- "ld1b { z30.h }, p3/Z, [x28, x0]\n"
- ".inst 0x45511021 // ssublb z1.h, z1.b, z17.b\n"
- ".inst 0x45511042 // ssublb z2.h, z2.b, z17.b\n"
- "ld1b { z29.h }, p3/Z, [x27, x0]\n"
- "ld1b { z28.h }, p3/Z, [x26, x0]\n"
- ".inst 0x45511063 // ssublb z3.h, z3.b, z17.b\n"
- ".inst 0x45511084 // ssublb z4.h, z4.b, z17.b\n"
- "ld1b { z27.h }, p3/Z, [x25, x0]\n"
- "ld1b { z23.h }, p3/Z, [x24, x0]\n"
- ".inst 0x454f1bff // usublb z31.h, z31.b, z15.b\n"
- ".inst 0x454f1bde // usublb z30.h, z30.b, z15.b\n"
- "ld1b { z25.h }, p3/Z, [x23, x0]\n"
- "ld1b { z24.h }, p3/Z, [x22, x0]\n"
- ".inst 0x454f1bbd // usublb z29.h, z29.b, z15.b\n"
- ".inst 0x454f1b9c // usublb z28.h, z28.b, z15.b\n"
- "ld1b { z26.h }, p3/Z, [x21, x0]\n"
- "ld1b { z22.h }, p3/Z, [x20, x0]\n"
- ".inst 0x454f1b7b // usublb z27.h, z27.b, z15.b\n"
- ".inst 0x454f1af7 // usublb z23.h, z23.b, z15.b\n"
- "ldr x17, [%x[params], %[offsetof_Params_requant_muls]]\n"
- "ldr x16, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "str x14, [%x[params], %[offsetof_Params_bias]]\n"
- ".inst 0x454f1b39 // usublb z25.h, z25.b, z15.b\n"
- ".inst 0x454f1b18 // usublb z24.h, z24.b, z15.b\n"
- ".inst 0x454f1b5a // usublb z26.h, z26.b, z15.b\n"
- ".inst 0x454f1ad6 // usublb z22.h, z22.b, z15.b\n"
+ "ldr x1, [%x[params], %[offsetof_Params_weights]]\n"
+ "mov x2, #0x0\n"
+ "ldr x22, [%x[params], %[offsetof_Params_requant]]\n"
+ "mov x3, #0x0\n"
+ "ldr x4, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "add x5, %x[params], %[offsetof_Params_inptrs]\n"
+ "ldr x6, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "add x19, x22, %[offsetof_Requantize32_a_offset]\n"
+ "ldr x21, [%x[params], %[offsetof_Params_outptrs]]\n"
+ "add x20, x22, %[offsetof_Requantize32_b_offset]\n"
+ "ld1rb { z9.b }, p4/Z, [x19]\n"
+ "add x19, x22, %[offsetof_Requantize32_c_offset]\n"
+ "ld1rb { z14.b }, p4/Z, [x20]\n"
+ "add x20, x22, %[offsetof_Requantize32_minval]\n"
+ "ld1rw { z17.s }, p4/Z, [x19]\n"
+ "add x19, x22, %[offsetof_Requantize32_maxval]\n"
+ "ld1rw { z12.s }, p4/Z, [x20]\n"
+ "whilelt p3.h, x2, x0\n"
+ "ld1rw { z11.s }, p4/Z, [x19]\n"
+ "whilelt p2.s, x2, x0\n"
+ "ldp x7, x8, [x21, #0x0]\n"
+ "mov x19, x2\n"
+ "incw x19\n"
+ "ldp x17, x16, [x21, #0x10]\n"
+ "whilelt p1.s, x19, x0\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ld1w { z4.s }, p2/Z, [x19]\n"
+ "ld1w { z16.s }, p1/Z, [x19, #1, MUL VL]\n"
+ "uzp1 z15.s, z4.s, z16.s\n"
+ "addvl x19, x19, #2\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "uzp2 z18.s, z4.s, z16.s\n"
+ "mov z21.d, z15.d\n"
+ "ld1sb { z0.h }, p4/Z, [x1]\n"
+ "mov z5.d, z15.d\n"
+ "ld1sb { z1.h }, p4/Z, [x1, #1, MUL VL]\n"
+ "mov z13.d, z18.d\n"
+ "ld1sb { z2.h }, p4/Z, [x1, #2, MUL VL]\n"
+ "mov z7.d, z18.d\n"
+ "ld1sb { z3.h }, p4/Z, [x1, #3, MUL VL]\n"
+ "mov z6.d, z15.d\n"
+ "ld1sb { z4.h }, p4/Z, [x1, #4, MUL VL]\n"
+ "mov z8.d, z18.d\n"
+ "ldp x28, x27, [x5, #0x0]\n"
+ ".inst 0x454e1000 // ssublb z0.h, z0.b, z14.b\n"
+ "ldp x26, x25, [x5, #0x10]\n"
+ ".inst 0x454e1021 // ssublb z1.h, z1.b, z14.b\n"
+ ".inst 0x454e1042 // ssublb z2.h, z2.b, z14.b\n"
+ "ldp x24, x23, [x5, #0x20]\n"
+ ".inst 0x454e1063 // ssublb z3.h, z3.b, z14.b\n"
+ ".inst 0x454e1084 // ssublb z4.h, z4.b, z14.b\n"
+ "ldp x22, x21, [x5, #0x30]\n"
+ "ldp x20, x19, [x5, #0x40]\n"
+ "ld1b { z31.h }, p3/Z, [x28, x2]\n"
+ ".inst 0x45491bff // usublb z31.h, z31.b, z9.b\n"
+ "ld1b { z30.h }, p3/Z, [x27, x2]\n"
+ "ld1b { z29.h }, p3/Z, [x26, x2]\n"
+ ".inst 0x45491bde // usublb z30.h, z30.b, z9.b\n"
+ "ld1b { z28.h }, p3/Z, [x25, x2]\n"
+ ".inst 0x45491bbd // usublb z29.h, z29.b, z9.b\n"
+ "ld1b { z27.h }, p3/Z, [x24, x2]\n"
+ "ld1b { z23.h }, p3/Z, [x23, x2]\n"
+ ".inst 0x45491b9c // usublb z28.h, z28.b, z9.b\n"
+ "ld1b { z25.h }, p3/Z, [x22, x2]\n"
+ "ld1b { z24.h }, p3/Z, [x21, x2]\n"
+ ".inst 0x45491b7b // usublb z27.h, z27.b, z9.b\n"
+ ".inst 0x45491af7 // usublb z23.h, z23.b, z9.b\n"
+ "ld1b { z26.h }, p3/Z, [x20, x2]\n"
+ "ld1b { z22.h }, p3/Z, [x19, x2]\n"
+ ".inst 0x45491b39 // usublb z25.h, z25.b, z9.b\n"
+ ".inst 0x45491b18 // usublb z24.h, z24.b, z9.b\n"
+ ".inst 0x45491b5a // usublb z26.h, z26.b, z9.b\n"
+ ".inst 0x45491ad6 // usublb z22.h, z22.b, z9.b\n"
"1:" // Loop
- ".inst 0x448043ee // smlalb z14.s, p4/M, z31.h, z0.h\n"
- ".inst 0x448047ea // smlalt z10.s, p4/M, z31.h, z0.h\n"
- "ldr x20, [x7, #0x50]\n"
- "ld1b { z31.h }, p3/Z, [x20, x0]\n"
- ".inst 0x448143ce // smlalb z14.s, p4/M, z30.h, z1.h\n"
- ".inst 0x448043d4 // smlalb z20.s, p4/M, z30.h, z0.h\n"
- "ldr x22, [x7, #0x58]\n"
- ".inst 0x454f1bff // usublb z31.h, z31.b, z15.b\n"
- ".inst 0x448043a8 // smlalb z8.s, p4/M, z29.h, z0.h\n"
+ ".inst 0x448043ef // smlalb z15.s, p4/M, z31.h, z0.h\n"
+ "ldr x20, [x5, #0x50]\n"
+ "whilelt p0.h, x3, x0\n"
+ ".inst 0x448047f2 // smlalt z18.s, p4/M, z31.h, z0.h\n"
+ "ldr x19, [x5, #0x58]\n"
+ ".inst 0x448043d5 // smlalb z21.s, p4/M, z30.h, z0.h\n"
+ "ldr x25, [x5, #0x60]\n"
+ ".inst 0x448047cd // smlalt z13.s, p4/M, z30.h, z0.h\n"
+ "ld1b { z31.h }, p3/Z, [x20, x2]\n"
+ ".inst 0x448043a5 // smlalb z5.s, p4/M, z29.h, z0.h\n"
+ "ldr x24, [x5, #0x68]\n"
+ ".inst 0x448047a7 // smlalt z7.s, p4/M, z29.h, z0.h\n"
+ "ldr x23, [x5, #0x70]\n"
".inst 0x44804386 // smlalb z6.s, p4/M, z28.h, z0.h\n"
- "ldr x21, [x7, #0x60]\n"
- "ldr x20, [x7, #0x68]\n"
- ".inst 0x448147ca // smlalt z10.s, p4/M, z30.h, z1.h\n"
- ".inst 0x448047c7 // smlalt z7.s, p4/M, z30.h, z0.h\n"
- "ld1b { z30.h }, p3/Z, [x22, x0]\n"
- ".inst 0x454f1bde // usublb z30.h, z30.b, z15.b\n"
- ".inst 0x448047b0 // smlalt z16.s, p4/M, z29.h, z0.h\n"
- ".inst 0x4482436e // smlalb z14.s, p4/M, z27.h, z2.h\n"
- "ldr x25, [x7, #0x70]\n"
- "ldr x24, [x7, #0x78]\n"
- ".inst 0x44804785 // smlalt z5.s, p4/M, z28.h, z0.h\n"
- ".inst 0x44814374 // smlalb z20.s, p4/M, z27.h, z1.h\n"
- "ld1sb { z0.h }, p4/Z, [x2, #5, MUL VL]\n"
- ".inst 0x45511000 // ssublb z0.h, z0.b, z17.b\n"
- ".inst 0x44814388 // smlalb z8.s, p4/M, z28.h, z1.h\n"
+ "ldr x22, [x5, #0x78]\n"
+ ".inst 0x45491bff // usublb z31.h, z31.b, z9.b\n"
+ "ldr x15, [x5, #0x80]\n"
+ ".inst 0x44804788 // smlalt z8.s, p4/M, z28.h, z0.h\n"
+ "ld1sb { z0.h }, p4/Z, [x1, #5, MUL VL]\n"
+ ".inst 0x448143cf // smlalb z15.s, p4/M, z30.h, z1.h\n"
+ "ldr x21, [x5, #0x88]\n"
+ ".inst 0x448147d2 // smlalt z18.s, p4/M, z30.h, z1.h\n"
+ "ld1b { z30.h }, p3/Z, [x19, x2]\n"
+ ".inst 0x44814375 // smlalb z21.s, p4/M, z27.h, z1.h\n"
+ "ldr x20, [x5, #0x90]\n"
+ ".inst 0x454e1000 // ssublb z0.h, z0.b, z14.b\n"
+ "ldr x19, [x5, #0x98]\n"
+ ".inst 0x4481476d // smlalt z13.s, p4/M, z27.h, z1.h\n"
+ "ldr x14, [x5, #0xa0]\n"
+ ".inst 0x45491bde // usublb z30.h, z30.b, z9.b\n"
+ "ldr x13, [x5, #0xa8]\n"
+ ".inst 0x44814385 // smlalb z5.s, p4/M, z28.h, z1.h\n"
+ "ldr x12, [x5, #0xb0]\n"
+ ".inst 0x44814787 // smlalt z7.s, p4/M, z28.h, z1.h\n"
+ "ldr x11, [x5, #0xb8]\n"
".inst 0x448142e6 // smlalb z6.s, p4/M, z23.h, z1.h\n"
- "ldr x15, [x7, #0x80]\n"
- "ldr x23, [x7, #0x88]\n"
- ".inst 0x4482476a // smlalt z10.s, p4/M, z27.h, z2.h\n"
- ".inst 0x44814767 // smlalt z7.s, p4/M, z27.h, z1.h\n"
- "ld1b { z27.h }, p3/Z, [x21, x0]\n"
- ".inst 0x454f1b7b // usublb z27.h, z27.b, z15.b\n"
- ".inst 0x44814790 // smlalt z16.s, p4/M, z28.h, z1.h\n"
- ".inst 0x4483432e // smlalb z14.s, p4/M, z25.h, z3.h\n"
- "ldr x22, [x7, #0x90]\n"
- "ldr x21, [x7, #0x98]\n"
- ".inst 0x448146e5 // smlalt z5.s, p4/M, z23.h, z1.h\n"
- ".inst 0x44824334 // smlalb z20.s, p4/M, z25.h, z2.h\n"
- "ld1sb { z1.h }, p4/Z, [x2, #6, MUL VL]\n"
- ".inst 0x45511021 // ssublb z1.h, z1.b, z17.b\n"
- ".inst 0x448242e8 // smlalb z8.s, p4/M, z23.h, z2.h\n"
+ "ldr x10, [x5, #0xc0]\n"
+ ".inst 0x448146e8 // smlalt z8.s, p4/M, z23.h, z1.h\n"
+ "ld1sb { z1.h }, p4/Z, [x1, #6, MUL VL]\n"
+ ".inst 0x4482436f // smlalb z15.s, p4/M, z27.h, z2.h\n"
+ "ldr x9, [x5, #0xc8]\n"
+ ".inst 0x44824772 // smlalt z18.s, p4/M, z27.h, z2.h\n"
+ "ld1b { z27.h }, p3/Z, [x25, x2]\n"
+ ".inst 0x44824335 // smlalb z21.s, p4/M, z25.h, z2.h\n"
+ "ldr x28, [x5, #0xd0]\n"
+ ".inst 0x454e1021 // ssublb z1.h, z1.b, z14.b\n"
+ "ldr x27, [x5, #0xd8]\n"
+ ".inst 0x4482472d // smlalt z13.s, p4/M, z25.h, z2.h\n"
+ "ldr x26, [x5, #0xe0]\n"
+ ".inst 0x45491b7b // usublb z27.h, z27.b, z9.b\n"
+ "ld1w { z19.s }, p2/Z, [x4]\n"
+ ".inst 0x448242e5 // smlalb z5.s, p4/M, z23.h, z2.h\n"
+ "ld1w { z16.s }, p1/Z, [x4, #1, MUL VL]\n"
+ "addvl x4, x4, #2\n"
+ ".inst 0x448246e7 // smlalt z7.s, p4/M, z23.h, z2.h\n"
".inst 0x448243e6 // smlalb z6.s, p4/M, z31.h, z2.h\n"
- "ldr x14, [x7, #0xa0]\n"
- "ldr x13, [x7, #0xa8]\n"
- ".inst 0x4483472a // smlalt z10.s, p4/M, z25.h, z3.h\n"
- ".inst 0x44824727 // smlalt z7.s, p4/M, z25.h, z2.h\n"
- "ld1b { z25.h }, p3/Z, [x20, x0]\n"
- ".inst 0x454f1b39 // usublb z25.h, z25.b, z15.b\n"
- ".inst 0x448246f0 // smlalt z16.s, p4/M, z23.h, z2.h\n"
- ".inst 0x4484430e // smlalb z14.s, p4/M, z24.h, z4.h\n"
- "ldr x12, [x7, #0xb0]\n"
- "ldr x20, [x7, #0xb8]\n"
- ".inst 0x448247e5 // smlalt z5.s, p4/M, z31.h, z2.h\n"
- ".inst 0x44834314 // smlalb z20.s, p4/M, z24.h, z3.h\n"
- "ld1sb { z2.h }, p4/Z, [x2, #7, MUL VL]\n"
- "inch x2, ALL, MUL #8\n"
- ".inst 0x448343e8 // smlalb z8.s, p4/M, z31.h, z3.h\n"
+ ".inst 0x448247e8 // smlalt z8.s, p4/M, z31.h, z2.h\n"
+ "ld1sb { z2.h }, p4/Z, [x1, #7, MUL VL]\n"
+ "inch x1, ALL, MUL #8\n"
+ "uzp1 z10.s, z19.s, z16.s\n"
+ "uzp2 z20.s, z19.s, z16.s\n"
+ "ld1w { z19.s }, p2/Z, [x6]\n"
+ ".inst 0x4483432f // smlalb z15.s, p4/M, z25.h, z3.h\n"
+ "ld1w { z16.s }, p1/Z, [x6, #1, MUL VL]\n"
+ "addvl x6, x6, #2\n"
+ ".inst 0x454e1042 // ssublb z2.h, z2.b, z14.b\n"
+ ".inst 0x44834732 // smlalt z18.s, p4/M, z25.h, z3.h\n"
+ "ld1b { z25.h }, p3/Z, [x24, x2]\n"
+ ".inst 0x44834315 // smlalb z21.s, p4/M, z24.h, z3.h\n"
+ "ldr x25, [x5, #0xe8]\n"
+ ".inst 0x4483470d // smlalt z13.s, p4/M, z24.h, z3.h\n"
+ ".inst 0x448343e5 // smlalb z5.s, p4/M, z31.h, z3.h\n"
+ ".inst 0x45491b39 // usublb z25.h, z25.b, z9.b\n"
+ ".inst 0x448347e7 // smlalt z7.s, p4/M, z31.h, z3.h\n"
".inst 0x448343c6 // smlalb z6.s, p4/M, z30.h, z3.h\n"
- ".inst 0x45511042 // ssublb z2.h, z2.b, z17.b\n"
- "ldr x11, [x7, #0xc0]\n"
- ".inst 0x4484470a // smlalt z10.s, p4/M, z24.h, z4.h\n"
- ".inst 0x44834707 // smlalt z7.s, p4/M, z24.h, z3.h\n"
- "ld1b { z24.h }, p3/Z, [x25, x0]\n"
- ".inst 0x454f1b18 // usublb z24.h, z24.b, z15.b\n"
- ".inst 0x448347f0 // smlalt z16.s, p4/M, z31.h, z3.h\n"
- ".inst 0x448043ae // smlalb z14.s, p4/M, z29.h, z0.h\n"
- "ldr x10, [x7, #0xc8]\n"
- "ldr x9, [x7, #0xd0]\n"
- ".inst 0x448347c5 // smlalt z5.s, p4/M, z30.h, z3.h\n"
- ".inst 0x44844374 // smlalb z20.s, p4/M, z27.h, z4.h\n"
- "ld1sb { z3.h }, p4/Z, [x2]\n"
- ".inst 0x45511063 // ssublb z3.h, z3.b, z17.b\n"
- ".inst 0x448443c8 // smlalb z8.s, p4/M, z30.h, z4.h\n"
+ ".inst 0x448347c8 // smlalt z8.s, p4/M, z30.h, z3.h\n"
+ "ld1sb { z3.h }, p4/Z, [x1]\n"
+ ".inst 0x4484430f // smlalb z15.s, p4/M, z24.h, z4.h\n"
+ ".inst 0x44844712 // smlalt z18.s, p4/M, z24.h, z4.h\n"
+ "ld1b { z24.h }, p3/Z, [x23, x2]\n"
+ ".inst 0x44844375 // smlalb z21.s, p4/M, z27.h, z4.h\n"
+ "ldr x24, [x5, #0xf0]\n"
+ ".inst 0x454e1063 // ssublb z3.h, z3.b, z14.b\n"
+ ".inst 0x4484476d // smlalt z13.s, p4/M, z27.h, z4.h\n"
+ "ld1b { z27.h }, p3/Z, [x22, x2]\n"
+ ".inst 0x45491b18 // usublb z24.h, z24.b, z9.b\n"
+ "ldr x23, [x5, #0xf8]\n"
+ ".inst 0x448443c5 // smlalb z5.s, p4/M, z30.h, z4.h\n"
+ ".inst 0x448447c7 // smlalt z7.s, p4/M, z30.h, z4.h\n"
+ ".inst 0x45491b7b // usublb z27.h, z27.b, z9.b\n"
".inst 0x44844346 // smlalb z6.s, p4/M, z26.h, z4.h\n"
- "ldr x28, [x7, #0xd8]\n"
- "ldr x27, [x7, #0xe0]\n"
- ".inst 0x448047aa // smlalt z10.s, p4/M, z29.h, z0.h\n"
- ".inst 0x44844767 // smlalt z7.s, p4/M, z27.h, z4.h\n"
- "ld1b { z27.h }, p3/Z, [x24, x0]\n"
- ".inst 0x454f1b7b // usublb z27.h, z27.b, z15.b\n"
- ".inst 0x448447d0 // smlalt z16.s, p4/M, z30.h, z4.h\n"
- ".inst 0x4481438e // smlalb z14.s, p4/M, z28.h, z1.h\n"
- "ldr x26, [x7, #0xe8]\n"
- "ldr x25, [x7, #0xf0]\n"
- ".inst 0x44844745 // smlalt z5.s, p4/M, z26.h, z4.h\n"
- ".inst 0x44804394 // smlalb z20.s, p4/M, z28.h, z0.h\n"
- "ld1sb { z4.h }, p4/Z, [x2, #1, MUL VL]\n"
- ".inst 0x45511084 // ssublb z4.h, z4.b, z17.b\n"
- ".inst 0x448042c8 // smlalb z8.s, p4/M, z22.h, z0.h\n"
+ ".inst 0x44844748 // smlalt z8.s, p4/M, z26.h, z4.h\n"
+ "ld1sb { z4.h }, p4/Z, [x1, #1, MUL VL]\n"
+ ".inst 0x448043af // smlalb z15.s, p4/M, z29.h, z0.h\n"
+ ".inst 0x448047b2 // smlalt z18.s, p4/M, z29.h, z0.h\n"
+ "uzp1 z29.s, z19.s, z16.s\n"
+ ".inst 0x454e1084 // ssublb z4.h, z4.b, z14.b\n"
+ "uzp2 z19.s, z19.s, z16.s\n"
+ ".inst 0x44804395 // smlalb z21.s, p4/M, z28.h, z0.h\n"
+ ".inst 0x4480478d // smlalt z13.s, p4/M, z28.h, z0.h\n"
+ ".inst 0x448042c5 // smlalb z5.s, p4/M, z22.h, z0.h\n"
+ ".inst 0x448046c7 // smlalt z7.s, p4/M, z22.h, z0.h\n"
".inst 0x44804326 // smlalb z6.s, p4/M, z25.h, z0.h\n"
- "ld1w { z19.s }, p2/Z, [x17]\n"
- "ld1w { z18.s }, p1/Z, [x17, #1, MUL VL]\n"
- ".inst 0x4481478a // smlalt z10.s, p4/M, z28.h, z1.h\n"
- ".inst 0x44804787 // smlalt z7.s, p4/M, z28.h, z0.h\n"
- "ld1b { z28.h }, p3/Z, [x23, x0]\n"
- ".inst 0x454f1b9c // usublb z28.h, z28.b, z15.b\n"
- ".inst 0x448046d0 // smlalt z16.s, p4/M, z22.h, z0.h\n"
- ".inst 0x448242ee // smlalb z14.s, p4/M, z23.h, z2.h\n"
- "ldr x24, [x7, #0xf8]\n"
- "uzp1 z9.s, z19.s, z18.s\n"
- ".inst 0x44804725 // smlalt z5.s, p4/M, z25.h, z0.h\n"
- ".inst 0x448142f4 // smlalb z20.s, p4/M, z23.h, z1.h\n"
- "ld1sb { z0.h }, p4/Z, [x2, #2, MUL VL]\n"
- ".inst 0x45511000 // ssublb z0.h, z0.b, z17.b\n"
- ".inst 0x44814328 // smlalb z8.s, p4/M, z25.h, z1.h\n"
+ ".inst 0x44804728 // smlalt z8.s, p4/M, z25.h, z0.h\n"
+ "ld1sb { z0.h }, p4/Z, [x1, #2, MUL VL]\n"
+ ".inst 0x4481438f // smlalb z15.s, p4/M, z28.h, z1.h\n"
+ ".inst 0x44814792 // smlalt z18.s, p4/M, z28.h, z1.h\n"
+ "ld1b { z28.h }, p3/Z, [x21, x2]\n"
+ ".inst 0x448142f5 // smlalb z21.s, p4/M, z23.h, z1.h\n"
+ "ldr x22, [x5, #0x100]\n"
+ ".inst 0x454e1000 // ssublb z0.h, z0.b, z14.b\n"
+ ".inst 0x448146ed // smlalt z13.s, p4/M, z23.h, z1.h\n"
+ ".inst 0x45491b9c // usublb z28.h, z28.b, z9.b\n"
+ ".inst 0x44814325 // smlalb z5.s, p4/M, z25.h, z1.h\n"
+ ".inst 0x44814727 // smlalt z7.s, p4/M, z25.h, z1.h\n"
".inst 0x44814306 // smlalb z6.s, p4/M, z24.h, z1.h\n"
- "uzp2 z29.s, z19.s, z18.s\n"
- "ld1w { z19.s }, p2/Z, [x16]\n"
- ".inst 0x448246ea // smlalt z10.s, p4/M, z23.h, z2.h\n"
- ".inst 0x448146e7 // smlalt z7.s, p4/M, z23.h, z1.h\n"
- "ld1b { z23.h }, p3/Z, [x15, x0]\n"
- ".inst 0x454f1af7 // usublb z23.h, z23.b, z15.b\n"
- ".inst 0x44814730 // smlalt z16.s, p4/M, z25.h, z1.h\n"
- ".inst 0x448343ee // smlalb z14.s, p4/M, z31.h, z3.h\n"
- "ldr x23, [x7, #0x100]\n"
- "whilelt p0.h, x8, x1\n"
- ".inst 0x44814705 // smlalt z5.s, p4/M, z24.h, z1.h\n"
- ".inst 0x448243f4 // smlalb z20.s, p4/M, z31.h, z2.h\n"
- "ld1sb { z1.h }, p4/Z, [x2, #3, MUL VL]\n"
- ".inst 0x45511021 // ssublb z1.h, z1.b, z17.b\n"
- ".inst 0x44824308 // smlalb z8.s, p4/M, z24.h, z2.h\n"
+ ".inst 0x44814708 // smlalt z8.s, p4/M, z24.h, z1.h\n"
+ "ld1sb { z1.h }, p4/Z, [x1, #3, MUL VL]\n"
+ ".inst 0x448242ef // smlalb z15.s, p4/M, z23.h, z2.h\n"
+ ".inst 0x448246f2 // smlalt z18.s, p4/M, z23.h, z2.h\n"
+ "ld1b { z23.h }, p3/Z, [x15, x2]\n"
+ ".inst 0x448243f5 // smlalb z21.s, p4/M, z31.h, z2.h\n"
+ "ldr x21, [x5, #0x108]\n"
+ ".inst 0x454e1021 // ssublb z1.h, z1.b, z14.b\n"
+ ".inst 0x448247ed // smlalt z13.s, p4/M, z31.h, z2.h\n"
+ ".inst 0x45491af7 // usublb z23.h, z23.b, z9.b\n"
+ ".inst 0x44824305 // smlalb z5.s, p4/M, z24.h, z2.h\n"
+ ".inst 0x44824707 // smlalt z7.s, p4/M, z24.h, z2.h\n"
".inst 0x44824366 // smlalb z6.s, p4/M, z27.h, z2.h\n"
- "addvl x17, x17, #2\n"
- ".inst 0x448347ea // smlalt z10.s, p4/M, z31.h, z3.h\n"
- ".inst 0x448247e7 // smlalt z7.s, p4/M, z31.h, z2.h\n"
- "ld1b { z31.h }, p3/Z, [x22, x0]\n"
- ".inst 0x454f1bff // usublb z31.h, z31.b, z15.b\n"
- ".inst 0x44824710 // smlalt z16.s, p4/M, z24.h, z2.h\n"
- ".inst 0x448443ce // smlalb z14.s, p4/M, z30.h, z4.h\n"
- "ldr x22, [x7, #0x108]\n"
- ".inst 0x44824765 // smlalt z5.s, p4/M, z27.h, z2.h\n"
- ".inst 0x448343d4 // smlalb z20.s, p4/M, z30.h, z3.h\n"
- "ld1sb { z2.h }, p4/Z, [x2, #4, MUL VL]\n"
- ".inst 0x45511042 // ssublb z2.h, z2.b, z17.b\n"
- ".inst 0x44834368 // smlalb z8.s, p4/M, z27.h, z3.h\n"
+ ".inst 0x44824768 // smlalt z8.s, p4/M, z27.h, z2.h\n"
+ "ld1sb { z2.h }, p4/Z, [x1, #4, MUL VL]\n"
+ ".inst 0x448343ef // smlalb z15.s, p4/M, z31.h, z3.h\n"
+ ".inst 0x448347f2 // smlalt z18.s, p4/M, z31.h, z3.h\n"
+ "ld1b { z31.h }, p3/Z, [x20, x2]\n"
+ ".inst 0x448343d5 // smlalb z21.s, p4/M, z30.h, z3.h\n"
+ "ldr x20, [x5, #0x110]\n"
+ ".inst 0x454e1042 // ssublb z2.h, z2.b, z14.b\n"
+ ".inst 0x448347cd // smlalt z13.s, p4/M, z30.h, z3.h\n"
+ ".inst 0x45491bff // usublb z31.h, z31.b, z9.b\n"
+ ".inst 0x44834365 // smlalb z5.s, p4/M, z27.h, z3.h\n"
+ ".inst 0x44834767 // smlalt z7.s, p4/M, z27.h, z3.h\n"
".inst 0x448342e6 // smlalb z6.s, p4/M, z23.h, z3.h\n"
- ".inst 0x448447ca // smlalt z10.s, p4/M, z30.h, z4.h\n"
- ".inst 0x448347c7 // smlalt z7.s, p4/M, z30.h, z3.h\n"
- "ld1b { z30.h }, p3/Z, [x21, x0]\n"
- ".inst 0x454f1bde // usublb z30.h, z30.b, z15.b\n"
- ".inst 0x44834770 // smlalt z16.s, p4/M, z27.h, z3.h\n"
- ".inst 0x448042ce // smlalb z14.s, p4/M, z22.h, z0.h\n"
- "ldr x21, [x7, #0x110]\n"
- ".inst 0x448346e5 // smlalt z5.s, p4/M, z23.h, z3.h\n"
- ".inst 0x44844354 // smlalb z20.s, p4/M, z26.h, z4.h\n"
- "ld1sb { z3.h }, p4/Z, [x2, #5, MUL VL]\n"
- ".inst 0x45511063 // ssublb z3.h, z3.b, z17.b\n"
- ".inst 0x448442e8 // smlalb z8.s, p4/M, z23.h, z4.h\n"
+ ".inst 0x448346e8 // smlalt z8.s, p4/M, z23.h, z3.h\n"
+ "ld1sb { z3.h }, p4/Z, [x1, #5, MUL VL]\n"
+ ".inst 0x448443cf // smlalb z15.s, p4/M, z30.h, z4.h\n"
+ ".inst 0x448447d2 // smlalt z18.s, p4/M, z30.h, z4.h\n"
+ "ld1b { z30.h }, p3/Z, [x19, x2]\n"
+ ".inst 0x44844355 // smlalb z21.s, p4/M, z26.h, z4.h\n"
+ "ldr x19, [x5, #0x118]\n"
+ ".inst 0x454e1063 // ssublb z3.h, z3.b, z14.b\n"
+ ".inst 0x4484474d // smlalt z13.s, p4/M, z26.h, z4.h\n"
+ "ld1b { z26.h }, p3/Z, [x14, x2]\n"
+ ".inst 0x45491bde // usublb z30.h, z30.b, z9.b\n"
+ ".inst 0x448442e5 // smlalb z5.s, p4/M, z23.h, z4.h\n"
+ ".inst 0x448446e7 // smlalt z7.s, p4/M, z23.h, z4.h\n"
+ ".inst 0x45491b5a // usublb z26.h, z26.b, z9.b\n"
".inst 0x44844386 // smlalb z6.s, p4/M, z28.h, z4.h\n"
- ".inst 0x448046ca // smlalt z10.s, p4/M, z22.h, z0.h\n"
- ".inst 0x44844747 // smlalt z7.s, p4/M, z26.h, z4.h\n"
- "ld1b { z26.h }, p3/Z, [x14, x0]\n"
- ".inst 0x454f1b5a // usublb z26.h, z26.b, z15.b\n"
- ".inst 0x448446f0 // smlalt z16.s, p4/M, z23.h, z4.h\n"
- ".inst 0x4481432e // smlalb z14.s, p4/M, z25.h, z1.h\n"
- "ld1b { z22.h }, p3/Z, [x20, x0]\n"
- ".inst 0x454f1ad6 // usublb z22.h, z22.b, z15.b\n"
- ".inst 0x44844785 // smlalt z5.s, p4/M, z28.h, z4.h\n"
- ".inst 0x44804334 // smlalb z20.s, p4/M, z25.h, z0.h\n"
- "ld1sb { z4.h }, p4/Z, [x2, #6, MUL VL]\n"
- ".inst 0x45511084 // ssublb z4.h, z4.b, z17.b\n"
- ".inst 0x448043e8 // smlalb z8.s, p4/M, z31.h, z0.h\n"
+ ".inst 0x44844788 // smlalt z8.s, p4/M, z28.h, z4.h\n"
+ "ld1sb { z4.h }, p4/Z, [x1, #6, MUL VL]\n"
+ ".inst 0x448042cf // smlalb z15.s, p4/M, z22.h, z0.h\n"
+ ".inst 0x448046d2 // smlalt z18.s, p4/M, z22.h, z0.h\n"
+ "ld1b { z22.h }, p3/Z, [x11, x2]\n"
+ ".inst 0x44804335 // smlalb z21.s, p4/M, z25.h, z0.h\n"
+ ".inst 0x454e1084 // ssublb z4.h, z4.b, z14.b\n"
+ ".inst 0x4480472d // smlalt z13.s, p4/M, z25.h, z0.h\n"
+ ".inst 0x45491ad6 // usublb z22.h, z22.b, z9.b\n"
+ ".inst 0x448043e5 // smlalb z5.s, p4/M, z31.h, z0.h\n"
+ ".inst 0x448047e7 // smlalt z7.s, p4/M, z31.h, z0.h\n"
".inst 0x448043c6 // smlalb z6.s, p4/M, z30.h, z0.h\n"
- "ldr x20, [x7, #0x118]\n"
- "ldr x14, [%x[params], %[offsetof_Params_bias]]\n"
- ".inst 0x4481472a // smlalt z10.s, p4/M, z25.h, z1.h\n"
- ".inst 0x44804727 // smlalt z7.s, p4/M, z25.h, z0.h\n"
- "ld1b { z25.h }, p3/Z, [x13, x0]\n"
- ".inst 0x454f1b39 // usublb z25.h, z25.b, z15.b\n"
- ".inst 0x448047f0 // smlalt z16.s, p4/M, z31.h, z0.h\n"
- ".inst 0x4482430e // smlalb z14.s, p4/M, z24.h, z2.h\n"
- ".inst 0x448047c5 // smlalt z5.s, p4/M, z30.h, z0.h\n"
- ".inst 0x44814314 // smlalb z20.s, p4/M, z24.h, z1.h\n"
- "ld1sb { z0.h }, p4/Z, [x2, #7, MUL VL]\n"
- "inch x2, ALL, MUL #8\n"
- ".inst 0x448143c8 // smlalb z8.s, p4/M, z30.h, z1.h\n"
+ ".inst 0x448047c8 // smlalt z8.s, p4/M, z30.h, z0.h\n"
+ "ld1sb { z0.h }, p4/Z, [x1, #7, MUL VL]\n"
+ "inch x1, ALL, MUL #8\n"
+ ".inst 0x4481432f // smlalb z15.s, p4/M, z25.h, z1.h\n"
+ ".inst 0x44814732 // smlalt z18.s, p4/M, z25.h, z1.h\n"
+ "ld1b { z25.h }, p3/Z, [x13, x2]\n"
+ ".inst 0x44814315 // smlalb z21.s, p4/M, z24.h, z1.h\n"
+ ".inst 0x454e1000 // ssublb z0.h, z0.b, z14.b\n"
+ ".inst 0x4481470d // smlalt z13.s, p4/M, z24.h, z1.h\n"
+ ".inst 0x45491b39 // usublb z25.h, z25.b, z9.b\n"
+ ".inst 0x448143c5 // smlalb z5.s, p4/M, z30.h, z1.h\n"
+ ".inst 0x448147c7 // smlalt z7.s, p4/M, z30.h, z1.h\n"
".inst 0x44814346 // smlalb z6.s, p4/M, z26.h, z1.h\n"
- ".inst 0x45511000 // ssublb z0.h, z0.b, z17.b\n"
- ".inst 0x4482470a // smlalt z10.s, p4/M, z24.h, z2.h\n"
- ".inst 0x44814707 // smlalt z7.s, p4/M, z24.h, z1.h\n"
- "ld1b { z24.h }, p3/Z, [x12, x0]\n"
- ".inst 0x454f1b18 // usublb z24.h, z24.b, z15.b\n"
- ".inst 0x448147d0 // smlalt z16.s, p4/M, z30.h, z1.h\n"
- ".inst 0x4483436e // smlalb z14.s, p4/M, z27.h, z3.h\n"
- ".inst 0x44814745 // smlalt z5.s, p4/M, z26.h, z1.h\n"
- ".inst 0x44824374 // smlalb z20.s, p4/M, z27.h, z2.h\n"
- "ld1sb { z1.h }, p4/Z, [x2]\n"
- ".inst 0x45511021 // ssublb z1.h, z1.b, z17.b\n"
- ".inst 0x44824348 // smlalb z8.s, p4/M, z26.h, z2.h\n"
+ ".inst 0x44814748 // smlalt z8.s, p4/M, z26.h, z1.h\n"
+ "ld1sb { z1.h }, p4/Z, [x1]\n"
+ ".inst 0x4482430f // smlalb z15.s, p4/M, z24.h, z2.h\n"
+ ".inst 0x44824712 // smlalt z18.s, p4/M, z24.h, z2.h\n"
+ "ld1b { z24.h }, p3/Z, [x12, x2]\n"
+ ".inst 0x44824375 // smlalb z21.s, p4/M, z27.h, z2.h\n"
+ ".inst 0x454e1021 // ssublb z1.h, z1.b, z14.b\n"
+ ".inst 0x4482476d // smlalt z13.s, p4/M, z27.h, z2.h\n"
+ ".inst 0x45491b18 // usublb z24.h, z24.b, z9.b\n"
+ ".inst 0x44824345 // smlalb z5.s, p4/M, z26.h, z2.h\n"
+ ".inst 0x44824747 // smlalt z7.s, p4/M, z26.h, z2.h\n"
".inst 0x44824326 // smlalb z6.s, p4/M, z25.h, z2.h\n"
- ".inst 0x4483476a // smlalt z10.s, p4/M, z27.h, z3.h\n"
- ".inst 0x44824767 // smlalt z7.s, p4/M, z27.h, z2.h\n"
- "ld1b { z27.h }, p3/Z, [x11, x0]\n"
- ".inst 0x454f1b7b // usublb z27.h, z27.b, z15.b\n"
- ".inst 0x44824750 // smlalt z16.s, p4/M, z26.h, z2.h\n"
- ".inst 0x448442ee // smlalb z14.s, p4/M, z23.h, z4.h\n"
- ".inst 0x44824725 // smlalt z5.s, p4/M, z25.h, z2.h\n"
- ".inst 0x448342f4 // smlalb z20.s, p4/M, z23.h, z3.h\n"
- "ld1sb { z2.h }, p4/Z, [x2, #1, MUL VL]\n"
- ".inst 0x45511042 // ssublb z2.h, z2.b, z17.b\n"
- ".inst 0x44834328 // smlalb z8.s, p4/M, z25.h, z3.h\n"
+ ".inst 0x44824728 // smlalt z8.s, p4/M, z25.h, z2.h\n"
+ "ld1sb { z2.h }, p4/Z, [x1, #1, MUL VL]\n"
+ ".inst 0x4483436f // smlalb z15.s, p4/M, z27.h, z3.h\n"
+ ".inst 0x44834772 // smlalt z18.s, p4/M, z27.h, z3.h\n"
+ "ld1b { z27.h }, p3/Z, [x10, x2]\n"
+ ".inst 0x448342f5 // smlalb z21.s, p4/M, z23.h, z3.h\n"
+ ".inst 0x454e1042 // ssublb z2.h, z2.b, z14.b\n"
+ ".inst 0x448346ed // smlalt z13.s, p4/M, z23.h, z3.h\n"
+ ".inst 0x45491b7b // usublb z27.h, z27.b, z9.b\n"
+ ".inst 0x44834325 // smlalb z5.s, p4/M, z25.h, z3.h\n"
+ ".inst 0x44834727 // smlalt z7.s, p4/M, z25.h, z3.h\n"
".inst 0x44834306 // smlalb z6.s, p4/M, z24.h, z3.h\n"
- ".inst 0x448446ea // smlalt z10.s, p4/M, z23.h, z4.h\n"
- ".inst 0x448346e7 // smlalt z7.s, p4/M, z23.h, z3.h\n"
- "ld1b { z23.h }, p3/Z, [x10, x0]\n"
- ".inst 0x454f1af7 // usublb z23.h, z23.b, z15.b\n"
- ".inst 0x44834730 // smlalt z16.s, p4/M, z25.h, z3.h\n"
- ".inst 0x448043ee // smlalb z14.s, p4/M, z31.h, z0.h\n"
- ".inst 0x44834705 // smlalt z5.s, p4/M, z24.h, z3.h\n"
- ".inst 0x44844394 // smlalb z20.s, p4/M, z28.h, z4.h\n"
- "ld1sb { z3.h }, p4/Z, [x2, #2, MUL VL]\n"
- ".inst 0x45511063 // ssublb z3.h, z3.b, z17.b\n"
- ".inst 0x44844308 // smlalb z8.s, p4/M, z24.h, z4.h\n"
+ ".inst 0x44834708 // smlalt z8.s, p4/M, z24.h, z3.h\n"
+ "ld1sb { z3.h }, p4/Z, [x1, #2, MUL VL]\n"
+ ".inst 0x448442ef // smlalb z15.s, p4/M, z23.h, z4.h\n"
+ ".inst 0x448446f2 // smlalt z18.s, p4/M, z23.h, z4.h\n"
+ "ld1b { z23.h }, p3/Z, [x9, x2]\n"
+ ".inst 0x44844395 // smlalb z21.s, p4/M, z28.h, z4.h\n"
+ ".inst 0x454e1063 // ssublb z3.h, z3.b, z14.b\n"
+ ".inst 0x4484478d // smlalt z13.s, p4/M, z28.h, z4.h\n"
+ "ld1b { z28.h }, p3/Z, [x26, x2]\n"
+ ".inst 0x45491af7 // usublb z23.h, z23.b, z9.b\n"
+ ".inst 0x44844305 // smlalb z5.s, p4/M, z24.h, z4.h\n"
+ ".inst 0x44844707 // smlalt z7.s, p4/M, z24.h, z4.h\n"
+ ".inst 0x45491b9c // usublb z28.h, z28.b, z9.b\n"
".inst 0x448442c6 // smlalb z6.s, p4/M, z22.h, z4.h\n"
- ".inst 0x448047ea // smlalt z10.s, p4/M, z31.h, z0.h\n"
- ".inst 0x44844787 // smlalt z7.s, p4/M, z28.h, z4.h\n"
- "ld1b { z31.h }, p3/Z, [x9, x0]\n"
- ".inst 0x454f1bff // usublb z31.h, z31.b, z15.b\n"
- ".inst 0x44844710 // smlalt z16.s, p4/M, z24.h, z4.h\n"
- ".inst 0x448143ce // smlalb z14.s, p4/M, z30.h, z1.h\n"
- "ld1b { z28.h }, p3/Z, [x27, x0]\n"
- ".inst 0x454f1b9c // usublb z28.h, z28.b, z15.b\n"
- ".inst 0x448446c5 // smlalt z5.s, p4/M, z22.h, z4.h\n"
- ".inst 0x448043d4 // smlalb z20.s, p4/M, z30.h, z0.h\n"
- "ld1sb { z4.h }, p4/Z, [x2, #3, MUL VL]\n"
- ".inst 0x45511084 // ssublb z4.h, z4.b, z17.b\n"
- ".inst 0x44804368 // smlalb z8.s, p4/M, z27.h, z0.h\n"
+ ".inst 0x448446c8 // smlalt z8.s, p4/M, z22.h, z4.h\n"
+ "ld1sb { z4.h }, p4/Z, [x1, #3, MUL VL]\n"
+ ".inst 0x448043ef // smlalb z15.s, p4/M, z31.h, z0.h\n"
+ ".inst 0x448047f2 // smlalt z18.s, p4/M, z31.h, z0.h\n"
+ "ld1b { z31.h }, p3/Z, [x28, x2]\n"
+ ".inst 0x448043d5 // smlalb z21.s, p4/M, z30.h, z0.h\n"
+ ".inst 0x454e1084 // ssublb z4.h, z4.b, z14.b\n"
+ ".inst 0x448047cd // smlalt z13.s, p4/M, z30.h, z0.h\n"
+ ".inst 0x45491bff // usublb z31.h, z31.b, z9.b\n"
+ ".inst 0x44804365 // smlalb z5.s, p4/M, z27.h, z0.h\n"
+ ".inst 0x44804767 // smlalt z7.s, p4/M, z27.h, z0.h\n"
".inst 0x448042e6 // smlalb z6.s, p4/M, z23.h, z0.h\n"
- ".inst 0x448147ca // smlalt z10.s, p4/M, z30.h, z1.h\n"
- ".inst 0x448047c7 // smlalt z7.s, p4/M, z30.h, z0.h\n"
- "ld1b { z30.h }, p3/Z, [x28, x0]\n"
- ".inst 0x454f1bde // usublb z30.h, z30.b, z15.b\n"
- ".inst 0x44804770 // smlalt z16.s, p4/M, z27.h, z0.h\n"
- ".inst 0x4482434e // smlalb z14.s, p4/M, z26.h, z2.h\n"
- ".inst 0x448046e5 // smlalt z5.s, p4/M, z23.h, z0.h\n"
- ".inst 0x44814354 // smlalb z20.s, p4/M, z26.h, z1.h\n"
- "ld1sb { z0.h }, p4/Z, [x2, #4, MUL VL]\n"
- ".inst 0x45511000 // ssublb z0.h, z0.b, z17.b\n"
- ".inst 0x448142e8 // smlalb z8.s, p4/M, z23.h, z1.h\n"
+ ".inst 0x448046e8 // smlalt z8.s, p4/M, z23.h, z0.h\n"
+ "ld1sb { z0.h }, p4/Z, [x1, #4, MUL VL]\n"
+ ".inst 0x448143cf // smlalb z15.s, p4/M, z30.h, z1.h\n"
+ ".inst 0x448147d2 // smlalt z18.s, p4/M, z30.h, z1.h\n"
+ "ld1b { z30.h }, p3/Z, [x27, x2]\n"
+ ".inst 0x44814355 // smlalb z21.s, p4/M, z26.h, z1.h\n"
+ ".inst 0x454e1000 // ssublb z0.h, z0.b, z14.b\n"
+ ".inst 0x4481474d // smlalt z13.s, p4/M, z26.h, z1.h\n"
+ ".inst 0x45491bde // usublb z30.h, z30.b, z9.b\n"
+ ".inst 0x448142e5 // smlalb z5.s, p4/M, z23.h, z1.h\n"
+ ".inst 0x448146e7 // smlalt z7.s, p4/M, z23.h, z1.h\n"
".inst 0x448143e6 // smlalb z6.s, p4/M, z31.h, z1.h\n"
- ".inst 0x4482474a // smlalt z10.s, p4/M, z26.h, z2.h\n"
- ".inst 0x44814747 // smlalt z7.s, p4/M, z26.h, z1.h\n"
- "ld1b { z26.h }, p3/Z, [x26, x0]\n"
- ".inst 0x454f1b5a // usublb z26.h, z26.b, z15.b\n"
- ".inst 0x448146f0 // smlalt z16.s, p4/M, z23.h, z1.h\n"
- ".inst 0x4483432e // smlalb z14.s, p4/M, z25.h, z3.h\n"
- ".inst 0x448147e5 // smlalt z5.s, p4/M, z31.h, z1.h\n"
- ".inst 0x44824334 // smlalb z20.s, p4/M, z25.h, z2.h\n"
- "ld1sb { z1.h }, p4/Z, [x2, #5, MUL VL]\n"
- ".inst 0x45511021 // ssublb z1.h, z1.b, z17.b\n"
- ".inst 0x448243e8 // smlalb z8.s, p4/M, z31.h, z2.h\n"
+ ".inst 0x448147e8 // smlalt z8.s, p4/M, z31.h, z1.h\n"
+ "ld1sb { z1.h }, p4/Z, [x1, #5, MUL VL]\n"
+ ".inst 0x4482434f // smlalb z15.s, p4/M, z26.h, z2.h\n"
+ ".inst 0x44824752 // smlalt z18.s, p4/M, z26.h, z2.h\n"
+ "ld1b { z26.h }, p3/Z, [x25, x2]\n"
+ ".inst 0x44824335 // smlalb z21.s, p4/M, z25.h, z2.h\n"
+ ".inst 0x454e1021 // ssublb z1.h, z1.b, z14.b\n"
+ ".inst 0x4482472d // smlalt z13.s, p4/M, z25.h, z2.h\n"
+ ".inst 0x45491b5a // usublb z26.h, z26.b, z9.b\n"
+ ".inst 0x448243e5 // smlalb z5.s, p4/M, z31.h, z2.h\n"
+ ".inst 0x448247e7 // smlalt z7.s, p4/M, z31.h, z2.h\n"
".inst 0x448243c6 // smlalb z6.s, p4/M, z30.h, z2.h\n"
- ".inst 0x4483472a // smlalt z10.s, p4/M, z25.h, z3.h\n"
- ".inst 0x44824727 // smlalt z7.s, p4/M, z25.h, z2.h\n"
- "ld1b { z25.h }, p3/Z, [x25, x0]\n"
- ".inst 0x454f1b39 // usublb z25.h, z25.b, z15.b\n"
- ".inst 0x448247f0 // smlalt z16.s, p4/M, z31.h, z2.h\n"
- ".inst 0x4484430e // smlalb z14.s, p4/M, z24.h, z4.h\n"
- ".inst 0x448247c5 // smlalt z5.s, p4/M, z30.h, z2.h\n"
- ".inst 0x44834314 // smlalb z20.s, p4/M, z24.h, z3.h\n"
- "ld1sb { z2.h }, p4/Z, [x2, #6, MUL VL]\n"
- ".inst 0x45511042 // ssublb z2.h, z2.b, z17.b\n"
- ".inst 0x448343c8 // smlalb z8.s, p4/M, z30.h, z3.h\n"
+ ".inst 0x448247c8 // smlalt z8.s, p4/M, z30.h, z2.h\n"
+ "ld1sb { z2.h }, p4/Z, [x1, #6, MUL VL]\n"
+ ".inst 0x4483432f // smlalb z15.s, p4/M, z25.h, z3.h\n"
+ ".inst 0x44834732 // smlalt z18.s, p4/M, z25.h, z3.h\n"
+ "ld1b { z25.h }, p3/Z, [x24, x2]\n"
+ ".inst 0x44834315 // smlalb z21.s, p4/M, z24.h, z3.h\n"
+ ".inst 0x454e1042 // ssublb z2.h, z2.b, z14.b\n"
+ ".inst 0x4483470d // smlalt z13.s, p4/M, z24.h, z3.h\n"
+ ".inst 0x45491b39 // usublb z25.h, z25.b, z9.b\n"
+ ".inst 0x448343c5 // smlalb z5.s, p4/M, z30.h, z3.h\n"
+ ".inst 0x448347c7 // smlalt z7.s, p4/M, z30.h, z3.h\n"
".inst 0x44834386 // smlalb z6.s, p4/M, z28.h, z3.h\n"
- ".inst 0x4484470a // smlalt z10.s, p4/M, z24.h, z4.h\n"
- ".inst 0x44834707 // smlalt z7.s, p4/M, z24.h, z3.h\n"
- "ld1b { z24.h }, p3/Z, [x24, x0]\n"
- ".inst 0x454f1b18 // usublb z24.h, z24.b, z15.b\n"
- ".inst 0x448347d0 // smlalt z16.s, p4/M, z30.h, z3.h\n"
- ".inst 0x4480436e // smlalb z14.s, p4/M, z27.h, z0.h\n"
- ".inst 0x44834785 // smlalt z5.s, p4/M, z28.h, z3.h\n"
- ".inst 0x448442d4 // smlalb z20.s, p4/M, z22.h, z4.h\n"
- "ld1sb { z3.h }, p4/Z, [x2, #7, MUL VL]\n"
- "inch x2, ALL, MUL #8\n"
- ".inst 0x44844388 // smlalb z8.s, p4/M, z28.h, z4.h\n"
+ ".inst 0x44834788 // smlalt z8.s, p4/M, z28.h, z3.h\n"
+ "ld1sb { z3.h }, p4/Z, [x1, #7, MUL VL]\n"
+ "inch x1, ALL, MUL #8\n"
+ ".inst 0x4484430f // smlalb z15.s, p4/M, z24.h, z4.h\n"
+ ".inst 0x44844712 // smlalt z18.s, p4/M, z24.h, z4.h\n"
+ "ld1b { z24.h }, p3/Z, [x23, x2]\n"
+ ".inst 0x448442d5 // smlalb z21.s, p4/M, z22.h, z4.h\n"
+ ".inst 0x454e1063 // ssublb z3.h, z3.b, z14.b\n"
+ ".inst 0x448446cd // smlalt z13.s, p4/M, z22.h, z4.h\n"
+ ".inst 0x45491b18 // usublb z24.h, z24.b, z9.b\n"
+ ".inst 0x44844385 // smlalb z5.s, p4/M, z28.h, z4.h\n"
+ ".inst 0x44844787 // smlalt z7.s, p4/M, z28.h, z4.h\n"
".inst 0x44844346 // smlalb z6.s, p4/M, z26.h, z4.h\n"
- ".inst 0x45511063 // ssublb z3.h, z3.b, z17.b\n"
- ".inst 0x4480476a // smlalt z10.s, p4/M, z27.h, z0.h\n"
- ".inst 0x44844790 // smlalt z16.s, p4/M, z28.h, z4.h\n"
- "ld1b { z27.h }, p3/Z, [x23, x0]\n"
- ".inst 0x454f1b7b // usublb z27.h, z27.b, z15.b\n"
- ".inst 0x448142ee // smlalb z14.s, p4/M, z23.h, z1.h\n"
- ".inst 0x448446c7 // smlalt z7.s, p4/M, z22.h, z4.h\n"
- "ld1w { z18.s }, p1/Z, [x16, #1, MUL VL]\n"
- "addvl x16, x16, #2\n"
- ".inst 0x44844745 // smlalt z5.s, p4/M, z26.h, z4.h\n"
- ".inst 0x448042f4 // smlalb z20.s, p4/M, z23.h, z0.h\n"
- "ld1sb { z4.h }, p4/Z, [x2]\n"
- ".inst 0x45511084 // ssublb z4.h, z4.b, z17.b\n"
- ".inst 0x44804328 // smlalb z8.s, p4/M, z25.h, z0.h\n"
+ ".inst 0x44844748 // smlalt z8.s, p4/M, z26.h, z4.h\n"
+ "ld1sb { z4.h }, p4/Z, [x1]\n"
+ "inch x1\n"
+ ".inst 0x4480436f // smlalb z15.s, p4/M, z27.h, z0.h\n"
+ ".inst 0x44804772 // smlalt z18.s, p4/M, z27.h, z0.h\n"
+ "ld1b { z27.h }, p3/Z, [x22, x2]\n"
+ ".inst 0x448042f5 // smlalb z21.s, p4/M, z23.h, z0.h\n"
+ ".inst 0x454e1084 // ssublb z4.h, z4.b, z14.b\n"
+ ".inst 0x448046ed // smlalt z13.s, p4/M, z23.h, z0.h\n"
+ ".inst 0x45491b7b // usublb z27.h, z27.b, z9.b\n"
+ ".inst 0x44804325 // smlalb z5.s, p4/M, z25.h, z0.h\n"
+ ".inst 0x44804727 // smlalt z7.s, p4/M, z25.h, z0.h\n"
+ "ld1b { z25.h }, p3/Z, [x21, x2]\n"
".inst 0x44804306 // smlalb z6.s, p4/M, z24.h, z0.h\n"
- "inch x2\n"
- ".inst 0x448146ea // smlalt z10.s, p4/M, z23.h, z1.h\n"
- ".inst 0x44804730 // smlalt z16.s, p4/M, z25.h, z0.h\n"
- "ld1b { z25.h }, p3/Z, [x22, x0]\n"
- ".inst 0x454f1b39 // usublb z25.h, z25.b, z15.b\n"
- ".inst 0x448243ee // smlalb z14.s, p4/M, z31.h, z2.h\n"
- ".inst 0x448046e7 // smlalt z7.s, p4/M, z23.h, z0.h\n"
- "uzp1 z23.s, z19.s, z18.s\n"
- ".inst 0x44804705 // smlalt z5.s, p4/M, z24.h, z0.h\n"
- ".inst 0x448143f4 // smlalb z20.s, p4/M, z31.h, z1.h\n"
- "uzp2 z22.s, z19.s, z18.s\n"
- ".inst 0x44814308 // smlalb z8.s, p4/M, z24.h, z1.h\n"
+ ".inst 0x44804708 // smlalt z8.s, p4/M, z24.h, z0.h\n"
+ ".inst 0x448142ef // smlalb z15.s, p4/M, z23.h, z1.h\n"
+ ".inst 0x45491b39 // usublb z25.h, z25.b, z9.b\n"
+ ".inst 0x448146f2 // smlalt z18.s, p4/M, z23.h, z1.h\n"
+ ".inst 0x448143f5 // smlalb z21.s, p4/M, z31.h, z1.h\n"
+ ".inst 0x448147ed // smlalt z13.s, p4/M, z31.h, z1.h\n"
+ ".inst 0x44814305 // smlalb z5.s, p4/M, z24.h, z1.h\n"
+ ".inst 0x44814707 // smlalt z7.s, p4/M, z24.h, z1.h\n"
+ "ld1b { z24.h }, p3/Z, [x20, x2]\n"
".inst 0x44814366 // smlalb z6.s, p4/M, z27.h, z1.h\n"
- ".inst 0x448247ea // smlalt z10.s, p4/M, z31.h, z2.h\n"
- ".inst 0x44814710 // smlalt z16.s, p4/M, z24.h, z1.h\n"
- "ld1b { z24.h }, p3/Z, [x21, x0]\n"
- ".inst 0x454f1b18 // usublb z24.h, z24.b, z15.b\n"
- ".inst 0x448343ce // smlalb z14.s, p4/M, z30.h, z3.h\n"
- ".inst 0x448147e7 // smlalt z7.s, p4/M, z31.h, z1.h\n"
- ".inst 0x44814765 // smlalt z5.s, p4/M, z27.h, z1.h\n"
- ".inst 0x448243d4 // smlalb z20.s, p4/M, z30.h, z2.h\n"
- ".inst 0x44824368 // smlalb z8.s, p4/M, z27.h, z2.h\n"
+ ".inst 0x44814768 // smlalt z8.s, p4/M, z27.h, z1.h\n"
+ ".inst 0x448243ef // smlalb z15.s, p4/M, z31.h, z2.h\n"
+ ".inst 0x45491b18 // usublb z24.h, z24.b, z9.b\n"
+ ".inst 0x448247f2 // smlalt z18.s, p4/M, z31.h, z2.h\n"
+ ".inst 0x448243d5 // smlalb z21.s, p4/M, z30.h, z2.h\n"
+ ".inst 0x448247cd // smlalt z13.s, p4/M, z30.h, z2.h\n"
+ ".inst 0x44824365 // smlalb z5.s, p4/M, z27.h, z2.h\n"
+ ".inst 0x44824767 // smlalt z7.s, p4/M, z27.h, z2.h\n"
+ "ld1b { z27.h }, p3/Z, [x19, x2]\n"
+ "inch x2\n"
".inst 0x44824326 // smlalb z6.s, p4/M, z25.h, z2.h\n"
- ".inst 0x448347ca // smlalt z10.s, p4/M, z30.h, z3.h\n"
- ".inst 0x44824770 // smlalt z16.s, p4/M, z27.h, z2.h\n"
- "ld1b { z27.h }, p3/Z, [x20, x0]\n"
- ".inst 0x454f1b7b // usublb z27.h, z27.b, z15.b\n"
- ".inst 0x4484438e // smlalb z14.s, p4/M, z28.h, z4.h\n"
- ".inst 0x448247c7 // smlalt z7.s, p4/M, z30.h, z2.h\n"
- ".inst 0x04a975ce // sqrdmulh z14.s, z14.s, z9.s\n"
- "inch x0\n"
- ".inst 0x44824725 // smlalt z5.s, p4/M, z25.h, z2.h\n"
- ".inst 0x44834394 // smlalb z20.s, p4/M, z28.h, z3.h\n"
- "and z21.d, z14.d, z23.d\n"
- "mov x20, x0\n"
- ".inst 0x44834328 // smlalb z8.s, p4/M, z25.h, z3.h\n"
+ "whilelt p2.s, x2, x0\n"
+ ".inst 0x44824728 // smlalt z8.s, p4/M, z25.h, z2.h\n"
+ "mov x19, x2\n"
+ ".inst 0x448343cf // smlalb z15.s, p4/M, z30.h, z3.h\n"
+ "incw x19\n"
+ ".inst 0x45491b7b // usublb z27.h, z27.b, z9.b\n"
+ "whilelt p1.s, x19, x0\n"
+ ".inst 0x448347d2 // smlalt z18.s, p4/M, z30.h, z3.h\n"
+ "whilelt p3.h, x2, x0\n"
+ ".inst 0x44834395 // smlalb z21.s, p4/M, z28.h, z3.h\n"
+ ".inst 0x4483478d // smlalt z13.s, p4/M, z28.h, z3.h\n"
+ ".inst 0x44834325 // smlalb z5.s, p4/M, z25.h, z3.h\n"
+ ".inst 0x44834727 // smlalt z7.s, p4/M, z25.h, z3.h\n"
".inst 0x44834306 // smlalb z6.s, p4/M, z24.h, z3.h\n"
- "asr z21.s, z21.s, #0x1f\n"
- "incw x20\n"
- ".inst 0x4484478a // smlalt z10.s, p4/M, z28.h, z4.h\n"
- ".inst 0x44834787 // smlalt z7.s, p4/M, z28.h, z3.h\n"
- ".inst 0x04bd754a // sqrdmulh z10.s, z10.s, z29.s\n"
- "whilelt p2.s, x0, x1\n"
- ".inst 0x44834730 // smlalt z16.s, p4/M, z25.h, z3.h\n"
- ".inst 0x44834705 // smlalt z5.s, p4/M, z24.h, z3.h\n"
- "and z3.d, z10.d, z22.d\n"
- "whilelt p1.s, x20, x1\n"
- ".inst 0x44844354 // smlalb z20.s, p4/M, z26.h, z4.h\n"
- ".inst 0x44844308 // smlalb z8.s, p4/M, z24.h, z4.h\n"
- ".inst 0x04a97694 // sqrdmulh z20.s, z20.s, z9.s\n"
- "whilelt p3.h, x0, x1\n"
+ ".inst 0x44834708 // smlalt z8.s, p4/M, z24.h, z3.h\n"
+ ".inst 0x4484438f // smlalb z15.s, p4/M, z28.h, z4.h\n"
+ ".inst 0x44844792 // smlalt z18.s, p4/M, z28.h, z4.h\n"
+ ".inst 0x44844355 // smlalb z21.s, p4/M, z26.h, z4.h\n"
+ ".inst 0x4484474d // smlalt z13.s, p4/M, z26.h, z4.h\n"
+ ".inst 0x04aa75ef // sqrdmulh z15.s, z15.s, z10.s\n"
+ ".inst 0x04b47652 // sqrdmulh z18.s, z18.s, z20.s\n"
+ ".inst 0x04aa76b5 // sqrdmulh z21.s, z21.s, z10.s\n"
+ ".inst 0x04b475ad // sqrdmulh z13.s, z13.s, z20.s\n"
+ "and z28.d, z15.d, z29.d\n"
+ "and z26.d, z18.d, z19.d\n"
+ "and z16.d, z21.d, z29.d\n"
+ "asr z28.s, z28.s, #0x1f\n"
+ "asr z26.s, z26.s, #0x1f\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ "sqadd z15.s, z15.s, z28.s\n"
+ "sqadd z18.s, z18.s, z26.s\n"
+ "sqadd z21.s, z21.s, z16.s\n"
+ "and z16.d, z13.d, z19.d\n"
+ ".inst 0x44844305 // smlalb z5.s, p4/M, z24.h, z4.h\n"
+ ".inst 0x44844707 // smlalt z7.s, p4/M, z24.h, z4.h\n"
+ "asr z16.s, z16.s, #0x1f\n"
".inst 0x44844366 // smlalb z6.s, p4/M, z27.h, z4.h\n"
- ".inst 0x44844747 // smlalt z7.s, p4/M, z26.h, z4.h\n"
- ".inst 0x04a97508 // sqrdmulh z8.s, z8.s, z9.s\n"
- ".inst 0x44844710 // smlalt z16.s, p4/M, z24.h, z4.h\n"
- ".inst 0x44844765 // smlalt z5.s, p4/M, z27.h, z4.h\n"
- ".inst 0x04a974c6 // sqrdmulh z6.s, z6.s, z9.s\n"
- "sqadd z14.s, z14.s, z21.s\n"
- "asr z3.s, z3.s, #0x1f\n"
- ".inst 0x448292ee // srshl z14.s, p4/M, z14.s, z23.s\n"
- "and z19.d, z20.d, z23.d\n"
- ".inst 0x04bd74e7 // sqrdmulh z7.s, z7.s, z29.s\n"
- "and z18.d, z8.d, z23.d\n"
- ".inst 0x04bd7610 // sqrdmulh z16.s, z16.s, z29.s\n"
- "and z21.d, z6.d, z23.d\n"
- ".inst 0x04bd74a5 // sqrdmulh z5.s, z5.s, z29.s\n"
- "sqadd z10.s, z10.s, z3.s\n"
- "asr z19.s, z19.s, #0x1f\n"
- ".inst 0x448292ca // srshl z10.s, p4/M, z10.s, z22.s\n"
- "and z1.d, z7.d, z22.d\n"
- "asr z18.s, z18.s, #0x1f\n"
- "and z2.d, z16.d, z22.d\n"
- "asr z21.s, z21.s, #0x1f\n"
- "and z3.d, z5.d, z22.d\n"
- "sqadd z20.s, z20.s, z19.s\n"
- ".inst 0x448292f4 // srshl z20.s, p4/M, z20.s, z23.s\n"
- "asr z1.s, z1.s, #0x1f\n"
- "sqadd z8.s, z8.s, z18.s\n"
- ".inst 0x448292e8 // srshl z8.s, p4/M, z8.s, z23.s\n"
- "asr z2.s, z2.s, #0x1f\n"
- "sqadd z6.s, z6.s, z21.s\n"
- ".inst 0x448292e6 // srshl z6.s, p4/M, z6.s, z23.s\n"
- "asr z3.s, z3.s, #0x1f\n"
- "sqadd z7.s, z7.s, z1.s\n"
- ".inst 0x448292c7 // srshl z7.s, p4/M, z7.s, z22.s\n"
- "sqadd z16.s, z16.s, z2.s\n"
- "sqadd z5.s, z5.s, z3.s\n"
- ".inst 0x448292d0 // srshl z16.s, p4/M, z16.s, z22.s\n"
- ".inst 0x448292c5 // srshl z5.s, p4/M, z5.s, z22.s\n"
- ".inst 0x453041ce // sqxtnb z14.h, z14.s\n"
- ".inst 0x45304294 // sqxtnb z20.h, z20.s\n"
- ".inst 0x45304108 // sqxtnb z8.h, z8.s\n"
- ".inst 0x453040c6 // sqxtnb z6.h, z6.s\n"
- ".inst 0x4530454e // sqxtnt z14.h, z10.s\n"
- ".inst 0x453044f4 // sqxtnt z20.h, z7.s\n"
- ".inst 0x45304608 // sqxtnt z8.h, z16.s\n"
- ".inst 0x453044a6 // sqxtnt z6.h, z5.s\n"
- "sqadd z14.h, z14.h, z12.h\n"
- "sqadd z20.h, z20.h, z12.h\n"
- "smax z14.h, p4/M, z14.h, z13.h\n"
- "smax z20.h, p4/M, z20.h, z13.h\n"
- "sqadd z8.h, z8.h, z12.h\n"
- "sqadd z6.h, z6.h, z12.h\n"
- "smax z8.h, p4/M, z8.h, z13.h\n"
- "smax z6.h, p4/M, z6.h, z13.h\n"
- "smin z14.h, p4/M, z14.h, z11.h\n"
- "smin z20.h, p4/M, z20.h, z11.h\n"
- "st1b { z14.h }, p0, [x3, x8]\n"
- "smin z8.h, p4/M, z8.h, z11.h\n"
- "smin z6.h, p4/M, z6.h, z11.h\n"
- "st1b { z20.h }, p0, [x4, x8]\n"
- "st1b { z8.h }, p0, [x5, x8]\n"
- "st1b { z6.h }, p0, [x6, x8]\n"
- "ld1w { z30.s }, p2/Z, [x14]\n"
- "ld1w { z16.s }, p1/Z, [x14, #1, MUL VL]\n"
- "uzp1 z14.s, z30.s, z16.s\n"
- "ld1sb { z0.h }, p4/Z, [x2]\n"
- "ld1sb { z1.h }, p4/Z, [x2, #1, MUL VL]\n"
- "uzp2 z10.s, z30.s, z16.s\n"
- "addvl x14, x14, #2\n"
- "ld1sb { z2.h }, p4/Z, [x2, #2, MUL VL]\n"
- "ld1sb { z3.h }, p4/Z, [x2, #3, MUL VL]\n"
- "inch x8\n"
- "str x14, [%x[params], %[offsetof_Params_bias]]\n"
- "ld1sb { z4.h }, p4/Z, [x2, #4, MUL VL]\n"
- "ldp x9, x28, [x7, #0x0]\n"
- "mov z20.d, z14.d\n"
- "mov z7.d, z10.d\n"
- "ldp x27, x26, [x7, #0x10]\n"
- "ldp x25, x24, [x7, #0x20]\n"
- "mov z8.d, z14.d\n"
- "mov z16.d, z10.d\n"
- "ldp x23, x22, [x7, #0x30]\n"
- "ldp x21, x20, [x7, #0x40]\n"
- "mov z6.d, z14.d\n"
- "mov z5.d, z10.d\n"
- "ld1b { z31.h }, p3/Z, [x9, x0]\n"
- "ld1b { z30.h }, p3/Z, [x28, x0]\n"
- ".inst 0x45511000 // ssublb z0.h, z0.b, z17.b\n"
- ".inst 0x45511021 // ssublb z1.h, z1.b, z17.b\n"
- "ld1b { z29.h }, p3/Z, [x27, x0]\n"
- "ld1b { z28.h }, p3/Z, [x26, x0]\n"
- ".inst 0x45511042 // ssublb z2.h, z2.b, z17.b\n"
- ".inst 0x45511063 // ssublb z3.h, z3.b, z17.b\n"
- "ld1b { z27.h }, p3/Z, [x25, x0]\n"
- "ld1b { z23.h }, p3/Z, [x24, x0]\n"
- ".inst 0x45511084 // ssublb z4.h, z4.b, z17.b\n"
- ".inst 0x454f1bff // usublb z31.h, z31.b, z15.b\n"
- "ld1b { z25.h }, p3/Z, [x23, x0]\n"
- "ld1b { z24.h }, p3/Z, [x22, x0]\n"
- ".inst 0x454f1bde // usublb z30.h, z30.b, z15.b\n"
- ".inst 0x454f1bbd // usublb z29.h, z29.b, z15.b\n"
- "ld1b { z26.h }, p3/Z, [x21, x0]\n"
- "ld1b { z22.h }, p3/Z, [x20, x0]\n"
- ".inst 0x454f1b9c // usublb z28.h, z28.b, z15.b\n"
- ".inst 0x454f1b7b // usublb z27.h, z27.b, z15.b\n"
- ".inst 0x454f1af7 // usublb z23.h, z23.b, z15.b\n"
- ".inst 0x454f1b39 // usublb z25.h, z25.b, z15.b\n"
- ".inst 0x454f1b18 // usublb z24.h, z24.b, z15.b\n"
- ".inst 0x454f1b5a // usublb z26.h, z26.b, z15.b\n"
- ".inst 0x454f1ad6 // usublb z22.h, z22.b, z15.b\n"
+ ".inst 0x04aa74a5 // sqrdmulh z5.s, z5.s, z10.s\n"
+ "sqadd z13.s, z13.s, z16.s\n"
+ ".inst 0x04b474e7 // sqrdmulh z7.s, z7.s, z20.s\n"
+ ".inst 0x04aa74c6 // sqrdmulh z6.s, z6.s, z10.s\n"
+ "and z16.d, z5.d, z29.d\n"
+ ".inst 0x44844768 // smlalt z8.s, p4/M, z27.h, z4.h\n"
+ "and z25.d, z7.d, z19.d\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ "and z26.d, z6.d, z29.d\n"
+ "asr z25.s, z25.s, #0x1f\n"
+ "sqadd z5.s, z5.s, z16.s\n"
+ "asr z26.s, z26.s, #0x1f\n"
+ "sqadd z7.s, z7.s, z25.s\n"
+ ".inst 0x04b47508 // sqrdmulh z8.s, z8.s, z20.s\n"
+ "sqadd z6.s, z6.s, z26.s\n"
+ ".inst 0x448293af // srshl z15.s, p4/M, z15.s, z29.s\n"
+ ".inst 0x44829272 // srshl z18.s, p4/M, z18.s, z19.s\n"
+ "and z16.d, z8.d, z19.d\n"
+ ".inst 0x448293b5 // srshl z21.s, p4/M, z21.s, z29.s\n"
+ "add z15.s, z15.s, z17.s\n"
+ "add z18.s, z18.s, z17.s\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ "add z21.s, z21.s, z17.s\n"
+ "smin z15.s, p4/M, z15.s, z11.s\n"
+ "sqadd z8.s, z8.s, z16.s\n"
+ "smin z18.s, p4/M, z18.s, z11.s\n"
+ "smin z21.s, p4/M, z21.s, z11.s\n"
+ "smax z15.s, p4/M, z15.s, z12.s\n"
+ ".inst 0x4482926d // srshl z13.s, p4/M, z13.s, z19.s\n"
+ "smax z18.s, p4/M, z18.s, z12.s\n"
+ "smax z21.s, p4/M, z21.s, z12.s\n"
+ ".inst 0x448293a5 // srshl z5.s, p4/M, z5.s, z29.s\n"
+ "add z13.s, z13.s, z17.s\n"
+ "trn1 z15.h, z15.h, z18.h\n"
+ "st1b { z15.h }, p0, [x7, x3]\n"
+ "add z5.s, z5.s, z17.s\n"
+ "smin z13.s, p4/M, z13.s, z11.s\n"
+ ".inst 0x44829267 // srshl z7.s, p4/M, z7.s, z19.s\n"
+ ".inst 0x448293a6 // srshl z6.s, p4/M, z6.s, z29.s\n"
+ "smin z5.s, p4/M, z5.s, z11.s\n"
+ "smax z13.s, p4/M, z13.s, z12.s\n"
+ "add z7.s, z7.s, z17.s\n"
+ "add z6.s, z6.s, z17.s\n"
+ "smax z5.s, p4/M, z5.s, z12.s\n"
+ "trn1 z21.h, z21.h, z13.h\n"
+ "st1b { z21.h }, p0, [x8, x3]\n"
+ "smin z7.s, p4/M, z7.s, z11.s\n"
+ "smin z6.s, p4/M, z6.s, z11.s\n"
+ ".inst 0x44829268 // srshl z8.s, p4/M, z8.s, z19.s\n"
+ "smax z7.s, p4/M, z7.s, z12.s\n"
+ "smax z6.s, p4/M, z6.s, z12.s\n"
+ "add z8.s, z8.s, z17.s\n"
+ "trn1 z5.h, z5.h, z7.h\n"
+ "st1b { z5.h }, p0, [x17, x3]\n"
+ "smin z8.s, p4/M, z8.s, z11.s\n"
+ "smax z8.s, p4/M, z8.s, z12.s\n"
+ "trn1 z6.h, z6.h, z8.h\n"
+ "st1b { z6.h }, p0, [x16, x3]\n"
+ "inch x3\n"
+ "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "ld1w { z4.s }, p2/Z, [x19]\n"
+ "ld1w { z16.s }, p1/Z, [x19, #1, MUL VL]\n"
+ "uzp1 z15.s, z4.s, z16.s\n"
+ "addvl x19, x19, #2\n"
+ "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+ "uzp2 z18.s, z4.s, z16.s\n"
+ "mov z21.d, z15.d\n"
+ "ld1sb { z0.h }, p4/Z, [x1]\n"
+ "mov z5.d, z15.d\n"
+ "ld1sb { z1.h }, p4/Z, [x1, #1, MUL VL]\n"
+ "mov z13.d, z18.d\n"
+ "ld1sb { z2.h }, p4/Z, [x1, #2, MUL VL]\n"
+ "mov z7.d, z18.d\n"
+ "ld1sb { z3.h }, p4/Z, [x1, #3, MUL VL]\n"
+ "mov z6.d, z15.d\n"
+ "ld1sb { z4.h }, p4/Z, [x1, #4, MUL VL]\n"
+ "mov z8.d, z18.d\n"
+ "ldp x28, x27, [x5, #0x0]\n"
+ ".inst 0x454e1000 // ssublb z0.h, z0.b, z14.b\n"
+ "ldp x26, x25, [x5, #0x10]\n"
+ ".inst 0x454e1021 // ssublb z1.h, z1.b, z14.b\n"
+ ".inst 0x454e1042 // ssublb z2.h, z2.b, z14.b\n"
+ "ldp x24, x23, [x5, #0x20]\n"
+ ".inst 0x454e1063 // ssublb z3.h, z3.b, z14.b\n"
+ ".inst 0x454e1084 // ssublb z4.h, z4.b, z14.b\n"
+ "ldp x22, x21, [x5, #0x30]\n"
+ "ldp x20, x19, [x5, #0x40]\n"
+ "ld1b { z31.h }, p3/Z, [x28, x2]\n"
+ ".inst 0x45491bff // usublb z31.h, z31.b, z9.b\n"
+ "ld1b { z30.h }, p3/Z, [x27, x2]\n"
+ "ld1b { z29.h }, p3/Z, [x26, x2]\n"
+ ".inst 0x45491bde // usublb z30.h, z30.b, z9.b\n"
+ "ld1b { z28.h }, p3/Z, [x25, x2]\n"
+ ".inst 0x45491bbd // usublb z29.h, z29.b, z9.b\n"
+ "ld1b { z27.h }, p3/Z, [x24, x2]\n"
+ "ld1b { z23.h }, p3/Z, [x23, x2]\n"
+ ".inst 0x45491b9c // usublb z28.h, z28.b, z9.b\n"
+ "ld1b { z25.h }, p3/Z, [x22, x2]\n"
+ "ld1b { z24.h }, p3/Z, [x21, x2]\n"
+ ".inst 0x45491b7b // usublb z27.h, z27.b, z9.b\n"
+ ".inst 0x45491af7 // usublb z23.h, z23.b, z9.b\n"
+ "ld1b { z26.h }, p3/Z, [x20, x2]\n"
+ "ld1b { z22.h }, p3/Z, [x19, x2]\n"
+ ".inst 0x45491b39 // usublb z25.h, z25.b, z9.b\n"
+ ".inst 0x45491b18 // usublb z24.h, z24.b, z9.b\n"
+ ".inst 0x45491b5a // usublb z26.h, z26.b, z9.b\n"
+ ".inst 0x45491ad6 // usublb z22.h, z22.b, z9.b\n"
"b.any 1b\n"
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
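
The hunk above restores x19 to this quantized SVE depthwise kernel's register pool; the ssublb/usublb encodings it reloads widen each 8-bit lane to 16 bits while subtracting the matching quantization zero point in the same instruction (ssublb with the weight offset in z14, usublb with the activation offset in z9). Below is a minimal scalar model of that widen-and-subtract step; the function names are illustrative only, not Compute Library API:

#include <cstdint>
#include <cstdio>

// usublb-style step: widen an unsigned activation byte and remove a_offset.
static int16_t widen_sub_u8(uint8_t x, int16_t a_offset) {
    return static_cast<int16_t>(static_cast<int16_t>(x) - a_offset);
}

// ssublb-style step: widen a signed weight byte and remove b_offset.
static int16_t widen_sub_s8(int8_t w, int16_t b_offset) {
    return static_cast<int16_t>(static_cast<int16_t>(w) - b_offset);
}

int main() {
    // One lane of the s8 weight x u8 activation product the kernel feeds
    // into its 32-bit accumulators.
    int32_t acc = widen_sub_u8(200, 128) * widen_sub_s8(-3, 0);
    std::printf("%d\n", acc);  // (200 - 128) * (-3 - 0) = -216
    return 0;
}

Folding the offset into the widening step keeps the subsequent multiply-accumulates in plain 16/32-bit integer arithmetic.
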
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
index 647103d3a4..4d71f94f1a 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -82,90 +82,90 @@ void a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(
pad_left, pad_top, pad_right, pad_bottom);
__asm__ __volatile__(
- "ldr d7, [%x[args], %[offsetof_rescale]]\n"
- "ldr x3, [%x[args], %[offsetof_n_channels]]\n"
- "cmp x3, #0x8\n"
- "mov x4, #0x0\n"
- "ldr x21, [%x[args], %[offsetof_outptrs]]\n"
- "ldr x20, [%x[args], %[offsetof_inptrs]]\n"
+ "ldr x4, [%x[args], %[offsetof_n_channels]]\n"
+ "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
+ "cmp x4, #0x8\n"
"mov x5, #0x0\n"
- "ldp x6, x7, [x21, #0x0]\n"
- "ldp x8, x17, [x21, #0x10]\n"
- "ldp x16, x15, [x20, #0x0]\n"
- "ldp x14, x13, [x20, #0x10]\n"
- "ldp x12, x11, [x20, #0x20]\n"
- "ldp x10, x9, [x20, #0x30]\n"
- "ldp x28, x27, [x20, #0x40]\n"
- "ldp x26, x25, [x20, #0x50]\n"
- "ldp x24, x23, [x20, #0x60]\n"
- "ldp x22, x21, [x20, #0x70]\n"
+ "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+ "ldp x6, x7, [x20, #0x0]\n"
+ "mov x8, #0x0\n"
+ "ldp x17, x16, [x20, #0x10]\n"
+ "ldp x15, x14, [x19, #0x0]\n"
+ "ldp x13, x12, [x19, #0x10]\n"
+ "ldp x11, x10, [x19, #0x20]\n"
+ "ldp x9, x28, [x19, #0x30]\n"
+ "ldp x27, x26, [x19, #0x40]\n"
+ "ldp x25, x24, [x19, #0x50]\n"
+ "ldp x23, x22, [x19, #0x60]\n"
+ "ldp x21, x20, [x19, #0x70]\n"
+ "ldr d7, [%x[args], %[offsetof_rescale]]\n"
"blt 3f\n"
- "ldr q6, [x11, x4]\n"
- "ldr q5, [x10, x4]\n"
- "lsr x20, x3, #0x3\n"
- "sub x3, x3, x20, LSL #3\n"
- "ldr q4, [x27, x4]\n"
- "ldr q3, [x26, x4]\n"
- "subs x20, x20, #0x1\n"
- "ldr q2, [x15, x4]\n"
- "ldr q1, [x14, x4]\n"
- "ldr q0, [x12, x4]\n"
- "ldr q31, [x28, x4]\n"
- "ldr q30, [x9, x4]\n"
- "ldr q29, [x25, x4]\n"
- "ldr q28, [x23, x4]\n"
- "ldr q27, [x22, x4]\n"
- "ldr q26, [x16, x4]\n"
- "ldr q25, [x13, x4]\n"
- "ldr q24, [x24, x4]\n"
- "ldr q23, [x21, x4]\n"
- "add x4, x4, #0x10\n"
+ "lsr x19, x4, #0x3\n"
+ "sub x4, x4, x19, LSL #3\n"
+ "ldr q6, [x10, x5]\n"
+ "ldr q5, [x9, x5]\n"
+ "subs x19, x19, #0x1\n"
+ "ldr q4, [x26, x5]\n"
+ "ldr q3, [x25, x5]\n"
+ "ldr q2, [x14, x5]\n"
+ "ldr q1, [x13, x5]\n"
+ "ldr q0, [x11, x5]\n"
+ "ldr q31, [x27, x5]\n"
+ "ldr q30, [x28, x5]\n"
+ "ldr q29, [x24, x5]\n"
+ "ldr q28, [x22, x5]\n"
+ "ldr q27, [x21, x5]\n"
+ "ldr q26, [x15, x5]\n"
+ "ldr q25, [x12, x5]\n"
+ "ldr q24, [x23, x5]\n"
+ "ldr q23, [x20, x5]\n"
+ "add x5, x5, #0x10\n"
"beq 2f\n"
"1:" // Vector: Loop
"fadd v17.8h, v6.8h, v5.8h\n"
- "ldr q6, [x11, x4]\n"
- "ldr q5, [x10, x4]\n"
"fadd v16.8h, v4.8h, v3.8h\n"
- "ldr q4, [x27, x4]\n"
- "ldr q3, [x26, x4]\n"
+ "subs x19, x19, #0x1\n"
+ "ldr q6, [x10, x5]\n"
"fadd v19.8h, v17.8h, v16.8h\n"
"fadd v18.8h, v2.8h, v1.8h\n"
- "ldr q2, [x15, x4]\n"
- "ldr q1, [x14, x4]\n"
+ "ldr q5, [x9, x5]\n"
+ "ldr q4, [x26, x5]\n"
"fadd v17.8h, v0.8h, v31.8h\n"
"fadd v22.8h, v30.8h, v29.8h\n"
- "ldr q0, [x12, x4]\n"
- "ldr q31, [x28, x4]\n"
+ "ldr q3, [x25, x5]\n"
+ "ldr q2, [x14, x5]\n"
"fadd v16.8h, v28.8h, v27.8h\n"
"fadd v21.8h, v18.8h, v19.8h\n"
- "ldr q30, [x9, x4]\n"
- "ldr q29, [x25, x4]\n"
+ "ldr q1, [x13, x5]\n"
+ "ldr q0, [x11, x5]\n"
"fadd v20.8h, v16.8h, v19.8h\n"
"fadd v19.8h, v26.8h, v17.8h\n"
- "ldr q28, [x23, x4]\n"
- "ldr q27, [x22, x4]\n"
+ "ldr q31, [x27, x5]\n"
+ "ldr q30, [x28, x5]\n"
"fadd v18.8h, v25.8h, v22.8h\n"
"fadd v17.8h, v24.8h, v17.8h\n"
- "ldr q26, [x16, x4]\n"
- "ldr q25, [x13, x4]\n"
+ "ldr q29, [x24, x5]\n"
+ "ldr q28, [x22, x5]\n"
"fadd v16.8h, v23.8h, v22.8h\n"
- "fadd v19.8h, v21.8h, v19.8h\n"
- "ldr q24, [x24, x4]\n"
- "ldr q23, [x21, x4]\n"
- "fadd v18.8h, v21.8h, v18.8h\n"
+ "fadd v19.8h, v19.8h, v21.8h\n"
+ "ldr q27, [x21, x5]\n"
+ "ldr q26, [x15, x5]\n"
+ "fadd v18.8h, v18.8h, v21.8h\n"
"fadd v17.8h, v17.8h, v20.8h\n"
+ "ldr q25, [x12, x5]\n"
+ "ldr q24, [x23, x5]\n"
"fadd v16.8h, v16.8h, v20.8h\n"
- "subs x20, x20, #0x1\n"
"fmul v19.8h, v19.8h, v7.h[0]\n"
- "add x4, x4, #0x10\n"
+ "ldr q23, [x20, x5]\n"
+ "add x5, x5, #0x10\n"
"fmul v18.8h, v18.8h, v7.h[1]\n"
"fmul v17.8h, v17.8h, v7.h[2]\n"
- "str q19, [x6, x5]\n"
+ "str q19, [x6, x8]\n"
"fmul v16.8h, v16.8h, v7.h[3]\n"
- "str q18, [x7, x5]\n"
- "str q17, [x8, x5]\n"
- "str q16, [x17, x5]\n"
- "add x5, x5, #0x10\n"
+ "str q18, [x7, x8]\n"
+ "str q17, [x17, x8]\n"
+ "str q16, [x16, x8]\n"
+ "add x8, x8, #0x10\n"
"bgt 1b\n"
"2:" // Vector: Tail
"fadd v17.8h, v6.8h, v5.8h\n"
@@ -181,70 +181,70 @@ void a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(
"fadd v18.8h, v25.8h, v22.8h\n"
"fadd v17.8h, v24.8h, v17.8h\n"
"fadd v16.8h, v23.8h, v22.8h\n"
- "fadd v19.8h, v21.8h, v19.8h\n"
- "fadd v18.8h, v21.8h, v18.8h\n"
+ "fadd v19.8h, v19.8h, v21.8h\n"
+ "fadd v18.8h, v18.8h, v21.8h\n"
"fadd v17.8h, v17.8h, v20.8h\n"
"fadd v16.8h, v16.8h, v20.8h\n"
"fmul v19.8h, v19.8h, v7.h[0]\n"
- "str q19, [x6, x5]\n"
+ "str q19, [x6, x8]\n"
"fmul v18.8h, v18.8h, v7.h[1]\n"
"fmul v17.8h, v17.8h, v7.h[2]\n"
- "str q18, [x7, x5]\n"
+ "str q18, [x7, x8]\n"
"fmul v16.8h, v16.8h, v7.h[3]\n"
- "str q17, [x8, x5]\n"
- "str q16, [x17, x5]\n"
- "add x5, x5, #0x10\n"
- "cbz x3, 4f\n"
+ "str q17, [x17, x8]\n"
+ "str q16, [x16, x8]\n"
+ "add x8, x8, #0x10\n"
+ "cbz x4, 4f\n"
"3:" // Oddments
- "ldr h6, [x11, x4]\n"
- "ldr h5, [x10, x4]\n"
+ "ldr h6, [x10, x5]\n"
+ "ldr h5, [x9, x5]\n"
"fadd v17.8h, v6.8h, v5.8h\n"
- "subs x3, x3, #0x1\n"
- "ldr h4, [x27, x4]\n"
- "ldr h3, [x26, x4]\n"
+ "subs x4, x4, #0x1\n"
+ "ldr h4, [x26, x5]\n"
+ "ldr h3, [x25, x5]\n"
"fadd v16.8h, v4.8h, v3.8h\n"
"fadd v19.8h, v17.8h, v16.8h\n"
- "ldr h2, [x15, x4]\n"
- "ldr h1, [x14, x4]\n"
+ "ldr h2, [x14, x5]\n"
+ "ldr h1, [x13, x5]\n"
"fadd v18.8h, v2.8h, v1.8h\n"
"fadd v21.8h, v18.8h, v19.8h\n"
- "ldr h0, [x12, x4]\n"
- "ldr h31, [x28, x4]\n"
+ "ldr h0, [x11, x5]\n"
+ "ldr h31, [x27, x5]\n"
"fadd v17.8h, v0.8h, v31.8h\n"
- "ldr h30, [x9, x4]\n"
- "ldr h29, [x25, x4]\n"
+ "ldr h30, [x28, x5]\n"
+ "ldr h29, [x24, x5]\n"
"fadd v22.8h, v30.8h, v29.8h\n"
- "ldr h28, [x23, x4]\n"
- "ldr h27, [x22, x4]\n"
+ "ldr h28, [x22, x5]\n"
+ "ldr h27, [x21, x5]\n"
"fadd v16.8h, v28.8h, v27.8h\n"
"fadd v20.8h, v16.8h, v19.8h\n"
- "ldr h26, [x16, x4]\n"
- "ldr h25, [x13, x4]\n"
+ "ldr h26, [x15, x5]\n"
+ "ldr h25, [x12, x5]\n"
"fadd v19.8h, v26.8h, v17.8h\n"
"fadd v18.8h, v25.8h, v22.8h\n"
- "ldr h24, [x24, x4]\n"
- "ldr h23, [x21, x4]\n"
+ "ldr h24, [x23, x5]\n"
+ "ldr h23, [x20, x5]\n"
"fadd v17.8h, v24.8h, v17.8h\n"
"fadd v16.8h, v23.8h, v22.8h\n"
- "fadd v19.8h, v21.8h, v19.8h\n"
- "fadd v18.8h, v21.8h, v18.8h\n"
- "add x4, x4, #0x2\n"
+ "fadd v19.8h, v19.8h, v21.8h\n"
+ "fadd v18.8h, v18.8h, v21.8h\n"
+ "add x5, x5, #0x2\n"
"fadd v17.8h, v17.8h, v20.8h\n"
"fadd v16.8h, v16.8h, v20.8h\n"
"fmul v19.8h, v19.8h, v7.h[0]\n"
"fmul v18.8h, v18.8h, v7.h[1]\n"
- "str h19, [x6, x5]\n"
+ "str h19, [x6, x8]\n"
"fmul v17.8h, v17.8h, v7.h[2]\n"
"fmul v16.8h, v16.8h, v7.h[3]\n"
- "str h18, [x7, x5]\n"
- "str h17, [x8, x5]\n"
- "str h16, [x17, x5]\n"
- "add x5, x5, #0x2\n"
+ "str h18, [x7, x8]\n"
+ "str h17, [x17, x8]\n"
+ "str h16, [x16, x8]\n"
+ "add x8, x8, #0x2\n"
"bgt 3b\n"
"4:" // End
:
: [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs)), [offsetof_rescale] "I" (offsetof(KernelArgs, rescale_vals))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
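
This kernel averages four overlapping 3x3 fp16 windows over a shared 4x4 patch, reusing the partial sums v21 and v20 between neighbouring outputs and finishing each output with its own lane of the rescale vector (v7.h[0..3]). A scalar sketch of the arithmetic for one channel, assuming a fully interior patch so every window uses the uniform 1/9 rescale:

#include <cstdio>

int main() {
    // One channel: 4x4 input patch, four overlapping 3x3 windows.
    float in[4][4] = {{1, 2, 3, 4}, {5, 6, 7, 8},
                      {9, 10, 11, 12}, {13, 14, 15, 16}};
    float rescale[4] = {1.f / 9, 1.f / 9, 1.f / 9, 1.f / 9};  // interior: all 1/9
    for (int oy = 0; oy < 2; ++oy)
        for (int ox = 0; ox < 2; ++ox) {
            float sum = 0.f;
            for (int ky = 0; ky < 3; ++ky)
                for (int kx = 0; kx < 3; ++kx)
                    sum += in[oy + ky][ox + kx];
            std::printf("out[%d][%d] = %g\n", oy, ox, sum * rescale[oy * 2 + ox]);
        }
    return 0;
}
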
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_generic_depthfirst/generic.cpp
index 44adb4ffcf..fe6f4c20f4 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,306 +43,306 @@ void a64_fp16_nhwc_avg_generic_depthfirst_impl(
const auto rescale_value = static_cast<__fp16>(1.0f / static_cast<float>(window_cells));
__asm__ __volatile__(
- "ld1r { v9.8h }, [%x[rescale_ptr]]\n"
"cmp %x[n_channels], #0x20\n"
- "mov x9, #0x0\n"
- "mov x28, #0x10\n" // cntb _, ALL, #1
- "mov x27, #0x20\n" // cntb _, ALL, #2
- "mov x26, #0x30\n" // cntb _, ALL, #3
+ "ld1r { v7.8h }, [%x[rescale_ptr]]\n"
+ "mov x28, #0x0\n"
+ "mov x27, #0x10\n" // cntb _, ALL, #1
+ "mov x26, #0x20\n" // cntb _, ALL, #2
+ "mov x25, #0x30\n" // cntb _, ALL, #3
"blt 7f\n"
"1:" // 4-vectors of channels
- "lsr x25, %x[n_valid_cells], #0x2\n"
- "movi v8.16b, #0x0\n"
- "movi v7.16b, #0x0\n"
- "mov x20, %x[inptrs]\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"movi v6.16b, #0x0\n"
"movi v5.16b, #0x0\n"
- "cbz x25, 4f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldr q4, [x24, x9]\n"
- "subs x25, x25, #0x1\n"
- "ldr q3, [x23, x9]\n"
- "ldr q2, [x24, x28]\n"
- "ldr q1, [x23, x28]\n"
- "ldr q0, [x24, x27]\n"
- "ldr q31, [x23, x27]\n"
- "ldr q30, [x24, x26]\n"
- "ldr q29, [x23, x26]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ldr q28, [x22, x9]\n"
- "ldr q22, [x21, x9]\n"
- "ldr q27, [x22, x28]\n"
- "ldr q21, [x21, x28]\n"
- "ldr q26, [x22, x27]\n"
- "ldr q20, [x21, x27]\n"
- "ldr q25, [x22, x26]\n"
- "ldr q24, [x21, x26]\n"
+ "mov x19, %x[inptrs]\n"
+ "movi v4.16b, #0x0\n"
+ "movi v3.16b, #0x0\n"
+ "cbz x24, 4f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "ldr q2, [x23, x28]\n"
+ "ldr q1, [x22, x28]\n"
+ "ldr q0, [x21, x28]\n"
+ "ldr q31, [x20, x28]\n"
+ "ldr q30, [x23, x27]\n"
+ "ldr q22, [x22, x27]\n"
+ "ldr q29, [x21, x27]\n"
+ "ldr q28, [x20, x27]\n"
+ "ldr q27, [x23, x26]\n"
+ "ldr q21, [x22, x26]\n"
+ "ldr q26, [x21, x26]\n"
+ "ldr q17, [x20, x26]\n"
+ "ldr q25, [x23, x25]\n"
+ "ldr q20, [x22, x25]\n"
+ "ldr q24, [x21, x25]\n"
+ "ldr q16, [x20, x25]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
- "fadd v23.8h, v4.8h, v3.8h\n"
- "fadd v19.8h, v28.8h, v22.8h\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldr q4, [x24, x9]\n"
- "ldr q3, [x23, x9]\n"
- "fadd v22.8h, v2.8h, v1.8h\n"
- "ldr q2, [x24, x28]\n"
- "fadd v18.8h, v27.8h, v21.8h\n"
- "ldr q1, [x23, x28]\n"
- "fadd v21.8h, v0.8h, v31.8h\n"
- "ldr q0, [x24, x27]\n"
- "fadd v17.8h, v26.8h, v20.8h\n"
- "ldr q31, [x23, x27]\n"
- "fadd v20.8h, v30.8h, v29.8h\n"
- "ldr q30, [x24, x26]\n"
- "fadd v16.8h, v25.8h, v24.8h\n"
- "ldr q29, [x23, x26]\n"
+ "fadd v23.8h, v2.8h, v1.8h\n"
+ "fadd v19.8h, v0.8h, v31.8h\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "fadd v22.8h, v30.8h, v22.8h\n"
+ "fadd v18.8h, v29.8h, v28.8h\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "fadd v21.8h, v27.8h, v21.8h\n"
+ "fadd v17.8h, v26.8h, v17.8h\n"
+ "ldr q2, [x23, x28]\n"
+ "ldr q1, [x22, x28]\n"
+ "fadd v20.8h, v25.8h, v20.8h\n"
+ "fadd v16.8h, v24.8h, v16.8h\n"
+ "ldr q0, [x21, x28]\n"
+ "ldr q31, [x20, x28]\n"
"fadd v19.8h, v23.8h, v19.8h\n"
"fadd v18.8h, v22.8h, v18.8h\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "ldr q28, [x22, x9]\n"
- "ldr q22, [x21, x9]\n"
+ "ldr q30, [x23, x27]\n"
+ "ldr q22, [x22, x27]\n"
"fadd v17.8h, v21.8h, v17.8h\n"
"fadd v16.8h, v20.8h, v16.8h\n"
- "ldr q27, [x22, x28]\n"
- "ldr q21, [x21, x28]\n"
- "subs x25, x25, #0x1\n"
- "fadd v8.8h, v8.8h, v19.8h\n"
- "ldr q26, [x22, x27]\n"
- "ldr q20, [x21, x27]\n"
- "fadd v7.8h, v7.8h, v18.8h\n"
- "fadd v6.8h, v6.8h, v17.8h\n"
- "ldr q25, [x22, x26]\n"
- "ldr q24, [x21, x26]\n"
- "fadd v5.8h, v5.8h, v16.8h\n"
- "add x20, x20, #0x20\n"
+ "ldr q29, [x21, x27]\n"
+ "ldr q28, [x20, x27]\n"
+ "fadd v6.8h, v6.8h, v19.8h\n"
+ "fadd v5.8h, v5.8h, v18.8h\n"
+ "ldr q27, [x23, x26]\n"
+ "ldr q21, [x22, x26]\n"
+ "fadd v4.8h, v4.8h, v17.8h\n"
+ "fadd v3.8h, v3.8h, v16.8h\n"
+ "ldr q26, [x21, x26]\n"
+ "ldr q17, [x20, x26]\n"
+ "ldr q25, [x23, x25]\n"
+ "ldr q20, [x22, x25]\n"
+ "ldr q24, [x21, x25]\n"
+ "ldr q16, [x20, x25]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
- "fadd v23.8h, v4.8h, v3.8h\n"
- "fadd v19.8h, v28.8h, v22.8h\n"
- "fadd v22.8h, v2.8h, v1.8h\n"
- "fadd v18.8h, v27.8h, v21.8h\n"
- "fadd v21.8h, v0.8h, v31.8h\n"
- "fadd v17.8h, v26.8h, v20.8h\n"
- "fadd v20.8h, v30.8h, v29.8h\n"
- "fadd v16.8h, v25.8h, v24.8h\n"
+ "fadd v23.8h, v2.8h, v1.8h\n"
+ "fadd v19.8h, v0.8h, v31.8h\n"
+ "fadd v22.8h, v30.8h, v22.8h\n"
+ "fadd v18.8h, v29.8h, v28.8h\n"
+ "fadd v21.8h, v27.8h, v21.8h\n"
+ "fadd v17.8h, v26.8h, v17.8h\n"
+ "fadd v20.8h, v25.8h, v20.8h\n"
+ "fadd v16.8h, v24.8h, v16.8h\n"
"fadd v19.8h, v23.8h, v19.8h\n"
"fadd v18.8h, v22.8h, v18.8h\n"
"fadd v17.8h, v21.8h, v17.8h\n"
"fadd v16.8h, v20.8h, v16.8h\n"
- "fadd v8.8h, v8.8h, v19.8h\n"
- "fadd v7.8h, v7.8h, v18.8h\n"
- "fadd v6.8h, v6.8h, v17.8h\n"
- "fadd v5.8h, v5.8h, v16.8h\n"
+ "fadd v6.8h, v6.8h, v19.8h\n"
+ "fadd v5.8h, v5.8h, v18.8h\n"
+ "fadd v4.8h, v4.8h, v17.8h\n"
+ "fadd v3.8h, v3.8h, v16.8h\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ldr q4, [x24, x9]\n"
- "subs x21, x21, #0x1\n"
- "fadd v8.8h, v8.8h, v4.8h\n"
- "ldr q2, [x24, x28]\n"
- "ldr q0, [x24, x27]\n"
- "fadd v7.8h, v7.8h, v2.8h\n"
- "fadd v6.8h, v6.8h, v0.8h\n"
- "ldr q30, [x24, x26]\n"
+ "ldr x23, [x19], #0x8\n"
+ "ldr q2, [x23, x28]\n"
+ "subs x20, x20, #0x1\n"
+ "fadd v6.8h, v6.8h, v2.8h\n"
+ "ldr q30, [x23, x27]\n"
+ "ldr q27, [x23, x26]\n"
"fadd v5.8h, v5.8h, v30.8h\n"
+ "fadd v4.8h, v4.8h, v27.8h\n"
+ "ldr q25, [x23, x25]\n"
+ "fadd v3.8h, v3.8h, v25.8h\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
"sub %x[n_channels], %x[n_channels], #0x20\n"
"cmp %x[n_channels], #0x20\n"
- "fmul v8.8h, v8.8h, v9.8h\n"
- "fmul v7.8h, v7.8h, v9.8h\n"
- "fmul v6.8h, v6.8h, v9.8h\n"
- "fmul v5.8h, v5.8h, v9.8h\n"
- "str q8, [%x[outptr], x9]\n"
- "add x9, x9, #0x40\n"
- "str q7, [%x[outptr], x28]\n"
+ "fmul v6.8h, v6.8h, v7.8h\n"
+ "fmul v5.8h, v5.8h, v7.8h\n"
+ "fmul v4.8h, v4.8h, v7.8h\n"
+ "fmul v3.8h, v3.8h, v7.8h\n"
+ "str q6, [%x[outptr], x28]\n"
"add x28, x28, #0x40\n"
- "str q6, [%x[outptr], x27]\n"
+ "str q5, [%x[outptr], x27]\n"
"add x27, x27, #0x40\n"
- "str q5, [%x[outptr], x26]\n"
+ "str q4, [%x[outptr], x26]\n"
"add x26, x26, #0x40\n"
+ "str q3, [%x[outptr], x25]\n"
+ "add x25, x25, #0x40\n"
"bge 1b\n"
"cbz %x[n_channels], 31f\n"
"7:" // Single vector of channels
"cmp %x[n_channels], #0x8\n"
"blt 14f\n"
"8:" // Single vector of channels: Loop
- "lsr x25, %x[n_valid_cells], #0x2\n"
- "movi v8.16b, #0x0\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 11f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldr q4, [x24, x9]\n"
- "subs x25, x25, #0x1\n"
- "ldr q3, [x23, x9]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ldr q28, [x22, x9]\n"
- "ldr q22, [x21, x9]\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
+ "movi v6.16b, #0x0\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 11f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "ldr q2, [x23, x28]\n"
+ "ldr q1, [x22, x28]\n"
+ "ldr q0, [x21, x28]\n"
+ "ldr q31, [x20, x28]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
- "fadd v23.8h, v4.8h, v3.8h\n"
- "fadd v19.8h, v28.8h, v22.8h\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldr q4, [x24, x9]\n"
- "ldr q3, [x23, x9]\n"
+ "fadd v23.8h, v2.8h, v1.8h\n"
+ "fadd v19.8h, v0.8h, v31.8h\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
"fadd v19.8h, v23.8h, v19.8h\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "subs x25, x25, #0x1\n"
- "ldr q28, [x22, x9]\n"
- "ldr q22, [x21, x9]\n"
- "fadd v8.8h, v8.8h, v19.8h\n"
- "add x20, x20, #0x20\n"
+ "subs x24, x24, #0x1\n"
+ "fadd v6.8h, v6.8h, v19.8h\n"
+ "add x19, x19, #0x20\n"
+ "ldr q2, [x23, x28]\n"
+ "ldr q1, [x22, x28]\n"
+ "ldr q0, [x21, x28]\n"
+ "ldr q31, [x20, x28]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
- "fadd v23.8h, v4.8h, v3.8h\n"
- "fadd v19.8h, v28.8h, v22.8h\n"
+ "fadd v23.8h, v2.8h, v1.8h\n"
+ "fadd v19.8h, v0.8h, v31.8h\n"
"fadd v19.8h, v23.8h, v19.8h\n"
- "fadd v8.8h, v8.8h, v19.8h\n"
+ "fadd v6.8h, v6.8h, v19.8h\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ldr q4, [x24, x9]\n"
- "subs x21, x21, #0x1\n"
- "fadd v8.8h, v8.8h, v4.8h\n"
+ "ldr x23, [x19], #0x8\n"
+ "ldr q2, [x23, x28]\n"
+ "subs x20, x20, #0x1\n"
+ "fadd v6.8h, v6.8h, v2.8h\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
"sub %x[n_channels], %x[n_channels], #0x8\n"
"cmp %x[n_channels], #0x8\n"
- "fmul v8.8h, v8.8h, v9.8h\n"
- "str q8, [%x[outptr], x9]\n"
- "add x9, x9, #0x10\n"
+ "fmul v6.8h, v6.8h, v7.8h\n"
+ "str q6, [%x[outptr], x28]\n"
+ "add x28, x28, #0x10\n"
"bge 8b\n"
"cbz %x[n_channels], 31f\n"
"14:" // Oddments
- "lsr x25, %x[n_valid_cells], #0x2\n"
- "add %x[outptr], %x[outptr], x9\n"
- "movi v8.16b, #0x0\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 20f\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
+ "add %x[outptr], %x[outptr], x28\n"
+ "movi v6.16b, #0x0\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 20f\n"
"15:" // Oddments: 4 inputs loop
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "add x24, x24, x9\n"
- "add x23, x23, x9\n"
- "add x22, x22, x9\n"
- "movi v4.16b, #0x0\n"
- "movi v3.16b, #0x0\n"
- "add x21, x21, x9\n"
- "movi v28.16b, #0x0\n"
- "movi v22.16b, #0x0\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "add x23, x23, x28\n"
+ "add x22, x22, x28\n"
+ "add x21, x21, x28\n"
+ "movi v2.16b, #0x0\n"
+ "movi v1.16b, #0x0\n"
+ "add x20, x20, x28\n"
+ "movi v0.16b, #0x0\n"
+ "movi v31.16b, #0x0\n"
"tbz %x[n_channels], #2, 17f\n"
- "ldr d4, [x24], #0x8\n"
- "ldr d3, [x23], #0x8\n"
- "ldr d28, [x22], #0x8\n"
- "ldr d22, [x21], #0x8\n"
+ "ldr d2, [x23], #0x8\n"
+ "ldr d1, [x22], #0x8\n"
+ "ldr d0, [x21], #0x8\n"
+ "ldr d31, [x20], #0x8\n"
"tbz %x[n_channels], #1, 16f\n"
- "ld1 { v4.s }[2], [x24], #0x4\n"
- "ld1 { v3.s }[2], [x23], #0x4\n"
- "ld1 { v28.s }[2], [x22], #0x4\n"
- "ld1 { v22.s }[2], [x21], #0x4\n"
+ "ld1 { v2.s }[2], [x23], #0x4\n"
+ "ld1 { v1.s }[2], [x22], #0x4\n"
+ "ld1 { v0.s }[2], [x21], #0x4\n"
+ "ld1 { v31.s }[2], [x20], #0x4\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v4.h }[6], [x24], #0x2\n"
- "ld1 { v3.h }[6], [x23], #0x2\n"
- "ld1 { v28.h }[6], [x22], #0x2\n"
- "ld1 { v22.h }[6], [x21], #0x2\n"
+ "ld1 { v2.h }[6], [x23], #0x2\n"
+ "ld1 { v1.h }[6], [x22], #0x2\n"
+ "ld1 { v0.h }[6], [x21], #0x2\n"
+ "ld1 { v31.h }[6], [x20], #0x2\n"
"b 19f\n"
"16:" // Oddments: 4 inputs loop: Load: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v4.h }[4], [x24], #0x2\n"
- "ld1 { v3.h }[4], [x23], #0x2\n"
- "ld1 { v28.h }[4], [x22], #0x2\n"
- "ld1 { v22.h }[4], [x21], #0x2\n"
+ "ld1 { v2.h }[4], [x23], #0x2\n"
+ "ld1 { v1.h }[4], [x22], #0x2\n"
+ "ld1 { v0.h }[4], [x21], #0x2\n"
+ "ld1 { v31.h }[4], [x20], #0x2\n"
"b 19f\n"
"17:" // Oddments: 4 inputs loop: Load: Bit 2: Unset
"tbz %x[n_channels], #1, 18f\n"
- "ldr s4, [x24], #0x4\n"
- "ldr s3, [x23], #0x4\n"
- "ldr s28, [x22], #0x4\n"
- "ldr s22, [x21], #0x4\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr s1, [x22], #0x4\n"
+ "ldr s0, [x21], #0x4\n"
+ "ldr s31, [x20], #0x4\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v4.h }[2], [x24], #0x2\n"
- "ld1 { v3.h }[2], [x23], #0x2\n"
- "ld1 { v28.h }[2], [x22], #0x2\n"
- "ld1 { v22.h }[2], [x21], #0x2\n"
+ "ld1 { v2.h }[2], [x23], #0x2\n"
+ "ld1 { v1.h }[2], [x22], #0x2\n"
+ "ld1 { v0.h }[2], [x21], #0x2\n"
+ "ld1 { v31.h }[2], [x20], #0x2\n"
"b 19f\n"
"18:" // Oddments: 4 inputs loop: Load: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 19f\n"
- "ldr h4, [x24], #0x2\n"
- "ldr h3, [x23], #0x2\n"
- "ldr h28, [x22], #0x2\n"
- "ldr h22, [x21], #0x2\n"
+ "ldr h2, [x23], #0x2\n"
+ "ldr h1, [x22], #0x2\n"
+ "ldr h0, [x21], #0x2\n"
+ "ldr h31, [x20], #0x2\n"
"19:" // Oddments: 4 inputs loop: Load: Bit 2: End
- "fadd v23.8h, v4.8h, v3.8h\n"
- "fadd v19.8h, v28.8h, v22.8h\n"
- "subs x25, x25, #0x1\n"
+ "fadd v23.8h, v2.8h, v1.8h\n"
+ "fadd v19.8h, v0.8h, v31.8h\n"
+ "subs x24, x24, #0x1\n"
"fadd v19.8h, v23.8h, v19.8h\n"
- "fadd v8.8h, v8.8h, v19.8h\n"
+ "fadd v6.8h, v6.8h, v19.8h\n"
"bgt 15b\n"
"20:" // Oddments: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 26f\n"
"21:" // Oddments: Single input loop
- "ldr x24, [x20], #0x8\n"
- "add x24, x24, x9\n"
- "movi v4.16b, #0x0\n"
+ "ldr x23, [x19], #0x8\n"
+ "add x23, x23, x28\n"
+ "movi v2.16b, #0x0\n"
"tbz %x[n_channels], #2, 23f\n"
- "ldr d4, [x24], #0x8\n"
+ "ldr d2, [x23], #0x8\n"
"tbz %x[n_channels], #1, 22f\n"
- "ld1 { v4.s }[2], [x24], #0x4\n"
+ "ld1 { v2.s }[2], [x23], #0x4\n"
"tbz %x[n_channels], #0, 25f\n"
- "ld1 { v4.h }[6], [x24], #0x2\n"
+ "ld1 { v2.h }[6], [x23], #0x2\n"
"b 25f\n"
"22:" // Oddments: Single input loop: Load: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 25f\n"
- "ld1 { v4.h }[4], [x24], #0x2\n"
+ "ld1 { v2.h }[4], [x23], #0x2\n"
"b 25f\n"
"23:" // Oddments: Single input loop: Load: Bit 2: Unset
"tbz %x[n_channels], #1, 24f\n"
- "ldr s4, [x24], #0x4\n"
+ "ldr s2, [x23], #0x4\n"
"tbz %x[n_channels], #0, 25f\n"
- "ld1 { v4.h }[2], [x24], #0x2\n"
+ "ld1 { v2.h }[2], [x23], #0x2\n"
"b 25f\n"
"24:" // Oddments: Single input loop: Load: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 25f\n"
- "ldr h4, [x24], #0x2\n"
+ "ldr h2, [x23], #0x2\n"
"25:" // Oddments: Single input loop: Load: Bit 2: End
- "subs x21, x21, #0x1\n"
- "fadd v8.8h, v8.8h, v4.8h\n"
+ "subs x20, x20, #0x1\n"
+ "fadd v6.8h, v6.8h, v2.8h\n"
"bgt 21b\n"
"26:" // Oddments: Single input loop: End
- "fmul v8.8h, v8.8h, v9.8h\n"
+ "fmul v6.8h, v6.8h, v7.8h\n"
"tbz %x[n_channels], #2, 28f\n"
- "st1 { v8.d }[0], [%x[outptr]], #0x8\n"
+ "st1 { v6.d }[0], [%x[outptr]], #0x8\n"
"tbz %x[n_channels], #1, 27f\n"
- "st1 { v8.s }[2], [%x[outptr]], #0x4\n"
+ "st1 { v6.s }[2], [%x[outptr]], #0x4\n"
"tbz %x[n_channels], #0, 30f\n"
- "st1 { v8.h }[6], [%x[outptr]], #0x2\n"
+ "st1 { v6.h }[6], [%x[outptr]], #0x2\n"
"b 30f\n"
"27:" // Oddments: Store: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 30f\n"
- "st1 { v8.h }[4], [%x[outptr]], #0x2\n"
+ "st1 { v6.h }[4], [%x[outptr]], #0x2\n"
"b 30f\n"
"28:" // Oddments: Store: Bit 2: Unset
"tbz %x[n_channels], #1, 29f\n"
- "st1 { v8.s }[0], [%x[outptr]], #0x4\n"
+ "st1 { v6.s }[0], [%x[outptr]], #0x4\n"
"tbz %x[n_channels], #0, 30f\n"
- "st1 { v8.h }[2], [%x[outptr]], #0x2\n"
+ "st1 { v6.h }[2], [%x[outptr]], #0x2\n"
"b 30f\n"
"29:" // Oddments: Store: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 30f\n"
- "st1 { v8.h }[0], [%x[outptr]], #0x2\n"
+ "st1 { v6.h }[0], [%x[outptr]], #0x2\n"
"30:" // Oddments: Store: Bit 2: End
"31:" // End
: [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
: [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells), [rescale_ptr] "r" (&rescale_value)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
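
The generic average pool accumulates one fp16 value per valid input cell into up to four 8-lane accumulators (v6..v3 after this change), walking the input pointers four cells at a time with a scalar tail, and multiplies by the precomputed 1/window_cells only once at the end. A scalar sketch of the per-channel reduction, using plain float in place of __fp16:

#include <cstdio>

static float avg_pool_lane(const float* const* inptrs, int n_valid_cells,
                           int channel, float rescale) {
    float acc = 0.0f;                         // movi v6.16b, #0x0
    int i = 0;
    for (; i + 4 <= n_valid_cells; i += 4)    // "4 inputs loop"
        acc += (inptrs[i][channel] + inptrs[i + 1][channel]) +
               (inptrs[i + 2][channel] + inptrs[i + 3][channel]);
    for (; i < n_valid_cells; ++i)            // "Single input loop"
        acc += inptrs[i][channel];
    return acc * rescale;                     // fmul by 1/window_cells
}

int main() {
    float cells[9];
    const float* ptrs[9];
    for (int i = 0; i < 9; ++i) { cells[i] = i + 1.0f; ptrs[i] = &cells[i]; }
    std::printf("%g\n", avg_pool_lane(ptrs, 9, 0, 1.0f / 9.0f));  // mean of 1..9 = 5
    return 0;
}

The pairwise grouping (a+b)+(c+d) mirrors the fadd tree in the assembly and shortens the dependency chain relative to a straight running sum.
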
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
index 8041453cb1..b12d090e22 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -62,111 +62,111 @@ void a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
pad_left, pad_top, pad_right, pad_bottom);
__asm__ __volatile__(
- "ldr x16, [%x[args], %[offsetof_n_channels]]\n"
- "ldr x21, [%x[args], %[offsetof_outptrs]]\n"
- "cmp x16, #0x8\n"
- "mov x15, #0x0\n"
- "ldr x20, [%x[args], %[offsetof_inptrs]]\n"
- "ldp x14, x13, [x21, #0x0]\n"
- "mov x12, #0x0\n"
- "ldp x11, x10, [x21, #0x10]\n"
- "ldp x9, x28, [x20, #0x0]\n"
- "ldp x27, x26, [x20, #0x10]\n"
- "ldp x25, x24, [x20, #0x20]\n"
- "ldp x23, x22, [x20, #0x30]\n"
- "ldr x21, [x20, #0x40]\n"
+ "ldr x15, [%x[args], %[offsetof_n_channels]]\n"
+ "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
+ "cmp x15, #0x8\n"
+ "mov x14, #0x0\n"
+ "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+ "ldp x13, x12, [x20, #0x0]\n"
+ "mov x11, #0x0\n"
+ "ldp x10, x9, [x20, #0x10]\n"
+ "ldp x28, x27, [x19, #0x0]\n"
+ "ldp x26, x25, [x19, #0x10]\n"
+ "ldp x24, x23, [x19, #0x20]\n"
+ "ldp x22, x21, [x19, #0x30]\n"
+ "ldr x20, [x19, #0x40]\n"
"blt 3f\n"
- "ldr q30, [x28, x15]\n"
- "ldr q29, [x25, x15]\n"
- "lsr x20, x16, #0x3\n"
- "sub x16, x16, x20, LSL #3\n"
- "ldr q28, [x22, x15]\n"
- "ldr q27, [x26, x15]\n"
- "subs x20, x20, #0x1\n"
- "ldr q26, [x9, x15]\n"
- "ldr q25, [x27, x15]\n"
- "ldr q24, [x24, x15]\n"
- "ldr q23, [x23, x15]\n"
- "ldr q22, [x21, x15]\n"
- "add x15, x15, #0x10\n"
+ "lsr x19, x15, #0x3\n"
+ "sub x15, x15, x19, LSL #3\n"
+ "ldr q30, [x27, x14]\n"
+ "ldr q29, [x24, x14]\n"
+ "subs x19, x19, #0x1\n"
+ "ldr q28, [x21, x14]\n"
+ "ldr q27, [x25, x14]\n"
+ "ldr q26, [x28, x14]\n"
+ "ldr q25, [x23, x14]\n"
+ "ldr q24, [x26, x14]\n"
+ "ldr q23, [x22, x14]\n"
+ "ldr q22, [x20, x14]\n"
+ "add x14, x14, #0x10\n"
"beq 2f\n"
"1:" // Vector: Loop
"fmax v21.8h, v30.8h, v29.8h\n"
- "ldr q30, [x28, x15]\n"
"fmax v20.8h, v29.8h, v28.8h\n"
- "ldr q29, [x25, x15]\n"
- "ldr q28, [x22, x15]\n"
+ "subs x19, x19, #0x1\n"
+ "ldr q30, [x27, x14]\n"
"fmax v19.8h, v27.8h, v26.8h\n"
- "ldr q26, [x9, x15]\n"
"fmax v18.8h, v25.8h, v24.8h\n"
- "ldr q25, [x27, x15]\n"
- "fmax v17.8h, v27.8h, v23.8h\n"
- "ldr q27, [x26, x15]\n"
- "fmax v16.8h, v24.8h, v22.8h\n"
- "ldr q24, [x24, x15]\n"
- "ldr q23, [x23, x15]\n"
- "subs x20, x20, #0x1\n"
+ "ldr q29, [x24, x14]\n"
+ "ldr q28, [x21, x14]\n"
+ "fmax v17.8h, v23.8h, v27.8h\n"
+ "fmax v16.8h, v25.8h, v22.8h\n"
+ "ldr q27, [x25, x14]\n"
+ "ldr q26, [x28, x14]\n"
"fmax v19.8h, v21.8h, v19.8h\n"
- "ldr q22, [x21, x15]\n"
"fmax v18.8h, v18.8h, v21.8h\n"
- "fmax v17.8h, v17.8h, v20.8h\n"
- "add x15, x15, #0x10\n"
- "fmax v16.8h, v16.8h, v20.8h\n"
- "str q19, [x14, x12]\n"
- "str q18, [x13, x12]\n"
- "str q17, [x11, x12]\n"
- "str q16, [x10, x12]\n"
- "add x12, x12, #0x10\n"
+ "ldr q25, [x23, x14]\n"
+ "ldr q24, [x26, x14]\n"
+ "fmax v17.8h, v20.8h, v17.8h\n"
+ "fmax v16.8h, v20.8h, v16.8h\n"
+ "ldr q23, [x22, x14]\n"
+ "ldr q22, [x20, x14]\n"
+ "add x14, x14, #0x10\n"
+ "str q19, [x13, x11]\n"
+ "str q18, [x12, x11]\n"
+ "str q17, [x10, x11]\n"
+ "str q16, [x9, x11]\n"
+ "add x11, x11, #0x10\n"
"bgt 1b\n"
"2:" // Vector: Tail
"fmax v21.8h, v30.8h, v29.8h\n"
"fmax v20.8h, v29.8h, v28.8h\n"
"fmax v19.8h, v27.8h, v26.8h\n"
"fmax v18.8h, v25.8h, v24.8h\n"
- "fmax v17.8h, v27.8h, v23.8h\n"
- "fmax v16.8h, v24.8h, v22.8h\n"
+ "fmax v17.8h, v23.8h, v27.8h\n"
+ "fmax v16.8h, v25.8h, v22.8h\n"
"fmax v19.8h, v21.8h, v19.8h\n"
"fmax v18.8h, v18.8h, v21.8h\n"
- "str q19, [x14, x12]\n"
- "fmax v17.8h, v17.8h, v20.8h\n"
- "fmax v16.8h, v16.8h, v20.8h\n"
- "str q18, [x13, x12]\n"
- "str q17, [x11, x12]\n"
- "str q16, [x10, x12]\n"
- "add x12, x12, #0x10\n"
- "cbz x16, 4f\n"
+ "str q19, [x13, x11]\n"
+ "fmax v17.8h, v20.8h, v17.8h\n"
+ "fmax v16.8h, v20.8h, v16.8h\n"
+ "str q18, [x12, x11]\n"
+ "str q17, [x10, x11]\n"
+ "str q16, [x9, x11]\n"
+ "add x11, x11, #0x10\n"
+ "cbz x15, 4f\n"
"3:" // Oddments
- "ldr h30, [x28, x15]\n"
- "ldr h29, [x25, x15]\n"
+ "ldr h30, [x27, x14]\n"
+ "ldr h29, [x24, x14]\n"
"fmax v21.8h, v30.8h, v29.8h\n"
- "subs x16, x16, #0x1\n"
- "ldr h28, [x22, x15]\n"
- "ldr h27, [x26, x15]\n"
+ "subs x15, x15, #0x1\n"
+ "ldr h28, [x21, x14]\n"
+ "ldr h27, [x25, x14]\n"
"fmax v20.8h, v29.8h, v28.8h\n"
- "ldr h26, [x9, x15]\n"
- "ldr h25, [x27, x15]\n"
+ "ldr h26, [x28, x14]\n"
+ "ldr h25, [x23, x14]\n"
"fmax v19.8h, v27.8h, v26.8h\n"
"fmax v19.8h, v21.8h, v19.8h\n"
- "ldr h24, [x24, x15]\n"
- "ldr h23, [x23, x15]\n"
+ "ldr h24, [x26, x14]\n"
+ "ldr h23, [x22, x14]\n"
"fmax v18.8h, v25.8h, v24.8h\n"
- "fmax v17.8h, v27.8h, v23.8h\n"
- "ldr h22, [x21, x15]\n"
- "fmax v16.8h, v24.8h, v22.8h\n"
- "add x15, x15, #0x2\n"
+ "fmax v17.8h, v23.8h, v27.8h\n"
+ "ldr h22, [x20, x14]\n"
+ "fmax v16.8h, v25.8h, v22.8h\n"
+ "add x14, x14, #0x2\n"
"fmax v18.8h, v18.8h, v21.8h\n"
- "fmax v17.8h, v17.8h, v20.8h\n"
- "fmax v16.8h, v16.8h, v20.8h\n"
- "str h19, [x14, x12]\n"
- "str h18, [x13, x12]\n"
- "str h17, [x11, x12]\n"
- "str h16, [x10, x12]\n"
- "add x12, x12, #0x2\n"
+ "fmax v17.8h, v20.8h, v17.8h\n"
+ "fmax v16.8h, v20.8h, v16.8h\n"
+ "str h19, [x13, x11]\n"
+ "str h18, [x12, x11]\n"
+ "str h17, [x10, x11]\n"
+ "str h16, [x9, x11]\n"
+ "add x11, x11, #0x2\n"
"bgt 3b\n"
"4:" // End
:
: [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
- : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
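
This stride-1 2x2 max pool reads a 3x3 patch of nine fp16 vectors and produces four outputs, sharing the partial maxima v21 and v20 between adjacent windows. The scalar equivalent, without the subexpression sharing, is simply a max over each 2x2 window:

#include <algorithm>
#include <cstdio>

int main() {
    // One channel: a 3x3 patch of inputs yields four stride-1 2x2 maxima.
    float in[3][3] = {{1, 5, 2}, {7, 3, 8}, {4, 9, 6}};
    for (int oy = 0; oy < 2; ++oy)
        for (int ox = 0; ox < 2; ++ox) {
            float m = std::max(std::max(in[oy][ox],     in[oy][ox + 1]),
                               std::max(in[oy + 1][ox], in[oy + 1][ox + 1]));
            std::printf("out[%d][%d] = %g\n", oy, ox, m);  // 7 8 / 9 9
        }
    return 0;
}
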
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_generic_depthfirst/generic.cpp
index e4de9fb79c..f1eec31b98 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,301 +41,301 @@ void a64_fp16_nhwc_max_generic_depthfirst_impl(
{
__asm__ __volatile__(
"cmp %x[n_channels], #0x20\n"
- "mov x9, #0x0\n"
- "mov x28, #0x10\n" // cntb _, ALL, #1
- "mov x27, #0x20\n" // cntb _, ALL, #2
- "mov x26, #0x30\n" // cntb _, ALL, #3
+ "mov x28, #0x0\n"
+ "mov x27, #0x10\n" // cntb _, ALL, #1
+ "mov x26, #0x20\n" // cntb _, ALL, #2
+ "mov x25, #0x30\n" // cntb _, ALL, #3
"blt 7f\n"
"1:" // 4-vectors of channels
- "mov w20, #0xfc00\n"
- "lsr x25, %x[n_valid_cells], #0x2\n"
- "dup v8.8h, w20\n"
- "dup v7.8h, w20\n"
- "dup v6.8h, w20\n"
- "dup v5.8h, w20\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 4f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldr q4, [x24, x9]\n"
- "subs x25, x25, #0x1\n"
- "ldr q3, [x23, x9]\n"
- "ldr q2, [x24, x28]\n"
- "ldr q1, [x23, x28]\n"
- "ldr q0, [x24, x27]\n"
- "ldr q31, [x23, x27]\n"
- "ldr q30, [x24, x26]\n"
- "ldr q29, [x23, x26]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ldr q28, [x22, x9]\n"
- "ldr q22, [x21, x9]\n"
- "ldr q27, [x22, x28]\n"
- "ldr q21, [x21, x28]\n"
- "ldr q26, [x22, x27]\n"
- "ldr q20, [x21, x27]\n"
- "ldr q25, [x22, x26]\n"
- "ldr q24, [x21, x26]\n"
+ "mov w19, #0xfc00\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
+ "dup v6.8h, w19\n"
+ "dup v5.8h, w19\n"
+ "dup v4.8h, w19\n"
+ "dup v3.8h, w19\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 4f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "ldr q2, [x23, x28]\n"
+ "ldr q1, [x22, x28]\n"
+ "ldr q0, [x21, x28]\n"
+ "ldr q31, [x20, x28]\n"
+ "ldr q30, [x23, x27]\n"
+ "ldr q22, [x22, x27]\n"
+ "ldr q29, [x21, x27]\n"
+ "ldr q28, [x20, x27]\n"
+ "ldr q27, [x23, x26]\n"
+ "ldr q21, [x22, x26]\n"
+ "ldr q26, [x21, x26]\n"
+ "ldr q17, [x20, x26]\n"
+ "ldr q25, [x23, x25]\n"
+ "ldr q20, [x22, x25]\n"
+ "ldr q24, [x21, x25]\n"
+ "ldr q16, [x20, x25]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
- "fmax v23.8h, v4.8h, v3.8h\n"
- "fmax v19.8h, v28.8h, v22.8h\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldr q4, [x24, x9]\n"
- "ldr q3, [x23, x9]\n"
- "fmax v22.8h, v2.8h, v1.8h\n"
- "ldr q2, [x24, x28]\n"
- "fmax v18.8h, v27.8h, v21.8h\n"
- "ldr q1, [x23, x28]\n"
- "fmax v21.8h, v0.8h, v31.8h\n"
- "ldr q0, [x24, x27]\n"
- "fmax v17.8h, v26.8h, v20.8h\n"
- "ldr q31, [x23, x27]\n"
- "fmax v20.8h, v30.8h, v29.8h\n"
- "ldr q30, [x24, x26]\n"
- "fmax v16.8h, v25.8h, v24.8h\n"
- "ldr q29, [x23, x26]\n"
+ "fmax v23.8h, v2.8h, v1.8h\n"
+ "fmax v19.8h, v0.8h, v31.8h\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "fmax v22.8h, v30.8h, v22.8h\n"
+ "fmax v18.8h, v29.8h, v28.8h\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "fmax v21.8h, v27.8h, v21.8h\n"
+ "fmax v17.8h, v26.8h, v17.8h\n"
+ "ldr q2, [x23, x28]\n"
+ "ldr q1, [x22, x28]\n"
+ "fmax v20.8h, v25.8h, v20.8h\n"
+ "fmax v16.8h, v24.8h, v16.8h\n"
+ "ldr q0, [x21, x28]\n"
+ "ldr q31, [x20, x28]\n"
"fmax v19.8h, v23.8h, v19.8h\n"
"fmax v18.8h, v22.8h, v18.8h\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "ldr q28, [x22, x9]\n"
- "ldr q22, [x21, x9]\n"
+ "ldr q30, [x23, x27]\n"
+ "ldr q22, [x22, x27]\n"
"fmax v17.8h, v21.8h, v17.8h\n"
"fmax v16.8h, v20.8h, v16.8h\n"
- "ldr q27, [x22, x28]\n"
- "ldr q21, [x21, x28]\n"
- "subs x25, x25, #0x1\n"
- "fmax v8.8h, v8.8h, v19.8h\n"
- "ldr q26, [x22, x27]\n"
- "ldr q20, [x21, x27]\n"
- "fmax v7.8h, v7.8h, v18.8h\n"
- "fmax v6.8h, v6.8h, v17.8h\n"
- "ldr q25, [x22, x26]\n"
- "ldr q24, [x21, x26]\n"
- "fmax v5.8h, v5.8h, v16.8h\n"
- "add x20, x20, #0x20\n"
+ "ldr q29, [x21, x27]\n"
+ "ldr q28, [x20, x27]\n"
+ "fmax v6.8h, v6.8h, v19.8h\n"
+ "fmax v5.8h, v5.8h, v18.8h\n"
+ "ldr q27, [x23, x26]\n"
+ "ldr q21, [x22, x26]\n"
+ "fmax v4.8h, v4.8h, v17.8h\n"
+ "fmax v3.8h, v3.8h, v16.8h\n"
+ "ldr q26, [x21, x26]\n"
+ "ldr q17, [x20, x26]\n"
+ "ldr q25, [x23, x25]\n"
+ "ldr q20, [x22, x25]\n"
+ "ldr q24, [x21, x25]\n"
+ "ldr q16, [x20, x25]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
- "fmax v23.8h, v4.8h, v3.8h\n"
- "fmax v19.8h, v28.8h, v22.8h\n"
- "fmax v22.8h, v2.8h, v1.8h\n"
- "fmax v18.8h, v27.8h, v21.8h\n"
- "fmax v21.8h, v0.8h, v31.8h\n"
- "fmax v17.8h, v26.8h, v20.8h\n"
- "fmax v20.8h, v30.8h, v29.8h\n"
- "fmax v16.8h, v25.8h, v24.8h\n"
+ "fmax v23.8h, v2.8h, v1.8h\n"
+ "fmax v19.8h, v0.8h, v31.8h\n"
+ "fmax v22.8h, v30.8h, v22.8h\n"
+ "fmax v18.8h, v29.8h, v28.8h\n"
+ "fmax v21.8h, v27.8h, v21.8h\n"
+ "fmax v17.8h, v26.8h, v17.8h\n"
+ "fmax v20.8h, v25.8h, v20.8h\n"
+ "fmax v16.8h, v24.8h, v16.8h\n"
"fmax v19.8h, v23.8h, v19.8h\n"
"fmax v18.8h, v22.8h, v18.8h\n"
"fmax v17.8h, v21.8h, v17.8h\n"
"fmax v16.8h, v20.8h, v16.8h\n"
- "fmax v8.8h, v8.8h, v19.8h\n"
- "fmax v7.8h, v7.8h, v18.8h\n"
- "fmax v6.8h, v6.8h, v17.8h\n"
- "fmax v5.8h, v5.8h, v16.8h\n"
+ "fmax v6.8h, v6.8h, v19.8h\n"
+ "fmax v5.8h, v5.8h, v18.8h\n"
+ "fmax v4.8h, v4.8h, v17.8h\n"
+ "fmax v3.8h, v3.8h, v16.8h\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ldr q4, [x24, x9]\n"
- "subs x21, x21, #0x1\n"
- "fmax v8.8h, v8.8h, v4.8h\n"
- "ldr q2, [x24, x28]\n"
- "ldr q0, [x24, x27]\n"
- "fmax v7.8h, v7.8h, v2.8h\n"
- "fmax v6.8h, v6.8h, v0.8h\n"
- "ldr q30, [x24, x26]\n"
+ "ldr x23, [x19], #0x8\n"
+ "ldr q2, [x23, x28]\n"
+ "subs x20, x20, #0x1\n"
+ "fmax v6.8h, v6.8h, v2.8h\n"
+ "ldr q30, [x23, x27]\n"
+ "ldr q27, [x23, x26]\n"
"fmax v5.8h, v5.8h, v30.8h\n"
+ "fmax v4.8h, v4.8h, v27.8h\n"
+ "ldr q25, [x23, x25]\n"
+ "fmax v3.8h, v3.8h, v25.8h\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
"sub %x[n_channels], %x[n_channels], #0x20\n"
"cmp %x[n_channels], #0x20\n"
- "str q8, [%x[outptr], x9]\n"
- "str q7, [%x[outptr], x28]\n"
- "add x9, x9, #0x40\n"
+ "str q6, [%x[outptr], x28]\n"
+ "str q5, [%x[outptr], x27]\n"
"add x28, x28, #0x40\n"
- "str q6, [%x[outptr], x27]\n"
"add x27, x27, #0x40\n"
- "str q5, [%x[outptr], x26]\n"
+ "str q4, [%x[outptr], x26]\n"
"add x26, x26, #0x40\n"
+ "str q3, [%x[outptr], x25]\n"
+ "add x25, x25, #0x40\n"
"bge 1b\n"
"cbz %x[n_channels], 31f\n"
"7:" // Single vector of channels
"cmp %x[n_channels], #0x8\n"
"blt 14f\n"
"8:" // Single vector of channels: Loop
- "mov w20, #0xfc00\n"
- "lsr x25, %x[n_valid_cells], #0x2\n"
- "dup v8.8h, w20\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 11f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldr q4, [x24, x9]\n"
- "subs x25, x25, #0x1\n"
- "ldr q3, [x23, x9]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ldr q28, [x22, x9]\n"
- "ldr q22, [x21, x9]\n"
+ "mov w19, #0xfc00\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
+ "dup v6.8h, w19\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 11f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "ldr q2, [x23, x28]\n"
+ "ldr q1, [x22, x28]\n"
+ "ldr q0, [x21, x28]\n"
+ "ldr q31, [x20, x28]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
- "fmax v23.8h, v4.8h, v3.8h\n"
- "fmax v19.8h, v28.8h, v22.8h\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldr q4, [x24, x9]\n"
- "ldr q3, [x23, x9]\n"
+ "fmax v23.8h, v2.8h, v1.8h\n"
+ "fmax v19.8h, v0.8h, v31.8h\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
"fmax v19.8h, v23.8h, v19.8h\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "subs x25, x25, #0x1\n"
- "ldr q28, [x22, x9]\n"
- "ldr q22, [x21, x9]\n"
- "fmax v8.8h, v8.8h, v19.8h\n"
- "add x20, x20, #0x20\n"
+ "subs x24, x24, #0x1\n"
+ "fmax v6.8h, v6.8h, v19.8h\n"
+ "add x19, x19, #0x20\n"
+ "ldr q2, [x23, x28]\n"
+ "ldr q1, [x22, x28]\n"
+ "ldr q0, [x21, x28]\n"
+ "ldr q31, [x20, x28]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
- "fmax v23.8h, v4.8h, v3.8h\n"
- "fmax v19.8h, v28.8h, v22.8h\n"
+ "fmax v23.8h, v2.8h, v1.8h\n"
+ "fmax v19.8h, v0.8h, v31.8h\n"
"fmax v19.8h, v23.8h, v19.8h\n"
- "fmax v8.8h, v8.8h, v19.8h\n"
+ "fmax v6.8h, v6.8h, v19.8h\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ldr q4, [x24, x9]\n"
- "subs x21, x21, #0x1\n"
- "fmax v8.8h, v8.8h, v4.8h\n"
+ "ldr x23, [x19], #0x8\n"
+ "ldr q2, [x23, x28]\n"
+ "subs x20, x20, #0x1\n"
+ "fmax v6.8h, v6.8h, v2.8h\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
"sub %x[n_channels], %x[n_channels], #0x8\n"
"cmp %x[n_channels], #0x8\n"
- "str q8, [%x[outptr], x9]\n"
- "add x9, x9, #0x10\n"
+ "str q6, [%x[outptr], x28]\n"
+ "add x28, x28, #0x10\n"
"bge 8b\n"
"cbz %x[n_channels], 31f\n"
"14:" // Oddments
- "mov w20, #0xfc00\n"
- "lsr x25, %x[n_valid_cells], #0x2\n"
- "dup v8.8h, w20\n"
- "add %x[outptr], %x[outptr], x9\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 20f\n"
+ "mov w19, #0xfc00\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
+ "dup v6.8h, w19\n"
+ "add %x[outptr], %x[outptr], x28\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 20f\n"
"15:" // Oddments: 4 inputs loop
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "add x24, x24, x9\n"
- "add x23, x23, x9\n"
- "add x22, x22, x9\n"
- "movi v4.16b, #0x0\n"
- "movi v3.16b, #0x0\n"
- "add x21, x21, x9\n"
- "movi v28.16b, #0x0\n"
- "movi v22.16b, #0x0\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "add x23, x23, x28\n"
+ "add x22, x22, x28\n"
+ "add x21, x21, x28\n"
+ "movi v2.16b, #0x0\n"
+ "movi v1.16b, #0x0\n"
+ "add x20, x20, x28\n"
+ "movi v0.16b, #0x0\n"
+ "movi v31.16b, #0x0\n"
"tbz %x[n_channels], #2, 17f\n"
- "ldr d4, [x24], #0x8\n"
- "ldr d3, [x23], #0x8\n"
- "ldr d28, [x22], #0x8\n"
- "ldr d22, [x21], #0x8\n"
+ "ldr d2, [x23], #0x8\n"
+ "ldr d1, [x22], #0x8\n"
+ "ldr d0, [x21], #0x8\n"
+ "ldr d31, [x20], #0x8\n"
"tbz %x[n_channels], #1, 16f\n"
- "ld1 { v4.s }[2], [x24], #0x4\n"
- "ld1 { v3.s }[2], [x23], #0x4\n"
- "ld1 { v28.s }[2], [x22], #0x4\n"
- "ld1 { v22.s }[2], [x21], #0x4\n"
+ "ld1 { v2.s }[2], [x23], #0x4\n"
+ "ld1 { v1.s }[2], [x22], #0x4\n"
+ "ld1 { v0.s }[2], [x21], #0x4\n"
+ "ld1 { v31.s }[2], [x20], #0x4\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v4.h }[6], [x24], #0x2\n"
- "ld1 { v3.h }[6], [x23], #0x2\n"
- "ld1 { v28.h }[6], [x22], #0x2\n"
- "ld1 { v22.h }[6], [x21], #0x2\n"
+ "ld1 { v2.h }[6], [x23], #0x2\n"
+ "ld1 { v1.h }[6], [x22], #0x2\n"
+ "ld1 { v0.h }[6], [x21], #0x2\n"
+ "ld1 { v31.h }[6], [x20], #0x2\n"
"b 19f\n"
"16:" // Oddments: 4 inputs loop: Load: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v4.h }[4], [x24], #0x2\n"
- "ld1 { v3.h }[4], [x23], #0x2\n"
- "ld1 { v28.h }[4], [x22], #0x2\n"
- "ld1 { v22.h }[4], [x21], #0x2\n"
+ "ld1 { v2.h }[4], [x23], #0x2\n"
+ "ld1 { v1.h }[4], [x22], #0x2\n"
+ "ld1 { v0.h }[4], [x21], #0x2\n"
+ "ld1 { v31.h }[4], [x20], #0x2\n"
"b 19f\n"
"17:" // Oddments: 4 inputs loop: Load: Bit 2: Unset
"tbz %x[n_channels], #1, 18f\n"
- "ldr s4, [x24], #0x4\n"
- "ldr s3, [x23], #0x4\n"
- "ldr s28, [x22], #0x4\n"
- "ldr s22, [x21], #0x4\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr s1, [x22], #0x4\n"
+ "ldr s0, [x21], #0x4\n"
+ "ldr s31, [x20], #0x4\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v4.h }[2], [x24], #0x2\n"
- "ld1 { v3.h }[2], [x23], #0x2\n"
- "ld1 { v28.h }[2], [x22], #0x2\n"
- "ld1 { v22.h }[2], [x21], #0x2\n"
+ "ld1 { v2.h }[2], [x23], #0x2\n"
+ "ld1 { v1.h }[2], [x22], #0x2\n"
+ "ld1 { v0.h }[2], [x21], #0x2\n"
+ "ld1 { v31.h }[2], [x20], #0x2\n"
"b 19f\n"
"18:" // Oddments: 4 inputs loop: Load: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 19f\n"
- "ldr h4, [x24], #0x2\n"
- "ldr h3, [x23], #0x2\n"
- "ldr h28, [x22], #0x2\n"
- "ldr h22, [x21], #0x2\n"
+ "ldr h2, [x23], #0x2\n"
+ "ldr h1, [x22], #0x2\n"
+ "ldr h0, [x21], #0x2\n"
+ "ldr h31, [x20], #0x2\n"
"19:" // Oddments: 4 inputs loop: Load: Bit 2: End
- "fmax v23.8h, v4.8h, v3.8h\n"
- "fmax v19.8h, v28.8h, v22.8h\n"
- "subs x25, x25, #0x1\n"
+ "fmax v23.8h, v2.8h, v1.8h\n"
+ "fmax v19.8h, v0.8h, v31.8h\n"
+ "subs x24, x24, #0x1\n"
"fmax v19.8h, v23.8h, v19.8h\n"
- "fmax v8.8h, v8.8h, v19.8h\n"
+ "fmax v6.8h, v6.8h, v19.8h\n"
"bgt 15b\n"
"20:" // Oddments: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 26f\n"
"21:" // Oddments: Single input loop
- "ldr x24, [x20], #0x8\n"
- "add x24, x24, x9\n"
- "movi v4.16b, #0x0\n"
+ "ldr x23, [x19], #0x8\n"
+ "add x23, x23, x28\n"
+ "movi v2.16b, #0x0\n"
"tbz %x[n_channels], #2, 23f\n"
- "ldr d4, [x24], #0x8\n"
+ "ldr d2, [x23], #0x8\n"
"tbz %x[n_channels], #1, 22f\n"
- "ld1 { v4.s }[2], [x24], #0x4\n"
+ "ld1 { v2.s }[2], [x23], #0x4\n"
"tbz %x[n_channels], #0, 25f\n"
- "ld1 { v4.h }[6], [x24], #0x2\n"
+ "ld1 { v2.h }[6], [x23], #0x2\n"
"b 25f\n"
"22:" // Oddments: Single input loop: Load: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 25f\n"
- "ld1 { v4.h }[4], [x24], #0x2\n"
+ "ld1 { v2.h }[4], [x23], #0x2\n"
"b 25f\n"
"23:" // Oddments: Single input loop: Load: Bit 2: Unset
"tbz %x[n_channels], #1, 24f\n"
- "ldr s4, [x24], #0x4\n"
+ "ldr s2, [x23], #0x4\n"
"tbz %x[n_channels], #0, 25f\n"
- "ld1 { v4.h }[2], [x24], #0x2\n"
+ "ld1 { v2.h }[2], [x23], #0x2\n"
"b 25f\n"
"24:" // Oddments: Single input loop: Load: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 25f\n"
- "ldr h4, [x24], #0x2\n"
+ "ldr h2, [x23], #0x2\n"
"25:" // Oddments: Single input loop: Load: Bit 2: End
- "subs x21, x21, #0x1\n"
- "fmax v8.8h, v8.8h, v4.8h\n"
+ "subs x20, x20, #0x1\n"
+ "fmax v6.8h, v6.8h, v2.8h\n"
"bgt 21b\n"
"26:" // Oddments: Single input loop: End
"tbz %x[n_channels], #2, 28f\n"
- "st1 { v8.d }[0], [%x[outptr]], #0x8\n"
+ "st1 { v6.d }[0], [%x[outptr]], #0x8\n"
"tbz %x[n_channels], #1, 27f\n"
- "st1 { v8.s }[2], [%x[outptr]], #0x4\n"
+ "st1 { v6.s }[2], [%x[outptr]], #0x4\n"
"tbz %x[n_channels], #0, 30f\n"
- "st1 { v8.h }[6], [%x[outptr]], #0x2\n"
+ "st1 { v6.h }[6], [%x[outptr]], #0x2\n"
"b 30f\n"
"27:" // Oddments: Store: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 30f\n"
- "st1 { v8.h }[4], [%x[outptr]], #0x2\n"
+ "st1 { v6.h }[4], [%x[outptr]], #0x2\n"
"b 30f\n"
"28:" // Oddments: Store: Bit 2: Unset
"tbz %x[n_channels], #1, 29f\n"
- "st1 { v8.s }[0], [%x[outptr]], #0x4\n"
+ "st1 { v6.s }[0], [%x[outptr]], #0x4\n"
"tbz %x[n_channels], #0, 30f\n"
- "st1 { v8.h }[2], [%x[outptr]], #0x2\n"
+ "st1 { v6.h }[2], [%x[outptr]], #0x2\n"
"b 30f\n"
"29:" // Oddments: Store: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 30f\n"
- "st1 { v8.h }[0], [%x[outptr]], #0x2\n"
+ "st1 { v6.h }[0], [%x[outptr]], #0x2\n"
"30:" // Oddments: Store: Bit 2: End
"31:" // End
: [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
: [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
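
The max accumulators in this kernel are seeded with the bit pattern 0xfc00 ("mov w19, #0xfc00" then "dup v6.8h, w19"), which is IEEE-754 half-precision negative infinity, so the first fmax simply adopts the first input regardless of its value. A small check of that bit pattern plus the same seed-with-minus-infinity idiom, in float:

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <limits>

int main() {
    // fp16 -infinity: sign 1, exponent 11111, mantissa 0.
    uint16_t fp16_neg_inf = 0x8000u | (0x1Fu << 10);
    std::printf("0x%04X\n", static_cast<unsigned>(fp16_neg_inf));  // 0xFC00

    // Seeding with -inf makes the first fmax adopt the first input.
    float acc = -std::numeric_limits<float>::infinity();
    const float vals[] = {0.5f, -2.0f, 3.25f};
    for (float v : vals)
        acc = std::max(acc, v);                                    // fmax chain
    std::printf("max = %g\n", acc);                                // 3.25
    return 0;
}
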
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
index 9db65d62b0..fc0efc76ce 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -82,90 +82,90 @@ void a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(
pad_left, pad_top, pad_right, pad_bottom);
__asm__ __volatile__(
- "ldr q7, [%x[args], %[offsetof_rescale]]\n"
- "ldr x3, [%x[args], %[offsetof_n_channels]]\n"
- "cmp x3, #0x4\n"
- "mov x4, #0x0\n"
- "ldr x21, [%x[args], %[offsetof_outptrs]]\n"
- "ldr x20, [%x[args], %[offsetof_inptrs]]\n"
+ "ldr x4, [%x[args], %[offsetof_n_channels]]\n"
+ "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
+ "cmp x4, #0x4\n"
"mov x5, #0x0\n"
- "ldp x6, x7, [x21, #0x0]\n"
- "ldp x8, x17, [x21, #0x10]\n"
- "ldp x16, x15, [x20, #0x0]\n"
- "ldp x14, x13, [x20, #0x10]\n"
- "ldp x12, x11, [x20, #0x20]\n"
- "ldp x10, x9, [x20, #0x30]\n"
- "ldp x28, x27, [x20, #0x40]\n"
- "ldp x26, x25, [x20, #0x50]\n"
- "ldp x24, x23, [x20, #0x60]\n"
- "ldp x22, x21, [x20, #0x70]\n"
+ "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+ "ldp x6, x7, [x20, #0x0]\n"
+ "mov x8, #0x0\n"
+ "ldp x17, x16, [x20, #0x10]\n"
+ "ldp x15, x14, [x19, #0x0]\n"
+ "ldp x13, x12, [x19, #0x10]\n"
+ "ldp x11, x10, [x19, #0x20]\n"
+ "ldp x9, x28, [x19, #0x30]\n"
+ "ldp x27, x26, [x19, #0x40]\n"
+ "ldp x25, x24, [x19, #0x50]\n"
+ "ldp x23, x22, [x19, #0x60]\n"
+ "ldp x21, x20, [x19, #0x70]\n"
+ "ldr q7, [%x[args], %[offsetof_rescale]]\n"
"blt 3f\n"
- "ldr q6, [x11, x4]\n"
- "ldr q5, [x10, x4]\n"
- "lsr x20, x3, #0x2\n"
- "sub x3, x3, x20, LSL #2\n"
- "ldr q4, [x27, x4]\n"
- "ldr q3, [x26, x4]\n"
- "subs x20, x20, #0x1\n"
- "ldr q2, [x15, x4]\n"
- "ldr q1, [x14, x4]\n"
- "ldr q0, [x12, x4]\n"
- "ldr q31, [x28, x4]\n"
- "ldr q30, [x9, x4]\n"
- "ldr q29, [x25, x4]\n"
- "ldr q28, [x23, x4]\n"
- "ldr q27, [x22, x4]\n"
- "ldr q26, [x16, x4]\n"
- "ldr q25, [x13, x4]\n"
- "ldr q24, [x24, x4]\n"
- "ldr q23, [x21, x4]\n"
- "add x4, x4, #0x10\n"
+ "lsr x19, x4, #0x2\n"
+ "sub x4, x4, x19, LSL #2\n"
+ "ldr q6, [x10, x5]\n"
+ "ldr q5, [x9, x5]\n"
+ "subs x19, x19, #0x1\n"
+ "ldr q4, [x26, x5]\n"
+ "ldr q3, [x25, x5]\n"
+ "ldr q2, [x14, x5]\n"
+ "ldr q1, [x13, x5]\n"
+ "ldr q0, [x11, x5]\n"
+ "ldr q31, [x27, x5]\n"
+ "ldr q30, [x28, x5]\n"
+ "ldr q29, [x24, x5]\n"
+ "ldr q28, [x22, x5]\n"
+ "ldr q27, [x21, x5]\n"
+ "ldr q26, [x15, x5]\n"
+ "ldr q25, [x12, x5]\n"
+ "ldr q24, [x23, x5]\n"
+ "ldr q23, [x20, x5]\n"
+ "add x5, x5, #0x10\n"
"beq 2f\n"
"1:" // Vector: Loop
"fadd v17.4s, v6.4s, v5.4s\n"
- "ldr q6, [x11, x4]\n"
- "ldr q5, [x10, x4]\n"
"fadd v16.4s, v4.4s, v3.4s\n"
- "ldr q4, [x27, x4]\n"
- "ldr q3, [x26, x4]\n"
+ "subs x19, x19, #0x1\n"
+ "ldr q6, [x10, x5]\n"
"fadd v19.4s, v17.4s, v16.4s\n"
"fadd v18.4s, v2.4s, v1.4s\n"
- "ldr q2, [x15, x4]\n"
- "ldr q1, [x14, x4]\n"
+ "ldr q5, [x9, x5]\n"
+ "ldr q4, [x26, x5]\n"
"fadd v17.4s, v0.4s, v31.4s\n"
"fadd v22.4s, v30.4s, v29.4s\n"
- "ldr q0, [x12, x4]\n"
- "ldr q31, [x28, x4]\n"
+ "ldr q3, [x25, x5]\n"
+ "ldr q2, [x14, x5]\n"
"fadd v16.4s, v28.4s, v27.4s\n"
"fadd v21.4s, v18.4s, v19.4s\n"
- "ldr q30, [x9, x4]\n"
- "ldr q29, [x25, x4]\n"
+ "ldr q1, [x13, x5]\n"
+ "ldr q0, [x11, x5]\n"
"fadd v20.4s, v16.4s, v19.4s\n"
"fadd v19.4s, v26.4s, v17.4s\n"
- "ldr q28, [x23, x4]\n"
- "ldr q27, [x22, x4]\n"
+ "ldr q31, [x27, x5]\n"
+ "ldr q30, [x28, x5]\n"
"fadd v18.4s, v25.4s, v22.4s\n"
"fadd v17.4s, v24.4s, v17.4s\n"
- "ldr q26, [x16, x4]\n"
- "ldr q25, [x13, x4]\n"
+ "ldr q29, [x24, x5]\n"
+ "ldr q28, [x22, x5]\n"
"fadd v16.4s, v23.4s, v22.4s\n"
- "fadd v19.4s, v21.4s, v19.4s\n"
- "ldr q24, [x24, x4]\n"
- "ldr q23, [x21, x4]\n"
- "fadd v18.4s, v21.4s, v18.4s\n"
+ "fadd v19.4s, v19.4s, v21.4s\n"
+ "ldr q27, [x21, x5]\n"
+ "ldr q26, [x15, x5]\n"
+ "fadd v18.4s, v18.4s, v21.4s\n"
"fadd v17.4s, v17.4s, v20.4s\n"
+ "ldr q25, [x12, x5]\n"
+ "ldr q24, [x23, x5]\n"
"fadd v16.4s, v16.4s, v20.4s\n"
- "subs x20, x20, #0x1\n"
"fmul v19.4s, v19.4s, v7.s[0]\n"
- "add x4, x4, #0x10\n"
+ "ldr q23, [x20, x5]\n"
+ "add x5, x5, #0x10\n"
"fmul v18.4s, v18.4s, v7.s[1]\n"
"fmul v17.4s, v17.4s, v7.s[2]\n"
- "str q19, [x6, x5]\n"
+ "str q19, [x6, x8]\n"
"fmul v16.4s, v16.4s, v7.s[3]\n"
- "str q18, [x7, x5]\n"
- "str q17, [x8, x5]\n"
- "str q16, [x17, x5]\n"
- "add x5, x5, #0x10\n"
+ "str q18, [x7, x8]\n"
+ "str q17, [x17, x8]\n"
+ "str q16, [x16, x8]\n"
+ "add x8, x8, #0x10\n"
"bgt 1b\n"
"2:" // Vector: Tail
"fadd v17.4s, v6.4s, v5.4s\n"
@@ -181,70 +181,70 @@ void a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(
"fadd v18.4s, v25.4s, v22.4s\n"
"fadd v17.4s, v24.4s, v17.4s\n"
"fadd v16.4s, v23.4s, v22.4s\n"
- "fadd v19.4s, v21.4s, v19.4s\n"
- "fadd v18.4s, v21.4s, v18.4s\n"
+ "fadd v19.4s, v19.4s, v21.4s\n"
+ "fadd v18.4s, v18.4s, v21.4s\n"
"fadd v17.4s, v17.4s, v20.4s\n"
"fadd v16.4s, v16.4s, v20.4s\n"
"fmul v19.4s, v19.4s, v7.s[0]\n"
- "str q19, [x6, x5]\n"
+ "str q19, [x6, x8]\n"
"fmul v18.4s, v18.4s, v7.s[1]\n"
"fmul v17.4s, v17.4s, v7.s[2]\n"
- "str q18, [x7, x5]\n"
+ "str q18, [x7, x8]\n"
"fmul v16.4s, v16.4s, v7.s[3]\n"
- "str q17, [x8, x5]\n"
- "str q16, [x17, x5]\n"
- "add x5, x5, #0x10\n"
- "cbz x3, 4f\n"
+ "str q17, [x17, x8]\n"
+ "str q16, [x16, x8]\n"
+ "add x8, x8, #0x10\n"
+ "cbz x4, 4f\n"
"3:" // Oddments
- "ldr s6, [x11, x4]\n"
- "ldr s5, [x10, x4]\n"
+ "ldr s6, [x10, x5]\n"
+ "ldr s5, [x9, x5]\n"
"fadd v17.4s, v6.4s, v5.4s\n"
- "subs x3, x3, #0x1\n"
- "ldr s4, [x27, x4]\n"
- "ldr s3, [x26, x4]\n"
+ "subs x4, x4, #0x1\n"
+ "ldr s4, [x26, x5]\n"
+ "ldr s3, [x25, x5]\n"
"fadd v16.4s, v4.4s, v3.4s\n"
"fadd v19.4s, v17.4s, v16.4s\n"
- "ldr s2, [x15, x4]\n"
- "ldr s1, [x14, x4]\n"
+ "ldr s2, [x14, x5]\n"
+ "ldr s1, [x13, x5]\n"
"fadd v18.4s, v2.4s, v1.4s\n"
"fadd v21.4s, v18.4s, v19.4s\n"
- "ldr s0, [x12, x4]\n"
- "ldr s31, [x28, x4]\n"
+ "ldr s0, [x11, x5]\n"
+ "ldr s31, [x27, x5]\n"
"fadd v17.4s, v0.4s, v31.4s\n"
- "ldr s30, [x9, x4]\n"
- "ldr s29, [x25, x4]\n"
+ "ldr s30, [x28, x5]\n"
+ "ldr s29, [x24, x5]\n"
"fadd v22.4s, v30.4s, v29.4s\n"
- "ldr s28, [x23, x4]\n"
- "ldr s27, [x22, x4]\n"
+ "ldr s28, [x22, x5]\n"
+ "ldr s27, [x21, x5]\n"
"fadd v16.4s, v28.4s, v27.4s\n"
"fadd v20.4s, v16.4s, v19.4s\n"
- "ldr s26, [x16, x4]\n"
- "ldr s25, [x13, x4]\n"
+ "ldr s26, [x15, x5]\n"
+ "ldr s25, [x12, x5]\n"
"fadd v19.4s, v26.4s, v17.4s\n"
"fadd v18.4s, v25.4s, v22.4s\n"
- "ldr s24, [x24, x4]\n"
- "ldr s23, [x21, x4]\n"
+ "ldr s24, [x23, x5]\n"
+ "ldr s23, [x20, x5]\n"
"fadd v17.4s, v24.4s, v17.4s\n"
"fadd v16.4s, v23.4s, v22.4s\n"
- "fadd v19.4s, v21.4s, v19.4s\n"
- "fadd v18.4s, v21.4s, v18.4s\n"
- "add x4, x4, #0x4\n"
+ "fadd v19.4s, v19.4s, v21.4s\n"
+ "fadd v18.4s, v18.4s, v21.4s\n"
+ "add x5, x5, #0x4\n"
"fadd v17.4s, v17.4s, v20.4s\n"
"fadd v16.4s, v16.4s, v20.4s\n"
"fmul v19.4s, v19.4s, v7.s[0]\n"
"fmul v18.4s, v18.4s, v7.s[1]\n"
- "str s19, [x6, x5]\n"
+ "str s19, [x6, x8]\n"
"fmul v17.4s, v17.4s, v7.s[2]\n"
"fmul v16.4s, v16.4s, v7.s[3]\n"
- "str s18, [x7, x5]\n"
- "str s17, [x8, x5]\n"
- "str s16, [x17, x5]\n"
- "add x5, x5, #0x4\n"
+ "str s18, [x7, x8]\n"
+ "str s17, [x17, x8]\n"
+ "str s16, [x16, x8]\n"
+ "add x8, x8, #0x4\n"
"bgt 3b\n"
"4:" // End
:
: [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs)), [offsetof_rescale] "I" (offsetof(KernelArgs, rescale_vals))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
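
The fp32 kernel performs the same computation as its fp16 counterpart above, with 4-lane vectors and the per-output rescale taken from v7.s[0..3]. Four distinct rescale values are needed because edge padding can shrink each 3x3 window's valid-cell count independently. A hypothetical derivation of those values for a patch padded on the top and left (the kernel's actual KernelArgs setup lives elsewhere in the library):

#include <algorithm>
#include <cstdio>

int main() {
    // 4x4 patch, four 3x3 windows; pads mark patch rows/cols outside the
    // image. Example values only; assumes each window keeps >= 1 valid cell.
    int pad_top = 1, pad_left = 1, pad_bottom = 0, pad_right = 0;
    for (int oy = 0; oy < 2; ++oy)
        for (int ox = 0; ox < 2; ++ox) {
            int rows = std::max(std::min(oy + 3, 4 - pad_bottom) - std::max(oy, pad_top), 0);
            int cols = std::max(std::min(ox + 3, 4 - pad_right) - std::max(ox, pad_left), 0);
            std::printf("rescale[%d] = 1/%d\n", oy * 2 + ox, rows * cols);
        }
    return 0;  // prints 1/4, 1/6, 1/6, 1/9
}
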
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_generic_depthfirst/generic.cpp
index 3f90610591..2d20164640 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,258 +42,258 @@ void a64_fp32_nhwc_avg_generic_depthfirst_impl(
const auto rescale_value = static_cast<float>(1.0f / static_cast<float>(window_cells));
__asm__ __volatile__(
- "ld1r { v9.4s }, [%x[rescale_ptr]]\n"
"cmp %x[n_channels], #0x10\n"
- "mov x9, #0x0\n"
- "mov x28, #0x10\n" // cntb _, ALL, #1
- "mov x27, #0x20\n" // cntb _, ALL, #2
- "mov x26, #0x30\n" // cntb _, ALL, #3
+ "ld1r { v7.4s }, [%x[rescale_ptr]]\n"
+ "mov x28, #0x0\n"
+ "mov x27, #0x10\n" // cntb _, ALL, #1
+ "mov x26, #0x20\n" // cntb _, ALL, #2
+ "mov x25, #0x30\n" // cntb _, ALL, #3
"blt 7f\n"
"1:" // 4-vectors of channels
- "lsr x25, %x[n_valid_cells], #0x2\n"
- "movi v8.16b, #0x0\n"
- "movi v7.16b, #0x0\n"
- "mov x20, %x[inptrs]\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"movi v6.16b, #0x0\n"
"movi v5.16b, #0x0\n"
- "cbz x25, 4f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldr q4, [x24, x9]\n"
- "subs x25, x25, #0x1\n"
- "ldr q3, [x23, x9]\n"
- "ldr q2, [x24, x28]\n"
- "ldr q1, [x23, x28]\n"
- "ldr q0, [x24, x27]\n"
- "ldr q31, [x23, x27]\n"
- "ldr q30, [x24, x26]\n"
- "ldr q29, [x23, x26]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ldr q28, [x22, x9]\n"
- "ldr q22, [x21, x9]\n"
- "ldr q27, [x22, x28]\n"
- "ldr q21, [x21, x28]\n"
- "ldr q26, [x22, x27]\n"
- "ldr q20, [x21, x27]\n"
- "ldr q25, [x22, x26]\n"
- "ldr q24, [x21, x26]\n"
+ "mov x19, %x[inptrs]\n"
+ "movi v4.16b, #0x0\n"
+ "movi v3.16b, #0x0\n"
+ "cbz x24, 4f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "ldr q2, [x23, x28]\n"
+ "ldr q1, [x22, x28]\n"
+ "ldr q0, [x21, x28]\n"
+ "ldr q31, [x20, x28]\n"
+ "ldr q30, [x23, x27]\n"
+ "ldr q22, [x22, x27]\n"
+ "ldr q29, [x21, x27]\n"
+ "ldr q28, [x20, x27]\n"
+ "ldr q27, [x23, x26]\n"
+ "ldr q21, [x22, x26]\n"
+ "ldr q26, [x21, x26]\n"
+ "ldr q17, [x20, x26]\n"
+ "ldr q25, [x23, x25]\n"
+ "ldr q20, [x22, x25]\n"
+ "ldr q24, [x21, x25]\n"
+ "ldr q16, [x20, x25]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
- "fadd v23.4s, v4.4s, v3.4s\n"
- "fadd v19.4s, v28.4s, v22.4s\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldr q4, [x24, x9]\n"
- "ldr q3, [x23, x9]\n"
- "fadd v22.4s, v2.4s, v1.4s\n"
- "ldr q2, [x24, x28]\n"
- "fadd v18.4s, v27.4s, v21.4s\n"
- "ldr q1, [x23, x28]\n"
- "fadd v21.4s, v0.4s, v31.4s\n"
- "ldr q0, [x24, x27]\n"
- "fadd v17.4s, v26.4s, v20.4s\n"
- "ldr q31, [x23, x27]\n"
- "fadd v20.4s, v30.4s, v29.4s\n"
- "ldr q30, [x24, x26]\n"
- "fadd v16.4s, v25.4s, v24.4s\n"
- "ldr q29, [x23, x26]\n"
+ "fadd v23.4s, v2.4s, v1.4s\n"
+ "fadd v19.4s, v0.4s, v31.4s\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "fadd v22.4s, v30.4s, v22.4s\n"
+ "fadd v18.4s, v29.4s, v28.4s\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "fadd v21.4s, v27.4s, v21.4s\n"
+ "fadd v17.4s, v26.4s, v17.4s\n"
+ "ldr q2, [x23, x28]\n"
+ "ldr q1, [x22, x28]\n"
+ "fadd v20.4s, v25.4s, v20.4s\n"
+ "fadd v16.4s, v24.4s, v16.4s\n"
+ "ldr q0, [x21, x28]\n"
+ "ldr q31, [x20, x28]\n"
"fadd v19.4s, v23.4s, v19.4s\n"
"fadd v18.4s, v22.4s, v18.4s\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "ldr q28, [x22, x9]\n"
- "ldr q22, [x21, x9]\n"
+ "ldr q30, [x23, x27]\n"
+ "ldr q22, [x22, x27]\n"
"fadd v17.4s, v21.4s, v17.4s\n"
"fadd v16.4s, v20.4s, v16.4s\n"
- "ldr q27, [x22, x28]\n"
- "ldr q21, [x21, x28]\n"
- "subs x25, x25, #0x1\n"
- "fadd v8.4s, v8.4s, v19.4s\n"
- "ldr q26, [x22, x27]\n"
- "ldr q20, [x21, x27]\n"
- "fadd v7.4s, v7.4s, v18.4s\n"
- "fadd v6.4s, v6.4s, v17.4s\n"
- "ldr q25, [x22, x26]\n"
- "ldr q24, [x21, x26]\n"
- "fadd v5.4s, v5.4s, v16.4s\n"
- "add x20, x20, #0x20\n"
+ "ldr q29, [x21, x27]\n"
+ "ldr q28, [x20, x27]\n"
+ "fadd v6.4s, v6.4s, v19.4s\n"
+ "fadd v5.4s, v5.4s, v18.4s\n"
+ "ldr q27, [x23, x26]\n"
+ "ldr q21, [x22, x26]\n"
+ "fadd v4.4s, v4.4s, v17.4s\n"
+ "fadd v3.4s, v3.4s, v16.4s\n"
+ "ldr q26, [x21, x26]\n"
+ "ldr q17, [x20, x26]\n"
+ "ldr q25, [x23, x25]\n"
+ "ldr q20, [x22, x25]\n"
+ "ldr q24, [x21, x25]\n"
+ "ldr q16, [x20, x25]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
- "fadd v23.4s, v4.4s, v3.4s\n"
- "fadd v19.4s, v28.4s, v22.4s\n"
- "fadd v22.4s, v2.4s, v1.4s\n"
- "fadd v18.4s, v27.4s, v21.4s\n"
- "fadd v21.4s, v0.4s, v31.4s\n"
- "fadd v17.4s, v26.4s, v20.4s\n"
- "fadd v20.4s, v30.4s, v29.4s\n"
- "fadd v16.4s, v25.4s, v24.4s\n"
+ "fadd v23.4s, v2.4s, v1.4s\n"
+ "fadd v19.4s, v0.4s, v31.4s\n"
+ "fadd v22.4s, v30.4s, v22.4s\n"
+ "fadd v18.4s, v29.4s, v28.4s\n"
+ "fadd v21.4s, v27.4s, v21.4s\n"
+ "fadd v17.4s, v26.4s, v17.4s\n"
+ "fadd v20.4s, v25.4s, v20.4s\n"
+ "fadd v16.4s, v24.4s, v16.4s\n"
"fadd v19.4s, v23.4s, v19.4s\n"
"fadd v18.4s, v22.4s, v18.4s\n"
"fadd v17.4s, v21.4s, v17.4s\n"
"fadd v16.4s, v20.4s, v16.4s\n"
- "fadd v8.4s, v8.4s, v19.4s\n"
- "fadd v7.4s, v7.4s, v18.4s\n"
- "fadd v6.4s, v6.4s, v17.4s\n"
- "fadd v5.4s, v5.4s, v16.4s\n"
+ "fadd v6.4s, v6.4s, v19.4s\n"
+ "fadd v5.4s, v5.4s, v18.4s\n"
+ "fadd v4.4s, v4.4s, v17.4s\n"
+ "fadd v3.4s, v3.4s, v16.4s\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ldr q4, [x24, x9]\n"
- "subs x21, x21, #0x1\n"
- "fadd v8.4s, v8.4s, v4.4s\n"
- "ldr q2, [x24, x28]\n"
- "ldr q0, [x24, x27]\n"
- "fadd v7.4s, v7.4s, v2.4s\n"
- "fadd v6.4s, v6.4s, v0.4s\n"
- "ldr q30, [x24, x26]\n"
+ "ldr x23, [x19], #0x8\n"
+ "ldr q2, [x23, x28]\n"
+ "subs x20, x20, #0x1\n"
+ "fadd v6.4s, v6.4s, v2.4s\n"
+ "ldr q30, [x23, x27]\n"
+ "ldr q27, [x23, x26]\n"
"fadd v5.4s, v5.4s, v30.4s\n"
+ "fadd v4.4s, v4.4s, v27.4s\n"
+ "ldr q25, [x23, x25]\n"
+ "fadd v3.4s, v3.4s, v25.4s\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
"sub %x[n_channels], %x[n_channels], #0x10\n"
"cmp %x[n_channels], #0x10\n"
- "fmul v8.4s, v8.4s, v9.4s\n"
- "fmul v7.4s, v7.4s, v9.4s\n"
- "fmul v6.4s, v6.4s, v9.4s\n"
- "fmul v5.4s, v5.4s, v9.4s\n"
- "str q8, [%x[outptr], x9]\n"
- "add x9, x9, #0x40\n"
- "str q7, [%x[outptr], x28]\n"
+ "fmul v6.4s, v6.4s, v7.4s\n"
+ "fmul v5.4s, v5.4s, v7.4s\n"
+ "fmul v4.4s, v4.4s, v7.4s\n"
+ "fmul v3.4s, v3.4s, v7.4s\n"
+ "str q6, [%x[outptr], x28]\n"
"add x28, x28, #0x40\n"
- "str q6, [%x[outptr], x27]\n"
+ "str q5, [%x[outptr], x27]\n"
"add x27, x27, #0x40\n"
- "str q5, [%x[outptr], x26]\n"
+ "str q4, [%x[outptr], x26]\n"
"add x26, x26, #0x40\n"
+ "str q3, [%x[outptr], x25]\n"
+ "add x25, x25, #0x40\n"
"bge 1b\n"
"cbz %x[n_channels], 25f\n"
"7:" // Single vector of channels
"cmp %x[n_channels], #0x4\n"
"blt 14f\n"
"8:" // Single vector of channels: Loop
- "lsr x25, %x[n_valid_cells], #0x2\n"
- "movi v8.16b, #0x0\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 11f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldr q4, [x24, x9]\n"
- "subs x25, x25, #0x1\n"
- "ldr q3, [x23, x9]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ldr q28, [x22, x9]\n"
- "ldr q22, [x21, x9]\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
+ "movi v6.16b, #0x0\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 11f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "ldr q2, [x23, x28]\n"
+ "ldr q1, [x22, x28]\n"
+ "ldr q0, [x21, x28]\n"
+ "ldr q31, [x20, x28]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
- "fadd v23.4s, v4.4s, v3.4s\n"
- "fadd v19.4s, v28.4s, v22.4s\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldr q4, [x24, x9]\n"
- "ldr q3, [x23, x9]\n"
+ "fadd v23.4s, v2.4s, v1.4s\n"
+ "fadd v19.4s, v0.4s, v31.4s\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
"fadd v19.4s, v23.4s, v19.4s\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "subs x25, x25, #0x1\n"
- "ldr q28, [x22, x9]\n"
- "ldr q22, [x21, x9]\n"
- "fadd v8.4s, v8.4s, v19.4s\n"
- "add x20, x20, #0x20\n"
+ "subs x24, x24, #0x1\n"
+ "fadd v6.4s, v6.4s, v19.4s\n"
+ "add x19, x19, #0x20\n"
+ "ldr q2, [x23, x28]\n"
+ "ldr q1, [x22, x28]\n"
+ "ldr q0, [x21, x28]\n"
+ "ldr q31, [x20, x28]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
- "fadd v23.4s, v4.4s, v3.4s\n"
- "fadd v19.4s, v28.4s, v22.4s\n"
+ "fadd v23.4s, v2.4s, v1.4s\n"
+ "fadd v19.4s, v0.4s, v31.4s\n"
"fadd v19.4s, v23.4s, v19.4s\n"
- "fadd v8.4s, v8.4s, v19.4s\n"
+ "fadd v6.4s, v6.4s, v19.4s\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ldr q4, [x24, x9]\n"
- "subs x21, x21, #0x1\n"
- "fadd v8.4s, v8.4s, v4.4s\n"
+ "ldr x23, [x19], #0x8\n"
+ "ldr q2, [x23, x28]\n"
+ "subs x20, x20, #0x1\n"
+ "fadd v6.4s, v6.4s, v2.4s\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
"sub %x[n_channels], %x[n_channels], #0x4\n"
"cmp %x[n_channels], #0x4\n"
- "fmul v8.4s, v8.4s, v9.4s\n"
- "str q8, [%x[outptr], x9]\n"
- "add x9, x9, #0x10\n"
+ "fmul v6.4s, v6.4s, v7.4s\n"
+ "str q6, [%x[outptr], x28]\n"
+ "add x28, x28, #0x10\n"
"bge 8b\n"
"cbz %x[n_channels], 25f\n"
"14:" // Oddments
- "lsr x25, %x[n_valid_cells], #0x2\n"
- "add %x[outptr], %x[outptr], x9\n"
- "movi v8.16b, #0x0\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 18f\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
+ "add %x[outptr], %x[outptr], x28\n"
+ "movi v6.16b, #0x0\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 18f\n"
"15:" // Oddments: 4 inputs loop
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "add x24, x24, x9\n"
- "add x23, x23, x9\n"
- "add x22, x22, x9\n"
- "movi v4.16b, #0x0\n"
- "movi v3.16b, #0x0\n"
- "add x21, x21, x9\n"
- "movi v28.16b, #0x0\n"
- "movi v22.16b, #0x0\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "add x23, x23, x28\n"
+ "add x22, x22, x28\n"
+ "add x21, x21, x28\n"
+ "movi v2.16b, #0x0\n"
+ "movi v1.16b, #0x0\n"
+ "add x20, x20, x28\n"
+ "movi v0.16b, #0x0\n"
+ "movi v31.16b, #0x0\n"
"tbz %x[n_channels], #1, 16f\n"
- "ldr d4, [x24], #0x8\n"
- "ldr d3, [x23], #0x8\n"
- "ldr d28, [x22], #0x8\n"
- "ldr d22, [x21], #0x8\n"
+ "ldr d2, [x23], #0x8\n"
+ "ldr d1, [x22], #0x8\n"
+ "ldr d0, [x21], #0x8\n"
+ "ldr d31, [x20], #0x8\n"
"tbz %x[n_channels], #0, 17f\n"
- "ld1 { v4.s }[2], [x24], #0x4\n"
- "ld1 { v3.s }[2], [x23], #0x4\n"
- "ld1 { v28.s }[2], [x22], #0x4\n"
- "ld1 { v22.s }[2], [x21], #0x4\n"
+ "ld1 { v2.s }[2], [x23], #0x4\n"
+ "ld1 { v1.s }[2], [x22], #0x4\n"
+ "ld1 { v0.s }[2], [x21], #0x4\n"
+ "ld1 { v31.s }[2], [x20], #0x4\n"
"b 17f\n"
"16:" // Oddments: 4 inputs loop: Load: Bit 1: Unset
"tbz %x[n_channels], #0, 17f\n"
- "ldr s4, [x24], #0x4\n"
- "ldr s3, [x23], #0x4\n"
- "ldr s28, [x22], #0x4\n"
- "ldr s22, [x21], #0x4\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr s1, [x22], #0x4\n"
+ "ldr s0, [x21], #0x4\n"
+ "ldr s31, [x20], #0x4\n"
"17:" // Oddments: 4 inputs loop: Load: Bit 1: End
- "fadd v23.4s, v4.4s, v3.4s\n"
- "fadd v19.4s, v28.4s, v22.4s\n"
- "subs x25, x25, #0x1\n"
+ "fadd v23.4s, v2.4s, v1.4s\n"
+ "fadd v19.4s, v0.4s, v31.4s\n"
+ "subs x24, x24, #0x1\n"
"fadd v19.4s, v23.4s, v19.4s\n"
- "fadd v8.4s, v8.4s, v19.4s\n"
+ "fadd v6.4s, v6.4s, v19.4s\n"
"bgt 15b\n"
"18:" // Oddments: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 22f\n"
"19:" // Oddments: Single input loop
- "ldr x24, [x20], #0x8\n"
- "add x24, x24, x9\n"
- "movi v4.16b, #0x0\n"
+ "ldr x23, [x19], #0x8\n"
+ "add x23, x23, x28\n"
+ "movi v2.16b, #0x0\n"
"tbz %x[n_channels], #1, 20f\n"
- "ldr d4, [x24], #0x8\n"
+ "ldr d2, [x23], #0x8\n"
"tbz %x[n_channels], #0, 21f\n"
- "ld1 { v4.s }[2], [x24], #0x4\n"
+ "ld1 { v2.s }[2], [x23], #0x4\n"
"b 21f\n"
"20:" // Oddments: Single input loop: Load: Bit 1: Unset
"tbz %x[n_channels], #0, 21f\n"
- "ldr s4, [x24], #0x4\n"
+ "ldr s2, [x23], #0x4\n"
"21:" // Oddments: Single input loop: Load: Bit 1: End
- "subs x21, x21, #0x1\n"
- "fadd v8.4s, v8.4s, v4.4s\n"
+ "subs x20, x20, #0x1\n"
+ "fadd v6.4s, v6.4s, v2.4s\n"
"bgt 19b\n"
"22:" // Oddments: Single input loop: End
- "fmul v8.4s, v8.4s, v9.4s\n"
+ "fmul v6.4s, v6.4s, v7.4s\n"
"tbz %x[n_channels], #1, 23f\n"
- "st1 { v8.d }[0], [%x[outptr]], #0x8\n"
+ "st1 { v6.d }[0], [%x[outptr]], #0x8\n"
"tbz %x[n_channels], #0, 24f\n"
- "st1 { v8.s }[2], [%x[outptr]], #0x4\n"
+ "st1 { v6.s }[2], [%x[outptr]], #0x4\n"
"b 24f\n"
"23:" // Oddments: Store: Bit 1: Unset
"tbz %x[n_channels], #0, 24f\n"
- "st1 { v8.s }[0], [%x[outptr]], #0x4\n"
+ "st1 { v6.s }[0], [%x[outptr]], #0x4\n"
"24:" // Oddments: Store: Bit 1: End
"25:" // End
: [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
: [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells), [rescale_ptr] "r" (&rescale_value)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
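Aside: the control flow above reduces to accumulate-then-rescale. Sums over all valid input cells are built up with fadd, and the single fmul by the preloaded 1 / window_cells (held in v7 after this revert, v9 before it) happens only at store time. A scalar sketch of one channel, with illustrative names:

#include <cstddef>

static float avg_pool_channel(const float *const *inptrs, std::size_t n_valid_cells,
                              std::size_t channel, float rescale)  // rescale = 1.0f / window_cells
{
    float acc = 0.0f;
    for (std::size_t i = 0; i < n_valid_cells; ++i)
        acc += inptrs[i][channel];  // the fadd accumulation loops
    return acc * rescale;           // the single fmul before the store
}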
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
index 2e7fb3c5b1..db01487e31 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -62,111 +62,111 @@ void a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
pad_left, pad_top, pad_right, pad_bottom);
__asm__ __volatile__(
- "ldr x16, [%x[args], %[offsetof_n_channels]]\n"
- "ldr x21, [%x[args], %[offsetof_outptrs]]\n"
- "cmp x16, #0x4\n"
- "mov x15, #0x0\n"
- "ldr x20, [%x[args], %[offsetof_inptrs]]\n"
- "ldp x14, x13, [x21, #0x0]\n"
- "mov x12, #0x0\n"
- "ldp x11, x10, [x21, #0x10]\n"
- "ldp x9, x28, [x20, #0x0]\n"
- "ldp x27, x26, [x20, #0x10]\n"
- "ldp x25, x24, [x20, #0x20]\n"
- "ldp x23, x22, [x20, #0x30]\n"
- "ldr x21, [x20, #0x40]\n"
+ "ldr x15, [%x[args], %[offsetof_n_channels]]\n"
+ "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
+ "cmp x15, #0x4\n"
+ "mov x14, #0x0\n"
+ "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+ "ldp x13, x12, [x20, #0x0]\n"
+ "mov x11, #0x0\n"
+ "ldp x10, x9, [x20, #0x10]\n"
+ "ldp x28, x27, [x19, #0x0]\n"
+ "ldp x26, x25, [x19, #0x10]\n"
+ "ldp x24, x23, [x19, #0x20]\n"
+ "ldp x22, x21, [x19, #0x30]\n"
+ "ldr x20, [x19, #0x40]\n"
"blt 3f\n"
- "ldr q30, [x28, x15]\n"
- "ldr q29, [x25, x15]\n"
- "lsr x20, x16, #0x2\n"
- "sub x16, x16, x20, LSL #2\n"
- "ldr q28, [x22, x15]\n"
- "ldr q27, [x26, x15]\n"
- "subs x20, x20, #0x1\n"
- "ldr q26, [x9, x15]\n"
- "ldr q25, [x27, x15]\n"
- "ldr q24, [x24, x15]\n"
- "ldr q23, [x23, x15]\n"
- "ldr q22, [x21, x15]\n"
- "add x15, x15, #0x10\n"
+ "lsr x19, x15, #0x2\n"
+ "sub x15, x15, x19, LSL #2\n"
+ "ldr q30, [x27, x14]\n"
+ "ldr q29, [x24, x14]\n"
+ "subs x19, x19, #0x1\n"
+ "ldr q28, [x21, x14]\n"
+ "ldr q27, [x25, x14]\n"
+ "ldr q26, [x28, x14]\n"
+ "ldr q25, [x23, x14]\n"
+ "ldr q24, [x26, x14]\n"
+ "ldr q23, [x22, x14]\n"
+ "ldr q22, [x20, x14]\n"
+ "add x14, x14, #0x10\n"
"beq 2f\n"
"1:" // Vector: Loop
"fmax v21.4s, v30.4s, v29.4s\n"
- "ldr q30, [x28, x15]\n"
"fmax v20.4s, v29.4s, v28.4s\n"
- "ldr q29, [x25, x15]\n"
- "ldr q28, [x22, x15]\n"
+ "subs x19, x19, #0x1\n"
+ "ldr q30, [x27, x14]\n"
"fmax v19.4s, v27.4s, v26.4s\n"
- "ldr q26, [x9, x15]\n"
"fmax v18.4s, v25.4s, v24.4s\n"
- "ldr q25, [x27, x15]\n"
- "fmax v17.4s, v27.4s, v23.4s\n"
- "ldr q27, [x26, x15]\n"
- "fmax v16.4s, v24.4s, v22.4s\n"
- "ldr q24, [x24, x15]\n"
- "ldr q23, [x23, x15]\n"
- "subs x20, x20, #0x1\n"
+ "ldr q29, [x24, x14]\n"
+ "ldr q28, [x21, x14]\n"
+ "fmax v17.4s, v23.4s, v27.4s\n"
+ "fmax v16.4s, v25.4s, v22.4s\n"
+ "ldr q27, [x25, x14]\n"
+ "ldr q26, [x28, x14]\n"
"fmax v19.4s, v21.4s, v19.4s\n"
- "ldr q22, [x21, x15]\n"
"fmax v18.4s, v18.4s, v21.4s\n"
- "fmax v17.4s, v17.4s, v20.4s\n"
- "add x15, x15, #0x10\n"
- "fmax v16.4s, v16.4s, v20.4s\n"
- "str q19, [x14, x12]\n"
- "str q18, [x13, x12]\n"
- "str q17, [x11, x12]\n"
- "str q16, [x10, x12]\n"
- "add x12, x12, #0x10\n"
+ "ldr q25, [x23, x14]\n"
+ "ldr q24, [x26, x14]\n"
+ "fmax v17.4s, v20.4s, v17.4s\n"
+ "fmax v16.4s, v20.4s, v16.4s\n"
+ "ldr q23, [x22, x14]\n"
+ "ldr q22, [x20, x14]\n"
+ "add x14, x14, #0x10\n"
+ "str q19, [x13, x11]\n"
+ "str q18, [x12, x11]\n"
+ "str q17, [x10, x11]\n"
+ "str q16, [x9, x11]\n"
+ "add x11, x11, #0x10\n"
"bgt 1b\n"
"2:" // Vector: Tail
"fmax v21.4s, v30.4s, v29.4s\n"
"fmax v20.4s, v29.4s, v28.4s\n"
"fmax v19.4s, v27.4s, v26.4s\n"
"fmax v18.4s, v25.4s, v24.4s\n"
- "fmax v17.4s, v27.4s, v23.4s\n"
- "fmax v16.4s, v24.4s, v22.4s\n"
+ "fmax v17.4s, v23.4s, v27.4s\n"
+ "fmax v16.4s, v25.4s, v22.4s\n"
"fmax v19.4s, v21.4s, v19.4s\n"
"fmax v18.4s, v18.4s, v21.4s\n"
- "str q19, [x14, x12]\n"
- "fmax v17.4s, v17.4s, v20.4s\n"
- "fmax v16.4s, v16.4s, v20.4s\n"
- "str q18, [x13, x12]\n"
- "str q17, [x11, x12]\n"
- "str q16, [x10, x12]\n"
- "add x12, x12, #0x10\n"
- "cbz x16, 4f\n"
+ "str q19, [x13, x11]\n"
+ "fmax v17.4s, v20.4s, v17.4s\n"
+ "fmax v16.4s, v20.4s, v16.4s\n"
+ "str q18, [x12, x11]\n"
+ "str q17, [x10, x11]\n"
+ "str q16, [x9, x11]\n"
+ "add x11, x11, #0x10\n"
+ "cbz x15, 4f\n"
"3:" // Oddments
- "ldr s30, [x28, x15]\n"
- "ldr s29, [x25, x15]\n"
+ "ldr s30, [x27, x14]\n"
+ "ldr s29, [x24, x14]\n"
"fmax v21.4s, v30.4s, v29.4s\n"
- "subs x16, x16, #0x1\n"
- "ldr s28, [x22, x15]\n"
- "ldr s27, [x26, x15]\n"
+ "subs x15, x15, #0x1\n"
+ "ldr s28, [x21, x14]\n"
+ "ldr s27, [x25, x14]\n"
"fmax v20.4s, v29.4s, v28.4s\n"
- "ldr s26, [x9, x15]\n"
- "ldr s25, [x27, x15]\n"
+ "ldr s26, [x28, x14]\n"
+ "ldr s25, [x23, x14]\n"
"fmax v19.4s, v27.4s, v26.4s\n"
"fmax v19.4s, v21.4s, v19.4s\n"
- "ldr s24, [x24, x15]\n"
- "ldr s23, [x23, x15]\n"
+ "ldr s24, [x26, x14]\n"
+ "ldr s23, [x22, x14]\n"
"fmax v18.4s, v25.4s, v24.4s\n"
- "fmax v17.4s, v27.4s, v23.4s\n"
- "ldr s22, [x21, x15]\n"
- "fmax v16.4s, v24.4s, v22.4s\n"
- "add x15, x15, #0x4\n"
+ "fmax v17.4s, v23.4s, v27.4s\n"
+ "ldr s22, [x20, x14]\n"
+ "fmax v16.4s, v25.4s, v22.4s\n"
+ "add x14, x14, #0x4\n"
"fmax v18.4s, v18.4s, v21.4s\n"
- "fmax v17.4s, v17.4s, v20.4s\n"
- "fmax v16.4s, v16.4s, v20.4s\n"
- "str s19, [x14, x12]\n"
- "str s18, [x13, x12]\n"
- "str s17, [x11, x12]\n"
- "str s16, [x10, x12]\n"
- "add x12, x12, #0x4\n"
+ "fmax v17.4s, v20.4s, v17.4s\n"
+ "fmax v16.4s, v20.4s, v16.4s\n"
+ "str s19, [x13, x11]\n"
+ "str s18, [x12, x11]\n"
+ "str s17, [x10, x11]\n"
+ "str s16, [x9, x11]\n"
+ "add x11, x11, #0x4\n"
"bgt 3b\n"
"4:" // End
:
: [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
- : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_generic_depthfirst/generic.cpp
index 4f1af09e08..4752057943 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,253 +41,253 @@ void a64_fp32_nhwc_max_generic_depthfirst_impl(
{
__asm__ __volatile__(
"cmp %x[n_channels], #0x10\n"
- "mov x9, #0x0\n"
- "mov x28, #0x10\n" // cntb _, ALL, #1
- "mov x27, #0x20\n" // cntb _, ALL, #2
- "mov x26, #0x30\n" // cntb _, ALL, #3
+ "mov x28, #0x0\n"
+ "mov x27, #0x10\n" // cntb _, ALL, #1
+ "mov x26, #0x20\n" // cntb _, ALL, #2
+ "mov x25, #0x30\n" // cntb _, ALL, #3
"blt 7f\n"
"1:" // 4-vectors of channels
- "mov w20, #0xff800000\n"
- "lsr x25, %x[n_valid_cells], #0x2\n"
- "dup v8.4s, w20\n"
- "dup v7.4s, w20\n"
- "dup v6.4s, w20\n"
- "dup v5.4s, w20\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 4f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldr q4, [x24, x9]\n"
- "subs x25, x25, #0x1\n"
- "ldr q3, [x23, x9]\n"
- "ldr q2, [x24, x28]\n"
- "ldr q1, [x23, x28]\n"
- "ldr q0, [x24, x27]\n"
- "ldr q31, [x23, x27]\n"
- "ldr q30, [x24, x26]\n"
- "ldr q29, [x23, x26]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ldr q28, [x22, x9]\n"
- "ldr q22, [x21, x9]\n"
- "ldr q27, [x22, x28]\n"
- "ldr q21, [x21, x28]\n"
- "ldr q26, [x22, x27]\n"
- "ldr q20, [x21, x27]\n"
- "ldr q25, [x22, x26]\n"
- "ldr q24, [x21, x26]\n"
+ "mov w19, #0xff800000\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
+ "dup v6.4s, w19\n"
+ "dup v5.4s, w19\n"
+ "dup v4.4s, w19\n"
+ "dup v3.4s, w19\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 4f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "ldr q2, [x23, x28]\n"
+ "ldr q1, [x22, x28]\n"
+ "ldr q0, [x21, x28]\n"
+ "ldr q31, [x20, x28]\n"
+ "ldr q30, [x23, x27]\n"
+ "ldr q22, [x22, x27]\n"
+ "ldr q29, [x21, x27]\n"
+ "ldr q28, [x20, x27]\n"
+ "ldr q27, [x23, x26]\n"
+ "ldr q21, [x22, x26]\n"
+ "ldr q26, [x21, x26]\n"
+ "ldr q17, [x20, x26]\n"
+ "ldr q25, [x23, x25]\n"
+ "ldr q20, [x22, x25]\n"
+ "ldr q24, [x21, x25]\n"
+ "ldr q16, [x20, x25]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
- "fmax v23.4s, v4.4s, v3.4s\n"
- "fmax v19.4s, v28.4s, v22.4s\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldr q4, [x24, x9]\n"
- "ldr q3, [x23, x9]\n"
- "fmax v22.4s, v2.4s, v1.4s\n"
- "ldr q2, [x24, x28]\n"
- "fmax v18.4s, v27.4s, v21.4s\n"
- "ldr q1, [x23, x28]\n"
- "fmax v21.4s, v0.4s, v31.4s\n"
- "ldr q0, [x24, x27]\n"
- "fmax v17.4s, v26.4s, v20.4s\n"
- "ldr q31, [x23, x27]\n"
- "fmax v20.4s, v30.4s, v29.4s\n"
- "ldr q30, [x24, x26]\n"
- "fmax v16.4s, v25.4s, v24.4s\n"
- "ldr q29, [x23, x26]\n"
+ "fmax v23.4s, v2.4s, v1.4s\n"
+ "fmax v19.4s, v0.4s, v31.4s\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "fmax v22.4s, v30.4s, v22.4s\n"
+ "fmax v18.4s, v29.4s, v28.4s\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "fmax v21.4s, v27.4s, v21.4s\n"
+ "fmax v17.4s, v26.4s, v17.4s\n"
+ "ldr q2, [x23, x28]\n"
+ "ldr q1, [x22, x28]\n"
+ "fmax v20.4s, v25.4s, v20.4s\n"
+ "fmax v16.4s, v24.4s, v16.4s\n"
+ "ldr q0, [x21, x28]\n"
+ "ldr q31, [x20, x28]\n"
"fmax v19.4s, v23.4s, v19.4s\n"
"fmax v18.4s, v22.4s, v18.4s\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "ldr q28, [x22, x9]\n"
- "ldr q22, [x21, x9]\n"
+ "ldr q30, [x23, x27]\n"
+ "ldr q22, [x22, x27]\n"
"fmax v17.4s, v21.4s, v17.4s\n"
"fmax v16.4s, v20.4s, v16.4s\n"
- "ldr q27, [x22, x28]\n"
- "ldr q21, [x21, x28]\n"
- "subs x25, x25, #0x1\n"
- "fmax v8.4s, v8.4s, v19.4s\n"
- "ldr q26, [x22, x27]\n"
- "ldr q20, [x21, x27]\n"
- "fmax v7.4s, v7.4s, v18.4s\n"
- "fmax v6.4s, v6.4s, v17.4s\n"
- "ldr q25, [x22, x26]\n"
- "ldr q24, [x21, x26]\n"
- "fmax v5.4s, v5.4s, v16.4s\n"
- "add x20, x20, #0x20\n"
+ "ldr q29, [x21, x27]\n"
+ "ldr q28, [x20, x27]\n"
+ "fmax v6.4s, v6.4s, v19.4s\n"
+ "fmax v5.4s, v5.4s, v18.4s\n"
+ "ldr q27, [x23, x26]\n"
+ "ldr q21, [x22, x26]\n"
+ "fmax v4.4s, v4.4s, v17.4s\n"
+ "fmax v3.4s, v3.4s, v16.4s\n"
+ "ldr q26, [x21, x26]\n"
+ "ldr q17, [x20, x26]\n"
+ "ldr q25, [x23, x25]\n"
+ "ldr q20, [x22, x25]\n"
+ "ldr q24, [x21, x25]\n"
+ "ldr q16, [x20, x25]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
- "fmax v23.4s, v4.4s, v3.4s\n"
- "fmax v19.4s, v28.4s, v22.4s\n"
- "fmax v22.4s, v2.4s, v1.4s\n"
- "fmax v18.4s, v27.4s, v21.4s\n"
- "fmax v21.4s, v0.4s, v31.4s\n"
- "fmax v17.4s, v26.4s, v20.4s\n"
- "fmax v20.4s, v30.4s, v29.4s\n"
- "fmax v16.4s, v25.4s, v24.4s\n"
+ "fmax v23.4s, v2.4s, v1.4s\n"
+ "fmax v19.4s, v0.4s, v31.4s\n"
+ "fmax v22.4s, v30.4s, v22.4s\n"
+ "fmax v18.4s, v29.4s, v28.4s\n"
+ "fmax v21.4s, v27.4s, v21.4s\n"
+ "fmax v17.4s, v26.4s, v17.4s\n"
+ "fmax v20.4s, v25.4s, v20.4s\n"
+ "fmax v16.4s, v24.4s, v16.4s\n"
"fmax v19.4s, v23.4s, v19.4s\n"
"fmax v18.4s, v22.4s, v18.4s\n"
"fmax v17.4s, v21.4s, v17.4s\n"
"fmax v16.4s, v20.4s, v16.4s\n"
- "fmax v8.4s, v8.4s, v19.4s\n"
- "fmax v7.4s, v7.4s, v18.4s\n"
- "fmax v6.4s, v6.4s, v17.4s\n"
- "fmax v5.4s, v5.4s, v16.4s\n"
+ "fmax v6.4s, v6.4s, v19.4s\n"
+ "fmax v5.4s, v5.4s, v18.4s\n"
+ "fmax v4.4s, v4.4s, v17.4s\n"
+ "fmax v3.4s, v3.4s, v16.4s\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ldr q4, [x24, x9]\n"
- "subs x21, x21, #0x1\n"
- "fmax v8.4s, v8.4s, v4.4s\n"
- "ldr q2, [x24, x28]\n"
- "ldr q0, [x24, x27]\n"
- "fmax v7.4s, v7.4s, v2.4s\n"
- "fmax v6.4s, v6.4s, v0.4s\n"
- "ldr q30, [x24, x26]\n"
+ "ldr x23, [x19], #0x8\n"
+ "ldr q2, [x23, x28]\n"
+ "subs x20, x20, #0x1\n"
+ "fmax v6.4s, v6.4s, v2.4s\n"
+ "ldr q30, [x23, x27]\n"
+ "ldr q27, [x23, x26]\n"
"fmax v5.4s, v5.4s, v30.4s\n"
+ "fmax v4.4s, v4.4s, v27.4s\n"
+ "ldr q25, [x23, x25]\n"
+ "fmax v3.4s, v3.4s, v25.4s\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
"sub %x[n_channels], %x[n_channels], #0x10\n"
"cmp %x[n_channels], #0x10\n"
- "str q8, [%x[outptr], x9]\n"
- "str q7, [%x[outptr], x28]\n"
- "add x9, x9, #0x40\n"
+ "str q6, [%x[outptr], x28]\n"
+ "str q5, [%x[outptr], x27]\n"
"add x28, x28, #0x40\n"
- "str q6, [%x[outptr], x27]\n"
"add x27, x27, #0x40\n"
- "str q5, [%x[outptr], x26]\n"
+ "str q4, [%x[outptr], x26]\n"
"add x26, x26, #0x40\n"
+ "str q3, [%x[outptr], x25]\n"
+ "add x25, x25, #0x40\n"
"bge 1b\n"
"cbz %x[n_channels], 25f\n"
"7:" // Single vector of channels
"cmp %x[n_channels], #0x4\n"
"blt 14f\n"
"8:" // Single vector of channels: Loop
- "mov w20, #0xff800000\n"
- "lsr x25, %x[n_valid_cells], #0x2\n"
- "dup v8.4s, w20\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 11f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldr q4, [x24, x9]\n"
- "subs x25, x25, #0x1\n"
- "ldr q3, [x23, x9]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ldr q28, [x22, x9]\n"
- "ldr q22, [x21, x9]\n"
+ "mov w19, #0xff800000\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
+ "dup v6.4s, w19\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 11f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "ldr q2, [x23, x28]\n"
+ "ldr q1, [x22, x28]\n"
+ "ldr q0, [x21, x28]\n"
+ "ldr q31, [x20, x28]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
- "fmax v23.4s, v4.4s, v3.4s\n"
- "fmax v19.4s, v28.4s, v22.4s\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldr q4, [x24, x9]\n"
- "ldr q3, [x23, x9]\n"
+ "fmax v23.4s, v2.4s, v1.4s\n"
+ "fmax v19.4s, v0.4s, v31.4s\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
"fmax v19.4s, v23.4s, v19.4s\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "subs x25, x25, #0x1\n"
- "ldr q28, [x22, x9]\n"
- "ldr q22, [x21, x9]\n"
- "fmax v8.4s, v8.4s, v19.4s\n"
- "add x20, x20, #0x20\n"
+ "subs x24, x24, #0x1\n"
+ "fmax v6.4s, v6.4s, v19.4s\n"
+ "add x19, x19, #0x20\n"
+ "ldr q2, [x23, x28]\n"
+ "ldr q1, [x22, x28]\n"
+ "ldr q0, [x21, x28]\n"
+ "ldr q31, [x20, x28]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
- "fmax v23.4s, v4.4s, v3.4s\n"
- "fmax v19.4s, v28.4s, v22.4s\n"
+ "fmax v23.4s, v2.4s, v1.4s\n"
+ "fmax v19.4s, v0.4s, v31.4s\n"
"fmax v19.4s, v23.4s, v19.4s\n"
- "fmax v8.4s, v8.4s, v19.4s\n"
+ "fmax v6.4s, v6.4s, v19.4s\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ldr q4, [x24, x9]\n"
- "subs x21, x21, #0x1\n"
- "fmax v8.4s, v8.4s, v4.4s\n"
+ "ldr x23, [x19], #0x8\n"
+ "ldr q2, [x23, x28]\n"
+ "subs x20, x20, #0x1\n"
+ "fmax v6.4s, v6.4s, v2.4s\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
"sub %x[n_channels], %x[n_channels], #0x4\n"
"cmp %x[n_channels], #0x4\n"
- "str q8, [%x[outptr], x9]\n"
- "add x9, x9, #0x10\n"
+ "str q6, [%x[outptr], x28]\n"
+ "add x28, x28, #0x10\n"
"bge 8b\n"
"cbz %x[n_channels], 25f\n"
"14:" // Oddments
- "mov w20, #0xff800000\n"
- "lsr x25, %x[n_valid_cells], #0x2\n"
- "dup v8.4s, w20\n"
- "add %x[outptr], %x[outptr], x9\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 18f\n"
+ "mov w19, #0xff800000\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
+ "dup v6.4s, w19\n"
+ "add %x[outptr], %x[outptr], x28\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 18f\n"
"15:" // Oddments: 4 inputs loop
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "add x24, x24, x9\n"
- "add x23, x23, x9\n"
- "add x22, x22, x9\n"
- "movi v4.16b, #0x0\n"
- "movi v3.16b, #0x0\n"
- "add x21, x21, x9\n"
- "movi v28.16b, #0x0\n"
- "movi v22.16b, #0x0\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "add x23, x23, x28\n"
+ "add x22, x22, x28\n"
+ "add x21, x21, x28\n"
+ "movi v2.16b, #0x0\n"
+ "movi v1.16b, #0x0\n"
+ "add x20, x20, x28\n"
+ "movi v0.16b, #0x0\n"
+ "movi v31.16b, #0x0\n"
"tbz %x[n_channels], #1, 16f\n"
- "ldr d4, [x24], #0x8\n"
- "ldr d3, [x23], #0x8\n"
- "ldr d28, [x22], #0x8\n"
- "ldr d22, [x21], #0x8\n"
+ "ldr d2, [x23], #0x8\n"
+ "ldr d1, [x22], #0x8\n"
+ "ldr d0, [x21], #0x8\n"
+ "ldr d31, [x20], #0x8\n"
"tbz %x[n_channels], #0, 17f\n"
- "ld1 { v4.s }[2], [x24], #0x4\n"
- "ld1 { v3.s }[2], [x23], #0x4\n"
- "ld1 { v28.s }[2], [x22], #0x4\n"
- "ld1 { v22.s }[2], [x21], #0x4\n"
+ "ld1 { v2.s }[2], [x23], #0x4\n"
+ "ld1 { v1.s }[2], [x22], #0x4\n"
+ "ld1 { v0.s }[2], [x21], #0x4\n"
+ "ld1 { v31.s }[2], [x20], #0x4\n"
"b 17f\n"
"16:" // Oddments: 4 inputs loop: Load: Bit 1: Unset
"tbz %x[n_channels], #0, 17f\n"
- "ldr s4, [x24], #0x4\n"
- "ldr s3, [x23], #0x4\n"
- "ldr s28, [x22], #0x4\n"
- "ldr s22, [x21], #0x4\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr s1, [x22], #0x4\n"
+ "ldr s0, [x21], #0x4\n"
+ "ldr s31, [x20], #0x4\n"
"17:" // Oddments: 4 inputs loop: Load: Bit 1: End
- "fmax v23.4s, v4.4s, v3.4s\n"
- "fmax v19.4s, v28.4s, v22.4s\n"
- "subs x25, x25, #0x1\n"
+ "fmax v23.4s, v2.4s, v1.4s\n"
+ "fmax v19.4s, v0.4s, v31.4s\n"
+ "subs x24, x24, #0x1\n"
"fmax v19.4s, v23.4s, v19.4s\n"
- "fmax v8.4s, v8.4s, v19.4s\n"
+ "fmax v6.4s, v6.4s, v19.4s\n"
"bgt 15b\n"
"18:" // Oddments: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 22f\n"
"19:" // Oddments: Single input loop
- "ldr x24, [x20], #0x8\n"
- "add x24, x24, x9\n"
- "movi v4.16b, #0x0\n"
+ "ldr x23, [x19], #0x8\n"
+ "add x23, x23, x28\n"
+ "movi v2.16b, #0x0\n"
"tbz %x[n_channels], #1, 20f\n"
- "ldr d4, [x24], #0x8\n"
+ "ldr d2, [x23], #0x8\n"
"tbz %x[n_channels], #0, 21f\n"
- "ld1 { v4.s }[2], [x24], #0x4\n"
+ "ld1 { v2.s }[2], [x23], #0x4\n"
"b 21f\n"
"20:" // Oddments: Single input loop: Load: Bit 1: Unset
"tbz %x[n_channels], #0, 21f\n"
- "ldr s4, [x24], #0x4\n"
+ "ldr s2, [x23], #0x4\n"
"21:" // Oddments: Single input loop: Load: Bit 1: End
- "subs x21, x21, #0x1\n"
- "fmax v8.4s, v8.4s, v4.4s\n"
+ "subs x20, x20, #0x1\n"
+ "fmax v6.4s, v6.4s, v2.4s\n"
"bgt 19b\n"
"22:" // Oddments: Single input loop: End
"tbz %x[n_channels], #1, 23f\n"
- "st1 { v8.d }[0], [%x[outptr]], #0x8\n"
+ "st1 { v6.d }[0], [%x[outptr]], #0x8\n"
"tbz %x[n_channels], #0, 24f\n"
- "st1 { v8.s }[2], [%x[outptr]], #0x4\n"
+ "st1 { v6.s }[2], [%x[outptr]], #0x4\n"
"b 24f\n"
"23:" // Oddments: Store: Bit 1: Unset
"tbz %x[n_channels], #0, 24f\n"
- "st1 { v8.s }[0], [%x[outptr]], #0x4\n"
+ "st1 { v6.s }[0], [%x[outptr]], #0x4\n"
"24:" // Oddments: Store: Bit 1: End
"25:" // End
: [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
: [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
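Aside: the max kernels above seed their accumulators with 0xff800000 ("mov w19, #0xff800000" followed by "dup v6.4s, w19"), the IEEE-754 bit pattern of negative infinity, so the first fmax always selects real data even when every input is negative. A scalar sketch of the same seed, illustrative only:

#include <cstdint>
#include <cstring>

static float negative_infinity_seed()
{
    const uint32_t bits = 0xff800000u;   // sign = 1, exponent all-ones, mantissa 0
    float f;
    std::memcpy(&f, &bits, sizeof(f));   // bit-exact equivalent of the dup from w19
    return f;                            // -inf
}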
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_avg_generic_depthfirst/generic.cpp
index 5a7e5f981b..8d6d73ac84 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -96,16 +96,16 @@ void a64_s8_nhwc_avg_generic_depthfirst_impl(
__asm__ __volatile__(
"cmp %x[n_channels], #0x40\n"
- "mov x27, #0x0\n"
- "mov x26, #0x10\n" // cntb _, ALL, #1
- "mov x25, #0x20\n" // cntb _, ALL, #2
- "mov x24, #0x30\n" // cntb _, ALL, #3
+ "mov x26, #0x0\n"
+ "mov x25, #0x10\n" // cntb _, ALL, #1
+ "mov x24, #0x20\n" // cntb _, ALL, #2
+ "mov x23, #0x30\n" // cntb _, ALL, #3
"blt 7f\n"
"1:" // 4-vectors of channels
- "lsr x23, %x[n_valid_cells], #0x1\n"
+ "lsr x22, %x[n_valid_cells], #0x1\n"
"movi v15.4s, #0x0\n"
"movi v14.4s, #0x0\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"movi v13.4s, #0x0\n"
"movi v12.4s, #0x0\n"
"movi v11.4s, #0x0\n"
@@ -120,43 +120,43 @@ void a64_s8_nhwc_avg_generic_depthfirst_impl(
"movi v2.4s, #0x0\n"
"movi v1.4s, #0x0\n"
"movi v0.4s, #0x0\n"
- "cbz x23, 4f\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "ldr q31, [x22, x27]\n"
- "subs x23, x23, #0x1\n"
- "add x20, x20, #0x10\n"
- "ldr q30, [x21, x27]\n"
- "ldr q29, [x22, x26]\n"
- "ldr q28, [x21, x26]\n"
- "ldr q27, [x22, x25]\n"
- "ldr q26, [x21, x25]\n"
- "ldr q25, [x22, x24]\n"
- "ldr q24, [x21, x24]\n"
+ "cbz x22, 4f\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
+ "add x19, x19, #0x10\n"
+ "ldr q31, [x21, x26]\n"
+ "ldr q30, [x20, x26]\n"
+ "ldr q29, [x21, x25]\n"
+ "ldr q28, [x20, x25]\n"
+ "ldr q27, [x21, x24]\n"
+ "ldr q26, [x20, x24]\n"
+ "ldr q25, [x21, x23]\n"
+ "ldr q24, [x20, x23]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 2 inputs loop
"saddl v23.8h, v31.8b, v30.8b\n"
"saddl2 v22.8h, v31.16b, v30.16b\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "ldr q31, [x22, x27]\n"
- "ldr q30, [x21, x27]\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
"saddl v21.8h, v29.8b, v28.8b\n"
"saddl2 v20.8h, v29.16b, v28.16b\n"
- "ldr q29, [x22, x26]\n"
- "ldr q28, [x21, x26]\n"
+ "add x19, x19, #0x10\n"
+ "ldr q31, [x21, x26]\n"
"saddl v19.8h, v27.8b, v26.8b\n"
"saddl2 v18.8h, v27.16b, v26.16b\n"
- "ldr q27, [x22, x25]\n"
- "ldr q26, [x21, x25]\n"
- "subs x23, x23, #0x1\n"
- "saddw v15.4s, v15.4s, v23.4h\n"
- "saddw2 v14.4s, v14.4s, v23.8h\n"
+ "ldr q30, [x20, x26]\n"
+ "ldr q29, [x21, x25]\n"
"saddl v17.8h, v25.8b, v24.8b\n"
"saddl2 v16.8h, v25.16b, v24.16b\n"
- "ldr q25, [x22, x24]\n"
- "add x20, x20, #0x10\n"
+ "ldr q28, [x20, x25]\n"
+ "ldr q27, [x21, x24]\n"
+ "saddw v15.4s, v15.4s, v23.4h\n"
+ "saddw2 v14.4s, v14.4s, v23.8h\n"
+ "ldr q26, [x20, x24]\n"
+ "ldr q25, [x21, x23]\n"
"saddw v13.4s, v13.4s, v22.4h\n"
"saddw2 v12.4s, v12.4s, v22.8h\n"
- "ldr q24, [x21, x24]\n"
+ "ldr q24, [x20, x23]\n"
"saddw v11.4s, v11.4s, v21.4h\n"
"saddw2 v10.4s, v10.4s, v21.8h\n"
"saddw v9.4s, v9.4s, v20.4h\n"
@@ -196,21 +196,21 @@ void a64_s8_nhwc_avg_generic_depthfirst_impl(
"saddw v1.4s, v1.4s, v16.4h\n"
"saddw2 v0.4s, v0.4s, v16.8h\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x1\n"
+ "ands x20, %x[n_valid_cells], #0x1\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x22, [x20], #0x8\n"
- "ldr q31, [x22, x27]\n"
+ "ldr x21, [x19], #0x8\n"
+ "ldr q31, [x21, x26]\n"
"sxtl v23.8h, v31.8b\n"
"sxtl2 v22.8h, v31.16b\n"
- "ldr q29, [x22, x26]\n"
- "ldr q27, [x22, x25]\n"
+ "ldr q29, [x21, x25]\n"
+ "ldr q27, [x21, x24]\n"
"sxtl v21.8h, v29.8b\n"
"sxtl2 v20.8h, v29.16b\n"
- "ldr q25, [x22, x24]\n"
+ "ldr q25, [x21, x23]\n"
"sxtl v19.8h, v27.8b\n"
"sxtl2 v18.8h, v27.16b\n"
- "subs x21, x21, #0x1\n"
+ "subs x20, x20, #0x1\n"
"sxtl v17.8h, v25.8b\n"
"sxtl2 v16.8h, v25.16b\n"
"saddw v15.4s, v15.4s, v23.4h\n"
@@ -311,47 +311,47 @@ void a64_s8_nhwc_avg_generic_depthfirst_impl(
"uzp1 v19.16b, v1.16b, v0.16b\n"
"uzp1 v16.16b, v23.16b, v16.16b\n"
"uzp1 v18.16b, v22.16b, v18.16b\n"
- "str q16, [%x[outptr], x27]\n"
- "add x27, x27, #0x40\n"
+ "str q16, [%x[outptr], x26]\n"
+ "add x26, x26, #0x40\n"
"uzp1 v17.16b, v21.16b, v17.16b\n"
"uzp1 v16.16b, v20.16b, v19.16b\n"
- "str q18, [%x[outptr], x26]\n"
- "add x26, x26, #0x40\n"
- "str q17, [%x[outptr], x25]\n"
+ "str q18, [%x[outptr], x25]\n"
"add x25, x25, #0x40\n"
- "str q16, [%x[outptr], x24]\n"
+ "str q17, [%x[outptr], x24]\n"
"add x24, x24, #0x40\n"
+ "str q16, [%x[outptr], x23]\n"
+ "add x23, x23, #0x40\n"
"bge 1b\n"
"cbz %x[n_channels], 43f\n"
"7:" // Single vector of channels
"cmp %x[n_channels], #0x10\n"
"blt 14f\n"
"8:" // Single vector of channels: Loop
- "lsr x23, %x[n_valid_cells], #0x1\n"
+ "lsr x22, %x[n_valid_cells], #0x1\n"
"movi v15.4s, #0x0\n"
"movi v14.4s, #0x0\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"movi v13.4s, #0x0\n"
"movi v12.4s, #0x0\n"
- "cbz x23, 11f\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "ldr q31, [x22, x27]\n"
- "subs x23, x23, #0x1\n"
- "add x20, x20, #0x10\n"
- "ldr q30, [x21, x27]\n"
+ "cbz x22, 11f\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
+ "add x19, x19, #0x10\n"
+ "ldr q31, [x21, x26]\n"
+ "ldr q30, [x20, x26]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 2 inputs loop
"saddl v23.8h, v31.8b, v30.8b\n"
"saddl2 v22.8h, v31.16b, v30.16b\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "ldr q31, [x22, x27]\n"
- "ldr q30, [x21, x27]\n"
- "subs x23, x23, #0x1\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
"saddw v15.4s, v15.4s, v23.4h\n"
"saddw2 v14.4s, v14.4s, v23.8h\n"
+ "add x19, x19, #0x10\n"
+ "ldr q31, [x21, x26]\n"
"saddw v13.4s, v13.4s, v22.4h\n"
"saddw2 v12.4s, v12.4s, v22.8h\n"
- "add x20, x20, #0x10\n"
+ "ldr q30, [x20, x26]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 2 inputs tail
"saddl v23.8h, v31.8b, v30.8b\n"
@@ -361,14 +361,14 @@ void a64_s8_nhwc_avg_generic_depthfirst_impl(
"saddw v13.4s, v13.4s, v22.4h\n"
"saddw2 v12.4s, v12.4s, v22.8h\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x1\n"
+ "ands x20, %x[n_valid_cells], #0x1\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x22, [x20], #0x8\n"
- "ldr q31, [x22, x27]\n"
+ "ldr x21, [x19], #0x8\n"
+ "ldr q31, [x21, x26]\n"
"sxtl v23.8h, v31.8b\n"
"sxtl2 v22.8h, v31.16b\n"
- "subs x21, x21, #0x1\n"
+ "subs x20, x20, #0x1\n"
"saddw v15.4s, v15.4s, v23.4h\n"
"saddw2 v14.4s, v14.4s, v23.8h\n"
"saddw v13.4s, v13.4s, v22.4h\n"
@@ -400,149 +400,149 @@ void a64_s8_nhwc_avg_generic_depthfirst_impl(
"uzp1 v23.16b, v15.16b, v14.16b\n"
"uzp1 v16.16b, v13.16b, v12.16b\n"
"uzp1 v16.16b, v23.16b, v16.16b\n"
- "str q16, [%x[outptr], x27]\n"
- "add x27, x27, #0x10\n"
+ "str q16, [%x[outptr], x26]\n"
+ "add x26, x26, #0x10\n"
"bge 8b\n"
"cbz %x[n_channels], 43f\n"
"14:" // Oddments
- "lsr x23, %x[n_valid_cells], #0x1\n"
- "add %x[outptr], %x[outptr], x27\n"
+ "lsr x22, %x[n_valid_cells], #0x1\n"
+ "add %x[outptr], %x[outptr], x26\n"
"movi v15.4s, #0x0\n"
"movi v14.4s, #0x0\n"
"movi v13.4s, #0x0\n"
"movi v12.4s, #0x0\n"
- "mov x20, %x[inptrs]\n"
- "cbz x23, 24f\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x22, 24f\n"
"15:" // Oddments: 2 inputs loop
- "ldp x22, x21, [x20, #0x0]\n"
- "add x20, x20, #0x10\n"
- "add x22, x22, x27\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "add x19, x19, #0x10\n"
+ "add x21, x21, x26\n"
"movi v31.16b, #0x0\n"
- "add x21, x21, x27\n"
+ "add x20, x20, x26\n"
"movi v30.16b, #0x0\n"
"tbz %x[n_channels], #3, 19f\n"
- "ldr d31, [x22], #0x8\n"
- "ldr d30, [x21], #0x8\n"
+ "ldr d31, [x21], #0x8\n"
+ "ldr d30, [x20], #0x8\n"
"tbz %x[n_channels], #2, 17f\n"
- "ld1 { v31.s }[2], [x22], #0x4\n"
- "ld1 { v30.s }[2], [x21], #0x4\n"
+ "ld1 { v31.s }[2], [x21], #0x4\n"
+ "ld1 { v30.s }[2], [x20], #0x4\n"
"tbz %x[n_channels], #1, 16f\n"
- "ld1 { v31.h }[6], [x22], #0x2\n"
- "ld1 { v30.h }[6], [x21], #0x2\n"
+ "ld1 { v31.h }[6], [x21], #0x2\n"
+ "ld1 { v30.h }[6], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v31.b }[14], [x22], #0x1\n"
- "ld1 { v30.b }[14], [x21], #0x1\n"
+ "ld1 { v31.b }[14], [x21], #0x1\n"
+ "ld1 { v30.b }[14], [x20], #0x1\n"
"b 23f\n"
"16:" // Oddments: 2 inputs loop: Load: Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v31.b }[12], [x22], #0x1\n"
- "ld1 { v30.b }[12], [x21], #0x1\n"
+ "ld1 { v31.b }[12], [x21], #0x1\n"
+ "ld1 { v30.b }[12], [x20], #0x1\n"
"b 23f\n"
"17:" // Oddments: 2 inputs loop: Load: Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 18f\n"
- "ld1 { v31.h }[4], [x22], #0x2\n"
- "ld1 { v30.h }[4], [x21], #0x2\n"
+ "ld1 { v31.h }[4], [x21], #0x2\n"
+ "ld1 { v30.h }[4], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v31.b }[10], [x22], #0x1\n"
- "ld1 { v30.b }[10], [x21], #0x1\n"
+ "ld1 { v31.b }[10], [x21], #0x1\n"
+ "ld1 { v30.b }[10], [x20], #0x1\n"
"b 23f\n"
"18:" // Oddments: 2 inputs loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v31.b }[8], [x22], #0x1\n"
- "ld1 { v30.b }[8], [x21], #0x1\n"
+ "ld1 { v31.b }[8], [x21], #0x1\n"
+ "ld1 { v30.b }[8], [x20], #0x1\n"
"b 23f\n"
"19:" // Oddments: 2 inputs loop: Load: Bit 3: Unset
"tbz %x[n_channels], #2, 21f\n"
- "ldr s31, [x22], #0x4\n"
- "ldr s30, [x21], #0x4\n"
+ "ldr s31, [x21], #0x4\n"
+ "ldr s30, [x20], #0x4\n"
"tbz %x[n_channels], #1, 20f\n"
- "ld1 { v31.h }[2], [x22], #0x2\n"
- "ld1 { v30.h }[2], [x21], #0x2\n"
+ "ld1 { v31.h }[2], [x21], #0x2\n"
+ "ld1 { v30.h }[2], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v31.b }[6], [x22], #0x1\n"
- "ld1 { v30.b }[6], [x21], #0x1\n"
+ "ld1 { v31.b }[6], [x21], #0x1\n"
+ "ld1 { v30.b }[6], [x20], #0x1\n"
"b 23f\n"
"20:" // Oddments: 2 inputs loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v31.b }[4], [x22], #0x1\n"
- "ld1 { v30.b }[4], [x21], #0x1\n"
+ "ld1 { v31.b }[4], [x21], #0x1\n"
+ "ld1 { v30.b }[4], [x20], #0x1\n"
"b 23f\n"
"21:" // Oddments: 2 inputs loop: Load: Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 22f\n"
- "ldr h31, [x22], #0x2\n"
- "ldr h30, [x21], #0x2\n"
+ "ldr h31, [x21], #0x2\n"
+ "ldr h30, [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v31.b }[2], [x22], #0x1\n"
- "ld1 { v30.b }[2], [x21], #0x1\n"
+ "ld1 { v31.b }[2], [x21], #0x1\n"
+ "ld1 { v30.b }[2], [x20], #0x1\n"
"b 23f\n"
"22:" // Oddments: 2 inputs loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ldr b31, [x22], #0x1\n"
- "ldr b30, [x21], #0x1\n"
+ "ldr b31, [x21], #0x1\n"
+ "ldr b30, [x20], #0x1\n"
"23:" // Oddments: 2 inputs loop: Load: Bit 3: End
"saddl v23.8h, v31.8b, v30.8b\n"
"saddl2 v22.8h, v31.16b, v30.16b\n"
- "subs x23, x23, #0x1\n"
+ "subs x22, x22, #0x1\n"
"saddw v15.4s, v15.4s, v23.4h\n"
"saddw2 v14.4s, v14.4s, v23.8h\n"
"saddw v13.4s, v13.4s, v22.4h\n"
"saddw2 v12.4s, v12.4s, v22.8h\n"
"bgt 15b\n"
"24:" // Oddments: After loop
- "ands x21, %x[n_valid_cells], #0x1\n"
+ "ands x20, %x[n_valid_cells], #0x1\n"
"beq 34f\n"
"25:" // Oddments: Single input loop
- "ldr x22, [x20], #0x8\n"
- "add x22, x22, x27\n"
+ "ldr x21, [x19], #0x8\n"
+ "add x21, x21, x26\n"
"movi v31.16b, #0x0\n"
"tbz %x[n_channels], #3, 29f\n"
- "ldr d31, [x22], #0x8\n"
+ "ldr d31, [x21], #0x8\n"
"tbz %x[n_channels], #2, 27f\n"
- "ld1 { v31.s }[2], [x22], #0x4\n"
+ "ld1 { v31.s }[2], [x21], #0x4\n"
"tbz %x[n_channels], #1, 26f\n"
- "ld1 { v31.h }[6], [x22], #0x2\n"
+ "ld1 { v31.h }[6], [x21], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v31.b }[14], [x22], #0x1\n"
+ "ld1 { v31.b }[14], [x21], #0x1\n"
"b 33f\n"
"26:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v31.b }[12], [x22], #0x1\n"
+ "ld1 { v31.b }[12], [x21], #0x1\n"
"b 33f\n"
"27:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 28f\n"
- "ld1 { v31.h }[4], [x22], #0x2\n"
+ "ld1 { v31.h }[4], [x21], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v31.b }[10], [x22], #0x1\n"
+ "ld1 { v31.b }[10], [x21], #0x1\n"
"b 33f\n"
"28:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v31.b }[8], [x22], #0x1\n"
+ "ld1 { v31.b }[8], [x21], #0x1\n"
"b 33f\n"
"29:" // Oddments: Single input loop: Load: Bit 3: Unset
"tbz %x[n_channels], #2, 31f\n"
- "ldr s31, [x22], #0x4\n"
+ "ldr s31, [x21], #0x4\n"
"tbz %x[n_channels], #1, 30f\n"
- "ld1 { v31.h }[2], [x22], #0x2\n"
+ "ld1 { v31.h }[2], [x21], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v31.b }[6], [x22], #0x1\n"
+ "ld1 { v31.b }[6], [x21], #0x1\n"
"b 33f\n"
"30:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v31.b }[4], [x22], #0x1\n"
+ "ld1 { v31.b }[4], [x21], #0x1\n"
"b 33f\n"
"31:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 32f\n"
- "ldr h31, [x22], #0x2\n"
+ "ldr h31, [x21], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v31.b }[2], [x22], #0x1\n"
+ "ld1 { v31.b }[2], [x21], #0x1\n"
"b 33f\n"
"32:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ldr b31, [x22], #0x1\n"
+ "ldr b31, [x21], #0x1\n"
"33:" // Oddments: Single input loop: Load: Bit 3: End
"sxtl v23.8h, v31.8b\n"
"sxtl2 v22.8h, v31.16b\n"
- "subs x21, x21, #0x1\n"
+ "subs x20, x20, #0x1\n"
"saddw v15.4s, v15.4s, v23.4h\n"
"saddw2 v14.4s, v14.4s, v23.8h\n"
"saddw v13.4s, v13.4s, v22.4h\n"
@@ -620,7 +620,7 @@ void a64_s8_nhwc_avg_generic_depthfirst_impl(
"43:" // End
: [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
: [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells), [rescale_ptr] "r" (&rescale_value), [shift_ptr] "r" (&shift_value)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26"
);
}
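Aside: the s8 average kernel above avoids overflow by widening twice. saddl/saddl2 add pairs of int8 vectors into int16 lanes, and saddw/saddw2 then fold those halves into the int32 accumulators (v15..v0). A scalar sketch of the per-lane arithmetic, with hypothetical names:

#include <cstdint>
#include <cstddef>

static void accumulate_s8(int32_t *acc, const int8_t *a, const int8_t *b, std::size_t n)
{
    for (std::size_t i = 0; i < n; ++i)
    {
        // saddl: int8 + int8 always fits in int16 (range -256..254)
        const int16_t widened = static_cast<int16_t>(a[i]) + static_cast<int16_t>(b[i]);
        acc[i] += widened;  // saddw: widen again into the int32 accumulator
    }
}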
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
index bd14408c74..1767e5ce3d 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -62,111 +62,111 @@ void a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
pad_left, pad_top, pad_right, pad_bottom);
__asm__ __volatile__(
- "ldr x16, [%x[args], %[offsetof_n_channels]]\n"
- "ldr x21, [%x[args], %[offsetof_outptrs]]\n"
- "cmp x16, #0x10\n"
- "mov x15, #0x0\n"
- "ldr x20, [%x[args], %[offsetof_inptrs]]\n"
- "ldp x14, x13, [x21, #0x0]\n"
- "mov x12, #0x0\n"
- "ldp x11, x10, [x21, #0x10]\n"
- "ldp x9, x28, [x20, #0x0]\n"
- "ldp x27, x26, [x20, #0x10]\n"
- "ldp x25, x24, [x20, #0x20]\n"
- "ldp x23, x22, [x20, #0x30]\n"
- "ldr x21, [x20, #0x40]\n"
+ "ldr x15, [%x[args], %[offsetof_n_channels]]\n"
+ "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
+ "cmp x15, #0x10\n"
+ "mov x14, #0x0\n"
+ "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+ "ldp x13, x12, [x20, #0x0]\n"
+ "mov x11, #0x0\n"
+ "ldp x10, x9, [x20, #0x10]\n"
+ "ldp x28, x27, [x19, #0x0]\n"
+ "ldp x26, x25, [x19, #0x10]\n"
+ "ldp x24, x23, [x19, #0x20]\n"
+ "ldp x22, x21, [x19, #0x30]\n"
+ "ldr x20, [x19, #0x40]\n"
"blt 3f\n"
- "ldr q30, [x28, x15]\n"
- "ldr q29, [x25, x15]\n"
- "lsr x20, x16, #0x4\n"
- "sub x16, x16, x20, LSL #4\n"
- "ldr q28, [x22, x15]\n"
- "ldr q27, [x26, x15]\n"
- "subs x20, x20, #0x1\n"
- "ldr q26, [x9, x15]\n"
- "ldr q25, [x27, x15]\n"
- "ldr q24, [x24, x15]\n"
- "ldr q23, [x23, x15]\n"
- "ldr q22, [x21, x15]\n"
- "add x15, x15, #0x10\n"
+ "lsr x19, x15, #0x4\n"
+ "sub x15, x15, x19, LSL #4\n"
+ "ldr q30, [x27, x14]\n"
+ "ldr q29, [x24, x14]\n"
+ "subs x19, x19, #0x1\n"
+ "ldr q28, [x21, x14]\n"
+ "ldr q27, [x25, x14]\n"
+ "ldr q26, [x28, x14]\n"
+ "ldr q25, [x23, x14]\n"
+ "ldr q24, [x26, x14]\n"
+ "ldr q23, [x22, x14]\n"
+ "ldr q22, [x20, x14]\n"
+ "add x14, x14, #0x10\n"
"beq 2f\n"
"1:" // Vector: Loop
"smax v21.16b, v30.16b, v29.16b\n"
- "ldr q30, [x28, x15]\n"
"smax v20.16b, v29.16b, v28.16b\n"
- "ldr q29, [x25, x15]\n"
- "ldr q28, [x22, x15]\n"
+ "subs x19, x19, #0x1\n"
+ "ldr q30, [x27, x14]\n"
"smax v19.16b, v27.16b, v26.16b\n"
- "ldr q26, [x9, x15]\n"
"smax v18.16b, v25.16b, v24.16b\n"
- "ldr q25, [x27, x15]\n"
- "smax v17.16b, v27.16b, v23.16b\n"
- "ldr q27, [x26, x15]\n"
- "smax v16.16b, v24.16b, v22.16b\n"
- "ldr q24, [x24, x15]\n"
- "ldr q23, [x23, x15]\n"
- "subs x20, x20, #0x1\n"
+ "ldr q29, [x24, x14]\n"
+ "ldr q28, [x21, x14]\n"
+ "smax v17.16b, v23.16b, v27.16b\n"
+ "smax v16.16b, v25.16b, v22.16b\n"
+ "ldr q27, [x25, x14]\n"
+ "ldr q26, [x28, x14]\n"
"smax v19.16b, v21.16b, v19.16b\n"
- "ldr q22, [x21, x15]\n"
"smax v18.16b, v18.16b, v21.16b\n"
- "smax v17.16b, v17.16b, v20.16b\n"
- "add x15, x15, #0x10\n"
- "smax v16.16b, v16.16b, v20.16b\n"
- "str q19, [x14, x12]\n"
- "str q18, [x13, x12]\n"
- "str q17, [x11, x12]\n"
- "str q16, [x10, x12]\n"
- "add x12, x12, #0x10\n"
+ "ldr q25, [x23, x14]\n"
+ "ldr q24, [x26, x14]\n"
+ "smax v17.16b, v20.16b, v17.16b\n"
+ "smax v16.16b, v20.16b, v16.16b\n"
+ "ldr q23, [x22, x14]\n"
+ "ldr q22, [x20, x14]\n"
+ "add x14, x14, #0x10\n"
+ "str q19, [x13, x11]\n"
+ "str q18, [x12, x11]\n"
+ "str q17, [x10, x11]\n"
+ "str q16, [x9, x11]\n"
+ "add x11, x11, #0x10\n"
"bgt 1b\n"
"2:" // Vector: Tail
"smax v21.16b, v30.16b, v29.16b\n"
"smax v20.16b, v29.16b, v28.16b\n"
"smax v19.16b, v27.16b, v26.16b\n"
"smax v18.16b, v25.16b, v24.16b\n"
- "smax v17.16b, v27.16b, v23.16b\n"
- "smax v16.16b, v24.16b, v22.16b\n"
+ "smax v17.16b, v23.16b, v27.16b\n"
+ "smax v16.16b, v25.16b, v22.16b\n"
"smax v19.16b, v21.16b, v19.16b\n"
"smax v18.16b, v18.16b, v21.16b\n"
- "str q19, [x14, x12]\n"
- "smax v17.16b, v17.16b, v20.16b\n"
- "smax v16.16b, v16.16b, v20.16b\n"
- "str q18, [x13, x12]\n"
- "str q17, [x11, x12]\n"
- "str q16, [x10, x12]\n"
- "add x12, x12, #0x10\n"
- "cbz x16, 4f\n"
+ "str q19, [x13, x11]\n"
+ "smax v17.16b, v20.16b, v17.16b\n"
+ "smax v16.16b, v20.16b, v16.16b\n"
+ "str q18, [x12, x11]\n"
+ "str q17, [x10, x11]\n"
+ "str q16, [x9, x11]\n"
+ "add x11, x11, #0x10\n"
+ "cbz x15, 4f\n"
"3:" // Oddments
- "ldr b30, [x28, x15]\n"
- "ldr b29, [x25, x15]\n"
+ "ldr b30, [x27, x14]\n"
+ "ldr b29, [x24, x14]\n"
"smax v21.16b, v30.16b, v29.16b\n"
- "subs x16, x16, #0x1\n"
- "ldr b28, [x22, x15]\n"
- "ldr b27, [x26, x15]\n"
+ "subs x15, x15, #0x1\n"
+ "ldr b28, [x21, x14]\n"
+ "ldr b27, [x25, x14]\n"
"smax v20.16b, v29.16b, v28.16b\n"
- "ldr b26, [x9, x15]\n"
- "ldr b25, [x27, x15]\n"
+ "ldr b26, [x28, x14]\n"
+ "ldr b25, [x23, x14]\n"
"smax v19.16b, v27.16b, v26.16b\n"
"smax v19.16b, v21.16b, v19.16b\n"
- "ldr b24, [x24, x15]\n"
- "ldr b23, [x23, x15]\n"
+ "ldr b24, [x26, x14]\n"
+ "ldr b23, [x22, x14]\n"
"smax v18.16b, v25.16b, v24.16b\n"
- "smax v17.16b, v27.16b, v23.16b\n"
- "ldr b22, [x21, x15]\n"
- "smax v16.16b, v24.16b, v22.16b\n"
- "add x15, x15, #0x1\n"
+ "smax v17.16b, v23.16b, v27.16b\n"
+ "ldr b22, [x20, x14]\n"
+ "smax v16.16b, v25.16b, v22.16b\n"
+ "add x14, x14, #0x1\n"
"smax v18.16b, v18.16b, v21.16b\n"
- "smax v17.16b, v17.16b, v20.16b\n"
- "smax v16.16b, v16.16b, v20.16b\n"
- "str b19, [x14, x12]\n"
- "str b18, [x13, x12]\n"
- "str b17, [x11, x12]\n"
- "str b16, [x10, x12]\n"
- "add x12, x12, #0x1\n"
+ "smax v17.16b, v20.16b, v17.16b\n"
+ "smax v16.16b, v20.16b, v16.16b\n"
+ "str b19, [x13, x11]\n"
+ "str b18, [x12, x11]\n"
+ "str b17, [x10, x11]\n"
+ "str b16, [x9, x11]\n"
+ "add x11, x11, #0x1\n"
"bgt 3b\n"
"4:" // End
:
: [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
- : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_generic_depthfirst/generic.cpp
index 6168a57ca4..9bf313646f 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,394 +41,394 @@ void a64_s8_nhwc_max_generic_depthfirst_impl(
{
__asm__ __volatile__(
"cmp %x[n_channels], #0x40\n"
- "mov x9, #0x0\n"
- "mov x28, #0x10\n" // cntb _, ALL, #1
- "mov x27, #0x20\n" // cntb _, ALL, #2
- "mov x26, #0x30\n" // cntb _, ALL, #3
+ "mov x28, #0x0\n"
+ "mov x27, #0x10\n" // cntb _, ALL, #1
+ "mov x26, #0x20\n" // cntb _, ALL, #2
+ "mov x25, #0x30\n" // cntb _, ALL, #3
"blt 7f\n"
"1:" // 4-vectors of channels
- "lsr x25, %x[n_valid_cells], #0x2\n"
- "movi v8.16b, #0x80\n"
- "movi v7.16b, #0x80\n"
- "mov x20, %x[inptrs]\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"movi v6.16b, #0x80\n"
"movi v5.16b, #0x80\n"
- "cbz x25, 4f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldr q4, [x24, x9]\n"
- "subs x25, x25, #0x1\n"
- "ldr q3, [x23, x9]\n"
- "ldr q2, [x24, x28]\n"
- "ldr q1, [x23, x28]\n"
- "ldr q0, [x24, x27]\n"
- "ldr q31, [x23, x27]\n"
- "ldr q30, [x24, x26]\n"
- "ldr q29, [x23, x26]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ldr q28, [x22, x9]\n"
- "ldr q22, [x21, x9]\n"
- "ldr q27, [x22, x28]\n"
- "ldr q21, [x21, x28]\n"
- "ldr q26, [x22, x27]\n"
- "ldr q20, [x21, x27]\n"
- "ldr q25, [x22, x26]\n"
- "ldr q24, [x21, x26]\n"
+ "mov x19, %x[inptrs]\n"
+ "movi v4.16b, #0x80\n"
+ "movi v3.16b, #0x80\n"
+ "cbz x24, 4f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "ldr q2, [x23, x28]\n"
+ "ldr q1, [x22, x28]\n"
+ "ldr q0, [x21, x28]\n"
+ "ldr q31, [x20, x28]\n"
+ "ldr q30, [x23, x27]\n"
+ "ldr q22, [x22, x27]\n"
+ "ldr q29, [x21, x27]\n"
+ "ldr q28, [x20, x27]\n"
+ "ldr q27, [x23, x26]\n"
+ "ldr q21, [x22, x26]\n"
+ "ldr q26, [x21, x26]\n"
+ "ldr q17, [x20, x26]\n"
+ "ldr q25, [x23, x25]\n"
+ "ldr q20, [x22, x25]\n"
+ "ldr q24, [x21, x25]\n"
+ "ldr q16, [x20, x25]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
- "smax v23.16b, v4.16b, v3.16b\n"
- "smax v19.16b, v28.16b, v22.16b\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldr q4, [x24, x9]\n"
- "ldr q3, [x23, x9]\n"
- "smax v22.16b, v2.16b, v1.16b\n"
- "ldr q2, [x24, x28]\n"
- "smax v18.16b, v27.16b, v21.16b\n"
- "ldr q1, [x23, x28]\n"
- "smax v21.16b, v0.16b, v31.16b\n"
- "ldr q0, [x24, x27]\n"
- "smax v17.16b, v26.16b, v20.16b\n"
- "ldr q31, [x23, x27]\n"
- "smax v20.16b, v30.16b, v29.16b\n"
- "ldr q30, [x24, x26]\n"
- "smax v16.16b, v25.16b, v24.16b\n"
- "ldr q29, [x23, x26]\n"
+ "smax v23.16b, v2.16b, v1.16b\n"
+ "smax v19.16b, v0.16b, v31.16b\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "smax v22.16b, v30.16b, v22.16b\n"
+ "smax v18.16b, v29.16b, v28.16b\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "smax v21.16b, v27.16b, v21.16b\n"
+ "smax v17.16b, v26.16b, v17.16b\n"
+ "ldr q2, [x23, x28]\n"
+ "ldr q1, [x22, x28]\n"
+ "smax v20.16b, v25.16b, v20.16b\n"
+ "smax v16.16b, v24.16b, v16.16b\n"
+ "ldr q0, [x21, x28]\n"
+ "ldr q31, [x20, x28]\n"
"smax v19.16b, v23.16b, v19.16b\n"
"smax v18.16b, v22.16b, v18.16b\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "ldr q28, [x22, x9]\n"
- "ldr q22, [x21, x9]\n"
+ "ldr q30, [x23, x27]\n"
+ "ldr q22, [x22, x27]\n"
"smax v17.16b, v21.16b, v17.16b\n"
"smax v16.16b, v20.16b, v16.16b\n"
- "ldr q27, [x22, x28]\n"
- "ldr q21, [x21, x28]\n"
- "subs x25, x25, #0x1\n"
- "smax v8.16b, v8.16b, v19.16b\n"
- "ldr q26, [x22, x27]\n"
- "ldr q20, [x21, x27]\n"
- "smax v7.16b, v7.16b, v18.16b\n"
- "smax v6.16b, v6.16b, v17.16b\n"
- "ldr q25, [x22, x26]\n"
- "ldr q24, [x21, x26]\n"
- "smax v5.16b, v5.16b, v16.16b\n"
- "add x20, x20, #0x20\n"
+ "ldr q29, [x21, x27]\n"
+ "ldr q28, [x20, x27]\n"
+ "smax v6.16b, v6.16b, v19.16b\n"
+ "smax v5.16b, v5.16b, v18.16b\n"
+ "ldr q27, [x23, x26]\n"
+ "ldr q21, [x22, x26]\n"
+ "smax v4.16b, v4.16b, v17.16b\n"
+ "smax v3.16b, v3.16b, v16.16b\n"
+ "ldr q26, [x21, x26]\n"
+ "ldr q17, [x20, x26]\n"
+ "ldr q25, [x23, x25]\n"
+ "ldr q20, [x22, x25]\n"
+ "ldr q24, [x21, x25]\n"
+ "ldr q16, [x20, x25]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
- "smax v23.16b, v4.16b, v3.16b\n"
- "smax v19.16b, v28.16b, v22.16b\n"
- "smax v22.16b, v2.16b, v1.16b\n"
- "smax v18.16b, v27.16b, v21.16b\n"
- "smax v21.16b, v0.16b, v31.16b\n"
- "smax v17.16b, v26.16b, v20.16b\n"
- "smax v20.16b, v30.16b, v29.16b\n"
- "smax v16.16b, v25.16b, v24.16b\n"
+ "smax v23.16b, v2.16b, v1.16b\n"
+ "smax v19.16b, v0.16b, v31.16b\n"
+ "smax v22.16b, v30.16b, v22.16b\n"
+ "smax v18.16b, v29.16b, v28.16b\n"
+ "smax v21.16b, v27.16b, v21.16b\n"
+ "smax v17.16b, v26.16b, v17.16b\n"
+ "smax v20.16b, v25.16b, v20.16b\n"
+ "smax v16.16b, v24.16b, v16.16b\n"
"smax v19.16b, v23.16b, v19.16b\n"
"smax v18.16b, v22.16b, v18.16b\n"
"smax v17.16b, v21.16b, v17.16b\n"
"smax v16.16b, v20.16b, v16.16b\n"
- "smax v8.16b, v8.16b, v19.16b\n"
- "smax v7.16b, v7.16b, v18.16b\n"
- "smax v6.16b, v6.16b, v17.16b\n"
- "smax v5.16b, v5.16b, v16.16b\n"
+ "smax v6.16b, v6.16b, v19.16b\n"
+ "smax v5.16b, v5.16b, v18.16b\n"
+ "smax v4.16b, v4.16b, v17.16b\n"
+ "smax v3.16b, v3.16b, v16.16b\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ldr q4, [x24, x9]\n"
- "subs x21, x21, #0x1\n"
- "smax v8.16b, v8.16b, v4.16b\n"
- "ldr q2, [x24, x28]\n"
- "ldr q0, [x24, x27]\n"
- "smax v7.16b, v7.16b, v2.16b\n"
- "smax v6.16b, v6.16b, v0.16b\n"
- "ldr q30, [x24, x26]\n"
+ "ldr x23, [x19], #0x8\n"
+ "ldr q2, [x23, x28]\n"
+ "subs x20, x20, #0x1\n"
+ "smax v6.16b, v6.16b, v2.16b\n"
+ "ldr q30, [x23, x27]\n"
+ "ldr q27, [x23, x26]\n"
"smax v5.16b, v5.16b, v30.16b\n"
+ "smax v4.16b, v4.16b, v27.16b\n"
+ "ldr q25, [x23, x25]\n"
+ "smax v3.16b, v3.16b, v25.16b\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
"sub %x[n_channels], %x[n_channels], #0x40\n"
"cmp %x[n_channels], #0x40\n"
- "str q8, [%x[outptr], x9]\n"
- "str q7, [%x[outptr], x28]\n"
- "add x9, x9, #0x40\n"
+ "str q6, [%x[outptr], x28]\n"
+ "str q5, [%x[outptr], x27]\n"
"add x28, x28, #0x40\n"
- "str q6, [%x[outptr], x27]\n"
"add x27, x27, #0x40\n"
- "str q5, [%x[outptr], x26]\n"
+ "str q4, [%x[outptr], x26]\n"
"add x26, x26, #0x40\n"
+ "str q3, [%x[outptr], x25]\n"
+ "add x25, x25, #0x40\n"
"bge 1b\n"
"cbz %x[n_channels], 43f\n"
"7:" // Single vector of channels
"cmp %x[n_channels], #0x10\n"
"blt 14f\n"
"8:" // Single vector of channels: Loop
- "lsr x25, %x[n_valid_cells], #0x2\n"
- "movi v8.16b, #0x80\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 11f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldr q4, [x24, x9]\n"
- "subs x25, x25, #0x1\n"
- "ldr q3, [x23, x9]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ldr q28, [x22, x9]\n"
- "ldr q22, [x21, x9]\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
+ "movi v6.16b, #0x80\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 11f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "ldr q2, [x23, x28]\n"
+ "ldr q1, [x22, x28]\n"
+ "ldr q0, [x21, x28]\n"
+ "ldr q31, [x20, x28]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
- "smax v23.16b, v4.16b, v3.16b\n"
- "smax v19.16b, v28.16b, v22.16b\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldr q4, [x24, x9]\n"
- "ldr q3, [x23, x9]\n"
+ "smax v23.16b, v2.16b, v1.16b\n"
+ "smax v19.16b, v0.16b, v31.16b\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
"smax v19.16b, v23.16b, v19.16b\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "subs x25, x25, #0x1\n"
- "ldr q28, [x22, x9]\n"
- "ldr q22, [x21, x9]\n"
- "smax v8.16b, v8.16b, v19.16b\n"
- "add x20, x20, #0x20\n"
+ "subs x24, x24, #0x1\n"
+ "smax v6.16b, v6.16b, v19.16b\n"
+ "add x19, x19, #0x20\n"
+ "ldr q2, [x23, x28]\n"
+ "ldr q1, [x22, x28]\n"
+ "ldr q0, [x21, x28]\n"
+ "ldr q31, [x20, x28]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
- "smax v23.16b, v4.16b, v3.16b\n"
- "smax v19.16b, v28.16b, v22.16b\n"
+ "smax v23.16b, v2.16b, v1.16b\n"
+ "smax v19.16b, v0.16b, v31.16b\n"
"smax v19.16b, v23.16b, v19.16b\n"
- "smax v8.16b, v8.16b, v19.16b\n"
+ "smax v6.16b, v6.16b, v19.16b\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ldr q4, [x24, x9]\n"
- "subs x21, x21, #0x1\n"
- "smax v8.16b, v8.16b, v4.16b\n"
+ "ldr x23, [x19], #0x8\n"
+ "ldr q2, [x23, x28]\n"
+ "subs x20, x20, #0x1\n"
+ "smax v6.16b, v6.16b, v2.16b\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
"sub %x[n_channels], %x[n_channels], #0x10\n"
"cmp %x[n_channels], #0x10\n"
- "str q8, [%x[outptr], x9]\n"
- "add x9, x9, #0x10\n"
+ "str q6, [%x[outptr], x28]\n"
+ "add x28, x28, #0x10\n"
"bge 8b\n"
"cbz %x[n_channels], 43f\n"
"14:" // Oddments
- "lsr x25, %x[n_valid_cells], #0x2\n"
- "add %x[outptr], %x[outptr], x9\n"
- "movi v8.16b, #0x80\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 24f\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
+ "add %x[outptr], %x[outptr], x28\n"
+ "movi v6.16b, #0x80\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 24f\n"
"15:" // Oddments: 4 inputs loop
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "add x24, x24, x9\n"
- "add x23, x23, x9\n"
- "add x22, x22, x9\n"
- "movi v4.16b, #0x0\n"
- "movi v3.16b, #0x0\n"
- "add x21, x21, x9\n"
- "movi v28.16b, #0x0\n"
- "movi v22.16b, #0x0\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "add x23, x23, x28\n"
+ "add x22, x22, x28\n"
+ "add x21, x21, x28\n"
+ "movi v2.16b, #0x0\n"
+ "movi v1.16b, #0x0\n"
+ "add x20, x20, x28\n"
+ "movi v0.16b, #0x0\n"
+ "movi v31.16b, #0x0\n"
"tbz %x[n_channels], #3, 19f\n"
- "ldr d4, [x24], #0x8\n"
- "ldr d3, [x23], #0x8\n"
- "ldr d28, [x22], #0x8\n"
- "ldr d22, [x21], #0x8\n"
+ "ldr d2, [x23], #0x8\n"
+ "ldr d1, [x22], #0x8\n"
+ "ldr d0, [x21], #0x8\n"
+ "ldr d31, [x20], #0x8\n"
"tbz %x[n_channels], #2, 17f\n"
- "ld1 { v4.s }[2], [x24], #0x4\n"
- "ld1 { v3.s }[2], [x23], #0x4\n"
- "ld1 { v28.s }[2], [x22], #0x4\n"
- "ld1 { v22.s }[2], [x21], #0x4\n"
+ "ld1 { v2.s }[2], [x23], #0x4\n"
+ "ld1 { v1.s }[2], [x22], #0x4\n"
+ "ld1 { v0.s }[2], [x21], #0x4\n"
+ "ld1 { v31.s }[2], [x20], #0x4\n"
"tbz %x[n_channels], #1, 16f\n"
- "ld1 { v4.h }[6], [x24], #0x2\n"
- "ld1 { v3.h }[6], [x23], #0x2\n"
- "ld1 { v28.h }[6], [x22], #0x2\n"
- "ld1 { v22.h }[6], [x21], #0x2\n"
+ "ld1 { v2.h }[6], [x23], #0x2\n"
+ "ld1 { v1.h }[6], [x22], #0x2\n"
+ "ld1 { v0.h }[6], [x21], #0x2\n"
+ "ld1 { v31.h }[6], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[14], [x24], #0x1\n"
- "ld1 { v3.b }[14], [x23], #0x1\n"
- "ld1 { v28.b }[14], [x22], #0x1\n"
- "ld1 { v22.b }[14], [x21], #0x1\n"
+ "ld1 { v2.b }[14], [x23], #0x1\n"
+ "ld1 { v1.b }[14], [x22], #0x1\n"
+ "ld1 { v0.b }[14], [x21], #0x1\n"
+ "ld1 { v31.b }[14], [x20], #0x1\n"
"b 23f\n"
"16:" // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[12], [x24], #0x1\n"
- "ld1 { v3.b }[12], [x23], #0x1\n"
- "ld1 { v28.b }[12], [x22], #0x1\n"
- "ld1 { v22.b }[12], [x21], #0x1\n"
+ "ld1 { v2.b }[12], [x23], #0x1\n"
+ "ld1 { v1.b }[12], [x22], #0x1\n"
+ "ld1 { v0.b }[12], [x21], #0x1\n"
+ "ld1 { v31.b }[12], [x20], #0x1\n"
"b 23f\n"
"17:" // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 18f\n"
- "ld1 { v4.h }[4], [x24], #0x2\n"
- "ld1 { v3.h }[4], [x23], #0x2\n"
- "ld1 { v28.h }[4], [x22], #0x2\n"
- "ld1 { v22.h }[4], [x21], #0x2\n"
+ "ld1 { v2.h }[4], [x23], #0x2\n"
+ "ld1 { v1.h }[4], [x22], #0x2\n"
+ "ld1 { v0.h }[4], [x21], #0x2\n"
+ "ld1 { v31.h }[4], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[10], [x24], #0x1\n"
- "ld1 { v3.b }[10], [x23], #0x1\n"
- "ld1 { v28.b }[10], [x22], #0x1\n"
- "ld1 { v22.b }[10], [x21], #0x1\n"
+ "ld1 { v2.b }[10], [x23], #0x1\n"
+ "ld1 { v1.b }[10], [x22], #0x1\n"
+ "ld1 { v0.b }[10], [x21], #0x1\n"
+ "ld1 { v31.b }[10], [x20], #0x1\n"
"b 23f\n"
"18:" // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[8], [x24], #0x1\n"
- "ld1 { v3.b }[8], [x23], #0x1\n"
- "ld1 { v28.b }[8], [x22], #0x1\n"
- "ld1 { v22.b }[8], [x21], #0x1\n"
+ "ld1 { v2.b }[8], [x23], #0x1\n"
+ "ld1 { v1.b }[8], [x22], #0x1\n"
+ "ld1 { v0.b }[8], [x21], #0x1\n"
+ "ld1 { v31.b }[8], [x20], #0x1\n"
"b 23f\n"
"19:" // Oddments: 4 inputs loop: Load: Bit 3: Unset
"tbz %x[n_channels], #2, 21f\n"
- "ldr s4, [x24], #0x4\n"
- "ldr s3, [x23], #0x4\n"
- "ldr s28, [x22], #0x4\n"
- "ldr s22, [x21], #0x4\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr s1, [x22], #0x4\n"
+ "ldr s0, [x21], #0x4\n"
+ "ldr s31, [x20], #0x4\n"
"tbz %x[n_channels], #1, 20f\n"
- "ld1 { v4.h }[2], [x24], #0x2\n"
- "ld1 { v3.h }[2], [x23], #0x2\n"
- "ld1 { v28.h }[2], [x22], #0x2\n"
- "ld1 { v22.h }[2], [x21], #0x2\n"
+ "ld1 { v2.h }[2], [x23], #0x2\n"
+ "ld1 { v1.h }[2], [x22], #0x2\n"
+ "ld1 { v0.h }[2], [x21], #0x2\n"
+ "ld1 { v31.h }[2], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[6], [x24], #0x1\n"
- "ld1 { v3.b }[6], [x23], #0x1\n"
- "ld1 { v28.b }[6], [x22], #0x1\n"
- "ld1 { v22.b }[6], [x21], #0x1\n"
+ "ld1 { v2.b }[6], [x23], #0x1\n"
+ "ld1 { v1.b }[6], [x22], #0x1\n"
+ "ld1 { v0.b }[6], [x21], #0x1\n"
+ "ld1 { v31.b }[6], [x20], #0x1\n"
"b 23f\n"
"20:" // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[4], [x24], #0x1\n"
- "ld1 { v3.b }[4], [x23], #0x1\n"
- "ld1 { v28.b }[4], [x22], #0x1\n"
- "ld1 { v22.b }[4], [x21], #0x1\n"
+ "ld1 { v2.b }[4], [x23], #0x1\n"
+ "ld1 { v1.b }[4], [x22], #0x1\n"
+ "ld1 { v0.b }[4], [x21], #0x1\n"
+ "ld1 { v31.b }[4], [x20], #0x1\n"
"b 23f\n"
"21:" // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 22f\n"
- "ldr h4, [x24], #0x2\n"
- "ldr h3, [x23], #0x2\n"
- "ldr h28, [x22], #0x2\n"
- "ldr h22, [x21], #0x2\n"
+ "ldr h2, [x23], #0x2\n"
+ "ldr h1, [x22], #0x2\n"
+ "ldr h0, [x21], #0x2\n"
+ "ldr h31, [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[2], [x24], #0x1\n"
- "ld1 { v3.b }[2], [x23], #0x1\n"
- "ld1 { v28.b }[2], [x22], #0x1\n"
- "ld1 { v22.b }[2], [x21], #0x1\n"
+ "ld1 { v2.b }[2], [x23], #0x1\n"
+ "ld1 { v1.b }[2], [x22], #0x1\n"
+ "ld1 { v0.b }[2], [x21], #0x1\n"
+ "ld1 { v31.b }[2], [x20], #0x1\n"
"b 23f\n"
"22:" // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ldr b4, [x24], #0x1\n"
- "ldr b3, [x23], #0x1\n"
- "ldr b28, [x22], #0x1\n"
- "ldr b22, [x21], #0x1\n"
+ "ldr b2, [x23], #0x1\n"
+ "ldr b1, [x22], #0x1\n"
+ "ldr b0, [x21], #0x1\n"
+ "ldr b31, [x20], #0x1\n"
"23:" // Oddments: 4 inputs loop: Load: Bit 3: End
- "smax v23.16b, v4.16b, v3.16b\n"
- "smax v19.16b, v28.16b, v22.16b\n"
- "subs x25, x25, #0x1\n"
+ "smax v23.16b, v2.16b, v1.16b\n"
+ "smax v19.16b, v0.16b, v31.16b\n"
+ "subs x24, x24, #0x1\n"
"smax v19.16b, v23.16b, v19.16b\n"
- "smax v8.16b, v8.16b, v19.16b\n"
+ "smax v6.16b, v6.16b, v19.16b\n"
"bgt 15b\n"
"24:" // Oddments: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 34f\n"
"25:" // Oddments: Single input loop
- "ldr x24, [x20], #0x8\n"
- "add x24, x24, x9\n"
- "movi v4.16b, #0x0\n"
+ "ldr x23, [x19], #0x8\n"
+ "add x23, x23, x28\n"
+ "movi v2.16b, #0x0\n"
"tbz %x[n_channels], #3, 29f\n"
- "ldr d4, [x24], #0x8\n"
+ "ldr d2, [x23], #0x8\n"
"tbz %x[n_channels], #2, 27f\n"
- "ld1 { v4.s }[2], [x24], #0x4\n"
+ "ld1 { v2.s }[2], [x23], #0x4\n"
"tbz %x[n_channels], #1, 26f\n"
- "ld1 { v4.h }[6], [x24], #0x2\n"
+ "ld1 { v2.h }[6], [x23], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[14], [x24], #0x1\n"
+ "ld1 { v2.b }[14], [x23], #0x1\n"
"b 33f\n"
"26:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[12], [x24], #0x1\n"
+ "ld1 { v2.b }[12], [x23], #0x1\n"
"b 33f\n"
"27:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 28f\n"
- "ld1 { v4.h }[4], [x24], #0x2\n"
+ "ld1 { v2.h }[4], [x23], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[10], [x24], #0x1\n"
+ "ld1 { v2.b }[10], [x23], #0x1\n"
"b 33f\n"
"28:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[8], [x24], #0x1\n"
+ "ld1 { v2.b }[8], [x23], #0x1\n"
"b 33f\n"
"29:" // Oddments: Single input loop: Load: Bit 3: Unset
"tbz %x[n_channels], #2, 31f\n"
- "ldr s4, [x24], #0x4\n"
+ "ldr s2, [x23], #0x4\n"
"tbz %x[n_channels], #1, 30f\n"
- "ld1 { v4.h }[2], [x24], #0x2\n"
+ "ld1 { v2.h }[2], [x23], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[6], [x24], #0x1\n"
+ "ld1 { v2.b }[6], [x23], #0x1\n"
"b 33f\n"
"30:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[4], [x24], #0x1\n"
+ "ld1 { v2.b }[4], [x23], #0x1\n"
"b 33f\n"
"31:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 32f\n"
- "ldr h4, [x24], #0x2\n"
+ "ldr h2, [x23], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[2], [x24], #0x1\n"
+ "ld1 { v2.b }[2], [x23], #0x1\n"
"b 33f\n"
"32:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ldr b4, [x24], #0x1\n"
+ "ldr b2, [x23], #0x1\n"
"33:" // Oddments: Single input loop: Load: Bit 3: End
- "subs x21, x21, #0x1\n"
- "smax v8.16b, v8.16b, v4.16b\n"
+ "subs x20, x20, #0x1\n"
+ "smax v6.16b, v6.16b, v2.16b\n"
"bgt 25b\n"
"34:" // Oddments: Single input loop: End
"tbz %x[n_channels], #3, 38f\n"
- "st1 { v8.d }[0], [%x[outptr]], #0x8\n"
+ "st1 { v6.d }[0], [%x[outptr]], #0x8\n"
"tbz %x[n_channels], #2, 36f\n"
- "st1 { v8.s }[2], [%x[outptr]], #0x4\n"
+ "st1 { v6.s }[2], [%x[outptr]], #0x4\n"
"tbz %x[n_channels], #1, 35f\n"
- "st1 { v8.h }[6], [%x[outptr]], #0x2\n"
+ "st1 { v6.h }[6], [%x[outptr]], #0x2\n"
"tbz %x[n_channels], #0, 42f\n"
- "st1 { v8.b }[14], [%x[outptr]], #0x1\n"
+ "st1 { v6.b }[14], [%x[outptr]], #0x1\n"
"b 42f\n"
"35:" // Oddments: Store: Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 42f\n"
- "st1 { v8.b }[12], [%x[outptr]], #0x1\n"
+ "st1 { v6.b }[12], [%x[outptr]], #0x1\n"
"b 42f\n"
"36:" // Oddments: Store: Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 37f\n"
- "st1 { v8.h }[4], [%x[outptr]], #0x2\n"
+ "st1 { v6.h }[4], [%x[outptr]], #0x2\n"
"tbz %x[n_channels], #0, 42f\n"
- "st1 { v8.b }[10], [%x[outptr]], #0x1\n"
+ "st1 { v6.b }[10], [%x[outptr]], #0x1\n"
"b 42f\n"
"37:" // Oddments: Store: Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 42f\n"
- "st1 { v8.b }[8], [%x[outptr]], #0x1\n"
+ "st1 { v6.b }[8], [%x[outptr]], #0x1\n"
"b 42f\n"
"38:" // Oddments: Store: Bit 3: Unset
"tbz %x[n_channels], #2, 40f\n"
- "st1 { v8.s }[0], [%x[outptr]], #0x4\n"
+ "st1 { v6.s }[0], [%x[outptr]], #0x4\n"
"tbz %x[n_channels], #1, 39f\n"
- "st1 { v8.h }[2], [%x[outptr]], #0x2\n"
+ "st1 { v6.h }[2], [%x[outptr]], #0x2\n"
"tbz %x[n_channels], #0, 42f\n"
- "st1 { v8.b }[6], [%x[outptr]], #0x1\n"
+ "st1 { v6.b }[6], [%x[outptr]], #0x1\n"
"b 42f\n"
"39:" // Oddments: Store: Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 42f\n"
- "st1 { v8.b }[4], [%x[outptr]], #0x1\n"
+ "st1 { v6.b }[4], [%x[outptr]], #0x1\n"
"b 42f\n"
"40:" // Oddments: Store: Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 41f\n"
- "st1 { v8.h }[0], [%x[outptr]], #0x2\n"
+ "st1 { v6.h }[0], [%x[outptr]], #0x2\n"
"tbz %x[n_channels], #0, 42f\n"
- "st1 { v8.b }[2], [%x[outptr]], #0x1\n"
+ "st1 { v6.b }[2], [%x[outptr]], #0x1\n"
"b 42f\n"
"41:" // Oddments: Store: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 42f\n"
- "st1 { v8.b }[0], [%x[outptr]], #0x1\n"
+ "st1 { v6.b }[0], [%x[outptr]], #0x1\n"
"42:" // Oddments: Store: Bit 3: End
"43:" // End
: [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
: [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
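
For readers tracing the assembly above, a minimal scalar sketch of what the s8 NHWC max-pooling kernel computes may help: for every channel, the signed 8-bit maximum over all valid pooling cells, with the accumulator seeded to -128 (the "movi v.16b, #0x80" in the listing). The function name and signature below are illustrative assumptions, not the library's API; the real kernel processes 64, 16, or a tail of channels per pass rather than one at a time.

#include <algorithm>
#include <cstdint>

// Hypothetical scalar reference for the vectorised kernel above.
void s8_nhwc_max_reference(uint64_t n_channels,
                           const int8_t *const *inptrs,  // one pointer per valid pooling cell
                           uint64_t n_valid_cells,
                           int8_t *outptr)
{
    for (uint64_t c = 0; c < n_channels; c++)
    {
        int8_t acc = INT8_MIN;  // matches "movi v.16b, #0x80"
        for (uint64_t i = 0; i < n_valid_cells; i++)
        {
            acc = std::max(acc, inptrs[i][c]);  // smax over every valid cell
        }
        outptr[c] = acc;
    }
}
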
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_avg_generic_depthfirst/generic.cpp
index e889782fa3..a2487b0592 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -115,16 +115,16 @@ void a64_s8q_nhwc_avg_generic_depthfirst_impl(
__asm__ __volatile__(
"cmp %x[n_channels], #0x40\n"
- "mov x27, #0x0\n"
- "mov x26, #0x10\n" // cntb _, ALL, #1
- "mov x25, #0x20\n" // cntb _, ALL, #2
- "mov x24, #0x30\n" // cntb _, ALL, #3
+ "mov x26, #0x0\n"
+ "mov x25, #0x10\n" // cntb _, ALL, #1
+ "mov x24, #0x20\n" // cntb _, ALL, #2
+ "mov x23, #0x30\n" // cntb _, ALL, #3
"blt 7f\n"
"1:" // 4-vectors of channels
- "lsr x23, %x[n_valid_cells], #0x1\n"
+ "lsr x22, %x[n_valid_cells], #0x1\n"
"movi v15.4s, #0x0\n"
"movi v14.4s, #0x0\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"movi v13.4s, #0x0\n"
"movi v12.4s, #0x0\n"
"movi v11.4s, #0x0\n"
@@ -139,43 +139,43 @@ void a64_s8q_nhwc_avg_generic_depthfirst_impl(
"movi v2.4s, #0x0\n"
"movi v1.4s, #0x0\n"
"movi v0.4s, #0x0\n"
- "cbz x23, 4f\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "ldr q31, [x22, x27]\n"
- "subs x23, x23, #0x1\n"
- "add x20, x20, #0x10\n"
- "ldr q30, [x21, x27]\n"
- "ldr q29, [x22, x26]\n"
- "ldr q28, [x21, x26]\n"
- "ldr q27, [x22, x25]\n"
- "ldr q26, [x21, x25]\n"
- "ldr q25, [x22, x24]\n"
- "ldr q24, [x21, x24]\n"
+ "cbz x22, 4f\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
+ "add x19, x19, #0x10\n"
+ "ldr q31, [x21, x26]\n"
+ "ldr q30, [x20, x26]\n"
+ "ldr q29, [x21, x25]\n"
+ "ldr q28, [x20, x25]\n"
+ "ldr q27, [x21, x24]\n"
+ "ldr q26, [x20, x24]\n"
+ "ldr q25, [x21, x23]\n"
+ "ldr q24, [x20, x23]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 2 inputs loop
"saddl v23.8h, v31.8b, v30.8b\n"
"saddl2 v22.8h, v31.16b, v30.16b\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "ldr q31, [x22, x27]\n"
- "ldr q30, [x21, x27]\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
"saddl v21.8h, v29.8b, v28.8b\n"
"saddl2 v20.8h, v29.16b, v28.16b\n"
- "ldr q29, [x22, x26]\n"
- "ldr q28, [x21, x26]\n"
+ "add x19, x19, #0x10\n"
+ "ldr q31, [x21, x26]\n"
"saddl v19.8h, v27.8b, v26.8b\n"
"saddl2 v18.8h, v27.16b, v26.16b\n"
- "ldr q27, [x22, x25]\n"
- "ldr q26, [x21, x25]\n"
- "subs x23, x23, #0x1\n"
- "saddw v15.4s, v15.4s, v23.4h\n"
- "saddw2 v14.4s, v14.4s, v23.8h\n"
+ "ldr q30, [x20, x26]\n"
+ "ldr q29, [x21, x25]\n"
"saddl v17.8h, v25.8b, v24.8b\n"
"saddl2 v16.8h, v25.16b, v24.16b\n"
- "ldr q25, [x22, x24]\n"
- "add x20, x20, #0x10\n"
+ "ldr q28, [x20, x25]\n"
+ "ldr q27, [x21, x24]\n"
+ "saddw v15.4s, v15.4s, v23.4h\n"
+ "saddw2 v14.4s, v14.4s, v23.8h\n"
+ "ldr q26, [x20, x24]\n"
+ "ldr q25, [x21, x23]\n"
"saddw v13.4s, v13.4s, v22.4h\n"
"saddw2 v12.4s, v12.4s, v22.8h\n"
- "ldr q24, [x21, x24]\n"
+ "ldr q24, [x20, x23]\n"
"saddw v11.4s, v11.4s, v21.4h\n"
"saddw2 v10.4s, v10.4s, v21.8h\n"
"saddw v9.4s, v9.4s, v20.4h\n"
@@ -215,21 +215,21 @@ void a64_s8q_nhwc_avg_generic_depthfirst_impl(
"saddw v1.4s, v1.4s, v16.4h\n"
"saddw2 v0.4s, v0.4s, v16.8h\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x1\n"
+ "ands x20, %x[n_valid_cells], #0x1\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x22, [x20], #0x8\n"
- "ldr q31, [x22, x27]\n"
+ "ldr x21, [x19], #0x8\n"
+ "ldr q31, [x21, x26]\n"
"sxtl v23.8h, v31.8b\n"
"sxtl2 v22.8h, v31.16b\n"
- "ldr q29, [x22, x26]\n"
- "ldr q27, [x22, x25]\n"
+ "ldr q29, [x21, x25]\n"
+ "ldr q27, [x21, x24]\n"
"sxtl v21.8h, v29.8b\n"
"sxtl2 v20.8h, v29.16b\n"
- "ldr q25, [x22, x24]\n"
+ "ldr q25, [x21, x23]\n"
"sxtl v19.8h, v27.8b\n"
"sxtl2 v18.8h, v27.16b\n"
- "subs x21, x21, #0x1\n"
+ "subs x20, x20, #0x1\n"
"sxtl v17.8h, v25.8b\n"
"sxtl2 v16.8h, v25.16b\n"
"saddw v15.4s, v15.4s, v23.4h\n"
@@ -254,9 +254,9 @@ void a64_s8q_nhwc_avg_generic_depthfirst_impl(
"ld1r { v17.4s }, [%x[combined_rescale_value]]\n"
"srshl v15.4s, v15.4s, v18.4s\n"
"srshl v14.4s, v14.4s, v18.4s\n"
- "ld1r { v16.4s }, [%x[right_shift]]\n"
"srshl v13.4s, v13.4s, v18.4s\n"
"srshl v12.4s, v12.4s, v18.4s\n"
+ "ld1r { v16.4s }, [%x[right_shift]]\n"
"sub %x[n_channels], %x[n_channels], #0x40\n"
"srshl v11.4s, v11.4s, v18.4s\n"
"srshl v10.4s, v10.4s, v18.4s\n"
@@ -347,47 +347,47 @@ void a64_s8q_nhwc_avg_generic_depthfirst_impl(
"uzp1 v19.16b, v1.16b, v0.16b\n"
"uzp1 v16.16b, v23.16b, v16.16b\n"
"uzp1 v18.16b, v22.16b, v18.16b\n"
- "str q16, [%x[outptr], x27]\n"
- "add x27, x27, #0x40\n"
+ "str q16, [%x[outptr], x26]\n"
+ "add x26, x26, #0x40\n"
"uzp1 v17.16b, v21.16b, v17.16b\n"
"uzp1 v16.16b, v20.16b, v19.16b\n"
- "str q18, [%x[outptr], x26]\n"
- "add x26, x26, #0x40\n"
- "str q17, [%x[outptr], x25]\n"
+ "str q18, [%x[outptr], x25]\n"
"add x25, x25, #0x40\n"
- "str q16, [%x[outptr], x24]\n"
+ "str q17, [%x[outptr], x24]\n"
"add x24, x24, #0x40\n"
+ "str q16, [%x[outptr], x23]\n"
+ "add x23, x23, #0x40\n"
"bge 1b\n"
"cbz %x[n_channels], 43f\n"
"7:" // Single vector of channels
"cmp %x[n_channels], #0x10\n"
"blt 14f\n"
"8:" // Single vector of channels: Loop
- "lsr x23, %x[n_valid_cells], #0x1\n"
+ "lsr x22, %x[n_valid_cells], #0x1\n"
"movi v15.4s, #0x0\n"
"movi v14.4s, #0x0\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"movi v13.4s, #0x0\n"
"movi v12.4s, #0x0\n"
- "cbz x23, 11f\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "ldr q31, [x22, x27]\n"
- "subs x23, x23, #0x1\n"
- "add x20, x20, #0x10\n"
- "ldr q30, [x21, x27]\n"
+ "cbz x22, 11f\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
+ "add x19, x19, #0x10\n"
+ "ldr q31, [x21, x26]\n"
+ "ldr q30, [x20, x26]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 2 inputs loop
"saddl v23.8h, v31.8b, v30.8b\n"
"saddl2 v22.8h, v31.16b, v30.16b\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "ldr q31, [x22, x27]\n"
- "ldr q30, [x21, x27]\n"
- "subs x23, x23, #0x1\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
"saddw v15.4s, v15.4s, v23.4h\n"
"saddw2 v14.4s, v14.4s, v23.8h\n"
+ "add x19, x19, #0x10\n"
+ "ldr q31, [x21, x26]\n"
"saddw v13.4s, v13.4s, v22.4h\n"
"saddw2 v12.4s, v12.4s, v22.8h\n"
- "add x20, x20, #0x10\n"
+ "ldr q30, [x20, x26]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 2 inputs tail
"saddl v23.8h, v31.8b, v30.8b\n"
@@ -397,14 +397,14 @@ void a64_s8q_nhwc_avg_generic_depthfirst_impl(
"saddw v13.4s, v13.4s, v22.4h\n"
"saddw2 v12.4s, v12.4s, v22.8h\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x1\n"
+ "ands x20, %x[n_valid_cells], #0x1\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x22, [x20], #0x8\n"
- "ldr q31, [x22, x27]\n"
+ "ldr x21, [x19], #0x8\n"
+ "ldr q31, [x21, x26]\n"
"sxtl v23.8h, v31.8b\n"
"sxtl2 v22.8h, v31.16b\n"
- "subs x21, x21, #0x1\n"
+ "subs x20, x20, #0x1\n"
"saddw v15.4s, v15.4s, v23.4h\n"
"saddw2 v14.4s, v14.4s, v23.8h\n"
"saddw v13.4s, v13.4s, v22.4h\n"
@@ -415,9 +415,9 @@ void a64_s8q_nhwc_avg_generic_depthfirst_impl(
"ld1r { v17.4s }, [%x[combined_rescale_value]]\n"
"srshl v15.4s, v15.4s, v18.4s\n"
"srshl v14.4s, v14.4s, v18.4s\n"
- "ld1r { v16.4s }, [%x[right_shift]]\n"
"srshl v13.4s, v13.4s, v18.4s\n"
"srshl v12.4s, v12.4s, v18.4s\n"
+ "ld1r { v16.4s }, [%x[right_shift]]\n"
"sub %x[n_channels], %x[n_channels], #0x10\n"
"sqrdmulh v15.4s, v15.4s, v17.4s\n"
"sqrdmulh v14.4s, v14.4s, v17.4s\n"
@@ -441,149 +441,149 @@ void a64_s8q_nhwc_avg_generic_depthfirst_impl(
"uzp1 v23.16b, v15.16b, v14.16b\n"
"uzp1 v16.16b, v13.16b, v12.16b\n"
"uzp1 v16.16b, v23.16b, v16.16b\n"
- "str q16, [%x[outptr], x27]\n"
- "add x27, x27, #0x10\n"
+ "str q16, [%x[outptr], x26]\n"
+ "add x26, x26, #0x10\n"
"bge 8b\n"
"cbz %x[n_channels], 43f\n"
"14:" // Oddments
- "lsr x23, %x[n_valid_cells], #0x1\n"
- "add %x[outptr], %x[outptr], x27\n"
+ "lsr x22, %x[n_valid_cells], #0x1\n"
+ "add %x[outptr], %x[outptr], x26\n"
"movi v15.4s, #0x0\n"
"movi v14.4s, #0x0\n"
"movi v13.4s, #0x0\n"
"movi v12.4s, #0x0\n"
- "mov x20, %x[inptrs]\n"
- "cbz x23, 24f\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x22, 24f\n"
"15:" // Oddments: 2 inputs loop
- "ldp x22, x21, [x20, #0x0]\n"
- "add x20, x20, #0x10\n"
- "add x22, x22, x27\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "add x19, x19, #0x10\n"
+ "add x21, x21, x26\n"
"movi v31.16b, #0x0\n"
- "add x21, x21, x27\n"
+ "add x20, x20, x26\n"
"movi v30.16b, #0x0\n"
"tbz %x[n_channels], #3, 19f\n"
- "ldr d31, [x22], #0x8\n"
- "ldr d30, [x21], #0x8\n"
+ "ldr d31, [x21], #0x8\n"
+ "ldr d30, [x20], #0x8\n"
"tbz %x[n_channels], #2, 17f\n"
- "ld1 { v31.s }[2], [x22], #0x4\n"
- "ld1 { v30.s }[2], [x21], #0x4\n"
+ "ld1 { v31.s }[2], [x21], #0x4\n"
+ "ld1 { v30.s }[2], [x20], #0x4\n"
"tbz %x[n_channels], #1, 16f\n"
- "ld1 { v31.h }[6], [x22], #0x2\n"
- "ld1 { v30.h }[6], [x21], #0x2\n"
+ "ld1 { v31.h }[6], [x21], #0x2\n"
+ "ld1 { v30.h }[6], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v31.b }[14], [x22], #0x1\n"
- "ld1 { v30.b }[14], [x21], #0x1\n"
+ "ld1 { v31.b }[14], [x21], #0x1\n"
+ "ld1 { v30.b }[14], [x20], #0x1\n"
"b 23f\n"
"16:" // Oddments: 2 inputs loop: Load: Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v31.b }[12], [x22], #0x1\n"
- "ld1 { v30.b }[12], [x21], #0x1\n"
+ "ld1 { v31.b }[12], [x21], #0x1\n"
+ "ld1 { v30.b }[12], [x20], #0x1\n"
"b 23f\n"
"17:" // Oddments: 2 inputs loop: Load: Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 18f\n"
- "ld1 { v31.h }[4], [x22], #0x2\n"
- "ld1 { v30.h }[4], [x21], #0x2\n"
+ "ld1 { v31.h }[4], [x21], #0x2\n"
+ "ld1 { v30.h }[4], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v31.b }[10], [x22], #0x1\n"
- "ld1 { v30.b }[10], [x21], #0x1\n"
+ "ld1 { v31.b }[10], [x21], #0x1\n"
+ "ld1 { v30.b }[10], [x20], #0x1\n"
"b 23f\n"
"18:" // Oddments: 2 inputs loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v31.b }[8], [x22], #0x1\n"
- "ld1 { v30.b }[8], [x21], #0x1\n"
+ "ld1 { v31.b }[8], [x21], #0x1\n"
+ "ld1 { v30.b }[8], [x20], #0x1\n"
"b 23f\n"
"19:" // Oddments: 2 inputs loop: Load: Bit 3: Unset
"tbz %x[n_channels], #2, 21f\n"
- "ldr s31, [x22], #0x4\n"
- "ldr s30, [x21], #0x4\n"
+ "ldr s31, [x21], #0x4\n"
+ "ldr s30, [x20], #0x4\n"
"tbz %x[n_channels], #1, 20f\n"
- "ld1 { v31.h }[2], [x22], #0x2\n"
- "ld1 { v30.h }[2], [x21], #0x2\n"
+ "ld1 { v31.h }[2], [x21], #0x2\n"
+ "ld1 { v30.h }[2], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v31.b }[6], [x22], #0x1\n"
- "ld1 { v30.b }[6], [x21], #0x1\n"
+ "ld1 { v31.b }[6], [x21], #0x1\n"
+ "ld1 { v30.b }[6], [x20], #0x1\n"
"b 23f\n"
"20:" // Oddments: 2 inputs loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v31.b }[4], [x22], #0x1\n"
- "ld1 { v30.b }[4], [x21], #0x1\n"
+ "ld1 { v31.b }[4], [x21], #0x1\n"
+ "ld1 { v30.b }[4], [x20], #0x1\n"
"b 23f\n"
"21:" // Oddments: 2 inputs loop: Load: Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 22f\n"
- "ldr h31, [x22], #0x2\n"
- "ldr h30, [x21], #0x2\n"
+ "ldr h31, [x21], #0x2\n"
+ "ldr h30, [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v31.b }[2], [x22], #0x1\n"
- "ld1 { v30.b }[2], [x21], #0x1\n"
+ "ld1 { v31.b }[2], [x21], #0x1\n"
+ "ld1 { v30.b }[2], [x20], #0x1\n"
"b 23f\n"
"22:" // Oddments: 2 inputs loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ldr b31, [x22], #0x1\n"
- "ldr b30, [x21], #0x1\n"
+ "ldr b31, [x21], #0x1\n"
+ "ldr b30, [x20], #0x1\n"
"23:" // Oddments: 2 inputs loop: Load: Bit 3: End
"saddl v23.8h, v31.8b, v30.8b\n"
"saddl2 v22.8h, v31.16b, v30.16b\n"
- "subs x23, x23, #0x1\n"
+ "subs x22, x22, #0x1\n"
"saddw v15.4s, v15.4s, v23.4h\n"
"saddw2 v14.4s, v14.4s, v23.8h\n"
"saddw v13.4s, v13.4s, v22.4h\n"
"saddw2 v12.4s, v12.4s, v22.8h\n"
"bgt 15b\n"
"24:" // Oddments: After loop
- "ands x21, %x[n_valid_cells], #0x1\n"
+ "ands x20, %x[n_valid_cells], #0x1\n"
"beq 34f\n"
"25:" // Oddments: Single input loop
- "ldr x22, [x20], #0x8\n"
- "add x22, x22, x27\n"
+ "ldr x21, [x19], #0x8\n"
+ "add x21, x21, x26\n"
"movi v31.16b, #0x0\n"
"tbz %x[n_channels], #3, 29f\n"
- "ldr d31, [x22], #0x8\n"
+ "ldr d31, [x21], #0x8\n"
"tbz %x[n_channels], #2, 27f\n"
- "ld1 { v31.s }[2], [x22], #0x4\n"
+ "ld1 { v31.s }[2], [x21], #0x4\n"
"tbz %x[n_channels], #1, 26f\n"
- "ld1 { v31.h }[6], [x22], #0x2\n"
+ "ld1 { v31.h }[6], [x21], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v31.b }[14], [x22], #0x1\n"
+ "ld1 { v31.b }[14], [x21], #0x1\n"
"b 33f\n"
"26:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v31.b }[12], [x22], #0x1\n"
+ "ld1 { v31.b }[12], [x21], #0x1\n"
"b 33f\n"
"27:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 28f\n"
- "ld1 { v31.h }[4], [x22], #0x2\n"
+ "ld1 { v31.h }[4], [x21], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v31.b }[10], [x22], #0x1\n"
+ "ld1 { v31.b }[10], [x21], #0x1\n"
"b 33f\n"
"28:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v31.b }[8], [x22], #0x1\n"
+ "ld1 { v31.b }[8], [x21], #0x1\n"
"b 33f\n"
"29:" // Oddments: Single input loop: Load: Bit 3: Unset
"tbz %x[n_channels], #2, 31f\n"
- "ldr s31, [x22], #0x4\n"
+ "ldr s31, [x21], #0x4\n"
"tbz %x[n_channels], #1, 30f\n"
- "ld1 { v31.h }[2], [x22], #0x2\n"
+ "ld1 { v31.h }[2], [x21], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v31.b }[6], [x22], #0x1\n"
+ "ld1 { v31.b }[6], [x21], #0x1\n"
"b 33f\n"
"30:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v31.b }[4], [x22], #0x1\n"
+ "ld1 { v31.b }[4], [x21], #0x1\n"
"b 33f\n"
"31:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 32f\n"
- "ldr h31, [x22], #0x2\n"
+ "ldr h31, [x21], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v31.b }[2], [x22], #0x1\n"
+ "ld1 { v31.b }[2], [x21], #0x1\n"
"b 33f\n"
"32:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ldr b31, [x22], #0x1\n"
+ "ldr b31, [x21], #0x1\n"
"33:" // Oddments: Single input loop: Load: Bit 3: End
"sxtl v23.8h, v31.8b\n"
"sxtl2 v22.8h, v31.16b\n"
- "subs x21, x21, #0x1\n"
+ "subs x20, x20, #0x1\n"
"saddw v15.4s, v15.4s, v23.4h\n"
"saddw2 v14.4s, v14.4s, v23.8h\n"
"saddw v13.4s, v13.4s, v22.4h\n"
@@ -594,9 +594,9 @@ void a64_s8q_nhwc_avg_generic_depthfirst_impl(
"ld1r { v17.4s }, [%x[combined_rescale_value]]\n"
"srshl v15.4s, v15.4s, v18.4s\n"
"srshl v14.4s, v14.4s, v18.4s\n"
- "ld1r { v16.4s }, [%x[right_shift]]\n"
"srshl v13.4s, v13.4s, v18.4s\n"
"srshl v12.4s, v12.4s, v18.4s\n"
+ "ld1r { v16.4s }, [%x[right_shift]]\n"
"sqrdmulh v15.4s, v15.4s, v17.4s\n"
"sqrdmulh v14.4s, v14.4s, v17.4s\n"
"sqrdmulh v13.4s, v13.4s, v17.4s\n"
@@ -666,7 +666,7 @@ void a64_s8q_nhwc_avg_generic_depthfirst_impl(
"43:" // End
: [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
: [combined_rescale_value] "r" (&combined_rescale_value), [inptrs] "r" (inptrs), [left_shift] "r" (&left_shift), [n_valid_cells] "r" (n_valid_cells), [right_shift] "r" (&right_shift)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26"
);
}
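
The srshl/sqrdmulh/srshl sequence in this quantised-average kernel is a fixed-point requantisation of the widened 32-bit sums. Below is a hedged scalar model of that arithmetic: it ignores the saturation the real instructions perform, and it assumes right_shift is stored as a negative srshl operand, as the operand names in the listing suggest. It is a sketch of the instruction semantics, not the library's exact code.

#include <algorithm>
#include <cstdint>

// Scalar model of SRSHL: signed rounding shift left; a negative shift
// is a rounding arithmetic shift right.
static int64_t srshl_model(int64_t v, int32_t shift)
{
    if (shift >= 0) return v << shift;
    const int32_t s = -shift;
    return (v + (int64_t(1) << (s - 1))) >> s;
}

// Hypothetical requantisation of one 32-bit accumulator lane.
int8_t requantize_s8q_avg(int32_t sum, int32_t left_shift,
                          int32_t combined_rescale_value, int32_t right_shift)
{
    int64_t v = srshl_model(sum, left_shift);
    // sqrdmulh: rounding doubling multiply, keep the high half
    // (saturation of the corner case omitted for clarity).
    v = (v * combined_rescale_value + (int64_t(1) << 30)) >> 31;
    v = srshl_model(v, right_shift);  // right_shift is negative in practice
    return static_cast<int8_t>(
        std::max<int64_t>(-128, std::min<int64_t>(127, v)));
}
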
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_max_generic_depthfirst/generic.cpp
index 90a31ec677..5a6cfb4711 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,123 +42,123 @@ void a64_s8q_nhwc_max_generic_depthfirst_impl(
{
__asm__ __volatile__(
"cmp %x[n_channels], #0x40\n"
- "mov x9, #0x0\n"
- "mov x28, #0x10\n" // cntb _, ALL, #1
- "mov x27, #0x20\n" // cntb _, ALL, #2
- "mov x26, #0x30\n" // cntb _, ALL, #3
+ "mov x28, #0x0\n"
+ "mov x27, #0x10\n" // cntb _, ALL, #1
+ "mov x26, #0x20\n" // cntb _, ALL, #2
+ "mov x25, #0x30\n" // cntb _, ALL, #3
"blt 7f\n"
"1:" // 4-vectors of channels
- "lsr x25, %x[n_valid_cells], #0x2\n"
- "movi v8.16b, #0x80\n"
- "movi v7.16b, #0x80\n"
- "mov x20, %x[inptrs]\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
+ "movi v4.16b, #0x80\n"
+ "movi v3.16b, #0x80\n"
+ "mov x19, %x[inptrs]\n"
"movi v6.16b, #0x80\n"
"movi v5.16b, #0x80\n"
- "cbz x25, 4f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldr q4, [x24, x9]\n"
- "subs x25, x25, #0x1\n"
- "ldr q3, [x23, x9]\n"
- "ldr q2, [x24, x28]\n"
- "ldr q1, [x23, x28]\n"
- "ldr q0, [x24, x27]\n"
- "ldr q31, [x23, x27]\n"
- "ldr q30, [x24, x26]\n"
- "ldr q29, [x23, x26]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ldr q28, [x22, x9]\n"
- "ldr q22, [x21, x9]\n"
- "ldr q27, [x22, x28]\n"
- "ldr q21, [x21, x28]\n"
- "ldr q26, [x22, x27]\n"
- "ldr q20, [x21, x27]\n"
- "ldr q25, [x22, x26]\n"
- "ldr q24, [x21, x26]\n"
+ "cbz x24, 4f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "ldr q2, [x23, x28]\n"
+ "ldr q1, [x22, x28]\n"
+ "ldr q0, [x21, x28]\n"
+ "ldr q31, [x20, x28]\n"
+ "ldr q30, [x23, x27]\n"
+ "ldr q22, [x22, x27]\n"
+ "ldr q29, [x21, x27]\n"
+ "ldr q28, [x20, x27]\n"
+ "ldr q27, [x23, x26]\n"
+ "ldr q21, [x22, x26]\n"
+ "ldr q26, [x21, x26]\n"
+ "ldr q17, [x20, x26]\n"
+ "ldr q25, [x23, x25]\n"
+ "ldr q20, [x22, x25]\n"
+ "ldr q24, [x21, x25]\n"
+ "ldr q16, [x20, x25]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
- "smax v23.16b, v4.16b, v3.16b\n"
- "smax v19.16b, v28.16b, v22.16b\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldr q4, [x24, x9]\n"
- "ldr q3, [x23, x9]\n"
- "smax v22.16b, v2.16b, v1.16b\n"
- "ldr q2, [x24, x28]\n"
- "smax v18.16b, v27.16b, v21.16b\n"
- "ldr q1, [x23, x28]\n"
- "smax v21.16b, v0.16b, v31.16b\n"
- "ldr q0, [x24, x27]\n"
- "smax v17.16b, v26.16b, v20.16b\n"
- "ldr q31, [x23, x27]\n"
- "smax v20.16b, v30.16b, v29.16b\n"
- "ldr q30, [x24, x26]\n"
- "smax v16.16b, v25.16b, v24.16b\n"
- "ldr q29, [x23, x26]\n"
+ "smax v23.16b, v2.16b, v1.16b\n"
+ "smax v19.16b, v0.16b, v31.16b\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "smax v22.16b, v30.16b, v22.16b\n"
+ "smax v18.16b, v29.16b, v28.16b\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "smax v21.16b, v27.16b, v21.16b\n"
+ "smax v17.16b, v26.16b, v17.16b\n"
+ "ldr q2, [x23, x28]\n"
+ "ldr q1, [x22, x28]\n"
+ "smax v20.16b, v25.16b, v20.16b\n"
+ "smax v16.16b, v24.16b, v16.16b\n"
+ "ldr q0, [x21, x28]\n"
+ "ldr q31, [x20, x28]\n"
"smax v19.16b, v23.16b, v19.16b\n"
"smax v18.16b, v22.16b, v18.16b\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "ldr q28, [x22, x9]\n"
- "ldr q22, [x21, x9]\n"
+ "ldr q30, [x23, x27]\n"
+ "ldr q22, [x22, x27]\n"
"smax v17.16b, v21.16b, v17.16b\n"
"smax v16.16b, v20.16b, v16.16b\n"
- "ldr q27, [x22, x28]\n"
- "ldr q21, [x21, x28]\n"
- "subs x25, x25, #0x1\n"
- "smax v8.16b, v8.16b, v19.16b\n"
- "ldr q26, [x22, x27]\n"
- "ldr q20, [x21, x27]\n"
- "smax v7.16b, v7.16b, v18.16b\n"
+ "ldr q29, [x21, x27]\n"
+ "ldr q28, [x20, x27]\n"
+ "smax v4.16b, v4.16b, v19.16b\n"
+ "smax v3.16b, v3.16b, v18.16b\n"
+ "ldr q27, [x23, x26]\n"
+ "ldr q21, [x22, x26]\n"
"smax v6.16b, v6.16b, v17.16b\n"
- "ldr q25, [x22, x26]\n"
- "ldr q24, [x21, x26]\n"
"smax v5.16b, v5.16b, v16.16b\n"
- "add x20, x20, #0x20\n"
+ "ldr q26, [x21, x26]\n"
+ "ldr q17, [x20, x26]\n"
+ "ldr q25, [x23, x25]\n"
+ "ldr q20, [x22, x25]\n"
+ "ldr q24, [x21, x25]\n"
+ "ldr q16, [x20, x25]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
- "smax v23.16b, v4.16b, v3.16b\n"
- "smax v19.16b, v28.16b, v22.16b\n"
- "smax v22.16b, v2.16b, v1.16b\n"
- "smax v18.16b, v27.16b, v21.16b\n"
- "smax v21.16b, v0.16b, v31.16b\n"
- "smax v17.16b, v26.16b, v20.16b\n"
- "smax v20.16b, v30.16b, v29.16b\n"
- "smax v16.16b, v25.16b, v24.16b\n"
+ "smax v23.16b, v2.16b, v1.16b\n"
+ "smax v19.16b, v0.16b, v31.16b\n"
+ "smax v22.16b, v30.16b, v22.16b\n"
+ "smax v18.16b, v29.16b, v28.16b\n"
+ "smax v21.16b, v27.16b, v21.16b\n"
+ "smax v17.16b, v26.16b, v17.16b\n"
+ "smax v20.16b, v25.16b, v20.16b\n"
+ "smax v16.16b, v24.16b, v16.16b\n"
"smax v19.16b, v23.16b, v19.16b\n"
"smax v18.16b, v22.16b, v18.16b\n"
"smax v17.16b, v21.16b, v17.16b\n"
"smax v16.16b, v20.16b, v16.16b\n"
- "smax v8.16b, v8.16b, v19.16b\n"
- "smax v7.16b, v7.16b, v18.16b\n"
+ "smax v4.16b, v4.16b, v19.16b\n"
+ "smax v3.16b, v3.16b, v18.16b\n"
"smax v6.16b, v6.16b, v17.16b\n"
"smax v5.16b, v5.16b, v16.16b\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ldr q4, [x24, x9]\n"
- "subs x21, x21, #0x1\n"
- "smax v8.16b, v8.16b, v4.16b\n"
- "ldr q2, [x24, x28]\n"
- "ldr q0, [x24, x27]\n"
- "smax v7.16b, v7.16b, v2.16b\n"
- "smax v6.16b, v6.16b, v0.16b\n"
- "ldr q30, [x24, x26]\n"
- "smax v5.16b, v5.16b, v30.16b\n"
+ "ldr x23, [x19], #0x8\n"
+ "ldr q2, [x23, x28]\n"
+ "subs x20, x20, #0x1\n"
+ "smax v4.16b, v4.16b, v2.16b\n"
+ "ldr q30, [x23, x27]\n"
+ "ldr q27, [x23, x26]\n"
+ "smax v3.16b, v3.16b, v30.16b\n"
+ "smax v6.16b, v6.16b, v27.16b\n"
+ "ldr q25, [x23, x25]\n"
+ "smax v5.16b, v5.16b, v25.16b\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "sxtl v23.8h, v8.8b\n"
- "sxtl2 v22.8h, v8.16b\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
- "ld1r { v4.4s }, [x20]\n"
- "sxtl v21.8h, v7.8b\n"
- "sxtl2 v18.8h, v7.16b\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
- "ld1r { v3.4s }, [x20]\n"
+ "sxtl v23.8h, v4.8b\n"
+ "sxtl2 v22.8h, v4.16b\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
+ "ld1r { v4.4s }, [x19]\n"
+ "sxtl v21.8h, v3.8b\n"
+ "sxtl2 v18.8h, v3.16b\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+ "ld1r { v3.4s }, [x19]\n"
"sxtl v20.8h, v6.8b\n"
"sxtl2 v19.8h, v6.16b\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
- "ld1r { v2.4s }, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+ "ld1r { v2.4s }, [x19]\n"
"sxtl v17.8h, v5.8b\n"
"sxtl2 v16.8h, v5.16b\n"
"sub %x[n_channels], %x[n_channels], #0x40\n"
@@ -271,76 +271,76 @@ void a64_s8q_nhwc_max_generic_depthfirst_impl(
"uzp1 v19.16b, v24.16b, v19.16b\n"
"uzp1 v16.16b, v23.16b, v16.16b\n"
"uzp1 v18.16b, v22.16b, v18.16b\n"
- "str q16, [%x[outptr], x9]\n"
- "add x9, x9, #0x40\n"
+ "str q16, [%x[outptr], x28]\n"
+ "add x28, x28, #0x40\n"
"uzp1 v17.16b, v21.16b, v17.16b\n"
"uzp1 v16.16b, v20.16b, v19.16b\n"
- "str q18, [%x[outptr], x28]\n"
- "add x28, x28, #0x40\n"
- "str q17, [%x[outptr], x27]\n"
+ "str q18, [%x[outptr], x27]\n"
"add x27, x27, #0x40\n"
- "str q16, [%x[outptr], x26]\n"
+ "str q17, [%x[outptr], x26]\n"
"add x26, x26, #0x40\n"
+ "str q16, [%x[outptr], x25]\n"
+ "add x25, x25, #0x40\n"
"bge 1b\n"
"cbz %x[n_channels], 43f\n"
"7:" // Single vector of channels
"cmp %x[n_channels], #0x10\n"
"blt 14f\n"
"8:" // Single vector of channels: Loop
- "lsr x25, %x[n_valid_cells], #0x2\n"
- "movi v8.16b, #0x80\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 11f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldr q4, [x24, x9]\n"
- "subs x25, x25, #0x1\n"
- "ldr q3, [x23, x9]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ldr q28, [x22, x9]\n"
- "ldr q22, [x21, x9]\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
+ "movi v4.16b, #0x80\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 11f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "ldr q2, [x23, x28]\n"
+ "ldr q1, [x22, x28]\n"
+ "ldr q0, [x21, x28]\n"
+ "ldr q31, [x20, x28]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
- "smax v23.16b, v4.16b, v3.16b\n"
- "smax v19.16b, v28.16b, v22.16b\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldr q4, [x24, x9]\n"
- "ldr q3, [x23, x9]\n"
+ "smax v23.16b, v2.16b, v1.16b\n"
+ "smax v19.16b, v0.16b, v31.16b\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
"smax v19.16b, v23.16b, v19.16b\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "subs x25, x25, #0x1\n"
- "ldr q28, [x22, x9]\n"
- "ldr q22, [x21, x9]\n"
- "smax v8.16b, v8.16b, v19.16b\n"
- "add x20, x20, #0x20\n"
+ "subs x24, x24, #0x1\n"
+ "smax v4.16b, v4.16b, v19.16b\n"
+ "add x19, x19, #0x20\n"
+ "ldr q2, [x23, x28]\n"
+ "ldr q1, [x22, x28]\n"
+ "ldr q0, [x21, x28]\n"
+ "ldr q31, [x20, x28]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
- "smax v23.16b, v4.16b, v3.16b\n"
- "smax v19.16b, v28.16b, v22.16b\n"
+ "smax v23.16b, v2.16b, v1.16b\n"
+ "smax v19.16b, v0.16b, v31.16b\n"
"smax v19.16b, v23.16b, v19.16b\n"
- "smax v8.16b, v8.16b, v19.16b\n"
+ "smax v4.16b, v4.16b, v19.16b\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ldr q4, [x24, x9]\n"
- "subs x21, x21, #0x1\n"
- "smax v8.16b, v8.16b, v4.16b\n"
+ "ldr x23, [x19], #0x8\n"
+ "ldr q2, [x23, x28]\n"
+ "subs x20, x20, #0x1\n"
+ "smax v4.16b, v4.16b, v2.16b\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "sxtl v23.8h, v8.8b\n"
- "sxtl2 v22.8h, v8.16b\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
- "ld1r { v4.4s }, [x20]\n"
+ "sxtl v23.8h, v4.8b\n"
+ "sxtl2 v22.8h, v4.16b\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
+ "ld1r { v4.4s }, [x19]\n"
"sxtl v1.4s, v23.4h\n"
"sxtl2 v23.4s, v23.8h\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
- "ld1r { v3.4s }, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+ "ld1r { v3.4s }, [x19]\n"
"sxtl v0.4s, v22.4h\n"
"sxtl2 v31.4s, v22.8h\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
- "ld1r { v2.4s }, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+ "ld1r { v2.4s }, [x19]\n"
"srshl v1.4s, v1.4s, v4.4s\n"
"srshl v23.4s, v23.4s, v4.4s\n"
"sub %x[n_channels], %x[n_channels], #0x10\n"
@@ -368,192 +368,192 @@ void a64_s8q_nhwc_max_generic_depthfirst_impl(
"uzp1 v23.16b, v1.16b, v23.16b\n"
"uzp1 v16.16b, v0.16b, v31.16b\n"
"uzp1 v16.16b, v23.16b, v16.16b\n"
- "str q16, [%x[outptr], x9]\n"
- "add x9, x9, #0x10\n"
+ "str q16, [%x[outptr], x28]\n"
+ "add x28, x28, #0x10\n"
"bge 8b\n"
"cbz %x[n_channels], 43f\n"
"14:" // Oddments
- "lsr x25, %x[n_valid_cells], #0x2\n"
- "add %x[outptr], %x[outptr], x9\n"
- "movi v8.16b, #0x80\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 24f\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
+ "add %x[outptr], %x[outptr], x28\n"
+ "movi v4.16b, #0x80\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 24f\n"
"15:" // Oddments: 4 inputs loop
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "add x24, x24, x9\n"
- "add x23, x23, x9\n"
- "add x22, x22, x9\n"
- "movi v4.16b, #0x0\n"
- "movi v3.16b, #0x0\n"
- "add x21, x21, x9\n"
- "movi v28.16b, #0x0\n"
- "movi v22.16b, #0x0\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "add x23, x23, x28\n"
+ "add x22, x22, x28\n"
+ "add x21, x21, x28\n"
+ "movi v2.16b, #0x0\n"
+ "movi v1.16b, #0x0\n"
+ "add x20, x20, x28\n"
+ "movi v0.16b, #0x0\n"
+ "movi v31.16b, #0x0\n"
"tbz %x[n_channels], #3, 19f\n"
- "ldr d4, [x24], #0x8\n"
- "ldr d3, [x23], #0x8\n"
- "ldr d28, [x22], #0x8\n"
- "ldr d22, [x21], #0x8\n"
+ "ldr d2, [x23], #0x8\n"
+ "ldr d1, [x22], #0x8\n"
+ "ldr d0, [x21], #0x8\n"
+ "ldr d31, [x20], #0x8\n"
"tbz %x[n_channels], #2, 17f\n"
- "ld1 { v4.s }[2], [x24], #0x4\n"
- "ld1 { v3.s }[2], [x23], #0x4\n"
- "ld1 { v28.s }[2], [x22], #0x4\n"
- "ld1 { v22.s }[2], [x21], #0x4\n"
+ "ld1 { v2.s }[2], [x23], #0x4\n"
+ "ld1 { v1.s }[2], [x22], #0x4\n"
+ "ld1 { v0.s }[2], [x21], #0x4\n"
+ "ld1 { v31.s }[2], [x20], #0x4\n"
"tbz %x[n_channels], #1, 16f\n"
- "ld1 { v4.h }[6], [x24], #0x2\n"
- "ld1 { v3.h }[6], [x23], #0x2\n"
- "ld1 { v28.h }[6], [x22], #0x2\n"
- "ld1 { v22.h }[6], [x21], #0x2\n"
+ "ld1 { v2.h }[6], [x23], #0x2\n"
+ "ld1 { v1.h }[6], [x22], #0x2\n"
+ "ld1 { v0.h }[6], [x21], #0x2\n"
+ "ld1 { v31.h }[6], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[14], [x24], #0x1\n"
- "ld1 { v3.b }[14], [x23], #0x1\n"
- "ld1 { v28.b }[14], [x22], #0x1\n"
- "ld1 { v22.b }[14], [x21], #0x1\n"
+ "ld1 { v2.b }[14], [x23], #0x1\n"
+ "ld1 { v1.b }[14], [x22], #0x1\n"
+ "ld1 { v0.b }[14], [x21], #0x1\n"
+ "ld1 { v31.b }[14], [x20], #0x1\n"
"b 23f\n"
"16:" // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[12], [x24], #0x1\n"
- "ld1 { v3.b }[12], [x23], #0x1\n"
- "ld1 { v28.b }[12], [x22], #0x1\n"
- "ld1 { v22.b }[12], [x21], #0x1\n"
+ "ld1 { v2.b }[12], [x23], #0x1\n"
+ "ld1 { v1.b }[12], [x22], #0x1\n"
+ "ld1 { v0.b }[12], [x21], #0x1\n"
+ "ld1 { v31.b }[12], [x20], #0x1\n"
"b 23f\n"
"17:" // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 18f\n"
- "ld1 { v4.h }[4], [x24], #0x2\n"
- "ld1 { v3.h }[4], [x23], #0x2\n"
- "ld1 { v28.h }[4], [x22], #0x2\n"
- "ld1 { v22.h }[4], [x21], #0x2\n"
+ "ld1 { v2.h }[4], [x23], #0x2\n"
+ "ld1 { v1.h }[4], [x22], #0x2\n"
+ "ld1 { v0.h }[4], [x21], #0x2\n"
+ "ld1 { v31.h }[4], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[10], [x24], #0x1\n"
- "ld1 { v3.b }[10], [x23], #0x1\n"
- "ld1 { v28.b }[10], [x22], #0x1\n"
- "ld1 { v22.b }[10], [x21], #0x1\n"
+ "ld1 { v2.b }[10], [x23], #0x1\n"
+ "ld1 { v1.b }[10], [x22], #0x1\n"
+ "ld1 { v0.b }[10], [x21], #0x1\n"
+ "ld1 { v31.b }[10], [x20], #0x1\n"
"b 23f\n"
"18:" // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[8], [x24], #0x1\n"
- "ld1 { v3.b }[8], [x23], #0x1\n"
- "ld1 { v28.b }[8], [x22], #0x1\n"
- "ld1 { v22.b }[8], [x21], #0x1\n"
+ "ld1 { v2.b }[8], [x23], #0x1\n"
+ "ld1 { v1.b }[8], [x22], #0x1\n"
+ "ld1 { v0.b }[8], [x21], #0x1\n"
+ "ld1 { v31.b }[8], [x20], #0x1\n"
"b 23f\n"
"19:" // Oddments: 4 inputs loop: Load: Bit 3: Unset
"tbz %x[n_channels], #2, 21f\n"
- "ldr s4, [x24], #0x4\n"
- "ldr s3, [x23], #0x4\n"
- "ldr s28, [x22], #0x4\n"
- "ldr s22, [x21], #0x4\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr s1, [x22], #0x4\n"
+ "ldr s0, [x21], #0x4\n"
+ "ldr s31, [x20], #0x4\n"
"tbz %x[n_channels], #1, 20f\n"
- "ld1 { v4.h }[2], [x24], #0x2\n"
- "ld1 { v3.h }[2], [x23], #0x2\n"
- "ld1 { v28.h }[2], [x22], #0x2\n"
- "ld1 { v22.h }[2], [x21], #0x2\n"
+ "ld1 { v2.h }[2], [x23], #0x2\n"
+ "ld1 { v1.h }[2], [x22], #0x2\n"
+ "ld1 { v0.h }[2], [x21], #0x2\n"
+ "ld1 { v31.h }[2], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[6], [x24], #0x1\n"
- "ld1 { v3.b }[6], [x23], #0x1\n"
- "ld1 { v28.b }[6], [x22], #0x1\n"
- "ld1 { v22.b }[6], [x21], #0x1\n"
+ "ld1 { v2.b }[6], [x23], #0x1\n"
+ "ld1 { v1.b }[6], [x22], #0x1\n"
+ "ld1 { v0.b }[6], [x21], #0x1\n"
+ "ld1 { v31.b }[6], [x20], #0x1\n"
"b 23f\n"
"20:" // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[4], [x24], #0x1\n"
- "ld1 { v3.b }[4], [x23], #0x1\n"
- "ld1 { v28.b }[4], [x22], #0x1\n"
- "ld1 { v22.b }[4], [x21], #0x1\n"
+ "ld1 { v2.b }[4], [x23], #0x1\n"
+ "ld1 { v1.b }[4], [x22], #0x1\n"
+ "ld1 { v0.b }[4], [x21], #0x1\n"
+ "ld1 { v31.b }[4], [x20], #0x1\n"
"b 23f\n"
"21:" // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 22f\n"
- "ldr h4, [x24], #0x2\n"
- "ldr h3, [x23], #0x2\n"
- "ldr h28, [x22], #0x2\n"
- "ldr h22, [x21], #0x2\n"
+ "ldr h2, [x23], #0x2\n"
+ "ldr h1, [x22], #0x2\n"
+ "ldr h0, [x21], #0x2\n"
+ "ldr h31, [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[2], [x24], #0x1\n"
- "ld1 { v3.b }[2], [x23], #0x1\n"
- "ld1 { v28.b }[2], [x22], #0x1\n"
- "ld1 { v22.b }[2], [x21], #0x1\n"
+ "ld1 { v2.b }[2], [x23], #0x1\n"
+ "ld1 { v1.b }[2], [x22], #0x1\n"
+ "ld1 { v0.b }[2], [x21], #0x1\n"
+ "ld1 { v31.b }[2], [x20], #0x1\n"
"b 23f\n"
"22:" // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ldr b4, [x24], #0x1\n"
- "ldr b3, [x23], #0x1\n"
- "ldr b28, [x22], #0x1\n"
- "ldr b22, [x21], #0x1\n"
+ "ldr b2, [x23], #0x1\n"
+ "ldr b1, [x22], #0x1\n"
+ "ldr b0, [x21], #0x1\n"
+ "ldr b31, [x20], #0x1\n"
"23:" // Oddments: 4 inputs loop: Load: Bit 3: End
- "smax v23.16b, v4.16b, v3.16b\n"
- "smax v19.16b, v28.16b, v22.16b\n"
- "subs x25, x25, #0x1\n"
+ "smax v23.16b, v2.16b, v1.16b\n"
+ "smax v19.16b, v0.16b, v31.16b\n"
+ "subs x24, x24, #0x1\n"
"smax v19.16b, v23.16b, v19.16b\n"
- "smax v8.16b, v8.16b, v19.16b\n"
+ "smax v4.16b, v4.16b, v19.16b\n"
"bgt 15b\n"
"24:" // Oddments: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 34f\n"
"25:" // Oddments: Single input loop
- "ldr x24, [x20], #0x8\n"
- "add x24, x24, x9\n"
- "movi v4.16b, #0x0\n"
+ "ldr x23, [x19], #0x8\n"
+ "add x23, x23, x28\n"
+ "movi v2.16b, #0x0\n"
"tbz %x[n_channels], #3, 29f\n"
- "ldr d4, [x24], #0x8\n"
+ "ldr d2, [x23], #0x8\n"
"tbz %x[n_channels], #2, 27f\n"
- "ld1 { v4.s }[2], [x24], #0x4\n"
+ "ld1 { v2.s }[2], [x23], #0x4\n"
"tbz %x[n_channels], #1, 26f\n"
- "ld1 { v4.h }[6], [x24], #0x2\n"
+ "ld1 { v2.h }[6], [x23], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[14], [x24], #0x1\n"
+ "ld1 { v2.b }[14], [x23], #0x1\n"
"b 33f\n"
"26:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[12], [x24], #0x1\n"
+ "ld1 { v2.b }[12], [x23], #0x1\n"
"b 33f\n"
"27:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 28f\n"
- "ld1 { v4.h }[4], [x24], #0x2\n"
+ "ld1 { v2.h }[4], [x23], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[10], [x24], #0x1\n"
+ "ld1 { v2.b }[10], [x23], #0x1\n"
"b 33f\n"
"28:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[8], [x24], #0x1\n"
+ "ld1 { v2.b }[8], [x23], #0x1\n"
"b 33f\n"
"29:" // Oddments: Single input loop: Load: Bit 3: Unset
"tbz %x[n_channels], #2, 31f\n"
- "ldr s4, [x24], #0x4\n"
+ "ldr s2, [x23], #0x4\n"
"tbz %x[n_channels], #1, 30f\n"
- "ld1 { v4.h }[2], [x24], #0x2\n"
+ "ld1 { v2.h }[2], [x23], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[6], [x24], #0x1\n"
+ "ld1 { v2.b }[6], [x23], #0x1\n"
"b 33f\n"
"30:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[4], [x24], #0x1\n"
+ "ld1 { v2.b }[4], [x23], #0x1\n"
"b 33f\n"
"31:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 32f\n"
- "ldr h4, [x24], #0x2\n"
+ "ldr h2, [x23], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[2], [x24], #0x1\n"
+ "ld1 { v2.b }[2], [x23], #0x1\n"
"b 33f\n"
"32:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ldr b4, [x24], #0x1\n"
+ "ldr b2, [x23], #0x1\n"
"33:" // Oddments: Single input loop: Load: Bit 3: End
- "subs x21, x21, #0x1\n"
- "smax v8.16b, v8.16b, v4.16b\n"
+ "subs x20, x20, #0x1\n"
+ "smax v4.16b, v4.16b, v2.16b\n"
"bgt 25b\n"
"34:" // Oddments: Single input loop: End
- "sxtl v23.8h, v8.8b\n"
- "sxtl2 v22.8h, v8.16b\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
- "ld1r { v4.4s }, [x20]\n"
+ "sxtl v23.8h, v4.8b\n"
+ "sxtl2 v22.8h, v4.16b\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
+ "ld1r { v4.4s }, [x19]\n"
"sxtl v1.4s, v23.4h\n"
"sxtl2 v23.4s, v23.8h\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
- "ld1r { v3.4s }, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+ "ld1r { v3.4s }, [x19]\n"
"sxtl v0.4s, v22.4h\n"
"sxtl2 v31.4s, v22.8h\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
- "ld1r { v2.4s }, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+ "ld1r { v2.4s }, [x19]\n"
"srshl v1.4s, v1.4s, v4.4s\n"
"srshl v23.4s, v23.4s, v4.4s\n"
"srshl v0.4s, v0.4s, v4.4s\n"
@@ -627,7 +627,7 @@ void a64_s8q_nhwc_max_generic_depthfirst_impl(
"43:" // End
: [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
: [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells), [offsetof_qp_per_layer_left_shift] "I" (offsetof(Requantize32, per_layer_left_shift)), [offsetof_qp_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_qp_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [quant_params] "r" (&qp)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
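
The "Oddments" labels that recur throughout these kernels handle a channel tail shorter than one 16-byte vector: bits #3..#0 of n_channels are tested with tbz to assemble a partial register from 8-, 4-, 2- and 1-byte loads, with the untouched lanes zero- (or here, min-) initialised first. A sketch of that load pattern under illustrative names:

#include <cstddef>
#include <cstdint>
#include <cstring>

// Hypothetical scalar equivalent of the tbz-driven partial vector load.
void load_oddments(const int8_t *src, uint64_t n_channels, int8_t dst[16])
{
    std::memset(dst, 0, 16);  // movi v.16b, #0x0
    size_t pos = 0;
    if (n_channels & 8) { std::memcpy(dst + pos, src + pos, 8); pos += 8; }  // ldr d / tbz #3
    if (n_channels & 4) { std::memcpy(dst + pos, src + pos, 4); pos += 4; }  // ld1 {.s} / tbz #2
    if (n_channels & 2) { std::memcpy(dst + pos, src + pos, 2); pos += 2; }  // ld1 {.h} / tbz #1
    if (n_channels & 1) { dst[pos] = src[pos]; }                             // ld1 {.b} / tbz #0
}
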
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_avg_generic_depthfirst/generic.cpp
index 76828a911e..c9fdf76f33 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -96,16 +96,16 @@ void a64_u8_nhwc_avg_generic_depthfirst_impl(
__asm__ __volatile__(
"cmp %x[n_channels], #0x40\n"
- "mov x27, #0x0\n"
- "mov x26, #0x10\n" // cntb _, ALL, #1
- "mov x25, #0x20\n" // cntb _, ALL, #2
- "mov x24, #0x30\n" // cntb _, ALL, #3
+ "mov x26, #0x0\n"
+ "mov x25, #0x10\n" // cntb _, ALL, #1
+ "mov x24, #0x20\n" // cntb _, ALL, #2
+ "mov x23, #0x30\n" // cntb _, ALL, #3
"blt 7f\n"
"1:" // 4-vectors of channels
- "lsr x23, %x[n_valid_cells], #0x1\n"
+ "lsr x22, %x[n_valid_cells], #0x1\n"
"movi v15.4s, #0x0\n"
"movi v14.4s, #0x0\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"movi v13.4s, #0x0\n"
"movi v12.4s, #0x0\n"
"movi v11.4s, #0x0\n"
@@ -120,43 +120,43 @@ void a64_u8_nhwc_avg_generic_depthfirst_impl(
"movi v2.4s, #0x0\n"
"movi v1.4s, #0x0\n"
"movi v0.4s, #0x0\n"
- "cbz x23, 4f\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "ldr q31, [x22, x27]\n"
- "subs x23, x23, #0x1\n"
- "add x20, x20, #0x10\n"
- "ldr q30, [x21, x27]\n"
- "ldr q29, [x22, x26]\n"
- "ldr q28, [x21, x26]\n"
- "ldr q27, [x22, x25]\n"
- "ldr q26, [x21, x25]\n"
- "ldr q25, [x22, x24]\n"
- "ldr q24, [x21, x24]\n"
+ "cbz x22, 4f\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
+ "add x19, x19, #0x10\n"
+ "ldr q31, [x21, x26]\n"
+ "ldr q30, [x20, x26]\n"
+ "ldr q29, [x21, x25]\n"
+ "ldr q28, [x20, x25]\n"
+ "ldr q27, [x21, x24]\n"
+ "ldr q26, [x20, x24]\n"
+ "ldr q25, [x21, x23]\n"
+ "ldr q24, [x20, x23]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 2 inputs loop
"uaddl v23.8h, v31.8b, v30.8b\n"
"uaddl2 v22.8h, v31.16b, v30.16b\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "ldr q31, [x22, x27]\n"
- "ldr q30, [x21, x27]\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
"uaddl v21.8h, v29.8b, v28.8b\n"
"uaddl2 v20.8h, v29.16b, v28.16b\n"
- "ldr q29, [x22, x26]\n"
- "ldr q28, [x21, x26]\n"
+ "add x19, x19, #0x10\n"
+ "ldr q31, [x21, x26]\n"
"uaddl v19.8h, v27.8b, v26.8b\n"
"uaddl2 v18.8h, v27.16b, v26.16b\n"
- "ldr q27, [x22, x25]\n"
- "ldr q26, [x21, x25]\n"
- "subs x23, x23, #0x1\n"
- "uaddw v15.4s, v15.4s, v23.4h\n"
- "uaddw2 v14.4s, v14.4s, v23.8h\n"
+ "ldr q30, [x20, x26]\n"
+ "ldr q29, [x21, x25]\n"
"uaddl v17.8h, v25.8b, v24.8b\n"
"uaddl2 v16.8h, v25.16b, v24.16b\n"
- "ldr q25, [x22, x24]\n"
- "add x20, x20, #0x10\n"
+ "ldr q28, [x20, x25]\n"
+ "ldr q27, [x21, x24]\n"
+ "uaddw v15.4s, v15.4s, v23.4h\n"
+ "uaddw2 v14.4s, v14.4s, v23.8h\n"
+ "ldr q26, [x20, x24]\n"
+ "ldr q25, [x21, x23]\n"
"uaddw v13.4s, v13.4s, v22.4h\n"
"uaddw2 v12.4s, v12.4s, v22.8h\n"
- "ldr q24, [x21, x24]\n"
+ "ldr q24, [x20, x23]\n"
"uaddw v11.4s, v11.4s, v21.4h\n"
"uaddw2 v10.4s, v10.4s, v21.8h\n"
"uaddw v9.4s, v9.4s, v20.4h\n"
@@ -196,21 +196,21 @@ void a64_u8_nhwc_avg_generic_depthfirst_impl(
"uaddw v1.4s, v1.4s, v16.4h\n"
"uaddw2 v0.4s, v0.4s, v16.8h\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x1\n"
+ "ands x20, %x[n_valid_cells], #0x1\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x22, [x20], #0x8\n"
- "ldr q31, [x22, x27]\n"
+ "ldr x21, [x19], #0x8\n"
+ "ldr q31, [x21, x26]\n"
"uxtl v23.8h, v31.8b\n"
"uxtl2 v22.8h, v31.16b\n"
- "ldr q29, [x22, x26]\n"
- "ldr q27, [x22, x25]\n"
+ "ldr q29, [x21, x25]\n"
+ "ldr q27, [x21, x24]\n"
"uxtl v21.8h, v29.8b\n"
"uxtl2 v20.8h, v29.16b\n"
- "ldr q25, [x22, x24]\n"
+ "ldr q25, [x21, x23]\n"
"uxtl v19.8h, v27.8b\n"
"uxtl2 v18.8h, v27.16b\n"
- "subs x21, x21, #0x1\n"
+ "subs x20, x20, #0x1\n"
"uxtl v17.8h, v25.8b\n"
"uxtl2 v16.8h, v25.16b\n"
"uaddw v15.4s, v15.4s, v23.4h\n"
@@ -311,47 +311,47 @@ void a64_u8_nhwc_avg_generic_depthfirst_impl(
"uzp1 v19.16b, v1.16b, v0.16b\n"
"uzp1 v16.16b, v23.16b, v16.16b\n"
"uzp1 v18.16b, v22.16b, v18.16b\n"
- "str q16, [%x[outptr], x27]\n"
- "add x27, x27, #0x40\n"
+ "str q16, [%x[outptr], x26]\n"
+ "add x26, x26, #0x40\n"
"uzp1 v17.16b, v21.16b, v17.16b\n"
"uzp1 v16.16b, v20.16b, v19.16b\n"
- "str q18, [%x[outptr], x26]\n"
- "add x26, x26, #0x40\n"
- "str q17, [%x[outptr], x25]\n"
+ "str q18, [%x[outptr], x25]\n"
"add x25, x25, #0x40\n"
- "str q16, [%x[outptr], x24]\n"
+ "str q17, [%x[outptr], x24]\n"
"add x24, x24, #0x40\n"
+ "str q16, [%x[outptr], x23]\n"
+ "add x23, x23, #0x40\n"
"bge 1b\n"
"cbz %x[n_channels], 43f\n"
"7:" // Single vector of channels
"cmp %x[n_channels], #0x10\n"
"blt 14f\n"
"8:" // Single vector of channels: Loop
- "lsr x23, %x[n_valid_cells], #0x1\n"
+ "lsr x22, %x[n_valid_cells], #0x1\n"
"movi v15.4s, #0x0\n"
"movi v14.4s, #0x0\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"movi v13.4s, #0x0\n"
"movi v12.4s, #0x0\n"
- "cbz x23, 11f\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "ldr q31, [x22, x27]\n"
- "subs x23, x23, #0x1\n"
- "add x20, x20, #0x10\n"
- "ldr q30, [x21, x27]\n"
+ "cbz x22, 11f\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
+ "add x19, x19, #0x10\n"
+ "ldr q31, [x21, x26]\n"
+ "ldr q30, [x20, x26]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 2 inputs loop
"uaddl v23.8h, v31.8b, v30.8b\n"
"uaddl2 v22.8h, v31.16b, v30.16b\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "ldr q31, [x22, x27]\n"
- "ldr q30, [x21, x27]\n"
- "subs x23, x23, #0x1\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
"uaddw v15.4s, v15.4s, v23.4h\n"
"uaddw2 v14.4s, v14.4s, v23.8h\n"
+ "add x19, x19, #0x10\n"
+ "ldr q31, [x21, x26]\n"
"uaddw v13.4s, v13.4s, v22.4h\n"
"uaddw2 v12.4s, v12.4s, v22.8h\n"
- "add x20, x20, #0x10\n"
+ "ldr q30, [x20, x26]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 2 inputs tail
"uaddl v23.8h, v31.8b, v30.8b\n"
@@ -361,14 +361,14 @@ void a64_u8_nhwc_avg_generic_depthfirst_impl(
"uaddw v13.4s, v13.4s, v22.4h\n"
"uaddw2 v12.4s, v12.4s, v22.8h\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x1\n"
+ "ands x20, %x[n_valid_cells], #0x1\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x22, [x20], #0x8\n"
- "ldr q31, [x22, x27]\n"
+ "ldr x21, [x19], #0x8\n"
+ "ldr q31, [x21, x26]\n"
"uxtl v23.8h, v31.8b\n"
"uxtl2 v22.8h, v31.16b\n"
- "subs x21, x21, #0x1\n"
+ "subs x20, x20, #0x1\n"
"uaddw v15.4s, v15.4s, v23.4h\n"
"uaddw2 v14.4s, v14.4s, v23.8h\n"
"uaddw v13.4s, v13.4s, v22.4h\n"
@@ -400,149 +400,149 @@ void a64_u8_nhwc_avg_generic_depthfirst_impl(
"uzp1 v23.16b, v15.16b, v14.16b\n"
"uzp1 v16.16b, v13.16b, v12.16b\n"
"uzp1 v16.16b, v23.16b, v16.16b\n"
- "str q16, [%x[outptr], x27]\n"
- "add x27, x27, #0x10\n"
+ "str q16, [%x[outptr], x26]\n"
+ "add x26, x26, #0x10\n"
"bge 8b\n"
"cbz %x[n_channels], 43f\n"
"14:" // Oddments
- "lsr x23, %x[n_valid_cells], #0x1\n"
- "add %x[outptr], %x[outptr], x27\n"
+ "lsr x22, %x[n_valid_cells], #0x1\n"
+ "add %x[outptr], %x[outptr], x26\n"
"movi v15.4s, #0x0\n"
"movi v14.4s, #0x0\n"
"movi v13.4s, #0x0\n"
"movi v12.4s, #0x0\n"
- "mov x20, %x[inptrs]\n"
- "cbz x23, 24f\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x22, 24f\n"
"15:" // Oddments: 2 inputs loop
- "ldp x22, x21, [x20, #0x0]\n"
- "add x20, x20, #0x10\n"
- "add x22, x22, x27\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "add x19, x19, #0x10\n"
+ "add x21, x21, x26\n"
"movi v31.16b, #0x0\n"
- "add x21, x21, x27\n"
+ "add x20, x20, x26\n"
"movi v30.16b, #0x0\n"
"tbz %x[n_channels], #3, 19f\n"
- "ldr d31, [x22], #0x8\n"
- "ldr d30, [x21], #0x8\n"
+ "ldr d31, [x21], #0x8\n"
+ "ldr d30, [x20], #0x8\n"
"tbz %x[n_channels], #2, 17f\n"
- "ld1 { v31.s }[2], [x22], #0x4\n"
- "ld1 { v30.s }[2], [x21], #0x4\n"
+ "ld1 { v31.s }[2], [x21], #0x4\n"
+ "ld1 { v30.s }[2], [x20], #0x4\n"
"tbz %x[n_channels], #1, 16f\n"
- "ld1 { v31.h }[6], [x22], #0x2\n"
- "ld1 { v30.h }[6], [x21], #0x2\n"
+ "ld1 { v31.h }[6], [x21], #0x2\n"
+ "ld1 { v30.h }[6], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v31.b }[14], [x22], #0x1\n"
- "ld1 { v30.b }[14], [x21], #0x1\n"
+ "ld1 { v31.b }[14], [x21], #0x1\n"
+ "ld1 { v30.b }[14], [x20], #0x1\n"
"b 23f\n"
"16:" // Oddments: 2 inputs loop: Load: Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v31.b }[12], [x22], #0x1\n"
- "ld1 { v30.b }[12], [x21], #0x1\n"
+ "ld1 { v31.b }[12], [x21], #0x1\n"
+ "ld1 { v30.b }[12], [x20], #0x1\n"
"b 23f\n"
"17:" // Oddments: 2 inputs loop: Load: Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 18f\n"
- "ld1 { v31.h }[4], [x22], #0x2\n"
- "ld1 { v30.h }[4], [x21], #0x2\n"
+ "ld1 { v31.h }[4], [x21], #0x2\n"
+ "ld1 { v30.h }[4], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v31.b }[10], [x22], #0x1\n"
- "ld1 { v30.b }[10], [x21], #0x1\n"
+ "ld1 { v31.b }[10], [x21], #0x1\n"
+ "ld1 { v30.b }[10], [x20], #0x1\n"
"b 23f\n"
"18:" // Oddments: 2 inputs loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v31.b }[8], [x22], #0x1\n"
- "ld1 { v30.b }[8], [x21], #0x1\n"
+ "ld1 { v31.b }[8], [x21], #0x1\n"
+ "ld1 { v30.b }[8], [x20], #0x1\n"
"b 23f\n"
"19:" // Oddments: 2 inputs loop: Load: Bit 3: Unset
"tbz %x[n_channels], #2, 21f\n"
- "ldr s31, [x22], #0x4\n"
- "ldr s30, [x21], #0x4\n"
+ "ldr s31, [x21], #0x4\n"
+ "ldr s30, [x20], #0x4\n"
"tbz %x[n_channels], #1, 20f\n"
- "ld1 { v31.h }[2], [x22], #0x2\n"
- "ld1 { v30.h }[2], [x21], #0x2\n"
+ "ld1 { v31.h }[2], [x21], #0x2\n"
+ "ld1 { v30.h }[2], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v31.b }[6], [x22], #0x1\n"
- "ld1 { v30.b }[6], [x21], #0x1\n"
+ "ld1 { v31.b }[6], [x21], #0x1\n"
+ "ld1 { v30.b }[6], [x20], #0x1\n"
"b 23f\n"
"20:" // Oddments: 2 inputs loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v31.b }[4], [x22], #0x1\n"
- "ld1 { v30.b }[4], [x21], #0x1\n"
+ "ld1 { v31.b }[4], [x21], #0x1\n"
+ "ld1 { v30.b }[4], [x20], #0x1\n"
"b 23f\n"
"21:" // Oddments: 2 inputs loop: Load: Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 22f\n"
- "ldr h31, [x22], #0x2\n"
- "ldr h30, [x21], #0x2\n"
+ "ldr h31, [x21], #0x2\n"
+ "ldr h30, [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v31.b }[2], [x22], #0x1\n"
- "ld1 { v30.b }[2], [x21], #0x1\n"
+ "ld1 { v31.b }[2], [x21], #0x1\n"
+ "ld1 { v30.b }[2], [x20], #0x1\n"
"b 23f\n"
"22:" // Oddments: 2 inputs loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ldr b31, [x22], #0x1\n"
- "ldr b30, [x21], #0x1\n"
+ "ldr b31, [x21], #0x1\n"
+ "ldr b30, [x20], #0x1\n"
"23:" // Oddments: 2 inputs loop: Load: Bit 3: End
"uaddl v23.8h, v31.8b, v30.8b\n"
"uaddl2 v22.8h, v31.16b, v30.16b\n"
- "subs x23, x23, #0x1\n"
+ "subs x22, x22, #0x1\n"
"uaddw v15.4s, v15.4s, v23.4h\n"
"uaddw2 v14.4s, v14.4s, v23.8h\n"
"uaddw v13.4s, v13.4s, v22.4h\n"
"uaddw2 v12.4s, v12.4s, v22.8h\n"
"bgt 15b\n"
"24:" // Oddments: After loop
- "ands x21, %x[n_valid_cells], #0x1\n"
+ "ands x20, %x[n_valid_cells], #0x1\n"
"beq 34f\n"
"25:" // Oddments: Single input loop
- "ldr x22, [x20], #0x8\n"
- "add x22, x22, x27\n"
+ "ldr x21, [x19], #0x8\n"
+ "add x21, x21, x26\n"
"movi v31.16b, #0x0\n"
"tbz %x[n_channels], #3, 29f\n"
- "ldr d31, [x22], #0x8\n"
+ "ldr d31, [x21], #0x8\n"
"tbz %x[n_channels], #2, 27f\n"
- "ld1 { v31.s }[2], [x22], #0x4\n"
+ "ld1 { v31.s }[2], [x21], #0x4\n"
"tbz %x[n_channels], #1, 26f\n"
- "ld1 { v31.h }[6], [x22], #0x2\n"
+ "ld1 { v31.h }[6], [x21], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v31.b }[14], [x22], #0x1\n"
+ "ld1 { v31.b }[14], [x21], #0x1\n"
"b 33f\n"
"26:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v31.b }[12], [x22], #0x1\n"
+ "ld1 { v31.b }[12], [x21], #0x1\n"
"b 33f\n"
"27:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 28f\n"
- "ld1 { v31.h }[4], [x22], #0x2\n"
+ "ld1 { v31.h }[4], [x21], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v31.b }[10], [x22], #0x1\n"
+ "ld1 { v31.b }[10], [x21], #0x1\n"
"b 33f\n"
"28:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v31.b }[8], [x22], #0x1\n"
+ "ld1 { v31.b }[8], [x21], #0x1\n"
"b 33f\n"
"29:" // Oddments: Single input loop: Load: Bit 3: Unset
"tbz %x[n_channels], #2, 31f\n"
- "ldr s31, [x22], #0x4\n"
+ "ldr s31, [x21], #0x4\n"
"tbz %x[n_channels], #1, 30f\n"
- "ld1 { v31.h }[2], [x22], #0x2\n"
+ "ld1 { v31.h }[2], [x21], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v31.b }[6], [x22], #0x1\n"
+ "ld1 { v31.b }[6], [x21], #0x1\n"
"b 33f\n"
"30:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v31.b }[4], [x22], #0x1\n"
+ "ld1 { v31.b }[4], [x21], #0x1\n"
"b 33f\n"
"31:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 32f\n"
- "ldr h31, [x22], #0x2\n"
+ "ldr h31, [x21], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v31.b }[2], [x22], #0x1\n"
+ "ld1 { v31.b }[2], [x21], #0x1\n"
"b 33f\n"
"32:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ldr b31, [x22], #0x1\n"
+ "ldr b31, [x21], #0x1\n"
"33:" // Oddments: Single input loop: Load: Bit 3: End
"uxtl v23.8h, v31.8b\n"
"uxtl2 v22.8h, v31.16b\n"
- "subs x21, x21, #0x1\n"
+ "subs x20, x20, #0x1\n"
"uaddw v15.4s, v15.4s, v23.4h\n"
"uaddw2 v14.4s, v14.4s, v23.8h\n"
"uaddw v13.4s, v13.4s, v22.4h\n"
@@ -620,7 +620,7 @@ void a64_u8_nhwc_avg_generic_depthfirst_impl(
"43:" // End
: [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
: [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells), [rescale_ptr] "r" (&rescale_value), [shift_ptr] "r" (&shift_value)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
index 149566197a..06ded77647 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -62,111 +62,111 @@ void a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
pad_left, pad_top, pad_right, pad_bottom);
__asm__ __volatile__(
- "ldr x16, [%x[args], %[offsetof_n_channels]]\n"
- "ldr x21, [%x[args], %[offsetof_outptrs]]\n"
- "cmp x16, #0x10\n"
- "mov x15, #0x0\n"
- "ldr x20, [%x[args], %[offsetof_inptrs]]\n"
- "ldp x14, x13, [x21, #0x0]\n"
- "mov x12, #0x0\n"
- "ldp x11, x10, [x21, #0x10]\n"
- "ldp x9, x28, [x20, #0x0]\n"
- "ldp x27, x26, [x20, #0x10]\n"
- "ldp x25, x24, [x20, #0x20]\n"
- "ldp x23, x22, [x20, #0x30]\n"
- "ldr x21, [x20, #0x40]\n"
+ "ldr x15, [%x[args], %[offsetof_n_channels]]\n"
+ "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
+ "cmp x15, #0x10\n"
+ "mov x14, #0x0\n"
+ "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+ "ldp x13, x12, [x20, #0x0]\n"
+ "mov x11, #0x0\n"
+ "ldp x10, x9, [x20, #0x10]\n"
+ "ldp x28, x27, [x19, #0x0]\n"
+ "ldp x26, x25, [x19, #0x10]\n"
+ "ldp x24, x23, [x19, #0x20]\n"
+ "ldp x22, x21, [x19, #0x30]\n"
+ "ldr x20, [x19, #0x40]\n"
"blt 3f\n"
- "ldr q30, [x28, x15]\n"
- "ldr q29, [x25, x15]\n"
- "lsr x20, x16, #0x4\n"
- "sub x16, x16, x20, LSL #4\n"
- "ldr q28, [x22, x15]\n"
- "ldr q27, [x26, x15]\n"
- "subs x20, x20, #0x1\n"
- "ldr q26, [x9, x15]\n"
- "ldr q25, [x27, x15]\n"
- "ldr q24, [x24, x15]\n"
- "ldr q23, [x23, x15]\n"
- "ldr q22, [x21, x15]\n"
- "add x15, x15, #0x10\n"
+ "lsr x19, x15, #0x4\n"
+ "sub x15, x15, x19, LSL #4\n"
+ "ldr q30, [x27, x14]\n"
+ "ldr q29, [x24, x14]\n"
+ "subs x19, x19, #0x1\n"
+ "ldr q28, [x21, x14]\n"
+ "ldr q27, [x25, x14]\n"
+ "ldr q26, [x28, x14]\n"
+ "ldr q25, [x23, x14]\n"
+ "ldr q24, [x26, x14]\n"
+ "ldr q23, [x22, x14]\n"
+ "ldr q22, [x20, x14]\n"
+ "add x14, x14, #0x10\n"
"beq 2f\n"
"1:" // Vector: Loop
"umax v21.16b, v30.16b, v29.16b\n"
- "ldr q30, [x28, x15]\n"
"umax v20.16b, v29.16b, v28.16b\n"
- "ldr q29, [x25, x15]\n"
- "ldr q28, [x22, x15]\n"
+ "subs x19, x19, #0x1\n"
+ "ldr q30, [x27, x14]\n"
"umax v19.16b, v27.16b, v26.16b\n"
- "ldr q26, [x9, x15]\n"
"umax v18.16b, v25.16b, v24.16b\n"
- "ldr q25, [x27, x15]\n"
- "umax v17.16b, v27.16b, v23.16b\n"
- "ldr q27, [x26, x15]\n"
- "umax v16.16b, v24.16b, v22.16b\n"
- "ldr q24, [x24, x15]\n"
- "ldr q23, [x23, x15]\n"
- "subs x20, x20, #0x1\n"
+ "ldr q29, [x24, x14]\n"
+ "ldr q28, [x21, x14]\n"
+ "umax v17.16b, v23.16b, v27.16b\n"
+ "umax v16.16b, v25.16b, v22.16b\n"
+ "ldr q27, [x25, x14]\n"
+ "ldr q26, [x28, x14]\n"
"umax v19.16b, v21.16b, v19.16b\n"
- "ldr q22, [x21, x15]\n"
"umax v18.16b, v18.16b, v21.16b\n"
- "umax v17.16b, v17.16b, v20.16b\n"
- "add x15, x15, #0x10\n"
- "umax v16.16b, v16.16b, v20.16b\n"
- "str q19, [x14, x12]\n"
- "str q18, [x13, x12]\n"
- "str q17, [x11, x12]\n"
- "str q16, [x10, x12]\n"
- "add x12, x12, #0x10\n"
+ "ldr q25, [x23, x14]\n"
+ "ldr q24, [x26, x14]\n"
+ "umax v17.16b, v20.16b, v17.16b\n"
+ "umax v16.16b, v20.16b, v16.16b\n"
+ "ldr q23, [x22, x14]\n"
+ "ldr q22, [x20, x14]\n"
+ "add x14, x14, #0x10\n"
+ "str q19, [x13, x11]\n"
+ "str q18, [x12, x11]\n"
+ "str q17, [x10, x11]\n"
+ "str q16, [x9, x11]\n"
+ "add x11, x11, #0x10\n"
"bgt 1b\n"
"2:" // Vector: Tail
"umax v21.16b, v30.16b, v29.16b\n"
"umax v20.16b, v29.16b, v28.16b\n"
"umax v19.16b, v27.16b, v26.16b\n"
"umax v18.16b, v25.16b, v24.16b\n"
- "umax v17.16b, v27.16b, v23.16b\n"
- "umax v16.16b, v24.16b, v22.16b\n"
+ "umax v17.16b, v23.16b, v27.16b\n"
+ "umax v16.16b, v25.16b, v22.16b\n"
"umax v19.16b, v21.16b, v19.16b\n"
"umax v18.16b, v18.16b, v21.16b\n"
- "str q19, [x14, x12]\n"
- "umax v17.16b, v17.16b, v20.16b\n"
- "umax v16.16b, v16.16b, v20.16b\n"
- "str q18, [x13, x12]\n"
- "str q17, [x11, x12]\n"
- "str q16, [x10, x12]\n"
- "add x12, x12, #0x10\n"
- "cbz x16, 4f\n"
+ "str q19, [x13, x11]\n"
+ "umax v17.16b, v20.16b, v17.16b\n"
+ "umax v16.16b, v20.16b, v16.16b\n"
+ "str q18, [x12, x11]\n"
+ "str q17, [x10, x11]\n"
+ "str q16, [x9, x11]\n"
+ "add x11, x11, #0x10\n"
+ "cbz x15, 4f\n"
"3:" // Oddments
- "ldr b30, [x28, x15]\n"
- "ldr b29, [x25, x15]\n"
+ "ldr b30, [x27, x14]\n"
+ "ldr b29, [x24, x14]\n"
"umax v21.16b, v30.16b, v29.16b\n"
- "subs x16, x16, #0x1\n"
- "ldr b28, [x22, x15]\n"
- "ldr b27, [x26, x15]\n"
+ "subs x15, x15, #0x1\n"
+ "ldr b28, [x21, x14]\n"
+ "ldr b27, [x25, x14]\n"
"umax v20.16b, v29.16b, v28.16b\n"
- "ldr b26, [x9, x15]\n"
- "ldr b25, [x27, x15]\n"
+ "ldr b26, [x28, x14]\n"
+ "ldr b25, [x23, x14]\n"
"umax v19.16b, v27.16b, v26.16b\n"
"umax v19.16b, v21.16b, v19.16b\n"
- "ldr b24, [x24, x15]\n"
- "ldr b23, [x23, x15]\n"
+ "ldr b24, [x26, x14]\n"
+ "ldr b23, [x22, x14]\n"
"umax v18.16b, v25.16b, v24.16b\n"
- "umax v17.16b, v27.16b, v23.16b\n"
- "ldr b22, [x21, x15]\n"
- "umax v16.16b, v24.16b, v22.16b\n"
- "add x15, x15, #0x1\n"
+ "umax v17.16b, v23.16b, v27.16b\n"
+ "ldr b22, [x20, x14]\n"
+ "umax v16.16b, v25.16b, v22.16b\n"
+ "add x14, x14, #0x1\n"
"umax v18.16b, v18.16b, v21.16b\n"
- "umax v17.16b, v17.16b, v20.16b\n"
- "umax v16.16b, v16.16b, v20.16b\n"
- "str b19, [x14, x12]\n"
- "str b18, [x13, x12]\n"
- "str b17, [x11, x12]\n"
- "str b16, [x10, x12]\n"
- "add x12, x12, #0x1\n"
+ "umax v17.16b, v20.16b, v17.16b\n"
+ "umax v16.16b, v20.16b, v16.16b\n"
+ "str b19, [x13, x11]\n"
+ "str b18, [x12, x11]\n"
+ "str b17, [x10, x11]\n"
+ "str b16, [x9, x11]\n"
+ "add x11, x11, #0x1\n"
"bgt 3b\n"
"4:" // End
:
: [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
- : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_generic_depthfirst/generic.cpp
index 98f5b8351c..355f21795c 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,394 +41,394 @@ void a64_u8_nhwc_max_generic_depthfirst_impl(
{
__asm__ __volatile__(
"cmp %x[n_channels], #0x40\n"
- "mov x9, #0x0\n"
- "mov x28, #0x10\n" // cntb _, ALL, #1
- "mov x27, #0x20\n" // cntb _, ALL, #2
- "mov x26, #0x30\n" // cntb _, ALL, #3
+ "mov x28, #0x0\n"
+ "mov x27, #0x10\n" // cntb _, ALL, #1
+ "mov x26, #0x20\n" // cntb _, ALL, #2
+ "mov x25, #0x30\n" // cntb _, ALL, #3
"blt 7f\n"
"1:" // 4-vectors of channels
- "lsr x25, %x[n_valid_cells], #0x2\n"
- "movi v8.16b, #0x0\n"
- "movi v7.16b, #0x0\n"
- "mov x20, %x[inptrs]\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"movi v6.16b, #0x0\n"
"movi v5.16b, #0x0\n"
- "cbz x25, 4f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldr q4, [x24, x9]\n"
- "subs x25, x25, #0x1\n"
- "ldr q3, [x23, x9]\n"
- "ldr q2, [x24, x28]\n"
- "ldr q1, [x23, x28]\n"
- "ldr q0, [x24, x27]\n"
- "ldr q31, [x23, x27]\n"
- "ldr q30, [x24, x26]\n"
- "ldr q29, [x23, x26]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ldr q28, [x22, x9]\n"
- "ldr q22, [x21, x9]\n"
- "ldr q27, [x22, x28]\n"
- "ldr q21, [x21, x28]\n"
- "ldr q26, [x22, x27]\n"
- "ldr q20, [x21, x27]\n"
- "ldr q25, [x22, x26]\n"
- "ldr q24, [x21, x26]\n"
+ "mov x19, %x[inptrs]\n"
+ "movi v4.16b, #0x0\n"
+ "movi v3.16b, #0x0\n"
+ "cbz x24, 4f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "ldr q2, [x23, x28]\n"
+ "ldr q1, [x22, x28]\n"
+ "ldr q0, [x21, x28]\n"
+ "ldr q31, [x20, x28]\n"
+ "ldr q30, [x23, x27]\n"
+ "ldr q22, [x22, x27]\n"
+ "ldr q29, [x21, x27]\n"
+ "ldr q28, [x20, x27]\n"
+ "ldr q27, [x23, x26]\n"
+ "ldr q21, [x22, x26]\n"
+ "ldr q26, [x21, x26]\n"
+ "ldr q17, [x20, x26]\n"
+ "ldr q25, [x23, x25]\n"
+ "ldr q20, [x22, x25]\n"
+ "ldr q24, [x21, x25]\n"
+ "ldr q16, [x20, x25]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
- "umax v23.16b, v4.16b, v3.16b\n"
- "umax v19.16b, v28.16b, v22.16b\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldr q4, [x24, x9]\n"
- "ldr q3, [x23, x9]\n"
- "umax v22.16b, v2.16b, v1.16b\n"
- "ldr q2, [x24, x28]\n"
- "umax v18.16b, v27.16b, v21.16b\n"
- "ldr q1, [x23, x28]\n"
- "umax v21.16b, v0.16b, v31.16b\n"
- "ldr q0, [x24, x27]\n"
- "umax v17.16b, v26.16b, v20.16b\n"
- "ldr q31, [x23, x27]\n"
- "umax v20.16b, v30.16b, v29.16b\n"
- "ldr q30, [x24, x26]\n"
- "umax v16.16b, v25.16b, v24.16b\n"
- "ldr q29, [x23, x26]\n"
+ "umax v23.16b, v2.16b, v1.16b\n"
+ "umax v19.16b, v0.16b, v31.16b\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "umax v22.16b, v30.16b, v22.16b\n"
+ "umax v18.16b, v29.16b, v28.16b\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "umax v21.16b, v27.16b, v21.16b\n"
+ "umax v17.16b, v26.16b, v17.16b\n"
+ "ldr q2, [x23, x28]\n"
+ "ldr q1, [x22, x28]\n"
+ "umax v20.16b, v25.16b, v20.16b\n"
+ "umax v16.16b, v24.16b, v16.16b\n"
+ "ldr q0, [x21, x28]\n"
+ "ldr q31, [x20, x28]\n"
"umax v19.16b, v23.16b, v19.16b\n"
"umax v18.16b, v22.16b, v18.16b\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "ldr q28, [x22, x9]\n"
- "ldr q22, [x21, x9]\n"
+ "ldr q30, [x23, x27]\n"
+ "ldr q22, [x22, x27]\n"
"umax v17.16b, v21.16b, v17.16b\n"
"umax v16.16b, v20.16b, v16.16b\n"
- "ldr q27, [x22, x28]\n"
- "ldr q21, [x21, x28]\n"
- "subs x25, x25, #0x1\n"
- "umax v8.16b, v8.16b, v19.16b\n"
- "ldr q26, [x22, x27]\n"
- "ldr q20, [x21, x27]\n"
- "umax v7.16b, v7.16b, v18.16b\n"
- "umax v6.16b, v6.16b, v17.16b\n"
- "ldr q25, [x22, x26]\n"
- "ldr q24, [x21, x26]\n"
- "umax v5.16b, v5.16b, v16.16b\n"
- "add x20, x20, #0x20\n"
+ "ldr q29, [x21, x27]\n"
+ "ldr q28, [x20, x27]\n"
+ "umax v6.16b, v6.16b, v19.16b\n"
+ "umax v5.16b, v5.16b, v18.16b\n"
+ "ldr q27, [x23, x26]\n"
+ "ldr q21, [x22, x26]\n"
+ "umax v4.16b, v4.16b, v17.16b\n"
+ "umax v3.16b, v3.16b, v16.16b\n"
+ "ldr q26, [x21, x26]\n"
+ "ldr q17, [x20, x26]\n"
+ "ldr q25, [x23, x25]\n"
+ "ldr q20, [x22, x25]\n"
+ "ldr q24, [x21, x25]\n"
+ "ldr q16, [x20, x25]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
- "umax v23.16b, v4.16b, v3.16b\n"
- "umax v19.16b, v28.16b, v22.16b\n"
- "umax v22.16b, v2.16b, v1.16b\n"
- "umax v18.16b, v27.16b, v21.16b\n"
- "umax v21.16b, v0.16b, v31.16b\n"
- "umax v17.16b, v26.16b, v20.16b\n"
- "umax v20.16b, v30.16b, v29.16b\n"
- "umax v16.16b, v25.16b, v24.16b\n"
+ "umax v23.16b, v2.16b, v1.16b\n"
+ "umax v19.16b, v0.16b, v31.16b\n"
+ "umax v22.16b, v30.16b, v22.16b\n"
+ "umax v18.16b, v29.16b, v28.16b\n"
+ "umax v21.16b, v27.16b, v21.16b\n"
+ "umax v17.16b, v26.16b, v17.16b\n"
+ "umax v20.16b, v25.16b, v20.16b\n"
+ "umax v16.16b, v24.16b, v16.16b\n"
"umax v19.16b, v23.16b, v19.16b\n"
"umax v18.16b, v22.16b, v18.16b\n"
"umax v17.16b, v21.16b, v17.16b\n"
"umax v16.16b, v20.16b, v16.16b\n"
- "umax v8.16b, v8.16b, v19.16b\n"
- "umax v7.16b, v7.16b, v18.16b\n"
- "umax v6.16b, v6.16b, v17.16b\n"
- "umax v5.16b, v5.16b, v16.16b\n"
+ "umax v6.16b, v6.16b, v19.16b\n"
+ "umax v5.16b, v5.16b, v18.16b\n"
+ "umax v4.16b, v4.16b, v17.16b\n"
+ "umax v3.16b, v3.16b, v16.16b\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ldr q4, [x24, x9]\n"
- "subs x21, x21, #0x1\n"
- "umax v8.16b, v8.16b, v4.16b\n"
- "ldr q2, [x24, x28]\n"
- "ldr q0, [x24, x27]\n"
- "umax v7.16b, v7.16b, v2.16b\n"
- "umax v6.16b, v6.16b, v0.16b\n"
- "ldr q30, [x24, x26]\n"
+ "ldr x23, [x19], #0x8\n"
+ "ldr q2, [x23, x28]\n"
+ "subs x20, x20, #0x1\n"
+ "umax v6.16b, v6.16b, v2.16b\n"
+ "ldr q30, [x23, x27]\n"
+ "ldr q27, [x23, x26]\n"
"umax v5.16b, v5.16b, v30.16b\n"
+ "umax v4.16b, v4.16b, v27.16b\n"
+ "ldr q25, [x23, x25]\n"
+ "umax v3.16b, v3.16b, v25.16b\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
"sub %x[n_channels], %x[n_channels], #0x40\n"
"cmp %x[n_channels], #0x40\n"
- "str q8, [%x[outptr], x9]\n"
- "str q7, [%x[outptr], x28]\n"
- "add x9, x9, #0x40\n"
+ "str q6, [%x[outptr], x28]\n"
+ "str q5, [%x[outptr], x27]\n"
"add x28, x28, #0x40\n"
- "str q6, [%x[outptr], x27]\n"
"add x27, x27, #0x40\n"
- "str q5, [%x[outptr], x26]\n"
+ "str q4, [%x[outptr], x26]\n"
"add x26, x26, #0x40\n"
+ "str q3, [%x[outptr], x25]\n"
+ "add x25, x25, #0x40\n"
"bge 1b\n"
"cbz %x[n_channels], 43f\n"
"7:" // Single vector of channels
"cmp %x[n_channels], #0x10\n"
"blt 14f\n"
"8:" // Single vector of channels: Loop
- "lsr x25, %x[n_valid_cells], #0x2\n"
- "movi v8.16b, #0x0\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 11f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldr q4, [x24, x9]\n"
- "subs x25, x25, #0x1\n"
- "ldr q3, [x23, x9]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ldr q28, [x22, x9]\n"
- "ldr q22, [x21, x9]\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
+ "movi v6.16b, #0x0\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 11f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "ldr q2, [x23, x28]\n"
+ "ldr q1, [x22, x28]\n"
+ "ldr q0, [x21, x28]\n"
+ "ldr q31, [x20, x28]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
- "umax v23.16b, v4.16b, v3.16b\n"
- "umax v19.16b, v28.16b, v22.16b\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldr q4, [x24, x9]\n"
- "ldr q3, [x23, x9]\n"
+ "umax v23.16b, v2.16b, v1.16b\n"
+ "umax v19.16b, v0.16b, v31.16b\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
"umax v19.16b, v23.16b, v19.16b\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "subs x25, x25, #0x1\n"
- "ldr q28, [x22, x9]\n"
- "ldr q22, [x21, x9]\n"
- "umax v8.16b, v8.16b, v19.16b\n"
- "add x20, x20, #0x20\n"
+ "subs x24, x24, #0x1\n"
+ "umax v6.16b, v6.16b, v19.16b\n"
+ "add x19, x19, #0x20\n"
+ "ldr q2, [x23, x28]\n"
+ "ldr q1, [x22, x28]\n"
+ "ldr q0, [x21, x28]\n"
+ "ldr q31, [x20, x28]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
- "umax v23.16b, v4.16b, v3.16b\n"
- "umax v19.16b, v28.16b, v22.16b\n"
+ "umax v23.16b, v2.16b, v1.16b\n"
+ "umax v19.16b, v0.16b, v31.16b\n"
"umax v19.16b, v23.16b, v19.16b\n"
- "umax v8.16b, v8.16b, v19.16b\n"
+ "umax v6.16b, v6.16b, v19.16b\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ldr q4, [x24, x9]\n"
- "subs x21, x21, #0x1\n"
- "umax v8.16b, v8.16b, v4.16b\n"
+ "ldr x23, [x19], #0x8\n"
+ "ldr q2, [x23, x28]\n"
+ "subs x20, x20, #0x1\n"
+ "umax v6.16b, v6.16b, v2.16b\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
"sub %x[n_channels], %x[n_channels], #0x10\n"
"cmp %x[n_channels], #0x10\n"
- "str q8, [%x[outptr], x9]\n"
- "add x9, x9, #0x10\n"
+ "str q6, [%x[outptr], x28]\n"
+ "add x28, x28, #0x10\n"
"bge 8b\n"
"cbz %x[n_channels], 43f\n"
"14:" // Oddments
- "lsr x25, %x[n_valid_cells], #0x2\n"
- "add %x[outptr], %x[outptr], x9\n"
- "movi v8.16b, #0x0\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 24f\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
+ "add %x[outptr], %x[outptr], x28\n"
+ "movi v6.16b, #0x0\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 24f\n"
"15:" // Oddments: 4 inputs loop
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "add x24, x24, x9\n"
- "add x23, x23, x9\n"
- "add x22, x22, x9\n"
- "movi v4.16b, #0x0\n"
- "movi v3.16b, #0x0\n"
- "add x21, x21, x9\n"
- "movi v28.16b, #0x0\n"
- "movi v22.16b, #0x0\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "add x23, x23, x28\n"
+ "add x22, x22, x28\n"
+ "add x21, x21, x28\n"
+ "movi v2.16b, #0x0\n"
+ "movi v1.16b, #0x0\n"
+ "add x20, x20, x28\n"
+ "movi v0.16b, #0x0\n"
+ "movi v31.16b, #0x0\n"
"tbz %x[n_channels], #3, 19f\n"
- "ldr d4, [x24], #0x8\n"
- "ldr d3, [x23], #0x8\n"
- "ldr d28, [x22], #0x8\n"
- "ldr d22, [x21], #0x8\n"
+ "ldr d2, [x23], #0x8\n"
+ "ldr d1, [x22], #0x8\n"
+ "ldr d0, [x21], #0x8\n"
+ "ldr d31, [x20], #0x8\n"
"tbz %x[n_channels], #2, 17f\n"
- "ld1 { v4.s }[2], [x24], #0x4\n"
- "ld1 { v3.s }[2], [x23], #0x4\n"
- "ld1 { v28.s }[2], [x22], #0x4\n"
- "ld1 { v22.s }[2], [x21], #0x4\n"
+ "ld1 { v2.s }[2], [x23], #0x4\n"
+ "ld1 { v1.s }[2], [x22], #0x4\n"
+ "ld1 { v0.s }[2], [x21], #0x4\n"
+ "ld1 { v31.s }[2], [x20], #0x4\n"
"tbz %x[n_channels], #1, 16f\n"
- "ld1 { v4.h }[6], [x24], #0x2\n"
- "ld1 { v3.h }[6], [x23], #0x2\n"
- "ld1 { v28.h }[6], [x22], #0x2\n"
- "ld1 { v22.h }[6], [x21], #0x2\n"
+ "ld1 { v2.h }[6], [x23], #0x2\n"
+ "ld1 { v1.h }[6], [x22], #0x2\n"
+ "ld1 { v0.h }[6], [x21], #0x2\n"
+ "ld1 { v31.h }[6], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[14], [x24], #0x1\n"
- "ld1 { v3.b }[14], [x23], #0x1\n"
- "ld1 { v28.b }[14], [x22], #0x1\n"
- "ld1 { v22.b }[14], [x21], #0x1\n"
+ "ld1 { v2.b }[14], [x23], #0x1\n"
+ "ld1 { v1.b }[14], [x22], #0x1\n"
+ "ld1 { v0.b }[14], [x21], #0x1\n"
+ "ld1 { v31.b }[14], [x20], #0x1\n"
"b 23f\n"
"16:" // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[12], [x24], #0x1\n"
- "ld1 { v3.b }[12], [x23], #0x1\n"
- "ld1 { v28.b }[12], [x22], #0x1\n"
- "ld1 { v22.b }[12], [x21], #0x1\n"
+ "ld1 { v2.b }[12], [x23], #0x1\n"
+ "ld1 { v1.b }[12], [x22], #0x1\n"
+ "ld1 { v0.b }[12], [x21], #0x1\n"
+ "ld1 { v31.b }[12], [x20], #0x1\n"
"b 23f\n"
"17:" // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 18f\n"
- "ld1 { v4.h }[4], [x24], #0x2\n"
- "ld1 { v3.h }[4], [x23], #0x2\n"
- "ld1 { v28.h }[4], [x22], #0x2\n"
- "ld1 { v22.h }[4], [x21], #0x2\n"
+ "ld1 { v2.h }[4], [x23], #0x2\n"
+ "ld1 { v1.h }[4], [x22], #0x2\n"
+ "ld1 { v0.h }[4], [x21], #0x2\n"
+ "ld1 { v31.h }[4], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[10], [x24], #0x1\n"
- "ld1 { v3.b }[10], [x23], #0x1\n"
- "ld1 { v28.b }[10], [x22], #0x1\n"
- "ld1 { v22.b }[10], [x21], #0x1\n"
+ "ld1 { v2.b }[10], [x23], #0x1\n"
+ "ld1 { v1.b }[10], [x22], #0x1\n"
+ "ld1 { v0.b }[10], [x21], #0x1\n"
+ "ld1 { v31.b }[10], [x20], #0x1\n"
"b 23f\n"
"18:" // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[8], [x24], #0x1\n"
- "ld1 { v3.b }[8], [x23], #0x1\n"
- "ld1 { v28.b }[8], [x22], #0x1\n"
- "ld1 { v22.b }[8], [x21], #0x1\n"
+ "ld1 { v2.b }[8], [x23], #0x1\n"
+ "ld1 { v1.b }[8], [x22], #0x1\n"
+ "ld1 { v0.b }[8], [x21], #0x1\n"
+ "ld1 { v31.b }[8], [x20], #0x1\n"
"b 23f\n"
"19:" // Oddments: 4 inputs loop: Load: Bit 3: Unset
"tbz %x[n_channels], #2, 21f\n"
- "ldr s4, [x24], #0x4\n"
- "ldr s3, [x23], #0x4\n"
- "ldr s28, [x22], #0x4\n"
- "ldr s22, [x21], #0x4\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr s1, [x22], #0x4\n"
+ "ldr s0, [x21], #0x4\n"
+ "ldr s31, [x20], #0x4\n"
"tbz %x[n_channels], #1, 20f\n"
- "ld1 { v4.h }[2], [x24], #0x2\n"
- "ld1 { v3.h }[2], [x23], #0x2\n"
- "ld1 { v28.h }[2], [x22], #0x2\n"
- "ld1 { v22.h }[2], [x21], #0x2\n"
+ "ld1 { v2.h }[2], [x23], #0x2\n"
+ "ld1 { v1.h }[2], [x22], #0x2\n"
+ "ld1 { v0.h }[2], [x21], #0x2\n"
+ "ld1 { v31.h }[2], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[6], [x24], #0x1\n"
- "ld1 { v3.b }[6], [x23], #0x1\n"
- "ld1 { v28.b }[6], [x22], #0x1\n"
- "ld1 { v22.b }[6], [x21], #0x1\n"
+ "ld1 { v2.b }[6], [x23], #0x1\n"
+ "ld1 { v1.b }[6], [x22], #0x1\n"
+ "ld1 { v0.b }[6], [x21], #0x1\n"
+ "ld1 { v31.b }[6], [x20], #0x1\n"
"b 23f\n"
"20:" // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[4], [x24], #0x1\n"
- "ld1 { v3.b }[4], [x23], #0x1\n"
- "ld1 { v28.b }[4], [x22], #0x1\n"
- "ld1 { v22.b }[4], [x21], #0x1\n"
+ "ld1 { v2.b }[4], [x23], #0x1\n"
+ "ld1 { v1.b }[4], [x22], #0x1\n"
+ "ld1 { v0.b }[4], [x21], #0x1\n"
+ "ld1 { v31.b }[4], [x20], #0x1\n"
"b 23f\n"
"21:" // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 22f\n"
- "ldr h4, [x24], #0x2\n"
- "ldr h3, [x23], #0x2\n"
- "ldr h28, [x22], #0x2\n"
- "ldr h22, [x21], #0x2\n"
+ "ldr h2, [x23], #0x2\n"
+ "ldr h1, [x22], #0x2\n"
+ "ldr h0, [x21], #0x2\n"
+ "ldr h31, [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[2], [x24], #0x1\n"
- "ld1 { v3.b }[2], [x23], #0x1\n"
- "ld1 { v28.b }[2], [x22], #0x1\n"
- "ld1 { v22.b }[2], [x21], #0x1\n"
+ "ld1 { v2.b }[2], [x23], #0x1\n"
+ "ld1 { v1.b }[2], [x22], #0x1\n"
+ "ld1 { v0.b }[2], [x21], #0x1\n"
+ "ld1 { v31.b }[2], [x20], #0x1\n"
"b 23f\n"
"22:" // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ldr b4, [x24], #0x1\n"
- "ldr b3, [x23], #0x1\n"
- "ldr b28, [x22], #0x1\n"
- "ldr b22, [x21], #0x1\n"
+ "ldr b2, [x23], #0x1\n"
+ "ldr b1, [x22], #0x1\n"
+ "ldr b0, [x21], #0x1\n"
+ "ldr b31, [x20], #0x1\n"
"23:" // Oddments: 4 inputs loop: Load: Bit 3: End
- "umax v23.16b, v4.16b, v3.16b\n"
- "umax v19.16b, v28.16b, v22.16b\n"
- "subs x25, x25, #0x1\n"
+ "umax v23.16b, v2.16b, v1.16b\n"
+ "umax v19.16b, v0.16b, v31.16b\n"
+ "subs x24, x24, #0x1\n"
"umax v19.16b, v23.16b, v19.16b\n"
- "umax v8.16b, v8.16b, v19.16b\n"
+ "umax v6.16b, v6.16b, v19.16b\n"
"bgt 15b\n"
"24:" // Oddments: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 34f\n"
"25:" // Oddments: Single input loop
- "ldr x24, [x20], #0x8\n"
- "add x24, x24, x9\n"
- "movi v4.16b, #0x0\n"
+ "ldr x23, [x19], #0x8\n"
+ "add x23, x23, x28\n"
+ "movi v2.16b, #0x0\n"
"tbz %x[n_channels], #3, 29f\n"
- "ldr d4, [x24], #0x8\n"
+ "ldr d2, [x23], #0x8\n"
"tbz %x[n_channels], #2, 27f\n"
- "ld1 { v4.s }[2], [x24], #0x4\n"
+ "ld1 { v2.s }[2], [x23], #0x4\n"
"tbz %x[n_channels], #1, 26f\n"
- "ld1 { v4.h }[6], [x24], #0x2\n"
+ "ld1 { v2.h }[6], [x23], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[14], [x24], #0x1\n"
+ "ld1 { v2.b }[14], [x23], #0x1\n"
"b 33f\n"
"26:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[12], [x24], #0x1\n"
+ "ld1 { v2.b }[12], [x23], #0x1\n"
"b 33f\n"
"27:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 28f\n"
- "ld1 { v4.h }[4], [x24], #0x2\n"
+ "ld1 { v2.h }[4], [x23], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[10], [x24], #0x1\n"
+ "ld1 { v2.b }[10], [x23], #0x1\n"
"b 33f\n"
"28:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[8], [x24], #0x1\n"
+ "ld1 { v2.b }[8], [x23], #0x1\n"
"b 33f\n"
"29:" // Oddments: Single input loop: Load: Bit 3: Unset
"tbz %x[n_channels], #2, 31f\n"
- "ldr s4, [x24], #0x4\n"
+ "ldr s2, [x23], #0x4\n"
"tbz %x[n_channels], #1, 30f\n"
- "ld1 { v4.h }[2], [x24], #0x2\n"
+ "ld1 { v2.h }[2], [x23], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[6], [x24], #0x1\n"
+ "ld1 { v2.b }[6], [x23], #0x1\n"
"b 33f\n"
"30:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[4], [x24], #0x1\n"
+ "ld1 { v2.b }[4], [x23], #0x1\n"
"b 33f\n"
"31:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 32f\n"
- "ldr h4, [x24], #0x2\n"
+ "ldr h2, [x23], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[2], [x24], #0x1\n"
+ "ld1 { v2.b }[2], [x23], #0x1\n"
"b 33f\n"
"32:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ldr b4, [x24], #0x1\n"
+ "ldr b2, [x23], #0x1\n"
"33:" // Oddments: Single input loop: Load: Bit 3: End
- "subs x21, x21, #0x1\n"
- "umax v8.16b, v8.16b, v4.16b\n"
+ "subs x20, x20, #0x1\n"
+ "umax v6.16b, v6.16b, v2.16b\n"
"bgt 25b\n"
"34:" // Oddments: Single input loop: End
"tbz %x[n_channels], #3, 38f\n"
- "st1 { v8.d }[0], [%x[outptr]], #0x8\n"
+ "st1 { v6.d }[0], [%x[outptr]], #0x8\n"
"tbz %x[n_channels], #2, 36f\n"
- "st1 { v8.s }[2], [%x[outptr]], #0x4\n"
+ "st1 { v6.s }[2], [%x[outptr]], #0x4\n"
"tbz %x[n_channels], #1, 35f\n"
- "st1 { v8.h }[6], [%x[outptr]], #0x2\n"
+ "st1 { v6.h }[6], [%x[outptr]], #0x2\n"
"tbz %x[n_channels], #0, 42f\n"
- "st1 { v8.b }[14], [%x[outptr]], #0x1\n"
+ "st1 { v6.b }[14], [%x[outptr]], #0x1\n"
"b 42f\n"
"35:" // Oddments: Store: Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 42f\n"
- "st1 { v8.b }[12], [%x[outptr]], #0x1\n"
+ "st1 { v6.b }[12], [%x[outptr]], #0x1\n"
"b 42f\n"
"36:" // Oddments: Store: Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 37f\n"
- "st1 { v8.h }[4], [%x[outptr]], #0x2\n"
+ "st1 { v6.h }[4], [%x[outptr]], #0x2\n"
"tbz %x[n_channels], #0, 42f\n"
- "st1 { v8.b }[10], [%x[outptr]], #0x1\n"
+ "st1 { v6.b }[10], [%x[outptr]], #0x1\n"
"b 42f\n"
"37:" // Oddments: Store: Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 42f\n"
- "st1 { v8.b }[8], [%x[outptr]], #0x1\n"
+ "st1 { v6.b }[8], [%x[outptr]], #0x1\n"
"b 42f\n"
"38:" // Oddments: Store: Bit 3: Unset
"tbz %x[n_channels], #2, 40f\n"
- "st1 { v8.s }[0], [%x[outptr]], #0x4\n"
+ "st1 { v6.s }[0], [%x[outptr]], #0x4\n"
"tbz %x[n_channels], #1, 39f\n"
- "st1 { v8.h }[2], [%x[outptr]], #0x2\n"
+ "st1 { v6.h }[2], [%x[outptr]], #0x2\n"
"tbz %x[n_channels], #0, 42f\n"
- "st1 { v8.b }[6], [%x[outptr]], #0x1\n"
+ "st1 { v6.b }[6], [%x[outptr]], #0x1\n"
"b 42f\n"
"39:" // Oddments: Store: Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 42f\n"
- "st1 { v8.b }[4], [%x[outptr]], #0x1\n"
+ "st1 { v6.b }[4], [%x[outptr]], #0x1\n"
"b 42f\n"
"40:" // Oddments: Store: Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 41f\n"
- "st1 { v8.h }[0], [%x[outptr]], #0x2\n"
+ "st1 { v6.h }[0], [%x[outptr]], #0x2\n"
"tbz %x[n_channels], #0, 42f\n"
- "st1 { v8.b }[2], [%x[outptr]], #0x1\n"
+ "st1 { v6.b }[2], [%x[outptr]], #0x1\n"
"b 42f\n"
"41:" // Oddments: Store: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 42f\n"
- "st1 { v8.b }[0], [%x[outptr]], #0x1\n"
+ "st1 { v6.b }[0], [%x[outptr]], #0x1\n"
"42:" // Oddments: Store: Bit 3: End
"43:" // End
: [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
: [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_avg_generic_depthfirst/generic.cpp
index 19227d8aaa..d48c4ec640 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -120,19 +120,19 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl(
__asm__ __volatile__(
"cmp %x[n_channels], #0x40\n"
- "mov x27, #0x0\n"
- "mov x26, #0x10\n" // cntb _, ALL, #1
- "mov x25, #0x20\n" // cntb _, ALL, #2
- "mov x24, #0x30\n" // cntb _, ALL, #3
+ "mov x26, #0x0\n"
+ "mov x25, #0x10\n" // cntb _, ALL, #1
+ "mov x24, #0x20\n" // cntb _, ALL, #2
+ "mov x23, #0x30\n" // cntb _, ALL, #3
"blt 7f\n"
"1:" // 4-vectors of channels
"ld1r { v15.4s }, [%x[accumulator_init]]\n"
- "lsr x23, %x[n_valid_cells], #0x1\n"
+ "lsr x22, %x[n_valid_cells], #0x1\n"
"mov v14.16b, v15.16b\n"
"mov v13.16b, v15.16b\n"
"mov v12.16b, v15.16b\n"
"mov v11.16b, v15.16b\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"mov v10.16b, v15.16b\n"
"mov v9.16b, v15.16b\n"
"mov v8.16b, v15.16b\n"
@@ -144,43 +144,43 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl(
"mov v2.16b, v15.16b\n"
"mov v1.16b, v15.16b\n"
"mov v0.16b, v15.16b\n"
- "cbz x23, 4f\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "ldr q31, [x22, x27]\n"
- "subs x23, x23, #0x1\n"
- "add x20, x20, #0x10\n"
- "ldr q30, [x21, x27]\n"
- "ldr q29, [x22, x26]\n"
- "ldr q28, [x21, x26]\n"
- "ldr q27, [x22, x25]\n"
- "ldr q26, [x21, x25]\n"
- "ldr q25, [x22, x24]\n"
- "ldr q24, [x21, x24]\n"
+ "cbz x22, 4f\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
+ "add x19, x19, #0x10\n"
+ "ldr q31, [x21, x26]\n"
+ "ldr q30, [x20, x26]\n"
+ "ldr q29, [x21, x25]\n"
+ "ldr q28, [x20, x25]\n"
+ "ldr q27, [x21, x24]\n"
+ "ldr q26, [x20, x24]\n"
+ "ldr q25, [x21, x23]\n"
+ "ldr q24, [x20, x23]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 2 inputs loop
"uaddl v23.8h, v31.8b, v30.8b\n"
"uaddl2 v22.8h, v31.16b, v30.16b\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "ldr q31, [x22, x27]\n"
- "ldr q30, [x21, x27]\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
"uaddl v21.8h, v29.8b, v28.8b\n"
"uaddl2 v20.8h, v29.16b, v28.16b\n"
- "ldr q29, [x22, x26]\n"
- "ldr q28, [x21, x26]\n"
+ "add x19, x19, #0x10\n"
+ "ldr q31, [x21, x26]\n"
"uaddl v19.8h, v27.8b, v26.8b\n"
"uaddl2 v18.8h, v27.16b, v26.16b\n"
- "ldr q27, [x22, x25]\n"
- "ldr q26, [x21, x25]\n"
- "subs x23, x23, #0x1\n"
- "uaddw v15.4s, v15.4s, v23.4h\n"
- "uaddw2 v14.4s, v14.4s, v23.8h\n"
+ "ldr q30, [x20, x26]\n"
+ "ldr q29, [x21, x25]\n"
"uaddl v17.8h, v25.8b, v24.8b\n"
"uaddl2 v16.8h, v25.16b, v24.16b\n"
- "ldr q25, [x22, x24]\n"
- "add x20, x20, #0x10\n"
+ "ldr q28, [x20, x25]\n"
+ "ldr q27, [x21, x24]\n"
+ "uaddw v15.4s, v15.4s, v23.4h\n"
+ "uaddw2 v14.4s, v14.4s, v23.8h\n"
+ "ldr q26, [x20, x24]\n"
+ "ldr q25, [x21, x23]\n"
"uaddw v13.4s, v13.4s, v22.4h\n"
"uaddw2 v12.4s, v12.4s, v22.8h\n"
- "ldr q24, [x21, x24]\n"
+ "ldr q24, [x20, x23]\n"
"uaddw v11.4s, v11.4s, v21.4h\n"
"uaddw2 v10.4s, v10.4s, v21.8h\n"
"uaddw v9.4s, v9.4s, v20.4h\n"
@@ -220,21 +220,21 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl(
"uaddw v1.4s, v1.4s, v16.4h\n"
"uaddw2 v0.4s, v0.4s, v16.8h\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x1\n"
+ "ands x20, %x[n_valid_cells], #0x1\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x22, [x20], #0x8\n"
- "ldr q31, [x22, x27]\n"
+ "ldr x21, [x19], #0x8\n"
+ "ldr q31, [x21, x26]\n"
"uxtl v23.8h, v31.8b\n"
"uxtl2 v22.8h, v31.16b\n"
- "ldr q29, [x22, x26]\n"
- "ldr q27, [x22, x25]\n"
+ "ldr q29, [x21, x25]\n"
+ "ldr q27, [x21, x24]\n"
"uxtl v21.8h, v29.8b\n"
"uxtl2 v20.8h, v29.16b\n"
- "ldr q25, [x22, x24]\n"
+ "ldr q25, [x21, x23]\n"
"uxtl v19.8h, v27.8b\n"
"uxtl2 v18.8h, v27.16b\n"
- "subs x21, x21, #0x1\n"
+ "subs x20, x20, #0x1\n"
"uxtl v17.8h, v25.8b\n"
"uxtl2 v16.8h, v25.16b\n"
"uaddw v15.4s, v15.4s, v23.4h\n"
@@ -259,13 +259,13 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl(
"ld1r { v18.4s }, [%x[combined_rescale_value]]\n"
"srshl v15.4s, v15.4s, v19.4s\n"
"srshl v14.4s, v14.4s, v19.4s\n"
- "ld1r { v17.4s }, [%x[right_shift]]\n"
"srshl v13.4s, v13.4s, v19.4s\n"
"srshl v12.4s, v12.4s, v19.4s\n"
- "add x20, %x[quant_params], %[offsetof_qp_output_offset]\n"
- "ld1r { v16.4s }, [x20]\n"
+ "ld1r { v17.4s }, [%x[right_shift]]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
"srshl v11.4s, v11.4s, v19.4s\n"
"srshl v10.4s, v10.4s, v19.4s\n"
+ "ld1r { v16.4s }, [x19]\n"
"sub %x[n_channels], %x[n_channels], #0x40\n"
"srshl v9.4s, v9.4s, v19.4s\n"
"srshl v8.4s, v8.4s, v19.4s\n"
@@ -370,16 +370,16 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl(
"uzp1 v19.16b, v1.16b, v0.16b\n"
"uzp1 v16.16b, v23.16b, v16.16b\n"
"uzp1 v18.16b, v22.16b, v18.16b\n"
- "str q16, [%x[outptr], x27]\n"
- "add x27, x27, #0x40\n"
+ "str q16, [%x[outptr], x26]\n"
+ "add x26, x26, #0x40\n"
"uzp1 v17.16b, v21.16b, v17.16b\n"
"uzp1 v16.16b, v20.16b, v19.16b\n"
- "str q18, [%x[outptr], x26]\n"
- "add x26, x26, #0x40\n"
- "str q17, [%x[outptr], x25]\n"
+ "str q18, [%x[outptr], x25]\n"
"add x25, x25, #0x40\n"
- "str q16, [%x[outptr], x24]\n"
+ "str q17, [%x[outptr], x24]\n"
"add x24, x24, #0x40\n"
+ "str q16, [%x[outptr], x23]\n"
+ "add x23, x23, #0x40\n"
"bge 1b\n"
"cbz %x[n_channels], 43f\n"
"7:" // Single vector of channels
@@ -387,30 +387,30 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl(
"blt 14f\n"
"8:" // Single vector of channels: Loop
"ld1r { v15.4s }, [%x[accumulator_init]]\n"
- "lsr x23, %x[n_valid_cells], #0x1\n"
+ "lsr x22, %x[n_valid_cells], #0x1\n"
"mov v14.16b, v15.16b\n"
"mov v13.16b, v15.16b\n"
"mov v12.16b, v15.16b\n"
- "mov x20, %x[inptrs]\n"
- "cbz x23, 11f\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "ldr q31, [x22, x27]\n"
- "subs x23, x23, #0x1\n"
- "add x20, x20, #0x10\n"
- "ldr q30, [x21, x27]\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x22, 11f\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
+ "add x19, x19, #0x10\n"
+ "ldr q31, [x21, x26]\n"
+ "ldr q30, [x20, x26]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 2 inputs loop
"uaddl v23.8h, v31.8b, v30.8b\n"
"uaddl2 v22.8h, v31.16b, v30.16b\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "ldr q31, [x22, x27]\n"
- "ldr q30, [x21, x27]\n"
- "subs x23, x23, #0x1\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
"uaddw v15.4s, v15.4s, v23.4h\n"
"uaddw2 v14.4s, v14.4s, v23.8h\n"
+ "add x19, x19, #0x10\n"
+ "ldr q31, [x21, x26]\n"
"uaddw v13.4s, v13.4s, v22.4h\n"
"uaddw2 v12.4s, v12.4s, v22.8h\n"
- "add x20, x20, #0x10\n"
+ "ldr q30, [x20, x26]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 2 inputs tail
"uaddl v23.8h, v31.8b, v30.8b\n"
@@ -420,14 +420,14 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl(
"uaddw v13.4s, v13.4s, v22.4h\n"
"uaddw2 v12.4s, v12.4s, v22.8h\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x1\n"
+ "ands x20, %x[n_valid_cells], #0x1\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x22, [x20], #0x8\n"
- "ldr q31, [x22, x27]\n"
+ "ldr x21, [x19], #0x8\n"
+ "ldr q31, [x21, x26]\n"
"uxtl v23.8h, v31.8b\n"
"uxtl2 v22.8h, v31.16b\n"
- "subs x21, x21, #0x1\n"
+ "subs x20, x20, #0x1\n"
"uaddw v15.4s, v15.4s, v23.4h\n"
"uaddw2 v14.4s, v14.4s, v23.8h\n"
"uaddw v13.4s, v13.4s, v22.4h\n"
@@ -438,13 +438,13 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl(
"ld1r { v18.4s }, [%x[combined_rescale_value]]\n"
"srshl v15.4s, v15.4s, v19.4s\n"
"srshl v14.4s, v14.4s, v19.4s\n"
- "ld1r { v17.4s }, [%x[right_shift]]\n"
"srshl v13.4s, v13.4s, v19.4s\n"
"srshl v12.4s, v12.4s, v19.4s\n"
- "add x20, %x[quant_params], %[offsetof_qp_output_offset]\n"
- "ld1r { v16.4s }, [x20]\n"
+ "ld1r { v17.4s }, [%x[right_shift]]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
"sqrdmulh v15.4s, v15.4s, v18.4s\n"
"sqrdmulh v14.4s, v14.4s, v18.4s\n"
+ "ld1r { v16.4s }, [x19]\n"
"sub %x[n_channels], %x[n_channels], #0x10\n"
"sqrdmulh v13.4s, v13.4s, v18.4s\n"
"sqrdmulh v12.4s, v12.4s, v18.4s\n"
@@ -470,149 +470,149 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl(
"uzp1 v23.16b, v15.16b, v14.16b\n"
"uzp1 v16.16b, v13.16b, v12.16b\n"
"uzp1 v16.16b, v23.16b, v16.16b\n"
- "str q16, [%x[outptr], x27]\n"
- "add x27, x27, #0x10\n"
+ "str q16, [%x[outptr], x26]\n"
+ "add x26, x26, #0x10\n"
"bge 8b\n"
"cbz %x[n_channels], 43f\n"
"14:" // Oddments
"ld1r { v15.4s }, [%x[accumulator_init]]\n"
- "lsr x23, %x[n_valid_cells], #0x1\n"
- "add %x[outptr], %x[outptr], x27\n"
+ "lsr x22, %x[n_valid_cells], #0x1\n"
+ "add %x[outptr], %x[outptr], x26\n"
"mov v14.16b, v15.16b\n"
"mov v13.16b, v15.16b\n"
"mov v12.16b, v15.16b\n"
- "mov x20, %x[inptrs]\n"
- "cbz x23, 24f\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x22, 24f\n"
"15:" // Oddments: 2 inputs loop
- "ldp x22, x21, [x20, #0x0]\n"
- "add x20, x20, #0x10\n"
- "add x22, x22, x27\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "add x19, x19, #0x10\n"
+ "add x21, x21, x26\n"
"movi v31.16b, #0x0\n"
- "add x21, x21, x27\n"
+ "add x20, x20, x26\n"
"movi v30.16b, #0x0\n"
"tbz %x[n_channels], #3, 19f\n"
- "ldr d31, [x22], #0x8\n"
- "ldr d30, [x21], #0x8\n"
+ "ldr d31, [x21], #0x8\n"
+ "ldr d30, [x20], #0x8\n"
"tbz %x[n_channels], #2, 17f\n"
- "ld1 { v31.s }[2], [x22], #0x4\n"
- "ld1 { v30.s }[2], [x21], #0x4\n"
+ "ld1 { v31.s }[2], [x21], #0x4\n"
+ "ld1 { v30.s }[2], [x20], #0x4\n"
"tbz %x[n_channels], #1, 16f\n"
- "ld1 { v31.h }[6], [x22], #0x2\n"
- "ld1 { v30.h }[6], [x21], #0x2\n"
+ "ld1 { v31.h }[6], [x21], #0x2\n"
+ "ld1 { v30.h }[6], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v31.b }[14], [x22], #0x1\n"
- "ld1 { v30.b }[14], [x21], #0x1\n"
+ "ld1 { v31.b }[14], [x21], #0x1\n"
+ "ld1 { v30.b }[14], [x20], #0x1\n"
"b 23f\n"
"16:" // Oddments: 2 inputs loop: Load: Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v31.b }[12], [x22], #0x1\n"
- "ld1 { v30.b }[12], [x21], #0x1\n"
+ "ld1 { v31.b }[12], [x21], #0x1\n"
+ "ld1 { v30.b }[12], [x20], #0x1\n"
"b 23f\n"
"17:" // Oddments: 2 inputs loop: Load: Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 18f\n"
- "ld1 { v31.h }[4], [x22], #0x2\n"
- "ld1 { v30.h }[4], [x21], #0x2\n"
+ "ld1 { v31.h }[4], [x21], #0x2\n"
+ "ld1 { v30.h }[4], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v31.b }[10], [x22], #0x1\n"
- "ld1 { v30.b }[10], [x21], #0x1\n"
+ "ld1 { v31.b }[10], [x21], #0x1\n"
+ "ld1 { v30.b }[10], [x20], #0x1\n"
"b 23f\n"
"18:" // Oddments: 2 inputs loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v31.b }[8], [x22], #0x1\n"
- "ld1 { v30.b }[8], [x21], #0x1\n"
+ "ld1 { v31.b }[8], [x21], #0x1\n"
+ "ld1 { v30.b }[8], [x20], #0x1\n"
"b 23f\n"
"19:" // Oddments: 2 inputs loop: Load: Bit 3: Unset
"tbz %x[n_channels], #2, 21f\n"
- "ldr s31, [x22], #0x4\n"
- "ldr s30, [x21], #0x4\n"
+ "ldr s31, [x21], #0x4\n"
+ "ldr s30, [x20], #0x4\n"
"tbz %x[n_channels], #1, 20f\n"
- "ld1 { v31.h }[2], [x22], #0x2\n"
- "ld1 { v30.h }[2], [x21], #0x2\n"
+ "ld1 { v31.h }[2], [x21], #0x2\n"
+ "ld1 { v30.h }[2], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v31.b }[6], [x22], #0x1\n"
- "ld1 { v30.b }[6], [x21], #0x1\n"
+ "ld1 { v31.b }[6], [x21], #0x1\n"
+ "ld1 { v30.b }[6], [x20], #0x1\n"
"b 23f\n"
"20:" // Oddments: 2 inputs loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v31.b }[4], [x22], #0x1\n"
- "ld1 { v30.b }[4], [x21], #0x1\n"
+ "ld1 { v31.b }[4], [x21], #0x1\n"
+ "ld1 { v30.b }[4], [x20], #0x1\n"
"b 23f\n"
"21:" // Oddments: 2 inputs loop: Load: Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 22f\n"
- "ldr h31, [x22], #0x2\n"
- "ldr h30, [x21], #0x2\n"
+ "ldr h31, [x21], #0x2\n"
+ "ldr h30, [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v31.b }[2], [x22], #0x1\n"
- "ld1 { v30.b }[2], [x21], #0x1\n"
+ "ld1 { v31.b }[2], [x21], #0x1\n"
+ "ld1 { v30.b }[2], [x20], #0x1\n"
"b 23f\n"
"22:" // Oddments: 2 inputs loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ldr b31, [x22], #0x1\n"
- "ldr b30, [x21], #0x1\n"
+ "ldr b31, [x21], #0x1\n"
+ "ldr b30, [x20], #0x1\n"
"23:" // Oddments: 2 inputs loop: Load: Bit 3: End
"uaddl v23.8h, v31.8b, v30.8b\n"
"uaddl2 v22.8h, v31.16b, v30.16b\n"
- "subs x23, x23, #0x1\n"
+ "subs x22, x22, #0x1\n"
"uaddw v15.4s, v15.4s, v23.4h\n"
"uaddw2 v14.4s, v14.4s, v23.8h\n"
"uaddw v13.4s, v13.4s, v22.4h\n"
"uaddw2 v12.4s, v12.4s, v22.8h\n"
"bgt 15b\n"
"24:" // Oddments: After loop
- "ands x21, %x[n_valid_cells], #0x1\n"
+ "ands x20, %x[n_valid_cells], #0x1\n"
"beq 34f\n"
"25:" // Oddments: Single input loop
- "ldr x22, [x20], #0x8\n"
- "add x22, x22, x27\n"
+ "ldr x21, [x19], #0x8\n"
+ "add x21, x21, x26\n"
"movi v31.16b, #0x0\n"
"tbz %x[n_channels], #3, 29f\n"
- "ldr d31, [x22], #0x8\n"
+ "ldr d31, [x21], #0x8\n"
"tbz %x[n_channels], #2, 27f\n"
- "ld1 { v31.s }[2], [x22], #0x4\n"
+ "ld1 { v31.s }[2], [x21], #0x4\n"
"tbz %x[n_channels], #1, 26f\n"
- "ld1 { v31.h }[6], [x22], #0x2\n"
+ "ld1 { v31.h }[6], [x21], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v31.b }[14], [x22], #0x1\n"
+ "ld1 { v31.b }[14], [x21], #0x1\n"
"b 33f\n"
"26:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v31.b }[12], [x22], #0x1\n"
+ "ld1 { v31.b }[12], [x21], #0x1\n"
"b 33f\n"
"27:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 28f\n"
- "ld1 { v31.h }[4], [x22], #0x2\n"
+ "ld1 { v31.h }[4], [x21], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v31.b }[10], [x22], #0x1\n"
+ "ld1 { v31.b }[10], [x21], #0x1\n"
"b 33f\n"
"28:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v31.b }[8], [x22], #0x1\n"
+ "ld1 { v31.b }[8], [x21], #0x1\n"
"b 33f\n"
"29:" // Oddments: Single input loop: Load: Bit 3: Unset
"tbz %x[n_channels], #2, 31f\n"
- "ldr s31, [x22], #0x4\n"
+ "ldr s31, [x21], #0x4\n"
"tbz %x[n_channels], #1, 30f\n"
- "ld1 { v31.h }[2], [x22], #0x2\n"
+ "ld1 { v31.h }[2], [x21], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v31.b }[6], [x22], #0x1\n"
+ "ld1 { v31.b }[6], [x21], #0x1\n"
"b 33f\n"
"30:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v31.b }[4], [x22], #0x1\n"
+ "ld1 { v31.b }[4], [x21], #0x1\n"
"b 33f\n"
"31:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 32f\n"
- "ldr h31, [x22], #0x2\n"
+ "ldr h31, [x21], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v31.b }[2], [x22], #0x1\n"
+ "ld1 { v31.b }[2], [x21], #0x1\n"
"b 33f\n"
"32:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ldr b31, [x22], #0x1\n"
+ "ldr b31, [x21], #0x1\n"
"33:" // Oddments: Single input loop: Load: Bit 3: End
"uxtl v23.8h, v31.8b\n"
"uxtl2 v22.8h, v31.16b\n"
- "subs x21, x21, #0x1\n"
+ "subs x20, x20, #0x1\n"
"uaddw v15.4s, v15.4s, v23.4h\n"
"uaddw2 v14.4s, v14.4s, v23.8h\n"
"uaddw v13.4s, v13.4s, v22.4h\n"
@@ -623,13 +623,13 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl(
"ld1r { v18.4s }, [%x[combined_rescale_value]]\n"
"srshl v15.4s, v15.4s, v19.4s\n"
"srshl v14.4s, v14.4s, v19.4s\n"
- "ld1r { v17.4s }, [%x[right_shift]]\n"
"srshl v13.4s, v13.4s, v19.4s\n"
"srshl v12.4s, v12.4s, v19.4s\n"
- "add x20, %x[quant_params], %[offsetof_qp_output_offset]\n"
- "ld1r { v16.4s }, [x20]\n"
+ "ld1r { v17.4s }, [%x[right_shift]]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
"sqrdmulh v15.4s, v15.4s, v18.4s\n"
"sqrdmulh v14.4s, v14.4s, v18.4s\n"
+ "ld1r { v16.4s }, [x19]\n"
"sqrdmulh v13.4s, v13.4s, v18.4s\n"
"sqrdmulh v12.4s, v12.4s, v18.4s\n"
"srshl v15.4s, v15.4s, v17.4s\n"
@@ -701,7 +701,7 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl(
"43:" // End
: [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
: [accumulator_init] "r" (&accumulator_init), [combined_rescale_value] "r" (&combined_rescale_value), [inptrs] "r" (inptrs), [left_shift] "r" (&left_shift), [n_valid_cells] "r" (n_valid_cells), [offsetof_qp_output_offset] "I" (offsetof(Requantize32, output_offset)), [quant_params] "r" (&qp), [right_shift] "r" (&right_shift)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26"
);
}
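
The hunks above follow one mechanical pattern: every general-purpose register index is shifted down by one and "x19" rejoins the clobber list. As a reminder of why the clobber list must track this, here is a minimal sketch (an illustrative function, not part of the library) of the AArch64 GCC/Clang extended inline asm convention these kernels use:

    #include <cstdint>

    int64_t add_constant(int64_t v)
    {
        __asm__ __volatile__(
            "mov x19, #42\n"            // x19 is callee-saved under AAPCS64...
            "add %x[v], %x[v], x19\n"
            : [v] "+&r" (v)
            :
            : "cc", "x19"               // ...so naming it here makes the compiler
                                        //    save and restore it around the block
        );
        return v;
    }

Every scratch register a block touches must appear in its clobber list, which is why restoring x19 as a scratch register in these kernels also re-adds "x19" to each list.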
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_max_generic_depthfirst/generic.cpp
index 7eea14f70f..c5050742cb 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,131 +43,131 @@ void a64_u8q_nhwc_max_generic_depthfirst_impl(
{
__asm__ __volatile__(
"cmp %x[n_channels], #0x40\n"
- "mov x9, #0x0\n"
- "mov x28, #0x10\n" // cntb _, ALL, #1
- "mov x27, #0x20\n" // cntb _, ALL, #2
- "mov x26, #0x30\n" // cntb _, ALL, #3
+ "mov x28, #0x0\n"
+ "mov x27, #0x10\n" // cntb _, ALL, #1
+ "mov x26, #0x20\n" // cntb _, ALL, #2
+ "mov x25, #0x30\n" // cntb _, ALL, #3
"blt 7f\n"
"1:" // 4-vectors of channels
- "lsr x25, %x[n_valid_cells], #0x2\n"
- "movi v8.16b, #0x0\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"movi v7.16b, #0x0\n"
- "mov x20, %x[inptrs]\n"
+ "movi v3.16b, #0x0\n"
+ "mov x19, %x[inptrs]\n"
"movi v6.16b, #0x0\n"
"movi v5.16b, #0x0\n"
- "cbz x25, 4f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldr q4, [x24, x9]\n"
- "subs x25, x25, #0x1\n"
- "ldr q3, [x23, x9]\n"
- "ldr q2, [x24, x28]\n"
- "ldr q1, [x23, x28]\n"
- "ldr q0, [x24, x27]\n"
- "ldr q31, [x23, x27]\n"
- "ldr q30, [x24, x26]\n"
- "ldr q29, [x23, x26]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ldr q28, [x22, x9]\n"
- "ldr q22, [x21, x9]\n"
- "ldr q27, [x22, x28]\n"
- "ldr q21, [x21, x28]\n"
- "ldr q26, [x22, x27]\n"
- "ldr q20, [x21, x27]\n"
- "ldr q25, [x22, x26]\n"
- "ldr q24, [x21, x26]\n"
+ "cbz x24, 4f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "ldr q2, [x23, x28]\n"
+ "ldr q1, [x22, x28]\n"
+ "ldr q0, [x21, x28]\n"
+ "ldr q31, [x20, x28]\n"
+ "ldr q30, [x23, x27]\n"
+ "ldr q22, [x22, x27]\n"
+ "ldr q29, [x21, x27]\n"
+ "ldr q28, [x20, x27]\n"
+ "ldr q27, [x23, x26]\n"
+ "ldr q21, [x22, x26]\n"
+ "ldr q26, [x21, x26]\n"
+ "ldr q17, [x20, x26]\n"
+ "ldr q25, [x23, x25]\n"
+ "ldr q20, [x22, x25]\n"
+ "ldr q24, [x21, x25]\n"
+ "ldr q16, [x20, x25]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
- "umax v23.16b, v4.16b, v3.16b\n"
- "umax v19.16b, v28.16b, v22.16b\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldr q4, [x24, x9]\n"
- "ldr q3, [x23, x9]\n"
- "umax v22.16b, v2.16b, v1.16b\n"
- "ldr q2, [x24, x28]\n"
- "umax v18.16b, v27.16b, v21.16b\n"
- "ldr q1, [x23, x28]\n"
- "umax v21.16b, v0.16b, v31.16b\n"
- "ldr q0, [x24, x27]\n"
- "umax v17.16b, v26.16b, v20.16b\n"
- "ldr q31, [x23, x27]\n"
- "umax v20.16b, v30.16b, v29.16b\n"
- "ldr q30, [x24, x26]\n"
- "umax v16.16b, v25.16b, v24.16b\n"
- "ldr q29, [x23, x26]\n"
+ "umax v23.16b, v2.16b, v1.16b\n"
+ "umax v19.16b, v0.16b, v31.16b\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "umax v22.16b, v30.16b, v22.16b\n"
+ "umax v18.16b, v29.16b, v28.16b\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "umax v21.16b, v27.16b, v21.16b\n"
+ "umax v17.16b, v26.16b, v17.16b\n"
+ "ldr q2, [x23, x28]\n"
+ "ldr q1, [x22, x28]\n"
+ "umax v20.16b, v25.16b, v20.16b\n"
+ "umax v16.16b, v24.16b, v16.16b\n"
+ "ldr q0, [x21, x28]\n"
+ "ldr q31, [x20, x28]\n"
"umax v19.16b, v23.16b, v19.16b\n"
"umax v18.16b, v22.16b, v18.16b\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "ldr q28, [x22, x9]\n"
- "ldr q22, [x21, x9]\n"
+ "ldr q30, [x23, x27]\n"
+ "ldr q22, [x22, x27]\n"
"umax v17.16b, v21.16b, v17.16b\n"
"umax v16.16b, v20.16b, v16.16b\n"
- "ldr q27, [x22, x28]\n"
- "ldr q21, [x21, x28]\n"
- "subs x25, x25, #0x1\n"
- "umax v8.16b, v8.16b, v19.16b\n"
- "ldr q26, [x22, x27]\n"
- "ldr q20, [x21, x27]\n"
- "umax v7.16b, v7.16b, v18.16b\n"
+ "ldr q29, [x21, x27]\n"
+ "ldr q28, [x20, x27]\n"
+ "umax v7.16b, v7.16b, v19.16b\n"
+ "umax v3.16b, v3.16b, v18.16b\n"
+ "ldr q27, [x23, x26]\n"
+ "ldr q21, [x22, x26]\n"
"umax v6.16b, v6.16b, v17.16b\n"
- "ldr q25, [x22, x26]\n"
- "ldr q24, [x21, x26]\n"
"umax v5.16b, v5.16b, v16.16b\n"
- "add x20, x20, #0x20\n"
+ "ldr q26, [x21, x26]\n"
+ "ldr q17, [x20, x26]\n"
+ "ldr q25, [x23, x25]\n"
+ "ldr q20, [x22, x25]\n"
+ "ldr q24, [x21, x25]\n"
+ "ldr q16, [x20, x25]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
- "umax v23.16b, v4.16b, v3.16b\n"
- "umax v19.16b, v28.16b, v22.16b\n"
- "umax v22.16b, v2.16b, v1.16b\n"
- "umax v18.16b, v27.16b, v21.16b\n"
- "umax v21.16b, v0.16b, v31.16b\n"
- "umax v17.16b, v26.16b, v20.16b\n"
- "umax v20.16b, v30.16b, v29.16b\n"
- "umax v16.16b, v25.16b, v24.16b\n"
+ "umax v23.16b, v2.16b, v1.16b\n"
+ "umax v19.16b, v0.16b, v31.16b\n"
+ "umax v22.16b, v30.16b, v22.16b\n"
+ "umax v18.16b, v29.16b, v28.16b\n"
+ "umax v21.16b, v27.16b, v21.16b\n"
+ "umax v17.16b, v26.16b, v17.16b\n"
+ "umax v20.16b, v25.16b, v20.16b\n"
+ "umax v16.16b, v24.16b, v16.16b\n"
"umax v19.16b, v23.16b, v19.16b\n"
"umax v18.16b, v22.16b, v18.16b\n"
"umax v17.16b, v21.16b, v17.16b\n"
"umax v16.16b, v20.16b, v16.16b\n"
- "umax v8.16b, v8.16b, v19.16b\n"
- "umax v7.16b, v7.16b, v18.16b\n"
+ "umax v7.16b, v7.16b, v19.16b\n"
+ "umax v3.16b, v3.16b, v18.16b\n"
"umax v6.16b, v6.16b, v17.16b\n"
"umax v5.16b, v5.16b, v16.16b\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ldr q4, [x24, x9]\n"
- "subs x21, x21, #0x1\n"
- "umax v8.16b, v8.16b, v4.16b\n"
- "ldr q2, [x24, x28]\n"
- "ldr q0, [x24, x27]\n"
+ "ldr x23, [x19], #0x8\n"
+ "ldr q2, [x23, x28]\n"
+ "subs x20, x20, #0x1\n"
"umax v7.16b, v7.16b, v2.16b\n"
- "umax v6.16b, v6.16b, v0.16b\n"
- "ldr q30, [x24, x26]\n"
- "umax v5.16b, v5.16b, v30.16b\n"
+ "ldr q30, [x23, x27]\n"
+ "ldr q27, [x23, x26]\n"
+ "umax v3.16b, v3.16b, v30.16b\n"
+ "umax v6.16b, v6.16b, v27.16b\n"
+ "ldr q25, [x23, x25]\n"
+ "umax v5.16b, v5.16b, v25.16b\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "add x20, %x[quant_params], %[offsetof_qp_input_offset]\n"
- "ld1r { v4.4s }, [x20]\n"
- "uxtl v23.8h, v8.8b\n"
- "uxtl2 v24.8h, v8.16b\n"
- "uxtl v22.8h, v7.8b\n"
- "uxtl2 v21.8h, v7.16b\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
- "ld1r { v3.4s }, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_input_offset]\n"
+ "ld1r { v4.4s }, [x19]\n"
+ "uxtl v23.8h, v7.8b\n"
+ "uxtl2 v24.8h, v7.16b\n"
+ "uxtl v22.8h, v3.8b\n"
+ "uxtl2 v21.8h, v3.16b\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
+ "ld1r { v3.4s }, [x19]\n"
"uxtl v20.8h, v6.8b\n"
"uxtl2 v17.8h, v6.16b\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
- "ld1r { v2.4s }, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+ "ld1r { v2.4s }, [x19]\n"
"uxtl v19.8h, v5.8b\n"
"uxtl2 v18.8h, v5.16b\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
- "ld1r { v1.4s }, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+ "ld1r { v1.4s }, [x19]\n"
"neg v4.4s, v4.4s\n"
"saddw v0.4s, v4.4s, v23.4h\n"
- "add x20, %x[quant_params], %[offsetof_qp_output_offset]\n"
- "ld1r { v16.4s }, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
+ "ld1r { v16.4s }, [x19]\n"
"saddw2 v23.4s, v4.4s, v23.8h\n"
"saddw v31.4s, v4.4s, v24.4h\n"
"sub %x[n_channels], %x[n_channels], #0x40\n"
@@ -292,85 +292,85 @@ void a64_u8q_nhwc_max_generic_depthfirst_impl(
"uzp1 v19.16b, v25.16b, v19.16b\n"
"uzp1 v18.16b, v24.16b, v18.16b\n"
"uzp1 v16.16b, v23.16b, v16.16b\n"
- "str q16, [%x[outptr], x9]\n"
- "add x9, x9, #0x40\n"
- "uzp1 v16.16b, v22.16b, v21.16b\n"
- "uzp1 v17.16b, v20.16b, v17.16b\n"
"str q16, [%x[outptr], x28]\n"
"add x28, x28, #0x40\n"
- "uzp1 v16.16b, v19.16b, v18.16b\n"
- "str q17, [%x[outptr], x27]\n"
+ "uzp1 v16.16b, v22.16b, v21.16b\n"
+ "uzp1 v17.16b, v20.16b, v17.16b\n"
+ "str q16, [%x[outptr], x27]\n"
"add x27, x27, #0x40\n"
- "str q16, [%x[outptr], x26]\n"
+ "uzp1 v16.16b, v19.16b, v18.16b\n"
+ "str q17, [%x[outptr], x26]\n"
"add x26, x26, #0x40\n"
+ "str q16, [%x[outptr], x25]\n"
+ "add x25, x25, #0x40\n"
"bge 1b\n"
"cbz %x[n_channels], 43f\n"
"7:" // Single vector of channels
"cmp %x[n_channels], #0x10\n"
"blt 14f\n"
"8:" // Single vector of channels: Loop
- "lsr x25, %x[n_valid_cells], #0x2\n"
- "movi v8.16b, #0x0\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 11f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldr q4, [x24, x9]\n"
- "subs x25, x25, #0x1\n"
- "ldr q3, [x23, x9]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ldr q28, [x22, x9]\n"
- "ldr q22, [x21, x9]\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
+ "movi v7.16b, #0x0\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 11f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "ldr q2, [x23, x28]\n"
+ "ldr q1, [x22, x28]\n"
+ "ldr q0, [x21, x28]\n"
+ "ldr q31, [x20, x28]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
- "umax v23.16b, v4.16b, v3.16b\n"
- "umax v19.16b, v28.16b, v22.16b\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldr q4, [x24, x9]\n"
- "ldr q3, [x23, x9]\n"
+ "umax v23.16b, v2.16b, v1.16b\n"
+ "umax v19.16b, v0.16b, v31.16b\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
"umax v19.16b, v23.16b, v19.16b\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "subs x25, x25, #0x1\n"
- "ldr q28, [x22, x9]\n"
- "ldr q22, [x21, x9]\n"
- "umax v8.16b, v8.16b, v19.16b\n"
- "add x20, x20, #0x20\n"
+ "subs x24, x24, #0x1\n"
+ "umax v7.16b, v7.16b, v19.16b\n"
+ "add x19, x19, #0x20\n"
+ "ldr q2, [x23, x28]\n"
+ "ldr q1, [x22, x28]\n"
+ "ldr q0, [x21, x28]\n"
+ "ldr q31, [x20, x28]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
- "umax v23.16b, v4.16b, v3.16b\n"
- "umax v19.16b, v28.16b, v22.16b\n"
+ "umax v23.16b, v2.16b, v1.16b\n"
+ "umax v19.16b, v0.16b, v31.16b\n"
"umax v19.16b, v23.16b, v19.16b\n"
- "umax v8.16b, v8.16b, v19.16b\n"
+ "umax v7.16b, v7.16b, v19.16b\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ldr q4, [x24, x9]\n"
- "subs x21, x21, #0x1\n"
- "umax v8.16b, v8.16b, v4.16b\n"
+ "ldr x23, [x19], #0x8\n"
+ "ldr q2, [x23, x28]\n"
+ "subs x20, x20, #0x1\n"
+ "umax v7.16b, v7.16b, v2.16b\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "add x20, %x[quant_params], %[offsetof_qp_input_offset]\n"
- "ld1r { v4.4s }, [x20]\n"
- "uxtl v23.8h, v8.8b\n"
- "uxtl2 v24.8h, v8.16b\n"
+ "add x19, %x[quant_params], %[offsetof_qp_input_offset]\n"
+ "ld1r { v4.4s }, [x19]\n"
+ "uxtl v23.8h, v7.8b\n"
+ "uxtl2 v24.8h, v7.16b\n"
"neg v4.4s, v4.4s\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
- "ld1r { v3.4s }, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
"saddw v0.4s, v4.4s, v23.4h\n"
+ "ld1r { v3.4s }, [x19]\n"
"saddw2 v23.4s, v4.4s, v23.8h\n"
"saddw v31.4s, v4.4s, v24.4h\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
- "ld1r { v2.4s }, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+ "ld1r { v2.4s }, [x19]\n"
"saddw2 v30.4s, v4.4s, v24.8h\n"
"srshl v0.4s, v0.4s, v3.4s\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
- "ld1r { v1.4s }, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+ "ld1r { v1.4s }, [x19]\n"
"srshl v23.4s, v23.4s, v3.4s\n"
"srshl v31.4s, v31.4s, v3.4s\n"
- "add x20, %x[quant_params], %[offsetof_qp_output_offset]\n"
- "ld1r { v16.4s }, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
+ "ld1r { v16.4s }, [x19]\n"
"srshl v30.4s, v30.4s, v3.4s\n"
"sqrdmulh v0.4s, v0.4s, v2.4s\n"
"sub %x[n_channels], %x[n_channels], #0x10\n"
@@ -399,200 +399,200 @@ void a64_u8q_nhwc_max_generic_depthfirst_impl(
"uzp1 v23.16b, v0.16b, v23.16b\n"
"uzp1 v16.16b, v31.16b, v30.16b\n"
"uzp1 v16.16b, v23.16b, v16.16b\n"
- "str q16, [%x[outptr], x9]\n"
- "add x9, x9, #0x10\n"
+ "str q16, [%x[outptr], x28]\n"
+ "add x28, x28, #0x10\n"
"bge 8b\n"
"cbz %x[n_channels], 43f\n"
"14:" // Oddments
- "lsr x25, %x[n_valid_cells], #0x2\n"
- "add %x[outptr], %x[outptr], x9\n"
- "movi v8.16b, #0x0\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 24f\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
+ "add %x[outptr], %x[outptr], x28\n"
+ "movi v7.16b, #0x0\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 24f\n"
"15:" // Oddments: 4 inputs loop
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "add x24, x24, x9\n"
- "add x23, x23, x9\n"
- "add x22, x22, x9\n"
- "movi v4.16b, #0x0\n"
- "movi v3.16b, #0x0\n"
- "add x21, x21, x9\n"
- "movi v28.16b, #0x0\n"
- "movi v22.16b, #0x0\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "add x23, x23, x28\n"
+ "add x22, x22, x28\n"
+ "add x21, x21, x28\n"
+ "movi v2.16b, #0x0\n"
+ "movi v1.16b, #0x0\n"
+ "add x20, x20, x28\n"
+ "movi v0.16b, #0x0\n"
+ "movi v31.16b, #0x0\n"
"tbz %x[n_channels], #3, 19f\n"
- "ldr d4, [x24], #0x8\n"
- "ldr d3, [x23], #0x8\n"
- "ldr d28, [x22], #0x8\n"
- "ldr d22, [x21], #0x8\n"
+ "ldr d2, [x23], #0x8\n"
+ "ldr d1, [x22], #0x8\n"
+ "ldr d0, [x21], #0x8\n"
+ "ldr d31, [x20], #0x8\n"
"tbz %x[n_channels], #2, 17f\n"
- "ld1 { v4.s }[2], [x24], #0x4\n"
- "ld1 { v3.s }[2], [x23], #0x4\n"
- "ld1 { v28.s }[2], [x22], #0x4\n"
- "ld1 { v22.s }[2], [x21], #0x4\n"
+ "ld1 { v2.s }[2], [x23], #0x4\n"
+ "ld1 { v1.s }[2], [x22], #0x4\n"
+ "ld1 { v0.s }[2], [x21], #0x4\n"
+ "ld1 { v31.s }[2], [x20], #0x4\n"
"tbz %x[n_channels], #1, 16f\n"
- "ld1 { v4.h }[6], [x24], #0x2\n"
- "ld1 { v3.h }[6], [x23], #0x2\n"
- "ld1 { v28.h }[6], [x22], #0x2\n"
- "ld1 { v22.h }[6], [x21], #0x2\n"
+ "ld1 { v2.h }[6], [x23], #0x2\n"
+ "ld1 { v1.h }[6], [x22], #0x2\n"
+ "ld1 { v0.h }[6], [x21], #0x2\n"
+ "ld1 { v31.h }[6], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[14], [x24], #0x1\n"
- "ld1 { v3.b }[14], [x23], #0x1\n"
- "ld1 { v28.b }[14], [x22], #0x1\n"
- "ld1 { v22.b }[14], [x21], #0x1\n"
+ "ld1 { v2.b }[14], [x23], #0x1\n"
+ "ld1 { v1.b }[14], [x22], #0x1\n"
+ "ld1 { v0.b }[14], [x21], #0x1\n"
+ "ld1 { v31.b }[14], [x20], #0x1\n"
"b 23f\n"
"16:" // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[12], [x24], #0x1\n"
- "ld1 { v3.b }[12], [x23], #0x1\n"
- "ld1 { v28.b }[12], [x22], #0x1\n"
- "ld1 { v22.b }[12], [x21], #0x1\n"
+ "ld1 { v2.b }[12], [x23], #0x1\n"
+ "ld1 { v1.b }[12], [x22], #0x1\n"
+ "ld1 { v0.b }[12], [x21], #0x1\n"
+ "ld1 { v31.b }[12], [x20], #0x1\n"
"b 23f\n"
"17:" // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 18f\n"
- "ld1 { v4.h }[4], [x24], #0x2\n"
- "ld1 { v3.h }[4], [x23], #0x2\n"
- "ld1 { v28.h }[4], [x22], #0x2\n"
- "ld1 { v22.h }[4], [x21], #0x2\n"
+ "ld1 { v2.h }[4], [x23], #0x2\n"
+ "ld1 { v1.h }[4], [x22], #0x2\n"
+ "ld1 { v0.h }[4], [x21], #0x2\n"
+ "ld1 { v31.h }[4], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[10], [x24], #0x1\n"
- "ld1 { v3.b }[10], [x23], #0x1\n"
- "ld1 { v28.b }[10], [x22], #0x1\n"
- "ld1 { v22.b }[10], [x21], #0x1\n"
+ "ld1 { v2.b }[10], [x23], #0x1\n"
+ "ld1 { v1.b }[10], [x22], #0x1\n"
+ "ld1 { v0.b }[10], [x21], #0x1\n"
+ "ld1 { v31.b }[10], [x20], #0x1\n"
"b 23f\n"
"18:" // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[8], [x24], #0x1\n"
- "ld1 { v3.b }[8], [x23], #0x1\n"
- "ld1 { v28.b }[8], [x22], #0x1\n"
- "ld1 { v22.b }[8], [x21], #0x1\n"
+ "ld1 { v2.b }[8], [x23], #0x1\n"
+ "ld1 { v1.b }[8], [x22], #0x1\n"
+ "ld1 { v0.b }[8], [x21], #0x1\n"
+ "ld1 { v31.b }[8], [x20], #0x1\n"
"b 23f\n"
"19:" // Oddments: 4 inputs loop: Load: Bit 3: Unset
"tbz %x[n_channels], #2, 21f\n"
- "ldr s4, [x24], #0x4\n"
- "ldr s3, [x23], #0x4\n"
- "ldr s28, [x22], #0x4\n"
- "ldr s22, [x21], #0x4\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr s1, [x22], #0x4\n"
+ "ldr s0, [x21], #0x4\n"
+ "ldr s31, [x20], #0x4\n"
"tbz %x[n_channels], #1, 20f\n"
- "ld1 { v4.h }[2], [x24], #0x2\n"
- "ld1 { v3.h }[2], [x23], #0x2\n"
- "ld1 { v28.h }[2], [x22], #0x2\n"
- "ld1 { v22.h }[2], [x21], #0x2\n"
+ "ld1 { v2.h }[2], [x23], #0x2\n"
+ "ld1 { v1.h }[2], [x22], #0x2\n"
+ "ld1 { v0.h }[2], [x21], #0x2\n"
+ "ld1 { v31.h }[2], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[6], [x24], #0x1\n"
- "ld1 { v3.b }[6], [x23], #0x1\n"
- "ld1 { v28.b }[6], [x22], #0x1\n"
- "ld1 { v22.b }[6], [x21], #0x1\n"
+ "ld1 { v2.b }[6], [x23], #0x1\n"
+ "ld1 { v1.b }[6], [x22], #0x1\n"
+ "ld1 { v0.b }[6], [x21], #0x1\n"
+ "ld1 { v31.b }[6], [x20], #0x1\n"
"b 23f\n"
"20:" // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[4], [x24], #0x1\n"
- "ld1 { v3.b }[4], [x23], #0x1\n"
- "ld1 { v28.b }[4], [x22], #0x1\n"
- "ld1 { v22.b }[4], [x21], #0x1\n"
+ "ld1 { v2.b }[4], [x23], #0x1\n"
+ "ld1 { v1.b }[4], [x22], #0x1\n"
+ "ld1 { v0.b }[4], [x21], #0x1\n"
+ "ld1 { v31.b }[4], [x20], #0x1\n"
"b 23f\n"
"21:" // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 22f\n"
- "ldr h4, [x24], #0x2\n"
- "ldr h3, [x23], #0x2\n"
- "ldr h28, [x22], #0x2\n"
- "ldr h22, [x21], #0x2\n"
+ "ldr h2, [x23], #0x2\n"
+ "ldr h1, [x22], #0x2\n"
+ "ldr h0, [x21], #0x2\n"
+ "ldr h31, [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[2], [x24], #0x1\n"
- "ld1 { v3.b }[2], [x23], #0x1\n"
- "ld1 { v28.b }[2], [x22], #0x1\n"
- "ld1 { v22.b }[2], [x21], #0x1\n"
+ "ld1 { v2.b }[2], [x23], #0x1\n"
+ "ld1 { v1.b }[2], [x22], #0x1\n"
+ "ld1 { v0.b }[2], [x21], #0x1\n"
+ "ld1 { v31.b }[2], [x20], #0x1\n"
"b 23f\n"
"22:" // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ldr b4, [x24], #0x1\n"
- "ldr b3, [x23], #0x1\n"
- "ldr b28, [x22], #0x1\n"
- "ldr b22, [x21], #0x1\n"
+ "ldr b2, [x23], #0x1\n"
+ "ldr b1, [x22], #0x1\n"
+ "ldr b0, [x21], #0x1\n"
+ "ldr b31, [x20], #0x1\n"
"23:" // Oddments: 4 inputs loop: Load: Bit 3: End
- "umax v23.16b, v4.16b, v3.16b\n"
- "umax v19.16b, v28.16b, v22.16b\n"
- "subs x25, x25, #0x1\n"
+ "umax v23.16b, v2.16b, v1.16b\n"
+ "umax v19.16b, v0.16b, v31.16b\n"
+ "subs x24, x24, #0x1\n"
"umax v19.16b, v23.16b, v19.16b\n"
- "umax v8.16b, v8.16b, v19.16b\n"
+ "umax v7.16b, v7.16b, v19.16b\n"
"bgt 15b\n"
"24:" // Oddments: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 34f\n"
"25:" // Oddments: Single input loop
- "ldr x24, [x20], #0x8\n"
- "add x24, x24, x9\n"
- "movi v4.16b, #0x0\n"
+ "ldr x23, [x19], #0x8\n"
+ "add x23, x23, x28\n"
+ "movi v2.16b, #0x0\n"
"tbz %x[n_channels], #3, 29f\n"
- "ldr d4, [x24], #0x8\n"
+ "ldr d2, [x23], #0x8\n"
"tbz %x[n_channels], #2, 27f\n"
- "ld1 { v4.s }[2], [x24], #0x4\n"
+ "ld1 { v2.s }[2], [x23], #0x4\n"
"tbz %x[n_channels], #1, 26f\n"
- "ld1 { v4.h }[6], [x24], #0x2\n"
+ "ld1 { v2.h }[6], [x23], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[14], [x24], #0x1\n"
+ "ld1 { v2.b }[14], [x23], #0x1\n"
"b 33f\n"
"26:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[12], [x24], #0x1\n"
+ "ld1 { v2.b }[12], [x23], #0x1\n"
"b 33f\n"
"27:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 28f\n"
- "ld1 { v4.h }[4], [x24], #0x2\n"
+ "ld1 { v2.h }[4], [x23], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[10], [x24], #0x1\n"
+ "ld1 { v2.b }[10], [x23], #0x1\n"
"b 33f\n"
"28:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[8], [x24], #0x1\n"
+ "ld1 { v2.b }[8], [x23], #0x1\n"
"b 33f\n"
"29:" // Oddments: Single input loop: Load: Bit 3: Unset
"tbz %x[n_channels], #2, 31f\n"
- "ldr s4, [x24], #0x4\n"
+ "ldr s2, [x23], #0x4\n"
"tbz %x[n_channels], #1, 30f\n"
- "ld1 { v4.h }[2], [x24], #0x2\n"
+ "ld1 { v2.h }[2], [x23], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[6], [x24], #0x1\n"
+ "ld1 { v2.b }[6], [x23], #0x1\n"
"b 33f\n"
"30:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[4], [x24], #0x1\n"
+ "ld1 { v2.b }[4], [x23], #0x1\n"
"b 33f\n"
"31:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 32f\n"
- "ldr h4, [x24], #0x2\n"
+ "ldr h2, [x23], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[2], [x24], #0x1\n"
+ "ld1 { v2.b }[2], [x23], #0x1\n"
"b 33f\n"
"32:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ldr b4, [x24], #0x1\n"
+ "ldr b2, [x23], #0x1\n"
"33:" // Oddments: Single input loop: Load: Bit 3: End
- "subs x21, x21, #0x1\n"
- "umax v8.16b, v8.16b, v4.16b\n"
+ "subs x20, x20, #0x1\n"
+ "umax v7.16b, v7.16b, v2.16b\n"
"bgt 25b\n"
"34:" // Oddments: Single input loop: End
- "add x20, %x[quant_params], %[offsetof_qp_input_offset]\n"
- "ld1r { v4.4s }, [x20]\n"
- "uxtl v23.8h, v8.8b\n"
- "uxtl2 v24.8h, v8.16b\n"
+ "add x19, %x[quant_params], %[offsetof_qp_input_offset]\n"
+ "ld1r { v4.4s }, [x19]\n"
+ "uxtl v23.8h, v7.8b\n"
+ "uxtl2 v24.8h, v7.16b\n"
"neg v4.4s, v4.4s\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
- "ld1r { v3.4s }, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
"saddw v0.4s, v4.4s, v23.4h\n"
+ "ld1r { v3.4s }, [x19]\n"
"saddw2 v23.4s, v4.4s, v23.8h\n"
"saddw v31.4s, v4.4s, v24.4h\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
- "ld1r { v2.4s }, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+ "ld1r { v2.4s }, [x19]\n"
"saddw2 v30.4s, v4.4s, v24.8h\n"
"srshl v0.4s, v0.4s, v3.4s\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
- "ld1r { v1.4s }, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+ "ld1r { v1.4s }, [x19]\n"
"srshl v23.4s, v23.4s, v3.4s\n"
"srshl v31.4s, v31.4s, v3.4s\n"
- "add x20, %x[quant_params], %[offsetof_qp_output_offset]\n"
- "ld1r { v16.4s }, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
+ "ld1r { v16.4s }, [x19]\n"
"srshl v30.4s, v30.4s, v3.4s\n"
"sqrdmulh v0.4s, v0.4s, v2.4s\n"
"sqrdmulh v23.4s, v23.4s, v2.4s\n"
@@ -667,7 +667,7 @@ void a64_u8q_nhwc_max_generic_depthfirst_impl(
"43:" // End
: [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
: [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells), [offsetof_qp_input_offset] "I" (offsetof(Requantize32, input_offset)), [offsetof_qp_output_offset] "I" (offsetof(Requantize32, output_offset)), [offsetof_qp_per_layer_left_shift] "I" (offsetof(Requantize32, per_layer_left_shift)), [offsetof_qp_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_qp_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [quant_params] "r" (&qp)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
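
For reference, the requantisation tail of this kernel (widen, offset, shift, multiply, shift, offset) maps onto NEON intrinsics as follows. This is a hedged sketch of the same arithmetic, not the generated code; the helper name and parameters are illustrative, with the per-layer shifts assumed to be broadcast into vector registers:

    #include <arm_neon.h>
    #include <cstdint>

    int32x4_t requantize_low4(uint8x8_t maxima, int32_t input_offset,
                              int32x4_t left_shift, int32x4_t mul,
                              int32x4_t right_shift, int32x4_t output_offset)
    {
        int16x8_t wide = vreinterpretq_s16_u16(vmovl_u8(maxima));  // uxtl
        int32x4_t acc  = vaddw_s16(vdupq_n_s32(-input_offset),     // saddw with the
                                   vget_low_s16(wide));            // negated offset
        acc = vrshlq_s32(acc, left_shift);     // srshl: rounding shift left
        acc = vqrdmulhq_s32(acc, mul);         // sqrdmulh: saturating doubling multiply
        acc = vrshlq_s32(acc, right_shift);    // srshl with negative shifts = right shift
        return vaddq_s32(acc, output_offset);  // add the output offset before narrowing
    }

The kernel then narrows four such accumulators back to bytes with uzp1, as in the store sequence above.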
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
index bce623acd1..a8b6f185be 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -82,97 +82,97 @@ void sme_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(
pad_left, pad_top, pad_right, pad_bottom);
__asm__ __volatile__(
- "ldr x21, [%x[args], %[offsetof_outptrs]]\n"
+ "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
".inst 0xd503477f // SMSTART ZA\n"
- "mov x3, #0x0\n"
- "mov x20, #0x4\n"
- "ldr x4, [%x[args], %[offsetof_inptrs]]\n"
- "whilelt p0.h, XZR, x20\n"
- "add x20, %x[args], %[offsetof_rescale]\n"
- "ld1rqh { z4.h }, p0/Z, [x20]\n"
- "ldr x5, [%x[args], %[offsetof_n_channels]]\n"
- "whilelt p1.h, x3, x5\n"
- "mov x6, #0x0\n"
- "ldp x7, x8, [x21, #0x0]\n"
- "ldp x17, x16, [x21, #0x10]\n"
- "ldp x15, x14, [x4, #0x0]\n"
- "ld1h { z3.h }, p1/Z, [x14, x3, LSL #1]\n"
- "ldp x13, x12, [x4, #0x10]\n"
- "ld1h { z2.h }, p1/Z, [x13, x3, LSL #1]\n"
- "ldp x11, x10, [x4, #0x20]\n"
- "ld1h { z1.h }, p1/Z, [x10, x3, LSL #1]\n"
- "ldp x9, x28, [x4, #0x30]\n"
- "ld1h { z0.h }, p1/Z, [x9, x3, LSL #1]\n"
- "ldp x27, x26, [x4, #0x40]\n"
- "ld1h { z31.h }, p1/Z, [x26, x3, LSL #1]\n"
- "ldp x25, x24, [x4, #0x50]\n"
- "ld1h { z30.h }, p1/Z, [x25, x3, LSL #1]\n"
- "ldp x23, x22, [x4, #0x60]\n"
- "ld1h { z29.h }, p1/Z, [x11, x3, LSL #1]\n"
- "ldp x21, x20, [x4, #0x70]\n"
- "ld1h { z28.h }, p1/Z, [x27, x3, LSL #1]\n"
- "ld1h { z27.h }, p1/Z, [x28, x3, LSL #1]\n"
- "ld1h { z22.h }, p1/Z, [x24, x3, LSL #1]\n"
- "ld1h { z21.h }, p1/Z, [x22, x3, LSL #1]\n"
- "ld1h { z20.h }, p1/Z, [x21, x3, LSL #1]\n"
- "ld1h { z26.h }, p1/Z, [x15, x3, LSL #1]\n"
- "ld1h { z25.h }, p1/Z, [x12, x3, LSL #1]\n"
- "ld1h { z24.h }, p1/Z, [x23, x3, LSL #1]\n"
- "ld1h { z23.h }, p1/Z, [x20, x3, LSL #1]\n"
- "incw x3\n"
- "whilelt p1.h, x3, x5\n"
+ "mov x4, #0x0\n"
+ "mov x19, #0x4\n"
+ "ldr x5, [%x[args], %[offsetof_inptrs]]\n"
+ "whilelt p0.h, XZR, x19\n"
+ "add x19, %x[args], %[offsetof_rescale]\n"
+ "ld1rqh { z4.h }, p0/Z, [x19]\n"
+ "ldr x6, [%x[args], %[offsetof_n_channels]]\n"
+ "whilelt p1.h, x4, x6\n"
+ "mov x7, #0x0\n"
+ "ldp x8, x17, [x20, #0x0]\n"
+ "ldp x16, x15, [x20, #0x10]\n"
+ "ldp x14, x13, [x5, #0x0]\n"
+ "ld1h { z3.h }, p1/Z, [x13, x4, LSL #1]\n"
+ "ldp x12, x11, [x5, #0x10]\n"
+ "ld1h { z2.h }, p1/Z, [x12, x4, LSL #1]\n"
+ "ldp x10, x9, [x5, #0x20]\n"
+ "ld1h { z1.h }, p1/Z, [x9, x4, LSL #1]\n"
+ "ldp x28, x27, [x5, #0x30]\n"
+ "ld1h { z0.h }, p1/Z, [x28, x4, LSL #1]\n"
+ "ldp x26, x25, [x5, #0x40]\n"
+ "ld1h { z31.h }, p1/Z, [x25, x4, LSL #1]\n"
+ "ldp x24, x23, [x5, #0x50]\n"
+ "ld1h { z30.h }, p1/Z, [x24, x4, LSL #1]\n"
+ "ldp x22, x21, [x5, #0x60]\n"
+ "ld1h { z29.h }, p1/Z, [x10, x4, LSL #1]\n"
+ "ldp x20, x19, [x5, #0x70]\n"
+ "ld1h { z28.h }, p1/Z, [x26, x4, LSL #1]\n"
+ "ld1h { z27.h }, p1/Z, [x27, x4, LSL #1]\n"
+ "ld1h { z22.h }, p1/Z, [x23, x4, LSL #1]\n"
+ "ld1h { z21.h }, p1/Z, [x21, x4, LSL #1]\n"
+ "ld1h { z20.h }, p1/Z, [x20, x4, LSL #1]\n"
+ "ld1h { z26.h }, p1/Z, [x14, x4, LSL #1]\n"
+ "ld1h { z25.h }, p1/Z, [x11, x4, LSL #1]\n"
+ "ld1h { z24.h }, p1/Z, [x22, x4, LSL #1]\n"
+ "ld1h { z23.h }, p1/Z, [x19, x4, LSL #1]\n"
+ "incw x4\n"
+ "whilelt p1.h, x4, x6\n"
"b.none 2f\n"
"1:" // Vector: Loop
"fadd z17.h, z1.h, z0.h\n"
"fadd z16.h, z31.h, z30.h\n"
- "ld1h { z1.h }, p1/Z, [x10, x3, LSL #1]\n"
- "whilelt p0.h, x6, x5\n"
+ "ld1h { z1.h }, p1/Z, [x9, x4, LSL #1]\n"
+ "whilelt p0.h, x7, x6\n"
"fadd z19.h, z17.h, z16.h\n"
"fadd z18.h, z3.h, z2.h\n"
- "ld1h { z0.h }, p1/Z, [x9, x3, LSL #1]\n"
+ "ld1h { z0.h }, p1/Z, [x28, x4, LSL #1]\n"
"fadd z17.h, z29.h, z28.h\n"
"fadd z22.h, z27.h, z22.h\n"
- "ld1h { z31.h }, p1/Z, [x26, x3, LSL #1]\n"
+ "ld1h { z31.h }, p1/Z, [x25, x4, LSL #1]\n"
"fadd z16.h, z21.h, z20.h\n"
"fadd z21.h, z18.h, z19.h\n"
- "ld1h { z30.h }, p1/Z, [x25, x3, LSL #1]\n"
+ "ld1h { z30.h }, p1/Z, [x24, x4, LSL #1]\n"
"fadd z20.h, z16.h, z19.h\n"
"fadd z19.h, z26.h, z17.h\n"
- "ld1h { z3.h }, p1/Z, [x14, x3, LSL #1]\n"
+ "ld1h { z3.h }, p1/Z, [x13, x4, LSL #1]\n"
"fadd z18.h, z25.h, z22.h\n"
"fadd z17.h, z24.h, z17.h\n"
- "ld1h { z2.h }, p1/Z, [x13, x3, LSL #1]\n"
+ "ld1h { z2.h }, p1/Z, [x12, x4, LSL #1]\n"
"fadd z16.h, z23.h, z22.h\n"
- "fadd z19.h, z21.h, z19.h\n"
- "ld1h { z29.h }, p1/Z, [x11, x3, LSL #1]\n"
- "fadd z18.h, z21.h, z18.h\n"
+ "fadd z19.h, z19.h, z21.h\n"
+ "ld1h { z29.h }, p1/Z, [x10, x4, LSL #1]\n"
+ "fadd z18.h, z18.h, z21.h\n"
"fadd z17.h, z17.h, z20.h\n"
- "ld1h { z28.h }, p1/Z, [x27, x3, LSL #1]\n"
+ "ld1h { z28.h }, p1/Z, [x26, x4, LSL #1]\n"
"fadd z16.h, z16.h, z20.h\n"
- "ld1h { z27.h }, p1/Z, [x28, x3, LSL #1]\n"
+ "ld1h { z27.h }, p1/Z, [x27, x4, LSL #1]\n"
"fmul z19.h, z19.h, z4.h[0]\n"
- "ld1h { z22.h }, p1/Z, [x24, x3, LSL #1]\n"
+ "ld1h { z22.h }, p1/Z, [x23, x4, LSL #1]\n"
"fmul z18.h, z18.h, z4.h[1]\n"
"fmul z17.h, z17.h, z4.h[2]\n"
- "ld1h { z21.h }, p1/Z, [x22, x3, LSL #1]\n"
+ "ld1h { z21.h }, p1/Z, [x21, x4, LSL #1]\n"
"fmul z16.h, z16.h, z4.h[3]\n"
- "st1h { z19.h }, p0, [x7, x6, LSL #1]\n"
- "ld1h { z20.h }, p1/Z, [x21, x3, LSL #1]\n"
- "st1h { z18.h }, p0, [x8, x6, LSL #1]\n"
- "ld1h { z26.h }, p1/Z, [x15, x3, LSL #1]\n"
- "st1h { z17.h }, p0, [x17, x6, LSL #1]\n"
- "ld1h { z25.h }, p1/Z, [x12, x3, LSL #1]\n"
- "st1h { z16.h }, p0, [x16, x6, LSL #1]\n"
- "incw x6\n"
- "ld1h { z24.h }, p1/Z, [x23, x3, LSL #1]\n"
- "ld1h { z23.h }, p1/Z, [x20, x3, LSL #1]\n"
- "incw x3\n"
- "whilelt p1.h, x3, x5\n"
+ "st1h { z19.h }, p0, [x8, x7, LSL #1]\n"
+ "ld1h { z20.h }, p1/Z, [x20, x4, LSL #1]\n"
+ "st1h { z18.h }, p0, [x17, x7, LSL #1]\n"
+ "ld1h { z26.h }, p1/Z, [x14, x4, LSL #1]\n"
+ "st1h { z17.h }, p0, [x16, x7, LSL #1]\n"
+ "ld1h { z25.h }, p1/Z, [x11, x4, LSL #1]\n"
+ "st1h { z16.h }, p0, [x15, x7, LSL #1]\n"
+ "incw x7\n"
+ "ld1h { z24.h }, p1/Z, [x22, x4, LSL #1]\n"
+ "ld1h { z23.h }, p1/Z, [x19, x4, LSL #1]\n"
+ "incw x4\n"
+ "whilelt p1.h, x4, x6\n"
"b.any 1b\n"
"2:" // Vector: Tail
"fadd z17.h, z1.h, z0.h\n"
"fadd z16.h, z31.h, z30.h\n"
- "whilelt p0.h, x6, x5\n"
+ "whilelt p0.h, x7, x6\n"
"fadd z19.h, z17.h, z16.h\n"
"fadd z18.h, z3.h, z2.h\n"
"fadd z17.h, z29.h, z28.h\n"
@@ -184,22 +184,22 @@ void sme_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(
"fadd z18.h, z25.h, z22.h\n"
"fadd z17.h, z24.h, z17.h\n"
"fadd z16.h, z23.h, z22.h\n"
- "fadd z19.h, z21.h, z19.h\n"
- "fadd z18.h, z21.h, z18.h\n"
+ "fadd z19.h, z19.h, z21.h\n"
+ "fadd z18.h, z18.h, z21.h\n"
"fadd z17.h, z17.h, z20.h\n"
"fadd z16.h, z16.h, z20.h\n"
"fmul z19.h, z19.h, z4.h[0]\n"
- "st1h { z19.h }, p0, [x7, x6, LSL #1]\n"
+ "st1h { z19.h }, p0, [x8, x7, LSL #1]\n"
"fmul z18.h, z18.h, z4.h[1]\n"
"fmul z17.h, z17.h, z4.h[2]\n"
- "st1h { z18.h }, p0, [x8, x6, LSL #1]\n"
+ "st1h { z18.h }, p0, [x17, x7, LSL #1]\n"
"fmul z16.h, z16.h, z4.h[3]\n"
- "st1h { z17.h }, p0, [x17, x6, LSL #1]\n"
- "st1h { z16.h }, p0, [x16, x6, LSL #1]\n"
+ "st1h { z17.h }, p0, [x16, x7, LSL #1]\n"
+ "st1h { z16.h }, p0, [x15, x7, LSL #1]\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs)), [offsetof_rescale] "I" (offsetof(KernelArgs, rescale_vals))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
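
The data flow in this kernel is easier to see scalar-first. A hedged sketch of the arithmetic it vectorises across channels (an illustrative helper, not the library API): each of the four outputs in a 2x2 block is a 3x3 stride-1 window sum scaled by a per-output rescale value (z4.h[0..3] above) that accounts for padded cells:

    static float avg_3x3_s1_at(const float *in, int row_stride,
                               int oy, int ox, float rescale)
    {
        float sum = 0.0f;
        for (int ky = 0; ky < 3; ky++)        // 3x3 window rooted at (oy, ox)
            for (int kx = 0; kx < 3; kx++)
                sum += in[(oy + ky) * row_stride + (ox + kx)];
        return sum * rescale;                 // rescale = 1 / valid_cell_count
    }

Rather than recomputing each window, the assembly shares the partial sums of the overlapping interior (z19, z20, z21) across all four outputs.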
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_avg_generic_depthfirst/generic.cpp
index c43da42d9e..2c1e698ade 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,83 +42,83 @@ void sme_fp16_nhwc_avg_generic_depthfirst_impl(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "mov x9, #0x0\n"
- "cnth x28\n"
- "cnth x27, ALL, MUL #2\n"
- "cnth x26, ALL, MUL #3\n"
+ "mov x28, #0x0\n"
+ "cnth x27\n"
+ "cnth x26, ALL, MUL #2\n"
+ "cnth x25, ALL, MUL #3\n"
"ptrue p0.b\n"
- "whilelt p3.h, x9, %x[n_channels]\n"
+ "whilelt p3.h, x28, %x[n_channels]\n"
"ld1rh { z6.h }, p0/Z, [%x[rescale_ptr]]\n"
- "whilelt p2.h, x28, %x[n_channels]\n"
- "whilelt p1.h, x27, %x[n_channels]\n"
- "whilelt p0.h, x26, %x[n_channels]\n"
+ "whilelt p2.h, x27, %x[n_channels]\n"
+ "whilelt p1.h, x26, %x[n_channels]\n"
+ "whilelt p0.h, x25, %x[n_channels]\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
- "lsr x25, %x[n_valid_cells], #0x2\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"mov z5.b, #0x0\n"
"mov z4.b, #0x0\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"mov z3.b, #0x0\n"
"mov z2.b, #0x0\n"
- "cbz x25, 4f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "subs x25, x25, #0x1\n"
- "ld1h { z1.h }, p3/Z, [x24, x9, LSL #1]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ld1h { z0.h }, p3/Z, [x23, x9, LSL #1]\n"
- "ld1h { z31.h }, p3/Z, [x22, x9, LSL #1]\n"
- "ld1h { z30.h }, p3/Z, [x21, x9, LSL #1]\n"
- "ld1h { z29.h }, p2/Z, [x24, x28, LSL #1]\n"
- "ld1h { z22.h }, p2/Z, [x23, x28, LSL #1]\n"
- "ld1h { z28.h }, p2/Z, [x22, x28, LSL #1]\n"
- "ld1h { z18.h }, p2/Z, [x21, x28, LSL #1]\n"
- "ld1h { z27.h }, p1/Z, [x24, x27, LSL #1]\n"
- "ld1h { z21.h }, p1/Z, [x23, x27, LSL #1]\n"
- "ld1h { z26.h }, p1/Z, [x22, x27, LSL #1]\n"
- "ld1h { z17.h }, p1/Z, [x21, x27, LSL #1]\n"
- "ld1h { z25.h }, p0/Z, [x24, x26, LSL #1]\n"
- "ld1h { z20.h }, p0/Z, [x23, x26, LSL #1]\n"
- "ld1h { z24.h }, p0/Z, [x22, x26, LSL #1]\n"
- "ld1h { z16.h }, p0/Z, [x21, x26, LSL #1]\n"
+ "cbz x24, 4f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "subs x24, x24, #0x1\n"
+ "ld1h { z1.h }, p3/Z, [x23, x28, LSL #1]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "ld1h { z0.h }, p3/Z, [x22, x28, LSL #1]\n"
+ "ld1h { z31.h }, p3/Z, [x21, x28, LSL #1]\n"
+ "ld1h { z30.h }, p3/Z, [x20, x28, LSL #1]\n"
+ "ld1h { z29.h }, p2/Z, [x23, x27, LSL #1]\n"
+ "ld1h { z22.h }, p2/Z, [x22, x27, LSL #1]\n"
+ "ld1h { z28.h }, p2/Z, [x21, x27, LSL #1]\n"
+ "ld1h { z18.h }, p2/Z, [x20, x27, LSL #1]\n"
+ "ld1h { z27.h }, p1/Z, [x23, x26, LSL #1]\n"
+ "ld1h { z21.h }, p1/Z, [x22, x26, LSL #1]\n"
+ "ld1h { z26.h }, p1/Z, [x21, x26, LSL #1]\n"
+ "ld1h { z17.h }, p1/Z, [x20, x26, LSL #1]\n"
+ "ld1h { z25.h }, p0/Z, [x23, x25, LSL #1]\n"
+ "ld1h { z20.h }, p0/Z, [x22, x25, LSL #1]\n"
+ "ld1h { z24.h }, p0/Z, [x21, x25, LSL #1]\n"
+ "ld1h { z16.h }, p0/Z, [x20, x25, LSL #1]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
"fadd z23.h, z1.h, z0.h\n"
"fadd z19.h, z31.h, z30.h\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "subs x25, x25, #0x1\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "subs x24, x24, #0x1\n"
"fadd z22.h, z29.h, z22.h\n"
"fadd z18.h, z28.h, z18.h\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
"fadd z21.h, z27.h, z21.h\n"
"fadd z17.h, z26.h, z17.h\n"
- "ld1h { z1.h }, p3/Z, [x24, x9, LSL #1]\n"
+ "ld1h { z1.h }, p3/Z, [x23, x28, LSL #1]\n"
"fadd z20.h, z25.h, z20.h\n"
"fadd z16.h, z24.h, z16.h\n"
- "ld1h { z0.h }, p3/Z, [x23, x9, LSL #1]\n"
+ "ld1h { z0.h }, p3/Z, [x22, x28, LSL #1]\n"
"fadd z19.h, z23.h, z19.h\n"
"fadd z18.h, z22.h, z18.h\n"
- "ld1h { z31.h }, p3/Z, [x22, x9, LSL #1]\n"
+ "ld1h { z31.h }, p3/Z, [x21, x28, LSL #1]\n"
"fadd z17.h, z21.h, z17.h\n"
"fadd z16.h, z20.h, z16.h\n"
- "ld1h { z30.h }, p3/Z, [x21, x9, LSL #1]\n"
+ "ld1h { z30.h }, p3/Z, [x20, x28, LSL #1]\n"
"fadd z5.h, z5.h, z19.h\n"
"fadd z4.h, z4.h, z18.h\n"
- "ld1h { z29.h }, p2/Z, [x24, x28, LSL #1]\n"
+ "ld1h { z29.h }, p2/Z, [x23, x27, LSL #1]\n"
"fadd z3.h, z3.h, z17.h\n"
"fadd z2.h, z2.h, z16.h\n"
- "ld1h { z22.h }, p2/Z, [x23, x28, LSL #1]\n"
- "ld1h { z28.h }, p2/Z, [x22, x28, LSL #1]\n"
- "ld1h { z18.h }, p2/Z, [x21, x28, LSL #1]\n"
- "ld1h { z27.h }, p1/Z, [x24, x27, LSL #1]\n"
- "ld1h { z21.h }, p1/Z, [x23, x27, LSL #1]\n"
- "ld1h { z26.h }, p1/Z, [x22, x27, LSL #1]\n"
- "ld1h { z17.h }, p1/Z, [x21, x27, LSL #1]\n"
- "ld1h { z25.h }, p0/Z, [x24, x26, LSL #1]\n"
- "ld1h { z20.h }, p0/Z, [x23, x26, LSL #1]\n"
- "ld1h { z24.h }, p0/Z, [x22, x26, LSL #1]\n"
- "ld1h { z16.h }, p0/Z, [x21, x26, LSL #1]\n"
+ "ld1h { z22.h }, p2/Z, [x22, x27, LSL #1]\n"
+ "ld1h { z28.h }, p2/Z, [x21, x27, LSL #1]\n"
+ "ld1h { z18.h }, p2/Z, [x20, x27, LSL #1]\n"
+ "ld1h { z27.h }, p1/Z, [x23, x26, LSL #1]\n"
+ "ld1h { z21.h }, p1/Z, [x22, x26, LSL #1]\n"
+ "ld1h { z26.h }, p1/Z, [x21, x26, LSL #1]\n"
+ "ld1h { z17.h }, p1/Z, [x20, x26, LSL #1]\n"
+ "ld1h { z25.h }, p0/Z, [x23, x25, LSL #1]\n"
+ "ld1h { z20.h }, p0/Z, [x22, x25, LSL #1]\n"
+ "ld1h { z24.h }, p0/Z, [x21, x25, LSL #1]\n"
+ "ld1h { z16.h }, p0/Z, [x20, x25, LSL #1]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
"fadd z23.h, z1.h, z0.h\n"
@@ -138,65 +138,65 @@ void sme_fp16_nhwc_avg_generic_depthfirst_impl(
"fadd z3.h, z3.h, z17.h\n"
"fadd z2.h, z2.h, z16.h\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ld1h { z1.h }, p3/Z, [x24, x9, LSL #1]\n"
- "subs x21, x21, #0x1\n"
+ "ldr x23, [x19], #0x8\n"
+ "ld1h { z1.h }, p3/Z, [x23, x28, LSL #1]\n"
+ "subs x20, x20, #0x1\n"
"fadd z5.h, z5.h, z1.h\n"
- "ld1h { z29.h }, p2/Z, [x24, x28, LSL #1]\n"
+ "ld1h { z29.h }, p2/Z, [x23, x27, LSL #1]\n"
"fadd z4.h, z4.h, z29.h\n"
- "ld1h { z27.h }, p1/Z, [x24, x27, LSL #1]\n"
+ "ld1h { z27.h }, p1/Z, [x23, x26, LSL #1]\n"
"fadd z3.h, z3.h, z27.h\n"
- "ld1h { z25.h }, p0/Z, [x24, x26, LSL #1]\n"
+ "ld1h { z25.h }, p0/Z, [x23, x25, LSL #1]\n"
"fadd z2.h, z2.h, z25.h\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
"fmul z5.h, z5.h, z6.h\n"
"fmul z4.h, z4.h, z6.h\n"
- "st1h { z5.h }, p3, [%x[outptr], x9, LSL #1]\n"
- "inch x9, ALL, MUL #4\n"
+ "st1h { z5.h }, p3, [%x[outptr], x28, LSL #1]\n"
+ "inch x28, ALL, MUL #4\n"
"fmul z3.h, z3.h, z6.h\n"
"fmul z2.h, z2.h, z6.h\n"
- "st1h { z4.h }, p2, [%x[outptr], x28, LSL #1]\n"
- "inch x28, ALL, MUL #4\n"
- "st1h { z3.h }, p1, [%x[outptr], x27, LSL #1]\n"
+ "st1h { z4.h }, p2, [%x[outptr], x27, LSL #1]\n"
"inch x27, ALL, MUL #4\n"
- "st1h { z2.h }, p0, [%x[outptr], x26, LSL #1]\n"
+ "st1h { z3.h }, p1, [%x[outptr], x26, LSL #1]\n"
"inch x26, ALL, MUL #4\n"
- "whilelt p0.h, x26, %x[n_channels]\n"
+ "st1h { z2.h }, p0, [%x[outptr], x25, LSL #1]\n"
+ "inch x25, ALL, MUL #4\n"
+ "whilelt p0.h, x25, %x[n_channels]\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p3.h, x9, %x[n_channels]\n"
+ "whilelt p3.h, x28, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
- "lsr x25, %x[n_valid_cells], #0x2\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"mov z5.b, #0x0\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 11f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "subs x25, x25, #0x1\n"
- "ld1h { z1.h }, p3/Z, [x24, x9, LSL #1]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ld1h { z0.h }, p3/Z, [x23, x9, LSL #1]\n"
- "ld1h { z31.h }, p3/Z, [x22, x9, LSL #1]\n"
- "ld1h { z30.h }, p3/Z, [x21, x9, LSL #1]\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 11f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "subs x24, x24, #0x1\n"
+ "ld1h { z1.h }, p3/Z, [x23, x28, LSL #1]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "ld1h { z0.h }, p3/Z, [x22, x28, LSL #1]\n"
+ "ld1h { z31.h }, p3/Z, [x21, x28, LSL #1]\n"
+ "ld1h { z30.h }, p3/Z, [x20, x28, LSL #1]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
"fadd z23.h, z1.h, z0.h\n"
"fadd z19.h, z31.h, z30.h\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "subs x25, x25, #0x1\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "subs x24, x24, #0x1\n"
"fadd z19.h, z23.h, z19.h\n"
- "ldp x22, x21, [x20, #0x10]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
"fadd z5.h, z5.h, z19.h\n"
- "add x20, x20, #0x20\n"
- "ld1h { z1.h }, p3/Z, [x24, x9, LSL #1]\n"
- "ld1h { z0.h }, p3/Z, [x23, x9, LSL #1]\n"
- "ld1h { z31.h }, p3/Z, [x22, x9, LSL #1]\n"
- "ld1h { z30.h }, p3/Z, [x21, x9, LSL #1]\n"
+ "add x19, x19, #0x20\n"
+ "ld1h { z1.h }, p3/Z, [x23, x28, LSL #1]\n"
+ "ld1h { z0.h }, p3/Z, [x22, x28, LSL #1]\n"
+ "ld1h { z31.h }, p3/Z, [x21, x28, LSL #1]\n"
+ "ld1h { z30.h }, p3/Z, [x20, x28, LSL #1]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
"fadd z23.h, z1.h, z0.h\n"
@@ -204,25 +204,25 @@ void sme_fp16_nhwc_avg_generic_depthfirst_impl(
"fadd z19.h, z23.h, z19.h\n"
"fadd z5.h, z5.h, z19.h\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ld1h { z1.h }, p3/Z, [x24, x9, LSL #1]\n"
- "subs x21, x21, #0x1\n"
+ "ldr x23, [x19], #0x8\n"
+ "ld1h { z1.h }, p3/Z, [x23, x28, LSL #1]\n"
+ "subs x20, x20, #0x1\n"
"fadd z5.h, z5.h, z1.h\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
"fmul z5.h, z5.h, z6.h\n"
- "st1h { z5.h }, p3, [%x[outptr], x9, LSL #1]\n"
- "inch x9\n"
- "whilelt p3.h, x9, %x[n_channels]\n"
+ "st1h { z5.h }, p3, [%x[outptr], x28, LSL #1]\n"
+ "inch x28\n"
+ "whilelt p3.h, x28, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
".inst 0xd503467f // SMSTOP\n"
:
: [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr), [rescale_ptr] "r" (&rescale_value)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
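
This generic average kernel is the hand-unrolled form of the standard SVE strip-mining idiom: whilelt computes a predicate covering whatever remains of n_channels, and inch advances the channel offsets. A hedged ACLE sketch of the same loop without the 4-vector unroll (assumes an SVE toolchain providing <arm_sve.h>; the function and parameter names are illustrative):

    #include <arm_sve.h>
    #include <cstdint>

    void avg_pool_fp16(__fp16 *outptr, const __fp16 *const *inptrs,
                       uint64_t n_valid_cells, uint64_t n_channels, __fp16 rescale)
    {
        for (uint64_t c = 0; c < n_channels; c += svcnth())  // one vector of halfwords
        {
            svbool_t pg = svwhilelt_b16(c, n_channels);      // predicate covers the tail
            svfloat16_t acc = svdup_n_f16(0.0f);
            for (uint64_t i = 0; i < n_valid_cells; i++)     // sum the valid cells
                acc = svadd_f16_x(pg, acc, svld1_f16(pg, inptrs[i] + c));
            svst1_f16(pg, outptr + c, svmul_n_f16_x(pg, acc, rescale));
        }
    }

Because the predicate handles the tail, no scalar epilogue is needed — the same property the "b.none"/"b.any" guards above rely on.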
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
index f71f2625b6..fe2e7c834f 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -63,82 +63,82 @@ void sme_fp16_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
pad_left, pad_top, pad_right, pad_bottom);
__asm__ __volatile__(
- "ldr x21, [%x[args], %[offsetof_outptrs]]\n"
+ "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
".inst 0xd503477f // SMSTART ZA\n"
- "mov x15, #0x0\n"
- "ptrue p2.b\n"
- "ldr x20, [%x[args], %[offsetof_inptrs]]\n"
"mov x14, #0x0\n"
- "ldr x13, [%x[args], %[offsetof_n_channels]]\n"
- "whilelt p1.h, x15, x13\n"
- "ldp x12, x11, [x21, #0x0]\n"
- "ldp x10, x9, [x21, #0x10]\n"
- "ldp x28, x27, [x20, #0x0]\n"
- "ld1h { z30.h }, p1/Z, [x27, x15, LSL #1]\n"
- "ldp x26, x25, [x20, #0x10]\n"
- "ld1h { z29.h }, p1/Z, [x25, x15, LSL #1]\n"
- "ldp x24, x23, [x20, #0x20]\n"
- "ld1h { z28.h }, p1/Z, [x24, x15, LSL #1]\n"
- "ldp x22, x21, [x20, #0x30]\n"
- "ld1h { z27.h }, p1/Z, [x21, x15, LSL #1]\n"
- "ldr x20, [x20, #0x40]\n"
- "ld1h { z26.h }, p1/Z, [x28, x15, LSL #1]\n"
- "ld1h { z25.h }, p1/Z, [x26, x15, LSL #1]\n"
- "ld1h { z24.h }, p1/Z, [x23, x15, LSL #1]\n"
- "ld1h { z23.h }, p1/Z, [x22, x15, LSL #1]\n"
- "ld1h { z19.h }, p1/Z, [x20, x15, LSL #1]\n"
- "incw x15\n"
- "whilelt p1.h, x15, x13\n"
+ "ptrue p2.b\n"
+ "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+ "mov x13, #0x0\n"
+ "ldr x12, [%x[args], %[offsetof_n_channels]]\n"
+ "whilelt p1.h, x14, x12\n"
+ "ldp x11, x10, [x20, #0x0]\n"
+ "ldp x9, x28, [x20, #0x10]\n"
+ "ldp x27, x26, [x19, #0x0]\n"
+ "ld1h { z29.h }, p1/Z, [x26, x14, LSL #1]\n"
+ "ldp x25, x24, [x19, #0x10]\n"
+ "ld1h { z28.h }, p1/Z, [x24, x14, LSL #1]\n"
+ "ldp x23, x22, [x19, #0x20]\n"
+ "ld1h { z27.h }, p1/Z, [x23, x14, LSL #1]\n"
+ "ldp x21, x20, [x19, #0x30]\n"
+ "ld1h { z26.h }, p1/Z, [x20, x14, LSL #1]\n"
+ "ldr x19, [x19, #0x40]\n"
+ "ld1h { z20.h }, p1/Z, [x27, x14, LSL #1]\n"
+ "ld1h { z25.h }, p1/Z, [x22, x14, LSL #1]\n"
+ "ld1h { z24.h }, p1/Z, [x25, x14, LSL #1]\n"
+ "ld1h { z23.h }, p1/Z, [x21, x14, LSL #1]\n"
+ "ld1h { z19.h }, p1/Z, [x19, x14, LSL #1]\n"
+ "incw x14\n"
+ "whilelt p1.h, x14, x12\n"
"b.none 2f\n"
"1:" // Vector: Loop
- "movprfx z22, z30\n fmax z22.h, p2/M, z22.h, z28.h\n"
- "movprfx z21, z28\n fmax z21.h, p2/M, z21.h, z27.h\n"
- "ld1h { z30.h }, p1/Z, [x27, x15, LSL #1]\n"
- "whilelt p0.h, x14, x13\n"
- "movprfx z20, z29\n fmax z20.h, p2/M, z20.h, z26.h\n"
- "movprfx z18, z25\n fmax z18.h, p2/M, z18.h, z24.h\n"
- "ld1h { z28.h }, p1/Z, [x24, x15, LSL #1]\n"
- "movprfx z17, z29\n fmax z17.h, p2/M, z17.h, z23.h\n"
- "movprfx z16, z24\n fmax z16.h, p2/M, z16.h, z19.h\n"
- "ld1h { z27.h }, p1/Z, [x21, x15, LSL #1]\n"
- "ld1h { z29.h }, p1/Z, [x25, x15, LSL #1]\n"
- "movprfx z19, z22\n fmax z19.h, p2/M, z19.h, z20.h\n"
- "fmax z18.h, p2/M, z18.h, z22.h\n"
- "ld1h { z26.h }, p1/Z, [x28, x15, LSL #1]\n"
+ "movprfx z22, z29\n fmax z22.h, p2/M, z22.h, z27.h\n"
+ "movprfx z21, z27\n fmax z21.h, p2/M, z21.h, z26.h\n"
+ "ld1h { z29.h }, p1/Z, [x26, x14, LSL #1]\n"
+ "whilelt p0.h, x13, x12\n"
+ "movprfx z18, z28\n fmax z18.h, p2/M, z18.h, z20.h\n"
+ "movprfx z20, z25\n fmax z20.h, p2/M, z20.h, z24.h\n"
+ "ld1h { z27.h }, p1/Z, [x23, x14, LSL #1]\n"
+ "movprfx z17, z23\n fmax z17.h, p2/M, z17.h, z28.h\n"
+ "movprfx z16, z25\n fmax z16.h, p2/M, z16.h, z19.h\n"
+ "ld1h { z26.h }, p1/Z, [x20, x14, LSL #1]\n"
+ "ld1h { z28.h }, p1/Z, [x24, x14, LSL #1]\n"
+ "movprfx z19, z18\n fmax z19.h, p2/M, z19.h, z22.h\n"
+ "movprfx z18, z22\n fmax z18.h, p2/M, z18.h, z20.h\n"
+ "ld1h { z20.h }, p1/Z, [x27, x14, LSL #1]\n"
"fmax z17.h, p2/M, z17.h, z21.h\n"
"fmax z16.h, p2/M, z16.h, z21.h\n"
- "ld1h { z25.h }, p1/Z, [x26, x15, LSL #1]\n"
- "st1h { z19.h }, p0, [x12, x14, LSL #1]\n"
- "ld1h { z24.h }, p1/Z, [x23, x15, LSL #1]\n"
- "st1h { z18.h }, p0, [x11, x14, LSL #1]\n"
- "ld1h { z23.h }, p1/Z, [x22, x15, LSL #1]\n"
- "st1h { z17.h }, p0, [x10, x14, LSL #1]\n"
- "ld1h { z19.h }, p1/Z, [x20, x15, LSL #1]\n"
- "incw x15\n"
- "whilelt p1.h, x15, x13\n"
- "st1h { z16.h }, p0, [x9, x14, LSL #1]\n"
+ "ld1h { z25.h }, p1/Z, [x22, x14, LSL #1]\n"
+ "st1h { z19.h }, p0, [x11, x13, LSL #1]\n"
+ "ld1h { z24.h }, p1/Z, [x25, x14, LSL #1]\n"
+ "st1h { z18.h }, p0, [x10, x13, LSL #1]\n"
+ "ld1h { z23.h }, p1/Z, [x21, x14, LSL #1]\n"
+ "st1h { z17.h }, p0, [x9, x13, LSL #1]\n"
+ "ld1h { z19.h }, p1/Z, [x19, x14, LSL #1]\n"
"incw x14\n"
+ "whilelt p1.h, x14, x12\n"
+ "st1h { z16.h }, p0, [x28, x13, LSL #1]\n"
+ "incw x13\n"
"b.any 1b\n"
"2:" // Vector: Tail
- "movprfx z22, z30\n fmax z22.h, p2/M, z22.h, z28.h\n"
- "movprfx z21, z28\n fmax z21.h, p2/M, z21.h, z27.h\n"
- "whilelt p0.h, x14, x13\n"
- "movprfx z20, z29\n fmax z20.h, p2/M, z20.h, z26.h\n"
- "movprfx z18, z25\n fmax z18.h, p2/M, z18.h, z24.h\n"
- "movprfx z17, z29\n fmax z17.h, p2/M, z17.h, z23.h\n"
- "movprfx z16, z24\n fmax z16.h, p2/M, z16.h, z19.h\n"
- "movprfx z19, z22\n fmax z19.h, p2/M, z19.h, z20.h\n"
- "fmax z18.h, p2/M, z18.h, z22.h\n"
- "st1h { z19.h }, p0, [x12, x14, LSL #1]\n"
+ "movprfx z22, z29\n fmax z22.h, p2/M, z22.h, z27.h\n"
+ "movprfx z21, z27\n fmax z21.h, p2/M, z21.h, z26.h\n"
+ "whilelt p0.h, x13, x12\n"
+ "movprfx z18, z28\n fmax z18.h, p2/M, z18.h, z20.h\n"
+ "movprfx z20, z25\n fmax z20.h, p2/M, z20.h, z24.h\n"
+ "movprfx z17, z23\n fmax z17.h, p2/M, z17.h, z28.h\n"
+ "movprfx z16, z25\n fmax z16.h, p2/M, z16.h, z19.h\n"
+ "movprfx z19, z18\n fmax z19.h, p2/M, z19.h, z22.h\n"
+ "movprfx z18, z22\n fmax z18.h, p2/M, z18.h, z20.h\n"
+ "st1h { z19.h }, p0, [x11, x13, LSL #1]\n"
"fmax z17.h, p2/M, z17.h, z21.h\n"
"fmax z16.h, p2/M, z16.h, z21.h\n"
- "st1h { z18.h }, p0, [x11, x14, LSL #1]\n"
- "st1h { z17.h }, p0, [x10, x14, LSL #1]\n"
- "st1h { z16.h }, p0, [x9, x14, LSL #1]\n"
+ "st1h { z18.h }, p0, [x10, x13, LSL #1]\n"
+ "st1h { z17.h }, p0, [x9, x13, LSL #1]\n"
+ "st1h { z16.h }, p0, [x28, x13, LSL #1]\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_max_generic_depthfirst/generic.cpp
index c07ce97231..1bb27e39a3 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,82 +40,82 @@ void sme_fp16_nhwc_max_generic_depthfirst_impl(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "mov x9, #0x0\n"
- "cnth x28\n"
- "cnth x27, ALL, MUL #2\n"
- "cnth x26, ALL, MUL #3\n"
- "whilelt p4.h, x9, %x[n_channels]\n"
- "whilelt p3.h, x28, %x[n_channels]\n"
- "whilelt p2.h, x27, %x[n_channels]\n"
- "whilelt p1.h, x26, %x[n_channels]\n"
+ "mov x28, #0x0\n"
+ "cnth x27\n"
+ "cnth x26, ALL, MUL #2\n"
+ "cnth x25, ALL, MUL #3\n"
+ "whilelt p4.h, x28, %x[n_channels]\n"
+ "whilelt p3.h, x27, %x[n_channels]\n"
+ "whilelt p2.h, x26, %x[n_channels]\n"
+ "whilelt p1.h, x25, %x[n_channels]\n"
"ptrue p0.b\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
- "lsr x25, %x[n_valid_cells], #0x2\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"mov z4.h, #0xfc00\n"
"mov z3.h, #0xfc00\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"mov z2.h, #0xfc00\n"
"mov z1.h, #0xfc00\n"
- "cbz x25, 4f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "subs x25, x25, #0x1\n"
- "ld1h { z0.h }, p4/Z, [x24, x9, LSL #1]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ld1h { z31.h }, p4/Z, [x23, x9, LSL #1]\n"
- "ld1h { z23.h }, p4/Z, [x22, x9, LSL #1]\n"
- "ld1h { z30.h }, p4/Z, [x21, x9, LSL #1]\n"
- "ld1h { z18.h }, p3/Z, [x24, x28, LSL #1]\n"
- "ld1h { z29.h }, p3/Z, [x23, x28, LSL #1]\n"
- "ld1h { z22.h }, p3/Z, [x22, x28, LSL #1]\n"
- "ld1h { z28.h }, p3/Z, [x21, x28, LSL #1]\n"
- "ld1h { z17.h }, p2/Z, [x24, x27, LSL #1]\n"
- "ld1h { z27.h }, p2/Z, [x23, x27, LSL #1]\n"
- "ld1h { z21.h }, p2/Z, [x22, x27, LSL #1]\n"
- "ld1h { z26.h }, p2/Z, [x21, x27, LSL #1]\n"
- "ld1h { z16.h }, p1/Z, [x24, x26, LSL #1]\n"
- "ld1h { z25.h }, p1/Z, [x23, x26, LSL #1]\n"
- "ld1h { z20.h }, p1/Z, [x22, x26, LSL #1]\n"
- "ld1h { z24.h }, p1/Z, [x21, x26, LSL #1]\n"
+ "cbz x24, 4f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "subs x24, x24, #0x1\n"
+ "ld1h { z0.h }, p4/Z, [x23, x28, LSL #1]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "ld1h { z31.h }, p4/Z, [x22, x28, LSL #1]\n"
+ "ld1h { z23.h }, p4/Z, [x21, x28, LSL #1]\n"
+ "ld1h { z30.h }, p4/Z, [x20, x28, LSL #1]\n"
+ "ld1h { z18.h }, p3/Z, [x23, x27, LSL #1]\n"
+ "ld1h { z29.h }, p3/Z, [x22, x27, LSL #1]\n"
+ "ld1h { z22.h }, p3/Z, [x21, x27, LSL #1]\n"
+ "ld1h { z28.h }, p3/Z, [x20, x27, LSL #1]\n"
+ "ld1h { z17.h }, p2/Z, [x23, x26, LSL #1]\n"
+ "ld1h { z27.h }, p2/Z, [x22, x26, LSL #1]\n"
+ "ld1h { z21.h }, p2/Z, [x21, x26, LSL #1]\n"
+ "ld1h { z26.h }, p2/Z, [x20, x26, LSL #1]\n"
+ "ld1h { z16.h }, p1/Z, [x23, x25, LSL #1]\n"
+ "ld1h { z25.h }, p1/Z, [x22, x25, LSL #1]\n"
+ "ld1h { z20.h }, p1/Z, [x21, x25, LSL #1]\n"
+ "ld1h { z24.h }, p1/Z, [x20, x25, LSL #1]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
"movprfx z19, z0\n fmax z19.h, p0/M, z19.h, z31.h\n"
"fmax z23.h, p0/M, z23.h, z30.h\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "subs x25, x25, #0x1\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "subs x24, x24, #0x1\n"
"fmax z18.h, p0/M, z18.h, z29.h\n"
"fmax z22.h, p0/M, z22.h, z28.h\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
"fmax z17.h, p0/M, z17.h, z27.h\n"
"fmax z21.h, p0/M, z21.h, z26.h\n"
- "ld1h { z0.h }, p4/Z, [x24, x9, LSL #1]\n"
+ "ld1h { z0.h }, p4/Z, [x23, x28, LSL #1]\n"
"fmax z16.h, p0/M, z16.h, z25.h\n"
"fmax z20.h, p0/M, z20.h, z24.h\n"
- "ld1h { z31.h }, p4/Z, [x23, x9, LSL #1]\n"
+ "ld1h { z31.h }, p4/Z, [x22, x28, LSL #1]\n"
"fmax z19.h, p0/M, z19.h, z23.h\n"
"fmax z18.h, p0/M, z18.h, z22.h\n"
- "ld1h { z23.h }, p4/Z, [x22, x9, LSL #1]\n"
+ "ld1h { z23.h }, p4/Z, [x21, x28, LSL #1]\n"
"fmax z17.h, p0/M, z17.h, z21.h\n"
"fmax z16.h, p0/M, z16.h, z20.h\n"
- "ld1h { z30.h }, p4/Z, [x21, x9, LSL #1]\n"
+ "ld1h { z30.h }, p4/Z, [x20, x28, LSL #1]\n"
"fmax z4.h, p0/M, z4.h, z19.h\n"
"fmax z3.h, p0/M, z3.h, z18.h\n"
- "ld1h { z18.h }, p3/Z, [x24, x28, LSL #1]\n"
+ "ld1h { z18.h }, p3/Z, [x23, x27, LSL #1]\n"
"fmax z2.h, p0/M, z2.h, z17.h\n"
"fmax z1.h, p0/M, z1.h, z16.h\n"
- "ld1h { z29.h }, p3/Z, [x23, x28, LSL #1]\n"
- "ld1h { z22.h }, p3/Z, [x22, x28, LSL #1]\n"
- "ld1h { z28.h }, p3/Z, [x21, x28, LSL #1]\n"
- "ld1h { z17.h }, p2/Z, [x24, x27, LSL #1]\n"
- "ld1h { z27.h }, p2/Z, [x23, x27, LSL #1]\n"
- "ld1h { z21.h }, p2/Z, [x22, x27, LSL #1]\n"
- "ld1h { z26.h }, p2/Z, [x21, x27, LSL #1]\n"
- "ld1h { z16.h }, p1/Z, [x24, x26, LSL #1]\n"
- "ld1h { z25.h }, p1/Z, [x23, x26, LSL #1]\n"
- "ld1h { z20.h }, p1/Z, [x22, x26, LSL #1]\n"
- "ld1h { z24.h }, p1/Z, [x21, x26, LSL #1]\n"
+ "ld1h { z29.h }, p3/Z, [x22, x27, LSL #1]\n"
+ "ld1h { z22.h }, p3/Z, [x21, x27, LSL #1]\n"
+ "ld1h { z28.h }, p3/Z, [x20, x27, LSL #1]\n"
+ "ld1h { z17.h }, p2/Z, [x23, x26, LSL #1]\n"
+ "ld1h { z27.h }, p2/Z, [x22, x26, LSL #1]\n"
+ "ld1h { z21.h }, p2/Z, [x21, x26, LSL #1]\n"
+ "ld1h { z26.h }, p2/Z, [x20, x26, LSL #1]\n"
+ "ld1h { z16.h }, p1/Z, [x23, x25, LSL #1]\n"
+ "ld1h { z25.h }, p1/Z, [x22, x25, LSL #1]\n"
+ "ld1h { z20.h }, p1/Z, [x21, x25, LSL #1]\n"
+ "ld1h { z24.h }, p1/Z, [x20, x25, LSL #1]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
"movprfx z19, z0\n fmax z19.h, p0/M, z19.h, z31.h\n"
@@ -135,61 +135,61 @@ void sme_fp16_nhwc_max_generic_depthfirst_impl(
"fmax z2.h, p0/M, z2.h, z17.h\n"
"fmax z1.h, p0/M, z1.h, z16.h\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ld1h { z0.h }, p4/Z, [x24, x9, LSL #1]\n"
- "subs x21, x21, #0x1\n"
+ "ldr x23, [x19], #0x8\n"
+ "ld1h { z0.h }, p4/Z, [x23, x28, LSL #1]\n"
+ "subs x20, x20, #0x1\n"
"fmax z4.h, p0/M, z4.h, z0.h\n"
- "ld1h { z18.h }, p3/Z, [x24, x28, LSL #1]\n"
+ "ld1h { z18.h }, p3/Z, [x23, x27, LSL #1]\n"
"fmax z3.h, p0/M, z3.h, z18.h\n"
- "ld1h { z17.h }, p2/Z, [x24, x27, LSL #1]\n"
+ "ld1h { z17.h }, p2/Z, [x23, x26, LSL #1]\n"
"fmax z2.h, p0/M, z2.h, z17.h\n"
- "ld1h { z16.h }, p1/Z, [x24, x26, LSL #1]\n"
+ "ld1h { z16.h }, p1/Z, [x23, x25, LSL #1]\n"
"fmax z1.h, p0/M, z1.h, z16.h\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "st1h { z4.h }, p4, [%x[outptr], x9, LSL #1]\n"
- "inch x9, ALL, MUL #4\n"
- "st1h { z3.h }, p3, [%x[outptr], x28, LSL #1]\n"
+ "st1h { z4.h }, p4, [%x[outptr], x28, LSL #1]\n"
"inch x28, ALL, MUL #4\n"
- "st1h { z2.h }, p2, [%x[outptr], x27, LSL #1]\n"
+ "st1h { z3.h }, p3, [%x[outptr], x27, LSL #1]\n"
"inch x27, ALL, MUL #4\n"
- "st1h { z1.h }, p1, [%x[outptr], x26, LSL #1]\n"
+ "st1h { z2.h }, p2, [%x[outptr], x26, LSL #1]\n"
"inch x26, ALL, MUL #4\n"
- "whilelt p1.h, x26, %x[n_channels]\n"
+ "st1h { z1.h }, p1, [%x[outptr], x25, LSL #1]\n"
+ "inch x25, ALL, MUL #4\n"
+ "whilelt p1.h, x25, %x[n_channels]\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p4.h, x9, %x[n_channels]\n"
+ "whilelt p4.h, x28, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
- "lsr x25, %x[n_valid_cells], #0x2\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"mov z4.h, #0xfc00\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 11f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "subs x25, x25, #0x1\n"
- "ld1h { z0.h }, p4/Z, [x24, x9, LSL #1]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ld1h { z31.h }, p4/Z, [x23, x9, LSL #1]\n"
- "ld1h { z23.h }, p4/Z, [x22, x9, LSL #1]\n"
- "ld1h { z30.h }, p4/Z, [x21, x9, LSL #1]\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 11f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "subs x24, x24, #0x1\n"
+ "ld1h { z0.h }, p4/Z, [x23, x28, LSL #1]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "ld1h { z31.h }, p4/Z, [x22, x28, LSL #1]\n"
+ "ld1h { z23.h }, p4/Z, [x21, x28, LSL #1]\n"
+ "ld1h { z30.h }, p4/Z, [x20, x28, LSL #1]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
"movprfx z19, z0\n fmax z19.h, p0/M, z19.h, z31.h\n"
"fmax z23.h, p0/M, z23.h, z30.h\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "subs x25, x25, #0x1\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "subs x24, x24, #0x1\n"
"fmax z19.h, p0/M, z19.h, z23.h\n"
- "ldp x22, x21, [x20, #0x10]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
"fmax z4.h, p0/M, z4.h, z19.h\n"
- "add x20, x20, #0x20\n"
- "ld1h { z0.h }, p4/Z, [x24, x9, LSL #1]\n"
- "ld1h { z31.h }, p4/Z, [x23, x9, LSL #1]\n"
- "ld1h { z23.h }, p4/Z, [x22, x9, LSL #1]\n"
- "ld1h { z30.h }, p4/Z, [x21, x9, LSL #1]\n"
+ "add x19, x19, #0x20\n"
+ "ld1h { z0.h }, p4/Z, [x23, x28, LSL #1]\n"
+ "ld1h { z31.h }, p4/Z, [x22, x28, LSL #1]\n"
+ "ld1h { z23.h }, p4/Z, [x21, x28, LSL #1]\n"
+ "ld1h { z30.h }, p4/Z, [x20, x28, LSL #1]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
"movprfx z19, z0\n fmax z19.h, p0/M, z19.h, z31.h\n"
@@ -197,24 +197,24 @@ void sme_fp16_nhwc_max_generic_depthfirst_impl(
"fmax z19.h, p0/M, z19.h, z23.h\n"
"fmax z4.h, p0/M, z4.h, z19.h\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ld1h { z0.h }, p4/Z, [x24, x9, LSL #1]\n"
- "subs x21, x21, #0x1\n"
+ "ldr x23, [x19], #0x8\n"
+ "ld1h { z0.h }, p4/Z, [x23, x28, LSL #1]\n"
+ "subs x20, x20, #0x1\n"
"fmax z4.h, p0/M, z4.h, z0.h\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "st1h { z4.h }, p4, [%x[outptr], x9, LSL #1]\n"
- "inch x9\n"
- "whilelt p4.h, x9, %x[n_channels]\n"
+ "st1h { z4.h }, p4, [%x[outptr], x28, LSL #1]\n"
+ "inch x28\n"
+ "whilelt p4.h, x28, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
".inst 0xd503467f // SMSTOP\n"
:
: [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
index cf69800522..602ef59159 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -82,97 +82,97 @@ void sme_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(
pad_left, pad_top, pad_right, pad_bottom);
__asm__ __volatile__(
- "ldr x21, [%x[args], %[offsetof_outptrs]]\n"
+ "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
".inst 0xd503477f // SMSTART ZA\n"
- "mov x3, #0x0\n"
- "mov x20, #0x4\n"
- "ldr x4, [%x[args], %[offsetof_inptrs]]\n"
- "whilelt p0.s, XZR, x20\n"
- "add x20, %x[args], %[offsetof_rescale]\n"
- "ld1rqw { z4.s }, p0/Z, [x20]\n"
- "ldr x5, [%x[args], %[offsetof_n_channels]]\n"
- "whilelt p1.s, x3, x5\n"
- "mov x6, #0x0\n"
- "ldp x7, x8, [x21, #0x0]\n"
- "ldp x17, x16, [x21, #0x10]\n"
- "ldp x15, x14, [x4, #0x0]\n"
- "ld1w { z3.s }, p1/Z, [x14, x3, LSL #2]\n"
- "ldp x13, x12, [x4, #0x10]\n"
- "ld1w { z2.s }, p1/Z, [x13, x3, LSL #2]\n"
- "ldp x11, x10, [x4, #0x20]\n"
- "ld1w { z1.s }, p1/Z, [x10, x3, LSL #2]\n"
- "ldp x9, x28, [x4, #0x30]\n"
- "ld1w { z0.s }, p1/Z, [x9, x3, LSL #2]\n"
- "ldp x27, x26, [x4, #0x40]\n"
- "ld1w { z31.s }, p1/Z, [x26, x3, LSL #2]\n"
- "ldp x25, x24, [x4, #0x50]\n"
- "ld1w { z30.s }, p1/Z, [x25, x3, LSL #2]\n"
- "ldp x23, x22, [x4, #0x60]\n"
- "ld1w { z29.s }, p1/Z, [x11, x3, LSL #2]\n"
- "ldp x21, x20, [x4, #0x70]\n"
- "ld1w { z28.s }, p1/Z, [x27, x3, LSL #2]\n"
- "ld1w { z27.s }, p1/Z, [x28, x3, LSL #2]\n"
- "ld1w { z22.s }, p1/Z, [x24, x3, LSL #2]\n"
- "ld1w { z21.s }, p1/Z, [x22, x3, LSL #2]\n"
- "ld1w { z20.s }, p1/Z, [x21, x3, LSL #2]\n"
- "ld1w { z26.s }, p1/Z, [x15, x3, LSL #2]\n"
- "ld1w { z25.s }, p1/Z, [x12, x3, LSL #2]\n"
- "ld1w { z24.s }, p1/Z, [x23, x3, LSL #2]\n"
- "ld1w { z23.s }, p1/Z, [x20, x3, LSL #2]\n"
- "incw x3\n"
- "whilelt p1.s, x3, x5\n"
+ "mov x4, #0x0\n"
+ "mov x19, #0x4\n"
+ "ldr x5, [%x[args], %[offsetof_inptrs]]\n"
+ "whilelt p0.s, XZR, x19\n"
+ "add x19, %x[args], %[offsetof_rescale]\n"
+ "ld1rqw { z4.s }, p0/Z, [x19]\n"
+ "ldr x6, [%x[args], %[offsetof_n_channels]]\n"
+ "whilelt p1.s, x4, x6\n"
+ "mov x7, #0x0\n"
+ "ldp x8, x17, [x20, #0x0]\n"
+ "ldp x16, x15, [x20, #0x10]\n"
+ "ldp x14, x13, [x5, #0x0]\n"
+ "ld1w { z3.s }, p1/Z, [x13, x4, LSL #2]\n"
+ "ldp x12, x11, [x5, #0x10]\n"
+ "ld1w { z2.s }, p1/Z, [x12, x4, LSL #2]\n"
+ "ldp x10, x9, [x5, #0x20]\n"
+ "ld1w { z1.s }, p1/Z, [x9, x4, LSL #2]\n"
+ "ldp x28, x27, [x5, #0x30]\n"
+ "ld1w { z0.s }, p1/Z, [x28, x4, LSL #2]\n"
+ "ldp x26, x25, [x5, #0x40]\n"
+ "ld1w { z31.s }, p1/Z, [x25, x4, LSL #2]\n"
+ "ldp x24, x23, [x5, #0x50]\n"
+ "ld1w { z30.s }, p1/Z, [x24, x4, LSL #2]\n"
+ "ldp x22, x21, [x5, #0x60]\n"
+ "ld1w { z29.s }, p1/Z, [x10, x4, LSL #2]\n"
+ "ldp x20, x19, [x5, #0x70]\n"
+ "ld1w { z28.s }, p1/Z, [x26, x4, LSL #2]\n"
+ "ld1w { z27.s }, p1/Z, [x27, x4, LSL #2]\n"
+ "ld1w { z22.s }, p1/Z, [x23, x4, LSL #2]\n"
+ "ld1w { z21.s }, p1/Z, [x21, x4, LSL #2]\n"
+ "ld1w { z20.s }, p1/Z, [x20, x4, LSL #2]\n"
+ "ld1w { z26.s }, p1/Z, [x14, x4, LSL #2]\n"
+ "ld1w { z25.s }, p1/Z, [x11, x4, LSL #2]\n"
+ "ld1w { z24.s }, p1/Z, [x22, x4, LSL #2]\n"
+ "ld1w { z23.s }, p1/Z, [x19, x4, LSL #2]\n"
+ "incw x4\n"
+ "whilelt p1.s, x4, x6\n"
"b.none 2f\n"
"1:" // Vector: Loop
"fadd z17.s, z1.s, z0.s\n"
"fadd z16.s, z31.s, z30.s\n"
- "ld1w { z1.s }, p1/Z, [x10, x3, LSL #2]\n"
- "whilelt p0.s, x6, x5\n"
+ "ld1w { z1.s }, p1/Z, [x9, x4, LSL #2]\n"
+ "whilelt p0.s, x7, x6\n"
"fadd z19.s, z17.s, z16.s\n"
"fadd z18.s, z3.s, z2.s\n"
- "ld1w { z0.s }, p1/Z, [x9, x3, LSL #2]\n"
+ "ld1w { z0.s }, p1/Z, [x28, x4, LSL #2]\n"
"fadd z17.s, z29.s, z28.s\n"
"fadd z22.s, z27.s, z22.s\n"
- "ld1w { z31.s }, p1/Z, [x26, x3, LSL #2]\n"
+ "ld1w { z31.s }, p1/Z, [x25, x4, LSL #2]\n"
"fadd z16.s, z21.s, z20.s\n"
"fadd z21.s, z18.s, z19.s\n"
- "ld1w { z30.s }, p1/Z, [x25, x3, LSL #2]\n"
+ "ld1w { z30.s }, p1/Z, [x24, x4, LSL #2]\n"
"fadd z20.s, z16.s, z19.s\n"
"fadd z19.s, z26.s, z17.s\n"
- "ld1w { z3.s }, p1/Z, [x14, x3, LSL #2]\n"
+ "ld1w { z3.s }, p1/Z, [x13, x4, LSL #2]\n"
"fadd z18.s, z25.s, z22.s\n"
"fadd z17.s, z24.s, z17.s\n"
- "ld1w { z2.s }, p1/Z, [x13, x3, LSL #2]\n"
+ "ld1w { z2.s }, p1/Z, [x12, x4, LSL #2]\n"
"fadd z16.s, z23.s, z22.s\n"
- "fadd z19.s, z21.s, z19.s\n"
- "ld1w { z29.s }, p1/Z, [x11, x3, LSL #2]\n"
- "fadd z18.s, z21.s, z18.s\n"
+ "fadd z19.s, z19.s, z21.s\n"
+ "ld1w { z29.s }, p1/Z, [x10, x4, LSL #2]\n"
+ "fadd z18.s, z18.s, z21.s\n"
"fadd z17.s, z17.s, z20.s\n"
- "ld1w { z28.s }, p1/Z, [x27, x3, LSL #2]\n"
+ "ld1w { z28.s }, p1/Z, [x26, x4, LSL #2]\n"
"fadd z16.s, z16.s, z20.s\n"
- "ld1w { z27.s }, p1/Z, [x28, x3, LSL #2]\n"
+ "ld1w { z27.s }, p1/Z, [x27, x4, LSL #2]\n"
"fmul z19.s, z19.s, z4.s[0]\n"
- "ld1w { z22.s }, p1/Z, [x24, x3, LSL #2]\n"
+ "ld1w { z22.s }, p1/Z, [x23, x4, LSL #2]\n"
"fmul z18.s, z18.s, z4.s[1]\n"
"fmul z17.s, z17.s, z4.s[2]\n"
- "ld1w { z21.s }, p1/Z, [x22, x3, LSL #2]\n"
+ "ld1w { z21.s }, p1/Z, [x21, x4, LSL #2]\n"
"fmul z16.s, z16.s, z4.s[3]\n"
- "st1w { z19.s }, p0, [x7, x6, LSL #2]\n"
- "ld1w { z20.s }, p1/Z, [x21, x3, LSL #2]\n"
- "st1w { z18.s }, p0, [x8, x6, LSL #2]\n"
- "ld1w { z26.s }, p1/Z, [x15, x3, LSL #2]\n"
- "st1w { z17.s }, p0, [x17, x6, LSL #2]\n"
- "ld1w { z25.s }, p1/Z, [x12, x3, LSL #2]\n"
- "st1w { z16.s }, p0, [x16, x6, LSL #2]\n"
- "incw x6\n"
- "ld1w { z24.s }, p1/Z, [x23, x3, LSL #2]\n"
- "ld1w { z23.s }, p1/Z, [x20, x3, LSL #2]\n"
- "incw x3\n"
- "whilelt p1.s, x3, x5\n"
+ "st1w { z19.s }, p0, [x8, x7, LSL #2]\n"
+ "ld1w { z20.s }, p1/Z, [x20, x4, LSL #2]\n"
+ "st1w { z18.s }, p0, [x17, x7, LSL #2]\n"
+ "ld1w { z26.s }, p1/Z, [x14, x4, LSL #2]\n"
+ "st1w { z17.s }, p0, [x16, x7, LSL #2]\n"
+ "ld1w { z25.s }, p1/Z, [x11, x4, LSL #2]\n"
+ "st1w { z16.s }, p0, [x15, x7, LSL #2]\n"
+ "incw x7\n"
+ "ld1w { z24.s }, p1/Z, [x22, x4, LSL #2]\n"
+ "ld1w { z23.s }, p1/Z, [x19, x4, LSL #2]\n"
+ "incw x4\n"
+ "whilelt p1.s, x4, x6\n"
"b.any 1b\n"
"2:" // Vector: Tail
"fadd z17.s, z1.s, z0.s\n"
"fadd z16.s, z31.s, z30.s\n"
- "whilelt p0.s, x6, x5\n"
+ "whilelt p0.s, x7, x6\n"
"fadd z19.s, z17.s, z16.s\n"
"fadd z18.s, z3.s, z2.s\n"
"fadd z17.s, z29.s, z28.s\n"
@@ -184,22 +184,22 @@ void sme_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(
"fadd z18.s, z25.s, z22.s\n"
"fadd z17.s, z24.s, z17.s\n"
"fadd z16.s, z23.s, z22.s\n"
- "fadd z19.s, z21.s, z19.s\n"
- "fadd z18.s, z21.s, z18.s\n"
+ "fadd z19.s, z19.s, z21.s\n"
+ "fadd z18.s, z18.s, z21.s\n"
"fadd z17.s, z17.s, z20.s\n"
"fadd z16.s, z16.s, z20.s\n"
"fmul z19.s, z19.s, z4.s[0]\n"
- "st1w { z19.s }, p0, [x7, x6, LSL #2]\n"
+ "st1w { z19.s }, p0, [x8, x7, LSL #2]\n"
"fmul z18.s, z18.s, z4.s[1]\n"
"fmul z17.s, z17.s, z4.s[2]\n"
- "st1w { z18.s }, p0, [x8, x6, LSL #2]\n"
+ "st1w { z18.s }, p0, [x17, x7, LSL #2]\n"
"fmul z16.s, z16.s, z4.s[3]\n"
- "st1w { z17.s }, p0, [x17, x6, LSL #2]\n"
- "st1w { z16.s }, p0, [x16, x6, LSL #2]\n"
+ "st1w { z17.s }, p0, [x16, x7, LSL #2]\n"
+ "st1w { z16.s }, p0, [x15, x7, LSL #2]\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs)), [offsetof_rescale] "I" (offsetof(KernelArgs, rescale_vals))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_avg_generic_depthfirst/generic.cpp
index 03ab9c0a9e..08630dba05 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,83 +42,83 @@ void sme_fp32_nhwc_avg_generic_depthfirst_impl(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "mov x9, #0x0\n"
- "cntw x28\n"
- "cntw x27, ALL, MUL #2\n"
- "cntw x26, ALL, MUL #3\n"
+ "mov x28, #0x0\n"
+ "cntw x27\n"
+ "cntw x26, ALL, MUL #2\n"
+ "cntw x25, ALL, MUL #3\n"
"ptrue p0.b\n"
- "whilelt p3.s, x9, %x[n_channels]\n"
+ "whilelt p3.s, x28, %x[n_channels]\n"
"ld1rw { z6.s }, p0/Z, [%x[rescale_ptr]]\n"
- "whilelt p2.s, x28, %x[n_channels]\n"
- "whilelt p1.s, x27, %x[n_channels]\n"
- "whilelt p0.s, x26, %x[n_channels]\n"
+ "whilelt p2.s, x27, %x[n_channels]\n"
+ "whilelt p1.s, x26, %x[n_channels]\n"
+ "whilelt p0.s, x25, %x[n_channels]\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
- "lsr x25, %x[n_valid_cells], #0x2\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"mov z5.b, #0x0\n"
"mov z4.b, #0x0\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"mov z3.b, #0x0\n"
"mov z2.b, #0x0\n"
- "cbz x25, 4f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "subs x25, x25, #0x1\n"
- "ld1w { z1.s }, p3/Z, [x24, x9, LSL #2]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ld1w { z0.s }, p3/Z, [x23, x9, LSL #2]\n"
- "ld1w { z31.s }, p3/Z, [x22, x9, LSL #2]\n"
- "ld1w { z30.s }, p3/Z, [x21, x9, LSL #2]\n"
- "ld1w { z29.s }, p2/Z, [x24, x28, LSL #2]\n"
- "ld1w { z22.s }, p2/Z, [x23, x28, LSL #2]\n"
- "ld1w { z28.s }, p2/Z, [x22, x28, LSL #2]\n"
- "ld1w { z18.s }, p2/Z, [x21, x28, LSL #2]\n"
- "ld1w { z27.s }, p1/Z, [x24, x27, LSL #2]\n"
- "ld1w { z21.s }, p1/Z, [x23, x27, LSL #2]\n"
- "ld1w { z26.s }, p1/Z, [x22, x27, LSL #2]\n"
- "ld1w { z17.s }, p1/Z, [x21, x27, LSL #2]\n"
- "ld1w { z25.s }, p0/Z, [x24, x26, LSL #2]\n"
- "ld1w { z20.s }, p0/Z, [x23, x26, LSL #2]\n"
- "ld1w { z24.s }, p0/Z, [x22, x26, LSL #2]\n"
- "ld1w { z16.s }, p0/Z, [x21, x26, LSL #2]\n"
+ "cbz x24, 4f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "subs x24, x24, #0x1\n"
+ "ld1w { z1.s }, p3/Z, [x23, x28, LSL #2]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "ld1w { z0.s }, p3/Z, [x22, x28, LSL #2]\n"
+ "ld1w { z31.s }, p3/Z, [x21, x28, LSL #2]\n"
+ "ld1w { z30.s }, p3/Z, [x20, x28, LSL #2]\n"
+ "ld1w { z29.s }, p2/Z, [x23, x27, LSL #2]\n"
+ "ld1w { z22.s }, p2/Z, [x22, x27, LSL #2]\n"
+ "ld1w { z28.s }, p2/Z, [x21, x27, LSL #2]\n"
+ "ld1w { z18.s }, p2/Z, [x20, x27, LSL #2]\n"
+ "ld1w { z27.s }, p1/Z, [x23, x26, LSL #2]\n"
+ "ld1w { z21.s }, p1/Z, [x22, x26, LSL #2]\n"
+ "ld1w { z26.s }, p1/Z, [x21, x26, LSL #2]\n"
+ "ld1w { z17.s }, p1/Z, [x20, x26, LSL #2]\n"
+ "ld1w { z25.s }, p0/Z, [x23, x25, LSL #2]\n"
+ "ld1w { z20.s }, p0/Z, [x22, x25, LSL #2]\n"
+ "ld1w { z24.s }, p0/Z, [x21, x25, LSL #2]\n"
+ "ld1w { z16.s }, p0/Z, [x20, x25, LSL #2]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
"fadd z23.s, z1.s, z0.s\n"
"fadd z19.s, z31.s, z30.s\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "subs x25, x25, #0x1\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "subs x24, x24, #0x1\n"
"fadd z22.s, z29.s, z22.s\n"
"fadd z18.s, z28.s, z18.s\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
"fadd z21.s, z27.s, z21.s\n"
"fadd z17.s, z26.s, z17.s\n"
- "ld1w { z1.s }, p3/Z, [x24, x9, LSL #2]\n"
+ "ld1w { z1.s }, p3/Z, [x23, x28, LSL #2]\n"
"fadd z20.s, z25.s, z20.s\n"
"fadd z16.s, z24.s, z16.s\n"
- "ld1w { z0.s }, p3/Z, [x23, x9, LSL #2]\n"
+ "ld1w { z0.s }, p3/Z, [x22, x28, LSL #2]\n"
"fadd z19.s, z23.s, z19.s\n"
"fadd z18.s, z22.s, z18.s\n"
- "ld1w { z31.s }, p3/Z, [x22, x9, LSL #2]\n"
+ "ld1w { z31.s }, p3/Z, [x21, x28, LSL #2]\n"
"fadd z17.s, z21.s, z17.s\n"
"fadd z16.s, z20.s, z16.s\n"
- "ld1w { z30.s }, p3/Z, [x21, x9, LSL #2]\n"
+ "ld1w { z30.s }, p3/Z, [x20, x28, LSL #2]\n"
"fadd z5.s, z5.s, z19.s\n"
"fadd z4.s, z4.s, z18.s\n"
- "ld1w { z29.s }, p2/Z, [x24, x28, LSL #2]\n"
+ "ld1w { z29.s }, p2/Z, [x23, x27, LSL #2]\n"
"fadd z3.s, z3.s, z17.s\n"
"fadd z2.s, z2.s, z16.s\n"
- "ld1w { z22.s }, p2/Z, [x23, x28, LSL #2]\n"
- "ld1w { z28.s }, p2/Z, [x22, x28, LSL #2]\n"
- "ld1w { z18.s }, p2/Z, [x21, x28, LSL #2]\n"
- "ld1w { z27.s }, p1/Z, [x24, x27, LSL #2]\n"
- "ld1w { z21.s }, p1/Z, [x23, x27, LSL #2]\n"
- "ld1w { z26.s }, p1/Z, [x22, x27, LSL #2]\n"
- "ld1w { z17.s }, p1/Z, [x21, x27, LSL #2]\n"
- "ld1w { z25.s }, p0/Z, [x24, x26, LSL #2]\n"
- "ld1w { z20.s }, p0/Z, [x23, x26, LSL #2]\n"
- "ld1w { z24.s }, p0/Z, [x22, x26, LSL #2]\n"
- "ld1w { z16.s }, p0/Z, [x21, x26, LSL #2]\n"
+ "ld1w { z22.s }, p2/Z, [x22, x27, LSL #2]\n"
+ "ld1w { z28.s }, p2/Z, [x21, x27, LSL #2]\n"
+ "ld1w { z18.s }, p2/Z, [x20, x27, LSL #2]\n"
+ "ld1w { z27.s }, p1/Z, [x23, x26, LSL #2]\n"
+ "ld1w { z21.s }, p1/Z, [x22, x26, LSL #2]\n"
+ "ld1w { z26.s }, p1/Z, [x21, x26, LSL #2]\n"
+ "ld1w { z17.s }, p1/Z, [x20, x26, LSL #2]\n"
+ "ld1w { z25.s }, p0/Z, [x23, x25, LSL #2]\n"
+ "ld1w { z20.s }, p0/Z, [x22, x25, LSL #2]\n"
+ "ld1w { z24.s }, p0/Z, [x21, x25, LSL #2]\n"
+ "ld1w { z16.s }, p0/Z, [x20, x25, LSL #2]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
"fadd z23.s, z1.s, z0.s\n"
@@ -138,65 +138,65 @@ void sme_fp32_nhwc_avg_generic_depthfirst_impl(
"fadd z3.s, z3.s, z17.s\n"
"fadd z2.s, z2.s, z16.s\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ld1w { z1.s }, p3/Z, [x24, x9, LSL #2]\n"
- "subs x21, x21, #0x1\n"
+ "ldr x23, [x19], #0x8\n"
+ "ld1w { z1.s }, p3/Z, [x23, x28, LSL #2]\n"
+ "subs x20, x20, #0x1\n"
"fadd z5.s, z5.s, z1.s\n"
- "ld1w { z29.s }, p2/Z, [x24, x28, LSL #2]\n"
+ "ld1w { z29.s }, p2/Z, [x23, x27, LSL #2]\n"
"fadd z4.s, z4.s, z29.s\n"
- "ld1w { z27.s }, p1/Z, [x24, x27, LSL #2]\n"
+ "ld1w { z27.s }, p1/Z, [x23, x26, LSL #2]\n"
"fadd z3.s, z3.s, z27.s\n"
- "ld1w { z25.s }, p0/Z, [x24, x26, LSL #2]\n"
+ "ld1w { z25.s }, p0/Z, [x23, x25, LSL #2]\n"
"fadd z2.s, z2.s, z25.s\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
"fmul z5.s, z5.s, z6.s\n"
"fmul z4.s, z4.s, z6.s\n"
- "st1w { z5.s }, p3, [%x[outptr], x9, LSL #2]\n"
- "incw x9, ALL, MUL #4\n"
+ "st1w { z5.s }, p3, [%x[outptr], x28, LSL #2]\n"
+ "incw x28, ALL, MUL #4\n"
"fmul z3.s, z3.s, z6.s\n"
"fmul z2.s, z2.s, z6.s\n"
- "st1w { z4.s }, p2, [%x[outptr], x28, LSL #2]\n"
- "incw x28, ALL, MUL #4\n"
- "st1w { z3.s }, p1, [%x[outptr], x27, LSL #2]\n"
+ "st1w { z4.s }, p2, [%x[outptr], x27, LSL #2]\n"
"incw x27, ALL, MUL #4\n"
- "st1w { z2.s }, p0, [%x[outptr], x26, LSL #2]\n"
+ "st1w { z3.s }, p1, [%x[outptr], x26, LSL #2]\n"
"incw x26, ALL, MUL #4\n"
- "whilelt p0.s, x26, %x[n_channels]\n"
+ "st1w { z2.s }, p0, [%x[outptr], x25, LSL #2]\n"
+ "incw x25, ALL, MUL #4\n"
+ "whilelt p0.s, x25, %x[n_channels]\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p3.s, x9, %x[n_channels]\n"
+ "whilelt p3.s, x28, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
- "lsr x25, %x[n_valid_cells], #0x2\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"mov z5.b, #0x0\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 11f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "subs x25, x25, #0x1\n"
- "ld1w { z1.s }, p3/Z, [x24, x9, LSL #2]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ld1w { z0.s }, p3/Z, [x23, x9, LSL #2]\n"
- "ld1w { z31.s }, p3/Z, [x22, x9, LSL #2]\n"
- "ld1w { z30.s }, p3/Z, [x21, x9, LSL #2]\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 11f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "subs x24, x24, #0x1\n"
+ "ld1w { z1.s }, p3/Z, [x23, x28, LSL #2]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "ld1w { z0.s }, p3/Z, [x22, x28, LSL #2]\n"
+ "ld1w { z31.s }, p3/Z, [x21, x28, LSL #2]\n"
+ "ld1w { z30.s }, p3/Z, [x20, x28, LSL #2]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
"fadd z23.s, z1.s, z0.s\n"
"fadd z19.s, z31.s, z30.s\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "subs x25, x25, #0x1\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "subs x24, x24, #0x1\n"
"fadd z19.s, z23.s, z19.s\n"
- "ldp x22, x21, [x20, #0x10]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
"fadd z5.s, z5.s, z19.s\n"
- "add x20, x20, #0x20\n"
- "ld1w { z1.s }, p3/Z, [x24, x9, LSL #2]\n"
- "ld1w { z0.s }, p3/Z, [x23, x9, LSL #2]\n"
- "ld1w { z31.s }, p3/Z, [x22, x9, LSL #2]\n"
- "ld1w { z30.s }, p3/Z, [x21, x9, LSL #2]\n"
+ "add x19, x19, #0x20\n"
+ "ld1w { z1.s }, p3/Z, [x23, x28, LSL #2]\n"
+ "ld1w { z0.s }, p3/Z, [x22, x28, LSL #2]\n"
+ "ld1w { z31.s }, p3/Z, [x21, x28, LSL #2]\n"
+ "ld1w { z30.s }, p3/Z, [x20, x28, LSL #2]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
"fadd z23.s, z1.s, z0.s\n"
@@ -204,25 +204,25 @@ void sme_fp32_nhwc_avg_generic_depthfirst_impl(
"fadd z19.s, z23.s, z19.s\n"
"fadd z5.s, z5.s, z19.s\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ld1w { z1.s }, p3/Z, [x24, x9, LSL #2]\n"
- "subs x21, x21, #0x1\n"
+ "ldr x23, [x19], #0x8\n"
+ "ld1w { z1.s }, p3/Z, [x23, x28, LSL #2]\n"
+ "subs x20, x20, #0x1\n"
"fadd z5.s, z5.s, z1.s\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
"fmul z5.s, z5.s, z6.s\n"
- "st1w { z5.s }, p3, [%x[outptr], x9, LSL #2]\n"
- "incw x9\n"
- "whilelt p3.s, x9, %x[n_channels]\n"
+ "st1w { z5.s }, p3, [%x[outptr], x28, LSL #2]\n"
+ "incw x28\n"
+ "whilelt p3.s, x28, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
".inst 0xd503467f // SMSTOP\n"
:
: [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr), [rescale_ptr] "r" (&rescale_value)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
index 05edac6623..be254d307b 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -63,82 +63,82 @@ void sme_fp32_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
pad_left, pad_top, pad_right, pad_bottom);
__asm__ __volatile__(
- "ldr x21, [%x[args], %[offsetof_outptrs]]\n"
+ "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
".inst 0xd503477f // SMSTART ZA\n"
- "mov x15, #0x0\n"
- "ptrue p2.b\n"
- "ldr x20, [%x[args], %[offsetof_inptrs]]\n"
"mov x14, #0x0\n"
- "ldr x13, [%x[args], %[offsetof_n_channels]]\n"
- "whilelt p1.s, x15, x13\n"
- "ldp x12, x11, [x21, #0x0]\n"
- "ldp x10, x9, [x21, #0x10]\n"
- "ldp x28, x27, [x20, #0x0]\n"
- "ld1w { z30.s }, p1/Z, [x27, x15, LSL #2]\n"
- "ldp x26, x25, [x20, #0x10]\n"
- "ld1w { z29.s }, p1/Z, [x25, x15, LSL #2]\n"
- "ldp x24, x23, [x20, #0x20]\n"
- "ld1w { z28.s }, p1/Z, [x24, x15, LSL #2]\n"
- "ldp x22, x21, [x20, #0x30]\n"
- "ld1w { z27.s }, p1/Z, [x21, x15, LSL #2]\n"
- "ldr x20, [x20, #0x40]\n"
- "ld1w { z26.s }, p1/Z, [x28, x15, LSL #2]\n"
- "ld1w { z25.s }, p1/Z, [x26, x15, LSL #2]\n"
- "ld1w { z24.s }, p1/Z, [x23, x15, LSL #2]\n"
- "ld1w { z23.s }, p1/Z, [x22, x15, LSL #2]\n"
- "ld1w { z19.s }, p1/Z, [x20, x15, LSL #2]\n"
- "incw x15\n"
- "whilelt p1.s, x15, x13\n"
+ "ptrue p2.b\n"
+ "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+ "mov x13, #0x0\n"
+ "ldr x12, [%x[args], %[offsetof_n_channels]]\n"
+ "whilelt p1.s, x14, x12\n"
+ "ldp x11, x10, [x20, #0x0]\n"
+ "ldp x9, x28, [x20, #0x10]\n"
+ "ldp x27, x26, [x19, #0x0]\n"
+ "ld1w { z29.s }, p1/Z, [x26, x14, LSL #2]\n"
+ "ldp x25, x24, [x19, #0x10]\n"
+ "ld1w { z28.s }, p1/Z, [x24, x14, LSL #2]\n"
+ "ldp x23, x22, [x19, #0x20]\n"
+ "ld1w { z27.s }, p1/Z, [x23, x14, LSL #2]\n"
+ "ldp x21, x20, [x19, #0x30]\n"
+ "ld1w { z26.s }, p1/Z, [x20, x14, LSL #2]\n"
+ "ldr x19, [x19, #0x40]\n"
+ "ld1w { z20.s }, p1/Z, [x27, x14, LSL #2]\n"
+ "ld1w { z25.s }, p1/Z, [x22, x14, LSL #2]\n"
+ "ld1w { z24.s }, p1/Z, [x25, x14, LSL #2]\n"
+ "ld1w { z23.s }, p1/Z, [x21, x14, LSL #2]\n"
+ "ld1w { z19.s }, p1/Z, [x19, x14, LSL #2]\n"
+ "incw x14\n"
+ "whilelt p1.s, x14, x12\n"
"b.none 2f\n"
"1:" // Vector: Loop
- "movprfx z22, z30\n fmax z22.s, p2/M, z22.s, z28.s\n"
- "movprfx z21, z28\n fmax z21.s, p2/M, z21.s, z27.s\n"
- "ld1w { z30.s }, p1/Z, [x27, x15, LSL #2]\n"
- "whilelt p0.s, x14, x13\n"
- "movprfx z20, z29\n fmax z20.s, p2/M, z20.s, z26.s\n"
- "movprfx z18, z25\n fmax z18.s, p2/M, z18.s, z24.s\n"
- "ld1w { z28.s }, p1/Z, [x24, x15, LSL #2]\n"
- "movprfx z17, z29\n fmax z17.s, p2/M, z17.s, z23.s\n"
- "movprfx z16, z24\n fmax z16.s, p2/M, z16.s, z19.s\n"
- "ld1w { z27.s }, p1/Z, [x21, x15, LSL #2]\n"
- "ld1w { z29.s }, p1/Z, [x25, x15, LSL #2]\n"
- "movprfx z19, z22\n fmax z19.s, p2/M, z19.s, z20.s\n"
- "fmax z18.s, p2/M, z18.s, z22.s\n"
- "ld1w { z26.s }, p1/Z, [x28, x15, LSL #2]\n"
+ "movprfx z22, z29\n fmax z22.s, p2/M, z22.s, z27.s\n"
+ "movprfx z21, z27\n fmax z21.s, p2/M, z21.s, z26.s\n"
+ "ld1w { z29.s }, p1/Z, [x26, x14, LSL #2]\n"
+ "whilelt p0.s, x13, x12\n"
+ "movprfx z18, z28\n fmax z18.s, p2/M, z18.s, z20.s\n"
+ "movprfx z20, z25\n fmax z20.s, p2/M, z20.s, z24.s\n"
+ "ld1w { z27.s }, p1/Z, [x23, x14, LSL #2]\n"
+ "movprfx z17, z23\n fmax z17.s, p2/M, z17.s, z28.s\n"
+ "movprfx z16, z25\n fmax z16.s, p2/M, z16.s, z19.s\n"
+ "ld1w { z26.s }, p1/Z, [x20, x14, LSL #2]\n"
+ "ld1w { z28.s }, p1/Z, [x24, x14, LSL #2]\n"
+ "movprfx z19, z18\n fmax z19.s, p2/M, z19.s, z22.s\n"
+ "movprfx z18, z22\n fmax z18.s, p2/M, z18.s, z20.s\n"
+ "ld1w { z20.s }, p1/Z, [x27, x14, LSL #2]\n"
"fmax z17.s, p2/M, z17.s, z21.s\n"
"fmax z16.s, p2/M, z16.s, z21.s\n"
- "ld1w { z25.s }, p1/Z, [x26, x15, LSL #2]\n"
- "st1w { z19.s }, p0, [x12, x14, LSL #2]\n"
- "ld1w { z24.s }, p1/Z, [x23, x15, LSL #2]\n"
- "st1w { z18.s }, p0, [x11, x14, LSL #2]\n"
- "ld1w { z23.s }, p1/Z, [x22, x15, LSL #2]\n"
- "st1w { z17.s }, p0, [x10, x14, LSL #2]\n"
- "ld1w { z19.s }, p1/Z, [x20, x15, LSL #2]\n"
- "incw x15\n"
- "whilelt p1.s, x15, x13\n"
- "st1w { z16.s }, p0, [x9, x14, LSL #2]\n"
+ "ld1w { z25.s }, p1/Z, [x22, x14, LSL #2]\n"
+ "st1w { z19.s }, p0, [x11, x13, LSL #2]\n"
+ "ld1w { z24.s }, p1/Z, [x25, x14, LSL #2]\n"
+ "st1w { z18.s }, p0, [x10, x13, LSL #2]\n"
+ "ld1w { z23.s }, p1/Z, [x21, x14, LSL #2]\n"
+ "st1w { z17.s }, p0, [x9, x13, LSL #2]\n"
+ "ld1w { z19.s }, p1/Z, [x19, x14, LSL #2]\n"
"incw x14\n"
+ "whilelt p1.s, x14, x12\n"
+ "st1w { z16.s }, p0, [x28, x13, LSL #2]\n"
+ "incw x13\n"
"b.any 1b\n"
"2:" // Vector: Tail
- "movprfx z22, z30\n fmax z22.s, p2/M, z22.s, z28.s\n"
- "movprfx z21, z28\n fmax z21.s, p2/M, z21.s, z27.s\n"
- "whilelt p0.s, x14, x13\n"
- "movprfx z20, z29\n fmax z20.s, p2/M, z20.s, z26.s\n"
- "movprfx z18, z25\n fmax z18.s, p2/M, z18.s, z24.s\n"
- "movprfx z17, z29\n fmax z17.s, p2/M, z17.s, z23.s\n"
- "movprfx z16, z24\n fmax z16.s, p2/M, z16.s, z19.s\n"
- "movprfx z19, z22\n fmax z19.s, p2/M, z19.s, z20.s\n"
- "fmax z18.s, p2/M, z18.s, z22.s\n"
- "st1w { z19.s }, p0, [x12, x14, LSL #2]\n"
+ "movprfx z22, z29\n fmax z22.s, p2/M, z22.s, z27.s\n"
+ "movprfx z21, z27\n fmax z21.s, p2/M, z21.s, z26.s\n"
+ "whilelt p0.s, x13, x12\n"
+ "movprfx z18, z28\n fmax z18.s, p2/M, z18.s, z20.s\n"
+ "movprfx z20, z25\n fmax z20.s, p2/M, z20.s, z24.s\n"
+ "movprfx z17, z23\n fmax z17.s, p2/M, z17.s, z28.s\n"
+ "movprfx z16, z25\n fmax z16.s, p2/M, z16.s, z19.s\n"
+ "movprfx z19, z18\n fmax z19.s, p2/M, z19.s, z22.s\n"
+ "movprfx z18, z22\n fmax z18.s, p2/M, z18.s, z20.s\n"
+ "st1w { z19.s }, p0, [x11, x13, LSL #2]\n"
"fmax z17.s, p2/M, z17.s, z21.s\n"
"fmax z16.s, p2/M, z16.s, z21.s\n"
- "st1w { z18.s }, p0, [x11, x14, LSL #2]\n"
- "st1w { z17.s }, p0, [x10, x14, LSL #2]\n"
- "st1w { z16.s }, p0, [x9, x14, LSL #2]\n"
+ "st1w { z18.s }, p0, [x10, x13, LSL #2]\n"
+ "st1w { z17.s }, p0, [x9, x13, LSL #2]\n"
+ "st1w { z16.s }, p0, [x28, x13, LSL #2]\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_max_generic_depthfirst/generic.cpp
index 14c07724a1..b9f90ea2ef 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,82 +40,82 @@ void sme_fp32_nhwc_max_generic_depthfirst_impl(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "mov x9, #0x0\n"
- "cntw x28\n"
- "cntw x27, ALL, MUL #2\n"
- "cntw x26, ALL, MUL #3\n"
- "whilelt p4.s, x9, %x[n_channels]\n"
- "whilelt p3.s, x28, %x[n_channels]\n"
- "whilelt p2.s, x27, %x[n_channels]\n"
- "whilelt p1.s, x26, %x[n_channels]\n"
+ "mov x28, #0x0\n"
+ "cntw x27\n"
+ "cntw x26, ALL, MUL #2\n"
+ "cntw x25, ALL, MUL #3\n"
+ "whilelt p4.s, x28, %x[n_channels]\n"
+ "whilelt p3.s, x27, %x[n_channels]\n"
+ "whilelt p2.s, x26, %x[n_channels]\n"
+ "whilelt p1.s, x25, %x[n_channels]\n"
"ptrue p0.b\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
- "lsr x25, %x[n_valid_cells], #0x2\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"mov z4.s, #0xff800000\n"
"mov z3.s, #0xff800000\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"mov z2.s, #0xff800000\n"
"mov z1.s, #0xff800000\n"
- "cbz x25, 4f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "subs x25, x25, #0x1\n"
- "ld1w { z0.s }, p4/Z, [x24, x9, LSL #2]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ld1w { z31.s }, p4/Z, [x23, x9, LSL #2]\n"
- "ld1w { z23.s }, p4/Z, [x22, x9, LSL #2]\n"
- "ld1w { z30.s }, p4/Z, [x21, x9, LSL #2]\n"
- "ld1w { z18.s }, p3/Z, [x24, x28, LSL #2]\n"
- "ld1w { z29.s }, p3/Z, [x23, x28, LSL #2]\n"
- "ld1w { z22.s }, p3/Z, [x22, x28, LSL #2]\n"
- "ld1w { z28.s }, p3/Z, [x21, x28, LSL #2]\n"
- "ld1w { z17.s }, p2/Z, [x24, x27, LSL #2]\n"
- "ld1w { z27.s }, p2/Z, [x23, x27, LSL #2]\n"
- "ld1w { z21.s }, p2/Z, [x22, x27, LSL #2]\n"
- "ld1w { z26.s }, p2/Z, [x21, x27, LSL #2]\n"
- "ld1w { z16.s }, p1/Z, [x24, x26, LSL #2]\n"
- "ld1w { z25.s }, p1/Z, [x23, x26, LSL #2]\n"
- "ld1w { z20.s }, p1/Z, [x22, x26, LSL #2]\n"
- "ld1w { z24.s }, p1/Z, [x21, x26, LSL #2]\n"
+ "cbz x24, 4f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "subs x24, x24, #0x1\n"
+ "ld1w { z0.s }, p4/Z, [x23, x28, LSL #2]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "ld1w { z31.s }, p4/Z, [x22, x28, LSL #2]\n"
+ "ld1w { z23.s }, p4/Z, [x21, x28, LSL #2]\n"
+ "ld1w { z30.s }, p4/Z, [x20, x28, LSL #2]\n"
+ "ld1w { z18.s }, p3/Z, [x23, x27, LSL #2]\n"
+ "ld1w { z29.s }, p3/Z, [x22, x27, LSL #2]\n"
+ "ld1w { z22.s }, p3/Z, [x21, x27, LSL #2]\n"
+ "ld1w { z28.s }, p3/Z, [x20, x27, LSL #2]\n"
+ "ld1w { z17.s }, p2/Z, [x23, x26, LSL #2]\n"
+ "ld1w { z27.s }, p2/Z, [x22, x26, LSL #2]\n"
+ "ld1w { z21.s }, p2/Z, [x21, x26, LSL #2]\n"
+ "ld1w { z26.s }, p2/Z, [x20, x26, LSL #2]\n"
+ "ld1w { z16.s }, p1/Z, [x23, x25, LSL #2]\n"
+ "ld1w { z25.s }, p1/Z, [x22, x25, LSL #2]\n"
+ "ld1w { z20.s }, p1/Z, [x21, x25, LSL #2]\n"
+ "ld1w { z24.s }, p1/Z, [x20, x25, LSL #2]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
"movprfx z19, z0\n fmax z19.s, p0/M, z19.s, z31.s\n"
"fmax z23.s, p0/M, z23.s, z30.s\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "subs x25, x25, #0x1\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "subs x24, x24, #0x1\n"
"fmax z18.s, p0/M, z18.s, z29.s\n"
"fmax z22.s, p0/M, z22.s, z28.s\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
"fmax z17.s, p0/M, z17.s, z27.s\n"
"fmax z21.s, p0/M, z21.s, z26.s\n"
- "ld1w { z0.s }, p4/Z, [x24, x9, LSL #2]\n"
+ "ld1w { z0.s }, p4/Z, [x23, x28, LSL #2]\n"
"fmax z16.s, p0/M, z16.s, z25.s\n"
"fmax z20.s, p0/M, z20.s, z24.s\n"
- "ld1w { z31.s }, p4/Z, [x23, x9, LSL #2]\n"
+ "ld1w { z31.s }, p4/Z, [x22, x28, LSL #2]\n"
"fmax z19.s, p0/M, z19.s, z23.s\n"
"fmax z18.s, p0/M, z18.s, z22.s\n"
- "ld1w { z23.s }, p4/Z, [x22, x9, LSL #2]\n"
+ "ld1w { z23.s }, p4/Z, [x21, x28, LSL #2]\n"
"fmax z17.s, p0/M, z17.s, z21.s\n"
"fmax z16.s, p0/M, z16.s, z20.s\n"
- "ld1w { z30.s }, p4/Z, [x21, x9, LSL #2]\n"
+ "ld1w { z30.s }, p4/Z, [x20, x28, LSL #2]\n"
"fmax z4.s, p0/M, z4.s, z19.s\n"
"fmax z3.s, p0/M, z3.s, z18.s\n"
- "ld1w { z18.s }, p3/Z, [x24, x28, LSL #2]\n"
+ "ld1w { z18.s }, p3/Z, [x23, x27, LSL #2]\n"
"fmax z2.s, p0/M, z2.s, z17.s\n"
"fmax z1.s, p0/M, z1.s, z16.s\n"
- "ld1w { z29.s }, p3/Z, [x23, x28, LSL #2]\n"
- "ld1w { z22.s }, p3/Z, [x22, x28, LSL #2]\n"
- "ld1w { z28.s }, p3/Z, [x21, x28, LSL #2]\n"
- "ld1w { z17.s }, p2/Z, [x24, x27, LSL #2]\n"
- "ld1w { z27.s }, p2/Z, [x23, x27, LSL #2]\n"
- "ld1w { z21.s }, p2/Z, [x22, x27, LSL #2]\n"
- "ld1w { z26.s }, p2/Z, [x21, x27, LSL #2]\n"
- "ld1w { z16.s }, p1/Z, [x24, x26, LSL #2]\n"
- "ld1w { z25.s }, p1/Z, [x23, x26, LSL #2]\n"
- "ld1w { z20.s }, p1/Z, [x22, x26, LSL #2]\n"
- "ld1w { z24.s }, p1/Z, [x21, x26, LSL #2]\n"
+ "ld1w { z29.s }, p3/Z, [x22, x27, LSL #2]\n"
+ "ld1w { z22.s }, p3/Z, [x21, x27, LSL #2]\n"
+ "ld1w { z28.s }, p3/Z, [x20, x27, LSL #2]\n"
+ "ld1w { z17.s }, p2/Z, [x23, x26, LSL #2]\n"
+ "ld1w { z27.s }, p2/Z, [x22, x26, LSL #2]\n"
+ "ld1w { z21.s }, p2/Z, [x21, x26, LSL #2]\n"
+ "ld1w { z26.s }, p2/Z, [x20, x26, LSL #2]\n"
+ "ld1w { z16.s }, p1/Z, [x23, x25, LSL #2]\n"
+ "ld1w { z25.s }, p1/Z, [x22, x25, LSL #2]\n"
+ "ld1w { z20.s }, p1/Z, [x21, x25, LSL #2]\n"
+ "ld1w { z24.s }, p1/Z, [x20, x25, LSL #2]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
"movprfx z19, z0\n fmax z19.s, p0/M, z19.s, z31.s\n"
@@ -135,61 +135,61 @@ void sme_fp32_nhwc_max_generic_depthfirst_impl(
"fmax z2.s, p0/M, z2.s, z17.s\n"
"fmax z1.s, p0/M, z1.s, z16.s\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ld1w { z0.s }, p4/Z, [x24, x9, LSL #2]\n"
- "subs x21, x21, #0x1\n"
+ "ldr x23, [x19], #0x8\n"
+ "ld1w { z0.s }, p4/Z, [x23, x28, LSL #2]\n"
+ "subs x20, x20, #0x1\n"
"fmax z4.s, p0/M, z4.s, z0.s\n"
- "ld1w { z18.s }, p3/Z, [x24, x28, LSL #2]\n"
+ "ld1w { z18.s }, p3/Z, [x23, x27, LSL #2]\n"
"fmax z3.s, p0/M, z3.s, z18.s\n"
- "ld1w { z17.s }, p2/Z, [x24, x27, LSL #2]\n"
+ "ld1w { z17.s }, p2/Z, [x23, x26, LSL #2]\n"
"fmax z2.s, p0/M, z2.s, z17.s\n"
- "ld1w { z16.s }, p1/Z, [x24, x26, LSL #2]\n"
+ "ld1w { z16.s }, p1/Z, [x23, x25, LSL #2]\n"
"fmax z1.s, p0/M, z1.s, z16.s\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "st1w { z4.s }, p4, [%x[outptr], x9, LSL #2]\n"
- "incw x9, ALL, MUL #4\n"
- "st1w { z3.s }, p3, [%x[outptr], x28, LSL #2]\n"
+ "st1w { z4.s }, p4, [%x[outptr], x28, LSL #2]\n"
"incw x28, ALL, MUL #4\n"
- "st1w { z2.s }, p2, [%x[outptr], x27, LSL #2]\n"
+ "st1w { z3.s }, p3, [%x[outptr], x27, LSL #2]\n"
"incw x27, ALL, MUL #4\n"
- "st1w { z1.s }, p1, [%x[outptr], x26, LSL #2]\n"
+ "st1w { z2.s }, p2, [%x[outptr], x26, LSL #2]\n"
"incw x26, ALL, MUL #4\n"
- "whilelt p1.s, x26, %x[n_channels]\n"
+ "st1w { z1.s }, p1, [%x[outptr], x25, LSL #2]\n"
+ "incw x25, ALL, MUL #4\n"
+ "whilelt p1.s, x25, %x[n_channels]\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p4.s, x9, %x[n_channels]\n"
+ "whilelt p4.s, x28, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
- "lsr x25, %x[n_valid_cells], #0x2\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"mov z4.s, #0xff800000\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 11f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "subs x25, x25, #0x1\n"
- "ld1w { z0.s }, p4/Z, [x24, x9, LSL #2]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ld1w { z31.s }, p4/Z, [x23, x9, LSL #2]\n"
- "ld1w { z23.s }, p4/Z, [x22, x9, LSL #2]\n"
- "ld1w { z30.s }, p4/Z, [x21, x9, LSL #2]\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 11f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "subs x24, x24, #0x1\n"
+ "ld1w { z0.s }, p4/Z, [x23, x28, LSL #2]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "ld1w { z31.s }, p4/Z, [x22, x28, LSL #2]\n"
+ "ld1w { z23.s }, p4/Z, [x21, x28, LSL #2]\n"
+ "ld1w { z30.s }, p4/Z, [x20, x28, LSL #2]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
"movprfx z19, z0\n fmax z19.s, p0/M, z19.s, z31.s\n"
"fmax z23.s, p0/M, z23.s, z30.s\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "subs x25, x25, #0x1\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "subs x24, x24, #0x1\n"
"fmax z19.s, p0/M, z19.s, z23.s\n"
- "ldp x22, x21, [x20, #0x10]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
"fmax z4.s, p0/M, z4.s, z19.s\n"
- "add x20, x20, #0x20\n"
- "ld1w { z0.s }, p4/Z, [x24, x9, LSL #2]\n"
- "ld1w { z31.s }, p4/Z, [x23, x9, LSL #2]\n"
- "ld1w { z23.s }, p4/Z, [x22, x9, LSL #2]\n"
- "ld1w { z30.s }, p4/Z, [x21, x9, LSL #2]\n"
+ "add x19, x19, #0x20\n"
+ "ld1w { z0.s }, p4/Z, [x23, x28, LSL #2]\n"
+ "ld1w { z31.s }, p4/Z, [x22, x28, LSL #2]\n"
+ "ld1w { z23.s }, p4/Z, [x21, x28, LSL #2]\n"
+ "ld1w { z30.s }, p4/Z, [x20, x28, LSL #2]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
"movprfx z19, z0\n fmax z19.s, p0/M, z19.s, z31.s\n"
@@ -197,24 +197,24 @@ void sme_fp32_nhwc_max_generic_depthfirst_impl(
"fmax z19.s, p0/M, z19.s, z23.s\n"
"fmax z4.s, p0/M, z4.s, z19.s\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ld1w { z0.s }, p4/Z, [x24, x9, LSL #2]\n"
- "subs x21, x21, #0x1\n"
+ "ldr x23, [x19], #0x8\n"
+ "ld1w { z0.s }, p4/Z, [x23, x28, LSL #2]\n"
+ "subs x20, x20, #0x1\n"
"fmax z4.s, p0/M, z4.s, z0.s\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "st1w { z4.s }, p4, [%x[outptr], x9, LSL #2]\n"
- "incw x9\n"
- "whilelt p4.s, x9, %x[n_channels]\n"
+ "st1w { z4.s }, p4, [%x[outptr], x28, LSL #2]\n"
+ "incw x28\n"
+ "whilelt p4.s, x28, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
".inst 0xd503467f // SMSTOP\n"
:
: [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_avg_generic_depthfirst/generic.cpp
index ded1274c13..c5066d1017 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -84,32 +84,31 @@ void sme_s8_nhwc_avg_generic_depthfirst_impl(
f_rescale_value *= 2.0f;
}
- int64_t long_rescale_value = round(f_rescale_value * static_cast<float>(1ll << 31));
- if (long_rescale_value == (1ll << 31))
+ rescale_value = static_cast<int32_t>(round(f_rescale_value * static_cast<float>(1ll << 31)));
+ if (static_cast<int64_t>(rescale_value) == (1ll << 31))
{
shift_value++;
- long_rescale_value >>= 1;
+ rescale_value >>= 1;
}
- rescale_value = static_cast<int32_t>(long_rescale_value);
}
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "mov x27, #0x0\n"
- "cntb x26\n"
- "cntb x25, ALL, MUL #2\n"
- "cntb x24, ALL, MUL #3\n"
- "whilelt p4.b, x27, %x[n_channels]\n"
- "whilelt p3.b, x26, %x[n_channels]\n"
- "whilelt p2.b, x25, %x[n_channels]\n"
- "whilelt p1.b, x24, %x[n_channels]\n"
+ "mov x26, #0x0\n"
+ "cntb x25\n"
+ "cntb x24, ALL, MUL #2\n"
+ "cntb x23, ALL, MUL #3\n"
+ "whilelt p4.b, x26, %x[n_channels]\n"
+ "whilelt p3.b, x25, %x[n_channels]\n"
+ "whilelt p2.b, x24, %x[n_channels]\n"
+ "whilelt p1.b, x23, %x[n_channels]\n"
"ptrue p0.b\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
- "lsr x23, %x[n_valid_cells], #0x1\n"
+ "lsr x22, %x[n_valid_cells], #0x1\n"
"mov z15.s, #0x0\n"
"mov z14.s, #0x0\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"mov z13.s, #0x0\n"
"mov z12.s, #0x0\n"
"mov z11.s, #0x0\n"
@@ -124,49 +123,49 @@ void sme_s8_nhwc_avg_generic_depthfirst_impl(
"mov z2.s, #0x0\n"
"mov z1.s, #0x0\n"
"mov z0.s, #0x0\n"
- "cbz x23, 4f\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "subs x23, x23, #0x1\n"
- "add x20, x20, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
- "ld1b { z30.b }, p4/Z, [x21, x27]\n"
- "ld1b { z29.b }, p3/Z, [x22, x26]\n"
- "ld1b { z28.b }, p3/Z, [x21, x26]\n"
- "ld1b { z27.b }, p2/Z, [x22, x25]\n"
- "ld1b { z26.b }, p2/Z, [x21, x25]\n"
- "ld1b { z25.b }, p1/Z, [x22, x24]\n"
- "ld1b { z24.b }, p1/Z, [x21, x24]\n"
+ "cbz x22, 4f\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
+ "add x19, x19, #0x10\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+ "ld1b { z29.b }, p3/Z, [x21, x25]\n"
+ "ld1b { z28.b }, p3/Z, [x20, x25]\n"
+ "ld1b { z27.b }, p2/Z, [x21, x24]\n"
+ "ld1b { z26.b }, p2/Z, [x20, x24]\n"
+ "ld1b { z25.b }, p1/Z, [x21, x23]\n"
+ "ld1b { z24.b }, p1/Z, [x20, x23]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 2 inputs loop
".inst 0x455e03f7 // saddlb z23.h, z31.b, z30.b\n"
".inst 0x455e07f6 // saddlt z22.h, z31.b, z30.b\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "subs x23, x23, #0x1\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
".inst 0x455c03b5 // saddlb z21.h, z29.b, z28.b\n"
".inst 0x455c07b4 // saddlt z20.h, z29.b, z28.b\n"
- "add x20, x20, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
+ "add x19, x19, #0x10\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
".inst 0x455a0373 // saddlb z19.h, z27.b, z26.b\n"
".inst 0x455a0772 // saddlt z18.h, z27.b, z26.b\n"
- "ld1b { z30.b }, p4/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x26]\n"
".inst 0x45580331 // saddlb z17.h, z25.b, z24.b\n"
".inst 0x45580730 // saddlt z16.h, z25.b, z24.b\n"
- "ld1b { z29.b }, p3/Z, [x22, x26]\n"
+ "ld1b { z29.b }, p3/Z, [x21, x25]\n"
".inst 0x459741ef // saddwb z15.s, z15.s, z23.h\n"
".inst 0x459745ce // saddwt z14.s, z14.s, z23.h\n"
- "ld1b { z28.b }, p3/Z, [x21, x26]\n"
+ "ld1b { z28.b }, p3/Z, [x20, x25]\n"
".inst 0x459641ad // saddwb z13.s, z13.s, z22.h\n"
".inst 0x4596458c // saddwt z12.s, z12.s, z22.h\n"
- "ld1b { z27.b }, p2/Z, [x22, x25]\n"
+ "ld1b { z27.b }, p2/Z, [x21, x24]\n"
".inst 0x4595416b // saddwb z11.s, z11.s, z21.h\n"
".inst 0x4595454a // saddwt z10.s, z10.s, z21.h\n"
- "ld1b { z26.b }, p2/Z, [x21, x25]\n"
+ "ld1b { z26.b }, p2/Z, [x20, x24]\n"
".inst 0x45944129 // saddwb z9.s, z9.s, z20.h\n"
".inst 0x45944508 // saddwt z8.s, z8.s, z20.h\n"
- "ld1b { z25.b }, p1/Z, [x22, x24]\n"
+ "ld1b { z25.b }, p1/Z, [x21, x23]\n"
".inst 0x459340e7 // saddwb z7.s, z7.s, z19.h\n"
".inst 0x459344c6 // saddwt z6.s, z6.s, z19.h\n"
- "ld1b { z24.b }, p1/Z, [x21, x24]\n"
+ "ld1b { z24.b }, p1/Z, [x20, x23]\n"
".inst 0x459240a5 // saddwb z5.s, z5.s, z18.h\n"
".inst 0x45924484 // saddwt z4.s, z4.s, z18.h\n"
".inst 0x45914063 // saddwb z3.s, z3.s, z17.h\n"
@@ -200,21 +199,21 @@ void sme_s8_nhwc_avg_generic_depthfirst_impl(
".inst 0x45904021 // saddwb z1.s, z1.s, z16.h\n"
".inst 0x45904400 // saddwt z0.s, z0.s, z16.h\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x1\n"
+ "ands x20, %x[n_valid_cells], #0x1\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x22, [x20], #0x8\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
+ "ldr x21, [x19], #0x8\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
".inst 0x4508a3f7 // sshllb z23.h, z31.b, #0x0\n"
".inst 0x4508a7f6 // sshllt z22.h, z31.b, #0x0\n"
- "ld1b { z29.b }, p3/Z, [x22, x26]\n"
+ "ld1b { z29.b }, p3/Z, [x21, x25]\n"
".inst 0x4508a3b5 // sshllb z21.h, z29.b, #0x0\n"
".inst 0x4508a7b4 // sshllt z20.h, z29.b, #0x0\n"
- "subs x21, x21, #0x1\n"
- "ld1b { z27.b }, p2/Z, [x22, x25]\n"
+ "subs x20, x20, #0x1\n"
+ "ld1b { z27.b }, p2/Z, [x21, x24]\n"
".inst 0x4508a373 // sshllb z19.h, z27.b, #0x0\n"
".inst 0x4508a772 // sshllt z18.h, z27.b, #0x0\n"
- "ld1b { z25.b }, p1/Z, [x22, x24]\n"
+ "ld1b { z25.b }, p1/Z, [x21, x23]\n"
".inst 0x4508a331 // sshllb z17.h, z25.b, #0x0\n"
".inst 0x4508a730 // sshllt z16.h, z25.b, #0x0\n"
".inst 0x459741ef // saddwb z15.s, z15.s, z23.h\n"
@@ -313,47 +312,47 @@ void sme_s8_nhwc_avg_generic_depthfirst_impl(
"trn1 z19.h, z1.h, z0.h\n"
"trn1 z16.b, z23.b, z16.b\n"
"trn1 z18.b, z22.b, z18.b\n"
- "st1b { z16.b }, p4, [%x[outptr], x27]\n"
- "incb x27, ALL, MUL #4\n"
+ "st1b { z16.b }, p4, [%x[outptr], x26]\n"
+ "incb x26, ALL, MUL #4\n"
"trn1 z17.b, z21.b, z17.b\n"
"trn1 z16.b, z20.b, z19.b\n"
- "st1b { z18.b }, p3, [%x[outptr], x26]\n"
- "incb x26, ALL, MUL #4\n"
- "st1b { z17.b }, p2, [%x[outptr], x25]\n"
+ "st1b { z18.b }, p3, [%x[outptr], x25]\n"
"incb x25, ALL, MUL #4\n"
- "st1b { z16.b }, p1, [%x[outptr], x24]\n"
+ "st1b { z17.b }, p2, [%x[outptr], x24]\n"
"incb x24, ALL, MUL #4\n"
- "whilelt p1.b, x24, %x[n_channels]\n"
+ "st1b { z16.b }, p1, [%x[outptr], x23]\n"
+ "incb x23, ALL, MUL #4\n"
+ "whilelt p1.b, x23, %x[n_channels]\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p4.b, x27, %x[n_channels]\n"
+ "whilelt p4.b, x26, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
- "lsr x23, %x[n_valid_cells], #0x1\n"
+ "lsr x22, %x[n_valid_cells], #0x1\n"
"mov z15.s, #0x0\n"
"mov z14.s, #0x0\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"mov z13.s, #0x0\n"
"mov z12.s, #0x0\n"
- "cbz x23, 11f\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "subs x23, x23, #0x1\n"
- "add x20, x20, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
- "ld1b { z30.b }, p4/Z, [x21, x27]\n"
+ "cbz x22, 11f\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
+ "add x19, x19, #0x10\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x26]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 2 inputs loop
".inst 0x455e03f7 // saddlb z23.h, z31.b, z30.b\n"
".inst 0x455e07f6 // saddlt z22.h, z31.b, z30.b\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "subs x23, x23, #0x1\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
".inst 0x459741ef // saddwb z15.s, z15.s, z23.h\n"
".inst 0x459745ce // saddwt z14.s, z14.s, z23.h\n"
- "add x20, x20, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
+ "add x19, x19, #0x10\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
".inst 0x459641ad // saddwb z13.s, z13.s, z22.h\n"
".inst 0x4596458c // saddwt z12.s, z12.s, z22.h\n"
- "ld1b { z30.b }, p4/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x26]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 2 inputs tail
".inst 0x455e03f7 // saddlb z23.h, z31.b, z30.b\n"
@@ -363,14 +362,14 @@ void sme_s8_nhwc_avg_generic_depthfirst_impl(
".inst 0x459641ad // saddwb z13.s, z13.s, z22.h\n"
".inst 0x4596458c // saddwt z12.s, z12.s, z22.h\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x1\n"
+ "ands x20, %x[n_valid_cells], #0x1\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x22, [x20], #0x8\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
+ "ldr x21, [x19], #0x8\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
".inst 0x4508a3f7 // sshllb z23.h, z31.b, #0x0\n"
".inst 0x4508a7f6 // sshllt z22.h, z31.b, #0x0\n"
- "subs x21, x21, #0x1\n"
+ "subs x20, x20, #0x1\n"
".inst 0x459741ef // saddwb z15.s, z15.s, z23.h\n"
".inst 0x459745ce // saddwt z14.s, z14.s, z23.h\n"
".inst 0x459641ad // saddwb z13.s, z13.s, z22.h\n"
@@ -400,15 +399,15 @@ void sme_s8_nhwc_avg_generic_depthfirst_impl(
"smin z12.s, p0/M, z12.s, z19.s\n"
"trn1 z16.h, z13.h, z12.h\n"
"trn1 z16.b, z23.b, z16.b\n"
- "st1b { z16.b }, p4, [%x[outptr], x27]\n"
- "incb x27\n"
- "whilelt p4.b, x27, %x[n_channels]\n"
+ "st1b { z16.b }, p4, [%x[outptr], x26]\n"
+ "incb x26\n"
+ "whilelt p4.b, x26, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
".inst 0xd503467f // SMSTOP\n"
:
: [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr), [rescale_ptr] "r" (&rescale_value), [shift_ptr] "r" (&shift_value)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
index e3b9c98d80..d25bec0edb 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -63,82 +63,82 @@ void sme_s8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
pad_left, pad_top, pad_right, pad_bottom);
__asm__ __volatile__(
- "ldr x21, [%x[args], %[offsetof_outptrs]]\n"
+ "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
".inst 0xd503477f // SMSTART ZA\n"
- "mov x15, #0x0\n"
- "ptrue p2.b\n"
- "ldr x20, [%x[args], %[offsetof_inptrs]]\n"
"mov x14, #0x0\n"
- "ldr x13, [%x[args], %[offsetof_n_channels]]\n"
- "whilelt p1.b, x15, x13\n"
- "ldp x12, x11, [x21, #0x0]\n"
- "ldp x10, x9, [x21, #0x10]\n"
- "ldp x28, x27, [x20, #0x0]\n"
- "ld1b { z30.b }, p1/Z, [x27, x15]\n"
- "ldp x26, x25, [x20, #0x10]\n"
- "ld1b { z29.b }, p1/Z, [x25, x15]\n"
- "ldp x24, x23, [x20, #0x20]\n"
- "ld1b { z28.b }, p1/Z, [x24, x15]\n"
- "ldp x22, x21, [x20, #0x30]\n"
- "ld1b { z27.b }, p1/Z, [x21, x15]\n"
- "ldr x20, [x20, #0x40]\n"
- "ld1b { z26.b }, p1/Z, [x28, x15]\n"
- "ld1b { z25.b }, p1/Z, [x26, x15]\n"
- "ld1b { z24.b }, p1/Z, [x23, x15]\n"
- "ld1b { z23.b }, p1/Z, [x22, x15]\n"
- "ld1b { z19.b }, p1/Z, [x20, x15]\n"
- "incw x15\n"
- "whilelt p1.b, x15, x13\n"
+ "ptrue p2.b\n"
+ "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+ "mov x13, #0x0\n"
+ "ldr x12, [%x[args], %[offsetof_n_channels]]\n"
+ "whilelt p1.b, x14, x12\n"
+ "ldp x11, x10, [x20, #0x0]\n"
+ "ldp x9, x28, [x20, #0x10]\n"
+ "ldp x27, x26, [x19, #0x0]\n"
+ "ld1b { z29.b }, p1/Z, [x26, x14]\n"
+ "ldp x25, x24, [x19, #0x10]\n"
+ "ld1b { z28.b }, p1/Z, [x24, x14]\n"
+ "ldp x23, x22, [x19, #0x20]\n"
+ "ld1b { z27.b }, p1/Z, [x23, x14]\n"
+ "ldp x21, x20, [x19, #0x30]\n"
+ "ld1b { z26.b }, p1/Z, [x20, x14]\n"
+ "ldr x19, [x19, #0x40]\n"
+ "ld1b { z20.b }, p1/Z, [x27, x14]\n"
+ "ld1b { z25.b }, p1/Z, [x22, x14]\n"
+ "ld1b { z24.b }, p1/Z, [x25, x14]\n"
+ "ld1b { z23.b }, p1/Z, [x21, x14]\n"
+ "ld1b { z19.b }, p1/Z, [x19, x14]\n"
+ "incw x14\n"
+ "whilelt p1.b, x14, x12\n"
"b.none 2f\n"
"1:" // Vector: Loop
- "movprfx z22, z30\n smax z22.b, p2/M, z22.b, z28.b\n"
- "movprfx z21, z28\n smax z21.b, p2/M, z21.b, z27.b\n"
- "ld1b { z30.b }, p1/Z, [x27, x15]\n"
- "whilelt p0.b, x14, x13\n"
- "movprfx z20, z29\n smax z20.b, p2/M, z20.b, z26.b\n"
- "movprfx z18, z25\n smax z18.b, p2/M, z18.b, z24.b\n"
- "ld1b { z28.b }, p1/Z, [x24, x15]\n"
- "movprfx z17, z29\n smax z17.b, p2/M, z17.b, z23.b\n"
- "movprfx z16, z24\n smax z16.b, p2/M, z16.b, z19.b\n"
- "ld1b { z27.b }, p1/Z, [x21, x15]\n"
- "ld1b { z29.b }, p1/Z, [x25, x15]\n"
- "movprfx z19, z22\n smax z19.b, p2/M, z19.b, z20.b\n"
- "smax z18.b, p2/M, z18.b, z22.b\n"
- "ld1b { z26.b }, p1/Z, [x28, x15]\n"
+ "movprfx z22, z29\n smax z22.b, p2/M, z22.b, z27.b\n"
+ "movprfx z21, z27\n smax z21.b, p2/M, z21.b, z26.b\n"
+ "ld1b { z29.b }, p1/Z, [x26, x14]\n"
+ "whilelt p0.b, x13, x12\n"
+ "movprfx z18, z28\n smax z18.b, p2/M, z18.b, z20.b\n"
+ "movprfx z20, z25\n smax z20.b, p2/M, z20.b, z24.b\n"
+ "ld1b { z27.b }, p1/Z, [x23, x14]\n"
+ "movprfx z17, z23\n smax z17.b, p2/M, z17.b, z28.b\n"
+ "movprfx z16, z25\n smax z16.b, p2/M, z16.b, z19.b\n"
+ "ld1b { z26.b }, p1/Z, [x20, x14]\n"
+ "ld1b { z28.b }, p1/Z, [x24, x14]\n"
+ "movprfx z19, z18\n smax z19.b, p2/M, z19.b, z22.b\n"
+ "movprfx z18, z22\n smax z18.b, p2/M, z18.b, z20.b\n"
+ "ld1b { z20.b }, p1/Z, [x27, x14]\n"
"smax z17.b, p2/M, z17.b, z21.b\n"
"smax z16.b, p2/M, z16.b, z21.b\n"
- "ld1b { z25.b }, p1/Z, [x26, x15]\n"
- "st1b { z19.b }, p0, [x12, x14]\n"
- "ld1b { z24.b }, p1/Z, [x23, x15]\n"
- "st1b { z18.b }, p0, [x11, x14]\n"
- "ld1b { z23.b }, p1/Z, [x22, x15]\n"
- "st1b { z17.b }, p0, [x10, x14]\n"
- "ld1b { z19.b }, p1/Z, [x20, x15]\n"
- "incw x15\n"
- "whilelt p1.b, x15, x13\n"
- "st1b { z16.b }, p0, [x9, x14]\n"
+ "ld1b { z25.b }, p1/Z, [x22, x14]\n"
+ "st1b { z19.b }, p0, [x11, x13]\n"
+ "ld1b { z24.b }, p1/Z, [x25, x14]\n"
+ "st1b { z18.b }, p0, [x10, x13]\n"
+ "ld1b { z23.b }, p1/Z, [x21, x14]\n"
+ "st1b { z17.b }, p0, [x9, x13]\n"
+ "ld1b { z19.b }, p1/Z, [x19, x14]\n"
"incw x14\n"
+ "whilelt p1.b, x14, x12\n"
+ "st1b { z16.b }, p0, [x28, x13]\n"
+ "incw x13\n"
"b.any 1b\n"
"2:" // Vector: Tail
- "movprfx z22, z30\n smax z22.b, p2/M, z22.b, z28.b\n"
- "movprfx z21, z28\n smax z21.b, p2/M, z21.b, z27.b\n"
- "whilelt p0.b, x14, x13\n"
- "movprfx z20, z29\n smax z20.b, p2/M, z20.b, z26.b\n"
- "movprfx z18, z25\n smax z18.b, p2/M, z18.b, z24.b\n"
- "movprfx z17, z29\n smax z17.b, p2/M, z17.b, z23.b\n"
- "movprfx z16, z24\n smax z16.b, p2/M, z16.b, z19.b\n"
- "movprfx z19, z22\n smax z19.b, p2/M, z19.b, z20.b\n"
- "smax z18.b, p2/M, z18.b, z22.b\n"
- "st1b { z19.b }, p0, [x12, x14]\n"
+ "movprfx z22, z29\n smax z22.b, p2/M, z22.b, z27.b\n"
+ "movprfx z21, z27\n smax z21.b, p2/M, z21.b, z26.b\n"
+ "whilelt p0.b, x13, x12\n"
+ "movprfx z18, z28\n smax z18.b, p2/M, z18.b, z20.b\n"
+ "movprfx z20, z25\n smax z20.b, p2/M, z20.b, z24.b\n"
+ "movprfx z17, z23\n smax z17.b, p2/M, z17.b, z28.b\n"
+ "movprfx z16, z25\n smax z16.b, p2/M, z16.b, z19.b\n"
+ "movprfx z19, z18\n smax z19.b, p2/M, z19.b, z22.b\n"
+ "movprfx z18, z22\n smax z18.b, p2/M, z18.b, z20.b\n"
+ "st1b { z19.b }, p0, [x11, x13]\n"
"smax z17.b, p2/M, z17.b, z21.b\n"
"smax z16.b, p2/M, z16.b, z21.b\n"
- "st1b { z18.b }, p0, [x11, x14]\n"
- "st1b { z17.b }, p0, [x10, x14]\n"
- "st1b { z16.b }, p0, [x9, x14]\n"
+ "st1b { z18.b }, p0, [x10, x13]\n"
+ "st1b { z17.b }, p0, [x9, x13]\n"
+ "st1b { z16.b }, p0, [x28, x13]\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
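
For reference, a minimal scalar sketch of the pooling computed by the 2x2 stride-1 kernel above: a 2x2 max window at stride 1 producing a 2x2 output tile consumes a 3x3 input patch, which is why each iteration issues nine input loads and four stores per channel vector. The function and parameter names below are illustrative, not taken from the library.

#include <algorithm>
#include <cstdint>

// 2x2 max pool, stride 1, 2x2 output tile: nine inputs in, four outputs out.
void max_2x2_s1_output2x2_ref(const int8_t *const inptrs[9],  // 3x3 patch, row-major
                              int8_t *const outptrs[4],       // 2x2 tile, row-major
                              unsigned int n_channels)
{
  for (unsigned int c = 0; c < n_channels; c++)
  {
    for (unsigned int oi = 0; oi < 2; oi++)
    {
      for (unsigned int oj = 0; oj < 2; oj++)
      {
        // Max over the 2x2 window whose top-left corner is (oi, oj).
        int8_t v =      inptrs[(oi + 0) * 3 + (oj + 0)][c];
        v = std::max(v, inptrs[(oi + 0) * 3 + (oj + 1)][c]);
        v = std::max(v, inptrs[(oi + 1) * 3 + (oj + 0)][c]);
        v = std::max(v, inptrs[(oi + 1) * 3 + (oj + 1)][c]);
        outptrs[oi * 2 + oj][c] = v;
      }
    }
  }
}

The SME kernel performs the same reduction on whole Z vectors of channels per smax, using the whilelt predicates to handle the ragged tail of n_channels.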
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_max_generic_depthfirst/generic.cpp
index 4e6cad6e92..86ad4fec27 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,82 +40,82 @@ void sme_s8_nhwc_max_generic_depthfirst_impl(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "mov x9, #0x0\n"
- "cntb x28\n"
- "cntb x27, ALL, MUL #2\n"
- "cntb x26, ALL, MUL #3\n"
- "whilelt p4.b, x9, %x[n_channels]\n"
- "whilelt p3.b, x28, %x[n_channels]\n"
- "whilelt p2.b, x27, %x[n_channels]\n"
- "whilelt p1.b, x26, %x[n_channels]\n"
+ "mov x28, #0x0\n"
+ "cntb x27\n"
+ "cntb x26, ALL, MUL #2\n"
+ "cntb x25, ALL, MUL #3\n"
+ "whilelt p4.b, x28, %x[n_channels]\n"
+ "whilelt p3.b, x27, %x[n_channels]\n"
+ "whilelt p2.b, x26, %x[n_channels]\n"
+ "whilelt p1.b, x25, %x[n_channels]\n"
"ptrue p0.b\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
- "lsr x25, %x[n_valid_cells], #0x2\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"mov z4.b, #0x80\n"
"mov z3.b, #0x80\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"mov z2.b, #0x80\n"
"mov z1.b, #0x80\n"
- "cbz x25, 4f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "subs x25, x25, #0x1\n"
- "ld1b { z0.b }, p4/Z, [x24, x9]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ld1b { z31.b }, p4/Z, [x23, x9]\n"
- "ld1b { z23.b }, p4/Z, [x22, x9]\n"
- "ld1b { z30.b }, p4/Z, [x21, x9]\n"
- "ld1b { z18.b }, p3/Z, [x24, x28]\n"
- "ld1b { z29.b }, p3/Z, [x23, x28]\n"
- "ld1b { z22.b }, p3/Z, [x22, x28]\n"
- "ld1b { z28.b }, p3/Z, [x21, x28]\n"
- "ld1b { z17.b }, p2/Z, [x24, x27]\n"
- "ld1b { z27.b }, p2/Z, [x23, x27]\n"
- "ld1b { z21.b }, p2/Z, [x22, x27]\n"
- "ld1b { z26.b }, p2/Z, [x21, x27]\n"
- "ld1b { z16.b }, p1/Z, [x24, x26]\n"
- "ld1b { z25.b }, p1/Z, [x23, x26]\n"
- "ld1b { z20.b }, p1/Z, [x22, x26]\n"
- "ld1b { z24.b }, p1/Z, [x21, x26]\n"
+ "cbz x24, 4f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "subs x24, x24, #0x1\n"
+ "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "ld1b { z31.b }, p4/Z, [x22, x28]\n"
+ "ld1b { z23.b }, p4/Z, [x21, x28]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x28]\n"
+ "ld1b { z18.b }, p3/Z, [x23, x27]\n"
+ "ld1b { z29.b }, p3/Z, [x22, x27]\n"
+ "ld1b { z22.b }, p3/Z, [x21, x27]\n"
+ "ld1b { z28.b }, p3/Z, [x20, x27]\n"
+ "ld1b { z17.b }, p2/Z, [x23, x26]\n"
+ "ld1b { z27.b }, p2/Z, [x22, x26]\n"
+ "ld1b { z21.b }, p2/Z, [x21, x26]\n"
+ "ld1b { z26.b }, p2/Z, [x20, x26]\n"
+ "ld1b { z16.b }, p1/Z, [x23, x25]\n"
+ "ld1b { z25.b }, p1/Z, [x22, x25]\n"
+ "ld1b { z20.b }, p1/Z, [x21, x25]\n"
+ "ld1b { z24.b }, p1/Z, [x20, x25]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
"movprfx z19, z0\n smax z19.b, p0/M, z19.b, z31.b\n"
"smax z23.b, p0/M, z23.b, z30.b\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "subs x25, x25, #0x1\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "subs x24, x24, #0x1\n"
"smax z18.b, p0/M, z18.b, z29.b\n"
"smax z22.b, p0/M, z22.b, z28.b\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
"smax z17.b, p0/M, z17.b, z27.b\n"
"smax z21.b, p0/M, z21.b, z26.b\n"
- "ld1b { z0.b }, p4/Z, [x24, x9]\n"
+ "ld1b { z0.b }, p4/Z, [x23, x28]\n"
"smax z16.b, p0/M, z16.b, z25.b\n"
"smax z20.b, p0/M, z20.b, z24.b\n"
- "ld1b { z31.b }, p4/Z, [x23, x9]\n"
+ "ld1b { z31.b }, p4/Z, [x22, x28]\n"
"smax z19.b, p0/M, z19.b, z23.b\n"
"smax z18.b, p0/M, z18.b, z22.b\n"
- "ld1b { z23.b }, p4/Z, [x22, x9]\n"
+ "ld1b { z23.b }, p4/Z, [x21, x28]\n"
"smax z17.b, p0/M, z17.b, z21.b\n"
"smax z16.b, p0/M, z16.b, z20.b\n"
- "ld1b { z30.b }, p4/Z, [x21, x9]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x28]\n"
"smax z4.b, p0/M, z4.b, z19.b\n"
"smax z3.b, p0/M, z3.b, z18.b\n"
- "ld1b { z18.b }, p3/Z, [x24, x28]\n"
+ "ld1b { z18.b }, p3/Z, [x23, x27]\n"
"smax z2.b, p0/M, z2.b, z17.b\n"
"smax z1.b, p0/M, z1.b, z16.b\n"
- "ld1b { z29.b }, p3/Z, [x23, x28]\n"
- "ld1b { z22.b }, p3/Z, [x22, x28]\n"
- "ld1b { z28.b }, p3/Z, [x21, x28]\n"
- "ld1b { z17.b }, p2/Z, [x24, x27]\n"
- "ld1b { z27.b }, p2/Z, [x23, x27]\n"
- "ld1b { z21.b }, p2/Z, [x22, x27]\n"
- "ld1b { z26.b }, p2/Z, [x21, x27]\n"
- "ld1b { z16.b }, p1/Z, [x24, x26]\n"
- "ld1b { z25.b }, p1/Z, [x23, x26]\n"
- "ld1b { z20.b }, p1/Z, [x22, x26]\n"
- "ld1b { z24.b }, p1/Z, [x21, x26]\n"
+ "ld1b { z29.b }, p3/Z, [x22, x27]\n"
+ "ld1b { z22.b }, p3/Z, [x21, x27]\n"
+ "ld1b { z28.b }, p3/Z, [x20, x27]\n"
+ "ld1b { z17.b }, p2/Z, [x23, x26]\n"
+ "ld1b { z27.b }, p2/Z, [x22, x26]\n"
+ "ld1b { z21.b }, p2/Z, [x21, x26]\n"
+ "ld1b { z26.b }, p2/Z, [x20, x26]\n"
+ "ld1b { z16.b }, p1/Z, [x23, x25]\n"
+ "ld1b { z25.b }, p1/Z, [x22, x25]\n"
+ "ld1b { z20.b }, p1/Z, [x21, x25]\n"
+ "ld1b { z24.b }, p1/Z, [x20, x25]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
"movprfx z19, z0\n smax z19.b, p0/M, z19.b, z31.b\n"
@@ -135,61 +135,61 @@ void sme_s8_nhwc_max_generic_depthfirst_impl(
"smax z2.b, p0/M, z2.b, z17.b\n"
"smax z1.b, p0/M, z1.b, z16.b\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ld1b { z0.b }, p4/Z, [x24, x9]\n"
- "subs x21, x21, #0x1\n"
+ "ldr x23, [x19], #0x8\n"
+ "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+ "subs x20, x20, #0x1\n"
"smax z4.b, p0/M, z4.b, z0.b\n"
- "ld1b { z18.b }, p3/Z, [x24, x28]\n"
+ "ld1b { z18.b }, p3/Z, [x23, x27]\n"
"smax z3.b, p0/M, z3.b, z18.b\n"
- "ld1b { z17.b }, p2/Z, [x24, x27]\n"
+ "ld1b { z17.b }, p2/Z, [x23, x26]\n"
"smax z2.b, p0/M, z2.b, z17.b\n"
- "ld1b { z16.b }, p1/Z, [x24, x26]\n"
+ "ld1b { z16.b }, p1/Z, [x23, x25]\n"
"smax z1.b, p0/M, z1.b, z16.b\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "st1b { z4.b }, p4, [%x[outptr], x9]\n"
- "incb x9, ALL, MUL #4\n"
- "st1b { z3.b }, p3, [%x[outptr], x28]\n"
+ "st1b { z4.b }, p4, [%x[outptr], x28]\n"
"incb x28, ALL, MUL #4\n"
- "st1b { z2.b }, p2, [%x[outptr], x27]\n"
+ "st1b { z3.b }, p3, [%x[outptr], x27]\n"
"incb x27, ALL, MUL #4\n"
- "st1b { z1.b }, p1, [%x[outptr], x26]\n"
+ "st1b { z2.b }, p2, [%x[outptr], x26]\n"
"incb x26, ALL, MUL #4\n"
- "whilelt p1.b, x26, %x[n_channels]\n"
+ "st1b { z1.b }, p1, [%x[outptr], x25]\n"
+ "incb x25, ALL, MUL #4\n"
+ "whilelt p1.b, x25, %x[n_channels]\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p4.b, x9, %x[n_channels]\n"
+ "whilelt p4.b, x28, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
- "lsr x25, %x[n_valid_cells], #0x2\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"mov z4.b, #0x80\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 11f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "subs x25, x25, #0x1\n"
- "ld1b { z0.b }, p4/Z, [x24, x9]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ld1b { z31.b }, p4/Z, [x23, x9]\n"
- "ld1b { z23.b }, p4/Z, [x22, x9]\n"
- "ld1b { z30.b }, p4/Z, [x21, x9]\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 11f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "subs x24, x24, #0x1\n"
+ "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "ld1b { z31.b }, p4/Z, [x22, x28]\n"
+ "ld1b { z23.b }, p4/Z, [x21, x28]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x28]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
"movprfx z19, z0\n smax z19.b, p0/M, z19.b, z31.b\n"
"smax z23.b, p0/M, z23.b, z30.b\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "subs x25, x25, #0x1\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "subs x24, x24, #0x1\n"
"smax z19.b, p0/M, z19.b, z23.b\n"
- "ldp x22, x21, [x20, #0x10]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
"smax z4.b, p0/M, z4.b, z19.b\n"
- "add x20, x20, #0x20\n"
- "ld1b { z0.b }, p4/Z, [x24, x9]\n"
- "ld1b { z31.b }, p4/Z, [x23, x9]\n"
- "ld1b { z23.b }, p4/Z, [x22, x9]\n"
- "ld1b { z30.b }, p4/Z, [x21, x9]\n"
+ "add x19, x19, #0x20\n"
+ "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+ "ld1b { z31.b }, p4/Z, [x22, x28]\n"
+ "ld1b { z23.b }, p4/Z, [x21, x28]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x28]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
"movprfx z19, z0\n smax z19.b, p0/M, z19.b, z31.b\n"
@@ -197,24 +197,24 @@ void sme_s8_nhwc_max_generic_depthfirst_impl(
"smax z19.b, p0/M, z19.b, z23.b\n"
"smax z4.b, p0/M, z4.b, z19.b\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ld1b { z0.b }, p4/Z, [x24, x9]\n"
- "subs x21, x21, #0x1\n"
+ "ldr x23, [x19], #0x8\n"
+ "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+ "subs x20, x20, #0x1\n"
"smax z4.b, p0/M, z4.b, z0.b\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "st1b { z4.b }, p4, [%x[outptr], x9]\n"
- "incb x9\n"
- "whilelt p4.b, x9, %x[n_channels]\n"
+ "st1b { z4.b }, p4, [%x[outptr], x28]\n"
+ "incb x28\n"
+ "whilelt p4.b, x28, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
".inst 0xd503467f // SMSTOP\n"
:
: [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
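
The generic max kernel above reduces an arbitrary number of input cells per output point; a scalar sketch under illustrative names:

#include <algorithm>
#include <cstdint>

// Generic depthfirst max: the accumulator starts at INT8_MIN (the
// "mov z4.b, #0x80" above) and every valid cell is folded in with max.
void max_generic_ref(uint64_t n_channels, uint64_t n_valid_cells,
                     const int8_t *const *inptrs, int8_t *outptr)
{
  for (uint64_t c = 0; c < n_channels; c++)
  {
    int8_t acc = INT8_MIN;
    for (uint64_t i = 0; i < n_valid_cells; i++)
    {
      acc = std::max(acc, inptrs[i][c]);
    }
    outptr[c] = acc;
  }
}

The assembly unrolls the cell loop four pointers at a time (the "4 inputs loop") with a single-pointer remainder loop, and covers four Z vectors of channels per outer iteration before falling back to the single-vector path.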
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8q_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8q_nhwc_avg_generic_depthfirst/generic.cpp
index cc58d3e9e2..28b7426d11 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8q_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8q_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -86,13 +86,12 @@ void sme_s8q_nhwc_avg_generic_depthfirst_impl(
f_rescale_value *= 2.0f;
}
- int64_t long_rescale_value = round(f_rescale_value * static_cast<float>(1ll << 31));
- if (long_rescale_value == (1ll << 31))
+ rescale_value = static_cast<int32_t>(round(f_rescale_value * static_cast<float>(1ll << 31)));
+ if (static_cast<int64_t>(rescale_value) == (1ll << 31))
{
shift_value++;
- long_rescale_value >>= 1;
+ rescale_value >>= 1;
}
- rescale_value = static_cast<int32_t>(long_rescale_value);
}
// Combine together the rescale value for the requantization and the scaling
@@ -114,21 +113,21 @@ void sme_s8q_nhwc_avg_generic_depthfirst_impl(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "mov x27, #0x0\n"
- "cntb x26\n"
- "cntb x25, ALL, MUL #2\n"
- "cntb x24, ALL, MUL #3\n"
- "whilelt p4.b, x27, %x[n_channels]\n"
- "whilelt p3.b, x26, %x[n_channels]\n"
- "whilelt p2.b, x25, %x[n_channels]\n"
- "whilelt p1.b, x24, %x[n_channels]\n"
+ "mov x26, #0x0\n"
+ "cntb x25\n"
+ "cntb x24, ALL, MUL #2\n"
+ "cntb x23, ALL, MUL #3\n"
+ "whilelt p4.b, x26, %x[n_channels]\n"
+ "whilelt p3.b, x25, %x[n_channels]\n"
+ "whilelt p2.b, x24, %x[n_channels]\n"
+ "whilelt p1.b, x23, %x[n_channels]\n"
"ptrue p0.b\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
- "lsr x23, %x[n_valid_cells], #0x1\n"
+ "lsr x22, %x[n_valid_cells], #0x1\n"
"mov z15.s, #0x0\n"
"mov z14.s, #0x0\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"mov z13.s, #0x0\n"
"mov z12.s, #0x0\n"
"mov z11.s, #0x0\n"
@@ -143,49 +142,49 @@ void sme_s8q_nhwc_avg_generic_depthfirst_impl(
"mov z2.s, #0x0\n"
"mov z1.s, #0x0\n"
"mov z0.s, #0x0\n"
- "cbz x23, 4f\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "subs x23, x23, #0x1\n"
- "add x20, x20, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
- "ld1b { z30.b }, p4/Z, [x21, x27]\n"
- "ld1b { z29.b }, p3/Z, [x22, x26]\n"
- "ld1b { z28.b }, p3/Z, [x21, x26]\n"
- "ld1b { z27.b }, p2/Z, [x22, x25]\n"
- "ld1b { z26.b }, p2/Z, [x21, x25]\n"
- "ld1b { z25.b }, p1/Z, [x22, x24]\n"
- "ld1b { z24.b }, p1/Z, [x21, x24]\n"
+ "cbz x22, 4f\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
+ "add x19, x19, #0x10\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+ "ld1b { z29.b }, p3/Z, [x21, x25]\n"
+ "ld1b { z28.b }, p3/Z, [x20, x25]\n"
+ "ld1b { z27.b }, p2/Z, [x21, x24]\n"
+ "ld1b { z26.b }, p2/Z, [x20, x24]\n"
+ "ld1b { z25.b }, p1/Z, [x21, x23]\n"
+ "ld1b { z24.b }, p1/Z, [x20, x23]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 2 inputs loop
".inst 0x455e03f7 // saddlb z23.h, z31.b, z30.b\n"
".inst 0x455e07f6 // saddlt z22.h, z31.b, z30.b\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "subs x23, x23, #0x1\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
".inst 0x455c03b5 // saddlb z21.h, z29.b, z28.b\n"
".inst 0x455c07b4 // saddlt z20.h, z29.b, z28.b\n"
- "add x20, x20, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
+ "add x19, x19, #0x10\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
".inst 0x455a0373 // saddlb z19.h, z27.b, z26.b\n"
".inst 0x455a0772 // saddlt z18.h, z27.b, z26.b\n"
- "ld1b { z30.b }, p4/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x26]\n"
".inst 0x45580331 // saddlb z17.h, z25.b, z24.b\n"
".inst 0x45580730 // saddlt z16.h, z25.b, z24.b\n"
- "ld1b { z29.b }, p3/Z, [x22, x26]\n"
+ "ld1b { z29.b }, p3/Z, [x21, x25]\n"
".inst 0x459741ef // saddwb z15.s, z15.s, z23.h\n"
".inst 0x459745ce // saddwt z14.s, z14.s, z23.h\n"
- "ld1b { z28.b }, p3/Z, [x21, x26]\n"
+ "ld1b { z28.b }, p3/Z, [x20, x25]\n"
".inst 0x459641ad // saddwb z13.s, z13.s, z22.h\n"
".inst 0x4596458c // saddwt z12.s, z12.s, z22.h\n"
- "ld1b { z27.b }, p2/Z, [x22, x25]\n"
+ "ld1b { z27.b }, p2/Z, [x21, x24]\n"
".inst 0x4595416b // saddwb z11.s, z11.s, z21.h\n"
".inst 0x4595454a // saddwt z10.s, z10.s, z21.h\n"
- "ld1b { z26.b }, p2/Z, [x21, x25]\n"
+ "ld1b { z26.b }, p2/Z, [x20, x24]\n"
".inst 0x45944129 // saddwb z9.s, z9.s, z20.h\n"
".inst 0x45944508 // saddwt z8.s, z8.s, z20.h\n"
- "ld1b { z25.b }, p1/Z, [x22, x24]\n"
+ "ld1b { z25.b }, p1/Z, [x21, x23]\n"
".inst 0x459340e7 // saddwb z7.s, z7.s, z19.h\n"
".inst 0x459344c6 // saddwt z6.s, z6.s, z19.h\n"
- "ld1b { z24.b }, p1/Z, [x21, x24]\n"
+ "ld1b { z24.b }, p1/Z, [x20, x23]\n"
".inst 0x459240a5 // saddwb z5.s, z5.s, z18.h\n"
".inst 0x45924484 // saddwt z4.s, z4.s, z18.h\n"
".inst 0x45914063 // saddwb z3.s, z3.s, z17.h\n"
@@ -219,21 +218,21 @@ void sme_s8q_nhwc_avg_generic_depthfirst_impl(
".inst 0x45904021 // saddwb z1.s, z1.s, z16.h\n"
".inst 0x45904400 // saddwt z0.s, z0.s, z16.h\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x1\n"
+ "ands x20, %x[n_valid_cells], #0x1\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x22, [x20], #0x8\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
+ "ldr x21, [x19], #0x8\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
".inst 0x4508a3f7 // sshllb z23.h, z31.b, #0x0\n"
".inst 0x4508a7f6 // sshllt z22.h, z31.b, #0x0\n"
- "ld1b { z29.b }, p3/Z, [x22, x26]\n"
+ "ld1b { z29.b }, p3/Z, [x21, x25]\n"
".inst 0x4508a3b5 // sshllb z21.h, z29.b, #0x0\n"
".inst 0x4508a7b4 // sshllt z20.h, z29.b, #0x0\n"
- "subs x21, x21, #0x1\n"
- "ld1b { z27.b }, p2/Z, [x22, x25]\n"
+ "subs x20, x20, #0x1\n"
+ "ld1b { z27.b }, p2/Z, [x21, x24]\n"
".inst 0x4508a373 // sshllb z19.h, z27.b, #0x0\n"
".inst 0x4508a772 // sshllt z18.h, z27.b, #0x0\n"
- "ld1b { z25.b }, p1/Z, [x22, x24]\n"
+ "ld1b { z25.b }, p1/Z, [x21, x23]\n"
".inst 0x4508a331 // sshllb z17.h, z25.b, #0x0\n"
".inst 0x4508a730 // sshllt z16.h, z25.b, #0x0\n"
".inst 0x459741ef // saddwb z15.s, z15.s, z23.h\n"
@@ -349,47 +348,47 @@ void sme_s8q_nhwc_avg_generic_depthfirst_impl(
"trn1 z19.h, z1.h, z0.h\n"
"trn1 z16.b, z23.b, z16.b\n"
"trn1 z18.b, z22.b, z18.b\n"
- "st1b { z16.b }, p4, [%x[outptr], x27]\n"
- "incb x27, ALL, MUL #4\n"
+ "st1b { z16.b }, p4, [%x[outptr], x26]\n"
+ "incb x26, ALL, MUL #4\n"
"trn1 z17.b, z21.b, z17.b\n"
"trn1 z16.b, z20.b, z19.b\n"
- "st1b { z18.b }, p3, [%x[outptr], x26]\n"
- "incb x26, ALL, MUL #4\n"
- "st1b { z17.b }, p2, [%x[outptr], x25]\n"
+ "st1b { z18.b }, p3, [%x[outptr], x25]\n"
"incb x25, ALL, MUL #4\n"
- "st1b { z16.b }, p1, [%x[outptr], x24]\n"
+ "st1b { z17.b }, p2, [%x[outptr], x24]\n"
"incb x24, ALL, MUL #4\n"
- "whilelt p1.b, x24, %x[n_channels]\n"
+ "st1b { z16.b }, p1, [%x[outptr], x23]\n"
+ "incb x23, ALL, MUL #4\n"
+ "whilelt p1.b, x23, %x[n_channels]\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p4.b, x27, %x[n_channels]\n"
+ "whilelt p4.b, x26, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
- "lsr x23, %x[n_valid_cells], #0x1\n"
+ "lsr x22, %x[n_valid_cells], #0x1\n"
"mov z15.s, #0x0\n"
"mov z14.s, #0x0\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"mov z13.s, #0x0\n"
"mov z12.s, #0x0\n"
- "cbz x23, 11f\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "subs x23, x23, #0x1\n"
- "add x20, x20, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
- "ld1b { z30.b }, p4/Z, [x21, x27]\n"
+ "cbz x22, 11f\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
+ "add x19, x19, #0x10\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x26]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 2 inputs loop
".inst 0x455e03f7 // saddlb z23.h, z31.b, z30.b\n"
".inst 0x455e07f6 // saddlt z22.h, z31.b, z30.b\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "subs x23, x23, #0x1\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
".inst 0x459741ef // saddwb z15.s, z15.s, z23.h\n"
".inst 0x459745ce // saddwt z14.s, z14.s, z23.h\n"
- "add x20, x20, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
+ "add x19, x19, #0x10\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
".inst 0x459641ad // saddwb z13.s, z13.s, z22.h\n"
".inst 0x4596458c // saddwt z12.s, z12.s, z22.h\n"
- "ld1b { z30.b }, p4/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x26]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 2 inputs tail
".inst 0x455e03f7 // saddlb z23.h, z31.b, z30.b\n"
@@ -399,14 +398,14 @@ void sme_s8q_nhwc_avg_generic_depthfirst_impl(
".inst 0x459641ad // saddwb z13.s, z13.s, z22.h\n"
".inst 0x4596458c // saddwt z12.s, z12.s, z22.h\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x1\n"
+ "ands x20, %x[n_valid_cells], #0x1\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x22, [x20], #0x8\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
+ "ldr x21, [x19], #0x8\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
".inst 0x4508a3f7 // sshllb z23.h, z31.b, #0x0\n"
".inst 0x4508a7f6 // sshllt z22.h, z31.b, #0x0\n"
- "subs x21, x21, #0x1\n"
+ "subs x20, x20, #0x1\n"
".inst 0x459741ef // saddwb z15.s, z15.s, z23.h\n"
".inst 0x459745ce // saddwt z14.s, z14.s, z23.h\n"
".inst 0x459641ad // saddwb z13.s, z13.s, z22.h\n"
@@ -441,19 +440,19 @@ void sme_s8q_nhwc_avg_generic_depthfirst_impl(
"smin z12.s, p0/M, z12.s, z19.s\n"
"trn1 z16.h, z13.h, z12.h\n"
"trn1 z16.b, z23.b, z16.b\n"
- "st1b { z16.b }, p4, [%x[outptr], x27]\n"
- "incb x27\n"
- "whilelt p4.b, x27, %x[n_channels]\n"
+ "st1b { z16.b }, p4, [%x[outptr], x26]\n"
+ "incb x26\n"
+ "whilelt p4.b, x26, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
".inst 0xd503467f // SMSTOP\n"
:
: [combined_rescale_value] "r" (&combined_rescale_value), [inptrs] "r" (inptrs), [left_shift] "r" (&left_shift), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr), [right_shift] "r" (&right_shift)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
} // namespace pooling
} // namespace arm_conv
-#endif // defined(ARM_COMPUTE_ENABLE_SVE)
+#endif // defined(ARM_COMPUTE_ENABLE_SME)
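
The rescale hunk in this file reverts the overflow handling around the Q0.31 multiplier: round(f_rescale_value * 2^31) can land exactly on 2^31, one past INT32_MAX, so the removed ('-') side kept a 64-bit intermediate and tested the edge case before narrowing, while the restored ('+') side rounds straight into the int32_t and re-widens it for the comparison. A sketch of the 64-bit-intermediate form, assuming f_rescale_value and shift_value were prepared by the preceding normalisation loop:

#include <cmath>
#include <cstdint>

// Build a Q0.31 fixed-point multiplier from a float scale.
void make_q31_multiplier(float f_rescale_value, int &shift_value,
                         int32_t &rescale_value)
{
  int64_t long_rescale_value = static_cast<int64_t>(
      std::round(f_rescale_value * static_cast<float>(1ll << 31)));
  if (long_rescale_value == (1ll << 31))  // rounded up to exactly 2^31
  {
    shift_value++;             // absorb a factor of two into the shift...
    long_rescale_value >>= 1;  // ...so the multiplier fits in int32_t again
  }
  rescale_value = static_cast<int32_t>(long_rescale_value);
}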
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8q_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8q_nhwc_max_generic_depthfirst/generic.cpp
index 3850ebf464..3d13991b43 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8q_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8q_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,82 +42,82 @@ void sme_s8q_nhwc_max_generic_depthfirst_impl(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "mov x9, #0x0\n"
- "cntb x28\n"
- "cntb x27, ALL, MUL #2\n"
- "cntb x26, ALL, MUL #3\n"
- "whilelt p4.b, x9, %x[n_channels]\n"
- "whilelt p3.b, x28, %x[n_channels]\n"
- "whilelt p2.b, x27, %x[n_channels]\n"
- "whilelt p1.b, x26, %x[n_channels]\n"
+ "mov x28, #0x0\n"
+ "cntb x27\n"
+ "cntb x26, ALL, MUL #2\n"
+ "cntb x25, ALL, MUL #3\n"
+ "whilelt p4.b, x28, %x[n_channels]\n"
+ "whilelt p3.b, x27, %x[n_channels]\n"
+ "whilelt p2.b, x26, %x[n_channels]\n"
+ "whilelt p1.b, x25, %x[n_channels]\n"
"ptrue p0.b\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
- "lsr x25, %x[n_valid_cells], #0x2\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"mov z4.b, #0x80\n"
"mov z3.b, #0x80\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"mov z2.b, #0x80\n"
"mov z1.b, #0x80\n"
- "cbz x25, 4f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "subs x25, x25, #0x1\n"
- "ld1b { z0.b }, p4/Z, [x24, x9]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ld1b { z31.b }, p4/Z, [x23, x9]\n"
- "ld1b { z23.b }, p4/Z, [x22, x9]\n"
- "ld1b { z30.b }, p4/Z, [x21, x9]\n"
- "ld1b { z18.b }, p3/Z, [x24, x28]\n"
- "ld1b { z29.b }, p3/Z, [x23, x28]\n"
- "ld1b { z22.b }, p3/Z, [x22, x28]\n"
- "ld1b { z28.b }, p3/Z, [x21, x28]\n"
- "ld1b { z17.b }, p2/Z, [x24, x27]\n"
- "ld1b { z27.b }, p2/Z, [x23, x27]\n"
- "ld1b { z21.b }, p2/Z, [x22, x27]\n"
- "ld1b { z26.b }, p2/Z, [x21, x27]\n"
- "ld1b { z16.b }, p1/Z, [x24, x26]\n"
- "ld1b { z25.b }, p1/Z, [x23, x26]\n"
- "ld1b { z20.b }, p1/Z, [x22, x26]\n"
- "ld1b { z24.b }, p1/Z, [x21, x26]\n"
+ "cbz x24, 4f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "subs x24, x24, #0x1\n"
+ "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "ld1b { z31.b }, p4/Z, [x22, x28]\n"
+ "ld1b { z23.b }, p4/Z, [x21, x28]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x28]\n"
+ "ld1b { z18.b }, p3/Z, [x23, x27]\n"
+ "ld1b { z29.b }, p3/Z, [x22, x27]\n"
+ "ld1b { z22.b }, p3/Z, [x21, x27]\n"
+ "ld1b { z28.b }, p3/Z, [x20, x27]\n"
+ "ld1b { z17.b }, p2/Z, [x23, x26]\n"
+ "ld1b { z27.b }, p2/Z, [x22, x26]\n"
+ "ld1b { z21.b }, p2/Z, [x21, x26]\n"
+ "ld1b { z26.b }, p2/Z, [x20, x26]\n"
+ "ld1b { z16.b }, p1/Z, [x23, x25]\n"
+ "ld1b { z25.b }, p1/Z, [x22, x25]\n"
+ "ld1b { z20.b }, p1/Z, [x21, x25]\n"
+ "ld1b { z24.b }, p1/Z, [x20, x25]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
"movprfx z19, z0\n smax z19.b, p0/M, z19.b, z31.b\n"
"smax z23.b, p0/M, z23.b, z30.b\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "subs x25, x25, #0x1\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "subs x24, x24, #0x1\n"
"smax z18.b, p0/M, z18.b, z29.b\n"
"smax z22.b, p0/M, z22.b, z28.b\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
"smax z17.b, p0/M, z17.b, z27.b\n"
"smax z21.b, p0/M, z21.b, z26.b\n"
- "ld1b { z0.b }, p4/Z, [x24, x9]\n"
+ "ld1b { z0.b }, p4/Z, [x23, x28]\n"
"smax z16.b, p0/M, z16.b, z25.b\n"
"smax z20.b, p0/M, z20.b, z24.b\n"
- "ld1b { z31.b }, p4/Z, [x23, x9]\n"
+ "ld1b { z31.b }, p4/Z, [x22, x28]\n"
"smax z19.b, p0/M, z19.b, z23.b\n"
"smax z18.b, p0/M, z18.b, z22.b\n"
- "ld1b { z23.b }, p4/Z, [x22, x9]\n"
+ "ld1b { z23.b }, p4/Z, [x21, x28]\n"
"smax z17.b, p0/M, z17.b, z21.b\n"
"smax z16.b, p0/M, z16.b, z20.b\n"
- "ld1b { z30.b }, p4/Z, [x21, x9]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x28]\n"
"smax z4.b, p0/M, z4.b, z19.b\n"
"smax z3.b, p0/M, z3.b, z18.b\n"
- "ld1b { z18.b }, p3/Z, [x24, x28]\n"
+ "ld1b { z18.b }, p3/Z, [x23, x27]\n"
"smax z2.b, p0/M, z2.b, z17.b\n"
"smax z1.b, p0/M, z1.b, z16.b\n"
- "ld1b { z29.b }, p3/Z, [x23, x28]\n"
- "ld1b { z22.b }, p3/Z, [x22, x28]\n"
- "ld1b { z28.b }, p3/Z, [x21, x28]\n"
- "ld1b { z17.b }, p2/Z, [x24, x27]\n"
- "ld1b { z27.b }, p2/Z, [x23, x27]\n"
- "ld1b { z21.b }, p2/Z, [x22, x27]\n"
- "ld1b { z26.b }, p2/Z, [x21, x27]\n"
- "ld1b { z16.b }, p1/Z, [x24, x26]\n"
- "ld1b { z25.b }, p1/Z, [x23, x26]\n"
- "ld1b { z20.b }, p1/Z, [x22, x26]\n"
- "ld1b { z24.b }, p1/Z, [x21, x26]\n"
+ "ld1b { z29.b }, p3/Z, [x22, x27]\n"
+ "ld1b { z22.b }, p3/Z, [x21, x27]\n"
+ "ld1b { z28.b }, p3/Z, [x20, x27]\n"
+ "ld1b { z17.b }, p2/Z, [x23, x26]\n"
+ "ld1b { z27.b }, p2/Z, [x22, x26]\n"
+ "ld1b { z21.b }, p2/Z, [x21, x26]\n"
+ "ld1b { z26.b }, p2/Z, [x20, x26]\n"
+ "ld1b { z16.b }, p1/Z, [x23, x25]\n"
+ "ld1b { z25.b }, p1/Z, [x22, x25]\n"
+ "ld1b { z20.b }, p1/Z, [x21, x25]\n"
+ "ld1b { z24.b }, p1/Z, [x20, x25]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
"movprfx z19, z0\n smax z19.b, p0/M, z19.b, z31.b\n"
@@ -137,33 +137,33 @@ void sme_s8q_nhwc_max_generic_depthfirst_impl(
"smax z2.b, p0/M, z2.b, z17.b\n"
"smax z1.b, p0/M, z1.b, z16.b\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ld1b { z0.b }, p4/Z, [x24, x9]\n"
- "subs x21, x21, #0x1\n"
+ "ldr x23, [x19], #0x8\n"
+ "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+ "subs x20, x20, #0x1\n"
"smax z4.b, p0/M, z4.b, z0.b\n"
- "ld1b { z18.b }, p3/Z, [x24, x28]\n"
+ "ld1b { z18.b }, p3/Z, [x23, x27]\n"
"smax z3.b, p0/M, z3.b, z18.b\n"
- "ld1b { z17.b }, p2/Z, [x24, x27]\n"
+ "ld1b { z17.b }, p2/Z, [x23, x26]\n"
"smax z2.b, p0/M, z2.b, z17.b\n"
- "ld1b { z16.b }, p1/Z, [x24, x26]\n"
+ "ld1b { z16.b }, p1/Z, [x23, x25]\n"
"smax z1.b, p0/M, z1.b, z16.b\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
".inst 0x4508a097 // sshllb z23.h, z4.b, #0x0\n"
".inst 0x4508a496 // sshllt z22.h, z4.b, #0x0\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
- "ld1rw { z4.s }, p0/Z, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
+ "ld1rw { z4.s }, p0/Z, [x19]\n"
".inst 0x4508a075 // sshllb z21.h, z3.b, #0x0\n"
".inst 0x4508a472 // sshllt z18.h, z3.b, #0x0\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
- "ld1rw { z3.s }, p0/Z, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+ "ld1rw { z3.s }, p0/Z, [x19]\n"
".inst 0x4508a054 // sshllb z20.h, z2.b, #0x0\n"
".inst 0x4508a451 // sshllt z17.h, z2.b, #0x0\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
- "ld1rw { z2.s }, p0/Z, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+ "ld1rw { z2.s }, p0/Z, [x19]\n"
".inst 0x4508a033 // sshllb z19.h, z1.b, #0x0\n"
".inst 0x4508a430 // sshllt z16.h, z1.b, #0x0\n"
".inst 0x4510a2e1 // sshllb z1.s, z23.h, #0x0\n"
@@ -274,48 +274,48 @@ void sme_s8q_nhwc_max_generic_depthfirst_impl(
"trn1 z19.h, z25.h, z24.h\n"
"trn1 z16.b, z23.b, z16.b\n"
"trn1 z18.b, z22.b, z18.b\n"
- "st1b { z16.b }, p4, [%x[outptr], x9]\n"
- "incb x9, ALL, MUL #4\n"
+ "st1b { z16.b }, p4, [%x[outptr], x28]\n"
+ "incb x28, ALL, MUL #4\n"
"trn1 z17.b, z21.b, z17.b\n"
"trn1 z16.b, z20.b, z19.b\n"
- "st1b { z18.b }, p3, [%x[outptr], x28]\n"
- "incb x28, ALL, MUL #4\n"
- "st1b { z17.b }, p2, [%x[outptr], x27]\n"
+ "st1b { z18.b }, p3, [%x[outptr], x27]\n"
"incb x27, ALL, MUL #4\n"
- "st1b { z16.b }, p1, [%x[outptr], x26]\n"
+ "st1b { z17.b }, p2, [%x[outptr], x26]\n"
"incb x26, ALL, MUL #4\n"
- "whilelt p1.b, x26, %x[n_channels]\n"
+ "st1b { z16.b }, p1, [%x[outptr], x25]\n"
+ "incb x25, ALL, MUL #4\n"
+ "whilelt p1.b, x25, %x[n_channels]\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p4.b, x9, %x[n_channels]\n"
+ "whilelt p4.b, x28, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
- "lsr x25, %x[n_valid_cells], #0x2\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"mov z4.b, #0x80\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 11f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "subs x25, x25, #0x1\n"
- "ld1b { z0.b }, p4/Z, [x24, x9]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ld1b { z31.b }, p4/Z, [x23, x9]\n"
- "ld1b { z23.b }, p4/Z, [x22, x9]\n"
- "ld1b { z30.b }, p4/Z, [x21, x9]\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 11f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "subs x24, x24, #0x1\n"
+ "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "ld1b { z31.b }, p4/Z, [x22, x28]\n"
+ "ld1b { z23.b }, p4/Z, [x21, x28]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x28]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
"movprfx z19, z0\n smax z19.b, p0/M, z19.b, z31.b\n"
"smax z23.b, p0/M, z23.b, z30.b\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "subs x25, x25, #0x1\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "subs x24, x24, #0x1\n"
"smax z19.b, p0/M, z19.b, z23.b\n"
- "ldp x22, x21, [x20, #0x10]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
"smax z4.b, p0/M, z4.b, z19.b\n"
- "add x20, x20, #0x20\n"
- "ld1b { z0.b }, p4/Z, [x24, x9]\n"
- "ld1b { z31.b }, p4/Z, [x23, x9]\n"
- "ld1b { z23.b }, p4/Z, [x22, x9]\n"
- "ld1b { z30.b }, p4/Z, [x21, x9]\n"
+ "add x19, x19, #0x20\n"
+ "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+ "ld1b { z31.b }, p4/Z, [x22, x28]\n"
+ "ld1b { z23.b }, p4/Z, [x21, x28]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x28]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
"movprfx z19, z0\n smax z19.b, p0/M, z19.b, z31.b\n"
@@ -323,27 +323,27 @@ void sme_s8q_nhwc_max_generic_depthfirst_impl(
"smax z19.b, p0/M, z19.b, z23.b\n"
"smax z4.b, p0/M, z4.b, z19.b\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ld1b { z0.b }, p4/Z, [x24, x9]\n"
- "subs x21, x21, #0x1\n"
+ "ldr x23, [x19], #0x8\n"
+ "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+ "subs x20, x20, #0x1\n"
"smax z4.b, p0/M, z4.b, z0.b\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
".inst 0x4508a097 // sshllb z23.h, z4.b, #0x0\n"
".inst 0x4508a496 // sshllt z22.h, z4.b, #0x0\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
- "ld1rw { z4.s }, p0/Z, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
+ "ld1rw { z4.s }, p0/Z, [x19]\n"
".inst 0x4510a2e1 // sshllb z1.s, z23.h, #0x0\n"
".inst 0x4510a6f7 // sshllt z23.s, z23.h, #0x0\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
- "ld1rw { z3.s }, p0/Z, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+ "ld1rw { z3.s }, p0/Z, [x19]\n"
".inst 0x4510a2c0 // sshllb z0.s, z22.h, #0x0\n"
".inst 0x4510a6df // sshllt z31.s, z22.h, #0x0\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
- "ld1rw { z2.s }, p0/Z, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+ "ld1rw { z2.s }, p0/Z, [x19]\n"
".inst 0x44828081 // srshl z1.s, p0/M, z1.s, z4.s\n"
".inst 0x44828097 // srshl z23.s, p0/M, z23.s, z4.s\n"
".inst 0x44828080 // srshl z0.s, p0/M, z0.s, z4.s\n"
@@ -369,15 +369,15 @@ void sme_s8q_nhwc_max_generic_depthfirst_impl(
"smin z31.s, p0/M, z31.s, z19.s\n"
"trn1 z16.h, z0.h, z31.h\n"
"trn1 z16.b, z23.b, z16.b\n"
- "st1b { z16.b }, p4, [%x[outptr], x9]\n"
- "incb x9\n"
- "whilelt p4.b, x9, %x[n_channels]\n"
+ "st1b { z16.b }, p4, [%x[outptr], x28]\n"
+ "incb x28\n"
+ "whilelt p4.b, x28, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
".inst 0xd503467f // SMSTOP\n"
:
: [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [offsetof_qp_per_layer_left_shift] "I" (offsetof(Requantize32, per_layer_left_shift)), [offsetof_qp_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_qp_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [outptr] "r" (outptr), [quant_params] "r" (&qp)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
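
The quantised max kernel above ends by widening the int8 maxima and requantising with the three per-layer parameters it loads from Requantize32 (per_layer_left_shift, per_layer_mul, per_layer_right_shift). A hedged scalar sketch of that pipeline follows; the rounding-doubling high multiply and the final int8 clamp follow the usual gemmlowp-style scheme and are stated as assumptions rather than a transcription of the encoded instructions:

#include <algorithm>
#include <cstdint>

// Per-layer requantisation: shift up, rounding high multiply, rounding
// shift down, clamp. Saturation inside the high multiply is omitted.
int8_t requantize_per_layer(int32_t v, int32_t left_shift,
                            int32_t mul, int32_t right_shift)
{
  int64_t x = static_cast<int64_t>(v) << left_shift;
  x = (x * mul + (1ll << 30)) >> 31;  // ~sqrdmulh: (2*x*mul + 2^31) >> 32
  if (right_shift > 0)
  {
    x = (x + (1ll << (right_shift - 1))) >> right_shift;  // rounding shift
  }
  x = std::min<int64_t>(std::max<int64_t>(x, -128), 127); // int8 range
  return static_cast<int8_t>(x);
}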
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_avg_generic_depthfirst/generic.cpp
index a637654908..e529e4c4d0 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -84,32 +84,31 @@ void sme_u8_nhwc_avg_generic_depthfirst_impl(
f_rescale_value *= 2.0f;
}
- int64_t long_rescale_value = round(f_rescale_value * static_cast<float>(1ll << 31));
- if (long_rescale_value == (1ll << 31))
+ rescale_value = static_cast<int32_t>(round(f_rescale_value * static_cast<float>(1ll << 31)));
+ if (static_cast<int64_t>(rescale_value) == (1ll << 31))
{
shift_value++;
- long_rescale_value >>= 1;
+ rescale_value >>= 1;
}
- rescale_value = static_cast<int32_t>(long_rescale_value);
}
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "mov x27, #0x0\n"
- "cntb x26\n"
- "cntb x25, ALL, MUL #2\n"
- "cntb x24, ALL, MUL #3\n"
- "whilelt p4.b, x27, %x[n_channels]\n"
- "whilelt p3.b, x26, %x[n_channels]\n"
- "whilelt p2.b, x25, %x[n_channels]\n"
- "whilelt p1.b, x24, %x[n_channels]\n"
+ "mov x26, #0x0\n"
+ "cntb x25\n"
+ "cntb x24, ALL, MUL #2\n"
+ "cntb x23, ALL, MUL #3\n"
+ "whilelt p4.b, x26, %x[n_channels]\n"
+ "whilelt p3.b, x25, %x[n_channels]\n"
+ "whilelt p2.b, x24, %x[n_channels]\n"
+ "whilelt p1.b, x23, %x[n_channels]\n"
"ptrue p0.b\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
- "lsr x23, %x[n_valid_cells], #0x1\n"
+ "lsr x22, %x[n_valid_cells], #0x1\n"
"mov z15.s, #0x0\n"
"mov z14.s, #0x0\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"mov z13.s, #0x0\n"
"mov z12.s, #0x0\n"
"mov z11.s, #0x0\n"
@@ -124,49 +123,49 @@ void sme_u8_nhwc_avg_generic_depthfirst_impl(
"mov z2.s, #0x0\n"
"mov z1.s, #0x0\n"
"mov z0.s, #0x0\n"
- "cbz x23, 4f\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "subs x23, x23, #0x1\n"
- "add x20, x20, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
- "ld1b { z30.b }, p4/Z, [x21, x27]\n"
- "ld1b { z29.b }, p3/Z, [x22, x26]\n"
- "ld1b { z28.b }, p3/Z, [x21, x26]\n"
- "ld1b { z27.b }, p2/Z, [x22, x25]\n"
- "ld1b { z26.b }, p2/Z, [x21, x25]\n"
- "ld1b { z25.b }, p1/Z, [x22, x24]\n"
- "ld1b { z24.b }, p1/Z, [x21, x24]\n"
+ "cbz x22, 4f\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
+ "add x19, x19, #0x10\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+ "ld1b { z29.b }, p3/Z, [x21, x25]\n"
+ "ld1b { z28.b }, p3/Z, [x20, x25]\n"
+ "ld1b { z27.b }, p2/Z, [x21, x24]\n"
+ "ld1b { z26.b }, p2/Z, [x20, x24]\n"
+ "ld1b { z25.b }, p1/Z, [x21, x23]\n"
+ "ld1b { z24.b }, p1/Z, [x20, x23]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 2 inputs loop
".inst 0x455e0bf7 // uaddlb z23.h, z31.b, z30.b\n"
".inst 0x455e0ff6 // uaddlt z22.h, z31.b, z30.b\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "subs x23, x23, #0x1\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
".inst 0x455c0bb5 // uaddlb z21.h, z29.b, z28.b\n"
".inst 0x455c0fb4 // uaddlt z20.h, z29.b, z28.b\n"
- "add x20, x20, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
+ "add x19, x19, #0x10\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
".inst 0x455a0b73 // uaddlb z19.h, z27.b, z26.b\n"
".inst 0x455a0f72 // uaddlt z18.h, z27.b, z26.b\n"
- "ld1b { z30.b }, p4/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x26]\n"
".inst 0x45580b31 // uaddlb z17.h, z25.b, z24.b\n"
".inst 0x45580f30 // uaddlt z16.h, z25.b, z24.b\n"
- "ld1b { z29.b }, p3/Z, [x22, x26]\n"
+ "ld1b { z29.b }, p3/Z, [x21, x25]\n"
".inst 0x459749ef // uaddwb z15.s, z15.s, z23.h\n"
".inst 0x45974dce // uaddwt z14.s, z14.s, z23.h\n"
- "ld1b { z28.b }, p3/Z, [x21, x26]\n"
+ "ld1b { z28.b }, p3/Z, [x20, x25]\n"
".inst 0x459649ad // uaddwb z13.s, z13.s, z22.h\n"
".inst 0x45964d8c // uaddwt z12.s, z12.s, z22.h\n"
- "ld1b { z27.b }, p2/Z, [x22, x25]\n"
+ "ld1b { z27.b }, p2/Z, [x21, x24]\n"
".inst 0x4595496b // uaddwb z11.s, z11.s, z21.h\n"
".inst 0x45954d4a // uaddwt z10.s, z10.s, z21.h\n"
- "ld1b { z26.b }, p2/Z, [x21, x25]\n"
+ "ld1b { z26.b }, p2/Z, [x20, x24]\n"
".inst 0x45944929 // uaddwb z9.s, z9.s, z20.h\n"
".inst 0x45944d08 // uaddwt z8.s, z8.s, z20.h\n"
- "ld1b { z25.b }, p1/Z, [x22, x24]\n"
+ "ld1b { z25.b }, p1/Z, [x21, x23]\n"
".inst 0x459348e7 // uaddwb z7.s, z7.s, z19.h\n"
".inst 0x45934cc6 // uaddwt z6.s, z6.s, z19.h\n"
- "ld1b { z24.b }, p1/Z, [x21, x24]\n"
+ "ld1b { z24.b }, p1/Z, [x20, x23]\n"
".inst 0x459248a5 // uaddwb z5.s, z5.s, z18.h\n"
".inst 0x45924c84 // uaddwt z4.s, z4.s, z18.h\n"
".inst 0x45914863 // uaddwb z3.s, z3.s, z17.h\n"
@@ -200,21 +199,21 @@ void sme_u8_nhwc_avg_generic_depthfirst_impl(
".inst 0x45904821 // uaddwb z1.s, z1.s, z16.h\n"
".inst 0x45904c00 // uaddwt z0.s, z0.s, z16.h\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x1\n"
+ "ands x20, %x[n_valid_cells], #0x1\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x22, [x20], #0x8\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
+ "ldr x21, [x19], #0x8\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
".inst 0x4508abf7 // ushllb z23.h, z31.b, #0x0\n"
".inst 0x4508aff6 // ushllt z22.h, z31.b, #0x0\n"
- "ld1b { z29.b }, p3/Z, [x22, x26]\n"
+ "ld1b { z29.b }, p3/Z, [x21, x25]\n"
".inst 0x4508abb5 // ushllb z21.h, z29.b, #0x0\n"
".inst 0x4508afb4 // ushllt z20.h, z29.b, #0x0\n"
- "subs x21, x21, #0x1\n"
- "ld1b { z27.b }, p2/Z, [x22, x25]\n"
+ "subs x20, x20, #0x1\n"
+ "ld1b { z27.b }, p2/Z, [x21, x24]\n"
".inst 0x4508ab73 // ushllb z19.h, z27.b, #0x0\n"
".inst 0x4508af72 // ushllt z18.h, z27.b, #0x0\n"
- "ld1b { z25.b }, p1/Z, [x22, x24]\n"
+ "ld1b { z25.b }, p1/Z, [x21, x23]\n"
".inst 0x4508ab31 // ushllb z17.h, z25.b, #0x0\n"
".inst 0x4508af30 // ushllt z16.h, z25.b, #0x0\n"
".inst 0x459749ef // uaddwb z15.s, z15.s, z23.h\n"
@@ -313,47 +312,47 @@ void sme_u8_nhwc_avg_generic_depthfirst_impl(
"trn1 z19.h, z1.h, z0.h\n"
"trn1 z16.b, z23.b, z16.b\n"
"trn1 z18.b, z22.b, z18.b\n"
- "st1b { z16.b }, p4, [%x[outptr], x27]\n"
- "incb x27, ALL, MUL #4\n"
+ "st1b { z16.b }, p4, [%x[outptr], x26]\n"
+ "incb x26, ALL, MUL #4\n"
"trn1 z17.b, z21.b, z17.b\n"
"trn1 z16.b, z20.b, z19.b\n"
- "st1b { z18.b }, p3, [%x[outptr], x26]\n"
- "incb x26, ALL, MUL #4\n"
- "st1b { z17.b }, p2, [%x[outptr], x25]\n"
+ "st1b { z18.b }, p3, [%x[outptr], x25]\n"
"incb x25, ALL, MUL #4\n"
- "st1b { z16.b }, p1, [%x[outptr], x24]\n"
+ "st1b { z17.b }, p2, [%x[outptr], x24]\n"
"incb x24, ALL, MUL #4\n"
- "whilelt p1.b, x24, %x[n_channels]\n"
+ "st1b { z16.b }, p1, [%x[outptr], x23]\n"
+ "incb x23, ALL, MUL #4\n"
+ "whilelt p1.b, x23, %x[n_channels]\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p4.b, x27, %x[n_channels]\n"
+ "whilelt p4.b, x26, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
- "lsr x23, %x[n_valid_cells], #0x1\n"
+ "lsr x22, %x[n_valid_cells], #0x1\n"
"mov z15.s, #0x0\n"
"mov z14.s, #0x0\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"mov z13.s, #0x0\n"
"mov z12.s, #0x0\n"
- "cbz x23, 11f\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "subs x23, x23, #0x1\n"
- "add x20, x20, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
- "ld1b { z30.b }, p4/Z, [x21, x27]\n"
+ "cbz x22, 11f\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
+ "add x19, x19, #0x10\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x26]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 2 inputs loop
".inst 0x455e0bf7 // uaddlb z23.h, z31.b, z30.b\n"
".inst 0x455e0ff6 // uaddlt z22.h, z31.b, z30.b\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "subs x23, x23, #0x1\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
".inst 0x459749ef // uaddwb z15.s, z15.s, z23.h\n"
".inst 0x45974dce // uaddwt z14.s, z14.s, z23.h\n"
- "add x20, x20, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
+ "add x19, x19, #0x10\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
".inst 0x459649ad // uaddwb z13.s, z13.s, z22.h\n"
".inst 0x45964d8c // uaddwt z12.s, z12.s, z22.h\n"
- "ld1b { z30.b }, p4/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x26]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 2 inputs tail
".inst 0x455e0bf7 // uaddlb z23.h, z31.b, z30.b\n"
@@ -363,14 +362,14 @@ void sme_u8_nhwc_avg_generic_depthfirst_impl(
".inst 0x459649ad // uaddwb z13.s, z13.s, z22.h\n"
".inst 0x45964d8c // uaddwt z12.s, z12.s, z22.h\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x1\n"
+ "ands x20, %x[n_valid_cells], #0x1\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x22, [x20], #0x8\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
+ "ldr x21, [x19], #0x8\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
".inst 0x4508abf7 // ushllb z23.h, z31.b, #0x0\n"
".inst 0x4508aff6 // ushllt z22.h, z31.b, #0x0\n"
- "subs x21, x21, #0x1\n"
+ "subs x20, x20, #0x1\n"
".inst 0x459749ef // uaddwb z15.s, z15.s, z23.h\n"
".inst 0x45974dce // uaddwt z14.s, z14.s, z23.h\n"
".inst 0x459649ad // uaddwb z13.s, z13.s, z22.h\n"
@@ -400,15 +399,15 @@ void sme_u8_nhwc_avg_generic_depthfirst_impl(
"smin z12.s, p0/M, z12.s, z19.s\n"
"trn1 z16.h, z13.h, z12.h\n"
"trn1 z16.b, z23.b, z16.b\n"
- "st1b { z16.b }, p4, [%x[outptr], x27]\n"
- "incb x27\n"
- "whilelt p4.b, x27, %x[n_channels]\n"
+ "st1b { z16.b }, p4, [%x[outptr], x26]\n"
+ "incb x26\n"
+ "whilelt p4.b, x26, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
".inst 0xd503467f // SMSTOP\n"
:
: [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr), [rescale_ptr] "r" (&rescale_value), [shift_ptr] "r" (&shift_value)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
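
The averaging kernels accumulate with the widening-add pattern visible throughout the hunks above: pairs of 8-bit cells are summed into 16-bit lanes (uaddlb/uaddlt, or saddlb/saddlt in the signed variant), and those partial sums are folded into 32-bit accumulators (uaddwb/uaddwt), so no single addition can overflow its lane. A scalar sketch of one channel's accumulation under illustrative names:

#include <cstdint>

// Sum n_valid_cells uint8 samples of channel c without intermediate
// overflow: pair-sum into 16 bits, accumulate in 32 bits, and widen the
// odd remainder directly (the ushllb/ushllt path in the single-input loop).
uint32_t accumulate_cells_ref(const uint8_t *const *inptrs,
                              uint64_t n_valid_cells, uint64_t c)
{
  uint32_t acc = 0;
  uint64_t i = 0;
  for (; i + 1 < n_valid_cells; i += 2)
  {
    uint16_t pair = static_cast<uint16_t>(inptrs[i][c])
                  + static_cast<uint16_t>(inptrs[i + 1][c]);
    acc += pair;
  }
  if (i < n_valid_cells)
  {
    acc += inptrs[i][c];
  }
  return acc;  // subsequently scaled by the Q0.31 rescale value and shift
}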
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
index 9f267d76ea..d76755ae3a 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -63,82 +63,82 @@ void sme_u8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
pad_left, pad_top, pad_right, pad_bottom);
__asm__ __volatile__(
- "ldr x21, [%x[args], %[offsetof_outptrs]]\n"
+ "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
".inst 0xd503477f // SMSTART ZA\n"
- "mov x15, #0x0\n"
- "ptrue p2.b\n"
- "ldr x20, [%x[args], %[offsetof_inptrs]]\n"
"mov x14, #0x0\n"
- "ldr x13, [%x[args], %[offsetof_n_channels]]\n"
- "whilelt p1.b, x15, x13\n"
- "ldp x12, x11, [x21, #0x0]\n"
- "ldp x10, x9, [x21, #0x10]\n"
- "ldp x28, x27, [x20, #0x0]\n"
- "ld1b { z30.b }, p1/Z, [x27, x15]\n"
- "ldp x26, x25, [x20, #0x10]\n"
- "ld1b { z29.b }, p1/Z, [x25, x15]\n"
- "ldp x24, x23, [x20, #0x20]\n"
- "ld1b { z28.b }, p1/Z, [x24, x15]\n"
- "ldp x22, x21, [x20, #0x30]\n"
- "ld1b { z27.b }, p1/Z, [x21, x15]\n"
- "ldr x20, [x20, #0x40]\n"
- "ld1b { z26.b }, p1/Z, [x28, x15]\n"
- "ld1b { z25.b }, p1/Z, [x26, x15]\n"
- "ld1b { z24.b }, p1/Z, [x23, x15]\n"
- "ld1b { z23.b }, p1/Z, [x22, x15]\n"
- "ld1b { z19.b }, p1/Z, [x20, x15]\n"
- "incw x15\n"
- "whilelt p1.b, x15, x13\n"
+ "ptrue p2.b\n"
+ "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+ "mov x13, #0x0\n"
+ "ldr x12, [%x[args], %[offsetof_n_channels]]\n"
+ "whilelt p1.b, x14, x12\n"
+ "ldp x11, x10, [x20, #0x0]\n"
+ "ldp x9, x28, [x20, #0x10]\n"
+ "ldp x27, x26, [x19, #0x0]\n"
+ "ld1b { z29.b }, p1/Z, [x26, x14]\n"
+ "ldp x25, x24, [x19, #0x10]\n"
+ "ld1b { z28.b }, p1/Z, [x24, x14]\n"
+ "ldp x23, x22, [x19, #0x20]\n"
+ "ld1b { z27.b }, p1/Z, [x23, x14]\n"
+ "ldp x21, x20, [x19, #0x30]\n"
+ "ld1b { z26.b }, p1/Z, [x20, x14]\n"
+ "ldr x19, [x19, #0x40]\n"
+ "ld1b { z20.b }, p1/Z, [x27, x14]\n"
+ "ld1b { z25.b }, p1/Z, [x22, x14]\n"
+ "ld1b { z24.b }, p1/Z, [x25, x14]\n"
+ "ld1b { z23.b }, p1/Z, [x21, x14]\n"
+ "ld1b { z19.b }, p1/Z, [x19, x14]\n"
+ "incw x14\n"
+ "whilelt p1.b, x14, x12\n"
"b.none 2f\n"
"1:" // Vector: Loop
- "movprfx z22, z30\n umax z22.b, p2/M, z22.b, z28.b\n"
- "movprfx z21, z28\n umax z21.b, p2/M, z21.b, z27.b\n"
- "ld1b { z30.b }, p1/Z, [x27, x15]\n"
- "whilelt p0.b, x14, x13\n"
- "movprfx z20, z29\n umax z20.b, p2/M, z20.b, z26.b\n"
- "movprfx z18, z25\n umax z18.b, p2/M, z18.b, z24.b\n"
- "ld1b { z28.b }, p1/Z, [x24, x15]\n"
- "movprfx z17, z29\n umax z17.b, p2/M, z17.b, z23.b\n"
- "movprfx z16, z24\n umax z16.b, p2/M, z16.b, z19.b\n"
- "ld1b { z27.b }, p1/Z, [x21, x15]\n"
- "ld1b { z29.b }, p1/Z, [x25, x15]\n"
- "movprfx z19, z22\n umax z19.b, p2/M, z19.b, z20.b\n"
- "umax z18.b, p2/M, z18.b, z22.b\n"
- "ld1b { z26.b }, p1/Z, [x28, x15]\n"
+ "movprfx z22, z29\n umax z22.b, p2/M, z22.b, z27.b\n"
+ "movprfx z21, z27\n umax z21.b, p2/M, z21.b, z26.b\n"
+ "ld1b { z29.b }, p1/Z, [x26, x14]\n"
+ "whilelt p0.b, x13, x12\n"
+ "movprfx z18, z28\n umax z18.b, p2/M, z18.b, z20.b\n"
+ "movprfx z20, z25\n umax z20.b, p2/M, z20.b, z24.b\n"
+ "ld1b { z27.b }, p1/Z, [x23, x14]\n"
+ "movprfx z17, z23\n umax z17.b, p2/M, z17.b, z28.b\n"
+ "movprfx z16, z25\n umax z16.b, p2/M, z16.b, z19.b\n"
+ "ld1b { z26.b }, p1/Z, [x20, x14]\n"
+ "ld1b { z28.b }, p1/Z, [x24, x14]\n"
+ "movprfx z19, z18\n umax z19.b, p2/M, z19.b, z22.b\n"
+ "movprfx z18, z22\n umax z18.b, p2/M, z18.b, z20.b\n"
+ "ld1b { z20.b }, p1/Z, [x27, x14]\n"
"umax z17.b, p2/M, z17.b, z21.b\n"
"umax z16.b, p2/M, z16.b, z21.b\n"
- "ld1b { z25.b }, p1/Z, [x26, x15]\n"
- "st1b { z19.b }, p0, [x12, x14]\n"
- "ld1b { z24.b }, p1/Z, [x23, x15]\n"
- "st1b { z18.b }, p0, [x11, x14]\n"
- "ld1b { z23.b }, p1/Z, [x22, x15]\n"
- "st1b { z17.b }, p0, [x10, x14]\n"
- "ld1b { z19.b }, p1/Z, [x20, x15]\n"
- "incw x15\n"
- "whilelt p1.b, x15, x13\n"
- "st1b { z16.b }, p0, [x9, x14]\n"
+ "ld1b { z25.b }, p1/Z, [x22, x14]\n"
+ "st1b { z19.b }, p0, [x11, x13]\n"
+ "ld1b { z24.b }, p1/Z, [x25, x14]\n"
+ "st1b { z18.b }, p0, [x10, x13]\n"
+ "ld1b { z23.b }, p1/Z, [x21, x14]\n"
+ "st1b { z17.b }, p0, [x9, x13]\n"
+ "ld1b { z19.b }, p1/Z, [x19, x14]\n"
"incw x14\n"
+ "whilelt p1.b, x14, x12\n"
+ "st1b { z16.b }, p0, [x28, x13]\n"
+ "incw x13\n"
"b.any 1b\n"
"2:" // Vector: Tail
- "movprfx z22, z30\n umax z22.b, p2/M, z22.b, z28.b\n"
- "movprfx z21, z28\n umax z21.b, p2/M, z21.b, z27.b\n"
- "whilelt p0.b, x14, x13\n"
- "movprfx z20, z29\n umax z20.b, p2/M, z20.b, z26.b\n"
- "movprfx z18, z25\n umax z18.b, p2/M, z18.b, z24.b\n"
- "movprfx z17, z29\n umax z17.b, p2/M, z17.b, z23.b\n"
- "movprfx z16, z24\n umax z16.b, p2/M, z16.b, z19.b\n"
- "movprfx z19, z22\n umax z19.b, p2/M, z19.b, z20.b\n"
- "umax z18.b, p2/M, z18.b, z22.b\n"
- "st1b { z19.b }, p0, [x12, x14]\n"
+ "movprfx z22, z29\n umax z22.b, p2/M, z22.b, z27.b\n"
+ "movprfx z21, z27\n umax z21.b, p2/M, z21.b, z26.b\n"
+ "whilelt p0.b, x13, x12\n"
+ "movprfx z18, z28\n umax z18.b, p2/M, z18.b, z20.b\n"
+ "movprfx z20, z25\n umax z20.b, p2/M, z20.b, z24.b\n"
+ "movprfx z17, z23\n umax z17.b, p2/M, z17.b, z28.b\n"
+ "movprfx z16, z25\n umax z16.b, p2/M, z16.b, z19.b\n"
+ "movprfx z19, z18\n umax z19.b, p2/M, z19.b, z22.b\n"
+ "movprfx z18, z22\n umax z18.b, p2/M, z18.b, z20.b\n"
+ "st1b { z19.b }, p0, [x11, x13]\n"
"umax z17.b, p2/M, z17.b, z21.b\n"
"umax z16.b, p2/M, z16.b, z21.b\n"
- "st1b { z18.b }, p0, [x11, x14]\n"
- "st1b { z17.b }, p0, [x10, x14]\n"
- "st1b { z16.b }, p0, [x9, x14]\n"
+ "st1b { z18.b }, p0, [x10, x13]\n"
+ "st1b { z17.b }, p0, [x9, x13]\n"
+ "st1b { z16.b }, p0, [x28, x13]\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_max_generic_depthfirst/generic.cpp
index 9a13deafda..21af2eb5b1 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,82 +40,82 @@ void sme_u8_nhwc_max_generic_depthfirst_impl(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "mov x9, #0x0\n"
- "cntb x28\n"
- "cntb x27, ALL, MUL #2\n"
- "cntb x26, ALL, MUL #3\n"
- "whilelt p4.b, x9, %x[n_channels]\n"
- "whilelt p3.b, x28, %x[n_channels]\n"
- "whilelt p2.b, x27, %x[n_channels]\n"
- "whilelt p1.b, x26, %x[n_channels]\n"
+ "mov x28, #0x0\n"
+ "cntb x27\n"
+ "cntb x26, ALL, MUL #2\n"
+ "cntb x25, ALL, MUL #3\n"
+ "whilelt p4.b, x28, %x[n_channels]\n"
+ "whilelt p3.b, x27, %x[n_channels]\n"
+ "whilelt p2.b, x26, %x[n_channels]\n"
+ "whilelt p1.b, x25, %x[n_channels]\n"
"ptrue p0.b\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
- "lsr x25, %x[n_valid_cells], #0x2\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"mov z4.b, #0x0\n"
"mov z3.b, #0x0\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"mov z2.b, #0x0\n"
"mov z1.b, #0x0\n"
- "cbz x25, 4f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "subs x25, x25, #0x1\n"
- "ld1b { z0.b }, p4/Z, [x24, x9]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ld1b { z31.b }, p4/Z, [x23, x9]\n"
- "ld1b { z23.b }, p4/Z, [x22, x9]\n"
- "ld1b { z30.b }, p4/Z, [x21, x9]\n"
- "ld1b { z18.b }, p3/Z, [x24, x28]\n"
- "ld1b { z29.b }, p3/Z, [x23, x28]\n"
- "ld1b { z22.b }, p3/Z, [x22, x28]\n"
- "ld1b { z28.b }, p3/Z, [x21, x28]\n"
- "ld1b { z17.b }, p2/Z, [x24, x27]\n"
- "ld1b { z27.b }, p2/Z, [x23, x27]\n"
- "ld1b { z21.b }, p2/Z, [x22, x27]\n"
- "ld1b { z26.b }, p2/Z, [x21, x27]\n"
- "ld1b { z16.b }, p1/Z, [x24, x26]\n"
- "ld1b { z25.b }, p1/Z, [x23, x26]\n"
- "ld1b { z20.b }, p1/Z, [x22, x26]\n"
- "ld1b { z24.b }, p1/Z, [x21, x26]\n"
+ "cbz x24, 4f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "subs x24, x24, #0x1\n"
+ "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "ld1b { z31.b }, p4/Z, [x22, x28]\n"
+ "ld1b { z23.b }, p4/Z, [x21, x28]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x28]\n"
+ "ld1b { z18.b }, p3/Z, [x23, x27]\n"
+ "ld1b { z29.b }, p3/Z, [x22, x27]\n"
+ "ld1b { z22.b }, p3/Z, [x21, x27]\n"
+ "ld1b { z28.b }, p3/Z, [x20, x27]\n"
+ "ld1b { z17.b }, p2/Z, [x23, x26]\n"
+ "ld1b { z27.b }, p2/Z, [x22, x26]\n"
+ "ld1b { z21.b }, p2/Z, [x21, x26]\n"
+ "ld1b { z26.b }, p2/Z, [x20, x26]\n"
+ "ld1b { z16.b }, p1/Z, [x23, x25]\n"
+ "ld1b { z25.b }, p1/Z, [x22, x25]\n"
+ "ld1b { z20.b }, p1/Z, [x21, x25]\n"
+ "ld1b { z24.b }, p1/Z, [x20, x25]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
"movprfx z19, z0\n umax z19.b, p0/M, z19.b, z31.b\n"
"umax z23.b, p0/M, z23.b, z30.b\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "subs x25, x25, #0x1\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "subs x24, x24, #0x1\n"
"umax z18.b, p0/M, z18.b, z29.b\n"
"umax z22.b, p0/M, z22.b, z28.b\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
"umax z17.b, p0/M, z17.b, z27.b\n"
"umax z21.b, p0/M, z21.b, z26.b\n"
- "ld1b { z0.b }, p4/Z, [x24, x9]\n"
+ "ld1b { z0.b }, p4/Z, [x23, x28]\n"
"umax z16.b, p0/M, z16.b, z25.b\n"
"umax z20.b, p0/M, z20.b, z24.b\n"
- "ld1b { z31.b }, p4/Z, [x23, x9]\n"
+ "ld1b { z31.b }, p4/Z, [x22, x28]\n"
"umax z19.b, p0/M, z19.b, z23.b\n"
"umax z18.b, p0/M, z18.b, z22.b\n"
- "ld1b { z23.b }, p4/Z, [x22, x9]\n"
+ "ld1b { z23.b }, p4/Z, [x21, x28]\n"
"umax z17.b, p0/M, z17.b, z21.b\n"
"umax z16.b, p0/M, z16.b, z20.b\n"
- "ld1b { z30.b }, p4/Z, [x21, x9]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x28]\n"
"umax z4.b, p0/M, z4.b, z19.b\n"
"umax z3.b, p0/M, z3.b, z18.b\n"
- "ld1b { z18.b }, p3/Z, [x24, x28]\n"
+ "ld1b { z18.b }, p3/Z, [x23, x27]\n"
"umax z2.b, p0/M, z2.b, z17.b\n"
"umax z1.b, p0/M, z1.b, z16.b\n"
- "ld1b { z29.b }, p3/Z, [x23, x28]\n"
- "ld1b { z22.b }, p3/Z, [x22, x28]\n"
- "ld1b { z28.b }, p3/Z, [x21, x28]\n"
- "ld1b { z17.b }, p2/Z, [x24, x27]\n"
- "ld1b { z27.b }, p2/Z, [x23, x27]\n"
- "ld1b { z21.b }, p2/Z, [x22, x27]\n"
- "ld1b { z26.b }, p2/Z, [x21, x27]\n"
- "ld1b { z16.b }, p1/Z, [x24, x26]\n"
- "ld1b { z25.b }, p1/Z, [x23, x26]\n"
- "ld1b { z20.b }, p1/Z, [x22, x26]\n"
- "ld1b { z24.b }, p1/Z, [x21, x26]\n"
+ "ld1b { z29.b }, p3/Z, [x22, x27]\n"
+ "ld1b { z22.b }, p3/Z, [x21, x27]\n"
+ "ld1b { z28.b }, p3/Z, [x20, x27]\n"
+ "ld1b { z17.b }, p2/Z, [x23, x26]\n"
+ "ld1b { z27.b }, p2/Z, [x22, x26]\n"
+ "ld1b { z21.b }, p2/Z, [x21, x26]\n"
+ "ld1b { z26.b }, p2/Z, [x20, x26]\n"
+ "ld1b { z16.b }, p1/Z, [x23, x25]\n"
+ "ld1b { z25.b }, p1/Z, [x22, x25]\n"
+ "ld1b { z20.b }, p1/Z, [x21, x25]\n"
+ "ld1b { z24.b }, p1/Z, [x20, x25]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
"movprfx z19, z0\n umax z19.b, p0/M, z19.b, z31.b\n"
@@ -135,61 +135,61 @@ void sme_u8_nhwc_max_generic_depthfirst_impl(
"umax z2.b, p0/M, z2.b, z17.b\n"
"umax z1.b, p0/M, z1.b, z16.b\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ld1b { z0.b }, p4/Z, [x24, x9]\n"
- "subs x21, x21, #0x1\n"
+ "ldr x23, [x19], #0x8\n"
+ "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+ "subs x20, x20, #0x1\n"
"umax z4.b, p0/M, z4.b, z0.b\n"
- "ld1b { z18.b }, p3/Z, [x24, x28]\n"
+ "ld1b { z18.b }, p3/Z, [x23, x27]\n"
"umax z3.b, p0/M, z3.b, z18.b\n"
- "ld1b { z17.b }, p2/Z, [x24, x27]\n"
+ "ld1b { z17.b }, p2/Z, [x23, x26]\n"
"umax z2.b, p0/M, z2.b, z17.b\n"
- "ld1b { z16.b }, p1/Z, [x24, x26]\n"
+ "ld1b { z16.b }, p1/Z, [x23, x25]\n"
"umax z1.b, p0/M, z1.b, z16.b\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "st1b { z4.b }, p4, [%x[outptr], x9]\n"
- "incb x9, ALL, MUL #4\n"
- "st1b { z3.b }, p3, [%x[outptr], x28]\n"
+ "st1b { z4.b }, p4, [%x[outptr], x28]\n"
"incb x28, ALL, MUL #4\n"
- "st1b { z2.b }, p2, [%x[outptr], x27]\n"
+ "st1b { z3.b }, p3, [%x[outptr], x27]\n"
"incb x27, ALL, MUL #4\n"
- "st1b { z1.b }, p1, [%x[outptr], x26]\n"
+ "st1b { z2.b }, p2, [%x[outptr], x26]\n"
"incb x26, ALL, MUL #4\n"
- "whilelt p1.b, x26, %x[n_channels]\n"
+ "st1b { z1.b }, p1, [%x[outptr], x25]\n"
+ "incb x25, ALL, MUL #4\n"
+ "whilelt p1.b, x25, %x[n_channels]\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p4.b, x9, %x[n_channels]\n"
+ "whilelt p4.b, x28, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
- "lsr x25, %x[n_valid_cells], #0x2\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"mov z4.b, #0x0\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 11f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "subs x25, x25, #0x1\n"
- "ld1b { z0.b }, p4/Z, [x24, x9]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ld1b { z31.b }, p4/Z, [x23, x9]\n"
- "ld1b { z23.b }, p4/Z, [x22, x9]\n"
- "ld1b { z30.b }, p4/Z, [x21, x9]\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 11f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "subs x24, x24, #0x1\n"
+ "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "ld1b { z31.b }, p4/Z, [x22, x28]\n"
+ "ld1b { z23.b }, p4/Z, [x21, x28]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x28]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
"movprfx z19, z0\n umax z19.b, p0/M, z19.b, z31.b\n"
"umax z23.b, p0/M, z23.b, z30.b\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "subs x25, x25, #0x1\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "subs x24, x24, #0x1\n"
"umax z19.b, p0/M, z19.b, z23.b\n"
- "ldp x22, x21, [x20, #0x10]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
"umax z4.b, p0/M, z4.b, z19.b\n"
- "add x20, x20, #0x20\n"
- "ld1b { z0.b }, p4/Z, [x24, x9]\n"
- "ld1b { z31.b }, p4/Z, [x23, x9]\n"
- "ld1b { z23.b }, p4/Z, [x22, x9]\n"
- "ld1b { z30.b }, p4/Z, [x21, x9]\n"
+ "add x19, x19, #0x20\n"
+ "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+ "ld1b { z31.b }, p4/Z, [x22, x28]\n"
+ "ld1b { z23.b }, p4/Z, [x21, x28]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x28]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
"movprfx z19, z0\n umax z19.b, p0/M, z19.b, z31.b\n"
@@ -197,24 +197,24 @@ void sme_u8_nhwc_max_generic_depthfirst_impl(
"umax z19.b, p0/M, z19.b, z23.b\n"
"umax z4.b, p0/M, z4.b, z19.b\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ld1b { z0.b }, p4/Z, [x24, x9]\n"
- "subs x21, x21, #0x1\n"
+ "ldr x23, [x19], #0x8\n"
+ "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+ "subs x20, x20, #0x1\n"
"umax z4.b, p0/M, z4.b, z0.b\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "st1b { z4.b }, p4, [%x[outptr], x9]\n"
- "incb x9\n"
- "whilelt p4.b, x9, %x[n_channels]\n"
+ "st1b { z4.b }, p4, [%x[outptr], x28]\n"
+ "incb x28\n"
+ "whilelt p4.b, x28, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
".inst 0xd503467f // SMSTOP\n"
:
: [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8q_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8q_nhwc_avg_generic_depthfirst/generic.cpp
index a2fe7a301d..8a3cafa2c1 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8q_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8q_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -86,13 +86,12 @@ void sme_u8q_nhwc_avg_generic_depthfirst_impl(
f_rescale_value *= 2.0f;
}
- int64_t long_rescale_value = round(f_rescale_value * static_cast<float>(1ll << 31));
- if (long_rescale_value == (1ll << 31))
+ rescale_value = static_cast<int32_t>(round(f_rescale_value * static_cast<float>(1ll << 31)));
+ if (static_cast<int64_t>(rescale_value) == (1ll << 31))
{
shift_value++;
- long_rescale_value >>= 1;
+ rescale_value >>= 1;
}
- rescale_value = static_cast<int32_t>(long_rescale_value);
}
@@ -119,24 +118,24 @@ void sme_u8q_nhwc_avg_generic_depthfirst_impl(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "mov x27, #0x0\n"
- "cntb x26\n"
- "cntb x25, ALL, MUL #2\n"
- "cntb x24, ALL, MUL #3\n"
- "whilelt p4.b, x27, %x[n_channels]\n"
- "whilelt p3.b, x26, %x[n_channels]\n"
- "whilelt p2.b, x25, %x[n_channels]\n"
- "whilelt p1.b, x24, %x[n_channels]\n"
+ "mov x26, #0x0\n"
+ "cntb x25\n"
+ "cntb x24, ALL, MUL #2\n"
+ "cntb x23, ALL, MUL #3\n"
+ "whilelt p4.b, x26, %x[n_channels]\n"
+ "whilelt p3.b, x25, %x[n_channels]\n"
+ "whilelt p2.b, x24, %x[n_channels]\n"
+ "whilelt p1.b, x23, %x[n_channels]\n"
"ptrue p0.b\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
"ld1rw { z15.s }, p0/Z, [%x[accumulator_init]]\n"
- "lsr x23, %x[n_valid_cells], #0x1\n"
+ "lsr x22, %x[n_valid_cells], #0x1\n"
"mov z14.d, z15.d\n"
"mov z13.d, z15.d\n"
"mov z12.d, z15.d\n"
"mov z11.d, z15.d\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"mov z10.d, z15.d\n"
"mov z9.d, z15.d\n"
"mov z8.d, z15.d\n"
@@ -148,49 +147,49 @@ void sme_u8q_nhwc_avg_generic_depthfirst_impl(
"mov z2.d, z15.d\n"
"mov z1.d, z15.d\n"
"mov z0.d, z15.d\n"
- "cbz x23, 4f\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "subs x23, x23, #0x1\n"
- "add x20, x20, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
- "ld1b { z30.b }, p4/Z, [x21, x27]\n"
- "ld1b { z29.b }, p3/Z, [x22, x26]\n"
- "ld1b { z28.b }, p3/Z, [x21, x26]\n"
- "ld1b { z27.b }, p2/Z, [x22, x25]\n"
- "ld1b { z26.b }, p2/Z, [x21, x25]\n"
- "ld1b { z25.b }, p1/Z, [x22, x24]\n"
- "ld1b { z24.b }, p1/Z, [x21, x24]\n"
+ "cbz x22, 4f\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
+ "add x19, x19, #0x10\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+ "ld1b { z29.b }, p3/Z, [x21, x25]\n"
+ "ld1b { z28.b }, p3/Z, [x20, x25]\n"
+ "ld1b { z27.b }, p2/Z, [x21, x24]\n"
+ "ld1b { z26.b }, p2/Z, [x20, x24]\n"
+ "ld1b { z25.b }, p1/Z, [x21, x23]\n"
+ "ld1b { z24.b }, p1/Z, [x20, x23]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 2 inputs loop
".inst 0x455e0bf7 // uaddlb z23.h, z31.b, z30.b\n"
".inst 0x455e0ff6 // uaddlt z22.h, z31.b, z30.b\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "subs x23, x23, #0x1\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
".inst 0x455c0bb5 // uaddlb z21.h, z29.b, z28.b\n"
".inst 0x455c0fb4 // uaddlt z20.h, z29.b, z28.b\n"
- "add x20, x20, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
+ "add x19, x19, #0x10\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
".inst 0x455a0b73 // uaddlb z19.h, z27.b, z26.b\n"
".inst 0x455a0f72 // uaddlt z18.h, z27.b, z26.b\n"
- "ld1b { z30.b }, p4/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x26]\n"
".inst 0x45580b31 // uaddlb z17.h, z25.b, z24.b\n"
".inst 0x45580f30 // uaddlt z16.h, z25.b, z24.b\n"
- "ld1b { z29.b }, p3/Z, [x22, x26]\n"
+ "ld1b { z29.b }, p3/Z, [x21, x25]\n"
".inst 0x459749ef // uaddwb z15.s, z15.s, z23.h\n"
".inst 0x45974dce // uaddwt z14.s, z14.s, z23.h\n"
- "ld1b { z28.b }, p3/Z, [x21, x26]\n"
+ "ld1b { z28.b }, p3/Z, [x20, x25]\n"
".inst 0x459649ad // uaddwb z13.s, z13.s, z22.h\n"
".inst 0x45964d8c // uaddwt z12.s, z12.s, z22.h\n"
- "ld1b { z27.b }, p2/Z, [x22, x25]\n"
+ "ld1b { z27.b }, p2/Z, [x21, x24]\n"
".inst 0x4595496b // uaddwb z11.s, z11.s, z21.h\n"
".inst 0x45954d4a // uaddwt z10.s, z10.s, z21.h\n"
- "ld1b { z26.b }, p2/Z, [x21, x25]\n"
+ "ld1b { z26.b }, p2/Z, [x20, x24]\n"
".inst 0x45944929 // uaddwb z9.s, z9.s, z20.h\n"
".inst 0x45944d08 // uaddwt z8.s, z8.s, z20.h\n"
- "ld1b { z25.b }, p1/Z, [x22, x24]\n"
+ "ld1b { z25.b }, p1/Z, [x21, x23]\n"
".inst 0x459348e7 // uaddwb z7.s, z7.s, z19.h\n"
".inst 0x45934cc6 // uaddwt z6.s, z6.s, z19.h\n"
- "ld1b { z24.b }, p1/Z, [x21, x24]\n"
+ "ld1b { z24.b }, p1/Z, [x20, x23]\n"
".inst 0x459248a5 // uaddwb z5.s, z5.s, z18.h\n"
".inst 0x45924c84 // uaddwt z4.s, z4.s, z18.h\n"
".inst 0x45914863 // uaddwb z3.s, z3.s, z17.h\n"
@@ -224,21 +223,21 @@ void sme_u8q_nhwc_avg_generic_depthfirst_impl(
".inst 0x45904821 // uaddwb z1.s, z1.s, z16.h\n"
".inst 0x45904c00 // uaddwt z0.s, z0.s, z16.h\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x1\n"
+ "ands x20, %x[n_valid_cells], #0x1\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x22, [x20], #0x8\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
+ "ldr x21, [x19], #0x8\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
".inst 0x4508abf7 // ushllb z23.h, z31.b, #0x0\n"
".inst 0x4508aff6 // ushllt z22.h, z31.b, #0x0\n"
- "ld1b { z29.b }, p3/Z, [x22, x26]\n"
+ "ld1b { z29.b }, p3/Z, [x21, x25]\n"
".inst 0x4508abb5 // ushllb z21.h, z29.b, #0x0\n"
".inst 0x4508afb4 // ushllt z20.h, z29.b, #0x0\n"
- "subs x21, x21, #0x1\n"
- "ld1b { z27.b }, p2/Z, [x22, x25]\n"
+ "subs x20, x20, #0x1\n"
+ "ld1b { z27.b }, p2/Z, [x21, x24]\n"
".inst 0x4508ab73 // ushllb z19.h, z27.b, #0x0\n"
".inst 0x4508af72 // ushllt z18.h, z27.b, #0x0\n"
- "ld1b { z25.b }, p1/Z, [x22, x24]\n"
+ "ld1b { z25.b }, p1/Z, [x21, x23]\n"
".inst 0x4508ab31 // ushllb z17.h, z25.b, #0x0\n"
".inst 0x4508af30 // ushllt z16.h, z25.b, #0x0\n"
".inst 0x459749ef // uaddwb z15.s, z15.s, z23.h\n"
@@ -262,7 +261,7 @@ void sme_u8q_nhwc_avg_generic_depthfirst_impl(
"ld1rw { z19.s }, p0/Z, [%x[left_shift]]\n"
".inst 0x4482826f // srshl z15.s, p0/M, z15.s, z19.s\n"
".inst 0x4482826e // srshl z14.s, p0/M, z14.s, z19.s\n"
- "add x20, %x[quant_params], %[offsetof_qp_output_offset]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
".inst 0x4482826d // srshl z13.s, p0/M, z13.s, z19.s\n"
".inst 0x4482826c // srshl z12.s, p0/M, z12.s, z19.s\n"
"ld1rw { z18.s }, p0/Z, [%x[combined_rescale_value]]\n"
@@ -271,7 +270,7 @@ void sme_u8q_nhwc_avg_generic_depthfirst_impl(
"ld1rw { z17.s }, p0/Z, [%x[right_shift]]\n"
".inst 0x44828269 // srshl z9.s, p0/M, z9.s, z19.s\n"
".inst 0x44828268 // srshl z8.s, p0/M, z8.s, z19.s\n"
- "ld1rw { z16.s }, p0/Z, [x20]\n"
+ "ld1rw { z16.s }, p0/Z, [x19]\n"
".inst 0x44828267 // srshl z7.s, p0/M, z7.s, z19.s\n"
".inst 0x44828266 // srshl z6.s, p0/M, z6.s, z19.s\n"
".inst 0x44828265 // srshl z5.s, p0/M, z5.s, z19.s\n"
@@ -372,47 +371,47 @@ void sme_u8q_nhwc_avg_generic_depthfirst_impl(
"trn1 z19.h, z1.h, z0.h\n"
"trn1 z16.b, z23.b, z16.b\n"
"trn1 z18.b, z22.b, z18.b\n"
- "st1b { z16.b }, p4, [%x[outptr], x27]\n"
- "incb x27, ALL, MUL #4\n"
+ "st1b { z16.b }, p4, [%x[outptr], x26]\n"
+ "incb x26, ALL, MUL #4\n"
"trn1 z17.b, z21.b, z17.b\n"
"trn1 z16.b, z20.b, z19.b\n"
- "st1b { z18.b }, p3, [%x[outptr], x26]\n"
- "incb x26, ALL, MUL #4\n"
- "st1b { z17.b }, p2, [%x[outptr], x25]\n"
+ "st1b { z18.b }, p3, [%x[outptr], x25]\n"
"incb x25, ALL, MUL #4\n"
- "st1b { z16.b }, p1, [%x[outptr], x24]\n"
+ "st1b { z17.b }, p2, [%x[outptr], x24]\n"
"incb x24, ALL, MUL #4\n"
- "whilelt p1.b, x24, %x[n_channels]\n"
+ "st1b { z16.b }, p1, [%x[outptr], x23]\n"
+ "incb x23, ALL, MUL #4\n"
+ "whilelt p1.b, x23, %x[n_channels]\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p4.b, x27, %x[n_channels]\n"
+ "whilelt p4.b, x26, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
"ld1rw { z15.s }, p0/Z, [%x[accumulator_init]]\n"
- "lsr x23, %x[n_valid_cells], #0x1\n"
+ "lsr x22, %x[n_valid_cells], #0x1\n"
"mov z14.d, z15.d\n"
"mov z13.d, z15.d\n"
"mov z12.d, z15.d\n"
- "mov x20, %x[inptrs]\n"
- "cbz x23, 11f\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "subs x23, x23, #0x1\n"
- "add x20, x20, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
- "ld1b { z30.b }, p4/Z, [x21, x27]\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x22, 11f\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
+ "add x19, x19, #0x10\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x26]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 2 inputs loop
".inst 0x455e0bf7 // uaddlb z23.h, z31.b, z30.b\n"
".inst 0x455e0ff6 // uaddlt z22.h, z31.b, z30.b\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "subs x23, x23, #0x1\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
".inst 0x459749ef // uaddwb z15.s, z15.s, z23.h\n"
".inst 0x45974dce // uaddwt z14.s, z14.s, z23.h\n"
- "add x20, x20, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
+ "add x19, x19, #0x10\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
".inst 0x459649ad // uaddwb z13.s, z13.s, z22.h\n"
".inst 0x45964d8c // uaddwt z12.s, z12.s, z22.h\n"
- "ld1b { z30.b }, p4/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x26]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 2 inputs tail
".inst 0x455e0bf7 // uaddlb z23.h, z31.b, z30.b\n"
@@ -422,14 +421,14 @@ void sme_u8q_nhwc_avg_generic_depthfirst_impl(
".inst 0x459649ad // uaddwb z13.s, z13.s, z22.h\n"
".inst 0x45964d8c // uaddwt z12.s, z12.s, z22.h\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x1\n"
+ "ands x20, %x[n_valid_cells], #0x1\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x22, [x20], #0x8\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
+ "ldr x21, [x19], #0x8\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
".inst 0x4508abf7 // ushllb z23.h, z31.b, #0x0\n"
".inst 0x4508aff6 // ushllt z22.h, z31.b, #0x0\n"
- "subs x21, x21, #0x1\n"
+ "subs x20, x20, #0x1\n"
".inst 0x459749ef // uaddwb z15.s, z15.s, z23.h\n"
".inst 0x45974dce // uaddwt z14.s, z14.s, z23.h\n"
".inst 0x459649ad // uaddwb z13.s, z13.s, z22.h\n"
@@ -439,7 +438,7 @@ void sme_u8q_nhwc_avg_generic_depthfirst_impl(
"ld1rw { z19.s }, p0/Z, [%x[left_shift]]\n"
".inst 0x4482826f // srshl z15.s, p0/M, z15.s, z19.s\n"
".inst 0x4482826e // srshl z14.s, p0/M, z14.s, z19.s\n"
- "add x20, %x[quant_params], %[offsetof_qp_output_offset]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
".inst 0x4482826d // srshl z13.s, p0/M, z13.s, z19.s\n"
".inst 0x4482826c // srshl z12.s, p0/M, z12.s, z19.s\n"
"ld1rw { z18.s }, p0/Z, [%x[combined_rescale_value]]\n"
@@ -448,7 +447,7 @@ void sme_u8q_nhwc_avg_generic_depthfirst_impl(
"ld1rw { z17.s }, p0/Z, [%x[right_shift]]\n"
".inst 0x04b275ad // sqrdmulh z13.s, z13.s, z18.s\n"
".inst 0x04b2758c // sqrdmulh z12.s, z12.s, z18.s\n"
- "ld1rw { z16.s }, p0/Z, [x20]\n"
+ "ld1rw { z16.s }, p0/Z, [x19]\n"
".inst 0x4482822f // srshl z15.s, p0/M, z15.s, z17.s\n"
".inst 0x4482822e // srshl z14.s, p0/M, z14.s, z17.s\n"
".inst 0x4482822d // srshl z13.s, p0/M, z13.s, z17.s\n"
@@ -470,15 +469,15 @@ void sme_u8q_nhwc_avg_generic_depthfirst_impl(
"smin z12.s, p0/M, z12.s, z19.s\n"
"trn1 z16.h, z13.h, z12.h\n"
"trn1 z16.b, z23.b, z16.b\n"
- "st1b { z16.b }, p4, [%x[outptr], x27]\n"
- "incb x27\n"
- "whilelt p4.b, x27, %x[n_channels]\n"
+ "st1b { z16.b }, p4, [%x[outptr], x26]\n"
+ "incb x26\n"
+ "whilelt p4.b, x26, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
".inst 0xd503467f // SMSTOP\n"
:
: [accumulator_init] "r" (&accumulator_init), [combined_rescale_value] "r" (&combined_rescale_value), [inptrs] "r" (inptrs), [left_shift] "r" (&left_shift), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [offsetof_qp_output_offset] "I" (offsetof(Requantize32, output_offset)), [outptr] "r" (outptr), [quant_params] "r" (&qp), [right_shift] "r" (&right_shift)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8q_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8q_nhwc_max_generic_depthfirst/generic.cpp
index d050cd014f..7914e357c4 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8q_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8q_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,82 +42,82 @@ void sme_u8q_nhwc_max_generic_depthfirst_impl(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "mov x9, #0x0\n"
- "cntb x28\n"
- "cntb x27, ALL, MUL #2\n"
- "cntb x26, ALL, MUL #3\n"
- "whilelt p4.b, x9, %x[n_channels]\n"
- "whilelt p3.b, x28, %x[n_channels]\n"
- "whilelt p2.b, x27, %x[n_channels]\n"
- "whilelt p1.b, x26, %x[n_channels]\n"
+ "mov x28, #0x0\n"
+ "cntb x27\n"
+ "cntb x26, ALL, MUL #2\n"
+ "cntb x25, ALL, MUL #3\n"
+ "whilelt p4.b, x28, %x[n_channels]\n"
+ "whilelt p3.b, x27, %x[n_channels]\n"
+ "whilelt p2.b, x26, %x[n_channels]\n"
+ "whilelt p1.b, x25, %x[n_channels]\n"
"ptrue p0.b\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
- "lsr x25, %x[n_valid_cells], #0x2\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"mov z5.b, #0x0\n"
"mov z3.b, #0x0\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"mov z2.b, #0x0\n"
"mov z1.b, #0x0\n"
- "cbz x25, 4f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "subs x25, x25, #0x1\n"
- "ld1b { z0.b }, p4/Z, [x24, x9]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ld1b { z31.b }, p4/Z, [x23, x9]\n"
- "ld1b { z23.b }, p4/Z, [x22, x9]\n"
- "ld1b { z30.b }, p4/Z, [x21, x9]\n"
- "ld1b { z18.b }, p3/Z, [x24, x28]\n"
- "ld1b { z29.b }, p3/Z, [x23, x28]\n"
- "ld1b { z22.b }, p3/Z, [x22, x28]\n"
- "ld1b { z28.b }, p3/Z, [x21, x28]\n"
- "ld1b { z17.b }, p2/Z, [x24, x27]\n"
- "ld1b { z27.b }, p2/Z, [x23, x27]\n"
- "ld1b { z21.b }, p2/Z, [x22, x27]\n"
- "ld1b { z26.b }, p2/Z, [x21, x27]\n"
- "ld1b { z16.b }, p1/Z, [x24, x26]\n"
- "ld1b { z25.b }, p1/Z, [x23, x26]\n"
- "ld1b { z20.b }, p1/Z, [x22, x26]\n"
- "ld1b { z24.b }, p1/Z, [x21, x26]\n"
+ "cbz x24, 4f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "subs x24, x24, #0x1\n"
+ "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "ld1b { z31.b }, p4/Z, [x22, x28]\n"
+ "ld1b { z23.b }, p4/Z, [x21, x28]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x28]\n"
+ "ld1b { z18.b }, p3/Z, [x23, x27]\n"
+ "ld1b { z29.b }, p3/Z, [x22, x27]\n"
+ "ld1b { z22.b }, p3/Z, [x21, x27]\n"
+ "ld1b { z28.b }, p3/Z, [x20, x27]\n"
+ "ld1b { z17.b }, p2/Z, [x23, x26]\n"
+ "ld1b { z27.b }, p2/Z, [x22, x26]\n"
+ "ld1b { z21.b }, p2/Z, [x21, x26]\n"
+ "ld1b { z26.b }, p2/Z, [x20, x26]\n"
+ "ld1b { z16.b }, p1/Z, [x23, x25]\n"
+ "ld1b { z25.b }, p1/Z, [x22, x25]\n"
+ "ld1b { z20.b }, p1/Z, [x21, x25]\n"
+ "ld1b { z24.b }, p1/Z, [x20, x25]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
"movprfx z19, z0\n umax z19.b, p0/M, z19.b, z31.b\n"
"umax z23.b, p0/M, z23.b, z30.b\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "subs x25, x25, #0x1\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "subs x24, x24, #0x1\n"
"umax z18.b, p0/M, z18.b, z29.b\n"
"umax z22.b, p0/M, z22.b, z28.b\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
"umax z17.b, p0/M, z17.b, z27.b\n"
"umax z21.b, p0/M, z21.b, z26.b\n"
- "ld1b { z0.b }, p4/Z, [x24, x9]\n"
+ "ld1b { z0.b }, p4/Z, [x23, x28]\n"
"umax z16.b, p0/M, z16.b, z25.b\n"
"umax z20.b, p0/M, z20.b, z24.b\n"
- "ld1b { z31.b }, p4/Z, [x23, x9]\n"
+ "ld1b { z31.b }, p4/Z, [x22, x28]\n"
"umax z19.b, p0/M, z19.b, z23.b\n"
"umax z18.b, p0/M, z18.b, z22.b\n"
- "ld1b { z23.b }, p4/Z, [x22, x9]\n"
+ "ld1b { z23.b }, p4/Z, [x21, x28]\n"
"umax z17.b, p0/M, z17.b, z21.b\n"
"umax z16.b, p0/M, z16.b, z20.b\n"
- "ld1b { z30.b }, p4/Z, [x21, x9]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x28]\n"
"umax z5.b, p0/M, z5.b, z19.b\n"
"umax z3.b, p0/M, z3.b, z18.b\n"
- "ld1b { z18.b }, p3/Z, [x24, x28]\n"
+ "ld1b { z18.b }, p3/Z, [x23, x27]\n"
"umax z2.b, p0/M, z2.b, z17.b\n"
"umax z1.b, p0/M, z1.b, z16.b\n"
- "ld1b { z29.b }, p3/Z, [x23, x28]\n"
- "ld1b { z22.b }, p3/Z, [x22, x28]\n"
- "ld1b { z28.b }, p3/Z, [x21, x28]\n"
- "ld1b { z17.b }, p2/Z, [x24, x27]\n"
- "ld1b { z27.b }, p2/Z, [x23, x27]\n"
- "ld1b { z21.b }, p2/Z, [x22, x27]\n"
- "ld1b { z26.b }, p2/Z, [x21, x27]\n"
- "ld1b { z16.b }, p1/Z, [x24, x26]\n"
- "ld1b { z25.b }, p1/Z, [x23, x26]\n"
- "ld1b { z20.b }, p1/Z, [x22, x26]\n"
- "ld1b { z24.b }, p1/Z, [x21, x26]\n"
+ "ld1b { z29.b }, p3/Z, [x22, x27]\n"
+ "ld1b { z22.b }, p3/Z, [x21, x27]\n"
+ "ld1b { z28.b }, p3/Z, [x20, x27]\n"
+ "ld1b { z17.b }, p2/Z, [x23, x26]\n"
+ "ld1b { z27.b }, p2/Z, [x22, x26]\n"
+ "ld1b { z21.b }, p2/Z, [x21, x26]\n"
+ "ld1b { z26.b }, p2/Z, [x20, x26]\n"
+ "ld1b { z16.b }, p1/Z, [x23, x25]\n"
+ "ld1b { z25.b }, p1/Z, [x22, x25]\n"
+ "ld1b { z20.b }, p1/Z, [x21, x25]\n"
+ "ld1b { z24.b }, p1/Z, [x20, x25]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
"movprfx z19, z0\n umax z19.b, p0/M, z19.b, z31.b\n"
@@ -137,41 +137,41 @@ void sme_u8q_nhwc_max_generic_depthfirst_impl(
"umax z2.b, p0/M, z2.b, z17.b\n"
"umax z1.b, p0/M, z1.b, z16.b\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ld1b { z0.b }, p4/Z, [x24, x9]\n"
- "subs x21, x21, #0x1\n"
+ "ldr x23, [x19], #0x8\n"
+ "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+ "subs x20, x20, #0x1\n"
"umax z5.b, p0/M, z5.b, z0.b\n"
- "ld1b { z18.b }, p3/Z, [x24, x28]\n"
+ "ld1b { z18.b }, p3/Z, [x23, x27]\n"
"umax z3.b, p0/M, z3.b, z18.b\n"
- "ld1b { z17.b }, p2/Z, [x24, x27]\n"
+ "ld1b { z17.b }, p2/Z, [x23, x26]\n"
"umax z2.b, p0/M, z2.b, z17.b\n"
- "ld1b { z16.b }, p1/Z, [x24, x26]\n"
+ "ld1b { z16.b }, p1/Z, [x23, x25]\n"
"umax z1.b, p0/M, z1.b, z16.b\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "add x20, %x[quant_params], %[offsetof_qp_input_offset]\n"
- "ld1rw { z4.s }, p0/Z, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_input_offset]\n"
+ "ld1rw { z4.s }, p0/Z, [x19]\n"
".inst 0x4508a8b7 // ushllb z23.h, z5.b, #0x0\n"
".inst 0x4508acb9 // ushllt z25.h, z5.b, #0x0\n"
".inst 0x4508a876 // ushllb z22.h, z3.b, #0x0\n"
".inst 0x4508ac72 // ushllt z18.h, z3.b, #0x0\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
- "ld1rw { z3.s }, p0/Z, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
+ "ld1rw { z3.s }, p0/Z, [x19]\n"
".inst 0x4508a855 // ushllb z21.h, z2.b, #0x0\n"
".inst 0x4508ac51 // ushllt z17.h, z2.b, #0x0\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
- "ld1rw { z2.s }, p0/Z, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+ "ld1rw { z2.s }, p0/Z, [x19]\n"
".inst 0x4508a834 // ushllb z20.h, z1.b, #0x0\n"
".inst 0x4508ac38 // ushllt z24.h, z1.b, #0x0\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
- "ld1rw { z19.s }, p0/Z, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+ "ld1rw { z19.s }, p0/Z, [x19]\n"
"neg z4.s, p0/M, z4.s\n"
".inst 0x45974081 // saddwb z1.s, z4.s, z23.h\n"
- "add x20, %x[quant_params], %[offsetof_qp_output_offset]\n"
- "ld1rw { z16.s }, p0/Z, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
+ "ld1rw { z16.s }, p0/Z, [x19]\n"
".inst 0x45974497 // saddwt z23.s, z4.s, z23.h\n"
".inst 0x45994080 // saddwb z0.s, z4.s, z25.h\n"
".inst 0x4599449f // saddwt z31.s, z4.s, z25.h\n"
@@ -296,47 +296,47 @@ void sme_u8q_nhwc_max_generic_depthfirst_impl(
"trn1 z16.b, z23.b, z16.b\n"
"trn1 z18.b, z22.b, z18.b\n"
"trn1 z17.b, z21.b, z17.b\n"
- "st1b { z16.b }, p4, [%x[outptr], x9]\n"
- "incb x9, ALL, MUL #4\n"
- "trn1 z16.b, z20.b, z19.b\n"
- "st1b { z18.b }, p3, [%x[outptr], x28]\n"
+ "st1b { z16.b }, p4, [%x[outptr], x28]\n"
"incb x28, ALL, MUL #4\n"
- "st1b { z17.b }, p2, [%x[outptr], x27]\n"
+ "trn1 z16.b, z20.b, z19.b\n"
+ "st1b { z18.b }, p3, [%x[outptr], x27]\n"
"incb x27, ALL, MUL #4\n"
- "st1b { z16.b }, p1, [%x[outptr], x26]\n"
+ "st1b { z17.b }, p2, [%x[outptr], x26]\n"
"incb x26, ALL, MUL #4\n"
- "whilelt p1.b, x26, %x[n_channels]\n"
+ "st1b { z16.b }, p1, [%x[outptr], x25]\n"
+ "incb x25, ALL, MUL #4\n"
+ "whilelt p1.b, x25, %x[n_channels]\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p4.b, x9, %x[n_channels]\n"
+ "whilelt p4.b, x28, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
- "lsr x25, %x[n_valid_cells], #0x2\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"mov z5.b, #0x0\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 11f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "subs x25, x25, #0x1\n"
- "ld1b { z0.b }, p4/Z, [x24, x9]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ld1b { z31.b }, p4/Z, [x23, x9]\n"
- "ld1b { z23.b }, p4/Z, [x22, x9]\n"
- "ld1b { z30.b }, p4/Z, [x21, x9]\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 11f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "subs x24, x24, #0x1\n"
+ "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "add x19, x19, #0x20\n"
+ "ld1b { z31.b }, p4/Z, [x22, x28]\n"
+ "ld1b { z23.b }, p4/Z, [x21, x28]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x28]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
"movprfx z19, z0\n umax z19.b, p0/M, z19.b, z31.b\n"
"umax z23.b, p0/M, z23.b, z30.b\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "subs x25, x25, #0x1\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "subs x24, x24, #0x1\n"
"umax z19.b, p0/M, z19.b, z23.b\n"
- "ldp x22, x21, [x20, #0x10]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
"umax z5.b, p0/M, z5.b, z19.b\n"
- "add x20, x20, #0x20\n"
- "ld1b { z0.b }, p4/Z, [x24, x9]\n"
- "ld1b { z31.b }, p4/Z, [x23, x9]\n"
- "ld1b { z23.b }, p4/Z, [x22, x9]\n"
- "ld1b { z30.b }, p4/Z, [x21, x9]\n"
+ "add x19, x19, #0x20\n"
+ "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+ "ld1b { z31.b }, p4/Z, [x22, x28]\n"
+ "ld1b { z23.b }, p4/Z, [x21, x28]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x28]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
"movprfx z19, z0\n umax z19.b, p0/M, z19.b, z31.b\n"
@@ -344,35 +344,35 @@ void sme_u8q_nhwc_max_generic_depthfirst_impl(
"umax z19.b, p0/M, z19.b, z23.b\n"
"umax z5.b, p0/M, z5.b, z19.b\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ld1b { z0.b }, p4/Z, [x24, x9]\n"
- "subs x21, x21, #0x1\n"
+ "ldr x23, [x19], #0x8\n"
+ "ld1b { z0.b }, p4/Z, [x23, x28]\n"
+ "subs x20, x20, #0x1\n"
"umax z5.b, p0/M, z5.b, z0.b\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "add x20, %x[quant_params], %[offsetof_qp_input_offset]\n"
- "ld1rw { z4.s }, p0/Z, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_input_offset]\n"
+ "ld1rw { z4.s }, p0/Z, [x19]\n"
".inst 0x4508a8b7 // ushllb z23.h, z5.b, #0x0\n"
".inst 0x4508acb9 // ushllt z25.h, z5.b, #0x0\n"
"neg z4.s, p0/M, z4.s\n"
".inst 0x45974081 // saddwb z1.s, z4.s, z23.h\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
- "ld1rw { z3.s }, p0/Z, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
+ "ld1rw { z3.s }, p0/Z, [x19]\n"
".inst 0x45974497 // saddwt z23.s, z4.s, z23.h\n"
".inst 0x45994080 // saddwb z0.s, z4.s, z25.h\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
- "ld1rw { z2.s }, p0/Z, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+ "ld1rw { z2.s }, p0/Z, [x19]\n"
".inst 0x4599449f // saddwt z31.s, z4.s, z25.h\n"
".inst 0x44828061 // srshl z1.s, p0/M, z1.s, z3.s\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
- "ld1rw { z19.s }, p0/Z, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+ "ld1rw { z19.s }, p0/Z, [x19]\n"
".inst 0x44828077 // srshl z23.s, p0/M, z23.s, z3.s\n"
".inst 0x44828060 // srshl z0.s, p0/M, z0.s, z3.s\n"
- "add x20, %x[quant_params], %[offsetof_qp_output_offset]\n"
- "ld1rw { z16.s }, p0/Z, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
+ "ld1rw { z16.s }, p0/Z, [x19]\n"
".inst 0x4482807f // srshl z31.s, p0/M, z31.s, z3.s\n"
".inst 0x04a27421 // sqrdmulh z1.s, z1.s, z2.s\n"
".inst 0x04a276f7 // sqrdmulh z23.s, z23.s, z2.s\n"
@@ -399,15 +399,15 @@ void sme_u8q_nhwc_max_generic_depthfirst_impl(
"smin z31.s, p0/M, z31.s, z19.s\n"
"trn1 z16.h, z0.h, z31.h\n"
"trn1 z16.b, z23.b, z16.b\n"
- "st1b { z16.b }, p4, [%x[outptr], x9]\n"
- "incb x9\n"
- "whilelt p4.b, x9, %x[n_channels]\n"
+ "st1b { z16.b }, p4, [%x[outptr], x28]\n"
+ "incb x28\n"
+ "whilelt p4.b, x28, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
".inst 0xd503467f // SMSTOP\n"
:
: [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [offsetof_qp_input_offset] "I" (offsetof(Requantize32, input_offset)), [offsetof_qp_output_offset] "I" (offsetof(Requantize32, output_offset)), [offsetof_qp_per_layer_left_shift] "I" (offsetof(Requantize32, per_layer_left_shift)), [offsetof_qp_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_qp_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [outptr] "r" (outptr), [quant_params] "r" (&qp)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
index 593fb58445..75e4ddca15 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -82,96 +82,96 @@ void sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(
pad_left, pad_top, pad_right, pad_bottom);
__asm__ __volatile__(
- "ldr x2, [%x[args], %[offsetof_n_channels]]\n"
- "ldr x21, [%x[args], %[offsetof_outptrs]]\n"
- "mov x3, #0x0\n"
- "mov x20, #0x4\n"
- "ldr x4, [%x[args], %[offsetof_inptrs]]\n"
- "ldp x5, x6, [x21, #0x0]\n"
- "whilelt p0.h, XZR, x20\n"
- "whilelt p1.h, x3, x2\n"
- "ldp x7, x8, [x21, #0x10]\n"
- "ldp x17, x16, [x4, #0x0]\n"
- "add x15, %x[args], %[offsetof_rescale]\n"
- "mov x14, #0x0\n"
- "ldp x13, x12, [x4, #0x10]\n"
- "ldp x11, x10, [x4, #0x20]\n"
- "ldp x9, x28, [x4, #0x30]\n"
- "ldp x27, x26, [x4, #0x40]\n"
- "ldp x25, x24, [x4, #0x50]\n"
- "ldp x23, x22, [x4, #0x60]\n"
- "ldp x21, x20, [x4, #0x70]\n"
- "ld1h { z7.h }, p1/Z, [x10, x3, LSL #1]\n"
- "ld1h { z6.h }, p1/Z, [x9, x3, LSL #1]\n"
- "ld1h { z5.h }, p1/Z, [x26, x3, LSL #1]\n"
- "ld1h { z4.h }, p1/Z, [x25, x3, LSL #1]\n"
- "ld1h { z3.h }, p1/Z, [x16, x3, LSL #1]\n"
- "ld1h { z2.h }, p1/Z, [x13, x3, LSL #1]\n"
- "ld1h { z1.h }, p1/Z, [x11, x3, LSL #1]\n"
- "ld1h { z31.h }, p1/Z, [x27, x3, LSL #1]\n"
- "ld1h { z30.h }, p1/Z, [x28, x3, LSL #1]\n"
- "ld1h { z29.h }, p1/Z, [x24, x3, LSL #1]\n"
- "ld1h { z28.h }, p1/Z, [x22, x3, LSL #1]\n"
- "ld1h { z27.h }, p1/Z, [x21, x3, LSL #1]\n"
- "ld1h { z26.h }, p1/Z, [x17, x3, LSL #1]\n"
- "ld1h { z25.h }, p1/Z, [x12, x3, LSL #1]\n"
- "ld1h { z24.h }, p1/Z, [x23, x3, LSL #1]\n"
- "ld1h { z23.h }, p1/Z, [x20, x3, LSL #1]\n"
- "incw x3\n"
- "whilelt p1.h, x3, x2\n"
- "ld1rqh { z0.h }, p0/Z, [x15]\n"
+ "ldr x3, [%x[args], %[offsetof_n_channels]]\n"
+ "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
+ "mov x4, #0x0\n"
+ "mov x19, #0x4\n"
+ "ldr x5, [%x[args], %[offsetof_inptrs]]\n"
+ "ldp x6, x7, [x20, #0x0]\n"
+ "whilelt p0.h, XZR, x19\n"
+ "whilelt p1.h, x4, x3\n"
+ "ldp x8, x17, [x20, #0x10]\n"
+ "ldp x16, x15, [x5, #0x0]\n"
+ "add x14, %x[args], %[offsetof_rescale]\n"
+ "mov x13, #0x0\n"
+ "ldp x12, x11, [x5, #0x10]\n"
+ "ldp x10, x9, [x5, #0x20]\n"
+ "ldp x28, x27, [x5, #0x30]\n"
+ "ldp x26, x25, [x5, #0x40]\n"
+ "ldp x24, x23, [x5, #0x50]\n"
+ "ldp x22, x21, [x5, #0x60]\n"
+ "ldp x20, x19, [x5, #0x70]\n"
+ "ld1h { z7.h }, p1/Z, [x9, x4, LSL #1]\n"
+ "ld1h { z6.h }, p1/Z, [x28, x4, LSL #1]\n"
+ "ld1h { z5.h }, p1/Z, [x25, x4, LSL #1]\n"
+ "ld1h { z4.h }, p1/Z, [x24, x4, LSL #1]\n"
+ "ld1h { z3.h }, p1/Z, [x15, x4, LSL #1]\n"
+ "ld1h { z2.h }, p1/Z, [x12, x4, LSL #1]\n"
+ "ld1h { z1.h }, p1/Z, [x10, x4, LSL #1]\n"
+ "ld1h { z31.h }, p1/Z, [x26, x4, LSL #1]\n"
+ "ld1h { z30.h }, p1/Z, [x27, x4, LSL #1]\n"
+ "ld1h { z29.h }, p1/Z, [x23, x4, LSL #1]\n"
+ "ld1h { z28.h }, p1/Z, [x21, x4, LSL #1]\n"
+ "ld1h { z27.h }, p1/Z, [x20, x4, LSL #1]\n"
+ "ld1h { z26.h }, p1/Z, [x16, x4, LSL #1]\n"
+ "ld1h { z25.h }, p1/Z, [x11, x4, LSL #1]\n"
+ "ld1h { z24.h }, p1/Z, [x22, x4, LSL #1]\n"
+ "ld1h { z23.h }, p1/Z, [x19, x4, LSL #1]\n"
+ "incw x4\n"
+ "whilelt p1.h, x4, x3\n"
+ "ld1rqh { z0.h }, p0/Z, [x14]\n"
"b.none 2f\n"
"1:" // Vector: Loop
"fadd z17.h, z7.h, z6.h\n"
"fadd z16.h, z5.h, z4.h\n"
- "ld1h { z7.h }, p1/Z, [x10, x3, LSL #1]\n"
- "ld1h { z6.h }, p1/Z, [x9, x3, LSL #1]\n"
+ "ld1h { z7.h }, p1/Z, [x9, x4, LSL #1]\n"
+ "ld1h { z6.h }, p1/Z, [x28, x4, LSL #1]\n"
"fadd z19.h, z17.h, z16.h\n"
"fadd z18.h, z3.h, z2.h\n"
- "ld1h { z5.h }, p1/Z, [x26, x3, LSL #1]\n"
- "ld1h { z4.h }, p1/Z, [x25, x3, LSL #1]\n"
+ "ld1h { z5.h }, p1/Z, [x25, x4, LSL #1]\n"
+ "ld1h { z4.h }, p1/Z, [x24, x4, LSL #1]\n"
"fadd z17.h, z1.h, z31.h\n"
"fadd z22.h, z30.h, z29.h\n"
- "ld1h { z3.h }, p1/Z, [x16, x3, LSL #1]\n"
- "ld1h { z2.h }, p1/Z, [x13, x3, LSL #1]\n"
+ "ld1h { z3.h }, p1/Z, [x15, x4, LSL #1]\n"
+ "ld1h { z2.h }, p1/Z, [x12, x4, LSL #1]\n"
"fadd z16.h, z28.h, z27.h\n"
"fadd z21.h, z18.h, z19.h\n"
- "ld1h { z1.h }, p1/Z, [x11, x3, LSL #1]\n"
- "ld1h { z31.h }, p1/Z, [x27, x3, LSL #1]\n"
+ "ld1h { z1.h }, p1/Z, [x10, x4, LSL #1]\n"
+ "ld1h { z31.h }, p1/Z, [x26, x4, LSL #1]\n"
"fadd z20.h, z16.h, z19.h\n"
"fadd z19.h, z26.h, z17.h\n"
- "ld1h { z30.h }, p1/Z, [x28, x3, LSL #1]\n"
- "ld1h { z29.h }, p1/Z, [x24, x3, LSL #1]\n"
+ "ld1h { z30.h }, p1/Z, [x27, x4, LSL #1]\n"
+ "ld1h { z29.h }, p1/Z, [x23, x4, LSL #1]\n"
"fadd z18.h, z25.h, z22.h\n"
"fadd z17.h, z24.h, z17.h\n"
- "ld1h { z28.h }, p1/Z, [x22, x3, LSL #1]\n"
- "ld1h { z27.h }, p1/Z, [x21, x3, LSL #1]\n"
+ "ld1h { z28.h }, p1/Z, [x21, x4, LSL #1]\n"
+ "ld1h { z27.h }, p1/Z, [x20, x4, LSL #1]\n"
"fadd z16.h, z23.h, z22.h\n"
- "ld1h { z26.h }, p1/Z, [x17, x3, LSL #1]\n"
- "ld1h { z25.h }, p1/Z, [x12, x3, LSL #1]\n"
- "fadd z19.h, z21.h, z19.h\n"
- "ld1h { z24.h }, p1/Z, [x23, x3, LSL #1]\n"
- "ld1h { z23.h }, p1/Z, [x20, x3, LSL #1]\n"
- "incw x3\n"
- "fadd z18.h, z21.h, z18.h\n"
+ "ld1h { z26.h }, p1/Z, [x16, x4, LSL #1]\n"
+ "ld1h { z25.h }, p1/Z, [x11, x4, LSL #1]\n"
+ "fadd z19.h, z19.h, z21.h\n"
+ "ld1h { z24.h }, p1/Z, [x22, x4, LSL #1]\n"
+ "ld1h { z23.h }, p1/Z, [x19, x4, LSL #1]\n"
+ "incw x4\n"
+ "fadd z18.h, z18.h, z21.h\n"
"fadd z17.h, z17.h, z20.h\n"
"fadd z16.h, z16.h, z20.h\n"
- "whilelt p0.h, x14, x2\n"
- "whilelt p1.h, x3, x2\n"
+ "whilelt p0.h, x13, x3\n"
+ "whilelt p1.h, x4, x3\n"
"fmul z19.h, z19.h, z0.h[0]\n"
"fmul z18.h, z18.h, z0.h[1]\n"
- "st1h { z19.h }, p0, [x5, x14, LSL #1]\n"
+ "st1h { z19.h }, p0, [x6, x13, LSL #1]\n"
"fmul z17.h, z17.h, z0.h[2]\n"
"fmul z16.h, z16.h, z0.h[3]\n"
- "st1h { z18.h }, p0, [x6, x14, LSL #1]\n"
- "st1h { z17.h }, p0, [x7, x14, LSL #1]\n"
- "st1h { z16.h }, p0, [x8, x14, LSL #1]\n"
- "incw x14\n"
+ "st1h { z18.h }, p0, [x7, x13, LSL #1]\n"
+ "st1h { z17.h }, p0, [x8, x13, LSL #1]\n"
+ "st1h { z16.h }, p0, [x17, x13, LSL #1]\n"
+ "incw x13\n"
"b.any 1b\n"
"2:" // Vector: Tail
"fadd z17.h, z7.h, z6.h\n"
"fadd z16.h, z5.h, z4.h\n"
- "whilelt p0.h, x14, x2\n"
+ "whilelt p0.h, x13, x3\n"
"fadd z19.h, z17.h, z16.h\n"
"fadd z18.h, z3.h, z2.h\n"
"fadd z17.h, z1.h, z31.h\n"
@@ -183,21 +183,21 @@ void sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(
"fadd z18.h, z25.h, z22.h\n"
"fadd z17.h, z24.h, z17.h\n"
"fadd z16.h, z23.h, z22.h\n"
- "fadd z19.h, z21.h, z19.h\n"
+ "fadd z19.h, z19.h, z21.h\n"
"fmul z19.h, z19.h, z0.h[0]\n"
- "st1h { z19.h }, p0, [x5, x14, LSL #1]\n"
- "fadd z18.h, z21.h, z18.h\n"
+ "st1h { z19.h }, p0, [x6, x13, LSL #1]\n"
+ "fadd z18.h, z18.h, z21.h\n"
"fadd z17.h, z17.h, z20.h\n"
"fmul z18.h, z18.h, z0.h[1]\n"
"fmul z17.h, z17.h, z0.h[2]\n"
"fadd z16.h, z16.h, z20.h\n"
"fmul z16.h, z16.h, z0.h[3]\n"
- "st1h { z18.h }, p0, [x6, x14, LSL #1]\n"
- "st1h { z17.h }, p0, [x7, x14, LSL #1]\n"
- "st1h { z16.h }, p0, [x8, x14, LSL #1]\n"
+ "st1h { z18.h }, p0, [x7, x13, LSL #1]\n"
+ "st1h { z17.h }, p0, [x8, x13, LSL #1]\n"
+ "st1h { z16.h }, p0, [x17, x13, LSL #1]\n"
:
: [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs)), [offsetof_rescale] "I" (offsetof(KernelArgs, rescale_vals))
- : "cc", "memory", "p0", "p1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_generic_depthfirst/generic.cpp
index 594c65e18d..7081206da1 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,83 +42,83 @@ void sve_fp16_nhwc_avg_generic_depthfirst_impl(
const auto rescale_value = static_cast<__fp16>(1.0f / static_cast<float>(window_cells));
__asm__ __volatile__(
- "mov x9, #0x0\n"
- "cnth x28\n"
- "cnth x27, ALL, MUL #2\n"
- "cnth x26, ALL, MUL #3\n"
+ "mov x28, #0x0\n"
+ "cnth x27\n"
+ "cnth x26, ALL, MUL #2\n"
+ "cnth x25, ALL, MUL #3\n"
"ptrue p0.b\n"
- "whilelt p3.h, x9, %x[n_channels]\n"
+ "whilelt p3.h, x28, %x[n_channels]\n"
"ld1rh { z7.h }, p0/Z, [%x[rescale_ptr]]\n"
- "whilelt p2.h, x28, %x[n_channels]\n"
- "whilelt p1.h, x27, %x[n_channels]\n"
- "whilelt p0.h, x26, %x[n_channels]\n"
+ "whilelt p2.h, x27, %x[n_channels]\n"
+ "whilelt p1.h, x26, %x[n_channels]\n"
+ "whilelt p0.h, x25, %x[n_channels]\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
- "lsr x25, %x[n_valid_cells], #0x2\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"mov z6.b, #0x0\n"
"mov z5.b, #0x0\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"mov z4.b, #0x0\n"
"mov z3.b, #0x0\n"
- "cbz x25, 4f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "subs x25, x25, #0x1\n"
- "add x20, x20, #0x20\n"
- "ld1h { z2.h }, p3/Z, [x24, x9, LSL #1]\n"
- "ld1h { z1.h }, p3/Z, [x23, x9, LSL #1]\n"
- "ld1h { z0.h }, p3/Z, [x22, x9, LSL #1]\n"
- "ld1h { z31.h }, p3/Z, [x21, x9, LSL #1]\n"
- "ld1h { z30.h }, p2/Z, [x24, x28, LSL #1]\n"
- "ld1h { z22.h }, p2/Z, [x23, x28, LSL #1]\n"
- "ld1h { z29.h }, p2/Z, [x22, x28, LSL #1]\n"
- "ld1h { z28.h }, p2/Z, [x21, x28, LSL #1]\n"
- "ld1h { z27.h }, p1/Z, [x24, x27, LSL #1]\n"
- "ld1h { z21.h }, p1/Z, [x23, x27, LSL #1]\n"
- "ld1h { z26.h }, p1/Z, [x22, x27, LSL #1]\n"
- "ld1h { z17.h }, p1/Z, [x21, x27, LSL #1]\n"
- "ld1h { z25.h }, p0/Z, [x24, x26, LSL #1]\n"
- "ld1h { z20.h }, p0/Z, [x23, x26, LSL #1]\n"
- "ld1h { z24.h }, p0/Z, [x22, x26, LSL #1]\n"
- "ld1h { z16.h }, p0/Z, [x21, x26, LSL #1]\n"
+ "cbz x24, 4f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "ld1h { z2.h }, p3/Z, [x23, x28, LSL #1]\n"
+ "ld1h { z1.h }, p3/Z, [x22, x28, LSL #1]\n"
+ "ld1h { z0.h }, p3/Z, [x21, x28, LSL #1]\n"
+ "ld1h { z31.h }, p3/Z, [x20, x28, LSL #1]\n"
+ "ld1h { z30.h }, p2/Z, [x23, x27, LSL #1]\n"
+ "ld1h { z22.h }, p2/Z, [x22, x27, LSL #1]\n"
+ "ld1h { z29.h }, p2/Z, [x21, x27, LSL #1]\n"
+ "ld1h { z28.h }, p2/Z, [x20, x27, LSL #1]\n"
+ "ld1h { z27.h }, p1/Z, [x23, x26, LSL #1]\n"
+ "ld1h { z21.h }, p1/Z, [x22, x26, LSL #1]\n"
+ "ld1h { z26.h }, p1/Z, [x21, x26, LSL #1]\n"
+ "ld1h { z17.h }, p1/Z, [x20, x26, LSL #1]\n"
+ "ld1h { z25.h }, p0/Z, [x23, x25, LSL #1]\n"
+ "ld1h { z20.h }, p0/Z, [x22, x25, LSL #1]\n"
+ "ld1h { z24.h }, p0/Z, [x21, x25, LSL #1]\n"
+ "ld1h { z16.h }, p0/Z, [x20, x25, LSL #1]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
"fadd z23.h, z2.h, z1.h\n"
"fadd z19.h, z0.h, z31.h\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
"fadd z22.h, z30.h, z22.h\n"
"fadd z18.h, z29.h, z28.h\n"
- "subs x25, x25, #0x1\n"
- "add x20, x20, #0x20\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
"fadd z21.h, z27.h, z21.h\n"
"fadd z17.h, z26.h, z17.h\n"
- "ld1h { z2.h }, p3/Z, [x24, x9, LSL #1]\n"
- "ld1h { z1.h }, p3/Z, [x23, x9, LSL #1]\n"
+ "ld1h { z2.h }, p3/Z, [x23, x28, LSL #1]\n"
+ "ld1h { z1.h }, p3/Z, [x22, x28, LSL #1]\n"
"fadd z20.h, z25.h, z20.h\n"
"fadd z16.h, z24.h, z16.h\n"
- "ld1h { z0.h }, p3/Z, [x22, x9, LSL #1]\n"
- "ld1h { z31.h }, p3/Z, [x21, x9, LSL #1]\n"
+ "ld1h { z0.h }, p3/Z, [x21, x28, LSL #1]\n"
+ "ld1h { z31.h }, p3/Z, [x20, x28, LSL #1]\n"
"fadd z19.h, z23.h, z19.h\n"
"fadd z18.h, z22.h, z18.h\n"
- "ld1h { z30.h }, p2/Z, [x24, x28, LSL #1]\n"
- "ld1h { z22.h }, p2/Z, [x23, x28, LSL #1]\n"
+ "ld1h { z30.h }, p2/Z, [x23, x27, LSL #1]\n"
+ "ld1h { z22.h }, p2/Z, [x22, x27, LSL #1]\n"
"fadd z17.h, z21.h, z17.h\n"
"fadd z16.h, z20.h, z16.h\n"
- "ld1h { z29.h }, p2/Z, [x22, x28, LSL #1]\n"
- "ld1h { z28.h }, p2/Z, [x21, x28, LSL #1]\n"
+ "ld1h { z29.h }, p2/Z, [x21, x27, LSL #1]\n"
+ "ld1h { z28.h }, p2/Z, [x20, x27, LSL #1]\n"
"fadd z6.h, z6.h, z19.h\n"
"fadd z5.h, z5.h, z18.h\n"
- "ld1h { z27.h }, p1/Z, [x24, x27, LSL #1]\n"
- "ld1h { z21.h }, p1/Z, [x23, x27, LSL #1]\n"
+ "ld1h { z27.h }, p1/Z, [x23, x26, LSL #1]\n"
+ "ld1h { z21.h }, p1/Z, [x22, x26, LSL #1]\n"
"fadd z4.h, z4.h, z17.h\n"
"fadd z3.h, z3.h, z16.h\n"
- "ld1h { z26.h }, p1/Z, [x22, x27, LSL #1]\n"
- "ld1h { z17.h }, p1/Z, [x21, x27, LSL #1]\n"
- "ld1h { z25.h }, p0/Z, [x24, x26, LSL #1]\n"
- "ld1h { z20.h }, p0/Z, [x23, x26, LSL #1]\n"
- "ld1h { z24.h }, p0/Z, [x22, x26, LSL #1]\n"
- "ld1h { z16.h }, p0/Z, [x21, x26, LSL #1]\n"
+ "ld1h { z26.h }, p1/Z, [x21, x26, LSL #1]\n"
+ "ld1h { z17.h }, p1/Z, [x20, x26, LSL #1]\n"
+ "ld1h { z25.h }, p0/Z, [x23, x25, LSL #1]\n"
+ "ld1h { z20.h }, p0/Z, [x22, x25, LSL #1]\n"
+ "ld1h { z24.h }, p0/Z, [x21, x25, LSL #1]\n"
+ "ld1h { z16.h }, p0/Z, [x20, x25, LSL #1]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
"fadd z23.h, z2.h, z1.h\n"
@@ -138,65 +138,65 @@ void sve_fp16_nhwc_avg_generic_depthfirst_impl(
"fadd z4.h, z4.h, z17.h\n"
"fadd z3.h, z3.h, z16.h\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ld1h { z2.h }, p3/Z, [x24, x9, LSL #1]\n"
- "subs x21, x21, #0x1\n"
+ "ldr x23, [x19], #0x8\n"
+ "ld1h { z2.h }, p3/Z, [x23, x28, LSL #1]\n"
+ "subs x20, x20, #0x1\n"
"fadd z6.h, z6.h, z2.h\n"
- "ld1h { z30.h }, p2/Z, [x24, x28, LSL #1]\n"
- "ld1h { z27.h }, p1/Z, [x24, x27, LSL #1]\n"
+ "ld1h { z30.h }, p2/Z, [x23, x27, LSL #1]\n"
+ "ld1h { z27.h }, p1/Z, [x23, x26, LSL #1]\n"
"fadd z5.h, z5.h, z30.h\n"
"fadd z4.h, z4.h, z27.h\n"
- "ld1h { z25.h }, p0/Z, [x24, x26, LSL #1]\n"
+ "ld1h { z25.h }, p0/Z, [x23, x25, LSL #1]\n"
"fadd z3.h, z3.h, z25.h\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
"fmul z6.h, z6.h, z7.h\n"
"fmul z5.h, z5.h, z7.h\n"
- "st1h { z6.h }, p3, [%x[outptr], x9, LSL #1]\n"
+ "st1h { z6.h }, p3, [%x[outptr], x28, LSL #1]\n"
"fmul z4.h, z4.h, z7.h\n"
"fmul z3.h, z3.h, z7.h\n"
- "st1h { z5.h }, p2, [%x[outptr], x28, LSL #1]\n"
- "st1h { z4.h }, p1, [%x[outptr], x27, LSL #1]\n"
- "inch x9, ALL, MUL #4\n"
+ "st1h { z5.h }, p2, [%x[outptr], x27, LSL #1]\n"
+ "st1h { z4.h }, p1, [%x[outptr], x26, LSL #1]\n"
"inch x28, ALL, MUL #4\n"
- "st1h { z3.h }, p0, [%x[outptr], x26, LSL #1]\n"
- "inch x26, ALL, MUL #4\n"
- "whilelt p0.h, x26, %x[n_channels]\n"
"inch x27, ALL, MUL #4\n"
+ "st1h { z3.h }, p0, [%x[outptr], x25, LSL #1]\n"
+ "inch x25, ALL, MUL #4\n"
+ "whilelt p0.h, x25, %x[n_channels]\n"
+ "inch x26, ALL, MUL #4\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p3.h, x9, %x[n_channels]\n"
+ "whilelt p3.h, x28, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
- "lsr x25, %x[n_valid_cells], #0x2\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"mov z6.b, #0x0\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 11f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "subs x25, x25, #0x1\n"
- "add x20, x20, #0x20\n"
- "ld1h { z2.h }, p3/Z, [x24, x9, LSL #1]\n"
- "ld1h { z1.h }, p3/Z, [x23, x9, LSL #1]\n"
- "ld1h { z0.h }, p3/Z, [x22, x9, LSL #1]\n"
- "ld1h { z31.h }, p3/Z, [x21, x9, LSL #1]\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 11f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "ld1h { z2.h }, p3/Z, [x23, x28, LSL #1]\n"
+ "ld1h { z1.h }, p3/Z, [x22, x28, LSL #1]\n"
+ "ld1h { z0.h }, p3/Z, [x21, x28, LSL #1]\n"
+ "ld1h { z31.h }, p3/Z, [x20, x28, LSL #1]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
"fadd z23.h, z2.h, z1.h\n"
"fadd z19.h, z0.h, z31.h\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
"fadd z19.h, z23.h, z19.h\n"
- "subs x25, x25, #0x1\n"
+ "subs x24, x24, #0x1\n"
"fadd z6.h, z6.h, z19.h\n"
- "add x20, x20, #0x20\n"
- "ld1h { z2.h }, p3/Z, [x24, x9, LSL #1]\n"
- "ld1h { z1.h }, p3/Z, [x23, x9, LSL #1]\n"
- "ld1h { z0.h }, p3/Z, [x22, x9, LSL #1]\n"
- "ld1h { z31.h }, p3/Z, [x21, x9, LSL #1]\n"
+ "add x19, x19, #0x20\n"
+ "ld1h { z2.h }, p3/Z, [x23, x28, LSL #1]\n"
+ "ld1h { z1.h }, p3/Z, [x22, x28, LSL #1]\n"
+ "ld1h { z0.h }, p3/Z, [x21, x28, LSL #1]\n"
+ "ld1h { z31.h }, p3/Z, [x20, x28, LSL #1]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
"fadd z23.h, z2.h, z1.h\n"
@@ -204,24 +204,24 @@ void sve_fp16_nhwc_avg_generic_depthfirst_impl(
"fadd z19.h, z23.h, z19.h\n"
"fadd z6.h, z6.h, z19.h\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ld1h { z2.h }, p3/Z, [x24, x9, LSL #1]\n"
- "subs x21, x21, #0x1\n"
+ "ldr x23, [x19], #0x8\n"
+ "ld1h { z2.h }, p3/Z, [x23, x28, LSL #1]\n"
+ "subs x20, x20, #0x1\n"
"fadd z6.h, z6.h, z2.h\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
"fmul z6.h, z6.h, z7.h\n"
- "st1h { z6.h }, p3, [%x[outptr], x9, LSL #1]\n"
- "inch x9\n"
- "whilelt p3.h, x9, %x[n_channels]\n"
+ "st1h { z6.h }, p3, [%x[outptr], x28, LSL #1]\n"
+ "inch x28\n"
+ "whilelt p3.h, x28, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
:
: [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr), [rescale_ptr] "r" (&rescale_value)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
index 838cd3406c..cda3d4248a 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -63,80 +63,80 @@ void sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
pad_left, pad_top, pad_right, pad_bottom);
__asm__ __volatile__(
- "ldr x15, [%x[args], %[offsetof_n_channels]]\n"
- "ldr x21, [%x[args], %[offsetof_outptrs]]\n"
- "mov x14, #0x0\n"
- "whilelt p2.h, x14, x15\n"
- "ldr x20, [%x[args], %[offsetof_inptrs]]\n"
- "ldp x13, x12, [x21, #0x0]\n"
+ "ldr x14, [%x[args], %[offsetof_n_channels]]\n"
+ "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
+ "mov x13, #0x0\n"
+ "whilelt p2.h, x13, x14\n"
+ "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+ "ldp x12, x11, [x20, #0x0]\n"
"ptrue p1.b\n"
- "mov x11, #0x0\n"
- "ldp x10, x9, [x21, #0x10]\n"
- "ldp x28, x27, [x20, #0x0]\n"
- "ldp x26, x25, [x20, #0x10]\n"
- "ldp x24, x23, [x20, #0x20]\n"
- "ldp x22, x21, [x20, #0x30]\n"
- "ldr x20, [x20, #0x40]\n"
- "ld1h { z31.h }, p2/Z, [x27, x14, LSL #1]\n"
- "ld1h { z30.h }, p2/Z, [x24, x14, LSL #1]\n"
- "ld1h { z29.h }, p2/Z, [x21, x14, LSL #1]\n"
- "ld1h { z28.h }, p2/Z, [x25, x14, LSL #1]\n"
- "ld1h { z27.h }, p2/Z, [x28, x14, LSL #1]\n"
- "ld1h { z26.h }, p2/Z, [x26, x14, LSL #1]\n"
- "ld1h { z25.h }, p2/Z, [x23, x14, LSL #1]\n"
- "ld1h { z24.h }, p2/Z, [x22, x14, LSL #1]\n"
- "ld1h { z23.h }, p2/Z, [x20, x14, LSL #1]\n"
- "incw x14\n"
- "whilelt p2.h, x14, x15\n"
+ "mov x10, #0x0\n"
+ "ldp x9, x28, [x20, #0x10]\n"
+ "ldp x27, x26, [x19, #0x0]\n"
+ "ldp x25, x24, [x19, #0x10]\n"
+ "ldp x23, x22, [x19, #0x20]\n"
+ "ldp x21, x20, [x19, #0x30]\n"
+ "ldr x19, [x19, #0x40]\n"
+ "ld1h { z31.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "ld1h { z30.h }, p2/Z, [x23, x13, LSL #1]\n"
+ "ld1h { z29.h }, p2/Z, [x20, x13, LSL #1]\n"
+ "ld1h { z28.h }, p2/Z, [x24, x13, LSL #1]\n"
+ "ld1h { z27.h }, p2/Z, [x27, x13, LSL #1]\n"
+ "ld1h { z26.h }, p2/Z, [x22, x13, LSL #1]\n"
+ "ld1h { z25.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "ld1h { z24.h }, p2/Z, [x21, x13, LSL #1]\n"
+ "ld1h { z23.h }, p2/Z, [x19, x13, LSL #1]\n"
+ "incw x13\n"
+ "whilelt p2.h, x13, x14\n"
"b.none 2f\n"
"1:" // Vector: Loop
"movprfx z22, z31\n fmax z22.h, p1/M, z22.h, z30.h\n"
"movprfx z21, z30\n fmax z21.h, p1/M, z21.h, z29.h\n"
- "ld1h { z31.h }, p2/Z, [x27, x14, LSL #1]\n"
- "ld1h { z30.h }, p2/Z, [x24, x14, LSL #1]\n"
+ "ld1h { z31.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "ld1h { z30.h }, p2/Z, [x23, x13, LSL #1]\n"
"movprfx z20, z28\n fmax z20.h, p1/M, z20.h, z27.h\n"
- "movprfx z19, z26\n fmax z19.h, p1/M, z19.h, z25.h\n"
- "ld1h { z29.h }, p2/Z, [x21, x14, LSL #1]\n"
- "ld1h { z27.h }, p2/Z, [x28, x14, LSL #1]\n"
- "movprfx z17, z28\n fmax z17.h, p1/M, z17.h, z24.h\n"
- "movprfx z18, z25\n fmax z18.h, p1/M, z18.h, z23.h\n"
- "ld1h { z28.h }, p2/Z, [x25, x14, LSL #1]\n"
- "ld1h { z26.h }, p2/Z, [x26, x14, LSL #1]\n"
- "ld1h { z25.h }, p2/Z, [x23, x14, LSL #1]\n"
- "ld1h { z24.h }, p2/Z, [x22, x14, LSL #1]\n"
- "whilelt p0.h, x11, x15\n"
+ "movprfx z17, z26\n fmax z17.h, p1/M, z17.h, z25.h\n"
+ "ld1h { z29.h }, p2/Z, [x20, x13, LSL #1]\n"
+ "ld1h { z27.h }, p2/Z, [x27, x13, LSL #1]\n"
+ "movprfx z19, z24\n fmax z19.h, p1/M, z19.h, z28.h\n"
+ "movprfx z18, z26\n fmax z18.h, p1/M, z18.h, z23.h\n"
+ "ld1h { z28.h }, p2/Z, [x24, x13, LSL #1]\n"
+ "ld1h { z26.h }, p2/Z, [x22, x13, LSL #1]\n"
+ "ld1h { z25.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "ld1h { z24.h }, p2/Z, [x21, x13, LSL #1]\n"
+ "whilelt p0.h, x10, x14\n"
"movprfx z16, z22\n fmax z16.h, p1/M, z16.h, z20.h\n"
- "ld1h { z23.h }, p2/Z, [x20, x14, LSL #1]\n"
- "incw x14\n"
- "whilelt p2.h, x14, x15\n"
- "st1h { z16.h }, p0, [x13, x11, LSL #1]\n"
- "movprfx z16, z19\n fmax z16.h, p1/M, z16.h, z22.h\n"
- "fmax z17.h, p1/M, z17.h, z21.h\n"
- "st1h { z16.h }, p0, [x12, x11, LSL #1]\n"
- "movprfx z16, z18\n fmax z16.h, p1/M, z16.h, z21.h\n"
- "st1h { z17.h }, p0, [x10, x11, LSL #1]\n"
- "st1h { z16.h }, p0, [x9, x11, LSL #1]\n"
- "incw x11\n"
+ "ld1h { z23.h }, p2/Z, [x19, x13, LSL #1]\n"
+ "incw x13\n"
+ "whilelt p2.h, x13, x14\n"
+ "st1h { z16.h }, p0, [x12, x10, LSL #1]\n"
+ "movprfx z16, z17\n fmax z16.h, p1/M, z16.h, z22.h\n"
+ "movprfx z17, z21\n fmax z17.h, p1/M, z17.h, z19.h\n"
+ "st1h { z16.h }, p0, [x11, x10, LSL #1]\n"
+ "movprfx z16, z21\n fmax z16.h, p1/M, z16.h, z18.h\n"
+ "st1h { z17.h }, p0, [x9, x10, LSL #1]\n"
+ "st1h { z16.h }, p0, [x28, x10, LSL #1]\n"
+ "incw x10\n"
"b.any 1b\n"
"2:" // Vector: Tail
"movprfx z22, z31\n fmax z22.h, p1/M, z22.h, z30.h\n"
"movprfx z21, z30\n fmax z21.h, p1/M, z21.h, z29.h\n"
"movprfx z20, z28\n fmax z20.h, p1/M, z20.h, z27.h\n"
- "movprfx z19, z26\n fmax z19.h, p1/M, z19.h, z25.h\n"
- "movprfx z17, z28\n fmax z17.h, p1/M, z17.h, z24.h\n"
- "movprfx z18, z25\n fmax z18.h, p1/M, z18.h, z23.h\n"
- "whilelt p0.h, x11, x15\n"
+ "movprfx z17, z26\n fmax z17.h, p1/M, z17.h, z25.h\n"
+ "movprfx z19, z24\n fmax z19.h, p1/M, z19.h, z28.h\n"
+ "movprfx z18, z26\n fmax z18.h, p1/M, z18.h, z23.h\n"
+ "whilelt p0.h, x10, x14\n"
"movprfx z16, z22\n fmax z16.h, p1/M, z16.h, z20.h\n"
- "st1h { z16.h }, p0, [x13, x11, LSL #1]\n"
- "movprfx z16, z19\n fmax z16.h, p1/M, z16.h, z22.h\n"
- "fmax z17.h, p1/M, z17.h, z21.h\n"
- "st1h { z16.h }, p0, [x12, x11, LSL #1]\n"
- "movprfx z16, z18\n fmax z16.h, p1/M, z16.h, z21.h\n"
- "st1h { z17.h }, p0, [x10, x11, LSL #1]\n"
- "st1h { z16.h }, p0, [x9, x11, LSL #1]\n"
+ "st1h { z16.h }, p0, [x12, x10, LSL #1]\n"
+ "movprfx z16, z17\n fmax z16.h, p1/M, z16.h, z22.h\n"
+ "movprfx z17, z21\n fmax z17.h, p1/M, z17.h, z19.h\n"
+ "st1h { z16.h }, p0, [x11, x10, LSL #1]\n"
+ "movprfx z16, z21\n fmax z16.h, p1/M, z16.h, z18.h\n"
+ "st1h { z17.h }, p0, [x9, x10, LSL #1]\n"
+ "st1h { z16.h }, p0, [x28, x10, LSL #1]\n"
:
: [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
- : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
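For readers following the predication pattern in these kernels, the whilelt/ld1h/fmax/incw idiom above has a direct ACLE equivalent. A sketch, assuming a compiler that provides <arm_sve.h> (max2 and its arguments are illustrative, not the kernel's real interface):

    #include <arm_sve.h>
    #include <cstdint>

    void max2(const float16_t *a, const float16_t *b, float16_t *out, uint64_t n)
    {
        for (uint64_t i = 0; i < n; i += svcnth())           // svcnth() = fp16 lanes per vector
        {
            svbool_t pg = svwhilelt_b16(i, n);               // like "whilelt p2.h, x13, x14"
            svfloat16_t va = svld1_f16(pg, a + i);           // like "ld1h { z.h }, p2/Z, [...]"
            svfloat16_t vb = svld1_f16(pg, b + i);
            svst1_f16(pg, out + i, svmax_f16_m(pg, va, vb)); // like "fmax z.h, p1/M, z.h, z.h"
        }
    }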
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_generic_depthfirst/generic.cpp
index 9f1f9e7377..3b07befc23 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,82 +40,82 @@ void sve_fp16_nhwc_max_generic_depthfirst_impl(
)
{
__asm__ __volatile__(
- "mov x9, #0x0\n"
- "cnth x28\n"
- "cnth x27, ALL, MUL #2\n"
- "cnth x26, ALL, MUL #3\n"
- "whilelt p4.h, x9, %x[n_channels]\n"
- "whilelt p3.h, x28, %x[n_channels]\n"
- "whilelt p2.h, x27, %x[n_channels]\n"
- "whilelt p1.h, x26, %x[n_channels]\n"
+ "mov x28, #0x0\n"
+ "cnth x27\n"
+ "cnth x26, ALL, MUL #2\n"
+ "cnth x25, ALL, MUL #3\n"
+ "whilelt p4.h, x28, %x[n_channels]\n"
+ "whilelt p3.h, x27, %x[n_channels]\n"
+ "whilelt p2.h, x26, %x[n_channels]\n"
+ "whilelt p1.h, x25, %x[n_channels]\n"
"ptrue p0.b\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
- "lsr x25, %x[n_valid_cells], #0x2\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"mov z8.h, #0xfc00\n"
"mov z7.h, #0xfc00\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"mov z6.h, #0xfc00\n"
"mov z5.h, #0xfc00\n"
- "cbz x25, 4f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "subs x25, x25, #0x1\n"
- "add x20, x20, #0x20\n"
- "ld1h { z4.h }, p4/Z, [x24, x9, LSL #1]\n"
- "ld1h { z3.h }, p4/Z, [x23, x9, LSL #1]\n"
- "ld1h { z2.h }, p4/Z, [x22, x9, LSL #1]\n"
- "ld1h { z1.h }, p4/Z, [x21, x9, LSL #1]\n"
- "ld1h { z0.h }, p3/Z, [x24, x28, LSL #1]\n"
- "ld1h { z31.h }, p3/Z, [x23, x28, LSL #1]\n"
- "ld1h { z22.h }, p3/Z, [x22, x28, LSL #1]\n"
- "ld1h { z30.h }, p3/Z, [x21, x28, LSL #1]\n"
- "ld1h { z29.h }, p2/Z, [x24, x27, LSL #1]\n"
- "ld1h { z28.h }, p2/Z, [x23, x27, LSL #1]\n"
- "ld1h { z21.h }, p2/Z, [x22, x27, LSL #1]\n"
- "ld1h { z27.h }, p2/Z, [x21, x27, LSL #1]\n"
- "ld1h { z26.h }, p1/Z, [x24, x26, LSL #1]\n"
- "ld1h { z25.h }, p1/Z, [x23, x26, LSL #1]\n"
- "ld1h { z20.h }, p1/Z, [x22, x26, LSL #1]\n"
- "ld1h { z24.h }, p1/Z, [x21, x26, LSL #1]\n"
+ "cbz x24, 4f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "ld1h { z4.h }, p4/Z, [x23, x28, LSL #1]\n"
+ "ld1h { z3.h }, p4/Z, [x22, x28, LSL #1]\n"
+ "ld1h { z2.h }, p4/Z, [x21, x28, LSL #1]\n"
+ "ld1h { z1.h }, p4/Z, [x20, x28, LSL #1]\n"
+ "ld1h { z0.h }, p3/Z, [x23, x27, LSL #1]\n"
+ "ld1h { z31.h }, p3/Z, [x22, x27, LSL #1]\n"
+ "ld1h { z22.h }, p3/Z, [x21, x27, LSL #1]\n"
+ "ld1h { z30.h }, p3/Z, [x20, x27, LSL #1]\n"
+ "ld1h { z29.h }, p2/Z, [x23, x26, LSL #1]\n"
+ "ld1h { z28.h }, p2/Z, [x22, x26, LSL #1]\n"
+ "ld1h { z21.h }, p2/Z, [x21, x26, LSL #1]\n"
+ "ld1h { z27.h }, p2/Z, [x20, x26, LSL #1]\n"
+ "ld1h { z26.h }, p1/Z, [x23, x25, LSL #1]\n"
+ "ld1h { z25.h }, p1/Z, [x22, x25, LSL #1]\n"
+ "ld1h { z20.h }, p1/Z, [x21, x25, LSL #1]\n"
+ "ld1h { z24.h }, p1/Z, [x20, x25, LSL #1]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
"movprfx z19, z4\n fmax z19.h, p0/M, z19.h, z3.h\n"
"movprfx z23, z2\n fmax z23.h, p0/M, z23.h, z1.h\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
"movprfx z18, z0\n fmax z18.h, p0/M, z18.h, z31.h\n"
"fmax z22.h, p0/M, z22.h, z30.h\n"
- "ld1h { z4.h }, p4/Z, [x24, x9, LSL #1]\n"
- "ld1h { z3.h }, p4/Z, [x23, x9, LSL #1]\n"
+ "ld1h { z4.h }, p4/Z, [x23, x28, LSL #1]\n"
+ "ld1h { z3.h }, p4/Z, [x22, x28, LSL #1]\n"
"movprfx z17, z29\n fmax z17.h, p0/M, z17.h, z28.h\n"
"fmax z21.h, p0/M, z21.h, z27.h\n"
- "ld1h { z2.h }, p4/Z, [x22, x9, LSL #1]\n"
- "ld1h { z1.h }, p4/Z, [x21, x9, LSL #1]\n"
+ "ld1h { z2.h }, p4/Z, [x21, x28, LSL #1]\n"
+ "ld1h { z1.h }, p4/Z, [x20, x28, LSL #1]\n"
"movprfx z16, z26\n fmax z16.h, p0/M, z16.h, z25.h\n"
"fmax z20.h, p0/M, z20.h, z24.h\n"
- "ld1h { z0.h }, p3/Z, [x24, x28, LSL #1]\n"
- "ld1h { z31.h }, p3/Z, [x23, x28, LSL #1]\n"
+ "ld1h { z0.h }, p3/Z, [x23, x27, LSL #1]\n"
+ "ld1h { z31.h }, p3/Z, [x22, x27, LSL #1]\n"
"fmax z19.h, p0/M, z19.h, z23.h\n"
"fmax z18.h, p0/M, z18.h, z22.h\n"
- "ld1h { z22.h }, p3/Z, [x22, x28, LSL #1]\n"
- "ld1h { z30.h }, p3/Z, [x21, x28, LSL #1]\n"
+ "ld1h { z22.h }, p3/Z, [x21, x27, LSL #1]\n"
+ "ld1h { z30.h }, p3/Z, [x20, x27, LSL #1]\n"
"fmax z17.h, p0/M, z17.h, z21.h\n"
"fmax z16.h, p0/M, z16.h, z20.h\n"
- "ld1h { z29.h }, p2/Z, [x24, x27, LSL #1]\n"
- "ld1h { z28.h }, p2/Z, [x23, x27, LSL #1]\n"
- "subs x25, x25, #0x1\n"
+ "ld1h { z29.h }, p2/Z, [x23, x26, LSL #1]\n"
+ "ld1h { z28.h }, p2/Z, [x22, x26, LSL #1]\n"
+ "subs x24, x24, #0x1\n"
"fmax z8.h, p0/M, z8.h, z19.h\n"
- "ld1h { z21.h }, p2/Z, [x22, x27, LSL #1]\n"
- "ld1h { z27.h }, p2/Z, [x21, x27, LSL #1]\n"
+ "ld1h { z21.h }, p2/Z, [x21, x26, LSL #1]\n"
+ "ld1h { z27.h }, p2/Z, [x20, x26, LSL #1]\n"
"fmax z7.h, p0/M, z7.h, z18.h\n"
"fmax z6.h, p0/M, z6.h, z17.h\n"
- "ld1h { z26.h }, p1/Z, [x24, x26, LSL #1]\n"
- "ld1h { z25.h }, p1/Z, [x23, x26, LSL #1]\n"
+ "ld1h { z26.h }, p1/Z, [x23, x25, LSL #1]\n"
+ "ld1h { z25.h }, p1/Z, [x22, x25, LSL #1]\n"
"fmax z5.h, p0/M, z5.h, z16.h\n"
- "add x20, x20, #0x20\n"
- "ld1h { z20.h }, p1/Z, [x22, x26, LSL #1]\n"
- "ld1h { z24.h }, p1/Z, [x21, x26, LSL #1]\n"
+ "add x19, x19, #0x20\n"
+ "ld1h { z20.h }, p1/Z, [x21, x25, LSL #1]\n"
+ "ld1h { z24.h }, p1/Z, [x20, x25, LSL #1]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
"movprfx z19, z4\n fmax z19.h, p0/M, z19.h, z3.h\n"
@@ -135,61 +135,61 @@ void sve_fp16_nhwc_max_generic_depthfirst_impl(
"fmax z6.h, p0/M, z6.h, z17.h\n"
"fmax z5.h, p0/M, z5.h, z16.h\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ld1h { z4.h }, p4/Z, [x24, x9, LSL #1]\n"
- "subs x21, x21, #0x1\n"
+ "ldr x23, [x19], #0x8\n"
+ "ld1h { z4.h }, p4/Z, [x23, x28, LSL #1]\n"
+ "subs x20, x20, #0x1\n"
"fmax z8.h, p0/M, z8.h, z4.h\n"
- "ld1h { z0.h }, p3/Z, [x24, x28, LSL #1]\n"
- "ld1h { z29.h }, p2/Z, [x24, x27, LSL #1]\n"
+ "ld1h { z0.h }, p3/Z, [x23, x27, LSL #1]\n"
+ "ld1h { z29.h }, p2/Z, [x23, x26, LSL #1]\n"
"fmax z7.h, p0/M, z7.h, z0.h\n"
"fmax z6.h, p0/M, z6.h, z29.h\n"
- "ld1h { z26.h }, p1/Z, [x24, x26, LSL #1]\n"
+ "ld1h { z26.h }, p1/Z, [x23, x25, LSL #1]\n"
"fmax z5.h, p0/M, z5.h, z26.h\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "st1h { z8.h }, p4, [%x[outptr], x9, LSL #1]\n"
- "inch x9, ALL, MUL #4\n"
- "st1h { z7.h }, p3, [%x[outptr], x28, LSL #1]\n"
+ "st1h { z8.h }, p4, [%x[outptr], x28, LSL #1]\n"
"inch x28, ALL, MUL #4\n"
- "st1h { z6.h }, p2, [%x[outptr], x27, LSL #1]\n"
+ "st1h { z7.h }, p3, [%x[outptr], x27, LSL #1]\n"
"inch x27, ALL, MUL #4\n"
- "st1h { z5.h }, p1, [%x[outptr], x26, LSL #1]\n"
+ "st1h { z6.h }, p2, [%x[outptr], x26, LSL #1]\n"
"inch x26, ALL, MUL #4\n"
- "whilelt p1.h, x26, %x[n_channels]\n"
+ "st1h { z5.h }, p1, [%x[outptr], x25, LSL #1]\n"
+ "inch x25, ALL, MUL #4\n"
+ "whilelt p1.h, x25, %x[n_channels]\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p4.h, x9, %x[n_channels]\n"
+ "whilelt p4.h, x28, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
- "lsr x25, %x[n_valid_cells], #0x2\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"mov z8.h, #0xfc00\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 11f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "subs x25, x25, #0x1\n"
- "add x20, x20, #0x20\n"
- "ld1h { z4.h }, p4/Z, [x24, x9, LSL #1]\n"
- "ld1h { z3.h }, p4/Z, [x23, x9, LSL #1]\n"
- "ld1h { z2.h }, p4/Z, [x22, x9, LSL #1]\n"
- "ld1h { z1.h }, p4/Z, [x21, x9, LSL #1]\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 11f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "ld1h { z4.h }, p4/Z, [x23, x28, LSL #1]\n"
+ "ld1h { z3.h }, p4/Z, [x22, x28, LSL #1]\n"
+ "ld1h { z2.h }, p4/Z, [x21, x28, LSL #1]\n"
+ "ld1h { z1.h }, p4/Z, [x20, x28, LSL #1]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
"movprfx z19, z4\n fmax z19.h, p0/M, z19.h, z3.h\n"
"movprfx z23, z2\n fmax z23.h, p0/M, z23.h, z1.h\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
"fmax z19.h, p0/M, z19.h, z23.h\n"
- "subs x25, x25, #0x1\n"
- "ld1h { z4.h }, p4/Z, [x24, x9, LSL #1]\n"
- "ld1h { z3.h }, p4/Z, [x23, x9, LSL #1]\n"
+ "subs x24, x24, #0x1\n"
+ "ld1h { z4.h }, p4/Z, [x23, x28, LSL #1]\n"
+ "ld1h { z3.h }, p4/Z, [x22, x28, LSL #1]\n"
"fmax z8.h, p0/M, z8.h, z19.h\n"
- "add x20, x20, #0x20\n"
- "ld1h { z2.h }, p4/Z, [x22, x9, LSL #1]\n"
- "ld1h { z1.h }, p4/Z, [x21, x9, LSL #1]\n"
+ "add x19, x19, #0x20\n"
+ "ld1h { z2.h }, p4/Z, [x21, x28, LSL #1]\n"
+ "ld1h { z1.h }, p4/Z, [x20, x28, LSL #1]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
"movprfx z19, z4\n fmax z19.h, p0/M, z19.h, z3.h\n"
@@ -197,23 +197,23 @@ void sve_fp16_nhwc_max_generic_depthfirst_impl(
"fmax z19.h, p0/M, z19.h, z23.h\n"
"fmax z8.h, p0/M, z8.h, z19.h\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ld1h { z4.h }, p4/Z, [x24, x9, LSL #1]\n"
- "subs x21, x21, #0x1\n"
+ "ldr x23, [x19], #0x8\n"
+ "ld1h { z4.h }, p4/Z, [x23, x28, LSL #1]\n"
+ "subs x20, x20, #0x1\n"
"fmax z8.h, p0/M, z8.h, z4.h\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "st1h { z8.h }, p4, [%x[outptr], x9, LSL #1]\n"
- "inch x9\n"
- "whilelt p4.h, x9, %x[n_channels]\n"
+ "st1h { z8.h }, p4, [%x[outptr], x28, LSL #1]\n"
+ "inch x28\n"
+ "whilelt p4.h, x28, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
:
: [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
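The "4-vectors of channels" blocks in the generic kernels unroll the channel loop four SVE vectors wide, keep one running maximum per vector (seeded with 0xfc00/0xff800000, the fp16/fp32 bit patterns of negative infinity), and fall back to a single predicated vector for the tail. A one-vector sketch of that reduction, with illustrative names and assuming <arm_sve.h>:

    #include <arm_sve.h>
    #include <cmath>
    #include <cstdint>

    void rowwise_max(const float *const *rows, int n_rows, float *out, uint64_t n_ch)
    {
        for (uint64_t i = 0; i < n_ch; i += svcntw())
        {
            svbool_t pg = svwhilelt_b32(i, n_ch);
            svfloat32_t acc = svdup_n_f32(-INFINITY);        // like "mov z8.s, #0xff800000"
            for (int r = 0; r < n_rows; ++r)                 // one fmax per valid cell
                acc = svmax_f32_m(pg, acc, svld1_f32(pg, rows[r] + i));
            svst1_f32(pg, out + i, acc);
        }
    }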
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
index 39197aa04d..cd765b3669 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -82,96 +82,96 @@ void sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(
pad_left, pad_top, pad_right, pad_bottom);
__asm__ __volatile__(
- "ldr x2, [%x[args], %[offsetof_n_channels]]\n"
- "ldr x21, [%x[args], %[offsetof_outptrs]]\n"
- "mov x3, #0x0\n"
- "mov x20, #0x4\n"
- "ldr x4, [%x[args], %[offsetof_inptrs]]\n"
- "ldp x5, x6, [x21, #0x0]\n"
- "whilelt p0.s, XZR, x20\n"
- "whilelt p1.s, x3, x2\n"
- "ldp x7, x8, [x21, #0x10]\n"
- "ldp x17, x16, [x4, #0x0]\n"
- "add x15, %x[args], %[offsetof_rescale]\n"
- "mov x14, #0x0\n"
- "ldp x13, x12, [x4, #0x10]\n"
- "ldp x11, x10, [x4, #0x20]\n"
- "ldp x9, x28, [x4, #0x30]\n"
- "ldp x27, x26, [x4, #0x40]\n"
- "ldp x25, x24, [x4, #0x50]\n"
- "ldp x23, x22, [x4, #0x60]\n"
- "ldp x21, x20, [x4, #0x70]\n"
- "ld1w { z7.s }, p1/Z, [x10, x3, LSL #2]\n"
- "ld1w { z6.s }, p1/Z, [x9, x3, LSL #2]\n"
- "ld1w { z5.s }, p1/Z, [x26, x3, LSL #2]\n"
- "ld1w { z4.s }, p1/Z, [x25, x3, LSL #2]\n"
- "ld1w { z3.s }, p1/Z, [x16, x3, LSL #2]\n"
- "ld1w { z2.s }, p1/Z, [x13, x3, LSL #2]\n"
- "ld1w { z1.s }, p1/Z, [x11, x3, LSL #2]\n"
- "ld1w { z31.s }, p1/Z, [x27, x3, LSL #2]\n"
- "ld1w { z30.s }, p1/Z, [x28, x3, LSL #2]\n"
- "ld1w { z29.s }, p1/Z, [x24, x3, LSL #2]\n"
- "ld1w { z28.s }, p1/Z, [x22, x3, LSL #2]\n"
- "ld1w { z27.s }, p1/Z, [x21, x3, LSL #2]\n"
- "ld1w { z26.s }, p1/Z, [x17, x3, LSL #2]\n"
- "ld1w { z25.s }, p1/Z, [x12, x3, LSL #2]\n"
- "ld1w { z24.s }, p1/Z, [x23, x3, LSL #2]\n"
- "ld1w { z23.s }, p1/Z, [x20, x3, LSL #2]\n"
- "incw x3\n"
- "whilelt p1.s, x3, x2\n"
- "ld1rqw { z0.s }, p0/Z, [x15]\n"
+ "ldr x3, [%x[args], %[offsetof_n_channels]]\n"
+ "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
+ "mov x4, #0x0\n"
+ "mov x19, #0x4\n"
+ "ldr x5, [%x[args], %[offsetof_inptrs]]\n"
+ "ldp x6, x7, [x20, #0x0]\n"
+ "whilelt p0.s, XZR, x19\n"
+ "whilelt p1.s, x4, x3\n"
+ "ldp x8, x17, [x20, #0x10]\n"
+ "ldp x16, x15, [x5, #0x0]\n"
+ "add x14, %x[args], %[offsetof_rescale]\n"
+ "mov x13, #0x0\n"
+ "ldp x12, x11, [x5, #0x10]\n"
+ "ldp x10, x9, [x5, #0x20]\n"
+ "ldp x28, x27, [x5, #0x30]\n"
+ "ldp x26, x25, [x5, #0x40]\n"
+ "ldp x24, x23, [x5, #0x50]\n"
+ "ldp x22, x21, [x5, #0x60]\n"
+ "ldp x20, x19, [x5, #0x70]\n"
+ "ld1w { z7.s }, p1/Z, [x9, x4, LSL #2]\n"
+ "ld1w { z6.s }, p1/Z, [x28, x4, LSL #2]\n"
+ "ld1w { z5.s }, p1/Z, [x25, x4, LSL #2]\n"
+ "ld1w { z4.s }, p1/Z, [x24, x4, LSL #2]\n"
+ "ld1w { z3.s }, p1/Z, [x15, x4, LSL #2]\n"
+ "ld1w { z2.s }, p1/Z, [x12, x4, LSL #2]\n"
+ "ld1w { z1.s }, p1/Z, [x10, x4, LSL #2]\n"
+ "ld1w { z31.s }, p1/Z, [x26, x4, LSL #2]\n"
+ "ld1w { z30.s }, p1/Z, [x27, x4, LSL #2]\n"
+ "ld1w { z29.s }, p1/Z, [x23, x4, LSL #2]\n"
+ "ld1w { z28.s }, p1/Z, [x21, x4, LSL #2]\n"
+ "ld1w { z27.s }, p1/Z, [x20, x4, LSL #2]\n"
+ "ld1w { z26.s }, p1/Z, [x16, x4, LSL #2]\n"
+ "ld1w { z25.s }, p1/Z, [x11, x4, LSL #2]\n"
+ "ld1w { z24.s }, p1/Z, [x22, x4, LSL #2]\n"
+ "ld1w { z23.s }, p1/Z, [x19, x4, LSL #2]\n"
+ "incw x4\n"
+ "whilelt p1.s, x4, x3\n"
+ "ld1rqw { z0.s }, p0/Z, [x14]\n"
"b.none 2f\n"
"1:" // Vector: Loop
"fadd z17.s, z7.s, z6.s\n"
"fadd z16.s, z5.s, z4.s\n"
- "ld1w { z7.s }, p1/Z, [x10, x3, LSL #2]\n"
- "ld1w { z6.s }, p1/Z, [x9, x3, LSL #2]\n"
+ "ld1w { z7.s }, p1/Z, [x9, x4, LSL #2]\n"
+ "ld1w { z6.s }, p1/Z, [x28, x4, LSL #2]\n"
"fadd z19.s, z17.s, z16.s\n"
"fadd z18.s, z3.s, z2.s\n"
- "ld1w { z5.s }, p1/Z, [x26, x3, LSL #2]\n"
- "ld1w { z4.s }, p1/Z, [x25, x3, LSL #2]\n"
+ "ld1w { z5.s }, p1/Z, [x25, x4, LSL #2]\n"
+ "ld1w { z4.s }, p1/Z, [x24, x4, LSL #2]\n"
"fadd z17.s, z1.s, z31.s\n"
"fadd z22.s, z30.s, z29.s\n"
- "ld1w { z3.s }, p1/Z, [x16, x3, LSL #2]\n"
- "ld1w { z2.s }, p1/Z, [x13, x3, LSL #2]\n"
+ "ld1w { z3.s }, p1/Z, [x15, x4, LSL #2]\n"
+ "ld1w { z2.s }, p1/Z, [x12, x4, LSL #2]\n"
"fadd z16.s, z28.s, z27.s\n"
"fadd z21.s, z18.s, z19.s\n"
- "ld1w { z1.s }, p1/Z, [x11, x3, LSL #2]\n"
- "ld1w { z31.s }, p1/Z, [x27, x3, LSL #2]\n"
+ "ld1w { z1.s }, p1/Z, [x10, x4, LSL #2]\n"
+ "ld1w { z31.s }, p1/Z, [x26, x4, LSL #2]\n"
"fadd z20.s, z16.s, z19.s\n"
"fadd z19.s, z26.s, z17.s\n"
- "ld1w { z30.s }, p1/Z, [x28, x3, LSL #2]\n"
- "ld1w { z29.s }, p1/Z, [x24, x3, LSL #2]\n"
+ "ld1w { z30.s }, p1/Z, [x27, x4, LSL #2]\n"
+ "ld1w { z29.s }, p1/Z, [x23, x4, LSL #2]\n"
"fadd z18.s, z25.s, z22.s\n"
"fadd z17.s, z24.s, z17.s\n"
- "ld1w { z28.s }, p1/Z, [x22, x3, LSL #2]\n"
- "ld1w { z27.s }, p1/Z, [x21, x3, LSL #2]\n"
+ "ld1w { z28.s }, p1/Z, [x21, x4, LSL #2]\n"
+ "ld1w { z27.s }, p1/Z, [x20, x4, LSL #2]\n"
"fadd z16.s, z23.s, z22.s\n"
- "ld1w { z26.s }, p1/Z, [x17, x3, LSL #2]\n"
- "ld1w { z25.s }, p1/Z, [x12, x3, LSL #2]\n"
- "fadd z19.s, z21.s, z19.s\n"
- "ld1w { z24.s }, p1/Z, [x23, x3, LSL #2]\n"
- "ld1w { z23.s }, p1/Z, [x20, x3, LSL #2]\n"
- "incw x3\n"
- "fadd z18.s, z21.s, z18.s\n"
+ "ld1w { z26.s }, p1/Z, [x16, x4, LSL #2]\n"
+ "ld1w { z25.s }, p1/Z, [x11, x4, LSL #2]\n"
+ "fadd z19.s, z19.s, z21.s\n"
+ "ld1w { z24.s }, p1/Z, [x22, x4, LSL #2]\n"
+ "ld1w { z23.s }, p1/Z, [x19, x4, LSL #2]\n"
+ "incw x4\n"
+ "fadd z18.s, z18.s, z21.s\n"
"fadd z17.s, z17.s, z20.s\n"
"fadd z16.s, z16.s, z20.s\n"
- "whilelt p0.s, x14, x2\n"
- "whilelt p1.s, x3, x2\n"
+ "whilelt p0.s, x13, x3\n"
+ "whilelt p1.s, x4, x3\n"
"fmul z19.s, z19.s, z0.s[0]\n"
"fmul z18.s, z18.s, z0.s[1]\n"
- "st1w { z19.s }, p0, [x5, x14, LSL #2]\n"
+ "st1w { z19.s }, p0, [x6, x13, LSL #2]\n"
"fmul z17.s, z17.s, z0.s[2]\n"
"fmul z16.s, z16.s, z0.s[3]\n"
- "st1w { z18.s }, p0, [x6, x14, LSL #2]\n"
- "st1w { z17.s }, p0, [x7, x14, LSL #2]\n"
- "st1w { z16.s }, p0, [x8, x14, LSL #2]\n"
- "incw x14\n"
+ "st1w { z18.s }, p0, [x7, x13, LSL #2]\n"
+ "st1w { z17.s }, p0, [x8, x13, LSL #2]\n"
+ "st1w { z16.s }, p0, [x17, x13, LSL #2]\n"
+ "incw x13\n"
"b.any 1b\n"
"2:" // Vector: Tail
"fadd z17.s, z7.s, z6.s\n"
"fadd z16.s, z5.s, z4.s\n"
- "whilelt p0.s, x14, x2\n"
+ "whilelt p0.s, x13, x3\n"
"fadd z19.s, z17.s, z16.s\n"
"fadd z18.s, z3.s, z2.s\n"
"fadd z17.s, z1.s, z31.s\n"
@@ -183,21 +183,21 @@ void sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(
"fadd z18.s, z25.s, z22.s\n"
"fadd z17.s, z24.s, z17.s\n"
"fadd z16.s, z23.s, z22.s\n"
- "fadd z19.s, z21.s, z19.s\n"
+ "fadd z19.s, z19.s, z21.s\n"
"fmul z19.s, z19.s, z0.s[0]\n"
- "st1w { z19.s }, p0, [x5, x14, LSL #2]\n"
- "fadd z18.s, z21.s, z18.s\n"
+ "st1w { z19.s }, p0, [x6, x13, LSL #2]\n"
+ "fadd z18.s, z18.s, z21.s\n"
"fadd z17.s, z17.s, z20.s\n"
"fmul z18.s, z18.s, z0.s[1]\n"
"fmul z17.s, z17.s, z0.s[2]\n"
"fadd z16.s, z16.s, z20.s\n"
"fmul z16.s, z16.s, z0.s[3]\n"
- "st1w { z18.s }, p0, [x6, x14, LSL #2]\n"
- "st1w { z17.s }, p0, [x7, x14, LSL #2]\n"
- "st1w { z16.s }, p0, [x8, x14, LSL #2]\n"
+ "st1w { z18.s }, p0, [x7, x13, LSL #2]\n"
+ "st1w { z17.s }, p0, [x8, x13, LSL #2]\n"
+ "st1w { z16.s }, p0, [x17, x13, LSL #2]\n"
:
: [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs)), [offsetof_rescale] "I" (offsetof(KernelArgs, rescale_vals))
- : "cc", "memory", "p0", "p1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
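As context for the arithmetic above: each of the four outputs in the 2x2 tile is the mean of a 3x3 input window, and the fadd tree builds the four overlapping window sums before one fmul per output applies its entry from rescale_vals (edge windows contribute fewer cells, hence a per-output factor rather than a constant 1/9). A scalar sketch of one output, with made-up names:

    float avg3x3(const float *in, int row_stride, float rescale)
    {
        float sum = 0.0f;
        for (int r = 0; r < 3; ++r)          // 3x3 window starting at "in"
            for (int c = 0; c < 3; ++c)
                sum += in[r * row_stride + c];
        return sum * rescale;                // rescale = 1/9 for a full interior window
    }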
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_generic_depthfirst/generic.cpp
index c1a3e5de84..bb60fe8472 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,83 +42,83 @@ void sve_fp32_nhwc_avg_generic_depthfirst_impl(
const auto rescale_value = static_cast<float>(1.0f / static_cast<float>(window_cells));
__asm__ __volatile__(
- "mov x9, #0x0\n"
- "cntw x28\n"
- "cntw x27, ALL, MUL #2\n"
- "cntw x26, ALL, MUL #3\n"
+ "mov x28, #0x0\n"
+ "cntw x27\n"
+ "cntw x26, ALL, MUL #2\n"
+ "cntw x25, ALL, MUL #3\n"
"ptrue p0.b\n"
- "whilelt p3.s, x9, %x[n_channels]\n"
+ "whilelt p3.s, x28, %x[n_channels]\n"
"ld1rw { z7.s }, p0/Z, [%x[rescale_ptr]]\n"
- "whilelt p2.s, x28, %x[n_channels]\n"
- "whilelt p1.s, x27, %x[n_channels]\n"
- "whilelt p0.s, x26, %x[n_channels]\n"
+ "whilelt p2.s, x27, %x[n_channels]\n"
+ "whilelt p1.s, x26, %x[n_channels]\n"
+ "whilelt p0.s, x25, %x[n_channels]\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
- "lsr x25, %x[n_valid_cells], #0x2\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"mov z6.b, #0x0\n"
"mov z5.b, #0x0\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"mov z4.b, #0x0\n"
"mov z3.b, #0x0\n"
- "cbz x25, 4f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "subs x25, x25, #0x1\n"
- "add x20, x20, #0x20\n"
- "ld1w { z2.s }, p3/Z, [x24, x9, LSL #2]\n"
- "ld1w { z1.s }, p3/Z, [x23, x9, LSL #2]\n"
- "ld1w { z0.s }, p3/Z, [x22, x9, LSL #2]\n"
- "ld1w { z31.s }, p3/Z, [x21, x9, LSL #2]\n"
- "ld1w { z30.s }, p2/Z, [x24, x28, LSL #2]\n"
- "ld1w { z22.s }, p2/Z, [x23, x28, LSL #2]\n"
- "ld1w { z29.s }, p2/Z, [x22, x28, LSL #2]\n"
- "ld1w { z28.s }, p2/Z, [x21, x28, LSL #2]\n"
- "ld1w { z27.s }, p1/Z, [x24, x27, LSL #2]\n"
- "ld1w { z21.s }, p1/Z, [x23, x27, LSL #2]\n"
- "ld1w { z26.s }, p1/Z, [x22, x27, LSL #2]\n"
- "ld1w { z17.s }, p1/Z, [x21, x27, LSL #2]\n"
- "ld1w { z25.s }, p0/Z, [x24, x26, LSL #2]\n"
- "ld1w { z20.s }, p0/Z, [x23, x26, LSL #2]\n"
- "ld1w { z24.s }, p0/Z, [x22, x26, LSL #2]\n"
- "ld1w { z16.s }, p0/Z, [x21, x26, LSL #2]\n"
+ "cbz x24, 4f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "ld1w { z2.s }, p3/Z, [x23, x28, LSL #2]\n"
+ "ld1w { z1.s }, p3/Z, [x22, x28, LSL #2]\n"
+ "ld1w { z0.s }, p3/Z, [x21, x28, LSL #2]\n"
+ "ld1w { z31.s }, p3/Z, [x20, x28, LSL #2]\n"
+ "ld1w { z30.s }, p2/Z, [x23, x27, LSL #2]\n"
+ "ld1w { z22.s }, p2/Z, [x22, x27, LSL #2]\n"
+ "ld1w { z29.s }, p2/Z, [x21, x27, LSL #2]\n"
+ "ld1w { z28.s }, p2/Z, [x20, x27, LSL #2]\n"
+ "ld1w { z27.s }, p1/Z, [x23, x26, LSL #2]\n"
+ "ld1w { z21.s }, p1/Z, [x22, x26, LSL #2]\n"
+ "ld1w { z26.s }, p1/Z, [x21, x26, LSL #2]\n"
+ "ld1w { z17.s }, p1/Z, [x20, x26, LSL #2]\n"
+ "ld1w { z25.s }, p0/Z, [x23, x25, LSL #2]\n"
+ "ld1w { z20.s }, p0/Z, [x22, x25, LSL #2]\n"
+ "ld1w { z24.s }, p0/Z, [x21, x25, LSL #2]\n"
+ "ld1w { z16.s }, p0/Z, [x20, x25, LSL #2]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
"fadd z23.s, z2.s, z1.s\n"
"fadd z19.s, z0.s, z31.s\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
"fadd z22.s, z30.s, z22.s\n"
"fadd z18.s, z29.s, z28.s\n"
- "subs x25, x25, #0x1\n"
- "add x20, x20, #0x20\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
"fadd z21.s, z27.s, z21.s\n"
"fadd z17.s, z26.s, z17.s\n"
- "ld1w { z2.s }, p3/Z, [x24, x9, LSL #2]\n"
- "ld1w { z1.s }, p3/Z, [x23, x9, LSL #2]\n"
+ "ld1w { z2.s }, p3/Z, [x23, x28, LSL #2]\n"
+ "ld1w { z1.s }, p3/Z, [x22, x28, LSL #2]\n"
"fadd z20.s, z25.s, z20.s\n"
"fadd z16.s, z24.s, z16.s\n"
- "ld1w { z0.s }, p3/Z, [x22, x9, LSL #2]\n"
- "ld1w { z31.s }, p3/Z, [x21, x9, LSL #2]\n"
+ "ld1w { z0.s }, p3/Z, [x21, x28, LSL #2]\n"
+ "ld1w { z31.s }, p3/Z, [x20, x28, LSL #2]\n"
"fadd z19.s, z23.s, z19.s\n"
"fadd z18.s, z22.s, z18.s\n"
- "ld1w { z30.s }, p2/Z, [x24, x28, LSL #2]\n"
- "ld1w { z22.s }, p2/Z, [x23, x28, LSL #2]\n"
+ "ld1w { z30.s }, p2/Z, [x23, x27, LSL #2]\n"
+ "ld1w { z22.s }, p2/Z, [x22, x27, LSL #2]\n"
"fadd z17.s, z21.s, z17.s\n"
"fadd z16.s, z20.s, z16.s\n"
- "ld1w { z29.s }, p2/Z, [x22, x28, LSL #2]\n"
- "ld1w { z28.s }, p2/Z, [x21, x28, LSL #2]\n"
+ "ld1w { z29.s }, p2/Z, [x21, x27, LSL #2]\n"
+ "ld1w { z28.s }, p2/Z, [x20, x27, LSL #2]\n"
"fadd z6.s, z6.s, z19.s\n"
"fadd z5.s, z5.s, z18.s\n"
- "ld1w { z27.s }, p1/Z, [x24, x27, LSL #2]\n"
- "ld1w { z21.s }, p1/Z, [x23, x27, LSL #2]\n"
+ "ld1w { z27.s }, p1/Z, [x23, x26, LSL #2]\n"
+ "ld1w { z21.s }, p1/Z, [x22, x26, LSL #2]\n"
"fadd z4.s, z4.s, z17.s\n"
"fadd z3.s, z3.s, z16.s\n"
- "ld1w { z26.s }, p1/Z, [x22, x27, LSL #2]\n"
- "ld1w { z17.s }, p1/Z, [x21, x27, LSL #2]\n"
- "ld1w { z25.s }, p0/Z, [x24, x26, LSL #2]\n"
- "ld1w { z20.s }, p0/Z, [x23, x26, LSL #2]\n"
- "ld1w { z24.s }, p0/Z, [x22, x26, LSL #2]\n"
- "ld1w { z16.s }, p0/Z, [x21, x26, LSL #2]\n"
+ "ld1w { z26.s }, p1/Z, [x21, x26, LSL #2]\n"
+ "ld1w { z17.s }, p1/Z, [x20, x26, LSL #2]\n"
+ "ld1w { z25.s }, p0/Z, [x23, x25, LSL #2]\n"
+ "ld1w { z20.s }, p0/Z, [x22, x25, LSL #2]\n"
+ "ld1w { z24.s }, p0/Z, [x21, x25, LSL #2]\n"
+ "ld1w { z16.s }, p0/Z, [x20, x25, LSL #2]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
"fadd z23.s, z2.s, z1.s\n"
@@ -138,65 +138,65 @@ void sve_fp32_nhwc_avg_generic_depthfirst_impl(
"fadd z4.s, z4.s, z17.s\n"
"fadd z3.s, z3.s, z16.s\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ld1w { z2.s }, p3/Z, [x24, x9, LSL #2]\n"
- "subs x21, x21, #0x1\n"
+ "ldr x23, [x19], #0x8\n"
+ "ld1w { z2.s }, p3/Z, [x23, x28, LSL #2]\n"
+ "subs x20, x20, #0x1\n"
"fadd z6.s, z6.s, z2.s\n"
- "ld1w { z30.s }, p2/Z, [x24, x28, LSL #2]\n"
- "ld1w { z27.s }, p1/Z, [x24, x27, LSL #2]\n"
+ "ld1w { z30.s }, p2/Z, [x23, x27, LSL #2]\n"
+ "ld1w { z27.s }, p1/Z, [x23, x26, LSL #2]\n"
"fadd z5.s, z5.s, z30.s\n"
"fadd z4.s, z4.s, z27.s\n"
- "ld1w { z25.s }, p0/Z, [x24, x26, LSL #2]\n"
+ "ld1w { z25.s }, p0/Z, [x23, x25, LSL #2]\n"
"fadd z3.s, z3.s, z25.s\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
"fmul z6.s, z6.s, z7.s\n"
"fmul z5.s, z5.s, z7.s\n"
- "st1w { z6.s }, p3, [%x[outptr], x9, LSL #2]\n"
+ "st1w { z6.s }, p3, [%x[outptr], x28, LSL #2]\n"
"fmul z4.s, z4.s, z7.s\n"
"fmul z3.s, z3.s, z7.s\n"
- "st1w { z5.s }, p2, [%x[outptr], x28, LSL #2]\n"
- "st1w { z4.s }, p1, [%x[outptr], x27, LSL #2]\n"
- "incw x9, ALL, MUL #4\n"
+ "st1w { z5.s }, p2, [%x[outptr], x27, LSL #2]\n"
+ "st1w { z4.s }, p1, [%x[outptr], x26, LSL #2]\n"
"incw x28, ALL, MUL #4\n"
- "st1w { z3.s }, p0, [%x[outptr], x26, LSL #2]\n"
- "incw x26, ALL, MUL #4\n"
- "whilelt p0.s, x26, %x[n_channels]\n"
"incw x27, ALL, MUL #4\n"
+ "st1w { z3.s }, p0, [%x[outptr], x25, LSL #2]\n"
+ "incw x25, ALL, MUL #4\n"
+ "whilelt p0.s, x25, %x[n_channels]\n"
+ "incw x26, ALL, MUL #4\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p3.s, x9, %x[n_channels]\n"
+ "whilelt p3.s, x28, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
- "lsr x25, %x[n_valid_cells], #0x2\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"mov z6.b, #0x0\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 11f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "subs x25, x25, #0x1\n"
- "add x20, x20, #0x20\n"
- "ld1w { z2.s }, p3/Z, [x24, x9, LSL #2]\n"
- "ld1w { z1.s }, p3/Z, [x23, x9, LSL #2]\n"
- "ld1w { z0.s }, p3/Z, [x22, x9, LSL #2]\n"
- "ld1w { z31.s }, p3/Z, [x21, x9, LSL #2]\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 11f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "ld1w { z2.s }, p3/Z, [x23, x28, LSL #2]\n"
+ "ld1w { z1.s }, p3/Z, [x22, x28, LSL #2]\n"
+ "ld1w { z0.s }, p3/Z, [x21, x28, LSL #2]\n"
+ "ld1w { z31.s }, p3/Z, [x20, x28, LSL #2]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
"fadd z23.s, z2.s, z1.s\n"
"fadd z19.s, z0.s, z31.s\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
"fadd z19.s, z23.s, z19.s\n"
- "subs x25, x25, #0x1\n"
+ "subs x24, x24, #0x1\n"
"fadd z6.s, z6.s, z19.s\n"
- "add x20, x20, #0x20\n"
- "ld1w { z2.s }, p3/Z, [x24, x9, LSL #2]\n"
- "ld1w { z1.s }, p3/Z, [x23, x9, LSL #2]\n"
- "ld1w { z0.s }, p3/Z, [x22, x9, LSL #2]\n"
- "ld1w { z31.s }, p3/Z, [x21, x9, LSL #2]\n"
+ "add x19, x19, #0x20\n"
+ "ld1w { z2.s }, p3/Z, [x23, x28, LSL #2]\n"
+ "ld1w { z1.s }, p3/Z, [x22, x28, LSL #2]\n"
+ "ld1w { z0.s }, p3/Z, [x21, x28, LSL #2]\n"
+ "ld1w { z31.s }, p3/Z, [x20, x28, LSL #2]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
"fadd z23.s, z2.s, z1.s\n"
@@ -204,24 +204,24 @@ void sve_fp32_nhwc_avg_generic_depthfirst_impl(
"fadd z19.s, z23.s, z19.s\n"
"fadd z6.s, z6.s, z19.s\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ld1w { z2.s }, p3/Z, [x24, x9, LSL #2]\n"
- "subs x21, x21, #0x1\n"
+ "ldr x23, [x19], #0x8\n"
+ "ld1w { z2.s }, p3/Z, [x23, x28, LSL #2]\n"
+ "subs x20, x20, #0x1\n"
"fadd z6.s, z6.s, z2.s\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
"fmul z6.s, z6.s, z7.s\n"
- "st1w { z6.s }, p3, [%x[outptr], x9, LSL #2]\n"
- "incw x9\n"
- "whilelt p3.s, x9, %x[n_channels]\n"
+ "st1w { z6.s }, p3, [%x[outptr], x28, LSL #2]\n"
+ "incw x28\n"
+ "whilelt p3.s, x28, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
:
: [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr), [rescale_ptr] "r" (&rescale_value)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
index da0239cea8..122ee050e8 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -63,80 +63,80 @@ void sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
pad_left, pad_top, pad_right, pad_bottom);
__asm__ __volatile__(
- "ldr x15, [%x[args], %[offsetof_n_channels]]\n"
- "ldr x21, [%x[args], %[offsetof_outptrs]]\n"
- "mov x14, #0x0\n"
- "whilelt p2.s, x14, x15\n"
- "ldr x20, [%x[args], %[offsetof_inptrs]]\n"
- "ldp x13, x12, [x21, #0x0]\n"
+ "ldr x14, [%x[args], %[offsetof_n_channels]]\n"
+ "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
+ "mov x13, #0x0\n"
+ "whilelt p2.s, x13, x14\n"
+ "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+ "ldp x12, x11, [x20, #0x0]\n"
"ptrue p1.b\n"
- "mov x11, #0x0\n"
- "ldp x10, x9, [x21, #0x10]\n"
- "ldp x28, x27, [x20, #0x0]\n"
- "ldp x26, x25, [x20, #0x10]\n"
- "ldp x24, x23, [x20, #0x20]\n"
- "ldp x22, x21, [x20, #0x30]\n"
- "ldr x20, [x20, #0x40]\n"
- "ld1w { z31.s }, p2/Z, [x27, x14, LSL #2]\n"
- "ld1w { z30.s }, p2/Z, [x24, x14, LSL #2]\n"
- "ld1w { z29.s }, p2/Z, [x21, x14, LSL #2]\n"
- "ld1w { z28.s }, p2/Z, [x25, x14, LSL #2]\n"
- "ld1w { z27.s }, p2/Z, [x28, x14, LSL #2]\n"
- "ld1w { z26.s }, p2/Z, [x26, x14, LSL #2]\n"
- "ld1w { z25.s }, p2/Z, [x23, x14, LSL #2]\n"
- "ld1w { z24.s }, p2/Z, [x22, x14, LSL #2]\n"
- "ld1w { z23.s }, p2/Z, [x20, x14, LSL #2]\n"
- "incw x14\n"
- "whilelt p2.s, x14, x15\n"
+ "mov x10, #0x0\n"
+ "ldp x9, x28, [x20, #0x10]\n"
+ "ldp x27, x26, [x19, #0x0]\n"
+ "ldp x25, x24, [x19, #0x10]\n"
+ "ldp x23, x22, [x19, #0x20]\n"
+ "ldp x21, x20, [x19, #0x30]\n"
+ "ldr x19, [x19, #0x40]\n"
+ "ld1w { z31.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "ld1w { z30.s }, p2/Z, [x23, x13, LSL #2]\n"
+ "ld1w { z29.s }, p2/Z, [x20, x13, LSL #2]\n"
+ "ld1w { z28.s }, p2/Z, [x24, x13, LSL #2]\n"
+ "ld1w { z27.s }, p2/Z, [x27, x13, LSL #2]\n"
+ "ld1w { z26.s }, p2/Z, [x22, x13, LSL #2]\n"
+ "ld1w { z25.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "ld1w { z24.s }, p2/Z, [x21, x13, LSL #2]\n"
+ "ld1w { z23.s }, p2/Z, [x19, x13, LSL #2]\n"
+ "incw x13\n"
+ "whilelt p2.s, x13, x14\n"
"b.none 2f\n"
"1:" // Vector: Loop
"movprfx z22, z31\n fmax z22.s, p1/M, z22.s, z30.s\n"
"movprfx z21, z30\n fmax z21.s, p1/M, z21.s, z29.s\n"
- "ld1w { z31.s }, p2/Z, [x27, x14, LSL #2]\n"
- "ld1w { z30.s }, p2/Z, [x24, x14, LSL #2]\n"
+ "ld1w { z31.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "ld1w { z30.s }, p2/Z, [x23, x13, LSL #2]\n"
"movprfx z20, z28\n fmax z20.s, p1/M, z20.s, z27.s\n"
- "movprfx z19, z26\n fmax z19.s, p1/M, z19.s, z25.s\n"
- "ld1w { z29.s }, p2/Z, [x21, x14, LSL #2]\n"
- "ld1w { z27.s }, p2/Z, [x28, x14, LSL #2]\n"
- "movprfx z17, z28\n fmax z17.s, p1/M, z17.s, z24.s\n"
- "movprfx z18, z25\n fmax z18.s, p1/M, z18.s, z23.s\n"
- "ld1w { z28.s }, p2/Z, [x25, x14, LSL #2]\n"
- "ld1w { z26.s }, p2/Z, [x26, x14, LSL #2]\n"
- "ld1w { z25.s }, p2/Z, [x23, x14, LSL #2]\n"
- "ld1w { z24.s }, p2/Z, [x22, x14, LSL #2]\n"
- "whilelt p0.s, x11, x15\n"
+ "movprfx z17, z26\n fmax z17.s, p1/M, z17.s, z25.s\n"
+ "ld1w { z29.s }, p2/Z, [x20, x13, LSL #2]\n"
+ "ld1w { z27.s }, p2/Z, [x27, x13, LSL #2]\n"
+ "movprfx z19, z24\n fmax z19.s, p1/M, z19.s, z28.s\n"
+ "movprfx z18, z26\n fmax z18.s, p1/M, z18.s, z23.s\n"
+ "ld1w { z28.s }, p2/Z, [x24, x13, LSL #2]\n"
+ "ld1w { z26.s }, p2/Z, [x22, x13, LSL #2]\n"
+ "ld1w { z25.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "ld1w { z24.s }, p2/Z, [x21, x13, LSL #2]\n"
+ "whilelt p0.s, x10, x14\n"
"movprfx z16, z22\n fmax z16.s, p1/M, z16.s, z20.s\n"
- "ld1w { z23.s }, p2/Z, [x20, x14, LSL #2]\n"
- "incw x14\n"
- "whilelt p2.s, x14, x15\n"
- "st1w { z16.s }, p0, [x13, x11, LSL #2]\n"
- "movprfx z16, z19\n fmax z16.s, p1/M, z16.s, z22.s\n"
- "fmax z17.s, p1/M, z17.s, z21.s\n"
- "st1w { z16.s }, p0, [x12, x11, LSL #2]\n"
- "movprfx z16, z18\n fmax z16.s, p1/M, z16.s, z21.s\n"
- "st1w { z17.s }, p0, [x10, x11, LSL #2]\n"
- "st1w { z16.s }, p0, [x9, x11, LSL #2]\n"
- "incw x11\n"
+ "ld1w { z23.s }, p2/Z, [x19, x13, LSL #2]\n"
+ "incw x13\n"
+ "whilelt p2.s, x13, x14\n"
+ "st1w { z16.s }, p0, [x12, x10, LSL #2]\n"
+ "movprfx z16, z17\n fmax z16.s, p1/M, z16.s, z22.s\n"
+ "movprfx z17, z21\n fmax z17.s, p1/M, z17.s, z19.s\n"
+ "st1w { z16.s }, p0, [x11, x10, LSL #2]\n"
+ "movprfx z16, z21\n fmax z16.s, p1/M, z16.s, z18.s\n"
+ "st1w { z17.s }, p0, [x9, x10, LSL #2]\n"
+ "st1w { z16.s }, p0, [x28, x10, LSL #2]\n"
+ "incw x10\n"
"b.any 1b\n"
"2:" // Vector: Tail
"movprfx z22, z31\n fmax z22.s, p1/M, z22.s, z30.s\n"
"movprfx z21, z30\n fmax z21.s, p1/M, z21.s, z29.s\n"
"movprfx z20, z28\n fmax z20.s, p1/M, z20.s, z27.s\n"
- "movprfx z19, z26\n fmax z19.s, p1/M, z19.s, z25.s\n"
- "movprfx z17, z28\n fmax z17.s, p1/M, z17.s, z24.s\n"
- "movprfx z18, z25\n fmax z18.s, p1/M, z18.s, z23.s\n"
- "whilelt p0.s, x11, x15\n"
+ "movprfx z17, z26\n fmax z17.s, p1/M, z17.s, z25.s\n"
+ "movprfx z19, z24\n fmax z19.s, p1/M, z19.s, z28.s\n"
+ "movprfx z18, z26\n fmax z18.s, p1/M, z18.s, z23.s\n"
+ "whilelt p0.s, x10, x14\n"
"movprfx z16, z22\n fmax z16.s, p1/M, z16.s, z20.s\n"
- "st1w { z16.s }, p0, [x13, x11, LSL #2]\n"
- "movprfx z16, z19\n fmax z16.s, p1/M, z16.s, z22.s\n"
- "fmax z17.s, p1/M, z17.s, z21.s\n"
- "st1w { z16.s }, p0, [x12, x11, LSL #2]\n"
- "movprfx z16, z18\n fmax z16.s, p1/M, z16.s, z21.s\n"
- "st1w { z17.s }, p0, [x10, x11, LSL #2]\n"
- "st1w { z16.s }, p0, [x9, x11, LSL #2]\n"
+ "st1w { z16.s }, p0, [x12, x10, LSL #2]\n"
+ "movprfx z16, z17\n fmax z16.s, p1/M, z16.s, z22.s\n"
+ "movprfx z17, z21\n fmax z17.s, p1/M, z17.s, z19.s\n"
+ "st1w { z16.s }, p0, [x11, x10, LSL #2]\n"
+ "movprfx z16, z21\n fmax z16.s, p1/M, z16.s, z18.s\n"
+ "st1w { z17.s }, p0, [x9, x10, LSL #2]\n"
+ "st1w { z16.s }, p0, [x28, x10, LSL #2]\n"
:
: [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
- : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_generic_depthfirst/generic.cpp
index ddce2be62c..fefddae9e7 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,82 +40,82 @@ void sve_fp32_nhwc_max_generic_depthfirst_impl(
)
{
__asm__ __volatile__(
- "mov x9, #0x0\n"
- "cntw x28\n"
- "cntw x27, ALL, MUL #2\n"
- "cntw x26, ALL, MUL #3\n"
- "whilelt p4.s, x9, %x[n_channels]\n"
- "whilelt p3.s, x28, %x[n_channels]\n"
- "whilelt p2.s, x27, %x[n_channels]\n"
- "whilelt p1.s, x26, %x[n_channels]\n"
+ "mov x28, #0x0\n"
+ "cntw x27\n"
+ "cntw x26, ALL, MUL #2\n"
+ "cntw x25, ALL, MUL #3\n"
+ "whilelt p4.s, x28, %x[n_channels]\n"
+ "whilelt p3.s, x27, %x[n_channels]\n"
+ "whilelt p2.s, x26, %x[n_channels]\n"
+ "whilelt p1.s, x25, %x[n_channels]\n"
"ptrue p0.b\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
- "lsr x25, %x[n_valid_cells], #0x2\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"mov z8.s, #0xff800000\n"
"mov z7.s, #0xff800000\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"mov z6.s, #0xff800000\n"
"mov z5.s, #0xff800000\n"
- "cbz x25, 4f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "subs x25, x25, #0x1\n"
- "add x20, x20, #0x20\n"
- "ld1w { z4.s }, p4/Z, [x24, x9, LSL #2]\n"
- "ld1w { z3.s }, p4/Z, [x23, x9, LSL #2]\n"
- "ld1w { z2.s }, p4/Z, [x22, x9, LSL #2]\n"
- "ld1w { z1.s }, p4/Z, [x21, x9, LSL #2]\n"
- "ld1w { z0.s }, p3/Z, [x24, x28, LSL #2]\n"
- "ld1w { z31.s }, p3/Z, [x23, x28, LSL #2]\n"
- "ld1w { z22.s }, p3/Z, [x22, x28, LSL #2]\n"
- "ld1w { z30.s }, p3/Z, [x21, x28, LSL #2]\n"
- "ld1w { z29.s }, p2/Z, [x24, x27, LSL #2]\n"
- "ld1w { z28.s }, p2/Z, [x23, x27, LSL #2]\n"
- "ld1w { z21.s }, p2/Z, [x22, x27, LSL #2]\n"
- "ld1w { z27.s }, p2/Z, [x21, x27, LSL #2]\n"
- "ld1w { z26.s }, p1/Z, [x24, x26, LSL #2]\n"
- "ld1w { z25.s }, p1/Z, [x23, x26, LSL #2]\n"
- "ld1w { z20.s }, p1/Z, [x22, x26, LSL #2]\n"
- "ld1w { z24.s }, p1/Z, [x21, x26, LSL #2]\n"
+ "cbz x24, 4f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "ld1w { z4.s }, p4/Z, [x23, x28, LSL #2]\n"
+ "ld1w { z3.s }, p4/Z, [x22, x28, LSL #2]\n"
+ "ld1w { z2.s }, p4/Z, [x21, x28, LSL #2]\n"
+ "ld1w { z1.s }, p4/Z, [x20, x28, LSL #2]\n"
+ "ld1w { z0.s }, p3/Z, [x23, x27, LSL #2]\n"
+ "ld1w { z31.s }, p3/Z, [x22, x27, LSL #2]\n"
+ "ld1w { z22.s }, p3/Z, [x21, x27, LSL #2]\n"
+ "ld1w { z30.s }, p3/Z, [x20, x27, LSL #2]\n"
+ "ld1w { z29.s }, p2/Z, [x23, x26, LSL #2]\n"
+ "ld1w { z28.s }, p2/Z, [x22, x26, LSL #2]\n"
+ "ld1w { z21.s }, p2/Z, [x21, x26, LSL #2]\n"
+ "ld1w { z27.s }, p2/Z, [x20, x26, LSL #2]\n"
+ "ld1w { z26.s }, p1/Z, [x23, x25, LSL #2]\n"
+ "ld1w { z25.s }, p1/Z, [x22, x25, LSL #2]\n"
+ "ld1w { z20.s }, p1/Z, [x21, x25, LSL #2]\n"
+ "ld1w { z24.s }, p1/Z, [x20, x25, LSL #2]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
"movprfx z19, z4\n fmax z19.s, p0/M, z19.s, z3.s\n"
"movprfx z23, z2\n fmax z23.s, p0/M, z23.s, z1.s\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
"movprfx z18, z0\n fmax z18.s, p0/M, z18.s, z31.s\n"
"fmax z22.s, p0/M, z22.s, z30.s\n"
- "ld1w { z4.s }, p4/Z, [x24, x9, LSL #2]\n"
- "ld1w { z3.s }, p4/Z, [x23, x9, LSL #2]\n"
+ "ld1w { z4.s }, p4/Z, [x23, x28, LSL #2]\n"
+ "ld1w { z3.s }, p4/Z, [x22, x28, LSL #2]\n"
"movprfx z17, z29\n fmax z17.s, p0/M, z17.s, z28.s\n"
"fmax z21.s, p0/M, z21.s, z27.s\n"
- "ld1w { z2.s }, p4/Z, [x22, x9, LSL #2]\n"
- "ld1w { z1.s }, p4/Z, [x21, x9, LSL #2]\n"
+ "ld1w { z2.s }, p4/Z, [x21, x28, LSL #2]\n"
+ "ld1w { z1.s }, p4/Z, [x20, x28, LSL #2]\n"
"movprfx z16, z26\n fmax z16.s, p0/M, z16.s, z25.s\n"
"fmax z20.s, p0/M, z20.s, z24.s\n"
- "ld1w { z0.s }, p3/Z, [x24, x28, LSL #2]\n"
- "ld1w { z31.s }, p3/Z, [x23, x28, LSL #2]\n"
+ "ld1w { z0.s }, p3/Z, [x23, x27, LSL #2]\n"
+ "ld1w { z31.s }, p3/Z, [x22, x27, LSL #2]\n"
"fmax z19.s, p0/M, z19.s, z23.s\n"
"fmax z18.s, p0/M, z18.s, z22.s\n"
- "ld1w { z22.s }, p3/Z, [x22, x28, LSL #2]\n"
- "ld1w { z30.s }, p3/Z, [x21, x28, LSL #2]\n"
+ "ld1w { z22.s }, p3/Z, [x21, x27, LSL #2]\n"
+ "ld1w { z30.s }, p3/Z, [x20, x27, LSL #2]\n"
"fmax z17.s, p0/M, z17.s, z21.s\n"
"fmax z16.s, p0/M, z16.s, z20.s\n"
- "ld1w { z29.s }, p2/Z, [x24, x27, LSL #2]\n"
- "ld1w { z28.s }, p2/Z, [x23, x27, LSL #2]\n"
- "subs x25, x25, #0x1\n"
+ "ld1w { z29.s }, p2/Z, [x23, x26, LSL #2]\n"
+ "ld1w { z28.s }, p2/Z, [x22, x26, LSL #2]\n"
+ "subs x24, x24, #0x1\n"
"fmax z8.s, p0/M, z8.s, z19.s\n"
- "ld1w { z21.s }, p2/Z, [x22, x27, LSL #2]\n"
- "ld1w { z27.s }, p2/Z, [x21, x27, LSL #2]\n"
+ "ld1w { z21.s }, p2/Z, [x21, x26, LSL #2]\n"
+ "ld1w { z27.s }, p2/Z, [x20, x26, LSL #2]\n"
"fmax z7.s, p0/M, z7.s, z18.s\n"
"fmax z6.s, p0/M, z6.s, z17.s\n"
- "ld1w { z26.s }, p1/Z, [x24, x26, LSL #2]\n"
- "ld1w { z25.s }, p1/Z, [x23, x26, LSL #2]\n"
+ "ld1w { z26.s }, p1/Z, [x23, x25, LSL #2]\n"
+ "ld1w { z25.s }, p1/Z, [x22, x25, LSL #2]\n"
"fmax z5.s, p0/M, z5.s, z16.s\n"
- "add x20, x20, #0x20\n"
- "ld1w { z20.s }, p1/Z, [x22, x26, LSL #2]\n"
- "ld1w { z24.s }, p1/Z, [x21, x26, LSL #2]\n"
+ "add x19, x19, #0x20\n"
+ "ld1w { z20.s }, p1/Z, [x21, x25, LSL #2]\n"
+ "ld1w { z24.s }, p1/Z, [x20, x25, LSL #2]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
"movprfx z19, z4\n fmax z19.s, p0/M, z19.s, z3.s\n"
@@ -135,61 +135,61 @@ void sve_fp32_nhwc_max_generic_depthfirst_impl(
"fmax z6.s, p0/M, z6.s, z17.s\n"
"fmax z5.s, p0/M, z5.s, z16.s\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ld1w { z4.s }, p4/Z, [x24, x9, LSL #2]\n"
- "subs x21, x21, #0x1\n"
+ "ldr x23, [x19], #0x8\n"
+ "ld1w { z4.s }, p4/Z, [x23, x28, LSL #2]\n"
+ "subs x20, x20, #0x1\n"
"fmax z8.s, p0/M, z8.s, z4.s\n"
- "ld1w { z0.s }, p3/Z, [x24, x28, LSL #2]\n"
- "ld1w { z29.s }, p2/Z, [x24, x27, LSL #2]\n"
+ "ld1w { z0.s }, p3/Z, [x23, x27, LSL #2]\n"
+ "ld1w { z29.s }, p2/Z, [x23, x26, LSL #2]\n"
"fmax z7.s, p0/M, z7.s, z0.s\n"
"fmax z6.s, p0/M, z6.s, z29.s\n"
- "ld1w { z26.s }, p1/Z, [x24, x26, LSL #2]\n"
+ "ld1w { z26.s }, p1/Z, [x23, x25, LSL #2]\n"
"fmax z5.s, p0/M, z5.s, z26.s\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "st1w { z8.s }, p4, [%x[outptr], x9, LSL #2]\n"
- "incw x9, ALL, MUL #4\n"
- "st1w { z7.s }, p3, [%x[outptr], x28, LSL #2]\n"
+ "st1w { z8.s }, p4, [%x[outptr], x28, LSL #2]\n"
"incw x28, ALL, MUL #4\n"
- "st1w { z6.s }, p2, [%x[outptr], x27, LSL #2]\n"
+ "st1w { z7.s }, p3, [%x[outptr], x27, LSL #2]\n"
"incw x27, ALL, MUL #4\n"
- "st1w { z5.s }, p1, [%x[outptr], x26, LSL #2]\n"
+ "st1w { z6.s }, p2, [%x[outptr], x26, LSL #2]\n"
"incw x26, ALL, MUL #4\n"
- "whilelt p1.s, x26, %x[n_channels]\n"
+ "st1w { z5.s }, p1, [%x[outptr], x25, LSL #2]\n"
+ "incw x25, ALL, MUL #4\n"
+ "whilelt p1.s, x25, %x[n_channels]\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p4.s, x9, %x[n_channels]\n"
+ "whilelt p4.s, x28, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
- "lsr x25, %x[n_valid_cells], #0x2\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"mov z8.s, #0xff800000\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 11f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "subs x25, x25, #0x1\n"
- "add x20, x20, #0x20\n"
- "ld1w { z4.s }, p4/Z, [x24, x9, LSL #2]\n"
- "ld1w { z3.s }, p4/Z, [x23, x9, LSL #2]\n"
- "ld1w { z2.s }, p4/Z, [x22, x9, LSL #2]\n"
- "ld1w { z1.s }, p4/Z, [x21, x9, LSL #2]\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 11f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "ld1w { z4.s }, p4/Z, [x23, x28, LSL #2]\n"
+ "ld1w { z3.s }, p4/Z, [x22, x28, LSL #2]\n"
+ "ld1w { z2.s }, p4/Z, [x21, x28, LSL #2]\n"
+ "ld1w { z1.s }, p4/Z, [x20, x28, LSL #2]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
"movprfx z19, z4\n fmax z19.s, p0/M, z19.s, z3.s\n"
"movprfx z23, z2\n fmax z23.s, p0/M, z23.s, z1.s\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
"fmax z19.s, p0/M, z19.s, z23.s\n"
- "subs x25, x25, #0x1\n"
- "ld1w { z4.s }, p4/Z, [x24, x9, LSL #2]\n"
- "ld1w { z3.s }, p4/Z, [x23, x9, LSL #2]\n"
+ "subs x24, x24, #0x1\n"
+ "ld1w { z4.s }, p4/Z, [x23, x28, LSL #2]\n"
+ "ld1w { z3.s }, p4/Z, [x22, x28, LSL #2]\n"
"fmax z8.s, p0/M, z8.s, z19.s\n"
- "add x20, x20, #0x20\n"
- "ld1w { z2.s }, p4/Z, [x22, x9, LSL #2]\n"
- "ld1w { z1.s }, p4/Z, [x21, x9, LSL #2]\n"
+ "add x19, x19, #0x20\n"
+ "ld1w { z2.s }, p4/Z, [x21, x28, LSL #2]\n"
+ "ld1w { z1.s }, p4/Z, [x20, x28, LSL #2]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
"movprfx z19, z4\n fmax z19.s, p0/M, z19.s, z3.s\n"
@@ -197,23 +197,23 @@ void sve_fp32_nhwc_max_generic_depthfirst_impl(
"fmax z19.s, p0/M, z19.s, z23.s\n"
"fmax z8.s, p0/M, z8.s, z19.s\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ld1w { z4.s }, p4/Z, [x24, x9, LSL #2]\n"
- "subs x21, x21, #0x1\n"
+ "ldr x23, [x19], #0x8\n"
+ "ld1w { z4.s }, p4/Z, [x23, x28, LSL #2]\n"
+ "subs x20, x20, #0x1\n"
"fmax z8.s, p0/M, z8.s, z4.s\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "st1w { z8.s }, p4, [%x[outptr], x9, LSL #2]\n"
- "incw x9\n"
- "whilelt p4.s, x9, %x[n_channels]\n"
+ "st1w { z8.s }, p4, [%x[outptr], x28, LSL #2]\n"
+ "incw x28\n"
+ "whilelt p4.s, x28, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
:
: [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
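The s8 average kernel that follows cannot accumulate in 8 bits: it widens pairwise with saddlb/saddlt (8-bit to 16-bit) and saddwb/saddwt (16-bit into 32-bit), so the per-channel sums live in 32-bit lanes (z15..z0 below) until they are rescaled, clamped by the smin sequence, and narrowed back to bytes via trn1/st1b. A scalar sketch of the same widening accumulation, names illustrative:

    #include <cstdint>

    int32_t sum_s8(const int8_t *vals, int n)
    {
        int32_t acc = 0;                              // 32-bit lane, like z15..z0
        for (int i = 0; i < n; ++i)
            acc += static_cast<int32_t>(vals[i]);     // widen before adding
        return acc;                                   // later rescaled, clamped, narrowed to s8
    }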
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_avg_generic_depthfirst/generic.cpp
index 68bd831d63..dab142f3f4 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -95,21 +95,21 @@ void sve_s8_nhwc_avg_generic_depthfirst_impl(
}
__asm__ __volatile__(
- "mov x27, #0x0\n"
- "cntb x26\n"
- "cntb x25, ALL, MUL #2\n"
- "cntb x24, ALL, MUL #3\n"
- "whilelt p4.b, x27, %x[n_channels]\n"
- "whilelt p3.b, x26, %x[n_channels]\n"
- "whilelt p2.b, x25, %x[n_channels]\n"
- "whilelt p1.b, x24, %x[n_channels]\n"
+ "mov x26, #0x0\n"
+ "cntb x25\n"
+ "cntb x24, ALL, MUL #2\n"
+ "cntb x23, ALL, MUL #3\n"
+ "whilelt p4.b, x26, %x[n_channels]\n"
+ "whilelt p3.b, x25, %x[n_channels]\n"
+ "whilelt p2.b, x24, %x[n_channels]\n"
+ "whilelt p1.b, x23, %x[n_channels]\n"
"ptrue p0.b\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
- "lsr x23, %x[n_valid_cells], #0x1\n"
+ "lsr x22, %x[n_valid_cells], #0x1\n"
"mov z15.s, #0x0\n"
"mov z14.s, #0x0\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"mov z13.s, #0x0\n"
"mov z12.s, #0x0\n"
"mov z11.s, #0x0\n"
@@ -124,43 +124,43 @@ void sve_s8_nhwc_avg_generic_depthfirst_impl(
"mov z2.s, #0x0\n"
"mov z1.s, #0x0\n"
"mov z0.s, #0x0\n"
- "cbz x23, 4f\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "subs x23, x23, #0x1\n"
- "add x20, x20, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
- "ld1b { z30.b }, p4/Z, [x21, x27]\n"
- "ld1b { z29.b }, p3/Z, [x22, x26]\n"
- "ld1b { z28.b }, p3/Z, [x21, x26]\n"
- "ld1b { z27.b }, p2/Z, [x22, x25]\n"
- "ld1b { z26.b }, p2/Z, [x21, x25]\n"
- "ld1b { z25.b }, p1/Z, [x22, x24]\n"
- "ld1b { z24.b }, p1/Z, [x21, x24]\n"
+ "cbz x22, 4f\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
+ "add x19, x19, #0x10\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+ "ld1b { z29.b }, p3/Z, [x21, x25]\n"
+ "ld1b { z28.b }, p3/Z, [x20, x25]\n"
+ "ld1b { z27.b }, p2/Z, [x21, x24]\n"
+ "ld1b { z26.b }, p2/Z, [x20, x24]\n"
+ "ld1b { z25.b }, p1/Z, [x21, x23]\n"
+ "ld1b { z24.b }, p1/Z, [x20, x23]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 2 inputs loop
".inst 0x455e03f7 // saddlb z23.h, z31.b, z30.b\n"
".inst 0x455e07f6 // saddlt z22.h, z31.b, z30.b\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "subs x23, x23, #0x1\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
".inst 0x455c03b5 // saddlb z21.h, z29.b, z28.b\n"
".inst 0x455c07b4 // saddlt z20.h, z29.b, z28.b\n"
- "add x20, x20, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
+ "add x19, x19, #0x10\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
".inst 0x455a0373 // saddlb z19.h, z27.b, z26.b\n"
".inst 0x455a0772 // saddlt z18.h, z27.b, z26.b\n"
- "ld1b { z30.b }, p4/Z, [x21, x27]\n"
- "ld1b { z29.b }, p3/Z, [x22, x26]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+ "ld1b { z29.b }, p3/Z, [x21, x25]\n"
".inst 0x45580331 // saddlb z17.h, z25.b, z24.b\n"
".inst 0x45580730 // saddlt z16.h, z25.b, z24.b\n"
- "ld1b { z28.b }, p3/Z, [x21, x26]\n"
- "ld1b { z27.b }, p2/Z, [x22, x25]\n"
+ "ld1b { z28.b }, p3/Z, [x20, x25]\n"
+ "ld1b { z27.b }, p2/Z, [x21, x24]\n"
".inst 0x459741ef // saddwb z15.s, z15.s, z23.h\n"
".inst 0x459745ce // saddwt z14.s, z14.s, z23.h\n"
- "ld1b { z26.b }, p2/Z, [x21, x25]\n"
- "ld1b { z25.b }, p1/Z, [x22, x24]\n"
+ "ld1b { z26.b }, p2/Z, [x20, x24]\n"
+ "ld1b { z25.b }, p1/Z, [x21, x23]\n"
".inst 0x459641ad // saddwb z13.s, z13.s, z22.h\n"
".inst 0x4596458c // saddwt z12.s, z12.s, z22.h\n"
- "ld1b { z24.b }, p1/Z, [x21, x24]\n"
+ "ld1b { z24.b }, p1/Z, [x20, x23]\n"
".inst 0x4595416b // saddwb z11.s, z11.s, z21.h\n"
".inst 0x4595454a // saddwt z10.s, z10.s, z21.h\n"
".inst 0x45944129 // saddwb z9.s, z9.s, z20.h\n"
@@ -200,21 +200,21 @@ void sve_s8_nhwc_avg_generic_depthfirst_impl(
".inst 0x45904021 // saddwb z1.s, z1.s, z16.h\n"
".inst 0x45904400 // saddwt z0.s, z0.s, z16.h\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x1\n"
+ "ands x20, %x[n_valid_cells], #0x1\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x22, [x20], #0x8\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
+ "ldr x21, [x19], #0x8\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
".inst 0x4508a3f7 // sshllb z23.h, z31.b, #0x0\n"
".inst 0x4508a7f6 // sshllt z22.h, z31.b, #0x0\n"
- "ld1b { z29.b }, p3/Z, [x22, x26]\n"
- "ld1b { z27.b }, p2/Z, [x22, x25]\n"
+ "ld1b { z29.b }, p3/Z, [x21, x25]\n"
+ "ld1b { z27.b }, p2/Z, [x21, x24]\n"
".inst 0x4508a3b5 // sshllb z21.h, z29.b, #0x0\n"
".inst 0x4508a7b4 // sshllt z20.h, z29.b, #0x0\n"
- "ld1b { z25.b }, p1/Z, [x22, x24]\n"
+ "ld1b { z25.b }, p1/Z, [x21, x23]\n"
".inst 0x4508a373 // sshllb z19.h, z27.b, #0x0\n"
".inst 0x4508a772 // sshllt z18.h, z27.b, #0x0\n"
- "subs x21, x21, #0x1\n"
+ "subs x20, x20, #0x1\n"
".inst 0x4508a331 // sshllb z17.h, z25.b, #0x0\n"
".inst 0x4508a730 // sshllt z16.h, z25.b, #0x0\n"
".inst 0x459741ef // saddwb z15.s, z15.s, z23.h\n"
@@ -298,7 +298,7 @@ void sve_s8_nhwc_avg_generic_depthfirst_impl(
"smin z10.s, p0/M, z10.s, z18.s\n"
"smin z9.s, p0/M, z9.s, z18.s\n"
"trn1 z17.h, z11.h, z10.h\n"
- "st1b { z16.b }, p4, [%x[outptr], x27]\n"
+ "st1b { z16.b }, p4, [%x[outptr], x26]\n"
"smin z8.s, p0/M, z8.s, z18.s\n"
"smin z7.s, p0/M, z7.s, z18.s\n"
"trn1 z16.h, z9.h, z8.h\n"
@@ -306,7 +306,7 @@ void sve_s8_nhwc_avg_generic_depthfirst_impl(
"smin z6.s, p0/M, z6.s, z18.s\n"
"smin z5.s, p0/M, z5.s, z18.s\n"
"trn1 z17.h, z7.h, z6.h\n"
- "st1b { z16.b }, p3, [%x[outptr], x26]\n"
+ "st1b { z16.b }, p3, [%x[outptr], x25]\n"
"smin z4.s, p0/M, z4.s, z18.s\n"
"smin z3.s, p0/M, z3.s, z18.s\n"
"trn1 z16.h, z5.h, z4.h\n"
@@ -314,46 +314,46 @@ void sve_s8_nhwc_avg_generic_depthfirst_impl(
"smin z2.s, p0/M, z2.s, z18.s\n"
"smin z1.s, p0/M, z1.s, z18.s\n"
"trn1 z17.h, z3.h, z2.h\n"
- "st1b { z16.b }, p2, [%x[outptr], x25]\n"
+ "st1b { z16.b }, p2, [%x[outptr], x24]\n"
"smin z0.s, p0/M, z0.s, z18.s\n"
"trn1 z16.h, z1.h, z0.h\n"
"trn1 z16.b, z17.b, z16.b\n"
- "st1b { z16.b }, p1, [%x[outptr], x24]\n"
- "incb x24, ALL, MUL #4\n"
- "whilelt p1.b, x24, %x[n_channels]\n"
- "incb x27, ALL, MUL #4\n"
+ "st1b { z16.b }, p1, [%x[outptr], x23]\n"
+ "incb x23, ALL, MUL #4\n"
+ "whilelt p1.b, x23, %x[n_channels]\n"
"incb x26, ALL, MUL #4\n"
"incb x25, ALL, MUL #4\n"
+ "incb x24, ALL, MUL #4\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p4.b, x27, %x[n_channels]\n"
+ "whilelt p4.b, x26, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
- "lsr x23, %x[n_valid_cells], #0x1\n"
+ "lsr x22, %x[n_valid_cells], #0x1\n"
"mov z15.s, #0x0\n"
"mov z14.s, #0x0\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"mov z13.s, #0x0\n"
"mov z12.s, #0x0\n"
- "cbz x23, 11f\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "subs x23, x23, #0x1\n"
- "add x20, x20, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
- "ld1b { z30.b }, p4/Z, [x21, x27]\n"
+ "cbz x22, 11f\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
+ "add x19, x19, #0x10\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x26]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 2 inputs loop
".inst 0x455e03f7 // saddlb z23.h, z31.b, z30.b\n"
".inst 0x455e07f6 // saddlt z22.h, z31.b, z30.b\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "subs x23, x23, #0x1\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
".inst 0x459741ef // saddwb z15.s, z15.s, z23.h\n"
".inst 0x459745ce // saddwt z14.s, z14.s, z23.h\n"
- "add x20, x20, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
+ "add x19, x19, #0x10\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
".inst 0x459641ad // saddwb z13.s, z13.s, z22.h\n"
".inst 0x4596458c // saddwt z12.s, z12.s, z22.h\n"
- "ld1b { z30.b }, p4/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x26]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 2 inputs tail
".inst 0x455e03f7 // saddlb z23.h, z31.b, z30.b\n"
@@ -363,14 +363,14 @@ void sve_s8_nhwc_avg_generic_depthfirst_impl(
".inst 0x459641ad // saddwb z13.s, z13.s, z22.h\n"
".inst 0x4596458c // saddwt z12.s, z12.s, z22.h\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x1\n"
+ "ands x20, %x[n_valid_cells], #0x1\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x22, [x20], #0x8\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
+ "ldr x21, [x19], #0x8\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
".inst 0x4508a3f7 // sshllb z23.h, z31.b, #0x0\n"
".inst 0x4508a7f6 // sshllt z22.h, z31.b, #0x0\n"
- "subs x21, x21, #0x1\n"
+ "subs x20, x20, #0x1\n"
".inst 0x459741ef // saddwb z15.s, z15.s, z23.h\n"
".inst 0x459745ce // saddwt z14.s, z14.s, z23.h\n"
".inst 0x459641ad // saddwb z13.s, z13.s, z22.h\n"
@@ -400,14 +400,14 @@ void sve_s8_nhwc_avg_generic_depthfirst_impl(
"smin z12.s, p0/M, z12.s, z18.s\n"
"trn1 z16.h, z13.h, z12.h\n"
"trn1 z16.b, z17.b, z16.b\n"
- "st1b { z16.b }, p4, [%x[outptr], x27]\n"
- "incb x27\n"
- "whilelt p4.b, x27, %x[n_channels]\n"
+ "st1b { z16.b }, p4, [%x[outptr], x26]\n"
+ "incb x26\n"
+ "whilelt p4.b, x26, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
:
: [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr), [rescale_ptr] "r" (&rescale_value), [shift_ptr] "r" (&shift_value)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
index 96e20c752e..0cf37743d9 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -63,80 +63,80 @@ void sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
pad_left, pad_top, pad_right, pad_bottom);
__asm__ __volatile__(
- "ldr x15, [%x[args], %[offsetof_n_channels]]\n"
- "ldr x21, [%x[args], %[offsetof_outptrs]]\n"
- "mov x14, #0x0\n"
- "whilelt p2.b, x14, x15\n"
- "ldr x20, [%x[args], %[offsetof_inptrs]]\n"
- "ldp x13, x12, [x21, #0x0]\n"
+ "ldr x14, [%x[args], %[offsetof_n_channels]]\n"
+ "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
+ "mov x13, #0x0\n"
+ "whilelt p2.b, x13, x14\n"
+ "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+ "ldp x12, x11, [x20, #0x0]\n"
"ptrue p1.b\n"
- "mov x11, #0x0\n"
- "ldp x10, x9, [x21, #0x10]\n"
- "ldp x28, x27, [x20, #0x0]\n"
- "ldp x26, x25, [x20, #0x10]\n"
- "ldp x24, x23, [x20, #0x20]\n"
- "ldp x22, x21, [x20, #0x30]\n"
- "ldr x20, [x20, #0x40]\n"
- "ld1b { z31.b }, p2/Z, [x27, x14]\n"
- "ld1b { z30.b }, p2/Z, [x24, x14]\n"
- "ld1b { z29.b }, p2/Z, [x21, x14]\n"
- "ld1b { z28.b }, p2/Z, [x25, x14]\n"
- "ld1b { z27.b }, p2/Z, [x28, x14]\n"
- "ld1b { z26.b }, p2/Z, [x26, x14]\n"
- "ld1b { z25.b }, p2/Z, [x23, x14]\n"
- "ld1b { z24.b }, p2/Z, [x22, x14]\n"
- "ld1b { z23.b }, p2/Z, [x20, x14]\n"
- "incw x14\n"
- "whilelt p2.b, x14, x15\n"
+ "mov x10, #0x0\n"
+ "ldp x9, x28, [x20, #0x10]\n"
+ "ldp x27, x26, [x19, #0x0]\n"
+ "ldp x25, x24, [x19, #0x10]\n"
+ "ldp x23, x22, [x19, #0x20]\n"
+ "ldp x21, x20, [x19, #0x30]\n"
+ "ldr x19, [x19, #0x40]\n"
+ "ld1b { z31.b }, p2/Z, [x26, x13]\n"
+ "ld1b { z30.b }, p2/Z, [x23, x13]\n"
+ "ld1b { z29.b }, p2/Z, [x20, x13]\n"
+ "ld1b { z28.b }, p2/Z, [x24, x13]\n"
+ "ld1b { z27.b }, p2/Z, [x27, x13]\n"
+ "ld1b { z26.b }, p2/Z, [x22, x13]\n"
+ "ld1b { z25.b }, p2/Z, [x25, x13]\n"
+ "ld1b { z24.b }, p2/Z, [x21, x13]\n"
+ "ld1b { z23.b }, p2/Z, [x19, x13]\n"
+ "incw x13\n"
+ "whilelt p2.b, x13, x14\n"
"b.none 2f\n"
"1:" // Vector: Loop
"movprfx z22, z31\n smax z22.b, p1/M, z22.b, z30.b\n"
"movprfx z21, z30\n smax z21.b, p1/M, z21.b, z29.b\n"
- "ld1b { z31.b }, p2/Z, [x27, x14]\n"
- "ld1b { z30.b }, p2/Z, [x24, x14]\n"
+ "ld1b { z31.b }, p2/Z, [x26, x13]\n"
+ "ld1b { z30.b }, p2/Z, [x23, x13]\n"
"movprfx z20, z28\n smax z20.b, p1/M, z20.b, z27.b\n"
- "movprfx z19, z26\n smax z19.b, p1/M, z19.b, z25.b\n"
- "ld1b { z29.b }, p2/Z, [x21, x14]\n"
- "ld1b { z27.b }, p2/Z, [x28, x14]\n"
- "movprfx z17, z28\n smax z17.b, p1/M, z17.b, z24.b\n"
- "movprfx z18, z25\n smax z18.b, p1/M, z18.b, z23.b\n"
- "ld1b { z28.b }, p2/Z, [x25, x14]\n"
- "ld1b { z26.b }, p2/Z, [x26, x14]\n"
- "ld1b { z25.b }, p2/Z, [x23, x14]\n"
- "ld1b { z24.b }, p2/Z, [x22, x14]\n"
- "whilelt p0.b, x11, x15\n"
+ "movprfx z17, z26\n smax z17.b, p1/M, z17.b, z25.b\n"
+ "ld1b { z29.b }, p2/Z, [x20, x13]\n"
+ "ld1b { z27.b }, p2/Z, [x27, x13]\n"
+ "movprfx z19, z24\n smax z19.b, p1/M, z19.b, z28.b\n"
+ "movprfx z18, z26\n smax z18.b, p1/M, z18.b, z23.b\n"
+ "ld1b { z28.b }, p2/Z, [x24, x13]\n"
+ "ld1b { z26.b }, p2/Z, [x22, x13]\n"
+ "ld1b { z25.b }, p2/Z, [x25, x13]\n"
+ "ld1b { z24.b }, p2/Z, [x21, x13]\n"
+ "whilelt p0.b, x10, x14\n"
"movprfx z16, z22\n smax z16.b, p1/M, z16.b, z20.b\n"
- "ld1b { z23.b }, p2/Z, [x20, x14]\n"
- "incw x14\n"
- "whilelt p2.b, x14, x15\n"
- "st1b { z16.b }, p0, [x13, x11]\n"
- "movprfx z16, z19\n smax z16.b, p1/M, z16.b, z22.b\n"
- "smax z17.b, p1/M, z17.b, z21.b\n"
- "st1b { z16.b }, p0, [x12, x11]\n"
- "movprfx z16, z18\n smax z16.b, p1/M, z16.b, z21.b\n"
- "st1b { z17.b }, p0, [x10, x11]\n"
- "st1b { z16.b }, p0, [x9, x11]\n"
- "incw x11\n"
+ "ld1b { z23.b }, p2/Z, [x19, x13]\n"
+ "incw x13\n"
+ "whilelt p2.b, x13, x14\n"
+ "st1b { z16.b }, p0, [x12, x10]\n"
+ "movprfx z16, z17\n smax z16.b, p1/M, z16.b, z22.b\n"
+ "movprfx z17, z21\n smax z17.b, p1/M, z17.b, z19.b\n"
+ "st1b { z16.b }, p0, [x11, x10]\n"
+ "movprfx z16, z21\n smax z16.b, p1/M, z16.b, z18.b\n"
+ "st1b { z17.b }, p0, [x9, x10]\n"
+ "st1b { z16.b }, p0, [x28, x10]\n"
+ "incw x10\n"
"b.any 1b\n"
"2:" // Vector: Tail
"movprfx z22, z31\n smax z22.b, p1/M, z22.b, z30.b\n"
"movprfx z21, z30\n smax z21.b, p1/M, z21.b, z29.b\n"
"movprfx z20, z28\n smax z20.b, p1/M, z20.b, z27.b\n"
- "movprfx z19, z26\n smax z19.b, p1/M, z19.b, z25.b\n"
- "movprfx z17, z28\n smax z17.b, p1/M, z17.b, z24.b\n"
- "movprfx z18, z25\n smax z18.b, p1/M, z18.b, z23.b\n"
- "whilelt p0.b, x11, x15\n"
+ "movprfx z17, z26\n smax z17.b, p1/M, z17.b, z25.b\n"
+ "movprfx z19, z24\n smax z19.b, p1/M, z19.b, z28.b\n"
+ "movprfx z18, z26\n smax z18.b, p1/M, z18.b, z23.b\n"
+ "whilelt p0.b, x10, x14\n"
"movprfx z16, z22\n smax z16.b, p1/M, z16.b, z20.b\n"
- "st1b { z16.b }, p0, [x13, x11]\n"
- "movprfx z16, z19\n smax z16.b, p1/M, z16.b, z22.b\n"
- "smax z17.b, p1/M, z17.b, z21.b\n"
- "st1b { z16.b }, p0, [x12, x11]\n"
- "movprfx z16, z18\n smax z16.b, p1/M, z16.b, z21.b\n"
- "st1b { z17.b }, p0, [x10, x11]\n"
- "st1b { z16.b }, p0, [x9, x11]\n"
+ "st1b { z16.b }, p0, [x12, x10]\n"
+ "movprfx z16, z17\n smax z16.b, p1/M, z16.b, z22.b\n"
+ "movprfx z17, z21\n smax z17.b, p1/M, z17.b, z19.b\n"
+ "st1b { z16.b }, p0, [x11, x10]\n"
+ "movprfx z16, z21\n smax z16.b, p1/M, z16.b, z18.b\n"
+ "st1b { z17.b }, p0, [x9, x10]\n"
+ "st1b { z16.b }, p0, [x28, x10]\n"
:
: [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
- : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_generic_depthfirst/generic.cpp
index 7d14edddeb..3fd4828549 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,82 +40,82 @@ void sve_s8_nhwc_max_generic_depthfirst_impl(
)
{
__asm__ __volatile__(
- "mov x9, #0x0\n"
- "cntb x28\n"
- "cntb x27, ALL, MUL #2\n"
- "cntb x26, ALL, MUL #3\n"
- "whilelt p4.b, x9, %x[n_channels]\n"
- "whilelt p3.b, x28, %x[n_channels]\n"
- "whilelt p2.b, x27, %x[n_channels]\n"
- "whilelt p1.b, x26, %x[n_channels]\n"
+ "mov x28, #0x0\n"
+ "cntb x27\n"
+ "cntb x26, ALL, MUL #2\n"
+ "cntb x25, ALL, MUL #3\n"
+ "whilelt p4.b, x28, %x[n_channels]\n"
+ "whilelt p3.b, x27, %x[n_channels]\n"
+ "whilelt p2.b, x26, %x[n_channels]\n"
+ "whilelt p1.b, x25, %x[n_channels]\n"
"ptrue p0.b\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
- "lsr x25, %x[n_valid_cells], #0x2\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"mov z8.b, #0x80\n"
"mov z7.b, #0x80\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"mov z6.b, #0x80\n"
"mov z5.b, #0x80\n"
- "cbz x25, 4f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "subs x25, x25, #0x1\n"
- "add x20, x20, #0x20\n"
- "ld1b { z4.b }, p4/Z, [x24, x9]\n"
- "ld1b { z3.b }, p4/Z, [x23, x9]\n"
- "ld1b { z2.b }, p4/Z, [x22, x9]\n"
- "ld1b { z1.b }, p4/Z, [x21, x9]\n"
- "ld1b { z0.b }, p3/Z, [x24, x28]\n"
- "ld1b { z31.b }, p3/Z, [x23, x28]\n"
- "ld1b { z22.b }, p3/Z, [x22, x28]\n"
- "ld1b { z30.b }, p3/Z, [x21, x28]\n"
- "ld1b { z29.b }, p2/Z, [x24, x27]\n"
- "ld1b { z28.b }, p2/Z, [x23, x27]\n"
- "ld1b { z21.b }, p2/Z, [x22, x27]\n"
- "ld1b { z27.b }, p2/Z, [x21, x27]\n"
- "ld1b { z26.b }, p1/Z, [x24, x26]\n"
- "ld1b { z25.b }, p1/Z, [x23, x26]\n"
- "ld1b { z20.b }, p1/Z, [x22, x26]\n"
- "ld1b { z24.b }, p1/Z, [x21, x26]\n"
+ "cbz x24, 4f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+ "ld1b { z3.b }, p4/Z, [x22, x28]\n"
+ "ld1b { z2.b }, p4/Z, [x21, x28]\n"
+ "ld1b { z1.b }, p4/Z, [x20, x28]\n"
+ "ld1b { z0.b }, p3/Z, [x23, x27]\n"
+ "ld1b { z31.b }, p3/Z, [x22, x27]\n"
+ "ld1b { z22.b }, p3/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p3/Z, [x20, x27]\n"
+ "ld1b { z29.b }, p2/Z, [x23, x26]\n"
+ "ld1b { z28.b }, p2/Z, [x22, x26]\n"
+ "ld1b { z21.b }, p2/Z, [x21, x26]\n"
+ "ld1b { z27.b }, p2/Z, [x20, x26]\n"
+ "ld1b { z26.b }, p1/Z, [x23, x25]\n"
+ "ld1b { z25.b }, p1/Z, [x22, x25]\n"
+ "ld1b { z20.b }, p1/Z, [x21, x25]\n"
+ "ld1b { z24.b }, p1/Z, [x20, x25]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
"movprfx z19, z4\n smax z19.b, p0/M, z19.b, z3.b\n"
"movprfx z23, z2\n smax z23.b, p0/M, z23.b, z1.b\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
"movprfx z18, z0\n smax z18.b, p0/M, z18.b, z31.b\n"
"smax z22.b, p0/M, z22.b, z30.b\n"
- "ld1b { z4.b }, p4/Z, [x24, x9]\n"
- "ld1b { z3.b }, p4/Z, [x23, x9]\n"
+ "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+ "ld1b { z3.b }, p4/Z, [x22, x28]\n"
"movprfx z17, z29\n smax z17.b, p0/M, z17.b, z28.b\n"
"smax z21.b, p0/M, z21.b, z27.b\n"
- "ld1b { z2.b }, p4/Z, [x22, x9]\n"
- "ld1b { z1.b }, p4/Z, [x21, x9]\n"
+ "ld1b { z2.b }, p4/Z, [x21, x28]\n"
+ "ld1b { z1.b }, p4/Z, [x20, x28]\n"
"movprfx z16, z26\n smax z16.b, p0/M, z16.b, z25.b\n"
"smax z20.b, p0/M, z20.b, z24.b\n"
- "ld1b { z0.b }, p3/Z, [x24, x28]\n"
- "ld1b { z31.b }, p3/Z, [x23, x28]\n"
+ "ld1b { z0.b }, p3/Z, [x23, x27]\n"
+ "ld1b { z31.b }, p3/Z, [x22, x27]\n"
"smax z19.b, p0/M, z19.b, z23.b\n"
"smax z18.b, p0/M, z18.b, z22.b\n"
- "ld1b { z22.b }, p3/Z, [x22, x28]\n"
- "ld1b { z30.b }, p3/Z, [x21, x28]\n"
+ "ld1b { z22.b }, p3/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p3/Z, [x20, x27]\n"
"smax z17.b, p0/M, z17.b, z21.b\n"
"smax z16.b, p0/M, z16.b, z20.b\n"
- "ld1b { z29.b }, p2/Z, [x24, x27]\n"
- "ld1b { z28.b }, p2/Z, [x23, x27]\n"
- "subs x25, x25, #0x1\n"
+ "ld1b { z29.b }, p2/Z, [x23, x26]\n"
+ "ld1b { z28.b }, p2/Z, [x22, x26]\n"
+ "subs x24, x24, #0x1\n"
"smax z8.b, p0/M, z8.b, z19.b\n"
- "ld1b { z21.b }, p2/Z, [x22, x27]\n"
- "ld1b { z27.b }, p2/Z, [x21, x27]\n"
+ "ld1b { z21.b }, p2/Z, [x21, x26]\n"
+ "ld1b { z27.b }, p2/Z, [x20, x26]\n"
"smax z7.b, p0/M, z7.b, z18.b\n"
"smax z6.b, p0/M, z6.b, z17.b\n"
- "ld1b { z26.b }, p1/Z, [x24, x26]\n"
- "ld1b { z25.b }, p1/Z, [x23, x26]\n"
+ "ld1b { z26.b }, p1/Z, [x23, x25]\n"
+ "ld1b { z25.b }, p1/Z, [x22, x25]\n"
"smax z5.b, p0/M, z5.b, z16.b\n"
- "add x20, x20, #0x20\n"
- "ld1b { z20.b }, p1/Z, [x22, x26]\n"
- "ld1b { z24.b }, p1/Z, [x21, x26]\n"
+ "add x19, x19, #0x20\n"
+ "ld1b { z20.b }, p1/Z, [x21, x25]\n"
+ "ld1b { z24.b }, p1/Z, [x20, x25]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
"movprfx z19, z4\n smax z19.b, p0/M, z19.b, z3.b\n"
@@ -135,61 +135,61 @@ void sve_s8_nhwc_max_generic_depthfirst_impl(
"smax z6.b, p0/M, z6.b, z17.b\n"
"smax z5.b, p0/M, z5.b, z16.b\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ld1b { z4.b }, p4/Z, [x24, x9]\n"
- "subs x21, x21, #0x1\n"
+ "ldr x23, [x19], #0x8\n"
+ "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+ "subs x20, x20, #0x1\n"
"smax z8.b, p0/M, z8.b, z4.b\n"
- "ld1b { z0.b }, p3/Z, [x24, x28]\n"
- "ld1b { z29.b }, p2/Z, [x24, x27]\n"
+ "ld1b { z0.b }, p3/Z, [x23, x27]\n"
+ "ld1b { z29.b }, p2/Z, [x23, x26]\n"
"smax z7.b, p0/M, z7.b, z0.b\n"
"smax z6.b, p0/M, z6.b, z29.b\n"
- "ld1b { z26.b }, p1/Z, [x24, x26]\n"
+ "ld1b { z26.b }, p1/Z, [x23, x25]\n"
"smax z5.b, p0/M, z5.b, z26.b\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "st1b { z8.b }, p4, [%x[outptr], x9]\n"
- "incb x9, ALL, MUL #4\n"
- "st1b { z7.b }, p3, [%x[outptr], x28]\n"
+ "st1b { z8.b }, p4, [%x[outptr], x28]\n"
"incb x28, ALL, MUL #4\n"
- "st1b { z6.b }, p2, [%x[outptr], x27]\n"
+ "st1b { z7.b }, p3, [%x[outptr], x27]\n"
"incb x27, ALL, MUL #4\n"
- "st1b { z5.b }, p1, [%x[outptr], x26]\n"
+ "st1b { z6.b }, p2, [%x[outptr], x26]\n"
"incb x26, ALL, MUL #4\n"
- "whilelt p1.b, x26, %x[n_channels]\n"
+ "st1b { z5.b }, p1, [%x[outptr], x25]\n"
+ "incb x25, ALL, MUL #4\n"
+ "whilelt p1.b, x25, %x[n_channels]\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p4.b, x9, %x[n_channels]\n"
+ "whilelt p4.b, x28, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
- "lsr x25, %x[n_valid_cells], #0x2\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"mov z8.b, #0x80\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 11f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "subs x25, x25, #0x1\n"
- "add x20, x20, #0x20\n"
- "ld1b { z4.b }, p4/Z, [x24, x9]\n"
- "ld1b { z3.b }, p4/Z, [x23, x9]\n"
- "ld1b { z2.b }, p4/Z, [x22, x9]\n"
- "ld1b { z1.b }, p4/Z, [x21, x9]\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 11f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+ "ld1b { z3.b }, p4/Z, [x22, x28]\n"
+ "ld1b { z2.b }, p4/Z, [x21, x28]\n"
+ "ld1b { z1.b }, p4/Z, [x20, x28]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
"movprfx z19, z4\n smax z19.b, p0/M, z19.b, z3.b\n"
"movprfx z23, z2\n smax z23.b, p0/M, z23.b, z1.b\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
"smax z19.b, p0/M, z19.b, z23.b\n"
- "subs x25, x25, #0x1\n"
- "ld1b { z4.b }, p4/Z, [x24, x9]\n"
- "ld1b { z3.b }, p4/Z, [x23, x9]\n"
+ "subs x24, x24, #0x1\n"
+ "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+ "ld1b { z3.b }, p4/Z, [x22, x28]\n"
"smax z8.b, p0/M, z8.b, z19.b\n"
- "add x20, x20, #0x20\n"
- "ld1b { z2.b }, p4/Z, [x22, x9]\n"
- "ld1b { z1.b }, p4/Z, [x21, x9]\n"
+ "add x19, x19, #0x20\n"
+ "ld1b { z2.b }, p4/Z, [x21, x28]\n"
+ "ld1b { z1.b }, p4/Z, [x20, x28]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
"movprfx z19, z4\n smax z19.b, p0/M, z19.b, z3.b\n"
@@ -197,23 +197,23 @@ void sve_s8_nhwc_max_generic_depthfirst_impl(
"smax z19.b, p0/M, z19.b, z23.b\n"
"smax z8.b, p0/M, z8.b, z19.b\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ld1b { z4.b }, p4/Z, [x24, x9]\n"
- "subs x21, x21, #0x1\n"
+ "ldr x23, [x19], #0x8\n"
+ "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+ "subs x20, x20, #0x1\n"
"smax z8.b, p0/M, z8.b, z4.b\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "st1b { z8.b }, p4, [%x[outptr], x9]\n"
- "incb x9\n"
- "whilelt p4.b, x9, %x[n_channels]\n"
+ "st1b { z8.b }, p4, [%x[outptr], x28]\n"
+ "incb x28\n"
+ "whilelt p4.b, x28, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
:
: [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_avg_generic_depthfirst/generic.cpp
index 7161c4f389..c431fece8f 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -114,21 +114,21 @@ void sve_s8q_nhwc_avg_generic_depthfirst_impl(
);
__asm__ __volatile__(
- "mov x27, #0x0\n"
- "cntb x26\n"
- "cntb x25, ALL, MUL #2\n"
- "cntb x24, ALL, MUL #3\n"
- "whilelt p4.b, x27, %x[n_channels]\n"
- "whilelt p3.b, x26, %x[n_channels]\n"
- "whilelt p2.b, x25, %x[n_channels]\n"
- "whilelt p1.b, x24, %x[n_channels]\n"
+ "mov x26, #0x0\n"
+ "cntb x25\n"
+ "cntb x24, ALL, MUL #2\n"
+ "cntb x23, ALL, MUL #3\n"
+ "whilelt p4.b, x26, %x[n_channels]\n"
+ "whilelt p3.b, x25, %x[n_channels]\n"
+ "whilelt p2.b, x24, %x[n_channels]\n"
+ "whilelt p1.b, x23, %x[n_channels]\n"
"ptrue p0.b\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
- "lsr x23, %x[n_valid_cells], #0x1\n"
+ "lsr x22, %x[n_valid_cells], #0x1\n"
"mov z15.s, #0x0\n"
"mov z14.s, #0x0\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"mov z13.s, #0x0\n"
"mov z12.s, #0x0\n"
"mov z11.s, #0x0\n"
@@ -143,43 +143,43 @@ void sve_s8q_nhwc_avg_generic_depthfirst_impl(
"mov z2.s, #0x0\n"
"mov z1.s, #0x0\n"
"mov z0.s, #0x0\n"
- "cbz x23, 4f\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "subs x23, x23, #0x1\n"
- "add x20, x20, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
- "ld1b { z30.b }, p4/Z, [x21, x27]\n"
- "ld1b { z29.b }, p3/Z, [x22, x26]\n"
- "ld1b { z28.b }, p3/Z, [x21, x26]\n"
- "ld1b { z27.b }, p2/Z, [x22, x25]\n"
- "ld1b { z26.b }, p2/Z, [x21, x25]\n"
- "ld1b { z25.b }, p1/Z, [x22, x24]\n"
- "ld1b { z24.b }, p1/Z, [x21, x24]\n"
+ "cbz x22, 4f\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
+ "add x19, x19, #0x10\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+ "ld1b { z29.b }, p3/Z, [x21, x25]\n"
+ "ld1b { z28.b }, p3/Z, [x20, x25]\n"
+ "ld1b { z27.b }, p2/Z, [x21, x24]\n"
+ "ld1b { z26.b }, p2/Z, [x20, x24]\n"
+ "ld1b { z25.b }, p1/Z, [x21, x23]\n"
+ "ld1b { z24.b }, p1/Z, [x20, x23]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 2 inputs loop
".inst 0x455e03f7 // saddlb z23.h, z31.b, z30.b\n"
".inst 0x455e07f6 // saddlt z22.h, z31.b, z30.b\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "subs x23, x23, #0x1\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
".inst 0x455c03b5 // saddlb z21.h, z29.b, z28.b\n"
".inst 0x455c07b4 // saddlt z20.h, z29.b, z28.b\n"
- "add x20, x20, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
+ "add x19, x19, #0x10\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
".inst 0x455a0373 // saddlb z19.h, z27.b, z26.b\n"
".inst 0x455a0772 // saddlt z18.h, z27.b, z26.b\n"
- "ld1b { z30.b }, p4/Z, [x21, x27]\n"
- "ld1b { z29.b }, p3/Z, [x22, x26]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+ "ld1b { z29.b }, p3/Z, [x21, x25]\n"
".inst 0x45580331 // saddlb z17.h, z25.b, z24.b\n"
".inst 0x45580730 // saddlt z16.h, z25.b, z24.b\n"
- "ld1b { z28.b }, p3/Z, [x21, x26]\n"
- "ld1b { z27.b }, p2/Z, [x22, x25]\n"
+ "ld1b { z28.b }, p3/Z, [x20, x25]\n"
+ "ld1b { z27.b }, p2/Z, [x21, x24]\n"
".inst 0x459741ef // saddwb z15.s, z15.s, z23.h\n"
".inst 0x459745ce // saddwt z14.s, z14.s, z23.h\n"
- "ld1b { z26.b }, p2/Z, [x21, x25]\n"
- "ld1b { z25.b }, p1/Z, [x22, x24]\n"
+ "ld1b { z26.b }, p2/Z, [x20, x24]\n"
+ "ld1b { z25.b }, p1/Z, [x21, x23]\n"
".inst 0x459641ad // saddwb z13.s, z13.s, z22.h\n"
".inst 0x4596458c // saddwt z12.s, z12.s, z22.h\n"
- "ld1b { z24.b }, p1/Z, [x21, x24]\n"
+ "ld1b { z24.b }, p1/Z, [x20, x23]\n"
".inst 0x4595416b // saddwb z11.s, z11.s, z21.h\n"
".inst 0x4595454a // saddwt z10.s, z10.s, z21.h\n"
".inst 0x45944129 // saddwb z9.s, z9.s, z20.h\n"
@@ -219,21 +219,21 @@ void sve_s8q_nhwc_avg_generic_depthfirst_impl(
".inst 0x45904021 // saddwb z1.s, z1.s, z16.h\n"
".inst 0x45904400 // saddwt z0.s, z0.s, z16.h\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x1\n"
+ "ands x20, %x[n_valid_cells], #0x1\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x22, [x20], #0x8\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
+ "ldr x21, [x19], #0x8\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
".inst 0x4508a3f7 // sshllb z23.h, z31.b, #0x0\n"
".inst 0x4508a7f6 // sshllt z22.h, z31.b, #0x0\n"
- "ld1b { z29.b }, p3/Z, [x22, x26]\n"
- "ld1b { z27.b }, p2/Z, [x22, x25]\n"
+ "ld1b { z29.b }, p3/Z, [x21, x25]\n"
+ "ld1b { z27.b }, p2/Z, [x21, x24]\n"
".inst 0x4508a3b5 // sshllb z21.h, z29.b, #0x0\n"
".inst 0x4508a7b4 // sshllt z20.h, z29.b, #0x0\n"
- "ld1b { z25.b }, p1/Z, [x22, x24]\n"
+ "ld1b { z25.b }, p1/Z, [x21, x23]\n"
".inst 0x4508a373 // sshllb z19.h, z27.b, #0x0\n"
".inst 0x4508a772 // sshllt z18.h, z27.b, #0x0\n"
- "subs x21, x21, #0x1\n"
+ "subs x20, x20, #0x1\n"
".inst 0x4508a331 // sshllb z17.h, z25.b, #0x0\n"
".inst 0x4508a730 // sshllt z16.h, z25.b, #0x0\n"
".inst 0x459741ef // saddwb z15.s, z15.s, z23.h\n"
@@ -334,7 +334,7 @@ void sve_s8q_nhwc_avg_generic_depthfirst_impl(
"smin z10.s, p0/M, z10.s, z18.s\n"
"smin z9.s, p0/M, z9.s, z18.s\n"
"trn1 z17.h, z11.h, z10.h\n"
- "st1b { z16.b }, p4, [%x[outptr], x27]\n"
+ "st1b { z16.b }, p4, [%x[outptr], x26]\n"
"smin z8.s, p0/M, z8.s, z18.s\n"
"smin z7.s, p0/M, z7.s, z18.s\n"
"trn1 z16.h, z9.h, z8.h\n"
@@ -342,7 +342,7 @@ void sve_s8q_nhwc_avg_generic_depthfirst_impl(
"smin z6.s, p0/M, z6.s, z18.s\n"
"smin z5.s, p0/M, z5.s, z18.s\n"
"trn1 z17.h, z7.h, z6.h\n"
- "st1b { z16.b }, p3, [%x[outptr], x26]\n"
+ "st1b { z16.b }, p3, [%x[outptr], x25]\n"
"smin z4.s, p0/M, z4.s, z18.s\n"
"smin z3.s, p0/M, z3.s, z18.s\n"
"trn1 z16.h, z5.h, z4.h\n"
@@ -350,46 +350,46 @@ void sve_s8q_nhwc_avg_generic_depthfirst_impl(
"smin z2.s, p0/M, z2.s, z18.s\n"
"smin z1.s, p0/M, z1.s, z18.s\n"
"trn1 z17.h, z3.h, z2.h\n"
- "st1b { z16.b }, p2, [%x[outptr], x25]\n"
+ "st1b { z16.b }, p2, [%x[outptr], x24]\n"
"smin z0.s, p0/M, z0.s, z18.s\n"
"trn1 z16.h, z1.h, z0.h\n"
"trn1 z16.b, z17.b, z16.b\n"
- "st1b { z16.b }, p1, [%x[outptr], x24]\n"
- "incb x24, ALL, MUL #4\n"
- "whilelt p1.b, x24, %x[n_channels]\n"
- "incb x27, ALL, MUL #4\n"
+ "st1b { z16.b }, p1, [%x[outptr], x23]\n"
+ "incb x23, ALL, MUL #4\n"
+ "whilelt p1.b, x23, %x[n_channels]\n"
"incb x26, ALL, MUL #4\n"
"incb x25, ALL, MUL #4\n"
+ "incb x24, ALL, MUL #4\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p4.b, x27, %x[n_channels]\n"
+ "whilelt p4.b, x26, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
- "lsr x23, %x[n_valid_cells], #0x1\n"
+ "lsr x22, %x[n_valid_cells], #0x1\n"
"mov z15.s, #0x0\n"
"mov z14.s, #0x0\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"mov z13.s, #0x0\n"
"mov z12.s, #0x0\n"
- "cbz x23, 11f\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "subs x23, x23, #0x1\n"
- "add x20, x20, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
- "ld1b { z30.b }, p4/Z, [x21, x27]\n"
+ "cbz x22, 11f\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
+ "add x19, x19, #0x10\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x26]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 2 inputs loop
".inst 0x455e03f7 // saddlb z23.h, z31.b, z30.b\n"
".inst 0x455e07f6 // saddlt z22.h, z31.b, z30.b\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "subs x23, x23, #0x1\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
".inst 0x459741ef // saddwb z15.s, z15.s, z23.h\n"
".inst 0x459745ce // saddwt z14.s, z14.s, z23.h\n"
- "add x20, x20, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
+ "add x19, x19, #0x10\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
".inst 0x459641ad // saddwb z13.s, z13.s, z22.h\n"
".inst 0x4596458c // saddwt z12.s, z12.s, z22.h\n"
- "ld1b { z30.b }, p4/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x26]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 2 inputs tail
".inst 0x455e03f7 // saddlb z23.h, z31.b, z30.b\n"
@@ -399,14 +399,14 @@ void sve_s8q_nhwc_avg_generic_depthfirst_impl(
".inst 0x459641ad // saddwb z13.s, z13.s, z22.h\n"
".inst 0x4596458c // saddwt z12.s, z12.s, z22.h\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x1\n"
+ "ands x20, %x[n_valid_cells], #0x1\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x22, [x20], #0x8\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
+ "ldr x21, [x19], #0x8\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
".inst 0x4508a3f7 // sshllb z23.h, z31.b, #0x0\n"
".inst 0x4508a7f6 // sshllt z22.h, z31.b, #0x0\n"
- "subs x21, x21, #0x1\n"
+ "subs x20, x20, #0x1\n"
".inst 0x459741ef // saddwb z15.s, z15.s, z23.h\n"
".inst 0x459745ce // saddwt z14.s, z14.s, z23.h\n"
".inst 0x459641ad // saddwb z13.s, z13.s, z22.h\n"
@@ -441,14 +441,14 @@ void sve_s8q_nhwc_avg_generic_depthfirst_impl(
"smin z12.s, p0/M, z12.s, z18.s\n"
"trn1 z16.h, z13.h, z12.h\n"
"trn1 z16.b, z17.b, z16.b\n"
- "st1b { z16.b }, p4, [%x[outptr], x27]\n"
- "incb x27\n"
- "whilelt p4.b, x27, %x[n_channels]\n"
+ "st1b { z16.b }, p4, [%x[outptr], x26]\n"
+ "incb x26\n"
+ "whilelt p4.b, x26, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
:
: [combined_rescale_value] "r" (&combined_rescale_value), [inptrs] "r" (inptrs), [left_shift] "r" (&left_shift), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr), [right_shift] "r" (&right_shift)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_max_generic_depthfirst/generic.cpp
index 19209811d8..5ef141492f 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,82 +42,82 @@ void sve_s8q_nhwc_max_generic_depthfirst_impl(
)
{
__asm__ __volatile__(
- "mov x9, #0x0\n"
- "cntb x28\n"
- "cntb x27, ALL, MUL #2\n"
- "cntb x26, ALL, MUL #3\n"
- "whilelt p4.b, x9, %x[n_channels]\n"
- "whilelt p3.b, x28, %x[n_channels]\n"
- "whilelt p2.b, x27, %x[n_channels]\n"
- "whilelt p1.b, x26, %x[n_channels]\n"
+ "mov x28, #0x0\n"
+ "cntb x27\n"
+ "cntb x26, ALL, MUL #2\n"
+ "cntb x25, ALL, MUL #3\n"
+ "whilelt p4.b, x28, %x[n_channels]\n"
+ "whilelt p3.b, x27, %x[n_channels]\n"
+ "whilelt p2.b, x26, %x[n_channels]\n"
+ "whilelt p1.b, x25, %x[n_channels]\n"
"ptrue p0.b\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
- "lsr x25, %x[n_valid_cells], #0x2\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"mov z8.b, #0x80\n"
"mov z7.b, #0x80\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"mov z6.b, #0x80\n"
"mov z5.b, #0x80\n"
- "cbz x25, 4f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "subs x25, x25, #0x1\n"
- "add x20, x20, #0x20\n"
- "ld1b { z4.b }, p4/Z, [x24, x9]\n"
- "ld1b { z3.b }, p4/Z, [x23, x9]\n"
- "ld1b { z2.b }, p4/Z, [x22, x9]\n"
- "ld1b { z1.b }, p4/Z, [x21, x9]\n"
- "ld1b { z0.b }, p3/Z, [x24, x28]\n"
- "ld1b { z31.b }, p3/Z, [x23, x28]\n"
- "ld1b { z22.b }, p3/Z, [x22, x28]\n"
- "ld1b { z30.b }, p3/Z, [x21, x28]\n"
- "ld1b { z29.b }, p2/Z, [x24, x27]\n"
- "ld1b { z28.b }, p2/Z, [x23, x27]\n"
- "ld1b { z21.b }, p2/Z, [x22, x27]\n"
- "ld1b { z27.b }, p2/Z, [x21, x27]\n"
- "ld1b { z26.b }, p1/Z, [x24, x26]\n"
- "ld1b { z25.b }, p1/Z, [x23, x26]\n"
- "ld1b { z20.b }, p1/Z, [x22, x26]\n"
- "ld1b { z24.b }, p1/Z, [x21, x26]\n"
+ "cbz x24, 4f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+ "ld1b { z3.b }, p4/Z, [x22, x28]\n"
+ "ld1b { z2.b }, p4/Z, [x21, x28]\n"
+ "ld1b { z1.b }, p4/Z, [x20, x28]\n"
+ "ld1b { z0.b }, p3/Z, [x23, x27]\n"
+ "ld1b { z31.b }, p3/Z, [x22, x27]\n"
+ "ld1b { z22.b }, p3/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p3/Z, [x20, x27]\n"
+ "ld1b { z29.b }, p2/Z, [x23, x26]\n"
+ "ld1b { z28.b }, p2/Z, [x22, x26]\n"
+ "ld1b { z21.b }, p2/Z, [x21, x26]\n"
+ "ld1b { z27.b }, p2/Z, [x20, x26]\n"
+ "ld1b { z26.b }, p1/Z, [x23, x25]\n"
+ "ld1b { z25.b }, p1/Z, [x22, x25]\n"
+ "ld1b { z20.b }, p1/Z, [x21, x25]\n"
+ "ld1b { z24.b }, p1/Z, [x20, x25]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
"movprfx z19, z4\n smax z19.b, p0/M, z19.b, z3.b\n"
"movprfx z23, z2\n smax z23.b, p0/M, z23.b, z1.b\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
"movprfx z18, z0\n smax z18.b, p0/M, z18.b, z31.b\n"
"smax z22.b, p0/M, z22.b, z30.b\n"
- "ld1b { z4.b }, p4/Z, [x24, x9]\n"
- "ld1b { z3.b }, p4/Z, [x23, x9]\n"
+ "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+ "ld1b { z3.b }, p4/Z, [x22, x28]\n"
"movprfx z17, z29\n smax z17.b, p0/M, z17.b, z28.b\n"
"smax z21.b, p0/M, z21.b, z27.b\n"
- "ld1b { z2.b }, p4/Z, [x22, x9]\n"
- "ld1b { z1.b }, p4/Z, [x21, x9]\n"
+ "ld1b { z2.b }, p4/Z, [x21, x28]\n"
+ "ld1b { z1.b }, p4/Z, [x20, x28]\n"
"movprfx z16, z26\n smax z16.b, p0/M, z16.b, z25.b\n"
"smax z20.b, p0/M, z20.b, z24.b\n"
- "ld1b { z0.b }, p3/Z, [x24, x28]\n"
- "ld1b { z31.b }, p3/Z, [x23, x28]\n"
+ "ld1b { z0.b }, p3/Z, [x23, x27]\n"
+ "ld1b { z31.b }, p3/Z, [x22, x27]\n"
"smax z19.b, p0/M, z19.b, z23.b\n"
"smax z18.b, p0/M, z18.b, z22.b\n"
- "ld1b { z22.b }, p3/Z, [x22, x28]\n"
- "ld1b { z30.b }, p3/Z, [x21, x28]\n"
+ "ld1b { z22.b }, p3/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p3/Z, [x20, x27]\n"
"smax z17.b, p0/M, z17.b, z21.b\n"
"smax z16.b, p0/M, z16.b, z20.b\n"
- "ld1b { z29.b }, p2/Z, [x24, x27]\n"
- "ld1b { z28.b }, p2/Z, [x23, x27]\n"
- "subs x25, x25, #0x1\n"
+ "ld1b { z29.b }, p2/Z, [x23, x26]\n"
+ "ld1b { z28.b }, p2/Z, [x22, x26]\n"
+ "subs x24, x24, #0x1\n"
"smax z8.b, p0/M, z8.b, z19.b\n"
- "ld1b { z21.b }, p2/Z, [x22, x27]\n"
- "ld1b { z27.b }, p2/Z, [x21, x27]\n"
+ "ld1b { z21.b }, p2/Z, [x21, x26]\n"
+ "ld1b { z27.b }, p2/Z, [x20, x26]\n"
"smax z7.b, p0/M, z7.b, z18.b\n"
"smax z6.b, p0/M, z6.b, z17.b\n"
- "ld1b { z26.b }, p1/Z, [x24, x26]\n"
- "ld1b { z25.b }, p1/Z, [x23, x26]\n"
+ "ld1b { z26.b }, p1/Z, [x23, x25]\n"
+ "ld1b { z25.b }, p1/Z, [x22, x25]\n"
"smax z5.b, p0/M, z5.b, z16.b\n"
- "add x20, x20, #0x20\n"
- "ld1b { z20.b }, p1/Z, [x22, x26]\n"
- "ld1b { z24.b }, p1/Z, [x21, x26]\n"
+ "add x19, x19, #0x20\n"
+ "ld1b { z20.b }, p1/Z, [x21, x25]\n"
+ "ld1b { z24.b }, p1/Z, [x20, x25]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
"movprfx z19, z4\n smax z19.b, p0/M, z19.b, z3.b\n"
@@ -137,33 +137,33 @@ void sve_s8q_nhwc_max_generic_depthfirst_impl(
"smax z6.b, p0/M, z6.b, z17.b\n"
"smax z5.b, p0/M, z5.b, z16.b\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ld1b { z4.b }, p4/Z, [x24, x9]\n"
- "subs x21, x21, #0x1\n"
+ "ldr x23, [x19], #0x8\n"
+ "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+ "subs x20, x20, #0x1\n"
"smax z8.b, p0/M, z8.b, z4.b\n"
- "ld1b { z0.b }, p3/Z, [x24, x28]\n"
- "ld1b { z29.b }, p2/Z, [x24, x27]\n"
+ "ld1b { z0.b }, p3/Z, [x23, x27]\n"
+ "ld1b { z29.b }, p2/Z, [x23, x26]\n"
"smax z7.b, p0/M, z7.b, z0.b\n"
"smax z6.b, p0/M, z6.b, z29.b\n"
- "ld1b { z26.b }, p1/Z, [x24, x26]\n"
+ "ld1b { z26.b }, p1/Z, [x23, x25]\n"
"smax z5.b, p0/M, z5.b, z26.b\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
".inst 0x4508a111 // sshllb z17.h, z8.b, #0x0\n"
".inst 0x4508a517 // sshllt z23.h, z8.b, #0x0\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
- "ld1rw { z4.s }, p0/Z, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
+ "ld1rw { z4.s }, p0/Z, [x19]\n"
".inst 0x4508a0f6 // sshllb z22.h, z7.b, #0x0\n"
".inst 0x4508a4f5 // sshllt z21.h, z7.b, #0x0\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
- "ld1rw { z3.s }, p0/Z, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+ "ld1rw { z3.s }, p0/Z, [x19]\n"
".inst 0x4508a0d4 // sshllb z20.h, z6.b, #0x0\n"
".inst 0x4508a4d3 // sshllt z19.h, z6.b, #0x0\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
- "ld1rw { z2.s }, p0/Z, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+ "ld1rw { z2.s }, p0/Z, [x19]\n"
".inst 0x4508a0b2 // sshllb z18.h, z5.b, #0x0\n"
".inst 0x4508a4b0 // sshllt z16.h, z5.b, #0x0\n"
".inst 0x4510a221 // sshllb z1.s, z17.h, #0x0\n"
@@ -259,7 +259,7 @@ void sve_s8q_nhwc_max_generic_depthfirst_impl(
"smin z29.s, p0/M, z29.s, z18.s\n"
"smin z28.s, p0/M, z28.s, z18.s\n"
"trn1 z17.h, z30.h, z29.h\n"
- "st1b { z16.b }, p4, [%x[outptr], x9]\n"
+ "st1b { z16.b }, p4, [%x[outptr], x28]\n"
"smin z27.s, p0/M, z27.s, z18.s\n"
"smin z26.s, p0/M, z26.s, z18.s\n"
"trn1 z16.h, z28.h, z27.h\n"
@@ -267,7 +267,7 @@ void sve_s8q_nhwc_max_generic_depthfirst_impl(
"smin z25.s, p0/M, z25.s, z18.s\n"
"smin z24.s, p0/M, z24.s, z18.s\n"
"trn1 z17.h, z26.h, z25.h\n"
- "st1b { z16.b }, p3, [%x[outptr], x28]\n"
+ "st1b { z16.b }, p3, [%x[outptr], x27]\n"
"smin z23.s, p0/M, z23.s, z18.s\n"
"smin z22.s, p0/M, z22.s, z18.s\n"
"trn1 z16.h, z24.h, z23.h\n"
@@ -275,47 +275,47 @@ void sve_s8q_nhwc_max_generic_depthfirst_impl(
"smin z21.s, p0/M, z21.s, z18.s\n"
"smin z20.s, p0/M, z20.s, z18.s\n"
"trn1 z17.h, z22.h, z21.h\n"
- "st1b { z16.b }, p2, [%x[outptr], x27]\n"
+ "st1b { z16.b }, p2, [%x[outptr], x26]\n"
"smin z19.s, p0/M, z19.s, z18.s\n"
"trn1 z16.h, z20.h, z19.h\n"
"trn1 z16.b, z17.b, z16.b\n"
- "st1b { z16.b }, p1, [%x[outptr], x26]\n"
- "incb x26, ALL, MUL #4\n"
- "whilelt p1.b, x26, %x[n_channels]\n"
- "incb x9, ALL, MUL #4\n"
+ "st1b { z16.b }, p1, [%x[outptr], x25]\n"
+ "incb x25, ALL, MUL #4\n"
+ "whilelt p1.b, x25, %x[n_channels]\n"
"incb x28, ALL, MUL #4\n"
"incb x27, ALL, MUL #4\n"
+ "incb x26, ALL, MUL #4\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p4.b, x9, %x[n_channels]\n"
+ "whilelt p4.b, x28, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
- "lsr x25, %x[n_valid_cells], #0x2\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"mov z8.b, #0x80\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 11f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "subs x25, x25, #0x1\n"
- "add x20, x20, #0x20\n"
- "ld1b { z4.b }, p4/Z, [x24, x9]\n"
- "ld1b { z3.b }, p4/Z, [x23, x9]\n"
- "ld1b { z2.b }, p4/Z, [x22, x9]\n"
- "ld1b { z1.b }, p4/Z, [x21, x9]\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 11f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+ "ld1b { z3.b }, p4/Z, [x22, x28]\n"
+ "ld1b { z2.b }, p4/Z, [x21, x28]\n"
+ "ld1b { z1.b }, p4/Z, [x20, x28]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
"movprfx z19, z4\n smax z19.b, p0/M, z19.b, z3.b\n"
"movprfx z23, z2\n smax z23.b, p0/M, z23.b, z1.b\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
"smax z19.b, p0/M, z19.b, z23.b\n"
- "subs x25, x25, #0x1\n"
- "ld1b { z4.b }, p4/Z, [x24, x9]\n"
- "ld1b { z3.b }, p4/Z, [x23, x9]\n"
+ "subs x24, x24, #0x1\n"
+ "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+ "ld1b { z3.b }, p4/Z, [x22, x28]\n"
"smax z8.b, p0/M, z8.b, z19.b\n"
- "add x20, x20, #0x20\n"
- "ld1b { z2.b }, p4/Z, [x22, x9]\n"
- "ld1b { z1.b }, p4/Z, [x21, x9]\n"
+ "add x19, x19, #0x20\n"
+ "ld1b { z2.b }, p4/Z, [x21, x28]\n"
+ "ld1b { z1.b }, p4/Z, [x20, x28]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
"movprfx z19, z4\n smax z19.b, p0/M, z19.b, z3.b\n"
@@ -323,23 +323,23 @@ void sve_s8q_nhwc_max_generic_depthfirst_impl(
"smax z19.b, p0/M, z19.b, z23.b\n"
"smax z8.b, p0/M, z8.b, z19.b\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ld1b { z4.b }, p4/Z, [x24, x9]\n"
- "subs x21, x21, #0x1\n"
+ "ldr x23, [x19], #0x8\n"
+ "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+ "subs x20, x20, #0x1\n"
"smax z8.b, p0/M, z8.b, z4.b\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
".inst 0x4508a111 // sshllb z17.h, z8.b, #0x0\n"
".inst 0x4508a517 // sshllt z23.h, z8.b, #0x0\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
- "ld1rw { z4.s }, p0/Z, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
+ "ld1rw { z4.s }, p0/Z, [x19]\n"
".inst 0x4510a221 // sshllb z1.s, z17.h, #0x0\n"
".inst 0x4510a631 // sshllt z17.s, z17.h, #0x0\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
- "ld1rw { z3.s }, p0/Z, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+ "ld1rw { z3.s }, p0/Z, [x19]\n"
".inst 0x4510a2e0 // sshllb z0.s, z23.h, #0x0\n"
".inst 0x4510a6ff // sshllt z31.s, z23.h, #0x0\n"
".inst 0x44828081 // srshl z1.s, p0/M, z1.s, z4.s\n"
@@ -348,8 +348,8 @@ void sve_s8q_nhwc_max_generic_depthfirst_impl(
".inst 0x4482809f // srshl z31.s, p0/M, z31.s, z4.s\n"
".inst 0x04a37421 // sqrdmulh z1.s, z1.s, z3.s\n"
".inst 0x04a37631 // sqrdmulh z17.s, z17.s, z3.s\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
- "ld1rw { z2.s }, p0/Z, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+ "ld1rw { z2.s }, p0/Z, [x19]\n"
".inst 0x04a37400 // sqrdmulh z0.s, z0.s, z3.s\n"
".inst 0x04a377ff // sqrdmulh z31.s, z31.s, z3.s\n"
"mov z18.s, #0x7f\n"
@@ -369,14 +369,14 @@ void sve_s8q_nhwc_max_generic_depthfirst_impl(
"smin z31.s, p0/M, z31.s, z18.s\n"
"trn1 z16.h, z0.h, z31.h\n"
"trn1 z16.b, z17.b, z16.b\n"
- "st1b { z16.b }, p4, [%x[outptr], x9]\n"
- "incb x9\n"
- "whilelt p4.b, x9, %x[n_channels]\n"
+ "st1b { z16.b }, p4, [%x[outptr], x28]\n"
+ "incb x28\n"
+ "whilelt p4.b, x28, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
:
: [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [offsetof_qp_per_layer_left_shift] "I" (offsetof(Requantize32, per_layer_left_shift)), [offsetof_qp_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_qp_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [outptr] "r" (outptr), [quant_params] "r" (&qp)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_avg_generic_depthfirst/generic.cpp
index f888038a2a..f853e9de4f 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -95,21 +95,21 @@ void sve_u8_nhwc_avg_generic_depthfirst_impl(
}
__asm__ __volatile__(
- "mov x27, #0x0\n"
- "cntb x26\n"
- "cntb x25, ALL, MUL #2\n"
- "cntb x24, ALL, MUL #3\n"
- "whilelt p4.b, x27, %x[n_channels]\n"
- "whilelt p3.b, x26, %x[n_channels]\n"
- "whilelt p2.b, x25, %x[n_channels]\n"
- "whilelt p1.b, x24, %x[n_channels]\n"
+ "mov x26, #0x0\n"
+ "cntb x25\n"
+ "cntb x24, ALL, MUL #2\n"
+ "cntb x23, ALL, MUL #3\n"
+ "whilelt p4.b, x26, %x[n_channels]\n"
+ "whilelt p3.b, x25, %x[n_channels]\n"
+ "whilelt p2.b, x24, %x[n_channels]\n"
+ "whilelt p1.b, x23, %x[n_channels]\n"
"ptrue p0.b\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
- "lsr x23, %x[n_valid_cells], #0x1\n"
+ "lsr x22, %x[n_valid_cells], #0x1\n"
"mov z15.s, #0x0\n"
"mov z14.s, #0x0\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"mov z13.s, #0x0\n"
"mov z12.s, #0x0\n"
"mov z11.s, #0x0\n"
@@ -124,43 +124,43 @@ void sve_u8_nhwc_avg_generic_depthfirst_impl(
"mov z2.s, #0x0\n"
"mov z1.s, #0x0\n"
"mov z0.s, #0x0\n"
- "cbz x23, 4f\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "subs x23, x23, #0x1\n"
- "add x20, x20, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
- "ld1b { z30.b }, p4/Z, [x21, x27]\n"
- "ld1b { z29.b }, p3/Z, [x22, x26]\n"
- "ld1b { z28.b }, p3/Z, [x21, x26]\n"
- "ld1b { z27.b }, p2/Z, [x22, x25]\n"
- "ld1b { z26.b }, p2/Z, [x21, x25]\n"
- "ld1b { z25.b }, p1/Z, [x22, x24]\n"
- "ld1b { z24.b }, p1/Z, [x21, x24]\n"
+ "cbz x22, 4f\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
+ "add x19, x19, #0x10\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+ "ld1b { z29.b }, p3/Z, [x21, x25]\n"
+ "ld1b { z28.b }, p3/Z, [x20, x25]\n"
+ "ld1b { z27.b }, p2/Z, [x21, x24]\n"
+ "ld1b { z26.b }, p2/Z, [x20, x24]\n"
+ "ld1b { z25.b }, p1/Z, [x21, x23]\n"
+ "ld1b { z24.b }, p1/Z, [x20, x23]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 2 inputs loop
".inst 0x455e0bf7 // uaddlb z23.h, z31.b, z30.b\n"
".inst 0x455e0ff6 // uaddlt z22.h, z31.b, z30.b\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "subs x23, x23, #0x1\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
".inst 0x455c0bb5 // uaddlb z21.h, z29.b, z28.b\n"
".inst 0x455c0fb4 // uaddlt z20.h, z29.b, z28.b\n"
- "add x20, x20, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
+ "add x19, x19, #0x10\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
".inst 0x455a0b73 // uaddlb z19.h, z27.b, z26.b\n"
".inst 0x455a0f72 // uaddlt z18.h, z27.b, z26.b\n"
- "ld1b { z30.b }, p4/Z, [x21, x27]\n"
- "ld1b { z29.b }, p3/Z, [x22, x26]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+ "ld1b { z29.b }, p3/Z, [x21, x25]\n"
".inst 0x45580b31 // uaddlb z17.h, z25.b, z24.b\n"
".inst 0x45580f30 // uaddlt z16.h, z25.b, z24.b\n"
- "ld1b { z28.b }, p3/Z, [x21, x26]\n"
- "ld1b { z27.b }, p2/Z, [x22, x25]\n"
+ "ld1b { z28.b }, p3/Z, [x20, x25]\n"
+ "ld1b { z27.b }, p2/Z, [x21, x24]\n"
".inst 0x459749ef // uaddwb z15.s, z15.s, z23.h\n"
".inst 0x45974dce // uaddwt z14.s, z14.s, z23.h\n"
- "ld1b { z26.b }, p2/Z, [x21, x25]\n"
- "ld1b { z25.b }, p1/Z, [x22, x24]\n"
+ "ld1b { z26.b }, p2/Z, [x20, x24]\n"
+ "ld1b { z25.b }, p1/Z, [x21, x23]\n"
".inst 0x459649ad // uaddwb z13.s, z13.s, z22.h\n"
".inst 0x45964d8c // uaddwt z12.s, z12.s, z22.h\n"
- "ld1b { z24.b }, p1/Z, [x21, x24]\n"
+ "ld1b { z24.b }, p1/Z, [x20, x23]\n"
".inst 0x4595496b // uaddwb z11.s, z11.s, z21.h\n"
".inst 0x45954d4a // uaddwt z10.s, z10.s, z21.h\n"
".inst 0x45944929 // uaddwb z9.s, z9.s, z20.h\n"
@@ -200,21 +200,21 @@ void sve_u8_nhwc_avg_generic_depthfirst_impl(
".inst 0x45904821 // uaddwb z1.s, z1.s, z16.h\n"
".inst 0x45904c00 // uaddwt z0.s, z0.s, z16.h\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x1\n"
+ "ands x20, %x[n_valid_cells], #0x1\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x22, [x20], #0x8\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
+ "ldr x21, [x19], #0x8\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
".inst 0x4508abf7 // ushllb z23.h, z31.b, #0x0\n"
".inst 0x4508aff6 // ushllt z22.h, z31.b, #0x0\n"
- "ld1b { z29.b }, p3/Z, [x22, x26]\n"
- "ld1b { z27.b }, p2/Z, [x22, x25]\n"
+ "ld1b { z29.b }, p3/Z, [x21, x25]\n"
+ "ld1b { z27.b }, p2/Z, [x21, x24]\n"
".inst 0x4508abb5 // ushllb z21.h, z29.b, #0x0\n"
".inst 0x4508afb4 // ushllt z20.h, z29.b, #0x0\n"
- "ld1b { z25.b }, p1/Z, [x22, x24]\n"
+ "ld1b { z25.b }, p1/Z, [x21, x23]\n"
".inst 0x4508ab73 // ushllb z19.h, z27.b, #0x0\n"
".inst 0x4508af72 // ushllt z18.h, z27.b, #0x0\n"
- "subs x21, x21, #0x1\n"
+ "subs x20, x20, #0x1\n"
".inst 0x4508ab31 // ushllb z17.h, z25.b, #0x0\n"
".inst 0x4508af30 // ushllt z16.h, z25.b, #0x0\n"
".inst 0x459749ef // uaddwb z15.s, z15.s, z23.h\n"
@@ -297,7 +297,7 @@ void sve_u8_nhwc_avg_generic_depthfirst_impl(
"smin z11.s, p0/M, z11.s, z18.s\n"
"smin z10.s, p0/M, z10.s, z18.s\n"
"trn1 z17.h, z11.h, z10.h\n"
- "st1b { z16.b }, p4, [%x[outptr], x27]\n"
+ "st1b { z16.b }, p4, [%x[outptr], x26]\n"
"smin z9.s, p0/M, z9.s, z18.s\n"
"smin z8.s, p0/M, z8.s, z18.s\n"
"trn1 z16.h, z9.h, z8.h\n"
@@ -305,7 +305,7 @@ void sve_u8_nhwc_avg_generic_depthfirst_impl(
"smin z7.s, p0/M, z7.s, z18.s\n"
"smin z6.s, p0/M, z6.s, z18.s\n"
"trn1 z17.h, z7.h, z6.h\n"
- "st1b { z16.b }, p3, [%x[outptr], x26]\n"
+ "st1b { z16.b }, p3, [%x[outptr], x25]\n"
"smin z5.s, p0/M, z5.s, z18.s\n"
"smin z4.s, p0/M, z4.s, z18.s\n"
"trn1 z16.h, z5.h, z4.h\n"
@@ -313,47 +313,47 @@ void sve_u8_nhwc_avg_generic_depthfirst_impl(
"smin z3.s, p0/M, z3.s, z18.s\n"
"smin z2.s, p0/M, z2.s, z18.s\n"
"trn1 z17.h, z3.h, z2.h\n"
- "st1b { z16.b }, p2, [%x[outptr], x25]\n"
+ "st1b { z16.b }, p2, [%x[outptr], x24]\n"
"smin z1.s, p0/M, z1.s, z18.s\n"
"smin z0.s, p0/M, z0.s, z18.s\n"
"trn1 z16.h, z1.h, z0.h\n"
"trn1 z16.b, z17.b, z16.b\n"
- "st1b { z16.b }, p1, [%x[outptr], x24]\n"
- "incb x24, ALL, MUL #4\n"
- "whilelt p1.b, x24, %x[n_channels]\n"
- "incb x27, ALL, MUL #4\n"
+ "st1b { z16.b }, p1, [%x[outptr], x23]\n"
+ "incb x23, ALL, MUL #4\n"
+ "whilelt p1.b, x23, %x[n_channels]\n"
"incb x26, ALL, MUL #4\n"
"incb x25, ALL, MUL #4\n"
+ "incb x24, ALL, MUL #4\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p4.b, x27, %x[n_channels]\n"
+ "whilelt p4.b, x26, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
- "lsr x23, %x[n_valid_cells], #0x1\n"
+ "lsr x22, %x[n_valid_cells], #0x1\n"
"mov z15.s, #0x0\n"
"mov z14.s, #0x0\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"mov z13.s, #0x0\n"
"mov z12.s, #0x0\n"
- "cbz x23, 11f\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "subs x23, x23, #0x1\n"
- "add x20, x20, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
- "ld1b { z30.b }, p4/Z, [x21, x27]\n"
+ "cbz x22, 11f\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
+ "add x19, x19, #0x10\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x26]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 2 inputs loop
".inst 0x455e0bf7 // uaddlb z23.h, z31.b, z30.b\n"
".inst 0x455e0ff6 // uaddlt z22.h, z31.b, z30.b\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "subs x23, x23, #0x1\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
".inst 0x459749ef // uaddwb z15.s, z15.s, z23.h\n"
".inst 0x45974dce // uaddwt z14.s, z14.s, z23.h\n"
- "add x20, x20, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
+ "add x19, x19, #0x10\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
".inst 0x459649ad // uaddwb z13.s, z13.s, z22.h\n"
".inst 0x45964d8c // uaddwt z12.s, z12.s, z22.h\n"
- "ld1b { z30.b }, p4/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x26]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 2 inputs tail
".inst 0x455e0bf7 // uaddlb z23.h, z31.b, z30.b\n"
@@ -363,14 +363,14 @@ void sve_u8_nhwc_avg_generic_depthfirst_impl(
".inst 0x459649ad // uaddwb z13.s, z13.s, z22.h\n"
".inst 0x45964d8c // uaddwt z12.s, z12.s, z22.h\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x1\n"
+ "ands x20, %x[n_valid_cells], #0x1\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x22, [x20], #0x8\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
+ "ldr x21, [x19], #0x8\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
".inst 0x4508abf7 // ushllb z23.h, z31.b, #0x0\n"
".inst 0x4508aff6 // ushllt z22.h, z31.b, #0x0\n"
- "subs x21, x21, #0x1\n"
+ "subs x20, x20, #0x1\n"
".inst 0x459749ef // uaddwb z15.s, z15.s, z23.h\n"
".inst 0x45974dce // uaddwt z14.s, z14.s, z23.h\n"
".inst 0x459649ad // uaddwb z13.s, z13.s, z22.h\n"
@@ -400,14 +400,14 @@ void sve_u8_nhwc_avg_generic_depthfirst_impl(
"smin z12.s, p0/M, z12.s, z18.s\n"
"trn1 z16.h, z13.h, z12.h\n"
"trn1 z16.b, z17.b, z16.b\n"
- "st1b { z16.b }, p4, [%x[outptr], x27]\n"
- "incb x27\n"
- "whilelt p4.b, x27, %x[n_channels]\n"
+ "st1b { z16.b }, p4, [%x[outptr], x26]\n"
+ "incb x26\n"
+ "whilelt p4.b, x26, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
:
: [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr), [rescale_ptr] "r" (&rescale_value), [shift_ptr] "r" (&shift_value)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
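
Note: the hunks above only renumber scratch registers (x27 becomes x26, x20 becomes x19, and so on) so that x19 is in use again; the arithmetic is unchanged. As a plain-C++ reference for what one channel of this average-pooling kernel computes — a sketch that assumes a negative shift_value encodes a rounding right shift, with SQRDMULH's rounding paraphrased rather than bit-exact:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    // One channel of sve_u8_nhwc_avg_generic_depthfirst, scalar form.
    static inline uint8_t avg_u8_channel(const uint8_t *const *inptrs,
                                         size_t n_valid_cells, size_t c,
                                         int32_t rescale_value, int32_t shift_value)
    {
        int32_t acc = 0;
        for (size_t i = 0; i < n_valid_cells; i++)
            acc += inptrs[i][c];                 // UADDLB/UADDLT + UADDWB/UADDWT above

        int64_t prod = (int64_t)acc * rescale_value;        // SQRDMULH: doubled,
        int32_t v = (int32_t)((prod + (1LL << 30)) >> 31);  // rounded high half

        if (shift_value < 0) {                              // SRSHL: a negative count
            int32_t s = -shift_value;                       // is a rounding right shift
            v = (v + (1 << (s - 1))) >> s;
        } else {
            v <<= shift_value;
        }
        return (uint8_t)std::min<int32_t>(std::max<int32_t>(v, 0), 255); // clamp + TRN1 narrow
    }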
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
index 70d308a585..2a08610db6 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -63,80 +63,80 @@ void sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
pad_left, pad_top, pad_right, pad_bottom);
__asm__ __volatile__(
- "ldr x15, [%x[args], %[offsetof_n_channels]]\n"
- "ldr x21, [%x[args], %[offsetof_outptrs]]\n"
- "mov x14, #0x0\n"
- "whilelt p2.b, x14, x15\n"
- "ldr x20, [%x[args], %[offsetof_inptrs]]\n"
- "ldp x13, x12, [x21, #0x0]\n"
+ "ldr x14, [%x[args], %[offsetof_n_channels]]\n"
+ "ldr x20, [%x[args], %[offsetof_outptrs]]\n"
+ "mov x13, #0x0\n"
+ "whilelt p2.b, x13, x14\n"
+ "ldr x19, [%x[args], %[offsetof_inptrs]]\n"
+ "ldp x12, x11, [x20, #0x0]\n"
"ptrue p1.b\n"
- "mov x11, #0x0\n"
- "ldp x10, x9, [x21, #0x10]\n"
- "ldp x28, x27, [x20, #0x0]\n"
- "ldp x26, x25, [x20, #0x10]\n"
- "ldp x24, x23, [x20, #0x20]\n"
- "ldp x22, x21, [x20, #0x30]\n"
- "ldr x20, [x20, #0x40]\n"
- "ld1b { z31.b }, p2/Z, [x27, x14]\n"
- "ld1b { z30.b }, p2/Z, [x24, x14]\n"
- "ld1b { z29.b }, p2/Z, [x21, x14]\n"
- "ld1b { z28.b }, p2/Z, [x25, x14]\n"
- "ld1b { z27.b }, p2/Z, [x28, x14]\n"
- "ld1b { z26.b }, p2/Z, [x26, x14]\n"
- "ld1b { z25.b }, p2/Z, [x23, x14]\n"
- "ld1b { z24.b }, p2/Z, [x22, x14]\n"
- "ld1b { z23.b }, p2/Z, [x20, x14]\n"
- "incw x14\n"
- "whilelt p2.b, x14, x15\n"
+ "mov x10, #0x0\n"
+ "ldp x9, x28, [x20, #0x10]\n"
+ "ldp x27, x26, [x19, #0x0]\n"
+ "ldp x25, x24, [x19, #0x10]\n"
+ "ldp x23, x22, [x19, #0x20]\n"
+ "ldp x21, x20, [x19, #0x30]\n"
+ "ldr x19, [x19, #0x40]\n"
+ "ld1b { z31.b }, p2/Z, [x26, x13]\n"
+ "ld1b { z30.b }, p2/Z, [x23, x13]\n"
+ "ld1b { z29.b }, p2/Z, [x20, x13]\n"
+ "ld1b { z28.b }, p2/Z, [x24, x13]\n"
+ "ld1b { z27.b }, p2/Z, [x27, x13]\n"
+ "ld1b { z26.b }, p2/Z, [x22, x13]\n"
+ "ld1b { z25.b }, p2/Z, [x25, x13]\n"
+ "ld1b { z24.b }, p2/Z, [x21, x13]\n"
+ "ld1b { z23.b }, p2/Z, [x19, x13]\n"
+ "incw x13\n"
+ "whilelt p2.b, x13, x14\n"
"b.none 2f\n"
"1:" // Vector: Loop
"movprfx z22, z31\n umax z22.b, p1/M, z22.b, z30.b\n"
"movprfx z21, z30\n umax z21.b, p1/M, z21.b, z29.b\n"
- "ld1b { z31.b }, p2/Z, [x27, x14]\n"
- "ld1b { z30.b }, p2/Z, [x24, x14]\n"
+ "ld1b { z31.b }, p2/Z, [x26, x13]\n"
+ "ld1b { z30.b }, p2/Z, [x23, x13]\n"
"movprfx z20, z28\n umax z20.b, p1/M, z20.b, z27.b\n"
- "movprfx z19, z26\n umax z19.b, p1/M, z19.b, z25.b\n"
- "ld1b { z29.b }, p2/Z, [x21, x14]\n"
- "ld1b { z27.b }, p2/Z, [x28, x14]\n"
- "movprfx z17, z28\n umax z17.b, p1/M, z17.b, z24.b\n"
- "movprfx z18, z25\n umax z18.b, p1/M, z18.b, z23.b\n"
- "ld1b { z28.b }, p2/Z, [x25, x14]\n"
- "ld1b { z26.b }, p2/Z, [x26, x14]\n"
- "ld1b { z25.b }, p2/Z, [x23, x14]\n"
- "ld1b { z24.b }, p2/Z, [x22, x14]\n"
- "whilelt p0.b, x11, x15\n"
+ "movprfx z17, z26\n umax z17.b, p1/M, z17.b, z25.b\n"
+ "ld1b { z29.b }, p2/Z, [x20, x13]\n"
+ "ld1b { z27.b }, p2/Z, [x27, x13]\n"
+ "movprfx z19, z24\n umax z19.b, p1/M, z19.b, z28.b\n"
+ "movprfx z18, z26\n umax z18.b, p1/M, z18.b, z23.b\n"
+ "ld1b { z28.b }, p2/Z, [x24, x13]\n"
+ "ld1b { z26.b }, p2/Z, [x22, x13]\n"
+ "ld1b { z25.b }, p2/Z, [x25, x13]\n"
+ "ld1b { z24.b }, p2/Z, [x21, x13]\n"
+ "whilelt p0.b, x10, x14\n"
"movprfx z16, z22\n umax z16.b, p1/M, z16.b, z20.b\n"
- "ld1b { z23.b }, p2/Z, [x20, x14]\n"
- "incw x14\n"
- "whilelt p2.b, x14, x15\n"
- "st1b { z16.b }, p0, [x13, x11]\n"
- "movprfx z16, z19\n umax z16.b, p1/M, z16.b, z22.b\n"
- "umax z17.b, p1/M, z17.b, z21.b\n"
- "st1b { z16.b }, p0, [x12, x11]\n"
- "movprfx z16, z18\n umax z16.b, p1/M, z16.b, z21.b\n"
- "st1b { z17.b }, p0, [x10, x11]\n"
- "st1b { z16.b }, p0, [x9, x11]\n"
- "incw x11\n"
+ "ld1b { z23.b }, p2/Z, [x19, x13]\n"
+ "incw x13\n"
+ "whilelt p2.b, x13, x14\n"
+ "st1b { z16.b }, p0, [x12, x10]\n"
+ "movprfx z16, z17\n umax z16.b, p1/M, z16.b, z22.b\n"
+ "movprfx z17, z21\n umax z17.b, p1/M, z17.b, z19.b\n"
+ "st1b { z16.b }, p0, [x11, x10]\n"
+ "movprfx z16, z21\n umax z16.b, p1/M, z16.b, z18.b\n"
+ "st1b { z17.b }, p0, [x9, x10]\n"
+ "st1b { z16.b }, p0, [x28, x10]\n"
+ "incw x10\n"
"b.any 1b\n"
"2:" // Vector: Tail
"movprfx z22, z31\n umax z22.b, p1/M, z22.b, z30.b\n"
"movprfx z21, z30\n umax z21.b, p1/M, z21.b, z29.b\n"
"movprfx z20, z28\n umax z20.b, p1/M, z20.b, z27.b\n"
- "movprfx z19, z26\n umax z19.b, p1/M, z19.b, z25.b\n"
- "movprfx z17, z28\n umax z17.b, p1/M, z17.b, z24.b\n"
- "movprfx z18, z25\n umax z18.b, p1/M, z18.b, z23.b\n"
- "whilelt p0.b, x11, x15\n"
+ "movprfx z17, z26\n umax z17.b, p1/M, z17.b, z25.b\n"
+ "movprfx z19, z24\n umax z19.b, p1/M, z19.b, z28.b\n"
+ "movprfx z18, z26\n umax z18.b, p1/M, z18.b, z23.b\n"
+ "whilelt p0.b, x10, x14\n"
"movprfx z16, z22\n umax z16.b, p1/M, z16.b, z20.b\n"
- "st1b { z16.b }, p0, [x13, x11]\n"
- "movprfx z16, z19\n umax z16.b, p1/M, z16.b, z22.b\n"
- "umax z17.b, p1/M, z17.b, z21.b\n"
- "st1b { z16.b }, p0, [x12, x11]\n"
- "movprfx z16, z18\n umax z16.b, p1/M, z16.b, z21.b\n"
- "st1b { z17.b }, p0, [x10, x11]\n"
- "st1b { z16.b }, p0, [x9, x11]\n"
+ "st1b { z16.b }, p0, [x12, x10]\n"
+ "movprfx z16, z17\n umax z16.b, p1/M, z16.b, z22.b\n"
+ "movprfx z17, z21\n umax z17.b, p1/M, z17.b, z19.b\n"
+ "st1b { z16.b }, p0, [x11, x10]\n"
+ "movprfx z16, z21\n umax z16.b, p1/M, z16.b, z18.b\n"
+ "st1b { z17.b }, p0, [x9, x10]\n"
+ "st1b { z16.b }, p0, [x28, x10]\n"
:
: [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
- : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
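
This specialised kernel consumes a 3x3 input patch (nine row pointers) and writes a 2x2 block of outputs per channel; the partial maxima held in z21/z22 above are computed once and reused across the overlapping windows. A scalar sketch of the window arithmetic, assuming a row-major patch layout:

    #include <algorithm>
    #include <cstdint>

    // 2x2 max pooling, stride 1, over a 3x3 patch: four overlapping windows.
    static void max2x2_s1_out2x2(const uint8_t in[3][3], uint8_t out[2][2])
    {
        for (int r = 0; r < 2; r++)
            for (int c = 0; c < 2; c++)
                out[r][c] = std::max(std::max(in[r][c],     in[r][c + 1]),
                                     std::max(in[r + 1][c], in[r + 1][c + 1]));
    }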
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_generic_depthfirst/generic.cpp
index 34aa5a3dd6..0db1ad17f2 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,82 +40,82 @@ void sve_u8_nhwc_max_generic_depthfirst_impl(
)
{
__asm__ __volatile__(
- "mov x9, #0x0\n"
- "cntb x28\n"
- "cntb x27, ALL, MUL #2\n"
- "cntb x26, ALL, MUL #3\n"
- "whilelt p4.b, x9, %x[n_channels]\n"
- "whilelt p3.b, x28, %x[n_channels]\n"
- "whilelt p2.b, x27, %x[n_channels]\n"
- "whilelt p1.b, x26, %x[n_channels]\n"
+ "mov x28, #0x0\n"
+ "cntb x27\n"
+ "cntb x26, ALL, MUL #2\n"
+ "cntb x25, ALL, MUL #3\n"
+ "whilelt p4.b, x28, %x[n_channels]\n"
+ "whilelt p3.b, x27, %x[n_channels]\n"
+ "whilelt p2.b, x26, %x[n_channels]\n"
+ "whilelt p1.b, x25, %x[n_channels]\n"
"ptrue p0.b\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
- "lsr x25, %x[n_valid_cells], #0x2\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"mov z8.b, #0x0\n"
"mov z7.b, #0x0\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"mov z6.b, #0x0\n"
"mov z5.b, #0x0\n"
- "cbz x25, 4f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "subs x25, x25, #0x1\n"
- "add x20, x20, #0x20\n"
- "ld1b { z4.b }, p4/Z, [x24, x9]\n"
- "ld1b { z3.b }, p4/Z, [x23, x9]\n"
- "ld1b { z2.b }, p4/Z, [x22, x9]\n"
- "ld1b { z1.b }, p4/Z, [x21, x9]\n"
- "ld1b { z0.b }, p3/Z, [x24, x28]\n"
- "ld1b { z31.b }, p3/Z, [x23, x28]\n"
- "ld1b { z22.b }, p3/Z, [x22, x28]\n"
- "ld1b { z30.b }, p3/Z, [x21, x28]\n"
- "ld1b { z29.b }, p2/Z, [x24, x27]\n"
- "ld1b { z28.b }, p2/Z, [x23, x27]\n"
- "ld1b { z21.b }, p2/Z, [x22, x27]\n"
- "ld1b { z27.b }, p2/Z, [x21, x27]\n"
- "ld1b { z26.b }, p1/Z, [x24, x26]\n"
- "ld1b { z25.b }, p1/Z, [x23, x26]\n"
- "ld1b { z20.b }, p1/Z, [x22, x26]\n"
- "ld1b { z24.b }, p1/Z, [x21, x26]\n"
+ "cbz x24, 4f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+ "ld1b { z3.b }, p4/Z, [x22, x28]\n"
+ "ld1b { z2.b }, p4/Z, [x21, x28]\n"
+ "ld1b { z1.b }, p4/Z, [x20, x28]\n"
+ "ld1b { z0.b }, p3/Z, [x23, x27]\n"
+ "ld1b { z31.b }, p3/Z, [x22, x27]\n"
+ "ld1b { z22.b }, p3/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p3/Z, [x20, x27]\n"
+ "ld1b { z29.b }, p2/Z, [x23, x26]\n"
+ "ld1b { z28.b }, p2/Z, [x22, x26]\n"
+ "ld1b { z21.b }, p2/Z, [x21, x26]\n"
+ "ld1b { z27.b }, p2/Z, [x20, x26]\n"
+ "ld1b { z26.b }, p1/Z, [x23, x25]\n"
+ "ld1b { z25.b }, p1/Z, [x22, x25]\n"
+ "ld1b { z20.b }, p1/Z, [x21, x25]\n"
+ "ld1b { z24.b }, p1/Z, [x20, x25]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
"movprfx z19, z4\n umax z19.b, p0/M, z19.b, z3.b\n"
"movprfx z23, z2\n umax z23.b, p0/M, z23.b, z1.b\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
"movprfx z18, z0\n umax z18.b, p0/M, z18.b, z31.b\n"
"umax z22.b, p0/M, z22.b, z30.b\n"
- "ld1b { z4.b }, p4/Z, [x24, x9]\n"
- "ld1b { z3.b }, p4/Z, [x23, x9]\n"
+ "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+ "ld1b { z3.b }, p4/Z, [x22, x28]\n"
"movprfx z17, z29\n umax z17.b, p0/M, z17.b, z28.b\n"
"umax z21.b, p0/M, z21.b, z27.b\n"
- "ld1b { z2.b }, p4/Z, [x22, x9]\n"
- "ld1b { z1.b }, p4/Z, [x21, x9]\n"
+ "ld1b { z2.b }, p4/Z, [x21, x28]\n"
+ "ld1b { z1.b }, p4/Z, [x20, x28]\n"
"movprfx z16, z26\n umax z16.b, p0/M, z16.b, z25.b\n"
"umax z20.b, p0/M, z20.b, z24.b\n"
- "ld1b { z0.b }, p3/Z, [x24, x28]\n"
- "ld1b { z31.b }, p3/Z, [x23, x28]\n"
+ "ld1b { z0.b }, p3/Z, [x23, x27]\n"
+ "ld1b { z31.b }, p3/Z, [x22, x27]\n"
"umax z19.b, p0/M, z19.b, z23.b\n"
"umax z18.b, p0/M, z18.b, z22.b\n"
- "ld1b { z22.b }, p3/Z, [x22, x28]\n"
- "ld1b { z30.b }, p3/Z, [x21, x28]\n"
+ "ld1b { z22.b }, p3/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p3/Z, [x20, x27]\n"
"umax z17.b, p0/M, z17.b, z21.b\n"
"umax z16.b, p0/M, z16.b, z20.b\n"
- "ld1b { z29.b }, p2/Z, [x24, x27]\n"
- "ld1b { z28.b }, p2/Z, [x23, x27]\n"
- "subs x25, x25, #0x1\n"
+ "ld1b { z29.b }, p2/Z, [x23, x26]\n"
+ "ld1b { z28.b }, p2/Z, [x22, x26]\n"
+ "subs x24, x24, #0x1\n"
"umax z8.b, p0/M, z8.b, z19.b\n"
- "ld1b { z21.b }, p2/Z, [x22, x27]\n"
- "ld1b { z27.b }, p2/Z, [x21, x27]\n"
+ "ld1b { z21.b }, p2/Z, [x21, x26]\n"
+ "ld1b { z27.b }, p2/Z, [x20, x26]\n"
"umax z7.b, p0/M, z7.b, z18.b\n"
"umax z6.b, p0/M, z6.b, z17.b\n"
- "ld1b { z26.b }, p1/Z, [x24, x26]\n"
- "ld1b { z25.b }, p1/Z, [x23, x26]\n"
+ "ld1b { z26.b }, p1/Z, [x23, x25]\n"
+ "ld1b { z25.b }, p1/Z, [x22, x25]\n"
"umax z5.b, p0/M, z5.b, z16.b\n"
- "add x20, x20, #0x20\n"
- "ld1b { z20.b }, p1/Z, [x22, x26]\n"
- "ld1b { z24.b }, p1/Z, [x21, x26]\n"
+ "add x19, x19, #0x20\n"
+ "ld1b { z20.b }, p1/Z, [x21, x25]\n"
+ "ld1b { z24.b }, p1/Z, [x20, x25]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
"movprfx z19, z4\n umax z19.b, p0/M, z19.b, z3.b\n"
@@ -135,61 +135,61 @@ void sve_u8_nhwc_max_generic_depthfirst_impl(
"umax z6.b, p0/M, z6.b, z17.b\n"
"umax z5.b, p0/M, z5.b, z16.b\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ld1b { z4.b }, p4/Z, [x24, x9]\n"
- "subs x21, x21, #0x1\n"
+ "ldr x23, [x19], #0x8\n"
+ "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+ "subs x20, x20, #0x1\n"
"umax z8.b, p0/M, z8.b, z4.b\n"
- "ld1b { z0.b }, p3/Z, [x24, x28]\n"
- "ld1b { z29.b }, p2/Z, [x24, x27]\n"
+ "ld1b { z0.b }, p3/Z, [x23, x27]\n"
+ "ld1b { z29.b }, p2/Z, [x23, x26]\n"
"umax z7.b, p0/M, z7.b, z0.b\n"
"umax z6.b, p0/M, z6.b, z29.b\n"
- "ld1b { z26.b }, p1/Z, [x24, x26]\n"
+ "ld1b { z26.b }, p1/Z, [x23, x25]\n"
"umax z5.b, p0/M, z5.b, z26.b\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "st1b { z8.b }, p4, [%x[outptr], x9]\n"
- "incb x9, ALL, MUL #4\n"
- "st1b { z7.b }, p3, [%x[outptr], x28]\n"
+ "st1b { z8.b }, p4, [%x[outptr], x28]\n"
"incb x28, ALL, MUL #4\n"
- "st1b { z6.b }, p2, [%x[outptr], x27]\n"
+ "st1b { z7.b }, p3, [%x[outptr], x27]\n"
"incb x27, ALL, MUL #4\n"
- "st1b { z5.b }, p1, [%x[outptr], x26]\n"
+ "st1b { z6.b }, p2, [%x[outptr], x26]\n"
"incb x26, ALL, MUL #4\n"
- "whilelt p1.b, x26, %x[n_channels]\n"
+ "st1b { z5.b }, p1, [%x[outptr], x25]\n"
+ "incb x25, ALL, MUL #4\n"
+ "whilelt p1.b, x25, %x[n_channels]\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p4.b, x9, %x[n_channels]\n"
+ "whilelt p4.b, x28, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
- "lsr x25, %x[n_valid_cells], #0x2\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"mov z8.b, #0x0\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 11f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "subs x25, x25, #0x1\n"
- "add x20, x20, #0x20\n"
- "ld1b { z4.b }, p4/Z, [x24, x9]\n"
- "ld1b { z3.b }, p4/Z, [x23, x9]\n"
- "ld1b { z2.b }, p4/Z, [x22, x9]\n"
- "ld1b { z1.b }, p4/Z, [x21, x9]\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 11f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+ "ld1b { z3.b }, p4/Z, [x22, x28]\n"
+ "ld1b { z2.b }, p4/Z, [x21, x28]\n"
+ "ld1b { z1.b }, p4/Z, [x20, x28]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
"movprfx z19, z4\n umax z19.b, p0/M, z19.b, z3.b\n"
"movprfx z23, z2\n umax z23.b, p0/M, z23.b, z1.b\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
"umax z19.b, p0/M, z19.b, z23.b\n"
- "subs x25, x25, #0x1\n"
- "ld1b { z4.b }, p4/Z, [x24, x9]\n"
- "ld1b { z3.b }, p4/Z, [x23, x9]\n"
+ "subs x24, x24, #0x1\n"
+ "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+ "ld1b { z3.b }, p4/Z, [x22, x28]\n"
"umax z8.b, p0/M, z8.b, z19.b\n"
- "add x20, x20, #0x20\n"
- "ld1b { z2.b }, p4/Z, [x22, x9]\n"
- "ld1b { z1.b }, p4/Z, [x21, x9]\n"
+ "add x19, x19, #0x20\n"
+ "ld1b { z2.b }, p4/Z, [x21, x28]\n"
+ "ld1b { z1.b }, p4/Z, [x20, x28]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
"movprfx z19, z4\n umax z19.b, p0/M, z19.b, z3.b\n"
@@ -197,23 +197,23 @@ void sve_u8_nhwc_max_generic_depthfirst_impl(
"umax z19.b, p0/M, z19.b, z23.b\n"
"umax z8.b, p0/M, z8.b, z19.b\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ld1b { z4.b }, p4/Z, [x24, x9]\n"
- "subs x21, x21, #0x1\n"
+ "ldr x23, [x19], #0x8\n"
+ "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+ "subs x20, x20, #0x1\n"
"umax z8.b, p0/M, z8.b, z4.b\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "st1b { z8.b }, p4, [%x[outptr], x9]\n"
- "incb x9\n"
- "whilelt p4.b, x9, %x[n_channels]\n"
+ "st1b { z8.b }, p4, [%x[outptr], x28]\n"
+ "incb x28\n"
+ "whilelt p4.b, x28, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
:
: [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
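
The generic max kernel is structurally the same as the average one: a running per-channel maximum over every valid cell, vectorised four SVE registers of channels at a time (predicates p4..p1) with the cell loop unrolled by four. Its scalar shape, for reference:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    void max_u8_generic(uint8_t *outptr, const uint8_t *const *inptrs,
                        size_t n_valid_cells, size_t n_channels)
    {
        for (size_t c = 0; c < n_channels; c++) {
            uint8_t m = 0;                       // accumulators start at 0 ("mov z8.b, #0x0")
            for (size_t i = 0; i < n_valid_cells; i++)
                m = std::max(m, inptrs[i][c]);   // UMAX
            outptr[c] = m;
        }
    }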
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_avg_generic_depthfirst/generic.cpp
index 36ac381004..903ada3462 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -119,24 +119,24 @@ void sve_u8q_nhwc_avg_generic_depthfirst_impl(
);
__asm__ __volatile__(
- "mov x27, #0x0\n"
- "cntb x26\n"
- "cntb x25, ALL, MUL #2\n"
- "cntb x24, ALL, MUL #3\n"
- "whilelt p4.b, x27, %x[n_channels]\n"
- "whilelt p3.b, x26, %x[n_channels]\n"
- "whilelt p2.b, x25, %x[n_channels]\n"
- "whilelt p1.b, x24, %x[n_channels]\n"
+ "mov x26, #0x0\n"
+ "cntb x25\n"
+ "cntb x24, ALL, MUL #2\n"
+ "cntb x23, ALL, MUL #3\n"
+ "whilelt p4.b, x26, %x[n_channels]\n"
+ "whilelt p3.b, x25, %x[n_channels]\n"
+ "whilelt p2.b, x24, %x[n_channels]\n"
+ "whilelt p1.b, x23, %x[n_channels]\n"
"ptrue p0.b\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
"ld1rw { z15.s }, p0/Z, [%x[accumulator_init]]\n"
- "lsr x23, %x[n_valid_cells], #0x1\n"
+ "lsr x22, %x[n_valid_cells], #0x1\n"
"mov z14.d, z15.d\n"
"mov z13.d, z15.d\n"
"mov z12.d, z15.d\n"
"mov z11.d, z15.d\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"mov z10.d, z15.d\n"
"mov z9.d, z15.d\n"
"mov z8.d, z15.d\n"
@@ -148,43 +148,43 @@ void sve_u8q_nhwc_avg_generic_depthfirst_impl(
"mov z2.d, z15.d\n"
"mov z1.d, z15.d\n"
"mov z0.d, z15.d\n"
- "cbz x23, 4f\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "subs x23, x23, #0x1\n"
- "add x20, x20, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
- "ld1b { z30.b }, p4/Z, [x21, x27]\n"
- "ld1b { z29.b }, p3/Z, [x22, x26]\n"
- "ld1b { z28.b }, p3/Z, [x21, x26]\n"
- "ld1b { z27.b }, p2/Z, [x22, x25]\n"
- "ld1b { z26.b }, p2/Z, [x21, x25]\n"
- "ld1b { z25.b }, p1/Z, [x22, x24]\n"
- "ld1b { z24.b }, p1/Z, [x21, x24]\n"
+ "cbz x22, 4f\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
+ "add x19, x19, #0x10\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+ "ld1b { z29.b }, p3/Z, [x21, x25]\n"
+ "ld1b { z28.b }, p3/Z, [x20, x25]\n"
+ "ld1b { z27.b }, p2/Z, [x21, x24]\n"
+ "ld1b { z26.b }, p2/Z, [x20, x24]\n"
+ "ld1b { z25.b }, p1/Z, [x21, x23]\n"
+ "ld1b { z24.b }, p1/Z, [x20, x23]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 2 inputs loop
".inst 0x455e0bf7 // uaddlb z23.h, z31.b, z30.b\n"
".inst 0x455e0ff6 // uaddlt z22.h, z31.b, z30.b\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "subs x23, x23, #0x1\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
".inst 0x455c0bb5 // uaddlb z21.h, z29.b, z28.b\n"
".inst 0x455c0fb4 // uaddlt z20.h, z29.b, z28.b\n"
- "add x20, x20, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
+ "add x19, x19, #0x10\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
".inst 0x455a0b73 // uaddlb z19.h, z27.b, z26.b\n"
".inst 0x455a0f72 // uaddlt z18.h, z27.b, z26.b\n"
- "ld1b { z30.b }, p4/Z, [x21, x27]\n"
- "ld1b { z29.b }, p3/Z, [x22, x26]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x26]\n"
+ "ld1b { z29.b }, p3/Z, [x21, x25]\n"
".inst 0x45580b31 // uaddlb z17.h, z25.b, z24.b\n"
".inst 0x45580f30 // uaddlt z16.h, z25.b, z24.b\n"
- "ld1b { z28.b }, p3/Z, [x21, x26]\n"
- "ld1b { z27.b }, p2/Z, [x22, x25]\n"
+ "ld1b { z28.b }, p3/Z, [x20, x25]\n"
+ "ld1b { z27.b }, p2/Z, [x21, x24]\n"
".inst 0x459749ef // uaddwb z15.s, z15.s, z23.h\n"
".inst 0x45974dce // uaddwt z14.s, z14.s, z23.h\n"
- "ld1b { z26.b }, p2/Z, [x21, x25]\n"
- "ld1b { z25.b }, p1/Z, [x22, x24]\n"
+ "ld1b { z26.b }, p2/Z, [x20, x24]\n"
+ "ld1b { z25.b }, p1/Z, [x21, x23]\n"
".inst 0x459649ad // uaddwb z13.s, z13.s, z22.h\n"
".inst 0x45964d8c // uaddwt z12.s, z12.s, z22.h\n"
- "ld1b { z24.b }, p1/Z, [x21, x24]\n"
+ "ld1b { z24.b }, p1/Z, [x20, x23]\n"
".inst 0x4595496b // uaddwb z11.s, z11.s, z21.h\n"
".inst 0x45954d4a // uaddwt z10.s, z10.s, z21.h\n"
".inst 0x45944929 // uaddwb z9.s, z9.s, z20.h\n"
@@ -224,21 +224,21 @@ void sve_u8q_nhwc_avg_generic_depthfirst_impl(
".inst 0x45904821 // uaddwb z1.s, z1.s, z16.h\n"
".inst 0x45904c00 // uaddwt z0.s, z0.s, z16.h\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x1\n"
+ "ands x20, %x[n_valid_cells], #0x1\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x22, [x20], #0x8\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
+ "ldr x21, [x19], #0x8\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
".inst 0x4508abf7 // ushllb z23.h, z31.b, #0x0\n"
".inst 0x4508aff6 // ushllt z22.h, z31.b, #0x0\n"
- "ld1b { z29.b }, p3/Z, [x22, x26]\n"
- "ld1b { z27.b }, p2/Z, [x22, x25]\n"
+ "ld1b { z29.b }, p3/Z, [x21, x25]\n"
+ "ld1b { z27.b }, p2/Z, [x21, x24]\n"
".inst 0x4508abb5 // ushllb z21.h, z29.b, #0x0\n"
".inst 0x4508afb4 // ushllt z20.h, z29.b, #0x0\n"
- "ld1b { z25.b }, p1/Z, [x22, x24]\n"
+ "ld1b { z25.b }, p1/Z, [x21, x23]\n"
".inst 0x4508ab73 // ushllb z19.h, z27.b, #0x0\n"
".inst 0x4508af72 // ushllt z18.h, z27.b, #0x0\n"
- "subs x21, x21, #0x1\n"
+ "subs x20, x20, #0x1\n"
".inst 0x4508ab31 // ushllb z17.h, z25.b, #0x0\n"
".inst 0x4508af30 // ushllt z16.h, z25.b, #0x0\n"
".inst 0x459749ef // uaddwb z15.s, z15.s, z23.h\n"
@@ -293,12 +293,12 @@ void sve_u8q_nhwc_avg_generic_depthfirst_impl(
".inst 0x04b07463 // sqrdmulh z3.s, z3.s, z16.s\n"
".inst 0x04b07442 // sqrdmulh z2.s, z2.s, z16.s\n"
".inst 0x04b07421 // sqrdmulh z1.s, z1.s, z16.s\n"
- "add x20, %x[quant_params], %[offsetof_qp_output_offset]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
".inst 0x4482822f // srshl z15.s, p0/M, z15.s, z17.s\n"
".inst 0x04b07400 // sqrdmulh z0.s, z0.s, z16.s\n"
".inst 0x4482822e // srshl z14.s, p0/M, z14.s, z17.s\n"
".inst 0x4482822d // srshl z13.s, p0/M, z13.s, z17.s\n"
- "ld1rw { z16.s }, p0/Z, [x20]\n"
+ "ld1rw { z16.s }, p0/Z, [x19]\n"
".inst 0x4482822c // srshl z12.s, p0/M, z12.s, z17.s\n"
".inst 0x4482822b // srshl z11.s, p0/M, z11.s, z17.s\n"
"add z15.s, z15.s, z16.s\n"
@@ -356,7 +356,7 @@ void sve_u8q_nhwc_avg_generic_depthfirst_impl(
"smin z11.s, p0/M, z11.s, z18.s\n"
"smin z10.s, p0/M, z10.s, z18.s\n"
"trn1 z17.h, z11.h, z10.h\n"
- "st1b { z16.b }, p4, [%x[outptr], x27]\n"
+ "st1b { z16.b }, p4, [%x[outptr], x26]\n"
"smin z9.s, p0/M, z9.s, z18.s\n"
"smin z8.s, p0/M, z8.s, z18.s\n"
"trn1 z16.h, z9.h, z8.h\n"
@@ -364,7 +364,7 @@ void sve_u8q_nhwc_avg_generic_depthfirst_impl(
"smin z7.s, p0/M, z7.s, z18.s\n"
"smin z6.s, p0/M, z6.s, z18.s\n"
"trn1 z17.h, z7.h, z6.h\n"
- "st1b { z16.b }, p3, [%x[outptr], x26]\n"
+ "st1b { z16.b }, p3, [%x[outptr], x25]\n"
"smin z5.s, p0/M, z5.s, z18.s\n"
"smin z4.s, p0/M, z4.s, z18.s\n"
"trn1 z16.h, z5.h, z4.h\n"
@@ -372,47 +372,47 @@ void sve_u8q_nhwc_avg_generic_depthfirst_impl(
"smin z3.s, p0/M, z3.s, z18.s\n"
"smin z2.s, p0/M, z2.s, z18.s\n"
"trn1 z17.h, z3.h, z2.h\n"
- "st1b { z16.b }, p2, [%x[outptr], x25]\n"
+ "st1b { z16.b }, p2, [%x[outptr], x24]\n"
"smin z1.s, p0/M, z1.s, z18.s\n"
"smin z0.s, p0/M, z0.s, z18.s\n"
"trn1 z16.h, z1.h, z0.h\n"
"trn1 z16.b, z17.b, z16.b\n"
- "st1b { z16.b }, p1, [%x[outptr], x24]\n"
- "incb x24, ALL, MUL #4\n"
- "whilelt p1.b, x24, %x[n_channels]\n"
- "incb x27, ALL, MUL #4\n"
+ "st1b { z16.b }, p1, [%x[outptr], x23]\n"
+ "incb x23, ALL, MUL #4\n"
+ "whilelt p1.b, x23, %x[n_channels]\n"
"incb x26, ALL, MUL #4\n"
"incb x25, ALL, MUL #4\n"
+ "incb x24, ALL, MUL #4\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p4.b, x27, %x[n_channels]\n"
+ "whilelt p4.b, x26, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
"ld1rw { z15.s }, p0/Z, [%x[accumulator_init]]\n"
- "lsr x23, %x[n_valid_cells], #0x1\n"
+ "lsr x22, %x[n_valid_cells], #0x1\n"
"mov z14.d, z15.d\n"
"mov z13.d, z15.d\n"
"mov z12.d, z15.d\n"
- "mov x20, %x[inptrs]\n"
- "cbz x23, 11f\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "subs x23, x23, #0x1\n"
- "add x20, x20, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
- "ld1b { z30.b }, p4/Z, [x21, x27]\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x22, 11f\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
+ "add x19, x19, #0x10\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x26]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 2 inputs loop
".inst 0x455e0bf7 // uaddlb z23.h, z31.b, z30.b\n"
".inst 0x455e0ff6 // uaddlt z22.h, z31.b, z30.b\n"
- "ldp x22, x21, [x20, #0x0]\n"
- "subs x23, x23, #0x1\n"
+ "ldp x21, x20, [x19, #0x0]\n"
+ "subs x22, x22, #0x1\n"
".inst 0x459749ef // uaddwb z15.s, z15.s, z23.h\n"
".inst 0x45974dce // uaddwt z14.s, z14.s, z23.h\n"
- "add x20, x20, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
+ "add x19, x19, #0x10\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
".inst 0x459649ad // uaddwb z13.s, z13.s, z22.h\n"
".inst 0x45964d8c // uaddwt z12.s, z12.s, z22.h\n"
- "ld1b { z30.b }, p4/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x26]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 2 inputs tail
".inst 0x455e0bf7 // uaddlb z23.h, z31.b, z30.b\n"
@@ -422,14 +422,14 @@ void sve_u8q_nhwc_avg_generic_depthfirst_impl(
".inst 0x459649ad // uaddwb z13.s, z13.s, z22.h\n"
".inst 0x45964d8c // uaddwt z12.s, z12.s, z22.h\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x1\n"
+ "ands x20, %x[n_valid_cells], #0x1\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x22, [x20], #0x8\n"
- "ld1b { z31.b }, p4/Z, [x22, x27]\n"
+ "ldr x21, [x19], #0x8\n"
+ "ld1b { z31.b }, p4/Z, [x21, x26]\n"
".inst 0x4508abf7 // ushllb z23.h, z31.b, #0x0\n"
".inst 0x4508aff6 // ushllt z22.h, z31.b, #0x0\n"
- "subs x21, x21, #0x1\n"
+ "subs x20, x20, #0x1\n"
".inst 0x459749ef // uaddwb z15.s, z15.s, z23.h\n"
".inst 0x45974dce // uaddwt z14.s, z14.s, z23.h\n"
".inst 0x459649ad // uaddwb z13.s, z13.s, z22.h\n"
@@ -446,12 +446,12 @@ void sve_u8q_nhwc_avg_generic_depthfirst_impl(
".inst 0x04b075ef // sqrdmulh z15.s, z15.s, z16.s\n"
".inst 0x04b075ce // sqrdmulh z14.s, z14.s, z16.s\n"
".inst 0x04b075ad // sqrdmulh z13.s, z13.s, z16.s\n"
- "add x20, %x[quant_params], %[offsetof_qp_output_offset]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
".inst 0x4482822f // srshl z15.s, p0/M, z15.s, z17.s\n"
".inst 0x04b0758c // sqrdmulh z12.s, z12.s, z16.s\n"
".inst 0x4482822e // srshl z14.s, p0/M, z14.s, z17.s\n"
".inst 0x4482822d // srshl z13.s, p0/M, z13.s, z17.s\n"
- "ld1rw { z16.s }, p0/Z, [x20]\n"
+ "ld1rw { z16.s }, p0/Z, [x19]\n"
".inst 0x4482822c // srshl z12.s, p0/M, z12.s, z17.s\n"
"add z15.s, z15.s, z16.s\n"
"add z14.s, z14.s, z16.s\n"
@@ -470,14 +470,14 @@ void sve_u8q_nhwc_avg_generic_depthfirst_impl(
"smin z12.s, p0/M, z12.s, z18.s\n"
"trn1 z16.h, z13.h, z12.h\n"
"trn1 z16.b, z17.b, z16.b\n"
- "st1b { z16.b }, p4, [%x[outptr], x27]\n"
- "incb x27\n"
- "whilelt p4.b, x27, %x[n_channels]\n"
+ "st1b { z16.b }, p4, [%x[outptr], x26]\n"
+ "incb x26\n"
+ "whilelt p4.b, x26, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
:
: [accumulator_init] "r" (&accumulator_init), [combined_rescale_value] "r" (&combined_rescale_value), [inptrs] "r" (inptrs), [left_shift] "r" (&left_shift), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [offsetof_qp_output_offset] "I" (offsetof(Requantize32, output_offset)), [outptr] "r" (outptr), [quant_params] "r" (&qp), [right_shift] "r" (&right_shift)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
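
The quantized variant differs from the plain u8 kernel mainly in its epilogue: the accumulator starts from accumulator_init (which folds the rounding bias), is shifted, scaled by combined_rescale_value with SQRDMULH, shifted again, offset, and clamped. A rough scalar model using the operand names above — the exact saturation and rounding of SQRDMULH/SRSHL are paraphrased here, not claimed bit-exact:

    #include <algorithm>
    #include <cstdint>

    static inline int32_t sqrdmulh_s32(int32_t a, int32_t b)   // doubled, rounded
    {                                                          // high half, saturated
        int64_t r = (((int64_t)a * b) + (1LL << 30)) >> 31;
        return (int32_t)std::min<int64_t>(std::max<int64_t>(r, INT32_MIN), INT32_MAX);
    }

    static inline int32_t srshl_s32(int32_t v, int32_t shift)  // negative count is a
    {                                                          // rounding right shift
        if (shift >= 0) return v << shift;
        int32_t s = -shift;
        return (v + (1 << (s - 1))) >> s;
    }

    static inline uint8_t u8q_avg_epilogue(int32_t acc, int32_t left_shift,
                                           int32_t combined_rescale_value,
                                           int32_t right_shift, int32_t output_offset)
    {
        int32_t v = srshl_s32(acc, left_shift);
        v = sqrdmulh_s32(v, combined_rescale_value);
        v = srshl_s32(v, right_shift);
        v += output_offset;
        return (uint8_t)std::min<int32_t>(std::max<int32_t>(v, 0), 255);
    }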
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_max_generic_depthfirst/generic.cpp
index a00cbc59d8..26d2152561 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,82 +42,82 @@ void sve_u8q_nhwc_max_generic_depthfirst_impl(
)
{
__asm__ __volatile__(
- "mov x9, #0x0\n"
- "cntb x28\n"
- "cntb x27, ALL, MUL #2\n"
- "cntb x26, ALL, MUL #3\n"
- "whilelt p4.b, x9, %x[n_channels]\n"
- "whilelt p3.b, x28, %x[n_channels]\n"
- "whilelt p2.b, x27, %x[n_channels]\n"
- "whilelt p1.b, x26, %x[n_channels]\n"
+ "mov x28, #0x0\n"
+ "cntb x27\n"
+ "cntb x26, ALL, MUL #2\n"
+ "cntb x25, ALL, MUL #3\n"
+ "whilelt p4.b, x28, %x[n_channels]\n"
+ "whilelt p3.b, x27, %x[n_channels]\n"
+ "whilelt p2.b, x26, %x[n_channels]\n"
+ "whilelt p1.b, x25, %x[n_channels]\n"
"ptrue p0.b\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
- "lsr x25, %x[n_valid_cells], #0x2\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"mov z8.b, #0x0\n"
"mov z7.b, #0x0\n"
- "mov x20, %x[inptrs]\n"
+ "mov x19, %x[inptrs]\n"
"mov z6.b, #0x0\n"
"mov z5.b, #0x0\n"
- "cbz x25, 4f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "subs x25, x25, #0x1\n"
- "add x20, x20, #0x20\n"
- "ld1b { z4.b }, p4/Z, [x24, x9]\n"
- "ld1b { z3.b }, p4/Z, [x23, x9]\n"
- "ld1b { z2.b }, p4/Z, [x22, x9]\n"
- "ld1b { z1.b }, p4/Z, [x21, x9]\n"
- "ld1b { z0.b }, p3/Z, [x24, x28]\n"
- "ld1b { z31.b }, p3/Z, [x23, x28]\n"
- "ld1b { z22.b }, p3/Z, [x22, x28]\n"
- "ld1b { z30.b }, p3/Z, [x21, x28]\n"
- "ld1b { z29.b }, p2/Z, [x24, x27]\n"
- "ld1b { z28.b }, p2/Z, [x23, x27]\n"
- "ld1b { z21.b }, p2/Z, [x22, x27]\n"
- "ld1b { z27.b }, p2/Z, [x21, x27]\n"
- "ld1b { z26.b }, p1/Z, [x24, x26]\n"
- "ld1b { z25.b }, p1/Z, [x23, x26]\n"
- "ld1b { z20.b }, p1/Z, [x22, x26]\n"
- "ld1b { z24.b }, p1/Z, [x21, x26]\n"
+ "cbz x24, 4f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+ "ld1b { z3.b }, p4/Z, [x22, x28]\n"
+ "ld1b { z2.b }, p4/Z, [x21, x28]\n"
+ "ld1b { z1.b }, p4/Z, [x20, x28]\n"
+ "ld1b { z0.b }, p3/Z, [x23, x27]\n"
+ "ld1b { z31.b }, p3/Z, [x22, x27]\n"
+ "ld1b { z22.b }, p3/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p3/Z, [x20, x27]\n"
+ "ld1b { z29.b }, p2/Z, [x23, x26]\n"
+ "ld1b { z28.b }, p2/Z, [x22, x26]\n"
+ "ld1b { z21.b }, p2/Z, [x21, x26]\n"
+ "ld1b { z27.b }, p2/Z, [x20, x26]\n"
+ "ld1b { z26.b }, p1/Z, [x23, x25]\n"
+ "ld1b { z25.b }, p1/Z, [x22, x25]\n"
+ "ld1b { z20.b }, p1/Z, [x21, x25]\n"
+ "ld1b { z24.b }, p1/Z, [x20, x25]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
"movprfx z19, z4\n umax z19.b, p0/M, z19.b, z3.b\n"
"movprfx z23, z2\n umax z23.b, p0/M, z23.b, z1.b\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
"movprfx z18, z0\n umax z18.b, p0/M, z18.b, z31.b\n"
"umax z22.b, p0/M, z22.b, z30.b\n"
- "ld1b { z4.b }, p4/Z, [x24, x9]\n"
- "ld1b { z3.b }, p4/Z, [x23, x9]\n"
+ "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+ "ld1b { z3.b }, p4/Z, [x22, x28]\n"
"movprfx z17, z29\n umax z17.b, p0/M, z17.b, z28.b\n"
"umax z21.b, p0/M, z21.b, z27.b\n"
- "ld1b { z2.b }, p4/Z, [x22, x9]\n"
- "ld1b { z1.b }, p4/Z, [x21, x9]\n"
+ "ld1b { z2.b }, p4/Z, [x21, x28]\n"
+ "ld1b { z1.b }, p4/Z, [x20, x28]\n"
"movprfx z16, z26\n umax z16.b, p0/M, z16.b, z25.b\n"
"umax z20.b, p0/M, z20.b, z24.b\n"
- "ld1b { z0.b }, p3/Z, [x24, x28]\n"
- "ld1b { z31.b }, p3/Z, [x23, x28]\n"
+ "ld1b { z0.b }, p3/Z, [x23, x27]\n"
+ "ld1b { z31.b }, p3/Z, [x22, x27]\n"
"umax z19.b, p0/M, z19.b, z23.b\n"
"umax z18.b, p0/M, z18.b, z22.b\n"
- "ld1b { z22.b }, p3/Z, [x22, x28]\n"
- "ld1b { z30.b }, p3/Z, [x21, x28]\n"
+ "ld1b { z22.b }, p3/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p3/Z, [x20, x27]\n"
"umax z17.b, p0/M, z17.b, z21.b\n"
"umax z16.b, p0/M, z16.b, z20.b\n"
- "ld1b { z29.b }, p2/Z, [x24, x27]\n"
- "ld1b { z28.b }, p2/Z, [x23, x27]\n"
- "subs x25, x25, #0x1\n"
+ "ld1b { z29.b }, p2/Z, [x23, x26]\n"
+ "ld1b { z28.b }, p2/Z, [x22, x26]\n"
+ "subs x24, x24, #0x1\n"
"umax z8.b, p0/M, z8.b, z19.b\n"
- "ld1b { z21.b }, p2/Z, [x22, x27]\n"
- "ld1b { z27.b }, p2/Z, [x21, x27]\n"
+ "ld1b { z21.b }, p2/Z, [x21, x26]\n"
+ "ld1b { z27.b }, p2/Z, [x20, x26]\n"
"umax z7.b, p0/M, z7.b, z18.b\n"
"umax z6.b, p0/M, z6.b, z17.b\n"
- "ld1b { z26.b }, p1/Z, [x24, x26]\n"
- "ld1b { z25.b }, p1/Z, [x23, x26]\n"
+ "ld1b { z26.b }, p1/Z, [x23, x25]\n"
+ "ld1b { z25.b }, p1/Z, [x22, x25]\n"
"umax z5.b, p0/M, z5.b, z16.b\n"
- "add x20, x20, #0x20\n"
- "ld1b { z20.b }, p1/Z, [x22, x26]\n"
- "ld1b { z24.b }, p1/Z, [x21, x26]\n"
+ "add x19, x19, #0x20\n"
+ "ld1b { z20.b }, p1/Z, [x21, x25]\n"
+ "ld1b { z24.b }, p1/Z, [x20, x25]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
"movprfx z19, z4\n umax z19.b, p0/M, z19.b, z3.b\n"
@@ -137,37 +137,37 @@ void sve_u8q_nhwc_max_generic_depthfirst_impl(
"umax z6.b, p0/M, z6.b, z17.b\n"
"umax z5.b, p0/M, z5.b, z16.b\n"
"4:" // 4-vectors of channels: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ld1b { z4.b }, p4/Z, [x24, x9]\n"
- "subs x21, x21, #0x1\n"
+ "ldr x23, [x19], #0x8\n"
+ "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+ "subs x20, x20, #0x1\n"
"umax z8.b, p0/M, z8.b, z4.b\n"
- "ld1b { z0.b }, p3/Z, [x24, x28]\n"
- "ld1b { z29.b }, p2/Z, [x24, x27]\n"
+ "ld1b { z0.b }, p3/Z, [x23, x27]\n"
+ "ld1b { z29.b }, p2/Z, [x23, x26]\n"
"umax z7.b, p0/M, z7.b, z0.b\n"
"umax z6.b, p0/M, z6.b, z29.b\n"
- "ld1b { z26.b }, p1/Z, [x24, x26]\n"
+ "ld1b { z26.b }, p1/Z, [x23, x25]\n"
"umax z5.b, p0/M, z5.b, z26.b\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "add x20, %x[quant_params], %[offsetof_qp_input_offset]\n"
- "ld1rw { z4.s }, p0/Z, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_input_offset]\n"
+ "ld1rw { z4.s }, p0/Z, [x19]\n"
".inst 0x4508a918 // ushllb z24.h, z8.b, #0x0\n"
".inst 0x4508ad17 // ushllt z23.h, z8.b, #0x0\n"
".inst 0x4508a8f6 // ushllb z22.h, z7.b, #0x0\n"
".inst 0x4508acf5 // ushllt z21.h, z7.b, #0x0\n"
"neg z4.s, p0/M, z4.s\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
".inst 0x4508a8d4 // ushllb z20.h, z6.b, #0x0\n"
".inst 0x4508acd3 // ushllt z19.h, z6.b, #0x0\n"
- "ld1rw { z3.s }, p0/Z, [x20]\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+ "ld1rw { z3.s }, p0/Z, [x19]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
".inst 0x4508a8b2 // ushllb z18.h, z5.b, #0x0\n"
".inst 0x4508acb1 // ushllt z17.h, z5.b, #0x0\n"
- "ld1rw { z16.s }, p0/Z, [x20]\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+ "ld1rw { z16.s }, p0/Z, [x19]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
".inst 0x45984082 // saddwb z2.s, z4.s, z24.h\n"
".inst 0x45984481 // saddwt z1.s, z4.s, z24.h\n"
".inst 0x44828062 // srshl z2.s, p0/M, z2.s, z3.s\n"
@@ -200,10 +200,10 @@ void sve_u8q_nhwc_max_generic_depthfirst_impl(
".inst 0x45914493 // saddwt z19.s, z4.s, z17.h\n"
".inst 0x44828074 // srshl z20.s, p0/M, z20.s, z3.s\n"
".inst 0x44828073 // srshl z19.s, p0/M, z19.s, z3.s\n"
- "ld1rw { z17.s }, p0/Z, [x20]\n"
+ "ld1rw { z17.s }, p0/Z, [x19]\n"
".inst 0x04b07442 // sqrdmulh z2.s, z2.s, z16.s\n"
".inst 0x04b07421 // sqrdmulh z1.s, z1.s, z16.s\n"
- "add x20, %x[quant_params], %[offsetof_qp_output_offset]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
".inst 0x04b07400 // sqrdmulh z0.s, z0.s, z16.s\n"
".inst 0x04b077ff // sqrdmulh z31.s, z31.s, z16.s\n"
".inst 0x44828222 // srshl z2.s, p0/M, z2.s, z17.s\n"
@@ -234,7 +234,7 @@ void sve_u8q_nhwc_max_generic_depthfirst_impl(
".inst 0x44828235 // srshl z21.s, p0/M, z21.s, z17.s\n"
".inst 0x44828234 // srshl z20.s, p0/M, z20.s, z17.s\n"
".inst 0x44828233 // srshl z19.s, p0/M, z19.s, z17.s\n"
- "ld1rw { z16.s }, p0/Z, [x20]\n"
+ "ld1rw { z16.s }, p0/Z, [x19]\n"
"add z2.s, z2.s, z16.s\n"
"add z1.s, z1.s, z16.s\n"
"add z0.s, z0.s, z16.s\n"
@@ -279,7 +279,7 @@ void sve_u8q_nhwc_max_generic_depthfirst_impl(
"smin z30.s, p0/M, z30.s, z18.s\n"
"smin z29.s, p0/M, z29.s, z18.s\n"
"trn1 z17.h, z30.h, z29.h\n"
- "st1b { z16.b }, p4, [%x[outptr], x9]\n"
+ "st1b { z16.b }, p4, [%x[outptr], x28]\n"
"smin z28.s, p0/M, z28.s, z18.s\n"
"smin z27.s, p0/M, z27.s, z18.s\n"
"trn1 z16.h, z28.h, z27.h\n"
@@ -287,7 +287,7 @@ void sve_u8q_nhwc_max_generic_depthfirst_impl(
"smin z26.s, p0/M, z26.s, z18.s\n"
"smin z25.s, p0/M, z25.s, z18.s\n"
"trn1 z17.h, z26.h, z25.h\n"
- "st1b { z16.b }, p3, [%x[outptr], x28]\n"
+ "st1b { z16.b }, p3, [%x[outptr], x27]\n"
"smin z24.s, p0/M, z24.s, z18.s\n"
"smin z23.s, p0/M, z23.s, z18.s\n"
"trn1 z16.h, z24.h, z23.h\n"
@@ -295,48 +295,48 @@ void sve_u8q_nhwc_max_generic_depthfirst_impl(
"smin z22.s, p0/M, z22.s, z18.s\n"
"smin z21.s, p0/M, z21.s, z18.s\n"
"trn1 z17.h, z22.h, z21.h\n"
- "st1b { z16.b }, p2, [%x[outptr], x27]\n"
+ "st1b { z16.b }, p2, [%x[outptr], x26]\n"
"smin z20.s, p0/M, z20.s, z18.s\n"
"smin z19.s, p0/M, z19.s, z18.s\n"
"trn1 z16.h, z20.h, z19.h\n"
"trn1 z16.b, z17.b, z16.b\n"
- "st1b { z16.b }, p1, [%x[outptr], x26]\n"
- "incb x26, ALL, MUL #4\n"
- "whilelt p1.b, x26, %x[n_channels]\n"
- "incb x9, ALL, MUL #4\n"
+ "st1b { z16.b }, p1, [%x[outptr], x25]\n"
+ "incb x25, ALL, MUL #4\n"
+ "whilelt p1.b, x25, %x[n_channels]\n"
"incb x28, ALL, MUL #4\n"
"incb x27, ALL, MUL #4\n"
+ "incb x26, ALL, MUL #4\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p4.b, x9, %x[n_channels]\n"
+ "whilelt p4.b, x28, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
- "lsr x25, %x[n_valid_cells], #0x2\n"
+ "lsr x24, %x[n_valid_cells], #0x2\n"
"mov z8.b, #0x0\n"
- "mov x20, %x[inptrs]\n"
- "cbz x25, 11f\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
- "subs x25, x25, #0x1\n"
- "add x20, x20, #0x20\n"
- "ld1b { z4.b }, p4/Z, [x24, x9]\n"
- "ld1b { z3.b }, p4/Z, [x23, x9]\n"
- "ld1b { z2.b }, p4/Z, [x22, x9]\n"
- "ld1b { z1.b }, p4/Z, [x21, x9]\n"
+ "mov x19, %x[inptrs]\n"
+ "cbz x24, 11f\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
+ "subs x24, x24, #0x1\n"
+ "add x19, x19, #0x20\n"
+ "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+ "ld1b { z3.b }, p4/Z, [x22, x28]\n"
+ "ld1b { z2.b }, p4/Z, [x21, x28]\n"
+ "ld1b { z1.b }, p4/Z, [x20, x28]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
"movprfx z19, z4\n umax z19.b, p0/M, z19.b, z3.b\n"
"movprfx z23, z2\n umax z23.b, p0/M, z23.b, z1.b\n"
- "ldp x24, x23, [x20, #0x0]\n"
- "ldp x22, x21, [x20, #0x10]\n"
+ "ldp x23, x22, [x19, #0x0]\n"
+ "ldp x21, x20, [x19, #0x10]\n"
"umax z19.b, p0/M, z19.b, z23.b\n"
- "subs x25, x25, #0x1\n"
- "ld1b { z4.b }, p4/Z, [x24, x9]\n"
- "ld1b { z3.b }, p4/Z, [x23, x9]\n"
+ "subs x24, x24, #0x1\n"
+ "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+ "ld1b { z3.b }, p4/Z, [x22, x28]\n"
"umax z8.b, p0/M, z8.b, z19.b\n"
- "add x20, x20, #0x20\n"
- "ld1b { z2.b }, p4/Z, [x22, x9]\n"
- "ld1b { z1.b }, p4/Z, [x21, x9]\n"
+ "add x19, x19, #0x20\n"
+ "ld1b { z2.b }, p4/Z, [x21, x28]\n"
+ "ld1b { z1.b }, p4/Z, [x20, x28]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
"movprfx z19, z4\n umax z19.b, p0/M, z19.b, z3.b\n"
@@ -344,28 +344,28 @@ void sve_u8q_nhwc_max_generic_depthfirst_impl(
"umax z19.b, p0/M, z19.b, z23.b\n"
"umax z8.b, p0/M, z8.b, z19.b\n"
"11:" // Single vector of channels: Loop: After loop
- "ands x21, %x[n_valid_cells], #0x3\n"
+ "ands x20, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x24, [x20], #0x8\n"
- "ld1b { z4.b }, p4/Z, [x24, x9]\n"
- "subs x21, x21, #0x1\n"
+ "ldr x23, [x19], #0x8\n"
+ "ld1b { z4.b }, p4/Z, [x23, x28]\n"
+ "subs x20, x20, #0x1\n"
"umax z8.b, p0/M, z8.b, z4.b\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "add x20, %x[quant_params], %[offsetof_qp_input_offset]\n"
- "ld1rw { z4.s }, p0/Z, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_input_offset]\n"
+ "ld1rw { z4.s }, p0/Z, [x19]\n"
".inst 0x4508a918 // ushllb z24.h, z8.b, #0x0\n"
".inst 0x4508ad17 // ushllt z23.h, z8.b, #0x0\n"
"neg z4.s, p0/M, z4.s\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
".inst 0x45984082 // saddwb z2.s, z4.s, z24.h\n"
".inst 0x45984481 // saddwt z1.s, z4.s, z24.h\n"
".inst 0x45974080 // saddwb z0.s, z4.s, z23.h\n"
".inst 0x4597449f // saddwt z31.s, z4.s, z23.h\n"
- "ld1rw { z3.s }, p0/Z, [x20]\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
- "ld1rw { z16.s }, p0/Z, [x20]\n"
+ "ld1rw { z3.s }, p0/Z, [x19]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+ "ld1rw { z16.s }, p0/Z, [x19]\n"
".inst 0x44828062 // srshl z2.s, p0/M, z2.s, z3.s\n"
".inst 0x44828061 // srshl z1.s, p0/M, z1.s, z3.s\n"
".inst 0x04b07442 // sqrdmulh z2.s, z2.s, z16.s\n"
@@ -373,13 +373,13 @@ void sve_u8q_nhwc_max_generic_depthfirst_impl(
".inst 0x4482807f // srshl z31.s, p0/M, z31.s, z3.s\n"
".inst 0x04b07421 // sqrdmulh z1.s, z1.s, z16.s\n"
".inst 0x04b07400 // sqrdmulh z0.s, z0.s, z16.s\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
- "ld1rw { z17.s }, p0/Z, [x20]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+ "ld1rw { z17.s }, p0/Z, [x19]\n"
".inst 0x04b077ff // sqrdmulh z31.s, z31.s, z16.s\n"
- "add x20, %x[quant_params], %[offsetof_qp_output_offset]\n"
+ "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n"
".inst 0x44828222 // srshl z2.s, p0/M, z2.s, z17.s\n"
".inst 0x44828221 // srshl z1.s, p0/M, z1.s, z17.s\n"
- "ld1rw { z16.s }, p0/Z, [x20]\n"
+ "ld1rw { z16.s }, p0/Z, [x19]\n"
"add z2.s, z2.s, z16.s\n"
".inst 0x44828220 // srshl z0.s, p0/M, z0.s, z17.s\n"
".inst 0x4482823f // srshl z31.s, p0/M, z31.s, z17.s\n"
@@ -399,14 +399,14 @@ void sve_u8q_nhwc_max_generic_depthfirst_impl(
"smin z31.s, p0/M, z31.s, z18.s\n"
"trn1 z16.h, z0.h, z31.h\n"
"trn1 z16.b, z17.b, z16.b\n"
- "st1b { z16.b }, p4, [%x[outptr], x9]\n"
- "incb x9\n"
- "whilelt p4.b, x9, %x[n_channels]\n"
+ "st1b { z16.b }, p4, [%x[outptr], x28]\n"
+ "incb x28\n"
+ "whilelt p4.b, x28, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
:
: [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [offsetof_qp_input_offset] "I" (offsetof(Requantize32, input_offset)), [offsetof_qp_output_offset] "I" (offsetof(Requantize32, output_offset)), [offsetof_qp_per_layer_left_shift] "I" (offsetof(Requantize32, per_layer_left_shift)), [offsetof_qp_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_qp_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [outptr] "r" (outptr), [quant_params] "r" (&qp)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp b/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
index ee567a2498..f9ffd18469 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2023 Arm Limited.
+ * Copyright (c) 2017-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -73,6 +73,7 @@
#include "kernels/sve_interleaved_bf16fp32_mmla_8x3VL.hpp"
#include "kernels/sve_interleaved_fp32_mla_8x3VL.hpp"
#include "kernels/sve_interleaved_fp32_mmla_8x3VL.hpp"
+#include "kernels/sve_smallK_hybrid_fp32_mla_8x1VL.hpp"
#endif // ARM_COMPUTE_ENABLE_SVE
namespace arm_gemm {
@@ -220,6 +221,13 @@ GemmImplementation<float, float>::with_estimate(
// SVE kernels
{
GemmMethod::GEMM_HYBRID,
+ "sve_smallK_hybrid_fp32_mla_8x1VL",
+ [](const GemmArgs &args) { return args._ci->has_sve() && args._Ksize <= 24 && !args._indirect_input; },
+ nullptr,
+ [](const GemmArgs &args) { return new GemmHybrid<cls_sve_smallK_hybrid_fp32_mla_8x1VL, float, float>(args); }
+},
+{
+ GemmMethod::GEMM_HYBRID,
"sve_hybrid_fp32_mla_8x1VL",
[](const GemmArgs &args) { return args._ci->has_sve(); },
[](const GemmArgs &args) { return (args._Nsize < 12); },
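
Beyond the pooling kernels, the revert also restores the smallK hybrid GEMM kernels to the implementation tables. Each entry pairs a hard availability predicate (here: SVE present, K at most 24, no indirect input) with an optional recommendation predicate and a factory. A simplified analogue of how such a table is consulted — the real GemmImplementation<> machinery in arm_gemm is richer than this sketch:

    #include <functional>
    #include <string>
    #include <vector>

    struct Args { bool has_sve; unsigned Ksize; bool indirect_input; };

    struct Entry {
        std::string name;
        std::function<bool(const Args &)> is_supported;   // hard requirement
        std::function<bool(const Args &)> is_recommended; // soft preference, may be null
    };

    // First supported-and-recommended entry wins, falling back to first supported.
    const Entry *select(const std::vector<Entry> &table, const Args &args)
    {
        const Entry *fallback = nullptr;
        for (const auto &e : table) {
            if (!e.is_supported(args)) continue;
            if (!e.is_recommended || e.is_recommended(args)) return &e;
            if (!fallback) fallback = &e;
        }
        return fallback;
    }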
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp
index b0a01886d2..38a7c94ef0 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020, 2022-2023 Arm Limited.
+ * Copyright (c) 2017-2020, 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -50,6 +50,7 @@
#include "kernels/sve_hybrid_s8s32_mmla_6x4VL.hpp"
#include "kernels/sve_interleaved_s8s32_dot_8x3VL.hpp"
#include "kernels/sve_interleaved_s8s32_mmla_8x3VL.hpp"
+#include "kernels/sve_smallK_hybrid_s8s32_dot_8x1VL.hpp"
#endif // ARM_COMPUTE_ENABLE_SVE
namespace arm_gemm {
@@ -96,6 +97,13 @@ GemmImplementation<int8_t, int32_t>::with_estimate(
[](const GemmArgs &args) { return GemmInterleaved<cls_sve_interleaved_s8s32_mmla_8x3VL, int8_t, int32_t>::estimate_cycles<int32_t>(args); },
[](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_s8s32_mmla_8x3VL, int8_t, int32_t>(args); }
),
+{
+ GemmMethod::GEMM_HYBRID,
+ "sve_smallK_hybrid_s8s32_dot_8x1VL",
+ [](const GemmArgs &args) { return args._ci->has_svei8mm() && args._Ksize<=64 && !args._indirect_input; },
+ [](const GemmArgs &args) { return !(args._ci->has_svei8mm() || args._ci->has_i8mm()); },
+ [](const GemmArgs &args) { return new GemmHybrid<cls_sve_smallK_hybrid_s8s32_dot_8x1VL, int8_t, int32_t>(args); }
+},
GemmImplementation<int8_t, int32_t>::with_estimate(
GemmMethod::GEMM_HYBRID,
"sve_hybrid_s8s32_dot_6x4VL",
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp
index d168abcf6d..ac49536643 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020, 2022-2023 Arm Limited.
+ * Copyright (c) 2019-2020, 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -54,6 +54,7 @@
#include "kernels/sve_hybrid_s8s32_mmla_6x4VL.hpp"
#include "kernels/sve_interleaved_s8s32_dot_8x3VL.hpp"
#include "kernels/sve_interleaved_s8s32_mmla_8x3VL.hpp"
+#include "kernels/sve_smallK_hybrid_s8s32_dot_8x1VL.hpp"
#endif // ARM_COMPUTE_ENABLE_SVE
#include "gemm_hybrid_indirect.hpp"
@@ -129,6 +130,13 @@ GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
[](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_sve_hybrid_s8s32_mmla_6x4VL, int8_t, int8_t, Requantize32, true>::estimate_cycles<int8_t>(args); },
[](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_sve_hybrid_s8s32_mmla_6x4VL, int8_t, int8_t, Requantize32, true>(args, qp); }
),
+{
+ GemmMethod::GEMM_HYBRID_QUANTIZED,
+ "sve_smallK_hybrid_s8s32_dot_8x1VL",
+ [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_sve() && args._Ksize<=64 && !args._indirect_input; },
+ [](const GemmArgs &args, const Requantize32 &) { return !(args._ci->has_svei8mm() || args._ci->has_i8mm()); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridQuantized<cls_sve_smallK_hybrid_s8s32_dot_8x1VL, int8_t, int8_t>(args, qp); }
+},
GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
GemmMethod::GEMM_HYBRID,
"sve_hybrid_s8qs_dot_6x4VL",
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp
index 01f5124a2c..ba9649c0e7 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020, 2022-2023 Arm Limited.
+ * Copyright (c) 2019-2020, 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -50,6 +50,7 @@
#include "kernels/sve_hybrid_u8u32_mmla_6x4VL.hpp"
#include "kernels/sve_interleaved_u8u32_dot_8x3VL.hpp"
#include "kernels/sve_interleaved_u8u32_mmla_8x3VL.hpp"
+#include "kernels/sve_smallK_hybrid_u8u32_dot_8x1VL.hpp"
#endif // ARM_COMPUTE_ENABLE_SVE
#include "gemm_hybrid_indirect.hpp"
@@ -118,6 +119,13 @@ GemmImplementation<uint8_t, uint8_t, Requantize32>::with_estimate(
[](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_sve_hybrid_u8u32_mmla_6x4VL, uint8_t, uint8_t, Requantize32, true>::estimate_cycles<uint8_t>(args); },
[](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_sve_hybrid_u8u32_mmla_6x4VL, uint8_t, uint8_t, Requantize32, true>(args, qp); }
),
+{
+ GemmMethod::GEMM_HYBRID_QUANTIZED,
+ "sve_smallK_hybrid_u8u32_dot_8x1VL",
+ [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_sve() && args._Ksize<=64 && !args._indirect_input; },
+ [](const GemmArgs &args, const Requantize32 &) { return !(args._ci->has_svei8mm() || args._ci->has_i8mm()); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridQuantized<cls_sve_smallK_hybrid_u8u32_dot_8x1VL, uint8_t, uint8_t>(args, qp); }
+},
GemmImplementation<uint8_t, uint8_t, Requantize32>::with_estimate(
GemmMethod::GEMM_HYBRID,
"sve_hybrid_u8qa_dot_4x4VL",
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp
index fcc95eb503..03e9cd6c1f 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020, 2022-2023 Arm Limited.
+ * Copyright (c) 2017-2020, 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,6 +43,7 @@
#include "kernels/sve_hybrid_u8u32_mmla_6x4VL.hpp"
#include "kernels/sve_interleaved_u8u32_dot_8x3VL.hpp"
#include "kernels/sve_interleaved_u8u32_mmla_8x3VL.hpp"
+#include "kernels/sve_smallK_hybrid_u8u32_dot_8x1VL.hpp"
namespace arm_gemm {
@@ -62,6 +63,13 @@ GemmImplementation<uint8_t, uint32_t>::with_estimate(
[](const GemmArgs &args) { return GemmInterleaved<cls_sve_interleaved_u8u32_mmla_8x3VL, uint8_t, uint32_t>::estimate_cycles<uint32_t>(args); },
[](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_u8u32_mmla_8x3VL, uint8_t, uint32_t>(args); }
),
+{
+ GemmMethod::GEMM_HYBRID,
+ "sve_smallK_hybrid_u8u32_dot_8x1VL",
+ [](const GemmArgs &args) { return args._ci->has_sve() && args._Ksize<=64 && !args._indirect_input; },
+ [](const GemmArgs &args) { return !(args._ci->has_svei8mm() || args._ci->has_i8mm()); },
+ [](const GemmArgs &args) { return new GemmHybrid<cls_sve_smallK_hybrid_u8u32_dot_8x1VL, uint8_t, uint32_t>(args); }
+},
GemmImplementation<uint8_t, uint32_t>::with_estimate(
GemmMethod::GEMM_HYBRID,
"sve_hybrid_u8u32_dot_6x4VL",
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave4_block16_s8_s8.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave4_block16_s8_s8.hpp
index 4dfe46446e..6a8caf6ce6 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave4_block16_s8_s8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave4_block16_s8_s8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -31,44 +31,44 @@ void interleave_block<4, 16, VLType::None, false>(
)
{
__asm__ __volatile__(
- "ldr x23, [%x[in], #0x0]\n"
- "ldr x22, [%x[in], #0x8]\n"
+ "ldr x22, [%x[in], #0x0]\n"
"cmp %x[height], #0x4\n"
- "add x23, x23, %x[row_offset]\n"
- "ldr x21, [%x[in], #0x10]\n"
- "ldr x20, [%x[in], #0x18]\n"
+ "ldr x21, [%x[in], #0x8]\n"
"add x22, x22, %x[row_offset]\n"
+ "ldr x20, [%x[in], #0x10]\n"
+ "ldr x19, [%x[in], #0x18]\n"
"add x21, x21, %x[row_offset]\n"
"add x20, x20, %x[row_offset]\n"
+ "add x19, x19, %x[row_offset]\n"
"beq 1f\n"
+ "mov x19, x22\n"
"cmp %x[height], #0x2\n"
- "mov x20, x23\n"
- "csel x22, x22, x23, GE\n"
- "csel x21, x21, x23, GT\n"
+ "csel x21, x21, x22, GE\n"
+ "csel x20, x20, x22, GT\n"
"1:" // no_pointer_adj
- "cmp %x[width], #0x10\n"
- "prfm pldl1keep, [x23, #0x0]\n"
"prfm pldl1keep, [x22, #0x0]\n"
+ "cmp %x[width], #0x10\n"
"prfm pldl1keep, [x21, #0x0]\n"
"prfm pldl1keep, [x20, #0x0]\n"
- "prfm pldl1keep, [x23, #0x40]\n"
+ "prfm pldl1keep, [x19, #0x0]\n"
"prfm pldl1keep, [x22, #0x40]\n"
"prfm pldl1keep, [x21, #0x40]\n"
"prfm pldl1keep, [x20, #0x40]\n"
+ "prfm pldl1keep, [x19, #0x40]\n"
"blt 3f\n"
"2:" // Main loop head
- "ldr q19, [x23], #0x10\n"
- "ldr q18, [x22], #0x10\n"
+ "ldr q19, [x22], #0x10\n"
"subs %x[width], %x[width], #0x10\n"
+ "ldr q18, [x21], #0x10\n"
"cmp %x[width], #0x10\n"
- "ldr q17, [x21], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "str q19, [%x[out_ptr], #0x0]\n"
- "prfm pldl1keep, [x23, #0x70]\n"
+ "ldr q17, [x20], #0x10\n"
+ "ldr q16, [x19], #0x10\n"
"prfm pldl1keep, [x22, #0x70]\n"
- "str q18, [%x[out_ptr], #0x10]\n"
"prfm pldl1keep, [x21, #0x70]\n"
"prfm pldl1keep, [x20, #0x70]\n"
+ "prfm pldl1keep, [x19, #0x70]\n"
+ "str q19, [%x[out_ptr], #0x0]\n"
+ "str q18, [%x[out_ptr], #0x10]\n"
"str q17, [%x[out_ptr], #0x20]\n"
"str q16, [%x[out_ptr], #0x30]\n"
"add %x[out_ptr], %x[out_ptr], #0x40\n"
@@ -76,93 +76,93 @@ void interleave_block<4, 16, VLType::None, false>(
"3:" // Main loop skip
"cbz %x[width], 12f\n"
"tbz %x[width], #3, 7f\n"
- "ldr d19, [x23], #0x8\n"
- "ldr d18, [x22], #0x8\n"
- "ldr d17, [x21], #0x8\n"
- "ldr d16, [x20], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
+ "ldr d18, [x21], #0x8\n"
+ "ldr d17, [x20], #0x8\n"
+ "ldr d16, [x19], #0x8\n"
"tbz %x[width], #2, 5f\n"
- "ld1 { v19.s }[2], [x23], #0x4\n"
- "ld1 { v18.s }[2], [x22], #0x4\n"
- "ld1 { v17.s }[2], [x21], #0x4\n"
- "ld1 { v16.s }[2], [x20], #0x4\n"
+ "ld1 { v19.s }[2], [x22], #0x4\n"
+ "ld1 { v18.s }[2], [x21], #0x4\n"
+ "ld1 { v17.s }[2], [x20], #0x4\n"
+ "ld1 { v16.s }[2], [x19], #0x4\n"
"tbz %x[width], #1, 4f\n"
- "ld1 { v19.h }[6], [x23], #0x2\n"
- "ld1 { v18.h }[6], [x22], #0x2\n"
- "ld1 { v17.h }[6], [x21], #0x2\n"
- "ld1 { v16.h }[6], [x20], #0x2\n"
+ "ld1 { v19.h }[6], [x22], #0x2\n"
+ "ld1 { v18.h }[6], [x21], #0x2\n"
+ "ld1 { v17.h }[6], [x20], #0x2\n"
+ "ld1 { v16.h }[6], [x19], #0x2\n"
"tbz %x[width], #0, 11f\n"
- "ld1 { v19.b }[14], [x23]\n"
- "ld1 { v18.b }[14], [x22]\n"
- "ld1 { v17.b }[14], [x21]\n"
- "ld1 { v16.b }[14], [x20]\n"
+ "ld1 { v19.b }[14], [x22]\n"
+ "ld1 { v18.b }[14], [x21]\n"
+ "ld1 { v17.b }[14], [x20]\n"
+ "ld1 { v16.b }[14], [x19]\n"
"b 11f\n"
"4:" // odd_loads_1_12
"tbz %x[width], #0, 11f\n"
- "ld1 { v19.b }[12], [x23]\n"
- "ld1 { v18.b }[12], [x22]\n"
- "ld1 { v17.b }[12], [x21]\n"
- "ld1 { v16.b }[12], [x20]\n"
+ "ld1 { v19.b }[12], [x22]\n"
+ "ld1 { v18.b }[12], [x21]\n"
+ "ld1 { v17.b }[12], [x20]\n"
+ "ld1 { v16.b }[12], [x19]\n"
"b 11f\n"
"5:" // odd_loads_2_8
"tbz %x[width], #1, 6f\n"
- "ld1 { v19.h }[4], [x23], #0x2\n"
- "ld1 { v18.h }[4], [x22], #0x2\n"
- "ld1 { v17.h }[4], [x21], #0x2\n"
- "ld1 { v16.h }[4], [x20], #0x2\n"
+ "ld1 { v19.h }[4], [x22], #0x2\n"
+ "ld1 { v18.h }[4], [x21], #0x2\n"
+ "ld1 { v17.h }[4], [x20], #0x2\n"
+ "ld1 { v16.h }[4], [x19], #0x2\n"
"tbz %x[width], #0, 11f\n"
- "ld1 { v19.b }[10], [x23]\n"
- "ld1 { v18.b }[10], [x22]\n"
- "ld1 { v17.b }[10], [x21]\n"
- "ld1 { v16.b }[10], [x20]\n"
+ "ld1 { v19.b }[10], [x22]\n"
+ "ld1 { v18.b }[10], [x21]\n"
+ "ld1 { v17.b }[10], [x20]\n"
+ "ld1 { v16.b }[10], [x19]\n"
"b 11f\n"
"6:" // odd_loads_1_8
"tbz %x[width], #0, 11f\n"
- "ld1 { v19.b }[8], [x23]\n"
- "ld1 { v18.b }[8], [x22]\n"
- "ld1 { v17.b }[8], [x21]\n"
- "ld1 { v16.b }[8], [x20]\n"
+ "ld1 { v19.b }[8], [x22]\n"
+ "ld1 { v18.b }[8], [x21]\n"
+ "ld1 { v17.b }[8], [x20]\n"
+ "ld1 { v16.b }[8], [x19]\n"
"b 11f\n"
"7:" // odd_loads_4_0
"tbz %x[width], #2, 9f\n"
- "ldr s19, [x23], #0x4\n"
- "ldr s18, [x22], #0x4\n"
- "ldr s17, [x21], #0x4\n"
- "ldr s16, [x20], #0x4\n"
+ "ldr s19, [x22], #0x4\n"
+ "ldr s18, [x21], #0x4\n"
+ "ldr s17, [x20], #0x4\n"
+ "ldr s16, [x19], #0x4\n"
"tbz %x[width], #1, 8f\n"
- "ld1 { v19.h }[2], [x23], #0x2\n"
- "ld1 { v18.h }[2], [x22], #0x2\n"
- "ld1 { v17.h }[2], [x21], #0x2\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
+ "ld1 { v19.h }[2], [x22], #0x2\n"
+ "ld1 { v18.h }[2], [x21], #0x2\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "ld1 { v16.h }[2], [x19], #0x2\n"
"tbz %x[width], #0, 11f\n"
- "ld1 { v19.b }[6], [x23]\n"
- "ld1 { v18.b }[6], [x22]\n"
- "ld1 { v17.b }[6], [x21]\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "ld1 { v19.b }[6], [x22]\n"
+ "ld1 { v18.b }[6], [x21]\n"
+ "ld1 { v17.b }[6], [x20]\n"
+ "ld1 { v16.b }[6], [x19]\n"
"b 11f\n"
"8:" // odd_loads_1_4
"tbz %x[width], #0, 11f\n"
- "ld1 { v19.b }[4], [x23]\n"
- "ld1 { v18.b }[4], [x22]\n"
- "ld1 { v17.b }[4], [x21]\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "ld1 { v19.b }[4], [x22]\n"
+ "ld1 { v18.b }[4], [x21]\n"
+ "ld1 { v17.b }[4], [x20]\n"
+ "ld1 { v16.b }[4], [x19]\n"
"b 11f\n"
"9:" // odd_loads_2_0
"tbz %x[width], #1, 10f\n"
- "ldr h19, [x23], #0x2\n"
- "ldr h18, [x22], #0x2\n"
- "ldr h17, [x21], #0x2\n"
- "ldr h16, [x20], #0x2\n"
+ "ldr h19, [x22], #0x2\n"
+ "ldr h18, [x21], #0x2\n"
+ "ldr h17, [x20], #0x2\n"
+ "ldr h16, [x19], #0x2\n"
"tbz %x[width], #0, 11f\n"
- "ld1 { v19.b }[2], [x23]\n"
- "ld1 { v18.b }[2], [x22]\n"
- "ld1 { v17.b }[2], [x21]\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "ld1 { v19.b }[2], [x22]\n"
+ "ld1 { v18.b }[2], [x21]\n"
+ "ld1 { v17.b }[2], [x20]\n"
+ "ld1 { v16.b }[2], [x19]\n"
"b 11f\n"
"10:" // odd_loads_1_0
- "ldr b19, [x23, #0x0]\n"
- "ldr b18, [x22, #0x0]\n"
- "ldr b17, [x21, #0x0]\n"
- "ldr b16, [x20, #0x0]\n"
+ "ldr b19, [x22, #0x0]\n"
+ "ldr b18, [x21, #0x0]\n"
+ "ldr b17, [x20, #0x0]\n"
+ "ldr b16, [x19, #0x0]\n"
"11:" // Odd load end
"str q19, [%x[out_ptr], #0x0]\n"
"str q18, [%x[out_ptr], #0x10]\n"
@@ -173,7 +173,7 @@ void interleave_block<4, 16, VLType::None, false>(
: [out_ptr] "+&r" (out_ptr), [width] "+&r" (width)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset)
- : "cc", "memory", "v16", "v17", "v18", "v19", "x20", "x21", "x22", "x23"
+ : "cc", "memory", "v16", "v17", "v18", "v19", "x19", "x20", "x21", "x22"
);
}
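
For orientation: the function restored above packs four rows into 16-byte interleaved blocks, one quadword per row per block, substituting row 0 for rows beyond 'height' (the csel sequence) and zero-padding the tail block (the ldr d/s/h/b loads leave untouched lanes cleared). A minimal scalar sketch of the same transform; the name and signature are illustrative, not the library's API:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Scalar model of the 4-row, 16-byte-block interleave above: rows beyond
    // 'height' alias row 0, and the final partial block is zero-padded.
    void interleave4_block16(uint8_t *out, const uint8_t *const *in,
                             size_t height, size_t width, size_t row_offset) {
        const uint8_t *row[4];
        for (int r = 0; r < 4; ++r) {
            row[r] = in[static_cast<size_t>(r) < height ? r : 0] + row_offset;
        }
        for (size_t x = 0; x < width; x += 16) {
            const size_t n = std::min<size_t>(16, width - x);
            for (int r = 0; r < 4; ++r) {
                uint8_t block[16] = {0};           // zero-pad the tail block
                std::memcpy(block, row[r] + x, n);
                std::memcpy(out, block, 16);
                out += 16;
            }
        }
    }
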
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave4_block16_s8_s8_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave4_block16_s8_s8_summing.hpp
index 56ca49a36e..954a86656e 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave4_block16_s8_s8_summing.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave4_block16_s8_s8_summing.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -31,39 +31,39 @@ void interleave_block<4, 16, VLType::None, true>(
)
{
__asm__ __volatile__(
- "ldr x24, [%x[in], #0x0]\n"
- "ldr x23, [%x[in], #0x8]\n"
- "cmp %x[height], #0x4\n"
- "mov x22, #0x0\n"
- "ldr x21, [%x[in], #0x10]\n"
- "ldr x20, [%x[in], #0x18]\n"
"movi v28.8h, #0x0\n"
+ "ldr x23, [%x[in], #0x0]\n"
+ "mov x22, #0x0\n"
"movi v27.8h, #0x0\n"
+ "ldr x21, [%x[in], #0x8]\n"
+ "cmp %x[height], #0x4\n"
"movi v26.8h, #0x0\n"
- "movi v25.8h, #0x0\n"
- "add x24, x24, %x[row_offset]\n"
+ "ldr x20, [%x[in], #0x10]\n"
"add x23, x23, %x[row_offset]\n"
+ "movi v25.8h, #0x0\n"
+ "ldr x19, [%x[in], #0x18]\n"
"movi v24.4s, #0x0\n"
- "movi v23.4s, #0x0\n"
"add x21, x21, %x[row_offset]\n"
+ "movi v23.4s, #0x0\n"
"add x20, x20, %x[row_offset]\n"
"movi v22.4s, #0x0\n"
+ "add x19, x19, %x[row_offset]\n"
"movi v21.4s, #0x0\n"
"beq 1f\n"
+ "mov x19, x23\n"
"cmp %x[height], #0x2\n"
- "mov x20, x24\n"
- "csel x23, x23, x24, GE\n"
- "csel x21, x21, x24, GT\n"
+ "csel x21, x21, x23, GE\n"
+ "csel x20, x20, x23, GT\n"
"1:" // no_pointer_adj
- "prfm pldl1keep, [x24, #0x0]\n"
- "prfm pldl1keep, [x23, #0x0]\n"
"movi v20.4s, #0x0\n"
+ "prfm pldl1keep, [x23, #0x0]\n"
"prfm pldl1keep, [x21, #0x0]\n"
"prfm pldl1keep, [x20, #0x0]\n"
- "prfm pldl1keep, [x24, #0x40]\n"
+ "prfm pldl1keep, [x19, #0x0]\n"
"prfm pldl1keep, [x23, #0x40]\n"
"prfm pldl1keep, [x21, #0x40]\n"
"prfm pldl1keep, [x20, #0x40]\n"
+ "prfm pldl1keep, [x19, #0x40]\n"
"cbnz %w[first], 2f\n"
"sub %x[out_ptr], %x[out_ptr], #0x10\n"
"ld1 { v20.4s }, [%x[out_ptr]]\n"
@@ -75,141 +75,141 @@ void interleave_block<4, 16, VLType::None, true>(
"ble 4f\n"
"sadalp v24.4s, v28.8h\n"
"movi v28.8h, #0x0\n"
- "mov x22, #0x0\n"
"sadalp v23.4s, v27.8h\n"
"movi v27.8h, #0x0\n"
"sadalp v22.4s, v26.8h\n"
"movi v26.8h, #0x0\n"
"sadalp v21.4s, v25.8h\n"
"movi v25.8h, #0x0\n"
+ "mov x22, #0x0\n"
"4:" // no_accumulate_16
- "ldr q19, [x24], #0x10\n"
- "ldr q18, [x23], #0x10\n"
+ "ldr q19, [x23], #0x10\n"
+ "add x22, x22, #0x1\n"
+ "ldr q18, [x21], #0x10\n"
"subs %x[width], %x[width], #0x10\n"
+ "ldr q17, [x20], #0x10\n"
"cmp %x[width], #0x10\n"
- "ldr q17, [x21], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "str q19, [%x[out_ptr], #0x0]\n"
+ "ldr q16, [x19], #0x10\n"
"sadalp v28.8h, v19.16b\n"
- "prfm pldl1keep, [x24, #0x70]\n"
"prfm pldl1keep, [x23, #0x70]\n"
- "str q18, [%x[out_ptr], #0x10]\n"
- "sadalp v27.8h, v18.16b\n"
"prfm pldl1keep, [x21, #0x70]\n"
+ "sadalp v27.8h, v18.16b\n"
"prfm pldl1keep, [x20, #0x70]\n"
- "str q17, [%x[out_ptr], #0x20]\n"
"sadalp v26.8h, v17.16b\n"
- "str q16, [%x[out_ptr], #0x30]\n"
+ "prfm pldl1keep, [x19, #0x70]\n"
"sadalp v25.8h, v16.16b\n"
- "add x22, x22, #0x1\n"
+ "str q19, [%x[out_ptr], #0x0]\n"
+ "str q18, [%x[out_ptr], #0x10]\n"
+ "str q17, [%x[out_ptr], #0x20]\n"
+ "str q16, [%x[out_ptr], #0x30]\n"
"add %x[out_ptr], %x[out_ptr], #0x40\n"
"bge 3b\n"
"5:" // Main loop skip
"cbz %x[width], 14f\n"
"tbz %x[width], #3, 9f\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d18, [x23], #0x8\n"
- "ldr d17, [x21], #0x8\n"
- "ldr d16, [x20], #0x8\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d18, [x21], #0x8\n"
+ "ldr d17, [x20], #0x8\n"
+ "ldr d16, [x19], #0x8\n"
"tbz %x[width], #2, 7f\n"
- "ld1 { v19.s }[2], [x24], #0x4\n"
- "ld1 { v18.s }[2], [x23], #0x4\n"
- "ld1 { v17.s }[2], [x21], #0x4\n"
- "ld1 { v16.s }[2], [x20], #0x4\n"
+ "ld1 { v19.s }[2], [x23], #0x4\n"
+ "ld1 { v18.s }[2], [x21], #0x4\n"
+ "ld1 { v17.s }[2], [x20], #0x4\n"
+ "ld1 { v16.s }[2], [x19], #0x4\n"
"tbz %x[width], #1, 6f\n"
- "ld1 { v19.h }[6], [x24], #0x2\n"
- "ld1 { v18.h }[6], [x23], #0x2\n"
- "ld1 { v17.h }[6], [x21], #0x2\n"
- "ld1 { v16.h }[6], [x20], #0x2\n"
+ "ld1 { v19.h }[6], [x23], #0x2\n"
+ "ld1 { v18.h }[6], [x21], #0x2\n"
+ "ld1 { v17.h }[6], [x20], #0x2\n"
+ "ld1 { v16.h }[6], [x19], #0x2\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v19.b }[14], [x24]\n"
- "ld1 { v18.b }[14], [x23]\n"
- "ld1 { v17.b }[14], [x21]\n"
- "ld1 { v16.b }[14], [x20]\n"
+ "ld1 { v19.b }[14], [x23]\n"
+ "ld1 { v18.b }[14], [x21]\n"
+ "ld1 { v17.b }[14], [x20]\n"
+ "ld1 { v16.b }[14], [x19]\n"
"b 13f\n"
"6:" // odd_loads_1_12
"tbz %x[width], #0, 13f\n"
- "ld1 { v19.b }[12], [x24]\n"
- "ld1 { v18.b }[12], [x23]\n"
- "ld1 { v17.b }[12], [x21]\n"
- "ld1 { v16.b }[12], [x20]\n"
+ "ld1 { v19.b }[12], [x23]\n"
+ "ld1 { v18.b }[12], [x21]\n"
+ "ld1 { v17.b }[12], [x20]\n"
+ "ld1 { v16.b }[12], [x19]\n"
"b 13f\n"
"7:" // odd_loads_2_8
"tbz %x[width], #1, 8f\n"
- "ld1 { v19.h }[4], [x24], #0x2\n"
- "ld1 { v18.h }[4], [x23], #0x2\n"
- "ld1 { v17.h }[4], [x21], #0x2\n"
- "ld1 { v16.h }[4], [x20], #0x2\n"
+ "ld1 { v19.h }[4], [x23], #0x2\n"
+ "ld1 { v18.h }[4], [x21], #0x2\n"
+ "ld1 { v17.h }[4], [x20], #0x2\n"
+ "ld1 { v16.h }[4], [x19], #0x2\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v19.b }[10], [x24]\n"
- "ld1 { v18.b }[10], [x23]\n"
- "ld1 { v17.b }[10], [x21]\n"
- "ld1 { v16.b }[10], [x20]\n"
+ "ld1 { v19.b }[10], [x23]\n"
+ "ld1 { v18.b }[10], [x21]\n"
+ "ld1 { v17.b }[10], [x20]\n"
+ "ld1 { v16.b }[10], [x19]\n"
"b 13f\n"
"8:" // odd_loads_1_8
"tbz %x[width], #0, 13f\n"
- "ld1 { v19.b }[8], [x24]\n"
- "ld1 { v18.b }[8], [x23]\n"
- "ld1 { v17.b }[8], [x21]\n"
- "ld1 { v16.b }[8], [x20]\n"
+ "ld1 { v19.b }[8], [x23]\n"
+ "ld1 { v18.b }[8], [x21]\n"
+ "ld1 { v17.b }[8], [x20]\n"
+ "ld1 { v16.b }[8], [x19]\n"
"b 13f\n"
"9:" // odd_loads_4_0
"tbz %x[width], #2, 11f\n"
- "ldr s19, [x24], #0x4\n"
- "ldr s18, [x23], #0x4\n"
- "ldr s17, [x21], #0x4\n"
- "ldr s16, [x20], #0x4\n"
+ "ldr s19, [x23], #0x4\n"
+ "ldr s18, [x21], #0x4\n"
+ "ldr s17, [x20], #0x4\n"
+ "ldr s16, [x19], #0x4\n"
"tbz %x[width], #1, 10f\n"
- "ld1 { v19.h }[2], [x24], #0x2\n"
- "ld1 { v18.h }[2], [x23], #0x2\n"
- "ld1 { v17.h }[2], [x21], #0x2\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
+ "ld1 { v19.h }[2], [x23], #0x2\n"
+ "ld1 { v18.h }[2], [x21], #0x2\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "ld1 { v16.h }[2], [x19], #0x2\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v19.b }[6], [x24]\n"
- "ld1 { v18.b }[6], [x23]\n"
- "ld1 { v17.b }[6], [x21]\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "ld1 { v19.b }[6], [x23]\n"
+ "ld1 { v18.b }[6], [x21]\n"
+ "ld1 { v17.b }[6], [x20]\n"
+ "ld1 { v16.b }[6], [x19]\n"
"b 13f\n"
"10:" // odd_loads_1_4
"tbz %x[width], #0, 13f\n"
- "ld1 { v19.b }[4], [x24]\n"
- "ld1 { v18.b }[4], [x23]\n"
- "ld1 { v17.b }[4], [x21]\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "ld1 { v19.b }[4], [x23]\n"
+ "ld1 { v18.b }[4], [x21]\n"
+ "ld1 { v17.b }[4], [x20]\n"
+ "ld1 { v16.b }[4], [x19]\n"
"b 13f\n"
"11:" // odd_loads_2_0
"tbz %x[width], #1, 12f\n"
- "ldr h19, [x24], #0x2\n"
- "ldr h18, [x23], #0x2\n"
- "ldr h17, [x21], #0x2\n"
- "ldr h16, [x20], #0x2\n"
+ "ldr h19, [x23], #0x2\n"
+ "ldr h18, [x21], #0x2\n"
+ "ldr h17, [x20], #0x2\n"
+ "ldr h16, [x19], #0x2\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v19.b }[2], [x24]\n"
- "ld1 { v18.b }[2], [x23]\n"
- "ld1 { v17.b }[2], [x21]\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "ld1 { v19.b }[2], [x23]\n"
+ "ld1 { v18.b }[2], [x21]\n"
+ "ld1 { v17.b }[2], [x20]\n"
+ "ld1 { v16.b }[2], [x19]\n"
"b 13f\n"
"12:" // odd_loads_1_0
- "ldr b19, [x24, #0x0]\n"
- "ldr b18, [x23, #0x0]\n"
- "ldr b17, [x21, #0x0]\n"
- "ldr b16, [x20, #0x0]\n"
+ "ldr b19, [x23, #0x0]\n"
+ "ldr b18, [x21, #0x0]\n"
+ "ldr b17, [x20, #0x0]\n"
+ "ldr b16, [x19, #0x0]\n"
"13:" // Odd load end
"str q19, [%x[out_ptr], #0x0]\n"
"sadalp v28.8h, v19.16b\n"
- "sadalp v27.8h, v18.16b\n"
"str q18, [%x[out_ptr], #0x10]\n"
- "sadalp v26.8h, v17.16b\n"
- "sadalp v25.8h, v16.16b\n"
+ "sadalp v27.8h, v18.16b\n"
"str q17, [%x[out_ptr], #0x20]\n"
+ "sadalp v26.8h, v17.16b\n"
"str q16, [%x[out_ptr], #0x30]\n"
+ "sadalp v25.8h, v16.16b\n"
"add %x[out_ptr], %x[out_ptr], #0x40\n"
"14:" // Odds skip
"sadalp v24.4s, v28.8h\n"
"sadalp v23.4s, v27.8h\n"
+ "addp v24.4s, v24.4s, v23.4s\n"
"sadalp v22.4s, v26.8h\n"
"sadalp v21.4s, v25.8h\n"
- "addp v24.4s, v24.4s, v23.4s\n"
"addp v23.4s, v22.4s, v21.4s\n"
"addp v24.4s, v24.4s, v23.4s\n"
"add v24.4s, v24.4s, v20.4s\n"
@@ -217,7 +217,7 @@ void interleave_block<4, 16, VLType::None, true>(
"add %x[out_ptr], %x[out_ptr], #0x10\n"
: [out_ptr] "+&r" (out_ptr), [width] "+&r" (width)
: [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset)
- : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "x20", "x21", "x22", "x23", "x24"
+ : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "x19", "x20", "x21", "x22", "x23"
);
}
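
The _summing variant restored here additionally emits one int32 sum per row after the packed data (consumed later as a quantization offset term). It accumulates signed bytes pairwise into 16-bit lanes (sadalp) and periodically folds those into the 32-bit accumulators so the 16-bit partials cannot overflow; x22 counts blocks since the last fold. A scalar sketch of that bookkeeping — the fold interval of 16 blocks is an assumption for illustration, and the name is not the library's:

    #include <cstddef>
    #include <cstdint>

    // Scalar model of the summing interleave: pack 4 rows in 16-byte blocks
    // while keeping a per-row running sum.  The NEON code holds 16-bit
    // partials (sadalp) and folds them into 32-bit totals periodically; the
    // interval of 16 blocks below keeps the int16 partials in range
    // (16 blocks x 16 bytes x 127 = 32512 < 32767).
    void interleave4_block16_sum(uint8_t *out, int32_t *row_sums,
                                 const int8_t *const *row, size_t width) {
        int32_t sum32[4] = {0, 0, 0, 0};
        int16_t sum16[4] = {0, 0, 0, 0};
        size_t blocks_since_fold = 0;
        for (size_t x = 0; x < width; x += 16) {
            const size_t n = (width - x < 16) ? width - x : 16;
            for (int r = 0; r < 4; ++r) {
                for (size_t i = 0; i < 16; ++i) {
                    const int8_t v = (i < n) ? row[r][x + i] : 0;
                    sum16[r] = static_cast<int16_t>(sum16[r] + v);
                    *out++ = static_cast<uint8_t>(v);
                }
            }
            if (++blocks_since_fold == 16) {  // fold before int16 overflow
                for (int r = 0; r < 4; ++r) {
                    sum32[r] += sum16[r];
                    sum16[r] = 0;
                }
                blocks_since_fold = 0;
            }
        }
        for (int r = 0; r < 4; ++r) {
            row_sums[r] = sum32[r] + sum16[r];
        }
    }
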
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave4_block16_u8_u8_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave4_block16_u8_u8_summing.hpp
index 4c7bb71fb2..c81146212c 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave4_block16_u8_u8_summing.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave4_block16_u8_u8_summing.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -31,39 +31,39 @@ void interleave_block<4, 16, VLType::None, true>(
)
{
__asm__ __volatile__(
- "ldr x24, [%x[in], #0x0]\n"
- "ldr x23, [%x[in], #0x8]\n"
- "cmp %x[height], #0x4\n"
- "mov x22, #0x0\n"
- "ldr x21, [%x[in], #0x10]\n"
- "ldr x20, [%x[in], #0x18]\n"
"movi v28.8h, #0x0\n"
+ "ldr x23, [%x[in], #0x0]\n"
+ "mov x22, #0x0\n"
"movi v27.8h, #0x0\n"
+ "ldr x21, [%x[in], #0x8]\n"
+ "cmp %x[height], #0x4\n"
"movi v26.8h, #0x0\n"
- "movi v25.8h, #0x0\n"
- "add x24, x24, %x[row_offset]\n"
+ "ldr x20, [%x[in], #0x10]\n"
"add x23, x23, %x[row_offset]\n"
+ "movi v25.8h, #0x0\n"
+ "ldr x19, [%x[in], #0x18]\n"
"movi v24.4s, #0x0\n"
- "movi v23.4s, #0x0\n"
"add x21, x21, %x[row_offset]\n"
+ "movi v23.4s, #0x0\n"
"add x20, x20, %x[row_offset]\n"
"movi v22.4s, #0x0\n"
+ "add x19, x19, %x[row_offset]\n"
"movi v21.4s, #0x0\n"
"beq 1f\n"
+ "mov x19, x23\n"
"cmp %x[height], #0x2\n"
- "mov x20, x24\n"
- "csel x23, x23, x24, GE\n"
- "csel x21, x21, x24, GT\n"
+ "csel x21, x21, x23, GE\n"
+ "csel x20, x20, x23, GT\n"
"1:" // no_pointer_adj
- "prfm pldl1keep, [x24, #0x0]\n"
- "prfm pldl1keep, [x23, #0x0]\n"
"movi v20.4s, #0x0\n"
+ "prfm pldl1keep, [x23, #0x0]\n"
"prfm pldl1keep, [x21, #0x0]\n"
"prfm pldl1keep, [x20, #0x0]\n"
- "prfm pldl1keep, [x24, #0x40]\n"
+ "prfm pldl1keep, [x19, #0x0]\n"
"prfm pldl1keep, [x23, #0x40]\n"
"prfm pldl1keep, [x21, #0x40]\n"
"prfm pldl1keep, [x20, #0x40]\n"
+ "prfm pldl1keep, [x19, #0x40]\n"
"cbnz %w[first], 2f\n"
"sub %x[out_ptr], %x[out_ptr], #0x10\n"
"ld1 { v20.4s }, [%x[out_ptr]]\n"
@@ -75,141 +75,141 @@ void interleave_block<4, 16, VLType::None, true>(
"ble 4f\n"
"uadalp v24.4s, v28.8h\n"
"movi v28.8h, #0x0\n"
- "mov x22, #0x0\n"
"uadalp v23.4s, v27.8h\n"
"movi v27.8h, #0x0\n"
"uadalp v22.4s, v26.8h\n"
"movi v26.8h, #0x0\n"
"uadalp v21.4s, v25.8h\n"
"movi v25.8h, #0x0\n"
+ "mov x22, #0x0\n"
"4:" // no_accumulate_16
- "ldr q19, [x24], #0x10\n"
- "ldr q18, [x23], #0x10\n"
+ "ldr q19, [x23], #0x10\n"
+ "add x22, x22, #0x1\n"
+ "ldr q18, [x21], #0x10\n"
"subs %x[width], %x[width], #0x10\n"
+ "ldr q17, [x20], #0x10\n"
"cmp %x[width], #0x10\n"
- "ldr q17, [x21], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "str q19, [%x[out_ptr], #0x0]\n"
+ "ldr q16, [x19], #0x10\n"
"uadalp v28.8h, v19.16b\n"
- "prfm pldl1keep, [x24, #0x70]\n"
"prfm pldl1keep, [x23, #0x70]\n"
- "str q18, [%x[out_ptr], #0x10]\n"
- "uadalp v27.8h, v18.16b\n"
"prfm pldl1keep, [x21, #0x70]\n"
+ "uadalp v27.8h, v18.16b\n"
"prfm pldl1keep, [x20, #0x70]\n"
- "str q17, [%x[out_ptr], #0x20]\n"
"uadalp v26.8h, v17.16b\n"
- "str q16, [%x[out_ptr], #0x30]\n"
+ "prfm pldl1keep, [x19, #0x70]\n"
"uadalp v25.8h, v16.16b\n"
- "add x22, x22, #0x1\n"
+ "str q19, [%x[out_ptr], #0x0]\n"
+ "str q18, [%x[out_ptr], #0x10]\n"
+ "str q17, [%x[out_ptr], #0x20]\n"
+ "str q16, [%x[out_ptr], #0x30]\n"
"add %x[out_ptr], %x[out_ptr], #0x40\n"
"bge 3b\n"
"5:" // Main loop skip
"cbz %x[width], 14f\n"
"tbz %x[width], #3, 9f\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d18, [x23], #0x8\n"
- "ldr d17, [x21], #0x8\n"
- "ldr d16, [x20], #0x8\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d18, [x21], #0x8\n"
+ "ldr d17, [x20], #0x8\n"
+ "ldr d16, [x19], #0x8\n"
"tbz %x[width], #2, 7f\n"
- "ld1 { v19.s }[2], [x24], #0x4\n"
- "ld1 { v18.s }[2], [x23], #0x4\n"
- "ld1 { v17.s }[2], [x21], #0x4\n"
- "ld1 { v16.s }[2], [x20], #0x4\n"
+ "ld1 { v19.s }[2], [x23], #0x4\n"
+ "ld1 { v18.s }[2], [x21], #0x4\n"
+ "ld1 { v17.s }[2], [x20], #0x4\n"
+ "ld1 { v16.s }[2], [x19], #0x4\n"
"tbz %x[width], #1, 6f\n"
- "ld1 { v19.h }[6], [x24], #0x2\n"
- "ld1 { v18.h }[6], [x23], #0x2\n"
- "ld1 { v17.h }[6], [x21], #0x2\n"
- "ld1 { v16.h }[6], [x20], #0x2\n"
+ "ld1 { v19.h }[6], [x23], #0x2\n"
+ "ld1 { v18.h }[6], [x21], #0x2\n"
+ "ld1 { v17.h }[6], [x20], #0x2\n"
+ "ld1 { v16.h }[6], [x19], #0x2\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v19.b }[14], [x24]\n"
- "ld1 { v18.b }[14], [x23]\n"
- "ld1 { v17.b }[14], [x21]\n"
- "ld1 { v16.b }[14], [x20]\n"
+ "ld1 { v19.b }[14], [x23]\n"
+ "ld1 { v18.b }[14], [x21]\n"
+ "ld1 { v17.b }[14], [x20]\n"
+ "ld1 { v16.b }[14], [x19]\n"
"b 13f\n"
"6:" // odd_loads_1_12
"tbz %x[width], #0, 13f\n"
- "ld1 { v19.b }[12], [x24]\n"
- "ld1 { v18.b }[12], [x23]\n"
- "ld1 { v17.b }[12], [x21]\n"
- "ld1 { v16.b }[12], [x20]\n"
+ "ld1 { v19.b }[12], [x23]\n"
+ "ld1 { v18.b }[12], [x21]\n"
+ "ld1 { v17.b }[12], [x20]\n"
+ "ld1 { v16.b }[12], [x19]\n"
"b 13f\n"
"7:" // odd_loads_2_8
"tbz %x[width], #1, 8f\n"
- "ld1 { v19.h }[4], [x24], #0x2\n"
- "ld1 { v18.h }[4], [x23], #0x2\n"
- "ld1 { v17.h }[4], [x21], #0x2\n"
- "ld1 { v16.h }[4], [x20], #0x2\n"
+ "ld1 { v19.h }[4], [x23], #0x2\n"
+ "ld1 { v18.h }[4], [x21], #0x2\n"
+ "ld1 { v17.h }[4], [x20], #0x2\n"
+ "ld1 { v16.h }[4], [x19], #0x2\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v19.b }[10], [x24]\n"
- "ld1 { v18.b }[10], [x23]\n"
- "ld1 { v17.b }[10], [x21]\n"
- "ld1 { v16.b }[10], [x20]\n"
+ "ld1 { v19.b }[10], [x23]\n"
+ "ld1 { v18.b }[10], [x21]\n"
+ "ld1 { v17.b }[10], [x20]\n"
+ "ld1 { v16.b }[10], [x19]\n"
"b 13f\n"
"8:" // odd_loads_1_8
"tbz %x[width], #0, 13f\n"
- "ld1 { v19.b }[8], [x24]\n"
- "ld1 { v18.b }[8], [x23]\n"
- "ld1 { v17.b }[8], [x21]\n"
- "ld1 { v16.b }[8], [x20]\n"
+ "ld1 { v19.b }[8], [x23]\n"
+ "ld1 { v18.b }[8], [x21]\n"
+ "ld1 { v17.b }[8], [x20]\n"
+ "ld1 { v16.b }[8], [x19]\n"
"b 13f\n"
"9:" // odd_loads_4_0
"tbz %x[width], #2, 11f\n"
- "ldr s19, [x24], #0x4\n"
- "ldr s18, [x23], #0x4\n"
- "ldr s17, [x21], #0x4\n"
- "ldr s16, [x20], #0x4\n"
+ "ldr s19, [x23], #0x4\n"
+ "ldr s18, [x21], #0x4\n"
+ "ldr s17, [x20], #0x4\n"
+ "ldr s16, [x19], #0x4\n"
"tbz %x[width], #1, 10f\n"
- "ld1 { v19.h }[2], [x24], #0x2\n"
- "ld1 { v18.h }[2], [x23], #0x2\n"
- "ld1 { v17.h }[2], [x21], #0x2\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
+ "ld1 { v19.h }[2], [x23], #0x2\n"
+ "ld1 { v18.h }[2], [x21], #0x2\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "ld1 { v16.h }[2], [x19], #0x2\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v19.b }[6], [x24]\n"
- "ld1 { v18.b }[6], [x23]\n"
- "ld1 { v17.b }[6], [x21]\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "ld1 { v19.b }[6], [x23]\n"
+ "ld1 { v18.b }[6], [x21]\n"
+ "ld1 { v17.b }[6], [x20]\n"
+ "ld1 { v16.b }[6], [x19]\n"
"b 13f\n"
"10:" // odd_loads_1_4
"tbz %x[width], #0, 13f\n"
- "ld1 { v19.b }[4], [x24]\n"
- "ld1 { v18.b }[4], [x23]\n"
- "ld1 { v17.b }[4], [x21]\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "ld1 { v19.b }[4], [x23]\n"
+ "ld1 { v18.b }[4], [x21]\n"
+ "ld1 { v17.b }[4], [x20]\n"
+ "ld1 { v16.b }[4], [x19]\n"
"b 13f\n"
"11:" // odd_loads_2_0
"tbz %x[width], #1, 12f\n"
- "ldr h19, [x24], #0x2\n"
- "ldr h18, [x23], #0x2\n"
- "ldr h17, [x21], #0x2\n"
- "ldr h16, [x20], #0x2\n"
+ "ldr h19, [x23], #0x2\n"
+ "ldr h18, [x21], #0x2\n"
+ "ldr h17, [x20], #0x2\n"
+ "ldr h16, [x19], #0x2\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v19.b }[2], [x24]\n"
- "ld1 { v18.b }[2], [x23]\n"
- "ld1 { v17.b }[2], [x21]\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "ld1 { v19.b }[2], [x23]\n"
+ "ld1 { v18.b }[2], [x21]\n"
+ "ld1 { v17.b }[2], [x20]\n"
+ "ld1 { v16.b }[2], [x19]\n"
"b 13f\n"
"12:" // odd_loads_1_0
- "ldr b19, [x24, #0x0]\n"
- "ldr b18, [x23, #0x0]\n"
- "ldr b17, [x21, #0x0]\n"
- "ldr b16, [x20, #0x0]\n"
+ "ldr b19, [x23, #0x0]\n"
+ "ldr b18, [x21, #0x0]\n"
+ "ldr b17, [x20, #0x0]\n"
+ "ldr b16, [x19, #0x0]\n"
"13:" // Odd load end
"str q19, [%x[out_ptr], #0x0]\n"
"uadalp v28.8h, v19.16b\n"
- "uadalp v27.8h, v18.16b\n"
"str q18, [%x[out_ptr], #0x10]\n"
- "uadalp v26.8h, v17.16b\n"
- "uadalp v25.8h, v16.16b\n"
+ "uadalp v27.8h, v18.16b\n"
"str q17, [%x[out_ptr], #0x20]\n"
+ "uadalp v26.8h, v17.16b\n"
"str q16, [%x[out_ptr], #0x30]\n"
+ "uadalp v25.8h, v16.16b\n"
"add %x[out_ptr], %x[out_ptr], #0x40\n"
"14:" // Odds skip
"uadalp v24.4s, v28.8h\n"
"uadalp v23.4s, v27.8h\n"
+ "addp v24.4s, v24.4s, v23.4s\n"
"uadalp v22.4s, v26.8h\n"
"uadalp v21.4s, v25.8h\n"
- "addp v24.4s, v24.4s, v23.4s\n"
"addp v23.4s, v22.4s, v21.4s\n"
"addp v24.4s, v24.4s, v23.4s\n"
"add v24.4s, v24.4s, v20.4s\n"
@@ -217,7 +217,7 @@ void interleave_block<4, 16, VLType::None, true>(
"add %x[out_ptr], %x[out_ptr], #0x10\n"
: [out_ptr] "+&r" (out_ptr), [width] "+&r" (width)
: [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset)
- : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "x20", "x21", "x22", "x23", "x24"
+ : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "x19", "x20", "x21", "x22", "x23"
);
}
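
The unsigned kernel is structurally identical to the signed one above; only the widening accumulate changes (uadalp in place of sadalp), so the row sums are over uint8 values. A self-contained sketch of just that accumulate-and-fold step, with an assumed fold interval:

    #include <cstddef>
    #include <cstdint>

    // Unsigned row-sum step mirroring uadalp: widen uint8 values into a
    // uint16 partial, folding into a uint32 total before the partial can
    // overflow.  The interval (every 32 bytes) is illustrative only
    // (32 x 255 = 8160 < 65535).
    uint32_t row_sum_u8(const uint8_t *row, size_t n) {
        uint32_t total = 0;
        uint16_t partial = 0;
        for (size_t i = 0; i < n; ++i) {
            partial = static_cast<uint16_t>(partial + row[i]);
            if ((i & 31u) == 31u) {
                total += partial;
                partial = 0;
            }
        }
        return total + partial;
    }
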
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_bf16_fp32.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_bf16_fp32.hpp
index 2ba2aa854a..42574295f1 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_bf16_fp32.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_bf16_fp32.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -31,46 +31,45 @@ void interleave_block<8, 1, VLType::None, false>(
)
{
__asm__ __volatile__(
- "ldr x28, [%x[in], #0x0]\n"
- "ldr x27, [%x[in], #0x8]\n"
+ "movi v30.8h, #0x0\n"
+ "ldr x27, [%x[in], #0x0]\n"
"cmp %x[height], #0x8\n"
- "movi v16.8h, #0x0\n"
- "ldr x26, [%x[in], #0x10]\n"
- "ldr x25, [%x[in], #0x18]\n"
- "add x28, x28, %x[row_offset], LSL #1\n"
+ "ldr x26, [%x[in], #0x8]\n"
"add x27, x27, %x[row_offset], LSL #1\n"
- "ldr x24, [%x[in], #0x20]\n"
- "ldr x23, [%x[in], #0x28]\n"
+ "ldr x25, [%x[in], #0x10]\n"
+ "ldr x24, [%x[in], #0x18]\n"
"add x26, x26, %x[row_offset], LSL #1\n"
+ "ldr x23, [%x[in], #0x20]\n"
"add x25, x25, %x[row_offset], LSL #1\n"
- "ldr x22, [%x[in], #0x30]\n"
- "ldr x21, [%x[in], #0x38]\n"
+ "ldr x22, [%x[in], #0x28]\n"
+ "ldr x21, [%x[in], #0x30]\n"
"add x24, x24, %x[row_offset], LSL #1\n"
+ "ldr x20, [%x[in], #0x38]\n"
"add x23, x23, %x[row_offset], LSL #1\n"
"add x22, x22, %x[row_offset], LSL #1\n"
"add x21, x21, %x[row_offset], LSL #1\n"
+ "add x20, x20, %x[row_offset], LSL #1\n"
"beq 1f\n"
+ "mov x20, x27\n"
"cmp %x[height], #0x2\n"
- "csel x27, x27, x28, GE\n"
- "csel x26, x26, x28, GT\n"
+ "csel x26, x26, x27, GE\n"
+ "csel x25, x25, x27, GT\n"
"cmp %x[height], #0x4\n"
- "csel x25, x25, x28, GE\n"
- "csel x24, x24, x28, GT\n"
+ "csel x24, x24, x27, GE\n"
+ "csel x23, x23, x27, GT\n"
"cmp %x[height], #0x6\n"
- "mov x21, x28\n"
- "csel x23, x23, x28, GE\n"
- "csel x22, x22, x28, GT\n"
+ "csel x22, x22, x27, GE\n"
+ "csel x21, x21, x27, GT\n"
"1:" // no_pointer_adj
- "cmp %x[width], #0x4\n"
- "prfm pldl1keep, [x28, #0x0]\n"
"prfm pldl1keep, [x27, #0x0]\n"
+ "cmp %x[width], #0x4\n"
"prfm pldl1keep, [x26, #0x0]\n"
"prfm pldl1keep, [x25, #0x0]\n"
"prfm pldl1keep, [x24, #0x0]\n"
"prfm pldl1keep, [x23, #0x0]\n"
"prfm pldl1keep, [x22, #0x0]\n"
"prfm pldl1keep, [x21, #0x0]\n"
- "prfm pldl1keep, [x28, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x0]\n"
"prfm pldl1keep, [x27, #0x40]\n"
"prfm pldl1keep, [x26, #0x40]\n"
"prfm pldl1keep, [x25, #0x40]\n"
@@ -78,134 +77,135 @@ void interleave_block<8, 1, VLType::None, false>(
"prfm pldl1keep, [x23, #0x40]\n"
"prfm pldl1keep, [x22, #0x40]\n"
"prfm pldl1keep, [x21, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x40]\n"
"blt 3f\n"
"2:" // Main loop head
- "ldr d28, [x28], #0x8\n"
- "ldr d27, [x27], #0x8\n"
- "shll v28.4s, v28.4h, #0x10\n"
- "shll v27.4s, v27.4h, #0x10\n"
- "ldr d22, [x26], #0x8\n"
- "ldr d21, [x25], #0x8\n"
- "shll v22.4s, v22.4h, #0x10\n"
- "shll v21.4s, v21.4h, #0x10\n"
- "ldr d26, [x24], #0x8\n"
- "ldr d25, [x23], #0x8\n"
- "shll v26.4s, v26.4h, #0x10\n"
- "shll v25.4s, v25.4h, #0x10\n"
- "ldr d20, [x22], #0x8\n"
- "ldr d19, [x21], #0x8\n"
- "shll v20.4s, v20.4h, #0x10\n"
- "shll v19.4s, v19.4h, #0x10\n"
- "zip1 v24.4s, v28.4s, v22.4s\n"
- "zip1 v23.4s, v27.4s, v21.4s\n"
+ "ldr d29, [x27], #0x8\n"
+ "zip1 v29.8h, v30.8h, v29.8h\n"
+ "ldr d28, [x26], #0x8\n"
"subs %x[width], %x[width], #0x4\n"
+ "zip1 v28.8h, v30.8h, v28.8h\n"
+ "ldr d24, [x25], #0x8\n"
"cmp %x[width], #0x4\n"
- "zip1 v18.4s, v26.4s, v20.4s\n"
- "zip1 v17.4s, v25.4s, v19.4s\n"
- "prfm pldl1keep, [x28, #0x70]\n"
+ "zip1 v24.8h, v30.8h, v24.8h\n"
+ "ldr d27, [x24], #0x8\n"
+ "ldr d26, [x23], #0x8\n"
+ "zip1 v25.4s, v29.4s, v24.4s\n"
+ "zip2 v24.4s, v29.4s, v24.4s\n"
+ "ldr d23, [x22], #0x8\n"
+ "ldr d22, [x21], #0x8\n"
+ "zip1 v27.8h, v30.8h, v27.8h\n"
+ "ldr d21, [x20], #0x8\n"
+ "zip1 v26.8h, v30.8h, v26.8h\n"
"prfm pldl1keep, [x27, #0x70]\n"
- "zip2 v22.4s, v28.4s, v22.4s\n"
- "zip2 v21.4s, v27.4s, v21.4s\n"
+ "zip1 v20.4s, v28.4s, v27.4s\n"
"prfm pldl1keep, [x26, #0x70]\n"
+ "zip1 v23.8h, v30.8h, v23.8h\n"
"prfm pldl1keep, [x25, #0x70]\n"
- "zip2 v20.4s, v26.4s, v20.4s\n"
- "zip2 v19.4s, v25.4s, v19.4s\n"
+ "zip1 v22.8h, v30.8h, v22.8h\n"
"prfm pldl1keep, [x24, #0x70]\n"
+ "zip1 v21.8h, v30.8h, v21.8h\n"
"prfm pldl1keep, [x23, #0x70]\n"
+ "zip1 v17.4s, v25.4s, v20.4s\n"
"prfm pldl1keep, [x22, #0x70]\n"
+ "zip1 v19.4s, v26.4s, v22.4s\n"
"prfm pldl1keep, [x21, #0x70]\n"
- "zip1 v16.4s, v24.4s, v23.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip1 v18.4s, v23.4s, v21.4s\n"
+ "prfm pldl1keep, [x20, #0x70]\n"
+ "zip1 v16.4s, v19.4s, v18.4s\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
+ "zip2 v17.4s, v25.4s, v20.4s\n"
"str q16, [%x[out_ptr], #0x10]\n"
- "zip2 v16.4s, v24.4s, v23.4s\n"
- "str q16, [%x[out_ptr], #0x20]\n"
- "zip2 v17.4s, v18.4s, v17.4s\n"
- "zip1 v16.4s, v22.4s, v21.4s\n"
- "str q17, [%x[out_ptr], #0x30]\n"
- "zip1 v18.4s, v20.4s, v19.4s\n"
- "zip2 v17.4s, v22.4s, v21.4s\n"
+ "zip2 v16.4s, v19.4s, v18.4s\n"
+ "str q17, [%x[out_ptr], #0x20]\n"
+ "zip2 v19.4s, v28.4s, v27.4s\n"
+ "str q16, [%x[out_ptr], #0x30]\n"
+ "zip1 v16.4s, v24.4s, v19.4s\n"
"str q16, [%x[out_ptr], #0x40]\n"
- "zip2 v16.4s, v20.4s, v19.4s\n"
- "str q18, [%x[out_ptr], #0x50]\n"
- "str q17, [%x[out_ptr], #0x60]\n"
+ "zip2 v18.4s, v26.4s, v22.4s\n"
+ "zip2 v17.4s, v23.4s, v21.4s\n"
+ "zip1 v16.4s, v18.4s, v17.4s\n"
+ "str q16, [%x[out_ptr], #0x50]\n"
+ "zip2 v16.4s, v24.4s, v19.4s\n"
+ "str q16, [%x[out_ptr], #0x60]\n"
+ "zip2 v16.4s, v18.4s, v17.4s\n"
"str q16, [%x[out_ptr], #0x70]\n"
"add %x[out_ptr], %x[out_ptr], #0x80\n"
"bge 2b\n"
"3:" // Main loop skip
"cbz %x[width], 6f\n"
"tbz %x[width], #1, 4f\n"
- "ldr s28, [x28], #0x4\n"
- "ldr s27, [x27], #0x4\n"
- "mov x20, #0x2\n"
- "ldr s22, [x26], #0x4\n"
- "ldr s21, [x25], #0x4\n"
- "ldr s26, [x24], #0x4\n"
- "ldr s25, [x23], #0x4\n"
- "ldr s20, [x22], #0x4\n"
- "ldr s19, [x21], #0x4\n"
+ "ldr s29, [x27], #0x4\n"
+ "ldr s28, [x26], #0x4\n"
+ "mov x19, #0x2\n"
+ "ldr s24, [x25], #0x4\n"
+ "ldr s27, [x24], #0x4\n"
+ "ldr s26, [x23], #0x4\n"
+ "ldr s23, [x22], #0x4\n"
+ "ldr s22, [x21], #0x4\n"
+ "ldr s21, [x20], #0x4\n"
"tbz %x[width], #0, 5f\n"
- "ld1 { v28.h }[2], [x28]\n"
- "ld1 { v27.h }[2], [x27]\n"
- "mov x20, #0x3\n"
- "ld1 { v22.h }[2], [x26]\n"
- "ld1 { v21.h }[2], [x25]\n"
- "ld1 { v26.h }[2], [x24]\n"
- "ld1 { v25.h }[2], [x23]\n"
- "ld1 { v20.h }[2], [x22]\n"
- "ld1 { v19.h }[2], [x21]\n"
+ "ld1 { v29.h }[2], [x27]\n"
+ "mov x19, #0x3\n"
+ "ld1 { v28.h }[2], [x26]\n"
+ "ld1 { v24.h }[2], [x25]\n"
+ "ld1 { v27.h }[2], [x24]\n"
+ "ld1 { v26.h }[2], [x23]\n"
+ "ld1 { v23.h }[2], [x22]\n"
+ "ld1 { v22.h }[2], [x21]\n"
+ "ld1 { v21.h }[2], [x20]\n"
"b 5f\n"
"4:" // odd_loads_1_0
- "ldr h28, [x28, #0x0]\n"
- "ldr h27, [x27, #0x0]\n"
- "mov x20, #0x1\n"
- "ldr h22, [x26, #0x0]\n"
- "ldr h21, [x25, #0x0]\n"
- "ldr h26, [x24, #0x0]\n"
- "ldr h25, [x23, #0x0]\n"
- "ldr h20, [x22, #0x0]\n"
- "ldr h19, [x21, #0x0]\n"
+ "ldr h29, [x27, #0x0]\n"
+ "mov x19, #0x1\n"
+ "ldr h28, [x26, #0x0]\n"
+ "ldr h24, [x25, #0x0]\n"
+ "ldr h27, [x24, #0x0]\n"
+ "ldr h26, [x23, #0x0]\n"
+ "ldr h23, [x22, #0x0]\n"
+ "ldr h22, [x21, #0x0]\n"
+ "ldr h21, [x20, #0x0]\n"
"5:" // Odd load end
- "shll v28.4s, v28.4h, #0x10\n"
- "shll v27.4s, v27.4h, #0x10\n"
- "subs x20, x20, #0x1\n"
- "shll v22.4s, v22.4h, #0x10\n"
- "shll v21.4s, v21.4h, #0x10\n"
- "shll v26.4s, v26.4h, #0x10\n"
- "shll v25.4s, v25.4h, #0x10\n"
- "shll v20.4s, v20.4h, #0x10\n"
- "shll v19.4s, v19.4h, #0x10\n"
- "zip1 v24.4s, v28.4s, v22.4s\n"
- "zip1 v23.4s, v27.4s, v21.4s\n"
- "zip1 v18.4s, v26.4s, v20.4s\n"
- "zip1 v17.4s, v25.4s, v19.4s\n"
- "zip1 v16.4s, v24.4s, v23.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip1 v29.8h, v30.8h, v29.8h\n"
+ "subs x19, x19, #0x1\n"
+ "zip1 v28.8h, v30.8h, v28.8h\n"
+ "zip1 v24.8h, v30.8h, v24.8h\n"
+ "zip1 v27.8h, v30.8h, v27.8h\n"
+ "zip1 v26.8h, v30.8h, v26.8h\n"
+ "zip1 v23.8h, v30.8h, v23.8h\n"
+ "zip1 v22.8h, v30.8h, v22.8h\n"
+ "zip1 v21.8h, v30.8h, v21.8h\n"
+ "zip1 v25.4s, v29.4s, v24.4s\n"
+ "zip1 v20.4s, v28.4s, v27.4s\n"
+ "zip1 v17.4s, v25.4s, v20.4s\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
+ "zip1 v19.4s, v26.4s, v22.4s\n"
+ "zip1 v18.4s, v23.4s, v21.4s\n"
+ "zip1 v16.4s, v19.4s, v18.4s\n"
"str q16, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"beq 6f\n"
- "subs x20, x20, #0x1\n"
- "zip2 v16.4s, v24.4s, v23.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip2 v17.4s, v18.4s, v17.4s\n"
- "str q17, [%x[out_ptr], #0x10]\n"
+ "zip2 v17.4s, v25.4s, v20.4s\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
+ "zip2 v16.4s, v19.4s, v18.4s\n"
+ "subs x19, x19, #0x1\n"
+ "str q16, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"beq 6f\n"
- "zip2 v22.4s, v28.4s, v22.4s\n"
- "zip2 v21.4s, v27.4s, v21.4s\n"
- "zip2 v20.4s, v26.4s, v20.4s\n"
- "zip2 v19.4s, v25.4s, v19.4s\n"
- "zip1 v16.4s, v22.4s, v21.4s\n"
+ "zip2 v24.4s, v29.4s, v24.4s\n"
+ "zip2 v19.4s, v28.4s, v27.4s\n"
+ "zip1 v16.4s, v24.4s, v19.4s\n"
"str q16, [%x[out_ptr], #0x0]\n"
- "zip1 v18.4s, v20.4s, v19.4s\n"
- "str q18, [%x[out_ptr], #0x10]\n"
+ "zip2 v18.4s, v26.4s, v22.4s\n"
+ "zip2 v17.4s, v23.4s, v21.4s\n"
+ "zip1 v16.4s, v18.4s, v17.4s\n"
+ "str q16, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"6:" // Odds skip
: [out_ptr] "+&r" (out_ptr), [width] "+&r" (width)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset)
- : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27"
);
}
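
This kernel widens bfloat16 inputs to fp32 while interleaving eight rows. Because bfloat16 is exactly the top 16 bits of an IEEE-754 fp32, the widening is a pure bit shift: the reverted code used shll #0x10, while the restored code zips each input against a zeroed register (v30), placing the bf16 bits in the high half of every 32-bit lane — the same result. A standalone sketch of that widening step:

    #include <cstdint>
    #include <cstring>

    // bfloat16 -> float widening: bf16 is the high half of an fp32, so
    // shifting the raw bits left by 16 reproduces the original value
    // (what 'shll #0x10' or zipping against zeroes achieves per lane).
    float bf16_to_f32(uint16_t bf16_bits) {
        const uint32_t f32_bits = static_cast<uint32_t>(bf16_bits) << 16;
        float f;
        std::memcpy(&f, &f32_bits, sizeof(f));
        return f;
    }
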
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp16_fp16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp16_fp16.hpp
index f55c2be4a4..62d1657a9a 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp16_fp16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp16_fp16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -31,45 +31,44 @@ void interleave_block<8, 1, VLType::None, false>(
)
{
__asm__ __volatile__(
- "ldr x28, [%x[in], #0x0]\n"
- "ldr x27, [%x[in], #0x8]\n"
+ "ldr x27, [%x[in], #0x0]\n"
"cmp %x[height], #0x8\n"
- "add x28, x28, %x[row_offset], LSL #1\n"
- "ldr x26, [%x[in], #0x10]\n"
- "ldr x25, [%x[in], #0x18]\n"
+ "ldr x26, [%x[in], #0x8]\n"
"add x27, x27, %x[row_offset], LSL #1\n"
+ "ldr x25, [%x[in], #0x10]\n"
+ "ldr x24, [%x[in], #0x18]\n"
"add x26, x26, %x[row_offset], LSL #1\n"
- "ldr x24, [%x[in], #0x20]\n"
- "ldr x23, [%x[in], #0x28]\n"
+ "ldr x23, [%x[in], #0x20]\n"
"add x25, x25, %x[row_offset], LSL #1\n"
+ "ldr x22, [%x[in], #0x28]\n"
+ "ldr x21, [%x[in], #0x30]\n"
"add x24, x24, %x[row_offset], LSL #1\n"
- "ldr x22, [%x[in], #0x30]\n"
- "ldr x21, [%x[in], #0x38]\n"
+ "ldr x20, [%x[in], #0x38]\n"
"add x23, x23, %x[row_offset], LSL #1\n"
"add x22, x22, %x[row_offset], LSL #1\n"
"add x21, x21, %x[row_offset], LSL #1\n"
+ "add x20, x20, %x[row_offset], LSL #1\n"
"beq 1f\n"
+ "mov x20, x27\n"
"cmp %x[height], #0x2\n"
- "csel x27, x27, x28, GE\n"
- "csel x26, x26, x28, GT\n"
+ "csel x26, x26, x27, GE\n"
+ "csel x25, x25, x27, GT\n"
"cmp %x[height], #0x4\n"
- "csel x25, x25, x28, GE\n"
- "csel x24, x24, x28, GT\n"
+ "csel x24, x24, x27, GE\n"
+ "csel x23, x23, x27, GT\n"
"cmp %x[height], #0x6\n"
- "mov x21, x28\n"
- "csel x23, x23, x28, GE\n"
- "csel x22, x22, x28, GT\n"
+ "csel x22, x22, x27, GE\n"
+ "csel x21, x21, x27, GT\n"
"1:" // no_pointer_adj
- "cmp %x[width], #0x8\n"
- "prfm pldl1keep, [x28, #0x0]\n"
"prfm pldl1keep, [x27, #0x0]\n"
+ "cmp %x[width], #0x8\n"
"prfm pldl1keep, [x26, #0x0]\n"
"prfm pldl1keep, [x25, #0x0]\n"
"prfm pldl1keep, [x24, #0x0]\n"
"prfm pldl1keep, [x23, #0x0]\n"
"prfm pldl1keep, [x22, #0x0]\n"
"prfm pldl1keep, [x21, #0x0]\n"
- "prfm pldl1keep, [x28, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x0]\n"
"prfm pldl1keep, [x27, #0x40]\n"
"prfm pldl1keep, [x26, #0x40]\n"
"prfm pldl1keep, [x25, #0x40]\n"
@@ -77,192 +76,193 @@ void interleave_block<8, 1, VLType::None, false>(
"prfm pldl1keep, [x23, #0x40]\n"
"prfm pldl1keep, [x22, #0x40]\n"
"prfm pldl1keep, [x21, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x40]\n"
"blt 3f\n"
"2:" // Main loop head
- "ldr q25, [x28], #0x10\n"
"ldr q30, [x27], #0x10\n"
"subs %x[width], %x[width], #0x8\n"
- "cmp %x[width], #0x8\n"
"ldr q29, [x26], #0x10\n"
+ "cmp %x[width], #0x8\n"
"ldr q28, [x25], #0x10\n"
- "ldr q21, [x24], #0x10\n"
- "ldr q27, [x23], #0x10\n"
- "zip1 v23.8h, v25.8h, v21.8h\n"
- "zip1 v26.8h, v30.8h, v27.8h\n"
- "ldr q20, [x22], #0x10\n"
- "ldr q22, [x21], #0x10\n"
- "zip1 v19.8h, v29.8h, v20.8h\n"
- "zip1 v18.8h, v28.8h, v22.8h\n"
- "zip2 v25.8h, v25.8h, v21.8h\n"
- "zip2 v21.8h, v29.8h, v20.8h\n"
- "prfm pldl1keep, [x28, #0x70]\n"
+ "ldr q27, [x24], #0x10\n"
+ "ldr q25, [x23], #0x10\n"
+ "zip1 v26.8h, v30.8h, v25.8h\n"
+ "ldr q21, [x22], #0x10\n"
+ "zip2 v25.8h, v30.8h, v25.8h\n"
+ "ldr q24, [x21], #0x10\n"
+ "ldr q23, [x20], #0x10\n"
+ "zip1 v22.8h, v29.8h, v21.8h\n"
"prfm pldl1keep, [x27, #0x70]\n"
- "zip2 v20.8h, v30.8h, v27.8h\n"
- "zip2 v16.8h, v28.8h, v22.8h\n"
+ "zip2 v21.8h, v29.8h, v21.8h\n"
"prfm pldl1keep, [x26, #0x70]\n"
+ "zip1 v20.8h, v28.8h, v24.8h\n"
"prfm pldl1keep, [x25, #0x70]\n"
- "zip1 v24.8h, v23.8h, v19.8h\n"
- "zip1 v17.8h, v26.8h, v18.8h\n"
+ "zip1 v18.8h, v26.8h, v20.8h\n"
"prfm pldl1keep, [x24, #0x70]\n"
+ "zip1 v19.8h, v27.8h, v23.8h\n"
"prfm pldl1keep, [x23, #0x70]\n"
- "zip2 v23.8h, v23.8h, v19.8h\n"
- "zip2 v19.8h, v26.8h, v18.8h\n"
+ "zip1 v16.8h, v22.8h, v19.8h\n"
"prfm pldl1keep, [x22, #0x70]\n"
+ "zip1 v17.8h, v18.8h, v16.8h\n"
"prfm pldl1keep, [x21, #0x70]\n"
- "zip1 v22.8h, v25.8h, v21.8h\n"
- "zip1 v18.8h, v20.8h, v16.8h\n"
- "zip2 v21.8h, v25.8h, v21.8h\n"
- "zip2 v20.8h, v20.8h, v16.8h\n"
- "zip1 v16.8h, v24.8h, v17.8h\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip2 v16.8h, v24.8h, v17.8h\n"
+ "zip2 v16.8h, v18.8h, v16.8h\n"
+ "prfm pldl1keep, [x20, #0x70]\n"
+ "zip2 v18.8h, v26.8h, v20.8h\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
+ "zip2 v17.8h, v22.8h, v19.8h\n"
"str q16, [%x[out_ptr], #0x10]\n"
- "zip1 v17.8h, v23.8h, v19.8h\n"
- "zip2 v16.8h, v23.8h, v19.8h\n"
- "str q17, [%x[out_ptr], #0x20]\n"
- "zip1 v19.8h, v22.8h, v18.8h\n"
- "zip2 v18.8h, v22.8h, v18.8h\n"
+ "zip1 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [%x[out_ptr], #0x20]\n"
+ "zip2 v16.8h, v18.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x30]\n"
- "zip1 v17.8h, v21.8h, v20.8h\n"
- "zip2 v16.8h, v21.8h, v20.8h\n"
- "str q19, [%x[out_ptr], #0x40]\n"
- "str q18, [%x[out_ptr], #0x50]\n"
- "str q17, [%x[out_ptr], #0x60]\n"
+ "zip2 v20.8h, v28.8h, v24.8h\n"
+ "zip1 v18.8h, v25.8h, v20.8h\n"
+ "zip2 v19.8h, v27.8h, v23.8h\n"
+ "zip1 v17.8h, v21.8h, v19.8h\n"
+ "zip1 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [%x[out_ptr], #0x40]\n"
+ "zip2 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [%x[out_ptr], #0x50]\n"
+ "zip2 v18.8h, v25.8h, v20.8h\n"
+ "zip2 v17.8h, v21.8h, v19.8h\n"
+ "zip1 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [%x[out_ptr], #0x60]\n"
+ "zip2 v16.8h, v18.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x70]\n"
"add %x[out_ptr], %x[out_ptr], #0x80\n"
"bge 2b\n"
"3:" // Main loop skip
"cbz %x[width], 8f\n"
"tbz %x[width], #2, 5f\n"
- "ldr d25, [x28], #0x8\n"
"ldr d30, [x27], #0x8\n"
"ldr d29, [x26], #0x8\n"
"ldr d28, [x25], #0x8\n"
- "ldr d21, [x24], #0x8\n"
- "ldr d27, [x23], #0x8\n"
- "ldr d20, [x22], #0x8\n"
- "ldr d22, [x21], #0x8\n"
+ "ldr d27, [x24], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d24, [x21], #0x8\n"
+ "ldr d23, [x20], #0x8\n"
"tbz %x[width], #1, 4f\n"
- "ld1 { v25.s }[2], [x28], #0x4\n"
"ld1 { v30.s }[2], [x27], #0x4\n"
- "mov x20, #0x6\n"
+ "mov x19, #0x6\n"
"ld1 { v29.s }[2], [x26], #0x4\n"
"ld1 { v28.s }[2], [x25], #0x4\n"
- "ld1 { v21.s }[2], [x24], #0x4\n"
- "ld1 { v27.s }[2], [x23], #0x4\n"
- "ld1 { v20.s }[2], [x22], #0x4\n"
- "ld1 { v22.s }[2], [x21], #0x4\n"
+ "ld1 { v27.s }[2], [x24], #0x4\n"
+ "ld1 { v25.s }[2], [x23], #0x4\n"
+ "ld1 { v21.s }[2], [x22], #0x4\n"
+ "ld1 { v24.s }[2], [x21], #0x4\n"
+ "ld1 { v23.s }[2], [x20], #0x4\n"
"tbz %x[width], #0, 7f\n"
- "ld1 { v25.h }[6], [x28]\n"
"ld1 { v30.h }[6], [x27]\n"
- "mov x20, #0x7\n"
+ "mov x19, #0x7\n"
"ld1 { v29.h }[6], [x26]\n"
"ld1 { v28.h }[6], [x25]\n"
- "ld1 { v21.h }[6], [x24]\n"
- "ld1 { v27.h }[6], [x23]\n"
- "ld1 { v20.h }[6], [x22]\n"
- "ld1 { v22.h }[6], [x21]\n"
+ "ld1 { v27.h }[6], [x24]\n"
+ "ld1 { v25.h }[6], [x23]\n"
+ "ld1 { v21.h }[6], [x22]\n"
+ "ld1 { v24.h }[6], [x21]\n"
+ "ld1 { v23.h }[6], [x20]\n"
"b 7f\n"
"4:" // odd_loads_1_4
- "mov x20, #0x4\n"
+ "mov x19, #0x4\n"
"tbz %x[width], #0, 7f\n"
- "ld1 { v25.h }[4], [x28]\n"
"ld1 { v30.h }[4], [x27]\n"
- "mov x20, #0x5\n"
"ld1 { v29.h }[4], [x26]\n"
+ "mov x19, #0x5\n"
"ld1 { v28.h }[4], [x25]\n"
- "ld1 { v21.h }[4], [x24]\n"
- "ld1 { v27.h }[4], [x23]\n"
- "ld1 { v20.h }[4], [x22]\n"
- "ld1 { v22.h }[4], [x21]\n"
+ "ld1 { v27.h }[4], [x24]\n"
+ "ld1 { v25.h }[4], [x23]\n"
+ "ld1 { v21.h }[4], [x22]\n"
+ "ld1 { v24.h }[4], [x21]\n"
+ "ld1 { v23.h }[4], [x20]\n"
"b 7f\n"
"5:" // odd_loads_2_0
"tbz %x[width], #1, 6f\n"
- "ldr s25, [x28], #0x4\n"
"ldr s30, [x27], #0x4\n"
- "mov x20, #0x2\n"
"ldr s29, [x26], #0x4\n"
+ "mov x19, #0x2\n"
"ldr s28, [x25], #0x4\n"
- "ldr s21, [x24], #0x4\n"
- "ldr s27, [x23], #0x4\n"
- "ldr s20, [x22], #0x4\n"
- "ldr s22, [x21], #0x4\n"
+ "ldr s27, [x24], #0x4\n"
+ "ldr s25, [x23], #0x4\n"
+ "ldr s21, [x22], #0x4\n"
+ "ldr s24, [x21], #0x4\n"
+ "ldr s23, [x20], #0x4\n"
"tbz %x[width], #0, 7f\n"
- "ld1 { v25.h }[2], [x28]\n"
"ld1 { v30.h }[2], [x27]\n"
- "mov x20, #0x3\n"
+ "mov x19, #0x3\n"
"ld1 { v29.h }[2], [x26]\n"
"ld1 { v28.h }[2], [x25]\n"
- "ld1 { v21.h }[2], [x24]\n"
- "ld1 { v27.h }[2], [x23]\n"
- "ld1 { v20.h }[2], [x22]\n"
- "ld1 { v22.h }[2], [x21]\n"
+ "ld1 { v27.h }[2], [x24]\n"
+ "ld1 { v25.h }[2], [x23]\n"
+ "ld1 { v21.h }[2], [x22]\n"
+ "ld1 { v24.h }[2], [x21]\n"
+ "ld1 { v23.h }[2], [x20]\n"
"b 7f\n"
"6:" // odd_loads_1_0
- "ldr h25, [x28, #0x0]\n"
"ldr h30, [x27, #0x0]\n"
- "mov x20, #0x1\n"
+ "mov x19, #0x1\n"
"ldr h29, [x26, #0x0]\n"
"ldr h28, [x25, #0x0]\n"
- "ldr h21, [x24, #0x0]\n"
- "ldr h27, [x23, #0x0]\n"
- "ldr h20, [x22, #0x0]\n"
- "ldr h22, [x21, #0x0]\n"
+ "ldr h27, [x24, #0x0]\n"
+ "ldr h25, [x23, #0x0]\n"
+ "ldr h21, [x22, #0x0]\n"
+ "ldr h24, [x21, #0x0]\n"
+ "ldr h23, [x20, #0x0]\n"
"7:" // Odd load end
- "zip1 v23.8h, v25.8h, v21.8h\n"
- "zip1 v19.8h, v29.8h, v20.8h\n"
- "subs x20, x20, #0x1\n"
- "zip1 v26.8h, v30.8h, v27.8h\n"
- "zip1 v18.8h, v28.8h, v22.8h\n"
- "zip1 v24.8h, v23.8h, v19.8h\n"
- "zip1 v17.8h, v26.8h, v18.8h\n"
- "zip1 v16.8h, v24.8h, v17.8h\n"
- "str q16, [%x[out_ptr], #0x0]\n"
+ "zip1 v26.8h, v30.8h, v25.8h\n"
+ "subs x19, x19, #0x1\n"
+ "zip1 v20.8h, v28.8h, v24.8h\n"
+ "zip1 v18.8h, v26.8h, v20.8h\n"
+ "zip1 v22.8h, v29.8h, v21.8h\n"
+ "zip1 v19.8h, v27.8h, v23.8h\n"
+ "zip1 v16.8h, v22.8h, v19.8h\n"
+ "zip1 v17.8h, v18.8h, v16.8h\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 8f\n"
- "subs x20, x20, #0x1\n"
- "zip2 v16.8h, v24.8h, v17.8h\n"
+ "zip2 v16.8h, v18.8h, v16.8h\n"
"str q16, [%x[out_ptr], #0x0]\n"
+ "subs x19, x19, #0x1\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 8f\n"
- "zip2 v23.8h, v23.8h, v19.8h\n"
- "zip2 v19.8h, v26.8h, v18.8h\n"
- "subs x20, x20, #0x1\n"
- "zip1 v17.8h, v23.8h, v19.8h\n"
- "str q17, [%x[out_ptr], #0x0]\n"
+ "zip2 v18.8h, v26.8h, v20.8h\n"
+ "zip2 v17.8h, v22.8h, v19.8h\n"
+ "subs x19, x19, #0x1\n"
+ "zip1 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [%x[out_ptr], #0x0]\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 8f\n"
- "subs x20, x20, #0x1\n"
- "zip2 v16.8h, v23.8h, v19.8h\n"
+ "zip2 v16.8h, v18.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x0]\n"
+ "subs x19, x19, #0x1\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 8f\n"
- "zip2 v25.8h, v25.8h, v21.8h\n"
- "zip2 v21.8h, v29.8h, v20.8h\n"
- "subs x20, x20, #0x1\n"
- "zip2 v20.8h, v30.8h, v27.8h\n"
- "zip2 v16.8h, v28.8h, v22.8h\n"
- "zip1 v22.8h, v25.8h, v21.8h\n"
- "zip1 v18.8h, v20.8h, v16.8h\n"
- "zip1 v19.8h, v22.8h, v18.8h\n"
- "str q19, [%x[out_ptr], #0x0]\n"
+ "zip2 v25.8h, v30.8h, v25.8h\n"
+ "zip2 v20.8h, v28.8h, v24.8h\n"
+ "subs x19, x19, #0x1\n"
+ "zip1 v18.8h, v25.8h, v20.8h\n"
+ "zip2 v21.8h, v29.8h, v21.8h\n"
+ "zip2 v19.8h, v27.8h, v23.8h\n"
+ "zip1 v17.8h, v21.8h, v19.8h\n"
+ "zip1 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [%x[out_ptr], #0x0]\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 8f\n"
- "subs x20, x20, #0x1\n"
- "zip2 v18.8h, v22.8h, v18.8h\n"
- "str q18, [%x[out_ptr], #0x0]\n"
+ "zip2 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [%x[out_ptr], #0x0]\n"
+ "subs x19, x19, #0x1\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 8f\n"
- "zip2 v21.8h, v25.8h, v21.8h\n"
- "zip2 v20.8h, v20.8h, v16.8h\n"
- "zip1 v17.8h, v21.8h, v20.8h\n"
- "str q17, [%x[out_ptr], #0x0]\n"
+ "zip2 v18.8h, v25.8h, v20.8h\n"
+ "zip2 v17.8h, v21.8h, v19.8h\n"
+ "zip1 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [%x[out_ptr], #0x0]\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"8:" // Odds skip
: [out_ptr] "+&r" (out_ptr), [width] "+&r" (width)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset)
- : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27"
);
}
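
The fp16 kernel performs no conversion; it interleaves eight rows of halfwords through a network of zip1/zip2 stages (three rounds of zips fully interleave eight lanes). A scalar reference for what the zip network computes — effectively a transpose of an 8 x width tile; the name is illustrative:

    #include <cstddef>
    #include <cstdint>

    // Scalar reference for the 8-row, block-1 interleave: element i of each
    // of the 8 rows lands consecutively in the output, which is what the
    // zip1/zip2 network above computes.
    void interleave8_block1(uint16_t *out, const uint16_t *const *row,
                            size_t width) {
        for (size_t i = 0; i < width; ++i) {
            for (int r = 0; r < 8; ++r) {
                *out++ = row[r][i];
            }
        }
    }
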
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp16_fp32.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp16_fp32.hpp
index f64db0b476..b67840b280 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp16_fp32.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp16_fp32.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -31,45 +31,44 @@ void interleave_block<8, 1, VLType::None, false>(
)
{
__asm__ __volatile__(
- "ldr x28, [%x[in], #0x0]\n"
- "ldr x27, [%x[in], #0x8]\n"
+ "ldr x27, [%x[in], #0x0]\n"
"cmp %x[height], #0x8\n"
- "add x28, x28, %x[row_offset], LSL #1\n"
- "ldr x26, [%x[in], #0x10]\n"
- "ldr x25, [%x[in], #0x18]\n"
+ "ldr x26, [%x[in], #0x8]\n"
"add x27, x27, %x[row_offset], LSL #1\n"
+ "ldr x25, [%x[in], #0x10]\n"
+ "ldr x24, [%x[in], #0x18]\n"
"add x26, x26, %x[row_offset], LSL #1\n"
- "ldr x24, [%x[in], #0x20]\n"
- "ldr x23, [%x[in], #0x28]\n"
+ "ldr x23, [%x[in], #0x20]\n"
"add x25, x25, %x[row_offset], LSL #1\n"
+ "ldr x22, [%x[in], #0x28]\n"
+ "ldr x21, [%x[in], #0x30]\n"
"add x24, x24, %x[row_offset], LSL #1\n"
- "ldr x22, [%x[in], #0x30]\n"
- "ldr x21, [%x[in], #0x38]\n"
+ "ldr x20, [%x[in], #0x38]\n"
"add x23, x23, %x[row_offset], LSL #1\n"
"add x22, x22, %x[row_offset], LSL #1\n"
"add x21, x21, %x[row_offset], LSL #1\n"
+ "add x20, x20, %x[row_offset], LSL #1\n"
"beq 1f\n"
+ "mov x20, x27\n"
"cmp %x[height], #0x2\n"
- "csel x27, x27, x28, GE\n"
- "csel x26, x26, x28, GT\n"
+ "csel x26, x26, x27, GE\n"
+ "csel x25, x25, x27, GT\n"
"cmp %x[height], #0x4\n"
- "csel x25, x25, x28, GE\n"
- "csel x24, x24, x28, GT\n"
+ "csel x24, x24, x27, GE\n"
+ "csel x23, x23, x27, GT\n"
"cmp %x[height], #0x6\n"
- "mov x21, x28\n"
- "csel x23, x23, x28, GE\n"
- "csel x22, x22, x28, GT\n"
+ "csel x22, x22, x27, GE\n"
+ "csel x21, x21, x27, GT\n"
"1:" // no_pointer_adj
- "cmp %x[width], #0x4\n"
- "prfm pldl1keep, [x28, #0x0]\n"
"prfm pldl1keep, [x27, #0x0]\n"
+ "cmp %x[width], #0x4\n"
"prfm pldl1keep, [x26, #0x0]\n"
"prfm pldl1keep, [x25, #0x0]\n"
"prfm pldl1keep, [x24, #0x0]\n"
"prfm pldl1keep, [x23, #0x0]\n"
"prfm pldl1keep, [x22, #0x0]\n"
"prfm pldl1keep, [x21, #0x0]\n"
- "prfm pldl1keep, [x28, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x0]\n"
"prfm pldl1keep, [x27, #0x40]\n"
"prfm pldl1keep, [x26, #0x40]\n"
"prfm pldl1keep, [x25, #0x40]\n"
@@ -77,134 +76,135 @@ void interleave_block<8, 1, VLType::None, false>(
"prfm pldl1keep, [x23, #0x40]\n"
"prfm pldl1keep, [x22, #0x40]\n"
"prfm pldl1keep, [x21, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x40]\n"
"blt 3f\n"
"2:" // Main loop head
- "ldr d28, [x28], #0x8\n"
- "ldr d27, [x27], #0x8\n"
- "fcvtl v28.4s, v28.4h\n"
- "fcvtl v27.4s, v27.4h\n"
- "ldr d22, [x26], #0x8\n"
- "ldr d21, [x25], #0x8\n"
- "fcvtl v22.4s, v22.4h\n"
- "fcvtl v21.4s, v21.4h\n"
- "ldr d26, [x24], #0x8\n"
- "ldr d25, [x23], #0x8\n"
- "fcvtl v26.4s, v26.4h\n"
- "fcvtl v25.4s, v25.4h\n"
- "ldr d20, [x22], #0x8\n"
- "ldr d19, [x21], #0x8\n"
- "fcvtl v20.4s, v20.4h\n"
- "fcvtl v19.4s, v19.4h\n"
- "zip1 v24.4s, v28.4s, v22.4s\n"
- "zip1 v23.4s, v27.4s, v21.4s\n"
+ "ldr d30, [x27], #0x8\n"
"subs %x[width], %x[width], #0x4\n"
+ "ldr d29, [x26], #0x8\n"
"cmp %x[width], #0x4\n"
- "zip1 v18.4s, v26.4s, v20.4s\n"
- "zip1 v17.4s, v25.4s, v19.4s\n"
- "prfm pldl1keep, [x28, #0x70]\n"
+ "ldr d28, [x25], #0x8\n"
+ "fcvtl v30.4s, v30.4h\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d27, [x23], #0x8\n"
+ "fcvtl v29.4s, v29.4h\n"
+ "ldr d26, [x22], #0x8\n"
+ "fcvtl v28.4s, v28.4h\n"
+ "zip1 v20.4s, v30.4s, v28.4s\n"
+ "ldr d25, [x21], #0x8\n"
+ "fcvtl v21.4s, v21.4h\n"
+ "zip2 v17.4s, v30.4s, v28.4s\n"
+ "ldr d24, [x20], #0x8\n"
+ "fcvtl v27.4s, v27.4h\n"
+ "zip1 v18.4s, v29.4s, v21.4s\n"
"prfm pldl1keep, [x27, #0x70]\n"
- "zip2 v22.4s, v28.4s, v22.4s\n"
- "zip2 v21.4s, v27.4s, v21.4s\n"
+ "fcvtl v26.4s, v26.4h\n"
+ "zip1 v23.4s, v20.4s, v18.4s\n"
"prfm pldl1keep, [x26, #0x70]\n"
+ "fcvtl v25.4s, v25.4h\n"
+ "zip2 v22.4s, v20.4s, v18.4s\n"
"prfm pldl1keep, [x25, #0x70]\n"
- "zip2 v20.4s, v26.4s, v20.4s\n"
- "zip2 v19.4s, v25.4s, v19.4s\n"
+ "fcvtl v24.4s, v24.4h\n"
+ "zip2 v16.4s, v29.4s, v21.4s\n"
"prfm pldl1keep, [x24, #0x70]\n"
"prfm pldl1keep, [x23, #0x70]\n"
+ "zip1 v21.4s, v17.4s, v16.4s\n"
+ "zip2 v20.4s, v17.4s, v16.4s\n"
"prfm pldl1keep, [x22, #0x70]\n"
"prfm pldl1keep, [x21, #0x70]\n"
- "zip1 v16.4s, v24.4s, v23.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip1 v19.4s, v27.4s, v25.4s\n"
+ "zip2 v18.4s, v27.4s, v25.4s\n"
+ "prfm pldl1keep, [x20, #0x70]\n"
+ "zip1 v17.4s, v26.4s, v24.4s\n"
+ "str q23, [%x[out_ptr], #0x0]\n"
+ "zip1 v16.4s, v19.4s, v17.4s\n"
"str q16, [%x[out_ptr], #0x10]\n"
- "zip2 v16.4s, v24.4s, v23.4s\n"
- "str q16, [%x[out_ptr], #0x20]\n"
- "zip2 v17.4s, v18.4s, v17.4s\n"
- "zip1 v16.4s, v22.4s, v21.4s\n"
+ "zip2 v17.4s, v19.4s, v17.4s\n"
+ "str q22, [%x[out_ptr], #0x20]\n"
+ "zip2 v16.4s, v26.4s, v24.4s\n"
"str q17, [%x[out_ptr], #0x30]\n"
- "zip1 v18.4s, v20.4s, v19.4s\n"
- "zip2 v17.4s, v22.4s, v21.4s\n"
- "str q16, [%x[out_ptr], #0x40]\n"
- "zip2 v16.4s, v20.4s, v19.4s\n"
- "str q18, [%x[out_ptr], #0x50]\n"
- "str q17, [%x[out_ptr], #0x60]\n"
+ "zip1 v17.4s, v18.4s, v16.4s\n"
+ "str q21, [%x[out_ptr], #0x40]\n"
+ "zip2 v16.4s, v18.4s, v16.4s\n"
+ "str q17, [%x[out_ptr], #0x50]\n"
+ "str q20, [%x[out_ptr], #0x60]\n"
"str q16, [%x[out_ptr], #0x70]\n"
"add %x[out_ptr], %x[out_ptr], #0x80\n"
"bge 2b\n"
"3:" // Main loop skip
"cbz %x[width], 6f\n"
"tbz %x[width], #1, 4f\n"
- "ldr s28, [x28], #0x4\n"
- "ldr s27, [x27], #0x4\n"
- "mov x20, #0x2\n"
- "ldr s22, [x26], #0x4\n"
- "ldr s21, [x25], #0x4\n"
- "ldr s26, [x24], #0x4\n"
- "ldr s25, [x23], #0x4\n"
- "ldr s20, [x22], #0x4\n"
- "ldr s19, [x21], #0x4\n"
+ "ldr s30, [x27], #0x4\n"
+ "ldr s29, [x26], #0x4\n"
+ "mov x19, #0x2\n"
+ "ldr s28, [x25], #0x4\n"
+ "ldr s21, [x24], #0x4\n"
+ "ldr s27, [x23], #0x4\n"
+ "ldr s26, [x22], #0x4\n"
+ "ldr s25, [x21], #0x4\n"
+ "ldr s24, [x20], #0x4\n"
"tbz %x[width], #0, 5f\n"
- "ld1 { v28.h }[2], [x28]\n"
- "ld1 { v27.h }[2], [x27]\n"
- "mov x20, #0x3\n"
- "ld1 { v22.h }[2], [x26]\n"
- "ld1 { v21.h }[2], [x25]\n"
- "ld1 { v26.h }[2], [x24]\n"
- "ld1 { v25.h }[2], [x23]\n"
- "ld1 { v20.h }[2], [x22]\n"
- "ld1 { v19.h }[2], [x21]\n"
+ "ld1 { v30.h }[2], [x27]\n"
+ "mov x19, #0x3\n"
+ "ld1 { v29.h }[2], [x26]\n"
+ "ld1 { v28.h }[2], [x25]\n"
+ "ld1 { v21.h }[2], [x24]\n"
+ "ld1 { v27.h }[2], [x23]\n"
+ "ld1 { v26.h }[2], [x22]\n"
+ "ld1 { v25.h }[2], [x21]\n"
+ "ld1 { v24.h }[2], [x20]\n"
"b 5f\n"
"4:" // odd_loads_1_0
- "ldr h28, [x28, #0x0]\n"
- "ldr h27, [x27, #0x0]\n"
- "mov x20, #0x1\n"
- "ldr h22, [x26, #0x0]\n"
- "ldr h21, [x25, #0x0]\n"
- "ldr h26, [x24, #0x0]\n"
- "ldr h25, [x23, #0x0]\n"
- "ldr h20, [x22, #0x0]\n"
- "ldr h19, [x21, #0x0]\n"
+ "ldr h30, [x27, #0x0]\n"
+ "mov x19, #0x1\n"
+ "ldr h29, [x26, #0x0]\n"
+ "ldr h28, [x25, #0x0]\n"
+ "ldr h21, [x24, #0x0]\n"
+ "ldr h27, [x23, #0x0]\n"
+ "ldr h26, [x22, #0x0]\n"
+ "ldr h25, [x21, #0x0]\n"
+ "ldr h24, [x20, #0x0]\n"
"5:" // Odd load end
+ "fcvtl v30.4s, v30.4h\n"
+ "fcvtl v29.4s, v29.4h\n"
"fcvtl v28.4s, v28.4h\n"
- "fcvtl v27.4s, v27.4h\n"
- "subs x20, x20, #0x1\n"
- "fcvtl v22.4s, v22.4h\n"
+ "zip1 v20.4s, v30.4s, v28.4s\n"
"fcvtl v21.4s, v21.4h\n"
+ "fcvtl v27.4s, v27.4h\n"
+ "zip1 v18.4s, v29.4s, v21.4s\n"
"fcvtl v26.4s, v26.4h\n"
"fcvtl v25.4s, v25.4h\n"
- "fcvtl v20.4s, v20.4h\n"
- "fcvtl v19.4s, v19.4h\n"
- "zip1 v24.4s, v28.4s, v22.4s\n"
- "zip1 v23.4s, v27.4s, v21.4s\n"
- "zip1 v18.4s, v26.4s, v20.4s\n"
- "zip1 v17.4s, v25.4s, v19.4s\n"
- "zip1 v16.4s, v24.4s, v23.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip1 v23.4s, v20.4s, v18.4s\n"
+ "str q23, [%x[out_ptr], #0x0]\n"
+ "zip1 v19.4s, v27.4s, v25.4s\n"
+ "fcvtl v24.4s, v24.4h\n"
+ "subs x19, x19, #0x1\n"
+ "zip1 v17.4s, v26.4s, v24.4s\n"
+ "zip1 v16.4s, v19.4s, v17.4s\n"
"str q16, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"beq 6f\n"
- "subs x20, x20, #0x1\n"
- "zip2 v16.4s, v24.4s, v23.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip2 v17.4s, v18.4s, v17.4s\n"
+ "zip2 v22.4s, v20.4s, v18.4s\n"
+ "str q22, [%x[out_ptr], #0x0]\n"
+ "zip2 v17.4s, v19.4s, v17.4s\n"
+ "subs x19, x19, #0x1\n"
"str q17, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"beq 6f\n"
- "zip2 v22.4s, v28.4s, v22.4s\n"
- "zip2 v21.4s, v27.4s, v21.4s\n"
- "zip2 v20.4s, v26.4s, v20.4s\n"
- "zip2 v19.4s, v25.4s, v19.4s\n"
- "zip1 v16.4s, v22.4s, v21.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip1 v18.4s, v20.4s, v19.4s\n"
- "str q18, [%x[out_ptr], #0x10]\n"
+ "zip2 v17.4s, v30.4s, v28.4s\n"
+ "zip2 v16.4s, v29.4s, v21.4s\n"
+ "zip1 v21.4s, v17.4s, v16.4s\n"
+ "str q21, [%x[out_ptr], #0x0]\n"
+ "zip2 v18.4s, v27.4s, v25.4s\n"
+ "zip2 v16.4s, v26.4s, v24.4s\n"
+ "zip1 v17.4s, v18.4s, v16.4s\n"
+ "str q17, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"6:" // Odds skip
: [out_ptr] "+&r" (out_ptr), [width] "+&r" (width)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset)
- : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27"
);
}
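
The kernel above widens eight rows of fp16 data to fp32 (the fcvtl instructions) and interleaves them column by column into out_ptr, so each group of eight output lanes holds one column across all rows; as in the sibling kernels below, row pointers beyond the requested height are aliased to row 0 by a csel sequence. A minimal scalar sketch of that layout, assuming an AArch64 toolchain with __fp16 support (the function name and signature are illustrative, not part of the library):

#include <cstddef>

// Scalar model of the 8-row, block-1 interleave with fp16 -> fp32
// widening; the height < 8 padding the asm does via csel is omitted.
void interleave8_fp16_fp32_ref(float *out_ptr, const __fp16 *const *in,
                               size_t width, size_t row_offset)
{
    for (size_t col = 0; col < width; col++) {
        for (size_t row = 0; row < 8; row++) {
            *out_ptr++ = static_cast<float>(in[row][row_offset + col]);
        }
    }
}
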
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp32_fp32.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp32_fp32.hpp
index 6c009b34b8..eefb8549ea 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp32_fp32.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp32_fp32.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -31,45 +31,44 @@ void interleave_block<8, 1, VLType::None, false>(
)
{
__asm__ __volatile__(
- "ldr x28, [%x[in], #0x0]\n"
- "ldr x27, [%x[in], #0x8]\n"
+ "ldr x27, [%x[in], #0x0]\n"
"cmp %x[height], #0x8\n"
- "add x28, x28, %x[row_offset], LSL #2\n"
- "ldr x26, [%x[in], #0x10]\n"
- "ldr x25, [%x[in], #0x18]\n"
+ "ldr x26, [%x[in], #0x8]\n"
"add x27, x27, %x[row_offset], LSL #2\n"
+ "ldr x25, [%x[in], #0x10]\n"
+ "ldr x24, [%x[in], #0x18]\n"
"add x26, x26, %x[row_offset], LSL #2\n"
- "ldr x24, [%x[in], #0x20]\n"
- "ldr x23, [%x[in], #0x28]\n"
+ "ldr x23, [%x[in], #0x20]\n"
"add x25, x25, %x[row_offset], LSL #2\n"
+ "ldr x22, [%x[in], #0x28]\n"
+ "ldr x21, [%x[in], #0x30]\n"
"add x24, x24, %x[row_offset], LSL #2\n"
- "ldr x22, [%x[in], #0x30]\n"
- "ldr x21, [%x[in], #0x38]\n"
+ "ldr x20, [%x[in], #0x38]\n"
"add x23, x23, %x[row_offset], LSL #2\n"
"add x22, x22, %x[row_offset], LSL #2\n"
"add x21, x21, %x[row_offset], LSL #2\n"
+ "add x20, x20, %x[row_offset], LSL #2\n"
"beq 1f\n"
+ "mov x20, x27\n"
"cmp %x[height], #0x2\n"
- "csel x27, x27, x28, GE\n"
- "csel x26, x26, x28, GT\n"
+ "csel x26, x26, x27, GE\n"
+ "csel x25, x25, x27, GT\n"
"cmp %x[height], #0x4\n"
- "csel x25, x25, x28, GE\n"
- "csel x24, x24, x28, GT\n"
+ "csel x24, x24, x27, GE\n"
+ "csel x23, x23, x27, GT\n"
"cmp %x[height], #0x6\n"
- "mov x21, x28\n"
- "csel x23, x23, x28, GE\n"
- "csel x22, x22, x28, GT\n"
+ "csel x22, x22, x27, GE\n"
+ "csel x21, x21, x27, GT\n"
"1:" // no_pointer_adj
- "cmp %x[width], #0x4\n"
- "prfm pldl1keep, [x28, #0x0]\n"
"prfm pldl1keep, [x27, #0x0]\n"
+ "cmp %x[width], #0x4\n"
"prfm pldl1keep, [x26, #0x0]\n"
"prfm pldl1keep, [x25, #0x0]\n"
"prfm pldl1keep, [x24, #0x0]\n"
"prfm pldl1keep, [x23, #0x0]\n"
"prfm pldl1keep, [x22, #0x0]\n"
"prfm pldl1keep, [x21, #0x0]\n"
- "prfm pldl1keep, [x28, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x0]\n"
"prfm pldl1keep, [x27, #0x40]\n"
"prfm pldl1keep, [x26, #0x40]\n"
"prfm pldl1keep, [x25, #0x40]\n"
@@ -77,48 +76,49 @@ void interleave_block<8, 1, VLType::None, false>(
"prfm pldl1keep, [x23, #0x40]\n"
"prfm pldl1keep, [x22, #0x40]\n"
"prfm pldl1keep, [x21, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x40]\n"
"blt 3f\n"
"2:" // Main loop head
- "ldr q28, [x28], #0x10\n"
- "ldr q27, [x27], #0x10\n"
+ "ldr q28, [x27], #0x10\n"
"subs %x[width], %x[width], #0x4\n"
+ "ldr q29, [x26], #0x10\n"
"cmp %x[width], #0x4\n"
- "ldr q22, [x26], #0x10\n"
- "ldr q21, [x25], #0x10\n"
- "zip1 v26.4s, v28.4s, v22.4s\n"
- "zip1 v25.4s, v27.4s, v21.4s\n"
- "ldr q24, [x24], #0x10\n"
- "ldr q23, [x23], #0x10\n"
- "zip2 v22.4s, v28.4s, v22.4s\n"
- "zip2 v21.4s, v27.4s, v21.4s\n"
- "ldr q19, [x22], #0x10\n"
- "ldr q18, [x21], #0x10\n"
- "zip1 v20.4s, v24.4s, v19.4s\n"
- "zip1 v17.4s, v23.4s, v18.4s\n"
- "zip2 v19.4s, v24.4s, v19.4s\n"
- "zip2 v18.4s, v23.4s, v18.4s\n"
- "prfm pldl1keep, [x28, #0x70]\n"
+ "ldr q25, [x25], #0x10\n"
+ "zip1 v22.4s, v28.4s, v25.4s\n"
+ "ldr q21, [x24], #0x10\n"
+ "zip2 v28.4s, v28.4s, v25.4s\n"
+ "ldr q27, [x23], #0x10\n"
+ "ldr q26, [x22], #0x10\n"
+ "zip1 v20.4s, v29.4s, v21.4s\n"
+ "ldr q19, [x21], #0x10\n"
+ "zip2 v25.4s, v29.4s, v21.4s\n"
+ "ldr q24, [x20], #0x10\n"
+ "zip1 v23.4s, v22.4s, v20.4s\n"
"prfm pldl1keep, [x27, #0x70]\n"
+ "zip2 v22.4s, v22.4s, v20.4s\n"
"prfm pldl1keep, [x26, #0x70]\n"
+ "zip1 v21.4s, v28.4s, v25.4s\n"
"prfm pldl1keep, [x25, #0x70]\n"
- "zip1 v16.4s, v26.4s, v25.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
+ "zip1 v18.4s, v27.4s, v19.4s\n"
"prfm pldl1keep, [x24, #0x70]\n"
+ "zip1 v16.4s, v26.4s, v24.4s\n"
"prfm pldl1keep, [x23, #0x70]\n"
- "zip1 v16.4s, v20.4s, v17.4s\n"
- "str q16, [%x[out_ptr], #0x10]\n"
+ "zip1 v17.4s, v18.4s, v16.4s\n"
"prfm pldl1keep, [x22, #0x70]\n"
+ "zip2 v20.4s, v18.4s, v16.4s\n"
"prfm pldl1keep, [x21, #0x70]\n"
- "zip2 v16.4s, v26.4s, v25.4s\n"
- "str q16, [%x[out_ptr], #0x20]\n"
- "zip2 v16.4s, v20.4s, v17.4s\n"
- "str q16, [%x[out_ptr], #0x30]\n"
- "zip1 v16.4s, v22.4s, v21.4s\n"
- "str q16, [%x[out_ptr], #0x40]\n"
- "zip1 v16.4s, v19.4s, v18.4s\n"
- "zip2 v17.4s, v22.4s, v21.4s\n"
- "str q16, [%x[out_ptr], #0x50]\n"
- "zip2 v16.4s, v19.4s, v18.4s\n"
+ "zip2 v19.4s, v27.4s, v19.4s\n"
+ "prfm pldl1keep, [x20, #0x70]\n"
+ "zip2 v16.4s, v26.4s, v24.4s\n"
+ "str q23, [%x[out_ptr], #0x0]\n"
+ "zip1 v18.4s, v19.4s, v16.4s\n"
+ "str q17, [%x[out_ptr], #0x10]\n"
+ "zip2 v17.4s, v28.4s, v25.4s\n"
+ "str q22, [%x[out_ptr], #0x20]\n"
+ "zip2 v16.4s, v19.4s, v16.4s\n"
+ "str q20, [%x[out_ptr], #0x30]\n"
+ "str q21, [%x[out_ptr], #0x40]\n"
+ "str q18, [%x[out_ptr], #0x50]\n"
"str q17, [%x[out_ptr], #0x60]\n"
"str q16, [%x[out_ptr], #0x70]\n"
"add %x[out_ptr], %x[out_ptr], #0x80\n"
@@ -126,69 +126,69 @@ void interleave_block<8, 1, VLType::None, false>(
"3:" // Main loop skip
"cbz %x[width], 6f\n"
"tbz %x[width], #1, 4f\n"
- "ldr d28, [x28], #0x8\n"
- "ldr d27, [x27], #0x8\n"
- "mov x20, #0x2\n"
- "ldr d22, [x26], #0x8\n"
- "ldr d21, [x25], #0x8\n"
- "ldr d24, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d19, [x22], #0x8\n"
- "ldr d18, [x21], #0x8\n"
+ "ldr d28, [x27], #0x8\n"
+ "ldr d29, [x26], #0x8\n"
+ "mov x19, #0x2\n"
+ "ldr d25, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d27, [x23], #0x8\n"
+ "ldr d26, [x22], #0x8\n"
+ "ldr d19, [x21], #0x8\n"
+ "ldr d24, [x20], #0x8\n"
"tbz %x[width], #0, 5f\n"
- "ld1 { v28.s }[2], [x28]\n"
- "ld1 { v27.s }[2], [x27]\n"
- "mov x20, #0x3\n"
- "ld1 { v22.s }[2], [x26]\n"
- "ld1 { v21.s }[2], [x25]\n"
- "ld1 { v24.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
- "ld1 { v19.s }[2], [x22]\n"
- "ld1 { v18.s }[2], [x21]\n"
+ "ld1 { v28.s }[2], [x27]\n"
+ "mov x19, #0x3\n"
+ "ld1 { v29.s }[2], [x26]\n"
+ "ld1 { v25.s }[2], [x25]\n"
+ "ld1 { v21.s }[2], [x24]\n"
+ "ld1 { v27.s }[2], [x23]\n"
+ "ld1 { v26.s }[2], [x22]\n"
+ "ld1 { v19.s }[2], [x21]\n"
+ "ld1 { v24.s }[2], [x20]\n"
"b 5f\n"
"4:" // odd_loads_1_0
- "ldr s28, [x28, #0x0]\n"
- "ldr s27, [x27, #0x0]\n"
- "mov x20, #0x1\n"
- "ldr s22, [x26, #0x0]\n"
- "ldr s21, [x25, #0x0]\n"
- "ldr s24, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
- "ldr s19, [x22, #0x0]\n"
- "ldr s18, [x21, #0x0]\n"
+ "ldr s28, [x27, #0x0]\n"
+ "mov x19, #0x1\n"
+ "ldr s29, [x26, #0x0]\n"
+ "ldr s25, [x25, #0x0]\n"
+ "ldr s21, [x24, #0x0]\n"
+ "ldr s27, [x23, #0x0]\n"
+ "ldr s26, [x22, #0x0]\n"
+ "ldr s19, [x21, #0x0]\n"
+ "ldr s24, [x20, #0x0]\n"
"5:" // Odd load end
- "zip1 v26.4s, v28.4s, v22.4s\n"
- "zip1 v25.4s, v27.4s, v21.4s\n"
- "subs x20, x20, #0x1\n"
- "zip1 v20.4s, v24.4s, v19.4s\n"
- "zip1 v17.4s, v23.4s, v18.4s\n"
- "zip1 v16.4s, v26.4s, v25.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip1 v16.4s, v20.4s, v17.4s\n"
- "str q16, [%x[out_ptr], #0x10]\n"
+ "zip1 v22.4s, v28.4s, v25.4s\n"
+ "subs x19, x19, #0x1\n"
+ "zip1 v20.4s, v29.4s, v21.4s\n"
+ "zip1 v23.4s, v22.4s, v20.4s\n"
+ "str q23, [%x[out_ptr], #0x0]\n"
+ "zip1 v18.4s, v27.4s, v19.4s\n"
+ "zip1 v16.4s, v26.4s, v24.4s\n"
+ "zip1 v17.4s, v18.4s, v16.4s\n"
+ "str q17, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"beq 6f\n"
- "subs x20, x20, #0x1\n"
- "zip2 v16.4s, v26.4s, v25.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip2 v16.4s, v20.4s, v17.4s\n"
- "str q16, [%x[out_ptr], #0x10]\n"
+ "zip2 v22.4s, v22.4s, v20.4s\n"
+ "str q22, [%x[out_ptr], #0x0]\n"
+ "zip2 v20.4s, v18.4s, v16.4s\n"
+ "subs x19, x19, #0x1\n"
+ "str q20, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"beq 6f\n"
- "zip2 v22.4s, v28.4s, v22.4s\n"
- "zip2 v21.4s, v27.4s, v21.4s\n"
- "zip2 v19.4s, v24.4s, v19.4s\n"
- "zip2 v18.4s, v23.4s, v18.4s\n"
- "zip1 v16.4s, v22.4s, v21.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip1 v16.4s, v19.4s, v18.4s\n"
- "str q16, [%x[out_ptr], #0x10]\n"
+ "zip2 v28.4s, v28.4s, v25.4s\n"
+ "zip2 v25.4s, v29.4s, v21.4s\n"
+ "zip1 v21.4s, v28.4s, v25.4s\n"
+ "str q21, [%x[out_ptr], #0x0]\n"
+ "zip2 v19.4s, v27.4s, v19.4s\n"
+ "zip2 v16.4s, v26.4s, v24.4s\n"
+ "zip1 v18.4s, v19.4s, v16.4s\n"
+ "str q18, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"6:" // Odds skip
: [out_ptr] "+&r" (out_ptr), [width] "+&r" (width)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset)
- : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27"
);
}
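
Across every file in this revert the pattern is the same: the eight row pointers move back down from x28..x21 to x27..x20, x19 returns as the loop/tail counter, and the clobber lists gain x19 (plus any extra vector registers the rescheduled code touches). Since x19 is callee-saved under AAPCS64, declaring it in the clobber list is what obliges the compiler to save and restore it around these blocks. A standalone illustration of that mechanism (hypothetical example, not library code):

// Clobbering a callee-saved register from inline asm: listing "x19"
// makes the compiler preserve it across the statement for us.
long bump_via_x19(long v)
{
    __asm__ __volatile__(
        "mov x19, %x[v]\n"
        "add %x[v], x19, #1\n"
        : [v] "+r" (v)
        :
        : "x19");
    return v;
}
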
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s16_s16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s16_s16.hpp
index 767d468ad1..b0523b96ce 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s16_s16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s16_s16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -31,45 +31,44 @@ void interleave_block<8, 1, VLType::None, false>(
)
{
__asm__ __volatile__(
- "ldr x28, [%x[in], #0x0]\n"
- "ldr x27, [%x[in], #0x8]\n"
+ "ldr x27, [%x[in], #0x0]\n"
"cmp %x[height], #0x8\n"
- "add x28, x28, %x[row_offset], LSL #1\n"
- "ldr x26, [%x[in], #0x10]\n"
- "ldr x25, [%x[in], #0x18]\n"
+ "ldr x26, [%x[in], #0x8]\n"
"add x27, x27, %x[row_offset], LSL #1\n"
+ "ldr x25, [%x[in], #0x10]\n"
+ "ldr x24, [%x[in], #0x18]\n"
"add x26, x26, %x[row_offset], LSL #1\n"
- "ldr x24, [%x[in], #0x20]\n"
- "ldr x23, [%x[in], #0x28]\n"
+ "ldr x23, [%x[in], #0x20]\n"
"add x25, x25, %x[row_offset], LSL #1\n"
+ "ldr x22, [%x[in], #0x28]\n"
+ "ldr x21, [%x[in], #0x30]\n"
"add x24, x24, %x[row_offset], LSL #1\n"
- "ldr x22, [%x[in], #0x30]\n"
- "ldr x21, [%x[in], #0x38]\n"
+ "ldr x20, [%x[in], #0x38]\n"
"add x23, x23, %x[row_offset], LSL #1\n"
"add x22, x22, %x[row_offset], LSL #1\n"
"add x21, x21, %x[row_offset], LSL #1\n"
+ "add x20, x20, %x[row_offset], LSL #1\n"
"beq 1f\n"
+ "mov x20, x27\n"
"cmp %x[height], #0x2\n"
- "csel x27, x27, x28, GE\n"
- "csel x26, x26, x28, GT\n"
+ "csel x26, x26, x27, GE\n"
+ "csel x25, x25, x27, GT\n"
"cmp %x[height], #0x4\n"
- "csel x25, x25, x28, GE\n"
- "csel x24, x24, x28, GT\n"
+ "csel x24, x24, x27, GE\n"
+ "csel x23, x23, x27, GT\n"
"cmp %x[height], #0x6\n"
- "mov x21, x28\n"
- "csel x23, x23, x28, GE\n"
- "csel x22, x22, x28, GT\n"
+ "csel x22, x22, x27, GE\n"
+ "csel x21, x21, x27, GT\n"
"1:" // no_pointer_adj
- "cmp %x[width], #0x8\n"
- "prfm pldl1keep, [x28, #0x0]\n"
"prfm pldl1keep, [x27, #0x0]\n"
+ "cmp %x[width], #0x8\n"
"prfm pldl1keep, [x26, #0x0]\n"
"prfm pldl1keep, [x25, #0x0]\n"
"prfm pldl1keep, [x24, #0x0]\n"
"prfm pldl1keep, [x23, #0x0]\n"
"prfm pldl1keep, [x22, #0x0]\n"
"prfm pldl1keep, [x21, #0x0]\n"
- "prfm pldl1keep, [x28, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x0]\n"
"prfm pldl1keep, [x27, #0x40]\n"
"prfm pldl1keep, [x26, #0x40]\n"
"prfm pldl1keep, [x25, #0x40]\n"
@@ -77,192 +76,193 @@ void interleave_block<8, 1, VLType::None, false>(
"prfm pldl1keep, [x23, #0x40]\n"
"prfm pldl1keep, [x22, #0x40]\n"
"prfm pldl1keep, [x21, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x40]\n"
"blt 3f\n"
"2:" // Main loop head
- "ldr q25, [x28], #0x10\n"
"ldr q30, [x27], #0x10\n"
"subs %x[width], %x[width], #0x8\n"
- "cmp %x[width], #0x8\n"
"ldr q29, [x26], #0x10\n"
+ "cmp %x[width], #0x8\n"
"ldr q28, [x25], #0x10\n"
- "ldr q21, [x24], #0x10\n"
- "ldr q27, [x23], #0x10\n"
- "zip1 v23.8h, v25.8h, v21.8h\n"
- "zip1 v26.8h, v30.8h, v27.8h\n"
- "ldr q20, [x22], #0x10\n"
- "ldr q22, [x21], #0x10\n"
- "zip1 v19.8h, v29.8h, v20.8h\n"
- "zip1 v18.8h, v28.8h, v22.8h\n"
- "zip2 v25.8h, v25.8h, v21.8h\n"
- "zip2 v21.8h, v29.8h, v20.8h\n"
- "prfm pldl1keep, [x28, #0x70]\n"
+ "ldr q27, [x24], #0x10\n"
+ "ldr q25, [x23], #0x10\n"
+ "zip1 v26.8h, v30.8h, v25.8h\n"
+ "ldr q21, [x22], #0x10\n"
+ "zip2 v25.8h, v30.8h, v25.8h\n"
+ "ldr q24, [x21], #0x10\n"
+ "ldr q23, [x20], #0x10\n"
+ "zip1 v22.8h, v29.8h, v21.8h\n"
"prfm pldl1keep, [x27, #0x70]\n"
- "zip2 v20.8h, v30.8h, v27.8h\n"
- "zip2 v16.8h, v28.8h, v22.8h\n"
+ "zip2 v21.8h, v29.8h, v21.8h\n"
"prfm pldl1keep, [x26, #0x70]\n"
+ "zip1 v20.8h, v28.8h, v24.8h\n"
"prfm pldl1keep, [x25, #0x70]\n"
- "zip1 v24.8h, v23.8h, v19.8h\n"
- "zip1 v17.8h, v26.8h, v18.8h\n"
+ "zip1 v18.8h, v26.8h, v20.8h\n"
"prfm pldl1keep, [x24, #0x70]\n"
+ "zip1 v19.8h, v27.8h, v23.8h\n"
"prfm pldl1keep, [x23, #0x70]\n"
- "zip2 v23.8h, v23.8h, v19.8h\n"
- "zip2 v19.8h, v26.8h, v18.8h\n"
+ "zip1 v16.8h, v22.8h, v19.8h\n"
"prfm pldl1keep, [x22, #0x70]\n"
+ "zip1 v17.8h, v18.8h, v16.8h\n"
"prfm pldl1keep, [x21, #0x70]\n"
- "zip1 v22.8h, v25.8h, v21.8h\n"
- "zip1 v18.8h, v20.8h, v16.8h\n"
- "zip2 v21.8h, v25.8h, v21.8h\n"
- "zip2 v20.8h, v20.8h, v16.8h\n"
- "zip1 v16.8h, v24.8h, v17.8h\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip2 v16.8h, v24.8h, v17.8h\n"
+ "zip2 v16.8h, v18.8h, v16.8h\n"
+ "prfm pldl1keep, [x20, #0x70]\n"
+ "zip2 v18.8h, v26.8h, v20.8h\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
+ "zip2 v17.8h, v22.8h, v19.8h\n"
"str q16, [%x[out_ptr], #0x10]\n"
- "zip1 v17.8h, v23.8h, v19.8h\n"
- "zip2 v16.8h, v23.8h, v19.8h\n"
- "str q17, [%x[out_ptr], #0x20]\n"
- "zip1 v19.8h, v22.8h, v18.8h\n"
- "zip2 v18.8h, v22.8h, v18.8h\n"
+ "zip1 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [%x[out_ptr], #0x20]\n"
+ "zip2 v16.8h, v18.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x30]\n"
- "zip1 v17.8h, v21.8h, v20.8h\n"
- "zip2 v16.8h, v21.8h, v20.8h\n"
- "str q19, [%x[out_ptr], #0x40]\n"
- "str q18, [%x[out_ptr], #0x50]\n"
- "str q17, [%x[out_ptr], #0x60]\n"
+ "zip2 v20.8h, v28.8h, v24.8h\n"
+ "zip1 v18.8h, v25.8h, v20.8h\n"
+ "zip2 v19.8h, v27.8h, v23.8h\n"
+ "zip1 v17.8h, v21.8h, v19.8h\n"
+ "zip1 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [%x[out_ptr], #0x40]\n"
+ "zip2 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [%x[out_ptr], #0x50]\n"
+ "zip2 v18.8h, v25.8h, v20.8h\n"
+ "zip2 v17.8h, v21.8h, v19.8h\n"
+ "zip1 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [%x[out_ptr], #0x60]\n"
+ "zip2 v16.8h, v18.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x70]\n"
"add %x[out_ptr], %x[out_ptr], #0x80\n"
"bge 2b\n"
"3:" // Main loop skip
"cbz %x[width], 8f\n"
"tbz %x[width], #2, 5f\n"
- "ldr d25, [x28], #0x8\n"
"ldr d30, [x27], #0x8\n"
"ldr d29, [x26], #0x8\n"
"ldr d28, [x25], #0x8\n"
- "ldr d21, [x24], #0x8\n"
- "ldr d27, [x23], #0x8\n"
- "ldr d20, [x22], #0x8\n"
- "ldr d22, [x21], #0x8\n"
+ "ldr d27, [x24], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d24, [x21], #0x8\n"
+ "ldr d23, [x20], #0x8\n"
"tbz %x[width], #1, 4f\n"
- "ld1 { v25.s }[2], [x28], #0x4\n"
"ld1 { v30.s }[2], [x27], #0x4\n"
- "mov x20, #0x6\n"
+ "mov x19, #0x6\n"
"ld1 { v29.s }[2], [x26], #0x4\n"
"ld1 { v28.s }[2], [x25], #0x4\n"
- "ld1 { v21.s }[2], [x24], #0x4\n"
- "ld1 { v27.s }[2], [x23], #0x4\n"
- "ld1 { v20.s }[2], [x22], #0x4\n"
- "ld1 { v22.s }[2], [x21], #0x4\n"
+ "ld1 { v27.s }[2], [x24], #0x4\n"
+ "ld1 { v25.s }[2], [x23], #0x4\n"
+ "ld1 { v21.s }[2], [x22], #0x4\n"
+ "ld1 { v24.s }[2], [x21], #0x4\n"
+ "ld1 { v23.s }[2], [x20], #0x4\n"
"tbz %x[width], #0, 7f\n"
- "ld1 { v25.h }[6], [x28]\n"
"ld1 { v30.h }[6], [x27]\n"
- "mov x20, #0x7\n"
+ "mov x19, #0x7\n"
"ld1 { v29.h }[6], [x26]\n"
"ld1 { v28.h }[6], [x25]\n"
- "ld1 { v21.h }[6], [x24]\n"
- "ld1 { v27.h }[6], [x23]\n"
- "ld1 { v20.h }[6], [x22]\n"
- "ld1 { v22.h }[6], [x21]\n"
+ "ld1 { v27.h }[6], [x24]\n"
+ "ld1 { v25.h }[6], [x23]\n"
+ "ld1 { v21.h }[6], [x22]\n"
+ "ld1 { v24.h }[6], [x21]\n"
+ "ld1 { v23.h }[6], [x20]\n"
"b 7f\n"
"4:" // odd_loads_1_4
- "mov x20, #0x4\n"
+ "mov x19, #0x4\n"
"tbz %x[width], #0, 7f\n"
- "ld1 { v25.h }[4], [x28]\n"
"ld1 { v30.h }[4], [x27]\n"
- "mov x20, #0x5\n"
"ld1 { v29.h }[4], [x26]\n"
+ "mov x19, #0x5\n"
"ld1 { v28.h }[4], [x25]\n"
- "ld1 { v21.h }[4], [x24]\n"
- "ld1 { v27.h }[4], [x23]\n"
- "ld1 { v20.h }[4], [x22]\n"
- "ld1 { v22.h }[4], [x21]\n"
+ "ld1 { v27.h }[4], [x24]\n"
+ "ld1 { v25.h }[4], [x23]\n"
+ "ld1 { v21.h }[4], [x22]\n"
+ "ld1 { v24.h }[4], [x21]\n"
+ "ld1 { v23.h }[4], [x20]\n"
"b 7f\n"
"5:" // odd_loads_2_0
"tbz %x[width], #1, 6f\n"
- "ldr s25, [x28], #0x4\n"
"ldr s30, [x27], #0x4\n"
- "mov x20, #0x2\n"
"ldr s29, [x26], #0x4\n"
+ "mov x19, #0x2\n"
"ldr s28, [x25], #0x4\n"
- "ldr s21, [x24], #0x4\n"
- "ldr s27, [x23], #0x4\n"
- "ldr s20, [x22], #0x4\n"
- "ldr s22, [x21], #0x4\n"
+ "ldr s27, [x24], #0x4\n"
+ "ldr s25, [x23], #0x4\n"
+ "ldr s21, [x22], #0x4\n"
+ "ldr s24, [x21], #0x4\n"
+ "ldr s23, [x20], #0x4\n"
"tbz %x[width], #0, 7f\n"
- "ld1 { v25.h }[2], [x28]\n"
"ld1 { v30.h }[2], [x27]\n"
- "mov x20, #0x3\n"
+ "mov x19, #0x3\n"
"ld1 { v29.h }[2], [x26]\n"
"ld1 { v28.h }[2], [x25]\n"
- "ld1 { v21.h }[2], [x24]\n"
- "ld1 { v27.h }[2], [x23]\n"
- "ld1 { v20.h }[2], [x22]\n"
- "ld1 { v22.h }[2], [x21]\n"
+ "ld1 { v27.h }[2], [x24]\n"
+ "ld1 { v25.h }[2], [x23]\n"
+ "ld1 { v21.h }[2], [x22]\n"
+ "ld1 { v24.h }[2], [x21]\n"
+ "ld1 { v23.h }[2], [x20]\n"
"b 7f\n"
"6:" // odd_loads_1_0
- "ldr h25, [x28, #0x0]\n"
"ldr h30, [x27, #0x0]\n"
- "mov x20, #0x1\n"
+ "mov x19, #0x1\n"
"ldr h29, [x26, #0x0]\n"
"ldr h28, [x25, #0x0]\n"
- "ldr h21, [x24, #0x0]\n"
- "ldr h27, [x23, #0x0]\n"
- "ldr h20, [x22, #0x0]\n"
- "ldr h22, [x21, #0x0]\n"
+ "ldr h27, [x24, #0x0]\n"
+ "ldr h25, [x23, #0x0]\n"
+ "ldr h21, [x22, #0x0]\n"
+ "ldr h24, [x21, #0x0]\n"
+ "ldr h23, [x20, #0x0]\n"
"7:" // Odd load end
- "zip1 v23.8h, v25.8h, v21.8h\n"
- "zip1 v19.8h, v29.8h, v20.8h\n"
- "subs x20, x20, #0x1\n"
- "zip1 v26.8h, v30.8h, v27.8h\n"
- "zip1 v18.8h, v28.8h, v22.8h\n"
- "zip1 v24.8h, v23.8h, v19.8h\n"
- "zip1 v17.8h, v26.8h, v18.8h\n"
- "zip1 v16.8h, v24.8h, v17.8h\n"
- "str q16, [%x[out_ptr], #0x0]\n"
+ "zip1 v26.8h, v30.8h, v25.8h\n"
+ "subs x19, x19, #0x1\n"
+ "zip1 v20.8h, v28.8h, v24.8h\n"
+ "zip1 v18.8h, v26.8h, v20.8h\n"
+ "zip1 v22.8h, v29.8h, v21.8h\n"
+ "zip1 v19.8h, v27.8h, v23.8h\n"
+ "zip1 v16.8h, v22.8h, v19.8h\n"
+ "zip1 v17.8h, v18.8h, v16.8h\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 8f\n"
- "subs x20, x20, #0x1\n"
- "zip2 v16.8h, v24.8h, v17.8h\n"
+ "zip2 v16.8h, v18.8h, v16.8h\n"
"str q16, [%x[out_ptr], #0x0]\n"
+ "subs x19, x19, #0x1\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 8f\n"
- "zip2 v23.8h, v23.8h, v19.8h\n"
- "zip2 v19.8h, v26.8h, v18.8h\n"
- "subs x20, x20, #0x1\n"
- "zip1 v17.8h, v23.8h, v19.8h\n"
- "str q17, [%x[out_ptr], #0x0]\n"
+ "zip2 v18.8h, v26.8h, v20.8h\n"
+ "zip2 v17.8h, v22.8h, v19.8h\n"
+ "subs x19, x19, #0x1\n"
+ "zip1 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [%x[out_ptr], #0x0]\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 8f\n"
- "subs x20, x20, #0x1\n"
- "zip2 v16.8h, v23.8h, v19.8h\n"
+ "zip2 v16.8h, v18.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x0]\n"
+ "subs x19, x19, #0x1\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 8f\n"
- "zip2 v25.8h, v25.8h, v21.8h\n"
- "zip2 v21.8h, v29.8h, v20.8h\n"
- "subs x20, x20, #0x1\n"
- "zip2 v20.8h, v30.8h, v27.8h\n"
- "zip2 v16.8h, v28.8h, v22.8h\n"
- "zip1 v22.8h, v25.8h, v21.8h\n"
- "zip1 v18.8h, v20.8h, v16.8h\n"
- "zip1 v19.8h, v22.8h, v18.8h\n"
- "str q19, [%x[out_ptr], #0x0]\n"
+ "zip2 v25.8h, v30.8h, v25.8h\n"
+ "zip2 v20.8h, v28.8h, v24.8h\n"
+ "subs x19, x19, #0x1\n"
+ "zip1 v18.8h, v25.8h, v20.8h\n"
+ "zip2 v21.8h, v29.8h, v21.8h\n"
+ "zip2 v19.8h, v27.8h, v23.8h\n"
+ "zip1 v17.8h, v21.8h, v19.8h\n"
+ "zip1 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [%x[out_ptr], #0x0]\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 8f\n"
- "subs x20, x20, #0x1\n"
- "zip2 v18.8h, v22.8h, v18.8h\n"
- "str q18, [%x[out_ptr], #0x0]\n"
+ "zip2 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [%x[out_ptr], #0x0]\n"
+ "subs x19, x19, #0x1\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 8f\n"
- "zip2 v21.8h, v25.8h, v21.8h\n"
- "zip2 v20.8h, v20.8h, v16.8h\n"
- "zip1 v17.8h, v21.8h, v20.8h\n"
- "str q17, [%x[out_ptr], #0x0]\n"
+ "zip2 v18.8h, v25.8h, v20.8h\n"
+ "zip2 v17.8h, v21.8h, v19.8h\n"
+ "zip1 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [%x[out_ptr], #0x0]\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"8:" // Odds skip
: [out_ptr] "+&r" (out_ptr), [width] "+&r" (width)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset)
- : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s16_s16_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s16_s16_summing.hpp
index a73792036a..292a38f401 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s16_s16_summing.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s16_s16_summing.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -31,40 +31,39 @@ void interleave_block<8, 1, VLType::None, true>(
)
{
__asm__ __volatile__(
- "ldr x28, [%x[in], #0x0]\n"
- "ldr x27, [%x[in], #0x8]\n"
- "cmp %x[height], #0x8\n"
- "mov x20, #0x0\n"
- "ldr x26, [%x[in], #0x10]\n"
- "ldr x25, [%x[in], #0x18]\n"
- "movi v2.8h, #0x0\n"
- "movi v1.4s, #0x0\n"
- "ldr x24, [%x[in], #0x20]\n"
- "ldr x23, [%x[in], #0x28]\n"
+ "movi v1.8h, #0x0\n"
+ "ldr x27, [%x[in], #0x0]\n"
+ "mov x19, #0x0\n"
"movi v0.4s, #0x0\n"
- "add x28, x28, %x[row_offset], LSL #1\n"
- "ldr x22, [%x[in], #0x30]\n"
- "ldr x21, [%x[in], #0x38]\n"
+ "ldr x26, [%x[in], #0x8]\n"
+ "cmp %x[height], #0x8\n"
+ "movi v31.4s, #0x0\n"
+ "ldr x25, [%x[in], #0x10]\n"
"add x27, x27, %x[row_offset], LSL #1\n"
+ "ldr x24, [%x[in], #0x18]\n"
+ "ldr x23, [%x[in], #0x20]\n"
"add x26, x26, %x[row_offset], LSL #1\n"
+ "ldr x22, [%x[in], #0x28]\n"
"add x25, x25, %x[row_offset], LSL #1\n"
+ "ldr x21, [%x[in], #0x30]\n"
"add x24, x24, %x[row_offset], LSL #1\n"
+ "ldr x20, [%x[in], #0x38]\n"
"add x23, x23, %x[row_offset], LSL #1\n"
"add x22, x22, %x[row_offset], LSL #1\n"
"add x21, x21, %x[row_offset], LSL #1\n"
+ "add x20, x20, %x[row_offset], LSL #1\n"
"beq 1f\n"
+ "mov x20, x27\n"
"cmp %x[height], #0x2\n"
- "csel x27, x27, x28, GE\n"
- "csel x26, x26, x28, GT\n"
+ "csel x26, x26, x27, GE\n"
+ "csel x25, x25, x27, GT\n"
"cmp %x[height], #0x4\n"
- "csel x25, x25, x28, GE\n"
- "csel x24, x24, x28, GT\n"
+ "csel x24, x24, x27, GE\n"
+ "csel x23, x23, x27, GT\n"
"cmp %x[height], #0x6\n"
- "mov x21, x28\n"
- "csel x23, x23, x28, GE\n"
- "csel x22, x22, x28, GT\n"
+ "csel x22, x22, x27, GE\n"
+ "csel x21, x21, x27, GT\n"
"1:" // no_pointer_adj
- "prfm pldl1keep, [x28, #0x0]\n"
"prfm pldl1keep, [x27, #0x0]\n"
"prfm pldl1keep, [x26, #0x0]\n"
"prfm pldl1keep, [x25, #0x0]\n"
@@ -72,7 +71,7 @@ void interleave_block<8, 1, VLType::None, true>(
"prfm pldl1keep, [x23, #0x0]\n"
"prfm pldl1keep, [x22, #0x0]\n"
"prfm pldl1keep, [x21, #0x0]\n"
- "prfm pldl1keep, [x28, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x0]\n"
"prfm pldl1keep, [x27, #0x40]\n"
"prfm pldl1keep, [x26, #0x40]\n"
"prfm pldl1keep, [x25, #0x40]\n"
@@ -80,225 +79,226 @@ void interleave_block<8, 1, VLType::None, true>(
"prfm pldl1keep, [x23, #0x40]\n"
"prfm pldl1keep, [x22, #0x40]\n"
"prfm pldl1keep, [x21, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x40]\n"
"cbnz %w[first], 2f\n"
"sub %x[out_ptr], %x[out_ptr], #0x20\n"
- "ld1 { v1.4s }, [%x[out_ptr]]\n"
- "ldr q0, [%x[out_ptr], #0x10]\n"
+ "ld1 { v0.4s }, [%x[out_ptr]]\n"
+ "ldr q31, [%x[out_ptr], #0x10]\n"
"2:" // first_pass
"cmp %x[width], #0x8\n"
"blt 5f\n"
"3:" // Main loop head
- "cmp x20, #0xe\n"
+ "cmp x19, #0xe\n"
"ble 4f\n"
- "saddw v1.4s, v1.4s, v2.4h\n"
- "saddw2 v0.4s, v0.4s, v2.8h\n"
- "mov x20, #0x0\n"
- "movi v2.8h, #0x0\n"
+ "saddw v0.4s, v0.4s, v1.4h\n"
+ "saddw2 v31.4s, v31.4s, v1.8h\n"
+ "mov x19, #0x0\n"
+ "movi v1.8h, #0x0\n"
"4:" // no_accumulate_16
- "ldr q31, [x28], #0x10\n"
"ldr q30, [x27], #0x10\n"
- "subs %x[width], %x[width], #0x8\n"
- "cmp %x[width], #0x8\n"
+ "add x19, x19, #0x1\n"
"ldr q29, [x26], #0x10\n"
+ "subs %x[width], %x[width], #0x8\n"
"ldr q28, [x25], #0x10\n"
- "add x20, x20, #0x1\n"
+ "cmp %x[width], #0x8\n"
"ldr q27, [x24], #0x10\n"
- "ldr q26, [x23], #0x10\n"
- "zip1 v25.8h, v31.8h, v27.8h\n"
- "zip1 v22.8h, v30.8h, v26.8h\n"
- "ldr q24, [x22], #0x10\n"
- "ldr q23, [x21], #0x10\n"
- "zip1 v18.8h, v29.8h, v24.8h\n"
- "zip1 v21.8h, v28.8h, v23.8h\n"
- "zip1 v17.8h, v25.8h, v18.8h\n"
- "zip1 v16.8h, v22.8h, v21.8h\n"
- "prfm pldl1keep, [x28, #0x70]\n"
+ "ldr q25, [x23], #0x10\n"
+ "zip1 v26.8h, v30.8h, v25.8h\n"
+ "ldr q21, [x22], #0x10\n"
+ "zip2 v25.8h, v30.8h, v25.8h\n"
+ "ldr q24, [x21], #0x10\n"
+ "ldr q23, [x20], #0x10\n"
+ "zip1 v22.8h, v29.8h, v21.8h\n"
"prfm pldl1keep, [x27, #0x70]\n"
- "zip1 v20.8h, v17.8h, v16.8h\n"
- "add v2.8h, v2.8h, v20.8h\n"
+ "zip2 v21.8h, v29.8h, v21.8h\n"
"prfm pldl1keep, [x26, #0x70]\n"
+ "zip1 v20.8h, v28.8h, v24.8h\n"
"prfm pldl1keep, [x25, #0x70]\n"
- "zip2 v19.8h, v17.8h, v16.8h\n"
- "zip2 v18.8h, v25.8h, v18.8h\n"
+ "zip1 v18.8h, v26.8h, v20.8h\n"
"prfm pldl1keep, [x24, #0x70]\n"
+ "zip1 v19.8h, v27.8h, v23.8h\n"
"prfm pldl1keep, [x23, #0x70]\n"
- "zip2 v17.8h, v22.8h, v21.8h\n"
- "add v2.8h, v2.8h, v19.8h\n"
+ "zip1 v16.8h, v22.8h, v19.8h\n"
"prfm pldl1keep, [x22, #0x70]\n"
+ "zip1 v17.8h, v18.8h, v16.8h\n"
"prfm pldl1keep, [x21, #0x70]\n"
+ "add v1.8h, v1.8h, v17.8h\n"
+ "prfm pldl1keep, [x20, #0x70]\n"
+ "zip2 v16.8h, v18.8h, v16.8h\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
+ "zip2 v18.8h, v26.8h, v20.8h\n"
+ "str q16, [%x[out_ptr], #0x10]\n"
+ "add v1.8h, v1.8h, v16.8h\n"
+ "zip2 v17.8h, v22.8h, v19.8h\n"
"zip1 v16.8h, v18.8h, v17.8h\n"
- "zip2 v22.8h, v31.8h, v27.8h\n"
- "str q20, [%x[out_ptr], #0x0]\n"
- "zip2 v21.8h, v29.8h, v24.8h\n"
- "zip2 v20.8h, v30.8h, v26.8h\n"
- "str q19, [%x[out_ptr], #0x10]\n"
- "zip2 v19.8h, v28.8h, v23.8h\n"
- "add v2.8h, v2.8h, v16.8h\n"
"str q16, [%x[out_ptr], #0x20]\n"
+ "add v1.8h, v1.8h, v16.8h\n"
"zip2 v16.8h, v18.8h, v17.8h\n"
- "zip1 v18.8h, v22.8h, v21.8h\n"
"str q16, [%x[out_ptr], #0x30]\n"
- "zip1 v17.8h, v20.8h, v19.8h\n"
- "add v2.8h, v2.8h, v16.8h\n"
+ "zip2 v20.8h, v28.8h, v24.8h\n"
+ "add v1.8h, v1.8h, v16.8h\n"
+ "zip1 v18.8h, v25.8h, v20.8h\n"
+ "zip2 v19.8h, v27.8h, v23.8h\n"
+ "zip1 v17.8h, v21.8h, v19.8h\n"
"zip1 v16.8h, v18.8h, v17.8h\n"
- "add v2.8h, v2.8h, v16.8h\n"
"str q16, [%x[out_ptr], #0x40]\n"
+ "add v1.8h, v1.8h, v16.8h\n"
"zip2 v16.8h, v18.8h, v17.8h\n"
- "zip2 v18.8h, v22.8h, v21.8h\n"
"str q16, [%x[out_ptr], #0x50]\n"
- "zip2 v17.8h, v20.8h, v19.8h\n"
- "add v2.8h, v2.8h, v16.8h\n"
+ "zip2 v18.8h, v25.8h, v20.8h\n"
+ "add v1.8h, v1.8h, v16.8h\n"
+ "zip2 v17.8h, v21.8h, v19.8h\n"
"zip1 v16.8h, v18.8h, v17.8h\n"
- "add v2.8h, v2.8h, v16.8h\n"
"str q16, [%x[out_ptr], #0x60]\n"
+ "add v1.8h, v1.8h, v16.8h\n"
"zip2 v16.8h, v18.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x70]\n"
- "add v2.8h, v2.8h, v16.8h\n"
"add %x[out_ptr], %x[out_ptr], #0x80\n"
+ "add v1.8h, v1.8h, v16.8h\n"
"bge 3b\n"
"5:" // Main loop skip
"cbz %x[width], 10f\n"
"tbz %x[width], #2, 7f\n"
- "ldr d31, [x28], #0x8\n"
"ldr d30, [x27], #0x8\n"
"ldr d29, [x26], #0x8\n"
"ldr d28, [x25], #0x8\n"
"ldr d27, [x24], #0x8\n"
- "ldr d26, [x23], #0x8\n"
- "ldr d24, [x22], #0x8\n"
- "ldr d23, [x21], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d24, [x21], #0x8\n"
+ "ldr d23, [x20], #0x8\n"
"tbz %x[width], #1, 6f\n"
- "ld1 { v31.s }[2], [x28], #0x4\n"
"ld1 { v30.s }[2], [x27], #0x4\n"
- "mov x20, #0x6\n"
+ "mov x19, #0x6\n"
"ld1 { v29.s }[2], [x26], #0x4\n"
"ld1 { v28.s }[2], [x25], #0x4\n"
"ld1 { v27.s }[2], [x24], #0x4\n"
- "ld1 { v26.s }[2], [x23], #0x4\n"
- "ld1 { v24.s }[2], [x22], #0x4\n"
- "ld1 { v23.s }[2], [x21], #0x4\n"
+ "ld1 { v25.s }[2], [x23], #0x4\n"
+ "ld1 { v21.s }[2], [x22], #0x4\n"
+ "ld1 { v24.s }[2], [x21], #0x4\n"
+ "ld1 { v23.s }[2], [x20], #0x4\n"
"tbz %x[width], #0, 9f\n"
- "ld1 { v31.h }[6], [x28]\n"
"ld1 { v30.h }[6], [x27]\n"
- "mov x20, #0x7\n"
+ "mov x19, #0x7\n"
"ld1 { v29.h }[6], [x26]\n"
"ld1 { v28.h }[6], [x25]\n"
"ld1 { v27.h }[6], [x24]\n"
- "ld1 { v26.h }[6], [x23]\n"
- "ld1 { v24.h }[6], [x22]\n"
- "ld1 { v23.h }[6], [x21]\n"
+ "ld1 { v25.h }[6], [x23]\n"
+ "ld1 { v21.h }[6], [x22]\n"
+ "ld1 { v24.h }[6], [x21]\n"
+ "ld1 { v23.h }[6], [x20]\n"
"b 9f\n"
"6:" // odd_loads_1_4
- "mov x20, #0x4\n"
+ "mov x19, #0x4\n"
"tbz %x[width], #0, 9f\n"
- "ld1 { v31.h }[4], [x28]\n"
"ld1 { v30.h }[4], [x27]\n"
- "mov x20, #0x5\n"
"ld1 { v29.h }[4], [x26]\n"
+ "mov x19, #0x5\n"
"ld1 { v28.h }[4], [x25]\n"
"ld1 { v27.h }[4], [x24]\n"
- "ld1 { v26.h }[4], [x23]\n"
- "ld1 { v24.h }[4], [x22]\n"
- "ld1 { v23.h }[4], [x21]\n"
+ "ld1 { v25.h }[4], [x23]\n"
+ "ld1 { v21.h }[4], [x22]\n"
+ "ld1 { v24.h }[4], [x21]\n"
+ "ld1 { v23.h }[4], [x20]\n"
"b 9f\n"
"7:" // odd_loads_2_0
"tbz %x[width], #1, 8f\n"
- "ldr s31, [x28], #0x4\n"
"ldr s30, [x27], #0x4\n"
- "mov x20, #0x2\n"
"ldr s29, [x26], #0x4\n"
+ "mov x19, #0x2\n"
"ldr s28, [x25], #0x4\n"
"ldr s27, [x24], #0x4\n"
- "ldr s26, [x23], #0x4\n"
- "ldr s24, [x22], #0x4\n"
- "ldr s23, [x21], #0x4\n"
+ "ldr s25, [x23], #0x4\n"
+ "ldr s21, [x22], #0x4\n"
+ "ldr s24, [x21], #0x4\n"
+ "ldr s23, [x20], #0x4\n"
"tbz %x[width], #0, 9f\n"
- "ld1 { v31.h }[2], [x28]\n"
"ld1 { v30.h }[2], [x27]\n"
- "mov x20, #0x3\n"
+ "mov x19, #0x3\n"
"ld1 { v29.h }[2], [x26]\n"
"ld1 { v28.h }[2], [x25]\n"
"ld1 { v27.h }[2], [x24]\n"
- "ld1 { v26.h }[2], [x23]\n"
- "ld1 { v24.h }[2], [x22]\n"
- "ld1 { v23.h }[2], [x21]\n"
+ "ld1 { v25.h }[2], [x23]\n"
+ "ld1 { v21.h }[2], [x22]\n"
+ "ld1 { v24.h }[2], [x21]\n"
+ "ld1 { v23.h }[2], [x20]\n"
"b 9f\n"
"8:" // odd_loads_1_0
- "ldr h31, [x28, #0x0]\n"
"ldr h30, [x27, #0x0]\n"
- "mov x20, #0x1\n"
+ "mov x19, #0x1\n"
"ldr h29, [x26, #0x0]\n"
"ldr h28, [x25, #0x0]\n"
"ldr h27, [x24, #0x0]\n"
- "ldr h26, [x23, #0x0]\n"
- "ldr h24, [x22, #0x0]\n"
- "ldr h23, [x21, #0x0]\n"
+ "ldr h25, [x23, #0x0]\n"
+ "ldr h21, [x22, #0x0]\n"
+ "ldr h24, [x21, #0x0]\n"
+ "ldr h23, [x20, #0x0]\n"
"9:" // Odd load end
- "zip1 v25.8h, v31.8h, v27.8h\n"
- "zip1 v18.8h, v29.8h, v24.8h\n"
- "subs x20, x20, #0x1\n"
- "zip1 v22.8h, v30.8h, v26.8h\n"
- "zip1 v21.8h, v28.8h, v23.8h\n"
- "zip1 v17.8h, v25.8h, v18.8h\n"
- "zip1 v16.8h, v22.8h, v21.8h\n"
- "zip1 v20.8h, v17.8h, v16.8h\n"
- "str q20, [%x[out_ptr], #0x0]\n"
- "add v2.8h, v2.8h, v20.8h\n"
+ "zip1 v26.8h, v30.8h, v25.8h\n"
+ "subs x19, x19, #0x1\n"
+ "zip1 v20.8h, v28.8h, v24.8h\n"
+ "zip1 v18.8h, v26.8h, v20.8h\n"
+ "zip1 v22.8h, v29.8h, v21.8h\n"
+ "zip1 v19.8h, v27.8h, v23.8h\n"
+ "zip1 v16.8h, v22.8h, v19.8h\n"
+ "zip1 v17.8h, v18.8h, v16.8h\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
+ "add v1.8h, v1.8h, v17.8h\n"
"beq 10f\n"
- "zip2 v19.8h, v17.8h, v16.8h\n"
- "subs x20, x20, #0x1\n"
- "str q19, [%x[out_ptr], #0x0]\n"
- "add v2.8h, v2.8h, v19.8h\n"
+ "zip2 v16.8h, v18.8h, v16.8h\n"
+ "str q16, [%x[out_ptr], #0x0]\n"
+ "subs x19, x19, #0x1\n"
+ "add v1.8h, v1.8h, v16.8h\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 10f\n"
- "zip2 v18.8h, v25.8h, v18.8h\n"
- "zip2 v17.8h, v22.8h, v21.8h\n"
- "subs x20, x20, #0x1\n"
+ "zip2 v18.8h, v26.8h, v20.8h\n"
+ "zip2 v17.8h, v22.8h, v19.8h\n"
+ "subs x19, x19, #0x1\n"
"zip1 v16.8h, v18.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x0]\n"
- "add v2.8h, v2.8h, v16.8h\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
+ "add v1.8h, v1.8h, v16.8h\n"
"beq 10f\n"
"zip2 v16.8h, v18.8h, v17.8h\n"
- "subs x20, x20, #0x1\n"
"str q16, [%x[out_ptr], #0x0]\n"
- "add v2.8h, v2.8h, v16.8h\n"
+ "subs x19, x19, #0x1\n"
+ "add v1.8h, v1.8h, v16.8h\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 10f\n"
- "zip2 v22.8h, v31.8h, v27.8h\n"
- "zip2 v21.8h, v29.8h, v24.8h\n"
- "subs x20, x20, #0x1\n"
- "zip2 v20.8h, v30.8h, v26.8h\n"
- "zip2 v19.8h, v28.8h, v23.8h\n"
- "zip1 v18.8h, v22.8h, v21.8h\n"
- "zip1 v17.8h, v20.8h, v19.8h\n"
+ "zip2 v25.8h, v30.8h, v25.8h\n"
+ "zip2 v20.8h, v28.8h, v24.8h\n"
+ "subs x19, x19, #0x1\n"
+ "zip1 v18.8h, v25.8h, v20.8h\n"
+ "zip2 v21.8h, v29.8h, v21.8h\n"
+ "zip2 v19.8h, v27.8h, v23.8h\n"
+ "zip1 v17.8h, v21.8h, v19.8h\n"
"zip1 v16.8h, v18.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x0]\n"
- "add v2.8h, v2.8h, v16.8h\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
+ "add v1.8h, v1.8h, v16.8h\n"
"beq 10f\n"
"zip2 v16.8h, v18.8h, v17.8h\n"
- "subs x20, x20, #0x1\n"
"str q16, [%x[out_ptr], #0x0]\n"
- "add v2.8h, v2.8h, v16.8h\n"
+ "subs x19, x19, #0x1\n"
+ "add v1.8h, v1.8h, v16.8h\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 10f\n"
- "zip2 v18.8h, v22.8h, v21.8h\n"
- "zip2 v17.8h, v20.8h, v19.8h\n"
+ "zip2 v18.8h, v25.8h, v20.8h\n"
+ "zip2 v17.8h, v21.8h, v19.8h\n"
"zip1 v16.8h, v18.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x0]\n"
- "add v2.8h, v2.8h, v16.8h\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
+ "add v1.8h, v1.8h, v16.8h\n"
"10:" // Odds skip
- "saddw v1.4s, v1.4s, v2.4h\n"
- "saddw2 v0.4s, v0.4s, v2.8h\n"
- "str q1, [%x[out_ptr], #0x0]\n"
- "str q0, [%x[out_ptr], #0x10]\n"
+ "saddw v0.4s, v0.4s, v1.4h\n"
+ "str q0, [%x[out_ptr], #0x0]\n"
+ "saddw2 v31.4s, v31.4s, v1.8h\n"
+ "str q31, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
: [out_ptr] "+&r" (out_ptr), [width] "+&r" (width)
: [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset)
- : "cc", "memory", "v0", "v1", "v2", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27"
);
}
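
The summing variant performs the same interleave while keeping one running sum per row: each stored vector is added into a 16-bit accumulator (v1 after this revert), which the cmp x19, #0xe guard widens into the 32-bit accumulators (v0/v31) at least every fourteen loop iterations, bounding how long sums sit in 16-bit lanes; the eight totals are appended after the interleaved block and reloaded from there on subsequent calls (first == 0). A scalar sketch under those assumptions, with illustrative names and the periodic widening elided:

#include <cstdint>
#include <cstring>

// Scalar model of the s16 summing interleave: emit the 8-row
// interleave, then append one int32 sum per row; when resuming,
// step back over and reload the sums the previous call stored.
void interleave8_s16_sum_ref(int16_t *&out_ptr, const int16_t *const *in,
                             size_t width, size_t row_offset, bool first)
{
    int32_t sums[8] = {0};
    if (!first) {
        out_ptr -= 16; // 8 x int32 occupies 16 int16 slots
        std::memcpy(sums, out_ptr, sizeof(sums));
    }
    for (size_t col = 0; col < width; col++) {
        for (size_t row = 0; row < 8; row++) {
            const int16_t v = in[row][row_offset + col];
            *out_ptr++ = v;
            sums[row] += v;
        }
    }
    std::memcpy(out_ptr, sums, sizeof(sums));
    out_ptr += 16;
}
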
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s8_s16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s8_s16.hpp
index 4a38187638..6cfed8f3a4 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s8_s16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s8_s16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -31,45 +31,44 @@ void interleave_block<8, 1, VLType::None, false>(
)
{
__asm__ __volatile__(
- "ldr x28, [%x[in], #0x0]\n"
- "ldr x27, [%x[in], #0x8]\n"
+ "ldr x27, [%x[in], #0x0]\n"
"cmp %x[height], #0x8\n"
- "add x28, x28, %x[row_offset]\n"
- "ldr x26, [%x[in], #0x10]\n"
- "ldr x25, [%x[in], #0x18]\n"
+ "ldr x26, [%x[in], #0x8]\n"
"add x27, x27, %x[row_offset]\n"
+ "ldr x25, [%x[in], #0x10]\n"
+ "ldr x24, [%x[in], #0x18]\n"
"add x26, x26, %x[row_offset]\n"
- "ldr x24, [%x[in], #0x20]\n"
- "ldr x23, [%x[in], #0x28]\n"
+ "ldr x23, [%x[in], #0x20]\n"
"add x25, x25, %x[row_offset]\n"
+ "ldr x22, [%x[in], #0x28]\n"
+ "ldr x21, [%x[in], #0x30]\n"
"add x24, x24, %x[row_offset]\n"
- "ldr x22, [%x[in], #0x30]\n"
- "ldr x21, [%x[in], #0x38]\n"
+ "ldr x20, [%x[in], #0x38]\n"
"add x23, x23, %x[row_offset]\n"
"add x22, x22, %x[row_offset]\n"
"add x21, x21, %x[row_offset]\n"
+ "add x20, x20, %x[row_offset]\n"
"beq 1f\n"
+ "mov x20, x27\n"
"cmp %x[height], #0x2\n"
- "csel x27, x27, x28, GE\n"
- "csel x26, x26, x28, GT\n"
+ "csel x26, x26, x27, GE\n"
+ "csel x25, x25, x27, GT\n"
"cmp %x[height], #0x4\n"
- "csel x25, x25, x28, GE\n"
- "csel x24, x24, x28, GT\n"
+ "csel x24, x24, x27, GE\n"
+ "csel x23, x23, x27, GT\n"
"cmp %x[height], #0x6\n"
- "mov x21, x28\n"
- "csel x23, x23, x28, GE\n"
- "csel x22, x22, x28, GT\n"
+ "csel x22, x22, x27, GE\n"
+ "csel x21, x21, x27, GT\n"
"1:" // no_pointer_adj
- "cmp %x[width], #0x8\n"
- "prfm pldl1keep, [x28, #0x0]\n"
"prfm pldl1keep, [x27, #0x0]\n"
+ "cmp %x[width], #0x8\n"
"prfm pldl1keep, [x26, #0x0]\n"
"prfm pldl1keep, [x25, #0x0]\n"
"prfm pldl1keep, [x24, #0x0]\n"
"prfm pldl1keep, [x23, #0x0]\n"
"prfm pldl1keep, [x22, #0x0]\n"
"prfm pldl1keep, [x21, #0x0]\n"
- "prfm pldl1keep, [x28, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x0]\n"
"prfm pldl1keep, [x27, #0x40]\n"
"prfm pldl1keep, [x26, #0x40]\n"
"prfm pldl1keep, [x25, #0x40]\n"
@@ -77,208 +76,209 @@ void interleave_block<8, 1, VLType::None, false>(
"prfm pldl1keep, [x23, #0x40]\n"
"prfm pldl1keep, [x22, #0x40]\n"
"prfm pldl1keep, [x21, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x40]\n"
"blt 3f\n"
"2:" // Main loop head
- "ldr d25, [x28], #0x8\n"
- "ldr d30, [x27], #0x8\n"
- "sshll v25.8h, v25.8b, #0x0\n"
+ "ldr d31, [x27], #0x8\n"
+ "sshll v31.8h, v31.8b, #0x0\n"
+ "ldr d30, [x26], #0x8\n"
+ "subs %x[width], %x[width], #0x8\n"
"sshll v30.8h, v30.8b, #0x0\n"
- "ldr d29, [x26], #0x8\n"
- "ldr d28, [x25], #0x8\n"
+ "ldr d29, [x25], #0x8\n"
+ "cmp %x[width], #0x8\n"
"sshll v29.8h, v29.8b, #0x0\n"
+ "ldr d28, [x24], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
"sshll v28.8h, v28.8b, #0x0\n"
- "ldr d21, [x24], #0x8\n"
- "ldr d27, [x23], #0x8\n"
- "sshll v21.8h, v21.8b, #0x0\n"
- "sshll v27.8h, v27.8b, #0x0\n"
- "ldr d20, [x22], #0x8\n"
- "ldr d26, [x21], #0x8\n"
- "sshll v20.8h, v20.8b, #0x0\n"
- "sshll v26.8h, v26.8b, #0x0\n"
- "zip1 v23.8h, v25.8h, v21.8h\n"
- "zip1 v22.8h, v29.8h, v20.8h\n"
- "subs %x[width], %x[width], #0x8\n"
- "cmp %x[width], #0x8\n"
- "zip1 v19.8h, v30.8h, v27.8h\n"
- "zip1 v18.8h, v28.8h, v26.8h\n"
- "prfm pldl1keep, [x28, #0x70]\n"
+ "ldr d23, [x22], #0x8\n"
+ "sshll v25.8h, v25.8b, #0x0\n"
+ "ldr d27, [x21], #0x8\n"
+ "zip1 v20.8h, v31.8h, v25.8h\n"
+ "ldr d26, [x20], #0x8\n"
+ "zip2 v25.8h, v31.8h, v25.8h\n"
"prfm pldl1keep, [x27, #0x70]\n"
- "zip2 v25.8h, v25.8h, v21.8h\n"
- "zip2 v21.8h, v29.8h, v20.8h\n"
"prfm pldl1keep, [x26, #0x70]\n"
+ "sshll v23.8h, v23.8b, #0x0\n"
"prfm pldl1keep, [x25, #0x70]\n"
- "zip2 v20.8h, v30.8h, v27.8h\n"
- "zip2 v16.8h, v28.8h, v26.8h\n"
+ "zip1 v24.8h, v30.8h, v23.8h\n"
+ "zip2 v23.8h, v30.8h, v23.8h\n"
"prfm pldl1keep, [x24, #0x70]\n"
+ "sshll v27.8h, v27.8b, #0x0\n"
"prfm pldl1keep, [x23, #0x70]\n"
- "zip1 v24.8h, v23.8h, v22.8h\n"
- "zip1 v17.8h, v19.8h, v18.8h\n"
+ "zip1 v19.8h, v29.8h, v27.8h\n"
"prfm pldl1keep, [x22, #0x70]\n"
+ "zip1 v22.8h, v20.8h, v19.8h\n"
"prfm pldl1keep, [x21, #0x70]\n"
- "zip2 v23.8h, v23.8h, v22.8h\n"
- "zip2 v19.8h, v19.8h, v18.8h\n"
- "zip1 v22.8h, v25.8h, v21.8h\n"
- "zip1 v18.8h, v20.8h, v16.8h\n"
- "zip2 v21.8h, v25.8h, v21.8h\n"
- "zip2 v20.8h, v20.8h, v16.8h\n"
- "zip1 v16.8h, v24.8h, v17.8h\n"
+ "zip2 v21.8h, v20.8h, v19.8h\n"
+ "prfm pldl1keep, [x20, #0x70]\n"
+ "zip2 v19.8h, v29.8h, v27.8h\n"
+ "zip1 v20.8h, v25.8h, v19.8h\n"
+ "zip2 v19.8h, v25.8h, v19.8h\n"
+ "sshll v26.8h, v26.8b, #0x0\n"
+ "zip1 v18.8h, v28.8h, v26.8h\n"
+ "zip1 v17.8h, v24.8h, v18.8h\n"
+ "zip1 v16.8h, v22.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x0]\n"
- "zip2 v16.8h, v24.8h, v17.8h\n"
+ "zip2 v16.8h, v22.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x10]\n"
- "zip1 v17.8h, v23.8h, v19.8h\n"
- "zip2 v16.8h, v23.8h, v19.8h\n"
- "str q17, [%x[out_ptr], #0x20]\n"
- "zip1 v19.8h, v22.8h, v18.8h\n"
- "zip2 v18.8h, v22.8h, v18.8h\n"
+ "zip2 v17.8h, v24.8h, v18.8h\n"
+ "zip1 v16.8h, v21.8h, v17.8h\n"
+ "str q16, [%x[out_ptr], #0x20]\n"
+ "zip2 v16.8h, v21.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x30]\n"
- "zip1 v17.8h, v21.8h, v20.8h\n"
- "zip2 v16.8h, v21.8h, v20.8h\n"
- "str q19, [%x[out_ptr], #0x40]\n"
- "str q18, [%x[out_ptr], #0x50]\n"
- "str q17, [%x[out_ptr], #0x60]\n"
+ "zip2 v18.8h, v28.8h, v26.8h\n"
+ "zip1 v17.8h, v23.8h, v18.8h\n"
+ "zip1 v16.8h, v20.8h, v17.8h\n"
+ "str q16, [%x[out_ptr], #0x40]\n"
+ "zip2 v16.8h, v20.8h, v17.8h\n"
+ "str q16, [%x[out_ptr], #0x50]\n"
+ "zip2 v17.8h, v23.8h, v18.8h\n"
+ "zip1 v16.8h, v19.8h, v17.8h\n"
+ "str q16, [%x[out_ptr], #0x60]\n"
+ "zip2 v16.8h, v19.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x70]\n"
"add %x[out_ptr], %x[out_ptr], #0x80\n"
"bge 2b\n"
"3:" // Main loop skip
"cbz %x[width], 8f\n"
"tbz %x[width], #2, 5f\n"
- "ldr s25, [x28], #0x4\n"
- "ldr s30, [x27], #0x4\n"
- "ldr s29, [x26], #0x4\n"
- "ldr s28, [x25], #0x4\n"
- "ldr s21, [x24], #0x4\n"
- "ldr s27, [x23], #0x4\n"
- "ldr s20, [x22], #0x4\n"
- "ldr s26, [x21], #0x4\n"
+ "ldr s31, [x27], #0x4\n"
+ "ldr s30, [x26], #0x4\n"
+ "ldr s29, [x25], #0x4\n"
+ "ldr s28, [x24], #0x4\n"
+ "ldr s25, [x23], #0x4\n"
+ "ldr s23, [x22], #0x4\n"
+ "ldr s27, [x21], #0x4\n"
+ "ldr s26, [x20], #0x4\n"
"tbz %x[width], #1, 4f\n"
- "ld1 { v25.h }[2], [x28], #0x2\n"
- "ld1 { v30.h }[2], [x27], #0x2\n"
- "mov x20, #0x6\n"
- "ld1 { v29.h }[2], [x26], #0x2\n"
- "ld1 { v28.h }[2], [x25], #0x2\n"
- "ld1 { v21.h }[2], [x24], #0x2\n"
- "ld1 { v27.h }[2], [x23], #0x2\n"
- "ld1 { v20.h }[2], [x22], #0x2\n"
- "ld1 { v26.h }[2], [x21], #0x2\n"
+ "ld1 { v31.h }[2], [x27], #0x2\n"
+ "mov x19, #0x6\n"
+ "ld1 { v30.h }[2], [x26], #0x2\n"
+ "ld1 { v29.h }[2], [x25], #0x2\n"
+ "ld1 { v28.h }[2], [x24], #0x2\n"
+ "ld1 { v25.h }[2], [x23], #0x2\n"
+ "ld1 { v23.h }[2], [x22], #0x2\n"
+ "ld1 { v27.h }[2], [x21], #0x2\n"
+ "ld1 { v26.h }[2], [x20], #0x2\n"
"tbz %x[width], #0, 7f\n"
- "ld1 { v25.b }[6], [x28]\n"
- "ld1 { v30.b }[6], [x27]\n"
- "mov x20, #0x7\n"
- "ld1 { v29.b }[6], [x26]\n"
- "ld1 { v28.b }[6], [x25]\n"
- "ld1 { v21.b }[6], [x24]\n"
- "ld1 { v27.b }[6], [x23]\n"
- "ld1 { v20.b }[6], [x22]\n"
- "ld1 { v26.b }[6], [x21]\n"
+ "ld1 { v31.b }[6], [x27]\n"
+ "mov x19, #0x7\n"
+ "ld1 { v30.b }[6], [x26]\n"
+ "ld1 { v29.b }[6], [x25]\n"
+ "ld1 { v28.b }[6], [x24]\n"
+ "ld1 { v25.b }[6], [x23]\n"
+ "ld1 { v23.b }[6], [x22]\n"
+ "ld1 { v27.b }[6], [x21]\n"
+ "ld1 { v26.b }[6], [x20]\n"
"b 7f\n"
"4:" // odd_loads_1_4
- "mov x20, #0x4\n"
+ "mov x19, #0x4\n"
"tbz %x[width], #0, 7f\n"
- "ld1 { v25.b }[4], [x28]\n"
- "ld1 { v30.b }[4], [x27]\n"
- "mov x20, #0x5\n"
- "ld1 { v29.b }[4], [x26]\n"
- "ld1 { v28.b }[4], [x25]\n"
- "ld1 { v21.b }[4], [x24]\n"
- "ld1 { v27.b }[4], [x23]\n"
- "ld1 { v20.b }[4], [x22]\n"
- "ld1 { v26.b }[4], [x21]\n"
+ "ld1 { v31.b }[4], [x27]\n"
+ "ld1 { v30.b }[4], [x26]\n"
+ "mov x19, #0x5\n"
+ "ld1 { v29.b }[4], [x25]\n"
+ "ld1 { v28.b }[4], [x24]\n"
+ "ld1 { v25.b }[4], [x23]\n"
+ "ld1 { v23.b }[4], [x22]\n"
+ "ld1 { v27.b }[4], [x21]\n"
+ "ld1 { v26.b }[4], [x20]\n"
"b 7f\n"
"5:" // odd_loads_2_0
"tbz %x[width], #1, 6f\n"
- "ldr h25, [x28], #0x2\n"
- "ldr h30, [x27], #0x2\n"
- "mov x20, #0x2\n"
- "ldr h29, [x26], #0x2\n"
- "ldr h28, [x25], #0x2\n"
- "ldr h21, [x24], #0x2\n"
- "ldr h27, [x23], #0x2\n"
- "ldr h20, [x22], #0x2\n"
- "ldr h26, [x21], #0x2\n"
+ "ldr h31, [x27], #0x2\n"
+ "ldr h30, [x26], #0x2\n"
+ "mov x19, #0x2\n"
+ "ldr h29, [x25], #0x2\n"
+ "ldr h28, [x24], #0x2\n"
+ "ldr h25, [x23], #0x2\n"
+ "ldr h23, [x22], #0x2\n"
+ "ldr h27, [x21], #0x2\n"
+ "ldr h26, [x20], #0x2\n"
"tbz %x[width], #0, 7f\n"
- "ld1 { v25.b }[2], [x28]\n"
- "ld1 { v30.b }[2], [x27]\n"
- "mov x20, #0x3\n"
- "ld1 { v29.b }[2], [x26]\n"
- "ld1 { v28.b }[2], [x25]\n"
- "ld1 { v21.b }[2], [x24]\n"
- "ld1 { v27.b }[2], [x23]\n"
- "ld1 { v20.b }[2], [x22]\n"
- "ld1 { v26.b }[2], [x21]\n"
+ "ld1 { v31.b }[2], [x27]\n"
+ "mov x19, #0x3\n"
+ "ld1 { v30.b }[2], [x26]\n"
+ "ld1 { v29.b }[2], [x25]\n"
+ "ld1 { v28.b }[2], [x24]\n"
+ "ld1 { v25.b }[2], [x23]\n"
+ "ld1 { v23.b }[2], [x22]\n"
+ "ld1 { v27.b }[2], [x21]\n"
+ "ld1 { v26.b }[2], [x20]\n"
"b 7f\n"
"6:" // odd_loads_1_0
- "ldr b25, [x28, #0x0]\n"
- "ldr b30, [x27, #0x0]\n"
- "mov x20, #0x1\n"
- "ldr b29, [x26, #0x0]\n"
- "ldr b28, [x25, #0x0]\n"
- "ldr b21, [x24, #0x0]\n"
- "ldr b27, [x23, #0x0]\n"
- "ldr b20, [x22, #0x0]\n"
- "ldr b26, [x21, #0x0]\n"
+ "ldr b31, [x27, #0x0]\n"
+ "mov x19, #0x1\n"
+ "ldr b30, [x26, #0x0]\n"
+ "ldr b29, [x25, #0x0]\n"
+ "ldr b28, [x24, #0x0]\n"
+ "ldr b25, [x23, #0x0]\n"
+ "ldr b23, [x22, #0x0]\n"
+ "ldr b27, [x21, #0x0]\n"
+ "ldr b26, [x20, #0x0]\n"
"7:" // Odd load end
- "sshll v25.8h, v25.8b, #0x0\n"
+ "sshll v31.8h, v31.8b, #0x0\n"
+ "subs x19, x19, #0x1\n"
"sshll v30.8h, v30.8b, #0x0\n"
- "subs x20, x20, #0x1\n"
"sshll v29.8h, v29.8b, #0x0\n"
"sshll v28.8h, v28.8b, #0x0\n"
- "sshll v21.8h, v21.8b, #0x0\n"
+ "sshll v25.8h, v25.8b, #0x0\n"
+ "zip1 v20.8h, v31.8h, v25.8h\n"
+ "sshll v23.8h, v23.8b, #0x0\n"
+ "zip1 v24.8h, v30.8h, v23.8h\n"
"sshll v27.8h, v27.8b, #0x0\n"
- "sshll v20.8h, v20.8b, #0x0\n"
+ "zip1 v19.8h, v29.8h, v27.8h\n"
+ "zip1 v22.8h, v20.8h, v19.8h\n"
"sshll v26.8h, v26.8b, #0x0\n"
- "zip1 v23.8h, v25.8h, v21.8h\n"
- "zip1 v22.8h, v29.8h, v20.8h\n"
- "zip1 v19.8h, v30.8h, v27.8h\n"
"zip1 v18.8h, v28.8h, v26.8h\n"
- "zip1 v24.8h, v23.8h, v22.8h\n"
- "zip1 v17.8h, v19.8h, v18.8h\n"
- "zip1 v16.8h, v24.8h, v17.8h\n"
+ "zip1 v17.8h, v24.8h, v18.8h\n"
+ "zip1 v16.8h, v22.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x0]\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 8f\n"
- "subs x20, x20, #0x1\n"
- "zip2 v16.8h, v24.8h, v17.8h\n"
+ "zip2 v16.8h, v22.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x0]\n"
+ "subs x19, x19, #0x1\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 8f\n"
- "zip2 v23.8h, v23.8h, v22.8h\n"
- "zip2 v19.8h, v19.8h, v18.8h\n"
- "subs x20, x20, #0x1\n"
- "zip1 v17.8h, v23.8h, v19.8h\n"
- "str q17, [%x[out_ptr], #0x0]\n"
+ "zip2 v21.8h, v20.8h, v19.8h\n"
+ "zip2 v17.8h, v24.8h, v18.8h\n"
+ "subs x19, x19, #0x1\n"
+ "zip1 v16.8h, v21.8h, v17.8h\n"
+ "str q16, [%x[out_ptr], #0x0]\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 8f\n"
- "subs x20, x20, #0x1\n"
- "zip2 v16.8h, v23.8h, v19.8h\n"
+ "zip2 v16.8h, v21.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x0]\n"
+ "subs x19, x19, #0x1\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 8f\n"
- "zip2 v25.8h, v25.8h, v21.8h\n"
- "zip2 v21.8h, v29.8h, v20.8h\n"
- "subs x20, x20, #0x1\n"
- "zip2 v20.8h, v30.8h, v27.8h\n"
- "zip2 v16.8h, v28.8h, v26.8h\n"
- "zip1 v22.8h, v25.8h, v21.8h\n"
- "zip1 v18.8h, v20.8h, v16.8h\n"
- "zip1 v19.8h, v22.8h, v18.8h\n"
- "str q19, [%x[out_ptr], #0x0]\n"
+ "zip2 v25.8h, v31.8h, v25.8h\n"
+ "zip2 v19.8h, v29.8h, v27.8h\n"
+ "subs x19, x19, #0x1\n"
+ "zip1 v20.8h, v25.8h, v19.8h\n"
+ "zip2 v23.8h, v30.8h, v23.8h\n"
+ "zip2 v18.8h, v28.8h, v26.8h\n"
+ "zip1 v17.8h, v23.8h, v18.8h\n"
+ "zip1 v16.8h, v20.8h, v17.8h\n"
+ "str q16, [%x[out_ptr], #0x0]\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 8f\n"
- "subs x20, x20, #0x1\n"
- "zip2 v18.8h, v22.8h, v18.8h\n"
- "str q18, [%x[out_ptr], #0x0]\n"
+ "zip2 v16.8h, v20.8h, v17.8h\n"
+ "str q16, [%x[out_ptr], #0x0]\n"
+ "subs x19, x19, #0x1\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 8f\n"
- "zip2 v21.8h, v25.8h, v21.8h\n"
- "zip2 v20.8h, v20.8h, v16.8h\n"
- "zip1 v17.8h, v21.8h, v20.8h\n"
- "str q17, [%x[out_ptr], #0x0]\n"
+ "zip2 v19.8h, v25.8h, v19.8h\n"
+ "zip2 v17.8h, v23.8h, v18.8h\n"
+ "zip1 v16.8h, v19.8h, v17.8h\n"
+ "str q16, [%x[out_ptr], #0x0]\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"8:" // Odds skip
: [out_ptr] "+&r" (out_ptr), [width] "+&r" (width)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset)
- : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s8_s16_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s8_s16_summing.hpp
index 3ad103c8d4..b710861417 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s8_s16_summing.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s8_s16_summing.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -31,40 +31,39 @@ void interleave_block<8, 1, VLType::None, true>(
)
{
__asm__ __volatile__(
- "ldr x28, [%x[in], #0x0]\n"
- "ldr x27, [%x[in], #0x8]\n"
- "cmp %x[height], #0x8\n"
- "mov x20, #0x0\n"
- "ldr x26, [%x[in], #0x10]\n"
- "ldr x25, [%x[in], #0x18]\n"
- "movi v2.8h, #0x0\n"
- "movi v1.4s, #0x0\n"
- "ldr x24, [%x[in], #0x20]\n"
- "ldr x23, [%x[in], #0x28]\n"
+ "movi v1.8h, #0x0\n"
+ "ldr x27, [%x[in], #0x0]\n"
+ "mov x19, #0x0\n"
"movi v0.4s, #0x0\n"
- "add x28, x28, %x[row_offset]\n"
- "ldr x22, [%x[in], #0x30]\n"
- "ldr x21, [%x[in], #0x38]\n"
+ "ldr x26, [%x[in], #0x8]\n"
+ "cmp %x[height], #0x8\n"
+ "movi v31.4s, #0x0\n"
+ "ldr x25, [%x[in], #0x10]\n"
"add x27, x27, %x[row_offset]\n"
+ "ldr x24, [%x[in], #0x18]\n"
+ "ldr x23, [%x[in], #0x20]\n"
"add x26, x26, %x[row_offset]\n"
+ "ldr x22, [%x[in], #0x28]\n"
"add x25, x25, %x[row_offset]\n"
+ "ldr x21, [%x[in], #0x30]\n"
"add x24, x24, %x[row_offset]\n"
+ "ldr x20, [%x[in], #0x38]\n"
"add x23, x23, %x[row_offset]\n"
"add x22, x22, %x[row_offset]\n"
"add x21, x21, %x[row_offset]\n"
+ "add x20, x20, %x[row_offset]\n"
"beq 1f\n"
+ "mov x20, x27\n"
"cmp %x[height], #0x2\n"
- "csel x27, x27, x28, GE\n"
- "csel x26, x26, x28, GT\n"
+ "csel x26, x26, x27, GE\n"
+ "csel x25, x25, x27, GT\n"
"cmp %x[height], #0x4\n"
- "csel x25, x25, x28, GE\n"
- "csel x24, x24, x28, GT\n"
+ "csel x24, x24, x27, GE\n"
+ "csel x23, x23, x27, GT\n"
"cmp %x[height], #0x6\n"
- "mov x21, x28\n"
- "csel x23, x23, x28, GE\n"
- "csel x22, x22, x28, GT\n"
+ "csel x22, x22, x27, GE\n"
+ "csel x21, x21, x27, GT\n"
"1:" // no_pointer_adj
- "prfm pldl1keep, [x28, #0x0]\n"
"prfm pldl1keep, [x27, #0x0]\n"
"prfm pldl1keep, [x26, #0x0]\n"
"prfm pldl1keep, [x25, #0x0]\n"
@@ -72,7 +71,7 @@ void interleave_block<8, 1, VLType::None, true>(
"prfm pldl1keep, [x23, #0x0]\n"
"prfm pldl1keep, [x22, #0x0]\n"
"prfm pldl1keep, [x21, #0x0]\n"
- "prfm pldl1keep, [x28, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x0]\n"
"prfm pldl1keep, [x27, #0x40]\n"
"prfm pldl1keep, [x26, #0x40]\n"
"prfm pldl1keep, [x25, #0x40]\n"
@@ -80,241 +79,242 @@ void interleave_block<8, 1, VLType::None, true>(
"prfm pldl1keep, [x23, #0x40]\n"
"prfm pldl1keep, [x22, #0x40]\n"
"prfm pldl1keep, [x21, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x40]\n"
"cbnz %w[first], 2f\n"
"sub %x[out_ptr], %x[out_ptr], #0x20\n"
- "ld1 { v1.4s }, [%x[out_ptr]]\n"
- "ldr q0, [%x[out_ptr], #0x10]\n"
+ "ld1 { v0.4s }, [%x[out_ptr]]\n"
+ "ldr q31, [%x[out_ptr], #0x10]\n"
"2:" // first_pass
"cmp %x[width], #0x8\n"
"blt 5f\n"
"3:" // Main loop head
- "cmp x20, #0xe\n"
+ "cmp x19, #0xe\n"
"ble 4f\n"
- "saddw v1.4s, v1.4s, v2.4h\n"
- "saddw2 v0.4s, v0.4s, v2.8h\n"
- "mov x20, #0x0\n"
- "movi v2.8h, #0x0\n"
+ "saddw v0.4s, v0.4s, v1.4h\n"
+ "saddw2 v31.4s, v31.4s, v1.8h\n"
+ "mov x19, #0x0\n"
+ "movi v1.8h, #0x0\n"
"4:" // no_accumulate_16
- "ldr d31, [x28], #0x8\n"
"ldr d30, [x27], #0x8\n"
- "sshll v31.8h, v31.8b, #0x0\n"
"sshll v30.8h, v30.8b, #0x0\n"
"ldr d29, [x26], #0x8\n"
- "ldr d28, [x25], #0x8\n"
+ "add x19, x19, #0x1\n"
"sshll v29.8h, v29.8b, #0x0\n"
+ "ldr d28, [x25], #0x8\n"
+ "subs %x[width], %x[width], #0x8\n"
"sshll v28.8h, v28.8b, #0x0\n"
"ldr d27, [x24], #0x8\n"
- "ldr d26, [x23], #0x8\n"
+ "cmp %x[width], #0x8\n"
"sshll v27.8h, v27.8b, #0x0\n"
- "sshll v26.8h, v26.8b, #0x0\n"
- "ldr d25, [x22], #0x8\n"
- "ldr d24, [x21], #0x8\n"
- "sshll v25.8h, v25.8b, #0x0\n"
+ "ldr d24, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
"sshll v24.8h, v24.8b, #0x0\n"
- "zip1 v23.8h, v31.8h, v27.8h\n"
- "zip1 v22.8h, v29.8h, v25.8h\n"
- "subs %x[width], %x[width], #0x8\n"
- "cmp %x[width], #0x8\n"
- "zip1 v21.8h, v30.8h, v26.8h\n"
- "zip1 v20.8h, v28.8h, v24.8h\n"
- "prfm pldl1keep, [x28, #0x70]\n"
+ "ldr d21, [x21], #0x8\n"
+ "sshll v23.8h, v23.8b, #0x0\n"
+ "ldr d26, [x20], #0x8\n"
+ "zip1 v20.8h, v30.8h, v24.8h\n"
"prfm pldl1keep, [x27, #0x70]\n"
- "zip1 v18.8h, v23.8h, v22.8h\n"
- "zip1 v17.8h, v21.8h, v20.8h\n"
+ "zip1 v25.8h, v29.8h, v23.8h\n"
"prfm pldl1keep, [x26, #0x70]\n"
+ "zip2 v24.8h, v30.8h, v24.8h\n"
"prfm pldl1keep, [x25, #0x70]\n"
- "zip1 v16.8h, v18.8h, v17.8h\n"
- "add v2.8h, v2.8h, v16.8h\n"
+ "zip2 v23.8h, v29.8h, v23.8h\n"
"prfm pldl1keep, [x24, #0x70]\n"
+ "sshll v21.8h, v21.8b, #0x0\n"
"prfm pldl1keep, [x23, #0x70]\n"
- "zip2 v19.8h, v18.8h, v17.8h\n"
- "zip2 v18.8h, v23.8h, v22.8h\n"
+ "zip1 v19.8h, v28.8h, v21.8h\n"
"prfm pldl1keep, [x22, #0x70]\n"
+ "zip1 v22.8h, v20.8h, v19.8h\n"
"prfm pldl1keep, [x21, #0x70]\n"
- "zip2 v17.8h, v21.8h, v20.8h\n"
- "add v2.8h, v2.8h, v19.8h\n"
+ "zip2 v19.8h, v20.8h, v19.8h\n"
+ "prfm pldl1keep, [x20, #0x70]\n"
+ "zip2 v20.8h, v28.8h, v21.8h\n"
+ "zip1 v21.8h, v24.8h, v20.8h\n"
+ "zip2 v20.8h, v24.8h, v20.8h\n"
+ "sshll v26.8h, v26.8b, #0x0\n"
+ "zip1 v18.8h, v27.8h, v26.8h\n"
+ "zip1 v17.8h, v25.8h, v18.8h\n"
+ "zip1 v16.8h, v22.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x0]\n"
- "add x20, x20, #0x1\n"
- "zip1 v16.8h, v18.8h, v17.8h\n"
- "zip2 v22.8h, v31.8h, v27.8h\n"
- "str q19, [%x[out_ptr], #0x10]\n"
- "zip2 v21.8h, v29.8h, v25.8h\n"
- "zip2 v20.8h, v30.8h, v26.8h\n"
- "str q16, [%x[out_ptr], #0x20]\n"
- "zip2 v19.8h, v28.8h, v24.8h\n"
- "add v2.8h, v2.8h, v16.8h\n"
- "zip2 v16.8h, v18.8h, v17.8h\n"
- "zip1 v18.8h, v22.8h, v21.8h\n"
+ "add v1.8h, v1.8h, v16.8h\n"
+ "zip2 v17.8h, v22.8h, v17.8h\n"
+ "str q17, [%x[out_ptr], #0x10]\n"
+ "zip2 v16.8h, v25.8h, v18.8h\n"
+ "add v1.8h, v1.8h, v17.8h\n"
+ "zip1 v17.8h, v19.8h, v16.8h\n"
+ "str q17, [%x[out_ptr], #0x20]\n"
+ "zip2 v16.8h, v19.8h, v16.8h\n"
"str q16, [%x[out_ptr], #0x30]\n"
- "zip1 v17.8h, v20.8h, v19.8h\n"
- "add v2.8h, v2.8h, v16.8h\n"
- "zip1 v16.8h, v18.8h, v17.8h\n"
- "add v2.8h, v2.8h, v16.8h\n"
+ "add v1.8h, v1.8h, v17.8h\n"
+ "zip2 v19.8h, v27.8h, v26.8h\n"
+ "zip1 v17.8h, v23.8h, v19.8h\n"
+ "add v1.8h, v1.8h, v16.8h\n"
+ "zip1 v16.8h, v21.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x40]\n"
- "zip2 v16.8h, v18.8h, v17.8h\n"
- "zip2 v18.8h, v22.8h, v21.8h\n"
- "str q16, [%x[out_ptr], #0x50]\n"
- "zip2 v17.8h, v20.8h, v19.8h\n"
- "add v2.8h, v2.8h, v16.8h\n"
- "zip1 v16.8h, v18.8h, v17.8h\n"
- "add v2.8h, v2.8h, v16.8h\n"
- "str q16, [%x[out_ptr], #0x60]\n"
- "zip2 v16.8h, v18.8h, v17.8h\n"
+ "zip2 v18.8h, v21.8h, v17.8h\n"
+ "str q18, [%x[out_ptr], #0x50]\n"
+ "add v1.8h, v1.8h, v16.8h\n"
+ "zip2 v16.8h, v23.8h, v19.8h\n"
+ "zip1 v17.8h, v20.8h, v16.8h\n"
+ "str q17, [%x[out_ptr], #0x60]\n"
+ "add v1.8h, v1.8h, v18.8h\n"
+ "zip2 v16.8h, v20.8h, v16.8h\n"
"str q16, [%x[out_ptr], #0x70]\n"
- "add v2.8h, v2.8h, v16.8h\n"
"add %x[out_ptr], %x[out_ptr], #0x80\n"
+ "add v1.8h, v1.8h, v17.8h\n"
+ "add v1.8h, v1.8h, v16.8h\n"
"bge 3b\n"
"5:" // Main loop skip
"cbz %x[width], 10f\n"
"tbz %x[width], #2, 7f\n"
- "ldr s31, [x28], #0x4\n"
"ldr s30, [x27], #0x4\n"
"ldr s29, [x26], #0x4\n"
"ldr s28, [x25], #0x4\n"
"ldr s27, [x24], #0x4\n"
- "ldr s26, [x23], #0x4\n"
- "ldr s25, [x22], #0x4\n"
- "ldr s24, [x21], #0x4\n"
+ "ldr s24, [x23], #0x4\n"
+ "ldr s23, [x22], #0x4\n"
+ "ldr s21, [x21], #0x4\n"
+ "ldr s26, [x20], #0x4\n"
"tbz %x[width], #1, 6f\n"
- "ld1 { v31.h }[2], [x28], #0x2\n"
"ld1 { v30.h }[2], [x27], #0x2\n"
- "mov x20, #0x6\n"
+ "mov x19, #0x6\n"
"ld1 { v29.h }[2], [x26], #0x2\n"
"ld1 { v28.h }[2], [x25], #0x2\n"
"ld1 { v27.h }[2], [x24], #0x2\n"
- "ld1 { v26.h }[2], [x23], #0x2\n"
- "ld1 { v25.h }[2], [x22], #0x2\n"
- "ld1 { v24.h }[2], [x21], #0x2\n"
+ "ld1 { v24.h }[2], [x23], #0x2\n"
+ "ld1 { v23.h }[2], [x22], #0x2\n"
+ "ld1 { v21.h }[2], [x21], #0x2\n"
+ "ld1 { v26.h }[2], [x20], #0x2\n"
"tbz %x[width], #0, 9f\n"
- "ld1 { v31.b }[6], [x28]\n"
"ld1 { v30.b }[6], [x27]\n"
- "mov x20, #0x7\n"
+ "mov x19, #0x7\n"
"ld1 { v29.b }[6], [x26]\n"
"ld1 { v28.b }[6], [x25]\n"
"ld1 { v27.b }[6], [x24]\n"
- "ld1 { v26.b }[6], [x23]\n"
- "ld1 { v25.b }[6], [x22]\n"
- "ld1 { v24.b }[6], [x21]\n"
+ "ld1 { v24.b }[6], [x23]\n"
+ "ld1 { v23.b }[6], [x22]\n"
+ "ld1 { v21.b }[6], [x21]\n"
+ "ld1 { v26.b }[6], [x20]\n"
"b 9f\n"
"6:" // odd_loads_1_4
- "mov x20, #0x4\n"
+ "mov x19, #0x4\n"
"tbz %x[width], #0, 9f\n"
- "ld1 { v31.b }[4], [x28]\n"
"ld1 { v30.b }[4], [x27]\n"
- "mov x20, #0x5\n"
"ld1 { v29.b }[4], [x26]\n"
+ "mov x19, #0x5\n"
"ld1 { v28.b }[4], [x25]\n"
"ld1 { v27.b }[4], [x24]\n"
- "ld1 { v26.b }[4], [x23]\n"
- "ld1 { v25.b }[4], [x22]\n"
- "ld1 { v24.b }[4], [x21]\n"
+ "ld1 { v24.b }[4], [x23]\n"
+ "ld1 { v23.b }[4], [x22]\n"
+ "ld1 { v21.b }[4], [x21]\n"
+ "ld1 { v26.b }[4], [x20]\n"
"b 9f\n"
"7:" // odd_loads_2_0
"tbz %x[width], #1, 8f\n"
- "ldr h31, [x28], #0x2\n"
"ldr h30, [x27], #0x2\n"
- "mov x20, #0x2\n"
"ldr h29, [x26], #0x2\n"
+ "mov x19, #0x2\n"
"ldr h28, [x25], #0x2\n"
"ldr h27, [x24], #0x2\n"
- "ldr h26, [x23], #0x2\n"
- "ldr h25, [x22], #0x2\n"
- "ldr h24, [x21], #0x2\n"
+ "ldr h24, [x23], #0x2\n"
+ "ldr h23, [x22], #0x2\n"
+ "ldr h21, [x21], #0x2\n"
+ "ldr h26, [x20], #0x2\n"
"tbz %x[width], #0, 9f\n"
- "ld1 { v31.b }[2], [x28]\n"
"ld1 { v30.b }[2], [x27]\n"
- "mov x20, #0x3\n"
+ "mov x19, #0x3\n"
"ld1 { v29.b }[2], [x26]\n"
"ld1 { v28.b }[2], [x25]\n"
"ld1 { v27.b }[2], [x24]\n"
- "ld1 { v26.b }[2], [x23]\n"
- "ld1 { v25.b }[2], [x22]\n"
- "ld1 { v24.b }[2], [x21]\n"
+ "ld1 { v24.b }[2], [x23]\n"
+ "ld1 { v23.b }[2], [x22]\n"
+ "ld1 { v21.b }[2], [x21]\n"
+ "ld1 { v26.b }[2], [x20]\n"
"b 9f\n"
"8:" // odd_loads_1_0
- "ldr b31, [x28, #0x0]\n"
"ldr b30, [x27, #0x0]\n"
- "mov x20, #0x1\n"
+ "mov x19, #0x1\n"
"ldr b29, [x26, #0x0]\n"
"ldr b28, [x25, #0x0]\n"
"ldr b27, [x24, #0x0]\n"
- "ldr b26, [x23, #0x0]\n"
- "ldr b25, [x22, #0x0]\n"
- "ldr b24, [x21, #0x0]\n"
+ "ldr b24, [x23, #0x0]\n"
+ "ldr b23, [x22, #0x0]\n"
+ "ldr b21, [x21, #0x0]\n"
+ "ldr b26, [x20, #0x0]\n"
"9:" // Odd load end
- "sshll v31.8h, v31.8b, #0x0\n"
"sshll v30.8h, v30.8b, #0x0\n"
- "subs x20, x20, #0x1\n"
+ "subs x19, x19, #0x1\n"
"sshll v29.8h, v29.8b, #0x0\n"
"sshll v28.8h, v28.8b, #0x0\n"
"sshll v27.8h, v27.8b, #0x0\n"
- "sshll v26.8h, v26.8b, #0x0\n"
- "sshll v25.8h, v25.8b, #0x0\n"
"sshll v24.8h, v24.8b, #0x0\n"
- "zip1 v23.8h, v31.8h, v27.8h\n"
- "zip1 v22.8h, v29.8h, v25.8h\n"
- "zip1 v21.8h, v30.8h, v26.8h\n"
- "zip1 v20.8h, v28.8h, v24.8h\n"
- "zip1 v18.8h, v23.8h, v22.8h\n"
- "zip1 v17.8h, v21.8h, v20.8h\n"
- "zip1 v16.8h, v18.8h, v17.8h\n"
+ "zip1 v20.8h, v30.8h, v24.8h\n"
+ "sshll v23.8h, v23.8b, #0x0\n"
+ "zip1 v25.8h, v29.8h, v23.8h\n"
+ "sshll v21.8h, v21.8b, #0x0\n"
+ "zip1 v19.8h, v28.8h, v21.8h\n"
+ "zip1 v22.8h, v20.8h, v19.8h\n"
+ "sshll v26.8h, v26.8b, #0x0\n"
+ "zip1 v18.8h, v27.8h, v26.8h\n"
+ "zip1 v17.8h, v25.8h, v18.8h\n"
+ "zip1 v16.8h, v22.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x0]\n"
- "add v2.8h, v2.8h, v16.8h\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
+ "add v1.8h, v1.8h, v16.8h\n"
"beq 10f\n"
- "zip2 v19.8h, v18.8h, v17.8h\n"
- "subs x20, x20, #0x1\n"
- "str q19, [%x[out_ptr], #0x0]\n"
- "add v2.8h, v2.8h, v19.8h\n"
+ "zip2 v17.8h, v22.8h, v17.8h\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
+ "subs x19, x19, #0x1\n"
+ "add v1.8h, v1.8h, v17.8h\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 10f\n"
- "zip2 v18.8h, v23.8h, v22.8h\n"
- "zip2 v17.8h, v21.8h, v20.8h\n"
- "subs x20, x20, #0x1\n"
- "zip1 v16.8h, v18.8h, v17.8h\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "add v2.8h, v2.8h, v16.8h\n"
+ "zip2 v19.8h, v20.8h, v19.8h\n"
+ "zip2 v16.8h, v25.8h, v18.8h\n"
+ "subs x19, x19, #0x1\n"
+ "zip1 v17.8h, v19.8h, v16.8h\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
+ "add v1.8h, v1.8h, v17.8h\n"
"beq 10f\n"
- "zip2 v16.8h, v18.8h, v17.8h\n"
- "subs x20, x20, #0x1\n"
+ "zip2 v16.8h, v19.8h, v16.8h\n"
"str q16, [%x[out_ptr], #0x0]\n"
- "add v2.8h, v2.8h, v16.8h\n"
+ "subs x19, x19, #0x1\n"
+ "add v1.8h, v1.8h, v16.8h\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 10f\n"
- "zip2 v22.8h, v31.8h, v27.8h\n"
- "zip2 v21.8h, v29.8h, v25.8h\n"
- "subs x20, x20, #0x1\n"
- "zip2 v20.8h, v30.8h, v26.8h\n"
- "zip2 v19.8h, v28.8h, v24.8h\n"
- "zip1 v18.8h, v22.8h, v21.8h\n"
- "zip1 v17.8h, v20.8h, v19.8h\n"
- "zip1 v16.8h, v18.8h, v17.8h\n"
+ "zip2 v24.8h, v30.8h, v24.8h\n"
+ "zip2 v20.8h, v28.8h, v21.8h\n"
+ "subs x19, x19, #0x1\n"
+ "zip1 v21.8h, v24.8h, v20.8h\n"
+ "zip2 v23.8h, v29.8h, v23.8h\n"
+ "zip2 v19.8h, v27.8h, v26.8h\n"
+ "zip1 v17.8h, v23.8h, v19.8h\n"
+ "zip1 v16.8h, v21.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x0]\n"
- "add v2.8h, v2.8h, v16.8h\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
+ "add v1.8h, v1.8h, v16.8h\n"
"beq 10f\n"
- "zip2 v16.8h, v18.8h, v17.8h\n"
- "subs x20, x20, #0x1\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "add v2.8h, v2.8h, v16.8h\n"
+ "zip2 v18.8h, v21.8h, v17.8h\n"
+ "str q18, [%x[out_ptr], #0x0]\n"
+ "subs x19, x19, #0x1\n"
+ "add v1.8h, v1.8h, v18.8h\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 10f\n"
- "zip2 v18.8h, v22.8h, v21.8h\n"
- "zip2 v17.8h, v20.8h, v19.8h\n"
- "zip1 v16.8h, v18.8h, v17.8h\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "add v2.8h, v2.8h, v16.8h\n"
+ "zip2 v20.8h, v24.8h, v20.8h\n"
+ "zip2 v16.8h, v23.8h, v19.8h\n"
+ "zip1 v17.8h, v20.8h, v16.8h\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
+ "add v1.8h, v1.8h, v17.8h\n"
"10:" // Odds skip
- "saddw v1.4s, v1.4s, v2.4h\n"
- "saddw2 v0.4s, v0.4s, v2.8h\n"
- "str q1, [%x[out_ptr], #0x0]\n"
- "str q0, [%x[out_ptr], #0x10]\n"
+ "saddw v0.4s, v0.4s, v1.4h\n"
+ "str q0, [%x[out_ptr], #0x0]\n"
+ "saddw2 v31.4s, v31.4s, v1.8h\n"
+ "str q31, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
: [out_ptr] "+&r" (out_ptr), [width] "+&r" (width)
: [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset)
- : "cc", "memory", "v0", "v1", "v2", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27"
);
}
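
For orientation, a hypothetical scalar model of what the '_summing' interleave above computes; the name, signature, and sum placement below are inferred from the assembly, not the library's actual interface. The kernel transposes eight int8 rows into width-major int16 blocks and appends one int32 sum per row (such row sums are typically consumed by quantized GEMMs for zero-point correction); the "cmp x19, #0xe" guard in the main loop drains the 16-bit accumulator into the 32-bit accumulators periodically so its lanes cannot overflow, and a non-'first' pass resumes the sums written 0x20 bytes behind the current output pointer.

#include <cstdint>
#include <cstddef>

// Hypothetical scalar reference; assumes 'out' is suitably aligned for the
// int32 sum block that follows the interleaved panel.
void interleave8_block1_s8_s16_summing_ref(int16_t *out, const int8_t *const *in,
                                           size_t width, size_t height,
                                           size_t row_offset, bool first) {
    int32_t sums[8] = {};
    if (!first) {
        // Resume the sums stored by the previous pass, 0x20 bytes behind
        // 'out', mirroring the "sub %x[out_ptr], ..., #0x20" reload above.
        const int32_t *prev = reinterpret_cast<const int32_t *>(out) - 8;
        for (size_t r = 0; r < 8; r++) sums[r] = prev[r];
    }
    for (size_t x = 0; x < width; x++) {
        for (size_t r = 0; r < 8; r++) {
            // Rows beyond 'height' fall back to row 0 (the csel sequence
            // after the height compare); their output is padding.
            const int8_t *row = in[r < height ? r : 0] + row_offset;
            const int16_t v = static_cast<int16_t>(row[x]);
            *out++ = v;
            sums[r] += v;
        }
    }
    // Two q-registers' worth of per-row sums follow the interleaved panel.
    int32_t *sum_out = reinterpret_cast<int32_t *>(out);
    for (size_t r = 0; r < 8; r++) sum_out[r] = sums[r];
}
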
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_u16_u16_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_u16_u16_summing.hpp
index de29d77a22..24ece9a68e 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_u16_u16_summing.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_u16_u16_summing.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -31,40 +31,39 @@ void interleave_block<8, 1, VLType::None, true>(
)
{
__asm__ __volatile__(
- "ldr x28, [%x[in], #0x0]\n"
- "ldr x27, [%x[in], #0x8]\n"
- "cmp %x[height], #0x8\n"
- "mov x20, #0x0\n"
- "ldr x26, [%x[in], #0x10]\n"
- "ldr x25, [%x[in], #0x18]\n"
- "movi v2.8h, #0x0\n"
- "movi v1.4s, #0x0\n"
- "ldr x24, [%x[in], #0x20]\n"
- "ldr x23, [%x[in], #0x28]\n"
+ "movi v1.8h, #0x0\n"
+ "ldr x27, [%x[in], #0x0]\n"
+ "mov x19, #0x0\n"
"movi v0.4s, #0x0\n"
- "add x28, x28, %x[row_offset], LSL #1\n"
- "ldr x22, [%x[in], #0x30]\n"
- "ldr x21, [%x[in], #0x38]\n"
+ "ldr x26, [%x[in], #0x8]\n"
+ "cmp %x[height], #0x8\n"
+ "movi v31.4s, #0x0\n"
+ "ldr x25, [%x[in], #0x10]\n"
"add x27, x27, %x[row_offset], LSL #1\n"
+ "ldr x24, [%x[in], #0x18]\n"
+ "ldr x23, [%x[in], #0x20]\n"
"add x26, x26, %x[row_offset], LSL #1\n"
+ "ldr x22, [%x[in], #0x28]\n"
"add x25, x25, %x[row_offset], LSL #1\n"
+ "ldr x21, [%x[in], #0x30]\n"
"add x24, x24, %x[row_offset], LSL #1\n"
+ "ldr x20, [%x[in], #0x38]\n"
"add x23, x23, %x[row_offset], LSL #1\n"
"add x22, x22, %x[row_offset], LSL #1\n"
"add x21, x21, %x[row_offset], LSL #1\n"
+ "add x20, x20, %x[row_offset], LSL #1\n"
"beq 1f\n"
+ "mov x20, x27\n"
"cmp %x[height], #0x2\n"
- "csel x27, x27, x28, GE\n"
- "csel x26, x26, x28, GT\n"
+ "csel x26, x26, x27, GE\n"
+ "csel x25, x25, x27, GT\n"
"cmp %x[height], #0x4\n"
- "csel x25, x25, x28, GE\n"
- "csel x24, x24, x28, GT\n"
+ "csel x24, x24, x27, GE\n"
+ "csel x23, x23, x27, GT\n"
"cmp %x[height], #0x6\n"
- "mov x21, x28\n"
- "csel x23, x23, x28, GE\n"
- "csel x22, x22, x28, GT\n"
+ "csel x22, x22, x27, GE\n"
+ "csel x21, x21, x27, GT\n"
"1:" // no_pointer_adj
- "prfm pldl1keep, [x28, #0x0]\n"
"prfm pldl1keep, [x27, #0x0]\n"
"prfm pldl1keep, [x26, #0x0]\n"
"prfm pldl1keep, [x25, #0x0]\n"
@@ -72,7 +71,7 @@ void interleave_block<8, 1, VLType::None, true>(
"prfm pldl1keep, [x23, #0x0]\n"
"prfm pldl1keep, [x22, #0x0]\n"
"prfm pldl1keep, [x21, #0x0]\n"
- "prfm pldl1keep, [x28, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x0]\n"
"prfm pldl1keep, [x27, #0x40]\n"
"prfm pldl1keep, [x26, #0x40]\n"
"prfm pldl1keep, [x25, #0x40]\n"
@@ -80,225 +79,226 @@ void interleave_block<8, 1, VLType::None, true>(
"prfm pldl1keep, [x23, #0x40]\n"
"prfm pldl1keep, [x22, #0x40]\n"
"prfm pldl1keep, [x21, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x40]\n"
"cbnz %w[first], 2f\n"
"sub %x[out_ptr], %x[out_ptr], #0x20\n"
- "ld1 { v1.4s }, [%x[out_ptr]]\n"
- "ldr q0, [%x[out_ptr], #0x10]\n"
+ "ld1 { v0.4s }, [%x[out_ptr]]\n"
+ "ldr q31, [%x[out_ptr], #0x10]\n"
"2:" // first_pass
"cmp %x[width], #0x8\n"
"blt 5f\n"
"3:" // Main loop head
- "cmp x20, #0xe\n"
+ "cmp x19, #0xe\n"
"ble 4f\n"
- "uaddw v1.4s, v1.4s, v2.4h\n"
- "uaddw2 v0.4s, v0.4s, v2.8h\n"
- "mov x20, #0x0\n"
- "movi v2.8h, #0x0\n"
+ "uaddw v0.4s, v0.4s, v1.4h\n"
+ "uaddw2 v31.4s, v31.4s, v1.8h\n"
+ "mov x19, #0x0\n"
+ "movi v1.8h, #0x0\n"
"4:" // no_accumulate_16
- "ldr q31, [x28], #0x10\n"
"ldr q30, [x27], #0x10\n"
- "subs %x[width], %x[width], #0x8\n"
- "cmp %x[width], #0x8\n"
+ "add x19, x19, #0x1\n"
"ldr q29, [x26], #0x10\n"
+ "subs %x[width], %x[width], #0x8\n"
"ldr q28, [x25], #0x10\n"
- "add x20, x20, #0x1\n"
+ "cmp %x[width], #0x8\n"
"ldr q27, [x24], #0x10\n"
- "ldr q26, [x23], #0x10\n"
- "zip1 v25.8h, v31.8h, v27.8h\n"
- "zip1 v22.8h, v30.8h, v26.8h\n"
- "ldr q24, [x22], #0x10\n"
- "ldr q23, [x21], #0x10\n"
- "zip1 v18.8h, v29.8h, v24.8h\n"
- "zip1 v21.8h, v28.8h, v23.8h\n"
- "zip1 v17.8h, v25.8h, v18.8h\n"
- "zip1 v16.8h, v22.8h, v21.8h\n"
- "prfm pldl1keep, [x28, #0x70]\n"
+ "ldr q25, [x23], #0x10\n"
+ "zip1 v26.8h, v30.8h, v25.8h\n"
+ "ldr q21, [x22], #0x10\n"
+ "zip2 v25.8h, v30.8h, v25.8h\n"
+ "ldr q24, [x21], #0x10\n"
+ "ldr q23, [x20], #0x10\n"
+ "zip1 v22.8h, v29.8h, v21.8h\n"
"prfm pldl1keep, [x27, #0x70]\n"
- "zip1 v20.8h, v17.8h, v16.8h\n"
- "add v2.8h, v2.8h, v20.8h\n"
+ "zip2 v21.8h, v29.8h, v21.8h\n"
"prfm pldl1keep, [x26, #0x70]\n"
+ "zip1 v20.8h, v28.8h, v24.8h\n"
"prfm pldl1keep, [x25, #0x70]\n"
- "zip2 v19.8h, v17.8h, v16.8h\n"
- "zip2 v18.8h, v25.8h, v18.8h\n"
+ "zip1 v18.8h, v26.8h, v20.8h\n"
"prfm pldl1keep, [x24, #0x70]\n"
+ "zip1 v19.8h, v27.8h, v23.8h\n"
"prfm pldl1keep, [x23, #0x70]\n"
- "zip2 v17.8h, v22.8h, v21.8h\n"
- "add v2.8h, v2.8h, v19.8h\n"
+ "zip1 v16.8h, v22.8h, v19.8h\n"
"prfm pldl1keep, [x22, #0x70]\n"
+ "zip1 v17.8h, v18.8h, v16.8h\n"
"prfm pldl1keep, [x21, #0x70]\n"
+ "add v1.8h, v1.8h, v17.8h\n"
+ "prfm pldl1keep, [x20, #0x70]\n"
+ "zip2 v16.8h, v18.8h, v16.8h\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
+ "zip2 v18.8h, v26.8h, v20.8h\n"
+ "str q16, [%x[out_ptr], #0x10]\n"
+ "add v1.8h, v1.8h, v16.8h\n"
+ "zip2 v17.8h, v22.8h, v19.8h\n"
"zip1 v16.8h, v18.8h, v17.8h\n"
- "zip2 v22.8h, v31.8h, v27.8h\n"
- "str q20, [%x[out_ptr], #0x0]\n"
- "zip2 v21.8h, v29.8h, v24.8h\n"
- "zip2 v20.8h, v30.8h, v26.8h\n"
- "str q19, [%x[out_ptr], #0x10]\n"
- "zip2 v19.8h, v28.8h, v23.8h\n"
- "add v2.8h, v2.8h, v16.8h\n"
"str q16, [%x[out_ptr], #0x20]\n"
+ "add v1.8h, v1.8h, v16.8h\n"
"zip2 v16.8h, v18.8h, v17.8h\n"
- "zip1 v18.8h, v22.8h, v21.8h\n"
"str q16, [%x[out_ptr], #0x30]\n"
- "zip1 v17.8h, v20.8h, v19.8h\n"
- "add v2.8h, v2.8h, v16.8h\n"
+ "zip2 v20.8h, v28.8h, v24.8h\n"
+ "add v1.8h, v1.8h, v16.8h\n"
+ "zip1 v18.8h, v25.8h, v20.8h\n"
+ "zip2 v19.8h, v27.8h, v23.8h\n"
+ "zip1 v17.8h, v21.8h, v19.8h\n"
"zip1 v16.8h, v18.8h, v17.8h\n"
- "add v2.8h, v2.8h, v16.8h\n"
"str q16, [%x[out_ptr], #0x40]\n"
+ "add v1.8h, v1.8h, v16.8h\n"
"zip2 v16.8h, v18.8h, v17.8h\n"
- "zip2 v18.8h, v22.8h, v21.8h\n"
"str q16, [%x[out_ptr], #0x50]\n"
- "zip2 v17.8h, v20.8h, v19.8h\n"
- "add v2.8h, v2.8h, v16.8h\n"
+ "zip2 v18.8h, v25.8h, v20.8h\n"
+ "add v1.8h, v1.8h, v16.8h\n"
+ "zip2 v17.8h, v21.8h, v19.8h\n"
"zip1 v16.8h, v18.8h, v17.8h\n"
- "add v2.8h, v2.8h, v16.8h\n"
"str q16, [%x[out_ptr], #0x60]\n"
+ "add v1.8h, v1.8h, v16.8h\n"
"zip2 v16.8h, v18.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x70]\n"
- "add v2.8h, v2.8h, v16.8h\n"
"add %x[out_ptr], %x[out_ptr], #0x80\n"
+ "add v1.8h, v1.8h, v16.8h\n"
"bge 3b\n"
"5:" // Main loop skip
"cbz %x[width], 10f\n"
"tbz %x[width], #2, 7f\n"
- "ldr d31, [x28], #0x8\n"
"ldr d30, [x27], #0x8\n"
"ldr d29, [x26], #0x8\n"
"ldr d28, [x25], #0x8\n"
"ldr d27, [x24], #0x8\n"
- "ldr d26, [x23], #0x8\n"
- "ldr d24, [x22], #0x8\n"
- "ldr d23, [x21], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d24, [x21], #0x8\n"
+ "ldr d23, [x20], #0x8\n"
"tbz %x[width], #1, 6f\n"
- "ld1 { v31.s }[2], [x28], #0x4\n"
"ld1 { v30.s }[2], [x27], #0x4\n"
- "mov x20, #0x6\n"
+ "mov x19, #0x6\n"
"ld1 { v29.s }[2], [x26], #0x4\n"
"ld1 { v28.s }[2], [x25], #0x4\n"
"ld1 { v27.s }[2], [x24], #0x4\n"
- "ld1 { v26.s }[2], [x23], #0x4\n"
- "ld1 { v24.s }[2], [x22], #0x4\n"
- "ld1 { v23.s }[2], [x21], #0x4\n"
+ "ld1 { v25.s }[2], [x23], #0x4\n"
+ "ld1 { v21.s }[2], [x22], #0x4\n"
+ "ld1 { v24.s }[2], [x21], #0x4\n"
+ "ld1 { v23.s }[2], [x20], #0x4\n"
"tbz %x[width], #0, 9f\n"
- "ld1 { v31.h }[6], [x28]\n"
"ld1 { v30.h }[6], [x27]\n"
- "mov x20, #0x7\n"
+ "mov x19, #0x7\n"
"ld1 { v29.h }[6], [x26]\n"
"ld1 { v28.h }[6], [x25]\n"
"ld1 { v27.h }[6], [x24]\n"
- "ld1 { v26.h }[6], [x23]\n"
- "ld1 { v24.h }[6], [x22]\n"
- "ld1 { v23.h }[6], [x21]\n"
+ "ld1 { v25.h }[6], [x23]\n"
+ "ld1 { v21.h }[6], [x22]\n"
+ "ld1 { v24.h }[6], [x21]\n"
+ "ld1 { v23.h }[6], [x20]\n"
"b 9f\n"
"6:" // odd_loads_1_4
- "mov x20, #0x4\n"
+ "mov x19, #0x4\n"
"tbz %x[width], #0, 9f\n"
- "ld1 { v31.h }[4], [x28]\n"
"ld1 { v30.h }[4], [x27]\n"
- "mov x20, #0x5\n"
"ld1 { v29.h }[4], [x26]\n"
+ "mov x19, #0x5\n"
"ld1 { v28.h }[4], [x25]\n"
"ld1 { v27.h }[4], [x24]\n"
- "ld1 { v26.h }[4], [x23]\n"
- "ld1 { v24.h }[4], [x22]\n"
- "ld1 { v23.h }[4], [x21]\n"
+ "ld1 { v25.h }[4], [x23]\n"
+ "ld1 { v21.h }[4], [x22]\n"
+ "ld1 { v24.h }[4], [x21]\n"
+ "ld1 { v23.h }[4], [x20]\n"
"b 9f\n"
"7:" // odd_loads_2_0
"tbz %x[width], #1, 8f\n"
- "ldr s31, [x28], #0x4\n"
"ldr s30, [x27], #0x4\n"
- "mov x20, #0x2\n"
"ldr s29, [x26], #0x4\n"
+ "mov x19, #0x2\n"
"ldr s28, [x25], #0x4\n"
"ldr s27, [x24], #0x4\n"
- "ldr s26, [x23], #0x4\n"
- "ldr s24, [x22], #0x4\n"
- "ldr s23, [x21], #0x4\n"
+ "ldr s25, [x23], #0x4\n"
+ "ldr s21, [x22], #0x4\n"
+ "ldr s24, [x21], #0x4\n"
+ "ldr s23, [x20], #0x4\n"
"tbz %x[width], #0, 9f\n"
- "ld1 { v31.h }[2], [x28]\n"
"ld1 { v30.h }[2], [x27]\n"
- "mov x20, #0x3\n"
+ "mov x19, #0x3\n"
"ld1 { v29.h }[2], [x26]\n"
"ld1 { v28.h }[2], [x25]\n"
"ld1 { v27.h }[2], [x24]\n"
- "ld1 { v26.h }[2], [x23]\n"
- "ld1 { v24.h }[2], [x22]\n"
- "ld1 { v23.h }[2], [x21]\n"
+ "ld1 { v25.h }[2], [x23]\n"
+ "ld1 { v21.h }[2], [x22]\n"
+ "ld1 { v24.h }[2], [x21]\n"
+ "ld1 { v23.h }[2], [x20]\n"
"b 9f\n"
"8:" // odd_loads_1_0
- "ldr h31, [x28, #0x0]\n"
"ldr h30, [x27, #0x0]\n"
- "mov x20, #0x1\n"
+ "mov x19, #0x1\n"
"ldr h29, [x26, #0x0]\n"
"ldr h28, [x25, #0x0]\n"
"ldr h27, [x24, #0x0]\n"
- "ldr h26, [x23, #0x0]\n"
- "ldr h24, [x22, #0x0]\n"
- "ldr h23, [x21, #0x0]\n"
+ "ldr h25, [x23, #0x0]\n"
+ "ldr h21, [x22, #0x0]\n"
+ "ldr h24, [x21, #0x0]\n"
+ "ldr h23, [x20, #0x0]\n"
"9:" // Odd load end
- "zip1 v25.8h, v31.8h, v27.8h\n"
- "zip1 v18.8h, v29.8h, v24.8h\n"
- "subs x20, x20, #0x1\n"
- "zip1 v22.8h, v30.8h, v26.8h\n"
- "zip1 v21.8h, v28.8h, v23.8h\n"
- "zip1 v17.8h, v25.8h, v18.8h\n"
- "zip1 v16.8h, v22.8h, v21.8h\n"
- "zip1 v20.8h, v17.8h, v16.8h\n"
- "str q20, [%x[out_ptr], #0x0]\n"
- "add v2.8h, v2.8h, v20.8h\n"
+ "zip1 v26.8h, v30.8h, v25.8h\n"
+ "subs x19, x19, #0x1\n"
+ "zip1 v20.8h, v28.8h, v24.8h\n"
+ "zip1 v18.8h, v26.8h, v20.8h\n"
+ "zip1 v22.8h, v29.8h, v21.8h\n"
+ "zip1 v19.8h, v27.8h, v23.8h\n"
+ "zip1 v16.8h, v22.8h, v19.8h\n"
+ "zip1 v17.8h, v18.8h, v16.8h\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
+ "add v1.8h, v1.8h, v17.8h\n"
"beq 10f\n"
- "zip2 v19.8h, v17.8h, v16.8h\n"
- "subs x20, x20, #0x1\n"
- "str q19, [%x[out_ptr], #0x0]\n"
- "add v2.8h, v2.8h, v19.8h\n"
+ "zip2 v16.8h, v18.8h, v16.8h\n"
+ "str q16, [%x[out_ptr], #0x0]\n"
+ "subs x19, x19, #0x1\n"
+ "add v1.8h, v1.8h, v16.8h\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 10f\n"
- "zip2 v18.8h, v25.8h, v18.8h\n"
- "zip2 v17.8h, v22.8h, v21.8h\n"
- "subs x20, x20, #0x1\n"
+ "zip2 v18.8h, v26.8h, v20.8h\n"
+ "zip2 v17.8h, v22.8h, v19.8h\n"
+ "subs x19, x19, #0x1\n"
"zip1 v16.8h, v18.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x0]\n"
- "add v2.8h, v2.8h, v16.8h\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
+ "add v1.8h, v1.8h, v16.8h\n"
"beq 10f\n"
"zip2 v16.8h, v18.8h, v17.8h\n"
- "subs x20, x20, #0x1\n"
"str q16, [%x[out_ptr], #0x0]\n"
- "add v2.8h, v2.8h, v16.8h\n"
+ "subs x19, x19, #0x1\n"
+ "add v1.8h, v1.8h, v16.8h\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 10f\n"
- "zip2 v22.8h, v31.8h, v27.8h\n"
- "zip2 v21.8h, v29.8h, v24.8h\n"
- "subs x20, x20, #0x1\n"
- "zip2 v20.8h, v30.8h, v26.8h\n"
- "zip2 v19.8h, v28.8h, v23.8h\n"
- "zip1 v18.8h, v22.8h, v21.8h\n"
- "zip1 v17.8h, v20.8h, v19.8h\n"
+ "zip2 v25.8h, v30.8h, v25.8h\n"
+ "zip2 v20.8h, v28.8h, v24.8h\n"
+ "subs x19, x19, #0x1\n"
+ "zip1 v18.8h, v25.8h, v20.8h\n"
+ "zip2 v21.8h, v29.8h, v21.8h\n"
+ "zip2 v19.8h, v27.8h, v23.8h\n"
+ "zip1 v17.8h, v21.8h, v19.8h\n"
"zip1 v16.8h, v18.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x0]\n"
- "add v2.8h, v2.8h, v16.8h\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
+ "add v1.8h, v1.8h, v16.8h\n"
"beq 10f\n"
"zip2 v16.8h, v18.8h, v17.8h\n"
- "subs x20, x20, #0x1\n"
"str q16, [%x[out_ptr], #0x0]\n"
- "add v2.8h, v2.8h, v16.8h\n"
+ "subs x19, x19, #0x1\n"
+ "add v1.8h, v1.8h, v16.8h\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 10f\n"
- "zip2 v18.8h, v22.8h, v21.8h\n"
- "zip2 v17.8h, v20.8h, v19.8h\n"
+ "zip2 v18.8h, v25.8h, v20.8h\n"
+ "zip2 v17.8h, v21.8h, v19.8h\n"
"zip1 v16.8h, v18.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x0]\n"
- "add v2.8h, v2.8h, v16.8h\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
+ "add v1.8h, v1.8h, v16.8h\n"
"10:" // Odds skip
- "uaddw v1.4s, v1.4s, v2.4h\n"
- "uaddw2 v0.4s, v0.4s, v2.8h\n"
- "str q1, [%x[out_ptr], #0x0]\n"
- "str q0, [%x[out_ptr], #0x10]\n"
+ "uaddw v0.4s, v0.4s, v1.4h\n"
+ "str q0, [%x[out_ptr], #0x0]\n"
+ "uaddw2 v31.4s, v31.4s, v1.8h\n"
+ "str q31, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
: [out_ptr] "+&r" (out_ptr), [width] "+&r" (width)
: [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset)
- : "cc", "memory", "v0", "v1", "v2", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_u8_u16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_u8_u16.hpp
index 43a3a46801..0db2f7fd51 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_u8_u16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_u8_u16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -31,45 +31,44 @@ void interleave_block<8, 1, VLType::None, false>(
)
{
__asm__ __volatile__(
- "ldr x28, [%x[in], #0x0]\n"
- "ldr x27, [%x[in], #0x8]\n"
+ "ldr x27, [%x[in], #0x0]\n"
"cmp %x[height], #0x8\n"
- "add x28, x28, %x[row_offset]\n"
- "ldr x26, [%x[in], #0x10]\n"
- "ldr x25, [%x[in], #0x18]\n"
+ "ldr x26, [%x[in], #0x8]\n"
"add x27, x27, %x[row_offset]\n"
+ "ldr x25, [%x[in], #0x10]\n"
+ "ldr x24, [%x[in], #0x18]\n"
"add x26, x26, %x[row_offset]\n"
- "ldr x24, [%x[in], #0x20]\n"
- "ldr x23, [%x[in], #0x28]\n"
+ "ldr x23, [%x[in], #0x20]\n"
"add x25, x25, %x[row_offset]\n"
+ "ldr x22, [%x[in], #0x28]\n"
+ "ldr x21, [%x[in], #0x30]\n"
"add x24, x24, %x[row_offset]\n"
- "ldr x22, [%x[in], #0x30]\n"
- "ldr x21, [%x[in], #0x38]\n"
+ "ldr x20, [%x[in], #0x38]\n"
"add x23, x23, %x[row_offset]\n"
"add x22, x22, %x[row_offset]\n"
"add x21, x21, %x[row_offset]\n"
+ "add x20, x20, %x[row_offset]\n"
"beq 1f\n"
+ "mov x20, x27\n"
"cmp %x[height], #0x2\n"
- "csel x27, x27, x28, GE\n"
- "csel x26, x26, x28, GT\n"
+ "csel x26, x26, x27, GE\n"
+ "csel x25, x25, x27, GT\n"
"cmp %x[height], #0x4\n"
- "csel x25, x25, x28, GE\n"
- "csel x24, x24, x28, GT\n"
+ "csel x24, x24, x27, GE\n"
+ "csel x23, x23, x27, GT\n"
"cmp %x[height], #0x6\n"
- "mov x21, x28\n"
- "csel x23, x23, x28, GE\n"
- "csel x22, x22, x28, GT\n"
+ "csel x22, x22, x27, GE\n"
+ "csel x21, x21, x27, GT\n"
"1:" // no_pointer_adj
- "cmp %x[width], #0x8\n"
- "prfm pldl1keep, [x28, #0x0]\n"
"prfm pldl1keep, [x27, #0x0]\n"
+ "cmp %x[width], #0x8\n"
"prfm pldl1keep, [x26, #0x0]\n"
"prfm pldl1keep, [x25, #0x0]\n"
"prfm pldl1keep, [x24, #0x0]\n"
"prfm pldl1keep, [x23, #0x0]\n"
"prfm pldl1keep, [x22, #0x0]\n"
"prfm pldl1keep, [x21, #0x0]\n"
- "prfm pldl1keep, [x28, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x0]\n"
"prfm pldl1keep, [x27, #0x40]\n"
"prfm pldl1keep, [x26, #0x40]\n"
"prfm pldl1keep, [x25, #0x40]\n"
@@ -77,208 +76,209 @@ void interleave_block<8, 1, VLType::None, false>(
"prfm pldl1keep, [x23, #0x40]\n"
"prfm pldl1keep, [x22, #0x40]\n"
"prfm pldl1keep, [x21, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x40]\n"
"blt 3f\n"
"2:" // Main loop head
- "ldr d25, [x28], #0x8\n"
- "ldr d30, [x27], #0x8\n"
- "ushll v25.8h, v25.8b, #0x0\n"
+ "ldr d31, [x27], #0x8\n"
+ "ushll v31.8h, v31.8b, #0x0\n"
+ "ldr d30, [x26], #0x8\n"
+ "subs %x[width], %x[width], #0x8\n"
"ushll v30.8h, v30.8b, #0x0\n"
- "ldr d29, [x26], #0x8\n"
- "ldr d28, [x25], #0x8\n"
+ "ldr d29, [x25], #0x8\n"
+ "cmp %x[width], #0x8\n"
"ushll v29.8h, v29.8b, #0x0\n"
+ "ldr d28, [x24], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
"ushll v28.8h, v28.8b, #0x0\n"
- "ldr d21, [x24], #0x8\n"
- "ldr d27, [x23], #0x8\n"
- "ushll v21.8h, v21.8b, #0x0\n"
- "ushll v27.8h, v27.8b, #0x0\n"
- "ldr d20, [x22], #0x8\n"
- "ldr d26, [x21], #0x8\n"
- "ushll v20.8h, v20.8b, #0x0\n"
- "ushll v26.8h, v26.8b, #0x0\n"
- "zip1 v23.8h, v25.8h, v21.8h\n"
- "zip1 v22.8h, v29.8h, v20.8h\n"
- "subs %x[width], %x[width], #0x8\n"
- "cmp %x[width], #0x8\n"
- "zip1 v19.8h, v30.8h, v27.8h\n"
- "zip1 v18.8h, v28.8h, v26.8h\n"
- "prfm pldl1keep, [x28, #0x70]\n"
+ "ldr d23, [x22], #0x8\n"
+ "ushll v25.8h, v25.8b, #0x0\n"
+ "ldr d27, [x21], #0x8\n"
+ "zip1 v20.8h, v31.8h, v25.8h\n"
+ "ldr d26, [x20], #0x8\n"
+ "zip2 v25.8h, v31.8h, v25.8h\n"
"prfm pldl1keep, [x27, #0x70]\n"
- "zip2 v25.8h, v25.8h, v21.8h\n"
- "zip2 v21.8h, v29.8h, v20.8h\n"
"prfm pldl1keep, [x26, #0x70]\n"
+ "ushll v23.8h, v23.8b, #0x0\n"
"prfm pldl1keep, [x25, #0x70]\n"
- "zip2 v20.8h, v30.8h, v27.8h\n"
- "zip2 v16.8h, v28.8h, v26.8h\n"
+ "zip1 v24.8h, v30.8h, v23.8h\n"
+ "zip2 v23.8h, v30.8h, v23.8h\n"
"prfm pldl1keep, [x24, #0x70]\n"
+ "ushll v27.8h, v27.8b, #0x0\n"
"prfm pldl1keep, [x23, #0x70]\n"
- "zip1 v24.8h, v23.8h, v22.8h\n"
- "zip1 v17.8h, v19.8h, v18.8h\n"
+ "zip1 v19.8h, v29.8h, v27.8h\n"
"prfm pldl1keep, [x22, #0x70]\n"
+ "zip1 v22.8h, v20.8h, v19.8h\n"
"prfm pldl1keep, [x21, #0x70]\n"
- "zip2 v23.8h, v23.8h, v22.8h\n"
- "zip2 v19.8h, v19.8h, v18.8h\n"
- "zip1 v22.8h, v25.8h, v21.8h\n"
- "zip1 v18.8h, v20.8h, v16.8h\n"
- "zip2 v21.8h, v25.8h, v21.8h\n"
- "zip2 v20.8h, v20.8h, v16.8h\n"
- "zip1 v16.8h, v24.8h, v17.8h\n"
+ "zip2 v21.8h, v20.8h, v19.8h\n"
+ "prfm pldl1keep, [x20, #0x70]\n"
+ "zip2 v19.8h, v29.8h, v27.8h\n"
+ "zip1 v20.8h, v25.8h, v19.8h\n"
+ "zip2 v19.8h, v25.8h, v19.8h\n"
+ "ushll v26.8h, v26.8b, #0x0\n"
+ "zip1 v18.8h, v28.8h, v26.8h\n"
+ "zip1 v17.8h, v24.8h, v18.8h\n"
+ "zip1 v16.8h, v22.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x0]\n"
- "zip2 v16.8h, v24.8h, v17.8h\n"
+ "zip2 v16.8h, v22.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x10]\n"
- "zip1 v17.8h, v23.8h, v19.8h\n"
- "zip2 v16.8h, v23.8h, v19.8h\n"
- "str q17, [%x[out_ptr], #0x20]\n"
- "zip1 v19.8h, v22.8h, v18.8h\n"
- "zip2 v18.8h, v22.8h, v18.8h\n"
+ "zip2 v17.8h, v24.8h, v18.8h\n"
+ "zip1 v16.8h, v21.8h, v17.8h\n"
+ "str q16, [%x[out_ptr], #0x20]\n"
+ "zip2 v16.8h, v21.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x30]\n"
- "zip1 v17.8h, v21.8h, v20.8h\n"
- "zip2 v16.8h, v21.8h, v20.8h\n"
- "str q19, [%x[out_ptr], #0x40]\n"
- "str q18, [%x[out_ptr], #0x50]\n"
- "str q17, [%x[out_ptr], #0x60]\n"
+ "zip2 v18.8h, v28.8h, v26.8h\n"
+ "zip1 v17.8h, v23.8h, v18.8h\n"
+ "zip1 v16.8h, v20.8h, v17.8h\n"
+ "str q16, [%x[out_ptr], #0x40]\n"
+ "zip2 v16.8h, v20.8h, v17.8h\n"
+ "str q16, [%x[out_ptr], #0x50]\n"
+ "zip2 v17.8h, v23.8h, v18.8h\n"
+ "zip1 v16.8h, v19.8h, v17.8h\n"
+ "str q16, [%x[out_ptr], #0x60]\n"
+ "zip2 v16.8h, v19.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x70]\n"
"add %x[out_ptr], %x[out_ptr], #0x80\n"
"bge 2b\n"
"3:" // Main loop skip
"cbz %x[width], 8f\n"
"tbz %x[width], #2, 5f\n"
- "ldr s25, [x28], #0x4\n"
- "ldr s30, [x27], #0x4\n"
- "ldr s29, [x26], #0x4\n"
- "ldr s28, [x25], #0x4\n"
- "ldr s21, [x24], #0x4\n"
- "ldr s27, [x23], #0x4\n"
- "ldr s20, [x22], #0x4\n"
- "ldr s26, [x21], #0x4\n"
+ "ldr s31, [x27], #0x4\n"
+ "ldr s30, [x26], #0x4\n"
+ "ldr s29, [x25], #0x4\n"
+ "ldr s28, [x24], #0x4\n"
+ "ldr s25, [x23], #0x4\n"
+ "ldr s23, [x22], #0x4\n"
+ "ldr s27, [x21], #0x4\n"
+ "ldr s26, [x20], #0x4\n"
"tbz %x[width], #1, 4f\n"
- "ld1 { v25.h }[2], [x28], #0x2\n"
- "ld1 { v30.h }[2], [x27], #0x2\n"
- "mov x20, #0x6\n"
- "ld1 { v29.h }[2], [x26], #0x2\n"
- "ld1 { v28.h }[2], [x25], #0x2\n"
- "ld1 { v21.h }[2], [x24], #0x2\n"
- "ld1 { v27.h }[2], [x23], #0x2\n"
- "ld1 { v20.h }[2], [x22], #0x2\n"
- "ld1 { v26.h }[2], [x21], #0x2\n"
+ "ld1 { v31.h }[2], [x27], #0x2\n"
+ "mov x19, #0x6\n"
+ "ld1 { v30.h }[2], [x26], #0x2\n"
+ "ld1 { v29.h }[2], [x25], #0x2\n"
+ "ld1 { v28.h }[2], [x24], #0x2\n"
+ "ld1 { v25.h }[2], [x23], #0x2\n"
+ "ld1 { v23.h }[2], [x22], #0x2\n"
+ "ld1 { v27.h }[2], [x21], #0x2\n"
+ "ld1 { v26.h }[2], [x20], #0x2\n"
"tbz %x[width], #0, 7f\n"
- "ld1 { v25.b }[6], [x28]\n"
- "ld1 { v30.b }[6], [x27]\n"
- "mov x20, #0x7\n"
- "ld1 { v29.b }[6], [x26]\n"
- "ld1 { v28.b }[6], [x25]\n"
- "ld1 { v21.b }[6], [x24]\n"
- "ld1 { v27.b }[6], [x23]\n"
- "ld1 { v20.b }[6], [x22]\n"
- "ld1 { v26.b }[6], [x21]\n"
+ "ld1 { v31.b }[6], [x27]\n"
+ "mov x19, #0x7\n"
+ "ld1 { v30.b }[6], [x26]\n"
+ "ld1 { v29.b }[6], [x25]\n"
+ "ld1 { v28.b }[6], [x24]\n"
+ "ld1 { v25.b }[6], [x23]\n"
+ "ld1 { v23.b }[6], [x22]\n"
+ "ld1 { v27.b }[6], [x21]\n"
+ "ld1 { v26.b }[6], [x20]\n"
"b 7f\n"
"4:" // odd_loads_1_4
- "mov x20, #0x4\n"
+ "mov x19, #0x4\n"
"tbz %x[width], #0, 7f\n"
- "ld1 { v25.b }[4], [x28]\n"
- "ld1 { v30.b }[4], [x27]\n"
- "mov x20, #0x5\n"
- "ld1 { v29.b }[4], [x26]\n"
- "ld1 { v28.b }[4], [x25]\n"
- "ld1 { v21.b }[4], [x24]\n"
- "ld1 { v27.b }[4], [x23]\n"
- "ld1 { v20.b }[4], [x22]\n"
- "ld1 { v26.b }[4], [x21]\n"
+ "ld1 { v31.b }[4], [x27]\n"
+ "ld1 { v30.b }[4], [x26]\n"
+ "mov x19, #0x5\n"
+ "ld1 { v29.b }[4], [x25]\n"
+ "ld1 { v28.b }[4], [x24]\n"
+ "ld1 { v25.b }[4], [x23]\n"
+ "ld1 { v23.b }[4], [x22]\n"
+ "ld1 { v27.b }[4], [x21]\n"
+ "ld1 { v26.b }[4], [x20]\n"
"b 7f\n"
"5:" // odd_loads_2_0
"tbz %x[width], #1, 6f\n"
- "ldr h25, [x28], #0x2\n"
- "ldr h30, [x27], #0x2\n"
- "mov x20, #0x2\n"
- "ldr h29, [x26], #0x2\n"
- "ldr h28, [x25], #0x2\n"
- "ldr h21, [x24], #0x2\n"
- "ldr h27, [x23], #0x2\n"
- "ldr h20, [x22], #0x2\n"
- "ldr h26, [x21], #0x2\n"
+ "ldr h31, [x27], #0x2\n"
+ "ldr h30, [x26], #0x2\n"
+ "mov x19, #0x2\n"
+ "ldr h29, [x25], #0x2\n"
+ "ldr h28, [x24], #0x2\n"
+ "ldr h25, [x23], #0x2\n"
+ "ldr h23, [x22], #0x2\n"
+ "ldr h27, [x21], #0x2\n"
+ "ldr h26, [x20], #0x2\n"
"tbz %x[width], #0, 7f\n"
- "ld1 { v25.b }[2], [x28]\n"
- "ld1 { v30.b }[2], [x27]\n"
- "mov x20, #0x3\n"
- "ld1 { v29.b }[2], [x26]\n"
- "ld1 { v28.b }[2], [x25]\n"
- "ld1 { v21.b }[2], [x24]\n"
- "ld1 { v27.b }[2], [x23]\n"
- "ld1 { v20.b }[2], [x22]\n"
- "ld1 { v26.b }[2], [x21]\n"
+ "ld1 { v31.b }[2], [x27]\n"
+ "mov x19, #0x3\n"
+ "ld1 { v30.b }[2], [x26]\n"
+ "ld1 { v29.b }[2], [x25]\n"
+ "ld1 { v28.b }[2], [x24]\n"
+ "ld1 { v25.b }[2], [x23]\n"
+ "ld1 { v23.b }[2], [x22]\n"
+ "ld1 { v27.b }[2], [x21]\n"
+ "ld1 { v26.b }[2], [x20]\n"
"b 7f\n"
"6:" // odd_loads_1_0
- "ldr b25, [x28, #0x0]\n"
- "ldr b30, [x27, #0x0]\n"
- "mov x20, #0x1\n"
- "ldr b29, [x26, #0x0]\n"
- "ldr b28, [x25, #0x0]\n"
- "ldr b21, [x24, #0x0]\n"
- "ldr b27, [x23, #0x0]\n"
- "ldr b20, [x22, #0x0]\n"
- "ldr b26, [x21, #0x0]\n"
+ "ldr b31, [x27, #0x0]\n"
+ "mov x19, #0x1\n"
+ "ldr b30, [x26, #0x0]\n"
+ "ldr b29, [x25, #0x0]\n"
+ "ldr b28, [x24, #0x0]\n"
+ "ldr b25, [x23, #0x0]\n"
+ "ldr b23, [x22, #0x0]\n"
+ "ldr b27, [x21, #0x0]\n"
+ "ldr b26, [x20, #0x0]\n"
"7:" // Odd load end
- "ushll v25.8h, v25.8b, #0x0\n"
+ "ushll v31.8h, v31.8b, #0x0\n"
+ "subs x19, x19, #0x1\n"
"ushll v30.8h, v30.8b, #0x0\n"
- "subs x20, x20, #0x1\n"
"ushll v29.8h, v29.8b, #0x0\n"
"ushll v28.8h, v28.8b, #0x0\n"
- "ushll v21.8h, v21.8b, #0x0\n"
+ "ushll v25.8h, v25.8b, #0x0\n"
+ "zip1 v20.8h, v31.8h, v25.8h\n"
+ "ushll v23.8h, v23.8b, #0x0\n"
+ "zip1 v24.8h, v30.8h, v23.8h\n"
"ushll v27.8h, v27.8b, #0x0\n"
- "ushll v20.8h, v20.8b, #0x0\n"
+ "zip1 v19.8h, v29.8h, v27.8h\n"
+ "zip1 v22.8h, v20.8h, v19.8h\n"
"ushll v26.8h, v26.8b, #0x0\n"
- "zip1 v23.8h, v25.8h, v21.8h\n"
- "zip1 v22.8h, v29.8h, v20.8h\n"
- "zip1 v19.8h, v30.8h, v27.8h\n"
"zip1 v18.8h, v28.8h, v26.8h\n"
- "zip1 v24.8h, v23.8h, v22.8h\n"
- "zip1 v17.8h, v19.8h, v18.8h\n"
- "zip1 v16.8h, v24.8h, v17.8h\n"
+ "zip1 v17.8h, v24.8h, v18.8h\n"
+ "zip1 v16.8h, v22.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x0]\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 8f\n"
- "subs x20, x20, #0x1\n"
- "zip2 v16.8h, v24.8h, v17.8h\n"
+ "zip2 v16.8h, v22.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x0]\n"
+ "subs x19, x19, #0x1\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 8f\n"
- "zip2 v23.8h, v23.8h, v22.8h\n"
- "zip2 v19.8h, v19.8h, v18.8h\n"
- "subs x20, x20, #0x1\n"
- "zip1 v17.8h, v23.8h, v19.8h\n"
- "str q17, [%x[out_ptr], #0x0]\n"
+ "zip2 v21.8h, v20.8h, v19.8h\n"
+ "zip2 v17.8h, v24.8h, v18.8h\n"
+ "subs x19, x19, #0x1\n"
+ "zip1 v16.8h, v21.8h, v17.8h\n"
+ "str q16, [%x[out_ptr], #0x0]\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 8f\n"
- "subs x20, x20, #0x1\n"
- "zip2 v16.8h, v23.8h, v19.8h\n"
+ "zip2 v16.8h, v21.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x0]\n"
+ "subs x19, x19, #0x1\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 8f\n"
- "zip2 v25.8h, v25.8h, v21.8h\n"
- "zip2 v21.8h, v29.8h, v20.8h\n"
- "subs x20, x20, #0x1\n"
- "zip2 v20.8h, v30.8h, v27.8h\n"
- "zip2 v16.8h, v28.8h, v26.8h\n"
- "zip1 v22.8h, v25.8h, v21.8h\n"
- "zip1 v18.8h, v20.8h, v16.8h\n"
- "zip1 v19.8h, v22.8h, v18.8h\n"
- "str q19, [%x[out_ptr], #0x0]\n"
+ "zip2 v25.8h, v31.8h, v25.8h\n"
+ "zip2 v19.8h, v29.8h, v27.8h\n"
+ "subs x19, x19, #0x1\n"
+ "zip1 v20.8h, v25.8h, v19.8h\n"
+ "zip2 v23.8h, v30.8h, v23.8h\n"
+ "zip2 v18.8h, v28.8h, v26.8h\n"
+ "zip1 v17.8h, v23.8h, v18.8h\n"
+ "zip1 v16.8h, v20.8h, v17.8h\n"
+ "str q16, [%x[out_ptr], #0x0]\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 8f\n"
- "subs x20, x20, #0x1\n"
- "zip2 v18.8h, v22.8h, v18.8h\n"
- "str q18, [%x[out_ptr], #0x0]\n"
+ "zip2 v16.8h, v20.8h, v17.8h\n"
+ "str q16, [%x[out_ptr], #0x0]\n"
+ "subs x19, x19, #0x1\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 8f\n"
- "zip2 v21.8h, v25.8h, v21.8h\n"
- "zip2 v20.8h, v20.8h, v16.8h\n"
- "zip1 v17.8h, v21.8h, v20.8h\n"
- "str q17, [%x[out_ptr], #0x0]\n"
+ "zip2 v19.8h, v25.8h, v19.8h\n"
+ "zip2 v17.8h, v23.8h, v18.8h\n"
+ "zip1 v16.8h, v19.8h, v17.8h\n"
+ "str q16, [%x[out_ptr], #0x0]\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"8:" // Odds skip
: [out_ptr] "+&r" (out_ptr), [width] "+&r" (width)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset)
- : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27"
);
}
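
The non-summing u8 to u16 variant above has a simpler contract. A hypothetical scalar reference follows (the name and signature are illustrative): column x of each of the eight rows is zero-extended and the eight values are stored contiguously, which is what the zip1/zip2 tree produces eight columns at a time.

#include <cstdint>
#include <cstddef>

// Hypothetical scalar reference for the 8-row, block-1 u8 -> u16 interleave:
// the panel is transposed into width-major order, one zero-extended value
// per row for each column.
void interleave8_block1_u8_u16_ref(uint16_t *out, const uint8_t *const *in,
                                   size_t width, size_t height,
                                   size_t row_offset) {
    for (size_t x = 0; x < width; x++) {
        for (size_t r = 0; r < 8; r++) {
            // Missing rows read row 0, mirroring the csel pointer
            // duplication in the assembly; their output is padding.
            const uint8_t *row = in[r < height ? r : 0] + row_offset;
            *out++ = static_cast<uint16_t>(row[x]);
        }
    }
}
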
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_u8_u16_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_u8_u16_summing.hpp
index 3ab24365af..7c7d774a6b 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_u8_u16_summing.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_u8_u16_summing.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -31,40 +31,39 @@ void interleave_block<8, 1, VLType::None, true>(
)
{
__asm__ __volatile__(
- "ldr x28, [%x[in], #0x0]\n"
- "ldr x27, [%x[in], #0x8]\n"
- "cmp %x[height], #0x8\n"
- "mov x20, #0x0\n"
- "ldr x26, [%x[in], #0x10]\n"
- "ldr x25, [%x[in], #0x18]\n"
- "movi v2.8h, #0x0\n"
- "movi v1.4s, #0x0\n"
- "ldr x24, [%x[in], #0x20]\n"
- "ldr x23, [%x[in], #0x28]\n"
+ "movi v1.8h, #0x0\n"
+ "ldr x27, [%x[in], #0x0]\n"
+ "mov x19, #0x0\n"
"movi v0.4s, #0x0\n"
- "add x28, x28, %x[row_offset]\n"
- "ldr x22, [%x[in], #0x30]\n"
- "ldr x21, [%x[in], #0x38]\n"
+ "ldr x26, [%x[in], #0x8]\n"
+ "cmp %x[height], #0x8\n"
+ "movi v31.4s, #0x0\n"
+ "ldr x25, [%x[in], #0x10]\n"
"add x27, x27, %x[row_offset]\n"
+ "ldr x24, [%x[in], #0x18]\n"
+ "ldr x23, [%x[in], #0x20]\n"
"add x26, x26, %x[row_offset]\n"
+ "ldr x22, [%x[in], #0x28]\n"
"add x25, x25, %x[row_offset]\n"
+ "ldr x21, [%x[in], #0x30]\n"
"add x24, x24, %x[row_offset]\n"
+ "ldr x20, [%x[in], #0x38]\n"
"add x23, x23, %x[row_offset]\n"
"add x22, x22, %x[row_offset]\n"
"add x21, x21, %x[row_offset]\n"
+ "add x20, x20, %x[row_offset]\n"
"beq 1f\n"
+ "mov x20, x27\n"
"cmp %x[height], #0x2\n"
- "csel x27, x27, x28, GE\n"
- "csel x26, x26, x28, GT\n"
+ "csel x26, x26, x27, GE\n"
+ "csel x25, x25, x27, GT\n"
"cmp %x[height], #0x4\n"
- "csel x25, x25, x28, GE\n"
- "csel x24, x24, x28, GT\n"
+ "csel x24, x24, x27, GE\n"
+ "csel x23, x23, x27, GT\n"
"cmp %x[height], #0x6\n"
- "mov x21, x28\n"
- "csel x23, x23, x28, GE\n"
- "csel x22, x22, x28, GT\n"
+ "csel x22, x22, x27, GE\n"
+ "csel x21, x21, x27, GT\n"
"1:" // no_pointer_adj
- "prfm pldl1keep, [x28, #0x0]\n"
"prfm pldl1keep, [x27, #0x0]\n"
"prfm pldl1keep, [x26, #0x0]\n"
"prfm pldl1keep, [x25, #0x0]\n"
@@ -72,7 +71,7 @@ void interleave_block<8, 1, VLType::None, true>(
"prfm pldl1keep, [x23, #0x0]\n"
"prfm pldl1keep, [x22, #0x0]\n"
"prfm pldl1keep, [x21, #0x0]\n"
- "prfm pldl1keep, [x28, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x0]\n"
"prfm pldl1keep, [x27, #0x40]\n"
"prfm pldl1keep, [x26, #0x40]\n"
"prfm pldl1keep, [x25, #0x40]\n"
@@ -80,241 +79,242 @@ void interleave_block<8, 1, VLType::None, true>(
"prfm pldl1keep, [x23, #0x40]\n"
"prfm pldl1keep, [x22, #0x40]\n"
"prfm pldl1keep, [x21, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x40]\n"
"cbnz %w[first], 2f\n"
"sub %x[out_ptr], %x[out_ptr], #0x20\n"
- "ld1 { v1.4s }, [%x[out_ptr]]\n"
- "ldr q0, [%x[out_ptr], #0x10]\n"
+ "ld1 { v0.4s }, [%x[out_ptr]]\n"
+ "ldr q31, [%x[out_ptr], #0x10]\n"
"2:" // first_pass
"cmp %x[width], #0x8\n"
"blt 5f\n"
"3:" // Main loop head
- "cmp x20, #0xe\n"
+ "cmp x19, #0xe\n"
"ble 4f\n"
- "uaddw v1.4s, v1.4s, v2.4h\n"
- "uaddw2 v0.4s, v0.4s, v2.8h\n"
- "mov x20, #0x0\n"
- "movi v2.8h, #0x0\n"
+ "uaddw v0.4s, v0.4s, v1.4h\n"
+ "uaddw2 v31.4s, v31.4s, v1.8h\n"
+ "mov x19, #0x0\n"
+ "movi v1.8h, #0x0\n"
"4:" // no_accumulate_16
- "ldr d31, [x28], #0x8\n"
"ldr d30, [x27], #0x8\n"
- "ushll v31.8h, v31.8b, #0x0\n"
"ushll v30.8h, v30.8b, #0x0\n"
"ldr d29, [x26], #0x8\n"
- "ldr d28, [x25], #0x8\n"
+ "add x19, x19, #0x1\n"
"ushll v29.8h, v29.8b, #0x0\n"
+ "ldr d28, [x25], #0x8\n"
+ "subs %x[width], %x[width], #0x8\n"
"ushll v28.8h, v28.8b, #0x0\n"
"ldr d27, [x24], #0x8\n"
- "ldr d26, [x23], #0x8\n"
+ "cmp %x[width], #0x8\n"
"ushll v27.8h, v27.8b, #0x0\n"
- "ushll v26.8h, v26.8b, #0x0\n"
- "ldr d25, [x22], #0x8\n"
- "ldr d24, [x21], #0x8\n"
- "ushll v25.8h, v25.8b, #0x0\n"
+ "ldr d24, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
"ushll v24.8h, v24.8b, #0x0\n"
- "zip1 v23.8h, v31.8h, v27.8h\n"
- "zip1 v22.8h, v29.8h, v25.8h\n"
- "subs %x[width], %x[width], #0x8\n"
- "cmp %x[width], #0x8\n"
- "zip1 v21.8h, v30.8h, v26.8h\n"
- "zip1 v20.8h, v28.8h, v24.8h\n"
- "prfm pldl1keep, [x28, #0x70]\n"
+ "ldr d21, [x21], #0x8\n"
+ "ushll v23.8h, v23.8b, #0x0\n"
+ "ldr d26, [x20], #0x8\n"
+ "zip1 v20.8h, v30.8h, v24.8h\n"
"prfm pldl1keep, [x27, #0x70]\n"
- "zip1 v18.8h, v23.8h, v22.8h\n"
- "zip1 v17.8h, v21.8h, v20.8h\n"
+ "zip1 v25.8h, v29.8h, v23.8h\n"
"prfm pldl1keep, [x26, #0x70]\n"
+ "zip2 v24.8h, v30.8h, v24.8h\n"
"prfm pldl1keep, [x25, #0x70]\n"
- "zip1 v16.8h, v18.8h, v17.8h\n"
- "add v2.8h, v2.8h, v16.8h\n"
+ "zip2 v23.8h, v29.8h, v23.8h\n"
"prfm pldl1keep, [x24, #0x70]\n"
+ "ushll v21.8h, v21.8b, #0x0\n"
"prfm pldl1keep, [x23, #0x70]\n"
- "zip2 v19.8h, v18.8h, v17.8h\n"
- "zip2 v18.8h, v23.8h, v22.8h\n"
+ "zip1 v19.8h, v28.8h, v21.8h\n"
"prfm pldl1keep, [x22, #0x70]\n"
+ "zip1 v22.8h, v20.8h, v19.8h\n"
"prfm pldl1keep, [x21, #0x70]\n"
- "zip2 v17.8h, v21.8h, v20.8h\n"
- "add v2.8h, v2.8h, v19.8h\n"
+ "zip2 v19.8h, v20.8h, v19.8h\n"
+ "prfm pldl1keep, [x20, #0x70]\n"
+ "zip2 v20.8h, v28.8h, v21.8h\n"
+ "zip1 v21.8h, v24.8h, v20.8h\n"
+ "zip2 v20.8h, v24.8h, v20.8h\n"
+ "ushll v26.8h, v26.8b, #0x0\n"
+ "zip1 v18.8h, v27.8h, v26.8h\n"
+ "zip1 v17.8h, v25.8h, v18.8h\n"
+ "zip1 v16.8h, v22.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x0]\n"
- "add x20, x20, #0x1\n"
- "zip1 v16.8h, v18.8h, v17.8h\n"
- "zip2 v22.8h, v31.8h, v27.8h\n"
- "str q19, [%x[out_ptr], #0x10]\n"
- "zip2 v21.8h, v29.8h, v25.8h\n"
- "zip2 v20.8h, v30.8h, v26.8h\n"
- "str q16, [%x[out_ptr], #0x20]\n"
- "zip2 v19.8h, v28.8h, v24.8h\n"
- "add v2.8h, v2.8h, v16.8h\n"
- "zip2 v16.8h, v18.8h, v17.8h\n"
- "zip1 v18.8h, v22.8h, v21.8h\n"
+ "add v1.8h, v1.8h, v16.8h\n"
+ "zip2 v17.8h, v22.8h, v17.8h\n"
+ "str q17, [%x[out_ptr], #0x10]\n"
+ "zip2 v16.8h, v25.8h, v18.8h\n"
+ "add v1.8h, v1.8h, v17.8h\n"
+ "zip1 v17.8h, v19.8h, v16.8h\n"
+ "str q17, [%x[out_ptr], #0x20]\n"
+ "zip2 v16.8h, v19.8h, v16.8h\n"
"str q16, [%x[out_ptr], #0x30]\n"
- "zip1 v17.8h, v20.8h, v19.8h\n"
- "add v2.8h, v2.8h, v16.8h\n"
- "zip1 v16.8h, v18.8h, v17.8h\n"
- "add v2.8h, v2.8h, v16.8h\n"
+ "add v1.8h, v1.8h, v17.8h\n"
+ "zip2 v19.8h, v27.8h, v26.8h\n"
+ "zip1 v17.8h, v23.8h, v19.8h\n"
+ "add v1.8h, v1.8h, v16.8h\n"
+ "zip1 v16.8h, v21.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x40]\n"
- "zip2 v16.8h, v18.8h, v17.8h\n"
- "zip2 v18.8h, v22.8h, v21.8h\n"
- "str q16, [%x[out_ptr], #0x50]\n"
- "zip2 v17.8h, v20.8h, v19.8h\n"
- "add v2.8h, v2.8h, v16.8h\n"
- "zip1 v16.8h, v18.8h, v17.8h\n"
- "add v2.8h, v2.8h, v16.8h\n"
- "str q16, [%x[out_ptr], #0x60]\n"
- "zip2 v16.8h, v18.8h, v17.8h\n"
+ "zip2 v18.8h, v21.8h, v17.8h\n"
+ "str q18, [%x[out_ptr], #0x50]\n"
+ "add v1.8h, v1.8h, v16.8h\n"
+ "zip2 v16.8h, v23.8h, v19.8h\n"
+ "zip1 v17.8h, v20.8h, v16.8h\n"
+ "str q17, [%x[out_ptr], #0x60]\n"
+ "add v1.8h, v1.8h, v18.8h\n"
+ "zip2 v16.8h, v20.8h, v16.8h\n"
"str q16, [%x[out_ptr], #0x70]\n"
- "add v2.8h, v2.8h, v16.8h\n"
"add %x[out_ptr], %x[out_ptr], #0x80\n"
+ "add v1.8h, v1.8h, v17.8h\n"
+ "add v1.8h, v1.8h, v16.8h\n"
"bge 3b\n"
"5:" // Main loop skip
"cbz %x[width], 10f\n"
"tbz %x[width], #2, 7f\n"
- "ldr s31, [x28], #0x4\n"
"ldr s30, [x27], #0x4\n"
"ldr s29, [x26], #0x4\n"
"ldr s28, [x25], #0x4\n"
"ldr s27, [x24], #0x4\n"
- "ldr s26, [x23], #0x4\n"
- "ldr s25, [x22], #0x4\n"
- "ldr s24, [x21], #0x4\n"
+ "ldr s24, [x23], #0x4\n"
+ "ldr s23, [x22], #0x4\n"
+ "ldr s21, [x21], #0x4\n"
+ "ldr s26, [x20], #0x4\n"
"tbz %x[width], #1, 6f\n"
- "ld1 { v31.h }[2], [x28], #0x2\n"
"ld1 { v30.h }[2], [x27], #0x2\n"
- "mov x20, #0x6\n"
+ "mov x19, #0x6\n"
"ld1 { v29.h }[2], [x26], #0x2\n"
"ld1 { v28.h }[2], [x25], #0x2\n"
"ld1 { v27.h }[2], [x24], #0x2\n"
- "ld1 { v26.h }[2], [x23], #0x2\n"
- "ld1 { v25.h }[2], [x22], #0x2\n"
- "ld1 { v24.h }[2], [x21], #0x2\n"
+ "ld1 { v24.h }[2], [x23], #0x2\n"
+ "ld1 { v23.h }[2], [x22], #0x2\n"
+ "ld1 { v21.h }[2], [x21], #0x2\n"
+ "ld1 { v26.h }[2], [x20], #0x2\n"
"tbz %x[width], #0, 9f\n"
- "ld1 { v31.b }[6], [x28]\n"
"ld1 { v30.b }[6], [x27]\n"
- "mov x20, #0x7\n"
+ "mov x19, #0x7\n"
"ld1 { v29.b }[6], [x26]\n"
"ld1 { v28.b }[6], [x25]\n"
"ld1 { v27.b }[6], [x24]\n"
- "ld1 { v26.b }[6], [x23]\n"
- "ld1 { v25.b }[6], [x22]\n"
- "ld1 { v24.b }[6], [x21]\n"
+ "ld1 { v24.b }[6], [x23]\n"
+ "ld1 { v23.b }[6], [x22]\n"
+ "ld1 { v21.b }[6], [x21]\n"
+ "ld1 { v26.b }[6], [x20]\n"
"b 9f\n"
"6:" // odd_loads_1_4
- "mov x20, #0x4\n"
+ "mov x19, #0x4\n"
"tbz %x[width], #0, 9f\n"
- "ld1 { v31.b }[4], [x28]\n"
"ld1 { v30.b }[4], [x27]\n"
- "mov x20, #0x5\n"
"ld1 { v29.b }[4], [x26]\n"
+ "mov x19, #0x5\n"
"ld1 { v28.b }[4], [x25]\n"
"ld1 { v27.b }[4], [x24]\n"
- "ld1 { v26.b }[4], [x23]\n"
- "ld1 { v25.b }[4], [x22]\n"
- "ld1 { v24.b }[4], [x21]\n"
+ "ld1 { v24.b }[4], [x23]\n"
+ "ld1 { v23.b }[4], [x22]\n"
+ "ld1 { v21.b }[4], [x21]\n"
+ "ld1 { v26.b }[4], [x20]\n"
"b 9f\n"
"7:" // odd_loads_2_0
"tbz %x[width], #1, 8f\n"
- "ldr h31, [x28], #0x2\n"
"ldr h30, [x27], #0x2\n"
- "mov x20, #0x2\n"
"ldr h29, [x26], #0x2\n"
+ "mov x19, #0x2\n"
"ldr h28, [x25], #0x2\n"
"ldr h27, [x24], #0x2\n"
- "ldr h26, [x23], #0x2\n"
- "ldr h25, [x22], #0x2\n"
- "ldr h24, [x21], #0x2\n"
+ "ldr h24, [x23], #0x2\n"
+ "ldr h23, [x22], #0x2\n"
+ "ldr h21, [x21], #0x2\n"
+ "ldr h26, [x20], #0x2\n"
"tbz %x[width], #0, 9f\n"
- "ld1 { v31.b }[2], [x28]\n"
"ld1 { v30.b }[2], [x27]\n"
- "mov x20, #0x3\n"
+ "mov x19, #0x3\n"
"ld1 { v29.b }[2], [x26]\n"
"ld1 { v28.b }[2], [x25]\n"
"ld1 { v27.b }[2], [x24]\n"
- "ld1 { v26.b }[2], [x23]\n"
- "ld1 { v25.b }[2], [x22]\n"
- "ld1 { v24.b }[2], [x21]\n"
+ "ld1 { v24.b }[2], [x23]\n"
+ "ld1 { v23.b }[2], [x22]\n"
+ "ld1 { v21.b }[2], [x21]\n"
+ "ld1 { v26.b }[2], [x20]\n"
"b 9f\n"
"8:" // odd_loads_1_0
- "ldr b31, [x28, #0x0]\n"
"ldr b30, [x27, #0x0]\n"
- "mov x20, #0x1\n"
+ "mov x19, #0x1\n"
"ldr b29, [x26, #0x0]\n"
"ldr b28, [x25, #0x0]\n"
"ldr b27, [x24, #0x0]\n"
- "ldr b26, [x23, #0x0]\n"
- "ldr b25, [x22, #0x0]\n"
- "ldr b24, [x21, #0x0]\n"
+ "ldr b24, [x23, #0x0]\n"
+ "ldr b23, [x22, #0x0]\n"
+ "ldr b21, [x21, #0x0]\n"
+ "ldr b26, [x20, #0x0]\n"
"9:" // Odd load end
- "ushll v31.8h, v31.8b, #0x0\n"
"ushll v30.8h, v30.8b, #0x0\n"
- "subs x20, x20, #0x1\n"
+ "subs x19, x19, #0x1\n"
"ushll v29.8h, v29.8b, #0x0\n"
"ushll v28.8h, v28.8b, #0x0\n"
"ushll v27.8h, v27.8b, #0x0\n"
- "ushll v26.8h, v26.8b, #0x0\n"
- "ushll v25.8h, v25.8b, #0x0\n"
"ushll v24.8h, v24.8b, #0x0\n"
- "zip1 v23.8h, v31.8h, v27.8h\n"
- "zip1 v22.8h, v29.8h, v25.8h\n"
- "zip1 v21.8h, v30.8h, v26.8h\n"
- "zip1 v20.8h, v28.8h, v24.8h\n"
- "zip1 v18.8h, v23.8h, v22.8h\n"
- "zip1 v17.8h, v21.8h, v20.8h\n"
- "zip1 v16.8h, v18.8h, v17.8h\n"
+ "zip1 v20.8h, v30.8h, v24.8h\n"
+ "ushll v23.8h, v23.8b, #0x0\n"
+ "zip1 v25.8h, v29.8h, v23.8h\n"
+ "ushll v21.8h, v21.8b, #0x0\n"
+ "zip1 v19.8h, v28.8h, v21.8h\n"
+ "zip1 v22.8h, v20.8h, v19.8h\n"
+ "ushll v26.8h, v26.8b, #0x0\n"
+ "zip1 v18.8h, v27.8h, v26.8h\n"
+ "zip1 v17.8h, v25.8h, v18.8h\n"
+ "zip1 v16.8h, v22.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x0]\n"
- "add v2.8h, v2.8h, v16.8h\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
+ "add v1.8h, v1.8h, v16.8h\n"
"beq 10f\n"
- "zip2 v19.8h, v18.8h, v17.8h\n"
- "subs x20, x20, #0x1\n"
- "str q19, [%x[out_ptr], #0x0]\n"
- "add v2.8h, v2.8h, v19.8h\n"
+ "zip2 v17.8h, v22.8h, v17.8h\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
+ "subs x19, x19, #0x1\n"
+ "add v1.8h, v1.8h, v17.8h\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 10f\n"
- "zip2 v18.8h, v23.8h, v22.8h\n"
- "zip2 v17.8h, v21.8h, v20.8h\n"
- "subs x20, x20, #0x1\n"
- "zip1 v16.8h, v18.8h, v17.8h\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "add v2.8h, v2.8h, v16.8h\n"
+ "zip2 v19.8h, v20.8h, v19.8h\n"
+ "zip2 v16.8h, v25.8h, v18.8h\n"
+ "subs x19, x19, #0x1\n"
+ "zip1 v17.8h, v19.8h, v16.8h\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
+ "add v1.8h, v1.8h, v17.8h\n"
"beq 10f\n"
- "zip2 v16.8h, v18.8h, v17.8h\n"
- "subs x20, x20, #0x1\n"
+ "zip2 v16.8h, v19.8h, v16.8h\n"
"str q16, [%x[out_ptr], #0x0]\n"
- "add v2.8h, v2.8h, v16.8h\n"
+ "subs x19, x19, #0x1\n"
+ "add v1.8h, v1.8h, v16.8h\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 10f\n"
- "zip2 v22.8h, v31.8h, v27.8h\n"
- "zip2 v21.8h, v29.8h, v25.8h\n"
- "subs x20, x20, #0x1\n"
- "zip2 v20.8h, v30.8h, v26.8h\n"
- "zip2 v19.8h, v28.8h, v24.8h\n"
- "zip1 v18.8h, v22.8h, v21.8h\n"
- "zip1 v17.8h, v20.8h, v19.8h\n"
- "zip1 v16.8h, v18.8h, v17.8h\n"
+ "zip2 v24.8h, v30.8h, v24.8h\n"
+ "zip2 v20.8h, v28.8h, v21.8h\n"
+ "subs x19, x19, #0x1\n"
+ "zip1 v21.8h, v24.8h, v20.8h\n"
+ "zip2 v23.8h, v29.8h, v23.8h\n"
+ "zip2 v19.8h, v27.8h, v26.8h\n"
+ "zip1 v17.8h, v23.8h, v19.8h\n"
+ "zip1 v16.8h, v21.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x0]\n"
- "add v2.8h, v2.8h, v16.8h\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
+ "add v1.8h, v1.8h, v16.8h\n"
"beq 10f\n"
- "zip2 v16.8h, v18.8h, v17.8h\n"
- "subs x20, x20, #0x1\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "add v2.8h, v2.8h, v16.8h\n"
+ "zip2 v18.8h, v21.8h, v17.8h\n"
+ "str q18, [%x[out_ptr], #0x0]\n"
+ "subs x19, x19, #0x1\n"
+ "add v1.8h, v1.8h, v18.8h\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 10f\n"
- "zip2 v18.8h, v22.8h, v21.8h\n"
- "zip2 v17.8h, v20.8h, v19.8h\n"
- "zip1 v16.8h, v18.8h, v17.8h\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "add v2.8h, v2.8h, v16.8h\n"
+ "zip2 v20.8h, v24.8h, v20.8h\n"
+ "zip2 v16.8h, v23.8h, v19.8h\n"
+ "zip1 v17.8h, v20.8h, v16.8h\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
+ "add v1.8h, v1.8h, v17.8h\n"
"10:" // Odds skip
- "uaddw v1.4s, v1.4s, v2.4h\n"
- "uaddw2 v0.4s, v0.4s, v2.8h\n"
- "str q1, [%x[out_ptr], #0x0]\n"
- "str q0, [%x[out_ptr], #0x10]\n"
+ "uaddw v0.4s, v0.4s, v1.4h\n"
+ "str q0, [%x[out_ptr], #0x0]\n"
+ "uaddw2 v31.4s, v31.4s, v1.8h\n"
+ "str q31, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
: [out_ptr] "+&r" (out_ptr), [width] "+&r" (width)
: [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset)
- : "cc", "memory", "v0", "v1", "v2", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block2_bf16_bf16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block2_bf16_bf16.hpp
index d4d150456f..1e5d395667 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block2_bf16_bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block2_bf16_bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -31,45 +31,44 @@ void interleave_block<8, 2, VLType::None, false>(
)
{
__asm__ __volatile__(
- "ldr x28, [%x[in], #0x0]\n"
- "ldr x27, [%x[in], #0x8]\n"
+ "ldr x27, [%x[in], #0x0]\n"
"cmp %x[height], #0x8\n"
- "add x28, x28, %x[row_offset], LSL #1\n"
- "ldr x26, [%x[in], #0x10]\n"
- "ldr x25, [%x[in], #0x18]\n"
+ "ldr x26, [%x[in], #0x8]\n"
"add x27, x27, %x[row_offset], LSL #1\n"
+ "ldr x25, [%x[in], #0x10]\n"
+ "ldr x24, [%x[in], #0x18]\n"
"add x26, x26, %x[row_offset], LSL #1\n"
- "ldr x24, [%x[in], #0x20]\n"
- "ldr x23, [%x[in], #0x28]\n"
+ "ldr x23, [%x[in], #0x20]\n"
"add x25, x25, %x[row_offset], LSL #1\n"
+ "ldr x22, [%x[in], #0x28]\n"
+ "ldr x21, [%x[in], #0x30]\n"
"add x24, x24, %x[row_offset], LSL #1\n"
- "ldr x22, [%x[in], #0x30]\n"
- "ldr x21, [%x[in], #0x38]\n"
+ "ldr x20, [%x[in], #0x38]\n"
"add x23, x23, %x[row_offset], LSL #1\n"
"add x22, x22, %x[row_offset], LSL #1\n"
"add x21, x21, %x[row_offset], LSL #1\n"
+ "add x20, x20, %x[row_offset], LSL #1\n"
"beq 1f\n"
+ "mov x20, x27\n"
"cmp %x[height], #0x2\n"
- "csel x27, x27, x28, GE\n"
- "csel x26, x26, x28, GT\n"
+ "csel x26, x26, x27, GE\n"
+ "csel x25, x25, x27, GT\n"
"cmp %x[height], #0x4\n"
- "csel x25, x25, x28, GE\n"
- "csel x24, x24, x28, GT\n"
+ "csel x24, x24, x27, GE\n"
+ "csel x23, x23, x27, GT\n"
"cmp %x[height], #0x6\n"
- "mov x21, x28\n"
- "csel x23, x23, x28, GE\n"
- "csel x22, x22, x28, GT\n"
+ "csel x22, x22, x27, GE\n"
+ "csel x21, x21, x27, GT\n"
"1:" // no_pointer_adj
- "cmp %x[width], #0x8\n"
- "prfm pldl1keep, [x28, #0x0]\n"
"prfm pldl1keep, [x27, #0x0]\n"
+ "cmp %x[width], #0x8\n"
"prfm pldl1keep, [x26, #0x0]\n"
"prfm pldl1keep, [x25, #0x0]\n"
"prfm pldl1keep, [x24, #0x0]\n"
"prfm pldl1keep, [x23, #0x0]\n"
"prfm pldl1keep, [x22, #0x0]\n"
"prfm pldl1keep, [x21, #0x0]\n"
- "prfm pldl1keep, [x28, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x0]\n"
"prfm pldl1keep, [x27, #0x40]\n"
"prfm pldl1keep, [x26, #0x40]\n"
"prfm pldl1keep, [x25, #0x40]\n"
@@ -77,48 +76,49 @@ void interleave_block<8, 2, VLType::None, false>(
"prfm pldl1keep, [x23, #0x40]\n"
"prfm pldl1keep, [x22, #0x40]\n"
"prfm pldl1keep, [x21, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x40]\n"
"blt 3f\n"
"2:" // Main loop head
- "ldr q28, [x28], #0x10\n"
- "ldr q27, [x27], #0x10\n"
+ "ldr q28, [x27], #0x10\n"
"subs %x[width], %x[width], #0x8\n"
+ "ldr q29, [x26], #0x10\n"
"cmp %x[width], #0x8\n"
- "ldr q22, [x26], #0x10\n"
- "ldr q21, [x25], #0x10\n"
- "zip1 v26.4s, v28.4s, v22.4s\n"
- "zip1 v25.4s, v27.4s, v21.4s\n"
- "ldr q24, [x24], #0x10\n"
- "ldr q23, [x23], #0x10\n"
- "zip2 v22.4s, v28.4s, v22.4s\n"
- "zip2 v21.4s, v27.4s, v21.4s\n"
- "ldr q19, [x22], #0x10\n"
- "ldr q18, [x21], #0x10\n"
- "zip1 v20.4s, v24.4s, v19.4s\n"
- "zip1 v17.4s, v23.4s, v18.4s\n"
- "zip2 v19.4s, v24.4s, v19.4s\n"
- "zip2 v18.4s, v23.4s, v18.4s\n"
- "prfm pldl1keep, [x28, #0x70]\n"
+ "ldr q25, [x25], #0x10\n"
+ "zip1 v22.4s, v28.4s, v25.4s\n"
+ "ldr q21, [x24], #0x10\n"
+ "zip2 v28.4s, v28.4s, v25.4s\n"
+ "ldr q27, [x23], #0x10\n"
+ "ldr q26, [x22], #0x10\n"
+ "zip1 v20.4s, v29.4s, v21.4s\n"
+ "ldr q19, [x21], #0x10\n"
+ "zip2 v25.4s, v29.4s, v21.4s\n"
+ "ldr q24, [x20], #0x10\n"
+ "zip1 v23.4s, v22.4s, v20.4s\n"
"prfm pldl1keep, [x27, #0x70]\n"
+ "zip2 v22.4s, v22.4s, v20.4s\n"
"prfm pldl1keep, [x26, #0x70]\n"
+ "zip1 v21.4s, v28.4s, v25.4s\n"
"prfm pldl1keep, [x25, #0x70]\n"
- "zip1 v16.4s, v26.4s, v25.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
+ "zip1 v18.4s, v27.4s, v19.4s\n"
"prfm pldl1keep, [x24, #0x70]\n"
+ "zip1 v16.4s, v26.4s, v24.4s\n"
"prfm pldl1keep, [x23, #0x70]\n"
- "zip1 v16.4s, v20.4s, v17.4s\n"
- "str q16, [%x[out_ptr], #0x10]\n"
+ "zip1 v17.4s, v18.4s, v16.4s\n"
"prfm pldl1keep, [x22, #0x70]\n"
+ "zip2 v20.4s, v18.4s, v16.4s\n"
"prfm pldl1keep, [x21, #0x70]\n"
- "zip2 v16.4s, v26.4s, v25.4s\n"
- "str q16, [%x[out_ptr], #0x20]\n"
- "zip2 v16.4s, v20.4s, v17.4s\n"
- "str q16, [%x[out_ptr], #0x30]\n"
- "zip1 v16.4s, v22.4s, v21.4s\n"
- "str q16, [%x[out_ptr], #0x40]\n"
- "zip1 v16.4s, v19.4s, v18.4s\n"
- "zip2 v17.4s, v22.4s, v21.4s\n"
- "str q16, [%x[out_ptr], #0x50]\n"
- "zip2 v16.4s, v19.4s, v18.4s\n"
+ "zip2 v19.4s, v27.4s, v19.4s\n"
+ "prfm pldl1keep, [x20, #0x70]\n"
+ "zip2 v16.4s, v26.4s, v24.4s\n"
+ "str q23, [%x[out_ptr], #0x0]\n"
+ "zip1 v18.4s, v19.4s, v16.4s\n"
+ "str q17, [%x[out_ptr], #0x10]\n"
+ "zip2 v17.4s, v28.4s, v25.4s\n"
+ "str q22, [%x[out_ptr], #0x20]\n"
+ "zip2 v16.4s, v19.4s, v16.4s\n"
+ "str q20, [%x[out_ptr], #0x30]\n"
+ "str q21, [%x[out_ptr], #0x40]\n"
+ "str q18, [%x[out_ptr], #0x50]\n"
"str q17, [%x[out_ptr], #0x60]\n"
"str q16, [%x[out_ptr], #0x70]\n"
"add %x[out_ptr], %x[out_ptr], #0x80\n"
@@ -126,120 +126,120 @@ void interleave_block<8, 2, VLType::None, false>(
"3:" // Main loop skip
"cbz %x[width], 8f\n"
"tbz %x[width], #2, 5f\n"
- "ldr d28, [x28], #0x8\n"
- "ldr d27, [x27], #0x8\n"
- "ldr d22, [x26], #0x8\n"
- "ldr d21, [x25], #0x8\n"
- "ldr d24, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d19, [x22], #0x8\n"
- "ldr d18, [x21], #0x8\n"
+ "ldr d28, [x27], #0x8\n"
+ "ldr d29, [x26], #0x8\n"
+ "ldr d25, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d27, [x23], #0x8\n"
+ "ldr d26, [x22], #0x8\n"
+ "ldr d19, [x21], #0x8\n"
+ "ldr d24, [x20], #0x8\n"
"tbz %x[width], #1, 4f\n"
- "ld1 { v28.s }[2], [x28], #0x4\n"
- "ld1 { v27.s }[2], [x27], #0x4\n"
- "mov x20, #0x3\n"
- "ld1 { v22.s }[2], [x26], #0x4\n"
- "ld1 { v21.s }[2], [x25], #0x4\n"
- "ld1 { v24.s }[2], [x24], #0x4\n"
- "ld1 { v23.s }[2], [x23], #0x4\n"
- "ld1 { v19.s }[2], [x22], #0x4\n"
- "ld1 { v18.s }[2], [x21], #0x4\n"
+ "ld1 { v28.s }[2], [x27], #0x4\n"
+ "mov x19, #0x3\n"
+ "ld1 { v29.s }[2], [x26], #0x4\n"
+ "ld1 { v25.s }[2], [x25], #0x4\n"
+ "ld1 { v21.s }[2], [x24], #0x4\n"
+ "ld1 { v27.s }[2], [x23], #0x4\n"
+ "ld1 { v26.s }[2], [x22], #0x4\n"
+ "ld1 { v19.s }[2], [x21], #0x4\n"
+ "ld1 { v24.s }[2], [x20], #0x4\n"
"tbz %x[width], #0, 7f\n"
- "ld1 { v28.h }[6], [x28]\n"
- "ld1 { v27.h }[6], [x27]\n"
- "mov x20, #0x4\n"
- "ld1 { v22.h }[6], [x26]\n"
- "ld1 { v21.h }[6], [x25]\n"
- "ld1 { v24.h }[6], [x24]\n"
- "ld1 { v23.h }[6], [x23]\n"
- "ld1 { v19.h }[6], [x22]\n"
- "ld1 { v18.h }[6], [x21]\n"
+ "ld1 { v28.h }[6], [x27]\n"
+ "mov x19, #0x4\n"
+ "ld1 { v29.h }[6], [x26]\n"
+ "ld1 { v25.h }[6], [x25]\n"
+ "ld1 { v21.h }[6], [x24]\n"
+ "ld1 { v27.h }[6], [x23]\n"
+ "ld1 { v26.h }[6], [x22]\n"
+ "ld1 { v19.h }[6], [x21]\n"
+ "ld1 { v24.h }[6], [x20]\n"
"b 7f\n"
"4:" // odd_loads_1_4
- "mov x20, #0x2\n"
+ "mov x19, #0x2\n"
"tbz %x[width], #0, 7f\n"
- "ld1 { v28.h }[4], [x28]\n"
- "ld1 { v27.h }[4], [x27]\n"
- "mov x20, #0x3\n"
- "ld1 { v22.h }[4], [x26]\n"
- "ld1 { v21.h }[4], [x25]\n"
- "ld1 { v24.h }[4], [x24]\n"
- "ld1 { v23.h }[4], [x23]\n"
- "ld1 { v19.h }[4], [x22]\n"
- "ld1 { v18.h }[4], [x21]\n"
+ "ld1 { v28.h }[4], [x27]\n"
+ "ld1 { v29.h }[4], [x26]\n"
+ "mov x19, #0x3\n"
+ "ld1 { v25.h }[4], [x25]\n"
+ "ld1 { v21.h }[4], [x24]\n"
+ "ld1 { v27.h }[4], [x23]\n"
+ "ld1 { v26.h }[4], [x22]\n"
+ "ld1 { v19.h }[4], [x21]\n"
+ "ld1 { v24.h }[4], [x20]\n"
"b 7f\n"
"5:" // odd_loads_2_0
"tbz %x[width], #1, 6f\n"
- "ldr s28, [x28], #0x4\n"
- "ldr s27, [x27], #0x4\n"
- "mov x20, #0x1\n"
- "ldr s22, [x26], #0x4\n"
- "ldr s21, [x25], #0x4\n"
- "ldr s24, [x24], #0x4\n"
- "ldr s23, [x23], #0x4\n"
- "ldr s19, [x22], #0x4\n"
- "ldr s18, [x21], #0x4\n"
+ "ldr s28, [x27], #0x4\n"
+ "ldr s29, [x26], #0x4\n"
+ "mov x19, #0x1\n"
+ "ldr s25, [x25], #0x4\n"
+ "ldr s21, [x24], #0x4\n"
+ "ldr s27, [x23], #0x4\n"
+ "ldr s26, [x22], #0x4\n"
+ "ldr s19, [x21], #0x4\n"
+ "ldr s24, [x20], #0x4\n"
"tbz %x[width], #0, 7f\n"
- "ld1 { v28.h }[2], [x28]\n"
- "ld1 { v27.h }[2], [x27]\n"
- "mov x20, #0x2\n"
- "ld1 { v22.h }[2], [x26]\n"
- "ld1 { v21.h }[2], [x25]\n"
- "ld1 { v24.h }[2], [x24]\n"
- "ld1 { v23.h }[2], [x23]\n"
- "ld1 { v19.h }[2], [x22]\n"
- "ld1 { v18.h }[2], [x21]\n"
+ "ld1 { v28.h }[2], [x27]\n"
+ "mov x19, #0x2\n"
+ "ld1 { v29.h }[2], [x26]\n"
+ "ld1 { v25.h }[2], [x25]\n"
+ "ld1 { v21.h }[2], [x24]\n"
+ "ld1 { v27.h }[2], [x23]\n"
+ "ld1 { v26.h }[2], [x22]\n"
+ "ld1 { v19.h }[2], [x21]\n"
+ "ld1 { v24.h }[2], [x20]\n"
"b 7f\n"
"6:" // odd_loads_1_0
- "ldr h28, [x28, #0x0]\n"
- "ldr h27, [x27, #0x0]\n"
- "mov x20, #0x1\n"
- "ldr h22, [x26, #0x0]\n"
- "ldr h21, [x25, #0x0]\n"
- "ldr h24, [x24, #0x0]\n"
- "ldr h23, [x23, #0x0]\n"
- "ldr h19, [x22, #0x0]\n"
- "ldr h18, [x21, #0x0]\n"
+ "ldr h28, [x27, #0x0]\n"
+ "mov x19, #0x1\n"
+ "ldr h29, [x26, #0x0]\n"
+ "ldr h25, [x25, #0x0]\n"
+ "ldr h21, [x24, #0x0]\n"
+ "ldr h27, [x23, #0x0]\n"
+ "ldr h26, [x22, #0x0]\n"
+ "ldr h19, [x21, #0x0]\n"
+ "ldr h24, [x20, #0x0]\n"
"7:" // Odd load end
- "zip1 v26.4s, v28.4s, v22.4s\n"
- "zip1 v25.4s, v27.4s, v21.4s\n"
- "subs x20, x20, #0x1\n"
- "zip1 v20.4s, v24.4s, v19.4s\n"
- "zip1 v17.4s, v23.4s, v18.4s\n"
- "zip1 v16.4s, v26.4s, v25.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip1 v16.4s, v20.4s, v17.4s\n"
- "str q16, [%x[out_ptr], #0x10]\n"
+ "zip1 v22.4s, v28.4s, v25.4s\n"
+ "subs x19, x19, #0x1\n"
+ "zip1 v20.4s, v29.4s, v21.4s\n"
+ "zip1 v23.4s, v22.4s, v20.4s\n"
+ "str q23, [%x[out_ptr], #0x0]\n"
+ "zip1 v18.4s, v27.4s, v19.4s\n"
+ "zip1 v16.4s, v26.4s, v24.4s\n"
+ "zip1 v17.4s, v18.4s, v16.4s\n"
+ "str q17, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"beq 8f\n"
- "subs x20, x20, #0x1\n"
- "zip2 v16.4s, v26.4s, v25.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip2 v16.4s, v20.4s, v17.4s\n"
- "str q16, [%x[out_ptr], #0x10]\n"
+ "zip2 v22.4s, v22.4s, v20.4s\n"
+ "str q22, [%x[out_ptr], #0x0]\n"
+ "zip2 v20.4s, v18.4s, v16.4s\n"
+ "subs x19, x19, #0x1\n"
+ "str q20, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"beq 8f\n"
- "zip2 v22.4s, v28.4s, v22.4s\n"
- "zip2 v21.4s, v27.4s, v21.4s\n"
- "subs x20, x20, #0x1\n"
- "zip2 v19.4s, v24.4s, v19.4s\n"
- "zip2 v18.4s, v23.4s, v18.4s\n"
- "zip1 v16.4s, v22.4s, v21.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip1 v16.4s, v19.4s, v18.4s\n"
- "str q16, [%x[out_ptr], #0x10]\n"
+ "zip2 v28.4s, v28.4s, v25.4s\n"
+ "zip2 v25.4s, v29.4s, v21.4s\n"
+ "subs x19, x19, #0x1\n"
+ "zip1 v21.4s, v28.4s, v25.4s\n"
+ "str q21, [%x[out_ptr], #0x0]\n"
+ "zip2 v19.4s, v27.4s, v19.4s\n"
+ "zip2 v16.4s, v26.4s, v24.4s\n"
+ "zip1 v18.4s, v19.4s, v16.4s\n"
+ "str q18, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"beq 8f\n"
- "zip2 v17.4s, v22.4s, v21.4s\n"
+ "zip2 v17.4s, v28.4s, v25.4s\n"
"str q17, [%x[out_ptr], #0x0]\n"
- "zip2 v16.4s, v19.4s, v18.4s\n"
+ "zip2 v16.4s, v19.4s, v16.4s\n"
"str q16, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"8:" // Odds skip
: [out_ptr] "+&r" (out_ptr), [width] "+&r" (width)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset)
- : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block2_fp32_fp32.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block2_fp32_fp32.hpp
index 358b83ad1b..064207c0fa 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block2_fp32_fp32.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block2_fp32_fp32.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -31,45 +31,44 @@ void interleave_block<8, 2, VLType::None, false>(
)
{
__asm__ __volatile__(
- "ldr x28, [%x[in], #0x0]\n"
- "ldr x27, [%x[in], #0x8]\n"
+ "ldr x27, [%x[in], #0x0]\n"
"cmp %x[height], #0x8\n"
- "add x28, x28, %x[row_offset], LSL #2\n"
- "ldr x26, [%x[in], #0x10]\n"
- "ldr x25, [%x[in], #0x18]\n"
+ "ldr x26, [%x[in], #0x8]\n"
"add x27, x27, %x[row_offset], LSL #2\n"
+ "ldr x25, [%x[in], #0x10]\n"
+ "ldr x24, [%x[in], #0x18]\n"
"add x26, x26, %x[row_offset], LSL #2\n"
- "ldr x24, [%x[in], #0x20]\n"
- "ldr x23, [%x[in], #0x28]\n"
+ "ldr x23, [%x[in], #0x20]\n"
"add x25, x25, %x[row_offset], LSL #2\n"
+ "ldr x22, [%x[in], #0x28]\n"
+ "ldr x21, [%x[in], #0x30]\n"
"add x24, x24, %x[row_offset], LSL #2\n"
- "ldr x22, [%x[in], #0x30]\n"
- "ldr x21, [%x[in], #0x38]\n"
+ "ldr x20, [%x[in], #0x38]\n"
"add x23, x23, %x[row_offset], LSL #2\n"
"add x22, x22, %x[row_offset], LSL #2\n"
"add x21, x21, %x[row_offset], LSL #2\n"
+ "add x20, x20, %x[row_offset], LSL #2\n"
"beq 1f\n"
+ "mov x20, x27\n"
"cmp %x[height], #0x2\n"
- "csel x27, x27, x28, GE\n"
- "csel x26, x26, x28, GT\n"
+ "csel x26, x26, x27, GE\n"
+ "csel x25, x25, x27, GT\n"
"cmp %x[height], #0x4\n"
- "csel x25, x25, x28, GE\n"
- "csel x24, x24, x28, GT\n"
+ "csel x24, x24, x27, GE\n"
+ "csel x23, x23, x27, GT\n"
"cmp %x[height], #0x6\n"
- "mov x21, x28\n"
- "csel x23, x23, x28, GE\n"
- "csel x22, x22, x28, GT\n"
+ "csel x22, x22, x27, GE\n"
+ "csel x21, x21, x27, GT\n"
"1:" // no_pointer_adj
- "cmp %x[width], #0x4\n"
- "prfm pldl1keep, [x28, #0x0]\n"
"prfm pldl1keep, [x27, #0x0]\n"
+ "cmp %x[width], #0x4\n"
"prfm pldl1keep, [x26, #0x0]\n"
"prfm pldl1keep, [x25, #0x0]\n"
"prfm pldl1keep, [x24, #0x0]\n"
"prfm pldl1keep, [x23, #0x0]\n"
"prfm pldl1keep, [x22, #0x0]\n"
"prfm pldl1keep, [x21, #0x0]\n"
- "prfm pldl1keep, [x28, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x0]\n"
"prfm pldl1keep, [x27, #0x40]\n"
"prfm pldl1keep, [x26, #0x40]\n"
"prfm pldl1keep, [x25, #0x40]\n"
@@ -77,103 +76,104 @@ void interleave_block<8, 2, VLType::None, false>(
"prfm pldl1keep, [x23, #0x40]\n"
"prfm pldl1keep, [x22, #0x40]\n"
"prfm pldl1keep, [x21, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x40]\n"
"blt 3f\n"
"2:" // Main loop head
- "ldr q26, [x28], #0x10\n"
- "ldr q21, [x27], #0x10\n"
+ "ldr q27, [x27], #0x10\n"
"subs %x[width], %x[width], #0x4\n"
+ "ldr q24, [x26], #0x10\n"
+ "zip1 v26.2d, v27.2d, v24.2d\n"
+ "ldr q25, [x25], #0x10\n"
"cmp %x[width], #0x4\n"
- "ldr q25, [x26], #0x10\n"
- "ldr q24, [x25], #0x10\n"
- "zip1 v16.2d, v26.2d, v21.2d\n"
- "zip1 v18.2d, v25.2d, v24.2d\n"
- "ldr q23, [x24], #0x10\n"
- "ldr q22, [x23], #0x10\n"
- "zip1 v17.2d, v23.2d, v22.2d\n"
- "zip2 v21.2d, v26.2d, v21.2d\n"
- "ldr q20, [x22], #0x10\n"
- "ldr q19, [x21], #0x10\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip1 v16.2d, v20.2d, v19.2d\n"
- "prfm pldl1keep, [x28, #0x70]\n"
+ "zip2 v24.2d, v27.2d, v24.2d\n"
+ "ldr q21, [x24], #0x10\n"
+ "ldr q23, [x23], #0x10\n"
+ "zip1 v22.2d, v25.2d, v21.2d\n"
+ "ldr q18, [x22], #0x10\n"
+ "zip2 v21.2d, v25.2d, v21.2d\n"
+ "ldr q20, [x21], #0x10\n"
+ "ldr q16, [x20], #0x10\n"
+ "zip1 v19.2d, v23.2d, v18.2d\n"
"prfm pldl1keep, [x27, #0x70]\n"
- "str q18, [%x[out_ptr], #0x10]\n"
- "zip2 v18.2d, v25.2d, v24.2d\n"
+ "zip2 v18.2d, v23.2d, v18.2d\n"
"prfm pldl1keep, [x26, #0x70]\n"
+ "zip1 v17.2d, v20.2d, v16.2d\n"
"prfm pldl1keep, [x25, #0x70]\n"
- "str q17, [%x[out_ptr], #0x20]\n"
- "zip2 v17.2d, v23.2d, v22.2d\n"
+ "zip2 v16.2d, v20.2d, v16.2d\n"
"prfm pldl1keep, [x24, #0x70]\n"
"prfm pldl1keep, [x23, #0x70]\n"
- "str q16, [%x[out_ptr], #0x30]\n"
- "zip2 v16.2d, v20.2d, v19.2d\n"
"prfm pldl1keep, [x22, #0x70]\n"
"prfm pldl1keep, [x21, #0x70]\n"
- "str q21, [%x[out_ptr], #0x40]\n"
- "str q18, [%x[out_ptr], #0x50]\n"
- "str q17, [%x[out_ptr], #0x60]\n"
+ "prfm pldl1keep, [x20, #0x70]\n"
+ "str q26, [%x[out_ptr], #0x0]\n"
+ "str q22, [%x[out_ptr], #0x10]\n"
+ "str q19, [%x[out_ptr], #0x20]\n"
+ "str q17, [%x[out_ptr], #0x30]\n"
+ "str q24, [%x[out_ptr], #0x40]\n"
+ "str q21, [%x[out_ptr], #0x50]\n"
+ "str q18, [%x[out_ptr], #0x60]\n"
"str q16, [%x[out_ptr], #0x70]\n"
"add %x[out_ptr], %x[out_ptr], #0x80\n"
"bge 2b\n"
"3:" // Main loop skip
"cbz %x[width], 6f\n"
"tbz %x[width], #1, 4f\n"
- "ldr d26, [x28], #0x8\n"
- "ldr d21, [x27], #0x8\n"
- "mov x20, #0x1\n"
- "ldr d25, [x26], #0x8\n"
- "ldr d24, [x25], #0x8\n"
- "ldr d23, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d20, [x22], #0x8\n"
- "ldr d19, [x21], #0x8\n"
+ "ldr d27, [x27], #0x8\n"
+ "ldr d24, [x26], #0x8\n"
+ "mov x19, #0x1\n"
+ "ldr d25, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d23, [x23], #0x8\n"
+ "ldr d18, [x22], #0x8\n"
+ "ldr d20, [x21], #0x8\n"
+ "ldr d16, [x20], #0x8\n"
"tbz %x[width], #0, 5f\n"
- "ld1 { v26.s }[2], [x28]\n"
- "ld1 { v21.s }[2], [x27]\n"
- "mov x20, #0x2\n"
- "ld1 { v25.s }[2], [x26]\n"
- "ld1 { v24.s }[2], [x25]\n"
- "ld1 { v23.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
- "ld1 { v20.s }[2], [x22]\n"
- "ld1 { v19.s }[2], [x21]\n"
+ "ld1 { v27.s }[2], [x27]\n"
+ "mov x19, #0x2\n"
+ "ld1 { v24.s }[2], [x26]\n"
+ "ld1 { v25.s }[2], [x25]\n"
+ "ld1 { v21.s }[2], [x24]\n"
+ "ld1 { v23.s }[2], [x23]\n"
+ "ld1 { v18.s }[2], [x22]\n"
+ "ld1 { v20.s }[2], [x21]\n"
+ "ld1 { v16.s }[2], [x20]\n"
"b 5f\n"
"4:" // odd_loads_1_0
- "ldr s26, [x28, #0x0]\n"
- "ldr s21, [x27, #0x0]\n"
- "mov x20, #0x1\n"
- "ldr s25, [x26, #0x0]\n"
- "ldr s24, [x25, #0x0]\n"
- "ldr s23, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
- "ldr s20, [x22, #0x0]\n"
- "ldr s19, [x21, #0x0]\n"
+ "ldr s27, [x27, #0x0]\n"
+ "mov x19, #0x1\n"
+ "ldr s24, [x26, #0x0]\n"
+ "ldr s25, [x25, #0x0]\n"
+ "ldr s21, [x24, #0x0]\n"
+ "ldr s23, [x23, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
+ "ldr s20, [x21, #0x0]\n"
+ "ldr s16, [x20, #0x0]\n"
"5:" // Odd load end
- "subs x20, x20, #0x1\n"
- "zip1 v16.2d, v26.2d, v21.2d\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip1 v18.2d, v25.2d, v24.2d\n"
- "str q18, [%x[out_ptr], #0x10]\n"
- "zip1 v17.2d, v23.2d, v22.2d\n"
- "zip1 v16.2d, v20.2d, v19.2d\n"
- "str q17, [%x[out_ptr], #0x20]\n"
- "str q16, [%x[out_ptr], #0x30]\n"
+ "zip1 v26.2d, v27.2d, v24.2d\n"
+ "str q26, [%x[out_ptr], #0x0]\n"
+ "zip1 v22.2d, v25.2d, v21.2d\n"
+ "subs x19, x19, #0x1\n"
+ "zip1 v19.2d, v23.2d, v18.2d\n"
+ "str q22, [%x[out_ptr], #0x10]\n"
+ "zip1 v17.2d, v20.2d, v16.2d\n"
+ "str q19, [%x[out_ptr], #0x20]\n"
+ "str q17, [%x[out_ptr], #0x30]\n"
"add %x[out_ptr], %x[out_ptr], #0x40\n"
"beq 6f\n"
- "zip2 v21.2d, v26.2d, v21.2d\n"
- "str q21, [%x[out_ptr], #0x0]\n"
- "zip2 v18.2d, v25.2d, v24.2d\n"
- "str q18, [%x[out_ptr], #0x10]\n"
- "zip2 v17.2d, v23.2d, v22.2d\n"
- "zip2 v16.2d, v20.2d, v19.2d\n"
- "str q17, [%x[out_ptr], #0x20]\n"
+ "zip2 v24.2d, v27.2d, v24.2d\n"
+ "str q24, [%x[out_ptr], #0x0]\n"
+ "zip2 v21.2d, v25.2d, v21.2d\n"
+ "zip2 v18.2d, v23.2d, v18.2d\n"
+ "str q21, [%x[out_ptr], #0x10]\n"
+ "zip2 v16.2d, v20.2d, v16.2d\n"
+ "str q18, [%x[out_ptr], #0x20]\n"
"str q16, [%x[out_ptr], #0x30]\n"
"add %x[out_ptr], %x[out_ptr], #0x40\n"
"6:" // Odds skip
: [out_ptr] "+&r" (out_ptr), [width] "+&r" (width)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset)
- : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_bf16_bf16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_bf16_bf16.hpp
index d606d5a5b6..1f86722bc1 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_bf16_bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_bf16_bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -31,45 +31,44 @@ void interleave_block<8, 4, VLType::None, false>(
)
{
__asm__ __volatile__(
- "ldr x28, [%x[in], #0x0]\n"
- "ldr x27, [%x[in], #0x8]\n"
+ "ldr x27, [%x[in], #0x0]\n"
"cmp %x[height], #0x8\n"
- "add x28, x28, %x[row_offset], LSL #1\n"
- "ldr x26, [%x[in], #0x10]\n"
- "ldr x25, [%x[in], #0x18]\n"
+ "ldr x26, [%x[in], #0x8]\n"
"add x27, x27, %x[row_offset], LSL #1\n"
+ "ldr x25, [%x[in], #0x10]\n"
+ "ldr x24, [%x[in], #0x18]\n"
"add x26, x26, %x[row_offset], LSL #1\n"
- "ldr x24, [%x[in], #0x20]\n"
- "ldr x23, [%x[in], #0x28]\n"
+ "ldr x23, [%x[in], #0x20]\n"
"add x25, x25, %x[row_offset], LSL #1\n"
+ "ldr x22, [%x[in], #0x28]\n"
+ "ldr x21, [%x[in], #0x30]\n"
"add x24, x24, %x[row_offset], LSL #1\n"
- "ldr x22, [%x[in], #0x30]\n"
- "ldr x21, [%x[in], #0x38]\n"
+ "ldr x20, [%x[in], #0x38]\n"
"add x23, x23, %x[row_offset], LSL #1\n"
"add x22, x22, %x[row_offset], LSL #1\n"
"add x21, x21, %x[row_offset], LSL #1\n"
+ "add x20, x20, %x[row_offset], LSL #1\n"
"beq 1f\n"
+ "mov x20, x27\n"
"cmp %x[height], #0x2\n"
- "csel x27, x27, x28, GE\n"
- "csel x26, x26, x28, GT\n"
+ "csel x26, x26, x27, GE\n"
+ "csel x25, x25, x27, GT\n"
"cmp %x[height], #0x4\n"
- "csel x25, x25, x28, GE\n"
- "csel x24, x24, x28, GT\n"
+ "csel x24, x24, x27, GE\n"
+ "csel x23, x23, x27, GT\n"
"cmp %x[height], #0x6\n"
- "mov x21, x28\n"
- "csel x23, x23, x28, GE\n"
- "csel x22, x22, x28, GT\n"
+ "csel x22, x22, x27, GE\n"
+ "csel x21, x21, x27, GT\n"
"1:" // no_pointer_adj
- "cmp %x[width], #0x8\n"
- "prfm pldl1keep, [x28, #0x0]\n"
"prfm pldl1keep, [x27, #0x0]\n"
+ "cmp %x[width], #0x8\n"
"prfm pldl1keep, [x26, #0x0]\n"
"prfm pldl1keep, [x25, #0x0]\n"
"prfm pldl1keep, [x24, #0x0]\n"
"prfm pldl1keep, [x23, #0x0]\n"
"prfm pldl1keep, [x22, #0x0]\n"
"prfm pldl1keep, [x21, #0x0]\n"
- "prfm pldl1keep, [x28, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x0]\n"
"prfm pldl1keep, [x27, #0x40]\n"
"prfm pldl1keep, [x26, #0x40]\n"
"prfm pldl1keep, [x25, #0x40]\n"
@@ -77,145 +76,146 @@ void interleave_block<8, 4, VLType::None, false>(
"prfm pldl1keep, [x23, #0x40]\n"
"prfm pldl1keep, [x22, #0x40]\n"
"prfm pldl1keep, [x21, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x40]\n"
"blt 3f\n"
"2:" // Main loop head
- "ldr q26, [x28], #0x10\n"
- "ldr q21, [x27], #0x10\n"
+ "ldr q27, [x27], #0x10\n"
"subs %x[width], %x[width], #0x8\n"
+ "ldr q24, [x26], #0x10\n"
+ "zip1 v26.2d, v27.2d, v24.2d\n"
+ "ldr q25, [x25], #0x10\n"
"cmp %x[width], #0x8\n"
- "ldr q25, [x26], #0x10\n"
- "ldr q24, [x25], #0x10\n"
- "zip1 v16.2d, v26.2d, v21.2d\n"
- "zip1 v18.2d, v25.2d, v24.2d\n"
- "ldr q23, [x24], #0x10\n"
- "ldr q22, [x23], #0x10\n"
- "zip1 v17.2d, v23.2d, v22.2d\n"
- "zip2 v21.2d, v26.2d, v21.2d\n"
- "ldr q20, [x22], #0x10\n"
- "ldr q19, [x21], #0x10\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip1 v16.2d, v20.2d, v19.2d\n"
- "prfm pldl1keep, [x28, #0x70]\n"
+ "zip2 v24.2d, v27.2d, v24.2d\n"
+ "ldr q21, [x24], #0x10\n"
+ "ldr q23, [x23], #0x10\n"
+ "zip1 v22.2d, v25.2d, v21.2d\n"
+ "ldr q18, [x22], #0x10\n"
+ "zip2 v21.2d, v25.2d, v21.2d\n"
+ "ldr q20, [x21], #0x10\n"
+ "ldr q16, [x20], #0x10\n"
+ "zip1 v19.2d, v23.2d, v18.2d\n"
"prfm pldl1keep, [x27, #0x70]\n"
- "str q18, [%x[out_ptr], #0x10]\n"
- "zip2 v18.2d, v25.2d, v24.2d\n"
+ "zip2 v18.2d, v23.2d, v18.2d\n"
"prfm pldl1keep, [x26, #0x70]\n"
+ "zip1 v17.2d, v20.2d, v16.2d\n"
"prfm pldl1keep, [x25, #0x70]\n"
- "str q17, [%x[out_ptr], #0x20]\n"
- "zip2 v17.2d, v23.2d, v22.2d\n"
+ "zip2 v16.2d, v20.2d, v16.2d\n"
"prfm pldl1keep, [x24, #0x70]\n"
"prfm pldl1keep, [x23, #0x70]\n"
- "str q16, [%x[out_ptr], #0x30]\n"
- "zip2 v16.2d, v20.2d, v19.2d\n"
"prfm pldl1keep, [x22, #0x70]\n"
"prfm pldl1keep, [x21, #0x70]\n"
- "str q21, [%x[out_ptr], #0x40]\n"
- "str q18, [%x[out_ptr], #0x50]\n"
- "str q17, [%x[out_ptr], #0x60]\n"
+ "prfm pldl1keep, [x20, #0x70]\n"
+ "str q26, [%x[out_ptr], #0x0]\n"
+ "str q22, [%x[out_ptr], #0x10]\n"
+ "str q19, [%x[out_ptr], #0x20]\n"
+ "str q17, [%x[out_ptr], #0x30]\n"
+ "str q24, [%x[out_ptr], #0x40]\n"
+ "str q21, [%x[out_ptr], #0x50]\n"
+ "str q18, [%x[out_ptr], #0x60]\n"
"str q16, [%x[out_ptr], #0x70]\n"
"add %x[out_ptr], %x[out_ptr], #0x80\n"
"bge 2b\n"
"3:" // Main loop skip
"cbz %x[width], 8f\n"
"tbz %x[width], #2, 5f\n"
- "ldr d26, [x28], #0x8\n"
- "ldr d21, [x27], #0x8\n"
- "ldr d25, [x26], #0x8\n"
- "ldr d24, [x25], #0x8\n"
- "ldr d23, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d20, [x22], #0x8\n"
- "ldr d19, [x21], #0x8\n"
+ "ldr d27, [x27], #0x8\n"
+ "ldr d24, [x26], #0x8\n"
+ "ldr d25, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d23, [x23], #0x8\n"
+ "ldr d18, [x22], #0x8\n"
+ "ldr d20, [x21], #0x8\n"
+ "ldr d16, [x20], #0x8\n"
"tbz %x[width], #1, 4f\n"
- "ld1 { v26.s }[2], [x28], #0x4\n"
- "ld1 { v21.s }[2], [x27], #0x4\n"
- "mov x20, #0x2\n"
- "ld1 { v25.s }[2], [x26], #0x4\n"
- "ld1 { v24.s }[2], [x25], #0x4\n"
- "ld1 { v23.s }[2], [x24], #0x4\n"
- "ld1 { v22.s }[2], [x23], #0x4\n"
- "ld1 { v20.s }[2], [x22], #0x4\n"
- "ld1 { v19.s }[2], [x21], #0x4\n"
+ "ld1 { v27.s }[2], [x27], #0x4\n"
+ "mov x19, #0x2\n"
+ "ld1 { v24.s }[2], [x26], #0x4\n"
+ "ld1 { v25.s }[2], [x25], #0x4\n"
+ "ld1 { v21.s }[2], [x24], #0x4\n"
+ "ld1 { v23.s }[2], [x23], #0x4\n"
+ "ld1 { v18.s }[2], [x22], #0x4\n"
+ "ld1 { v20.s }[2], [x21], #0x4\n"
+ "ld1 { v16.s }[2], [x20], #0x4\n"
"tbz %x[width], #0, 7f\n"
- "ld1 { v26.h }[6], [x28]\n"
- "ld1 { v21.h }[6], [x27]\n"
- "ld1 { v25.h }[6], [x26]\n"
- "ld1 { v24.h }[6], [x25]\n"
- "ld1 { v23.h }[6], [x24]\n"
- "ld1 { v22.h }[6], [x23]\n"
- "ld1 { v20.h }[6], [x22]\n"
- "ld1 { v19.h }[6], [x21]\n"
+ "ld1 { v27.h }[6], [x27]\n"
+ "ld1 { v24.h }[6], [x26]\n"
+ "ld1 { v25.h }[6], [x25]\n"
+ "ld1 { v21.h }[6], [x24]\n"
+ "ld1 { v23.h }[6], [x23]\n"
+ "ld1 { v18.h }[6], [x22]\n"
+ "ld1 { v20.h }[6], [x21]\n"
+ "ld1 { v16.h }[6], [x20]\n"
"b 7f\n"
"4:" // odd_loads_1_4
- "mov x20, #0x1\n"
+ "mov x19, #0x1\n"
"tbz %x[width], #0, 7f\n"
- "ld1 { v26.h }[4], [x28]\n"
- "ld1 { v21.h }[4], [x27]\n"
- "mov x20, #0x2\n"
- "ld1 { v25.h }[4], [x26]\n"
- "ld1 { v24.h }[4], [x25]\n"
- "ld1 { v23.h }[4], [x24]\n"
- "ld1 { v22.h }[4], [x23]\n"
- "ld1 { v20.h }[4], [x22]\n"
- "ld1 { v19.h }[4], [x21]\n"
+ "ld1 { v27.h }[4], [x27]\n"
+ "ld1 { v24.h }[4], [x26]\n"
+ "mov x19, #0x2\n"
+ "ld1 { v25.h }[4], [x25]\n"
+ "ld1 { v21.h }[4], [x24]\n"
+ "ld1 { v23.h }[4], [x23]\n"
+ "ld1 { v18.h }[4], [x22]\n"
+ "ld1 { v20.h }[4], [x21]\n"
+ "ld1 { v16.h }[4], [x20]\n"
"b 7f\n"
"5:" // odd_loads_2_0
"tbz %x[width], #1, 6f\n"
- "ldr s26, [x28], #0x4\n"
- "ldr s21, [x27], #0x4\n"
- "mov x20, #0x1\n"
- "ldr s25, [x26], #0x4\n"
- "ldr s24, [x25], #0x4\n"
- "ldr s23, [x24], #0x4\n"
- "ldr s22, [x23], #0x4\n"
- "ldr s20, [x22], #0x4\n"
- "ldr s19, [x21], #0x4\n"
+ "ldr s27, [x27], #0x4\n"
+ "ldr s24, [x26], #0x4\n"
+ "mov x19, #0x1\n"
+ "ldr s25, [x25], #0x4\n"
+ "ldr s21, [x24], #0x4\n"
+ "ldr s23, [x23], #0x4\n"
+ "ldr s18, [x22], #0x4\n"
+ "ldr s20, [x21], #0x4\n"
+ "ldr s16, [x20], #0x4\n"
"tbz %x[width], #0, 7f\n"
- "ld1 { v26.h }[2], [x28]\n"
- "ld1 { v21.h }[2], [x27]\n"
- "ld1 { v25.h }[2], [x26]\n"
- "ld1 { v24.h }[2], [x25]\n"
- "ld1 { v23.h }[2], [x24]\n"
- "ld1 { v22.h }[2], [x23]\n"
- "ld1 { v20.h }[2], [x22]\n"
- "ld1 { v19.h }[2], [x21]\n"
+ "ld1 { v27.h }[2], [x27]\n"
+ "ld1 { v24.h }[2], [x26]\n"
+ "ld1 { v25.h }[2], [x25]\n"
+ "ld1 { v21.h }[2], [x24]\n"
+ "ld1 { v23.h }[2], [x23]\n"
+ "ld1 { v18.h }[2], [x22]\n"
+ "ld1 { v20.h }[2], [x21]\n"
+ "ld1 { v16.h }[2], [x20]\n"
"b 7f\n"
"6:" // odd_loads_1_0
- "ldr h26, [x28, #0x0]\n"
- "ldr h21, [x27, #0x0]\n"
- "mov x20, #0x1\n"
- "ldr h25, [x26, #0x0]\n"
- "ldr h24, [x25, #0x0]\n"
- "ldr h23, [x24, #0x0]\n"
- "ldr h22, [x23, #0x0]\n"
- "ldr h20, [x22, #0x0]\n"
- "ldr h19, [x21, #0x0]\n"
+ "ldr h27, [x27, #0x0]\n"
+ "mov x19, #0x1\n"
+ "ldr h24, [x26, #0x0]\n"
+ "ldr h25, [x25, #0x0]\n"
+ "ldr h21, [x24, #0x0]\n"
+ "ldr h23, [x23, #0x0]\n"
+ "ldr h18, [x22, #0x0]\n"
+ "ldr h20, [x21, #0x0]\n"
+ "ldr h16, [x20, #0x0]\n"
"7:" // Odd load end
- "subs x20, x20, #0x1\n"
- "zip1 v16.2d, v26.2d, v21.2d\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip1 v18.2d, v25.2d, v24.2d\n"
- "str q18, [%x[out_ptr], #0x10]\n"
- "zip1 v17.2d, v23.2d, v22.2d\n"
- "zip1 v16.2d, v20.2d, v19.2d\n"
- "str q17, [%x[out_ptr], #0x20]\n"
- "str q16, [%x[out_ptr], #0x30]\n"
+ "zip1 v26.2d, v27.2d, v24.2d\n"
+ "str q26, [%x[out_ptr], #0x0]\n"
+ "zip1 v22.2d, v25.2d, v21.2d\n"
+ "subs x19, x19, #0x1\n"
+ "zip1 v19.2d, v23.2d, v18.2d\n"
+ "str q22, [%x[out_ptr], #0x10]\n"
+ "zip1 v17.2d, v20.2d, v16.2d\n"
+ "str q19, [%x[out_ptr], #0x20]\n"
+ "str q17, [%x[out_ptr], #0x30]\n"
"add %x[out_ptr], %x[out_ptr], #0x40\n"
"beq 8f\n"
- "zip2 v21.2d, v26.2d, v21.2d\n"
- "str q21, [%x[out_ptr], #0x0]\n"
- "zip2 v18.2d, v25.2d, v24.2d\n"
- "str q18, [%x[out_ptr], #0x10]\n"
- "zip2 v17.2d, v23.2d, v22.2d\n"
- "zip2 v16.2d, v20.2d, v19.2d\n"
- "str q17, [%x[out_ptr], #0x20]\n"
+ "zip2 v24.2d, v27.2d, v24.2d\n"
+ "str q24, [%x[out_ptr], #0x0]\n"
+ "zip2 v21.2d, v25.2d, v21.2d\n"
+ "zip2 v18.2d, v23.2d, v18.2d\n"
+ "str q21, [%x[out_ptr], #0x10]\n"
+ "zip2 v16.2d, v20.2d, v16.2d\n"
+ "str q18, [%x[out_ptr], #0x20]\n"
"str q16, [%x[out_ptr], #0x30]\n"
"add %x[out_ptr], %x[out_ptr], #0x40\n"
"8:" // Odds skip
: [out_ptr] "+&r" (out_ptr), [width] "+&r" (width)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset)
- : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_fp32_bf16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_fp32_bf16.hpp
index dfec14358b..533682c647 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_fp32_bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_fp32_bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -31,45 +31,44 @@ void interleave_block<8, 4, VLType::None, false>(
)
{
__asm__ __volatile__(
- "ldr x28, [%x[in], #0x0]\n"
- "ldr x27, [%x[in], #0x8]\n"
+ "ldr x27, [%x[in], #0x0]\n"
"cmp %x[height], #0x8\n"
- "add x28, x28, %x[row_offset], LSL #2\n"
- "ldr x26, [%x[in], #0x10]\n"
- "ldr x25, [%x[in], #0x18]\n"
+ "ldr x26, [%x[in], #0x8]\n"
"add x27, x27, %x[row_offset], LSL #2\n"
+ "ldr x25, [%x[in], #0x10]\n"
+ "ldr x24, [%x[in], #0x18]\n"
"add x26, x26, %x[row_offset], LSL #2\n"
- "ldr x24, [%x[in], #0x20]\n"
- "ldr x23, [%x[in], #0x28]\n"
+ "ldr x23, [%x[in], #0x20]\n"
"add x25, x25, %x[row_offset], LSL #2\n"
+ "ldr x22, [%x[in], #0x28]\n"
+ "ldr x21, [%x[in], #0x30]\n"
"add x24, x24, %x[row_offset], LSL #2\n"
- "ldr x22, [%x[in], #0x30]\n"
- "ldr x21, [%x[in], #0x38]\n"
+ "ldr x20, [%x[in], #0x38]\n"
"add x23, x23, %x[row_offset], LSL #2\n"
"add x22, x22, %x[row_offset], LSL #2\n"
"add x21, x21, %x[row_offset], LSL #2\n"
+ "add x20, x20, %x[row_offset], LSL #2\n"
"beq 1f\n"
+ "mov x20, x27\n"
"cmp %x[height], #0x2\n"
- "csel x27, x27, x28, GE\n"
- "csel x26, x26, x28, GT\n"
+ "csel x26, x26, x27, GE\n"
+ "csel x25, x25, x27, GT\n"
"cmp %x[height], #0x4\n"
- "csel x25, x25, x28, GE\n"
- "csel x24, x24, x28, GT\n"
+ "csel x24, x24, x27, GE\n"
+ "csel x23, x23, x27, GT\n"
"cmp %x[height], #0x6\n"
- "mov x21, x28\n"
- "csel x23, x23, x28, GE\n"
- "csel x22, x22, x28, GT\n"
+ "csel x22, x22, x27, GE\n"
+ "csel x21, x21, x27, GT\n"
"1:" // no_pointer_adj
- "cmp %x[width], #0x4\n"
- "prfm pldl1keep, [x28, #0x0]\n"
"prfm pldl1keep, [x27, #0x0]\n"
+ "cmp %x[width], #0x4\n"
"prfm pldl1keep, [x26, #0x0]\n"
"prfm pldl1keep, [x25, #0x0]\n"
"prfm pldl1keep, [x24, #0x0]\n"
"prfm pldl1keep, [x23, #0x0]\n"
"prfm pldl1keep, [x22, #0x0]\n"
"prfm pldl1keep, [x21, #0x0]\n"
- "prfm pldl1keep, [x28, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x0]\n"
"prfm pldl1keep, [x27, #0x40]\n"
"prfm pldl1keep, [x26, #0x40]\n"
"prfm pldl1keep, [x25, #0x40]\n"
@@ -77,91 +76,92 @@ void interleave_block<8, 4, VLType::None, false>(
"prfm pldl1keep, [x23, #0x40]\n"
"prfm pldl1keep, [x22, #0x40]\n"
"prfm pldl1keep, [x21, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x40]\n"
"blt 3f\n"
"2:" // Main loop head
- "ldr q23, [x28], #0x10\n"
- "ldr q22, [x26], #0x10\n"
+ "ldr q23, [x27], #0x10\n"
".inst 0x0ea16af7 // bfcvtn v23.4h, v23.4s\n"
- ".inst 0x0ea16ad6 // bfcvtn v22.4h, v22.4s\n"
- "ldr q21, [x24], #0x10\n"
- "ldr q20, [x22], #0x10\n"
- ".inst 0x0ea16ab5 // bfcvtn v21.4h, v21.4s\n"
- ".inst 0x0ea16a94 // bfcvtn v20.4h, v20.4s\n"
- "ldr q19, [x27], #0x10\n"
- "ldr q18, [x25], #0x10\n"
+ "ldr q22, [x26], #0x10\n"
"subs %x[width], %x[width], #0x4\n"
+ ".inst 0x4ea16ad7 // bfcvtn2 v23.8h, v22.4s\n"
+ "ldr q21, [x25], #0x10\n"
"cmp %x[width], #0x4\n"
- "ldr q17, [x23], #0x10\n"
+ ".inst 0x0ea16ab5 // bfcvtn v21.4h, v21.4s\n"
+ "ldr q20, [x24], #0x10\n"
+ "ldr q18, [x23], #0x10\n"
+ ".inst 0x4ea16a95 // bfcvtn2 v21.8h, v20.4s\n"
+ "ldr q19, [x22], #0x10\n"
+ ".inst 0x0ea16a52 // bfcvtn v18.4h, v18.4s\n"
"ldr q16, [x21], #0x10\n"
- ".inst 0x4ea16a77 // bfcvtn2 v23.8h, v19.4s\n"
- ".inst 0x4ea16a56 // bfcvtn2 v22.8h, v18.4s\n"
- "prfm pldl1keep, [x28, #0x70]\n"
+ "ldr q17, [x20], #0x10\n"
+ ".inst 0x4ea16a72 // bfcvtn2 v18.8h, v19.4s\n"
"prfm pldl1keep, [x27, #0x70]\n"
- ".inst 0x4ea16a35 // bfcvtn2 v21.8h, v17.4s\n"
- ".inst 0x4ea16a14 // bfcvtn2 v20.8h, v16.4s\n"
"prfm pldl1keep, [x26, #0x70]\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
"prfm pldl1keep, [x25, #0x70]\n"
- "str q23, [%x[out_ptr], #0x0]\n"
+ ".inst 0x4ea16a30 // bfcvtn2 v16.8h, v17.4s\n"
"prfm pldl1keep, [x24, #0x70]\n"
"prfm pldl1keep, [x23, #0x70]\n"
- "str q22, [%x[out_ptr], #0x10]\n"
"prfm pldl1keep, [x22, #0x70]\n"
"prfm pldl1keep, [x21, #0x70]\n"
- "str q21, [%x[out_ptr], #0x20]\n"
- "str q20, [%x[out_ptr], #0x30]\n"
+ "prfm pldl1keep, [x20, #0x70]\n"
+ "str q23, [%x[out_ptr], #0x0]\n"
+ "str q21, [%x[out_ptr], #0x10]\n"
+ "str q18, [%x[out_ptr], #0x20]\n"
+ "str q16, [%x[out_ptr], #0x30]\n"
"add %x[out_ptr], %x[out_ptr], #0x40\n"
"bge 2b\n"
"3:" // Main loop skip
"cbz %x[width], 6f\n"
"tbz %x[width], #1, 4f\n"
- "ldr d23, [x28], #0x8\n"
- "ldr d19, [x27], #0x8\n"
- "mov x20, #0x1\n"
+ "ldr d23, [x27], #0x8\n"
"ldr d22, [x26], #0x8\n"
- "ldr d18, [x25], #0x8\n"
- "ldr d21, [x24], #0x8\n"
- "ldr d17, [x23], #0x8\n"
- "ldr d20, [x22], #0x8\n"
+ "mov x19, #0x1\n"
+ "ldr d21, [x25], #0x8\n"
+ "ldr d20, [x24], #0x8\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
"ldr d16, [x21], #0x8\n"
+ "ldr d17, [x20], #0x8\n"
"tbz %x[width], #0, 5f\n"
- "ld1 { v23.s }[2], [x28]\n"
- "ld1 { v19.s }[2], [x27]\n"
+ "ld1 { v23.s }[2], [x27]\n"
"ld1 { v22.s }[2], [x26]\n"
- "ld1 { v18.s }[2], [x25]\n"
- "ld1 { v21.s }[2], [x24]\n"
- "ld1 { v17.s }[2], [x23]\n"
- "ld1 { v20.s }[2], [x22]\n"
+ "ld1 { v21.s }[2], [x25]\n"
+ "ld1 { v20.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v19.s }[2], [x22]\n"
"ld1 { v16.s }[2], [x21]\n"
+ "ld1 { v17.s }[2], [x20]\n"
"b 5f\n"
"4:" // odd_loads_1_0
- "ldr s23, [x28, #0x0]\n"
- "ldr s19, [x27, #0x0]\n"
- "mov x20, #0x1\n"
+ "ldr s23, [x27, #0x0]\n"
+ "mov x19, #0x1\n"
"ldr s22, [x26, #0x0]\n"
- "ldr s18, [x25, #0x0]\n"
- "ldr s21, [x24, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
- "ldr s20, [x22, #0x0]\n"
+ "ldr s21, [x25, #0x0]\n"
+ "ldr s20, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
"ldr s16, [x21, #0x0]\n"
+ "ldr s17, [x20, #0x0]\n"
"5:" // Odd load end
".inst 0x0ea16af7 // bfcvtn v23.4h, v23.4s\n"
- ".inst 0x0ea16ad6 // bfcvtn v22.4h, v22.4s\n"
".inst 0x0ea16ab5 // bfcvtn v21.4h, v21.4s\n"
- ".inst 0x0ea16a94 // bfcvtn v20.4h, v20.4s\n"
- ".inst 0x4ea16a77 // bfcvtn2 v23.8h, v19.4s\n"
- ".inst 0x4ea16a56 // bfcvtn2 v22.8h, v18.4s\n"
+ ".inst 0x0ea16a52 // bfcvtn v18.4h, v18.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ ".inst 0x4ea16ad7 // bfcvtn2 v23.8h, v22.4s\n"
"str q23, [%x[out_ptr], #0x0]\n"
- ".inst 0x4ea16a35 // bfcvtn2 v21.8h, v17.4s\n"
- ".inst 0x4ea16a14 // bfcvtn2 v20.8h, v16.4s\n"
- "str q22, [%x[out_ptr], #0x10]\n"
- "str q21, [%x[out_ptr], #0x20]\n"
- "str q20, [%x[out_ptr], #0x30]\n"
+ ".inst 0x4ea16a95 // bfcvtn2 v21.8h, v20.4s\n"
+ ".inst 0x4ea16a72 // bfcvtn2 v18.8h, v19.4s\n"
+ "str q21, [%x[out_ptr], #0x10]\n"
+ ".inst 0x4ea16a30 // bfcvtn2 v16.8h, v17.4s\n"
+ "str q18, [%x[out_ptr], #0x20]\n"
+ "str q16, [%x[out_ptr], #0x30]\n"
"add %x[out_ptr], %x[out_ptr], #0x40\n"
"6:" // Odds skip
: [out_ptr] "+&r" (out_ptr), [width] "+&r" (width)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset)
- : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_s8_s8.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_s8_s8.hpp
index 54f15f8a5c..659d9947e2 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_s8_s8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_s8_s8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -31,45 +31,44 @@ void interleave_block<8, 4, VLType::None, false>(
)
{
__asm__ __volatile__(
- "ldr x28, [%x[in], #0x0]\n"
- "ldr x27, [%x[in], #0x8]\n"
+ "ldr x27, [%x[in], #0x0]\n"
"cmp %x[height], #0x8\n"
- "add x28, x28, %x[row_offset]\n"
- "ldr x26, [%x[in], #0x10]\n"
- "ldr x25, [%x[in], #0x18]\n"
+ "ldr x26, [%x[in], #0x8]\n"
"add x27, x27, %x[row_offset]\n"
+ "ldr x25, [%x[in], #0x10]\n"
+ "ldr x24, [%x[in], #0x18]\n"
"add x26, x26, %x[row_offset]\n"
- "ldr x24, [%x[in], #0x20]\n"
- "ldr x23, [%x[in], #0x28]\n"
+ "ldr x23, [%x[in], #0x20]\n"
"add x25, x25, %x[row_offset]\n"
+ "ldr x22, [%x[in], #0x28]\n"
+ "ldr x21, [%x[in], #0x30]\n"
"add x24, x24, %x[row_offset]\n"
- "ldr x22, [%x[in], #0x30]\n"
- "ldr x21, [%x[in], #0x38]\n"
+ "ldr x20, [%x[in], #0x38]\n"
"add x23, x23, %x[row_offset]\n"
"add x22, x22, %x[row_offset]\n"
"add x21, x21, %x[row_offset]\n"
+ "add x20, x20, %x[row_offset]\n"
"beq 1f\n"
+ "mov x20, x27\n"
"cmp %x[height], #0x2\n"
- "csel x27, x27, x28, GE\n"
- "csel x26, x26, x28, GT\n"
+ "csel x26, x26, x27, GE\n"
+ "csel x25, x25, x27, GT\n"
"cmp %x[height], #0x4\n"
- "csel x25, x25, x28, GE\n"
- "csel x24, x24, x28, GT\n"
+ "csel x24, x24, x27, GE\n"
+ "csel x23, x23, x27, GT\n"
"cmp %x[height], #0x6\n"
- "mov x21, x28\n"
- "csel x23, x23, x28, GE\n"
- "csel x22, x22, x28, GT\n"
+ "csel x22, x22, x27, GE\n"
+ "csel x21, x21, x27, GT\n"
"1:" // no_pointer_adj
- "cmp %x[width], #0x10\n"
- "prfm pldl1keep, [x28, #0x0]\n"
"prfm pldl1keep, [x27, #0x0]\n"
+ "cmp %x[width], #0x10\n"
"prfm pldl1keep, [x26, #0x0]\n"
"prfm pldl1keep, [x25, #0x0]\n"
"prfm pldl1keep, [x24, #0x0]\n"
"prfm pldl1keep, [x23, #0x0]\n"
"prfm pldl1keep, [x22, #0x0]\n"
"prfm pldl1keep, [x21, #0x0]\n"
- "prfm pldl1keep, [x28, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x0]\n"
"prfm pldl1keep, [x27, #0x40]\n"
"prfm pldl1keep, [x26, #0x40]\n"
"prfm pldl1keep, [x25, #0x40]\n"
@@ -77,48 +76,49 @@ void interleave_block<8, 4, VLType::None, false>(
"prfm pldl1keep, [x23, #0x40]\n"
"prfm pldl1keep, [x22, #0x40]\n"
"prfm pldl1keep, [x21, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x40]\n"
"blt 3f\n"
"2:" // Main loop head
- "ldr q28, [x28], #0x10\n"
- "ldr q27, [x27], #0x10\n"
+ "ldr q28, [x27], #0x10\n"
"subs %x[width], %x[width], #0x10\n"
+ "ldr q29, [x26], #0x10\n"
"cmp %x[width], #0x10\n"
- "ldr q22, [x26], #0x10\n"
- "ldr q21, [x25], #0x10\n"
- "zip1 v26.4s, v28.4s, v22.4s\n"
- "zip1 v25.4s, v27.4s, v21.4s\n"
- "ldr q24, [x24], #0x10\n"
- "ldr q23, [x23], #0x10\n"
- "zip2 v22.4s, v28.4s, v22.4s\n"
- "zip2 v21.4s, v27.4s, v21.4s\n"
- "ldr q19, [x22], #0x10\n"
- "ldr q18, [x21], #0x10\n"
- "zip1 v20.4s, v24.4s, v19.4s\n"
- "zip1 v17.4s, v23.4s, v18.4s\n"
- "zip2 v19.4s, v24.4s, v19.4s\n"
- "zip2 v18.4s, v23.4s, v18.4s\n"
- "prfm pldl1keep, [x28, #0x70]\n"
+ "ldr q25, [x25], #0x10\n"
+ "zip1 v22.4s, v28.4s, v25.4s\n"
+ "ldr q21, [x24], #0x10\n"
+ "zip2 v28.4s, v28.4s, v25.4s\n"
+ "ldr q27, [x23], #0x10\n"
+ "ldr q26, [x22], #0x10\n"
+ "zip1 v20.4s, v29.4s, v21.4s\n"
+ "ldr q19, [x21], #0x10\n"
+ "zip2 v25.4s, v29.4s, v21.4s\n"
+ "ldr q24, [x20], #0x10\n"
+ "zip1 v23.4s, v22.4s, v20.4s\n"
"prfm pldl1keep, [x27, #0x70]\n"
+ "zip2 v22.4s, v22.4s, v20.4s\n"
"prfm pldl1keep, [x26, #0x70]\n"
+ "zip1 v21.4s, v28.4s, v25.4s\n"
"prfm pldl1keep, [x25, #0x70]\n"
- "zip1 v16.4s, v26.4s, v25.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
+ "zip1 v18.4s, v27.4s, v19.4s\n"
"prfm pldl1keep, [x24, #0x70]\n"
+ "zip1 v16.4s, v26.4s, v24.4s\n"
"prfm pldl1keep, [x23, #0x70]\n"
- "zip1 v16.4s, v20.4s, v17.4s\n"
- "str q16, [%x[out_ptr], #0x10]\n"
+ "zip1 v17.4s, v18.4s, v16.4s\n"
"prfm pldl1keep, [x22, #0x70]\n"
+ "zip2 v20.4s, v18.4s, v16.4s\n"
"prfm pldl1keep, [x21, #0x70]\n"
- "zip2 v16.4s, v26.4s, v25.4s\n"
- "str q16, [%x[out_ptr], #0x20]\n"
- "zip2 v16.4s, v20.4s, v17.4s\n"
- "str q16, [%x[out_ptr], #0x30]\n"
- "zip1 v16.4s, v22.4s, v21.4s\n"
- "str q16, [%x[out_ptr], #0x40]\n"
- "zip1 v16.4s, v19.4s, v18.4s\n"
- "zip2 v17.4s, v22.4s, v21.4s\n"
- "str q16, [%x[out_ptr], #0x50]\n"
- "zip2 v16.4s, v19.4s, v18.4s\n"
+ "zip2 v19.4s, v27.4s, v19.4s\n"
+ "prfm pldl1keep, [x20, #0x70]\n"
+ "zip2 v16.4s, v26.4s, v24.4s\n"
+ "str q23, [%x[out_ptr], #0x0]\n"
+ "zip1 v18.4s, v19.4s, v16.4s\n"
+ "str q17, [%x[out_ptr], #0x10]\n"
+ "zip2 v17.4s, v28.4s, v25.4s\n"
+ "str q22, [%x[out_ptr], #0x20]\n"
+ "zip2 v16.4s, v19.4s, v16.4s\n"
+ "str q20, [%x[out_ptr], #0x30]\n"
+ "str q21, [%x[out_ptr], #0x40]\n"
+ "str q18, [%x[out_ptr], #0x50]\n"
"str q17, [%x[out_ptr], #0x60]\n"
"str q16, [%x[out_ptr], #0x70]\n"
"add %x[out_ptr], %x[out_ptr], #0x80\n"
@@ -126,204 +126,204 @@ void interleave_block<8, 4, VLType::None, false>(
"3:" // Main loop skip
"cbz %x[width], 12f\n"
"tbz %x[width], #3, 7f\n"
- "ldr d28, [x28], #0x8\n"
- "ldr d27, [x27], #0x8\n"
- "ldr d22, [x26], #0x8\n"
- "ldr d21, [x25], #0x8\n"
- "ldr d24, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d19, [x22], #0x8\n"
- "ldr d18, [x21], #0x8\n"
+ "ldr d28, [x27], #0x8\n"
+ "ldr d29, [x26], #0x8\n"
+ "ldr d25, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d27, [x23], #0x8\n"
+ "ldr d26, [x22], #0x8\n"
+ "ldr d19, [x21], #0x8\n"
+ "ldr d24, [x20], #0x8\n"
"tbz %x[width], #2, 5f\n"
- "ld1 { v28.s }[2], [x28], #0x4\n"
- "ld1 { v27.s }[2], [x27], #0x4\n"
- "ld1 { v22.s }[2], [x26], #0x4\n"
- "ld1 { v21.s }[2], [x25], #0x4\n"
- "ld1 { v24.s }[2], [x24], #0x4\n"
- "ld1 { v23.s }[2], [x23], #0x4\n"
- "ld1 { v19.s }[2], [x22], #0x4\n"
- "ld1 { v18.s }[2], [x21], #0x4\n"
+ "ld1 { v28.s }[2], [x27], #0x4\n"
+ "ld1 { v29.s }[2], [x26], #0x4\n"
+ "ld1 { v25.s }[2], [x25], #0x4\n"
+ "ld1 { v21.s }[2], [x24], #0x4\n"
+ "ld1 { v27.s }[2], [x23], #0x4\n"
+ "ld1 { v26.s }[2], [x22], #0x4\n"
+ "ld1 { v19.s }[2], [x21], #0x4\n"
+ "ld1 { v24.s }[2], [x20], #0x4\n"
"tbz %x[width], #1, 4f\n"
- "ld1 { v28.h }[6], [x28], #0x2\n"
- "ld1 { v27.h }[6], [x27], #0x2\n"
- "mov x20, #0x4\n"
- "ld1 { v22.h }[6], [x26], #0x2\n"
- "ld1 { v21.h }[6], [x25], #0x2\n"
- "ld1 { v24.h }[6], [x24], #0x2\n"
- "ld1 { v23.h }[6], [x23], #0x2\n"
- "ld1 { v19.h }[6], [x22], #0x2\n"
- "ld1 { v18.h }[6], [x21], #0x2\n"
+ "ld1 { v28.h }[6], [x27], #0x2\n"
+ "mov x19, #0x4\n"
+ "ld1 { v29.h }[6], [x26], #0x2\n"
+ "ld1 { v25.h }[6], [x25], #0x2\n"
+ "ld1 { v21.h }[6], [x24], #0x2\n"
+ "ld1 { v27.h }[6], [x23], #0x2\n"
+ "ld1 { v26.h }[6], [x22], #0x2\n"
+ "ld1 { v19.h }[6], [x21], #0x2\n"
+ "ld1 { v24.h }[6], [x20], #0x2\n"
"tbz %x[width], #0, 11f\n"
- "ld1 { v28.b }[14], [x28]\n"
- "ld1 { v27.b }[14], [x27]\n"
- "ld1 { v22.b }[14], [x26]\n"
- "ld1 { v21.b }[14], [x25]\n"
- "ld1 { v24.b }[14], [x24]\n"
- "ld1 { v23.b }[14], [x23]\n"
- "ld1 { v19.b }[14], [x22]\n"
- "ld1 { v18.b }[14], [x21]\n"
+ "ld1 { v28.b }[14], [x27]\n"
+ "ld1 { v29.b }[14], [x26]\n"
+ "ld1 { v25.b }[14], [x25]\n"
+ "ld1 { v21.b }[14], [x24]\n"
+ "ld1 { v27.b }[14], [x23]\n"
+ "ld1 { v26.b }[14], [x22]\n"
+ "ld1 { v19.b }[14], [x21]\n"
+ "ld1 { v24.b }[14], [x20]\n"
"b 11f\n"
"4:" // odd_loads_1_12
- "mov x20, #0x3\n"
+ "mov x19, #0x3\n"
"tbz %x[width], #0, 11f\n"
- "ld1 { v28.b }[12], [x28]\n"
- "ld1 { v27.b }[12], [x27]\n"
- "mov x20, #0x4\n"
- "ld1 { v22.b }[12], [x26]\n"
- "ld1 { v21.b }[12], [x25]\n"
- "ld1 { v24.b }[12], [x24]\n"
- "ld1 { v23.b }[12], [x23]\n"
- "ld1 { v19.b }[12], [x22]\n"
- "ld1 { v18.b }[12], [x21]\n"
+ "ld1 { v28.b }[12], [x27]\n"
+ "ld1 { v29.b }[12], [x26]\n"
+ "mov x19, #0x4\n"
+ "ld1 { v25.b }[12], [x25]\n"
+ "ld1 { v21.b }[12], [x24]\n"
+ "ld1 { v27.b }[12], [x23]\n"
+ "ld1 { v26.b }[12], [x22]\n"
+ "ld1 { v19.b }[12], [x21]\n"
+ "ld1 { v24.b }[12], [x20]\n"
"b 11f\n"
"5:" // odd_loads_2_8
"tbz %x[width], #1, 6f\n"
- "ld1 { v28.h }[4], [x28], #0x2\n"
- "ld1 { v27.h }[4], [x27], #0x2\n"
- "mov x20, #0x3\n"
- "ld1 { v22.h }[4], [x26], #0x2\n"
- "ld1 { v21.h }[4], [x25], #0x2\n"
- "ld1 { v24.h }[4], [x24], #0x2\n"
- "ld1 { v23.h }[4], [x23], #0x2\n"
- "ld1 { v19.h }[4], [x22], #0x2\n"
- "ld1 { v18.h }[4], [x21], #0x2\n"
+ "ld1 { v28.h }[4], [x27], #0x2\n"
+ "ld1 { v29.h }[4], [x26], #0x2\n"
+ "mov x19, #0x3\n"
+ "ld1 { v25.h }[4], [x25], #0x2\n"
+ "ld1 { v21.h }[4], [x24], #0x2\n"
+ "ld1 { v27.h }[4], [x23], #0x2\n"
+ "ld1 { v26.h }[4], [x22], #0x2\n"
+ "ld1 { v19.h }[4], [x21], #0x2\n"
+ "ld1 { v24.h }[4], [x20], #0x2\n"
"tbz %x[width], #0, 11f\n"
- "ld1 { v28.b }[10], [x28]\n"
- "ld1 { v27.b }[10], [x27]\n"
- "ld1 { v22.b }[10], [x26]\n"
- "ld1 { v21.b }[10], [x25]\n"
- "ld1 { v24.b }[10], [x24]\n"
- "ld1 { v23.b }[10], [x23]\n"
- "ld1 { v19.b }[10], [x22]\n"
- "ld1 { v18.b }[10], [x21]\n"
+ "ld1 { v28.b }[10], [x27]\n"
+ "ld1 { v29.b }[10], [x26]\n"
+ "ld1 { v25.b }[10], [x25]\n"
+ "ld1 { v21.b }[10], [x24]\n"
+ "ld1 { v27.b }[10], [x23]\n"
+ "ld1 { v26.b }[10], [x22]\n"
+ "ld1 { v19.b }[10], [x21]\n"
+ "ld1 { v24.b }[10], [x20]\n"
"b 11f\n"
"6:" // odd_loads_1_8
- "mov x20, #0x2\n"
+ "mov x19, #0x2\n"
"tbz %x[width], #0, 11f\n"
- "ld1 { v28.b }[8], [x28]\n"
- "ld1 { v27.b }[8], [x27]\n"
- "mov x20, #0x3\n"
- "ld1 { v22.b }[8], [x26]\n"
- "ld1 { v21.b }[8], [x25]\n"
- "ld1 { v24.b }[8], [x24]\n"
- "ld1 { v23.b }[8], [x23]\n"
- "ld1 { v19.b }[8], [x22]\n"
- "ld1 { v18.b }[8], [x21]\n"
+ "ld1 { v28.b }[8], [x27]\n"
+ "ld1 { v29.b }[8], [x26]\n"
+ "mov x19, #0x3\n"
+ "ld1 { v25.b }[8], [x25]\n"
+ "ld1 { v21.b }[8], [x24]\n"
+ "ld1 { v27.b }[8], [x23]\n"
+ "ld1 { v26.b }[8], [x22]\n"
+ "ld1 { v19.b }[8], [x21]\n"
+ "ld1 { v24.b }[8], [x20]\n"
"b 11f\n"
"7:" // odd_loads_4_0
"tbz %x[width], #2, 9f\n"
- "ldr s28, [x28], #0x4\n"
- "ldr s27, [x27], #0x4\n"
- "ldr s22, [x26], #0x4\n"
- "ldr s21, [x25], #0x4\n"
- "ldr s24, [x24], #0x4\n"
- "ldr s23, [x23], #0x4\n"
- "ldr s19, [x22], #0x4\n"
- "ldr s18, [x21], #0x4\n"
+ "ldr s28, [x27], #0x4\n"
+ "ldr s29, [x26], #0x4\n"
+ "ldr s25, [x25], #0x4\n"
+ "ldr s21, [x24], #0x4\n"
+ "ldr s27, [x23], #0x4\n"
+ "ldr s26, [x22], #0x4\n"
+ "ldr s19, [x21], #0x4\n"
+ "ldr s24, [x20], #0x4\n"
"tbz %x[width], #1, 8f\n"
- "ld1 { v28.h }[2], [x28], #0x2\n"
- "ld1 { v27.h }[2], [x27], #0x2\n"
- "mov x20, #0x2\n"
- "ld1 { v22.h }[2], [x26], #0x2\n"
- "ld1 { v21.h }[2], [x25], #0x2\n"
- "ld1 { v24.h }[2], [x24], #0x2\n"
- "ld1 { v23.h }[2], [x23], #0x2\n"
- "ld1 { v19.h }[2], [x22], #0x2\n"
- "ld1 { v18.h }[2], [x21], #0x2\n"
+ "ld1 { v28.h }[2], [x27], #0x2\n"
+ "mov x19, #0x2\n"
+ "ld1 { v29.h }[2], [x26], #0x2\n"
+ "ld1 { v25.h }[2], [x25], #0x2\n"
+ "ld1 { v21.h }[2], [x24], #0x2\n"
+ "ld1 { v27.h }[2], [x23], #0x2\n"
+ "ld1 { v26.h }[2], [x22], #0x2\n"
+ "ld1 { v19.h }[2], [x21], #0x2\n"
+ "ld1 { v24.h }[2], [x20], #0x2\n"
"tbz %x[width], #0, 11f\n"
- "ld1 { v28.b }[6], [x28]\n"
- "ld1 { v27.b }[6], [x27]\n"
- "ld1 { v22.b }[6], [x26]\n"
- "ld1 { v21.b }[6], [x25]\n"
- "ld1 { v24.b }[6], [x24]\n"
- "ld1 { v23.b }[6], [x23]\n"
- "ld1 { v19.b }[6], [x22]\n"
- "ld1 { v18.b }[6], [x21]\n"
+ "ld1 { v28.b }[6], [x27]\n"
+ "ld1 { v29.b }[6], [x26]\n"
+ "ld1 { v25.b }[6], [x25]\n"
+ "ld1 { v21.b }[6], [x24]\n"
+ "ld1 { v27.b }[6], [x23]\n"
+ "ld1 { v26.b }[6], [x22]\n"
+ "ld1 { v19.b }[6], [x21]\n"
+ "ld1 { v24.b }[6], [x20]\n"
"b 11f\n"
"8:" // odd_loads_1_4
- "mov x20, #0x1\n"
+ "mov x19, #0x1\n"
"tbz %x[width], #0, 11f\n"
- "ld1 { v28.b }[4], [x28]\n"
- "ld1 { v27.b }[4], [x27]\n"
- "mov x20, #0x2\n"
- "ld1 { v22.b }[4], [x26]\n"
- "ld1 { v21.b }[4], [x25]\n"
- "ld1 { v24.b }[4], [x24]\n"
- "ld1 { v23.b }[4], [x23]\n"
- "ld1 { v19.b }[4], [x22]\n"
- "ld1 { v18.b }[4], [x21]\n"
+ "ld1 { v28.b }[4], [x27]\n"
+ "ld1 { v29.b }[4], [x26]\n"
+ "mov x19, #0x2\n"
+ "ld1 { v25.b }[4], [x25]\n"
+ "ld1 { v21.b }[4], [x24]\n"
+ "ld1 { v27.b }[4], [x23]\n"
+ "ld1 { v26.b }[4], [x22]\n"
+ "ld1 { v19.b }[4], [x21]\n"
+ "ld1 { v24.b }[4], [x20]\n"
"b 11f\n"
"9:" // odd_loads_2_0
"tbz %x[width], #1, 10f\n"
- "ldr h28, [x28], #0x2\n"
- "ldr h27, [x27], #0x2\n"
- "mov x20, #0x1\n"
- "ldr h22, [x26], #0x2\n"
- "ldr h21, [x25], #0x2\n"
- "ldr h24, [x24], #0x2\n"
- "ldr h23, [x23], #0x2\n"
- "ldr h19, [x22], #0x2\n"
- "ldr h18, [x21], #0x2\n"
+ "ldr h28, [x27], #0x2\n"
+ "ldr h29, [x26], #0x2\n"
+ "mov x19, #0x1\n"
+ "ldr h25, [x25], #0x2\n"
+ "ldr h21, [x24], #0x2\n"
+ "ldr h27, [x23], #0x2\n"
+ "ldr h26, [x22], #0x2\n"
+ "ldr h19, [x21], #0x2\n"
+ "ldr h24, [x20], #0x2\n"
"tbz %x[width], #0, 11f\n"
- "ld1 { v28.b }[2], [x28]\n"
- "ld1 { v27.b }[2], [x27]\n"
- "ld1 { v22.b }[2], [x26]\n"
- "ld1 { v21.b }[2], [x25]\n"
- "ld1 { v24.b }[2], [x24]\n"
- "ld1 { v23.b }[2], [x23]\n"
- "ld1 { v19.b }[2], [x22]\n"
- "ld1 { v18.b }[2], [x21]\n"
+ "ld1 { v28.b }[2], [x27]\n"
+ "ld1 { v29.b }[2], [x26]\n"
+ "ld1 { v25.b }[2], [x25]\n"
+ "ld1 { v21.b }[2], [x24]\n"
+ "ld1 { v27.b }[2], [x23]\n"
+ "ld1 { v26.b }[2], [x22]\n"
+ "ld1 { v19.b }[2], [x21]\n"
+ "ld1 { v24.b }[2], [x20]\n"
"b 11f\n"
"10:" // odd_loads_1_0
- "ldr b28, [x28, #0x0]\n"
- "ldr b27, [x27, #0x0]\n"
- "mov x20, #0x1\n"
- "ldr b22, [x26, #0x0]\n"
- "ldr b21, [x25, #0x0]\n"
- "ldr b24, [x24, #0x0]\n"
- "ldr b23, [x23, #0x0]\n"
- "ldr b19, [x22, #0x0]\n"
- "ldr b18, [x21, #0x0]\n"
+ "ldr b28, [x27, #0x0]\n"
+ "mov x19, #0x1\n"
+ "ldr b29, [x26, #0x0]\n"
+ "ldr b25, [x25, #0x0]\n"
+ "ldr b21, [x24, #0x0]\n"
+ "ldr b27, [x23, #0x0]\n"
+ "ldr b26, [x22, #0x0]\n"
+ "ldr b19, [x21, #0x0]\n"
+ "ldr b24, [x20, #0x0]\n"
"11:" // Odd load end
- "zip1 v26.4s, v28.4s, v22.4s\n"
- "zip1 v25.4s, v27.4s, v21.4s\n"
- "subs x20, x20, #0x1\n"
- "zip1 v20.4s, v24.4s, v19.4s\n"
- "zip1 v17.4s, v23.4s, v18.4s\n"
- "zip1 v16.4s, v26.4s, v25.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip1 v16.4s, v20.4s, v17.4s\n"
- "str q16, [%x[out_ptr], #0x10]\n"
+ "zip1 v22.4s, v28.4s, v25.4s\n"
+ "subs x19, x19, #0x1\n"
+ "zip1 v20.4s, v29.4s, v21.4s\n"
+ "zip1 v23.4s, v22.4s, v20.4s\n"
+ "str q23, [%x[out_ptr], #0x0]\n"
+ "zip1 v18.4s, v27.4s, v19.4s\n"
+ "zip1 v16.4s, v26.4s, v24.4s\n"
+ "zip1 v17.4s, v18.4s, v16.4s\n"
+ "str q17, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"beq 12f\n"
- "subs x20, x20, #0x1\n"
- "zip2 v16.4s, v26.4s, v25.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip2 v16.4s, v20.4s, v17.4s\n"
- "str q16, [%x[out_ptr], #0x10]\n"
+ "zip2 v22.4s, v22.4s, v20.4s\n"
+ "str q22, [%x[out_ptr], #0x0]\n"
+ "zip2 v20.4s, v18.4s, v16.4s\n"
+ "subs x19, x19, #0x1\n"
+ "str q20, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"beq 12f\n"
- "zip2 v22.4s, v28.4s, v22.4s\n"
- "zip2 v21.4s, v27.4s, v21.4s\n"
- "subs x20, x20, #0x1\n"
- "zip2 v19.4s, v24.4s, v19.4s\n"
- "zip2 v18.4s, v23.4s, v18.4s\n"
- "zip1 v16.4s, v22.4s, v21.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip1 v16.4s, v19.4s, v18.4s\n"
- "str q16, [%x[out_ptr], #0x10]\n"
+ "zip2 v28.4s, v28.4s, v25.4s\n"
+ "zip2 v25.4s, v29.4s, v21.4s\n"
+ "subs x19, x19, #0x1\n"
+ "zip1 v21.4s, v28.4s, v25.4s\n"
+ "str q21, [%x[out_ptr], #0x0]\n"
+ "zip2 v19.4s, v27.4s, v19.4s\n"
+ "zip2 v16.4s, v26.4s, v24.4s\n"
+ "zip1 v18.4s, v19.4s, v16.4s\n"
+ "str q18, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"beq 12f\n"
- "zip2 v17.4s, v22.4s, v21.4s\n"
+ "zip2 v17.4s, v28.4s, v25.4s\n"
"str q17, [%x[out_ptr], #0x0]\n"
- "zip2 v16.4s, v19.4s, v18.4s\n"
+ "zip2 v16.4s, v19.4s, v16.4s\n"
"str q16, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"12:" // Odds skip
: [out_ptr] "+&r" (out_ptr), [width] "+&r" (width)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset)
- : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_s8_s8_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_s8_s8_summing.hpp
index 2db54126c0..dfec94c952 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_s8_s8_summing.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_s8_s8_summing.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -31,41 +31,40 @@ void interleave_block<8, 4, VLType::None, true>(
)
{
__asm__ __volatile__(
- "ldr x28, [%x[in], #0x0]\n"
- "ldr x27, [%x[in], #0x8]\n"
- "cmp %x[height], #0x8\n"
- "mov x20, #0x0\n"
- "ldr x26, [%x[in], #0x10]\n"
- "ldr x25, [%x[in], #0x18]\n"
- "movi v2.8h, #0x0\n"
"movi v1.8h, #0x0\n"
- "ldr x24, [%x[in], #0x20]\n"
- "ldr x23, [%x[in], #0x28]\n"
- "movi v0.4s, #0x0\n"
+ "ldr x27, [%x[in], #0x0]\n"
+ "mov x19, #0x0\n"
+ "movi v0.8h, #0x0\n"
+ "ldr x26, [%x[in], #0x8]\n"
+ "cmp %x[height], #0x8\n"
"movi v31.4s, #0x0\n"
- "ldr x22, [%x[in], #0x30]\n"
- "ldr x21, [%x[in], #0x38]\n"
- "add x28, x28, %x[row_offset]\n"
+ "ldr x25, [%x[in], #0x10]\n"
"add x27, x27, %x[row_offset]\n"
+ "movi v30.4s, #0x0\n"
+ "ldr x24, [%x[in], #0x18]\n"
+ "ldr x23, [%x[in], #0x20]\n"
"add x26, x26, %x[row_offset]\n"
+ "ldr x22, [%x[in], #0x28]\n"
"add x25, x25, %x[row_offset]\n"
+ "ldr x21, [%x[in], #0x30]\n"
"add x24, x24, %x[row_offset]\n"
+ "ldr x20, [%x[in], #0x38]\n"
"add x23, x23, %x[row_offset]\n"
"add x22, x22, %x[row_offset]\n"
"add x21, x21, %x[row_offset]\n"
+ "add x20, x20, %x[row_offset]\n"
"beq 1f\n"
+ "mov x20, x27\n"
"cmp %x[height], #0x2\n"
- "csel x27, x27, x28, GE\n"
- "csel x26, x26, x28, GT\n"
+ "csel x26, x26, x27, GE\n"
+ "csel x25, x25, x27, GT\n"
"cmp %x[height], #0x4\n"
- "csel x25, x25, x28, GE\n"
- "csel x24, x24, x28, GT\n"
+ "csel x24, x24, x27, GE\n"
+ "csel x23, x23, x27, GT\n"
"cmp %x[height], #0x6\n"
- "mov x21, x28\n"
- "csel x23, x23, x28, GE\n"
- "csel x22, x22, x28, GT\n"
+ "csel x22, x22, x27, GE\n"
+ "csel x21, x21, x27, GT\n"
"1:" // no_pointer_adj
- "prfm pldl1keep, [x28, #0x0]\n"
"prfm pldl1keep, [x27, #0x0]\n"
"prfm pldl1keep, [x26, #0x0]\n"
"prfm pldl1keep, [x25, #0x0]\n"
@@ -73,7 +72,7 @@ void interleave_block<8, 4, VLType::None, true>(
"prfm pldl1keep, [x23, #0x0]\n"
"prfm pldl1keep, [x22, #0x0]\n"
"prfm pldl1keep, [x21, #0x0]\n"
- "prfm pldl1keep, [x28, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x0]\n"
"prfm pldl1keep, [x27, #0x40]\n"
"prfm pldl1keep, [x26, #0x40]\n"
"prfm pldl1keep, [x25, #0x40]\n"
@@ -81,70 +80,71 @@ void interleave_block<8, 4, VLType::None, true>(
"prfm pldl1keep, [x23, #0x40]\n"
"prfm pldl1keep, [x22, #0x40]\n"
"prfm pldl1keep, [x21, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x40]\n"
"cbnz %w[first], 2f\n"
"sub %x[out_ptr], %x[out_ptr], #0x20\n"
- "ld1 { v0.4s }, [%x[out_ptr]]\n"
- "ldr q31, [%x[out_ptr], #0x10]\n"
+ "ld1 { v31.4s }, [%x[out_ptr]]\n"
+ "ldr q30, [%x[out_ptr], #0x10]\n"
"2:" // first_pass
"cmp %x[width], #0x10\n"
"blt 5f\n"
"3:" // Main loop head
- "cmp x20, #0x1e\n"
+ "cmp x19, #0x1e\n"
"ble 4f\n"
- "sadalp v0.4s, v2.8h\n"
- "movi v2.8h, #0x0\n"
- "mov x20, #0x0\n"
"sadalp v31.4s, v1.8h\n"
"movi v1.8h, #0x0\n"
+ "sadalp v30.4s, v0.8h\n"
+ "movi v0.8h, #0x0\n"
+ "mov x19, #0x0\n"
"4:" // no_accumulate_16
- "ldr q30, [x28], #0x10\n"
- "ldr q29, [x27], #0x10\n"
+ "ldr q28, [x27], #0x10\n"
+ "add x19, x19, #0x1\n"
+ "ldr q29, [x26], #0x10\n"
"subs %x[width], %x[width], #0x10\n"
+ "ldr q25, [x25], #0x10\n"
+ "zip1 v22.4s, v28.4s, v25.4s\n"
+ "ldr q21, [x24], #0x10\n"
"cmp %x[width], #0x10\n"
- "ldr q28, [x26], #0x10\n"
- "ldr q27, [x25], #0x10\n"
- "zip1 v22.4s, v30.4s, v28.4s\n"
- "zip1 v21.4s, v29.4s, v27.4s\n"
- "ldr q20, [x24], #0x10\n"
- "ldr q26, [x23], #0x10\n"
- "zip1 v25.4s, v22.4s, v21.4s\n"
- "sadalp v2.8h, v25.16b\n"
- "ldr q19, [x22], #0x10\n"
- "ldr q18, [x21], #0x10\n"
- "zip1 v17.4s, v20.4s, v19.4s\n"
- "zip1 v16.4s, v26.4s, v18.4s\n"
- "zip1 v24.4s, v17.4s, v16.4s\n"
- "sadalp v1.8h, v24.16b\n"
- "prfm pldl1keep, [x28, #0x70]\n"
+ "zip2 v28.4s, v28.4s, v25.4s\n"
+ "ldr q27, [x23], #0x10\n"
+ "ldr q26, [x22], #0x10\n"
+ "zip1 v20.4s, v29.4s, v21.4s\n"
+ "ldr q19, [x21], #0x10\n"
+ "zip2 v25.4s, v29.4s, v21.4s\n"
+ "ldr q24, [x20], #0x10\n"
+ "zip1 v23.4s, v22.4s, v20.4s\n"
"prfm pldl1keep, [x27, #0x70]\n"
- "zip2 v23.4s, v22.4s, v21.4s\n"
- "zip2 v22.4s, v17.4s, v16.4s\n"
+ "sadalp v1.8h, v23.16b\n"
+ "zip2 v22.4s, v22.4s, v20.4s\n"
"prfm pldl1keep, [x26, #0x70]\n"
+ "sadalp v1.8h, v22.16b\n"
+ "zip1 v18.4s, v27.4s, v19.4s\n"
"prfm pldl1keep, [x25, #0x70]\n"
- "zip2 v21.4s, v30.4s, v28.4s\n"
- "zip2 v17.4s, v29.4s, v27.4s\n"
+ "zip1 v16.4s, v26.4s, v24.4s\n"
"prfm pldl1keep, [x24, #0x70]\n"
+ "zip1 v21.4s, v28.4s, v25.4s\n"
"prfm pldl1keep, [x23, #0x70]\n"
- "zip2 v20.4s, v20.4s, v19.4s\n"
- "zip2 v16.4s, v26.4s, v18.4s\n"
+ "sadalp v1.8h, v21.16b\n"
+ "zip1 v17.4s, v18.4s, v16.4s\n"
"prfm pldl1keep, [x22, #0x70]\n"
+ "zip2 v20.4s, v18.4s, v16.4s\n"
"prfm pldl1keep, [x21, #0x70]\n"
- "sadalp v2.8h, v23.16b\n"
- "sadalp v1.8h, v22.16b\n"
- "str q25, [%x[out_ptr], #0x0]\n"
- "add x20, x20, #0x1\n"
- "zip1 v19.4s, v21.4s, v17.4s\n"
- "zip1 v18.4s, v20.4s, v16.4s\n"
- "str q24, [%x[out_ptr], #0x10]\n"
- "sadalp v2.8h, v19.16b\n"
- "sadalp v1.8h, v18.16b\n"
- "str q23, [%x[out_ptr], #0x20]\n"
- "zip2 v17.4s, v21.4s, v17.4s\n"
- "zip2 v16.4s, v20.4s, v16.4s\n"
- "str q22, [%x[out_ptr], #0x30]\n"
- "str q19, [%x[out_ptr], #0x40]\n"
- "sadalp v2.8h, v17.16b\n"
- "sadalp v1.8h, v16.16b\n"
+ "sadalp v0.8h, v17.16b\n"
+ "zip2 v19.4s, v27.4s, v19.4s\n"
+ "prfm pldl1keep, [x20, #0x70]\n"
+ "sadalp v0.8h, v20.16b\n"
+ "zip2 v16.4s, v26.4s, v24.4s\n"
+ "str q23, [%x[out_ptr], #0x0]\n"
+ "zip1 v18.4s, v19.4s, v16.4s\n"
+ "str q17, [%x[out_ptr], #0x10]\n"
+ "sadalp v0.8h, v18.16b\n"
+ "zip2 v17.4s, v28.4s, v25.4s\n"
+ "str q22, [%x[out_ptr], #0x20]\n"
+ "zip2 v16.4s, v19.4s, v16.4s\n"
+ "str q20, [%x[out_ptr], #0x30]\n"
+ "sadalp v1.8h, v17.16b\n"
+ "str q21, [%x[out_ptr], #0x40]\n"
+ "sadalp v0.8h, v16.16b\n"
"str q18, [%x[out_ptr], #0x50]\n"
"str q17, [%x[out_ptr], #0x60]\n"
"str q16, [%x[out_ptr], #0x70]\n"
@@ -153,216 +153,216 @@ void interleave_block<8, 4, VLType::None, true>(
"5:" // Main loop skip
"cbz %x[width], 14f\n"
"tbz %x[width], #3, 9f\n"
- "ldr d30, [x28], #0x8\n"
- "ldr d29, [x27], #0x8\n"
- "ldr d28, [x26], #0x8\n"
- "ldr d27, [x25], #0x8\n"
- "ldr d20, [x24], #0x8\n"
- "ldr d26, [x23], #0x8\n"
- "ldr d19, [x22], #0x8\n"
- "ldr d18, [x21], #0x8\n"
+ "ldr d28, [x27], #0x8\n"
+ "ldr d29, [x26], #0x8\n"
+ "ldr d25, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d27, [x23], #0x8\n"
+ "ldr d26, [x22], #0x8\n"
+ "ldr d19, [x21], #0x8\n"
+ "ldr d24, [x20], #0x8\n"
"tbz %x[width], #2, 7f\n"
- "ld1 { v30.s }[2], [x28], #0x4\n"
- "ld1 { v29.s }[2], [x27], #0x4\n"
- "ld1 { v28.s }[2], [x26], #0x4\n"
- "ld1 { v27.s }[2], [x25], #0x4\n"
- "ld1 { v20.s }[2], [x24], #0x4\n"
- "ld1 { v26.s }[2], [x23], #0x4\n"
- "ld1 { v19.s }[2], [x22], #0x4\n"
- "ld1 { v18.s }[2], [x21], #0x4\n"
+ "ld1 { v28.s }[2], [x27], #0x4\n"
+ "ld1 { v29.s }[2], [x26], #0x4\n"
+ "ld1 { v25.s }[2], [x25], #0x4\n"
+ "ld1 { v21.s }[2], [x24], #0x4\n"
+ "ld1 { v27.s }[2], [x23], #0x4\n"
+ "ld1 { v26.s }[2], [x22], #0x4\n"
+ "ld1 { v19.s }[2], [x21], #0x4\n"
+ "ld1 { v24.s }[2], [x20], #0x4\n"
"tbz %x[width], #1, 6f\n"
- "ld1 { v30.h }[6], [x28], #0x2\n"
- "ld1 { v29.h }[6], [x27], #0x2\n"
- "mov x20, #0x4\n"
- "ld1 { v28.h }[6], [x26], #0x2\n"
- "ld1 { v27.h }[6], [x25], #0x2\n"
- "ld1 { v20.h }[6], [x24], #0x2\n"
- "ld1 { v26.h }[6], [x23], #0x2\n"
- "ld1 { v19.h }[6], [x22], #0x2\n"
- "ld1 { v18.h }[6], [x21], #0x2\n"
+ "ld1 { v28.h }[6], [x27], #0x2\n"
+ "mov x19, #0x4\n"
+ "ld1 { v29.h }[6], [x26], #0x2\n"
+ "ld1 { v25.h }[6], [x25], #0x2\n"
+ "ld1 { v21.h }[6], [x24], #0x2\n"
+ "ld1 { v27.h }[6], [x23], #0x2\n"
+ "ld1 { v26.h }[6], [x22], #0x2\n"
+ "ld1 { v19.h }[6], [x21], #0x2\n"
+ "ld1 { v24.h }[6], [x20], #0x2\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v30.b }[14], [x28]\n"
- "ld1 { v29.b }[14], [x27]\n"
- "ld1 { v28.b }[14], [x26]\n"
- "ld1 { v27.b }[14], [x25]\n"
- "ld1 { v20.b }[14], [x24]\n"
- "ld1 { v26.b }[14], [x23]\n"
- "ld1 { v19.b }[14], [x22]\n"
- "ld1 { v18.b }[14], [x21]\n"
+ "ld1 { v28.b }[14], [x27]\n"
+ "ld1 { v29.b }[14], [x26]\n"
+ "ld1 { v25.b }[14], [x25]\n"
+ "ld1 { v21.b }[14], [x24]\n"
+ "ld1 { v27.b }[14], [x23]\n"
+ "ld1 { v26.b }[14], [x22]\n"
+ "ld1 { v19.b }[14], [x21]\n"
+ "ld1 { v24.b }[14], [x20]\n"
"b 13f\n"
"6:" // odd_loads_1_12
- "mov x20, #0x3\n"
+ "mov x19, #0x3\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v30.b }[12], [x28]\n"
- "ld1 { v29.b }[12], [x27]\n"
- "mov x20, #0x4\n"
- "ld1 { v28.b }[12], [x26]\n"
- "ld1 { v27.b }[12], [x25]\n"
- "ld1 { v20.b }[12], [x24]\n"
- "ld1 { v26.b }[12], [x23]\n"
- "ld1 { v19.b }[12], [x22]\n"
- "ld1 { v18.b }[12], [x21]\n"
+ "ld1 { v28.b }[12], [x27]\n"
+ "ld1 { v29.b }[12], [x26]\n"
+ "mov x19, #0x4\n"
+ "ld1 { v25.b }[12], [x25]\n"
+ "ld1 { v21.b }[12], [x24]\n"
+ "ld1 { v27.b }[12], [x23]\n"
+ "ld1 { v26.b }[12], [x22]\n"
+ "ld1 { v19.b }[12], [x21]\n"
+ "ld1 { v24.b }[12], [x20]\n"
"b 13f\n"
"7:" // odd_loads_2_8
"tbz %x[width], #1, 8f\n"
- "ld1 { v30.h }[4], [x28], #0x2\n"
- "ld1 { v29.h }[4], [x27], #0x2\n"
- "mov x20, #0x3\n"
- "ld1 { v28.h }[4], [x26], #0x2\n"
- "ld1 { v27.h }[4], [x25], #0x2\n"
- "ld1 { v20.h }[4], [x24], #0x2\n"
- "ld1 { v26.h }[4], [x23], #0x2\n"
- "ld1 { v19.h }[4], [x22], #0x2\n"
- "ld1 { v18.h }[4], [x21], #0x2\n"
+ "ld1 { v28.h }[4], [x27], #0x2\n"
+ "ld1 { v29.h }[4], [x26], #0x2\n"
+ "mov x19, #0x3\n"
+ "ld1 { v25.h }[4], [x25], #0x2\n"
+ "ld1 { v21.h }[4], [x24], #0x2\n"
+ "ld1 { v27.h }[4], [x23], #0x2\n"
+ "ld1 { v26.h }[4], [x22], #0x2\n"
+ "ld1 { v19.h }[4], [x21], #0x2\n"
+ "ld1 { v24.h }[4], [x20], #0x2\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v30.b }[10], [x28]\n"
- "ld1 { v29.b }[10], [x27]\n"
- "ld1 { v28.b }[10], [x26]\n"
- "ld1 { v27.b }[10], [x25]\n"
- "ld1 { v20.b }[10], [x24]\n"
- "ld1 { v26.b }[10], [x23]\n"
- "ld1 { v19.b }[10], [x22]\n"
- "ld1 { v18.b }[10], [x21]\n"
+ "ld1 { v28.b }[10], [x27]\n"
+ "ld1 { v29.b }[10], [x26]\n"
+ "ld1 { v25.b }[10], [x25]\n"
+ "ld1 { v21.b }[10], [x24]\n"
+ "ld1 { v27.b }[10], [x23]\n"
+ "ld1 { v26.b }[10], [x22]\n"
+ "ld1 { v19.b }[10], [x21]\n"
+ "ld1 { v24.b }[10], [x20]\n"
"b 13f\n"
"8:" // odd_loads_1_8
- "mov x20, #0x2\n"
+ "mov x19, #0x2\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v30.b }[8], [x28]\n"
- "ld1 { v29.b }[8], [x27]\n"
- "mov x20, #0x3\n"
- "ld1 { v28.b }[8], [x26]\n"
- "ld1 { v27.b }[8], [x25]\n"
- "ld1 { v20.b }[8], [x24]\n"
- "ld1 { v26.b }[8], [x23]\n"
- "ld1 { v19.b }[8], [x22]\n"
- "ld1 { v18.b }[8], [x21]\n"
+ "ld1 { v28.b }[8], [x27]\n"
+ "ld1 { v29.b }[8], [x26]\n"
+ "mov x19, #0x3\n"
+ "ld1 { v25.b }[8], [x25]\n"
+ "ld1 { v21.b }[8], [x24]\n"
+ "ld1 { v27.b }[8], [x23]\n"
+ "ld1 { v26.b }[8], [x22]\n"
+ "ld1 { v19.b }[8], [x21]\n"
+ "ld1 { v24.b }[8], [x20]\n"
"b 13f\n"
"9:" // odd_loads_4_0
"tbz %x[width], #2, 11f\n"
- "ldr s30, [x28], #0x4\n"
- "ldr s29, [x27], #0x4\n"
- "ldr s28, [x26], #0x4\n"
- "ldr s27, [x25], #0x4\n"
- "ldr s20, [x24], #0x4\n"
- "ldr s26, [x23], #0x4\n"
- "ldr s19, [x22], #0x4\n"
- "ldr s18, [x21], #0x4\n"
+ "ldr s28, [x27], #0x4\n"
+ "ldr s29, [x26], #0x4\n"
+ "ldr s25, [x25], #0x4\n"
+ "ldr s21, [x24], #0x4\n"
+ "ldr s27, [x23], #0x4\n"
+ "ldr s26, [x22], #0x4\n"
+ "ldr s19, [x21], #0x4\n"
+ "ldr s24, [x20], #0x4\n"
"tbz %x[width], #1, 10f\n"
- "ld1 { v30.h }[2], [x28], #0x2\n"
- "ld1 { v29.h }[2], [x27], #0x2\n"
- "mov x20, #0x2\n"
- "ld1 { v28.h }[2], [x26], #0x2\n"
- "ld1 { v27.h }[2], [x25], #0x2\n"
- "ld1 { v20.h }[2], [x24], #0x2\n"
- "ld1 { v26.h }[2], [x23], #0x2\n"
- "ld1 { v19.h }[2], [x22], #0x2\n"
- "ld1 { v18.h }[2], [x21], #0x2\n"
+ "ld1 { v28.h }[2], [x27], #0x2\n"
+ "mov x19, #0x2\n"
+ "ld1 { v29.h }[2], [x26], #0x2\n"
+ "ld1 { v25.h }[2], [x25], #0x2\n"
+ "ld1 { v21.h }[2], [x24], #0x2\n"
+ "ld1 { v27.h }[2], [x23], #0x2\n"
+ "ld1 { v26.h }[2], [x22], #0x2\n"
+ "ld1 { v19.h }[2], [x21], #0x2\n"
+ "ld1 { v24.h }[2], [x20], #0x2\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v30.b }[6], [x28]\n"
- "ld1 { v29.b }[6], [x27]\n"
- "ld1 { v28.b }[6], [x26]\n"
- "ld1 { v27.b }[6], [x25]\n"
- "ld1 { v20.b }[6], [x24]\n"
- "ld1 { v26.b }[6], [x23]\n"
- "ld1 { v19.b }[6], [x22]\n"
- "ld1 { v18.b }[6], [x21]\n"
+ "ld1 { v28.b }[6], [x27]\n"
+ "ld1 { v29.b }[6], [x26]\n"
+ "ld1 { v25.b }[6], [x25]\n"
+ "ld1 { v21.b }[6], [x24]\n"
+ "ld1 { v27.b }[6], [x23]\n"
+ "ld1 { v26.b }[6], [x22]\n"
+ "ld1 { v19.b }[6], [x21]\n"
+ "ld1 { v24.b }[6], [x20]\n"
"b 13f\n"
"10:" // odd_loads_1_4
- "mov x20, #0x1\n"
+ "mov x19, #0x1\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v30.b }[4], [x28]\n"
- "ld1 { v29.b }[4], [x27]\n"
- "mov x20, #0x2\n"
- "ld1 { v28.b }[4], [x26]\n"
- "ld1 { v27.b }[4], [x25]\n"
- "ld1 { v20.b }[4], [x24]\n"
- "ld1 { v26.b }[4], [x23]\n"
- "ld1 { v19.b }[4], [x22]\n"
- "ld1 { v18.b }[4], [x21]\n"
+ "ld1 { v28.b }[4], [x27]\n"
+ "ld1 { v29.b }[4], [x26]\n"
+ "mov x19, #0x2\n"
+ "ld1 { v25.b }[4], [x25]\n"
+ "ld1 { v21.b }[4], [x24]\n"
+ "ld1 { v27.b }[4], [x23]\n"
+ "ld1 { v26.b }[4], [x22]\n"
+ "ld1 { v19.b }[4], [x21]\n"
+ "ld1 { v24.b }[4], [x20]\n"
"b 13f\n"
"11:" // odd_loads_2_0
"tbz %x[width], #1, 12f\n"
- "ldr h30, [x28], #0x2\n"
- "ldr h29, [x27], #0x2\n"
- "mov x20, #0x1\n"
- "ldr h28, [x26], #0x2\n"
- "ldr h27, [x25], #0x2\n"
- "ldr h20, [x24], #0x2\n"
- "ldr h26, [x23], #0x2\n"
- "ldr h19, [x22], #0x2\n"
- "ldr h18, [x21], #0x2\n"
+ "ldr h28, [x27], #0x2\n"
+ "ldr h29, [x26], #0x2\n"
+ "mov x19, #0x1\n"
+ "ldr h25, [x25], #0x2\n"
+ "ldr h21, [x24], #0x2\n"
+ "ldr h27, [x23], #0x2\n"
+ "ldr h26, [x22], #0x2\n"
+ "ldr h19, [x21], #0x2\n"
+ "ldr h24, [x20], #0x2\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v30.b }[2], [x28]\n"
- "ld1 { v29.b }[2], [x27]\n"
- "ld1 { v28.b }[2], [x26]\n"
- "ld1 { v27.b }[2], [x25]\n"
- "ld1 { v20.b }[2], [x24]\n"
- "ld1 { v26.b }[2], [x23]\n"
- "ld1 { v19.b }[2], [x22]\n"
- "ld1 { v18.b }[2], [x21]\n"
+ "ld1 { v28.b }[2], [x27]\n"
+ "ld1 { v29.b }[2], [x26]\n"
+ "ld1 { v25.b }[2], [x25]\n"
+ "ld1 { v21.b }[2], [x24]\n"
+ "ld1 { v27.b }[2], [x23]\n"
+ "ld1 { v26.b }[2], [x22]\n"
+ "ld1 { v19.b }[2], [x21]\n"
+ "ld1 { v24.b }[2], [x20]\n"
"b 13f\n"
"12:" // odd_loads_1_0
- "ldr b30, [x28, #0x0]\n"
- "ldr b29, [x27, #0x0]\n"
- "mov x20, #0x1\n"
- "ldr b28, [x26, #0x0]\n"
- "ldr b27, [x25, #0x0]\n"
- "ldr b20, [x24, #0x0]\n"
- "ldr b26, [x23, #0x0]\n"
- "ldr b19, [x22, #0x0]\n"
- "ldr b18, [x21, #0x0]\n"
+ "ldr b28, [x27, #0x0]\n"
+ "mov x19, #0x1\n"
+ "ldr b29, [x26, #0x0]\n"
+ "ldr b25, [x25, #0x0]\n"
+ "ldr b21, [x24, #0x0]\n"
+ "ldr b27, [x23, #0x0]\n"
+ "ldr b26, [x22, #0x0]\n"
+ "ldr b19, [x21, #0x0]\n"
+ "ldr b24, [x20, #0x0]\n"
"13:" // Odd load end
- "zip1 v22.4s, v30.4s, v28.4s\n"
- "zip1 v21.4s, v29.4s, v27.4s\n"
- "subs x20, x20, #0x1\n"
- "zip1 v17.4s, v20.4s, v19.4s\n"
- "zip1 v16.4s, v26.4s, v18.4s\n"
- "zip1 v25.4s, v22.4s, v21.4s\n"
- "zip1 v24.4s, v17.4s, v16.4s\n"
- "str q25, [%x[out_ptr], #0x0]\n"
- "sadalp v2.8h, v25.16b\n"
- "str q24, [%x[out_ptr], #0x10]\n"
- "sadalp v1.8h, v24.16b\n"
+ "zip1 v22.4s, v28.4s, v25.4s\n"
+ "subs x19, x19, #0x1\n"
+ "zip1 v20.4s, v29.4s, v21.4s\n"
+ "zip1 v23.4s, v22.4s, v20.4s\n"
+ "str q23, [%x[out_ptr], #0x0]\n"
+ "sadalp v1.8h, v23.16b\n"
+ "zip1 v18.4s, v27.4s, v19.4s\n"
+ "zip1 v16.4s, v26.4s, v24.4s\n"
+ "zip1 v17.4s, v18.4s, v16.4s\n"
+ "str q17, [%x[out_ptr], #0x10]\n"
+ "sadalp v0.8h, v17.16b\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"beq 14f\n"
- "zip2 v23.4s, v22.4s, v21.4s\n"
- "zip2 v22.4s, v17.4s, v16.4s\n"
- "subs x20, x20, #0x1\n"
- "str q23, [%x[out_ptr], #0x0]\n"
- "sadalp v2.8h, v23.16b\n"
- "str q22, [%x[out_ptr], #0x10]\n"
+ "zip2 v22.4s, v22.4s, v20.4s\n"
+ "str q22, [%x[out_ptr], #0x0]\n"
+ "zip2 v20.4s, v18.4s, v16.4s\n"
"sadalp v1.8h, v22.16b\n"
+ "str q20, [%x[out_ptr], #0x10]\n"
+ "subs x19, x19, #0x1\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
+ "sadalp v0.8h, v20.16b\n"
"beq 14f\n"
- "zip2 v21.4s, v30.4s, v28.4s\n"
- "zip2 v17.4s, v29.4s, v27.4s\n"
- "subs x20, x20, #0x1\n"
- "zip2 v20.4s, v20.4s, v19.4s\n"
- "zip2 v16.4s, v26.4s, v18.4s\n"
- "zip1 v19.4s, v21.4s, v17.4s\n"
- "zip1 v18.4s, v20.4s, v16.4s\n"
- "str q19, [%x[out_ptr], #0x0]\n"
- "sadalp v2.8h, v19.16b\n"
+ "zip2 v28.4s, v28.4s, v25.4s\n"
+ "zip2 v25.4s, v29.4s, v21.4s\n"
+ "subs x19, x19, #0x1\n"
+ "zip1 v21.4s, v28.4s, v25.4s\n"
+ "str q21, [%x[out_ptr], #0x0]\n"
+ "sadalp v1.8h, v21.16b\n"
+ "zip2 v19.4s, v27.4s, v19.4s\n"
+ "zip2 v16.4s, v26.4s, v24.4s\n"
+ "zip1 v18.4s, v19.4s, v16.4s\n"
"str q18, [%x[out_ptr], #0x10]\n"
- "sadalp v1.8h, v18.16b\n"
+ "sadalp v0.8h, v18.16b\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"beq 14f\n"
- "zip2 v17.4s, v21.4s, v17.4s\n"
- "zip2 v16.4s, v20.4s, v16.4s\n"
+ "zip2 v17.4s, v28.4s, v25.4s\n"
"str q17, [%x[out_ptr], #0x0]\n"
- "sadalp v2.8h, v17.16b\n"
+ "zip2 v16.4s, v19.4s, v16.4s\n"
+ "sadalp v1.8h, v17.16b\n"
"str q16, [%x[out_ptr], #0x10]\n"
- "sadalp v1.8h, v16.16b\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
+ "sadalp v0.8h, v16.16b\n"
"14:" // Odds skip
- "sadalp v0.4s, v2.8h\n"
"sadalp v31.4s, v1.8h\n"
- "str q0, [%x[out_ptr], #0x0]\n"
- "str q31, [%x[out_ptr], #0x10]\n"
+ "str q31, [%x[out_ptr], #0x0]\n"
+ "sadalp v30.4s, v0.8h\n"
+ "str q30, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
: [out_ptr] "+&r" (out_ptr), [width] "+&r" (width)
: [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset)
- : "cc", "memory", "v0", "v1", "v2", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27"
);
}
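
For orientation (an editor's sketch, not part of the patch): the reverted kernel above packs eight int8 rows in 4-byte blocks and appends a per-row int32 column sum after the packed data; the sadalp .8h/.4s pair is a two-stage accumulation that folds 16-bit partial sums into 32-bit totals every 30 chunks so the halfword accumulators cannot overflow. A rough scalar model of that contract follows, using invented names and a simplified signature rather than the library's actual interleave_block<8, 4, VLType::None, true> template interface:

#include <cstdint>
#include <cstring>

// Hypothetical scalar sketch of the summing block-4 interleave; not the
// library's API, only a model of what the assembly above computes.
static void interleave8_block4_s8_sum_sketch(const int8_t *const *in,
                                             int8_t *&out, int height,
                                             int width, int row_offset,
                                             bool first)
{
    int32_t sums[8];
    if (first) {
        std::memset(sums, 0, sizeof(sums));
    } else {
        // Matches the "cbnz %w[first]" path: step back over the sums written
        // by the previous call and resume accumulating into them.
        out -= sizeof(sums);
        std::memcpy(sums, out, sizeof(sums));
    }
    for (int block = 0; block * 4 < width; block++) {
        for (int row = 0; row < 8; row++) {
            // Rows past 'height' replay row 0, as the csel fix-ups arrange.
            const int8_t *src = in[row < height ? row : 0] + row_offset;
            for (int byte = 0; byte < 4; byte++) {
                const int col = block * 4 + byte;
                // Tail lanes beyond 'width' stay zero, mirroring the
                // zero-extension of the narrow ldr s/h/b loads.
                const int8_t v = (col < width) ? src[col] : 0;
                *out++ = v;
                sums[row] += v;
            }
        }
    }
    std::memcpy(out, sums, sizeof(sums)); // row sums trail the packed data
    out += sizeof(sums);
}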
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_u8_u8_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_u8_u8_summing.hpp
index 44a79c0f0a..1b94c7f1f1 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_u8_u8_summing.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_u8_u8_summing.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -31,41 +31,40 @@ void interleave_block<8, 4, VLType::None, true>(
)
{
__asm__ __volatile__(
- "ldr x28, [%x[in], #0x0]\n"
- "ldr x27, [%x[in], #0x8]\n"
- "cmp %x[height], #0x8\n"
- "mov x20, #0x0\n"
- "ldr x26, [%x[in], #0x10]\n"
- "ldr x25, [%x[in], #0x18]\n"
- "movi v2.8h, #0x0\n"
"movi v1.8h, #0x0\n"
- "ldr x24, [%x[in], #0x20]\n"
- "ldr x23, [%x[in], #0x28]\n"
- "movi v0.4s, #0x0\n"
+ "ldr x27, [%x[in], #0x0]\n"
+ "mov x19, #0x0\n"
+ "movi v0.8h, #0x0\n"
+ "ldr x26, [%x[in], #0x8]\n"
+ "cmp %x[height], #0x8\n"
"movi v31.4s, #0x0\n"
- "ldr x22, [%x[in], #0x30]\n"
- "ldr x21, [%x[in], #0x38]\n"
- "add x28, x28, %x[row_offset]\n"
+ "ldr x25, [%x[in], #0x10]\n"
"add x27, x27, %x[row_offset]\n"
+ "movi v30.4s, #0x0\n"
+ "ldr x24, [%x[in], #0x18]\n"
+ "ldr x23, [%x[in], #0x20]\n"
"add x26, x26, %x[row_offset]\n"
+ "ldr x22, [%x[in], #0x28]\n"
"add x25, x25, %x[row_offset]\n"
+ "ldr x21, [%x[in], #0x30]\n"
"add x24, x24, %x[row_offset]\n"
+ "ldr x20, [%x[in], #0x38]\n"
"add x23, x23, %x[row_offset]\n"
"add x22, x22, %x[row_offset]\n"
"add x21, x21, %x[row_offset]\n"
+ "add x20, x20, %x[row_offset]\n"
"beq 1f\n"
+ "mov x20, x27\n"
"cmp %x[height], #0x2\n"
- "csel x27, x27, x28, GE\n"
- "csel x26, x26, x28, GT\n"
+ "csel x26, x26, x27, GE\n"
+ "csel x25, x25, x27, GT\n"
"cmp %x[height], #0x4\n"
- "csel x25, x25, x28, GE\n"
- "csel x24, x24, x28, GT\n"
+ "csel x24, x24, x27, GE\n"
+ "csel x23, x23, x27, GT\n"
"cmp %x[height], #0x6\n"
- "mov x21, x28\n"
- "csel x23, x23, x28, GE\n"
- "csel x22, x22, x28, GT\n"
+ "csel x22, x22, x27, GE\n"
+ "csel x21, x21, x27, GT\n"
"1:" // no_pointer_adj
- "prfm pldl1keep, [x28, #0x0]\n"
"prfm pldl1keep, [x27, #0x0]\n"
"prfm pldl1keep, [x26, #0x0]\n"
"prfm pldl1keep, [x25, #0x0]\n"
@@ -73,7 +72,7 @@ void interleave_block<8, 4, VLType::None, true>(
"prfm pldl1keep, [x23, #0x0]\n"
"prfm pldl1keep, [x22, #0x0]\n"
"prfm pldl1keep, [x21, #0x0]\n"
- "prfm pldl1keep, [x28, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x0]\n"
"prfm pldl1keep, [x27, #0x40]\n"
"prfm pldl1keep, [x26, #0x40]\n"
"prfm pldl1keep, [x25, #0x40]\n"
@@ -81,70 +80,71 @@ void interleave_block<8, 4, VLType::None, true>(
"prfm pldl1keep, [x23, #0x40]\n"
"prfm pldl1keep, [x22, #0x40]\n"
"prfm pldl1keep, [x21, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x40]\n"
"cbnz %w[first], 2f\n"
"sub %x[out_ptr], %x[out_ptr], #0x20\n"
- "ld1 { v0.4s }, [%x[out_ptr]]\n"
- "ldr q31, [%x[out_ptr], #0x10]\n"
+ "ld1 { v31.4s }, [%x[out_ptr]]\n"
+ "ldr q30, [%x[out_ptr], #0x10]\n"
"2:" // first_pass
"cmp %x[width], #0x10\n"
"blt 5f\n"
"3:" // Main loop head
- "cmp x20, #0x1e\n"
+ "cmp x19, #0x1e\n"
"ble 4f\n"
- "uadalp v0.4s, v2.8h\n"
- "movi v2.8h, #0x0\n"
- "mov x20, #0x0\n"
"uadalp v31.4s, v1.8h\n"
"movi v1.8h, #0x0\n"
+ "uadalp v30.4s, v0.8h\n"
+ "movi v0.8h, #0x0\n"
+ "mov x19, #0x0\n"
"4:" // no_accumulate_16
- "ldr q30, [x28], #0x10\n"
- "ldr q29, [x27], #0x10\n"
+ "ldr q28, [x27], #0x10\n"
+ "add x19, x19, #0x1\n"
+ "ldr q29, [x26], #0x10\n"
"subs %x[width], %x[width], #0x10\n"
+ "ldr q25, [x25], #0x10\n"
+ "zip1 v22.4s, v28.4s, v25.4s\n"
+ "ldr q21, [x24], #0x10\n"
"cmp %x[width], #0x10\n"
- "ldr q28, [x26], #0x10\n"
- "ldr q27, [x25], #0x10\n"
- "zip1 v22.4s, v30.4s, v28.4s\n"
- "zip1 v21.4s, v29.4s, v27.4s\n"
- "ldr q20, [x24], #0x10\n"
- "ldr q26, [x23], #0x10\n"
- "zip1 v25.4s, v22.4s, v21.4s\n"
- "uadalp v2.8h, v25.16b\n"
- "ldr q19, [x22], #0x10\n"
- "ldr q18, [x21], #0x10\n"
- "zip1 v17.4s, v20.4s, v19.4s\n"
- "zip1 v16.4s, v26.4s, v18.4s\n"
- "zip1 v24.4s, v17.4s, v16.4s\n"
- "uadalp v1.8h, v24.16b\n"
- "prfm pldl1keep, [x28, #0x70]\n"
+ "zip2 v28.4s, v28.4s, v25.4s\n"
+ "ldr q27, [x23], #0x10\n"
+ "ldr q26, [x22], #0x10\n"
+ "zip1 v20.4s, v29.4s, v21.4s\n"
+ "ldr q19, [x21], #0x10\n"
+ "zip2 v25.4s, v29.4s, v21.4s\n"
+ "ldr q24, [x20], #0x10\n"
+ "zip1 v23.4s, v22.4s, v20.4s\n"
"prfm pldl1keep, [x27, #0x70]\n"
- "zip2 v23.4s, v22.4s, v21.4s\n"
- "zip2 v22.4s, v17.4s, v16.4s\n"
+ "uadalp v1.8h, v23.16b\n"
+ "zip2 v22.4s, v22.4s, v20.4s\n"
"prfm pldl1keep, [x26, #0x70]\n"
+ "uadalp v1.8h, v22.16b\n"
+ "zip1 v18.4s, v27.4s, v19.4s\n"
"prfm pldl1keep, [x25, #0x70]\n"
- "zip2 v21.4s, v30.4s, v28.4s\n"
- "zip2 v17.4s, v29.4s, v27.4s\n"
+ "zip1 v16.4s, v26.4s, v24.4s\n"
"prfm pldl1keep, [x24, #0x70]\n"
+ "zip1 v21.4s, v28.4s, v25.4s\n"
"prfm pldl1keep, [x23, #0x70]\n"
- "zip2 v20.4s, v20.4s, v19.4s\n"
- "zip2 v16.4s, v26.4s, v18.4s\n"
+ "uadalp v1.8h, v21.16b\n"
+ "zip1 v17.4s, v18.4s, v16.4s\n"
"prfm pldl1keep, [x22, #0x70]\n"
+ "zip2 v20.4s, v18.4s, v16.4s\n"
"prfm pldl1keep, [x21, #0x70]\n"
- "uadalp v2.8h, v23.16b\n"
- "uadalp v1.8h, v22.16b\n"
- "str q25, [%x[out_ptr], #0x0]\n"
- "add x20, x20, #0x1\n"
- "zip1 v19.4s, v21.4s, v17.4s\n"
- "zip1 v18.4s, v20.4s, v16.4s\n"
- "str q24, [%x[out_ptr], #0x10]\n"
- "uadalp v2.8h, v19.16b\n"
- "uadalp v1.8h, v18.16b\n"
- "str q23, [%x[out_ptr], #0x20]\n"
- "zip2 v17.4s, v21.4s, v17.4s\n"
- "zip2 v16.4s, v20.4s, v16.4s\n"
- "str q22, [%x[out_ptr], #0x30]\n"
- "str q19, [%x[out_ptr], #0x40]\n"
- "uadalp v2.8h, v17.16b\n"
- "uadalp v1.8h, v16.16b\n"
+ "uadalp v0.8h, v17.16b\n"
+ "zip2 v19.4s, v27.4s, v19.4s\n"
+ "prfm pldl1keep, [x20, #0x70]\n"
+ "uadalp v0.8h, v20.16b\n"
+ "zip2 v16.4s, v26.4s, v24.4s\n"
+ "str q23, [%x[out_ptr], #0x0]\n"
+ "zip1 v18.4s, v19.4s, v16.4s\n"
+ "str q17, [%x[out_ptr], #0x10]\n"
+ "uadalp v0.8h, v18.16b\n"
+ "zip2 v17.4s, v28.4s, v25.4s\n"
+ "str q22, [%x[out_ptr], #0x20]\n"
+ "zip2 v16.4s, v19.4s, v16.4s\n"
+ "str q20, [%x[out_ptr], #0x30]\n"
+ "uadalp v1.8h, v17.16b\n"
+ "str q21, [%x[out_ptr], #0x40]\n"
+ "uadalp v0.8h, v16.16b\n"
"str q18, [%x[out_ptr], #0x50]\n"
"str q17, [%x[out_ptr], #0x60]\n"
"str q16, [%x[out_ptr], #0x70]\n"
@@ -153,216 +153,216 @@ void interleave_block<8, 4, VLType::None, true>(
"5:" // Main loop skip
"cbz %x[width], 14f\n"
"tbz %x[width], #3, 9f\n"
- "ldr d30, [x28], #0x8\n"
- "ldr d29, [x27], #0x8\n"
- "ldr d28, [x26], #0x8\n"
- "ldr d27, [x25], #0x8\n"
- "ldr d20, [x24], #0x8\n"
- "ldr d26, [x23], #0x8\n"
- "ldr d19, [x22], #0x8\n"
- "ldr d18, [x21], #0x8\n"
+ "ldr d28, [x27], #0x8\n"
+ "ldr d29, [x26], #0x8\n"
+ "ldr d25, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d27, [x23], #0x8\n"
+ "ldr d26, [x22], #0x8\n"
+ "ldr d19, [x21], #0x8\n"
+ "ldr d24, [x20], #0x8\n"
"tbz %x[width], #2, 7f\n"
- "ld1 { v30.s }[2], [x28], #0x4\n"
- "ld1 { v29.s }[2], [x27], #0x4\n"
- "ld1 { v28.s }[2], [x26], #0x4\n"
- "ld1 { v27.s }[2], [x25], #0x4\n"
- "ld1 { v20.s }[2], [x24], #0x4\n"
- "ld1 { v26.s }[2], [x23], #0x4\n"
- "ld1 { v19.s }[2], [x22], #0x4\n"
- "ld1 { v18.s }[2], [x21], #0x4\n"
+ "ld1 { v28.s }[2], [x27], #0x4\n"
+ "ld1 { v29.s }[2], [x26], #0x4\n"
+ "ld1 { v25.s }[2], [x25], #0x4\n"
+ "ld1 { v21.s }[2], [x24], #0x4\n"
+ "ld1 { v27.s }[2], [x23], #0x4\n"
+ "ld1 { v26.s }[2], [x22], #0x4\n"
+ "ld1 { v19.s }[2], [x21], #0x4\n"
+ "ld1 { v24.s }[2], [x20], #0x4\n"
"tbz %x[width], #1, 6f\n"
- "ld1 { v30.h }[6], [x28], #0x2\n"
- "ld1 { v29.h }[6], [x27], #0x2\n"
- "mov x20, #0x4\n"
- "ld1 { v28.h }[6], [x26], #0x2\n"
- "ld1 { v27.h }[6], [x25], #0x2\n"
- "ld1 { v20.h }[6], [x24], #0x2\n"
- "ld1 { v26.h }[6], [x23], #0x2\n"
- "ld1 { v19.h }[6], [x22], #0x2\n"
- "ld1 { v18.h }[6], [x21], #0x2\n"
+ "ld1 { v28.h }[6], [x27], #0x2\n"
+ "mov x19, #0x4\n"
+ "ld1 { v29.h }[6], [x26], #0x2\n"
+ "ld1 { v25.h }[6], [x25], #0x2\n"
+ "ld1 { v21.h }[6], [x24], #0x2\n"
+ "ld1 { v27.h }[6], [x23], #0x2\n"
+ "ld1 { v26.h }[6], [x22], #0x2\n"
+ "ld1 { v19.h }[6], [x21], #0x2\n"
+ "ld1 { v24.h }[6], [x20], #0x2\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v30.b }[14], [x28]\n"
- "ld1 { v29.b }[14], [x27]\n"
- "ld1 { v28.b }[14], [x26]\n"
- "ld1 { v27.b }[14], [x25]\n"
- "ld1 { v20.b }[14], [x24]\n"
- "ld1 { v26.b }[14], [x23]\n"
- "ld1 { v19.b }[14], [x22]\n"
- "ld1 { v18.b }[14], [x21]\n"
+ "ld1 { v28.b }[14], [x27]\n"
+ "ld1 { v29.b }[14], [x26]\n"
+ "ld1 { v25.b }[14], [x25]\n"
+ "ld1 { v21.b }[14], [x24]\n"
+ "ld1 { v27.b }[14], [x23]\n"
+ "ld1 { v26.b }[14], [x22]\n"
+ "ld1 { v19.b }[14], [x21]\n"
+ "ld1 { v24.b }[14], [x20]\n"
"b 13f\n"
"6:" // odd_loads_1_12
- "mov x20, #0x3\n"
+ "mov x19, #0x3\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v30.b }[12], [x28]\n"
- "ld1 { v29.b }[12], [x27]\n"
- "mov x20, #0x4\n"
- "ld1 { v28.b }[12], [x26]\n"
- "ld1 { v27.b }[12], [x25]\n"
- "ld1 { v20.b }[12], [x24]\n"
- "ld1 { v26.b }[12], [x23]\n"
- "ld1 { v19.b }[12], [x22]\n"
- "ld1 { v18.b }[12], [x21]\n"
+ "ld1 { v28.b }[12], [x27]\n"
+ "ld1 { v29.b }[12], [x26]\n"
+ "mov x19, #0x4\n"
+ "ld1 { v25.b }[12], [x25]\n"
+ "ld1 { v21.b }[12], [x24]\n"
+ "ld1 { v27.b }[12], [x23]\n"
+ "ld1 { v26.b }[12], [x22]\n"
+ "ld1 { v19.b }[12], [x21]\n"
+ "ld1 { v24.b }[12], [x20]\n"
"b 13f\n"
"7:" // odd_loads_2_8
"tbz %x[width], #1, 8f\n"
- "ld1 { v30.h }[4], [x28], #0x2\n"
- "ld1 { v29.h }[4], [x27], #0x2\n"
- "mov x20, #0x3\n"
- "ld1 { v28.h }[4], [x26], #0x2\n"
- "ld1 { v27.h }[4], [x25], #0x2\n"
- "ld1 { v20.h }[4], [x24], #0x2\n"
- "ld1 { v26.h }[4], [x23], #0x2\n"
- "ld1 { v19.h }[4], [x22], #0x2\n"
- "ld1 { v18.h }[4], [x21], #0x2\n"
+ "ld1 { v28.h }[4], [x27], #0x2\n"
+ "ld1 { v29.h }[4], [x26], #0x2\n"
+ "mov x19, #0x3\n"
+ "ld1 { v25.h }[4], [x25], #0x2\n"
+ "ld1 { v21.h }[4], [x24], #0x2\n"
+ "ld1 { v27.h }[4], [x23], #0x2\n"
+ "ld1 { v26.h }[4], [x22], #0x2\n"
+ "ld1 { v19.h }[4], [x21], #0x2\n"
+ "ld1 { v24.h }[4], [x20], #0x2\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v30.b }[10], [x28]\n"
- "ld1 { v29.b }[10], [x27]\n"
- "ld1 { v28.b }[10], [x26]\n"
- "ld1 { v27.b }[10], [x25]\n"
- "ld1 { v20.b }[10], [x24]\n"
- "ld1 { v26.b }[10], [x23]\n"
- "ld1 { v19.b }[10], [x22]\n"
- "ld1 { v18.b }[10], [x21]\n"
+ "ld1 { v28.b }[10], [x27]\n"
+ "ld1 { v29.b }[10], [x26]\n"
+ "ld1 { v25.b }[10], [x25]\n"
+ "ld1 { v21.b }[10], [x24]\n"
+ "ld1 { v27.b }[10], [x23]\n"
+ "ld1 { v26.b }[10], [x22]\n"
+ "ld1 { v19.b }[10], [x21]\n"
+ "ld1 { v24.b }[10], [x20]\n"
"b 13f\n"
"8:" // odd_loads_1_8
- "mov x20, #0x2\n"
+ "mov x19, #0x2\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v30.b }[8], [x28]\n"
- "ld1 { v29.b }[8], [x27]\n"
- "mov x20, #0x3\n"
- "ld1 { v28.b }[8], [x26]\n"
- "ld1 { v27.b }[8], [x25]\n"
- "ld1 { v20.b }[8], [x24]\n"
- "ld1 { v26.b }[8], [x23]\n"
- "ld1 { v19.b }[8], [x22]\n"
- "ld1 { v18.b }[8], [x21]\n"
+ "ld1 { v28.b }[8], [x27]\n"
+ "ld1 { v29.b }[8], [x26]\n"
+ "mov x19, #0x3\n"
+ "ld1 { v25.b }[8], [x25]\n"
+ "ld1 { v21.b }[8], [x24]\n"
+ "ld1 { v27.b }[8], [x23]\n"
+ "ld1 { v26.b }[8], [x22]\n"
+ "ld1 { v19.b }[8], [x21]\n"
+ "ld1 { v24.b }[8], [x20]\n"
"b 13f\n"
"9:" // odd_loads_4_0
"tbz %x[width], #2, 11f\n"
- "ldr s30, [x28], #0x4\n"
- "ldr s29, [x27], #0x4\n"
- "ldr s28, [x26], #0x4\n"
- "ldr s27, [x25], #0x4\n"
- "ldr s20, [x24], #0x4\n"
- "ldr s26, [x23], #0x4\n"
- "ldr s19, [x22], #0x4\n"
- "ldr s18, [x21], #0x4\n"
+ "ldr s28, [x27], #0x4\n"
+ "ldr s29, [x26], #0x4\n"
+ "ldr s25, [x25], #0x4\n"
+ "ldr s21, [x24], #0x4\n"
+ "ldr s27, [x23], #0x4\n"
+ "ldr s26, [x22], #0x4\n"
+ "ldr s19, [x21], #0x4\n"
+ "ldr s24, [x20], #0x4\n"
"tbz %x[width], #1, 10f\n"
- "ld1 { v30.h }[2], [x28], #0x2\n"
- "ld1 { v29.h }[2], [x27], #0x2\n"
- "mov x20, #0x2\n"
- "ld1 { v28.h }[2], [x26], #0x2\n"
- "ld1 { v27.h }[2], [x25], #0x2\n"
- "ld1 { v20.h }[2], [x24], #0x2\n"
- "ld1 { v26.h }[2], [x23], #0x2\n"
- "ld1 { v19.h }[2], [x22], #0x2\n"
- "ld1 { v18.h }[2], [x21], #0x2\n"
+ "ld1 { v28.h }[2], [x27], #0x2\n"
+ "mov x19, #0x2\n"
+ "ld1 { v29.h }[2], [x26], #0x2\n"
+ "ld1 { v25.h }[2], [x25], #0x2\n"
+ "ld1 { v21.h }[2], [x24], #0x2\n"
+ "ld1 { v27.h }[2], [x23], #0x2\n"
+ "ld1 { v26.h }[2], [x22], #0x2\n"
+ "ld1 { v19.h }[2], [x21], #0x2\n"
+ "ld1 { v24.h }[2], [x20], #0x2\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v30.b }[6], [x28]\n"
- "ld1 { v29.b }[6], [x27]\n"
- "ld1 { v28.b }[6], [x26]\n"
- "ld1 { v27.b }[6], [x25]\n"
- "ld1 { v20.b }[6], [x24]\n"
- "ld1 { v26.b }[6], [x23]\n"
- "ld1 { v19.b }[6], [x22]\n"
- "ld1 { v18.b }[6], [x21]\n"
+ "ld1 { v28.b }[6], [x27]\n"
+ "ld1 { v29.b }[6], [x26]\n"
+ "ld1 { v25.b }[6], [x25]\n"
+ "ld1 { v21.b }[6], [x24]\n"
+ "ld1 { v27.b }[6], [x23]\n"
+ "ld1 { v26.b }[6], [x22]\n"
+ "ld1 { v19.b }[6], [x21]\n"
+ "ld1 { v24.b }[6], [x20]\n"
"b 13f\n"
"10:" // odd_loads_1_4
- "mov x20, #0x1\n"
+ "mov x19, #0x1\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v30.b }[4], [x28]\n"
- "ld1 { v29.b }[4], [x27]\n"
- "mov x20, #0x2\n"
- "ld1 { v28.b }[4], [x26]\n"
- "ld1 { v27.b }[4], [x25]\n"
- "ld1 { v20.b }[4], [x24]\n"
- "ld1 { v26.b }[4], [x23]\n"
- "ld1 { v19.b }[4], [x22]\n"
- "ld1 { v18.b }[4], [x21]\n"
+ "ld1 { v28.b }[4], [x27]\n"
+ "ld1 { v29.b }[4], [x26]\n"
+ "mov x19, #0x2\n"
+ "ld1 { v25.b }[4], [x25]\n"
+ "ld1 { v21.b }[4], [x24]\n"
+ "ld1 { v27.b }[4], [x23]\n"
+ "ld1 { v26.b }[4], [x22]\n"
+ "ld1 { v19.b }[4], [x21]\n"
+ "ld1 { v24.b }[4], [x20]\n"
"b 13f\n"
"11:" // odd_loads_2_0
"tbz %x[width], #1, 12f\n"
- "ldr h30, [x28], #0x2\n"
- "ldr h29, [x27], #0x2\n"
- "mov x20, #0x1\n"
- "ldr h28, [x26], #0x2\n"
- "ldr h27, [x25], #0x2\n"
- "ldr h20, [x24], #0x2\n"
- "ldr h26, [x23], #0x2\n"
- "ldr h19, [x22], #0x2\n"
- "ldr h18, [x21], #0x2\n"
+ "ldr h28, [x27], #0x2\n"
+ "ldr h29, [x26], #0x2\n"
+ "mov x19, #0x1\n"
+ "ldr h25, [x25], #0x2\n"
+ "ldr h21, [x24], #0x2\n"
+ "ldr h27, [x23], #0x2\n"
+ "ldr h26, [x22], #0x2\n"
+ "ldr h19, [x21], #0x2\n"
+ "ldr h24, [x20], #0x2\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v30.b }[2], [x28]\n"
- "ld1 { v29.b }[2], [x27]\n"
- "ld1 { v28.b }[2], [x26]\n"
- "ld1 { v27.b }[2], [x25]\n"
- "ld1 { v20.b }[2], [x24]\n"
- "ld1 { v26.b }[2], [x23]\n"
- "ld1 { v19.b }[2], [x22]\n"
- "ld1 { v18.b }[2], [x21]\n"
+ "ld1 { v28.b }[2], [x27]\n"
+ "ld1 { v29.b }[2], [x26]\n"
+ "ld1 { v25.b }[2], [x25]\n"
+ "ld1 { v21.b }[2], [x24]\n"
+ "ld1 { v27.b }[2], [x23]\n"
+ "ld1 { v26.b }[2], [x22]\n"
+ "ld1 { v19.b }[2], [x21]\n"
+ "ld1 { v24.b }[2], [x20]\n"
"b 13f\n"
"12:" // odd_loads_1_0
- "ldr b30, [x28, #0x0]\n"
- "ldr b29, [x27, #0x0]\n"
- "mov x20, #0x1\n"
- "ldr b28, [x26, #0x0]\n"
- "ldr b27, [x25, #0x0]\n"
- "ldr b20, [x24, #0x0]\n"
- "ldr b26, [x23, #0x0]\n"
- "ldr b19, [x22, #0x0]\n"
- "ldr b18, [x21, #0x0]\n"
+ "ldr b28, [x27, #0x0]\n"
+ "mov x19, #0x1\n"
+ "ldr b29, [x26, #0x0]\n"
+ "ldr b25, [x25, #0x0]\n"
+ "ldr b21, [x24, #0x0]\n"
+ "ldr b27, [x23, #0x0]\n"
+ "ldr b26, [x22, #0x0]\n"
+ "ldr b19, [x21, #0x0]\n"
+ "ldr b24, [x20, #0x0]\n"
"13:" // Odd load end
- "zip1 v22.4s, v30.4s, v28.4s\n"
- "zip1 v21.4s, v29.4s, v27.4s\n"
- "subs x20, x20, #0x1\n"
- "zip1 v17.4s, v20.4s, v19.4s\n"
- "zip1 v16.4s, v26.4s, v18.4s\n"
- "zip1 v25.4s, v22.4s, v21.4s\n"
- "zip1 v24.4s, v17.4s, v16.4s\n"
- "str q25, [%x[out_ptr], #0x0]\n"
- "uadalp v2.8h, v25.16b\n"
- "str q24, [%x[out_ptr], #0x10]\n"
- "uadalp v1.8h, v24.16b\n"
+ "zip1 v22.4s, v28.4s, v25.4s\n"
+ "subs x19, x19, #0x1\n"
+ "zip1 v20.4s, v29.4s, v21.4s\n"
+ "zip1 v23.4s, v22.4s, v20.4s\n"
+ "str q23, [%x[out_ptr], #0x0]\n"
+ "uadalp v1.8h, v23.16b\n"
+ "zip1 v18.4s, v27.4s, v19.4s\n"
+ "zip1 v16.4s, v26.4s, v24.4s\n"
+ "zip1 v17.4s, v18.4s, v16.4s\n"
+ "str q17, [%x[out_ptr], #0x10]\n"
+ "uadalp v0.8h, v17.16b\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"beq 14f\n"
- "zip2 v23.4s, v22.4s, v21.4s\n"
- "zip2 v22.4s, v17.4s, v16.4s\n"
- "subs x20, x20, #0x1\n"
- "str q23, [%x[out_ptr], #0x0]\n"
- "uadalp v2.8h, v23.16b\n"
- "str q22, [%x[out_ptr], #0x10]\n"
+ "zip2 v22.4s, v22.4s, v20.4s\n"
+ "str q22, [%x[out_ptr], #0x0]\n"
+ "zip2 v20.4s, v18.4s, v16.4s\n"
"uadalp v1.8h, v22.16b\n"
+ "str q20, [%x[out_ptr], #0x10]\n"
+ "subs x19, x19, #0x1\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
+ "uadalp v0.8h, v20.16b\n"
"beq 14f\n"
- "zip2 v21.4s, v30.4s, v28.4s\n"
- "zip2 v17.4s, v29.4s, v27.4s\n"
- "subs x20, x20, #0x1\n"
- "zip2 v20.4s, v20.4s, v19.4s\n"
- "zip2 v16.4s, v26.4s, v18.4s\n"
- "zip1 v19.4s, v21.4s, v17.4s\n"
- "zip1 v18.4s, v20.4s, v16.4s\n"
- "str q19, [%x[out_ptr], #0x0]\n"
- "uadalp v2.8h, v19.16b\n"
+ "zip2 v28.4s, v28.4s, v25.4s\n"
+ "zip2 v25.4s, v29.4s, v21.4s\n"
+ "subs x19, x19, #0x1\n"
+ "zip1 v21.4s, v28.4s, v25.4s\n"
+ "str q21, [%x[out_ptr], #0x0]\n"
+ "uadalp v1.8h, v21.16b\n"
+ "zip2 v19.4s, v27.4s, v19.4s\n"
+ "zip2 v16.4s, v26.4s, v24.4s\n"
+ "zip1 v18.4s, v19.4s, v16.4s\n"
"str q18, [%x[out_ptr], #0x10]\n"
- "uadalp v1.8h, v18.16b\n"
+ "uadalp v0.8h, v18.16b\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"beq 14f\n"
- "zip2 v17.4s, v21.4s, v17.4s\n"
- "zip2 v16.4s, v20.4s, v16.4s\n"
+ "zip2 v17.4s, v28.4s, v25.4s\n"
"str q17, [%x[out_ptr], #0x0]\n"
- "uadalp v2.8h, v17.16b\n"
+ "zip2 v16.4s, v19.4s, v16.4s\n"
+ "uadalp v1.8h, v17.16b\n"
"str q16, [%x[out_ptr], #0x10]\n"
- "uadalp v1.8h, v16.16b\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
+ "uadalp v0.8h, v16.16b\n"
"14:" // Odds skip
- "uadalp v0.4s, v2.8h\n"
"uadalp v31.4s, v1.8h\n"
- "str q0, [%x[out_ptr], #0x0]\n"
- "str q31, [%x[out_ptr], #0x10]\n"
+ "str q31, [%x[out_ptr], #0x0]\n"
+ "uadalp v30.4s, v0.8h\n"
+ "str q30, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
: [out_ptr] "+&r" (out_ptr), [width] "+&r" (width)
: [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset)
- : "cc", "memory", "v0", "v1", "v2", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block8_s8_s8.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block8_s8_s8.hpp
index 4bfb36082e..1330593cbf 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block8_s8_s8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block8_s8_s8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -31,45 +31,44 @@ void interleave_block<8, 8, VLType::None, false>(
)
{
__asm__ __volatile__(
- "ldr x28, [%x[in], #0x0]\n"
- "ldr x27, [%x[in], #0x8]\n"
+ "ldr x27, [%x[in], #0x0]\n"
"cmp %x[height], #0x8\n"
- "add x28, x28, %x[row_offset]\n"
- "ldr x26, [%x[in], #0x10]\n"
- "ldr x25, [%x[in], #0x18]\n"
+ "ldr x26, [%x[in], #0x8]\n"
"add x27, x27, %x[row_offset]\n"
+ "ldr x25, [%x[in], #0x10]\n"
+ "ldr x24, [%x[in], #0x18]\n"
"add x26, x26, %x[row_offset]\n"
- "ldr x24, [%x[in], #0x20]\n"
- "ldr x23, [%x[in], #0x28]\n"
+ "ldr x23, [%x[in], #0x20]\n"
"add x25, x25, %x[row_offset]\n"
+ "ldr x22, [%x[in], #0x28]\n"
+ "ldr x21, [%x[in], #0x30]\n"
"add x24, x24, %x[row_offset]\n"
- "ldr x22, [%x[in], #0x30]\n"
- "ldr x21, [%x[in], #0x38]\n"
+ "ldr x20, [%x[in], #0x38]\n"
"add x23, x23, %x[row_offset]\n"
"add x22, x22, %x[row_offset]\n"
"add x21, x21, %x[row_offset]\n"
+ "add x20, x20, %x[row_offset]\n"
"beq 1f\n"
+ "mov x20, x27\n"
"cmp %x[height], #0x2\n"
- "csel x27, x27, x28, GE\n"
- "csel x26, x26, x28, GT\n"
+ "csel x26, x26, x27, GE\n"
+ "csel x25, x25, x27, GT\n"
"cmp %x[height], #0x4\n"
- "csel x25, x25, x28, GE\n"
- "csel x24, x24, x28, GT\n"
+ "csel x24, x24, x27, GE\n"
+ "csel x23, x23, x27, GT\n"
"cmp %x[height], #0x6\n"
- "mov x21, x28\n"
- "csel x23, x23, x28, GE\n"
- "csel x22, x22, x28, GT\n"
+ "csel x22, x22, x27, GE\n"
+ "csel x21, x21, x27, GT\n"
"1:" // no_pointer_adj
- "cmp %x[width], #0x10\n"
- "prfm pldl1keep, [x28, #0x0]\n"
"prfm pldl1keep, [x27, #0x0]\n"
+ "cmp %x[width], #0x10\n"
"prfm pldl1keep, [x26, #0x0]\n"
"prfm pldl1keep, [x25, #0x0]\n"
"prfm pldl1keep, [x24, #0x0]\n"
"prfm pldl1keep, [x23, #0x0]\n"
"prfm pldl1keep, [x22, #0x0]\n"
"prfm pldl1keep, [x21, #0x0]\n"
- "prfm pldl1keep, [x28, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x0]\n"
"prfm pldl1keep, [x27, #0x40]\n"
"prfm pldl1keep, [x26, #0x40]\n"
"prfm pldl1keep, [x25, #0x40]\n"
@@ -77,229 +76,230 @@ void interleave_block<8, 8, VLType::None, false>(
"prfm pldl1keep, [x23, #0x40]\n"
"prfm pldl1keep, [x22, #0x40]\n"
"prfm pldl1keep, [x21, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x40]\n"
"blt 3f\n"
"2:" // Main loop head
- "ldr q26, [x28], #0x10\n"
- "ldr q21, [x27], #0x10\n"
+ "ldr q27, [x27], #0x10\n"
"subs %x[width], %x[width], #0x10\n"
+ "ldr q24, [x26], #0x10\n"
+ "zip1 v26.2d, v27.2d, v24.2d\n"
+ "ldr q25, [x25], #0x10\n"
"cmp %x[width], #0x10\n"
- "ldr q25, [x26], #0x10\n"
- "ldr q24, [x25], #0x10\n"
- "zip1 v16.2d, v26.2d, v21.2d\n"
- "zip1 v18.2d, v25.2d, v24.2d\n"
- "ldr q23, [x24], #0x10\n"
- "ldr q22, [x23], #0x10\n"
- "zip1 v17.2d, v23.2d, v22.2d\n"
- "zip2 v21.2d, v26.2d, v21.2d\n"
- "ldr q20, [x22], #0x10\n"
- "ldr q19, [x21], #0x10\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip1 v16.2d, v20.2d, v19.2d\n"
- "prfm pldl1keep, [x28, #0x70]\n"
+ "zip2 v24.2d, v27.2d, v24.2d\n"
+ "ldr q21, [x24], #0x10\n"
+ "ldr q23, [x23], #0x10\n"
+ "zip1 v22.2d, v25.2d, v21.2d\n"
+ "ldr q18, [x22], #0x10\n"
+ "zip2 v21.2d, v25.2d, v21.2d\n"
+ "ldr q20, [x21], #0x10\n"
+ "ldr q16, [x20], #0x10\n"
+ "zip1 v19.2d, v23.2d, v18.2d\n"
"prfm pldl1keep, [x27, #0x70]\n"
- "str q18, [%x[out_ptr], #0x10]\n"
- "zip2 v18.2d, v25.2d, v24.2d\n"
+ "zip2 v18.2d, v23.2d, v18.2d\n"
"prfm pldl1keep, [x26, #0x70]\n"
+ "zip1 v17.2d, v20.2d, v16.2d\n"
"prfm pldl1keep, [x25, #0x70]\n"
- "str q17, [%x[out_ptr], #0x20]\n"
- "zip2 v17.2d, v23.2d, v22.2d\n"
+ "zip2 v16.2d, v20.2d, v16.2d\n"
"prfm pldl1keep, [x24, #0x70]\n"
"prfm pldl1keep, [x23, #0x70]\n"
- "str q16, [%x[out_ptr], #0x30]\n"
- "zip2 v16.2d, v20.2d, v19.2d\n"
"prfm pldl1keep, [x22, #0x70]\n"
"prfm pldl1keep, [x21, #0x70]\n"
- "str q21, [%x[out_ptr], #0x40]\n"
- "str q18, [%x[out_ptr], #0x50]\n"
- "str q17, [%x[out_ptr], #0x60]\n"
+ "prfm pldl1keep, [x20, #0x70]\n"
+ "str q26, [%x[out_ptr], #0x0]\n"
+ "str q22, [%x[out_ptr], #0x10]\n"
+ "str q19, [%x[out_ptr], #0x20]\n"
+ "str q17, [%x[out_ptr], #0x30]\n"
+ "str q24, [%x[out_ptr], #0x40]\n"
+ "str q21, [%x[out_ptr], #0x50]\n"
+ "str q18, [%x[out_ptr], #0x60]\n"
"str q16, [%x[out_ptr], #0x70]\n"
"add %x[out_ptr], %x[out_ptr], #0x80\n"
"bge 2b\n"
"3:" // Main loop skip
"cbz %x[width], 12f\n"
"tbz %x[width], #3, 7f\n"
- "ldr d26, [x28], #0x8\n"
- "ldr d21, [x27], #0x8\n"
- "ldr d25, [x26], #0x8\n"
- "ldr d24, [x25], #0x8\n"
- "ldr d23, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d20, [x22], #0x8\n"
- "ldr d19, [x21], #0x8\n"
+ "ldr d27, [x27], #0x8\n"
+ "ldr d24, [x26], #0x8\n"
+ "ldr d25, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d23, [x23], #0x8\n"
+ "ldr d18, [x22], #0x8\n"
+ "ldr d20, [x21], #0x8\n"
+ "ldr d16, [x20], #0x8\n"
"tbz %x[width], #2, 5f\n"
- "ld1 { v26.s }[2], [x28], #0x4\n"
- "ld1 { v21.s }[2], [x27], #0x4\n"
- "ld1 { v25.s }[2], [x26], #0x4\n"
- "ld1 { v24.s }[2], [x25], #0x4\n"
- "ld1 { v23.s }[2], [x24], #0x4\n"
- "ld1 { v22.s }[2], [x23], #0x4\n"
- "ld1 { v20.s }[2], [x22], #0x4\n"
- "ld1 { v19.s }[2], [x21], #0x4\n"
+ "ld1 { v27.s }[2], [x27], #0x4\n"
+ "ld1 { v24.s }[2], [x26], #0x4\n"
+ "ld1 { v25.s }[2], [x25], #0x4\n"
+ "ld1 { v21.s }[2], [x24], #0x4\n"
+ "ld1 { v23.s }[2], [x23], #0x4\n"
+ "ld1 { v18.s }[2], [x22], #0x4\n"
+ "ld1 { v20.s }[2], [x21], #0x4\n"
+ "ld1 { v16.s }[2], [x20], #0x4\n"
"tbz %x[width], #1, 4f\n"
- "ld1 { v26.h }[6], [x28], #0x2\n"
- "ld1 { v21.h }[6], [x27], #0x2\n"
- "mov x20, #0x2\n"
- "ld1 { v25.h }[6], [x26], #0x2\n"
- "ld1 { v24.h }[6], [x25], #0x2\n"
- "ld1 { v23.h }[6], [x24], #0x2\n"
- "ld1 { v22.h }[6], [x23], #0x2\n"
- "ld1 { v20.h }[6], [x22], #0x2\n"
- "ld1 { v19.h }[6], [x21], #0x2\n"
+ "ld1 { v27.h }[6], [x27], #0x2\n"
+ "mov x19, #0x2\n"
+ "ld1 { v24.h }[6], [x26], #0x2\n"
+ "ld1 { v25.h }[6], [x25], #0x2\n"
+ "ld1 { v21.h }[6], [x24], #0x2\n"
+ "ld1 { v23.h }[6], [x23], #0x2\n"
+ "ld1 { v18.h }[6], [x22], #0x2\n"
+ "ld1 { v20.h }[6], [x21], #0x2\n"
+ "ld1 { v16.h }[6], [x20], #0x2\n"
"tbz %x[width], #0, 11f\n"
- "ld1 { v26.b }[14], [x28]\n"
- "ld1 { v21.b }[14], [x27]\n"
- "ld1 { v25.b }[14], [x26]\n"
- "ld1 { v24.b }[14], [x25]\n"
- "ld1 { v23.b }[14], [x24]\n"
- "ld1 { v22.b }[14], [x23]\n"
- "ld1 { v20.b }[14], [x22]\n"
- "ld1 { v19.b }[14], [x21]\n"
+ "ld1 { v27.b }[14], [x27]\n"
+ "ld1 { v24.b }[14], [x26]\n"
+ "ld1 { v25.b }[14], [x25]\n"
+ "ld1 { v21.b }[14], [x24]\n"
+ "ld1 { v23.b }[14], [x23]\n"
+ "ld1 { v18.b }[14], [x22]\n"
+ "ld1 { v20.b }[14], [x21]\n"
+ "ld1 { v16.b }[14], [x20]\n"
"b 11f\n"
"4:" // odd_loads_1_12
- "mov x20, #0x2\n"
+ "mov x19, #0x2\n"
"tbz %x[width], #0, 11f\n"
- "ld1 { v26.b }[12], [x28]\n"
- "ld1 { v21.b }[12], [x27]\n"
- "ld1 { v25.b }[12], [x26]\n"
- "ld1 { v24.b }[12], [x25]\n"
- "ld1 { v23.b }[12], [x24]\n"
- "ld1 { v22.b }[12], [x23]\n"
- "ld1 { v20.b }[12], [x22]\n"
- "ld1 { v19.b }[12], [x21]\n"
+ "ld1 { v27.b }[12], [x27]\n"
+ "ld1 { v24.b }[12], [x26]\n"
+ "ld1 { v25.b }[12], [x25]\n"
+ "ld1 { v21.b }[12], [x24]\n"
+ "ld1 { v23.b }[12], [x23]\n"
+ "ld1 { v18.b }[12], [x22]\n"
+ "ld1 { v20.b }[12], [x21]\n"
+ "ld1 { v16.b }[12], [x20]\n"
"b 11f\n"
"5:" // odd_loads_2_8
"tbz %x[width], #1, 6f\n"
- "ld1 { v26.h }[4], [x28], #0x2\n"
- "ld1 { v21.h }[4], [x27], #0x2\n"
- "mov x20, #0x2\n"
- "ld1 { v25.h }[4], [x26], #0x2\n"
- "ld1 { v24.h }[4], [x25], #0x2\n"
- "ld1 { v23.h }[4], [x24], #0x2\n"
- "ld1 { v22.h }[4], [x23], #0x2\n"
- "ld1 { v20.h }[4], [x22], #0x2\n"
- "ld1 { v19.h }[4], [x21], #0x2\n"
+ "ld1 { v27.h }[4], [x27], #0x2\n"
+ "ld1 { v24.h }[4], [x26], #0x2\n"
+ "mov x19, #0x2\n"
+ "ld1 { v25.h }[4], [x25], #0x2\n"
+ "ld1 { v21.h }[4], [x24], #0x2\n"
+ "ld1 { v23.h }[4], [x23], #0x2\n"
+ "ld1 { v18.h }[4], [x22], #0x2\n"
+ "ld1 { v20.h }[4], [x21], #0x2\n"
+ "ld1 { v16.h }[4], [x20], #0x2\n"
"tbz %x[width], #0, 11f\n"
- "ld1 { v26.b }[10], [x28]\n"
- "ld1 { v21.b }[10], [x27]\n"
- "ld1 { v25.b }[10], [x26]\n"
- "ld1 { v24.b }[10], [x25]\n"
- "ld1 { v23.b }[10], [x24]\n"
- "ld1 { v22.b }[10], [x23]\n"
- "ld1 { v20.b }[10], [x22]\n"
- "ld1 { v19.b }[10], [x21]\n"
+ "ld1 { v27.b }[10], [x27]\n"
+ "ld1 { v24.b }[10], [x26]\n"
+ "ld1 { v25.b }[10], [x25]\n"
+ "ld1 { v21.b }[10], [x24]\n"
+ "ld1 { v23.b }[10], [x23]\n"
+ "ld1 { v18.b }[10], [x22]\n"
+ "ld1 { v20.b }[10], [x21]\n"
+ "ld1 { v16.b }[10], [x20]\n"
"b 11f\n"
"6:" // odd_loads_1_8
- "mov x20, #0x1\n"
+ "mov x19, #0x1\n"
"tbz %x[width], #0, 11f\n"
- "ld1 { v26.b }[8], [x28]\n"
- "ld1 { v21.b }[8], [x27]\n"
- "mov x20, #0x2\n"
- "ld1 { v25.b }[8], [x26]\n"
- "ld1 { v24.b }[8], [x25]\n"
- "ld1 { v23.b }[8], [x24]\n"
- "ld1 { v22.b }[8], [x23]\n"
- "ld1 { v20.b }[8], [x22]\n"
- "ld1 { v19.b }[8], [x21]\n"
+ "ld1 { v27.b }[8], [x27]\n"
+ "ld1 { v24.b }[8], [x26]\n"
+ "mov x19, #0x2\n"
+ "ld1 { v25.b }[8], [x25]\n"
+ "ld1 { v21.b }[8], [x24]\n"
+ "ld1 { v23.b }[8], [x23]\n"
+ "ld1 { v18.b }[8], [x22]\n"
+ "ld1 { v20.b }[8], [x21]\n"
+ "ld1 { v16.b }[8], [x20]\n"
"b 11f\n"
"7:" // odd_loads_4_0
"tbz %x[width], #2, 9f\n"
- "ldr s26, [x28], #0x4\n"
- "ldr s21, [x27], #0x4\n"
- "ldr s25, [x26], #0x4\n"
- "ldr s24, [x25], #0x4\n"
- "ldr s23, [x24], #0x4\n"
- "ldr s22, [x23], #0x4\n"
- "ldr s20, [x22], #0x4\n"
- "ldr s19, [x21], #0x4\n"
+ "ldr s27, [x27], #0x4\n"
+ "ldr s24, [x26], #0x4\n"
+ "ldr s25, [x25], #0x4\n"
+ "ldr s21, [x24], #0x4\n"
+ "ldr s23, [x23], #0x4\n"
+ "ldr s18, [x22], #0x4\n"
+ "ldr s20, [x21], #0x4\n"
+ "ldr s16, [x20], #0x4\n"
"tbz %x[width], #1, 8f\n"
- "ld1 { v26.h }[2], [x28], #0x2\n"
- "ld1 { v21.h }[2], [x27], #0x2\n"
- "mov x20, #0x1\n"
- "ld1 { v25.h }[2], [x26], #0x2\n"
- "ld1 { v24.h }[2], [x25], #0x2\n"
- "ld1 { v23.h }[2], [x24], #0x2\n"
- "ld1 { v22.h }[2], [x23], #0x2\n"
- "ld1 { v20.h }[2], [x22], #0x2\n"
- "ld1 { v19.h }[2], [x21], #0x2\n"
+ "ld1 { v27.h }[2], [x27], #0x2\n"
+ "mov x19, #0x1\n"
+ "ld1 { v24.h }[2], [x26], #0x2\n"
+ "ld1 { v25.h }[2], [x25], #0x2\n"
+ "ld1 { v21.h }[2], [x24], #0x2\n"
+ "ld1 { v23.h }[2], [x23], #0x2\n"
+ "ld1 { v18.h }[2], [x22], #0x2\n"
+ "ld1 { v20.h }[2], [x21], #0x2\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
"tbz %x[width], #0, 11f\n"
- "ld1 { v26.b }[6], [x28]\n"
- "ld1 { v21.b }[6], [x27]\n"
- "ld1 { v25.b }[6], [x26]\n"
- "ld1 { v24.b }[6], [x25]\n"
- "ld1 { v23.b }[6], [x24]\n"
- "ld1 { v22.b }[6], [x23]\n"
- "ld1 { v20.b }[6], [x22]\n"
- "ld1 { v19.b }[6], [x21]\n"
+ "ld1 { v27.b }[6], [x27]\n"
+ "ld1 { v24.b }[6], [x26]\n"
+ "ld1 { v25.b }[6], [x25]\n"
+ "ld1 { v21.b }[6], [x24]\n"
+ "ld1 { v23.b }[6], [x23]\n"
+ "ld1 { v18.b }[6], [x22]\n"
+ "ld1 { v20.b }[6], [x21]\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 11f\n"
"8:" // odd_loads_1_4
- "mov x20, #0x1\n"
+ "mov x19, #0x1\n"
"tbz %x[width], #0, 11f\n"
- "ld1 { v26.b }[4], [x28]\n"
- "ld1 { v21.b }[4], [x27]\n"
- "ld1 { v25.b }[4], [x26]\n"
- "ld1 { v24.b }[4], [x25]\n"
- "ld1 { v23.b }[4], [x24]\n"
- "ld1 { v22.b }[4], [x23]\n"
- "ld1 { v20.b }[4], [x22]\n"
- "ld1 { v19.b }[4], [x21]\n"
+ "ld1 { v27.b }[4], [x27]\n"
+ "ld1 { v24.b }[4], [x26]\n"
+ "ld1 { v25.b }[4], [x25]\n"
+ "ld1 { v21.b }[4], [x24]\n"
+ "ld1 { v23.b }[4], [x23]\n"
+ "ld1 { v18.b }[4], [x22]\n"
+ "ld1 { v20.b }[4], [x21]\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 11f\n"
"9:" // odd_loads_2_0
"tbz %x[width], #1, 10f\n"
- "ldr h26, [x28], #0x2\n"
- "ldr h21, [x27], #0x2\n"
- "mov x20, #0x1\n"
- "ldr h25, [x26], #0x2\n"
- "ldr h24, [x25], #0x2\n"
- "ldr h23, [x24], #0x2\n"
- "ldr h22, [x23], #0x2\n"
- "ldr h20, [x22], #0x2\n"
- "ldr h19, [x21], #0x2\n"
+ "ldr h27, [x27], #0x2\n"
+ "ldr h24, [x26], #0x2\n"
+ "mov x19, #0x1\n"
+ "ldr h25, [x25], #0x2\n"
+ "ldr h21, [x24], #0x2\n"
+ "ldr h23, [x23], #0x2\n"
+ "ldr h18, [x22], #0x2\n"
+ "ldr h20, [x21], #0x2\n"
+ "ldr h16, [x20], #0x2\n"
"tbz %x[width], #0, 11f\n"
- "ld1 { v26.b }[2], [x28]\n"
- "ld1 { v21.b }[2], [x27]\n"
- "ld1 { v25.b }[2], [x26]\n"
- "ld1 { v24.b }[2], [x25]\n"
- "ld1 { v23.b }[2], [x24]\n"
- "ld1 { v22.b }[2], [x23]\n"
- "ld1 { v20.b }[2], [x22]\n"
- "ld1 { v19.b }[2], [x21]\n"
+ "ld1 { v27.b }[2], [x27]\n"
+ "ld1 { v24.b }[2], [x26]\n"
+ "ld1 { v25.b }[2], [x25]\n"
+ "ld1 { v21.b }[2], [x24]\n"
+ "ld1 { v23.b }[2], [x23]\n"
+ "ld1 { v18.b }[2], [x22]\n"
+ "ld1 { v20.b }[2], [x21]\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 11f\n"
"10:" // odd_loads_1_0
- "ldr b26, [x28, #0x0]\n"
- "ldr b21, [x27, #0x0]\n"
- "mov x20, #0x1\n"
- "ldr b25, [x26, #0x0]\n"
- "ldr b24, [x25, #0x0]\n"
- "ldr b23, [x24, #0x0]\n"
- "ldr b22, [x23, #0x0]\n"
- "ldr b20, [x22, #0x0]\n"
- "ldr b19, [x21, #0x0]\n"
+ "ldr b27, [x27, #0x0]\n"
+ "mov x19, #0x1\n"
+ "ldr b24, [x26, #0x0]\n"
+ "ldr b25, [x25, #0x0]\n"
+ "ldr b21, [x24, #0x0]\n"
+ "ldr b23, [x23, #0x0]\n"
+ "ldr b18, [x22, #0x0]\n"
+ "ldr b20, [x21, #0x0]\n"
+ "ldr b16, [x20, #0x0]\n"
"11:" // Odd load end
- "subs x20, x20, #0x1\n"
- "zip1 v16.2d, v26.2d, v21.2d\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip1 v18.2d, v25.2d, v24.2d\n"
- "str q18, [%x[out_ptr], #0x10]\n"
- "zip1 v17.2d, v23.2d, v22.2d\n"
- "zip1 v16.2d, v20.2d, v19.2d\n"
- "str q17, [%x[out_ptr], #0x20]\n"
- "str q16, [%x[out_ptr], #0x30]\n"
+ "zip1 v26.2d, v27.2d, v24.2d\n"
+ "str q26, [%x[out_ptr], #0x0]\n"
+ "zip1 v22.2d, v25.2d, v21.2d\n"
+ "subs x19, x19, #0x1\n"
+ "zip1 v19.2d, v23.2d, v18.2d\n"
+ "str q22, [%x[out_ptr], #0x10]\n"
+ "zip1 v17.2d, v20.2d, v16.2d\n"
+ "str q19, [%x[out_ptr], #0x20]\n"
+ "str q17, [%x[out_ptr], #0x30]\n"
"add %x[out_ptr], %x[out_ptr], #0x40\n"
"beq 12f\n"
- "zip2 v21.2d, v26.2d, v21.2d\n"
- "str q21, [%x[out_ptr], #0x0]\n"
- "zip2 v18.2d, v25.2d, v24.2d\n"
- "str q18, [%x[out_ptr], #0x10]\n"
- "zip2 v17.2d, v23.2d, v22.2d\n"
- "zip2 v16.2d, v20.2d, v19.2d\n"
- "str q17, [%x[out_ptr], #0x20]\n"
+ "zip2 v24.2d, v27.2d, v24.2d\n"
+ "str q24, [%x[out_ptr], #0x0]\n"
+ "zip2 v21.2d, v25.2d, v21.2d\n"
+ "zip2 v18.2d, v23.2d, v18.2d\n"
+ "str q21, [%x[out_ptr], #0x10]\n"
+ "zip2 v16.2d, v20.2d, v16.2d\n"
+ "str q18, [%x[out_ptr], #0x20]\n"
"str q16, [%x[out_ptr], #0x30]\n"
"add %x[out_ptr], %x[out_ptr], #0x40\n"
"12:" // Odds skip
: [out_ptr] "+&r" (out_ptr), [width] "+&r" (width)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset)
- : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27"
);
}
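
(For orientation: the tail above loads up to eight partial rows lane-by-lane, then the "Odd load end" block pairs rows with zip1/zip2 on 64-bit lanes before storing 16-byte blocks. A minimal portable C++ sketch of that pairing, assuming each row buffer is zero-padded to 16 bytes; the helper name is hypothetical:)

#include <cstdint>
#include <cstring>

// Portable sketch of the zip1/zip2 (.2d) pairing used above: rows are
// combined two at a time, low 8 bytes first (zip1), then high 8 bytes
// (zip2) only when a second 8-byte block of each row is valid
// (the subs/beq on the odd-block counter).
static void interleave8_block8_tail(const uint8_t *const rows[8],
                                    uint8_t *out, bool second_block) {
    for (int pair = 0; pair < 4; ++pair) {          // zip1 v*.2d
        std::memcpy(out + 16 * pair,     rows[2 * pair],     8);
        std::memcpy(out + 16 * pair + 8, rows[2 * pair + 1], 8);
    }
    out += 64;                                      // add out_ptr, #0x40
    if (!second_block) return;
    for (int pair = 0; pair < 4; ++pair) {          // zip2 v*.2d
        std::memcpy(out + 16 * pair,     rows[2 * pair] + 8,     8);
        std::memcpy(out + 16 * pair + 8, rows[2 * pair + 1] + 8, 8);
    }
}
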
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block8_s8_s8_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block8_s8_s8_summing.hpp
index c6ad2949f5..3550830fc3 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block8_s8_s8_summing.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block8_s8_s8_summing.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -31,47 +31,46 @@ void interleave_block<8, 8, VLType::None, true>(
)
{
__asm__ __volatile__(
- "ldr x28, [%x[in], #0x0]\n"
- "ldr x27, [%x[in], #0x8]\n"
- "cmp %x[height], #0x8\n"
- "mov x20, #0x0\n"
- "ldr x26, [%x[in], #0x10]\n"
- "ldr x25, [%x[in], #0x18]\n"
"movi v5.8h, #0x0\n"
+ "ldr x27, [%x[in], #0x0]\n"
+ "mov x19, #0x0\n"
"movi v4.8h, #0x0\n"
- "ldr x24, [%x[in], #0x20]\n"
- "ldr x23, [%x[in], #0x28]\n"
+ "ldr x26, [%x[in], #0x8]\n"
+ "cmp %x[height], #0x8\n"
"movi v3.8h, #0x0\n"
+ "ldr x25, [%x[in], #0x10]\n"
+ "add x27, x27, %x[row_offset]\n"
"movi v2.8h, #0x0\n"
- "ldr x22, [%x[in], #0x30]\n"
- "ldr x21, [%x[in], #0x38]\n"
+ "ldr x24, [%x[in], #0x18]\n"
"movi v1.4s, #0x0\n"
- "movi v0.4s, #0x0\n"
- "movi v31.4s, #0x0\n"
- "movi v30.4s, #0x0\n"
- "add x28, x28, %x[row_offset]\n"
- "add x27, x27, %x[row_offset]\n"
+ "ldr x23, [%x[in], #0x20]\n"
"add x26, x26, %x[row_offset]\n"
+ "movi v0.4s, #0x0\n"
+ "ldr x22, [%x[in], #0x28]\n"
"add x25, x25, %x[row_offset]\n"
+ "movi v31.4s, #0x0\n"
+ "ldr x21, [%x[in], #0x30]\n"
"add x24, x24, %x[row_offset]\n"
+ "movi v30.4s, #0x0\n"
+ "ldr x20, [%x[in], #0x38]\n"
"add x23, x23, %x[row_offset]\n"
"add x22, x22, %x[row_offset]\n"
"add x21, x21, %x[row_offset]\n"
+ "add x20, x20, %x[row_offset]\n"
"beq 1f\n"
+ "mov x20, x27\n"
"cmp %x[height], #0x2\n"
- "csel x27, x27, x28, GE\n"
- "csel x26, x26, x28, GT\n"
+ "csel x26, x26, x27, GE\n"
+ "csel x25, x25, x27, GT\n"
"cmp %x[height], #0x4\n"
- "csel x25, x25, x28, GE\n"
- "csel x24, x24, x28, GT\n"
+ "csel x24, x24, x27, GE\n"
+ "csel x23, x23, x27, GT\n"
"cmp %x[height], #0x6\n"
- "mov x21, x28\n"
- "csel x23, x23, x28, GE\n"
- "csel x22, x22, x28, GT\n"
+ "csel x22, x22, x27, GE\n"
+ "csel x21, x21, x27, GT\n"
"1:" // no_pointer_adj
- "prfm pldl1keep, [x28, #0x0]\n"
- "prfm pldl1keep, [x27, #0x0]\n"
"movi v29.4s, #0x0\n"
+ "prfm pldl1keep, [x27, #0x0]\n"
"movi v28.4s, #0x0\n"
"prfm pldl1keep, [x26, #0x0]\n"
"prfm pldl1keep, [x25, #0x0]\n"
@@ -79,7 +78,7 @@ void interleave_block<8, 8, VLType::None, true>(
"prfm pldl1keep, [x23, #0x0]\n"
"prfm pldl1keep, [x22, #0x0]\n"
"prfm pldl1keep, [x21, #0x0]\n"
- "prfm pldl1keep, [x28, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x0]\n"
"prfm pldl1keep, [x27, #0x40]\n"
"prfm pldl1keep, [x26, #0x40]\n"
"prfm pldl1keep, [x25, #0x40]\n"
@@ -87,6 +86,7 @@ void interleave_block<8, 8, VLType::None, true>(
"prfm pldl1keep, [x23, #0x40]\n"
"prfm pldl1keep, [x22, #0x40]\n"
"prfm pldl1keep, [x21, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x40]\n"
"cbnz %w[first], 2f\n"
"sub %x[out_ptr], %x[out_ptr], #0x20\n"
"ld1 { v29.4s }, [%x[out_ptr]]\n"
@@ -95,266 +95,266 @@ void interleave_block<8, 8, VLType::None, true>(
"cmp %x[width], #0x10\n"
"blt 5f\n"
"3:" // Main loop head
- "cmp x20, #0x3e\n"
+ "cmp x19, #0x3e\n"
"ble 4f\n"
"sadalp v1.4s, v5.8h\n"
"movi v5.8h, #0x0\n"
- "mov x20, #0x0\n"
"sadalp v0.4s, v4.8h\n"
"movi v4.8h, #0x0\n"
"sadalp v31.4s, v3.8h\n"
"movi v3.8h, #0x0\n"
"sadalp v30.4s, v2.8h\n"
"movi v2.8h, #0x0\n"
+ "mov x19, #0x0\n"
"4:" // no_accumulate_16
- "ldr q27, [x28], #0x10\n"
- "ldr q19, [x27], #0x10\n"
- "zip1 v26.2d, v27.2d, v19.2d\n"
- "sadalp v5.8h, v26.16b\n"
- "ldr q25, [x26], #0x10\n"
- "ldr q18, [x25], #0x10\n"
- "zip1 v24.2d, v25.2d, v18.2d\n"
- "sadalp v4.8h, v24.16b\n"
- "ldr q23, [x24], #0x10\n"
- "ldr q17, [x23], #0x10\n"
- "zip1 v22.2d, v23.2d, v17.2d\n"
- "sadalp v3.8h, v22.16b\n"
- "ldr q21, [x22], #0x10\n"
- "ldr q16, [x21], #0x10\n"
- "zip1 v20.2d, v21.2d, v16.2d\n"
- "sadalp v2.8h, v20.16b\n"
- "zip2 v19.2d, v27.2d, v19.2d\n"
- "zip2 v18.2d, v25.2d, v18.2d\n"
+ "ldr q27, [x27], #0x10\n"
+ "add x19, x19, #0x1\n"
+ "ldr q24, [x26], #0x10\n"
+ "zip1 v26.2d, v27.2d, v24.2d\n"
+ "ldr q25, [x25], #0x10\n"
"subs %x[width], %x[width], #0x10\n"
+ "zip2 v24.2d, v27.2d, v24.2d\n"
+ "ldr q21, [x24], #0x10\n"
+ "sadalp v5.8h, v26.16b\n"
+ "zip1 v23.2d, v25.2d, v21.2d\n"
+ "ldr q22, [x23], #0x10\n"
"cmp %x[width], #0x10\n"
- "zip2 v17.2d, v23.2d, v17.2d\n"
- "zip2 v16.2d, v21.2d, v16.2d\n"
- "prfm pldl1keep, [x28, #0x70]\n"
+ "zip2 v21.2d, v25.2d, v21.2d\n"
+ "ldr q18, [x22], #0x10\n"
+ "sadalp v4.8h, v23.16b\n"
+ "zip1 v20.2d, v22.2d, v18.2d\n"
+ "ldr q19, [x21], #0x10\n"
+ "sadalp v5.8h, v24.16b\n"
+ "zip2 v18.2d, v22.2d, v18.2d\n"
+ "ldr q16, [x20], #0x10\n"
+ "sadalp v3.8h, v20.16b\n"
+ "zip1 v17.2d, v19.2d, v16.2d\n"
"prfm pldl1keep, [x27, #0x70]\n"
+ "sadalp v4.8h, v21.16b\n"
+ "zip2 v16.2d, v19.2d, v16.2d\n"
"prfm pldl1keep, [x26, #0x70]\n"
+ "sadalp v2.8h, v17.16b\n"
"prfm pldl1keep, [x25, #0x70]\n"
- "str q26, [%x[out_ptr], #0x0]\n"
- "sadalp v5.8h, v19.16b\n"
+ "sadalp v3.8h, v18.16b\n"
"prfm pldl1keep, [x24, #0x70]\n"
+ "sadalp v2.8h, v16.16b\n"
"prfm pldl1keep, [x23, #0x70]\n"
- "str q24, [%x[out_ptr], #0x10]\n"
- "sadalp v4.8h, v18.16b\n"
"prfm pldl1keep, [x22, #0x70]\n"
"prfm pldl1keep, [x21, #0x70]\n"
- "str q22, [%x[out_ptr], #0x20]\n"
- "sadalp v3.8h, v17.16b\n"
- "str q20, [%x[out_ptr], #0x30]\n"
- "sadalp v2.8h, v16.16b\n"
- "add x20, x20, #0x1\n"
- "str q19, [%x[out_ptr], #0x40]\n"
- "str q18, [%x[out_ptr], #0x50]\n"
- "str q17, [%x[out_ptr], #0x60]\n"
+ "prfm pldl1keep, [x20, #0x70]\n"
+ "str q26, [%x[out_ptr], #0x0]\n"
+ "str q23, [%x[out_ptr], #0x10]\n"
+ "str q20, [%x[out_ptr], #0x20]\n"
+ "str q17, [%x[out_ptr], #0x30]\n"
+ "str q24, [%x[out_ptr], #0x40]\n"
+ "str q21, [%x[out_ptr], #0x50]\n"
+ "str q18, [%x[out_ptr], #0x60]\n"
"str q16, [%x[out_ptr], #0x70]\n"
"add %x[out_ptr], %x[out_ptr], #0x80\n"
"bge 3b\n"
"5:" // Main loop skip
"cbz %x[width], 14f\n"
"tbz %x[width], #3, 9f\n"
- "ldr d27, [x28], #0x8\n"
- "ldr d19, [x27], #0x8\n"
- "ldr d25, [x26], #0x8\n"
- "ldr d18, [x25], #0x8\n"
- "ldr d23, [x24], #0x8\n"
- "ldr d17, [x23], #0x8\n"
- "ldr d21, [x22], #0x8\n"
- "ldr d16, [x21], #0x8\n"
+ "ldr d27, [x27], #0x8\n"
+ "ldr d24, [x26], #0x8\n"
+ "ldr d25, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d22, [x23], #0x8\n"
+ "ldr d18, [x22], #0x8\n"
+ "ldr d19, [x21], #0x8\n"
+ "ldr d16, [x20], #0x8\n"
"tbz %x[width], #2, 7f\n"
- "ld1 { v27.s }[2], [x28], #0x4\n"
- "ld1 { v19.s }[2], [x27], #0x4\n"
- "ld1 { v25.s }[2], [x26], #0x4\n"
- "ld1 { v18.s }[2], [x25], #0x4\n"
- "ld1 { v23.s }[2], [x24], #0x4\n"
- "ld1 { v17.s }[2], [x23], #0x4\n"
- "ld1 { v21.s }[2], [x22], #0x4\n"
- "ld1 { v16.s }[2], [x21], #0x4\n"
+ "ld1 { v27.s }[2], [x27], #0x4\n"
+ "ld1 { v24.s }[2], [x26], #0x4\n"
+ "ld1 { v25.s }[2], [x25], #0x4\n"
+ "ld1 { v21.s }[2], [x24], #0x4\n"
+ "ld1 { v22.s }[2], [x23], #0x4\n"
+ "ld1 { v18.s }[2], [x22], #0x4\n"
+ "ld1 { v19.s }[2], [x21], #0x4\n"
+ "ld1 { v16.s }[2], [x20], #0x4\n"
"tbz %x[width], #1, 6f\n"
- "ld1 { v27.h }[6], [x28], #0x2\n"
- "ld1 { v19.h }[6], [x27], #0x2\n"
- "mov x20, #0x2\n"
- "ld1 { v25.h }[6], [x26], #0x2\n"
- "ld1 { v18.h }[6], [x25], #0x2\n"
- "ld1 { v23.h }[6], [x24], #0x2\n"
- "ld1 { v17.h }[6], [x23], #0x2\n"
- "ld1 { v21.h }[6], [x22], #0x2\n"
- "ld1 { v16.h }[6], [x21], #0x2\n"
+ "ld1 { v27.h }[6], [x27], #0x2\n"
+ "mov x19, #0x2\n"
+ "ld1 { v24.h }[6], [x26], #0x2\n"
+ "ld1 { v25.h }[6], [x25], #0x2\n"
+ "ld1 { v21.h }[6], [x24], #0x2\n"
+ "ld1 { v22.h }[6], [x23], #0x2\n"
+ "ld1 { v18.h }[6], [x22], #0x2\n"
+ "ld1 { v19.h }[6], [x21], #0x2\n"
+ "ld1 { v16.h }[6], [x20], #0x2\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v27.b }[14], [x28]\n"
- "ld1 { v19.b }[14], [x27]\n"
- "ld1 { v25.b }[14], [x26]\n"
- "ld1 { v18.b }[14], [x25]\n"
- "ld1 { v23.b }[14], [x24]\n"
- "ld1 { v17.b }[14], [x23]\n"
- "ld1 { v21.b }[14], [x22]\n"
- "ld1 { v16.b }[14], [x21]\n"
+ "ld1 { v27.b }[14], [x27]\n"
+ "ld1 { v24.b }[14], [x26]\n"
+ "ld1 { v25.b }[14], [x25]\n"
+ "ld1 { v21.b }[14], [x24]\n"
+ "ld1 { v22.b }[14], [x23]\n"
+ "ld1 { v18.b }[14], [x22]\n"
+ "ld1 { v19.b }[14], [x21]\n"
+ "ld1 { v16.b }[14], [x20]\n"
"b 13f\n"
"6:" // odd_loads_1_12
- "mov x20, #0x2\n"
+ "mov x19, #0x2\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v27.b }[12], [x28]\n"
- "ld1 { v19.b }[12], [x27]\n"
- "ld1 { v25.b }[12], [x26]\n"
- "ld1 { v18.b }[12], [x25]\n"
- "ld1 { v23.b }[12], [x24]\n"
- "ld1 { v17.b }[12], [x23]\n"
- "ld1 { v21.b }[12], [x22]\n"
- "ld1 { v16.b }[12], [x21]\n"
+ "ld1 { v27.b }[12], [x27]\n"
+ "ld1 { v24.b }[12], [x26]\n"
+ "ld1 { v25.b }[12], [x25]\n"
+ "ld1 { v21.b }[12], [x24]\n"
+ "ld1 { v22.b }[12], [x23]\n"
+ "ld1 { v18.b }[12], [x22]\n"
+ "ld1 { v19.b }[12], [x21]\n"
+ "ld1 { v16.b }[12], [x20]\n"
"b 13f\n"
"7:" // odd_loads_2_8
"tbz %x[width], #1, 8f\n"
- "ld1 { v27.h }[4], [x28], #0x2\n"
- "ld1 { v19.h }[4], [x27], #0x2\n"
- "mov x20, #0x2\n"
- "ld1 { v25.h }[4], [x26], #0x2\n"
- "ld1 { v18.h }[4], [x25], #0x2\n"
- "ld1 { v23.h }[4], [x24], #0x2\n"
- "ld1 { v17.h }[4], [x23], #0x2\n"
- "ld1 { v21.h }[4], [x22], #0x2\n"
- "ld1 { v16.h }[4], [x21], #0x2\n"
+ "ld1 { v27.h }[4], [x27], #0x2\n"
+ "ld1 { v24.h }[4], [x26], #0x2\n"
+ "mov x19, #0x2\n"
+ "ld1 { v25.h }[4], [x25], #0x2\n"
+ "ld1 { v21.h }[4], [x24], #0x2\n"
+ "ld1 { v22.h }[4], [x23], #0x2\n"
+ "ld1 { v18.h }[4], [x22], #0x2\n"
+ "ld1 { v19.h }[4], [x21], #0x2\n"
+ "ld1 { v16.h }[4], [x20], #0x2\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v27.b }[10], [x28]\n"
- "ld1 { v19.b }[10], [x27]\n"
- "ld1 { v25.b }[10], [x26]\n"
- "ld1 { v18.b }[10], [x25]\n"
- "ld1 { v23.b }[10], [x24]\n"
- "ld1 { v17.b }[10], [x23]\n"
- "ld1 { v21.b }[10], [x22]\n"
- "ld1 { v16.b }[10], [x21]\n"
+ "ld1 { v27.b }[10], [x27]\n"
+ "ld1 { v24.b }[10], [x26]\n"
+ "ld1 { v25.b }[10], [x25]\n"
+ "ld1 { v21.b }[10], [x24]\n"
+ "ld1 { v22.b }[10], [x23]\n"
+ "ld1 { v18.b }[10], [x22]\n"
+ "ld1 { v19.b }[10], [x21]\n"
+ "ld1 { v16.b }[10], [x20]\n"
"b 13f\n"
"8:" // odd_loads_1_8
- "mov x20, #0x1\n"
+ "mov x19, #0x1\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v27.b }[8], [x28]\n"
- "ld1 { v19.b }[8], [x27]\n"
- "mov x20, #0x2\n"
- "ld1 { v25.b }[8], [x26]\n"
- "ld1 { v18.b }[8], [x25]\n"
- "ld1 { v23.b }[8], [x24]\n"
- "ld1 { v17.b }[8], [x23]\n"
- "ld1 { v21.b }[8], [x22]\n"
- "ld1 { v16.b }[8], [x21]\n"
+ "ld1 { v27.b }[8], [x27]\n"
+ "ld1 { v24.b }[8], [x26]\n"
+ "mov x19, #0x2\n"
+ "ld1 { v25.b }[8], [x25]\n"
+ "ld1 { v21.b }[8], [x24]\n"
+ "ld1 { v22.b }[8], [x23]\n"
+ "ld1 { v18.b }[8], [x22]\n"
+ "ld1 { v19.b }[8], [x21]\n"
+ "ld1 { v16.b }[8], [x20]\n"
"b 13f\n"
"9:" // odd_loads_4_0
"tbz %x[width], #2, 11f\n"
- "ldr s27, [x28], #0x4\n"
- "ldr s19, [x27], #0x4\n"
- "ldr s25, [x26], #0x4\n"
- "ldr s18, [x25], #0x4\n"
- "ldr s23, [x24], #0x4\n"
- "ldr s17, [x23], #0x4\n"
- "ldr s21, [x22], #0x4\n"
- "ldr s16, [x21], #0x4\n"
+ "ldr s27, [x27], #0x4\n"
+ "ldr s24, [x26], #0x4\n"
+ "ldr s25, [x25], #0x4\n"
+ "ldr s21, [x24], #0x4\n"
+ "ldr s22, [x23], #0x4\n"
+ "ldr s18, [x22], #0x4\n"
+ "ldr s19, [x21], #0x4\n"
+ "ldr s16, [x20], #0x4\n"
"tbz %x[width], #1, 10f\n"
- "ld1 { v27.h }[2], [x28], #0x2\n"
- "ld1 { v19.h }[2], [x27], #0x2\n"
- "mov x20, #0x1\n"
- "ld1 { v25.h }[2], [x26], #0x2\n"
- "ld1 { v18.h }[2], [x25], #0x2\n"
- "ld1 { v23.h }[2], [x24], #0x2\n"
- "ld1 { v17.h }[2], [x23], #0x2\n"
- "ld1 { v21.h }[2], [x22], #0x2\n"
- "ld1 { v16.h }[2], [x21], #0x2\n"
+ "ld1 { v27.h }[2], [x27], #0x2\n"
+ "mov x19, #0x1\n"
+ "ld1 { v24.h }[2], [x26], #0x2\n"
+ "ld1 { v25.h }[2], [x25], #0x2\n"
+ "ld1 { v21.h }[2], [x24], #0x2\n"
+ "ld1 { v22.h }[2], [x23], #0x2\n"
+ "ld1 { v18.h }[2], [x22], #0x2\n"
+ "ld1 { v19.h }[2], [x21], #0x2\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v27.b }[6], [x28]\n"
- "ld1 { v19.b }[6], [x27]\n"
- "ld1 { v25.b }[6], [x26]\n"
- "ld1 { v18.b }[6], [x25]\n"
- "ld1 { v23.b }[6], [x24]\n"
- "ld1 { v17.b }[6], [x23]\n"
- "ld1 { v21.b }[6], [x22]\n"
- "ld1 { v16.b }[6], [x21]\n"
+ "ld1 { v27.b }[6], [x27]\n"
+ "ld1 { v24.b }[6], [x26]\n"
+ "ld1 { v25.b }[6], [x25]\n"
+ "ld1 { v21.b }[6], [x24]\n"
+ "ld1 { v22.b }[6], [x23]\n"
+ "ld1 { v18.b }[6], [x22]\n"
+ "ld1 { v19.b }[6], [x21]\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 13f\n"
"10:" // odd_loads_1_4
- "mov x20, #0x1\n"
+ "mov x19, #0x1\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v27.b }[4], [x28]\n"
- "ld1 { v19.b }[4], [x27]\n"
- "ld1 { v25.b }[4], [x26]\n"
- "ld1 { v18.b }[4], [x25]\n"
- "ld1 { v23.b }[4], [x24]\n"
- "ld1 { v17.b }[4], [x23]\n"
- "ld1 { v21.b }[4], [x22]\n"
- "ld1 { v16.b }[4], [x21]\n"
+ "ld1 { v27.b }[4], [x27]\n"
+ "ld1 { v24.b }[4], [x26]\n"
+ "ld1 { v25.b }[4], [x25]\n"
+ "ld1 { v21.b }[4], [x24]\n"
+ "ld1 { v22.b }[4], [x23]\n"
+ "ld1 { v18.b }[4], [x22]\n"
+ "ld1 { v19.b }[4], [x21]\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 13f\n"
"11:" // odd_loads_2_0
"tbz %x[width], #1, 12f\n"
- "ldr h27, [x28], #0x2\n"
- "ldr h19, [x27], #0x2\n"
- "mov x20, #0x1\n"
- "ldr h25, [x26], #0x2\n"
- "ldr h18, [x25], #0x2\n"
- "ldr h23, [x24], #0x2\n"
- "ldr h17, [x23], #0x2\n"
- "ldr h21, [x22], #0x2\n"
- "ldr h16, [x21], #0x2\n"
+ "ldr h27, [x27], #0x2\n"
+ "ldr h24, [x26], #0x2\n"
+ "mov x19, #0x1\n"
+ "ldr h25, [x25], #0x2\n"
+ "ldr h21, [x24], #0x2\n"
+ "ldr h22, [x23], #0x2\n"
+ "ldr h18, [x22], #0x2\n"
+ "ldr h19, [x21], #0x2\n"
+ "ldr h16, [x20], #0x2\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v27.b }[2], [x28]\n"
- "ld1 { v19.b }[2], [x27]\n"
- "ld1 { v25.b }[2], [x26]\n"
- "ld1 { v18.b }[2], [x25]\n"
- "ld1 { v23.b }[2], [x24]\n"
- "ld1 { v17.b }[2], [x23]\n"
- "ld1 { v21.b }[2], [x22]\n"
- "ld1 { v16.b }[2], [x21]\n"
+ "ld1 { v27.b }[2], [x27]\n"
+ "ld1 { v24.b }[2], [x26]\n"
+ "ld1 { v25.b }[2], [x25]\n"
+ "ld1 { v21.b }[2], [x24]\n"
+ "ld1 { v22.b }[2], [x23]\n"
+ "ld1 { v18.b }[2], [x22]\n"
+ "ld1 { v19.b }[2], [x21]\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 13f\n"
"12:" // odd_loads_1_0
- "ldr b27, [x28, #0x0]\n"
- "ldr b19, [x27, #0x0]\n"
- "mov x20, #0x1\n"
- "ldr b25, [x26, #0x0]\n"
- "ldr b18, [x25, #0x0]\n"
- "ldr b23, [x24, #0x0]\n"
- "ldr b17, [x23, #0x0]\n"
- "ldr b21, [x22, #0x0]\n"
- "ldr b16, [x21, #0x0]\n"
+ "ldr b27, [x27, #0x0]\n"
+ "mov x19, #0x1\n"
+ "ldr b24, [x26, #0x0]\n"
+ "ldr b25, [x25, #0x0]\n"
+ "ldr b21, [x24, #0x0]\n"
+ "ldr b22, [x23, #0x0]\n"
+ "ldr b18, [x22, #0x0]\n"
+ "ldr b19, [x21, #0x0]\n"
+ "ldr b16, [x20, #0x0]\n"
"13:" // Odd load end
- "zip1 v26.2d, v27.2d, v19.2d\n"
- "zip1 v24.2d, v25.2d, v18.2d\n"
- "subs x20, x20, #0x1\n"
+ "zip1 v26.2d, v27.2d, v24.2d\n"
"str q26, [%x[out_ptr], #0x0]\n"
- "zip1 v22.2d, v23.2d, v17.2d\n"
- "zip1 v20.2d, v21.2d, v16.2d\n"
- "str q24, [%x[out_ptr], #0x10]\n"
+ "zip1 v23.2d, v25.2d, v21.2d\n"
"sadalp v5.8h, v26.16b\n"
- "sadalp v4.8h, v24.16b\n"
- "str q22, [%x[out_ptr], #0x20]\n"
- "sadalp v3.8h, v22.16b\n"
- "str q20, [%x[out_ptr], #0x30]\n"
- "sadalp v2.8h, v20.16b\n"
+ "zip1 v20.2d, v22.2d, v18.2d\n"
+ "str q23, [%x[out_ptr], #0x10]\n"
+ "sadalp v4.8h, v23.16b\n"
+ "zip1 v17.2d, v19.2d, v16.2d\n"
+ "str q20, [%x[out_ptr], #0x20]\n"
+ "sadalp v3.8h, v20.16b\n"
+ "str q17, [%x[out_ptr], #0x30]\n"
+ "sadalp v2.8h, v17.16b\n"
+ "subs x19, x19, #0x1\n"
"add %x[out_ptr], %x[out_ptr], #0x40\n"
"beq 14f\n"
- "zip2 v19.2d, v27.2d, v19.2d\n"
- "zip2 v18.2d, v25.2d, v18.2d\n"
- "str q19, [%x[out_ptr], #0x0]\n"
- "zip2 v17.2d, v23.2d, v17.2d\n"
- "zip2 v16.2d, v21.2d, v16.2d\n"
- "str q18, [%x[out_ptr], #0x10]\n"
- "sadalp v5.8h, v19.16b\n"
- "sadalp v4.8h, v18.16b\n"
- "str q17, [%x[out_ptr], #0x20]\n"
- "sadalp v3.8h, v17.16b\n"
+ "zip2 v24.2d, v27.2d, v24.2d\n"
+ "str q24, [%x[out_ptr], #0x0]\n"
+ "zip2 v21.2d, v25.2d, v21.2d\n"
+ "sadalp v5.8h, v24.16b\n"
+ "zip2 v18.2d, v22.2d, v18.2d\n"
+ "str q21, [%x[out_ptr], #0x10]\n"
+ "sadalp v4.8h, v21.16b\n"
+ "zip2 v16.2d, v19.2d, v16.2d\n"
+ "str q18, [%x[out_ptr], #0x20]\n"
+ "sadalp v3.8h, v18.16b\n"
"str q16, [%x[out_ptr], #0x30]\n"
"sadalp v2.8h, v16.16b\n"
"add %x[out_ptr], %x[out_ptr], #0x40\n"
"14:" // Odds skip
"sadalp v1.4s, v5.8h\n"
"sadalp v0.4s, v4.8h\n"
+ "addp v1.4s, v1.4s, v0.4s\n"
"sadalp v31.4s, v3.8h\n"
"sadalp v30.4s, v2.8h\n"
- "addp v1.4s, v1.4s, v0.4s\n"
- "addp v0.4s, v31.4s, v30.4s\n"
"add v1.4s, v1.4s, v29.4s\n"
- "add v0.4s, v0.4s, v28.4s\n"
"str q1, [%x[out_ptr], #0x0]\n"
+ "addp v0.4s, v31.4s, v30.4s\n"
+ "add v0.4s, v0.4s, v28.4s\n"
"str q0, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
: [out_ptr] "+&r" (out_ptr), [width] "+&r" (width)
: [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27"
);
}
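
(A note on the loop counter in the hunk above: sadalp accumulates pairwise int8 sums into int16 lanes, and each main-loop iteration adds at most two pair-sums per lane, each bounded in magnitude by 256, so the partials are drained into the int32 accumulators after at most 63 iterations — the `cmp x19, #0x3e` / `ble` pair — before an int16 lane can wrap: 63 * 2 * 256 = 32256 < 32768. A scalar C++ sketch of that drain, with illustrative names:)

#include <cstdint>

// Scalar sketch of the periodic drain: widen the int16 partials into
// the int32 accumulators (sadalp v1.4s, v5.8h etc.) and reset them
// (movi v*.8h, #0x0), mirroring the block guarded by `cmp x19, #0x3e`.
static void drain_partials(int32_t acc32[4], int16_t acc16[8]) {
    for (int i = 0; i < 4; ++i) {
        // Each int32 lane absorbs a pair of adjacent int16 lanes.
        acc32[i] += int32_t(acc16[2 * i]) + int32_t(acc16[2 * i + 1]);
        acc16[2 * i] = 0;
        acc16[2 * i + 1] = 0;
    }
}
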
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block8_u8_u8_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block8_u8_u8_summing.hpp
index 6c4a5fa62b..454260ef1a 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block8_u8_u8_summing.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block8_u8_u8_summing.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -31,47 +31,46 @@ void interleave_block<8, 8, VLType::None, true>(
)
{
__asm__ __volatile__(
- "ldr x28, [%x[in], #0x0]\n"
- "ldr x27, [%x[in], #0x8]\n"
- "cmp %x[height], #0x8\n"
- "mov x20, #0x0\n"
- "ldr x26, [%x[in], #0x10]\n"
- "ldr x25, [%x[in], #0x18]\n"
"movi v5.8h, #0x0\n"
+ "ldr x27, [%x[in], #0x0]\n"
+ "mov x19, #0x0\n"
"movi v4.8h, #0x0\n"
- "ldr x24, [%x[in], #0x20]\n"
- "ldr x23, [%x[in], #0x28]\n"
+ "ldr x26, [%x[in], #0x8]\n"
+ "cmp %x[height], #0x8\n"
"movi v3.8h, #0x0\n"
+ "ldr x25, [%x[in], #0x10]\n"
+ "add x27, x27, %x[row_offset]\n"
"movi v2.8h, #0x0\n"
- "ldr x22, [%x[in], #0x30]\n"
- "ldr x21, [%x[in], #0x38]\n"
+ "ldr x24, [%x[in], #0x18]\n"
"movi v1.4s, #0x0\n"
- "movi v0.4s, #0x0\n"
- "movi v31.4s, #0x0\n"
- "movi v30.4s, #0x0\n"
- "add x28, x28, %x[row_offset]\n"
- "add x27, x27, %x[row_offset]\n"
+ "ldr x23, [%x[in], #0x20]\n"
"add x26, x26, %x[row_offset]\n"
+ "movi v0.4s, #0x0\n"
+ "ldr x22, [%x[in], #0x28]\n"
"add x25, x25, %x[row_offset]\n"
+ "movi v31.4s, #0x0\n"
+ "ldr x21, [%x[in], #0x30]\n"
"add x24, x24, %x[row_offset]\n"
+ "movi v30.4s, #0x0\n"
+ "ldr x20, [%x[in], #0x38]\n"
"add x23, x23, %x[row_offset]\n"
"add x22, x22, %x[row_offset]\n"
"add x21, x21, %x[row_offset]\n"
+ "add x20, x20, %x[row_offset]\n"
"beq 1f\n"
+ "mov x20, x27\n"
"cmp %x[height], #0x2\n"
- "csel x27, x27, x28, GE\n"
- "csel x26, x26, x28, GT\n"
+ "csel x26, x26, x27, GE\n"
+ "csel x25, x25, x27, GT\n"
"cmp %x[height], #0x4\n"
- "csel x25, x25, x28, GE\n"
- "csel x24, x24, x28, GT\n"
+ "csel x24, x24, x27, GE\n"
+ "csel x23, x23, x27, GT\n"
"cmp %x[height], #0x6\n"
- "mov x21, x28\n"
- "csel x23, x23, x28, GE\n"
- "csel x22, x22, x28, GT\n"
+ "csel x22, x22, x27, GE\n"
+ "csel x21, x21, x27, GT\n"
"1:" // no_pointer_adj
- "prfm pldl1keep, [x28, #0x0]\n"
- "prfm pldl1keep, [x27, #0x0]\n"
"movi v29.4s, #0x0\n"
+ "prfm pldl1keep, [x27, #0x0]\n"
"movi v28.4s, #0x0\n"
"prfm pldl1keep, [x26, #0x0]\n"
"prfm pldl1keep, [x25, #0x0]\n"
@@ -79,7 +78,7 @@ void interleave_block<8, 8, VLType::None, true>(
"prfm pldl1keep, [x23, #0x0]\n"
"prfm pldl1keep, [x22, #0x0]\n"
"prfm pldl1keep, [x21, #0x0]\n"
- "prfm pldl1keep, [x28, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x0]\n"
"prfm pldl1keep, [x27, #0x40]\n"
"prfm pldl1keep, [x26, #0x40]\n"
"prfm pldl1keep, [x25, #0x40]\n"
@@ -87,6 +86,7 @@ void interleave_block<8, 8, VLType::None, true>(
"prfm pldl1keep, [x23, #0x40]\n"
"prfm pldl1keep, [x22, #0x40]\n"
"prfm pldl1keep, [x21, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x40]\n"
"cbnz %w[first], 2f\n"
"sub %x[out_ptr], %x[out_ptr], #0x20\n"
"ld1 { v29.4s }, [%x[out_ptr]]\n"
@@ -95,266 +95,266 @@ void interleave_block<8, 8, VLType::None, true>(
"cmp %x[width], #0x10\n"
"blt 5f\n"
"3:" // Main loop head
- "cmp x20, #0x3e\n"
+ "cmp x19, #0x3e\n"
"ble 4f\n"
"uadalp v1.4s, v5.8h\n"
"movi v5.8h, #0x0\n"
- "mov x20, #0x0\n"
"uadalp v0.4s, v4.8h\n"
"movi v4.8h, #0x0\n"
"uadalp v31.4s, v3.8h\n"
"movi v3.8h, #0x0\n"
"uadalp v30.4s, v2.8h\n"
"movi v2.8h, #0x0\n"
+ "mov x19, #0x0\n"
"4:" // no_accumulate_16
- "ldr q27, [x28], #0x10\n"
- "ldr q19, [x27], #0x10\n"
- "zip1 v26.2d, v27.2d, v19.2d\n"
- "uadalp v5.8h, v26.16b\n"
- "ldr q25, [x26], #0x10\n"
- "ldr q18, [x25], #0x10\n"
- "zip1 v24.2d, v25.2d, v18.2d\n"
- "uadalp v4.8h, v24.16b\n"
- "ldr q23, [x24], #0x10\n"
- "ldr q17, [x23], #0x10\n"
- "zip1 v22.2d, v23.2d, v17.2d\n"
- "uadalp v3.8h, v22.16b\n"
- "ldr q21, [x22], #0x10\n"
- "ldr q16, [x21], #0x10\n"
- "zip1 v20.2d, v21.2d, v16.2d\n"
- "uadalp v2.8h, v20.16b\n"
- "zip2 v19.2d, v27.2d, v19.2d\n"
- "zip2 v18.2d, v25.2d, v18.2d\n"
+ "ldr q27, [x27], #0x10\n"
+ "add x19, x19, #0x1\n"
+ "ldr q24, [x26], #0x10\n"
+ "zip1 v26.2d, v27.2d, v24.2d\n"
+ "ldr q25, [x25], #0x10\n"
"subs %x[width], %x[width], #0x10\n"
+ "zip2 v24.2d, v27.2d, v24.2d\n"
+ "ldr q21, [x24], #0x10\n"
+ "uadalp v5.8h, v26.16b\n"
+ "zip1 v23.2d, v25.2d, v21.2d\n"
+ "ldr q22, [x23], #0x10\n"
"cmp %x[width], #0x10\n"
- "zip2 v17.2d, v23.2d, v17.2d\n"
- "zip2 v16.2d, v21.2d, v16.2d\n"
- "prfm pldl1keep, [x28, #0x70]\n"
+ "zip2 v21.2d, v25.2d, v21.2d\n"
+ "ldr q18, [x22], #0x10\n"
+ "uadalp v4.8h, v23.16b\n"
+ "zip1 v20.2d, v22.2d, v18.2d\n"
+ "ldr q19, [x21], #0x10\n"
+ "uadalp v5.8h, v24.16b\n"
+ "zip2 v18.2d, v22.2d, v18.2d\n"
+ "ldr q16, [x20], #0x10\n"
+ "uadalp v3.8h, v20.16b\n"
+ "zip1 v17.2d, v19.2d, v16.2d\n"
"prfm pldl1keep, [x27, #0x70]\n"
+ "uadalp v4.8h, v21.16b\n"
+ "zip2 v16.2d, v19.2d, v16.2d\n"
"prfm pldl1keep, [x26, #0x70]\n"
+ "uadalp v2.8h, v17.16b\n"
"prfm pldl1keep, [x25, #0x70]\n"
- "str q26, [%x[out_ptr], #0x0]\n"
- "uadalp v5.8h, v19.16b\n"
+ "uadalp v3.8h, v18.16b\n"
"prfm pldl1keep, [x24, #0x70]\n"
+ "uadalp v2.8h, v16.16b\n"
"prfm pldl1keep, [x23, #0x70]\n"
- "str q24, [%x[out_ptr], #0x10]\n"
- "uadalp v4.8h, v18.16b\n"
"prfm pldl1keep, [x22, #0x70]\n"
"prfm pldl1keep, [x21, #0x70]\n"
- "str q22, [%x[out_ptr], #0x20]\n"
- "uadalp v3.8h, v17.16b\n"
- "str q20, [%x[out_ptr], #0x30]\n"
- "uadalp v2.8h, v16.16b\n"
- "add x20, x20, #0x1\n"
- "str q19, [%x[out_ptr], #0x40]\n"
- "str q18, [%x[out_ptr], #0x50]\n"
- "str q17, [%x[out_ptr], #0x60]\n"
+ "prfm pldl1keep, [x20, #0x70]\n"
+ "str q26, [%x[out_ptr], #0x0]\n"
+ "str q23, [%x[out_ptr], #0x10]\n"
+ "str q20, [%x[out_ptr], #0x20]\n"
+ "str q17, [%x[out_ptr], #0x30]\n"
+ "str q24, [%x[out_ptr], #0x40]\n"
+ "str q21, [%x[out_ptr], #0x50]\n"
+ "str q18, [%x[out_ptr], #0x60]\n"
"str q16, [%x[out_ptr], #0x70]\n"
"add %x[out_ptr], %x[out_ptr], #0x80\n"
"bge 3b\n"
"5:" // Main loop skip
"cbz %x[width], 14f\n"
"tbz %x[width], #3, 9f\n"
- "ldr d27, [x28], #0x8\n"
- "ldr d19, [x27], #0x8\n"
- "ldr d25, [x26], #0x8\n"
- "ldr d18, [x25], #0x8\n"
- "ldr d23, [x24], #0x8\n"
- "ldr d17, [x23], #0x8\n"
- "ldr d21, [x22], #0x8\n"
- "ldr d16, [x21], #0x8\n"
+ "ldr d27, [x27], #0x8\n"
+ "ldr d24, [x26], #0x8\n"
+ "ldr d25, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d22, [x23], #0x8\n"
+ "ldr d18, [x22], #0x8\n"
+ "ldr d19, [x21], #0x8\n"
+ "ldr d16, [x20], #0x8\n"
"tbz %x[width], #2, 7f\n"
- "ld1 { v27.s }[2], [x28], #0x4\n"
- "ld1 { v19.s }[2], [x27], #0x4\n"
- "ld1 { v25.s }[2], [x26], #0x4\n"
- "ld1 { v18.s }[2], [x25], #0x4\n"
- "ld1 { v23.s }[2], [x24], #0x4\n"
- "ld1 { v17.s }[2], [x23], #0x4\n"
- "ld1 { v21.s }[2], [x22], #0x4\n"
- "ld1 { v16.s }[2], [x21], #0x4\n"
+ "ld1 { v27.s }[2], [x27], #0x4\n"
+ "ld1 { v24.s }[2], [x26], #0x4\n"
+ "ld1 { v25.s }[2], [x25], #0x4\n"
+ "ld1 { v21.s }[2], [x24], #0x4\n"
+ "ld1 { v22.s }[2], [x23], #0x4\n"
+ "ld1 { v18.s }[2], [x22], #0x4\n"
+ "ld1 { v19.s }[2], [x21], #0x4\n"
+ "ld1 { v16.s }[2], [x20], #0x4\n"
"tbz %x[width], #1, 6f\n"
- "ld1 { v27.h }[6], [x28], #0x2\n"
- "ld1 { v19.h }[6], [x27], #0x2\n"
- "mov x20, #0x2\n"
- "ld1 { v25.h }[6], [x26], #0x2\n"
- "ld1 { v18.h }[6], [x25], #0x2\n"
- "ld1 { v23.h }[6], [x24], #0x2\n"
- "ld1 { v17.h }[6], [x23], #0x2\n"
- "ld1 { v21.h }[6], [x22], #0x2\n"
- "ld1 { v16.h }[6], [x21], #0x2\n"
+ "ld1 { v27.h }[6], [x27], #0x2\n"
+ "mov x19, #0x2\n"
+ "ld1 { v24.h }[6], [x26], #0x2\n"
+ "ld1 { v25.h }[6], [x25], #0x2\n"
+ "ld1 { v21.h }[6], [x24], #0x2\n"
+ "ld1 { v22.h }[6], [x23], #0x2\n"
+ "ld1 { v18.h }[6], [x22], #0x2\n"
+ "ld1 { v19.h }[6], [x21], #0x2\n"
+ "ld1 { v16.h }[6], [x20], #0x2\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v27.b }[14], [x28]\n"
- "ld1 { v19.b }[14], [x27]\n"
- "ld1 { v25.b }[14], [x26]\n"
- "ld1 { v18.b }[14], [x25]\n"
- "ld1 { v23.b }[14], [x24]\n"
- "ld1 { v17.b }[14], [x23]\n"
- "ld1 { v21.b }[14], [x22]\n"
- "ld1 { v16.b }[14], [x21]\n"
+ "ld1 { v27.b }[14], [x27]\n"
+ "ld1 { v24.b }[14], [x26]\n"
+ "ld1 { v25.b }[14], [x25]\n"
+ "ld1 { v21.b }[14], [x24]\n"
+ "ld1 { v22.b }[14], [x23]\n"
+ "ld1 { v18.b }[14], [x22]\n"
+ "ld1 { v19.b }[14], [x21]\n"
+ "ld1 { v16.b }[14], [x20]\n"
"b 13f\n"
"6:" // odd_loads_1_12
- "mov x20, #0x2\n"
+ "mov x19, #0x2\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v27.b }[12], [x28]\n"
- "ld1 { v19.b }[12], [x27]\n"
- "ld1 { v25.b }[12], [x26]\n"
- "ld1 { v18.b }[12], [x25]\n"
- "ld1 { v23.b }[12], [x24]\n"
- "ld1 { v17.b }[12], [x23]\n"
- "ld1 { v21.b }[12], [x22]\n"
- "ld1 { v16.b }[12], [x21]\n"
+ "ld1 { v27.b }[12], [x27]\n"
+ "ld1 { v24.b }[12], [x26]\n"
+ "ld1 { v25.b }[12], [x25]\n"
+ "ld1 { v21.b }[12], [x24]\n"
+ "ld1 { v22.b }[12], [x23]\n"
+ "ld1 { v18.b }[12], [x22]\n"
+ "ld1 { v19.b }[12], [x21]\n"
+ "ld1 { v16.b }[12], [x20]\n"
"b 13f\n"
"7:" // odd_loads_2_8
"tbz %x[width], #1, 8f\n"
- "ld1 { v27.h }[4], [x28], #0x2\n"
- "ld1 { v19.h }[4], [x27], #0x2\n"
- "mov x20, #0x2\n"
- "ld1 { v25.h }[4], [x26], #0x2\n"
- "ld1 { v18.h }[4], [x25], #0x2\n"
- "ld1 { v23.h }[4], [x24], #0x2\n"
- "ld1 { v17.h }[4], [x23], #0x2\n"
- "ld1 { v21.h }[4], [x22], #0x2\n"
- "ld1 { v16.h }[4], [x21], #0x2\n"
+ "ld1 { v27.h }[4], [x27], #0x2\n"
+ "ld1 { v24.h }[4], [x26], #0x2\n"
+ "mov x19, #0x2\n"
+ "ld1 { v25.h }[4], [x25], #0x2\n"
+ "ld1 { v21.h }[4], [x24], #0x2\n"
+ "ld1 { v22.h }[4], [x23], #0x2\n"
+ "ld1 { v18.h }[4], [x22], #0x2\n"
+ "ld1 { v19.h }[4], [x21], #0x2\n"
+ "ld1 { v16.h }[4], [x20], #0x2\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v27.b }[10], [x28]\n"
- "ld1 { v19.b }[10], [x27]\n"
- "ld1 { v25.b }[10], [x26]\n"
- "ld1 { v18.b }[10], [x25]\n"
- "ld1 { v23.b }[10], [x24]\n"
- "ld1 { v17.b }[10], [x23]\n"
- "ld1 { v21.b }[10], [x22]\n"
- "ld1 { v16.b }[10], [x21]\n"
+ "ld1 { v27.b }[10], [x27]\n"
+ "ld1 { v24.b }[10], [x26]\n"
+ "ld1 { v25.b }[10], [x25]\n"
+ "ld1 { v21.b }[10], [x24]\n"
+ "ld1 { v22.b }[10], [x23]\n"
+ "ld1 { v18.b }[10], [x22]\n"
+ "ld1 { v19.b }[10], [x21]\n"
+ "ld1 { v16.b }[10], [x20]\n"
"b 13f\n"
"8:" // odd_loads_1_8
- "mov x20, #0x1\n"
+ "mov x19, #0x1\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v27.b }[8], [x28]\n"
- "ld1 { v19.b }[8], [x27]\n"
- "mov x20, #0x2\n"
- "ld1 { v25.b }[8], [x26]\n"
- "ld1 { v18.b }[8], [x25]\n"
- "ld1 { v23.b }[8], [x24]\n"
- "ld1 { v17.b }[8], [x23]\n"
- "ld1 { v21.b }[8], [x22]\n"
- "ld1 { v16.b }[8], [x21]\n"
+ "ld1 { v27.b }[8], [x27]\n"
+ "ld1 { v24.b }[8], [x26]\n"
+ "mov x19, #0x2\n"
+ "ld1 { v25.b }[8], [x25]\n"
+ "ld1 { v21.b }[8], [x24]\n"
+ "ld1 { v22.b }[8], [x23]\n"
+ "ld1 { v18.b }[8], [x22]\n"
+ "ld1 { v19.b }[8], [x21]\n"
+ "ld1 { v16.b }[8], [x20]\n"
"b 13f\n"
"9:" // odd_loads_4_0
"tbz %x[width], #2, 11f\n"
- "ldr s27, [x28], #0x4\n"
- "ldr s19, [x27], #0x4\n"
- "ldr s25, [x26], #0x4\n"
- "ldr s18, [x25], #0x4\n"
- "ldr s23, [x24], #0x4\n"
- "ldr s17, [x23], #0x4\n"
- "ldr s21, [x22], #0x4\n"
- "ldr s16, [x21], #0x4\n"
+ "ldr s27, [x27], #0x4\n"
+ "ldr s24, [x26], #0x4\n"
+ "ldr s25, [x25], #0x4\n"
+ "ldr s21, [x24], #0x4\n"
+ "ldr s22, [x23], #0x4\n"
+ "ldr s18, [x22], #0x4\n"
+ "ldr s19, [x21], #0x4\n"
+ "ldr s16, [x20], #0x4\n"
"tbz %x[width], #1, 10f\n"
- "ld1 { v27.h }[2], [x28], #0x2\n"
- "ld1 { v19.h }[2], [x27], #0x2\n"
- "mov x20, #0x1\n"
- "ld1 { v25.h }[2], [x26], #0x2\n"
- "ld1 { v18.h }[2], [x25], #0x2\n"
- "ld1 { v23.h }[2], [x24], #0x2\n"
- "ld1 { v17.h }[2], [x23], #0x2\n"
- "ld1 { v21.h }[2], [x22], #0x2\n"
- "ld1 { v16.h }[2], [x21], #0x2\n"
+ "ld1 { v27.h }[2], [x27], #0x2\n"
+ "mov x19, #0x1\n"
+ "ld1 { v24.h }[2], [x26], #0x2\n"
+ "ld1 { v25.h }[2], [x25], #0x2\n"
+ "ld1 { v21.h }[2], [x24], #0x2\n"
+ "ld1 { v22.h }[2], [x23], #0x2\n"
+ "ld1 { v18.h }[2], [x22], #0x2\n"
+ "ld1 { v19.h }[2], [x21], #0x2\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v27.b }[6], [x28]\n"
- "ld1 { v19.b }[6], [x27]\n"
- "ld1 { v25.b }[6], [x26]\n"
- "ld1 { v18.b }[6], [x25]\n"
- "ld1 { v23.b }[6], [x24]\n"
- "ld1 { v17.b }[6], [x23]\n"
- "ld1 { v21.b }[6], [x22]\n"
- "ld1 { v16.b }[6], [x21]\n"
+ "ld1 { v27.b }[6], [x27]\n"
+ "ld1 { v24.b }[6], [x26]\n"
+ "ld1 { v25.b }[6], [x25]\n"
+ "ld1 { v21.b }[6], [x24]\n"
+ "ld1 { v22.b }[6], [x23]\n"
+ "ld1 { v18.b }[6], [x22]\n"
+ "ld1 { v19.b }[6], [x21]\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 13f\n"
"10:" // odd_loads_1_4
- "mov x20, #0x1\n"
+ "mov x19, #0x1\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v27.b }[4], [x28]\n"
- "ld1 { v19.b }[4], [x27]\n"
- "ld1 { v25.b }[4], [x26]\n"
- "ld1 { v18.b }[4], [x25]\n"
- "ld1 { v23.b }[4], [x24]\n"
- "ld1 { v17.b }[4], [x23]\n"
- "ld1 { v21.b }[4], [x22]\n"
- "ld1 { v16.b }[4], [x21]\n"
+ "ld1 { v27.b }[4], [x27]\n"
+ "ld1 { v24.b }[4], [x26]\n"
+ "ld1 { v25.b }[4], [x25]\n"
+ "ld1 { v21.b }[4], [x24]\n"
+ "ld1 { v22.b }[4], [x23]\n"
+ "ld1 { v18.b }[4], [x22]\n"
+ "ld1 { v19.b }[4], [x21]\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 13f\n"
"11:" // odd_loads_2_0
"tbz %x[width], #1, 12f\n"
- "ldr h27, [x28], #0x2\n"
- "ldr h19, [x27], #0x2\n"
- "mov x20, #0x1\n"
- "ldr h25, [x26], #0x2\n"
- "ldr h18, [x25], #0x2\n"
- "ldr h23, [x24], #0x2\n"
- "ldr h17, [x23], #0x2\n"
- "ldr h21, [x22], #0x2\n"
- "ldr h16, [x21], #0x2\n"
+ "ldr h27, [x27], #0x2\n"
+ "ldr h24, [x26], #0x2\n"
+ "mov x19, #0x1\n"
+ "ldr h25, [x25], #0x2\n"
+ "ldr h21, [x24], #0x2\n"
+ "ldr h22, [x23], #0x2\n"
+ "ldr h18, [x22], #0x2\n"
+ "ldr h19, [x21], #0x2\n"
+ "ldr h16, [x20], #0x2\n"
"tbz %x[width], #0, 13f\n"
- "ld1 { v27.b }[2], [x28]\n"
- "ld1 { v19.b }[2], [x27]\n"
- "ld1 { v25.b }[2], [x26]\n"
- "ld1 { v18.b }[2], [x25]\n"
- "ld1 { v23.b }[2], [x24]\n"
- "ld1 { v17.b }[2], [x23]\n"
- "ld1 { v21.b }[2], [x22]\n"
- "ld1 { v16.b }[2], [x21]\n"
+ "ld1 { v27.b }[2], [x27]\n"
+ "ld1 { v24.b }[2], [x26]\n"
+ "ld1 { v25.b }[2], [x25]\n"
+ "ld1 { v21.b }[2], [x24]\n"
+ "ld1 { v22.b }[2], [x23]\n"
+ "ld1 { v18.b }[2], [x22]\n"
+ "ld1 { v19.b }[2], [x21]\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 13f\n"
"12:" // odd_loads_1_0
- "ldr b27, [x28, #0x0]\n"
- "ldr b19, [x27, #0x0]\n"
- "mov x20, #0x1\n"
- "ldr b25, [x26, #0x0]\n"
- "ldr b18, [x25, #0x0]\n"
- "ldr b23, [x24, #0x0]\n"
- "ldr b17, [x23, #0x0]\n"
- "ldr b21, [x22, #0x0]\n"
- "ldr b16, [x21, #0x0]\n"
+ "ldr b27, [x27, #0x0]\n"
+ "mov x19, #0x1\n"
+ "ldr b24, [x26, #0x0]\n"
+ "ldr b25, [x25, #0x0]\n"
+ "ldr b21, [x24, #0x0]\n"
+ "ldr b22, [x23, #0x0]\n"
+ "ldr b18, [x22, #0x0]\n"
+ "ldr b19, [x21, #0x0]\n"
+ "ldr b16, [x20, #0x0]\n"
"13:" // Odd load end
- "zip1 v26.2d, v27.2d, v19.2d\n"
- "zip1 v24.2d, v25.2d, v18.2d\n"
- "subs x20, x20, #0x1\n"
+ "zip1 v26.2d, v27.2d, v24.2d\n"
"str q26, [%x[out_ptr], #0x0]\n"
- "zip1 v22.2d, v23.2d, v17.2d\n"
- "zip1 v20.2d, v21.2d, v16.2d\n"
- "str q24, [%x[out_ptr], #0x10]\n"
+ "zip1 v23.2d, v25.2d, v21.2d\n"
"uadalp v5.8h, v26.16b\n"
- "uadalp v4.8h, v24.16b\n"
- "str q22, [%x[out_ptr], #0x20]\n"
- "uadalp v3.8h, v22.16b\n"
- "str q20, [%x[out_ptr], #0x30]\n"
- "uadalp v2.8h, v20.16b\n"
+ "zip1 v20.2d, v22.2d, v18.2d\n"
+ "str q23, [%x[out_ptr], #0x10]\n"
+ "uadalp v4.8h, v23.16b\n"
+ "zip1 v17.2d, v19.2d, v16.2d\n"
+ "str q20, [%x[out_ptr], #0x20]\n"
+ "uadalp v3.8h, v20.16b\n"
+ "str q17, [%x[out_ptr], #0x30]\n"
+ "uadalp v2.8h, v17.16b\n"
+ "subs x19, x19, #0x1\n"
"add %x[out_ptr], %x[out_ptr], #0x40\n"
"beq 14f\n"
- "zip2 v19.2d, v27.2d, v19.2d\n"
- "zip2 v18.2d, v25.2d, v18.2d\n"
- "str q19, [%x[out_ptr], #0x0]\n"
- "zip2 v17.2d, v23.2d, v17.2d\n"
- "zip2 v16.2d, v21.2d, v16.2d\n"
- "str q18, [%x[out_ptr], #0x10]\n"
- "uadalp v5.8h, v19.16b\n"
- "uadalp v4.8h, v18.16b\n"
- "str q17, [%x[out_ptr], #0x20]\n"
- "uadalp v3.8h, v17.16b\n"
+ "zip2 v24.2d, v27.2d, v24.2d\n"
+ "str q24, [%x[out_ptr], #0x0]\n"
+ "zip2 v21.2d, v25.2d, v21.2d\n"
+ "uadalp v5.8h, v24.16b\n"
+ "zip2 v18.2d, v22.2d, v18.2d\n"
+ "str q21, [%x[out_ptr], #0x10]\n"
+ "uadalp v4.8h, v21.16b\n"
+ "zip2 v16.2d, v19.2d, v16.2d\n"
+ "str q18, [%x[out_ptr], #0x20]\n"
+ "uadalp v3.8h, v18.16b\n"
"str q16, [%x[out_ptr], #0x30]\n"
"uadalp v2.8h, v16.16b\n"
"add %x[out_ptr], %x[out_ptr], #0x40\n"
"14:" // Odds skip
"uadalp v1.4s, v5.8h\n"
"uadalp v0.4s, v4.8h\n"
+ "addp v1.4s, v1.4s, v0.4s\n"
"uadalp v31.4s, v3.8h\n"
"uadalp v30.4s, v2.8h\n"
- "addp v1.4s, v1.4s, v0.4s\n"
- "addp v0.4s, v31.4s, v30.4s\n"
"add v1.4s, v1.4s, v29.4s\n"
- "add v0.4s, v0.4s, v28.4s\n"
"str q1, [%x[out_ptr], #0x0]\n"
+ "addp v0.4s, v31.4s, v30.4s\n"
+ "add v0.4s, v0.4s, v28.4s\n"
"str q0, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
: [out_ptr] "+&r" (out_ptr), [width] "+&r" (width)
: [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27"
);
}
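
(The unsigned kernel mirrors the signed one with uadalp in place of sadalp. The epilogue at label 14 has the same shape in both: each of v1/v0/v31/v30 holds two partial sums per row for a pair of rows, addp folds adjacent lanes into one total per row, and the running sums reloaded through v29/v28 when `first` is zero are added before the eight int32 totals are stored after the interleaved data. A scalar sketch, with illustrative names:)

#include <cstdint>

// Sketch of the addp + add reduction at label 14 above. Called once
// for rows 0-3 (a=v1, b=v0, carry=v29) and once for rows 4-7
// (a=v31, b=v30, carry=v28).
static void reduce_row_sums(const int32_t a[4], const int32_t b[4],
                            const int32_t carry[4], int32_t out[4]) {
    out[0] = a[0] + a[1] + carry[0];   // addp lane 0, then add
    out[1] = a[2] + a[3] + carry[1];
    out[2] = b[0] + b[1] + carry[2];
    out[3] = b[2] + b[3] + carry[3];
}
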
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave1VL_block2_fp32_bf16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave1VL_block2_fp32_bf16.hpp
index 51b91d16e1..c6ff375ea2 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave1VL_block2_fp32_bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave1VL_block2_fp32_bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#if defined(__ARM_FEATURE_SVE)
@@ -34,105 +34,105 @@ void interleave_block<1, 2, VLType::SME, false>(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cntw x22, ALL, MUL #2\n"
- "sub x28, %x[width], #0x1\n"
"cntw x21, ALL, MUL #2\n"
- "sub x20, x22, #0x1\n"
+ "sub x27, %x[width], #0x1\n"
+ "cntw x20, ALL, MUL #2\n"
+ "sub x19, x21, #0x1\n"
"whilelt p10.s, XZR, %x[height]\n"
- "add x28, x28, x21\n"
- "ands x27, %x[width], x20\n"
- "udiv x28, x28, x21\n"
- "csel x27, x27, x22, NE\n"
- "mov x26, #0x0\n"
- "and x25, x28, #0x1\n"
- "sub x28, x28, #0x1\n"
- "add x27, x27, #0x1\n"
- "mov x20, %x[width]\n"
+ "add x27, x27, x20\n"
+ "ands x26, %x[width], x19\n"
+ "udiv x27, x27, x20\n"
+ "csel x26, x26, x21, NE\n"
+ "mov x25, #0x0\n"
+ "and x24, x27, #0x1\n"
+ "sub x27, x27, #0x1\n"
+ "add x26, x26, #0x1\n"
+ "mov x19, %x[width]\n"
"ptrue p0.b\n"
- "mov x24, %x[outptr_raw]\n"
- "mov x23, %x[row_offset]\n"
- "cntw x22\n"
- "lsr x28, x28, #0x1\n"
+ "mov x23, %x[outptr_raw]\n"
+ "mov x22, %x[row_offset]\n"
+ "cntw x21\n"
"lsr x27, x27, #0x1\n"
+ "lsr x26, x26, #0x1\n"
"mov x12, #0x0\n"
- ".inst 0x25b44751 // whilelt pn9.s, x26, x20, VLx2\n"
- "mov x21, %x[in]\n"
+ ".inst 0x25b34731 // whilelt pn9.s, x25, x19, VLx2\n"
+ "mov x20, %x[in]\n"
"1:" // Width loop: Preamble: Loop
- "ldr x20, [x21], #0x8\n"
+ "ldr x19, [x20], #0x8\n"
".inst 0x25306548 // psel p8.s, p9.s/Z, p10.s[w12]\n"
- ".inst 0xa0174286 // ld1w { z6.s-z7.s }, pn8.s/Z, [x20, x23, LSL #2]\n"
+ ".inst 0xa0164266 // ld1w { z6.s-z7.s }, pn8.s/Z, [x19, x22, LSL #2]\n"
".inst 0xc160e0c6 // bfcvt z6.h, { z6.s-z7.s }\n"
".inst 0xc08000c0 // mova za0h.s[x12], p0/M, z6.s\n"
"add x12, x12, #0x1\n"
- "cmp x12, x22\n"
+ "cmp x12, x21\n"
"blt 1b\n"
- "incw x23, ALL, MUL #2\n"
- "incw x26, ALL, MUL #2\n"
- "cbz x28, 5f\n"
+ "incw x22, ALL, MUL #2\n"
+ "incw x25, ALL, MUL #2\n"
+ "cbz x27, 5f\n"
"2:" // Width loop
- "mov x20, %x[width]\n"
+ "mov x19, %x[width]\n"
"mov x12, #0x0\n"
- ".inst 0x25b44751 // whilelt pn9.s, x26, x20, VLx2\n"
- "mov x21, %x[in]\n"
+ ".inst 0x25b34731 // whilelt pn9.s, x25, x19, VLx2\n"
+ "mov x20, %x[in]\n"
"3:" // Width loop: Odd: Loop
- "ldr x20, [x21], #0x8\n"
+ "ldr x19, [x20], #0x8\n"
".inst 0x25306548 // psel p8.s, p9.s/Z, p10.s[w12]\n"
- ".inst 0xa017429e // ld1w { z30.s-z31.s }, pn8.s/Z, [x20, x23, LSL #2]\n"
+ ".inst 0xa016427e // ld1w { z30.s-z31.s }, pn8.s/Z, [x19, x22, LSL #2]\n"
".inst 0xc160e3de // bfcvt z30.h, { z30.s-z31.s }\n"
".inst 0xc08003c8 // mova za2h.s[x12], p0/M, z30.s\n"
".inst 0xc082800f // mova z15.s, p0/M, za0v.s[x12]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x22\n"
- "st1w { z15.s }, p0, [x24]\n"
- "addvl x24, x24, #1\n"
+ "cmp x12, x21\n"
+ "st1w { z15.s }, p0, [x23]\n"
+ "addvl x23, x23, #1\n"
"blt 3b\n"
- "incw x26, ALL, MUL #2\n"
- "mov x20, %x[width]\n"
- "incw x23, ALL, MUL #2\n"
+ "incw x25, ALL, MUL #2\n"
+ "mov x19, %x[width]\n"
+ "incw x22, ALL, MUL #2\n"
"mov x12, #0x0\n"
- ".inst 0x25b44751 // whilelt pn9.s, x26, x20, VLx2\n"
- "mov x21, %x[in]\n"
+ ".inst 0x25b34731 // whilelt pn9.s, x25, x19, VLx2\n"
+ "mov x20, %x[in]\n"
"4:" // Width loop: Even: Loop
- "ldr x20, [x21], #0x8\n"
+ "ldr x19, [x20], #0x8\n"
".inst 0x25306548 // psel p8.s, p9.s/Z, p10.s[w12]\n"
- ".inst 0xa0174298 // ld1w { z24.s-z25.s }, pn8.s/Z, [x20, x23, LSL #2]\n"
+ ".inst 0xa0164278 // ld1w { z24.s-z25.s }, pn8.s/Z, [x19, x22, LSL #2]\n"
".inst 0xc160e318 // bfcvt z24.h, { z24.s-z25.s }\n"
".inst 0xc0800300 // mova za0h.s[x12], p0/M, z24.s\n"
".inst 0xc0828110 // mova z16.s, p0/M, za2v.s[x12]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x22\n"
- "st1w { z16.s }, p0, [x24]\n"
- "addvl x24, x24, #1\n"
+ "cmp x12, x21\n"
+ "st1w { z16.s }, p0, [x23]\n"
+ "addvl x23, x23, #1\n"
"blt 4b\n"
- "subs x28, x28, #0x1\n"
- "incw x23, ALL, MUL #2\n"
- "incw x26, ALL, MUL #2\n"
+ "subs x27, x27, #0x1\n"
+ "incw x22, ALL, MUL #2\n"
+ "incw x25, ALL, MUL #2\n"
"bgt 2b\n"
"5:" // Width loop: Tails
- "cbnz x25, 8f\n"
- "mov x20, %x[width]\n"
+ "cbnz x24, 8f\n"
+ "mov x19, %x[width]\n"
"mov x12, #0x0\n"
- ".inst 0x25b44751 // whilelt pn9.s, x26, x20, VLx2\n"
- "mov x21, %x[in]\n"
+ ".inst 0x25b34731 // whilelt pn9.s, x25, x19, VLx2\n"
+ "mov x20, %x[in]\n"
"6:" // Width loop: Tails: Even: Odd: Loop
- "ldr x20, [x21], #0x8\n"
+ "ldr x19, [x20], #0x8\n"
".inst 0x25306548 // psel p8.s, p9.s/Z, p10.s[w12]\n"
- ".inst 0xa017428e // ld1w { z14.s-z15.s }, pn8.s/Z, [x20, x23, LSL #2]\n"
+ ".inst 0xa016426e // ld1w { z14.s-z15.s }, pn8.s/Z, [x19, x22, LSL #2]\n"
".inst 0xc160e1ce // bfcvt z14.h, { z14.s-z15.s }\n"
".inst 0xc08001c8 // mova za2h.s[x12], p0/M, z14.s\n"
".inst 0xc0828010 // mova z16.s, p0/M, za0v.s[x12]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x22\n"
- "st1w { z16.s }, p0, [x24]\n"
- "addvl x24, x24, #1\n"
+ "cmp x12, x21\n"
+ "st1w { z16.s }, p0, [x23]\n"
+ "addvl x23, x23, #1\n"
"blt 6b\n"
"mov x12, #0x0\n"
"7:" // Width loop: Tails: Even: Even: Loop
".inst 0xc0828110 // mova z16.s, p0/M, za2v.s[x12]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x27\n"
- "st1w { z16.s }, p0, [x24]\n"
- "addvl x24, x24, #1\n"
+ "cmp x12, x26\n"
+ "st1w { z16.s }, p0, [x23]\n"
+ "addvl x23, x23, #1\n"
"blt 7b\n"
"b 10f\n"
"8:" // Width loop: Tails: Odd
@@ -140,16 +140,16 @@ void interleave_block<1, 2, VLType::SME, false>(
"9:" // Width loop: Tails: Odd: Loop
".inst 0xc0828010 // mova z16.s, p0/M, za0v.s[x12]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x27\n"
- "st1w { z16.s }, p0, [x24]\n"
- "addvl x24, x24, #1\n"
+ "cmp x12, x26\n"
+ "st1w { z16.s }, p0, [x23]\n"
+ "addvl x23, x23, #1\n"
"blt 9b\n"
"10:" // End
- "mov %x[outptr_raw], x24\n"
+ "mov %x[outptr_raw], x23\n"
".inst 0xd503467f // SMSTOP\n"
: [outptr_raw] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x12", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
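
(The `.inst`-encoded bfcvt above narrows a pair of fp32 vectors into one bf16 vector before the data is staged through ZA. A scalar sketch of the fp32 -> bf16 narrowing under a round-to-nearest-even assumption — the usual convention, not a restatement of the exact hardware rounding — with NaN handling omitted for brevity:)

#include <cstdint>
#include <cstring>

// bf16 keeps the top 16 bits of the IEEE-754 binary32 encoding; this
// sketch rounds to nearest, ties to even (assumption, see note above).
static uint16_t fp32_to_bf16(float f) {
    uint32_t bits;
    std::memcpy(&bits, &f, sizeof bits);
    const uint32_t rounding = 0x7fffu + ((bits >> 16) & 1u);
    return static_cast<uint16_t>((bits + rounding) >> 16);
}
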
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave2VL_block2_fp32_bf16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave2VL_block2_fp32_bf16.hpp
index 25bfad18b1..e712eca3ff 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave2VL_block2_fp32_bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave2VL_block2_fp32_bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#if defined(__ARM_FEATURE_SVE)
@@ -34,62 +34,62 @@ void interleave_block<2, 2, VLType::SME, false>(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cntw x22, ALL, MUL #2\n"
- "cntw x9\n"
- "sub x28, %x[width], #0x1\n"
"cntw x21, ALL, MUL #2\n"
- "sub x20, x22, #0x1\n"
+ "cntw x28\n"
+ "sub x27, %x[width], #0x1\n"
+ "cntw x20, ALL, MUL #2\n"
+ "sub x19, x21, #0x1\n"
".inst 0x25207815 // ptrue pn13.b\n"
"whilelt p12.s, XZR, %x[height]\n"
- "whilelt p11.s, x9, %x[height]\n"
- "add x28, x28, x21\n"
- "ands x27, %x[width], x20\n"
- "udiv x28, x28, x21\n"
- "csel x27, x27, x22, NE\n"
- "mov x26, #0x0\n"
- "and x25, x28, #0x1\n"
- "sub x28, x28, #0x1\n"
- "add x27, x27, #0x1\n"
- "mov x20, %x[width]\n"
- "mov x24, %x[in]\n"
+ "whilelt p11.s, x28, %x[height]\n"
+ "add x27, x27, x20\n"
+ "ands x26, %x[width], x19\n"
+ "udiv x27, x27, x20\n"
+ "csel x26, x26, x21, NE\n"
+ "mov x25, #0x0\n"
+ "and x24, x27, #0x1\n"
+ "sub x27, x27, #0x1\n"
+ "add x26, x26, #0x1\n"
+ "mov x19, %x[width]\n"
+ "mov x23, %x[in]\n"
"ptrue p0.b\n"
- "mov x23, %x[outptr_raw]\n"
- "mov x22, %x[row_offset]\n"
- "lsr x28, x28, #0x1\n"
+ "mov x22, %x[outptr_raw]\n"
+ "mov x21, %x[row_offset]\n"
"lsr x27, x27, #0x1\n"
+ "lsr x26, x26, #0x1\n"
"mov x12, #0x0\n"
- ".inst 0x25b44752 // whilelt pn10.s, x26, x20, VLx2\n"
- "add x21, x24, x9, LSL #3\n"
+ ".inst 0x25b34732 // whilelt pn10.s, x25, x19, VLx2\n"
+ "add x20, x23, x28, LSL #3\n"
"1:" // Width loop: Preamble: Loop
- "ldr x20, [x24], #0x8\n"
+ "ldr x19, [x23], #0x8\n"
".inst 0x25306989 // psel p9.s, p10.s/Z, p12.s[w12]\n"
".inst 0x25306968 // psel p8.s, p10.s/Z, p11.s[w12]\n"
- ".inst 0xa0164698 // ld1w { z24.s-z25.s }, pn9.s/Z, [x20, x22, LSL #2]\n"
- "ldr x20, [x21], #0x8\n"
- ".inst 0xa0164296 // ld1w { z22.s-z23.s }, pn8.s/Z, [x20, x22, LSL #2]\n"
+ ".inst 0xa0154678 // ld1w { z24.s-z25.s }, pn9.s/Z, [x19, x21, LSL #2]\n"
+ "ldr x19, [x20], #0x8\n"
+ ".inst 0xa0154276 // ld1w { z22.s-z23.s }, pn8.s/Z, [x19, x21, LSL #2]\n"
".inst 0xc160e318 // bfcvt z24.h, { z24.s-z25.s }\n"
".inst 0xc160e2d6 // bfcvt z22.h, { z22.s-z23.s }\n"
".inst 0xc0800300 // mova za0h.s[x12], p0/M, z24.s\n"
".inst 0xc08002c4 // mova za1h.s[x12], p0/M, z22.s\n"
"add x12, x12, #0x1\n"
- "cmp x12, x9\n"
+ "cmp x12, x28\n"
"blt 1b\n"
- "incw x22, ALL, MUL #2\n"
- "incw x26, ALL, MUL #2\n"
- "cbz x28, 5f\n"
+ "incw x21, ALL, MUL #2\n"
+ "incw x25, ALL, MUL #2\n"
+ "cbz x27, 5f\n"
"2:" // Width loop
- "mov x20, %x[width]\n"
- "mov x24, %x[in]\n"
+ "mov x19, %x[width]\n"
+ "mov x23, %x[in]\n"
"mov x12, #0x0\n"
- ".inst 0x25b44752 // whilelt pn10.s, x26, x20, VLx2\n"
- "add x21, x24, x9, LSL #3\n"
+ ".inst 0x25b34732 // whilelt pn10.s, x25, x19, VLx2\n"
+ "add x20, x23, x28, LSL #3\n"
"3:" // Width loop: Odd: Loop
- "ldr x20, [x24], #0x8\n"
+ "ldr x19, [x23], #0x8\n"
".inst 0x25306989 // psel p9.s, p10.s/Z, p12.s[w12]\n"
".inst 0x25306968 // psel p8.s, p10.s/Z, p11.s[w12]\n"
- ".inst 0xa0164696 // ld1w { z22.s-z23.s }, pn9.s/Z, [x20, x22, LSL #2]\n"
- "ldr x20, [x21], #0x8\n"
- ".inst 0xa016428a // ld1w { z10.s-z11.s }, pn8.s/Z, [x20, x22, LSL #2]\n"
+ ".inst 0xa0154676 // ld1w { z22.s-z23.s }, pn9.s/Z, [x19, x21, LSL #2]\n"
+ "ldr x19, [x20], #0x8\n"
+ ".inst 0xa015426a // ld1w { z10.s-z11.s }, pn8.s/Z, [x19, x21, LSL #2]\n"
".inst 0xc160e2d6 // bfcvt z22.h, { z22.s-z23.s }\n"
".inst 0xc160e14a // bfcvt z10.h, { z10.s-z11.s }\n"
".inst 0xc08002c8 // mova za2h.s[x12], p0/M, z22.s\n"
@@ -97,24 +97,24 @@ void interleave_block<2, 2, VLType::SME, false>(
".inst 0xc0828008 // mova z8.s, p0/M, za0v.s[x12]\n"
".inst 0xc0828089 // mova z9.s, p0/M, za1v.s[x12]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x9\n"
- ".inst 0xa06056e8 // st1w { z8.s-z9.s }, pn13.b, [x23]\n"
- "addvl x23, x23, #2\n"
+ "cmp x12, x28\n"
+ ".inst 0xa06056c8 // st1w { z8.s-z9.s }, pn13.b, [x22]\n"
+ "addvl x22, x22, #2\n"
"blt 3b\n"
- "incw x26, ALL, MUL #2\n"
- "mov x20, %x[width]\n"
- "mov x24, %x[in]\n"
- "incw x22, ALL, MUL #2\n"
+ "incw x25, ALL, MUL #2\n"
+ "mov x19, %x[width]\n"
+ "mov x23, %x[in]\n"
+ "incw x21, ALL, MUL #2\n"
"mov x12, #0x0\n"
- ".inst 0x25b44752 // whilelt pn10.s, x26, x20, VLx2\n"
- "add x21, x24, x9, LSL #3\n"
+ ".inst 0x25b34732 // whilelt pn10.s, x25, x19, VLx2\n"
+ "add x20, x23, x28, LSL #3\n"
"4:" // Width loop: Even: Loop
- "ldr x20, [x24], #0x8\n"
+ "ldr x19, [x23], #0x8\n"
".inst 0x25306989 // psel p9.s, p10.s/Z, p12.s[w12]\n"
".inst 0x25306968 // psel p8.s, p10.s/Z, p11.s[w12]\n"
- ".inst 0xa016469a // ld1w { z26.s-z27.s }, pn9.s/Z, [x20, x22, LSL #2]\n"
- "ldr x20, [x21], #0x8\n"
- ".inst 0xa016429e // ld1w { z30.s-z31.s }, pn8.s/Z, [x20, x22, LSL #2]\n"
+ ".inst 0xa015467a // ld1w { z26.s-z27.s }, pn9.s/Z, [x19, x21, LSL #2]\n"
+ "ldr x19, [x20], #0x8\n"
+ ".inst 0xa015427e // ld1w { z30.s-z31.s }, pn8.s/Z, [x19, x21, LSL #2]\n"
".inst 0xc160e35a // bfcvt z26.h, { z26.s-z27.s }\n"
".inst 0xc160e3de // bfcvt z30.h, { z30.s-z31.s }\n"
".inst 0xc0800340 // mova za0h.s[x12], p0/M, z26.s\n"
@@ -122,28 +122,28 @@ void interleave_block<2, 2, VLType::SME, false>(
".inst 0xc0828106 // mova z6.s, p0/M, za2v.s[x12]\n"
".inst 0xc082818e // mova z14.s, p0/M, za3v.s[x12]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x9\n"
- ".inst 0xa16056e6 // st1w { z6.s, z14.s }, pn13.b, [x23]\n"
- "addvl x23, x23, #2\n"
+ "cmp x12, x28\n"
+ ".inst 0xa16056c6 // st1w { z6.s, z14.s }, pn13.b, [x22]\n"
+ "addvl x22, x22, #2\n"
"blt 4b\n"
- "subs x28, x28, #0x1\n"
- "incw x22, ALL, MUL #2\n"
- "incw x26, ALL, MUL #2\n"
+ "subs x27, x27, #0x1\n"
+ "incw x21, ALL, MUL #2\n"
+ "incw x25, ALL, MUL #2\n"
"bgt 2b\n"
"5:" // Width loop: Tails
- "cbnz x25, 8f\n"
- "mov x20, %x[width]\n"
- "mov x24, %x[in]\n"
+ "cbnz x24, 8f\n"
+ "mov x19, %x[width]\n"
+ "mov x23, %x[in]\n"
"mov x12, #0x0\n"
- ".inst 0x25b44752 // whilelt pn10.s, x26, x20, VLx2\n"
- "add x21, x24, x9, LSL #3\n"
+ ".inst 0x25b34732 // whilelt pn10.s, x25, x19, VLx2\n"
+ "add x20, x23, x28, LSL #3\n"
"6:" // Width loop: Tails: Even: Odd: Loop
- "ldr x20, [x24], #0x8\n"
+ "ldr x19, [x23], #0x8\n"
".inst 0x25306989 // psel p9.s, p10.s/Z, p12.s[w12]\n"
".inst 0x25306968 // psel p8.s, p10.s/Z, p11.s[w12]\n"
- ".inst 0xa016468c // ld1w { z12.s-z13.s }, pn9.s/Z, [x20, x22, LSL #2]\n"
- "ldr x20, [x21], #0x8\n"
- ".inst 0xa016428e // ld1w { z14.s-z15.s }, pn8.s/Z, [x20, x22, LSL #2]\n"
+ ".inst 0xa015466c // ld1w { z12.s-z13.s }, pn9.s/Z, [x19, x21, LSL #2]\n"
+ "ldr x19, [x20], #0x8\n"
+ ".inst 0xa015426e // ld1w { z14.s-z15.s }, pn8.s/Z, [x19, x21, LSL #2]\n"
".inst 0xc160e18c // bfcvt z12.h, { z12.s-z13.s }\n"
".inst 0xc160e1ce // bfcvt z14.h, { z14.s-z15.s }\n"
".inst 0xc0800188 // mova za2h.s[x12], p0/M, z12.s\n"
@@ -151,18 +151,18 @@ void interleave_block<2, 2, VLType::SME, false>(
".inst 0xc0828007 // mova z7.s, p0/M, za0v.s[x12]\n"
".inst 0xc082808f // mova z15.s, p0/M, za1v.s[x12]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x9\n"
- ".inst 0xa16056e7 // st1w { z7.s, z15.s }, pn13.b, [x23]\n"
- "addvl x23, x23, #2\n"
+ "cmp x12, x28\n"
+ ".inst 0xa16056c7 // st1w { z7.s, z15.s }, pn13.b, [x22]\n"
+ "addvl x22, x22, #2\n"
"blt 6b\n"
"mov x12, #0x0\n"
"7:" // Width loop: Tails: Even: Even: Loop
".inst 0xc082810e // mova z14.s, p0/M, za2v.s[x12]\n"
".inst 0xc082818f // mova z15.s, p0/M, za3v.s[x12]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x27\n"
- ".inst 0xa06056ee // st1w { z14.s-z15.s }, pn13.b, [x23]\n"
- "addvl x23, x23, #2\n"
+ "cmp x12, x26\n"
+ ".inst 0xa06056ce // st1w { z14.s-z15.s }, pn13.b, [x22]\n"
+ "addvl x22, x22, #2\n"
"blt 7b\n"
"b 10f\n"
"8:" // Width loop: Tails: Odd
@@ -171,16 +171,16 @@ void interleave_block<2, 2, VLType::SME, false>(
".inst 0xc0828014 // mova z20.s, p0/M, za0v.s[x12]\n"
".inst 0xc0828095 // mova z21.s, p0/M, za1v.s[x12]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x27\n"
- ".inst 0xa06056f4 // st1w { z20.s-z21.s }, pn13.b, [x23]\n"
- "addvl x23, x23, #2\n"
+ "cmp x12, x26\n"
+ ".inst 0xa06056d4 // st1w { z20.s-z21.s }, pn13.b, [x22]\n"
+ "addvl x22, x22, #2\n"
"blt 9b\n"
"10:" // End
- "mov %x[outptr_raw], x23\n"
+ "mov %x[outptr_raw], x22\n"
".inst 0xd503467f // SMSTOP\n"
: [outptr_raw] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x12", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
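
(The preamble arithmetic shared by these SME2 interleaves computes how many double-vector column blocks to walk and the width of the last one: a ceil-division via the sub/add/udiv sequence and a masked remainder via ands/csel. A sketch under the assumption that the vector length is a power of two, which the and-mask relies on; names are illustrative:)

#include <cstddef>

// vl2 is the `cntw ..., ALL, MUL #2` value: the number of 32-bit
// elements held by two vectors.
static void plan_width_loop(size_t width, size_t vl2,
                            size_t &blocks, size_t &tail) {
    blocks = (width - 1 + vl2) / vl2;  // sub/add/udiv: ceil(width/vl2)
    tail = width & (vl2 - 1);          // ands ..., vl2 - 1
    if (tail == 0) tail = vl2;         // csel ..., NE
    // The asm then peels the first block in its preamble, records the
    // parity of the remaining count (`and ..., #0x1`, used by cbnz to
    // pick the tail path) and halves it, since the main loop retires
    // two blocks per trip (one odd pass, one even pass through ZA).
}
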
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave4VL_block2_fp32_bf16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave4VL_block2_fp32_bf16.hpp
index 9255831e86..e08d6d992e 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave4VL_block2_fp32_bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave4VL_block2_fp32_bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#if defined(__ARM_FEATURE_SVE)
@@ -34,51 +34,51 @@ void interleave_block<4, 2, VLType::SME, false>(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cntw x23, ALL, MUL #2\n"
- "cntw x10\n"
"cntw x22, ALL, MUL #2\n"
- "cntw x20, ALL, MUL #3\n"
- "sub x21, x23, #0x1\n"
+ "cntw x9\n"
+ "cntw x21, ALL, MUL #2\n"
+ "cntw x19, ALL, MUL #3\n"
+ "sub x20, x22, #0x1\n"
".inst 0x25207817 // ptrue pn15.b\n"
"whilelt p1.s, XZR, %x[height]\n"
- "whilelt p14.s, x10, %x[height]\n"
- "whilelt p13.s, x22, %x[height]\n"
- "whilelt p12.s, x20, %x[height]\n"
- "sub x9, %x[width], #0x1\n"
- "cntw x20, ALL, MUL #2\n"
- "ands x28, %x[width], x21\n"
- "mov x27, %x[in]\n"
- "add x9, x9, x20\n"
- "csel x28, x28, x23, NE\n"
- "add x26, x27, x10, LSL #3\n"
- "mov x25, #0x0\n"
- "udiv x9, x9, x20\n"
- "add x28, x28, #0x1\n"
- "mov x20, %x[width]\n"
- "add x24, x26, x10, LSL #3\n"
+ "whilelt p14.s, x9, %x[height]\n"
+ "whilelt p13.s, x21, %x[height]\n"
+ "whilelt p12.s, x19, %x[height]\n"
+ "sub x28, %x[width], #0x1\n"
+ "cntw x19, ALL, MUL #2\n"
+ "ands x27, %x[width], x20\n"
+ "mov x26, %x[in]\n"
+ "add x28, x28, x19\n"
+ "csel x27, x27, x22, NE\n"
+ "add x25, x26, x9, LSL #3\n"
+ "mov x24, #0x0\n"
+ "udiv x28, x28, x19\n"
+ "add x27, x27, #0x1\n"
+ "mov x19, %x[width]\n"
+ "add x23, x25, x9, LSL #3\n"
"ptrue p0.b\n"
- "mov x23, %x[outptr_raw]\n"
- "mov x22, %x[row_offset]\n"
- "sub x9, x9, #0x1\n"
- "lsr x28, x28, #0x1\n"
+ "mov x22, %x[outptr_raw]\n"
+ "mov x21, %x[row_offset]\n"
+ "sub x28, x28, #0x1\n"
+ "lsr x27, x27, #0x1\n"
"mov x12, #0x0\n"
- ".inst 0x25b44733 // whilelt pn11.s, x25, x20, VLx2\n"
- "add x21, x24, x10, LSL #3\n"
+ ".inst 0x25b34713 // whilelt pn11.s, x24, x19, VLx2\n"
+ "add x20, x23, x9, LSL #3\n"
"1:" // Width loop: Preamble: Loop
- "ldr x20, [x27], #0x8\n"
+ "ldr x19, [x26], #0x8\n"
".inst 0x25306c28 // psel p8.s, p11.s/Z, p1.s[w12]\n"
".inst 0x25306dca // psel p10.s, p11.s/Z, p14.s[w12]\n"
- ".inst 0xa0164298 // ld1w { z24.s-z25.s }, pn8.s/Z, [x20, x22, LSL #2]\n"
- "ldr x20, [x26], #0x8\n"
+ ".inst 0xa0154278 // ld1w { z24.s-z25.s }, pn8.s/Z, [x19, x21, LSL #2]\n"
+ "ldr x19, [x25], #0x8\n"
".inst 0x25306da9 // psel p9.s, p11.s/Z, p13.s[w12]\n"
".inst 0x25306d88 // psel p8.s, p11.s/Z, p12.s[w12]\n"
- ".inst 0xa0164a82 // ld1w { z2.s-z3.s }, pn10.s/Z, [x20, x22, LSL #2]\n"
- "ldr x20, [x24], #0x8\n"
- ".inst 0xa016468a // ld1w { z10.s-z11.s }, pn9.s/Z, [x20, x22, LSL #2]\n"
+ ".inst 0xa0154a62 // ld1w { z2.s-z3.s }, pn10.s/Z, [x19, x21, LSL #2]\n"
+ "ldr x19, [x23], #0x8\n"
+ ".inst 0xa015466a // ld1w { z10.s-z11.s }, pn9.s/Z, [x19, x21, LSL #2]\n"
".inst 0xc160e318 // bfcvt z24.h, { z24.s-z25.s }\n"
".inst 0xc160e042 // bfcvt z2.h, { z2.s-z3.s }\n"
- "ldr x20, [x21], #0x8\n"
- ".inst 0xa016428c // ld1w { z12.s-z13.s }, pn8.s/Z, [x20, x22, LSL #2]\n"
+ "ldr x19, [x20], #0x8\n"
+ ".inst 0xa015426c // ld1w { z12.s-z13.s }, pn8.s/Z, [x19, x21, LSL #2]\n"
".inst 0xc160e14a // bfcvt z10.h, { z10.s-z11.s }\n"
".inst 0xc160e18c // bfcvt z12.h, { z12.s-z13.s }\n"
".inst 0xc0800300 // mova za0h.s[x12], p0/M, z24.s\n"
@@ -86,11 +86,11 @@ void interleave_block<4, 2, VLType::SME, false>(
".inst 0xc0800148 // mova za2h.s[x12], p0/M, z10.s\n"
".inst 0xc080018c // mova za3h.s[x12], p0/M, z12.s\n"
"add x12, x12, #0x1\n"
- "cmp x12, x10\n"
+ "cmp x12, x9\n"
"blt 1b\n"
- "incw x22, ALL, MUL #2\n"
- "incw x25, ALL, MUL #2\n"
- "cbz x9, 5f\n"
+ "incw x21, ALL, MUL #2\n"
+ "incw x24, ALL, MUL #2\n"
+ "cbz x28, 5f\n"
"2:" // Width loop
"mov x12, #0x0\n"
"3:" // Width loop: Store: Loop
@@ -99,32 +99,32 @@ void interleave_block<4, 2, VLType::SME, false>(
".inst 0xc0828119 // mova z25.s, p0/M, za2v.s[x12]\n"
".inst 0xc082819d // mova z29.s, p0/M, za3v.s[x12]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x10\n"
- ".inst 0xa160def1 // st1w { z17.s, z21.s, z25.s, z29.s }, pn15.b, [x23]\n"
- "addvl x23, x23, #4\n"
+ "cmp x12, x9\n"
+ ".inst 0xa160ded1 // st1w { z17.s, z21.s, z25.s, z29.s }, pn15.b, [x22]\n"
+ "addvl x22, x22, #4\n"
"blt 3b\n"
- "mov x27, %x[in]\n"
- "add x26, x27, x10, LSL #3\n"
- "mov x20, %x[width]\n"
- "add x24, x26, x10, LSL #3\n"
+ "mov x26, %x[in]\n"
+ "add x25, x26, x9, LSL #3\n"
+ "mov x19, %x[width]\n"
+ "add x23, x25, x9, LSL #3\n"
"mov x12, #0x0\n"
- ".inst 0x25b44733 // whilelt pn11.s, x25, x20, VLx2\n"
- "add x21, x24, x10, LSL #3\n"
+ ".inst 0x25b34713 // whilelt pn11.s, x24, x19, VLx2\n"
+ "add x20, x23, x9, LSL #3\n"
"4:" // Width loop: Load: Loop
- "ldr x20, [x27], #0x8\n"
+ "ldr x19, [x26], #0x8\n"
".inst 0x25306c28 // psel p8.s, p11.s/Z, p1.s[w12]\n"
".inst 0x25306dca // psel p10.s, p11.s/Z, p14.s[w12]\n"
- ".inst 0xa016428c // ld1w { z12.s-z13.s }, pn8.s/Z, [x20, x22, LSL #2]\n"
- "ldr x20, [x26], #0x8\n"
+ ".inst 0xa015426c // ld1w { z12.s-z13.s }, pn8.s/Z, [x19, x21, LSL #2]\n"
+ "ldr x19, [x25], #0x8\n"
".inst 0x25306da9 // psel p9.s, p11.s/Z, p13.s[w12]\n"
".inst 0x25306d88 // psel p8.s, p11.s/Z, p12.s[w12]\n"
- ".inst 0xa0164a8e // ld1w { z14.s-z15.s }, pn10.s/Z, [x20, x22, LSL #2]\n"
- "ldr x20, [x24], #0x8\n"
- ".inst 0xa0164692 // ld1w { z18.s-z19.s }, pn9.s/Z, [x20, x22, LSL #2]\n"
+ ".inst 0xa0154a6e // ld1w { z14.s-z15.s }, pn10.s/Z, [x19, x21, LSL #2]\n"
+ "ldr x19, [x23], #0x8\n"
+ ".inst 0xa0154672 // ld1w { z18.s-z19.s }, pn9.s/Z, [x19, x21, LSL #2]\n"
".inst 0xc160e18c // bfcvt z12.h, { z12.s-z13.s }\n"
".inst 0xc160e1ce // bfcvt z14.h, { z14.s-z15.s }\n"
- "ldr x20, [x21], #0x8\n"
- ".inst 0xa016429e // ld1w { z30.s-z31.s }, pn8.s/Z, [x20, x22, LSL #2]\n"
+ "ldr x19, [x20], #0x8\n"
+ ".inst 0xa015427e // ld1w { z30.s-z31.s }, pn8.s/Z, [x19, x21, LSL #2]\n"
".inst 0xc160e252 // bfcvt z18.h, { z18.s-z19.s }\n"
".inst 0xc160e3de // bfcvt z30.h, { z30.s-z31.s }\n"
".inst 0xc0800180 // mova za0h.s[x12], p0/M, z12.s\n"
@@ -132,11 +132,11 @@ void interleave_block<4, 2, VLType::SME, false>(
".inst 0xc0800248 // mova za2h.s[x12], p0/M, z18.s\n"
".inst 0xc08003cc // mova za3h.s[x12], p0/M, z30.s\n"
"add x12, x12, #0x1\n"
- "cmp x12, x10\n"
+ "cmp x12, x9\n"
"blt 4b\n"
- "subs x9, x9, #0x1\n"
- "incw x22, ALL, MUL #2\n"
- "incw x25, ALL, MUL #2\n"
+ "subs x28, x28, #0x1\n"
+ "incw x21, ALL, MUL #2\n"
+ "incw x24, ALL, MUL #2\n"
"bgt 2b\n"
"5:" // Width loop: Tails
"mov x12, #0x0\n"
@@ -146,16 +146,16 @@ void interleave_block<4, 2, VLType::SME, false>(
".inst 0xc0828119 // mova z25.s, p0/M, za2v.s[x12]\n"
".inst 0xc082819d // mova z29.s, p0/M, za3v.s[x12]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x28\n"
- ".inst 0xa160def1 // st1w { z17.s, z21.s, z25.s, z29.s }, pn15.b, [x23]\n"
- "addvl x23, x23, #4\n"
+ "cmp x12, x27\n"
+ ".inst 0xa160ded1 // st1w { z17.s, z21.s, z25.s, z29.s }, pn15.b, [x22]\n"
+ "addvl x22, x22, #4\n"
"blt 6b\n"
"7:" // End
- "mov %x[outptr_raw], x23\n"
+ "mov %x[outptr_raw], x22\n"
".inst 0xd503467f // SMSTOP\n"
: [outptr_raw] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x12", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
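
Each load in the hunks above dereferences the next row pointer from %x[in] ("ldr x19, [x26], #0x8") and reads at row_offset words into that row ("ld1w ..., [x19, x21, LSL #2]"), after which pairs of fp32 vectors are narrowed to bf16 by BFCVT. A scalar C++ sketch of that indirect access pattern follows; it is illustrative only, none of these names come from the library, and it assumes round-to-nearest-even for the fp32-to-bf16 narrowing:

#include <cstdint>
#include <cstring>

// Narrow one fp32 value to bf16 with round-to-nearest-even (a sketch of the
// numerical effect of the narrowing under default rounding; NaN handling is
// omitted for brevity).
static uint16_t fp32_to_bf16(float f) {
    uint32_t bits;
    std::memcpy(&bits, &f, sizeof(bits));
    uint32_t rounding = 0x7fffu + ((bits >> 16) & 1u);  // RNE, ties to even
    return static_cast<uint16_t>((bits + rounding) >> 16);
}

// Indirect row walk: 'in' is an array of row pointers, mirroring the kernel's
// "ldr x, [in], #0x8" / "ld1w ..., [x, row_offset, LSL #2]" sequence.
static void gather_convert(const float* const* in, uint64_t rows,
                           uint64_t row_offset, uint64_t cols, uint16_t* out) {
    for (uint64_t r = 0; r < rows; r++) {
        const float* row = in[r] + row_offset;  // offset in fp32 elements
        for (uint64_t c = 0; c < cols; c++) {
            *out++ = fp32_to_bf16(row[c]);
        }
    }
}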
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_bf16_bf16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_bf16_bf16.hpp
index 9b66a6fb10..3c8c70776a 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_bf16_bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_bf16_bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#if defined(__ARM_FEATURE_SVE)
@@ -34,175 +34,175 @@ void interleave_block<1, 1, VLType::SME, false>(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "mov x21, %x[width]\n"
- "inch x21\n"
- "cnth x11\n"
- "sub x21, x21, #0x1\n"
- "udiv x21, x21, x11\n" // n_passes = ceildiv(width, VL<T>)
"mov x20, %x[width]\n"
- "sub x10, x11, #0x1\n"
- "sub x9, x21, #0x1\n"
- "ands x10, x20, x10\n"
- "sub x28, x11, #0x2\n"
- "lsl x20, %x[height], #0x1\n" // height * 2
- "mov x27, #0x0\n"
- "mov x26, %x[in]\n"
- "lsr x9, x9, #0x1\n" // n_loops = (n_passes - 1) / 2
- "ldr x25, [x26, #0x0]\n"
- "and x24, x21, #0x1\n" // odd_tail = bool(n_passes & 0x1)
- "csel x10, x10, x11, NE\n"
- "ldr x23, [x26, #0x8]\n"
+ "inch x20\n"
+ "cnth x10\n"
+ "sub x20, x20, #0x1\n"
+ "udiv x20, x20, x10\n" // n_passes = ceildiv(width, VL<T>)
+ "mov x19, %x[width]\n"
+ "sub x9, x10, #0x1\n"
+ "sub x28, x20, #0x1\n"
+ "ands x9, x19, x9\n"
+ "sub x27, x10, #0x2\n"
+ "lsl x19, %x[height], #0x1\n" // height * 2
+ "mov x26, #0x0\n"
+ "mov x25, %x[in]\n"
+ "lsr x28, x28, #0x1\n" // n_loops = (n_passes - 1) / 2
+ "ldr x24, [x25, #0x0]\n"
+ "and x23, x20, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+ "csel x9, x9, x10, NE\n"
+ "ldr x22, [x25, #0x8]\n"
"ptrue p11.h\n"
- "whilelt p10.h, XZR, x20\n"
- "mov x22, %x[row_offset]\n"
- "mov x21, %x[out]\n"
- "whilelt p9.h, x27, %x[width]\n"
- "whilelt p8.h, x27, %x[width]\n"
- "add x26, x26, #0x10\n"
+ "whilelt p10.h, XZR, x19\n"
+ "mov x21, %x[row_offset]\n"
+ "mov x20, %x[out]\n"
+ "whilelt p9.h, x26, %x[width]\n"
+ "whilelt p8.h, x26, %x[width]\n"
+ "add x25, x25, #0x10\n"
"mov x12, #0x0\n"
- "cbz x28, 2f\n"
+ "cbz x27, 2f\n"
"1:" // K loop: Charge: Loop
".inst 0x25286140 // psel p0.h, p8.h/Z, p10.h[w12]\n"
- ".inst 0xe0560320 // ld1h { za0h.h[x12] }, p0/Z, [x25, x22, LSL #1]\n"
+ ".inst 0xe0550300 // ld1h { za0h.h[x12] }, p0/Z, [x24, x21, LSL #1]\n"
".inst 0x25386140 // psel p0.h, p8.h/Z, p10.h[w12, #1]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0xe05602e1 // ld1h { za0h.h[x12, #1] }, p0/Z, [x23, x22, LSL #1]\n"
+ "ldr x24, [x25, #0x0]\n"
+ ".inst 0xe05502c1 // ld1h { za0h.h[x12, #1] }, p0/Z, [x22, x21, LSL #1]\n"
"add x12, x12, #0x2\n"
- "cmp x12, x28\n"
- "ldr x23, [x26, #0x8]\n"
- "add x26, x26, #0x10\n"
+ "cmp x12, x27\n"
+ "ldr x22, [x25, #0x8]\n"
+ "add x25, x25, #0x10\n"
"blt 1b\n"
"2:" // K loop: Charge: End
".inst 0x25286140 // psel p0.h, p8.h/Z, p10.h[w12]\n"
- ".inst 0xe0560320 // ld1h { za0h.h[x12] }, p0/Z, [x25, x22, LSL #1]\n"
+ ".inst 0xe0550300 // ld1h { za0h.h[x12] }, p0/Z, [x24, x21, LSL #1]\n"
".inst 0x25386140 // psel p0.h, p8.h/Z, p10.h[w12, #1]\n"
- "mov x26, %x[in]\n"
- ".inst 0xe05602e1 // ld1h { za0h.h[x12, #1] }, p0/Z, [x23, x22, LSL #1]\n"
- "ldr x25, [x26, #0x0]\n"
- "inch x22\n"
- "ldr x23, [x26, #0x8]\n"
- "add x26, x26, #0x10\n"
- "inch x27\n"
- "cbz x9, 8f\n"
- "mov x20, x9\n"
+ "mov x25, %x[in]\n"
+ ".inst 0xe05502c1 // ld1h { za0h.h[x12, #1] }, p0/Z, [x22, x21, LSL #1]\n"
+ "ldr x24, [x25, #0x0]\n"
+ "inch x21\n"
+ "ldr x22, [x25, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "inch x26\n"
+ "cbz x28, 8f\n"
+ "mov x19, x28\n"
"3:" // K loop: Main loop
- "whilelt p8.h, x27, %x[width]\n"
+ "whilelt p8.h, x26, %x[width]\n"
"mov x12, #0x0\n"
- "cbz x28, 5f\n"
+ "cbz x27, 5f\n"
"4:" // K loop: Main loop: First: Loop
".inst 0x25286140 // psel p0.h, p8.h/Z, p10.h[w12]\n"
- ".inst 0xe0560328 // ld1h { za1h.h[x12] }, p0/Z, [x25, x22, LSL #1]\n"
+ ".inst 0xe0550308 // ld1h { za1h.h[x12] }, p0/Z, [x24, x21, LSL #1]\n"
".inst 0x25386141 // psel p1.h, p8.h/Z, p10.h[w12, #1]\n"
- "ldr x25, [x26, #0x0]\n"
+ "ldr x24, [x25, #0x0]\n"
".inst 0x25286d20 // psel p0.h, p11.h/Z, p9.h[w12]\n"
- ".inst 0xe05606e9 // ld1h { za1h.h[x12, #1] }, p1/Z, [x23, x22, LSL #1]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe07f82a0 // st1h { za0v.h[x12] }, p0/Z, [x21, XZR, LSL #1]\n"
+ ".inst 0xe05506c9 // ld1h { za1h.h[x12, #1] }, p1/Z, [x22, x21, LSL #1]\n"
+ "ldr x22, [x25, #0x8]\n"
+ ".inst 0xe07f8280 // st1h { za0v.h[x12] }, p0/Z, [x20, XZR, LSL #1]\n"
".inst 0x25386d20 // psel p0.h, p11.h/Z, p9.h[w12, #1]\n"
- ".inst 0xe06b82a1 // st1h { za0v.h[x12, #1] }, p0/Z, [x21, x11, LSL #1]\n"
+ ".inst 0xe06a8281 // st1h { za0v.h[x12, #1] }, p0/Z, [x20, x10, LSL #1]\n"
"add x12, x12, #0x2\n"
- "cmp x12, x28\n"
- "add x26, x26, #0x10\n"
- "addvl x21, x21, #2\n"
+ "cmp x12, x27\n"
+ "add x25, x25, #0x10\n"
+ "addvl x20, x20, #2\n"
"blt 4b\n"
"5:" // K loop: Main loop: First: Tail
".inst 0x25286140 // psel p0.h, p8.h/Z, p10.h[w12]\n"
- ".inst 0xe0560328 // ld1h { za1h.h[x12] }, p0/Z, [x25, x22, LSL #1]\n"
- "mov x26, %x[in]\n"
- "ldr x25, [x26, #0x0]\n"
+ ".inst 0xe0550308 // ld1h { za1h.h[x12] }, p0/Z, [x24, x21, LSL #1]\n"
+ "mov x25, %x[in]\n"
+ "ldr x24, [x25, #0x0]\n"
".inst 0x25386141 // psel p1.h, p8.h/Z, p10.h[w12, #1]\n"
".inst 0x25286d20 // psel p0.h, p11.h/Z, p9.h[w12]\n"
- ".inst 0xe05606e9 // ld1h { za1h.h[x12, #1] }, p1/Z, [x23, x22, LSL #1]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe07f82a0 // st1h { za0v.h[x12] }, p0/Z, [x21, XZR, LSL #1]\n"
+ ".inst 0xe05506c9 // ld1h { za1h.h[x12, #1] }, p1/Z, [x22, x21, LSL #1]\n"
+ "ldr x22, [x25, #0x8]\n"
+ ".inst 0xe07f8280 // st1h { za0v.h[x12] }, p0/Z, [x20, XZR, LSL #1]\n"
".inst 0x25386d20 // psel p0.h, p11.h/Z, p9.h[w12, #1]\n"
- "whilelt p9.h, x27, %x[width]\n"
- "inch x27\n"
- "add x26, x26, #0x10\n"
- ".inst 0xe06b82a1 // st1h { za0v.h[x12, #1] }, p0/Z, [x21, x11, LSL #1]\n"
- "addvl x21, x21, #2\n"
- "inch x22\n"
- "whilelt p8.h, x27, %x[width]\n"
+ "whilelt p9.h, x26, %x[width]\n"
+ "inch x26\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0xe06a8281 // st1h { za0v.h[x12, #1] }, p0/Z, [x20, x10, LSL #1]\n"
+ "addvl x20, x20, #2\n"
+ "inch x21\n"
+ "whilelt p8.h, x26, %x[width]\n"
"mov x12, #0x0\n"
- "cbz x28, 7f\n"
+ "cbz x27, 7f\n"
"6:" // K loop: Main loop: Second: Loop
".inst 0x25286140 // psel p0.h, p8.h/Z, p10.h[w12]\n"
- ".inst 0xe0560320 // ld1h { za0h.h[x12] }, p0/Z, [x25, x22, LSL #1]\n"
+ ".inst 0xe0550300 // ld1h { za0h.h[x12] }, p0/Z, [x24, x21, LSL #1]\n"
".inst 0x25386141 // psel p1.h, p8.h/Z, p10.h[w12, #1]\n"
- "ldr x25, [x26, #0x0]\n"
+ "ldr x24, [x25, #0x0]\n"
".inst 0x25286d20 // psel p0.h, p11.h/Z, p9.h[w12]\n"
- ".inst 0xe05606e1 // ld1h { za0h.h[x12, #1] }, p1/Z, [x23, x22, LSL #1]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe07f82a8 // st1h { za1v.h[x12] }, p0/Z, [x21, XZR, LSL #1]\n"
+ ".inst 0xe05506c1 // ld1h { za0h.h[x12, #1] }, p1/Z, [x22, x21, LSL #1]\n"
+ "ldr x22, [x25, #0x8]\n"
+ ".inst 0xe07f8288 // st1h { za1v.h[x12] }, p0/Z, [x20, XZR, LSL #1]\n"
".inst 0x25386d20 // psel p0.h, p11.h/Z, p9.h[w12, #1]\n"
- ".inst 0xe06b82a9 // st1h { za1v.h[x12, #1] }, p0/Z, [x21, x11, LSL #1]\n"
+ ".inst 0xe06a8289 // st1h { za1v.h[x12, #1] }, p0/Z, [x20, x10, LSL #1]\n"
"add x12, x12, #0x2\n"
- "cmp x12, x28\n"
- "add x26, x26, #0x10\n"
- "addvl x21, x21, #2\n"
+ "cmp x12, x27\n"
+ "add x25, x25, #0x10\n"
+ "addvl x20, x20, #2\n"
"blt 6b\n"
"7:" // K loop: Main loop: Second: Tail
".inst 0x25286140 // psel p0.h, p8.h/Z, p10.h[w12]\n"
- ".inst 0xe0560320 // ld1h { za0h.h[x12] }, p0/Z, [x25, x22, LSL #1]\n"
- "mov x26, %x[in]\n"
- "ldr x25, [x26, #0x0]\n"
+ ".inst 0xe0550300 // ld1h { za0h.h[x12] }, p0/Z, [x24, x21, LSL #1]\n"
+ "mov x25, %x[in]\n"
+ "ldr x24, [x25, #0x0]\n"
".inst 0x25386141 // psel p1.h, p8.h/Z, p10.h[w12, #1]\n"
".inst 0x25286d20 // psel p0.h, p11.h/Z, p9.h[w12]\n"
- ".inst 0xe05606e1 // ld1h { za0h.h[x12, #1] }, p1/Z, [x23, x22, LSL #1]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe07f82a8 // st1h { za1v.h[x12] }, p0/Z, [x21, XZR, LSL #1]\n"
+ ".inst 0xe05506c1 // ld1h { za0h.h[x12, #1] }, p1/Z, [x22, x21, LSL #1]\n"
+ "ldr x22, [x25, #0x8]\n"
+ ".inst 0xe07f8288 // st1h { za1v.h[x12] }, p0/Z, [x20, XZR, LSL #1]\n"
".inst 0x25386d20 // psel p0.h, p11.h/Z, p9.h[w12, #1]\n"
- "whilelt p9.h, x27, %x[width]\n"
- "subs x20, x20, #0x1\n"
- "add x26, x26, #0x10\n"
- ".inst 0xe06b82a9 // st1h { za1v.h[x12, #1] }, p0/Z, [x21, x11, LSL #1]\n"
- "addvl x21, x21, #2\n"
- "inch x27\n"
- "inch x22\n"
+ "whilelt p9.h, x26, %x[width]\n"
+ "subs x19, x19, #0x1\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0xe06a8289 // st1h { za1v.h[x12, #1] }, p0/Z, [x20, x10, LSL #1]\n"
+ "addvl x20, x20, #2\n"
+ "inch x26\n"
+ "inch x21\n"
"bgt 3b\n"
"8:" // K loop: Tails
- "cbnz x24, 11f\n"
- "mov x26, %x[in]\n"
- "whilelt p8.h, x27, %x[width]\n"
+ "cbnz x23, 11f\n"
+ "mov x25, %x[in]\n"
+ "whilelt p8.h, x26, %x[width]\n"
"mov x12, #0x0\n"
"9:" // K loop: Tails: Even: First
".inst 0x25286d20 // psel p0.h, p11.h/Z, p9.h[w12]\n"
- ".inst 0xe07f82a0 // st1h { za0v.h[x12] }, p0/Z, [x21, XZR, LSL #1]\n"
- "ldr x25, [x26, #0x0]\n"
+ ".inst 0xe07f8280 // st1h { za0v.h[x12] }, p0/Z, [x20, XZR, LSL #1]\n"
+ "ldr x24, [x25, #0x0]\n"
".inst 0x25286140 // psel p0.h, p8.h/Z, p10.h[w12]\n"
- ".inst 0xe0560328 // ld1h { za1h.h[x12] }, p0/Z, [x25, x22, LSL #1]\n"
+ ".inst 0xe0550308 // ld1h { za1h.h[x12] }, p0/Z, [x24, x21, LSL #1]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x11\n"
- "add x26, x26, #0x8\n"
- "addvl x21, x21, #1\n"
+ "cmp x12, x10\n"
+ "add x25, x25, #0x8\n"
+ "addvl x20, x20, #1\n"
"blt 9b\n"
- "whilelt p9.h, x27, %x[width]\n"
- "whilelt p8.h, x27, %x[width]\n"
+ "whilelt p9.h, x26, %x[width]\n"
+ "whilelt p8.h, x26, %x[width]\n"
"mov x12, #0x0\n"
"10:" // K loop: Tails: Even: Second
".inst 0x25286d20 // psel p0.h, p11.h/Z, p9.h[w12]\n"
- ".inst 0xe07f82a8 // st1h { za1v.h[x12] }, p0/Z, [x21, XZR, LSL #1]\n"
+ ".inst 0xe07f8288 // st1h { za1v.h[x12] }, p0/Z, [x20, XZR, LSL #1]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x10\n"
- "addvl x21, x21, #1\n"
+ "cmp x12, x9\n"
+ "addvl x20, x20, #1\n"
"blt 10b\n"
- "whilelt p9.h, x27, %x[width]\n"
+ "whilelt p9.h, x26, %x[width]\n"
"b 13f\n"
"11:" // K loop: Tails: Odd
"mov x12, #0x0\n"
"12:" // K loop: Tails: Odd: Loop
".inst 0x25286d20 // psel p0.h, p11.h/Z, p9.h[w12]\n"
- ".inst 0xe07f82a0 // st1h { za0v.h[x12] }, p0/Z, [x21, XZR, LSL #1]\n"
+ ".inst 0xe07f8280 // st1h { za0v.h[x12] }, p0/Z, [x20, XZR, LSL #1]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x10\n"
- "addvl x21, x21, #1\n"
+ "cmp x12, x9\n"
+ "addvl x20, x20, #1\n"
"blt 12b\n"
"13:" // K loop: End
- "mov %x[out], x21\n"
+ "mov %x[out], x20\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x12", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
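
The preamble arithmetic in the file above is annotated in the generated source itself: n_passes = ceildiv(width, VL<T>), n_loops = (n_passes - 1) / 2, and odd_tail = bool(n_passes & 0x1). A minimal standalone C++ sketch of the same trip-count computation (names are illustrative, not from the library; vl stands in for the runtime cnth/cntw value, a power of two under streaming SVE):

#include <cstdint>
#include <cassert>

// Sketch of the trip counts computed by the interleave preamble.
struct KLoopCounts {
    uint64_t n_passes;  // ceildiv(width, vl)
    uint64_t n_loops;   // (n_passes - 1) / 2: main-loop iterations
    bool     odd_tail;  // n_passes & 1: one extra half-pass after the loop
    uint64_t tail;      // elements handled by the final pass
};

KLoopCounts k_loop_counts(uint64_t width, uint64_t vl) {
    assert(width > 0 && vl > 0);
    KLoopCounts c;
    c.n_passes = (width + vl - 1) / vl;   // "inch; sub #1; udiv" above
    c.n_loops  = (c.n_passes - 1) >> 1;   // "lsr #0x1" above
    c.odd_tail = (c.n_passes & 1) != 0;   // "and ..., #0x1" above
    uint64_t rem = width & (vl - 1);      // "ands ..."; valid as vl is a power of two
    c.tail = rem ? rem : vl;              // "csel" keeps vl when rem == 0
    return c;
}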
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block2_bf16_bf16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block2_bf16_bf16.hpp
index d0375de76f..81b346c9ba 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block2_bf16_bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block2_bf16_bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#if defined(__ARM_FEATURE_SVE)
@@ -34,186 +34,186 @@ void interleave_block<1, 2, VLType::SME, false>(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cnth x22\n"
- "mov x21, %x[width]\n"
- "inch x21\n"
- "mov x20, %x[width]\n"
- "sub x11, x22, #0x1\n"
- "sub x21, x21, #0x1\n"
- "ands x11, x20, x11\n"
+ "cnth x20\n"
"cntw x10\n"
- "udiv x21, x21, x22\n" // n_passes = ceildiv(width, VL<T>)
- "csel x11, x11, x22, NE\n"
- "sub x9, x21, #0x1\n"
- "add x11, x11, #0x1\n"
- "sub x28, x10, #0x2\n"
- "lsl x20, %x[height], #0x1\n" // height * 2
- "mov x27, #0x0\n"
- "mov x26, %x[in]\n"
- "ldr x25, [x26, #0x0]\n"
- "lsr x9, x9, #0x1\n" // n_loops = (n_passes - 1) / 2
- "and x24, x21, #0x1\n" // odd_tail = bool(n_passes & 0x1)
- "ldr x23, [x26, #0x8]\n"
- "lsr x11, x11, #0x1\n"
+ "mov x19, %x[width]\n"
+ "inch x19\n"
+ "sub x19, x19, #0x1\n"
+ "udiv x19, x19, x20\n" // n_passes = ceildiv(width, VL<T>)
+ "sub x9, x19, #0x1\n"
+ "lsr x9, x9, #0x1\n" // n_loops = (n_passes - 1) / 2
+ "and x28, x19, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+ "mov x19, %x[width]\n"
+ "sub x27, x20, #0x1\n"
+ "ands x27, x19, x27\n"
+ "csel x27, x27, x20, NE\n"
+ "add x27, x27, #0x1\n"
+ "lsr x27, x27, #0x1\n"
+ "sub x26, x10, #0x2\n"
"ptrue p11.s\n"
- "whilelt p10.h, XZR, x20\n"
- "mov x22, %x[row_offset]\n"
- "mov x21, %x[out]\n"
- "whilelt p9.h, x27, %x[width]\n"
- "whilelt p8.h, x27, %x[width]\n"
- "add x26, x26, #0x10\n"
+ "lsl x19, %x[height], #0x1\n" // height * 2
+ "whilelt p10.h, XZR, x19\n"
+ "mov x25, %x[row_offset]\n"
+ "mov x24, %x[out]\n"
+ "mov x23, #0x0\n"
+ "whilelt p9.h, x23, %x[width]\n"
+ "whilelt p8.h, x23, %x[width]\n"
+ "mov x22, %x[in]\n"
+ "ldr x21, [x22, #0x0]\n"
+ "ldr x20, [x22, #0x8]\n"
+ "add x22, x22, #0x10\n"
"mov x12, #0x0\n"
- "cbz x28, 2f\n"
+ "cbz x26, 2f\n"
"1:" // K loop: Charge: Loop
- ".inst 0x25286140 // psel p0.h, p8.h/Z, p10.h[w12]\n"
- ".inst 0xe0560320 // ld1h { za0h.h[x12] }, p0/Z, [x25, x22, LSL #1]\n"
- ".inst 0x25686140 // psel p0.h, p8.h/Z, p10.h[w12, #2]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0xe05602e2 // ld1h { za0h.h[x12, #2] }, p0/Z, [x23, x22, LSL #1]\n"
+ ".inst 0x25286140 // dup p0.h, p8.h/Z, p10.h[w12]\n"
+ ".inst 0xe05902a0 // ld1h { za0h.h[x12] }, p0/Z, [x21, x25, LSL #1]\n"
+ ".inst 0x25686140 // dup p0.h, p8.h/Z, p10.h[w12, #2]\n"
+ "ldr x21, [x22, #0x0]\n"
+ ".inst 0xe0590282 // ld1h { za0h.h[x12, #2] }, p0/Z, [x20, x25, LSL #1]\n"
+ "ldr x20, [x22, #0x8]\n"
+ "add x22, x22, #0x10\n"
"add x12, x12, #0x4\n"
- "cmp x12, x28, LSL #1\n"
- "ldr x23, [x26, #0x8]\n"
- "add x26, x26, #0x10\n"
+ "cmp x12, x26, LSL #1\n"
"blt 1b\n"
"2:" // K loop: Charge: End
- ".inst 0x25286140 // psel p0.h, p8.h/Z, p10.h[w12]\n"
- ".inst 0xe0560320 // ld1h { za0h.h[x12] }, p0/Z, [x25, x22, LSL #1]\n"
- ".inst 0x25686140 // psel p0.h, p8.h/Z, p10.h[w12, #2]\n"
- "mov x26, %x[in]\n"
- ".inst 0xe05602e2 // ld1h { za0h.h[x12, #2] }, p0/Z, [x23, x22, LSL #1]\n"
- "ldr x25, [x26, #0x0]\n"
- "inch x22\n"
- "ldr x23, [x26, #0x8]\n"
- "add x26, x26, #0x10\n"
- "inch x27\n"
+ ".inst 0x25286140 // dup p0.h, p8.h/Z, p10.h[w12]\n"
+ ".inst 0xe05902a0 // ld1h { za0h.h[x12] }, p0/Z, [x21, x25, LSL #1]\n"
+ ".inst 0x25686140 // dup p0.h, p8.h/Z, p10.h[w12, #2]\n"
+ "mov x22, %x[in]\n"
+ ".inst 0xe0590282 // ld1h { za0h.h[x12, #2] }, p0/Z, [x20, x25, LSL #1]\n"
+ "ldr x21, [x22, #0x0]\n"
+ "ldr x20, [x22, #0x8]\n"
+ "add x22, x22, #0x10\n"
+ "inch x25\n"
+ "inch x23\n"
"cbz x9, 8f\n"
- "mov x20, x9\n"
+ "mov x19, x9\n"
"3:" // K loop: Main loop
- "whilelt p8.h, x27, %x[width]\n"
+ "whilelt p8.h, x23, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
- "cbz x28, 5f\n"
+ "cbz x26, 5f\n"
"4:" // K loop: Main loop: First: Loop
- ".inst 0x25396140 // psel p0.h, p8.h/Z, p10.h[w13, #1]\n"
- ".inst 0xe0562321 // ld1h { za0h.h[x13, #1] }, p0/Z, [x25, x22, LSL #1]\n"
- ".inst 0x25796141 // psel p1.h, p8.h/Z, p10.h[w13, #3]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe05626e3 // ld1h { za0h.h[x13, #3] }, p1/Z, [x23, x22, LSL #1]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xe0aa82a1 // st1w { za0v.s[x12, #1] }, p0/Z, [x21, x10, LSL #2]\n"
- "add x12, x12, #0x2\n"
- "cmp x12, x28\n"
- "add x26, x26, #0x10\n"
- "addvl x21, x21, #2\n"
+ ".inst 0x25396140 // dup p0.h, p8.h/Z, p10.h[w13, #1]\n"
+ ".inst 0xe05922a1 // ld1h { za0h.h[x13, #1] }, p0/Z, [x21, x25, LSL #1]\n"
+ ".inst 0x25796140 // dup p0.h, p8.h/Z, p10.h[w13, #3]\n"
+ "ldr x21, [x22, #0x0]\n"
+ ".inst 0xe0592283 // ld1h { za0h.h[x13, #3] }, p0/Z, [x20, x25, LSL #1]\n"
+ "ldr x20, [x22, #0x8]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8300 // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+ ".inst 0x25706d20 // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
"add x13, x13, #0x4\n"
+ ".inst 0xe0aa8301 // st1w { za0v.s[x12, #1] }, p0/Z, [x24, x10, LSL #2]\n"
+ "addvl x24, x24, #2\n"
+ "add x12, x12, #0x2\n"
+ "cmp x12, x26\n"
"blt 4b\n"
"5:" // K loop: Main loop: First: Tail
- ".inst 0x25396140 // psel p0.h, p8.h/Z, p10.h[w13, #1]\n"
- ".inst 0xe0562321 // ld1h { za0h.h[x13, #1] }, p0/Z, [x25, x22, LSL #1]\n"
- "mov x26, %x[in]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0x25796141 // psel p1.h, p8.h/Z, p10.h[w13, #3]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe05626e3 // ld1h { za0h.h[x13, #3] }, p1/Z, [x23, x22, LSL #1]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- "whilelt p9.h, x27, %x[width]\n"
- "inch x27\n"
- "add x26, x26, #0x10\n"
- ".inst 0xe0aa82a1 // st1w { za0v.s[x12, #1] }, p0/Z, [x21, x10, LSL #2]\n"
- "addvl x21, x21, #2\n"
- "inch x22\n"
- "whilelt p8.h, x27, %x[width]\n"
+ "mov x22, %x[in]\n"
+ ".inst 0x25396140 // dup p0.h, p8.h/Z, p10.h[w13, #1]\n"
+ ".inst 0xe05922a1 // ld1h { za0h.h[x13, #1] }, p0/Z, [x21, x25, LSL #1]\n"
+ ".inst 0x25796140 // dup p0.h, p8.h/Z, p10.h[w13, #3]\n"
+ "ldr x21, [x22, #0x0]\n"
+ ".inst 0xe0592283 // ld1h { za0h.h[x13, #3] }, p0/Z, [x20, x25, LSL #1]\n"
+ "ldr x20, [x22, #0x8]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8300 // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+ ".inst 0x25706d20 // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ "whilelt p9.h, x23, %x[width]\n"
+ ".inst 0xe0aa8301 // st1w { za0v.s[x12, #1] }, p0/Z, [x24, x10, LSL #2]\n"
+ "addvl x24, x24, #2\n"
+ "inch x23\n"
+ "inch x25\n"
+ "whilelt p8.h, x23, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
- "cbz x28, 7f\n"
+ "cbz x26, 7f\n"
"6:" // K loop: Main loop: Second: Loop
- ".inst 0x25296140 // psel p0.h, p8.h/Z, p10.h[w13]\n"
- ".inst 0xe0562320 // ld1h { za0h.h[x13] }, p0/Z, [x25, x22, LSL #1]\n"
- ".inst 0x25696141 // psel p1.h, p8.h/Z, p10.h[w13, #2]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe05626e2 // ld1h { za0h.h[x13, #2] }, p1/Z, [x23, x22, LSL #1]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xe0aa82a9 // st1w { za2v.s[x12, #1] }, p0/Z, [x21, x10, LSL #2]\n"
- "add x12, x12, #0x2\n"
- "cmp x12, x28\n"
- "add x26, x26, #0x10\n"
- "addvl x21, x21, #2\n"
+ ".inst 0x25296140 // dup p0.h, p8.h/Z, p10.h[w13]\n"
+ ".inst 0xe05922a0 // ld1h { za0h.h[x13] }, p0/Z, [x21, x25, LSL #1]\n"
+ ".inst 0x25696140 // dup p0.h, p8.h/Z, p10.h[w13, #2]\n"
+ "ldr x21, [x22, #0x0]\n"
+ ".inst 0xe0592282 // ld1h { za0h.h[x13, #2] }, p0/Z, [x20, x25, LSL #1]\n"
+ "ldr x20, [x22, #0x8]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8308 // st1w { za2v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+ ".inst 0x25706d20 // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
"add x13, x13, #0x4\n"
+ ".inst 0xe0aa8309 // st1w { za2v.s[x12, #1] }, p0/Z, [x24, x10, LSL #2]\n"
+ "addvl x24, x24, #2\n"
+ "add x12, x12, #0x2\n"
+ "cmp x12, x26\n"
"blt 6b\n"
"7:" // K loop: Main loop: Second: Tail
- ".inst 0x25296140 // psel p0.h, p8.h/Z, p10.h[w13]\n"
- ".inst 0xe0562320 // ld1h { za0h.h[x13] }, p0/Z, [x25, x22, LSL #1]\n"
- "mov x26, %x[in]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0x25696141 // psel p1.h, p8.h/Z, p10.h[w13, #2]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe05626e2 // ld1h { za0h.h[x13, #2] }, p1/Z, [x23, x22, LSL #1]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- "whilelt p9.h, x27, %x[width]\n"
- "subs x20, x20, #0x1\n"
- "add x26, x26, #0x10\n"
- ".inst 0xe0aa82a9 // st1w { za2v.s[x12, #1] }, p0/Z, [x21, x10, LSL #2]\n"
- "addvl x21, x21, #2\n"
- "inch x27\n"
- "inch x22\n"
+ "mov x22, %x[in]\n"
+ ".inst 0x25296140 // dup p0.h, p8.h/Z, p10.h[w13]\n"
+ ".inst 0xe05922a0 // ld1h { za0h.h[x13] }, p0/Z, [x21, x25, LSL #1]\n"
+ ".inst 0x25696140 // dup p0.h, p8.h/Z, p10.h[w13, #2]\n"
+ "ldr x21, [x22, #0x0]\n"
+ ".inst 0xe0592282 // ld1h { za0h.h[x13, #2] }, p0/Z, [x20, x25, LSL #1]\n"
+ "ldr x20, [x22, #0x8]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8308 // st1w { za2v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+ ".inst 0x25706d20 // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ "whilelt p9.h, x23, %x[width]\n"
+ ".inst 0xe0aa8309 // st1w { za2v.s[x12, #1] }, p0/Z, [x24, x10, LSL #2]\n"
+ "addvl x24, x24, #2\n"
+ "inch x23\n"
+ "inch x25\n"
+ "subs x19, x19, #0x1\n"
"bgt 3b\n"
"8:" // K loop: Tails
- "cbnz x24, 11f\n"
- "mov x26, %x[in]\n"
- "whilelt p8.h, x27, %x[width]\n"
+ "cbnz x28, 11f\n"
+ "mov x22, %x[in]\n"
+ "whilelt p8.h, x23, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
"9:" // K loop: Tails: Even: First
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- "ldr x25, [x26, #0x0]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8300 // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+ ".inst 0x25396140 // dup p0.h, p8.h/Z, p10.h[w13, #1]\n"
+ "addvl x24, x24, #1\n"
+ "ldr x21, [x22, #0x0]\n"
+ ".inst 0xe05922a1 // ld1h { za0h.h[x13, #1] }, p0/Z, [x21, x25, LSL #1]\n"
+ "add x22, x22, #0x8\n"
+ "add x13, x13, #0x2\n"
"add x12, x12, #0x1\n"
- ".inst 0x25396140 // psel p0.h, p8.h/Z, p10.h[w13, #1]\n"
"cmp x12, x10\n"
- ".inst 0xe0562321 // ld1h { za0h.h[x13, #1] }, p0/Z, [x25, x22, LSL #1]\n"
- "add x26, x26, #0x8\n"
- "addvl x21, x21, #1\n"
- "add x13, x13, #0x2\n"
"blt 9b\n"
- "whilelt p9.h, x27, %x[width]\n"
- "whilelt p8.h, x27, %x[width]\n"
- "mov x20, #0x0\n"
+ "whilelt p9.h, x23, %x[width]\n"
+ "whilelt p8.h, x23, %x[width]\n"
+ "mov x19, #0x0\n"
"mov x12, #0x0\n"
"10:" // K loop: Tails: Even: Second
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8308 // st1w { za2v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+ "addvl x24, x24, #1\n"
+ "add x19, x19, #0x2\n"
"add x12, x12, #0x1\n"
- "cmp x12, x11\n"
- "addvl x21, x21, #1\n"
- "add x20, x20, #0x2\n"
+ "cmp x12, x27\n"
"blt 10b\n"
- "whilelt p9.h, x27, %x[width]\n"
+ "whilelt p9.h, x23, %x[width]\n"
"b 13f\n"
"11:" // K loop: Tails: Odd
"mov x12, #0x0\n"
"12:" // K loop: Tails: Odd: Loop
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8300 // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+ "addvl x24, x24, #1\n"
"add x12, x12, #0x1\n"
- "cmp x12, x11\n"
- "addvl x21, x21, #1\n"
+ "cmp x12, x27\n"
"blt 12b\n"
"13:" // K loop: End
- "mov %x[out], x21\n"
+ "mov %x[out], x24\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p8", "p9", "p10", "p11", "x9", "x10", "x12", "x13", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
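
The whilelt instructions throughout the file above build a per-lane predicate that stays true while index + lane < width, which is how these kernels handle ragged tails without scalar cleanup code. Its effect, sketched in C++ (illustrative only; the architectural compare is signed, but all values involved here are non-negative, so an unsigned model behaves identically):

#include <cstdint>
#include <vector>

// Sketch of WHILELT semantics: lane i of the predicate is active
// while (base + i) < width, for a vector of 'vl' lanes.
static std::vector<bool> whilelt(uint64_t base, uint64_t width, uint64_t vl) {
    std::vector<bool> p(vl);
    for (uint64_t i = 0; i < vl; i++) {
        p[i] = (base + i) < width;
    }
    return p;
}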
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_s8_s8.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_s8_s8.hpp
index 622d9aa4fc..bee3cc5649 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_s8_s8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_s8_s8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#if defined(__ARM_FEATURE_SVE)
@@ -34,189 +34,189 @@ void interleave_block<1, 4, VLType::SME, false>(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cntb x21\n"
- "mov x23, %x[width]\n"
- "incb x23\n"
- "mov x20, %x[width]\n"
- "sub x10, x21, #0x1\n"
- "cntw x9\n"
- "sub x23, x23, #0x1\n"
- "ands x10, x20, x10\n"
- "udiv x23, x23, x21\n" // n_passes = ceildiv(width, VL<T>)
- "csel x10, x10, x21, NE\n"
- "lsl x22, %x[height], #0x1\n" // height * 2
- "lsl x21, x9, #0x1\n"
- "sub x20, x23, #0x1\n"
- "add x10, x10, #0x3\n"
- "sub x28, x9, #0x2\n"
- "whilelt p9.b, XZR, x22\n"
- "whilelt p8.b, x21, x22\n"
- "mov x27, #0x0\n"
- "mov x26, %x[in]\n"
- "lsr x20, x20, #0x1\n" // n_loops = (n_passes - 1) / 2
- "ldr x25, [x26, #0x0]\n"
- "and x24, x23, #0x1\n" // odd_tail = bool(n_passes & 0x1)
- "lsr x10, x10, #0x2\n"
- "ldr x23, [x26, #0x8]\n"
+ "cntb x20\n"
+ "mov x22, %x[width]\n"
+ "incb x22\n"
+ "mov x19, %x[width]\n"
+ "sub x9, x20, #0x1\n"
+ "cntw x28\n"
+ "sub x22, x22, #0x1\n"
+ "ands x9, x19, x9\n"
+ "udiv x22, x22, x20\n" // n_passes = ceildiv(width, VL<T>)
+ "csel x9, x9, x20, NE\n"
+ "lsl x21, %x[height], #0x1\n" // height * 2
+ "lsl x20, x28, #0x1\n"
+ "sub x19, x22, #0x1\n"
+ "add x9, x9, #0x3\n"
+ "sub x27, x28, #0x2\n"
+ "whilelt p9.b, XZR, x21\n"
+ "whilelt p8.b, x20, x21\n"
+ "mov x26, #0x0\n"
+ "mov x25, %x[in]\n"
+ "lsr x19, x19, #0x1\n" // n_loops = (n_passes - 1) / 2
+ "ldr x24, [x25, #0x0]\n"
+ "and x23, x22, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+ "lsr x9, x9, #0x2\n"
+ "ldr x22, [x25, #0x8]\n"
"ptrue p11.s\n"
"zip1 p10.b, p9.b, p8.b\n"
- "mov x22, %x[row_offset]\n"
- "mov x21, %x[out]\n"
- "whilelt p9.b, x27, %x[width]\n"
- "whilelt p8.b, x27, %x[width]\n"
- "add x26, x26, #0x10\n"
+ "mov x21, %x[row_offset]\n"
+ "mov x20, %x[out]\n"
+ "whilelt p9.b, x26, %x[width]\n"
+ "whilelt p8.b, x26, %x[width]\n"
+ "add x25, x25, #0x10\n"
"mov x12, #0x0\n"
- "cbz x28, 2f\n"
+ "cbz x27, 2f\n"
"1:" // K loop: Charge: Loop
".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe0160320 // ld1b { za0h.b[x12] }, p0/Z, [x25, x22]\n"
+ ".inst 0xe0150300 // ld1b { za0h.b[x12] }, p0/Z, [x24, x21]\n"
".inst 0x25646140 // psel p0.b, p8.b/Z, p10.b[w12, #4]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0xe01602e4 // ld1b { za0h.b[x12, #4] }, p0/Z, [x23, x22]\n"
+ "ldr x24, [x25, #0x0]\n"
+ ".inst 0xe01502c4 // ld1b { za0h.b[x12, #4] }, p0/Z, [x22, x21]\n"
"add x12, x12, #0x8\n"
- "cmp x12, x28, LSL #2\n"
- "ldr x23, [x26, #0x8]\n"
- "add x26, x26, #0x10\n"
+ "cmp x12, x27, LSL #2\n"
+ "ldr x22, [x25, #0x8]\n"
+ "add x25, x25, #0x10\n"
"blt 1b\n"
"2:" // K loop: Charge: End
".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe0160320 // ld1b { za0h.b[x12] }, p0/Z, [x25, x22]\n"
+ ".inst 0xe0150300 // ld1b { za0h.b[x12] }, p0/Z, [x24, x21]\n"
".inst 0x25646140 // psel p0.b, p8.b/Z, p10.b[w12, #4]\n"
- "mov x26, %x[in]\n"
- ".inst 0xe01602e4 // ld1b { za0h.b[x12, #4] }, p0/Z, [x23, x22]\n"
- "ldr x25, [x26, #0x0]\n"
- "incb x22\n"
- "ldr x23, [x26, #0x8]\n"
- "add x26, x26, #0x10\n"
- "incb x27\n"
- "cbz x20, 8f\n"
- "mov x20, x20\n"
+ "mov x25, %x[in]\n"
+ ".inst 0xe01502c4 // ld1b { za0h.b[x12, #4] }, p0/Z, [x22, x21]\n"
+ "ldr x24, [x25, #0x0]\n"
+ "incb x21\n"
+ "ldr x22, [x25, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "incb x26\n"
+ "cbz x19, 8f\n"
+ "mov x19, x19\n"
"3:" // K loop: Main loop
- "whilelt p8.b, x27, %x[width]\n"
+ "whilelt p8.b, x26, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
- "cbz x28, 5f\n"
+ "cbz x27, 5f\n"
"4:" // K loop: Main loop: First: Loop
".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe0162322 // ld1b { za0h.b[x13, #2] }, p0/Z, [x25, x22]\n"
+ ".inst 0xe0152302 // ld1b { za0h.b[x13, #2] }, p0/Z, [x24, x21]\n"
".inst 0x25756141 // psel p1.b, p8.b/Z, p10.b[w13, #6]\n"
- "ldr x25, [x26, #0x0]\n"
+ "ldr x24, [x25, #0x0]\n"
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe01626e6 // ld1b { za0h.b[x13, #6] }, p1/Z, [x23, x22]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
+ ".inst 0xe01526c6 // ld1b { za0h.b[x13, #6] }, p1/Z, [x22, x21]\n"
+ "ldr x22, [x25, #0x8]\n"
+ ".inst 0xe0bf8280 // st1w { za0v.s[x12] }, p0/Z, [x20, XZR, LSL #2]\n"
".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xe0a982a1 // st1w { za0v.s[x12, #1] }, p0/Z, [x21, x9, LSL #2]\n"
+ ".inst 0xe0bc8281 // st1w { za0v.s[x12, #1] }, p0/Z, [x20, x28, LSL #2]\n"
"add x12, x12, #0x2\n"
- "cmp x12, x28\n"
- "add x26, x26, #0x10\n"
- "addvl x21, x21, #2\n"
+ "cmp x12, x27\n"
+ "add x25, x25, #0x10\n"
+ "addvl x20, x20, #2\n"
"add x13, x13, #0x8\n"
"blt 4b\n"
"5:" // K loop: Main loop: First: Tail
".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe0162322 // ld1b { za0h.b[x13, #2] }, p0/Z, [x25, x22]\n"
- "mov x26, %x[in]\n"
- "ldr x25, [x26, #0x0]\n"
+ ".inst 0xe0152302 // ld1b { za0h.b[x13, #2] }, p0/Z, [x24, x21]\n"
+ "mov x25, %x[in]\n"
+ "ldr x24, [x25, #0x0]\n"
".inst 0x25756141 // psel p1.b, p8.b/Z, p10.b[w13, #6]\n"
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe01626e6 // ld1b { za0h.b[x13, #6] }, p1/Z, [x23, x22]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
+ ".inst 0xe01526c6 // ld1b { za0h.b[x13, #6] }, p1/Z, [x22, x21]\n"
+ "ldr x22, [x25, #0x8]\n"
+ ".inst 0xe0bf8280 // st1w { za0v.s[x12] }, p0/Z, [x20, XZR, LSL #2]\n"
".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- "whilelt p9.b, x27, %x[width]\n"
- "incb x27\n"
- "add x26, x26, #0x10\n"
- ".inst 0xe0a982a1 // st1w { za0v.s[x12, #1] }, p0/Z, [x21, x9, LSL #2]\n"
- "addvl x21, x21, #2\n"
- "incb x22\n"
- "whilelt p8.b, x27, %x[width]\n"
+ "whilelt p9.b, x26, %x[width]\n"
+ "incb x26\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0xe0bc8281 // st1w { za0v.s[x12, #1] }, p0/Z, [x20, x28, LSL #2]\n"
+ "addvl x20, x20, #2\n"
+ "incb x21\n"
+ "whilelt p8.b, x26, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
- "cbz x28, 7f\n"
+ "cbz x27, 7f\n"
"6:" // K loop: Main loop: Second: Loop
".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
- ".inst 0xe0162320 // ld1b { za0h.b[x13] }, p0/Z, [x25, x22]\n"
+ ".inst 0xe0152300 // ld1b { za0h.b[x13] }, p0/Z, [x24, x21]\n"
".inst 0x25656141 // psel p1.b, p8.b/Z, p10.b[w13, #4]\n"
- "ldr x25, [x26, #0x0]\n"
+ "ldr x24, [x25, #0x0]\n"
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe01626e4 // ld1b { za0h.b[x13, #4] }, p1/Z, [x23, x22]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
+ ".inst 0xe01526c4 // ld1b { za0h.b[x13, #4] }, p1/Z, [x22, x21]\n"
+ "ldr x22, [x25, #0x8]\n"
+ ".inst 0xe0bf8288 // st1w { za2v.s[x12] }, p0/Z, [x20, XZR, LSL #2]\n"
".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xe0a982a9 // st1w { za2v.s[x12, #1] }, p0/Z, [x21, x9, LSL #2]\n"
+ ".inst 0xe0bc8289 // st1w { za2v.s[x12, #1] }, p0/Z, [x20, x28, LSL #2]\n"
"add x12, x12, #0x2\n"
- "cmp x12, x28\n"
- "add x26, x26, #0x10\n"
- "addvl x21, x21, #2\n"
+ "cmp x12, x27\n"
+ "add x25, x25, #0x10\n"
+ "addvl x20, x20, #2\n"
"add x13, x13, #0x8\n"
"blt 6b\n"
"7:" // K loop: Main loop: Second: Tail
".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
- ".inst 0xe0162320 // ld1b { za0h.b[x13] }, p0/Z, [x25, x22]\n"
- "mov x26, %x[in]\n"
- "ldr x25, [x26, #0x0]\n"
+ ".inst 0xe0152300 // ld1b { za0h.b[x13] }, p0/Z, [x24, x21]\n"
+ "mov x25, %x[in]\n"
+ "ldr x24, [x25, #0x0]\n"
".inst 0x25656141 // psel p1.b, p8.b/Z, p10.b[w13, #4]\n"
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe01626e4 // ld1b { za0h.b[x13, #4] }, p1/Z, [x23, x22]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
+ ".inst 0xe01526c4 // ld1b { za0h.b[x13, #4] }, p1/Z, [x22, x21]\n"
+ "ldr x22, [x25, #0x8]\n"
+ ".inst 0xe0bf8288 // st1w { za2v.s[x12] }, p0/Z, [x20, XZR, LSL #2]\n"
".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- "whilelt p9.b, x27, %x[width]\n"
- "subs x20, x20, #0x1\n"
- "add x26, x26, #0x10\n"
- ".inst 0xe0a982a9 // st1w { za2v.s[x12, #1] }, p0/Z, [x21, x9, LSL #2]\n"
- "addvl x21, x21, #2\n"
- "incb x27\n"
- "incb x22\n"
+ "whilelt p9.b, x26, %x[width]\n"
+ "subs x19, x19, #0x1\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0xe0bc8289 // st1w { za2v.s[x12, #1] }, p0/Z, [x20, x28, LSL #2]\n"
+ "addvl x20, x20, #2\n"
+ "incb x26\n"
+ "incb x21\n"
"bgt 3b\n"
"8:" // K loop: Tails
- "cbnz x24, 11f\n"
- "mov x26, %x[in]\n"
- "whilelt p8.b, x27, %x[width]\n"
+ "cbnz x23, 11f\n"
+ "mov x25, %x[in]\n"
+ "whilelt p8.b, x26, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
"9:" // K loop: Tails: Even: First
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- "ldr x25, [x26, #0x0]\n"
+ ".inst 0xe0bf8280 // st1w { za0v.s[x12] }, p0/Z, [x20, XZR, LSL #2]\n"
+ "ldr x24, [x25, #0x0]\n"
"add x12, x12, #0x1\n"
".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- "cmp x12, x9\n"
- ".inst 0xe0162322 // ld1b { za0h.b[x13, #2] }, p0/Z, [x25, x22]\n"
- "add x26, x26, #0x8\n"
- "addvl x21, x21, #1\n"
+ "cmp x12, x28\n"
+ ".inst 0xe0152302 // ld1b { za0h.b[x13, #2] }, p0/Z, [x24, x21]\n"
+ "add x25, x25, #0x8\n"
+ "addvl x20, x20, #1\n"
"add x13, x13, #0x4\n"
"blt 9b\n"
- "whilelt p9.b, x27, %x[width]\n"
- "whilelt p8.b, x27, %x[width]\n"
- "mov x20, #0x0\n"
+ "whilelt p9.b, x26, %x[width]\n"
+ "whilelt p8.b, x26, %x[width]\n"
+ "mov x19, #0x0\n"
"mov x12, #0x0\n"
"10:" // K loop: Tails: Even: Second
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
+ ".inst 0xe0bf8288 // st1w { za2v.s[x12] }, p0/Z, [x20, XZR, LSL #2]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x10\n"
- "addvl x21, x21, #1\n"
- "add x20, x20, #0x4\n"
+ "cmp x12, x9\n"
+ "addvl x20, x20, #1\n"
+ "add x19, x19, #0x4\n"
"blt 10b\n"
- "whilelt p9.b, x27, %x[width]\n"
+ "whilelt p9.b, x26, %x[width]\n"
"b 13f\n"
"11:" // K loop: Tails: Odd
"mov x12, #0x0\n"
"12:" // K loop: Tails: Odd: Loop
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
+ ".inst 0xe0bf8280 // st1w { za0v.s[x12] }, p0/Z, [x20, XZR, LSL #2]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x10\n"
- "addvl x21, x21, #1\n"
+ "cmp x12, x9\n"
+ "addvl x20, x20, #1\n"
"blt 12b\n"
"13:" // K loop: End
- "mov %x[out], x21\n"
+ "mov %x[out], x20\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x12", "x13", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
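
In the block-4 preamble above, the "add ..., #0x3" / "lsr ..., #0x2" pair rounds the tail up to whole 4-byte blocks before it is used as the store-loop bound. The same rounding in C++ (a sketch, not library code):

#include <cstdint>

// Round a tail of 'rem' int8 elements up to whole blocks of 4,
// mirroring "add x, x, #0x3; lsr x, x, #0x2" in the kernel preamble.
constexpr uint64_t tail_blocks_of_4(uint64_t rem) {
    return (rem + 3) >> 2;
}

static_assert(tail_blocks_of_4(1) == 1, "1..4 bytes -> one block");
static_assert(tail_blocks_of_4(4) == 1, "exactly one block");
static_assert(tail_blocks_of_4(5) == 2, "5..8 bytes -> two blocks");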
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_s8_s8_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_s8_s8_summing.hpp
index 07f03702d9..3ba1b98b73 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_s8_s8_summing.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_s8_s8_summing.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#if defined(__ARM_FEATURE_SVE)
@@ -32,220 +32,220 @@ void interleave_block<1, 4, VLType::SME, true>(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cntb x21\n"
- "mov x23, %x[width]\n"
"mov z18.b, #0x1\n"
- "incb x23\n"
- "mov x20, %x[width]\n"
"mov z17.s, #0x0\n"
- "sub x10, x21, #0x1\n"
- "cntw x9\n"
- "sub x23, x23, #0x1\n"
- "ands x10, x20, x10\n"
- "udiv x23, x23, x21\n" // n_passes = ceildiv(width, VL<T>)
- "csel x10, x10, x21, NE\n"
- "lsl x22, %x[height], #0x1\n" // height * 2
- "lsl x21, x9, #0x1\n"
- "sub x20, x23, #0x1\n"
- "add x10, x10, #0x3\n"
- "whilelt p9.b, XZR, x22\n"
- "whilelt p8.b, x21, x22\n"
- "mov x28, #0x0\n"
- "ptrue p2.b\n"
- "lsr x20, x20, #0x1\n" // n_loops = (n_passes - 1) / 2
- "and x27, x23, #0x1\n" // odd_tail = bool(n_passes & 0x1)
- "lsr x10, x10, #0x2\n"
- "sub x26, x9, #0x2\n"
+ "cntb x20\n"
+ "cntw x10\n"
+ "ptrue p1.b\n"
+ "mov x19, %x[width]\n"
+ "incb x19\n"
+ "sub x19, x19, #0x1\n"
+ "udiv x19, x19, x20\n" // n_passes = ceildiv(width, VL<T>)
+ "sub x9, x19, #0x1\n"
+ "lsr x9, x9, #0x1\n" // n_loops = (n_passes - 1) / 2
+ "and x28, x19, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+ "mov x19, %x[width]\n"
+ "sub x27, x20, #0x1\n"
+ "ands x27, x19, x27\n"
+ "csel x27, x27, x20, NE\n"
+ "add x27, x27, #0x3\n"
+ "lsr x27, x27, #0x2\n"
+ "sub x26, x10, #0x2\n"
"ptrue p11.s\n"
+ "lsl x20, %x[height], #0x1\n" // height * 2
+ "lsl x19, x10, #0x1\n"
+ "whilelt p9.b, XZR, x20\n"
+ "whilelt p8.b, x19, x20\n"
"zip1 p10.b, p9.b, p8.b\n"
"mov x25, %x[row_offset]\n"
"mov x24, %x[out]\n"
- "whilelt p9.b, x28, %x[width]\n"
- "whilelt p8.b, x28, %x[width]\n"
+ "mov x23, #0x0\n"
+ "whilelt p9.b, x23, %x[width]\n"
+ "whilelt p8.b, x23, %x[width]\n"
"cbnz %x[first], 1f\n"
"addvl x24, x24, #-1\n"
- "ld1w { z17.s }, p2/Z, [x24]\n"
+ "ld1w { z17.s }, p1/Z, [x24]\n"
"1:" // K loop: Load row sums: End
- "mov x23, %x[in]\n"
- "ldr x22, [x23, #0x0]\n"
+ "mov x22, %x[in]\n"
+ "ldr x21, [x22, #0x0]\n"
+ "ldr x20, [x22, #0x8]\n"
+ "add x22, x22, #0x10\n"
"mov x12, #0x0\n"
- "ldr x21, [x23, #0x8]\n"
- "add x23, x23, #0x10\n"
"cbz x26, 3f\n"
"2:" // K loop: Charge: Loop
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe01902c0 // ld1b { za0h.b[x12] }, p0/Z, [x22, x25]\n"
- ".inst 0x25646140 // psel p0.b, p8.b/Z, p10.b[w12, #4]\n"
- "ldr x22, [x23, #0x0]\n"
- ".inst 0xe01902a4 // ld1b { za0h.b[x12, #4] }, p0/Z, [x21, x25]\n"
+ ".inst 0x25246140 // dup p0.b, p8.b/Z, p10.b[w12]\n"
+ ".inst 0xe01902a0 // ld1b { za0h.b[x12] }, p0/Z, [x21, x25]\n"
+ ".inst 0x25646140 // dup p0.b, p8.b/Z, p10.b[w12, #4]\n"
+ "ldr x21, [x22, #0x0]\n"
+ ".inst 0xe0190284 // ld1b { za0h.b[x12, #4] }, p0/Z, [x20, x25]\n"
+ "ldr x20, [x22, #0x8]\n"
+ "add x22, x22, #0x10\n"
"add x12, x12, #0x8\n"
"cmp x12, x26, LSL #2\n"
- "ldr x21, [x23, #0x8]\n"
- "add x23, x23, #0x10\n"
"blt 2b\n"
"3:" // K loop: Charge: End
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe01902c0 // ld1b { za0h.b[x12] }, p0/Z, [x22, x25]\n"
- ".inst 0x25646140 // psel p0.b, p8.b/Z, p10.b[w12, #4]\n"
- "mov x23, %x[in]\n"
- ".inst 0xe01902a4 // ld1b { za0h.b[x12, #4] }, p0/Z, [x21, x25]\n"
- "ldr x22, [x23, #0x0]\n"
+ ".inst 0x25246140 // dup p0.b, p8.b/Z, p10.b[w12]\n"
+ ".inst 0xe01902a0 // ld1b { za0h.b[x12] }, p0/Z, [x21, x25]\n"
+ ".inst 0x25646140 // dup p0.b, p8.b/Z, p10.b[w12, #4]\n"
+ "mov x22, %x[in]\n"
+ ".inst 0xe0190284 // ld1b { za0h.b[x12, #4] }, p0/Z, [x20, x25]\n"
+ "ldr x21, [x22, #0x0]\n"
+ "ldr x20, [x22, #0x8]\n"
+ "add x22, x22, #0x10\n"
"incb x25\n"
- "ldr x21, [x23, #0x8]\n"
- "add x23, x23, #0x10\n"
- "incb x28\n"
- "cbz x20, 9f\n"
- "mov x20, x20\n"
+ "incb x23\n"
+ "cbz x9, 9f\n"
+ "mov x19, x9\n"
"4:" // K loop: Main loop
- "whilelt p8.b, x28, %x[width]\n"
+ "whilelt p8.b, x23, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
"cbz x26, 6f\n"
"5:" // K loop: Main loop: First: Loop
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe01922c2 // ld1b { za0h.b[x13, #2] }, p0/Z, [x22, x25]\n"
- ".inst 0x25756140 // psel p0.b, p8.b/Z, p10.b[w13, #6]\n"
- "ldr x22, [x23, #0x0]\n"
- ".inst 0xe01922a6 // ld1b { za0h.b[x13, #6] }, p0/Z, [x21, x25]\n"
- ".inst 0xc0828810 // mova z16.s, p2/M, za0v.s[x12]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- "sdot z17.s, z16.b, z18.b\n"
- "ldr x21, [x23, #0x8]\n"
+ ".inst 0x25356140 // dup p0.b, p8.b/Z, p10.b[w13, #2]\n"
+ ".inst 0xe01922a2 // ld1b { za0h.b[x13, #2] }, p0/Z, [x21, x25]\n"
+ ".inst 0x25756140 // dup p0.b, p8.b/Z, p10.b[w13, #6]\n"
+ "ldr x21, [x22, #0x0]\n"
+ ".inst 0xe0192286 // ld1b { za0h.b[x13, #6] }, p0/Z, [x20, x25]\n"
+ "ldr x20, [x22, #0x8]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0xc0828410 // mova z16.s, p1/M, za0v.s[x12]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
".inst 0xe0bf8300 // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xc0828830 // mova z16.s, p2/M, za0v.s[x12, #1]\n"
- ".inst 0xe0a98301 // st1w { za0v.s[x12, #1] }, p0/Z, [x24, x9, LSL #2]\n"
+ ".inst 0x25706d20 // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ "add x13, x13, #0x8\n"
+ ".inst 0xe0aa8301 // st1w { za0v.s[x12, #1] }, p0/Z, [x24, x10, LSL #2]\n"
+ "sdot z17.s, z16.b, z18.b\n"
+ ".inst 0xc0828430 // mova z16.s, p1/M, za0v.s[x12, #1]\n"
+ "addvl x24, x24, #2\n"
"add x12, x12, #0x2\n"
"cmp x12, x26\n"
"sdot z17.s, z16.b, z18.b\n"
- "add x23, x23, #0x10\n"
- "addvl x24, x24, #2\n"
- "add x13, x13, #0x8\n"
"blt 5b\n"
"6:" // K loop: Main loop: First: Tail
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe01922c2 // ld1b { za0h.b[x13, #2] }, p0/Z, [x22, x25]\n"
- ".inst 0x25756140 // psel p0.b, p8.b/Z, p10.b[w13, #6]\n"
- ".inst 0xe01922a6 // ld1b { za0h.b[x13, #6] }, p0/Z, [x21, x25]\n"
- ".inst 0xc0828810 // mova z16.s, p2/M, za0v.s[x12]\n"
- "sdot z17.s, z16.b, z18.b\n"
- "mov x23, %x[in]\n"
- ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
- "ldr x22, [x23, #0x0]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xc0828830 // mova z16.s, p2/M, za0v.s[x12, #1]\n"
- "ldr x21, [x23, #0x8]\n"
- ".inst 0xe0bf8700 // st1w { za0v.s[x12] }, p1/Z, [x24, XZR, LSL #2]\n"
- "whilelt p9.b, x28, %x[width]\n"
- "incb x28\n"
- "add x23, x23, #0x10\n"
- ".inst 0xe0a98301 // st1w { za0v.s[x12, #1] }, p0/Z, [x24, x9, LSL #2]\n"
+ "mov x22, %x[in]\n"
+ ".inst 0x25356140 // dup p0.b, p8.b/Z, p10.b[w13, #2]\n"
+ ".inst 0xe01922a2 // ld1b { za0h.b[x13, #2] }, p0/Z, [x21, x25]\n"
+ ".inst 0x25756140 // dup p0.b, p8.b/Z, p10.b[w13, #6]\n"
+ "ldr x21, [x22, #0x0]\n"
+ ".inst 0xe0192286 // ld1b { za0h.b[x13, #6] }, p0/Z, [x20, x25]\n"
+ "ldr x20, [x22, #0x8]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0xc0828410 // mova z16.s, p1/M, za0v.s[x12]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8300 // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+ ".inst 0x25706d20 // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ "whilelt p9.b, x23, %x[width]\n"
+ ".inst 0xe0aa8301 // st1w { za0v.s[x12, #1] }, p0/Z, [x24, x10, LSL #2]\n"
"sdot z17.s, z16.b, z18.b\n"
+ ".inst 0xc0828430 // mova z16.s, p1/M, za0v.s[x12, #1]\n"
"addvl x24, x24, #2\n"
+ "incb x23\n"
"incb x25\n"
- "whilelt p8.b, x28, %x[width]\n"
+ "sdot z17.s, z16.b, z18.b\n"
+ "whilelt p8.b, x23, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
"cbz x26, 8f\n"
"7:" // K loop: Main loop: Second: Loop
- ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
- ".inst 0xe01922c0 // ld1b { za0h.b[x13] }, p0/Z, [x22, x25]\n"
- ".inst 0x25656140 // psel p0.b, p8.b/Z, p10.b[w13, #4]\n"
- "ldr x22, [x23, #0x0]\n"
- ".inst 0xe01922a4 // ld1b { za0h.b[x13, #4] }, p0/Z, [x21, x25]\n"
- ".inst 0xc0828910 // mova z16.s, p2/M, za2v.s[x12]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- "sdot z17.s, z16.b, z18.b\n"
- "ldr x21, [x23, #0x8]\n"
+ ".inst 0x25256140 // dup p0.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0xe01922a0 // ld1b { za0h.b[x13] }, p0/Z, [x21, x25]\n"
+ ".inst 0x25656140 // dup p0.b, p8.b/Z, p10.b[w13, #4]\n"
+ "ldr x21, [x22, #0x0]\n"
+ ".inst 0xe0192284 // ld1b { za0h.b[x13, #4] }, p0/Z, [x20, x25]\n"
+ "ldr x20, [x22, #0x8]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0xc0828510 // mova z16.s, p1/M, za2v.s[x12]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
".inst 0xe0bf8308 // st1w { za2v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xc0828930 // mova z16.s, p2/M, za2v.s[x12, #1]\n"
- ".inst 0xe0a98309 // st1w { za2v.s[x12, #1] }, p0/Z, [x24, x9, LSL #2]\n"
+ ".inst 0x25706d20 // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ "add x13, x13, #0x8\n"
+ ".inst 0xe0aa8309 // st1w { za2v.s[x12, #1] }, p0/Z, [x24, x10, LSL #2]\n"
+ "sdot z17.s, z16.b, z18.b\n"
+ ".inst 0xc0828530 // mova z16.s, p1/M, za2v.s[x12, #1]\n"
+ "addvl x24, x24, #2\n"
"add x12, x12, #0x2\n"
"cmp x12, x26\n"
"sdot z17.s, z16.b, z18.b\n"
- "add x23, x23, #0x10\n"
- "addvl x24, x24, #2\n"
- "add x13, x13, #0x8\n"
"blt 7b\n"
"8:" // K loop: Main loop: Second: Tail
- ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
- ".inst 0xe01922c0 // ld1b { za0h.b[x13] }, p0/Z, [x22, x25]\n"
- ".inst 0x25656140 // psel p0.b, p8.b/Z, p10.b[w13, #4]\n"
- ".inst 0xe01922a4 // ld1b { za0h.b[x13, #4] }, p0/Z, [x21, x25]\n"
- ".inst 0xc0828910 // mova z16.s, p2/M, za2v.s[x12]\n"
- "sdot z17.s, z16.b, z18.b\n"
- "mov x23, %x[in]\n"
- ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
- "ldr x22, [x23, #0x0]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xc0828930 // mova z16.s, p2/M, za2v.s[x12, #1]\n"
- "ldr x21, [x23, #0x8]\n"
- ".inst 0xe0bf8708 // st1w { za2v.s[x12] }, p1/Z, [x24, XZR, LSL #2]\n"
- "whilelt p9.b, x28, %x[width]\n"
- "subs x20, x20, #0x1\n"
- "add x23, x23, #0x10\n"
- ".inst 0xe0a98309 // st1w { za2v.s[x12, #1] }, p0/Z, [x24, x9, LSL #2]\n"
+ "mov x22, %x[in]\n"
+ ".inst 0x25256140 // dup p0.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0xe01922a0 // ld1b { za0h.b[x13] }, p0/Z, [x21, x25]\n"
+ ".inst 0x25656140 // dup p0.b, p8.b/Z, p10.b[w13, #4]\n"
+ "ldr x21, [x22, #0x0]\n"
+ ".inst 0xe0192284 // ld1b { za0h.b[x13, #4] }, p0/Z, [x20, x25]\n"
+ "ldr x20, [x22, #0x8]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0xc0828510 // mova z16.s, p1/M, za2v.s[x12]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8308 // st1w { za2v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+ ".inst 0x25706d20 // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ "whilelt p9.b, x23, %x[width]\n"
+ ".inst 0xe0aa8309 // st1w { za2v.s[x12, #1] }, p0/Z, [x24, x10, LSL #2]\n"
"sdot z17.s, z16.b, z18.b\n"
+ ".inst 0xc0828530 // mova z16.s, p1/M, za2v.s[x12, #1]\n"
"addvl x24, x24, #2\n"
- "incb x28\n"
+ "incb x23\n"
"incb x25\n"
+ "sdot z17.s, z16.b, z18.b\n"
+ "subs x19, x19, #0x1\n"
"bgt 4b\n"
"9:" // K loop: Tails
- "cbnz x27, 12f\n"
- "mov x23, %x[in]\n"
- "whilelt p8.b, x28, %x[width]\n"
+ "cbnz x28, 12f\n"
+ "mov x22, %x[in]\n"
+ "whilelt p8.b, x23, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
"10:" // K loop: Tails: Even: First
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xc0828410 // mova z16.s, p1/M, za0v.s[x12]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
".inst 0xe0bf8300 // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
- "ldr x22, [x23, #0x0]\n"
- ".inst 0xc0828810 // mova z16.s, p2/M, za0v.s[x12]\n"
- "add x12, x12, #0x1\n"
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- "sdot z17.s, z16.b, z18.b\n"
- ".inst 0xe01922c2 // ld1b { za0h.b[x13, #2] }, p0/Z, [x22, x25]\n"
- "cmp x12, x9\n"
- "add x23, x23, #0x8\n"
+ ".inst 0x25356140 // dup p0.b, p8.b/Z, p10.b[w13, #2]\n"
"addvl x24, x24, #1\n"
+ "ldr x21, [x22, #0x0]\n"
+ ".inst 0xe01922a2 // ld1b { za0h.b[x13, #2] }, p0/Z, [x21, x25]\n"
+ "sdot z17.s, z16.b, z18.b\n"
+ "add x22, x22, #0x8\n"
"add x13, x13, #0x4\n"
+ "add x12, x12, #0x1\n"
+ "cmp x12, x10\n"
"blt 10b\n"
- "whilelt p9.b, x28, %x[width]\n"
- "whilelt p8.b, x28, %x[width]\n"
- "mov x20, #0x0\n"
+ "whilelt p9.b, x23, %x[width]\n"
+ "whilelt p8.b, x23, %x[width]\n"
+ "mov x19, #0x0\n"
"mov x12, #0x0\n"
"11:" // K loop: Tails: Even: Second
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xc0828510 // mova z16.s, p1/M, za2v.s[x12]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
".inst 0xe0bf8308 // st1w { za2v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
- ".inst 0xc0828910 // mova z16.s, p2/M, za2v.s[x12]\n"
+ "addvl x24, x24, #1\n"
+ "add x19, x19, #0x4\n"
"add x12, x12, #0x1\n"
- "cmp x12, x10\n"
"sdot z17.s, z16.b, z18.b\n"
- "addvl x24, x24, #1\n"
- "add x20, x20, #0x4\n"
+ "cmp x12, x27\n"
"blt 11b\n"
- "whilelt p9.b, x28, %x[width]\n"
+ "whilelt p9.b, x23, %x[width]\n"
"b 14f\n"
"12:" // K loop: Tails: Odd
"mov x12, #0x0\n"
"13:" // K loop: Tails: Odd: Loop
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xc0828410 // mova z16.s, p1/M, za0v.s[x12]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
".inst 0xe0bf8300 // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
- ".inst 0xc0828810 // mova z16.s, p2/M, za0v.s[x12]\n"
+ "addvl x24, x24, #1\n"
"add x12, x12, #0x1\n"
- "cmp x12, x10\n"
+ "cmp x12, x27\n"
"sdot z17.s, z16.b, z18.b\n"
- "addvl x24, x24, #1\n"
"blt 13b\n"
"14:" // K loop: End
- "st1w { z17.s }, p2, [x24]\n"
+ "st1w { z17.s }, p1, [x24]\n"
"addvl x24, x24, #1\n"
"mov %x[out], x24\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p8", "p9", "p10", "p11", "x9", "x10", "x12", "x13", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
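
For reference, both sides of these hunks compute the same K-loop bookkeeping; only the scalar register numbering changes (x19 is reinstated by the revert). A minimal C++ sketch of the arithmetic named in the inline comments — ceildiv of width by the vector length, n_loops, odd_tail — with illustrative names that do not exist in the library:

    #include <cstddef>

    struct KLoopCounts {
        std::size_t n_passes; // ceildiv(width, VL<T>), via "incb; sub; udiv"
        std::size_t n_loops;  // (n_passes - 1) / 2: full First+Second passes
        bool odd_tail;        // n_passes & 0x1: one trailing half-pass left
    };

    inline KLoopCounts k_loop_counts(std::size_t width, std::size_t vl_bytes)
    {
        KLoopCounts c;
        c.n_passes = (width + vl_bytes - 1) / vl_bytes; // "incb; sub; udiv"
        c.n_loops  = (c.n_passes - 1) >> 1;             // "sub; lsr #0x1"
        c.odd_tail = (c.n_passes & 1) != 0;             // "and ..., #0x1"
        return c;
    }
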
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_u8_u8.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_u8_u8.hpp
index 618570de08..881dfe103e 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_u8_u8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_u8_u8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#if defined(__ARM_FEATURE_SVE)
@@ -34,189 +34,189 @@ void interleave_block<1, 4, VLType::SME, false>(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cntb x21\n"
- "mov x23, %x[width]\n"
- "incb x23\n"
- "mov x20, %x[width]\n"
- "sub x10, x21, #0x1\n"
- "cntw x9\n"
- "sub x23, x23, #0x1\n"
- "ands x10, x20, x10\n"
- "udiv x23, x23, x21\n" // n_passes = ceildiv(width, VL<T>)
- "csel x10, x10, x21, NE\n"
- "lsl x22, %x[height], #0x1\n" // height * 2
- "lsl x21, x9, #0x1\n"
- "sub x20, x23, #0x1\n"
- "add x10, x10, #0x3\n"
- "sub x28, x9, #0x2\n"
- "whilelt p9.b, XZR, x22\n"
- "whilelt p8.b, x21, x22\n"
- "mov x27, #0x0\n"
- "mov x26, %x[in]\n"
- "lsr x20, x20, #0x1\n" // n_loops = (n_passes - 1) / 2
- "ldr x25, [x26, #0x0]\n"
- "and x24, x23, #0x1\n" // odd_tail = bool(n_passes & 0x1)
- "lsr x10, x10, #0x2\n"
- "ldr x23, [x26, #0x8]\n"
+ "cntb x20\n"
+ "mov x22, %x[width]\n"
+ "incb x22\n"
+ "mov x19, %x[width]\n"
+ "sub x9, x20, #0x1\n"
+ "cntw x28\n"
+ "sub x22, x22, #0x1\n"
+ "ands x9, x19, x9\n"
+ "udiv x22, x22, x20\n" // n_passes = ceildiv(width, VL<T>)
+ "csel x9, x9, x20, NE\n"
+ "lsl x21, %x[height], #0x1\n" // height * 2
+ "lsl x20, x28, #0x1\n"
+ "sub x19, x22, #0x1\n"
+ "add x9, x9, #0x3\n"
+ "sub x27, x28, #0x2\n"
+ "whilelt p9.b, XZR, x21\n"
+ "whilelt p8.b, x20, x21\n"
+ "mov x26, #0x0\n"
+ "mov x25, %x[in]\n"
+ "lsr x19, x19, #0x1\n" // n_loops = (n_passes - 1) / 2
+ "ldr x24, [x25, #0x0]\n"
+ "and x23, x22, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+ "lsr x9, x9, #0x2\n"
+ "ldr x22, [x25, #0x8]\n"
"ptrue p11.s\n"
"zip1 p10.b, p9.b, p8.b\n"
- "mov x22, %x[row_offset]\n"
- "mov x21, %x[out]\n"
- "whilelt p9.b, x27, %x[width]\n"
- "whilelt p8.b, x27, %x[width]\n"
- "add x26, x26, #0x10\n"
+ "mov x21, %x[row_offset]\n"
+ "mov x20, %x[out]\n"
+ "whilelt p9.b, x26, %x[width]\n"
+ "whilelt p8.b, x26, %x[width]\n"
+ "add x25, x25, #0x10\n"
"mov x12, #0x0\n"
- "cbz x28, 2f\n"
+ "cbz x27, 2f\n"
"1:" // K loop: Charge: Loop
".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe0160320 // ld1b { za0h.b[x12] }, p0/Z, [x25, x22]\n"
+ ".inst 0xe0150300 // ld1b { za0h.b[x12] }, p0/Z, [x24, x21]\n"
".inst 0x25646140 // psel p0.b, p8.b/Z, p10.b[w12, #4]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0xe01602e4 // ld1b { za0h.b[x12, #4] }, p0/Z, [x23, x22]\n"
+ "ldr x24, [x25, #0x0]\n"
+ ".inst 0xe01502c4 // ld1b { za0h.b[x12, #4] }, p0/Z, [x22, x21]\n"
"add x12, x12, #0x8\n"
- "cmp x12, x28, LSL #2\n"
- "ldr x23, [x26, #0x8]\n"
- "add x26, x26, #0x10\n"
+ "cmp x12, x27, LSL #2\n"
+ "ldr x22, [x25, #0x8]\n"
+ "add x25, x25, #0x10\n"
"blt 1b\n"
"2:" // K loop: Charge: End
".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe0160320 // ld1b { za0h.b[x12] }, p0/Z, [x25, x22]\n"
+ ".inst 0xe0150300 // ld1b { za0h.b[x12] }, p0/Z, [x24, x21]\n"
".inst 0x25646140 // psel p0.b, p8.b/Z, p10.b[w12, #4]\n"
- "mov x26, %x[in]\n"
- ".inst 0xe01602e4 // ld1b { za0h.b[x12, #4] }, p0/Z, [x23, x22]\n"
- "ldr x25, [x26, #0x0]\n"
- "incb x22\n"
- "ldr x23, [x26, #0x8]\n"
- "add x26, x26, #0x10\n"
- "incb x27\n"
- "cbz x20, 8f\n"
- "mov x20, x20\n"
+ "mov x25, %x[in]\n"
+ ".inst 0xe01502c4 // ld1b { za0h.b[x12, #4] }, p0/Z, [x22, x21]\n"
+ "ldr x24, [x25, #0x0]\n"
+ "incb x21\n"
+ "ldr x22, [x25, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "incb x26\n"
+ "cbz x19, 8f\n"
+ "mov x19, x19\n"
"3:" // K loop: Main loop
- "whilelt p8.b, x27, %x[width]\n"
+ "whilelt p8.b, x26, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
- "cbz x28, 5f\n"
+ "cbz x27, 5f\n"
"4:" // K loop: Main loop: First: Loop
".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe0162322 // ld1b { za0h.b[x13, #2] }, p0/Z, [x25, x22]\n"
+ ".inst 0xe0152302 // ld1b { za0h.b[x13, #2] }, p0/Z, [x24, x21]\n"
".inst 0x25756141 // psel p1.b, p8.b/Z, p10.b[w13, #6]\n"
- "ldr x25, [x26, #0x0]\n"
+ "ldr x24, [x25, #0x0]\n"
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe01626e6 // ld1b { za0h.b[x13, #6] }, p1/Z, [x23, x22]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
+ ".inst 0xe01526c6 // ld1b { za0h.b[x13, #6] }, p1/Z, [x22, x21]\n"
+ "ldr x22, [x25, #0x8]\n"
+ ".inst 0xe0bf8280 // st1w { za0v.s[x12] }, p0/Z, [x20, XZR, LSL #2]\n"
".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xe0a982a1 // st1w { za0v.s[x12, #1] }, p0/Z, [x21, x9, LSL #2]\n"
+ ".inst 0xe0bc8281 // st1w { za0v.s[x12, #1] }, p0/Z, [x20, x28, LSL #2]\n"
"add x12, x12, #0x2\n"
- "cmp x12, x28\n"
- "add x26, x26, #0x10\n"
- "addvl x21, x21, #2\n"
+ "cmp x12, x27\n"
+ "add x25, x25, #0x10\n"
+ "addvl x20, x20, #2\n"
"add x13, x13, #0x8\n"
"blt 4b\n"
"5:" // K loop: Main loop: First: Tail
".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe0162322 // ld1b { za0h.b[x13, #2] }, p0/Z, [x25, x22]\n"
- "mov x26, %x[in]\n"
- "ldr x25, [x26, #0x0]\n"
+ ".inst 0xe0152302 // ld1b { za0h.b[x13, #2] }, p0/Z, [x24, x21]\n"
+ "mov x25, %x[in]\n"
+ "ldr x24, [x25, #0x0]\n"
".inst 0x25756141 // psel p1.b, p8.b/Z, p10.b[w13, #6]\n"
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe01626e6 // ld1b { za0h.b[x13, #6] }, p1/Z, [x23, x22]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
+ ".inst 0xe01526c6 // ld1b { za0h.b[x13, #6] }, p1/Z, [x22, x21]\n"
+ "ldr x22, [x25, #0x8]\n"
+ ".inst 0xe0bf8280 // st1w { za0v.s[x12] }, p0/Z, [x20, XZR, LSL #2]\n"
".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- "whilelt p9.b, x27, %x[width]\n"
- "incb x27\n"
- "add x26, x26, #0x10\n"
- ".inst 0xe0a982a1 // st1w { za0v.s[x12, #1] }, p0/Z, [x21, x9, LSL #2]\n"
- "addvl x21, x21, #2\n"
- "incb x22\n"
- "whilelt p8.b, x27, %x[width]\n"
+ "whilelt p9.b, x26, %x[width]\n"
+ "incb x26\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0xe0bc8281 // st1w { za0v.s[x12, #1] }, p0/Z, [x20, x28, LSL #2]\n"
+ "addvl x20, x20, #2\n"
+ "incb x21\n"
+ "whilelt p8.b, x26, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
- "cbz x28, 7f\n"
+ "cbz x27, 7f\n"
"6:" // K loop: Main loop: Second: Loop
".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
- ".inst 0xe0162320 // ld1b { za0h.b[x13] }, p0/Z, [x25, x22]\n"
+ ".inst 0xe0152300 // ld1b { za0h.b[x13] }, p0/Z, [x24, x21]\n"
".inst 0x25656141 // psel p1.b, p8.b/Z, p10.b[w13, #4]\n"
- "ldr x25, [x26, #0x0]\n"
+ "ldr x24, [x25, #0x0]\n"
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe01626e4 // ld1b { za0h.b[x13, #4] }, p1/Z, [x23, x22]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
+ ".inst 0xe01526c4 // ld1b { za0h.b[x13, #4] }, p1/Z, [x22, x21]\n"
+ "ldr x22, [x25, #0x8]\n"
+ ".inst 0xe0bf8288 // st1w { za2v.s[x12] }, p0/Z, [x20, XZR, LSL #2]\n"
".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xe0a982a9 // st1w { za2v.s[x12, #1] }, p0/Z, [x21, x9, LSL #2]\n"
+ ".inst 0xe0bc8289 // st1w { za2v.s[x12, #1] }, p0/Z, [x20, x28, LSL #2]\n"
"add x12, x12, #0x2\n"
- "cmp x12, x28\n"
- "add x26, x26, #0x10\n"
- "addvl x21, x21, #2\n"
+ "cmp x12, x27\n"
+ "add x25, x25, #0x10\n"
+ "addvl x20, x20, #2\n"
"add x13, x13, #0x8\n"
"blt 6b\n"
"7:" // K loop: Main loop: Second: Tail
".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
- ".inst 0xe0162320 // ld1b { za0h.b[x13] }, p0/Z, [x25, x22]\n"
- "mov x26, %x[in]\n"
- "ldr x25, [x26, #0x0]\n"
+ ".inst 0xe0152300 // ld1b { za0h.b[x13] }, p0/Z, [x24, x21]\n"
+ "mov x25, %x[in]\n"
+ "ldr x24, [x25, #0x0]\n"
".inst 0x25656141 // psel p1.b, p8.b/Z, p10.b[w13, #4]\n"
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe01626e4 // ld1b { za0h.b[x13, #4] }, p1/Z, [x23, x22]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
+ ".inst 0xe01526c4 // ld1b { za0h.b[x13, #4] }, p1/Z, [x22, x21]\n"
+ "ldr x22, [x25, #0x8]\n"
+ ".inst 0xe0bf8288 // st1w { za2v.s[x12] }, p0/Z, [x20, XZR, LSL #2]\n"
".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- "whilelt p9.b, x27, %x[width]\n"
- "subs x20, x20, #0x1\n"
- "add x26, x26, #0x10\n"
- ".inst 0xe0a982a9 // st1w { za2v.s[x12, #1] }, p0/Z, [x21, x9, LSL #2]\n"
- "addvl x21, x21, #2\n"
- "incb x27\n"
- "incb x22\n"
+ "whilelt p9.b, x26, %x[width]\n"
+ "subs x19, x19, #0x1\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0xe0bc8289 // st1w { za2v.s[x12, #1] }, p0/Z, [x20, x28, LSL #2]\n"
+ "addvl x20, x20, #2\n"
+ "incb x26\n"
+ "incb x21\n"
"bgt 3b\n"
"8:" // K loop: Tails
- "cbnz x24, 11f\n"
- "mov x26, %x[in]\n"
- "whilelt p8.b, x27, %x[width]\n"
+ "cbnz x23, 11f\n"
+ "mov x25, %x[in]\n"
+ "whilelt p8.b, x26, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
"9:" // K loop: Tails: Even: First
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- "ldr x25, [x26, #0x0]\n"
+ ".inst 0xe0bf8280 // st1w { za0v.s[x12] }, p0/Z, [x20, XZR, LSL #2]\n"
+ "ldr x24, [x25, #0x0]\n"
"add x12, x12, #0x1\n"
".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- "cmp x12, x9\n"
- ".inst 0xe0162322 // ld1b { za0h.b[x13, #2] }, p0/Z, [x25, x22]\n"
- "add x26, x26, #0x8\n"
- "addvl x21, x21, #1\n"
+ "cmp x12, x28\n"
+ ".inst 0xe0152302 // ld1b { za0h.b[x13, #2] }, p0/Z, [x24, x21]\n"
+ "add x25, x25, #0x8\n"
+ "addvl x20, x20, #1\n"
"add x13, x13, #0x4\n"
"blt 9b\n"
- "whilelt p9.b, x27, %x[width]\n"
- "whilelt p8.b, x27, %x[width]\n"
- "mov x20, #0x0\n"
+ "whilelt p9.b, x26, %x[width]\n"
+ "whilelt p8.b, x26, %x[width]\n"
+ "mov x19, #0x0\n"
"mov x12, #0x0\n"
"10:" // K loop: Tails: Even: Second
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
+ ".inst 0xe0bf8288 // st1w { za2v.s[x12] }, p0/Z, [x20, XZR, LSL #2]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x10\n"
- "addvl x21, x21, #1\n"
- "add x20, x20, #0x4\n"
+ "cmp x12, x9\n"
+ "addvl x20, x20, #1\n"
+ "add x19, x19, #0x4\n"
"blt 10b\n"
- "whilelt p9.b, x27, %x[width]\n"
+ "whilelt p9.b, x26, %x[width]\n"
"b 13f\n"
"11:" // K loop: Tails: Odd
"mov x12, #0x0\n"
"12:" // K loop: Tails: Odd: Loop
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
+ ".inst 0xe0bf8280 // st1w { za0v.s[x12] }, p0/Z, [x20, XZR, LSL #2]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x10\n"
- "addvl x21, x21, #1\n"
+ "cmp x12, x9\n"
+ "addvl x20, x20, #1\n"
"blt 12b\n"
"13:" // K loop: End
- "mov %x[out], x21\n"
+ "mov %x[out], x20\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x12", "x13", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
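
The ldr/ldr/add #0x10 pattern repeated in every hunk above walks %x[in] as an array of 8-byte row pointers, two at a time, fetching the next pair while the current pair feeds the za0h loads. A plain C++ model of that walk, using hypothetical names (the real kernels keep these in x24/x25 or x20/x21 depending on the side of the revert):

    #include <cstddef>
    #include <cstdint>

    // in: array of per-row base pointers, as passed to interleave_block<>().
    inline void walk_row_pairs(const std::uint8_t *const *in, std::size_t pairs)
    {
        const std::uint8_t *row0 = in[0];   // "ldr x.., [x.., #0x0]"
        const std::uint8_t *row1 = in[1];   // "ldr x.., [x.., #0x8]"
        in += 2;                            // "add x.., x.., #0x10"
        for (std::size_t i = 0; i < pairs; ++i) {
            (void)row0; (void)row1;         // za0h loads consume these rows
            row0 = in[0];                   // prefetch next pair of pointers
            row1 = in[1];
            in += 2;
        }
    }
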
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_u8_u8_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_u8_u8_summing.hpp
index 646db0caa8..231d7ae213 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_u8_u8_summing.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_u8_u8_summing.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#if defined(__ARM_FEATURE_SVE)
@@ -32,220 +32,220 @@ void interleave_block<1, 4, VLType::SME, true>(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cntb x21\n"
- "mov x23, %x[width]\n"
"mov z18.b, #0x1\n"
- "incb x23\n"
- "mov x20, %x[width]\n"
"mov z17.s, #0x0\n"
- "sub x10, x21, #0x1\n"
- "cntw x9\n"
- "sub x23, x23, #0x1\n"
- "ands x10, x20, x10\n"
- "udiv x23, x23, x21\n" // n_passes = ceildiv(width, VL<T>)
- "csel x10, x10, x21, NE\n"
- "lsl x22, %x[height], #0x1\n" // height * 2
- "lsl x21, x9, #0x1\n"
- "sub x20, x23, #0x1\n"
- "add x10, x10, #0x3\n"
- "whilelt p9.b, XZR, x22\n"
- "whilelt p8.b, x21, x22\n"
- "mov x28, #0x0\n"
- "ptrue p2.b\n"
- "lsr x20, x20, #0x1\n" // n_loops = (n_passes - 1) / 2
- "and x27, x23, #0x1\n" // odd_tail = bool(n_passes & 0x1)
- "lsr x10, x10, #0x2\n"
- "sub x26, x9, #0x2\n"
+ "cntb x20\n"
+ "cntw x10\n"
+ "ptrue p1.b\n"
+ "mov x19, %x[width]\n"
+ "incb x19\n"
+ "sub x19, x19, #0x1\n"
+ "udiv x19, x19, x20\n" // n_passes = ceildiv(width, VL<T>)
+ "sub x9, x19, #0x1\n"
+ "lsr x9, x9, #0x1\n" // n_loops = (n_passes - 1) / 2
+ "and x28, x19, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+ "mov x19, %x[width]\n"
+ "sub x27, x20, #0x1\n"
+ "ands x27, x19, x27\n"
+ "csel x27, x27, x20, NE\n"
+ "add x27, x27, #0x3\n"
+ "lsr x27, x27, #0x2\n"
+ "sub x26, x10, #0x2\n"
"ptrue p11.s\n"
+ "lsl x20, %x[height], #0x1\n" // height * 2
+ "lsl x19, x10, #0x1\n"
+ "whilelt p9.b, XZR, x20\n"
+ "whilelt p8.b, x19, x20\n"
"zip1 p10.b, p9.b, p8.b\n"
"mov x25, %x[row_offset]\n"
"mov x24, %x[out]\n"
- "whilelt p9.b, x28, %x[width]\n"
- "whilelt p8.b, x28, %x[width]\n"
+ "mov x23, #0x0\n"
+ "whilelt p9.b, x23, %x[width]\n"
+ "whilelt p8.b, x23, %x[width]\n"
"cbnz %x[first], 1f\n"
"addvl x24, x24, #-1\n"
- "ld1w { z17.s }, p2/Z, [x24]\n"
+ "ld1w { z17.s }, p1/Z, [x24]\n"
"1:" // K loop: Load row sums: End
- "mov x23, %x[in]\n"
- "ldr x22, [x23, #0x0]\n"
+ "mov x22, %x[in]\n"
+ "ldr x21, [x22, #0x0]\n"
+ "ldr x20, [x22, #0x8]\n"
+ "add x22, x22, #0x10\n"
"mov x12, #0x0\n"
- "ldr x21, [x23, #0x8]\n"
- "add x23, x23, #0x10\n"
"cbz x26, 3f\n"
"2:" // K loop: Charge: Loop
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe01902c0 // ld1b { za0h.b[x12] }, p0/Z, [x22, x25]\n"
- ".inst 0x25646140 // psel p0.b, p8.b/Z, p10.b[w12, #4]\n"
- "ldr x22, [x23, #0x0]\n"
- ".inst 0xe01902a4 // ld1b { za0h.b[x12, #4] }, p0/Z, [x21, x25]\n"
+ ".inst 0x25246140 // dup p0.b, p8.b/Z, p10.b[w12]\n"
+ ".inst 0xe01902a0 // ld1b { za0h.b[x12] }, p0/Z, [x21, x25]\n"
+ ".inst 0x25646140 // dup p0.b, p8.b/Z, p10.b[w12, #4]\n"
+ "ldr x21, [x22, #0x0]\n"
+ ".inst 0xe0190284 // ld1b { za0h.b[x12, #4] }, p0/Z, [x20, x25]\n"
+ "ldr x20, [x22, #0x8]\n"
+ "add x22, x22, #0x10\n"
"add x12, x12, #0x8\n"
"cmp x12, x26, LSL #2\n"
- "ldr x21, [x23, #0x8]\n"
- "add x23, x23, #0x10\n"
"blt 2b\n"
"3:" // K loop: Charge: End
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe01902c0 // ld1b { za0h.b[x12] }, p0/Z, [x22, x25]\n"
- ".inst 0x25646140 // psel p0.b, p8.b/Z, p10.b[w12, #4]\n"
- "mov x23, %x[in]\n"
- ".inst 0xe01902a4 // ld1b { za0h.b[x12, #4] }, p0/Z, [x21, x25]\n"
- "ldr x22, [x23, #0x0]\n"
+ ".inst 0x25246140 // dup p0.b, p8.b/Z, p10.b[w12]\n"
+ ".inst 0xe01902a0 // ld1b { za0h.b[x12] }, p0/Z, [x21, x25]\n"
+ ".inst 0x25646140 // dup p0.b, p8.b/Z, p10.b[w12, #4]\n"
+ "mov x22, %x[in]\n"
+ ".inst 0xe0190284 // ld1b { za0h.b[x12, #4] }, p0/Z, [x20, x25]\n"
+ "ldr x21, [x22, #0x0]\n"
+ "ldr x20, [x22, #0x8]\n"
+ "add x22, x22, #0x10\n"
"incb x25\n"
- "ldr x21, [x23, #0x8]\n"
- "add x23, x23, #0x10\n"
- "incb x28\n"
- "cbz x20, 9f\n"
- "mov x20, x20\n"
+ "incb x23\n"
+ "cbz x9, 9f\n"
+ "mov x19, x9\n"
"4:" // K loop: Main loop
- "whilelt p8.b, x28, %x[width]\n"
+ "whilelt p8.b, x23, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
"cbz x26, 6f\n"
"5:" // K loop: Main loop: First: Loop
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe01922c2 // ld1b { za0h.b[x13, #2] }, p0/Z, [x22, x25]\n"
- ".inst 0x25756140 // psel p0.b, p8.b/Z, p10.b[w13, #6]\n"
- "ldr x22, [x23, #0x0]\n"
- ".inst 0xe01922a6 // ld1b { za0h.b[x13, #6] }, p0/Z, [x21, x25]\n"
- ".inst 0xc0828810 // mova z16.s, p2/M, za0v.s[x12]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- "udot z17.s, z16.b, z18.b\n"
- "ldr x21, [x23, #0x8]\n"
+ ".inst 0x25356140 // dup p0.b, p8.b/Z, p10.b[w13, #2]\n"
+ ".inst 0xe01922a2 // ld1b { za0h.b[x13, #2] }, p0/Z, [x21, x25]\n"
+ ".inst 0x25756140 // dup p0.b, p8.b/Z, p10.b[w13, #6]\n"
+ "ldr x21, [x22, #0x0]\n"
+ ".inst 0xe0192286 // ld1b { za0h.b[x13, #6] }, p0/Z, [x20, x25]\n"
+ "ldr x20, [x22, #0x8]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0xc0828410 // mova z16.s, p1/M, za0v.s[x12]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
".inst 0xe0bf8300 // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xc0828830 // mova z16.s, p2/M, za0v.s[x12, #1]\n"
- ".inst 0xe0a98301 // st1w { za0v.s[x12, #1] }, p0/Z, [x24, x9, LSL #2]\n"
+ ".inst 0x25706d20 // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ "add x13, x13, #0x8\n"
+ ".inst 0xe0aa8301 // st1w { za0v.s[x12, #1] }, p0/Z, [x24, x10, LSL #2]\n"
+ "udot z17.s, z16.b, z18.b\n"
+ ".inst 0xc0828430 // mova z16.s, p1/M, za0v.s[x12, #1]\n"
+ "addvl x24, x24, #2\n"
"add x12, x12, #0x2\n"
"cmp x12, x26\n"
"udot z17.s, z16.b, z18.b\n"
- "add x23, x23, #0x10\n"
- "addvl x24, x24, #2\n"
- "add x13, x13, #0x8\n"
"blt 5b\n"
"6:" // K loop: Main loop: First: Tail
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe01922c2 // ld1b { za0h.b[x13, #2] }, p0/Z, [x22, x25]\n"
- ".inst 0x25756140 // psel p0.b, p8.b/Z, p10.b[w13, #6]\n"
- ".inst 0xe01922a6 // ld1b { za0h.b[x13, #6] }, p0/Z, [x21, x25]\n"
- ".inst 0xc0828810 // mova z16.s, p2/M, za0v.s[x12]\n"
- "udot z17.s, z16.b, z18.b\n"
- "mov x23, %x[in]\n"
- ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
- "ldr x22, [x23, #0x0]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xc0828830 // mova z16.s, p2/M, za0v.s[x12, #1]\n"
- "ldr x21, [x23, #0x8]\n"
- ".inst 0xe0bf8700 // st1w { za0v.s[x12] }, p1/Z, [x24, XZR, LSL #2]\n"
- "whilelt p9.b, x28, %x[width]\n"
- "incb x28\n"
- "add x23, x23, #0x10\n"
- ".inst 0xe0a98301 // st1w { za0v.s[x12, #1] }, p0/Z, [x24, x9, LSL #2]\n"
+ "mov x22, %x[in]\n"
+ ".inst 0x25356140 // dup p0.b, p8.b/Z, p10.b[w13, #2]\n"
+ ".inst 0xe01922a2 // ld1b { za0h.b[x13, #2] }, p0/Z, [x21, x25]\n"
+ ".inst 0x25756140 // dup p0.b, p8.b/Z, p10.b[w13, #6]\n"
+ "ldr x21, [x22, #0x0]\n"
+ ".inst 0xe0192286 // ld1b { za0h.b[x13, #6] }, p0/Z, [x20, x25]\n"
+ "ldr x20, [x22, #0x8]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0xc0828410 // mova z16.s, p1/M, za0v.s[x12]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8300 // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+ ".inst 0x25706d20 // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ "whilelt p9.b, x23, %x[width]\n"
+ ".inst 0xe0aa8301 // st1w { za0v.s[x12, #1] }, p0/Z, [x24, x10, LSL #2]\n"
"udot z17.s, z16.b, z18.b\n"
+ ".inst 0xc0828430 // mova z16.s, p1/M, za0v.s[x12, #1]\n"
"addvl x24, x24, #2\n"
+ "incb x23\n"
"incb x25\n"
- "whilelt p8.b, x28, %x[width]\n"
+ "udot z17.s, z16.b, z18.b\n"
+ "whilelt p8.b, x23, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
"cbz x26, 8f\n"
"7:" // K loop: Main loop: Second: Loop
- ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
- ".inst 0xe01922c0 // ld1b { za0h.b[x13] }, p0/Z, [x22, x25]\n"
- ".inst 0x25656140 // psel p0.b, p8.b/Z, p10.b[w13, #4]\n"
- "ldr x22, [x23, #0x0]\n"
- ".inst 0xe01922a4 // ld1b { za0h.b[x13, #4] }, p0/Z, [x21, x25]\n"
- ".inst 0xc0828910 // mova z16.s, p2/M, za2v.s[x12]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- "udot z17.s, z16.b, z18.b\n"
- "ldr x21, [x23, #0x8]\n"
+ ".inst 0x25256140 // dup p0.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0xe01922a0 // ld1b { za0h.b[x13] }, p0/Z, [x21, x25]\n"
+ ".inst 0x25656140 // dup p0.b, p8.b/Z, p10.b[w13, #4]\n"
+ "ldr x21, [x22, #0x0]\n"
+ ".inst 0xe0192284 // ld1b { za0h.b[x13, #4] }, p0/Z, [x20, x25]\n"
+ "ldr x20, [x22, #0x8]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0xc0828510 // mova z16.s, p1/M, za2v.s[x12]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
".inst 0xe0bf8308 // st1w { za2v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xc0828930 // mova z16.s, p2/M, za2v.s[x12, #1]\n"
- ".inst 0xe0a98309 // st1w { za2v.s[x12, #1] }, p0/Z, [x24, x9, LSL #2]\n"
+ ".inst 0x25706d20 // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ "add x13, x13, #0x8\n"
+ ".inst 0xe0aa8309 // st1w { za2v.s[x12, #1] }, p0/Z, [x24, x10, LSL #2]\n"
+ "udot z17.s, z16.b, z18.b\n"
+ ".inst 0xc0828530 // mova z16.s, p1/M, za2v.s[x12, #1]\n"
+ "addvl x24, x24, #2\n"
"add x12, x12, #0x2\n"
"cmp x12, x26\n"
"udot z17.s, z16.b, z18.b\n"
- "add x23, x23, #0x10\n"
- "addvl x24, x24, #2\n"
- "add x13, x13, #0x8\n"
"blt 7b\n"
"8:" // K loop: Main loop: Second: Tail
- ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
- ".inst 0xe01922c0 // ld1b { za0h.b[x13] }, p0/Z, [x22, x25]\n"
- ".inst 0x25656140 // psel p0.b, p8.b/Z, p10.b[w13, #4]\n"
- ".inst 0xe01922a4 // ld1b { za0h.b[x13, #4] }, p0/Z, [x21, x25]\n"
- ".inst 0xc0828910 // mova z16.s, p2/M, za2v.s[x12]\n"
- "udot z17.s, z16.b, z18.b\n"
- "mov x23, %x[in]\n"
- ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
- "ldr x22, [x23, #0x0]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xc0828930 // mova z16.s, p2/M, za2v.s[x12, #1]\n"
- "ldr x21, [x23, #0x8]\n"
- ".inst 0xe0bf8708 // st1w { za2v.s[x12] }, p1/Z, [x24, XZR, LSL #2]\n"
- "whilelt p9.b, x28, %x[width]\n"
- "subs x20, x20, #0x1\n"
- "add x23, x23, #0x10\n"
- ".inst 0xe0a98309 // st1w { za2v.s[x12, #1] }, p0/Z, [x24, x9, LSL #2]\n"
+ "mov x22, %x[in]\n"
+ ".inst 0x25256140 // dup p0.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0xe01922a0 // ld1b { za0h.b[x13] }, p0/Z, [x21, x25]\n"
+ ".inst 0x25656140 // dup p0.b, p8.b/Z, p10.b[w13, #4]\n"
+ "ldr x21, [x22, #0x0]\n"
+ ".inst 0xe0192284 // ld1b { za0h.b[x13, #4] }, p0/Z, [x20, x25]\n"
+ "ldr x20, [x22, #0x8]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0xc0828510 // mova z16.s, p1/M, za2v.s[x12]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8308 // st1w { za2v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+ ".inst 0x25706d20 // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ "whilelt p9.b, x23, %x[width]\n"
+ ".inst 0xe0aa8309 // st1w { za2v.s[x12, #1] }, p0/Z, [x24, x10, LSL #2]\n"
"udot z17.s, z16.b, z18.b\n"
+ ".inst 0xc0828530 // mova z16.s, p1/M, za2v.s[x12, #1]\n"
"addvl x24, x24, #2\n"
- "incb x28\n"
+ "incb x23\n"
"incb x25\n"
+ "udot z17.s, z16.b, z18.b\n"
+ "subs x19, x19, #0x1\n"
"bgt 4b\n"
"9:" // K loop: Tails
- "cbnz x27, 12f\n"
- "mov x23, %x[in]\n"
- "whilelt p8.b, x28, %x[width]\n"
+ "cbnz x28, 12f\n"
+ "mov x22, %x[in]\n"
+ "whilelt p8.b, x23, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
"10:" // K loop: Tails: Even: First
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xc0828410 // mova z16.s, p1/M, za0v.s[x12]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
".inst 0xe0bf8300 // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
- "ldr x22, [x23, #0x0]\n"
- ".inst 0xc0828810 // mova z16.s, p2/M, za0v.s[x12]\n"
- "add x12, x12, #0x1\n"
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- "udot z17.s, z16.b, z18.b\n"
- ".inst 0xe01922c2 // ld1b { za0h.b[x13, #2] }, p0/Z, [x22, x25]\n"
- "cmp x12, x9\n"
- "add x23, x23, #0x8\n"
+ ".inst 0x25356140 // dup p0.b, p8.b/Z, p10.b[w13, #2]\n"
"addvl x24, x24, #1\n"
+ "ldr x21, [x22, #0x0]\n"
+ ".inst 0xe01922a2 // ld1b { za0h.b[x13, #2] }, p0/Z, [x21, x25]\n"
+ "udot z17.s, z16.b, z18.b\n"
+ "add x22, x22, #0x8\n"
"add x13, x13, #0x4\n"
+ "add x12, x12, #0x1\n"
+ "cmp x12, x10\n"
"blt 10b\n"
- "whilelt p9.b, x28, %x[width]\n"
- "whilelt p8.b, x28, %x[width]\n"
- "mov x20, #0x0\n"
+ "whilelt p9.b, x23, %x[width]\n"
+ "whilelt p8.b, x23, %x[width]\n"
+ "mov x19, #0x0\n"
"mov x12, #0x0\n"
"11:" // K loop: Tails: Even: Second
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xc0828510 // mova z16.s, p1/M, za2v.s[x12]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
".inst 0xe0bf8308 // st1w { za2v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
- ".inst 0xc0828910 // mova z16.s, p2/M, za2v.s[x12]\n"
+ "addvl x24, x24, #1\n"
+ "add x19, x19, #0x4\n"
"add x12, x12, #0x1\n"
- "cmp x12, x10\n"
"udot z17.s, z16.b, z18.b\n"
- "addvl x24, x24, #1\n"
- "add x20, x20, #0x4\n"
+ "cmp x12, x27\n"
"blt 11b\n"
- "whilelt p9.b, x28, %x[width]\n"
+ "whilelt p9.b, x23, %x[width]\n"
"b 14f\n"
"12:" // K loop: Tails: Odd
"mov x12, #0x0\n"
"13:" // K loop: Tails: Odd: Loop
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xc0828410 // mova z16.s, p1/M, za0v.s[x12]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
".inst 0xe0bf8300 // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
- ".inst 0xc0828810 // mova z16.s, p2/M, za0v.s[x12]\n"
+ "addvl x24, x24, #1\n"
"add x12, x12, #0x1\n"
- "cmp x12, x10\n"
+ "cmp x12, x27\n"
"udot z17.s, z16.b, z18.b\n"
- "addvl x24, x24, #1\n"
"blt 13b\n"
"14:" // K loop: End
- "st1w { z17.s }, p2, [x24]\n"
+ "st1w { z17.s }, p1, [x24]\n"
"addvl x24, x24, #1\n"
"mov %x[out], x24\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p8", "p9", "p10", "p11", "x9", "x10", "x12", "x13", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
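
In the summing variants, z18 is filled with byte value 1 and z17 with zeros, so each "udot z17.s, z16.b, z18.b" (sdot in the signed kernel) adds four consecutive bytes of the just-stored vector into one 32-bit lane of the running row sums that "st1w { z17.s }" writes out at label 14. A scalar model of one lane, assuming nothing beyond the dot-product semantics:

    #include <cstdint>

    // A dot product with an all-ones operand reduces each group of four
    // bytes to its sum: acc += b0*1 + b1*1 + b2*1 + b3*1.
    inline std::uint32_t udot_lane(const std::uint8_t b[4], std::uint32_t acc)
    {
        for (int i = 0; i < 4; ++i)
            acc += b[i];            // dot with {1,1,1,1} == per-lane byte sum
        return acc;
    }
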
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_fp16_fp16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_fp16_fp16.hpp
index 788c1a2eca..f80ca640ff 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_fp16_fp16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_fp16_fp16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#if defined(__ARM_FEATURE_SVE)
@@ -34,175 +34,175 @@ void interleave_block<1, 1, VLType::SME, false>(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "mov x21, %x[width]\n"
- "inch x21\n"
- "cnth x11\n"
- "sub x21, x21, #0x1\n"
- "udiv x21, x21, x11\n" // n_passes = ceildiv(width, VL<T>)
"mov x20, %x[width]\n"
- "sub x10, x11, #0x1\n"
- "sub x9, x21, #0x1\n"
- "ands x10, x20, x10\n"
- "sub x28, x11, #0x2\n"
- "lsl x20, %x[height], #0x1\n" // height * 2
- "mov x27, #0x0\n"
- "mov x26, %x[in]\n"
- "lsr x9, x9, #0x1\n" // n_loops = (n_passes - 1) / 2
- "ldr x25, [x26, #0x0]\n"
- "and x24, x21, #0x1\n" // odd_tail = bool(n_passes & 0x1)
- "csel x10, x10, x11, NE\n"
- "ldr x23, [x26, #0x8]\n"
+ "inch x20\n"
+ "cnth x10\n"
+ "sub x20, x20, #0x1\n"
+ "udiv x20, x20, x10\n" // n_passes = ceildiv(width, VL<T>)
+ "mov x19, %x[width]\n"
+ "sub x9, x10, #0x1\n"
+ "sub x28, x20, #0x1\n"
+ "ands x9, x19, x9\n"
+ "sub x27, x10, #0x2\n"
+ "lsl x19, %x[height], #0x1\n" // height * 2
+ "mov x26, #0x0\n"
+ "mov x25, %x[in]\n"
+ "lsr x28, x28, #0x1\n" // n_loops = (n_passes - 1) / 2
+ "ldr x24, [x25, #0x0]\n"
+ "and x23, x20, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+ "csel x9, x9, x10, NE\n"
+ "ldr x22, [x25, #0x8]\n"
"ptrue p11.h\n"
- "whilelt p10.h, XZR, x20\n"
- "mov x22, %x[row_offset]\n"
- "mov x21, %x[out]\n"
- "whilelt p9.h, x27, %x[width]\n"
- "whilelt p8.h, x27, %x[width]\n"
- "add x26, x26, #0x10\n"
+ "whilelt p10.h, XZR, x19\n"
+ "mov x21, %x[row_offset]\n"
+ "mov x20, %x[out]\n"
+ "whilelt p9.h, x26, %x[width]\n"
+ "whilelt p8.h, x26, %x[width]\n"
+ "add x25, x25, #0x10\n"
"mov x12, #0x0\n"
- "cbz x28, 2f\n"
+ "cbz x27, 2f\n"
"1:" // K loop: Charge: Loop
".inst 0x25286140 // psel p0.h, p8.h/Z, p10.h[w12]\n"
- ".inst 0xe0560320 // ld1h { za0h.h[x12] }, p0/Z, [x25, x22, LSL #1]\n"
+ ".inst 0xe0550300 // ld1h { za0h.h[x12] }, p0/Z, [x24, x21, LSL #1]\n"
".inst 0x25386140 // psel p0.h, p8.h/Z, p10.h[w12, #1]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0xe05602e1 // ld1h { za0h.h[x12, #1] }, p0/Z, [x23, x22, LSL #1]\n"
+ "ldr x24, [x25, #0x0]\n"
+ ".inst 0xe05502c1 // ld1h { za0h.h[x12, #1] }, p0/Z, [x22, x21, LSL #1]\n"
"add x12, x12, #0x2\n"
- "cmp x12, x28\n"
- "ldr x23, [x26, #0x8]\n"
- "add x26, x26, #0x10\n"
+ "cmp x12, x27\n"
+ "ldr x22, [x25, #0x8]\n"
+ "add x25, x25, #0x10\n"
"blt 1b\n"
"2:" // K loop: Charge: End
".inst 0x25286140 // psel p0.h, p8.h/Z, p10.h[w12]\n"
- ".inst 0xe0560320 // ld1h { za0h.h[x12] }, p0/Z, [x25, x22, LSL #1]\n"
+ ".inst 0xe0550300 // ld1h { za0h.h[x12] }, p0/Z, [x24, x21, LSL #1]\n"
".inst 0x25386140 // psel p0.h, p8.h/Z, p10.h[w12, #1]\n"
- "mov x26, %x[in]\n"
- ".inst 0xe05602e1 // ld1h { za0h.h[x12, #1] }, p0/Z, [x23, x22, LSL #1]\n"
- "ldr x25, [x26, #0x0]\n"
- "inch x22\n"
- "ldr x23, [x26, #0x8]\n"
- "add x26, x26, #0x10\n"
- "inch x27\n"
- "cbz x9, 8f\n"
- "mov x20, x9\n"
+ "mov x25, %x[in]\n"
+ ".inst 0xe05502c1 // ld1h { za0h.h[x12, #1] }, p0/Z, [x22, x21, LSL #1]\n"
+ "ldr x24, [x25, #0x0]\n"
+ "inch x21\n"
+ "ldr x22, [x25, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "inch x26\n"
+ "cbz x28, 8f\n"
+ "mov x19, x28\n"
"3:" // K loop: Main loop
- "whilelt p8.h, x27, %x[width]\n"
+ "whilelt p8.h, x26, %x[width]\n"
"mov x12, #0x0\n"
- "cbz x28, 5f\n"
+ "cbz x27, 5f\n"
"4:" // K loop: Main loop: First: Loop
".inst 0x25286140 // psel p0.h, p8.h/Z, p10.h[w12]\n"
- ".inst 0xe0560328 // ld1h { za1h.h[x12] }, p0/Z, [x25, x22, LSL #1]\n"
+ ".inst 0xe0550308 // ld1h { za1h.h[x12] }, p0/Z, [x24, x21, LSL #1]\n"
".inst 0x25386141 // psel p1.h, p8.h/Z, p10.h[w12, #1]\n"
- "ldr x25, [x26, #0x0]\n"
+ "ldr x24, [x25, #0x0]\n"
".inst 0x25286d20 // psel p0.h, p11.h/Z, p9.h[w12]\n"
- ".inst 0xe05606e9 // ld1h { za1h.h[x12, #1] }, p1/Z, [x23, x22, LSL #1]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe07f82a0 // st1h { za0v.h[x12] }, p0/Z, [x21, XZR, LSL #1]\n"
+ ".inst 0xe05506c9 // ld1h { za1h.h[x12, #1] }, p1/Z, [x22, x21, LSL #1]\n"
+ "ldr x22, [x25, #0x8]\n"
+ ".inst 0xe07f8280 // st1h { za0v.h[x12] }, p0/Z, [x20, XZR, LSL #1]\n"
".inst 0x25386d20 // psel p0.h, p11.h/Z, p9.h[w12, #1]\n"
- ".inst 0xe06b82a1 // st1h { za0v.h[x12, #1] }, p0/Z, [x21, x11, LSL #1]\n"
+ ".inst 0xe06a8281 // st1h { za0v.h[x12, #1] }, p0/Z, [x20, x10, LSL #1]\n"
"add x12, x12, #0x2\n"
- "cmp x12, x28\n"
- "add x26, x26, #0x10\n"
- "addvl x21, x21, #2\n"
+ "cmp x12, x27\n"
+ "add x25, x25, #0x10\n"
+ "addvl x20, x20, #2\n"
"blt 4b\n"
"5:" // K loop: Main loop: First: Tail
".inst 0x25286140 // psel p0.h, p8.h/Z, p10.h[w12]\n"
- ".inst 0xe0560328 // ld1h { za1h.h[x12] }, p0/Z, [x25, x22, LSL #1]\n"
- "mov x26, %x[in]\n"
- "ldr x25, [x26, #0x0]\n"
+ ".inst 0xe0550308 // ld1h { za1h.h[x12] }, p0/Z, [x24, x21, LSL #1]\n"
+ "mov x25, %x[in]\n"
+ "ldr x24, [x25, #0x0]\n"
".inst 0x25386141 // psel p1.h, p8.h/Z, p10.h[w12, #1]\n"
".inst 0x25286d20 // psel p0.h, p11.h/Z, p9.h[w12]\n"
- ".inst 0xe05606e9 // ld1h { za1h.h[x12, #1] }, p1/Z, [x23, x22, LSL #1]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe07f82a0 // st1h { za0v.h[x12] }, p0/Z, [x21, XZR, LSL #1]\n"
+ ".inst 0xe05506c9 // ld1h { za1h.h[x12, #1] }, p1/Z, [x22, x21, LSL #1]\n"
+ "ldr x22, [x25, #0x8]\n"
+ ".inst 0xe07f8280 // st1h { za0v.h[x12] }, p0/Z, [x20, XZR, LSL #1]\n"
".inst 0x25386d20 // psel p0.h, p11.h/Z, p9.h[w12, #1]\n"
- "whilelt p9.h, x27, %x[width]\n"
- "inch x27\n"
- "add x26, x26, #0x10\n"
- ".inst 0xe06b82a1 // st1h { za0v.h[x12, #1] }, p0/Z, [x21, x11, LSL #1]\n"
- "addvl x21, x21, #2\n"
- "inch x22\n"
- "whilelt p8.h, x27, %x[width]\n"
+ "whilelt p9.h, x26, %x[width]\n"
+ "inch x26\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0xe06a8281 // st1h { za0v.h[x12, #1] }, p0/Z, [x20, x10, LSL #1]\n"
+ "addvl x20, x20, #2\n"
+ "inch x21\n"
+ "whilelt p8.h, x26, %x[width]\n"
"mov x12, #0x0\n"
- "cbz x28, 7f\n"
+ "cbz x27, 7f\n"
"6:" // K loop: Main loop: Second: Loop
".inst 0x25286140 // psel p0.h, p8.h/Z, p10.h[w12]\n"
- ".inst 0xe0560320 // ld1h { za0h.h[x12] }, p0/Z, [x25, x22, LSL #1]\n"
+ ".inst 0xe0550300 // ld1h { za0h.h[x12] }, p0/Z, [x24, x21, LSL #1]\n"
".inst 0x25386141 // psel p1.h, p8.h/Z, p10.h[w12, #1]\n"
- "ldr x25, [x26, #0x0]\n"
+ "ldr x24, [x25, #0x0]\n"
".inst 0x25286d20 // psel p0.h, p11.h/Z, p9.h[w12]\n"
- ".inst 0xe05606e1 // ld1h { za0h.h[x12, #1] }, p1/Z, [x23, x22, LSL #1]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe07f82a8 // st1h { za1v.h[x12] }, p0/Z, [x21, XZR, LSL #1]\n"
+ ".inst 0xe05506c1 // ld1h { za0h.h[x12, #1] }, p1/Z, [x22, x21, LSL #1]\n"
+ "ldr x22, [x25, #0x8]\n"
+ ".inst 0xe07f8288 // st1h { za1v.h[x12] }, p0/Z, [x20, XZR, LSL #1]\n"
".inst 0x25386d20 // psel p0.h, p11.h/Z, p9.h[w12, #1]\n"
- ".inst 0xe06b82a9 // st1h { za1v.h[x12, #1] }, p0/Z, [x21, x11, LSL #1]\n"
+ ".inst 0xe06a8289 // st1h { za1v.h[x12, #1] }, p0/Z, [x20, x10, LSL #1]\n"
"add x12, x12, #0x2\n"
- "cmp x12, x28\n"
- "add x26, x26, #0x10\n"
- "addvl x21, x21, #2\n"
+ "cmp x12, x27\n"
+ "add x25, x25, #0x10\n"
+ "addvl x20, x20, #2\n"
"blt 6b\n"
"7:" // K loop: Main loop: Second: Tail
".inst 0x25286140 // psel p0.h, p8.h/Z, p10.h[w12]\n"
- ".inst 0xe0560320 // ld1h { za0h.h[x12] }, p0/Z, [x25, x22, LSL #1]\n"
- "mov x26, %x[in]\n"
- "ldr x25, [x26, #0x0]\n"
+ ".inst 0xe0550300 // ld1h { za0h.h[x12] }, p0/Z, [x24, x21, LSL #1]\n"
+ "mov x25, %x[in]\n"
+ "ldr x24, [x25, #0x0]\n"
".inst 0x25386141 // psel p1.h, p8.h/Z, p10.h[w12, #1]\n"
".inst 0x25286d20 // psel p0.h, p11.h/Z, p9.h[w12]\n"
- ".inst 0xe05606e1 // ld1h { za0h.h[x12, #1] }, p1/Z, [x23, x22, LSL #1]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe07f82a8 // st1h { za1v.h[x12] }, p0/Z, [x21, XZR, LSL #1]\n"
+ ".inst 0xe05506c1 // ld1h { za0h.h[x12, #1] }, p1/Z, [x22, x21, LSL #1]\n"
+ "ldr x22, [x25, #0x8]\n"
+ ".inst 0xe07f8288 // st1h { za1v.h[x12] }, p0/Z, [x20, XZR, LSL #1]\n"
".inst 0x25386d20 // psel p0.h, p11.h/Z, p9.h[w12, #1]\n"
- "whilelt p9.h, x27, %x[width]\n"
- "subs x20, x20, #0x1\n"
- "add x26, x26, #0x10\n"
- ".inst 0xe06b82a9 // st1h { za1v.h[x12, #1] }, p0/Z, [x21, x11, LSL #1]\n"
- "addvl x21, x21, #2\n"
- "inch x27\n"
- "inch x22\n"
+ "whilelt p9.h, x26, %x[width]\n"
+ "subs x19, x19, #0x1\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0xe06a8289 // st1h { za1v.h[x12, #1] }, p0/Z, [x20, x10, LSL #1]\n"
+ "addvl x20, x20, #2\n"
+ "inch x26\n"
+ "inch x21\n"
"bgt 3b\n"
"8:" // K loop: Tails
- "cbnz x24, 11f\n"
- "mov x26, %x[in]\n"
- "whilelt p8.h, x27, %x[width]\n"
+ "cbnz x23, 11f\n"
+ "mov x25, %x[in]\n"
+ "whilelt p8.h, x26, %x[width]\n"
"mov x12, #0x0\n"
"9:" // K loop: Tails: Even: First
".inst 0x25286d20 // psel p0.h, p11.h/Z, p9.h[w12]\n"
- ".inst 0xe07f82a0 // st1h { za0v.h[x12] }, p0/Z, [x21, XZR, LSL #1]\n"
- "ldr x25, [x26, #0x0]\n"
+ ".inst 0xe07f8280 // st1h { za0v.h[x12] }, p0/Z, [x20, XZR, LSL #1]\n"
+ "ldr x24, [x25, #0x0]\n"
".inst 0x25286140 // psel p0.h, p8.h/Z, p10.h[w12]\n"
- ".inst 0xe0560328 // ld1h { za1h.h[x12] }, p0/Z, [x25, x22, LSL #1]\n"
+ ".inst 0xe0550308 // ld1h { za1h.h[x12] }, p0/Z, [x24, x21, LSL #1]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x11\n"
- "add x26, x26, #0x8\n"
- "addvl x21, x21, #1\n"
+ "cmp x12, x10\n"
+ "add x25, x25, #0x8\n"
+ "addvl x20, x20, #1\n"
"blt 9b\n"
- "whilelt p9.h, x27, %x[width]\n"
- "whilelt p8.h, x27, %x[width]\n"
+ "whilelt p9.h, x26, %x[width]\n"
+ "whilelt p8.h, x26, %x[width]\n"
"mov x12, #0x0\n"
"10:" // K loop: Tails: Even: Second
".inst 0x25286d20 // psel p0.h, p11.h/Z, p9.h[w12]\n"
- ".inst 0xe07f82a8 // st1h { za1v.h[x12] }, p0/Z, [x21, XZR, LSL #1]\n"
+ ".inst 0xe07f8288 // st1h { za1v.h[x12] }, p0/Z, [x20, XZR, LSL #1]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x10\n"
- "addvl x21, x21, #1\n"
+ "cmp x12, x9\n"
+ "addvl x20, x20, #1\n"
"blt 10b\n"
- "whilelt p9.h, x27, %x[width]\n"
+ "whilelt p9.h, x26, %x[width]\n"
"b 13f\n"
"11:" // K loop: Tails: Odd
"mov x12, #0x0\n"
"12:" // K loop: Tails: Odd: Loop
".inst 0x25286d20 // psel p0.h, p11.h/Z, p9.h[w12]\n"
- ".inst 0xe07f82a0 // st1h { za0v.h[x12] }, p0/Z, [x21, XZR, LSL #1]\n"
+ ".inst 0xe07f8280 // st1h { za0v.h[x12] }, p0/Z, [x20, XZR, LSL #1]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x10\n"
- "addvl x21, x21, #1\n"
+ "cmp x12, x9\n"
+ "addvl x20, x20, #1\n"
"blt 12b\n"
"13:" // K loop: End
- "mov %x[out], x21\n"
+ "mov %x[out], x20\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x12", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
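
Across all of these interleaves, the pairing of "ld1h { za0h... }" loads with "st1h { za0v... }" stores is what performs the transpose: horizontal slices of the ZA tile are filled from the input rows, and vertical slices are stored to the packed output. A plain-array model of that effect, illustrative only (a VL-by-VL square of 16-bit elements):

    #include <cstdint>
    #include <vector>

    // Rows go in as za0h slices; columns come out as za0v slices,
    // so src (row-major, vl*vl elements) is emitted transposed.
    inline std::vector<std::uint16_t>
    za_transpose_model(const std::vector<std::uint16_t> &src, int vl)
    {
        std::vector<std::uint16_t> dst(src.size());
        for (int r = 0; r < vl; ++r)               // "ld1h { za0h.h[r] }"
            for (int c = 0; c < vl; ++c)
                dst[c * vl + r] = src[r * vl + c]; // "st1h { za0v.h[c] }"
        return dst;
    }
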
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_fp32_fp32.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_fp32_fp32.hpp
index 7de88543d7..874fc797a4 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_fp32_fp32.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_fp32_fp32.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#if defined(__ARM_FEATURE_SVE)
@@ -34,174 +34,174 @@ void interleave_block<1, 1, VLType::SME, false>(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "mov x22, %x[width]\n"
- "incw x22\n"
"cntw x10\n"
- "sub x22, x22, #0x1\n"
- "udiv x22, x22, x10\n" // n_passes = ceildiv(width, VL<T>)
- "mov x21, %x[width]\n"
- "sub x9, x10, #0x1\n"
- "sub x20, x22, #0x1\n"
- "ands x9, x21, x9\n"
- "sub x28, x10, #0x2\n"
- "mov x27, #0x0\n"
- "mov x26, %x[in]\n"
- "ldr x25, [x26, #0x0]\n"
- "lsr x20, x20, #0x1\n" // n_loops = (n_passes - 1) / 2
- "and x24, x22, #0x1\n" // odd_tail = bool(n_passes & 0x1)
- "ldr x23, [x26, #0x8]\n"
- "csel x9, x9, x10, NE\n"
+ "mov x19, %x[width]\n"
+ "incw x19\n"
+ "sub x19, x19, #0x1\n"
+ "udiv x19, x19, x10\n" // n_passes = ceildiv(width, VL<T>)
+ "sub x9, x19, #0x1\n"
+ "lsr x9, x9, #0x1\n" // n_loops = (n_passes - 1) / 2
+ "and x28, x19, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+ "mov x19, %x[width]\n"
+ "sub x27, x10, #0x1\n"
+ "ands x27, x19, x27\n"
+ "csel x27, x27, x10, NE\n"
+ "sub x26, x10, #0x2\n"
"ptrue p11.s\n"
"whilelt p10.s, XZR, %x[height]\n"
- "mov x22, %x[row_offset]\n"
- "mov x21, %x[out]\n"
- "whilelt p9.s, x27, %x[width]\n"
- "whilelt p8.s, x27, %x[width]\n"
- "add x26, x26, #0x10\n"
+ "mov x25, %x[row_offset]\n"
+ "mov x24, %x[out]\n"
+ "mov x23, #0x0\n"
+ "whilelt p9.s, x23, %x[width]\n"
+ "whilelt p8.s, x23, %x[width]\n"
+ "mov x22, %x[in]\n"
+ "ldr x21, [x22, #0x0]\n"
+ "ldr x20, [x22, #0x8]\n"
+ "add x22, x22, #0x10\n"
"mov x12, #0x0\n"
- "cbz x28, 2f\n"
+ "cbz x26, 2f\n"
"1:" // K loop: Charge: Loop
- ".inst 0x25306140 // psel p0.s, p8.s/Z, p10.s[w12]\n"
- ".inst 0xe0960320 // ld1w { za0h.s[x12] }, p0/Z, [x25, x22, LSL #2]\n"
- ".inst 0x25706140 // psel p0.s, p8.s/Z, p10.s[w12, #1]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0xe09602e1 // ld1w { za0h.s[x12, #1] }, p0/Z, [x23, x22, LSL #2]\n"
+ ".inst 0x25306140 // dup p0.s, p8.s/Z, p10.s[w12]\n"
+ ".inst 0xe09902a0 // ld1w { za0h.s[x12] }, p0/Z, [x21, x25, LSL #2]\n"
+ ".inst 0x25706140 // dup p0.s, p8.s/Z, p10.s[w12, #1]\n"
+ "ldr x21, [x22, #0x0]\n"
+ ".inst 0xe0990281 // ld1w { za0h.s[x12, #1] }, p0/Z, [x20, x25, LSL #2]\n"
+ "ldr x20, [x22, #0x8]\n"
+ "add x22, x22, #0x10\n"
"add x12, x12, #0x2\n"
- "cmp x12, x28\n"
- "ldr x23, [x26, #0x8]\n"
- "add x26, x26, #0x10\n"
+ "cmp x12, x26\n"
"blt 1b\n"
"2:" // K loop: Charge: End
- ".inst 0x25306140 // psel p0.s, p8.s/Z, p10.s[w12]\n"
- ".inst 0xe0960320 // ld1w { za0h.s[x12] }, p0/Z, [x25, x22, LSL #2]\n"
- ".inst 0x25706140 // psel p0.s, p8.s/Z, p10.s[w12, #1]\n"
- "mov x26, %x[in]\n"
- ".inst 0xe09602e1 // ld1w { za0h.s[x12, #1] }, p0/Z, [x23, x22, LSL #2]\n"
- "ldr x25, [x26, #0x0]\n"
- "incw x22\n"
- "ldr x23, [x26, #0x8]\n"
- "add x26, x26, #0x10\n"
- "incw x27\n"
- "cbz x20, 8f\n"
- "mov x20, x20\n"
+ ".inst 0x25306140 // dup p0.s, p8.s/Z, p10.s[w12]\n"
+ ".inst 0xe09902a0 // ld1w { za0h.s[x12] }, p0/Z, [x21, x25, LSL #2]\n"
+ ".inst 0x25706140 // dup p0.s, p8.s/Z, p10.s[w12, #1]\n"
+ "mov x22, %x[in]\n"
+ ".inst 0xe0990281 // ld1w { za0h.s[x12, #1] }, p0/Z, [x20, x25, LSL #2]\n"
+ "ldr x21, [x22, #0x0]\n"
+ "ldr x20, [x22, #0x8]\n"
+ "add x22, x22, #0x10\n"
+ "incw x25\n"
+ "incw x23\n"
+ "cbz x9, 8f\n"
+ "mov x19, x9\n"
"3:" // K loop: Main loop
- "whilelt p8.s, x27, %x[width]\n"
+ "whilelt p8.s, x23, %x[width]\n"
"mov x12, #0x0\n"
- "cbz x28, 5f\n"
+ "cbz x26, 5f\n"
"4:" // K loop: Main loop: First: Loop
- ".inst 0x25306140 // psel p0.s, p8.s/Z, p10.s[w12]\n"
- ".inst 0xe0960328 // ld1w { za2h.s[x12] }, p0/Z, [x25, x22, LSL #2]\n"
- ".inst 0x25706141 // psel p1.s, p8.s/Z, p10.s[w12, #1]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe09606e9 // ld1w { za2h.s[x12, #1] }, p1/Z, [x23, x22, LSL #2]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xe0aa82a1 // st1w { za0v.s[x12, #1] }, p0/Z, [x21, x10, LSL #2]\n"
+ ".inst 0x25306140 // dup p0.s, p8.s/Z, p10.s[w12]\n"
+ ".inst 0xe09902a8 // ld1w { za2h.s[x12] }, p0/Z, [x21, x25, LSL #2]\n"
+ ".inst 0x25706140 // dup p0.s, p8.s/Z, p10.s[w12, #1]\n"
+ "ldr x21, [x22, #0x0]\n"
+ ".inst 0xe0990289 // ld1w { za2h.s[x12, #1] }, p0/Z, [x20, x25, LSL #2]\n"
+ "ldr x20, [x22, #0x8]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8300 // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+ ".inst 0x25706d20 // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ ".inst 0xe0aa8301 // st1w { za0v.s[x12, #1] }, p0/Z, [x24, x10, LSL #2]\n"
+ "addvl x24, x24, #2\n"
"add x12, x12, #0x2\n"
- "cmp x12, x28\n"
- "add x26, x26, #0x10\n"
- "addvl x21, x21, #2\n"
+ "cmp x12, x26\n"
"blt 4b\n"
"5:" // K loop: Main loop: First: Tail
- ".inst 0x25306140 // psel p0.s, p8.s/Z, p10.s[w12]\n"
- ".inst 0xe0960328 // ld1w { za2h.s[x12] }, p0/Z, [x25, x22, LSL #2]\n"
- "mov x26, %x[in]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0x25706141 // psel p1.s, p8.s/Z, p10.s[w12, #1]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe09606e9 // ld1w { za2h.s[x12, #1] }, p1/Z, [x23, x22, LSL #2]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- "whilelt p9.s, x27, %x[width]\n"
- "incw x27\n"
- "add x26, x26, #0x10\n"
- ".inst 0xe0aa82a1 // st1w { za0v.s[x12, #1] }, p0/Z, [x21, x10, LSL #2]\n"
- "addvl x21, x21, #2\n"
- "incw x22\n"
- "whilelt p8.s, x27, %x[width]\n"
+ "mov x22, %x[in]\n"
+ ".inst 0x25306140 // dup p0.s, p8.s/Z, p10.s[w12]\n"
+ ".inst 0xe09902a8 // ld1w { za2h.s[x12] }, p0/Z, [x21, x25, LSL #2]\n"
+ ".inst 0x25706140 // dup p0.s, p8.s/Z, p10.s[w12, #1]\n"
+ "ldr x21, [x22, #0x0]\n"
+ ".inst 0xe0990289 // ld1w { za2h.s[x12, #1] }, p0/Z, [x20, x25, LSL #2]\n"
+ "ldr x20, [x22, #0x8]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8300 // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+ ".inst 0x25706d20 // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ "whilelt p9.s, x23, %x[width]\n"
+ ".inst 0xe0aa8301 // st1w { za0v.s[x12, #1] }, p0/Z, [x24, x10, LSL #2]\n"
+ "addvl x24, x24, #2\n"
+ "incw x23\n"
+ "incw x25\n"
+ "whilelt p8.s, x23, %x[width]\n"
"mov x12, #0x0\n"
- "cbz x28, 7f\n"
+ "cbz x26, 7f\n"
"6:" // K loop: Main loop: Second: Loop
- ".inst 0x25306140 // psel p0.s, p8.s/Z, p10.s[w12]\n"
- ".inst 0xe0960320 // ld1w { za0h.s[x12] }, p0/Z, [x25, x22, LSL #2]\n"
- ".inst 0x25706141 // psel p1.s, p8.s/Z, p10.s[w12, #1]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe09606e1 // ld1w { za0h.s[x12, #1] }, p1/Z, [x23, x22, LSL #2]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xe0aa82a9 // st1w { za2v.s[x12, #1] }, p0/Z, [x21, x10, LSL #2]\n"
+ ".inst 0x25306140 // dup p0.s, p8.s/Z, p10.s[w12]\n"
+ ".inst 0xe09902a0 // ld1w { za0h.s[x12] }, p0/Z, [x21, x25, LSL #2]\n"
+ ".inst 0x25706140 // dup p0.s, p8.s/Z, p10.s[w12, #1]\n"
+ "ldr x21, [x22, #0x0]\n"
+ ".inst 0xe0990281 // ld1w { za0h.s[x12, #1] }, p0/Z, [x20, x25, LSL #2]\n"
+ "ldr x20, [x22, #0x8]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8308 // st1w { za2v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+ ".inst 0x25706d20 // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ ".inst 0xe0aa8309 // st1w { za2v.s[x12, #1] }, p0/Z, [x24, x10, LSL #2]\n"
+ "addvl x24, x24, #2\n"
"add x12, x12, #0x2\n"
- "cmp x12, x28\n"
- "add x26, x26, #0x10\n"
- "addvl x21, x21, #2\n"
+ "cmp x12, x26\n"
"blt 6b\n"
"7:" // K loop: Main loop: Second: Tail
- ".inst 0x25306140 // psel p0.s, p8.s/Z, p10.s[w12]\n"
- ".inst 0xe0960320 // ld1w { za0h.s[x12] }, p0/Z, [x25, x22, LSL #2]\n"
- "mov x26, %x[in]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0x25706141 // psel p1.s, p8.s/Z, p10.s[w12, #1]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe09606e1 // ld1w { za0h.s[x12, #1] }, p1/Z, [x23, x22, LSL #2]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- "whilelt p9.s, x27, %x[width]\n"
- "subs x20, x20, #0x1\n"
- "add x26, x26, #0x10\n"
- ".inst 0xe0aa82a9 // st1w { za2v.s[x12, #1] }, p0/Z, [x21, x10, LSL #2]\n"
- "addvl x21, x21, #2\n"
- "incw x27\n"
- "incw x22\n"
+ "mov x22, %x[in]\n"
+ ".inst 0x25306140 // dup p0.s, p8.s/Z, p10.s[w12]\n"
+ ".inst 0xe09902a0 // ld1w { za0h.s[x12] }, p0/Z, [x21, x25, LSL #2]\n"
+ ".inst 0x25706140 // dup p0.s, p8.s/Z, p10.s[w12, #1]\n"
+ "ldr x21, [x22, #0x0]\n"
+ ".inst 0xe0990281 // ld1w { za0h.s[x12, #1] }, p0/Z, [x20, x25, LSL #2]\n"
+ "ldr x20, [x22, #0x8]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8308 // st1w { za2v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+ ".inst 0x25706d20 // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ "whilelt p9.s, x23, %x[width]\n"
+ ".inst 0xe0aa8309 // st1w { za2v.s[x12, #1] }, p0/Z, [x24, x10, LSL #2]\n"
+ "addvl x24, x24, #2\n"
+ "incw x23\n"
+ "incw x25\n"
+ "subs x19, x19, #0x1\n"
"bgt 3b\n"
"8:" // K loop: Tails
- "cbnz x24, 11f\n"
- "mov x26, %x[in]\n"
- "whilelt p8.s, x27, %x[width]\n"
+ "cbnz x28, 11f\n"
+ "mov x22, %x[in]\n"
+ "whilelt p8.s, x23, %x[width]\n"
"mov x12, #0x0\n"
"9:" // K loop: Tails: Even: First
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0x25306140 // psel p0.s, p8.s/Z, p10.s[w12]\n"
- ".inst 0xe0960328 // ld1w { za2h.s[x12] }, p0/Z, [x25, x22, LSL #2]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8300 // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+ ".inst 0x25306140 // dup p0.s, p8.s/Z, p10.s[w12]\n"
+ "addvl x24, x24, #1\n"
+ "ldr x21, [x22, #0x0]\n"
+ ".inst 0xe09902a8 // ld1w { za2h.s[x12] }, p0/Z, [x21, x25, LSL #2]\n"
+ "add x22, x22, #0x8\n"
"add x12, x12, #0x1\n"
"cmp x12, x10\n"
- "add x26, x26, #0x8\n"
- "addvl x21, x21, #1\n"
"blt 9b\n"
- "whilelt p9.s, x27, %x[width]\n"
- "whilelt p8.s, x27, %x[width]\n"
+ "whilelt p9.s, x23, %x[width]\n"
+ "whilelt p8.s, x23, %x[width]\n"
"mov x12, #0x0\n"
"10:" // K loop: Tails: Even: Second
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8308 // st1w { za2v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+ "addvl x24, x24, #1\n"
"add x12, x12, #0x1\n"
- "cmp x12, x9\n"
- "addvl x21, x21, #1\n"
+ "cmp x12, x27\n"
"blt 10b\n"
- "whilelt p9.s, x27, %x[width]\n"
+ "whilelt p9.s, x23, %x[width]\n"
"b 13f\n"
"11:" // K loop: Tails: Odd
"mov x12, #0x0\n"
"12:" // K loop: Tails: Odd: Loop
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8300 // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
+ "addvl x24, x24, #1\n"
"add x12, x12, #0x1\n"
- "cmp x12, x9\n"
- "addvl x21, x21, #1\n"
+ "cmp x12, x27\n"
"blt 12b\n"
"13:" // K loop: End
- "mov %x[out], x21\n"
+ "mov %x[out], x24\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p8", "p9", "p10", "p11", "x9", "x10", "x12", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
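
Each kernel in this patch brackets its ZA tile traffic with the raw encodings 0xd503477f and 0xd503467f, which the inline comments identify as SMSTART ZA and SMSTOP. A minimal sketch of the same bracketing written with mnemonics — assuming a toolchain whose assembler understands SME, a requirement the .inst spelling above avoids — would be:

    // Minimal sketch, not library code: the mnemonic forms of the two raw
    // encodings used above, per the kernels' own inline comments.
    __asm__ __volatile__("smstart za"); // enable the ZA storage (.inst 0xd503477f)
    // ... ld1w/ld1h into ZA horizontal slices, st1w/st1h from vertical slices ...
    __asm__ __volatile__("smstop");     // drop streaming mode and ZA (.inst 0xd503467f)
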
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_bf16_bf16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_bf16_bf16.hpp
index 14ee5d6304..61fed43394 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_bf16_bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_bf16_bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#if defined(__ARM_FEATURE_SVE)
@@ -34,66 +34,66 @@ void interleave_block<2, 1, VLType::SME, false>(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cnth x28\n"
- "cmp %x[height], x28\n"
"cnth x27\n"
- "csel x28, %x[height], x28, LT\n"
- "mov x26, #0x0\n"
+ "cmp %x[height], x27\n"
+ "cnth x26\n"
+ "csel x27, %x[height], x27, LT\n"
+ "mov x25, #0x0\n"
"ptrue p13.s\n"
- "sub x28, x28, #0x1\n"
+ "sub x27, x27, #0x1\n"
"whilelt p12.h, XZR, %x[height]\n"
- "whilelt p11.h, x27, %x[height]\n"
- "mov x25, %x[row_offset]\n"
- "mov x24, %x[out]\n"
- "whilelt p10.h, x26, %x[width]\n"
- "whilelt p9.h, x26, %x[width]\n"
- "whilelt p8.h, x26, %x[width]\n"
+ "whilelt p11.h, x26, %x[height]\n"
+ "mov x24, %x[row_offset]\n"
+ "mov x23, %x[out]\n"
+ "whilelt p10.h, x25, %x[width]\n"
+ "whilelt p9.h, x25, %x[width]\n"
+ "whilelt p8.h, x25, %x[width]\n"
"1:" // Width loop
- "add x23, %x[in], XZR, LSL #3\n"
- "add x20, %x[in], x27, LSL #3\n"
- "ldr x22, [x23], #0x8\n"
+ "add x22, %x[in], XZR, LSL #3\n"
+ "add x19, %x[in], x26, LSL #3\n"
+ "ldr x21, [x22], #0x8\n"
"mov x12, #0x0\n"
- "ldr x21, [x20], #0x8\n"
- "cbz x28, 3f\n"
+ "ldr x20, [x19], #0x8\n"
+ "cbz x27, 3f\n"
"2:" // Loads: Loop
".inst 0x25286581 // psel p1.h, p9.h/Z, p12.h[w12]\n"
".inst 0x25286160 // psel p0.h, p8.h/Z, p11.h[w12]\n"
- ".inst 0xe05906c0 // ld1h { za0h.h[x12] }, p1/Z, [x22, x25, LSL #1]\n"
- "ldr x22, [x23], #0x8\n"
- ".inst 0xe05902a8 // ld1h { za1h.h[x12] }, p0/Z, [x21, x25, LSL #1]\n"
+ ".inst 0xe05806a0 // ld1h { za0h.h[x12] }, p1/Z, [x21, x24, LSL #1]\n"
+ "ldr x21, [x22], #0x8\n"
+ ".inst 0xe0580288 // ld1h { za1h.h[x12] }, p0/Z, [x20, x24, LSL #1]\n"
"add x12, x12, #0x2\n"
- "cmp x12, x28, LSL #1\n"
- "ldr x21, [x20], #0x8\n"
+ "cmp x12, x27, LSL #1\n"
+ "ldr x20, [x19], #0x8\n"
"blt 2b\n"
"3:" // Loads: Tail
- "sub x20, %x[width], x26\n"
+ "sub x19, %x[width], x25\n"
".inst 0x25286580 // psel p0.h, p9.h/Z, p12.h[w12]\n"
- ".inst 0xe05902c0 // ld1h { za0h.h[x12] }, p0/Z, [x22, x25, LSL #1]\n"
+ ".inst 0xe05802a0 // ld1h { za0h.h[x12] }, p0/Z, [x21, x24, LSL #1]\n"
".inst 0x25286160 // psel p0.h, p8.h/Z, p11.h[w12]\n"
- "cmp x20, x27\n"
- ".inst 0xe05902a8 // ld1h { za1h.h[x12] }, p0/Z, [x21, x25, LSL #1]\n"
+ "cmp x19, x26\n"
+ ".inst 0xe0580288 // ld1h { za1h.h[x12] }, p0/Z, [x20, x24, LSL #1]\n"
"mov x12, #0x0\n"
- "csel x20, x20, x27, LT\n"
+ "csel x19, x19, x26, LT\n"
"4:" // Stores: Loop
".inst 0x25287540 // psel p0.h, p13.h/Z, p10.h[w12]\n"
- ".inst 0xe07f8300 // st1h { za0v.h[x12] }, p0/Z, [x24, XZR, LSL #1]\n"
+ ".inst 0xe07f82e0 // st1h { za0v.h[x12] }, p0/Z, [x23, XZR, LSL #1]\n"
".inst 0x25287540 // psel p0.h, p13.h/Z, p10.h[w12]\n"
- ".inst 0xe07b8308 // st1h { za1v.h[x12] }, p0/Z, [x24, x27, LSL #1]\n"
+ ".inst 0xe07a82e8 // st1h { za1v.h[x12] }, p0/Z, [x23, x26, LSL #1]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x20\n"
- "addvl x24, x24, #4\n"
+ "cmp x12, x19\n"
+ "addvl x23, x23, #4\n"
"blt 4b\n"
- "inch x26\n"
- "whilelt p10.h, x26, %x[width]\n"
- "whilelt p9.h, x26, %x[width]\n"
- "whilelt p8.h, x26, %x[width]\n"
"inch x25\n"
+ "whilelt p10.h, x25, %x[width]\n"
+ "whilelt p9.h, x25, %x[width]\n"
+ "whilelt p8.h, x25, %x[width]\n"
+ "inch x24\n"
"b.any 1b\n"
- "mov %x[out], x24\n"
+ "mov %x[out], x23\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x12", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
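
The restored code brings x19 back into the register allocation, and each clobber list on the "+" side grows to include it. x19 is callee-saved under the AArch64 procedure call standard, so naming it in the clobber list is what obliges the compiler to preserve the caller's value around the asm statement. A standalone illustration of the mechanism (hypothetical code, not taken from the library):

    // Hypothetical example: an asm block that scribbles on callee-saved x19
    // must declare it, or the caller's copy is silently corrupted.
    long bump(long v) {
        __asm__ __volatile__(
            "mov x19, %x[v]\n"
            "add x19, x19, #1\n"
            "mov %x[v], x19\n"
            : [v] "+r" (v)
            :
            : "x19" // tells the compiler to save/restore or avoid x19 here
        );
        return v;
    }
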
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block2_bf16_bf16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block2_bf16_bf16.hpp
index f648ccf771..fc7596e67b 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block2_bf16_bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block2_bf16_bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#if defined(__ARM_FEATURE_SVE)
@@ -34,269 +34,269 @@ void interleave_block<2, 2, VLType::SME, false>(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cnth x21\n"
- "mov x22, %x[width]\n"
- "inch x22\n"
- "mov x20, %x[width]\n"
- "sub x17, x21, #0x1\n"
- "sub x22, x22, #0x1\n"
- "ands x17, x20, x17\n"
+ "cnth x20\n"
"cntw x16\n"
- "udiv x22, x22, x21\n" // n_passes = ceildiv(width, VL<T>)
- "csel x17, x17, x21, NE\n"
- "sub x13, x22, #0x1\n"
- "add x17, x17, #0x1\n"
- "sub x15, x16, #0x2\n"
- "lsl x21, %x[height], #0x1\n" // height * 2
- "lsl x20, x16, #0x1\n"
- "mov x14, #0x0\n"
- "mov x11, %x[in]\n"
- "add x10, %x[in], x16, LSL #3\n"
- "ldr x9, [x11, #0x0]\n"
- "cntw x28, ALL, MUL #2\n"
- "cntw x27, ALL, MUL #3\n"
- "ldr x26, [x10, #0x0]\n"
- "lsr x13, x13, #0x1\n" // n_loops = (n_passes - 1) / 2
- "and x25, x22, #0x1\n" // odd_tail = bool(n_passes & 0x1)
- "ldr x24, [x11, #0x8]\n"
- "lsr x17, x17, #0x1\n"
+ "cntw x15, ALL, MUL #2\n"
+ "cntw x14, ALL, MUL #3\n"
+ "mov x19, %x[width]\n"
+ "inch x19\n"
+ "sub x19, x19, #0x1\n"
+ "udiv x19, x19, x20\n" // n_passes = ceildiv(width, VL<T>)
+ "sub x13, x19, #0x1\n"
+ "lsr x13, x13, #0x1\n" // n_loops = (n_passes - 1) / 2
+ "and x11, x19, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+ "mov x19, %x[width]\n"
+ "sub x10, x20, #0x1\n"
+ "ands x10, x19, x10\n"
+ "csel x10, x10, x20, NE\n"
+ "add x10, x10, #0x1\n"
+ "lsr x10, x10, #0x1\n"
+ "sub x9, x16, #0x2\n"
"ptrue p13.s\n"
- "ldr x23, [x10, #0x8]\n"
- "whilelt p12.h, XZR, x21\n"
- "whilelt p11.h, x20, x21\n"
- "mov x22, %x[row_offset]\n"
- "mov x21, %x[out]\n"
- "whilelt p10.h, x14, %x[width]\n"
- "whilelt p9.h, x14, %x[width]\n"
- "whilelt p8.h, x14, %x[width]\n"
- "add x11, x11, #0x10\n"
- "add x10, x10, #0x10\n"
+ "lsl x20, %x[height], #0x1\n" // height * 2
+ "lsl x19, x16, #0x1\n"
+ "whilelt p12.h, XZR, x20\n"
+ "whilelt p11.h, x19, x20\n"
+ "mov x28, %x[row_offset]\n"
+ "mov x27, %x[out]\n"
+ "mov x26, #0x0\n"
+ "whilelt p10.h, x26, %x[width]\n"
+ "whilelt p9.h, x26, %x[width]\n"
+ "whilelt p8.h, x26, %x[width]\n"
+ "mov x25, %x[in]\n"
+ "add x24, %x[in], x16, LSL #3\n"
+ "ldr x23, [x25, #0x0]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"mov x12, #0x0\n"
- "cbz x15, 2f\n"
+ "cbz x9, 2f\n"
"1:" // K loop: Charge: Loop
- ".inst 0x25286581 // psel p1.h, p9.h/Z, p12.h[w12]\n"
- ".inst 0x25286160 // psel p0.h, p8.h/Z, p11.h[w12]\n"
- ".inst 0xe0560520 // ld1h { za0h.h[x12] }, p1/Z, [x9, x22, LSL #1]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe0560348 // ld1h { za1h.h[x12] }, p0/Z, [x26, x22, LSL #1]\n"
- ".inst 0x25686581 // psel p1.h, p9.h/Z, p12.h[w12, #2]\n"
- ".inst 0x25686160 // psel p0.h, p8.h/Z, p11.h[w12, #2]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0xe0560702 // ld1h { za0h.h[x12, #2] }, p1/Z, [x24, x22, LSL #1]\n"
- "ldr x24, [x11, #0x8]\n"
- "add x11, x11, #0x10\n"
- ".inst 0xe05602ea // ld1h { za1h.h[x12, #2] }, p0/Z, [x23, x22, LSL #1]\n"
+ ".inst 0x25286580 // dup p0.h, p9.h/Z, p12.h[w12]\n"
+ ".inst 0xe05c02e0 // ld1h { za0h.h[x12] }, p0/Z, [x23, x28, LSL #1]\n"
+ ".inst 0x25286160 // dup p0.h, p8.h/Z, p11.h[w12]\n"
+ ".inst 0xe05c02c8 // ld1h { za1h.h[x12] }, p0/Z, [x22, x28, LSL #1]\n"
+ ".inst 0x25686580 // dup p0.h, p9.h/Z, p12.h[w12, #2]\n"
+ ".inst 0xe05c02a2 // ld1h { za0h.h[x12, #2] }, p0/Z, [x21, x28, LSL #1]\n"
+ ".inst 0x25686160 // dup p0.h, p8.h/Z, p11.h[w12, #2]\n"
+ ".inst 0xe05c028a // ld1h { za1h.h[x12, #2] }, p0/Z, [x20, x28, LSL #1]\n"
+ "ldr x23, [x25, #0x0]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"add x12, x12, #0x4\n"
- "cmp x12, x15, LSL #1\n"
- "ldr x23, [x10, #0x8]\n"
- "add x10, x10, #0x10\n"
+ "cmp x12, x9, LSL #1\n"
"blt 1b\n"
"2:" // K loop: Charge: End
- ".inst 0x25286581 // psel p1.h, p9.h/Z, p12.h[w12]\n"
- ".inst 0x25286160 // psel p0.h, p8.h/Z, p11.h[w12]\n"
- ".inst 0xe0560520 // ld1h { za0h.h[x12] }, p1/Z, [x9, x22, LSL #1]\n"
- ".inst 0xe0560348 // ld1h { za1h.h[x12] }, p0/Z, [x26, x22, LSL #1]\n"
- ".inst 0x25686581 // psel p1.h, p9.h/Z, p12.h[w12, #2]\n"
- ".inst 0x25686160 // psel p0.h, p8.h/Z, p11.h[w12, #2]\n"
- "mov x11, %x[in]\n"
- "add x10, %x[in], x16, LSL #3\n"
- ".inst 0xe0560702 // ld1h { za0h.h[x12, #2] }, p1/Z, [x24, x22, LSL #1]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe05602ea // ld1h { za1h.h[x12, #2] }, p0/Z, [x23, x22, LSL #1]\n"
- "ldr x26, [x10, #0x0]\n"
- "inch x22\n"
- "inch x14\n"
- "ldr x24, [x11, #0x8]\n"
- "add x11, x11, #0x10\n"
- "ldr x23, [x10, #0x8]\n"
- "add x10, x10, #0x10\n"
+ ".inst 0x25286580 // dup p0.h, p9.h/Z, p12.h[w12]\n"
+ ".inst 0xe05c02e0 // ld1h { za0h.h[x12] }, p0/Z, [x23, x28, LSL #1]\n"
+ ".inst 0x25286160 // dup p0.h, p8.h/Z, p11.h[w12]\n"
+ ".inst 0xe05c02c8 // ld1h { za1h.h[x12] }, p0/Z, [x22, x28, LSL #1]\n"
+ ".inst 0x25686580 // dup p0.h, p9.h/Z, p12.h[w12, #2]\n"
+ ".inst 0xe05c02a2 // ld1h { za0h.h[x12, #2] }, p0/Z, [x21, x28, LSL #1]\n"
+ ".inst 0x25686160 // dup p0.h, p8.h/Z, p11.h[w12, #2]\n"
+ ".inst 0xe05c028a // ld1h { za1h.h[x12, #2] }, p0/Z, [x20, x28, LSL #1]\n"
+ "mov x25, %x[in]\n"
+ "add x24, %x[in], x16, LSL #3\n"
+ "ldr x23, [x25, #0x0]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "inch x28\n"
+ "inch x26\n"
"cbz x13, 8f\n"
- "mov x20, x13\n"
+ "mov x19, x13\n"
"3:" // K loop: Main loop
- "whilelt p9.h, x14, %x[width]\n"
- "whilelt p8.h, x14, %x[width]\n"
+ "whilelt p9.h, x26, %x[width]\n"
+ "whilelt p8.h, x26, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
- "cbz x15, 5f\n"
+ "cbz x9, 5f\n"
"4:" // K loop: Main loop: First: Loop
- ".inst 0x25396581 // psel p1.h, p9.h/Z, p12.h[w13, #1]\n"
- ".inst 0x25396160 // psel p0.h, p8.h/Z, p11.h[w13, #1]\n"
- ".inst 0xe0562521 // ld1h { za0h.h[x13, #1] }, p1/Z, [x9, x22, LSL #1]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe0562349 // ld1h { za1h.h[x13, #1] }, p0/Z, [x26, x22, LSL #1]\n"
- ".inst 0x25796580 // psel p0.h, p9.h/Z, p12.h[w13, #3]\n"
- ".inst 0x25796162 // psel p2.h, p8.h/Z, p11.h[w13, #3]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0x25307541 // psel p1.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0562303 // ld1h { za0h.h[x13, #3] }, p0/Z, [x24, x22, LSL #1]\n"
- "ldr x24, [x11, #0x8]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0562aeb // ld1h { za1h.h[x13, #3] }, p2/Z, [x23, x22, LSL #1]\n"
- "ldr x23, [x10, #0x8]\n"
- ".inst 0xe0bf86a0 // st1w { za0v.s[x12] }, p1/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25707541 // psel p1.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0xe0b082a4 // st1w { za1v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
- ".inst 0x25707540 // psel p0.s, p13.s/Z, p10.s[w12, #1]\n"
- "add x11, x11, #0x10\n"
- ".inst 0xe0bc86a1 // st1w { za0v.s[x12, #1] }, p1/Z, [x21, x28, LSL #2]\n"
- "add x10, x10, #0x10\n"
+ ".inst 0x25396580 // dup p0.h, p9.h/Z, p12.h[w13, #1]\n"
+ ".inst 0xe05c22e1 // ld1h { za0h.h[x13, #1] }, p0/Z, [x23, x28, LSL #1]\n"
+ ".inst 0x25396160 // dup p0.h, p8.h/Z, p11.h[w13, #1]\n"
+ ".inst 0xe05c22c9 // ld1h { za1h.h[x13, #1] }, p0/Z, [x22, x28, LSL #1]\n"
+ ".inst 0x25796580 // dup p0.h, p9.h/Z, p12.h[w13, #3]\n"
+ ".inst 0xe05c22a3 // ld1h { za0h.h[x13, #3] }, p0/Z, [x21, x28, LSL #1]\n"
+ ".inst 0x25796160 // dup p0.h, p8.h/Z, p11.h[w13, #3]\n"
+ ".inst 0xe05c228b // ld1h { za1h.h[x13, #3] }, p0/Z, [x20, x28, LSL #1]\n"
+ "ldr x23, [x25, #0x0]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0x25707541 // dup p1.s, p13.s/Z, p10.s[w12, #1]\n"
+ ".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ ".inst 0x25707540 // dup p0.s, p13.s/Z, p10.s[w12, #1]\n"
"add x13, x13, #0x4\n"
- ".inst 0xe0bb82a5 // st1w { za1v.s[x12, #1] }, p0/Z, [x21, x27, LSL #2]\n"
+ ".inst 0xe0af8761 // st1w { za0v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+ ".inst 0xe0ae8365 // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+ "addvl x27, x27, #4\n"
"add x12, x12, #0x2\n"
- "cmp x12, x15\n"
- "addvl x21, x21, #4\n"
+ "cmp x12, x9\n"
"blt 4b\n"
"5:" // K loop: Main loop: First: Tail
- ".inst 0x25396581 // psel p1.h, p9.h/Z, p12.h[w13, #1]\n"
- ".inst 0x25396160 // psel p0.h, p8.h/Z, p11.h[w13, #1]\n"
- ".inst 0xe0562521 // ld1h { za0h.h[x13, #1] }, p1/Z, [x9, x22, LSL #1]\n"
- ".inst 0xe0562349 // ld1h { za1h.h[x13, #1] }, p0/Z, [x26, x22, LSL #1]\n"
- "mov x11, %x[in]\n"
- "add x10, %x[in], x16, LSL #3\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0x25796580 // psel p0.h, p9.h/Z, p12.h[w13, #3]\n"
- ".inst 0x25796161 // psel p1.h, p8.h/Z, p11.h[w13, #3]\n"
- ".inst 0xe0562303 // ld1h { za0h.h[x13, #3] }, p0/Z, [x24, x22, LSL #1]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe05626eb // ld1h { za1h.h[x13, #3] }, p1/Z, [x23, x22, LSL #1]\n"
- "ldr x24, [x11, #0x8]\n"
- ".inst 0x25307542 // psel p2.s, p13.s/Z, p10.s[w12]\n"
- "ldr x23, [x10, #0x8]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25707541 // psel p1.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0x25707540 // psel p0.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0xe0b08aa4 // st1w { za1v.s[x12] }, p2/Z, [x21, x16, LSL #2]\n"
- "whilelt p10.h, x14, %x[width]\n"
- "inch x14\n"
- ".inst 0xe0bc86a1 // st1w { za0v.s[x12, #1] }, p1/Z, [x21, x28, LSL #2]\n"
- "add x11, x11, #0x10\n"
- "add x10, x10, #0x10\n"
- ".inst 0xe0bb82a5 // st1w { za1v.s[x12, #1] }, p0/Z, [x21, x27, LSL #2]\n"
- "addvl x21, x21, #4\n"
- "inch x22\n"
- "whilelt p9.h, x14, %x[width]\n"
- "whilelt p8.h, x14, %x[width]\n"
+ "mov x25, %x[in]\n"
+ "add x24, %x[in], x16, LSL #3\n"
+ ".inst 0x25396580 // dup p0.h, p9.h/Z, p12.h[w13, #1]\n"
+ ".inst 0xe05c22e1 // ld1h { za0h.h[x13, #1] }, p0/Z, [x23, x28, LSL #1]\n"
+ ".inst 0x25396160 // dup p0.h, p8.h/Z, p11.h[w13, #1]\n"
+ ".inst 0xe05c22c9 // ld1h { za1h.h[x13, #1] }, p0/Z, [x22, x28, LSL #1]\n"
+ ".inst 0x25796580 // dup p0.h, p9.h/Z, p12.h[w13, #3]\n"
+ ".inst 0xe05c22a3 // ld1h { za0h.h[x13, #3] }, p0/Z, [x21, x28, LSL #1]\n"
+ ".inst 0x25796160 // dup p0.h, p8.h/Z, p11.h[w13, #3]\n"
+ ".inst 0xe05c228b // ld1h { za1h.h[x13, #3] }, p0/Z, [x20, x28, LSL #1]\n"
+ "ldr x23, [x25, #0x0]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0x25707541 // dup p1.s, p13.s/Z, p10.s[w12, #1]\n"
+ ".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ ".inst 0x25707540 // dup p0.s, p13.s/Z, p10.s[w12, #1]\n"
+ "whilelt p10.h, x26, %x[width]\n"
+ ".inst 0xe0af8761 // st1w { za0v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+ "inch x26\n"
+ "inch x28\n"
+ ".inst 0xe0ae8365 // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+ "addvl x27, x27, #4\n"
+ "whilelt p9.h, x26, %x[width]\n"
+ "whilelt p8.h, x26, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
- "cbz x15, 7f\n"
+ "cbz x9, 7f\n"
"6:" // K loop: Main loop: Second: Loop
- ".inst 0x25296581 // psel p1.h, p9.h/Z, p12.h[w13]\n"
- ".inst 0x25296160 // psel p0.h, p8.h/Z, p11.h[w13]\n"
- ".inst 0xe0562520 // ld1h { za0h.h[x13] }, p1/Z, [x9, x22, LSL #1]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe0562348 // ld1h { za1h.h[x13] }, p0/Z, [x26, x22, LSL #1]\n"
- ".inst 0x25696580 // psel p0.h, p9.h/Z, p12.h[w13, #2]\n"
- ".inst 0x25696162 // psel p2.h, p8.h/Z, p11.h[w13, #2]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0x25307541 // psel p1.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0562302 // ld1h { za0h.h[x13, #2] }, p0/Z, [x24, x22, LSL #1]\n"
- "ldr x24, [x11, #0x8]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0562aea // ld1h { za1h.h[x13, #2] }, p2/Z, [x23, x22, LSL #1]\n"
- "ldr x23, [x10, #0x8]\n"
- ".inst 0xe0bf86a8 // st1w { za2v.s[x12] }, p1/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25707541 // psel p1.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0xe0b082ac // st1w { za3v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
- ".inst 0x25707540 // psel p0.s, p13.s/Z, p10.s[w12, #1]\n"
- "add x11, x11, #0x10\n"
- ".inst 0xe0bc86a9 // st1w { za2v.s[x12, #1] }, p1/Z, [x21, x28, LSL #2]\n"
- "add x10, x10, #0x10\n"
+ ".inst 0x25296580 // dup p0.h, p9.h/Z, p12.h[w13]\n"
+ ".inst 0xe05c22e0 // ld1h { za0h.h[x13] }, p0/Z, [x23, x28, LSL #1]\n"
+ ".inst 0x25296160 // dup p0.h, p8.h/Z, p11.h[w13]\n"
+ ".inst 0xe05c22c8 // ld1h { za1h.h[x13] }, p0/Z, [x22, x28, LSL #1]\n"
+ ".inst 0x25696580 // dup p0.h, p9.h/Z, p12.h[w13, #2]\n"
+ ".inst 0xe05c22a2 // ld1h { za0h.h[x13, #2] }, p0/Z, [x21, x28, LSL #1]\n"
+ ".inst 0x25696160 // dup p0.h, p8.h/Z, p11.h[w13, #2]\n"
+ ".inst 0xe05c228a // ld1h { za1h.h[x13, #2] }, p0/Z, [x20, x28, LSL #1]\n"
+ "ldr x23, [x25, #0x0]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0xe0bf8368 // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0x25707541 // dup p1.s, p13.s/Z, p10.s[w12, #1]\n"
+ ".inst 0xe0b0836c // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ ".inst 0x25707540 // dup p0.s, p13.s/Z, p10.s[w12, #1]\n"
"add x13, x13, #0x4\n"
- ".inst 0xe0bb82ad // st1w { za3v.s[x12, #1] }, p0/Z, [x21, x27, LSL #2]\n"
+ ".inst 0xe0af8769 // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+ ".inst 0xe0ae836d // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+ "addvl x27, x27, #4\n"
"add x12, x12, #0x2\n"
- "cmp x12, x15\n"
- "addvl x21, x21, #4\n"
+ "cmp x12, x9\n"
"blt 6b\n"
"7:" // K loop: Main loop: Second: Tail
- ".inst 0x25296581 // psel p1.h, p9.h/Z, p12.h[w13]\n"
- ".inst 0x25296160 // psel p0.h, p8.h/Z, p11.h[w13]\n"
- ".inst 0xe0562520 // ld1h { za0h.h[x13] }, p1/Z, [x9, x22, LSL #1]\n"
- ".inst 0xe0562348 // ld1h { za1h.h[x13] }, p0/Z, [x26, x22, LSL #1]\n"
- "mov x11, %x[in]\n"
- "add x10, %x[in], x16, LSL #3\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0x25696580 // psel p0.h, p9.h/Z, p12.h[w13, #2]\n"
- ".inst 0x25696161 // psel p1.h, p8.h/Z, p11.h[w13, #2]\n"
- ".inst 0xe0562302 // ld1h { za0h.h[x13, #2] }, p0/Z, [x24, x22, LSL #1]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe05626ea // ld1h { za1h.h[x13, #2] }, p1/Z, [x23, x22, LSL #1]\n"
- "ldr x24, [x11, #0x8]\n"
- ".inst 0x25307542 // psel p2.s, p13.s/Z, p10.s[w12]\n"
- "ldr x23, [x10, #0x8]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25707541 // psel p1.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0x25707540 // psel p0.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0xe0b08aac // st1w { za3v.s[x12] }, p2/Z, [x21, x16, LSL #2]\n"
- "whilelt p10.h, x14, %x[width]\n"
- "subs x20, x20, #0x1\n"
- ".inst 0xe0bc86a9 // st1w { za2v.s[x12, #1] }, p1/Z, [x21, x28, LSL #2]\n"
- "add x11, x11, #0x10\n"
- "add x10, x10, #0x10\n"
- ".inst 0xe0bb82ad // st1w { za3v.s[x12, #1] }, p0/Z, [x21, x27, LSL #2]\n"
- "addvl x21, x21, #4\n"
- "inch x14\n"
- "inch x22\n"
+ "mov x25, %x[in]\n"
+ "add x24, %x[in], x16, LSL #3\n"
+ ".inst 0x25296580 // dup p0.h, p9.h/Z, p12.h[w13]\n"
+ ".inst 0xe05c22e0 // ld1h { za0h.h[x13] }, p0/Z, [x23, x28, LSL #1]\n"
+ ".inst 0x25296160 // dup p0.h, p8.h/Z, p11.h[w13]\n"
+ ".inst 0xe05c22c8 // ld1h { za1h.h[x13] }, p0/Z, [x22, x28, LSL #1]\n"
+ ".inst 0x25696580 // dup p0.h, p9.h/Z, p12.h[w13, #2]\n"
+ ".inst 0xe05c22a2 // ld1h { za0h.h[x13, #2] }, p0/Z, [x21, x28, LSL #1]\n"
+ ".inst 0x25696160 // dup p0.h, p8.h/Z, p11.h[w13, #2]\n"
+ ".inst 0xe05c228a // ld1h { za1h.h[x13, #2] }, p0/Z, [x20, x28, LSL #1]\n"
+ "ldr x23, [x25, #0x0]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0xe0bf8368 // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0x25707541 // dup p1.s, p13.s/Z, p10.s[w12, #1]\n"
+ ".inst 0xe0b0836c // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ ".inst 0x25707540 // dup p0.s, p13.s/Z, p10.s[w12, #1]\n"
+ "whilelt p10.h, x26, %x[width]\n"
+ ".inst 0xe0af8769 // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+ "inch x26\n"
+ "inch x28\n"
+ ".inst 0xe0ae836d // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+ "addvl x27, x27, #4\n"
+ "subs x19, x19, #0x1\n"
"bgt 3b\n"
"8:" // K loop: Tails
- "cbnz x25, 11f\n"
- "mov x11, %x[in]\n"
- "whilelt p9.h, x14, %x[width]\n"
- "whilelt p8.h, x14, %x[width]\n"
+ "cbnz x11, 11f\n"
+ "mov x25, %x[in]\n"
+ "whilelt p9.h, x26, %x[width]\n"
+ "whilelt p8.h, x26, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
"9:" // K loop: Tails: Even: First
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0b082a4 // st1w { za1v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
- "ldr x9, [x11, #0x0]\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0x25396581 // dup p1.h, p9.h/Z, p12.h[w13, #1]\n"
+ ".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ ".inst 0x25396160 // dup p0.h, p8.h/Z, p11.h[w13, #1]\n"
+ "addvl x27, x27, #2\n"
+ "ldr x23, [x25, #0x0]\n"
+ ".inst 0xe05c26e1 // ld1h { za0h.h[x13, #1] }, p1/Z, [x23, x28, LSL #1]\n"
+ "ldr x22, [x25, x16, LSL #0x3]\n"
+ ".inst 0xe05c22c9 // ld1h { za1h.h[x13, #1] }, p0/Z, [x22, x28, LSL #1]\n"
"add x12, x12, #0x1\n"
- ".inst 0x25396581 // psel p1.h, p9.h/Z, p12.h[w13, #1]\n"
- "ldr x26, [x11, x16, LSL #0x3]\n"
- ".inst 0x25396160 // psel p0.h, p8.h/Z, p11.h[w13, #1]\n"
- "cmp x12, x16\n"
- ".inst 0xe0562521 // ld1h { za0h.h[x13, #1] }, p1/Z, [x9, x22, LSL #1]\n"
- ".inst 0xe0562349 // ld1h { za1h.h[x13, #1] }, p0/Z, [x26, x22, LSL #1]\n"
- "add x11, x11, #0x8\n"
- "addvl x21, x21, #2\n"
+ "add x25, x25, #0x8\n"
"add x13, x13, #0x2\n"
+ "cmp x12, x16\n"
"blt 9b\n"
- "whilelt p10.h, x14, %x[width]\n"
- "whilelt p9.h, x14, %x[width]\n"
- "whilelt p8.h, x14, %x[width]\n"
- "mov x20, #0x0\n"
+ "whilelt p10.h, x26, %x[width]\n"
+ "whilelt p9.h, x26, %x[width]\n"
+ "whilelt p8.h, x26, %x[width]\n"
+ "mov x19, #0x0\n"
"mov x12, #0x0\n"
"10:" // K loop: Tails: Even: Second
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0b082ac // st1w { za3v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0xe0bf8368 // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ "add x19, x19, #0x2\n"
+ ".inst 0xe0b0836c // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ "addvl x27, x27, #2\n"
"add x12, x12, #0x1\n"
- "cmp x12, x17\n"
- "addvl x21, x21, #2\n"
- "add x20, x20, #0x2\n"
+ "cmp x12, x10\n"
"blt 10b\n"
- "whilelt p10.h, x14, %x[width]\n"
+ "whilelt p10.h, x26, %x[width]\n"
"b 13f\n"
"11:" // K loop: Tails: Odd
"mov x12, #0x0\n"
"12:" // K loop: Tails: Odd: Loop
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0b082a4 // st1w { za1v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ "addvl x27, x27, #2\n"
"add x12, x12, #0x1\n"
- "cmp x12, x17\n"
- "addvl x21, x21, #2\n"
+ "cmp x12, x10\n"
"blt 12b\n"
"13:" // K loop: End
- "mov %x[out], x21\n"
+ "mov %x[out], x27\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p8", "p9", "p10", "p11", "p12", "p13", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
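
Both block-2 kernels open with the same scalar bookkeeping, and the inline comments spell out the formulas: n_passes is a ceiling division of width by the halfword vector length, n_loops and odd_tail derive from it, and the tail store count comes from width's remainder. A plain C++ restatement, under the assumption that cnth holds the number of 16-bit lanes (the value the cnth instruction produces; streaming-mode vector lengths are powers of two, so masking with cnth - 1 is an exact remainder):

    #include <cstddef>

    struct KLoopCounts {
        std::size_t n_passes;   // ceildiv(width, VL<T>), per the "udiv" comment
        std::size_t n_loops;    // (n_passes - 1) / 2, per the "lsr" comment
        bool        odd_tail;   // bool(n_passes & 0x1), per the "and" comment
        std::size_t tail_words; // word slices stored in the final pass
    };

    KLoopCounts k_loop_counts(std::size_t width, std::size_t cnth) {
        KLoopCounts c;
        c.n_passes   = (width + cnth - 1) / cnth; // "inch", "sub #1", "udiv"
        c.n_loops    = (c.n_passes - 1) / 2;
        c.odd_tail   = (c.n_passes & 1) != 0;
        std::size_t rem = width & (cnth - 1);     // "ands" against cnth - 1
        if (rem == 0) rem = cnth;                 // "csel ..., NE"
        c.tail_words = (rem + 1) / 2;             // "add #1", then "lsr #1"
        return c;
    }
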
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block2_fp16_fp16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block2_fp16_fp16.hpp
index 61536d38a5..67570a1302 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block2_fp16_fp16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block2_fp16_fp16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#if defined(__ARM_FEATURE_SVE)
@@ -34,269 +34,269 @@ void interleave_block<2, 2, VLType::SME, false>(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cnth x21\n"
- "mov x22, %x[width]\n"
- "inch x22\n"
- "mov x20, %x[width]\n"
- "sub x17, x21, #0x1\n"
- "sub x22, x22, #0x1\n"
- "ands x17, x20, x17\n"
+ "cnth x20\n"
"cntw x16\n"
- "udiv x22, x22, x21\n" // n_passes = ceildiv(width, VL<T>)
- "csel x17, x17, x21, NE\n"
- "sub x13, x22, #0x1\n"
- "add x17, x17, #0x1\n"
- "sub x15, x16, #0x2\n"
- "lsl x21, %x[height], #0x1\n" // height * 2
- "lsl x20, x16, #0x1\n"
- "mov x14, #0x0\n"
- "mov x11, %x[in]\n"
- "add x10, %x[in], x16, LSL #3\n"
- "ldr x9, [x11, #0x0]\n"
- "cntw x28, ALL, MUL #2\n"
- "cntw x27, ALL, MUL #3\n"
- "ldr x26, [x10, #0x0]\n"
- "lsr x13, x13, #0x1\n" // n_loops = (n_passes - 1) / 2
- "and x25, x22, #0x1\n" // odd_tail = bool(n_passes & 0x1)
- "ldr x24, [x11, #0x8]\n"
- "lsr x17, x17, #0x1\n"
+ "cntw x15, ALL, MUL #2\n"
+ "cntw x14, ALL, MUL #3\n"
+ "mov x19, %x[width]\n"
+ "inch x19\n"
+ "sub x19, x19, #0x1\n"
+ "udiv x19, x19, x20\n" // n_passes = ceildiv(width, VL<T>)
+ "sub x13, x19, #0x1\n"
+ "lsr x13, x13, #0x1\n" // n_loops = (n_passes - 1) / 2
+ "and x11, x19, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+ "mov x19, %x[width]\n"
+ "sub x10, x20, #0x1\n"
+ "ands x10, x19, x10\n"
+ "csel x10, x10, x20, NE\n"
+ "add x10, x10, #0x1\n"
+ "lsr x10, x10, #0x1\n"
+ "sub x9, x16, #0x2\n"
"ptrue p13.s\n"
- "ldr x23, [x10, #0x8]\n"
- "whilelt p12.h, XZR, x21\n"
- "whilelt p11.h, x20, x21\n"
- "mov x22, %x[row_offset]\n"
- "mov x21, %x[out]\n"
- "whilelt p10.h, x14, %x[width]\n"
- "whilelt p9.h, x14, %x[width]\n"
- "whilelt p8.h, x14, %x[width]\n"
- "add x11, x11, #0x10\n"
- "add x10, x10, #0x10\n"
+ "lsl x20, %x[height], #0x1\n" // height * 2
+ "lsl x19, x16, #0x1\n"
+ "whilelt p12.h, XZR, x20\n"
+ "whilelt p11.h, x19, x20\n"
+ "mov x28, %x[row_offset]\n"
+ "mov x27, %x[out]\n"
+ "mov x26, #0x0\n"
+ "whilelt p10.h, x26, %x[width]\n"
+ "whilelt p9.h, x26, %x[width]\n"
+ "whilelt p8.h, x26, %x[width]\n"
+ "mov x25, %x[in]\n"
+ "add x24, %x[in], x16, LSL #3\n"
+ "ldr x23, [x25, #0x0]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"mov x12, #0x0\n"
- "cbz x15, 2f\n"
+ "cbz x9, 2f\n"
"1:" // K loop: Charge: Loop
- ".inst 0x25286581 // psel p1.h, p9.h/Z, p12.h[w12]\n"
- ".inst 0x25286160 // psel p0.h, p8.h/Z, p11.h[w12]\n"
- ".inst 0xe0560520 // ld1h { za0h.h[x12] }, p1/Z, [x9, x22, LSL #1]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe0560348 // ld1h { za1h.h[x12] }, p0/Z, [x26, x22, LSL #1]\n"
- ".inst 0x25686581 // psel p1.h, p9.h/Z, p12.h[w12, #2]\n"
- ".inst 0x25686160 // psel p0.h, p8.h/Z, p11.h[w12, #2]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0xe0560702 // ld1h { za0h.h[x12, #2] }, p1/Z, [x24, x22, LSL #1]\n"
- "ldr x24, [x11, #0x8]\n"
- "add x11, x11, #0x10\n"
- ".inst 0xe05602ea // ld1h { za1h.h[x12, #2] }, p0/Z, [x23, x22, LSL #1]\n"
+ ".inst 0x25286580 // dup p0.h, p9.h/Z, p12.h[w12]\n"
+ ".inst 0xe05c02e0 // ld1h { za0h.h[x12] }, p0/Z, [x23, x28, LSL #1]\n"
+ ".inst 0x25286160 // dup p0.h, p8.h/Z, p11.h[w12]\n"
+ ".inst 0xe05c02c8 // ld1h { za1h.h[x12] }, p0/Z, [x22, x28, LSL #1]\n"
+ ".inst 0x25686580 // dup p0.h, p9.h/Z, p12.h[w12, #2]\n"
+ ".inst 0xe05c02a2 // ld1h { za0h.h[x12, #2] }, p0/Z, [x21, x28, LSL #1]\n"
+ ".inst 0x25686160 // dup p0.h, p8.h/Z, p11.h[w12, #2]\n"
+ ".inst 0xe05c028a // ld1h { za1h.h[x12, #2] }, p0/Z, [x20, x28, LSL #1]\n"
+ "ldr x23, [x25, #0x0]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"add x12, x12, #0x4\n"
- "cmp x12, x15, LSL #1\n"
- "ldr x23, [x10, #0x8]\n"
- "add x10, x10, #0x10\n"
+ "cmp x12, x9, LSL #1\n"
"blt 1b\n"
"2:" // K loop: Charge: End
- ".inst 0x25286581 // psel p1.h, p9.h/Z, p12.h[w12]\n"
- ".inst 0x25286160 // psel p0.h, p8.h/Z, p11.h[w12]\n"
- ".inst 0xe0560520 // ld1h { za0h.h[x12] }, p1/Z, [x9, x22, LSL #1]\n"
- ".inst 0xe0560348 // ld1h { za1h.h[x12] }, p0/Z, [x26, x22, LSL #1]\n"
- ".inst 0x25686581 // psel p1.h, p9.h/Z, p12.h[w12, #2]\n"
- ".inst 0x25686160 // psel p0.h, p8.h/Z, p11.h[w12, #2]\n"
- "mov x11, %x[in]\n"
- "add x10, %x[in], x16, LSL #3\n"
- ".inst 0xe0560702 // ld1h { za0h.h[x12, #2] }, p1/Z, [x24, x22, LSL #1]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe05602ea // ld1h { za1h.h[x12, #2] }, p0/Z, [x23, x22, LSL #1]\n"
- "ldr x26, [x10, #0x0]\n"
- "inch x22\n"
- "inch x14\n"
- "ldr x24, [x11, #0x8]\n"
- "add x11, x11, #0x10\n"
- "ldr x23, [x10, #0x8]\n"
- "add x10, x10, #0x10\n"
+ ".inst 0x25286580 // dup p0.h, p9.h/Z, p12.h[w12]\n"
+ ".inst 0xe05c02e0 // ld1h { za0h.h[x12] }, p0/Z, [x23, x28, LSL #1]\n"
+ ".inst 0x25286160 // dup p0.h, p8.h/Z, p11.h[w12]\n"
+ ".inst 0xe05c02c8 // ld1h { za1h.h[x12] }, p0/Z, [x22, x28, LSL #1]\n"
+ ".inst 0x25686580 // dup p0.h, p9.h/Z, p12.h[w12, #2]\n"
+ ".inst 0xe05c02a2 // ld1h { za0h.h[x12, #2] }, p0/Z, [x21, x28, LSL #1]\n"
+ ".inst 0x25686160 // dup p0.h, p8.h/Z, p11.h[w12, #2]\n"
+ ".inst 0xe05c028a // ld1h { za1h.h[x12, #2] }, p0/Z, [x20, x28, LSL #1]\n"
+ "mov x25, %x[in]\n"
+ "add x24, %x[in], x16, LSL #3\n"
+ "ldr x23, [x25, #0x0]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "inch x28\n"
+ "inch x26\n"
"cbz x13, 8f\n"
- "mov x20, x13\n"
+ "mov x19, x13\n"
"3:" // K loop: Main loop
- "whilelt p9.h, x14, %x[width]\n"
- "whilelt p8.h, x14, %x[width]\n"
+ "whilelt p9.h, x26, %x[width]\n"
+ "whilelt p8.h, x26, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
- "cbz x15, 5f\n"
+ "cbz x9, 5f\n"
"4:" // K loop: Main loop: First: Loop
- ".inst 0x25396581 // psel p1.h, p9.h/Z, p12.h[w13, #1]\n"
- ".inst 0x25396160 // psel p0.h, p8.h/Z, p11.h[w13, #1]\n"
- ".inst 0xe0562521 // ld1h { za0h.h[x13, #1] }, p1/Z, [x9, x22, LSL #1]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe0562349 // ld1h { za1h.h[x13, #1] }, p0/Z, [x26, x22, LSL #1]\n"
- ".inst 0x25796580 // psel p0.h, p9.h/Z, p12.h[w13, #3]\n"
- ".inst 0x25796162 // psel p2.h, p8.h/Z, p11.h[w13, #3]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0x25307541 // psel p1.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0562303 // ld1h { za0h.h[x13, #3] }, p0/Z, [x24, x22, LSL #1]\n"
- "ldr x24, [x11, #0x8]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0562aeb // ld1h { za1h.h[x13, #3] }, p2/Z, [x23, x22, LSL #1]\n"
- "ldr x23, [x10, #0x8]\n"
- ".inst 0xe0bf86a0 // st1w { za0v.s[x12] }, p1/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25707541 // psel p1.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0xe0b082a4 // st1w { za1v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
- ".inst 0x25707540 // psel p0.s, p13.s/Z, p10.s[w12, #1]\n"
- "add x11, x11, #0x10\n"
- ".inst 0xe0bc86a1 // st1w { za0v.s[x12, #1] }, p1/Z, [x21, x28, LSL #2]\n"
- "add x10, x10, #0x10\n"
+ ".inst 0x25396580 // dup p0.h, p9.h/Z, p12.h[w13, #1]\n"
+ ".inst 0xe05c22e1 // ld1h { za0h.h[x13, #1] }, p0/Z, [x23, x28, LSL #1]\n"
+ ".inst 0x25396160 // dup p0.h, p8.h/Z, p11.h[w13, #1]\n"
+ ".inst 0xe05c22c9 // ld1h { za1h.h[x13, #1] }, p0/Z, [x22, x28, LSL #1]\n"
+ ".inst 0x25796580 // dup p0.h, p9.h/Z, p12.h[w13, #3]\n"
+ ".inst 0xe05c22a3 // ld1h { za0h.h[x13, #3] }, p0/Z, [x21, x28, LSL #1]\n"
+ ".inst 0x25796160 // dup p0.h, p8.h/Z, p11.h[w13, #3]\n"
+ ".inst 0xe05c228b // ld1h { za1h.h[x13, #3] }, p0/Z, [x20, x28, LSL #1]\n"
+ "ldr x23, [x25, #0x0]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0x25707541 // dup p1.s, p13.s/Z, p10.s[w12, #1]\n"
+ ".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ ".inst 0x25707540 // dup p0.s, p13.s/Z, p10.s[w12, #1]\n"
"add x13, x13, #0x4\n"
- ".inst 0xe0bb82a5 // st1w { za1v.s[x12, #1] }, p0/Z, [x21, x27, LSL #2]\n"
+ ".inst 0xe0af8761 // st1w { za0v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+ ".inst 0xe0ae8365 // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+ "addvl x27, x27, #4\n"
"add x12, x12, #0x2\n"
- "cmp x12, x15\n"
- "addvl x21, x21, #4\n"
+ "cmp x12, x9\n"
"blt 4b\n"
"5:" // K loop: Main loop: First: Tail
- ".inst 0x25396581 // psel p1.h, p9.h/Z, p12.h[w13, #1]\n"
- ".inst 0x25396160 // psel p0.h, p8.h/Z, p11.h[w13, #1]\n"
- ".inst 0xe0562521 // ld1h { za0h.h[x13, #1] }, p1/Z, [x9, x22, LSL #1]\n"
- ".inst 0xe0562349 // ld1h { za1h.h[x13, #1] }, p0/Z, [x26, x22, LSL #1]\n"
- "mov x11, %x[in]\n"
- "add x10, %x[in], x16, LSL #3\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0x25796580 // psel p0.h, p9.h/Z, p12.h[w13, #3]\n"
- ".inst 0x25796161 // psel p1.h, p8.h/Z, p11.h[w13, #3]\n"
- ".inst 0xe0562303 // ld1h { za0h.h[x13, #3] }, p0/Z, [x24, x22, LSL #1]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe05626eb // ld1h { za1h.h[x13, #3] }, p1/Z, [x23, x22, LSL #1]\n"
- "ldr x24, [x11, #0x8]\n"
- ".inst 0x25307542 // psel p2.s, p13.s/Z, p10.s[w12]\n"
- "ldr x23, [x10, #0x8]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25707541 // psel p1.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0x25707540 // psel p0.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0xe0b08aa4 // st1w { za1v.s[x12] }, p2/Z, [x21, x16, LSL #2]\n"
- "whilelt p10.h, x14, %x[width]\n"
- "inch x14\n"
- ".inst 0xe0bc86a1 // st1w { za0v.s[x12, #1] }, p1/Z, [x21, x28, LSL #2]\n"
- "add x11, x11, #0x10\n"
- "add x10, x10, #0x10\n"
- ".inst 0xe0bb82a5 // st1w { za1v.s[x12, #1] }, p0/Z, [x21, x27, LSL #2]\n"
- "addvl x21, x21, #4\n"
- "inch x22\n"
- "whilelt p9.h, x14, %x[width]\n"
- "whilelt p8.h, x14, %x[width]\n"
+ "mov x25, %x[in]\n"
+ "add x24, %x[in], x16, LSL #3\n"
+ ".inst 0x25396580 // dup p0.h, p9.h/Z, p12.h[w13, #1]\n"
+ ".inst 0xe05c22e1 // ld1h { za0h.h[x13, #1] }, p0/Z, [x23, x28, LSL #1]\n"
+ ".inst 0x25396160 // dup p0.h, p8.h/Z, p11.h[w13, #1]\n"
+ ".inst 0xe05c22c9 // ld1h { za1h.h[x13, #1] }, p0/Z, [x22, x28, LSL #1]\n"
+ ".inst 0x25796580 // dup p0.h, p9.h/Z, p12.h[w13, #3]\n"
+ ".inst 0xe05c22a3 // ld1h { za0h.h[x13, #3] }, p0/Z, [x21, x28, LSL #1]\n"
+ ".inst 0x25796160 // dup p0.h, p8.h/Z, p11.h[w13, #3]\n"
+ ".inst 0xe05c228b // ld1h { za1h.h[x13, #3] }, p0/Z, [x20, x28, LSL #1]\n"
+ "ldr x23, [x25, #0x0]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0x25707541 // dup p1.s, p13.s/Z, p10.s[w12, #1]\n"
+ ".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ ".inst 0x25707540 // dup p0.s, p13.s/Z, p10.s[w12, #1]\n"
+ "whilelt p10.h, x26, %x[width]\n"
+ ".inst 0xe0af8761 // st1w { za0v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+ "inch x26\n"
+ "inch x28\n"
+ ".inst 0xe0ae8365 // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+ "addvl x27, x27, #4\n"
+ "whilelt p9.h, x26, %x[width]\n"
+ "whilelt p8.h, x26, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
- "cbz x15, 7f\n"
+ "cbz x9, 7f\n"
"6:" // K loop: Main loop: Second: Loop
- ".inst 0x25296581 // psel p1.h, p9.h/Z, p12.h[w13]\n"
- ".inst 0x25296160 // psel p0.h, p8.h/Z, p11.h[w13]\n"
- ".inst 0xe0562520 // ld1h { za0h.h[x13] }, p1/Z, [x9, x22, LSL #1]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe0562348 // ld1h { za1h.h[x13] }, p0/Z, [x26, x22, LSL #1]\n"
- ".inst 0x25696580 // psel p0.h, p9.h/Z, p12.h[w13, #2]\n"
- ".inst 0x25696162 // psel p2.h, p8.h/Z, p11.h[w13, #2]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0x25307541 // psel p1.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0562302 // ld1h { za0h.h[x13, #2] }, p0/Z, [x24, x22, LSL #1]\n"
- "ldr x24, [x11, #0x8]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0562aea // ld1h { za1h.h[x13, #2] }, p2/Z, [x23, x22, LSL #1]\n"
- "ldr x23, [x10, #0x8]\n"
- ".inst 0xe0bf86a8 // st1w { za2v.s[x12] }, p1/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25707541 // psel p1.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0xe0b082ac // st1w { za3v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
- ".inst 0x25707540 // psel p0.s, p13.s/Z, p10.s[w12, #1]\n"
- "add x11, x11, #0x10\n"
- ".inst 0xe0bc86a9 // st1w { za2v.s[x12, #1] }, p1/Z, [x21, x28, LSL #2]\n"
- "add x10, x10, #0x10\n"
+ ".inst 0x25296580 // dup p0.h, p9.h/Z, p12.h[w13]\n"
+ ".inst 0xe05c22e0 // ld1h { za0h.h[x13] }, p0/Z, [x23, x28, LSL #1]\n"
+ ".inst 0x25296160 // dup p0.h, p8.h/Z, p11.h[w13]\n"
+ ".inst 0xe05c22c8 // ld1h { za1h.h[x13] }, p0/Z, [x22, x28, LSL #1]\n"
+ ".inst 0x25696580 // dup p0.h, p9.h/Z, p12.h[w13, #2]\n"
+ ".inst 0xe05c22a2 // ld1h { za0h.h[x13, #2] }, p0/Z, [x21, x28, LSL #1]\n"
+ ".inst 0x25696160 // dup p0.h, p8.h/Z, p11.h[w13, #2]\n"
+ ".inst 0xe05c228a // ld1h { za1h.h[x13, #2] }, p0/Z, [x20, x28, LSL #1]\n"
+ "ldr x23, [x25, #0x0]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0xe0bf8368 // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0x25707541 // dup p1.s, p13.s/Z, p10.s[w12, #1]\n"
+ ".inst 0xe0b0836c // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ ".inst 0x25707540 // dup p0.s, p13.s/Z, p10.s[w12, #1]\n"
"add x13, x13, #0x4\n"
- ".inst 0xe0bb82ad // st1w { za3v.s[x12, #1] }, p0/Z, [x21, x27, LSL #2]\n"
+ ".inst 0xe0af8769 // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+ ".inst 0xe0ae836d // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+ "addvl x27, x27, #4\n"
"add x12, x12, #0x2\n"
- "cmp x12, x15\n"
- "addvl x21, x21, #4\n"
+ "cmp x12, x9\n"
"blt 6b\n"
"7:" // K loop: Main loop: Second: Tail
- ".inst 0x25296581 // psel p1.h, p9.h/Z, p12.h[w13]\n"
- ".inst 0x25296160 // psel p0.h, p8.h/Z, p11.h[w13]\n"
- ".inst 0xe0562520 // ld1h { za0h.h[x13] }, p1/Z, [x9, x22, LSL #1]\n"
- ".inst 0xe0562348 // ld1h { za1h.h[x13] }, p0/Z, [x26, x22, LSL #1]\n"
- "mov x11, %x[in]\n"
- "add x10, %x[in], x16, LSL #3\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0x25696580 // psel p0.h, p9.h/Z, p12.h[w13, #2]\n"
- ".inst 0x25696161 // psel p1.h, p8.h/Z, p11.h[w13, #2]\n"
- ".inst 0xe0562302 // ld1h { za0h.h[x13, #2] }, p0/Z, [x24, x22, LSL #1]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe05626ea // ld1h { za1h.h[x13, #2] }, p1/Z, [x23, x22, LSL #1]\n"
- "ldr x24, [x11, #0x8]\n"
- ".inst 0x25307542 // psel p2.s, p13.s/Z, p10.s[w12]\n"
- "ldr x23, [x10, #0x8]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25707541 // psel p1.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0x25707540 // psel p0.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0xe0b08aac // st1w { za3v.s[x12] }, p2/Z, [x21, x16, LSL #2]\n"
- "whilelt p10.h, x14, %x[width]\n"
- "subs x20, x20, #0x1\n"
- ".inst 0xe0bc86a9 // st1w { za2v.s[x12, #1] }, p1/Z, [x21, x28, LSL #2]\n"
- "add x11, x11, #0x10\n"
- "add x10, x10, #0x10\n"
- ".inst 0xe0bb82ad // st1w { za3v.s[x12, #1] }, p0/Z, [x21, x27, LSL #2]\n"
- "addvl x21, x21, #4\n"
- "inch x14\n"
- "inch x22\n"
+ "mov x25, %x[in]\n"
+ "add x24, %x[in], x16, LSL #3\n"
+ ".inst 0x25296580 // dup p0.h, p9.h/Z, p12.h[w13]\n"
+ ".inst 0xe05c22e0 // ld1h { za0h.h[x13] }, p0/Z, [x23, x28, LSL #1]\n"
+ ".inst 0x25296160 // dup p0.h, p8.h/Z, p11.h[w13]\n"
+ ".inst 0xe05c22c8 // ld1h { za1h.h[x13] }, p0/Z, [x22, x28, LSL #1]\n"
+ ".inst 0x25696580 // dup p0.h, p9.h/Z, p12.h[w13, #2]\n"
+ ".inst 0xe05c22a2 // ld1h { za0h.h[x13, #2] }, p0/Z, [x21, x28, LSL #1]\n"
+ ".inst 0x25696160 // dup p0.h, p8.h/Z, p11.h[w13, #2]\n"
+ ".inst 0xe05c228a // ld1h { za1h.h[x13, #2] }, p0/Z, [x20, x28, LSL #1]\n"
+ "ldr x23, [x25, #0x0]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0xe0bf8368 // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0x25707541 // dup p1.s, p13.s/Z, p10.s[w12, #1]\n"
+ ".inst 0xe0b0836c // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ ".inst 0x25707540 // dup p0.s, p13.s/Z, p10.s[w12, #1]\n"
+ "whilelt p10.h, x26, %x[width]\n"
+ ".inst 0xe0af8769 // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+ "inch x26\n"
+ "inch x28\n"
+ ".inst 0xe0ae836d // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+ "addvl x27, x27, #4\n"
+ "subs x19, x19, #0x1\n"
"bgt 3b\n"
"8:" // K loop: Tails
- "cbnz x25, 11f\n"
- "mov x11, %x[in]\n"
- "whilelt p9.h, x14, %x[width]\n"
- "whilelt p8.h, x14, %x[width]\n"
+ "cbnz x11, 11f\n"
+ "mov x25, %x[in]\n"
+ "whilelt p9.h, x26, %x[width]\n"
+ "whilelt p8.h, x26, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
"9:" // K loop: Tails: Even: First
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0b082a4 // st1w { za1v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
- "ldr x9, [x11, #0x0]\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0x25396581 // dup p1.h, p9.h/Z, p12.h[w13, #1]\n"
+ ".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ ".inst 0x25396160 // dup p0.h, p8.h/Z, p11.h[w13, #1]\n"
+ "addvl x27, x27, #2\n"
+ "ldr x23, [x25, #0x0]\n"
+ ".inst 0xe05c26e1 // ld1h { za0h.h[x13, #1] }, p1/Z, [x23, x28, LSL #1]\n"
+ "ldr x22, [x25, x16, LSL #0x3]\n"
+ ".inst 0xe05c22c9 // ld1h { za1h.h[x13, #1] }, p0/Z, [x22, x28, LSL #1]\n"
"add x12, x12, #0x1\n"
- ".inst 0x25396581 // psel p1.h, p9.h/Z, p12.h[w13, #1]\n"
- "ldr x26, [x11, x16, LSL #0x3]\n"
- ".inst 0x25396160 // psel p0.h, p8.h/Z, p11.h[w13, #1]\n"
- "cmp x12, x16\n"
- ".inst 0xe0562521 // ld1h { za0h.h[x13, #1] }, p1/Z, [x9, x22, LSL #1]\n"
- ".inst 0xe0562349 // ld1h { za1h.h[x13, #1] }, p0/Z, [x26, x22, LSL #1]\n"
- "add x11, x11, #0x8\n"
- "addvl x21, x21, #2\n"
+ "add x25, x25, #0x8\n"
"add x13, x13, #0x2\n"
+ "cmp x12, x16\n"
"blt 9b\n"
- "whilelt p10.h, x14, %x[width]\n"
- "whilelt p9.h, x14, %x[width]\n"
- "whilelt p8.h, x14, %x[width]\n"
- "mov x20, #0x0\n"
+ "whilelt p10.h, x26, %x[width]\n"
+ "whilelt p9.h, x26, %x[width]\n"
+ "whilelt p8.h, x26, %x[width]\n"
+ "mov x19, #0x0\n"
"mov x12, #0x0\n"
"10:" // K loop: Tails: Even: Second
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0b082ac // st1w { za3v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0xe0bf8368 // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ "add x19, x19, #0x2\n"
+ ".inst 0xe0b0836c // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ "addvl x27, x27, #2\n"
"add x12, x12, #0x1\n"
- "cmp x12, x17\n"
- "addvl x21, x21, #2\n"
- "add x20, x20, #0x2\n"
+ "cmp x12, x10\n"
"blt 10b\n"
- "whilelt p10.h, x14, %x[width]\n"
+ "whilelt p10.h, x26, %x[width]\n"
"b 13f\n"
"11:" // K loop: Tails: Odd
"mov x12, #0x0\n"
"12:" // K loop: Tails: Odd: Loop
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0b082a4 // st1w { za1v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ "addvl x27, x27, #2\n"
"add x12, x12, #0x1\n"
- "cmp x12, x17\n"
- "addvl x21, x21, #2\n"
+ "cmp x12, x10\n"
"blt 12b\n"
"13:" // K loop: End
- "mov %x[out], x21\n"
+ "mov %x[out], x27\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p8", "p9", "p10", "p11", "p12", "p13", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
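
The prologues of the block-4 interleave kernels in this patch derive their trip counts with the arithmetic spelled out in the inline assembly comments (n_passes = ceildiv(width, VL<T>), n_loops = (n_passes - 1) / 2, odd_tail = bool(n_passes & 0x1)). A minimal scalar sketch of that arithmetic in C++, where vl_bytes stands in for the CNTB result; every name below is illustrative and not part of the kernel:

#include <cstddef>

// Scalar model of the prologue counter setup; vl_bytes plays the role of CNTB.
static void k_loop_counts(std::size_t width, std::size_t vl_bytes,
                          std::size_t &n_passes, std::size_t &n_loops,
                          bool &odd_tail, std::size_t &tail_blocks)
{
    n_passes = (width + vl_bytes - 1) / vl_bytes; // udiv: n_passes = ceildiv(width, VL<T>)
    n_loops  = (n_passes - 1) / 2;                // lsr #0x1: pairs of passes per main-loop trip
    odd_tail = (n_passes & 0x1) != 0;             // and #0x1: one unpaired pass remains
    std::size_t rem = width % vl_bytes;           // ands against (CNTB - 1), CNTB a power of two
    if (rem == 0) { rem = vl_bytes; }             // csel ..., NE: the last pass is a full one
    tail_blocks = (rem + 3) / 4;                  // add #0x3 / lsr #0x2: 4-byte blocks in the tail
}
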
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_s8_s8.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_s8_s8.hpp
index 4c701cff19..22f09339b2 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_s8_s8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_s8_s8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#if defined(__ARM_FEATURE_SVE)
@@ -34,265 +34,265 @@ void interleave_block<2, 4, VLType::SME, false>(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cntb x21\n"
- "mov x23, %x[width]\n"
- "incb x23\n"
- "mov x20, %x[width]\n"
- "sub x17, x21, #0x1\n"
+ "cntb x20\n"
"cntw x16\n"
- "sub x23, x23, #0x1\n"
- "ands x17, x20, x17\n"
- "udiv x23, x23, x21\n" // n_passes = ceildiv(width, VL<T>)
- "csel x17, x17, x21, NE\n"
- "lsl x22, %x[height], #0x1\n" // height * 2
- "lsl x21, x16, #0x1\n"
- "sub x20, x23, #0x1\n"
- "add x17, x17, #0x3\n"
- "sub x15, x16, #0x2\n"
- "whilelt p9.b, XZR, x22\n"
- "whilelt p8.b, x21, x22\n"
- "mov x14, #0x0\n"
- "mov x11, %x[in]\n"
- "add x10, %x[in], x16, LSL #3\n"
- "ldr x9, [x11, #0x0]\n"
- "cntw x28, ALL, MUL #2\n"
- "cntw x27, ALL, MUL #3\n"
- "ldr x26, [x10, #0x0]\n"
- "lsr x20, x20, #0x1\n" // n_loops = (n_passes - 1) / 2
- "and x25, x23, #0x1\n" // odd_tail = bool(n_passes & 0x1)
- "ldr x24, [x11, #0x8]\n"
- "lsr x17, x17, #0x2\n"
+ "cntw x15, ALL, MUL #2\n"
+ "cntw x14, ALL, MUL #3\n"
+ "mov x19, %x[width]\n"
+ "incb x19\n"
+ "sub x19, x19, #0x1\n"
+ "udiv x19, x19, x20\n" // n_passes = ceildiv(width, VL<T>)
+ "sub x13, x19, #0x1\n"
+ "lsr x13, x13, #0x1\n" // n_loops = (n_passes - 1) / 2
+ "and x11, x19, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+ "mov x19, %x[width]\n"
+ "sub x10, x20, #0x1\n"
+ "ands x10, x19, x10\n"
+ "csel x10, x10, x20, NE\n"
+ "add x10, x10, #0x3\n"
+ "lsr x10, x10, #0x2\n"
+ "sub x9, x16, #0x2\n"
"ptrue p11.s\n"
- "ldr x23, [x10, #0x8]\n"
+ "lsl x20, %x[height], #0x1\n" // height * 2
+ "lsl x19, x16, #0x1\n"
+ "whilelt p9.b, XZR, x20\n"
+ "whilelt p8.b, x19, x20\n"
"zip1 p10.b, p9.b, p8.b\n"
- "mov x22, %x[row_offset]\n"
- "mov x21, %x[out]\n"
- "whilelt p9.b, x14, %x[width]\n"
- "whilelt p8.b, x14, %x[width]\n"
- "add x11, x11, #0x10\n"
- "add x10, x10, #0x10\n"
+ "mov x28, %x[row_offset]\n"
+ "mov x27, %x[out]\n"
+ "mov x26, #0x0\n"
+ "whilelt p9.b, x26, %x[width]\n"
+ "whilelt p8.b, x26, %x[width]\n"
+ "mov x25, %x[in]\n"
+ "add x24, %x[in], x16, LSL #3\n"
+ "ldr x23, [x25, #0x0]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"mov x12, #0x0\n"
- "cbz x15, 2f\n"
+ "cbz x9, 2f\n"
"1:" // K loop: Charge: Loop
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe0160120 // ld1b { za0h.b[x12] }, p0/Z, [x9, x22]\n"
- ".inst 0x252c6140 // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe0160341 // ld1b { za0h.b[x12, #1] }, p0/Z, [x26, x22]\n"
- ".inst 0x25646141 // psel p1.b, p8.b/Z, p10.b[w12, #4]\n"
- ".inst 0x256c6140 // psel p0.b, p8.b/Z, p10.b[w12, #5]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0xe0160704 // ld1b { za0h.b[x12, #4] }, p1/Z, [x24, x22]\n"
- "ldr x24, [x11, #0x8]\n"
- "add x11, x11, #0x10\n"
- ".inst 0xe01602e5 // ld1b { za0h.b[x12, #5] }, p0/Z, [x23, x22]\n"
+ ".inst 0x25246140 // dup p0.b, p8.b/Z, p10.b[w12]\n"
+ ".inst 0xe01c02e0 // ld1b { za0h.b[x12] }, p0/Z, [x23, x28]\n"
+ ".inst 0x252c6140 // dup p0.b, p8.b/Z, p10.b[w12, #1]\n"
+ ".inst 0x25646141 // dup p1.b, p8.b/Z, p10.b[w12, #4]\n"
+ ".inst 0xe01c02c1 // ld1b { za0h.b[x12, #1] }, p0/Z, [x22, x28]\n"
+ ".inst 0x256c6140 // dup p0.b, p8.b/Z, p10.b[w12, #5]\n"
+ "ldr x23, [x25, #0x0]\n"
+ ".inst 0xe01c06a4 // ld1b { za0h.b[x12, #4] }, p1/Z, [x21, x28]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ ".inst 0xe01c0285 // ld1b { za0h.b[x12, #5] }, p0/Z, [x20, x28]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"add x12, x12, #0x8\n"
- "cmp x12, x15, LSL #2\n"
- "ldr x23, [x10, #0x8]\n"
- "add x10, x10, #0x10\n"
+ "cmp x12, x9, LSL #2\n"
"blt 1b\n"
"2:" // K loop: Charge: End
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe0160120 // ld1b { za0h.b[x12] }, p0/Z, [x9, x22]\n"
- ".inst 0x252c6140 // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
- ".inst 0xe0160341 // ld1b { za0h.b[x12, #1] }, p0/Z, [x26, x22]\n"
- ".inst 0x25646141 // psel p1.b, p8.b/Z, p10.b[w12, #4]\n"
- ".inst 0x256c6140 // psel p0.b, p8.b/Z, p10.b[w12, #5]\n"
- ".inst 0xe0160704 // ld1b { za0h.b[x12, #4] }, p1/Z, [x24, x22]\n"
- "mov x11, %x[in]\n"
- "add x10, %x[in], x16, LSL #3\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe01602e5 // ld1b { za0h.b[x12, #5] }, p0/Z, [x23, x22]\n"
- "ldr x26, [x10, #0x0]\n"
- "incb x22\n"
- "incb x14\n"
- "ldr x24, [x11, #0x8]\n"
- "add x11, x11, #0x10\n"
- "ldr x23, [x10, #0x8]\n"
- "add x10, x10, #0x10\n"
- "cbz x20, 8f\n"
- "mov x20, x20\n"
+ ".inst 0x25246140 // dup p0.b, p8.b/Z, p10.b[w12]\n"
+ ".inst 0xe01c02e0 // ld1b { za0h.b[x12] }, p0/Z, [x23, x28]\n"
+ ".inst 0x252c6140 // dup p0.b, p8.b/Z, p10.b[w12, #1]\n"
+ ".inst 0x25646141 // dup p1.b, p8.b/Z, p10.b[w12, #4]\n"
+ ".inst 0xe01c02c1 // ld1b { za0h.b[x12, #1] }, p0/Z, [x22, x28]\n"
+ ".inst 0x256c6140 // dup p0.b, p8.b/Z, p10.b[w12, #5]\n"
+ "mov x25, %x[in]\n"
+ ".inst 0xe01c06a4 // ld1b { za0h.b[x12, #4] }, p1/Z, [x21, x28]\n"
+ "add x24, %x[in], x16, LSL #3\n"
+ "ldr x23, [x25, #0x0]\n"
+ ".inst 0xe01c0285 // ld1b { za0h.b[x12, #5] }, p0/Z, [x20, x28]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "incb x28\n"
+ "incb x26\n"
+ "cbz x13, 8f\n"
+ "mov x19, x13\n"
"3:" // K loop: Main loop
- "whilelt p8.b, x14, %x[width]\n"
+ "whilelt p8.b, x26, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
- "cbz x15, 5f\n"
+ "cbz x9, 5f\n"
"4:" // K loop: Main loop: First: Loop
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe0162122 // ld1b { za0h.b[x13, #2] }, p0/Z, [x9, x22]\n"
- ".inst 0x253d6140 // psel p0.b, p8.b/Z, p10.b[w13, #3]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe0162343 // ld1b { za0h.b[x13, #3] }, p0/Z, [x26, x22]\n"
- ".inst 0x25756140 // psel p0.b, p8.b/Z, p10.b[w13, #6]\n"
- ".inst 0x257d6142 // psel p2.b, p8.b/Z, p10.b[w13, #7]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0xe0162306 // ld1b { za0h.b[x13, #6] }, p0/Z, [x24, x22]\n"
- ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
- "ldr x24, [x11, #0x8]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0162ae7 // ld1b { za0h.b[x13, #7] }, p2/Z, [x23, x22]\n"
- "ldr x23, [x10, #0x8]\n"
- ".inst 0xe0bf86a0 // st1w { za0v.s[x12] }, p1/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d21 // psel p1.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xe0b082a4 // st1w { za1v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- "add x11, x11, #0x10\n"
- ".inst 0xe0bc86a1 // st1w { za0v.s[x12, #1] }, p1/Z, [x21, x28, LSL #2]\n"
- "add x10, x10, #0x10\n"
+ ".inst 0x25356140 // dup p0.b, p8.b/Z, p10.b[w13, #2]\n"
+ ".inst 0xe01c22e2 // ld1b { za0h.b[x13, #2] }, p0/Z, [x23, x28]\n"
+ ".inst 0x253d6140 // dup p0.b, p8.b/Z, p10.b[w13, #3]\n"
+ ".inst 0x25756141 // dup p1.b, p8.b/Z, p10.b[w13, #6]\n"
+ ".inst 0xe01c22c3 // ld1b { za0h.b[x13, #3] }, p0/Z, [x22, x28]\n"
+ ".inst 0x257d6140 // dup p0.b, p8.b/Z, p10.b[w13, #7]\n"
+ "ldr x23, [x25, #0x0]\n"
+ ".inst 0xe01c26a6 // ld1b { za0h.b[x13, #6] }, p1/Z, [x21, x28]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ ".inst 0xe01c2287 // ld1b { za0h.b[x13, #7] }, p0/Z, [x20, x28]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0x25706d21 // dup p1.s, p11.s/Z, p9.s[w12, #1]\n"
+ ".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ ".inst 0x25706d20 // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
"add x13, x13, #0x8\n"
- ".inst 0xe0bb82a5 // st1w { za1v.s[x12, #1] }, p0/Z, [x21, x27, LSL #2]\n"
+ ".inst 0xe0af8761 // st1w { za0v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+ ".inst 0xe0ae8365 // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+ "addvl x27, x27, #4\n"
"add x12, x12, #0x2\n"
- "cmp x12, x15\n"
- "addvl x21, x21, #4\n"
+ "cmp x12, x9\n"
"blt 4b\n"
"5:" // K loop: Main loop: First: Tail
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe0162122 // ld1b { za0h.b[x13, #2] }, p0/Z, [x9, x22]\n"
- ".inst 0x253d6140 // psel p0.b, p8.b/Z, p10.b[w13, #3]\n"
- ".inst 0xe0162343 // ld1b { za0h.b[x13, #3] }, p0/Z, [x26, x22]\n"
- ".inst 0x25756140 // psel p0.b, p8.b/Z, p10.b[w13, #6]\n"
- "mov x11, %x[in]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe0162306 // ld1b { za0h.b[x13, #6] }, p0/Z, [x24, x22]\n"
- "add x10, %x[in], x16, LSL #3\n"
- ".inst 0x257d6141 // psel p1.b, p8.b/Z, p10.b[w13, #7]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe01626e7 // ld1b { za0h.b[x13, #7] }, p1/Z, [x23, x22]\n"
- "ldr x24, [x11, #0x8]\n"
- ".inst 0x25306d22 // psel p2.s, p11.s/Z, p9.s[w12]\n"
- "ldr x23, [x10, #0x8]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d21 // psel p1.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xe0b08aa4 // st1w { za1v.s[x12] }, p2/Z, [x21, x16, LSL #2]\n"
- "whilelt p9.b, x14, %x[width]\n"
- "incb x14\n"
- ".inst 0xe0bc86a1 // st1w { za0v.s[x12, #1] }, p1/Z, [x21, x28, LSL #2]\n"
- "add x11, x11, #0x10\n"
- "add x10, x10, #0x10\n"
- ".inst 0xe0bb82a5 // st1w { za1v.s[x12, #1] }, p0/Z, [x21, x27, LSL #2]\n"
- "addvl x21, x21, #4\n"
- "incb x22\n"
- "whilelt p8.b, x14, %x[width]\n"
+ "mov x25, %x[in]\n"
+ "add x24, %x[in], x16, LSL #3\n"
+ ".inst 0x25356140 // dup p0.b, p8.b/Z, p10.b[w13, #2]\n"
+ ".inst 0xe01c22e2 // ld1b { za0h.b[x13, #2] }, p0/Z, [x23, x28]\n"
+ ".inst 0x253d6140 // dup p0.b, p8.b/Z, p10.b[w13, #3]\n"
+ ".inst 0x25756141 // dup p1.b, p8.b/Z, p10.b[w13, #6]\n"
+ ".inst 0xe01c22c3 // ld1b { za0h.b[x13, #3] }, p0/Z, [x22, x28]\n"
+ ".inst 0x257d6140 // dup p0.b, p8.b/Z, p10.b[w13, #7]\n"
+ "ldr x23, [x25, #0x0]\n"
+ ".inst 0xe01c26a6 // ld1b { za0h.b[x13, #6] }, p1/Z, [x21, x28]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ ".inst 0xe01c2287 // ld1b { za0h.b[x13, #7] }, p0/Z, [x20, x28]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0x25706d21 // dup p1.s, p11.s/Z, p9.s[w12, #1]\n"
+ ".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ ".inst 0x25706d20 // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ "whilelt p9.b, x26, %x[width]\n"
+ ".inst 0xe0af8761 // st1w { za0v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+ "incb x26\n"
+ "incb x28\n"
+ ".inst 0xe0ae8365 // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+ "addvl x27, x27, #4\n"
+ "whilelt p8.b, x26, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
- "cbz x15, 7f\n"
+ "cbz x9, 7f\n"
"6:" // K loop: Main loop: Second: Loop
- ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
- ".inst 0xe0162120 // ld1b { za0h.b[x13] }, p0/Z, [x9, x22]\n"
- ".inst 0x252d6140 // psel p0.b, p8.b/Z, p10.b[w13, #1]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe0162341 // ld1b { za0h.b[x13, #1] }, p0/Z, [x26, x22]\n"
- ".inst 0x25656140 // psel p0.b, p8.b/Z, p10.b[w13, #4]\n"
- ".inst 0x256d6142 // psel p2.b, p8.b/Z, p10.b[w13, #5]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0xe0162304 // ld1b { za0h.b[x13, #4] }, p0/Z, [x24, x22]\n"
- ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
- "ldr x24, [x11, #0x8]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0162ae5 // ld1b { za0h.b[x13, #5] }, p2/Z, [x23, x22]\n"
- "ldr x23, [x10, #0x8]\n"
- ".inst 0xe0bf86a8 // st1w { za2v.s[x12] }, p1/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d21 // psel p1.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xe0b082ac // st1w { za3v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- "add x11, x11, #0x10\n"
- ".inst 0xe0bc86a9 // st1w { za2v.s[x12, #1] }, p1/Z, [x21, x28, LSL #2]\n"
- "add x10, x10, #0x10\n"
+ ".inst 0x25256140 // dup p0.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0xe01c22e0 // ld1b { za0h.b[x13] }, p0/Z, [x23, x28]\n"
+ ".inst 0x252d6140 // dup p0.b, p8.b/Z, p10.b[w13, #1]\n"
+ ".inst 0x25656141 // dup p1.b, p8.b/Z, p10.b[w13, #4]\n"
+ ".inst 0xe01c22c1 // ld1b { za0h.b[x13, #1] }, p0/Z, [x22, x28]\n"
+ ".inst 0x256d6140 // dup p0.b, p8.b/Z, p10.b[w13, #5]\n"
+ "ldr x23, [x25, #0x0]\n"
+ ".inst 0xe01c26a4 // ld1b { za0h.b[x13, #4] }, p1/Z, [x21, x28]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ ".inst 0xe01c2285 // ld1b { za0h.b[x13, #5] }, p0/Z, [x20, x28]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8368 // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0x25706d21 // dup p1.s, p11.s/Z, p9.s[w12, #1]\n"
+ ".inst 0xe0b0836c // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ ".inst 0x25706d20 // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
"add x13, x13, #0x8\n"
- ".inst 0xe0bb82ad // st1w { za3v.s[x12, #1] }, p0/Z, [x21, x27, LSL #2]\n"
+ ".inst 0xe0af8769 // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+ ".inst 0xe0ae836d // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+ "addvl x27, x27, #4\n"
"add x12, x12, #0x2\n"
- "cmp x12, x15\n"
- "addvl x21, x21, #4\n"
+ "cmp x12, x9\n"
"blt 6b\n"
"7:" // K loop: Main loop: Second: Tail
- ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
- ".inst 0xe0162120 // ld1b { za0h.b[x13] }, p0/Z, [x9, x22]\n"
- ".inst 0x252d6140 // psel p0.b, p8.b/Z, p10.b[w13, #1]\n"
- ".inst 0xe0162341 // ld1b { za0h.b[x13, #1] }, p0/Z, [x26, x22]\n"
- ".inst 0x25656140 // psel p0.b, p8.b/Z, p10.b[w13, #4]\n"
- "mov x11, %x[in]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe0162304 // ld1b { za0h.b[x13, #4] }, p0/Z, [x24, x22]\n"
- "add x10, %x[in], x16, LSL #3\n"
- ".inst 0x256d6141 // psel p1.b, p8.b/Z, p10.b[w13, #5]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe01626e5 // ld1b { za0h.b[x13, #5] }, p1/Z, [x23, x22]\n"
- "ldr x24, [x11, #0x8]\n"
- ".inst 0x25306d22 // psel p2.s, p11.s/Z, p9.s[w12]\n"
- "ldr x23, [x10, #0x8]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d21 // psel p1.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xe0b08aac // st1w { za3v.s[x12] }, p2/Z, [x21, x16, LSL #2]\n"
- "whilelt p9.b, x14, %x[width]\n"
- "subs x20, x20, #0x1\n"
- ".inst 0xe0bc86a9 // st1w { za2v.s[x12, #1] }, p1/Z, [x21, x28, LSL #2]\n"
- "add x11, x11, #0x10\n"
- "add x10, x10, #0x10\n"
- ".inst 0xe0bb82ad // st1w { za3v.s[x12, #1] }, p0/Z, [x21, x27, LSL #2]\n"
- "addvl x21, x21, #4\n"
- "incb x14\n"
- "incb x22\n"
+ "mov x25, %x[in]\n"
+ "add x24, %x[in], x16, LSL #3\n"
+ ".inst 0x25256140 // dup p0.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0xe01c22e0 // ld1b { za0h.b[x13] }, p0/Z, [x23, x28]\n"
+ ".inst 0x252d6140 // dup p0.b, p8.b/Z, p10.b[w13, #1]\n"
+ ".inst 0x25656141 // dup p1.b, p8.b/Z, p10.b[w13, #4]\n"
+ ".inst 0xe01c22c1 // ld1b { za0h.b[x13, #1] }, p0/Z, [x22, x28]\n"
+ ".inst 0x256d6140 // dup p0.b, p8.b/Z, p10.b[w13, #5]\n"
+ "ldr x23, [x25, #0x0]\n"
+ ".inst 0xe01c26a4 // ld1b { za0h.b[x13, #4] }, p1/Z, [x21, x28]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ ".inst 0xe01c2285 // ld1b { za0h.b[x13, #5] }, p0/Z, [x20, x28]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8368 // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0x25706d21 // dup p1.s, p11.s/Z, p9.s[w12, #1]\n"
+ ".inst 0xe0b0836c // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ ".inst 0x25706d20 // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ "whilelt p9.b, x26, %x[width]\n"
+ ".inst 0xe0af8769 // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+ "incb x26\n"
+ "incb x28\n"
+ ".inst 0xe0ae836d // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+ "addvl x27, x27, #4\n"
+ "subs x19, x19, #0x1\n"
"bgt 3b\n"
"8:" // K loop: Tails
- "cbnz x25, 11f\n"
- "mov x11, %x[in]\n"
- "whilelt p8.b, x14, %x[width]\n"
+ "cbnz x11, 11f\n"
+ "mov x25, %x[in]\n"
+ "whilelt p8.b, x26, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
"9:" // K loop: Tails: Even: First
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0b082a4 // st1w { za1v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe0162122 // ld1b { za0h.b[x13, #2] }, p0/Z, [x9, x22]\n"
- "ldr x26, [x11, x16, LSL #0x3]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0x25356141 // dup p1.b, p8.b/Z, p10.b[w13, #2]\n"
+ ".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ ".inst 0x253d6140 // dup p0.b, p8.b/Z, p10.b[w13, #3]\n"
+ "addvl x27, x27, #2\n"
+ "ldr x23, [x25, #0x0]\n"
+ ".inst 0xe01c26e2 // ld1b { za0h.b[x13, #2] }, p1/Z, [x23, x28]\n"
+ "ldr x22, [x25, x16, LSL #0x3]\n"
+ ".inst 0xe01c22c3 // ld1b { za0h.b[x13, #3] }, p0/Z, [x22, x28]\n"
+ "add x25, x25, #0x8\n"
+ "add x13, x13, #0x4\n"
"add x12, x12, #0x1\n"
- ".inst 0x253d6140 // psel p0.b, p8.b/Z, p10.b[w13, #3]\n"
- ".inst 0xe0162343 // ld1b { za0h.b[x13, #3] }, p0/Z, [x26, x22]\n"
"cmp x12, x16\n"
- "add x11, x11, #0x8\n"
- "addvl x21, x21, #2\n"
- "add x13, x13, #0x4\n"
"blt 9b\n"
- "whilelt p9.b, x14, %x[width]\n"
- "whilelt p8.b, x14, %x[width]\n"
- "mov x20, #0x0\n"
+ "whilelt p9.b, x26, %x[width]\n"
+ "whilelt p8.b, x26, %x[width]\n"
+ "mov x19, #0x0\n"
"mov x12, #0x0\n"
"10:" // K loop: Tails: Even: Second
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0b082ac // st1w { za3v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8368 // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ "add x19, x19, #0x4\n"
+ ".inst 0xe0b0836c // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ "addvl x27, x27, #2\n"
"add x12, x12, #0x1\n"
- "cmp x12, x17\n"
- "addvl x21, x21, #2\n"
- "add x20, x20, #0x4\n"
+ "cmp x12, x10\n"
"blt 10b\n"
- "whilelt p9.b, x14, %x[width]\n"
+ "whilelt p9.b, x26, %x[width]\n"
"b 13f\n"
"11:" // K loop: Tails: Odd
"mov x12, #0x0\n"
"12:" // K loop: Tails: Odd: Loop
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0b082a4 // st1w { za1v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ "addvl x27, x27, #2\n"
"add x12, x12, #0x1\n"
- "cmp x12, x17\n"
- "addvl x21, x21, #2\n"
+ "cmp x12, x10\n"
"blt 12b\n"
"13:" // K loop: End
- "mov %x[out], x21\n"
+ "mov %x[out], x27\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p8", "p9", "p10", "p11", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
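
In the prologue above, two WHILELT predicates over height * 2 are interleaved by ZIP1 into p10, which then gates the per-row loads in the charge and main loops. A scalar reconstruction of which byte lane ends up active, assuming standard SVE semantics (WHILELT Pd.b, Xn, Xm activates element e when Xn + e < Xm; ZIP1 interleaves the low halves of its sources); vl_words stands in for CNTW and the function name is illustrative:

#include <cstddef>

// Which lane of p10.b = zip1(p9.b, p8.b) is active, modelled per-lane.
static bool p10_lane_active(std::size_t lane, std::size_t height, std::size_t vl_words)
{
    const std::size_t h2 = 2 * height;  // "height * 2" from the prologue
    const std::size_t e  = lane / 2;    // source element feeding this zipped lane
    return (lane % 2 == 0) ? e < h2                    // even lanes come from p9 (whilelt from 0)
                           : 2 * vl_words + e < h2;    // odd lanes come from p8 (whilelt from 2*CNTW)
}
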
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_s8_s8_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_s8_s8_summing.hpp
index 25262d3db9..81cde6c8ee 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_s8_s8_summing.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_s8_s8_summing.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#if defined(__ARM_FEATURE_SVE)
@@ -32,321 +32,321 @@ void interleave_block<2, 4, VLType::SME, true>(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cntb x21\n"
- "mov x23, %x[width]\n"
"mov z20.b, #0x1\n"
- "incb x23\n"
- "mov x20, %x[width]\n"
"mov z19.s, #0x0\n"
+ "cntb x20\n"
"mov z18.s, #0x0\n"
- "sub x17, x21, #0x1\n"
"cntw x16\n"
- "sub x23, x23, #0x1\n"
- "ands x17, x20, x17\n"
- "udiv x23, x23, x21\n" // n_passes = ceildiv(width, VL<T>)
- "csel x17, x17, x21, NE\n"
- "lsl x22, %x[height], #0x1\n" // height * 2
- "lsl x21, x16, #0x1\n"
- "sub x20, x23, #0x1\n"
- "add x17, x17, #0x3\n"
- "whilelt p9.b, XZR, x22\n"
- "whilelt p8.b, x21, x22\n"
- "mov x15, #0x0\n"
- "cntw x14, ALL, MUL #2\n"
- "cntw x11, ALL, MUL #3\n"
- "ptrue p4.b\n"
- "lsr x20, x20, #0x1\n" // n_loops = (n_passes - 1) / 2
- "and x10, x23, #0x1\n" // odd_tail = bool(n_passes & 0x1)
- "lsr x17, x17, #0x2\n"
+ "cntw x15, ALL, MUL #2\n"
+ "cntw x14, ALL, MUL #3\n"
+ "ptrue p2.b\n"
+ "mov x19, %x[width]\n"
+ "incb x19\n"
+ "sub x19, x19, #0x1\n"
+ "udiv x19, x19, x20\n" // n_passes = ceildiv(width, VL<T>)
+ "sub x13, x19, #0x1\n"
+ "lsr x13, x13, #0x1\n" // n_loops = (n_passes - 1) / 2
+ "and x11, x19, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+ "mov x19, %x[width]\n"
+ "sub x10, x20, #0x1\n"
+ "ands x10, x19, x10\n"
+ "csel x10, x10, x20, NE\n"
+ "add x10, x10, #0x3\n"
+ "lsr x10, x10, #0x2\n"
"sub x9, x16, #0x2\n"
"ptrue p11.s\n"
+ "lsl x20, %x[height], #0x1\n" // height * 2
+ "lsl x19, x16, #0x1\n"
+ "whilelt p9.b, XZR, x20\n"
+ "whilelt p8.b, x19, x20\n"
"zip1 p10.b, p9.b, p8.b\n"
"mov x28, %x[row_offset]\n"
"mov x27, %x[out]\n"
- "whilelt p9.b, x15, %x[width]\n"
- "whilelt p8.b, x15, %x[width]\n"
+ "mov x26, #0x0\n"
+ "whilelt p9.b, x26, %x[width]\n"
+ "whilelt p8.b, x26, %x[width]\n"
"cbnz %x[first], 1f\n"
"addvl x27, x27, #-2\n"
- "ld1w { z19.s }, p4/Z, [x27]\n"
- "ld1w { z18.s }, p4/Z, [x27, #1, MUL VL]\n"
+ "ld1w { z19.s }, p2/Z, [x27]\n"
+ "ld1w { z18.s }, p2/Z, [x27, #1, MUL VL]\n"
"1:" // K loop: Load row sums: End
- "mov x26, %x[in]\n"
- "add x25, %x[in], x16, LSL #3\n"
- "ldr x24, [x26, #0x0]\n"
+ "mov x25, %x[in]\n"
+ "add x24, %x[in], x16, LSL #3\n"
"ldr x23, [x25, #0x0]\n"
- "mov x12, #0x0\n"
- "ldr x22, [x26, #0x8]\n"
- "add x26, x26, #0x10\n"
+ "ldr x22, [x24, #0x0]\n"
"ldr x21, [x25, #0x8]\n"
+ "ldr x20, [x24, #0x8]\n"
"add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "mov x12, #0x0\n"
"cbz x9, 3f\n"
"2:" // K loop: Charge: Loop
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe01c0300 // ld1b { za0h.b[x12] }, p0/Z, [x24, x28]\n"
- ".inst 0x252c6140 // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
- "ldr x24, [x26, #0x0]\n"
- ".inst 0xe01c02e1 // ld1b { za0h.b[x12, #1] }, p0/Z, [x23, x28]\n"
- ".inst 0x25646141 // psel p1.b, p8.b/Z, p10.b[w12, #4]\n"
- ".inst 0x256c6140 // psel p0.b, p8.b/Z, p10.b[w12, #5]\n"
+ ".inst 0x25246140 // dup p0.b, p8.b/Z, p10.b[w12]\n"
+ ".inst 0xe01c02e0 // ld1b { za0h.b[x12] }, p0/Z, [x23, x28]\n"
+ ".inst 0x252c6140 // dup p0.b, p8.b/Z, p10.b[w12, #1]\n"
+ ".inst 0x25646141 // dup p1.b, p8.b/Z, p10.b[w12, #4]\n"
+ ".inst 0xe01c02c1 // ld1b { za0h.b[x12, #1] }, p0/Z, [x22, x28]\n"
+ ".inst 0x256c6140 // dup p0.b, p8.b/Z, p10.b[w12, #5]\n"
"ldr x23, [x25, #0x0]\n"
- ".inst 0xe01c06c4 // ld1b { za0h.b[x12, #4] }, p1/Z, [x22, x28]\n"
- "ldr x22, [x26, #0x8]\n"
- "add x26, x26, #0x10\n"
- ".inst 0xe01c02a5 // ld1b { za0h.b[x12, #5] }, p0/Z, [x21, x28]\n"
- "add x12, x12, #0x8\n"
- "cmp x12, x9, LSL #2\n"
+ ".inst 0xe01c06a4 // ld1b { za0h.b[x12, #4] }, p1/Z, [x21, x28]\n"
+ "ldr x22, [x24, #0x0]\n"
"ldr x21, [x25, #0x8]\n"
+ ".inst 0xe01c0285 // ld1b { za0h.b[x12, #5] }, p0/Z, [x20, x28]\n"
+ "ldr x20, [x24, #0x8]\n"
"add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "add x12, x12, #0x8\n"
+ "cmp x12, x9, LSL #2\n"
"blt 2b\n"
"3:" // K loop: Charge: End
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe01c0300 // ld1b { za0h.b[x12] }, p0/Z, [x24, x28]\n"
- ".inst 0x252c6140 // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
- ".inst 0xe01c02e1 // ld1b { za0h.b[x12, #1] }, p0/Z, [x23, x28]\n"
- ".inst 0x25646141 // psel p1.b, p8.b/Z, p10.b[w12, #4]\n"
- ".inst 0x256c6140 // psel p0.b, p8.b/Z, p10.b[w12, #5]\n"
- ".inst 0xe01c06c4 // ld1b { za0h.b[x12, #4] }, p1/Z, [x22, x28]\n"
- "mov x26, %x[in]\n"
- "add x25, %x[in], x16, LSL #3\n"
- "ldr x24, [x26, #0x0]\n"
- ".inst 0xe01c02a5 // ld1b { za0h.b[x12, #5] }, p0/Z, [x21, x28]\n"
+ ".inst 0x25246140 // dup p0.b, p8.b/Z, p10.b[w12]\n"
+ ".inst 0xe01c02e0 // ld1b { za0h.b[x12] }, p0/Z, [x23, x28]\n"
+ ".inst 0x252c6140 // dup p0.b, p8.b/Z, p10.b[w12, #1]\n"
+ ".inst 0x25646141 // dup p1.b, p8.b/Z, p10.b[w12, #4]\n"
+ ".inst 0xe01c02c1 // ld1b { za0h.b[x12, #1] }, p0/Z, [x22, x28]\n"
+ ".inst 0x256c6140 // dup p0.b, p8.b/Z, p10.b[w12, #5]\n"
+ "mov x25, %x[in]\n"
+ ".inst 0xe01c06a4 // ld1b { za0h.b[x12, #4] }, p1/Z, [x21, x28]\n"
+ "add x24, %x[in], x16, LSL #3\n"
"ldr x23, [x25, #0x0]\n"
- "incb x28\n"
- "incb x15\n"
- "ldr x22, [x26, #0x8]\n"
- "add x26, x26, #0x10\n"
+ ".inst 0xe01c0285 // ld1b { za0h.b[x12, #5] }, p0/Z, [x20, x28]\n"
+ "ldr x22, [x24, #0x0]\n"
"ldr x21, [x25, #0x8]\n"
+ "ldr x20, [x24, #0x8]\n"
"add x25, x25, #0x10\n"
- "cbz x20, 9f\n"
- "mov x20, x20\n"
+ "add x24, x24, #0x10\n"
+ "incb x28\n"
+ "incb x26\n"
+ "cbz x13, 9f\n"
+ "mov x19, x13\n"
"4:" // K loop: Main loop
- "whilelt p8.b, x15, %x[width]\n"
+ "whilelt p8.b, x26, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
"cbz x9, 6f\n"
"5:" // K loop: Main loop: First: Loop
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe01c2302 // ld1b { za0h.b[x13, #2] }, p0/Z, [x24, x28]\n"
- ".inst 0x253d6140 // psel p0.b, p8.b/Z, p10.b[w13, #3]\n"
- "ldr x24, [x26, #0x0]\n"
- ".inst 0xe01c22e3 // ld1b { za0h.b[x13, #3] }, p0/Z, [x23, x28]\n"
- ".inst 0x25756140 // psel p0.b, p8.b/Z, p10.b[w13, #6]\n"
- ".inst 0x257d6142 // psel p2.b, p8.b/Z, p10.b[w13, #7]\n"
+ ".inst 0x25356140 // dup p0.b, p8.b/Z, p10.b[w13, #2]\n"
+ ".inst 0xe01c22e2 // ld1b { za0h.b[x13, #2] }, p0/Z, [x23, x28]\n"
+ ".inst 0x253d6140 // dup p0.b, p8.b/Z, p10.b[w13, #3]\n"
+ ".inst 0x25756141 // dup p1.b, p8.b/Z, p10.b[w13, #6]\n"
+ ".inst 0xe01c22c3 // ld1b { za0h.b[x13, #3] }, p0/Z, [x22, x28]\n"
+ ".inst 0x257d6140 // dup p0.b, p8.b/Z, p10.b[w13, #7]\n"
"ldr x23, [x25, #0x0]\n"
- ".inst 0xe01c22c6 // ld1b { za0h.b[x13, #6] }, p0/Z, [x22, x28]\n"
- ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
- "ldr x22, [x26, #0x8]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe01c2aa7 // ld1b { za0h.b[x13, #7] }, p2/Z, [x21, x28]\n"
+ ".inst 0xe01c26a6 // ld1b { za0h.b[x13, #6] }, p1/Z, [x21, x28]\n"
+ "ldr x22, [x24, #0x0]\n"
"ldr x21, [x25, #0x8]\n"
- ".inst 0xe0bf8760 // st1w { za0v.s[x12] }, p1/Z, [x27, XZR, LSL #2]\n"
- ".inst 0xc0829010 // mova z16.s, p4/M, za0v.s[x12]\n"
- ".inst 0xc0829091 // mova z17.s, p4/M, za1v.s[x12]\n"
- "sdot z19.s, z16.b, z20.b\n"
+ ".inst 0xe01c2287 // ld1b { za0h.b[x13, #7] }, p0/Z, [x20, x28]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0xc0828811 // mova z17.s, p2/M, za0v.s[x12]\n"
+ ".inst 0xc0828890 // mova z16.s, p2/M, za1v.s[x12]\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ "sdot z19.s, z17.b, z20.b\n"
+ ".inst 0xc0828831 // mova z17.s, p2/M, za0v.s[x12, #1]\n"
+ "sdot z18.s, z16.b, z20.b\n"
+ ".inst 0xc08288b0 // mova z16.s, p2/M, za1v.s[x12, #1]\n"
".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- "sdot z18.s, z17.b, z20.b\n"
- ".inst 0xe0ae8361 // st1w { za0v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xc0829030 // mova z16.s, p4/M, za0v.s[x12, #1]\n"
- ".inst 0xe0ab8365 // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x11, LSL #2]\n"
- ".inst 0xc08290b1 // mova z17.s, p4/M, za1v.s[x12, #1]\n"
+ ".inst 0x25706d21 // dup p1.s, p11.s/Z, p9.s[w12, #1]\n"
+ ".inst 0x25706d20 // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ "add x13, x13, #0x8\n"
+ ".inst 0xe0af8761 // st1w { za0v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+ "sdot z19.s, z17.b, z20.b\n"
+ "sdot z18.s, z16.b, z20.b\n"
+ ".inst 0xe0ae8365 // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+ "addvl x27, x27, #4\n"
"add x12, x12, #0x2\n"
"cmp x12, x9\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
- "sdot z19.s, z16.b, z20.b\n"
- "sdot z18.s, z17.b, z20.b\n"
- "addvl x27, x27, #4\n"
- "add x13, x13, #0x8\n"
"blt 5b\n"
"6:" // K loop: Main loop: First: Tail
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe01c2302 // ld1b { za0h.b[x13, #2] }, p0/Z, [x24, x28]\n"
- ".inst 0x253d6140 // psel p0.b, p8.b/Z, p10.b[w13, #3]\n"
- ".inst 0xe01c22e3 // ld1b { za0h.b[x13, #3] }, p0/Z, [x23, x28]\n"
- ".inst 0x25756141 // psel p1.b, p8.b/Z, p10.b[w13, #6]\n"
- ".inst 0x257d6140 // psel p0.b, p8.b/Z, p10.b[w13, #7]\n"
- ".inst 0xe01c26c6 // ld1b { za0h.b[x13, #6] }, p1/Z, [x22, x28]\n"
- "mov x26, %x[in]\n"
- "add x25, %x[in], x16, LSL #3\n"
- "ldr x24, [x26, #0x0]\n"
- ".inst 0xe01c22a7 // ld1b { za0h.b[x13, #7] }, p0/Z, [x21, x28]\n"
- ".inst 0xc0829010 // mova z16.s, p4/M, za0v.s[x12]\n"
- ".inst 0x25306d23 // psel p3.s, p11.s/Z, p9.s[w12]\n"
- "sdot z19.s, z16.b, z20.b\n"
- ".inst 0xc0829091 // mova z17.s, p4/M, za1v.s[x12]\n"
- "sdot z18.s, z17.b, z20.b\n"
+ "mov x25, %x[in]\n"
+ "add x24, %x[in], x16, LSL #3\n"
+ ".inst 0x25356140 // dup p0.b, p8.b/Z, p10.b[w13, #2]\n"
+ ".inst 0xe01c22e2 // ld1b { za0h.b[x13, #2] }, p0/Z, [x23, x28]\n"
+ ".inst 0x253d6140 // dup p0.b, p8.b/Z, p10.b[w13, #3]\n"
+ ".inst 0x25756141 // dup p1.b, p8.b/Z, p10.b[w13, #6]\n"
+ ".inst 0xe01c22c3 // ld1b { za0h.b[x13, #3] }, p0/Z, [x22, x28]\n"
+ ".inst 0x257d6140 // dup p0.b, p8.b/Z, p10.b[w13, #7]\n"
"ldr x23, [x25, #0x0]\n"
- ".inst 0x25306d22 // psel p2.s, p11.s/Z, p9.s[w12]\n"
- "ldr x22, [x26, #0x8]\n"
- ".inst 0x25706d21 // psel p1.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xc0829030 // mova z16.s, p4/M, za0v.s[x12, #1]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ ".inst 0xe01c26a6 // ld1b { za0h.b[x13, #6] }, p1/Z, [x21, x28]\n"
+ "ldr x22, [x24, #0x0]\n"
"ldr x21, [x25, #0x8]\n"
- ".inst 0xe0bf8f60 // st1w { za0v.s[x12] }, p3/Z, [x27, XZR, LSL #2]\n"
- ".inst 0xc08290b1 // mova z17.s, p4/M, za1v.s[x12, #1]\n"
- "whilelt p9.b, x15, %x[width]\n"
- ".inst 0xe0b08b64 // st1w { za1v.s[x12] }, p2/Z, [x27, x16, LSL #2]\n"
- "incb x15\n"
- "add x26, x26, #0x10\n"
- "sdot z19.s, z16.b, z20.b\n"
- ".inst 0xe0ae8761 // st1w { za0v.s[x12, #1] }, p1/Z, [x27, x14, LSL #2]\n"
+ ".inst 0xe01c2287 // ld1b { za0h.b[x13, #7] }, p0/Z, [x20, x28]\n"
+ "ldr x20, [x24, #0x8]\n"
"add x25, x25, #0x10\n"
- "sdot z18.s, z17.b, z20.b\n"
+ ".inst 0xc0828811 // mova z17.s, p2/M, za0v.s[x12]\n"
+ ".inst 0xc0828890 // mova z16.s, p2/M, za1v.s[x12]\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ "sdot z19.s, z17.b, z20.b\n"
+ ".inst 0xc0828831 // mova z17.s, p2/M, za0v.s[x12, #1]\n"
+ "sdot z18.s, z16.b, z20.b\n"
+ ".inst 0xc08288b0 // mova z16.s, p2/M, za1v.s[x12, #1]\n"
+ ".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ ".inst 0x25706d21 // dup p1.s, p11.s/Z, p9.s[w12, #1]\n"
+ ".inst 0x25706d20 // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ "whilelt p9.b, x26, %x[width]\n"
+ ".inst 0xe0af8761 // st1w { za0v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+ "sdot z19.s, z17.b, z20.b\n"
+ "incb x26\n"
+ "sdot z18.s, z16.b, z20.b\n"
"incb x28\n"
- ".inst 0xe0ab8365 // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x11, LSL #2]\n"
+ ".inst 0xe0ae8365 // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
"addvl x27, x27, #4\n"
- "whilelt p8.b, x15, %x[width]\n"
+ "whilelt p8.b, x26, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
"cbz x9, 8f\n"
"7:" // K loop: Main loop: Second: Loop
- ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
- ".inst 0xe01c2300 // ld1b { za0h.b[x13] }, p0/Z, [x24, x28]\n"
- ".inst 0x252d6140 // psel p0.b, p8.b/Z, p10.b[w13, #1]\n"
- "ldr x24, [x26, #0x0]\n"
- ".inst 0xe01c22e1 // ld1b { za0h.b[x13, #1] }, p0/Z, [x23, x28]\n"
- ".inst 0x25656140 // psel p0.b, p8.b/Z, p10.b[w13, #4]\n"
- ".inst 0x256d6142 // psel p2.b, p8.b/Z, p10.b[w13, #5]\n"
+ ".inst 0x25256140 // dup p0.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0xe01c22e0 // ld1b { za0h.b[x13] }, p0/Z, [x23, x28]\n"
+ ".inst 0x252d6140 // dup p0.b, p8.b/Z, p10.b[w13, #1]\n"
+ ".inst 0x25656141 // dup p1.b, p8.b/Z, p10.b[w13, #4]\n"
+ ".inst 0xe01c22c1 // ld1b { za0h.b[x13, #1] }, p0/Z, [x22, x28]\n"
+ ".inst 0x256d6140 // dup p0.b, p8.b/Z, p10.b[w13, #5]\n"
"ldr x23, [x25, #0x0]\n"
- ".inst 0xe01c22c4 // ld1b { za0h.b[x13, #4] }, p0/Z, [x22, x28]\n"
- ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
- "ldr x22, [x26, #0x8]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe01c2aa5 // ld1b { za0h.b[x13, #5] }, p2/Z, [x21, x28]\n"
+ ".inst 0xe01c26a4 // ld1b { za0h.b[x13, #4] }, p1/Z, [x21, x28]\n"
+ "ldr x22, [x24, #0x0]\n"
"ldr x21, [x25, #0x8]\n"
- ".inst 0xe0bf8768 // st1w { za2v.s[x12] }, p1/Z, [x27, XZR, LSL #2]\n"
- ".inst 0xc0829110 // mova z16.s, p4/M, za2v.s[x12]\n"
- ".inst 0xc0829191 // mova z17.s, p4/M, za3v.s[x12]\n"
- "sdot z19.s, z16.b, z20.b\n"
+ ".inst 0xe01c2285 // ld1b { za0h.b[x13, #5] }, p0/Z, [x20, x28]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0xc0828911 // mova z17.s, p2/M, za2v.s[x12]\n"
+ ".inst 0xc0828990 // mova z16.s, p2/M, za3v.s[x12]\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8368 // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ "sdot z19.s, z17.b, z20.b\n"
+ ".inst 0xc0828931 // mova z17.s, p2/M, za2v.s[x12, #1]\n"
+ "sdot z18.s, z16.b, z20.b\n"
+ ".inst 0xc08289b0 // mova z16.s, p2/M, za3v.s[x12, #1]\n"
".inst 0xe0b0836c // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- "sdot z18.s, z17.b, z20.b\n"
- ".inst 0xe0ae8369 // st1w { za2v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xc0829130 // mova z16.s, p4/M, za2v.s[x12, #1]\n"
- ".inst 0xe0ab836d // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x11, LSL #2]\n"
- ".inst 0xc08291b1 // mova z17.s, p4/M, za3v.s[x12, #1]\n"
+ ".inst 0x25706d21 // dup p1.s, p11.s/Z, p9.s[w12, #1]\n"
+ ".inst 0x25706d20 // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ "add x13, x13, #0x8\n"
+ ".inst 0xe0af8769 // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+ "sdot z19.s, z17.b, z20.b\n"
+ "sdot z18.s, z16.b, z20.b\n"
+ ".inst 0xe0ae836d // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+ "addvl x27, x27, #4\n"
"add x12, x12, #0x2\n"
"cmp x12, x9\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
- "sdot z19.s, z16.b, z20.b\n"
- "sdot z18.s, z17.b, z20.b\n"
- "addvl x27, x27, #4\n"
- "add x13, x13, #0x8\n"
"blt 7b\n"
"8:" // K loop: Main loop: Second: Tail
- ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
- ".inst 0xe01c2300 // ld1b { za0h.b[x13] }, p0/Z, [x24, x28]\n"
- ".inst 0x252d6140 // psel p0.b, p8.b/Z, p10.b[w13, #1]\n"
- ".inst 0xe01c22e1 // ld1b { za0h.b[x13, #1] }, p0/Z, [x23, x28]\n"
- ".inst 0x25656141 // psel p1.b, p8.b/Z, p10.b[w13, #4]\n"
- ".inst 0x256d6140 // psel p0.b, p8.b/Z, p10.b[w13, #5]\n"
- ".inst 0xe01c26c4 // ld1b { za0h.b[x13, #4] }, p1/Z, [x22, x28]\n"
- "mov x26, %x[in]\n"
- "add x25, %x[in], x16, LSL #3\n"
- "ldr x24, [x26, #0x0]\n"
- ".inst 0xe01c22a5 // ld1b { za0h.b[x13, #5] }, p0/Z, [x21, x28]\n"
- ".inst 0xc0829110 // mova z16.s, p4/M, za2v.s[x12]\n"
- ".inst 0x25306d23 // psel p3.s, p11.s/Z, p9.s[w12]\n"
- "sdot z19.s, z16.b, z20.b\n"
- ".inst 0xc0829191 // mova z17.s, p4/M, za3v.s[x12]\n"
- "sdot z18.s, z17.b, z20.b\n"
+ "mov x25, %x[in]\n"
+ "add x24, %x[in], x16, LSL #3\n"
+ ".inst 0x25256140 // dup p0.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0xe01c22e0 // ld1b { za0h.b[x13] }, p0/Z, [x23, x28]\n"
+ ".inst 0x252d6140 // dup p0.b, p8.b/Z, p10.b[w13, #1]\n"
+ ".inst 0x25656141 // dup p1.b, p8.b/Z, p10.b[w13, #4]\n"
+ ".inst 0xe01c22c1 // ld1b { za0h.b[x13, #1] }, p0/Z, [x22, x28]\n"
+ ".inst 0x256d6140 // dup p0.b, p8.b/Z, p10.b[w13, #5]\n"
"ldr x23, [x25, #0x0]\n"
- ".inst 0x25306d22 // psel p2.s, p11.s/Z, p9.s[w12]\n"
- "ldr x22, [x26, #0x8]\n"
- ".inst 0x25706d21 // psel p1.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xc0829130 // mova z16.s, p4/M, za2v.s[x12, #1]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ ".inst 0xe01c26a4 // ld1b { za0h.b[x13, #4] }, p1/Z, [x21, x28]\n"
+ "ldr x22, [x24, #0x0]\n"
"ldr x21, [x25, #0x8]\n"
- ".inst 0xe0bf8f68 // st1w { za2v.s[x12] }, p3/Z, [x27, XZR, LSL #2]\n"
- ".inst 0xc08291b1 // mova z17.s, p4/M, za3v.s[x12, #1]\n"
- "whilelt p9.b, x15, %x[width]\n"
- ".inst 0xe0b08b6c // st1w { za3v.s[x12] }, p2/Z, [x27, x16, LSL #2]\n"
- "subs x20, x20, #0x1\n"
- "add x26, x26, #0x10\n"
- "sdot z19.s, z16.b, z20.b\n"
- ".inst 0xe0ae8769 // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x14, LSL #2]\n"
+ ".inst 0xe01c2285 // ld1b { za0h.b[x13, #5] }, p0/Z, [x20, x28]\n"
+ "ldr x20, [x24, #0x8]\n"
"add x25, x25, #0x10\n"
- "sdot z18.s, z17.b, z20.b\n"
- "incb x15\n"
- ".inst 0xe0ab836d // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x11, LSL #2]\n"
- "addvl x27, x27, #4\n"
+ ".inst 0xc0828911 // mova z17.s, p2/M, za2v.s[x12]\n"
+ ".inst 0xc0828990 // mova z16.s, p2/M, za3v.s[x12]\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8368 // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ "sdot z19.s, z17.b, z20.b\n"
+ ".inst 0xc0828931 // mova z17.s, p2/M, za2v.s[x12, #1]\n"
+ "sdot z18.s, z16.b, z20.b\n"
+ ".inst 0xc08289b0 // mova z16.s, p2/M, za3v.s[x12, #1]\n"
+ ".inst 0xe0b0836c // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ ".inst 0x25706d21 // dup p1.s, p11.s/Z, p9.s[w12, #1]\n"
+ ".inst 0x25706d20 // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ "whilelt p9.b, x26, %x[width]\n"
+ ".inst 0xe0af8769 // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+ "sdot z19.s, z17.b, z20.b\n"
+ "incb x26\n"
+ "sdot z18.s, z16.b, z20.b\n"
"incb x28\n"
+ ".inst 0xe0ae836d // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+ "addvl x27, x27, #4\n"
+ "subs x19, x19, #0x1\n"
"bgt 4b\n"
"9:" // K loop: Tails
- "cbnz x10, 12f\n"
- "mov x26, %x[in]\n"
- "whilelt p8.b, x15, %x[width]\n"
+ "cbnz x11, 12f\n"
+ "mov x25, %x[in]\n"
+ "whilelt p8.b, x26, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
"10:" // K loop: Tails: Even: First
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xc0828811 // mova z17.s, p2/M, za0v.s[x12]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xc0829010 // mova z16.s, p4/M, za0v.s[x12]\n"
+ ".inst 0xc0828890 // mova z16.s, p2/M, za1v.s[x12]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0x25356141 // dup p1.b, p8.b/Z, p10.b[w13, #2]\n"
".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
- "ldr x24, [x26, #0x0]\n"
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xc0829091 // mova z17.s, p4/M, za1v.s[x12]\n"
- "ldr x23, [x26, x16, LSL #0x3]\n"
- ".inst 0xe01c2302 // ld1b { za0h.b[x13, #2] }, p0/Z, [x24, x28]\n"
- "add x12, x12, #0x1\n"
- ".inst 0x253d6140 // psel p0.b, p8.b/Z, p10.b[w13, #3]\n"
- "cmp x12, x16\n"
- "sdot z19.s, z16.b, z20.b\n"
- "sdot z18.s, z17.b, z20.b\n"
- ".inst 0xe01c22e3 // ld1b { za0h.b[x13, #3] }, p0/Z, [x23, x28]\n"
- "add x26, x26, #0x8\n"
+ ".inst 0x253d6140 // dup p0.b, p8.b/Z, p10.b[w13, #3]\n"
+ "sdot z19.s, z17.b, z20.b\n"
+ "sdot z18.s, z16.b, z20.b\n"
+ "ldr x23, [x25, #0x0]\n"
+ ".inst 0xe01c26e2 // ld1b { za0h.b[x13, #2] }, p1/Z, [x23, x28]\n"
+ "ldr x22, [x25, x16, LSL #0x3]\n"
"addvl x27, x27, #2\n"
+ ".inst 0xe01c22c3 // ld1b { za0h.b[x13, #3] }, p0/Z, [x22, x28]\n"
+ "add x25, x25, #0x8\n"
"add x13, x13, #0x4\n"
+ "add x12, x12, #0x1\n"
+ "cmp x12, x16\n"
"blt 10b\n"
- "whilelt p9.b, x15, %x[width]\n"
- "whilelt p8.b, x15, %x[width]\n"
- "mov x20, #0x0\n"
+ "whilelt p9.b, x26, %x[width]\n"
+ "whilelt p8.b, x26, %x[width]\n"
+ "mov x19, #0x0\n"
"mov x12, #0x0\n"
"11:" // K loop: Tails: Even: Second
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xc0828911 // mova z17.s, p2/M, za2v.s[x12]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
".inst 0xe0bf8368 // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xc0829110 // mova z16.s, p4/M, za2v.s[x12]\n"
+ ".inst 0xc0828990 // mova z16.s, p2/M, za3v.s[x12]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ "add x19, x19, #0x4\n"
".inst 0xe0b0836c // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
- ".inst 0xc0829191 // mova z17.s, p4/M, za3v.s[x12]\n"
- "add x12, x12, #0x1\n"
- "cmp x12, x17\n"
- "sdot z19.s, z16.b, z20.b\n"
- "sdot z18.s, z17.b, z20.b\n"
"addvl x27, x27, #2\n"
- "add x20, x20, #0x4\n"
+ "sdot z19.s, z17.b, z20.b\n"
+ "sdot z18.s, z16.b, z20.b\n"
+ "add x12, x12, #0x1\n"
+ "cmp x12, x10\n"
"blt 11b\n"
- "whilelt p9.b, x15, %x[width]\n"
+ "whilelt p9.b, x26, %x[width]\n"
"b 14f\n"
"12:" // K loop: Tails: Odd
"mov x12, #0x0\n"
"13:" // K loop: Tails: Odd: Loop
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xc0828811 // mova z17.s, p2/M, za0v.s[x12]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xc0829010 // mova z16.s, p4/M, za0v.s[x12]\n"
+ ".inst 0xc0828890 // mova z16.s, p2/M, za1v.s[x12]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
- ".inst 0xc0829091 // mova z17.s, p4/M, za1v.s[x12]\n"
- "add x12, x12, #0x1\n"
- "cmp x12, x17\n"
- "sdot z19.s, z16.b, z20.b\n"
- "sdot z18.s, z17.b, z20.b\n"
"addvl x27, x27, #2\n"
+ "sdot z19.s, z17.b, z20.b\n"
+ "sdot z18.s, z16.b, z20.b\n"
+ "add x12, x12, #0x1\n"
+ "cmp x12, x10\n"
"blt 13b\n"
"14:" // K loop: End
- "st1w { z19.s }, p4, [x27]\n"
- "st1w { z18.s }, p4, [x27, #1, MUL VL]\n"
+ "st1w { z19.s }, p2, [x27]\n"
+ "st1w { z18.s }, p2, [x27, #1, MUL VL]\n"
"addvl x27, x27, #2\n"
"mov %x[out], x27\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p8", "p9", "p10", "p11", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
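
The _summing variant above keeps running row sums in z19/z18 by SDOT-ing each freshly staged tile against z20, which is set to all ones, and writes them out with the final ST1W pair. A scalar model of one such SDOT step, assuming z20.b == 1 throughout; the function name and pointer layout are illustrative:

#include <cstddef>
#include <cstdint>

// One "sdot zacc.s, zsrc.b, zones.b" step: each int32 lane accumulates the
// sum of the four int8 values beneath it (the multiply by 1 is a no-op).
static void sdot_against_ones(int32_t *acc, const int8_t *src, std::size_t s32_lanes)
{
    for (std::size_t lane = 0; lane < s32_lanes; ++lane)
        for (std::size_t b = 0; b < 4; ++b)
            acc[lane] += static_cast<int32_t>(src[4 * lane + b]);
}
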
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_u8_u8.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_u8_u8.hpp
index 683a315a96..cd4a76654b 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_u8_u8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_u8_u8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#if defined(__ARM_FEATURE_SVE)
@@ -34,265 +34,265 @@ void interleave_block<2, 4, VLType::SME, false>(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cntb x21\n"
- "mov x23, %x[width]\n"
- "incb x23\n"
- "mov x20, %x[width]\n"
- "sub x17, x21, #0x1\n"
+ "cntb x20\n"
"cntw x16\n"
- "sub x23, x23, #0x1\n"
- "ands x17, x20, x17\n"
- "udiv x23, x23, x21\n" // n_passes = ceildiv(width, VL<T>)
- "csel x17, x17, x21, NE\n"
- "lsl x22, %x[height], #0x1\n" // height * 2
- "lsl x21, x16, #0x1\n"
- "sub x20, x23, #0x1\n"
- "add x17, x17, #0x3\n"
- "sub x15, x16, #0x2\n"
- "whilelt p9.b, XZR, x22\n"
- "whilelt p8.b, x21, x22\n"
- "mov x14, #0x0\n"
- "mov x11, %x[in]\n"
- "add x10, %x[in], x16, LSL #3\n"
- "ldr x9, [x11, #0x0]\n"
- "cntw x28, ALL, MUL #2\n"
- "cntw x27, ALL, MUL #3\n"
- "ldr x26, [x10, #0x0]\n"
- "lsr x20, x20, #0x1\n" // n_loops = (n_passes - 1) / 2
- "and x25, x23, #0x1\n" // odd_tail = bool(n_passes & 0x1)
- "ldr x24, [x11, #0x8]\n"
- "lsr x17, x17, #0x2\n"
+ "cntw x15, ALL, MUL #2\n"
+ "cntw x14, ALL, MUL #3\n"
+ "mov x19, %x[width]\n"
+ "incb x19\n"
+ "sub x19, x19, #0x1\n"
+ "udiv x19, x19, x20\n" // n_passes = ceildiv(width, VL<T>)
+ "sub x13, x19, #0x1\n"
+ "lsr x13, x13, #0x1\n" // n_loops = (n_passes - 1) / 2
+ "and x11, x19, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+ "mov x19, %x[width]\n"
+ "sub x10, x20, #0x1\n"
+ "ands x10, x19, x10\n"
+ "csel x10, x10, x20, NE\n"
+ "add x10, x10, #0x3\n"
+ "lsr x10, x10, #0x2\n"
+ "sub x9, x16, #0x2\n"
"ptrue p11.s\n"
- "ldr x23, [x10, #0x8]\n"
+ "lsl x20, %x[height], #0x1\n" // height * 2
+ "lsl x19, x16, #0x1\n"
+ "whilelt p9.b, XZR, x20\n"
+ "whilelt p8.b, x19, x20\n"
"zip1 p10.b, p9.b, p8.b\n"
- "mov x22, %x[row_offset]\n"
- "mov x21, %x[out]\n"
- "whilelt p9.b, x14, %x[width]\n"
- "whilelt p8.b, x14, %x[width]\n"
- "add x11, x11, #0x10\n"
- "add x10, x10, #0x10\n"
+ "mov x28, %x[row_offset]\n"
+ "mov x27, %x[out]\n"
+ "mov x26, #0x0\n"
+ "whilelt p9.b, x26, %x[width]\n"
+ "whilelt p8.b, x26, %x[width]\n"
+ "mov x25, %x[in]\n"
+ "add x24, %x[in], x16, LSL #3\n"
+ "ldr x23, [x25, #0x0]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"mov x12, #0x0\n"
- "cbz x15, 2f\n"
+ "cbz x9, 2f\n"
"1:" // K loop: Charge: Loop
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe0160120 // ld1b { za0h.b[x12] }, p0/Z, [x9, x22]\n"
- ".inst 0x252c6140 // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe0160341 // ld1b { za0h.b[x12, #1] }, p0/Z, [x26, x22]\n"
- ".inst 0x25646141 // psel p1.b, p8.b/Z, p10.b[w12, #4]\n"
- ".inst 0x256c6140 // psel p0.b, p8.b/Z, p10.b[w12, #5]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0xe0160704 // ld1b { za0h.b[x12, #4] }, p1/Z, [x24, x22]\n"
- "ldr x24, [x11, #0x8]\n"
- "add x11, x11, #0x10\n"
- ".inst 0xe01602e5 // ld1b { za0h.b[x12, #5] }, p0/Z, [x23, x22]\n"
+ ".inst 0x25246140 // dup p0.b, p8.b/Z, p10.b[w12]\n"
+ ".inst 0xe01c02e0 // ld1b { za0h.b[x12] }, p0/Z, [x23, x28]\n"
+ ".inst 0x252c6140 // dup p0.b, p8.b/Z, p10.b[w12, #1]\n"
+ ".inst 0x25646141 // dup p1.b, p8.b/Z, p10.b[w12, #4]\n"
+ ".inst 0xe01c02c1 // ld1b { za0h.b[x12, #1] }, p0/Z, [x22, x28]\n"
+ ".inst 0x256c6140 // dup p0.b, p8.b/Z, p10.b[w12, #5]\n"
+ "ldr x23, [x25, #0x0]\n"
+ ".inst 0xe01c06a4 // ld1b { za0h.b[x12, #4] }, p1/Z, [x21, x28]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ ".inst 0xe01c0285 // ld1b { za0h.b[x12, #5] }, p0/Z, [x20, x28]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"add x12, x12, #0x8\n"
- "cmp x12, x15, LSL #2\n"
- "ldr x23, [x10, #0x8]\n"
- "add x10, x10, #0x10\n"
+ "cmp x12, x9, LSL #2\n"
"blt 1b\n"
"2:" // K loop: Charge: End
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe0160120 // ld1b { za0h.b[x12] }, p0/Z, [x9, x22]\n"
- ".inst 0x252c6140 // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
- ".inst 0xe0160341 // ld1b { za0h.b[x12, #1] }, p0/Z, [x26, x22]\n"
- ".inst 0x25646141 // psel p1.b, p8.b/Z, p10.b[w12, #4]\n"
- ".inst 0x256c6140 // psel p0.b, p8.b/Z, p10.b[w12, #5]\n"
- ".inst 0xe0160704 // ld1b { za0h.b[x12, #4] }, p1/Z, [x24, x22]\n"
- "mov x11, %x[in]\n"
- "add x10, %x[in], x16, LSL #3\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe01602e5 // ld1b { za0h.b[x12, #5] }, p0/Z, [x23, x22]\n"
- "ldr x26, [x10, #0x0]\n"
- "incb x22\n"
- "incb x14\n"
- "ldr x24, [x11, #0x8]\n"
- "add x11, x11, #0x10\n"
- "ldr x23, [x10, #0x8]\n"
- "add x10, x10, #0x10\n"
- "cbz x20, 8f\n"
- "mov x20, x20\n"
+ ".inst 0x25246140 // dup p0.b, p8.b/Z, p10.b[w12]\n"
+ ".inst 0xe01c02e0 // ld1b { za0h.b[x12] }, p0/Z, [x23, x28]\n"
+ ".inst 0x252c6140 // dup p0.b, p8.b/Z, p10.b[w12, #1]\n"
+ ".inst 0x25646141 // dup p1.b, p8.b/Z, p10.b[w12, #4]\n"
+ ".inst 0xe01c02c1 // ld1b { za0h.b[x12, #1] }, p0/Z, [x22, x28]\n"
+ ".inst 0x256c6140 // dup p0.b, p8.b/Z, p10.b[w12, #5]\n"
+ "mov x25, %x[in]\n"
+ ".inst 0xe01c06a4 // ld1b { za0h.b[x12, #4] }, p1/Z, [x21, x28]\n"
+ "add x24, %x[in], x16, LSL #3\n"
+ "ldr x23, [x25, #0x0]\n"
+ ".inst 0xe01c0285 // ld1b { za0h.b[x12, #5] }, p0/Z, [x20, x28]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "incb x28\n"
+ "incb x26\n"
+ "cbz x13, 8f\n"
+ "mov x19, x13\n"
"3:" // K loop: Main loop
- "whilelt p8.b, x14, %x[width]\n"
+ "whilelt p8.b, x26, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
- "cbz x15, 5f\n"
+ "cbz x9, 5f\n"
"4:" // K loop: Main loop: First: Loop
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe0162122 // ld1b { za0h.b[x13, #2] }, p0/Z, [x9, x22]\n"
- ".inst 0x253d6140 // psel p0.b, p8.b/Z, p10.b[w13, #3]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe0162343 // ld1b { za0h.b[x13, #3] }, p0/Z, [x26, x22]\n"
- ".inst 0x25756140 // psel p0.b, p8.b/Z, p10.b[w13, #6]\n"
- ".inst 0x257d6142 // psel p2.b, p8.b/Z, p10.b[w13, #7]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0xe0162306 // ld1b { za0h.b[x13, #6] }, p0/Z, [x24, x22]\n"
- ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
- "ldr x24, [x11, #0x8]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0162ae7 // ld1b { za0h.b[x13, #7] }, p2/Z, [x23, x22]\n"
- "ldr x23, [x10, #0x8]\n"
- ".inst 0xe0bf86a0 // st1w { za0v.s[x12] }, p1/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d21 // psel p1.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xe0b082a4 // st1w { za1v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- "add x11, x11, #0x10\n"
- ".inst 0xe0bc86a1 // st1w { za0v.s[x12, #1] }, p1/Z, [x21, x28, LSL #2]\n"
- "add x10, x10, #0x10\n"
+ ".inst 0x25356140 // dup p0.b, p8.b/Z, p10.b[w13, #2]\n"
+ ".inst 0xe01c22e2 // ld1b { za0h.b[x13, #2] }, p0/Z, [x23, x28]\n"
+ ".inst 0x253d6140 // dup p0.b, p8.b/Z, p10.b[w13, #3]\n"
+ ".inst 0x25756141 // dup p1.b, p8.b/Z, p10.b[w13, #6]\n"
+ ".inst 0xe01c22c3 // ld1b { za0h.b[x13, #3] }, p0/Z, [x22, x28]\n"
+ ".inst 0x257d6140 // dup p0.b, p8.b/Z, p10.b[w13, #7]\n"
+ "ldr x23, [x25, #0x0]\n"
+ ".inst 0xe01c26a6 // ld1b { za0h.b[x13, #6] }, p1/Z, [x21, x28]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ ".inst 0xe01c2287 // ld1b { za0h.b[x13, #7] }, p0/Z, [x20, x28]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0x25706d21 // dup p1.s, p11.s/Z, p9.s[w12, #1]\n"
+ ".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ ".inst 0x25706d20 // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
"add x13, x13, #0x8\n"
- ".inst 0xe0bb82a5 // st1w { za1v.s[x12, #1] }, p0/Z, [x21, x27, LSL #2]\n"
+ ".inst 0xe0af8761 // st1w { za0v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+ ".inst 0xe0ae8365 // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+ "addvl x27, x27, #4\n"
"add x12, x12, #0x2\n"
- "cmp x12, x15\n"
- "addvl x21, x21, #4\n"
+ "cmp x12, x9\n"
"blt 4b\n"
"5:" // K loop: Main loop: First: Tail
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe0162122 // ld1b { za0h.b[x13, #2] }, p0/Z, [x9, x22]\n"
- ".inst 0x253d6140 // psel p0.b, p8.b/Z, p10.b[w13, #3]\n"
- ".inst 0xe0162343 // ld1b { za0h.b[x13, #3] }, p0/Z, [x26, x22]\n"
- ".inst 0x25756140 // psel p0.b, p8.b/Z, p10.b[w13, #6]\n"
- "mov x11, %x[in]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe0162306 // ld1b { za0h.b[x13, #6] }, p0/Z, [x24, x22]\n"
- "add x10, %x[in], x16, LSL #3\n"
- ".inst 0x257d6141 // psel p1.b, p8.b/Z, p10.b[w13, #7]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe01626e7 // ld1b { za0h.b[x13, #7] }, p1/Z, [x23, x22]\n"
- "ldr x24, [x11, #0x8]\n"
- ".inst 0x25306d22 // psel p2.s, p11.s/Z, p9.s[w12]\n"
- "ldr x23, [x10, #0x8]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d21 // psel p1.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xe0b08aa4 // st1w { za1v.s[x12] }, p2/Z, [x21, x16, LSL #2]\n"
- "whilelt p9.b, x14, %x[width]\n"
- "incb x14\n"
- ".inst 0xe0bc86a1 // st1w { za0v.s[x12, #1] }, p1/Z, [x21, x28, LSL #2]\n"
- "add x11, x11, #0x10\n"
- "add x10, x10, #0x10\n"
- ".inst 0xe0bb82a5 // st1w { za1v.s[x12, #1] }, p0/Z, [x21, x27, LSL #2]\n"
- "addvl x21, x21, #4\n"
- "incb x22\n"
- "whilelt p8.b, x14, %x[width]\n"
+ "mov x25, %x[in]\n"
+ "add x24, %x[in], x16, LSL #3\n"
+ ".inst 0x25356140 // dup p0.b, p8.b/Z, p10.b[w13, #2]\n"
+ ".inst 0xe01c22e2 // ld1b { za0h.b[x13, #2] }, p0/Z, [x23, x28]\n"
+ ".inst 0x253d6140 // dup p0.b, p8.b/Z, p10.b[w13, #3]\n"
+ ".inst 0x25756141 // dup p1.b, p8.b/Z, p10.b[w13, #6]\n"
+ ".inst 0xe01c22c3 // ld1b { za0h.b[x13, #3] }, p0/Z, [x22, x28]\n"
+ ".inst 0x257d6140 // dup p0.b, p8.b/Z, p10.b[w13, #7]\n"
+ "ldr x23, [x25, #0x0]\n"
+ ".inst 0xe01c26a6 // ld1b { za0h.b[x13, #6] }, p1/Z, [x21, x28]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ ".inst 0xe01c2287 // ld1b { za0h.b[x13, #7] }, p0/Z, [x20, x28]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0x25706d21 // dup p1.s, p11.s/Z, p9.s[w12, #1]\n"
+ ".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ ".inst 0x25706d20 // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ "whilelt p9.b, x26, %x[width]\n"
+ ".inst 0xe0af8761 // st1w { za0v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+ "incb x26\n"
+ "incb x28\n"
+ ".inst 0xe0ae8365 // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+ "addvl x27, x27, #4\n"
+ "whilelt p8.b, x26, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
- "cbz x15, 7f\n"
+ "cbz x9, 7f\n"
"6:" // K loop: Main loop: Second: Loop
- ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
- ".inst 0xe0162120 // ld1b { za0h.b[x13] }, p0/Z, [x9, x22]\n"
- ".inst 0x252d6140 // psel p0.b, p8.b/Z, p10.b[w13, #1]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe0162341 // ld1b { za0h.b[x13, #1] }, p0/Z, [x26, x22]\n"
- ".inst 0x25656140 // psel p0.b, p8.b/Z, p10.b[w13, #4]\n"
- ".inst 0x256d6142 // psel p2.b, p8.b/Z, p10.b[w13, #5]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0xe0162304 // ld1b { za0h.b[x13, #4] }, p0/Z, [x24, x22]\n"
- ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
- "ldr x24, [x11, #0x8]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0162ae5 // ld1b { za0h.b[x13, #5] }, p2/Z, [x23, x22]\n"
- "ldr x23, [x10, #0x8]\n"
- ".inst 0xe0bf86a8 // st1w { za2v.s[x12] }, p1/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d21 // psel p1.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xe0b082ac // st1w { za3v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- "add x11, x11, #0x10\n"
- ".inst 0xe0bc86a9 // st1w { za2v.s[x12, #1] }, p1/Z, [x21, x28, LSL #2]\n"
- "add x10, x10, #0x10\n"
+ ".inst 0x25256140 // dup p0.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0xe01c22e0 // ld1b { za0h.b[x13] }, p0/Z, [x23, x28]\n"
+ ".inst 0x252d6140 // dup p0.b, p8.b/Z, p10.b[w13, #1]\n"
+ ".inst 0x25656141 // dup p1.b, p8.b/Z, p10.b[w13, #4]\n"
+ ".inst 0xe01c22c1 // ld1b { za0h.b[x13, #1] }, p0/Z, [x22, x28]\n"
+ ".inst 0x256d6140 // dup p0.b, p8.b/Z, p10.b[w13, #5]\n"
+ "ldr x23, [x25, #0x0]\n"
+ ".inst 0xe01c26a4 // ld1b { za0h.b[x13, #4] }, p1/Z, [x21, x28]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ ".inst 0xe01c2285 // ld1b { za0h.b[x13, #5] }, p0/Z, [x20, x28]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8368 // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0x25706d21 // dup p1.s, p11.s/Z, p9.s[w12, #1]\n"
+ ".inst 0xe0b0836c // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ ".inst 0x25706d20 // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
"add x13, x13, #0x8\n"
- ".inst 0xe0bb82ad // st1w { za3v.s[x12, #1] }, p0/Z, [x21, x27, LSL #2]\n"
+ ".inst 0xe0af8769 // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+ ".inst 0xe0ae836d // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+ "addvl x27, x27, #4\n"
"add x12, x12, #0x2\n"
- "cmp x12, x15\n"
- "addvl x21, x21, #4\n"
+ "cmp x12, x9\n"
"blt 6b\n"
"7:" // K loop: Main loop: Second: Tail
- ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
- ".inst 0xe0162120 // ld1b { za0h.b[x13] }, p0/Z, [x9, x22]\n"
- ".inst 0x252d6140 // psel p0.b, p8.b/Z, p10.b[w13, #1]\n"
- ".inst 0xe0162341 // ld1b { za0h.b[x13, #1] }, p0/Z, [x26, x22]\n"
- ".inst 0x25656140 // psel p0.b, p8.b/Z, p10.b[w13, #4]\n"
- "mov x11, %x[in]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe0162304 // ld1b { za0h.b[x13, #4] }, p0/Z, [x24, x22]\n"
- "add x10, %x[in], x16, LSL #3\n"
- ".inst 0x256d6141 // psel p1.b, p8.b/Z, p10.b[w13, #5]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe01626e5 // ld1b { za0h.b[x13, #5] }, p1/Z, [x23, x22]\n"
- "ldr x24, [x11, #0x8]\n"
- ".inst 0x25306d22 // psel p2.s, p11.s/Z, p9.s[w12]\n"
- "ldr x23, [x10, #0x8]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d21 // psel p1.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xe0b08aac // st1w { za3v.s[x12] }, p2/Z, [x21, x16, LSL #2]\n"
- "whilelt p9.b, x14, %x[width]\n"
- "subs x20, x20, #0x1\n"
- ".inst 0xe0bc86a9 // st1w { za2v.s[x12, #1] }, p1/Z, [x21, x28, LSL #2]\n"
- "add x11, x11, #0x10\n"
- "add x10, x10, #0x10\n"
- ".inst 0xe0bb82ad // st1w { za3v.s[x12, #1] }, p0/Z, [x21, x27, LSL #2]\n"
- "addvl x21, x21, #4\n"
- "incb x14\n"
- "incb x22\n"
+ "mov x25, %x[in]\n"
+ "add x24, %x[in], x16, LSL #3\n"
+ ".inst 0x25256140 // dup p0.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0xe01c22e0 // ld1b { za0h.b[x13] }, p0/Z, [x23, x28]\n"
+ ".inst 0x252d6140 // dup p0.b, p8.b/Z, p10.b[w13, #1]\n"
+ ".inst 0x25656141 // dup p1.b, p8.b/Z, p10.b[w13, #4]\n"
+ ".inst 0xe01c22c1 // ld1b { za0h.b[x13, #1] }, p0/Z, [x22, x28]\n"
+ ".inst 0x256d6140 // dup p0.b, p8.b/Z, p10.b[w13, #5]\n"
+ "ldr x23, [x25, #0x0]\n"
+ ".inst 0xe01c26a4 // ld1b { za0h.b[x13, #4] }, p1/Z, [x21, x28]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ ".inst 0xe01c2285 // ld1b { za0h.b[x13, #5] }, p0/Z, [x20, x28]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8368 // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0x25706d21 // dup p1.s, p11.s/Z, p9.s[w12, #1]\n"
+ ".inst 0xe0b0836c // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ ".inst 0x25706d20 // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ "whilelt p9.b, x26, %x[width]\n"
+ ".inst 0xe0af8769 // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+ "incb x26\n"
+ "incb x28\n"
+ ".inst 0xe0ae836d // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+ "addvl x27, x27, #4\n"
+ "subs x19, x19, #0x1\n"
"bgt 3b\n"
"8:" // K loop: Tails
- "cbnz x25, 11f\n"
- "mov x11, %x[in]\n"
- "whilelt p8.b, x14, %x[width]\n"
+ "cbnz x11, 11f\n"
+ "mov x25, %x[in]\n"
+ "whilelt p8.b, x26, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
"9:" // K loop: Tails: Even: First
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0b082a4 // st1w { za1v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe0162122 // ld1b { za0h.b[x13, #2] }, p0/Z, [x9, x22]\n"
- "ldr x26, [x11, x16, LSL #0x3]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0x25356141 // dup p1.b, p8.b/Z, p10.b[w13, #2]\n"
+ ".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ ".inst 0x253d6140 // dup p0.b, p8.b/Z, p10.b[w13, #3]\n"
+ "addvl x27, x27, #2\n"
+ "ldr x23, [x25, #0x0]\n"
+ ".inst 0xe01c26e2 // ld1b { za0h.b[x13, #2] }, p1/Z, [x23, x28]\n"
+ "ldr x22, [x25, x16, LSL #0x3]\n"
+ ".inst 0xe01c22c3 // ld1b { za0h.b[x13, #3] }, p0/Z, [x22, x28]\n"
+ "add x25, x25, #0x8\n"
+ "add x13, x13, #0x4\n"
"add x12, x12, #0x1\n"
- ".inst 0x253d6140 // psel p0.b, p8.b/Z, p10.b[w13, #3]\n"
- ".inst 0xe0162343 // ld1b { za0h.b[x13, #3] }, p0/Z, [x26, x22]\n"
"cmp x12, x16\n"
- "add x11, x11, #0x8\n"
- "addvl x21, x21, #2\n"
- "add x13, x13, #0x4\n"
"blt 9b\n"
- "whilelt p9.b, x14, %x[width]\n"
- "whilelt p8.b, x14, %x[width]\n"
- "mov x20, #0x0\n"
+ "whilelt p9.b, x26, %x[width]\n"
+ "whilelt p8.b, x26, %x[width]\n"
+ "mov x19, #0x0\n"
"mov x12, #0x0\n"
"10:" // K loop: Tails: Even: Second
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0b082ac // st1w { za3v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8368 // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ "add x19, x19, #0x4\n"
+ ".inst 0xe0b0836c // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ "addvl x27, x27, #2\n"
"add x12, x12, #0x1\n"
- "cmp x12, x17\n"
- "addvl x21, x21, #2\n"
- "add x20, x20, #0x4\n"
+ "cmp x12, x10\n"
"blt 10b\n"
- "whilelt p9.b, x14, %x[width]\n"
+ "whilelt p9.b, x26, %x[width]\n"
"b 13f\n"
"11:" // K loop: Tails: Odd
"mov x12, #0x0\n"
"12:" // K loop: Tails: Odd: Loop
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0b082a4 // st1w { za1v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ "addvl x27, x27, #2\n"
"add x12, x12, #0x1\n"
- "cmp x12, x17\n"
- "addvl x21, x21, #2\n"
+ "cmp x12, x10\n"
"blt 12b\n"
"13:" // K loop: End
- "mov %x[out], x21\n"
+ "mov %x[out], x27\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p8", "p9", "p10", "p11", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
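
(Not part of the commit: the loop-count arithmetic that the restored assembly spells out in its own comments — `n_passes = ceildiv(width, VL<T>)`, `n_loops = (n_passes - 1) / 2`, `odd_tail = bool(n_passes & 0x1)`, plus the rounded tail length fed into the "K loop: Tails" compares — reduces to the C++ sketch below. It is illustrative only; `vl` stands in for the byte vector length `VL<T>` (`cntb`), and `width > 0` with power-of-two `vl` is assumed, matching how the kernels are invoked.)

```cpp
#include <cstddef>

// Illustrative sketch (not kernel code) of the scalar loop-count
// setup the restored interleave kernels compute before SMSTART work.
struct KLoopCounts {
    std::size_t n_passes;    // ceildiv(width, VL<T>)
    std::size_t n_loops;     // (n_passes - 1) / 2: main K-loop iterations
    bool        odd_tail;    // n_passes & 1: selects the odd tail path
    std::size_t tail_groups; // 4-byte groups in the final partial pass
};

inline KLoopCounts k_loop_counts(std::size_t width, std::size_t vl) {
    KLoopCounts c;
    c.n_passes = (width + vl - 1) / vl;   // "incb; sub #0x1; udiv"
    c.n_loops  = (c.n_passes - 1) >> 1;   // "sub #0x1; lsr #0x1"
    c.odd_tail = (c.n_passes & 1) != 0;   // "and ..., #0x1"
    std::size_t r = width & (vl - 1);     // "ands" (vl is a power of two)
    if (r == 0) r = vl;                   // "csel ..., NE"
    c.tail_groups = (r + 3) >> 2;         // "add #0x3; lsr #0x2"
    return c;
}
```
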
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_u8_u8_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_u8_u8_summing.hpp
index e7571f7da7..5a71613feb 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_u8_u8_summing.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_u8_u8_summing.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#if defined(__ARM_FEATURE_SVE)
@@ -32,321 +32,321 @@ void interleave_block<2, 4, VLType::SME, true>(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cntb x21\n"
- "mov x23, %x[width]\n"
"mov z20.b, #0x1\n"
- "incb x23\n"
- "mov x20, %x[width]\n"
"mov z19.s, #0x0\n"
+ "cntb x20\n"
"mov z18.s, #0x0\n"
- "sub x17, x21, #0x1\n"
"cntw x16\n"
- "sub x23, x23, #0x1\n"
- "ands x17, x20, x17\n"
- "udiv x23, x23, x21\n" // n_passes = ceildiv(width, VL<T>)
- "csel x17, x17, x21, NE\n"
- "lsl x22, %x[height], #0x1\n" // height * 2
- "lsl x21, x16, #0x1\n"
- "sub x20, x23, #0x1\n"
- "add x17, x17, #0x3\n"
- "whilelt p9.b, XZR, x22\n"
- "whilelt p8.b, x21, x22\n"
- "mov x15, #0x0\n"
- "cntw x14, ALL, MUL #2\n"
- "cntw x11, ALL, MUL #3\n"
- "ptrue p4.b\n"
- "lsr x20, x20, #0x1\n" // n_loops = (n_passes - 1) / 2
- "and x10, x23, #0x1\n" // odd_tail = bool(n_passes & 0x1)
- "lsr x17, x17, #0x2\n"
+ "cntw x15, ALL, MUL #2\n"
+ "cntw x14, ALL, MUL #3\n"
+ "ptrue p2.b\n"
+ "mov x19, %x[width]\n"
+ "incb x19\n"
+ "sub x19, x19, #0x1\n"
+ "udiv x19, x19, x20\n" // n_passes = ceildiv(width, VL<T>)
+ "sub x13, x19, #0x1\n"
+ "lsr x13, x13, #0x1\n" // n_loops = (n_passes - 1) / 2
+ "and x11, x19, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+ "mov x19, %x[width]\n"
+ "sub x10, x20, #0x1\n"
+ "ands x10, x19, x10\n"
+ "csel x10, x10, x20, NE\n"
+ "add x10, x10, #0x3\n"
+ "lsr x10, x10, #0x2\n"
"sub x9, x16, #0x2\n"
"ptrue p11.s\n"
+ "lsl x20, %x[height], #0x1\n" // height * 2
+ "lsl x19, x16, #0x1\n"
+ "whilelt p9.b, XZR, x20\n"
+ "whilelt p8.b, x19, x20\n"
"zip1 p10.b, p9.b, p8.b\n"
"mov x28, %x[row_offset]\n"
"mov x27, %x[out]\n"
- "whilelt p9.b, x15, %x[width]\n"
- "whilelt p8.b, x15, %x[width]\n"
+ "mov x26, #0x0\n"
+ "whilelt p9.b, x26, %x[width]\n"
+ "whilelt p8.b, x26, %x[width]\n"
"cbnz %x[first], 1f\n"
"addvl x27, x27, #-2\n"
- "ld1w { z19.s }, p4/Z, [x27]\n"
- "ld1w { z18.s }, p4/Z, [x27, #1, MUL VL]\n"
+ "ld1w { z19.s }, p2/Z, [x27]\n"
+ "ld1w { z18.s }, p2/Z, [x27, #1, MUL VL]\n"
"1:" // K loop: Load row sums: End
- "mov x26, %x[in]\n"
- "add x25, %x[in], x16, LSL #3\n"
- "ldr x24, [x26, #0x0]\n"
+ "mov x25, %x[in]\n"
+ "add x24, %x[in], x16, LSL #3\n"
"ldr x23, [x25, #0x0]\n"
- "mov x12, #0x0\n"
- "ldr x22, [x26, #0x8]\n"
- "add x26, x26, #0x10\n"
+ "ldr x22, [x24, #0x0]\n"
"ldr x21, [x25, #0x8]\n"
+ "ldr x20, [x24, #0x8]\n"
"add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "mov x12, #0x0\n"
"cbz x9, 3f\n"
"2:" // K loop: Charge: Loop
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe01c0300 // ld1b { za0h.b[x12] }, p0/Z, [x24, x28]\n"
- ".inst 0x252c6140 // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
- "ldr x24, [x26, #0x0]\n"
- ".inst 0xe01c02e1 // ld1b { za0h.b[x12, #1] }, p0/Z, [x23, x28]\n"
- ".inst 0x25646141 // psel p1.b, p8.b/Z, p10.b[w12, #4]\n"
- ".inst 0x256c6140 // psel p0.b, p8.b/Z, p10.b[w12, #5]\n"
+ ".inst 0x25246140 // dup p0.b, p8.b/Z, p10.b[w12]\n"
+ ".inst 0xe01c02e0 // ld1b { za0h.b[x12] }, p0/Z, [x23, x28]\n"
+ ".inst 0x252c6140 // dup p0.b, p8.b/Z, p10.b[w12, #1]\n"
+ ".inst 0x25646141 // dup p1.b, p8.b/Z, p10.b[w12, #4]\n"
+ ".inst 0xe01c02c1 // ld1b { za0h.b[x12, #1] }, p0/Z, [x22, x28]\n"
+ ".inst 0x256c6140 // dup p0.b, p8.b/Z, p10.b[w12, #5]\n"
"ldr x23, [x25, #0x0]\n"
- ".inst 0xe01c06c4 // ld1b { za0h.b[x12, #4] }, p1/Z, [x22, x28]\n"
- "ldr x22, [x26, #0x8]\n"
- "add x26, x26, #0x10\n"
- ".inst 0xe01c02a5 // ld1b { za0h.b[x12, #5] }, p0/Z, [x21, x28]\n"
- "add x12, x12, #0x8\n"
- "cmp x12, x9, LSL #2\n"
+ ".inst 0xe01c06a4 // ld1b { za0h.b[x12, #4] }, p1/Z, [x21, x28]\n"
+ "ldr x22, [x24, #0x0]\n"
"ldr x21, [x25, #0x8]\n"
+ ".inst 0xe01c0285 // ld1b { za0h.b[x12, #5] }, p0/Z, [x20, x28]\n"
+ "ldr x20, [x24, #0x8]\n"
"add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "add x12, x12, #0x8\n"
+ "cmp x12, x9, LSL #2\n"
"blt 2b\n"
"3:" // K loop: Charge: End
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe01c0300 // ld1b { za0h.b[x12] }, p0/Z, [x24, x28]\n"
- ".inst 0x252c6140 // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
- ".inst 0xe01c02e1 // ld1b { za0h.b[x12, #1] }, p0/Z, [x23, x28]\n"
- ".inst 0x25646141 // psel p1.b, p8.b/Z, p10.b[w12, #4]\n"
- ".inst 0x256c6140 // psel p0.b, p8.b/Z, p10.b[w12, #5]\n"
- ".inst 0xe01c06c4 // ld1b { za0h.b[x12, #4] }, p1/Z, [x22, x28]\n"
- "mov x26, %x[in]\n"
- "add x25, %x[in], x16, LSL #3\n"
- "ldr x24, [x26, #0x0]\n"
- ".inst 0xe01c02a5 // ld1b { za0h.b[x12, #5] }, p0/Z, [x21, x28]\n"
+ ".inst 0x25246140 // dup p0.b, p8.b/Z, p10.b[w12]\n"
+ ".inst 0xe01c02e0 // ld1b { za0h.b[x12] }, p0/Z, [x23, x28]\n"
+ ".inst 0x252c6140 // dup p0.b, p8.b/Z, p10.b[w12, #1]\n"
+ ".inst 0x25646141 // dup p1.b, p8.b/Z, p10.b[w12, #4]\n"
+ ".inst 0xe01c02c1 // ld1b { za0h.b[x12, #1] }, p0/Z, [x22, x28]\n"
+ ".inst 0x256c6140 // dup p0.b, p8.b/Z, p10.b[w12, #5]\n"
+ "mov x25, %x[in]\n"
+ ".inst 0xe01c06a4 // ld1b { za0h.b[x12, #4] }, p1/Z, [x21, x28]\n"
+ "add x24, %x[in], x16, LSL #3\n"
"ldr x23, [x25, #0x0]\n"
- "incb x28\n"
- "incb x15\n"
- "ldr x22, [x26, #0x8]\n"
- "add x26, x26, #0x10\n"
+ ".inst 0xe01c0285 // ld1b { za0h.b[x12, #5] }, p0/Z, [x20, x28]\n"
+ "ldr x22, [x24, #0x0]\n"
"ldr x21, [x25, #0x8]\n"
+ "ldr x20, [x24, #0x8]\n"
"add x25, x25, #0x10\n"
- "cbz x20, 9f\n"
- "mov x20, x20\n"
+ "add x24, x24, #0x10\n"
+ "incb x28\n"
+ "incb x26\n"
+ "cbz x13, 9f\n"
+ "mov x19, x13\n"
"4:" // K loop: Main loop
- "whilelt p8.b, x15, %x[width]\n"
+ "whilelt p8.b, x26, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
"cbz x9, 6f\n"
"5:" // K loop: Main loop: First: Loop
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe01c2302 // ld1b { za0h.b[x13, #2] }, p0/Z, [x24, x28]\n"
- ".inst 0x253d6140 // psel p0.b, p8.b/Z, p10.b[w13, #3]\n"
- "ldr x24, [x26, #0x0]\n"
- ".inst 0xe01c22e3 // ld1b { za0h.b[x13, #3] }, p0/Z, [x23, x28]\n"
- ".inst 0x25756140 // psel p0.b, p8.b/Z, p10.b[w13, #6]\n"
- ".inst 0x257d6142 // psel p2.b, p8.b/Z, p10.b[w13, #7]\n"
+ ".inst 0x25356140 // dup p0.b, p8.b/Z, p10.b[w13, #2]\n"
+ ".inst 0xe01c22e2 // ld1b { za0h.b[x13, #2] }, p0/Z, [x23, x28]\n"
+ ".inst 0x253d6140 // dup p0.b, p8.b/Z, p10.b[w13, #3]\n"
+ ".inst 0x25756141 // dup p1.b, p8.b/Z, p10.b[w13, #6]\n"
+ ".inst 0xe01c22c3 // ld1b { za0h.b[x13, #3] }, p0/Z, [x22, x28]\n"
+ ".inst 0x257d6140 // dup p0.b, p8.b/Z, p10.b[w13, #7]\n"
"ldr x23, [x25, #0x0]\n"
- ".inst 0xe01c22c6 // ld1b { za0h.b[x13, #6] }, p0/Z, [x22, x28]\n"
- ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
- "ldr x22, [x26, #0x8]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe01c2aa7 // ld1b { za0h.b[x13, #7] }, p2/Z, [x21, x28]\n"
+ ".inst 0xe01c26a6 // ld1b { za0h.b[x13, #6] }, p1/Z, [x21, x28]\n"
+ "ldr x22, [x24, #0x0]\n"
"ldr x21, [x25, #0x8]\n"
- ".inst 0xe0bf8760 // st1w { za0v.s[x12] }, p1/Z, [x27, XZR, LSL #2]\n"
- ".inst 0xc0829011 // mova z17.s, p4/M, za0v.s[x12]\n"
- ".inst 0xc0829090 // mova z16.s, p4/M, za1v.s[x12]\n"
+ ".inst 0xe01c2287 // ld1b { za0h.b[x13, #7] }, p0/Z, [x20, x28]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0xc0828811 // mova z17.s, p2/M, za0v.s[x12]\n"
+ ".inst 0xc0828890 // mova z16.s, p2/M, za1v.s[x12]\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
"udot z19.s, z17.b, z20.b\n"
- ".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ ".inst 0xc0828831 // mova z17.s, p2/M, za0v.s[x12, #1]\n"
"udot z18.s, z16.b, z20.b\n"
- ".inst 0xe0ae8361 // st1w { za0v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xc0829031 // mova z17.s, p4/M, za0v.s[x12, #1]\n"
- ".inst 0xe0ab8365 // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x11, LSL #2]\n"
- ".inst 0xc08290b0 // mova z16.s, p4/M, za1v.s[x12, #1]\n"
- "add x12, x12, #0x2\n"
- "cmp x12, x9\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
+ ".inst 0xc08288b0 // mova z16.s, p2/M, za1v.s[x12, #1]\n"
+ ".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ ".inst 0x25706d21 // dup p1.s, p11.s/Z, p9.s[w12, #1]\n"
+ ".inst 0x25706d20 // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ "add x13, x13, #0x8\n"
+ ".inst 0xe0af8761 // st1w { za0v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
"udot z19.s, z17.b, z20.b\n"
"udot z18.s, z16.b, z20.b\n"
+ ".inst 0xe0ae8365 // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
"addvl x27, x27, #4\n"
- "add x13, x13, #0x8\n"
+ "add x12, x12, #0x2\n"
+ "cmp x12, x9\n"
"blt 5b\n"
"6:" // K loop: Main loop: First: Tail
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe01c2302 // ld1b { za0h.b[x13, #2] }, p0/Z, [x24, x28]\n"
- ".inst 0x253d6140 // psel p0.b, p8.b/Z, p10.b[w13, #3]\n"
- ".inst 0xe01c22e3 // ld1b { za0h.b[x13, #3] }, p0/Z, [x23, x28]\n"
- ".inst 0x25756141 // psel p1.b, p8.b/Z, p10.b[w13, #6]\n"
- ".inst 0x257d6140 // psel p0.b, p8.b/Z, p10.b[w13, #7]\n"
- ".inst 0xe01c26c6 // ld1b { za0h.b[x13, #6] }, p1/Z, [x22, x28]\n"
- "mov x26, %x[in]\n"
- "add x25, %x[in], x16, LSL #3\n"
- "ldr x24, [x26, #0x0]\n"
- ".inst 0xe01c22a7 // ld1b { za0h.b[x13, #7] }, p0/Z, [x21, x28]\n"
- ".inst 0xc0829011 // mova z17.s, p4/M, za0v.s[x12]\n"
- ".inst 0x25306d23 // psel p3.s, p11.s/Z, p9.s[w12]\n"
- "udot z19.s, z17.b, z20.b\n"
- ".inst 0xc0829090 // mova z16.s, p4/M, za1v.s[x12]\n"
- "udot z18.s, z16.b, z20.b\n"
+ "mov x25, %x[in]\n"
+ "add x24, %x[in], x16, LSL #3\n"
+ ".inst 0x25356140 // dup p0.b, p8.b/Z, p10.b[w13, #2]\n"
+ ".inst 0xe01c22e2 // ld1b { za0h.b[x13, #2] }, p0/Z, [x23, x28]\n"
+ ".inst 0x253d6140 // dup p0.b, p8.b/Z, p10.b[w13, #3]\n"
+ ".inst 0x25756141 // dup p1.b, p8.b/Z, p10.b[w13, #6]\n"
+ ".inst 0xe01c22c3 // ld1b { za0h.b[x13, #3] }, p0/Z, [x22, x28]\n"
+ ".inst 0x257d6140 // dup p0.b, p8.b/Z, p10.b[w13, #7]\n"
"ldr x23, [x25, #0x0]\n"
- ".inst 0x25306d22 // psel p2.s, p11.s/Z, p9.s[w12]\n"
- "ldr x22, [x26, #0x8]\n"
- ".inst 0x25706d21 // psel p1.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xc0829031 // mova z17.s, p4/M, za0v.s[x12, #1]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ ".inst 0xe01c26a6 // ld1b { za0h.b[x13, #6] }, p1/Z, [x21, x28]\n"
+ "ldr x22, [x24, #0x0]\n"
"ldr x21, [x25, #0x8]\n"
- ".inst 0xe0bf8f60 // st1w { za0v.s[x12] }, p3/Z, [x27, XZR, LSL #2]\n"
- ".inst 0xc08290b0 // mova z16.s, p4/M, za1v.s[x12, #1]\n"
- "whilelt p9.b, x15, %x[width]\n"
- ".inst 0xe0b08b64 // st1w { za1v.s[x12] }, p2/Z, [x27, x16, LSL #2]\n"
- "incb x15\n"
- "add x26, x26, #0x10\n"
- "udot z19.s, z17.b, z20.b\n"
- ".inst 0xe0ae8761 // st1w { za0v.s[x12, #1] }, p1/Z, [x27, x14, LSL #2]\n"
+ ".inst 0xe01c2287 // ld1b { za0h.b[x13, #7] }, p0/Z, [x20, x28]\n"
+ "ldr x20, [x24, #0x8]\n"
"add x25, x25, #0x10\n"
+ ".inst 0xc0828811 // mova z17.s, p2/M, za0v.s[x12]\n"
+ ".inst 0xc0828890 // mova z16.s, p2/M, za1v.s[x12]\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ "udot z19.s, z17.b, z20.b\n"
+ ".inst 0xc0828831 // mova z17.s, p2/M, za0v.s[x12, #1]\n"
+ "udot z18.s, z16.b, z20.b\n"
+ ".inst 0xc08288b0 // mova z16.s, p2/M, za1v.s[x12, #1]\n"
+ ".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ ".inst 0x25706d21 // dup p1.s, p11.s/Z, p9.s[w12, #1]\n"
+ ".inst 0x25706d20 // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ "whilelt p9.b, x26, %x[width]\n"
+ ".inst 0xe0af8761 // st1w { za0v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+ "udot z19.s, z17.b, z20.b\n"
+ "incb x26\n"
"udot z18.s, z16.b, z20.b\n"
"incb x28\n"
- ".inst 0xe0ab8365 // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x11, LSL #2]\n"
+ ".inst 0xe0ae8365 // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
"addvl x27, x27, #4\n"
- "whilelt p8.b, x15, %x[width]\n"
+ "whilelt p8.b, x26, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
"cbz x9, 8f\n"
"7:" // K loop: Main loop: Second: Loop
- ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
- ".inst 0xe01c2300 // ld1b { za0h.b[x13] }, p0/Z, [x24, x28]\n"
- ".inst 0x252d6140 // psel p0.b, p8.b/Z, p10.b[w13, #1]\n"
- "ldr x24, [x26, #0x0]\n"
- ".inst 0xe01c22e1 // ld1b { za0h.b[x13, #1] }, p0/Z, [x23, x28]\n"
- ".inst 0x25656140 // psel p0.b, p8.b/Z, p10.b[w13, #4]\n"
- ".inst 0x256d6142 // psel p2.b, p8.b/Z, p10.b[w13, #5]\n"
+ ".inst 0x25256140 // dup p0.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0xe01c22e0 // ld1b { za0h.b[x13] }, p0/Z, [x23, x28]\n"
+ ".inst 0x252d6140 // dup p0.b, p8.b/Z, p10.b[w13, #1]\n"
+ ".inst 0x25656141 // dup p1.b, p8.b/Z, p10.b[w13, #4]\n"
+ ".inst 0xe01c22c1 // ld1b { za0h.b[x13, #1] }, p0/Z, [x22, x28]\n"
+ ".inst 0x256d6140 // dup p0.b, p8.b/Z, p10.b[w13, #5]\n"
"ldr x23, [x25, #0x0]\n"
- ".inst 0xe01c22c4 // ld1b { za0h.b[x13, #4] }, p0/Z, [x22, x28]\n"
- ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
- "ldr x22, [x26, #0x8]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe01c2aa5 // ld1b { za0h.b[x13, #5] }, p2/Z, [x21, x28]\n"
+ ".inst 0xe01c26a4 // ld1b { za0h.b[x13, #4] }, p1/Z, [x21, x28]\n"
+ "ldr x22, [x24, #0x0]\n"
"ldr x21, [x25, #0x8]\n"
- ".inst 0xe0bf8768 // st1w { za2v.s[x12] }, p1/Z, [x27, XZR, LSL #2]\n"
- ".inst 0xc0829111 // mova z17.s, p4/M, za2v.s[x12]\n"
- ".inst 0xc0829190 // mova z16.s, p4/M, za3v.s[x12]\n"
+ ".inst 0xe01c2285 // ld1b { za0h.b[x13, #5] }, p0/Z, [x20, x28]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0xc0828911 // mova z17.s, p2/M, za2v.s[x12]\n"
+ ".inst 0xc0828990 // mova z16.s, p2/M, za3v.s[x12]\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8368 // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
"udot z19.s, z17.b, z20.b\n"
- ".inst 0xe0b0836c // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ ".inst 0xc0828931 // mova z17.s, p2/M, za2v.s[x12, #1]\n"
"udot z18.s, z16.b, z20.b\n"
- ".inst 0xe0ae8369 // st1w { za2v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xc0829131 // mova z17.s, p4/M, za2v.s[x12, #1]\n"
- ".inst 0xe0ab836d // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x11, LSL #2]\n"
- ".inst 0xc08291b0 // mova z16.s, p4/M, za3v.s[x12, #1]\n"
- "add x12, x12, #0x2\n"
- "cmp x12, x9\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
+ ".inst 0xc08289b0 // mova z16.s, p2/M, za3v.s[x12, #1]\n"
+ ".inst 0xe0b0836c // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ ".inst 0x25706d21 // dup p1.s, p11.s/Z, p9.s[w12, #1]\n"
+ ".inst 0x25706d20 // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ "add x13, x13, #0x8\n"
+ ".inst 0xe0af8769 // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
"udot z19.s, z17.b, z20.b\n"
"udot z18.s, z16.b, z20.b\n"
+ ".inst 0xe0ae836d // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
"addvl x27, x27, #4\n"
- "add x13, x13, #0x8\n"
+ "add x12, x12, #0x2\n"
+ "cmp x12, x9\n"
"blt 7b\n"
"8:" // K loop: Main loop: Second: Tail
- ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
- ".inst 0xe01c2300 // ld1b { za0h.b[x13] }, p0/Z, [x24, x28]\n"
- ".inst 0x252d6140 // psel p0.b, p8.b/Z, p10.b[w13, #1]\n"
- ".inst 0xe01c22e1 // ld1b { za0h.b[x13, #1] }, p0/Z, [x23, x28]\n"
- ".inst 0x25656141 // psel p1.b, p8.b/Z, p10.b[w13, #4]\n"
- ".inst 0x256d6140 // psel p0.b, p8.b/Z, p10.b[w13, #5]\n"
- ".inst 0xe01c26c4 // ld1b { za0h.b[x13, #4] }, p1/Z, [x22, x28]\n"
- "mov x26, %x[in]\n"
- "add x25, %x[in], x16, LSL #3\n"
- "ldr x24, [x26, #0x0]\n"
- ".inst 0xe01c22a5 // ld1b { za0h.b[x13, #5] }, p0/Z, [x21, x28]\n"
- ".inst 0xc0829111 // mova z17.s, p4/M, za2v.s[x12]\n"
- ".inst 0x25306d23 // psel p3.s, p11.s/Z, p9.s[w12]\n"
- "udot z19.s, z17.b, z20.b\n"
- ".inst 0xc0829190 // mova z16.s, p4/M, za3v.s[x12]\n"
- "udot z18.s, z16.b, z20.b\n"
+ "mov x25, %x[in]\n"
+ "add x24, %x[in], x16, LSL #3\n"
+ ".inst 0x25256140 // dup p0.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0xe01c22e0 // ld1b { za0h.b[x13] }, p0/Z, [x23, x28]\n"
+ ".inst 0x252d6140 // dup p0.b, p8.b/Z, p10.b[w13, #1]\n"
+ ".inst 0x25656141 // dup p1.b, p8.b/Z, p10.b[w13, #4]\n"
+ ".inst 0xe01c22c1 // ld1b { za0h.b[x13, #1] }, p0/Z, [x22, x28]\n"
+ ".inst 0x256d6140 // dup p0.b, p8.b/Z, p10.b[w13, #5]\n"
"ldr x23, [x25, #0x0]\n"
- ".inst 0x25306d22 // psel p2.s, p11.s/Z, p9.s[w12]\n"
- "ldr x22, [x26, #0x8]\n"
- ".inst 0x25706d21 // psel p1.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xc0829131 // mova z17.s, p4/M, za2v.s[x12, #1]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ ".inst 0xe01c26a4 // ld1b { za0h.b[x13, #4] }, p1/Z, [x21, x28]\n"
+ "ldr x22, [x24, #0x0]\n"
"ldr x21, [x25, #0x8]\n"
- ".inst 0xe0bf8f68 // st1w { za2v.s[x12] }, p3/Z, [x27, XZR, LSL #2]\n"
- ".inst 0xc08291b0 // mova z16.s, p4/M, za3v.s[x12, #1]\n"
- "whilelt p9.b, x15, %x[width]\n"
- ".inst 0xe0b08b6c // st1w { za3v.s[x12] }, p2/Z, [x27, x16, LSL #2]\n"
- "subs x20, x20, #0x1\n"
- "add x26, x26, #0x10\n"
- "udot z19.s, z17.b, z20.b\n"
- ".inst 0xe0ae8769 // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x14, LSL #2]\n"
+ ".inst 0xe01c2285 // ld1b { za0h.b[x13, #5] }, p0/Z, [x20, x28]\n"
+ "ldr x20, [x24, #0x8]\n"
"add x25, x25, #0x10\n"
+ ".inst 0xc0828911 // mova z17.s, p2/M, za2v.s[x12]\n"
+ ".inst 0xc0828990 // mova z16.s, p2/M, za3v.s[x12]\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8368 // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ "udot z19.s, z17.b, z20.b\n"
+ ".inst 0xc0828931 // mova z17.s, p2/M, za2v.s[x12, #1]\n"
+ "udot z18.s, z16.b, z20.b\n"
+ ".inst 0xc08289b0 // mova z16.s, p2/M, za3v.s[x12, #1]\n"
+ ".inst 0xe0b0836c // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ ".inst 0x25706d21 // dup p1.s, p11.s/Z, p9.s[w12, #1]\n"
+ ".inst 0x25706d20 // dup p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ "whilelt p9.b, x26, %x[width]\n"
+ ".inst 0xe0af8769 // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+ "udot z19.s, z17.b, z20.b\n"
+ "incb x26\n"
"udot z18.s, z16.b, z20.b\n"
- "incb x15\n"
- ".inst 0xe0ab836d // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x11, LSL #2]\n"
- "addvl x27, x27, #4\n"
"incb x28\n"
+ ".inst 0xe0ae836d // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+ "addvl x27, x27, #4\n"
+ "subs x19, x19, #0x1\n"
"bgt 4b\n"
"9:" // K loop: Tails
- "cbnz x10, 12f\n"
- "mov x26, %x[in]\n"
- "whilelt p8.b, x15, %x[width]\n"
+ "cbnz x11, 12f\n"
+ "mov x25, %x[in]\n"
+ "whilelt p8.b, x26, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
"10:" // K loop: Tails: Even: First
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xc0828811 // mova z17.s, p2/M, za0v.s[x12]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xc0829011 // mova z17.s, p4/M, za0v.s[x12]\n"
+ ".inst 0xc0828890 // mova z16.s, p2/M, za1v.s[x12]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0x25356141 // dup p1.b, p8.b/Z, p10.b[w13, #2]\n"
".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
- "ldr x24, [x26, #0x0]\n"
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xc0829090 // mova z16.s, p4/M, za1v.s[x12]\n"
- "ldr x23, [x26, x16, LSL #0x3]\n"
- ".inst 0xe01c2302 // ld1b { za0h.b[x13, #2] }, p0/Z, [x24, x28]\n"
- "add x12, x12, #0x1\n"
- ".inst 0x253d6140 // psel p0.b, p8.b/Z, p10.b[w13, #3]\n"
- "cmp x12, x16\n"
+ ".inst 0x253d6140 // dup p0.b, p8.b/Z, p10.b[w13, #3]\n"
"udot z19.s, z17.b, z20.b\n"
"udot z18.s, z16.b, z20.b\n"
- ".inst 0xe01c22e3 // ld1b { za0h.b[x13, #3] }, p0/Z, [x23, x28]\n"
- "add x26, x26, #0x8\n"
+ "ldr x23, [x25, #0x0]\n"
+ ".inst 0xe01c26e2 // ld1b { za0h.b[x13, #2] }, p1/Z, [x23, x28]\n"
+ "ldr x22, [x25, x16, LSL #0x3]\n"
"addvl x27, x27, #2\n"
+ ".inst 0xe01c22c3 // ld1b { za0h.b[x13, #3] }, p0/Z, [x22, x28]\n"
+ "add x25, x25, #0x8\n"
"add x13, x13, #0x4\n"
+ "add x12, x12, #0x1\n"
+ "cmp x12, x16\n"
"blt 10b\n"
- "whilelt p9.b, x15, %x[width]\n"
- "whilelt p8.b, x15, %x[width]\n"
- "mov x20, #0x0\n"
+ "whilelt p9.b, x26, %x[width]\n"
+ "whilelt p8.b, x26, %x[width]\n"
+ "mov x19, #0x0\n"
"mov x12, #0x0\n"
"11:" // K loop: Tails: Even: Second
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xc0828911 // mova z17.s, p2/M, za2v.s[x12]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
".inst 0xe0bf8368 // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xc0829111 // mova z17.s, p4/M, za2v.s[x12]\n"
+ ".inst 0xc0828990 // mova z16.s, p2/M, za3v.s[x12]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ "add x19, x19, #0x4\n"
".inst 0xe0b0836c // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
- ".inst 0xc0829190 // mova z16.s, p4/M, za3v.s[x12]\n"
- "add x12, x12, #0x1\n"
- "cmp x12, x17\n"
+ "addvl x27, x27, #2\n"
"udot z19.s, z17.b, z20.b\n"
"udot z18.s, z16.b, z20.b\n"
- "addvl x27, x27, #2\n"
- "add x20, x20, #0x4\n"
+ "add x12, x12, #0x1\n"
+ "cmp x12, x10\n"
"blt 11b\n"
- "whilelt p9.b, x15, %x[width]\n"
+ "whilelt p9.b, x26, %x[width]\n"
"b 14f\n"
"12:" // K loop: Tails: Odd
"mov x12, #0x0\n"
"13:" // K loop: Tails: Odd: Loop
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xc0828811 // mova z17.s, p2/M, za0v.s[x12]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xc0829011 // mova z17.s, p4/M, za0v.s[x12]\n"
+ ".inst 0xc0828890 // mova z16.s, p2/M, za1v.s[x12]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
- ".inst 0xc0829090 // mova z16.s, p4/M, za1v.s[x12]\n"
- "add x12, x12, #0x1\n"
- "cmp x12, x17\n"
+ "addvl x27, x27, #2\n"
"udot z19.s, z17.b, z20.b\n"
"udot z18.s, z16.b, z20.b\n"
- "addvl x27, x27, #2\n"
+ "add x12, x12, #0x1\n"
+ "cmp x12, x10\n"
"blt 13b\n"
"14:" // K loop: End
- "st1w { z19.s }, p4, [x27]\n"
- "st1w { z18.s }, p4, [x27, #1, MUL VL]\n"
+ "st1w { z19.s }, p2, [x27]\n"
+ "st1w { z18.s }, p2, [x27, #1, MUL VL]\n"
"addvl x27, x27, #2\n"
"mov %x[out], x27\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p8", "p9", "p10", "p11", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
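
(Not part of the commit: the summing variant restored above maintains running row sums by dotting each ZA slice against an all-ones byte vector — `mov z20.b, #0x1` followed by `udot z19.s, z17.b, z20.b`. A rough scalar model of that idiom, illustrative only: UDOT with byte operands folds each group of four byte products into one 32-bit lane, so a dot against ones adds four-byte group sums into the accumulators.)

```cpp
#include <cstdint>
#include <cstddef>

// Scalar model of "udot z19.s, z17.b, z20.b" with z20.b == 1:
// each 32-bit accumulator lane gains the sum of the four unsigned
// bytes occupying the same lane of the source vector.
void udot_ones_accumulate(uint32_t *acc_lanes, const uint8_t *src_bytes,
                          std::size_t n_lanes) {
    for (std::size_t lane = 0; lane < n_lanes; ++lane) {
        uint32_t group_sum = 0;
        for (int b = 0; b < 4; ++b)
            group_sum += src_bytes[lane * 4 + b];  // implicit "* 1" from z20.b
        acc_lanes[lane] += group_sum;              // running row sum
    }
}
```
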
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_fp16_fp16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_fp16_fp16.hpp
index 522f310cc0..3ea616f007 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_fp16_fp16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_fp16_fp16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#if defined(__ARM_FEATURE_SVE)
@@ -34,66 +34,66 @@ void interleave_block<2, 1, VLType::SME, false>(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cnth x28\n"
- "cmp %x[height], x28\n"
"cnth x27\n"
- "csel x28, %x[height], x28, LT\n"
- "mov x26, #0x0\n"
+ "cmp %x[height], x27\n"
+ "cnth x26\n"
+ "csel x27, %x[height], x27, LT\n"
+ "mov x25, #0x0\n"
"ptrue p13.s\n"
- "sub x28, x28, #0x1\n"
+ "sub x27, x27, #0x1\n"
"whilelt p12.h, XZR, %x[height]\n"
- "whilelt p11.h, x27, %x[height]\n"
- "mov x25, %x[row_offset]\n"
- "mov x24, %x[out]\n"
- "whilelt p10.h, x26, %x[width]\n"
- "whilelt p9.h, x26, %x[width]\n"
- "whilelt p8.h, x26, %x[width]\n"
+ "whilelt p11.h, x26, %x[height]\n"
+ "mov x24, %x[row_offset]\n"
+ "mov x23, %x[out]\n"
+ "whilelt p10.h, x25, %x[width]\n"
+ "whilelt p9.h, x25, %x[width]\n"
+ "whilelt p8.h, x25, %x[width]\n"
"1:" // Width loop
- "add x23, %x[in], XZR, LSL #3\n"
- "add x20, %x[in], x27, LSL #3\n"
- "ldr x22, [x23], #0x8\n"
+ "add x22, %x[in], XZR, LSL #3\n"
+ "add x19, %x[in], x26, LSL #3\n"
+ "ldr x21, [x22], #0x8\n"
"mov x12, #0x0\n"
- "ldr x21, [x20], #0x8\n"
- "cbz x28, 3f\n"
+ "ldr x20, [x19], #0x8\n"
+ "cbz x27, 3f\n"
"2:" // Loads: Loop
".inst 0x25286581 // psel p1.h, p9.h/Z, p12.h[w12]\n"
".inst 0x25286160 // psel p0.h, p8.h/Z, p11.h[w12]\n"
- ".inst 0xe05906c0 // ld1h { za0h.h[x12] }, p1/Z, [x22, x25, LSL #1]\n"
- "ldr x22, [x23], #0x8\n"
- ".inst 0xe05902a8 // ld1h { za1h.h[x12] }, p0/Z, [x21, x25, LSL #1]\n"
+ ".inst 0xe05806a0 // ld1h { za0h.h[x12] }, p1/Z, [x21, x24, LSL #1]\n"
+ "ldr x21, [x22], #0x8\n"
+ ".inst 0xe0580288 // ld1h { za1h.h[x12] }, p0/Z, [x20, x24, LSL #1]\n"
"add x12, x12, #0x2\n"
- "cmp x12, x28, LSL #1\n"
- "ldr x21, [x20], #0x8\n"
+ "cmp x12, x27, LSL #1\n"
+ "ldr x20, [x19], #0x8\n"
"blt 2b\n"
"3:" // Loads: Tail
- "sub x20, %x[width], x26\n"
+ "sub x19, %x[width], x25\n"
".inst 0x25286580 // psel p0.h, p9.h/Z, p12.h[w12]\n"
- ".inst 0xe05902c0 // ld1h { za0h.h[x12] }, p0/Z, [x22, x25, LSL #1]\n"
+ ".inst 0xe05802a0 // ld1h { za0h.h[x12] }, p0/Z, [x21, x24, LSL #1]\n"
".inst 0x25286160 // psel p0.h, p8.h/Z, p11.h[w12]\n"
- "cmp x20, x27\n"
- ".inst 0xe05902a8 // ld1h { za1h.h[x12] }, p0/Z, [x21, x25, LSL #1]\n"
+ "cmp x19, x26\n"
+ ".inst 0xe0580288 // ld1h { za1h.h[x12] }, p0/Z, [x20, x24, LSL #1]\n"
"mov x12, #0x0\n"
- "csel x20, x20, x27, LT\n"
+ "csel x19, x19, x26, LT\n"
"4:" // Stores: Loop
".inst 0x25287540 // psel p0.h, p13.h/Z, p10.h[w12]\n"
- ".inst 0xe07f8300 // st1h { za0v.h[x12] }, p0/Z, [x24, XZR, LSL #1]\n"
+ ".inst 0xe07f82e0 // st1h { za0v.h[x12] }, p0/Z, [x23, XZR, LSL #1]\n"
".inst 0x25287540 // psel p0.h, p13.h/Z, p10.h[w12]\n"
- ".inst 0xe07b8308 // st1h { za1v.h[x12] }, p0/Z, [x24, x27, LSL #1]\n"
+ ".inst 0xe07a82e8 // st1h { za1v.h[x12] }, p0/Z, [x23, x26, LSL #1]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x20\n"
- "addvl x24, x24, #4\n"
+ "cmp x12, x19\n"
+ "addvl x23, x23, #4\n"
"blt 4b\n"
- "inch x26\n"
- "whilelt p10.h, x26, %x[width]\n"
- "whilelt p9.h, x26, %x[width]\n"
- "whilelt p8.h, x26, %x[width]\n"
"inch x25\n"
+ "whilelt p10.h, x25, %x[width]\n"
+ "whilelt p9.h, x25, %x[width]\n"
+ "whilelt p8.h, x25, %x[width]\n"
+ "inch x24\n"
"b.any 1b\n"
- "mov %x[out], x24\n"
+ "mov %x[out], x23\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x12", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
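
(Not part of the commit: in the fp16 interleave above, each pass of the width loop stores `min(width - column, VL)` vector slices — the restored code computes that clamp with the `sub x19, %x[width], x25` / `cmp` / `csel ..., LT` sequence feeding the "Stores: Loop" compare. A minimal sketch, with hypothetical names; `vl` stands for the halfword count `cnth`.)

```cpp
#include <algorithm>
#include <cstddef>

// Sketch of the sub/cmp/csel clamp in the fp16 "Stores" loop: each
// width-loop pass writes min(width - col, VL) slices, then col
// advances by VL ("inch").
inline std::size_t stores_this_pass(std::size_t width, std::size_t col,
                                    std::size_t vl) {
    return std::min(width - col, vl);  // "sub; cmp; csel ..., LT"
}
```
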
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_fp32_fp32.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_fp32_fp32.hpp
index 949e003598..d7025420e9 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_fp32_fp32.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_fp32_fp32.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#if defined(__ARM_FEATURE_SVE)
@@ -34,256 +34,256 @@ void interleave_block<2, 1, VLType::SME, false>(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "mov x22, %x[width]\n"
- "incw x22\n"
"cntw x16\n"
- "sub x22, x22, #0x1\n"
- "udiv x22, x22, x16\n" // n_passes = ceildiv(width, VL<T>)
- "mov x21, %x[width]\n"
- "sub x15, x16, #0x1\n"
- "sub x20, x22, #0x1\n"
- "ands x15, x21, x15\n"
- "sub x14, x16, #0x2\n"
- "mov x13, #0x0\n"
- "mov x11, %x[in]\n"
- "ldr x10, [x11, #0x0]\n"
- "add x9, %x[in], x16, LSL #3\n"
- "cntw x28, ALL, MUL #2\n"
- "ldr x27, [x9, #0x0]\n"
- "cntw x26, ALL, MUL #3\n"
- "lsr x20, x20, #0x1\n" // n_loops = (n_passes - 1) / 2
- "ldr x25, [x11, #0x8]\n"
- "and x24, x22, #0x1\n" // odd_tail = bool(n_passes & 0x1)
- "csel x15, x15, x16, NE\n"
- "ldr x23, [x9, #0x8]\n"
+ "cntw x15, ALL, MUL #2\n"
+ "cntw x14, ALL, MUL #3\n"
+ "mov x19, %x[width]\n"
+ "incw x19\n"
+ "sub x19, x19, #0x1\n"
+ "udiv x19, x19, x16\n" // n_passes = ceildiv(width, VL<T>)
+ "sub x13, x19, #0x1\n"
+ "lsr x13, x13, #0x1\n" // n_loops = (n_passes - 1) / 2
+ "and x11, x19, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+ "mov x19, %x[width]\n"
+ "sub x10, x16, #0x1\n"
+ "ands x10, x19, x10\n"
+ "csel x10, x10, x16, NE\n"
+ "sub x9, x16, #0x2\n"
"ptrue p13.s\n"
"whilelt p12.s, XZR, %x[height]\n"
"whilelt p11.s, x16, %x[height]\n"
- "mov x22, %x[row_offset]\n"
- "mov x21, %x[out]\n"
- "whilelt p10.s, x13, %x[width]\n"
- "whilelt p9.s, x13, %x[width]\n"
- "whilelt p8.s, x13, %x[width]\n"
- "add x11, x11, #0x10\n"
- "add x9, x9, #0x10\n"
+ "mov x28, %x[row_offset]\n"
+ "mov x27, %x[out]\n"
+ "mov x26, #0x0\n"
+ "whilelt p10.s, x26, %x[width]\n"
+ "whilelt p9.s, x26, %x[width]\n"
+ "whilelt p8.s, x26, %x[width]\n"
+ "mov x25, %x[in]\n"
+ "add x24, %x[in], x16, LSL #3\n"
+ "ldr x23, [x25, #0x0]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"mov x12, #0x0\n"
- "cbz x14, 2f\n"
+ "cbz x9, 2f\n"
"1:" // K loop: Charge: Loop
- ".inst 0x25306581 // psel p1.s, p9.s/Z, p12.s[w12]\n"
- ".inst 0x25306160 // psel p0.s, p8.s/Z, p11.s[w12]\n"
- ".inst 0xe0960540 // ld1w { za0h.s[x12] }, p1/Z, [x10, x22, LSL #2]\n"
- "ldr x10, [x11, #0x0]\n"
- ".inst 0xe0960364 // ld1w { za1h.s[x12] }, p0/Z, [x27, x22, LSL #2]\n"
- ".inst 0x25706581 // psel p1.s, p9.s/Z, p12.s[w12, #1]\n"
- ".inst 0x25706160 // psel p0.s, p8.s/Z, p11.s[w12, #1]\n"
- "ldr x27, [x9, #0x0]\n"
- ".inst 0xe0960721 // ld1w { za0h.s[x12, #1] }, p1/Z, [x25, x22, LSL #2]\n"
- "ldr x25, [x11, #0x8]\n"
- "add x11, x11, #0x10\n"
- ".inst 0xe09602e5 // ld1w { za1h.s[x12, #1] }, p0/Z, [x23, x22, LSL #2]\n"
+ ".inst 0x25306580 // dup p0.s, p9.s/Z, p12.s[w12]\n"
+ ".inst 0xe09c02e0 // ld1w { za0h.s[x12] }, p0/Z, [x23, x28, LSL #2]\n"
+ ".inst 0x25306160 // dup p0.s, p8.s/Z, p11.s[w12]\n"
+ ".inst 0xe09c02c4 // ld1w { za1h.s[x12] }, p0/Z, [x22, x28, LSL #2]\n"
+ ".inst 0x25706580 // dup p0.s, p9.s/Z, p12.s[w12, #1]\n"
+ ".inst 0xe09c02a1 // ld1w { za0h.s[x12, #1] }, p0/Z, [x21, x28, LSL #2]\n"
+ ".inst 0x25706160 // dup p0.s, p8.s/Z, p11.s[w12, #1]\n"
+ ".inst 0xe09c0285 // ld1w { za1h.s[x12, #1] }, p0/Z, [x20, x28, LSL #2]\n"
+ "ldr x23, [x25, #0x0]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"add x12, x12, #0x2\n"
- "cmp x12, x14\n"
- "ldr x23, [x9, #0x8]\n"
- "add x9, x9, #0x10\n"
+ "cmp x12, x9\n"
"blt 1b\n"
"2:" // K loop: Charge: End
- ".inst 0x25306581 // psel p1.s, p9.s/Z, p12.s[w12]\n"
- ".inst 0x25306160 // psel p0.s, p8.s/Z, p11.s[w12]\n"
- ".inst 0xe0960540 // ld1w { za0h.s[x12] }, p1/Z, [x10, x22, LSL #2]\n"
- ".inst 0xe0960364 // ld1w { za1h.s[x12] }, p0/Z, [x27, x22, LSL #2]\n"
- ".inst 0x25706581 // psel p1.s, p9.s/Z, p12.s[w12, #1]\n"
- ".inst 0x25706160 // psel p0.s, p8.s/Z, p11.s[w12, #1]\n"
- "mov x11, %x[in]\n"
- "add x9, %x[in], x16, LSL #3\n"
- ".inst 0xe0960721 // ld1w { za0h.s[x12, #1] }, p1/Z, [x25, x22, LSL #2]\n"
- "ldr x10, [x11, #0x0]\n"
- ".inst 0xe09602e5 // ld1w { za1h.s[x12, #1] }, p0/Z, [x23, x22, LSL #2]\n"
- "ldr x27, [x9, #0x0]\n"
- "incw x22\n"
- "incw x13\n"
- "ldr x25, [x11, #0x8]\n"
- "add x11, x11, #0x10\n"
- "ldr x23, [x9, #0x8]\n"
- "add x9, x9, #0x10\n"
- "cbz x20, 8f\n"
- "mov x20, x20\n"
+ ".inst 0x25306580 // dup p0.s, p9.s/Z, p12.s[w12]\n"
+ ".inst 0xe09c02e0 // ld1w { za0h.s[x12] }, p0/Z, [x23, x28, LSL #2]\n"
+ ".inst 0x25306160 // dup p0.s, p8.s/Z, p11.s[w12]\n"
+ ".inst 0xe09c02c4 // ld1w { za1h.s[x12] }, p0/Z, [x22, x28, LSL #2]\n"
+ ".inst 0x25706580 // dup p0.s, p9.s/Z, p12.s[w12, #1]\n"
+ ".inst 0xe09c02a1 // ld1w { za0h.s[x12, #1] }, p0/Z, [x21, x28, LSL #2]\n"
+ ".inst 0x25706160 // dup p0.s, p8.s/Z, p11.s[w12, #1]\n"
+ ".inst 0xe09c0285 // ld1w { za1h.s[x12, #1] }, p0/Z, [x20, x28, LSL #2]\n"
+ "mov x25, %x[in]\n"
+ "add x24, %x[in], x16, LSL #3\n"
+ "ldr x23, [x25, #0x0]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "incw x28\n"
+ "incw x26\n"
+ "cbz x13, 8f\n"
+ "mov x19, x13\n"
"3:" // K loop: Main loop
- "whilelt p9.s, x13, %x[width]\n"
- "whilelt p8.s, x13, %x[width]\n"
+ "whilelt p9.s, x26, %x[width]\n"
+ "whilelt p8.s, x26, %x[width]\n"
"mov x12, #0x0\n"
- "cbz x14, 5f\n"
+ "cbz x9, 5f\n"
"4:" // K loop: Main loop: First: Loop
- ".inst 0x25306581 // psel p1.s, p9.s/Z, p12.s[w12]\n"
- ".inst 0x25306160 // psel p0.s, p8.s/Z, p11.s[w12]\n"
- ".inst 0xe0960548 // ld1w { za2h.s[x12] }, p1/Z, [x10, x22, LSL #2]\n"
- "ldr x10, [x11, #0x0]\n"
- ".inst 0xe096036c // ld1w { za3h.s[x12] }, p0/Z, [x27, x22, LSL #2]\n"
- ".inst 0x25706580 // psel p0.s, p9.s/Z, p12.s[w12, #1]\n"
- ".inst 0x25706162 // psel p2.s, p8.s/Z, p11.s[w12, #1]\n"
- "ldr x27, [x9, #0x0]\n"
- ".inst 0x25307541 // psel p1.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0960329 // ld1w { za2h.s[x12, #1] }, p0/Z, [x25, x22, LSL #2]\n"
- "ldr x25, [x11, #0x8]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0960aed // ld1w { za3h.s[x12, #1] }, p2/Z, [x23, x22, LSL #2]\n"
- "ldr x23, [x9, #0x8]\n"
- ".inst 0xe0bf86a0 // st1w { za0v.s[x12] }, p1/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25707541 // psel p1.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0xe0b082a4 // st1w { za1v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
- ".inst 0x25707540 // psel p0.s, p13.s/Z, p10.s[w12, #1]\n"
- "add x11, x11, #0x10\n"
- ".inst 0xe0bc86a1 // st1w { za0v.s[x12, #1] }, p1/Z, [x21, x28, LSL #2]\n"
- "add x9, x9, #0x10\n"
- ".inst 0xe0ba82a5 // st1w { za1v.s[x12, #1] }, p0/Z, [x21, x26, LSL #2]\n"
+ ".inst 0x25306580 // dup p0.s, p9.s/Z, p12.s[w12]\n"
+ ".inst 0xe09c02e8 // ld1w { za2h.s[x12] }, p0/Z, [x23, x28, LSL #2]\n"
+ ".inst 0x25306160 // dup p0.s, p8.s/Z, p11.s[w12]\n"
+ ".inst 0xe09c02cc // ld1w { za3h.s[x12] }, p0/Z, [x22, x28, LSL #2]\n"
+ ".inst 0x25706580 // dup p0.s, p9.s/Z, p12.s[w12, #1]\n"
+ ".inst 0xe09c02a9 // ld1w { za2h.s[x12, #1] }, p0/Z, [x21, x28, LSL #2]\n"
+ ".inst 0x25706160 // dup p0.s, p8.s/Z, p11.s[w12, #1]\n"
+ ".inst 0xe09c028d // ld1w { za3h.s[x12, #1] }, p0/Z, [x20, x28, LSL #2]\n"
+ "ldr x23, [x25, #0x0]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0x25707541 // dup p1.s, p13.s/Z, p10.s[w12, #1]\n"
+ ".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ ".inst 0x25707540 // dup p0.s, p13.s/Z, p10.s[w12, #1]\n"
+ ".inst 0xe0af8761 // st1w { za0v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+ ".inst 0xe0ae8365 // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+ "addvl x27, x27, #4\n"
"add x12, x12, #0x2\n"
- "cmp x12, x14\n"
- "addvl x21, x21, #4\n"
+ "cmp x12, x9\n"
"blt 4b\n"
"5:" // K loop: Main loop: First: Tail
- ".inst 0x25306581 // psel p1.s, p9.s/Z, p12.s[w12]\n"
- ".inst 0x25306160 // psel p0.s, p8.s/Z, p11.s[w12]\n"
- ".inst 0xe0960548 // ld1w { za2h.s[x12] }, p1/Z, [x10, x22, LSL #2]\n"
- ".inst 0xe096036c // ld1w { za3h.s[x12] }, p0/Z, [x27, x22, LSL #2]\n"
- "mov x11, %x[in]\n"
- "add x9, %x[in], x16, LSL #3\n"
- "ldr x10, [x11, #0x0]\n"
- ".inst 0x25706580 // psel p0.s, p9.s/Z, p12.s[w12, #1]\n"
- ".inst 0x25706161 // psel p1.s, p8.s/Z, p11.s[w12, #1]\n"
- ".inst 0xe0960329 // ld1w { za2h.s[x12, #1] }, p0/Z, [x25, x22, LSL #2]\n"
- "ldr x27, [x9, #0x0]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe09606ed // ld1w { za3h.s[x12, #1] }, p1/Z, [x23, x22, LSL #2]\n"
- "ldr x25, [x11, #0x8]\n"
- ".inst 0x25307542 // psel p2.s, p13.s/Z, p10.s[w12]\n"
- "ldr x23, [x9, #0x8]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25707541 // psel p1.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0x25707540 // psel p0.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0xe0b08aa4 // st1w { za1v.s[x12] }, p2/Z, [x21, x16, LSL #2]\n"
- "whilelt p10.s, x13, %x[width]\n"
- "incw x13\n"
- ".inst 0xe0bc86a1 // st1w { za0v.s[x12, #1] }, p1/Z, [x21, x28, LSL #2]\n"
- "add x11, x11, #0x10\n"
- "add x9, x9, #0x10\n"
- ".inst 0xe0ba82a5 // st1w { za1v.s[x12, #1] }, p0/Z, [x21, x26, LSL #2]\n"
- "addvl x21, x21, #4\n"
- "incw x22\n"
- "whilelt p9.s, x13, %x[width]\n"
- "whilelt p8.s, x13, %x[width]\n"
+ "mov x25, %x[in]\n"
+ "add x24, %x[in], x16, LSL #3\n"
+ ".inst 0x25306580 // dup p0.s, p9.s/Z, p12.s[w12]\n"
+ ".inst 0xe09c02e8 // ld1w { za2h.s[x12] }, p0/Z, [x23, x28, LSL #2]\n"
+ ".inst 0x25306160 // dup p0.s, p8.s/Z, p11.s[w12]\n"
+ ".inst 0xe09c02cc // ld1w { za3h.s[x12] }, p0/Z, [x22, x28, LSL #2]\n"
+ ".inst 0x25706580 // dup p0.s, p9.s/Z, p12.s[w12, #1]\n"
+ ".inst 0xe09c02a9 // ld1w { za2h.s[x12, #1] }, p0/Z, [x21, x28, LSL #2]\n"
+ ".inst 0x25706160 // dup p0.s, p8.s/Z, p11.s[w12, #1]\n"
+ ".inst 0xe09c028d // ld1w { za3h.s[x12, #1] }, p0/Z, [x20, x28, LSL #2]\n"
+ "ldr x23, [x25, #0x0]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0x25707541 // dup p1.s, p13.s/Z, p10.s[w12, #1]\n"
+ ".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ ".inst 0x25707540 // dup p0.s, p13.s/Z, p10.s[w12, #1]\n"
+ "whilelt p10.s, x26, %x[width]\n"
+ ".inst 0xe0af8761 // st1w { za0v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+ "incw x26\n"
+ "incw x28\n"
+ ".inst 0xe0ae8365 // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+ "addvl x27, x27, #4\n"
+ "whilelt p9.s, x26, %x[width]\n"
+ "whilelt p8.s, x26, %x[width]\n"
"mov x12, #0x0\n"
- "cbz x14, 7f\n"
+ "cbz x9, 7f\n"
"6:" // K loop: Main loop: Second: Loop
- ".inst 0x25306581 // psel p1.s, p9.s/Z, p12.s[w12]\n"
- ".inst 0x25306160 // psel p0.s, p8.s/Z, p11.s[w12]\n"
- ".inst 0xe0960540 // ld1w { za0h.s[x12] }, p1/Z, [x10, x22, LSL #2]\n"
- "ldr x10, [x11, #0x0]\n"
- ".inst 0xe0960364 // ld1w { za1h.s[x12] }, p0/Z, [x27, x22, LSL #2]\n"
- ".inst 0x25706580 // psel p0.s, p9.s/Z, p12.s[w12, #1]\n"
- ".inst 0x25706162 // psel p2.s, p8.s/Z, p11.s[w12, #1]\n"
- "ldr x27, [x9, #0x0]\n"
- ".inst 0x25307541 // psel p1.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0960321 // ld1w { za0h.s[x12, #1] }, p0/Z, [x25, x22, LSL #2]\n"
- "ldr x25, [x11, #0x8]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0960ae5 // ld1w { za1h.s[x12, #1] }, p2/Z, [x23, x22, LSL #2]\n"
- "ldr x23, [x9, #0x8]\n"
- ".inst 0xe0bf86a8 // st1w { za2v.s[x12] }, p1/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25707541 // psel p1.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0xe0b082ac // st1w { za3v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
- ".inst 0x25707540 // psel p0.s, p13.s/Z, p10.s[w12, #1]\n"
- "add x11, x11, #0x10\n"
- ".inst 0xe0bc86a9 // st1w { za2v.s[x12, #1] }, p1/Z, [x21, x28, LSL #2]\n"
- "add x9, x9, #0x10\n"
- ".inst 0xe0ba82ad // st1w { za3v.s[x12, #1] }, p0/Z, [x21, x26, LSL #2]\n"
+ ".inst 0x25306580 // dup p0.s, p9.s/Z, p12.s[w12]\n"
+ ".inst 0xe09c02e0 // ld1w { za0h.s[x12] }, p0/Z, [x23, x28, LSL #2]\n"
+ ".inst 0x25306160 // dup p0.s, p8.s/Z, p11.s[w12]\n"
+ ".inst 0xe09c02c4 // ld1w { za1h.s[x12] }, p0/Z, [x22, x28, LSL #2]\n"
+ ".inst 0x25706580 // dup p0.s, p9.s/Z, p12.s[w12, #1]\n"
+ ".inst 0xe09c02a1 // ld1w { za0h.s[x12, #1] }, p0/Z, [x21, x28, LSL #2]\n"
+ ".inst 0x25706160 // dup p0.s, p8.s/Z, p11.s[w12, #1]\n"
+ ".inst 0xe09c0285 // ld1w { za1h.s[x12, #1] }, p0/Z, [x20, x28, LSL #2]\n"
+ "ldr x23, [x25, #0x0]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0xe0bf8368 // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0x25707541 // dup p1.s, p13.s/Z, p10.s[w12, #1]\n"
+ ".inst 0xe0b0836c // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ ".inst 0x25707540 // dup p0.s, p13.s/Z, p10.s[w12, #1]\n"
+ ".inst 0xe0af8769 // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+ ".inst 0xe0ae836d // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+ "addvl x27, x27, #4\n"
"add x12, x12, #0x2\n"
- "cmp x12, x14\n"
- "addvl x21, x21, #4\n"
+ "cmp x12, x9\n"
"blt 6b\n"
"7:" // K loop: Main loop: Second: Tail
- ".inst 0x25306581 // psel p1.s, p9.s/Z, p12.s[w12]\n"
- ".inst 0x25306160 // psel p0.s, p8.s/Z, p11.s[w12]\n"
- ".inst 0xe0960540 // ld1w { za0h.s[x12] }, p1/Z, [x10, x22, LSL #2]\n"
- ".inst 0xe0960364 // ld1w { za1h.s[x12] }, p0/Z, [x27, x22, LSL #2]\n"
- "mov x11, %x[in]\n"
- "add x9, %x[in], x16, LSL #3\n"
- "ldr x10, [x11, #0x0]\n"
- ".inst 0x25706580 // psel p0.s, p9.s/Z, p12.s[w12, #1]\n"
- ".inst 0x25706161 // psel p1.s, p8.s/Z, p11.s[w12, #1]\n"
- ".inst 0xe0960321 // ld1w { za0h.s[x12, #1] }, p0/Z, [x25, x22, LSL #2]\n"
- "ldr x27, [x9, #0x0]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe09606e5 // ld1w { za1h.s[x12, #1] }, p1/Z, [x23, x22, LSL #2]\n"
- "ldr x25, [x11, #0x8]\n"
- ".inst 0x25307542 // psel p2.s, p13.s/Z, p10.s[w12]\n"
- "ldr x23, [x9, #0x8]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25707541 // psel p1.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0x25707540 // psel p0.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0xe0b08aac // st1w { za3v.s[x12] }, p2/Z, [x21, x16, LSL #2]\n"
- "whilelt p10.s, x13, %x[width]\n"
- "subs x20, x20, #0x1\n"
- ".inst 0xe0bc86a9 // st1w { za2v.s[x12, #1] }, p1/Z, [x21, x28, LSL #2]\n"
- "add x11, x11, #0x10\n"
- "add x9, x9, #0x10\n"
- ".inst 0xe0ba82ad // st1w { za3v.s[x12, #1] }, p0/Z, [x21, x26, LSL #2]\n"
- "addvl x21, x21, #4\n"
- "incw x13\n"
- "incw x22\n"
+ "mov x25, %x[in]\n"
+ "add x24, %x[in], x16, LSL #3\n"
+ ".inst 0x25306580 // dup p0.s, p9.s/Z, p12.s[w12]\n"
+ ".inst 0xe09c02e0 // ld1w { za0h.s[x12] }, p0/Z, [x23, x28, LSL #2]\n"
+ ".inst 0x25306160 // dup p0.s, p8.s/Z, p11.s[w12]\n"
+ ".inst 0xe09c02c4 // ld1w { za1h.s[x12] }, p0/Z, [x22, x28, LSL #2]\n"
+ ".inst 0x25706580 // dup p0.s, p9.s/Z, p12.s[w12, #1]\n"
+ ".inst 0xe09c02a1 // ld1w { za0h.s[x12, #1] }, p0/Z, [x21, x28, LSL #2]\n"
+ ".inst 0x25706160 // dup p0.s, p8.s/Z, p11.s[w12, #1]\n"
+ ".inst 0xe09c0285 // ld1w { za1h.s[x12, #1] }, p0/Z, [x20, x28, LSL #2]\n"
+ "ldr x23, [x25, #0x0]\n"
+ "ldr x22, [x24, #0x0]\n"
+ "ldr x21, [x25, #0x8]\n"
+ "ldr x20, [x24, #0x8]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0xe0bf8368 // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0x25707541 // dup p1.s, p13.s/Z, p10.s[w12, #1]\n"
+ ".inst 0xe0b0836c // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ ".inst 0x25707540 // dup p0.s, p13.s/Z, p10.s[w12, #1]\n"
+ "whilelt p10.s, x26, %x[width]\n"
+ ".inst 0xe0af8769 // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x15, LSL #2]\n"
+ "incw x26\n"
+ "incw x28\n"
+ ".inst 0xe0ae836d // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
+ "addvl x27, x27, #4\n"
+ "subs x19, x19, #0x1\n"
"bgt 3b\n"
"8:" // K loop: Tails
- "cbnz x24, 11f\n"
- "mov x11, %x[in]\n"
- "whilelt p9.s, x13, %x[width]\n"
- "whilelt p8.s, x13, %x[width]\n"
+ "cbnz x11, 11f\n"
+ "mov x25, %x[in]\n"
+ "whilelt p9.s, x26, %x[width]\n"
+ "whilelt p8.s, x26, %x[width]\n"
"mov x12, #0x0\n"
"9:" // K loop: Tails: Even: First
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0b082a4 // st1w { za1v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
- "ldr x10, [x11, #0x0]\n"
- ".inst 0x25306581 // psel p1.s, p9.s/Z, p12.s[w12]\n"
- ".inst 0x25306160 // psel p0.s, p8.s/Z, p11.s[w12]\n"
- "ldr x27, [x11, x16, LSL #0x3]\n"
- ".inst 0xe0960548 // ld1w { za2h.s[x12] }, p1/Z, [x10, x22, LSL #2]\n"
- "add x11, x11, #0x8\n"
- "addvl x21, x21, #2\n"
- ".inst 0xe096036c // ld1w { za3h.s[x12] }, p0/Z, [x27, x22, LSL #2]\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0x25306581 // dup p1.s, p9.s/Z, p12.s[w12]\n"
+ ".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ ".inst 0x25306160 // dup p0.s, p8.s/Z, p11.s[w12]\n"
+ "addvl x27, x27, #2\n"
+ "ldr x23, [x25, #0x0]\n"
+ ".inst 0xe09c06e8 // ld1w { za2h.s[x12] }, p1/Z, [x23, x28, LSL #2]\n"
+ "ldr x22, [x25, x16, LSL #0x3]\n"
+ ".inst 0xe09c02cc // ld1w { za3h.s[x12] }, p0/Z, [x22, x28, LSL #2]\n"
"add x12, x12, #0x1\n"
"cmp x12, x16\n"
+ "add x25, x25, #0x8\n"
"blt 9b\n"
- "whilelt p10.s, x13, %x[width]\n"
- "whilelt p9.s, x13, %x[width]\n"
- "whilelt p8.s, x13, %x[width]\n"
+ "whilelt p10.s, x26, %x[width]\n"
+ "whilelt p9.s, x26, %x[width]\n"
+ "whilelt p8.s, x26, %x[width]\n"
"mov x12, #0x0\n"
"10:" // K loop: Tails: Even: Second
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0b082ac // st1w { za3v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0xe0bf8368 // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0xe0b0836c // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ "addvl x27, x27, #2\n"
"add x12, x12, #0x1\n"
- "cmp x12, x15\n"
- "addvl x21, x21, #2\n"
+ "cmp x12, x10\n"
"blt 10b\n"
- "whilelt p10.s, x13, %x[width]\n"
+ "whilelt p10.s, x26, %x[width]\n"
"b 13f\n"
"11:" // K loop: Tails: Odd
"mov x12, #0x0\n"
"12:" // K loop: Tails: Odd: Loop
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0b082a4 // st1w { za1v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
+ "addvl x27, x27, #2\n"
"add x12, x12, #0x1\n"
- "cmp x12, x15\n"
- "addvl x21, x21, #2\n"
+ "cmp x12, x10\n"
"blt 12b\n"
"13:" // K loop: End
- "mov %x[out], x21\n"
+ "mov %x[out], x27\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p8", "p9", "p10", "p11", "p12", "p13", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block2_bf16_bf16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block2_bf16_bf16.hpp
index 4cc84d344a..556d1481de 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block2_bf16_bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block2_bf16_bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#if defined(__ARM_FEATURE_SVE)
@@ -34,93 +34,93 @@ void interleave_block<4, 2, VLType::SME, false>(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cntw x16\n"
"cntw x15\n"
"cntw x14, ALL, MUL #2\n"
"cntw x13, ALL, MUL #3\n"
- "cmp %x[height], x16\n"
- "csel x16, %x[height], x16, LT\n"
- "whilelt p11.h, XZR, %x[height]\n"
- "whilelt p10.h, x15, %x[height]\n"
- "whilelt p9.h, x14, %x[height]\n"
- "whilelt p8.h, x13, %x[height]\n"
- "mov x11, #0x0\n"
- "cnth x10\n"
+ "cnth x11\n"
"ptrue p13.s\n"
- "sub x16, x16, #0x1\n"
- "zip1 p12.h, p11.h, p9.h\n"
- "zip1 p11.h, p10.h, p8.h\n"
+ "cntw x10\n"
+ "cmp %x[height], x10\n"
+ "csel x10, %x[height], x10, LT\n"
+ "sub x10, x10, #0x1\n"
+ "whilelt p10.h, XZR, %x[height]\n"
+ "whilelt p9.h, x15, %x[height]\n"
+ "whilelt p8.h, x14, %x[height]\n"
+ "zip1 p12.h, p10.h, p8.h\n"
+ "whilelt p8.h, x13, %x[height]\n"
+ "zip1 p11.h, p9.h, p8.h\n"
"mov x9, %x[row_offset]\n"
"mov x28, %x[out]\n"
- "whilelt p10.h, x11, %x[width]\n"
- "whilelt p9.h, x11, %x[width]\n"
- "whilelt p8.h, x11, %x[width]\n"
+ "mov x27, #0x0\n"
+ "whilelt p10.h, x27, %x[width]\n"
+ "whilelt p9.h, x27, %x[width]\n"
+ "whilelt p8.h, x27, %x[width]\n"
"1:" // Width loop
- "add x27, %x[in], XZR, LSL #3\n"
- "add x26, %x[in], x15, LSL #3\n"
- "ldr x25, [x27], #0x8\n"
- "add x24, %x[in], x14, LSL #3\n"
- "add x20, %x[in], x13, LSL #3\n"
- "ldr x23, [x26], #0x8\n"
"mov x12, #0x0\n"
- "ldr x22, [x24], #0x8\n"
- "ldr x21, [x20], #0x8\n"
- "cbz x16, 3f\n"
+ "add x26, %x[in], XZR, LSL #3\n"
+ "add x25, %x[in], x15, LSL #3\n"
+ "add x24, %x[in], x14, LSL #3\n"
+ "add x23, %x[in], x13, LSL #3\n"
+ "ldr x22, [x26], #0x8\n"
+ "ldr x21, [x25], #0x8\n"
+ "ldr x20, [x24], #0x8\n"
+ "ldr x19, [x23], #0x8\n"
+ "cbz x10, 3f\n"
"2:" // Loads: Loop
- ".inst 0x25286581 // psel p1.h, p9.h/Z, p12.h[w12]\n"
- ".inst 0x25286160 // psel p0.h, p8.h/Z, p11.h[w12]\n"
- ".inst 0xe0490720 // ld1h { za0h.h[x12] }, p1/Z, [x25, x9, LSL #1]\n"
- "ldr x25, [x27], #0x8\n"
- ".inst 0xe04902e8 // ld1h { za1h.h[x12] }, p0/Z, [x23, x9, LSL #1]\n"
- ".inst 0x25386581 // psel p1.h, p9.h/Z, p12.h[w12, #1]\n"
- ".inst 0x25386160 // psel p0.h, p8.h/Z, p11.h[w12, #1]\n"
- "ldr x23, [x26], #0x8\n"
- ".inst 0xe04906c1 // ld1h { za0h.h[x12, #1] }, p1/Z, [x22, x9, LSL #1]\n"
- "ldr x22, [x24], #0x8\n"
- ".inst 0xe04902a9 // ld1h { za1h.h[x12, #1] }, p0/Z, [x21, x9, LSL #1]\n"
+ ".inst 0x25286580 // dup p0.h, p9.h/Z, p12.h[w12]\n"
+ ".inst 0xe04902c0 // ld1h { za0h.h[x12] }, p0/Z, [x22, x9, LSL #1]\n"
+ ".inst 0x25286160 // dup p0.h, p8.h/Z, p11.h[w12]\n"
+ ".inst 0xe04902a8 // ld1h { za1h.h[x12] }, p0/Z, [x21, x9, LSL #1]\n"
+ ".inst 0x25386580 // dup p0.h, p9.h/Z, p12.h[w12, #1]\n"
+ ".inst 0xe0490281 // ld1h { za0h.h[x12, #1] }, p0/Z, [x20, x9, LSL #1]\n"
+ ".inst 0x25386160 // dup p0.h, p8.h/Z, p11.h[w12, #1]\n"
+ ".inst 0xe0490269 // ld1h { za1h.h[x12, #1] }, p0/Z, [x19, x9, LSL #1]\n"
+ "ldr x22, [x26], #0x8\n"
+ "ldr x21, [x25], #0x8\n"
+ "ldr x20, [x24], #0x8\n"
+ "ldr x19, [x23], #0x8\n"
"add x12, x12, #0x2\n"
- "cmp x12, x16, LSL #1\n"
- "ldr x21, [x20], #0x8\n"
+ "cmp x12, x10, LSL #1\n"
"blt 2b\n"
"3:" // Loads: Tail
- ".inst 0x25286581 // psel p1.h, p9.h/Z, p12.h[w12]\n"
- ".inst 0x25286160 // psel p0.h, p8.h/Z, p11.h[w12]\n"
- ".inst 0xe0490720 // ld1h { za0h.h[x12] }, p1/Z, [x25, x9, LSL #1]\n"
- "sub x20, %x[width], x11\n"
- ".inst 0xe04902e8 // ld1h { za1h.h[x12] }, p0/Z, [x23, x9, LSL #1]\n"
- "cmp x20, x10\n"
- "csel x20, x20, x10, LT\n"
- ".inst 0x25386580 // psel p0.h, p9.h/Z, p12.h[w12, #1]\n"
- ".inst 0xe04902c1 // ld1h { za0h.h[x12, #1] }, p0/Z, [x22, x9, LSL #1]\n"
- ".inst 0x25386160 // psel p0.h, p8.h/Z, p11.h[w12, #1]\n"
- "add x20, x20, #0x1\n"
- ".inst 0xe04902a9 // ld1h { za1h.h[x12, #1] }, p0/Z, [x21, x9, LSL #1]\n"
+ ".inst 0x25286580 // dup p0.h, p9.h/Z, p12.h[w12]\n"
+ ".inst 0xe04902c0 // ld1h { za0h.h[x12] }, p0/Z, [x22, x9, LSL #1]\n"
+ ".inst 0x25286160 // dup p0.h, p8.h/Z, p11.h[w12]\n"
+ ".inst 0xe04902a8 // ld1h { za1h.h[x12] }, p0/Z, [x21, x9, LSL #1]\n"
+ ".inst 0x25386580 // dup p0.h, p9.h/Z, p12.h[w12, #1]\n"
+ ".inst 0xe0490281 // ld1h { za0h.h[x12, #1] }, p0/Z, [x20, x9, LSL #1]\n"
+ ".inst 0x25386160 // dup p0.h, p8.h/Z, p11.h[w12, #1]\n"
+ ".inst 0xe0490269 // ld1h { za1h.h[x12, #1] }, p0/Z, [x19, x9, LSL #1]\n"
"mov x12, #0x0\n"
- "lsr x20, x20, #0x1\n"
+ "sub x19, %x[width], x27\n"
+ "cmp x19, x11\n"
+ "csel x19, x19, x11, LT\n"
+ "add x19, x19, #0x1\n"
+ "lsr x19, x19, #0x1\n"
"4:" // Stores: Loop
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
".inst 0xe0bf8380 // st1w { za0v.s[x12] }, p0/Z, [x28, XZR, LSL #2]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0x25307541 // dup p1.s, p13.s/Z, p10.s[w12]\n"
".inst 0xe0af8384 // st1w { za1v.s[x12] }, p0/Z, [x28, x15, LSL #2]\n"
- ".inst 0x25307541 // psel p1.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0x25307540 // dup p0.s, p13.s/Z, p10.s[w12]\n"
".inst 0xe0ae8788 // st1w { za2v.s[x12] }, p1/Z, [x28, x14, LSL #2]\n"
".inst 0xe0ad838c // st1w { za3v.s[x12] }, p0/Z, [x28, x13, LSL #2]\n"
- "add x12, x12, #0x1\n"
- "cmp x12, x20\n"
"addvl x28, x28, #4\n"
+ "add x12, x12, #0x1\n"
+ "cmp x12, x19\n"
"blt 4b\n"
- "inch x11\n"
- "whilelt p10.h, x11, %x[width]\n"
- "whilelt p9.h, x11, %x[width]\n"
- "whilelt p8.h, x11, %x[width]\n"
"inch x9\n"
+ "inch x27\n"
+ "whilelt p10.h, x27, %x[width]\n"
+ "whilelt p9.h, x27, %x[width]\n"
+ "whilelt p8.h, x27, %x[width]\n"
"b.any 1b\n"
"mov %x[out], x28\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p8", "p9", "p10", "p11", "p12", "p13", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_s8_s8.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_s8_s8.hpp
index 465939c30d..49d1aa5cc5 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_s8_s8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_s8_s8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#if defined(__ARM_FEATURE_SVE)
@@ -34,92 +34,92 @@ void interleave_block<4, 4, VLType::SME, false>(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cntw x16\n"
"cntw x15\n"
- "cntw x14, ALL, MUL #2\n"
- "cntw x13, ALL, MUL #3\n"
- "cmp %x[height], x16\n"
- "csel x16, %x[height], x16, LT\n"
+ "cntw x14\n"
+ "cntw x13, ALL, MUL #2\n"
+ "cntw x11, ALL, MUL #3\n"
+ "cmp %x[height], x15\n"
+ "csel x15, %x[height], x15, LT\n"
"whilelt p12.b, XZR, %x[height]\n"
- "whilelt p10.b, x15, %x[height]\n"
- "whilelt p9.b, x14, %x[height]\n"
- "whilelt p8.b, x13, %x[height]\n"
+ "whilelt p10.b, x14, %x[height]\n"
+ "whilelt p9.b, x13, %x[height]\n"
+ "whilelt p8.b, x11, %x[height]\n"
"zip1 p12.b, p12.b, p9.b\n"
"zip1 p10.b, p10.b, p8.b\n"
- "mov x11, #0x0\n"
- "cntb x10\n"
+ "mov x10, #0x0\n"
+ "cntb x9\n"
"ptrue p11.s\n"
- "sub x16, x16, #0x1\n"
+ "sub x15, x15, #0x1\n"
"zip1 p10.b, p12.b, p10.b\n"
- "mov x9, %x[row_offset]\n"
- "mov x28, %x[out]\n"
- "whilelt p9.b, x11, %x[width]\n"
- "whilelt p8.b, x11, %x[width]\n"
+ "mov x28, %x[row_offset]\n"
+ "mov x27, %x[out]\n"
+ "whilelt p9.b, x10, %x[width]\n"
+ "whilelt p8.b, x10, %x[width]\n"
"1:" // Width loop
- "add x27, %x[in], XZR, LSL #3\n"
- "add x26, %x[in], x15, LSL #3\n"
- "ldr x25, [x27], #0x8\n"
- "add x24, %x[in], x14, LSL #3\n"
+ "add x26, %x[in], XZR, LSL #3\n"
+ "add x25, %x[in], x14, LSL #3\n"
+ "ldr x24, [x26], #0x8\n"
"add x23, %x[in], x13, LSL #3\n"
- "ldr x20, [x26], #0x8\n"
+ "add x22, %x[in], x11, LSL #3\n"
+ "ldr x19, [x25], #0x8\n"
"mov x12, #0x0\n"
- "ldr x22, [x24], #0x8\n"
"ldr x21, [x23], #0x8\n"
- "cbz x16, 3f\n"
+ "ldr x20, [x22], #0x8\n"
+ "cbz x15, 3f\n"
"2:" // Loads: Loop
".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe0090320 // ld1b { za0h.b[x12] }, p0/Z, [x25, x9]\n"
+ ".inst 0xe01c0300 // ld1b { za0h.b[x12] }, p0/Z, [x24, x28]\n"
".inst 0x252c6140 // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
- "ldr x25, [x27], #0x8\n"
- ".inst 0xe0090281 // ld1b { za0h.b[x12, #1] }, p0/Z, [x20, x9]\n"
+ "ldr x24, [x26], #0x8\n"
+ ".inst 0xe01c0261 // ld1b { za0h.b[x12, #1] }, p0/Z, [x19, x28]\n"
".inst 0x25346141 // psel p1.b, p8.b/Z, p10.b[w12, #2]\n"
".inst 0x253c6140 // psel p0.b, p8.b/Z, p10.b[w12, #3]\n"
- "ldr x20, [x26], #0x8\n"
- ".inst 0xe00906c2 // ld1b { za0h.b[x12, #2] }, p1/Z, [x22, x9]\n"
- "ldr x22, [x24], #0x8\n"
- ".inst 0xe00902a3 // ld1b { za0h.b[x12, #3] }, p0/Z, [x21, x9]\n"
- "add x12, x12, #0x4\n"
- "cmp x12, x16, LSL #2\n"
+ "ldr x19, [x25], #0x8\n"
+ ".inst 0xe01c06a2 // ld1b { za0h.b[x12, #2] }, p1/Z, [x21, x28]\n"
"ldr x21, [x23], #0x8\n"
+ ".inst 0xe01c0283 // ld1b { za0h.b[x12, #3] }, p0/Z, [x20, x28]\n"
+ "add x12, x12, #0x4\n"
+ "cmp x12, x15, LSL #2\n"
+ "ldr x20, [x22], #0x8\n"
"blt 2b\n"
"3:" // Loads: Tail
".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe0090320 // ld1b { za0h.b[x12] }, p0/Z, [x25, x9]\n"
+ ".inst 0xe01c0300 // ld1b { za0h.b[x12] }, p0/Z, [x24, x28]\n"
".inst 0x252c6140 // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
- ".inst 0xe0090281 // ld1b { za0h.b[x12, #1] }, p0/Z, [x20, x9]\n"
+ ".inst 0xe01c0261 // ld1b { za0h.b[x12, #1] }, p0/Z, [x19, x28]\n"
".inst 0x25346140 // psel p0.b, p8.b/Z, p10.b[w12, #2]\n"
- "sub x20, %x[width], x11\n"
- ".inst 0xe00902c2 // ld1b { za0h.b[x12, #2] }, p0/Z, [x22, x9]\n"
- "cmp x20, x10\n"
- "csel x20, x20, x10, LT\n"
+ "sub x19, %x[width], x10\n"
+ ".inst 0xe01c02a2 // ld1b { za0h.b[x12, #2] }, p0/Z, [x21, x28]\n"
+ "cmp x19, x9\n"
+ "csel x19, x19, x9, LT\n"
".inst 0x253c6140 // psel p0.b, p8.b/Z, p10.b[w12, #3]\n"
- "add x20, x20, #0x3\n"
- ".inst 0xe00902a3 // ld1b { za0h.b[x12, #3] }, p0/Z, [x21, x9]\n"
+ "add x19, x19, #0x3\n"
+ ".inst 0xe01c0283 // ld1b { za0h.b[x12, #3] }, p0/Z, [x20, x28]\n"
"mov x12, #0x0\n"
- "lsr x20, x20, #0x2\n"
+ "lsr x19, x19, #0x2\n"
"4:" // Stores: Loop
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf8380 // st1w { za0v.s[x12] }, p0/Z, [x28, XZR, LSL #2]\n"
+ ".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0af8384 // st1w { za1v.s[x12] }, p0/Z, [x28, x15, LSL #2]\n"
+ ".inst 0xe0ae8364 // st1w { za1v.s[x12] }, p0/Z, [x27, x14, LSL #2]\n"
".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0ae8788 // st1w { za2v.s[x12] }, p1/Z, [x28, x14, LSL #2]\n"
- ".inst 0xe0ad838c // st1w { za3v.s[x12] }, p0/Z, [x28, x13, LSL #2]\n"
+ ".inst 0xe0ad8768 // st1w { za2v.s[x12] }, p1/Z, [x27, x13, LSL #2]\n"
+ ".inst 0xe0ab836c // st1w { za3v.s[x12] }, p0/Z, [x27, x11, LSL #2]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x20\n"
- "addvl x28, x28, #4\n"
+ "cmp x12, x19\n"
+ "addvl x27, x27, #4\n"
"blt 4b\n"
- "incb x11\n"
- "whilelt p9.b, x11, %x[width]\n"
- "whilelt p8.b, x11, %x[width]\n"
- "incb x9\n"
+ "incb x10\n"
+ "whilelt p9.b, x10, %x[width]\n"
+ "whilelt p8.b, x10, %x[width]\n"
+ "incb x28\n"
"b.any 1b\n"
- "mov %x[out], x28\n"
+ "mov %x[out], x27\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_s8_s8_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_s8_s8_summing.hpp
index ffd9384a13..94673d41d8 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_s8_s8_summing.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_s8_s8_summing.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#if defined(__ARM_FEATURE_SVE)
@@ -32,118 +32,118 @@ void interleave_block<4, 4, VLType::SME, true>(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cntw x16\n"
- "cntw x15\n"
"mov z24.b, #0x1\n"
- "cntw x14, ALL, MUL #2\n"
- "cntw x13, ALL, MUL #3\n"
"mov z23.s, #0x0\n"
+ "ptrue p2.b\n"
"mov z22.s, #0x0\n"
- "cmp %x[height], x16\n"
- "csel x16, %x[height], x16, LT\n"
+ "cntw x16\n"
"mov z21.s, #0x0\n"
+ "cntw x15, ALL, MUL #2\n"
"mov z20.s, #0x0\n"
- "whilelt p12.b, XZR, %x[height]\n"
- "whilelt p10.b, x15, %x[height]\n"
- "whilelt p9.b, x14, %x[height]\n"
- "whilelt p8.b, x13, %x[height]\n"
- "zip1 p12.b, p12.b, p9.b\n"
- "zip1 p10.b, p10.b, p8.b\n"
- "ptrue p2.b\n"
+ "cntw x14, ALL, MUL #3\n"
"cntb x11\n"
"ptrue p11.s\n"
- "sub x16, x16, #0x1\n"
- "zip1 p10.b, p12.b, p10.b\n"
- "mov x10, %x[row_offset]\n"
- "mov x9, %x[out]\n"
+ "cntw x10\n"
+ "cmp %x[height], x10\n"
+ "csel x10, %x[height], x10, LT\n"
+ "sub x10, x10, #0x1\n"
+ "whilelt p10.b, XZR, %x[height]\n"
+ "whilelt p9.b, x16, %x[height]\n"
+ "whilelt p8.b, x15, %x[height]\n"
+ "zip1 p10.b, p10.b, p8.b\n"
+ "whilelt p8.b, x14, %x[height]\n"
+ "zip1 p9.b, p9.b, p8.b\n"
+ "mov x9, %x[row_offset]\n"
+ "mov x28, %x[out]\n"
+ "zip1 p10.b, p10.b, p9.b\n"
"cbnz %x[first], 1f\n"
- "addvl x9, x9, #-4\n"
- "ld1w { z23.s }, p2/Z, [x9]\n"
- "ld1w { z22.s }, p2/Z, [x9, #1, MUL VL]\n"
- "ld1w { z21.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z20.s }, p2/Z, [x9, #3, MUL VL]\n"
+ "addvl x28, x28, #-4\n"
+ "ld1w { z23.s }, p2/Z, [x28]\n"
+ "ld1w { z22.s }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z21.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z20.s }, p2/Z, [x28, #3, MUL VL]\n"
"1:" // Initialise row sums: End
- "mov x28, #0x0\n"
- "whilelt p9.b, x28, %x[width]\n"
- "whilelt p8.b, x28, %x[width]\n"
+ "mov x27, #0x0\n"
+ "whilelt p9.b, x27, %x[width]\n"
+ "whilelt p8.b, x27, %x[width]\n"
"2:" // Width loop
- "add x27, %x[in], XZR, LSL #3\n"
- "add x26, %x[in], x15, LSL #3\n"
- "ldr x25, [x27], #0x8\n"
- "add x24, %x[in], x14, LSL #3\n"
- "add x23, %x[in], x13, LSL #3\n"
- "ldr x20, [x26], #0x8\n"
- "mov x12, #0x0\n"
- "ldr x22, [x24], #0x8\n"
- "ldr x21, [x23], #0x8\n"
- "cbz x16, 4f\n"
+ "mov x13, #0x0\n"
+ "add x26, %x[in], XZR, LSL #3\n"
+ "add x25, %x[in], x16, LSL #3\n"
+ "add x24, %x[in], x15, LSL #3\n"
+ "add x23, %x[in], x14, LSL #3\n"
+ "ldr x22, [x26], #0x8\n"
+ "ldr x21, [x25], #0x8\n"
+ "ldr x19, [x24], #0x8\n"
+ "ldr x20, [x23], #0x8\n"
+ "cbz x10, 4f\n"
"3:" // Loads: Loop
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe00a0320 // ld1b { za0h.b[x12] }, p0/Z, [x25, x10]\n"
- ".inst 0x252c6140 // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
- "ldr x25, [x27], #0x8\n"
- ".inst 0xe00a0281 // ld1b { za0h.b[x12, #1] }, p0/Z, [x20, x10]\n"
- ".inst 0x25346141 // psel p1.b, p8.b/Z, p10.b[w12, #2]\n"
- ".inst 0x253c6140 // psel p0.b, p8.b/Z, p10.b[w12, #3]\n"
- "ldr x20, [x26], #0x8\n"
- ".inst 0xe00a06c2 // ld1b { za0h.b[x12, #2] }, p1/Z, [x22, x10]\n"
- "ldr x22, [x24], #0x8\n"
- ".inst 0xe00a02a3 // ld1b { za0h.b[x12, #3] }, p0/Z, [x21, x10]\n"
- "add x12, x12, #0x4\n"
- "cmp x12, x16, LSL #2\n"
- "ldr x21, [x23], #0x8\n"
+ ".inst 0x25256140 // dup p0.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0xe00922c0 // ld1b { za0h.b[x13] }, p0/Z, [x22, x9]\n"
+ ".inst 0x252d6140 // dup p0.b, p8.b/Z, p10.b[w13, #1]\n"
+ ".inst 0x25356141 // dup p1.b, p8.b/Z, p10.b[w13, #2]\n"
+ ".inst 0xe00922a1 // ld1b { za0h.b[x13, #1] }, p0/Z, [x21, x9]\n"
+ ".inst 0x253d6140 // dup p0.b, p8.b/Z, p10.b[w13, #3]\n"
+ "ldr x22, [x26], #0x8\n"
+ ".inst 0xe0092662 // ld1b { za0h.b[x13, #2] }, p1/Z, [x19, x9]\n"
+ "ldr x21, [x25], #0x8\n"
+ "ldr x19, [x24], #0x8\n"
+ ".inst 0xe0092283 // ld1b { za0h.b[x13, #3] }, p0/Z, [x20, x9]\n"
+ "ldr x20, [x23], #0x8\n"
+ "add x13, x13, #0x4\n"
+ "cmp x13, x10, LSL #2\n"
"blt 3b\n"
"4:" // Loads: Tail
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe00a0320 // ld1b { za0h.b[x12] }, p0/Z, [x25, x10]\n"
- ".inst 0x252c6140 // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
- ".inst 0xe00a0281 // ld1b { za0h.b[x12, #1] }, p0/Z, [x20, x10]\n"
- ".inst 0x25346140 // psel p0.b, p8.b/Z, p10.b[w12, #2]\n"
- "sub x20, %x[width], x28\n"
- ".inst 0xe00a02c2 // ld1b { za0h.b[x12, #2] }, p0/Z, [x22, x10]\n"
- "cmp x20, x11\n"
- "csel x20, x20, x11, LT\n"
- ".inst 0x253c6140 // psel p0.b, p8.b/Z, p10.b[w12, #3]\n"
- "add x20, x20, #0x3\n"
- ".inst 0xe00a02a3 // ld1b { za0h.b[x12, #3] }, p0/Z, [x21, x10]\n"
+ ".inst 0x25256140 // dup p0.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0xe00922c0 // ld1b { za0h.b[x13] }, p0/Z, [x22, x9]\n"
+ ".inst 0x252d6140 // dup p0.b, p8.b/Z, p10.b[w13, #1]\n"
+ ".inst 0x25356141 // dup p1.b, p8.b/Z, p10.b[w13, #2]\n"
+ ".inst 0xe00922a1 // ld1b { za0h.b[x13, #1] }, p0/Z, [x21, x9]\n"
+ ".inst 0x253d6140 // dup p0.b, p8.b/Z, p10.b[w13, #3]\n"
"mov x12, #0x0\n"
- "lsr x20, x20, #0x2\n"
+ ".inst 0xe0092662 // ld1b { za0h.b[x13, #2] }, p1/Z, [x19, x9]\n"
+ "sub x19, %x[width], x27\n"
+ "cmp x19, x11\n"
+ ".inst 0xe0092283 // ld1b { za0h.b[x13, #3] }, p0/Z, [x20, x9]\n"
+ "csel x19, x19, x11, LT\n"
+ "add x19, x19, #0x3\n"
+ "lsr x19, x19, #0x2\n"
"5:" // Stores: Loop
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf8120 // st1w { za0v.s[x12] }, p0/Z, [x9, XZR, LSL #2]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xc0828812 // mova z18.s, p2/M, za0v.s[x12]\n"
- ".inst 0xe0af8124 // st1w { za1v.s[x12] }, p0/Z, [x9, x15, LSL #2]\n"
- ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xc0828891 // mova z17.s, p2/M, za1v.s[x12]\n"
- ".inst 0xe0ae8528 // st1w { za2v.s[x12] }, p1/Z, [x9, x14, LSL #2]\n"
- ".inst 0xc0828910 // mova z16.s, p2/M, za2v.s[x12]\n"
- "sdot z23.s, z18.b, z24.b\n"
- ".inst 0xe0ad812c // st1w { za3v.s[x12] }, p0/Z, [x9, x13, LSL #2]\n"
- ".inst 0xc0828993 // mova z19.s, p2/M, za3v.s[x12]\n"
+ ".inst 0xc0828813 // mova z19.s, p2/M, za0v.s[x12]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8380 // st1w { za0v.s[x12] }, p0/Z, [x28, XZR, LSL #2]\n"
+ ".inst 0xc0828892 // mova z18.s, p2/M, za1v.s[x12]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xc0828911 // mova z17.s, p2/M, za2v.s[x12]\n"
+ ".inst 0xe0b08384 // st1w { za1v.s[x12] }, p0/Z, [x28, x16, LSL #2]\n"
+ ".inst 0x25306d21 // dup p1.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xc0828990 // mova z16.s, p2/M, za3v.s[x12]\n"
+ "sdot z23.s, z19.b, z24.b\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ "sdot z22.s, z18.b, z24.b\n"
+ ".inst 0xe0af8788 // st1w { za2v.s[x12] }, p1/Z, [x28, x15, LSL #2]\n"
+ "sdot z21.s, z17.b, z24.b\n"
+ "sdot z20.s, z16.b, z24.b\n"
+ ".inst 0xe0ae838c // st1w { za3v.s[x12] }, p0/Z, [x28, x14, LSL #2]\n"
+ "addvl x28, x28, #4\n"
"add x12, x12, #0x1\n"
- "cmp x12, x20\n"
- "sdot z22.s, z17.b, z24.b\n"
- "sdot z21.s, z16.b, z24.b\n"
- "addvl x9, x9, #4\n"
- "sdot z20.s, z19.b, z24.b\n"
+ "cmp x12, x19\n"
"blt 5b\n"
- "incb x28\n"
- "whilelt p9.b, x28, %x[width]\n"
- "whilelt p8.b, x28, %x[width]\n"
- "incb x10\n"
+ "incb x9\n"
+ "incb x27\n"
+ "whilelt p9.b, x27, %x[width]\n"
+ "whilelt p8.b, x27, %x[width]\n"
"b.any 2b\n"
- "st1w { z23.s }, p2, [x9]\n"
- "st1w { z22.s }, p2, [x9, #1, MUL VL]\n"
- "st1w { z21.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z20.s }, p2, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "mov %x[out], x9\n"
+ "st1w { z23.s }, p2, [x28]\n"
+ "st1w { z22.s }, p2, [x28, #1, MUL VL]\n"
+ "st1w { z21.s }, p2, [x28, #2, MUL VL]\n"
+ "st1w { z20.s }, p2, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "mov %x[out], x28\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p8", "p9", "p10", "p11", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_u8_u8.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_u8_u8.hpp
index 9f5db6ba3d..bbdaaa3217 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_u8_u8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_u8_u8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#if defined(__ARM_FEATURE_SVE)
@@ -34,92 +34,92 @@ void interleave_block<4, 4, VLType::SME, false>(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cntw x16\n"
"cntw x15\n"
- "cntw x14, ALL, MUL #2\n"
- "cntw x13, ALL, MUL #3\n"
- "cmp %x[height], x16\n"
- "csel x16, %x[height], x16, LT\n"
+ "cntw x14\n"
+ "cntw x13, ALL, MUL #2\n"
+ "cntw x11, ALL, MUL #3\n"
+ "cmp %x[height], x15\n"
+ "csel x15, %x[height], x15, LT\n"
"whilelt p12.b, XZR, %x[height]\n"
- "whilelt p10.b, x15, %x[height]\n"
- "whilelt p9.b, x14, %x[height]\n"
- "whilelt p8.b, x13, %x[height]\n"
+ "whilelt p10.b, x14, %x[height]\n"
+ "whilelt p9.b, x13, %x[height]\n"
+ "whilelt p8.b, x11, %x[height]\n"
"zip1 p12.b, p12.b, p9.b\n"
"zip1 p10.b, p10.b, p8.b\n"
- "mov x11, #0x0\n"
- "cntb x10\n"
+ "mov x10, #0x0\n"
+ "cntb x9\n"
"ptrue p11.s\n"
- "sub x16, x16, #0x1\n"
+ "sub x15, x15, #0x1\n"
"zip1 p10.b, p12.b, p10.b\n"
- "mov x9, %x[row_offset]\n"
- "mov x28, %x[out]\n"
- "whilelt p9.b, x11, %x[width]\n"
- "whilelt p8.b, x11, %x[width]\n"
+ "mov x28, %x[row_offset]\n"
+ "mov x27, %x[out]\n"
+ "whilelt p9.b, x10, %x[width]\n"
+ "whilelt p8.b, x10, %x[width]\n"
"1:" // Width loop
- "add x27, %x[in], XZR, LSL #3\n"
- "add x26, %x[in], x15, LSL #3\n"
- "ldr x25, [x27], #0x8\n"
- "add x24, %x[in], x14, LSL #3\n"
+ "add x26, %x[in], XZR, LSL #3\n"
+ "add x25, %x[in], x14, LSL #3\n"
+ "ldr x24, [x26], #0x8\n"
"add x23, %x[in], x13, LSL #3\n"
- "ldr x20, [x26], #0x8\n"
+ "add x22, %x[in], x11, LSL #3\n"
+ "ldr x19, [x25], #0x8\n"
"mov x12, #0x0\n"
- "ldr x22, [x24], #0x8\n"
"ldr x21, [x23], #0x8\n"
- "cbz x16, 3f\n"
+ "ldr x20, [x22], #0x8\n"
+ "cbz x15, 3f\n"
"2:" // Loads: Loop
".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe0090320 // ld1b { za0h.b[x12] }, p0/Z, [x25, x9]\n"
+ ".inst 0xe01c0300 // ld1b { za0h.b[x12] }, p0/Z, [x24, x28]\n"
".inst 0x252c6140 // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
- "ldr x25, [x27], #0x8\n"
- ".inst 0xe0090281 // ld1b { za0h.b[x12, #1] }, p0/Z, [x20, x9]\n"
+ "ldr x24, [x26], #0x8\n"
+ ".inst 0xe01c0261 // ld1b { za0h.b[x12, #1] }, p0/Z, [x19, x28]\n"
".inst 0x25346141 // psel p1.b, p8.b/Z, p10.b[w12, #2]\n"
".inst 0x253c6140 // psel p0.b, p8.b/Z, p10.b[w12, #3]\n"
- "ldr x20, [x26], #0x8\n"
- ".inst 0xe00906c2 // ld1b { za0h.b[x12, #2] }, p1/Z, [x22, x9]\n"
- "ldr x22, [x24], #0x8\n"
- ".inst 0xe00902a3 // ld1b { za0h.b[x12, #3] }, p0/Z, [x21, x9]\n"
- "add x12, x12, #0x4\n"
- "cmp x12, x16, LSL #2\n"
+ "ldr x19, [x25], #0x8\n"
+ ".inst 0xe01c06a2 // ld1b { za0h.b[x12, #2] }, p1/Z, [x21, x28]\n"
"ldr x21, [x23], #0x8\n"
+ ".inst 0xe01c0283 // ld1b { za0h.b[x12, #3] }, p0/Z, [x20, x28]\n"
+ "add x12, x12, #0x4\n"
+ "cmp x12, x15, LSL #2\n"
+ "ldr x20, [x22], #0x8\n"
"blt 2b\n"
"3:" // Loads: Tail
".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe0090320 // ld1b { za0h.b[x12] }, p0/Z, [x25, x9]\n"
+ ".inst 0xe01c0300 // ld1b { za0h.b[x12] }, p0/Z, [x24, x28]\n"
".inst 0x252c6140 // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
- ".inst 0xe0090281 // ld1b { za0h.b[x12, #1] }, p0/Z, [x20, x9]\n"
+ ".inst 0xe01c0261 // ld1b { za0h.b[x12, #1] }, p0/Z, [x19, x28]\n"
".inst 0x25346140 // psel p0.b, p8.b/Z, p10.b[w12, #2]\n"
- "sub x20, %x[width], x11\n"
- ".inst 0xe00902c2 // ld1b { za0h.b[x12, #2] }, p0/Z, [x22, x9]\n"
- "cmp x20, x10\n"
- "csel x20, x20, x10, LT\n"
+ "sub x19, %x[width], x10\n"
+ ".inst 0xe01c02a2 // ld1b { za0h.b[x12, #2] }, p0/Z, [x21, x28]\n"
+ "cmp x19, x9\n"
+ "csel x19, x19, x9, LT\n"
".inst 0x253c6140 // psel p0.b, p8.b/Z, p10.b[w12, #3]\n"
- "add x20, x20, #0x3\n"
- ".inst 0xe00902a3 // ld1b { za0h.b[x12, #3] }, p0/Z, [x21, x9]\n"
+ "add x19, x19, #0x3\n"
+ ".inst 0xe01c0283 // ld1b { za0h.b[x12, #3] }, p0/Z, [x20, x28]\n"
"mov x12, #0x0\n"
- "lsr x20, x20, #0x2\n"
+ "lsr x19, x19, #0x2\n"
"4:" // Stores: Loop
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf8380 // st1w { za0v.s[x12] }, p0/Z, [x28, XZR, LSL #2]\n"
+ ".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0af8384 // st1w { za1v.s[x12] }, p0/Z, [x28, x15, LSL #2]\n"
+ ".inst 0xe0ae8364 // st1w { za1v.s[x12] }, p0/Z, [x27, x14, LSL #2]\n"
".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0ae8788 // st1w { za2v.s[x12] }, p1/Z, [x28, x14, LSL #2]\n"
- ".inst 0xe0ad838c // st1w { za3v.s[x12] }, p0/Z, [x28, x13, LSL #2]\n"
+ ".inst 0xe0ad8768 // st1w { za2v.s[x12] }, p1/Z, [x27, x13, LSL #2]\n"
+ ".inst 0xe0ab836c // st1w { za3v.s[x12] }, p0/Z, [x27, x11, LSL #2]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x20\n"
- "addvl x28, x28, #4\n"
+ "cmp x12, x19\n"
+ "addvl x27, x27, #4\n"
"blt 4b\n"
- "incb x11\n"
- "whilelt p9.b, x11, %x[width]\n"
- "whilelt p8.b, x11, %x[width]\n"
- "incb x9\n"
+ "incb x10\n"
+ "whilelt p9.b, x10, %x[width]\n"
+ "whilelt p8.b, x10, %x[width]\n"
+ "incb x28\n"
"b.any 1b\n"
- "mov %x[out], x28\n"
+ "mov %x[out], x27\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_u8_u8_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_u8_u8_summing.hpp
index 49d2acf1cd..961008a3f2 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_u8_u8_summing.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_u8_u8_summing.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#if defined(__ARM_FEATURE_SVE)
@@ -32,118 +32,118 @@ void interleave_block<4, 4, VLType::SME, true>(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cntw x16\n"
- "cntw x15\n"
"mov z24.b, #0x1\n"
- "cntw x14, ALL, MUL #2\n"
- "cntw x13, ALL, MUL #3\n"
"mov z23.s, #0x0\n"
+ "ptrue p2.b\n"
"mov z22.s, #0x0\n"
- "cmp %x[height], x16\n"
- "csel x16, %x[height], x16, LT\n"
+ "cntw x16\n"
"mov z21.s, #0x0\n"
+ "cntw x15, ALL, MUL #2\n"
"mov z20.s, #0x0\n"
- "whilelt p12.b, XZR, %x[height]\n"
- "whilelt p10.b, x15, %x[height]\n"
- "whilelt p9.b, x14, %x[height]\n"
- "whilelt p8.b, x13, %x[height]\n"
- "zip1 p12.b, p12.b, p9.b\n"
- "zip1 p10.b, p10.b, p8.b\n"
- "ptrue p2.b\n"
+ "cntw x14, ALL, MUL #3\n"
"cntb x11\n"
"ptrue p11.s\n"
- "sub x16, x16, #0x1\n"
- "zip1 p10.b, p12.b, p10.b\n"
- "mov x10, %x[row_offset]\n"
- "mov x9, %x[out]\n"
+ "cntw x10\n"
+ "cmp %x[height], x10\n"
+ "csel x10, %x[height], x10, LT\n"
+ "sub x10, x10, #0x1\n"
+ "whilelt p10.b, XZR, %x[height]\n"
+ "whilelt p9.b, x16, %x[height]\n"
+ "whilelt p8.b, x15, %x[height]\n"
+ "zip1 p10.b, p10.b, p8.b\n"
+ "whilelt p8.b, x14, %x[height]\n"
+ "zip1 p9.b, p9.b, p8.b\n"
+ "mov x9, %x[row_offset]\n"
+ "mov x28, %x[out]\n"
+ "zip1 p10.b, p10.b, p9.b\n"
"cbnz %x[first], 1f\n"
- "addvl x9, x9, #-4\n"
- "ld1w { z23.s }, p2/Z, [x9]\n"
- "ld1w { z22.s }, p2/Z, [x9, #1, MUL VL]\n"
- "ld1w { z21.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z20.s }, p2/Z, [x9, #3, MUL VL]\n"
+ "addvl x28, x28, #-4\n"
+ "ld1w { z23.s }, p2/Z, [x28]\n"
+ "ld1w { z22.s }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z21.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z20.s }, p2/Z, [x28, #3, MUL VL]\n"
"1:" // Initialise row sums: End
- "mov x28, #0x0\n"
- "whilelt p9.b, x28, %x[width]\n"
- "whilelt p8.b, x28, %x[width]\n"
+ "mov x27, #0x0\n"
+ "whilelt p9.b, x27, %x[width]\n"
+ "whilelt p8.b, x27, %x[width]\n"
"2:" // Width loop
- "add x27, %x[in], XZR, LSL #3\n"
- "add x26, %x[in], x15, LSL #3\n"
- "ldr x25, [x27], #0x8\n"
- "add x24, %x[in], x14, LSL #3\n"
- "add x23, %x[in], x13, LSL #3\n"
- "ldr x20, [x26], #0x8\n"
- "mov x12, #0x0\n"
- "ldr x22, [x24], #0x8\n"
- "ldr x21, [x23], #0x8\n"
- "cbz x16, 4f\n"
+ "mov x13, #0x0\n"
+ "add x26, %x[in], XZR, LSL #3\n"
+ "add x25, %x[in], x16, LSL #3\n"
+ "add x24, %x[in], x15, LSL #3\n"
+ "add x23, %x[in], x14, LSL #3\n"
+ "ldr x22, [x26], #0x8\n"
+ "ldr x21, [x25], #0x8\n"
+ "ldr x19, [x24], #0x8\n"
+ "ldr x20, [x23], #0x8\n"
+ "cbz x10, 4f\n"
"3:" // Loads: Loop
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe00a0320 // ld1b { za0h.b[x12] }, p0/Z, [x25, x10]\n"
- ".inst 0x252c6140 // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
- "ldr x25, [x27], #0x8\n"
- ".inst 0xe00a0281 // ld1b { za0h.b[x12, #1] }, p0/Z, [x20, x10]\n"
- ".inst 0x25346141 // psel p1.b, p8.b/Z, p10.b[w12, #2]\n"
- ".inst 0x253c6140 // psel p0.b, p8.b/Z, p10.b[w12, #3]\n"
- "ldr x20, [x26], #0x8\n"
- ".inst 0xe00a06c2 // ld1b { za0h.b[x12, #2] }, p1/Z, [x22, x10]\n"
- "ldr x22, [x24], #0x8\n"
- ".inst 0xe00a02a3 // ld1b { za0h.b[x12, #3] }, p0/Z, [x21, x10]\n"
- "add x12, x12, #0x4\n"
- "cmp x12, x16, LSL #2\n"
- "ldr x21, [x23], #0x8\n"
+ ".inst 0x25256140 // dup p0.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0xe00922c0 // ld1b { za0h.b[x13] }, p0/Z, [x22, x9]\n"
+ ".inst 0x252d6140 // dup p0.b, p8.b/Z, p10.b[w13, #1]\n"
+ ".inst 0x25356141 // dup p1.b, p8.b/Z, p10.b[w13, #2]\n"
+ ".inst 0xe00922a1 // ld1b { za0h.b[x13, #1] }, p0/Z, [x21, x9]\n"
+ ".inst 0x253d6140 // dup p0.b, p8.b/Z, p10.b[w13, #3]\n"
+ "ldr x22, [x26], #0x8\n"
+ ".inst 0xe0092662 // ld1b { za0h.b[x13, #2] }, p1/Z, [x19, x9]\n"
+ "ldr x21, [x25], #0x8\n"
+ "ldr x19, [x24], #0x8\n"
+ ".inst 0xe0092283 // ld1b { za0h.b[x13, #3] }, p0/Z, [x20, x9]\n"
+ "ldr x20, [x23], #0x8\n"
+ "add x13, x13, #0x4\n"
+ "cmp x13, x10, LSL #2\n"
"blt 3b\n"
"4:" // Loads: Tail
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe00a0320 // ld1b { za0h.b[x12] }, p0/Z, [x25, x10]\n"
- ".inst 0x252c6140 // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
- ".inst 0xe00a0281 // ld1b { za0h.b[x12, #1] }, p0/Z, [x20, x10]\n"
- ".inst 0x25346140 // psel p0.b, p8.b/Z, p10.b[w12, #2]\n"
- "sub x20, %x[width], x28\n"
- ".inst 0xe00a02c2 // ld1b { za0h.b[x12, #2] }, p0/Z, [x22, x10]\n"
- "cmp x20, x11\n"
- "csel x20, x20, x11, LT\n"
- ".inst 0x253c6140 // psel p0.b, p8.b/Z, p10.b[w12, #3]\n"
- "add x20, x20, #0x3\n"
- ".inst 0xe00a02a3 // ld1b { za0h.b[x12, #3] }, p0/Z, [x21, x10]\n"
+ ".inst 0x25256140 // dup p0.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0xe00922c0 // ld1b { za0h.b[x13] }, p0/Z, [x22, x9]\n"
+ ".inst 0x252d6140 // dup p0.b, p8.b/Z, p10.b[w13, #1]\n"
+ ".inst 0x25356141 // dup p1.b, p8.b/Z, p10.b[w13, #2]\n"
+ ".inst 0xe00922a1 // ld1b { za0h.b[x13, #1] }, p0/Z, [x21, x9]\n"
+ ".inst 0x253d6140 // dup p0.b, p8.b/Z, p10.b[w13, #3]\n"
"mov x12, #0x0\n"
- "lsr x20, x20, #0x2\n"
+ ".inst 0xe0092662 // ld1b { za0h.b[x13, #2] }, p1/Z, [x19, x9]\n"
+ "sub x19, %x[width], x27\n"
+ "cmp x19, x11\n"
+ ".inst 0xe0092283 // ld1b { za0h.b[x13, #3] }, p0/Z, [x20, x9]\n"
+ "csel x19, x19, x11, LT\n"
+ "add x19, x19, #0x3\n"
+ "lsr x19, x19, #0x2\n"
"5:" // Stores: Loop
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf8120 // st1w { za0v.s[x12] }, p0/Z, [x9, XZR, LSL #2]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
".inst 0xc0828813 // mova z19.s, p2/M, za0v.s[x12]\n"
- ".inst 0xe0af8124 // st1w { za1v.s[x12] }, p0/Z, [x9, x15, LSL #2]\n"
- ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xc0828891 // mova z17.s, p2/M, za1v.s[x12]\n"
- ".inst 0xe0ae8528 // st1w { za2v.s[x12] }, p1/Z, [x9, x14, LSL #2]\n"
- ".inst 0xc0828912 // mova z18.s, p2/M, za2v.s[x12]\n"
- "udot z23.s, z19.b, z24.b\n"
- ".inst 0xe0ad812c // st1w { za3v.s[x12] }, p0/Z, [x9, x13, LSL #2]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8380 // st1w { za0v.s[x12] }, p0/Z, [x28, XZR, LSL #2]\n"
+ ".inst 0xc0828892 // mova z18.s, p2/M, za1v.s[x12]\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xc0828911 // mova z17.s, p2/M, za2v.s[x12]\n"
+ ".inst 0xe0b08384 // st1w { za1v.s[x12] }, p0/Z, [x28, x16, LSL #2]\n"
+ ".inst 0x25306d21 // dup p1.s, p11.s/Z, p9.s[w12]\n"
".inst 0xc0828990 // mova z16.s, p2/M, za3v.s[x12]\n"
- "add x12, x12, #0x1\n"
- "cmp x12, x20\n"
- "udot z22.s, z17.b, z24.b\n"
- "udot z21.s, z18.b, z24.b\n"
- "addvl x9, x9, #4\n"
+ "udot z23.s, z19.b, z24.b\n"
+ ".inst 0x25306d20 // dup p0.s, p11.s/Z, p9.s[w12]\n"
+ "udot z22.s, z18.b, z24.b\n"
+ ".inst 0xe0af8788 // st1w { za2v.s[x12] }, p1/Z, [x28, x15, LSL #2]\n"
+ "udot z21.s, z17.b, z24.b\n"
"udot z20.s, z16.b, z24.b\n"
+ ".inst 0xe0ae838c // st1w { za3v.s[x12] }, p0/Z, [x28, x14, LSL #2]\n"
+ "addvl x28, x28, #4\n"
+ "add x12, x12, #0x1\n"
+ "cmp x12, x19\n"
"blt 5b\n"
- "incb x28\n"
- "whilelt p9.b, x28, %x[width]\n"
- "whilelt p8.b, x28, %x[width]\n"
- "incb x10\n"
+ "incb x9\n"
+ "incb x27\n"
+ "whilelt p9.b, x27, %x[width]\n"
+ "whilelt p8.b, x27, %x[width]\n"
"b.any 2b\n"
- "st1w { z23.s }, p2, [x9]\n"
- "st1w { z22.s }, p2, [x9, #1, MUL VL]\n"
- "st1w { z21.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z20.s }, p2, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "mov %x[out], x9\n"
+ "st1w { z23.s }, p2, [x28]\n"
+ "st1w { z22.s }, p2, [x28, #1, MUL VL]\n"
+ "st1w { z21.s }, p2, [x28, #2, MUL VL]\n"
+ "st1w { z20.s }, p2, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "mov %x[out], x28\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p8", "p9", "p10", "p11", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_fp32_fp32.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_fp32_fp32.hpp
index 9579263204..141ab00a52 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_fp32_fp32.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_fp32_fp32.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#if defined(__ARM_FEATURE_SVE)
@@ -34,92 +34,92 @@ void interleave_block<4, 1, VLType::SME, false>(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cntw x15\n"
- "cmp %x[height], x15\n"
"cntw x14\n"
"cntw x13, ALL, MUL #2\n"
"cntw x11, ALL, MUL #3\n"
- "csel x15, %x[height], x15, LT\n"
- "mov x10, #0x0\n"
- "ptrue p4.s\n"
- "sub x15, x15, #0x1\n"
- "whilelt p3.s, XZR, %x[height]\n"
+ "ptrue p3.s\n"
+ "cntw x10\n"
+ "cmp %x[height], x10\n"
+ "csel x10, %x[height], x10, LT\n"
+ "sub x10, x10, #0x1\n"
+ "whilelt p2.s, XZR, %x[height]\n"
"whilelt p15.s, x14, %x[height]\n"
"whilelt p14.s, x13, %x[height]\n"
"whilelt p13.s, x11, %x[height]\n"
"mov x9, %x[row_offset]\n"
"mov x28, %x[out]\n"
- "whilelt p12.s, x10, %x[width]\n"
- "whilelt p11.s, x10, %x[width]\n"
- "whilelt p10.s, x10, %x[width]\n"
- "whilelt p9.s, x10, %x[width]\n"
- "whilelt p8.s, x10, %x[width]\n"
+ "mov x27, #0x0\n"
+ "whilelt p12.s, x27, %x[width]\n"
+ "whilelt p11.s, x27, %x[width]\n"
+ "whilelt p10.s, x27, %x[width]\n"
+ "whilelt p9.s, x27, %x[width]\n"
+ "whilelt p8.s, x27, %x[width]\n"
"1:" // Width loop
- "add x27, %x[in], XZR, LSL #3\n"
- "add x26, %x[in], x14, LSL #3\n"
- "ldr x25, [x27], #0x8\n"
- "add x24, %x[in], x13, LSL #3\n"
- "add x20, %x[in], x11, LSL #3\n"
- "ldr x23, [x26], #0x8\n"
"mov x12, #0x0\n"
- "ldr x22, [x24], #0x8\n"
- "ldr x21, [x20], #0x8\n"
- "cbz x15, 3f\n"
+ "add x26, %x[in], XZR, LSL #3\n"
+ "add x25, %x[in], x14, LSL #3\n"
+ "add x24, %x[in], x13, LSL #3\n"
+ "add x23, %x[in], x11, LSL #3\n"
+ "ldr x22, [x26], #0x8\n"
+ "ldr x21, [x25], #0x8\n"
+ "ldr x20, [x24], #0x8\n"
+ "ldr x19, [x23], #0x8\n"
+ "cbz x10, 3f\n"
"2:" // Loads: Loop
- ".inst 0x25306c60 // psel p0.s, p11.s/Z, p3.s[w12]\n"
- ".inst 0x253069e2 // psel p2.s, p10.s/Z, p15.s[w12]\n"
- ".inst 0xe0890320 // ld1w { za0h.s[x12] }, p0/Z, [x25, x9, LSL #2]\n"
- "ldr x25, [x27], #0x8\n"
- ".inst 0x253065c1 // psel p1.s, p9.s/Z, p14.s[w12]\n"
- ".inst 0x253061a0 // psel p0.s, p8.s/Z, p13.s[w12]\n"
- ".inst 0xe0890ae4 // ld1w { za1h.s[x12] }, p2/Z, [x23, x9, LSL #2]\n"
- "ldr x23, [x26], #0x8\n"
- ".inst 0xe08906c8 // ld1w { za2h.s[x12] }, p1/Z, [x22, x9, LSL #2]\n"
- "ldr x22, [x24], #0x8\n"
- ".inst 0xe08902ac // ld1w { za3h.s[x12] }, p0/Z, [x21, x9, LSL #2]\n"
+ ".inst 0x25306c40 // dup p0.s, p11.s/Z, p2.s[w12]\n"
+ ".inst 0xe08902c0 // ld1w { za0h.s[x12] }, p0/Z, [x22, x9, LSL #2]\n"
+ ".inst 0x253069e0 // dup p0.s, p10.s/Z, p15.s[w12]\n"
+ ".inst 0xe08902a4 // ld1w { za1h.s[x12] }, p0/Z, [x21, x9, LSL #2]\n"
+ ".inst 0x253065c0 // dup p0.s, p9.s/Z, p14.s[w12]\n"
+ ".inst 0xe0890288 // ld1w { za2h.s[x12] }, p0/Z, [x20, x9, LSL #2]\n"
+ ".inst 0x253061a0 // dup p0.s, p8.s/Z, p13.s[w12]\n"
+ ".inst 0xe089026c // ld1w { za3h.s[x12] }, p0/Z, [x19, x9, LSL #2]\n"
+ "ldr x22, [x26], #0x8\n"
+ "ldr x21, [x25], #0x8\n"
+ "ldr x20, [x24], #0x8\n"
+ "ldr x19, [x23], #0x8\n"
"add x12, x12, #0x1\n"
- "cmp x12, x15\n"
- "ldr x21, [x20], #0x8\n"
+ "cmp x12, x10\n"
"blt 2b\n"
"3:" // Loads: Tail
- "sub x20, %x[width], x10\n"
- ".inst 0x25306c60 // psel p0.s, p11.s/Z, p3.s[w12]\n"
- ".inst 0xe0890320 // ld1w { za0h.s[x12] }, p0/Z, [x25, x9, LSL #2]\n"
- ".inst 0x253069e0 // psel p0.s, p10.s/Z, p15.s[w12]\n"
- ".inst 0x253065c1 // psel p1.s, p9.s/Z, p14.s[w12]\n"
- ".inst 0xe08902e4 // ld1w { za1h.s[x12] }, p0/Z, [x23, x9, LSL #2]\n"
- ".inst 0x253061a0 // psel p0.s, p8.s/Z, p13.s[w12]\n"
- "cmp x20, x14\n"
- ".inst 0xe08906c8 // ld1w { za2h.s[x12] }, p1/Z, [x22, x9, LSL #2]\n"
- ".inst 0xe08902ac // ld1w { za3h.s[x12] }, p0/Z, [x21, x9, LSL #2]\n"
+ ".inst 0x25306c40 // dup p0.s, p11.s/Z, p2.s[w12]\n"
+ ".inst 0xe08902c0 // ld1w { za0h.s[x12] }, p0/Z, [x22, x9, LSL #2]\n"
+ ".inst 0x253069e0 // dup p0.s, p10.s/Z, p15.s[w12]\n"
+ ".inst 0xe08902a4 // ld1w { za1h.s[x12] }, p0/Z, [x21, x9, LSL #2]\n"
+ ".inst 0x253065c0 // dup p0.s, p9.s/Z, p14.s[w12]\n"
+ ".inst 0xe0890288 // ld1w { za2h.s[x12] }, p0/Z, [x20, x9, LSL #2]\n"
+ ".inst 0x253061a0 // dup p0.s, p8.s/Z, p13.s[w12]\n"
+ ".inst 0xe089026c // ld1w { za3h.s[x12] }, p0/Z, [x19, x9, LSL #2]\n"
"mov x12, #0x0\n"
- "csel x20, x20, x14, LT\n"
+ "sub x19, %x[width], x27\n"
+ "cmp x19, x14\n"
+ "csel x19, x19, x14, LT\n"
"4:" // Stores: Loop
- ".inst 0x25305180 // psel p0.s, p4.s/Z, p12.s[w12]\n"
+ ".inst 0x25304d80 // dup p0.s, p3.s/Z, p12.s[w12]\n"
".inst 0xe0bf8380 // st1w { za0v.s[x12] }, p0/Z, [x28, XZR, LSL #2]\n"
- ".inst 0x25305180 // psel p0.s, p4.s/Z, p12.s[w12]\n"
+ ".inst 0x25304d80 // dup p0.s, p3.s/Z, p12.s[w12]\n"
+ ".inst 0x25304d81 // dup p1.s, p3.s/Z, p12.s[w12]\n"
".inst 0xe0ae8384 // st1w { za1v.s[x12] }, p0/Z, [x28, x14, LSL #2]\n"
- ".inst 0x25305181 // psel p1.s, p4.s/Z, p12.s[w12]\n"
- ".inst 0x25305180 // psel p0.s, p4.s/Z, p12.s[w12]\n"
+ ".inst 0x25304d80 // dup p0.s, p3.s/Z, p12.s[w12]\n"
".inst 0xe0ad8788 // st1w { za2v.s[x12] }, p1/Z, [x28, x13, LSL #2]\n"
".inst 0xe0ab838c // st1w { za3v.s[x12] }, p0/Z, [x28, x11, LSL #2]\n"
- "add x12, x12, #0x1\n"
- "cmp x12, x20\n"
"addvl x28, x28, #4\n"
+ "add x12, x12, #0x1\n"
+ "cmp x12, x19\n"
"blt 4b\n"
- "incw x10\n"
- "whilelt p12.s, x10, %x[width]\n"
- "whilelt p11.s, x10, %x[width]\n"
- "whilelt p10.s, x10, %x[width]\n"
- "whilelt p9.s, x10, %x[width]\n"
- "whilelt p8.s, x10, %x[width]\n"
"incw x9\n"
+ "incw x27\n"
+ "whilelt p12.s, x27, %x[width]\n"
+ "whilelt p11.s, x27, %x[width]\n"
+ "whilelt p10.s, x27, %x[width]\n"
+ "whilelt p9.s, x27, %x[width]\n"
+ "whilelt p8.s, x27, %x[width]\n"
"b.any 1b\n"
"mov %x[out], x28\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_bf16fp32_mmla_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_bf16fp32_mmla_6x16/generic.cpp
index 74791f8d30..ec93586f57 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_bf16fp32_mmla_6x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_bf16fp32_mmla_6x16/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -105,108 +105,108 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"cmp %x[M], #0x2\n"
"bgt 77f\n"
"beq 39f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x20, x9, x20, LSL #1\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0xc\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0xc\n"
"bgt 3f\n"
- "cmp x14, #0x8\n"
- "mov x9, x12\n"
+ "cmp x13, #0x8\n"
+ "mov x28, x11\n"
"bgt 3f\n"
- "cmp x14, #0x4\n"
- "mov x10, x12\n"
+ "cmp x13, #0x4\n"
+ "mov x9, x11\n"
"bgt 3f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"3:" // Height 1: B setup done
- "cbz x15, 4f\n"
- "ldr q8, [x15, #0x0]\n"
- "ldr q9, [x15, #0x10]\n"
+ "cbz x14, 4f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
"zip2 v12.2d, v8.2d, v8.2d\n"
"zip1 v8.2d, v8.2d, v8.2d\n"
- "ldr q10, [x15, #0x20]\n"
- "ldr q11, [x15, #0x30]\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x15, x15, #0x40\n"
+ "add x14, x14, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
"b 16f\n"
"4:" // Height 1: no bias
"tbz %x[flags], #0, 15f\n"
- "cmp x14, #0x10\n"
+ "cmp x13, #0x10\n"
"bge 13f\n"
- "tbz x14, #3, 8f\n"
- "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v10.4s }, [x13], #0x10\n"
- "tbz x14, #2, 6f\n"
- "ld1 { v11.4s }, [x13], #0x10\n"
- "tbz x14, #1, 5f\n"
- "ldr d16, [x13], #0x8\n"
- "mov x20, #0x38\n"
- "tbz x14, #0, 12f\n"
- "ld1 { v16.s }[2], [x13]\n"
+ "tbz x13, #3, 8f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "tbz x13, #2, 6f\n"
+ "ld1 { v11.4s }, [x12], #0x10\n"
+ "tbz x13, #1, 5f\n"
+ "ldr d16, [x12], #0x8\n"
+ "mov x19, #0x38\n"
+ "tbz x13, #0, 12f\n"
+ "ld1 { v16.s }[2], [x12]\n"
"b 12f\n"
"5:" // Height 1: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
- "tbz x14, #0, 12f\n"
- "ldr s16, [x13, #0x0]\n"
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 12f\n"
+ "ldr s16, [x12, #0x0]\n"
"b 12f\n"
"6:" // Height 1: Partial accumulate: partial_2_8
- "tbz x14, #1, 7f\n"
- "ldr d11, [x13], #0x8\n"
- "mov x20, #0x28\n"
- "tbz x14, #0, 12f\n"
- "ld1 { v11.s }[2], [x13]\n"
+ "tbz x13, #1, 7f\n"
+ "ldr d11, [x12], #0x8\n"
+ "mov x19, #0x28\n"
+ "tbz x13, #0, 12f\n"
+ "ld1 { v11.s }[2], [x12]\n"
"b 12f\n"
"7:" // Height 1: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
- "tbz x14, #0, 12f\n"
- "ldr s11, [x13, #0x0]\n"
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 12f\n"
+ "ldr s11, [x12, #0x0]\n"
"b 12f\n"
"8:" // Height 1: Partial accumulate: partial_4_0
- "tbz x14, #2, 10f\n"
- "ld1 { v9.4s }, [x13], #0x10\n"
- "tbz x14, #1, 9f\n"
- "ldr d10, [x13], #0x8\n"
- "mov x20, #0x18\n"
- "tbz x14, #0, 12f\n"
- "ld1 { v10.s }[2], [x13]\n"
+ "tbz x13, #2, 10f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "tbz x13, #1, 9f\n"
+ "ldr d10, [x12], #0x8\n"
+ "mov x19, #0x18\n"
+ "tbz x13, #0, 12f\n"
+ "ld1 { v10.s }[2], [x12]\n"
"b 12f\n"
"9:" // Height 1: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
- "tbz x14, #0, 12f\n"
- "ldr s10, [x13, #0x0]\n"
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 12f\n"
+ "ldr s10, [x12, #0x0]\n"
"b 12f\n"
"10:" // Height 1: Partial accumulate: partial_2_0
- "tbz x14, #1, 11f\n"
- "ldr d9, [x13], #0x8\n"
- "mov x20, #0x8\n"
- "tbz x14, #0, 12f\n"
- "ld1 { v9.s }[2], [x13]\n"
+ "tbz x13, #1, 11f\n"
+ "ldr d9, [x12], #0x8\n"
+ "mov x19, #0x8\n"
+ "tbz x13, #0, 12f\n"
+ "ld1 { v9.s }[2], [x12]\n"
"b 12f\n"
"11:" // Height 1: Partial accumulate: partial_1_0
- "ldr s9, [x13, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr s9, [x12, #0x0]\n"
+ "mov x19, #0x0\n"
"12:" // Height 1: Partial accumulate: Done
- "sub x13, x13, x20\n"
+ "sub x12, x12, x19\n"
"b 14f\n"
"13:" // Height 1: full accumulate
- "ldr q9, [x13, #0x0]\n"
- "ldr q10, [x13, #0x10]\n"
- "ldr q11, [x13, #0x20]\n"
- "ldr q16, [x13, #0x30]\n"
+ "ldr q9, [x12, #0x0]\n"
+ "ldr q10, [x12, #0x10]\n"
+ "ldr q11, [x12, #0x20]\n"
+ "ldr q16, [x12, #0x30]\n"
"14:" // Height 1: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -227,187 +227,187 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"movi v14.16b, #0x0\n"
"movi v15.16b, #0x0\n"
"16:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"17:" // Height 1: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 18f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 19f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 19f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
"b 19f\n"
"18:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"19:" // Height 1: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"blt 22f\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q7, [x12, #0x0]\n"
- "cmp x27, #0x10\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "ldr q7, [x11, #0x0]\n"
+ "cmp x26, #0x10\n"
+ "ldr q6, [x11, #0x10]\n"
"blt 21f\n"
"20:" // Height 1: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
- "ldr q6, [x11, #0x10]\n"
+ "ldr q6, [x10, #0x10]\n"
".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
- "ldr q7, [x10, #0x0]\n"
+ "ldr q7, [x9, #0x0]\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q6, [x28, #0x10]\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
- "ldr q7, [x12, #0x20]\n"
+ "ldr q7, [x11, #0x20]\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
- "ldr q6, [x12, #0x30]\n"
+ "ldr q6, [x11, #0x30]\n"
".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
".inst 0x6e46ec2c // bfmmla v12.4s, v1.8h, v6.8h\n"
- "ldr q6, [x11, #0x30]\n"
+ "ldr q6, [x10, #0x30]\n"
".inst 0x6e47ec29 // bfmmla v9.4s, v1.8h, v7.8h\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x6e46ec2d // bfmmla v13.4s, v1.8h, v6.8h\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x6e47ec2a // bfmmla v10.4s, v1.8h, v7.8h\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
- "ldr q6, [x9, #0x30]\n"
- "sub x27, x27, #0x8\n"
- "cmp x27, #0x10\n"
- "add x26, x26, #0x10\n"
+ "ldr q6, [x28, #0x30]\n"
+ "sub x26, x26, #0x8\n"
+ "cmp x26, #0x10\n"
+ "add x25, x25, #0x10\n"
".inst 0x6e47ec2b // bfmmla v11.4s, v1.8h, v7.8h\n"
".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
- "ldr q1, [x26, #0x0]\n"
- "add x12, x12, #0x40\n"
- "ldr q7, [x12, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
"add x11, x11, #0x40\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q7, [x11, #0x0]\n"
"add x10, x10, #0x40\n"
+ "ldr q6, [x11, #0x10]\n"
"add x9, x9, #0x40\n"
+ "add x28, x28, #0x40\n"
"bge 20b\n"
"21:" // Height 1: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
- "ldr q6, [x11, #0x10]\n"
+ "ldr q6, [x10, #0x10]\n"
".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
- "ldr q7, [x10, #0x0]\n"
+ "ldr q7, [x9, #0x0]\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q6, [x28, #0x10]\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
- "ldr q7, [x12, #0x20]\n"
+ "ldr q7, [x11, #0x20]\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
- "ldr q6, [x12, #0x30]\n"
+ "ldr q6, [x11, #0x30]\n"
".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
".inst 0x6e46ec2c // bfmmla v12.4s, v1.8h, v6.8h\n"
- "ldr q6, [x11, #0x30]\n"
+ "ldr q6, [x10, #0x30]\n"
".inst 0x6e47ec29 // bfmmla v9.4s, v1.8h, v7.8h\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x6e46ec2d // bfmmla v13.4s, v1.8h, v6.8h\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x6e47ec2a // bfmmla v10.4s, v1.8h, v7.8h\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
- "ldr q6, [x9, #0x30]\n"
- "sub x27, x27, #0x8\n"
+ "ldr q6, [x28, #0x30]\n"
+ "sub x26, x26, #0x8\n"
".inst 0x6e47ec2b // bfmmla v11.4s, v1.8h, v7.8h\n"
".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
- "add x26, x26, #0x10\n"
- "add x12, x12, #0x40\n"
+ "add x25, x25, #0x10\n"
"add x11, x11, #0x40\n"
"add x10, x10, #0x40\n"
"add x9, x9, #0x40\n"
+ "add x28, x28, #0x40\n"
"22:" // Height 1: Multiply loop: Main loop skip
- "cbz x27, 27f\n"
- "cmp x27, #0x4\n"
+ "cbz x26, 27f\n"
+ "cmp x26, #0x4\n"
"blt 24f\n"
"23:" // Height 1: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr q6, [x12, #0x0]\n"
+ "ldr d1, [x25], #0x8\n"
+ "ldr q6, [x11, #0x0]\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "ldr q7, [x12, #0x10]\n"
+ "ldr q7, [x11, #0x10]\n"
".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
- "ldr q6, [x11, #0x0]\n"
+ "ldr q6, [x10, #0x0]\n"
".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
- "ldr q7, [x11, #0x10]\n"
+ "ldr q7, [x10, #0x10]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q7, [x9, #0x10]\n"
".inst 0x6e46ec0a // bfmmla v10.4s, v0.8h, v6.8h\n"
- "ldr q6, [x9, #0x0]\n"
+ "ldr q6, [x28, #0x0]\n"
".inst 0x6e47ec0e // bfmmla v14.4s, v0.8h, v7.8h\n"
- "ldr q7, [x9, #0x10]\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
+ "ldr q7, [x28, #0x10]\n"
+ "sub x26, x26, #0x4\n"
+ "cmp x26, #0x4\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
- "add x12, x12, #0x20\n"
"add x11, x11, #0x20\n"
"add x10, x10, #0x20\n"
"add x9, x9, #0x20\n"
+ "add x28, x28, #0x20\n"
"bge 23b\n"
"24:" // Height 1: Multiply loop: Skip odd blocks
- "cbz x27, 27f\n"
- "tbz x27, #1, 25f\n"
- "ldr s1, [x26], #0x4\n"
- "tbz x27, #0, 26f\n"
- "ld1 { v1.h }[2], [x26]\n"
+ "cbz x26, 27f\n"
+ "tbz x26, #1, 25f\n"
+ "ldr s1, [x25], #0x4\n"
+ "tbz x26, #0, 26f\n"
+ "ld1 { v1.h }[2], [x25]\n"
"b 26f\n"
"25:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
- "ldr h1, [x26, #0x0]\n"
+ "ldr h1, [x25, #0x0]\n"
"26:" // Height 1: Multiply loop: Ragged operand read: Done
- "ldr q7, [x12, #0x0]\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q7, [x11, #0x0]\n"
+ "ldr q6, [x11, #0x10]\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
- "ldr q6, [x11, #0x10]\n"
+ "ldr q6, [x10, #0x10]\n"
".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
- "ldr q7, [x10, #0x0]\n"
+ "ldr q7, [x9, #0x0]\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q6, [x28, #0x10]\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
- "add x12, x12, #0x20\n"
"add x11, x11, #0x20\n"
"add x10, x10, #0x20\n"
"add x9, x9, #0x20\n"
+ "add x28, x28, #0x20\n"
"27:" // Height 1: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 17b\n"
"uzp1 v8.2d, v8.2d, v12.2d\n"
"uzp1 v9.2d, v9.2d, v13.2d\n"
"uzp1 v10.2d, v10.2d, v14.2d\n"
"uzp1 v11.2d, v11.2d, v15.2d\n"
"tbz %x[flags], #1, 28f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.4s }, [x19]\n"
"fmin v8.4s, v8.4s, v1.4s\n"
"fmin v9.4s, v9.4s, v1.4s\n"
"fmin v10.4s, v10.4s, v1.4s\n"
@@ -417,189 +417,189 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"fmax v10.4s, v10.4s, v0.4s\n"
"fmax v11.4s, v11.4s, v0.4s\n"
"28:" // Height 1: No activation
- "cmp x14, #0x10\n"
+ "cmp x13, #0x10\n"
"bge 37f\n"
- "tbz x14, #3, 32f\n"
- "st1 { v8.4s }, [x13], #0x10\n"
- "st1 { v9.4s }, [x13], #0x10\n"
- "tbz x14, #2, 30f\n"
- "st1 { v10.4s }, [x13], #0x10\n"
- "tbz x14, #1, 29f\n"
- "str d11, [x13], #0x8\n"
- "tbz x14, #0, 36f\n"
- "st1 { v11.s }[2], [x13]\n"
+ "tbz x13, #3, 32f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "st1 { v9.4s }, [x12], #0x10\n"
+ "tbz x13, #2, 30f\n"
+ "st1 { v10.4s }, [x12], #0x10\n"
+ "tbz x13, #1, 29f\n"
+ "str d11, [x12], #0x8\n"
+ "tbz x13, #0, 36f\n"
+ "st1 { v11.s }[2], [x12]\n"
"b 36f\n"
"29:" // Height 1: Partial direct writeback: partial_1_12
- "tbz x14, #0, 36f\n"
- "str s11, [x13, #0x0]\n"
+ "tbz x13, #0, 36f\n"
+ "str s11, [x12, #0x0]\n"
"b 36f\n"
"30:" // Height 1: Partial direct writeback: partial_2_8
- "tbz x14, #1, 31f\n"
- "str d10, [x13], #0x8\n"
- "tbz x14, #0, 36f\n"
- "st1 { v10.s }[2], [x13]\n"
+ "tbz x13, #1, 31f\n"
+ "str d10, [x12], #0x8\n"
+ "tbz x13, #0, 36f\n"
+ "st1 { v10.s }[2], [x12]\n"
"b 36f\n"
"31:" // Height 1: Partial direct writeback: partial_1_8
- "tbz x14, #0, 36f\n"
- "str s10, [x13, #0x0]\n"
+ "tbz x13, #0, 36f\n"
+ "str s10, [x12, #0x0]\n"
"b 36f\n"
"32:" // Height 1: Partial direct writeback: partial_4_0
- "tbz x14, #2, 34f\n"
- "st1 { v8.4s }, [x13], #0x10\n"
- "tbz x14, #1, 33f\n"
- "str d9, [x13], #0x8\n"
- "tbz x14, #0, 36f\n"
- "st1 { v9.s }[2], [x13]\n"
+ "tbz x13, #2, 34f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "tbz x13, #1, 33f\n"
+ "str d9, [x12], #0x8\n"
+ "tbz x13, #0, 36f\n"
+ "st1 { v9.s }[2], [x12]\n"
"b 36f\n"
"33:" // Height 1: Partial direct writeback: partial_1_4
- "tbz x14, #0, 36f\n"
- "str s9, [x13, #0x0]\n"
+ "tbz x13, #0, 36f\n"
+ "str s9, [x12, #0x0]\n"
"b 36f\n"
"34:" // Height 1: Partial direct writeback: partial_2_0
- "tbz x14, #1, 35f\n"
- "str d8, [x13], #0x8\n"
- "tbz x14, #0, 36f\n"
- "st1 { v8.s }[2], [x13]\n"
+ "tbz x13, #1, 35f\n"
+ "str d8, [x12], #0x8\n"
+ "tbz x13, #0, 36f\n"
+ "st1 { v8.s }[2], [x12]\n"
"b 36f\n"
"35:" // Height 1: Partial direct writeback: partial_1_0
- "str s8, [x13, #0x0]\n"
+ "str s8, [x12, #0x0]\n"
"36:" // Height 1: Partial direct writeback: Done
"b 38f\n"
"37:" // Height 1: Full writeback
- "str q8, [x13, #0x0]\n"
- "str q9, [x13, #0x10]\n"
- "str q10, [x13, #0x20]\n"
- "str q11, [x13, #0x30]\n"
- "add x13, x13, #0x40\n"
+ "str q8, [x12, #0x0]\n"
+ "str q9, [x12, #0x10]\n"
+ "str q10, [x12, #0x20]\n"
+ "str q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
"38:" // Height 1: Writeback done
- "subs x14, x14, #0x10\n"
+ "subs x13, x13, #0x10\n"
"bgt 2b\n"
"b 230f\n"
"39:" // Height 2
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"40:" // Height 2: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x20, x9, x20, LSL #1\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0xc\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0xc\n"
"bgt 41f\n"
- "cmp x14, #0x8\n"
- "mov x9, x12\n"
+ "cmp x13, #0x8\n"
+ "mov x28, x11\n"
"bgt 41f\n"
- "cmp x14, #0x4\n"
- "mov x10, x12\n"
+ "cmp x13, #0x4\n"
+ "mov x9, x11\n"
"bgt 41f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"41:" // Height 2: B setup done
- "cbz x15, 42f\n"
- "ldr q8, [x15, #0x0]\n"
- "ldr q9, [x15, #0x10]\n"
+ "cbz x14, 42f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
"zip2 v12.2d, v8.2d, v8.2d\n"
"zip1 v8.2d, v8.2d, v8.2d\n"
- "ldr q10, [x15, #0x20]\n"
- "ldr q11, [x15, #0x30]\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x15, x15, #0x40\n"
+ "add x14, x14, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
"b 54f\n"
"42:" // Height 2: no bias
"tbz %x[flags], #0, 53f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "cmp x14, #0x10\n"
- "add x25, x13, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x13, #0x10\n"
+ "add x24, x12, x19, LSL #2\n"
"bge 51f\n"
- "tbz x14, #3, 46f\n"
- "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "tbz x14, #2, 44f\n"
- "ld1 { v11.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "tbz x14, #1, 43f\n"
- "ldr d16, [x13], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "mov x20, #0x38\n"
- "tbz x14, #0, 50f\n"
- "ld1 { v16.s }[2], [x13]\n"
- "ld1 { v15.s }[2], [x25]\n"
+ "tbz x13, #3, 46f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "tbz x13, #2, 44f\n"
+ "ld1 { v11.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "tbz x13, #1, 43f\n"
+ "ldr d16, [x12], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "mov x19, #0x38\n"
+ "tbz x13, #0, 50f\n"
+ "ld1 { v16.s }[2], [x12]\n"
+ "ld1 { v15.s }[2], [x24]\n"
"b 50f\n"
"43:" // Height 2: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
- "tbz x14, #0, 50f\n"
- "ldr s16, [x13, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 50f\n"
+ "ldr s16, [x12, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
"b 50f\n"
"44:" // Height 2: Partial accumulate: partial_2_8
- "tbz x14, #1, 45f\n"
- "ldr d11, [x13], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "mov x20, #0x28\n"
- "tbz x14, #0, 50f\n"
- "ld1 { v11.s }[2], [x13]\n"
- "ld1 { v14.s }[2], [x25]\n"
+ "tbz x13, #1, 45f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "tbz x13, #0, 50f\n"
+ "ld1 { v11.s }[2], [x12]\n"
+ "ld1 { v14.s }[2], [x24]\n"
"b 50f\n"
"45:" // Height 2: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
- "tbz x14, #0, 50f\n"
- "ldr s11, [x13, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 50f\n"
+ "ldr s11, [x12, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
"b 50f\n"
"46:" // Height 2: Partial accumulate: partial_4_0
- "tbz x14, #2, 48f\n"
- "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "tbz x14, #1, 47f\n"
- "ldr d10, [x13], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "mov x20, #0x18\n"
- "tbz x14, #0, 50f\n"
- "ld1 { v10.s }[2], [x13]\n"
- "ld1 { v13.s }[2], [x25]\n"
+ "tbz x13, #2, 48f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "tbz x13, #1, 47f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "mov x19, #0x18\n"
+ "tbz x13, #0, 50f\n"
+ "ld1 { v10.s }[2], [x12]\n"
+ "ld1 { v13.s }[2], [x24]\n"
"b 50f\n"
"47:" // Height 2: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
- "tbz x14, #0, 50f\n"
- "ldr s10, [x13, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 50f\n"
+ "ldr s10, [x12, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
"b 50f\n"
"48:" // Height 2: Partial accumulate: partial_2_0
- "tbz x14, #1, 49f\n"
- "ldr d9, [x13], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "mov x20, #0x8\n"
- "tbz x14, #0, 50f\n"
- "ld1 { v9.s }[2], [x13]\n"
- "ld1 { v12.s }[2], [x25]\n"
+ "tbz x13, #1, 49f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "tbz x13, #0, 50f\n"
+ "ld1 { v9.s }[2], [x12]\n"
+ "ld1 { v12.s }[2], [x24]\n"
"b 50f\n"
"49:" // Height 2: Partial accumulate: partial_1_0
- "ldr s9, [x13, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr s9, [x12, #0x0]\n"
+ "ldr s12, [x24, #0x0]\n"
+ "mov x19, #0x0\n"
"50:" // Height 2: Partial accumulate: Done
- "sub x13, x13, x20\n"
+ "sub x12, x12, x19\n"
"b 52f\n"
"51:" // Height 2: full accumulate
- "ldr q9, [x13, #0x0]\n"
- "ldr q10, [x13, #0x10]\n"
- "ldr q11, [x13, #0x20]\n"
- "ldr q16, [x13, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
+ "ldr q9, [x12, #0x0]\n"
+ "ldr q10, [x12, #0x10]\n"
+ "ldr q11, [x12, #0x20]\n"
+ "ldr q16, [x12, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
"52:" // Height 2: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -620,193 +620,193 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"movi v14.16b, #0x0\n"
"movi v15.16b, #0x0\n"
"54:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"55:" // Height 2: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 56f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 57f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 57f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
"b 57f\n"
"56:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
"57:" // Height 2: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"blt 60f\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q2, [x25, #0x0]\n"
- "cmp x27, #0x10\n"
- "ldr q7, [x12, #0x0]\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "ldr q2, [x24, #0x0]\n"
+ "cmp x26, #0x10\n"
+ "ldr q7, [x11, #0x0]\n"
+ "ldr q6, [x11, #0x10]\n"
"blt 59f\n"
"58:" // Height 2: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
- "ldr q6, [x11, #0x10]\n"
+ "ldr q6, [x10, #0x10]\n"
".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
- "ldr q7, [x10, #0x0]\n"
+ "ldr q7, [x9, #0x0]\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q6, [x28, #0x10]\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
- "ldr q7, [x12, #0x20]\n"
+ "ldr q7, [x11, #0x20]\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
- "ldr q6, [x12, #0x30]\n"
+ "ldr q6, [x11, #0x30]\n"
".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
".inst 0x6e46ec2c // bfmmla v12.4s, v1.8h, v6.8h\n"
- "ldr q6, [x11, #0x30]\n"
+ "ldr q6, [x10, #0x30]\n"
".inst 0x6e47ec29 // bfmmla v9.4s, v1.8h, v7.8h\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x6e46ec2d // bfmmla v13.4s, v1.8h, v6.8h\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x6e47ec2a // bfmmla v10.4s, v1.8h, v7.8h\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
- "ldr q6, [x9, #0x30]\n"
- "sub x27, x27, #0x8\n"
- "cmp x27, #0x10\n"
- "add x26, x26, #0x10\n"
+ "ldr q6, [x28, #0x30]\n"
+ "sub x26, x26, #0x8\n"
+ "cmp x26, #0x10\n"
"add x25, x25, #0x10\n"
- "ldr q2, [x25, #0x0]\n"
+ "add x24, x24, #0x10\n"
+ "ldr q2, [x24, #0x0]\n"
".inst 0x6e47ec2b // bfmmla v11.4s, v1.8h, v7.8h\n"
- "add x12, x12, #0x40\n"
- "ldr q7, [x12, #0x0]\n"
- ".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q6, [x12, #0x10]\n"
"add x11, x11, #0x40\n"
+ "ldr q7, [x11, #0x0]\n"
+ ".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
+ "ldr q1, [x25, #0x0]\n"
+ "ldr q6, [x11, #0x10]\n"
"add x10, x10, #0x40\n"
"add x9, x9, #0x40\n"
+ "add x28, x28, #0x40\n"
"bge 58b\n"
"59:" // Height 2: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
- "ldr q6, [x11, #0x10]\n"
+ "ldr q6, [x10, #0x10]\n"
".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
- "ldr q7, [x10, #0x0]\n"
+ "ldr q7, [x9, #0x0]\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q6, [x28, #0x10]\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
- "ldr q7, [x12, #0x20]\n"
+ "ldr q7, [x11, #0x20]\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
- "ldr q6, [x12, #0x30]\n"
+ "ldr q6, [x11, #0x30]\n"
".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
".inst 0x6e46ec2c // bfmmla v12.4s, v1.8h, v6.8h\n"
- "ldr q6, [x11, #0x30]\n"
+ "ldr q6, [x10, #0x30]\n"
".inst 0x6e47ec29 // bfmmla v9.4s, v1.8h, v7.8h\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x6e46ec2d // bfmmla v13.4s, v1.8h, v6.8h\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x6e47ec2a // bfmmla v10.4s, v1.8h, v7.8h\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
- "ldr q6, [x9, #0x30]\n"
- "sub x27, x27, #0x8\n"
+ "ldr q6, [x28, #0x30]\n"
+ "sub x26, x26, #0x8\n"
".inst 0x6e47ec2b // bfmmla v11.4s, v1.8h, v7.8h\n"
".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
- "add x12, x12, #0x40\n"
+ "add x24, x24, #0x10\n"
"add x11, x11, #0x40\n"
"add x10, x10, #0x40\n"
"add x9, x9, #0x40\n"
+ "add x28, x28, #0x40\n"
"60:" // Height 2: Multiply loop: Main loop skip
- "cbz x27, 65f\n"
- "cmp x27, #0x4\n"
+ "cbz x26, 65f\n"
+ "cmp x26, #0x4\n"
"blt 62f\n"
"61:" // Height 2: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d2, [x25], #0x8\n"
+ "ldr d1, [x25], #0x8\n"
+ "ldr d2, [x24], #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "sub x27, x27, #0x4\n"
- "ldr q6, [x12, #0x0]\n"
- "ldr q7, [x12, #0x10]\n"
- ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
- ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
+ "sub x26, x26, #0x4\n"
"ldr q6, [x11, #0x0]\n"
"ldr q7, [x11, #0x10]\n"
- ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- ".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
"ldr q6, [x10, #0x0]\n"
"ldr q7, [x10, #0x10]\n"
- ".inst 0x6e46ec0a // bfmmla v10.4s, v0.8h, v6.8h\n"
- ".inst 0x6e47ec0e // bfmmla v14.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
"ldr q6, [x9, #0x0]\n"
"ldr q7, [x9, #0x10]\n"
- "cmp x27, #0x4\n"
+ ".inst 0x6e46ec0a // bfmmla v10.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e47ec0e // bfmmla v14.4s, v0.8h, v7.8h\n"
+ "ldr q6, [x28, #0x0]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "cmp x26, #0x4\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
- "add x12, x12, #0x20\n"
"add x11, x11, #0x20\n"
"add x10, x10, #0x20\n"
"add x9, x9, #0x20\n"
+ "add x28, x28, #0x20\n"
"bge 61b\n"
"62:" // Height 2: Multiply loop: Skip odd blocks
- "cbz x27, 65f\n"
- "tbz x27, #1, 63f\n"
- "ldr s1, [x26], #0x4\n"
- "ldr s2, [x25], #0x4\n"
- "tbz x27, #0, 64f\n"
- "ld1 { v1.h }[2], [x26]\n"
- "ld1 { v2.h }[2], [x25]\n"
+ "cbz x26, 65f\n"
+ "tbz x26, #1, 63f\n"
+ "ldr s1, [x25], #0x4\n"
+ "ldr s2, [x24], #0x4\n"
+ "tbz x26, #0, 64f\n"
+ "ld1 { v1.h }[2], [x25]\n"
+ "ld1 { v2.h }[2], [x24]\n"
"b 64f\n"
"63:" // Height 2: Multiply loop: Ragged operand read: partial_1_0
- "ldr h1, [x26, #0x0]\n"
- "ldr h2, [x25, #0x0]\n"
+ "ldr h1, [x25, #0x0]\n"
+ "ldr h2, [x24, #0x0]\n"
"64:" // Height 2: Multiply loop: Ragged operand read: Done
- "ldr q7, [x12, #0x0]\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q7, [x11, #0x0]\n"
+ "ldr q6, [x11, #0x10]\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
- "ldr q6, [x11, #0x10]\n"
+ "ldr q6, [x10, #0x10]\n"
".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
- "ldr q7, [x10, #0x0]\n"
+ "ldr q7, [x9, #0x0]\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q6, [x28, #0x10]\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
- "add x12, x12, #0x20\n"
"add x11, x11, #0x20\n"
"add x10, x10, #0x20\n"
"add x9, x9, #0x20\n"
+ "add x28, x28, #0x20\n"
"65:" // Height 2: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 55b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
- "add x25, x13, x20, LSL #2\n"
+ "add x24, x12, x19, LSL #2\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
@@ -814,10 +814,10 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"uzp1 v14.2d, v11.2d, v15.2d\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
"tbz %x[flags], #1, 66f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.4s }, [x19]\n"
"fmin v7.4s, v7.4s, v1.4s\n"
"fmin v12.4s, v12.4s, v1.4s\n"
"fmin v13.4s, v13.4s, v1.4s\n"
@@ -835,120 +835,120 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"fmax v10.4s, v10.4s, v0.4s\n"
"fmax v11.4s, v11.4s, v0.4s\n"
"66:" // Height 2: No activation
- "cmp x14, #0x10\n"
+ "cmp x13, #0x10\n"
"bge 75f\n"
- "tbz x14, #3, 70f\n"
- "st1 { v7.4s }, [x13], #0x10\n"
- "st1 { v12.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v9.4s }, [x25], #0x10\n"
- "tbz x14, #2, 68f\n"
- "st1 { v13.4s }, [x13], #0x10\n"
- "st1 { v10.4s }, [x25], #0x10\n"
- "tbz x14, #1, 67f\n"
- "str d14, [x13], #0x8\n"
- "str d11, [x25], #0x8\n"
- "tbz x14, #0, 74f\n"
- "st1 { v14.s }[2], [x13]\n"
- "st1 { v11.s }[2], [x25]\n"
+ "tbz x13, #3, 70f\n"
+ "st1 { v7.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
+ "tbz x13, #2, 68f\n"
+ "st1 { v13.4s }, [x12], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
+ "tbz x13, #1, 67f\n"
+ "str d14, [x12], #0x8\n"
+ "str d11, [x24], #0x8\n"
+ "tbz x13, #0, 74f\n"
+ "st1 { v14.s }[2], [x12]\n"
+ "st1 { v11.s }[2], [x24]\n"
"b 74f\n"
"67:" // Height 2: Partial direct writeback: partial_1_12
- "tbz x14, #0, 74f\n"
- "str s14, [x13, #0x0]\n"
- "str s11, [x25, #0x0]\n"
+ "tbz x13, #0, 74f\n"
+ "str s14, [x12, #0x0]\n"
+ "str s11, [x24, #0x0]\n"
"b 74f\n"
"68:" // Height 2: Partial direct writeback: partial_2_8
- "tbz x14, #1, 69f\n"
- "str d13, [x13], #0x8\n"
- "str d10, [x25], #0x8\n"
- "tbz x14, #0, 74f\n"
- "st1 { v13.s }[2], [x13]\n"
- "st1 { v10.s }[2], [x25]\n"
+ "tbz x13, #1, 69f\n"
+ "str d13, [x12], #0x8\n"
+ "str d10, [x24], #0x8\n"
+ "tbz x13, #0, 74f\n"
+ "st1 { v13.s }[2], [x12]\n"
+ "st1 { v10.s }[2], [x24]\n"
"b 74f\n"
"69:" // Height 2: Partial direct writeback: partial_1_8
- "tbz x14, #0, 74f\n"
- "str s13, [x13, #0x0]\n"
- "str s10, [x25, #0x0]\n"
+ "tbz x13, #0, 74f\n"
+ "str s13, [x12, #0x0]\n"
+ "str s10, [x24, #0x0]\n"
"b 74f\n"
"70:" // Height 2: Partial direct writeback: partial_4_0
- "tbz x14, #2, 72f\n"
- "st1 { v7.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "tbz x14, #1, 71f\n"
- "str d12, [x13], #0x8\n"
- "str d9, [x25], #0x8\n"
- "tbz x14, #0, 74f\n"
- "st1 { v12.s }[2], [x13]\n"
- "st1 { v9.s }[2], [x25]\n"
+ "tbz x13, #2, 72f\n"
+ "st1 { v7.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "tbz x13, #1, 71f\n"
+ "str d12, [x12], #0x8\n"
+ "str d9, [x24], #0x8\n"
+ "tbz x13, #0, 74f\n"
+ "st1 { v12.s }[2], [x12]\n"
+ "st1 { v9.s }[2], [x24]\n"
"b 74f\n"
"71:" // Height 2: Partial direct writeback: partial_1_4
- "tbz x14, #0, 74f\n"
- "str s12, [x13, #0x0]\n"
- "str s9, [x25, #0x0]\n"
+ "tbz x13, #0, 74f\n"
+ "str s12, [x12, #0x0]\n"
+ "str s9, [x24, #0x0]\n"
"b 74f\n"
"72:" // Height 2: Partial direct writeback: partial_2_0
- "tbz x14, #1, 73f\n"
- "str d7, [x13], #0x8\n"
- "str d8, [x25], #0x8\n"
- "tbz x14, #0, 74f\n"
- "st1 { v7.s }[2], [x13]\n"
- "st1 { v8.s }[2], [x25]\n"
+ "tbz x13, #1, 73f\n"
+ "str d7, [x12], #0x8\n"
+ "str d8, [x24], #0x8\n"
+ "tbz x13, #0, 74f\n"
+ "st1 { v7.s }[2], [x12]\n"
+ "st1 { v8.s }[2], [x24]\n"
"b 74f\n"
"73:" // Height 2: Partial direct writeback: partial_1_0
- "str s7, [x13, #0x0]\n"
- "str s8, [x25, #0x0]\n"
+ "str s7, [x12, #0x0]\n"
+ "str s8, [x24, #0x0]\n"
"74:" // Height 2: Partial direct writeback: Done
"b 76f\n"
"75:" // Height 2: Full writeback
- "str q7, [x13, #0x0]\n"
- "str q12, [x13, #0x10]\n"
- "str q13, [x13, #0x20]\n"
- "str q14, [x13, #0x30]\n"
- "add x13, x13, #0x40\n"
- "str q8, [x25, #0x0]\n"
- "str q9, [x25, #0x10]\n"
- "str q10, [x25, #0x20]\n"
- "str q11, [x25, #0x30]\n"
+ "str q7, [x12, #0x0]\n"
+ "str q12, [x12, #0x10]\n"
+ "str q13, [x12, #0x20]\n"
+ "str q14, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "str q8, [x24, #0x0]\n"
+ "str q9, [x24, #0x10]\n"
+ "str q10, [x24, #0x20]\n"
+ "str q11, [x24, #0x30]\n"
"76:" // Height 2: Writeback done
- "subs x14, x14, #0x10\n"
+ "subs x13, x13, #0x10\n"
"bgt 40b\n"
"b 230f\n"
"77:" // Height 3
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"78:" // Height 3: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x20, x9, x20, LSL #1\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0xc\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0xc\n"
"bgt 79f\n"
- "cmp x14, #0x8\n"
- "mov x9, x12\n"
+ "cmp x13, #0x8\n"
+ "mov x28, x11\n"
"bgt 79f\n"
- "cmp x14, #0x4\n"
- "mov x10, x12\n"
+ "cmp x13, #0x4\n"
+ "mov x9, x11\n"
"bgt 79f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"79:" // Height 3: B setup done
- "cbz x15, 80f\n"
- "ldr q8, [x15, #0x0]\n"
- "ldr q9, [x15, #0x10]\n"
+ "cbz x14, 80f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
"zip2 v12.2d, v8.2d, v8.2d\n"
"zip1 v8.2d, v8.2d, v8.2d\n"
- "ldr q10, [x15, #0x20]\n"
- "ldr q11, [x15, #0x30]\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x15, x15, #0x40\n"
+ "add x14, x14, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
"mov v16.16b, v8.16b\n"
@@ -962,111 +962,111 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"b 92f\n"
"80:" // Height 3: no bias
"tbz %x[flags], #0, 91f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "cmp x14, #0x10\n"
- "add x24, x25, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "cmp x13, #0x10\n"
+ "add x23, x24, x19, LSL #2\n"
"bge 89f\n"
- "tbz x14, #3, 84f\n"
- "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "tbz x14, #2, 82f\n"
- "ld1 { v11.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v19.4s }, [x24], #0x10\n"
- "tbz x14, #1, 81f\n"
- "ldr d16, [x13], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "mov x20, #0x38\n"
- "ldr d24, [x24], #0x8\n"
- "tbz x14, #0, 88f\n"
- "ld1 { v16.s }[2], [x13]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v24.s }[2], [x24]\n"
+ "tbz x13, #3, 84f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "tbz x13, #2, 82f\n"
+ "ld1 { v11.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v19.4s }, [x23], #0x10\n"
+ "tbz x13, #1, 81f\n"
+ "ldr d16, [x12], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d24, [x23], #0x8\n"
+ "tbz x13, #0, 88f\n"
+ "ld1 { v16.s }[2], [x12]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
"b 88f\n"
"81:" // Height 3: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
- "tbz x14, #0, 88f\n"
- "ldr s16, [x13, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s24, [x24, #0x0]\n"
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 88f\n"
+ "ldr s16, [x12, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
"b 88f\n"
"82:" // Height 3: Partial accumulate: partial_2_8
- "tbz x14, #1, 83f\n"
- "ldr d11, [x13], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "mov x20, #0x28\n"
- "ldr d19, [x24], #0x8\n"
- "tbz x14, #0, 88f\n"
- "ld1 { v11.s }[2], [x13]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
+ "tbz x13, #1, 83f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d19, [x23], #0x8\n"
+ "tbz x13, #0, 88f\n"
+ "ld1 { v11.s }[2], [x12]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
"b 88f\n"
"83:" // Height 3: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
- "tbz x14, #0, 88f\n"
- "ldr s11, [x13, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 88f\n"
+ "ldr s11, [x12, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
"b 88f\n"
"84:" // Height 3: Partial accumulate: partial_4_0
- "tbz x14, #2, 86f\n"
- "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "tbz x14, #1, 85f\n"
- "ldr d10, [x13], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "mov x20, #0x18\n"
- "ldr d18, [x24], #0x8\n"
- "tbz x14, #0, 88f\n"
- "ld1 { v10.s }[2], [x13]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
+ "tbz x13, #2, 86f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "tbz x13, #1, 85f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d18, [x23], #0x8\n"
+ "tbz x13, #0, 88f\n"
+ "ld1 { v10.s }[2], [x12]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
"b 88f\n"
"85:" // Height 3: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
- "tbz x14, #0, 88f\n"
- "ldr s10, [x13, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 88f\n"
+ "ldr s10, [x12, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
"b 88f\n"
"86:" // Height 3: Partial accumulate: partial_2_0
- "tbz x14, #1, 87f\n"
- "ldr d9, [x13], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "tbz x14, #0, 88f\n"
- "ld1 { v9.s }[2], [x13]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
+ "tbz x13, #1, 87f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "tbz x13, #0, 88f\n"
+ "ld1 { v9.s }[2], [x12]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
"b 88f\n"
"87:" // Height 3: Partial accumulate: partial_1_0
- "ldr s9, [x13, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s17, [x24, #0x0]\n"
+ "ldr s9, [x12, #0x0]\n"
+ "ldr s12, [x24, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s17, [x23, #0x0]\n"
"88:" // Height 3: Partial accumulate: Done
- "sub x13, x13, x20\n"
+ "sub x12, x12, x19\n"
"b 90f\n"
"89:" // Height 3: full accumulate
- "ldr q9, [x13, #0x0]\n"
- "ldr q10, [x13, #0x10]\n"
- "ldr q11, [x13, #0x20]\n"
- "ldr q16, [x13, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q17, [x24, #0x0]\n"
- "ldr q18, [x24, #0x10]\n"
- "ldr q19, [x24, #0x20]\n"
- "ldr q24, [x24, #0x30]\n"
+ "ldr q9, [x12, #0x0]\n"
+ "ldr q10, [x12, #0x10]\n"
+ "ldr q11, [x12, #0x20]\n"
+ "ldr q16, [x12, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q17, [x23, #0x0]\n"
+ "ldr q18, [x23, #0x10]\n"
+ "ldr q19, [x23, #0x20]\n"
+ "ldr q24, [x23, #0x30]\n"
"90:" // Height 3: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -1103,36 +1103,36 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"movi v22.16b, #0x0\n"
"movi v23.16b, #0x0\n"
"92:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"93:" // Height 3: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 94f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 95f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 95f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
"b 95f\n"
"94:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
"95:" // Height 3: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"blt 98f\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q2, [x25, #0x0]\n"
- "cmp x27, #0x10\n"
- "ldr q3, [x24, #0x0]\n"
- "ldr q7, [x12, #0x0]\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "ldr q2, [x24, #0x0]\n"
+ "cmp x26, #0x10\n"
+ "ldr q3, [x23, #0x0]\n"
+ "ldr q7, [x11, #0x0]\n"
+ "ldr q6, [x11, #0x10]\n"
"blt 97f\n"
"96:" // Height 3: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
@@ -1140,65 +1140,65 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
- "ldr q6, [x11, #0x10]\n"
+ "ldr q6, [x10, #0x10]\n"
".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
- "ldr q7, [x10, #0x0]\n"
+ "ldr q7, [x9, #0x0]\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
- "sub x27, x27, #0x8\n"
+ "sub x26, x26, #0x8\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
- "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q6, [x28, #0x10]\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
- "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
- "ldr q7, [x12, #0x20]\n"
+ "ldr q7, [x11, #0x20]\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
- "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
- "ldr q6, [x12, #0x30]\n"
- "ldr q2, [x25, #0x0]\n"
+ "ldr q6, [x11, #0x30]\n"
+ "ldr q2, [x24, #0x0]\n"
".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
".inst 0x6e47ec70 // bfmmla v16.4s, v3.8h, v7.8h\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
".inst 0x6e46ec2c // bfmmla v12.4s, v1.8h, v6.8h\n"
- "add x12, x12, #0x40\n"
+ "add x11, x11, #0x40\n"
".inst 0x6e46ec74 // bfmmla v20.4s, v3.8h, v6.8h\n"
- "ldr q6, [x11, #0x30]\n"
+ "ldr q6, [x10, #0x30]\n"
".inst 0x6e47ec29 // bfmmla v9.4s, v1.8h, v7.8h\n"
- "add x11, x11, #0x40\n"
+ "add x10, x10, #0x40\n"
".inst 0x6e47ec71 // bfmmla v17.4s, v3.8h, v7.8h\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x6e46ec2d // bfmmla v13.4s, v1.8h, v6.8h\n"
".inst 0x6e46ec75 // bfmmla v21.4s, v3.8h, v6.8h\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x6e47ec2a // bfmmla v10.4s, v1.8h, v7.8h\n"
- "add x10, x10, #0x40\n"
+ "add x9, x9, #0x40\n"
".inst 0x6e47ec72 // bfmmla v18.4s, v3.8h, v7.8h\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
".inst 0x6e46ec76 // bfmmla v22.4s, v3.8h, v6.8h\n"
- "ldr q6, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q6, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
".inst 0x6e47ec2b // bfmmla v11.4s, v1.8h, v7.8h\n"
".inst 0x6e47ec73 // bfmmla v19.4s, v3.8h, v7.8h\n"
- "ldr q7, [x12, #0x0]\n"
+ "ldr q7, [x11, #0x0]\n"
".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
- "ldr q1, [x26, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
".inst 0x6e46ec77 // bfmmla v23.4s, v3.8h, v6.8h\n"
- "ldr q3, [x24, #0x0]\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q3, [x23, #0x0]\n"
+ "ldr q6, [x11, #0x10]\n"
"bge 96b\n"
"97:" // Height 3: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
@@ -1206,158 +1206,158 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
- "ldr q6, [x11, #0x10]\n"
+ "ldr q6, [x10, #0x10]\n"
".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
- "ldr q7, [x10, #0x0]\n"
+ "ldr q7, [x9, #0x0]\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
- "sub x27, x27, #0x8\n"
+ "sub x26, x26, #0x8\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
- "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
- "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q6, [x28, #0x10]\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
- "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
- "ldr q7, [x12, #0x20]\n"
+ "ldr q7, [x11, #0x20]\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
- "ldr q6, [x12, #0x30]\n"
+ "ldr q6, [x11, #0x30]\n"
".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
- "add x12, x12, #0x40\n"
+ "add x11, x11, #0x40\n"
".inst 0x6e47ec70 // bfmmla v16.4s, v3.8h, v7.8h\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
".inst 0x6e46ec2c // bfmmla v12.4s, v1.8h, v6.8h\n"
".inst 0x6e46ec74 // bfmmla v20.4s, v3.8h, v6.8h\n"
- "ldr q6, [x11, #0x30]\n"
+ "ldr q6, [x10, #0x30]\n"
".inst 0x6e47ec29 // bfmmla v9.4s, v1.8h, v7.8h\n"
- "add x11, x11, #0x40\n"
+ "add x10, x10, #0x40\n"
".inst 0x6e47ec71 // bfmmla v17.4s, v3.8h, v7.8h\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x6e46ec2d // bfmmla v13.4s, v1.8h, v6.8h\n"
".inst 0x6e46ec75 // bfmmla v21.4s, v3.8h, v6.8h\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x6e47ec2a // bfmmla v10.4s, v1.8h, v7.8h\n"
- "add x10, x10, #0x40\n"
+ "add x9, x9, #0x40\n"
".inst 0x6e47ec72 // bfmmla v18.4s, v3.8h, v7.8h\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
".inst 0x6e46ec76 // bfmmla v22.4s, v3.8h, v6.8h\n"
- "ldr q6, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q6, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
".inst 0x6e47ec2b // bfmmla v11.4s, v1.8h, v7.8h\n"
".inst 0x6e47ec73 // bfmmla v19.4s, v3.8h, v7.8h\n"
".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
".inst 0x6e46ec77 // bfmmla v23.4s, v3.8h, v6.8h\n"
"98:" // Height 3: Multiply loop: Main loop skip
- "cbz x27, 103f\n"
- "cmp x27, #0x4\n"
+ "cbz x26, 103f\n"
+ "cmp x26, #0x4\n"
"blt 100f\n"
"99:" // Height 3: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d2, [x25], #0x8\n"
+ "ldr d1, [x25], #0x8\n"
+ "ldr d2, [x24], #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "ldr d3, [x24], #0x8\n"
- "ldr q6, [x12, #0x0]\n"
+ "ldr d3, [x23], #0x8\n"
+ "ldr q6, [x11, #0x0]\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
- "ldr q7, [x12, #0x10]\n"
+ "ldr q7, [x11, #0x10]\n"
".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
- "ldr q6, [x11, #0x0]\n"
+ "ldr q6, [x10, #0x0]\n"
".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
- "ldr q7, [x11, #0x10]\n"
+ "ldr q7, [x10, #0x10]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- "sub x27, x27, #0x4\n"
+ "sub x26, x26, #0x4\n"
".inst 0x6e46ec51 // bfmmla v17.4s, v2.8h, v6.8h\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
".inst 0x6e47ec55 // bfmmla v21.4s, v2.8h, v7.8h\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q7, [x9, #0x10]\n"
".inst 0x6e46ec0a // bfmmla v10.4s, v0.8h, v6.8h\n"
- "add x12, x12, #0x20\n"
+ "add x11, x11, #0x20\n"
".inst 0x6e46ec52 // bfmmla v18.4s, v2.8h, v6.8h\n"
- "ldr q6, [x9, #0x0]\n"
+ "ldr q6, [x28, #0x0]\n"
".inst 0x6e47ec0e // bfmmla v14.4s, v0.8h, v7.8h\n"
- "add x11, x11, #0x20\n"
+ "add x10, x10, #0x20\n"
".inst 0x6e47ec56 // bfmmla v22.4s, v2.8h, v7.8h\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
- "add x10, x10, #0x20\n"
+ "add x9, x9, #0x20\n"
".inst 0x6e46ec53 // bfmmla v19.4s, v2.8h, v6.8h\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
- "add x9, x9, #0x20\n"
+ "add x28, x28, #0x20\n"
".inst 0x6e47ec57 // bfmmla v23.4s, v2.8h, v7.8h\n"
"bge 99b\n"
"100:" // Height 3: Multiply loop: Skip odd blocks
- "cbz x27, 103f\n"
- "tbz x27, #1, 101f\n"
- "ldr s1, [x26], #0x4\n"
- "ldr s2, [x25], #0x4\n"
- "ldr s3, [x24], #0x4\n"
- "tbz x27, #0, 102f\n"
- "ld1 { v1.h }[2], [x26]\n"
- "ld1 { v2.h }[2], [x25]\n"
- "ld1 { v3.h }[2], [x24]\n"
+ "cbz x26, 103f\n"
+ "tbz x26, #1, 101f\n"
+ "ldr s1, [x25], #0x4\n"
+ "ldr s2, [x24], #0x4\n"
+ "ldr s3, [x23], #0x4\n"
+ "tbz x26, #0, 102f\n"
+ "ld1 { v1.h }[2], [x25]\n"
+ "ld1 { v2.h }[2], [x24]\n"
+ "ld1 { v3.h }[2], [x23]\n"
"b 102f\n"
"101:" // Height 3: Multiply loop: Ragged operand read: partial_1_0
- "ldr h1, [x26, #0x0]\n"
- "ldr h2, [x25, #0x0]\n"
- "ldr h3, [x24, #0x0]\n"
+ "ldr h1, [x25, #0x0]\n"
+ "ldr h2, [x24, #0x0]\n"
+ "ldr h3, [x23, #0x0]\n"
"102:" // Height 3: Multiply loop: Ragged operand read: Done
- "ldr q7, [x12, #0x0]\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q7, [x11, #0x0]\n"
+ "ldr q6, [x11, #0x10]\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
- "ldr q6, [x11, #0x10]\n"
- "add x12, x12, #0x20\n"
+ "ldr q6, [x10, #0x10]\n"
+ "add x11, x11, #0x20\n"
".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
- "ldr q7, [x10, #0x0]\n"
- "add x11, x11, #0x20\n"
+ "ldr q7, [x9, #0x0]\n"
+ "add x10, x10, #0x20\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
- "ldr q6, [x10, #0x10]\n"
- "add x10, x10, #0x20\n"
+ "ldr q6, [x9, #0x10]\n"
+ "add x9, x9, #0x20\n"
".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
- "ldr q6, [x9, #0x10]\n"
- "add x9, x9, #0x20\n"
+ "ldr q6, [x28, #0x10]\n"
+ "add x28, x28, #0x20\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
"103:" // Height 3: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 93b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
- "add x24, x25, x20, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
@@ -1367,10 +1367,10 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"uzp1 v18.2d, v18.2d, v22.2d\n"
"uzp1 v19.2d, v19.2d, v23.2d\n"
"tbz %x[flags], #1, 104f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.4s }, [x19]\n"
"fmin v7.4s, v7.4s, v1.4s\n"
"fmin v12.4s, v12.4s, v1.4s\n"
"fmin v13.4s, v13.4s, v1.4s\n"
@@ -1396,140 +1396,140 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"fmax v18.4s, v18.4s, v0.4s\n"
"fmax v19.4s, v19.4s, v0.4s\n"
"104:" // Height 3: No activation
- "cmp x14, #0x10\n"
+ "cmp x13, #0x10\n"
"bge 113f\n"
- "tbz x14, #3, 108f\n"
- "st1 { v7.4s }, [x13], #0x10\n"
- "st1 { v12.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v9.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
- "tbz x14, #2, 106f\n"
- "st1 { v13.4s }, [x13], #0x10\n"
- "st1 { v10.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
- "tbz x14, #1, 105f\n"
- "str d14, [x13], #0x8\n"
- "str d11, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "tbz x14, #0, 112f\n"
- "st1 { v14.s }[2], [x13]\n"
- "st1 { v11.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
+ "tbz x13, #3, 108f\n"
+ "st1 { v7.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v17.4s }, [x23], #0x10\n"
+ "tbz x13, #2, 106f\n"
+ "st1 { v13.4s }, [x12], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
+ "st1 { v18.4s }, [x23], #0x10\n"
+ "tbz x13, #1, 105f\n"
+ "str d14, [x12], #0x8\n"
+ "str d11, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "tbz x13, #0, 112f\n"
+ "st1 { v14.s }[2], [x12]\n"
+ "st1 { v11.s }[2], [x24]\n"
+ "st1 { v19.s }[2], [x23]\n"
"b 112f\n"
"105:" // Height 3: Partial direct writeback: partial_1_12
- "tbz x14, #0, 112f\n"
- "str s14, [x13, #0x0]\n"
- "str s11, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
+ "tbz x13, #0, 112f\n"
+ "str s14, [x12, #0x0]\n"
+ "str s11, [x24, #0x0]\n"
+ "str s19, [x23, #0x0]\n"
"b 112f\n"
"106:" // Height 3: Partial direct writeback: partial_2_8
- "tbz x14, #1, 107f\n"
- "str d13, [x13], #0x8\n"
- "str d10, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "tbz x14, #0, 112f\n"
- "st1 { v13.s }[2], [x13]\n"
- "st1 { v10.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
+ "tbz x13, #1, 107f\n"
+ "str d13, [x12], #0x8\n"
+ "str d10, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
+ "tbz x13, #0, 112f\n"
+ "st1 { v13.s }[2], [x12]\n"
+ "st1 { v10.s }[2], [x24]\n"
+ "st1 { v18.s }[2], [x23]\n"
"b 112f\n"
"107:" // Height 3: Partial direct writeback: partial_1_8
- "tbz x14, #0, 112f\n"
- "str s13, [x13, #0x0]\n"
- "str s10, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
+ "tbz x13, #0, 112f\n"
+ "str s13, [x12, #0x0]\n"
+ "str s10, [x24, #0x0]\n"
+ "str s18, [x23, #0x0]\n"
"b 112f\n"
"108:" // Height 3: Partial direct writeback: partial_4_0
- "tbz x14, #2, 110f\n"
- "st1 { v7.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "tbz x14, #1, 109f\n"
- "str d12, [x13], #0x8\n"
- "str d9, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "tbz x14, #0, 112f\n"
- "st1 { v12.s }[2], [x13]\n"
- "st1 { v9.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
+ "tbz x13, #2, 110f\n"
+ "st1 { v7.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "tbz x13, #1, 109f\n"
+ "str d12, [x12], #0x8\n"
+ "str d9, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
+ "tbz x13, #0, 112f\n"
+ "st1 { v12.s }[2], [x12]\n"
+ "st1 { v9.s }[2], [x24]\n"
+ "st1 { v17.s }[2], [x23]\n"
"b 112f\n"
"109:" // Height 3: Partial direct writeback: partial_1_4
- "tbz x14, #0, 112f\n"
- "str s12, [x13, #0x0]\n"
- "str s9, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
+ "tbz x13, #0, 112f\n"
+ "str s12, [x12, #0x0]\n"
+ "str s9, [x24, #0x0]\n"
+ "str s17, [x23, #0x0]\n"
"b 112f\n"
"110:" // Height 3: Partial direct writeback: partial_2_0
- "tbz x14, #1, 111f\n"
- "str d7, [x13], #0x8\n"
- "str d8, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "tbz x14, #0, 112f\n"
- "st1 { v7.s }[2], [x13]\n"
- "st1 { v8.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
+ "tbz x13, #1, 111f\n"
+ "str d7, [x12], #0x8\n"
+ "str d8, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
+ "tbz x13, #0, 112f\n"
+ "st1 { v7.s }[2], [x12]\n"
+ "st1 { v8.s }[2], [x24]\n"
+ "st1 { v16.s }[2], [x23]\n"
"b 112f\n"
"111:" // Height 3: Partial direct writeback: partial_1_0
- "str s7, [x13, #0x0]\n"
- "str s8, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
+ "str s7, [x12, #0x0]\n"
+ "str s8, [x24, #0x0]\n"
+ "str s16, [x23, #0x0]\n"
"112:" // Height 3: Partial direct writeback: Done
"b 114f\n"
"113:" // Height 3: Full writeback
- "str q7, [x13, #0x0]\n"
- "str q12, [x13, #0x10]\n"
- "str q13, [x13, #0x20]\n"
- "str q14, [x13, #0x30]\n"
- "add x13, x13, #0x40\n"
- "str q8, [x25, #0x0]\n"
- "str q9, [x25, #0x10]\n"
- "str q10, [x25, #0x20]\n"
- "str q11, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
+ "str q7, [x12, #0x0]\n"
+ "str q12, [x12, #0x10]\n"
+ "str q13, [x12, #0x20]\n"
+ "str q14, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "str q8, [x24, #0x0]\n"
+ "str q9, [x24, #0x10]\n"
+ "str q10, [x24, #0x20]\n"
+ "str q11, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
"114:" // Height 3: Writeback done
- "subs x14, x14, #0x10\n"
+ "subs x13, x13, #0x10\n"
"bgt 78b\n"
"b 230f\n"
"115:" // Height 4
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"116:" // Height 4: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x20, x9, x20, LSL #1\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0xc\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0xc\n"
"bgt 117f\n"
- "cmp x14, #0x8\n"
- "mov x9, x12\n"
+ "cmp x13, #0x8\n"
+ "mov x28, x11\n"
"bgt 117f\n"
- "cmp x14, #0x4\n"
- "mov x10, x12\n"
+ "cmp x13, #0x4\n"
+ "mov x9, x11\n"
"bgt 117f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"117:" // Height 4: B setup done
- "cbz x15, 118f\n"
- "ldr q8, [x15, #0x0]\n"
- "ldr q9, [x15, #0x10]\n"
+ "cbz x14, 118f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
"zip2 v12.2d, v8.2d, v8.2d\n"
"zip1 v8.2d, v8.2d, v8.2d\n"
- "ldr q10, [x15, #0x20]\n"
- "ldr q11, [x15, #0x30]\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x15, x15, #0x40\n"
+ "add x14, x14, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
"mov v16.16b, v8.16b\n"
@@ -1543,132 +1543,132 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"b 130f\n"
"118:" // Height 4: no bias
"tbz %x[flags], #0, 129f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "cmp x14, #0x10\n"
- "add x23, x24, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "cmp x13, #0x10\n"
+ "add x22, x23, x19, LSL #2\n"
"bge 127f\n"
- "tbz x14, #3, 122f\n"
- "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
- "tbz x14, #2, 120f\n"
- "ld1 { v11.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v19.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
- "tbz x14, #1, 119f\n"
- "ldr d16, [x13], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "mov x20, #0x38\n"
- "ldr d24, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "tbz x14, #0, 126f\n"
- "ld1 { v16.s }[2], [x13]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v24.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
+ "tbz x13, #3, 122f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
+ "tbz x13, #2, 120f\n"
+ "ld1 { v11.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v19.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
+ "tbz x13, #1, 119f\n"
+ "ldr d16, [x12], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d24, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "tbz x13, #0, 126f\n"
+ "ld1 { v16.s }[2], [x12]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
+ "ld1 { v23.s }[2], [x22]\n"
"b 126f\n"
"119:" // Height 4: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
- "tbz x14, #0, 126f\n"
- "ldr s16, [x13, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s24, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 126f\n"
+ "ldr s16, [x12, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
+ "ldr s23, [x22, #0x0]\n"
"b 126f\n"
"120:" // Height 4: Partial accumulate: partial_2_8
- "tbz x14, #1, 121f\n"
- "ldr d11, [x13], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "mov x20, #0x28\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "tbz x14, #0, 126f\n"
- "ld1 { v11.s }[2], [x13]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
+ "tbz x13, #1, 121f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "tbz x13, #0, 126f\n"
+ "ld1 { v11.s }[2], [x12]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v22.s }[2], [x22]\n"
"b 126f\n"
"121:" // Height 4: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
- "tbz x14, #0, 126f\n"
- "ldr s11, [x13, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 126f\n"
+ "ldr s11, [x12, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s22, [x22, #0x0]\n"
"b 126f\n"
"122:" // Height 4: Partial accumulate: partial_4_0
- "tbz x14, #2, 124f\n"
- "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "tbz x14, #1, 123f\n"
- "ldr d10, [x13], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "mov x20, #0x18\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "tbz x14, #0, 126f\n"
- "ld1 { v10.s }[2], [x13]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
+ "tbz x13, #2, 124f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "tbz x13, #1, 123f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "tbz x13, #0, 126f\n"
+ "ld1 { v10.s }[2], [x12]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v21.s }[2], [x22]\n"
"b 126f\n"
"123:" // Height 4: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
- "tbz x14, #0, 126f\n"
- "ldr s10, [x13, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 126f\n"
+ "ldr s10, [x12, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "ldr s21, [x22, #0x0]\n"
"b 126f\n"
"124:" // Height 4: Partial accumulate: partial_2_0
- "tbz x14, #1, 125f\n"
- "ldr d9, [x13], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "tbz x14, #0, 126f\n"
- "ld1 { v9.s }[2], [x13]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
+ "tbz x13, #1, 125f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "tbz x13, #0, 126f\n"
+ "ld1 { v9.s }[2], [x12]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v20.s }[2], [x22]\n"
"b 126f\n"
"125:" // Height 4: Partial accumulate: partial_1_0
- "ldr s9, [x13, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
+ "ldr s9, [x12, #0x0]\n"
+ "ldr s12, [x24, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s17, [x23, #0x0]\n"
+ "ldr s20, [x22, #0x0]\n"
"126:" // Height 4: Partial accumulate: Done
- "sub x13, x13, x20\n"
+ "sub x12, x12, x19\n"
"b 128f\n"
"127:" // Height 4: full accumulate
- "ldr q9, [x13, #0x0]\n"
- "ldr q10, [x13, #0x10]\n"
- "ldr q11, [x13, #0x20]\n"
- "ldr q16, [x13, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q17, [x24, #0x0]\n"
- "ldr q18, [x24, #0x10]\n"
- "ldr q19, [x24, #0x20]\n"
- "ldr q24, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
+ "ldr q9, [x12, #0x0]\n"
+ "ldr q10, [x12, #0x10]\n"
+ "ldr q11, [x12, #0x20]\n"
+ "ldr q16, [x12, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q17, [x23, #0x0]\n"
+ "ldr q18, [x23, #0x10]\n"
+ "ldr q19, [x23, #0x20]\n"
+ "ldr q24, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
"128:" // Height 4: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -1705,272 +1705,272 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"movi v22.16b, #0x0\n"
"movi v23.16b, #0x0\n"
"130:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"131:" // Height 4: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 132f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 133f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 133f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
"b 133f\n"
"132:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
"133:" // Height 4: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"blt 136f\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q2, [x25, #0x0]\n"
- "cmp x27, #0x10\n"
- "ldr q3, [x24, #0x0]\n"
- "ldr q4, [x23, #0x0]\n"
- "ldr q7, [x12, #0x0]\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "ldr q2, [x24, #0x0]\n"
+ "cmp x26, #0x10\n"
+ "ldr q3, [x23, #0x0]\n"
+ "ldr q4, [x22, #0x0]\n"
+ "ldr q7, [x11, #0x0]\n"
+ "ldr q6, [x11, #0x10]\n"
"blt 135f\n"
"134:" // Height 4: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
- "sub x27, x27, #0x8\n"
+ "sub x26, x26, #0x8\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
- "ldr q6, [x11, #0x10]\n"
+ "ldr q6, [x10, #0x10]\n"
".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
- "ldr q7, [x10, #0x0]\n"
+ "ldr q7, [x9, #0x0]\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
- "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
- "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q6, [x28, #0x10]\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
- "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
- "ldr q7, [x12, #0x20]\n"
+ "ldr q7, [x11, #0x20]\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
- "add x23, x23, #0x10\n"
- "ldr q4, [x23, #0x0]\n"
+ "add x22, x22, #0x10\n"
+ "ldr q4, [x22, #0x0]\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
- "ldr q6, [x12, #0x30]\n"
+ "ldr q6, [x11, #0x30]\n"
".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
- "ldr q2, [x25, #0x0]\n"
+ "ldr q2, [x24, #0x0]\n"
".inst 0x6e47ec70 // bfmmla v16.4s, v3.8h, v7.8h\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
".inst 0x6e46ec2c // bfmmla v12.4s, v1.8h, v6.8h\n"
".inst 0x6e46ec74 // bfmmla v20.4s, v3.8h, v6.8h\n"
- "ldr q6, [x11, #0x30]\n"
+ "ldr q6, [x10, #0x30]\n"
".inst 0x6e47ec29 // bfmmla v9.4s, v1.8h, v7.8h\n"
- "add x12, x12, #0x40\n"
+ "add x11, x11, #0x40\n"
".inst 0x6e47ec71 // bfmmla v17.4s, v3.8h, v7.8h\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x6e46ec2d // bfmmla v13.4s, v1.8h, v6.8h\n"
- "add x11, x11, #0x40\n"
+ "add x10, x10, #0x40\n"
".inst 0x6e46ec75 // bfmmla v21.4s, v3.8h, v6.8h\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x6e47ec2a // bfmmla v10.4s, v1.8h, v7.8h\n"
- "add x10, x10, #0x40\n"
+ "add x9, x9, #0x40\n"
".inst 0x6e47ec72 // bfmmla v18.4s, v3.8h, v7.8h\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
".inst 0x6e46ec76 // bfmmla v22.4s, v3.8h, v6.8h\n"
- "ldr q6, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q6, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
".inst 0x6e47ec2b // bfmmla v11.4s, v1.8h, v7.8h\n"
".inst 0x6e47ec73 // bfmmla v19.4s, v3.8h, v7.8h\n"
- "ldr q7, [x12, #0x0]\n"
+ "ldr q7, [x11, #0x0]\n"
".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
- "ldr q1, [x26, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
".inst 0x6e46ec77 // bfmmla v23.4s, v3.8h, v6.8h\n"
- "ldr q3, [x24, #0x0]\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q3, [x23, #0x0]\n"
+ "ldr q6, [x11, #0x10]\n"
"bge 134b\n"
"135:" // Height 4: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
- "sub x27, x27, #0x8\n"
+ "sub x26, x26, #0x8\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
- "ldr q6, [x11, #0x10]\n"
+ "ldr q6, [x10, #0x10]\n"
".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
- "ldr q7, [x10, #0x0]\n"
+ "ldr q7, [x9, #0x0]\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
- "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
- "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
- "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q6, [x28, #0x10]\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
- "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
- "ldr q7, [x12, #0x20]\n"
+ "ldr q7, [x11, #0x20]\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
- "ldr q6, [x12, #0x30]\n"
+ "ldr q6, [x11, #0x30]\n"
".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
- "add x12, x12, #0x40\n"
+ "add x11, x11, #0x40\n"
".inst 0x6e47ec70 // bfmmla v16.4s, v3.8h, v7.8h\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
".inst 0x6e46ec2c // bfmmla v12.4s, v1.8h, v6.8h\n"
".inst 0x6e46ec74 // bfmmla v20.4s, v3.8h, v6.8h\n"
- "ldr q6, [x11, #0x30]\n"
+ "ldr q6, [x10, #0x30]\n"
".inst 0x6e47ec29 // bfmmla v9.4s, v1.8h, v7.8h\n"
- "add x11, x11, #0x40\n"
+ "add x10, x10, #0x40\n"
".inst 0x6e47ec71 // bfmmla v17.4s, v3.8h, v7.8h\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x6e46ec2d // bfmmla v13.4s, v1.8h, v6.8h\n"
".inst 0x6e46ec75 // bfmmla v21.4s, v3.8h, v6.8h\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x6e47ec2a // bfmmla v10.4s, v1.8h, v7.8h\n"
- "add x10, x10, #0x40\n"
+ "add x9, x9, #0x40\n"
".inst 0x6e47ec72 // bfmmla v18.4s, v3.8h, v7.8h\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
".inst 0x6e46ec76 // bfmmla v22.4s, v3.8h, v6.8h\n"
- "ldr q6, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q6, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
".inst 0x6e47ec2b // bfmmla v11.4s, v1.8h, v7.8h\n"
".inst 0x6e47ec73 // bfmmla v19.4s, v3.8h, v7.8h\n"
".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
".inst 0x6e46ec77 // bfmmla v23.4s, v3.8h, v6.8h\n"
"136:" // Height 4: Multiply loop: Main loop skip
- "cbz x27, 141f\n"
- "cmp x27, #0x4\n"
+ "cbz x26, 141f\n"
+ "cmp x26, #0x4\n"
"blt 138f\n"
"137:" // Height 4: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d2, [x25], #0x8\n"
+ "ldr d1, [x25], #0x8\n"
+ "ldr d2, [x24], #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "sub x27, x27, #0x4\n"
- "ldr d3, [x24], #0x8\n"
- "ldr d4, [x23], #0x8\n"
+ "sub x26, x26, #0x4\n"
+ "ldr d3, [x23], #0x8\n"
+ "ldr d4, [x22], #0x8\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
- "cmp x27, #0x4\n"
- "ldr q6, [x12, #0x0]\n"
- "ldr q7, [x12, #0x10]\n"
+ "cmp x26, #0x4\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x11, #0x10]\n"
".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
- "ldr q6, [x11, #0x0]\n"
+ "ldr q6, [x10, #0x0]\n"
".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
- "ldr q7, [x11, #0x10]\n"
+ "ldr q7, [x10, #0x10]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec51 // bfmmla v17.4s, v2.8h, v6.8h\n"
- "ldr q6, [x10, #0x0]\n"
- "add x12, x12, #0x20\n"
+ "ldr q6, [x9, #0x0]\n"
+ "add x11, x11, #0x20\n"
".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec55 // bfmmla v21.4s, v2.8h, v7.8h\n"
- "ldr q7, [x10, #0x10]\n"
- "add x11, x11, #0x20\n"
+ "ldr q7, [x9, #0x10]\n"
+ "add x10, x10, #0x20\n"
".inst 0x6e46ec0a // bfmmla v10.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec52 // bfmmla v18.4s, v2.8h, v6.8h\n"
- "ldr q6, [x9, #0x0]\n"
- "add x10, x10, #0x20\n"
+ "ldr q6, [x28, #0x0]\n"
+ "add x9, x9, #0x20\n"
".inst 0x6e47ec0e // bfmmla v14.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec56 // bfmmla v22.4s, v2.8h, v7.8h\n"
- "ldr q7, [x9, #0x10]\n"
- "add x9, x9, #0x20\n"
+ "ldr q7, [x28, #0x10]\n"
+ "add x28, x28, #0x20\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec53 // bfmmla v19.4s, v2.8h, v6.8h\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec57 // bfmmla v23.4s, v2.8h, v7.8h\n"
"bge 137b\n"
"138:" // Height 4: Multiply loop: Skip odd blocks
- "cbz x27, 141f\n"
- "tbz x27, #1, 139f\n"
- "ldr s1, [x26], #0x4\n"
- "ldr s2, [x25], #0x4\n"
- "ldr s3, [x24], #0x4\n"
- "ldr s4, [x23], #0x4\n"
- "tbz x27, #0, 140f\n"
- "ld1 { v1.h }[2], [x26]\n"
- "ld1 { v2.h }[2], [x25]\n"
- "ld1 { v3.h }[2], [x24]\n"
- "ld1 { v4.h }[2], [x23]\n"
+ "cbz x26, 141f\n"
+ "tbz x26, #1, 139f\n"
+ "ldr s1, [x25], #0x4\n"
+ "ldr s2, [x24], #0x4\n"
+ "ldr s3, [x23], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
+ "tbz x26, #0, 140f\n"
+ "ld1 { v1.h }[2], [x25]\n"
+ "ld1 { v2.h }[2], [x24]\n"
+ "ld1 { v3.h }[2], [x23]\n"
+ "ld1 { v4.h }[2], [x22]\n"
"b 140f\n"
"139:" // Height 4: Multiply loop: Ragged operand read: partial_1_0
- "ldr h1, [x26, #0x0]\n"
- "ldr h2, [x25, #0x0]\n"
- "ldr h3, [x24, #0x0]\n"
- "ldr h4, [x23, #0x0]\n"
+ "ldr h1, [x25, #0x0]\n"
+ "ldr h2, [x24, #0x0]\n"
+ "ldr h3, [x23, #0x0]\n"
+ "ldr h4, [x22, #0x0]\n"
"140:" // Height 4: Multiply loop: Ragged operand read: Done
- "ldr q7, [x12, #0x0]\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q7, [x11, #0x0]\n"
+ "ldr q6, [x11, #0x10]\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
- "ldr q7, [x11, #0x0]\n"
- "add x12, x12, #0x20\n"
+ "ldr q7, [x10, #0x0]\n"
+ "add x11, x11, #0x20\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
- "ldr q6, [x11, #0x10]\n"
- "add x11, x11, #0x20\n"
+ "ldr q6, [x10, #0x10]\n"
+ "add x10, x10, #0x20\n"
".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
- "ldr q7, [x10, #0x0]\n"
+ "ldr q7, [x9, #0x0]\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
- "ldr q6, [x10, #0x10]\n"
- "add x10, x10, #0x20\n"
+ "ldr q6, [x9, #0x10]\n"
+ "add x9, x9, #0x20\n"
".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
- "ldr q6, [x9, #0x10]\n"
- "add x9, x9, #0x20\n"
+ "ldr q6, [x28, #0x10]\n"
+ "add x28, x28, #0x20\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
"141:" // Height 4: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 131b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
- "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
@@ -1985,10 +1985,10 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"uzp1 v22.2d, v19.2d, v23.2d\n"
"uzp2 v19.2d, v19.2d, v23.2d\n"
"tbz %x[flags], #1, 142f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.4s }, [x19]\n"
"fmin v7.4s, v7.4s, v1.4s\n"
"fmin v12.4s, v12.4s, v1.4s\n"
"fmin v13.4s, v13.4s, v1.4s\n"
@@ -2022,160 +2022,160 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"fmax v18.4s, v18.4s, v0.4s\n"
"fmax v19.4s, v19.4s, v0.4s\n"
"142:" // Height 4: No activation
- "cmp x14, #0x10\n"
+ "cmp x13, #0x10\n"
"bge 151f\n"
- "tbz x14, #3, 146f\n"
- "st1 { v7.4s }, [x13], #0x10\n"
- "st1 { v12.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v9.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "tbz x14, #2, 144f\n"
- "st1 { v13.4s }, [x13], #0x10\n"
- "st1 { v10.4s }, [x25], #0x10\n"
- "st1 { v21.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "tbz x14, #1, 143f\n"
- "str d14, [x13], #0x8\n"
- "str d11, [x25], #0x8\n"
- "str d22, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
- "tbz x14, #0, 150f\n"
- "st1 { v14.s }[2], [x13]\n"
- "st1 { v11.s }[2], [x25]\n"
- "st1 { v22.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
+ "tbz x13, #3, 146f\n"
+ "st1 { v7.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
+ "tbz x13, #2, 144f\n"
+ "st1 { v13.4s }, [x12], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
+ "st1 { v21.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
+ "tbz x13, #1, 143f\n"
+ "str d14, [x12], #0x8\n"
+ "str d11, [x24], #0x8\n"
+ "str d22, [x23], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "tbz x13, #0, 150f\n"
+ "st1 { v14.s }[2], [x12]\n"
+ "st1 { v11.s }[2], [x24]\n"
+ "st1 { v22.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x22]\n"
"b 150f\n"
"143:" // Height 4: Partial direct writeback: partial_1_12
- "tbz x14, #0, 150f\n"
- "str s14, [x13, #0x0]\n"
- "str s11, [x25, #0x0]\n"
- "str s22, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
+ "tbz x13, #0, 150f\n"
+ "str s14, [x12, #0x0]\n"
+ "str s11, [x24, #0x0]\n"
+ "str s22, [x23, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
"b 150f\n"
"144:" // Height 4: Partial direct writeback: partial_2_8
- "tbz x14, #1, 145f\n"
- "str d13, [x13], #0x8\n"
- "str d10, [x25], #0x8\n"
- "str d21, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
- "tbz x14, #0, 150f\n"
- "st1 { v13.s }[2], [x13]\n"
- "st1 { v10.s }[2], [x25]\n"
- "st1 { v21.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
+ "tbz x13, #1, 145f\n"
+ "str d13, [x12], #0x8\n"
+ "str d10, [x24], #0x8\n"
+ "str d21, [x23], #0x8\n"
+ "str d18, [x22], #0x8\n"
+ "tbz x13, #0, 150f\n"
+ "st1 { v13.s }[2], [x12]\n"
+ "st1 { v10.s }[2], [x24]\n"
+ "st1 { v21.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x22]\n"
"b 150f\n"
"145:" // Height 4: Partial direct writeback: partial_1_8
- "tbz x14, #0, 150f\n"
- "str s13, [x13, #0x0]\n"
- "str s10, [x25, #0x0]\n"
- "str s21, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
+ "tbz x13, #0, 150f\n"
+ "str s13, [x12, #0x0]\n"
+ "str s10, [x24, #0x0]\n"
+ "str s21, [x23, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
"b 150f\n"
"146:" // Height 4: Partial direct writeback: partial_4_0
- "tbz x14, #2, 148f\n"
- "st1 { v7.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "tbz x14, #1, 147f\n"
- "str d12, [x13], #0x8\n"
- "str d9, [x25], #0x8\n"
- "str d20, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
- "tbz x14, #0, 150f\n"
- "st1 { v12.s }[2], [x13]\n"
- "st1 { v9.s }[2], [x25]\n"
- "st1 { v20.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
+ "tbz x13, #2, 148f\n"
+ "st1 { v7.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "tbz x13, #1, 147f\n"
+ "str d12, [x12], #0x8\n"
+ "str d9, [x24], #0x8\n"
+ "str d20, [x23], #0x8\n"
+ "str d17, [x22], #0x8\n"
+ "tbz x13, #0, 150f\n"
+ "st1 { v12.s }[2], [x12]\n"
+ "st1 { v9.s }[2], [x24]\n"
+ "st1 { v20.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x22]\n"
"b 150f\n"
"147:" // Height 4: Partial direct writeback: partial_1_4
- "tbz x14, #0, 150f\n"
- "str s12, [x13, #0x0]\n"
- "str s9, [x25, #0x0]\n"
- "str s20, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
+ "tbz x13, #0, 150f\n"
+ "str s12, [x12, #0x0]\n"
+ "str s9, [x24, #0x0]\n"
+ "str s20, [x23, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
"b 150f\n"
"148:" // Height 4: Partial direct writeback: partial_2_0
- "tbz x14, #1, 149f\n"
- "str d7, [x13], #0x8\n"
- "str d8, [x25], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "tbz x14, #0, 150f\n"
- "st1 { v7.s }[2], [x13]\n"
- "st1 { v8.s }[2], [x25]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
+ "tbz x13, #1, 149f\n"
+ "str d7, [x12], #0x8\n"
+ "str d8, [x24], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "tbz x13, #0, 150f\n"
+ "st1 { v7.s }[2], [x12]\n"
+ "st1 { v8.s }[2], [x24]\n"
+ "st1 { v15.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x22]\n"
"b 150f\n"
"149:" // Height 4: Partial direct writeback: partial_1_0
- "str s7, [x13, #0x0]\n"
- "str s8, [x25, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
+ "str s7, [x12, #0x0]\n"
+ "str s8, [x24, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
"150:" // Height 4: Partial direct writeback: Done
"b 152f\n"
"151:" // Height 4: Full writeback
- "str q7, [x13, #0x0]\n"
- "str q12, [x13, #0x10]\n"
- "str q13, [x13, #0x20]\n"
- "str q14, [x13, #0x30]\n"
- "add x13, x13, #0x40\n"
- "str q8, [x25, #0x0]\n"
- "str q9, [x25, #0x10]\n"
- "str q10, [x25, #0x20]\n"
- "str q11, [x25, #0x30]\n"
- "str q15, [x24, #0x0]\n"
- "str q20, [x24, #0x10]\n"
- "str q21, [x24, #0x20]\n"
- "str q22, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
+ "str q7, [x12, #0x0]\n"
+ "str q12, [x12, #0x10]\n"
+ "str q13, [x12, #0x20]\n"
+ "str q14, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "str q8, [x24, #0x0]\n"
+ "str q9, [x24, #0x10]\n"
+ "str q10, [x24, #0x20]\n"
+ "str q11, [x24, #0x30]\n"
+ "str q15, [x23, #0x0]\n"
+ "str q20, [x23, #0x10]\n"
+ "str q21, [x23, #0x20]\n"
+ "str q22, [x23, #0x30]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q17, [x22, #0x10]\n"
+ "str q18, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
"152:" // Height 4: Writeback done
- "subs x14, x14, #0x10\n"
+ "subs x13, x13, #0x10\n"
"bgt 116b\n"
"b 230f\n"
"153:" // Height 5
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"154:" // Height 5: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x20, x9, x20, LSL #1\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0xc\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0xc\n"
"bgt 155f\n"
- "cmp x14, #0x8\n"
- "mov x9, x12\n"
+ "cmp x13, #0x8\n"
+ "mov x28, x11\n"
"bgt 155f\n"
- "cmp x14, #0x4\n"
- "mov x10, x12\n"
+ "cmp x13, #0x4\n"
+ "mov x9, x11\n"
"bgt 155f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"155:" // Height 5: B setup done
- "cbz x15, 156f\n"
- "ldr q8, [x15, #0x0]\n"
- "ldr q9, [x15, #0x10]\n"
+ "cbz x14, 156f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
"zip2 v12.2d, v8.2d, v8.2d\n"
"zip1 v8.2d, v8.2d, v8.2d\n"
- "ldr q10, [x15, #0x20]\n"
- "ldr q11, [x15, #0x30]\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x15, x15, #0x40\n"
+ "add x14, x14, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
"mov v16.16b, v8.16b\n"
@@ -2197,153 +2197,153 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"b 168f\n"
"156:" // Height 5: no bias
"tbz %x[flags], #0, 167f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "cmp x14, #0x10\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "cmp x13, #0x10\n"
+ "add x21, x22, x19, LSL #2\n"
"bge 165f\n"
- "tbz x14, #3, 160f\n"
- "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
- "ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
- "ld1 { v26.4s }, [x22], #0x10\n"
- "tbz x14, #2, 158f\n"
- "ld1 { v11.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v19.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
- "ld1 { v27.4s }, [x22], #0x10\n"
- "tbz x14, #1, 157f\n"
- "ldr d16, [x13], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "mov x20, #0x38\n"
- "ldr d24, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d6, [x22], #0x8\n"
- "tbz x14, #0, 164f\n"
- "ld1 { v16.s }[2], [x13]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v24.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
- "ld1 { v6.s }[2], [x22]\n"
+ "tbz x13, #3, 160f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v26.4s }, [x21], #0x10\n"
+ "tbz x13, #2, 158f\n"
+ "ld1 { v11.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v19.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
+ "ld1 { v27.4s }, [x21], #0x10\n"
+ "tbz x13, #1, 157f\n"
+ "ldr d16, [x12], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d24, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "ldr d6, [x21], #0x8\n"
+ "tbz x13, #0, 164f\n"
+ "ld1 { v16.s }[2], [x12]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
+ "ld1 { v23.s }[2], [x22]\n"
+ "ld1 { v6.s }[2], [x21]\n"
"b 164f\n"
"157:" // Height 5: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
- "tbz x14, #0, 164f\n"
- "ldr s16, [x13, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s24, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
- "ldr s6, [x22, #0x0]\n"
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 164f\n"
+ "ldr s16, [x12, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
+ "ldr s23, [x22, #0x0]\n"
+ "ldr s6, [x21, #0x0]\n"
"b 164f\n"
"158:" // Height 5: Partial accumulate: partial_2_8
- "tbz x14, #1, 159f\n"
- "ldr d11, [x13], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "mov x20, #0x28\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
- "tbz x14, #0, 164f\n"
- "ld1 { v11.s }[2], [x13]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
- "ld1 { v27.s }[2], [x22]\n"
+ "tbz x13, #1, 159f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "ldr d27, [x21], #0x8\n"
+ "tbz x13, #0, 164f\n"
+ "ld1 { v11.s }[2], [x12]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v22.s }[2], [x22]\n"
+ "ld1 { v27.s }[2], [x21]\n"
"b 164f\n"
"159:" // Height 5: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
- "tbz x14, #0, 164f\n"
- "ldr s11, [x13, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
- "ldr s27, [x22, #0x0]\n"
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 164f\n"
+ "ldr s11, [x12, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s22, [x22, #0x0]\n"
+ "ldr s27, [x21, #0x0]\n"
"b 164f\n"
"160:" // Height 5: Partial accumulate: partial_4_0
- "tbz x14, #2, 162f\n"
- "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
- "tbz x14, #1, 161f\n"
- "ldr d10, [x13], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "mov x20, #0x18\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
- "tbz x14, #0, 164f\n"
- "ld1 { v10.s }[2], [x13]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
- "ld1 { v26.s }[2], [x22]\n"
+ "tbz x13, #2, 162f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
+ "tbz x13, #1, 161f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d26, [x21], #0x8\n"
+ "tbz x13, #0, 164f\n"
+ "ld1 { v10.s }[2], [x12]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v21.s }[2], [x22]\n"
+ "ld1 { v26.s }[2], [x21]\n"
"b 164f\n"
"161:" // Height 5: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
- "tbz x14, #0, 164f\n"
- "ldr s10, [x13, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
- "ldr s26, [x22, #0x0]\n"
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 164f\n"
+ "ldr s10, [x12, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "ldr s21, [x22, #0x0]\n"
+ "ldr s26, [x21, #0x0]\n"
"b 164f\n"
"162:" // Height 5: Partial accumulate: partial_2_0
- "tbz x14, #1, 163f\n"
- "ldr d9, [x13], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
- "tbz x14, #0, 164f\n"
- "ld1 { v9.s }[2], [x13]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
+ "tbz x13, #1, 163f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "ldr d25, [x21], #0x8\n"
+ "tbz x13, #0, 164f\n"
+ "ld1 { v9.s }[2], [x12]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v20.s }[2], [x22]\n"
+ "ld1 { v25.s }[2], [x21]\n"
"b 164f\n"
"163:" // Height 5: Partial accumulate: partial_1_0
- "ldr s9, [x13, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
+ "ldr s9, [x12, #0x0]\n"
+ "ldr s12, [x24, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s17, [x23, #0x0]\n"
+ "ldr s20, [x22, #0x0]\n"
+ "ldr s25, [x21, #0x0]\n"
"164:" // Height 5: Partial accumulate: Done
- "sub x13, x13, x20\n"
+ "sub x12, x12, x19\n"
"b 166f\n"
"165:" // Height 5: full accumulate
- "ldr q9, [x13, #0x0]\n"
- "ldr q10, [x13, #0x10]\n"
- "ldr q11, [x13, #0x20]\n"
- "ldr q16, [x13, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q17, [x24, #0x0]\n"
- "ldr q18, [x24, #0x10]\n"
- "ldr q19, [x24, #0x20]\n"
- "ldr q24, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q25, [x22, #0x0]\n"
- "ldr q26, [x22, #0x10]\n"
- "ldr q27, [x22, #0x20]\n"
- "ldr q6, [x22, #0x30]\n"
+ "ldr q9, [x12, #0x0]\n"
+ "ldr q10, [x12, #0x10]\n"
+ "ldr q11, [x12, #0x20]\n"
+ "ldr q16, [x12, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q17, [x23, #0x0]\n"
+ "ldr q18, [x23, #0x10]\n"
+ "ldr q19, [x23, #0x20]\n"
+ "ldr q24, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
+ "ldr q25, [x21, #0x0]\n"
+ "ldr q26, [x21, #0x10]\n"
+ "ldr q27, [x21, #0x20]\n"
+ "ldr q6, [x21, #0x30]\n"
"166:" // Height 5: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -2396,43 +2396,43 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"movi v30.16b, #0x0\n"
"movi v31.16b, #0x0\n"
"168:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"169:" // Height 5: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 170f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 171f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
- "add x22, x22, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 171f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
"b 171f\n"
"170:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
"171:" // Height 5: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"blt 174f\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q2, [x25, #0x0]\n"
- "cmp x27, #0x10\n"
- "ldr q3, [x24, #0x0]\n"
- "ldr q4, [x23, #0x0]\n"
- "ldr q5, [x22, #0x0]\n"
- "ldr q7, [x12, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "ldr q2, [x24, #0x0]\n"
+ "cmp x26, #0x10\n"
+ "ldr q3, [x23, #0x0]\n"
+ "ldr q4, [x22, #0x0]\n"
+ "ldr q5, [x21, #0x0]\n"
+ "ldr q7, [x11, #0x0]\n"
"blt 173f\n"
"172:" // Height 5: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
@@ -2441,86 +2441,86 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
- "sub x27, x27, #0x8\n"
+ "sub x26, x26, #0x8\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
"trn2 v5.2d, v5.2d, v6.2d\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q6, [x11, #0x10]\n"
".inst 0x6e47ec98 // bfmmla v24.4s, v4.8h, v7.8h\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
".inst 0x6e46ec9c // bfmmla v28.4s, v4.8h, v6.8h\n"
- "ldr q6, [x11, #0x10]\n"
+ "ldr q6, [x10, #0x10]\n"
".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
- "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec99 // bfmmla v25.4s, v4.8h, v7.8h\n"
- "ldr q7, [x10, #0x0]\n"
- "add x25, x25, #0x10\n"
+ "ldr q7, [x9, #0x0]\n"
+ "add x24, x24, #0x10\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
- "add x24, x24, #0x10\n"
"add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
".inst 0x6e46ec9d // bfmmla v29.4s, v4.8h, v6.8h\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
- "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9a // bfmmla v26.4s, v4.8h, v7.8h\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec9e // bfmmla v30.4s, v4.8h, v6.8h\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q6, [x28, #0x10]\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9b // bfmmla v27.4s, v4.8h, v7.8h\n"
- "ldr q7, [x12, #0x20]\n"
+ "ldr q7, [x11, #0x20]\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
- "ldr q2, [x25, #0x0]\n"
+ "ldr q2, [x24, #0x0]\n"
".inst 0x6e46ec9f // bfmmla v31.4s, v4.8h, v6.8h\n"
- "ldr q6, [x12, #0x30]\n"
- "ldr q4, [x23, #0x0]\n"
+ "ldr q6, [x11, #0x30]\n"
+ "ldr q4, [x22, #0x0]\n"
".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
".inst 0x6e47ec70 // bfmmla v16.4s, v3.8h, v7.8h\n"
".inst 0x6e47ecb8 // bfmmla v24.4s, v5.8h, v7.8h\n"
- "ldr q7, [x11, #0x20]\n"
- "add x12, x12, #0x40\n"
+ "ldr q7, [x10, #0x20]\n"
+ "add x11, x11, #0x40\n"
".inst 0x6e46ec2c // bfmmla v12.4s, v1.8h, v6.8h\n"
".inst 0x6e46ec74 // bfmmla v20.4s, v3.8h, v6.8h\n"
".inst 0x6e46ecbc // bfmmla v28.4s, v5.8h, v6.8h\n"
- "ldr q6, [x11, #0x30]\n"
+ "ldr q6, [x10, #0x30]\n"
".inst 0x6e47ec29 // bfmmla v9.4s, v1.8h, v7.8h\n"
- "add x11, x11, #0x40\n"
+ "add x10, x10, #0x40\n"
".inst 0x6e47ec71 // bfmmla v17.4s, v3.8h, v7.8h\n"
".inst 0x6e47ecb9 // bfmmla v25.4s, v5.8h, v7.8h\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x6e46ec2d // bfmmla v13.4s, v1.8h, v6.8h\n"
".inst 0x6e46ec75 // bfmmla v21.4s, v3.8h, v6.8h\n"
".inst 0x6e46ecbd // bfmmla v29.4s, v5.8h, v6.8h\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x6e47ec2a // bfmmla v10.4s, v1.8h, v7.8h\n"
- "add x10, x10, #0x40\n"
+ "add x9, x9, #0x40\n"
".inst 0x6e47ec72 // bfmmla v18.4s, v3.8h, v7.8h\n"
".inst 0x6e47ecba // bfmmla v26.4s, v5.8h, v7.8h\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
".inst 0x6e46ec76 // bfmmla v22.4s, v3.8h, v6.8h\n"
".inst 0x6e46ecbe // bfmmla v30.4s, v5.8h, v6.8h\n"
- "ldr q6, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q6, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
".inst 0x6e47ec2b // bfmmla v11.4s, v1.8h, v7.8h\n"
".inst 0x6e47ec73 // bfmmla v19.4s, v3.8h, v7.8h\n"
".inst 0x6e47ecbb // bfmmla v27.4s, v5.8h, v7.8h\n"
- "ldr q7, [x12, #0x0]\n"
+ "ldr q7, [x11, #0x0]\n"
".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
- "ldr q1, [x26, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
".inst 0x6e46ec77 // bfmmla v23.4s, v3.8h, v6.8h\n"
- "ldr q3, [x24, #0x0]\n"
+ "ldr q3, [x23, #0x0]\n"
".inst 0x6e46ecbf // bfmmla v31.4s, v5.8h, v6.8h\n"
- "ldr q5, [x22, #0x0]\n"
+ "ldr q5, [x21, #0x0]\n"
"bge 172b\n"
"173:" // Height 5: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
@@ -2529,73 +2529,73 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
- "sub x27, x27, #0x8\n"
+ "sub x26, x26, #0x8\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
"trn2 v5.2d, v5.2d, v6.2d\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q6, [x11, #0x10]\n"
".inst 0x6e47ec98 // bfmmla v24.4s, v4.8h, v7.8h\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
- "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
".inst 0x6e46ec9c // bfmmla v28.4s, v4.8h, v6.8h\n"
- "ldr q6, [x11, #0x10]\n"
+ "ldr q6, [x10, #0x10]\n"
".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
- "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec99 // bfmmla v25.4s, v4.8h, v7.8h\n"
- "ldr q7, [x10, #0x0]\n"
- "add x24, x24, #0x10\n"
+ "ldr q7, [x9, #0x0]\n"
+ "add x23, x23, #0x10\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
- "add x23, x23, #0x10\n"
"add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
".inst 0x6e46ec9d // bfmmla v29.4s, v4.8h, v6.8h\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9a // bfmmla v26.4s, v4.8h, v7.8h\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec9e // bfmmla v30.4s, v4.8h, v6.8h\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q6, [x28, #0x10]\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9b // bfmmla v27.4s, v4.8h, v7.8h\n"
- "ldr q7, [x12, #0x20]\n"
+ "ldr q7, [x11, #0x20]\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec9f // bfmmla v31.4s, v4.8h, v6.8h\n"
- "ldr q6, [x12, #0x30]\n"
+ "ldr q6, [x11, #0x30]\n"
".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
- "add x12, x12, #0x40\n"
+ "add x11, x11, #0x40\n"
".inst 0x6e47ec70 // bfmmla v16.4s, v3.8h, v7.8h\n"
".inst 0x6e47ecb8 // bfmmla v24.4s, v5.8h, v7.8h\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
".inst 0x6e46ec2c // bfmmla v12.4s, v1.8h, v6.8h\n"
".inst 0x6e46ec74 // bfmmla v20.4s, v3.8h, v6.8h\n"
".inst 0x6e46ecbc // bfmmla v28.4s, v5.8h, v6.8h\n"
- "ldr q6, [x11, #0x30]\n"
+ "ldr q6, [x10, #0x30]\n"
".inst 0x6e47ec29 // bfmmla v9.4s, v1.8h, v7.8h\n"
- "add x11, x11, #0x40\n"
+ "add x10, x10, #0x40\n"
".inst 0x6e47ec71 // bfmmla v17.4s, v3.8h, v7.8h\n"
".inst 0x6e47ecb9 // bfmmla v25.4s, v5.8h, v7.8h\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x6e46ec2d // bfmmla v13.4s, v1.8h, v6.8h\n"
".inst 0x6e46ec75 // bfmmla v21.4s, v3.8h, v6.8h\n"
".inst 0x6e46ecbd // bfmmla v29.4s, v5.8h, v6.8h\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x6e47ec2a // bfmmla v10.4s, v1.8h, v7.8h\n"
- "add x10, x10, #0x40\n"
+ "add x9, x9, #0x40\n"
".inst 0x6e47ec72 // bfmmla v18.4s, v3.8h, v7.8h\n"
".inst 0x6e47ecba // bfmmla v26.4s, v5.8h, v7.8h\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
".inst 0x6e46ec76 // bfmmla v22.4s, v3.8h, v6.8h\n"
".inst 0x6e46ecbe // bfmmla v30.4s, v5.8h, v6.8h\n"
- "ldr q6, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q6, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
".inst 0x6e47ec2b // bfmmla v11.4s, v1.8h, v7.8h\n"
".inst 0x6e47ec73 // bfmmla v19.4s, v3.8h, v7.8h\n"
".inst 0x6e47ecbb // bfmmla v27.4s, v5.8h, v7.8h\n"
@@ -2603,51 +2603,51 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
".inst 0x6e46ec77 // bfmmla v23.4s, v3.8h, v6.8h\n"
".inst 0x6e46ecbf // bfmmla v31.4s, v5.8h, v6.8h\n"
"174:" // Height 5: Multiply loop: Main loop skip
- "cbz x27, 179f\n"
- "cmp x27, #0x4\n"
+ "cbz x26, 179f\n"
+ "cmp x26, #0x4\n"
"blt 176f\n"
"175:" // Height 5: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d2, [x25], #0x8\n"
+ "ldr d1, [x25], #0x8\n"
+ "ldr d2, [x24], #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "ldr d3, [x24], #0x8\n"
- "ldr d4, [x23], #0x8\n"
+ "ldr d3, [x23], #0x8\n"
+ "ldr d4, [x22], #0x8\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
- "sub x27, x27, #0x4\n"
- "ldr d5, [x22], #0x8\n"
- "ldr q6, [x12, #0x0]\n"
+ "sub x26, x26, #0x4\n"
+ "ldr d5, [x21], #0x8\n"
+ "ldr q6, [x11, #0x0]\n"
"trn1 v4.2d, v5.2d, v7.2d\n"
".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
- "ldr q7, [x12, #0x10]\n"
+ "ldr q7, [x11, #0x10]\n"
".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec98 // bfmmla v24.4s, v4.8h, v6.8h\n"
- "ldr q6, [x11, #0x0]\n"
+ "ldr q6, [x10, #0x0]\n"
".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
- "cmp x27, #0x4\n"
- "add x12, x12, #0x20\n"
+ "cmp x26, #0x4\n"
+ "add x11, x11, #0x20\n"
".inst 0x6e47ec9c // bfmmla v28.4s, v4.8h, v7.8h\n"
- "ldr q7, [x11, #0x10]\n"
+ "ldr q7, [x10, #0x10]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- "add x11, x11, #0x20\n"
+ "add x10, x10, #0x20\n"
".inst 0x6e46ec51 // bfmmla v17.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec99 // bfmmla v25.4s, v4.8h, v6.8h\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec55 // bfmmla v21.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9d // bfmmla v29.4s, v4.8h, v7.8h\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q7, [x9, #0x10]\n"
".inst 0x6e46ec0a // bfmmla v10.4s, v0.8h, v6.8h\n"
- "add x10, x10, #0x20\n"
+ "add x9, x9, #0x20\n"
".inst 0x6e46ec52 // bfmmla v18.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec9a // bfmmla v26.4s, v4.8h, v6.8h\n"
- "ldr q6, [x9, #0x0]\n"
+ "ldr q6, [x28, #0x0]\n"
".inst 0x6e47ec0e // bfmmla v14.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec56 // bfmmla v22.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9e // bfmmla v30.4s, v4.8h, v7.8h\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
- "add x9, x9, #0x20\n"
+ "add x28, x28, #0x20\n"
".inst 0x6e46ec53 // bfmmla v19.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec9b // bfmmla v27.4s, v4.8h, v6.8h\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
@@ -2655,60 +2655,60 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
".inst 0x6e47ec9f // bfmmla v31.4s, v4.8h, v7.8h\n"
"bge 175b\n"
"176:" // Height 5: Multiply loop: Skip odd blocks
- "cbz x27, 179f\n"
- "tbz x27, #1, 177f\n"
- "ldr s1, [x26], #0x4\n"
- "ldr s2, [x25], #0x4\n"
- "ldr s3, [x24], #0x4\n"
- "ldr s4, [x23], #0x4\n"
- "ldr s5, [x22], #0x4\n"
- "tbz x27, #0, 178f\n"
- "ld1 { v1.h }[2], [x26]\n"
- "ld1 { v2.h }[2], [x25]\n"
- "ld1 { v3.h }[2], [x24]\n"
- "ld1 { v4.h }[2], [x23]\n"
- "ld1 { v5.h }[2], [x22]\n"
+ "cbz x26, 179f\n"
+ "tbz x26, #1, 177f\n"
+ "ldr s1, [x25], #0x4\n"
+ "ldr s2, [x24], #0x4\n"
+ "ldr s3, [x23], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
+ "ldr s5, [x21], #0x4\n"
+ "tbz x26, #0, 178f\n"
+ "ld1 { v1.h }[2], [x25]\n"
+ "ld1 { v2.h }[2], [x24]\n"
+ "ld1 { v3.h }[2], [x23]\n"
+ "ld1 { v4.h }[2], [x22]\n"
+ "ld1 { v5.h }[2], [x21]\n"
"b 178f\n"
"177:" // Height 5: Multiply loop: Ragged operand read: partial_1_0
- "ldr h1, [x26, #0x0]\n"
- "ldr h2, [x25, #0x0]\n"
- "ldr h3, [x24, #0x0]\n"
- "ldr h4, [x23, #0x0]\n"
- "ldr h5, [x22, #0x0]\n"
+ "ldr h1, [x25, #0x0]\n"
+ "ldr h2, [x24, #0x0]\n"
+ "ldr h3, [x23, #0x0]\n"
+ "ldr h4, [x22, #0x0]\n"
+ "ldr h5, [x21, #0x0]\n"
"178:" // Height 5: Multiply loop: Ragged operand read: Done
- "ldr q7, [x12, #0x0]\n"
+ "ldr q7, [x11, #0x0]\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q6, [x11, #0x10]\n"
".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec98 // bfmmla v24.4s, v4.8h, v7.8h\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
- "add x12, x12, #0x20\n"
+ "add x11, x11, #0x20\n"
".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec9c // bfmmla v28.4s, v4.8h, v6.8h\n"
- "ldr q6, [x11, #0x10]\n"
- "add x11, x11, #0x20\n"
+ "ldr q6, [x10, #0x10]\n"
+ "add x10, x10, #0x20\n"
".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec99 // bfmmla v25.4s, v4.8h, v7.8h\n"
- "ldr q7, [x10, #0x0]\n"
+ "ldr q7, [x9, #0x0]\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec9d // bfmmla v29.4s, v4.8h, v6.8h\n"
- "ldr q6, [x10, #0x10]\n"
- "add x10, x10, #0x20\n"
+ "ldr q6, [x9, #0x10]\n"
+ "add x9, x9, #0x20\n"
".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9a // bfmmla v26.4s, v4.8h, v7.8h\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec9e // bfmmla v30.4s, v4.8h, v6.8h\n"
- "ldr q6, [x9, #0x10]\n"
- "add x9, x9, #0x20\n"
+ "ldr q6, [x28, #0x10]\n"
+ "add x28, x28, #0x20\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9b // bfmmla v27.4s, v4.8h, v7.8h\n"
@@ -2716,18 +2716,18 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec9f // bfmmla v31.4s, v4.8h, v6.8h\n"
"179:" // Height 5: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 169b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
- "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
- "add x22, x23, x20, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
@@ -2746,10 +2746,10 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"uzp1 v26.2d, v26.2d, v30.2d\n"
"uzp1 v27.2d, v27.2d, v31.2d\n"
"tbz %x[flags], #1, 180f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.4s }, [x19]\n"
"fmin v7.4s, v7.4s, v1.4s\n"
"fmin v12.4s, v12.4s, v1.4s\n"
"fmin v13.4s, v13.4s, v1.4s\n"
@@ -2791,183 +2791,183 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"fmax v26.4s, v26.4s, v0.4s\n"
"fmax v27.4s, v27.4s, v0.4s\n"
"180:" // Height 5: No activation
- "cmp x14, #0x10\n"
+ "cmp x13, #0x10\n"
"bge 189f\n"
- "tbz x14, #3, 184f\n"
- "st1 { v7.4s }, [x13], #0x10\n"
- "st1 { v12.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v9.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
- "st1 { v25.4s }, [x22], #0x10\n"
- "tbz x14, #2, 182f\n"
- "st1 { v13.4s }, [x13], #0x10\n"
- "st1 { v10.4s }, [x25], #0x10\n"
- "st1 { v21.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "st1 { v26.4s }, [x22], #0x10\n"
- "tbz x14, #1, 181f\n"
- "str d14, [x13], #0x8\n"
- "str d11, [x25], #0x8\n"
- "str d22, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
- "tbz x14, #0, 188f\n"
- "st1 { v14.s }[2], [x13]\n"
- "st1 { v11.s }[2], [x25]\n"
- "st1 { v22.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
- "st1 { v27.s }[2], [x22]\n"
+ "tbz x13, #3, 184f\n"
+ "st1 { v7.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
+ "st1 { v25.4s }, [x21], #0x10\n"
+ "tbz x13, #2, 182f\n"
+ "st1 { v13.4s }, [x12], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
+ "st1 { v21.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
+ "st1 { v26.4s }, [x21], #0x10\n"
+ "tbz x13, #1, 181f\n"
+ "str d14, [x12], #0x8\n"
+ "str d11, [x24], #0x8\n"
+ "str d22, [x23], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "str d27, [x21], #0x8\n"
+ "tbz x13, #0, 188f\n"
+ "st1 { v14.s }[2], [x12]\n"
+ "st1 { v11.s }[2], [x24]\n"
+ "st1 { v22.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x22]\n"
+ "st1 { v27.s }[2], [x21]\n"
"b 188f\n"
"181:" // Height 5: Partial direct writeback: partial_1_12
- "tbz x14, #0, 188f\n"
- "str s14, [x13, #0x0]\n"
- "str s11, [x25, #0x0]\n"
- "str s22, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
- "str s27, [x22, #0x0]\n"
+ "tbz x13, #0, 188f\n"
+ "str s14, [x12, #0x0]\n"
+ "str s11, [x24, #0x0]\n"
+ "str s22, [x23, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
+ "str s27, [x21, #0x0]\n"
"b 188f\n"
"182:" // Height 5: Partial direct writeback: partial_2_8
- "tbz x14, #1, 183f\n"
- "str d13, [x13], #0x8\n"
- "str d10, [x25], #0x8\n"
- "str d21, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
- "tbz x14, #0, 188f\n"
- "st1 { v13.s }[2], [x13]\n"
- "st1 { v10.s }[2], [x25]\n"
- "st1 { v21.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
- "st1 { v26.s }[2], [x22]\n"
+ "tbz x13, #1, 183f\n"
+ "str d13, [x12], #0x8\n"
+ "str d10, [x24], #0x8\n"
+ "str d21, [x23], #0x8\n"
+ "str d18, [x22], #0x8\n"
+ "str d26, [x21], #0x8\n"
+ "tbz x13, #0, 188f\n"
+ "st1 { v13.s }[2], [x12]\n"
+ "st1 { v10.s }[2], [x24]\n"
+ "st1 { v21.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x22]\n"
+ "st1 { v26.s }[2], [x21]\n"
"b 188f\n"
"183:" // Height 5: Partial direct writeback: partial_1_8
- "tbz x14, #0, 188f\n"
- "str s13, [x13, #0x0]\n"
- "str s10, [x25, #0x0]\n"
- "str s21, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
- "str s26, [x22, #0x0]\n"
+ "tbz x13, #0, 188f\n"
+ "str s13, [x12, #0x0]\n"
+ "str s10, [x24, #0x0]\n"
+ "str s21, [x23, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
+ "str s26, [x21, #0x0]\n"
"b 188f\n"
"184:" // Height 5: Partial direct writeback: partial_4_0
- "tbz x14, #2, 186f\n"
- "st1 { v7.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
- "tbz x14, #1, 185f\n"
- "str d12, [x13], #0x8\n"
- "str d9, [x25], #0x8\n"
- "str d20, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
- "tbz x14, #0, 188f\n"
- "st1 { v12.s }[2], [x13]\n"
- "st1 { v9.s }[2], [x25]\n"
- "st1 { v20.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
- "st1 { v25.s }[2], [x22]\n"
+ "tbz x13, #2, 186f\n"
+ "st1 { v7.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
+ "tbz x13, #1, 185f\n"
+ "str d12, [x12], #0x8\n"
+ "str d9, [x24], #0x8\n"
+ "str d20, [x23], #0x8\n"
+ "str d17, [x22], #0x8\n"
+ "str d25, [x21], #0x8\n"
+ "tbz x13, #0, 188f\n"
+ "st1 { v12.s }[2], [x12]\n"
+ "st1 { v9.s }[2], [x24]\n"
+ "st1 { v20.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x22]\n"
+ "st1 { v25.s }[2], [x21]\n"
"b 188f\n"
"185:" // Height 5: Partial direct writeback: partial_1_4
- "tbz x14, #0, 188f\n"
- "str s12, [x13, #0x0]\n"
- "str s9, [x25, #0x0]\n"
- "str s20, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
- "str s25, [x22, #0x0]\n"
+ "tbz x13, #0, 188f\n"
+ "str s12, [x12, #0x0]\n"
+ "str s9, [x24, #0x0]\n"
+ "str s20, [x23, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
+ "str s25, [x21, #0x0]\n"
"b 188f\n"
"186:" // Height 5: Partial direct writeback: partial_2_0
- "tbz x14, #1, 187f\n"
- "str d7, [x13], #0x8\n"
- "str d8, [x25], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
- "tbz x14, #0, 188f\n"
- "st1 { v7.s }[2], [x13]\n"
- "st1 { v8.s }[2], [x25]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
- "st1 { v24.s }[2], [x22]\n"
+ "tbz x13, #1, 187f\n"
+ "str d7, [x12], #0x8\n"
+ "str d8, [x24], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "str d24, [x21], #0x8\n"
+ "tbz x13, #0, 188f\n"
+ "st1 { v7.s }[2], [x12]\n"
+ "st1 { v8.s }[2], [x24]\n"
+ "st1 { v15.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x22]\n"
+ "st1 { v24.s }[2], [x21]\n"
"b 188f\n"
"187:" // Height 5: Partial direct writeback: partial_1_0
- "str s7, [x13, #0x0]\n"
- "str s8, [x25, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
- "str s24, [x22, #0x0]\n"
+ "str s7, [x12, #0x0]\n"
+ "str s8, [x24, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
+ "str s24, [x21, #0x0]\n"
"188:" // Height 5: Partial direct writeback: Done
"b 190f\n"
"189:" // Height 5: Full writeback
- "str q7, [x13, #0x0]\n"
- "str q12, [x13, #0x10]\n"
- "str q13, [x13, #0x20]\n"
- "str q14, [x13, #0x30]\n"
- "add x13, x13, #0x40\n"
- "str q8, [x25, #0x0]\n"
- "str q9, [x25, #0x10]\n"
- "str q10, [x25, #0x20]\n"
- "str q11, [x25, #0x30]\n"
- "str q15, [x24, #0x0]\n"
- "str q20, [x24, #0x10]\n"
- "str q21, [x24, #0x20]\n"
- "str q22, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
- "str q24, [x22, #0x0]\n"
- "str q25, [x22, #0x10]\n"
- "str q26, [x22, #0x20]\n"
- "str q27, [x22, #0x30]\n"
+ "str q7, [x12, #0x0]\n"
+ "str q12, [x12, #0x10]\n"
+ "str q13, [x12, #0x20]\n"
+ "str q14, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "str q8, [x24, #0x0]\n"
+ "str q9, [x24, #0x10]\n"
+ "str q10, [x24, #0x20]\n"
+ "str q11, [x24, #0x30]\n"
+ "str q15, [x23, #0x0]\n"
+ "str q20, [x23, #0x10]\n"
+ "str q21, [x23, #0x20]\n"
+ "str q22, [x23, #0x30]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q17, [x22, #0x10]\n"
+ "str q18, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
+ "str q24, [x21, #0x0]\n"
+ "str q25, [x21, #0x10]\n"
+ "str q26, [x21, #0x20]\n"
+ "str q27, [x21, #0x30]\n"
"190:" // Height 5: Writeback done
- "subs x14, x14, #0x10\n"
+ "subs x13, x13, #0x10\n"
"bgt 154b\n"
"b 230f\n"
"191:" // Height 6
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x21, #0x18\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x15, %x[bias]\n"
- "mov x13, %x[output_ptr]\n"
- "madd %x[output_ptr], x20, x21, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x20, #0x18\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x14, %x[bias]\n"
+ "mov x12, %x[output_ptr]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
"192:" // Height 6: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x20, x9, x20, LSL #1\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0xc\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0xc\n"
"bgt 193f\n"
- "cmp x14, #0x8\n"
- "mov x9, x12\n"
+ "cmp x13, #0x8\n"
+ "mov x28, x11\n"
"bgt 193f\n"
- "cmp x14, #0x4\n"
- "mov x10, x12\n"
+ "cmp x13, #0x4\n"
+ "mov x9, x11\n"
"bgt 193f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"193:" // Height 6: B setup done
- "cbz x15, 194f\n"
- "ldr q8, [x15, #0x0]\n"
- "ldr q9, [x15, #0x10]\n"
+ "cbz x14, 194f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
"zip2 v12.2d, v8.2d, v8.2d\n"
"zip1 v8.2d, v8.2d, v8.2d\n"
- "ldr q10, [x15, #0x20]\n"
- "ldr q11, [x15, #0x30]\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x15, x15, #0x40\n"
+ "add x14, x14, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
"mov v16.16b, v8.16b\n"
@@ -2989,174 +2989,174 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"b 206f\n"
"194:" // Height 6: no bias
"tbz %x[flags], #0, 205f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "cmp x14, #0x10\n"
- "add x21, x22, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "cmp x13, #0x10\n"
+ "add x20, x21, x19, LSL #2\n"
"bge 203f\n"
- "tbz x14, #3, 198f\n"
- "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
- "ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
- "ld1 { v26.4s }, [x22], #0x10\n"
- "ld1 { v29.4s }, [x21], #0x10\n"
- "tbz x14, #2, 196f\n"
- "ld1 { v11.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v19.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
- "ld1 { v27.4s }, [x22], #0x10\n"
- "ld1 { v30.4s }, [x21], #0x10\n"
- "tbz x14, #1, 195f\n"
- "ldr d16, [x13], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "mov x20, #0x38\n"
- "ldr d24, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d6, [x22], #0x8\n"
- "ldr d31, [x21], #0x8\n"
- "tbz x14, #0, 202f\n"
- "ld1 { v16.s }[2], [x13]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v24.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
- "ld1 { v6.s }[2], [x22]\n"
- "ld1 { v31.s }[2], [x21]\n"
+ "tbz x13, #3, 198f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
+ "ld1 { v28.4s }, [x20], #0x10\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v26.4s }, [x21], #0x10\n"
+ "ld1 { v29.4s }, [x20], #0x10\n"
+ "tbz x13, #2, 196f\n"
+ "ld1 { v11.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v19.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
+ "ld1 { v27.4s }, [x21], #0x10\n"
+ "ld1 { v30.4s }, [x20], #0x10\n"
+ "tbz x13, #1, 195f\n"
+ "ldr d16, [x12], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d24, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "ldr d6, [x21], #0x8\n"
+ "ldr d31, [x20], #0x8\n"
+ "tbz x13, #0, 202f\n"
+ "ld1 { v16.s }[2], [x12]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
+ "ld1 { v23.s }[2], [x22]\n"
+ "ld1 { v6.s }[2], [x21]\n"
+ "ld1 { v31.s }[2], [x20]\n"
"b 202f\n"
"195:" // Height 6: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
- "tbz x14, #0, 202f\n"
- "ldr s16, [x13, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s24, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
- "ldr s6, [x22, #0x0]\n"
- "ldr s31, [x21, #0x0]\n"
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 202f\n"
+ "ldr s16, [x12, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
+ "ldr s23, [x22, #0x0]\n"
+ "ldr s6, [x21, #0x0]\n"
+ "ldr s31, [x20, #0x0]\n"
"b 202f\n"
"196:" // Height 6: Partial accumulate: partial_2_8
- "tbz x14, #1, 197f\n"
- "ldr d11, [x13], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "mov x20, #0x28\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
- "ldr d30, [x21], #0x8\n"
- "tbz x14, #0, 202f\n"
- "ld1 { v11.s }[2], [x13]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
- "ld1 { v27.s }[2], [x22]\n"
- "ld1 { v30.s }[2], [x21]\n"
+ "tbz x13, #1, 197f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "ldr d27, [x21], #0x8\n"
+ "ldr d30, [x20], #0x8\n"
+ "tbz x13, #0, 202f\n"
+ "ld1 { v11.s }[2], [x12]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v22.s }[2], [x22]\n"
+ "ld1 { v27.s }[2], [x21]\n"
+ "ld1 { v30.s }[2], [x20]\n"
"b 202f\n"
"197:" // Height 6: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
- "tbz x14, #0, 202f\n"
- "ldr s11, [x13, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
- "ldr s27, [x22, #0x0]\n"
- "ldr s30, [x21, #0x0]\n"
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 202f\n"
+ "ldr s11, [x12, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s22, [x22, #0x0]\n"
+ "ldr s27, [x21, #0x0]\n"
+ "ldr s30, [x20, #0x0]\n"
"b 202f\n"
"198:" // Height 6: Partial accumulate: partial_4_0
- "tbz x14, #2, 200f\n"
- "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
- "tbz x14, #1, 199f\n"
- "ldr d10, [x13], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "mov x20, #0x18\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
- "ldr d29, [x21], #0x8\n"
- "tbz x14, #0, 202f\n"
- "ld1 { v10.s }[2], [x13]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
- "ld1 { v26.s }[2], [x22]\n"
- "ld1 { v29.s }[2], [x21]\n"
+ "tbz x13, #2, 200f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
+ "ld1 { v28.4s }, [x20], #0x10\n"
+ "tbz x13, #1, 199f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d26, [x21], #0x8\n"
+ "ldr d29, [x20], #0x8\n"
+ "tbz x13, #0, 202f\n"
+ "ld1 { v10.s }[2], [x12]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v21.s }[2], [x22]\n"
+ "ld1 { v26.s }[2], [x21]\n"
+ "ld1 { v29.s }[2], [x20]\n"
"b 202f\n"
"199:" // Height 6: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
- "tbz x14, #0, 202f\n"
- "ldr s10, [x13, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
- "ldr s26, [x22, #0x0]\n"
- "ldr s29, [x21, #0x0]\n"
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 202f\n"
+ "ldr s10, [x12, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "ldr s21, [x22, #0x0]\n"
+ "ldr s26, [x21, #0x0]\n"
+ "ldr s29, [x20, #0x0]\n"
"b 202f\n"
"200:" // Height 6: Partial accumulate: partial_2_0
- "tbz x14, #1, 201f\n"
- "ldr d9, [x13], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
- "ldr d28, [x21], #0x8\n"
- "tbz x14, #0, 202f\n"
- "ld1 { v9.s }[2], [x13]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
- "ld1 { v28.s }[2], [x21]\n"
+ "tbz x13, #1, 201f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "ldr d25, [x21], #0x8\n"
+ "ldr d28, [x20], #0x8\n"
+ "tbz x13, #0, 202f\n"
+ "ld1 { v9.s }[2], [x12]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v20.s }[2], [x22]\n"
+ "ld1 { v25.s }[2], [x21]\n"
+ "ld1 { v28.s }[2], [x20]\n"
"b 202f\n"
"201:" // Height 6: Partial accumulate: partial_1_0
- "ldr s9, [x13, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
- "ldr s28, [x21, #0x0]\n"
+ "ldr s9, [x12, #0x0]\n"
+ "ldr s12, [x24, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s17, [x23, #0x0]\n"
+ "ldr s20, [x22, #0x0]\n"
+ "ldr s25, [x21, #0x0]\n"
+ "ldr s28, [x20, #0x0]\n"
"202:" // Height 6: Partial accumulate: Done
- "sub x13, x13, x20\n"
+ "sub x12, x12, x19\n"
"b 204f\n"
"203:" // Height 6: full accumulate
- "ldr q9, [x13, #0x0]\n"
- "ldr q10, [x13, #0x10]\n"
- "ldr q11, [x13, #0x20]\n"
- "ldr q16, [x13, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q17, [x24, #0x0]\n"
- "ldr q18, [x24, #0x10]\n"
- "ldr q19, [x24, #0x20]\n"
- "ldr q24, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q25, [x22, #0x0]\n"
- "ldr q26, [x22, #0x10]\n"
- "ldr q27, [x22, #0x20]\n"
- "ldr q6, [x22, #0x30]\n"
- "ldr q28, [x21, #0x0]\n"
- "ldr q29, [x21, #0x10]\n"
- "ldr q30, [x21, #0x20]\n"
- "ldr q31, [x21, #0x30]\n"
+ "ldr q9, [x12, #0x0]\n"
+ "ldr q10, [x12, #0x10]\n"
+ "ldr q11, [x12, #0x20]\n"
+ "ldr q16, [x12, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q17, [x23, #0x0]\n"
+ "ldr q18, [x23, #0x10]\n"
+ "ldr q19, [x23, #0x20]\n"
+ "ldr q24, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
+ "ldr q25, [x21, #0x0]\n"
+ "ldr q26, [x21, #0x10]\n"
+ "ldr q27, [x21, #0x20]\n"
+ "ldr q6, [x21, #0x30]\n"
+ "ldr q28, [x20, #0x0]\n"
+ "ldr q29, [x20, #0x10]\n"
+ "ldr q30, [x20, #0x20]\n"
+ "ldr q31, [x20, #0x30]\n"
"204:" // Height 6: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -3209,213 +3209,213 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"movi v30.16b, #0x0\n"
"movi v31.16b, #0x0\n"
"206:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"207:" // Height 6: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 208f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 209f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
- "add x22, x22, x20, LSL #1\n"
- "add x21, x21, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 209f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
+ "add x20, x20, x19, LSL #1\n"
"b 209f\n"
"208:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
"209:" // Height 6: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"blt 212f\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q2, [x25, #0x0]\n"
- "cmp x27, #0x10\n"
- "ldr q3, [x24, #0x0]\n"
- "ldr q4, [x23, #0x0]\n"
- "ldr q5, [x22, #0x0]\n"
- "ldr q6, [x21, #0x0]\n"
- "ldr q7, [x12, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "ldr q2, [x24, #0x0]\n"
+ "cmp x26, #0x10\n"
+ "ldr q3, [x23, #0x0]\n"
+ "ldr q4, [x22, #0x0]\n"
+ "ldr q5, [x21, #0x0]\n"
+ "ldr q6, [x20, #0x0]\n"
+ "ldr q7, [x11, #0x0]\n"
"blt 211f\n"
"210:" // Height 6: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
- "sub x27, x27, #0x8\n"
+ "sub x26, x26, #0x8\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
"trn2 v5.2d, v5.2d, v6.2d\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q6, [x11, #0x10]\n"
".inst 0x6e47ec98 // bfmmla v24.4s, v4.8h, v7.8h\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
- "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
".inst 0x6e46ec9c // bfmmla v28.4s, v4.8h, v6.8h\n"
- "ldr q6, [x11, #0x10]\n"
+ "ldr q6, [x10, #0x10]\n"
".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
- "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec99 // bfmmla v25.4s, v4.8h, v7.8h\n"
- "ldr q7, [x10, #0x0]\n"
- "add x24, x24, #0x10\n"
+ "ldr q7, [x9, #0x0]\n"
+ "add x23, x23, #0x10\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
- "add x23, x23, #0x10\n"
"add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
".inst 0x6e46ec9d // bfmmla v29.4s, v4.8h, v6.8h\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
- "add x21, x21, #0x10\n"
+ "add x20, x20, #0x10\n"
".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9a // bfmmla v26.4s, v4.8h, v7.8h\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec9e // bfmmla v30.4s, v4.8h, v6.8h\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q6, [x28, #0x10]\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9b // bfmmla v27.4s, v4.8h, v7.8h\n"
- "ldr q7, [x12, #0x20]\n"
+ "ldr q7, [x11, #0x20]\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
- "ldr q2, [x25, #0x0]\n"
+ "ldr q2, [x24, #0x0]\n"
".inst 0x6e46ec9f // bfmmla v31.4s, v4.8h, v6.8h\n"
- "ldr q6, [x12, #0x30]\n"
- "ldr q4, [x23, #0x0]\n"
+ "ldr q6, [x11, #0x30]\n"
+ "ldr q4, [x22, #0x0]\n"
".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
".inst 0x6e47ec70 // bfmmla v16.4s, v3.8h, v7.8h\n"
".inst 0x6e47ecb8 // bfmmla v24.4s, v5.8h, v7.8h\n"
- "ldr q7, [x11, #0x20]\n"
- "add x12, x12, #0x40\n"
+ "ldr q7, [x10, #0x20]\n"
+ "add x11, x11, #0x40\n"
".inst 0x6e46ec2c // bfmmla v12.4s, v1.8h, v6.8h\n"
".inst 0x6e46ec74 // bfmmla v20.4s, v3.8h, v6.8h\n"
".inst 0x6e46ecbc // bfmmla v28.4s, v5.8h, v6.8h\n"
- "ldr q6, [x11, #0x30]\n"
+ "ldr q6, [x10, #0x30]\n"
".inst 0x6e47ec29 // bfmmla v9.4s, v1.8h, v7.8h\n"
- "add x11, x11, #0x40\n"
+ "add x10, x10, #0x40\n"
".inst 0x6e47ec71 // bfmmla v17.4s, v3.8h, v7.8h\n"
".inst 0x6e47ecb9 // bfmmla v25.4s, v5.8h, v7.8h\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x6e46ec2d // bfmmla v13.4s, v1.8h, v6.8h\n"
".inst 0x6e46ec75 // bfmmla v21.4s, v3.8h, v6.8h\n"
".inst 0x6e46ecbd // bfmmla v29.4s, v5.8h, v6.8h\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x6e47ec2a // bfmmla v10.4s, v1.8h, v7.8h\n"
- "add x10, x10, #0x40\n"
+ "add x9, x9, #0x40\n"
".inst 0x6e47ec72 // bfmmla v18.4s, v3.8h, v7.8h\n"
".inst 0x6e47ecba // bfmmla v26.4s, v5.8h, v7.8h\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
".inst 0x6e46ec76 // bfmmla v22.4s, v3.8h, v6.8h\n"
".inst 0x6e46ecbe // bfmmla v30.4s, v5.8h, v6.8h\n"
- "ldr q6, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q6, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
".inst 0x6e47ec2b // bfmmla v11.4s, v1.8h, v7.8h\n"
".inst 0x6e47ec73 // bfmmla v19.4s, v3.8h, v7.8h\n"
".inst 0x6e47ecbb // bfmmla v27.4s, v5.8h, v7.8h\n"
- "ldr q7, [x12, #0x0]\n"
+ "ldr q7, [x11, #0x0]\n"
".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
- "ldr q1, [x26, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
".inst 0x6e46ec77 // bfmmla v23.4s, v3.8h, v6.8h\n"
- "ldr q3, [x24, #0x0]\n"
+ "ldr q3, [x23, #0x0]\n"
".inst 0x6e46ecbf // bfmmla v31.4s, v5.8h, v6.8h\n"
- "ldr q5, [x22, #0x0]\n"
- "ldr q6, [x21, #0x0]\n"
+ "ldr q5, [x21, #0x0]\n"
+ "ldr q6, [x20, #0x0]\n"
"bge 210b\n"
"211:" // Height 6: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
- "sub x27, x27, #0x8\n"
+ "sub x26, x26, #0x8\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
- "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
"trn2 v5.2d, v5.2d, v6.2d\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q6, [x11, #0x10]\n"
".inst 0x6e47ec98 // bfmmla v24.4s, v4.8h, v7.8h\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
- "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
".inst 0x6e46ec9c // bfmmla v28.4s, v4.8h, v6.8h\n"
- "ldr q6, [x11, #0x10]\n"
+ "ldr q6, [x10, #0x10]\n"
".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
- "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec99 // bfmmla v25.4s, v4.8h, v7.8h\n"
- "ldr q7, [x10, #0x0]\n"
- "add x23, x23, #0x10\n"
+ "ldr q7, [x9, #0x0]\n"
+ "add x22, x22, #0x10\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
- "add x22, x22, #0x10\n"
"add x21, x21, #0x10\n"
+ "add x20, x20, #0x10\n"
".inst 0x6e46ec9d // bfmmla v29.4s, v4.8h, v6.8h\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9a // bfmmla v26.4s, v4.8h, v7.8h\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec9e // bfmmla v30.4s, v4.8h, v6.8h\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q6, [x28, #0x10]\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9b // bfmmla v27.4s, v4.8h, v7.8h\n"
- "ldr q7, [x12, #0x20]\n"
+ "ldr q7, [x11, #0x20]\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec9f // bfmmla v31.4s, v4.8h, v6.8h\n"
- "ldr q6, [x12, #0x30]\n"
+ "ldr q6, [x11, #0x30]\n"
".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
- "add x12, x12, #0x40\n"
+ "add x11, x11, #0x40\n"
".inst 0x6e47ec70 // bfmmla v16.4s, v3.8h, v7.8h\n"
".inst 0x6e47ecb8 // bfmmla v24.4s, v5.8h, v7.8h\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
".inst 0x6e46ec2c // bfmmla v12.4s, v1.8h, v6.8h\n"
".inst 0x6e46ec74 // bfmmla v20.4s, v3.8h, v6.8h\n"
".inst 0x6e46ecbc // bfmmla v28.4s, v5.8h, v6.8h\n"
- "ldr q6, [x11, #0x30]\n"
+ "ldr q6, [x10, #0x30]\n"
".inst 0x6e47ec29 // bfmmla v9.4s, v1.8h, v7.8h\n"
- "add x11, x11, #0x40\n"
+ "add x10, x10, #0x40\n"
".inst 0x6e47ec71 // bfmmla v17.4s, v3.8h, v7.8h\n"
".inst 0x6e47ecb9 // bfmmla v25.4s, v5.8h, v7.8h\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x6e46ec2d // bfmmla v13.4s, v1.8h, v6.8h\n"
".inst 0x6e46ec75 // bfmmla v21.4s, v3.8h, v6.8h\n"
".inst 0x6e46ecbd // bfmmla v29.4s, v5.8h, v6.8h\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x6e47ec2a // bfmmla v10.4s, v1.8h, v7.8h\n"
- "add x10, x10, #0x40\n"
+ "add x9, x9, #0x40\n"
".inst 0x6e47ec72 // bfmmla v18.4s, v3.8h, v7.8h\n"
".inst 0x6e47ecba // bfmmla v26.4s, v5.8h, v7.8h\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
".inst 0x6e46ec76 // bfmmla v22.4s, v3.8h, v6.8h\n"
".inst 0x6e46ecbe // bfmmla v30.4s, v5.8h, v6.8h\n"
- "ldr q6, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q6, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
".inst 0x6e47ec2b // bfmmla v11.4s, v1.8h, v7.8h\n"
".inst 0x6e47ec73 // bfmmla v19.4s, v3.8h, v7.8h\n"
".inst 0x6e47ecbb // bfmmla v27.4s, v5.8h, v7.8h\n"
@@ -3423,51 +3423,51 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
".inst 0x6e46ec77 // bfmmla v23.4s, v3.8h, v6.8h\n"
".inst 0x6e46ecbf // bfmmla v31.4s, v5.8h, v6.8h\n"
"212:" // Height 6: Multiply loop: Main loop skip
- "cbz x27, 217f\n"
- "cmp x27, #0x4\n"
+ "cbz x26, 217f\n"
+ "cmp x26, #0x4\n"
"blt 214f\n"
"213:" // Height 6: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d2, [x25], #0x8\n"
+ "ldr d1, [x25], #0x8\n"
+ "ldr d2, [x24], #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "sub x27, x27, #0x4\n"
- "ldr d3, [x24], #0x8\n"
- "ldr d4, [x23], #0x8\n"
+ "sub x26, x26, #0x4\n"
+ "ldr d3, [x23], #0x8\n"
+ "ldr d4, [x22], #0x8\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
- "cmp x27, #0x4\n"
- "ldr d5, [x22], #0x8\n"
- "ldr d7, [x21], #0x8\n"
+ "cmp x26, #0x4\n"
+ "ldr d5, [x21], #0x8\n"
+ "ldr d7, [x20], #0x8\n"
"trn1 v4.2d, v5.2d, v7.2d\n"
- "ldr q6, [x12, #0x0]\n"
- "ldr q7, [x12, #0x10]\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x11, #0x10]\n"
".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec98 // bfmmla v24.4s, v4.8h, v6.8h\n"
- "ldr q6, [x11, #0x0]\n"
+ "ldr q6, [x10, #0x0]\n"
".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
- "add x12, x12, #0x20\n"
+ "add x11, x11, #0x20\n"
".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9c // bfmmla v28.4s, v4.8h, v7.8h\n"
- "ldr q7, [x11, #0x10]\n"
- "add x11, x11, #0x20\n"
+ "ldr q7, [x10, #0x10]\n"
+ "add x10, x10, #0x20\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec51 // bfmmla v17.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec99 // bfmmla v25.4s, v4.8h, v6.8h\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec55 // bfmmla v21.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9d // bfmmla v29.4s, v4.8h, v7.8h\n"
- "ldr q7, [x10, #0x10]\n"
- "add x10, x10, #0x20\n"
+ "ldr q7, [x9, #0x10]\n"
+ "add x9, x9, #0x20\n"
".inst 0x6e46ec0a // bfmmla v10.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec52 // bfmmla v18.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec9a // bfmmla v26.4s, v4.8h, v6.8h\n"
- "ldr q6, [x9, #0x0]\n"
+ "ldr q6, [x28, #0x0]\n"
".inst 0x6e47ec0e // bfmmla v14.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec56 // bfmmla v22.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9e // bfmmla v30.4s, v4.8h, v7.8h\n"
- "ldr q7, [x9, #0x10]\n"
- "add x9, x9, #0x20\n"
+ "ldr q7, [x28, #0x10]\n"
+ "add x28, x28, #0x20\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec53 // bfmmla v19.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec9b // bfmmla v27.4s, v4.8h, v6.8h\n"
@@ -3476,85 +3476,85 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
".inst 0x6e47ec9f // bfmmla v31.4s, v4.8h, v7.8h\n"
"bge 213b\n"
"214:" // Height 6: Multiply loop: Skip odd blocks
- "cbz x27, 217f\n"
- "tbz x27, #1, 215f\n"
- "ldr s1, [x26], #0x4\n"
- "ldr s2, [x25], #0x4\n"
- "ldr s3, [x24], #0x4\n"
- "ldr s4, [x23], #0x4\n"
- "ldr s5, [x22], #0x4\n"
- "ldr s6, [x21], #0x4\n"
- "tbz x27, #0, 216f\n"
- "ld1 { v1.h }[2], [x26]\n"
- "ld1 { v2.h }[2], [x25]\n"
- "ld1 { v3.h }[2], [x24]\n"
- "ld1 { v4.h }[2], [x23]\n"
- "ld1 { v5.h }[2], [x22]\n"
- "ld1 { v6.h }[2], [x21]\n"
+ "cbz x26, 217f\n"
+ "tbz x26, #1, 215f\n"
+ "ldr s1, [x25], #0x4\n"
+ "ldr s2, [x24], #0x4\n"
+ "ldr s3, [x23], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
+ "ldr s5, [x21], #0x4\n"
+ "ldr s6, [x20], #0x4\n"
+ "tbz x26, #0, 216f\n"
+ "ld1 { v1.h }[2], [x25]\n"
+ "ld1 { v2.h }[2], [x24]\n"
+ "ld1 { v3.h }[2], [x23]\n"
+ "ld1 { v4.h }[2], [x22]\n"
+ "ld1 { v5.h }[2], [x21]\n"
+ "ld1 { v6.h }[2], [x20]\n"
"b 216f\n"
"215:" // Height 6: Multiply loop: Ragged operand read: partial_1_0
- "ldr h1, [x26, #0x0]\n"
- "ldr h2, [x25, #0x0]\n"
- "ldr h3, [x24, #0x0]\n"
- "ldr h4, [x23, #0x0]\n"
- "ldr h5, [x22, #0x0]\n"
- "ldr h6, [x21, #0x0]\n"
+ "ldr h1, [x25, #0x0]\n"
+ "ldr h2, [x24, #0x0]\n"
+ "ldr h3, [x23, #0x0]\n"
+ "ldr h4, [x22, #0x0]\n"
+ "ldr h5, [x21, #0x0]\n"
+ "ldr h6, [x20, #0x0]\n"
"216:" // Height 6: Multiply loop: Ragged operand read: Done
- "ldr q7, [x12, #0x0]\n"
+ "ldr q7, [x11, #0x0]\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q6, [x11, #0x10]\n"
".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec98 // bfmmla v24.4s, v4.8h, v7.8h\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
- "add x12, x12, #0x20\n"
+ "add x11, x11, #0x20\n"
".inst 0x6e46ec9c // bfmmla v28.4s, v4.8h, v6.8h\n"
- "ldr q6, [x11, #0x10]\n"
+ "ldr q6, [x10, #0x10]\n"
".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
- "add x11, x11, #0x20\n"
+ "add x10, x10, #0x20\n"
".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec99 // bfmmla v25.4s, v4.8h, v7.8h\n"
- "ldr q7, [x10, #0x0]\n"
+ "ldr q7, [x9, #0x0]\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec9d // bfmmla v29.4s, v4.8h, v6.8h\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
- "add x10, x10, #0x20\n"
+ "add x9, x9, #0x20\n"
".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9a // bfmmla v26.4s, v4.8h, v7.8h\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec9e // bfmmla v30.4s, v4.8h, v6.8h\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q6, [x28, #0x10]\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
- "add x9, x9, #0x20\n"
+ "add x28, x28, #0x20\n"
".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9b // bfmmla v27.4s, v4.8h, v7.8h\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec9f // bfmmla v31.4s, v4.8h, v6.8h\n"
"217:" // Height 6: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 207b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
- "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
@@ -3575,10 +3575,10 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"uzp1 v30.2d, v27.2d, v31.2d\n"
"uzp2 v27.2d, v27.2d, v31.2d\n"
"tbz %x[flags], #1, 218f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.4s }, [x19]\n"
"fmin v7.4s, v7.4s, v1.4s\n"
"fmin v12.4s, v12.4s, v1.4s\n"
"fmin v13.4s, v13.4s, v1.4s\n"
@@ -3628,178 +3628,178 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"fmax v26.4s, v26.4s, v0.4s\n"
"fmax v27.4s, v27.4s, v0.4s\n"
"218:" // Height 6: No activation
- "cmp x14, #0x10\n"
+ "cmp x13, #0x10\n"
"bge 227f\n"
- "tbz x14, #3, 222f\n"
- "st1 { v7.4s }, [x13], #0x10\n"
- "st1 { v12.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v9.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "st1 { v23.4s }, [x22], #0x10\n"
- "st1 { v28.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
- "st1 { v25.4s }, [x21], #0x10\n"
- "tbz x14, #2, 220f\n"
- "st1 { v13.4s }, [x13], #0x10\n"
- "st1 { v10.4s }, [x25], #0x10\n"
- "st1 { v21.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "st1 { v29.4s }, [x22], #0x10\n"
- "st1 { v26.4s }, [x21], #0x10\n"
- "tbz x14, #1, 219f\n"
- "str d14, [x13], #0x8\n"
- "str d11, [x25], #0x8\n"
- "str d22, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
- "str d30, [x22], #0x8\n"
- "str d27, [x21], #0x8\n"
- "tbz x14, #0, 226f\n"
- "st1 { v14.s }[2], [x13]\n"
- "st1 { v11.s }[2], [x25]\n"
- "st1 { v22.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
- "st1 { v30.s }[2], [x22]\n"
- "st1 { v27.s }[2], [x21]\n"
+ "tbz x13, #3, 222f\n"
+ "st1 { v7.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
+ "st1 { v23.4s }, [x21], #0x10\n"
+ "st1 { v28.4s }, [x21], #0x10\n"
+ "st1 { v24.4s }, [x20], #0x10\n"
+ "st1 { v25.4s }, [x20], #0x10\n"
+ "tbz x13, #2, 220f\n"
+ "st1 { v13.4s }, [x12], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
+ "st1 { v21.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
+ "st1 { v29.4s }, [x21], #0x10\n"
+ "st1 { v26.4s }, [x20], #0x10\n"
+ "tbz x13, #1, 219f\n"
+ "str d14, [x12], #0x8\n"
+ "str d11, [x24], #0x8\n"
+ "str d22, [x23], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "str d30, [x21], #0x8\n"
+ "str d27, [x20], #0x8\n"
+ "tbz x13, #0, 226f\n"
+ "st1 { v14.s }[2], [x12]\n"
+ "st1 { v11.s }[2], [x24]\n"
+ "st1 { v22.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x22]\n"
+ "st1 { v30.s }[2], [x21]\n"
+ "st1 { v27.s }[2], [x20]\n"
"b 226f\n"
"219:" // Height 6: Partial direct writeback: partial_1_12
- "tbz x14, #0, 226f\n"
- "str s14, [x13, #0x0]\n"
- "str s11, [x25, #0x0]\n"
- "str s22, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
- "str s30, [x22, #0x0]\n"
- "str s27, [x21, #0x0]\n"
+ "tbz x13, #0, 226f\n"
+ "str s14, [x12, #0x0]\n"
+ "str s11, [x24, #0x0]\n"
+ "str s22, [x23, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
+ "str s30, [x21, #0x0]\n"
+ "str s27, [x20, #0x0]\n"
"b 226f\n"
"220:" // Height 6: Partial direct writeback: partial_2_8
- "tbz x14, #1, 221f\n"
- "str d13, [x13], #0x8\n"
- "str d10, [x25], #0x8\n"
- "str d21, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
- "str d29, [x22], #0x8\n"
- "str d26, [x21], #0x8\n"
- "tbz x14, #0, 226f\n"
- "st1 { v13.s }[2], [x13]\n"
- "st1 { v10.s }[2], [x25]\n"
- "st1 { v21.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
- "st1 { v29.s }[2], [x22]\n"
- "st1 { v26.s }[2], [x21]\n"
+ "tbz x13, #1, 221f\n"
+ "str d13, [x12], #0x8\n"
+ "str d10, [x24], #0x8\n"
+ "str d21, [x23], #0x8\n"
+ "str d18, [x22], #0x8\n"
+ "str d29, [x21], #0x8\n"
+ "str d26, [x20], #0x8\n"
+ "tbz x13, #0, 226f\n"
+ "st1 { v13.s }[2], [x12]\n"
+ "st1 { v10.s }[2], [x24]\n"
+ "st1 { v21.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x22]\n"
+ "st1 { v29.s }[2], [x21]\n"
+ "st1 { v26.s }[2], [x20]\n"
"b 226f\n"
"221:" // Height 6: Partial direct writeback: partial_1_8
- "tbz x14, #0, 226f\n"
- "str s13, [x13, #0x0]\n"
- "str s10, [x25, #0x0]\n"
- "str s21, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
- "str s29, [x22, #0x0]\n"
- "str s26, [x21, #0x0]\n"
+ "tbz x13, #0, 226f\n"
+ "str s13, [x12, #0x0]\n"
+ "str s10, [x24, #0x0]\n"
+ "str s21, [x23, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
+ "str s29, [x21, #0x0]\n"
+ "str s26, [x20, #0x0]\n"
"b 226f\n"
"222:" // Height 6: Partial direct writeback: partial_4_0
- "tbz x14, #2, 224f\n"
- "st1 { v7.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v23.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
- "tbz x14, #1, 223f\n"
- "str d12, [x13], #0x8\n"
- "str d9, [x25], #0x8\n"
- "str d20, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
- "str d28, [x22], #0x8\n"
- "str d25, [x21], #0x8\n"
- "tbz x14, #0, 226f\n"
- "st1 { v12.s }[2], [x13]\n"
- "st1 { v9.s }[2], [x25]\n"
- "st1 { v20.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
- "st1 { v28.s }[2], [x22]\n"
- "st1 { v25.s }[2], [x21]\n"
+ "tbz x13, #2, 224f\n"
+ "st1 { v7.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v23.4s }, [x21], #0x10\n"
+ "st1 { v24.4s }, [x20], #0x10\n"
+ "tbz x13, #1, 223f\n"
+ "str d12, [x12], #0x8\n"
+ "str d9, [x24], #0x8\n"
+ "str d20, [x23], #0x8\n"
+ "str d17, [x22], #0x8\n"
+ "str d28, [x21], #0x8\n"
+ "str d25, [x20], #0x8\n"
+ "tbz x13, #0, 226f\n"
+ "st1 { v12.s }[2], [x12]\n"
+ "st1 { v9.s }[2], [x24]\n"
+ "st1 { v20.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x22]\n"
+ "st1 { v28.s }[2], [x21]\n"
+ "st1 { v25.s }[2], [x20]\n"
"b 226f\n"
"223:" // Height 6: Partial direct writeback: partial_1_4
- "tbz x14, #0, 226f\n"
- "str s12, [x13, #0x0]\n"
- "str s9, [x25, #0x0]\n"
- "str s20, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
- "str s28, [x22, #0x0]\n"
- "str s25, [x21, #0x0]\n"
+ "tbz x13, #0, 226f\n"
+ "str s12, [x12, #0x0]\n"
+ "str s9, [x24, #0x0]\n"
+ "str s20, [x23, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
+ "str s28, [x21, #0x0]\n"
+ "str s25, [x20, #0x0]\n"
"b 226f\n"
"224:" // Height 6: Partial direct writeback: partial_2_0
- "tbz x14, #1, 225f\n"
- "str d7, [x13], #0x8\n"
- "str d8, [x25], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d23, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
- "tbz x14, #0, 226f\n"
- "st1 { v7.s }[2], [x13]\n"
- "st1 { v8.s }[2], [x25]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
- "st1 { v23.s }[2], [x22]\n"
- "st1 { v24.s }[2], [x21]\n"
+ "tbz x13, #1, 225f\n"
+ "str d7, [x12], #0x8\n"
+ "str d8, [x24], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "str d23, [x21], #0x8\n"
+ "str d24, [x20], #0x8\n"
+ "tbz x13, #0, 226f\n"
+ "st1 { v7.s }[2], [x12]\n"
+ "st1 { v8.s }[2], [x24]\n"
+ "st1 { v15.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x22]\n"
+ "st1 { v23.s }[2], [x21]\n"
+ "st1 { v24.s }[2], [x20]\n"
"b 226f\n"
"225:" // Height 6: Partial direct writeback: partial_1_0
- "str s7, [x13, #0x0]\n"
- "str s8, [x25, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
- "str s23, [x22, #0x0]\n"
- "str s24, [x21, #0x0]\n"
+ "str s7, [x12, #0x0]\n"
+ "str s8, [x24, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
+ "str s23, [x21, #0x0]\n"
+ "str s24, [x20, #0x0]\n"
"226:" // Height 6: Partial direct writeback: Done
"b 228f\n"
"227:" // Height 6: Full writeback
- "str q7, [x13, #0x0]\n"
- "str q12, [x13, #0x10]\n"
- "str q13, [x13, #0x20]\n"
- "str q14, [x13, #0x30]\n"
- "add x13, x13, #0x40\n"
- "str q8, [x25, #0x0]\n"
- "str q9, [x25, #0x10]\n"
- "str q10, [x25, #0x20]\n"
- "str q11, [x25, #0x30]\n"
- "str q15, [x24, #0x0]\n"
- "str q20, [x24, #0x10]\n"
- "str q21, [x24, #0x20]\n"
- "str q22, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
- "str q23, [x22, #0x0]\n"
- "str q28, [x22, #0x10]\n"
- "str q29, [x22, #0x20]\n"
- "str q30, [x22, #0x30]\n"
- "str q24, [x21, #0x0]\n"
- "str q25, [x21, #0x10]\n"
- "str q26, [x21, #0x20]\n"
- "str q27, [x21, #0x30]\n"
+ "str q7, [x12, #0x0]\n"
+ "str q12, [x12, #0x10]\n"
+ "str q13, [x12, #0x20]\n"
+ "str q14, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "str q8, [x24, #0x0]\n"
+ "str q9, [x24, #0x10]\n"
+ "str q10, [x24, #0x20]\n"
+ "str q11, [x24, #0x30]\n"
+ "str q15, [x23, #0x0]\n"
+ "str q20, [x23, #0x10]\n"
+ "str q21, [x23, #0x20]\n"
+ "str q22, [x23, #0x30]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q17, [x22, #0x10]\n"
+ "str q18, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
+ "str q23, [x21, #0x0]\n"
+ "str q28, [x21, #0x10]\n"
+ "str q29, [x21, #0x20]\n"
+ "str q30, [x21, #0x30]\n"
+ "str q24, [x20, #0x0]\n"
+ "str q25, [x20, #0x10]\n"
+ "str q26, [x20, #0x20]\n"
+ "str q27, [x20, #0x30]\n"
"228:" // Height 6: Writeback done
- "subs x14, x14, #0x10\n"
+ "subs x13, x13, #0x10\n"
"bgt 192b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 230f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 229f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"229:" // Update direct input
- "mov x20, #0xc\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0xc\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"230:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
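(For context on the pattern this revert reinstates throughout the diff: the `+` lines bring x19 back as a scratch register, shift the other general-purpose registers down by one, and re-add "x19" to the asm clobber list, so the compiler saves and restores this callee-saved register around the block. Below is a minimal sketch, assuming GCC/Clang extended inline asm on AArch64; the function and operand names are hypothetical and illustrative only, not ComputeLibrary code. It mirrors the "mov x19, #0xc" / "madd" stride-update sequence above and shows why naming x19 as a clobber is sufficient for correctness.)

// Illustrative sketch only (hypothetical function, not ComputeLibrary code).
// x19 is callee-saved under AAPCS64, so listing it in the clobber list
// makes the compiler spill and reload it around the asm block, exactly as
// the reverted kernels rely on.
#include <cstdint>

int64_t scale_by_twelve(int64_t value)
{
    int64_t result;
    __asm__ volatile(
        "mov x19, #0xc\n"              // x19 = 12, as in the kernel's input-offset update
        "mul %x[res], x19, %x[val]\n"  // result = x19 * value
        : [res] "=r" (result)
        : [val] "r" (value)
        : "x19", "cc"                  // declaring x19 clobbered forces the save/restore
    );
    return result;
}

(The tradeoff is one fewer register available to surrounding compiler-generated code; the revert restores that original allocation uniformly across these generated kernels.)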
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp16_mla_6x32/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp16_mla_6x32/generic.cpp
index 18a2db5069..e1458b39ab 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp16_mla_6x32/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp16_mla_6x32/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#if defined(__aarch64__) && (defined(FP16_KERNELS) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC))
@@ -104,156 +104,156 @@ void a64_ffhybrid_fp16_mla_6x32 (
"cmp %x[M], #0x2\n"
"bgt 101f\n"
"beq 51f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x20, x9, x20, LSL #1\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0x18\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0x18\n"
"bgt 3f\n"
- "cmp x14, #0x10\n"
- "mov x9, x12\n"
+ "cmp x13, #0x10\n"
+ "mov x28, x11\n"
"bgt 3f\n"
- "cmp x14, #0x8\n"
- "mov x10, x12\n"
+ "cmp x13, #0x8\n"
+ "mov x9, x11\n"
"bgt 3f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"3:" // Height 1: B setup done
- "cbz x15, 4f\n"
- "ldr q8, [x15, #0x0]\n"
- "ldr q9, [x15, #0x10]\n"
- "ldr q10, [x15, #0x20]\n"
- "ldr q11, [x15, #0x30]\n"
- "add x15, x15, #0x40\n"
+ "cbz x14, 4f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
+ "add x14, x14, #0x40\n"
"b 23f\n"
"4:" // Height 1: no bias
"tbz %x[flags], #0, 22f\n"
- "cmp x14, #0x20\n"
+ "cmp x13, #0x20\n"
"bge 21f\n"
- "tbz x14, #4, 12f\n"
- "ld1 { v8.8h }, [x13], #0x10\n"
- "ld1 { v9.8h }, [x13], #0x10\n"
- "tbz x14, #3, 8f\n"
- "ld1 { v10.8h }, [x13], #0x10\n"
- "tbz x14, #2, 6f\n"
- "ldr d11, [x13], #0x8\n"
- "tbz x14, #1, 5f\n"
- "ld1 { v11.s }[2], [x13], #0x4\n"
- "mov x20, #0x3c\n"
- "tbz x14, #0, 20f\n"
- "ld1 { v11.h }[6], [x13]\n"
+ "tbz x13, #4, 12f\n"
+ "ld1 { v8.8h }, [x12], #0x10\n"
+ "ld1 { v9.8h }, [x12], #0x10\n"
+ "tbz x13, #3, 8f\n"
+ "ld1 { v10.8h }, [x12], #0x10\n"
+ "tbz x13, #2, 6f\n"
+ "ldr d11, [x12], #0x8\n"
+ "tbz x13, #1, 5f\n"
+ "ld1 { v11.s }[2], [x12], #0x4\n"
+ "mov x19, #0x3c\n"
+ "tbz x13, #0, 20f\n"
+ "ld1 { v11.h }[6], [x12]\n"
"b 20f\n"
"5:" // Height 1: Partial accumulate: partial_1_28
- "mov x20, #0x38\n"
- "tbz x14, #0, 20f\n"
- "ld1 { v11.h }[4], [x13]\n"
+ "mov x19, #0x38\n"
+ "tbz x13, #0, 20f\n"
+ "ld1 { v11.h }[4], [x12]\n"
"b 20f\n"
"6:" // Height 1: Partial accumulate: partial_2_24
- "tbz x14, #1, 7f\n"
- "ldr s11, [x13], #0x4\n"
- "mov x20, #0x34\n"
- "tbz x14, #0, 20f\n"
- "ld1 { v11.h }[2], [x13]\n"
+ "tbz x13, #1, 7f\n"
+ "ldr s11, [x12], #0x4\n"
+ "mov x19, #0x34\n"
+ "tbz x13, #0, 20f\n"
+ "ld1 { v11.h }[2], [x12]\n"
"b 20f\n"
"7:" // Height 1: Partial accumulate: partial_1_24
- "mov x20, #0x30\n"
- "tbz x14, #0, 20f\n"
- "ldr h11, [x13, #0x0]\n"
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 20f\n"
+ "ldr h11, [x12, #0x0]\n"
"b 20f\n"
"8:" // Height 1: Partial accumulate: partial_4_16
- "tbz x14, #2, 10f\n"
- "ldr d10, [x13], #0x8\n"
- "tbz x14, #1, 9f\n"
- "ld1 { v10.s }[2], [x13], #0x4\n"
- "mov x20, #0x2c\n"
- "tbz x14, #0, 20f\n"
- "ld1 { v10.h }[6], [x13]\n"
+ "tbz x13, #2, 10f\n"
+ "ldr d10, [x12], #0x8\n"
+ "tbz x13, #1, 9f\n"
+ "ld1 { v10.s }[2], [x12], #0x4\n"
+ "mov x19, #0x2c\n"
+ "tbz x13, #0, 20f\n"
+ "ld1 { v10.h }[6], [x12]\n"
"b 20f\n"
"9:" // Height 1: Partial accumulate: partial_1_20
- "mov x20, #0x28\n"
- "tbz x14, #0, 20f\n"
- "ld1 { v10.h }[4], [x13]\n"
+ "mov x19, #0x28\n"
+ "tbz x13, #0, 20f\n"
+ "ld1 { v10.h }[4], [x12]\n"
"b 20f\n"
"10:" // Height 1: Partial accumulate: partial_2_16
- "tbz x14, #1, 11f\n"
- "ldr s10, [x13], #0x4\n"
- "mov x20, #0x24\n"
- "tbz x14, #0, 20f\n"
- "ld1 { v10.h }[2], [x13]\n"
+ "tbz x13, #1, 11f\n"
+ "ldr s10, [x12], #0x4\n"
+ "mov x19, #0x24\n"
+ "tbz x13, #0, 20f\n"
+ "ld1 { v10.h }[2], [x12]\n"
"b 20f\n"
"11:" // Height 1: Partial accumulate: partial_1_16
- "mov x20, #0x20\n"
- "tbz x14, #0, 20f\n"
- "ldr h10, [x13, #0x0]\n"
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 20f\n"
+ "ldr h10, [x12, #0x0]\n"
"b 20f\n"
"12:" // Height 1: Partial accumulate: partial_8_0
- "tbz x14, #3, 16f\n"
- "ld1 { v8.8h }, [x13], #0x10\n"
- "tbz x14, #2, 14f\n"
- "ldr d9, [x13], #0x8\n"
- "tbz x14, #1, 13f\n"
- "ld1 { v9.s }[2], [x13], #0x4\n"
- "mov x20, #0x1c\n"
- "tbz x14, #0, 20f\n"
- "ld1 { v9.h }[6], [x13]\n"
+ "tbz x13, #3, 16f\n"
+ "ld1 { v8.8h }, [x12], #0x10\n"
+ "tbz x13, #2, 14f\n"
+ "ldr d9, [x12], #0x8\n"
+ "tbz x13, #1, 13f\n"
+ "ld1 { v9.s }[2], [x12], #0x4\n"
+ "mov x19, #0x1c\n"
+ "tbz x13, #0, 20f\n"
+ "ld1 { v9.h }[6], [x12]\n"
"b 20f\n"
"13:" // Height 1: Partial accumulate: partial_1_12
- "mov x20, #0x18\n"
- "tbz x14, #0, 20f\n"
- "ld1 { v9.h }[4], [x13]\n"
+ "mov x19, #0x18\n"
+ "tbz x13, #0, 20f\n"
+ "ld1 { v9.h }[4], [x12]\n"
"b 20f\n"
"14:" // Height 1: Partial accumulate: partial_2_8
- "tbz x14, #1, 15f\n"
- "ldr s9, [x13], #0x4\n"
- "mov x20, #0x14\n"
- "tbz x14, #0, 20f\n"
- "ld1 { v9.h }[2], [x13]\n"
+ "tbz x13, #1, 15f\n"
+ "ldr s9, [x12], #0x4\n"
+ "mov x19, #0x14\n"
+ "tbz x13, #0, 20f\n"
+ "ld1 { v9.h }[2], [x12]\n"
"b 20f\n"
"15:" // Height 1: Partial accumulate: partial_1_8
- "mov x20, #0x10\n"
- "tbz x14, #0, 20f\n"
- "ldr h9, [x13, #0x0]\n"
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 20f\n"
+ "ldr h9, [x12, #0x0]\n"
"b 20f\n"
"16:" // Height 1: Partial accumulate: partial_4_0
- "tbz x14, #2, 18f\n"
- "ldr d8, [x13], #0x8\n"
- "tbz x14, #1, 17f\n"
- "ld1 { v8.s }[2], [x13], #0x4\n"
- "mov x20, #0xc\n"
- "tbz x14, #0, 20f\n"
- "ld1 { v8.h }[6], [x13]\n"
+ "tbz x13, #2, 18f\n"
+ "ldr d8, [x12], #0x8\n"
+ "tbz x13, #1, 17f\n"
+ "ld1 { v8.s }[2], [x12], #0x4\n"
+ "mov x19, #0xc\n"
+ "tbz x13, #0, 20f\n"
+ "ld1 { v8.h }[6], [x12]\n"
"b 20f\n"
"17:" // Height 1: Partial accumulate: partial_1_4
- "mov x20, #0x8\n"
- "tbz x14, #0, 20f\n"
- "ld1 { v8.h }[4], [x13]\n"
+ "mov x19, #0x8\n"
+ "tbz x13, #0, 20f\n"
+ "ld1 { v8.h }[4], [x12]\n"
"b 20f\n"
"18:" // Height 1: Partial accumulate: partial_2_0
- "tbz x14, #1, 19f\n"
- "ldr s8, [x13], #0x4\n"
- "mov x20, #0x4\n"
- "tbz x14, #0, 20f\n"
- "ld1 { v8.h }[2], [x13]\n"
+ "tbz x13, #1, 19f\n"
+ "ldr s8, [x12], #0x4\n"
+ "mov x19, #0x4\n"
+ "tbz x13, #0, 20f\n"
+ "ld1 { v8.h }[2], [x12]\n"
"b 20f\n"
"19:" // Height 1: Partial accumulate: partial_1_0
- "ldr h8, [x13, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr h8, [x12, #0x0]\n"
+ "mov x19, #0x0\n"
"20:" // Height 1: Partial accumulate: Done
- "sub x13, x13, x20\n"
+ "sub x12, x12, x19\n"
"b 23f\n"
"21:" // Height 1: full accumulate
- "ldr q8, [x13, #0x0]\n"
- "ldr q9, [x13, #0x10]\n"
- "ldr q10, [x13, #0x20]\n"
- "ldr q11, [x13, #0x30]\n"
+ "ldr q8, [x12, #0x0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "ldr q10, [x12, #0x20]\n"
+ "ldr q11, [x12, #0x30]\n"
"b 23f\n"
"22:" // Height 1: no accumulate
"movi v8.16b, #0x0\n"
@@ -261,200 +261,200 @@ void a64_ffhybrid_fp16_mla_6x32 (
"movi v10.16b, #0x0\n"
"movi v11.16b, #0x0\n"
"23:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"24:" // Height 1: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 25f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 26f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 26f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
"b 26f\n"
"25:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"26:" // Height 1: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"blt 29f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q6, [x12, #0x0]\n"
- "cmp x27, #0x10\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q6, [x11, #0x0]\n"
+ "cmp x26, #0x10\n"
+ "ldr q7, [x10, #0x0]\n"
"blt 28f\n"
"27:" // Height 1: Multiply loop: Main loop head
"fmla v8.8h, v6.8h, v0.h[0]\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q6, [x11, #0x10]\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
- "ldr q7, [x11, #0x10]\n"
+ "ldr q7, [x10, #0x10]\n"
"fmla v8.8h, v6.8h, v0.h[1]\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
"fmla v9.8h, v7.8h, v0.h[1]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
"fmla v10.8h, v6.8h, v0.h[1]\n"
- "ldr q6, [x12, #0x20]\n"
+ "ldr q6, [x11, #0x20]\n"
"fmla v11.8h, v7.8h, v0.h[1]\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
"fmla v8.8h, v6.8h, v0.h[2]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
"fmla v9.8h, v7.8h, v0.h[2]\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
"fmla v10.8h, v6.8h, v0.h[2]\n"
- "ldr q6, [x12, #0x30]\n"
+ "ldr q6, [x11, #0x30]\n"
"fmla v11.8h, v7.8h, v0.h[2]\n"
- "ldr q7, [x11, #0x30]\n"
+ "ldr q7, [x10, #0x30]\n"
"fmla v8.8h, v6.8h, v0.h[3]\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
"fmla v9.8h, v7.8h, v0.h[3]\n"
- "ldr q7, [x9, #0x30]\n"
+ "ldr q7, [x28, #0x30]\n"
"fmla v10.8h, v6.8h, v0.h[3]\n"
- "ldr q6, [x12, #0x40]\n"
+ "ldr q6, [x11, #0x40]\n"
"fmla v11.8h, v7.8h, v0.h[3]\n"
- "ldr q7, [x11, #0x40]\n"
+ "ldr q7, [x10, #0x40]\n"
"fmla v8.8h, v6.8h, v0.h[4]\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
"fmla v9.8h, v7.8h, v0.h[4]\n"
- "ldr q7, [x9, #0x40]\n"
+ "ldr q7, [x28, #0x40]\n"
"fmla v10.8h, v6.8h, v0.h[4]\n"
- "ldr q6, [x12, #0x50]\n"
+ "ldr q6, [x11, #0x50]\n"
"fmla v11.8h, v7.8h, v0.h[4]\n"
- "ldr q7, [x11, #0x50]\n"
+ "ldr q7, [x10, #0x50]\n"
"fmla v8.8h, v6.8h, v0.h[5]\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
"fmla v9.8h, v7.8h, v0.h[5]\n"
- "ldr q7, [x9, #0x50]\n"
+ "ldr q7, [x28, #0x50]\n"
"fmla v10.8h, v6.8h, v0.h[5]\n"
- "ldr q6, [x12, #0x60]\n"
+ "ldr q6, [x11, #0x60]\n"
"fmla v11.8h, v7.8h, v0.h[5]\n"
- "ldr q7, [x11, #0x60]\n"
+ "ldr q7, [x10, #0x60]\n"
"fmla v8.8h, v6.8h, v0.h[6]\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
"fmla v9.8h, v7.8h, v0.h[6]\n"
- "ldr q7, [x9, #0x60]\n"
+ "ldr q7, [x28, #0x60]\n"
"fmla v10.8h, v6.8h, v0.h[6]\n"
- "ldr q6, [x12, #0x70]\n"
+ "ldr q6, [x11, #0x70]\n"
"fmla v11.8h, v7.8h, v0.h[6]\n"
- "ldr q7, [x11, #0x70]\n"
+ "ldr q7, [x10, #0x70]\n"
"fmla v8.8h, v6.8h, v0.h[7]\n"
- "ldr q6, [x10, #0x70]\n"
+ "ldr q6, [x9, #0x70]\n"
"fmla v9.8h, v7.8h, v0.h[7]\n"
- "ldr q7, [x9, #0x70]\n"
- "sub x27, x27, #0x8\n"
- "cmp x27, #0x10\n"
+ "ldr q7, [x28, #0x70]\n"
+ "sub x26, x26, #0x8\n"
+ "cmp x26, #0x10\n"
"fmla v10.8h, v6.8h, v0.h[7]\n"
"fmla v11.8h, v7.8h, v0.h[7]\n"
- "add x26, x26, #0x10\n"
- "ldr q0, [x26, #0x0]\n"
- "add x12, x12, #0x80\n"
- "ldr q6, [x12, #0x0]\n"
+ "add x25, x25, #0x10\n"
+ "ldr q0, [x25, #0x0]\n"
"add x11, x11, #0x80\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q6, [x11, #0x0]\n"
"add x10, x10, #0x80\n"
+ "ldr q7, [x10, #0x0]\n"
"add x9, x9, #0x80\n"
+ "add x28, x28, #0x80\n"
"bge 27b\n"
"28:" // Height 1: Multiply loop: Single iteration only
"fmla v8.8h, v6.8h, v0.h[0]\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q6, [x11, #0x10]\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
- "ldr q7, [x11, #0x10]\n"
+ "ldr q7, [x10, #0x10]\n"
"fmla v8.8h, v6.8h, v0.h[1]\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
"fmla v9.8h, v7.8h, v0.h[1]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
"fmla v10.8h, v6.8h, v0.h[1]\n"
- "ldr q6, [x12, #0x20]\n"
+ "ldr q6, [x11, #0x20]\n"
"fmla v11.8h, v7.8h, v0.h[1]\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
"fmla v8.8h, v6.8h, v0.h[2]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
"fmla v9.8h, v7.8h, v0.h[2]\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
"fmla v10.8h, v6.8h, v0.h[2]\n"
- "ldr q6, [x12, #0x30]\n"
+ "ldr q6, [x11, #0x30]\n"
"fmla v11.8h, v7.8h, v0.h[2]\n"
- "ldr q7, [x11, #0x30]\n"
+ "ldr q7, [x10, #0x30]\n"
"fmla v8.8h, v6.8h, v0.h[3]\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
"fmla v9.8h, v7.8h, v0.h[3]\n"
- "ldr q7, [x9, #0x30]\n"
+ "ldr q7, [x28, #0x30]\n"
"fmla v10.8h, v6.8h, v0.h[3]\n"
- "ldr q6, [x12, #0x40]\n"
+ "ldr q6, [x11, #0x40]\n"
"fmla v11.8h, v7.8h, v0.h[3]\n"
- "ldr q7, [x11, #0x40]\n"
+ "ldr q7, [x10, #0x40]\n"
"fmla v8.8h, v6.8h, v0.h[4]\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
"fmla v9.8h, v7.8h, v0.h[4]\n"
- "ldr q7, [x9, #0x40]\n"
+ "ldr q7, [x28, #0x40]\n"
"fmla v10.8h, v6.8h, v0.h[4]\n"
- "ldr q6, [x12, #0x50]\n"
+ "ldr q6, [x11, #0x50]\n"
"fmla v11.8h, v7.8h, v0.h[4]\n"
- "ldr q7, [x11, #0x50]\n"
+ "ldr q7, [x10, #0x50]\n"
"fmla v8.8h, v6.8h, v0.h[5]\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
"fmla v9.8h, v7.8h, v0.h[5]\n"
- "ldr q7, [x9, #0x50]\n"
+ "ldr q7, [x28, #0x50]\n"
"fmla v10.8h, v6.8h, v0.h[5]\n"
- "ldr q6, [x12, #0x60]\n"
+ "ldr q6, [x11, #0x60]\n"
"fmla v11.8h, v7.8h, v0.h[5]\n"
- "ldr q7, [x11, #0x60]\n"
+ "ldr q7, [x10, #0x60]\n"
"fmla v8.8h, v6.8h, v0.h[6]\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
"fmla v9.8h, v7.8h, v0.h[6]\n"
- "ldr q7, [x9, #0x60]\n"
+ "ldr q7, [x28, #0x60]\n"
"fmla v10.8h, v6.8h, v0.h[6]\n"
- "ldr q6, [x12, #0x70]\n"
+ "ldr q6, [x11, #0x70]\n"
"fmla v11.8h, v7.8h, v0.h[6]\n"
- "ldr q7, [x11, #0x70]\n"
+ "ldr q7, [x10, #0x70]\n"
"fmla v8.8h, v6.8h, v0.h[7]\n"
- "ldr q6, [x10, #0x70]\n"
+ "ldr q6, [x9, #0x70]\n"
"fmla v9.8h, v7.8h, v0.h[7]\n"
- "ldr q7, [x9, #0x70]\n"
- "sub x27, x27, #0x8\n"
+ "ldr q7, [x28, #0x70]\n"
+ "sub x26, x26, #0x8\n"
"fmla v10.8h, v6.8h, v0.h[7]\n"
"fmla v11.8h, v7.8h, v0.h[7]\n"
- "add x26, x26, #0x10\n"
- "add x12, x12, #0x80\n"
+ "add x25, x25, #0x10\n"
"add x11, x11, #0x80\n"
"add x10, x10, #0x80\n"
"add x9, x9, #0x80\n"
+ "add x28, x28, #0x80\n"
"29:" // Height 1: Multiply loop: Main loop skip
- "cbz x27, 31f\n"
+ "cbz x26, 31f\n"
"30:" // Height 1: Multiply loop: Odd block loop
- "ldr h0, [x26], #0x2\n"
- "ldr q6, [x12, #0x0]\n"
+ "ldr h0, [x25], #0x2\n"
+ "ldr q6, [x11, #0x0]\n"
"fmla v8.8h, v6.8h, v0.h[0]\n"
- "sub x27, x27, #0x1\n"
- "ldr q7, [x11, #0x0]\n"
- "ldr q6, [x10, #0x0]\n"
+ "sub x26, x26, #0x1\n"
+ "ldr q7, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
- "add x12, x12, #0x10\n"
"add x11, x11, #0x10\n"
"add x10, x10, #0x10\n"
"add x9, x9, #0x10\n"
- "cbnz x27, 30b\n"
+ "add x28, x28, #0x10\n"
+ "cbnz x26, 30b\n"
"31:" // Height 1: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 24b\n"
"tbz %x[flags], #1, 32f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.8h }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.8h }, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.8h }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.8h }, [x19]\n"
"fmin v8.8h, v8.8h, v1.8h\n"
"fmin v9.8h, v9.8h, v1.8h\n"
"fmin v10.8h, v10.8h, v1.8h\n"
@@ -464,305 +464,305 @@ void a64_ffhybrid_fp16_mla_6x32 (
"fmax v10.8h, v10.8h, v0.8h\n"
"fmax v11.8h, v11.8h, v0.8h\n"
"32:" // Height 1: No activation
- "cmp x14, #0x20\n"
+ "cmp x13, #0x20\n"
"bge 49f\n"
- "tbz x14, #4, 40f\n"
- "st1 { v8.8h }, [x13], #0x10\n"
- "st1 { v9.8h }, [x13], #0x10\n"
- "tbz x14, #3, 36f\n"
- "st1 { v10.8h }, [x13], #0x10\n"
- "tbz x14, #2, 34f\n"
- "str d11, [x13], #0x8\n"
- "tbz x14, #1, 33f\n"
- "st1 { v11.s }[2], [x13], #0x4\n"
- "tbz x14, #0, 48f\n"
- "st1 { v11.h }[6], [x13]\n"
+ "tbz x13, #4, 40f\n"
+ "st1 { v8.8h }, [x12], #0x10\n"
+ "st1 { v9.8h }, [x12], #0x10\n"
+ "tbz x13, #3, 36f\n"
+ "st1 { v10.8h }, [x12], #0x10\n"
+ "tbz x13, #2, 34f\n"
+ "str d11, [x12], #0x8\n"
+ "tbz x13, #1, 33f\n"
+ "st1 { v11.s }[2], [x12], #0x4\n"
+ "tbz x13, #0, 48f\n"
+ "st1 { v11.h }[6], [x12]\n"
"b 48f\n"
"33:" // Height 1: Partial direct writeback: partial_1_28
- "tbz x14, #0, 48f\n"
- "st1 { v11.h }[4], [x13]\n"
+ "tbz x13, #0, 48f\n"
+ "st1 { v11.h }[4], [x12]\n"
"b 48f\n"
"34:" // Height 1: Partial direct writeback: partial_2_24
- "tbz x14, #1, 35f\n"
- "str s11, [x13], #0x4\n"
- "tbz x14, #0, 48f\n"
- "st1 { v11.h }[2], [x13]\n"
+ "tbz x13, #1, 35f\n"
+ "str s11, [x12], #0x4\n"
+ "tbz x13, #0, 48f\n"
+ "st1 { v11.h }[2], [x12]\n"
"b 48f\n"
"35:" // Height 1: Partial direct writeback: partial_1_24
- "tbz x14, #0, 48f\n"
- "str h11, [x13, #0x0]\n"
+ "tbz x13, #0, 48f\n"
+ "str h11, [x12, #0x0]\n"
"b 48f\n"
"36:" // Height 1: Partial direct writeback: partial_4_16
- "tbz x14, #2, 38f\n"
- "str d10, [x13], #0x8\n"
- "tbz x14, #1, 37f\n"
- "st1 { v10.s }[2], [x13], #0x4\n"
- "tbz x14, #0, 48f\n"
- "st1 { v10.h }[6], [x13]\n"
+ "tbz x13, #2, 38f\n"
+ "str d10, [x12], #0x8\n"
+ "tbz x13, #1, 37f\n"
+ "st1 { v10.s }[2], [x12], #0x4\n"
+ "tbz x13, #0, 48f\n"
+ "st1 { v10.h }[6], [x12]\n"
"b 48f\n"
"37:" // Height 1: Partial direct writeback: partial_1_20
- "tbz x14, #0, 48f\n"
- "st1 { v10.h }[4], [x13]\n"
+ "tbz x13, #0, 48f\n"
+ "st1 { v10.h }[4], [x12]\n"
"b 48f\n"
"38:" // Height 1: Partial direct writeback: partial_2_16
- "tbz x14, #1, 39f\n"
- "str s10, [x13], #0x4\n"
- "tbz x14, #0, 48f\n"
- "st1 { v10.h }[2], [x13]\n"
+ "tbz x13, #1, 39f\n"
+ "str s10, [x12], #0x4\n"
+ "tbz x13, #0, 48f\n"
+ "st1 { v10.h }[2], [x12]\n"
"b 48f\n"
"39:" // Height 1: Partial direct writeback: partial_1_16
- "tbz x14, #0, 48f\n"
- "str h10, [x13, #0x0]\n"
+ "tbz x13, #0, 48f\n"
+ "str h10, [x12, #0x0]\n"
"b 48f\n"
"40:" // Height 1: Partial direct writeback: partial_8_0
- "tbz x14, #3, 44f\n"
- "st1 { v8.8h }, [x13], #0x10\n"
- "tbz x14, #2, 42f\n"
- "str d9, [x13], #0x8\n"
- "tbz x14, #1, 41f\n"
- "st1 { v9.s }[2], [x13], #0x4\n"
- "tbz x14, #0, 48f\n"
- "st1 { v9.h }[6], [x13]\n"
+ "tbz x13, #3, 44f\n"
+ "st1 { v8.8h }, [x12], #0x10\n"
+ "tbz x13, #2, 42f\n"
+ "str d9, [x12], #0x8\n"
+ "tbz x13, #1, 41f\n"
+ "st1 { v9.s }[2], [x12], #0x4\n"
+ "tbz x13, #0, 48f\n"
+ "st1 { v9.h }[6], [x12]\n"
"b 48f\n"
"41:" // Height 1: Partial direct writeback: partial_1_12
- "tbz x14, #0, 48f\n"
- "st1 { v9.h }[4], [x13]\n"
+ "tbz x13, #0, 48f\n"
+ "st1 { v9.h }[4], [x12]\n"
"b 48f\n"
"42:" // Height 1: Partial direct writeback: partial_2_8
- "tbz x14, #1, 43f\n"
- "str s9, [x13], #0x4\n"
- "tbz x14, #0, 48f\n"
- "st1 { v9.h }[2], [x13]\n"
+ "tbz x13, #1, 43f\n"
+ "str s9, [x12], #0x4\n"
+ "tbz x13, #0, 48f\n"
+ "st1 { v9.h }[2], [x12]\n"
"b 48f\n"
"43:" // Height 1: Partial direct writeback: partial_1_8
- "tbz x14, #0, 48f\n"
- "str h9, [x13, #0x0]\n"
+ "tbz x13, #0, 48f\n"
+ "str h9, [x12, #0x0]\n"
"b 48f\n"
"44:" // Height 1: Partial direct writeback: partial_4_0
- "tbz x14, #2, 46f\n"
- "str d8, [x13], #0x8\n"
- "tbz x14, #1, 45f\n"
- "st1 { v8.s }[2], [x13], #0x4\n"
- "tbz x14, #0, 48f\n"
- "st1 { v8.h }[6], [x13]\n"
+ "tbz x13, #2, 46f\n"
+ "str d8, [x12], #0x8\n"
+ "tbz x13, #1, 45f\n"
+ "st1 { v8.s }[2], [x12], #0x4\n"
+ "tbz x13, #0, 48f\n"
+ "st1 { v8.h }[6], [x12]\n"
"b 48f\n"
"45:" // Height 1: Partial direct writeback: partial_1_4
- "tbz x14, #0, 48f\n"
- "st1 { v8.h }[4], [x13]\n"
+ "tbz x13, #0, 48f\n"
+ "st1 { v8.h }[4], [x12]\n"
"b 48f\n"
"46:" // Height 1: Partial direct writeback: partial_2_0
- "tbz x14, #1, 47f\n"
- "str s8, [x13], #0x4\n"
- "tbz x14, #0, 48f\n"
- "st1 { v8.h }[2], [x13]\n"
+ "tbz x13, #1, 47f\n"
+ "str s8, [x12], #0x4\n"
+ "tbz x13, #0, 48f\n"
+ "st1 { v8.h }[2], [x12]\n"
"b 48f\n"
"47:" // Height 1: Partial direct writeback: partial_1_0
- "str h8, [x13, #0x0]\n"
+ "str h8, [x12, #0x0]\n"
"48:" // Height 1: Partial direct writeback: Done
"b 50f\n"
"49:" // Height 1: Full writeback
- "str q8, [x13, #0x0]\n"
- "str q9, [x13, #0x10]\n"
- "str q10, [x13, #0x20]\n"
- "str q11, [x13, #0x30]\n"
- "add x13, x13, #0x40\n"
+ "str q8, [x12, #0x0]\n"
+ "str q9, [x12, #0x10]\n"
+ "str q10, [x12, #0x20]\n"
+ "str q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
"50:" // Height 1: Writeback done
- "subs x14, x14, #0x20\n"
+ "subs x13, x13, #0x20\n"
"bgt 2b\n"
"b 302f\n"
"51:" // Height 2
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"52:" // Height 2: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x20, x9, x20, LSL #1\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0x18\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0x18\n"
"bgt 53f\n"
- "cmp x14, #0x10\n"
- "mov x9, x12\n"
+ "cmp x13, #0x10\n"
+ "mov x28, x11\n"
"bgt 53f\n"
- "cmp x14, #0x8\n"
- "mov x10, x12\n"
+ "cmp x13, #0x8\n"
+ "mov x9, x11\n"
"bgt 53f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"53:" // Height 2: B setup done
- "cbz x15, 54f\n"
- "ldr q8, [x15, #0x0]\n"
- "ldr q9, [x15, #0x10]\n"
+ "cbz x14, 54f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
"mov v12.16b, v8.16b\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x15, #0x20]\n"
- "ldr q11, [x15, #0x30]\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
"mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
- "add x15, x15, #0x40\n"
+ "add x14, x14, #0x40\n"
"b 73f\n"
"54:" // Height 2: no bias
"tbz %x[flags], #0, 72f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "cmp x14, #0x20\n"
- "add x25, x13, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x13, #0x20\n"
+ "add x24, x12, x19, LSL #1\n"
"bge 71f\n"
- "tbz x14, #4, 62f\n"
- "ld1 { v8.8h }, [x13], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v9.8h }, [x13], #0x10\n"
- "ld1 { v13.8h }, [x25], #0x10\n"
- "tbz x14, #3, 58f\n"
- "ld1 { v10.8h }, [x13], #0x10\n"
- "ld1 { v14.8h }, [x25], #0x10\n"
- "tbz x14, #2, 56f\n"
- "ldr d11, [x13], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "tbz x14, #1, 55f\n"
- "ld1 { v11.s }[2], [x13], #0x4\n"
- "ld1 { v15.s }[2], [x25], #0x4\n"
- "mov x20, #0x3c\n"
- "tbz x14, #0, 70f\n"
- "ld1 { v11.h }[6], [x13]\n"
- "ld1 { v15.h }[6], [x25]\n"
+ "tbz x13, #4, 62f\n"
+ "ld1 { v8.8h }, [x12], #0x10\n"
+ "ld1 { v12.8h }, [x24], #0x10\n"
+ "ld1 { v9.8h }, [x12], #0x10\n"
+ "ld1 { v13.8h }, [x24], #0x10\n"
+ "tbz x13, #3, 58f\n"
+ "ld1 { v10.8h }, [x12], #0x10\n"
+ "ld1 { v14.8h }, [x24], #0x10\n"
+ "tbz x13, #2, 56f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "tbz x13, #1, 55f\n"
+ "ld1 { v11.s }[2], [x12], #0x4\n"
+ "ld1 { v15.s }[2], [x24], #0x4\n"
+ "mov x19, #0x3c\n"
+ "tbz x13, #0, 70f\n"
+ "ld1 { v11.h }[6], [x12]\n"
+ "ld1 { v15.h }[6], [x24]\n"
"b 70f\n"
"55:" // Height 2: Partial accumulate: partial_1_28
- "mov x20, #0x38\n"
- "tbz x14, #0, 70f\n"
- "ld1 { v11.h }[4], [x13]\n"
- "ld1 { v15.h }[4], [x25]\n"
+ "mov x19, #0x38\n"
+ "tbz x13, #0, 70f\n"
+ "ld1 { v11.h }[4], [x12]\n"
+ "ld1 { v15.h }[4], [x24]\n"
"b 70f\n"
"56:" // Height 2: Partial accumulate: partial_2_24
- "tbz x14, #1, 57f\n"
- "ldr s11, [x13], #0x4\n"
- "ldr s15, [x25], #0x4\n"
- "mov x20, #0x34\n"
- "tbz x14, #0, 70f\n"
- "ld1 { v11.h }[2], [x13]\n"
- "ld1 { v15.h }[2], [x25]\n"
+ "tbz x13, #1, 57f\n"
+ "ldr s11, [x12], #0x4\n"
+ "ldr s15, [x24], #0x4\n"
+ "mov x19, #0x34\n"
+ "tbz x13, #0, 70f\n"
+ "ld1 { v11.h }[2], [x12]\n"
+ "ld1 { v15.h }[2], [x24]\n"
"b 70f\n"
"57:" // Height 2: Partial accumulate: partial_1_24
- "mov x20, #0x30\n"
- "tbz x14, #0, 70f\n"
- "ldr h11, [x13, #0x0]\n"
- "ldr h15, [x25, #0x0]\n"
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 70f\n"
+ "ldr h11, [x12, #0x0]\n"
+ "ldr h15, [x24, #0x0]\n"
"b 70f\n"
"58:" // Height 2: Partial accumulate: partial_4_16
- "tbz x14, #2, 60f\n"
- "ldr d10, [x13], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "tbz x14, #1, 59f\n"
- "ld1 { v10.s }[2], [x13], #0x4\n"
- "ld1 { v14.s }[2], [x25], #0x4\n"
- "mov x20, #0x2c\n"
- "tbz x14, #0, 70f\n"
- "ld1 { v10.h }[6], [x13]\n"
- "ld1 { v14.h }[6], [x25]\n"
+ "tbz x13, #2, 60f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "tbz x13, #1, 59f\n"
+ "ld1 { v10.s }[2], [x12], #0x4\n"
+ "ld1 { v14.s }[2], [x24], #0x4\n"
+ "mov x19, #0x2c\n"
+ "tbz x13, #0, 70f\n"
+ "ld1 { v10.h }[6], [x12]\n"
+ "ld1 { v14.h }[6], [x24]\n"
"b 70f\n"
"59:" // Height 2: Partial accumulate: partial_1_20
- "mov x20, #0x28\n"
- "tbz x14, #0, 70f\n"
- "ld1 { v10.h }[4], [x13]\n"
- "ld1 { v14.h }[4], [x25]\n"
+ "mov x19, #0x28\n"
+ "tbz x13, #0, 70f\n"
+ "ld1 { v10.h }[4], [x12]\n"
+ "ld1 { v14.h }[4], [x24]\n"
"b 70f\n"
"60:" // Height 2: Partial accumulate: partial_2_16
- "tbz x14, #1, 61f\n"
- "ldr s10, [x13], #0x4\n"
- "ldr s14, [x25], #0x4\n"
- "mov x20, #0x24\n"
- "tbz x14, #0, 70f\n"
- "ld1 { v10.h }[2], [x13]\n"
- "ld1 { v14.h }[2], [x25]\n"
+ "tbz x13, #1, 61f\n"
+ "ldr s10, [x12], #0x4\n"
+ "ldr s14, [x24], #0x4\n"
+ "mov x19, #0x24\n"
+ "tbz x13, #0, 70f\n"
+ "ld1 { v10.h }[2], [x12]\n"
+ "ld1 { v14.h }[2], [x24]\n"
"b 70f\n"
"61:" // Height 2: Partial accumulate: partial_1_16
- "mov x20, #0x20\n"
- "tbz x14, #0, 70f\n"
- "ldr h10, [x13, #0x0]\n"
- "ldr h14, [x25, #0x0]\n"
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 70f\n"
+ "ldr h10, [x12, #0x0]\n"
+ "ldr h14, [x24, #0x0]\n"
"b 70f\n"
"62:" // Height 2: Partial accumulate: partial_8_0
- "tbz x14, #3, 66f\n"
- "ld1 { v8.8h }, [x13], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "tbz x14, #2, 64f\n"
- "ldr d9, [x13], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "tbz x14, #1, 63f\n"
- "ld1 { v9.s }[2], [x13], #0x4\n"
- "ld1 { v13.s }[2], [x25], #0x4\n"
- "mov x20, #0x1c\n"
- "tbz x14, #0, 70f\n"
- "ld1 { v9.h }[6], [x13]\n"
- "ld1 { v13.h }[6], [x25]\n"
+ "tbz x13, #3, 66f\n"
+ "ld1 { v8.8h }, [x12], #0x10\n"
+ "ld1 { v12.8h }, [x24], #0x10\n"
+ "tbz x13, #2, 64f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "tbz x13, #1, 63f\n"
+ "ld1 { v9.s }[2], [x12], #0x4\n"
+ "ld1 { v13.s }[2], [x24], #0x4\n"
+ "mov x19, #0x1c\n"
+ "tbz x13, #0, 70f\n"
+ "ld1 { v9.h }[6], [x12]\n"
+ "ld1 { v13.h }[6], [x24]\n"
"b 70f\n"
"63:" // Height 2: Partial accumulate: partial_1_12
- "mov x20, #0x18\n"
- "tbz x14, #0, 70f\n"
- "ld1 { v9.h }[4], [x13]\n"
- "ld1 { v13.h }[4], [x25]\n"
+ "mov x19, #0x18\n"
+ "tbz x13, #0, 70f\n"
+ "ld1 { v9.h }[4], [x12]\n"
+ "ld1 { v13.h }[4], [x24]\n"
"b 70f\n"
"64:" // Height 2: Partial accumulate: partial_2_8
- "tbz x14, #1, 65f\n"
- "ldr s9, [x13], #0x4\n"
- "ldr s13, [x25], #0x4\n"
- "mov x20, #0x14\n"
- "tbz x14, #0, 70f\n"
- "ld1 { v9.h }[2], [x13]\n"
- "ld1 { v13.h }[2], [x25]\n"
+ "tbz x13, #1, 65f\n"
+ "ldr s9, [x12], #0x4\n"
+ "ldr s13, [x24], #0x4\n"
+ "mov x19, #0x14\n"
+ "tbz x13, #0, 70f\n"
+ "ld1 { v9.h }[2], [x12]\n"
+ "ld1 { v13.h }[2], [x24]\n"
"b 70f\n"
"65:" // Height 2: Partial accumulate: partial_1_8
- "mov x20, #0x10\n"
- "tbz x14, #0, 70f\n"
- "ldr h9, [x13, #0x0]\n"
- "ldr h13, [x25, #0x0]\n"
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 70f\n"
+ "ldr h9, [x12, #0x0]\n"
+ "ldr h13, [x24, #0x0]\n"
"b 70f\n"
"66:" // Height 2: Partial accumulate: partial_4_0
- "tbz x14, #2, 68f\n"
- "ldr d8, [x13], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "tbz x14, #1, 67f\n"
- "ld1 { v8.s }[2], [x13], #0x4\n"
- "ld1 { v12.s }[2], [x25], #0x4\n"
- "mov x20, #0xc\n"
- "tbz x14, #0, 70f\n"
- "ld1 { v8.h }[6], [x13]\n"
- "ld1 { v12.h }[6], [x25]\n"
+ "tbz x13, #2, 68f\n"
+ "ldr d8, [x12], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "tbz x13, #1, 67f\n"
+ "ld1 { v8.s }[2], [x12], #0x4\n"
+ "ld1 { v12.s }[2], [x24], #0x4\n"
+ "mov x19, #0xc\n"
+ "tbz x13, #0, 70f\n"
+ "ld1 { v8.h }[6], [x12]\n"
+ "ld1 { v12.h }[6], [x24]\n"
"b 70f\n"
"67:" // Height 2: Partial accumulate: partial_1_4
- "mov x20, #0x8\n"
- "tbz x14, #0, 70f\n"
- "ld1 { v8.h }[4], [x13]\n"
- "ld1 { v12.h }[4], [x25]\n"
+ "mov x19, #0x8\n"
+ "tbz x13, #0, 70f\n"
+ "ld1 { v8.h }[4], [x12]\n"
+ "ld1 { v12.h }[4], [x24]\n"
"b 70f\n"
"68:" // Height 2: Partial accumulate: partial_2_0
- "tbz x14, #1, 69f\n"
- "ldr s8, [x13], #0x4\n"
- "ldr s12, [x25], #0x4\n"
- "mov x20, #0x4\n"
- "tbz x14, #0, 70f\n"
- "ld1 { v8.h }[2], [x13]\n"
- "ld1 { v12.h }[2], [x25]\n"
+ "tbz x13, #1, 69f\n"
+ "ldr s8, [x12], #0x4\n"
+ "ldr s12, [x24], #0x4\n"
+ "mov x19, #0x4\n"
+ "tbz x13, #0, 70f\n"
+ "ld1 { v8.h }[2], [x12]\n"
+ "ld1 { v12.h }[2], [x24]\n"
"b 70f\n"
"69:" // Height 2: Partial accumulate: partial_1_0
- "ldr h8, [x13, #0x0]\n"
- "ldr h12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr h8, [x12, #0x0]\n"
+ "ldr h12, [x24, #0x0]\n"
+ "mov x19, #0x0\n"
"70:" // Height 2: Partial accumulate: Done
- "sub x13, x13, x20\n"
+ "sub x12, x12, x19\n"
"b 73f\n"
"71:" // Height 2: full accumulate
- "ldr q8, [x13, #0x0]\n"
- "ldr q9, [x13, #0x10]\n"
- "ldr q10, [x13, #0x20]\n"
- "ldr q11, [x13, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
+ "ldr q8, [x12, #0x0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "ldr q10, [x12, #0x20]\n"
+ "ldr q11, [x12, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
"b 73f\n"
"72:" // Height 2: no accumulate
"movi v8.16b, #0x0\n"
@@ -774,278 +774,278 @@ void a64_ffhybrid_fp16_mla_6x32 (
"movi v14.16b, #0x0\n"
"movi v15.16b, #0x0\n"
"73:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"74:" // Height 2: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 75f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 76f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 76f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
"b 76f\n"
"75:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
"76:" // Height 2: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"blt 79f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x10\n"
- "ldr q6, [x12, #0x0]\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x10\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"blt 78f\n"
"77:" // Height 2: Multiply loop: Main loop head
"fmla v8.8h, v6.8h, v0.h[0]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
- "ldr q6, [x10, #0x0]\n"
- "sub x27, x27, #0x8\n"
+ "ldr q6, [x9, #0x0]\n"
+ "sub x26, x26, #0x8\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
- "ldr q7, [x9, #0x0]\n"
- "cmp x27, #0x10\n"
+ "ldr q7, [x28, #0x0]\n"
+ "cmp x26, #0x10\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
- "ldr q6, [x12, #0x10]\n"
- "add x26, x26, #0x10\n"
+ "ldr q6, [x11, #0x10]\n"
+ "add x25, x25, #0x10\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
"fmla v15.8h, v7.8h, v1.h[0]\n"
- "ldr q7, [x11, #0x10]\n"
- "add x25, x25, #0x10\n"
+ "ldr q7, [x10, #0x10]\n"
+ "add x24, x24, #0x10\n"
"fmla v8.8h, v6.8h, v0.h[1]\n"
"fmla v12.8h, v6.8h, v1.h[1]\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
"fmla v9.8h, v7.8h, v0.h[1]\n"
"fmla v13.8h, v7.8h, v1.h[1]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
"fmla v10.8h, v6.8h, v0.h[1]\n"
"fmla v14.8h, v6.8h, v1.h[1]\n"
- "ldr q6, [x12, #0x20]\n"
+ "ldr q6, [x11, #0x20]\n"
"fmla v11.8h, v7.8h, v0.h[1]\n"
"fmla v15.8h, v7.8h, v1.h[1]\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
"fmla v8.8h, v6.8h, v0.h[2]\n"
"fmla v12.8h, v6.8h, v1.h[2]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
"fmla v9.8h, v7.8h, v0.h[2]\n"
"fmla v13.8h, v7.8h, v1.h[2]\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
"fmla v10.8h, v6.8h, v0.h[2]\n"
"fmla v14.8h, v6.8h, v1.h[2]\n"
- "ldr q6, [x12, #0x30]\n"
+ "ldr q6, [x11, #0x30]\n"
"fmla v11.8h, v7.8h, v0.h[2]\n"
"fmla v15.8h, v7.8h, v1.h[2]\n"
- "ldr q7, [x11, #0x30]\n"
+ "ldr q7, [x10, #0x30]\n"
"fmla v8.8h, v6.8h, v0.h[3]\n"
"fmla v12.8h, v6.8h, v1.h[3]\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
"fmla v9.8h, v7.8h, v0.h[3]\n"
"fmla v13.8h, v7.8h, v1.h[3]\n"
- "ldr q7, [x9, #0x30]\n"
+ "ldr q7, [x28, #0x30]\n"
"fmla v10.8h, v6.8h, v0.h[3]\n"
"fmla v14.8h, v6.8h, v1.h[3]\n"
- "ldr q6, [x12, #0x40]\n"
+ "ldr q6, [x11, #0x40]\n"
"fmla v11.8h, v7.8h, v0.h[3]\n"
"fmla v15.8h, v7.8h, v1.h[3]\n"
- "ldr q7, [x11, #0x40]\n"
+ "ldr q7, [x10, #0x40]\n"
"fmla v8.8h, v6.8h, v0.h[4]\n"
"fmla v12.8h, v6.8h, v1.h[4]\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
"fmla v9.8h, v7.8h, v0.h[4]\n"
"fmla v13.8h, v7.8h, v1.h[4]\n"
- "ldr q7, [x9, #0x40]\n"
+ "ldr q7, [x28, #0x40]\n"
"fmla v10.8h, v6.8h, v0.h[4]\n"
"fmla v14.8h, v6.8h, v1.h[4]\n"
- "ldr q6, [x12, #0x50]\n"
+ "ldr q6, [x11, #0x50]\n"
"fmla v11.8h, v7.8h, v0.h[4]\n"
"fmla v15.8h, v7.8h, v1.h[4]\n"
- "ldr q7, [x11, #0x50]\n"
+ "ldr q7, [x10, #0x50]\n"
"fmla v8.8h, v6.8h, v0.h[5]\n"
"fmla v12.8h, v6.8h, v1.h[5]\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
"fmla v9.8h, v7.8h, v0.h[5]\n"
"fmla v13.8h, v7.8h, v1.h[5]\n"
- "ldr q7, [x9, #0x50]\n"
+ "ldr q7, [x28, #0x50]\n"
"fmla v10.8h, v6.8h, v0.h[5]\n"
"fmla v14.8h, v6.8h, v1.h[5]\n"
- "ldr q6, [x12, #0x60]\n"
+ "ldr q6, [x11, #0x60]\n"
"fmla v11.8h, v7.8h, v0.h[5]\n"
"fmla v15.8h, v7.8h, v1.h[5]\n"
- "ldr q7, [x11, #0x60]\n"
+ "ldr q7, [x10, #0x60]\n"
"fmla v8.8h, v6.8h, v0.h[6]\n"
"fmla v12.8h, v6.8h, v1.h[6]\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
"fmla v9.8h, v7.8h, v0.h[6]\n"
"fmla v13.8h, v7.8h, v1.h[6]\n"
- "ldr q7, [x9, #0x60]\n"
+ "ldr q7, [x28, #0x60]\n"
"fmla v10.8h, v6.8h, v0.h[6]\n"
"fmla v14.8h, v6.8h, v1.h[6]\n"
- "ldr q6, [x12, #0x70]\n"
- "add x12, x12, #0x80\n"
+ "ldr q6, [x11, #0x70]\n"
+ "add x11, x11, #0x80\n"
"fmla v11.8h, v7.8h, v0.h[6]\n"
"fmla v15.8h, v7.8h, v1.h[6]\n"
- "ldr q7, [x11, #0x70]\n"
- "add x11, x11, #0x80\n"
+ "ldr q7, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
"fmla v8.8h, v6.8h, v0.h[7]\n"
"fmla v12.8h, v6.8h, v1.h[7]\n"
- "ldr q6, [x10, #0x70]\n"
- "add x10, x10, #0x80\n"
+ "ldr q6, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
"fmla v9.8h, v7.8h, v0.h[7]\n"
"fmla v13.8h, v7.8h, v1.h[7]\n"
- "ldr q7, [x9, #0x70]\n"
- "add x9, x9, #0x80\n"
+ "ldr q7, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
"fmla v10.8h, v6.8h, v0.h[7]\n"
"fmla v14.8h, v6.8h, v1.h[7]\n"
- "ldr q6, [x12, #0x0]\n"
+ "ldr q6, [x11, #0x0]\n"
"fmla v11.8h, v7.8h, v0.h[7]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
"fmla v15.8h, v7.8h, v1.h[7]\n"
- "ldr q1, [x25, #0x0]\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"bge 77b\n"
"78:" // Height 2: Multiply loop: Single iteration only
"fmla v8.8h, v6.8h, v0.h[0]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
- "ldr q6, [x10, #0x0]\n"
- "sub x27, x27, #0x8\n"
+ "ldr q6, [x9, #0x0]\n"
+ "sub x26, x26, #0x8\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
- "ldr q7, [x9, #0x0]\n"
- "add x26, x26, #0x10\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x25, x25, #0x10\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
- "ldr q6, [x12, #0x10]\n"
- "add x25, x25, #0x10\n"
+ "ldr q6, [x11, #0x10]\n"
+ "add x24, x24, #0x10\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
"fmla v15.8h, v7.8h, v1.h[0]\n"
- "ldr q7, [x11, #0x10]\n"
+ "ldr q7, [x10, #0x10]\n"
"fmla v8.8h, v6.8h, v0.h[1]\n"
"fmla v12.8h, v6.8h, v1.h[1]\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
"fmla v9.8h, v7.8h, v0.h[1]\n"
"fmla v13.8h, v7.8h, v1.h[1]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
"fmla v10.8h, v6.8h, v0.h[1]\n"
"fmla v14.8h, v6.8h, v1.h[1]\n"
- "ldr q6, [x12, #0x20]\n"
+ "ldr q6, [x11, #0x20]\n"
"fmla v11.8h, v7.8h, v0.h[1]\n"
"fmla v15.8h, v7.8h, v1.h[1]\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
"fmla v8.8h, v6.8h, v0.h[2]\n"
"fmla v12.8h, v6.8h, v1.h[2]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
"fmla v9.8h, v7.8h, v0.h[2]\n"
"fmla v13.8h, v7.8h, v1.h[2]\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
"fmla v10.8h, v6.8h, v0.h[2]\n"
"fmla v14.8h, v6.8h, v1.h[2]\n"
- "ldr q6, [x12, #0x30]\n"
+ "ldr q6, [x11, #0x30]\n"
"fmla v11.8h, v7.8h, v0.h[2]\n"
"fmla v15.8h, v7.8h, v1.h[2]\n"
- "ldr q7, [x11, #0x30]\n"
+ "ldr q7, [x10, #0x30]\n"
"fmla v8.8h, v6.8h, v0.h[3]\n"
"fmla v12.8h, v6.8h, v1.h[3]\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
"fmla v9.8h, v7.8h, v0.h[3]\n"
"fmla v13.8h, v7.8h, v1.h[3]\n"
- "ldr q7, [x9, #0x30]\n"
+ "ldr q7, [x28, #0x30]\n"
"fmla v10.8h, v6.8h, v0.h[3]\n"
"fmla v14.8h, v6.8h, v1.h[3]\n"
- "ldr q6, [x12, #0x40]\n"
+ "ldr q6, [x11, #0x40]\n"
"fmla v11.8h, v7.8h, v0.h[3]\n"
"fmla v15.8h, v7.8h, v1.h[3]\n"
- "ldr q7, [x11, #0x40]\n"
+ "ldr q7, [x10, #0x40]\n"
"fmla v8.8h, v6.8h, v0.h[4]\n"
"fmla v12.8h, v6.8h, v1.h[4]\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
"fmla v9.8h, v7.8h, v0.h[4]\n"
"fmla v13.8h, v7.8h, v1.h[4]\n"
- "ldr q7, [x9, #0x40]\n"
+ "ldr q7, [x28, #0x40]\n"
"fmla v10.8h, v6.8h, v0.h[4]\n"
"fmla v14.8h, v6.8h, v1.h[4]\n"
- "ldr q6, [x12, #0x50]\n"
+ "ldr q6, [x11, #0x50]\n"
"fmla v11.8h, v7.8h, v0.h[4]\n"
"fmla v15.8h, v7.8h, v1.h[4]\n"
- "ldr q7, [x11, #0x50]\n"
+ "ldr q7, [x10, #0x50]\n"
"fmla v8.8h, v6.8h, v0.h[5]\n"
"fmla v12.8h, v6.8h, v1.h[5]\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
"fmla v9.8h, v7.8h, v0.h[5]\n"
"fmla v13.8h, v7.8h, v1.h[5]\n"
- "ldr q7, [x9, #0x50]\n"
+ "ldr q7, [x28, #0x50]\n"
"fmla v10.8h, v6.8h, v0.h[5]\n"
"fmla v14.8h, v6.8h, v1.h[5]\n"
- "ldr q6, [x12, #0x60]\n"
+ "ldr q6, [x11, #0x60]\n"
"fmla v11.8h, v7.8h, v0.h[5]\n"
"fmla v15.8h, v7.8h, v1.h[5]\n"
- "ldr q7, [x11, #0x60]\n"
+ "ldr q7, [x10, #0x60]\n"
"fmla v8.8h, v6.8h, v0.h[6]\n"
"fmla v12.8h, v6.8h, v1.h[6]\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
"fmla v9.8h, v7.8h, v0.h[6]\n"
"fmla v13.8h, v7.8h, v1.h[6]\n"
- "ldr q7, [x9, #0x60]\n"
+ "ldr q7, [x28, #0x60]\n"
"fmla v10.8h, v6.8h, v0.h[6]\n"
"fmla v14.8h, v6.8h, v1.h[6]\n"
- "ldr q6, [x12, #0x70]\n"
- "add x12, x12, #0x80\n"
+ "ldr q6, [x11, #0x70]\n"
+ "add x11, x11, #0x80\n"
"fmla v11.8h, v7.8h, v0.h[6]\n"
"fmla v15.8h, v7.8h, v1.h[6]\n"
- "ldr q7, [x11, #0x70]\n"
- "add x11, x11, #0x80\n"
+ "ldr q7, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
"fmla v8.8h, v6.8h, v0.h[7]\n"
"fmla v12.8h, v6.8h, v1.h[7]\n"
- "ldr q6, [x10, #0x70]\n"
- "add x10, x10, #0x80\n"
+ "ldr q6, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
"fmla v9.8h, v7.8h, v0.h[7]\n"
"fmla v13.8h, v7.8h, v1.h[7]\n"
- "ldr q7, [x9, #0x70]\n"
- "add x9, x9, #0x80\n"
+ "ldr q7, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
"fmla v10.8h, v6.8h, v0.h[7]\n"
"fmla v14.8h, v6.8h, v1.h[7]\n"
"fmla v11.8h, v7.8h, v0.h[7]\n"
"fmla v15.8h, v7.8h, v1.h[7]\n"
"79:" // Height 2: Multiply loop: Main loop skip
- "cbz x27, 81f\n"
+ "cbz x26, 81f\n"
"80:" // Height 2: Multiply loop: Odd block loop
- "ldr h0, [x26], #0x2\n"
- "ldr h1, [x25], #0x2\n"
- "sub x27, x27, #0x1\n"
- "ldr q6, [x12, #0x0]\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr h0, [x25], #0x2\n"
+ "ldr h1, [x24], #0x2\n"
+ "sub x26, x26, #0x1\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"fmla v8.8h, v6.8h, v0.h[0]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
- "add x12, x12, #0x10\n"
"add x11, x11, #0x10\n"
+ "add x10, x10, #0x10\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
"fmla v15.8h, v7.8h, v1.h[0]\n"
- "add x10, x10, #0x10\n"
"add x9, x9, #0x10\n"
- "cbnz x27, 80b\n"
+ "add x28, x28, #0x10\n"
+ "cbnz x26, 80b\n"
"81:" // Height 2: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 74b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
"tbz %x[flags], #1, 82f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.8h }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.8h }, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.8h }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.8h }, [x19]\n"
"fmin v8.8h, v8.8h, v1.8h\n"
"fmin v9.8h, v9.8h, v1.8h\n"
"fmin v10.8h, v10.8h, v1.8h\n"
@@ -1063,382 +1063,382 @@ void a64_ffhybrid_fp16_mla_6x32 (
"fmax v14.8h, v14.8h, v0.8h\n"
"fmax v15.8h, v15.8h, v0.8h\n"
"82:" // Height 2: No activation
- "cmp x14, #0x20\n"
+ "cmp x13, #0x20\n"
"bge 99f\n"
- "tbz x14, #4, 90f\n"
- "st1 { v8.8h }, [x13], #0x10\n"
- "st1 { v9.8h }, [x13], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v13.8h }, [x25], #0x10\n"
- "tbz x14, #3, 86f\n"
- "st1 { v10.8h }, [x13], #0x10\n"
- "st1 { v14.8h }, [x25], #0x10\n"
- "tbz x14, #2, 84f\n"
- "str d11, [x13], #0x8\n"
- "str d15, [x25], #0x8\n"
- "tbz x14, #1, 83f\n"
- "st1 { v11.s }[2], [x13], #0x4\n"
- "st1 { v15.s }[2], [x25], #0x4\n"
- "tbz x14, #0, 98f\n"
- "st1 { v11.h }[6], [x13]\n"
- "st1 { v15.h }[6], [x25]\n"
+ "tbz x13, #4, 90f\n"
+ "st1 { v8.8h }, [x12], #0x10\n"
+ "st1 { v9.8h }, [x12], #0x10\n"
+ "st1 { v12.8h }, [x24], #0x10\n"
+ "st1 { v13.8h }, [x24], #0x10\n"
+ "tbz x13, #3, 86f\n"
+ "st1 { v10.8h }, [x12], #0x10\n"
+ "st1 { v14.8h }, [x24], #0x10\n"
+ "tbz x13, #2, 84f\n"
+ "str d11, [x12], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "tbz x13, #1, 83f\n"
+ "st1 { v11.s }[2], [x12], #0x4\n"
+ "st1 { v15.s }[2], [x24], #0x4\n"
+ "tbz x13, #0, 98f\n"
+ "st1 { v11.h }[6], [x12]\n"
+ "st1 { v15.h }[6], [x24]\n"
"b 98f\n"
"83:" // Height 2: Partial direct writeback: partial_1_28
- "tbz x14, #0, 98f\n"
- "st1 { v11.h }[4], [x13]\n"
- "st1 { v15.h }[4], [x25]\n"
+ "tbz x13, #0, 98f\n"
+ "st1 { v11.h }[4], [x12]\n"
+ "st1 { v15.h }[4], [x24]\n"
"b 98f\n"
"84:" // Height 2: Partial direct writeback: partial_2_24
- "tbz x14, #1, 85f\n"
- "str s11, [x13], #0x4\n"
- "str s15, [x25], #0x4\n"
- "tbz x14, #0, 98f\n"
- "st1 { v11.h }[2], [x13]\n"
- "st1 { v15.h }[2], [x25]\n"
+ "tbz x13, #1, 85f\n"
+ "str s11, [x12], #0x4\n"
+ "str s15, [x24], #0x4\n"
+ "tbz x13, #0, 98f\n"
+ "st1 { v11.h }[2], [x12]\n"
+ "st1 { v15.h }[2], [x24]\n"
"b 98f\n"
"85:" // Height 2: Partial direct writeback: partial_1_24
- "tbz x14, #0, 98f\n"
- "str h11, [x13, #0x0]\n"
- "str h15, [x25, #0x0]\n"
+ "tbz x13, #0, 98f\n"
+ "str h11, [x12, #0x0]\n"
+ "str h15, [x24, #0x0]\n"
"b 98f\n"
"86:" // Height 2: Partial direct writeback: partial_4_16
- "tbz x14, #2, 88f\n"
- "str d10, [x13], #0x8\n"
- "str d14, [x25], #0x8\n"
- "tbz x14, #1, 87f\n"
- "st1 { v10.s }[2], [x13], #0x4\n"
- "st1 { v14.s }[2], [x25], #0x4\n"
- "tbz x14, #0, 98f\n"
- "st1 { v10.h }[6], [x13]\n"
- "st1 { v14.h }[6], [x25]\n"
+ "tbz x13, #2, 88f\n"
+ "str d10, [x12], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "tbz x13, #1, 87f\n"
+ "st1 { v10.s }[2], [x12], #0x4\n"
+ "st1 { v14.s }[2], [x24], #0x4\n"
+ "tbz x13, #0, 98f\n"
+ "st1 { v10.h }[6], [x12]\n"
+ "st1 { v14.h }[6], [x24]\n"
"b 98f\n"
"87:" // Height 2: Partial direct writeback: partial_1_20
- "tbz x14, #0, 98f\n"
- "st1 { v10.h }[4], [x13]\n"
- "st1 { v14.h }[4], [x25]\n"
+ "tbz x13, #0, 98f\n"
+ "st1 { v10.h }[4], [x12]\n"
+ "st1 { v14.h }[4], [x24]\n"
"b 98f\n"
"88:" // Height 2: Partial direct writeback: partial_2_16
- "tbz x14, #1, 89f\n"
- "str s10, [x13], #0x4\n"
- "str s14, [x25], #0x4\n"
- "tbz x14, #0, 98f\n"
- "st1 { v10.h }[2], [x13]\n"
- "st1 { v14.h }[2], [x25]\n"
+ "tbz x13, #1, 89f\n"
+ "str s10, [x12], #0x4\n"
+ "str s14, [x24], #0x4\n"
+ "tbz x13, #0, 98f\n"
+ "st1 { v10.h }[2], [x12]\n"
+ "st1 { v14.h }[2], [x24]\n"
"b 98f\n"
"89:" // Height 2: Partial direct writeback: partial_1_16
- "tbz x14, #0, 98f\n"
- "str h10, [x13, #0x0]\n"
- "str h14, [x25, #0x0]\n"
+ "tbz x13, #0, 98f\n"
+ "str h10, [x12, #0x0]\n"
+ "str h14, [x24, #0x0]\n"
"b 98f\n"
"90:" // Height 2: Partial direct writeback: partial_8_0
- "tbz x14, #3, 94f\n"
- "st1 { v8.8h }, [x13], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "tbz x14, #2, 92f\n"
- "str d9, [x13], #0x8\n"
- "str d13, [x25], #0x8\n"
- "tbz x14, #1, 91f\n"
- "st1 { v9.s }[2], [x13], #0x4\n"
- "st1 { v13.s }[2], [x25], #0x4\n"
- "tbz x14, #0, 98f\n"
- "st1 { v9.h }[6], [x13]\n"
- "st1 { v13.h }[6], [x25]\n"
+ "tbz x13, #3, 94f\n"
+ "st1 { v8.8h }, [x12], #0x10\n"
+ "st1 { v12.8h }, [x24], #0x10\n"
+ "tbz x13, #2, 92f\n"
+ "str d9, [x12], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "tbz x13, #1, 91f\n"
+ "st1 { v9.s }[2], [x12], #0x4\n"
+ "st1 { v13.s }[2], [x24], #0x4\n"
+ "tbz x13, #0, 98f\n"
+ "st1 { v9.h }[6], [x12]\n"
+ "st1 { v13.h }[6], [x24]\n"
"b 98f\n"
"91:" // Height 2: Partial direct writeback: partial_1_12
- "tbz x14, #0, 98f\n"
- "st1 { v9.h }[4], [x13]\n"
- "st1 { v13.h }[4], [x25]\n"
+ "tbz x13, #0, 98f\n"
+ "st1 { v9.h }[4], [x12]\n"
+ "st1 { v13.h }[4], [x24]\n"
"b 98f\n"
"92:" // Height 2: Partial direct writeback: partial_2_8
- "tbz x14, #1, 93f\n"
- "str s9, [x13], #0x4\n"
- "str s13, [x25], #0x4\n"
- "tbz x14, #0, 98f\n"
- "st1 { v9.h }[2], [x13]\n"
- "st1 { v13.h }[2], [x25]\n"
+ "tbz x13, #1, 93f\n"
+ "str s9, [x12], #0x4\n"
+ "str s13, [x24], #0x4\n"
+ "tbz x13, #0, 98f\n"
+ "st1 { v9.h }[2], [x12]\n"
+ "st1 { v13.h }[2], [x24]\n"
"b 98f\n"
"93:" // Height 2: Partial direct writeback: partial_1_8
- "tbz x14, #0, 98f\n"
- "str h9, [x13, #0x0]\n"
- "str h13, [x25, #0x0]\n"
+ "tbz x13, #0, 98f\n"
+ "str h9, [x12, #0x0]\n"
+ "str h13, [x24, #0x0]\n"
"b 98f\n"
"94:" // Height 2: Partial direct writeback: partial_4_0
- "tbz x14, #2, 96f\n"
- "str d8, [x13], #0x8\n"
- "str d12, [x25], #0x8\n"
- "tbz x14, #1, 95f\n"
- "st1 { v8.s }[2], [x13], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
- "tbz x14, #0, 98f\n"
- "st1 { v8.h }[6], [x13]\n"
- "st1 { v12.h }[6], [x25]\n"
+ "tbz x13, #2, 96f\n"
+ "str d8, [x12], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "tbz x13, #1, 95f\n"
+ "st1 { v8.s }[2], [x12], #0x4\n"
+ "st1 { v12.s }[2], [x24], #0x4\n"
+ "tbz x13, #0, 98f\n"
+ "st1 { v8.h }[6], [x12]\n"
+ "st1 { v12.h }[6], [x24]\n"
"b 98f\n"
"95:" // Height 2: Partial direct writeback: partial_1_4
- "tbz x14, #0, 98f\n"
- "st1 { v8.h }[4], [x13]\n"
- "st1 { v12.h }[4], [x25]\n"
+ "tbz x13, #0, 98f\n"
+ "st1 { v8.h }[4], [x12]\n"
+ "st1 { v12.h }[4], [x24]\n"
"b 98f\n"
"96:" // Height 2: Partial direct writeback: partial_2_0
- "tbz x14, #1, 97f\n"
- "str s8, [x13], #0x4\n"
- "str s12, [x25], #0x4\n"
- "tbz x14, #0, 98f\n"
- "st1 { v8.h }[2], [x13]\n"
- "st1 { v12.h }[2], [x25]\n"
+ "tbz x13, #1, 97f\n"
+ "str s8, [x12], #0x4\n"
+ "str s12, [x24], #0x4\n"
+ "tbz x13, #0, 98f\n"
+ "st1 { v8.h }[2], [x12]\n"
+ "st1 { v12.h }[2], [x24]\n"
"b 98f\n"
"97:" // Height 2: Partial direct writeback: partial_1_0
- "str h8, [x13, #0x0]\n"
- "str h12, [x25, #0x0]\n"
+ "str h8, [x12, #0x0]\n"
+ "str h12, [x24, #0x0]\n"
"98:" // Height 2: Partial direct writeback: Done
"b 100f\n"
"99:" // Height 2: Full writeback
- "str q8, [x13, #0x0]\n"
- "str q9, [x13, #0x10]\n"
- "str q10, [x13, #0x20]\n"
- "str q11, [x13, #0x30]\n"
- "add x13, x13, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
+ "str q8, [x12, #0x0]\n"
+ "str q9, [x12, #0x10]\n"
+ "str q10, [x12, #0x20]\n"
+ "str q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
"100:" // Height 2: Writeback done
- "subs x14, x14, #0x20\n"
+ "subs x13, x13, #0x20\n"
"bgt 52b\n"
"b 302f\n"
"101:" // Height 3
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"102:" // Height 3: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x20, x9, x20, LSL #1\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0x18\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0x18\n"
"bgt 103f\n"
- "cmp x14, #0x10\n"
- "mov x9, x12\n"
+ "cmp x13, #0x10\n"
+ "mov x28, x11\n"
"bgt 103f\n"
- "cmp x14, #0x8\n"
- "mov x10, x12\n"
+ "cmp x13, #0x8\n"
+ "mov x9, x11\n"
"bgt 103f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"103:" // Height 3: B setup done
- "cbz x15, 104f\n"
- "ldr q8, [x15, #0x0]\n"
- "ldr q9, [x15, #0x10]\n"
+ "cbz x14, 104f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
"mov v12.16b, v8.16b\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x15, #0x20]\n"
- "ldr q11, [x15, #0x30]\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
"mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
"mov v16.16b, v8.16b\n"
"mov v17.16b, v9.16b\n"
- "add x15, x15, #0x40\n"
+ "add x14, x14, #0x40\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
"b 123f\n"
"104:" // Height 3: no bias
"tbz %x[flags], #0, 122f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "cmp x14, #0x20\n"
- "add x24, x25, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "cmp x13, #0x20\n"
+ "add x23, x24, x19, LSL #1\n"
"bge 121f\n"
- "tbz x14, #4, 112f\n"
- "ld1 { v8.8h }, [x13], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v9.8h }, [x13], #0x10\n"
- "ld1 { v13.8h }, [x25], #0x10\n"
- "ld1 { v17.8h }, [x24], #0x10\n"
- "tbz x14, #3, 108f\n"
- "ld1 { v10.8h }, [x13], #0x10\n"
- "ld1 { v14.8h }, [x25], #0x10\n"
- "ld1 { v18.8h }, [x24], #0x10\n"
- "tbz x14, #2, 106f\n"
- "ldr d11, [x13], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "ldr d19, [x24], #0x8\n"
- "tbz x14, #1, 105f\n"
- "ld1 { v11.s }[2], [x13], #0x4\n"
- "ld1 { v15.s }[2], [x25], #0x4\n"
- "mov x20, #0x3c\n"
- "ld1 { v19.s }[2], [x24], #0x4\n"
- "tbz x14, #0, 120f\n"
- "ld1 { v11.h }[6], [x13]\n"
- "ld1 { v15.h }[6], [x25]\n"
- "ld1 { v19.h }[6], [x24]\n"
+ "tbz x13, #4, 112f\n"
+ "ld1 { v8.8h }, [x12], #0x10\n"
+ "ld1 { v12.8h }, [x24], #0x10\n"
+ "ld1 { v16.8h }, [x23], #0x10\n"
+ "ld1 { v9.8h }, [x12], #0x10\n"
+ "ld1 { v13.8h }, [x24], #0x10\n"
+ "ld1 { v17.8h }, [x23], #0x10\n"
+ "tbz x13, #3, 108f\n"
+ "ld1 { v10.8h }, [x12], #0x10\n"
+ "ld1 { v14.8h }, [x24], #0x10\n"
+ "ld1 { v18.8h }, [x23], #0x10\n"
+ "tbz x13, #2, 106f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "ldr d19, [x23], #0x8\n"
+ "tbz x13, #1, 105f\n"
+ "ld1 { v11.s }[2], [x12], #0x4\n"
+ "ld1 { v15.s }[2], [x24], #0x4\n"
+ "mov x19, #0x3c\n"
+ "ld1 { v19.s }[2], [x23], #0x4\n"
+ "tbz x13, #0, 120f\n"
+ "ld1 { v11.h }[6], [x12]\n"
+ "ld1 { v15.h }[6], [x24]\n"
+ "ld1 { v19.h }[6], [x23]\n"
"b 120f\n"
"105:" // Height 3: Partial accumulate: partial_1_28
- "mov x20, #0x38\n"
- "tbz x14, #0, 120f\n"
- "ld1 { v11.h }[4], [x13]\n"
- "ld1 { v15.h }[4], [x25]\n"
- "ld1 { v19.h }[4], [x24]\n"
+ "mov x19, #0x38\n"
+ "tbz x13, #0, 120f\n"
+ "ld1 { v11.h }[4], [x12]\n"
+ "ld1 { v15.h }[4], [x24]\n"
+ "ld1 { v19.h }[4], [x23]\n"
"b 120f\n"
"106:" // Height 3: Partial accumulate: partial_2_24
- "tbz x14, #1, 107f\n"
- "ldr s11, [x13], #0x4\n"
- "ldr s15, [x25], #0x4\n"
- "mov x20, #0x34\n"
- "ldr s19, [x24], #0x4\n"
- "tbz x14, #0, 120f\n"
- "ld1 { v11.h }[2], [x13]\n"
- "ld1 { v15.h }[2], [x25]\n"
- "ld1 { v19.h }[2], [x24]\n"
+ "tbz x13, #1, 107f\n"
+ "ldr s11, [x12], #0x4\n"
+ "ldr s15, [x24], #0x4\n"
+ "mov x19, #0x34\n"
+ "ldr s19, [x23], #0x4\n"
+ "tbz x13, #0, 120f\n"
+ "ld1 { v11.h }[2], [x12]\n"
+ "ld1 { v15.h }[2], [x24]\n"
+ "ld1 { v19.h }[2], [x23]\n"
"b 120f\n"
"107:" // Height 3: Partial accumulate: partial_1_24
- "mov x20, #0x30\n"
- "tbz x14, #0, 120f\n"
- "ldr h11, [x13, #0x0]\n"
- "ldr h15, [x25, #0x0]\n"
- "ldr h19, [x24, #0x0]\n"
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 120f\n"
+ "ldr h11, [x12, #0x0]\n"
+ "ldr h15, [x24, #0x0]\n"
+ "ldr h19, [x23, #0x0]\n"
"b 120f\n"
"108:" // Height 3: Partial accumulate: partial_4_16
- "tbz x14, #2, 110f\n"
- "ldr d10, [x13], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "ldr d18, [x24], #0x8\n"
- "tbz x14, #1, 109f\n"
- "ld1 { v10.s }[2], [x13], #0x4\n"
- "ld1 { v14.s }[2], [x25], #0x4\n"
- "mov x20, #0x2c\n"
- "ld1 { v18.s }[2], [x24], #0x4\n"
- "tbz x14, #0, 120f\n"
- "ld1 { v10.h }[6], [x13]\n"
- "ld1 { v14.h }[6], [x25]\n"
- "ld1 { v18.h }[6], [x24]\n"
+ "tbz x13, #2, 110f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "ldr d18, [x23], #0x8\n"
+ "tbz x13, #1, 109f\n"
+ "ld1 { v10.s }[2], [x12], #0x4\n"
+ "ld1 { v14.s }[2], [x24], #0x4\n"
+ "mov x19, #0x2c\n"
+ "ld1 { v18.s }[2], [x23], #0x4\n"
+ "tbz x13, #0, 120f\n"
+ "ld1 { v10.h }[6], [x12]\n"
+ "ld1 { v14.h }[6], [x24]\n"
+ "ld1 { v18.h }[6], [x23]\n"
"b 120f\n"
"109:" // Height 3: Partial accumulate: partial_1_20
- "mov x20, #0x28\n"
- "tbz x14, #0, 120f\n"
- "ld1 { v10.h }[4], [x13]\n"
- "ld1 { v14.h }[4], [x25]\n"
- "ld1 { v18.h }[4], [x24]\n"
+ "mov x19, #0x28\n"
+ "tbz x13, #0, 120f\n"
+ "ld1 { v10.h }[4], [x12]\n"
+ "ld1 { v14.h }[4], [x24]\n"
+ "ld1 { v18.h }[4], [x23]\n"
"b 120f\n"
"110:" // Height 3: Partial accumulate: partial_2_16
- "tbz x14, #1, 111f\n"
- "ldr s10, [x13], #0x4\n"
- "ldr s14, [x25], #0x4\n"
- "mov x20, #0x24\n"
- "ldr s18, [x24], #0x4\n"
- "tbz x14, #0, 120f\n"
- "ld1 { v10.h }[2], [x13]\n"
- "ld1 { v14.h }[2], [x25]\n"
- "ld1 { v18.h }[2], [x24]\n"
+ "tbz x13, #1, 111f\n"
+ "ldr s10, [x12], #0x4\n"
+ "ldr s14, [x24], #0x4\n"
+ "mov x19, #0x24\n"
+ "ldr s18, [x23], #0x4\n"
+ "tbz x13, #0, 120f\n"
+ "ld1 { v10.h }[2], [x12]\n"
+ "ld1 { v14.h }[2], [x24]\n"
+ "ld1 { v18.h }[2], [x23]\n"
"b 120f\n"
"111:" // Height 3: Partial accumulate: partial_1_16
- "mov x20, #0x20\n"
- "tbz x14, #0, 120f\n"
- "ldr h10, [x13, #0x0]\n"
- "ldr h14, [x25, #0x0]\n"
- "ldr h18, [x24, #0x0]\n"
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 120f\n"
+ "ldr h10, [x12, #0x0]\n"
+ "ldr h14, [x24, #0x0]\n"
+ "ldr h18, [x23, #0x0]\n"
"b 120f\n"
"112:" // Height 3: Partial accumulate: partial_8_0
- "tbz x14, #3, 116f\n"
- "ld1 { v8.8h }, [x13], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
- "tbz x14, #2, 114f\n"
- "ldr d9, [x13], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "tbz x14, #1, 113f\n"
- "ld1 { v9.s }[2], [x13], #0x4\n"
- "ld1 { v13.s }[2], [x25], #0x4\n"
- "mov x20, #0x1c\n"
- "ld1 { v17.s }[2], [x24], #0x4\n"
- "tbz x14, #0, 120f\n"
- "ld1 { v9.h }[6], [x13]\n"
- "ld1 { v13.h }[6], [x25]\n"
- "ld1 { v17.h }[6], [x24]\n"
+ "tbz x13, #3, 116f\n"
+ "ld1 { v8.8h }, [x12], #0x10\n"
+ "ld1 { v12.8h }, [x24], #0x10\n"
+ "ld1 { v16.8h }, [x23], #0x10\n"
+ "tbz x13, #2, 114f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "tbz x13, #1, 113f\n"
+ "ld1 { v9.s }[2], [x12], #0x4\n"
+ "ld1 { v13.s }[2], [x24], #0x4\n"
+ "mov x19, #0x1c\n"
+ "ld1 { v17.s }[2], [x23], #0x4\n"
+ "tbz x13, #0, 120f\n"
+ "ld1 { v9.h }[6], [x12]\n"
+ "ld1 { v13.h }[6], [x24]\n"
+ "ld1 { v17.h }[6], [x23]\n"
"b 120f\n"
"113:" // Height 3: Partial accumulate: partial_1_12
- "mov x20, #0x18\n"
- "tbz x14, #0, 120f\n"
- "ld1 { v9.h }[4], [x13]\n"
- "ld1 { v13.h }[4], [x25]\n"
- "ld1 { v17.h }[4], [x24]\n"
+ "mov x19, #0x18\n"
+ "tbz x13, #0, 120f\n"
+ "ld1 { v9.h }[4], [x12]\n"
+ "ld1 { v13.h }[4], [x24]\n"
+ "ld1 { v17.h }[4], [x23]\n"
"b 120f\n"
"114:" // Height 3: Partial accumulate: partial_2_8
- "tbz x14, #1, 115f\n"
- "ldr s9, [x13], #0x4\n"
- "ldr s13, [x25], #0x4\n"
- "mov x20, #0x14\n"
- "ldr s17, [x24], #0x4\n"
- "tbz x14, #0, 120f\n"
- "ld1 { v9.h }[2], [x13]\n"
- "ld1 { v13.h }[2], [x25]\n"
- "ld1 { v17.h }[2], [x24]\n"
+ "tbz x13, #1, 115f\n"
+ "ldr s9, [x12], #0x4\n"
+ "ldr s13, [x24], #0x4\n"
+ "mov x19, #0x14\n"
+ "ldr s17, [x23], #0x4\n"
+ "tbz x13, #0, 120f\n"
+ "ld1 { v9.h }[2], [x12]\n"
+ "ld1 { v13.h }[2], [x24]\n"
+ "ld1 { v17.h }[2], [x23]\n"
"b 120f\n"
"115:" // Height 3: Partial accumulate: partial_1_8
- "mov x20, #0x10\n"
- "tbz x14, #0, 120f\n"
- "ldr h9, [x13, #0x0]\n"
- "ldr h13, [x25, #0x0]\n"
- "ldr h17, [x24, #0x0]\n"
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 120f\n"
+ "ldr h9, [x12, #0x0]\n"
+ "ldr h13, [x24, #0x0]\n"
+ "ldr h17, [x23, #0x0]\n"
"b 120f\n"
"116:" // Height 3: Partial accumulate: partial_4_0
- "tbz x14, #2, 118f\n"
- "ldr d8, [x13], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "tbz x14, #1, 117f\n"
- "ld1 { v8.s }[2], [x13], #0x4\n"
- "ld1 { v12.s }[2], [x25], #0x4\n"
- "mov x20, #0xc\n"
- "ld1 { v16.s }[2], [x24], #0x4\n"
- "tbz x14, #0, 120f\n"
- "ld1 { v8.h }[6], [x13]\n"
- "ld1 { v12.h }[6], [x25]\n"
- "ld1 { v16.h }[6], [x24]\n"
+ "tbz x13, #2, 118f\n"
+ "ldr d8, [x12], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "ldr d16, [x23], #0x8\n"
+ "tbz x13, #1, 117f\n"
+ "ld1 { v8.s }[2], [x12], #0x4\n"
+ "ld1 { v12.s }[2], [x24], #0x4\n"
+ "mov x19, #0xc\n"
+ "ld1 { v16.s }[2], [x23], #0x4\n"
+ "tbz x13, #0, 120f\n"
+ "ld1 { v8.h }[6], [x12]\n"
+ "ld1 { v12.h }[6], [x24]\n"
+ "ld1 { v16.h }[6], [x23]\n"
"b 120f\n"
"117:" // Height 3: Partial accumulate: partial_1_4
- "mov x20, #0x8\n"
- "tbz x14, #0, 120f\n"
- "ld1 { v8.h }[4], [x13]\n"
- "ld1 { v12.h }[4], [x25]\n"
- "ld1 { v16.h }[4], [x24]\n"
+ "mov x19, #0x8\n"
+ "tbz x13, #0, 120f\n"
+ "ld1 { v8.h }[4], [x12]\n"
+ "ld1 { v12.h }[4], [x24]\n"
+ "ld1 { v16.h }[4], [x23]\n"
"b 120f\n"
"118:" // Height 3: Partial accumulate: partial_2_0
- "tbz x14, #1, 119f\n"
- "ldr s8, [x13], #0x4\n"
- "ldr s12, [x25], #0x4\n"
- "mov x20, #0x4\n"
- "ldr s16, [x24], #0x4\n"
- "tbz x14, #0, 120f\n"
- "ld1 { v8.h }[2], [x13]\n"
- "ld1 { v12.h }[2], [x25]\n"
- "ld1 { v16.h }[2], [x24]\n"
+ "tbz x13, #1, 119f\n"
+ "ldr s8, [x12], #0x4\n"
+ "ldr s12, [x24], #0x4\n"
+ "mov x19, #0x4\n"
+ "ldr s16, [x23], #0x4\n"
+ "tbz x13, #0, 120f\n"
+ "ld1 { v8.h }[2], [x12]\n"
+ "ld1 { v12.h }[2], [x24]\n"
+ "ld1 { v16.h }[2], [x23]\n"
"b 120f\n"
"119:" // Height 3: Partial accumulate: partial_1_0
- "ldr h8, [x13, #0x0]\n"
- "ldr h12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr h16, [x24, #0x0]\n"
+ "ldr h8, [x12, #0x0]\n"
+ "ldr h12, [x24, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr h16, [x23, #0x0]\n"
"120:" // Height 3: Partial accumulate: Done
- "sub x13, x13, x20\n"
+ "sub x12, x12, x19\n"
"b 123f\n"
"121:" // Height 3: full accumulate
- "ldr q8, [x13, #0x0]\n"
- "ldr q9, [x13, #0x10]\n"
- "ldr q10, [x13, #0x20]\n"
- "ldr q11, [x13, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
+ "ldr q8, [x12, #0x0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "ldr q10, [x12, #0x20]\n"
+ "ldr q11, [x12, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x0]\n"
+ "ldr q17, [x23, #0x10]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "ldr q19, [x23, #0x30]\n"
"b 123f\n"
"122:" // Height 3: no accumulate
"movi v8.16b, #0x0\n"
@@ -1454,308 +1454,308 @@ void a64_ffhybrid_fp16_mla_6x32 (
"movi v18.16b, #0x0\n"
"movi v19.16b, #0x0\n"
"123:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"124:" // Height 3: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 125f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 126f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 126f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
"b 126f\n"
"125:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
"126:" // Height 3: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"blt 129f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x10\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q6, [x12, #0x0]\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x10\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"blt 128f\n"
"127:" // Height 3: Multiply loop: Main loop head
"fmla v8.8h, v6.8h, v0.h[0]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
- "sub x27, x27, #0x8\n"
- "cmp x27, #0x10\n"
+ "sub x26, x26, #0x8\n"
+ "cmp x26, #0x10\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
- "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
- "ldr q7, [x9, #0x0]\n"
- "add x25, x25, #0x10\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x24, x24, #0x10\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
- "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
"fmla v18.8h, v6.8h, v2.h[0]\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q6, [x11, #0x10]\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
"fmla v15.8h, v7.8h, v1.h[0]\n"
"fmla v19.8h, v7.8h, v2.h[0]\n"
- "ldr q7, [x11, #0x10]\n"
+ "ldr q7, [x10, #0x10]\n"
"fmla v8.8h, v6.8h, v0.h[1]\n"
"fmla v12.8h, v6.8h, v1.h[1]\n"
"fmla v16.8h, v6.8h, v2.h[1]\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
"fmla v9.8h, v7.8h, v0.h[1]\n"
"fmla v13.8h, v7.8h, v1.h[1]\n"
"fmla v17.8h, v7.8h, v2.h[1]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
"fmla v10.8h, v6.8h, v0.h[1]\n"
"fmla v14.8h, v6.8h, v1.h[1]\n"
"fmla v18.8h, v6.8h, v2.h[1]\n"
- "ldr q6, [x12, #0x20]\n"
+ "ldr q6, [x11, #0x20]\n"
"fmla v11.8h, v7.8h, v0.h[1]\n"
"fmla v15.8h, v7.8h, v1.h[1]\n"
"fmla v19.8h, v7.8h, v2.h[1]\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
"fmla v8.8h, v6.8h, v0.h[2]\n"
"fmla v12.8h, v6.8h, v1.h[2]\n"
"fmla v16.8h, v6.8h, v2.h[2]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
"fmla v9.8h, v7.8h, v0.h[2]\n"
"fmla v13.8h, v7.8h, v1.h[2]\n"
"fmla v17.8h, v7.8h, v2.h[2]\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
"fmla v10.8h, v6.8h, v0.h[2]\n"
"fmla v14.8h, v6.8h, v1.h[2]\n"
"fmla v18.8h, v6.8h, v2.h[2]\n"
- "ldr q6, [x12, #0x30]\n"
+ "ldr q6, [x11, #0x30]\n"
"fmla v11.8h, v7.8h, v0.h[2]\n"
"fmla v15.8h, v7.8h, v1.h[2]\n"
"fmla v19.8h, v7.8h, v2.h[2]\n"
- "ldr q7, [x11, #0x30]\n"
+ "ldr q7, [x10, #0x30]\n"
"fmla v8.8h, v6.8h, v0.h[3]\n"
"fmla v12.8h, v6.8h, v1.h[3]\n"
"fmla v16.8h, v6.8h, v2.h[3]\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
"fmla v9.8h, v7.8h, v0.h[3]\n"
"fmla v13.8h, v7.8h, v1.h[3]\n"
"fmla v17.8h, v7.8h, v2.h[3]\n"
- "ldr q7, [x9, #0x30]\n"
+ "ldr q7, [x28, #0x30]\n"
"fmla v10.8h, v6.8h, v0.h[3]\n"
"fmla v14.8h, v6.8h, v1.h[3]\n"
"fmla v18.8h, v6.8h, v2.h[3]\n"
- "ldr q6, [x12, #0x40]\n"
+ "ldr q6, [x11, #0x40]\n"
"fmla v11.8h, v7.8h, v0.h[3]\n"
"fmla v15.8h, v7.8h, v1.h[3]\n"
"fmla v19.8h, v7.8h, v2.h[3]\n"
- "ldr q7, [x11, #0x40]\n"
+ "ldr q7, [x10, #0x40]\n"
"fmla v8.8h, v6.8h, v0.h[4]\n"
"fmla v12.8h, v6.8h, v1.h[4]\n"
"fmla v16.8h, v6.8h, v2.h[4]\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
"fmla v9.8h, v7.8h, v0.h[4]\n"
"fmla v13.8h, v7.8h, v1.h[4]\n"
"fmla v17.8h, v7.8h, v2.h[4]\n"
- "ldr q7, [x9, #0x40]\n"
+ "ldr q7, [x28, #0x40]\n"
"fmla v10.8h, v6.8h, v0.h[4]\n"
"fmla v14.8h, v6.8h, v1.h[4]\n"
"fmla v18.8h, v6.8h, v2.h[4]\n"
- "ldr q6, [x12, #0x50]\n"
+ "ldr q6, [x11, #0x50]\n"
"fmla v11.8h, v7.8h, v0.h[4]\n"
"fmla v15.8h, v7.8h, v1.h[4]\n"
"fmla v19.8h, v7.8h, v2.h[4]\n"
- "ldr q7, [x11, #0x50]\n"
+ "ldr q7, [x10, #0x50]\n"
"fmla v8.8h, v6.8h, v0.h[5]\n"
"fmla v12.8h, v6.8h, v1.h[5]\n"
"fmla v16.8h, v6.8h, v2.h[5]\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
"fmla v9.8h, v7.8h, v0.h[5]\n"
"fmla v13.8h, v7.8h, v1.h[5]\n"
"fmla v17.8h, v7.8h, v2.h[5]\n"
- "ldr q7, [x9, #0x50]\n"
+ "ldr q7, [x28, #0x50]\n"
"fmla v10.8h, v6.8h, v0.h[5]\n"
"fmla v14.8h, v6.8h, v1.h[5]\n"
"fmla v18.8h, v6.8h, v2.h[5]\n"
- "ldr q6, [x12, #0x60]\n"
+ "ldr q6, [x11, #0x60]\n"
"fmla v11.8h, v7.8h, v0.h[5]\n"
"fmla v15.8h, v7.8h, v1.h[5]\n"
"fmla v19.8h, v7.8h, v2.h[5]\n"
- "ldr q7, [x11, #0x60]\n"
+ "ldr q7, [x10, #0x60]\n"
"fmla v8.8h, v6.8h, v0.h[6]\n"
"fmla v12.8h, v6.8h, v1.h[6]\n"
"fmla v16.8h, v6.8h, v2.h[6]\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
"fmla v9.8h, v7.8h, v0.h[6]\n"
"fmla v13.8h, v7.8h, v1.h[6]\n"
"fmla v17.8h, v7.8h, v2.h[6]\n"
- "ldr q7, [x9, #0x60]\n"
+ "ldr q7, [x28, #0x60]\n"
"fmla v10.8h, v6.8h, v0.h[6]\n"
"fmla v14.8h, v6.8h, v1.h[6]\n"
"fmla v18.8h, v6.8h, v2.h[6]\n"
- "ldr q6, [x12, #0x70]\n"
+ "ldr q6, [x11, #0x70]\n"
"fmla v11.8h, v7.8h, v0.h[6]\n"
- "add x12, x12, #0x80\n"
+ "add x11, x11, #0x80\n"
"fmla v15.8h, v7.8h, v1.h[6]\n"
"fmla v19.8h, v7.8h, v2.h[6]\n"
- "ldr q7, [x11, #0x70]\n"
- "add x11, x11, #0x80\n"
+ "ldr q7, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
"fmla v8.8h, v6.8h, v0.h[7]\n"
"fmla v12.8h, v6.8h, v1.h[7]\n"
"fmla v16.8h, v6.8h, v2.h[7]\n"
- "ldr q6, [x10, #0x70]\n"
+ "ldr q6, [x9, #0x70]\n"
"fmla v9.8h, v7.8h, v0.h[7]\n"
- "add x10, x10, #0x80\n"
+ "add x9, x9, #0x80\n"
"fmla v13.8h, v7.8h, v1.h[7]\n"
"fmla v17.8h, v7.8h, v2.h[7]\n"
- "ldr q7, [x9, #0x70]\n"
- "add x9, x9, #0x80\n"
+ "ldr q7, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
"fmla v10.8h, v6.8h, v0.h[7]\n"
"fmla v14.8h, v6.8h, v1.h[7]\n"
"fmla v18.8h, v6.8h, v2.h[7]\n"
- "ldr q6, [x12, #0x0]\n"
+ "ldr q6, [x11, #0x0]\n"
"fmla v11.8h, v7.8h, v0.h[7]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
"fmla v15.8h, v7.8h, v1.h[7]\n"
- "ldr q1, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
"fmla v19.8h, v7.8h, v2.h[7]\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"bge 127b\n"
"128:" // Height 3: Multiply loop: Single iteration only
"fmla v8.8h, v6.8h, v0.h[0]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
- "sub x27, x27, #0x8\n"
- "add x26, x26, #0x10\n"
+ "sub x26, x26, #0x8\n"
+ "add x25, x25, #0x10\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
- "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
- "ldr q7, [x9, #0x0]\n"
- "add x24, x24, #0x10\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x23, x23, #0x10\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
"fmla v18.8h, v6.8h, v2.h[0]\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q6, [x11, #0x10]\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
"fmla v15.8h, v7.8h, v1.h[0]\n"
"fmla v19.8h, v7.8h, v2.h[0]\n"
- "ldr q7, [x11, #0x10]\n"
+ "ldr q7, [x10, #0x10]\n"
"fmla v8.8h, v6.8h, v0.h[1]\n"
"fmla v12.8h, v6.8h, v1.h[1]\n"
"fmla v16.8h, v6.8h, v2.h[1]\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
"fmla v9.8h, v7.8h, v0.h[1]\n"
"fmla v13.8h, v7.8h, v1.h[1]\n"
"fmla v17.8h, v7.8h, v2.h[1]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
"fmla v10.8h, v6.8h, v0.h[1]\n"
"fmla v14.8h, v6.8h, v1.h[1]\n"
"fmla v18.8h, v6.8h, v2.h[1]\n"
- "ldr q6, [x12, #0x20]\n"
+ "ldr q6, [x11, #0x20]\n"
"fmla v11.8h, v7.8h, v0.h[1]\n"
"fmla v15.8h, v7.8h, v1.h[1]\n"
"fmla v19.8h, v7.8h, v2.h[1]\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
"fmla v8.8h, v6.8h, v0.h[2]\n"
"fmla v12.8h, v6.8h, v1.h[2]\n"
"fmla v16.8h, v6.8h, v2.h[2]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
"fmla v9.8h, v7.8h, v0.h[2]\n"
"fmla v13.8h, v7.8h, v1.h[2]\n"
"fmla v17.8h, v7.8h, v2.h[2]\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
"fmla v10.8h, v6.8h, v0.h[2]\n"
"fmla v14.8h, v6.8h, v1.h[2]\n"
"fmla v18.8h, v6.8h, v2.h[2]\n"
- "ldr q6, [x12, #0x30]\n"
+ "ldr q6, [x11, #0x30]\n"
"fmla v11.8h, v7.8h, v0.h[2]\n"
"fmla v15.8h, v7.8h, v1.h[2]\n"
"fmla v19.8h, v7.8h, v2.h[2]\n"
- "ldr q7, [x11, #0x30]\n"
+ "ldr q7, [x10, #0x30]\n"
"fmla v8.8h, v6.8h, v0.h[3]\n"
"fmla v12.8h, v6.8h, v1.h[3]\n"
"fmla v16.8h, v6.8h, v2.h[3]\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
"fmla v9.8h, v7.8h, v0.h[3]\n"
"fmla v13.8h, v7.8h, v1.h[3]\n"
"fmla v17.8h, v7.8h, v2.h[3]\n"
- "ldr q7, [x9, #0x30]\n"
+ "ldr q7, [x28, #0x30]\n"
"fmla v10.8h, v6.8h, v0.h[3]\n"
"fmla v14.8h, v6.8h, v1.h[3]\n"
"fmla v18.8h, v6.8h, v2.h[3]\n"
- "ldr q6, [x12, #0x40]\n"
+ "ldr q6, [x11, #0x40]\n"
"fmla v11.8h, v7.8h, v0.h[3]\n"
"fmla v15.8h, v7.8h, v1.h[3]\n"
"fmla v19.8h, v7.8h, v2.h[3]\n"
- "ldr q7, [x11, #0x40]\n"
+ "ldr q7, [x10, #0x40]\n"
"fmla v8.8h, v6.8h, v0.h[4]\n"
"fmla v12.8h, v6.8h, v1.h[4]\n"
"fmla v16.8h, v6.8h, v2.h[4]\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
"fmla v9.8h, v7.8h, v0.h[4]\n"
"fmla v13.8h, v7.8h, v1.h[4]\n"
"fmla v17.8h, v7.8h, v2.h[4]\n"
- "ldr q7, [x9, #0x40]\n"
+ "ldr q7, [x28, #0x40]\n"
"fmla v10.8h, v6.8h, v0.h[4]\n"
"fmla v14.8h, v6.8h, v1.h[4]\n"
"fmla v18.8h, v6.8h, v2.h[4]\n"
- "ldr q6, [x12, #0x50]\n"
+ "ldr q6, [x11, #0x50]\n"
"fmla v11.8h, v7.8h, v0.h[4]\n"
"fmla v15.8h, v7.8h, v1.h[4]\n"
"fmla v19.8h, v7.8h, v2.h[4]\n"
- "ldr q7, [x11, #0x50]\n"
+ "ldr q7, [x10, #0x50]\n"
"fmla v8.8h, v6.8h, v0.h[5]\n"
"fmla v12.8h, v6.8h, v1.h[5]\n"
"fmla v16.8h, v6.8h, v2.h[5]\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
"fmla v9.8h, v7.8h, v0.h[5]\n"
"fmla v13.8h, v7.8h, v1.h[5]\n"
"fmla v17.8h, v7.8h, v2.h[5]\n"
- "ldr q7, [x9, #0x50]\n"
+ "ldr q7, [x28, #0x50]\n"
"fmla v10.8h, v6.8h, v0.h[5]\n"
"fmla v14.8h, v6.8h, v1.h[5]\n"
"fmla v18.8h, v6.8h, v2.h[5]\n"
- "ldr q6, [x12, #0x60]\n"
+ "ldr q6, [x11, #0x60]\n"
"fmla v11.8h, v7.8h, v0.h[5]\n"
"fmla v15.8h, v7.8h, v1.h[5]\n"
"fmla v19.8h, v7.8h, v2.h[5]\n"
- "ldr q7, [x11, #0x60]\n"
+ "ldr q7, [x10, #0x60]\n"
"fmla v8.8h, v6.8h, v0.h[6]\n"
"fmla v12.8h, v6.8h, v1.h[6]\n"
"fmla v16.8h, v6.8h, v2.h[6]\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
"fmla v9.8h, v7.8h, v0.h[6]\n"
"fmla v13.8h, v7.8h, v1.h[6]\n"
"fmla v17.8h, v7.8h, v2.h[6]\n"
- "ldr q7, [x9, #0x60]\n"
+ "ldr q7, [x28, #0x60]\n"
"fmla v10.8h, v6.8h, v0.h[6]\n"
"fmla v14.8h, v6.8h, v1.h[6]\n"
"fmla v18.8h, v6.8h, v2.h[6]\n"
- "ldr q6, [x12, #0x70]\n"
+ "ldr q6, [x11, #0x70]\n"
"fmla v11.8h, v7.8h, v0.h[6]\n"
- "add x12, x12, #0x80\n"
+ "add x11, x11, #0x80\n"
"fmla v15.8h, v7.8h, v1.h[6]\n"
"fmla v19.8h, v7.8h, v2.h[6]\n"
- "ldr q7, [x11, #0x70]\n"
- "add x11, x11, #0x80\n"
+ "ldr q7, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
"fmla v8.8h, v6.8h, v0.h[7]\n"
"fmla v12.8h, v6.8h, v1.h[7]\n"
"fmla v16.8h, v6.8h, v2.h[7]\n"
- "ldr q6, [x10, #0x70]\n"
+ "ldr q6, [x9, #0x70]\n"
"fmla v9.8h, v7.8h, v0.h[7]\n"
- "add x10, x10, #0x80\n"
+ "add x9, x9, #0x80\n"
"fmla v13.8h, v7.8h, v1.h[7]\n"
"fmla v17.8h, v7.8h, v2.h[7]\n"
- "ldr q7, [x9, #0x70]\n"
- "add x9, x9, #0x80\n"
+ "ldr q7, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
"fmla v10.8h, v6.8h, v0.h[7]\n"
"fmla v14.8h, v6.8h, v1.h[7]\n"
"fmla v18.8h, v6.8h, v2.h[7]\n"
@@ -1763,46 +1763,46 @@ void a64_ffhybrid_fp16_mla_6x32 (
"fmla v15.8h, v7.8h, v1.h[7]\n"
"fmla v19.8h, v7.8h, v2.h[7]\n"
"129:" // Height 3: Multiply loop: Main loop skip
- "cbz x27, 131f\n"
+ "cbz x26, 131f\n"
"130:" // Height 3: Multiply loop: Odd block loop
- "ldr h0, [x26], #0x2\n"
- "ldr h1, [x25], #0x2\n"
- "sub x27, x27, #0x1\n"
- "ldr h2, [x24], #0x2\n"
- "ldr q6, [x12, #0x0]\n"
+ "ldr h0, [x25], #0x2\n"
+ "ldr h1, [x24], #0x2\n"
+ "sub x26, x26, #0x1\n"
+ "ldr h2, [x23], #0x2\n"
+ "ldr q6, [x11, #0x0]\n"
"fmla v8.8h, v6.8h, v0.h[0]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
- "ldr q7, [x9, #0x0]\n"
- "add x12, x12, #0x10\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x11, x11, #0x10\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
- "add x11, x11, #0x10\n"
"add x10, x10, #0x10\n"
+ "add x9, x9, #0x10\n"
"fmla v18.8h, v6.8h, v2.h[0]\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
- "add x9, x9, #0x10\n"
+ "add x28, x28, #0x10\n"
"fmla v15.8h, v7.8h, v1.h[0]\n"
"fmla v19.8h, v7.8h, v2.h[0]\n"
- "cbnz x27, 130b\n"
+ "cbnz x26, 130b\n"
"131:" // Height 3: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 124b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
"tbz %x[flags], #1, 132f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.8h }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.8h }, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.8h }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.8h }, [x19]\n"
"fmin v8.8h, v8.8h, v1.8h\n"
"fmin v9.8h, v9.8h, v1.8h\n"
"fmin v10.8h, v10.8h, v1.8h\n"
@@ -1828,220 +1828,220 @@ void a64_ffhybrid_fp16_mla_6x32 (
"fmax v18.8h, v18.8h, v0.8h\n"
"fmax v19.8h, v19.8h, v0.8h\n"
"132:" // Height 3: No activation
- "cmp x14, #0x20\n"
+ "cmp x13, #0x20\n"
"bge 149f\n"
- "tbz x14, #4, 140f\n"
- "st1 { v8.8h }, [x13], #0x10\n"
- "st1 { v9.8h }, [x13], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v13.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v17.8h }, [x24], #0x10\n"
- "tbz x14, #3, 136f\n"
- "st1 { v10.8h }, [x13], #0x10\n"
- "st1 { v14.8h }, [x25], #0x10\n"
- "st1 { v18.8h }, [x24], #0x10\n"
- "tbz x14, #2, 134f\n"
- "str d11, [x13], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "tbz x14, #1, 133f\n"
- "st1 { v11.s }[2], [x13], #0x4\n"
- "st1 { v15.s }[2], [x25], #0x4\n"
- "st1 { v19.s }[2], [x24], #0x4\n"
- "tbz x14, #0, 148f\n"
- "st1 { v11.h }[6], [x13]\n"
- "st1 { v15.h }[6], [x25]\n"
- "st1 { v19.h }[6], [x24]\n"
+ "tbz x13, #4, 140f\n"
+ "st1 { v8.8h }, [x12], #0x10\n"
+ "st1 { v9.8h }, [x12], #0x10\n"
+ "st1 { v12.8h }, [x24], #0x10\n"
+ "st1 { v13.8h }, [x24], #0x10\n"
+ "st1 { v16.8h }, [x23], #0x10\n"
+ "st1 { v17.8h }, [x23], #0x10\n"
+ "tbz x13, #3, 136f\n"
+ "st1 { v10.8h }, [x12], #0x10\n"
+ "st1 { v14.8h }, [x24], #0x10\n"
+ "st1 { v18.8h }, [x23], #0x10\n"
+ "tbz x13, #2, 134f\n"
+ "str d11, [x12], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "tbz x13, #1, 133f\n"
+ "st1 { v11.s }[2], [x12], #0x4\n"
+ "st1 { v15.s }[2], [x24], #0x4\n"
+ "st1 { v19.s }[2], [x23], #0x4\n"
+ "tbz x13, #0, 148f\n"
+ "st1 { v11.h }[6], [x12]\n"
+ "st1 { v15.h }[6], [x24]\n"
+ "st1 { v19.h }[6], [x23]\n"
"b 148f\n"
"133:" // Height 3: Partial direct writeback: partial_1_28
- "tbz x14, #0, 148f\n"
- "st1 { v11.h }[4], [x13]\n"
- "st1 { v15.h }[4], [x25]\n"
- "st1 { v19.h }[4], [x24]\n"
+ "tbz x13, #0, 148f\n"
+ "st1 { v11.h }[4], [x12]\n"
+ "st1 { v15.h }[4], [x24]\n"
+ "st1 { v19.h }[4], [x23]\n"
"b 148f\n"
"134:" // Height 3: Partial direct writeback: partial_2_24
- "tbz x14, #1, 135f\n"
- "str s11, [x13], #0x4\n"
- "str s15, [x25], #0x4\n"
- "str s19, [x24], #0x4\n"
- "tbz x14, #0, 148f\n"
- "st1 { v11.h }[2], [x13]\n"
- "st1 { v15.h }[2], [x25]\n"
- "st1 { v19.h }[2], [x24]\n"
+ "tbz x13, #1, 135f\n"
+ "str s11, [x12], #0x4\n"
+ "str s15, [x24], #0x4\n"
+ "str s19, [x23], #0x4\n"
+ "tbz x13, #0, 148f\n"
+ "st1 { v11.h }[2], [x12]\n"
+ "st1 { v15.h }[2], [x24]\n"
+ "st1 { v19.h }[2], [x23]\n"
"b 148f\n"
"135:" // Height 3: Partial direct writeback: partial_1_24
- "tbz x14, #0, 148f\n"
- "str h11, [x13, #0x0]\n"
- "str h15, [x25, #0x0]\n"
- "str h19, [x24, #0x0]\n"
+ "tbz x13, #0, 148f\n"
+ "str h11, [x12, #0x0]\n"
+ "str h15, [x24, #0x0]\n"
+ "str h19, [x23, #0x0]\n"
"b 148f\n"
"136:" // Height 3: Partial direct writeback: partial_4_16
- "tbz x14, #2, 138f\n"
- "str d10, [x13], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "tbz x14, #1, 137f\n"
- "st1 { v10.s }[2], [x13], #0x4\n"
- "st1 { v14.s }[2], [x25], #0x4\n"
- "st1 { v18.s }[2], [x24], #0x4\n"
- "tbz x14, #0, 148f\n"
- "st1 { v10.h }[6], [x13]\n"
- "st1 { v14.h }[6], [x25]\n"
- "st1 { v18.h }[6], [x24]\n"
+ "tbz x13, #2, 138f\n"
+ "str d10, [x12], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
+ "tbz x13, #1, 137f\n"
+ "st1 { v10.s }[2], [x12], #0x4\n"
+ "st1 { v14.s }[2], [x24], #0x4\n"
+ "st1 { v18.s }[2], [x23], #0x4\n"
+ "tbz x13, #0, 148f\n"
+ "st1 { v10.h }[6], [x12]\n"
+ "st1 { v14.h }[6], [x24]\n"
+ "st1 { v18.h }[6], [x23]\n"
"b 148f\n"
"137:" // Height 3: Partial direct writeback: partial_1_20
- "tbz x14, #0, 148f\n"
- "st1 { v10.h }[4], [x13]\n"
- "st1 { v14.h }[4], [x25]\n"
- "st1 { v18.h }[4], [x24]\n"
+ "tbz x13, #0, 148f\n"
+ "st1 { v10.h }[4], [x12]\n"
+ "st1 { v14.h }[4], [x24]\n"
+ "st1 { v18.h }[4], [x23]\n"
"b 148f\n"
"138:" // Height 3: Partial direct writeback: partial_2_16
- "tbz x14, #1, 139f\n"
- "str s10, [x13], #0x4\n"
- "str s14, [x25], #0x4\n"
- "str s18, [x24], #0x4\n"
- "tbz x14, #0, 148f\n"
- "st1 { v10.h }[2], [x13]\n"
- "st1 { v14.h }[2], [x25]\n"
- "st1 { v18.h }[2], [x24]\n"
+ "tbz x13, #1, 139f\n"
+ "str s10, [x12], #0x4\n"
+ "str s14, [x24], #0x4\n"
+ "str s18, [x23], #0x4\n"
+ "tbz x13, #0, 148f\n"
+ "st1 { v10.h }[2], [x12]\n"
+ "st1 { v14.h }[2], [x24]\n"
+ "st1 { v18.h }[2], [x23]\n"
"b 148f\n"
"139:" // Height 3: Partial direct writeback: partial_1_16
- "tbz x14, #0, 148f\n"
- "str h10, [x13, #0x0]\n"
- "str h14, [x25, #0x0]\n"
- "str h18, [x24, #0x0]\n"
+ "tbz x13, #0, 148f\n"
+ "str h10, [x12, #0x0]\n"
+ "str h14, [x24, #0x0]\n"
+ "str h18, [x23, #0x0]\n"
"b 148f\n"
"140:" // Height 3: Partial direct writeback: partial_8_0
- "tbz x14, #3, 144f\n"
- "st1 { v8.8h }, [x13], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "tbz x14, #2, 142f\n"
- "str d9, [x13], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "tbz x14, #1, 141f\n"
- "st1 { v9.s }[2], [x13], #0x4\n"
- "st1 { v13.s }[2], [x25], #0x4\n"
- "st1 { v17.s }[2], [x24], #0x4\n"
- "tbz x14, #0, 148f\n"
- "st1 { v9.h }[6], [x13]\n"
- "st1 { v13.h }[6], [x25]\n"
- "st1 { v17.h }[6], [x24]\n"
+ "tbz x13, #3, 144f\n"
+ "st1 { v8.8h }, [x12], #0x10\n"
+ "st1 { v12.8h }, [x24], #0x10\n"
+ "st1 { v16.8h }, [x23], #0x10\n"
+ "tbz x13, #2, 142f\n"
+ "str d9, [x12], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
+ "tbz x13, #1, 141f\n"
+ "st1 { v9.s }[2], [x12], #0x4\n"
+ "st1 { v13.s }[2], [x24], #0x4\n"
+ "st1 { v17.s }[2], [x23], #0x4\n"
+ "tbz x13, #0, 148f\n"
+ "st1 { v9.h }[6], [x12]\n"
+ "st1 { v13.h }[6], [x24]\n"
+ "st1 { v17.h }[6], [x23]\n"
"b 148f\n"
"141:" // Height 3: Partial direct writeback: partial_1_12
- "tbz x14, #0, 148f\n"
- "st1 { v9.h }[4], [x13]\n"
- "st1 { v13.h }[4], [x25]\n"
- "st1 { v17.h }[4], [x24]\n"
+ "tbz x13, #0, 148f\n"
+ "st1 { v9.h }[4], [x12]\n"
+ "st1 { v13.h }[4], [x24]\n"
+ "st1 { v17.h }[4], [x23]\n"
"b 148f\n"
"142:" // Height 3: Partial direct writeback: partial_2_8
- "tbz x14, #1, 143f\n"
- "str s9, [x13], #0x4\n"
- "str s13, [x25], #0x4\n"
- "str s17, [x24], #0x4\n"
- "tbz x14, #0, 148f\n"
- "st1 { v9.h }[2], [x13]\n"
- "st1 { v13.h }[2], [x25]\n"
- "st1 { v17.h }[2], [x24]\n"
+ "tbz x13, #1, 143f\n"
+ "str s9, [x12], #0x4\n"
+ "str s13, [x24], #0x4\n"
+ "str s17, [x23], #0x4\n"
+ "tbz x13, #0, 148f\n"
+ "st1 { v9.h }[2], [x12]\n"
+ "st1 { v13.h }[2], [x24]\n"
+ "st1 { v17.h }[2], [x23]\n"
"b 148f\n"
"143:" // Height 3: Partial direct writeback: partial_1_8
- "tbz x14, #0, 148f\n"
- "str h9, [x13, #0x0]\n"
- "str h13, [x25, #0x0]\n"
- "str h17, [x24, #0x0]\n"
+ "tbz x13, #0, 148f\n"
+ "str h9, [x12, #0x0]\n"
+ "str h13, [x24, #0x0]\n"
+ "str h17, [x23, #0x0]\n"
"b 148f\n"
"144:" // Height 3: Partial direct writeback: partial_4_0
- "tbz x14, #2, 146f\n"
- "str d8, [x13], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "tbz x14, #1, 145f\n"
- "st1 { v8.s }[2], [x13], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
- "st1 { v16.s }[2], [x24], #0x4\n"
- "tbz x14, #0, 148f\n"
- "st1 { v8.h }[6], [x13]\n"
- "st1 { v12.h }[6], [x25]\n"
- "st1 { v16.h }[6], [x24]\n"
+ "tbz x13, #2, 146f\n"
+ "str d8, [x12], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
+ "tbz x13, #1, 145f\n"
+ "st1 { v8.s }[2], [x12], #0x4\n"
+ "st1 { v12.s }[2], [x24], #0x4\n"
+ "st1 { v16.s }[2], [x23], #0x4\n"
+ "tbz x13, #0, 148f\n"
+ "st1 { v8.h }[6], [x12]\n"
+ "st1 { v12.h }[6], [x24]\n"
+ "st1 { v16.h }[6], [x23]\n"
"b 148f\n"
"145:" // Height 3: Partial direct writeback: partial_1_4
- "tbz x14, #0, 148f\n"
- "st1 { v8.h }[4], [x13]\n"
- "st1 { v12.h }[4], [x25]\n"
- "st1 { v16.h }[4], [x24]\n"
+ "tbz x13, #0, 148f\n"
+ "st1 { v8.h }[4], [x12]\n"
+ "st1 { v12.h }[4], [x24]\n"
+ "st1 { v16.h }[4], [x23]\n"
"b 148f\n"
"146:" // Height 3: Partial direct writeback: partial_2_0
- "tbz x14, #1, 147f\n"
- "str s8, [x13], #0x4\n"
- "str s12, [x25], #0x4\n"
- "str s16, [x24], #0x4\n"
- "tbz x14, #0, 148f\n"
- "st1 { v8.h }[2], [x13]\n"
- "st1 { v12.h }[2], [x25]\n"
- "st1 { v16.h }[2], [x24]\n"
+ "tbz x13, #1, 147f\n"
+ "str s8, [x12], #0x4\n"
+ "str s12, [x24], #0x4\n"
+ "str s16, [x23], #0x4\n"
+ "tbz x13, #0, 148f\n"
+ "st1 { v8.h }[2], [x12]\n"
+ "st1 { v12.h }[2], [x24]\n"
+ "st1 { v16.h }[2], [x23]\n"
"b 148f\n"
"147:" // Height 3: Partial direct writeback: partial_1_0
- "str h8, [x13, #0x0]\n"
- "str h12, [x25, #0x0]\n"
- "str h16, [x24, #0x0]\n"
+ "str h8, [x12, #0x0]\n"
+ "str h12, [x24, #0x0]\n"
+ "str h16, [x23, #0x0]\n"
"148:" // Height 3: Partial direct writeback: Done
"b 150f\n"
"149:" // Height 3: Full writeback
- "str q8, [x13, #0x0]\n"
- "str q9, [x13, #0x10]\n"
- "str q10, [x13, #0x20]\n"
- "str q11, [x13, #0x30]\n"
- "add x13, x13, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
+ "str q8, [x12, #0x0]\n"
+ "str q9, [x12, #0x10]\n"
+ "str q10, [x12, #0x20]\n"
+ "str q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
"150:" // Height 3: Writeback done
- "subs x14, x14, #0x20\n"
+ "subs x13, x13, #0x20\n"
"bgt 102b\n"
"b 302f\n"
"151:" // Height 4
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"152:" // Height 4: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x20, x9, x20, LSL #1\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0x18\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0x18\n"
"bgt 153f\n"
- "cmp x14, #0x10\n"
- "mov x9, x12\n"
+ "cmp x13, #0x10\n"
+ "mov x28, x11\n"
"bgt 153f\n"
- "cmp x14, #0x8\n"
- "mov x10, x12\n"
+ "cmp x13, #0x8\n"
+ "mov x9, x11\n"
"bgt 153f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"153:" // Height 4: B setup done
- "cbz x15, 154f\n"
- "ldr q8, [x15, #0x0]\n"
- "ldr q9, [x15, #0x10]\n"
+ "cbz x14, 154f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
"mov v12.16b, v8.16b\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x15, #0x20]\n"
- "ldr q11, [x15, #0x30]\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
"mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
"mov v16.16b, v8.16b\n"
"mov v17.16b, v9.16b\n"
- "add x15, x15, #0x40\n"
+ "add x14, x14, #0x40\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
"mov v20.16b, v8.16b\n"
@@ -2051,236 +2051,236 @@ void a64_ffhybrid_fp16_mla_6x32 (
"b 173f\n"
"154:" // Height 4: no bias
"tbz %x[flags], #0, 172f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "cmp x14, #0x20\n"
- "add x23, x24, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "cmp x13, #0x20\n"
+ "add x22, x23, x19, LSL #1\n"
"bge 171f\n"
- "tbz x14, #4, 162f\n"
- "ld1 { v8.8h }, [x13], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v20.8h }, [x23], #0x10\n"
- "ld1 { v9.8h }, [x13], #0x10\n"
- "ld1 { v13.8h }, [x25], #0x10\n"
- "ld1 { v17.8h }, [x24], #0x10\n"
- "ld1 { v21.8h }, [x23], #0x10\n"
- "tbz x14, #3, 158f\n"
- "ld1 { v10.8h }, [x13], #0x10\n"
- "ld1 { v14.8h }, [x25], #0x10\n"
- "ld1 { v18.8h }, [x24], #0x10\n"
- "ld1 { v22.8h }, [x23], #0x10\n"
- "tbz x14, #2, 156f\n"
- "ldr d11, [x13], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "tbz x14, #1, 155f\n"
- "ld1 { v11.s }[2], [x13], #0x4\n"
- "ld1 { v15.s }[2], [x25], #0x4\n"
- "mov x20, #0x3c\n"
- "ld1 { v19.s }[2], [x24], #0x4\n"
- "ld1 { v23.s }[2], [x23], #0x4\n"
- "tbz x14, #0, 170f\n"
- "ld1 { v11.h }[6], [x13]\n"
- "ld1 { v15.h }[6], [x25]\n"
- "ld1 { v19.h }[6], [x24]\n"
- "ld1 { v23.h }[6], [x23]\n"
+ "tbz x13, #4, 162f\n"
+ "ld1 { v8.8h }, [x12], #0x10\n"
+ "ld1 { v12.8h }, [x24], #0x10\n"
+ "ld1 { v16.8h }, [x23], #0x10\n"
+ "ld1 { v20.8h }, [x22], #0x10\n"
+ "ld1 { v9.8h }, [x12], #0x10\n"
+ "ld1 { v13.8h }, [x24], #0x10\n"
+ "ld1 { v17.8h }, [x23], #0x10\n"
+ "ld1 { v21.8h }, [x22], #0x10\n"
+ "tbz x13, #3, 158f\n"
+ "ld1 { v10.8h }, [x12], #0x10\n"
+ "ld1 { v14.8h }, [x24], #0x10\n"
+ "ld1 { v18.8h }, [x23], #0x10\n"
+ "ld1 { v22.8h }, [x22], #0x10\n"
+ "tbz x13, #2, 156f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "tbz x13, #1, 155f\n"
+ "ld1 { v11.s }[2], [x12], #0x4\n"
+ "ld1 { v15.s }[2], [x24], #0x4\n"
+ "mov x19, #0x3c\n"
+ "ld1 { v19.s }[2], [x23], #0x4\n"
+ "ld1 { v23.s }[2], [x22], #0x4\n"
+ "tbz x13, #0, 170f\n"
+ "ld1 { v11.h }[6], [x12]\n"
+ "ld1 { v15.h }[6], [x24]\n"
+ "ld1 { v19.h }[6], [x23]\n"
+ "ld1 { v23.h }[6], [x22]\n"
"b 170f\n"
"155:" // Height 4: Partial accumulate: partial_1_28
- "mov x20, #0x38\n"
- "tbz x14, #0, 170f\n"
- "ld1 { v11.h }[4], [x13]\n"
- "ld1 { v15.h }[4], [x25]\n"
- "ld1 { v19.h }[4], [x24]\n"
- "ld1 { v23.h }[4], [x23]\n"
+ "mov x19, #0x38\n"
+ "tbz x13, #0, 170f\n"
+ "ld1 { v11.h }[4], [x12]\n"
+ "ld1 { v15.h }[4], [x24]\n"
+ "ld1 { v19.h }[4], [x23]\n"
+ "ld1 { v23.h }[4], [x22]\n"
"b 170f\n"
"156:" // Height 4: Partial accumulate: partial_2_24
- "tbz x14, #1, 157f\n"
- "ldr s11, [x13], #0x4\n"
- "ldr s15, [x25], #0x4\n"
- "mov x20, #0x34\n"
- "ldr s19, [x24], #0x4\n"
- "ldr s23, [x23], #0x4\n"
- "tbz x14, #0, 170f\n"
- "ld1 { v11.h }[2], [x13]\n"
- "ld1 { v15.h }[2], [x25]\n"
- "ld1 { v19.h }[2], [x24]\n"
- "ld1 { v23.h }[2], [x23]\n"
+ "tbz x13, #1, 157f\n"
+ "ldr s11, [x12], #0x4\n"
+ "ldr s15, [x24], #0x4\n"
+ "mov x19, #0x34\n"
+ "ldr s19, [x23], #0x4\n"
+ "ldr s23, [x22], #0x4\n"
+ "tbz x13, #0, 170f\n"
+ "ld1 { v11.h }[2], [x12]\n"
+ "ld1 { v15.h }[2], [x24]\n"
+ "ld1 { v19.h }[2], [x23]\n"
+ "ld1 { v23.h }[2], [x22]\n"
"b 170f\n"
"157:" // Height 4: Partial accumulate: partial_1_24
- "mov x20, #0x30\n"
- "tbz x14, #0, 170f\n"
- "ldr h11, [x13, #0x0]\n"
- "ldr h15, [x25, #0x0]\n"
- "ldr h19, [x24, #0x0]\n"
- "ldr h23, [x23, #0x0]\n"
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 170f\n"
+ "ldr h11, [x12, #0x0]\n"
+ "ldr h15, [x24, #0x0]\n"
+ "ldr h19, [x23, #0x0]\n"
+ "ldr h23, [x22, #0x0]\n"
"b 170f\n"
"158:" // Height 4: Partial accumulate: partial_4_16
- "tbz x14, #2, 160f\n"
- "ldr d10, [x13], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "tbz x14, #1, 159f\n"
- "ld1 { v10.s }[2], [x13], #0x4\n"
- "ld1 { v14.s }[2], [x25], #0x4\n"
- "mov x20, #0x2c\n"
- "ld1 { v18.s }[2], [x24], #0x4\n"
- "ld1 { v22.s }[2], [x23], #0x4\n"
- "tbz x14, #0, 170f\n"
- "ld1 { v10.h }[6], [x13]\n"
- "ld1 { v14.h }[6], [x25]\n"
- "ld1 { v18.h }[6], [x24]\n"
- "ld1 { v22.h }[6], [x23]\n"
+ "tbz x13, #2, 160f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "tbz x13, #1, 159f\n"
+ "ld1 { v10.s }[2], [x12], #0x4\n"
+ "ld1 { v14.s }[2], [x24], #0x4\n"
+ "mov x19, #0x2c\n"
+ "ld1 { v18.s }[2], [x23], #0x4\n"
+ "ld1 { v22.s }[2], [x22], #0x4\n"
+ "tbz x13, #0, 170f\n"
+ "ld1 { v10.h }[6], [x12]\n"
+ "ld1 { v14.h }[6], [x24]\n"
+ "ld1 { v18.h }[6], [x23]\n"
+ "ld1 { v22.h }[6], [x22]\n"
"b 170f\n"
"159:" // Height 4: Partial accumulate: partial_1_20
- "mov x20, #0x28\n"
- "tbz x14, #0, 170f\n"
- "ld1 { v10.h }[4], [x13]\n"
- "ld1 { v14.h }[4], [x25]\n"
- "ld1 { v18.h }[4], [x24]\n"
- "ld1 { v22.h }[4], [x23]\n"
+ "mov x19, #0x28\n"
+ "tbz x13, #0, 170f\n"
+ "ld1 { v10.h }[4], [x12]\n"
+ "ld1 { v14.h }[4], [x24]\n"
+ "ld1 { v18.h }[4], [x23]\n"
+ "ld1 { v22.h }[4], [x22]\n"
"b 170f\n"
"160:" // Height 4: Partial accumulate: partial_2_16
- "tbz x14, #1, 161f\n"
- "ldr s10, [x13], #0x4\n"
- "ldr s14, [x25], #0x4\n"
- "mov x20, #0x24\n"
- "ldr s18, [x24], #0x4\n"
- "ldr s22, [x23], #0x4\n"
- "tbz x14, #0, 170f\n"
- "ld1 { v10.h }[2], [x13]\n"
- "ld1 { v14.h }[2], [x25]\n"
- "ld1 { v18.h }[2], [x24]\n"
- "ld1 { v22.h }[2], [x23]\n"
+ "tbz x13, #1, 161f\n"
+ "ldr s10, [x12], #0x4\n"
+ "ldr s14, [x24], #0x4\n"
+ "mov x19, #0x24\n"
+ "ldr s18, [x23], #0x4\n"
+ "ldr s22, [x22], #0x4\n"
+ "tbz x13, #0, 170f\n"
+ "ld1 { v10.h }[2], [x12]\n"
+ "ld1 { v14.h }[2], [x24]\n"
+ "ld1 { v18.h }[2], [x23]\n"
+ "ld1 { v22.h }[2], [x22]\n"
"b 170f\n"
"161:" // Height 4: Partial accumulate: partial_1_16
- "mov x20, #0x20\n"
- "tbz x14, #0, 170f\n"
- "ldr h10, [x13, #0x0]\n"
- "ldr h14, [x25, #0x0]\n"
- "ldr h18, [x24, #0x0]\n"
- "ldr h22, [x23, #0x0]\n"
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 170f\n"
+ "ldr h10, [x12, #0x0]\n"
+ "ldr h14, [x24, #0x0]\n"
+ "ldr h18, [x23, #0x0]\n"
+ "ldr h22, [x22, #0x0]\n"
"b 170f\n"
"162:" // Height 4: Partial accumulate: partial_8_0
- "tbz x14, #3, 166f\n"
- "ld1 { v8.8h }, [x13], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v20.8h }, [x23], #0x10\n"
- "tbz x14, #2, 164f\n"
- "ldr d9, [x13], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "tbz x14, #1, 163f\n"
- "ld1 { v9.s }[2], [x13], #0x4\n"
- "ld1 { v13.s }[2], [x25], #0x4\n"
- "mov x20, #0x1c\n"
- "ld1 { v17.s }[2], [x24], #0x4\n"
- "ld1 { v21.s }[2], [x23], #0x4\n"
- "tbz x14, #0, 170f\n"
- "ld1 { v9.h }[6], [x13]\n"
- "ld1 { v13.h }[6], [x25]\n"
- "ld1 { v17.h }[6], [x24]\n"
- "ld1 { v21.h }[6], [x23]\n"
+ "tbz x13, #3, 166f\n"
+ "ld1 { v8.8h }, [x12], #0x10\n"
+ "ld1 { v12.8h }, [x24], #0x10\n"
+ "ld1 { v16.8h }, [x23], #0x10\n"
+ "ld1 { v20.8h }, [x22], #0x10\n"
+ "tbz x13, #2, 164f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "tbz x13, #1, 163f\n"
+ "ld1 { v9.s }[2], [x12], #0x4\n"
+ "ld1 { v13.s }[2], [x24], #0x4\n"
+ "mov x19, #0x1c\n"
+ "ld1 { v17.s }[2], [x23], #0x4\n"
+ "ld1 { v21.s }[2], [x22], #0x4\n"
+ "tbz x13, #0, 170f\n"
+ "ld1 { v9.h }[6], [x12]\n"
+ "ld1 { v13.h }[6], [x24]\n"
+ "ld1 { v17.h }[6], [x23]\n"
+ "ld1 { v21.h }[6], [x22]\n"
"b 170f\n"
"163:" // Height 4: Partial accumulate: partial_1_12
- "mov x20, #0x18\n"
- "tbz x14, #0, 170f\n"
- "ld1 { v9.h }[4], [x13]\n"
- "ld1 { v13.h }[4], [x25]\n"
- "ld1 { v17.h }[4], [x24]\n"
- "ld1 { v21.h }[4], [x23]\n"
+ "mov x19, #0x18\n"
+ "tbz x13, #0, 170f\n"
+ "ld1 { v9.h }[4], [x12]\n"
+ "ld1 { v13.h }[4], [x24]\n"
+ "ld1 { v17.h }[4], [x23]\n"
+ "ld1 { v21.h }[4], [x22]\n"
"b 170f\n"
"164:" // Height 4: Partial accumulate: partial_2_8
- "tbz x14, #1, 165f\n"
- "ldr s9, [x13], #0x4\n"
- "ldr s13, [x25], #0x4\n"
- "mov x20, #0x14\n"
- "ldr s17, [x24], #0x4\n"
- "ldr s21, [x23], #0x4\n"
- "tbz x14, #0, 170f\n"
- "ld1 { v9.h }[2], [x13]\n"
- "ld1 { v13.h }[2], [x25]\n"
- "ld1 { v17.h }[2], [x24]\n"
- "ld1 { v21.h }[2], [x23]\n"
+ "tbz x13, #1, 165f\n"
+ "ldr s9, [x12], #0x4\n"
+ "ldr s13, [x24], #0x4\n"
+ "mov x19, #0x14\n"
+ "ldr s17, [x23], #0x4\n"
+ "ldr s21, [x22], #0x4\n"
+ "tbz x13, #0, 170f\n"
+ "ld1 { v9.h }[2], [x12]\n"
+ "ld1 { v13.h }[2], [x24]\n"
+ "ld1 { v17.h }[2], [x23]\n"
+ "ld1 { v21.h }[2], [x22]\n"
"b 170f\n"
"165:" // Height 4: Partial accumulate: partial_1_8
- "mov x20, #0x10\n"
- "tbz x14, #0, 170f\n"
- "ldr h9, [x13, #0x0]\n"
- "ldr h13, [x25, #0x0]\n"
- "ldr h17, [x24, #0x0]\n"
- "ldr h21, [x23, #0x0]\n"
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 170f\n"
+ "ldr h9, [x12, #0x0]\n"
+ "ldr h13, [x24, #0x0]\n"
+ "ldr h17, [x23, #0x0]\n"
+ "ldr h21, [x22, #0x0]\n"
"b 170f\n"
"166:" // Height 4: Partial accumulate: partial_4_0
- "tbz x14, #2, 168f\n"
- "ldr d8, [x13], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "tbz x14, #1, 167f\n"
- "ld1 { v8.s }[2], [x13], #0x4\n"
- "ld1 { v12.s }[2], [x25], #0x4\n"
- "mov x20, #0xc\n"
- "ld1 { v16.s }[2], [x24], #0x4\n"
- "ld1 { v20.s }[2], [x23], #0x4\n"
- "tbz x14, #0, 170f\n"
- "ld1 { v8.h }[6], [x13]\n"
- "ld1 { v12.h }[6], [x25]\n"
- "ld1 { v16.h }[6], [x24]\n"
- "ld1 { v20.h }[6], [x23]\n"
+ "tbz x13, #2, 168f\n"
+ "ldr d8, [x12], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "ldr d16, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "tbz x13, #1, 167f\n"
+ "ld1 { v8.s }[2], [x12], #0x4\n"
+ "ld1 { v12.s }[2], [x24], #0x4\n"
+ "mov x19, #0xc\n"
+ "ld1 { v16.s }[2], [x23], #0x4\n"
+ "ld1 { v20.s }[2], [x22], #0x4\n"
+ "tbz x13, #0, 170f\n"
+ "ld1 { v8.h }[6], [x12]\n"
+ "ld1 { v12.h }[6], [x24]\n"
+ "ld1 { v16.h }[6], [x23]\n"
+ "ld1 { v20.h }[6], [x22]\n"
"b 170f\n"
"167:" // Height 4: Partial accumulate: partial_1_4
- "mov x20, #0x8\n"
- "tbz x14, #0, 170f\n"
- "ld1 { v8.h }[4], [x13]\n"
- "ld1 { v12.h }[4], [x25]\n"
- "ld1 { v16.h }[4], [x24]\n"
- "ld1 { v20.h }[4], [x23]\n"
+ "mov x19, #0x8\n"
+ "tbz x13, #0, 170f\n"
+ "ld1 { v8.h }[4], [x12]\n"
+ "ld1 { v12.h }[4], [x24]\n"
+ "ld1 { v16.h }[4], [x23]\n"
+ "ld1 { v20.h }[4], [x22]\n"
"b 170f\n"
"168:" // Height 4: Partial accumulate: partial_2_0
- "tbz x14, #1, 169f\n"
- "ldr s8, [x13], #0x4\n"
- "ldr s12, [x25], #0x4\n"
- "mov x20, #0x4\n"
- "ldr s16, [x24], #0x4\n"
- "ldr s20, [x23], #0x4\n"
- "tbz x14, #0, 170f\n"
- "ld1 { v8.h }[2], [x13]\n"
- "ld1 { v12.h }[2], [x25]\n"
- "ld1 { v16.h }[2], [x24]\n"
- "ld1 { v20.h }[2], [x23]\n"
+ "tbz x13, #1, 169f\n"
+ "ldr s8, [x12], #0x4\n"
+ "ldr s12, [x24], #0x4\n"
+ "mov x19, #0x4\n"
+ "ldr s16, [x23], #0x4\n"
+ "ldr s20, [x22], #0x4\n"
+ "tbz x13, #0, 170f\n"
+ "ld1 { v8.h }[2], [x12]\n"
+ "ld1 { v12.h }[2], [x24]\n"
+ "ld1 { v16.h }[2], [x23]\n"
+ "ld1 { v20.h }[2], [x22]\n"
"b 170f\n"
"169:" // Height 4: Partial accumulate: partial_1_0
- "ldr h8, [x13, #0x0]\n"
- "ldr h12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr h16, [x24, #0x0]\n"
- "ldr h20, [x23, #0x0]\n"
+ "ldr h8, [x12, #0x0]\n"
+ "ldr h12, [x24, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr h16, [x23, #0x0]\n"
+ "ldr h20, [x22, #0x0]\n"
"170:" // Height 4: Partial accumulate: Done
- "sub x13, x13, x20\n"
+ "sub x12, x12, x19\n"
"b 173f\n"
"171:" // Height 4: full accumulate
- "ldr q8, [x13, #0x0]\n"
- "ldr q9, [x13, #0x10]\n"
- "ldr q10, [x13, #0x20]\n"
- "ldr q11, [x13, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
+ "ldr q8, [x12, #0x0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "ldr q10, [x12, #0x20]\n"
+ "ldr q11, [x12, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x0]\n"
+ "ldr q17, [x23, #0x10]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "ldr q19, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
"b 173f\n"
"172:" // Height 4: no accumulate
"movi v8.16b, #0x0\n"
@@ -2300,377 +2300,377 @@ void a64_ffhybrid_fp16_mla_6x32 (
"movi v22.16b, #0x0\n"
"movi v23.16b, #0x0\n"
"173:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"174:" // Height 4: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 175f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 176f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 176f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
"b 176f\n"
"175:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
"176:" // Height 4: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"blt 179f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x10\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q3, [x23, #0x0]\n"
- "ldr q6, [x12, #0x0]\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x10\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"blt 178f\n"
"177:" // Height 4: Multiply loop: Main loop head
"fmla v8.8h, v6.8h, v0.h[0]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
- "sub x27, x27, #0x8\n"
- "cmp x27, #0x10\n"
+ "sub x26, x26, #0x8\n"
+ "cmp x26, #0x10\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
"fmla v20.8h, v6.8h, v3.h[0]\n"
- "ldr q6, [x10, #0x0]\n"
- "add x26, x26, #0x10\n"
+ "ldr q6, [x9, #0x0]\n"
+ "add x25, x25, #0x10\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
- "add x25, x25, #0x10\n"
"add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
"fmla v21.8h, v7.8h, v3.h[0]\n"
- "ldr q7, [x9, #0x0]\n"
- "add x23, x23, #0x10\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x22, x22, #0x10\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
"fmla v18.8h, v6.8h, v2.h[0]\n"
"fmla v22.8h, v6.8h, v3.h[0]\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q6, [x11, #0x10]\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
"fmla v15.8h, v7.8h, v1.h[0]\n"
"fmla v19.8h, v7.8h, v2.h[0]\n"
"fmla v23.8h, v7.8h, v3.h[0]\n"
- "ldr q7, [x11, #0x10]\n"
+ "ldr q7, [x10, #0x10]\n"
"fmla v8.8h, v6.8h, v0.h[1]\n"
"fmla v12.8h, v6.8h, v1.h[1]\n"
"fmla v16.8h, v6.8h, v2.h[1]\n"
"fmla v20.8h, v6.8h, v3.h[1]\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
"fmla v9.8h, v7.8h, v0.h[1]\n"
"fmla v13.8h, v7.8h, v1.h[1]\n"
"fmla v17.8h, v7.8h, v2.h[1]\n"
"fmla v21.8h, v7.8h, v3.h[1]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
"fmla v10.8h, v6.8h, v0.h[1]\n"
"fmla v14.8h, v6.8h, v1.h[1]\n"
"fmla v18.8h, v6.8h, v2.h[1]\n"
"fmla v22.8h, v6.8h, v3.h[1]\n"
- "ldr q6, [x12, #0x20]\n"
+ "ldr q6, [x11, #0x20]\n"
"fmla v11.8h, v7.8h, v0.h[1]\n"
"fmla v15.8h, v7.8h, v1.h[1]\n"
"fmla v19.8h, v7.8h, v2.h[1]\n"
"fmla v23.8h, v7.8h, v3.h[1]\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
"fmla v8.8h, v6.8h, v0.h[2]\n"
"fmla v12.8h, v6.8h, v1.h[2]\n"
"fmla v16.8h, v6.8h, v2.h[2]\n"
"fmla v20.8h, v6.8h, v3.h[2]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
"fmla v9.8h, v7.8h, v0.h[2]\n"
"fmla v13.8h, v7.8h, v1.h[2]\n"
"fmla v17.8h, v7.8h, v2.h[2]\n"
"fmla v21.8h, v7.8h, v3.h[2]\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
"fmla v10.8h, v6.8h, v0.h[2]\n"
"fmla v14.8h, v6.8h, v1.h[2]\n"
"fmla v18.8h, v6.8h, v2.h[2]\n"
"fmla v22.8h, v6.8h, v3.h[2]\n"
- "ldr q6, [x12, #0x30]\n"
+ "ldr q6, [x11, #0x30]\n"
"fmla v11.8h, v7.8h, v0.h[2]\n"
"fmla v15.8h, v7.8h, v1.h[2]\n"
"fmla v19.8h, v7.8h, v2.h[2]\n"
"fmla v23.8h, v7.8h, v3.h[2]\n"
- "ldr q7, [x11, #0x30]\n"
+ "ldr q7, [x10, #0x30]\n"
"fmla v8.8h, v6.8h, v0.h[3]\n"
"fmla v12.8h, v6.8h, v1.h[3]\n"
"fmla v16.8h, v6.8h, v2.h[3]\n"
"fmla v20.8h, v6.8h, v3.h[3]\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
"fmla v9.8h, v7.8h, v0.h[3]\n"
"fmla v13.8h, v7.8h, v1.h[3]\n"
"fmla v17.8h, v7.8h, v2.h[3]\n"
"fmla v21.8h, v7.8h, v3.h[3]\n"
- "ldr q7, [x9, #0x30]\n"
+ "ldr q7, [x28, #0x30]\n"
"fmla v10.8h, v6.8h, v0.h[3]\n"
"fmla v14.8h, v6.8h, v1.h[3]\n"
"fmla v18.8h, v6.8h, v2.h[3]\n"
"fmla v22.8h, v6.8h, v3.h[3]\n"
- "ldr q6, [x12, #0x40]\n"
+ "ldr q6, [x11, #0x40]\n"
"fmla v11.8h, v7.8h, v0.h[3]\n"
"fmla v15.8h, v7.8h, v1.h[3]\n"
"fmla v19.8h, v7.8h, v2.h[3]\n"
"fmla v23.8h, v7.8h, v3.h[3]\n"
- "ldr q7, [x11, #0x40]\n"
+ "ldr q7, [x10, #0x40]\n"
"fmla v8.8h, v6.8h, v0.h[4]\n"
"fmla v12.8h, v6.8h, v1.h[4]\n"
"fmla v16.8h, v6.8h, v2.h[4]\n"
"fmla v20.8h, v6.8h, v3.h[4]\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
"fmla v9.8h, v7.8h, v0.h[4]\n"
"fmla v13.8h, v7.8h, v1.h[4]\n"
"fmla v17.8h, v7.8h, v2.h[4]\n"
"fmla v21.8h, v7.8h, v3.h[4]\n"
- "ldr q7, [x9, #0x40]\n"
+ "ldr q7, [x28, #0x40]\n"
"fmla v10.8h, v6.8h, v0.h[4]\n"
"fmla v14.8h, v6.8h, v1.h[4]\n"
"fmla v18.8h, v6.8h, v2.h[4]\n"
"fmla v22.8h, v6.8h, v3.h[4]\n"
- "ldr q6, [x12, #0x50]\n"
+ "ldr q6, [x11, #0x50]\n"
"fmla v11.8h, v7.8h, v0.h[4]\n"
"fmla v15.8h, v7.8h, v1.h[4]\n"
"fmla v19.8h, v7.8h, v2.h[4]\n"
"fmla v23.8h, v7.8h, v3.h[4]\n"
- "ldr q7, [x11, #0x50]\n"
+ "ldr q7, [x10, #0x50]\n"
"fmla v8.8h, v6.8h, v0.h[5]\n"
"fmla v12.8h, v6.8h, v1.h[5]\n"
"fmla v16.8h, v6.8h, v2.h[5]\n"
"fmla v20.8h, v6.8h, v3.h[5]\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
"fmla v9.8h, v7.8h, v0.h[5]\n"
"fmla v13.8h, v7.8h, v1.h[5]\n"
"fmla v17.8h, v7.8h, v2.h[5]\n"
"fmla v21.8h, v7.8h, v3.h[5]\n"
- "ldr q7, [x9, #0x50]\n"
+ "ldr q7, [x28, #0x50]\n"
"fmla v10.8h, v6.8h, v0.h[5]\n"
"fmla v14.8h, v6.8h, v1.h[5]\n"
"fmla v18.8h, v6.8h, v2.h[5]\n"
"fmla v22.8h, v6.8h, v3.h[5]\n"
- "ldr q6, [x12, #0x60]\n"
+ "ldr q6, [x11, #0x60]\n"
"fmla v11.8h, v7.8h, v0.h[5]\n"
"fmla v15.8h, v7.8h, v1.h[5]\n"
"fmla v19.8h, v7.8h, v2.h[5]\n"
"fmla v23.8h, v7.8h, v3.h[5]\n"
- "ldr q7, [x11, #0x60]\n"
+ "ldr q7, [x10, #0x60]\n"
"fmla v8.8h, v6.8h, v0.h[6]\n"
"fmla v12.8h, v6.8h, v1.h[6]\n"
"fmla v16.8h, v6.8h, v2.h[6]\n"
"fmla v20.8h, v6.8h, v3.h[6]\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
"fmla v9.8h, v7.8h, v0.h[6]\n"
"fmla v13.8h, v7.8h, v1.h[6]\n"
"fmla v17.8h, v7.8h, v2.h[6]\n"
"fmla v21.8h, v7.8h, v3.h[6]\n"
- "ldr q7, [x9, #0x60]\n"
+ "ldr q7, [x28, #0x60]\n"
"fmla v10.8h, v6.8h, v0.h[6]\n"
"fmla v14.8h, v6.8h, v1.h[6]\n"
"fmla v18.8h, v6.8h, v2.h[6]\n"
"fmla v22.8h, v6.8h, v3.h[6]\n"
- "ldr q6, [x12, #0x70]\n"
- "add x12, x12, #0x80\n"
+ "ldr q6, [x11, #0x70]\n"
+ "add x11, x11, #0x80\n"
"fmla v11.8h, v7.8h, v0.h[6]\n"
"fmla v15.8h, v7.8h, v1.h[6]\n"
"fmla v19.8h, v7.8h, v2.h[6]\n"
"fmla v23.8h, v7.8h, v3.h[6]\n"
- "ldr q7, [x11, #0x70]\n"
- "add x11, x11, #0x80\n"
+ "ldr q7, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
"fmla v8.8h, v6.8h, v0.h[7]\n"
"fmla v12.8h, v6.8h, v1.h[7]\n"
"fmla v16.8h, v6.8h, v2.h[7]\n"
"fmla v20.8h, v6.8h, v3.h[7]\n"
- "ldr q6, [x10, #0x70]\n"
- "add x10, x10, #0x80\n"
+ "ldr q6, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
"fmla v9.8h, v7.8h, v0.h[7]\n"
"fmla v13.8h, v7.8h, v1.h[7]\n"
"fmla v17.8h, v7.8h, v2.h[7]\n"
"fmla v21.8h, v7.8h, v3.h[7]\n"
- "ldr q7, [x9, #0x70]\n"
- "add x9, x9, #0x80\n"
+ "ldr q7, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
"fmla v10.8h, v6.8h, v0.h[7]\n"
"fmla v14.8h, v6.8h, v1.h[7]\n"
"fmla v18.8h, v6.8h, v2.h[7]\n"
"fmla v22.8h, v6.8h, v3.h[7]\n"
- "ldr q6, [x12, #0x0]\n"
+ "ldr q6, [x11, #0x0]\n"
"fmla v11.8h, v7.8h, v0.h[7]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
"fmla v15.8h, v7.8h, v1.h[7]\n"
- "ldr q1, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
"fmla v19.8h, v7.8h, v2.h[7]\n"
- "ldr q2, [x24, #0x0]\n"
+ "ldr q2, [x23, #0x0]\n"
"fmla v23.8h, v7.8h, v3.h[7]\n"
- "ldr q3, [x23, #0x0]\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"bge 177b\n"
"178:" // Height 4: Multiply loop: Single iteration only
"fmla v8.8h, v6.8h, v0.h[0]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
- "sub x27, x27, #0x8\n"
- "add x26, x26, #0x10\n"
+ "sub x26, x26, #0x8\n"
+ "add x25, x25, #0x10\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
"fmla v20.8h, v6.8h, v3.h[0]\n"
- "ldr q6, [x10, #0x0]\n"
- "add x25, x25, #0x10\n"
+ "ldr q6, [x9, #0x0]\n"
+ "add x24, x24, #0x10\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
- "add x24, x24, #0x10\n"
"add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
"fmla v21.8h, v7.8h, v3.h[0]\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
"fmla v18.8h, v6.8h, v2.h[0]\n"
"fmla v22.8h, v6.8h, v3.h[0]\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q6, [x11, #0x10]\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
"fmla v15.8h, v7.8h, v1.h[0]\n"
"fmla v19.8h, v7.8h, v2.h[0]\n"
"fmla v23.8h, v7.8h, v3.h[0]\n"
- "ldr q7, [x11, #0x10]\n"
+ "ldr q7, [x10, #0x10]\n"
"fmla v8.8h, v6.8h, v0.h[1]\n"
"fmla v12.8h, v6.8h, v1.h[1]\n"
"fmla v16.8h, v6.8h, v2.h[1]\n"
"fmla v20.8h, v6.8h, v3.h[1]\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
"fmla v9.8h, v7.8h, v0.h[1]\n"
"fmla v13.8h, v7.8h, v1.h[1]\n"
"fmla v17.8h, v7.8h, v2.h[1]\n"
"fmla v21.8h, v7.8h, v3.h[1]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
"fmla v10.8h, v6.8h, v0.h[1]\n"
"fmla v14.8h, v6.8h, v1.h[1]\n"
"fmla v18.8h, v6.8h, v2.h[1]\n"
"fmla v22.8h, v6.8h, v3.h[1]\n"
- "ldr q6, [x12, #0x20]\n"
+ "ldr q6, [x11, #0x20]\n"
"fmla v11.8h, v7.8h, v0.h[1]\n"
"fmla v15.8h, v7.8h, v1.h[1]\n"
"fmla v19.8h, v7.8h, v2.h[1]\n"
"fmla v23.8h, v7.8h, v3.h[1]\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
"fmla v8.8h, v6.8h, v0.h[2]\n"
"fmla v12.8h, v6.8h, v1.h[2]\n"
"fmla v16.8h, v6.8h, v2.h[2]\n"
"fmla v20.8h, v6.8h, v3.h[2]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
"fmla v9.8h, v7.8h, v0.h[2]\n"
"fmla v13.8h, v7.8h, v1.h[2]\n"
"fmla v17.8h, v7.8h, v2.h[2]\n"
"fmla v21.8h, v7.8h, v3.h[2]\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
"fmla v10.8h, v6.8h, v0.h[2]\n"
"fmla v14.8h, v6.8h, v1.h[2]\n"
"fmla v18.8h, v6.8h, v2.h[2]\n"
"fmla v22.8h, v6.8h, v3.h[2]\n"
- "ldr q6, [x12, #0x30]\n"
+ "ldr q6, [x11, #0x30]\n"
"fmla v11.8h, v7.8h, v0.h[2]\n"
"fmla v15.8h, v7.8h, v1.h[2]\n"
"fmla v19.8h, v7.8h, v2.h[2]\n"
"fmla v23.8h, v7.8h, v3.h[2]\n"
- "ldr q7, [x11, #0x30]\n"
+ "ldr q7, [x10, #0x30]\n"
"fmla v8.8h, v6.8h, v0.h[3]\n"
"fmla v12.8h, v6.8h, v1.h[3]\n"
"fmla v16.8h, v6.8h, v2.h[3]\n"
"fmla v20.8h, v6.8h, v3.h[3]\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
"fmla v9.8h, v7.8h, v0.h[3]\n"
"fmla v13.8h, v7.8h, v1.h[3]\n"
"fmla v17.8h, v7.8h, v2.h[3]\n"
"fmla v21.8h, v7.8h, v3.h[3]\n"
- "ldr q7, [x9, #0x30]\n"
+ "ldr q7, [x28, #0x30]\n"
"fmla v10.8h, v6.8h, v0.h[3]\n"
"fmla v14.8h, v6.8h, v1.h[3]\n"
"fmla v18.8h, v6.8h, v2.h[3]\n"
"fmla v22.8h, v6.8h, v3.h[3]\n"
- "ldr q6, [x12, #0x40]\n"
+ "ldr q6, [x11, #0x40]\n"
"fmla v11.8h, v7.8h, v0.h[3]\n"
"fmla v15.8h, v7.8h, v1.h[3]\n"
"fmla v19.8h, v7.8h, v2.h[3]\n"
"fmla v23.8h, v7.8h, v3.h[3]\n"
- "ldr q7, [x11, #0x40]\n"
+ "ldr q7, [x10, #0x40]\n"
"fmla v8.8h, v6.8h, v0.h[4]\n"
"fmla v12.8h, v6.8h, v1.h[4]\n"
"fmla v16.8h, v6.8h, v2.h[4]\n"
"fmla v20.8h, v6.8h, v3.h[4]\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
"fmla v9.8h, v7.8h, v0.h[4]\n"
"fmla v13.8h, v7.8h, v1.h[4]\n"
"fmla v17.8h, v7.8h, v2.h[4]\n"
"fmla v21.8h, v7.8h, v3.h[4]\n"
- "ldr q7, [x9, #0x40]\n"
+ "ldr q7, [x28, #0x40]\n"
"fmla v10.8h, v6.8h, v0.h[4]\n"
"fmla v14.8h, v6.8h, v1.h[4]\n"
"fmla v18.8h, v6.8h, v2.h[4]\n"
"fmla v22.8h, v6.8h, v3.h[4]\n"
- "ldr q6, [x12, #0x50]\n"
+ "ldr q6, [x11, #0x50]\n"
"fmla v11.8h, v7.8h, v0.h[4]\n"
"fmla v15.8h, v7.8h, v1.h[4]\n"
"fmla v19.8h, v7.8h, v2.h[4]\n"
"fmla v23.8h, v7.8h, v3.h[4]\n"
- "ldr q7, [x11, #0x50]\n"
+ "ldr q7, [x10, #0x50]\n"
"fmla v8.8h, v6.8h, v0.h[5]\n"
"fmla v12.8h, v6.8h, v1.h[5]\n"
"fmla v16.8h, v6.8h, v2.h[5]\n"
"fmla v20.8h, v6.8h, v3.h[5]\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
"fmla v9.8h, v7.8h, v0.h[5]\n"
"fmla v13.8h, v7.8h, v1.h[5]\n"
"fmla v17.8h, v7.8h, v2.h[5]\n"
"fmla v21.8h, v7.8h, v3.h[5]\n"
- "ldr q7, [x9, #0x50]\n"
+ "ldr q7, [x28, #0x50]\n"
"fmla v10.8h, v6.8h, v0.h[5]\n"
"fmla v14.8h, v6.8h, v1.h[5]\n"
"fmla v18.8h, v6.8h, v2.h[5]\n"
"fmla v22.8h, v6.8h, v3.h[5]\n"
- "ldr q6, [x12, #0x60]\n"
+ "ldr q6, [x11, #0x60]\n"
"fmla v11.8h, v7.8h, v0.h[5]\n"
"fmla v15.8h, v7.8h, v1.h[5]\n"
"fmla v19.8h, v7.8h, v2.h[5]\n"
"fmla v23.8h, v7.8h, v3.h[5]\n"
- "ldr q7, [x11, #0x60]\n"
+ "ldr q7, [x10, #0x60]\n"
"fmla v8.8h, v6.8h, v0.h[6]\n"
"fmla v12.8h, v6.8h, v1.h[6]\n"
"fmla v16.8h, v6.8h, v2.h[6]\n"
"fmla v20.8h, v6.8h, v3.h[6]\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
"fmla v9.8h, v7.8h, v0.h[6]\n"
"fmla v13.8h, v7.8h, v1.h[6]\n"
"fmla v17.8h, v7.8h, v2.h[6]\n"
"fmla v21.8h, v7.8h, v3.h[6]\n"
- "ldr q7, [x9, #0x60]\n"
+ "ldr q7, [x28, #0x60]\n"
"fmla v10.8h, v6.8h, v0.h[6]\n"
"fmla v14.8h, v6.8h, v1.h[6]\n"
"fmla v18.8h, v6.8h, v2.h[6]\n"
"fmla v22.8h, v6.8h, v3.h[6]\n"
- "ldr q6, [x12, #0x70]\n"
- "add x12, x12, #0x80\n"
+ "ldr q6, [x11, #0x70]\n"
+ "add x11, x11, #0x80\n"
"fmla v11.8h, v7.8h, v0.h[6]\n"
"fmla v15.8h, v7.8h, v1.h[6]\n"
"fmla v19.8h, v7.8h, v2.h[6]\n"
"fmla v23.8h, v7.8h, v3.h[6]\n"
- "ldr q7, [x11, #0x70]\n"
- "add x11, x11, #0x80\n"
+ "ldr q7, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
"fmla v8.8h, v6.8h, v0.h[7]\n"
"fmla v12.8h, v6.8h, v1.h[7]\n"
"fmla v16.8h, v6.8h, v2.h[7]\n"
"fmla v20.8h, v6.8h, v3.h[7]\n"
- "ldr q6, [x10, #0x70]\n"
- "add x10, x10, #0x80\n"
+ "ldr q6, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
"fmla v9.8h, v7.8h, v0.h[7]\n"
"fmla v13.8h, v7.8h, v1.h[7]\n"
"fmla v17.8h, v7.8h, v2.h[7]\n"
"fmla v21.8h, v7.8h, v3.h[7]\n"
- "ldr q7, [x9, #0x70]\n"
- "add x9, x9, #0x80\n"
+ "ldr q7, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
"fmla v10.8h, v6.8h, v0.h[7]\n"
"fmla v14.8h, v6.8h, v1.h[7]\n"
"fmla v18.8h, v6.8h, v2.h[7]\n"
@@ -2680,29 +2680,29 @@ void a64_ffhybrid_fp16_mla_6x32 (
"fmla v19.8h, v7.8h, v2.h[7]\n"
"fmla v23.8h, v7.8h, v3.h[7]\n"
"179:" // Height 4: Multiply loop: Main loop skip
- "cbz x27, 181f\n"
+ "cbz x26, 181f\n"
"180:" // Height 4: Multiply loop: Odd block loop
- "ldr h0, [x26], #0x2\n"
- "ldr h1, [x25], #0x2\n"
- "sub x27, x27, #0x1\n"
- "ldr h2, [x24], #0x2\n"
- "ldr h3, [x23], #0x2\n"
- "ldr q6, [x12, #0x0]\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr h0, [x25], #0x2\n"
+ "ldr h1, [x24], #0x2\n"
+ "sub x26, x26, #0x1\n"
+ "ldr h2, [x23], #0x2\n"
+ "ldr h3, [x22], #0x2\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"fmla v8.8h, v6.8h, v0.h[0]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
"fmla v20.8h, v6.8h, v3.h[0]\n"
- "ldr q6, [x10, #0x0]\n"
- "add x12, x12, #0x10\n"
+ "ldr q6, [x9, #0x0]\n"
+ "add x11, x11, #0x10\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
- "add x11, x11, #0x10\n"
"add x10, x10, #0x10\n"
+ "add x9, x9, #0x10\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
"fmla v21.8h, v7.8h, v3.h[0]\n"
- "ldr q7, [x9, #0x0]\n"
- "add x9, x9, #0x10\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x28, x28, #0x10\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
"fmla v18.8h, v6.8h, v2.h[0]\n"
@@ -2711,21 +2711,21 @@ void a64_ffhybrid_fp16_mla_6x32 (
"fmla v15.8h, v7.8h, v1.h[0]\n"
"fmla v19.8h, v7.8h, v2.h[0]\n"
"fmla v23.8h, v7.8h, v3.h[0]\n"
- "cbnz x27, 180b\n"
+ "cbnz x26, 180b\n"
"181:" // Height 4: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 174b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
"tbz %x[flags], #1, 182f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.8h }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.8h }, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.8h }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.8h }, [x19]\n"
"fmin v8.8h, v8.8h, v1.8h\n"
"fmin v9.8h, v9.8h, v1.8h\n"
"fmin v10.8h, v10.8h, v1.8h\n"
@@ -2759,256 +2759,256 @@ void a64_ffhybrid_fp16_mla_6x32 (
"fmax v22.8h, v22.8h, v0.8h\n"
"fmax v23.8h, v23.8h, v0.8h\n"
"182:" // Height 4: No activation
- "cmp x14, #0x20\n"
+ "cmp x13, #0x20\n"
"bge 199f\n"
- "tbz x14, #4, 190f\n"
- "st1 { v8.8h }, [x13], #0x10\n"
- "st1 { v9.8h }, [x13], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v13.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v17.8h }, [x24], #0x10\n"
- "st1 { v20.8h }, [x23], #0x10\n"
- "st1 { v21.8h }, [x23], #0x10\n"
- "tbz x14, #3, 186f\n"
- "st1 { v10.8h }, [x13], #0x10\n"
- "st1 { v14.8h }, [x25], #0x10\n"
- "st1 { v18.8h }, [x24], #0x10\n"
- "st1 { v22.8h }, [x23], #0x10\n"
- "tbz x14, #2, 184f\n"
- "str d11, [x13], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
- "tbz x14, #1, 183f\n"
- "st1 { v11.s }[2], [x13], #0x4\n"
- "st1 { v15.s }[2], [x25], #0x4\n"
- "st1 { v19.s }[2], [x24], #0x4\n"
- "st1 { v23.s }[2], [x23], #0x4\n"
- "tbz x14, #0, 198f\n"
- "st1 { v11.h }[6], [x13]\n"
- "st1 { v15.h }[6], [x25]\n"
- "st1 { v19.h }[6], [x24]\n"
- "st1 { v23.h }[6], [x23]\n"
+ "tbz x13, #4, 190f\n"
+ "st1 { v8.8h }, [x12], #0x10\n"
+ "st1 { v9.8h }, [x12], #0x10\n"
+ "st1 { v12.8h }, [x24], #0x10\n"
+ "st1 { v13.8h }, [x24], #0x10\n"
+ "st1 { v16.8h }, [x23], #0x10\n"
+ "st1 { v17.8h }, [x23], #0x10\n"
+ "st1 { v20.8h }, [x22], #0x10\n"
+ "st1 { v21.8h }, [x22], #0x10\n"
+ "tbz x13, #3, 186f\n"
+ "st1 { v10.8h }, [x12], #0x10\n"
+ "st1 { v14.8h }, [x24], #0x10\n"
+ "st1 { v18.8h }, [x23], #0x10\n"
+ "st1 { v22.8h }, [x22], #0x10\n"
+ "tbz x13, #2, 184f\n"
+ "str d11, [x12], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "str d23, [x22], #0x8\n"
+ "tbz x13, #1, 183f\n"
+ "st1 { v11.s }[2], [x12], #0x4\n"
+ "st1 { v15.s }[2], [x24], #0x4\n"
+ "st1 { v19.s }[2], [x23], #0x4\n"
+ "st1 { v23.s }[2], [x22], #0x4\n"
+ "tbz x13, #0, 198f\n"
+ "st1 { v11.h }[6], [x12]\n"
+ "st1 { v15.h }[6], [x24]\n"
+ "st1 { v19.h }[6], [x23]\n"
+ "st1 { v23.h }[6], [x22]\n"
"b 198f\n"
"183:" // Height 4: Partial direct writeback: partial_1_28
- "tbz x14, #0, 198f\n"
- "st1 { v11.h }[4], [x13]\n"
- "st1 { v15.h }[4], [x25]\n"
- "st1 { v19.h }[4], [x24]\n"
- "st1 { v23.h }[4], [x23]\n"
+ "tbz x13, #0, 198f\n"
+ "st1 { v11.h }[4], [x12]\n"
+ "st1 { v15.h }[4], [x24]\n"
+ "st1 { v19.h }[4], [x23]\n"
+ "st1 { v23.h }[4], [x22]\n"
"b 198f\n"
"184:" // Height 4: Partial direct writeback: partial_2_24
- "tbz x14, #1, 185f\n"
- "str s11, [x13], #0x4\n"
- "str s15, [x25], #0x4\n"
- "str s19, [x24], #0x4\n"
- "str s23, [x23], #0x4\n"
- "tbz x14, #0, 198f\n"
- "st1 { v11.h }[2], [x13]\n"
- "st1 { v15.h }[2], [x25]\n"
- "st1 { v19.h }[2], [x24]\n"
- "st1 { v23.h }[2], [x23]\n"
+ "tbz x13, #1, 185f\n"
+ "str s11, [x12], #0x4\n"
+ "str s15, [x24], #0x4\n"
+ "str s19, [x23], #0x4\n"
+ "str s23, [x22], #0x4\n"
+ "tbz x13, #0, 198f\n"
+ "st1 { v11.h }[2], [x12]\n"
+ "st1 { v15.h }[2], [x24]\n"
+ "st1 { v19.h }[2], [x23]\n"
+ "st1 { v23.h }[2], [x22]\n"
"b 198f\n"
"185:" // Height 4: Partial direct writeback: partial_1_24
- "tbz x14, #0, 198f\n"
- "str h11, [x13, #0x0]\n"
- "str h15, [x25, #0x0]\n"
- "str h19, [x24, #0x0]\n"
- "str h23, [x23, #0x0]\n"
+ "tbz x13, #0, 198f\n"
+ "str h11, [x12, #0x0]\n"
+ "str h15, [x24, #0x0]\n"
+ "str h19, [x23, #0x0]\n"
+ "str h23, [x22, #0x0]\n"
"b 198f\n"
"186:" // Height 4: Partial direct writeback: partial_4_16
- "tbz x14, #2, 188f\n"
- "str d10, [x13], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
- "tbz x14, #1, 187f\n"
- "st1 { v10.s }[2], [x13], #0x4\n"
- "st1 { v14.s }[2], [x25], #0x4\n"
- "st1 { v18.s }[2], [x24], #0x4\n"
- "st1 { v22.s }[2], [x23], #0x4\n"
- "tbz x14, #0, 198f\n"
- "st1 { v10.h }[6], [x13]\n"
- "st1 { v14.h }[6], [x25]\n"
- "st1 { v18.h }[6], [x24]\n"
- "st1 { v22.h }[6], [x23]\n"
+ "tbz x13, #2, 188f\n"
+ "str d10, [x12], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
+ "str d22, [x22], #0x8\n"
+ "tbz x13, #1, 187f\n"
+ "st1 { v10.s }[2], [x12], #0x4\n"
+ "st1 { v14.s }[2], [x24], #0x4\n"
+ "st1 { v18.s }[2], [x23], #0x4\n"
+ "st1 { v22.s }[2], [x22], #0x4\n"
+ "tbz x13, #0, 198f\n"
+ "st1 { v10.h }[6], [x12]\n"
+ "st1 { v14.h }[6], [x24]\n"
+ "st1 { v18.h }[6], [x23]\n"
+ "st1 { v22.h }[6], [x22]\n"
"b 198f\n"
"187:" // Height 4: Partial direct writeback: partial_1_20
- "tbz x14, #0, 198f\n"
- "st1 { v10.h }[4], [x13]\n"
- "st1 { v14.h }[4], [x25]\n"
- "st1 { v18.h }[4], [x24]\n"
- "st1 { v22.h }[4], [x23]\n"
+ "tbz x13, #0, 198f\n"
+ "st1 { v10.h }[4], [x12]\n"
+ "st1 { v14.h }[4], [x24]\n"
+ "st1 { v18.h }[4], [x23]\n"
+ "st1 { v22.h }[4], [x22]\n"
"b 198f\n"
"188:" // Height 4: Partial direct writeback: partial_2_16
- "tbz x14, #1, 189f\n"
- "str s10, [x13], #0x4\n"
- "str s14, [x25], #0x4\n"
- "str s18, [x24], #0x4\n"
- "str s22, [x23], #0x4\n"
- "tbz x14, #0, 198f\n"
- "st1 { v10.h }[2], [x13]\n"
- "st1 { v14.h }[2], [x25]\n"
- "st1 { v18.h }[2], [x24]\n"
- "st1 { v22.h }[2], [x23]\n"
+ "tbz x13, #1, 189f\n"
+ "str s10, [x12], #0x4\n"
+ "str s14, [x24], #0x4\n"
+ "str s18, [x23], #0x4\n"
+ "str s22, [x22], #0x4\n"
+ "tbz x13, #0, 198f\n"
+ "st1 { v10.h }[2], [x12]\n"
+ "st1 { v14.h }[2], [x24]\n"
+ "st1 { v18.h }[2], [x23]\n"
+ "st1 { v22.h }[2], [x22]\n"
"b 198f\n"
"189:" // Height 4: Partial direct writeback: partial_1_16
- "tbz x14, #0, 198f\n"
- "str h10, [x13, #0x0]\n"
- "str h14, [x25, #0x0]\n"
- "str h18, [x24, #0x0]\n"
- "str h22, [x23, #0x0]\n"
+ "tbz x13, #0, 198f\n"
+ "str h10, [x12, #0x0]\n"
+ "str h14, [x24, #0x0]\n"
+ "str h18, [x23, #0x0]\n"
+ "str h22, [x22, #0x0]\n"
"b 198f\n"
"190:" // Height 4: Partial direct writeback: partial_8_0
- "tbz x14, #3, 194f\n"
- "st1 { v8.8h }, [x13], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v20.8h }, [x23], #0x10\n"
- "tbz x14, #2, 192f\n"
- "str d9, [x13], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
- "tbz x14, #1, 191f\n"
- "st1 { v9.s }[2], [x13], #0x4\n"
- "st1 { v13.s }[2], [x25], #0x4\n"
- "st1 { v17.s }[2], [x24], #0x4\n"
- "st1 { v21.s }[2], [x23], #0x4\n"
- "tbz x14, #0, 198f\n"
- "st1 { v9.h }[6], [x13]\n"
- "st1 { v13.h }[6], [x25]\n"
- "st1 { v17.h }[6], [x24]\n"
- "st1 { v21.h }[6], [x23]\n"
+ "tbz x13, #3, 194f\n"
+ "st1 { v8.8h }, [x12], #0x10\n"
+ "st1 { v12.8h }, [x24], #0x10\n"
+ "st1 { v16.8h }, [x23], #0x10\n"
+ "st1 { v20.8h }, [x22], #0x10\n"
+ "tbz x13, #2, 192f\n"
+ "str d9, [x12], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
+ "str d21, [x22], #0x8\n"
+ "tbz x13, #1, 191f\n"
+ "st1 { v9.s }[2], [x12], #0x4\n"
+ "st1 { v13.s }[2], [x24], #0x4\n"
+ "st1 { v17.s }[2], [x23], #0x4\n"
+ "st1 { v21.s }[2], [x22], #0x4\n"
+ "tbz x13, #0, 198f\n"
+ "st1 { v9.h }[6], [x12]\n"
+ "st1 { v13.h }[6], [x24]\n"
+ "st1 { v17.h }[6], [x23]\n"
+ "st1 { v21.h }[6], [x22]\n"
"b 198f\n"
"191:" // Height 4: Partial direct writeback: partial_1_12
- "tbz x14, #0, 198f\n"
- "st1 { v9.h }[4], [x13]\n"
- "st1 { v13.h }[4], [x25]\n"
- "st1 { v17.h }[4], [x24]\n"
- "st1 { v21.h }[4], [x23]\n"
+ "tbz x13, #0, 198f\n"
+ "st1 { v9.h }[4], [x12]\n"
+ "st1 { v13.h }[4], [x24]\n"
+ "st1 { v17.h }[4], [x23]\n"
+ "st1 { v21.h }[4], [x22]\n"
"b 198f\n"
"192:" // Height 4: Partial direct writeback: partial_2_8
- "tbz x14, #1, 193f\n"
- "str s9, [x13], #0x4\n"
- "str s13, [x25], #0x4\n"
- "str s17, [x24], #0x4\n"
- "str s21, [x23], #0x4\n"
- "tbz x14, #0, 198f\n"
- "st1 { v9.h }[2], [x13]\n"
- "st1 { v13.h }[2], [x25]\n"
- "st1 { v17.h }[2], [x24]\n"
- "st1 { v21.h }[2], [x23]\n"
+ "tbz x13, #1, 193f\n"
+ "str s9, [x12], #0x4\n"
+ "str s13, [x24], #0x4\n"
+ "str s17, [x23], #0x4\n"
+ "str s21, [x22], #0x4\n"
+ "tbz x13, #0, 198f\n"
+ "st1 { v9.h }[2], [x12]\n"
+ "st1 { v13.h }[2], [x24]\n"
+ "st1 { v17.h }[2], [x23]\n"
+ "st1 { v21.h }[2], [x22]\n"
"b 198f\n"
"193:" // Height 4: Partial direct writeback: partial_1_8
- "tbz x14, #0, 198f\n"
- "str h9, [x13, #0x0]\n"
- "str h13, [x25, #0x0]\n"
- "str h17, [x24, #0x0]\n"
- "str h21, [x23, #0x0]\n"
+ "tbz x13, #0, 198f\n"
+ "str h9, [x12, #0x0]\n"
+ "str h13, [x24, #0x0]\n"
+ "str h17, [x23, #0x0]\n"
+ "str h21, [x22, #0x0]\n"
"b 198f\n"
"194:" // Height 4: Partial direct writeback: partial_4_0
- "tbz x14, #2, 196f\n"
- "str d8, [x13], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "tbz x14, #1, 195f\n"
- "st1 { v8.s }[2], [x13], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
- "st1 { v16.s }[2], [x24], #0x4\n"
- "st1 { v20.s }[2], [x23], #0x4\n"
- "tbz x14, #0, 198f\n"
- "st1 { v8.h }[6], [x13]\n"
- "st1 { v12.h }[6], [x25]\n"
- "st1 { v16.h }[6], [x24]\n"
- "st1 { v20.h }[6], [x23]\n"
+ "tbz x13, #2, 196f\n"
+ "str d8, [x12], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
+ "str d20, [x22], #0x8\n"
+ "tbz x13, #1, 195f\n"
+ "st1 { v8.s }[2], [x12], #0x4\n"
+ "st1 { v12.s }[2], [x24], #0x4\n"
+ "st1 { v16.s }[2], [x23], #0x4\n"
+ "st1 { v20.s }[2], [x22], #0x4\n"
+ "tbz x13, #0, 198f\n"
+ "st1 { v8.h }[6], [x12]\n"
+ "st1 { v12.h }[6], [x24]\n"
+ "st1 { v16.h }[6], [x23]\n"
+ "st1 { v20.h }[6], [x22]\n"
"b 198f\n"
"195:" // Height 4: Partial direct writeback: partial_1_4
- "tbz x14, #0, 198f\n"
- "st1 { v8.h }[4], [x13]\n"
- "st1 { v12.h }[4], [x25]\n"
- "st1 { v16.h }[4], [x24]\n"
- "st1 { v20.h }[4], [x23]\n"
+ "tbz x13, #0, 198f\n"
+ "st1 { v8.h }[4], [x12]\n"
+ "st1 { v12.h }[4], [x24]\n"
+ "st1 { v16.h }[4], [x23]\n"
+ "st1 { v20.h }[4], [x22]\n"
"b 198f\n"
"196:" // Height 4: Partial direct writeback: partial_2_0
- "tbz x14, #1, 197f\n"
- "str s8, [x13], #0x4\n"
- "str s12, [x25], #0x4\n"
- "str s16, [x24], #0x4\n"
- "str s20, [x23], #0x4\n"
- "tbz x14, #0, 198f\n"
- "st1 { v8.h }[2], [x13]\n"
- "st1 { v12.h }[2], [x25]\n"
- "st1 { v16.h }[2], [x24]\n"
- "st1 { v20.h }[2], [x23]\n"
+ "tbz x13, #1, 197f\n"
+ "str s8, [x12], #0x4\n"
+ "str s12, [x24], #0x4\n"
+ "str s16, [x23], #0x4\n"
+ "str s20, [x22], #0x4\n"
+ "tbz x13, #0, 198f\n"
+ "st1 { v8.h }[2], [x12]\n"
+ "st1 { v12.h }[2], [x24]\n"
+ "st1 { v16.h }[2], [x23]\n"
+ "st1 { v20.h }[2], [x22]\n"
"b 198f\n"
"197:" // Height 4: Partial direct writeback: partial_1_0
- "str h8, [x13, #0x0]\n"
- "str h12, [x25, #0x0]\n"
- "str h16, [x24, #0x0]\n"
- "str h20, [x23, #0x0]\n"
+ "str h8, [x12, #0x0]\n"
+ "str h12, [x24, #0x0]\n"
+ "str h16, [x23, #0x0]\n"
+ "str h20, [x22, #0x0]\n"
"198:" // Height 4: Partial direct writeback: Done
"b 200f\n"
"199:" // Height 4: Full writeback
- "str q8, [x13, #0x0]\n"
- "str q9, [x13, #0x10]\n"
- "str q10, [x13, #0x20]\n"
- "str q11, [x13, #0x30]\n"
- "add x13, x13, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
+ "str q8, [x12, #0x0]\n"
+ "str q9, [x12, #0x10]\n"
+ "str q10, [x12, #0x20]\n"
+ "str q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "str q20, [x22, #0x0]\n"
+ "str q21, [x22, #0x10]\n"
+ "str q22, [x22, #0x20]\n"
+ "str q23, [x22, #0x30]\n"
"200:" // Height 4: Writeback done
- "subs x14, x14, #0x20\n"
+ "subs x13, x13, #0x20\n"
"bgt 152b\n"
"b 302f\n"
"201:" // Height 5
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"202:" // Height 5: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x20, x9, x20, LSL #1\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0x18\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0x18\n"
"bgt 203f\n"
- "cmp x14, #0x10\n"
- "mov x9, x12\n"
+ "cmp x13, #0x10\n"
+ "mov x28, x11\n"
"bgt 203f\n"
- "cmp x14, #0x8\n"
- "mov x10, x12\n"
+ "cmp x13, #0x8\n"
+ "mov x9, x11\n"
"bgt 203f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"203:" // Height 5: B setup done
- "cbz x15, 204f\n"
- "ldr q8, [x15, #0x0]\n"
- "ldr q9, [x15, #0x10]\n"
+ "cbz x14, 204f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
"mov v12.16b, v8.16b\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x15, #0x20]\n"
- "ldr q11, [x15, #0x30]\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
"mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
"mov v16.16b, v8.16b\n"
"mov v17.16b, v9.16b\n"
- "add x15, x15, #0x40\n"
+ "add x14, x14, #0x40\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
"mov v20.16b, v8.16b\n"
@@ -3022,273 +3022,273 @@ void a64_ffhybrid_fp16_mla_6x32 (
"b 223f\n"
"204:" // Height 5: no bias
"tbz %x[flags], #0, 222f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "cmp x14, #0x20\n"
- "add x22, x23, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "cmp x13, #0x20\n"
+ "add x21, x22, x19, LSL #1\n"
"bge 221f\n"
- "tbz x14, #4, 212f\n"
- "ld1 { v8.8h }, [x13], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v20.8h }, [x23], #0x10\n"
- "ld1 { v24.8h }, [x22], #0x10\n"
- "ld1 { v9.8h }, [x13], #0x10\n"
- "ld1 { v13.8h }, [x25], #0x10\n"
- "ld1 { v17.8h }, [x24], #0x10\n"
- "ld1 { v21.8h }, [x23], #0x10\n"
- "ld1 { v25.8h }, [x22], #0x10\n"
- "tbz x14, #3, 208f\n"
- "ld1 { v10.8h }, [x13], #0x10\n"
- "ld1 { v14.8h }, [x25], #0x10\n"
- "ld1 { v18.8h }, [x24], #0x10\n"
- "ld1 { v22.8h }, [x23], #0x10\n"
- "ld1 { v26.8h }, [x22], #0x10\n"
- "tbz x14, #2, 206f\n"
- "ldr d11, [x13], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
- "tbz x14, #1, 205f\n"
- "ld1 { v11.s }[2], [x13], #0x4\n"
- "ld1 { v15.s }[2], [x25], #0x4\n"
- "mov x20, #0x3c\n"
- "ld1 { v19.s }[2], [x24], #0x4\n"
- "ld1 { v23.s }[2], [x23], #0x4\n"
- "ld1 { v27.s }[2], [x22], #0x4\n"
- "tbz x14, #0, 220f\n"
- "ld1 { v11.h }[6], [x13]\n"
- "ld1 { v15.h }[6], [x25]\n"
- "ld1 { v19.h }[6], [x24]\n"
- "ld1 { v23.h }[6], [x23]\n"
- "ld1 { v27.h }[6], [x22]\n"
+ "tbz x13, #4, 212f\n"
+ "ld1 { v8.8h }, [x12], #0x10\n"
+ "ld1 { v12.8h }, [x24], #0x10\n"
+ "ld1 { v16.8h }, [x23], #0x10\n"
+ "ld1 { v20.8h }, [x22], #0x10\n"
+ "ld1 { v24.8h }, [x21], #0x10\n"
+ "ld1 { v9.8h }, [x12], #0x10\n"
+ "ld1 { v13.8h }, [x24], #0x10\n"
+ "ld1 { v17.8h }, [x23], #0x10\n"
+ "ld1 { v21.8h }, [x22], #0x10\n"
+ "ld1 { v25.8h }, [x21], #0x10\n"
+ "tbz x13, #3, 208f\n"
+ "ld1 { v10.8h }, [x12], #0x10\n"
+ "ld1 { v14.8h }, [x24], #0x10\n"
+ "ld1 { v18.8h }, [x23], #0x10\n"
+ "ld1 { v22.8h }, [x22], #0x10\n"
+ "ld1 { v26.8h }, [x21], #0x10\n"
+ "tbz x13, #2, 206f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "ldr d27, [x21], #0x8\n"
+ "tbz x13, #1, 205f\n"
+ "ld1 { v11.s }[2], [x12], #0x4\n"
+ "ld1 { v15.s }[2], [x24], #0x4\n"
+ "mov x19, #0x3c\n"
+ "ld1 { v19.s }[2], [x23], #0x4\n"
+ "ld1 { v23.s }[2], [x22], #0x4\n"
+ "ld1 { v27.s }[2], [x21], #0x4\n"
+ "tbz x13, #0, 220f\n"
+ "ld1 { v11.h }[6], [x12]\n"
+ "ld1 { v15.h }[6], [x24]\n"
+ "ld1 { v19.h }[6], [x23]\n"
+ "ld1 { v23.h }[6], [x22]\n"
+ "ld1 { v27.h }[6], [x21]\n"
"b 220f\n"
"205:" // Height 5: Partial accumulate: partial_1_28
- "mov x20, #0x38\n"
- "tbz x14, #0, 220f\n"
- "ld1 { v11.h }[4], [x13]\n"
- "ld1 { v15.h }[4], [x25]\n"
- "ld1 { v19.h }[4], [x24]\n"
- "ld1 { v23.h }[4], [x23]\n"
- "ld1 { v27.h }[4], [x22]\n"
+ "mov x19, #0x38\n"
+ "tbz x13, #0, 220f\n"
+ "ld1 { v11.h }[4], [x12]\n"
+ "ld1 { v15.h }[4], [x24]\n"
+ "ld1 { v19.h }[4], [x23]\n"
+ "ld1 { v23.h }[4], [x22]\n"
+ "ld1 { v27.h }[4], [x21]\n"
"b 220f\n"
"206:" // Height 5: Partial accumulate: partial_2_24
- "tbz x14, #1, 207f\n"
- "ldr s11, [x13], #0x4\n"
- "ldr s15, [x25], #0x4\n"
- "mov x20, #0x34\n"
- "ldr s19, [x24], #0x4\n"
- "ldr s23, [x23], #0x4\n"
- "ldr s27, [x22], #0x4\n"
- "tbz x14, #0, 220f\n"
- "ld1 { v11.h }[2], [x13]\n"
- "ld1 { v15.h }[2], [x25]\n"
- "ld1 { v19.h }[2], [x24]\n"
- "ld1 { v23.h }[2], [x23]\n"
- "ld1 { v27.h }[2], [x22]\n"
+ "tbz x13, #1, 207f\n"
+ "ldr s11, [x12], #0x4\n"
+ "ldr s15, [x24], #0x4\n"
+ "mov x19, #0x34\n"
+ "ldr s19, [x23], #0x4\n"
+ "ldr s23, [x22], #0x4\n"
+ "ldr s27, [x21], #0x4\n"
+ "tbz x13, #0, 220f\n"
+ "ld1 { v11.h }[2], [x12]\n"
+ "ld1 { v15.h }[2], [x24]\n"
+ "ld1 { v19.h }[2], [x23]\n"
+ "ld1 { v23.h }[2], [x22]\n"
+ "ld1 { v27.h }[2], [x21]\n"
"b 220f\n"
"207:" // Height 5: Partial accumulate: partial_1_24
- "mov x20, #0x30\n"
- "tbz x14, #0, 220f\n"
- "ldr h11, [x13, #0x0]\n"
- "ldr h15, [x25, #0x0]\n"
- "ldr h19, [x24, #0x0]\n"
- "ldr h23, [x23, #0x0]\n"
- "ldr h27, [x22, #0x0]\n"
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 220f\n"
+ "ldr h11, [x12, #0x0]\n"
+ "ldr h15, [x24, #0x0]\n"
+ "ldr h19, [x23, #0x0]\n"
+ "ldr h23, [x22, #0x0]\n"
+ "ldr h27, [x21, #0x0]\n"
"b 220f\n"
"208:" // Height 5: Partial accumulate: partial_4_16
- "tbz x14, #2, 210f\n"
- "ldr d10, [x13], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
- "tbz x14, #1, 209f\n"
- "ld1 { v10.s }[2], [x13], #0x4\n"
- "ld1 { v14.s }[2], [x25], #0x4\n"
- "mov x20, #0x2c\n"
- "ld1 { v18.s }[2], [x24], #0x4\n"
- "ld1 { v22.s }[2], [x23], #0x4\n"
- "ld1 { v26.s }[2], [x22], #0x4\n"
- "tbz x14, #0, 220f\n"
- "ld1 { v10.h }[6], [x13]\n"
- "ld1 { v14.h }[6], [x25]\n"
- "ld1 { v18.h }[6], [x24]\n"
- "ld1 { v22.h }[6], [x23]\n"
- "ld1 { v26.h }[6], [x22]\n"
+ "tbz x13, #2, 210f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "ldr d26, [x21], #0x8\n"
+ "tbz x13, #1, 209f\n"
+ "ld1 { v10.s }[2], [x12], #0x4\n"
+ "ld1 { v14.s }[2], [x24], #0x4\n"
+ "mov x19, #0x2c\n"
+ "ld1 { v18.s }[2], [x23], #0x4\n"
+ "ld1 { v22.s }[2], [x22], #0x4\n"
+ "ld1 { v26.s }[2], [x21], #0x4\n"
+ "tbz x13, #0, 220f\n"
+ "ld1 { v10.h }[6], [x12]\n"
+ "ld1 { v14.h }[6], [x24]\n"
+ "ld1 { v18.h }[6], [x23]\n"
+ "ld1 { v22.h }[6], [x22]\n"
+ "ld1 { v26.h }[6], [x21]\n"
"b 220f\n"
"209:" // Height 5: Partial accumulate: partial_1_20
- "mov x20, #0x28\n"
- "tbz x14, #0, 220f\n"
- "ld1 { v10.h }[4], [x13]\n"
- "ld1 { v14.h }[4], [x25]\n"
- "ld1 { v18.h }[4], [x24]\n"
- "ld1 { v22.h }[4], [x23]\n"
- "ld1 { v26.h }[4], [x22]\n"
+ "mov x19, #0x28\n"
+ "tbz x13, #0, 220f\n"
+ "ld1 { v10.h }[4], [x12]\n"
+ "ld1 { v14.h }[4], [x24]\n"
+ "ld1 { v18.h }[4], [x23]\n"
+ "ld1 { v22.h }[4], [x22]\n"
+ "ld1 { v26.h }[4], [x21]\n"
"b 220f\n"
"210:" // Height 5: Partial accumulate: partial_2_16
- "tbz x14, #1, 211f\n"
- "ldr s10, [x13], #0x4\n"
- "ldr s14, [x25], #0x4\n"
- "mov x20, #0x24\n"
- "ldr s18, [x24], #0x4\n"
- "ldr s22, [x23], #0x4\n"
- "ldr s26, [x22], #0x4\n"
- "tbz x14, #0, 220f\n"
- "ld1 { v10.h }[2], [x13]\n"
- "ld1 { v14.h }[2], [x25]\n"
- "ld1 { v18.h }[2], [x24]\n"
- "ld1 { v22.h }[2], [x23]\n"
- "ld1 { v26.h }[2], [x22]\n"
+ "tbz x13, #1, 211f\n"
+ "ldr s10, [x12], #0x4\n"
+ "ldr s14, [x24], #0x4\n"
+ "mov x19, #0x24\n"
+ "ldr s18, [x23], #0x4\n"
+ "ldr s22, [x22], #0x4\n"
+ "ldr s26, [x21], #0x4\n"
+ "tbz x13, #0, 220f\n"
+ "ld1 { v10.h }[2], [x12]\n"
+ "ld1 { v14.h }[2], [x24]\n"
+ "ld1 { v18.h }[2], [x23]\n"
+ "ld1 { v22.h }[2], [x22]\n"
+ "ld1 { v26.h }[2], [x21]\n"
"b 220f\n"
"211:" // Height 5: Partial accumulate: partial_1_16
- "mov x20, #0x20\n"
- "tbz x14, #0, 220f\n"
- "ldr h10, [x13, #0x0]\n"
- "ldr h14, [x25, #0x0]\n"
- "ldr h18, [x24, #0x0]\n"
- "ldr h22, [x23, #0x0]\n"
- "ldr h26, [x22, #0x0]\n"
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 220f\n"
+ "ldr h10, [x12, #0x0]\n"
+ "ldr h14, [x24, #0x0]\n"
+ "ldr h18, [x23, #0x0]\n"
+ "ldr h22, [x22, #0x0]\n"
+ "ldr h26, [x21, #0x0]\n"
"b 220f\n"
"212:" // Height 5: Partial accumulate: partial_8_0
- "tbz x14, #3, 216f\n"
- "ld1 { v8.8h }, [x13], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v20.8h }, [x23], #0x10\n"
- "ld1 { v24.8h }, [x22], #0x10\n"
- "tbz x14, #2, 214f\n"
- "ldr d9, [x13], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
- "tbz x14, #1, 213f\n"
- "ld1 { v9.s }[2], [x13], #0x4\n"
- "ld1 { v13.s }[2], [x25], #0x4\n"
- "mov x20, #0x1c\n"
- "ld1 { v17.s }[2], [x24], #0x4\n"
- "ld1 { v21.s }[2], [x23], #0x4\n"
- "ld1 { v25.s }[2], [x22], #0x4\n"
- "tbz x14, #0, 220f\n"
- "ld1 { v9.h }[6], [x13]\n"
- "ld1 { v13.h }[6], [x25]\n"
- "ld1 { v17.h }[6], [x24]\n"
- "ld1 { v21.h }[6], [x23]\n"
- "ld1 { v25.h }[6], [x22]\n"
+ "tbz x13, #3, 216f\n"
+ "ld1 { v8.8h }, [x12], #0x10\n"
+ "ld1 { v12.8h }, [x24], #0x10\n"
+ "ld1 { v16.8h }, [x23], #0x10\n"
+ "ld1 { v20.8h }, [x22], #0x10\n"
+ "ld1 { v24.8h }, [x21], #0x10\n"
+ "tbz x13, #2, 214f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d25, [x21], #0x8\n"
+ "tbz x13, #1, 213f\n"
+ "ld1 { v9.s }[2], [x12], #0x4\n"
+ "ld1 { v13.s }[2], [x24], #0x4\n"
+ "mov x19, #0x1c\n"
+ "ld1 { v17.s }[2], [x23], #0x4\n"
+ "ld1 { v21.s }[2], [x22], #0x4\n"
+ "ld1 { v25.s }[2], [x21], #0x4\n"
+ "tbz x13, #0, 220f\n"
+ "ld1 { v9.h }[6], [x12]\n"
+ "ld1 { v13.h }[6], [x24]\n"
+ "ld1 { v17.h }[6], [x23]\n"
+ "ld1 { v21.h }[6], [x22]\n"
+ "ld1 { v25.h }[6], [x21]\n"
"b 220f\n"
"213:" // Height 5: Partial accumulate: partial_1_12
- "mov x20, #0x18\n"
- "tbz x14, #0, 220f\n"
- "ld1 { v9.h }[4], [x13]\n"
- "ld1 { v13.h }[4], [x25]\n"
- "ld1 { v17.h }[4], [x24]\n"
- "ld1 { v21.h }[4], [x23]\n"
- "ld1 { v25.h }[4], [x22]\n"
+ "mov x19, #0x18\n"
+ "tbz x13, #0, 220f\n"
+ "ld1 { v9.h }[4], [x12]\n"
+ "ld1 { v13.h }[4], [x24]\n"
+ "ld1 { v17.h }[4], [x23]\n"
+ "ld1 { v21.h }[4], [x22]\n"
+ "ld1 { v25.h }[4], [x21]\n"
"b 220f\n"
"214:" // Height 5: Partial accumulate: partial_2_8
- "tbz x14, #1, 215f\n"
- "ldr s9, [x13], #0x4\n"
- "ldr s13, [x25], #0x4\n"
- "mov x20, #0x14\n"
- "ldr s17, [x24], #0x4\n"
- "ldr s21, [x23], #0x4\n"
- "ldr s25, [x22], #0x4\n"
- "tbz x14, #0, 220f\n"
- "ld1 { v9.h }[2], [x13]\n"
- "ld1 { v13.h }[2], [x25]\n"
- "ld1 { v17.h }[2], [x24]\n"
- "ld1 { v21.h }[2], [x23]\n"
- "ld1 { v25.h }[2], [x22]\n"
+ "tbz x13, #1, 215f\n"
+ "ldr s9, [x12], #0x4\n"
+ "ldr s13, [x24], #0x4\n"
+ "mov x19, #0x14\n"
+ "ldr s17, [x23], #0x4\n"
+ "ldr s21, [x22], #0x4\n"
+ "ldr s25, [x21], #0x4\n"
+ "tbz x13, #0, 220f\n"
+ "ld1 { v9.h }[2], [x12]\n"
+ "ld1 { v13.h }[2], [x24]\n"
+ "ld1 { v17.h }[2], [x23]\n"
+ "ld1 { v21.h }[2], [x22]\n"
+ "ld1 { v25.h }[2], [x21]\n"
"b 220f\n"
"215:" // Height 5: Partial accumulate: partial_1_8
- "mov x20, #0x10\n"
- "tbz x14, #0, 220f\n"
- "ldr h9, [x13, #0x0]\n"
- "ldr h13, [x25, #0x0]\n"
- "ldr h17, [x24, #0x0]\n"
- "ldr h21, [x23, #0x0]\n"
- "ldr h25, [x22, #0x0]\n"
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 220f\n"
+ "ldr h9, [x12, #0x0]\n"
+ "ldr h13, [x24, #0x0]\n"
+ "ldr h17, [x23, #0x0]\n"
+ "ldr h21, [x22, #0x0]\n"
+ "ldr h25, [x21, #0x0]\n"
"b 220f\n"
"216:" // Height 5: Partial accumulate: partial_4_0
- "tbz x14, #2, 218f\n"
- "ldr d8, [x13], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d24, [x22], #0x8\n"
- "tbz x14, #1, 217f\n"
- "ld1 { v8.s }[2], [x13], #0x4\n"
- "ld1 { v12.s }[2], [x25], #0x4\n"
- "mov x20, #0xc\n"
- "ld1 { v16.s }[2], [x24], #0x4\n"
- "ld1 { v20.s }[2], [x23], #0x4\n"
- "ld1 { v24.s }[2], [x22], #0x4\n"
- "tbz x14, #0, 220f\n"
- "ld1 { v8.h }[6], [x13]\n"
- "ld1 { v12.h }[6], [x25]\n"
- "ld1 { v16.h }[6], [x24]\n"
- "ld1 { v20.h }[6], [x23]\n"
- "ld1 { v24.h }[6], [x22]\n"
+ "tbz x13, #2, 218f\n"
+ "ldr d8, [x12], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "ldr d16, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "ldr d24, [x21], #0x8\n"
+ "tbz x13, #1, 217f\n"
+ "ld1 { v8.s }[2], [x12], #0x4\n"
+ "ld1 { v12.s }[2], [x24], #0x4\n"
+ "mov x19, #0xc\n"
+ "ld1 { v16.s }[2], [x23], #0x4\n"
+ "ld1 { v20.s }[2], [x22], #0x4\n"
+ "ld1 { v24.s }[2], [x21], #0x4\n"
+ "tbz x13, #0, 220f\n"
+ "ld1 { v8.h }[6], [x12]\n"
+ "ld1 { v12.h }[6], [x24]\n"
+ "ld1 { v16.h }[6], [x23]\n"
+ "ld1 { v20.h }[6], [x22]\n"
+ "ld1 { v24.h }[6], [x21]\n"
"b 220f\n"
"217:" // Height 5: Partial accumulate: partial_1_4
- "mov x20, #0x8\n"
- "tbz x14, #0, 220f\n"
- "ld1 { v8.h }[4], [x13]\n"
- "ld1 { v12.h }[4], [x25]\n"
- "ld1 { v16.h }[4], [x24]\n"
- "ld1 { v20.h }[4], [x23]\n"
- "ld1 { v24.h }[4], [x22]\n"
+ "mov x19, #0x8\n"
+ "tbz x13, #0, 220f\n"
+ "ld1 { v8.h }[4], [x12]\n"
+ "ld1 { v12.h }[4], [x24]\n"
+ "ld1 { v16.h }[4], [x23]\n"
+ "ld1 { v20.h }[4], [x22]\n"
+ "ld1 { v24.h }[4], [x21]\n"
"b 220f\n"
"218:" // Height 5: Partial accumulate: partial_2_0
- "tbz x14, #1, 219f\n"
- "ldr s8, [x13], #0x4\n"
- "ldr s12, [x25], #0x4\n"
- "mov x20, #0x4\n"
- "ldr s16, [x24], #0x4\n"
- "ldr s20, [x23], #0x4\n"
- "ldr s24, [x22], #0x4\n"
- "tbz x14, #0, 220f\n"
- "ld1 { v8.h }[2], [x13]\n"
- "ld1 { v12.h }[2], [x25]\n"
- "ld1 { v16.h }[2], [x24]\n"
- "ld1 { v20.h }[2], [x23]\n"
- "ld1 { v24.h }[2], [x22]\n"
+ "tbz x13, #1, 219f\n"
+ "ldr s8, [x12], #0x4\n"
+ "ldr s12, [x24], #0x4\n"
+ "mov x19, #0x4\n"
+ "ldr s16, [x23], #0x4\n"
+ "ldr s20, [x22], #0x4\n"
+ "ldr s24, [x21], #0x4\n"
+ "tbz x13, #0, 220f\n"
+ "ld1 { v8.h }[2], [x12]\n"
+ "ld1 { v12.h }[2], [x24]\n"
+ "ld1 { v16.h }[2], [x23]\n"
+ "ld1 { v20.h }[2], [x22]\n"
+ "ld1 { v24.h }[2], [x21]\n"
"b 220f\n"
"219:" // Height 5: Partial accumulate: partial_1_0
- "ldr h8, [x13, #0x0]\n"
- "ldr h12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr h16, [x24, #0x0]\n"
- "ldr h20, [x23, #0x0]\n"
- "ldr h24, [x22, #0x0]\n"
+ "ldr h8, [x12, #0x0]\n"
+ "ldr h12, [x24, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr h16, [x23, #0x0]\n"
+ "ldr h20, [x22, #0x0]\n"
+ "ldr h24, [x21, #0x0]\n"
"220:" // Height 5: Partial accumulate: Done
- "sub x13, x13, x20\n"
+ "sub x12, x12, x19\n"
"b 223f\n"
"221:" // Height 5: full accumulate
- "ldr q8, [x13, #0x0]\n"
- "ldr q9, [x13, #0x10]\n"
- "ldr q10, [x13, #0x20]\n"
- "ldr q11, [x13, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q24, [x22, #0x0]\n"
- "ldr q25, [x22, #0x10]\n"
- "ldr q26, [x22, #0x20]\n"
- "ldr q27, [x22, #0x30]\n"
+ "ldr q8, [x12, #0x0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "ldr q10, [x12, #0x20]\n"
+ "ldr q11, [x12, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x0]\n"
+ "ldr q17, [x23, #0x10]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "ldr q19, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
+ "ldr q24, [x21, #0x0]\n"
+ "ldr q25, [x21, #0x10]\n"
+ "ldr q26, [x21, #0x20]\n"
+ "ldr q27, [x21, #0x30]\n"
"b 223f\n"
"222:" // Height 5: no accumulate
"movi v8.16b, #0x0\n"
@@ -3312,446 +3312,446 @@ void a64_ffhybrid_fp16_mla_6x32 (
"movi v26.16b, #0x0\n"
"movi v27.16b, #0x0\n"
"223:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"224:" // Height 5: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 225f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 226f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
- "add x22, x22, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 226f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
"b 226f\n"
"225:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
"226:" // Height 5: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"blt 229f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x10\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q3, [x23, #0x0]\n"
- "ldr q4, [x22, #0x0]\n"
- "ldr q6, [x12, #0x0]\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x10\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"blt 228f\n"
"227:" // Height 5: Multiply loop: Main loop head
"fmla v8.8h, v6.8h, v0.h[0]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
- "sub x27, x27, #0x8\n"
- "cmp x27, #0x10\n"
+ "sub x26, x26, #0x8\n"
+ "cmp x26, #0x10\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
"fmla v20.8h, v6.8h, v3.h[0]\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"fmla v24.8h, v6.8h, v4.h[0]\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
- "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
- "add x23, x23, #0x10\n"
"add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
"fmla v21.8h, v7.8h, v3.h[0]\n"
"fmla v25.8h, v7.8h, v4.h[0]\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
"fmla v18.8h, v6.8h, v2.h[0]\n"
"fmla v22.8h, v6.8h, v3.h[0]\n"
"fmla v26.8h, v6.8h, v4.h[0]\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q6, [x11, #0x10]\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
"fmla v15.8h, v7.8h, v1.h[0]\n"
"fmla v19.8h, v7.8h, v2.h[0]\n"
"fmla v23.8h, v7.8h, v3.h[0]\n"
"fmla v27.8h, v7.8h, v4.h[0]\n"
- "ldr q7, [x11, #0x10]\n"
+ "ldr q7, [x10, #0x10]\n"
"fmla v8.8h, v6.8h, v0.h[1]\n"
"fmla v12.8h, v6.8h, v1.h[1]\n"
"fmla v16.8h, v6.8h, v2.h[1]\n"
"fmla v20.8h, v6.8h, v3.h[1]\n"
"fmla v24.8h, v6.8h, v4.h[1]\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
"fmla v9.8h, v7.8h, v0.h[1]\n"
"fmla v13.8h, v7.8h, v1.h[1]\n"
"fmla v17.8h, v7.8h, v2.h[1]\n"
"fmla v21.8h, v7.8h, v3.h[1]\n"
"fmla v25.8h, v7.8h, v4.h[1]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
"fmla v10.8h, v6.8h, v0.h[1]\n"
"fmla v14.8h, v6.8h, v1.h[1]\n"
"fmla v18.8h, v6.8h, v2.h[1]\n"
"fmla v22.8h, v6.8h, v3.h[1]\n"
"fmla v26.8h, v6.8h, v4.h[1]\n"
- "ldr q6, [x12, #0x20]\n"
+ "ldr q6, [x11, #0x20]\n"
"fmla v11.8h, v7.8h, v0.h[1]\n"
"fmla v15.8h, v7.8h, v1.h[1]\n"
"fmla v19.8h, v7.8h, v2.h[1]\n"
"fmla v23.8h, v7.8h, v3.h[1]\n"
"fmla v27.8h, v7.8h, v4.h[1]\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
"fmla v8.8h, v6.8h, v0.h[2]\n"
"fmla v12.8h, v6.8h, v1.h[2]\n"
"fmla v16.8h, v6.8h, v2.h[2]\n"
"fmla v20.8h, v6.8h, v3.h[2]\n"
"fmla v24.8h, v6.8h, v4.h[2]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
"fmla v9.8h, v7.8h, v0.h[2]\n"
"fmla v13.8h, v7.8h, v1.h[2]\n"
"fmla v17.8h, v7.8h, v2.h[2]\n"
"fmla v21.8h, v7.8h, v3.h[2]\n"
"fmla v25.8h, v7.8h, v4.h[2]\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
"fmla v10.8h, v6.8h, v0.h[2]\n"
"fmla v14.8h, v6.8h, v1.h[2]\n"
"fmla v18.8h, v6.8h, v2.h[2]\n"
"fmla v22.8h, v6.8h, v3.h[2]\n"
"fmla v26.8h, v6.8h, v4.h[2]\n"
- "ldr q6, [x12, #0x30]\n"
+ "ldr q6, [x11, #0x30]\n"
"fmla v11.8h, v7.8h, v0.h[2]\n"
"fmla v15.8h, v7.8h, v1.h[2]\n"
"fmla v19.8h, v7.8h, v2.h[2]\n"
"fmla v23.8h, v7.8h, v3.h[2]\n"
"fmla v27.8h, v7.8h, v4.h[2]\n"
- "ldr q7, [x11, #0x30]\n"
+ "ldr q7, [x10, #0x30]\n"
"fmla v8.8h, v6.8h, v0.h[3]\n"
"fmla v12.8h, v6.8h, v1.h[3]\n"
"fmla v16.8h, v6.8h, v2.h[3]\n"
"fmla v20.8h, v6.8h, v3.h[3]\n"
"fmla v24.8h, v6.8h, v4.h[3]\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
"fmla v9.8h, v7.8h, v0.h[3]\n"
"fmla v13.8h, v7.8h, v1.h[3]\n"
"fmla v17.8h, v7.8h, v2.h[3]\n"
"fmla v21.8h, v7.8h, v3.h[3]\n"
"fmla v25.8h, v7.8h, v4.h[3]\n"
- "ldr q7, [x9, #0x30]\n"
+ "ldr q7, [x28, #0x30]\n"
"fmla v10.8h, v6.8h, v0.h[3]\n"
"fmla v14.8h, v6.8h, v1.h[3]\n"
"fmla v18.8h, v6.8h, v2.h[3]\n"
"fmla v22.8h, v6.8h, v3.h[3]\n"
"fmla v26.8h, v6.8h, v4.h[3]\n"
- "ldr q6, [x12, #0x40]\n"
+ "ldr q6, [x11, #0x40]\n"
"fmla v11.8h, v7.8h, v0.h[3]\n"
"fmla v15.8h, v7.8h, v1.h[3]\n"
"fmla v19.8h, v7.8h, v2.h[3]\n"
"fmla v23.8h, v7.8h, v3.h[3]\n"
"fmla v27.8h, v7.8h, v4.h[3]\n"
- "ldr q7, [x11, #0x40]\n"
+ "ldr q7, [x10, #0x40]\n"
"fmla v8.8h, v6.8h, v0.h[4]\n"
"fmla v12.8h, v6.8h, v1.h[4]\n"
"fmla v16.8h, v6.8h, v2.h[4]\n"
"fmla v20.8h, v6.8h, v3.h[4]\n"
"fmla v24.8h, v6.8h, v4.h[4]\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
"fmla v9.8h, v7.8h, v0.h[4]\n"
"fmla v13.8h, v7.8h, v1.h[4]\n"
"fmla v17.8h, v7.8h, v2.h[4]\n"
"fmla v21.8h, v7.8h, v3.h[4]\n"
"fmla v25.8h, v7.8h, v4.h[4]\n"
- "ldr q7, [x9, #0x40]\n"
+ "ldr q7, [x28, #0x40]\n"
"fmla v10.8h, v6.8h, v0.h[4]\n"
"fmla v14.8h, v6.8h, v1.h[4]\n"
"fmla v18.8h, v6.8h, v2.h[4]\n"
"fmla v22.8h, v6.8h, v3.h[4]\n"
"fmla v26.8h, v6.8h, v4.h[4]\n"
- "ldr q6, [x12, #0x50]\n"
+ "ldr q6, [x11, #0x50]\n"
"fmla v11.8h, v7.8h, v0.h[4]\n"
"fmla v15.8h, v7.8h, v1.h[4]\n"
"fmla v19.8h, v7.8h, v2.h[4]\n"
"fmla v23.8h, v7.8h, v3.h[4]\n"
"fmla v27.8h, v7.8h, v4.h[4]\n"
- "ldr q7, [x11, #0x50]\n"
+ "ldr q7, [x10, #0x50]\n"
"fmla v8.8h, v6.8h, v0.h[5]\n"
"fmla v12.8h, v6.8h, v1.h[5]\n"
"fmla v16.8h, v6.8h, v2.h[5]\n"
"fmla v20.8h, v6.8h, v3.h[5]\n"
"fmla v24.8h, v6.8h, v4.h[5]\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
"fmla v9.8h, v7.8h, v0.h[5]\n"
"fmla v13.8h, v7.8h, v1.h[5]\n"
"fmla v17.8h, v7.8h, v2.h[5]\n"
"fmla v21.8h, v7.8h, v3.h[5]\n"
"fmla v25.8h, v7.8h, v4.h[5]\n"
- "ldr q7, [x9, #0x50]\n"
+ "ldr q7, [x28, #0x50]\n"
"fmla v10.8h, v6.8h, v0.h[5]\n"
"fmla v14.8h, v6.8h, v1.h[5]\n"
"fmla v18.8h, v6.8h, v2.h[5]\n"
"fmla v22.8h, v6.8h, v3.h[5]\n"
"fmla v26.8h, v6.8h, v4.h[5]\n"
- "ldr q6, [x12, #0x60]\n"
+ "ldr q6, [x11, #0x60]\n"
"fmla v11.8h, v7.8h, v0.h[5]\n"
"fmla v15.8h, v7.8h, v1.h[5]\n"
"fmla v19.8h, v7.8h, v2.h[5]\n"
"fmla v23.8h, v7.8h, v3.h[5]\n"
"fmla v27.8h, v7.8h, v4.h[5]\n"
- "ldr q7, [x11, #0x60]\n"
+ "ldr q7, [x10, #0x60]\n"
"fmla v8.8h, v6.8h, v0.h[6]\n"
"fmla v12.8h, v6.8h, v1.h[6]\n"
"fmla v16.8h, v6.8h, v2.h[6]\n"
"fmla v20.8h, v6.8h, v3.h[6]\n"
"fmla v24.8h, v6.8h, v4.h[6]\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
"fmla v9.8h, v7.8h, v0.h[6]\n"
"fmla v13.8h, v7.8h, v1.h[6]\n"
"fmla v17.8h, v7.8h, v2.h[6]\n"
"fmla v21.8h, v7.8h, v3.h[6]\n"
"fmla v25.8h, v7.8h, v4.h[6]\n"
- "ldr q7, [x9, #0x60]\n"
+ "ldr q7, [x28, #0x60]\n"
"fmla v10.8h, v6.8h, v0.h[6]\n"
"fmla v14.8h, v6.8h, v1.h[6]\n"
"fmla v18.8h, v6.8h, v2.h[6]\n"
"fmla v22.8h, v6.8h, v3.h[6]\n"
"fmla v26.8h, v6.8h, v4.h[6]\n"
- "ldr q6, [x12, #0x70]\n"
+ "ldr q6, [x11, #0x70]\n"
"fmla v11.8h, v7.8h, v0.h[6]\n"
- "add x12, x12, #0x80\n"
+ "add x11, x11, #0x80\n"
"fmla v15.8h, v7.8h, v1.h[6]\n"
"fmla v19.8h, v7.8h, v2.h[6]\n"
"fmla v23.8h, v7.8h, v3.h[6]\n"
"fmla v27.8h, v7.8h, v4.h[6]\n"
- "ldr q7, [x11, #0x70]\n"
- "add x11, x11, #0x80\n"
+ "ldr q7, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
"fmla v8.8h, v6.8h, v0.h[7]\n"
"fmla v12.8h, v6.8h, v1.h[7]\n"
"fmla v16.8h, v6.8h, v2.h[7]\n"
"fmla v20.8h, v6.8h, v3.h[7]\n"
"fmla v24.8h, v6.8h, v4.h[7]\n"
- "ldr q6, [x10, #0x70]\n"
+ "ldr q6, [x9, #0x70]\n"
"fmla v9.8h, v7.8h, v0.h[7]\n"
- "add x10, x10, #0x80\n"
+ "add x9, x9, #0x80\n"
"fmla v13.8h, v7.8h, v1.h[7]\n"
"fmla v17.8h, v7.8h, v2.h[7]\n"
"fmla v21.8h, v7.8h, v3.h[7]\n"
"fmla v25.8h, v7.8h, v4.h[7]\n"
- "ldr q7, [x9, #0x70]\n"
- "add x9, x9, #0x80\n"
+ "ldr q7, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
"fmla v10.8h, v6.8h, v0.h[7]\n"
"fmla v14.8h, v6.8h, v1.h[7]\n"
"fmla v18.8h, v6.8h, v2.h[7]\n"
"fmla v22.8h, v6.8h, v3.h[7]\n"
"fmla v26.8h, v6.8h, v4.h[7]\n"
- "ldr q6, [x12, #0x0]\n"
+ "ldr q6, [x11, #0x0]\n"
"fmla v11.8h, v7.8h, v0.h[7]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
"fmla v15.8h, v7.8h, v1.h[7]\n"
- "ldr q1, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
"fmla v19.8h, v7.8h, v2.h[7]\n"
- "ldr q2, [x24, #0x0]\n"
+ "ldr q2, [x23, #0x0]\n"
"fmla v23.8h, v7.8h, v3.h[7]\n"
- "ldr q3, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
"fmla v27.8h, v7.8h, v4.h[7]\n"
- "ldr q4, [x22, #0x0]\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"bge 227b\n"
"228:" // Height 5: Multiply loop: Single iteration only
"fmla v8.8h, v6.8h, v0.h[0]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
- "sub x27, x27, #0x8\n"
- "add x26, x26, #0x10\n"
+ "sub x26, x26, #0x8\n"
+ "add x25, x25, #0x10\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
"fmla v20.8h, v6.8h, v3.h[0]\n"
- "add x25, x25, #0x10\n"
"add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
"fmla v24.8h, v6.8h, v4.h[0]\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
- "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
- "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
"fmla v21.8h, v7.8h, v3.h[0]\n"
"fmla v25.8h, v7.8h, v4.h[0]\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
"fmla v18.8h, v6.8h, v2.h[0]\n"
"fmla v22.8h, v6.8h, v3.h[0]\n"
"fmla v26.8h, v6.8h, v4.h[0]\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q6, [x11, #0x10]\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
"fmla v15.8h, v7.8h, v1.h[0]\n"
"fmla v19.8h, v7.8h, v2.h[0]\n"
"fmla v23.8h, v7.8h, v3.h[0]\n"
"fmla v27.8h, v7.8h, v4.h[0]\n"
- "ldr q7, [x11, #0x10]\n"
+ "ldr q7, [x10, #0x10]\n"
"fmla v8.8h, v6.8h, v0.h[1]\n"
"fmla v12.8h, v6.8h, v1.h[1]\n"
"fmla v16.8h, v6.8h, v2.h[1]\n"
"fmla v20.8h, v6.8h, v3.h[1]\n"
"fmla v24.8h, v6.8h, v4.h[1]\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
"fmla v9.8h, v7.8h, v0.h[1]\n"
"fmla v13.8h, v7.8h, v1.h[1]\n"
"fmla v17.8h, v7.8h, v2.h[1]\n"
"fmla v21.8h, v7.8h, v3.h[1]\n"
"fmla v25.8h, v7.8h, v4.h[1]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
"fmla v10.8h, v6.8h, v0.h[1]\n"
"fmla v14.8h, v6.8h, v1.h[1]\n"
"fmla v18.8h, v6.8h, v2.h[1]\n"
"fmla v22.8h, v6.8h, v3.h[1]\n"
"fmla v26.8h, v6.8h, v4.h[1]\n"
- "ldr q6, [x12, #0x20]\n"
+ "ldr q6, [x11, #0x20]\n"
"fmla v11.8h, v7.8h, v0.h[1]\n"
"fmla v15.8h, v7.8h, v1.h[1]\n"
"fmla v19.8h, v7.8h, v2.h[1]\n"
"fmla v23.8h, v7.8h, v3.h[1]\n"
"fmla v27.8h, v7.8h, v4.h[1]\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
"fmla v8.8h, v6.8h, v0.h[2]\n"
"fmla v12.8h, v6.8h, v1.h[2]\n"
"fmla v16.8h, v6.8h, v2.h[2]\n"
"fmla v20.8h, v6.8h, v3.h[2]\n"
"fmla v24.8h, v6.8h, v4.h[2]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
"fmla v9.8h, v7.8h, v0.h[2]\n"
"fmla v13.8h, v7.8h, v1.h[2]\n"
"fmla v17.8h, v7.8h, v2.h[2]\n"
"fmla v21.8h, v7.8h, v3.h[2]\n"
"fmla v25.8h, v7.8h, v4.h[2]\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
"fmla v10.8h, v6.8h, v0.h[2]\n"
"fmla v14.8h, v6.8h, v1.h[2]\n"
"fmla v18.8h, v6.8h, v2.h[2]\n"
"fmla v22.8h, v6.8h, v3.h[2]\n"
"fmla v26.8h, v6.8h, v4.h[2]\n"
- "ldr q6, [x12, #0x30]\n"
+ "ldr q6, [x11, #0x30]\n"
"fmla v11.8h, v7.8h, v0.h[2]\n"
"fmla v15.8h, v7.8h, v1.h[2]\n"
"fmla v19.8h, v7.8h, v2.h[2]\n"
"fmla v23.8h, v7.8h, v3.h[2]\n"
"fmla v27.8h, v7.8h, v4.h[2]\n"
- "ldr q7, [x11, #0x30]\n"
+ "ldr q7, [x10, #0x30]\n"
"fmla v8.8h, v6.8h, v0.h[3]\n"
"fmla v12.8h, v6.8h, v1.h[3]\n"
"fmla v16.8h, v6.8h, v2.h[3]\n"
"fmla v20.8h, v6.8h, v3.h[3]\n"
"fmla v24.8h, v6.8h, v4.h[3]\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
"fmla v9.8h, v7.8h, v0.h[3]\n"
"fmla v13.8h, v7.8h, v1.h[3]\n"
"fmla v17.8h, v7.8h, v2.h[3]\n"
"fmla v21.8h, v7.8h, v3.h[3]\n"
"fmla v25.8h, v7.8h, v4.h[3]\n"
- "ldr q7, [x9, #0x30]\n"
+ "ldr q7, [x28, #0x30]\n"
"fmla v10.8h, v6.8h, v0.h[3]\n"
"fmla v14.8h, v6.8h, v1.h[3]\n"
"fmla v18.8h, v6.8h, v2.h[3]\n"
"fmla v22.8h, v6.8h, v3.h[3]\n"
"fmla v26.8h, v6.8h, v4.h[3]\n"
- "ldr q6, [x12, #0x40]\n"
+ "ldr q6, [x11, #0x40]\n"
"fmla v11.8h, v7.8h, v0.h[3]\n"
"fmla v15.8h, v7.8h, v1.h[3]\n"
"fmla v19.8h, v7.8h, v2.h[3]\n"
"fmla v23.8h, v7.8h, v3.h[3]\n"
"fmla v27.8h, v7.8h, v4.h[3]\n"
- "ldr q7, [x11, #0x40]\n"
+ "ldr q7, [x10, #0x40]\n"
"fmla v8.8h, v6.8h, v0.h[4]\n"
"fmla v12.8h, v6.8h, v1.h[4]\n"
"fmla v16.8h, v6.8h, v2.h[4]\n"
"fmla v20.8h, v6.8h, v3.h[4]\n"
"fmla v24.8h, v6.8h, v4.h[4]\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
"fmla v9.8h, v7.8h, v0.h[4]\n"
"fmla v13.8h, v7.8h, v1.h[4]\n"
"fmla v17.8h, v7.8h, v2.h[4]\n"
"fmla v21.8h, v7.8h, v3.h[4]\n"
"fmla v25.8h, v7.8h, v4.h[4]\n"
- "ldr q7, [x9, #0x40]\n"
+ "ldr q7, [x28, #0x40]\n"
"fmla v10.8h, v6.8h, v0.h[4]\n"
"fmla v14.8h, v6.8h, v1.h[4]\n"
"fmla v18.8h, v6.8h, v2.h[4]\n"
"fmla v22.8h, v6.8h, v3.h[4]\n"
"fmla v26.8h, v6.8h, v4.h[4]\n"
- "ldr q6, [x12, #0x50]\n"
+ "ldr q6, [x11, #0x50]\n"
"fmla v11.8h, v7.8h, v0.h[4]\n"
"fmla v15.8h, v7.8h, v1.h[4]\n"
"fmla v19.8h, v7.8h, v2.h[4]\n"
"fmla v23.8h, v7.8h, v3.h[4]\n"
"fmla v27.8h, v7.8h, v4.h[4]\n"
- "ldr q7, [x11, #0x50]\n"
+ "ldr q7, [x10, #0x50]\n"
"fmla v8.8h, v6.8h, v0.h[5]\n"
"fmla v12.8h, v6.8h, v1.h[5]\n"
"fmla v16.8h, v6.8h, v2.h[5]\n"
"fmla v20.8h, v6.8h, v3.h[5]\n"
"fmla v24.8h, v6.8h, v4.h[5]\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
"fmla v9.8h, v7.8h, v0.h[5]\n"
"fmla v13.8h, v7.8h, v1.h[5]\n"
"fmla v17.8h, v7.8h, v2.h[5]\n"
"fmla v21.8h, v7.8h, v3.h[5]\n"
"fmla v25.8h, v7.8h, v4.h[5]\n"
- "ldr q7, [x9, #0x50]\n"
+ "ldr q7, [x28, #0x50]\n"
"fmla v10.8h, v6.8h, v0.h[5]\n"
"fmla v14.8h, v6.8h, v1.h[5]\n"
"fmla v18.8h, v6.8h, v2.h[5]\n"
"fmla v22.8h, v6.8h, v3.h[5]\n"
"fmla v26.8h, v6.8h, v4.h[5]\n"
- "ldr q6, [x12, #0x60]\n"
+ "ldr q6, [x11, #0x60]\n"
"fmla v11.8h, v7.8h, v0.h[5]\n"
"fmla v15.8h, v7.8h, v1.h[5]\n"
"fmla v19.8h, v7.8h, v2.h[5]\n"
"fmla v23.8h, v7.8h, v3.h[5]\n"
"fmla v27.8h, v7.8h, v4.h[5]\n"
- "ldr q7, [x11, #0x60]\n"
+ "ldr q7, [x10, #0x60]\n"
"fmla v8.8h, v6.8h, v0.h[6]\n"
"fmla v12.8h, v6.8h, v1.h[6]\n"
"fmla v16.8h, v6.8h, v2.h[6]\n"
"fmla v20.8h, v6.8h, v3.h[6]\n"
"fmla v24.8h, v6.8h, v4.h[6]\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
"fmla v9.8h, v7.8h, v0.h[6]\n"
"fmla v13.8h, v7.8h, v1.h[6]\n"
"fmla v17.8h, v7.8h, v2.h[6]\n"
"fmla v21.8h, v7.8h, v3.h[6]\n"
"fmla v25.8h, v7.8h, v4.h[6]\n"
- "ldr q7, [x9, #0x60]\n"
+ "ldr q7, [x28, #0x60]\n"
"fmla v10.8h, v6.8h, v0.h[6]\n"
"fmla v14.8h, v6.8h, v1.h[6]\n"
"fmla v18.8h, v6.8h, v2.h[6]\n"
"fmla v22.8h, v6.8h, v3.h[6]\n"
"fmla v26.8h, v6.8h, v4.h[6]\n"
- "ldr q6, [x12, #0x70]\n"
+ "ldr q6, [x11, #0x70]\n"
"fmla v11.8h, v7.8h, v0.h[6]\n"
- "add x12, x12, #0x80\n"
+ "add x11, x11, #0x80\n"
"fmla v15.8h, v7.8h, v1.h[6]\n"
"fmla v19.8h, v7.8h, v2.h[6]\n"
"fmla v23.8h, v7.8h, v3.h[6]\n"
"fmla v27.8h, v7.8h, v4.h[6]\n"
- "ldr q7, [x11, #0x70]\n"
- "add x11, x11, #0x80\n"
+ "ldr q7, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
"fmla v8.8h, v6.8h, v0.h[7]\n"
"fmla v12.8h, v6.8h, v1.h[7]\n"
"fmla v16.8h, v6.8h, v2.h[7]\n"
"fmla v20.8h, v6.8h, v3.h[7]\n"
"fmla v24.8h, v6.8h, v4.h[7]\n"
- "ldr q6, [x10, #0x70]\n"
+ "ldr q6, [x9, #0x70]\n"
"fmla v9.8h, v7.8h, v0.h[7]\n"
- "add x10, x10, #0x80\n"
+ "add x9, x9, #0x80\n"
"fmla v13.8h, v7.8h, v1.h[7]\n"
"fmla v17.8h, v7.8h, v2.h[7]\n"
"fmla v21.8h, v7.8h, v3.h[7]\n"
"fmla v25.8h, v7.8h, v4.h[7]\n"
- "ldr q7, [x9, #0x70]\n"
- "add x9, x9, #0x80\n"
+ "ldr q7, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
"fmla v10.8h, v6.8h, v0.h[7]\n"
"fmla v14.8h, v6.8h, v1.h[7]\n"
"fmla v18.8h, v6.8h, v2.h[7]\n"
@@ -3763,32 +3763,32 @@ void a64_ffhybrid_fp16_mla_6x32 (
"fmla v23.8h, v7.8h, v3.h[7]\n"
"fmla v27.8h, v7.8h, v4.h[7]\n"
"229:" // Height 5: Multiply loop: Main loop skip
- "cbz x27, 231f\n"
+ "cbz x26, 231f\n"
"230:" // Height 5: Multiply loop: Odd block loop
- "ldr h0, [x26], #0x2\n"
- "ldr h1, [x25], #0x2\n"
- "sub x27, x27, #0x1\n"
- "ldr h2, [x24], #0x2\n"
- "ldr h3, [x23], #0x2\n"
- "ldr h4, [x22], #0x2\n"
- "ldr q6, [x12, #0x0]\n"
+ "ldr h0, [x25], #0x2\n"
+ "ldr h1, [x24], #0x2\n"
+ "sub x26, x26, #0x1\n"
+ "ldr h2, [x23], #0x2\n"
+ "ldr h3, [x22], #0x2\n"
+ "ldr h4, [x21], #0x2\n"
+ "ldr q6, [x11, #0x0]\n"
"fmla v8.8h, v6.8h, v0.h[0]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
"fmla v20.8h, v6.8h, v3.h[0]\n"
- "add x12, x12, #0x10\n"
+ "add x11, x11, #0x10\n"
"fmla v24.8h, v6.8h, v4.h[0]\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
- "add x11, x11, #0x10\n"
+ "add x10, x10, #0x10\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
- "add x10, x10, #0x10\n"
+ "add x9, x9, #0x10\n"
"fmla v21.8h, v7.8h, v3.h[0]\n"
"fmla v25.8h, v7.8h, v4.h[0]\n"
- "ldr q7, [x9, #0x0]\n"
- "add x9, x9, #0x10\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x28, x28, #0x10\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
"fmla v18.8h, v6.8h, v2.h[0]\n"
@@ -3799,22 +3799,22 @@ void a64_ffhybrid_fp16_mla_6x32 (
"fmla v19.8h, v7.8h, v2.h[0]\n"
"fmla v23.8h, v7.8h, v3.h[0]\n"
"fmla v27.8h, v7.8h, v4.h[0]\n"
- "cbnz x27, 230b\n"
+ "cbnz x26, 230b\n"
"231:" // Height 5: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 224b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
"tbz %x[flags], #1, 232f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.8h }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.8h }, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.8h }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.8h }, [x19]\n"
"fmin v8.8h, v8.8h, v1.8h\n"
"fmin v9.8h, v9.8h, v1.8h\n"
"fmin v10.8h, v10.8h, v1.8h\n"
@@ -3856,295 +3856,295 @@ void a64_ffhybrid_fp16_mla_6x32 (
"fmax v26.8h, v26.8h, v0.8h\n"
"fmax v27.8h, v27.8h, v0.8h\n"
"232:" // Height 5: No activation
- "cmp x14, #0x20\n"
+ "cmp x13, #0x20\n"
"bge 249f\n"
- "tbz x14, #4, 240f\n"
- "st1 { v8.8h }, [x13], #0x10\n"
- "st1 { v9.8h }, [x13], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v13.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v17.8h }, [x24], #0x10\n"
- "st1 { v20.8h }, [x23], #0x10\n"
- "st1 { v21.8h }, [x23], #0x10\n"
- "st1 { v24.8h }, [x22], #0x10\n"
- "st1 { v25.8h }, [x22], #0x10\n"
- "tbz x14, #3, 236f\n"
- "st1 { v10.8h }, [x13], #0x10\n"
- "st1 { v14.8h }, [x25], #0x10\n"
- "st1 { v18.8h }, [x24], #0x10\n"
- "st1 { v22.8h }, [x23], #0x10\n"
- "st1 { v26.8h }, [x22], #0x10\n"
- "tbz x14, #2, 234f\n"
- "str d11, [x13], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
- "tbz x14, #1, 233f\n"
- "st1 { v11.s }[2], [x13], #0x4\n"
- "st1 { v15.s }[2], [x25], #0x4\n"
- "st1 { v19.s }[2], [x24], #0x4\n"
- "st1 { v23.s }[2], [x23], #0x4\n"
- "st1 { v27.s }[2], [x22], #0x4\n"
- "tbz x14, #0, 248f\n"
- "st1 { v11.h }[6], [x13]\n"
- "st1 { v15.h }[6], [x25]\n"
- "st1 { v19.h }[6], [x24]\n"
- "st1 { v23.h }[6], [x23]\n"
- "st1 { v27.h }[6], [x22]\n"
+ "tbz x13, #4, 240f\n"
+ "st1 { v8.8h }, [x12], #0x10\n"
+ "st1 { v9.8h }, [x12], #0x10\n"
+ "st1 { v12.8h }, [x24], #0x10\n"
+ "st1 { v13.8h }, [x24], #0x10\n"
+ "st1 { v16.8h }, [x23], #0x10\n"
+ "st1 { v17.8h }, [x23], #0x10\n"
+ "st1 { v20.8h }, [x22], #0x10\n"
+ "st1 { v21.8h }, [x22], #0x10\n"
+ "st1 { v24.8h }, [x21], #0x10\n"
+ "st1 { v25.8h }, [x21], #0x10\n"
+ "tbz x13, #3, 236f\n"
+ "st1 { v10.8h }, [x12], #0x10\n"
+ "st1 { v14.8h }, [x24], #0x10\n"
+ "st1 { v18.8h }, [x23], #0x10\n"
+ "st1 { v22.8h }, [x22], #0x10\n"
+ "st1 { v26.8h }, [x21], #0x10\n"
+ "tbz x13, #2, 234f\n"
+ "str d11, [x12], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "str d23, [x22], #0x8\n"
+ "str d27, [x21], #0x8\n"
+ "tbz x13, #1, 233f\n"
+ "st1 { v11.s }[2], [x12], #0x4\n"
+ "st1 { v15.s }[2], [x24], #0x4\n"
+ "st1 { v19.s }[2], [x23], #0x4\n"
+ "st1 { v23.s }[2], [x22], #0x4\n"
+ "st1 { v27.s }[2], [x21], #0x4\n"
+ "tbz x13, #0, 248f\n"
+ "st1 { v11.h }[6], [x12]\n"
+ "st1 { v15.h }[6], [x24]\n"
+ "st1 { v19.h }[6], [x23]\n"
+ "st1 { v23.h }[6], [x22]\n"
+ "st1 { v27.h }[6], [x21]\n"
"b 248f\n"
"233:" // Height 5: Partial direct writeback: partial_1_28
- "tbz x14, #0, 248f\n"
- "st1 { v11.h }[4], [x13]\n"
- "st1 { v15.h }[4], [x25]\n"
- "st1 { v19.h }[4], [x24]\n"
- "st1 { v23.h }[4], [x23]\n"
- "st1 { v27.h }[4], [x22]\n"
+ "tbz x13, #0, 248f\n"
+ "st1 { v11.h }[4], [x12]\n"
+ "st1 { v15.h }[4], [x24]\n"
+ "st1 { v19.h }[4], [x23]\n"
+ "st1 { v23.h }[4], [x22]\n"
+ "st1 { v27.h }[4], [x21]\n"
"b 248f\n"
"234:" // Height 5: Partial direct writeback: partial_2_24
- "tbz x14, #1, 235f\n"
- "str s11, [x13], #0x4\n"
- "str s15, [x25], #0x4\n"
- "str s19, [x24], #0x4\n"
- "str s23, [x23], #0x4\n"
- "str s27, [x22], #0x4\n"
- "tbz x14, #0, 248f\n"
- "st1 { v11.h }[2], [x13]\n"
- "st1 { v15.h }[2], [x25]\n"
- "st1 { v19.h }[2], [x24]\n"
- "st1 { v23.h }[2], [x23]\n"
- "st1 { v27.h }[2], [x22]\n"
+ "tbz x13, #1, 235f\n"
+ "str s11, [x12], #0x4\n"
+ "str s15, [x24], #0x4\n"
+ "str s19, [x23], #0x4\n"
+ "str s23, [x22], #0x4\n"
+ "str s27, [x21], #0x4\n"
+ "tbz x13, #0, 248f\n"
+ "st1 { v11.h }[2], [x12]\n"
+ "st1 { v15.h }[2], [x24]\n"
+ "st1 { v19.h }[2], [x23]\n"
+ "st1 { v23.h }[2], [x22]\n"
+ "st1 { v27.h }[2], [x21]\n"
"b 248f\n"
"235:" // Height 5: Partial direct writeback: partial_1_24
- "tbz x14, #0, 248f\n"
- "str h11, [x13, #0x0]\n"
- "str h15, [x25, #0x0]\n"
- "str h19, [x24, #0x0]\n"
- "str h23, [x23, #0x0]\n"
- "str h27, [x22, #0x0]\n"
+ "tbz x13, #0, 248f\n"
+ "str h11, [x12, #0x0]\n"
+ "str h15, [x24, #0x0]\n"
+ "str h19, [x23, #0x0]\n"
+ "str h23, [x22, #0x0]\n"
+ "str h27, [x21, #0x0]\n"
"b 248f\n"
"236:" // Height 5: Partial direct writeback: partial_4_16
- "tbz x14, #2, 238f\n"
- "str d10, [x13], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
- "tbz x14, #1, 237f\n"
- "st1 { v10.s }[2], [x13], #0x4\n"
- "st1 { v14.s }[2], [x25], #0x4\n"
- "st1 { v18.s }[2], [x24], #0x4\n"
- "st1 { v22.s }[2], [x23], #0x4\n"
- "st1 { v26.s }[2], [x22], #0x4\n"
- "tbz x14, #0, 248f\n"
- "st1 { v10.h }[6], [x13]\n"
- "st1 { v14.h }[6], [x25]\n"
- "st1 { v18.h }[6], [x24]\n"
- "st1 { v22.h }[6], [x23]\n"
- "st1 { v26.h }[6], [x22]\n"
+ "tbz x13, #2, 238f\n"
+ "str d10, [x12], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
+ "str d22, [x22], #0x8\n"
+ "str d26, [x21], #0x8\n"
+ "tbz x13, #1, 237f\n"
+ "st1 { v10.s }[2], [x12], #0x4\n"
+ "st1 { v14.s }[2], [x24], #0x4\n"
+ "st1 { v18.s }[2], [x23], #0x4\n"
+ "st1 { v22.s }[2], [x22], #0x4\n"
+ "st1 { v26.s }[2], [x21], #0x4\n"
+ "tbz x13, #0, 248f\n"
+ "st1 { v10.h }[6], [x12]\n"
+ "st1 { v14.h }[6], [x24]\n"
+ "st1 { v18.h }[6], [x23]\n"
+ "st1 { v22.h }[6], [x22]\n"
+ "st1 { v26.h }[6], [x21]\n"
"b 248f\n"
"237:" // Height 5: Partial direct writeback: partial_1_20
- "tbz x14, #0, 248f\n"
- "st1 { v10.h }[4], [x13]\n"
- "st1 { v14.h }[4], [x25]\n"
- "st1 { v18.h }[4], [x24]\n"
- "st1 { v22.h }[4], [x23]\n"
- "st1 { v26.h }[4], [x22]\n"
+ "tbz x13, #0, 248f\n"
+ "st1 { v10.h }[4], [x12]\n"
+ "st1 { v14.h }[4], [x24]\n"
+ "st1 { v18.h }[4], [x23]\n"
+ "st1 { v22.h }[4], [x22]\n"
+ "st1 { v26.h }[4], [x21]\n"
"b 248f\n"
"238:" // Height 5: Partial direct writeback: partial_2_16
- "tbz x14, #1, 239f\n"
- "str s10, [x13], #0x4\n"
- "str s14, [x25], #0x4\n"
- "str s18, [x24], #0x4\n"
- "str s22, [x23], #0x4\n"
- "str s26, [x22], #0x4\n"
- "tbz x14, #0, 248f\n"
- "st1 { v10.h }[2], [x13]\n"
- "st1 { v14.h }[2], [x25]\n"
- "st1 { v18.h }[2], [x24]\n"
- "st1 { v22.h }[2], [x23]\n"
- "st1 { v26.h }[2], [x22]\n"
+ "tbz x13, #1, 239f\n"
+ "str s10, [x12], #0x4\n"
+ "str s14, [x24], #0x4\n"
+ "str s18, [x23], #0x4\n"
+ "str s22, [x22], #0x4\n"
+ "str s26, [x21], #0x4\n"
+ "tbz x13, #0, 248f\n"
+ "st1 { v10.h }[2], [x12]\n"
+ "st1 { v14.h }[2], [x24]\n"
+ "st1 { v18.h }[2], [x23]\n"
+ "st1 { v22.h }[2], [x22]\n"
+ "st1 { v26.h }[2], [x21]\n"
"b 248f\n"
"239:" // Height 5: Partial direct writeback: partial_1_16
- "tbz x14, #0, 248f\n"
- "str h10, [x13, #0x0]\n"
- "str h14, [x25, #0x0]\n"
- "str h18, [x24, #0x0]\n"
- "str h22, [x23, #0x0]\n"
- "str h26, [x22, #0x0]\n"
+ "tbz x13, #0, 248f\n"
+ "str h10, [x12, #0x0]\n"
+ "str h14, [x24, #0x0]\n"
+ "str h18, [x23, #0x0]\n"
+ "str h22, [x22, #0x0]\n"
+ "str h26, [x21, #0x0]\n"
"b 248f\n"
"240:" // Height 5: Partial direct writeback: partial_8_0
- "tbz x14, #3, 244f\n"
- "st1 { v8.8h }, [x13], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v20.8h }, [x23], #0x10\n"
- "st1 { v24.8h }, [x22], #0x10\n"
- "tbz x14, #2, 242f\n"
- "str d9, [x13], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
- "tbz x14, #1, 241f\n"
- "st1 { v9.s }[2], [x13], #0x4\n"
- "st1 { v13.s }[2], [x25], #0x4\n"
- "st1 { v17.s }[2], [x24], #0x4\n"
- "st1 { v21.s }[2], [x23], #0x4\n"
- "st1 { v25.s }[2], [x22], #0x4\n"
- "tbz x14, #0, 248f\n"
- "st1 { v9.h }[6], [x13]\n"
- "st1 { v13.h }[6], [x25]\n"
- "st1 { v17.h }[6], [x24]\n"
- "st1 { v21.h }[6], [x23]\n"
- "st1 { v25.h }[6], [x22]\n"
+ "tbz x13, #3, 244f\n"
+ "st1 { v8.8h }, [x12], #0x10\n"
+ "st1 { v12.8h }, [x24], #0x10\n"
+ "st1 { v16.8h }, [x23], #0x10\n"
+ "st1 { v20.8h }, [x22], #0x10\n"
+ "st1 { v24.8h }, [x21], #0x10\n"
+ "tbz x13, #2, 242f\n"
+ "str d9, [x12], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
+ "str d21, [x22], #0x8\n"
+ "str d25, [x21], #0x8\n"
+ "tbz x13, #1, 241f\n"
+ "st1 { v9.s }[2], [x12], #0x4\n"
+ "st1 { v13.s }[2], [x24], #0x4\n"
+ "st1 { v17.s }[2], [x23], #0x4\n"
+ "st1 { v21.s }[2], [x22], #0x4\n"
+ "st1 { v25.s }[2], [x21], #0x4\n"
+ "tbz x13, #0, 248f\n"
+ "st1 { v9.h }[6], [x12]\n"
+ "st1 { v13.h }[6], [x24]\n"
+ "st1 { v17.h }[6], [x23]\n"
+ "st1 { v21.h }[6], [x22]\n"
+ "st1 { v25.h }[6], [x21]\n"
"b 248f\n"
"241:" // Height 5: Partial direct writeback: partial_1_12
- "tbz x14, #0, 248f\n"
- "st1 { v9.h }[4], [x13]\n"
- "st1 { v13.h }[4], [x25]\n"
- "st1 { v17.h }[4], [x24]\n"
- "st1 { v21.h }[4], [x23]\n"
- "st1 { v25.h }[4], [x22]\n"
+ "tbz x13, #0, 248f\n"
+ "st1 { v9.h }[4], [x12]\n"
+ "st1 { v13.h }[4], [x24]\n"
+ "st1 { v17.h }[4], [x23]\n"
+ "st1 { v21.h }[4], [x22]\n"
+ "st1 { v25.h }[4], [x21]\n"
"b 248f\n"
"242:" // Height 5: Partial direct writeback: partial_2_8
- "tbz x14, #1, 243f\n"
- "str s9, [x13], #0x4\n"
- "str s13, [x25], #0x4\n"
- "str s17, [x24], #0x4\n"
- "str s21, [x23], #0x4\n"
- "str s25, [x22], #0x4\n"
- "tbz x14, #0, 248f\n"
- "st1 { v9.h }[2], [x13]\n"
- "st1 { v13.h }[2], [x25]\n"
- "st1 { v17.h }[2], [x24]\n"
- "st1 { v21.h }[2], [x23]\n"
- "st1 { v25.h }[2], [x22]\n"
+ "tbz x13, #1, 243f\n"
+ "str s9, [x12], #0x4\n"
+ "str s13, [x24], #0x4\n"
+ "str s17, [x23], #0x4\n"
+ "str s21, [x22], #0x4\n"
+ "str s25, [x21], #0x4\n"
+ "tbz x13, #0, 248f\n"
+ "st1 { v9.h }[2], [x12]\n"
+ "st1 { v13.h }[2], [x24]\n"
+ "st1 { v17.h }[2], [x23]\n"
+ "st1 { v21.h }[2], [x22]\n"
+ "st1 { v25.h }[2], [x21]\n"
"b 248f\n"
"243:" // Height 5: Partial direct writeback: partial_1_8
- "tbz x14, #0, 248f\n"
- "str h9, [x13, #0x0]\n"
- "str h13, [x25, #0x0]\n"
- "str h17, [x24, #0x0]\n"
- "str h21, [x23, #0x0]\n"
- "str h25, [x22, #0x0]\n"
+ "tbz x13, #0, 248f\n"
+ "str h9, [x12, #0x0]\n"
+ "str h13, [x24, #0x0]\n"
+ "str h17, [x23, #0x0]\n"
+ "str h21, [x22, #0x0]\n"
+ "str h25, [x21, #0x0]\n"
"b 248f\n"
"244:" // Height 5: Partial direct writeback: partial_4_0
- "tbz x14, #2, 246f\n"
- "str d8, [x13], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
- "tbz x14, #1, 245f\n"
- "st1 { v8.s }[2], [x13], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
- "st1 { v16.s }[2], [x24], #0x4\n"
- "st1 { v20.s }[2], [x23], #0x4\n"
- "st1 { v24.s }[2], [x22], #0x4\n"
- "tbz x14, #0, 248f\n"
- "st1 { v8.h }[6], [x13]\n"
- "st1 { v12.h }[6], [x25]\n"
- "st1 { v16.h }[6], [x24]\n"
- "st1 { v20.h }[6], [x23]\n"
- "st1 { v24.h }[6], [x22]\n"
+ "tbz x13, #2, 246f\n"
+ "str d8, [x12], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
+ "str d20, [x22], #0x8\n"
+ "str d24, [x21], #0x8\n"
+ "tbz x13, #1, 245f\n"
+ "st1 { v8.s }[2], [x12], #0x4\n"
+ "st1 { v12.s }[2], [x24], #0x4\n"
+ "st1 { v16.s }[2], [x23], #0x4\n"
+ "st1 { v20.s }[2], [x22], #0x4\n"
+ "st1 { v24.s }[2], [x21], #0x4\n"
+ "tbz x13, #0, 248f\n"
+ "st1 { v8.h }[6], [x12]\n"
+ "st1 { v12.h }[6], [x24]\n"
+ "st1 { v16.h }[6], [x23]\n"
+ "st1 { v20.h }[6], [x22]\n"
+ "st1 { v24.h }[6], [x21]\n"
"b 248f\n"
"245:" // Height 5: Partial direct writeback: partial_1_4
- "tbz x14, #0, 248f\n"
- "st1 { v8.h }[4], [x13]\n"
- "st1 { v12.h }[4], [x25]\n"
- "st1 { v16.h }[4], [x24]\n"
- "st1 { v20.h }[4], [x23]\n"
- "st1 { v24.h }[4], [x22]\n"
+ "tbz x13, #0, 248f\n"
+ "st1 { v8.h }[4], [x12]\n"
+ "st1 { v12.h }[4], [x24]\n"
+ "st1 { v16.h }[4], [x23]\n"
+ "st1 { v20.h }[4], [x22]\n"
+ "st1 { v24.h }[4], [x21]\n"
"b 248f\n"
"246:" // Height 5: Partial direct writeback: partial_2_0
- "tbz x14, #1, 247f\n"
- "str s8, [x13], #0x4\n"
- "str s12, [x25], #0x4\n"
- "str s16, [x24], #0x4\n"
- "str s20, [x23], #0x4\n"
- "str s24, [x22], #0x4\n"
- "tbz x14, #0, 248f\n"
- "st1 { v8.h }[2], [x13]\n"
- "st1 { v12.h }[2], [x25]\n"
- "st1 { v16.h }[2], [x24]\n"
- "st1 { v20.h }[2], [x23]\n"
- "st1 { v24.h }[2], [x22]\n"
+ "tbz x13, #1, 247f\n"
+ "str s8, [x12], #0x4\n"
+ "str s12, [x24], #0x4\n"
+ "str s16, [x23], #0x4\n"
+ "str s20, [x22], #0x4\n"
+ "str s24, [x21], #0x4\n"
+ "tbz x13, #0, 248f\n"
+ "st1 { v8.h }[2], [x12]\n"
+ "st1 { v12.h }[2], [x24]\n"
+ "st1 { v16.h }[2], [x23]\n"
+ "st1 { v20.h }[2], [x22]\n"
+ "st1 { v24.h }[2], [x21]\n"
"b 248f\n"
"247:" // Height 5: Partial direct writeback: partial_1_0
- "str h8, [x13, #0x0]\n"
- "str h12, [x25, #0x0]\n"
- "str h16, [x24, #0x0]\n"
- "str h20, [x23, #0x0]\n"
- "str h24, [x22, #0x0]\n"
+ "str h8, [x12, #0x0]\n"
+ "str h12, [x24, #0x0]\n"
+ "str h16, [x23, #0x0]\n"
+ "str h20, [x22, #0x0]\n"
+ "str h24, [x21, #0x0]\n"
"248:" // Height 5: Partial direct writeback: Done
"b 250f\n"
"249:" // Height 5: Full writeback
- "str q8, [x13, #0x0]\n"
- "str q9, [x13, #0x10]\n"
- "str q10, [x13, #0x20]\n"
- "str q11, [x13, #0x30]\n"
- "add x13, x13, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
- "str q24, [x22, #0x0]\n"
- "str q25, [x22, #0x10]\n"
- "str q26, [x22, #0x20]\n"
- "str q27, [x22, #0x30]\n"
+ "str q8, [x12, #0x0]\n"
+ "str q9, [x12, #0x10]\n"
+ "str q10, [x12, #0x20]\n"
+ "str q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "str q20, [x22, #0x0]\n"
+ "str q21, [x22, #0x10]\n"
+ "str q22, [x22, #0x20]\n"
+ "str q23, [x22, #0x30]\n"
+ "str q24, [x21, #0x0]\n"
+ "str q25, [x21, #0x10]\n"
+ "str q26, [x21, #0x20]\n"
+ "str q27, [x21, #0x30]\n"
"250:" // Height 5: Writeback done
- "subs x14, x14, #0x20\n"
+ "subs x13, x13, #0x20\n"
"bgt 202b\n"
"b 302f\n"
"251:" // Height 6
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x21, #0xc\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x15, %x[bias]\n"
- "mov x13, %x[output_ptr]\n"
- "madd %x[output_ptr], x20, x21, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x20, #0xc\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x14, %x[bias]\n"
+ "mov x12, %x[output_ptr]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
"252:" // Height 6: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x20, x9, x20, LSL #1\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0x18\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0x18\n"
"bgt 253f\n"
- "cmp x14, #0x10\n"
- "mov x9, x12\n"
+ "cmp x13, #0x10\n"
+ "mov x28, x11\n"
"bgt 253f\n"
- "cmp x14, #0x8\n"
- "mov x10, x12\n"
+ "cmp x13, #0x8\n"
+ "mov x9, x11\n"
"bgt 253f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"253:" // Height 6: B setup done
- "cbz x15, 254f\n"
- "ldr q8, [x15, #0x0]\n"
- "ldr q9, [x15, #0x10]\n"
+ "cbz x14, 254f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
"mov v12.16b, v8.16b\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x15, #0x20]\n"
- "ldr q11, [x15, #0x30]\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
"mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
"mov v16.16b, v8.16b\n"
"mov v17.16b, v9.16b\n"
- "add x15, x15, #0x40\n"
+ "add x14, x14, #0x40\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
"mov v20.16b, v8.16b\n"
@@ -4162,310 +4162,310 @@ void a64_ffhybrid_fp16_mla_6x32 (
"b 273f\n"
"254:" // Height 6: no bias
"tbz %x[flags], #0, 272f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
- "cmp x14, #0x20\n"
- "add x21, x22, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "cmp x13, #0x20\n"
+ "add x20, x21, x19, LSL #1\n"
"bge 271f\n"
- "tbz x14, #4, 262f\n"
- "ld1 { v8.8h }, [x13], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v20.8h }, [x23], #0x10\n"
- "ld1 { v24.8h }, [x22], #0x10\n"
- "ld1 { v28.8h }, [x21], #0x10\n"
- "ld1 { v9.8h }, [x13], #0x10\n"
- "ld1 { v13.8h }, [x25], #0x10\n"
- "ld1 { v17.8h }, [x24], #0x10\n"
- "ld1 { v21.8h }, [x23], #0x10\n"
- "ld1 { v25.8h }, [x22], #0x10\n"
- "ld1 { v29.8h }, [x21], #0x10\n"
- "tbz x14, #3, 258f\n"
- "ld1 { v10.8h }, [x13], #0x10\n"
- "ld1 { v14.8h }, [x25], #0x10\n"
- "ld1 { v18.8h }, [x24], #0x10\n"
- "ld1 { v22.8h }, [x23], #0x10\n"
- "ld1 { v26.8h }, [x22], #0x10\n"
- "ld1 { v30.8h }, [x21], #0x10\n"
- "tbz x14, #2, 256f\n"
- "ldr d11, [x13], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
- "ldr d31, [x21], #0x8\n"
- "tbz x14, #1, 255f\n"
- "ld1 { v11.s }[2], [x13], #0x4\n"
- "ld1 { v15.s }[2], [x25], #0x4\n"
- "mov x20, #0x3c\n"
- "ld1 { v19.s }[2], [x24], #0x4\n"
- "ld1 { v23.s }[2], [x23], #0x4\n"
- "ld1 { v27.s }[2], [x22], #0x4\n"
- "ld1 { v31.s }[2], [x21], #0x4\n"
- "tbz x14, #0, 270f\n"
- "ld1 { v11.h }[6], [x13]\n"
- "ld1 { v15.h }[6], [x25]\n"
- "ld1 { v19.h }[6], [x24]\n"
- "ld1 { v23.h }[6], [x23]\n"
- "ld1 { v27.h }[6], [x22]\n"
- "ld1 { v31.h }[6], [x21]\n"
+ "tbz x13, #4, 262f\n"
+ "ld1 { v8.8h }, [x12], #0x10\n"
+ "ld1 { v12.8h }, [x24], #0x10\n"
+ "ld1 { v16.8h }, [x23], #0x10\n"
+ "ld1 { v20.8h }, [x22], #0x10\n"
+ "ld1 { v24.8h }, [x21], #0x10\n"
+ "ld1 { v28.8h }, [x20], #0x10\n"
+ "ld1 { v9.8h }, [x12], #0x10\n"
+ "ld1 { v13.8h }, [x24], #0x10\n"
+ "ld1 { v17.8h }, [x23], #0x10\n"
+ "ld1 { v21.8h }, [x22], #0x10\n"
+ "ld1 { v25.8h }, [x21], #0x10\n"
+ "ld1 { v29.8h }, [x20], #0x10\n"
+ "tbz x13, #3, 258f\n"
+ "ld1 { v10.8h }, [x12], #0x10\n"
+ "ld1 { v14.8h }, [x24], #0x10\n"
+ "ld1 { v18.8h }, [x23], #0x10\n"
+ "ld1 { v22.8h }, [x22], #0x10\n"
+ "ld1 { v26.8h }, [x21], #0x10\n"
+ "ld1 { v30.8h }, [x20], #0x10\n"
+ "tbz x13, #2, 256f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "ldr d27, [x21], #0x8\n"
+ "ldr d31, [x20], #0x8\n"
+ "tbz x13, #1, 255f\n"
+ "ld1 { v11.s }[2], [x12], #0x4\n"
+ "ld1 { v15.s }[2], [x24], #0x4\n"
+ "mov x19, #0x3c\n"
+ "ld1 { v19.s }[2], [x23], #0x4\n"
+ "ld1 { v23.s }[2], [x22], #0x4\n"
+ "ld1 { v27.s }[2], [x21], #0x4\n"
+ "ld1 { v31.s }[2], [x20], #0x4\n"
+ "tbz x13, #0, 270f\n"
+ "ld1 { v11.h }[6], [x12]\n"
+ "ld1 { v15.h }[6], [x24]\n"
+ "ld1 { v19.h }[6], [x23]\n"
+ "ld1 { v23.h }[6], [x22]\n"
+ "ld1 { v27.h }[6], [x21]\n"
+ "ld1 { v31.h }[6], [x20]\n"
"b 270f\n"
"255:" // Height 6: Partial accumulate: partial_1_28
- "mov x20, #0x38\n"
- "tbz x14, #0, 270f\n"
- "ld1 { v11.h }[4], [x13]\n"
- "ld1 { v15.h }[4], [x25]\n"
- "ld1 { v19.h }[4], [x24]\n"
- "ld1 { v23.h }[4], [x23]\n"
- "ld1 { v27.h }[4], [x22]\n"
- "ld1 { v31.h }[4], [x21]\n"
+ "mov x19, #0x38\n"
+ "tbz x13, #0, 270f\n"
+ "ld1 { v11.h }[4], [x12]\n"
+ "ld1 { v15.h }[4], [x24]\n"
+ "ld1 { v19.h }[4], [x23]\n"
+ "ld1 { v23.h }[4], [x22]\n"
+ "ld1 { v27.h }[4], [x21]\n"
+ "ld1 { v31.h }[4], [x20]\n"
"b 270f\n"
"256:" // Height 6: Partial accumulate: partial_2_24
- "tbz x14, #1, 257f\n"
- "ldr s11, [x13], #0x4\n"
- "ldr s15, [x25], #0x4\n"
- "mov x20, #0x34\n"
- "ldr s19, [x24], #0x4\n"
- "ldr s23, [x23], #0x4\n"
- "ldr s27, [x22], #0x4\n"
- "ldr s31, [x21], #0x4\n"
- "tbz x14, #0, 270f\n"
- "ld1 { v11.h }[2], [x13]\n"
- "ld1 { v15.h }[2], [x25]\n"
- "ld1 { v19.h }[2], [x24]\n"
- "ld1 { v23.h }[2], [x23]\n"
- "ld1 { v27.h }[2], [x22]\n"
- "ld1 { v31.h }[2], [x21]\n"
+ "tbz x13, #1, 257f\n"
+ "ldr s11, [x12], #0x4\n"
+ "ldr s15, [x24], #0x4\n"
+ "mov x19, #0x34\n"
+ "ldr s19, [x23], #0x4\n"
+ "ldr s23, [x22], #0x4\n"
+ "ldr s27, [x21], #0x4\n"
+ "ldr s31, [x20], #0x4\n"
+ "tbz x13, #0, 270f\n"
+ "ld1 { v11.h }[2], [x12]\n"
+ "ld1 { v15.h }[2], [x24]\n"
+ "ld1 { v19.h }[2], [x23]\n"
+ "ld1 { v23.h }[2], [x22]\n"
+ "ld1 { v27.h }[2], [x21]\n"
+ "ld1 { v31.h }[2], [x20]\n"
"b 270f\n"
"257:" // Height 6: Partial accumulate: partial_1_24
- "mov x20, #0x30\n"
- "tbz x14, #0, 270f\n"
- "ldr h11, [x13, #0x0]\n"
- "ldr h15, [x25, #0x0]\n"
- "ldr h19, [x24, #0x0]\n"
- "ldr h23, [x23, #0x0]\n"
- "ldr h27, [x22, #0x0]\n"
- "ldr h31, [x21, #0x0]\n"
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 270f\n"
+ "ldr h11, [x12, #0x0]\n"
+ "ldr h15, [x24, #0x0]\n"
+ "ldr h19, [x23, #0x0]\n"
+ "ldr h23, [x22, #0x0]\n"
+ "ldr h27, [x21, #0x0]\n"
+ "ldr h31, [x20, #0x0]\n"
"b 270f\n"
"258:" // Height 6: Partial accumulate: partial_4_16
- "tbz x14, #2, 260f\n"
- "ldr d10, [x13], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
- "ldr d30, [x21], #0x8\n"
- "tbz x14, #1, 259f\n"
- "ld1 { v10.s }[2], [x13], #0x4\n"
- "ld1 { v14.s }[2], [x25], #0x4\n"
- "mov x20, #0x2c\n"
- "ld1 { v18.s }[2], [x24], #0x4\n"
- "ld1 { v22.s }[2], [x23], #0x4\n"
- "ld1 { v26.s }[2], [x22], #0x4\n"
- "ld1 { v30.s }[2], [x21], #0x4\n"
- "tbz x14, #0, 270f\n"
- "ld1 { v10.h }[6], [x13]\n"
- "ld1 { v14.h }[6], [x25]\n"
- "ld1 { v18.h }[6], [x24]\n"
- "ld1 { v22.h }[6], [x23]\n"
- "ld1 { v26.h }[6], [x22]\n"
- "ld1 { v30.h }[6], [x21]\n"
+ "tbz x13, #2, 260f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "ldr d26, [x21], #0x8\n"
+ "ldr d30, [x20], #0x8\n"
+ "tbz x13, #1, 259f\n"
+ "ld1 { v10.s }[2], [x12], #0x4\n"
+ "ld1 { v14.s }[2], [x24], #0x4\n"
+ "mov x19, #0x2c\n"
+ "ld1 { v18.s }[2], [x23], #0x4\n"
+ "ld1 { v22.s }[2], [x22], #0x4\n"
+ "ld1 { v26.s }[2], [x21], #0x4\n"
+ "ld1 { v30.s }[2], [x20], #0x4\n"
+ "tbz x13, #0, 270f\n"
+ "ld1 { v10.h }[6], [x12]\n"
+ "ld1 { v14.h }[6], [x24]\n"
+ "ld1 { v18.h }[6], [x23]\n"
+ "ld1 { v22.h }[6], [x22]\n"
+ "ld1 { v26.h }[6], [x21]\n"
+ "ld1 { v30.h }[6], [x20]\n"
"b 270f\n"
"259:" // Height 6: Partial accumulate: partial_1_20
- "mov x20, #0x28\n"
- "tbz x14, #0, 270f\n"
- "ld1 { v10.h }[4], [x13]\n"
- "ld1 { v14.h }[4], [x25]\n"
- "ld1 { v18.h }[4], [x24]\n"
- "ld1 { v22.h }[4], [x23]\n"
- "ld1 { v26.h }[4], [x22]\n"
- "ld1 { v30.h }[4], [x21]\n"
+ "mov x19, #0x28\n"
+ "tbz x13, #0, 270f\n"
+ "ld1 { v10.h }[4], [x12]\n"
+ "ld1 { v14.h }[4], [x24]\n"
+ "ld1 { v18.h }[4], [x23]\n"
+ "ld1 { v22.h }[4], [x22]\n"
+ "ld1 { v26.h }[4], [x21]\n"
+ "ld1 { v30.h }[4], [x20]\n"
"b 270f\n"
"260:" // Height 6: Partial accumulate: partial_2_16
- "tbz x14, #1, 261f\n"
- "ldr s10, [x13], #0x4\n"
- "ldr s14, [x25], #0x4\n"
- "mov x20, #0x24\n"
- "ldr s18, [x24], #0x4\n"
- "ldr s22, [x23], #0x4\n"
- "ldr s26, [x22], #0x4\n"
- "ldr s30, [x21], #0x4\n"
- "tbz x14, #0, 270f\n"
- "ld1 { v10.h }[2], [x13]\n"
- "ld1 { v14.h }[2], [x25]\n"
- "ld1 { v18.h }[2], [x24]\n"
- "ld1 { v22.h }[2], [x23]\n"
- "ld1 { v26.h }[2], [x22]\n"
- "ld1 { v30.h }[2], [x21]\n"
+ "tbz x13, #1, 261f\n"
+ "ldr s10, [x12], #0x4\n"
+ "ldr s14, [x24], #0x4\n"
+ "mov x19, #0x24\n"
+ "ldr s18, [x23], #0x4\n"
+ "ldr s22, [x22], #0x4\n"
+ "ldr s26, [x21], #0x4\n"
+ "ldr s30, [x20], #0x4\n"
+ "tbz x13, #0, 270f\n"
+ "ld1 { v10.h }[2], [x12]\n"
+ "ld1 { v14.h }[2], [x24]\n"
+ "ld1 { v18.h }[2], [x23]\n"
+ "ld1 { v22.h }[2], [x22]\n"
+ "ld1 { v26.h }[2], [x21]\n"
+ "ld1 { v30.h }[2], [x20]\n"
"b 270f\n"
"261:" // Height 6: Partial accumulate: partial_1_16
- "mov x20, #0x20\n"
- "tbz x14, #0, 270f\n"
- "ldr h10, [x13, #0x0]\n"
- "ldr h14, [x25, #0x0]\n"
- "ldr h18, [x24, #0x0]\n"
- "ldr h22, [x23, #0x0]\n"
- "ldr h26, [x22, #0x0]\n"
- "ldr h30, [x21, #0x0]\n"
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 270f\n"
+ "ldr h10, [x12, #0x0]\n"
+ "ldr h14, [x24, #0x0]\n"
+ "ldr h18, [x23, #0x0]\n"
+ "ldr h22, [x22, #0x0]\n"
+ "ldr h26, [x21, #0x0]\n"
+ "ldr h30, [x20, #0x0]\n"
"b 270f\n"
"262:" // Height 6: Partial accumulate: partial_8_0
- "tbz x14, #3, 266f\n"
- "ld1 { v8.8h }, [x13], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v20.8h }, [x23], #0x10\n"
- "ld1 { v24.8h }, [x22], #0x10\n"
- "ld1 { v28.8h }, [x21], #0x10\n"
- "tbz x14, #2, 264f\n"
- "ldr d9, [x13], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
- "ldr d29, [x21], #0x8\n"
- "tbz x14, #1, 263f\n"
- "ld1 { v9.s }[2], [x13], #0x4\n"
- "ld1 { v13.s }[2], [x25], #0x4\n"
- "mov x20, #0x1c\n"
- "ld1 { v17.s }[2], [x24], #0x4\n"
- "ld1 { v21.s }[2], [x23], #0x4\n"
- "ld1 { v25.s }[2], [x22], #0x4\n"
- "ld1 { v29.s }[2], [x21], #0x4\n"
- "tbz x14, #0, 270f\n"
- "ld1 { v9.h }[6], [x13]\n"
- "ld1 { v13.h }[6], [x25]\n"
- "ld1 { v17.h }[6], [x24]\n"
- "ld1 { v21.h }[6], [x23]\n"
- "ld1 { v25.h }[6], [x22]\n"
- "ld1 { v29.h }[6], [x21]\n"
+ "tbz x13, #3, 266f\n"
+ "ld1 { v8.8h }, [x12], #0x10\n"
+ "ld1 { v12.8h }, [x24], #0x10\n"
+ "ld1 { v16.8h }, [x23], #0x10\n"
+ "ld1 { v20.8h }, [x22], #0x10\n"
+ "ld1 { v24.8h }, [x21], #0x10\n"
+ "ld1 { v28.8h }, [x20], #0x10\n"
+ "tbz x13, #2, 264f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d25, [x21], #0x8\n"
+ "ldr d29, [x20], #0x8\n"
+ "tbz x13, #1, 263f\n"
+ "ld1 { v9.s }[2], [x12], #0x4\n"
+ "ld1 { v13.s }[2], [x24], #0x4\n"
+ "mov x19, #0x1c\n"
+ "ld1 { v17.s }[2], [x23], #0x4\n"
+ "ld1 { v21.s }[2], [x22], #0x4\n"
+ "ld1 { v25.s }[2], [x21], #0x4\n"
+ "ld1 { v29.s }[2], [x20], #0x4\n"
+ "tbz x13, #0, 270f\n"
+ "ld1 { v9.h }[6], [x12]\n"
+ "ld1 { v13.h }[6], [x24]\n"
+ "ld1 { v17.h }[6], [x23]\n"
+ "ld1 { v21.h }[6], [x22]\n"
+ "ld1 { v25.h }[6], [x21]\n"
+ "ld1 { v29.h }[6], [x20]\n"
"b 270f\n"
"263:" // Height 6: Partial accumulate: partial_1_12
- "mov x20, #0x18\n"
- "tbz x14, #0, 270f\n"
- "ld1 { v9.h }[4], [x13]\n"
- "ld1 { v13.h }[4], [x25]\n"
- "ld1 { v17.h }[4], [x24]\n"
- "ld1 { v21.h }[4], [x23]\n"
- "ld1 { v25.h }[4], [x22]\n"
- "ld1 { v29.h }[4], [x21]\n"
+ "mov x19, #0x18\n"
+ "tbz x13, #0, 270f\n"
+ "ld1 { v9.h }[4], [x12]\n"
+ "ld1 { v13.h }[4], [x24]\n"
+ "ld1 { v17.h }[4], [x23]\n"
+ "ld1 { v21.h }[4], [x22]\n"
+ "ld1 { v25.h }[4], [x21]\n"
+ "ld1 { v29.h }[4], [x20]\n"
"b 270f\n"
"264:" // Height 6: Partial accumulate: partial_2_8
- "tbz x14, #1, 265f\n"
- "ldr s9, [x13], #0x4\n"
- "ldr s13, [x25], #0x4\n"
- "mov x20, #0x14\n"
- "ldr s17, [x24], #0x4\n"
- "ldr s21, [x23], #0x4\n"
- "ldr s25, [x22], #0x4\n"
- "ldr s29, [x21], #0x4\n"
- "tbz x14, #0, 270f\n"
- "ld1 { v9.h }[2], [x13]\n"
- "ld1 { v13.h }[2], [x25]\n"
- "ld1 { v17.h }[2], [x24]\n"
- "ld1 { v21.h }[2], [x23]\n"
- "ld1 { v25.h }[2], [x22]\n"
- "ld1 { v29.h }[2], [x21]\n"
+ "tbz x13, #1, 265f\n"
+ "ldr s9, [x12], #0x4\n"
+ "ldr s13, [x24], #0x4\n"
+ "mov x19, #0x14\n"
+ "ldr s17, [x23], #0x4\n"
+ "ldr s21, [x22], #0x4\n"
+ "ldr s25, [x21], #0x4\n"
+ "ldr s29, [x20], #0x4\n"
+ "tbz x13, #0, 270f\n"
+ "ld1 { v9.h }[2], [x12]\n"
+ "ld1 { v13.h }[2], [x24]\n"
+ "ld1 { v17.h }[2], [x23]\n"
+ "ld1 { v21.h }[2], [x22]\n"
+ "ld1 { v25.h }[2], [x21]\n"
+ "ld1 { v29.h }[2], [x20]\n"
"b 270f\n"
"265:" // Height 6: Partial accumulate: partial_1_8
- "mov x20, #0x10\n"
- "tbz x14, #0, 270f\n"
- "ldr h9, [x13, #0x0]\n"
- "ldr h13, [x25, #0x0]\n"
- "ldr h17, [x24, #0x0]\n"
- "ldr h21, [x23, #0x0]\n"
- "ldr h25, [x22, #0x0]\n"
- "ldr h29, [x21, #0x0]\n"
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 270f\n"
+ "ldr h9, [x12, #0x0]\n"
+ "ldr h13, [x24, #0x0]\n"
+ "ldr h17, [x23, #0x0]\n"
+ "ldr h21, [x22, #0x0]\n"
+ "ldr h25, [x21, #0x0]\n"
+ "ldr h29, [x20, #0x0]\n"
"b 270f\n"
"266:" // Height 6: Partial accumulate: partial_4_0
- "tbz x14, #2, 268f\n"
- "ldr d8, [x13], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d24, [x22], #0x8\n"
- "ldr d28, [x21], #0x8\n"
- "tbz x14, #1, 267f\n"
- "ld1 { v8.s }[2], [x13], #0x4\n"
- "ld1 { v12.s }[2], [x25], #0x4\n"
- "mov x20, #0xc\n"
- "ld1 { v16.s }[2], [x24], #0x4\n"
- "ld1 { v20.s }[2], [x23], #0x4\n"
- "ld1 { v24.s }[2], [x22], #0x4\n"
- "ld1 { v28.s }[2], [x21], #0x4\n"
- "tbz x14, #0, 270f\n"
- "ld1 { v8.h }[6], [x13]\n"
- "ld1 { v12.h }[6], [x25]\n"
- "ld1 { v16.h }[6], [x24]\n"
- "ld1 { v20.h }[6], [x23]\n"
- "ld1 { v24.h }[6], [x22]\n"
- "ld1 { v28.h }[6], [x21]\n"
+ "tbz x13, #2, 268f\n"
+ "ldr d8, [x12], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "ldr d16, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "ldr d24, [x21], #0x8\n"
+ "ldr d28, [x20], #0x8\n"
+ "tbz x13, #1, 267f\n"
+ "ld1 { v8.s }[2], [x12], #0x4\n"
+ "ld1 { v12.s }[2], [x24], #0x4\n"
+ "mov x19, #0xc\n"
+ "ld1 { v16.s }[2], [x23], #0x4\n"
+ "ld1 { v20.s }[2], [x22], #0x4\n"
+ "ld1 { v24.s }[2], [x21], #0x4\n"
+ "ld1 { v28.s }[2], [x20], #0x4\n"
+ "tbz x13, #0, 270f\n"
+ "ld1 { v8.h }[6], [x12]\n"
+ "ld1 { v12.h }[6], [x24]\n"
+ "ld1 { v16.h }[6], [x23]\n"
+ "ld1 { v20.h }[6], [x22]\n"
+ "ld1 { v24.h }[6], [x21]\n"
+ "ld1 { v28.h }[6], [x20]\n"
"b 270f\n"
"267:" // Height 6: Partial accumulate: partial_1_4
- "mov x20, #0x8\n"
- "tbz x14, #0, 270f\n"
- "ld1 { v8.h }[4], [x13]\n"
- "ld1 { v12.h }[4], [x25]\n"
- "ld1 { v16.h }[4], [x24]\n"
- "ld1 { v20.h }[4], [x23]\n"
- "ld1 { v24.h }[4], [x22]\n"
- "ld1 { v28.h }[4], [x21]\n"
+ "mov x19, #0x8\n"
+ "tbz x13, #0, 270f\n"
+ "ld1 { v8.h }[4], [x12]\n"
+ "ld1 { v12.h }[4], [x24]\n"
+ "ld1 { v16.h }[4], [x23]\n"
+ "ld1 { v20.h }[4], [x22]\n"
+ "ld1 { v24.h }[4], [x21]\n"
+ "ld1 { v28.h }[4], [x20]\n"
"b 270f\n"
"268:" // Height 6: Partial accumulate: partial_2_0
- "tbz x14, #1, 269f\n"
- "ldr s8, [x13], #0x4\n"
- "ldr s12, [x25], #0x4\n"
- "mov x20, #0x4\n"
- "ldr s16, [x24], #0x4\n"
- "ldr s20, [x23], #0x4\n"
- "ldr s24, [x22], #0x4\n"
- "ldr s28, [x21], #0x4\n"
- "tbz x14, #0, 270f\n"
- "ld1 { v8.h }[2], [x13]\n"
- "ld1 { v12.h }[2], [x25]\n"
- "ld1 { v16.h }[2], [x24]\n"
- "ld1 { v20.h }[2], [x23]\n"
- "ld1 { v24.h }[2], [x22]\n"
- "ld1 { v28.h }[2], [x21]\n"
+ "tbz x13, #1, 269f\n"
+ "ldr s8, [x12], #0x4\n"
+ "ldr s12, [x24], #0x4\n"
+ "mov x19, #0x4\n"
+ "ldr s16, [x23], #0x4\n"
+ "ldr s20, [x22], #0x4\n"
+ "ldr s24, [x21], #0x4\n"
+ "ldr s28, [x20], #0x4\n"
+ "tbz x13, #0, 270f\n"
+ "ld1 { v8.h }[2], [x12]\n"
+ "ld1 { v12.h }[2], [x24]\n"
+ "ld1 { v16.h }[2], [x23]\n"
+ "ld1 { v20.h }[2], [x22]\n"
+ "ld1 { v24.h }[2], [x21]\n"
+ "ld1 { v28.h }[2], [x20]\n"
"b 270f\n"
"269:" // Height 6: Partial accumulate: partial_1_0
- "ldr h8, [x13, #0x0]\n"
- "ldr h12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr h16, [x24, #0x0]\n"
- "ldr h20, [x23, #0x0]\n"
- "ldr h24, [x22, #0x0]\n"
- "ldr h28, [x21, #0x0]\n"
+ "ldr h8, [x12, #0x0]\n"
+ "ldr h12, [x24, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr h16, [x23, #0x0]\n"
+ "ldr h20, [x22, #0x0]\n"
+ "ldr h24, [x21, #0x0]\n"
+ "ldr h28, [x20, #0x0]\n"
"270:" // Height 6: Partial accumulate: Done
- "sub x13, x13, x20\n"
+ "sub x12, x12, x19\n"
"b 273f\n"
"271:" // Height 6: full accumulate
- "ldr q8, [x13, #0x0]\n"
- "ldr q9, [x13, #0x10]\n"
- "ldr q10, [x13, #0x20]\n"
- "ldr q11, [x13, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q24, [x22, #0x0]\n"
- "ldr q25, [x22, #0x10]\n"
- "ldr q26, [x22, #0x20]\n"
- "ldr q27, [x22, #0x30]\n"
- "ldr q28, [x21, #0x0]\n"
- "ldr q29, [x21, #0x10]\n"
- "ldr q30, [x21, #0x20]\n"
- "ldr q31, [x21, #0x30]\n"
+ "ldr q8, [x12, #0x0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "ldr q10, [x12, #0x20]\n"
+ "ldr q11, [x12, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x0]\n"
+ "ldr q17, [x23, #0x10]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "ldr q19, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
+ "ldr q24, [x21, #0x0]\n"
+ "ldr q25, [x21, #0x10]\n"
+ "ldr q26, [x21, #0x20]\n"
+ "ldr q27, [x21, #0x30]\n"
+ "ldr q28, [x20, #0x0]\n"
+ "ldr q29, [x20, #0x10]\n"
+ "ldr q30, [x20, #0x20]\n"
+ "ldr q31, [x20, #0x30]\n"
"b 273f\n"
"272:" // Height 6: no accumulate
"movi v8.16b, #0x0\n"
@@ -4493,515 +4493,515 @@ void a64_ffhybrid_fp16_mla_6x32 (
"movi v30.16b, #0x0\n"
"movi v31.16b, #0x0\n"
"273:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"274:" // Height 6: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 275f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 276f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
- "add x22, x22, x20, LSL #1\n"
- "add x21, x21, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 276f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
+ "add x20, x20, x19, LSL #1\n"
"b 276f\n"
"275:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
"276:" // Height 6: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"blt 279f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x10\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q3, [x23, #0x0]\n"
- "ldr q4, [x22, #0x0]\n"
- "ldr q5, [x21, #0x0]\n"
- "ldr q6, [x12, #0x0]\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x10\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
+ "ldr q5, [x20, #0x0]\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"blt 278f\n"
"277:" // Height 6: Multiply loop: Main loop head
"fmla v8.8h, v6.8h, v0.h[0]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
- "sub x27, x27, #0x8\n"
- "cmp x27, #0x10\n"
+ "sub x26, x26, #0x8\n"
+ "cmp x26, #0x10\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
"fmla v20.8h, v6.8h, v3.h[0]\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"fmla v24.8h, v6.8h, v4.h[0]\n"
"fmla v28.8h, v6.8h, v5.h[0]\n"
- "ldr q6, [x10, #0x0]\n"
- "add x24, x24, #0x10\n"
+ "ldr q6, [x9, #0x0]\n"
+ "add x23, x23, #0x10\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
- "add x23, x23, #0x10\n"
"add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
"fmla v21.8h, v7.8h, v3.h[0]\n"
- "add x21, x21, #0x10\n"
+ "add x20, x20, #0x10\n"
"fmla v25.8h, v7.8h, v4.h[0]\n"
"fmla v29.8h, v7.8h, v5.h[0]\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
"fmla v18.8h, v6.8h, v2.h[0]\n"
"fmla v22.8h, v6.8h, v3.h[0]\n"
"fmla v26.8h, v6.8h, v4.h[0]\n"
"fmla v30.8h, v6.8h, v5.h[0]\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q6, [x11, #0x10]\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
"fmla v15.8h, v7.8h, v1.h[0]\n"
"fmla v19.8h, v7.8h, v2.h[0]\n"
"fmla v23.8h, v7.8h, v3.h[0]\n"
"fmla v27.8h, v7.8h, v4.h[0]\n"
"fmla v31.8h, v7.8h, v5.h[0]\n"
- "ldr q7, [x11, #0x10]\n"
+ "ldr q7, [x10, #0x10]\n"
"fmla v8.8h, v6.8h, v0.h[1]\n"
"fmla v12.8h, v6.8h, v1.h[1]\n"
"fmla v16.8h, v6.8h, v2.h[1]\n"
"fmla v20.8h, v6.8h, v3.h[1]\n"
"fmla v24.8h, v6.8h, v4.h[1]\n"
"fmla v28.8h, v6.8h, v5.h[1]\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
"fmla v9.8h, v7.8h, v0.h[1]\n"
"fmla v13.8h, v7.8h, v1.h[1]\n"
"fmla v17.8h, v7.8h, v2.h[1]\n"
"fmla v21.8h, v7.8h, v3.h[1]\n"
"fmla v25.8h, v7.8h, v4.h[1]\n"
"fmla v29.8h, v7.8h, v5.h[1]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
"fmla v10.8h, v6.8h, v0.h[1]\n"
"fmla v14.8h, v6.8h, v1.h[1]\n"
"fmla v18.8h, v6.8h, v2.h[1]\n"
"fmla v22.8h, v6.8h, v3.h[1]\n"
"fmla v26.8h, v6.8h, v4.h[1]\n"
"fmla v30.8h, v6.8h, v5.h[1]\n"
- "ldr q6, [x12, #0x20]\n"
+ "ldr q6, [x11, #0x20]\n"
"fmla v11.8h, v7.8h, v0.h[1]\n"
"fmla v15.8h, v7.8h, v1.h[1]\n"
"fmla v19.8h, v7.8h, v2.h[1]\n"
"fmla v23.8h, v7.8h, v3.h[1]\n"
"fmla v27.8h, v7.8h, v4.h[1]\n"
"fmla v31.8h, v7.8h, v5.h[1]\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
"fmla v8.8h, v6.8h, v0.h[2]\n"
"fmla v12.8h, v6.8h, v1.h[2]\n"
"fmla v16.8h, v6.8h, v2.h[2]\n"
"fmla v20.8h, v6.8h, v3.h[2]\n"
"fmla v24.8h, v6.8h, v4.h[2]\n"
"fmla v28.8h, v6.8h, v5.h[2]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
"fmla v9.8h, v7.8h, v0.h[2]\n"
"fmla v13.8h, v7.8h, v1.h[2]\n"
"fmla v17.8h, v7.8h, v2.h[2]\n"
"fmla v21.8h, v7.8h, v3.h[2]\n"
"fmla v25.8h, v7.8h, v4.h[2]\n"
"fmla v29.8h, v7.8h, v5.h[2]\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
"fmla v10.8h, v6.8h, v0.h[2]\n"
"fmla v14.8h, v6.8h, v1.h[2]\n"
"fmla v18.8h, v6.8h, v2.h[2]\n"
"fmla v22.8h, v6.8h, v3.h[2]\n"
"fmla v26.8h, v6.8h, v4.h[2]\n"
"fmla v30.8h, v6.8h, v5.h[2]\n"
- "ldr q6, [x12, #0x30]\n"
+ "ldr q6, [x11, #0x30]\n"
"fmla v11.8h, v7.8h, v0.h[2]\n"
"fmla v15.8h, v7.8h, v1.h[2]\n"
"fmla v19.8h, v7.8h, v2.h[2]\n"
"fmla v23.8h, v7.8h, v3.h[2]\n"
"fmla v27.8h, v7.8h, v4.h[2]\n"
"fmla v31.8h, v7.8h, v5.h[2]\n"
- "ldr q7, [x11, #0x30]\n"
+ "ldr q7, [x10, #0x30]\n"
"fmla v8.8h, v6.8h, v0.h[3]\n"
"fmla v12.8h, v6.8h, v1.h[3]\n"
"fmla v16.8h, v6.8h, v2.h[3]\n"
"fmla v20.8h, v6.8h, v3.h[3]\n"
"fmla v24.8h, v6.8h, v4.h[3]\n"
"fmla v28.8h, v6.8h, v5.h[3]\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
"fmla v9.8h, v7.8h, v0.h[3]\n"
"fmla v13.8h, v7.8h, v1.h[3]\n"
"fmla v17.8h, v7.8h, v2.h[3]\n"
"fmla v21.8h, v7.8h, v3.h[3]\n"
"fmla v25.8h, v7.8h, v4.h[3]\n"
"fmla v29.8h, v7.8h, v5.h[3]\n"
- "ldr q7, [x9, #0x30]\n"
+ "ldr q7, [x28, #0x30]\n"
"fmla v10.8h, v6.8h, v0.h[3]\n"
"fmla v14.8h, v6.8h, v1.h[3]\n"
"fmla v18.8h, v6.8h, v2.h[3]\n"
"fmla v22.8h, v6.8h, v3.h[3]\n"
"fmla v26.8h, v6.8h, v4.h[3]\n"
"fmla v30.8h, v6.8h, v5.h[3]\n"
- "ldr q6, [x12, #0x40]\n"
+ "ldr q6, [x11, #0x40]\n"
"fmla v11.8h, v7.8h, v0.h[3]\n"
"fmla v15.8h, v7.8h, v1.h[3]\n"
"fmla v19.8h, v7.8h, v2.h[3]\n"
"fmla v23.8h, v7.8h, v3.h[3]\n"
"fmla v27.8h, v7.8h, v4.h[3]\n"
"fmla v31.8h, v7.8h, v5.h[3]\n"
- "ldr q7, [x11, #0x40]\n"
+ "ldr q7, [x10, #0x40]\n"
"fmla v8.8h, v6.8h, v0.h[4]\n"
"fmla v12.8h, v6.8h, v1.h[4]\n"
"fmla v16.8h, v6.8h, v2.h[4]\n"
"fmla v20.8h, v6.8h, v3.h[4]\n"
"fmla v24.8h, v6.8h, v4.h[4]\n"
"fmla v28.8h, v6.8h, v5.h[4]\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
"fmla v9.8h, v7.8h, v0.h[4]\n"
"fmla v13.8h, v7.8h, v1.h[4]\n"
"fmla v17.8h, v7.8h, v2.h[4]\n"
"fmla v21.8h, v7.8h, v3.h[4]\n"
"fmla v25.8h, v7.8h, v4.h[4]\n"
"fmla v29.8h, v7.8h, v5.h[4]\n"
- "ldr q7, [x9, #0x40]\n"
+ "ldr q7, [x28, #0x40]\n"
"fmla v10.8h, v6.8h, v0.h[4]\n"
"fmla v14.8h, v6.8h, v1.h[4]\n"
"fmla v18.8h, v6.8h, v2.h[4]\n"
"fmla v22.8h, v6.8h, v3.h[4]\n"
"fmla v26.8h, v6.8h, v4.h[4]\n"
"fmla v30.8h, v6.8h, v5.h[4]\n"
- "ldr q6, [x12, #0x50]\n"
+ "ldr q6, [x11, #0x50]\n"
"fmla v11.8h, v7.8h, v0.h[4]\n"
"fmla v15.8h, v7.8h, v1.h[4]\n"
"fmla v19.8h, v7.8h, v2.h[4]\n"
"fmla v23.8h, v7.8h, v3.h[4]\n"
"fmla v27.8h, v7.8h, v4.h[4]\n"
"fmla v31.8h, v7.8h, v5.h[4]\n"
- "ldr q7, [x11, #0x50]\n"
+ "ldr q7, [x10, #0x50]\n"
"fmla v8.8h, v6.8h, v0.h[5]\n"
"fmla v12.8h, v6.8h, v1.h[5]\n"
"fmla v16.8h, v6.8h, v2.h[5]\n"
"fmla v20.8h, v6.8h, v3.h[5]\n"
"fmla v24.8h, v6.8h, v4.h[5]\n"
"fmla v28.8h, v6.8h, v5.h[5]\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
"fmla v9.8h, v7.8h, v0.h[5]\n"
"fmla v13.8h, v7.8h, v1.h[5]\n"
"fmla v17.8h, v7.8h, v2.h[5]\n"
"fmla v21.8h, v7.8h, v3.h[5]\n"
"fmla v25.8h, v7.8h, v4.h[5]\n"
"fmla v29.8h, v7.8h, v5.h[5]\n"
- "ldr q7, [x9, #0x50]\n"
+ "ldr q7, [x28, #0x50]\n"
"fmla v10.8h, v6.8h, v0.h[5]\n"
"fmla v14.8h, v6.8h, v1.h[5]\n"
"fmla v18.8h, v6.8h, v2.h[5]\n"
"fmla v22.8h, v6.8h, v3.h[5]\n"
"fmla v26.8h, v6.8h, v4.h[5]\n"
"fmla v30.8h, v6.8h, v5.h[5]\n"
- "ldr q6, [x12, #0x60]\n"
+ "ldr q6, [x11, #0x60]\n"
"fmla v11.8h, v7.8h, v0.h[5]\n"
"fmla v15.8h, v7.8h, v1.h[5]\n"
"fmla v19.8h, v7.8h, v2.h[5]\n"
"fmla v23.8h, v7.8h, v3.h[5]\n"
"fmla v27.8h, v7.8h, v4.h[5]\n"
"fmla v31.8h, v7.8h, v5.h[5]\n"
- "ldr q7, [x11, #0x60]\n"
+ "ldr q7, [x10, #0x60]\n"
"fmla v8.8h, v6.8h, v0.h[6]\n"
"fmla v12.8h, v6.8h, v1.h[6]\n"
"fmla v16.8h, v6.8h, v2.h[6]\n"
"fmla v20.8h, v6.8h, v3.h[6]\n"
"fmla v24.8h, v6.8h, v4.h[6]\n"
"fmla v28.8h, v6.8h, v5.h[6]\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
"fmla v9.8h, v7.8h, v0.h[6]\n"
"fmla v13.8h, v7.8h, v1.h[6]\n"
"fmla v17.8h, v7.8h, v2.h[6]\n"
"fmla v21.8h, v7.8h, v3.h[6]\n"
"fmla v25.8h, v7.8h, v4.h[6]\n"
"fmla v29.8h, v7.8h, v5.h[6]\n"
- "ldr q7, [x9, #0x60]\n"
+ "ldr q7, [x28, #0x60]\n"
"fmla v10.8h, v6.8h, v0.h[6]\n"
"fmla v14.8h, v6.8h, v1.h[6]\n"
"fmla v18.8h, v6.8h, v2.h[6]\n"
"fmla v22.8h, v6.8h, v3.h[6]\n"
"fmla v26.8h, v6.8h, v4.h[6]\n"
"fmla v30.8h, v6.8h, v5.h[6]\n"
- "ldr q6, [x12, #0x70]\n"
- "add x12, x12, #0x80\n"
+ "ldr q6, [x11, #0x70]\n"
+ "add x11, x11, #0x80\n"
"fmla v11.8h, v7.8h, v0.h[6]\n"
"fmla v15.8h, v7.8h, v1.h[6]\n"
"fmla v19.8h, v7.8h, v2.h[6]\n"
"fmla v23.8h, v7.8h, v3.h[6]\n"
"fmla v27.8h, v7.8h, v4.h[6]\n"
"fmla v31.8h, v7.8h, v5.h[6]\n"
- "ldr q7, [x11, #0x70]\n"
- "add x11, x11, #0x80\n"
+ "ldr q7, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
"fmla v8.8h, v6.8h, v0.h[7]\n"
"fmla v12.8h, v6.8h, v1.h[7]\n"
"fmla v16.8h, v6.8h, v2.h[7]\n"
"fmla v20.8h, v6.8h, v3.h[7]\n"
"fmla v24.8h, v6.8h, v4.h[7]\n"
"fmla v28.8h, v6.8h, v5.h[7]\n"
- "ldr q6, [x10, #0x70]\n"
- "add x10, x10, #0x80\n"
+ "ldr q6, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
"fmla v9.8h, v7.8h, v0.h[7]\n"
"fmla v13.8h, v7.8h, v1.h[7]\n"
"fmla v17.8h, v7.8h, v2.h[7]\n"
"fmla v21.8h, v7.8h, v3.h[7]\n"
"fmla v25.8h, v7.8h, v4.h[7]\n"
"fmla v29.8h, v7.8h, v5.h[7]\n"
- "ldr q7, [x9, #0x70]\n"
- "add x9, x9, #0x80\n"
+ "ldr q7, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
"fmla v10.8h, v6.8h, v0.h[7]\n"
"fmla v14.8h, v6.8h, v1.h[7]\n"
"fmla v18.8h, v6.8h, v2.h[7]\n"
"fmla v22.8h, v6.8h, v3.h[7]\n"
"fmla v26.8h, v6.8h, v4.h[7]\n"
"fmla v30.8h, v6.8h, v5.h[7]\n"
- "ldr q6, [x12, #0x0]\n"
+ "ldr q6, [x11, #0x0]\n"
"fmla v11.8h, v7.8h, v0.h[7]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
"fmla v15.8h, v7.8h, v1.h[7]\n"
- "ldr q1, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
"fmla v19.8h, v7.8h, v2.h[7]\n"
- "ldr q2, [x24, #0x0]\n"
+ "ldr q2, [x23, #0x0]\n"
"fmla v23.8h, v7.8h, v3.h[7]\n"
- "ldr q3, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
"fmla v27.8h, v7.8h, v4.h[7]\n"
- "ldr q4, [x22, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
"fmla v31.8h, v7.8h, v5.h[7]\n"
- "ldr q5, [x21, #0x0]\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q5, [x20, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"bge 277b\n"
"278:" // Height 6: Multiply loop: Single iteration only
"fmla v8.8h, v6.8h, v0.h[0]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
- "sub x27, x27, #0x8\n"
- "add x26, x26, #0x10\n"
+ "sub x26, x26, #0x8\n"
+ "add x25, x25, #0x10\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
"fmla v20.8h, v6.8h, v3.h[0]\n"
- "add x25, x25, #0x10\n"
"add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
"fmla v24.8h, v6.8h, v4.h[0]\n"
"fmla v28.8h, v6.8h, v5.h[0]\n"
- "ldr q6, [x10, #0x0]\n"
- "add x23, x23, #0x10\n"
+ "ldr q6, [x9, #0x0]\n"
+ "add x22, x22, #0x10\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
- "add x22, x22, #0x10\n"
"add x21, x21, #0x10\n"
+ "add x20, x20, #0x10\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
"fmla v21.8h, v7.8h, v3.h[0]\n"
"fmla v25.8h, v7.8h, v4.h[0]\n"
"fmla v29.8h, v7.8h, v5.h[0]\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
"fmla v18.8h, v6.8h, v2.h[0]\n"
"fmla v22.8h, v6.8h, v3.h[0]\n"
"fmla v26.8h, v6.8h, v4.h[0]\n"
"fmla v30.8h, v6.8h, v5.h[0]\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q6, [x11, #0x10]\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
"fmla v15.8h, v7.8h, v1.h[0]\n"
"fmla v19.8h, v7.8h, v2.h[0]\n"
"fmla v23.8h, v7.8h, v3.h[0]\n"
"fmla v27.8h, v7.8h, v4.h[0]\n"
"fmla v31.8h, v7.8h, v5.h[0]\n"
- "ldr q7, [x11, #0x10]\n"
+ "ldr q7, [x10, #0x10]\n"
"fmla v8.8h, v6.8h, v0.h[1]\n"
"fmla v12.8h, v6.8h, v1.h[1]\n"
"fmla v16.8h, v6.8h, v2.h[1]\n"
"fmla v20.8h, v6.8h, v3.h[1]\n"
"fmla v24.8h, v6.8h, v4.h[1]\n"
"fmla v28.8h, v6.8h, v5.h[1]\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
"fmla v9.8h, v7.8h, v0.h[1]\n"
"fmla v13.8h, v7.8h, v1.h[1]\n"
"fmla v17.8h, v7.8h, v2.h[1]\n"
"fmla v21.8h, v7.8h, v3.h[1]\n"
"fmla v25.8h, v7.8h, v4.h[1]\n"
"fmla v29.8h, v7.8h, v5.h[1]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
"fmla v10.8h, v6.8h, v0.h[1]\n"
"fmla v14.8h, v6.8h, v1.h[1]\n"
"fmla v18.8h, v6.8h, v2.h[1]\n"
"fmla v22.8h, v6.8h, v3.h[1]\n"
"fmla v26.8h, v6.8h, v4.h[1]\n"
"fmla v30.8h, v6.8h, v5.h[1]\n"
- "ldr q6, [x12, #0x20]\n"
+ "ldr q6, [x11, #0x20]\n"
"fmla v11.8h, v7.8h, v0.h[1]\n"
"fmla v15.8h, v7.8h, v1.h[1]\n"
"fmla v19.8h, v7.8h, v2.h[1]\n"
"fmla v23.8h, v7.8h, v3.h[1]\n"
"fmla v27.8h, v7.8h, v4.h[1]\n"
"fmla v31.8h, v7.8h, v5.h[1]\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
"fmla v8.8h, v6.8h, v0.h[2]\n"
"fmla v12.8h, v6.8h, v1.h[2]\n"
"fmla v16.8h, v6.8h, v2.h[2]\n"
"fmla v20.8h, v6.8h, v3.h[2]\n"
"fmla v24.8h, v6.8h, v4.h[2]\n"
"fmla v28.8h, v6.8h, v5.h[2]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
"fmla v9.8h, v7.8h, v0.h[2]\n"
"fmla v13.8h, v7.8h, v1.h[2]\n"
"fmla v17.8h, v7.8h, v2.h[2]\n"
"fmla v21.8h, v7.8h, v3.h[2]\n"
"fmla v25.8h, v7.8h, v4.h[2]\n"
"fmla v29.8h, v7.8h, v5.h[2]\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
"fmla v10.8h, v6.8h, v0.h[2]\n"
"fmla v14.8h, v6.8h, v1.h[2]\n"
"fmla v18.8h, v6.8h, v2.h[2]\n"
"fmla v22.8h, v6.8h, v3.h[2]\n"
"fmla v26.8h, v6.8h, v4.h[2]\n"
"fmla v30.8h, v6.8h, v5.h[2]\n"
- "ldr q6, [x12, #0x30]\n"
+ "ldr q6, [x11, #0x30]\n"
"fmla v11.8h, v7.8h, v0.h[2]\n"
"fmla v15.8h, v7.8h, v1.h[2]\n"
"fmla v19.8h, v7.8h, v2.h[2]\n"
"fmla v23.8h, v7.8h, v3.h[2]\n"
"fmla v27.8h, v7.8h, v4.h[2]\n"
"fmla v31.8h, v7.8h, v5.h[2]\n"
- "ldr q7, [x11, #0x30]\n"
+ "ldr q7, [x10, #0x30]\n"
"fmla v8.8h, v6.8h, v0.h[3]\n"
"fmla v12.8h, v6.8h, v1.h[3]\n"
"fmla v16.8h, v6.8h, v2.h[3]\n"
"fmla v20.8h, v6.8h, v3.h[3]\n"
"fmla v24.8h, v6.8h, v4.h[3]\n"
"fmla v28.8h, v6.8h, v5.h[3]\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
"fmla v9.8h, v7.8h, v0.h[3]\n"
"fmla v13.8h, v7.8h, v1.h[3]\n"
"fmla v17.8h, v7.8h, v2.h[3]\n"
"fmla v21.8h, v7.8h, v3.h[3]\n"
"fmla v25.8h, v7.8h, v4.h[3]\n"
"fmla v29.8h, v7.8h, v5.h[3]\n"
- "ldr q7, [x9, #0x30]\n"
+ "ldr q7, [x28, #0x30]\n"
"fmla v10.8h, v6.8h, v0.h[3]\n"
"fmla v14.8h, v6.8h, v1.h[3]\n"
"fmla v18.8h, v6.8h, v2.h[3]\n"
"fmla v22.8h, v6.8h, v3.h[3]\n"
"fmla v26.8h, v6.8h, v4.h[3]\n"
"fmla v30.8h, v6.8h, v5.h[3]\n"
- "ldr q6, [x12, #0x40]\n"
+ "ldr q6, [x11, #0x40]\n"
"fmla v11.8h, v7.8h, v0.h[3]\n"
"fmla v15.8h, v7.8h, v1.h[3]\n"
"fmla v19.8h, v7.8h, v2.h[3]\n"
"fmla v23.8h, v7.8h, v3.h[3]\n"
"fmla v27.8h, v7.8h, v4.h[3]\n"
"fmla v31.8h, v7.8h, v5.h[3]\n"
- "ldr q7, [x11, #0x40]\n"
+ "ldr q7, [x10, #0x40]\n"
"fmla v8.8h, v6.8h, v0.h[4]\n"
"fmla v12.8h, v6.8h, v1.h[4]\n"
"fmla v16.8h, v6.8h, v2.h[4]\n"
"fmla v20.8h, v6.8h, v3.h[4]\n"
"fmla v24.8h, v6.8h, v4.h[4]\n"
"fmla v28.8h, v6.8h, v5.h[4]\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
"fmla v9.8h, v7.8h, v0.h[4]\n"
"fmla v13.8h, v7.8h, v1.h[4]\n"
"fmla v17.8h, v7.8h, v2.h[4]\n"
"fmla v21.8h, v7.8h, v3.h[4]\n"
"fmla v25.8h, v7.8h, v4.h[4]\n"
"fmla v29.8h, v7.8h, v5.h[4]\n"
- "ldr q7, [x9, #0x40]\n"
+ "ldr q7, [x28, #0x40]\n"
"fmla v10.8h, v6.8h, v0.h[4]\n"
"fmla v14.8h, v6.8h, v1.h[4]\n"
"fmla v18.8h, v6.8h, v2.h[4]\n"
"fmla v22.8h, v6.8h, v3.h[4]\n"
"fmla v26.8h, v6.8h, v4.h[4]\n"
"fmla v30.8h, v6.8h, v5.h[4]\n"
- "ldr q6, [x12, #0x50]\n"
+ "ldr q6, [x11, #0x50]\n"
"fmla v11.8h, v7.8h, v0.h[4]\n"
"fmla v15.8h, v7.8h, v1.h[4]\n"
"fmla v19.8h, v7.8h, v2.h[4]\n"
"fmla v23.8h, v7.8h, v3.h[4]\n"
"fmla v27.8h, v7.8h, v4.h[4]\n"
"fmla v31.8h, v7.8h, v5.h[4]\n"
- "ldr q7, [x11, #0x50]\n"
+ "ldr q7, [x10, #0x50]\n"
"fmla v8.8h, v6.8h, v0.h[5]\n"
"fmla v12.8h, v6.8h, v1.h[5]\n"
"fmla v16.8h, v6.8h, v2.h[5]\n"
"fmla v20.8h, v6.8h, v3.h[5]\n"
"fmla v24.8h, v6.8h, v4.h[5]\n"
"fmla v28.8h, v6.8h, v5.h[5]\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
"fmla v9.8h, v7.8h, v0.h[5]\n"
"fmla v13.8h, v7.8h, v1.h[5]\n"
"fmla v17.8h, v7.8h, v2.h[5]\n"
"fmla v21.8h, v7.8h, v3.h[5]\n"
"fmla v25.8h, v7.8h, v4.h[5]\n"
"fmla v29.8h, v7.8h, v5.h[5]\n"
- "ldr q7, [x9, #0x50]\n"
+ "ldr q7, [x28, #0x50]\n"
"fmla v10.8h, v6.8h, v0.h[5]\n"
"fmla v14.8h, v6.8h, v1.h[5]\n"
"fmla v18.8h, v6.8h, v2.h[5]\n"
"fmla v22.8h, v6.8h, v3.h[5]\n"
"fmla v26.8h, v6.8h, v4.h[5]\n"
"fmla v30.8h, v6.8h, v5.h[5]\n"
- "ldr q6, [x12, #0x60]\n"
+ "ldr q6, [x11, #0x60]\n"
"fmla v11.8h, v7.8h, v0.h[5]\n"
"fmla v15.8h, v7.8h, v1.h[5]\n"
"fmla v19.8h, v7.8h, v2.h[5]\n"
"fmla v23.8h, v7.8h, v3.h[5]\n"
"fmla v27.8h, v7.8h, v4.h[5]\n"
"fmla v31.8h, v7.8h, v5.h[5]\n"
- "ldr q7, [x11, #0x60]\n"
+ "ldr q7, [x10, #0x60]\n"
"fmla v8.8h, v6.8h, v0.h[6]\n"
"fmla v12.8h, v6.8h, v1.h[6]\n"
"fmla v16.8h, v6.8h, v2.h[6]\n"
"fmla v20.8h, v6.8h, v3.h[6]\n"
"fmla v24.8h, v6.8h, v4.h[6]\n"
"fmla v28.8h, v6.8h, v5.h[6]\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
"fmla v9.8h, v7.8h, v0.h[6]\n"
"fmla v13.8h, v7.8h, v1.h[6]\n"
"fmla v17.8h, v7.8h, v2.h[6]\n"
"fmla v21.8h, v7.8h, v3.h[6]\n"
"fmla v25.8h, v7.8h, v4.h[6]\n"
"fmla v29.8h, v7.8h, v5.h[6]\n"
- "ldr q7, [x9, #0x60]\n"
+ "ldr q7, [x28, #0x60]\n"
"fmla v10.8h, v6.8h, v0.h[6]\n"
"fmla v14.8h, v6.8h, v1.h[6]\n"
"fmla v18.8h, v6.8h, v2.h[6]\n"
"fmla v22.8h, v6.8h, v3.h[6]\n"
"fmla v26.8h, v6.8h, v4.h[6]\n"
"fmla v30.8h, v6.8h, v5.h[6]\n"
- "ldr q6, [x12, #0x70]\n"
- "add x12, x12, #0x80\n"
+ "ldr q6, [x11, #0x70]\n"
+ "add x11, x11, #0x80\n"
"fmla v11.8h, v7.8h, v0.h[6]\n"
"fmla v15.8h, v7.8h, v1.h[6]\n"
"fmla v19.8h, v7.8h, v2.h[6]\n"
"fmla v23.8h, v7.8h, v3.h[6]\n"
"fmla v27.8h, v7.8h, v4.h[6]\n"
"fmla v31.8h, v7.8h, v5.h[6]\n"
- "ldr q7, [x11, #0x70]\n"
- "add x11, x11, #0x80\n"
+ "ldr q7, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
"fmla v8.8h, v6.8h, v0.h[7]\n"
"fmla v12.8h, v6.8h, v1.h[7]\n"
"fmla v16.8h, v6.8h, v2.h[7]\n"
"fmla v20.8h, v6.8h, v3.h[7]\n"
"fmla v24.8h, v6.8h, v4.h[7]\n"
"fmla v28.8h, v6.8h, v5.h[7]\n"
- "ldr q6, [x10, #0x70]\n"
- "add x10, x10, #0x80\n"
+ "ldr q6, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
"fmla v9.8h, v7.8h, v0.h[7]\n"
"fmla v13.8h, v7.8h, v1.h[7]\n"
"fmla v17.8h, v7.8h, v2.h[7]\n"
"fmla v21.8h, v7.8h, v3.h[7]\n"
"fmla v25.8h, v7.8h, v4.h[7]\n"
"fmla v29.8h, v7.8h, v5.h[7]\n"
- "ldr q7, [x9, #0x70]\n"
- "add x9, x9, #0x80\n"
+ "ldr q7, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
"fmla v10.8h, v6.8h, v0.h[7]\n"
"fmla v14.8h, v6.8h, v1.h[7]\n"
"fmla v18.8h, v6.8h, v2.h[7]\n"
@@ -5015,35 +5015,35 @@ void a64_ffhybrid_fp16_mla_6x32 (
"fmla v27.8h, v7.8h, v4.h[7]\n"
"fmla v31.8h, v7.8h, v5.h[7]\n"
"279:" // Height 6: Multiply loop: Main loop skip
- "cbz x27, 281f\n"
+ "cbz x26, 281f\n"
"280:" // Height 6: Multiply loop: Odd block loop
- "ldr h0, [x26], #0x2\n"
- "ldr h1, [x25], #0x2\n"
- "sub x27, x27, #0x1\n"
- "ldr h2, [x24], #0x2\n"
- "ldr h3, [x23], #0x2\n"
- "ldr h4, [x22], #0x2\n"
- "ldr h5, [x21], #0x2\n"
- "ldr q6, [x12, #0x0]\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr h0, [x25], #0x2\n"
+ "ldr h1, [x24], #0x2\n"
+ "sub x26, x26, #0x1\n"
+ "ldr h2, [x23], #0x2\n"
+ "ldr h3, [x22], #0x2\n"
+ "ldr h4, [x21], #0x2\n"
+ "ldr h5, [x20], #0x2\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"fmla v8.8h, v6.8h, v0.h[0]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
"fmla v20.8h, v6.8h, v3.h[0]\n"
- "add x12, x12, #0x10\n"
"add x11, x11, #0x10\n"
+ "add x10, x10, #0x10\n"
"fmla v24.8h, v6.8h, v4.h[0]\n"
"fmla v28.8h, v6.8h, v5.h[0]\n"
- "ldr q6, [x10, #0x0]\n"
- "add x10, x10, #0x10\n"
+ "ldr q6, [x9, #0x0]\n"
+ "add x9, x9, #0x10\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
"fmla v21.8h, v7.8h, v3.h[0]\n"
"fmla v25.8h, v7.8h, v4.h[0]\n"
"fmla v29.8h, v7.8h, v5.h[0]\n"
- "ldr q7, [x9, #0x0]\n"
- "add x9, x9, #0x10\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x28, x28, #0x10\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
"fmla v18.8h, v6.8h, v2.h[0]\n"
@@ -5056,23 +5056,23 @@ void a64_ffhybrid_fp16_mla_6x32 (
"fmla v23.8h, v7.8h, v3.h[0]\n"
"fmla v27.8h, v7.8h, v4.h[0]\n"
"fmla v31.8h, v7.8h, v5.h[0]\n"
- "cbnz x27, 280b\n"
+ "cbnz x26, 280b\n"
"281:" // Height 6: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 274b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
"tbz %x[flags], #1, 282f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.8h }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.8h }, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.8h }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.8h }, [x19]\n"
"fmin v8.8h, v8.8h, v1.8h\n"
"fmin v9.8h, v9.8h, v1.8h\n"
"fmin v10.8h, v10.8h, v1.8h\n"
@@ -5122,306 +5122,306 @@ void a64_ffhybrid_fp16_mla_6x32 (
"fmax v30.8h, v30.8h, v0.8h\n"
"fmax v31.8h, v31.8h, v0.8h\n"
"282:" // Height 6: No activation
- "cmp x14, #0x20\n"
+ "cmp x13, #0x20\n"
"bge 299f\n"
- "tbz x14, #4, 290f\n"
- "st1 { v8.8h }, [x13], #0x10\n"
- "st1 { v9.8h }, [x13], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v13.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v17.8h }, [x24], #0x10\n"
- "st1 { v20.8h }, [x23], #0x10\n"
- "st1 { v21.8h }, [x23], #0x10\n"
- "st1 { v24.8h }, [x22], #0x10\n"
- "st1 { v25.8h }, [x22], #0x10\n"
- "st1 { v28.8h }, [x21], #0x10\n"
- "st1 { v29.8h }, [x21], #0x10\n"
- "tbz x14, #3, 286f\n"
- "st1 { v10.8h }, [x13], #0x10\n"
- "st1 { v14.8h }, [x25], #0x10\n"
- "st1 { v18.8h }, [x24], #0x10\n"
- "st1 { v22.8h }, [x23], #0x10\n"
- "st1 { v26.8h }, [x22], #0x10\n"
- "st1 { v30.8h }, [x21], #0x10\n"
- "tbz x14, #2, 284f\n"
- "str d11, [x13], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
- "str d31, [x21], #0x8\n"
- "tbz x14, #1, 283f\n"
- "st1 { v11.s }[2], [x13], #0x4\n"
- "st1 { v15.s }[2], [x25], #0x4\n"
- "st1 { v19.s }[2], [x24], #0x4\n"
- "st1 { v23.s }[2], [x23], #0x4\n"
- "st1 { v27.s }[2], [x22], #0x4\n"
- "st1 { v31.s }[2], [x21], #0x4\n"
- "tbz x14, #0, 298f\n"
- "st1 { v11.h }[6], [x13]\n"
- "st1 { v15.h }[6], [x25]\n"
- "st1 { v19.h }[6], [x24]\n"
- "st1 { v23.h }[6], [x23]\n"
- "st1 { v27.h }[6], [x22]\n"
- "st1 { v31.h }[6], [x21]\n"
+ "tbz x13, #4, 290f\n"
+ "st1 { v8.8h }, [x12], #0x10\n"
+ "st1 { v9.8h }, [x12], #0x10\n"
+ "st1 { v12.8h }, [x24], #0x10\n"
+ "st1 { v13.8h }, [x24], #0x10\n"
+ "st1 { v16.8h }, [x23], #0x10\n"
+ "st1 { v17.8h }, [x23], #0x10\n"
+ "st1 { v20.8h }, [x22], #0x10\n"
+ "st1 { v21.8h }, [x22], #0x10\n"
+ "st1 { v24.8h }, [x21], #0x10\n"
+ "st1 { v25.8h }, [x21], #0x10\n"
+ "st1 { v28.8h }, [x20], #0x10\n"
+ "st1 { v29.8h }, [x20], #0x10\n"
+ "tbz x13, #3, 286f\n"
+ "st1 { v10.8h }, [x12], #0x10\n"
+ "st1 { v14.8h }, [x24], #0x10\n"
+ "st1 { v18.8h }, [x23], #0x10\n"
+ "st1 { v22.8h }, [x22], #0x10\n"
+ "st1 { v26.8h }, [x21], #0x10\n"
+ "st1 { v30.8h }, [x20], #0x10\n"
+ "tbz x13, #2, 284f\n"
+ "str d11, [x12], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "str d23, [x22], #0x8\n"
+ "str d27, [x21], #0x8\n"
+ "str d31, [x20], #0x8\n"
+ "tbz x13, #1, 283f\n"
+ "st1 { v11.s }[2], [x12], #0x4\n"
+ "st1 { v15.s }[2], [x24], #0x4\n"
+ "st1 { v19.s }[2], [x23], #0x4\n"
+ "st1 { v23.s }[2], [x22], #0x4\n"
+ "st1 { v27.s }[2], [x21], #0x4\n"
+ "st1 { v31.s }[2], [x20], #0x4\n"
+ "tbz x13, #0, 298f\n"
+ "st1 { v11.h }[6], [x12]\n"
+ "st1 { v15.h }[6], [x24]\n"
+ "st1 { v19.h }[6], [x23]\n"
+ "st1 { v23.h }[6], [x22]\n"
+ "st1 { v27.h }[6], [x21]\n"
+ "st1 { v31.h }[6], [x20]\n"
"b 298f\n"
"283:" // Height 6: Partial direct writeback: partial_1_28
- "tbz x14, #0, 298f\n"
- "st1 { v11.h }[4], [x13]\n"
- "st1 { v15.h }[4], [x25]\n"
- "st1 { v19.h }[4], [x24]\n"
- "st1 { v23.h }[4], [x23]\n"
- "st1 { v27.h }[4], [x22]\n"
- "st1 { v31.h }[4], [x21]\n"
+ "tbz x13, #0, 298f\n"
+ "st1 { v11.h }[4], [x12]\n"
+ "st1 { v15.h }[4], [x24]\n"
+ "st1 { v19.h }[4], [x23]\n"
+ "st1 { v23.h }[4], [x22]\n"
+ "st1 { v27.h }[4], [x21]\n"
+ "st1 { v31.h }[4], [x20]\n"
"b 298f\n"
"284:" // Height 6: Partial direct writeback: partial_2_24
- "tbz x14, #1, 285f\n"
- "str s11, [x13], #0x4\n"
- "str s15, [x25], #0x4\n"
- "str s19, [x24], #0x4\n"
- "str s23, [x23], #0x4\n"
- "str s27, [x22], #0x4\n"
- "str s31, [x21], #0x4\n"
- "tbz x14, #0, 298f\n"
- "st1 { v11.h }[2], [x13]\n"
- "st1 { v15.h }[2], [x25]\n"
- "st1 { v19.h }[2], [x24]\n"
- "st1 { v23.h }[2], [x23]\n"
- "st1 { v27.h }[2], [x22]\n"
- "st1 { v31.h }[2], [x21]\n"
+ "tbz x13, #1, 285f\n"
+ "str s11, [x12], #0x4\n"
+ "str s15, [x24], #0x4\n"
+ "str s19, [x23], #0x4\n"
+ "str s23, [x22], #0x4\n"
+ "str s27, [x21], #0x4\n"
+ "str s31, [x20], #0x4\n"
+ "tbz x13, #0, 298f\n"
+ "st1 { v11.h }[2], [x12]\n"
+ "st1 { v15.h }[2], [x24]\n"
+ "st1 { v19.h }[2], [x23]\n"
+ "st1 { v23.h }[2], [x22]\n"
+ "st1 { v27.h }[2], [x21]\n"
+ "st1 { v31.h }[2], [x20]\n"
"b 298f\n"
"285:" // Height 6: Partial direct writeback: partial_1_24
- "tbz x14, #0, 298f\n"
- "str h11, [x13, #0x0]\n"
- "str h15, [x25, #0x0]\n"
- "str h19, [x24, #0x0]\n"
- "str h23, [x23, #0x0]\n"
- "str h27, [x22, #0x0]\n"
- "str h31, [x21, #0x0]\n"
+ "tbz x13, #0, 298f\n"
+ "str h11, [x12, #0x0]\n"
+ "str h15, [x24, #0x0]\n"
+ "str h19, [x23, #0x0]\n"
+ "str h23, [x22, #0x0]\n"
+ "str h27, [x21, #0x0]\n"
+ "str h31, [x20, #0x0]\n"
"b 298f\n"
"286:" // Height 6: Partial direct writeback: partial_4_16
- "tbz x14, #2, 288f\n"
- "str d10, [x13], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
- "str d30, [x21], #0x8\n"
- "tbz x14, #1, 287f\n"
- "st1 { v10.s }[2], [x13], #0x4\n"
- "st1 { v14.s }[2], [x25], #0x4\n"
- "st1 { v18.s }[2], [x24], #0x4\n"
- "st1 { v22.s }[2], [x23], #0x4\n"
- "st1 { v26.s }[2], [x22], #0x4\n"
- "st1 { v30.s }[2], [x21], #0x4\n"
- "tbz x14, #0, 298f\n"
- "st1 { v10.h }[6], [x13]\n"
- "st1 { v14.h }[6], [x25]\n"
- "st1 { v18.h }[6], [x24]\n"
- "st1 { v22.h }[6], [x23]\n"
- "st1 { v26.h }[6], [x22]\n"
- "st1 { v30.h }[6], [x21]\n"
+ "tbz x13, #2, 288f\n"
+ "str d10, [x12], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
+ "str d22, [x22], #0x8\n"
+ "str d26, [x21], #0x8\n"
+ "str d30, [x20], #0x8\n"
+ "tbz x13, #1, 287f\n"
+ "st1 { v10.s }[2], [x12], #0x4\n"
+ "st1 { v14.s }[2], [x24], #0x4\n"
+ "st1 { v18.s }[2], [x23], #0x4\n"
+ "st1 { v22.s }[2], [x22], #0x4\n"
+ "st1 { v26.s }[2], [x21], #0x4\n"
+ "st1 { v30.s }[2], [x20], #0x4\n"
+ "tbz x13, #0, 298f\n"
+ "st1 { v10.h }[6], [x12]\n"
+ "st1 { v14.h }[6], [x24]\n"
+ "st1 { v18.h }[6], [x23]\n"
+ "st1 { v22.h }[6], [x22]\n"
+ "st1 { v26.h }[6], [x21]\n"
+ "st1 { v30.h }[6], [x20]\n"
"b 298f\n"
"287:" // Height 6: Partial direct writeback: partial_1_20
- "tbz x14, #0, 298f\n"
- "st1 { v10.h }[4], [x13]\n"
- "st1 { v14.h }[4], [x25]\n"
- "st1 { v18.h }[4], [x24]\n"
- "st1 { v22.h }[4], [x23]\n"
- "st1 { v26.h }[4], [x22]\n"
- "st1 { v30.h }[4], [x21]\n"
+ "tbz x13, #0, 298f\n"
+ "st1 { v10.h }[4], [x12]\n"
+ "st1 { v14.h }[4], [x24]\n"
+ "st1 { v18.h }[4], [x23]\n"
+ "st1 { v22.h }[4], [x22]\n"
+ "st1 { v26.h }[4], [x21]\n"
+ "st1 { v30.h }[4], [x20]\n"
"b 298f\n"
"288:" // Height 6: Partial direct writeback: partial_2_16
- "tbz x14, #1, 289f\n"
- "str s10, [x13], #0x4\n"
- "str s14, [x25], #0x4\n"
- "str s18, [x24], #0x4\n"
- "str s22, [x23], #0x4\n"
- "str s26, [x22], #0x4\n"
- "str s30, [x21], #0x4\n"
- "tbz x14, #0, 298f\n"
- "st1 { v10.h }[2], [x13]\n"
- "st1 { v14.h }[2], [x25]\n"
- "st1 { v18.h }[2], [x24]\n"
- "st1 { v22.h }[2], [x23]\n"
- "st1 { v26.h }[2], [x22]\n"
- "st1 { v30.h }[2], [x21]\n"
+ "tbz x13, #1, 289f\n"
+ "str s10, [x12], #0x4\n"
+ "str s14, [x24], #0x4\n"
+ "str s18, [x23], #0x4\n"
+ "str s22, [x22], #0x4\n"
+ "str s26, [x21], #0x4\n"
+ "str s30, [x20], #0x4\n"
+ "tbz x13, #0, 298f\n"
+ "st1 { v10.h }[2], [x12]\n"
+ "st1 { v14.h }[2], [x24]\n"
+ "st1 { v18.h }[2], [x23]\n"
+ "st1 { v22.h }[2], [x22]\n"
+ "st1 { v26.h }[2], [x21]\n"
+ "st1 { v30.h }[2], [x20]\n"
"b 298f\n"
"289:" // Height 6: Partial direct writeback: partial_1_16
- "tbz x14, #0, 298f\n"
- "str h10, [x13, #0x0]\n"
- "str h14, [x25, #0x0]\n"
- "str h18, [x24, #0x0]\n"
- "str h22, [x23, #0x0]\n"
- "str h26, [x22, #0x0]\n"
- "str h30, [x21, #0x0]\n"
+ "tbz x13, #0, 298f\n"
+ "str h10, [x12, #0x0]\n"
+ "str h14, [x24, #0x0]\n"
+ "str h18, [x23, #0x0]\n"
+ "str h22, [x22, #0x0]\n"
+ "str h26, [x21, #0x0]\n"
+ "str h30, [x20, #0x0]\n"
"b 298f\n"
"290:" // Height 6: Partial direct writeback: partial_8_0
- "tbz x14, #3, 294f\n"
- "st1 { v8.8h }, [x13], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v20.8h }, [x23], #0x10\n"
- "st1 { v24.8h }, [x22], #0x10\n"
- "st1 { v28.8h }, [x21], #0x10\n"
- "tbz x14, #2, 292f\n"
- "str d9, [x13], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
- "str d29, [x21], #0x8\n"
- "tbz x14, #1, 291f\n"
- "st1 { v9.s }[2], [x13], #0x4\n"
- "st1 { v13.s }[2], [x25], #0x4\n"
- "st1 { v17.s }[2], [x24], #0x4\n"
- "st1 { v21.s }[2], [x23], #0x4\n"
- "st1 { v25.s }[2], [x22], #0x4\n"
- "st1 { v29.s }[2], [x21], #0x4\n"
- "tbz x14, #0, 298f\n"
- "st1 { v9.h }[6], [x13]\n"
- "st1 { v13.h }[6], [x25]\n"
- "st1 { v17.h }[6], [x24]\n"
- "st1 { v21.h }[6], [x23]\n"
- "st1 { v25.h }[6], [x22]\n"
- "st1 { v29.h }[6], [x21]\n"
+ "tbz x13, #3, 294f\n"
+ "st1 { v8.8h }, [x12], #0x10\n"
+ "st1 { v12.8h }, [x24], #0x10\n"
+ "st1 { v16.8h }, [x23], #0x10\n"
+ "st1 { v20.8h }, [x22], #0x10\n"
+ "st1 { v24.8h }, [x21], #0x10\n"
+ "st1 { v28.8h }, [x20], #0x10\n"
+ "tbz x13, #2, 292f\n"
+ "str d9, [x12], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
+ "str d21, [x22], #0x8\n"
+ "str d25, [x21], #0x8\n"
+ "str d29, [x20], #0x8\n"
+ "tbz x13, #1, 291f\n"
+ "st1 { v9.s }[2], [x12], #0x4\n"
+ "st1 { v13.s }[2], [x24], #0x4\n"
+ "st1 { v17.s }[2], [x23], #0x4\n"
+ "st1 { v21.s }[2], [x22], #0x4\n"
+ "st1 { v25.s }[2], [x21], #0x4\n"
+ "st1 { v29.s }[2], [x20], #0x4\n"
+ "tbz x13, #0, 298f\n"
+ "st1 { v9.h }[6], [x12]\n"
+ "st1 { v13.h }[6], [x24]\n"
+ "st1 { v17.h }[6], [x23]\n"
+ "st1 { v21.h }[6], [x22]\n"
+ "st1 { v25.h }[6], [x21]\n"
+ "st1 { v29.h }[6], [x20]\n"
"b 298f\n"
"291:" // Height 6: Partial direct writeback: partial_1_12
- "tbz x14, #0, 298f\n"
- "st1 { v9.h }[4], [x13]\n"
- "st1 { v13.h }[4], [x25]\n"
- "st1 { v17.h }[4], [x24]\n"
- "st1 { v21.h }[4], [x23]\n"
- "st1 { v25.h }[4], [x22]\n"
- "st1 { v29.h }[4], [x21]\n"
+ "tbz x13, #0, 298f\n"
+ "st1 { v9.h }[4], [x12]\n"
+ "st1 { v13.h }[4], [x24]\n"
+ "st1 { v17.h }[4], [x23]\n"
+ "st1 { v21.h }[4], [x22]\n"
+ "st1 { v25.h }[4], [x21]\n"
+ "st1 { v29.h }[4], [x20]\n"
"b 298f\n"
"292:" // Height 6: Partial direct writeback: partial_2_8
- "tbz x14, #1, 293f\n"
- "str s9, [x13], #0x4\n"
- "str s13, [x25], #0x4\n"
- "str s17, [x24], #0x4\n"
- "str s21, [x23], #0x4\n"
- "str s25, [x22], #0x4\n"
- "str s29, [x21], #0x4\n"
- "tbz x14, #0, 298f\n"
- "st1 { v9.h }[2], [x13]\n"
- "st1 { v13.h }[2], [x25]\n"
- "st1 { v17.h }[2], [x24]\n"
- "st1 { v21.h }[2], [x23]\n"
- "st1 { v25.h }[2], [x22]\n"
- "st1 { v29.h }[2], [x21]\n"
+ "tbz x13, #1, 293f\n"
+ "str s9, [x12], #0x4\n"
+ "str s13, [x24], #0x4\n"
+ "str s17, [x23], #0x4\n"
+ "str s21, [x22], #0x4\n"
+ "str s25, [x21], #0x4\n"
+ "str s29, [x20], #0x4\n"
+ "tbz x13, #0, 298f\n"
+ "st1 { v9.h }[2], [x12]\n"
+ "st1 { v13.h }[2], [x24]\n"
+ "st1 { v17.h }[2], [x23]\n"
+ "st1 { v21.h }[2], [x22]\n"
+ "st1 { v25.h }[2], [x21]\n"
+ "st1 { v29.h }[2], [x20]\n"
"b 298f\n"
"293:" // Height 6: Partial direct writeback: partial_1_8
- "tbz x14, #0, 298f\n"
- "str h9, [x13, #0x0]\n"
- "str h13, [x25, #0x0]\n"
- "str h17, [x24, #0x0]\n"
- "str h21, [x23, #0x0]\n"
- "str h25, [x22, #0x0]\n"
- "str h29, [x21, #0x0]\n"
+ "tbz x13, #0, 298f\n"
+ "str h9, [x12, #0x0]\n"
+ "str h13, [x24, #0x0]\n"
+ "str h17, [x23, #0x0]\n"
+ "str h21, [x22, #0x0]\n"
+ "str h25, [x21, #0x0]\n"
+ "str h29, [x20, #0x0]\n"
"b 298f\n"
"294:" // Height 6: Partial direct writeback: partial_4_0
- "tbz x14, #2, 296f\n"
- "str d8, [x13], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
- "str d28, [x21], #0x8\n"
- "tbz x14, #1, 295f\n"
- "st1 { v8.s }[2], [x13], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
- "st1 { v16.s }[2], [x24], #0x4\n"
- "st1 { v20.s }[2], [x23], #0x4\n"
- "st1 { v24.s }[2], [x22], #0x4\n"
- "st1 { v28.s }[2], [x21], #0x4\n"
- "tbz x14, #0, 298f\n"
- "st1 { v8.h }[6], [x13]\n"
- "st1 { v12.h }[6], [x25]\n"
- "st1 { v16.h }[6], [x24]\n"
- "st1 { v20.h }[6], [x23]\n"
- "st1 { v24.h }[6], [x22]\n"
- "st1 { v28.h }[6], [x21]\n"
+ "tbz x13, #2, 296f\n"
+ "str d8, [x12], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
+ "str d20, [x22], #0x8\n"
+ "str d24, [x21], #0x8\n"
+ "str d28, [x20], #0x8\n"
+ "tbz x13, #1, 295f\n"
+ "st1 { v8.s }[2], [x12], #0x4\n"
+ "st1 { v12.s }[2], [x24], #0x4\n"
+ "st1 { v16.s }[2], [x23], #0x4\n"
+ "st1 { v20.s }[2], [x22], #0x4\n"
+ "st1 { v24.s }[2], [x21], #0x4\n"
+ "st1 { v28.s }[2], [x20], #0x4\n"
+ "tbz x13, #0, 298f\n"
+ "st1 { v8.h }[6], [x12]\n"
+ "st1 { v12.h }[6], [x24]\n"
+ "st1 { v16.h }[6], [x23]\n"
+ "st1 { v20.h }[6], [x22]\n"
+ "st1 { v24.h }[6], [x21]\n"
+ "st1 { v28.h }[6], [x20]\n"
"b 298f\n"
"295:" // Height 6: Partial direct writeback: partial_1_4
- "tbz x14, #0, 298f\n"
- "st1 { v8.h }[4], [x13]\n"
- "st1 { v12.h }[4], [x25]\n"
- "st1 { v16.h }[4], [x24]\n"
- "st1 { v20.h }[4], [x23]\n"
- "st1 { v24.h }[4], [x22]\n"
- "st1 { v28.h }[4], [x21]\n"
+ "tbz x13, #0, 298f\n"
+ "st1 { v8.h }[4], [x12]\n"
+ "st1 { v12.h }[4], [x24]\n"
+ "st1 { v16.h }[4], [x23]\n"
+ "st1 { v20.h }[4], [x22]\n"
+ "st1 { v24.h }[4], [x21]\n"
+ "st1 { v28.h }[4], [x20]\n"
"b 298f\n"
"296:" // Height 6: Partial direct writeback: partial_2_0
- "tbz x14, #1, 297f\n"
- "str s8, [x13], #0x4\n"
- "str s12, [x25], #0x4\n"
- "str s16, [x24], #0x4\n"
- "str s20, [x23], #0x4\n"
- "str s24, [x22], #0x4\n"
- "str s28, [x21], #0x4\n"
- "tbz x14, #0, 298f\n"
- "st1 { v8.h }[2], [x13]\n"
- "st1 { v12.h }[2], [x25]\n"
- "st1 { v16.h }[2], [x24]\n"
- "st1 { v20.h }[2], [x23]\n"
- "st1 { v24.h }[2], [x22]\n"
- "st1 { v28.h }[2], [x21]\n"
+ "tbz x13, #1, 297f\n"
+ "str s8, [x12], #0x4\n"
+ "str s12, [x24], #0x4\n"
+ "str s16, [x23], #0x4\n"
+ "str s20, [x22], #0x4\n"
+ "str s24, [x21], #0x4\n"
+ "str s28, [x20], #0x4\n"
+ "tbz x13, #0, 298f\n"
+ "st1 { v8.h }[2], [x12]\n"
+ "st1 { v12.h }[2], [x24]\n"
+ "st1 { v16.h }[2], [x23]\n"
+ "st1 { v20.h }[2], [x22]\n"
+ "st1 { v24.h }[2], [x21]\n"
+ "st1 { v28.h }[2], [x20]\n"
"b 298f\n"
"297:" // Height 6: Partial direct writeback: partial_1_0
- "str h8, [x13, #0x0]\n"
- "str h12, [x25, #0x0]\n"
- "str h16, [x24, #0x0]\n"
- "str h20, [x23, #0x0]\n"
- "str h24, [x22, #0x0]\n"
- "str h28, [x21, #0x0]\n"
+ "str h8, [x12, #0x0]\n"
+ "str h12, [x24, #0x0]\n"
+ "str h16, [x23, #0x0]\n"
+ "str h20, [x22, #0x0]\n"
+ "str h24, [x21, #0x0]\n"
+ "str h28, [x20, #0x0]\n"
"298:" // Height 6: Partial direct writeback: Done
"b 300f\n"
"299:" // Height 6: Full writeback
- "str q8, [x13, #0x0]\n"
- "str q9, [x13, #0x10]\n"
- "str q10, [x13, #0x20]\n"
- "str q11, [x13, #0x30]\n"
- "add x13, x13, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
- "str q24, [x22, #0x0]\n"
- "str q25, [x22, #0x10]\n"
- "str q26, [x22, #0x20]\n"
- "str q27, [x22, #0x30]\n"
- "str q28, [x21, #0x0]\n"
- "str q29, [x21, #0x10]\n"
- "str q30, [x21, #0x20]\n"
- "str q31, [x21, #0x30]\n"
+ "str q8, [x12, #0x0]\n"
+ "str q9, [x12, #0x10]\n"
+ "str q10, [x12, #0x20]\n"
+ "str q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "str q20, [x22, #0x0]\n"
+ "str q21, [x22, #0x10]\n"
+ "str q22, [x22, #0x20]\n"
+ "str q23, [x22, #0x30]\n"
+ "str q24, [x21, #0x0]\n"
+ "str q25, [x21, #0x10]\n"
+ "str q26, [x21, #0x20]\n"
+ "str q27, [x21, #0x30]\n"
+ "str q28, [x20, #0x0]\n"
+ "str q29, [x20, #0x10]\n"
+ "str q30, [x20, #0x20]\n"
+ "str q31, [x20, #0x30]\n"
"300:" // Height 6: Writeback done
- "subs x14, x14, #0x20\n"
+ "subs x13, x13, #0x20\n"
"bgt 252b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 302f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 301f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"301:" // Update direct input
- "mov x20, #0xc\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0xc\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"302:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32_mla_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32_mla_6x16/generic.cpp
index e0fbe17bad..f811116a06 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32_mla_6x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32_mla_6x16/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -104,100 +104,100 @@ void a64_ffhybrid_fp32_mla_6x16 (
"cmp %x[M], #0x2\n"
"bgt 69f\n"
"beq 35f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #2\n"
- "add x10, x11, x20, LSL #2\n"
- "add x9, x10, x20, LSL #2\n"
- "add x20, x9, x20, LSL #2\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0xc\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0xc\n"
"bgt 3f\n"
- "cmp x14, #0x8\n"
- "mov x9, x12\n"
+ "cmp x13, #0x8\n"
+ "mov x28, x11\n"
"bgt 3f\n"
- "cmp x14, #0x4\n"
- "mov x10, x12\n"
+ "cmp x13, #0x4\n"
+ "mov x9, x11\n"
"bgt 3f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"3:" // Height 1: B setup done
- "cbz x15, 4f\n"
- "ldr q8, [x15, #0x0]\n"
- "ldr q9, [x15, #0x10]\n"
- "ldr q10, [x15, #0x20]\n"
- "ldr q11, [x15, #0x30]\n"
- "add x15, x15, #0x40\n"
+ "cbz x14, 4f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
+ "add x14, x14, #0x40\n"
"b 15f\n"
"4:" // Height 1: no bias
"tbz %x[flags], #0, 14f\n"
- "cmp x14, #0x10\n"
+ "cmp x13, #0x10\n"
"bge 13f\n"
- "tbz x14, #3, 8f\n"
- "ld1 { v8.4s }, [x13], #0x10\n"
- "ld1 { v9.4s }, [x13], #0x10\n"
- "tbz x14, #2, 6f\n"
- "ld1 { v10.4s }, [x13], #0x10\n"
- "tbz x14, #1, 5f\n"
- "ldr d11, [x13], #0x8\n"
- "mov x20, #0x38\n"
- "tbz x14, #0, 12f\n"
- "ld1 { v11.s }[2], [x13]\n"
+ "tbz x13, #3, 8f\n"
+ "ld1 { v8.4s }, [x12], #0x10\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "tbz x13, #2, 6f\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "tbz x13, #1, 5f\n"
+ "ldr d11, [x12], #0x8\n"
+ "mov x19, #0x38\n"
+ "tbz x13, #0, 12f\n"
+ "ld1 { v11.s }[2], [x12]\n"
"b 12f\n"
"5:" // Height 1: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
- "tbz x14, #0, 12f\n"
- "ldr s11, [x13, #0x0]\n"
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 12f\n"
+ "ldr s11, [x12, #0x0]\n"
"b 12f\n"
"6:" // Height 1: Partial accumulate: partial_2_8
- "tbz x14, #1, 7f\n"
- "ldr d10, [x13], #0x8\n"
- "mov x20, #0x28\n"
- "tbz x14, #0, 12f\n"
- "ld1 { v10.s }[2], [x13]\n"
+ "tbz x13, #1, 7f\n"
+ "ldr d10, [x12], #0x8\n"
+ "mov x19, #0x28\n"
+ "tbz x13, #0, 12f\n"
+ "ld1 { v10.s }[2], [x12]\n"
"b 12f\n"
"7:" // Height 1: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
- "tbz x14, #0, 12f\n"
- "ldr s10, [x13, #0x0]\n"
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 12f\n"
+ "ldr s10, [x12, #0x0]\n"
"b 12f\n"
"8:" // Height 1: Partial accumulate: partial_4_0
- "tbz x14, #2, 10f\n"
- "ld1 { v8.4s }, [x13], #0x10\n"
- "tbz x14, #1, 9f\n"
- "ldr d9, [x13], #0x8\n"
- "mov x20, #0x18\n"
- "tbz x14, #0, 12f\n"
- "ld1 { v9.s }[2], [x13]\n"
+ "tbz x13, #2, 10f\n"
+ "ld1 { v8.4s }, [x12], #0x10\n"
+ "tbz x13, #1, 9f\n"
+ "ldr d9, [x12], #0x8\n"
+ "mov x19, #0x18\n"
+ "tbz x13, #0, 12f\n"
+ "ld1 { v9.s }[2], [x12]\n"
"b 12f\n"
"9:" // Height 1: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
- "tbz x14, #0, 12f\n"
- "ldr s9, [x13, #0x0]\n"
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 12f\n"
+ "ldr s9, [x12, #0x0]\n"
"b 12f\n"
"10:" // Height 1: Partial accumulate: partial_2_0
- "tbz x14, #1, 11f\n"
- "ldr d8, [x13], #0x8\n"
- "mov x20, #0x8\n"
- "tbz x14, #0, 12f\n"
- "ld1 { v8.s }[2], [x13]\n"
+ "tbz x13, #1, 11f\n"
+ "ldr d8, [x12], #0x8\n"
+ "mov x19, #0x8\n"
+ "tbz x13, #0, 12f\n"
+ "ld1 { v8.s }[2], [x12]\n"
"b 12f\n"
"11:" // Height 1: Partial accumulate: partial_1_0
- "ldr s8, [x13, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr s8, [x12, #0x0]\n"
+ "mov x19, #0x0\n"
"12:" // Height 1: Partial accumulate: Done
- "sub x13, x13, x20\n"
+ "sub x12, x12, x19\n"
"b 15f\n"
"13:" // Height 1: full accumulate
- "ldr q8, [x13, #0x0]\n"
- "ldr q9, [x13, #0x10]\n"
- "ldr q10, [x13, #0x20]\n"
- "ldr q11, [x13, #0x30]\n"
+ "ldr q8, [x12, #0x0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "ldr q10, [x12, #0x20]\n"
+ "ldr q11, [x12, #0x30]\n"
"b 15f\n"
"14:" // Height 1: no accumulate
"movi v8.16b, #0x0\n"
@@ -205,136 +205,136 @@ void a64_ffhybrid_fp32_mla_6x16 (
"movi v10.16b, #0x0\n"
"movi v11.16b, #0x0\n"
"15:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"16:" // Height 1: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 17f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 18f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 18f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
"b 18f\n"
"17:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"18:" // Height 1: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"blt 21f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q6, [x12, #0x0]\n"
- "cmp x27, #0x8\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q6, [x11, #0x0]\n"
+ "cmp x26, #0x8\n"
+ "ldr q7, [x10, #0x0]\n"
"blt 20f\n"
"19:" // Height 1: Multiply loop: Main loop head
"fmla v8.4s, v6.4s, v0.s[0]\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q6, [x11, #0x10]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
- "ldr q7, [x11, #0x10]\n"
+ "ldr q7, [x10, #0x10]\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
"fmla v9.4s, v7.4s, v0.s[1]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
"fmla v10.4s, v6.4s, v0.s[1]\n"
- "ldr q6, [x12, #0x20]\n"
+ "ldr q6, [x11, #0x20]\n"
"fmla v11.4s, v7.4s, v0.s[1]\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
"fmla v8.4s, v6.4s, v0.s[2]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
"fmla v9.4s, v7.4s, v0.s[2]\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
"fmla v10.4s, v6.4s, v0.s[2]\n"
- "ldr q6, [x12, #0x30]\n"
+ "ldr q6, [x11, #0x30]\n"
"fmla v11.4s, v7.4s, v0.s[2]\n"
- "ldr q7, [x11, #0x30]\n"
+ "ldr q7, [x10, #0x30]\n"
"fmla v8.4s, v6.4s, v0.s[3]\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
"fmla v9.4s, v7.4s, v0.s[3]\n"
- "ldr q7, [x9, #0x30]\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x8\n"
+ "ldr q7, [x28, #0x30]\n"
+ "sub x26, x26, #0x4\n"
+ "cmp x26, #0x8\n"
"fmla v10.4s, v6.4s, v0.s[3]\n"
"fmla v11.4s, v7.4s, v0.s[3]\n"
- "add x26, x26, #0x10\n"
- "ldr q0, [x26, #0x0]\n"
- "add x12, x12, #0x40\n"
- "ldr q6, [x12, #0x0]\n"
+ "add x25, x25, #0x10\n"
+ "ldr q0, [x25, #0x0]\n"
"add x11, x11, #0x40\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q6, [x11, #0x0]\n"
"add x10, x10, #0x40\n"
+ "ldr q7, [x10, #0x0]\n"
"add x9, x9, #0x40\n"
+ "add x28, x28, #0x40\n"
"bge 19b\n"
"20:" // Height 1: Multiply loop: Single iteration only
"fmla v8.4s, v6.4s, v0.s[0]\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q6, [x11, #0x10]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
- "ldr q7, [x11, #0x10]\n"
+ "ldr q7, [x10, #0x10]\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
"fmla v9.4s, v7.4s, v0.s[1]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
"fmla v10.4s, v6.4s, v0.s[1]\n"
- "ldr q6, [x12, #0x20]\n"
+ "ldr q6, [x11, #0x20]\n"
"fmla v11.4s, v7.4s, v0.s[1]\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
"fmla v8.4s, v6.4s, v0.s[2]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
"fmla v9.4s, v7.4s, v0.s[2]\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
"fmla v10.4s, v6.4s, v0.s[2]\n"
- "ldr q6, [x12, #0x30]\n"
+ "ldr q6, [x11, #0x30]\n"
"fmla v11.4s, v7.4s, v0.s[2]\n"
- "ldr q7, [x11, #0x30]\n"
+ "ldr q7, [x10, #0x30]\n"
"fmla v8.4s, v6.4s, v0.s[3]\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
"fmla v9.4s, v7.4s, v0.s[3]\n"
- "ldr q7, [x9, #0x30]\n"
- "sub x27, x27, #0x4\n"
+ "ldr q7, [x28, #0x30]\n"
+ "sub x26, x26, #0x4\n"
"fmla v10.4s, v6.4s, v0.s[3]\n"
"fmla v11.4s, v7.4s, v0.s[3]\n"
- "add x26, x26, #0x10\n"
- "add x12, x12, #0x40\n"
+ "add x25, x25, #0x10\n"
"add x11, x11, #0x40\n"
"add x10, x10, #0x40\n"
"add x9, x9, #0x40\n"
+ "add x28, x28, #0x40\n"
"21:" // Height 1: Multiply loop: Main loop skip
- "cbz x27, 23f\n"
+ "cbz x26, 23f\n"
"22:" // Height 1: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr q6, [x12, #0x0]\n"
+ "ldr s0, [x25], #0x4\n"
+ "ldr q6, [x11, #0x0]\n"
"fmla v8.4s, v6.4s, v0.s[0]\n"
- "sub x27, x27, #0x1\n"
- "ldr q7, [x11, #0x0]\n"
- "ldr q6, [x10, #0x0]\n"
+ "sub x26, x26, #0x1\n"
+ "ldr q7, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
- "add x12, x12, #0x10\n"
"add x11, x11, #0x10\n"
"add x10, x10, #0x10\n"
"add x9, x9, #0x10\n"
- "cbnz x27, 22b\n"
+ "add x28, x28, #0x10\n"
+ "cbnz x26, 22b\n"
"23:" // Height 1: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 16b\n"
"tbz %x[flags], #1, 24f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.4s }, [x19]\n"
"fmin v8.4s, v8.4s, v1.4s\n"
"fmin v9.4s, v9.4s, v1.4s\n"
"fmin v10.4s, v10.4s, v1.4s\n"
@@ -344,185 +344,185 @@ void a64_ffhybrid_fp32_mla_6x16 (
"fmax v10.4s, v10.4s, v0.4s\n"
"fmax v11.4s, v11.4s, v0.4s\n"
"24:" // Height 1: No activation
- "cmp x14, #0x10\n"
+ "cmp x13, #0x10\n"
"bge 33f\n"
- "tbz x14, #3, 28f\n"
- "st1 { v8.4s }, [x13], #0x10\n"
- "st1 { v9.4s }, [x13], #0x10\n"
- "tbz x14, #2, 26f\n"
- "st1 { v10.4s }, [x13], #0x10\n"
- "tbz x14, #1, 25f\n"
- "str d11, [x13], #0x8\n"
- "tbz x14, #0, 32f\n"
- "st1 { v11.s }[2], [x13]\n"
+ "tbz x13, #3, 28f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "st1 { v9.4s }, [x12], #0x10\n"
+ "tbz x13, #2, 26f\n"
+ "st1 { v10.4s }, [x12], #0x10\n"
+ "tbz x13, #1, 25f\n"
+ "str d11, [x12], #0x8\n"
+ "tbz x13, #0, 32f\n"
+ "st1 { v11.s }[2], [x12]\n"
"b 32f\n"
"25:" // Height 1: Partial direct writeback: partial_1_12
- "tbz x14, #0, 32f\n"
- "str s11, [x13, #0x0]\n"
+ "tbz x13, #0, 32f\n"
+ "str s11, [x12, #0x0]\n"
"b 32f\n"
"26:" // Height 1: Partial direct writeback: partial_2_8
- "tbz x14, #1, 27f\n"
- "str d10, [x13], #0x8\n"
- "tbz x14, #0, 32f\n"
- "st1 { v10.s }[2], [x13]\n"
+ "tbz x13, #1, 27f\n"
+ "str d10, [x12], #0x8\n"
+ "tbz x13, #0, 32f\n"
+ "st1 { v10.s }[2], [x12]\n"
"b 32f\n"
"27:" // Height 1: Partial direct writeback: partial_1_8
- "tbz x14, #0, 32f\n"
- "str s10, [x13, #0x0]\n"
+ "tbz x13, #0, 32f\n"
+ "str s10, [x12, #0x0]\n"
"b 32f\n"
"28:" // Height 1: Partial direct writeback: partial_4_0
- "tbz x14, #2, 30f\n"
- "st1 { v8.4s }, [x13], #0x10\n"
- "tbz x14, #1, 29f\n"
- "str d9, [x13], #0x8\n"
- "tbz x14, #0, 32f\n"
- "st1 { v9.s }[2], [x13]\n"
+ "tbz x13, #2, 30f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "tbz x13, #1, 29f\n"
+ "str d9, [x12], #0x8\n"
+ "tbz x13, #0, 32f\n"
+ "st1 { v9.s }[2], [x12]\n"
"b 32f\n"
"29:" // Height 1: Partial direct writeback: partial_1_4
- "tbz x14, #0, 32f\n"
- "str s9, [x13, #0x0]\n"
+ "tbz x13, #0, 32f\n"
+ "str s9, [x12, #0x0]\n"
"b 32f\n"
"30:" // Height 1: Partial direct writeback: partial_2_0
- "tbz x14, #1, 31f\n"
- "str d8, [x13], #0x8\n"
- "tbz x14, #0, 32f\n"
- "st1 { v8.s }[2], [x13]\n"
+ "tbz x13, #1, 31f\n"
+ "str d8, [x12], #0x8\n"
+ "tbz x13, #0, 32f\n"
+ "st1 { v8.s }[2], [x12]\n"
"b 32f\n"
"31:" // Height 1: Partial direct writeback: partial_1_0
- "str s8, [x13, #0x0]\n"
+ "str s8, [x12, #0x0]\n"
"32:" // Height 1: Partial direct writeback: Done
"b 34f\n"
"33:" // Height 1: Full writeback
- "str q8, [x13, #0x0]\n"
- "str q9, [x13, #0x10]\n"
- "str q10, [x13, #0x20]\n"
- "str q11, [x13, #0x30]\n"
- "add x13, x13, #0x40\n"
+ "str q8, [x12, #0x0]\n"
+ "str q9, [x12, #0x10]\n"
+ "str q10, [x12, #0x20]\n"
+ "str q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
"34:" // Height 1: Writeback done
- "subs x14, x14, #0x10\n"
+ "subs x13, x13, #0x10\n"
"bgt 2b\n"
"b 206f\n"
"35:" // Height 2
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"36:" // Height 2: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #2\n"
- "add x10, x11, x20, LSL #2\n"
- "add x9, x10, x20, LSL #2\n"
- "add x20, x9, x20, LSL #2\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0xc\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0xc\n"
"bgt 37f\n"
- "cmp x14, #0x8\n"
- "mov x9, x12\n"
+ "cmp x13, #0x8\n"
+ "mov x28, x11\n"
"bgt 37f\n"
- "cmp x14, #0x4\n"
- "mov x10, x12\n"
+ "cmp x13, #0x4\n"
+ "mov x9, x11\n"
"bgt 37f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"37:" // Height 2: B setup done
- "cbz x15, 38f\n"
- "ldr q8, [x15, #0x0]\n"
- "ldr q9, [x15, #0x10]\n"
+ "cbz x14, 38f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
"mov v12.16b, v8.16b\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x15, #0x20]\n"
- "ldr q11, [x15, #0x30]\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
"mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
- "add x15, x15, #0x40\n"
+ "add x14, x14, #0x40\n"
"b 49f\n"
"38:" // Height 2: no bias
"tbz %x[flags], #0, 48f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "cmp x14, #0x10\n"
- "add x25, x13, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x13, #0x10\n"
+ "add x24, x12, x19, LSL #2\n"
"bge 47f\n"
- "tbz x14, #3, 42f\n"
- "ld1 { v8.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "tbz x14, #2, 40f\n"
- "ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "tbz x14, #1, 39f\n"
- "ldr d11, [x13], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "mov x20, #0x38\n"
- "tbz x14, #0, 46f\n"
- "ld1 { v11.s }[2], [x13]\n"
- "ld1 { v15.s }[2], [x25]\n"
+ "tbz x13, #3, 42f\n"
+ "ld1 { v8.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "tbz x13, #2, 40f\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "tbz x13, #1, 39f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "mov x19, #0x38\n"
+ "tbz x13, #0, 46f\n"
+ "ld1 { v11.s }[2], [x12]\n"
+ "ld1 { v15.s }[2], [x24]\n"
"b 46f\n"
"39:" // Height 2: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
- "tbz x14, #0, 46f\n"
- "ldr s11, [x13, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 46f\n"
+ "ldr s11, [x12, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
"b 46f\n"
"40:" // Height 2: Partial accumulate: partial_2_8
- "tbz x14, #1, 41f\n"
- "ldr d10, [x13], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "mov x20, #0x28\n"
- "tbz x14, #0, 46f\n"
- "ld1 { v10.s }[2], [x13]\n"
- "ld1 { v14.s }[2], [x25]\n"
+ "tbz x13, #1, 41f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "tbz x13, #0, 46f\n"
+ "ld1 { v10.s }[2], [x12]\n"
+ "ld1 { v14.s }[2], [x24]\n"
"b 46f\n"
"41:" // Height 2: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
- "tbz x14, #0, 46f\n"
- "ldr s10, [x13, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 46f\n"
+ "ldr s10, [x12, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
"b 46f\n"
"42:" // Height 2: Partial accumulate: partial_4_0
- "tbz x14, #2, 44f\n"
- "ld1 { v8.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "tbz x14, #1, 43f\n"
- "ldr d9, [x13], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "mov x20, #0x18\n"
- "tbz x14, #0, 46f\n"
- "ld1 { v9.s }[2], [x13]\n"
- "ld1 { v13.s }[2], [x25]\n"
+ "tbz x13, #2, 44f\n"
+ "ld1 { v8.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "tbz x13, #1, 43f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "mov x19, #0x18\n"
+ "tbz x13, #0, 46f\n"
+ "ld1 { v9.s }[2], [x12]\n"
+ "ld1 { v13.s }[2], [x24]\n"
"b 46f\n"
"43:" // Height 2: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
- "tbz x14, #0, 46f\n"
- "ldr s9, [x13, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 46f\n"
+ "ldr s9, [x12, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
"b 46f\n"
"44:" // Height 2: Partial accumulate: partial_2_0
- "tbz x14, #1, 45f\n"
- "ldr d8, [x13], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "mov x20, #0x8\n"
- "tbz x14, #0, 46f\n"
- "ld1 { v8.s }[2], [x13]\n"
- "ld1 { v12.s }[2], [x25]\n"
+ "tbz x13, #1, 45f\n"
+ "ldr d8, [x12], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "tbz x13, #0, 46f\n"
+ "ld1 { v8.s }[2], [x12]\n"
+ "ld1 { v12.s }[2], [x24]\n"
"b 46f\n"
"45:" // Height 2: Partial accumulate: partial_1_0
- "ldr s8, [x13, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr s8, [x12, #0x0]\n"
+ "ldr s12, [x24, #0x0]\n"
+ "mov x19, #0x0\n"
"46:" // Height 2: Partial accumulate: Done
- "sub x13, x13, x20\n"
+ "sub x12, x12, x19\n"
"b 49f\n"
"47:" // Height 2: full accumulate
- "ldr q8, [x13, #0x0]\n"
- "ldr q9, [x13, #0x10]\n"
- "ldr q10, [x13, #0x20]\n"
- "ldr q11, [x13, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
+ "ldr q8, [x12, #0x0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "ldr q10, [x12, #0x20]\n"
+ "ldr q11, [x12, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
"b 49f\n"
"48:" // Height 2: no accumulate
"movi v8.16b, #0x0\n"
@@ -534,182 +534,182 @@ void a64_ffhybrid_fp32_mla_6x16 (
"movi v14.16b, #0x0\n"
"movi v15.16b, #0x0\n"
"49:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"50:" // Height 2: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 51f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 52f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 52f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
"b 52f\n"
"51:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
"52:" // Height 2: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"blt 55f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x8\n"
- "ldr q6, [x12, #0x0]\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x8\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"blt 54f\n"
"53:" // Height 2: Multiply loop: Main loop head
"fmla v8.4s, v6.4s, v0.s[0]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
- "ldr q6, [x10, #0x0]\n"
- "sub x27, x27, #0x4\n"
+ "ldr q6, [x9, #0x0]\n"
+ "sub x26, x26, #0x4\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
- "ldr q7, [x9, #0x0]\n"
- "cmp x27, #0x8\n"
+ "ldr q7, [x28, #0x0]\n"
+ "cmp x26, #0x8\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
- "ldr q6, [x12, #0x10]\n"
- "add x26, x26, #0x10\n"
+ "ldr q6, [x11, #0x10]\n"
+ "add x25, x25, #0x10\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
"fmla v15.4s, v7.4s, v1.s[0]\n"
- "ldr q7, [x11, #0x10]\n"
- "add x25, x25, #0x10\n"
+ "ldr q7, [x10, #0x10]\n"
+ "add x24, x24, #0x10\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
"fmla v12.4s, v6.4s, v1.s[1]\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
"fmla v9.4s, v7.4s, v0.s[1]\n"
"fmla v13.4s, v7.4s, v1.s[1]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
"fmla v10.4s, v6.4s, v0.s[1]\n"
"fmla v14.4s, v6.4s, v1.s[1]\n"
- "ldr q6, [x12, #0x20]\n"
+ "ldr q6, [x11, #0x20]\n"
"fmla v11.4s, v7.4s, v0.s[1]\n"
"fmla v15.4s, v7.4s, v1.s[1]\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
"fmla v8.4s, v6.4s, v0.s[2]\n"
"fmla v12.4s, v6.4s, v1.s[2]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
"fmla v9.4s, v7.4s, v0.s[2]\n"
"fmla v13.4s, v7.4s, v1.s[2]\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
"fmla v10.4s, v6.4s, v0.s[2]\n"
"fmla v14.4s, v6.4s, v1.s[2]\n"
- "ldr q6, [x12, #0x30]\n"
- "add x12, x12, #0x40\n"
+ "ldr q6, [x11, #0x30]\n"
+ "add x11, x11, #0x40\n"
"fmla v11.4s, v7.4s, v0.s[2]\n"
"fmla v15.4s, v7.4s, v1.s[2]\n"
- "ldr q7, [x11, #0x30]\n"
- "add x11, x11, #0x40\n"
+ "ldr q7, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
"fmla v8.4s, v6.4s, v0.s[3]\n"
"fmla v12.4s, v6.4s, v1.s[3]\n"
- "ldr q6, [x10, #0x30]\n"
- "add x10, x10, #0x40\n"
+ "ldr q6, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
"fmla v9.4s, v7.4s, v0.s[3]\n"
"fmla v13.4s, v7.4s, v1.s[3]\n"
- "ldr q7, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
"fmla v10.4s, v6.4s, v0.s[3]\n"
"fmla v14.4s, v6.4s, v1.s[3]\n"
- "ldr q6, [x12, #0x0]\n"
+ "ldr q6, [x11, #0x0]\n"
"fmla v11.4s, v7.4s, v0.s[3]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
"fmla v15.4s, v7.4s, v1.s[3]\n"
- "ldr q1, [x25, #0x0]\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"bge 53b\n"
"54:" // Height 2: Multiply loop: Single iteration only
"fmla v8.4s, v6.4s, v0.s[0]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
- "ldr q6, [x10, #0x0]\n"
- "sub x27, x27, #0x4\n"
+ "ldr q6, [x9, #0x0]\n"
+ "sub x26, x26, #0x4\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
- "ldr q7, [x9, #0x0]\n"
- "add x26, x26, #0x10\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x25, x25, #0x10\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
- "ldr q6, [x12, #0x10]\n"
- "add x25, x25, #0x10\n"
+ "ldr q6, [x11, #0x10]\n"
+ "add x24, x24, #0x10\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
"fmla v15.4s, v7.4s, v1.s[0]\n"
- "ldr q7, [x11, #0x10]\n"
+ "ldr q7, [x10, #0x10]\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
"fmla v12.4s, v6.4s, v1.s[1]\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
"fmla v9.4s, v7.4s, v0.s[1]\n"
"fmla v13.4s, v7.4s, v1.s[1]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
"fmla v10.4s, v6.4s, v0.s[1]\n"
"fmla v14.4s, v6.4s, v1.s[1]\n"
- "ldr q6, [x12, #0x20]\n"
+ "ldr q6, [x11, #0x20]\n"
"fmla v11.4s, v7.4s, v0.s[1]\n"
"fmla v15.4s, v7.4s, v1.s[1]\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
"fmla v8.4s, v6.4s, v0.s[2]\n"
"fmla v12.4s, v6.4s, v1.s[2]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
"fmla v9.4s, v7.4s, v0.s[2]\n"
"fmla v13.4s, v7.4s, v1.s[2]\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
"fmla v10.4s, v6.4s, v0.s[2]\n"
"fmla v14.4s, v6.4s, v1.s[2]\n"
- "ldr q6, [x12, #0x30]\n"
- "add x12, x12, #0x40\n"
+ "ldr q6, [x11, #0x30]\n"
+ "add x11, x11, #0x40\n"
"fmla v11.4s, v7.4s, v0.s[2]\n"
"fmla v15.4s, v7.4s, v1.s[2]\n"
- "ldr q7, [x11, #0x30]\n"
- "add x11, x11, #0x40\n"
+ "ldr q7, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
"fmla v8.4s, v6.4s, v0.s[3]\n"
"fmla v12.4s, v6.4s, v1.s[3]\n"
- "ldr q6, [x10, #0x30]\n"
- "add x10, x10, #0x40\n"
+ "ldr q6, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
"fmla v9.4s, v7.4s, v0.s[3]\n"
"fmla v13.4s, v7.4s, v1.s[3]\n"
- "ldr q7, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
"fmla v10.4s, v6.4s, v0.s[3]\n"
"fmla v14.4s, v6.4s, v1.s[3]\n"
"fmla v11.4s, v7.4s, v0.s[3]\n"
"fmla v15.4s, v7.4s, v1.s[3]\n"
"55:" // Height 2: Multiply loop: Main loop skip
- "cbz x27, 57f\n"
+ "cbz x26, 57f\n"
"56:" // Height 2: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr s1, [x25], #0x4\n"
- "sub x27, x27, #0x1\n"
- "ldr q6, [x12, #0x0]\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr s0, [x25], #0x4\n"
+ "ldr s1, [x24], #0x4\n"
+ "sub x26, x26, #0x1\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"fmla v8.4s, v6.4s, v0.s[0]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
- "add x12, x12, #0x10\n"
"add x11, x11, #0x10\n"
+ "add x10, x10, #0x10\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
"fmla v15.4s, v7.4s, v1.s[0]\n"
- "add x10, x10, #0x10\n"
"add x9, x9, #0x10\n"
- "cbnz x27, 56b\n"
+ "add x28, x28, #0x10\n"
+ "cbnz x26, 56b\n"
"57:" // Height 2: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 50b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
"tbz %x[flags], #1, 58f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.4s }, [x19]\n"
"fmin v8.4s, v8.4s, v1.4s\n"
"fmin v9.4s, v9.4s, v1.4s\n"
"fmin v10.4s, v10.4s, v1.4s\n"
@@ -727,230 +727,230 @@ void a64_ffhybrid_fp32_mla_6x16 (
"fmax v14.4s, v14.4s, v0.4s\n"
"fmax v15.4s, v15.4s, v0.4s\n"
"58:" // Height 2: No activation
- "cmp x14, #0x10\n"
+ "cmp x13, #0x10\n"
"bge 67f\n"
- "tbz x14, #3, 62f\n"
- "st1 { v8.4s }, [x13], #0x10\n"
- "st1 { v9.4s }, [x13], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
- "tbz x14, #2, 60f\n"
- "st1 { v10.4s }, [x13], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
- "tbz x14, #1, 59f\n"
- "str d11, [x13], #0x8\n"
- "str d15, [x25], #0x8\n"
- "tbz x14, #0, 66f\n"
- "st1 { v11.s }[2], [x13]\n"
- "st1 { v15.s }[2], [x25]\n"
+ "tbz x13, #3, 62f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "st1 { v9.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v13.4s }, [x24], #0x10\n"
+ "tbz x13, #2, 60f\n"
+ "st1 { v10.4s }, [x12], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "tbz x13, #1, 59f\n"
+ "str d11, [x12], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "tbz x13, #0, 66f\n"
+ "st1 { v11.s }[2], [x12]\n"
+ "st1 { v15.s }[2], [x24]\n"
"b 66f\n"
"59:" // Height 2: Partial direct writeback: partial_1_12
- "tbz x14, #0, 66f\n"
- "str s11, [x13, #0x0]\n"
- "str s15, [x25, #0x0]\n"
+ "tbz x13, #0, 66f\n"
+ "str s11, [x12, #0x0]\n"
+ "str s15, [x24, #0x0]\n"
"b 66f\n"
"60:" // Height 2: Partial direct writeback: partial_2_8
- "tbz x14, #1, 61f\n"
- "str d10, [x13], #0x8\n"
- "str d14, [x25], #0x8\n"
- "tbz x14, #0, 66f\n"
- "st1 { v10.s }[2], [x13]\n"
- "st1 { v14.s }[2], [x25]\n"
+ "tbz x13, #1, 61f\n"
+ "str d10, [x12], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "tbz x13, #0, 66f\n"
+ "st1 { v10.s }[2], [x12]\n"
+ "st1 { v14.s }[2], [x24]\n"
"b 66f\n"
"61:" // Height 2: Partial direct writeback: partial_1_8
- "tbz x14, #0, 66f\n"
- "str s10, [x13, #0x0]\n"
- "str s14, [x25, #0x0]\n"
+ "tbz x13, #0, 66f\n"
+ "str s10, [x12, #0x0]\n"
+ "str s14, [x24, #0x0]\n"
"b 66f\n"
"62:" // Height 2: Partial direct writeback: partial_4_0
- "tbz x14, #2, 64f\n"
- "st1 { v8.4s }, [x13], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "tbz x14, #1, 63f\n"
- "str d9, [x13], #0x8\n"
- "str d13, [x25], #0x8\n"
- "tbz x14, #0, 66f\n"
- "st1 { v9.s }[2], [x13]\n"
- "st1 { v13.s }[2], [x25]\n"
+ "tbz x13, #2, 64f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "tbz x13, #1, 63f\n"
+ "str d9, [x12], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "tbz x13, #0, 66f\n"
+ "st1 { v9.s }[2], [x12]\n"
+ "st1 { v13.s }[2], [x24]\n"
"b 66f\n"
"63:" // Height 2: Partial direct writeback: partial_1_4
- "tbz x14, #0, 66f\n"
- "str s9, [x13, #0x0]\n"
- "str s13, [x25, #0x0]\n"
+ "tbz x13, #0, 66f\n"
+ "str s9, [x12, #0x0]\n"
+ "str s13, [x24, #0x0]\n"
"b 66f\n"
"64:" // Height 2: Partial direct writeback: partial_2_0
- "tbz x14, #1, 65f\n"
- "str d8, [x13], #0x8\n"
- "str d12, [x25], #0x8\n"
- "tbz x14, #0, 66f\n"
- "st1 { v8.s }[2], [x13]\n"
- "st1 { v12.s }[2], [x25]\n"
+ "tbz x13, #1, 65f\n"
+ "str d8, [x12], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "tbz x13, #0, 66f\n"
+ "st1 { v8.s }[2], [x12]\n"
+ "st1 { v12.s }[2], [x24]\n"
"b 66f\n"
"65:" // Height 2: Partial direct writeback: partial_1_0
- "str s8, [x13, #0x0]\n"
- "str s12, [x25, #0x0]\n"
+ "str s8, [x12, #0x0]\n"
+ "str s12, [x24, #0x0]\n"
"66:" // Height 2: Partial direct writeback: Done
"b 68f\n"
"67:" // Height 2: Full writeback
- "str q8, [x13, #0x0]\n"
- "str q9, [x13, #0x10]\n"
- "str q10, [x13, #0x20]\n"
- "str q11, [x13, #0x30]\n"
- "add x13, x13, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
+ "str q8, [x12, #0x0]\n"
+ "str q9, [x12, #0x10]\n"
+ "str q10, [x12, #0x20]\n"
+ "str q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
"68:" // Height 2: Writeback done
- "subs x14, x14, #0x10\n"
+ "subs x13, x13, #0x10\n"
"bgt 36b\n"
"b 206f\n"
"69:" // Height 3
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"70:" // Height 3: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #2\n"
- "add x10, x11, x20, LSL #2\n"
- "add x9, x10, x20, LSL #2\n"
- "add x20, x9, x20, LSL #2\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0xc\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0xc\n"
"bgt 71f\n"
- "cmp x14, #0x8\n"
- "mov x9, x12\n"
+ "cmp x13, #0x8\n"
+ "mov x28, x11\n"
"bgt 71f\n"
- "cmp x14, #0x4\n"
- "mov x10, x12\n"
+ "cmp x13, #0x4\n"
+ "mov x9, x11\n"
"bgt 71f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"71:" // Height 3: B setup done
- "cbz x15, 72f\n"
- "ldr q8, [x15, #0x0]\n"
- "ldr q9, [x15, #0x10]\n"
+ "cbz x14, 72f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
"mov v12.16b, v8.16b\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x15, #0x20]\n"
- "ldr q11, [x15, #0x30]\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
"mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
"mov v16.16b, v8.16b\n"
"mov v17.16b, v9.16b\n"
- "add x15, x15, #0x40\n"
+ "add x14, x14, #0x40\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
"b 83f\n"
"72:" // Height 3: no bias
"tbz %x[flags], #0, 82f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "cmp x14, #0x10\n"
- "add x24, x25, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "cmp x13, #0x10\n"
+ "add x23, x24, x19, LSL #2\n"
"bge 81f\n"
- "tbz x14, #3, 76f\n"
- "ld1 { v8.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "tbz x14, #2, 74f\n"
- "ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "tbz x14, #1, 73f\n"
- "ldr d11, [x13], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "mov x20, #0x38\n"
- "ldr d19, [x24], #0x8\n"
- "tbz x14, #0, 80f\n"
- "ld1 { v11.s }[2], [x13]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
+ "tbz x13, #3, 76f\n"
+ "ld1 { v8.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "tbz x13, #2, 74f\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "tbz x13, #1, 73f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d19, [x23], #0x8\n"
+ "tbz x13, #0, 80f\n"
+ "ld1 { v11.s }[2], [x12]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
"b 80f\n"
"73:" // Height 3: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
- "tbz x14, #0, 80f\n"
- "ldr s11, [x13, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 80f\n"
+ "ldr s11, [x12, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
"b 80f\n"
"74:" // Height 3: Partial accumulate: partial_2_8
- "tbz x14, #1, 75f\n"
- "ldr d10, [x13], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "mov x20, #0x28\n"
- "ldr d18, [x24], #0x8\n"
- "tbz x14, #0, 80f\n"
- "ld1 { v10.s }[2], [x13]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
+ "tbz x13, #1, 75f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d18, [x23], #0x8\n"
+ "tbz x13, #0, 80f\n"
+ "ld1 { v10.s }[2], [x12]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
"b 80f\n"
"75:" // Height 3: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
- "tbz x14, #0, 80f\n"
- "ldr s10, [x13, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 80f\n"
+ "ldr s10, [x12, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
"b 80f\n"
"76:" // Height 3: Partial accumulate: partial_4_0
- "tbz x14, #2, 78f\n"
- "ld1 { v8.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "tbz x14, #1, 77f\n"
- "ldr d9, [x13], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "mov x20, #0x18\n"
- "ldr d17, [x24], #0x8\n"
- "tbz x14, #0, 80f\n"
- "ld1 { v9.s }[2], [x13]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
+ "tbz x13, #2, 78f\n"
+ "ld1 { v8.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "tbz x13, #1, 77f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d17, [x23], #0x8\n"
+ "tbz x13, #0, 80f\n"
+ "ld1 { v9.s }[2], [x12]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
"b 80f\n"
"77:" // Height 3: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
- "tbz x14, #0, 80f\n"
- "ldr s9, [x13, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s17, [x24, #0x0]\n"
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 80f\n"
+ "ldr s9, [x12, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s17, [x23, #0x0]\n"
"b 80f\n"
"78:" // Height 3: Partial accumulate: partial_2_0
- "tbz x14, #1, 79f\n"
- "ldr d8, [x13], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "tbz x14, #0, 80f\n"
- "ld1 { v8.s }[2], [x13]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v16.s }[2], [x24]\n"
+ "tbz x13, #1, 79f\n"
+ "ldr d8, [x12], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d16, [x23], #0x8\n"
+ "tbz x13, #0, 80f\n"
+ "ld1 { v8.s }[2], [x12]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v16.s }[2], [x23]\n"
"b 80f\n"
"79:" // Height 3: Partial accumulate: partial_1_0
- "ldr s8, [x13, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s16, [x24, #0x0]\n"
+ "ldr s8, [x12, #0x0]\n"
+ "ldr s12, [x24, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s16, [x23, #0x0]\n"
"80:" // Height 3: Partial accumulate: Done
- "sub x13, x13, x20\n"
+ "sub x12, x12, x19\n"
"b 83f\n"
"81:" // Height 3: full accumulate
- "ldr q8, [x13, #0x0]\n"
- "ldr q9, [x13, #0x10]\n"
- "ldr q10, [x13, #0x20]\n"
- "ldr q11, [x13, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
+ "ldr q8, [x12, #0x0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "ldr q10, [x12, #0x20]\n"
+ "ldr q11, [x12, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x0]\n"
+ "ldr q17, [x23, #0x10]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "ldr q19, [x23, #0x30]\n"
"b 83f\n"
"82:" // Height 3: no accumulate
"movi v8.16b, #0x0\n"
@@ -966,180 +966,180 @@ void a64_ffhybrid_fp32_mla_6x16 (
"movi v18.16b, #0x0\n"
"movi v19.16b, #0x0\n"
"83:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"84:" // Height 3: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 85f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 86f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 86f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
"b 86f\n"
"85:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"86:" // Height 3: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"blt 89f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x8\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q6, [x12, #0x0]\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x8\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"blt 88f\n"
"87:" // Height 3: Multiply loop: Main loop head
"fmla v8.4s, v6.4s, v0.s[0]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x8\n"
+ "sub x26, x26, #0x4\n"
+ "cmp x26, #0x8\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
- "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
- "ldr q7, [x9, #0x0]\n"
- "add x25, x25, #0x10\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x24, x24, #0x10\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
- "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
"fmla v18.4s, v6.4s, v2.s[0]\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q6, [x11, #0x10]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
"fmla v15.4s, v7.4s, v1.s[0]\n"
"fmla v19.4s, v7.4s, v2.s[0]\n"
- "ldr q7, [x11, #0x10]\n"
+ "ldr q7, [x10, #0x10]\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
"fmla v12.4s, v6.4s, v1.s[1]\n"
"fmla v16.4s, v6.4s, v2.s[1]\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
"fmla v9.4s, v7.4s, v0.s[1]\n"
"fmla v13.4s, v7.4s, v1.s[1]\n"
"fmla v17.4s, v7.4s, v2.s[1]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
"fmla v10.4s, v6.4s, v0.s[1]\n"
"fmla v14.4s, v6.4s, v1.s[1]\n"
"fmla v18.4s, v6.4s, v2.s[1]\n"
- "ldr q6, [x12, #0x20]\n"
+ "ldr q6, [x11, #0x20]\n"
"fmla v11.4s, v7.4s, v0.s[1]\n"
"fmla v15.4s, v7.4s, v1.s[1]\n"
"fmla v19.4s, v7.4s, v2.s[1]\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
"fmla v8.4s, v6.4s, v0.s[2]\n"
"fmla v12.4s, v6.4s, v1.s[2]\n"
"fmla v16.4s, v6.4s, v2.s[2]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
"fmla v9.4s, v7.4s, v0.s[2]\n"
"fmla v13.4s, v7.4s, v1.s[2]\n"
"fmla v17.4s, v7.4s, v2.s[2]\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
"fmla v10.4s, v6.4s, v0.s[2]\n"
"fmla v14.4s, v6.4s, v1.s[2]\n"
"fmla v18.4s, v6.4s, v2.s[2]\n"
- "ldr q6, [x12, #0x30]\n"
+ "ldr q6, [x11, #0x30]\n"
"fmla v11.4s, v7.4s, v0.s[2]\n"
- "add x12, x12, #0x40\n"
+ "add x11, x11, #0x40\n"
"fmla v15.4s, v7.4s, v1.s[2]\n"
"fmla v19.4s, v7.4s, v2.s[2]\n"
- "ldr q7, [x11, #0x30]\n"
- "add x11, x11, #0x40\n"
+ "ldr q7, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
"fmla v8.4s, v6.4s, v0.s[3]\n"
"fmla v12.4s, v6.4s, v1.s[3]\n"
"fmla v16.4s, v6.4s, v2.s[3]\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
"fmla v9.4s, v7.4s, v0.s[3]\n"
- "add x10, x10, #0x40\n"
+ "add x9, x9, #0x40\n"
"fmla v13.4s, v7.4s, v1.s[3]\n"
"fmla v17.4s, v7.4s, v2.s[3]\n"
- "ldr q7, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
"fmla v10.4s, v6.4s, v0.s[3]\n"
"fmla v14.4s, v6.4s, v1.s[3]\n"
"fmla v18.4s, v6.4s, v2.s[3]\n"
- "ldr q6, [x12, #0x0]\n"
+ "ldr q6, [x11, #0x0]\n"
"fmla v11.4s, v7.4s, v0.s[3]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
"fmla v15.4s, v7.4s, v1.s[3]\n"
- "ldr q1, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
"fmla v19.4s, v7.4s, v2.s[3]\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"bge 87b\n"
"88:" // Height 3: Multiply loop: Single iteration only
"fmla v8.4s, v6.4s, v0.s[0]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
- "sub x27, x27, #0x4\n"
- "add x26, x26, #0x10\n"
+ "sub x26, x26, #0x4\n"
+ "add x25, x25, #0x10\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
- "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
- "ldr q7, [x9, #0x0]\n"
- "add x24, x24, #0x10\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x23, x23, #0x10\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
"fmla v18.4s, v6.4s, v2.s[0]\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q6, [x11, #0x10]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
"fmla v15.4s, v7.4s, v1.s[0]\n"
"fmla v19.4s, v7.4s, v2.s[0]\n"
- "ldr q7, [x11, #0x10]\n"
+ "ldr q7, [x10, #0x10]\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
"fmla v12.4s, v6.4s, v1.s[1]\n"
"fmla v16.4s, v6.4s, v2.s[1]\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
"fmla v9.4s, v7.4s, v0.s[1]\n"
"fmla v13.4s, v7.4s, v1.s[1]\n"
"fmla v17.4s, v7.4s, v2.s[1]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
"fmla v10.4s, v6.4s, v0.s[1]\n"
"fmla v14.4s, v6.4s, v1.s[1]\n"
"fmla v18.4s, v6.4s, v2.s[1]\n"
- "ldr q6, [x12, #0x20]\n"
+ "ldr q6, [x11, #0x20]\n"
"fmla v11.4s, v7.4s, v0.s[1]\n"
"fmla v15.4s, v7.4s, v1.s[1]\n"
"fmla v19.4s, v7.4s, v2.s[1]\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
"fmla v8.4s, v6.4s, v0.s[2]\n"
"fmla v12.4s, v6.4s, v1.s[2]\n"
"fmla v16.4s, v6.4s, v2.s[2]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
"fmla v9.4s, v7.4s, v0.s[2]\n"
"fmla v13.4s, v7.4s, v1.s[2]\n"
"fmla v17.4s, v7.4s, v2.s[2]\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
"fmla v10.4s, v6.4s, v0.s[2]\n"
"fmla v14.4s, v6.4s, v1.s[2]\n"
"fmla v18.4s, v6.4s, v2.s[2]\n"
- "ldr q6, [x12, #0x30]\n"
+ "ldr q6, [x11, #0x30]\n"
"fmla v11.4s, v7.4s, v0.s[2]\n"
- "add x12, x12, #0x40\n"
+ "add x11, x11, #0x40\n"
"fmla v15.4s, v7.4s, v1.s[2]\n"
"fmla v19.4s, v7.4s, v2.s[2]\n"
- "ldr q7, [x11, #0x30]\n"
- "add x11, x11, #0x40\n"
+ "ldr q7, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
"fmla v8.4s, v6.4s, v0.s[3]\n"
"fmla v12.4s, v6.4s, v1.s[3]\n"
"fmla v16.4s, v6.4s, v2.s[3]\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
"fmla v9.4s, v7.4s, v0.s[3]\n"
- "add x10, x10, #0x40\n"
+ "add x9, x9, #0x40\n"
"fmla v13.4s, v7.4s, v1.s[3]\n"
"fmla v17.4s, v7.4s, v2.s[3]\n"
- "ldr q7, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
"fmla v10.4s, v6.4s, v0.s[3]\n"
"fmla v14.4s, v6.4s, v1.s[3]\n"
"fmla v18.4s, v6.4s, v2.s[3]\n"
@@ -1147,46 +1147,46 @@ void a64_ffhybrid_fp32_mla_6x16 (
"fmla v15.4s, v7.4s, v1.s[3]\n"
"fmla v19.4s, v7.4s, v2.s[3]\n"
"89:" // Height 3: Multiply loop: Main loop skip
- "cbz x27, 91f\n"
+ "cbz x26, 91f\n"
"90:" // Height 3: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr s1, [x25], #0x4\n"
- "sub x27, x27, #0x1\n"
- "ldr s2, [x24], #0x4\n"
- "ldr q6, [x12, #0x0]\n"
+ "ldr s0, [x25], #0x4\n"
+ "ldr s1, [x24], #0x4\n"
+ "sub x26, x26, #0x1\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr q6, [x11, #0x0]\n"
"fmla v8.4s, v6.4s, v0.s[0]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
- "ldr q7, [x9, #0x0]\n"
- "add x12, x12, #0x10\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x11, x11, #0x10\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
- "add x11, x11, #0x10\n"
"add x10, x10, #0x10\n"
+ "add x9, x9, #0x10\n"
"fmla v18.4s, v6.4s, v2.s[0]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
- "add x9, x9, #0x10\n"
+ "add x28, x28, #0x10\n"
"fmla v15.4s, v7.4s, v1.s[0]\n"
"fmla v19.4s, v7.4s, v2.s[0]\n"
- "cbnz x27, 90b\n"
+ "cbnz x26, 90b\n"
"91:" // Height 3: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 84b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"tbz %x[flags], #1, 92f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.4s }, [x19]\n"
"fmin v8.4s, v8.4s, v1.4s\n"
"fmin v9.4s, v9.4s, v1.4s\n"
"fmin v10.4s, v10.4s, v1.4s\n"
@@ -1212,140 +1212,140 @@ void a64_ffhybrid_fp32_mla_6x16 (
"fmax v18.4s, v18.4s, v0.4s\n"
"fmax v19.4s, v19.4s, v0.4s\n"
"92:" // Height 3: No activation
- "cmp x14, #0x10\n"
+ "cmp x13, #0x10\n"
"bge 101f\n"
- "tbz x14, #3, 96f\n"
- "st1 { v8.4s }, [x13], #0x10\n"
- "st1 { v9.4s }, [x13], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
- "tbz x14, #2, 94f\n"
- "st1 { v10.4s }, [x13], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
- "tbz x14, #1, 93f\n"
- "str d11, [x13], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "tbz x14, #0, 100f\n"
- "st1 { v11.s }[2], [x13]\n"
- "st1 { v15.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
+ "tbz x13, #3, 96f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "st1 { v9.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v13.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v17.4s }, [x23], #0x10\n"
+ "tbz x13, #2, 94f\n"
+ "st1 { v10.4s }, [x12], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v18.4s }, [x23], #0x10\n"
+ "tbz x13, #1, 93f\n"
+ "str d11, [x12], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "tbz x13, #0, 100f\n"
+ "st1 { v11.s }[2], [x12]\n"
+ "st1 { v15.s }[2], [x24]\n"
+ "st1 { v19.s }[2], [x23]\n"
"b 100f\n"
"93:" // Height 3: Partial direct writeback: partial_1_12
- "tbz x14, #0, 100f\n"
- "str s11, [x13, #0x0]\n"
- "str s15, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
+ "tbz x13, #0, 100f\n"
+ "str s11, [x12, #0x0]\n"
+ "str s15, [x24, #0x0]\n"
+ "str s19, [x23, #0x0]\n"
"b 100f\n"
"94:" // Height 3: Partial direct writeback: partial_2_8
- "tbz x14, #1, 95f\n"
- "str d10, [x13], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "tbz x14, #0, 100f\n"
- "st1 { v10.s }[2], [x13]\n"
- "st1 { v14.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
+ "tbz x13, #1, 95f\n"
+ "str d10, [x12], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
+ "tbz x13, #0, 100f\n"
+ "st1 { v10.s }[2], [x12]\n"
+ "st1 { v14.s }[2], [x24]\n"
+ "st1 { v18.s }[2], [x23]\n"
"b 100f\n"
"95:" // Height 3: Partial direct writeback: partial_1_8
- "tbz x14, #0, 100f\n"
- "str s10, [x13, #0x0]\n"
- "str s14, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
+ "tbz x13, #0, 100f\n"
+ "str s10, [x12, #0x0]\n"
+ "str s14, [x24, #0x0]\n"
+ "str s18, [x23, #0x0]\n"
"b 100f\n"
"96:" // Height 3: Partial direct writeback: partial_4_0
- "tbz x14, #2, 98f\n"
- "st1 { v8.4s }, [x13], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "tbz x14, #1, 97f\n"
- "str d9, [x13], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "tbz x14, #0, 100f\n"
- "st1 { v9.s }[2], [x13]\n"
- "st1 { v13.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
+ "tbz x13, #2, 98f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "tbz x13, #1, 97f\n"
+ "str d9, [x12], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
+ "tbz x13, #0, 100f\n"
+ "st1 { v9.s }[2], [x12]\n"
+ "st1 { v13.s }[2], [x24]\n"
+ "st1 { v17.s }[2], [x23]\n"
"b 100f\n"
"97:" // Height 3: Partial direct writeback: partial_1_4
- "tbz x14, #0, 100f\n"
- "str s9, [x13, #0x0]\n"
- "str s13, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
+ "tbz x13, #0, 100f\n"
+ "str s9, [x12, #0x0]\n"
+ "str s13, [x24, #0x0]\n"
+ "str s17, [x23, #0x0]\n"
"b 100f\n"
"98:" // Height 3: Partial direct writeback: partial_2_0
- "tbz x14, #1, 99f\n"
- "str d8, [x13], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "tbz x14, #0, 100f\n"
- "st1 { v8.s }[2], [x13]\n"
- "st1 { v12.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
+ "tbz x13, #1, 99f\n"
+ "str d8, [x12], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
+ "tbz x13, #0, 100f\n"
+ "st1 { v8.s }[2], [x12]\n"
+ "st1 { v12.s }[2], [x24]\n"
+ "st1 { v16.s }[2], [x23]\n"
"b 100f\n"
"99:" // Height 3: Partial direct writeback: partial_1_0
- "str s8, [x13, #0x0]\n"
- "str s12, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
+ "str s8, [x12, #0x0]\n"
+ "str s12, [x24, #0x0]\n"
+ "str s16, [x23, #0x0]\n"
"100:" // Height 3: Partial direct writeback: Done
"b 102f\n"
"101:" // Height 3: Full writeback
- "str q8, [x13, #0x0]\n"
- "str q9, [x13, #0x10]\n"
- "str q10, [x13, #0x20]\n"
- "str q11, [x13, #0x30]\n"
- "add x13, x13, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
+ "str q8, [x12, #0x0]\n"
+ "str q9, [x12, #0x10]\n"
+ "str q10, [x12, #0x20]\n"
+ "str q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
"102:" // Height 3: Writeback done
- "subs x14, x14, #0x10\n"
+ "subs x13, x13, #0x10\n"
"bgt 70b\n"
"b 206f\n"
"103:" // Height 4
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"104:" // Height 4: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #2\n"
- "add x10, x11, x20, LSL #2\n"
- "add x9, x10, x20, LSL #2\n"
- "add x20, x9, x20, LSL #2\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0xc\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0xc\n"
"bgt 105f\n"
- "cmp x14, #0x8\n"
- "mov x9, x12\n"
+ "cmp x13, #0x8\n"
+ "mov x28, x11\n"
"bgt 105f\n"
- "cmp x14, #0x4\n"
- "mov x10, x12\n"
+ "cmp x13, #0x4\n"
+ "mov x9, x11\n"
"bgt 105f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"105:" // Height 4: B setup done
- "cbz x15, 106f\n"
- "ldr q8, [x15, #0x0]\n"
- "ldr q9, [x15, #0x10]\n"
+ "cbz x14, 106f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
"mov v12.16b, v8.16b\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x15, #0x20]\n"
- "ldr q11, [x15, #0x30]\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
"mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
"mov v16.16b, v8.16b\n"
"mov v17.16b, v9.16b\n"
- "add x15, x15, #0x40\n"
+ "add x14, x14, #0x40\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
"mov v20.16b, v8.16b\n"
@@ -1355,132 +1355,132 @@ void a64_ffhybrid_fp32_mla_6x16 (
"b 117f\n"
"106:" // Height 4: no bias
"tbz %x[flags], #0, 116f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "cmp x14, #0x10\n"
- "add x23, x24, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "cmp x13, #0x10\n"
+ "add x22, x23, x19, LSL #2\n"
"bge 115f\n"
- "tbz x14, #3, 110f\n"
- "ld1 { v8.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
- "tbz x14, #2, 108f\n"
- "ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
- "tbz x14, #1, 107f\n"
- "ldr d11, [x13], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "mov x20, #0x38\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "tbz x14, #0, 114f\n"
- "ld1 { v11.s }[2], [x13]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
+ "tbz x13, #3, 110f\n"
+ "ld1 { v8.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
+ "tbz x13, #2, 108f\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
+ "tbz x13, #1, 107f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "tbz x13, #0, 114f\n"
+ "ld1 { v11.s }[2], [x12]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v23.s }[2], [x22]\n"
"b 114f\n"
"107:" // Height 4: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
- "tbz x14, #0, 114f\n"
- "ldr s11, [x13, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 114f\n"
+ "ldr s11, [x12, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s23, [x22, #0x0]\n"
"b 114f\n"
"108:" // Height 4: Partial accumulate: partial_2_8
- "tbz x14, #1, 109f\n"
- "ldr d10, [x13], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "mov x20, #0x28\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "tbz x14, #0, 114f\n"
- "ld1 { v10.s }[2], [x13]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
+ "tbz x13, #1, 109f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "tbz x13, #0, 114f\n"
+ "ld1 { v10.s }[2], [x12]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v22.s }[2], [x22]\n"
"b 114f\n"
"109:" // Height 4: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
- "tbz x14, #0, 114f\n"
- "ldr s10, [x13, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 114f\n"
+ "ldr s10, [x12, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "ldr s22, [x22, #0x0]\n"
"b 114f\n"
"110:" // Height 4: Partial accumulate: partial_4_0
- "tbz x14, #2, 112f\n"
- "ld1 { v8.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "tbz x14, #1, 111f\n"
- "ldr d9, [x13], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "mov x20, #0x18\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "tbz x14, #0, 114f\n"
- "ld1 { v9.s }[2], [x13]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
+ "tbz x13, #2, 112f\n"
+ "ld1 { v8.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "tbz x13, #1, 111f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "tbz x13, #0, 114f\n"
+ "ld1 { v9.s }[2], [x12]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v21.s }[2], [x22]\n"
"b 114f\n"
"111:" // Height 4: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
- "tbz x14, #0, 114f\n"
- "ldr s9, [x13, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 114f\n"
+ "ldr s9, [x12, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s17, [x23, #0x0]\n"
+ "ldr s21, [x22, #0x0]\n"
"b 114f\n"
"112:" // Height 4: Partial accumulate: partial_2_0
- "tbz x14, #1, 113f\n"
- "ldr d8, [x13], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "tbz x14, #0, 114f\n"
- "ld1 { v8.s }[2], [x13]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v16.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
+ "tbz x13, #1, 113f\n"
+ "ldr d8, [x12], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d16, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "tbz x13, #0, 114f\n"
+ "ld1 { v8.s }[2], [x12]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v16.s }[2], [x23]\n"
+ "ld1 { v20.s }[2], [x22]\n"
"b 114f\n"
"113:" // Height 4: Partial accumulate: partial_1_0
- "ldr s8, [x13, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s16, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
+ "ldr s8, [x12, #0x0]\n"
+ "ldr s12, [x24, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s16, [x23, #0x0]\n"
+ "ldr s20, [x22, #0x0]\n"
"114:" // Height 4: Partial accumulate: Done
- "sub x13, x13, x20\n"
+ "sub x12, x12, x19\n"
"b 117f\n"
"115:" // Height 4: full accumulate
- "ldr q8, [x13, #0x0]\n"
- "ldr q9, [x13, #0x10]\n"
- "ldr q10, [x13, #0x20]\n"
- "ldr q11, [x13, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
+ "ldr q8, [x12, #0x0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "ldr q10, [x12, #0x20]\n"
+ "ldr q11, [x12, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x0]\n"
+ "ldr q17, [x23, #0x10]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "ldr q19, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
"b 117f\n"
"116:" // Height 4: no accumulate
"movi v8.16b, #0x0\n"
@@ -1500,217 +1500,217 @@ void a64_ffhybrid_fp32_mla_6x16 (
"movi v22.16b, #0x0\n"
"movi v23.16b, #0x0\n"
"117:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"118:" // Height 4: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 119f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 120f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 120f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
"b 120f\n"
"119:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"120:" // Height 4: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"blt 123f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x8\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q3, [x23, #0x0]\n"
- "ldr q6, [x12, #0x0]\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x8\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"blt 122f\n"
"121:" // Height 4: Multiply loop: Main loop head
"fmla v8.4s, v6.4s, v0.s[0]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x8\n"
+ "sub x26, x26, #0x4\n"
+ "cmp x26, #0x8\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
"fmla v20.4s, v6.4s, v3.s[0]\n"
- "ldr q6, [x10, #0x0]\n"
- "add x26, x26, #0x10\n"
+ "ldr q6, [x9, #0x0]\n"
+ "add x25, x25, #0x10\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
- "add x25, x25, #0x10\n"
"add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
"fmla v21.4s, v7.4s, v3.s[0]\n"
- "ldr q7, [x9, #0x0]\n"
- "add x23, x23, #0x10\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x22, x22, #0x10\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
"fmla v18.4s, v6.4s, v2.s[0]\n"
"fmla v22.4s, v6.4s, v3.s[0]\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q6, [x11, #0x10]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
"fmla v15.4s, v7.4s, v1.s[0]\n"
"fmla v19.4s, v7.4s, v2.s[0]\n"
"fmla v23.4s, v7.4s, v3.s[0]\n"
- "ldr q7, [x11, #0x10]\n"
+ "ldr q7, [x10, #0x10]\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
"fmla v12.4s, v6.4s, v1.s[1]\n"
"fmla v16.4s, v6.4s, v2.s[1]\n"
"fmla v20.4s, v6.4s, v3.s[1]\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
"fmla v9.4s, v7.4s, v0.s[1]\n"
"fmla v13.4s, v7.4s, v1.s[1]\n"
"fmla v17.4s, v7.4s, v2.s[1]\n"
"fmla v21.4s, v7.4s, v3.s[1]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
"fmla v10.4s, v6.4s, v0.s[1]\n"
"fmla v14.4s, v6.4s, v1.s[1]\n"
"fmla v18.4s, v6.4s, v2.s[1]\n"
"fmla v22.4s, v6.4s, v3.s[1]\n"
- "ldr q6, [x12, #0x20]\n"
+ "ldr q6, [x11, #0x20]\n"
"fmla v11.4s, v7.4s, v0.s[1]\n"
"fmla v15.4s, v7.4s, v1.s[1]\n"
"fmla v19.4s, v7.4s, v2.s[1]\n"
"fmla v23.4s, v7.4s, v3.s[1]\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
"fmla v8.4s, v6.4s, v0.s[2]\n"
"fmla v12.4s, v6.4s, v1.s[2]\n"
"fmla v16.4s, v6.4s, v2.s[2]\n"
"fmla v20.4s, v6.4s, v3.s[2]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
"fmla v9.4s, v7.4s, v0.s[2]\n"
"fmla v13.4s, v7.4s, v1.s[2]\n"
"fmla v17.4s, v7.4s, v2.s[2]\n"
"fmla v21.4s, v7.4s, v3.s[2]\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
"fmla v10.4s, v6.4s, v0.s[2]\n"
"fmla v14.4s, v6.4s, v1.s[2]\n"
"fmla v18.4s, v6.4s, v2.s[2]\n"
"fmla v22.4s, v6.4s, v3.s[2]\n"
- "ldr q6, [x12, #0x30]\n"
- "add x12, x12, #0x40\n"
+ "ldr q6, [x11, #0x30]\n"
+ "add x11, x11, #0x40\n"
"fmla v11.4s, v7.4s, v0.s[2]\n"
"fmla v15.4s, v7.4s, v1.s[2]\n"
"fmla v19.4s, v7.4s, v2.s[2]\n"
"fmla v23.4s, v7.4s, v3.s[2]\n"
- "ldr q7, [x11, #0x30]\n"
- "add x11, x11, #0x40\n"
+ "ldr q7, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
"fmla v8.4s, v6.4s, v0.s[3]\n"
"fmla v12.4s, v6.4s, v1.s[3]\n"
"fmla v16.4s, v6.4s, v2.s[3]\n"
"fmla v20.4s, v6.4s, v3.s[3]\n"
- "ldr q6, [x10, #0x30]\n"
- "add x10, x10, #0x40\n"
+ "ldr q6, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
"fmla v9.4s, v7.4s, v0.s[3]\n"
"fmla v13.4s, v7.4s, v1.s[3]\n"
"fmla v17.4s, v7.4s, v2.s[3]\n"
"fmla v21.4s, v7.4s, v3.s[3]\n"
- "ldr q7, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
"fmla v10.4s, v6.4s, v0.s[3]\n"
"fmla v14.4s, v6.4s, v1.s[3]\n"
"fmla v18.4s, v6.4s, v2.s[3]\n"
"fmla v22.4s, v6.4s, v3.s[3]\n"
- "ldr q6, [x12, #0x0]\n"
+ "ldr q6, [x11, #0x0]\n"
"fmla v11.4s, v7.4s, v0.s[3]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
"fmla v15.4s, v7.4s, v1.s[3]\n"
- "ldr q1, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
"fmla v19.4s, v7.4s, v2.s[3]\n"
- "ldr q2, [x24, #0x0]\n"
+ "ldr q2, [x23, #0x0]\n"
"fmla v23.4s, v7.4s, v3.s[3]\n"
- "ldr q3, [x23, #0x0]\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"bge 121b\n"
"122:" // Height 4: Multiply loop: Single iteration only
"fmla v8.4s, v6.4s, v0.s[0]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
- "sub x27, x27, #0x4\n"
- "add x26, x26, #0x10\n"
+ "sub x26, x26, #0x4\n"
+ "add x25, x25, #0x10\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
"fmla v20.4s, v6.4s, v3.s[0]\n"
- "ldr q6, [x10, #0x0]\n"
- "add x25, x25, #0x10\n"
+ "ldr q6, [x9, #0x0]\n"
+ "add x24, x24, #0x10\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
- "add x24, x24, #0x10\n"
"add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
"fmla v21.4s, v7.4s, v3.s[0]\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
"fmla v18.4s, v6.4s, v2.s[0]\n"
"fmla v22.4s, v6.4s, v3.s[0]\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q6, [x11, #0x10]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
"fmla v15.4s, v7.4s, v1.s[0]\n"
"fmla v19.4s, v7.4s, v2.s[0]\n"
"fmla v23.4s, v7.4s, v3.s[0]\n"
- "ldr q7, [x11, #0x10]\n"
+ "ldr q7, [x10, #0x10]\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
"fmla v12.4s, v6.4s, v1.s[1]\n"
"fmla v16.4s, v6.4s, v2.s[1]\n"
"fmla v20.4s, v6.4s, v3.s[1]\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
"fmla v9.4s, v7.4s, v0.s[1]\n"
"fmla v13.4s, v7.4s, v1.s[1]\n"
"fmla v17.4s, v7.4s, v2.s[1]\n"
"fmla v21.4s, v7.4s, v3.s[1]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
"fmla v10.4s, v6.4s, v0.s[1]\n"
"fmla v14.4s, v6.4s, v1.s[1]\n"
"fmla v18.4s, v6.4s, v2.s[1]\n"
"fmla v22.4s, v6.4s, v3.s[1]\n"
- "ldr q6, [x12, #0x20]\n"
+ "ldr q6, [x11, #0x20]\n"
"fmla v11.4s, v7.4s, v0.s[1]\n"
"fmla v15.4s, v7.4s, v1.s[1]\n"
"fmla v19.4s, v7.4s, v2.s[1]\n"
"fmla v23.4s, v7.4s, v3.s[1]\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
"fmla v8.4s, v6.4s, v0.s[2]\n"
"fmla v12.4s, v6.4s, v1.s[2]\n"
"fmla v16.4s, v6.4s, v2.s[2]\n"
"fmla v20.4s, v6.4s, v3.s[2]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
"fmla v9.4s, v7.4s, v0.s[2]\n"
"fmla v13.4s, v7.4s, v1.s[2]\n"
"fmla v17.4s, v7.4s, v2.s[2]\n"
"fmla v21.4s, v7.4s, v3.s[2]\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
"fmla v10.4s, v6.4s, v0.s[2]\n"
"fmla v14.4s, v6.4s, v1.s[2]\n"
"fmla v18.4s, v6.4s, v2.s[2]\n"
"fmla v22.4s, v6.4s, v3.s[2]\n"
- "ldr q6, [x12, #0x30]\n"
- "add x12, x12, #0x40\n"
+ "ldr q6, [x11, #0x30]\n"
+ "add x11, x11, #0x40\n"
"fmla v11.4s, v7.4s, v0.s[2]\n"
"fmla v15.4s, v7.4s, v1.s[2]\n"
"fmla v19.4s, v7.4s, v2.s[2]\n"
"fmla v23.4s, v7.4s, v3.s[2]\n"
- "ldr q7, [x11, #0x30]\n"
- "add x11, x11, #0x40\n"
+ "ldr q7, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
"fmla v8.4s, v6.4s, v0.s[3]\n"
"fmla v12.4s, v6.4s, v1.s[3]\n"
"fmla v16.4s, v6.4s, v2.s[3]\n"
"fmla v20.4s, v6.4s, v3.s[3]\n"
- "ldr q6, [x10, #0x30]\n"
- "add x10, x10, #0x40\n"
+ "ldr q6, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
"fmla v9.4s, v7.4s, v0.s[3]\n"
"fmla v13.4s, v7.4s, v1.s[3]\n"
"fmla v17.4s, v7.4s, v2.s[3]\n"
"fmla v21.4s, v7.4s, v3.s[3]\n"
- "ldr q7, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
"fmla v10.4s, v6.4s, v0.s[3]\n"
"fmla v14.4s, v6.4s, v1.s[3]\n"
"fmla v18.4s, v6.4s, v2.s[3]\n"
@@ -1720,29 +1720,29 @@ void a64_ffhybrid_fp32_mla_6x16 (
"fmla v19.4s, v7.4s, v2.s[3]\n"
"fmla v23.4s, v7.4s, v3.s[3]\n"
"123:" // Height 4: Multiply loop: Main loop skip
- "cbz x27, 125f\n"
+ "cbz x26, 125f\n"
"124:" // Height 4: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr s1, [x25], #0x4\n"
- "sub x27, x27, #0x1\n"
- "ldr s2, [x24], #0x4\n"
- "ldr s3, [x23], #0x4\n"
- "ldr q6, [x12, #0x0]\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr s0, [x25], #0x4\n"
+ "ldr s1, [x24], #0x4\n"
+ "sub x26, x26, #0x1\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr s3, [x22], #0x4\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"fmla v8.4s, v6.4s, v0.s[0]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
"fmla v20.4s, v6.4s, v3.s[0]\n"
- "ldr q6, [x10, #0x0]\n"
- "add x12, x12, #0x10\n"
+ "ldr q6, [x9, #0x0]\n"
+ "add x11, x11, #0x10\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
- "add x11, x11, #0x10\n"
"add x10, x10, #0x10\n"
+ "add x9, x9, #0x10\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
"fmla v21.4s, v7.4s, v3.s[0]\n"
- "ldr q7, [x9, #0x0]\n"
- "add x9, x9, #0x10\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x28, x28, #0x10\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
"fmla v18.4s, v6.4s, v2.s[0]\n"
@@ -1751,21 +1751,21 @@ void a64_ffhybrid_fp32_mla_6x16 (
"fmla v15.4s, v7.4s, v1.s[0]\n"
"fmla v19.4s, v7.4s, v2.s[0]\n"
"fmla v23.4s, v7.4s, v3.s[0]\n"
- "cbnz x27, 124b\n"
+ "cbnz x26, 124b\n"
"125:" // Height 4: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 118b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"tbz %x[flags], #1, 126f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.4s }, [x19]\n"
"fmin v8.4s, v8.4s, v1.4s\n"
"fmin v9.4s, v9.4s, v1.4s\n"
"fmin v10.4s, v10.4s, v1.4s\n"
@@ -1799,160 +1799,160 @@ void a64_ffhybrid_fp32_mla_6x16 (
"fmax v22.4s, v22.4s, v0.4s\n"
"fmax v23.4s, v23.4s, v0.4s\n"
"126:" // Height 4: No activation
- "cmp x14, #0x10\n"
+ "cmp x13, #0x10\n"
"bge 135f\n"
- "tbz x14, #3, 130f\n"
- "st1 { v8.4s }, [x13], #0x10\n"
- "st1 { v9.4s }, [x13], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v21.4s }, [x23], #0x10\n"
- "tbz x14, #2, 128f\n"
- "st1 { v10.4s }, [x13], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
- "st1 { v22.4s }, [x23], #0x10\n"
- "tbz x14, #1, 127f\n"
- "str d11, [x13], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
- "tbz x14, #0, 134f\n"
- "st1 { v11.s }[2], [x13]\n"
- "st1 { v15.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
- "st1 { v23.s }[2], [x23]\n"
+ "tbz x13, #3, 130f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "st1 { v9.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v13.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v17.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v21.4s }, [x22], #0x10\n"
+ "tbz x13, #2, 128f\n"
+ "st1 { v10.4s }, [x12], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v18.4s }, [x23], #0x10\n"
+ "st1 { v22.4s }, [x22], #0x10\n"
+ "tbz x13, #1, 127f\n"
+ "str d11, [x12], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "str d23, [x22], #0x8\n"
+ "tbz x13, #0, 134f\n"
+ "st1 { v11.s }[2], [x12]\n"
+ "st1 { v15.s }[2], [x24]\n"
+ "st1 { v19.s }[2], [x23]\n"
+ "st1 { v23.s }[2], [x22]\n"
"b 134f\n"
"127:" // Height 4: Partial direct writeback: partial_1_12
- "tbz x14, #0, 134f\n"
- "str s11, [x13, #0x0]\n"
- "str s15, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
- "str s23, [x23, #0x0]\n"
+ "tbz x13, #0, 134f\n"
+ "str s11, [x12, #0x0]\n"
+ "str s15, [x24, #0x0]\n"
+ "str s19, [x23, #0x0]\n"
+ "str s23, [x22, #0x0]\n"
"b 134f\n"
"128:" // Height 4: Partial direct writeback: partial_2_8
- "tbz x14, #1, 129f\n"
- "str d10, [x13], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
- "tbz x14, #0, 134f\n"
- "st1 { v10.s }[2], [x13]\n"
- "st1 { v14.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
- "st1 { v22.s }[2], [x23]\n"
+ "tbz x13, #1, 129f\n"
+ "str d10, [x12], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
+ "str d22, [x22], #0x8\n"
+ "tbz x13, #0, 134f\n"
+ "st1 { v10.s }[2], [x12]\n"
+ "st1 { v14.s }[2], [x24]\n"
+ "st1 { v18.s }[2], [x23]\n"
+ "st1 { v22.s }[2], [x22]\n"
"b 134f\n"
"129:" // Height 4: Partial direct writeback: partial_1_8
- "tbz x14, #0, 134f\n"
- "str s10, [x13, #0x0]\n"
- "str s14, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
- "str s22, [x23, #0x0]\n"
+ "tbz x13, #0, 134f\n"
+ "str s10, [x12, #0x0]\n"
+ "str s14, [x24, #0x0]\n"
+ "str s18, [x23, #0x0]\n"
+ "str s22, [x22, #0x0]\n"
"b 134f\n"
"130:" // Height 4: Partial direct writeback: partial_4_0
- "tbz x14, #2, 132f\n"
- "st1 { v8.4s }, [x13], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "tbz x14, #1, 131f\n"
- "str d9, [x13], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
- "tbz x14, #0, 134f\n"
- "st1 { v9.s }[2], [x13]\n"
- "st1 { v13.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
- "st1 { v21.s }[2], [x23]\n"
+ "tbz x13, #2, 132f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "tbz x13, #1, 131f\n"
+ "str d9, [x12], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
+ "str d21, [x22], #0x8\n"
+ "tbz x13, #0, 134f\n"
+ "st1 { v9.s }[2], [x12]\n"
+ "st1 { v13.s }[2], [x24]\n"
+ "st1 { v17.s }[2], [x23]\n"
+ "st1 { v21.s }[2], [x22]\n"
"b 134f\n"
"131:" // Height 4: Partial direct writeback: partial_1_4
- "tbz x14, #0, 134f\n"
- "str s9, [x13, #0x0]\n"
- "str s13, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
- "str s21, [x23, #0x0]\n"
+ "tbz x13, #0, 134f\n"
+ "str s9, [x12, #0x0]\n"
+ "str s13, [x24, #0x0]\n"
+ "str s17, [x23, #0x0]\n"
+ "str s21, [x22, #0x0]\n"
"b 134f\n"
"132:" // Height 4: Partial direct writeback: partial_2_0
- "tbz x14, #1, 133f\n"
- "str d8, [x13], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "tbz x14, #0, 134f\n"
- "st1 { v8.s }[2], [x13]\n"
- "st1 { v12.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
- "st1 { v20.s }[2], [x23]\n"
+ "tbz x13, #1, 133f\n"
+ "str d8, [x12], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
+ "str d20, [x22], #0x8\n"
+ "tbz x13, #0, 134f\n"
+ "st1 { v8.s }[2], [x12]\n"
+ "st1 { v12.s }[2], [x24]\n"
+ "st1 { v16.s }[2], [x23]\n"
+ "st1 { v20.s }[2], [x22]\n"
"b 134f\n"
"133:" // Height 4: Partial direct writeback: partial_1_0
- "str s8, [x13, #0x0]\n"
- "str s12, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
- "str s20, [x23, #0x0]\n"
+ "str s8, [x12, #0x0]\n"
+ "str s12, [x24, #0x0]\n"
+ "str s16, [x23, #0x0]\n"
+ "str s20, [x22, #0x0]\n"
"134:" // Height 4: Partial direct writeback: Done
"b 136f\n"
"135:" // Height 4: Full writeback
- "str q8, [x13, #0x0]\n"
- "str q9, [x13, #0x10]\n"
- "str q10, [x13, #0x20]\n"
- "str q11, [x13, #0x30]\n"
- "add x13, x13, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
+ "str q8, [x12, #0x0]\n"
+ "str q9, [x12, #0x10]\n"
+ "str q10, [x12, #0x20]\n"
+ "str q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "str q20, [x22, #0x0]\n"
+ "str q21, [x22, #0x10]\n"
+ "str q22, [x22, #0x20]\n"
+ "str q23, [x22, #0x30]\n"
"136:" // Height 4: Writeback done
- "subs x14, x14, #0x10\n"
+ "subs x13, x13, #0x10\n"
"bgt 104b\n"
"b 206f\n"
"137:" // Height 5
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"138:" // Height 5: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #2\n"
- "add x10, x11, x20, LSL #2\n"
- "add x9, x10, x20, LSL #2\n"
- "add x20, x9, x20, LSL #2\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0xc\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0xc\n"
"bgt 139f\n"
- "cmp x14, #0x8\n"
- "mov x9, x12\n"
+ "cmp x13, #0x8\n"
+ "mov x28, x11\n"
"bgt 139f\n"
- "cmp x14, #0x4\n"
- "mov x10, x12\n"
+ "cmp x13, #0x4\n"
+ "mov x9, x11\n"
"bgt 139f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"139:" // Height 5: B setup done
- "cbz x15, 140f\n"
- "ldr q8, [x15, #0x0]\n"
- "ldr q9, [x15, #0x10]\n"
+ "cbz x14, 140f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
"mov v12.16b, v8.16b\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x15, #0x20]\n"
- "ldr q11, [x15, #0x30]\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
"mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
"mov v16.16b, v8.16b\n"
"mov v17.16b, v9.16b\n"
- "add x15, x15, #0x40\n"
+ "add x14, x14, #0x40\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
"mov v20.16b, v8.16b\n"
@@ -1966,153 +1966,153 @@ void a64_ffhybrid_fp32_mla_6x16 (
"b 151f\n"
"140:" // Height 5: no bias
"tbz %x[flags], #0, 150f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "cmp x14, #0x10\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "cmp x13, #0x10\n"
+ "add x21, x22, x19, LSL #2\n"
"bge 149f\n"
- "tbz x14, #3, 144f\n"
- "ld1 { v8.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
- "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
- "tbz x14, #2, 142f\n"
- "ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
- "ld1 { v26.4s }, [x22], #0x10\n"
- "tbz x14, #1, 141f\n"
- "ldr d11, [x13], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "mov x20, #0x38\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
- "tbz x14, #0, 148f\n"
- "ld1 { v11.s }[2], [x13]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
- "ld1 { v27.s }[2], [x22]\n"
+ "tbz x13, #3, 144f\n"
+ "ld1 { v8.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v24.4s }, [x21], #0x10\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
+ "tbz x13, #2, 142f\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
+ "ld1 { v26.4s }, [x21], #0x10\n"
+ "tbz x13, #1, 141f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "ldr d27, [x21], #0x8\n"
+ "tbz x13, #0, 148f\n"
+ "ld1 { v11.s }[2], [x12]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v23.s }[2], [x22]\n"
+ "ld1 { v27.s }[2], [x21]\n"
"b 148f\n"
"141:" // Height 5: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
- "tbz x14, #0, 148f\n"
- "ldr s11, [x13, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
- "ldr s27, [x22, #0x0]\n"
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 148f\n"
+ "ldr s11, [x12, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s23, [x22, #0x0]\n"
+ "ldr s27, [x21, #0x0]\n"
"b 148f\n"
"142:" // Height 5: Partial accumulate: partial_2_8
- "tbz x14, #1, 143f\n"
- "ldr d10, [x13], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "mov x20, #0x28\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
- "tbz x14, #0, 148f\n"
- "ld1 { v10.s }[2], [x13]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
- "ld1 { v26.s }[2], [x22]\n"
+ "tbz x13, #1, 143f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "ldr d26, [x21], #0x8\n"
+ "tbz x13, #0, 148f\n"
+ "ld1 { v10.s }[2], [x12]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v22.s }[2], [x22]\n"
+ "ld1 { v26.s }[2], [x21]\n"
"b 148f\n"
"143:" // Height 5: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
- "tbz x14, #0, 148f\n"
- "ldr s10, [x13, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
- "ldr s26, [x22, #0x0]\n"
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 148f\n"
+ "ldr s10, [x12, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "ldr s22, [x22, #0x0]\n"
+ "ldr s26, [x21, #0x0]\n"
"b 148f\n"
"144:" // Height 5: Partial accumulate: partial_4_0
- "tbz x14, #2, 146f\n"
- "ld1 { v8.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
- "tbz x14, #1, 145f\n"
- "ldr d9, [x13], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "mov x20, #0x18\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
- "tbz x14, #0, 148f\n"
- "ld1 { v9.s }[2], [x13]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
+ "tbz x13, #2, 146f\n"
+ "ld1 { v8.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v24.4s }, [x21], #0x10\n"
+ "tbz x13, #1, 145f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d25, [x21], #0x8\n"
+ "tbz x13, #0, 148f\n"
+ "ld1 { v9.s }[2], [x12]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v21.s }[2], [x22]\n"
+ "ld1 { v25.s }[2], [x21]\n"
"b 148f\n"
"145:" // Height 5: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
- "tbz x14, #0, 148f\n"
- "ldr s9, [x13, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 148f\n"
+ "ldr s9, [x12, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s17, [x23, #0x0]\n"
+ "ldr s21, [x22, #0x0]\n"
+ "ldr s25, [x21, #0x0]\n"
"b 148f\n"
"146:" // Height 5: Partial accumulate: partial_2_0
- "tbz x14, #1, 147f\n"
- "ldr d8, [x13], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d24, [x22], #0x8\n"
- "tbz x14, #0, 148f\n"
- "ld1 { v8.s }[2], [x13]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v16.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
- "ld1 { v24.s }[2], [x22]\n"
+ "tbz x13, #1, 147f\n"
+ "ldr d8, [x12], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d16, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "ldr d24, [x21], #0x8\n"
+ "tbz x13, #0, 148f\n"
+ "ld1 { v8.s }[2], [x12]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v16.s }[2], [x23]\n"
+ "ld1 { v20.s }[2], [x22]\n"
+ "ld1 { v24.s }[2], [x21]\n"
"b 148f\n"
"147:" // Height 5: Partial accumulate: partial_1_0
- "ldr s8, [x13, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s16, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
- "ldr s24, [x22, #0x0]\n"
+ "ldr s8, [x12, #0x0]\n"
+ "ldr s12, [x24, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s16, [x23, #0x0]\n"
+ "ldr s20, [x22, #0x0]\n"
+ "ldr s24, [x21, #0x0]\n"
"148:" // Height 5: Partial accumulate: Done
- "sub x13, x13, x20\n"
+ "sub x12, x12, x19\n"
"b 151f\n"
"149:" // Height 5: full accumulate
- "ldr q8, [x13, #0x0]\n"
- "ldr q9, [x13, #0x10]\n"
- "ldr q10, [x13, #0x20]\n"
- "ldr q11, [x13, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q24, [x22, #0x0]\n"
- "ldr q25, [x22, #0x10]\n"
- "ldr q26, [x22, #0x20]\n"
- "ldr q27, [x22, #0x30]\n"
+ "ldr q8, [x12, #0x0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "ldr q10, [x12, #0x20]\n"
+ "ldr q11, [x12, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x0]\n"
+ "ldr q17, [x23, #0x10]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "ldr q19, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
+ "ldr q24, [x21, #0x0]\n"
+ "ldr q25, [x21, #0x10]\n"
+ "ldr q26, [x21, #0x20]\n"
+ "ldr q27, [x21, #0x30]\n"
"b 151f\n"
"150:" // Height 5: no accumulate
"movi v8.16b, #0x0\n"
@@ -2136,254 +2136,254 @@ void a64_ffhybrid_fp32_mla_6x16 (
"movi v26.16b, #0x0\n"
"movi v27.16b, #0x0\n"
"151:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"152:" // Height 5: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 153f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 154f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x22, x22, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 154f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
"b 154f\n"
"153:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"154:" // Height 5: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"blt 157f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x8\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q3, [x23, #0x0]\n"
- "ldr q4, [x22, #0x0]\n"
- "ldr q6, [x12, #0x0]\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x8\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"blt 156f\n"
"155:" // Height 5: Multiply loop: Main loop head
"fmla v8.4s, v6.4s, v0.s[0]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x8\n"
+ "sub x26, x26, #0x4\n"
+ "cmp x26, #0x8\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
"fmla v20.4s, v6.4s, v3.s[0]\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"fmla v24.4s, v6.4s, v4.s[0]\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
- "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
- "add x23, x23, #0x10\n"
"add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
"fmla v21.4s, v7.4s, v3.s[0]\n"
"fmla v25.4s, v7.4s, v4.s[0]\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
"fmla v18.4s, v6.4s, v2.s[0]\n"
"fmla v22.4s, v6.4s, v3.s[0]\n"
"fmla v26.4s, v6.4s, v4.s[0]\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q6, [x11, #0x10]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
"fmla v15.4s, v7.4s, v1.s[0]\n"
"fmla v19.4s, v7.4s, v2.s[0]\n"
"fmla v23.4s, v7.4s, v3.s[0]\n"
"fmla v27.4s, v7.4s, v4.s[0]\n"
- "ldr q7, [x11, #0x10]\n"
+ "ldr q7, [x10, #0x10]\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
"fmla v12.4s, v6.4s, v1.s[1]\n"
"fmla v16.4s, v6.4s, v2.s[1]\n"
"fmla v20.4s, v6.4s, v3.s[1]\n"
"fmla v24.4s, v6.4s, v4.s[1]\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
"fmla v9.4s, v7.4s, v0.s[1]\n"
"fmla v13.4s, v7.4s, v1.s[1]\n"
"fmla v17.4s, v7.4s, v2.s[1]\n"
"fmla v21.4s, v7.4s, v3.s[1]\n"
"fmla v25.4s, v7.4s, v4.s[1]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
"fmla v10.4s, v6.4s, v0.s[1]\n"
"fmla v14.4s, v6.4s, v1.s[1]\n"
"fmla v18.4s, v6.4s, v2.s[1]\n"
"fmla v22.4s, v6.4s, v3.s[1]\n"
"fmla v26.4s, v6.4s, v4.s[1]\n"
- "ldr q6, [x12, #0x20]\n"
+ "ldr q6, [x11, #0x20]\n"
"fmla v11.4s, v7.4s, v0.s[1]\n"
"fmla v15.4s, v7.4s, v1.s[1]\n"
"fmla v19.4s, v7.4s, v2.s[1]\n"
"fmla v23.4s, v7.4s, v3.s[1]\n"
"fmla v27.4s, v7.4s, v4.s[1]\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
"fmla v8.4s, v6.4s, v0.s[2]\n"
"fmla v12.4s, v6.4s, v1.s[2]\n"
"fmla v16.4s, v6.4s, v2.s[2]\n"
"fmla v20.4s, v6.4s, v3.s[2]\n"
"fmla v24.4s, v6.4s, v4.s[2]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
"fmla v9.4s, v7.4s, v0.s[2]\n"
"fmla v13.4s, v7.4s, v1.s[2]\n"
"fmla v17.4s, v7.4s, v2.s[2]\n"
"fmla v21.4s, v7.4s, v3.s[2]\n"
"fmla v25.4s, v7.4s, v4.s[2]\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
"fmla v10.4s, v6.4s, v0.s[2]\n"
"fmla v14.4s, v6.4s, v1.s[2]\n"
"fmla v18.4s, v6.4s, v2.s[2]\n"
"fmla v22.4s, v6.4s, v3.s[2]\n"
"fmla v26.4s, v6.4s, v4.s[2]\n"
- "ldr q6, [x12, #0x30]\n"
+ "ldr q6, [x11, #0x30]\n"
"fmla v11.4s, v7.4s, v0.s[2]\n"
- "add x12, x12, #0x40\n"
+ "add x11, x11, #0x40\n"
"fmla v15.4s, v7.4s, v1.s[2]\n"
"fmla v19.4s, v7.4s, v2.s[2]\n"
"fmla v23.4s, v7.4s, v3.s[2]\n"
"fmla v27.4s, v7.4s, v4.s[2]\n"
- "ldr q7, [x11, #0x30]\n"
- "add x11, x11, #0x40\n"
+ "ldr q7, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
"fmla v8.4s, v6.4s, v0.s[3]\n"
"fmla v12.4s, v6.4s, v1.s[3]\n"
"fmla v16.4s, v6.4s, v2.s[3]\n"
"fmla v20.4s, v6.4s, v3.s[3]\n"
"fmla v24.4s, v6.4s, v4.s[3]\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
"fmla v9.4s, v7.4s, v0.s[3]\n"
- "add x10, x10, #0x40\n"
+ "add x9, x9, #0x40\n"
"fmla v13.4s, v7.4s, v1.s[3]\n"
"fmla v17.4s, v7.4s, v2.s[3]\n"
"fmla v21.4s, v7.4s, v3.s[3]\n"
"fmla v25.4s, v7.4s, v4.s[3]\n"
- "ldr q7, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
"fmla v10.4s, v6.4s, v0.s[3]\n"
"fmla v14.4s, v6.4s, v1.s[3]\n"
"fmla v18.4s, v6.4s, v2.s[3]\n"
"fmla v22.4s, v6.4s, v3.s[3]\n"
"fmla v26.4s, v6.4s, v4.s[3]\n"
- "ldr q6, [x12, #0x0]\n"
+ "ldr q6, [x11, #0x0]\n"
"fmla v11.4s, v7.4s, v0.s[3]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
"fmla v15.4s, v7.4s, v1.s[3]\n"
- "ldr q1, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
"fmla v19.4s, v7.4s, v2.s[3]\n"
- "ldr q2, [x24, #0x0]\n"
+ "ldr q2, [x23, #0x0]\n"
"fmla v23.4s, v7.4s, v3.s[3]\n"
- "ldr q3, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
"fmla v27.4s, v7.4s, v4.s[3]\n"
- "ldr q4, [x22, #0x0]\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"bge 155b\n"
"156:" // Height 5: Multiply loop: Single iteration only
"fmla v8.4s, v6.4s, v0.s[0]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
- "sub x27, x27, #0x4\n"
- "add x26, x26, #0x10\n"
+ "sub x26, x26, #0x4\n"
+ "add x25, x25, #0x10\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
"fmla v20.4s, v6.4s, v3.s[0]\n"
- "add x25, x25, #0x10\n"
"add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
"fmla v24.4s, v6.4s, v4.s[0]\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
- "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
- "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
"fmla v21.4s, v7.4s, v3.s[0]\n"
"fmla v25.4s, v7.4s, v4.s[0]\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
"fmla v18.4s, v6.4s, v2.s[0]\n"
"fmla v22.4s, v6.4s, v3.s[0]\n"
"fmla v26.4s, v6.4s, v4.s[0]\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q6, [x11, #0x10]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
"fmla v15.4s, v7.4s, v1.s[0]\n"
"fmla v19.4s, v7.4s, v2.s[0]\n"
"fmla v23.4s, v7.4s, v3.s[0]\n"
"fmla v27.4s, v7.4s, v4.s[0]\n"
- "ldr q7, [x11, #0x10]\n"
+ "ldr q7, [x10, #0x10]\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
"fmla v12.4s, v6.4s, v1.s[1]\n"
"fmla v16.4s, v6.4s, v2.s[1]\n"
"fmla v20.4s, v6.4s, v3.s[1]\n"
"fmla v24.4s, v6.4s, v4.s[1]\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
"fmla v9.4s, v7.4s, v0.s[1]\n"
"fmla v13.4s, v7.4s, v1.s[1]\n"
"fmla v17.4s, v7.4s, v2.s[1]\n"
"fmla v21.4s, v7.4s, v3.s[1]\n"
"fmla v25.4s, v7.4s, v4.s[1]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
"fmla v10.4s, v6.4s, v0.s[1]\n"
"fmla v14.4s, v6.4s, v1.s[1]\n"
"fmla v18.4s, v6.4s, v2.s[1]\n"
"fmla v22.4s, v6.4s, v3.s[1]\n"
"fmla v26.4s, v6.4s, v4.s[1]\n"
- "ldr q6, [x12, #0x20]\n"
+ "ldr q6, [x11, #0x20]\n"
"fmla v11.4s, v7.4s, v0.s[1]\n"
"fmla v15.4s, v7.4s, v1.s[1]\n"
"fmla v19.4s, v7.4s, v2.s[1]\n"
"fmla v23.4s, v7.4s, v3.s[1]\n"
"fmla v27.4s, v7.4s, v4.s[1]\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
"fmla v8.4s, v6.4s, v0.s[2]\n"
"fmla v12.4s, v6.4s, v1.s[2]\n"
"fmla v16.4s, v6.4s, v2.s[2]\n"
"fmla v20.4s, v6.4s, v3.s[2]\n"
"fmla v24.4s, v6.4s, v4.s[2]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
"fmla v9.4s, v7.4s, v0.s[2]\n"
"fmla v13.4s, v7.4s, v1.s[2]\n"
"fmla v17.4s, v7.4s, v2.s[2]\n"
"fmla v21.4s, v7.4s, v3.s[2]\n"
"fmla v25.4s, v7.4s, v4.s[2]\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
"fmla v10.4s, v6.4s, v0.s[2]\n"
"fmla v14.4s, v6.4s, v1.s[2]\n"
"fmla v18.4s, v6.4s, v2.s[2]\n"
"fmla v22.4s, v6.4s, v3.s[2]\n"
"fmla v26.4s, v6.4s, v4.s[2]\n"
- "ldr q6, [x12, #0x30]\n"
+ "ldr q6, [x11, #0x30]\n"
"fmla v11.4s, v7.4s, v0.s[2]\n"
- "add x12, x12, #0x40\n"
+ "add x11, x11, #0x40\n"
"fmla v15.4s, v7.4s, v1.s[2]\n"
"fmla v19.4s, v7.4s, v2.s[2]\n"
"fmla v23.4s, v7.4s, v3.s[2]\n"
"fmla v27.4s, v7.4s, v4.s[2]\n"
- "ldr q7, [x11, #0x30]\n"
- "add x11, x11, #0x40\n"
+ "ldr q7, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
"fmla v8.4s, v6.4s, v0.s[3]\n"
"fmla v12.4s, v6.4s, v1.s[3]\n"
"fmla v16.4s, v6.4s, v2.s[3]\n"
"fmla v20.4s, v6.4s, v3.s[3]\n"
"fmla v24.4s, v6.4s, v4.s[3]\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
"fmla v9.4s, v7.4s, v0.s[3]\n"
- "add x10, x10, #0x40\n"
+ "add x9, x9, #0x40\n"
"fmla v13.4s, v7.4s, v1.s[3]\n"
"fmla v17.4s, v7.4s, v2.s[3]\n"
"fmla v21.4s, v7.4s, v3.s[3]\n"
"fmla v25.4s, v7.4s, v4.s[3]\n"
- "ldr q7, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
"fmla v10.4s, v6.4s, v0.s[3]\n"
"fmla v14.4s, v6.4s, v1.s[3]\n"
"fmla v18.4s, v6.4s, v2.s[3]\n"
@@ -2395,32 +2395,32 @@ void a64_ffhybrid_fp32_mla_6x16 (
"fmla v23.4s, v7.4s, v3.s[3]\n"
"fmla v27.4s, v7.4s, v4.s[3]\n"
"157:" // Height 5: Multiply loop: Main loop skip
- "cbz x27, 159f\n"
+ "cbz x26, 159f\n"
"158:" // Height 5: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr s1, [x25], #0x4\n"
- "sub x27, x27, #0x1\n"
- "ldr s2, [x24], #0x4\n"
- "ldr s3, [x23], #0x4\n"
- "ldr s4, [x22], #0x4\n"
- "ldr q6, [x12, #0x0]\n"
+ "ldr s0, [x25], #0x4\n"
+ "ldr s1, [x24], #0x4\n"
+ "sub x26, x26, #0x1\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr s3, [x22], #0x4\n"
+ "ldr s4, [x21], #0x4\n"
+ "ldr q6, [x11, #0x0]\n"
"fmla v8.4s, v6.4s, v0.s[0]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
"fmla v20.4s, v6.4s, v3.s[0]\n"
- "add x12, x12, #0x10\n"
+ "add x11, x11, #0x10\n"
"fmla v24.4s, v6.4s, v4.s[0]\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
- "add x11, x11, #0x10\n"
+ "add x10, x10, #0x10\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
- "add x10, x10, #0x10\n"
+ "add x9, x9, #0x10\n"
"fmla v21.4s, v7.4s, v3.s[0]\n"
"fmla v25.4s, v7.4s, v4.s[0]\n"
- "ldr q7, [x9, #0x0]\n"
- "add x9, x9, #0x10\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x28, x28, #0x10\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
"fmla v18.4s, v6.4s, v2.s[0]\n"
@@ -2431,22 +2431,22 @@ void a64_ffhybrid_fp32_mla_6x16 (
"fmla v19.4s, v7.4s, v2.s[0]\n"
"fmla v23.4s, v7.4s, v3.s[0]\n"
"fmla v27.4s, v7.4s, v4.s[0]\n"
- "cbnz x27, 158b\n"
+ "cbnz x26, 158b\n"
"159:" // Height 5: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 152b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"tbz %x[flags], #1, 160f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.4s }, [x19]\n"
"fmin v8.4s, v8.4s, v1.4s\n"
"fmin v9.4s, v9.4s, v1.4s\n"
"fmin v10.4s, v10.4s, v1.4s\n"
@@ -2488,183 +2488,183 @@ void a64_ffhybrid_fp32_mla_6x16 (
"fmax v26.4s, v26.4s, v0.4s\n"
"fmax v27.4s, v27.4s, v0.4s\n"
"160:" // Height 5: No activation
- "cmp x14, #0x10\n"
+ "cmp x13, #0x10\n"
"bge 169f\n"
- "tbz x14, #3, 164f\n"
- "st1 { v8.4s }, [x13], #0x10\n"
- "st1 { v9.4s }, [x13], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v21.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
- "st1 { v25.4s }, [x22], #0x10\n"
- "tbz x14, #2, 162f\n"
- "st1 { v10.4s }, [x13], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
- "st1 { v22.4s }, [x23], #0x10\n"
- "st1 { v26.4s }, [x22], #0x10\n"
- "tbz x14, #1, 161f\n"
- "str d11, [x13], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
- "tbz x14, #0, 168f\n"
- "st1 { v11.s }[2], [x13]\n"
- "st1 { v15.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
- "st1 { v23.s }[2], [x23]\n"
- "st1 { v27.s }[2], [x22]\n"
+ "tbz x13, #3, 164f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "st1 { v9.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v13.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v17.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v21.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
+ "st1 { v25.4s }, [x21], #0x10\n"
+ "tbz x13, #2, 162f\n"
+ "st1 { v10.4s }, [x12], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v18.4s }, [x23], #0x10\n"
+ "st1 { v22.4s }, [x22], #0x10\n"
+ "st1 { v26.4s }, [x21], #0x10\n"
+ "tbz x13, #1, 161f\n"
+ "str d11, [x12], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "str d23, [x22], #0x8\n"
+ "str d27, [x21], #0x8\n"
+ "tbz x13, #0, 168f\n"
+ "st1 { v11.s }[2], [x12]\n"
+ "st1 { v15.s }[2], [x24]\n"
+ "st1 { v19.s }[2], [x23]\n"
+ "st1 { v23.s }[2], [x22]\n"
+ "st1 { v27.s }[2], [x21]\n"
"b 168f\n"
"161:" // Height 5: Partial direct writeback: partial_1_12
- "tbz x14, #0, 168f\n"
- "str s11, [x13, #0x0]\n"
- "str s15, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
- "str s23, [x23, #0x0]\n"
- "str s27, [x22, #0x0]\n"
+ "tbz x13, #0, 168f\n"
+ "str s11, [x12, #0x0]\n"
+ "str s15, [x24, #0x0]\n"
+ "str s19, [x23, #0x0]\n"
+ "str s23, [x22, #0x0]\n"
+ "str s27, [x21, #0x0]\n"
"b 168f\n"
"162:" // Height 5: Partial direct writeback: partial_2_8
- "tbz x14, #1, 163f\n"
- "str d10, [x13], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
- "tbz x14, #0, 168f\n"
- "st1 { v10.s }[2], [x13]\n"
- "st1 { v14.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
- "st1 { v22.s }[2], [x23]\n"
- "st1 { v26.s }[2], [x22]\n"
+ "tbz x13, #1, 163f\n"
+ "str d10, [x12], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
+ "str d22, [x22], #0x8\n"
+ "str d26, [x21], #0x8\n"
+ "tbz x13, #0, 168f\n"
+ "st1 { v10.s }[2], [x12]\n"
+ "st1 { v14.s }[2], [x24]\n"
+ "st1 { v18.s }[2], [x23]\n"
+ "st1 { v22.s }[2], [x22]\n"
+ "st1 { v26.s }[2], [x21]\n"
"b 168f\n"
"163:" // Height 5: Partial direct writeback: partial_1_8
- "tbz x14, #0, 168f\n"
- "str s10, [x13, #0x0]\n"
- "str s14, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
- "str s22, [x23, #0x0]\n"
- "str s26, [x22, #0x0]\n"
+ "tbz x13, #0, 168f\n"
+ "str s10, [x12, #0x0]\n"
+ "str s14, [x24, #0x0]\n"
+ "str s18, [x23, #0x0]\n"
+ "str s22, [x22, #0x0]\n"
+ "str s26, [x21, #0x0]\n"
"b 168f\n"
"164:" // Height 5: Partial direct writeback: partial_4_0
- "tbz x14, #2, 166f\n"
- "st1 { v8.4s }, [x13], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
- "tbz x14, #1, 165f\n"
- "str d9, [x13], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
- "tbz x14, #0, 168f\n"
- "st1 { v9.s }[2], [x13]\n"
- "st1 { v13.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
- "st1 { v21.s }[2], [x23]\n"
- "st1 { v25.s }[2], [x22]\n"
+ "tbz x13, #2, 166f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
+ "tbz x13, #1, 165f\n"
+ "str d9, [x12], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
+ "str d21, [x22], #0x8\n"
+ "str d25, [x21], #0x8\n"
+ "tbz x13, #0, 168f\n"
+ "st1 { v9.s }[2], [x12]\n"
+ "st1 { v13.s }[2], [x24]\n"
+ "st1 { v17.s }[2], [x23]\n"
+ "st1 { v21.s }[2], [x22]\n"
+ "st1 { v25.s }[2], [x21]\n"
"b 168f\n"
"165:" // Height 5: Partial direct writeback: partial_1_4
- "tbz x14, #0, 168f\n"
- "str s9, [x13, #0x0]\n"
- "str s13, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
- "str s21, [x23, #0x0]\n"
- "str s25, [x22, #0x0]\n"
+ "tbz x13, #0, 168f\n"
+ "str s9, [x12, #0x0]\n"
+ "str s13, [x24, #0x0]\n"
+ "str s17, [x23, #0x0]\n"
+ "str s21, [x22, #0x0]\n"
+ "str s25, [x21, #0x0]\n"
"b 168f\n"
"166:" // Height 5: Partial direct writeback: partial_2_0
- "tbz x14, #1, 167f\n"
- "str d8, [x13], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
- "tbz x14, #0, 168f\n"
- "st1 { v8.s }[2], [x13]\n"
- "st1 { v12.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
- "st1 { v20.s }[2], [x23]\n"
- "st1 { v24.s }[2], [x22]\n"
+ "tbz x13, #1, 167f\n"
+ "str d8, [x12], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
+ "str d20, [x22], #0x8\n"
+ "str d24, [x21], #0x8\n"
+ "tbz x13, #0, 168f\n"
+ "st1 { v8.s }[2], [x12]\n"
+ "st1 { v12.s }[2], [x24]\n"
+ "st1 { v16.s }[2], [x23]\n"
+ "st1 { v20.s }[2], [x22]\n"
+ "st1 { v24.s }[2], [x21]\n"
"b 168f\n"
"167:" // Height 5: Partial direct writeback: partial_1_0
- "str s8, [x13, #0x0]\n"
- "str s12, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
- "str s20, [x23, #0x0]\n"
- "str s24, [x22, #0x0]\n"
+ "str s8, [x12, #0x0]\n"
+ "str s12, [x24, #0x0]\n"
+ "str s16, [x23, #0x0]\n"
+ "str s20, [x22, #0x0]\n"
+ "str s24, [x21, #0x0]\n"
"168:" // Height 5: Partial direct writeback: Done
"b 170f\n"
"169:" // Height 5: Full writeback
- "str q8, [x13, #0x0]\n"
- "str q9, [x13, #0x10]\n"
- "str q10, [x13, #0x20]\n"
- "str q11, [x13, #0x30]\n"
- "add x13, x13, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
- "str q24, [x22, #0x0]\n"
- "str q25, [x22, #0x10]\n"
- "str q26, [x22, #0x20]\n"
- "str q27, [x22, #0x30]\n"
+ "str q8, [x12, #0x0]\n"
+ "str q9, [x12, #0x10]\n"
+ "str q10, [x12, #0x20]\n"
+ "str q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "str q20, [x22, #0x0]\n"
+ "str q21, [x22, #0x10]\n"
+ "str q22, [x22, #0x20]\n"
+ "str q23, [x22, #0x30]\n"
+ "str q24, [x21, #0x0]\n"
+ "str q25, [x21, #0x10]\n"
+ "str q26, [x21, #0x20]\n"
+ "str q27, [x21, #0x30]\n"
"170:" // Height 5: Writeback done
- "subs x14, x14, #0x10\n"
+ "subs x13, x13, #0x10\n"
"bgt 138b\n"
"b 206f\n"
"171:" // Height 6
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x21, #0x18\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x15, %x[bias]\n"
- "mov x13, %x[output_ptr]\n"
- "madd %x[output_ptr], x20, x21, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x20, #0x18\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x14, %x[bias]\n"
+ "mov x12, %x[output_ptr]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
"172:" // Height 6: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #2\n"
- "add x10, x11, x20, LSL #2\n"
- "add x9, x10, x20, LSL #2\n"
- "add x20, x9, x20, LSL #2\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0xc\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0xc\n"
"bgt 173f\n"
- "cmp x14, #0x8\n"
- "mov x9, x12\n"
+ "cmp x13, #0x8\n"
+ "mov x28, x11\n"
"bgt 173f\n"
- "cmp x14, #0x4\n"
- "mov x10, x12\n"
+ "cmp x13, #0x4\n"
+ "mov x9, x11\n"
"bgt 173f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"173:" // Height 6: B setup done
- "cbz x15, 174f\n"
- "ldr q8, [x15, #0x0]\n"
- "ldr q9, [x15, #0x10]\n"
+ "cbz x14, 174f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
"mov v12.16b, v8.16b\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x15, #0x20]\n"
- "ldr q11, [x15, #0x30]\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
"mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
"mov v16.16b, v8.16b\n"
"mov v17.16b, v9.16b\n"
- "add x15, x15, #0x40\n"
+ "add x14, x14, #0x40\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
"mov v20.16b, v8.16b\n"
@@ -2682,174 +2682,174 @@ void a64_ffhybrid_fp32_mla_6x16 (
"b 185f\n"
"174:" // Height 6: no bias
"tbz %x[flags], #0, 184f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "cmp x14, #0x10\n"
- "add x21, x22, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "cmp x13, #0x10\n"
+ "add x20, x21, x19, LSL #2\n"
"bge 183f\n"
- "tbz x14, #3, 178f\n"
- "ld1 { v8.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
- "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
- "ld1 { v29.4s }, [x21], #0x10\n"
- "tbz x14, #2, 176f\n"
- "ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
- "ld1 { v26.4s }, [x22], #0x10\n"
- "ld1 { v30.4s }, [x21], #0x10\n"
- "tbz x14, #1, 175f\n"
- "ldr d11, [x13], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "mov x20, #0x38\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
- "ldr d31, [x21], #0x8\n"
- "tbz x14, #0, 182f\n"
- "ld1 { v11.s }[2], [x13]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
- "ld1 { v27.s }[2], [x22]\n"
- "ld1 { v31.s }[2], [x21]\n"
+ "tbz x13, #3, 178f\n"
+ "ld1 { v8.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v24.4s }, [x21], #0x10\n"
+ "ld1 { v28.4s }, [x20], #0x10\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
+ "ld1 { v29.4s }, [x20], #0x10\n"
+ "tbz x13, #2, 176f\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
+ "ld1 { v26.4s }, [x21], #0x10\n"
+ "ld1 { v30.4s }, [x20], #0x10\n"
+ "tbz x13, #1, 175f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "ldr d27, [x21], #0x8\n"
+ "ldr d31, [x20], #0x8\n"
+ "tbz x13, #0, 182f\n"
+ "ld1 { v11.s }[2], [x12]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v23.s }[2], [x22]\n"
+ "ld1 { v27.s }[2], [x21]\n"
+ "ld1 { v31.s }[2], [x20]\n"
"b 182f\n"
"175:" // Height 6: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
- "tbz x14, #0, 182f\n"
- "ldr s11, [x13, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
- "ldr s27, [x22, #0x0]\n"
- "ldr s31, [x21, #0x0]\n"
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 182f\n"
+ "ldr s11, [x12, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s23, [x22, #0x0]\n"
+ "ldr s27, [x21, #0x0]\n"
+ "ldr s31, [x20, #0x0]\n"
"b 182f\n"
"176:" // Height 6: Partial accumulate: partial_2_8
- "tbz x14, #1, 177f\n"
- "ldr d10, [x13], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "mov x20, #0x28\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
- "ldr d30, [x21], #0x8\n"
- "tbz x14, #0, 182f\n"
- "ld1 { v10.s }[2], [x13]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
- "ld1 { v26.s }[2], [x22]\n"
- "ld1 { v30.s }[2], [x21]\n"
+ "tbz x13, #1, 177f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "ldr d26, [x21], #0x8\n"
+ "ldr d30, [x20], #0x8\n"
+ "tbz x13, #0, 182f\n"
+ "ld1 { v10.s }[2], [x12]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v22.s }[2], [x22]\n"
+ "ld1 { v26.s }[2], [x21]\n"
+ "ld1 { v30.s }[2], [x20]\n"
"b 182f\n"
"177:" // Height 6: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
- "tbz x14, #0, 182f\n"
- "ldr s10, [x13, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
- "ldr s26, [x22, #0x0]\n"
- "ldr s30, [x21, #0x0]\n"
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 182f\n"
+ "ldr s10, [x12, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "ldr s22, [x22, #0x0]\n"
+ "ldr s26, [x21, #0x0]\n"
+ "ldr s30, [x20, #0x0]\n"
"b 182f\n"
"178:" // Height 6: Partial accumulate: partial_4_0
- "tbz x14, #2, 180f\n"
- "ld1 { v8.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
- "tbz x14, #1, 179f\n"
- "ldr d9, [x13], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "mov x20, #0x18\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
- "ldr d29, [x21], #0x8\n"
- "tbz x14, #0, 182f\n"
- "ld1 { v9.s }[2], [x13]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
- "ld1 { v29.s }[2], [x21]\n"
+ "tbz x13, #2, 180f\n"
+ "ld1 { v8.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v24.4s }, [x21], #0x10\n"
+ "ld1 { v28.4s }, [x20], #0x10\n"
+ "tbz x13, #1, 179f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d25, [x21], #0x8\n"
+ "ldr d29, [x20], #0x8\n"
+ "tbz x13, #0, 182f\n"
+ "ld1 { v9.s }[2], [x12]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v21.s }[2], [x22]\n"
+ "ld1 { v25.s }[2], [x21]\n"
+ "ld1 { v29.s }[2], [x20]\n"
"b 182f\n"
"179:" // Height 6: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
- "tbz x14, #0, 182f\n"
- "ldr s9, [x13, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
- "ldr s29, [x21, #0x0]\n"
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 182f\n"
+ "ldr s9, [x12, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s17, [x23, #0x0]\n"
+ "ldr s21, [x22, #0x0]\n"
+ "ldr s25, [x21, #0x0]\n"
+ "ldr s29, [x20, #0x0]\n"
"b 182f\n"
"180:" // Height 6: Partial accumulate: partial_2_0
- "tbz x14, #1, 181f\n"
- "ldr d8, [x13], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d24, [x22], #0x8\n"
- "ldr d28, [x21], #0x8\n"
- "tbz x14, #0, 182f\n"
- "ld1 { v8.s }[2], [x13]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v16.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
- "ld1 { v24.s }[2], [x22]\n"
- "ld1 { v28.s }[2], [x21]\n"
+ "tbz x13, #1, 181f\n"
+ "ldr d8, [x12], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d16, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "ldr d24, [x21], #0x8\n"
+ "ldr d28, [x20], #0x8\n"
+ "tbz x13, #0, 182f\n"
+ "ld1 { v8.s }[2], [x12]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v16.s }[2], [x23]\n"
+ "ld1 { v20.s }[2], [x22]\n"
+ "ld1 { v24.s }[2], [x21]\n"
+ "ld1 { v28.s }[2], [x20]\n"
"b 182f\n"
"181:" // Height 6: Partial accumulate: partial_1_0
- "ldr s8, [x13, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s16, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
- "ldr s24, [x22, #0x0]\n"
- "ldr s28, [x21, #0x0]\n"
+ "ldr s8, [x12, #0x0]\n"
+ "ldr s12, [x24, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s16, [x23, #0x0]\n"
+ "ldr s20, [x22, #0x0]\n"
+ "ldr s24, [x21, #0x0]\n"
+ "ldr s28, [x20, #0x0]\n"
"182:" // Height 6: Partial accumulate: Done
- "sub x13, x13, x20\n"
+ "sub x12, x12, x19\n"
"b 185f\n"
"183:" // Height 6: full accumulate
- "ldr q8, [x13, #0x0]\n"
- "ldr q9, [x13, #0x10]\n"
- "ldr q10, [x13, #0x20]\n"
- "ldr q11, [x13, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q24, [x22, #0x0]\n"
- "ldr q25, [x22, #0x10]\n"
- "ldr q26, [x22, #0x20]\n"
- "ldr q27, [x22, #0x30]\n"
- "ldr q28, [x21, #0x0]\n"
- "ldr q29, [x21, #0x10]\n"
- "ldr q30, [x21, #0x20]\n"
- "ldr q31, [x21, #0x30]\n"
+ "ldr q8, [x12, #0x0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "ldr q10, [x12, #0x20]\n"
+ "ldr q11, [x12, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x0]\n"
+ "ldr q17, [x23, #0x10]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "ldr q19, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
+ "ldr q24, [x21, #0x0]\n"
+ "ldr q25, [x21, #0x10]\n"
+ "ldr q26, [x21, #0x20]\n"
+ "ldr q27, [x21, #0x30]\n"
+ "ldr q28, [x20, #0x0]\n"
+ "ldr q29, [x20, #0x10]\n"
+ "ldr q30, [x20, #0x20]\n"
+ "ldr q31, [x20, #0x30]\n"
"b 185f\n"
"184:" // Height 6: no accumulate
"movi v8.16b, #0x0\n"
@@ -2877,291 +2877,291 @@ void a64_ffhybrid_fp32_mla_6x16 (
"movi v30.16b, #0x0\n"
"movi v31.16b, #0x0\n"
"185:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"186:" // Height 6: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 187f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 188f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x22, x22, x20, LSL #2\n"
- "add x21, x21, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 188f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "add x20, x20, x19, LSL #2\n"
"b 188f\n"
"187:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"188:" // Height 6: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"blt 191f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x8\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q3, [x23, #0x0]\n"
- "ldr q4, [x22, #0x0]\n"
- "ldr q5, [x21, #0x0]\n"
- "ldr q6, [x12, #0x0]\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x8\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
+ "ldr q5, [x20, #0x0]\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"blt 190f\n"
"189:" // Height 6: Multiply loop: Main loop head
"fmla v8.4s, v6.4s, v0.s[0]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x8\n"
+ "sub x26, x26, #0x4\n"
+ "cmp x26, #0x8\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
"fmla v20.4s, v6.4s, v3.s[0]\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"fmla v24.4s, v6.4s, v4.s[0]\n"
"fmla v28.4s, v6.4s, v5.s[0]\n"
- "ldr q6, [x10, #0x0]\n"
- "add x24, x24, #0x10\n"
+ "ldr q6, [x9, #0x0]\n"
+ "add x23, x23, #0x10\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
- "add x23, x23, #0x10\n"
"add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
"fmla v21.4s, v7.4s, v3.s[0]\n"
- "add x21, x21, #0x10\n"
+ "add x20, x20, #0x10\n"
"fmla v25.4s, v7.4s, v4.s[0]\n"
"fmla v29.4s, v7.4s, v5.s[0]\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
"fmla v18.4s, v6.4s, v2.s[0]\n"
"fmla v22.4s, v6.4s, v3.s[0]\n"
"fmla v26.4s, v6.4s, v4.s[0]\n"
"fmla v30.4s, v6.4s, v5.s[0]\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q6, [x11, #0x10]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
"fmla v15.4s, v7.4s, v1.s[0]\n"
"fmla v19.4s, v7.4s, v2.s[0]\n"
"fmla v23.4s, v7.4s, v3.s[0]\n"
"fmla v27.4s, v7.4s, v4.s[0]\n"
"fmla v31.4s, v7.4s, v5.s[0]\n"
- "ldr q7, [x11, #0x10]\n"
+ "ldr q7, [x10, #0x10]\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
"fmla v12.4s, v6.4s, v1.s[1]\n"
"fmla v16.4s, v6.4s, v2.s[1]\n"
"fmla v20.4s, v6.4s, v3.s[1]\n"
"fmla v24.4s, v6.4s, v4.s[1]\n"
"fmla v28.4s, v6.4s, v5.s[1]\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
"fmla v9.4s, v7.4s, v0.s[1]\n"
"fmla v13.4s, v7.4s, v1.s[1]\n"
"fmla v17.4s, v7.4s, v2.s[1]\n"
"fmla v21.4s, v7.4s, v3.s[1]\n"
"fmla v25.4s, v7.4s, v4.s[1]\n"
"fmla v29.4s, v7.4s, v5.s[1]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
"fmla v10.4s, v6.4s, v0.s[1]\n"
"fmla v14.4s, v6.4s, v1.s[1]\n"
"fmla v18.4s, v6.4s, v2.s[1]\n"
"fmla v22.4s, v6.4s, v3.s[1]\n"
"fmla v26.4s, v6.4s, v4.s[1]\n"
"fmla v30.4s, v6.4s, v5.s[1]\n"
- "ldr q6, [x12, #0x20]\n"
+ "ldr q6, [x11, #0x20]\n"
"fmla v11.4s, v7.4s, v0.s[1]\n"
"fmla v15.4s, v7.4s, v1.s[1]\n"
"fmla v19.4s, v7.4s, v2.s[1]\n"
"fmla v23.4s, v7.4s, v3.s[1]\n"
"fmla v27.4s, v7.4s, v4.s[1]\n"
"fmla v31.4s, v7.4s, v5.s[1]\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
"fmla v8.4s, v6.4s, v0.s[2]\n"
"fmla v12.4s, v6.4s, v1.s[2]\n"
"fmla v16.4s, v6.4s, v2.s[2]\n"
"fmla v20.4s, v6.4s, v3.s[2]\n"
"fmla v24.4s, v6.4s, v4.s[2]\n"
"fmla v28.4s, v6.4s, v5.s[2]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
"fmla v9.4s, v7.4s, v0.s[2]\n"
"fmla v13.4s, v7.4s, v1.s[2]\n"
"fmla v17.4s, v7.4s, v2.s[2]\n"
"fmla v21.4s, v7.4s, v3.s[2]\n"
"fmla v25.4s, v7.4s, v4.s[2]\n"
"fmla v29.4s, v7.4s, v5.s[2]\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
"fmla v10.4s, v6.4s, v0.s[2]\n"
"fmla v14.4s, v6.4s, v1.s[2]\n"
"fmla v18.4s, v6.4s, v2.s[2]\n"
"fmla v22.4s, v6.4s, v3.s[2]\n"
"fmla v26.4s, v6.4s, v4.s[2]\n"
"fmla v30.4s, v6.4s, v5.s[2]\n"
- "ldr q6, [x12, #0x30]\n"
- "add x12, x12, #0x40\n"
+ "ldr q6, [x11, #0x30]\n"
+ "add x11, x11, #0x40\n"
"fmla v11.4s, v7.4s, v0.s[2]\n"
"fmla v15.4s, v7.4s, v1.s[2]\n"
"fmla v19.4s, v7.4s, v2.s[2]\n"
"fmla v23.4s, v7.4s, v3.s[2]\n"
"fmla v27.4s, v7.4s, v4.s[2]\n"
"fmla v31.4s, v7.4s, v5.s[2]\n"
- "ldr q7, [x11, #0x30]\n"
- "add x11, x11, #0x40\n"
+ "ldr q7, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
"fmla v8.4s, v6.4s, v0.s[3]\n"
"fmla v12.4s, v6.4s, v1.s[3]\n"
"fmla v16.4s, v6.4s, v2.s[3]\n"
"fmla v20.4s, v6.4s, v3.s[3]\n"
"fmla v24.4s, v6.4s, v4.s[3]\n"
"fmla v28.4s, v6.4s, v5.s[3]\n"
- "ldr q6, [x10, #0x30]\n"
- "add x10, x10, #0x40\n"
+ "ldr q6, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
"fmla v9.4s, v7.4s, v0.s[3]\n"
"fmla v13.4s, v7.4s, v1.s[3]\n"
"fmla v17.4s, v7.4s, v2.s[3]\n"
"fmla v21.4s, v7.4s, v3.s[3]\n"
"fmla v25.4s, v7.4s, v4.s[3]\n"
"fmla v29.4s, v7.4s, v5.s[3]\n"
- "ldr q7, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
"fmla v10.4s, v6.4s, v0.s[3]\n"
"fmla v14.4s, v6.4s, v1.s[3]\n"
"fmla v18.4s, v6.4s, v2.s[3]\n"
"fmla v22.4s, v6.4s, v3.s[3]\n"
"fmla v26.4s, v6.4s, v4.s[3]\n"
"fmla v30.4s, v6.4s, v5.s[3]\n"
- "ldr q6, [x12, #0x0]\n"
+ "ldr q6, [x11, #0x0]\n"
"fmla v11.4s, v7.4s, v0.s[3]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
"fmla v15.4s, v7.4s, v1.s[3]\n"
- "ldr q1, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
"fmla v19.4s, v7.4s, v2.s[3]\n"
- "ldr q2, [x24, #0x0]\n"
+ "ldr q2, [x23, #0x0]\n"
"fmla v23.4s, v7.4s, v3.s[3]\n"
- "ldr q3, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
"fmla v27.4s, v7.4s, v4.s[3]\n"
- "ldr q4, [x22, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
"fmla v31.4s, v7.4s, v5.s[3]\n"
- "ldr q5, [x21, #0x0]\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr q5, [x20, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"bge 189b\n"
"190:" // Height 6: Multiply loop: Single iteration only
"fmla v8.4s, v6.4s, v0.s[0]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
- "sub x27, x27, #0x4\n"
- "add x26, x26, #0x10\n"
+ "sub x26, x26, #0x4\n"
+ "add x25, x25, #0x10\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
"fmla v20.4s, v6.4s, v3.s[0]\n"
- "add x25, x25, #0x10\n"
"add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
"fmla v24.4s, v6.4s, v4.s[0]\n"
"fmla v28.4s, v6.4s, v5.s[0]\n"
- "ldr q6, [x10, #0x0]\n"
- "add x23, x23, #0x10\n"
+ "ldr q6, [x9, #0x0]\n"
+ "add x22, x22, #0x10\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
- "add x22, x22, #0x10\n"
"add x21, x21, #0x10\n"
+ "add x20, x20, #0x10\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
"fmla v21.4s, v7.4s, v3.s[0]\n"
"fmla v25.4s, v7.4s, v4.s[0]\n"
"fmla v29.4s, v7.4s, v5.s[0]\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x28, #0x0]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
"fmla v18.4s, v6.4s, v2.s[0]\n"
"fmla v22.4s, v6.4s, v3.s[0]\n"
"fmla v26.4s, v6.4s, v4.s[0]\n"
"fmla v30.4s, v6.4s, v5.s[0]\n"
- "ldr q6, [x12, #0x10]\n"
+ "ldr q6, [x11, #0x10]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
"fmla v15.4s, v7.4s, v1.s[0]\n"
"fmla v19.4s, v7.4s, v2.s[0]\n"
"fmla v23.4s, v7.4s, v3.s[0]\n"
"fmla v27.4s, v7.4s, v4.s[0]\n"
"fmla v31.4s, v7.4s, v5.s[0]\n"
- "ldr q7, [x11, #0x10]\n"
+ "ldr q7, [x10, #0x10]\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
"fmla v12.4s, v6.4s, v1.s[1]\n"
"fmla v16.4s, v6.4s, v2.s[1]\n"
"fmla v20.4s, v6.4s, v3.s[1]\n"
"fmla v24.4s, v6.4s, v4.s[1]\n"
"fmla v28.4s, v6.4s, v5.s[1]\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
"fmla v9.4s, v7.4s, v0.s[1]\n"
"fmla v13.4s, v7.4s, v1.s[1]\n"
"fmla v17.4s, v7.4s, v2.s[1]\n"
"fmla v21.4s, v7.4s, v3.s[1]\n"
"fmla v25.4s, v7.4s, v4.s[1]\n"
"fmla v29.4s, v7.4s, v5.s[1]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
"fmla v10.4s, v6.4s, v0.s[1]\n"
"fmla v14.4s, v6.4s, v1.s[1]\n"
"fmla v18.4s, v6.4s, v2.s[1]\n"
"fmla v22.4s, v6.4s, v3.s[1]\n"
"fmla v26.4s, v6.4s, v4.s[1]\n"
"fmla v30.4s, v6.4s, v5.s[1]\n"
- "ldr q6, [x12, #0x20]\n"
+ "ldr q6, [x11, #0x20]\n"
"fmla v11.4s, v7.4s, v0.s[1]\n"
"fmla v15.4s, v7.4s, v1.s[1]\n"
"fmla v19.4s, v7.4s, v2.s[1]\n"
"fmla v23.4s, v7.4s, v3.s[1]\n"
"fmla v27.4s, v7.4s, v4.s[1]\n"
"fmla v31.4s, v7.4s, v5.s[1]\n"
- "ldr q7, [x11, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
"fmla v8.4s, v6.4s, v0.s[2]\n"
"fmla v12.4s, v6.4s, v1.s[2]\n"
"fmla v16.4s, v6.4s, v2.s[2]\n"
"fmla v20.4s, v6.4s, v3.s[2]\n"
"fmla v24.4s, v6.4s, v4.s[2]\n"
"fmla v28.4s, v6.4s, v5.s[2]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
"fmla v9.4s, v7.4s, v0.s[2]\n"
"fmla v13.4s, v7.4s, v1.s[2]\n"
"fmla v17.4s, v7.4s, v2.s[2]\n"
"fmla v21.4s, v7.4s, v3.s[2]\n"
"fmla v25.4s, v7.4s, v4.s[2]\n"
"fmla v29.4s, v7.4s, v5.s[2]\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
"fmla v10.4s, v6.4s, v0.s[2]\n"
"fmla v14.4s, v6.4s, v1.s[2]\n"
"fmla v18.4s, v6.4s, v2.s[2]\n"
"fmla v22.4s, v6.4s, v3.s[2]\n"
"fmla v26.4s, v6.4s, v4.s[2]\n"
"fmla v30.4s, v6.4s, v5.s[2]\n"
- "ldr q6, [x12, #0x30]\n"
- "add x12, x12, #0x40\n"
+ "ldr q6, [x11, #0x30]\n"
+ "add x11, x11, #0x40\n"
"fmla v11.4s, v7.4s, v0.s[2]\n"
"fmla v15.4s, v7.4s, v1.s[2]\n"
"fmla v19.4s, v7.4s, v2.s[2]\n"
"fmla v23.4s, v7.4s, v3.s[2]\n"
"fmla v27.4s, v7.4s, v4.s[2]\n"
"fmla v31.4s, v7.4s, v5.s[2]\n"
- "ldr q7, [x11, #0x30]\n"
- "add x11, x11, #0x40\n"
+ "ldr q7, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
"fmla v8.4s, v6.4s, v0.s[3]\n"
"fmla v12.4s, v6.4s, v1.s[3]\n"
"fmla v16.4s, v6.4s, v2.s[3]\n"
"fmla v20.4s, v6.4s, v3.s[3]\n"
"fmla v24.4s, v6.4s, v4.s[3]\n"
"fmla v28.4s, v6.4s, v5.s[3]\n"
- "ldr q6, [x10, #0x30]\n"
- "add x10, x10, #0x40\n"
+ "ldr q6, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
"fmla v9.4s, v7.4s, v0.s[3]\n"
"fmla v13.4s, v7.4s, v1.s[3]\n"
"fmla v17.4s, v7.4s, v2.s[3]\n"
"fmla v21.4s, v7.4s, v3.s[3]\n"
"fmla v25.4s, v7.4s, v4.s[3]\n"
"fmla v29.4s, v7.4s, v5.s[3]\n"
- "ldr q7, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
"fmla v10.4s, v6.4s, v0.s[3]\n"
"fmla v14.4s, v6.4s, v1.s[3]\n"
"fmla v18.4s, v6.4s, v2.s[3]\n"
@@ -3175,35 +3175,35 @@ void a64_ffhybrid_fp32_mla_6x16 (
"fmla v27.4s, v7.4s, v4.s[3]\n"
"fmla v31.4s, v7.4s, v5.s[3]\n"
"191:" // Height 6: Multiply loop: Main loop skip
- "cbz x27, 193f\n"
+ "cbz x26, 193f\n"
"192:" // Height 6: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr s1, [x25], #0x4\n"
- "sub x27, x27, #0x1\n"
- "ldr s2, [x24], #0x4\n"
- "ldr s3, [x23], #0x4\n"
- "ldr s4, [x22], #0x4\n"
- "ldr s5, [x21], #0x4\n"
- "ldr q6, [x12, #0x0]\n"
- "ldr q7, [x11, #0x0]\n"
+ "ldr s0, [x25], #0x4\n"
+ "ldr s1, [x24], #0x4\n"
+ "sub x26, x26, #0x1\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr s3, [x22], #0x4\n"
+ "ldr s4, [x21], #0x4\n"
+ "ldr s5, [x20], #0x4\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"fmla v8.4s, v6.4s, v0.s[0]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
"fmla v20.4s, v6.4s, v3.s[0]\n"
- "add x12, x12, #0x10\n"
"add x11, x11, #0x10\n"
+ "add x10, x10, #0x10\n"
"fmla v24.4s, v6.4s, v4.s[0]\n"
"fmla v28.4s, v6.4s, v5.s[0]\n"
- "ldr q6, [x10, #0x0]\n"
- "add x10, x10, #0x10\n"
+ "ldr q6, [x9, #0x0]\n"
+ "add x9, x9, #0x10\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
"fmla v21.4s, v7.4s, v3.s[0]\n"
"fmla v25.4s, v7.4s, v4.s[0]\n"
"fmla v29.4s, v7.4s, v5.s[0]\n"
- "ldr q7, [x9, #0x0]\n"
- "add x9, x9, #0x10\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x28, x28, #0x10\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
"fmla v18.4s, v6.4s, v2.s[0]\n"
@@ -3216,23 +3216,23 @@ void a64_ffhybrid_fp32_mla_6x16 (
"fmla v23.4s, v7.4s, v3.s[0]\n"
"fmla v27.4s, v7.4s, v4.s[0]\n"
"fmla v31.4s, v7.4s, v5.s[0]\n"
- "cbnz x27, 192b\n"
+ "cbnz x26, 192b\n"
"193:" // Height 6: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 186b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"tbz %x[flags], #1, 194f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.4s }, [x19]\n"
"fmin v8.4s, v8.4s, v1.4s\n"
"fmin v9.4s, v9.4s, v1.4s\n"
"fmin v10.4s, v10.4s, v1.4s\n"
@@ -3282,178 +3282,178 @@ void a64_ffhybrid_fp32_mla_6x16 (
"fmax v30.4s, v30.4s, v0.4s\n"
"fmax v31.4s, v31.4s, v0.4s\n"
"194:" // Height 6: No activation
- "cmp x14, #0x10\n"
+ "cmp x13, #0x10\n"
"bge 203f\n"
- "tbz x14, #3, 198f\n"
- "st1 { v8.4s }, [x13], #0x10\n"
- "st1 { v9.4s }, [x13], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v21.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
- "st1 { v25.4s }, [x22], #0x10\n"
- "st1 { v28.4s }, [x21], #0x10\n"
- "st1 { v29.4s }, [x21], #0x10\n"
- "tbz x14, #2, 196f\n"
- "st1 { v10.4s }, [x13], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
- "st1 { v22.4s }, [x23], #0x10\n"
- "st1 { v26.4s }, [x22], #0x10\n"
- "st1 { v30.4s }, [x21], #0x10\n"
- "tbz x14, #1, 195f\n"
- "str d11, [x13], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
- "str d31, [x21], #0x8\n"
- "tbz x14, #0, 202f\n"
- "st1 { v11.s }[2], [x13]\n"
- "st1 { v15.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
- "st1 { v23.s }[2], [x23]\n"
- "st1 { v27.s }[2], [x22]\n"
- "st1 { v31.s }[2], [x21]\n"
+ "tbz x13, #3, 198f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "st1 { v9.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v13.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v17.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v21.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
+ "st1 { v25.4s }, [x21], #0x10\n"
+ "st1 { v28.4s }, [x20], #0x10\n"
+ "st1 { v29.4s }, [x20], #0x10\n"
+ "tbz x13, #2, 196f\n"
+ "st1 { v10.4s }, [x12], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v18.4s }, [x23], #0x10\n"
+ "st1 { v22.4s }, [x22], #0x10\n"
+ "st1 { v26.4s }, [x21], #0x10\n"
+ "st1 { v30.4s }, [x20], #0x10\n"
+ "tbz x13, #1, 195f\n"
+ "str d11, [x12], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "str d23, [x22], #0x8\n"
+ "str d27, [x21], #0x8\n"
+ "str d31, [x20], #0x8\n"
+ "tbz x13, #0, 202f\n"
+ "st1 { v11.s }[2], [x12]\n"
+ "st1 { v15.s }[2], [x24]\n"
+ "st1 { v19.s }[2], [x23]\n"
+ "st1 { v23.s }[2], [x22]\n"
+ "st1 { v27.s }[2], [x21]\n"
+ "st1 { v31.s }[2], [x20]\n"
"b 202f\n"
"195:" // Height 6: Partial direct writeback: partial_1_12
- "tbz x14, #0, 202f\n"
- "str s11, [x13, #0x0]\n"
- "str s15, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
- "str s23, [x23, #0x0]\n"
- "str s27, [x22, #0x0]\n"
- "str s31, [x21, #0x0]\n"
+ "tbz x13, #0, 202f\n"
+ "str s11, [x12, #0x0]\n"
+ "str s15, [x24, #0x0]\n"
+ "str s19, [x23, #0x0]\n"
+ "str s23, [x22, #0x0]\n"
+ "str s27, [x21, #0x0]\n"
+ "str s31, [x20, #0x0]\n"
"b 202f\n"
"196:" // Height 6: Partial direct writeback: partial_2_8
- "tbz x14, #1, 197f\n"
- "str d10, [x13], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
- "str d30, [x21], #0x8\n"
- "tbz x14, #0, 202f\n"
- "st1 { v10.s }[2], [x13]\n"
- "st1 { v14.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
- "st1 { v22.s }[2], [x23]\n"
- "st1 { v26.s }[2], [x22]\n"
- "st1 { v30.s }[2], [x21]\n"
+ "tbz x13, #1, 197f\n"
+ "str d10, [x12], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
+ "str d22, [x22], #0x8\n"
+ "str d26, [x21], #0x8\n"
+ "str d30, [x20], #0x8\n"
+ "tbz x13, #0, 202f\n"
+ "st1 { v10.s }[2], [x12]\n"
+ "st1 { v14.s }[2], [x24]\n"
+ "st1 { v18.s }[2], [x23]\n"
+ "st1 { v22.s }[2], [x22]\n"
+ "st1 { v26.s }[2], [x21]\n"
+ "st1 { v30.s }[2], [x20]\n"
"b 202f\n"
"197:" // Height 6: Partial direct writeback: partial_1_8
- "tbz x14, #0, 202f\n"
- "str s10, [x13, #0x0]\n"
- "str s14, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
- "str s22, [x23, #0x0]\n"
- "str s26, [x22, #0x0]\n"
- "str s30, [x21, #0x0]\n"
+ "tbz x13, #0, 202f\n"
+ "str s10, [x12, #0x0]\n"
+ "str s14, [x24, #0x0]\n"
+ "str s18, [x23, #0x0]\n"
+ "str s22, [x22, #0x0]\n"
+ "str s26, [x21, #0x0]\n"
+ "str s30, [x20, #0x0]\n"
"b 202f\n"
"198:" // Height 6: Partial direct writeback: partial_4_0
- "tbz x14, #2, 200f\n"
- "st1 { v8.4s }, [x13], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
- "st1 { v28.4s }, [x21], #0x10\n"
- "tbz x14, #1, 199f\n"
- "str d9, [x13], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
- "str d29, [x21], #0x8\n"
- "tbz x14, #0, 202f\n"
- "st1 { v9.s }[2], [x13]\n"
- "st1 { v13.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
- "st1 { v21.s }[2], [x23]\n"
- "st1 { v25.s }[2], [x22]\n"
- "st1 { v29.s }[2], [x21]\n"
+ "tbz x13, #2, 200f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
+ "st1 { v28.4s }, [x20], #0x10\n"
+ "tbz x13, #1, 199f\n"
+ "str d9, [x12], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
+ "str d21, [x22], #0x8\n"
+ "str d25, [x21], #0x8\n"
+ "str d29, [x20], #0x8\n"
+ "tbz x13, #0, 202f\n"
+ "st1 { v9.s }[2], [x12]\n"
+ "st1 { v13.s }[2], [x24]\n"
+ "st1 { v17.s }[2], [x23]\n"
+ "st1 { v21.s }[2], [x22]\n"
+ "st1 { v25.s }[2], [x21]\n"
+ "st1 { v29.s }[2], [x20]\n"
"b 202f\n"
"199:" // Height 6: Partial direct writeback: partial_1_4
- "tbz x14, #0, 202f\n"
- "str s9, [x13, #0x0]\n"
- "str s13, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
- "str s21, [x23, #0x0]\n"
- "str s25, [x22, #0x0]\n"
- "str s29, [x21, #0x0]\n"
+ "tbz x13, #0, 202f\n"
+ "str s9, [x12, #0x0]\n"
+ "str s13, [x24, #0x0]\n"
+ "str s17, [x23, #0x0]\n"
+ "str s21, [x22, #0x0]\n"
+ "str s25, [x21, #0x0]\n"
+ "str s29, [x20, #0x0]\n"
"b 202f\n"
"200:" // Height 6: Partial direct writeback: partial_2_0
- "tbz x14, #1, 201f\n"
- "str d8, [x13], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
- "str d28, [x21], #0x8\n"
- "tbz x14, #0, 202f\n"
- "st1 { v8.s }[2], [x13]\n"
- "st1 { v12.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
- "st1 { v20.s }[2], [x23]\n"
- "st1 { v24.s }[2], [x22]\n"
- "st1 { v28.s }[2], [x21]\n"
+ "tbz x13, #1, 201f\n"
+ "str d8, [x12], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
+ "str d20, [x22], #0x8\n"
+ "str d24, [x21], #0x8\n"
+ "str d28, [x20], #0x8\n"
+ "tbz x13, #0, 202f\n"
+ "st1 { v8.s }[2], [x12]\n"
+ "st1 { v12.s }[2], [x24]\n"
+ "st1 { v16.s }[2], [x23]\n"
+ "st1 { v20.s }[2], [x22]\n"
+ "st1 { v24.s }[2], [x21]\n"
+ "st1 { v28.s }[2], [x20]\n"
"b 202f\n"
"201:" // Height 6: Partial direct writeback: partial_1_0
- "str s8, [x13, #0x0]\n"
- "str s12, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
- "str s20, [x23, #0x0]\n"
- "str s24, [x22, #0x0]\n"
- "str s28, [x21, #0x0]\n"
+ "str s8, [x12, #0x0]\n"
+ "str s12, [x24, #0x0]\n"
+ "str s16, [x23, #0x0]\n"
+ "str s20, [x22, #0x0]\n"
+ "str s24, [x21, #0x0]\n"
+ "str s28, [x20, #0x0]\n"
"202:" // Height 6: Partial direct writeback: Done
"b 204f\n"
"203:" // Height 6: Full writeback
- "str q8, [x13, #0x0]\n"
- "str q9, [x13, #0x10]\n"
- "str q10, [x13, #0x20]\n"
- "str q11, [x13, #0x30]\n"
- "add x13, x13, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
- "str q24, [x22, #0x0]\n"
- "str q25, [x22, #0x10]\n"
- "str q26, [x22, #0x20]\n"
- "str q27, [x22, #0x30]\n"
- "str q28, [x21, #0x0]\n"
- "str q29, [x21, #0x10]\n"
- "str q30, [x21, #0x20]\n"
- "str q31, [x21, #0x30]\n"
+ "str q8, [x12, #0x0]\n"
+ "str q9, [x12, #0x10]\n"
+ "str q10, [x12, #0x20]\n"
+ "str q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "str q20, [x22, #0x0]\n"
+ "str q21, [x22, #0x10]\n"
+ "str q22, [x22, #0x20]\n"
+ "str q23, [x22, #0x30]\n"
+ "str q24, [x21, #0x0]\n"
+ "str q25, [x21, #0x10]\n"
+ "str q26, [x21, #0x20]\n"
+ "str q27, [x21, #0x30]\n"
+ "str q28, [x20, #0x0]\n"
+ "str q29, [x20, #0x10]\n"
+ "str q30, [x20, #0x20]\n"
+ "str q31, [x20, #0x30]\n"
"204:" // Height 6: Writeback done
- "subs x14, x14, #0x10\n"
+ "subs x13, x13, #0x10\n"
"bgt 172b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 206f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 205f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"205:" // Update direct input
- "mov x20, #0x18\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x18\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"206:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_4x24/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_4x24/generic.cpp
index 1f707fa962..245e653a43 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_4x24/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_4x24/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -102,53 +102,53 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"cmp %x[M], #0x2\n"
"bgt 89f\n"
"beq 45f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x28, x9, x20, LSL #1\n"
- "add x27, x28, x20, LSL #1\n"
- "add x20, x27, x20, LSL #1\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0x14\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x27, x28, x19, LSL #1\n"
+ "add x26, x27, x19, LSL #1\n"
+ "add x19, x26, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0x14\n"
"bgt 3f\n"
- "cmp x14, #0x10\n"
- "mov x27, x12\n"
+ "cmp x13, #0x10\n"
+ "mov x26, x11\n"
"bgt 3f\n"
- "cmp x14, #0xc\n"
- "mov x28, x12\n"
+ "cmp x13, #0xc\n"
+ "mov x27, x11\n"
"bgt 3f\n"
- "cmp x14, #0x8\n"
- "mov x9, x12\n"
+ "cmp x13, #0x8\n"
+ "mov x28, x11\n"
"bgt 3f\n"
- "cmp x14, #0x4\n"
- "mov x10, x12\n"
+ "cmp x13, #0x4\n"
+ "mov x9, x11\n"
"bgt 3f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"3:" // Height 1: B setup done
- "cbz x15, 4f\n"
- "ldr q8, [x15, #0x0]\n"
- "ldr q9, [x15, #0x10]\n"
+ "cbz x14, 4f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
"zip2 v14.2d, v8.2d, v8.2d\n"
"zip1 v8.2d, v8.2d, v8.2d\n"
- "ldr q10, [x15, #0x20]\n"
- "ldr q11, [x15, #0x30]\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
"zip2 v15.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
- "ldr q12, [x15, #0x40]\n"
- "ldr q13, [x15, #0x50]\n"
+ "ldr q12, [x14, #0x40]\n"
+ "ldr q13, [x14, #0x50]\n"
"zip2 v16.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
"zip2 v17.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
- "add x15, x15, #0x60\n"
+ "add x14, x14, #0x60\n"
"zip2 v18.2d, v12.2d, v12.2d\n"
"zip1 v12.2d, v12.2d, v12.2d\n"
"zip2 v19.2d, v13.2d, v13.2d\n"
@@ -156,101 +156,101 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"b 20f\n"
"4:" // Height 1: no bias
"tbz %x[flags], #0, 19f\n"
- "cmp x14, #0x18\n"
+ "cmp x13, #0x18\n"
"bge 17f\n"
- "tbz x14, #4, 8f\n"
- "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v11.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x13], #0x10\n"
- "tbz x14, #2, 6f\n"
- "ld1 { v13.4s }, [x13], #0x10\n"
- "tbz x14, #1, 5f\n"
- "ldr d20, [x13], #0x8\n"
- "mov x20, #0x58\n"
- "tbz x14, #0, 16f\n"
- "ld1 { v20.s }[2], [x13]\n"
+ "tbz x13, #4, 8f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v11.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x12], #0x10\n"
+ "tbz x13, #2, 6f\n"
+ "ld1 { v13.4s }, [x12], #0x10\n"
+ "tbz x13, #1, 5f\n"
+ "ldr d20, [x12], #0x8\n"
+ "mov x19, #0x58\n"
+ "tbz x13, #0, 16f\n"
+ "ld1 { v20.s }[2], [x12]\n"
"b 16f\n"
"5:" // Height 1: Partial accumulate: partial_1_20
- "mov x20, #0x50\n"
- "tbz x14, #0, 16f\n"
- "ldr s20, [x13, #0x0]\n"
+ "mov x19, #0x50\n"
+ "tbz x13, #0, 16f\n"
+ "ldr s20, [x12, #0x0]\n"
"b 16f\n"
"6:" // Height 1: Partial accumulate: partial_2_16
- "tbz x14, #1, 7f\n"
- "ldr d13, [x13], #0x8\n"
- "mov x20, #0x48\n"
- "tbz x14, #0, 16f\n"
- "ld1 { v13.s }[2], [x13]\n"
+ "tbz x13, #1, 7f\n"
+ "ldr d13, [x12], #0x8\n"
+ "mov x19, #0x48\n"
+ "tbz x13, #0, 16f\n"
+ "ld1 { v13.s }[2], [x12]\n"
"b 16f\n"
"7:" // Height 1: Partial accumulate: partial_1_16
- "mov x20, #0x40\n"
- "tbz x14, #0, 16f\n"
- "ldr s13, [x13, #0x0]\n"
+ "mov x19, #0x40\n"
+ "tbz x13, #0, 16f\n"
+ "ldr s13, [x12, #0x0]\n"
"b 16f\n"
"8:" // Height 1: Partial accumulate: partial_8_0
- "tbz x14, #3, 12f\n"
- "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v10.4s }, [x13], #0x10\n"
- "tbz x14, #2, 10f\n"
- "ld1 { v11.4s }, [x13], #0x10\n"
- "tbz x14, #1, 9f\n"
- "ldr d12, [x13], #0x8\n"
- "mov x20, #0x38\n"
- "tbz x14, #0, 16f\n"
- "ld1 { v12.s }[2], [x13]\n"
+ "tbz x13, #3, 12f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "tbz x13, #2, 10f\n"
+ "ld1 { v11.4s }, [x12], #0x10\n"
+ "tbz x13, #1, 9f\n"
+ "ldr d12, [x12], #0x8\n"
+ "mov x19, #0x38\n"
+ "tbz x13, #0, 16f\n"
+ "ld1 { v12.s }[2], [x12]\n"
"b 16f\n"
"9:" // Height 1: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
- "tbz x14, #0, 16f\n"
- "ldr s12, [x13, #0x0]\n"
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 16f\n"
+ "ldr s12, [x12, #0x0]\n"
"b 16f\n"
"10:" // Height 1: Partial accumulate: partial_2_8
- "tbz x14, #1, 11f\n"
- "ldr d11, [x13], #0x8\n"
- "mov x20, #0x28\n"
- "tbz x14, #0, 16f\n"
- "ld1 { v11.s }[2], [x13]\n"
+ "tbz x13, #1, 11f\n"
+ "ldr d11, [x12], #0x8\n"
+ "mov x19, #0x28\n"
+ "tbz x13, #0, 16f\n"
+ "ld1 { v11.s }[2], [x12]\n"
"b 16f\n"
"11:" // Height 1: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
- "tbz x14, #0, 16f\n"
- "ldr s11, [x13, #0x0]\n"
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 16f\n"
+ "ldr s11, [x12, #0x0]\n"
"b 16f\n"
"12:" // Height 1: Partial accumulate: partial_4_0
- "tbz x14, #2, 14f\n"
- "ld1 { v9.4s }, [x13], #0x10\n"
- "tbz x14, #1, 13f\n"
- "ldr d10, [x13], #0x8\n"
- "mov x20, #0x18\n"
- "tbz x14, #0, 16f\n"
- "ld1 { v10.s }[2], [x13]\n"
+ "tbz x13, #2, 14f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "tbz x13, #1, 13f\n"
+ "ldr d10, [x12], #0x8\n"
+ "mov x19, #0x18\n"
+ "tbz x13, #0, 16f\n"
+ "ld1 { v10.s }[2], [x12]\n"
"b 16f\n"
"13:" // Height 1: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
- "tbz x14, #0, 16f\n"
- "ldr s10, [x13, #0x0]\n"
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 16f\n"
+ "ldr s10, [x12, #0x0]\n"
"b 16f\n"
"14:" // Height 1: Partial accumulate: partial_2_0
- "tbz x14, #1, 15f\n"
- "ldr d9, [x13], #0x8\n"
- "mov x20, #0x8\n"
- "tbz x14, #0, 16f\n"
- "ld1 { v9.s }[2], [x13]\n"
+ "tbz x13, #1, 15f\n"
+ "ldr d9, [x12], #0x8\n"
+ "mov x19, #0x8\n"
+ "tbz x13, #0, 16f\n"
+ "ld1 { v9.s }[2], [x12]\n"
"b 16f\n"
"15:" // Height 1: Partial accumulate: partial_1_0
- "ldr s9, [x13, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr s9, [x12, #0x0]\n"
+ "mov x19, #0x0\n"
"16:" // Height 1: Partial accumulate: Done
- "sub x13, x13, x20\n"
+ "sub x12, x12, x19\n"
"b 18f\n"
"17:" // Height 1: full accumulate
- "ldr q9, [x13, #0x0]\n"
- "ldr q10, [x13, #0x10]\n"
- "ldr q11, [x13, #0x20]\n"
- "ldr q12, [x13, #0x30]\n"
- "ldr q13, [x13, #0x40]\n"
- "ldr q20, [x13, #0x50]\n"
+ "ldr q9, [x12, #0x0]\n"
+ "ldr q10, [x12, #0x10]\n"
+ "ldr q11, [x12, #0x20]\n"
+ "ldr q12, [x12, #0x30]\n"
+ "ldr q13, [x12, #0x40]\n"
+ "ldr q20, [x12, #0x50]\n"
"18:" // Height 1: MMLA fixup
"zip1 v8.2d, v9.2d, v14.2d\n"
"zip2 v14.2d, v9.2d, v14.2d\n"
@@ -279,142 +279,142 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"movi v18.16b, #0x0\n"
"movi v19.16b, #0x0\n"
"20:" // Height 1: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"21:" // Height 1: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w24, [x19, x25, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 22f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "cbnz x26, 23f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "cbnz x25, 23f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19, LSL #2\n"
"b 23f\n"
"22:" // Height 1: setup direct input
- "mov x24, %x[input_ptr]\n"
+ "mov x23, %x[input_ptr]\n"
"23:" // Height 1: input setup done
- "cmp x25, #0x4\n"
+ "cmp x24, #0x4\n"
"blt 26f\n"
- "ld1 { v0.4s }, [x24], #0x10\n"
- "ldr q4, [x12, #0x0]\n"
- "cmp x25, #0x8\n"
- "ldr q5, [x12, #0x10]\n"
- "ldr q6, [x11, #0x0]\n"
- "ldr q7, [x11, #0x10]\n"
+ "ld1 { v0.4s }, [x23], #0x10\n"
+ "ldr q4, [x11, #0x0]\n"
+ "cmp x24, #0x8\n"
+ "ldr q5, [x11, #0x10]\n"
+ "ldr q6, [x10, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
"blt 25f\n"
"24:" // Height 1: Multiply loop: Main loop head
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- "ldr q4, [x10, #0x0]\n"
+ "ldr q4, [x9, #0x0]\n"
".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
- "ldr q5, [x10, #0x10]\n"
+ "ldr q5, [x9, #0x10]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- "ldr q6, [x9, #0x0]\n"
+ "ldr q6, [x28, #0x0]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
- "ldr q4, [x28, #0x0]\n"
+ "ldr q4, [x27, #0x0]\n"
".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
- "ldr q5, [x28, #0x10]\n"
+ "ldr q5, [x27, #0x10]\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
- "ldr q6, [x27, #0x0]\n"
+ "ldr q6, [x26, #0x0]\n"
".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
- "ldr q7, [x27, #0x10]\n"
- "sub x25, x25, #0x4\n"
- "cmp x25, #0x8\n"
+ "ldr q7, [x26, #0x10]\n"
+ "sub x24, x24, #0x4\n"
+ "cmp x24, #0x8\n"
".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
- "add x12, x12, #0x20\n"
- "ldr q4, [x12, #0x0]\n"
"add x11, x11, #0x20\n"
+ "ldr q4, [x11, #0x0]\n"
+ "add x10, x10, #0x20\n"
".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
- "ldr q5, [x12, #0x10]\n"
+ "ldr q5, [x11, #0x10]\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
- "ldr q6, [x11, #0x0]\n"
+ "ldr q6, [x10, #0x0]\n"
".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
- "ld1 { v0.4s }, [x24], #0x10\n"
- "ldr q7, [x11, #0x10]\n"
- "add x10, x10, #0x20\n"
+ "ld1 { v0.4s }, [x23], #0x10\n"
+ "ldr q7, [x10, #0x10]\n"
"add x9, x9, #0x20\n"
"add x28, x28, #0x20\n"
"add x27, x27, #0x20\n"
+ "add x26, x26, #0x20\n"
"bge 24b\n"
"25:" // Height 1: Multiply loop: Single iteration only
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- "ldr q4, [x10, #0x0]\n"
+ "ldr q4, [x9, #0x0]\n"
".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
- "ldr q5, [x10, #0x10]\n"
+ "ldr q5, [x9, #0x10]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- "ldr q6, [x9, #0x0]\n"
+ "ldr q6, [x28, #0x0]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
- "ldr q4, [x28, #0x0]\n"
+ "ldr q4, [x27, #0x0]\n"
".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
- "ldr q5, [x28, #0x10]\n"
+ "ldr q5, [x27, #0x10]\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
- "ldr q6, [x27, #0x0]\n"
+ "ldr q6, [x26, #0x0]\n"
".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
- "ldr q7, [x27, #0x10]\n"
- "sub x25, x25, #0x4\n"
+ "ldr q7, [x26, #0x10]\n"
+ "sub x24, x24, #0x4\n"
".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
- "add x12, x12, #0x20\n"
"add x11, x11, #0x20\n"
+ "add x10, x10, #0x20\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
- "add x10, x10, #0x20\n"
"add x9, x9, #0x20\n"
"add x28, x28, #0x20\n"
"add x27, x27, #0x20\n"
+ "add x26, x26, #0x20\n"
"26:" // Height 1: Multiply loop: Main loop skip
- "cbz x25, 29f\n"
- "cbz x25, 29f\n"
- "tbz x25, #1, 27f\n"
- "ldr d0, [x24], #0x8\n"
- "tbz x25, #0, 28f\n"
- "ld1 { v0.s }[2], [x24]\n"
+ "cbz x24, 29f\n"
+ "cbz x24, 29f\n"
+ "tbz x24, #1, 27f\n"
+ "ldr d0, [x23], #0x8\n"
+ "tbz x24, #0, 28f\n"
+ "ld1 { v0.s }[2], [x23]\n"
"b 28f\n"
"27:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
- "ldr s0, [x24, #0x0]\n"
+ "ldr s0, [x23, #0x0]\n"
"28:" // Height 1: Multiply loop: Ragged operand read: Done
- "ldr q4, [x12, #0x0]\n"
- "ldr q5, [x12, #0x10]\n"
+ "ldr q4, [x11, #0x0]\n"
+ "ldr q5, [x11, #0x10]\n"
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- "ldr q6, [x11, #0x0]\n"
- "ldr q7, [x11, #0x10]\n"
+ "ldr q6, [x10, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- "ldr q4, [x10, #0x0]\n"
- "ldr q5, [x10, #0x10]\n"
+ "ldr q4, [x9, #0x0]\n"
+ "ldr q5, [x9, #0x10]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
- "ldr q6, [x9, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q6, [x28, #0x0]\n"
+ "ldr q7, [x28, #0x10]\n"
".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
- "ldr q4, [x28, #0x0]\n"
- "ldr q5, [x28, #0x10]\n"
+ "ldr q4, [x27, #0x0]\n"
+ "ldr q5, [x27, #0x10]\n"
".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
- "ldr q6, [x27, #0x0]\n"
- "ldr q7, [x27, #0x10]\n"
+ "ldr q6, [x26, #0x0]\n"
+ "ldr q7, [x26, #0x10]\n"
".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
- "add x12, x12, #0x20\n"
"add x11, x11, #0x20\n"
"add x10, x10, #0x20\n"
"add x9, x9, #0x20\n"
"add x28, x28, #0x20\n"
"add x27, x27, #0x20\n"
+ "add x26, x26, #0x20\n"
"29:" // Height 1: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 21b\n"
"uzp1 v8.2d, v8.2d, v14.2d\n"
"uzp1 v9.2d, v9.2d, v15.2d\n"
@@ -423,10 +423,10 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"uzp1 v12.2d, v12.2d, v18.2d\n"
"uzp1 v13.2d, v13.2d, v19.2d\n"
"tbz %x[flags], #1, 30f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.4s }, [x19]\n"
"fmin v8.4s, v8.4s, v1.4s\n"
"fmin v9.4s, v9.4s, v1.4s\n"
"fmin v10.4s, v10.4s, v1.4s\n"
@@ -440,141 +440,141 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"fmax v12.4s, v12.4s, v0.4s\n"
"fmax v13.4s, v13.4s, v0.4s\n"
"30:" // Height 1: No activation
- "cmp x14, #0x18\n"
+ "cmp x13, #0x18\n"
"bge 43f\n"
- "tbz x14, #4, 34f\n"
- "st1 { v8.4s }, [x13], #0x10\n"
- "st1 { v9.4s }, [x13], #0x10\n"
- "st1 { v10.4s }, [x13], #0x10\n"
- "st1 { v11.4s }, [x13], #0x10\n"
- "tbz x14, #2, 32f\n"
- "st1 { v12.4s }, [x13], #0x10\n"
- "tbz x14, #1, 31f\n"
- "str d13, [x13], #0x8\n"
- "tbz x14, #0, 42f\n"
- "st1 { v13.s }[2], [x13]\n"
+ "tbz x13, #4, 34f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "st1 { v9.4s }, [x12], #0x10\n"
+ "st1 { v10.4s }, [x12], #0x10\n"
+ "st1 { v11.4s }, [x12], #0x10\n"
+ "tbz x13, #2, 32f\n"
+ "st1 { v12.4s }, [x12], #0x10\n"
+ "tbz x13, #1, 31f\n"
+ "str d13, [x12], #0x8\n"
+ "tbz x13, #0, 42f\n"
+ "st1 { v13.s }[2], [x12]\n"
"b 42f\n"
"31:" // Height 1: Partial direct writeback: partial_1_20
- "tbz x14, #0, 42f\n"
- "str s13, [x13, #0x0]\n"
+ "tbz x13, #0, 42f\n"
+ "str s13, [x12, #0x0]\n"
"b 42f\n"
"32:" // Height 1: Partial direct writeback: partial_2_16
- "tbz x14, #1, 33f\n"
- "str d12, [x13], #0x8\n"
- "tbz x14, #0, 42f\n"
- "st1 { v12.s }[2], [x13]\n"
+ "tbz x13, #1, 33f\n"
+ "str d12, [x12], #0x8\n"
+ "tbz x13, #0, 42f\n"
+ "st1 { v12.s }[2], [x12]\n"
"b 42f\n"
"33:" // Height 1: Partial direct writeback: partial_1_16
- "tbz x14, #0, 42f\n"
- "str s12, [x13, #0x0]\n"
+ "tbz x13, #0, 42f\n"
+ "str s12, [x12, #0x0]\n"
"b 42f\n"
"34:" // Height 1: Partial direct writeback: partial_8_0
- "tbz x14, #3, 38f\n"
- "st1 { v8.4s }, [x13], #0x10\n"
- "st1 { v9.4s }, [x13], #0x10\n"
- "tbz x14, #2, 36f\n"
- "st1 { v10.4s }, [x13], #0x10\n"
- "tbz x14, #1, 35f\n"
- "str d11, [x13], #0x8\n"
- "tbz x14, #0, 42f\n"
- "st1 { v11.s }[2], [x13]\n"
+ "tbz x13, #3, 38f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "st1 { v9.4s }, [x12], #0x10\n"
+ "tbz x13, #2, 36f\n"
+ "st1 { v10.4s }, [x12], #0x10\n"
+ "tbz x13, #1, 35f\n"
+ "str d11, [x12], #0x8\n"
+ "tbz x13, #0, 42f\n"
+ "st1 { v11.s }[2], [x12]\n"
"b 42f\n"
"35:" // Height 1: Partial direct writeback: partial_1_12
- "tbz x14, #0, 42f\n"
- "str s11, [x13, #0x0]\n"
+ "tbz x13, #0, 42f\n"
+ "str s11, [x12, #0x0]\n"
"b 42f\n"
"36:" // Height 1: Partial direct writeback: partial_2_8
- "tbz x14, #1, 37f\n"
- "str d10, [x13], #0x8\n"
- "tbz x14, #0, 42f\n"
- "st1 { v10.s }[2], [x13]\n"
+ "tbz x13, #1, 37f\n"
+ "str d10, [x12], #0x8\n"
+ "tbz x13, #0, 42f\n"
+ "st1 { v10.s }[2], [x12]\n"
"b 42f\n"
"37:" // Height 1: Partial direct writeback: partial_1_8
- "tbz x14, #0, 42f\n"
- "str s10, [x13, #0x0]\n"
+ "tbz x13, #0, 42f\n"
+ "str s10, [x12, #0x0]\n"
"b 42f\n"
"38:" // Height 1: Partial direct writeback: partial_4_0
- "tbz x14, #2, 40f\n"
- "st1 { v8.4s }, [x13], #0x10\n"
- "tbz x14, #1, 39f\n"
- "str d9, [x13], #0x8\n"
- "tbz x14, #0, 42f\n"
- "st1 { v9.s }[2], [x13]\n"
+ "tbz x13, #2, 40f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "tbz x13, #1, 39f\n"
+ "str d9, [x12], #0x8\n"
+ "tbz x13, #0, 42f\n"
+ "st1 { v9.s }[2], [x12]\n"
"b 42f\n"
"39:" // Height 1: Partial direct writeback: partial_1_4
- "tbz x14, #0, 42f\n"
- "str s9, [x13, #0x0]\n"
+ "tbz x13, #0, 42f\n"
+ "str s9, [x12, #0x0]\n"
"b 42f\n"
"40:" // Height 1: Partial direct writeback: partial_2_0
- "tbz x14, #1, 41f\n"
- "str d8, [x13], #0x8\n"
- "tbz x14, #0, 42f\n"
- "st1 { v8.s }[2], [x13]\n"
+ "tbz x13, #1, 41f\n"
+ "str d8, [x12], #0x8\n"
+ "tbz x13, #0, 42f\n"
+ "st1 { v8.s }[2], [x12]\n"
"b 42f\n"
"41:" // Height 1: Partial direct writeback: partial_1_0
- "str s8, [x13, #0x0]\n"
+ "str s8, [x12, #0x0]\n"
"42:" // Height 1: Partial direct writeback: Done
"b 44f\n"
"43:" // Height 1: Full writeback
- "str q8, [x13, #0x0]\n"
- "str q9, [x13, #0x10]\n"
- "str q10, [x13, #0x20]\n"
- "str q11, [x13, #0x30]\n"
- "str q12, [x13, #0x40]\n"
- "str q13, [x13, #0x50]\n"
- "add x13, x13, #0x60\n"
+ "str q8, [x12, #0x0]\n"
+ "str q9, [x12, #0x10]\n"
+ "str q10, [x12, #0x20]\n"
+ "str q11, [x12, #0x30]\n"
+ "str q12, [x12, #0x40]\n"
+ "str q13, [x12, #0x50]\n"
+ "add x12, x12, #0x60\n"
"44:" // Height 1: Writeback done
- "subs x14, x14, #0x18\n"
+ "subs x13, x13, #0x18\n"
"bgt 2b\n"
"b 178f\n"
"45:" // Height 2
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"46:" // Height 2: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x28, x9, x20, LSL #1\n"
- "add x27, x28, x20, LSL #1\n"
- "add x20, x27, x20, LSL #1\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0x14\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x27, x28, x19, LSL #1\n"
+ "add x26, x27, x19, LSL #1\n"
+ "add x19, x26, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0x14\n"
"bgt 47f\n"
- "cmp x14, #0x10\n"
- "mov x27, x12\n"
+ "cmp x13, #0x10\n"
+ "mov x26, x11\n"
"bgt 47f\n"
- "cmp x14, #0xc\n"
- "mov x28, x12\n"
+ "cmp x13, #0xc\n"
+ "mov x27, x11\n"
"bgt 47f\n"
- "cmp x14, #0x8\n"
- "mov x9, x12\n"
+ "cmp x13, #0x8\n"
+ "mov x28, x11\n"
"bgt 47f\n"
- "cmp x14, #0x4\n"
- "mov x10, x12\n"
+ "cmp x13, #0x4\n"
+ "mov x9, x11\n"
"bgt 47f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"47:" // Height 2: B setup done
- "cbz x15, 48f\n"
- "ldr q8, [x15, #0x0]\n"
- "ldr q9, [x15, #0x10]\n"
+ "cbz x14, 48f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
"zip2 v14.2d, v8.2d, v8.2d\n"
"zip1 v8.2d, v8.2d, v8.2d\n"
- "ldr q10, [x15, #0x20]\n"
- "ldr q11, [x15, #0x30]\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
"zip2 v15.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
- "ldr q12, [x15, #0x40]\n"
- "ldr q13, [x15, #0x50]\n"
+ "ldr q12, [x14, #0x40]\n"
+ "ldr q13, [x14, #0x50]\n"
"zip2 v16.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
"zip2 v17.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
- "add x15, x15, #0x60\n"
+ "add x14, x14, #0x60\n"
"zip2 v18.2d, v12.2d, v12.2d\n"
"zip1 v12.2d, v12.2d, v12.2d\n"
"zip2 v19.2d, v13.2d, v13.2d\n"
@@ -582,136 +582,136 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"b 64f\n"
"48:" // Height 2: no bias
"tbz %x[flags], #0, 63f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "cmp x14, #0x18\n"
- "add x23, x13, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x13, #0x18\n"
+ "add x22, x12, x19, LSL #2\n"
"bge 61f\n"
- "tbz x14, #4, 52f\n"
- "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
- "ld1 { v11.4s }, [x13], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v12.4s }, [x13], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "tbz x14, #2, 50f\n"
- "ld1 { v13.4s }, [x13], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "tbz x14, #1, 49f\n"
- "ldr d20, [x13], #0x8\n"
- "ldr d19, [x23], #0x8\n"
- "mov x20, #0x58\n"
- "tbz x14, #0, 60f\n"
- "ld1 { v20.s }[2], [x13]\n"
- "ld1 { v19.s }[2], [x23]\n"
+ "tbz x13, #4, 52f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v15.4s }, [x22], #0x10\n"
+ "ld1 { v11.4s }, [x12], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v12.4s }, [x12], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "tbz x13, #2, 50f\n"
+ "ld1 { v13.4s }, [x12], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "tbz x13, #1, 49f\n"
+ "ldr d20, [x12], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
+ "mov x19, #0x58\n"
+ "tbz x13, #0, 60f\n"
+ "ld1 { v20.s }[2], [x12]\n"
+ "ld1 { v19.s }[2], [x22]\n"
"b 60f\n"
"49:" // Height 2: Partial accumulate: partial_1_20
- "mov x20, #0x50\n"
- "tbz x14, #0, 60f\n"
- "ldr s20, [x13, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
+ "mov x19, #0x50\n"
+ "tbz x13, #0, 60f\n"
+ "ldr s20, [x12, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
"b 60f\n"
"50:" // Height 2: Partial accumulate: partial_2_16
- "tbz x14, #1, 51f\n"
- "ldr d13, [x13], #0x8\n"
- "ldr d18, [x23], #0x8\n"
- "mov x20, #0x48\n"
- "tbz x14, #0, 60f\n"
- "ld1 { v13.s }[2], [x13]\n"
- "ld1 { v18.s }[2], [x23]\n"
+ "tbz x13, #1, 51f\n"
+ "ldr d13, [x12], #0x8\n"
+ "ldr d18, [x22], #0x8\n"
+ "mov x19, #0x48\n"
+ "tbz x13, #0, 60f\n"
+ "ld1 { v13.s }[2], [x12]\n"
+ "ld1 { v18.s }[2], [x22]\n"
"b 60f\n"
"51:" // Height 2: Partial accumulate: partial_1_16
- "mov x20, #0x40\n"
- "tbz x14, #0, 60f\n"
- "ldr s13, [x13, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
+ "mov x19, #0x40\n"
+ "tbz x13, #0, 60f\n"
+ "ldr s13, [x12, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
"b 60f\n"
"52:" // Height 2: Partial accumulate: partial_8_0
- "tbz x14, #3, 56f\n"
- "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
- "tbz x14, #2, 54f\n"
- "ld1 { v11.4s }, [x13], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "tbz x14, #1, 53f\n"
- "ldr d12, [x13], #0x8\n"
- "ldr d17, [x23], #0x8\n"
- "mov x20, #0x38\n"
- "tbz x14, #0, 60f\n"
- "ld1 { v12.s }[2], [x13]\n"
- "ld1 { v17.s }[2], [x23]\n"
+ "tbz x13, #3, 56f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v15.4s }, [x22], #0x10\n"
+ "tbz x13, #2, 54f\n"
+ "ld1 { v11.4s }, [x12], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "tbz x13, #1, 53f\n"
+ "ldr d12, [x12], #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "mov x19, #0x38\n"
+ "tbz x13, #0, 60f\n"
+ "ld1 { v12.s }[2], [x12]\n"
+ "ld1 { v17.s }[2], [x22]\n"
"b 60f\n"
"53:" // Height 2: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
- "tbz x14, #0, 60f\n"
- "ldr s12, [x13, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 60f\n"
+ "ldr s12, [x12, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
"b 60f\n"
"54:" // Height 2: Partial accumulate: partial_2_8
- "tbz x14, #1, 55f\n"
- "ldr d11, [x13], #0x8\n"
- "ldr d16, [x23], #0x8\n"
- "mov x20, #0x28\n"
- "tbz x14, #0, 60f\n"
- "ld1 { v11.s }[2], [x13]\n"
- "ld1 { v16.s }[2], [x23]\n"
+ "tbz x13, #1, 55f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d16, [x22], #0x8\n"
+ "mov x19, #0x28\n"
+ "tbz x13, #0, 60f\n"
+ "ld1 { v11.s }[2], [x12]\n"
+ "ld1 { v16.s }[2], [x22]\n"
"b 60f\n"
"55:" // Height 2: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
- "tbz x14, #0, 60f\n"
- "ldr s11, [x13, #0x0]\n"
- "ldr s16, [x23, #0x0]\n"
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 60f\n"
+ "ldr s11, [x12, #0x0]\n"
+ "ldr s16, [x22, #0x0]\n"
"b 60f\n"
"56:" // Height 2: Partial accumulate: partial_4_0
- "tbz x14, #2, 58f\n"
- "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "tbz x14, #1, 57f\n"
- "ldr d10, [x13], #0x8\n"
- "ldr d15, [x23], #0x8\n"
- "mov x20, #0x18\n"
- "tbz x14, #0, 60f\n"
- "ld1 { v10.s }[2], [x13]\n"
- "ld1 { v15.s }[2], [x23]\n"
+ "tbz x13, #2, 58f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "tbz x13, #1, 57f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d15, [x22], #0x8\n"
+ "mov x19, #0x18\n"
+ "tbz x13, #0, 60f\n"
+ "ld1 { v10.s }[2], [x12]\n"
+ "ld1 { v15.s }[2], [x22]\n"
"b 60f\n"
"57:" // Height 2: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
- "tbz x14, #0, 60f\n"
- "ldr s10, [x13, #0x0]\n"
- "ldr s15, [x23, #0x0]\n"
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 60f\n"
+ "ldr s10, [x12, #0x0]\n"
+ "ldr s15, [x22, #0x0]\n"
"b 60f\n"
"58:" // Height 2: Partial accumulate: partial_2_0
- "tbz x14, #1, 59f\n"
- "ldr d9, [x13], #0x8\n"
- "ldr d14, [x23], #0x8\n"
- "mov x20, #0x8\n"
- "tbz x14, #0, 60f\n"
- "ld1 { v9.s }[2], [x13]\n"
- "ld1 { v14.s }[2], [x23]\n"
+ "tbz x13, #1, 59f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d14, [x22], #0x8\n"
+ "mov x19, #0x8\n"
+ "tbz x13, #0, 60f\n"
+ "ld1 { v9.s }[2], [x12]\n"
+ "ld1 { v14.s }[2], [x22]\n"
"b 60f\n"
"59:" // Height 2: Partial accumulate: partial_1_0
- "ldr s9, [x13, #0x0]\n"
- "ldr s14, [x23, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr s9, [x12, #0x0]\n"
+ "ldr s14, [x22, #0x0]\n"
+ "mov x19, #0x0\n"
"60:" // Height 2: Partial accumulate: Done
- "sub x13, x13, x20\n"
+ "sub x12, x12, x19\n"
"b 62f\n"
"61:" // Height 2: full accumulate
- "ldr q9, [x13, #0x0]\n"
- "ldr q10, [x13, #0x10]\n"
- "ldr q11, [x13, #0x20]\n"
- "ldr q12, [x13, #0x30]\n"
- "ldr q13, [x13, #0x40]\n"
- "ldr q20, [x13, #0x50]\n"
- "ldr q14, [x23, #0x0]\n"
- "ldr q15, [x23, #0x10]\n"
- "ldr q16, [x23, #0x20]\n"
- "ldr q17, [x23, #0x30]\n"
- "ldr q18, [x23, #0x40]\n"
- "ldr q19, [x23, #0x50]\n"
+ "ldr q9, [x12, #0x0]\n"
+ "ldr q10, [x12, #0x10]\n"
+ "ldr q11, [x12, #0x20]\n"
+ "ldr q12, [x12, #0x30]\n"
+ "ldr q13, [x12, #0x40]\n"
+ "ldr q20, [x12, #0x50]\n"
+ "ldr q14, [x22, #0x0]\n"
+ "ldr q15, [x22, #0x10]\n"
+ "ldr q16, [x22, #0x20]\n"
+ "ldr q17, [x22, #0x30]\n"
+ "ldr q18, [x22, #0x40]\n"
+ "ldr q19, [x22, #0x50]\n"
"62:" // Height 2: MMLA fixup
"zip1 v8.2d, v9.2d, v14.2d\n"
"zip2 v14.2d, v9.2d, v14.2d\n"
@@ -740,158 +740,158 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"movi v18.16b, #0x0\n"
"movi v19.16b, #0x0\n"
"64:" // Height 2: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"65:" // Height 2: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w24, [x19, x25, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 66f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "cbnz x26, 67f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "cbnz x25, 67f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
"b 67f\n"
"66:" // Height 2: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20, LSL #2\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19, LSL #2\n"
"67:" // Height 2: input setup done
- "cmp x25, #0x4\n"
+ "cmp x24, #0x4\n"
"blt 70f\n"
- "ld1 { v0.4s }, [x24], #0x10\n"
- "ld1 { v1.4s }, [x23], #0x10\n"
- "cmp x25, #0x8\n"
- "ldr q4, [x12, #0x0]\n"
- "ldr q5, [x12, #0x10]\n"
- "ldr q6, [x11, #0x0]\n"
- "ldr q7, [x11, #0x10]\n"
+ "ld1 { v0.4s }, [x23], #0x10\n"
+ "ld1 { v1.4s }, [x22], #0x10\n"
+ "cmp x24, #0x8\n"
+ "ldr q4, [x11, #0x0]\n"
+ "ldr q5, [x11, #0x10]\n"
+ "ldr q6, [x10, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
"blt 69f\n"
"68:" // Height 2: Multiply loop: Main loop head
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
- "ld1 { v1.4s }, [x23], #0x10\n"
+ "ld1 { v1.4s }, [x22], #0x10\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- "ldr q4, [x10, #0x0]\n"
+ "ldr q4, [x9, #0x0]\n"
".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
- "ldr q5, [x10, #0x10]\n"
+ "ldr q5, [x9, #0x10]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- "ldr q6, [x9, #0x0]\n"
+ "ldr q6, [x28, #0x0]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
- "ldr q4, [x28, #0x0]\n"
+ "ldr q4, [x27, #0x0]\n"
".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
- "ldr q5, [x28, #0x10]\n"
+ "ldr q5, [x27, #0x10]\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
- "ldr q6, [x27, #0x0]\n"
+ "ldr q6, [x26, #0x0]\n"
".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
- "ldr q7, [x27, #0x10]\n"
- "sub x25, x25, #0x4\n"
- "cmp x25, #0x8\n"
- "add x12, x12, #0x20\n"
- ".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
- "ldr q4, [x12, #0x0]\n"
+ "ldr q7, [x26, #0x10]\n"
+ "sub x24, x24, #0x4\n"
+ "cmp x24, #0x8\n"
"add x11, x11, #0x20\n"
+ ".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
+ "ldr q4, [x11, #0x0]\n"
+ "add x10, x10, #0x20\n"
".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
- "ldr q5, [x12, #0x10]\n"
+ "ldr q5, [x11, #0x10]\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
- "ldr q6, [x11, #0x0]\n"
+ "ldr q6, [x10, #0x0]\n"
".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
- "ld1 { v0.4s }, [x24], #0x10\n"
- "add x10, x10, #0x20\n"
- "ldr q7, [x11, #0x10]\n"
+ "ld1 { v0.4s }, [x23], #0x10\n"
"add x9, x9, #0x20\n"
+ "ldr q7, [x10, #0x10]\n"
"add x28, x28, #0x20\n"
"add x27, x27, #0x20\n"
+ "add x26, x26, #0x20\n"
"bge 68b\n"
"69:" // Height 2: Multiply loop: Single iteration only
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- "ldr q4, [x10, #0x0]\n"
+ "ldr q4, [x9, #0x0]\n"
".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
- "ldr q5, [x10, #0x10]\n"
+ "ldr q5, [x9, #0x10]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- "ldr q6, [x9, #0x0]\n"
+ "ldr q6, [x28, #0x0]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
- "ldr q4, [x28, #0x0]\n"
+ "ldr q4, [x27, #0x0]\n"
".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
- "ldr q5, [x28, #0x10]\n"
+ "ldr q5, [x27, #0x10]\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
- "ldr q6, [x27, #0x0]\n"
+ "ldr q6, [x26, #0x0]\n"
".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
- "ldr q7, [x27, #0x10]\n"
- "sub x25, x25, #0x4\n"
+ "ldr q7, [x26, #0x10]\n"
+ "sub x24, x24, #0x4\n"
".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
- "add x12, x12, #0x20\n"
"add x11, x11, #0x20\n"
+ "add x10, x10, #0x20\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
- "add x10, x10, #0x20\n"
"add x9, x9, #0x20\n"
"add x28, x28, #0x20\n"
"add x27, x27, #0x20\n"
+ "add x26, x26, #0x20\n"
"70:" // Height 2: Multiply loop: Main loop skip
- "cbz x25, 73f\n"
- "cbz x25, 73f\n"
- "tbz x25, #1, 71f\n"
- "ldr d0, [x24], #0x8\n"
- "ldr d1, [x23], #0x8\n"
- "tbz x25, #0, 72f\n"
- "ld1 { v0.s }[2], [x24]\n"
- "ld1 { v1.s }[2], [x23]\n"
+ "cbz x24, 73f\n"
+ "cbz x24, 73f\n"
+ "tbz x24, #1, 71f\n"
+ "ldr d0, [x23], #0x8\n"
+ "ldr d1, [x22], #0x8\n"
+ "tbz x24, #0, 72f\n"
+ "ld1 { v0.s }[2], [x23]\n"
+ "ld1 { v1.s }[2], [x22]\n"
"b 72f\n"
"71:" // Height 2: Multiply loop: Ragged operand read: partial_1_0
- "ldr s0, [x24, #0x0]\n"
- "ldr s1, [x23, #0x0]\n"
+ "ldr s0, [x23, #0x0]\n"
+ "ldr s1, [x22, #0x0]\n"
"72:" // Height 2: Multiply loop: Ragged operand read: Done
- "ldr q4, [x12, #0x0]\n"
- "ldr q5, [x12, #0x10]\n"
+ "ldr q4, [x11, #0x0]\n"
+ "ldr q5, [x11, #0x10]\n"
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
- "ldr q6, [x11, #0x0]\n"
- "ldr q7, [x11, #0x10]\n"
+ "ldr q6, [x10, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
- "ldr q4, [x10, #0x0]\n"
- "ldr q5, [x10, #0x10]\n"
+ "ldr q4, [x9, #0x0]\n"
+ "ldr q5, [x9, #0x10]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
- "ldr q6, [x9, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q6, [x28, #0x0]\n"
+ "ldr q7, [x28, #0x10]\n"
".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
- "ldr q4, [x28, #0x0]\n"
- "ldr q5, [x28, #0x10]\n"
+ "ldr q4, [x27, #0x0]\n"
+ "ldr q5, [x27, #0x10]\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
- "ldr q6, [x27, #0x0]\n"
- "ldr q7, [x27, #0x10]\n"
+ "ldr q6, [x26, #0x0]\n"
+ "ldr q7, [x26, #0x10]\n"
".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
- "add x12, x12, #0x20\n"
"add x11, x11, #0x20\n"
"add x10, x10, #0x20\n"
"add x9, x9, #0x20\n"
"add x28, x28, #0x20\n"
"add x27, x27, #0x20\n"
+ "add x26, x26, #0x20\n"
"73:" // Height 2: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 65b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 v4.2d, v8.2d, v14.2d\n"
"uzp2 v8.2d, v8.2d, v14.2d\n"
- "add x23, x13, x20, LSL #2\n"
+ "add x22, x12, x19, LSL #2\n"
"uzp1 v14.2d, v9.2d, v15.2d\n"
"uzp2 v9.2d, v9.2d, v15.2d\n"
"uzp1 v15.2d, v10.2d, v16.2d\n"
@@ -903,10 +903,10 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"uzp1 v18.2d, v13.2d, v19.2d\n"
"uzp2 v13.2d, v13.2d, v19.2d\n"
"tbz %x[flags], #1, 74f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.4s }, [x19]\n"
"fmin v4.4s, v4.4s, v1.4s\n"
"fmin v14.4s, v14.4s, v1.4s\n"
"fmin v15.4s, v15.4s, v1.4s\n"
@@ -932,174 +932,174 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"fmax v12.4s, v12.4s, v0.4s\n"
"fmax v13.4s, v13.4s, v0.4s\n"
"74:" // Height 2: No activation
- "cmp x14, #0x18\n"
+ "cmp x13, #0x18\n"
"bge 87f\n"
- "tbz x14, #4, 78f\n"
- "st1 { v4.4s }, [x13], #0x10\n"
- "st1 { v14.4s }, [x13], #0x10\n"
- "st1 { v15.4s }, [x13], #0x10\n"
- "st1 { v16.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
- "st1 { v9.4s }, [x23], #0x10\n"
- "st1 { v10.4s }, [x23], #0x10\n"
- "st1 { v11.4s }, [x23], #0x10\n"
- "tbz x14, #2, 76f\n"
- "st1 { v17.4s }, [x13], #0x10\n"
- "st1 { v12.4s }, [x23], #0x10\n"
- "tbz x14, #1, 75f\n"
- "str d18, [x13], #0x8\n"
- "str d13, [x23], #0x8\n"
- "tbz x14, #0, 86f\n"
- "st1 { v18.s }[2], [x13]\n"
- "st1 { v13.s }[2], [x23]\n"
+ "tbz x13, #4, 78f\n"
+ "st1 { v4.4s }, [x12], #0x10\n"
+ "st1 { v14.4s }, [x12], #0x10\n"
+ "st1 { v15.4s }, [x12], #0x10\n"
+ "st1 { v16.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x22], #0x10\n"
+ "st1 { v9.4s }, [x22], #0x10\n"
+ "st1 { v10.4s }, [x22], #0x10\n"
+ "st1 { v11.4s }, [x22], #0x10\n"
+ "tbz x13, #2, 76f\n"
+ "st1 { v17.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x22], #0x10\n"
+ "tbz x13, #1, 75f\n"
+ "str d18, [x12], #0x8\n"
+ "str d13, [x22], #0x8\n"
+ "tbz x13, #0, 86f\n"
+ "st1 { v18.s }[2], [x12]\n"
+ "st1 { v13.s }[2], [x22]\n"
"b 86f\n"
"75:" // Height 2: Partial direct writeback: partial_1_20
- "tbz x14, #0, 86f\n"
- "str s18, [x13, #0x0]\n"
- "str s13, [x23, #0x0]\n"
+ "tbz x13, #0, 86f\n"
+ "str s18, [x12, #0x0]\n"
+ "str s13, [x22, #0x0]\n"
"b 86f\n"
"76:" // Height 2: Partial direct writeback: partial_2_16
- "tbz x14, #1, 77f\n"
- "str d17, [x13], #0x8\n"
- "str d12, [x23], #0x8\n"
- "tbz x14, #0, 86f\n"
- "st1 { v17.s }[2], [x13]\n"
- "st1 { v12.s }[2], [x23]\n"
+ "tbz x13, #1, 77f\n"
+ "str d17, [x12], #0x8\n"
+ "str d12, [x22], #0x8\n"
+ "tbz x13, #0, 86f\n"
+ "st1 { v17.s }[2], [x12]\n"
+ "st1 { v12.s }[2], [x22]\n"
"b 86f\n"
"77:" // Height 2: Partial direct writeback: partial_1_16
- "tbz x14, #0, 86f\n"
- "str s17, [x13, #0x0]\n"
- "str s12, [x23, #0x0]\n"
+ "tbz x13, #0, 86f\n"
+ "str s17, [x12, #0x0]\n"
+ "str s12, [x22, #0x0]\n"
"b 86f\n"
"78:" // Height 2: Partial direct writeback: partial_8_0
- "tbz x14, #3, 82f\n"
- "st1 { v4.4s }, [x13], #0x10\n"
- "st1 { v14.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
- "st1 { v9.4s }, [x23], #0x10\n"
- "tbz x14, #2, 80f\n"
- "st1 { v15.4s }, [x13], #0x10\n"
- "st1 { v10.4s }, [x23], #0x10\n"
- "tbz x14, #1, 79f\n"
- "str d16, [x13], #0x8\n"
- "str d11, [x23], #0x8\n"
- "tbz x14, #0, 86f\n"
- "st1 { v16.s }[2], [x13]\n"
- "st1 { v11.s }[2], [x23]\n"
+ "tbz x13, #3, 82f\n"
+ "st1 { v4.4s }, [x12], #0x10\n"
+ "st1 { v14.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x22], #0x10\n"
+ "st1 { v9.4s }, [x22], #0x10\n"
+ "tbz x13, #2, 80f\n"
+ "st1 { v15.4s }, [x12], #0x10\n"
+ "st1 { v10.4s }, [x22], #0x10\n"
+ "tbz x13, #1, 79f\n"
+ "str d16, [x12], #0x8\n"
+ "str d11, [x22], #0x8\n"
+ "tbz x13, #0, 86f\n"
+ "st1 { v16.s }[2], [x12]\n"
+ "st1 { v11.s }[2], [x22]\n"
"b 86f\n"
"79:" // Height 2: Partial direct writeback: partial_1_12
- "tbz x14, #0, 86f\n"
- "str s16, [x13, #0x0]\n"
- "str s11, [x23, #0x0]\n"
+ "tbz x13, #0, 86f\n"
+ "str s16, [x12, #0x0]\n"
+ "str s11, [x22, #0x0]\n"
"b 86f\n"
"80:" // Height 2: Partial direct writeback: partial_2_8
- "tbz x14, #1, 81f\n"
- "str d15, [x13], #0x8\n"
- "str d10, [x23], #0x8\n"
- "tbz x14, #0, 86f\n"
- "st1 { v15.s }[2], [x13]\n"
- "st1 { v10.s }[2], [x23]\n"
+ "tbz x13, #1, 81f\n"
+ "str d15, [x12], #0x8\n"
+ "str d10, [x22], #0x8\n"
+ "tbz x13, #0, 86f\n"
+ "st1 { v15.s }[2], [x12]\n"
+ "st1 { v10.s }[2], [x22]\n"
"b 86f\n"
"81:" // Height 2: Partial direct writeback: partial_1_8
- "tbz x14, #0, 86f\n"
- "str s15, [x13, #0x0]\n"
- "str s10, [x23, #0x0]\n"
+ "tbz x13, #0, 86f\n"
+ "str s15, [x12, #0x0]\n"
+ "str s10, [x22, #0x0]\n"
"b 86f\n"
"82:" // Height 2: Partial direct writeback: partial_4_0
- "tbz x14, #2, 84f\n"
- "st1 { v4.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
- "tbz x14, #1, 83f\n"
- "str d14, [x13], #0x8\n"
- "str d9, [x23], #0x8\n"
- "tbz x14, #0, 86f\n"
- "st1 { v14.s }[2], [x13]\n"
- "st1 { v9.s }[2], [x23]\n"
+ "tbz x13, #2, 84f\n"
+ "st1 { v4.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x22], #0x10\n"
+ "tbz x13, #1, 83f\n"
+ "str d14, [x12], #0x8\n"
+ "str d9, [x22], #0x8\n"
+ "tbz x13, #0, 86f\n"
+ "st1 { v14.s }[2], [x12]\n"
+ "st1 { v9.s }[2], [x22]\n"
"b 86f\n"
"83:" // Height 2: Partial direct writeback: partial_1_4
- "tbz x14, #0, 86f\n"
- "str s14, [x13, #0x0]\n"
- "str s9, [x23, #0x0]\n"
+ "tbz x13, #0, 86f\n"
+ "str s14, [x12, #0x0]\n"
+ "str s9, [x22, #0x0]\n"
"b 86f\n"
"84:" // Height 2: Partial direct writeback: partial_2_0
- "tbz x14, #1, 85f\n"
- "str d4, [x13], #0x8\n"
- "str d8, [x23], #0x8\n"
- "tbz x14, #0, 86f\n"
- "st1 { v4.s }[2], [x13]\n"
- "st1 { v8.s }[2], [x23]\n"
+ "tbz x13, #1, 85f\n"
+ "str d4, [x12], #0x8\n"
+ "str d8, [x22], #0x8\n"
+ "tbz x13, #0, 86f\n"
+ "st1 { v4.s }[2], [x12]\n"
+ "st1 { v8.s }[2], [x22]\n"
"b 86f\n"
"85:" // Height 2: Partial direct writeback: partial_1_0
- "str s4, [x13, #0x0]\n"
- "str s8, [x23, #0x0]\n"
+ "str s4, [x12, #0x0]\n"
+ "str s8, [x22, #0x0]\n"
"86:" // Height 2: Partial direct writeback: Done
"b 88f\n"
"87:" // Height 2: Full writeback
- "str q4, [x13, #0x0]\n"
- "str q14, [x13, #0x10]\n"
- "str q15, [x13, #0x20]\n"
- "str q16, [x13, #0x30]\n"
- "str q17, [x13, #0x40]\n"
- "str q18, [x13, #0x50]\n"
- "add x13, x13, #0x60\n"
- "str q8, [x23, #0x0]\n"
- "str q9, [x23, #0x10]\n"
- "str q10, [x23, #0x20]\n"
- "str q11, [x23, #0x30]\n"
- "str q12, [x23, #0x40]\n"
- "str q13, [x23, #0x50]\n"
+ "str q4, [x12, #0x0]\n"
+ "str q14, [x12, #0x10]\n"
+ "str q15, [x12, #0x20]\n"
+ "str q16, [x12, #0x30]\n"
+ "str q17, [x12, #0x40]\n"
+ "str q18, [x12, #0x50]\n"
+ "add x12, x12, #0x60\n"
+ "str q8, [x22, #0x0]\n"
+ "str q9, [x22, #0x10]\n"
+ "str q10, [x22, #0x20]\n"
+ "str q11, [x22, #0x30]\n"
+ "str q12, [x22, #0x40]\n"
+ "str q13, [x22, #0x50]\n"
"88:" // Height 2: Writeback done
- "subs x14, x14, #0x18\n"
+ "subs x13, x13, #0x18\n"
"bgt 46b\n"
"b 178f\n"
"89:" // Height 3
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"90:" // Height 3: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x28, x9, x20, LSL #1\n"
- "add x27, x28, x20, LSL #1\n"
- "add x20, x27, x20, LSL #1\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0x14\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x27, x28, x19, LSL #1\n"
+ "add x26, x27, x19, LSL #1\n"
+ "add x19, x26, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0x14\n"
"bgt 91f\n"
- "cmp x14, #0x10\n"
- "mov x27, x12\n"
+ "cmp x13, #0x10\n"
+ "mov x26, x11\n"
"bgt 91f\n"
- "cmp x14, #0xc\n"
- "mov x28, x12\n"
+ "cmp x13, #0xc\n"
+ "mov x27, x11\n"
"bgt 91f\n"
- "cmp x14, #0x8\n"
- "mov x9, x12\n"
+ "cmp x13, #0x8\n"
+ "mov x28, x11\n"
"bgt 91f\n"
- "cmp x14, #0x4\n"
- "mov x10, x12\n"
+ "cmp x13, #0x4\n"
+ "mov x9, x11\n"
"bgt 91f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"91:" // Height 3: B setup done
- "cbz x15, 92f\n"
- "ldr q8, [x15, #0x0]\n"
- "ldr q9, [x15, #0x10]\n"
+ "cbz x14, 92f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
"zip2 v14.2d, v8.2d, v8.2d\n"
"zip1 v8.2d, v8.2d, v8.2d\n"
- "ldr q10, [x15, #0x20]\n"
- "ldr q11, [x15, #0x30]\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
"zip2 v15.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
- "ldr q12, [x15, #0x40]\n"
- "ldr q13, [x15, #0x50]\n"
+ "ldr q12, [x14, #0x40]\n"
+ "ldr q13, [x14, #0x50]\n"
"zip2 v16.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
"zip2 v17.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
- "add x15, x15, #0x60\n"
+ "add x14, x14, #0x60\n"
"zip2 v18.2d, v12.2d, v12.2d\n"
"zip1 v12.2d, v12.2d, v12.2d\n"
"zip2 v19.2d, v13.2d, v13.2d\n"
@@ -1119,170 +1119,170 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"b 108f\n"
"92:" // Height 3: no bias
"tbz %x[flags], #0, 107f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x13, x20, LSL #2\n"
- "cmp x14, #0x18\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x22, x12, x19, LSL #2\n"
+ "cmp x13, #0x18\n"
+ "add x21, x22, x19, LSL #2\n"
"bge 105f\n"
- "tbz x14, #4, 96f\n"
- "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "ld1 { v11.4s }, [x13], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v23.4s }, [x22], #0x10\n"
- "ld1 { v12.4s }, [x13], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
- "tbz x14, #2, 94f\n"
- "ld1 { v13.4s }, [x13], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
- "tbz x14, #1, 93f\n"
- "ldr d20, [x13], #0x8\n"
- "ldr d19, [x23], #0x8\n"
- "mov x20, #0x58\n"
- "ldr d4, [x22], #0x8\n"
- "tbz x14, #0, 104f\n"
- "ld1 { v20.s }[2], [x13]\n"
- "ld1 { v19.s }[2], [x23]\n"
- "ld1 { v4.s }[2], [x22]\n"
+ "tbz x13, #4, 96f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v15.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
+ "ld1 { v11.4s }, [x12], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v23.4s }, [x21], #0x10\n"
+ "ld1 { v12.4s }, [x12], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v24.4s }, [x21], #0x10\n"
+ "tbz x13, #2, 94f\n"
+ "ld1 { v13.4s }, [x12], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
+ "tbz x13, #1, 93f\n"
+ "ldr d20, [x12], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
+ "mov x19, #0x58\n"
+ "ldr d4, [x21], #0x8\n"
+ "tbz x13, #0, 104f\n"
+ "ld1 { v20.s }[2], [x12]\n"
+ "ld1 { v19.s }[2], [x22]\n"
+ "ld1 { v4.s }[2], [x21]\n"
"b 104f\n"
"93:" // Height 3: Partial accumulate: partial_1_20
- "mov x20, #0x50\n"
- "tbz x14, #0, 104f\n"
- "ldr s20, [x13, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s4, [x22, #0x0]\n"
+ "mov x19, #0x50\n"
+ "tbz x13, #0, 104f\n"
+ "ldr s20, [x12, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
+ "ldr s4, [x21, #0x0]\n"
"b 104f\n"
"94:" // Height 3: Partial accumulate: partial_2_16
- "tbz x14, #1, 95f\n"
- "ldr d13, [x13], #0x8\n"
- "ldr d18, [x23], #0x8\n"
- "mov x20, #0x48\n"
- "ldr d25, [x22], #0x8\n"
- "tbz x14, #0, 104f\n"
- "ld1 { v13.s }[2], [x13]\n"
- "ld1 { v18.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
+ "tbz x13, #1, 95f\n"
+ "ldr d13, [x12], #0x8\n"
+ "ldr d18, [x22], #0x8\n"
+ "mov x19, #0x48\n"
+ "ldr d25, [x21], #0x8\n"
+ "tbz x13, #0, 104f\n"
+ "ld1 { v13.s }[2], [x12]\n"
+ "ld1 { v18.s }[2], [x22]\n"
+ "ld1 { v25.s }[2], [x21]\n"
"b 104f\n"
"95:" // Height 3: Partial accumulate: partial_1_16
- "mov x20, #0x40\n"
- "tbz x14, #0, 104f\n"
- "ldr s13, [x13, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
+ "mov x19, #0x40\n"
+ "tbz x13, #0, 104f\n"
+ "ldr s13, [x12, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
+ "ldr s25, [x21, #0x0]\n"
"b 104f\n"
"96:" // Height 3: Partial accumulate: partial_8_0
- "tbz x14, #3, 100f\n"
- "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "tbz x14, #2, 98f\n"
- "ld1 { v11.4s }, [x13], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v23.4s }, [x22], #0x10\n"
- "tbz x14, #1, 97f\n"
- "ldr d12, [x13], #0x8\n"
- "ldr d17, [x23], #0x8\n"
- "mov x20, #0x38\n"
- "ldr d24, [x22], #0x8\n"
- "tbz x14, #0, 104f\n"
- "ld1 { v12.s }[2], [x13]\n"
- "ld1 { v17.s }[2], [x23]\n"
- "ld1 { v24.s }[2], [x22]\n"
+ "tbz x13, #3, 100f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v15.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
+ "tbz x13, #2, 98f\n"
+ "ld1 { v11.4s }, [x12], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v23.4s }, [x21], #0x10\n"
+ "tbz x13, #1, 97f\n"
+ "ldr d12, [x12], #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d24, [x21], #0x8\n"
+ "tbz x13, #0, 104f\n"
+ "ld1 { v12.s }[2], [x12]\n"
+ "ld1 { v17.s }[2], [x22]\n"
+ "ld1 { v24.s }[2], [x21]\n"
"b 104f\n"
"97:" // Height 3: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
- "tbz x14, #0, 104f\n"
- "ldr s12, [x13, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
- "ldr s24, [x22, #0x0]\n"
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 104f\n"
+ "ldr s12, [x12, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
+ "ldr s24, [x21, #0x0]\n"
"b 104f\n"
"98:" // Height 3: Partial accumulate: partial_2_8
- "tbz x14, #1, 99f\n"
- "ldr d11, [x13], #0x8\n"
- "ldr d16, [x23], #0x8\n"
- "mov x20, #0x28\n"
- "ldr d23, [x22], #0x8\n"
- "tbz x14, #0, 104f\n"
- "ld1 { v11.s }[2], [x13]\n"
- "ld1 { v16.s }[2], [x23]\n"
- "ld1 { v23.s }[2], [x22]\n"
+ "tbz x13, #1, 99f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d16, [x22], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d23, [x21], #0x8\n"
+ "tbz x13, #0, 104f\n"
+ "ld1 { v11.s }[2], [x12]\n"
+ "ld1 { v16.s }[2], [x22]\n"
+ "ld1 { v23.s }[2], [x21]\n"
"b 104f\n"
"99:" // Height 3: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
- "tbz x14, #0, 104f\n"
- "ldr s11, [x13, #0x0]\n"
- "ldr s16, [x23, #0x0]\n"
- "ldr s23, [x22, #0x0]\n"
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 104f\n"
+ "ldr s11, [x12, #0x0]\n"
+ "ldr s16, [x22, #0x0]\n"
+ "ldr s23, [x21, #0x0]\n"
"b 104f\n"
"100:" // Height 3: Partial accumulate: partial_4_0
- "tbz x14, #2, 102f\n"
- "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "tbz x14, #1, 101f\n"
- "ldr d10, [x13], #0x8\n"
- "ldr d15, [x23], #0x8\n"
- "mov x20, #0x18\n"
- "ldr d22, [x22], #0x8\n"
- "tbz x14, #0, 104f\n"
- "ld1 { v10.s }[2], [x13]\n"
- "ld1 { v15.s }[2], [x23]\n"
- "ld1 { v22.s }[2], [x22]\n"
+ "tbz x13, #2, 102f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "tbz x13, #1, 101f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d15, [x22], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d22, [x21], #0x8\n"
+ "tbz x13, #0, 104f\n"
+ "ld1 { v10.s }[2], [x12]\n"
+ "ld1 { v15.s }[2], [x22]\n"
+ "ld1 { v22.s }[2], [x21]\n"
"b 104f\n"
"101:" // Height 3: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
- "tbz x14, #0, 104f\n"
- "ldr s10, [x13, #0x0]\n"
- "ldr s15, [x23, #0x0]\n"
- "ldr s22, [x22, #0x0]\n"
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 104f\n"
+ "ldr s10, [x12, #0x0]\n"
+ "ldr s15, [x22, #0x0]\n"
+ "ldr s22, [x21, #0x0]\n"
"b 104f\n"
"102:" // Height 3: Partial accumulate: partial_2_0
- "tbz x14, #1, 103f\n"
- "ldr d9, [x13], #0x8\n"
- "ldr d14, [x23], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d21, [x22], #0x8\n"
- "tbz x14, #0, 104f\n"
- "ld1 { v9.s }[2], [x13]\n"
- "ld1 { v14.s }[2], [x23]\n"
- "ld1 { v21.s }[2], [x22]\n"
+ "tbz x13, #1, 103f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d14, [x22], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d21, [x21], #0x8\n"
+ "tbz x13, #0, 104f\n"
+ "ld1 { v9.s }[2], [x12]\n"
+ "ld1 { v14.s }[2], [x22]\n"
+ "ld1 { v21.s }[2], [x21]\n"
"b 104f\n"
"103:" // Height 3: Partial accumulate: partial_1_0
- "ldr s9, [x13, #0x0]\n"
- "ldr s14, [x23, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s21, [x22, #0x0]\n"
+ "ldr s9, [x12, #0x0]\n"
+ "ldr s14, [x22, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s21, [x21, #0x0]\n"
"104:" // Height 3: Partial accumulate: Done
- "sub x13, x13, x20\n"
+ "sub x12, x12, x19\n"
"b 106f\n"
"105:" // Height 3: full accumulate
- "ldr q9, [x13, #0x0]\n"
- "ldr q10, [x13, #0x10]\n"
- "ldr q11, [x13, #0x20]\n"
- "ldr q12, [x13, #0x30]\n"
- "ldr q13, [x13, #0x40]\n"
- "ldr q20, [x13, #0x50]\n"
- "ldr q14, [x23, #0x0]\n"
- "ldr q15, [x23, #0x10]\n"
- "ldr q16, [x23, #0x20]\n"
- "ldr q17, [x23, #0x30]\n"
- "ldr q18, [x23, #0x40]\n"
- "ldr q19, [x23, #0x50]\n"
- "ldr q21, [x22, #0x0]\n"
- "ldr q22, [x22, #0x10]\n"
- "ldr q23, [x22, #0x20]\n"
- "ldr q24, [x22, #0x30]\n"
- "ldr q25, [x22, #0x40]\n"
- "ldr q4, [x22, #0x50]\n"
+ "ldr q9, [x12, #0x0]\n"
+ "ldr q10, [x12, #0x10]\n"
+ "ldr q11, [x12, #0x20]\n"
+ "ldr q12, [x12, #0x30]\n"
+ "ldr q13, [x12, #0x40]\n"
+ "ldr q20, [x12, #0x50]\n"
+ "ldr q14, [x22, #0x0]\n"
+ "ldr q15, [x22, #0x10]\n"
+ "ldr q16, [x22, #0x20]\n"
+ "ldr q17, [x22, #0x30]\n"
+ "ldr q18, [x22, #0x40]\n"
+ "ldr q19, [x22, #0x50]\n"
+ "ldr q21, [x21, #0x0]\n"
+ "ldr q22, [x21, #0x10]\n"
+ "ldr q23, [x21, #0x20]\n"
+ "ldr q24, [x21, #0x30]\n"
+ "ldr q25, [x21, #0x40]\n"
+ "ldr q4, [x21, #0x50]\n"
"106:" // Height 3: MMLA fixup
"zip1 v8.2d, v9.2d, v14.2d\n"
"zip2 v14.2d, v9.2d, v14.2d\n"
@@ -1335,126 +1335,126 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"movi v30.16b, #0x0\n"
"movi v31.16b, #0x0\n"
"108:" // Height 3: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"109:" // Height 3: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w24, [x19, x25, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 110f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "ldr x22, [x21, #0x10]\n"
- "cbnz x26, 111f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x22, x22, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "cbnz x25, 111f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
"b 111f\n"
"110:" // Height 3: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"111:" // Height 3: input setup done
- "cmp x25, #0x4\n"
+ "cmp x24, #0x4\n"
"blt 114f\n"
- "ld1 { v0.4s }, [x24], #0x10\n"
- "ld1 { v1.4s }, [x23], #0x10\n"
- "cmp x25, #0x8\n"
- "ld1 { v2.4s }, [x22], #0x10\n"
- "ldr q4, [x12, #0x0]\n"
- "ldr q5, [x12, #0x10]\n"
- "ldr q6, [x11, #0x0]\n"
- "ldr q7, [x11, #0x10]\n"
+ "ld1 { v0.4s }, [x23], #0x10\n"
+ "ld1 { v1.4s }, [x22], #0x10\n"
+ "cmp x24, #0x8\n"
+ "ld1 { v2.4s }, [x21], #0x10\n"
+ "ldr q4, [x11, #0x0]\n"
+ "ldr q5, [x11, #0x10]\n"
+ "ldr q6, [x10, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
"blt 113f\n"
"112:" // Height 3: Multiply loop: Main loop head
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
- "ld1 { v1.4s }, [x23], #0x10\n"
+ "ld1 { v1.4s }, [x22], #0x10\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
- "ldr q4, [x10, #0x0]\n"
+ "ldr q4, [x9, #0x0]\n"
".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
- "ldr q5, [x10, #0x10]\n"
+ "ldr q5, [x9, #0x10]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- "sub x25, x25, #0x4\n"
+ "sub x24, x24, #0x4\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
- "ldr q6, [x9, #0x0]\n"
+ "ldr q6, [x28, #0x0]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
- "cmp x25, #0x8\n"
+ "cmp x24, #0x8\n"
".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
- "add x12, x12, #0x20\n"
+ "add x11, x11, #0x20\n"
".inst 0x6e44ec56 // bfmmla v22.4s, v2.8h, v4.8h\n"
- "ldr q4, [x28, #0x0]\n"
+ "ldr q4, [x27, #0x0]\n"
".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
- "add x11, x11, #0x20\n"
+ "add x10, x10, #0x20\n"
".inst 0x6e45ec5c // bfmmla v28.4s, v2.8h, v5.8h\n"
- "ldr q5, [x28, #0x10]\n"
+ "ldr q5, [x27, #0x10]\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
- "add x10, x10, #0x20\n"
+ "add x9, x9, #0x20\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
- "ldr q6, [x27, #0x0]\n"
+ "ldr q6, [x26, #0x0]\n"
".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
- "add x9, x9, #0x20\n"
- ".inst 0x6e47ec5d // bfmmla v29.4s, v2.8h, v7.8h\n"
- "ldr q7, [x27, #0x10]\n"
"add x28, x28, #0x20\n"
- ".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
+ ".inst 0x6e47ec5d // bfmmla v29.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x26, #0x10]\n"
"add x27, x27, #0x20\n"
+ ".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
+ "add x26, x26, #0x20\n"
".inst 0x6e44ec58 // bfmmla v24.4s, v2.8h, v4.8h\n"
- "ldr q4, [x12, #0x0]\n"
+ "ldr q4, [x11, #0x0]\n"
".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
".inst 0x6e45ec5e // bfmmla v30.4s, v2.8h, v5.8h\n"
- "ldr q5, [x12, #0x10]\n"
+ "ldr q5, [x11, #0x10]\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec59 // bfmmla v25.4s, v2.8h, v6.8h\n"
- "ldr q6, [x11, #0x0]\n"
+ "ldr q6, [x10, #0x0]\n"
".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
- "ld1 { v0.4s }, [x24], #0x10\n"
+ "ld1 { v0.4s }, [x23], #0x10\n"
".inst 0x6e47ec5f // bfmmla v31.4s, v2.8h, v7.8h\n"
- "ld1 { v2.4s }, [x22], #0x10\n"
- "ldr q7, [x11, #0x10]\n"
+ "ld1 { v2.4s }, [x21], #0x10\n"
+ "ldr q7, [x10, #0x10]\n"
"bge 112b\n"
"113:" // Height 3: Multiply loop: Single iteration only
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- "sub x25, x25, #0x4\n"
+ "sub x24, x24, #0x4\n"
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
- "ldr q4, [x10, #0x0]\n"
+ "ldr q4, [x9, #0x0]\n"
".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
- "ldr q5, [x10, #0x10]\n"
+ "ldr q5, [x9, #0x10]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- "add x12, x12, #0x20\n"
+ "add x11, x11, #0x20\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
- "ldr q6, [x9, #0x0]\n"
+ "ldr q6, [x28, #0x0]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
- "add x11, x11, #0x20\n"
+ "add x10, x10, #0x20\n"
".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
- "add x10, x10, #0x20\n"
+ "add x9, x9, #0x20\n"
".inst 0x6e44ec56 // bfmmla v22.4s, v2.8h, v4.8h\n"
- "ldr q4, [x28, #0x0]\n"
+ "ldr q4, [x27, #0x0]\n"
".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
- "add x9, x9, #0x20\n"
+ "add x28, x28, #0x20\n"
".inst 0x6e45ec5c // bfmmla v28.4s, v2.8h, v5.8h\n"
- "ldr q5, [x28, #0x10]\n"
+ "ldr q5, [x27, #0x10]\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
- "add x28, x28, #0x20\n"
+ "add x27, x27, #0x20\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
- "ldr q6, [x27, #0x0]\n"
+ "ldr q6, [x26, #0x0]\n"
".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec5d // bfmmla v29.4s, v2.8h, v7.8h\n"
- "ldr q7, [x27, #0x10]\n"
- "add x27, x27, #0x20\n"
+ "ldr q7, [x26, #0x10]\n"
+ "add x26, x26, #0x20\n"
".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
".inst 0x6e44ec58 // bfmmla v24.4s, v2.8h, v4.8h\n"
".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
@@ -1464,60 +1464,60 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec5f // bfmmla v31.4s, v2.8h, v7.8h\n"
"114:" // Height 3: Multiply loop: Main loop skip
- "cbz x25, 117f\n"
- "cbz x25, 117f\n"
- "tbz x25, #1, 115f\n"
- "ldr d0, [x24], #0x8\n"
- "ldr d1, [x23], #0x8\n"
- "ldr d2, [x22], #0x8\n"
- "tbz x25, #0, 116f\n"
- "ld1 { v0.s }[2], [x24]\n"
- "ld1 { v1.s }[2], [x23]\n"
- "ld1 { v2.s }[2], [x22]\n"
+ "cbz x24, 117f\n"
+ "cbz x24, 117f\n"
+ "tbz x24, #1, 115f\n"
+ "ldr d0, [x23], #0x8\n"
+ "ldr d1, [x22], #0x8\n"
+ "ldr d2, [x21], #0x8\n"
+ "tbz x24, #0, 116f\n"
+ "ld1 { v0.s }[2], [x23]\n"
+ "ld1 { v1.s }[2], [x22]\n"
+ "ld1 { v2.s }[2], [x21]\n"
"b 116f\n"
"115:" // Height 3: Multiply loop: Ragged operand read: partial_1_0
- "ldr s0, [x24, #0x0]\n"
- "ldr s1, [x23, #0x0]\n"
- "ldr s2, [x22, #0x0]\n"
+ "ldr s0, [x23, #0x0]\n"
+ "ldr s1, [x22, #0x0]\n"
+ "ldr s2, [x21, #0x0]\n"
"116:" // Height 3: Multiply loop: Ragged operand read: Done
- "ldr q4, [x12, #0x0]\n"
- "ldr q5, [x12, #0x10]\n"
+ "ldr q4, [x11, #0x0]\n"
+ "ldr q5, [x11, #0x10]\n"
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
- "ldr q6, [x11, #0x0]\n"
- "ldr q7, [x11, #0x10]\n"
+ "ldr q6, [x10, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
- "ldr q4, [x10, #0x0]\n"
+ "ldr q4, [x9, #0x0]\n"
".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
- "add x12, x12, #0x20\n"
+ "add x11, x11, #0x20\n"
".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
- "ldr q5, [x10, #0x10]\n"
+ "ldr q5, [x9, #0x10]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- "add x11, x11, #0x20\n"
+ "add x10, x10, #0x20\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
- "ldr q6, [x9, #0x0]\n"
+ "ldr q6, [x28, #0x0]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
- "add x10, x10, #0x20\n"
+ "add x9, x9, #0x20\n"
".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
- "add x9, x9, #0x20\n"
+ "add x28, x28, #0x20\n"
".inst 0x6e44ec56 // bfmmla v22.4s, v2.8h, v4.8h\n"
- "ldr q4, [x28, #0x0]\n"
+ "ldr q4, [x27, #0x0]\n"
".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
".inst 0x6e45ec5c // bfmmla v28.4s, v2.8h, v5.8h\n"
- "ldr q5, [x28, #0x10]\n"
+ "ldr q5, [x27, #0x10]\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
- "add x28, x28, #0x20\n"
+ "add x27, x27, #0x20\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
- "ldr q6, [x27, #0x0]\n"
+ "ldr q6, [x26, #0x0]\n"
".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec5d // bfmmla v29.4s, v2.8h, v7.8h\n"
- "ldr q7, [x27, #0x10]\n"
+ "ldr q7, [x26, #0x10]\n"
".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
- "add x27, x27, #0x20\n"
+ "add x26, x26, #0x20\n"
".inst 0x6e44ec58 // bfmmla v24.4s, v2.8h, v4.8h\n"
".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
".inst 0x6e45ec5e // bfmmla v30.4s, v2.8h, v5.8h\n"
@@ -1526,17 +1526,17 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec5f // bfmmla v31.4s, v2.8h, v7.8h\n"
"117:" // Height 3: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 109b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x13, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x22, x12, x19, LSL #2\n"
"uzp1 v4.2d, v8.2d, v14.2d\n"
"uzp2 v8.2d, v8.2d, v14.2d\n"
"uzp1 v14.2d, v9.2d, v15.2d\n"
"uzp2 v9.2d, v9.2d, v15.2d\n"
- "add x22, x23, x20, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"uzp1 v15.2d, v10.2d, v16.2d\n"
"uzp2 v10.2d, v10.2d, v16.2d\n"
"uzp1 v16.2d, v11.2d, v17.2d\n"
@@ -1552,10 +1552,10 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"uzp1 v24.2d, v24.2d, v30.2d\n"
"uzp1 v25.2d, v25.2d, v31.2d\n"
"tbz %x[flags], #1, 118f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.4s }, [x19]\n"
"fmin v4.4s, v4.4s, v1.4s\n"
"fmin v14.4s, v14.4s, v1.4s\n"
"fmin v15.4s, v15.4s, v1.4s\n"
@@ -1593,210 +1593,210 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"fmax v24.4s, v24.4s, v0.4s\n"
"fmax v25.4s, v25.4s, v0.4s\n"
"118:" // Height 3: No activation
- "cmp x14, #0x18\n"
+ "cmp x13, #0x18\n"
"bge 131f\n"
- "tbz x14, #4, 122f\n"
- "st1 { v4.4s }, [x13], #0x10\n"
- "st1 { v14.4s }, [x13], #0x10\n"
- "st1 { v15.4s }, [x13], #0x10\n"
- "st1 { v16.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
- "st1 { v9.4s }, [x23], #0x10\n"
- "st1 { v10.4s }, [x23], #0x10\n"
- "st1 { v11.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v21.4s }, [x22], #0x10\n"
- "st1 { v22.4s }, [x22], #0x10\n"
- "st1 { v23.4s }, [x22], #0x10\n"
- "tbz x14, #2, 120f\n"
- "st1 { v17.4s }, [x13], #0x10\n"
- "st1 { v12.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
- "tbz x14, #1, 119f\n"
- "str d18, [x13], #0x8\n"
- "str d13, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
- "tbz x14, #0, 130f\n"
- "st1 { v18.s }[2], [x13]\n"
- "st1 { v13.s }[2], [x23]\n"
- "st1 { v25.s }[2], [x22]\n"
+ "tbz x13, #4, 122f\n"
+ "st1 { v4.4s }, [x12], #0x10\n"
+ "st1 { v14.4s }, [x12], #0x10\n"
+ "st1 { v15.4s }, [x12], #0x10\n"
+ "st1 { v16.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x22], #0x10\n"
+ "st1 { v9.4s }, [x22], #0x10\n"
+ "st1 { v10.4s }, [x22], #0x10\n"
+ "st1 { v11.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v21.4s }, [x21], #0x10\n"
+ "st1 { v22.4s }, [x21], #0x10\n"
+ "st1 { v23.4s }, [x21], #0x10\n"
+ "tbz x13, #2, 120f\n"
+ "st1 { v17.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
+ "tbz x13, #1, 119f\n"
+ "str d18, [x12], #0x8\n"
+ "str d13, [x22], #0x8\n"
+ "str d25, [x21], #0x8\n"
+ "tbz x13, #0, 130f\n"
+ "st1 { v18.s }[2], [x12]\n"
+ "st1 { v13.s }[2], [x22]\n"
+ "st1 { v25.s }[2], [x21]\n"
"b 130f\n"
"119:" // Height 3: Partial direct writeback: partial_1_20
- "tbz x14, #0, 130f\n"
- "str s18, [x13, #0x0]\n"
- "str s13, [x23, #0x0]\n"
- "str s25, [x22, #0x0]\n"
+ "tbz x13, #0, 130f\n"
+ "str s18, [x12, #0x0]\n"
+ "str s13, [x22, #0x0]\n"
+ "str s25, [x21, #0x0]\n"
"b 130f\n"
"120:" // Height 3: Partial direct writeback: partial_2_16
- "tbz x14, #1, 121f\n"
- "str d17, [x13], #0x8\n"
- "str d12, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
- "tbz x14, #0, 130f\n"
- "st1 { v17.s }[2], [x13]\n"
- "st1 { v12.s }[2], [x23]\n"
- "st1 { v24.s }[2], [x22]\n"
+ "tbz x13, #1, 121f\n"
+ "str d17, [x12], #0x8\n"
+ "str d12, [x22], #0x8\n"
+ "str d24, [x21], #0x8\n"
+ "tbz x13, #0, 130f\n"
+ "st1 { v17.s }[2], [x12]\n"
+ "st1 { v12.s }[2], [x22]\n"
+ "st1 { v24.s }[2], [x21]\n"
"b 130f\n"
"121:" // Height 3: Partial direct writeback: partial_1_16
- "tbz x14, #0, 130f\n"
- "str s17, [x13, #0x0]\n"
- "str s12, [x23, #0x0]\n"
- "str s24, [x22, #0x0]\n"
+ "tbz x13, #0, 130f\n"
+ "str s17, [x12, #0x0]\n"
+ "str s12, [x22, #0x0]\n"
+ "str s24, [x21, #0x0]\n"
"b 130f\n"
"122:" // Height 3: Partial direct writeback: partial_8_0
- "tbz x14, #3, 126f\n"
- "st1 { v4.4s }, [x13], #0x10\n"
- "st1 { v14.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
- "st1 { v9.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v21.4s }, [x22], #0x10\n"
- "tbz x14, #2, 124f\n"
- "st1 { v15.4s }, [x13], #0x10\n"
- "st1 { v10.4s }, [x23], #0x10\n"
- "st1 { v22.4s }, [x22], #0x10\n"
- "tbz x14, #1, 123f\n"
- "str d16, [x13], #0x8\n"
- "str d11, [x23], #0x8\n"
- "str d23, [x22], #0x8\n"
- "tbz x14, #0, 130f\n"
- "st1 { v16.s }[2], [x13]\n"
- "st1 { v11.s }[2], [x23]\n"
- "st1 { v23.s }[2], [x22]\n"
+ "tbz x13, #3, 126f\n"
+ "st1 { v4.4s }, [x12], #0x10\n"
+ "st1 { v14.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x22], #0x10\n"
+ "st1 { v9.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v21.4s }, [x21], #0x10\n"
+ "tbz x13, #2, 124f\n"
+ "st1 { v15.4s }, [x12], #0x10\n"
+ "st1 { v10.4s }, [x22], #0x10\n"
+ "st1 { v22.4s }, [x21], #0x10\n"
+ "tbz x13, #1, 123f\n"
+ "str d16, [x12], #0x8\n"
+ "str d11, [x22], #0x8\n"
+ "str d23, [x21], #0x8\n"
+ "tbz x13, #0, 130f\n"
+ "st1 { v16.s }[2], [x12]\n"
+ "st1 { v11.s }[2], [x22]\n"
+ "st1 { v23.s }[2], [x21]\n"
"b 130f\n"
"123:" // Height 3: Partial direct writeback: partial_1_12
- "tbz x14, #0, 130f\n"
- "str s16, [x13, #0x0]\n"
- "str s11, [x23, #0x0]\n"
- "str s23, [x22, #0x0]\n"
+ "tbz x13, #0, 130f\n"
+ "str s16, [x12, #0x0]\n"
+ "str s11, [x22, #0x0]\n"
+ "str s23, [x21, #0x0]\n"
"b 130f\n"
"124:" // Height 3: Partial direct writeback: partial_2_8
- "tbz x14, #1, 125f\n"
- "str d15, [x13], #0x8\n"
- "str d10, [x23], #0x8\n"
- "str d22, [x22], #0x8\n"
- "tbz x14, #0, 130f\n"
- "st1 { v15.s }[2], [x13]\n"
- "st1 { v10.s }[2], [x23]\n"
- "st1 { v22.s }[2], [x22]\n"
+ "tbz x13, #1, 125f\n"
+ "str d15, [x12], #0x8\n"
+ "str d10, [x22], #0x8\n"
+ "str d22, [x21], #0x8\n"
+ "tbz x13, #0, 130f\n"
+ "st1 { v15.s }[2], [x12]\n"
+ "st1 { v10.s }[2], [x22]\n"
+ "st1 { v22.s }[2], [x21]\n"
"b 130f\n"
"125:" // Height 3: Partial direct writeback: partial_1_8
- "tbz x14, #0, 130f\n"
- "str s15, [x13, #0x0]\n"
- "str s10, [x23, #0x0]\n"
- "str s22, [x22, #0x0]\n"
+ "tbz x13, #0, 130f\n"
+ "str s15, [x12, #0x0]\n"
+ "str s10, [x22, #0x0]\n"
+ "str s22, [x21, #0x0]\n"
"b 130f\n"
"126:" // Height 3: Partial direct writeback: partial_4_0
- "tbz x14, #2, 128f\n"
- "st1 { v4.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "tbz x14, #1, 127f\n"
- "str d14, [x13], #0x8\n"
- "str d9, [x23], #0x8\n"
- "str d21, [x22], #0x8\n"
- "tbz x14, #0, 130f\n"
- "st1 { v14.s }[2], [x13]\n"
- "st1 { v9.s }[2], [x23]\n"
- "st1 { v21.s }[2], [x22]\n"
+ "tbz x13, #2, 128f\n"
+ "st1 { v4.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "tbz x13, #1, 127f\n"
+ "str d14, [x12], #0x8\n"
+ "str d9, [x22], #0x8\n"
+ "str d21, [x21], #0x8\n"
+ "tbz x13, #0, 130f\n"
+ "st1 { v14.s }[2], [x12]\n"
+ "st1 { v9.s }[2], [x22]\n"
+ "st1 { v21.s }[2], [x21]\n"
"b 130f\n"
"127:" // Height 3: Partial direct writeback: partial_1_4
- "tbz x14, #0, 130f\n"
- "str s14, [x13, #0x0]\n"
- "str s9, [x23, #0x0]\n"
- "str s21, [x22, #0x0]\n"
+ "tbz x13, #0, 130f\n"
+ "str s14, [x12, #0x0]\n"
+ "str s9, [x22, #0x0]\n"
+ "str s21, [x21, #0x0]\n"
"b 130f\n"
"128:" // Height 3: Partial direct writeback: partial_2_0
- "tbz x14, #1, 129f\n"
- "str d4, [x13], #0x8\n"
- "str d8, [x23], #0x8\n"
- "str d20, [x22], #0x8\n"
- "tbz x14, #0, 130f\n"
- "st1 { v4.s }[2], [x13]\n"
- "st1 { v8.s }[2], [x23]\n"
- "st1 { v20.s }[2], [x22]\n"
+ "tbz x13, #1, 129f\n"
+ "str d4, [x12], #0x8\n"
+ "str d8, [x22], #0x8\n"
+ "str d20, [x21], #0x8\n"
+ "tbz x13, #0, 130f\n"
+ "st1 { v4.s }[2], [x12]\n"
+ "st1 { v8.s }[2], [x22]\n"
+ "st1 { v20.s }[2], [x21]\n"
"b 130f\n"
"129:" // Height 3: Partial direct writeback: partial_1_0
- "str s4, [x13, #0x0]\n"
- "str s8, [x23, #0x0]\n"
- "str s20, [x22, #0x0]\n"
+ "str s4, [x12, #0x0]\n"
+ "str s8, [x22, #0x0]\n"
+ "str s20, [x21, #0x0]\n"
"130:" // Height 3: Partial direct writeback: Done
"b 132f\n"
"131:" // Height 3: Full writeback
- "str q4, [x13, #0x0]\n"
- "str q14, [x13, #0x10]\n"
- "str q15, [x13, #0x20]\n"
- "str q16, [x13, #0x30]\n"
- "str q17, [x13, #0x40]\n"
- "str q18, [x13, #0x50]\n"
- "add x13, x13, #0x60\n"
- "str q8, [x23, #0x0]\n"
- "str q9, [x23, #0x10]\n"
- "str q10, [x23, #0x20]\n"
- "str q11, [x23, #0x30]\n"
- "str q12, [x23, #0x40]\n"
- "str q13, [x23, #0x50]\n"
- "str q20, [x22, #0x0]\n"
- "str q21, [x22, #0x10]\n"
- "str q22, [x22, #0x20]\n"
- "str q23, [x22, #0x30]\n"
- "str q24, [x22, #0x40]\n"
- "str q25, [x22, #0x50]\n"
+ "str q4, [x12, #0x0]\n"
+ "str q14, [x12, #0x10]\n"
+ "str q15, [x12, #0x20]\n"
+ "str q16, [x12, #0x30]\n"
+ "str q17, [x12, #0x40]\n"
+ "str q18, [x12, #0x50]\n"
+ "add x12, x12, #0x60\n"
+ "str q8, [x22, #0x0]\n"
+ "str q9, [x22, #0x10]\n"
+ "str q10, [x22, #0x20]\n"
+ "str q11, [x22, #0x30]\n"
+ "str q12, [x22, #0x40]\n"
+ "str q13, [x22, #0x50]\n"
+ "str q20, [x21, #0x0]\n"
+ "str q21, [x21, #0x10]\n"
+ "str q22, [x21, #0x20]\n"
+ "str q23, [x21, #0x30]\n"
+ "str q24, [x21, #0x40]\n"
+ "str q25, [x21, #0x50]\n"
"132:" // Height 3: Writeback done
- "subs x14, x14, #0x18\n"
+ "subs x13, x13, #0x18\n"
"bgt 90b\n"
"b 178f\n"
"133:" // Height 4
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x21, #0x10\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x15, %x[bias]\n"
- "mov x13, %x[output_ptr]\n"
- "madd %x[output_ptr], x20, x21, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x20, #0x10\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x14, %x[bias]\n"
+ "mov x12, %x[output_ptr]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
"134:" // Height 4: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x28, x9, x20, LSL #1\n"
- "add x27, x28, x20, LSL #1\n"
- "add x20, x27, x20, LSL #1\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0x14\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x27, x28, x19, LSL #1\n"
+ "add x26, x27, x19, LSL #1\n"
+ "add x19, x26, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0x14\n"
"bgt 135f\n"
- "cmp x14, #0x10\n"
- "mov x27, x12\n"
+ "cmp x13, #0x10\n"
+ "mov x26, x11\n"
"bgt 135f\n"
- "cmp x14, #0xc\n"
- "mov x28, x12\n"
+ "cmp x13, #0xc\n"
+ "mov x27, x11\n"
"bgt 135f\n"
- "cmp x14, #0x8\n"
- "mov x9, x12\n"
+ "cmp x13, #0x8\n"
+ "mov x28, x11\n"
"bgt 135f\n"
- "cmp x14, #0x4\n"
- "mov x10, x12\n"
+ "cmp x13, #0x4\n"
+ "mov x9, x11\n"
"bgt 135f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"135:" // Height 4: B setup done
- "cbz x15, 136f\n"
- "ldr q8, [x15, #0x0]\n"
- "ldr q9, [x15, #0x10]\n"
+ "cbz x14, 136f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
"zip2 v14.2d, v8.2d, v8.2d\n"
"zip1 v8.2d, v8.2d, v8.2d\n"
- "ldr q10, [x15, #0x20]\n"
- "ldr q11, [x15, #0x30]\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
"zip2 v15.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
- "ldr q12, [x15, #0x40]\n"
- "ldr q13, [x15, #0x50]\n"
+ "ldr q12, [x14, #0x40]\n"
+ "ldr q13, [x14, #0x50]\n"
"zip2 v16.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
"zip2 v17.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
- "add x15, x15, #0x60\n"
+ "add x14, x14, #0x60\n"
"zip2 v18.2d, v12.2d, v12.2d\n"
"zip1 v12.2d, v12.2d, v12.2d\n"
"zip2 v19.2d, v13.2d, v13.2d\n"
@@ -1816,204 +1816,204 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"b 152f\n"
"136:" // Height 4: no bias
"tbz %x[flags], #0, 151f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x13, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "cmp x14, #0x18\n"
- "add x21, x22, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x22, x12, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "cmp x13, #0x18\n"
+ "add x20, x21, x19, LSL #2\n"
"bge 149f\n"
- "tbz x14, #4, 140f\n"
- "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
- "ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "ld1 { v27.4s }, [x21], #0x10\n"
- "ld1 { v11.4s }, [x13], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v23.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
- "ld1 { v12.4s }, [x13], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
- "ld1 { v29.4s }, [x21], #0x10\n"
- "tbz x14, #2, 138f\n"
- "ld1 { v13.4s }, [x13], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
- "ld1 { v30.4s }, [x21], #0x10\n"
- "tbz x14, #1, 137f\n"
- "ldr d20, [x13], #0x8\n"
- "ldr d19, [x23], #0x8\n"
- "mov x20, #0x58\n"
- "ldr d4, [x22], #0x8\n"
- "ldr d31, [x21], #0x8\n"
- "tbz x14, #0, 148f\n"
- "ld1 { v20.s }[2], [x13]\n"
- "ld1 { v19.s }[2], [x23]\n"
- "ld1 { v4.s }[2], [x22]\n"
- "ld1 { v31.s }[2], [x21]\n"
+ "tbz x13, #4, 140f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "ld1 { v26.4s }, [x20], #0x10\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v15.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
+ "ld1 { v27.4s }, [x20], #0x10\n"
+ "ld1 { v11.4s }, [x12], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v23.4s }, [x21], #0x10\n"
+ "ld1 { v28.4s }, [x20], #0x10\n"
+ "ld1 { v12.4s }, [x12], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v24.4s }, [x21], #0x10\n"
+ "ld1 { v29.4s }, [x20], #0x10\n"
+ "tbz x13, #2, 138f\n"
+ "ld1 { v13.4s }, [x12], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
+ "ld1 { v30.4s }, [x20], #0x10\n"
+ "tbz x13, #1, 137f\n"
+ "ldr d20, [x12], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
+ "mov x19, #0x58\n"
+ "ldr d4, [x21], #0x8\n"
+ "ldr d31, [x20], #0x8\n"
+ "tbz x13, #0, 148f\n"
+ "ld1 { v20.s }[2], [x12]\n"
+ "ld1 { v19.s }[2], [x22]\n"
+ "ld1 { v4.s }[2], [x21]\n"
+ "ld1 { v31.s }[2], [x20]\n"
"b 148f\n"
"137:" // Height 4: Partial accumulate: partial_1_20
- "mov x20, #0x50\n"
- "tbz x14, #0, 148f\n"
- "ldr s20, [x13, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s4, [x22, #0x0]\n"
- "ldr s31, [x21, #0x0]\n"
+ "mov x19, #0x50\n"
+ "tbz x13, #0, 148f\n"
+ "ldr s20, [x12, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
+ "ldr s4, [x21, #0x0]\n"
+ "ldr s31, [x20, #0x0]\n"
"b 148f\n"
"138:" // Height 4: Partial accumulate: partial_2_16
- "tbz x14, #1, 139f\n"
- "ldr d13, [x13], #0x8\n"
- "ldr d18, [x23], #0x8\n"
- "mov x20, #0x48\n"
- "ldr d25, [x22], #0x8\n"
- "ldr d30, [x21], #0x8\n"
- "tbz x14, #0, 148f\n"
- "ld1 { v13.s }[2], [x13]\n"
- "ld1 { v18.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
- "ld1 { v30.s }[2], [x21]\n"
+ "tbz x13, #1, 139f\n"
+ "ldr d13, [x12], #0x8\n"
+ "ldr d18, [x22], #0x8\n"
+ "mov x19, #0x48\n"
+ "ldr d25, [x21], #0x8\n"
+ "ldr d30, [x20], #0x8\n"
+ "tbz x13, #0, 148f\n"
+ "ld1 { v13.s }[2], [x12]\n"
+ "ld1 { v18.s }[2], [x22]\n"
+ "ld1 { v25.s }[2], [x21]\n"
+ "ld1 { v30.s }[2], [x20]\n"
"b 148f\n"
"139:" // Height 4: Partial accumulate: partial_1_16
- "mov x20, #0x40\n"
- "tbz x14, #0, 148f\n"
- "ldr s13, [x13, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
- "ldr s30, [x21, #0x0]\n"
+ "mov x19, #0x40\n"
+ "tbz x13, #0, 148f\n"
+ "ldr s13, [x12, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
+ "ldr s25, [x21, #0x0]\n"
+ "ldr s30, [x20, #0x0]\n"
"b 148f\n"
"140:" // Height 4: Partial accumulate: partial_8_0
- "tbz x14, #3, 144f\n"
- "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
- "ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "ld1 { v27.4s }, [x21], #0x10\n"
- "tbz x14, #2, 142f\n"
- "ld1 { v11.4s }, [x13], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v23.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
- "tbz x14, #1, 141f\n"
- "ldr d12, [x13], #0x8\n"
- "ldr d17, [x23], #0x8\n"
- "mov x20, #0x38\n"
- "ldr d24, [x22], #0x8\n"
- "ldr d29, [x21], #0x8\n"
- "tbz x14, #0, 148f\n"
- "ld1 { v12.s }[2], [x13]\n"
- "ld1 { v17.s }[2], [x23]\n"
- "ld1 { v24.s }[2], [x22]\n"
- "ld1 { v29.s }[2], [x21]\n"
+ "tbz x13, #3, 144f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "ld1 { v26.4s }, [x20], #0x10\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v15.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
+ "ld1 { v27.4s }, [x20], #0x10\n"
+ "tbz x13, #2, 142f\n"
+ "ld1 { v11.4s }, [x12], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v23.4s }, [x21], #0x10\n"
+ "ld1 { v28.4s }, [x20], #0x10\n"
+ "tbz x13, #1, 141f\n"
+ "ldr d12, [x12], #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d24, [x21], #0x8\n"
+ "ldr d29, [x20], #0x8\n"
+ "tbz x13, #0, 148f\n"
+ "ld1 { v12.s }[2], [x12]\n"
+ "ld1 { v17.s }[2], [x22]\n"
+ "ld1 { v24.s }[2], [x21]\n"
+ "ld1 { v29.s }[2], [x20]\n"
"b 148f\n"
"141:" // Height 4: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
- "tbz x14, #0, 148f\n"
- "ldr s12, [x13, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
- "ldr s24, [x22, #0x0]\n"
- "ldr s29, [x21, #0x0]\n"
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 148f\n"
+ "ldr s12, [x12, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
+ "ldr s24, [x21, #0x0]\n"
+ "ldr s29, [x20, #0x0]\n"
"b 148f\n"
"142:" // Height 4: Partial accumulate: partial_2_8
- "tbz x14, #1, 143f\n"
- "ldr d11, [x13], #0x8\n"
- "ldr d16, [x23], #0x8\n"
- "mov x20, #0x28\n"
- "ldr d23, [x22], #0x8\n"
- "ldr d28, [x21], #0x8\n"
- "tbz x14, #0, 148f\n"
- "ld1 { v11.s }[2], [x13]\n"
- "ld1 { v16.s }[2], [x23]\n"
- "ld1 { v23.s }[2], [x22]\n"
- "ld1 { v28.s }[2], [x21]\n"
+ "tbz x13, #1, 143f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d16, [x22], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d23, [x21], #0x8\n"
+ "ldr d28, [x20], #0x8\n"
+ "tbz x13, #0, 148f\n"
+ "ld1 { v11.s }[2], [x12]\n"
+ "ld1 { v16.s }[2], [x22]\n"
+ "ld1 { v23.s }[2], [x21]\n"
+ "ld1 { v28.s }[2], [x20]\n"
"b 148f\n"
"143:" // Height 4: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
- "tbz x14, #0, 148f\n"
- "ldr s11, [x13, #0x0]\n"
- "ldr s16, [x23, #0x0]\n"
- "ldr s23, [x22, #0x0]\n"
- "ldr s28, [x21, #0x0]\n"
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 148f\n"
+ "ldr s11, [x12, #0x0]\n"
+ "ldr s16, [x22, #0x0]\n"
+ "ldr s23, [x21, #0x0]\n"
+ "ldr s28, [x20, #0x0]\n"
"b 148f\n"
"144:" // Height 4: Partial accumulate: partial_4_0
- "tbz x14, #2, 146f\n"
- "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
- "tbz x14, #1, 145f\n"
- "ldr d10, [x13], #0x8\n"
- "ldr d15, [x23], #0x8\n"
- "mov x20, #0x18\n"
- "ldr d22, [x22], #0x8\n"
- "ldr d27, [x21], #0x8\n"
- "tbz x14, #0, 148f\n"
- "ld1 { v10.s }[2], [x13]\n"
- "ld1 { v15.s }[2], [x23]\n"
- "ld1 { v22.s }[2], [x22]\n"
- "ld1 { v27.s }[2], [x21]\n"
+ "tbz x13, #2, 146f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "ld1 { v26.4s }, [x20], #0x10\n"
+ "tbz x13, #1, 145f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d15, [x22], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d22, [x21], #0x8\n"
+ "ldr d27, [x20], #0x8\n"
+ "tbz x13, #0, 148f\n"
+ "ld1 { v10.s }[2], [x12]\n"
+ "ld1 { v15.s }[2], [x22]\n"
+ "ld1 { v22.s }[2], [x21]\n"
+ "ld1 { v27.s }[2], [x20]\n"
"b 148f\n"
"145:" // Height 4: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
- "tbz x14, #0, 148f\n"
- "ldr s10, [x13, #0x0]\n"
- "ldr s15, [x23, #0x0]\n"
- "ldr s22, [x22, #0x0]\n"
- "ldr s27, [x21, #0x0]\n"
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 148f\n"
+ "ldr s10, [x12, #0x0]\n"
+ "ldr s15, [x22, #0x0]\n"
+ "ldr s22, [x21, #0x0]\n"
+ "ldr s27, [x20, #0x0]\n"
"b 148f\n"
"146:" // Height 4: Partial accumulate: partial_2_0
- "tbz x14, #1, 147f\n"
- "ldr d9, [x13], #0x8\n"
- "ldr d14, [x23], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d21, [x22], #0x8\n"
- "ldr d26, [x21], #0x8\n"
- "tbz x14, #0, 148f\n"
- "ld1 { v9.s }[2], [x13]\n"
- "ld1 { v14.s }[2], [x23]\n"
- "ld1 { v21.s }[2], [x22]\n"
- "ld1 { v26.s }[2], [x21]\n"
+ "tbz x13, #1, 147f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d14, [x22], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d21, [x21], #0x8\n"
+ "ldr d26, [x20], #0x8\n"
+ "tbz x13, #0, 148f\n"
+ "ld1 { v9.s }[2], [x12]\n"
+ "ld1 { v14.s }[2], [x22]\n"
+ "ld1 { v21.s }[2], [x21]\n"
+ "ld1 { v26.s }[2], [x20]\n"
"b 148f\n"
"147:" // Height 4: Partial accumulate: partial_1_0
- "ldr s9, [x13, #0x0]\n"
- "ldr s14, [x23, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s21, [x22, #0x0]\n"
- "ldr s26, [x21, #0x0]\n"
+ "ldr s9, [x12, #0x0]\n"
+ "ldr s14, [x22, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s21, [x21, #0x0]\n"
+ "ldr s26, [x20, #0x0]\n"
"148:" // Height 4: Partial accumulate: Done
- "sub x13, x13, x20\n"
+ "sub x12, x12, x19\n"
"b 150f\n"
"149:" // Height 4: full accumulate
- "ldr q9, [x13, #0x0]\n"
- "ldr q10, [x13, #0x10]\n"
- "ldr q11, [x13, #0x20]\n"
- "ldr q12, [x13, #0x30]\n"
- "ldr q13, [x13, #0x40]\n"
- "ldr q20, [x13, #0x50]\n"
- "ldr q14, [x23, #0x0]\n"
- "ldr q15, [x23, #0x10]\n"
- "ldr q16, [x23, #0x20]\n"
- "ldr q17, [x23, #0x30]\n"
- "ldr q18, [x23, #0x40]\n"
- "ldr q19, [x23, #0x50]\n"
- "ldr q21, [x22, #0x0]\n"
- "ldr q22, [x22, #0x10]\n"
- "ldr q23, [x22, #0x20]\n"
- "ldr q24, [x22, #0x30]\n"
- "ldr q25, [x22, #0x40]\n"
- "ldr q4, [x22, #0x50]\n"
- "ldr q26, [x21, #0x0]\n"
- "ldr q27, [x21, #0x10]\n"
- "ldr q28, [x21, #0x20]\n"
- "ldr q29, [x21, #0x30]\n"
- "ldr q30, [x21, #0x40]\n"
- "ldr q31, [x21, #0x50]\n"
+ "ldr q9, [x12, #0x0]\n"
+ "ldr q10, [x12, #0x10]\n"
+ "ldr q11, [x12, #0x20]\n"
+ "ldr q12, [x12, #0x30]\n"
+ "ldr q13, [x12, #0x40]\n"
+ "ldr q20, [x12, #0x50]\n"
+ "ldr q14, [x22, #0x0]\n"
+ "ldr q15, [x22, #0x10]\n"
+ "ldr q16, [x22, #0x20]\n"
+ "ldr q17, [x22, #0x30]\n"
+ "ldr q18, [x22, #0x40]\n"
+ "ldr q19, [x22, #0x50]\n"
+ "ldr q21, [x21, #0x0]\n"
+ "ldr q22, [x21, #0x10]\n"
+ "ldr q23, [x21, #0x20]\n"
+ "ldr q24, [x21, #0x30]\n"
+ "ldr q25, [x21, #0x40]\n"
+ "ldr q4, [x21, #0x50]\n"
+ "ldr q26, [x20, #0x0]\n"
+ "ldr q27, [x20, #0x10]\n"
+ "ldr q28, [x20, #0x20]\n"
+ "ldr q29, [x20, #0x30]\n"
+ "ldr q30, [x20, #0x40]\n"
+ "ldr q31, [x20, #0x50]\n"
"150:" // Height 4: MMLA fixup
"zip1 v8.2d, v9.2d, v14.2d\n"
"zip2 v14.2d, v9.2d, v14.2d\n"
@@ -2066,133 +2066,133 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"movi v30.16b, #0x0\n"
"movi v31.16b, #0x0\n"
"152:" // Height 4: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"153:" // Height 4: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w24, [x19, x25, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 154f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "ldr x22, [x21, #0x10]\n"
- "ldr x21, [x21, #0x18]\n"
- "cbnz x26, 155f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x22, x22, x20, LSL #2\n"
- "add x21, x21, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "ldr x20, [x20, #0x18]\n"
+ "cbnz x25, 155f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "add x20, x20, x19, LSL #2\n"
"b 155f\n"
"154:" // Height 4: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"155:" // Height 4: input setup done
- "cmp x25, #0x4\n"
+ "cmp x24, #0x4\n"
"blt 158f\n"
- "ld1 { v0.4s }, [x24], #0x10\n"
- "ld1 { v2.4s }, [x22], #0x10\n"
- "cmp x25, #0x8\n"
- "ld1 { v1.4s }, [x23], #0x10\n"
- "ld1 { v3.4s }, [x21], #0x10\n"
- "ldr q4, [x12, #0x0]\n"
- "ldr q5, [x12, #0x10]\n"
- "ldr q6, [x11, #0x0]\n"
- "ldr q7, [x11, #0x10]\n"
+ "ld1 { v0.4s }, [x23], #0x10\n"
+ "ld1 { v2.4s }, [x21], #0x10\n"
+ "cmp x24, #0x8\n"
+ "ld1 { v1.4s }, [x22], #0x10\n"
+ "ld1 { v3.4s }, [x20], #0x10\n"
+ "ldr q4, [x11, #0x0]\n"
+ "ldr q5, [x11, #0x10]\n"
+ "ldr q6, [x10, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
"blt 157f\n"
"156:" // Height 4: Multiply loop: Main loop head
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
- "sub x25, x25, #0x4\n"
- "cmp x25, #0x8\n"
+ "sub x24, x24, #0x4\n"
+ "cmp x24, #0x8\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
- "ld1 { v1.4s }, [x23], #0x10\n"
+ "ld1 { v1.4s }, [x22], #0x10\n"
".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
- "ld1 { v3.4s }, [x21], #0x10\n"
+ "ld1 { v3.4s }, [x20], #0x10\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
- "ldr q4, [x10, #0x0]\n"
- "add x12, x12, #0x20\n"
+ "ldr q4, [x9, #0x0]\n"
+ "add x11, x11, #0x20\n"
".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
- "ldr q5, [x10, #0x10]\n"
- "add x11, x11, #0x20\n"
+ "ldr q5, [x9, #0x10]\n"
+ "add x10, x10, #0x20\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
- "ldr q6, [x9, #0x0]\n"
- "add x10, x10, #0x20\n"
+ "ldr q6, [x28, #0x0]\n"
+ "add x9, x9, #0x20\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
- "ldr q7, [x9, #0x10]\n"
- "add x9, x9, #0x20\n"
+ "ldr q7, [x28, #0x10]\n"
+ "add x28, x28, #0x20\n"
".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
".inst 0x6e44ec56 // bfmmla v22.4s, v2.8h, v4.8h\n"
- "ldr q4, [x28, #0x0]\n"
+ "ldr q4, [x27, #0x0]\n"
".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
".inst 0x6e45ec5c // bfmmla v28.4s, v2.8h, v5.8h\n"
- "ldr q5, [x28, #0x10]\n"
- "add x28, x28, #0x20\n"
+ "ldr q5, [x27, #0x10]\n"
+ "add x27, x27, #0x20\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
- "ldr q6, [x27, #0x0]\n"
+ "ldr q6, [x26, #0x0]\n"
".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec5d // bfmmla v29.4s, v2.8h, v7.8h\n"
- "ldr q7, [x27, #0x10]\n"
- "add x27, x27, #0x20\n"
+ "ldr q7, [x26, #0x10]\n"
+ "add x26, x26, #0x20\n"
".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
".inst 0x6e44ec58 // bfmmla v24.4s, v2.8h, v4.8h\n"
- "ldr q4, [x12, #0x0]\n"
+ "ldr q4, [x11, #0x0]\n"
".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
".inst 0x6e45ec5e // bfmmla v30.4s, v2.8h, v5.8h\n"
- "ldr q5, [x12, #0x10]\n"
+ "ldr q5, [x11, #0x10]\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec59 // bfmmla v25.4s, v2.8h, v6.8h\n"
- "ldr q6, [x11, #0x0]\n"
+ "ldr q6, [x10, #0x0]\n"
".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
- "ld1 { v0.4s }, [x24], #0x10\n"
+ "ld1 { v0.4s }, [x23], #0x10\n"
".inst 0x6e47ec5f // bfmmla v31.4s, v2.8h, v7.8h\n"
- "ld1 { v2.4s }, [x22], #0x10\n"
- "ldr q7, [x11, #0x10]\n"
+ "ld1 { v2.4s }, [x21], #0x10\n"
+ "ldr q7, [x10, #0x10]\n"
"bge 156b\n"
"157:" // Height 4: Multiply loop: Single iteration only
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
- "sub x25, x25, #0x4\n"
- "add x12, x12, #0x20\n"
+ "sub x24, x24, #0x4\n"
+ "add x11, x11, #0x20\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- "add x11, x11, #0x20\n"
+ "add x10, x10, #0x20\n"
".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
- "ldr q4, [x10, #0x0]\n"
+ "ldr q4, [x9, #0x0]\n"
".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
- "ldr q5, [x10, #0x10]\n"
+ "ldr q5, [x9, #0x10]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- "add x10, x10, #0x20\n"
+ "add x9, x9, #0x20\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
- "ldr q6, [x9, #0x0]\n"
+ "ldr q6, [x28, #0x0]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x28, #0x10]\n"
".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
- "add x9, x9, #0x20\n"
+ "add x28, x28, #0x20\n"
".inst 0x6e44ec56 // bfmmla v22.4s, v2.8h, v4.8h\n"
- "ldr q4, [x28, #0x0]\n"
+ "ldr q4, [x27, #0x0]\n"
".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
".inst 0x6e45ec5c // bfmmla v28.4s, v2.8h, v5.8h\n"
- "ldr q5, [x28, #0x10]\n"
+ "ldr q5, [x27, #0x10]\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
- "add x28, x28, #0x20\n"
+ "add x27, x27, #0x20\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
- "ldr q6, [x27, #0x0]\n"
+ "ldr q6, [x26, #0x0]\n"
".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec5d // bfmmla v29.4s, v2.8h, v7.8h\n"
- "ldr q7, [x27, #0x10]\n"
- "add x27, x27, #0x20\n"
+ "ldr q7, [x26, #0x10]\n"
+ "add x26, x26, #0x20\n"
".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
".inst 0x6e44ec58 // bfmmla v24.4s, v2.8h, v4.8h\n"
".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
@@ -2202,63 +2202,63 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec5f // bfmmla v31.4s, v2.8h, v7.8h\n"
"158:" // Height 4: Multiply loop: Main loop skip
- "cbz x25, 161f\n"
- "cbz x25, 161f\n"
- "tbz x25, #1, 159f\n"
- "ldr d0, [x24], #0x8\n"
- "ldr d1, [x23], #0x8\n"
- "ldr d2, [x22], #0x8\n"
- "ldr d3, [x21], #0x8\n"
- "tbz x25, #0, 160f\n"
- "ld1 { v0.s }[2], [x24]\n"
- "ld1 { v1.s }[2], [x23]\n"
- "ld1 { v2.s }[2], [x22]\n"
- "ld1 { v3.s }[2], [x21]\n"
+ "cbz x24, 161f\n"
+ "cbz x24, 161f\n"
+ "tbz x24, #1, 159f\n"
+ "ldr d0, [x23], #0x8\n"
+ "ldr d1, [x22], #0x8\n"
+ "ldr d2, [x21], #0x8\n"
+ "ldr d3, [x20], #0x8\n"
+ "tbz x24, #0, 160f\n"
+ "ld1 { v0.s }[2], [x23]\n"
+ "ld1 { v1.s }[2], [x22]\n"
+ "ld1 { v2.s }[2], [x21]\n"
+ "ld1 { v3.s }[2], [x20]\n"
"b 160f\n"
"159:" // Height 4: Multiply loop: Ragged operand read: partial_1_0
- "ldr s0, [x24, #0x0]\n"
- "ldr s1, [x23, #0x0]\n"
- "ldr s2, [x22, #0x0]\n"
- "ldr s3, [x21, #0x0]\n"
+ "ldr s0, [x23, #0x0]\n"
+ "ldr s1, [x22, #0x0]\n"
+ "ldr s2, [x21, #0x0]\n"
+ "ldr s3, [x20, #0x0]\n"
"160:" // Height 4: Multiply loop: Ragged operand read: Done
- "ldr q4, [x12, #0x0]\n"
- "ldr q5, [x12, #0x10]\n"
+ "ldr q4, [x11, #0x0]\n"
+ "ldr q5, [x11, #0x10]\n"
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
- "ldr q6, [x11, #0x0]\n"
- "ldr q7, [x11, #0x10]\n"
+ "ldr q6, [x10, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
- "ldr q4, [x10, #0x0]\n"
- "add x12, x12, #0x20\n"
+ "ldr q4, [x9, #0x0]\n"
+ "add x11, x11, #0x20\n"
".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
- "ldr q5, [x10, #0x10]\n"
- "add x11, x11, #0x20\n"
+ "ldr q5, [x9, #0x10]\n"
+ "add x10, x10, #0x20\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
- "ldr q6, [x9, #0x0]\n"
- "add x10, x10, #0x20\n"
+ "ldr q6, [x28, #0x0]\n"
+ "add x9, x9, #0x20\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
- "ldr q7, [x9, #0x10]\n"
- "add x9, x9, #0x20\n"
+ "ldr q7, [x28, #0x10]\n"
+ "add x28, x28, #0x20\n"
".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
".inst 0x6e44ec56 // bfmmla v22.4s, v2.8h, v4.8h\n"
- "ldr q4, [x28, #0x0]\n"
+ "ldr q4, [x27, #0x0]\n"
".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
".inst 0x6e45ec5c // bfmmla v28.4s, v2.8h, v5.8h\n"
- "ldr q5, [x28, #0x10]\n"
- "add x28, x28, #0x20\n"
+ "ldr q5, [x27, #0x10]\n"
+ "add x27, x27, #0x20\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
- "ldr q6, [x27, #0x0]\n"
+ "ldr q6, [x26, #0x0]\n"
".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec5d // bfmmla v29.4s, v2.8h, v7.8h\n"
- "ldr q7, [x27, #0x10]\n"
- "add x27, x27, #0x20\n"
+ "ldr q7, [x26, #0x10]\n"
+ "add x26, x26, #0x20\n"
".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
".inst 0x6e44ec58 // bfmmla v24.4s, v2.8h, v4.8h\n"
".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
@@ -2268,17 +2268,17 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec5f // bfmmla v31.4s, v2.8h, v7.8h\n"
"161:" // Height 4: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 153b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x13, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x22, x12, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"uzp1 v4.2d, v8.2d, v14.2d\n"
"uzp2 v8.2d, v8.2d, v14.2d\n"
"uzp1 v14.2d, v9.2d, v15.2d\n"
- "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"uzp2 v9.2d, v9.2d, v15.2d\n"
"uzp1 v15.2d, v10.2d, v16.2d\n"
"uzp2 v10.2d, v10.2d, v16.2d\n"
@@ -2301,10 +2301,10 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"uzp1 v30.2d, v25.2d, v31.2d\n"
"uzp2 v25.2d, v25.2d, v31.2d\n"
"tbz %x[flags], #1, 162f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.4s }, [x19]\n"
"fmin v4.4s, v4.4s, v1.4s\n"
"fmin v14.4s, v14.4s, v1.4s\n"
"fmin v15.4s, v15.4s, v1.4s\n"
@@ -2354,206 +2354,206 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"fmax v24.4s, v24.4s, v0.4s\n"
"fmax v25.4s, v25.4s, v0.4s\n"
"162:" // Height 4: No activation
- "cmp x14, #0x18\n"
+ "cmp x13, #0x18\n"
"bge 175f\n"
- "tbz x14, #4, 166f\n"
- "st1 { v4.4s }, [x13], #0x10\n"
- "st1 { v14.4s }, [x13], #0x10\n"
- "st1 { v15.4s }, [x13], #0x10\n"
- "st1 { v16.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
- "st1 { v9.4s }, [x23], #0x10\n"
- "st1 { v10.4s }, [x23], #0x10\n"
- "st1 { v11.4s }, [x23], #0x10\n"
- "st1 { v19.4s }, [x22], #0x10\n"
- "st1 { v26.4s }, [x22], #0x10\n"
- "st1 { v27.4s }, [x22], #0x10\n"
- "st1 { v28.4s }, [x22], #0x10\n"
- "st1 { v20.4s }, [x21], #0x10\n"
- "st1 { v21.4s }, [x21], #0x10\n"
- "st1 { v22.4s }, [x21], #0x10\n"
- "st1 { v23.4s }, [x21], #0x10\n"
- "tbz x14, #2, 164f\n"
- "st1 { v17.4s }, [x13], #0x10\n"
- "st1 { v12.4s }, [x23], #0x10\n"
- "st1 { v29.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
- "tbz x14, #1, 163f\n"
- "str d18, [x13], #0x8\n"
- "str d13, [x23], #0x8\n"
- "str d30, [x22], #0x8\n"
- "str d25, [x21], #0x8\n"
- "tbz x14, #0, 174f\n"
- "st1 { v18.s }[2], [x13]\n"
- "st1 { v13.s }[2], [x23]\n"
- "st1 { v30.s }[2], [x22]\n"
- "st1 { v25.s }[2], [x21]\n"
+ "tbz x13, #4, 166f\n"
+ "st1 { v4.4s }, [x12], #0x10\n"
+ "st1 { v14.4s }, [x12], #0x10\n"
+ "st1 { v15.4s }, [x12], #0x10\n"
+ "st1 { v16.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x22], #0x10\n"
+ "st1 { v9.4s }, [x22], #0x10\n"
+ "st1 { v10.4s }, [x22], #0x10\n"
+ "st1 { v11.4s }, [x22], #0x10\n"
+ "st1 { v19.4s }, [x21], #0x10\n"
+ "st1 { v26.4s }, [x21], #0x10\n"
+ "st1 { v27.4s }, [x21], #0x10\n"
+ "st1 { v28.4s }, [x21], #0x10\n"
+ "st1 { v20.4s }, [x20], #0x10\n"
+ "st1 { v21.4s }, [x20], #0x10\n"
+ "st1 { v22.4s }, [x20], #0x10\n"
+ "st1 { v23.4s }, [x20], #0x10\n"
+ "tbz x13, #2, 164f\n"
+ "st1 { v17.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x22], #0x10\n"
+ "st1 { v29.4s }, [x21], #0x10\n"
+ "st1 { v24.4s }, [x20], #0x10\n"
+ "tbz x13, #1, 163f\n"
+ "str d18, [x12], #0x8\n"
+ "str d13, [x22], #0x8\n"
+ "str d30, [x21], #0x8\n"
+ "str d25, [x20], #0x8\n"
+ "tbz x13, #0, 174f\n"
+ "st1 { v18.s }[2], [x12]\n"
+ "st1 { v13.s }[2], [x22]\n"
+ "st1 { v30.s }[2], [x21]\n"
+ "st1 { v25.s }[2], [x20]\n"
"b 174f\n"
"163:" // Height 4: Partial direct writeback: partial_1_20
- "tbz x14, #0, 174f\n"
- "str s18, [x13, #0x0]\n"
- "str s13, [x23, #0x0]\n"
- "str s30, [x22, #0x0]\n"
- "str s25, [x21, #0x0]\n"
+ "tbz x13, #0, 174f\n"
+ "str s18, [x12, #0x0]\n"
+ "str s13, [x22, #0x0]\n"
+ "str s30, [x21, #0x0]\n"
+ "str s25, [x20, #0x0]\n"
"b 174f\n"
"164:" // Height 4: Partial direct writeback: partial_2_16
- "tbz x14, #1, 165f\n"
- "str d17, [x13], #0x8\n"
- "str d12, [x23], #0x8\n"
- "str d29, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
- "tbz x14, #0, 174f\n"
- "st1 { v17.s }[2], [x13]\n"
- "st1 { v12.s }[2], [x23]\n"
- "st1 { v29.s }[2], [x22]\n"
- "st1 { v24.s }[2], [x21]\n"
+ "tbz x13, #1, 165f\n"
+ "str d17, [x12], #0x8\n"
+ "str d12, [x22], #0x8\n"
+ "str d29, [x21], #0x8\n"
+ "str d24, [x20], #0x8\n"
+ "tbz x13, #0, 174f\n"
+ "st1 { v17.s }[2], [x12]\n"
+ "st1 { v12.s }[2], [x22]\n"
+ "st1 { v29.s }[2], [x21]\n"
+ "st1 { v24.s }[2], [x20]\n"
"b 174f\n"
"165:" // Height 4: Partial direct writeback: partial_1_16
- "tbz x14, #0, 174f\n"
- "str s17, [x13, #0x0]\n"
- "str s12, [x23, #0x0]\n"
- "str s29, [x22, #0x0]\n"
- "str s24, [x21, #0x0]\n"
+ "tbz x13, #0, 174f\n"
+ "str s17, [x12, #0x0]\n"
+ "str s12, [x22, #0x0]\n"
+ "str s29, [x21, #0x0]\n"
+ "str s24, [x20, #0x0]\n"
"b 174f\n"
"166:" // Height 4: Partial direct writeback: partial_8_0
- "tbz x14, #3, 170f\n"
- "st1 { v4.4s }, [x13], #0x10\n"
- "st1 { v14.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
- "st1 { v9.4s }, [x23], #0x10\n"
- "st1 { v19.4s }, [x22], #0x10\n"
- "st1 { v26.4s }, [x22], #0x10\n"
- "st1 { v20.4s }, [x21], #0x10\n"
- "st1 { v21.4s }, [x21], #0x10\n"
- "tbz x14, #2, 168f\n"
- "st1 { v15.4s }, [x13], #0x10\n"
- "st1 { v10.4s }, [x23], #0x10\n"
- "st1 { v27.4s }, [x22], #0x10\n"
- "st1 { v22.4s }, [x21], #0x10\n"
- "tbz x14, #1, 167f\n"
- "str d16, [x13], #0x8\n"
- "str d11, [x23], #0x8\n"
- "str d28, [x22], #0x8\n"
- "str d23, [x21], #0x8\n"
- "tbz x14, #0, 174f\n"
- "st1 { v16.s }[2], [x13]\n"
- "st1 { v11.s }[2], [x23]\n"
- "st1 { v28.s }[2], [x22]\n"
- "st1 { v23.s }[2], [x21]\n"
+ "tbz x13, #3, 170f\n"
+ "st1 { v4.4s }, [x12], #0x10\n"
+ "st1 { v14.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x22], #0x10\n"
+ "st1 { v9.4s }, [x22], #0x10\n"
+ "st1 { v19.4s }, [x21], #0x10\n"
+ "st1 { v26.4s }, [x21], #0x10\n"
+ "st1 { v20.4s }, [x20], #0x10\n"
+ "st1 { v21.4s }, [x20], #0x10\n"
+ "tbz x13, #2, 168f\n"
+ "st1 { v15.4s }, [x12], #0x10\n"
+ "st1 { v10.4s }, [x22], #0x10\n"
+ "st1 { v27.4s }, [x21], #0x10\n"
+ "st1 { v22.4s }, [x20], #0x10\n"
+ "tbz x13, #1, 167f\n"
+ "str d16, [x12], #0x8\n"
+ "str d11, [x22], #0x8\n"
+ "str d28, [x21], #0x8\n"
+ "str d23, [x20], #0x8\n"
+ "tbz x13, #0, 174f\n"
+ "st1 { v16.s }[2], [x12]\n"
+ "st1 { v11.s }[2], [x22]\n"
+ "st1 { v28.s }[2], [x21]\n"
+ "st1 { v23.s }[2], [x20]\n"
"b 174f\n"
"167:" // Height 4: Partial direct writeback: partial_1_12
- "tbz x14, #0, 174f\n"
- "str s16, [x13, #0x0]\n"
- "str s11, [x23, #0x0]\n"
- "str s28, [x22, #0x0]\n"
- "str s23, [x21, #0x0]\n"
+ "tbz x13, #0, 174f\n"
+ "str s16, [x12, #0x0]\n"
+ "str s11, [x22, #0x0]\n"
+ "str s28, [x21, #0x0]\n"
+ "str s23, [x20, #0x0]\n"
"b 174f\n"
"168:" // Height 4: Partial direct writeback: partial_2_8
- "tbz x14, #1, 169f\n"
- "str d15, [x13], #0x8\n"
- "str d10, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
- "str d22, [x21], #0x8\n"
- "tbz x14, #0, 174f\n"
- "st1 { v15.s }[2], [x13]\n"
- "st1 { v10.s }[2], [x23]\n"
- "st1 { v27.s }[2], [x22]\n"
- "st1 { v22.s }[2], [x21]\n"
+ "tbz x13, #1, 169f\n"
+ "str d15, [x12], #0x8\n"
+ "str d10, [x22], #0x8\n"
+ "str d27, [x21], #0x8\n"
+ "str d22, [x20], #0x8\n"
+ "tbz x13, #0, 174f\n"
+ "st1 { v15.s }[2], [x12]\n"
+ "st1 { v10.s }[2], [x22]\n"
+ "st1 { v27.s }[2], [x21]\n"
+ "st1 { v22.s }[2], [x20]\n"
"b 174f\n"
"169:" // Height 4: Partial direct writeback: partial_1_8
- "tbz x14, #0, 174f\n"
- "str s15, [x13, #0x0]\n"
- "str s10, [x23, #0x0]\n"
- "str s27, [x22, #0x0]\n"
- "str s22, [x21, #0x0]\n"
+ "tbz x13, #0, 174f\n"
+ "str s15, [x12, #0x0]\n"
+ "str s10, [x22, #0x0]\n"
+ "str s27, [x21, #0x0]\n"
+ "str s22, [x20, #0x0]\n"
"b 174f\n"
"170:" // Height 4: Partial direct writeback: partial_4_0
- "tbz x14, #2, 172f\n"
- "st1 { v4.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
- "st1 { v19.4s }, [x22], #0x10\n"
- "st1 { v20.4s }, [x21], #0x10\n"
- "tbz x14, #1, 171f\n"
- "str d14, [x13], #0x8\n"
- "str d9, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
- "str d21, [x21], #0x8\n"
- "tbz x14, #0, 174f\n"
- "st1 { v14.s }[2], [x13]\n"
- "st1 { v9.s }[2], [x23]\n"
- "st1 { v26.s }[2], [x22]\n"
- "st1 { v21.s }[2], [x21]\n"
+ "tbz x13, #2, 172f\n"
+ "st1 { v4.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x22], #0x10\n"
+ "st1 { v19.4s }, [x21], #0x10\n"
+ "st1 { v20.4s }, [x20], #0x10\n"
+ "tbz x13, #1, 171f\n"
+ "str d14, [x12], #0x8\n"
+ "str d9, [x22], #0x8\n"
+ "str d26, [x21], #0x8\n"
+ "str d21, [x20], #0x8\n"
+ "tbz x13, #0, 174f\n"
+ "st1 { v14.s }[2], [x12]\n"
+ "st1 { v9.s }[2], [x22]\n"
+ "st1 { v26.s }[2], [x21]\n"
+ "st1 { v21.s }[2], [x20]\n"
"b 174f\n"
"171:" // Height 4: Partial direct writeback: partial_1_4
- "tbz x14, #0, 174f\n"
- "str s14, [x13, #0x0]\n"
- "str s9, [x23, #0x0]\n"
- "str s26, [x22, #0x0]\n"
- "str s21, [x21, #0x0]\n"
+ "tbz x13, #0, 174f\n"
+ "str s14, [x12, #0x0]\n"
+ "str s9, [x22, #0x0]\n"
+ "str s26, [x21, #0x0]\n"
+ "str s21, [x20, #0x0]\n"
"b 174f\n"
"172:" // Height 4: Partial direct writeback: partial_2_0
- "tbz x14, #1, 173f\n"
- "str d4, [x13], #0x8\n"
- "str d8, [x23], #0x8\n"
- "str d19, [x22], #0x8\n"
- "str d20, [x21], #0x8\n"
- "tbz x14, #0, 174f\n"
- "st1 { v4.s }[2], [x13]\n"
- "st1 { v8.s }[2], [x23]\n"
- "st1 { v19.s }[2], [x22]\n"
- "st1 { v20.s }[2], [x21]\n"
+ "tbz x13, #1, 173f\n"
+ "str d4, [x12], #0x8\n"
+ "str d8, [x22], #0x8\n"
+ "str d19, [x21], #0x8\n"
+ "str d20, [x20], #0x8\n"
+ "tbz x13, #0, 174f\n"
+ "st1 { v4.s }[2], [x12]\n"
+ "st1 { v8.s }[2], [x22]\n"
+ "st1 { v19.s }[2], [x21]\n"
+ "st1 { v20.s }[2], [x20]\n"
"b 174f\n"
"173:" // Height 4: Partial direct writeback: partial_1_0
- "str s4, [x13, #0x0]\n"
- "str s8, [x23, #0x0]\n"
- "str s19, [x22, #0x0]\n"
- "str s20, [x21, #0x0]\n"
+ "str s4, [x12, #0x0]\n"
+ "str s8, [x22, #0x0]\n"
+ "str s19, [x21, #0x0]\n"
+ "str s20, [x20, #0x0]\n"
"174:" // Height 4: Partial direct writeback: Done
"b 176f\n"
"175:" // Height 4: Full writeback
- "str q4, [x13, #0x0]\n"
- "str q14, [x13, #0x10]\n"
- "str q15, [x13, #0x20]\n"
- "str q16, [x13, #0x30]\n"
- "str q17, [x13, #0x40]\n"
- "str q18, [x13, #0x50]\n"
- "add x13, x13, #0x60\n"
- "str q8, [x23, #0x0]\n"
- "str q9, [x23, #0x10]\n"
- "str q10, [x23, #0x20]\n"
- "str q11, [x23, #0x30]\n"
- "str q12, [x23, #0x40]\n"
- "str q13, [x23, #0x50]\n"
- "str q19, [x22, #0x0]\n"
- "str q26, [x22, #0x10]\n"
- "str q27, [x22, #0x20]\n"
- "str q28, [x22, #0x30]\n"
- "str q29, [x22, #0x40]\n"
- "str q30, [x22, #0x50]\n"
- "str q20, [x21, #0x0]\n"
- "str q21, [x21, #0x10]\n"
- "str q22, [x21, #0x20]\n"
- "str q23, [x21, #0x30]\n"
- "str q24, [x21, #0x40]\n"
- "str q25, [x21, #0x50]\n"
+ "str q4, [x12, #0x0]\n"
+ "str q14, [x12, #0x10]\n"
+ "str q15, [x12, #0x20]\n"
+ "str q16, [x12, #0x30]\n"
+ "str q17, [x12, #0x40]\n"
+ "str q18, [x12, #0x50]\n"
+ "add x12, x12, #0x60\n"
+ "str q8, [x22, #0x0]\n"
+ "str q9, [x22, #0x10]\n"
+ "str q10, [x22, #0x20]\n"
+ "str q11, [x22, #0x30]\n"
+ "str q12, [x22, #0x40]\n"
+ "str q13, [x22, #0x50]\n"
+ "str q19, [x21, #0x0]\n"
+ "str q26, [x21, #0x10]\n"
+ "str q27, [x21, #0x20]\n"
+ "str q28, [x21, #0x30]\n"
+ "str q29, [x21, #0x40]\n"
+ "str q30, [x21, #0x50]\n"
+ "str q20, [x20, #0x0]\n"
+ "str q21, [x20, #0x10]\n"
+ "str q22, [x20, #0x20]\n"
+ "str q23, [x20, #0x30]\n"
+ "str q24, [x20, #0x40]\n"
+ "str q25, [x20, #0x50]\n"
"176:" // Height 4: Writeback done
- "subs x14, x14, #0x18\n"
+ "subs x13, x13, #0x18\n"
"bgt 134b\n"
"subs %x[M], %x[M], #0x4\n"
"beq 178f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 177f\n"
- "add x21, x21, #0x4\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x4\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"177:" // Update direct input
- "mov x20, #0x10\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x10\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"178:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_dot_8x12/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_dot_8x12/generic.cpp
index 2458d6a035..967396c377 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_dot_8x12/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_dot_8x12/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -52,34 +52,34 @@ void a64_ffinterleaved_bf16fp32_dot_8x12(
__asm__ __volatile__(
"1:" // Height loop
- "ldr x25, [%x[args_ptr], %[offsetof_Bpanel]]\n"
- "ldr x24, [%x[args_ptr], %[offsetof_N]]\n"
- "str x25, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x23, %x[Apanel]\n"
+ "ldr x24, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x23, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x24, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x22, %x[Apanel]\n"
"2:" // Width loop
- "ldr x25, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x22, x25, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
- "add x20, x21, x20, LSL #1\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x24, #0x8\n"
- "mov %x[Apanel], x23\n"
+ "ldr x24, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x21, x24, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
+ "add x19, x20, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x23, #0x8\n"
+ "mov %x[Apanel], x22\n"
"bgt 3f\n"
- "cmp x24, #0x4\n"
- "mov x21, x25\n"
+ "cmp x23, #0x4\n"
+ "mov x20, x24\n"
"bgt 3f\n"
- "mov x22, x25\n"
+ "mov x21, x24\n"
"3:" // B setup done
"ldr q0, [%x[Apanel], #0x0]\n"
"ldr q1, [%x[Apanel], #0x10]\n"
"movi v8.16b, #0x0\n"
- "ldr q4, [x25, #0x0]\n"
- "ldr q5, [x22, #0x0]\n"
+ "ldr q4, [x24, #0x0]\n"
+ "ldr q5, [x21, #0x0]\n"
"movi v9.16b, #0x0\n"
- "ldr q6, [x21, #0x0]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "cmp x20, #0x2\n"
+ "ldr q6, [x20, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
+ "cmp x19, #0x2\n"
"movi v10.16b, #0x0\n"
"movi v11.16b, #0x0\n"
"movi v12.16b, #0x0\n"
@@ -109,38 +109,38 @@ void a64_ffinterleaved_bf16fp32_dot_8x12(
".inst 0x4f40f088 // bfdot v8.4s, v4.8h, v0.h[0]\n"
".inst 0x4f60f08b // bfdot v11.4s, v4.8h, v0.h[1]\n"
".inst 0x4f40f88e // bfdot v14.4s, v4.8h, v0.h[2]\n"
- "sub x20, x20, #0x2\n"
+ "sub x19, x19, #0x2\n"
".inst 0x4f60f891 // bfdot v17.4s, v4.8h, v0.h[3]\n"
".inst 0x4f41f094 // bfdot v20.4s, v4.8h, v1.h[0]\n"
- "cmp x20, #0x2\n"
+ "cmp x19, #0x2\n"
".inst 0x4f61f097 // bfdot v23.4s, v4.8h, v1.h[1]\n"
".inst 0x4f41f89a // bfdot v26.4s, v4.8h, v1.h[2]\n"
"add %x[Apanel], %x[Apanel], #0x40\n"
".inst 0x4f61f89d // bfdot v29.4s, v4.8h, v1.h[3]\n"
- "ldr q4, [x25, #0x10]\n"
+ "ldr q4, [x24, #0x10]\n"
".inst 0x4f40f0a9 // bfdot v9.4s, v5.8h, v0.h[0]\n"
".inst 0x4f60f0ac // bfdot v12.4s, v5.8h, v0.h[1]\n"
".inst 0x4f40f8af // bfdot v15.4s, v5.8h, v0.h[2]\n"
- "add x25, x25, #0x20\n"
+ "add x24, x24, #0x20\n"
".inst 0x4f60f8b2 // bfdot v18.4s, v5.8h, v0.h[3]\n"
".inst 0x4f41f0b5 // bfdot v21.4s, v5.8h, v1.h[0]\n"
".inst 0x4f61f0b8 // bfdot v24.4s, v5.8h, v1.h[1]\n"
".inst 0x4f41f8bb // bfdot v27.4s, v5.8h, v1.h[2]\n"
".inst 0x4f61f8be // bfdot v30.4s, v5.8h, v1.h[3]\n"
- "ldr q5, [x22, #0x10]\n"
+ "ldr q5, [x21, #0x10]\n"
".inst 0x4f40f0ca // bfdot v10.4s, v6.8h, v0.h[0]\n"
".inst 0x4f60f0cd // bfdot v13.4s, v6.8h, v0.h[1]\n"
".inst 0x4f40f8d0 // bfdot v16.4s, v6.8h, v0.h[2]\n"
- "add x22, x22, #0x20\n"
+ "add x21, x21, #0x20\n"
".inst 0x4f60f8d3 // bfdot v19.4s, v6.8h, v0.h[3]\n"
"ldr q0, [%x[Apanel], #0x0]\n"
".inst 0x4f41f0d6 // bfdot v22.4s, v6.8h, v1.h[0]\n"
".inst 0x4f61f0d9 // bfdot v25.4s, v6.8h, v1.h[1]\n"
".inst 0x4f41f8dc // bfdot v28.4s, v6.8h, v1.h[2]\n"
".inst 0x4f61f8df // bfdot v31.4s, v6.8h, v1.h[3]\n"
- "ldr q6, [x21, #0x10]\n"
+ "ldr q6, [x20, #0x10]\n"
"ldr q1, [%x[Apanel], #0x10]\n"
- "add x21, x21, #0x20\n"
+ "add x20, x20, #0x20\n"
".inst 0x4f42f088 // bfdot v8.4s, v4.8h, v2.h[0]\n"
".inst 0x4f62f08b // bfdot v11.4s, v4.8h, v2.h[1]\n"
".inst 0x4f42f88e // bfdot v14.4s, v4.8h, v2.h[2]\n"
@@ -149,7 +149,7 @@ void a64_ffinterleaved_bf16fp32_dot_8x12(
".inst 0x4f63f097 // bfdot v23.4s, v4.8h, v3.h[1]\n"
".inst 0x4f43f89a // bfdot v26.4s, v4.8h, v3.h[2]\n"
".inst 0x4f63f89d // bfdot v29.4s, v4.8h, v3.h[3]\n"
- "ldr q4, [x25, #0x0]\n"
+ "ldr q4, [x24, #0x0]\n"
".inst 0x4f42f0a9 // bfdot v9.4s, v5.8h, v2.h[0]\n"
".inst 0x4f62f0ac // bfdot v12.4s, v5.8h, v2.h[1]\n"
".inst 0x4f42f8af // bfdot v15.4s, v5.8h, v2.h[2]\n"
@@ -158,7 +158,7 @@ void a64_ffinterleaved_bf16fp32_dot_8x12(
".inst 0x4f63f0b8 // bfdot v24.4s, v5.8h, v3.h[1]\n"
".inst 0x4f43f8bb // bfdot v27.4s, v5.8h, v3.h[2]\n"
".inst 0x4f63f8be // bfdot v30.4s, v5.8h, v3.h[3]\n"
- "ldr q5, [x22, #0x0]\n"
+ "ldr q5, [x21, #0x0]\n"
".inst 0x4f42f0ca // bfdot v10.4s, v6.8h, v2.h[0]\n"
".inst 0x4f62f0cd // bfdot v13.4s, v6.8h, v2.h[1]\n"
".inst 0x4f42f8d0 // bfdot v16.4s, v6.8h, v2.h[2]\n"
@@ -167,7 +167,7 @@ void a64_ffinterleaved_bf16fp32_dot_8x12(
".inst 0x4f63f0d9 // bfdot v25.4s, v6.8h, v3.h[1]\n"
".inst 0x4f43f8dc // bfdot v28.4s, v6.8h, v3.h[2]\n"
".inst 0x4f63f8df // bfdot v31.4s, v6.8h, v3.h[3]\n"
- "ldr q6, [x21, #0x0]\n"
+ "ldr q6, [x20, #0x0]\n"
"bge 4b\n"
"5:" // main loop skip
".inst 0x4f40f088 // bfdot v8.4s, v4.8h, v0.h[0]\n"
@@ -175,13 +175,13 @@ void a64_ffinterleaved_bf16fp32_dot_8x12(
"add %x[Apanel], %x[Apanel], #0x20\n"
".inst 0x4f40f88e // bfdot v14.4s, v4.8h, v0.h[2]\n"
".inst 0x4f60f891 // bfdot v17.4s, v4.8h, v0.h[3]\n"
- "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
".inst 0x4f41f094 // bfdot v20.4s, v4.8h, v1.h[0]\n"
".inst 0x4f61f097 // bfdot v23.4s, v4.8h, v1.h[1]\n"
- "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
".inst 0x4f41f89a // bfdot v26.4s, v4.8h, v1.h[2]\n"
".inst 0x4f61f89d // bfdot v29.4s, v4.8h, v1.h[3]\n"
- "add x21, x21, #0x10\n"
+ "add x20, x20, #0x10\n"
".inst 0x4f40f0a9 // bfdot v9.4s, v5.8h, v0.h[0]\n"
".inst 0x4f60f0ac // bfdot v12.4s, v5.8h, v0.h[1]\n"
".inst 0x4f40f8af // bfdot v15.4s, v5.8h, v0.h[2]\n"
@@ -198,14 +198,14 @@ void a64_ffinterleaved_bf16fp32_dot_8x12(
".inst 0x4f61f0d9 // bfdot v25.4s, v6.8h, v1.h[1]\n"
".inst 0x4f41f8dc // bfdot v28.4s, v6.8h, v1.h[2]\n"
".inst 0x4f61f8df // bfdot v31.4s, v6.8h, v1.h[3]\n"
- "cbz x20, 6f\n"
+ "cbz x19, 6f\n"
"ldr q0, [%x[Apanel], #0x0]\n"
"ldr q1, [%x[Apanel], #0x10]\n"
"add %x[Apanel], %x[Apanel], #0x20\n"
- "ldr q7, [x25, #0x0]\n"
- "ldr q4, [x22, #0x0]\n"
+ "ldr q7, [x24, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
".inst 0x4f40f0e8 // bfdot v8.4s, v7.8h, v0.h[0]\n"
- "ldr q5, [x21, #0x0]\n"
+ "ldr q5, [x20, #0x0]\n"
".inst 0x4f60f0eb // bfdot v11.4s, v7.8h, v0.h[1]\n"
".inst 0x4f40f8ee // bfdot v14.4s, v7.8h, v0.h[2]\n"
".inst 0x4f60f8f1 // bfdot v17.4s, v7.8h, v0.h[3]\n"
@@ -230,7 +230,7 @@ void a64_ffinterleaved_bf16fp32_dot_8x12(
".inst 0x4f41f8bc // bfdot v28.4s, v5.8h, v1.h[2]\n"
".inst 0x4f61f8bf // bfdot v31.4s, v5.8h, v1.h[3]\n"
"6:" // multiply loop done
- "subs x24, x24, #0xc\n"
+ "subs x23, x23, #0xc\n"
"str q8, [%x[Cpanel], #0x0]\n"
"str q9, [%x[Cpanel], #0x10]\n"
"str q10, [%x[Cpanel], #0x20]\n"
@@ -261,7 +261,7 @@ void a64_ffinterleaved_bf16fp32_dot_8x12(
"bne 1b\n"
: [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
: [args_ptr] "r" (&ka), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24"
);
}
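The clobber-list change just above is the functional heart of the revert: under AAPCS64, x19-x28 are callee-saved, so once the kernel's scratch registers shift back down to include x19, the asm block must name x19 as a clobber or the compiler would assume its value survives the block. A minimal sketch of that rule, assuming nothing beyond AAPCS64 itself (the scaled_sum helper is illustrative and not part of the library):

#include <cstdint>

uint64_t scaled_sum(uint64_t a, uint64_t b) {
    uint64_t out;
    __asm__ __volatile__(
        "mov x19, %x[a]\n"          // use x19 as a scratch register, as the kernels above now do
        "add x19, x19, %x[b]\n"
        "lsl %x[out], x19, #1\n"
        : [out] "=r" (out)
        : [a] "r" (a), [b] "r" (b)
        : "cc", "x19"               // without this clobber the compiler may keep a live value in x19
    );
    return out;
}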
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_mmla_8x12/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_mmla_8x12/generic.cpp
index 47991114af..509f2afa09 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_mmla_8x12/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_mmla_8x12/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -52,37 +52,37 @@ void a64_ffinterleaved_bf16fp32_mmla_8x12(
__asm__ __volatile__(
"1:" // Height loop
- "ldr x25, [%x[args_ptr], %[offsetof_Bpanel]]\n"
- "ldr x24, [%x[args_ptr], %[offsetof_N]]\n"
- "str x25, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x23, %x[Apanel]\n"
+ "ldr x24, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x23, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x24, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x22, %x[Apanel]\n"
"2:" // Width loop
- "ldr x25, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x22, x25, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
- "add x20, x21, x20, LSL #1\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x24, #0x8\n"
- "mov %x[Apanel], x23\n"
+ "ldr x24, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x21, x24, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
+ "add x19, x20, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x23, #0x8\n"
+ "mov %x[Apanel], x22\n"
"bgt 3f\n"
- "cmp x24, #0x4\n"
- "mov x21, x25\n"
+ "cmp x23, #0x4\n"
+ "mov x20, x24\n"
"bgt 3f\n"
- "mov x22, x25\n"
+ "mov x21, x24\n"
"3:" // B setup done
- "ldr q4, [x25, #0x0]\n"
+ "ldr q4, [x24, #0x0]\n"
"ldr q0, [%x[Apanel], #0x0]\n"
"movi v8.16b, #0x0\n"
"ldr q1, [%x[Apanel], #0x10]\n"
- "ldr q5, [x25, #0x10]\n"
+ "ldr q5, [x24, #0x10]\n"
"movi v9.16b, #0x0\n"
"ldr q2, [%x[Apanel], #0x20]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "cmp x20, #0x2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
+ "cmp x19, #0x2\n"
"movi v10.16b, #0x0\n"
"movi v11.16b, #0x0\n"
- "add x25, x25, #0x20\n"
+ "add x24, x24, #0x20\n"
"movi v12.16b, #0x0\n"
"movi v13.16b, #0x0\n"
"add %x[Apanel], %x[Apanel], #0x30\n"
@@ -107,30 +107,30 @@ void a64_ffinterleaved_bf16fp32_mmla_8x12(
"blt 5f\n"
"4:" // main loop head
"ldr q3, [%x[Apanel], #0x0]\n"
- "ldr q6, [x22, #0x0]\n"
+ "ldr q6, [x21, #0x0]\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- "ldr q7, [x22, #0x10]\n"
+ "ldr q7, [x21, #0x10]\n"
".inst 0x6e45ec0b // bfmmla v11.4s, v0.8h, v5.8h\n"
".inst 0x6e44ec2e // bfmmla v14.4s, v1.8h, v4.8h\n"
".inst 0x6e45ec31 // bfmmla v17.4s, v1.8h, v5.8h\n"
".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
- "sub x20, x20, #0x2\n"
+ "sub x19, x19, #0x2\n"
".inst 0x6e45ec57 // bfmmla v23.4s, v2.8h, v5.8h\n"
".inst 0x6e44ec7a // bfmmla v26.4s, v3.8h, v4.8h\n"
- "ldr q4, [x21, #0x0]\n"
+ "ldr q4, [x20, #0x0]\n"
".inst 0x6e45ec7d // bfmmla v29.4s, v3.8h, v5.8h\n"
- "ldr q5, [x21, #0x10]\n"
+ "ldr q5, [x20, #0x10]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
- "cmp x20, #0x2\n"
+ "cmp x19, #0x2\n"
".inst 0x6e47ec32 // bfmmla v18.4s, v1.8h, v7.8h\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
".inst 0x6e47ec58 // bfmmla v24.4s, v2.8h, v7.8h\n"
".inst 0x6e46ec7b // bfmmla v27.4s, v3.8h, v6.8h\n"
- "ldr q6, [x25, #0x0]\n"
+ "ldr q6, [x24, #0x0]\n"
".inst 0x6e47ec7e // bfmmla v30.4s, v3.8h, v7.8h\n"
- "ldr q7, [x25, #0x10]\n"
+ "ldr q7, [x24, #0x10]\n"
".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
".inst 0x6e45ec0d // bfmmla v13.4s, v0.8h, v5.8h\n"
"ldr q0, [%x[Apanel], #0x10]\n"
@@ -141,32 +141,32 @@ void a64_ffinterleaved_bf16fp32_mmla_8x12(
".inst 0x6e45ec59 // bfmmla v25.4s, v2.8h, v5.8h\n"
"ldr q2, [%x[Apanel], #0x30]\n"
".inst 0x6e44ec7c // bfmmla v28.4s, v3.8h, v4.8h\n"
- "ldr q4, [x22, #0x20]\n"
+ "ldr q4, [x21, #0x20]\n"
".inst 0x6e45ec7f // bfmmla v31.4s, v3.8h, v5.8h\n"
"ldr q3, [%x[Apanel], #0x40]\n"
- "ldr q5, [x22, #0x30]\n"
+ "ldr q5, [x21, #0x30]\n"
".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
".inst 0x6e47ec31 // bfmmla v17.4s, v1.8h, v7.8h\n"
- "add x22, x22, #0x40\n"
+ "add x21, x21, #0x40\n"
".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
".inst 0x6e47ec57 // bfmmla v23.4s, v2.8h, v7.8h\n"
".inst 0x6e46ec7a // bfmmla v26.4s, v3.8h, v6.8h\n"
- "ldr q6, [x21, #0x20]\n"
+ "ldr q6, [x20, #0x20]\n"
".inst 0x6e47ec7d // bfmmla v29.4s, v3.8h, v7.8h\n"
- "ldr q7, [x21, #0x30]\n"
+ "ldr q7, [x20, #0x30]\n"
".inst 0x6e44ec09 // bfmmla v9.4s, v0.8h, v4.8h\n"
".inst 0x6e45ec0c // bfmmla v12.4s, v0.8h, v5.8h\n"
".inst 0x6e44ec2f // bfmmla v15.4s, v1.8h, v4.8h\n"
".inst 0x6e45ec32 // bfmmla v18.4s, v1.8h, v5.8h\n"
- "add x21, x21, #0x40\n"
+ "add x20, x20, #0x40\n"
".inst 0x6e44ec55 // bfmmla v21.4s, v2.8h, v4.8h\n"
".inst 0x6e45ec58 // bfmmla v24.4s, v2.8h, v5.8h\n"
".inst 0x6e44ec7b // bfmmla v27.4s, v3.8h, v4.8h\n"
- "ldr q4, [x25, #0x20]\n"
+ "ldr q4, [x24, #0x20]\n"
".inst 0x6e45ec7e // bfmmla v30.4s, v3.8h, v5.8h\n"
- "ldr q5, [x25, #0x30]\n"
+ "ldr q5, [x24, #0x30]\n"
".inst 0x6e46ec0a // bfmmla v10.4s, v0.8h, v6.8h\n"
".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
"ldr q0, [%x[Apanel], #0x50]\n"
@@ -179,13 +179,13 @@ void a64_ffinterleaved_bf16fp32_mmla_8x12(
".inst 0x6e46ec7c // bfmmla v28.4s, v3.8h, v6.8h\n"
".inst 0x6e47ec7f // bfmmla v31.4s, v3.8h, v7.8h\n"
"add %x[Apanel], %x[Apanel], #0x80\n"
- "add x25, x25, #0x40\n"
+ "add x24, x24, #0x40\n"
"bge 4b\n"
"5:" // main loop skip
"ldr q3, [%x[Apanel], #0x0]\n"
- "ldr q6, [x22, #0x0]\n"
+ "ldr q6, [x21, #0x0]\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- "ldr q7, [x22, #0x10]\n"
+ "ldr q7, [x21, #0x10]\n"
".inst 0x6e45ec0b // bfmmla v11.4s, v0.8h, v5.8h\n"
".inst 0x6e44ec2e // bfmmla v14.4s, v1.8h, v4.8h\n"
".inst 0x6e45ec31 // bfmmla v17.4s, v1.8h, v5.8h\n"
@@ -193,16 +193,16 @@ void a64_ffinterleaved_bf16fp32_mmla_8x12(
"add %x[Apanel], %x[Apanel], #0x10\n"
".inst 0x6e45ec57 // bfmmla v23.4s, v2.8h, v5.8h\n"
".inst 0x6e44ec7a // bfmmla v26.4s, v3.8h, v4.8h\n"
- "ldr q4, [x21, #0x0]\n"
+ "ldr q4, [x20, #0x0]\n"
".inst 0x6e45ec7d // bfmmla v29.4s, v3.8h, v5.8h\n"
- "ldr q5, [x21, #0x10]\n"
+ "ldr q5, [x20, #0x10]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
- "add x22, x22, #0x20\n"
+ "add x21, x21, #0x20\n"
".inst 0x6e47ec32 // bfmmla v18.4s, v1.8h, v7.8h\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
- "add x21, x21, #0x20\n"
+ "add x20, x20, #0x20\n"
".inst 0x6e47ec58 // bfmmla v24.4s, v2.8h, v7.8h\n"
".inst 0x6e46ec7b // bfmmla v27.4s, v3.8h, v6.8h\n"
".inst 0x6e47ec7e // bfmmla v30.4s, v3.8h, v7.8h\n"
@@ -214,26 +214,26 @@ void a64_ffinterleaved_bf16fp32_mmla_8x12(
".inst 0x6e45ec59 // bfmmla v25.4s, v2.8h, v5.8h\n"
".inst 0x6e44ec7c // bfmmla v28.4s, v3.8h, v4.8h\n"
".inst 0x6e45ec7f // bfmmla v31.4s, v3.8h, v5.8h\n"
- "cbz x20, 6f\n"
- "ldr q6, [x25, #0x0]\n"
+ "cbz x19, 6f\n"
+ "ldr q6, [x24, #0x0]\n"
"ldr q0, [%x[Apanel], #0x0]\n"
".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
"ldr q1, [%x[Apanel], #0x10]\n"
- "ldr q7, [x25, #0x10]\n"
+ "ldr q7, [x24, #0x10]\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
"ldr q2, [%x[Apanel], #0x20]\n"
"ldr q3, [%x[Apanel], #0x30]\n"
".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
- "ldr q4, [x22, #0x0]\n"
- "ldr q5, [x22, #0x10]\n"
+ "ldr q4, [x21, #0x0]\n"
+ "ldr q5, [x21, #0x10]\n"
".inst 0x6e47ec31 // bfmmla v17.4s, v1.8h, v7.8h\n"
".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
".inst 0x6e47ec57 // bfmmla v23.4s, v2.8h, v7.8h\n"
"add %x[Apanel], %x[Apanel], #0x40\n"
".inst 0x6e46ec7a // bfmmla v26.4s, v3.8h, v6.8h\n"
- "ldr q6, [x21, #0x0]\n"
+ "ldr q6, [x20, #0x0]\n"
".inst 0x6e47ec7d // bfmmla v29.4s, v3.8h, v7.8h\n"
- "ldr q7, [x21, #0x10]\n"
+ "ldr q7, [x20, #0x10]\n"
".inst 0x6e44ec09 // bfmmla v9.4s, v0.8h, v4.8h\n"
".inst 0x6e45ec0c // bfmmla v12.4s, v0.8h, v5.8h\n"
".inst 0x6e44ec2f // bfmmla v15.4s, v1.8h, v4.8h\n"
@@ -251,7 +251,7 @@ void a64_ffinterleaved_bf16fp32_mmla_8x12(
".inst 0x6e46ec7c // bfmmla v28.4s, v3.8h, v6.8h\n"
".inst 0x6e47ec7f // bfmmla v31.4s, v3.8h, v7.8h\n"
"6:" // multiply loop done
- "subs x24, x24, #0xc\n"
+ "subs x23, x23, #0xc\n"
"uzp1 v4.2d, v8.2d, v11.2d\n"
"uzp2 v8.2d, v8.2d, v11.2d\n"
"uzp1 v11.2d, v9.2d, v12.2d\n"
@@ -306,7 +306,7 @@ void a64_ffinterleaved_bf16fp32_mmla_8x12(
"bne 1b\n"
: [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
: [args_ptr] "r" (&ka), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24"
);
}
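The uzp1/uzp2 pairs after "multiply loop done" in this kernel exist because BFMMLA accumulates 2x2 output tiles per vector rather than plain rows; the 64-bit unzips reassemble row-major data before the str q stores. A minimal sketch of one such deinterleave step using NEON intrinsics (the helper name is illustrative):

#include <arm_neon.h>

static inline void unzip_tile_pair(float32x4_t &acc_lo, float32x4_t &acc_hi) {
    // mirrors "uzp1 v4.2d, v8.2d, v11.2d" / "uzp2 v8.2d, v8.2d, v11.2d"
    float64x2_t a = vreinterpretq_f64_f32(acc_lo);
    float64x2_t b = vreinterpretq_f64_f32(acc_hi);
    acc_lo = vreinterpretq_f32_f64(vuzp1q_f64(a, b)); // even 64-bit lanes: first output row
    acc_hi = vreinterpretq_f32_f64(vuzp2q_f64(a, b)); // odd 64-bit lanes: second output row
}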
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp16_mla_8x24/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp16_mla_8x24/generic.cpp
index 36bfccf52f..19836f2e9d 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp16_mla_8x24/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp16_mla_8x24/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#if defined(__aarch64__) && (defined(FP16_KERNELS) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC))
@@ -51,33 +51,33 @@ void a64_ffinterleaved_fp16_mla_8x24(
__asm__ __volatile__(
"1:" // Height loop
- "ldr x25, [%x[args_ptr], %[offsetof_Bpanel]]\n"
- "ldr x24, [%x[args_ptr], %[offsetof_N]]\n"
- "str x25, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x23, %x[Apanel]\n"
+ "ldr x24, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x23, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x24, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x22, %x[Apanel]\n"
"2:" // Width loop
- "ldr x25, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x22, x25, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
- "add x20, x21, x20, LSL #1\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x24, #0x10\n"
- "mov %x[Apanel], x23\n"
+ "ldr x24, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x21, x24, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
+ "add x19, x20, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x23, #0x10\n"
+ "mov %x[Apanel], x22\n"
"bgt 3f\n"
- "cmp x24, #0x8\n"
- "mov x21, x25\n"
+ "cmp x23, #0x8\n"
+ "mov x20, x24\n"
"bgt 3f\n"
- "mov x22, x25\n"
+ "mov x21, x24\n"
"3:" // B setup done
"ldr q0, [%x[Apanel], #0x0]\n"
- "ldr q2, [x25, #0x0]\n"
+ "ldr q2, [x24, #0x0]\n"
"movi v8.16b, #0x0\n"
- "ldr q3, [x22, #0x0]\n"
- "ldr q4, [x21, #0x0]\n"
+ "ldr q3, [x21, #0x0]\n"
+ "ldr q4, [x20, #0x0]\n"
"movi v9.16b, #0x0\n"
- "ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "cmp x20, #0x2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
+ "cmp x19, #0x2\n"
"movi v10.16b, #0x0\n"
"movi v11.16b, #0x0\n"
"movi v12.16b, #0x0\n"
@@ -103,35 +103,35 @@ void a64_ffinterleaved_fp16_mla_8x24(
"blt 5f\n"
"4:" // main loop head
"ldr q1, [%x[Apanel], #0x10]\n"
- "ldr q5, [x25, #0x10]\n"
+ "ldr q5, [x24, #0x10]\n"
"fmla v8.8h, v2.8h, v0.h[0]\n"
- "ldr q6, [x22, #0x10]\n"
- "ldr q7, [x21, #0x10]\n"
+ "ldr q6, [x21, #0x10]\n"
+ "ldr q7, [x20, #0x10]\n"
"fmla v11.8h, v2.8h, v0.h[1]\n"
"fmla v14.8h, v2.8h, v0.h[2]\n"
"fmla v17.8h, v2.8h, v0.h[3]\n"
- "sub x20, x20, #0x2\n"
+ "sub x19, x19, #0x2\n"
"fmla v20.8h, v2.8h, v0.h[4]\n"
"fmla v23.8h, v2.8h, v0.h[5]\n"
- "cmp x20, #0x2\n"
+ "cmp x19, #0x2\n"
"fmla v26.8h, v2.8h, v0.h[6]\n"
"fmla v29.8h, v2.8h, v0.h[7]\n"
"add %x[Apanel], %x[Apanel], #0x20\n"
"fmla v9.8h, v3.8h, v0.h[0]\n"
"fmla v12.8h, v3.8h, v0.h[1]\n"
- "add x25, x25, #0x20\n"
- "ldr q2, [x25, #0x0]\n"
+ "add x24, x24, #0x20\n"
+ "ldr q2, [x24, #0x0]\n"
"fmla v15.8h, v3.8h, v0.h[2]\n"
"fmla v18.8h, v3.8h, v0.h[3]\n"
"fmla v21.8h, v3.8h, v0.h[4]\n"
"fmla v24.8h, v3.8h, v0.h[5]\n"
- "add x22, x22, #0x20\n"
+ "add x21, x21, #0x20\n"
"fmla v27.8h, v3.8h, v0.h[6]\n"
"fmla v30.8h, v3.8h, v0.h[7]\n"
- "ldr q3, [x22, #0x0]\n"
+ "ldr q3, [x21, #0x0]\n"
"fmla v10.8h, v4.8h, v0.h[0]\n"
"fmla v13.8h, v4.8h, v0.h[1]\n"
- "add x21, x21, #0x20\n"
+ "add x20, x20, #0x20\n"
"fmla v16.8h, v4.8h, v0.h[2]\n"
"fmla v19.8h, v4.8h, v0.h[3]\n"
"fmla v22.8h, v4.8h, v0.h[4]\n"
@@ -139,7 +139,7 @@ void a64_ffinterleaved_fp16_mla_8x24(
"fmla v28.8h, v4.8h, v0.h[6]\n"
"fmla v31.8h, v4.8h, v0.h[7]\n"
"ldr q0, [%x[Apanel], #0x0]\n"
- "ldr q4, [x21, #0x0]\n"
+ "ldr q4, [x20, #0x0]\n"
"fmla v8.8h, v5.8h, v1.h[0]\n"
"fmla v11.8h, v5.8h, v1.h[1]\n"
"fmla v14.8h, v5.8h, v1.h[2]\n"
@@ -171,13 +171,13 @@ void a64_ffinterleaved_fp16_mla_8x24(
"add %x[Apanel], %x[Apanel], #0x10\n"
"fmla v14.8h, v2.8h, v0.h[2]\n"
"fmla v17.8h, v2.8h, v0.h[3]\n"
- "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"fmla v20.8h, v2.8h, v0.h[4]\n"
"fmla v23.8h, v2.8h, v0.h[5]\n"
- "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
"fmla v26.8h, v2.8h, v0.h[6]\n"
"fmla v29.8h, v2.8h, v0.h[7]\n"
- "add x21, x21, #0x10\n"
+ "add x20, x20, #0x10\n"
"fmla v9.8h, v3.8h, v0.h[0]\n"
"fmla v12.8h, v3.8h, v0.h[1]\n"
"fmla v15.8h, v3.8h, v0.h[2]\n"
@@ -194,12 +194,12 @@ void a64_ffinterleaved_fp16_mla_8x24(
"fmla v25.8h, v4.8h, v0.h[5]\n"
"fmla v28.8h, v4.8h, v0.h[6]\n"
"fmla v31.8h, v4.8h, v0.h[7]\n"
- "cbz x20, 6f\n"
+ "cbz x19, 6f\n"
"ldr q0, [%x[Apanel], #0x0]\n"
- "ldr q5, [x25, #0x0]\n"
+ "ldr q5, [x24, #0x0]\n"
"fmla v8.8h, v5.8h, v0.h[0]\n"
- "ldr q6, [x22, #0x0]\n"
- "ldr q7, [x21, #0x0]\n"
+ "ldr q6, [x21, #0x0]\n"
+ "ldr q7, [x20, #0x0]\n"
"fmla v11.8h, v5.8h, v0.h[1]\n"
"fmla v14.8h, v5.8h, v0.h[2]\n"
"fmla v17.8h, v5.8h, v0.h[3]\n"
@@ -225,7 +225,7 @@ void a64_ffinterleaved_fp16_mla_8x24(
"fmla v28.8h, v7.8h, v0.h[6]\n"
"fmla v31.8h, v7.8h, v0.h[7]\n"
"6:" // multiply loop done
- "subs x24, x24, #0x18\n"
+ "subs x23, x23, #0x18\n"
"str q8, [%x[Cpanel], #0x0]\n"
"str q9, [%x[Cpanel], #0x10]\n"
"str q10, [%x[Cpanel], #0x20]\n"
@@ -256,7 +256,7 @@ void a64_ffinterleaved_fp16_mla_8x24(
"bne 1b\n"
: [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
: [args_ptr] "r" (&ka), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp32_mla_8x12/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp32_mla_8x12/generic.cpp
index ec99d64f4a..bf804b5f43 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp32_mla_8x12/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp32_mla_8x12/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -51,34 +51,34 @@ void a64_ffinterleaved_fp32_mla_8x12(
__asm__ __volatile__(
"1:" // Height loop
- "ldr x25, [%x[args_ptr], %[offsetof_Bpanel]]\n"
- "ldr x24, [%x[args_ptr], %[offsetof_N]]\n"
- "str x25, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x23, %x[Apanel]\n"
+ "ldr x24, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x23, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x24, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x22, %x[Apanel]\n"
"2:" // Width loop
- "ldr x25, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x22, x25, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x24, #0x8\n"
- "mov %x[Apanel], x23\n"
+ "ldr x24, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x21, x24, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
+ "add x19, x20, x19, LSL #2\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x23, #0x8\n"
+ "mov %x[Apanel], x22\n"
"bgt 3f\n"
- "cmp x24, #0x4\n"
- "mov x21, x25\n"
+ "cmp x23, #0x4\n"
+ "mov x20, x24\n"
"bgt 3f\n"
- "mov x22, x25\n"
+ "mov x21, x24\n"
"3:" // B setup done
"ldr q0, [%x[Apanel], #0x0]\n"
"ldr q1, [%x[Apanel], #0x10]\n"
"movi v8.16b, #0x0\n"
- "ldr q4, [x25, #0x0]\n"
- "ldr q5, [x22, #0x0]\n"
+ "ldr q4, [x24, #0x0]\n"
+ "ldr q5, [x21, #0x0]\n"
"movi v9.16b, #0x0\n"
- "ldr q6, [x21, #0x0]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "cmp x20, #0x4\n"
+ "ldr q6, [x20, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
+ "cmp x19, #0x4\n"
"movi v10.16b, #0x0\n"
"movi v11.16b, #0x0\n"
"movi v12.16b, #0x0\n"
@@ -106,17 +106,17 @@ void a64_ffinterleaved_fp32_mla_8x12(
"ldr q2, [%x[Apanel], #0x20]\n"
"ldr q3, [%x[Apanel], #0x30]\n"
"fmla v8.4s, v4.4s, v0.s[0]\n"
- "ldr q7, [x25, #0x10]\n"
+ "ldr q7, [x24, #0x10]\n"
"fmla v11.4s, v4.4s, v0.s[1]\n"
"fmla v14.4s, v4.4s, v0.s[2]\n"
"fmla v17.4s, v4.4s, v0.s[3]\n"
"fmla v20.4s, v4.4s, v1.s[0]\n"
- "sub x20, x20, #0x4\n"
+ "sub x19, x19, #0x4\n"
"fmla v23.4s, v4.4s, v1.s[1]\n"
"fmla v26.4s, v4.4s, v1.s[2]\n"
- "cmp x20, #0x4\n"
+ "cmp x19, #0x4\n"
"fmla v29.4s, v4.4s, v1.s[3]\n"
- "ldr q4, [x22, #0x10]\n"
+ "ldr q4, [x21, #0x10]\n"
"fmla v9.4s, v5.4s, v0.s[0]\n"
"fmla v12.4s, v5.4s, v0.s[1]\n"
"fmla v15.4s, v5.4s, v0.s[2]\n"
@@ -125,7 +125,7 @@ void a64_ffinterleaved_fp32_mla_8x12(
"fmla v24.4s, v5.4s, v1.s[1]\n"
"fmla v27.4s, v5.4s, v1.s[2]\n"
"fmla v30.4s, v5.4s, v1.s[3]\n"
- "ldr q5, [x21, #0x10]\n"
+ "ldr q5, [x20, #0x10]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
"fmla v13.4s, v6.4s, v0.s[1]\n"
"fmla v16.4s, v6.4s, v0.s[2]\n"
@@ -136,7 +136,7 @@ void a64_ffinterleaved_fp32_mla_8x12(
"fmla v28.4s, v6.4s, v1.s[2]\n"
"fmla v31.4s, v6.4s, v1.s[3]\n"
"ldr q1, [%x[Apanel], #0x50]\n"
- "ldr q6, [x25, #0x20]\n"
+ "ldr q6, [x24, #0x20]\n"
"fmla v8.4s, v7.4s, v2.s[0]\n"
"fmla v11.4s, v7.4s, v2.s[1]\n"
"fmla v14.4s, v7.4s, v2.s[2]\n"
@@ -145,7 +145,7 @@ void a64_ffinterleaved_fp32_mla_8x12(
"fmla v23.4s, v7.4s, v3.s[1]\n"
"fmla v26.4s, v7.4s, v3.s[2]\n"
"fmla v29.4s, v7.4s, v3.s[3]\n"
- "ldr q7, [x22, #0x20]\n"
+ "ldr q7, [x21, #0x20]\n"
"fmla v9.4s, v4.4s, v2.s[0]\n"
"fmla v12.4s, v4.4s, v2.s[1]\n"
"fmla v15.4s, v4.4s, v2.s[2]\n"
@@ -154,7 +154,7 @@ void a64_ffinterleaved_fp32_mla_8x12(
"fmla v24.4s, v4.4s, v3.s[1]\n"
"fmla v27.4s, v4.4s, v3.s[2]\n"
"fmla v30.4s, v4.4s, v3.s[3]\n"
- "ldr q4, [x21, #0x20]\n"
+ "ldr q4, [x20, #0x20]\n"
"fmla v10.4s, v5.4s, v2.s[0]\n"
"fmla v13.4s, v5.4s, v2.s[1]\n"
"fmla v16.4s, v5.4s, v2.s[2]\n"
@@ -165,7 +165,7 @@ void a64_ffinterleaved_fp32_mla_8x12(
"fmla v28.4s, v5.4s, v3.s[2]\n"
"fmla v31.4s, v5.4s, v3.s[3]\n"
"ldr q3, [%x[Apanel], #0x70]\n"
- "ldr q5, [x25, #0x30]\n"
+ "ldr q5, [x24, #0x30]\n"
"fmla v8.4s, v6.4s, v0.s[0]\n"
"fmla v11.4s, v6.4s, v0.s[1]\n"
"fmla v14.4s, v6.4s, v0.s[2]\n"
@@ -173,23 +173,23 @@ void a64_ffinterleaved_fp32_mla_8x12(
"add %x[Apanel], %x[Apanel], #0x80\n"
"fmla v20.4s, v6.4s, v1.s[0]\n"
"fmla v23.4s, v6.4s, v1.s[1]\n"
- "add x25, x25, #0x40\n"
+ "add x24, x24, #0x40\n"
"fmla v26.4s, v6.4s, v1.s[2]\n"
"fmla v29.4s, v6.4s, v1.s[3]\n"
- "ldr q6, [x22, #0x30]\n"
+ "ldr q6, [x21, #0x30]\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
"fmla v12.4s, v7.4s, v0.s[1]\n"
- "add x22, x22, #0x40\n"
+ "add x21, x21, #0x40\n"
"fmla v15.4s, v7.4s, v0.s[2]\n"
"fmla v18.4s, v7.4s, v0.s[3]\n"
"fmla v21.4s, v7.4s, v1.s[0]\n"
"fmla v24.4s, v7.4s, v1.s[1]\n"
"fmla v27.4s, v7.4s, v1.s[2]\n"
"fmla v30.4s, v7.4s, v1.s[3]\n"
- "ldr q7, [x21, #0x30]\n"
+ "ldr q7, [x20, #0x30]\n"
"fmla v10.4s, v4.4s, v0.s[0]\n"
"fmla v13.4s, v4.4s, v0.s[1]\n"
- "add x21, x21, #0x40\n"
+ "add x20, x20, #0x40\n"
"fmla v16.4s, v4.4s, v0.s[2]\n"
"fmla v19.4s, v4.4s, v0.s[3]\n"
"ldr q0, [%x[Apanel], #0x0]\n"
@@ -198,7 +198,7 @@ void a64_ffinterleaved_fp32_mla_8x12(
"fmla v28.4s, v4.4s, v1.s[2]\n"
"fmla v31.4s, v4.4s, v1.s[3]\n"
"ldr q1, [%x[Apanel], #0x10]\n"
- "ldr q4, [x25, #0x0]\n"
+ "ldr q4, [x24, #0x0]\n"
"fmla v8.4s, v5.4s, v2.s[0]\n"
"fmla v11.4s, v5.4s, v2.s[1]\n"
"fmla v14.4s, v5.4s, v2.s[2]\n"
@@ -207,7 +207,7 @@ void a64_ffinterleaved_fp32_mla_8x12(
"fmla v23.4s, v5.4s, v3.s[1]\n"
"fmla v26.4s, v5.4s, v3.s[2]\n"
"fmla v29.4s, v5.4s, v3.s[3]\n"
- "ldr q5, [x22, #0x0]\n"
+ "ldr q5, [x21, #0x0]\n"
"fmla v9.4s, v6.4s, v2.s[0]\n"
"fmla v12.4s, v6.4s, v2.s[1]\n"
"fmla v15.4s, v6.4s, v2.s[2]\n"
@@ -216,7 +216,7 @@ void a64_ffinterleaved_fp32_mla_8x12(
"fmla v24.4s, v6.4s, v3.s[1]\n"
"fmla v27.4s, v6.4s, v3.s[2]\n"
"fmla v30.4s, v6.4s, v3.s[3]\n"
- "ldr q6, [x21, #0x0]\n"
+ "ldr q6, [x20, #0x0]\n"
"fmla v10.4s, v7.4s, v2.s[0]\n"
"fmla v13.4s, v7.4s, v2.s[1]\n"
"fmla v16.4s, v7.4s, v2.s[2]\n"
@@ -232,13 +232,13 @@ void a64_ffinterleaved_fp32_mla_8x12(
"add %x[Apanel], %x[Apanel], #0x20\n"
"fmla v14.4s, v4.4s, v0.s[2]\n"
"fmla v17.4s, v4.4s, v0.s[3]\n"
- "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"fmla v20.4s, v4.4s, v1.s[0]\n"
"fmla v23.4s, v4.4s, v1.s[1]\n"
- "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
"fmla v26.4s, v4.4s, v1.s[2]\n"
"fmla v29.4s, v4.4s, v1.s[3]\n"
- "add x21, x21, #0x10\n"
+ "add x20, x20, #0x10\n"
"fmla v9.4s, v5.4s, v0.s[0]\n"
"fmla v12.4s, v5.4s, v0.s[1]\n"
"fmla v15.4s, v5.4s, v0.s[2]\n"
@@ -255,15 +255,15 @@ void a64_ffinterleaved_fp32_mla_8x12(
"fmla v25.4s, v6.4s, v1.s[1]\n"
"fmla v28.4s, v6.4s, v1.s[2]\n"
"fmla v31.4s, v6.4s, v1.s[3]\n"
- "cbz x20, 7f\n"
+ "cbz x19, 7f\n"
"6:" // odd loop
"ldr q0, [%x[Apanel], #0x0]\n"
"ldr q1, [%x[Apanel], #0x10]\n"
- "subs x20, x20, #0x1\n"
- "ldr q7, [x25, #0x0]\n"
- "ldr q4, [x22, #0x0]\n"
+ "subs x19, x19, #0x1\n"
+ "ldr q7, [x24, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
"fmla v8.4s, v7.4s, v0.s[0]\n"
- "ldr q5, [x21, #0x0]\n"
+ "ldr q5, [x20, #0x0]\n"
"fmla v11.4s, v7.4s, v0.s[1]\n"
"fmla v14.4s, v7.4s, v0.s[2]\n"
"fmla v17.4s, v7.4s, v0.s[3]\n"
@@ -271,13 +271,13 @@ void a64_ffinterleaved_fp32_mla_8x12(
"add %x[Apanel], %x[Apanel], #0x20\n"
"fmla v23.4s, v7.4s, v1.s[1]\n"
"fmla v26.4s, v7.4s, v1.s[2]\n"
- "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"fmla v29.4s, v7.4s, v1.s[3]\n"
"fmla v9.4s, v4.4s, v0.s[0]\n"
- "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
"fmla v12.4s, v4.4s, v0.s[1]\n"
"fmla v15.4s, v4.4s, v0.s[2]\n"
- "add x21, x21, #0x10\n"
+ "add x20, x20, #0x10\n"
"fmla v18.4s, v4.4s, v0.s[3]\n"
"fmla v21.4s, v4.4s, v1.s[0]\n"
"fmla v24.4s, v4.4s, v1.s[1]\n"
@@ -293,7 +293,7 @@ void a64_ffinterleaved_fp32_mla_8x12(
"fmla v31.4s, v5.4s, v1.s[3]\n"
"bne 6b\n"
"7:" // multiply loop done
- "subs x24, x24, #0xc\n"
+ "subs x23, x23, #0xc\n"
"str q8, [%x[Cpanel], #0x0]\n"
"str q9, [%x[Cpanel], #0x10]\n"
"str q10, [%x[Cpanel], #0x20]\n"
@@ -324,7 +324,7 @@ void a64_ffinterleaved_fp32_mla_8x12(
"bne 1b\n"
: [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
: [args_ptr] "r" (&ka), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24"
);
}
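Across all three ffinterleaved kernels, the "Width loop" computes the same thing: three B-panel pointers spaced B_stride elements apart, with the LSL amount encoding the element size (#1 for bf16/fp16, #2 for fp32), and the pointer one further stride along written back to cur_B_ptr for the next column block. A minimal C++ sketch of that address arithmetic (the struct and helper names are illustrative, not the library's API; B_stride and cur_B_ptr are the KernelArgs fields referenced above):

#include <cstddef>

struct PanelPtrs {
    const void *b0, *b1, *b2;   // the values held in x24/x21/x20 (x25/x22/x21 before the revert)
    const void *next;           // stored back to cur_B_ptr
};

template <typename T>
PanelPtrs setup_b_panels(const T *cur_B, size_t B_stride) {
    const T *b0 = cur_B;
    const T *b1 = b0 + B_stride;        // "add x21, x24, x19, LSL #shift": shift = log2(sizeof(T))
    const T *b2 = b1 + B_stride;        // "add x20, x21, x19, LSL #shift"
    return { b0, b1, b2, b2 + B_stride };
}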
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_dot_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_dot_6x16/generic.cpp
index 02d2434356..27e08135b6 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_dot_6x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_dot_6x16/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -103,82 +103,82 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"cmp %x[M], #0x2\n"
"bgt 71f\n"
"beq 36f\n"
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x9, %x[bias]\n"
+ "mov x28, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "cbz x12, 3f\n"
- "ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
- "add x12, x12, #0x40\n"
+ "cbz x9, 3f\n"
+ "ldr q8, [x9, #0x0]\n"
+ "ldr q9, [x9, #0x10]\n"
+ "ldr q10, [x9, #0x20]\n"
+ "ldr q11, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
"b 14f\n"
"3:" // Height 1: no bias
"tbz %x[flags], #0, 13f\n"
"cmp x11, #0x10\n"
"bge 12f\n"
"tbz x11, #3, 7f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
"tbz x11, #2, 5f\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
"tbz x11, #1, 4f\n"
- "ldr d11, [x9], #0x8\n"
- "mov x20, #0x38\n"
+ "mov x19, #0x38\n"
+ "ldr d11, [x28], #0x8\n"
"tbz x11, #0, 11f\n"
- "ld1 { v11.s }[2], [x9]\n"
+ "ld1 { v11.s }[2], [x28]\n"
"b 11f\n"
"4:" // Height 1: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x11, #0, 11f\n"
- "ldr s11, [x9, #0x0]\n"
+ "ldr s11, [x28, #0x0]\n"
"b 11f\n"
"5:" // Height 1: Partial accumulate: partial_2_8
"tbz x11, #1, 6f\n"
- "ldr d10, [x9], #0x8\n"
- "mov x20, #0x28\n"
+ "ldr d10, [x28], #0x8\n"
+ "mov x19, #0x28\n"
"tbz x11, #0, 11f\n"
- "ld1 { v10.s }[2], [x9]\n"
+ "ld1 { v10.s }[2], [x28]\n"
"b 11f\n"
"6:" // Height 1: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x11, #0, 11f\n"
- "ldr s10, [x9, #0x0]\n"
+ "ldr s10, [x28, #0x0]\n"
"b 11f\n"
"7:" // Height 1: Partial accumulate: partial_4_0
"tbz x11, #2, 9f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
"tbz x11, #1, 8f\n"
- "ldr d9, [x9], #0x8\n"
- "mov x20, #0x18\n"
+ "ldr d9, [x28], #0x8\n"
+ "mov x19, #0x18\n"
"tbz x11, #0, 11f\n"
- "ld1 { v9.s }[2], [x9]\n"
+ "ld1 { v9.s }[2], [x28]\n"
"b 11f\n"
"8:" // Height 1: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x11, #0, 11f\n"
- "ldr s9, [x9, #0x0]\n"
+ "ldr s9, [x28, #0x0]\n"
"b 11f\n"
"9:" // Height 1: Partial accumulate: partial_2_0
"tbz x11, #1, 10f\n"
- "ldr d8, [x9], #0x8\n"
- "mov x20, #0x8\n"
+ "ldr d8, [x28], #0x8\n"
+ "mov x19, #0x8\n"
"tbz x11, #0, 11f\n"
- "ld1 { v8.s }[2], [x9]\n"
+ "ld1 { v8.s }[2], [x28]\n"
"b 11f\n"
"10:" // Height 1: Partial accumulate: partial_1_0
- "ldr s8, [x9, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr s8, [x28, #0x0]\n"
+ "mov x19, #0x0\n"
"11:" // Height 1: Partial accumulate: Done
- "sub x9, x9, x20\n"
+ "sub x28, x28, x19\n"
"b 14f\n"
"12:" // Height 1: full accumulate
- "ldr q8, [x9, #0x0]\n"
- "ldr q9, [x9, #0x10]\n"
- "ldr q10, [x9, #0x20]\n"
- "ldr q11, [x9, #0x30]\n"
+ "ldr q8, [x28, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q10, [x28, #0x20]\n"
+ "ldr q11, [x28, #0x30]\n"
"b 14f\n"
"13:" // Height 1: no accumulate
"movi v8.16b, #0x0\n"
@@ -186,42 +186,46 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"movi v10.16b, #0x0\n"
"movi v11.16b, #0x0\n"
"14:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"15:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 16f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 17f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 17f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
"b 17f\n"
"16:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"17:" // Height 1: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"blt 20f\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
"ldr q6, [x10, #0x0]\n"
- "cmp x27, #0x10\n"
- "ldr q7, [x10, #0x10]\n"
+ "cmp x26, #0x10\n"
"blt 19f\n"
"18:" // Height 1: Multiply loop: Main loop head
".inst 0x4f40f0c8 // bfdot v8.4s, v6.8h, v0.h[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "add x25, x25, #0x10\n"
".inst 0x4f40f0e9 // bfdot v9.4s, v7.8h, v0.h[0]\n"
- "ldr q7, [x10, #0x30]\n"
+ "ldr q6, [x10, #0x20]\n"
+ "sub x26, x26, #0x8\n"
".inst 0x4f40f0ca // bfdot v10.4s, v6.8h, v0.h[0]\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q7, [x10, #0x30]\n"
+ "cmp x26, #0x10\n"
".inst 0x4f40f0eb // bfdot v11.4s, v7.8h, v0.h[0]\n"
+ "ldr q6, [x10, #0x40]\n"
"ldr q7, [x10, #0x50]\n"
".inst 0x4f60f0c8 // bfdot v8.4s, v6.8h, v0.h[1]\n"
"ldr q6, [x10, #0x60]\n"
".inst 0x4f60f0e9 // bfdot v9.4s, v7.8h, v0.h[1]\n"
"ldr q7, [x10, #0x70]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f60f0ca // bfdot v10.4s, v6.8h, v0.h[1]\n"
"ldr q6, [x10, #0x80]\n"
".inst 0x4f60f0eb // bfdot v11.4s, v7.8h, v0.h[1]\n"
@@ -238,28 +242,27 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"ldr q6, [x10, #0xe0]\n"
".inst 0x4f60f8e9 // bfdot v9.4s, v7.8h, v0.h[3]\n"
"ldr q7, [x10, #0xf0]\n"
- "sub x27, x27, #0x8\n"
- "add x26, x26, #0x10\n"
- ".inst 0x4f60f8ca // bfdot v10.4s, v6.8h, v0.h[3]\n"
- ".inst 0x4f60f8eb // bfdot v11.4s, v7.8h, v0.h[3]\n"
- "ldr q0, [x26, #0x0]\n"
- "cmp x27, #0x10\n"
"add x10, x10, #0x100\n"
+ ".inst 0x4f60f8ca // bfdot v10.4s, v6.8h, v0.h[3]\n"
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ ".inst 0x4f60f8eb // bfdot v11.4s, v7.8h, v0.h[3]\n"
+ "ldr q0, [x25, #0x0]\n"
"bge 18b\n"
"19:" // Height 1: Multiply loop: Single iteration only
".inst 0x4f40f0c8 // bfdot v8.4s, v6.8h, v0.h[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "sub x26, x26, #0x8\n"
".inst 0x4f40f0e9 // bfdot v9.4s, v7.8h, v0.h[0]\n"
- "ldr q7, [x10, #0x30]\n"
+ "ldr q6, [x10, #0x20]\n"
+ "add x25, x25, #0x10\n"
".inst 0x4f40f0ca // bfdot v10.4s, v6.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x30]\n"
"ldr q6, [x10, #0x40]\n"
".inst 0x4f40f0eb // bfdot v11.4s, v7.8h, v0.h[0]\n"
"ldr q7, [x10, #0x50]\n"
".inst 0x4f60f0c8 // bfdot v8.4s, v6.8h, v0.h[1]\n"
"ldr q6, [x10, #0x60]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f60f0e9 // bfdot v9.4s, v7.8h, v0.h[1]\n"
"ldr q7, [x10, #0x70]\n"
".inst 0x4f60f0ca // bfdot v10.4s, v6.8h, v0.h[1]\n"
@@ -278,224 +281,221 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"ldr q6, [x10, #0xe0]\n"
".inst 0x4f60f8e9 // bfdot v9.4s, v7.8h, v0.h[3]\n"
"ldr q7, [x10, #0xf0]\n"
- "add x26, x26, #0x10\n"
- "sub x27, x27, #0x8\n"
+ "add x10, x10, #0x100\n"
".inst 0x4f60f8ca // bfdot v10.4s, v6.8h, v0.h[3]\n"
".inst 0x4f60f8eb // bfdot v11.4s, v7.8h, v0.h[3]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "add x10, x10, #0x100\n"
"20:" // Height 1: Multiply loop: Main loop skip
- "cbz x27, 24f\n"
- "cmp x27, #0x2\n"
+ "cbz x26, 24f\n"
+ "cmp x26, #0x2\n"
"blt 22f\n"
"21:" // Height 1: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
+ "ldr s0, [x25], #0x4\n"
+ "sub x26, x26, #0x2\n"
"ldr q6, [x10, #0x0]\n"
".inst 0x4f40f0c8 // bfdot v8.4s, v6.8h, v0.h[0]\n"
- "sub x27, x27, #0x2\n"
"ldr q7, [x10, #0x10]\n"
- "ldr q6, [x10, #0x20]\n"
+ "cmp x26, #0x2\n"
".inst 0x4f40f0e9 // bfdot v9.4s, v7.8h, v0.h[0]\n"
- "cmp x27, #0x2\n"
+ "ldr q6, [x10, #0x20]\n"
"ldr q7, [x10, #0x30]\n"
".inst 0x4f40f0ca // bfdot v10.4s, v6.8h, v0.h[0]\n"
- ".inst 0x4f40f0eb // bfdot v11.4s, v7.8h, v0.h[0]\n"
"add x10, x10, #0x40\n"
+ ".inst 0x4f40f0eb // bfdot v11.4s, v7.8h, v0.h[0]\n"
"bge 21b\n"
+ "cbz x26, 24f\n"
"22:" // Height 1: Multiply loop: Skip odd blocks
- "cbz x27, 24f\n"
- "ldr h0, [x26, #0x0]\n"
+ "ldr h0, [x25, #0x0]\n"
"23:" // Height 1: Multiply loop: Ragged operand read: Done
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
".inst 0x4f40f0c8 // bfdot v8.4s, v6.8h, v0.h[0]\n"
- ".inst 0x4f40f0e9 // bfdot v9.4s, v7.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
"ldr q6, [x10, #0x20]\n"
+ ".inst 0x4f40f0e9 // bfdot v9.4s, v7.8h, v0.h[0]\n"
"ldr q7, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
".inst 0x4f40f0ca // bfdot v10.4s, v6.8h, v0.h[0]\n"
".inst 0x4f40f0eb // bfdot v11.4s, v7.8h, v0.h[0]\n"
- "add x10, x10, #0x40\n"
"24:" // Height 1: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 15b\n"
- "prfm pstl1keep, [x9, #0x0]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
"tbz %x[flags], #1, 25f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v8.4s, v8.4s, v1.4s\n"
- "fmin v9.4s, v9.4s, v1.4s\n"
- "fmin v10.4s, v10.4s, v1.4s\n"
- "fmin v11.4s, v11.4s, v1.4s\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v8.4s, v8.4s, v0.4s\n"
+ "fmin v9.4s, v9.4s, v0.4s\n"
+ "fmin v10.4s, v10.4s, v0.4s\n"
+ "fmin v11.4s, v11.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
"25:" // Height 1: No activation
"cmp x11, #0x10\n"
"bge 34f\n"
"tbz x11, #3, 29f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v9.4s }, [x9], #0x10\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v9.4s }, [x28], #0x10\n"
"tbz x11, #2, 27f\n"
- "st1 { v10.4s }, [x9], #0x10\n"
+ "st1 { v10.4s }, [x28], #0x10\n"
"tbz x11, #1, 26f\n"
- "str d11, [x9], #0x8\n"
+ "str d11, [x28], #0x8\n"
"tbz x11, #0, 33f\n"
- "st1 { v11.s }[2], [x9]\n"
+ "st1 { v11.s }[2], [x28]\n"
"b 33f\n"
"26:" // Height 1: Partial direct writeback: partial_1_12
"tbz x11, #0, 33f\n"
- "str s11, [x9, #0x0]\n"
+ "str s11, [x28, #0x0]\n"
"b 33f\n"
"27:" // Height 1: Partial direct writeback: partial_2_8
"tbz x11, #1, 28f\n"
- "str d10, [x9], #0x8\n"
+ "str d10, [x28], #0x8\n"
"tbz x11, #0, 33f\n"
- "st1 { v10.s }[2], [x9]\n"
+ "st1 { v10.s }[2], [x28]\n"
"b 33f\n"
"28:" // Height 1: Partial direct writeback: partial_1_8
"tbz x11, #0, 33f\n"
- "str s10, [x9, #0x0]\n"
+ "str s10, [x28, #0x0]\n"
"b 33f\n"
"29:" // Height 1: Partial direct writeback: partial_4_0
"tbz x11, #2, 31f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
"tbz x11, #1, 30f\n"
- "str d9, [x9], #0x8\n"
+ "str d9, [x28], #0x8\n"
"tbz x11, #0, 33f\n"
- "st1 { v9.s }[2], [x9]\n"
+ "st1 { v9.s }[2], [x28]\n"
"b 33f\n"
"30:" // Height 1: Partial direct writeback: partial_1_4
"tbz x11, #0, 33f\n"
- "str s9, [x9, #0x0]\n"
+ "str s9, [x28, #0x0]\n"
"b 33f\n"
"31:" // Height 1: Partial direct writeback: partial_2_0
"tbz x11, #1, 32f\n"
- "str d8, [x9], #0x8\n"
+ "str d8, [x28], #0x8\n"
"tbz x11, #0, 33f\n"
- "st1 { v8.s }[2], [x9]\n"
+ "st1 { v8.s }[2], [x28]\n"
"b 33f\n"
"32:" // Height 1: Partial direct writeback: partial_1_0
- "str s8, [x9, #0x0]\n"
+ "str s8, [x28, #0x0]\n"
"33:" // Height 1: Partial direct writeback: Done
"b 35f\n"
"34:" // Height 1: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
"35:" // Height 1: Writeback done
"subs x11, x11, #0x10\n"
"bgt 2b\n"
"b 212f\n"
"36:" // Height 2
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"37:" // Height 2: Column loop
- "cbz x12, 38f\n"
- "ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
+ "cbz x9, 38f\n"
+ "ldr q8, [x9, #0x0]\n"
"mov v12.16b, v8.16b\n"
+ "ldr q9, [x9, #0x10]\n"
+ "ldr q10, [x9, #0x20]\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
+ "ldr q11, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
"mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
- "add x12, x12, #0x40\n"
"b 49f\n"
"38:" // Height 2: no bias
"tbz %x[flags], #0, 48f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x11, #0x10\n"
- "add x25, x9, x20, LSL #2\n"
+ "add x24, x28, x19, LSL #2\n"
"bge 47f\n"
"tbz x11, #3, 42f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
"tbz x11, #2, 40f\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
"tbz x11, #1, 39f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "mov x20, #0x38\n"
+ "mov x19, #0x38\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
"tbz x11, #0, 46f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x24]\n"
"b 46f\n"
"39:" // Height 2: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x11, #0, 46f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
"b 46f\n"
"40:" // Height 2: Partial accumulate: partial_2_8
"tbz x11, #1, 41f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "mov x20, #0x28\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
"tbz x11, #0, 46f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x24]\n"
"b 46f\n"
"41:" // Height 2: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x11, #0, 46f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
"b 46f\n"
"42:" // Height 2: Partial accumulate: partial_4_0
"tbz x11, #2, 44f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
"tbz x11, #1, 43f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "mov x20, #0x18\n"
+ "mov x19, #0x18\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
"tbz x11, #0, 46f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x24]\n"
"b 46f\n"
"43:" // Height 2: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x11, #0, 46f\n"
- "ldr s9, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
+ "ldr s9, [x28, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
"b 46f\n"
"44:" // Height 2: Partial accumulate: partial_2_0
"tbz x11, #1, 45f\n"
- "ldr d8, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "mov x20, #0x8\n"
+ "ldr d8, [x28], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
"tbz x11, #0, 46f\n"
- "ld1 { v8.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
+ "ld1 { v8.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x24]\n"
"b 46f\n"
"45:" // Height 2: Partial accumulate: partial_1_0
- "ldr s8, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr s8, [x28, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s12, [x24, #0x0]\n"
"46:" // Height 2: Partial accumulate: Done
- "sub x9, x9, x20\n"
+ "sub x28, x28, x19\n"
"b 49f\n"
"47:" // Height 2: full accumulate
- "ldr q8, [x9, #0x0]\n"
- "ldr q9, [x9, #0x10]\n"
- "ldr q10, [x9, #0x20]\n"
- "ldr q11, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
+ "ldr q8, [x28, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q10, [x28, #0x20]\n"
+ "ldr q11, [x28, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
"b 49f\n"
"48:" // Height 2: no accumulate
"movi v8.16b, #0x0\n"
@@ -507,58 +507,58 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"movi v14.16b, #0x0\n"
"movi v15.16b, #0x0\n"
"49:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"50:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 51f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 52f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 52f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
"b 52f\n"
"51:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
"52:" // Height 2: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"blt 55f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x10\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x10\n"
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
"blt 54f\n"
"53:" // Height 2: Multiply loop: Main loop head
".inst 0x4f40f0c8 // bfdot v8.4s, v6.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "add x25, x25, #0x10\n"
".inst 0x4f41f0cc // bfdot v12.4s, v6.8h, v1.h[0]\n"
"ldr q6, [x10, #0x20]\n"
- "sub x27, x27, #0x8\n"
+ "add x24, x24, #0x10\n"
".inst 0x4f40f0e9 // bfdot v9.4s, v7.8h, v0.h[0]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x26, x26, #0x8\n"
".inst 0x4f41f0ed // bfdot v13.4s, v7.8h, v1.h[0]\n"
"ldr q7, [x10, #0x30]\n"
- "add x26, x26, #0x10\n"
+ "cmp x26, #0x10\n"
".inst 0x4f40f0ca // bfdot v10.4s, v6.8h, v0.h[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f41f0ce // bfdot v14.4s, v6.8h, v1.h[0]\n"
"ldr q6, [x10, #0x40]\n"
- "add x25, x25, #0x10\n"
".inst 0x4f40f0eb // bfdot v11.4s, v7.8h, v0.h[0]\n"
".inst 0x4f41f0ef // bfdot v15.4s, v7.8h, v1.h[0]\n"
"ldr q7, [x10, #0x50]\n"
- "cmp x27, #0x10\n"
".inst 0x4f60f0c8 // bfdot v8.4s, v6.8h, v0.h[1]\n"
".inst 0x4f61f0cc // bfdot v12.4s, v6.8h, v1.h[1]\n"
"ldr q6, [x10, #0x60]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f60f0e9 // bfdot v9.4s, v7.8h, v0.h[1]\n"
".inst 0x4f61f0ed // bfdot v13.4s, v7.8h, v1.h[1]\n"
"ldr q7, [x10, #0x70]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f60f0ca // bfdot v10.4s, v6.8h, v0.h[1]\n"
".inst 0x4f61f0ce // bfdot v14.4s, v6.8h, v1.h[1]\n"
"ldr q6, [x10, #0x80]\n"
@@ -588,32 +588,32 @@ void a64_hybrid_bf16fp32_dot_6x16 (
".inst 0x4f61f8ce // bfdot v14.4s, v6.8h, v1.h[3]\n"
"ldr q6, [x10, #0x0]\n"
".inst 0x4f60f8eb // bfdot v11.4s, v7.8h, v0.h[3]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
".inst 0x4f61f8ef // bfdot v15.4s, v7.8h, v1.h[3]\n"
- "ldr q1, [x25, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q1, [x24, #0x0]\n"
"bge 53b\n"
"54:" // Height 2: Multiply loop: Single iteration only
".inst 0x4f40f0c8 // bfdot v8.4s, v6.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "sub x26, x26, #0x8\n"
".inst 0x4f41f0cc // bfdot v12.4s, v6.8h, v1.h[0]\n"
"ldr q6, [x10, #0x20]\n"
- "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
".inst 0x4f40f0e9 // bfdot v9.4s, v7.8h, v0.h[0]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
".inst 0x4f41f0ed // bfdot v13.4s, v7.8h, v1.h[0]\n"
"ldr q7, [x10, #0x30]\n"
- "add x25, x25, #0x10\n"
".inst 0x4f40f0ca // bfdot v10.4s, v6.8h, v0.h[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f41f0ce // bfdot v14.4s, v6.8h, v1.h[0]\n"
"ldr q6, [x10, #0x40]\n"
- "sub x27, x27, #0x8\n"
".inst 0x4f40f0eb // bfdot v11.4s, v7.8h, v0.h[0]\n"
".inst 0x4f41f0ef // bfdot v15.4s, v7.8h, v1.h[0]\n"
"ldr q7, [x10, #0x50]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f60f0c8 // bfdot v8.4s, v6.8h, v0.h[1]\n"
".inst 0x4f61f0cc // bfdot v12.4s, v6.8h, v1.h[1]\n"
"ldr q6, [x10, #0x60]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f60f0e9 // bfdot v9.4s, v7.8h, v0.h[1]\n"
".inst 0x4f61f0ed // bfdot v13.4s, v7.8h, v1.h[1]\n"
"ldr q7, [x10, #0x70]\n"
@@ -647,283 +647,283 @@ void a64_hybrid_bf16fp32_dot_6x16 (
".inst 0x4f60f8eb // bfdot v11.4s, v7.8h, v0.h[3]\n"
".inst 0x4f61f8ef // bfdot v15.4s, v7.8h, v1.h[3]\n"
"55:" // Height 2: Multiply loop: Main loop skip
- "cbz x27, 59f\n"
- "cmp x27, #0x2\n"
+ "cbz x26, 59f\n"
+ "cmp x26, #0x2\n"
"blt 57f\n"
"56:" // Height 2: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr s1, [x25], #0x4\n"
- "sub x27, x27, #0x2\n"
- "cmp x27, #0x2\n"
+ "ldr s0, [x25], #0x4\n"
+ "sub x26, x26, #0x2\n"
+ "ldr s1, [x24], #0x4\n"
+ "cmp x26, #0x2\n"
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
".inst 0x4f40f0c8 // bfdot v8.4s, v6.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
".inst 0x4f41f0cc // bfdot v12.4s, v6.8h, v1.h[0]\n"
"ldr q6, [x10, #0x20]\n"
".inst 0x4f40f0e9 // bfdot v9.4s, v7.8h, v0.h[0]\n"
".inst 0x4f41f0ed // bfdot v13.4s, v7.8h, v1.h[0]\n"
"ldr q7, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
".inst 0x4f40f0ca // bfdot v10.4s, v6.8h, v0.h[0]\n"
".inst 0x4f41f0ce // bfdot v14.4s, v6.8h, v1.h[0]\n"
- "add x10, x10, #0x40\n"
".inst 0x4f40f0eb // bfdot v11.4s, v7.8h, v0.h[0]\n"
".inst 0x4f41f0ef // bfdot v15.4s, v7.8h, v1.h[0]\n"
"bge 56b\n"
+ "cbz x26, 59f\n"
"57:" // Height 2: Multiply loop: Skip odd blocks
- "cbz x27, 59f\n"
- "ldr h0, [x26, #0x0]\n"
- "ldr h1, [x25, #0x0]\n"
+ "ldr h0, [x25, #0x0]\n"
+ "ldr h1, [x24, #0x0]\n"
"58:" // Height 2: Multiply loop: Ragged operand read: Done
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
".inst 0x4f40f0c8 // bfdot v8.4s, v6.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
".inst 0x4f41f0cc // bfdot v12.4s, v6.8h, v1.h[0]\n"
"ldr q6, [x10, #0x20]\n"
".inst 0x4f40f0e9 // bfdot v9.4s, v7.8h, v0.h[0]\n"
".inst 0x4f41f0ed // bfdot v13.4s, v7.8h, v1.h[0]\n"
"ldr q7, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
".inst 0x4f40f0ca // bfdot v10.4s, v6.8h, v0.h[0]\n"
".inst 0x4f41f0ce // bfdot v14.4s, v6.8h, v1.h[0]\n"
- "add x10, x10, #0x40\n"
".inst 0x4f40f0eb // bfdot v11.4s, v7.8h, v0.h[0]\n"
".inst 0x4f41f0ef // bfdot v15.4s, v7.8h, v1.h[0]\n"
"59:" // Height 2: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 50b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x25, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"tbz %x[flags], #1, 60f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v8.4s, v8.4s, v1.4s\n"
- "fmin v9.4s, v9.4s, v1.4s\n"
- "fmin v10.4s, v10.4s, v1.4s\n"
- "fmin v11.4s, v11.4s, v1.4s\n"
- "fmin v12.4s, v12.4s, v1.4s\n"
- "fmin v13.4s, v13.4s, v1.4s\n"
- "fmin v14.4s, v14.4s, v1.4s\n"
- "fmin v15.4s, v15.4s, v1.4s\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v15.4s, v15.4s, v0.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v8.4s, v8.4s, v0.4s\n"
+ "fmin v9.4s, v9.4s, v0.4s\n"
+ "fmin v10.4s, v10.4s, v0.4s\n"
+ "fmin v11.4s, v11.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v0.4s\n"
+ "fmin v13.4s, v13.4s, v0.4s\n"
+ "fmin v14.4s, v14.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmin v15.4s, v15.4s, v0.4s\n"
+ "fmax v15.4s, v15.4s, v1.4s\n"
"60:" // Height 2: No activation
"cmp x11, #0x10\n"
"bge 69f\n"
"tbz x11, #3, 64f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v9.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v9.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v13.4s }, [x24], #0x10\n"
"tbz x11, #2, 62f\n"
- "st1 { v10.4s }, [x9], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
+ "st1 { v10.4s }, [x28], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
"tbz x11, #1, 61f\n"
- "str d11, [x9], #0x8\n"
- "str d15, [x25], #0x8\n"
+ "str d11, [x28], #0x8\n"
+ "str d15, [x24], #0x8\n"
"tbz x11, #0, 68f\n"
- "st1 { v11.s }[2], [x9]\n"
- "st1 { v15.s }[2], [x25]\n"
+ "st1 { v11.s }[2], [x28]\n"
+ "st1 { v15.s }[2], [x24]\n"
"b 68f\n"
"61:" // Height 2: Partial direct writeback: partial_1_12
"tbz x11, #0, 68f\n"
- "str s11, [x9, #0x0]\n"
- "str s15, [x25, #0x0]\n"
+ "str s11, [x28, #0x0]\n"
+ "str s15, [x24, #0x0]\n"
"b 68f\n"
"62:" // Height 2: Partial direct writeback: partial_2_8
"tbz x11, #1, 63f\n"
- "str d10, [x9], #0x8\n"
- "str d14, [x25], #0x8\n"
+ "str d10, [x28], #0x8\n"
+ "str d14, [x24], #0x8\n"
"tbz x11, #0, 68f\n"
- "st1 { v10.s }[2], [x9]\n"
- "st1 { v14.s }[2], [x25]\n"
+ "st1 { v10.s }[2], [x28]\n"
+ "st1 { v14.s }[2], [x24]\n"
"b 68f\n"
"63:" // Height 2: Partial direct writeback: partial_1_8
"tbz x11, #0, 68f\n"
- "str s10, [x9, #0x0]\n"
- "str s14, [x25, #0x0]\n"
+ "str s10, [x28, #0x0]\n"
+ "str s14, [x24, #0x0]\n"
"b 68f\n"
"64:" // Height 2: Partial direct writeback: partial_4_0
"tbz x11, #2, 66f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
"tbz x11, #1, 65f\n"
- "str d9, [x9], #0x8\n"
- "str d13, [x25], #0x8\n"
+ "str d9, [x28], #0x8\n"
+ "str d13, [x24], #0x8\n"
"tbz x11, #0, 68f\n"
- "st1 { v9.s }[2], [x9]\n"
- "st1 { v13.s }[2], [x25]\n"
+ "st1 { v9.s }[2], [x28]\n"
+ "st1 { v13.s }[2], [x24]\n"
"b 68f\n"
"65:" // Height 2: Partial direct writeback: partial_1_4
"tbz x11, #0, 68f\n"
- "str s9, [x9, #0x0]\n"
- "str s13, [x25, #0x0]\n"
+ "str s9, [x28, #0x0]\n"
+ "str s13, [x24, #0x0]\n"
"b 68f\n"
"66:" // Height 2: Partial direct writeback: partial_2_0
"tbz x11, #1, 67f\n"
- "str d8, [x9], #0x8\n"
- "str d12, [x25], #0x8\n"
+ "str d8, [x28], #0x8\n"
+ "str d12, [x24], #0x8\n"
"tbz x11, #0, 68f\n"
- "st1 { v8.s }[2], [x9]\n"
- "st1 { v12.s }[2], [x25]\n"
+ "st1 { v8.s }[2], [x28]\n"
+ "st1 { v12.s }[2], [x24]\n"
"b 68f\n"
"67:" // Height 2: Partial direct writeback: partial_1_0
- "str s8, [x9, #0x0]\n"
- "str s12, [x25, #0x0]\n"
+ "str s8, [x28, #0x0]\n"
+ "str s12, [x24, #0x0]\n"
"68:" // Height 2: Partial direct writeback: Done
"b 70f\n"
"69:" // Height 2: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
"70:" // Height 2: Writeback done
"subs x11, x11, #0x10\n"
"bgt 37b\n"
"b 212f\n"
"71:" // Height 3
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"72:" // Height 3: Column loop
- "cbz x12, 73f\n"
- "ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
+ "cbz x9, 73f\n"
+ "ldr q8, [x9, #0x0]\n"
"mov v12.16b, v8.16b\n"
+ "ldr q9, [x9, #0x10]\n"
+ "mov v16.16b, v8.16b\n"
+ "ldr q10, [x9, #0x20]\n"
+ "ldr q11, [x9, #0x30]\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "mov v17.16b, v9.16b\n"
"mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
- "mov v16.16b, v8.16b\n"
- "mov v17.16b, v9.16b\n"
- "add x12, x12, #0x40\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
"b 84f\n"
"73:" // Height 3: no bias
"tbz %x[flags], #0, 83f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x11, #0x10\n"
- "add x24, x25, x20, LSL #2\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"bge 82f\n"
"tbz x11, #3, 77f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
"tbz x11, #2, 75f\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
"tbz x11, #1, 74f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "mov x20, #0x38\n"
- "ldr d19, [x24], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "ldr d19, [x23], #0x8\n"
"tbz x11, #0, 81f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
"b 81f\n"
"74:" // Height 3: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x11, #0, 81f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
"b 81f\n"
"75:" // Height 3: Partial accumulate: partial_2_8
"tbz x11, #1, 76f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "mov x20, #0x28\n"
- "ldr d18, [x24], #0x8\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d18, [x23], #0x8\n"
"tbz x11, #0, 81f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
"b 81f\n"
"76:" // Height 3: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x11, #0, 81f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
"b 81f\n"
"77:" // Height 3: Partial accumulate: partial_4_0
"tbz x11, #2, 79f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
"tbz x11, #1, 78f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "mov x20, #0x18\n"
- "ldr d17, [x24], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "ldr d17, [x23], #0x8\n"
"tbz x11, #0, 81f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
"b 81f\n"
"78:" // Height 3: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x11, #0, 81f\n"
- "ldr s9, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s17, [x24, #0x0]\n"
+ "ldr s9, [x28, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s17, [x23, #0x0]\n"
"b 81f\n"
"79:" // Height 3: Partial accumulate: partial_2_0
"tbz x11, #1, 80f\n"
- "ldr d8, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d16, [x24], #0x8\n"
+ "ldr d8, [x28], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d16, [x23], #0x8\n"
"tbz x11, #0, 81f\n"
- "ld1 { v8.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v16.s }[2], [x24]\n"
+ "ld1 { v8.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v16.s }[2], [x23]\n"
"b 81f\n"
"80:" // Height 3: Partial accumulate: partial_1_0
- "ldr s8, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s16, [x24, #0x0]\n"
+ "ldr s8, [x28, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s12, [x24, #0x0]\n"
+ "ldr s16, [x23, #0x0]\n"
"81:" // Height 3: Partial accumulate: Done
- "sub x9, x9, x20\n"
+ "sub x28, x28, x19\n"
"b 84f\n"
"82:" // Height 3: full accumulate
- "ldr q8, [x9, #0x0]\n"
- "ldr q9, [x9, #0x10]\n"
- "ldr q10, [x9, #0x20]\n"
- "ldr q11, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
+ "ldr q8, [x28, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q10, [x28, #0x20]\n"
+ "ldr q11, [x28, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x0]\n"
+ "ldr q17, [x23, #0x10]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "ldr q19, [x23, #0x30]\n"
"b 84f\n"
"83:" // Height 3: no accumulate
"movi v8.16b, #0x0\n"
@@ -939,62 +939,62 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"movi v18.16b, #0x0\n"
"movi v19.16b, #0x0\n"
"84:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"85:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 86f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 87f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 87f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
"b 87f\n"
"86:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
"87:" // Height 3: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"blt 90f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x10\n"
- "ldr q2, [x24, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x10\n"
+ "ldr q2, [x23, #0x0]\n"
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
"blt 89f\n"
"88:" // Height 3: Multiply loop: Main loop head
".inst 0x4f40f0c8 // bfdot v8.4s, v6.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "add x25, x25, #0x10\n"
".inst 0x4f41f0cc // bfdot v12.4s, v6.8h, v1.h[0]\n"
- "sub x27, x27, #0x8\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
".inst 0x4f42f0d0 // bfdot v16.4s, v6.8h, v2.h[0]\n"
"ldr q6, [x10, #0x20]\n"
+ "add x23, x23, #0x10\n"
".inst 0x4f40f0e9 // bfdot v9.4s, v7.8h, v0.h[0]\n"
- "add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "sub x26, x26, #0x8\n"
".inst 0x4f41f0ed // bfdot v13.4s, v7.8h, v1.h[0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "cmp x26, #0x10\n"
".inst 0x4f42f0f1 // bfdot v17.4s, v7.8h, v2.h[0]\n"
"ldr q7, [x10, #0x30]\n"
- "add x24, x24, #0x10\n"
".inst 0x4f40f0ca // bfdot v10.4s, v6.8h, v0.h[0]\n"
".inst 0x4f41f0ce // bfdot v14.4s, v6.8h, v1.h[0]\n"
- "cmp x27, #0x10\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f42f0d2 // bfdot v18.4s, v6.8h, v2.h[0]\n"
"ldr q6, [x10, #0x40]\n"
".inst 0x4f40f0eb // bfdot v11.4s, v7.8h, v0.h[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f41f0ef // bfdot v15.4s, v7.8h, v1.h[0]\n"
".inst 0x4f42f0f3 // bfdot v19.4s, v7.8h, v2.h[0]\n"
"ldr q7, [x10, #0x50]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f60f0c8 // bfdot v8.4s, v6.8h, v0.h[1]\n"
".inst 0x4f61f0cc // bfdot v12.4s, v6.8h, v1.h[1]\n"
".inst 0x4f62f0d0 // bfdot v16.4s, v6.8h, v2.h[1]\n"
@@ -1041,34 +1041,34 @@ void a64_hybrid_bf16fp32_dot_6x16 (
".inst 0x4f62f8d2 // bfdot v18.4s, v6.8h, v2.h[3]\n"
"ldr q6, [x10, #0x0]\n"
".inst 0x4f60f8eb // bfdot v11.4s, v7.8h, v0.h[3]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
".inst 0x4f61f8ef // bfdot v15.4s, v7.8h, v1.h[3]\n"
- "ldr q1, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
".inst 0x4f62f8f3 // bfdot v19.4s, v7.8h, v2.h[3]\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q2, [x23, #0x0]\n"
"bge 88b\n"
"89:" // Height 3: Multiply loop: Single iteration only
".inst 0x4f40f0c8 // bfdot v8.4s, v6.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "sub x26, x26, #0x8\n"
".inst 0x4f41f0cc // bfdot v12.4s, v6.8h, v1.h[0]\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f42f0d0 // bfdot v16.4s, v6.8h, v2.h[0]\n"
- "ldr q6, [x10, #0x20]\n"
- ".inst 0x4f40f0e9 // bfdot v9.4s, v7.8h, v0.h[0]\n"
"add x24, x24, #0x10\n"
+ ".inst 0x4f40f0e9 // bfdot v9.4s, v7.8h, v0.h[0]\n"
+ "ldr q6, [x10, #0x20]\n"
+ "add x23, x23, #0x10\n"
".inst 0x4f41f0ed // bfdot v13.4s, v7.8h, v1.h[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f42f0f1 // bfdot v17.4s, v7.8h, v2.h[0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"ldr q7, [x10, #0x30]\n"
- "sub x27, x27, #0x8\n"
".inst 0x4f40f0ca // bfdot v10.4s, v6.8h, v0.h[0]\n"
".inst 0x4f41f0ce // bfdot v14.4s, v6.8h, v1.h[0]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f42f0d2 // bfdot v18.4s, v6.8h, v2.h[0]\n"
"ldr q6, [x10, #0x40]\n"
".inst 0x4f40f0eb // bfdot v11.4s, v7.8h, v0.h[0]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f41f0ef // bfdot v15.4s, v7.8h, v1.h[0]\n"
".inst 0x4f42f0f3 // bfdot v19.4s, v7.8h, v2.h[0]\n"
"ldr q7, [x10, #0x50]\n"
@@ -1120,19 +1120,19 @@ void a64_hybrid_bf16fp32_dot_6x16 (
".inst 0x4f61f8ef // bfdot v15.4s, v7.8h, v1.h[3]\n"
".inst 0x4f62f8f3 // bfdot v19.4s, v7.8h, v2.h[3]\n"
"90:" // Height 3: Multiply loop: Main loop skip
- "cbz x27, 94f\n"
- "cmp x27, #0x2\n"
+ "cbz x26, 94f\n"
+ "cmp x26, #0x2\n"
"blt 92f\n"
"91:" // Height 3: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr s1, [x25], #0x4\n"
- "sub x27, x27, #0x2\n"
- "cmp x27, #0x2\n"
- "ldr s2, [x24], #0x4\n"
+ "ldr s0, [x25], #0x4\n"
+ "sub x26, x26, #0x2\n"
+ "ldr s1, [x24], #0x4\n"
+ "cmp x26, #0x2\n"
+ "ldr s2, [x23], #0x4\n"
"ldr q6, [x10, #0x0]\n"
".inst 0x4f40f0c8 // bfdot v8.4s, v6.8h, v0.h[0]\n"
- ".inst 0x4f41f0cc // bfdot v12.4s, v6.8h, v1.h[0]\n"
"ldr q7, [x10, #0x10]\n"
+ ".inst 0x4f41f0cc // bfdot v12.4s, v6.8h, v1.h[0]\n"
".inst 0x4f42f0d0 // bfdot v16.4s, v6.8h, v2.h[0]\n"
"ldr q6, [x10, #0x20]\n"
".inst 0x4f40f0e9 // bfdot v9.4s, v7.8h, v0.h[0]\n"
@@ -1147,15 +1147,15 @@ void a64_hybrid_bf16fp32_dot_6x16 (
".inst 0x4f41f0ef // bfdot v15.4s, v7.8h, v1.h[0]\n"
".inst 0x4f42f0f3 // bfdot v19.4s, v7.8h, v2.h[0]\n"
"bge 91b\n"
+ "cbz x26, 94f\n"
"92:" // Height 3: Multiply loop: Skip odd blocks
- "cbz x27, 94f\n"
- "ldr h0, [x26, #0x0]\n"
- "ldr h1, [x25, #0x0]\n"
- "ldr h2, [x24, #0x0]\n"
+ "ldr h0, [x25, #0x0]\n"
+ "ldr h1, [x24, #0x0]\n"
+ "ldr h2, [x23, #0x0]\n"
"93:" // Height 3: Multiply loop: Ragged operand read: Done
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
".inst 0x4f40f0c8 // bfdot v8.4s, v6.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
".inst 0x4f41f0cc // bfdot v12.4s, v6.8h, v1.h[0]\n"
".inst 0x4f42f0d0 // bfdot v16.4s, v6.8h, v2.h[0]\n"
"ldr q6, [x10, #0x20]\n"
@@ -1171,297 +1171,297 @@ void a64_hybrid_bf16fp32_dot_6x16 (
".inst 0x4f41f0ef // bfdot v15.4s, v7.8h, v1.h[0]\n"
".inst 0x4f42f0f3 // bfdot v19.4s, v7.8h, v2.h[0]\n"
"94:" // Height 3: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 85b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x25, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x24, x28, x19, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x19, LSL #2\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"tbz %x[flags], #1, 95f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v8.4s, v8.4s, v1.4s\n"
- "fmin v9.4s, v9.4s, v1.4s\n"
- "fmin v10.4s, v10.4s, v1.4s\n"
- "fmin v11.4s, v11.4s, v1.4s\n"
- "fmin v12.4s, v12.4s, v1.4s\n"
- "fmin v13.4s, v13.4s, v1.4s\n"
- "fmin v14.4s, v14.4s, v1.4s\n"
- "fmin v15.4s, v15.4s, v1.4s\n"
- "fmin v16.4s, v16.4s, v1.4s\n"
- "fmin v17.4s, v17.4s, v1.4s\n"
- "fmin v18.4s, v18.4s, v1.4s\n"
- "fmin v19.4s, v19.4s, v1.4s\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v15.4s, v15.4s, v0.4s\n"
- "fmax v16.4s, v16.4s, v0.4s\n"
- "fmax v17.4s, v17.4s, v0.4s\n"
- "fmax v18.4s, v18.4s, v0.4s\n"
- "fmax v19.4s, v19.4s, v0.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v8.4s, v8.4s, v0.4s\n"
+ "fmin v9.4s, v9.4s, v0.4s\n"
+ "fmin v10.4s, v10.4s, v0.4s\n"
+ "fmin v11.4s, v11.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v0.4s\n"
+ "fmin v13.4s, v13.4s, v0.4s\n"
+ "fmin v14.4s, v14.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmin v15.4s, v15.4s, v0.4s\n"
+ "fmin v16.4s, v16.4s, v0.4s\n"
+ "fmin v17.4s, v17.4s, v0.4s\n"
+ "fmax v15.4s, v15.4s, v1.4s\n"
+ "fmax v16.4s, v16.4s, v1.4s\n"
+ "fmax v17.4s, v17.4s, v1.4s\n"
+ "fmin v18.4s, v18.4s, v0.4s\n"
+ "fmin v19.4s, v19.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v1.4s\n"
+ "fmax v19.4s, v19.4s, v1.4s\n"
"95:" // Height 3: No activation
"cmp x11, #0x10\n"
"bge 104f\n"
"tbz x11, #3, 99f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v9.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v9.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v13.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v17.4s }, [x23], #0x10\n"
"tbz x11, #2, 97f\n"
- "st1 { v10.4s }, [x9], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
+ "st1 { v10.4s }, [x28], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v18.4s }, [x23], #0x10\n"
"tbz x11, #1, 96f\n"
- "str d11, [x9], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
+ "str d11, [x28], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
"tbz x11, #0, 103f\n"
- "st1 { v11.s }[2], [x9]\n"
- "st1 { v15.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
+ "st1 { v11.s }[2], [x28]\n"
+ "st1 { v15.s }[2], [x24]\n"
+ "st1 { v19.s }[2], [x23]\n"
"b 103f\n"
"96:" // Height 3: Partial direct writeback: partial_1_12
"tbz x11, #0, 103f\n"
- "str s11, [x9, #0x0]\n"
- "str s15, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
+ "str s11, [x28, #0x0]\n"
+ "str s15, [x24, #0x0]\n"
+ "str s19, [x23, #0x0]\n"
"b 103f\n"
"97:" // Height 3: Partial direct writeback: partial_2_8
"tbz x11, #1, 98f\n"
- "str d10, [x9], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
+ "str d10, [x28], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
"tbz x11, #0, 103f\n"
- "st1 { v10.s }[2], [x9]\n"
- "st1 { v14.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
+ "st1 { v10.s }[2], [x28]\n"
+ "st1 { v14.s }[2], [x24]\n"
+ "st1 { v18.s }[2], [x23]\n"
"b 103f\n"
"98:" // Height 3: Partial direct writeback: partial_1_8
"tbz x11, #0, 103f\n"
- "str s10, [x9, #0x0]\n"
- "str s14, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
+ "str s10, [x28, #0x0]\n"
+ "str s14, [x24, #0x0]\n"
+ "str s18, [x23, #0x0]\n"
"b 103f\n"
"99:" // Height 3: Partial direct writeback: partial_4_0
"tbz x11, #2, 101f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
"tbz x11, #1, 100f\n"
- "str d9, [x9], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
+ "str d9, [x28], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
"tbz x11, #0, 103f\n"
- "st1 { v9.s }[2], [x9]\n"
- "st1 { v13.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
+ "st1 { v9.s }[2], [x28]\n"
+ "st1 { v13.s }[2], [x24]\n"
+ "st1 { v17.s }[2], [x23]\n"
"b 103f\n"
"100:" // Height 3: Partial direct writeback: partial_1_4
"tbz x11, #0, 103f\n"
- "str s9, [x9, #0x0]\n"
- "str s13, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
+ "str s9, [x28, #0x0]\n"
+ "str s13, [x24, #0x0]\n"
+ "str s17, [x23, #0x0]\n"
"b 103f\n"
"101:" // Height 3: Partial direct writeback: partial_2_0
"tbz x11, #1, 102f\n"
- "str d8, [x9], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
+ "str d8, [x28], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
"tbz x11, #0, 103f\n"
- "st1 { v8.s }[2], [x9]\n"
- "st1 { v12.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
+ "st1 { v8.s }[2], [x28]\n"
+ "st1 { v12.s }[2], [x24]\n"
+ "st1 { v16.s }[2], [x23]\n"
"b 103f\n"
"102:" // Height 3: Partial direct writeback: partial_1_0
- "str s8, [x9, #0x0]\n"
- "str s12, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
+ "str s8, [x28, #0x0]\n"
+ "str s12, [x24, #0x0]\n"
+ "str s16, [x23, #0x0]\n"
"103:" // Height 3: Partial direct writeback: Done
"b 105f\n"
"104:" // Height 3: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
"105:" // Height 3: Writeback done
"subs x11, x11, #0x10\n"
"bgt 72b\n"
"b 212f\n"
"106:" // Height 4
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"107:" // Height 4: Column loop
- "cbz x12, 108f\n"
- "ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
+ "cbz x9, 108f\n"
+ "ldr q8, [x9, #0x0]\n"
"mov v12.16b, v8.16b\n"
+ "ldr q9, [x9, #0x10]\n"
+ "mov v16.16b, v8.16b\n"
+ "ldr q10, [x9, #0x20]\n"
+ "mov v20.16b, v8.16b\n"
+ "ldr q11, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
+ "mov v17.16b, v9.16b\n"
"mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
- "mov v16.16b, v8.16b\n"
- "mov v17.16b, v9.16b\n"
- "add x12, x12, #0x40\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
- "mov v20.16b, v8.16b\n"
"mov v21.16b, v9.16b\n"
"mov v22.16b, v10.16b\n"
"mov v23.16b, v11.16b\n"
"b 119f\n"
"108:" // Height 4: no bias
"tbz %x[flags], #0, 118f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x11, #0x10\n"
- "add x23, x24, x20, LSL #2\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"bge 117f\n"
"tbz x11, #3, 112f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
"tbz x11, #2, 110f\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
"tbz x11, #1, 109f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "mov x20, #0x38\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
"tbz x11, #0, 116f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v23.s }[2], [x22]\n"
"b 116f\n"
"109:" // Height 4: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x11, #0, 116f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s23, [x22, #0x0]\n"
"b 116f\n"
"110:" // Height 4: Partial accumulate: partial_2_8
"tbz x11, #1, 111f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "mov x20, #0x28\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
"tbz x11, #0, 116f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v22.s }[2], [x22]\n"
"b 116f\n"
"111:" // Height 4: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x11, #0, 116f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "ldr s22, [x22, #0x0]\n"
"b 116f\n"
"112:" // Height 4: Partial accumulate: partial_4_0
"tbz x11, #2, 114f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
"tbz x11, #1, 113f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "mov x20, #0x18\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
"tbz x11, #0, 116f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v21.s }[2], [x22]\n"
"b 116f\n"
"113:" // Height 4: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x11, #0, 116f\n"
- "ldr s9, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
+ "ldr s9, [x28, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s17, [x23, #0x0]\n"
+ "ldr s21, [x22, #0x0]\n"
"b 116f\n"
"114:" // Height 4: Partial accumulate: partial_2_0
"tbz x11, #1, 115f\n"
- "ldr d8, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
+ "ldr d8, [x28], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d16, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
"tbz x11, #0, 116f\n"
- "ld1 { v8.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v16.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
+ "ld1 { v8.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v16.s }[2], [x23]\n"
+ "ld1 { v20.s }[2], [x22]\n"
"b 116f\n"
"115:" // Height 4: Partial accumulate: partial_1_0
- "ldr s8, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s16, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
+ "ldr s8, [x28, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s12, [x24, #0x0]\n"
+ "ldr s16, [x23, #0x0]\n"
+ "ldr s20, [x22, #0x0]\n"
"116:" // Height 4: Partial accumulate: Done
- "sub x9, x9, x20\n"
+ "sub x28, x28, x19\n"
"b 119f\n"
"117:" // Height 4: full accumulate
- "ldr q8, [x9, #0x0]\n"
- "ldr q9, [x9, #0x10]\n"
- "ldr q10, [x9, #0x20]\n"
- "ldr q11, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
+ "ldr q8, [x28, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q10, [x28, #0x20]\n"
+ "ldr q11, [x28, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x0]\n"
+ "ldr q17, [x23, #0x10]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "ldr q19, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
"b 119f\n"
"118:" // Height 4: no accumulate
"movi v8.16b, #0x0\n"
@@ -1481,69 +1481,69 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"movi v22.16b, #0x0\n"
"movi v23.16b, #0x0\n"
"119:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"120:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 121f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 122f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 122f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
"b 122f\n"
"121:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
"122:" // Height 4: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"blt 125f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x10\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q3, [x23, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x10\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
"blt 124f\n"
"123:" // Height 4: Multiply loop: Main loop head
".inst 0x4f40f0c8 // bfdot v8.4s, v6.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "add x25, x25, #0x10\n"
".inst 0x4f41f0cc // bfdot v12.4s, v6.8h, v1.h[0]\n"
- "sub x27, x27, #0x8\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
".inst 0x4f42f0d0 // bfdot v16.4s, v6.8h, v2.h[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "add x23, x23, #0x10\n"
".inst 0x4f43f0d4 // bfdot v20.4s, v6.8h, v3.h[0]\n"
"ldr q6, [x10, #0x20]\n"
- "add x25, x25, #0x10\n"
+ "add x22, x22, #0x10\n"
".inst 0x4f40f0e9 // bfdot v9.4s, v7.8h, v0.h[0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "sub x26, x26, #0x8\n"
".inst 0x4f41f0ed // bfdot v13.4s, v7.8h, v1.h[0]\n"
- "add x24, x24, #0x10\n"
- "add x23, x23, #0x10\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "cmp x26, #0x10\n"
".inst 0x4f42f0f1 // bfdot v17.4s, v7.8h, v2.h[0]\n"
".inst 0x4f43f0f5 // bfdot v21.4s, v7.8h, v3.h[0]\n"
"ldr q7, [x10, #0x30]\n"
- "cmp x27, #0x10\n"
".inst 0x4f40f0ca // bfdot v10.4s, v6.8h, v0.h[0]\n"
".inst 0x4f41f0ce // bfdot v14.4s, v6.8h, v1.h[0]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f42f0d2 // bfdot v18.4s, v6.8h, v2.h[0]\n"
".inst 0x4f43f0d6 // bfdot v22.4s, v6.8h, v3.h[0]\n"
"ldr q6, [x10, #0x40]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f40f0eb // bfdot v11.4s, v7.8h, v0.h[0]\n"
".inst 0x4f41f0ef // bfdot v15.4s, v7.8h, v1.h[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4f42f0f3 // bfdot v19.4s, v7.8h, v2.h[0]\n"
".inst 0x4f43f0f7 // bfdot v23.4s, v7.8h, v3.h[0]\n"
"ldr q7, [x10, #0x50]\n"
@@ -1604,40 +1604,40 @@ void a64_hybrid_bf16fp32_dot_6x16 (
".inst 0x4f63f8d6 // bfdot v22.4s, v6.8h, v3.h[3]\n"
"ldr q6, [x10, #0x0]\n"
".inst 0x4f60f8eb // bfdot v11.4s, v7.8h, v0.h[3]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
".inst 0x4f61f8ef // bfdot v15.4s, v7.8h, v1.h[3]\n"
- "ldr q1, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
".inst 0x4f62f8f3 // bfdot v19.4s, v7.8h, v2.h[3]\n"
- "ldr q2, [x24, #0x0]\n"
+ "ldr q2, [x23, #0x0]\n"
".inst 0x4f63f8f7 // bfdot v23.4s, v7.8h, v3.h[3]\n"
- "ldr q3, [x23, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q3, [x22, #0x0]\n"
"bge 123b\n"
"124:" // Height 4: Multiply loop: Single iteration only
".inst 0x4f40f0c8 // bfdot v8.4s, v6.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "sub x26, x26, #0x8\n"
".inst 0x4f41f0cc // bfdot v12.4s, v6.8h, v1.h[0]\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f42f0d0 // bfdot v16.4s, v6.8h, v2.h[0]\n"
- ".inst 0x4f43f0d4 // bfdot v20.4s, v6.8h, v3.h[0]\n"
- "ldr q6, [x10, #0x20]\n"
"add x24, x24, #0x10\n"
+ ".inst 0x4f43f0d4 // bfdot v20.4s, v6.8h, v3.h[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "add x23, x23, #0x10\n"
".inst 0x4f40f0e9 // bfdot v9.4s, v7.8h, v0.h[0]\n"
+ "ldr q6, [x10, #0x20]\n"
+ "add x22, x22, #0x10\n"
".inst 0x4f41f0ed // bfdot v13.4s, v7.8h, v1.h[0]\n"
- "add x23, x23, #0x10\n"
- "sub x27, x27, #0x8\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4f42f0f1 // bfdot v17.4s, v7.8h, v2.h[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4f43f0f5 // bfdot v21.4s, v7.8h, v3.h[0]\n"
"ldr q7, [x10, #0x30]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f40f0ca // bfdot v10.4s, v6.8h, v0.h[0]\n"
".inst 0x4f41f0ce // bfdot v14.4s, v6.8h, v1.h[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f42f0d2 // bfdot v18.4s, v6.8h, v2.h[0]\n"
".inst 0x4f43f0d6 // bfdot v22.4s, v6.8h, v3.h[0]\n"
"ldr q6, [x10, #0x40]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4f40f0eb // bfdot v11.4s, v7.8h, v0.h[0]\n"
".inst 0x4f41f0ef // bfdot v15.4s, v7.8h, v1.h[0]\n"
".inst 0x4f42f0f3 // bfdot v19.4s, v7.8h, v2.h[0]\n"
@@ -1703,19 +1703,19 @@ void a64_hybrid_bf16fp32_dot_6x16 (
".inst 0x4f62f8f3 // bfdot v19.4s, v7.8h, v2.h[3]\n"
".inst 0x4f63f8f7 // bfdot v23.4s, v7.8h, v3.h[3]\n"
"125:" // Height 4: Multiply loop: Main loop skip
- "cbz x27, 129f\n"
- "cmp x27, #0x2\n"
+ "cbz x26, 129f\n"
+ "cmp x26, #0x2\n"
"blt 127f\n"
"126:" // Height 4: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr s1, [x25], #0x4\n"
- "sub x27, x27, #0x2\n"
- "cmp x27, #0x2\n"
- "ldr s2, [x24], #0x4\n"
- "ldr s3, [x23], #0x4\n"
+ "ldr s0, [x25], #0x4\n"
+ "sub x26, x26, #0x2\n"
+ "ldr s1, [x24], #0x4\n"
+ "cmp x26, #0x2\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr s3, [x22], #0x4\n"
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
".inst 0x4f40f0c8 // bfdot v8.4s, v6.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
".inst 0x4f41f0cc // bfdot v12.4s, v6.8h, v1.h[0]\n"
".inst 0x4f42f0d0 // bfdot v16.4s, v6.8h, v2.h[0]\n"
".inst 0x4f43f0d4 // bfdot v20.4s, v6.8h, v3.h[0]\n"
@@ -1735,16 +1735,16 @@ void a64_hybrid_bf16fp32_dot_6x16 (
".inst 0x4f42f0f3 // bfdot v19.4s, v7.8h, v2.h[0]\n"
".inst 0x4f43f0f7 // bfdot v23.4s, v7.8h, v3.h[0]\n"
"bge 126b\n"
+ "cbz x26, 129f\n"
"127:" // Height 4: Multiply loop: Skip odd blocks
- "cbz x27, 129f\n"
- "ldr h0, [x26, #0x0]\n"
- "ldr h1, [x25, #0x0]\n"
- "ldr h2, [x24, #0x0]\n"
- "ldr h3, [x23, #0x0]\n"
+ "ldr h0, [x25, #0x0]\n"
+ "ldr h1, [x24, #0x0]\n"
+ "ldr h2, [x23, #0x0]\n"
+ "ldr h3, [x22, #0x0]\n"
"128:" // Height 4: Multiply loop: Ragged operand read: Done
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
".inst 0x4f40f0c8 // bfdot v8.4s, v6.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
".inst 0x4f41f0cc // bfdot v12.4s, v6.8h, v1.h[0]\n"
".inst 0x4f42f0d0 // bfdot v16.4s, v6.8h, v2.h[0]\n"
".inst 0x4f43f0d4 // bfdot v20.4s, v6.8h, v3.h[0]\n"
@@ -1764,352 +1764,352 @@ void a64_hybrid_bf16fp32_dot_6x16 (
".inst 0x4f42f0f3 // bfdot v19.4s, v7.8h, v2.h[0]\n"
".inst 0x4f43f0f7 // bfdot v23.4s, v7.8h, v3.h[0]\n"
"129:" // Height 4: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 120b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "add x23, x24, x20, LSL #2\n"
- "prfm pstl1keep, [x25, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x24, x28, x19, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
"tbz %x[flags], #1, 130f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v8.4s, v8.4s, v1.4s\n"
- "fmin v9.4s, v9.4s, v1.4s\n"
- "fmin v10.4s, v10.4s, v1.4s\n"
- "fmin v11.4s, v11.4s, v1.4s\n"
- "fmin v12.4s, v12.4s, v1.4s\n"
- "fmin v13.4s, v13.4s, v1.4s\n"
- "fmin v14.4s, v14.4s, v1.4s\n"
- "fmin v15.4s, v15.4s, v1.4s\n"
- "fmin v16.4s, v16.4s, v1.4s\n"
- "fmin v17.4s, v17.4s, v1.4s\n"
- "fmin v18.4s, v18.4s, v1.4s\n"
- "fmin v19.4s, v19.4s, v1.4s\n"
- "fmin v20.4s, v20.4s, v1.4s\n"
- "fmin v21.4s, v21.4s, v1.4s\n"
- "fmin v22.4s, v22.4s, v1.4s\n"
- "fmin v23.4s, v23.4s, v1.4s\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v15.4s, v15.4s, v0.4s\n"
- "fmax v16.4s, v16.4s, v0.4s\n"
- "fmax v17.4s, v17.4s, v0.4s\n"
- "fmax v18.4s, v18.4s, v0.4s\n"
- "fmax v19.4s, v19.4s, v0.4s\n"
- "fmax v20.4s, v20.4s, v0.4s\n"
- "fmax v21.4s, v21.4s, v0.4s\n"
- "fmax v22.4s, v22.4s, v0.4s\n"
- "fmax v23.4s, v23.4s, v0.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v8.4s, v8.4s, v0.4s\n"
+ "fmin v9.4s, v9.4s, v0.4s\n"
+ "fmin v10.4s, v10.4s, v0.4s\n"
+ "fmin v11.4s, v11.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v0.4s\n"
+ "fmin v13.4s, v13.4s, v0.4s\n"
+ "fmin v14.4s, v14.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmin v15.4s, v15.4s, v0.4s\n"
+ "fmin v16.4s, v16.4s, v0.4s\n"
+ "fmin v17.4s, v17.4s, v0.4s\n"
+ "fmax v15.4s, v15.4s, v1.4s\n"
+ "fmax v16.4s, v16.4s, v1.4s\n"
+ "fmax v17.4s, v17.4s, v1.4s\n"
+ "fmin v18.4s, v18.4s, v0.4s\n"
+ "fmin v19.4s, v19.4s, v0.4s\n"
+ "fmin v20.4s, v20.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v1.4s\n"
+ "fmax v19.4s, v19.4s, v1.4s\n"
+ "fmax v20.4s, v20.4s, v1.4s\n"
+ "fmin v21.4s, v21.4s, v0.4s\n"
+ "fmin v22.4s, v22.4s, v0.4s\n"
+ "fmin v23.4s, v23.4s, v0.4s\n"
+ "fmax v21.4s, v21.4s, v1.4s\n"
+ "fmax v22.4s, v22.4s, v1.4s\n"
+ "fmax v23.4s, v23.4s, v1.4s\n"
"130:" // Height 4: No activation
"cmp x11, #0x10\n"
"bge 139f\n"
"tbz x11, #3, 134f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v9.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v21.4s }, [x23], #0x10\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v9.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v13.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v17.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v21.4s }, [x22], #0x10\n"
"tbz x11, #2, 132f\n"
- "st1 { v10.4s }, [x9], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
- "st1 { v22.4s }, [x23], #0x10\n"
+ "st1 { v10.4s }, [x28], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v18.4s }, [x23], #0x10\n"
+ "st1 { v22.4s }, [x22], #0x10\n"
"tbz x11, #1, 131f\n"
- "str d11, [x9], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
+ "str d11, [x28], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "str d23, [x22], #0x8\n"
"tbz x11, #0, 138f\n"
- "st1 { v11.s }[2], [x9]\n"
- "st1 { v15.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
- "st1 { v23.s }[2], [x23]\n"
+ "st1 { v11.s }[2], [x28]\n"
+ "st1 { v15.s }[2], [x24]\n"
+ "st1 { v19.s }[2], [x23]\n"
+ "st1 { v23.s }[2], [x22]\n"
"b 138f\n"
"131:" // Height 4: Partial direct writeback: partial_1_12
"tbz x11, #0, 138f\n"
- "str s11, [x9, #0x0]\n"
- "str s15, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
- "str s23, [x23, #0x0]\n"
+ "str s11, [x28, #0x0]\n"
+ "str s15, [x24, #0x0]\n"
+ "str s19, [x23, #0x0]\n"
+ "str s23, [x22, #0x0]\n"
"b 138f\n"
"132:" // Height 4: Partial direct writeback: partial_2_8
"tbz x11, #1, 133f\n"
- "str d10, [x9], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
+ "str d10, [x28], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
+ "str d22, [x22], #0x8\n"
"tbz x11, #0, 138f\n"
- "st1 { v10.s }[2], [x9]\n"
- "st1 { v14.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
- "st1 { v22.s }[2], [x23]\n"
+ "st1 { v10.s }[2], [x28]\n"
+ "st1 { v14.s }[2], [x24]\n"
+ "st1 { v18.s }[2], [x23]\n"
+ "st1 { v22.s }[2], [x22]\n"
"b 138f\n"
"133:" // Height 4: Partial direct writeback: partial_1_8
"tbz x11, #0, 138f\n"
- "str s10, [x9, #0x0]\n"
- "str s14, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
- "str s22, [x23, #0x0]\n"
+ "str s10, [x28, #0x0]\n"
+ "str s14, [x24, #0x0]\n"
+ "str s18, [x23, #0x0]\n"
+ "str s22, [x22, #0x0]\n"
"b 138f\n"
"134:" // Height 4: Partial direct writeback: partial_4_0
"tbz x11, #2, 136f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
"tbz x11, #1, 135f\n"
- "str d9, [x9], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
+ "str d9, [x28], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
+ "str d21, [x22], #0x8\n"
"tbz x11, #0, 138f\n"
- "st1 { v9.s }[2], [x9]\n"
- "st1 { v13.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
- "st1 { v21.s }[2], [x23]\n"
+ "st1 { v9.s }[2], [x28]\n"
+ "st1 { v13.s }[2], [x24]\n"
+ "st1 { v17.s }[2], [x23]\n"
+ "st1 { v21.s }[2], [x22]\n"
"b 138f\n"
"135:" // Height 4: Partial direct writeback: partial_1_4
"tbz x11, #0, 138f\n"
- "str s9, [x9, #0x0]\n"
- "str s13, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
- "str s21, [x23, #0x0]\n"
+ "str s9, [x28, #0x0]\n"
+ "str s13, [x24, #0x0]\n"
+ "str s17, [x23, #0x0]\n"
+ "str s21, [x22, #0x0]\n"
"b 138f\n"
"136:" // Height 4: Partial direct writeback: partial_2_0
"tbz x11, #1, 137f\n"
- "str d8, [x9], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
+ "str d8, [x28], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
+ "str d20, [x22], #0x8\n"
"tbz x11, #0, 138f\n"
- "st1 { v8.s }[2], [x9]\n"
- "st1 { v12.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
- "st1 { v20.s }[2], [x23]\n"
+ "st1 { v8.s }[2], [x28]\n"
+ "st1 { v12.s }[2], [x24]\n"
+ "st1 { v16.s }[2], [x23]\n"
+ "st1 { v20.s }[2], [x22]\n"
"b 138f\n"
"137:" // Height 4: Partial direct writeback: partial_1_0
- "str s8, [x9, #0x0]\n"
- "str s12, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
- "str s20, [x23, #0x0]\n"
+ "str s8, [x28, #0x0]\n"
+ "str s12, [x24, #0x0]\n"
+ "str s16, [x23, #0x0]\n"
+ "str s20, [x22, #0x0]\n"
"138:" // Height 4: Partial direct writeback: Done
"b 140f\n"
"139:" // Height 4: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "str q20, [x22, #0x0]\n"
+ "str q21, [x22, #0x10]\n"
+ "str q22, [x22, #0x20]\n"
+ "str q23, [x22, #0x30]\n"
"140:" // Height 4: Writeback done
"subs x11, x11, #0x10\n"
"bgt 107b\n"
"b 212f\n"
"141:" // Height 5
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"142:" // Height 5: Column loop
- "cbz x12, 143f\n"
- "ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
+ "cbz x9, 143f\n"
+ "ldr q8, [x9, #0x0]\n"
"mov v12.16b, v8.16b\n"
+ "ldr q9, [x9, #0x10]\n"
+ "mov v16.16b, v8.16b\n"
+ "ldr q10, [x9, #0x20]\n"
+ "mov v20.16b, v8.16b\n"
+ "ldr q11, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "mov v24.16b, v8.16b\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
+ "mov v17.16b, v9.16b\n"
"mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
- "mov v16.16b, v8.16b\n"
- "mov v17.16b, v9.16b\n"
- "add x12, x12, #0x40\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
- "mov v20.16b, v8.16b\n"
"mov v21.16b, v9.16b\n"
"mov v22.16b, v10.16b\n"
"mov v23.16b, v11.16b\n"
- "mov v24.16b, v8.16b\n"
"mov v25.16b, v9.16b\n"
"mov v26.16b, v10.16b\n"
"mov v27.16b, v11.16b\n"
"b 154f\n"
"143:" // Height 5: no bias
"tbz %x[flags], #0, 153f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x11, #0x10\n"
- "add x22, x23, x20, LSL #2\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"bge 152f\n"
"tbz x11, #3, 147f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v24.4s }, [x21], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
"tbz x11, #2, 145f\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
- "ld1 { v26.4s }, [x22], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
+ "ld1 { v26.4s }, [x21], #0x10\n"
"tbz x11, #1, 144f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "mov x20, #0x38\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
+ "ldr d11, [x28], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d15, [x24], #0x8\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "ldr d27, [x21], #0x8\n"
"tbz x11, #0, 151f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
- "ld1 { v27.s }[2], [x22]\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v23.s }[2], [x22]\n"
+ "ld1 { v27.s }[2], [x21]\n"
"b 151f\n"
"144:" // Height 5: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x11, #0, 151f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
- "ldr s27, [x22, #0x0]\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s23, [x22, #0x0]\n"
+ "ldr s27, [x21, #0x0]\n"
"b 151f\n"
"145:" // Height 5: Partial accumulate: partial_2_8
"tbz x11, #1, 146f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "mov x20, #0x28\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "ldr d26, [x21], #0x8\n"
"tbz x11, #0, 151f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
- "ld1 { v26.s }[2], [x22]\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v22.s }[2], [x22]\n"
+ "ld1 { v26.s }[2], [x21]\n"
"b 151f\n"
"146:" // Height 5: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x11, #0, 151f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
- "ldr s26, [x22, #0x0]\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "ldr s22, [x22, #0x0]\n"
+ "ldr s26, [x21, #0x0]\n"
"b 151f\n"
"147:" // Height 5: Partial accumulate: partial_4_0
"tbz x11, #2, 149f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v24.4s }, [x21], #0x10\n"
"tbz x11, #1, 148f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "mov x20, #0x18\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
+ "ldr d9, [x28], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d13, [x24], #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d25, [x21], #0x8\n"
"tbz x11, #0, 151f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v21.s }[2], [x22]\n"
+ "ld1 { v25.s }[2], [x21]\n"
"b 151f\n"
"148:" // Height 5: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x11, #0, 151f\n"
- "ldr s9, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
+ "ldr s9, [x28, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s17, [x23, #0x0]\n"
+ "ldr s21, [x22, #0x0]\n"
+ "ldr s25, [x21, #0x0]\n"
"b 151f\n"
"149:" // Height 5: Partial accumulate: partial_2_0
"tbz x11, #1, 150f\n"
- "ldr d8, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d24, [x22], #0x8\n"
+ "ldr d8, [x28], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d16, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "ldr d24, [x21], #0x8\n"
"tbz x11, #0, 151f\n"
- "ld1 { v8.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v16.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
- "ld1 { v24.s }[2], [x22]\n"
+ "ld1 { v8.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v16.s }[2], [x23]\n"
+ "ld1 { v20.s }[2], [x22]\n"
+ "ld1 { v24.s }[2], [x21]\n"
"b 151f\n"
"150:" // Height 5: Partial accumulate: partial_1_0
- "ldr s8, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s16, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
- "ldr s24, [x22, #0x0]\n"
+ "ldr s8, [x28, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s12, [x24, #0x0]\n"
+ "ldr s16, [x23, #0x0]\n"
+ "ldr s20, [x22, #0x0]\n"
+ "ldr s24, [x21, #0x0]\n"
"151:" // Height 5: Partial accumulate: Done
- "sub x9, x9, x20\n"
+ "sub x28, x28, x19\n"
"b 154f\n"
"152:" // Height 5: full accumulate
- "ldr q8, [x9, #0x0]\n"
- "ldr q9, [x9, #0x10]\n"
- "ldr q10, [x9, #0x20]\n"
- "ldr q11, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q24, [x22, #0x0]\n"
- "ldr q25, [x22, #0x10]\n"
- "ldr q26, [x22, #0x20]\n"
- "ldr q27, [x22, #0x30]\n"
+ "ldr q8, [x28, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q10, [x28, #0x20]\n"
+ "ldr q11, [x28, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x0]\n"
+ "ldr q17, [x23, #0x10]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "ldr q19, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
+ "ldr q24, [x21, #0x0]\n"
+ "ldr q25, [x21, #0x10]\n"
+ "ldr q26, [x21, #0x20]\n"
+ "ldr q27, [x21, #0x30]\n"
"b 154f\n"
"153:" // Height 5: no accumulate
"movi v8.16b, #0x0\n"
@@ -2133,74 +2133,74 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"movi v26.16b, #0x0\n"
"movi v27.16b, #0x0\n"
"154:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"155:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 156f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 157f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
- "add x22, x22, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 157f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
"b 157f\n"
"156:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
"157:" // Height 5: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"blt 160f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x10\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q3, [x23, #0x0]\n"
- "ldr q4, [x22, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x10\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
"blt 159f\n"
"158:" // Height 5: Multiply loop: Main loop head
".inst 0x4f40f0c8 // bfdot v8.4s, v6.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "add x25, x25, #0x10\n"
".inst 0x4f41f0cc // bfdot v12.4s, v6.8h, v1.h[0]\n"
- "sub x27, x27, #0x8\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
".inst 0x4f42f0d0 // bfdot v16.4s, v6.8h, v2.h[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "add x23, x23, #0x10\n"
".inst 0x4f43f0d4 // bfdot v20.4s, v6.8h, v3.h[0]\n"
- "add x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "add x22, x22, #0x10\n"
".inst 0x4f44f0d8 // bfdot v24.4s, v6.8h, v4.h[0]\n"
"ldr q6, [x10, #0x20]\n"
+ "add x21, x21, #0x10\n"
".inst 0x4f40f0e9 // bfdot v9.4s, v7.8h, v0.h[0]\n"
- "add x23, x23, #0x10\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "sub x26, x26, #0x8\n"
".inst 0x4f41f0ed // bfdot v13.4s, v7.8h, v1.h[0]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
+ "cmp x26, #0x10\n"
".inst 0x4f42f0f1 // bfdot v17.4s, v7.8h, v2.h[0]\n"
- "add x22, x22, #0x10\n"
- "cmp x27, #0x10\n"
".inst 0x4f43f0f5 // bfdot v21.4s, v7.8h, v3.h[0]\n"
".inst 0x4f44f0f9 // bfdot v25.4s, v7.8h, v4.h[0]\n"
"ldr q7, [x10, #0x30]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f40f0ca // bfdot v10.4s, v6.8h, v0.h[0]\n"
".inst 0x4f41f0ce // bfdot v14.4s, v6.8h, v1.h[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f42f0d2 // bfdot v18.4s, v6.8h, v2.h[0]\n"
".inst 0x4f43f0d6 // bfdot v22.4s, v6.8h, v3.h[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4f44f0da // bfdot v26.4s, v6.8h, v4.h[0]\n"
"ldr q6, [x10, #0x40]\n"
".inst 0x4f40f0eb // bfdot v11.4s, v7.8h, v0.h[0]\n"
@@ -2277,45 +2277,45 @@ void a64_hybrid_bf16fp32_dot_6x16 (
".inst 0x4f64f8da // bfdot v26.4s, v6.8h, v4.h[3]\n"
"ldr q6, [x10, #0x0]\n"
".inst 0x4f60f8eb // bfdot v11.4s, v7.8h, v0.h[3]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
".inst 0x4f61f8ef // bfdot v15.4s, v7.8h, v1.h[3]\n"
- "ldr q1, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
".inst 0x4f62f8f3 // bfdot v19.4s, v7.8h, v2.h[3]\n"
- "ldr q2, [x24, #0x0]\n"
+ "ldr q2, [x23, #0x0]\n"
".inst 0x4f63f8f7 // bfdot v23.4s, v7.8h, v3.h[3]\n"
- "ldr q3, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
".inst 0x4f64f8fb // bfdot v27.4s, v7.8h, v4.h[3]\n"
- "ldr q4, [x22, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q4, [x21, #0x0]\n"
"bge 158b\n"
"159:" // Height 5: Multiply loop: Single iteration only
".inst 0x4f40f0c8 // bfdot v8.4s, v6.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "sub x26, x26, #0x8\n"
".inst 0x4f41f0cc // bfdot v12.4s, v6.8h, v1.h[0]\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f42f0d0 // bfdot v16.4s, v6.8h, v2.h[0]\n"
- ".inst 0x4f43f0d4 // bfdot v20.4s, v6.8h, v3.h[0]\n"
"add x24, x24, #0x10\n"
+ ".inst 0x4f43f0d4 // bfdot v20.4s, v6.8h, v3.h[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"add x23, x23, #0x10\n"
".inst 0x4f44f0d8 // bfdot v24.4s, v6.8h, v4.h[0]\n"
- "ldr q6, [x10, #0x20]\n"
- ".inst 0x4f40f0e9 // bfdot v9.4s, v7.8h, v0.h[0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"add x22, x22, #0x10\n"
+ ".inst 0x4f40f0e9 // bfdot v9.4s, v7.8h, v0.h[0]\n"
+ "ldr q6, [x10, #0x20]\n"
+ "add x21, x21, #0x10\n"
".inst 0x4f41f0ed // bfdot v13.4s, v7.8h, v1.h[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4f42f0f1 // bfdot v17.4s, v7.8h, v2.h[0]\n"
- "sub x27, x27, #0x8\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x4f43f0f5 // bfdot v21.4s, v7.8h, v3.h[0]\n"
".inst 0x4f44f0f9 // bfdot v25.4s, v7.8h, v4.h[0]\n"
"ldr q7, [x10, #0x30]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f40f0ca // bfdot v10.4s, v6.8h, v0.h[0]\n"
".inst 0x4f41f0ce // bfdot v14.4s, v6.8h, v1.h[0]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4f42f0d2 // bfdot v18.4s, v6.8h, v2.h[0]\n"
".inst 0x4f43f0d6 // bfdot v22.4s, v6.8h, v3.h[0]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4f44f0da // bfdot v26.4s, v6.8h, v4.h[0]\n"
"ldr q6, [x10, #0x40]\n"
".inst 0x4f40f0eb // bfdot v11.4s, v7.8h, v0.h[0]\n"
@@ -2396,21 +2396,21 @@ void a64_hybrid_bf16fp32_dot_6x16 (
".inst 0x4f63f8f7 // bfdot v23.4s, v7.8h, v3.h[3]\n"
".inst 0x4f64f8fb // bfdot v27.4s, v7.8h, v4.h[3]\n"
"160:" // Height 5: Multiply loop: Main loop skip
- "cbz x27, 164f\n"
- "cmp x27, #0x2\n"
+ "cbz x26, 164f\n"
+ "cmp x26, #0x2\n"
"blt 162f\n"
"161:" // Height 5: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr s1, [x25], #0x4\n"
- "sub x27, x27, #0x2\n"
- "cmp x27, #0x2\n"
- "ldr s2, [x24], #0x4\n"
- "ldr s3, [x23], #0x4\n"
- "ldr s4, [x22], #0x4\n"
+ "ldr s0, [x25], #0x4\n"
+ "sub x26, x26, #0x2\n"
+ "ldr s1, [x24], #0x4\n"
+ "cmp x26, #0x2\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr s3, [x22], #0x4\n"
+ "ldr s4, [x21], #0x4\n"
"ldr q6, [x10, #0x0]\n"
".inst 0x4f40f0c8 // bfdot v8.4s, v6.8h, v0.h[0]\n"
- ".inst 0x4f41f0cc // bfdot v12.4s, v6.8h, v1.h[0]\n"
"ldr q7, [x10, #0x10]\n"
+ ".inst 0x4f41f0cc // bfdot v12.4s, v6.8h, v1.h[0]\n"
".inst 0x4f42f0d0 // bfdot v16.4s, v6.8h, v2.h[0]\n"
".inst 0x4f43f0d4 // bfdot v20.4s, v6.8h, v3.h[0]\n"
".inst 0x4f44f0d8 // bfdot v24.4s, v6.8h, v4.h[0]\n"
@@ -2433,17 +2433,17 @@ void a64_hybrid_bf16fp32_dot_6x16 (
".inst 0x4f43f0f7 // bfdot v23.4s, v7.8h, v3.h[0]\n"
".inst 0x4f44f0fb // bfdot v27.4s, v7.8h, v4.h[0]\n"
"bge 161b\n"
+ "cbz x26, 164f\n"
"162:" // Height 5: Multiply loop: Skip odd blocks
- "cbz x27, 164f\n"
- "ldr h0, [x26, #0x0]\n"
- "ldr h1, [x25, #0x0]\n"
- "ldr h2, [x24, #0x0]\n"
- "ldr h3, [x23, #0x0]\n"
- "ldr h4, [x22, #0x0]\n"
+ "ldr h0, [x25, #0x0]\n"
+ "ldr h1, [x24, #0x0]\n"
+ "ldr h2, [x23, #0x0]\n"
+ "ldr h3, [x22, #0x0]\n"
+ "ldr h4, [x21, #0x0]\n"
"163:" // Height 5: Multiply loop: Ragged operand read: Done
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
".inst 0x4f40f0c8 // bfdot v8.4s, v6.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
".inst 0x4f41f0cc // bfdot v12.4s, v6.8h, v1.h[0]\n"
".inst 0x4f42f0d0 // bfdot v16.4s, v6.8h, v2.h[0]\n"
".inst 0x4f43f0d4 // bfdot v20.4s, v6.8h, v3.h[0]\n"
@@ -2467,410 +2467,410 @@ void a64_hybrid_bf16fp32_dot_6x16 (
".inst 0x4f43f0f7 // bfdot v23.4s, v7.8h, v3.h[0]\n"
".inst 0x4f44f0fb // bfdot v27.4s, v7.8h, v4.h[0]\n"
"164:" // Height 5: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 155b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "prfm pstl1keep, [x25, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x24, x28, x19, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
"tbz %x[flags], #1, 165f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v8.4s, v8.4s, v1.4s\n"
- "fmin v9.4s, v9.4s, v1.4s\n"
- "fmin v10.4s, v10.4s, v1.4s\n"
- "fmin v11.4s, v11.4s, v1.4s\n"
- "fmin v12.4s, v12.4s, v1.4s\n"
- "fmin v13.4s, v13.4s, v1.4s\n"
- "fmin v14.4s, v14.4s, v1.4s\n"
- "fmin v15.4s, v15.4s, v1.4s\n"
- "fmin v16.4s, v16.4s, v1.4s\n"
- "fmin v17.4s, v17.4s, v1.4s\n"
- "fmin v18.4s, v18.4s, v1.4s\n"
- "fmin v19.4s, v19.4s, v1.4s\n"
- "fmin v20.4s, v20.4s, v1.4s\n"
- "fmin v21.4s, v21.4s, v1.4s\n"
- "fmin v22.4s, v22.4s, v1.4s\n"
- "fmin v23.4s, v23.4s, v1.4s\n"
- "fmin v24.4s, v24.4s, v1.4s\n"
- "fmin v25.4s, v25.4s, v1.4s\n"
- "fmin v26.4s, v26.4s, v1.4s\n"
- "fmin v27.4s, v27.4s, v1.4s\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v15.4s, v15.4s, v0.4s\n"
- "fmax v16.4s, v16.4s, v0.4s\n"
- "fmax v17.4s, v17.4s, v0.4s\n"
- "fmax v18.4s, v18.4s, v0.4s\n"
- "fmax v19.4s, v19.4s, v0.4s\n"
- "fmax v20.4s, v20.4s, v0.4s\n"
- "fmax v21.4s, v21.4s, v0.4s\n"
- "fmax v22.4s, v22.4s, v0.4s\n"
- "fmax v23.4s, v23.4s, v0.4s\n"
- "fmax v24.4s, v24.4s, v0.4s\n"
- "fmax v25.4s, v25.4s, v0.4s\n"
- "fmax v26.4s, v26.4s, v0.4s\n"
- "fmax v27.4s, v27.4s, v0.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v8.4s, v8.4s, v0.4s\n"
+ "fmin v9.4s, v9.4s, v0.4s\n"
+ "fmin v10.4s, v10.4s, v0.4s\n"
+ "fmin v11.4s, v11.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v0.4s\n"
+ "fmin v13.4s, v13.4s, v0.4s\n"
+ "fmin v14.4s, v14.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmin v15.4s, v15.4s, v0.4s\n"
+ "fmin v16.4s, v16.4s, v0.4s\n"
+ "fmin v17.4s, v17.4s, v0.4s\n"
+ "fmax v15.4s, v15.4s, v1.4s\n"
+ "fmax v16.4s, v16.4s, v1.4s\n"
+ "fmax v17.4s, v17.4s, v1.4s\n"
+ "fmin v18.4s, v18.4s, v0.4s\n"
+ "fmin v19.4s, v19.4s, v0.4s\n"
+ "fmin v20.4s, v20.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v1.4s\n"
+ "fmax v19.4s, v19.4s, v1.4s\n"
+ "fmax v20.4s, v20.4s, v1.4s\n"
+ "fmin v21.4s, v21.4s, v0.4s\n"
+ "fmin v22.4s, v22.4s, v0.4s\n"
+ "fmin v23.4s, v23.4s, v0.4s\n"
+ "fmax v21.4s, v21.4s, v1.4s\n"
+ "fmax v22.4s, v22.4s, v1.4s\n"
+ "fmax v23.4s, v23.4s, v1.4s\n"
+ "fmin v24.4s, v24.4s, v0.4s\n"
+ "fmin v25.4s, v25.4s, v0.4s\n"
+ "fmin v26.4s, v26.4s, v0.4s\n"
+ "fmax v24.4s, v24.4s, v1.4s\n"
+ "fmax v25.4s, v25.4s, v1.4s\n"
+ "fmax v26.4s, v26.4s, v1.4s\n"
+ "fmin v27.4s, v27.4s, v0.4s\n"
+ "fmax v27.4s, v27.4s, v1.4s\n"
"165:" // Height 5: No activation
"cmp x11, #0x10\n"
"bge 174f\n"
"tbz x11, #3, 169f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v9.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v21.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
- "st1 { v25.4s }, [x22], #0x10\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v9.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v13.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v17.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v21.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
+ "st1 { v25.4s }, [x21], #0x10\n"
"tbz x11, #2, 167f\n"
- "st1 { v10.4s }, [x9], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
- "st1 { v22.4s }, [x23], #0x10\n"
- "st1 { v26.4s }, [x22], #0x10\n"
+ "st1 { v10.4s }, [x28], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v18.4s }, [x23], #0x10\n"
+ "st1 { v22.4s }, [x22], #0x10\n"
+ "st1 { v26.4s }, [x21], #0x10\n"
"tbz x11, #1, 166f\n"
- "str d11, [x9], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
+ "str d11, [x28], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "str d23, [x22], #0x8\n"
+ "str d27, [x21], #0x8\n"
"tbz x11, #0, 173f\n"
- "st1 { v11.s }[2], [x9]\n"
- "st1 { v15.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
- "st1 { v23.s }[2], [x23]\n"
- "st1 { v27.s }[2], [x22]\n"
+ "st1 { v11.s }[2], [x28]\n"
+ "st1 { v15.s }[2], [x24]\n"
+ "st1 { v19.s }[2], [x23]\n"
+ "st1 { v23.s }[2], [x22]\n"
+ "st1 { v27.s }[2], [x21]\n"
"b 173f\n"
"166:" // Height 5: Partial direct writeback: partial_1_12
"tbz x11, #0, 173f\n"
- "str s11, [x9, #0x0]\n"
- "str s15, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
- "str s23, [x23, #0x0]\n"
- "str s27, [x22, #0x0]\n"
+ "str s11, [x28, #0x0]\n"
+ "str s15, [x24, #0x0]\n"
+ "str s19, [x23, #0x0]\n"
+ "str s23, [x22, #0x0]\n"
+ "str s27, [x21, #0x0]\n"
"b 173f\n"
"167:" // Height 5: Partial direct writeback: partial_2_8
"tbz x11, #1, 168f\n"
- "str d10, [x9], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
+ "str d10, [x28], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
+ "str d22, [x22], #0x8\n"
+ "str d26, [x21], #0x8\n"
"tbz x11, #0, 173f\n"
- "st1 { v10.s }[2], [x9]\n"
- "st1 { v14.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
- "st1 { v22.s }[2], [x23]\n"
- "st1 { v26.s }[2], [x22]\n"
+ "st1 { v10.s }[2], [x28]\n"
+ "st1 { v14.s }[2], [x24]\n"
+ "st1 { v18.s }[2], [x23]\n"
+ "st1 { v22.s }[2], [x22]\n"
+ "st1 { v26.s }[2], [x21]\n"
"b 173f\n"
"168:" // Height 5: Partial direct writeback: partial_1_8
"tbz x11, #0, 173f\n"
- "str s10, [x9, #0x0]\n"
- "str s14, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
- "str s22, [x23, #0x0]\n"
- "str s26, [x22, #0x0]\n"
+ "str s10, [x28, #0x0]\n"
+ "str s14, [x24, #0x0]\n"
+ "str s18, [x23, #0x0]\n"
+ "str s22, [x22, #0x0]\n"
+ "str s26, [x21, #0x0]\n"
"b 173f\n"
"169:" // Height 5: Partial direct writeback: partial_4_0
"tbz x11, #2, 171f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
"tbz x11, #1, 170f\n"
- "str d9, [x9], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
+ "str d9, [x28], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
+ "str d21, [x22], #0x8\n"
+ "str d25, [x21], #0x8\n"
"tbz x11, #0, 173f\n"
- "st1 { v9.s }[2], [x9]\n"
- "st1 { v13.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
- "st1 { v21.s }[2], [x23]\n"
- "st1 { v25.s }[2], [x22]\n"
+ "st1 { v9.s }[2], [x28]\n"
+ "st1 { v13.s }[2], [x24]\n"
+ "st1 { v17.s }[2], [x23]\n"
+ "st1 { v21.s }[2], [x22]\n"
+ "st1 { v25.s }[2], [x21]\n"
"b 173f\n"
"170:" // Height 5: Partial direct writeback: partial_1_4
"tbz x11, #0, 173f\n"
- "str s9, [x9, #0x0]\n"
- "str s13, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
- "str s21, [x23, #0x0]\n"
- "str s25, [x22, #0x0]\n"
+ "str s9, [x28, #0x0]\n"
+ "str s13, [x24, #0x0]\n"
+ "str s17, [x23, #0x0]\n"
+ "str s21, [x22, #0x0]\n"
+ "str s25, [x21, #0x0]\n"
"b 173f\n"
"171:" // Height 5: Partial direct writeback: partial_2_0
"tbz x11, #1, 172f\n"
- "str d8, [x9], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
+ "str d8, [x28], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
+ "str d20, [x22], #0x8\n"
+ "str d24, [x21], #0x8\n"
"tbz x11, #0, 173f\n"
- "st1 { v8.s }[2], [x9]\n"
- "st1 { v12.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
- "st1 { v20.s }[2], [x23]\n"
- "st1 { v24.s }[2], [x22]\n"
+ "st1 { v8.s }[2], [x28]\n"
+ "st1 { v12.s }[2], [x24]\n"
+ "st1 { v16.s }[2], [x23]\n"
+ "st1 { v20.s }[2], [x22]\n"
+ "st1 { v24.s }[2], [x21]\n"
"b 173f\n"
"172:" // Height 5: Partial direct writeback: partial_1_0
- "str s8, [x9, #0x0]\n"
- "str s12, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
- "str s20, [x23, #0x0]\n"
- "str s24, [x22, #0x0]\n"
+ "str s8, [x28, #0x0]\n"
+ "str s12, [x24, #0x0]\n"
+ "str s16, [x23, #0x0]\n"
+ "str s20, [x22, #0x0]\n"
+ "str s24, [x21, #0x0]\n"
"173:" // Height 5: Partial direct writeback: Done
"b 175f\n"
"174:" // Height 5: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
- "str q24, [x22, #0x0]\n"
- "str q25, [x22, #0x10]\n"
- "str q26, [x22, #0x20]\n"
- "str q27, [x22, #0x30]\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "str q20, [x22, #0x0]\n"
+ "str q21, [x22, #0x10]\n"
+ "str q22, [x22, #0x20]\n"
+ "str q23, [x22, #0x30]\n"
+ "str q24, [x21, #0x0]\n"
+ "str q25, [x21, #0x10]\n"
+ "str q26, [x21, #0x20]\n"
+ "str q27, [x21, #0x30]\n"
"175:" // Height 5: Writeback done
"subs x11, x11, #0x10\n"
"bgt 142b\n"
"b 212f\n"
"176:" // Height 6
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x18\n"
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x19, #0x18\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"177:" // Height 6: Column loop
- "cbz x12, 178f\n"
- "ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
+ "cbz x9, 178f\n"
+ "ldr q8, [x9, #0x0]\n"
"mov v12.16b, v8.16b\n"
+ "ldr q9, [x9, #0x10]\n"
+ "mov v16.16b, v8.16b\n"
+ "ldr q10, [x9, #0x20]\n"
+ "mov v20.16b, v8.16b\n"
+ "ldr q11, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "mov v24.16b, v8.16b\n"
+ "mov v28.16b, v8.16b\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
"mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
- "mov v16.16b, v8.16b\n"
"mov v17.16b, v9.16b\n"
- "add x12, x12, #0x40\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
- "mov v20.16b, v8.16b\n"
"mov v21.16b, v9.16b\n"
"mov v22.16b, v10.16b\n"
"mov v23.16b, v11.16b\n"
- "mov v24.16b, v8.16b\n"
"mov v25.16b, v9.16b\n"
"mov v26.16b, v10.16b\n"
"mov v27.16b, v11.16b\n"
- "mov v28.16b, v8.16b\n"
"mov v29.16b, v9.16b\n"
"mov v30.16b, v10.16b\n"
"mov v31.16b, v11.16b\n"
"b 189f\n"
"178:" // Height 6: no bias
"tbz %x[flags], #0, 188f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x11, #0x10\n"
- "add x21, x22, x20, LSL #2\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"bge 187f\n"
"tbz x11, #3, 182f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
- "ld1 { v29.4s }, [x21], #0x10\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v24.4s }, [x21], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
+ "ld1 { v28.4s }, [x20], #0x10\n"
+ "ld1 { v29.4s }, [x20], #0x10\n"
"tbz x11, #2, 180f\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
- "ld1 { v26.4s }, [x22], #0x10\n"
- "ld1 { v30.4s }, [x21], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
+ "ld1 { v26.4s }, [x21], #0x10\n"
+ "ld1 { v30.4s }, [x20], #0x10\n"
"tbz x11, #1, 179f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "mov x20, #0x38\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
- "ldr d31, [x21], #0x8\n"
+ "ldr d11, [x28], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d15, [x24], #0x8\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "ldr d27, [x21], #0x8\n"
+ "ldr d31, [x20], #0x8\n"
"tbz x11, #0, 186f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
- "ld1 { v27.s }[2], [x22]\n"
- "ld1 { v31.s }[2], [x21]\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v23.s }[2], [x22]\n"
+ "ld1 { v27.s }[2], [x21]\n"
+ "ld1 { v31.s }[2], [x20]\n"
"b 186f\n"
"179:" // Height 6: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x11, #0, 186f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
- "ldr s27, [x22, #0x0]\n"
- "ldr s31, [x21, #0x0]\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s23, [x22, #0x0]\n"
+ "ldr s27, [x21, #0x0]\n"
+ "ldr s31, [x20, #0x0]\n"
"b 186f\n"
"180:" // Height 6: Partial accumulate: partial_2_8
"tbz x11, #1, 181f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "mov x20, #0x28\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
- "ldr d30, [x21], #0x8\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "ldr d26, [x21], #0x8\n"
+ "ldr d30, [x20], #0x8\n"
"tbz x11, #0, 186f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
- "ld1 { v26.s }[2], [x22]\n"
- "ld1 { v30.s }[2], [x21]\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v22.s }[2], [x22]\n"
+ "ld1 { v26.s }[2], [x21]\n"
+ "ld1 { v30.s }[2], [x20]\n"
"b 186f\n"
"181:" // Height 6: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x11, #0, 186f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
- "ldr s26, [x22, #0x0]\n"
- "ldr s30, [x21, #0x0]\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "ldr s22, [x22, #0x0]\n"
+ "ldr s26, [x21, #0x0]\n"
+ "ldr s30, [x20, #0x0]\n"
"b 186f\n"
"182:" // Height 6: Partial accumulate: partial_4_0
"tbz x11, #2, 184f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v24.4s }, [x21], #0x10\n"
+ "ld1 { v28.4s }, [x20], #0x10\n"
"tbz x11, #1, 183f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "mov x20, #0x18\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
- "ldr d29, [x21], #0x8\n"
+ "ldr d9, [x28], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d13, [x24], #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d25, [x21], #0x8\n"
+ "ldr d29, [x20], #0x8\n"
"tbz x11, #0, 186f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
- "ld1 { v29.s }[2], [x21]\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v21.s }[2], [x22]\n"
+ "ld1 { v25.s }[2], [x21]\n"
+ "ld1 { v29.s }[2], [x20]\n"
"b 186f\n"
"183:" // Height 6: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x11, #0, 186f\n"
- "ldr s9, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
- "ldr s29, [x21, #0x0]\n"
+ "ldr s9, [x28, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s17, [x23, #0x0]\n"
+ "ldr s21, [x22, #0x0]\n"
+ "ldr s25, [x21, #0x0]\n"
+ "ldr s29, [x20, #0x0]\n"
"b 186f\n"
"184:" // Height 6: Partial accumulate: partial_2_0
"tbz x11, #1, 185f\n"
- "ldr d8, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d24, [x22], #0x8\n"
- "ldr d28, [x21], #0x8\n"
+ "ldr d8, [x28], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d16, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "ldr d24, [x21], #0x8\n"
+ "ldr d28, [x20], #0x8\n"
"tbz x11, #0, 186f\n"
- "ld1 { v8.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v16.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
- "ld1 { v24.s }[2], [x22]\n"
- "ld1 { v28.s }[2], [x21]\n"
+ "ld1 { v8.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v16.s }[2], [x23]\n"
+ "ld1 { v20.s }[2], [x22]\n"
+ "ld1 { v24.s }[2], [x21]\n"
+ "ld1 { v28.s }[2], [x20]\n"
"b 186f\n"
"185:" // Height 6: Partial accumulate: partial_1_0
- "ldr s8, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s16, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
- "ldr s24, [x22, #0x0]\n"
- "ldr s28, [x21, #0x0]\n"
+ "ldr s8, [x28, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s12, [x24, #0x0]\n"
+ "ldr s16, [x23, #0x0]\n"
+ "ldr s20, [x22, #0x0]\n"
+ "ldr s24, [x21, #0x0]\n"
+ "ldr s28, [x20, #0x0]\n"
"186:" // Height 6: Partial accumulate: Done
- "sub x9, x9, x20\n"
+ "sub x28, x28, x19\n"
"b 189f\n"
"187:" // Height 6: full accumulate
- "ldr q8, [x9, #0x0]\n"
- "ldr q9, [x9, #0x10]\n"
- "ldr q10, [x9, #0x20]\n"
- "ldr q11, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q24, [x22, #0x0]\n"
- "ldr q25, [x22, #0x10]\n"
- "ldr q26, [x22, #0x20]\n"
- "ldr q27, [x22, #0x30]\n"
- "ldr q28, [x21, #0x0]\n"
- "ldr q29, [x21, #0x10]\n"
- "ldr q30, [x21, #0x20]\n"
- "ldr q31, [x21, #0x30]\n"
+ "ldr q8, [x28, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q10, [x28, #0x20]\n"
+ "ldr q11, [x28, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x0]\n"
+ "ldr q17, [x23, #0x10]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "ldr q19, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
+ "ldr q24, [x21, #0x0]\n"
+ "ldr q25, [x21, #0x10]\n"
+ "ldr q26, [x21, #0x20]\n"
+ "ldr q27, [x21, #0x30]\n"
+ "ldr q28, [x20, #0x0]\n"
+ "ldr q29, [x20, #0x10]\n"
+ "ldr q30, [x20, #0x20]\n"
+ "ldr q31, [x20, #0x30]\n"
"b 189f\n"
"188:" // Height 6: no accumulate
"movi v8.16b, #0x0\n"
@@ -2898,82 +2898,82 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"movi v30.16b, #0x0\n"
"movi v31.16b, #0x0\n"
"189:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"190:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 191f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 192f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
- "add x22, x22, x20, LSL #1\n"
- "add x21, x21, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 192f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
+ "add x20, x20, x19, LSL #1\n"
"b 192f\n"
"191:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
"192:" // Height 6: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"blt 195f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x10\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q3, [x23, #0x0]\n"
- "ldr q4, [x22, #0x0]\n"
- "ldr q5, [x21, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x10\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
+ "ldr q5, [x20, #0x0]\n"
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
"blt 194f\n"
"193:" // Height 6: Multiply loop: Main loop head
".inst 0x4f40f0c8 // bfdot v8.4s, v6.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "add x25, x25, #0x10\n"
".inst 0x4f41f0cc // bfdot v12.4s, v6.8h, v1.h[0]\n"
- "sub x27, x27, #0x8\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
".inst 0x4f42f0d0 // bfdot v16.4s, v6.8h, v2.h[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "add x23, x23, #0x10\n"
".inst 0x4f43f0d4 // bfdot v20.4s, v6.8h, v3.h[0]\n"
- "add x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "add x22, x22, #0x10\n"
".inst 0x4f44f0d8 // bfdot v24.4s, v6.8h, v4.h[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "add x21, x21, #0x10\n"
".inst 0x4f45f0dc // bfdot v28.4s, v6.8h, v5.h[0]\n"
"ldr q6, [x10, #0x20]\n"
- "add x23, x23, #0x10\n"
+ "add x20, x20, #0x10\n"
".inst 0x4f40f0e9 // bfdot v9.4s, v7.8h, v0.h[0]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
+ "sub x26, x26, #0x8\n"
".inst 0x4f41f0ed // bfdot v13.4s, v7.8h, v1.h[0]\n"
- "add x22, x22, #0x10\n"
- "add x21, x21, #0x10\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
+ "cmp x26, #0x10\n"
".inst 0x4f42f0f1 // bfdot v17.4s, v7.8h, v2.h[0]\n"
".inst 0x4f43f0f5 // bfdot v21.4s, v7.8h, v3.h[0]\n"
- "cmp x27, #0x10\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f44f0f9 // bfdot v25.4s, v7.8h, v4.h[0]\n"
".inst 0x4f45f0fd // bfdot v29.4s, v7.8h, v5.h[0]\n"
"ldr q7, [x10, #0x30]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f40f0ca // bfdot v10.4s, v6.8h, v0.h[0]\n"
".inst 0x4f41f0ce // bfdot v14.4s, v6.8h, v1.h[0]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4f42f0d2 // bfdot v18.4s, v6.8h, v2.h[0]\n"
".inst 0x4f43f0d6 // bfdot v22.4s, v6.8h, v3.h[0]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x4f44f0da // bfdot v26.4s, v6.8h, v4.h[0]\n"
".inst 0x4f45f0de // bfdot v30.4s, v6.8h, v5.h[0]\n"
"ldr q6, [x10, #0x40]\n"
@@ -3063,51 +3063,51 @@ void a64_hybrid_bf16fp32_dot_6x16 (
".inst 0x4f65f8de // bfdot v30.4s, v6.8h, v5.h[3]\n"
"ldr q6, [x10, #0x0]\n"
".inst 0x4f60f8eb // bfdot v11.4s, v7.8h, v0.h[3]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
".inst 0x4f61f8ef // bfdot v15.4s, v7.8h, v1.h[3]\n"
- "ldr q1, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
".inst 0x4f62f8f3 // bfdot v19.4s, v7.8h, v2.h[3]\n"
- "ldr q2, [x24, #0x0]\n"
+ "ldr q2, [x23, #0x0]\n"
".inst 0x4f63f8f7 // bfdot v23.4s, v7.8h, v3.h[3]\n"
- "ldr q3, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
".inst 0x4f64f8fb // bfdot v27.4s, v7.8h, v4.h[3]\n"
- "ldr q4, [x22, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
".inst 0x4f65f8ff // bfdot v31.4s, v7.8h, v5.h[3]\n"
- "ldr q5, [x21, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q5, [x20, #0x0]\n"
"bge 193b\n"
"194:" // Height 6: Multiply loop: Single iteration only
".inst 0x4f40f0c8 // bfdot v8.4s, v6.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "sub x26, x26, #0x8\n"
".inst 0x4f41f0cc // bfdot v12.4s, v6.8h, v1.h[0]\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f42f0d0 // bfdot v16.4s, v6.8h, v2.h[0]\n"
- ".inst 0x4f43f0d4 // bfdot v20.4s, v6.8h, v3.h[0]\n"
"add x24, x24, #0x10\n"
+ ".inst 0x4f43f0d4 // bfdot v20.4s, v6.8h, v3.h[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"add x23, x23, #0x10\n"
".inst 0x4f44f0d8 // bfdot v24.4s, v6.8h, v4.h[0]\n"
- ".inst 0x4f45f0dc // bfdot v28.4s, v6.8h, v5.h[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"add x22, x22, #0x10\n"
+ ".inst 0x4f45f0dc // bfdot v28.4s, v6.8h, v5.h[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "add x21, x21, #0x10\n"
".inst 0x4f40f0e9 // bfdot v9.4s, v7.8h, v0.h[0]\n"
+ "ldr q6, [x10, #0x20]\n"
+ "add x20, x20, #0x10\n"
".inst 0x4f41f0ed // bfdot v13.4s, v7.8h, v1.h[0]\n"
- "add x21, x21, #0x10\n"
- "sub x27, x27, #0x8\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x4f42f0f1 // bfdot v17.4s, v7.8h, v2.h[0]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
".inst 0x4f43f0f5 // bfdot v21.4s, v7.8h, v3.h[0]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f44f0f9 // bfdot v25.4s, v7.8h, v4.h[0]\n"
".inst 0x4f45f0fd // bfdot v29.4s, v7.8h, v5.h[0]\n"
"ldr q7, [x10, #0x30]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f40f0ca // bfdot v10.4s, v6.8h, v0.h[0]\n"
".inst 0x4f41f0ce // bfdot v14.4s, v6.8h, v1.h[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4f42f0d2 // bfdot v18.4s, v6.8h, v2.h[0]\n"
".inst 0x4f43f0d6 // bfdot v22.4s, v6.8h, v3.h[0]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x4f44f0da // bfdot v26.4s, v6.8h, v4.h[0]\n"
".inst 0x4f45f0de // bfdot v30.4s, v6.8h, v5.h[0]\n"
"ldr q6, [x10, #0x40]\n"
@@ -3202,21 +3202,21 @@ void a64_hybrid_bf16fp32_dot_6x16 (
".inst 0x4f64f8fb // bfdot v27.4s, v7.8h, v4.h[3]\n"
".inst 0x4f65f8ff // bfdot v31.4s, v7.8h, v5.h[3]\n"
"195:" // Height 6: Multiply loop: Main loop skip
- "cbz x27, 199f\n"
- "cmp x27, #0x2\n"
+ "cbz x26, 199f\n"
+ "cmp x26, #0x2\n"
"blt 197f\n"
"196:" // Height 6: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr s1, [x25], #0x4\n"
- "sub x27, x27, #0x2\n"
- "cmp x27, #0x2\n"
- "ldr s2, [x24], #0x4\n"
- "ldr s3, [x23], #0x4\n"
- "ldr s4, [x22], #0x4\n"
- "ldr s5, [x21], #0x4\n"
+ "ldr s0, [x25], #0x4\n"
+ "sub x26, x26, #0x2\n"
+ "ldr s1, [x24], #0x4\n"
+ "cmp x26, #0x2\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr s3, [x22], #0x4\n"
+ "ldr s4, [x21], #0x4\n"
+ "ldr s5, [x20], #0x4\n"
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
".inst 0x4f40f0c8 // bfdot v8.4s, v6.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
".inst 0x4f41f0cc // bfdot v12.4s, v6.8h, v1.h[0]\n"
".inst 0x4f42f0d0 // bfdot v16.4s, v6.8h, v2.h[0]\n"
".inst 0x4f43f0d4 // bfdot v20.4s, v6.8h, v3.h[0]\n"
@@ -3244,18 +3244,18 @@ void a64_hybrid_bf16fp32_dot_6x16 (
".inst 0x4f44f0fb // bfdot v27.4s, v7.8h, v4.h[0]\n"
".inst 0x4f45f0ff // bfdot v31.4s, v7.8h, v5.h[0]\n"
"bge 196b\n"
+ "cbz x26, 199f\n"
"197:" // Height 6: Multiply loop: Skip odd blocks
- "cbz x27, 199f\n"
- "ldr h0, [x26, #0x0]\n"
- "ldr h1, [x25, #0x0]\n"
- "ldr h2, [x24, #0x0]\n"
- "ldr h3, [x23, #0x0]\n"
- "ldr h4, [x22, #0x0]\n"
- "ldr h5, [x21, #0x0]\n"
+ "ldr h0, [x25, #0x0]\n"
+ "ldr h1, [x24, #0x0]\n"
+ "ldr h2, [x23, #0x0]\n"
+ "ldr h3, [x22, #0x0]\n"
+ "ldr h4, [x21, #0x0]\n"
+ "ldr h5, [x20, #0x0]\n"
"198:" // Height 6: Multiply loop: Ragged operand read: Done
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
".inst 0x4f40f0c8 // bfdot v8.4s, v6.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
".inst 0x4f41f0cc // bfdot v12.4s, v6.8h, v1.h[0]\n"
".inst 0x4f42f0d0 // bfdot v16.4s, v6.8h, v2.h[0]\n"
".inst 0x4f43f0d4 // bfdot v20.4s, v6.8h, v3.h[0]\n"
@@ -3283,249 +3283,249 @@ void a64_hybrid_bf16fp32_dot_6x16 (
".inst 0x4f44f0fb // bfdot v27.4s, v7.8h, v4.h[0]\n"
".inst 0x4f45f0ff // bfdot v31.4s, v7.8h, v5.h[0]\n"
"199:" // Height 6: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 190b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "prfm pstl1keep, [x25, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x24, x28, x19, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
- "add x21, x22, x20, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #2\n"
"prfm pstl1keep, [x21, #0x0]\n"
+ "add x20, x21, x19, LSL #2\n"
+ "prfm pstl1keep, [x20, #0x0]\n"
"tbz %x[flags], #1, 200f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v8.4s, v8.4s, v1.4s\n"
- "fmin v9.4s, v9.4s, v1.4s\n"
- "fmin v10.4s, v10.4s, v1.4s\n"
- "fmin v11.4s, v11.4s, v1.4s\n"
- "fmin v12.4s, v12.4s, v1.4s\n"
- "fmin v13.4s, v13.4s, v1.4s\n"
- "fmin v14.4s, v14.4s, v1.4s\n"
- "fmin v15.4s, v15.4s, v1.4s\n"
- "fmin v16.4s, v16.4s, v1.4s\n"
- "fmin v17.4s, v17.4s, v1.4s\n"
- "fmin v18.4s, v18.4s, v1.4s\n"
- "fmin v19.4s, v19.4s, v1.4s\n"
- "fmin v20.4s, v20.4s, v1.4s\n"
- "fmin v21.4s, v21.4s, v1.4s\n"
- "fmin v22.4s, v22.4s, v1.4s\n"
- "fmin v23.4s, v23.4s, v1.4s\n"
- "fmin v24.4s, v24.4s, v1.4s\n"
- "fmin v25.4s, v25.4s, v1.4s\n"
- "fmin v26.4s, v26.4s, v1.4s\n"
- "fmin v27.4s, v27.4s, v1.4s\n"
- "fmin v28.4s, v28.4s, v1.4s\n"
- "fmin v29.4s, v29.4s, v1.4s\n"
- "fmin v30.4s, v30.4s, v1.4s\n"
- "fmin v31.4s, v31.4s, v1.4s\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v15.4s, v15.4s, v0.4s\n"
- "fmax v16.4s, v16.4s, v0.4s\n"
- "fmax v17.4s, v17.4s, v0.4s\n"
- "fmax v18.4s, v18.4s, v0.4s\n"
- "fmax v19.4s, v19.4s, v0.4s\n"
- "fmax v20.4s, v20.4s, v0.4s\n"
- "fmax v21.4s, v21.4s, v0.4s\n"
- "fmax v22.4s, v22.4s, v0.4s\n"
- "fmax v23.4s, v23.4s, v0.4s\n"
- "fmax v24.4s, v24.4s, v0.4s\n"
- "fmax v25.4s, v25.4s, v0.4s\n"
- "fmax v26.4s, v26.4s, v0.4s\n"
- "fmax v27.4s, v27.4s, v0.4s\n"
- "fmax v28.4s, v28.4s, v0.4s\n"
- "fmax v29.4s, v29.4s, v0.4s\n"
- "fmax v30.4s, v30.4s, v0.4s\n"
- "fmax v31.4s, v31.4s, v0.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v8.4s, v8.4s, v0.4s\n"
+ "fmin v9.4s, v9.4s, v0.4s\n"
+ "fmin v10.4s, v10.4s, v0.4s\n"
+ "fmin v11.4s, v11.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v0.4s\n"
+ "fmin v13.4s, v13.4s, v0.4s\n"
+ "fmin v14.4s, v14.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmin v15.4s, v15.4s, v0.4s\n"
+ "fmin v16.4s, v16.4s, v0.4s\n"
+ "fmin v17.4s, v17.4s, v0.4s\n"
+ "fmax v15.4s, v15.4s, v1.4s\n"
+ "fmax v16.4s, v16.4s, v1.4s\n"
+ "fmax v17.4s, v17.4s, v1.4s\n"
+ "fmin v18.4s, v18.4s, v0.4s\n"
+ "fmin v19.4s, v19.4s, v0.4s\n"
+ "fmin v20.4s, v20.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v1.4s\n"
+ "fmax v19.4s, v19.4s, v1.4s\n"
+ "fmax v20.4s, v20.4s, v1.4s\n"
+ "fmin v21.4s, v21.4s, v0.4s\n"
+ "fmin v22.4s, v22.4s, v0.4s\n"
+ "fmin v23.4s, v23.4s, v0.4s\n"
+ "fmax v21.4s, v21.4s, v1.4s\n"
+ "fmax v22.4s, v22.4s, v1.4s\n"
+ "fmax v23.4s, v23.4s, v1.4s\n"
+ "fmin v24.4s, v24.4s, v0.4s\n"
+ "fmin v25.4s, v25.4s, v0.4s\n"
+ "fmin v26.4s, v26.4s, v0.4s\n"
+ "fmax v24.4s, v24.4s, v1.4s\n"
+ "fmax v25.4s, v25.4s, v1.4s\n"
+ "fmax v26.4s, v26.4s, v1.4s\n"
+ "fmin v27.4s, v27.4s, v0.4s\n"
+ "fmin v28.4s, v28.4s, v0.4s\n"
+ "fmin v29.4s, v29.4s, v0.4s\n"
+ "fmax v27.4s, v27.4s, v1.4s\n"
+ "fmax v28.4s, v28.4s, v1.4s\n"
+ "fmax v29.4s, v29.4s, v1.4s\n"
+ "fmin v30.4s, v30.4s, v0.4s\n"
+ "fmin v31.4s, v31.4s, v0.4s\n"
+ "fmax v30.4s, v30.4s, v1.4s\n"
+ "fmax v31.4s, v31.4s, v1.4s\n"
"200:" // Height 6: No activation
"cmp x11, #0x10\n"
"bge 209f\n"
"tbz x11, #3, 204f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v9.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v21.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
- "st1 { v25.4s }, [x22], #0x10\n"
- "st1 { v28.4s }, [x21], #0x10\n"
- "st1 { v29.4s }, [x21], #0x10\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v9.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v13.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v17.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v21.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
+ "st1 { v25.4s }, [x21], #0x10\n"
+ "st1 { v28.4s }, [x20], #0x10\n"
+ "st1 { v29.4s }, [x20], #0x10\n"
"tbz x11, #2, 202f\n"
- "st1 { v10.4s }, [x9], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
- "st1 { v22.4s }, [x23], #0x10\n"
- "st1 { v26.4s }, [x22], #0x10\n"
- "st1 { v30.4s }, [x21], #0x10\n"
+ "st1 { v10.4s }, [x28], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v18.4s }, [x23], #0x10\n"
+ "st1 { v22.4s }, [x22], #0x10\n"
+ "st1 { v26.4s }, [x21], #0x10\n"
+ "st1 { v30.4s }, [x20], #0x10\n"
"tbz x11, #1, 201f\n"
- "str d11, [x9], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
- "str d31, [x21], #0x8\n"
+ "str d11, [x28], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "str d23, [x22], #0x8\n"
+ "str d27, [x21], #0x8\n"
+ "str d31, [x20], #0x8\n"
"tbz x11, #0, 208f\n"
- "st1 { v11.s }[2], [x9]\n"
- "st1 { v15.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
- "st1 { v23.s }[2], [x23]\n"
- "st1 { v27.s }[2], [x22]\n"
- "st1 { v31.s }[2], [x21]\n"
+ "st1 { v11.s }[2], [x28]\n"
+ "st1 { v15.s }[2], [x24]\n"
+ "st1 { v19.s }[2], [x23]\n"
+ "st1 { v23.s }[2], [x22]\n"
+ "st1 { v27.s }[2], [x21]\n"
+ "st1 { v31.s }[2], [x20]\n"
"b 208f\n"
"201:" // Height 6: Partial direct writeback: partial_1_12
"tbz x11, #0, 208f\n"
- "str s11, [x9, #0x0]\n"
- "str s15, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
- "str s23, [x23, #0x0]\n"
- "str s27, [x22, #0x0]\n"
- "str s31, [x21, #0x0]\n"
+ "str s11, [x28, #0x0]\n"
+ "str s15, [x24, #0x0]\n"
+ "str s19, [x23, #0x0]\n"
+ "str s23, [x22, #0x0]\n"
+ "str s27, [x21, #0x0]\n"
+ "str s31, [x20, #0x0]\n"
"b 208f\n"
"202:" // Height 6: Partial direct writeback: partial_2_8
"tbz x11, #1, 203f\n"
- "str d10, [x9], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
- "str d30, [x21], #0x8\n"
+ "str d10, [x28], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
+ "str d22, [x22], #0x8\n"
+ "str d26, [x21], #0x8\n"
+ "str d30, [x20], #0x8\n"
"tbz x11, #0, 208f\n"
- "st1 { v10.s }[2], [x9]\n"
- "st1 { v14.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
- "st1 { v22.s }[2], [x23]\n"
- "st1 { v26.s }[2], [x22]\n"
- "st1 { v30.s }[2], [x21]\n"
+ "st1 { v10.s }[2], [x28]\n"
+ "st1 { v14.s }[2], [x24]\n"
+ "st1 { v18.s }[2], [x23]\n"
+ "st1 { v22.s }[2], [x22]\n"
+ "st1 { v26.s }[2], [x21]\n"
+ "st1 { v30.s }[2], [x20]\n"
"b 208f\n"
"203:" // Height 6: Partial direct writeback: partial_1_8
"tbz x11, #0, 208f\n"
- "str s10, [x9, #0x0]\n"
- "str s14, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
- "str s22, [x23, #0x0]\n"
- "str s26, [x22, #0x0]\n"
- "str s30, [x21, #0x0]\n"
+ "str s10, [x28, #0x0]\n"
+ "str s14, [x24, #0x0]\n"
+ "str s18, [x23, #0x0]\n"
+ "str s22, [x22, #0x0]\n"
+ "str s26, [x21, #0x0]\n"
+ "str s30, [x20, #0x0]\n"
"b 208f\n"
"204:" // Height 6: Partial direct writeback: partial_4_0
"tbz x11, #2, 206f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
- "st1 { v28.4s }, [x21], #0x10\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
+ "st1 { v28.4s }, [x20], #0x10\n"
"tbz x11, #1, 205f\n"
- "str d9, [x9], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
- "str d29, [x21], #0x8\n"
+ "str d9, [x28], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
+ "str d21, [x22], #0x8\n"
+ "str d25, [x21], #0x8\n"
+ "str d29, [x20], #0x8\n"
"tbz x11, #0, 208f\n"
- "st1 { v9.s }[2], [x9]\n"
- "st1 { v13.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
- "st1 { v21.s }[2], [x23]\n"
- "st1 { v25.s }[2], [x22]\n"
- "st1 { v29.s }[2], [x21]\n"
+ "st1 { v9.s }[2], [x28]\n"
+ "st1 { v13.s }[2], [x24]\n"
+ "st1 { v17.s }[2], [x23]\n"
+ "st1 { v21.s }[2], [x22]\n"
+ "st1 { v25.s }[2], [x21]\n"
+ "st1 { v29.s }[2], [x20]\n"
"b 208f\n"
"205:" // Height 6: Partial direct writeback: partial_1_4
"tbz x11, #0, 208f\n"
- "str s9, [x9, #0x0]\n"
- "str s13, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
- "str s21, [x23, #0x0]\n"
- "str s25, [x22, #0x0]\n"
- "str s29, [x21, #0x0]\n"
+ "str s9, [x28, #0x0]\n"
+ "str s13, [x24, #0x0]\n"
+ "str s17, [x23, #0x0]\n"
+ "str s21, [x22, #0x0]\n"
+ "str s25, [x21, #0x0]\n"
+ "str s29, [x20, #0x0]\n"
"b 208f\n"
"206:" // Height 6: Partial direct writeback: partial_2_0
"tbz x11, #1, 207f\n"
- "str d8, [x9], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
- "str d28, [x21], #0x8\n"
+ "str d8, [x28], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
+ "str d20, [x22], #0x8\n"
+ "str d24, [x21], #0x8\n"
+ "str d28, [x20], #0x8\n"
"tbz x11, #0, 208f\n"
- "st1 { v8.s }[2], [x9]\n"
- "st1 { v12.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
- "st1 { v20.s }[2], [x23]\n"
- "st1 { v24.s }[2], [x22]\n"
- "st1 { v28.s }[2], [x21]\n"
+ "st1 { v8.s }[2], [x28]\n"
+ "st1 { v12.s }[2], [x24]\n"
+ "st1 { v16.s }[2], [x23]\n"
+ "st1 { v20.s }[2], [x22]\n"
+ "st1 { v24.s }[2], [x21]\n"
+ "st1 { v28.s }[2], [x20]\n"
"b 208f\n"
"207:" // Height 6: Partial direct writeback: partial_1_0
- "str s8, [x9, #0x0]\n"
- "str s12, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
- "str s20, [x23, #0x0]\n"
- "str s24, [x22, #0x0]\n"
- "str s28, [x21, #0x0]\n"
+ "str s8, [x28, #0x0]\n"
+ "str s12, [x24, #0x0]\n"
+ "str s16, [x23, #0x0]\n"
+ "str s20, [x22, #0x0]\n"
+ "str s24, [x21, #0x0]\n"
+ "str s28, [x20, #0x0]\n"
"208:" // Height 6: Partial direct writeback: Done
"b 210f\n"
"209:" // Height 6: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
- "str q24, [x22, #0x0]\n"
- "str q25, [x22, #0x10]\n"
- "str q26, [x22, #0x20]\n"
- "str q27, [x22, #0x30]\n"
- "str q28, [x21, #0x0]\n"
- "str q29, [x21, #0x10]\n"
- "str q30, [x21, #0x20]\n"
- "str q31, [x21, #0x30]\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "str q20, [x22, #0x0]\n"
+ "str q21, [x22, #0x10]\n"
+ "str q22, [x22, #0x20]\n"
+ "str q23, [x22, #0x30]\n"
+ "str q24, [x21, #0x0]\n"
+ "str q25, [x21, #0x10]\n"
+ "str q26, [x21, #0x20]\n"
+ "str q27, [x21, #0x30]\n"
+ "str q28, [x20, #0x0]\n"
+ "str q29, [x20, #0x10]\n"
+ "str q30, [x20, #0x20]\n"
+ "str q31, [x20, #0x30]\n"
"210:" // Height 6: Writeback done
"subs x11, x11, #0x10\n"
"bgt 177b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 212f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 211f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"211:" // Update direct input
- "mov x20, #0xc\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0xc\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"212:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_mmla_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_mmla_6x16/generic.cpp
index 5a000c69af..0fa358e848 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_mmla_6x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_mmla_6x16/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -103,23 +103,23 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"cmp %x[M], #0x2\n"
"bgt 75f\n"
"beq 38f\n"
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x9, %x[bias]\n"
+ "mov x28, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "cbz x12, 3f\n"
- "ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
+ "cbz x9, 3f\n"
+ "ldr q8, [x9, #0x0]\n"
"zip2 v12.2d, v8.2d, v8.2d\n"
+ "ldr q9, [x9, #0x10]\n"
"zip1 v8.2d, v8.2d, v8.2d\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
+ "ldr q10, [x9, #0x20]\n"
+ "ldr q11, [x9, #0x30]\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
+ "add x9, x9, #0x40\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x12, x12, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
"b 15f\n"
@@ -128,65 +128,65 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"cmp x11, #0x10\n"
"bge 12f\n"
"tbz x11, #3, 7f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
"tbz x11, #2, 5f\n"
- "ld1 { v11.4s }, [x9], #0x10\n"
+ "ld1 { v11.4s }, [x28], #0x10\n"
"tbz x11, #1, 4f\n"
- "ldr d16, [x9], #0x8\n"
- "mov x20, #0x38\n"
+ "mov x19, #0x38\n"
+ "ldr d16, [x28], #0x8\n"
"tbz x11, #0, 11f\n"
- "ld1 { v16.s }[2], [x9]\n"
+ "ld1 { v16.s }[2], [x28]\n"
"b 11f\n"
"4:" // Height 1: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x11, #0, 11f\n"
- "ldr s16, [x9, #0x0]\n"
+ "ldr s16, [x28, #0x0]\n"
"b 11f\n"
"5:" // Height 1: Partial accumulate: partial_2_8
"tbz x11, #1, 6f\n"
- "ldr d11, [x9], #0x8\n"
- "mov x20, #0x28\n"
+ "ldr d11, [x28], #0x8\n"
+ "mov x19, #0x28\n"
"tbz x11, #0, 11f\n"
- "ld1 { v11.s }[2], [x9]\n"
+ "ld1 { v11.s }[2], [x28]\n"
"b 11f\n"
"6:" // Height 1: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x11, #0, 11f\n"
- "ldr s11, [x9, #0x0]\n"
+ "ldr s11, [x28, #0x0]\n"
"b 11f\n"
"7:" // Height 1: Partial accumulate: partial_4_0
"tbz x11, #2, 9f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
"tbz x11, #1, 8f\n"
- "ldr d10, [x9], #0x8\n"
- "mov x20, #0x18\n"
+ "ldr d10, [x28], #0x8\n"
+ "mov x19, #0x18\n"
"tbz x11, #0, 11f\n"
- "ld1 { v10.s }[2], [x9]\n"
+ "ld1 { v10.s }[2], [x28]\n"
"b 11f\n"
"8:" // Height 1: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x11, #0, 11f\n"
- "ldr s10, [x9, #0x0]\n"
+ "ldr s10, [x28, #0x0]\n"
"b 11f\n"
"9:" // Height 1: Partial accumulate: partial_2_0
"tbz x11, #1, 10f\n"
- "ldr d9, [x9], #0x8\n"
- "mov x20, #0x8\n"
+ "ldr d9, [x28], #0x8\n"
+ "mov x19, #0x8\n"
"tbz x11, #0, 11f\n"
- "ld1 { v9.s }[2], [x9]\n"
+ "ld1 { v9.s }[2], [x28]\n"
"b 11f\n"
"10:" // Height 1: Partial accumulate: partial_1_0
- "ldr s9, [x9, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr s9, [x28, #0x0]\n"
+ "mov x19, #0x0\n"
"11:" // Height 1: Partial accumulate: Done
- "sub x9, x9, x20\n"
+ "sub x28, x28, x19\n"
"b 13f\n"
"12:" // Height 1: full accumulate
- "ldr q9, [x9, #0x0]\n"
- "ldr q10, [x9, #0x10]\n"
- "ldr q11, [x9, #0x20]\n"
- "ldr q16, [x9, #0x30]\n"
+ "ldr q9, [x28, #0x0]\n"
+ "ldr q10, [x28, #0x10]\n"
+ "ldr q11, [x28, #0x20]\n"
+ "ldr q16, [x28, #0x30]\n"
"13:" // Height 1: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -207,31 +207,37 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"movi v14.16b, #0x0\n"
"movi v15.16b, #0x0\n"
"15:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"16:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 17f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 18f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 18f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
"b 18f\n"
"17:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"18:" // Height 1: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"blt 21f\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q7, [x10, #0x0]\n"
- "cmp x27, #0x10\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "cmp x26, #0x10\n"
"blt 20f\n"
"19:" // Height 1: Multiply loop: Main loop head
+ "movi v2.16b, #0x0\n"
+ "ldr q7, [x10, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q6, [x10, #0x10]\n"
+ "sub x26, x26, #0x8\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "cmp x26, #0x10\n"
".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
"ldr q7, [x10, #0x20]\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
@@ -244,7 +250,6 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"ldr q7, [x10, #0x60]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
"ldr q6, [x10, #0x70]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
"ldr q7, [x10, #0x80]\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
@@ -261,19 +266,20 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"ldr q7, [x10, #0xe0]\n"
".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
"ldr q6, [x10, #0xf0]\n"
- "sub x27, x27, #0x8\n"
- "add x26, x26, #0x10\n"
- "cmp x27, #0x10\n"
+ "add x10, x10, #0x100\n"
".inst 0x6e47ec2b // bfmmla v11.4s, v1.8h, v7.8h\n"
".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
- "ldr q1, [x26, #0x0]\n"
- "add x10, x10, #0x100\n"
- "ldr q7, [x10, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q1, [x25, #0x0]\n"
"bge 19b\n"
"20:" // Height 1: Multiply loop: Single iteration only
+ "movi v2.16b, #0x0\n"
+ "ldr q7, [x10, #0x0]\n"
+ "sub x26, x26, #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q6, [x10, #0x10]\n"
+ "add x25, x25, #0x10\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
"ldr q7, [x10, #0x20]\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
@@ -286,7 +292,6 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"ldr q7, [x10, #0x60]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
"ldr q6, [x10, #0x70]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
"ldr q7, [x10, #0x80]\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
@@ -303,22 +308,22 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"ldr q7, [x10, #0xe0]\n"
".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
"ldr q6, [x10, #0xf0]\n"
- "add x26, x26, #0x10\n"
- "sub x27, x27, #0x8\n"
+ "add x10, x10, #0x100\n"
".inst 0x6e47ec2b // bfmmla v11.4s, v1.8h, v7.8h\n"
".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "add x10, x10, #0x100\n"
"21:" // Height 1: Multiply loop: Main loop skip
- "cbz x27, 26f\n"
- "cmp x27, #0x4\n"
+ "cbz x26, 26f\n"
+ "cmp x26, #0x4\n"
"blt 23f\n"
"22:" // Height 1: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr q6, [x10, #0x0]\n"
+ "movi v2.16b, #0x0\n"
+ "ldr d1, [x25], #0x8\n"
+ "sub x26, x26, #0x4\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q6, [x10, #0x0]\n"
+ "cmp x26, #0x4\n"
".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
+ "ldr q7, [x10, #0x10]\n"
"ldr q6, [x10, #0x20]\n"
".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
"ldr q7, [x10, #0x30]\n"
@@ -330,22 +335,21 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"ldr q6, [x10, #0x60]\n"
".inst 0x6e47ec0e // bfmmla v14.4s, v0.8h, v7.8h\n"
"ldr q7, [x10, #0x70]\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
+ "add x10, x10, #0x80\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
- "add x10, x10, #0x80\n"
"bge 22b\n"
+ "cbz x26, 26f\n"
"23:" // Height 1: Multiply loop: Skip odd blocks
- "cbz x27, 26f\n"
- "tbz x27, #1, 24f\n"
- "ldr s1, [x26], #0x4\n"
- "tbz x27, #0, 25f\n"
- "ld1 { v1.h }[2], [x26]\n"
+ "tbz x26, #1, 24f\n"
+ "ldr s1, [x25], #0x4\n"
+ "tbz x26, #0, 25f\n"
+ "ld1 { v1.h }[2], [x25]\n"
"b 25f\n"
"24:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
- "ldr h1, [x26, #0x0]\n"
+ "ldr h1, [x25, #0x0]\n"
"25:" // Height 1: Multiply loop: Ragged operand read: Done
+ "movi v2.16b, #0x0\n"
"ldr q7, [x10, #0x0]\n"
"ldr q6, [x10, #0x10]\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
@@ -361,198 +365,206 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"ldr q7, [x10, #0x60]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
"ldr q6, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
- "add x10, x10, #0x80\n"
"26:" // Height 1: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 16b\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "tbz %x[flags], #1, 27f\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v8.4s, v8.4s, v0.4s\n"
+ "fmin v9.4s, v9.4s, v0.4s\n"
+ "fmin v10.4s, v10.4s, v0.4s\n"
+ "fmin v11.4s, v11.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v0.4s\n"
+ "fmin v13.4s, v13.4s, v0.4s\n"
+ "fmin v14.4s, v14.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmin v15.4s, v15.4s, v0.4s\n"
+ "fmax v15.4s, v15.4s, v1.4s\n"
+ "27:" // Height 1: No activation
"uzp1 v8.2d, v8.2d, v12.2d\n"
+ "cmp x11, #0x10\n"
"uzp1 v9.2d, v9.2d, v13.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
"uzp1 v10.2d, v10.2d, v14.2d\n"
"uzp1 v11.2d, v11.2d, v15.2d\n"
- "tbz %x[flags], #1, 27f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v8.4s, v8.4s, v1.4s\n"
- "fmin v9.4s, v9.4s, v1.4s\n"
- "fmin v10.4s, v10.4s, v1.4s\n"
- "fmin v11.4s, v11.4s, v1.4s\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "27:" // Height 1: No activation
- "cmp x11, #0x10\n"
"bge 36f\n"
"tbz x11, #3, 31f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v9.4s }, [x9], #0x10\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v9.4s }, [x28], #0x10\n"
"tbz x11, #2, 29f\n"
- "st1 { v10.4s }, [x9], #0x10\n"
+ "st1 { v10.4s }, [x28], #0x10\n"
"tbz x11, #1, 28f\n"
- "str d11, [x9], #0x8\n"
+ "str d11, [x28], #0x8\n"
"tbz x11, #0, 35f\n"
- "st1 { v11.s }[2], [x9]\n"
+ "st1 { v11.s }[2], [x28]\n"
"b 35f\n"
"28:" // Height 1: Partial direct writeback: partial_1_12
"tbz x11, #0, 35f\n"
- "str s11, [x9, #0x0]\n"
+ "str s11, [x28, #0x0]\n"
"b 35f\n"
"29:" // Height 1: Partial direct writeback: partial_2_8
"tbz x11, #1, 30f\n"
- "str d10, [x9], #0x8\n"
+ "str d10, [x28], #0x8\n"
"tbz x11, #0, 35f\n"
- "st1 { v10.s }[2], [x9]\n"
+ "st1 { v10.s }[2], [x28]\n"
"b 35f\n"
"30:" // Height 1: Partial direct writeback: partial_1_8
"tbz x11, #0, 35f\n"
- "str s10, [x9, #0x0]\n"
+ "str s10, [x28, #0x0]\n"
"b 35f\n"
"31:" // Height 1: Partial direct writeback: partial_4_0
"tbz x11, #2, 33f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
"tbz x11, #1, 32f\n"
- "str d9, [x9], #0x8\n"
+ "str d9, [x28], #0x8\n"
"tbz x11, #0, 35f\n"
- "st1 { v9.s }[2], [x9]\n"
+ "st1 { v9.s }[2], [x28]\n"
"b 35f\n"
"32:" // Height 1: Partial direct writeback: partial_1_4
"tbz x11, #0, 35f\n"
- "str s9, [x9, #0x0]\n"
+ "str s9, [x28, #0x0]\n"
"b 35f\n"
"33:" // Height 1: Partial direct writeback: partial_2_0
"tbz x11, #1, 34f\n"
- "str d8, [x9], #0x8\n"
+ "str d8, [x28], #0x8\n"
"tbz x11, #0, 35f\n"
- "st1 { v8.s }[2], [x9]\n"
+ "st1 { v8.s }[2], [x28]\n"
"b 35f\n"
"34:" // Height 1: Partial direct writeback: partial_1_0
- "str s8, [x9, #0x0]\n"
+ "str s8, [x28, #0x0]\n"
"35:" // Height 1: Partial direct writeback: Done
"b 37f\n"
"36:" // Height 1: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
"37:" // Height 1: Writeback done
"subs x11, x11, #0x10\n"
"bgt 2b\n"
"b 224f\n"
"38:" // Height 2
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"39:" // Height 2: Column loop
- "cbz x12, 40f\n"
- "ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
+ "cbz x9, 40f\n"
+ "ldr q8, [x9, #0x0]\n"
"zip2 v12.2d, v8.2d, v8.2d\n"
+ "ldr q9, [x9, #0x10]\n"
"zip1 v8.2d, v8.2d, v8.2d\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
+ "ldr q10, [x9, #0x20]\n"
+ "ldr q11, [x9, #0x30]\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
+ "add x9, x9, #0x40\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x12, x12, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
"b 52f\n"
"40:" // Height 2: no bias
"tbz %x[flags], #0, 51f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x11, #0x10\n"
- "add x25, x9, x20, LSL #2\n"
+ "add x24, x28, x19, LSL #2\n"
"bge 49f\n"
"tbz x11, #3, 44f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
"tbz x11, #2, 42f\n"
- "ld1 { v11.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
+ "ld1 { v11.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
"tbz x11, #1, 41f\n"
- "ldr d16, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "mov x20, #0x38\n"
+ "mov x19, #0x38\n"
+ "ldr d16, [x28], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
"tbz x11, #0, 48f\n"
- "ld1 { v16.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
+ "ld1 { v16.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x24]\n"
"b 48f\n"
"41:" // Height 2: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x11, #0, 48f\n"
- "ldr s16, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
+ "ldr s16, [x28, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
"b 48f\n"
"42:" // Height 2: Partial accumulate: partial_2_8
"tbz x11, #1, 43f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "mov x20, #0x28\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
"tbz x11, #0, 48f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x24]\n"
"b 48f\n"
"43:" // Height 2: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x11, #0, 48f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
"b 48f\n"
"44:" // Height 2: Partial accumulate: partial_4_0
"tbz x11, #2, 46f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
"tbz x11, #1, 45f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "mov x20, #0x18\n"
+ "mov x19, #0x18\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
"tbz x11, #0, 48f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x24]\n"
"b 48f\n"
"45:" // Height 2: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x11, #0, 48f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
"b 48f\n"
"46:" // Height 2: Partial accumulate: partial_2_0
"tbz x11, #1, 47f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "mov x20, #0x8\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
"tbz x11, #0, 48f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x24]\n"
"b 48f\n"
"47:" // Height 2: Partial accumulate: partial_1_0
- "ldr s9, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr s9, [x28, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s12, [x24, #0x0]\n"
"48:" // Height 2: Partial accumulate: Done
- "sub x9, x9, x20\n"
+ "sub x28, x28, x19\n"
"b 50f\n"
"49:" // Height 2: full accumulate
- "ldr q9, [x9, #0x0]\n"
- "ldr q10, [x9, #0x10]\n"
- "ldr q11, [x9, #0x20]\n"
- "ldr q16, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
+ "ldr q9, [x28, #0x0]\n"
+ "ldr q10, [x28, #0x10]\n"
+ "ldr q11, [x28, #0x20]\n"
+ "ldr q16, [x28, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
"50:" // Height 2: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -573,50 +585,57 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"movi v14.16b, #0x0\n"
"movi v15.16b, #0x0\n"
"52:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"53:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 54f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 55f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 55f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
"b 55f\n"
"54:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
"55:" // Height 2: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"blt 58f\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q2, [x25, #0x0]\n"
- "cmp x27, #0x10\n"
- "ldr q7, [x10, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "ldr q2, [x24, #0x0]\n"
+ "cmp x26, #0x10\n"
"blt 57f\n"
"56:" // Height 2: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q7, [x10, #0x0]\n"
+ "add x25, x25, #0x10\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "ldr q6, [x10, #0x10]\n"
+ "add x24, x24, #0x10\n"
".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
"ldr q7, [x10, #0x20]\n"
+ "sub x26, x26, #0x8\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
"ldr q6, [x10, #0x30]\n"
+ "cmp x26, #0x10\n"
".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
"ldr q7, [x10, #0x40]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
"ldr q6, [x10, #0x50]\n"
".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
"ldr q7, [x10, #0x60]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
"ldr q6, [x10, #0x70]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
"ldr q7, [x10, #0x80]\n"
+ "ldr q2, [x24, #0x0]\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
"ldr q6, [x10, #0x90]\n"
".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
@@ -631,35 +650,33 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"ldr q7, [x10, #0xe0]\n"
".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
"ldr q6, [x10, #0xf0]\n"
- "sub x27, x27, #0x8\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
- "ldr q2, [x25, #0x0]\n"
- "cmp x27, #0x10\n"
- ".inst 0x6e47ec2b // bfmmla v11.4s, v1.8h, v7.8h\n"
"add x10, x10, #0x100\n"
- "ldr q7, [x10, #0x0]\n"
+ ".inst 0x6e47ec2b // bfmmla v11.4s, v1.8h, v7.8h\n"
".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q1, [x25, #0x0]\n"
"bge 56b\n"
"57:" // Height 2: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q7, [x10, #0x0]\n"
+ "sub x26, x26, #0x8\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "ldr q6, [x10, #0x10]\n"
+ "add x25, x25, #0x10\n"
".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
"ldr q7, [x10, #0x20]\n"
+ "add x24, x24, #0x10\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
"ldr q6, [x10, #0x30]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
"ldr q7, [x10, #0x40]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
"ldr q6, [x10, #0x50]\n"
".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
"ldr q7, [x10, #0x60]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
"ldr q6, [x10, #0x70]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
"ldr q7, [x10, #0x80]\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
@@ -676,58 +693,53 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"ldr q7, [x10, #0xe0]\n"
".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
"ldr q6, [x10, #0xf0]\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
+ "add x10, x10, #0x100\n"
".inst 0x6e47ec2b // bfmmla v11.4s, v1.8h, v7.8h\n"
".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
- "sub x27, x27, #0x8\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "add x10, x10, #0x100\n"
"58:" // Height 2: Multiply loop: Main loop skip
- "cbz x27, 63f\n"
- "cmp x27, #0x4\n"
+ "cbz x26, 63f\n"
+ "cmp x26, #0x4\n"
"blt 60f\n"
"59:" // Height 2: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d2, [x25], #0x8\n"
+ "ldr d1, [x25], #0x8\n"
+ "sub x26, x26, #0x4\n"
+ "ldr d2, [x24], #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "sub x27, x27, #0x4\n"
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "cmp x26, #0x4\n"
".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
- ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x10, #0x10]\n"
"ldr q6, [x10, #0x20]\n"
+ ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
"ldr q7, [x10, #0x30]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- ".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
"ldr q6, [x10, #0x40]\n"
+ ".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
"ldr q7, [x10, #0x50]\n"
".inst 0x6e46ec0a // bfmmla v10.4s, v0.8h, v6.8h\n"
- ".inst 0x6e47ec0e // bfmmla v14.4s, v0.8h, v7.8h\n"
"ldr q6, [x10, #0x60]\n"
+ ".inst 0x6e47ec0e // bfmmla v14.4s, v0.8h, v7.8h\n"
"ldr q7, [x10, #0x70]\n"
- "cmp x27, #0x4\n"
+ "add x10, x10, #0x80\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
- "add x10, x10, #0x80\n"
"bge 59b\n"
+ "cbz x26, 63f\n"
"60:" // Height 2: Multiply loop: Skip odd blocks
- "cbz x27, 63f\n"
- "tbz x27, #1, 61f\n"
- "ldr s1, [x26], #0x4\n"
- "ldr s2, [x25], #0x4\n"
- "tbz x27, #0, 62f\n"
- "ld1 { v1.h }[2], [x26]\n"
- "ld1 { v2.h }[2], [x25]\n"
+ "tbz x26, #1, 61f\n"
+ "ldr s1, [x25], #0x4\n"
+ "ldr s2, [x24], #0x4\n"
+ "tbz x26, #0, 62f\n"
+ "ld1 { v1.h }[2], [x25]\n"
+ "ld1 { v2.h }[2], [x24]\n"
"b 62f\n"
"61:" // Height 2: Multiply loop: Ragged operand read: partial_1_0
- "ldr h1, [x26, #0x0]\n"
- "ldr h2, [x25, #0x0]\n"
+ "ldr h1, [x25, #0x0]\n"
+ "ldr h2, [x24, #0x0]\n"
"62:" // Height 2: Multiply loop: Ragged operand read: Done
+ "trn1 v0.2d, v1.2d, v2.2d\n"
"ldr q7, [x10, #0x0]\n"
"ldr q6, [x10, #0x10]\n"
- "trn1 v0.2d, v1.2d, v2.2d\n"
".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
"ldr q7, [x10, #0x20]\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
@@ -740,148 +752,148 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"ldr q7, [x10, #0x60]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
"ldr q6, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
- "add x10, x10, #0x80\n"
"63:" // Height 2: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 53b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "tbz %x[flags], #1, 64f\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v8.4s, v8.4s, v0.4s\n"
+ "fmin v9.4s, v9.4s, v0.4s\n"
+ "fmin v10.4s, v10.4s, v0.4s\n"
+ "fmin v11.4s, v11.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v0.4s\n"
+ "fmin v13.4s, v13.4s, v0.4s\n"
+ "fmin v14.4s, v14.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmin v15.4s, v15.4s, v0.4s\n"
+ "fmax v15.4s, v15.4s, v1.4s\n"
+ "64:" // Height 2: No activation
"uzp1 v7.2d, v8.2d, v12.2d\n"
+ "cmp x11, #0x10\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x25, #0x0]\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
- "tbz %x[flags], #1, 64f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v7.4s, v7.4s, v1.4s\n"
- "fmin v12.4s, v12.4s, v1.4s\n"
- "fmin v13.4s, v13.4s, v1.4s\n"
- "fmin v14.4s, v14.4s, v1.4s\n"
- "fmin v8.4s, v8.4s, v1.4s\n"
- "fmin v9.4s, v9.4s, v1.4s\n"
- "fmin v10.4s, v10.4s, v1.4s\n"
- "fmin v11.4s, v11.4s, v1.4s\n"
- "fmax v7.4s, v7.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "64:" // Height 2: No activation
- "cmp x11, #0x10\n"
"bge 73f\n"
"tbz x11, #3, 68f\n"
- "st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v9.4s }, [x25], #0x10\n"
+ "st1 { v7.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
"tbz x11, #2, 66f\n"
- "st1 { v13.4s }, [x9], #0x10\n"
- "st1 { v10.4s }, [x25], #0x10\n"
+ "st1 { v13.4s }, [x28], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
"tbz x11, #1, 65f\n"
- "str d14, [x9], #0x8\n"
- "str d11, [x25], #0x8\n"
+ "str d14, [x28], #0x8\n"
+ "str d11, [x24], #0x8\n"
"tbz x11, #0, 72f\n"
- "st1 { v14.s }[2], [x9]\n"
- "st1 { v11.s }[2], [x25]\n"
+ "st1 { v14.s }[2], [x28]\n"
+ "st1 { v11.s }[2], [x24]\n"
"b 72f\n"
"65:" // Height 2: Partial direct writeback: partial_1_12
"tbz x11, #0, 72f\n"
- "str s14, [x9, #0x0]\n"
- "str s11, [x25, #0x0]\n"
+ "str s14, [x28, #0x0]\n"
+ "str s11, [x24, #0x0]\n"
"b 72f\n"
"66:" // Height 2: Partial direct writeback: partial_2_8
"tbz x11, #1, 67f\n"
- "str d13, [x9], #0x8\n"
- "str d10, [x25], #0x8\n"
+ "str d13, [x28], #0x8\n"
+ "str d10, [x24], #0x8\n"
"tbz x11, #0, 72f\n"
- "st1 { v13.s }[2], [x9]\n"
- "st1 { v10.s }[2], [x25]\n"
+ "st1 { v13.s }[2], [x28]\n"
+ "st1 { v10.s }[2], [x24]\n"
"b 72f\n"
"67:" // Height 2: Partial direct writeback: partial_1_8
"tbz x11, #0, 72f\n"
- "str s13, [x9, #0x0]\n"
- "str s10, [x25, #0x0]\n"
+ "str s13, [x28, #0x0]\n"
+ "str s10, [x24, #0x0]\n"
"b 72f\n"
"68:" // Height 2: Partial direct writeback: partial_4_0
"tbz x11, #2, 70f\n"
- "st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
+ "st1 { v7.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
"tbz x11, #1, 69f\n"
- "str d12, [x9], #0x8\n"
- "str d9, [x25], #0x8\n"
+ "str d12, [x28], #0x8\n"
+ "str d9, [x24], #0x8\n"
"tbz x11, #0, 72f\n"
- "st1 { v12.s }[2], [x9]\n"
- "st1 { v9.s }[2], [x25]\n"
+ "st1 { v12.s }[2], [x28]\n"
+ "st1 { v9.s }[2], [x24]\n"
"b 72f\n"
"69:" // Height 2: Partial direct writeback: partial_1_4
"tbz x11, #0, 72f\n"
- "str s12, [x9, #0x0]\n"
- "str s9, [x25, #0x0]\n"
+ "str s12, [x28, #0x0]\n"
+ "str s9, [x24, #0x0]\n"
"b 72f\n"
"70:" // Height 2: Partial direct writeback: partial_2_0
"tbz x11, #1, 71f\n"
- "str d7, [x9], #0x8\n"
- "str d8, [x25], #0x8\n"
+ "str d7, [x28], #0x8\n"
+ "str d8, [x24], #0x8\n"
"tbz x11, #0, 72f\n"
- "st1 { v7.s }[2], [x9]\n"
- "st1 { v8.s }[2], [x25]\n"
+ "st1 { v7.s }[2], [x28]\n"
+ "st1 { v8.s }[2], [x24]\n"
"b 72f\n"
"71:" // Height 2: Partial direct writeback: partial_1_0
- "str s7, [x9, #0x0]\n"
- "str s8, [x25, #0x0]\n"
+ "str s7, [x28, #0x0]\n"
+ "str s8, [x24, #0x0]\n"
"72:" // Height 2: Partial direct writeback: Done
"b 74f\n"
"73:" // Height 2: Full writeback
- "str q7, [x9, #0x0]\n"
- "str q12, [x9, #0x10]\n"
- "str q13, [x9, #0x20]\n"
- "str q14, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q8, [x25, #0x0]\n"
- "str q9, [x25, #0x10]\n"
- "str q10, [x25, #0x20]\n"
- "str q11, [x25, #0x30]\n"
+ "str q7, [x28, #0x0]\n"
+ "str q12, [x28, #0x10]\n"
+ "str q13, [x28, #0x20]\n"
+ "str q14, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q8, [x24, #0x0]\n"
+ "str q9, [x24, #0x10]\n"
+ "str q10, [x24, #0x20]\n"
+ "str q11, [x24, #0x30]\n"
"74:" // Height 2: Writeback done
"subs x11, x11, #0x10\n"
"bgt 39b\n"
"b 224f\n"
"75:" // Height 3
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"76:" // Height 3: Column loop
- "cbz x12, 77f\n"
- "ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
+ "cbz x9, 77f\n"
+ "ldr q8, [x9, #0x0]\n"
"zip2 v12.2d, v8.2d, v8.2d\n"
+ "ldr q9, [x9, #0x10]\n"
"zip1 v8.2d, v8.2d, v8.2d\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
+ "ldr q10, [x9, #0x20]\n"
+ "mov v16.16b, v8.16b\n"
+ "ldr q11, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "mov v20.16b, v12.16b\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x12, x12, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
- "mov v16.16b, v8.16b\n"
- "mov v20.16b, v12.16b\n"
"mov v17.16b, v9.16b\n"
"mov v21.16b, v13.16b\n"
"mov v18.16b, v10.16b\n"
@@ -891,111 +903,111 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"b 89f\n"
"77:" // Height 3: no bias
"tbz %x[flags], #0, 88f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x11, #0x10\n"
- "add x24, x25, x20, LSL #2\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"bge 86f\n"
"tbz x11, #3, 81f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
"tbz x11, #2, 79f\n"
- "ld1 { v11.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v19.4s }, [x24], #0x10\n"
+ "ld1 { v11.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v19.4s }, [x23], #0x10\n"
"tbz x11, #1, 78f\n"
- "ldr d16, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "mov x20, #0x38\n"
- "ldr d24, [x24], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d16, [x28], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
"tbz x11, #0, 85f\n"
- "ld1 { v16.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v24.s }[2], [x24]\n"
+ "ld1 { v16.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
"b 85f\n"
"78:" // Height 3: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x11, #0, 85f\n"
- "ldr s16, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s24, [x24, #0x0]\n"
+ "ldr s16, [x28, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
"b 85f\n"
"79:" // Height 3: Partial accumulate: partial_2_8
"tbz x11, #1, 80f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "mov x20, #0x28\n"
- "ldr d19, [x24], #0x8\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d19, [x23], #0x8\n"
"tbz x11, #0, 85f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
"b 85f\n"
"80:" // Height 3: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x11, #0, 85f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
"b 85f\n"
"81:" // Height 3: Partial accumulate: partial_4_0
"tbz x11, #2, 83f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
"tbz x11, #1, 82f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "mov x20, #0x18\n"
- "ldr d18, [x24], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "ldr d18, [x23], #0x8\n"
"tbz x11, #0, 85f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
"b 85f\n"
"82:" // Height 3: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x11, #0, 85f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
"b 85f\n"
"83:" // Height 3: Partial accumulate: partial_2_0
"tbz x11, #1, 84f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d17, [x24], #0x8\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d17, [x23], #0x8\n"
"tbz x11, #0, 85f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
"b 85f\n"
"84:" // Height 3: Partial accumulate: partial_1_0
- "ldr s9, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s17, [x24, #0x0]\n"
+ "ldr s9, [x28, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s12, [x24, #0x0]\n"
+ "ldr s17, [x23, #0x0]\n"
"85:" // Height 3: Partial accumulate: Done
- "sub x9, x9, x20\n"
+ "sub x28, x28, x19\n"
"b 87f\n"
"86:" // Height 3: full accumulate
- "ldr q9, [x9, #0x0]\n"
- "ldr q10, [x9, #0x10]\n"
- "ldr q11, [x9, #0x20]\n"
- "ldr q16, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q17, [x24, #0x0]\n"
- "ldr q18, [x24, #0x10]\n"
- "ldr q19, [x24, #0x20]\n"
- "ldr q24, [x24, #0x30]\n"
+ "ldr q9, [x28, #0x0]\n"
+ "ldr q10, [x28, #0x10]\n"
+ "ldr q11, [x28, #0x20]\n"
+ "ldr q16, [x28, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q17, [x23, #0x0]\n"
+ "ldr q18, [x23, #0x10]\n"
+ "ldr q19, [x23, #0x20]\n"
+ "ldr q24, [x23, #0x30]\n"
"87:" // Height 3: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -1032,85 +1044,85 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"movi v22.16b, #0x0\n"
"movi v23.16b, #0x0\n"
"89:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"90:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 91f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 92f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 92f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
"b 92f\n"
"91:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
"92:" // Height 3: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"blt 95f\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q2, [x25, #0x0]\n"
- "cmp x27, #0x10\n"
- "ldr q3, [x24, #0x0]\n"
- "ldr q7, [x10, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "cmp x26, #0x10\n"
"blt 94f\n"
"93:" // Height 3: Multiply loop: Main loop head
+ "movi v4.16b, #0x0\n"
+ "ldr q2, [x24, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x23, #0x0]\n"
+ "add x24, x24, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x10, #0x0]\n"
+ "add x23, x23, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q6, [x10, #0x10]\n"
+ "sub x26, x26, #0x8\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "cmp x26, #0x10\n"
+ ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
"ldr q7, [x10, #0x20]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
"ldr q6, [x10, #0x30]\n"
".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
- "trn2 v3.2d, v3.2d, v4.2d\n"
".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
"ldr q7, [x10, #0x40]\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
- "sub x27, x27, #0x8\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
"ldr q6, [x10, #0x50]\n"
".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
- "add x26, x26, #0x10\n"
".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
"ldr q7, [x10, #0x60]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
- "add x25, x25, #0x10\n"
".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
"ldr q6, [x10, #0x70]\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
- "add x24, x24, #0x10\n"
".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
"ldr q7, [x10, #0x80]\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
- "cmp x27, #0x10\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
"ldr q6, [x10, #0x90]\n"
- "ldr q2, [x25, #0x0]\n"
".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
".inst 0x6e47ec70 // bfmmla v16.4s, v3.8h, v7.8h\n"
"ldr q7, [x10, #0xa0]\n"
".inst 0x6e46ec2c // bfmmla v12.4s, v1.8h, v6.8h\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e46ec74 // bfmmla v20.4s, v3.8h, v6.8h\n"
"ldr q6, [x10, #0xb0]\n"
".inst 0x6e47ec29 // bfmmla v9.4s, v1.8h, v7.8h\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e47ec71 // bfmmla v17.4s, v3.8h, v7.8h\n"
"ldr q7, [x10, #0xc0]\n"
".inst 0x6e46ec2d // bfmmla v13.4s, v1.8h, v6.8h\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e46ec75 // bfmmla v21.4s, v3.8h, v6.8h\n"
"ldr q6, [x10, #0xd0]\n"
".inst 0x6e47ec2a // bfmmla v10.4s, v1.8h, v7.8h\n"
@@ -1122,53 +1134,55 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"add x10, x10, #0x100\n"
".inst 0x6e47ec2b // bfmmla v11.4s, v1.8h, v7.8h\n"
".inst 0x6e47ec73 // bfmmla v19.4s, v3.8h, v7.8h\n"
- "ldr q7, [x10, #0x0]\n"
".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
- "ldr q1, [x26, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
".inst 0x6e46ec77 // bfmmla v23.4s, v3.8h, v6.8h\n"
- "ldr q3, [x24, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
"bge 93b\n"
"94:" // Height 3: Multiply loop: Single iteration only
+ "movi v4.16b, #0x0\n"
+ "ldr q2, [x24, #0x0]\n"
+ "sub x26, x26, #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x23, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x10, #0x0]\n"
+ "add x24, x24, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q6, [x10, #0x10]\n"
+ "add x23, x23, #0x10\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
"ldr q7, [x10, #0x20]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
"ldr q6, [x10, #0x30]\n"
".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
- "trn2 v3.2d, v3.2d, v4.2d\n"
".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
"ldr q7, [x10, #0x40]\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
- "add x26, x26, #0x10\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
"ldr q6, [x10, #0x50]\n"
".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
- "add x25, x25, #0x10\n"
".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
"ldr q7, [x10, #0x60]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
- "add x24, x24, #0x10\n"
".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
"ldr q6, [x10, #0x70]\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
- "sub x27, x27, #0x8\n"
".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
"ldr q7, [x10, #0x80]\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
"ldr q6, [x10, #0x90]\n"
".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e47ec70 // bfmmla v16.4s, v3.8h, v7.8h\n"
"ldr q7, [x10, #0xa0]\n"
".inst 0x6e46ec2c // bfmmla v12.4s, v1.8h, v6.8h\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e46ec74 // bfmmla v20.4s, v3.8h, v6.8h\n"
"ldr q6, [x10, #0xb0]\n"
".inst 0x6e47ec29 // bfmmla v9.4s, v1.8h, v7.8h\n"
@@ -1189,29 +1203,30 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
".inst 0x6e46ec77 // bfmmla v23.4s, v3.8h, v6.8h\n"
"95:" // Height 3: Multiply loop: Main loop skip
- "cbz x27, 100f\n"
- "cmp x27, #0x4\n"
+ "cbz x26, 100f\n"
+ "cmp x26, #0x4\n"
"blt 97f\n"
"96:" // Height 3: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d2, [x25], #0x8\n"
+ "movi v4.16b, #0x0\n"
+ "ldr d1, [x25], #0x8\n"
+ "sub x26, x26, #0x4\n"
+ "ldr d2, [x24], #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "ldr d3, [x24], #0x8\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr d3, [x23], #0x8\n"
+ "cmp x26, #0x4\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
- ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x10, #0x0]\n"
"ldr q7, [x10, #0x10]\n"
+ ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
"ldr q6, [x10, #0x20]\n"
".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
"ldr q7, [x10, #0x30]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- "sub x27, x27, #0x4\n"
".inst 0x6e46ec51 // bfmmla v17.4s, v2.8h, v6.8h\n"
"ldr q6, [x10, #0x40]\n"
".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
- "cmp x27, #0x4\n"
".inst 0x6e47ec55 // bfmmla v21.4s, v2.8h, v7.8h\n"
"ldr q7, [x10, #0x50]\n"
".inst 0x6e46ec0a // bfmmla v10.4s, v0.8h, v6.8h\n"
@@ -1220,31 +1235,32 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
".inst 0x6e47ec0e // bfmmla v14.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec56 // bfmmla v22.4s, v2.8h, v7.8h\n"
"ldr q7, [x10, #0x70]\n"
- ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
"add x10, x10, #0x80\n"
+ ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec53 // bfmmla v19.4s, v2.8h, v6.8h\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec57 // bfmmla v23.4s, v2.8h, v7.8h\n"
"bge 96b\n"
+ "cbz x26, 100f\n"
"97:" // Height 3: Multiply loop: Skip odd blocks
- "cbz x27, 100f\n"
- "tbz x27, #1, 98f\n"
- "ldr s1, [x26], #0x4\n"
- "ldr s2, [x25], #0x4\n"
- "ldr s3, [x24], #0x4\n"
- "tbz x27, #0, 99f\n"
- "ld1 { v1.h }[2], [x26]\n"
- "ld1 { v2.h }[2], [x25]\n"
- "ld1 { v3.h }[2], [x24]\n"
+ "tbz x26, #1, 98f\n"
+ "ldr s1, [x25], #0x4\n"
+ "ldr s2, [x24], #0x4\n"
+ "ldr s3, [x23], #0x4\n"
+ "tbz x26, #0, 99f\n"
+ "ld1 { v1.h }[2], [x25]\n"
+ "ld1 { v2.h }[2], [x24]\n"
+ "ld1 { v3.h }[2], [x23]\n"
"b 99f\n"
"98:" // Height 3: Multiply loop: Ragged operand read: partial_1_0
- "ldr h1, [x26, #0x0]\n"
- "ldr h2, [x25, #0x0]\n"
- "ldr h3, [x24, #0x0]\n"
+ "ldr h1, [x25, #0x0]\n"
+ "ldr h2, [x24, #0x0]\n"
+ "ldr h3, [x23, #0x0]\n"
"99:" // Height 3: Multiply loop: Ragged operand read: Done
+ "movi v4.16b, #0x0\n"
"ldr q7, [x10, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q6, [x10, #0x10]\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
@@ -1270,21 +1286,60 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
"100:" // Height 3: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 90b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x19, LSL #2\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "tbz %x[flags], #1, 101f\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v8.4s, v8.4s, v0.4s\n"
+ "fmin v9.4s, v9.4s, v0.4s\n"
+ "fmin v10.4s, v10.4s, v0.4s\n"
+ "fmin v11.4s, v11.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v0.4s\n"
+ "fmin v13.4s, v13.4s, v0.4s\n"
+ "fmin v14.4s, v14.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmin v15.4s, v15.4s, v0.4s\n"
+ "fmin v16.4s, v16.4s, v0.4s\n"
+ "fmin v17.4s, v17.4s, v0.4s\n"
+ "fmax v15.4s, v15.4s, v1.4s\n"
+ "fmax v16.4s, v16.4s, v1.4s\n"
+ "fmax v17.4s, v17.4s, v1.4s\n"
+ "fmin v18.4s, v18.4s, v0.4s\n"
+ "fmin v19.4s, v19.4s, v0.4s\n"
+ "fmin v20.4s, v20.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v1.4s\n"
+ "fmax v19.4s, v19.4s, v1.4s\n"
+ "fmax v20.4s, v20.4s, v1.4s\n"
+ "fmin v21.4s, v21.4s, v0.4s\n"
+ "fmin v22.4s, v22.4s, v0.4s\n"
+ "fmin v23.4s, v23.4s, v0.4s\n"
+ "fmax v21.4s, v21.4s, v1.4s\n"
+ "fmax v22.4s, v22.4s, v1.4s\n"
+ "fmax v23.4s, v23.4s, v1.4s\n"
+ "101:" // Height 3: No activation
"uzp1 v7.2d, v8.2d, v12.2d\n"
+ "cmp x11, #0x10\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x25, #0x0]\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
- "prfm pstl1keep, [x24, #0x0]\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
@@ -1292,156 +1347,125 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"uzp1 v17.2d, v17.2d, v21.2d\n"
"uzp1 v18.2d, v18.2d, v22.2d\n"
"uzp1 v19.2d, v19.2d, v23.2d\n"
- "tbz %x[flags], #1, 101f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v7.4s, v7.4s, v1.4s\n"
- "fmin v12.4s, v12.4s, v1.4s\n"
- "fmin v13.4s, v13.4s, v1.4s\n"
- "fmin v14.4s, v14.4s, v1.4s\n"
- "fmin v8.4s, v8.4s, v1.4s\n"
- "fmin v9.4s, v9.4s, v1.4s\n"
- "fmin v10.4s, v10.4s, v1.4s\n"
- "fmin v11.4s, v11.4s, v1.4s\n"
- "fmin v16.4s, v16.4s, v1.4s\n"
- "fmin v17.4s, v17.4s, v1.4s\n"
- "fmin v18.4s, v18.4s, v1.4s\n"
- "fmin v19.4s, v19.4s, v1.4s\n"
- "fmax v7.4s, v7.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v16.4s, v16.4s, v0.4s\n"
- "fmax v17.4s, v17.4s, v0.4s\n"
- "fmax v18.4s, v18.4s, v0.4s\n"
- "fmax v19.4s, v19.4s, v0.4s\n"
- "101:" // Height 3: No activation
- "cmp x11, #0x10\n"
"bge 110f\n"
"tbz x11, #3, 105f\n"
- "st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v9.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
+ "st1 { v7.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v17.4s }, [x23], #0x10\n"
"tbz x11, #2, 103f\n"
- "st1 { v13.4s }, [x9], #0x10\n"
- "st1 { v10.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
+ "st1 { v13.4s }, [x28], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
+ "st1 { v18.4s }, [x23], #0x10\n"
"tbz x11, #1, 102f\n"
- "str d14, [x9], #0x8\n"
- "str d11, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
+ "str d14, [x28], #0x8\n"
+ "str d11, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
"tbz x11, #0, 109f\n"
- "st1 { v14.s }[2], [x9]\n"
- "st1 { v11.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
+ "st1 { v14.s }[2], [x28]\n"
+ "st1 { v11.s }[2], [x24]\n"
+ "st1 { v19.s }[2], [x23]\n"
"b 109f\n"
"102:" // Height 3: Partial direct writeback: partial_1_12
"tbz x11, #0, 109f\n"
- "str s14, [x9, #0x0]\n"
- "str s11, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
+ "str s14, [x28, #0x0]\n"
+ "str s11, [x24, #0x0]\n"
+ "str s19, [x23, #0x0]\n"
"b 109f\n"
"103:" // Height 3: Partial direct writeback: partial_2_8
"tbz x11, #1, 104f\n"
- "str d13, [x9], #0x8\n"
- "str d10, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
+ "str d13, [x28], #0x8\n"
+ "str d10, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
"tbz x11, #0, 109f\n"
- "st1 { v13.s }[2], [x9]\n"
- "st1 { v10.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
+ "st1 { v13.s }[2], [x28]\n"
+ "st1 { v10.s }[2], [x24]\n"
+ "st1 { v18.s }[2], [x23]\n"
"b 109f\n"
"104:" // Height 3: Partial direct writeback: partial_1_8
"tbz x11, #0, 109f\n"
- "str s13, [x9, #0x0]\n"
- "str s10, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
+ "str s13, [x28, #0x0]\n"
+ "str s10, [x24, #0x0]\n"
+ "str s18, [x23, #0x0]\n"
"b 109f\n"
"105:" // Height 3: Partial direct writeback: partial_4_0
"tbz x11, #2, 107f\n"
- "st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v7.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
"tbz x11, #1, 106f\n"
- "str d12, [x9], #0x8\n"
- "str d9, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
+ "str d12, [x28], #0x8\n"
+ "str d9, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
"tbz x11, #0, 109f\n"
- "st1 { v12.s }[2], [x9]\n"
- "st1 { v9.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
+ "st1 { v12.s }[2], [x28]\n"
+ "st1 { v9.s }[2], [x24]\n"
+ "st1 { v17.s }[2], [x23]\n"
"b 109f\n"
"106:" // Height 3: Partial direct writeback: partial_1_4
"tbz x11, #0, 109f\n"
- "str s12, [x9, #0x0]\n"
- "str s9, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
+ "str s12, [x28, #0x0]\n"
+ "str s9, [x24, #0x0]\n"
+ "str s17, [x23, #0x0]\n"
"b 109f\n"
"107:" // Height 3: Partial direct writeback: partial_2_0
"tbz x11, #1, 108f\n"
- "str d7, [x9], #0x8\n"
- "str d8, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
+ "str d7, [x28], #0x8\n"
+ "str d8, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
"tbz x11, #0, 109f\n"
- "st1 { v7.s }[2], [x9]\n"
- "st1 { v8.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
+ "st1 { v7.s }[2], [x28]\n"
+ "st1 { v8.s }[2], [x24]\n"
+ "st1 { v16.s }[2], [x23]\n"
"b 109f\n"
"108:" // Height 3: Partial direct writeback: partial_1_0
- "str s7, [x9, #0x0]\n"
- "str s8, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
+ "str s7, [x28, #0x0]\n"
+ "str s8, [x24, #0x0]\n"
+ "str s16, [x23, #0x0]\n"
"109:" // Height 3: Partial direct writeback: Done
"b 111f\n"
"110:" // Height 3: Full writeback
- "str q7, [x9, #0x0]\n"
- "str q12, [x9, #0x10]\n"
- "str q13, [x9, #0x20]\n"
- "str q14, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q8, [x25, #0x0]\n"
- "str q9, [x25, #0x10]\n"
- "str q10, [x25, #0x20]\n"
- "str q11, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
+ "str q7, [x28, #0x0]\n"
+ "str q12, [x28, #0x10]\n"
+ "str q13, [x28, #0x20]\n"
+ "str q14, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q8, [x24, #0x0]\n"
+ "str q9, [x24, #0x10]\n"
+ "str q10, [x24, #0x20]\n"
+ "str q11, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
"111:" // Height 3: Writeback done
"subs x11, x11, #0x10\n"
"bgt 76b\n"
"b 224f\n"
"112:" // Height 4
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"113:" // Height 4: Column loop
- "cbz x12, 114f\n"
- "ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
+ "cbz x9, 114f\n"
+ "ldr q8, [x9, #0x0]\n"
"zip2 v12.2d, v8.2d, v8.2d\n"
+ "ldr q9, [x9, #0x10]\n"
"zip1 v8.2d, v8.2d, v8.2d\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
+ "ldr q10, [x9, #0x20]\n"
+ "mov v16.16b, v8.16b\n"
+ "ldr q11, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "mov v20.16b, v12.16b\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x12, x12, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
- "mov v16.16b, v8.16b\n"
- "mov v20.16b, v12.16b\n"
"mov v17.16b, v9.16b\n"
"mov v21.16b, v13.16b\n"
"mov v18.16b, v10.16b\n"
@@ -1451,132 +1475,132 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"b 126f\n"
"114:" // Height 4: no bias
"tbz %x[flags], #0, 125f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x11, #0x10\n"
- "add x23, x24, x20, LSL #2\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"bge 123f\n"
"tbz x11, #3, 118f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
"tbz x11, #2, 116f\n"
- "ld1 { v11.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v19.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
+ "ld1 { v11.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v19.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
"tbz x11, #1, 115f\n"
- "ldr d16, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "mov x20, #0x38\n"
- "ldr d24, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d16, [x28], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
"tbz x11, #0, 122f\n"
- "ld1 { v16.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v24.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
+ "ld1 { v16.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
+ "ld1 { v23.s }[2], [x22]\n"
"b 122f\n"
"115:" // Height 4: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x11, #0, 122f\n"
- "ldr s16, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s24, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
+ "ldr s16, [x28, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
+ "ldr s23, [x22, #0x0]\n"
"b 122f\n"
"116:" // Height 4: Partial accumulate: partial_2_8
"tbz x11, #1, 117f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "mov x20, #0x28\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
"tbz x11, #0, 122f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v22.s }[2], [x22]\n"
"b 122f\n"
"117:" // Height 4: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x11, #0, 122f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s22, [x22, #0x0]\n"
"b 122f\n"
"118:" // Height 4: Partial accumulate: partial_4_0
"tbz x11, #2, 120f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
"tbz x11, #1, 119f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "mov x20, #0x18\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
"tbz x11, #0, 122f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v21.s }[2], [x22]\n"
"b 122f\n"
"119:" // Height 4: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x11, #0, 122f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "ldr s21, [x22, #0x0]\n"
"b 122f\n"
"120:" // Height 4: Partial accumulate: partial_2_0
"tbz x11, #1, 121f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
"tbz x11, #0, 122f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v20.s }[2], [x22]\n"
"b 122f\n"
"121:" // Height 4: Partial accumulate: partial_1_0
- "ldr s9, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
+ "ldr s9, [x28, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s12, [x24, #0x0]\n"
+ "ldr s17, [x23, #0x0]\n"
+ "ldr s20, [x22, #0x0]\n"
"122:" // Height 4: Partial accumulate: Done
- "sub x9, x9, x20\n"
+ "sub x28, x28, x19\n"
"b 124f\n"
"123:" // Height 4: full accumulate
- "ldr q9, [x9, #0x0]\n"
- "ldr q10, [x9, #0x10]\n"
- "ldr q11, [x9, #0x20]\n"
- "ldr q16, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q17, [x24, #0x0]\n"
- "ldr q18, [x24, #0x10]\n"
- "ldr q19, [x24, #0x20]\n"
- "ldr q24, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
+ "ldr q9, [x28, #0x0]\n"
+ "ldr q10, [x28, #0x10]\n"
+ "ldr q11, [x28, #0x20]\n"
+ "ldr q16, [x28, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q17, [x23, #0x0]\n"
+ "ldr q18, [x23, #0x10]\n"
+ "ldr q19, [x23, #0x20]\n"
+ "ldr q24, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
"124:" // Height 4: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -1613,155 +1637,155 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"movi v22.16b, #0x0\n"
"movi v23.16b, #0x0\n"
"126:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"127:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 128f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 129f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 129f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
"b 129f\n"
"128:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
"129:" // Height 4: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"blt 132f\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q2, [x25, #0x0]\n"
- "cmp x27, #0x10\n"
- "ldr q3, [x24, #0x0]\n"
- "ldr q4, [x23, #0x0]\n"
- "ldr q7, [x10, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "ldr q2, [x24, #0x0]\n"
+ "cmp x26, #0x10\n"
"blt 131f\n"
"130:" // Height 4: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x23, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
- "sub x27, x27, #0x8\n"
+ "ldr q4, [x22, #0x0]\n"
+ "add x24, x24, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q7, [x10, #0x0]\n"
+ "add x23, x23, #0x10\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "ldr q6, [x10, #0x10]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x26, x26, #0x8\n"
".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
"ldr q7, [x10, #0x20]\n"
+ "cmp x26, #0x10\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
"ldr q6, [x10, #0x30]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
- "trn2 v3.2d, v3.2d, v4.2d\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
"ldr q7, [x10, #0x40]\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
- "add x26, x26, #0x10\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
"ldr q6, [x10, #0x50]\n"
".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
- "add x25, x25, #0x10\n"
".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
"ldr q7, [x10, #0x60]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
- "add x24, x24, #0x10\n"
".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
"ldr q6, [x10, #0x70]\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
- "add x23, x23, #0x10\n"
- "ldr q4, [x23, #0x0]\n"
".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
"ldr q7, [x10, #0x80]\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
"ldr q6, [x10, #0x90]\n"
- "ldr q2, [x25, #0x0]\n"
+ "ldr q2, [x24, #0x0]\n"
".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
".inst 0x6e47ec70 // bfmmla v16.4s, v3.8h, v7.8h\n"
"ldr q7, [x10, #0xa0]\n"
".inst 0x6e46ec2c // bfmmla v12.4s, v1.8h, v6.8h\n"
- "cmp x27, #0x10\n"
".inst 0x6e46ec74 // bfmmla v20.4s, v3.8h, v6.8h\n"
"ldr q6, [x10, #0xb0]\n"
".inst 0x6e47ec29 // bfmmla v9.4s, v1.8h, v7.8h\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e47ec71 // bfmmla v17.4s, v3.8h, v7.8h\n"
"ldr q7, [x10, #0xc0]\n"
".inst 0x6e46ec2d // bfmmla v13.4s, v1.8h, v6.8h\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e46ec75 // bfmmla v21.4s, v3.8h, v6.8h\n"
"ldr q6, [x10, #0xd0]\n"
".inst 0x6e47ec2a // bfmmla v10.4s, v1.8h, v7.8h\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e47ec72 // bfmmla v18.4s, v3.8h, v7.8h\n"
"ldr q7, [x10, #0xe0]\n"
".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e46ec76 // bfmmla v22.4s, v3.8h, v6.8h\n"
"ldr q6, [x10, #0xf0]\n"
"add x10, x10, #0x100\n"
".inst 0x6e47ec2b // bfmmla v11.4s, v1.8h, v7.8h\n"
".inst 0x6e47ec73 // bfmmla v19.4s, v3.8h, v7.8h\n"
- "ldr q7, [x10, #0x0]\n"
".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
- "ldr q1, [x26, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
".inst 0x6e46ec77 // bfmmla v23.4s, v3.8h, v6.8h\n"
- "ldr q3, [x24, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
"bge 130b\n"
"131:" // Height 4: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x23, #0x0]\n"
+ "sub x26, x26, #0x8\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
- "add x26, x26, #0x10\n"
+ "ldr q4, [x22, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q7, [x10, #0x0]\n"
+ "add x24, x24, #0x10\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "ldr q6, [x10, #0x10]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x22, x22, #0x10\n"
".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
"ldr q7, [x10, #0x20]\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
"ldr q6, [x10, #0x30]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
- "trn2 v3.2d, v3.2d, v4.2d\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
"ldr q7, [x10, #0x40]\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
- "add x25, x25, #0x10\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
"ldr q6, [x10, #0x50]\n"
".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
- "add x24, x24, #0x10\n"
".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
"ldr q7, [x10, #0x60]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
- "add x23, x23, #0x10\n"
".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
"ldr q6, [x10, #0x70]\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
- "sub x27, x27, #0x8\n"
".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
"ldr q7, [x10, #0x80]\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
"ldr q6, [x10, #0x90]\n"
".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e47ec70 // bfmmla v16.4s, v3.8h, v7.8h\n"
"ldr q7, [x10, #0xa0]\n"
".inst 0x6e46ec2c // bfmmla v12.4s, v1.8h, v6.8h\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e46ec74 // bfmmla v20.4s, v3.8h, v6.8h\n"
"ldr q6, [x10, #0xb0]\n"
".inst 0x6e47ec29 // bfmmla v9.4s, v1.8h, v7.8h\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e47ec71 // bfmmla v17.4s, v3.8h, v7.8h\n"
"ldr q7, [x10, #0xc0]\n"
".inst 0x6e46ec2d // bfmmla v13.4s, v1.8h, v6.8h\n"
@@ -1779,18 +1803,18 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
".inst 0x6e46ec77 // bfmmla v23.4s, v3.8h, v6.8h\n"
"132:" // Height 4: Multiply loop: Main loop skip
- "cbz x27, 137f\n"
- "cmp x27, #0x4\n"
+ "cbz x26, 137f\n"
+ "cmp x26, #0x4\n"
"blt 134f\n"
"133:" // Height 4: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d2, [x25], #0x8\n"
+ "ldr d1, [x25], #0x8\n"
+ "sub x26, x26, #0x4\n"
+ "ldr d2, [x24], #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "sub x27, x27, #0x4\n"
- "ldr d3, [x24], #0x8\n"
- "ldr d4, [x23], #0x8\n"
+ "ldr d3, [x23], #0x8\n"
+ "cmp x26, #0x4\n"
+ "ldr d4, [x22], #0x8\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
- "cmp x27, #0x4\n"
"ldr q6, [x10, #0x0]\n"
"ldr q7, [x10, #0x10]\n"
".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
@@ -1817,29 +1841,29 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec57 // bfmmla v23.4s, v2.8h, v7.8h\n"
"bge 133b\n"
+ "cbz x26, 137f\n"
"134:" // Height 4: Multiply loop: Skip odd blocks
- "cbz x27, 137f\n"
- "tbz x27, #1, 135f\n"
- "ldr s1, [x26], #0x4\n"
- "ldr s2, [x25], #0x4\n"
- "ldr s3, [x24], #0x4\n"
- "ldr s4, [x23], #0x4\n"
- "tbz x27, #0, 136f\n"
- "ld1 { v1.h }[2], [x26]\n"
- "ld1 { v2.h }[2], [x25]\n"
- "ld1 { v3.h }[2], [x24]\n"
- "ld1 { v4.h }[2], [x23]\n"
+ "tbz x26, #1, 135f\n"
+ "ldr s1, [x25], #0x4\n"
+ "ldr s2, [x24], #0x4\n"
+ "ldr s3, [x23], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
+ "tbz x26, #0, 136f\n"
+ "ld1 { v1.h }[2], [x25]\n"
+ "ld1 { v2.h }[2], [x24]\n"
+ "ld1 { v3.h }[2], [x23]\n"
+ "ld1 { v4.h }[2], [x22]\n"
"b 136f\n"
"135:" // Height 4: Multiply loop: Ragged operand read: partial_1_0
- "ldr h1, [x26, #0x0]\n"
- "ldr h2, [x25, #0x0]\n"
- "ldr h3, [x24, #0x0]\n"
- "ldr h4, [x23, #0x0]\n"
+ "ldr h1, [x25, #0x0]\n"
+ "ldr h2, [x24, #0x0]\n"
+ "ldr h3, [x23, #0x0]\n"
+ "ldr h4, [x22, #0x0]\n"
"136:" // Height 4: Multiply loop: Ragged operand read: Done
- "ldr q7, [x10, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q7, [x10, #0x0]\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q6, [x10, #0x10]\n"
".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
"ldr q7, [x10, #0x20]\n"
@@ -1864,25 +1888,64 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
"137:" // Height 4: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 127b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x19, LSL #2\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
+ "tbz %x[flags], #1, 138f\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v8.4s, v8.4s, v0.4s\n"
+ "fmin v9.4s, v9.4s, v0.4s\n"
+ "fmin v10.4s, v10.4s, v0.4s\n"
+ "fmin v11.4s, v11.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v0.4s\n"
+ "fmin v13.4s, v13.4s, v0.4s\n"
+ "fmin v14.4s, v14.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmin v15.4s, v15.4s, v0.4s\n"
+ "fmin v16.4s, v16.4s, v0.4s\n"
+ "fmin v17.4s, v17.4s, v0.4s\n"
+ "fmax v15.4s, v15.4s, v1.4s\n"
+ "fmax v16.4s, v16.4s, v1.4s\n"
+ "fmax v17.4s, v17.4s, v1.4s\n"
+ "fmin v18.4s, v18.4s, v0.4s\n"
+ "fmin v19.4s, v19.4s, v0.4s\n"
+ "fmin v20.4s, v20.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v1.4s\n"
+ "fmax v19.4s, v19.4s, v1.4s\n"
+ "fmax v20.4s, v20.4s, v1.4s\n"
+ "fmin v21.4s, v21.4s, v0.4s\n"
+ "fmin v22.4s, v22.4s, v0.4s\n"
+ "fmin v23.4s, v23.4s, v0.4s\n"
+ "fmax v21.4s, v21.4s, v1.4s\n"
+ "fmax v22.4s, v22.4s, v1.4s\n"
+ "fmax v23.4s, v23.4s, v1.4s\n"
+ "138:" // Height 4: No activation
"uzp1 v7.2d, v8.2d, v12.2d\n"
- "add x23, x24, x20, LSL #2\n"
+ "cmp x11, #0x10\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
- "prfm pstl1keep, [x25, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
- "prfm pstl1keep, [x23, #0x0]\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
"uzp1 v15.2d, v16.2d, v20.2d\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
@@ -1892,191 +1955,152 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"uzp2 v18.2d, v18.2d, v22.2d\n"
"uzp1 v22.2d, v19.2d, v23.2d\n"
"uzp2 v19.2d, v19.2d, v23.2d\n"
- "tbz %x[flags], #1, 138f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v7.4s, v7.4s, v1.4s\n"
- "fmin v12.4s, v12.4s, v1.4s\n"
- "fmin v13.4s, v13.4s, v1.4s\n"
- "fmin v14.4s, v14.4s, v1.4s\n"
- "fmin v8.4s, v8.4s, v1.4s\n"
- "fmin v9.4s, v9.4s, v1.4s\n"
- "fmin v10.4s, v10.4s, v1.4s\n"
- "fmin v11.4s, v11.4s, v1.4s\n"
- "fmin v15.4s, v15.4s, v1.4s\n"
- "fmin v20.4s, v20.4s, v1.4s\n"
- "fmin v21.4s, v21.4s, v1.4s\n"
- "fmin v22.4s, v22.4s, v1.4s\n"
- "fmin v16.4s, v16.4s, v1.4s\n"
- "fmin v17.4s, v17.4s, v1.4s\n"
- "fmin v18.4s, v18.4s, v1.4s\n"
- "fmin v19.4s, v19.4s, v1.4s\n"
- "fmax v7.4s, v7.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v15.4s, v15.4s, v0.4s\n"
- "fmax v20.4s, v20.4s, v0.4s\n"
- "fmax v21.4s, v21.4s, v0.4s\n"
- "fmax v22.4s, v22.4s, v0.4s\n"
- "fmax v16.4s, v16.4s, v0.4s\n"
- "fmax v17.4s, v17.4s, v0.4s\n"
- "fmax v18.4s, v18.4s, v0.4s\n"
- "fmax v19.4s, v19.4s, v0.4s\n"
- "138:" // Height 4: No activation
- "cmp x11, #0x10\n"
"bge 147f\n"
"tbz x11, #3, 142f\n"
- "st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v9.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
+ "st1 { v7.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
"tbz x11, #2, 140f\n"
- "st1 { v13.4s }, [x9], #0x10\n"
- "st1 { v10.4s }, [x25], #0x10\n"
- "st1 { v21.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
+ "st1 { v13.4s }, [x28], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
+ "st1 { v21.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
"tbz x11, #1, 139f\n"
- "str d14, [x9], #0x8\n"
- "str d11, [x25], #0x8\n"
- "str d22, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
+ "str d14, [x28], #0x8\n"
+ "str d11, [x24], #0x8\n"
+ "str d22, [x23], #0x8\n"
+ "str d19, [x22], #0x8\n"
"tbz x11, #0, 146f\n"
- "st1 { v14.s }[2], [x9]\n"
- "st1 { v11.s }[2], [x25]\n"
- "st1 { v22.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
+ "st1 { v14.s }[2], [x28]\n"
+ "st1 { v11.s }[2], [x24]\n"
+ "st1 { v22.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x22]\n"
"b 146f\n"
"139:" // Height 4: Partial direct writeback: partial_1_12
"tbz x11, #0, 146f\n"
- "str s14, [x9, #0x0]\n"
- "str s11, [x25, #0x0]\n"
- "str s22, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
+ "str s14, [x28, #0x0]\n"
+ "str s11, [x24, #0x0]\n"
+ "str s22, [x23, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
"b 146f\n"
"140:" // Height 4: Partial direct writeback: partial_2_8
"tbz x11, #1, 141f\n"
- "str d13, [x9], #0x8\n"
- "str d10, [x25], #0x8\n"
- "str d21, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
+ "str d13, [x28], #0x8\n"
+ "str d10, [x24], #0x8\n"
+ "str d21, [x23], #0x8\n"
+ "str d18, [x22], #0x8\n"
"tbz x11, #0, 146f\n"
- "st1 { v13.s }[2], [x9]\n"
- "st1 { v10.s }[2], [x25]\n"
- "st1 { v21.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
+ "st1 { v13.s }[2], [x28]\n"
+ "st1 { v10.s }[2], [x24]\n"
+ "st1 { v21.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x22]\n"
"b 146f\n"
"141:" // Height 4: Partial direct writeback: partial_1_8
"tbz x11, #0, 146f\n"
- "str s13, [x9, #0x0]\n"
- "str s10, [x25, #0x0]\n"
- "str s21, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
+ "str s13, [x28, #0x0]\n"
+ "str s10, [x24, #0x0]\n"
+ "str s21, [x23, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
"b 146f\n"
"142:" // Height 4: Partial direct writeback: partial_4_0
"tbz x11, #2, 144f\n"
- "st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v7.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
"tbz x11, #1, 143f\n"
- "str d12, [x9], #0x8\n"
- "str d9, [x25], #0x8\n"
- "str d20, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
+ "str d12, [x28], #0x8\n"
+ "str d9, [x24], #0x8\n"
+ "str d20, [x23], #0x8\n"
+ "str d17, [x22], #0x8\n"
"tbz x11, #0, 146f\n"
- "st1 { v12.s }[2], [x9]\n"
- "st1 { v9.s }[2], [x25]\n"
- "st1 { v20.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
+ "st1 { v12.s }[2], [x28]\n"
+ "st1 { v9.s }[2], [x24]\n"
+ "st1 { v20.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x22]\n"
"b 146f\n"
"143:" // Height 4: Partial direct writeback: partial_1_4
"tbz x11, #0, 146f\n"
- "str s12, [x9, #0x0]\n"
- "str s9, [x25, #0x0]\n"
- "str s20, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
+ "str s12, [x28, #0x0]\n"
+ "str s9, [x24, #0x0]\n"
+ "str s20, [x23, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
"b 146f\n"
"144:" // Height 4: Partial direct writeback: partial_2_0
"tbz x11, #1, 145f\n"
- "str d7, [x9], #0x8\n"
- "str d8, [x25], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
+ "str d7, [x28], #0x8\n"
+ "str d8, [x24], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
"tbz x11, #0, 146f\n"
- "st1 { v7.s }[2], [x9]\n"
- "st1 { v8.s }[2], [x25]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
+ "st1 { v7.s }[2], [x28]\n"
+ "st1 { v8.s }[2], [x24]\n"
+ "st1 { v15.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x22]\n"
"b 146f\n"
"145:" // Height 4: Partial direct writeback: partial_1_0
- "str s7, [x9, #0x0]\n"
- "str s8, [x25, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
+ "str s7, [x28, #0x0]\n"
+ "str s8, [x24, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
"146:" // Height 4: Partial direct writeback: Done
"b 148f\n"
"147:" // Height 4: Full writeback
- "str q7, [x9, #0x0]\n"
- "str q12, [x9, #0x10]\n"
- "str q13, [x9, #0x20]\n"
- "str q14, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q8, [x25, #0x0]\n"
- "str q9, [x25, #0x10]\n"
- "str q10, [x25, #0x20]\n"
- "str q11, [x25, #0x30]\n"
- "str q15, [x24, #0x0]\n"
- "str q20, [x24, #0x10]\n"
- "str q21, [x24, #0x20]\n"
- "str q22, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
+ "str q7, [x28, #0x0]\n"
+ "str q12, [x28, #0x10]\n"
+ "str q13, [x28, #0x20]\n"
+ "str q14, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q8, [x24, #0x0]\n"
+ "str q9, [x24, #0x10]\n"
+ "str q10, [x24, #0x20]\n"
+ "str q11, [x24, #0x30]\n"
+ "str q15, [x23, #0x0]\n"
+ "str q20, [x23, #0x10]\n"
+ "str q21, [x23, #0x20]\n"
+ "str q22, [x23, #0x30]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q17, [x22, #0x10]\n"
+ "str q18, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
"148:" // Height 4: Writeback done
"subs x11, x11, #0x10\n"
"bgt 113b\n"
"b 224f\n"
"149:" // Height 5
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"150:" // Height 5: Column loop
- "cbz x12, 151f\n"
- "ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
+ "cbz x9, 151f\n"
+ "ldr q8, [x9, #0x0]\n"
"zip2 v12.2d, v8.2d, v8.2d\n"
+ "ldr q9, [x9, #0x10]\n"
"zip1 v8.2d, v8.2d, v8.2d\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
+ "ldr q10, [x9, #0x20]\n"
+ "mov v16.16b, v8.16b\n"
+ "ldr q11, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "mov v20.16b, v12.16b\n"
+ "mov v24.16b, v8.16b\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x12, x12, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
- "mov v16.16b, v8.16b\n"
- "mov v20.16b, v12.16b\n"
"mov v17.16b, v9.16b\n"
"mov v21.16b, v13.16b\n"
"mov v18.16b, v10.16b\n"
"mov v22.16b, v14.16b\n"
"mov v19.16b, v11.16b\n"
"mov v23.16b, v15.16b\n"
- "mov v24.16b, v8.16b\n"
"mov v28.16b, v12.16b\n"
"mov v25.16b, v9.16b\n"
"mov v29.16b, v13.16b\n"
@@ -2087,153 +2111,153 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"b 163f\n"
"151:" // Height 5: no bias
"tbz %x[flags], #0, 162f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x11, #0x10\n"
- "add x22, x23, x20, LSL #2\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"bge 160f\n"
"tbz x11, #3, 155f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
- "ld1 { v26.4s }, [x22], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v26.4s }, [x21], #0x10\n"
"tbz x11, #2, 153f\n"
- "ld1 { v11.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v19.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
- "ld1 { v27.4s }, [x22], #0x10\n"
+ "ld1 { v11.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v19.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
+ "ld1 { v27.4s }, [x21], #0x10\n"
"tbz x11, #1, 152f\n"
- "ldr d16, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "mov x20, #0x38\n"
- "ldr d24, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d6, [x22], #0x8\n"
+ "ldr d16, [x28], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d15, [x24], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "ldr d6, [x21], #0x8\n"
"tbz x11, #0, 159f\n"
- "ld1 { v16.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v24.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
- "ld1 { v6.s }[2], [x22]\n"
+ "ld1 { v16.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
+ "ld1 { v23.s }[2], [x22]\n"
+ "ld1 { v6.s }[2], [x21]\n"
"b 159f\n"
"152:" // Height 5: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x11, #0, 159f\n"
- "ldr s16, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s24, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
- "ldr s6, [x22, #0x0]\n"
+ "ldr s16, [x28, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
+ "ldr s23, [x22, #0x0]\n"
+ "ldr s6, [x21, #0x0]\n"
"b 159f\n"
"153:" // Height 5: Partial accumulate: partial_2_8
"tbz x11, #1, 154f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "mov x20, #0x28\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "ldr d27, [x21], #0x8\n"
"tbz x11, #0, 159f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
- "ld1 { v27.s }[2], [x22]\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v22.s }[2], [x22]\n"
+ "ld1 { v27.s }[2], [x21]\n"
"b 159f\n"
"154:" // Height 5: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x11, #0, 159f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
- "ldr s27, [x22, #0x0]\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s22, [x22, #0x0]\n"
+ "ldr s27, [x21, #0x0]\n"
"b 159f\n"
"155:" // Height 5: Partial accumulate: partial_4_0
"tbz x11, #2, 157f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
"tbz x11, #1, 156f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "mov x20, #0x18\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
+ "ldr d10, [x28], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d13, [x24], #0x8\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d26, [x21], #0x8\n"
"tbz x11, #0, 159f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
- "ld1 { v26.s }[2], [x22]\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v21.s }[2], [x22]\n"
+ "ld1 { v26.s }[2], [x21]\n"
"b 159f\n"
"156:" // Height 5: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x11, #0, 159f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
- "ldr s26, [x22, #0x0]\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "ldr s21, [x22, #0x0]\n"
+ "ldr s26, [x21, #0x0]\n"
"b 159f\n"
"157:" // Height 5: Partial accumulate: partial_2_0
"tbz x11, #1, 158f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "ldr d25, [x21], #0x8\n"
"tbz x11, #0, 159f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v20.s }[2], [x22]\n"
+ "ld1 { v25.s }[2], [x21]\n"
"b 159f\n"
"158:" // Height 5: Partial accumulate: partial_1_0
- "ldr s9, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
+ "ldr s9, [x28, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s12, [x24, #0x0]\n"
+ "ldr s17, [x23, #0x0]\n"
+ "ldr s20, [x22, #0x0]\n"
+ "ldr s25, [x21, #0x0]\n"
"159:" // Height 5: Partial accumulate: Done
- "sub x9, x9, x20\n"
+ "sub x28, x28, x19\n"
"b 161f\n"
"160:" // Height 5: full accumulate
- "ldr q9, [x9, #0x0]\n"
- "ldr q10, [x9, #0x10]\n"
- "ldr q11, [x9, #0x20]\n"
- "ldr q16, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q17, [x24, #0x0]\n"
- "ldr q18, [x24, #0x10]\n"
- "ldr q19, [x24, #0x20]\n"
- "ldr q24, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q25, [x22, #0x0]\n"
- "ldr q26, [x22, #0x10]\n"
- "ldr q27, [x22, #0x20]\n"
- "ldr q6, [x22, #0x30]\n"
+ "ldr q9, [x28, #0x0]\n"
+ "ldr q10, [x28, #0x10]\n"
+ "ldr q11, [x28, #0x20]\n"
+ "ldr q16, [x28, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q17, [x23, #0x0]\n"
+ "ldr q18, [x23, #0x10]\n"
+ "ldr q19, [x23, #0x20]\n"
+ "ldr q24, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
+ "ldr q25, [x21, #0x0]\n"
+ "ldr q26, [x21, #0x10]\n"
+ "ldr q27, [x21, #0x20]\n"
+ "ldr q6, [x21, #0x30]\n"
"161:" // Height 5: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -2286,98 +2310,97 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"movi v30.16b, #0x0\n"
"movi v31.16b, #0x0\n"
"163:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"164:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 165f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 166f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
- "add x22, x22, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 166f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
"b 166f\n"
"165:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
"166:" // Height 5: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"blt 169f\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q2, [x25, #0x0]\n"
- "cmp x27, #0x10\n"
- "ldr q3, [x24, #0x0]\n"
- "ldr q4, [x23, #0x0]\n"
- "ldr q5, [x22, #0x0]\n"
- "ldr q7, [x10, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "cmp x26, #0x10\n"
"blt 168f\n"
"167:" // Height 5: Multiply loop: Main loop head
+ "movi v6.16b, #0x0\n"
+ "ldr q2, [x24, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x23, #0x0]\n"
+ "add x24, x24, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ "ldr q4, [x22, #0x0]\n"
+ "add x23, x23, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q5, [x21, #0x0]\n"
+ "add x22, x22, #0x10\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
- "sub x27, x27, #0x8\n"
+ "ldr q7, [x10, #0x0]\n"
+ "add x21, x21, #0x10\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x26, x26, #0x8\n"
"trn2 v5.2d, v5.2d, v6.2d\n"
"ldr q6, [x10, #0x10]\n"
+ "cmp x26, #0x10\n"
+ ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e47ec98 // bfmmla v24.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x20]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x6e46ec9c // bfmmla v28.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x30]\n"
".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
- "add x25, x25, #0x10\n"
".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec99 // bfmmla v25.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x40]\n"
- "add x24, x24, #0x10\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
".inst 0x6e46ec9d // bfmmla v29.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x50]\n"
".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
- "cmp x27, #0x10\n"
".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9a // bfmmla v26.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x60]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e46ec9e // bfmmla v30.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x70]\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9b // bfmmla v27.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
- "ldr q2, [x25, #0x0]\n"
".inst 0x6e46ec9f // bfmmla v31.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x90]\n"
- "ldr q4, [x23, #0x0]\n"
".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
".inst 0x6e47ec70 // bfmmla v16.4s, v3.8h, v7.8h\n"
".inst 0x6e47ecb8 // bfmmla v24.4s, v5.8h, v7.8h\n"
@@ -2406,58 +2429,61 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
".inst 0x6e47ec2b // bfmmla v11.4s, v1.8h, v7.8h\n"
".inst 0x6e47ec73 // bfmmla v19.4s, v3.8h, v7.8h\n"
".inst 0x6e47ecbb // bfmmla v27.4s, v5.8h, v7.8h\n"
- "ldr q7, [x10, #0x0]\n"
".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
- "ldr q1, [x26, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
".inst 0x6e46ec77 // bfmmla v23.4s, v3.8h, v6.8h\n"
- "ldr q3, [x24, #0x0]\n"
".inst 0x6e46ecbf // bfmmla v31.4s, v5.8h, v6.8h\n"
- "ldr q5, [x22, #0x0]\n"
"bge 167b\n"
"168:" // Height 5: Multiply loop: Single iteration only
+ "movi v6.16b, #0x0\n"
+ "ldr q2, [x24, #0x0]\n"
+ "sub x26, x26, #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x23, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ "ldr q4, [x22, #0x0]\n"
+ "add x24, x24, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q5, [x21, #0x0]\n"
+ "add x23, x23, #0x10\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
- "add x26, x26, #0x10\n"
+ "ldr q7, [x10, #0x0]\n"
+ "add x22, x22, #0x10\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x21, x21, #0x10\n"
"trn2 v5.2d, v5.2d, v6.2d\n"
"ldr q6, [x10, #0x10]\n"
+ ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e47ec98 // bfmmla v24.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x20]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
- "add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x6e46ec9c // bfmmla v28.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x30]\n"
".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
- "add x24, x24, #0x10\n"
".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec99 // bfmmla v25.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x40]\n"
- "add x23, x23, #0x10\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
- "add x22, x22, #0x10\n"
- "sub x27, x27, #0x8\n"
".inst 0x6e46ec9d // bfmmla v29.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x50]\n"
".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9a // bfmmla v26.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x60]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e46ec9e // bfmmla v30.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x70]\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9b // bfmmla v27.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x80]\n"
@@ -2497,28 +2523,29 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
".inst 0x6e46ec77 // bfmmla v23.4s, v3.8h, v6.8h\n"
".inst 0x6e46ecbf // bfmmla v31.4s, v5.8h, v6.8h\n"
"169:" // Height 5: Multiply loop: Main loop skip
- "cbz x27, 174f\n"
- "cmp x27, #0x4\n"
+ "cbz x26, 174f\n"
+ "cmp x26, #0x4\n"
"blt 171f\n"
"170:" // Height 5: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d2, [x25], #0x8\n"
+ "movi v7.16b, #0x0\n"
+ "ldr d1, [x25], #0x8\n"
+ "sub x26, x26, #0x4\n"
+ "ldr d2, [x24], #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "ldr d3, [x24], #0x8\n"
- "ldr d4, [x23], #0x8\n"
+ "ldr d3, [x23], #0x8\n"
+ "cmp x26, #0x4\n"
+ "ldr d4, [x22], #0x8\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
- "sub x27, x27, #0x4\n"
- "ldr d5, [x22], #0x8\n"
+ "ldr d5, [x21], #0x8\n"
"ldr q6, [x10, #0x0]\n"
"trn1 v4.2d, v5.2d, v7.2d\n"
- ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
"ldr q7, [x10, #0x10]\n"
+ ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec98 // bfmmla v24.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x20]\n"
".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
- "cmp x27, #0x4\n"
".inst 0x6e47ec9c // bfmmla v28.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x30]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
@@ -2537,36 +2564,37 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
".inst 0x6e47ec56 // bfmmla v22.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9e // bfmmla v30.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x70]\n"
- ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
"add x10, x10, #0x80\n"
+ ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec53 // bfmmla v19.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec9b // bfmmla v27.4s, v4.8h, v6.8h\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec57 // bfmmla v23.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9f // bfmmla v31.4s, v4.8h, v7.8h\n"
"bge 170b\n"
+ "cbz x26, 174f\n"
"171:" // Height 5: Multiply loop: Skip odd blocks
- "cbz x27, 174f\n"
- "tbz x27, #1, 172f\n"
- "ldr s1, [x26], #0x4\n"
- "ldr s2, [x25], #0x4\n"
- "ldr s3, [x24], #0x4\n"
- "ldr s4, [x23], #0x4\n"
- "ldr s5, [x22], #0x4\n"
- "tbz x27, #0, 173f\n"
- "ld1 { v1.h }[2], [x26]\n"
- "ld1 { v2.h }[2], [x25]\n"
- "ld1 { v3.h }[2], [x24]\n"
- "ld1 { v4.h }[2], [x23]\n"
- "ld1 { v5.h }[2], [x22]\n"
+ "tbz x26, #1, 172f\n"
+ "ldr s1, [x25], #0x4\n"
+ "ldr s2, [x24], #0x4\n"
+ "ldr s3, [x23], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
+ "ldr s5, [x21], #0x4\n"
+ "tbz x26, #0, 173f\n"
+ "ld1 { v1.h }[2], [x25]\n"
+ "ld1 { v2.h }[2], [x24]\n"
+ "ld1 { v3.h }[2], [x23]\n"
+ "ld1 { v4.h }[2], [x22]\n"
+ "ld1 { v5.h }[2], [x21]\n"
"b 173f\n"
"172:" // Height 5: Multiply loop: Ragged operand read: partial_1_0
- "ldr h1, [x26, #0x0]\n"
- "ldr h2, [x25, #0x0]\n"
- "ldr h3, [x24, #0x0]\n"
- "ldr h4, [x23, #0x0]\n"
- "ldr h5, [x22, #0x0]\n"
+ "ldr h1, [x25, #0x0]\n"
+ "ldr h2, [x24, #0x0]\n"
+ "ldr h3, [x23, #0x0]\n"
+ "ldr h4, [x22, #0x0]\n"
+ "ldr h5, [x21, #0x0]\n"
"173:" // Height 5: Multiply loop: Ragged operand read: Done
+ "movi v6.16b, #0x0\n"
"ldr q7, [x10, #0x0]\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
@@ -2604,29 +2632,84 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec9f // bfmmla v31.4s, v4.8h, v6.8h\n"
"174:" // Height 5: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 164b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x19, LSL #2\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
+ "tbz %x[flags], #1, 175f\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v8.4s, v8.4s, v0.4s\n"
+ "fmin v9.4s, v9.4s, v0.4s\n"
+ "fmin v10.4s, v10.4s, v0.4s\n"
+ "fmin v11.4s, v11.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v0.4s\n"
+ "fmin v13.4s, v13.4s, v0.4s\n"
+ "fmin v14.4s, v14.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmin v15.4s, v15.4s, v0.4s\n"
+ "fmin v16.4s, v16.4s, v0.4s\n"
+ "fmin v17.4s, v17.4s, v0.4s\n"
+ "fmax v15.4s, v15.4s, v1.4s\n"
+ "fmax v16.4s, v16.4s, v1.4s\n"
+ "fmax v17.4s, v17.4s, v1.4s\n"
+ "fmin v18.4s, v18.4s, v0.4s\n"
+ "fmin v19.4s, v19.4s, v0.4s\n"
+ "fmin v20.4s, v20.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v1.4s\n"
+ "fmax v19.4s, v19.4s, v1.4s\n"
+ "fmax v20.4s, v20.4s, v1.4s\n"
+ "fmin v21.4s, v21.4s, v0.4s\n"
+ "fmin v22.4s, v22.4s, v0.4s\n"
+ "fmin v23.4s, v23.4s, v0.4s\n"
+ "fmax v21.4s, v21.4s, v1.4s\n"
+ "fmax v22.4s, v22.4s, v1.4s\n"
+ "fmax v23.4s, v23.4s, v1.4s\n"
+ "fmin v24.4s, v24.4s, v0.4s\n"
+ "fmin v25.4s, v25.4s, v0.4s\n"
+ "fmin v26.4s, v26.4s, v0.4s\n"
+ "fmax v24.4s, v24.4s, v1.4s\n"
+ "fmax v25.4s, v25.4s, v1.4s\n"
+ "fmax v26.4s, v26.4s, v1.4s\n"
+ "fmin v27.4s, v27.4s, v0.4s\n"
+ "fmin v28.4s, v28.4s, v0.4s\n"
+ "fmin v29.4s, v29.4s, v0.4s\n"
+ "fmax v27.4s, v27.4s, v1.4s\n"
+ "fmax v28.4s, v28.4s, v1.4s\n"
+ "fmax v29.4s, v29.4s, v1.4s\n"
+ "fmin v30.4s, v30.4s, v0.4s\n"
+ "fmin v31.4s, v31.4s, v0.4s\n"
+ "fmax v30.4s, v30.4s, v1.4s\n"
+ "fmax v31.4s, v31.4s, v1.4s\n"
+ "175:" // Height 5: No activation
"uzp1 v7.2d, v8.2d, v12.2d\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "cmp x11, #0x10\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x25, #0x0]\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
- "prfm pstl1keep, [x24, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
"uzp1 v15.2d, v16.2d, v20.2d\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
@@ -2638,222 +2721,175 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"uzp1 v25.2d, v25.2d, v29.2d\n"
"uzp1 v26.2d, v26.2d, v30.2d\n"
"uzp1 v27.2d, v27.2d, v31.2d\n"
- "tbz %x[flags], #1, 175f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v7.4s, v7.4s, v1.4s\n"
- "fmin v12.4s, v12.4s, v1.4s\n"
- "fmin v13.4s, v13.4s, v1.4s\n"
- "fmin v14.4s, v14.4s, v1.4s\n"
- "fmin v8.4s, v8.4s, v1.4s\n"
- "fmin v9.4s, v9.4s, v1.4s\n"
- "fmin v10.4s, v10.4s, v1.4s\n"
- "fmin v11.4s, v11.4s, v1.4s\n"
- "fmin v15.4s, v15.4s, v1.4s\n"
- "fmin v20.4s, v20.4s, v1.4s\n"
- "fmin v21.4s, v21.4s, v1.4s\n"
- "fmin v22.4s, v22.4s, v1.4s\n"
- "fmin v16.4s, v16.4s, v1.4s\n"
- "fmin v17.4s, v17.4s, v1.4s\n"
- "fmin v18.4s, v18.4s, v1.4s\n"
- "fmin v19.4s, v19.4s, v1.4s\n"
- "fmin v24.4s, v24.4s, v1.4s\n"
- "fmin v25.4s, v25.4s, v1.4s\n"
- "fmin v26.4s, v26.4s, v1.4s\n"
- "fmin v27.4s, v27.4s, v1.4s\n"
- "fmax v7.4s, v7.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v15.4s, v15.4s, v0.4s\n"
- "fmax v20.4s, v20.4s, v0.4s\n"
- "fmax v21.4s, v21.4s, v0.4s\n"
- "fmax v22.4s, v22.4s, v0.4s\n"
- "fmax v16.4s, v16.4s, v0.4s\n"
- "fmax v17.4s, v17.4s, v0.4s\n"
- "fmax v18.4s, v18.4s, v0.4s\n"
- "fmax v19.4s, v19.4s, v0.4s\n"
- "fmax v24.4s, v24.4s, v0.4s\n"
- "fmax v25.4s, v25.4s, v0.4s\n"
- "fmax v26.4s, v26.4s, v0.4s\n"
- "fmax v27.4s, v27.4s, v0.4s\n"
- "175:" // Height 5: No activation
- "cmp x11, #0x10\n"
"bge 184f\n"
"tbz x11, #3, 179f\n"
- "st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v9.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
- "st1 { v25.4s }, [x22], #0x10\n"
+ "st1 { v7.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
+ "st1 { v25.4s }, [x21], #0x10\n"
"tbz x11, #2, 177f\n"
- "st1 { v13.4s }, [x9], #0x10\n"
- "st1 { v10.4s }, [x25], #0x10\n"
- "st1 { v21.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "st1 { v26.4s }, [x22], #0x10\n"
+ "st1 { v13.4s }, [x28], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
+ "st1 { v21.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
+ "st1 { v26.4s }, [x21], #0x10\n"
"tbz x11, #1, 176f\n"
- "str d14, [x9], #0x8\n"
- "str d11, [x25], #0x8\n"
- "str d22, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
+ "str d14, [x28], #0x8\n"
+ "str d11, [x24], #0x8\n"
+ "str d22, [x23], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "str d27, [x21], #0x8\n"
"tbz x11, #0, 183f\n"
- "st1 { v14.s }[2], [x9]\n"
- "st1 { v11.s }[2], [x25]\n"
- "st1 { v22.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
- "st1 { v27.s }[2], [x22]\n"
+ "st1 { v14.s }[2], [x28]\n"
+ "st1 { v11.s }[2], [x24]\n"
+ "st1 { v22.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x22]\n"
+ "st1 { v27.s }[2], [x21]\n"
"b 183f\n"
"176:" // Height 5: Partial direct writeback: partial_1_12
"tbz x11, #0, 183f\n"
- "str s14, [x9, #0x0]\n"
- "str s11, [x25, #0x0]\n"
- "str s22, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
- "str s27, [x22, #0x0]\n"
+ "str s14, [x28, #0x0]\n"
+ "str s11, [x24, #0x0]\n"
+ "str s22, [x23, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
+ "str s27, [x21, #0x0]\n"
"b 183f\n"
"177:" // Height 5: Partial direct writeback: partial_2_8
"tbz x11, #1, 178f\n"
- "str d13, [x9], #0x8\n"
- "str d10, [x25], #0x8\n"
- "str d21, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
+ "str d13, [x28], #0x8\n"
+ "str d10, [x24], #0x8\n"
+ "str d21, [x23], #0x8\n"
+ "str d18, [x22], #0x8\n"
+ "str d26, [x21], #0x8\n"
"tbz x11, #0, 183f\n"
- "st1 { v13.s }[2], [x9]\n"
- "st1 { v10.s }[2], [x25]\n"
- "st1 { v21.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
- "st1 { v26.s }[2], [x22]\n"
+ "st1 { v13.s }[2], [x28]\n"
+ "st1 { v10.s }[2], [x24]\n"
+ "st1 { v21.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x22]\n"
+ "st1 { v26.s }[2], [x21]\n"
"b 183f\n"
"178:" // Height 5: Partial direct writeback: partial_1_8
"tbz x11, #0, 183f\n"
- "str s13, [x9, #0x0]\n"
- "str s10, [x25, #0x0]\n"
- "str s21, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
- "str s26, [x22, #0x0]\n"
+ "str s13, [x28, #0x0]\n"
+ "str s10, [x24, #0x0]\n"
+ "str s21, [x23, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
+ "str s26, [x21, #0x0]\n"
"b 183f\n"
"179:" // Height 5: Partial direct writeback: partial_4_0
"tbz x11, #2, 181f\n"
- "st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
+ "st1 { v7.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
"tbz x11, #1, 180f\n"
- "str d12, [x9], #0x8\n"
- "str d9, [x25], #0x8\n"
- "str d20, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
+ "str d12, [x28], #0x8\n"
+ "str d9, [x24], #0x8\n"
+ "str d20, [x23], #0x8\n"
+ "str d17, [x22], #0x8\n"
+ "str d25, [x21], #0x8\n"
"tbz x11, #0, 183f\n"
- "st1 { v12.s }[2], [x9]\n"
- "st1 { v9.s }[2], [x25]\n"
- "st1 { v20.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
- "st1 { v25.s }[2], [x22]\n"
+ "st1 { v12.s }[2], [x28]\n"
+ "st1 { v9.s }[2], [x24]\n"
+ "st1 { v20.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x22]\n"
+ "st1 { v25.s }[2], [x21]\n"
"b 183f\n"
"180:" // Height 5: Partial direct writeback: partial_1_4
"tbz x11, #0, 183f\n"
- "str s12, [x9, #0x0]\n"
- "str s9, [x25, #0x0]\n"
- "str s20, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
- "str s25, [x22, #0x0]\n"
+ "str s12, [x28, #0x0]\n"
+ "str s9, [x24, #0x0]\n"
+ "str s20, [x23, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
+ "str s25, [x21, #0x0]\n"
"b 183f\n"
"181:" // Height 5: Partial direct writeback: partial_2_0
"tbz x11, #1, 182f\n"
- "str d7, [x9], #0x8\n"
- "str d8, [x25], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
+ "str d7, [x28], #0x8\n"
+ "str d8, [x24], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "str d24, [x21], #0x8\n"
"tbz x11, #0, 183f\n"
- "st1 { v7.s }[2], [x9]\n"
- "st1 { v8.s }[2], [x25]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
- "st1 { v24.s }[2], [x22]\n"
+ "st1 { v7.s }[2], [x28]\n"
+ "st1 { v8.s }[2], [x24]\n"
+ "st1 { v15.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x22]\n"
+ "st1 { v24.s }[2], [x21]\n"
"b 183f\n"
"182:" // Height 5: Partial direct writeback: partial_1_0
- "str s7, [x9, #0x0]\n"
- "str s8, [x25, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
- "str s24, [x22, #0x0]\n"
+ "str s7, [x28, #0x0]\n"
+ "str s8, [x24, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
+ "str s24, [x21, #0x0]\n"
"183:" // Height 5: Partial direct writeback: Done
"b 185f\n"
"184:" // Height 5: Full writeback
- "str q7, [x9, #0x0]\n"
- "str q12, [x9, #0x10]\n"
- "str q13, [x9, #0x20]\n"
- "str q14, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q8, [x25, #0x0]\n"
- "str q9, [x25, #0x10]\n"
- "str q10, [x25, #0x20]\n"
- "str q11, [x25, #0x30]\n"
- "str q15, [x24, #0x0]\n"
- "str q20, [x24, #0x10]\n"
- "str q21, [x24, #0x20]\n"
- "str q22, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
- "str q24, [x22, #0x0]\n"
- "str q25, [x22, #0x10]\n"
- "str q26, [x22, #0x20]\n"
- "str q27, [x22, #0x30]\n"
+ "str q7, [x28, #0x0]\n"
+ "str q12, [x28, #0x10]\n"
+ "str q13, [x28, #0x20]\n"
+ "str q14, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q8, [x24, #0x0]\n"
+ "str q9, [x24, #0x10]\n"
+ "str q10, [x24, #0x20]\n"
+ "str q11, [x24, #0x30]\n"
+ "str q15, [x23, #0x0]\n"
+ "str q20, [x23, #0x10]\n"
+ "str q21, [x23, #0x20]\n"
+ "str q22, [x23, #0x30]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q17, [x22, #0x10]\n"
+ "str q18, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
+ "str q24, [x21, #0x0]\n"
+ "str q25, [x21, #0x10]\n"
+ "str q26, [x21, #0x20]\n"
+ "str q27, [x21, #0x30]\n"
"185:" // Height 5: Writeback done
"subs x11, x11, #0x10\n"
"bgt 150b\n"
"b 224f\n"
"186:" // Height 6
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x18\n"
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x19, #0x18\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"187:" // Height 6: Column loop
- "cbz x12, 188f\n"
- "ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
+ "cbz x9, 188f\n"
+ "ldr q8, [x9, #0x0]\n"
"zip2 v12.2d, v8.2d, v8.2d\n"
+ "ldr q9, [x9, #0x10]\n"
"zip1 v8.2d, v8.2d, v8.2d\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
+ "ldr q10, [x9, #0x20]\n"
+ "mov v16.16b, v8.16b\n"
+ "ldr q11, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "mov v20.16b, v12.16b\n"
+ "mov v24.16b, v8.16b\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x12, x12, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
- "mov v16.16b, v8.16b\n"
- "mov v20.16b, v12.16b\n"
"mov v17.16b, v9.16b\n"
"mov v21.16b, v13.16b\n"
"mov v18.16b, v10.16b\n"
"mov v22.16b, v14.16b\n"
"mov v19.16b, v11.16b\n"
"mov v23.16b, v15.16b\n"
- "mov v24.16b, v8.16b\n"
"mov v28.16b, v12.16b\n"
"mov v25.16b, v9.16b\n"
"mov v29.16b, v13.16b\n"
@@ -2864,174 +2900,174 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"b 200f\n"
"188:" // Height 6: no bias
"tbz %x[flags], #0, 199f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x11, #0x10\n"
- "add x21, x22, x20, LSL #2\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"bge 197f\n"
"tbz x11, #3, 192f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
- "ld1 { v26.4s }, [x22], #0x10\n"
- "ld1 { v29.4s }, [x21], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v26.4s }, [x21], #0x10\n"
+ "ld1 { v28.4s }, [x20], #0x10\n"
+ "ld1 { v29.4s }, [x20], #0x10\n"
"tbz x11, #2, 190f\n"
- "ld1 { v11.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v19.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
- "ld1 { v27.4s }, [x22], #0x10\n"
- "ld1 { v30.4s }, [x21], #0x10\n"
+ "ld1 { v11.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v19.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
+ "ld1 { v27.4s }, [x21], #0x10\n"
+ "ld1 { v30.4s }, [x20], #0x10\n"
"tbz x11, #1, 189f\n"
- "ldr d16, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "mov x20, #0x38\n"
- "ldr d24, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d6, [x22], #0x8\n"
- "ldr d31, [x21], #0x8\n"
+ "ldr d16, [x28], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d15, [x24], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "ldr d6, [x21], #0x8\n"
+ "ldr d31, [x20], #0x8\n"
"tbz x11, #0, 196f\n"
- "ld1 { v16.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v24.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
- "ld1 { v6.s }[2], [x22]\n"
- "ld1 { v31.s }[2], [x21]\n"
+ "ld1 { v16.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
+ "ld1 { v23.s }[2], [x22]\n"
+ "ld1 { v6.s }[2], [x21]\n"
+ "ld1 { v31.s }[2], [x20]\n"
"b 196f\n"
"189:" // Height 6: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x11, #0, 196f\n"
- "ldr s16, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s24, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
- "ldr s6, [x22, #0x0]\n"
- "ldr s31, [x21, #0x0]\n"
+ "ldr s16, [x28, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
+ "ldr s23, [x22, #0x0]\n"
+ "ldr s6, [x21, #0x0]\n"
+ "ldr s31, [x20, #0x0]\n"
"b 196f\n"
"190:" // Height 6: Partial accumulate: partial_2_8
"tbz x11, #1, 191f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "mov x20, #0x28\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
- "ldr d30, [x21], #0x8\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "ldr d27, [x21], #0x8\n"
+ "ldr d30, [x20], #0x8\n"
"tbz x11, #0, 196f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
- "ld1 { v27.s }[2], [x22]\n"
- "ld1 { v30.s }[2], [x21]\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v22.s }[2], [x22]\n"
+ "ld1 { v27.s }[2], [x21]\n"
+ "ld1 { v30.s }[2], [x20]\n"
"b 196f\n"
"191:" // Height 6: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x11, #0, 196f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
- "ldr s27, [x22, #0x0]\n"
- "ldr s30, [x21, #0x0]\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s22, [x22, #0x0]\n"
+ "ldr s27, [x21, #0x0]\n"
+ "ldr s30, [x20, #0x0]\n"
"b 196f\n"
"192:" // Height 6: Partial accumulate: partial_4_0
"tbz x11, #2, 194f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
+ "ld1 { v28.4s }, [x20], #0x10\n"
"tbz x11, #1, 193f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "mov x20, #0x18\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
- "ldr d29, [x21], #0x8\n"
+ "ldr d10, [x28], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d13, [x24], #0x8\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d26, [x21], #0x8\n"
+ "ldr d29, [x20], #0x8\n"
"tbz x11, #0, 196f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
- "ld1 { v26.s }[2], [x22]\n"
- "ld1 { v29.s }[2], [x21]\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v21.s }[2], [x22]\n"
+ "ld1 { v26.s }[2], [x21]\n"
+ "ld1 { v29.s }[2], [x20]\n"
"b 196f\n"
"193:" // Height 6: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x11, #0, 196f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
- "ldr s26, [x22, #0x0]\n"
- "ldr s29, [x21, #0x0]\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "ldr s21, [x22, #0x0]\n"
+ "ldr s26, [x21, #0x0]\n"
+ "ldr s29, [x20, #0x0]\n"
"b 196f\n"
"194:" // Height 6: Partial accumulate: partial_2_0
"tbz x11, #1, 195f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
- "ldr d28, [x21], #0x8\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "ldr d25, [x21], #0x8\n"
+ "ldr d28, [x20], #0x8\n"
"tbz x11, #0, 196f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
- "ld1 { v28.s }[2], [x21]\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v20.s }[2], [x22]\n"
+ "ld1 { v25.s }[2], [x21]\n"
+ "ld1 { v28.s }[2], [x20]\n"
"b 196f\n"
"195:" // Height 6: Partial accumulate: partial_1_0
- "ldr s9, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
- "ldr s28, [x21, #0x0]\n"
+ "ldr s9, [x28, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s12, [x24, #0x0]\n"
+ "ldr s17, [x23, #0x0]\n"
+ "ldr s20, [x22, #0x0]\n"
+ "ldr s25, [x21, #0x0]\n"
+ "ldr s28, [x20, #0x0]\n"
"196:" // Height 6: Partial accumulate: Done
- "sub x9, x9, x20\n"
+ "sub x28, x28, x19\n"
"b 198f\n"
"197:" // Height 6: full accumulate
- "ldr q9, [x9, #0x0]\n"
- "ldr q10, [x9, #0x10]\n"
- "ldr q11, [x9, #0x20]\n"
- "ldr q16, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q17, [x24, #0x0]\n"
- "ldr q18, [x24, #0x10]\n"
- "ldr q19, [x24, #0x20]\n"
- "ldr q24, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q25, [x22, #0x0]\n"
- "ldr q26, [x22, #0x10]\n"
- "ldr q27, [x22, #0x20]\n"
- "ldr q6, [x22, #0x30]\n"
- "ldr q28, [x21, #0x0]\n"
- "ldr q29, [x21, #0x10]\n"
- "ldr q30, [x21, #0x20]\n"
- "ldr q31, [x21, #0x30]\n"
+ "ldr q9, [x28, #0x0]\n"
+ "ldr q10, [x28, #0x10]\n"
+ "ldr q11, [x28, #0x20]\n"
+ "ldr q16, [x28, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q17, [x23, #0x0]\n"
+ "ldr q18, [x23, #0x10]\n"
+ "ldr q19, [x23, #0x20]\n"
+ "ldr q24, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
+ "ldr q25, [x21, #0x0]\n"
+ "ldr q26, [x21, #0x10]\n"
+ "ldr q27, [x21, #0x20]\n"
+ "ldr q6, [x21, #0x30]\n"
+ "ldr q28, [x20, #0x0]\n"
+ "ldr q29, [x20, #0x10]\n"
+ "ldr q30, [x20, #0x20]\n"
+ "ldr q31, [x20, #0x30]\n"
"198:" // Height 6: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -3084,104 +3120,103 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"movi v30.16b, #0x0\n"
"movi v31.16b, #0x0\n"
"200:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"201:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 202f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 203f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
- "add x22, x22, x20, LSL #1\n"
- "add x21, x21, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 203f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
+ "add x20, x20, x19, LSL #1\n"
"b 203f\n"
"202:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
"203:" // Height 6: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"blt 206f\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q2, [x25, #0x0]\n"
- "cmp x27, #0x10\n"
- "ldr q3, [x24, #0x0]\n"
- "ldr q4, [x23, #0x0]\n"
- "ldr q5, [x22, #0x0]\n"
- "ldr q6, [x21, #0x0]\n"
- "ldr q7, [x10, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "ldr q2, [x24, #0x0]\n"
+ "cmp x26, #0x10\n"
"blt 205f\n"
"204:" // Height 6: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x23, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
- "sub x27, x27, #0x8\n"
+ "ldr q4, [x22, #0x0]\n"
+ "add x24, x24, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q5, [x21, #0x0]\n"
+ "add x23, x23, #0x10\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
- "add x26, x26, #0x10\n"
+ "ldr q6, [x20, #0x0]\n"
+ "add x22, x22, #0x10\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
+ "ldr q7, [x10, #0x0]\n"
+ "add x21, x21, #0x10\n"
"trn2 v5.2d, v5.2d, v6.2d\n"
"ldr q6, [x10, #0x10]\n"
+ "add x20, x20, #0x10\n"
+ ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x26, x26, #0x8\n"
+ ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "cmp x26, #0x10\n"
".inst 0x6e47ec98 // bfmmla v24.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x20]\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
- "add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e46ec9c // bfmmla v28.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x30]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
- "add x24, x24, #0x10\n"
".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
".inst 0x6e47ec99 // bfmmla v25.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x40]\n"
- "add x23, x23, #0x10\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
- "add x22, x22, #0x10\n"
- "add x21, x21, #0x10\n"
".inst 0x6e46ec9d // bfmmla v29.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x50]\n"
".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
- "cmp x27, #0x10\n"
".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9a // bfmmla v26.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x60]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e46ec9e // bfmmla v30.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x70]\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9b // bfmmla v27.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
- "ldr q2, [x25, #0x0]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
+ "ldr q2, [x24, #0x0]\n"
".inst 0x6e46ec9f // bfmmla v31.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x90]\n"
- "ldr q4, [x23, #0x0]\n"
".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
".inst 0x6e47ec70 // bfmmla v16.4s, v3.8h, v7.8h\n"
".inst 0x6e47ecb8 // bfmmla v24.4s, v5.8h, v7.8h\n"
@@ -3210,64 +3245,65 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
".inst 0x6e47ec2b // bfmmla v11.4s, v1.8h, v7.8h\n"
".inst 0x6e47ec73 // bfmmla v19.4s, v3.8h, v7.8h\n"
".inst 0x6e47ecbb // bfmmla v27.4s, v5.8h, v7.8h\n"
- "ldr q7, [x10, #0x0]\n"
".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
- "ldr q1, [x26, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
".inst 0x6e46ec77 // bfmmla v23.4s, v3.8h, v6.8h\n"
- "ldr q3, [x24, #0x0]\n"
".inst 0x6e46ecbf // bfmmla v31.4s, v5.8h, v6.8h\n"
- "ldr q5, [x22, #0x0]\n"
- "ldr q6, [x21, #0x0]\n"
"bge 204b\n"
"205:" // Height 6: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x23, #0x0]\n"
+ "sub x26, x26, #0x8\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
- "add x26, x26, #0x10\n"
+ "ldr q4, [x22, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q5, [x21, #0x0]\n"
+ "add x24, x24, #0x10\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
- "add x25, x25, #0x10\n"
+ "ldr q6, [x20, #0x0]\n"
+ "add x23, x23, #0x10\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
+ "ldr q7, [x10, #0x0]\n"
+ "add x22, x22, #0x10\n"
"trn2 v5.2d, v5.2d, v6.2d\n"
"ldr q6, [x10, #0x10]\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x20, x20, #0x10\n"
+ ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e47ec98 // bfmmla v24.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x20]\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
- "add x24, x24, #0x10\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e46ec9c // bfmmla v28.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x30]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
- "add x23, x23, #0x10\n"
".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
".inst 0x6e47ec99 // bfmmla v25.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x40]\n"
- "add x22, x22, #0x10\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
- "add x21, x21, #0x10\n"
- "sub x27, x27, #0x8\n"
".inst 0x6e46ec9d // bfmmla v29.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x50]\n"
".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9a // bfmmla v26.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x60]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e46ec9e // bfmmla v30.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x70]\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9b // bfmmla v27.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x80]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec9f // bfmmla v31.4s, v4.8h, v6.8h\n"
@@ -3304,20 +3340,20 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
".inst 0x6e46ec77 // bfmmla v23.4s, v3.8h, v6.8h\n"
".inst 0x6e46ecbf // bfmmla v31.4s, v5.8h, v6.8h\n"
"206:" // Height 6: Multiply loop: Main loop skip
- "cbz x27, 211f\n"
- "cmp x27, #0x4\n"
+ "cbz x26, 211f\n"
+ "cmp x26, #0x4\n"
"blt 208f\n"
"207:" // Height 6: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d2, [x25], #0x8\n"
+ "ldr d1, [x25], #0x8\n"
+ "sub x26, x26, #0x4\n"
+ "ldr d2, [x24], #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "sub x27, x27, #0x4\n"
- "ldr d3, [x24], #0x8\n"
- "ldr d4, [x23], #0x8\n"
+ "ldr d3, [x23], #0x8\n"
+ "cmp x26, #0x4\n"
+ "ldr d4, [x22], #0x8\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
- "cmp x27, #0x4\n"
- "ldr d5, [x22], #0x8\n"
- "ldr d7, [x21], #0x8\n"
+ "ldr d5, [x21], #0x8\n"
+ "ldr d7, [x20], #0x8\n"
"trn1 v4.2d, v5.2d, v7.2d\n"
"ldr q6, [x10, #0x0]\n"
"ldr q7, [x10, #0x10]\n"
@@ -3353,37 +3389,37 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
".inst 0x6e47ec57 // bfmmla v23.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9f // bfmmla v31.4s, v4.8h, v7.8h\n"
"bge 207b\n"
+ "cbz x26, 211f\n"
"208:" // Height 6: Multiply loop: Skip odd blocks
- "cbz x27, 211f\n"
- "tbz x27, #1, 209f\n"
- "ldr s1, [x26], #0x4\n"
- "ldr s2, [x25], #0x4\n"
- "ldr s3, [x24], #0x4\n"
- "ldr s4, [x23], #0x4\n"
- "ldr s5, [x22], #0x4\n"
- "ldr s6, [x21], #0x4\n"
- "tbz x27, #0, 210f\n"
- "ld1 { v1.h }[2], [x26]\n"
- "ld1 { v2.h }[2], [x25]\n"
- "ld1 { v3.h }[2], [x24]\n"
- "ld1 { v4.h }[2], [x23]\n"
- "ld1 { v5.h }[2], [x22]\n"
- "ld1 { v6.h }[2], [x21]\n"
+ "tbz x26, #1, 209f\n"
+ "ldr s1, [x25], #0x4\n"
+ "ldr s2, [x24], #0x4\n"
+ "ldr s3, [x23], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
+ "ldr s5, [x21], #0x4\n"
+ "ldr s6, [x20], #0x4\n"
+ "tbz x26, #0, 210f\n"
+ "ld1 { v1.h }[2], [x25]\n"
+ "ld1 { v2.h }[2], [x24]\n"
+ "ld1 { v3.h }[2], [x23]\n"
+ "ld1 { v4.h }[2], [x22]\n"
+ "ld1 { v5.h }[2], [x21]\n"
+ "ld1 { v6.h }[2], [x20]\n"
"b 210f\n"
"209:" // Height 6: Multiply loop: Ragged operand read: partial_1_0
- "ldr h1, [x26, #0x0]\n"
- "ldr h2, [x25, #0x0]\n"
- "ldr h3, [x24, #0x0]\n"
- "ldr h4, [x23, #0x0]\n"
- "ldr h5, [x22, #0x0]\n"
- "ldr h6, [x21, #0x0]\n"
+ "ldr h1, [x25, #0x0]\n"
+ "ldr h2, [x24, #0x0]\n"
+ "ldr h3, [x23, #0x0]\n"
+ "ldr h4, [x22, #0x0]\n"
+ "ldr h5, [x21, #0x0]\n"
+ "ldr h6, [x20, #0x0]\n"
"210:" // Height 6: Multiply loop: Ragged operand read: Done
- "ldr q7, [x10, #0x0]\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q7, [x10, #0x0]\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
- ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
"ldr q6, [x10, #0x10]\n"
+ ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec98 // bfmmla v24.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x20]\n"
@@ -3407,41 +3443,96 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec9e // bfmmla v30.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x70]\n"
- ".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
"add x10, x10, #0x80\n"
+ ".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9b // bfmmla v27.4s, v4.8h, v7.8h\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec9f // bfmmla v31.4s, v4.8h, v6.8h\n"
"211:" // Height 6: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 201b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x19, LSL #2\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
+ "add x20, x21, x19, LSL #2\n"
+ "prfm pstl1keep, [x20, #0x0]\n"
+ "tbz %x[flags], #1, 212f\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v8.4s, v8.4s, v0.4s\n"
+ "fmin v9.4s, v9.4s, v0.4s\n"
+ "fmin v10.4s, v10.4s, v0.4s\n"
+ "fmin v11.4s, v11.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v0.4s\n"
+ "fmin v13.4s, v13.4s, v0.4s\n"
+ "fmin v14.4s, v14.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmin v15.4s, v15.4s, v0.4s\n"
+ "fmin v16.4s, v16.4s, v0.4s\n"
+ "fmin v17.4s, v17.4s, v0.4s\n"
+ "fmax v15.4s, v15.4s, v1.4s\n"
+ "fmax v16.4s, v16.4s, v1.4s\n"
+ "fmax v17.4s, v17.4s, v1.4s\n"
+ "fmin v18.4s, v18.4s, v0.4s\n"
+ "fmin v19.4s, v19.4s, v0.4s\n"
+ "fmin v20.4s, v20.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v1.4s\n"
+ "fmax v19.4s, v19.4s, v1.4s\n"
+ "fmax v20.4s, v20.4s, v1.4s\n"
+ "fmin v21.4s, v21.4s, v0.4s\n"
+ "fmin v22.4s, v22.4s, v0.4s\n"
+ "fmin v23.4s, v23.4s, v0.4s\n"
+ "fmax v21.4s, v21.4s, v1.4s\n"
+ "fmax v22.4s, v22.4s, v1.4s\n"
+ "fmax v23.4s, v23.4s, v1.4s\n"
+ "fmin v24.4s, v24.4s, v0.4s\n"
+ "fmin v25.4s, v25.4s, v0.4s\n"
+ "fmin v26.4s, v26.4s, v0.4s\n"
+ "fmax v24.4s, v24.4s, v1.4s\n"
+ "fmax v25.4s, v25.4s, v1.4s\n"
+ "fmax v26.4s, v26.4s, v1.4s\n"
+ "fmin v27.4s, v27.4s, v0.4s\n"
+ "fmin v28.4s, v28.4s, v0.4s\n"
+ "fmin v29.4s, v29.4s, v0.4s\n"
+ "fmax v27.4s, v27.4s, v1.4s\n"
+ "fmax v28.4s, v28.4s, v1.4s\n"
+ "fmax v29.4s, v29.4s, v1.4s\n"
+ "fmin v30.4s, v30.4s, v0.4s\n"
+ "fmin v31.4s, v31.4s, v0.4s\n"
+ "fmax v30.4s, v30.4s, v1.4s\n"
+ "fmax v31.4s, v31.4s, v1.4s\n"
+ "212:" // Height 6: No activation
"uzp1 v7.2d, v8.2d, v12.2d\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "cmp x11, #0x10\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
- "add x21, x22, x20, LSL #2\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
- "prfm pstl1keep, [x25, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
"uzp1 v15.2d, v16.2d, v20.2d\n"
- "prfm pstl1keep, [x23, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
- "prfm pstl1keep, [x21, #0x0]\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
"uzp2 v18.2d, v18.2d, v22.2d\n"
@@ -3455,233 +3546,178 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"uzp2 v26.2d, v26.2d, v30.2d\n"
"uzp1 v30.2d, v27.2d, v31.2d\n"
"uzp2 v27.2d, v27.2d, v31.2d\n"
- "tbz %x[flags], #1, 212f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v7.4s, v7.4s, v1.4s\n"
- "fmin v12.4s, v12.4s, v1.4s\n"
- "fmin v13.4s, v13.4s, v1.4s\n"
- "fmin v14.4s, v14.4s, v1.4s\n"
- "fmin v8.4s, v8.4s, v1.4s\n"
- "fmin v9.4s, v9.4s, v1.4s\n"
- "fmin v10.4s, v10.4s, v1.4s\n"
- "fmin v11.4s, v11.4s, v1.4s\n"
- "fmin v15.4s, v15.4s, v1.4s\n"
- "fmin v20.4s, v20.4s, v1.4s\n"
- "fmin v21.4s, v21.4s, v1.4s\n"
- "fmin v22.4s, v22.4s, v1.4s\n"
- "fmin v16.4s, v16.4s, v1.4s\n"
- "fmin v17.4s, v17.4s, v1.4s\n"
- "fmin v18.4s, v18.4s, v1.4s\n"
- "fmin v19.4s, v19.4s, v1.4s\n"
- "fmin v23.4s, v23.4s, v1.4s\n"
- "fmin v28.4s, v28.4s, v1.4s\n"
- "fmin v29.4s, v29.4s, v1.4s\n"
- "fmin v30.4s, v30.4s, v1.4s\n"
- "fmin v24.4s, v24.4s, v1.4s\n"
- "fmin v25.4s, v25.4s, v1.4s\n"
- "fmin v26.4s, v26.4s, v1.4s\n"
- "fmin v27.4s, v27.4s, v1.4s\n"
- "fmax v7.4s, v7.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v15.4s, v15.4s, v0.4s\n"
- "fmax v20.4s, v20.4s, v0.4s\n"
- "fmax v21.4s, v21.4s, v0.4s\n"
- "fmax v22.4s, v22.4s, v0.4s\n"
- "fmax v16.4s, v16.4s, v0.4s\n"
- "fmax v17.4s, v17.4s, v0.4s\n"
- "fmax v18.4s, v18.4s, v0.4s\n"
- "fmax v19.4s, v19.4s, v0.4s\n"
- "fmax v23.4s, v23.4s, v0.4s\n"
- "fmax v28.4s, v28.4s, v0.4s\n"
- "fmax v29.4s, v29.4s, v0.4s\n"
- "fmax v30.4s, v30.4s, v0.4s\n"
- "fmax v24.4s, v24.4s, v0.4s\n"
- "fmax v25.4s, v25.4s, v0.4s\n"
- "fmax v26.4s, v26.4s, v0.4s\n"
- "fmax v27.4s, v27.4s, v0.4s\n"
- "212:" // Height 6: No activation
- "cmp x11, #0x10\n"
"bge 221f\n"
"tbz x11, #3, 216f\n"
- "st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v9.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "st1 { v23.4s }, [x22], #0x10\n"
- "st1 { v28.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
- "st1 { v25.4s }, [x21], #0x10\n"
+ "st1 { v7.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
+ "st1 { v23.4s }, [x21], #0x10\n"
+ "st1 { v28.4s }, [x21], #0x10\n"
+ "st1 { v24.4s }, [x20], #0x10\n"
+ "st1 { v25.4s }, [x20], #0x10\n"
"tbz x11, #2, 214f\n"
- "st1 { v13.4s }, [x9], #0x10\n"
- "st1 { v10.4s }, [x25], #0x10\n"
- "st1 { v21.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "st1 { v29.4s }, [x22], #0x10\n"
- "st1 { v26.4s }, [x21], #0x10\n"
+ "st1 { v13.4s }, [x28], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
+ "st1 { v21.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
+ "st1 { v29.4s }, [x21], #0x10\n"
+ "st1 { v26.4s }, [x20], #0x10\n"
"tbz x11, #1, 213f\n"
- "str d14, [x9], #0x8\n"
- "str d11, [x25], #0x8\n"
- "str d22, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
- "str d30, [x22], #0x8\n"
- "str d27, [x21], #0x8\n"
+ "str d14, [x28], #0x8\n"
+ "str d11, [x24], #0x8\n"
+ "str d22, [x23], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "str d30, [x21], #0x8\n"
+ "str d27, [x20], #0x8\n"
"tbz x11, #0, 220f\n"
- "st1 { v14.s }[2], [x9]\n"
- "st1 { v11.s }[2], [x25]\n"
- "st1 { v22.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
- "st1 { v30.s }[2], [x22]\n"
- "st1 { v27.s }[2], [x21]\n"
+ "st1 { v14.s }[2], [x28]\n"
+ "st1 { v11.s }[2], [x24]\n"
+ "st1 { v22.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x22]\n"
+ "st1 { v30.s }[2], [x21]\n"
+ "st1 { v27.s }[2], [x20]\n"
"b 220f\n"
"213:" // Height 6: Partial direct writeback: partial_1_12
"tbz x11, #0, 220f\n"
- "str s14, [x9, #0x0]\n"
- "str s11, [x25, #0x0]\n"
- "str s22, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
- "str s30, [x22, #0x0]\n"
- "str s27, [x21, #0x0]\n"
+ "str s14, [x28, #0x0]\n"
+ "str s11, [x24, #0x0]\n"
+ "str s22, [x23, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
+ "str s30, [x21, #0x0]\n"
+ "str s27, [x20, #0x0]\n"
"b 220f\n"
"214:" // Height 6: Partial direct writeback: partial_2_8
"tbz x11, #1, 215f\n"
- "str d13, [x9], #0x8\n"
- "str d10, [x25], #0x8\n"
- "str d21, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
- "str d29, [x22], #0x8\n"
- "str d26, [x21], #0x8\n"
+ "str d13, [x28], #0x8\n"
+ "str d10, [x24], #0x8\n"
+ "str d21, [x23], #0x8\n"
+ "str d18, [x22], #0x8\n"
+ "str d29, [x21], #0x8\n"
+ "str d26, [x20], #0x8\n"
"tbz x11, #0, 220f\n"
- "st1 { v13.s }[2], [x9]\n"
- "st1 { v10.s }[2], [x25]\n"
- "st1 { v21.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
- "st1 { v29.s }[2], [x22]\n"
- "st1 { v26.s }[2], [x21]\n"
+ "st1 { v13.s }[2], [x28]\n"
+ "st1 { v10.s }[2], [x24]\n"
+ "st1 { v21.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x22]\n"
+ "st1 { v29.s }[2], [x21]\n"
+ "st1 { v26.s }[2], [x20]\n"
"b 220f\n"
"215:" // Height 6: Partial direct writeback: partial_1_8
"tbz x11, #0, 220f\n"
- "str s13, [x9, #0x0]\n"
- "str s10, [x25, #0x0]\n"
- "str s21, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
- "str s29, [x22, #0x0]\n"
- "str s26, [x21, #0x0]\n"
+ "str s13, [x28, #0x0]\n"
+ "str s10, [x24, #0x0]\n"
+ "str s21, [x23, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
+ "str s29, [x21, #0x0]\n"
+ "str s26, [x20, #0x0]\n"
"b 220f\n"
"216:" // Height 6: Partial direct writeback: partial_4_0
"tbz x11, #2, 218f\n"
- "st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v23.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
+ "st1 { v7.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v23.4s }, [x21], #0x10\n"
+ "st1 { v24.4s }, [x20], #0x10\n"
"tbz x11, #1, 217f\n"
- "str d12, [x9], #0x8\n"
- "str d9, [x25], #0x8\n"
- "str d20, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
- "str d28, [x22], #0x8\n"
- "str d25, [x21], #0x8\n"
+ "str d12, [x28], #0x8\n"
+ "str d9, [x24], #0x8\n"
+ "str d20, [x23], #0x8\n"
+ "str d17, [x22], #0x8\n"
+ "str d28, [x21], #0x8\n"
+ "str d25, [x20], #0x8\n"
"tbz x11, #0, 220f\n"
- "st1 { v12.s }[2], [x9]\n"
- "st1 { v9.s }[2], [x25]\n"
- "st1 { v20.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
- "st1 { v28.s }[2], [x22]\n"
- "st1 { v25.s }[2], [x21]\n"
+ "st1 { v12.s }[2], [x28]\n"
+ "st1 { v9.s }[2], [x24]\n"
+ "st1 { v20.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x22]\n"
+ "st1 { v28.s }[2], [x21]\n"
+ "st1 { v25.s }[2], [x20]\n"
"b 220f\n"
"217:" // Height 6: Partial direct writeback: partial_1_4
"tbz x11, #0, 220f\n"
- "str s12, [x9, #0x0]\n"
- "str s9, [x25, #0x0]\n"
- "str s20, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
- "str s28, [x22, #0x0]\n"
- "str s25, [x21, #0x0]\n"
+ "str s12, [x28, #0x0]\n"
+ "str s9, [x24, #0x0]\n"
+ "str s20, [x23, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
+ "str s28, [x21, #0x0]\n"
+ "str s25, [x20, #0x0]\n"
"b 220f\n"
"218:" // Height 6: Partial direct writeback: partial_2_0
"tbz x11, #1, 219f\n"
- "str d7, [x9], #0x8\n"
- "str d8, [x25], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d23, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
+ "str d7, [x28], #0x8\n"
+ "str d8, [x24], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "str d23, [x21], #0x8\n"
+ "str d24, [x20], #0x8\n"
"tbz x11, #0, 220f\n"
- "st1 { v7.s }[2], [x9]\n"
- "st1 { v8.s }[2], [x25]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
- "st1 { v23.s }[2], [x22]\n"
- "st1 { v24.s }[2], [x21]\n"
+ "st1 { v7.s }[2], [x28]\n"
+ "st1 { v8.s }[2], [x24]\n"
+ "st1 { v15.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x22]\n"
+ "st1 { v23.s }[2], [x21]\n"
+ "st1 { v24.s }[2], [x20]\n"
"b 220f\n"
"219:" // Height 6: Partial direct writeback: partial_1_0
- "str s7, [x9, #0x0]\n"
- "str s8, [x25, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
- "str s23, [x22, #0x0]\n"
- "str s24, [x21, #0x0]\n"
+ "str s7, [x28, #0x0]\n"
+ "str s8, [x24, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
+ "str s23, [x21, #0x0]\n"
+ "str s24, [x20, #0x0]\n"
"220:" // Height 6: Partial direct writeback: Done
"b 222f\n"
"221:" // Height 6: Full writeback
- "str q7, [x9, #0x0]\n"
- "str q12, [x9, #0x10]\n"
- "str q13, [x9, #0x20]\n"
- "str q14, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q8, [x25, #0x0]\n"
- "str q9, [x25, #0x10]\n"
- "str q10, [x25, #0x20]\n"
- "str q11, [x25, #0x30]\n"
- "str q15, [x24, #0x0]\n"
- "str q20, [x24, #0x10]\n"
- "str q21, [x24, #0x20]\n"
- "str q22, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
- "str q23, [x22, #0x0]\n"
- "str q28, [x22, #0x10]\n"
- "str q29, [x22, #0x20]\n"
- "str q30, [x22, #0x30]\n"
- "str q24, [x21, #0x0]\n"
- "str q25, [x21, #0x10]\n"
- "str q26, [x21, #0x20]\n"
- "str q27, [x21, #0x30]\n"
+ "str q7, [x28, #0x0]\n"
+ "str q12, [x28, #0x10]\n"
+ "str q13, [x28, #0x20]\n"
+ "str q14, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q8, [x24, #0x0]\n"
+ "str q9, [x24, #0x10]\n"
+ "str q10, [x24, #0x20]\n"
+ "str q11, [x24, #0x30]\n"
+ "str q15, [x23, #0x0]\n"
+ "str q20, [x23, #0x10]\n"
+ "str q21, [x23, #0x20]\n"
+ "str q22, [x23, #0x30]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q17, [x22, #0x10]\n"
+ "str q18, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
+ "str q23, [x21, #0x0]\n"
+ "str q28, [x21, #0x10]\n"
+ "str q29, [x21, #0x20]\n"
+ "str q30, [x21, #0x30]\n"
+ "str q24, [x20, #0x0]\n"
+ "str q25, [x20, #0x10]\n"
+ "str q26, [x20, #0x20]\n"
+ "str q27, [x20, #0x30]\n"
"222:" // Height 6: Writeback done
"subs x11, x11, #0x10\n"
"bgt 187b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 224f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 223f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"223:" // Update direct input
- "mov x20, #0xc\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0xc\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"224:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32/a55.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32/a55.cpp
index 19636548a0..9157d29eba 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32/a55.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32/a55.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#if defined(__aarch64__) && (defined(FP16_KERNELS) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC))
@@ -101,138 +101,138 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"cmp %x[M], #0x2\n"
"bgt 99f\n"
"beq 50f\n"
- "mov x7, %x[bias]\n"
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x16, %x[output_ptr]\n"
+ "mov x16, %x[bias]\n"
+ "mov x15, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "cbz x7, 3f\n"
- "ldr q8, [x7, #0x0]\n"
- "ldr q9, [x7, #0x10]\n"
- "ldr q10, [x7, #0x20]\n"
- "ldr q11, [x7, #0x30]\n"
- "add x7, x7, #0x40\n"
+ "cbz x16, 3f\n"
+ "ldr q8, [x16, #0x0]\n"
+ "ldr q9, [x16, #0x10]\n"
+ "ldr q10, [x16, #0x20]\n"
+ "ldr q11, [x16, #0x30]\n"
+ "add x16, x16, #0x40\n"
"b 22f\n"
"3:" // Height 1: no bias
"tbz %x[flags], #0, 21f\n"
"cmp x8, #0x20\n"
"bge 20f\n"
"tbz x8, #4, 11f\n"
- "ld1 { v8.8h }, [x16], #0x10\n"
- "ld1 { v9.8h }, [x16], #0x10\n"
+ "ld1 { v8.8h }, [x15], #0x10\n"
+ "ld1 { v9.8h }, [x15], #0x10\n"
"tbz x8, #3, 7f\n"
- "ld1 { v10.8h }, [x16], #0x10\n"
+ "ld1 { v10.8h }, [x15], #0x10\n"
"tbz x8, #2, 5f\n"
- "ldr d11, [x16], #0x8\n"
+ "ldr d11, [x15], #0x8\n"
"tbz x8, #1, 4f\n"
- "ld1 { v11.s }[2], [x16], #0x4\n"
- "mov x20, #0x3c\n"
+ "mov x19, #0x3c\n"
+ "ld1 { v11.s }[2], [x15], #0x4\n"
"tbz x8, #0, 19f\n"
- "ld1 { v11.h }[6], [x16]\n"
+ "ld1 { v11.h }[6], [x15]\n"
"b 19f\n"
"4:" // Height 1: Partial accumulate: partial_1_28
- "mov x20, #0x38\n"
+ "mov x19, #0x38\n"
"tbz x8, #0, 19f\n"
- "ld1 { v11.h }[4], [x16]\n"
+ "ld1 { v11.h }[4], [x15]\n"
"b 19f\n"
"5:" // Height 1: Partial accumulate: partial_2_24
"tbz x8, #1, 6f\n"
- "ldr s11, [x16], #0x4\n"
- "mov x20, #0x34\n"
+ "ldr s11, [x15], #0x4\n"
+ "mov x19, #0x34\n"
"tbz x8, #0, 19f\n"
- "ld1 { v11.h }[2], [x16]\n"
+ "ld1 { v11.h }[2], [x15]\n"
"b 19f\n"
"6:" // Height 1: Partial accumulate: partial_1_24
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x8, #0, 19f\n"
- "ldr h11, [x16, #0x0]\n"
+ "ldr h11, [x15, #0x0]\n"
"b 19f\n"
"7:" // Height 1: Partial accumulate: partial_4_16
"tbz x8, #2, 9f\n"
- "ldr d10, [x16], #0x8\n"
+ "ldr d10, [x15], #0x8\n"
"tbz x8, #1, 8f\n"
- "ld1 { v10.s }[2], [x16], #0x4\n"
- "mov x20, #0x2c\n"
+ "mov x19, #0x2c\n"
+ "ld1 { v10.s }[2], [x15], #0x4\n"
"tbz x8, #0, 19f\n"
- "ld1 { v10.h }[6], [x16]\n"
+ "ld1 { v10.h }[6], [x15]\n"
"b 19f\n"
"8:" // Height 1: Partial accumulate: partial_1_20
- "mov x20, #0x28\n"
+ "mov x19, #0x28\n"
"tbz x8, #0, 19f\n"
- "ld1 { v10.h }[4], [x16]\n"
+ "ld1 { v10.h }[4], [x15]\n"
"b 19f\n"
"9:" // Height 1: Partial accumulate: partial_2_16
"tbz x8, #1, 10f\n"
- "ldr s10, [x16], #0x4\n"
- "mov x20, #0x24\n"
+ "ldr s10, [x15], #0x4\n"
+ "mov x19, #0x24\n"
"tbz x8, #0, 19f\n"
- "ld1 { v10.h }[2], [x16]\n"
+ "ld1 { v10.h }[2], [x15]\n"
"b 19f\n"
"10:" // Height 1: Partial accumulate: partial_1_16
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x8, #0, 19f\n"
- "ldr h10, [x16, #0x0]\n"
+ "ldr h10, [x15, #0x0]\n"
"b 19f\n"
"11:" // Height 1: Partial accumulate: partial_8_0
"tbz x8, #3, 15f\n"
- "ld1 { v8.8h }, [x16], #0x10\n"
+ "ld1 { v8.8h }, [x15], #0x10\n"
"tbz x8, #2, 13f\n"
- "ldr d9, [x16], #0x8\n"
+ "ldr d9, [x15], #0x8\n"
"tbz x8, #1, 12f\n"
- "ld1 { v9.s }[2], [x16], #0x4\n"
- "mov x20, #0x1c\n"
+ "mov x19, #0x1c\n"
+ "ld1 { v9.s }[2], [x15], #0x4\n"
"tbz x8, #0, 19f\n"
- "ld1 { v9.h }[6], [x16]\n"
+ "ld1 { v9.h }[6], [x15]\n"
"b 19f\n"
"12:" // Height 1: Partial accumulate: partial_1_12
- "mov x20, #0x18\n"
+ "mov x19, #0x18\n"
"tbz x8, #0, 19f\n"
- "ld1 { v9.h }[4], [x16]\n"
+ "ld1 { v9.h }[4], [x15]\n"
"b 19f\n"
"13:" // Height 1: Partial accumulate: partial_2_8
"tbz x8, #1, 14f\n"
- "ldr s9, [x16], #0x4\n"
- "mov x20, #0x14\n"
+ "ldr s9, [x15], #0x4\n"
+ "mov x19, #0x14\n"
"tbz x8, #0, 19f\n"
- "ld1 { v9.h }[2], [x16]\n"
+ "ld1 { v9.h }[2], [x15]\n"
"b 19f\n"
"14:" // Height 1: Partial accumulate: partial_1_8
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x8, #0, 19f\n"
- "ldr h9, [x16, #0x0]\n"
+ "ldr h9, [x15, #0x0]\n"
"b 19f\n"
"15:" // Height 1: Partial accumulate: partial_4_0
"tbz x8, #2, 17f\n"
- "ldr d8, [x16], #0x8\n"
+ "ldr d8, [x15], #0x8\n"
"tbz x8, #1, 16f\n"
- "ld1 { v8.s }[2], [x16], #0x4\n"
- "mov x20, #0xc\n"
+ "mov x19, #0xc\n"
+ "ld1 { v8.s }[2], [x15], #0x4\n"
"tbz x8, #0, 19f\n"
- "ld1 { v8.h }[6], [x16]\n"
+ "ld1 { v8.h }[6], [x15]\n"
"b 19f\n"
"16:" // Height 1: Partial accumulate: partial_1_4
- "mov x20, #0x8\n"
+ "mov x19, #0x8\n"
"tbz x8, #0, 19f\n"
- "ld1 { v8.h }[4], [x16]\n"
+ "ld1 { v8.h }[4], [x15]\n"
"b 19f\n"
"17:" // Height 1: Partial accumulate: partial_2_0
"tbz x8, #1, 18f\n"
- "ldr s8, [x16], #0x4\n"
- "mov x20, #0x4\n"
+ "ldr s8, [x15], #0x4\n"
+ "mov x19, #0x4\n"
"tbz x8, #0, 19f\n"
- "ld1 { v8.h }[2], [x16]\n"
+ "ld1 { v8.h }[2], [x15]\n"
"b 19f\n"
"18:" // Height 1: Partial accumulate: partial_1_0
- "ldr h8, [x16, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr h8, [x15, #0x0]\n"
+ "mov x19, #0x0\n"
"19:" // Height 1: Partial accumulate: Done
- "sub x16, x16, x20\n"
+ "sub x15, x15, x19\n"
"b 22f\n"
"20:" // Height 1: full accumulate
- "ldr q8, [x16, #0x0]\n"
- "ldr q9, [x16, #0x10]\n"
- "ldr q10, [x16, #0x20]\n"
- "ldr q11, [x16, #0x30]\n"
+ "ldr q8, [x15, #0x0]\n"
+ "ldr q9, [x15, #0x10]\n"
+ "ldr q10, [x15, #0x20]\n"
+ "ldr q11, [x15, #0x30]\n"
"b 22f\n"
"21:" // Height 1: no accumulate
"movi v8.16b, #0x0\n"
@@ -240,173 +240,176 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"movi v10.16b, #0x0\n"
"movi v11.16b, #0x0\n"
"22:" // Height 1: setup done
- "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
"23:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w13, [x20, x14, LSL #0x2]\n"
"tbz %x[flags], #3, 24f\n"
- "ldr x21, [%x[input_ptr], x15, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x13, [x21, #0x0]\n"
- "cbnz x15, 25f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x13, x13, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x12, [x20, #0x0]\n"
+ "cbnz x14, 25f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x12, x12, x19, LSL #1\n"
"b 25f\n"
"24:" // Height 1: setup direct input
- "mov x13, %x[input_ptr]\n"
+ "mov x12, %x[input_ptr]\n"
"25:" // Height 1: input setup done
- "cmp x14, #0x8\n"
+ "cmp x13, #0x8\n"
"blt 28f\n"
- "ldr q0, [x13, #0x0]\n"
- "cmp x14, #0x10\n"
+ "ldr q0, [x12, #0x0]\n"
"ldr q6, [x17, #0x0]\n"
- "ldr q7, [x17, #0x10]\n"
+ "cmp x13, #0x10\n"
"blt 27f\n"
"26:" // Height 1: Multiply loop: Main loop head
"fmla v8.8h, v6.8h, v0.h[0]\n"
+ "ldr d7, [x17, #0x10]\n"
+ "ldr x11, [x17, #0x18]\n"
+ "add x12, x12, #0x10\n"
"ldr d6, [x17, #0x20]\n"
- "ldr x12, [x17, #0x28]\n"
+ "sub x13, x13, #0x8\n"
+ "ldr x10, [x17, #0x28]\n"
+ "cmp x13, #0x10\n"
+ "mov v7.d[1], x11\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
+ "ldr x11, [x17, #0x38]\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x30]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0x38]\n"
- "mov v7.d[1], x11\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
"ldr d6, [x17, #0x40]\n"
- "ldr x12, [x17, #0x48]\n"
+ "ldr x10, [x17, #0x48]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0x58]\n"
+ "ldr x9, [x12, #0x8]\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x50]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0x58]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[1]\n"
"ldr d6, [x17, #0x60]\n"
- "ldr x12, [x17, #0x68]\n"
+ "ldr x10, [x17, #0x68]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0x78]\n"
"fmla v9.8h, v7.8h, v0.h[1]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x70]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0x78]\n"
- "mov v7.d[1], x11\n"
"fmla v10.8h, v6.8h, v0.h[1]\n"
"ldr d6, [x17, #0x80]\n"
- "ldr x12, [x17, #0x88]\n"
+ "ldr x10, [x17, #0x88]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0x98]\n"
"fmla v11.8h, v7.8h, v0.h[1]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x90]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0x98]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[2]\n"
"ldr d6, [x17, #0xa0]\n"
- "ldr x12, [x17, #0xa8]\n"
+ "ldr x10, [x17, #0xa8]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0xb8]\n"
"fmla v9.8h, v7.8h, v0.h[2]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0xb0]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0xb8]\n"
- "mov v7.d[1], x11\n"
"fmla v10.8h, v6.8h, v0.h[2]\n"
"ldr d6, [x17, #0xc0]\n"
- "ldr x12, [x17, #0xc8]\n"
+ "ldr x10, [x17, #0xc8]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0xd8]\n"
"fmla v11.8h, v7.8h, v0.h[2]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0xd0]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0xd8]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[3]\n"
"ldr d6, [x17, #0xe0]\n"
- "ldr x12, [x17, #0xe8]\n"
+ "ldr x10, [x17, #0xe8]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0xf8]\n"
"fmla v9.8h, v7.8h, v0.h[3]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0xf0]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0xf8]\n"
- "mov v7.d[1], x11\n"
"fmla v10.8h, v6.8h, v0.h[3]\n"
"ldr d6, [x17, #0x100]\n"
- "ldr x12, [x17, #0x108]\n"
+ "ldr x10, [x17, #0x108]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0x118]\n"
"fmla v11.8h, v7.8h, v0.h[3]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x110]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0x118]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[4]\n"
"ldr d6, [x17, #0x120]\n"
- "ldr x12, [x17, #0x128]\n"
+ "ldr x10, [x17, #0x128]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0x138]\n"
"fmla v9.8h, v7.8h, v0.h[4]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x130]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0x138]\n"
- "mov v7.d[1], x11\n"
"fmla v10.8h, v6.8h, v0.h[4]\n"
"ldr d6, [x17, #0x140]\n"
- "ldr x12, [x17, #0x148]\n"
+ "ldr x10, [x17, #0x148]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0x158]\n"
"fmla v11.8h, v7.8h, v0.h[4]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x150]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0x158]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[5]\n"
"ldr d6, [x17, #0x160]\n"
- "ldr x12, [x17, #0x168]\n"
+ "ldr x10, [x17, #0x168]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0x178]\n"
"fmla v9.8h, v7.8h, v0.h[5]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x170]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0x178]\n"
- "mov v7.d[1], x11\n"
"fmla v10.8h, v6.8h, v0.h[5]\n"
"ldr d6, [x17, #0x180]\n"
- "ldr x12, [x17, #0x188]\n"
+ "ldr x10, [x17, #0x188]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0x198]\n"
"fmla v11.8h, v7.8h, v0.h[5]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x190]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0x198]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[6]\n"
"ldr d6, [x17, #0x1a0]\n"
- "ldr x12, [x17, #0x1a8]\n"
+ "ldr x10, [x17, #0x1a8]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0x1b8]\n"
"fmla v9.8h, v7.8h, v0.h[6]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x1b0]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0x1b8]\n"
- "mov v7.d[1], x11\n"
"fmla v10.8h, v6.8h, v0.h[6]\n"
"ldr d6, [x17, #0x1c0]\n"
- "ldr x12, [x17, #0x1c8]\n"
+ "ldr x10, [x17, #0x1c8]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0x1d8]\n"
"fmla v11.8h, v7.8h, v0.h[6]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x1d0]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0x1d8]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[7]\n"
"ldr d6, [x17, #0x1e0]\n"
- "ldr x12, [x17, #0x1e8]\n"
+ "ldr x10, [x17, #0x1e8]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0x1f8]\n"
"fmla v9.8h, v7.8h, v0.h[7]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x1f0]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0x1f8]\n"
- "mov v7.d[1], x11\n"
- "add x13, x13, #0x10\n"
"add x17, x17, #0x200\n"
"fmla v10.8h, v6.8h, v0.h[7]\n"
"ldr d6, [x17, #0x0]\n"
- "ldr x12, [x17, #0x8]\n"
- "fmla v11.8h, v7.8h, v0.h[7]\n"
- "ldr d0, [x13, #0x0]\n"
- "sub x14, x14, #0x8\n"
- "ldr d7, [x17, #0x10]\n"
- "cmp x14, #0x10\n"
- "ldr x10, [x13, #0x8]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0x18]\n"
- "mov v0.d[1], x10\n"
+ "ldr x10, [x17, #0x8]\n"
"mov v7.d[1], x11\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "fmla v11.8h, v7.8h, v0.h[7]\n"
+ "mov v6.d[1], x10\n"
+ "ldr d0, [x12, #0x0]\n"
+ "mov v0.d[1], x9\n"
"bge 26b\n"
"27:" // Height 1: Multiply loop: Single iteration only
"fmla v8.8h, v6.8h, v0.h[0]\n"
+ "ldr q7, [x17, #0x10]\n"
"ldr q6, [x17, #0x20]\n"
+ "sub x13, x13, #0x8\n"
+ "add x12, x12, #0x10\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
- "ldr q7, [x17, #0x30]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
+ "ldr q7, [x17, #0x30]\n"
"ldr q6, [x17, #0x40]\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
"ldr q7, [x17, #0x50]\n"
@@ -462,324 +465,321 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"ldr q6, [x17, #0x1e0]\n"
"fmla v9.8h, v7.8h, v0.h[7]\n"
"ldr q7, [x17, #0x1f0]\n"
- "add x13, x13, #0x10\n"
- "sub x14, x14, #0x8\n"
"fmla v10.8h, v6.8h, v0.h[7]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
- "fmla v11.8h, v7.8h, v0.h[7]\n"
"add x17, x17, #0x200\n"
+ "fmla v11.8h, v7.8h, v0.h[7]\n"
"28:" // Height 1: Multiply loop: Main loop skip
- "cbz x14, 30f\n"
+ "cbz x13, 30f\n"
"29:" // Height 1: Multiply loop: Odd block loop
- "ldr h0, [x13], #0x2\n"
- "sub x14, x14, #0x1\n"
+ "ldr h0, [x12], #0x2\n"
+ "sub x13, x13, #0x1\n"
"ldr q6, [x17, #0x0]\n"
- "fmla v8.8h, v6.8h, v0.h[0]\n"
"ldr q7, [x17, #0x10]\n"
- "fmla v9.8h, v7.8h, v0.h[0]\n"
+ "fmla v8.8h, v6.8h, v0.h[0]\n"
"ldr q6, [x17, #0x20]\n"
- "fmla v10.8h, v6.8h, v0.h[0]\n"
+ "fmla v9.8h, v7.8h, v0.h[0]\n"
"ldr q7, [x17, #0x30]\n"
- "fmla v11.8h, v7.8h, v0.h[0]\n"
"add x17, x17, #0x40\n"
- "cbnz x14, 29b\n"
+ "fmla v10.8h, v6.8h, v0.h[0]\n"
+ "fmla v11.8h, v7.8h, v0.h[0]\n"
+ "cbnz x13, 29b\n"
"30:" // Height 1: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x15, x15, #0x1\n"
- "cmp x15, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x14, x14, #0x1\n"
+ "cmp x14, x19\n"
"bne 23b\n"
- "prfm pstl1keep, [x16, #0x0]\n"
+ "prfm pstl1keep, [x15, #0x0]\n"
"tbz %x[flags], #1, 31f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v0.8h }, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.8h }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.8h }, [x19]\n"
"fmin v8.8h, v8.8h, v0.8h\n"
"fmin v9.8h, v9.8h, v0.8h\n"
"fmin v10.8h, v10.8h, v0.8h\n"
"fmin v11.8h, v11.8h, v0.8h\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.8h }, [x20]\n"
- "fmax v8.8h, v8.8h, v0.8h\n"
- "fmax v9.8h, v9.8h, v0.8h\n"
- "fmax v10.8h, v10.8h, v0.8h\n"
- "fmax v11.8h, v11.8h, v0.8h\n"
+ "fmax v8.8h, v8.8h, v1.8h\n"
+ "fmax v9.8h, v9.8h, v1.8h\n"
+ "fmax v10.8h, v10.8h, v1.8h\n"
+ "fmax v11.8h, v11.8h, v1.8h\n"
"31:" // Height 1: No activation
"cmp x8, #0x20\n"
"bge 48f\n"
"tbz x8, #4, 39f\n"
- "st1 { v8.8h }, [x16], #0x10\n"
- "st1 { v9.8h }, [x16], #0x10\n"
+ "st1 { v8.8h }, [x15], #0x10\n"
+ "st1 { v9.8h }, [x15], #0x10\n"
"tbz x8, #3, 35f\n"
- "st1 { v10.8h }, [x16], #0x10\n"
+ "st1 { v10.8h }, [x15], #0x10\n"
"tbz x8, #2, 33f\n"
- "str d11, [x16], #0x8\n"
+ "str d11, [x15], #0x8\n"
"tbz x8, #1, 32f\n"
- "st1 { v11.s }[2], [x16], #0x4\n"
+ "st1 { v11.s }[2], [x15], #0x4\n"
"tbz x8, #0, 47f\n"
- "st1 { v11.h }[6], [x16]\n"
+ "st1 { v11.h }[6], [x15]\n"
"b 47f\n"
"32:" // Height 1: Partial direct writeback: partial_1_28
"tbz x8, #0, 47f\n"
- "st1 { v11.h }[4], [x16]\n"
+ "st1 { v11.h }[4], [x15]\n"
"b 47f\n"
"33:" // Height 1: Partial direct writeback: partial_2_24
"tbz x8, #1, 34f\n"
- "str s11, [x16], #0x4\n"
+ "str s11, [x15], #0x4\n"
"tbz x8, #0, 47f\n"
- "st1 { v11.h }[2], [x16]\n"
+ "st1 { v11.h }[2], [x15]\n"
"b 47f\n"
"34:" // Height 1: Partial direct writeback: partial_1_24
"tbz x8, #0, 47f\n"
- "str h11, [x16, #0x0]\n"
+ "str h11, [x15, #0x0]\n"
"b 47f\n"
"35:" // Height 1: Partial direct writeback: partial_4_16
"tbz x8, #2, 37f\n"
- "str d10, [x16], #0x8\n"
+ "str d10, [x15], #0x8\n"
"tbz x8, #1, 36f\n"
- "st1 { v10.s }[2], [x16], #0x4\n"
+ "st1 { v10.s }[2], [x15], #0x4\n"
"tbz x8, #0, 47f\n"
- "st1 { v10.h }[6], [x16]\n"
+ "st1 { v10.h }[6], [x15]\n"
"b 47f\n"
"36:" // Height 1: Partial direct writeback: partial_1_20
"tbz x8, #0, 47f\n"
- "st1 { v10.h }[4], [x16]\n"
+ "st1 { v10.h }[4], [x15]\n"
"b 47f\n"
"37:" // Height 1: Partial direct writeback: partial_2_16
"tbz x8, #1, 38f\n"
- "str s10, [x16], #0x4\n"
+ "str s10, [x15], #0x4\n"
"tbz x8, #0, 47f\n"
- "st1 { v10.h }[2], [x16]\n"
+ "st1 { v10.h }[2], [x15]\n"
"b 47f\n"
"38:" // Height 1: Partial direct writeback: partial_1_16
"tbz x8, #0, 47f\n"
- "str h10, [x16, #0x0]\n"
+ "str h10, [x15, #0x0]\n"
"b 47f\n"
"39:" // Height 1: Partial direct writeback: partial_8_0
"tbz x8, #3, 43f\n"
- "st1 { v8.8h }, [x16], #0x10\n"
+ "st1 { v8.8h }, [x15], #0x10\n"
"tbz x8, #2, 41f\n"
- "str d9, [x16], #0x8\n"
+ "str d9, [x15], #0x8\n"
"tbz x8, #1, 40f\n"
- "st1 { v9.s }[2], [x16], #0x4\n"
+ "st1 { v9.s }[2], [x15], #0x4\n"
"tbz x8, #0, 47f\n"
- "st1 { v9.h }[6], [x16]\n"
+ "st1 { v9.h }[6], [x15]\n"
"b 47f\n"
"40:" // Height 1: Partial direct writeback: partial_1_12
"tbz x8, #0, 47f\n"
- "st1 { v9.h }[4], [x16]\n"
+ "st1 { v9.h }[4], [x15]\n"
"b 47f\n"
"41:" // Height 1: Partial direct writeback: partial_2_8
"tbz x8, #1, 42f\n"
- "str s9, [x16], #0x4\n"
+ "str s9, [x15], #0x4\n"
"tbz x8, #0, 47f\n"
- "st1 { v9.h }[2], [x16]\n"
+ "st1 { v9.h }[2], [x15]\n"
"b 47f\n"
"42:" // Height 1: Partial direct writeback: partial_1_8
"tbz x8, #0, 47f\n"
- "str h9, [x16, #0x0]\n"
+ "str h9, [x15, #0x0]\n"
"b 47f\n"
"43:" // Height 1: Partial direct writeback: partial_4_0
"tbz x8, #2, 45f\n"
- "str d8, [x16], #0x8\n"
+ "str d8, [x15], #0x8\n"
"tbz x8, #1, 44f\n"
- "st1 { v8.s }[2], [x16], #0x4\n"
+ "st1 { v8.s }[2], [x15], #0x4\n"
"tbz x8, #0, 47f\n"
- "st1 { v8.h }[6], [x16]\n"
+ "st1 { v8.h }[6], [x15]\n"
"b 47f\n"
"44:" // Height 1: Partial direct writeback: partial_1_4
"tbz x8, #0, 47f\n"
- "st1 { v8.h }[4], [x16]\n"
+ "st1 { v8.h }[4], [x15]\n"
"b 47f\n"
"45:" // Height 1: Partial direct writeback: partial_2_0
"tbz x8, #1, 46f\n"
- "str s8, [x16], #0x4\n"
+ "str s8, [x15], #0x4\n"
"tbz x8, #0, 47f\n"
- "st1 { v8.h }[2], [x16]\n"
+ "st1 { v8.h }[2], [x15]\n"
"b 47f\n"
"46:" // Height 1: Partial direct writeback: partial_1_0
- "str h8, [x16, #0x0]\n"
+ "str h8, [x15, #0x0]\n"
"47:" // Height 1: Partial direct writeback: Done
"b 49f\n"
"48:" // Height 1: Full writeback
- "str q8, [x16, #0x0]\n"
- "str q9, [x16, #0x10]\n"
- "str q10, [x16, #0x20]\n"
- "str q11, [x16, #0x30]\n"
- "add x16, x16, #0x40\n"
+ "str q8, [x15, #0x0]\n"
+ "str q9, [x15, #0x10]\n"
+ "str q10, [x15, #0x20]\n"
+ "str q11, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
"49:" // Height 1: Writeback done
"subs x8, x8, #0x20\n"
"bgt 2b\n"
"b 296f\n"
"50:" // Height 2
- "mov x7, %x[bias]\n"
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x16, %x[bias]\n"
"ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x16, %x[output_ptr]\n"
+ "mov x15, %x[output_ptr]\n"
"51:" // Height 2: Column loop
- "cbz x7, 52f\n"
- "ldr q8, [x7, #0x0]\n"
+ "cbz x16, 52f\n"
+ "ldr q8, [x16, #0x0]\n"
+ "ldr q9, [x16, #0x10]\n"
+ "ldr q10, [x16, #0x20]\n"
"mov v12.16b, v8.16b\n"
- "ldr q9, [x7, #0x10]\n"
+ "ldr q11, [x16, #0x30]\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x7, #0x20]\n"
+ "add x16, x16, #0x40\n"
"mov v14.16b, v10.16b\n"
- "ldr q11, [x7, #0x30]\n"
"mov v15.16b, v11.16b\n"
- "add x7, x7, #0x40\n"
"b 71f\n"
"52:" // Height 2: no bias
"tbz %x[flags], #0, 70f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x8, #0x20\n"
- "add x25, x16, x20, LSL #1\n"
+ "add x25, x15, x19, LSL #1\n"
"bge 69f\n"
"tbz x8, #4, 60f\n"
- "ld1 { v8.8h }, [x16], #0x10\n"
+ "ld1 { v8.8h }, [x15], #0x10\n"
"ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v9.8h }, [x16], #0x10\n"
+ "ld1 { v9.8h }, [x15], #0x10\n"
"ld1 { v13.8h }, [x25], #0x10\n"
"tbz x8, #3, 56f\n"
- "ld1 { v10.8h }, [x16], #0x10\n"
+ "ld1 { v10.8h }, [x15], #0x10\n"
"ld1 { v14.8h }, [x25], #0x10\n"
"tbz x8, #2, 54f\n"
- "ldr d11, [x16], #0x8\n"
+ "ldr d11, [x15], #0x8\n"
"ldr d15, [x25], #0x8\n"
"tbz x8, #1, 53f\n"
- "ld1 { v11.s }[2], [x16], #0x4\n"
- "mov x20, #0x3c\n"
+ "mov x19, #0x3c\n"
+ "ld1 { v11.s }[2], [x15], #0x4\n"
"ld1 { v15.s }[2], [x25], #0x4\n"
"tbz x8, #0, 68f\n"
- "ld1 { v11.h }[6], [x16]\n"
+ "ld1 { v11.h }[6], [x15]\n"
"ld1 { v15.h }[6], [x25]\n"
"b 68f\n"
"53:" // Height 2: Partial accumulate: partial_1_28
- "mov x20, #0x38\n"
+ "mov x19, #0x38\n"
"tbz x8, #0, 68f\n"
- "ld1 { v11.h }[4], [x16]\n"
+ "ld1 { v11.h }[4], [x15]\n"
"ld1 { v15.h }[4], [x25]\n"
"b 68f\n"
"54:" // Height 2: Partial accumulate: partial_2_24
"tbz x8, #1, 55f\n"
- "ldr s11, [x16], #0x4\n"
- "mov x20, #0x34\n"
+ "ldr s11, [x15], #0x4\n"
"ldr s15, [x25], #0x4\n"
+ "mov x19, #0x34\n"
"tbz x8, #0, 68f\n"
- "ld1 { v11.h }[2], [x16]\n"
+ "ld1 { v11.h }[2], [x15]\n"
"ld1 { v15.h }[2], [x25]\n"
"b 68f\n"
"55:" // Height 2: Partial accumulate: partial_1_24
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x8, #0, 68f\n"
- "ldr h11, [x16, #0x0]\n"
+ "ldr h11, [x15, #0x0]\n"
"ldr h15, [x25, #0x0]\n"
"b 68f\n"
"56:" // Height 2: Partial accumulate: partial_4_16
"tbz x8, #2, 58f\n"
- "ldr d10, [x16], #0x8\n"
+ "ldr d10, [x15], #0x8\n"
"ldr d14, [x25], #0x8\n"
"tbz x8, #1, 57f\n"
- "ld1 { v10.s }[2], [x16], #0x4\n"
- "mov x20, #0x2c\n"
+ "mov x19, #0x2c\n"
+ "ld1 { v10.s }[2], [x15], #0x4\n"
"ld1 { v14.s }[2], [x25], #0x4\n"
"tbz x8, #0, 68f\n"
- "ld1 { v10.h }[6], [x16]\n"
+ "ld1 { v10.h }[6], [x15]\n"
"ld1 { v14.h }[6], [x25]\n"
"b 68f\n"
"57:" // Height 2: Partial accumulate: partial_1_20
- "mov x20, #0x28\n"
+ "mov x19, #0x28\n"
"tbz x8, #0, 68f\n"
- "ld1 { v10.h }[4], [x16]\n"
+ "ld1 { v10.h }[4], [x15]\n"
"ld1 { v14.h }[4], [x25]\n"
"b 68f\n"
"58:" // Height 2: Partial accumulate: partial_2_16
"tbz x8, #1, 59f\n"
- "ldr s10, [x16], #0x4\n"
- "mov x20, #0x24\n"
+ "ldr s10, [x15], #0x4\n"
"ldr s14, [x25], #0x4\n"
+ "mov x19, #0x24\n"
"tbz x8, #0, 68f\n"
- "ld1 { v10.h }[2], [x16]\n"
+ "ld1 { v10.h }[2], [x15]\n"
"ld1 { v14.h }[2], [x25]\n"
"b 68f\n"
"59:" // Height 2: Partial accumulate: partial_1_16
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x8, #0, 68f\n"
- "ldr h10, [x16, #0x0]\n"
+ "ldr h10, [x15, #0x0]\n"
"ldr h14, [x25, #0x0]\n"
"b 68f\n"
"60:" // Height 2: Partial accumulate: partial_8_0
"tbz x8, #3, 64f\n"
- "ld1 { v8.8h }, [x16], #0x10\n"
+ "ld1 { v8.8h }, [x15], #0x10\n"
"ld1 { v12.8h }, [x25], #0x10\n"
"tbz x8, #2, 62f\n"
- "ldr d9, [x16], #0x8\n"
+ "ldr d9, [x15], #0x8\n"
"ldr d13, [x25], #0x8\n"
"tbz x8, #1, 61f\n"
- "ld1 { v9.s }[2], [x16], #0x4\n"
- "mov x20, #0x1c\n"
+ "mov x19, #0x1c\n"
+ "ld1 { v9.s }[2], [x15], #0x4\n"
"ld1 { v13.s }[2], [x25], #0x4\n"
"tbz x8, #0, 68f\n"
- "ld1 { v9.h }[6], [x16]\n"
+ "ld1 { v9.h }[6], [x15]\n"
"ld1 { v13.h }[6], [x25]\n"
"b 68f\n"
"61:" // Height 2: Partial accumulate: partial_1_12
- "mov x20, #0x18\n"
+ "mov x19, #0x18\n"
"tbz x8, #0, 68f\n"
- "ld1 { v9.h }[4], [x16]\n"
+ "ld1 { v9.h }[4], [x15]\n"
"ld1 { v13.h }[4], [x25]\n"
"b 68f\n"
"62:" // Height 2: Partial accumulate: partial_2_8
"tbz x8, #1, 63f\n"
- "ldr s9, [x16], #0x4\n"
- "mov x20, #0x14\n"
+ "ldr s9, [x15], #0x4\n"
"ldr s13, [x25], #0x4\n"
+ "mov x19, #0x14\n"
"tbz x8, #0, 68f\n"
- "ld1 { v9.h }[2], [x16]\n"
+ "ld1 { v9.h }[2], [x15]\n"
"ld1 { v13.h }[2], [x25]\n"
"b 68f\n"
"63:" // Height 2: Partial accumulate: partial_1_8
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x8, #0, 68f\n"
- "ldr h9, [x16, #0x0]\n"
+ "ldr h9, [x15, #0x0]\n"
"ldr h13, [x25, #0x0]\n"
"b 68f\n"
"64:" // Height 2: Partial accumulate: partial_4_0
"tbz x8, #2, 66f\n"
- "ldr d8, [x16], #0x8\n"
+ "ldr d8, [x15], #0x8\n"
"ldr d12, [x25], #0x8\n"
"tbz x8, #1, 65f\n"
- "ld1 { v8.s }[2], [x16], #0x4\n"
- "mov x20, #0xc\n"
+ "mov x19, #0xc\n"
+ "ld1 { v8.s }[2], [x15], #0x4\n"
"ld1 { v12.s }[2], [x25], #0x4\n"
"tbz x8, #0, 68f\n"
- "ld1 { v8.h }[6], [x16]\n"
+ "ld1 { v8.h }[6], [x15]\n"
"ld1 { v12.h }[6], [x25]\n"
"b 68f\n"
"65:" // Height 2: Partial accumulate: partial_1_4
- "mov x20, #0x8\n"
+ "mov x19, #0x8\n"
"tbz x8, #0, 68f\n"
- "ld1 { v8.h }[4], [x16]\n"
+ "ld1 { v8.h }[4], [x15]\n"
"ld1 { v12.h }[4], [x25]\n"
"b 68f\n"
"66:" // Height 2: Partial accumulate: partial_2_0
"tbz x8, #1, 67f\n"
- "ldr s8, [x16], #0x4\n"
- "mov x20, #0x4\n"
+ "ldr s8, [x15], #0x4\n"
"ldr s12, [x25], #0x4\n"
+ "mov x19, #0x4\n"
"tbz x8, #0, 68f\n"
- "ld1 { v8.h }[2], [x16]\n"
+ "ld1 { v8.h }[2], [x15]\n"
"ld1 { v12.h }[2], [x25]\n"
"b 68f\n"
"67:" // Height 2: Partial accumulate: partial_1_0
- "ldr h8, [x16, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr h8, [x15, #0x0]\n"
+ "mov x19, #0x0\n"
"ldr h12, [x25, #0x0]\n"
"68:" // Height 2: Partial accumulate: Done
- "sub x16, x16, x20\n"
+ "sub x15, x15, x19\n"
"b 71f\n"
"69:" // Height 2: full accumulate
- "ldr q8, [x16, #0x0]\n"
- "ldr q9, [x16, #0x10]\n"
- "ldr q10, [x16, #0x20]\n"
- "ldr q11, [x16, #0x30]\n"
+ "ldr q8, [x15, #0x0]\n"
+ "ldr q9, [x15, #0x10]\n"
+ "ldr q10, [x15, #0x20]\n"
+ "ldr q11, [x15, #0x30]\n"
"ldr q12, [x25, #0x0]\n"
"ldr q13, [x25, #0x10]\n"
"ldr q14, [x25, #0x20]\n"
@@ -795,227 +795,227 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"movi v14.16b, #0x0\n"
"movi v15.16b, #0x0\n"
"71:" // Height 2: setup done
- "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
"72:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w13, [x20, x14, LSL #0x2]\n"
"tbz %x[flags], #3, 73f\n"
- "ldr x21, [%x[input_ptr], x15, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x13, [x21, #0x0]\n"
- "ldr x9, [x21, #0x8]\n"
- "cbnz x15, 74f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x13, x13, x20, LSL #1\n"
- "add x9, x9, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x12, [x20, #0x0]\n"
+ "ldr x28, [x20, #0x8]\n"
+ "cbnz x14, 74f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x12, x12, x19, LSL #1\n"
+ "add x28, x28, x19, LSL #1\n"
"b 74f\n"
"73:" // Height 2: setup direct input
- "mov x13, %x[input_ptr]\n"
- "add x9, x13, x20, LSL #1\n"
+ "mov x12, %x[input_ptr]\n"
+ "add x28, x12, x19, LSL #1\n"
"74:" // Height 2: input setup done
- "cmp x14, #0x8\n"
+ "cmp x13, #0x8\n"
"blt 77f\n"
- "ldr q0, [x13, #0x0]\n"
- "cmp x14, #0x10\n"
- "ldr q1, [x9, #0x0]\n"
+ "ldr q0, [x12, #0x0]\n"
+ "ldr q1, [x28, #0x0]\n"
+ "cmp x13, #0x10\n"
"ldr q6, [x17, #0x0]\n"
- "ldr q7, [x17, #0x10]\n"
"blt 76f\n"
"75:" // Height 2: Multiply loop: Main loop head
"fmla v8.8h, v6.8h, v0.h[0]\n"
- "ldr x12, [x17, #0x28]\n"
+ "ldr d7, [x17, #0x10]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
+ "ldr x11, [x17, #0x18]\n"
"ldr d6, [x17, #0x20]\n"
+ "add x12, x12, #0x10\n"
+ "ldr x10, [x17, #0x28]\n"
+ "add x28, x28, #0x10\n"
+ "mov v7.d[1], x11\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
+ "sub x13, x13, #0x8\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
- "ldr x11, [x17, #0x38]\n"
+ "mov v6.d[1], x10\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
"ldr d7, [x17, #0x30]\n"
- "mov v6.d[1], x12\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
- "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0x38]\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
"ldr d6, [x17, #0x40]\n"
+ "ldr x10, [x17, #0x48]\n"
+ "cmp x13, #0x10\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0x58]\n"
+ "ldr x9, [x12, #0x8]\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
- "ldr x12, [x17, #0x48]\n"
+ "mov v6.d[1], x10\n"
"fmla v15.8h, v7.8h, v1.h[0]\n"
"ldr d7, [x17, #0x50]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0x58]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[1]\n"
- "ldr x12, [x17, #0x68]\n"
+ "ldr x10, [x17, #0x68]\n"
"fmla v12.8h, v6.8h, v1.h[1]\n"
"ldr d6, [x17, #0x60]\n"
- "fmla v9.8h, v7.8h, v0.h[1]\n"
+ "mov v7.d[1], x11\n"
"ldr x11, [x17, #0x78]\n"
+ "ldr x27, [x28, #0x8]\n"
+ "fmla v9.8h, v7.8h, v0.h[1]\n"
+ "mov v6.d[1], x10\n"
"fmla v13.8h, v7.8h, v1.h[1]\n"
"ldr d7, [x17, #0x70]\n"
- "mov v6.d[1], x12\n"
"fmla v10.8h, v6.8h, v0.h[1]\n"
- "mov v7.d[1], x11\n"
+ "ldr x10, [x17, #0x88]\n"
"fmla v14.8h, v6.8h, v1.h[1]\n"
"ldr d6, [x17, #0x80]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0x98]\n"
"fmla v11.8h, v7.8h, v0.h[1]\n"
- "ldr x12, [x17, #0x88]\n"
+ "mov v6.d[1], x10\n"
"fmla v15.8h, v7.8h, v1.h[1]\n"
"ldr d7, [x17, #0x90]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0x98]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[2]\n"
- "ldr x12, [x17, #0xa8]\n"
+ "ldr x10, [x17, #0xa8]\n"
"fmla v12.8h, v6.8h, v1.h[2]\n"
"ldr d6, [x17, #0xa0]\n"
- "fmla v9.8h, v7.8h, v0.h[2]\n"
+ "mov v7.d[1], x11\n"
"ldr x11, [x17, #0xb8]\n"
+ "fmla v9.8h, v7.8h, v0.h[2]\n"
+ "mov v6.d[1], x10\n"
"fmla v13.8h, v7.8h, v1.h[2]\n"
"ldr d7, [x17, #0xb0]\n"
- "mov v6.d[1], x12\n"
"fmla v10.8h, v6.8h, v0.h[2]\n"
- "mov v7.d[1], x11\n"
+ "ldr x10, [x17, #0xc8]\n"
"fmla v14.8h, v6.8h, v1.h[2]\n"
"ldr d6, [x17, #0xc0]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0xd8]\n"
"fmla v11.8h, v7.8h, v0.h[2]\n"
- "ldr x12, [x17, #0xc8]\n"
+ "mov v6.d[1], x10\n"
"fmla v15.8h, v7.8h, v1.h[2]\n"
"ldr d7, [x17, #0xd0]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0xd8]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[3]\n"
- "ldr x12, [x17, #0xe8]\n"
+ "ldr x10, [x17, #0xe8]\n"
"fmla v12.8h, v6.8h, v1.h[3]\n"
"ldr d6, [x17, #0xe0]\n"
- "fmla v9.8h, v7.8h, v0.h[3]\n"
+ "mov v7.d[1], x11\n"
"ldr x11, [x17, #0xf8]\n"
+ "fmla v9.8h, v7.8h, v0.h[3]\n"
+ "mov v6.d[1], x10\n"
"fmla v13.8h, v7.8h, v1.h[3]\n"
"ldr d7, [x17, #0xf0]\n"
- "mov v6.d[1], x12\n"
"fmla v10.8h, v6.8h, v0.h[3]\n"
- "mov v7.d[1], x11\n"
+ "ldr x10, [x17, #0x108]\n"
"fmla v14.8h, v6.8h, v1.h[3]\n"
"ldr d6, [x17, #0x100]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0x118]\n"
"fmla v11.8h, v7.8h, v0.h[3]\n"
- "ldr x12, [x17, #0x108]\n"
+ "mov v6.d[1], x10\n"
"fmla v15.8h, v7.8h, v1.h[3]\n"
"ldr d7, [x17, #0x110]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0x118]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[4]\n"
- "ldr x12, [x17, #0x128]\n"
+ "ldr x10, [x17, #0x128]\n"
"fmla v12.8h, v6.8h, v1.h[4]\n"
"ldr d6, [x17, #0x120]\n"
- "fmla v9.8h, v7.8h, v0.h[4]\n"
+ "mov v7.d[1], x11\n"
"ldr x11, [x17, #0x138]\n"
+ "fmla v9.8h, v7.8h, v0.h[4]\n"
+ "mov v6.d[1], x10\n"
"fmla v13.8h, v7.8h, v1.h[4]\n"
"ldr d7, [x17, #0x130]\n"
- "mov v6.d[1], x12\n"
"fmla v10.8h, v6.8h, v0.h[4]\n"
- "mov v7.d[1], x11\n"
+ "ldr x10, [x17, #0x148]\n"
"fmla v14.8h, v6.8h, v1.h[4]\n"
"ldr d6, [x17, #0x140]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0x158]\n"
"fmla v11.8h, v7.8h, v0.h[4]\n"
- "ldr x12, [x17, #0x148]\n"
+ "mov v6.d[1], x10\n"
"fmla v15.8h, v7.8h, v1.h[4]\n"
"ldr d7, [x17, #0x150]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0x158]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[5]\n"
- "ldr x12, [x17, #0x168]\n"
+ "ldr x10, [x17, #0x168]\n"
"fmla v12.8h, v6.8h, v1.h[5]\n"
"ldr d6, [x17, #0x160]\n"
- "fmla v9.8h, v7.8h, v0.h[5]\n"
+ "mov v7.d[1], x11\n"
"ldr x11, [x17, #0x178]\n"
+ "fmla v9.8h, v7.8h, v0.h[5]\n"
+ "mov v6.d[1], x10\n"
"fmla v13.8h, v7.8h, v1.h[5]\n"
"ldr d7, [x17, #0x170]\n"
- "mov v6.d[1], x12\n"
"fmla v10.8h, v6.8h, v0.h[5]\n"
- "mov v7.d[1], x11\n"
+ "ldr x10, [x17, #0x188]\n"
"fmla v14.8h, v6.8h, v1.h[5]\n"
"ldr d6, [x17, #0x180]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0x198]\n"
"fmla v11.8h, v7.8h, v0.h[5]\n"
- "ldr x12, [x17, #0x188]\n"
+ "mov v6.d[1], x10\n"
"fmla v15.8h, v7.8h, v1.h[5]\n"
"ldr d7, [x17, #0x190]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0x198]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[6]\n"
- "ldr x12, [x17, #0x1a8]\n"
+ "ldr x10, [x17, #0x1a8]\n"
"fmla v12.8h, v6.8h, v1.h[6]\n"
"ldr d6, [x17, #0x1a0]\n"
- "fmla v9.8h, v7.8h, v0.h[6]\n"
+ "mov v7.d[1], x11\n"
"ldr x11, [x17, #0x1b8]\n"
+ "fmla v9.8h, v7.8h, v0.h[6]\n"
+ "mov v6.d[1], x10\n"
"fmla v13.8h, v7.8h, v1.h[6]\n"
"ldr d7, [x17, #0x1b0]\n"
- "mov v6.d[1], x12\n"
"fmla v10.8h, v6.8h, v0.h[6]\n"
- "mov v7.d[1], x11\n"
+ "ldr x10, [x17, #0x1c8]\n"
"fmla v14.8h, v6.8h, v1.h[6]\n"
"ldr d6, [x17, #0x1c0]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0x1d8]\n"
"fmla v11.8h, v7.8h, v0.h[6]\n"
- "ldr x12, [x17, #0x1c8]\n"
+ "mov v6.d[1], x10\n"
"fmla v15.8h, v7.8h, v1.h[6]\n"
"ldr d7, [x17, #0x1d0]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0x1d8]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[7]\n"
- "ldr x12, [x17, #0x1e8]\n"
+ "ldr x10, [x17, #0x1e8]\n"
"fmla v12.8h, v6.8h, v1.h[7]\n"
"ldr d6, [x17, #0x1e0]\n"
- "fmla v9.8h, v7.8h, v0.h[7]\n"
+ "mov v7.d[1], x11\n"
"ldr x11, [x17, #0x1f8]\n"
+ "fmla v9.8h, v7.8h, v0.h[7]\n"
+ "mov v6.d[1], x10\n"
"fmla v13.8h, v7.8h, v1.h[7]\n"
"ldr d7, [x17, #0x1f0]\n"
- "mov v6.d[1], x12\n"
- "add x13, x13, #0x10\n"
- "mov v7.d[1], x11\n"
- "add x9, x9, #0x10\n"
- "add x17, x17, #0x200\n"
"fmla v10.8h, v6.8h, v0.h[7]\n"
+ "add x17, x17, #0x200\n"
"fmla v14.8h, v6.8h, v1.h[7]\n"
"ldr d6, [x17, #0x0]\n"
- "ldr x12, [x17, #0x8]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x10, [x17, #0x8]\n"
"fmla v11.8h, v7.8h, v0.h[7]\n"
- "ldr d0, [x13, #0x0]\n"
+ "ldr d0, [x12, #0x0]\n"
"fmla v15.8h, v7.8h, v1.h[7]\n"
- "ldr d1, [x9, #0x0]\n"
- "sub x14, x14, #0x8\n"
- "ldr d7, [x17, #0x10]\n"
- "cmp x14, #0x10\n"
- "ldr x10, [x13, #0x8]\n"
- "mov v6.d[1], x12\n"
- "ldr x28, [x9, #0x8]\n"
- "mov v0.d[1], x10\n"
- "ldr x11, [x17, #0x18]\n"
- "mov v1.d[1], x28\n"
- "prfm pldl1keep, [x13, #0x80]\n"
- "mov v7.d[1], x11\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "mov v6.d[1], x10\n"
+ "ldr d1, [x28, #0x0]\n"
+ "mov v0.d[1], x9\n"
+ "mov v1.d[1], x27\n"
"bge 75b\n"
"76:" // Height 2: Multiply loop: Single iteration only
"fmla v8.8h, v6.8h, v0.h[0]\n"
- "add x13, x13, #0x10\n"
+ "ldr q7, [x17, #0x10]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
"ldr q6, [x17, #0x20]\n"
+ "sub x13, x13, #0x8\n"
+ "add x12, x12, #0x10\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
- "add x9, x9, #0x10\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
"ldr q7, [x17, #0x30]\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
- "sub x14, x14, #0x8\n"
+ "add x28, x28, #0x10\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
- "ldr q6, [x17, #0x40]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "ldr q6, [x17, #0x40]\n"
"fmla v15.8h, v7.8h, v1.h[0]\n"
"ldr q7, [x17, #0x50]\n"
"fmla v8.8h, v6.8h, v0.h[1]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
"fmla v12.8h, v6.8h, v1.h[1]\n"
"ldr q6, [x17, #0x60]\n"
"fmla v9.8h, v7.8h, v0.h[1]\n"
@@ -1099,14 +1099,14 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmla v11.8h, v7.8h, v0.h[7]\n"
"fmla v15.8h, v7.8h, v1.h[7]\n"
"77:" // Height 2: Multiply loop: Main loop skip
- "cbz x14, 79f\n"
+ "cbz x13, 79f\n"
"78:" // Height 2: Multiply loop: Odd block loop
- "ldr h0, [x13], #0x2\n"
- "sub x14, x14, #0x1\n"
- "ldr h1, [x9], #0x2\n"
+ "ldr h0, [x12], #0x2\n"
+ "sub x13, x13, #0x1\n"
+ "ldr h1, [x28], #0x2\n"
"ldr q6, [x17, #0x0]\n"
- "fmla v8.8h, v6.8h, v0.h[0]\n"
"ldr q7, [x17, #0x10]\n"
+ "fmla v8.8h, v6.8h, v0.h[0]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
"ldr q6, [x17, #0x20]\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
@@ -1117,19 +1117,21 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmla v14.8h, v6.8h, v1.h[0]\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
"fmla v15.8h, v7.8h, v1.h[0]\n"
- "cbnz x14, 78b\n"
+ "cbnz x13, 78b\n"
"79:" // Height 2: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x15, x15, #0x1\n"
- "cmp x15, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x14, x14, #0x1\n"
+ "cmp x14, x19\n"
"bne 72b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #1\n"
- "prfm pstl1keep, [x16, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x15, #0x0]\n"
+ "add x25, x15, x19, LSL #1\n"
"prfm pstl1keep, [x25, #0x0]\n"
"tbz %x[flags], #1, 80f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v0.8h }, [x20]\n"
+ "add x20, %x[args_ptr], %[offset_min]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.8h }, [x20]\n"
+ "ld1r { v0.8h }, [x19]\n"
"fmin v8.8h, v8.8h, v0.8h\n"
"fmin v9.8h, v9.8h, v0.8h\n"
"fmin v10.8h, v10.8h, v0.8h\n"
@@ -1138,151 +1140,149 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmin v13.8h, v13.8h, v0.8h\n"
"fmin v14.8h, v14.8h, v0.8h\n"
"fmin v15.8h, v15.8h, v0.8h\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.8h }, [x20]\n"
- "fmax v8.8h, v8.8h, v0.8h\n"
- "fmax v9.8h, v9.8h, v0.8h\n"
- "fmax v10.8h, v10.8h, v0.8h\n"
- "fmax v11.8h, v11.8h, v0.8h\n"
- "fmax v12.8h, v12.8h, v0.8h\n"
- "fmax v13.8h, v13.8h, v0.8h\n"
- "fmax v14.8h, v14.8h, v0.8h\n"
- "fmax v15.8h, v15.8h, v0.8h\n"
+ "fmax v8.8h, v8.8h, v1.8h\n"
+ "fmax v9.8h, v9.8h, v1.8h\n"
+ "fmax v10.8h, v10.8h, v1.8h\n"
+ "fmax v11.8h, v11.8h, v1.8h\n"
+ "fmax v12.8h, v12.8h, v1.8h\n"
+ "fmax v13.8h, v13.8h, v1.8h\n"
+ "fmax v14.8h, v14.8h, v1.8h\n"
+ "fmax v15.8h, v15.8h, v1.8h\n"
"80:" // Height 2: No activation
"cmp x8, #0x20\n"
"bge 97f\n"
"tbz x8, #4, 88f\n"
- "st1 { v8.8h }, [x16], #0x10\n"
- "st1 { v9.8h }, [x16], #0x10\n"
+ "st1 { v8.8h }, [x15], #0x10\n"
+ "st1 { v9.8h }, [x15], #0x10\n"
"st1 { v12.8h }, [x25], #0x10\n"
"st1 { v13.8h }, [x25], #0x10\n"
"tbz x8, #3, 84f\n"
- "st1 { v10.8h }, [x16], #0x10\n"
+ "st1 { v10.8h }, [x15], #0x10\n"
"st1 { v14.8h }, [x25], #0x10\n"
"tbz x8, #2, 82f\n"
- "str d11, [x16], #0x8\n"
+ "str d11, [x15], #0x8\n"
"str d15, [x25], #0x8\n"
"tbz x8, #1, 81f\n"
- "st1 { v11.s }[2], [x16], #0x4\n"
+ "st1 { v11.s }[2], [x15], #0x4\n"
"st1 { v15.s }[2], [x25], #0x4\n"
"tbz x8, #0, 96f\n"
- "st1 { v11.h }[6], [x16]\n"
+ "st1 { v11.h }[6], [x15]\n"
"st1 { v15.h }[6], [x25]\n"
"b 96f\n"
"81:" // Height 2: Partial direct writeback: partial_1_28
"tbz x8, #0, 96f\n"
- "st1 { v11.h }[4], [x16]\n"
+ "st1 { v11.h }[4], [x15]\n"
"st1 { v15.h }[4], [x25]\n"
"b 96f\n"
"82:" // Height 2: Partial direct writeback: partial_2_24
"tbz x8, #1, 83f\n"
- "str s11, [x16], #0x4\n"
+ "str s11, [x15], #0x4\n"
"str s15, [x25], #0x4\n"
"tbz x8, #0, 96f\n"
- "st1 { v11.h }[2], [x16]\n"
+ "st1 { v11.h }[2], [x15]\n"
"st1 { v15.h }[2], [x25]\n"
"b 96f\n"
"83:" // Height 2: Partial direct writeback: partial_1_24
"tbz x8, #0, 96f\n"
- "str h11, [x16, #0x0]\n"
+ "str h11, [x15, #0x0]\n"
"str h15, [x25, #0x0]\n"
"b 96f\n"
"84:" // Height 2: Partial direct writeback: partial_4_16
"tbz x8, #2, 86f\n"
- "str d10, [x16], #0x8\n"
+ "str d10, [x15], #0x8\n"
"str d14, [x25], #0x8\n"
"tbz x8, #1, 85f\n"
- "st1 { v10.s }[2], [x16], #0x4\n"
+ "st1 { v10.s }[2], [x15], #0x4\n"
"st1 { v14.s }[2], [x25], #0x4\n"
"tbz x8, #0, 96f\n"
- "st1 { v10.h }[6], [x16]\n"
+ "st1 { v10.h }[6], [x15]\n"
"st1 { v14.h }[6], [x25]\n"
"b 96f\n"
"85:" // Height 2: Partial direct writeback: partial_1_20
"tbz x8, #0, 96f\n"
- "st1 { v10.h }[4], [x16]\n"
+ "st1 { v10.h }[4], [x15]\n"
"st1 { v14.h }[4], [x25]\n"
"b 96f\n"
"86:" // Height 2: Partial direct writeback: partial_2_16
"tbz x8, #1, 87f\n"
- "str s10, [x16], #0x4\n"
+ "str s10, [x15], #0x4\n"
"str s14, [x25], #0x4\n"
"tbz x8, #0, 96f\n"
- "st1 { v10.h }[2], [x16]\n"
+ "st1 { v10.h }[2], [x15]\n"
"st1 { v14.h }[2], [x25]\n"
"b 96f\n"
"87:" // Height 2: Partial direct writeback: partial_1_16
"tbz x8, #0, 96f\n"
- "str h10, [x16, #0x0]\n"
+ "str h10, [x15, #0x0]\n"
"str h14, [x25, #0x0]\n"
"b 96f\n"
"88:" // Height 2: Partial direct writeback: partial_8_0
"tbz x8, #3, 92f\n"
- "st1 { v8.8h }, [x16], #0x10\n"
+ "st1 { v8.8h }, [x15], #0x10\n"
"st1 { v12.8h }, [x25], #0x10\n"
"tbz x8, #2, 90f\n"
- "str d9, [x16], #0x8\n"
+ "str d9, [x15], #0x8\n"
"str d13, [x25], #0x8\n"
"tbz x8, #1, 89f\n"
- "st1 { v9.s }[2], [x16], #0x4\n"
+ "st1 { v9.s }[2], [x15], #0x4\n"
"st1 { v13.s }[2], [x25], #0x4\n"
"tbz x8, #0, 96f\n"
- "st1 { v9.h }[6], [x16]\n"
+ "st1 { v9.h }[6], [x15]\n"
"st1 { v13.h }[6], [x25]\n"
"b 96f\n"
"89:" // Height 2: Partial direct writeback: partial_1_12
"tbz x8, #0, 96f\n"
- "st1 { v9.h }[4], [x16]\n"
+ "st1 { v9.h }[4], [x15]\n"
"st1 { v13.h }[4], [x25]\n"
"b 96f\n"
"90:" // Height 2: Partial direct writeback: partial_2_8
"tbz x8, #1, 91f\n"
- "str s9, [x16], #0x4\n"
+ "str s9, [x15], #0x4\n"
"str s13, [x25], #0x4\n"
"tbz x8, #0, 96f\n"
- "st1 { v9.h }[2], [x16]\n"
+ "st1 { v9.h }[2], [x15]\n"
"st1 { v13.h }[2], [x25]\n"
"b 96f\n"
"91:" // Height 2: Partial direct writeback: partial_1_8
"tbz x8, #0, 96f\n"
- "str h9, [x16, #0x0]\n"
+ "str h9, [x15, #0x0]\n"
"str h13, [x25, #0x0]\n"
"b 96f\n"
"92:" // Height 2: Partial direct writeback: partial_4_0
"tbz x8, #2, 94f\n"
- "str d8, [x16], #0x8\n"
+ "str d8, [x15], #0x8\n"
"str d12, [x25], #0x8\n"
"tbz x8, #1, 93f\n"
- "st1 { v8.s }[2], [x16], #0x4\n"
+ "st1 { v8.s }[2], [x15], #0x4\n"
"st1 { v12.s }[2], [x25], #0x4\n"
"tbz x8, #0, 96f\n"
- "st1 { v8.h }[6], [x16]\n"
+ "st1 { v8.h }[6], [x15]\n"
"st1 { v12.h }[6], [x25]\n"
"b 96f\n"
"93:" // Height 2: Partial direct writeback: partial_1_4
"tbz x8, #0, 96f\n"
- "st1 { v8.h }[4], [x16]\n"
+ "st1 { v8.h }[4], [x15]\n"
"st1 { v12.h }[4], [x25]\n"
"b 96f\n"
"94:" // Height 2: Partial direct writeback: partial_2_0
"tbz x8, #1, 95f\n"
- "str s8, [x16], #0x4\n"
+ "str s8, [x15], #0x4\n"
"str s12, [x25], #0x4\n"
"tbz x8, #0, 96f\n"
- "st1 { v8.h }[2], [x16]\n"
+ "st1 { v8.h }[2], [x15]\n"
"st1 { v12.h }[2], [x25]\n"
"b 96f\n"
"95:" // Height 2: Partial direct writeback: partial_1_0
- "str h8, [x16, #0x0]\n"
+ "str h8, [x15, #0x0]\n"
"str h12, [x25, #0x0]\n"
"96:" // Height 2: Partial direct writeback: Done
"b 98f\n"
"97:" // Height 2: Full writeback
- "str q8, [x16, #0x0]\n"
- "str q9, [x16, #0x10]\n"
- "str q10, [x16, #0x20]\n"
- "str q11, [x16, #0x30]\n"
- "add x16, x16, #0x40\n"
+ "str q8, [x15, #0x0]\n"
+ "str q9, [x15, #0x10]\n"
+ "str q10, [x15, #0x20]\n"
+ "str q11, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
"str q12, [x25, #0x0]\n"
"str q13, [x25, #0x10]\n"
"str q14, [x25, #0x20]\n"
@@ -1292,213 +1292,213 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"bgt 51b\n"
"b 296f\n"
"99:" // Height 3
- "mov x7, %x[bias]\n"
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x16, %x[bias]\n"
"ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x16, %x[output_ptr]\n"
+ "mov x15, %x[output_ptr]\n"
"100:" // Height 3: Column loop
- "cbz x7, 101f\n"
- "ldr q8, [x7, #0x0]\n"
+ "cbz x16, 101f\n"
+ "ldr q8, [x16, #0x0]\n"
+ "ldr q9, [x16, #0x10]\n"
+ "ldr q10, [x16, #0x20]\n"
"mov v12.16b, v8.16b\n"
- "ldr q9, [x7, #0x10]\n"
+ "ldr q11, [x16, #0x30]\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x7, #0x20]\n"
+ "add x16, x16, #0x40\n"
"mov v14.16b, v10.16b\n"
- "ldr q11, [x7, #0x30]\n"
"mov v15.16b, v11.16b\n"
"mov v16.16b, v8.16b\n"
- "add x7, x7, #0x40\n"
"mov v17.16b, v9.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
"b 120f\n"
"101:" // Height 3: no bias
"tbz %x[flags], #0, 119f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x8, #0x20\n"
- "add x24, x25, x20, LSL #1\n"
+ "add x25, x15, x19, LSL #1\n"
+ "add x24, x25, x19, LSL #1\n"
"bge 118f\n"
"tbz x8, #4, 109f\n"
- "ld1 { v8.8h }, [x16], #0x10\n"
+ "ld1 { v8.8h }, [x15], #0x10\n"
"ld1 { v12.8h }, [x25], #0x10\n"
"ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v9.8h }, [x16], #0x10\n"
+ "ld1 { v9.8h }, [x15], #0x10\n"
"ld1 { v13.8h }, [x25], #0x10\n"
"ld1 { v17.8h }, [x24], #0x10\n"
"tbz x8, #3, 105f\n"
- "ld1 { v10.8h }, [x16], #0x10\n"
+ "ld1 { v10.8h }, [x15], #0x10\n"
"ld1 { v14.8h }, [x25], #0x10\n"
"ld1 { v18.8h }, [x24], #0x10\n"
"tbz x8, #2, 103f\n"
- "ldr d11, [x16], #0x8\n"
+ "ldr d11, [x15], #0x8\n"
"ldr d15, [x25], #0x8\n"
"ldr d19, [x24], #0x8\n"
"tbz x8, #1, 102f\n"
- "ld1 { v11.s }[2], [x16], #0x4\n"
- "mov x20, #0x3c\n"
+ "ld1 { v11.s }[2], [x15], #0x4\n"
+ "mov x19, #0x3c\n"
"ld1 { v15.s }[2], [x25], #0x4\n"
"ld1 { v19.s }[2], [x24], #0x4\n"
"tbz x8, #0, 117f\n"
- "ld1 { v11.h }[6], [x16]\n"
+ "ld1 { v11.h }[6], [x15]\n"
"ld1 { v15.h }[6], [x25]\n"
"ld1 { v19.h }[6], [x24]\n"
"b 117f\n"
"102:" // Height 3: Partial accumulate: partial_1_28
- "mov x20, #0x38\n"
+ "mov x19, #0x38\n"
"tbz x8, #0, 117f\n"
- "ld1 { v11.h }[4], [x16]\n"
+ "ld1 { v11.h }[4], [x15]\n"
"ld1 { v15.h }[4], [x25]\n"
"ld1 { v19.h }[4], [x24]\n"
"b 117f\n"
"103:" // Height 3: Partial accumulate: partial_2_24
"tbz x8, #1, 104f\n"
- "ldr s11, [x16], #0x4\n"
- "mov x20, #0x34\n"
+ "ldr s11, [x15], #0x4\n"
"ldr s15, [x25], #0x4\n"
+ "mov x19, #0x34\n"
"ldr s19, [x24], #0x4\n"
"tbz x8, #0, 117f\n"
- "ld1 { v11.h }[2], [x16]\n"
+ "ld1 { v11.h }[2], [x15]\n"
"ld1 { v15.h }[2], [x25]\n"
"ld1 { v19.h }[2], [x24]\n"
"b 117f\n"
"104:" // Height 3: Partial accumulate: partial_1_24
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x8, #0, 117f\n"
- "ldr h11, [x16, #0x0]\n"
+ "ldr h11, [x15, #0x0]\n"
"ldr h15, [x25, #0x0]\n"
"ldr h19, [x24, #0x0]\n"
"b 117f\n"
"105:" // Height 3: Partial accumulate: partial_4_16
"tbz x8, #2, 107f\n"
- "ldr d10, [x16], #0x8\n"
+ "ldr d10, [x15], #0x8\n"
"ldr d14, [x25], #0x8\n"
"ldr d18, [x24], #0x8\n"
"tbz x8, #1, 106f\n"
- "ld1 { v10.s }[2], [x16], #0x4\n"
- "mov x20, #0x2c\n"
+ "ld1 { v10.s }[2], [x15], #0x4\n"
+ "mov x19, #0x2c\n"
"ld1 { v14.s }[2], [x25], #0x4\n"
"ld1 { v18.s }[2], [x24], #0x4\n"
"tbz x8, #0, 117f\n"
- "ld1 { v10.h }[6], [x16]\n"
+ "ld1 { v10.h }[6], [x15]\n"
"ld1 { v14.h }[6], [x25]\n"
"ld1 { v18.h }[6], [x24]\n"
"b 117f\n"
"106:" // Height 3: Partial accumulate: partial_1_20
- "mov x20, #0x28\n"
+ "mov x19, #0x28\n"
"tbz x8, #0, 117f\n"
- "ld1 { v10.h }[4], [x16]\n"
+ "ld1 { v10.h }[4], [x15]\n"
"ld1 { v14.h }[4], [x25]\n"
"ld1 { v18.h }[4], [x24]\n"
"b 117f\n"
"107:" // Height 3: Partial accumulate: partial_2_16
"tbz x8, #1, 108f\n"
- "ldr s10, [x16], #0x4\n"
- "mov x20, #0x24\n"
+ "ldr s10, [x15], #0x4\n"
"ldr s14, [x25], #0x4\n"
+ "mov x19, #0x24\n"
"ldr s18, [x24], #0x4\n"
"tbz x8, #0, 117f\n"
- "ld1 { v10.h }[2], [x16]\n"
+ "ld1 { v10.h }[2], [x15]\n"
"ld1 { v14.h }[2], [x25]\n"
"ld1 { v18.h }[2], [x24]\n"
"b 117f\n"
"108:" // Height 3: Partial accumulate: partial_1_16
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x8, #0, 117f\n"
- "ldr h10, [x16, #0x0]\n"
+ "ldr h10, [x15, #0x0]\n"
"ldr h14, [x25, #0x0]\n"
"ldr h18, [x24, #0x0]\n"
"b 117f\n"
"109:" // Height 3: Partial accumulate: partial_8_0
"tbz x8, #3, 113f\n"
- "ld1 { v8.8h }, [x16], #0x10\n"
+ "ld1 { v8.8h }, [x15], #0x10\n"
"ld1 { v12.8h }, [x25], #0x10\n"
"ld1 { v16.8h }, [x24], #0x10\n"
"tbz x8, #2, 111f\n"
- "ldr d9, [x16], #0x8\n"
+ "ldr d9, [x15], #0x8\n"
"ldr d13, [x25], #0x8\n"
"ldr d17, [x24], #0x8\n"
"tbz x8, #1, 110f\n"
- "ld1 { v9.s }[2], [x16], #0x4\n"
- "mov x20, #0x1c\n"
+ "ld1 { v9.s }[2], [x15], #0x4\n"
+ "mov x19, #0x1c\n"
"ld1 { v13.s }[2], [x25], #0x4\n"
"ld1 { v17.s }[2], [x24], #0x4\n"
"tbz x8, #0, 117f\n"
- "ld1 { v9.h }[6], [x16]\n"
+ "ld1 { v9.h }[6], [x15]\n"
"ld1 { v13.h }[6], [x25]\n"
"ld1 { v17.h }[6], [x24]\n"
"b 117f\n"
"110:" // Height 3: Partial accumulate: partial_1_12
- "mov x20, #0x18\n"
+ "mov x19, #0x18\n"
"tbz x8, #0, 117f\n"
- "ld1 { v9.h }[4], [x16]\n"
+ "ld1 { v9.h }[4], [x15]\n"
"ld1 { v13.h }[4], [x25]\n"
"ld1 { v17.h }[4], [x24]\n"
"b 117f\n"
"111:" // Height 3: Partial accumulate: partial_2_8
"tbz x8, #1, 112f\n"
- "ldr s9, [x16], #0x4\n"
- "mov x20, #0x14\n"
+ "ldr s9, [x15], #0x4\n"
"ldr s13, [x25], #0x4\n"
+ "mov x19, #0x14\n"
"ldr s17, [x24], #0x4\n"
"tbz x8, #0, 117f\n"
- "ld1 { v9.h }[2], [x16]\n"
+ "ld1 { v9.h }[2], [x15]\n"
"ld1 { v13.h }[2], [x25]\n"
"ld1 { v17.h }[2], [x24]\n"
"b 117f\n"
"112:" // Height 3: Partial accumulate: partial_1_8
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x8, #0, 117f\n"
- "ldr h9, [x16, #0x0]\n"
+ "ldr h9, [x15, #0x0]\n"
"ldr h13, [x25, #0x0]\n"
"ldr h17, [x24, #0x0]\n"
"b 117f\n"
"113:" // Height 3: Partial accumulate: partial_4_0
"tbz x8, #2, 115f\n"
- "ldr d8, [x16], #0x8\n"
+ "ldr d8, [x15], #0x8\n"
"ldr d12, [x25], #0x8\n"
"ldr d16, [x24], #0x8\n"
"tbz x8, #1, 114f\n"
- "ld1 { v8.s }[2], [x16], #0x4\n"
- "mov x20, #0xc\n"
+ "ld1 { v8.s }[2], [x15], #0x4\n"
+ "mov x19, #0xc\n"
"ld1 { v12.s }[2], [x25], #0x4\n"
"ld1 { v16.s }[2], [x24], #0x4\n"
"tbz x8, #0, 117f\n"
- "ld1 { v8.h }[6], [x16]\n"
+ "ld1 { v8.h }[6], [x15]\n"
"ld1 { v12.h }[6], [x25]\n"
"ld1 { v16.h }[6], [x24]\n"
"b 117f\n"
"114:" // Height 3: Partial accumulate: partial_1_4
- "mov x20, #0x8\n"
+ "mov x19, #0x8\n"
"tbz x8, #0, 117f\n"
- "ld1 { v8.h }[4], [x16]\n"
+ "ld1 { v8.h }[4], [x15]\n"
"ld1 { v12.h }[4], [x25]\n"
"ld1 { v16.h }[4], [x24]\n"
"b 117f\n"
"115:" // Height 3: Partial accumulate: partial_2_0
"tbz x8, #1, 116f\n"
- "ldr s8, [x16], #0x4\n"
- "mov x20, #0x4\n"
+ "ldr s8, [x15], #0x4\n"
"ldr s12, [x25], #0x4\n"
+ "mov x19, #0x4\n"
"ldr s16, [x24], #0x4\n"
"tbz x8, #0, 117f\n"
- "ld1 { v8.h }[2], [x16]\n"
+ "ld1 { v8.h }[2], [x15]\n"
"ld1 { v12.h }[2], [x25]\n"
"ld1 { v16.h }[2], [x24]\n"
"b 117f\n"
"116:" // Height 3: Partial accumulate: partial_1_0
- "ldr h8, [x16, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr h8, [x15, #0x0]\n"
+ "mov x19, #0x0\n"
"ldr h12, [x25, #0x0]\n"
"ldr h16, [x24, #0x0]\n"
"117:" // Height 3: Partial accumulate: Done
- "sub x16, x16, x20\n"
+ "sub x15, x15, x19\n"
"b 120f\n"
"118:" // Height 3: full accumulate
- "ldr q8, [x16, #0x0]\n"
- "ldr q9, [x16, #0x10]\n"
- "ldr q10, [x16, #0x20]\n"
- "ldr q11, [x16, #0x30]\n"
+ "ldr q8, [x15, #0x0]\n"
+ "ldr q9, [x15, #0x10]\n"
+ "ldr q10, [x15, #0x20]\n"
+ "ldr q11, [x15, #0x30]\n"
"ldr q12, [x25, #0x0]\n"
"ldr q13, [x25, #0x10]\n"
"ldr q14, [x25, #0x20]\n"
@@ -1522,271 +1522,271 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"movi v18.16b, #0x0\n"
"movi v19.16b, #0x0\n"
"120:" // Height 3: setup done
- "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
"121:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w13, [x20, x14, LSL #0x2]\n"
"tbz %x[flags], #3, 122f\n"
- "ldr x21, [%x[input_ptr], x15, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x13, [x21, #0x0]\n"
- "ldr x9, [x21, #0x8]\n"
- "ldr x27, [x21, #0x10]\n"
- "cbnz x15, 123f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x13, x13, x20, LSL #1\n"
- "add x9, x9, x20, LSL #1\n"
- "add x27, x27, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x12, [x20, #0x0]\n"
+ "ldr x28, [x20, #0x8]\n"
+ "ldr x26, [x20, #0x10]\n"
+ "cbnz x14, 123f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x12, x12, x19, LSL #1\n"
+ "add x28, x28, x19, LSL #1\n"
+ "add x26, x26, x19, LSL #1\n"
"b 123f\n"
"122:" // Height 3: setup direct input
- "mov x13, %x[input_ptr]\n"
- "add x9, x13, x20, LSL #1\n"
- "add x27, x9, x20, LSL #1\n"
+ "mov x12, %x[input_ptr]\n"
+ "add x28, x12, x19, LSL #1\n"
+ "add x26, x28, x19, LSL #1\n"
"123:" // Height 3: input setup done
- "cmp x14, #0x8\n"
+ "cmp x13, #0x8\n"
"blt 126f\n"
- "ldr q0, [x13, #0x0]\n"
- "cmp x14, #0x10\n"
- "ldr q1, [x9, #0x0]\n"
- "ldr q2, [x27, #0x0]\n"
+ "ldr q0, [x12, #0x0]\n"
+ "ldr q1, [x28, #0x0]\n"
+ "cmp x13, #0x10\n"
+ "ldr q2, [x26, #0x0]\n"
"ldr q6, [x17, #0x0]\n"
- "ldr q7, [x17, #0x10]\n"
"blt 125f\n"
"124:" // Height 3: Multiply loop: Main loop head
"fmla v8.8h, v6.8h, v0.h[0]\n"
- "ldr x12, [x17, #0x28]\n"
+ "ldr d7, [x17, #0x10]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
- "ldr x11, [x17, #0x38]\n"
+ "ldr x11, [x17, #0x18]\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
"ldr d6, [x17, #0x20]\n"
+ "ldr x10, [x17, #0x28]\n"
+ "add x12, x12, #0x10\n"
+ "mov v7.d[1], x11\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
+ "ldr x11, [x17, #0x38]\n"
+ "add x28, x28, #0x10\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
- "mov v6.d[1], x12\n"
+ "mov v6.d[1], x10\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
- "ldr x12, [x17, #0x48]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
"ldr d7, [x17, #0x30]\n"
- "mov v7.d[1], x11\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
+ "ldr x10, [x17, #0x48]\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
- "ldr x11, [x17, #0x58]\n"
+ "ldr x9, [x12, #0x8]\n"
"fmla v18.8h, v6.8h, v2.h[0]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x17, #0x40]\n"
+ "add x26, x26, #0x10\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
- "mov v6.d[1], x12\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"fmla v15.8h, v7.8h, v1.h[0]\n"
- "ldr x12, [x17, #0x68]\n"
+ "ldr x11, [x17, #0x58]\n"
"fmla v19.8h, v7.8h, v2.h[0]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x50]\n"
- "mov v7.d[1], x11\n"
+ "sub x13, x13, #0x8\n"
"fmla v8.8h, v6.8h, v0.h[1]\n"
+ "ldr x10, [x17, #0x68]\n"
"fmla v12.8h, v6.8h, v1.h[1]\n"
- "ldr x11, [x17, #0x78]\n"
+ "ldr x27, [x28, #0x8]\n"
"fmla v16.8h, v6.8h, v2.h[1]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x17, #0x60]\n"
+ "cmp x13, #0x10\n"
"fmla v9.8h, v7.8h, v0.h[1]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0x78]\n"
"fmla v13.8h, v7.8h, v1.h[1]\n"
- "ldr x12, [x17, #0x88]\n"
+ "ldr x25, [x26, #0x8]\n"
"fmla v17.8h, v7.8h, v2.h[1]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x70]\n"
- "mov v7.d[1], x11\n"
"fmla v10.8h, v6.8h, v0.h[1]\n"
+ "ldr x10, [x17, #0x88]\n"
"fmla v14.8h, v6.8h, v1.h[1]\n"
- "ldr x11, [x17, #0x98]\n"
"fmla v18.8h, v6.8h, v2.h[1]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x17, #0x80]\n"
"fmla v11.8h, v7.8h, v0.h[1]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0x98]\n"
"fmla v15.8h, v7.8h, v1.h[1]\n"
- "ldr x12, [x17, #0xa8]\n"
"fmla v19.8h, v7.8h, v2.h[1]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x90]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[2]\n"
+ "ldr x10, [x17, #0xa8]\n"
"fmla v12.8h, v6.8h, v1.h[2]\n"
- "ldr x11, [x17, #0xb8]\n"
"fmla v16.8h, v6.8h, v2.h[2]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x17, #0xa0]\n"
"fmla v9.8h, v7.8h, v0.h[2]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0xb8]\n"
"fmla v13.8h, v7.8h, v1.h[2]\n"
- "ldr x12, [x17, #0xc8]\n"
"fmla v17.8h, v7.8h, v2.h[2]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0xb0]\n"
- "mov v7.d[1], x11\n"
"fmla v10.8h, v6.8h, v0.h[2]\n"
+ "ldr x10, [x17, #0xc8]\n"
"fmla v14.8h, v6.8h, v1.h[2]\n"
- "ldr x11, [x17, #0xd8]\n"
"fmla v18.8h, v6.8h, v2.h[2]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x17, #0xc0]\n"
"fmla v11.8h, v7.8h, v0.h[2]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0xd8]\n"
"fmla v15.8h, v7.8h, v1.h[2]\n"
- "ldr x12, [x17, #0xe8]\n"
"fmla v19.8h, v7.8h, v2.h[2]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0xd0]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[3]\n"
+ "ldr x10, [x17, #0xe8]\n"
"fmla v12.8h, v6.8h, v1.h[3]\n"
- "ldr x11, [x17, #0xf8]\n"
"fmla v16.8h, v6.8h, v2.h[3]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x17, #0xe0]\n"
"fmla v9.8h, v7.8h, v0.h[3]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0xf8]\n"
"fmla v13.8h, v7.8h, v1.h[3]\n"
- "ldr x12, [x17, #0x108]\n"
"fmla v17.8h, v7.8h, v2.h[3]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0xf0]\n"
- "mov v7.d[1], x11\n"
"fmla v10.8h, v6.8h, v0.h[3]\n"
+ "ldr x10, [x17, #0x108]\n"
"fmla v14.8h, v6.8h, v1.h[3]\n"
- "ldr x11, [x17, #0x118]\n"
"fmla v18.8h, v6.8h, v2.h[3]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x17, #0x100]\n"
"fmla v11.8h, v7.8h, v0.h[3]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0x118]\n"
"fmla v15.8h, v7.8h, v1.h[3]\n"
- "ldr x12, [x17, #0x128]\n"
"fmla v19.8h, v7.8h, v2.h[3]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x110]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[4]\n"
+ "ldr x10, [x17, #0x128]\n"
"fmla v12.8h, v6.8h, v1.h[4]\n"
- "ldr x11, [x17, #0x138]\n"
"fmla v16.8h, v6.8h, v2.h[4]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x17, #0x120]\n"
"fmla v9.8h, v7.8h, v0.h[4]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0x138]\n"
"fmla v13.8h, v7.8h, v1.h[4]\n"
- "ldr x12, [x17, #0x148]\n"
"fmla v17.8h, v7.8h, v2.h[4]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x130]\n"
- "mov v7.d[1], x11\n"
"fmla v10.8h, v6.8h, v0.h[4]\n"
+ "ldr x10, [x17, #0x148]\n"
"fmla v14.8h, v6.8h, v1.h[4]\n"
- "ldr x11, [x17, #0x158]\n"
"fmla v18.8h, v6.8h, v2.h[4]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x17, #0x140]\n"
"fmla v11.8h, v7.8h, v0.h[4]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0x158]\n"
"fmla v15.8h, v7.8h, v1.h[4]\n"
- "ldr x12, [x17, #0x168]\n"
"fmla v19.8h, v7.8h, v2.h[4]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x150]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[5]\n"
+ "ldr x10, [x17, #0x168]\n"
"fmla v12.8h, v6.8h, v1.h[5]\n"
- "ldr x11, [x17, #0x178]\n"
"fmla v16.8h, v6.8h, v2.h[5]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x17, #0x160]\n"
"fmla v9.8h, v7.8h, v0.h[5]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0x178]\n"
"fmla v13.8h, v7.8h, v1.h[5]\n"
- "ldr x12, [x17, #0x188]\n"
"fmla v17.8h, v7.8h, v2.h[5]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x170]\n"
- "mov v7.d[1], x11\n"
"fmla v10.8h, v6.8h, v0.h[5]\n"
+ "ldr x10, [x17, #0x188]\n"
"fmla v14.8h, v6.8h, v1.h[5]\n"
- "ldr x11, [x17, #0x198]\n"
"fmla v18.8h, v6.8h, v2.h[5]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x17, #0x180]\n"
"fmla v11.8h, v7.8h, v0.h[5]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0x198]\n"
"fmla v15.8h, v7.8h, v1.h[5]\n"
- "ldr x12, [x17, #0x1a8]\n"
"fmla v19.8h, v7.8h, v2.h[5]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x190]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[6]\n"
+ "ldr x10, [x17, #0x1a8]\n"
"fmla v12.8h, v6.8h, v1.h[6]\n"
- "ldr x11, [x17, #0x1b8]\n"
"fmla v16.8h, v6.8h, v2.h[6]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x17, #0x1a0]\n"
"fmla v9.8h, v7.8h, v0.h[6]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0x1b8]\n"
"fmla v13.8h, v7.8h, v1.h[6]\n"
- "ldr x12, [x17, #0x1c8]\n"
"fmla v17.8h, v7.8h, v2.h[6]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x1b0]\n"
- "mov v7.d[1], x11\n"
"fmla v10.8h, v6.8h, v0.h[6]\n"
+ "ldr x10, [x17, #0x1c8]\n"
"fmla v14.8h, v6.8h, v1.h[6]\n"
- "ldr x11, [x17, #0x1d8]\n"
"fmla v18.8h, v6.8h, v2.h[6]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x17, #0x1c0]\n"
"fmla v11.8h, v7.8h, v0.h[6]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0x1d8]\n"
"fmla v15.8h, v7.8h, v1.h[6]\n"
- "ldr x12, [x17, #0x1e8]\n"
"fmla v19.8h, v7.8h, v2.h[6]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x1d0]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[7]\n"
+ "ldr x10, [x17, #0x1e8]\n"
"fmla v12.8h, v6.8h, v1.h[7]\n"
- "ldr x11, [x17, #0x1f8]\n"
"fmla v16.8h, v6.8h, v2.h[7]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x17, #0x1e0]\n"
"fmla v9.8h, v7.8h, v0.h[7]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0x1f8]\n"
"fmla v13.8h, v7.8h, v1.h[7]\n"
- "add x13, x13, #0x10\n"
"fmla v17.8h, v7.8h, v2.h[7]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x1f0]\n"
- "mov v7.d[1], x11\n"
- "add x9, x9, #0x10\n"
- "add x27, x27, #0x10\n"
"add x17, x17, #0x200\n"
"fmla v10.8h, v6.8h, v0.h[7]\n"
- "ldr x12, [x17, #0x8]\n"
+ "ldr x10, [x17, #0x8]\n"
"fmla v14.8h, v6.8h, v1.h[7]\n"
- "ldr x10, [x13, #0x8]\n"
"fmla v18.8h, v6.8h, v2.h[7]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x17, #0x0]\n"
"fmla v11.8h, v7.8h, v0.h[7]\n"
- "ldr d0, [x13, #0x0]\n"
+ "ldr d0, [x12, #0x0]\n"
"fmla v15.8h, v7.8h, v1.h[7]\n"
- "ldr d1, [x9, #0x0]\n"
- "ldr x28, [x9, #0x8]\n"
+ "ldr d1, [x28, #0x0]\n"
"fmla v19.8h, v7.8h, v2.h[7]\n"
- "ldr d2, [x27, #0x0]\n"
- "sub x14, x14, #0x8\n"
- "ldr d7, [x17, #0x10]\n"
- "cmp x14, #0x10\n"
- "ldr x26, [x27, #0x8]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0x18]\n"
- "mov v0.d[1], x10\n"
- "prfm pldl1keep, [x13, #0x80]\n"
- "mov v1.d[1], x28\n"
- "prfm pldl1keep, [x9, #0x80]\n"
- "mov v2.d[1], x26\n"
- "prfm pldl1keep, [x27, #0x80]\n"
- "mov v7.d[1], x11\n"
+ "mov v6.d[1], x10\n"
+ "mov v0.d[1], x9\n"
+ "ldr d2, [x26, #0x0]\n"
+ "mov v1.d[1], x27\n"
+ "mov v2.d[1], x25\n"
"bge 124b\n"
"125:" // Height 3: Multiply loop: Single iteration only
"fmla v8.8h, v6.8h, v0.h[0]\n"
- "add x13, x13, #0x10\n"
+ "ldr q7, [x17, #0x10]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
- "add x9, x9, #0x10\n"
+ "sub x13, x13, #0x8\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
"ldr q6, [x17, #0x20]\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
- "add x27, x27, #0x10\n"
+ "add x12, x12, #0x10\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
- "sub x14, x14, #0x8\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
"ldr q7, [x17, #0x30]\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "add x28, x28, #0x10\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
"fmla v18.8h, v6.8h, v2.h[0]\n"
"ldr q6, [x17, #0x40]\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "add x26, x26, #0x10\n"
"fmla v15.8h, v7.8h, v1.h[0]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"fmla v19.8h, v7.8h, v2.h[0]\n"
"ldr q7, [x17, #0x50]\n"
"fmla v8.8h, v6.8h, v0.h[1]\n"
@@ -1901,15 +1901,15 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmla v15.8h, v7.8h, v1.h[7]\n"
"fmla v19.8h, v7.8h, v2.h[7]\n"
"126:" // Height 3: Multiply loop: Main loop skip
- "cbz x14, 128f\n"
+ "cbz x13, 128f\n"
"127:" // Height 3: Multiply loop: Odd block loop
- "ldr h0, [x13], #0x2\n"
- "sub x14, x14, #0x1\n"
- "ldr h1, [x9], #0x2\n"
- "ldr h2, [x27], #0x2\n"
+ "ldr h0, [x12], #0x2\n"
+ "sub x13, x13, #0x1\n"
+ "ldr h1, [x28], #0x2\n"
+ "ldr h2, [x26], #0x2\n"
"ldr q6, [x17, #0x0]\n"
- "fmla v8.8h, v6.8h, v0.h[0]\n"
"ldr q7, [x17, #0x10]\n"
+ "fmla v8.8h, v6.8h, v0.h[0]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
"ldr q6, [x17, #0x20]\n"
@@ -1924,21 +1924,23 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmla v11.8h, v7.8h, v0.h[0]\n"
"fmla v15.8h, v7.8h, v1.h[0]\n"
"fmla v19.8h, v7.8h, v2.h[0]\n"
- "cbnz x14, 127b\n"
+ "cbnz x13, 127b\n"
"128:" // Height 3: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x15, x15, #0x1\n"
- "cmp x15, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x14, x14, #0x1\n"
+ "cmp x14, x19\n"
"bne 121b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "prfm pstl1keep, [x16, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x15, #0x0]\n"
+ "add x25, x15, x19, LSL #1\n"
"prfm pstl1keep, [x25, #0x0]\n"
+ "add x24, x25, x19, LSL #1\n"
"prfm pstl1keep, [x24, #0x0]\n"
"tbz %x[flags], #1, 129f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v0.8h }, [x20]\n"
+ "add x20, %x[args_ptr], %[offset_min]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.8h }, [x20]\n"
+ "ld1r { v0.8h }, [x19]\n"
"fmin v8.8h, v8.8h, v0.8h\n"
"fmin v9.8h, v9.8h, v0.8h\n"
"fmin v10.8h, v10.8h, v0.8h\n"
@@ -1949,189 +1951,187 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmin v15.8h, v15.8h, v0.8h\n"
"fmin v16.8h, v16.8h, v0.8h\n"
"fmin v17.8h, v17.8h, v0.8h\n"
+ "fmax v8.8h, v8.8h, v1.8h\n"
+ "fmax v9.8h, v9.8h, v1.8h\n"
+ "fmax v10.8h, v10.8h, v1.8h\n"
+ "fmax v11.8h, v11.8h, v1.8h\n"
+ "fmax v12.8h, v12.8h, v1.8h\n"
+ "fmax v13.8h, v13.8h, v1.8h\n"
+ "fmax v14.8h, v14.8h, v1.8h\n"
+ "fmax v15.8h, v15.8h, v1.8h\n"
+ "fmax v16.8h, v16.8h, v1.8h\n"
+ "fmax v17.8h, v17.8h, v1.8h\n"
"fmin v18.8h, v18.8h, v0.8h\n"
"fmin v19.8h, v19.8h, v0.8h\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.8h }, [x20]\n"
- "fmax v8.8h, v8.8h, v0.8h\n"
- "fmax v9.8h, v9.8h, v0.8h\n"
- "fmax v10.8h, v10.8h, v0.8h\n"
- "fmax v11.8h, v11.8h, v0.8h\n"
- "fmax v12.8h, v12.8h, v0.8h\n"
- "fmax v13.8h, v13.8h, v0.8h\n"
- "fmax v14.8h, v14.8h, v0.8h\n"
- "fmax v15.8h, v15.8h, v0.8h\n"
- "fmax v16.8h, v16.8h, v0.8h\n"
- "fmax v17.8h, v17.8h, v0.8h\n"
- "fmax v18.8h, v18.8h, v0.8h\n"
- "fmax v19.8h, v19.8h, v0.8h\n"
+ "fmax v18.8h, v18.8h, v1.8h\n"
+ "fmax v19.8h, v19.8h, v1.8h\n"
"129:" // Height 3: No activation
"cmp x8, #0x20\n"
"bge 146f\n"
"tbz x8, #4, 137f\n"
- "st1 { v8.8h }, [x16], #0x10\n"
- "st1 { v9.8h }, [x16], #0x10\n"
+ "st1 { v8.8h }, [x15], #0x10\n"
+ "st1 { v9.8h }, [x15], #0x10\n"
"st1 { v12.8h }, [x25], #0x10\n"
"st1 { v13.8h }, [x25], #0x10\n"
"st1 { v16.8h }, [x24], #0x10\n"
"st1 { v17.8h }, [x24], #0x10\n"
"tbz x8, #3, 133f\n"
- "st1 { v10.8h }, [x16], #0x10\n"
+ "st1 { v10.8h }, [x15], #0x10\n"
"st1 { v14.8h }, [x25], #0x10\n"
"st1 { v18.8h }, [x24], #0x10\n"
"tbz x8, #2, 131f\n"
- "str d11, [x16], #0x8\n"
+ "str d11, [x15], #0x8\n"
"str d15, [x25], #0x8\n"
"str d19, [x24], #0x8\n"
"tbz x8, #1, 130f\n"
- "st1 { v11.s }[2], [x16], #0x4\n"
+ "st1 { v11.s }[2], [x15], #0x4\n"
"st1 { v15.s }[2], [x25], #0x4\n"
"st1 { v19.s }[2], [x24], #0x4\n"
"tbz x8, #0, 145f\n"
- "st1 { v11.h }[6], [x16]\n"
+ "st1 { v11.h }[6], [x15]\n"
"st1 { v15.h }[6], [x25]\n"
"st1 { v19.h }[6], [x24]\n"
"b 145f\n"
"130:" // Height 3: Partial direct writeback: partial_1_28
"tbz x8, #0, 145f\n"
- "st1 { v11.h }[4], [x16]\n"
+ "st1 { v11.h }[4], [x15]\n"
"st1 { v15.h }[4], [x25]\n"
"st1 { v19.h }[4], [x24]\n"
"b 145f\n"
"131:" // Height 3: Partial direct writeback: partial_2_24
"tbz x8, #1, 132f\n"
- "str s11, [x16], #0x4\n"
+ "str s11, [x15], #0x4\n"
"str s15, [x25], #0x4\n"
"str s19, [x24], #0x4\n"
"tbz x8, #0, 145f\n"
- "st1 { v11.h }[2], [x16]\n"
+ "st1 { v11.h }[2], [x15]\n"
"st1 { v15.h }[2], [x25]\n"
"st1 { v19.h }[2], [x24]\n"
"b 145f\n"
"132:" // Height 3: Partial direct writeback: partial_1_24
"tbz x8, #0, 145f\n"
- "str h11, [x16, #0x0]\n"
+ "str h11, [x15, #0x0]\n"
"str h15, [x25, #0x0]\n"
"str h19, [x24, #0x0]\n"
"b 145f\n"
"133:" // Height 3: Partial direct writeback: partial_4_16
"tbz x8, #2, 135f\n"
- "str d10, [x16], #0x8\n"
+ "str d10, [x15], #0x8\n"
"str d14, [x25], #0x8\n"
"str d18, [x24], #0x8\n"
"tbz x8, #1, 134f\n"
- "st1 { v10.s }[2], [x16], #0x4\n"
+ "st1 { v10.s }[2], [x15], #0x4\n"
"st1 { v14.s }[2], [x25], #0x4\n"
"st1 { v18.s }[2], [x24], #0x4\n"
"tbz x8, #0, 145f\n"
- "st1 { v10.h }[6], [x16]\n"
+ "st1 { v10.h }[6], [x15]\n"
"st1 { v14.h }[6], [x25]\n"
"st1 { v18.h }[6], [x24]\n"
"b 145f\n"
"134:" // Height 3: Partial direct writeback: partial_1_20
"tbz x8, #0, 145f\n"
- "st1 { v10.h }[4], [x16]\n"
+ "st1 { v10.h }[4], [x15]\n"
"st1 { v14.h }[4], [x25]\n"
"st1 { v18.h }[4], [x24]\n"
"b 145f\n"
"135:" // Height 3: Partial direct writeback: partial_2_16
"tbz x8, #1, 136f\n"
- "str s10, [x16], #0x4\n"
+ "str s10, [x15], #0x4\n"
"str s14, [x25], #0x4\n"
"str s18, [x24], #0x4\n"
"tbz x8, #0, 145f\n"
- "st1 { v10.h }[2], [x16]\n"
+ "st1 { v10.h }[2], [x15]\n"
"st1 { v14.h }[2], [x25]\n"
"st1 { v18.h }[2], [x24]\n"
"b 145f\n"
"136:" // Height 3: Partial direct writeback: partial_1_16
"tbz x8, #0, 145f\n"
- "str h10, [x16, #0x0]\n"
+ "str h10, [x15, #0x0]\n"
"str h14, [x25, #0x0]\n"
"str h18, [x24, #0x0]\n"
"b 145f\n"
"137:" // Height 3: Partial direct writeback: partial_8_0
"tbz x8, #3, 141f\n"
- "st1 { v8.8h }, [x16], #0x10\n"
+ "st1 { v8.8h }, [x15], #0x10\n"
"st1 { v12.8h }, [x25], #0x10\n"
"st1 { v16.8h }, [x24], #0x10\n"
"tbz x8, #2, 139f\n"
- "str d9, [x16], #0x8\n"
+ "str d9, [x15], #0x8\n"
"str d13, [x25], #0x8\n"
"str d17, [x24], #0x8\n"
"tbz x8, #1, 138f\n"
- "st1 { v9.s }[2], [x16], #0x4\n"
+ "st1 { v9.s }[2], [x15], #0x4\n"
"st1 { v13.s }[2], [x25], #0x4\n"
"st1 { v17.s }[2], [x24], #0x4\n"
"tbz x8, #0, 145f\n"
- "st1 { v9.h }[6], [x16]\n"
+ "st1 { v9.h }[6], [x15]\n"
"st1 { v13.h }[6], [x25]\n"
"st1 { v17.h }[6], [x24]\n"
"b 145f\n"
"138:" // Height 3: Partial direct writeback: partial_1_12
"tbz x8, #0, 145f\n"
- "st1 { v9.h }[4], [x16]\n"
+ "st1 { v9.h }[4], [x15]\n"
"st1 { v13.h }[4], [x25]\n"
"st1 { v17.h }[4], [x24]\n"
"b 145f\n"
"139:" // Height 3: Partial direct writeback: partial_2_8
"tbz x8, #1, 140f\n"
- "str s9, [x16], #0x4\n"
+ "str s9, [x15], #0x4\n"
"str s13, [x25], #0x4\n"
"str s17, [x24], #0x4\n"
"tbz x8, #0, 145f\n"
- "st1 { v9.h }[2], [x16]\n"
+ "st1 { v9.h }[2], [x15]\n"
"st1 { v13.h }[2], [x25]\n"
"st1 { v17.h }[2], [x24]\n"
"b 145f\n"
"140:" // Height 3: Partial direct writeback: partial_1_8
"tbz x8, #0, 145f\n"
- "str h9, [x16, #0x0]\n"
+ "str h9, [x15, #0x0]\n"
"str h13, [x25, #0x0]\n"
"str h17, [x24, #0x0]\n"
"b 145f\n"
"141:" // Height 3: Partial direct writeback: partial_4_0
"tbz x8, #2, 143f\n"
- "str d8, [x16], #0x8\n"
+ "str d8, [x15], #0x8\n"
"str d12, [x25], #0x8\n"
"str d16, [x24], #0x8\n"
"tbz x8, #1, 142f\n"
- "st1 { v8.s }[2], [x16], #0x4\n"
+ "st1 { v8.s }[2], [x15], #0x4\n"
"st1 { v12.s }[2], [x25], #0x4\n"
"st1 { v16.s }[2], [x24], #0x4\n"
"tbz x8, #0, 145f\n"
- "st1 { v8.h }[6], [x16]\n"
+ "st1 { v8.h }[6], [x15]\n"
"st1 { v12.h }[6], [x25]\n"
"st1 { v16.h }[6], [x24]\n"
"b 145f\n"
"142:" // Height 3: Partial direct writeback: partial_1_4
"tbz x8, #0, 145f\n"
- "st1 { v8.h }[4], [x16]\n"
+ "st1 { v8.h }[4], [x15]\n"
"st1 { v12.h }[4], [x25]\n"
"st1 { v16.h }[4], [x24]\n"
"b 145f\n"
"143:" // Height 3: Partial direct writeback: partial_2_0
"tbz x8, #1, 144f\n"
- "str s8, [x16], #0x4\n"
+ "str s8, [x15], #0x4\n"
"str s12, [x25], #0x4\n"
"str s16, [x24], #0x4\n"
"tbz x8, #0, 145f\n"
- "st1 { v8.h }[2], [x16]\n"
+ "st1 { v8.h }[2], [x15]\n"
"st1 { v12.h }[2], [x25]\n"
"st1 { v16.h }[2], [x24]\n"
"b 145f\n"
"144:" // Height 3: Partial direct writeback: partial_1_0
- "str h8, [x16, #0x0]\n"
+ "str h8, [x15, #0x0]\n"
"str h12, [x25, #0x0]\n"
"str h16, [x24, #0x0]\n"
"145:" // Height 3: Partial direct writeback: Done
"b 147f\n"
"146:" // Height 3: Full writeback
- "str q8, [x16, #0x0]\n"
- "str q9, [x16, #0x10]\n"
- "str q10, [x16, #0x20]\n"
- "str q11, [x16, #0x30]\n"
- "add x16, x16, #0x40\n"
+ "str q8, [x15, #0x0]\n"
+ "str q9, [x15, #0x10]\n"
+ "str q10, [x15, #0x20]\n"
+ "str q11, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
"str q12, [x25, #0x0]\n"
"str q13, [x25, #0x10]\n"
"str q14, [x25, #0x20]\n"
@@ -2145,22 +2145,22 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"bgt 100b\n"
"b 296f\n"
"148:" // Height 4
- "mov x7, %x[bias]\n"
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x16, %x[bias]\n"
"ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x16, %x[output_ptr]\n"
+ "mov x15, %x[output_ptr]\n"
"149:" // Height 4: Column loop
- "cbz x7, 150f\n"
- "ldr q8, [x7, #0x0]\n"
+ "cbz x16, 150f\n"
+ "ldr q8, [x16, #0x0]\n"
+ "ldr q9, [x16, #0x10]\n"
+ "ldr q10, [x16, #0x20]\n"
"mov v12.16b, v8.16b\n"
- "ldr q9, [x7, #0x10]\n"
+ "ldr q11, [x16, #0x30]\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x7, #0x20]\n"
+ "add x16, x16, #0x40\n"
"mov v14.16b, v10.16b\n"
- "ldr q11, [x7, #0x30]\n"
"mov v15.16b, v11.16b\n"
"mov v16.16b, v8.16b\n"
- "add x7, x7, #0x40\n"
"mov v17.16b, v9.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
@@ -2171,224 +2171,224 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 169f\n"
"150:" // Height 4: no bias
"tbz %x[flags], #0, 168f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x8, #0x20\n"
- "add x23, x24, x20, LSL #1\n"
+ "add x25, x15, x19, LSL #1\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
"bge 167f\n"
"tbz x8, #4, 158f\n"
- "ld1 { v8.8h }, [x16], #0x10\n"
+ "ld1 { v8.8h }, [x15], #0x10\n"
"ld1 { v12.8h }, [x25], #0x10\n"
"ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v20.8h }, [x23], #0x10\n"
- "ld1 { v9.8h }, [x16], #0x10\n"
+ "ld1 { v9.8h }, [x15], #0x10\n"
"ld1 { v13.8h }, [x25], #0x10\n"
"ld1 { v17.8h }, [x24], #0x10\n"
+ "ld1 { v20.8h }, [x23], #0x10\n"
"ld1 { v21.8h }, [x23], #0x10\n"
"tbz x8, #3, 154f\n"
- "ld1 { v10.8h }, [x16], #0x10\n"
+ "ld1 { v10.8h }, [x15], #0x10\n"
"ld1 { v14.8h }, [x25], #0x10\n"
"ld1 { v18.8h }, [x24], #0x10\n"
"ld1 { v22.8h }, [x23], #0x10\n"
"tbz x8, #2, 152f\n"
- "ldr d11, [x16], #0x8\n"
+ "ldr d11, [x15], #0x8\n"
"ldr d15, [x25], #0x8\n"
"ldr d19, [x24], #0x8\n"
"ldr d23, [x23], #0x8\n"
"tbz x8, #1, 151f\n"
- "ld1 { v11.s }[2], [x16], #0x4\n"
- "mov x20, #0x3c\n"
+ "ld1 { v11.s }[2], [x15], #0x4\n"
+ "mov x19, #0x3c\n"
"ld1 { v15.s }[2], [x25], #0x4\n"
"ld1 { v19.s }[2], [x24], #0x4\n"
"ld1 { v23.s }[2], [x23], #0x4\n"
"tbz x8, #0, 166f\n"
- "ld1 { v11.h }[6], [x16]\n"
+ "ld1 { v11.h }[6], [x15]\n"
"ld1 { v15.h }[6], [x25]\n"
"ld1 { v19.h }[6], [x24]\n"
"ld1 { v23.h }[6], [x23]\n"
"b 166f\n"
"151:" // Height 4: Partial accumulate: partial_1_28
- "mov x20, #0x38\n"
+ "mov x19, #0x38\n"
"tbz x8, #0, 166f\n"
- "ld1 { v11.h }[4], [x16]\n"
+ "ld1 { v11.h }[4], [x15]\n"
"ld1 { v15.h }[4], [x25]\n"
"ld1 { v19.h }[4], [x24]\n"
"ld1 { v23.h }[4], [x23]\n"
"b 166f\n"
"152:" // Height 4: Partial accumulate: partial_2_24
"tbz x8, #1, 153f\n"
- "ldr s11, [x16], #0x4\n"
- "mov x20, #0x34\n"
+ "ldr s11, [x15], #0x4\n"
"ldr s15, [x25], #0x4\n"
+ "mov x19, #0x34\n"
"ldr s19, [x24], #0x4\n"
"ldr s23, [x23], #0x4\n"
"tbz x8, #0, 166f\n"
- "ld1 { v11.h }[2], [x16]\n"
+ "ld1 { v11.h }[2], [x15]\n"
"ld1 { v15.h }[2], [x25]\n"
"ld1 { v19.h }[2], [x24]\n"
"ld1 { v23.h }[2], [x23]\n"
"b 166f\n"
"153:" // Height 4: Partial accumulate: partial_1_24
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x8, #0, 166f\n"
- "ldr h11, [x16, #0x0]\n"
+ "ldr h11, [x15, #0x0]\n"
"ldr h15, [x25, #0x0]\n"
"ldr h19, [x24, #0x0]\n"
"ldr h23, [x23, #0x0]\n"
"b 166f\n"
"154:" // Height 4: Partial accumulate: partial_4_16
"tbz x8, #2, 156f\n"
- "ldr d10, [x16], #0x8\n"
+ "ldr d10, [x15], #0x8\n"
"ldr d14, [x25], #0x8\n"
"ldr d18, [x24], #0x8\n"
"ldr d22, [x23], #0x8\n"
"tbz x8, #1, 155f\n"
- "ld1 { v10.s }[2], [x16], #0x4\n"
- "mov x20, #0x2c\n"
+ "ld1 { v10.s }[2], [x15], #0x4\n"
+ "mov x19, #0x2c\n"
"ld1 { v14.s }[2], [x25], #0x4\n"
"ld1 { v18.s }[2], [x24], #0x4\n"
"ld1 { v22.s }[2], [x23], #0x4\n"
"tbz x8, #0, 166f\n"
- "ld1 { v10.h }[6], [x16]\n"
+ "ld1 { v10.h }[6], [x15]\n"
"ld1 { v14.h }[6], [x25]\n"
"ld1 { v18.h }[6], [x24]\n"
"ld1 { v22.h }[6], [x23]\n"
"b 166f\n"
"155:" // Height 4: Partial accumulate: partial_1_20
- "mov x20, #0x28\n"
+ "mov x19, #0x28\n"
"tbz x8, #0, 166f\n"
- "ld1 { v10.h }[4], [x16]\n"
+ "ld1 { v10.h }[4], [x15]\n"
"ld1 { v14.h }[4], [x25]\n"
"ld1 { v18.h }[4], [x24]\n"
"ld1 { v22.h }[4], [x23]\n"
"b 166f\n"
"156:" // Height 4: Partial accumulate: partial_2_16
"tbz x8, #1, 157f\n"
- "ldr s10, [x16], #0x4\n"
- "mov x20, #0x24\n"
+ "ldr s10, [x15], #0x4\n"
"ldr s14, [x25], #0x4\n"
+ "mov x19, #0x24\n"
"ldr s18, [x24], #0x4\n"
"ldr s22, [x23], #0x4\n"
"tbz x8, #0, 166f\n"
- "ld1 { v10.h }[2], [x16]\n"
+ "ld1 { v10.h }[2], [x15]\n"
"ld1 { v14.h }[2], [x25]\n"
"ld1 { v18.h }[2], [x24]\n"
"ld1 { v22.h }[2], [x23]\n"
"b 166f\n"
"157:" // Height 4: Partial accumulate: partial_1_16
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x8, #0, 166f\n"
- "ldr h10, [x16, #0x0]\n"
+ "ldr h10, [x15, #0x0]\n"
"ldr h14, [x25, #0x0]\n"
"ldr h18, [x24, #0x0]\n"
"ldr h22, [x23, #0x0]\n"
"b 166f\n"
"158:" // Height 4: Partial accumulate: partial_8_0
"tbz x8, #3, 162f\n"
- "ld1 { v8.8h }, [x16], #0x10\n"
+ "ld1 { v8.8h }, [x15], #0x10\n"
"ld1 { v12.8h }, [x25], #0x10\n"
"ld1 { v16.8h }, [x24], #0x10\n"
"ld1 { v20.8h }, [x23], #0x10\n"
"tbz x8, #2, 160f\n"
- "ldr d9, [x16], #0x8\n"
+ "ldr d9, [x15], #0x8\n"
"ldr d13, [x25], #0x8\n"
"ldr d17, [x24], #0x8\n"
"ldr d21, [x23], #0x8\n"
"tbz x8, #1, 159f\n"
- "ld1 { v9.s }[2], [x16], #0x4\n"
- "mov x20, #0x1c\n"
+ "ld1 { v9.s }[2], [x15], #0x4\n"
+ "mov x19, #0x1c\n"
"ld1 { v13.s }[2], [x25], #0x4\n"
"ld1 { v17.s }[2], [x24], #0x4\n"
"ld1 { v21.s }[2], [x23], #0x4\n"
"tbz x8, #0, 166f\n"
- "ld1 { v9.h }[6], [x16]\n"
+ "ld1 { v9.h }[6], [x15]\n"
"ld1 { v13.h }[6], [x25]\n"
"ld1 { v17.h }[6], [x24]\n"
"ld1 { v21.h }[6], [x23]\n"
"b 166f\n"
"159:" // Height 4: Partial accumulate: partial_1_12
- "mov x20, #0x18\n"
+ "mov x19, #0x18\n"
"tbz x8, #0, 166f\n"
- "ld1 { v9.h }[4], [x16]\n"
+ "ld1 { v9.h }[4], [x15]\n"
"ld1 { v13.h }[4], [x25]\n"
"ld1 { v17.h }[4], [x24]\n"
"ld1 { v21.h }[4], [x23]\n"
"b 166f\n"
"160:" // Height 4: Partial accumulate: partial_2_8
"tbz x8, #1, 161f\n"
- "ldr s9, [x16], #0x4\n"
- "mov x20, #0x14\n"
+ "ldr s9, [x15], #0x4\n"
"ldr s13, [x25], #0x4\n"
+ "mov x19, #0x14\n"
"ldr s17, [x24], #0x4\n"
"ldr s21, [x23], #0x4\n"
"tbz x8, #0, 166f\n"
- "ld1 { v9.h }[2], [x16]\n"
+ "ld1 { v9.h }[2], [x15]\n"
"ld1 { v13.h }[2], [x25]\n"
"ld1 { v17.h }[2], [x24]\n"
"ld1 { v21.h }[2], [x23]\n"
"b 166f\n"
"161:" // Height 4: Partial accumulate: partial_1_8
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x8, #0, 166f\n"
- "ldr h9, [x16, #0x0]\n"
+ "ldr h9, [x15, #0x0]\n"
"ldr h13, [x25, #0x0]\n"
"ldr h17, [x24, #0x0]\n"
"ldr h21, [x23, #0x0]\n"
"b 166f\n"
"162:" // Height 4: Partial accumulate: partial_4_0
"tbz x8, #2, 164f\n"
- "ldr d8, [x16], #0x8\n"
+ "ldr d8, [x15], #0x8\n"
"ldr d12, [x25], #0x8\n"
"ldr d16, [x24], #0x8\n"
"ldr d20, [x23], #0x8\n"
"tbz x8, #1, 163f\n"
- "ld1 { v8.s }[2], [x16], #0x4\n"
- "mov x20, #0xc\n"
+ "ld1 { v8.s }[2], [x15], #0x4\n"
+ "mov x19, #0xc\n"
"ld1 { v12.s }[2], [x25], #0x4\n"
"ld1 { v16.s }[2], [x24], #0x4\n"
"ld1 { v20.s }[2], [x23], #0x4\n"
"tbz x8, #0, 166f\n"
- "ld1 { v8.h }[6], [x16]\n"
+ "ld1 { v8.h }[6], [x15]\n"
"ld1 { v12.h }[6], [x25]\n"
"ld1 { v16.h }[6], [x24]\n"
"ld1 { v20.h }[6], [x23]\n"
"b 166f\n"
"163:" // Height 4: Partial accumulate: partial_1_4
- "mov x20, #0x8\n"
+ "mov x19, #0x8\n"
"tbz x8, #0, 166f\n"
- "ld1 { v8.h }[4], [x16]\n"
+ "ld1 { v8.h }[4], [x15]\n"
"ld1 { v12.h }[4], [x25]\n"
"ld1 { v16.h }[4], [x24]\n"
"ld1 { v20.h }[4], [x23]\n"
"b 166f\n"
"164:" // Height 4: Partial accumulate: partial_2_0
"tbz x8, #1, 165f\n"
- "ldr s8, [x16], #0x4\n"
- "mov x20, #0x4\n"
+ "ldr s8, [x15], #0x4\n"
"ldr s12, [x25], #0x4\n"
+ "mov x19, #0x4\n"
"ldr s16, [x24], #0x4\n"
"ldr s20, [x23], #0x4\n"
"tbz x8, #0, 166f\n"
- "ld1 { v8.h }[2], [x16]\n"
+ "ld1 { v8.h }[2], [x15]\n"
"ld1 { v12.h }[2], [x25]\n"
"ld1 { v16.h }[2], [x24]\n"
"ld1 { v20.h }[2], [x23]\n"
"b 166f\n"
"165:" // Height 4: Partial accumulate: partial_1_0
- "ldr h8, [x16, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr h8, [x15, #0x0]\n"
+ "mov x19, #0x0\n"
"ldr h12, [x25, #0x0]\n"
"ldr h16, [x24, #0x0]\n"
"ldr h20, [x23, #0x0]\n"
"166:" // Height 4: Partial accumulate: Done
- "sub x16, x16, x20\n"
+ "sub x15, x15, x19\n"
"b 169f\n"
"167:" // Height 4: full accumulate
- "ldr q8, [x16, #0x0]\n"
- "ldr q9, [x16, #0x10]\n"
- "ldr q10, [x16, #0x20]\n"
- "ldr q11, [x16, #0x30]\n"
+ "ldr q8, [x15, #0x0]\n"
+ "ldr q9, [x15, #0x10]\n"
+ "ldr q10, [x15, #0x20]\n"
+ "ldr q11, [x15, #0x30]\n"
"ldr q12, [x25, #0x0]\n"
"ldr q13, [x25, #0x10]\n"
"ldr q14, [x25, #0x20]\n"
@@ -2420,316 +2420,316 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"movi v22.16b, #0x0\n"
"movi v23.16b, #0x0\n"
"169:" // Height 4: setup done
- "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
"170:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w13, [x20, x14, LSL #0x2]\n"
"tbz %x[flags], #3, 171f\n"
- "ldr x21, [%x[input_ptr], x15, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x13, [x21, #0x0]\n"
- "ldr x9, [x21, #0x8]\n"
- "ldr x27, [x21, #0x10]\n"
- "ldr x25, [x21, #0x18]\n"
- "cbnz x15, 172f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x13, x13, x20, LSL #1\n"
- "add x9, x9, x20, LSL #1\n"
- "add x27, x27, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x12, [x20, #0x0]\n"
+ "ldr x28, [x20, #0x8]\n"
+ "ldr x26, [x20, #0x10]\n"
+ "ldr x24, [x20, #0x18]\n"
+ "cbnz x14, 172f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x12, x12, x19, LSL #1\n"
+ "add x28, x28, x19, LSL #1\n"
+ "add x26, x26, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
"b 172f\n"
"171:" // Height 4: setup direct input
- "mov x13, %x[input_ptr]\n"
- "add x9, x13, x20, LSL #1\n"
- "add x27, x9, x20, LSL #1\n"
- "add x25, x27, x20, LSL #1\n"
+ "mov x12, %x[input_ptr]\n"
+ "add x28, x12, x19, LSL #1\n"
+ "add x26, x28, x19, LSL #1\n"
+ "add x24, x26, x19, LSL #1\n"
"172:" // Height 4: input setup done
- "cmp x14, #0x8\n"
+ "cmp x13, #0x8\n"
"blt 175f\n"
- "ldr q0, [x13, #0x0]\n"
- "cmp x14, #0x10\n"
- "ldr q1, [x9, #0x0]\n"
- "ldr q2, [x27, #0x0]\n"
- "ldr q3, [x25, #0x0]\n"
+ "ldr q0, [x12, #0x0]\n"
+ "ldr q1, [x28, #0x0]\n"
+ "cmp x13, #0x10\n"
+ "ldr q2, [x26, #0x0]\n"
+ "ldr q3, [x24, #0x0]\n"
"ldr q6, [x17, #0x0]\n"
- "ldr q7, [x17, #0x10]\n"
"blt 174f\n"
"173:" // Height 4: Multiply loop: Main loop head
"fmla v8.8h, v6.8h, v0.h[0]\n"
- "ldr x12, [x17, #0x28]\n"
+ "ldr d7, [x17, #0x10]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
- "ldr x11, [x17, #0x38]\n"
+ "ldr x11, [x17, #0x18]\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
- "add x13, x13, #0x10\n"
+ "ldr x10, [x17, #0x28]\n"
"fmla v20.8h, v6.8h, v3.h[0]\n"
"ldr d6, [x17, #0x20]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0x38]\n"
+ "add x12, x12, #0x10\n"
+ "add x28, x28, #0x10\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
- "mov v6.d[1], x12\n"
+ "mov v6.d[1], x10\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
- "ldr x12, [x17, #0x48]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
- "add x9, x9, #0x10\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
"fmla v21.8h, v7.8h, v3.h[0]\n"
"ldr d7, [x17, #0x30]\n"
- "mov v7.d[1], x11\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
+ "ldr x10, [x17, #0x48]\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
- "ldr x11, [x17, #0x58]\n"
+ "ldr x9, [x12, #0x8]\n"
"fmla v18.8h, v6.8h, v2.h[0]\n"
- "add x27, x27, #0x10\n"
+ "mov v7.d[1], x11\n"
"fmla v22.8h, v6.8h, v3.h[0]\n"
"ldr d6, [x17, #0x40]\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0x58]\n"
"fmla v15.8h, v7.8h, v1.h[0]\n"
- "ldr x12, [x17, #0x68]\n"
+ "ldr x27, [x28, #0x8]\n"
"fmla v19.8h, v7.8h, v2.h[0]\n"
- "add x25, x25, #0x10\n"
+ "mov v6.d[1], x10\n"
"fmla v23.8h, v7.8h, v3.h[0]\n"
"ldr d7, [x17, #0x50]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[1]\n"
+ "ldr x10, [x17, #0x68]\n"
"fmla v12.8h, v6.8h, v1.h[1]\n"
- "ldr x11, [x17, #0x78]\n"
+ "add x26, x26, #0x10\n"
"fmla v16.8h, v6.8h, v2.h[1]\n"
- "ldr x10, [x13, #0x8]\n"
+ "mov v7.d[1], x11\n"
"fmla v20.8h, v6.8h, v3.h[1]\n"
- "ldr d6, [x17, #0x60]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"fmla v9.8h, v7.8h, v0.h[1]\n"
- "mov v6.d[1], x12\n"
+ "ldr d6, [x17, #0x60]\n"
"fmla v13.8h, v7.8h, v1.h[1]\n"
- "ldr x12, [x17, #0x88]\n"
+ "ldr x11, [x17, #0x78]\n"
"fmla v17.8h, v7.8h, v2.h[1]\n"
- "ldr x28, [x9, #0x8]\n"
+ "ldr x25, [x26, #0x8]\n"
"fmla v21.8h, v7.8h, v3.h[1]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x70]\n"
- "mov v7.d[1], x11\n"
+ "add x24, x24, #0x10\n"
"fmla v10.8h, v6.8h, v0.h[1]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v14.8h, v6.8h, v1.h[1]\n"
- "ldr x11, [x17, #0x98]\n"
+ "ldr x10, [x17, #0x88]\n"
"fmla v18.8h, v6.8h, v2.h[1]\n"
- "ldr x26, [x27, #0x8]\n"
+ "mov v7.d[1], x11\n"
"fmla v22.8h, v6.8h, v3.h[1]\n"
"ldr d6, [x17, #0x80]\n"
"fmla v11.8h, v7.8h, v0.h[1]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0x98]\n"
"fmla v15.8h, v7.8h, v1.h[1]\n"
- "ldr x12, [x17, #0xa8]\n"
+ "ldr x23, [x24, #0x8]\n"
"fmla v19.8h, v7.8h, v2.h[1]\n"
- "ldr x24, [x25, #0x8]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.8h, v7.8h, v3.h[1]\n"
"ldr d7, [x17, #0x90]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[2]\n"
+ "ldr x10, [x17, #0xa8]\n"
"fmla v12.8h, v6.8h, v1.h[2]\n"
- "ldr x11, [x17, #0xb8]\n"
+ "sub x13, x13, #0x8\n"
"fmla v16.8h, v6.8h, v2.h[2]\n"
- "sub x14, x14, #0x8\n"
+ "mov v7.d[1], x11\n"
"fmla v20.8h, v6.8h, v3.h[2]\n"
"ldr d6, [x17, #0xa0]\n"
"fmla v9.8h, v7.8h, v0.h[2]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0xb8]\n"
"fmla v13.8h, v7.8h, v1.h[2]\n"
- "ldr x12, [x17, #0xc8]\n"
+ "cmp x13, #0x10\n"
"fmla v17.8h, v7.8h, v2.h[2]\n"
- "cmp x14, #0x10\n"
+ "mov v6.d[1], x10\n"
"fmla v21.8h, v7.8h, v3.h[2]\n"
"ldr d7, [x17, #0xb0]\n"
- "mov v7.d[1], x11\n"
"fmla v10.8h, v6.8h, v0.h[2]\n"
+ "ldr x10, [x17, #0xc8]\n"
"fmla v14.8h, v6.8h, v1.h[2]\n"
- "ldr x11, [x17, #0xd8]\n"
"fmla v18.8h, v6.8h, v2.h[2]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "mov v7.d[1], x11\n"
"fmla v22.8h, v6.8h, v3.h[2]\n"
"ldr d6, [x17, #0xc0]\n"
"fmla v11.8h, v7.8h, v0.h[2]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0xd8]\n"
"fmla v15.8h, v7.8h, v1.h[2]\n"
- "ldr x12, [x17, #0xe8]\n"
"fmla v19.8h, v7.8h, v2.h[2]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.8h, v7.8h, v3.h[2]\n"
"ldr d7, [x17, #0xd0]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[3]\n"
+ "ldr x10, [x17, #0xe8]\n"
"fmla v12.8h, v6.8h, v1.h[3]\n"
- "ldr x11, [x17, #0xf8]\n"
"fmla v16.8h, v6.8h, v2.h[3]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "mov v7.d[1], x11\n"
"fmla v20.8h, v6.8h, v3.h[3]\n"
"ldr d6, [x17, #0xe0]\n"
"fmla v9.8h, v7.8h, v0.h[3]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0xf8]\n"
"fmla v13.8h, v7.8h, v1.h[3]\n"
- "ldr x12, [x17, #0x108]\n"
"fmla v17.8h, v7.8h, v2.h[3]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "mov v6.d[1], x10\n"
"fmla v21.8h, v7.8h, v3.h[3]\n"
"ldr d7, [x17, #0xf0]\n"
- "mov v7.d[1], x11\n"
"fmla v10.8h, v6.8h, v0.h[3]\n"
+ "ldr x10, [x17, #0x108]\n"
"fmla v14.8h, v6.8h, v1.h[3]\n"
- "ldr x11, [x17, #0x118]\n"
"fmla v18.8h, v6.8h, v2.h[3]\n"
+ "mov v7.d[1], x11\n"
"fmla v22.8h, v6.8h, v3.h[3]\n"
"ldr d6, [x17, #0x100]\n"
"fmla v11.8h, v7.8h, v0.h[3]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0x118]\n"
"fmla v15.8h, v7.8h, v1.h[3]\n"
- "ldr x12, [x17, #0x128]\n"
"fmla v19.8h, v7.8h, v2.h[3]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.8h, v7.8h, v3.h[3]\n"
"ldr d7, [x17, #0x110]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[4]\n"
+ "ldr x10, [x17, #0x128]\n"
"fmla v12.8h, v6.8h, v1.h[4]\n"
- "ldr x11, [x17, #0x138]\n"
"fmla v16.8h, v6.8h, v2.h[4]\n"
+ "mov v7.d[1], x11\n"
"fmla v20.8h, v6.8h, v3.h[4]\n"
"ldr d6, [x17, #0x120]\n"
"fmla v9.8h, v7.8h, v0.h[4]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0x138]\n"
"fmla v13.8h, v7.8h, v1.h[4]\n"
- "ldr x12, [x17, #0x148]\n"
"fmla v17.8h, v7.8h, v2.h[4]\n"
+ "mov v6.d[1], x10\n"
"fmla v21.8h, v7.8h, v3.h[4]\n"
"ldr d7, [x17, #0x130]\n"
- "mov v7.d[1], x11\n"
"fmla v10.8h, v6.8h, v0.h[4]\n"
+ "ldr x10, [x17, #0x148]\n"
"fmla v14.8h, v6.8h, v1.h[4]\n"
- "ldr x11, [x17, #0x158]\n"
"fmla v18.8h, v6.8h, v2.h[4]\n"
+ "mov v7.d[1], x11\n"
"fmla v22.8h, v6.8h, v3.h[4]\n"
"ldr d6, [x17, #0x140]\n"
"fmla v11.8h, v7.8h, v0.h[4]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0x158]\n"
"fmla v15.8h, v7.8h, v1.h[4]\n"
- "ldr x12, [x17, #0x168]\n"
"fmla v19.8h, v7.8h, v2.h[4]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.8h, v7.8h, v3.h[4]\n"
"ldr d7, [x17, #0x150]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[5]\n"
+ "ldr x10, [x17, #0x168]\n"
"fmla v12.8h, v6.8h, v1.h[5]\n"
- "ldr x11, [x17, #0x178]\n"
"fmla v16.8h, v6.8h, v2.h[5]\n"
+ "mov v7.d[1], x11\n"
"fmla v20.8h, v6.8h, v3.h[5]\n"
"ldr d6, [x17, #0x160]\n"
"fmla v9.8h, v7.8h, v0.h[5]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0x178]\n"
"fmla v13.8h, v7.8h, v1.h[5]\n"
- "ldr x12, [x17, #0x188]\n"
"fmla v17.8h, v7.8h, v2.h[5]\n"
+ "mov v6.d[1], x10\n"
"fmla v21.8h, v7.8h, v3.h[5]\n"
"ldr d7, [x17, #0x170]\n"
- "mov v7.d[1], x11\n"
"fmla v10.8h, v6.8h, v0.h[5]\n"
+ "ldr x10, [x17, #0x188]\n"
"fmla v14.8h, v6.8h, v1.h[5]\n"
- "ldr x11, [x17, #0x198]\n"
"fmla v18.8h, v6.8h, v2.h[5]\n"
+ "mov v7.d[1], x11\n"
"fmla v22.8h, v6.8h, v3.h[5]\n"
"ldr d6, [x17, #0x180]\n"
"fmla v11.8h, v7.8h, v0.h[5]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0x198]\n"
"fmla v15.8h, v7.8h, v1.h[5]\n"
- "ldr x12, [x17, #0x1a8]\n"
"fmla v19.8h, v7.8h, v2.h[5]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.8h, v7.8h, v3.h[5]\n"
"ldr d7, [x17, #0x190]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[6]\n"
+ "ldr x10, [x17, #0x1a8]\n"
"fmla v12.8h, v6.8h, v1.h[6]\n"
- "ldr x11, [x17, #0x1b8]\n"
"fmla v16.8h, v6.8h, v2.h[6]\n"
+ "mov v7.d[1], x11\n"
"fmla v20.8h, v6.8h, v3.h[6]\n"
"ldr d6, [x17, #0x1a0]\n"
"fmla v9.8h, v7.8h, v0.h[6]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0x1b8]\n"
"fmla v13.8h, v7.8h, v1.h[6]\n"
- "ldr x12, [x17, #0x1c8]\n"
"fmla v17.8h, v7.8h, v2.h[6]\n"
+ "mov v6.d[1], x10\n"
"fmla v21.8h, v7.8h, v3.h[6]\n"
"ldr d7, [x17, #0x1b0]\n"
- "mov v7.d[1], x11\n"
"fmla v10.8h, v6.8h, v0.h[6]\n"
+ "ldr x10, [x17, #0x1c8]\n"
"fmla v14.8h, v6.8h, v1.h[6]\n"
- "ldr x11, [x17, #0x1d8]\n"
"fmla v18.8h, v6.8h, v2.h[6]\n"
+ "mov v7.d[1], x11\n"
"fmla v22.8h, v6.8h, v3.h[6]\n"
"ldr d6, [x17, #0x1c0]\n"
"fmla v11.8h, v7.8h, v0.h[6]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0x1d8]\n"
"fmla v15.8h, v7.8h, v1.h[6]\n"
- "ldr x12, [x17, #0x1e8]\n"
"fmla v19.8h, v7.8h, v2.h[6]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.8h, v7.8h, v3.h[6]\n"
"ldr d7, [x17, #0x1d0]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[7]\n"
+ "ldr x10, [x17, #0x1e8]\n"
"fmla v12.8h, v6.8h, v1.h[7]\n"
- "ldr x11, [x17, #0x1f8]\n"
"fmla v16.8h, v6.8h, v2.h[7]\n"
+ "mov v7.d[1], x11\n"
"fmla v20.8h, v6.8h, v3.h[7]\n"
"ldr d6, [x17, #0x1e0]\n"
"fmla v9.8h, v7.8h, v0.h[7]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0x1f8]\n"
"fmla v13.8h, v7.8h, v1.h[7]\n"
"fmla v17.8h, v7.8h, v2.h[7]\n"
+ "mov v6.d[1], x10\n"
"fmla v21.8h, v7.8h, v3.h[7]\n"
"ldr d7, [x17, #0x1f0]\n"
- "mov v7.d[1], x11\n"
- "add x17, x17, #0x200\n"
"fmla v10.8h, v6.8h, v0.h[7]\n"
- "ldr x12, [x17, #0x8]\n"
+ "add x17, x17, #0x200\n"
"fmla v14.8h, v6.8h, v1.h[7]\n"
- "ldr x11, [x17, #0x18]\n"
+ "ldr x10, [x17, #0x8]\n"
"fmla v18.8h, v6.8h, v2.h[7]\n"
+ "mov v7.d[1], x11\n"
"fmla v22.8h, v6.8h, v3.h[7]\n"
"ldr d6, [x17, #0x0]\n"
"fmla v11.8h, v7.8h, v0.h[7]\n"
- "ldr d0, [x13, #0x0]\n"
+ "ldr d0, [x12, #0x0]\n"
"fmla v15.8h, v7.8h, v1.h[7]\n"
- "ldr d1, [x9, #0x0]\n"
+ "ldr d1, [x28, #0x0]\n"
"fmla v19.8h, v7.8h, v2.h[7]\n"
- "ldr d2, [x27, #0x0]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.8h, v7.8h, v3.h[7]\n"
- "ldr d3, [x25, #0x0]\n"
- "ldr d7, [x17, #0x10]\n"
- "mov v6.d[1], x12\n"
- "mov v0.d[1], x10\n"
- "mov v1.d[1], x28\n"
- "mov v2.d[1], x26\n"
- "mov v3.d[1], x24\n"
- "mov v7.d[1], x11\n"
+ "mov v0.d[1], x9\n"
+ "mov v1.d[1], x27\n"
+ "ldr d2, [x26, #0x0]\n"
+ "ldr d3, [x24, #0x0]\n"
+ "mov v2.d[1], x25\n"
+ "mov v3.d[1], x23\n"
"bge 173b\n"
"174:" // Height 4: Multiply loop: Single iteration only
"fmla v8.8h, v6.8h, v0.h[0]\n"
- "add x13, x13, #0x10\n"
+ "ldr q7, [x17, #0x10]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
- "add x9, x9, #0x10\n"
+ "sub x13, x13, #0x8\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
- "add x27, x27, #0x10\n"
+ "add x12, x12, #0x10\n"
"fmla v20.8h, v6.8h, v3.h[0]\n"
- "ldr q6, [x17, #0x20]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
- "add x25, x25, #0x10\n"
+ "ldr q6, [x17, #0x20]\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
- "sub x14, x14, #0x8\n"
+ "add x28, x28, #0x10\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
"fmla v21.8h, v7.8h, v3.h[0]\n"
"ldr q7, [x17, #0x30]\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "add x26, x26, #0x10\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"fmla v18.8h, v6.8h, v2.h[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
"fmla v22.8h, v6.8h, v3.h[0]\n"
- "ldr q6, [x17, #0x40]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
+ "ldr q6, [x17, #0x40]\n"
"fmla v15.8h, v7.8h, v1.h[0]\n"
"fmla v19.8h, v7.8h, v2.h[0]\n"
"fmla v23.8h, v7.8h, v3.h[0]\n"
@@ -2874,16 +2874,16 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmla v19.8h, v7.8h, v2.h[7]\n"
"fmla v23.8h, v7.8h, v3.h[7]\n"
"175:" // Height 4: Multiply loop: Main loop skip
- "cbz x14, 177f\n"
+ "cbz x13, 177f\n"
"176:" // Height 4: Multiply loop: Odd block loop
- "ldr h0, [x13], #0x2\n"
- "sub x14, x14, #0x1\n"
- "ldr h1, [x9], #0x2\n"
- "ldr h2, [x27], #0x2\n"
- "ldr h3, [x25], #0x2\n"
+ "ldr h0, [x12], #0x2\n"
+ "sub x13, x13, #0x1\n"
+ "ldr h1, [x28], #0x2\n"
+ "ldr h2, [x26], #0x2\n"
+ "ldr h3, [x24], #0x2\n"
"ldr q6, [x17, #0x0]\n"
- "fmla v8.8h, v6.8h, v0.h[0]\n"
"ldr q7, [x17, #0x10]\n"
+ "fmla v8.8h, v6.8h, v0.h[0]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
"fmla v20.8h, v6.8h, v3.h[0]\n"
@@ -2902,23 +2902,25 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmla v15.8h, v7.8h, v1.h[0]\n"
"fmla v19.8h, v7.8h, v2.h[0]\n"
"fmla v23.8h, v7.8h, v3.h[0]\n"
- "cbnz x14, 176b\n"
+ "cbnz x13, 176b\n"
"177:" // Height 4: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x15, x15, #0x1\n"
- "cmp x15, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x14, x14, #0x1\n"
+ "cmp x14, x19\n"
"bne 170b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "prfm pstl1keep, [x16, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x15, #0x0]\n"
+ "add x25, x15, x19, LSL #1\n"
"prfm pstl1keep, [x25, #0x0]\n"
+ "add x24, x25, x19, LSL #1\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x19, LSL #1\n"
"prfm pstl1keep, [x23, #0x0]\n"
"tbz %x[flags], #1, 178f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v0.8h }, [x20]\n"
+ "add x20, %x[args_ptr], %[offset_min]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.8h }, [x20]\n"
+ "ld1r { v0.8h }, [x19]\n"
"fmin v8.8h, v8.8h, v0.8h\n"
"fmin v9.8h, v9.8h, v0.8h\n"
"fmin v10.8h, v10.8h, v0.8h\n"
@@ -2929,36 +2931,34 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmin v15.8h, v15.8h, v0.8h\n"
"fmin v16.8h, v16.8h, v0.8h\n"
"fmin v17.8h, v17.8h, v0.8h\n"
+ "fmax v8.8h, v8.8h, v1.8h\n"
+ "fmax v9.8h, v9.8h, v1.8h\n"
+ "fmax v10.8h, v10.8h, v1.8h\n"
+ "fmax v11.8h, v11.8h, v1.8h\n"
+ "fmax v12.8h, v12.8h, v1.8h\n"
+ "fmax v13.8h, v13.8h, v1.8h\n"
+ "fmax v14.8h, v14.8h, v1.8h\n"
+ "fmax v15.8h, v15.8h, v1.8h\n"
+ "fmax v16.8h, v16.8h, v1.8h\n"
+ "fmax v17.8h, v17.8h, v1.8h\n"
"fmin v18.8h, v18.8h, v0.8h\n"
"fmin v19.8h, v19.8h, v0.8h\n"
"fmin v20.8h, v20.8h, v0.8h\n"
"fmin v21.8h, v21.8h, v0.8h\n"
"fmin v22.8h, v22.8h, v0.8h\n"
"fmin v23.8h, v23.8h, v0.8h\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.8h }, [x20]\n"
- "fmax v8.8h, v8.8h, v0.8h\n"
- "fmax v9.8h, v9.8h, v0.8h\n"
- "fmax v10.8h, v10.8h, v0.8h\n"
- "fmax v11.8h, v11.8h, v0.8h\n"
- "fmax v12.8h, v12.8h, v0.8h\n"
- "fmax v13.8h, v13.8h, v0.8h\n"
- "fmax v14.8h, v14.8h, v0.8h\n"
- "fmax v15.8h, v15.8h, v0.8h\n"
- "fmax v16.8h, v16.8h, v0.8h\n"
- "fmax v17.8h, v17.8h, v0.8h\n"
- "fmax v18.8h, v18.8h, v0.8h\n"
- "fmax v19.8h, v19.8h, v0.8h\n"
- "fmax v20.8h, v20.8h, v0.8h\n"
- "fmax v21.8h, v21.8h, v0.8h\n"
- "fmax v22.8h, v22.8h, v0.8h\n"
- "fmax v23.8h, v23.8h, v0.8h\n"
+ "fmax v18.8h, v18.8h, v1.8h\n"
+ "fmax v19.8h, v19.8h, v1.8h\n"
+ "fmax v20.8h, v20.8h, v1.8h\n"
+ "fmax v21.8h, v21.8h, v1.8h\n"
+ "fmax v22.8h, v22.8h, v1.8h\n"
+ "fmax v23.8h, v23.8h, v1.8h\n"
"178:" // Height 4: No activation
"cmp x8, #0x20\n"
"bge 195f\n"
"tbz x8, #4, 186f\n"
- "st1 { v8.8h }, [x16], #0x10\n"
- "st1 { v9.8h }, [x16], #0x10\n"
+ "st1 { v8.8h }, [x15], #0x10\n"
+ "st1 { v9.8h }, [x15], #0x10\n"
"st1 { v12.8h }, [x25], #0x10\n"
"st1 { v13.8h }, [x25], #0x10\n"
"st1 { v16.8h }, [x24], #0x10\n"
@@ -2966,192 +2966,192 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"st1 { v20.8h }, [x23], #0x10\n"
"st1 { v21.8h }, [x23], #0x10\n"
"tbz x8, #3, 182f\n"
- "st1 { v10.8h }, [x16], #0x10\n"
+ "st1 { v10.8h }, [x15], #0x10\n"
"st1 { v14.8h }, [x25], #0x10\n"
"st1 { v18.8h }, [x24], #0x10\n"
"st1 { v22.8h }, [x23], #0x10\n"
"tbz x8, #2, 180f\n"
- "str d11, [x16], #0x8\n"
+ "str d11, [x15], #0x8\n"
"str d15, [x25], #0x8\n"
"str d19, [x24], #0x8\n"
"str d23, [x23], #0x8\n"
"tbz x8, #1, 179f\n"
- "st1 { v11.s }[2], [x16], #0x4\n"
+ "st1 { v11.s }[2], [x15], #0x4\n"
"st1 { v15.s }[2], [x25], #0x4\n"
"st1 { v19.s }[2], [x24], #0x4\n"
"st1 { v23.s }[2], [x23], #0x4\n"
"tbz x8, #0, 194f\n"
- "st1 { v11.h }[6], [x16]\n"
+ "st1 { v11.h }[6], [x15]\n"
"st1 { v15.h }[6], [x25]\n"
"st1 { v19.h }[6], [x24]\n"
"st1 { v23.h }[6], [x23]\n"
"b 194f\n"
"179:" // Height 4: Partial direct writeback: partial_1_28
"tbz x8, #0, 194f\n"
- "st1 { v11.h }[4], [x16]\n"
+ "st1 { v11.h }[4], [x15]\n"
"st1 { v15.h }[4], [x25]\n"
"st1 { v19.h }[4], [x24]\n"
"st1 { v23.h }[4], [x23]\n"
"b 194f\n"
"180:" // Height 4: Partial direct writeback: partial_2_24
"tbz x8, #1, 181f\n"
- "str s11, [x16], #0x4\n"
+ "str s11, [x15], #0x4\n"
"str s15, [x25], #0x4\n"
"str s19, [x24], #0x4\n"
"str s23, [x23], #0x4\n"
"tbz x8, #0, 194f\n"
- "st1 { v11.h }[2], [x16]\n"
+ "st1 { v11.h }[2], [x15]\n"
"st1 { v15.h }[2], [x25]\n"
"st1 { v19.h }[2], [x24]\n"
"st1 { v23.h }[2], [x23]\n"
"b 194f\n"
"181:" // Height 4: Partial direct writeback: partial_1_24
"tbz x8, #0, 194f\n"
- "str h11, [x16, #0x0]\n"
+ "str h11, [x15, #0x0]\n"
"str h15, [x25, #0x0]\n"
"str h19, [x24, #0x0]\n"
"str h23, [x23, #0x0]\n"
"b 194f\n"
"182:" // Height 4: Partial direct writeback: partial_4_16
"tbz x8, #2, 184f\n"
- "str d10, [x16], #0x8\n"
+ "str d10, [x15], #0x8\n"
"str d14, [x25], #0x8\n"
"str d18, [x24], #0x8\n"
"str d22, [x23], #0x8\n"
"tbz x8, #1, 183f\n"
- "st1 { v10.s }[2], [x16], #0x4\n"
+ "st1 { v10.s }[2], [x15], #0x4\n"
"st1 { v14.s }[2], [x25], #0x4\n"
"st1 { v18.s }[2], [x24], #0x4\n"
"st1 { v22.s }[2], [x23], #0x4\n"
"tbz x8, #0, 194f\n"
- "st1 { v10.h }[6], [x16]\n"
+ "st1 { v10.h }[6], [x15]\n"
"st1 { v14.h }[6], [x25]\n"
"st1 { v18.h }[6], [x24]\n"
"st1 { v22.h }[6], [x23]\n"
"b 194f\n"
"183:" // Height 4: Partial direct writeback: partial_1_20
"tbz x8, #0, 194f\n"
- "st1 { v10.h }[4], [x16]\n"
+ "st1 { v10.h }[4], [x15]\n"
"st1 { v14.h }[4], [x25]\n"
"st1 { v18.h }[4], [x24]\n"
"st1 { v22.h }[4], [x23]\n"
"b 194f\n"
"184:" // Height 4: Partial direct writeback: partial_2_16
"tbz x8, #1, 185f\n"
- "str s10, [x16], #0x4\n"
+ "str s10, [x15], #0x4\n"
"str s14, [x25], #0x4\n"
"str s18, [x24], #0x4\n"
"str s22, [x23], #0x4\n"
"tbz x8, #0, 194f\n"
- "st1 { v10.h }[2], [x16]\n"
+ "st1 { v10.h }[2], [x15]\n"
"st1 { v14.h }[2], [x25]\n"
"st1 { v18.h }[2], [x24]\n"
"st1 { v22.h }[2], [x23]\n"
"b 194f\n"
"185:" // Height 4: Partial direct writeback: partial_1_16
"tbz x8, #0, 194f\n"
- "str h10, [x16, #0x0]\n"
+ "str h10, [x15, #0x0]\n"
"str h14, [x25, #0x0]\n"
"str h18, [x24, #0x0]\n"
"str h22, [x23, #0x0]\n"
"b 194f\n"
"186:" // Height 4: Partial direct writeback: partial_8_0
"tbz x8, #3, 190f\n"
- "st1 { v8.8h }, [x16], #0x10\n"
+ "st1 { v8.8h }, [x15], #0x10\n"
"st1 { v12.8h }, [x25], #0x10\n"
"st1 { v16.8h }, [x24], #0x10\n"
"st1 { v20.8h }, [x23], #0x10\n"
"tbz x8, #2, 188f\n"
- "str d9, [x16], #0x8\n"
+ "str d9, [x15], #0x8\n"
"str d13, [x25], #0x8\n"
"str d17, [x24], #0x8\n"
"str d21, [x23], #0x8\n"
"tbz x8, #1, 187f\n"
- "st1 { v9.s }[2], [x16], #0x4\n"
+ "st1 { v9.s }[2], [x15], #0x4\n"
"st1 { v13.s }[2], [x25], #0x4\n"
"st1 { v17.s }[2], [x24], #0x4\n"
"st1 { v21.s }[2], [x23], #0x4\n"
"tbz x8, #0, 194f\n"
- "st1 { v9.h }[6], [x16]\n"
+ "st1 { v9.h }[6], [x15]\n"
"st1 { v13.h }[6], [x25]\n"
"st1 { v17.h }[6], [x24]\n"
"st1 { v21.h }[6], [x23]\n"
"b 194f\n"
"187:" // Height 4: Partial direct writeback: partial_1_12
"tbz x8, #0, 194f\n"
- "st1 { v9.h }[4], [x16]\n"
+ "st1 { v9.h }[4], [x15]\n"
"st1 { v13.h }[4], [x25]\n"
"st1 { v17.h }[4], [x24]\n"
"st1 { v21.h }[4], [x23]\n"
"b 194f\n"
"188:" // Height 4: Partial direct writeback: partial_2_8
"tbz x8, #1, 189f\n"
- "str s9, [x16], #0x4\n"
+ "str s9, [x15], #0x4\n"
"str s13, [x25], #0x4\n"
"str s17, [x24], #0x4\n"
"str s21, [x23], #0x4\n"
"tbz x8, #0, 194f\n"
- "st1 { v9.h }[2], [x16]\n"
+ "st1 { v9.h }[2], [x15]\n"
"st1 { v13.h }[2], [x25]\n"
"st1 { v17.h }[2], [x24]\n"
"st1 { v21.h }[2], [x23]\n"
"b 194f\n"
"189:" // Height 4: Partial direct writeback: partial_1_8
"tbz x8, #0, 194f\n"
- "str h9, [x16, #0x0]\n"
+ "str h9, [x15, #0x0]\n"
"str h13, [x25, #0x0]\n"
"str h17, [x24, #0x0]\n"
"str h21, [x23, #0x0]\n"
"b 194f\n"
"190:" // Height 4: Partial direct writeback: partial_4_0
"tbz x8, #2, 192f\n"
- "str d8, [x16], #0x8\n"
+ "str d8, [x15], #0x8\n"
"str d12, [x25], #0x8\n"
"str d16, [x24], #0x8\n"
"str d20, [x23], #0x8\n"
"tbz x8, #1, 191f\n"
- "st1 { v8.s }[2], [x16], #0x4\n"
+ "st1 { v8.s }[2], [x15], #0x4\n"
"st1 { v12.s }[2], [x25], #0x4\n"
"st1 { v16.s }[2], [x24], #0x4\n"
"st1 { v20.s }[2], [x23], #0x4\n"
"tbz x8, #0, 194f\n"
- "st1 { v8.h }[6], [x16]\n"
+ "st1 { v8.h }[6], [x15]\n"
"st1 { v12.h }[6], [x25]\n"
"st1 { v16.h }[6], [x24]\n"
"st1 { v20.h }[6], [x23]\n"
"b 194f\n"
"191:" // Height 4: Partial direct writeback: partial_1_4
"tbz x8, #0, 194f\n"
- "st1 { v8.h }[4], [x16]\n"
+ "st1 { v8.h }[4], [x15]\n"
"st1 { v12.h }[4], [x25]\n"
"st1 { v16.h }[4], [x24]\n"
"st1 { v20.h }[4], [x23]\n"
"b 194f\n"
"192:" // Height 4: Partial direct writeback: partial_2_0
"tbz x8, #1, 193f\n"
- "str s8, [x16], #0x4\n"
+ "str s8, [x15], #0x4\n"
"str s12, [x25], #0x4\n"
"str s16, [x24], #0x4\n"
"str s20, [x23], #0x4\n"
"tbz x8, #0, 194f\n"
- "st1 { v8.h }[2], [x16]\n"
+ "st1 { v8.h }[2], [x15]\n"
"st1 { v12.h }[2], [x25]\n"
"st1 { v16.h }[2], [x24]\n"
"st1 { v20.h }[2], [x23]\n"
"b 194f\n"
"193:" // Height 4: Partial direct writeback: partial_1_0
- "str h8, [x16, #0x0]\n"
+ "str h8, [x15, #0x0]\n"
"str h12, [x25, #0x0]\n"
"str h16, [x24, #0x0]\n"
"str h20, [x23, #0x0]\n"
"194:" // Height 4: Partial direct writeback: Done
"b 196f\n"
"195:" // Height 4: Full writeback
- "str q8, [x16, #0x0]\n"
- "str q9, [x16, #0x10]\n"
- "str q10, [x16, #0x20]\n"
- "str q11, [x16, #0x30]\n"
- "add x16, x16, #0x40\n"
+ "str q8, [x15, #0x0]\n"
+ "str q9, [x15, #0x10]\n"
+ "str q10, [x15, #0x20]\n"
+ "str q11, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
"str q12, [x25, #0x0]\n"
"str q13, [x25, #0x10]\n"
"str q14, [x25, #0x20]\n"
@@ -3169,22 +3169,22 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"bgt 149b\n"
"b 296f\n"
"197:" // Height 5
- "mov x7, %x[bias]\n"
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x16, %x[bias]\n"
"ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x16, %x[output_ptr]\n"
+ "mov x15, %x[output_ptr]\n"
"198:" // Height 5: Column loop
- "cbz x7, 199f\n"
- "ldr q8, [x7, #0x0]\n"
+ "cbz x16, 199f\n"
+ "ldr q8, [x16, #0x0]\n"
+ "ldr q9, [x16, #0x10]\n"
+ "ldr q10, [x16, #0x20]\n"
"mov v12.16b, v8.16b\n"
- "ldr q9, [x7, #0x10]\n"
+ "ldr q11, [x16, #0x30]\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x7, #0x20]\n"
+ "add x16, x16, #0x40\n"
"mov v14.16b, v10.16b\n"
- "ldr q11, [x7, #0x30]\n"
"mov v15.16b, v11.16b\n"
"mov v16.16b, v8.16b\n"
- "add x7, x7, #0x40\n"
"mov v17.16b, v9.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
@@ -3199,54 +3199,54 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 218f\n"
"199:" // Height 5: no bias
"tbz %x[flags], #0, 217f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x8, #0x20\n"
- "add x22, x23, x20, LSL #1\n"
+ "add x25, x15, x19, LSL #1\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
"bge 216f\n"
"tbz x8, #4, 207f\n"
- "ld1 { v8.8h }, [x16], #0x10\n"
+ "ld1 { v8.8h }, [x15], #0x10\n"
"ld1 { v12.8h }, [x25], #0x10\n"
"ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v20.8h }, [x23], #0x10\n"
- "ld1 { v24.8h }, [x22], #0x10\n"
- "ld1 { v9.8h }, [x16], #0x10\n"
+ "ld1 { v9.8h }, [x15], #0x10\n"
"ld1 { v13.8h }, [x25], #0x10\n"
"ld1 { v17.8h }, [x24], #0x10\n"
+ "ld1 { v20.8h }, [x23], #0x10\n"
+ "ld1 { v24.8h }, [x22], #0x10\n"
"ld1 { v21.8h }, [x23], #0x10\n"
"ld1 { v25.8h }, [x22], #0x10\n"
"tbz x8, #3, 203f\n"
- "ld1 { v10.8h }, [x16], #0x10\n"
+ "ld1 { v10.8h }, [x15], #0x10\n"
"ld1 { v14.8h }, [x25], #0x10\n"
"ld1 { v18.8h }, [x24], #0x10\n"
"ld1 { v22.8h }, [x23], #0x10\n"
"ld1 { v26.8h }, [x22], #0x10\n"
"tbz x8, #2, 201f\n"
- "ldr d11, [x16], #0x8\n"
+ "ldr d11, [x15], #0x8\n"
"ldr d15, [x25], #0x8\n"
"ldr d19, [x24], #0x8\n"
"ldr d23, [x23], #0x8\n"
"ldr d27, [x22], #0x8\n"
"tbz x8, #1, 200f\n"
- "ld1 { v11.s }[2], [x16], #0x4\n"
- "mov x20, #0x3c\n"
+ "ld1 { v11.s }[2], [x15], #0x4\n"
+ "mov x19, #0x3c\n"
"ld1 { v15.s }[2], [x25], #0x4\n"
"ld1 { v19.s }[2], [x24], #0x4\n"
"ld1 { v23.s }[2], [x23], #0x4\n"
"ld1 { v27.s }[2], [x22], #0x4\n"
"tbz x8, #0, 215f\n"
- "ld1 { v11.h }[6], [x16]\n"
+ "ld1 { v11.h }[6], [x15]\n"
"ld1 { v15.h }[6], [x25]\n"
"ld1 { v19.h }[6], [x24]\n"
"ld1 { v23.h }[6], [x23]\n"
"ld1 { v27.h }[6], [x22]\n"
"b 215f\n"
"200:" // Height 5: Partial accumulate: partial_1_28
- "mov x20, #0x38\n"
+ "mov x19, #0x38\n"
"tbz x8, #0, 215f\n"
- "ld1 { v11.h }[4], [x16]\n"
+ "ld1 { v11.h }[4], [x15]\n"
"ld1 { v15.h }[4], [x25]\n"
"ld1 { v19.h }[4], [x24]\n"
"ld1 { v23.h }[4], [x23]\n"
@@ -3254,23 +3254,23 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 215f\n"
"201:" // Height 5: Partial accumulate: partial_2_24
"tbz x8, #1, 202f\n"
- "ldr s11, [x16], #0x4\n"
- "mov x20, #0x34\n"
+ "ldr s11, [x15], #0x4\n"
"ldr s15, [x25], #0x4\n"
+ "mov x19, #0x34\n"
"ldr s19, [x24], #0x4\n"
"ldr s23, [x23], #0x4\n"
"ldr s27, [x22], #0x4\n"
"tbz x8, #0, 215f\n"
- "ld1 { v11.h }[2], [x16]\n"
+ "ld1 { v11.h }[2], [x15]\n"
"ld1 { v15.h }[2], [x25]\n"
"ld1 { v19.h }[2], [x24]\n"
"ld1 { v23.h }[2], [x23]\n"
"ld1 { v27.h }[2], [x22]\n"
"b 215f\n"
"202:" // Height 5: Partial accumulate: partial_1_24
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x8, #0, 215f\n"
- "ldr h11, [x16, #0x0]\n"
+ "ldr h11, [x15, #0x0]\n"
"ldr h15, [x25, #0x0]\n"
"ldr h19, [x24, #0x0]\n"
"ldr h23, [x23, #0x0]\n"
@@ -3278,29 +3278,29 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 215f\n"
"203:" // Height 5: Partial accumulate: partial_4_16
"tbz x8, #2, 205f\n"
- "ldr d10, [x16], #0x8\n"
+ "ldr d10, [x15], #0x8\n"
"ldr d14, [x25], #0x8\n"
"ldr d18, [x24], #0x8\n"
"ldr d22, [x23], #0x8\n"
"ldr d26, [x22], #0x8\n"
"tbz x8, #1, 204f\n"
- "ld1 { v10.s }[2], [x16], #0x4\n"
- "mov x20, #0x2c\n"
+ "ld1 { v10.s }[2], [x15], #0x4\n"
+ "mov x19, #0x2c\n"
"ld1 { v14.s }[2], [x25], #0x4\n"
"ld1 { v18.s }[2], [x24], #0x4\n"
"ld1 { v22.s }[2], [x23], #0x4\n"
"ld1 { v26.s }[2], [x22], #0x4\n"
"tbz x8, #0, 215f\n"
- "ld1 { v10.h }[6], [x16]\n"
+ "ld1 { v10.h }[6], [x15]\n"
"ld1 { v14.h }[6], [x25]\n"
"ld1 { v18.h }[6], [x24]\n"
"ld1 { v22.h }[6], [x23]\n"
"ld1 { v26.h }[6], [x22]\n"
"b 215f\n"
"204:" // Height 5: Partial accumulate: partial_1_20
- "mov x20, #0x28\n"
+ "mov x19, #0x28\n"
"tbz x8, #0, 215f\n"
- "ld1 { v10.h }[4], [x16]\n"
+ "ld1 { v10.h }[4], [x15]\n"
"ld1 { v14.h }[4], [x25]\n"
"ld1 { v18.h }[4], [x24]\n"
"ld1 { v22.h }[4], [x23]\n"
@@ -3308,23 +3308,23 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 215f\n"
"205:" // Height 5: Partial accumulate: partial_2_16
"tbz x8, #1, 206f\n"
- "ldr s10, [x16], #0x4\n"
- "mov x20, #0x24\n"
+ "ldr s10, [x15], #0x4\n"
"ldr s14, [x25], #0x4\n"
+ "mov x19, #0x24\n"
"ldr s18, [x24], #0x4\n"
"ldr s22, [x23], #0x4\n"
"ldr s26, [x22], #0x4\n"
"tbz x8, #0, 215f\n"
- "ld1 { v10.h }[2], [x16]\n"
+ "ld1 { v10.h }[2], [x15]\n"
"ld1 { v14.h }[2], [x25]\n"
"ld1 { v18.h }[2], [x24]\n"
"ld1 { v22.h }[2], [x23]\n"
"ld1 { v26.h }[2], [x22]\n"
"b 215f\n"
"206:" // Height 5: Partial accumulate: partial_1_16
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x8, #0, 215f\n"
- "ldr h10, [x16, #0x0]\n"
+ "ldr h10, [x15, #0x0]\n"
"ldr h14, [x25, #0x0]\n"
"ldr h18, [x24, #0x0]\n"
"ldr h22, [x23, #0x0]\n"
@@ -3332,35 +3332,35 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 215f\n"
"207:" // Height 5: Partial accumulate: partial_8_0
"tbz x8, #3, 211f\n"
- "ld1 { v8.8h }, [x16], #0x10\n"
+ "ld1 { v8.8h }, [x15], #0x10\n"
"ld1 { v12.8h }, [x25], #0x10\n"
"ld1 { v16.8h }, [x24], #0x10\n"
"ld1 { v20.8h }, [x23], #0x10\n"
"ld1 { v24.8h }, [x22], #0x10\n"
"tbz x8, #2, 209f\n"
- "ldr d9, [x16], #0x8\n"
+ "ldr d9, [x15], #0x8\n"
"ldr d13, [x25], #0x8\n"
"ldr d17, [x24], #0x8\n"
"ldr d21, [x23], #0x8\n"
"ldr d25, [x22], #0x8\n"
"tbz x8, #1, 208f\n"
- "ld1 { v9.s }[2], [x16], #0x4\n"
- "mov x20, #0x1c\n"
+ "ld1 { v9.s }[2], [x15], #0x4\n"
+ "mov x19, #0x1c\n"
"ld1 { v13.s }[2], [x25], #0x4\n"
"ld1 { v17.s }[2], [x24], #0x4\n"
"ld1 { v21.s }[2], [x23], #0x4\n"
"ld1 { v25.s }[2], [x22], #0x4\n"
"tbz x8, #0, 215f\n"
- "ld1 { v9.h }[6], [x16]\n"
+ "ld1 { v9.h }[6], [x15]\n"
"ld1 { v13.h }[6], [x25]\n"
"ld1 { v17.h }[6], [x24]\n"
"ld1 { v21.h }[6], [x23]\n"
"ld1 { v25.h }[6], [x22]\n"
"b 215f\n"
"208:" // Height 5: Partial accumulate: partial_1_12
- "mov x20, #0x18\n"
+ "mov x19, #0x18\n"
"tbz x8, #0, 215f\n"
- "ld1 { v9.h }[4], [x16]\n"
+ "ld1 { v9.h }[4], [x15]\n"
"ld1 { v13.h }[4], [x25]\n"
"ld1 { v17.h }[4], [x24]\n"
"ld1 { v21.h }[4], [x23]\n"
@@ -3368,23 +3368,23 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 215f\n"
"209:" // Height 5: Partial accumulate: partial_2_8
"tbz x8, #1, 210f\n"
- "ldr s9, [x16], #0x4\n"
- "mov x20, #0x14\n"
+ "ldr s9, [x15], #0x4\n"
"ldr s13, [x25], #0x4\n"
+ "mov x19, #0x14\n"
"ldr s17, [x24], #0x4\n"
"ldr s21, [x23], #0x4\n"
"ldr s25, [x22], #0x4\n"
"tbz x8, #0, 215f\n"
- "ld1 { v9.h }[2], [x16]\n"
+ "ld1 { v9.h }[2], [x15]\n"
"ld1 { v13.h }[2], [x25]\n"
"ld1 { v17.h }[2], [x24]\n"
"ld1 { v21.h }[2], [x23]\n"
"ld1 { v25.h }[2], [x22]\n"
"b 215f\n"
"210:" // Height 5: Partial accumulate: partial_1_8
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x8, #0, 215f\n"
- "ldr h9, [x16, #0x0]\n"
+ "ldr h9, [x15, #0x0]\n"
"ldr h13, [x25, #0x0]\n"
"ldr h17, [x24, #0x0]\n"
"ldr h21, [x23, #0x0]\n"
@@ -3392,29 +3392,29 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 215f\n"
"211:" // Height 5: Partial accumulate: partial_4_0
"tbz x8, #2, 213f\n"
- "ldr d8, [x16], #0x8\n"
+ "ldr d8, [x15], #0x8\n"
"ldr d12, [x25], #0x8\n"
"ldr d16, [x24], #0x8\n"
"ldr d20, [x23], #0x8\n"
"ldr d24, [x22], #0x8\n"
"tbz x8, #1, 212f\n"
- "ld1 { v8.s }[2], [x16], #0x4\n"
- "mov x20, #0xc\n"
+ "ld1 { v8.s }[2], [x15], #0x4\n"
+ "mov x19, #0xc\n"
"ld1 { v12.s }[2], [x25], #0x4\n"
"ld1 { v16.s }[2], [x24], #0x4\n"
"ld1 { v20.s }[2], [x23], #0x4\n"
"ld1 { v24.s }[2], [x22], #0x4\n"
"tbz x8, #0, 215f\n"
- "ld1 { v8.h }[6], [x16]\n"
+ "ld1 { v8.h }[6], [x15]\n"
"ld1 { v12.h }[6], [x25]\n"
"ld1 { v16.h }[6], [x24]\n"
"ld1 { v20.h }[6], [x23]\n"
"ld1 { v24.h }[6], [x22]\n"
"b 215f\n"
"212:" // Height 5: Partial accumulate: partial_1_4
- "mov x20, #0x8\n"
+ "mov x19, #0x8\n"
"tbz x8, #0, 215f\n"
- "ld1 { v8.h }[4], [x16]\n"
+ "ld1 { v8.h }[4], [x15]\n"
"ld1 { v12.h }[4], [x25]\n"
"ld1 { v16.h }[4], [x24]\n"
"ld1 { v20.h }[4], [x23]\n"
@@ -3422,34 +3422,34 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 215f\n"
"213:" // Height 5: Partial accumulate: partial_2_0
"tbz x8, #1, 214f\n"
- "ldr s8, [x16], #0x4\n"
- "mov x20, #0x4\n"
+ "ldr s8, [x15], #0x4\n"
"ldr s12, [x25], #0x4\n"
+ "mov x19, #0x4\n"
"ldr s16, [x24], #0x4\n"
"ldr s20, [x23], #0x4\n"
"ldr s24, [x22], #0x4\n"
"tbz x8, #0, 215f\n"
- "ld1 { v8.h }[2], [x16]\n"
+ "ld1 { v8.h }[2], [x15]\n"
"ld1 { v12.h }[2], [x25]\n"
"ld1 { v16.h }[2], [x24]\n"
"ld1 { v20.h }[2], [x23]\n"
"ld1 { v24.h }[2], [x22]\n"
"b 215f\n"
"214:" // Height 5: Partial accumulate: partial_1_0
- "ldr h8, [x16, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr h8, [x15, #0x0]\n"
+ "mov x19, #0x0\n"
"ldr h12, [x25, #0x0]\n"
"ldr h16, [x24, #0x0]\n"
"ldr h20, [x23, #0x0]\n"
"ldr h24, [x22, #0x0]\n"
"215:" // Height 5: Partial accumulate: Done
- "sub x16, x16, x20\n"
+ "sub x15, x15, x19\n"
"b 218f\n"
"216:" // Height 5: full accumulate
- "ldr q8, [x16, #0x0]\n"
- "ldr q9, [x16, #0x10]\n"
- "ldr q10, [x16, #0x20]\n"
- "ldr q11, [x16, #0x30]\n"
+ "ldr q8, [x15, #0x0]\n"
+ "ldr q9, [x15, #0x10]\n"
+ "ldr q10, [x15, #0x20]\n"
+ "ldr q11, [x15, #0x30]\n"
"ldr q12, [x25, #0x0]\n"
"ldr q13, [x25, #0x10]\n"
"ldr q14, [x25, #0x20]\n"
@@ -3489,359 +3489,359 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"movi v26.16b, #0x0\n"
"movi v27.16b, #0x0\n"
"218:" // Height 5: setup done
- "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
"219:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w13, [x20, x14, LSL #0x2]\n"
"tbz %x[flags], #3, 220f\n"
- "ldr x21, [%x[input_ptr], x15, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x13, [x21, #0x0]\n"
- "ldr x9, [x21, #0x8]\n"
- "ldr x27, [x21, #0x10]\n"
- "ldr x25, [x21, #0x18]\n"
- "ldr x23, [x21, #0x20]\n"
- "cbnz x15, 221f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x13, x13, x20, LSL #1\n"
- "add x9, x9, x20, LSL #1\n"
- "add x27, x27, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x12, [x20, #0x0]\n"
+ "ldr x28, [x20, #0x8]\n"
+ "ldr x26, [x20, #0x10]\n"
+ "ldr x24, [x20, #0x18]\n"
+ "ldr x22, [x20, #0x20]\n"
+ "cbnz x14, 221f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x12, x12, x19, LSL #1\n"
+ "add x28, x28, x19, LSL #1\n"
+ "add x26, x26, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
"b 221f\n"
"220:" // Height 5: setup direct input
- "mov x13, %x[input_ptr]\n"
- "add x9, x13, x20, LSL #1\n"
- "add x27, x9, x20, LSL #1\n"
- "add x25, x27, x20, LSL #1\n"
- "add x23, x25, x20, LSL #1\n"
+ "mov x12, %x[input_ptr]\n"
+ "add x28, x12, x19, LSL #1\n"
+ "add x26, x28, x19, LSL #1\n"
+ "add x24, x26, x19, LSL #1\n"
+ "add x22, x24, x19, LSL #1\n"
"221:" // Height 5: input setup done
- "cmp x14, #0x8\n"
+ "cmp x13, #0x8\n"
"blt 224f\n"
- "ldr q0, [x13, #0x0]\n"
- "cmp x14, #0x10\n"
- "ldr q1, [x9, #0x0]\n"
- "ldr q2, [x27, #0x0]\n"
- "ldr q3, [x25, #0x0]\n"
- "ldr q4, [x23, #0x0]\n"
+ "ldr q0, [x12, #0x0]\n"
+ "ldr q1, [x28, #0x0]\n"
+ "cmp x13, #0x10\n"
+ "ldr q2, [x26, #0x0]\n"
+ "ldr q3, [x24, #0x0]\n"
+ "ldr q4, [x22, #0x0]\n"
"ldr q6, [x17, #0x0]\n"
- "ldr q7, [x17, #0x10]\n"
"blt 223f\n"
"222:" // Height 5: Multiply loop: Main loop head
"fmla v8.8h, v6.8h, v0.h[0]\n"
- "ldr x12, [x17, #0x28]\n"
+ "ldr d7, [x17, #0x10]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
- "ldr x11, [x17, #0x38]\n"
+ "ldr x11, [x17, #0x18]\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
- "add x13, x13, #0x10\n"
+ "ldr x10, [x17, #0x28]\n"
"fmla v20.8h, v6.8h, v3.h[0]\n"
- "add x9, x9, #0x10\n"
+ "add x12, x12, #0x10\n"
"fmla v24.8h, v6.8h, v4.h[0]\n"
- "ldr d6, [x17, #0x20]\n"
+ "mov v7.d[1], x11\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
+ "add x28, x28, #0x10\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
- "mov v6.d[1], x12\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
- "ldr x12, [x17, #0x48]\n"
+ "ldr d6, [x17, #0x20]\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
- "add x27, x27, #0x10\n"
+ "ldr x11, [x17, #0x38]\n"
"fmla v21.8h, v7.8h, v3.h[0]\n"
- "add x25, x25, #0x10\n"
+ "ldr x9, [x12, #0x8]\n"
"fmla v25.8h, v7.8h, v4.h[0]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x30]\n"
- "mov v7.d[1], x11\n"
+ "add x26, x26, #0x10\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
- "ldr x11, [x17, #0x58]\n"
+ "ldr x10, [x17, #0x48]\n"
"fmla v18.8h, v6.8h, v2.h[0]\n"
- "add x23, x23, #0x10\n"
+ "mov v7.d[1], x11\n"
"fmla v22.8h, v6.8h, v3.h[0]\n"
- "ldr x10, [x13, #0x8]\n"
+ "ldr x11, [x17, #0x58]\n"
"fmla v26.8h, v6.8h, v4.h[0]\n"
"ldr d6, [x17, #0x40]\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
- "mov v6.d[1], x12\n"
+ "ldr x27, [x28, #0x8]\n"
"fmla v15.8h, v7.8h, v1.h[0]\n"
- "ldr x12, [x17, #0x68]\n"
+ "ldr x25, [x26, #0x8]\n"
"fmla v19.8h, v7.8h, v2.h[0]\n"
- "ldr x28, [x9, #0x8]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.8h, v7.8h, v3.h[0]\n"
- "ldr x26, [x27, #0x8]\n"
+ "ldr x10, [x17, #0x68]\n"
"fmla v27.8h, v7.8h, v4.h[0]\n"
"ldr d7, [x17, #0x50]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[1]\n"
+ "add x24, x24, #0x10\n"
"fmla v12.8h, v6.8h, v1.h[1]\n"
- "ldr x11, [x17, #0x78]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v16.8h, v6.8h, v2.h[1]\n"
- "ldr x24, [x25, #0x8]\n"
+ "mov v7.d[1], x11\n"
"fmla v20.8h, v6.8h, v3.h[1]\n"
- "ldr x22, [x23, #0x8]\n"
+ "ldr x11, [x17, #0x78]\n"
"fmla v24.8h, v6.8h, v4.h[1]\n"
"ldr d6, [x17, #0x60]\n"
"fmla v9.8h, v7.8h, v0.h[1]\n"
- "mov v6.d[1], x12\n"
+ "ldr x23, [x24, #0x8]\n"
"fmla v13.8h, v7.8h, v1.h[1]\n"
- "ldr x12, [x17, #0x88]\n"
+ "add x22, x22, #0x10\n"
"fmla v17.8h, v7.8h, v2.h[1]\n"
- "sub x14, x14, #0x8\n"
+ "mov v6.d[1], x10\n"
"fmla v21.8h, v7.8h, v3.h[1]\n"
- "cmp x14, #0x10\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
"fmla v25.8h, v7.8h, v4.h[1]\n"
"ldr d7, [x17, #0x70]\n"
- "mov v7.d[1], x11\n"
"fmla v10.8h, v6.8h, v0.h[1]\n"
+ "ldr x10, [x17, #0x88]\n"
"fmla v14.8h, v6.8h, v1.h[1]\n"
- "ldr x11, [x17, #0x98]\n"
+ "ldr x21, [x22, #0x8]\n"
"fmla v18.8h, v6.8h, v2.h[1]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "mov v7.d[1], x11\n"
"fmla v22.8h, v6.8h, v3.h[1]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "ldr x11, [x17, #0x98]\n"
"fmla v26.8h, v6.8h, v4.h[1]\n"
"ldr d6, [x17, #0x80]\n"
"fmla v11.8h, v7.8h, v0.h[1]\n"
- "mov v6.d[1], x12\n"
+ "sub x13, x13, #0x8\n"
"fmla v15.8h, v7.8h, v1.h[1]\n"
- "ldr x12, [x17, #0xa8]\n"
+ "cmp x13, #0x10\n"
"fmla v19.8h, v7.8h, v2.h[1]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.8h, v7.8h, v3.h[1]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr x10, [x17, #0xa8]\n"
"fmla v27.8h, v7.8h, v4.h[1]\n"
"ldr d7, [x17, #0x90]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[2]\n"
"fmla v12.8h, v6.8h, v1.h[2]\n"
- "ldr x11, [x17, #0xb8]\n"
"fmla v16.8h, v6.8h, v2.h[2]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "mov v7.d[1], x11\n"
"fmla v20.8h, v6.8h, v3.h[2]\n"
+ "ldr x11, [x17, #0xb8]\n"
"fmla v24.8h, v6.8h, v4.h[2]\n"
"ldr d6, [x17, #0xa0]\n"
"fmla v9.8h, v7.8h, v0.h[2]\n"
- "mov v6.d[1], x12\n"
"fmla v13.8h, v7.8h, v1.h[2]\n"
- "ldr x12, [x17, #0xc8]\n"
"fmla v17.8h, v7.8h, v2.h[2]\n"
+ "mov v6.d[1], x10\n"
"fmla v21.8h, v7.8h, v3.h[2]\n"
+ "ldr x10, [x17, #0xc8]\n"
"fmla v25.8h, v7.8h, v4.h[2]\n"
"ldr d7, [x17, #0xb0]\n"
- "mov v7.d[1], x11\n"
"fmla v10.8h, v6.8h, v0.h[2]\n"
"fmla v14.8h, v6.8h, v1.h[2]\n"
- "ldr x11, [x17, #0xd8]\n"
"fmla v18.8h, v6.8h, v2.h[2]\n"
+ "mov v7.d[1], x11\n"
"fmla v22.8h, v6.8h, v3.h[2]\n"
+ "ldr x11, [x17, #0xd8]\n"
"fmla v26.8h, v6.8h, v4.h[2]\n"
"ldr d6, [x17, #0xc0]\n"
"fmla v11.8h, v7.8h, v0.h[2]\n"
- "mov v6.d[1], x12\n"
"fmla v15.8h, v7.8h, v1.h[2]\n"
- "ldr x12, [x17, #0xe8]\n"
"fmla v19.8h, v7.8h, v2.h[2]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.8h, v7.8h, v3.h[2]\n"
+ "ldr x10, [x17, #0xe8]\n"
"fmla v27.8h, v7.8h, v4.h[2]\n"
"ldr d7, [x17, #0xd0]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[3]\n"
"fmla v12.8h, v6.8h, v1.h[3]\n"
- "ldr x11, [x17, #0xf8]\n"
"fmla v16.8h, v6.8h, v2.h[3]\n"
+ "mov v7.d[1], x11\n"
"fmla v20.8h, v6.8h, v3.h[3]\n"
+ "ldr x11, [x17, #0xf8]\n"
"fmla v24.8h, v6.8h, v4.h[3]\n"
"ldr d6, [x17, #0xe0]\n"
"fmla v9.8h, v7.8h, v0.h[3]\n"
- "mov v6.d[1], x12\n"
"fmla v13.8h, v7.8h, v1.h[3]\n"
- "ldr x12, [x17, #0x108]\n"
"fmla v17.8h, v7.8h, v2.h[3]\n"
+ "mov v6.d[1], x10\n"
"fmla v21.8h, v7.8h, v3.h[3]\n"
+ "ldr x10, [x17, #0x108]\n"
"fmla v25.8h, v7.8h, v4.h[3]\n"
"ldr d7, [x17, #0xf0]\n"
- "mov v7.d[1], x11\n"
"fmla v10.8h, v6.8h, v0.h[3]\n"
"fmla v14.8h, v6.8h, v1.h[3]\n"
- "ldr x11, [x17, #0x118]\n"
"fmla v18.8h, v6.8h, v2.h[3]\n"
+ "mov v7.d[1], x11\n"
"fmla v22.8h, v6.8h, v3.h[3]\n"
+ "ldr x11, [x17, #0x118]\n"
"fmla v26.8h, v6.8h, v4.h[3]\n"
"ldr d6, [x17, #0x100]\n"
"fmla v11.8h, v7.8h, v0.h[3]\n"
- "mov v6.d[1], x12\n"
"fmla v15.8h, v7.8h, v1.h[3]\n"
- "ldr x12, [x17, #0x128]\n"
"fmla v19.8h, v7.8h, v2.h[3]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.8h, v7.8h, v3.h[3]\n"
+ "ldr x10, [x17, #0x128]\n"
"fmla v27.8h, v7.8h, v4.h[3]\n"
"ldr d7, [x17, #0x110]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[4]\n"
"fmla v12.8h, v6.8h, v1.h[4]\n"
- "ldr x11, [x17, #0x138]\n"
"fmla v16.8h, v6.8h, v2.h[4]\n"
+ "mov v7.d[1], x11\n"
"fmla v20.8h, v6.8h, v3.h[4]\n"
+ "ldr x11, [x17, #0x138]\n"
"fmla v24.8h, v6.8h, v4.h[4]\n"
"ldr d6, [x17, #0x120]\n"
"fmla v9.8h, v7.8h, v0.h[4]\n"
- "mov v6.d[1], x12\n"
"fmla v13.8h, v7.8h, v1.h[4]\n"
- "ldr x12, [x17, #0x148]\n"
"fmla v17.8h, v7.8h, v2.h[4]\n"
+ "mov v6.d[1], x10\n"
"fmla v21.8h, v7.8h, v3.h[4]\n"
+ "ldr x10, [x17, #0x148]\n"
"fmla v25.8h, v7.8h, v4.h[4]\n"
"ldr d7, [x17, #0x130]\n"
- "mov v7.d[1], x11\n"
"fmla v10.8h, v6.8h, v0.h[4]\n"
"fmla v14.8h, v6.8h, v1.h[4]\n"
- "ldr x11, [x17, #0x158]\n"
"fmla v18.8h, v6.8h, v2.h[4]\n"
+ "mov v7.d[1], x11\n"
"fmla v22.8h, v6.8h, v3.h[4]\n"
+ "ldr x11, [x17, #0x158]\n"
"fmla v26.8h, v6.8h, v4.h[4]\n"
"ldr d6, [x17, #0x140]\n"
"fmla v11.8h, v7.8h, v0.h[4]\n"
- "mov v6.d[1], x12\n"
"fmla v15.8h, v7.8h, v1.h[4]\n"
- "ldr x12, [x17, #0x168]\n"
"fmla v19.8h, v7.8h, v2.h[4]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.8h, v7.8h, v3.h[4]\n"
+ "ldr x10, [x17, #0x168]\n"
"fmla v27.8h, v7.8h, v4.h[4]\n"
"ldr d7, [x17, #0x150]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[5]\n"
"fmla v12.8h, v6.8h, v1.h[5]\n"
- "ldr x11, [x17, #0x178]\n"
"fmla v16.8h, v6.8h, v2.h[5]\n"
+ "mov v7.d[1], x11\n"
"fmla v20.8h, v6.8h, v3.h[5]\n"
+ "ldr x11, [x17, #0x178]\n"
"fmla v24.8h, v6.8h, v4.h[5]\n"
"ldr d6, [x17, #0x160]\n"
"fmla v9.8h, v7.8h, v0.h[5]\n"
- "mov v6.d[1], x12\n"
"fmla v13.8h, v7.8h, v1.h[5]\n"
- "ldr x12, [x17, #0x188]\n"
"fmla v17.8h, v7.8h, v2.h[5]\n"
+ "mov v6.d[1], x10\n"
"fmla v21.8h, v7.8h, v3.h[5]\n"
+ "ldr x10, [x17, #0x188]\n"
"fmla v25.8h, v7.8h, v4.h[5]\n"
"ldr d7, [x17, #0x170]\n"
- "mov v7.d[1], x11\n"
"fmla v10.8h, v6.8h, v0.h[5]\n"
"fmla v14.8h, v6.8h, v1.h[5]\n"
- "ldr x11, [x17, #0x198]\n"
"fmla v18.8h, v6.8h, v2.h[5]\n"
+ "mov v7.d[1], x11\n"
"fmla v22.8h, v6.8h, v3.h[5]\n"
+ "ldr x11, [x17, #0x198]\n"
"fmla v26.8h, v6.8h, v4.h[5]\n"
"ldr d6, [x17, #0x180]\n"
"fmla v11.8h, v7.8h, v0.h[5]\n"
- "mov v6.d[1], x12\n"
"fmla v15.8h, v7.8h, v1.h[5]\n"
- "ldr x12, [x17, #0x1a8]\n"
"fmla v19.8h, v7.8h, v2.h[5]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.8h, v7.8h, v3.h[5]\n"
+ "ldr x10, [x17, #0x1a8]\n"
"fmla v27.8h, v7.8h, v4.h[5]\n"
"ldr d7, [x17, #0x190]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[6]\n"
"fmla v12.8h, v6.8h, v1.h[6]\n"
- "ldr x11, [x17, #0x1b8]\n"
"fmla v16.8h, v6.8h, v2.h[6]\n"
+ "mov v7.d[1], x11\n"
"fmla v20.8h, v6.8h, v3.h[6]\n"
+ "ldr x11, [x17, #0x1b8]\n"
"fmla v24.8h, v6.8h, v4.h[6]\n"
"ldr d6, [x17, #0x1a0]\n"
"fmla v9.8h, v7.8h, v0.h[6]\n"
- "mov v6.d[1], x12\n"
"fmla v13.8h, v7.8h, v1.h[6]\n"
- "ldr x12, [x17, #0x1c8]\n"
"fmla v17.8h, v7.8h, v2.h[6]\n"
+ "mov v6.d[1], x10\n"
"fmla v21.8h, v7.8h, v3.h[6]\n"
+ "ldr x10, [x17, #0x1c8]\n"
"fmla v25.8h, v7.8h, v4.h[6]\n"
"ldr d7, [x17, #0x1b0]\n"
- "mov v7.d[1], x11\n"
"fmla v10.8h, v6.8h, v0.h[6]\n"
"fmla v14.8h, v6.8h, v1.h[6]\n"
- "ldr x11, [x17, #0x1d8]\n"
"fmla v18.8h, v6.8h, v2.h[6]\n"
+ "mov v7.d[1], x11\n"
"fmla v22.8h, v6.8h, v3.h[6]\n"
+ "ldr x11, [x17, #0x1d8]\n"
"fmla v26.8h, v6.8h, v4.h[6]\n"
"ldr d6, [x17, #0x1c0]\n"
"fmla v11.8h, v7.8h, v0.h[6]\n"
- "mov v6.d[1], x12\n"
"fmla v15.8h, v7.8h, v1.h[6]\n"
- "ldr x12, [x17, #0x1e8]\n"
"fmla v19.8h, v7.8h, v2.h[6]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.8h, v7.8h, v3.h[6]\n"
+ "ldr x10, [x17, #0x1e8]\n"
"fmla v27.8h, v7.8h, v4.h[6]\n"
"ldr d7, [x17, #0x1d0]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[7]\n"
"fmla v12.8h, v6.8h, v1.h[7]\n"
- "ldr x11, [x17, #0x1f8]\n"
"fmla v16.8h, v6.8h, v2.h[7]\n"
+ "mov v7.d[1], x11\n"
"fmla v20.8h, v6.8h, v3.h[7]\n"
+ "ldr x11, [x17, #0x1f8]\n"
"fmla v24.8h, v6.8h, v4.h[7]\n"
"ldr d6, [x17, #0x1e0]\n"
"fmla v9.8h, v7.8h, v0.h[7]\n"
- "mov v6.d[1], x12\n"
"fmla v13.8h, v7.8h, v1.h[7]\n"
"fmla v17.8h, v7.8h, v2.h[7]\n"
+ "mov v6.d[1], x10\n"
"fmla v21.8h, v7.8h, v3.h[7]\n"
"fmla v25.8h, v7.8h, v4.h[7]\n"
"ldr d7, [x17, #0x1f0]\n"
- "mov v7.d[1], x11\n"
- "add x17, x17, #0x200\n"
"fmla v10.8h, v6.8h, v0.h[7]\n"
- "ldr x12, [x17, #0x8]\n"
+ "add x17, x17, #0x200\n"
"fmla v14.8h, v6.8h, v1.h[7]\n"
- "ldr x11, [x17, #0x18]\n"
+ "ldr x10, [x17, #0x8]\n"
"fmla v18.8h, v6.8h, v2.h[7]\n"
+ "mov v7.d[1], x11\n"
"fmla v22.8h, v6.8h, v3.h[7]\n"
"fmla v26.8h, v6.8h, v4.h[7]\n"
"ldr d6, [x17, #0x0]\n"
"fmla v11.8h, v7.8h, v0.h[7]\n"
- "ldr d0, [x13, #0x0]\n"
+ "ldr d0, [x12, #0x0]\n"
"fmla v15.8h, v7.8h, v1.h[7]\n"
- "ldr d1, [x9, #0x0]\n"
+ "ldr d1, [x28, #0x0]\n"
"fmla v19.8h, v7.8h, v2.h[7]\n"
- "ldr d2, [x27, #0x0]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.8h, v7.8h, v3.h[7]\n"
- "ldr d3, [x25, #0x0]\n"
+ "mov v0.d[1], x9\n"
"fmla v27.8h, v7.8h, v4.h[7]\n"
- "ldr d4, [x23, #0x0]\n"
- "ldr d7, [x17, #0x10]\n"
- "mov v6.d[1], x12\n"
- "mov v0.d[1], x10\n"
- "mov v1.d[1], x28\n"
- "mov v2.d[1], x26\n"
- "mov v3.d[1], x24\n"
- "mov v4.d[1], x22\n"
- "mov v7.d[1], x11\n"
+ "mov v1.d[1], x27\n"
+ "ldr d2, [x26, #0x0]\n"
+ "ldr d3, [x24, #0x0]\n"
+ "ldr d4, [x22, #0x0]\n"
+ "mov v2.d[1], x25\n"
+ "mov v3.d[1], x23\n"
+ "mov v4.d[1], x21\n"
"bge 222b\n"
"223:" // Height 5: Multiply loop: Single iteration only
"fmla v8.8h, v6.8h, v0.h[0]\n"
- "add x13, x13, #0x10\n"
+ "ldr q7, [x17, #0x10]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
- "add x9, x9, #0x10\n"
+ "sub x13, x13, #0x8\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
- "add x27, x27, #0x10\n"
+ "add x12, x12, #0x10\n"
"fmla v20.8h, v6.8h, v3.h[0]\n"
- "add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
"fmla v24.8h, v6.8h, v4.h[0]\n"
"ldr q6, [x17, #0x20]\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
- "add x23, x23, #0x10\n"
+ "add x28, x28, #0x10\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
- "sub x14, x14, #0x8\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "add x26, x26, #0x10\n"
"fmla v21.8h, v7.8h, v3.h[0]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"fmla v25.8h, v7.8h, v4.h[0]\n"
"ldr q7, [x17, #0x30]\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "add x24, x24, #0x10\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v18.8h, v6.8h, v2.h[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "add x22, x22, #0x10\n"
"fmla v22.8h, v6.8h, v3.h[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
"fmla v26.8h, v6.8h, v4.h[0]\n"
"ldr q6, [x17, #0x40]\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
@@ -4018,17 +4018,17 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmla v23.8h, v7.8h, v3.h[7]\n"
"fmla v27.8h, v7.8h, v4.h[7]\n"
"224:" // Height 5: Multiply loop: Main loop skip
- "cbz x14, 226f\n"
+ "cbz x13, 226f\n"
"225:" // Height 5: Multiply loop: Odd block loop
- "ldr h0, [x13], #0x2\n"
- "sub x14, x14, #0x1\n"
- "ldr h1, [x9], #0x2\n"
- "ldr h2, [x27], #0x2\n"
- "ldr h3, [x25], #0x2\n"
- "ldr h4, [x23], #0x2\n"
+ "ldr h0, [x12], #0x2\n"
+ "sub x13, x13, #0x1\n"
+ "ldr h1, [x28], #0x2\n"
+ "ldr h2, [x26], #0x2\n"
+ "ldr h3, [x24], #0x2\n"
+ "ldr h4, [x22], #0x2\n"
"ldr q6, [x17, #0x0]\n"
- "fmla v8.8h, v6.8h, v0.h[0]\n"
"ldr q7, [x17, #0x10]\n"
+ "fmla v8.8h, v6.8h, v0.h[0]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
"fmla v20.8h, v6.8h, v3.h[0]\n"
@@ -4051,25 +4051,27 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmla v19.8h, v7.8h, v2.h[0]\n"
"fmla v23.8h, v7.8h, v3.h[0]\n"
"fmla v27.8h, v7.8h, v4.h[0]\n"
- "cbnz x14, 225b\n"
+ "cbnz x13, 225b\n"
"226:" // Height 5: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x15, x15, #0x1\n"
- "cmp x15, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x14, x14, #0x1\n"
+ "cmp x14, x19\n"
"bne 219b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
- "prfm pstl1keep, [x16, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x15, #0x0]\n"
+ "add x25, x15, x19, LSL #1\n"
"prfm pstl1keep, [x25, #0x0]\n"
+ "add x24, x25, x19, LSL #1\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x19, LSL #1\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #1\n"
"prfm pstl1keep, [x22, #0x0]\n"
"tbz %x[flags], #1, 227f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v0.8h }, [x20]\n"
+ "add x20, %x[args_ptr], %[offset_min]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.8h }, [x20]\n"
+ "ld1r { v0.8h }, [x19]\n"
"fmin v8.8h, v8.8h, v0.8h\n"
"fmin v9.8h, v9.8h, v0.8h\n"
"fmin v10.8h, v10.8h, v0.8h\n"
@@ -4080,6 +4082,16 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmin v15.8h, v15.8h, v0.8h\n"
"fmin v16.8h, v16.8h, v0.8h\n"
"fmin v17.8h, v17.8h, v0.8h\n"
+ "fmax v8.8h, v8.8h, v1.8h\n"
+ "fmax v9.8h, v9.8h, v1.8h\n"
+ "fmax v10.8h, v10.8h, v1.8h\n"
+ "fmax v11.8h, v11.8h, v1.8h\n"
+ "fmax v12.8h, v12.8h, v1.8h\n"
+ "fmax v13.8h, v13.8h, v1.8h\n"
+ "fmax v14.8h, v14.8h, v1.8h\n"
+ "fmax v15.8h, v15.8h, v1.8h\n"
+ "fmax v16.8h, v16.8h, v1.8h\n"
+ "fmax v17.8h, v17.8h, v1.8h\n"
"fmin v18.8h, v18.8h, v0.8h\n"
"fmin v19.8h, v19.8h, v0.8h\n"
"fmin v20.8h, v20.8h, v0.8h\n"
@@ -4090,34 +4102,22 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmin v25.8h, v25.8h, v0.8h\n"
"fmin v26.8h, v26.8h, v0.8h\n"
"fmin v27.8h, v27.8h, v0.8h\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.8h }, [x20]\n"
- "fmax v8.8h, v8.8h, v0.8h\n"
- "fmax v9.8h, v9.8h, v0.8h\n"
- "fmax v10.8h, v10.8h, v0.8h\n"
- "fmax v11.8h, v11.8h, v0.8h\n"
- "fmax v12.8h, v12.8h, v0.8h\n"
- "fmax v13.8h, v13.8h, v0.8h\n"
- "fmax v14.8h, v14.8h, v0.8h\n"
- "fmax v15.8h, v15.8h, v0.8h\n"
- "fmax v16.8h, v16.8h, v0.8h\n"
- "fmax v17.8h, v17.8h, v0.8h\n"
- "fmax v18.8h, v18.8h, v0.8h\n"
- "fmax v19.8h, v19.8h, v0.8h\n"
- "fmax v20.8h, v20.8h, v0.8h\n"
- "fmax v21.8h, v21.8h, v0.8h\n"
- "fmax v22.8h, v22.8h, v0.8h\n"
- "fmax v23.8h, v23.8h, v0.8h\n"
- "fmax v24.8h, v24.8h, v0.8h\n"
- "fmax v25.8h, v25.8h, v0.8h\n"
- "fmax v26.8h, v26.8h, v0.8h\n"
- "fmax v27.8h, v27.8h, v0.8h\n"
+ "fmax v18.8h, v18.8h, v1.8h\n"
+ "fmax v19.8h, v19.8h, v1.8h\n"
+ "fmax v20.8h, v20.8h, v1.8h\n"
+ "fmax v21.8h, v21.8h, v1.8h\n"
+ "fmax v22.8h, v22.8h, v1.8h\n"
+ "fmax v23.8h, v23.8h, v1.8h\n"
+ "fmax v24.8h, v24.8h, v1.8h\n"
+ "fmax v25.8h, v25.8h, v1.8h\n"
+ "fmax v26.8h, v26.8h, v1.8h\n"
+ "fmax v27.8h, v27.8h, v1.8h\n"
"227:" // Height 5: No activation
"cmp x8, #0x20\n"
"bge 244f\n"
"tbz x8, #4, 235f\n"
- "st1 { v8.8h }, [x16], #0x10\n"
- "st1 { v9.8h }, [x16], #0x10\n"
+ "st1 { v8.8h }, [x15], #0x10\n"
+ "st1 { v9.8h }, [x15], #0x10\n"
"st1 { v12.8h }, [x25], #0x10\n"
"st1 { v13.8h }, [x25], #0x10\n"
"st1 { v16.8h }, [x24], #0x10\n"
@@ -4127,25 +4127,25 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"st1 { v24.8h }, [x22], #0x10\n"
"st1 { v25.8h }, [x22], #0x10\n"
"tbz x8, #3, 231f\n"
- "st1 { v10.8h }, [x16], #0x10\n"
+ "st1 { v10.8h }, [x15], #0x10\n"
"st1 { v14.8h }, [x25], #0x10\n"
"st1 { v18.8h }, [x24], #0x10\n"
"st1 { v22.8h }, [x23], #0x10\n"
"st1 { v26.8h }, [x22], #0x10\n"
"tbz x8, #2, 229f\n"
- "str d11, [x16], #0x8\n"
+ "str d11, [x15], #0x8\n"
"str d15, [x25], #0x8\n"
"str d19, [x24], #0x8\n"
"str d23, [x23], #0x8\n"
"str d27, [x22], #0x8\n"
"tbz x8, #1, 228f\n"
- "st1 { v11.s }[2], [x16], #0x4\n"
+ "st1 { v11.s }[2], [x15], #0x4\n"
"st1 { v15.s }[2], [x25], #0x4\n"
"st1 { v19.s }[2], [x24], #0x4\n"
"st1 { v23.s }[2], [x23], #0x4\n"
"st1 { v27.s }[2], [x22], #0x4\n"
"tbz x8, #0, 243f\n"
- "st1 { v11.h }[6], [x16]\n"
+ "st1 { v11.h }[6], [x15]\n"
"st1 { v15.h }[6], [x25]\n"
"st1 { v19.h }[6], [x24]\n"
"st1 { v23.h }[6], [x23]\n"
@@ -4153,7 +4153,7 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 243f\n"
"228:" // Height 5: Partial direct writeback: partial_1_28
"tbz x8, #0, 243f\n"
- "st1 { v11.h }[4], [x16]\n"
+ "st1 { v11.h }[4], [x15]\n"
"st1 { v15.h }[4], [x25]\n"
"st1 { v19.h }[4], [x24]\n"
"st1 { v23.h }[4], [x23]\n"
@@ -4161,13 +4161,13 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 243f\n"
"229:" // Height 5: Partial direct writeback: partial_2_24
"tbz x8, #1, 230f\n"
- "str s11, [x16], #0x4\n"
+ "str s11, [x15], #0x4\n"
"str s15, [x25], #0x4\n"
"str s19, [x24], #0x4\n"
"str s23, [x23], #0x4\n"
"str s27, [x22], #0x4\n"
"tbz x8, #0, 243f\n"
- "st1 { v11.h }[2], [x16]\n"
+ "st1 { v11.h }[2], [x15]\n"
"st1 { v15.h }[2], [x25]\n"
"st1 { v19.h }[2], [x24]\n"
"st1 { v23.h }[2], [x23]\n"
@@ -4175,7 +4175,7 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 243f\n"
"230:" // Height 5: Partial direct writeback: partial_1_24
"tbz x8, #0, 243f\n"
- "str h11, [x16, #0x0]\n"
+ "str h11, [x15, #0x0]\n"
"str h15, [x25, #0x0]\n"
"str h19, [x24, #0x0]\n"
"str h23, [x23, #0x0]\n"
@@ -4183,19 +4183,19 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 243f\n"
"231:" // Height 5: Partial direct writeback: partial_4_16
"tbz x8, #2, 233f\n"
- "str d10, [x16], #0x8\n"
+ "str d10, [x15], #0x8\n"
"str d14, [x25], #0x8\n"
"str d18, [x24], #0x8\n"
"str d22, [x23], #0x8\n"
"str d26, [x22], #0x8\n"
"tbz x8, #1, 232f\n"
- "st1 { v10.s }[2], [x16], #0x4\n"
+ "st1 { v10.s }[2], [x15], #0x4\n"
"st1 { v14.s }[2], [x25], #0x4\n"
"st1 { v18.s }[2], [x24], #0x4\n"
"st1 { v22.s }[2], [x23], #0x4\n"
"st1 { v26.s }[2], [x22], #0x4\n"
"tbz x8, #0, 243f\n"
- "st1 { v10.h }[6], [x16]\n"
+ "st1 { v10.h }[6], [x15]\n"
"st1 { v14.h }[6], [x25]\n"
"st1 { v18.h }[6], [x24]\n"
"st1 { v22.h }[6], [x23]\n"
@@ -4203,7 +4203,7 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 243f\n"
"232:" // Height 5: Partial direct writeback: partial_1_20
"tbz x8, #0, 243f\n"
- "st1 { v10.h }[4], [x16]\n"
+ "st1 { v10.h }[4], [x15]\n"
"st1 { v14.h }[4], [x25]\n"
"st1 { v18.h }[4], [x24]\n"
"st1 { v22.h }[4], [x23]\n"
@@ -4211,13 +4211,13 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 243f\n"
"233:" // Height 5: Partial direct writeback: partial_2_16
"tbz x8, #1, 234f\n"
- "str s10, [x16], #0x4\n"
+ "str s10, [x15], #0x4\n"
"str s14, [x25], #0x4\n"
"str s18, [x24], #0x4\n"
"str s22, [x23], #0x4\n"
"str s26, [x22], #0x4\n"
"tbz x8, #0, 243f\n"
- "st1 { v10.h }[2], [x16]\n"
+ "st1 { v10.h }[2], [x15]\n"
"st1 { v14.h }[2], [x25]\n"
"st1 { v18.h }[2], [x24]\n"
"st1 { v22.h }[2], [x23]\n"
@@ -4225,7 +4225,7 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 243f\n"
"234:" // Height 5: Partial direct writeback: partial_1_16
"tbz x8, #0, 243f\n"
- "str h10, [x16, #0x0]\n"
+ "str h10, [x15, #0x0]\n"
"str h14, [x25, #0x0]\n"
"str h18, [x24, #0x0]\n"
"str h22, [x23, #0x0]\n"
@@ -4233,25 +4233,25 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 243f\n"
"235:" // Height 5: Partial direct writeback: partial_8_0
"tbz x8, #3, 239f\n"
- "st1 { v8.8h }, [x16], #0x10\n"
+ "st1 { v8.8h }, [x15], #0x10\n"
"st1 { v12.8h }, [x25], #0x10\n"
"st1 { v16.8h }, [x24], #0x10\n"
"st1 { v20.8h }, [x23], #0x10\n"
"st1 { v24.8h }, [x22], #0x10\n"
"tbz x8, #2, 237f\n"
- "str d9, [x16], #0x8\n"
+ "str d9, [x15], #0x8\n"
"str d13, [x25], #0x8\n"
"str d17, [x24], #0x8\n"
"str d21, [x23], #0x8\n"
"str d25, [x22], #0x8\n"
"tbz x8, #1, 236f\n"
- "st1 { v9.s }[2], [x16], #0x4\n"
+ "st1 { v9.s }[2], [x15], #0x4\n"
"st1 { v13.s }[2], [x25], #0x4\n"
"st1 { v17.s }[2], [x24], #0x4\n"
"st1 { v21.s }[2], [x23], #0x4\n"
"st1 { v25.s }[2], [x22], #0x4\n"
"tbz x8, #0, 243f\n"
- "st1 { v9.h }[6], [x16]\n"
+ "st1 { v9.h }[6], [x15]\n"
"st1 { v13.h }[6], [x25]\n"
"st1 { v17.h }[6], [x24]\n"
"st1 { v21.h }[6], [x23]\n"
@@ -4259,7 +4259,7 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 243f\n"
"236:" // Height 5: Partial direct writeback: partial_1_12
"tbz x8, #0, 243f\n"
- "st1 { v9.h }[4], [x16]\n"
+ "st1 { v9.h }[4], [x15]\n"
"st1 { v13.h }[4], [x25]\n"
"st1 { v17.h }[4], [x24]\n"
"st1 { v21.h }[4], [x23]\n"
@@ -4267,13 +4267,13 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 243f\n"
"237:" // Height 5: Partial direct writeback: partial_2_8
"tbz x8, #1, 238f\n"
- "str s9, [x16], #0x4\n"
+ "str s9, [x15], #0x4\n"
"str s13, [x25], #0x4\n"
"str s17, [x24], #0x4\n"
"str s21, [x23], #0x4\n"
"str s25, [x22], #0x4\n"
"tbz x8, #0, 243f\n"
- "st1 { v9.h }[2], [x16]\n"
+ "st1 { v9.h }[2], [x15]\n"
"st1 { v13.h }[2], [x25]\n"
"st1 { v17.h }[2], [x24]\n"
"st1 { v21.h }[2], [x23]\n"
@@ -4281,7 +4281,7 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 243f\n"
"238:" // Height 5: Partial direct writeback: partial_1_8
"tbz x8, #0, 243f\n"
- "str h9, [x16, #0x0]\n"
+ "str h9, [x15, #0x0]\n"
"str h13, [x25, #0x0]\n"
"str h17, [x24, #0x0]\n"
"str h21, [x23, #0x0]\n"
@@ -4289,19 +4289,19 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 243f\n"
"239:" // Height 5: Partial direct writeback: partial_4_0
"tbz x8, #2, 241f\n"
- "str d8, [x16], #0x8\n"
+ "str d8, [x15], #0x8\n"
"str d12, [x25], #0x8\n"
"str d16, [x24], #0x8\n"
"str d20, [x23], #0x8\n"
"str d24, [x22], #0x8\n"
"tbz x8, #1, 240f\n"
- "st1 { v8.s }[2], [x16], #0x4\n"
+ "st1 { v8.s }[2], [x15], #0x4\n"
"st1 { v12.s }[2], [x25], #0x4\n"
"st1 { v16.s }[2], [x24], #0x4\n"
"st1 { v20.s }[2], [x23], #0x4\n"
"st1 { v24.s }[2], [x22], #0x4\n"
"tbz x8, #0, 243f\n"
- "st1 { v8.h }[6], [x16]\n"
+ "st1 { v8.h }[6], [x15]\n"
"st1 { v12.h }[6], [x25]\n"
"st1 { v16.h }[6], [x24]\n"
"st1 { v20.h }[6], [x23]\n"
@@ -4309,7 +4309,7 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 243f\n"
"240:" // Height 5: Partial direct writeback: partial_1_4
"tbz x8, #0, 243f\n"
- "st1 { v8.h }[4], [x16]\n"
+ "st1 { v8.h }[4], [x15]\n"
"st1 { v12.h }[4], [x25]\n"
"st1 { v16.h }[4], [x24]\n"
"st1 { v20.h }[4], [x23]\n"
@@ -4317,20 +4317,20 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 243f\n"
"241:" // Height 5: Partial direct writeback: partial_2_0
"tbz x8, #1, 242f\n"
- "str s8, [x16], #0x4\n"
+ "str s8, [x15], #0x4\n"
"str s12, [x25], #0x4\n"
"str s16, [x24], #0x4\n"
"str s20, [x23], #0x4\n"
"str s24, [x22], #0x4\n"
"tbz x8, #0, 243f\n"
- "st1 { v8.h }[2], [x16]\n"
+ "st1 { v8.h }[2], [x15]\n"
"st1 { v12.h }[2], [x25]\n"
"st1 { v16.h }[2], [x24]\n"
"st1 { v20.h }[2], [x23]\n"
"st1 { v24.h }[2], [x22]\n"
"b 243f\n"
"242:" // Height 5: Partial direct writeback: partial_1_0
- "str h8, [x16, #0x0]\n"
+ "str h8, [x15, #0x0]\n"
"str h12, [x25, #0x0]\n"
"str h16, [x24, #0x0]\n"
"str h20, [x23, #0x0]\n"
@@ -4338,11 +4338,11 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"243:" // Height 5: Partial direct writeback: Done
"b 245f\n"
"244:" // Height 5: Full writeback
- "str q8, [x16, #0x0]\n"
- "str q9, [x16, #0x10]\n"
- "str q10, [x16, #0x20]\n"
- "str q11, [x16, #0x30]\n"
- "add x16, x16, #0x40\n"
+ "str q8, [x15, #0x0]\n"
+ "str q9, [x15, #0x10]\n"
+ "str q10, [x15, #0x20]\n"
+ "str q11, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
"str q12, [x25, #0x0]\n"
"str q13, [x25, #0x10]\n"
"str q14, [x25, #0x20]\n"
@@ -4364,25 +4364,25 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"bgt 198b\n"
"b 296f\n"
"246:" // Height 6
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0xc\n"
- "mov x7, %x[bias]\n"
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x16, %x[bias]\n"
"ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x16, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "mov x15, %x[output_ptr]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x19, #0xc\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"247:" // Height 6: Column loop
- "cbz x7, 248f\n"
- "ldr q8, [x7, #0x0]\n"
+ "cbz x16, 248f\n"
+ "ldr q8, [x16, #0x0]\n"
+ "ldr q9, [x16, #0x10]\n"
+ "ldr q10, [x16, #0x20]\n"
"mov v12.16b, v8.16b\n"
- "ldr q9, [x7, #0x10]\n"
+ "ldr q11, [x16, #0x30]\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x7, #0x20]\n"
+ "add x16, x16, #0x40\n"
"mov v14.16b, v10.16b\n"
- "ldr q11, [x7, #0x30]\n"
"mov v15.16b, v11.16b\n"
"mov v16.16b, v8.16b\n"
- "add x7, x7, #0x40\n"
"mov v17.16b, v9.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
@@ -4401,51 +4401,51 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 267f\n"
"248:" // Height 6: no bias
"tbz %x[flags], #0, 266f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x8, #0x20\n"
- "add x21, x22, x20, LSL #1\n"
+ "add x25, x15, x19, LSL #1\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
"bge 265f\n"
"tbz x8, #4, 256f\n"
- "ld1 { v8.8h }, [x16], #0x10\n"
+ "ld1 { v8.8h }, [x15], #0x10\n"
"ld1 { v12.8h }, [x25], #0x10\n"
"ld1 { v16.8h }, [x24], #0x10\n"
+ "ld1 { v9.8h }, [x15], #0x10\n"
+ "ld1 { v13.8h }, [x25], #0x10\n"
+ "ld1 { v17.8h }, [x24], #0x10\n"
"ld1 { v20.8h }, [x23], #0x10\n"
"ld1 { v24.8h }, [x22], #0x10\n"
"ld1 { v28.8h }, [x21], #0x10\n"
- "ld1 { v9.8h }, [x16], #0x10\n"
- "ld1 { v13.8h }, [x25], #0x10\n"
- "ld1 { v17.8h }, [x24], #0x10\n"
"ld1 { v21.8h }, [x23], #0x10\n"
"ld1 { v25.8h }, [x22], #0x10\n"
"ld1 { v29.8h }, [x21], #0x10\n"
"tbz x8, #3, 252f\n"
- "ld1 { v10.8h }, [x16], #0x10\n"
+ "ld1 { v10.8h }, [x15], #0x10\n"
"ld1 { v14.8h }, [x25], #0x10\n"
"ld1 { v18.8h }, [x24], #0x10\n"
"ld1 { v22.8h }, [x23], #0x10\n"
"ld1 { v26.8h }, [x22], #0x10\n"
"ld1 { v30.8h }, [x21], #0x10\n"
"tbz x8, #2, 250f\n"
- "ldr d11, [x16], #0x8\n"
+ "ldr d11, [x15], #0x8\n"
"ldr d15, [x25], #0x8\n"
"ldr d19, [x24], #0x8\n"
"ldr d23, [x23], #0x8\n"
"ldr d27, [x22], #0x8\n"
"ldr d31, [x21], #0x8\n"
"tbz x8, #1, 249f\n"
- "ld1 { v11.s }[2], [x16], #0x4\n"
- "mov x20, #0x3c\n"
+ "ld1 { v11.s }[2], [x15], #0x4\n"
+ "mov x19, #0x3c\n"
"ld1 { v15.s }[2], [x25], #0x4\n"
"ld1 { v19.s }[2], [x24], #0x4\n"
"ld1 { v23.s }[2], [x23], #0x4\n"
"ld1 { v27.s }[2], [x22], #0x4\n"
"ld1 { v31.s }[2], [x21], #0x4\n"
"tbz x8, #0, 264f\n"
- "ld1 { v11.h }[6], [x16]\n"
+ "ld1 { v11.h }[6], [x15]\n"
"ld1 { v15.h }[6], [x25]\n"
"ld1 { v19.h }[6], [x24]\n"
"ld1 { v23.h }[6], [x23]\n"
@@ -4453,9 +4453,9 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"ld1 { v31.h }[6], [x21]\n"
"b 264f\n"
"249:" // Height 6: Partial accumulate: partial_1_28
- "mov x20, #0x38\n"
+ "mov x19, #0x38\n"
"tbz x8, #0, 264f\n"
- "ld1 { v11.h }[4], [x16]\n"
+ "ld1 { v11.h }[4], [x15]\n"
"ld1 { v15.h }[4], [x25]\n"
"ld1 { v19.h }[4], [x24]\n"
"ld1 { v23.h }[4], [x23]\n"
@@ -4464,15 +4464,15 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 264f\n"
"250:" // Height 6: Partial accumulate: partial_2_24
"tbz x8, #1, 251f\n"
- "ldr s11, [x16], #0x4\n"
- "mov x20, #0x34\n"
+ "ldr s11, [x15], #0x4\n"
"ldr s15, [x25], #0x4\n"
+ "mov x19, #0x34\n"
"ldr s19, [x24], #0x4\n"
"ldr s23, [x23], #0x4\n"
"ldr s27, [x22], #0x4\n"
"ldr s31, [x21], #0x4\n"
"tbz x8, #0, 264f\n"
- "ld1 { v11.h }[2], [x16]\n"
+ "ld1 { v11.h }[2], [x15]\n"
"ld1 { v15.h }[2], [x25]\n"
"ld1 { v19.h }[2], [x24]\n"
"ld1 { v23.h }[2], [x23]\n"
@@ -4480,9 +4480,9 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"ld1 { v31.h }[2], [x21]\n"
"b 264f\n"
"251:" // Height 6: Partial accumulate: partial_1_24
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x8, #0, 264f\n"
- "ldr h11, [x16, #0x0]\n"
+ "ldr h11, [x15, #0x0]\n"
"ldr h15, [x25, #0x0]\n"
"ldr h19, [x24, #0x0]\n"
"ldr h23, [x23, #0x0]\n"
@@ -4491,22 +4491,22 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 264f\n"
"252:" // Height 6: Partial accumulate: partial_4_16
"tbz x8, #2, 254f\n"
- "ldr d10, [x16], #0x8\n"
+ "ldr d10, [x15], #0x8\n"
"ldr d14, [x25], #0x8\n"
"ldr d18, [x24], #0x8\n"
"ldr d22, [x23], #0x8\n"
"ldr d26, [x22], #0x8\n"
"ldr d30, [x21], #0x8\n"
"tbz x8, #1, 253f\n"
- "ld1 { v10.s }[2], [x16], #0x4\n"
- "mov x20, #0x2c\n"
+ "ld1 { v10.s }[2], [x15], #0x4\n"
+ "mov x19, #0x2c\n"
"ld1 { v14.s }[2], [x25], #0x4\n"
"ld1 { v18.s }[2], [x24], #0x4\n"
"ld1 { v22.s }[2], [x23], #0x4\n"
"ld1 { v26.s }[2], [x22], #0x4\n"
"ld1 { v30.s }[2], [x21], #0x4\n"
"tbz x8, #0, 264f\n"
- "ld1 { v10.h }[6], [x16]\n"
+ "ld1 { v10.h }[6], [x15]\n"
"ld1 { v14.h }[6], [x25]\n"
"ld1 { v18.h }[6], [x24]\n"
"ld1 { v22.h }[6], [x23]\n"
@@ -4514,9 +4514,9 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"ld1 { v30.h }[6], [x21]\n"
"b 264f\n"
"253:" // Height 6: Partial accumulate: partial_1_20
- "mov x20, #0x28\n"
+ "mov x19, #0x28\n"
"tbz x8, #0, 264f\n"
- "ld1 { v10.h }[4], [x16]\n"
+ "ld1 { v10.h }[4], [x15]\n"
"ld1 { v14.h }[4], [x25]\n"
"ld1 { v18.h }[4], [x24]\n"
"ld1 { v22.h }[4], [x23]\n"
@@ -4525,15 +4525,15 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 264f\n"
"254:" // Height 6: Partial accumulate: partial_2_16
"tbz x8, #1, 255f\n"
- "ldr s10, [x16], #0x4\n"
- "mov x20, #0x24\n"
+ "ldr s10, [x15], #0x4\n"
"ldr s14, [x25], #0x4\n"
+ "mov x19, #0x24\n"
"ldr s18, [x24], #0x4\n"
"ldr s22, [x23], #0x4\n"
"ldr s26, [x22], #0x4\n"
"ldr s30, [x21], #0x4\n"
"tbz x8, #0, 264f\n"
- "ld1 { v10.h }[2], [x16]\n"
+ "ld1 { v10.h }[2], [x15]\n"
"ld1 { v14.h }[2], [x25]\n"
"ld1 { v18.h }[2], [x24]\n"
"ld1 { v22.h }[2], [x23]\n"
@@ -4541,9 +4541,9 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"ld1 { v30.h }[2], [x21]\n"
"b 264f\n"
"255:" // Height 6: Partial accumulate: partial_1_16
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x8, #0, 264f\n"
- "ldr h10, [x16, #0x0]\n"
+ "ldr h10, [x15, #0x0]\n"
"ldr h14, [x25, #0x0]\n"
"ldr h18, [x24, #0x0]\n"
"ldr h22, [x23, #0x0]\n"
@@ -4552,29 +4552,29 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 264f\n"
"256:" // Height 6: Partial accumulate: partial_8_0
"tbz x8, #3, 260f\n"
- "ld1 { v8.8h }, [x16], #0x10\n"
+ "ld1 { v8.8h }, [x15], #0x10\n"
"ld1 { v12.8h }, [x25], #0x10\n"
"ld1 { v16.8h }, [x24], #0x10\n"
"ld1 { v20.8h }, [x23], #0x10\n"
"ld1 { v24.8h }, [x22], #0x10\n"
"ld1 { v28.8h }, [x21], #0x10\n"
"tbz x8, #2, 258f\n"
- "ldr d9, [x16], #0x8\n"
+ "ldr d9, [x15], #0x8\n"
"ldr d13, [x25], #0x8\n"
"ldr d17, [x24], #0x8\n"
"ldr d21, [x23], #0x8\n"
"ldr d25, [x22], #0x8\n"
"ldr d29, [x21], #0x8\n"
"tbz x8, #1, 257f\n"
- "ld1 { v9.s }[2], [x16], #0x4\n"
- "mov x20, #0x1c\n"
+ "ld1 { v9.s }[2], [x15], #0x4\n"
+ "mov x19, #0x1c\n"
"ld1 { v13.s }[2], [x25], #0x4\n"
"ld1 { v17.s }[2], [x24], #0x4\n"
"ld1 { v21.s }[2], [x23], #0x4\n"
"ld1 { v25.s }[2], [x22], #0x4\n"
"ld1 { v29.s }[2], [x21], #0x4\n"
"tbz x8, #0, 264f\n"
- "ld1 { v9.h }[6], [x16]\n"
+ "ld1 { v9.h }[6], [x15]\n"
"ld1 { v13.h }[6], [x25]\n"
"ld1 { v17.h }[6], [x24]\n"
"ld1 { v21.h }[6], [x23]\n"
@@ -4582,9 +4582,9 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"ld1 { v29.h }[6], [x21]\n"
"b 264f\n"
"257:" // Height 6: Partial accumulate: partial_1_12
- "mov x20, #0x18\n"
+ "mov x19, #0x18\n"
"tbz x8, #0, 264f\n"
- "ld1 { v9.h }[4], [x16]\n"
+ "ld1 { v9.h }[4], [x15]\n"
"ld1 { v13.h }[4], [x25]\n"
"ld1 { v17.h }[4], [x24]\n"
"ld1 { v21.h }[4], [x23]\n"
@@ -4593,15 +4593,15 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 264f\n"
"258:" // Height 6: Partial accumulate: partial_2_8
"tbz x8, #1, 259f\n"
- "ldr s9, [x16], #0x4\n"
- "mov x20, #0x14\n"
+ "ldr s9, [x15], #0x4\n"
"ldr s13, [x25], #0x4\n"
+ "mov x19, #0x14\n"
"ldr s17, [x24], #0x4\n"
"ldr s21, [x23], #0x4\n"
"ldr s25, [x22], #0x4\n"
"ldr s29, [x21], #0x4\n"
"tbz x8, #0, 264f\n"
- "ld1 { v9.h }[2], [x16]\n"
+ "ld1 { v9.h }[2], [x15]\n"
"ld1 { v13.h }[2], [x25]\n"
"ld1 { v17.h }[2], [x24]\n"
"ld1 { v21.h }[2], [x23]\n"
@@ -4609,9 +4609,9 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"ld1 { v29.h }[2], [x21]\n"
"b 264f\n"
"259:" // Height 6: Partial accumulate: partial_1_8
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x8, #0, 264f\n"
- "ldr h9, [x16, #0x0]\n"
+ "ldr h9, [x15, #0x0]\n"
"ldr h13, [x25, #0x0]\n"
"ldr h17, [x24, #0x0]\n"
"ldr h21, [x23, #0x0]\n"
@@ -4620,22 +4620,22 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 264f\n"
"260:" // Height 6: Partial accumulate: partial_4_0
"tbz x8, #2, 262f\n"
- "ldr d8, [x16], #0x8\n"
+ "ldr d8, [x15], #0x8\n"
"ldr d12, [x25], #0x8\n"
"ldr d16, [x24], #0x8\n"
"ldr d20, [x23], #0x8\n"
"ldr d24, [x22], #0x8\n"
"ldr d28, [x21], #0x8\n"
"tbz x8, #1, 261f\n"
- "ld1 { v8.s }[2], [x16], #0x4\n"
- "mov x20, #0xc\n"
+ "ld1 { v8.s }[2], [x15], #0x4\n"
+ "mov x19, #0xc\n"
"ld1 { v12.s }[2], [x25], #0x4\n"
"ld1 { v16.s }[2], [x24], #0x4\n"
"ld1 { v20.s }[2], [x23], #0x4\n"
"ld1 { v24.s }[2], [x22], #0x4\n"
"ld1 { v28.s }[2], [x21], #0x4\n"
"tbz x8, #0, 264f\n"
- "ld1 { v8.h }[6], [x16]\n"
+ "ld1 { v8.h }[6], [x15]\n"
"ld1 { v12.h }[6], [x25]\n"
"ld1 { v16.h }[6], [x24]\n"
"ld1 { v20.h }[6], [x23]\n"
@@ -4643,9 +4643,9 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"ld1 { v28.h }[6], [x21]\n"
"b 264f\n"
"261:" // Height 6: Partial accumulate: partial_1_4
- "mov x20, #0x8\n"
+ "mov x19, #0x8\n"
"tbz x8, #0, 264f\n"
- "ld1 { v8.h }[4], [x16]\n"
+ "ld1 { v8.h }[4], [x15]\n"
"ld1 { v12.h }[4], [x25]\n"
"ld1 { v16.h }[4], [x24]\n"
"ld1 { v20.h }[4], [x23]\n"
@@ -4654,15 +4654,15 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 264f\n"
"262:" // Height 6: Partial accumulate: partial_2_0
"tbz x8, #1, 263f\n"
- "ldr s8, [x16], #0x4\n"
- "mov x20, #0x4\n"
+ "ldr s8, [x15], #0x4\n"
"ldr s12, [x25], #0x4\n"
+ "mov x19, #0x4\n"
"ldr s16, [x24], #0x4\n"
"ldr s20, [x23], #0x4\n"
"ldr s24, [x22], #0x4\n"
"ldr s28, [x21], #0x4\n"
"tbz x8, #0, 264f\n"
- "ld1 { v8.h }[2], [x16]\n"
+ "ld1 { v8.h }[2], [x15]\n"
"ld1 { v12.h }[2], [x25]\n"
"ld1 { v16.h }[2], [x24]\n"
"ld1 { v20.h }[2], [x23]\n"
@@ -4670,21 +4670,21 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"ld1 { v28.h }[2], [x21]\n"
"b 264f\n"
"263:" // Height 6: Partial accumulate: partial_1_0
- "ldr h8, [x16, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr h8, [x15, #0x0]\n"
+ "mov x19, #0x0\n"
"ldr h12, [x25, #0x0]\n"
"ldr h16, [x24, #0x0]\n"
"ldr h20, [x23, #0x0]\n"
"ldr h24, [x22, #0x0]\n"
"ldr h28, [x21, #0x0]\n"
"264:" // Height 6: Partial accumulate: Done
- "sub x16, x16, x20\n"
+ "sub x15, x15, x19\n"
"b 267f\n"
"265:" // Height 6: full accumulate
- "ldr q8, [x16, #0x0]\n"
- "ldr q9, [x16, #0x10]\n"
- "ldr q10, [x16, #0x20]\n"
- "ldr q11, [x16, #0x30]\n"
+ "ldr q8, [x15, #0x0]\n"
+ "ldr q9, [x15, #0x10]\n"
+ "ldr q10, [x15, #0x20]\n"
+ "ldr q11, [x15, #0x30]\n"
"ldr q12, [x25, #0x0]\n"
"ldr q13, [x25, #0x10]\n"
"ldr q14, [x25, #0x20]\n"
@@ -4732,404 +4732,404 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"movi v30.16b, #0x0\n"
"movi v31.16b, #0x0\n"
"267:" // Height 6: setup done
- "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
"268:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w13, [x20, x14, LSL #0x2]\n"
"tbz %x[flags], #3, 269f\n"
- "ldr x21, [%x[input_ptr], x15, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x13, [x21, #0x0]\n"
- "ldr x9, [x21, #0x8]\n"
- "ldr x27, [x21, #0x10]\n"
- "ldr x25, [x21, #0x18]\n"
- "ldr x23, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x15, 270f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x13, x13, x20, LSL #1\n"
- "add x9, x9, x20, LSL #1\n"
- "add x27, x27, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
- "add x21, x21, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x12, [x20, #0x0]\n"
+ "ldr x28, [x20, #0x8]\n"
+ "ldr x26, [x20, #0x10]\n"
+ "ldr x24, [x20, #0x18]\n"
+ "ldr x22, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x14, 270f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x12, x12, x19, LSL #1\n"
+ "add x28, x28, x19, LSL #1\n"
+ "add x26, x26, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x20, x20, x19, LSL #1\n"
"b 270f\n"
"269:" // Height 6: setup direct input
- "mov x13, %x[input_ptr]\n"
- "add x9, x13, x20, LSL #1\n"
- "add x27, x9, x20, LSL #1\n"
- "add x25, x27, x20, LSL #1\n"
- "add x23, x25, x20, LSL #1\n"
- "add x21, x23, x20, LSL #1\n"
+ "mov x12, %x[input_ptr]\n"
+ "add x28, x12, x19, LSL #1\n"
+ "add x26, x28, x19, LSL #1\n"
+ "add x24, x26, x19, LSL #1\n"
+ "add x22, x24, x19, LSL #1\n"
+ "add x20, x22, x19, LSL #1\n"
"270:" // Height 6: input setup done
- "cmp x14, #0x8\n"
+ "cmp x13, #0x8\n"
"blt 273f\n"
- "ldr q0, [x13, #0x0]\n"
- "cmp x14, #0x10\n"
- "ldr q1, [x9, #0x0]\n"
- "ldr q2, [x27, #0x0]\n"
- "ldr q3, [x25, #0x0]\n"
- "ldr q4, [x23, #0x0]\n"
- "ldr q5, [x21, #0x0]\n"
+ "ldr q0, [x12, #0x0]\n"
+ "ldr q1, [x28, #0x0]\n"
+ "cmp x13, #0x10\n"
+ "ldr q2, [x26, #0x0]\n"
+ "ldr q3, [x24, #0x0]\n"
+ "ldr q4, [x22, #0x0]\n"
+ "ldr q5, [x20, #0x0]\n"
"ldr q6, [x17, #0x0]\n"
- "ldr q7, [x17, #0x10]\n"
"blt 272f\n"
"271:" // Height 6: Multiply loop: Main loop head
"fmla v8.8h, v6.8h, v0.h[0]\n"
- "ldr x12, [x17, #0x28]\n"
+ "ldr d7, [x17, #0x10]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
- "ldr x11, [x17, #0x38]\n"
+ "ldr x11, [x17, #0x18]\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
- "add x13, x13, #0x10\n"
+ "ldr x10, [x17, #0x28]\n"
"fmla v20.8h, v6.8h, v3.h[0]\n"
- "add x9, x9, #0x10\n"
+ "add x12, x12, #0x10\n"
"fmla v24.8h, v6.8h, v4.h[0]\n"
- "add x27, x27, #0x10\n"
+ "mov v7.d[1], x11\n"
"fmla v28.8h, v6.8h, v5.h[0]\n"
- "ldr d6, [x17, #0x20]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
- "mov v6.d[1], x12\n"
+ "ldr d6, [x17, #0x20]\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
- "ldr x12, [x17, #0x48]\n"
+ "ldr x11, [x17, #0x38]\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
- "add x25, x25, #0x10\n"
+ "ldr x9, [x12, #0x8]\n"
"fmla v21.8h, v7.8h, v3.h[0]\n"
- "add x23, x23, #0x10\n"
+ "mov v6.d[1], x10\n"
"fmla v25.8h, v7.8h, v4.h[0]\n"
- "add x21, x21, #0x10\n"
+ "ldr x10, [x17, #0x48]\n"
"fmla v29.8h, v7.8h, v5.h[0]\n"
"ldr d7, [x17, #0x30]\n"
- "mov v7.d[1], x11\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
+ "add x28, x28, #0x10\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
- "ldr x11, [x17, #0x58]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
"fmla v18.8h, v6.8h, v2.h[0]\n"
- "ldr x10, [x13, #0x8]\n"
+ "mov v7.d[1], x11\n"
"fmla v22.8h, v6.8h, v3.h[0]\n"
- "ldr x28, [x9, #0x8]\n"
+ "ldr x11, [x17, #0x58]\n"
"fmla v26.8h, v6.8h, v4.h[0]\n"
- "ldr x26, [x27, #0x8]\n"
+ "ldr x27, [x28, #0x8]\n"
"fmla v30.8h, v6.8h, v5.h[0]\n"
"ldr d6, [x17, #0x40]\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
- "mov v6.d[1], x12\n"
+ "add x26, x26, #0x10\n"
"fmla v15.8h, v7.8h, v1.h[0]\n"
- "ldr x12, [x17, #0x68]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"fmla v19.8h, v7.8h, v2.h[0]\n"
- "ldr x24, [x25, #0x8]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.8h, v7.8h, v3.h[0]\n"
- "ldr x22, [x23, #0x8]\n"
+ "ldr x10, [x17, #0x68]\n"
"fmla v27.8h, v7.8h, v4.h[0]\n"
- "ldr x20, [x21, #0x8]\n"
+ "ldr x25, [x26, #0x8]\n"
"fmla v31.8h, v7.8h, v5.h[0]\n"
"ldr d7, [x17, #0x50]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[1]\n"
+ "add x24, x24, #0x10\n"
"fmla v12.8h, v6.8h, v1.h[1]\n"
- "ldr x11, [x17, #0x78]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v16.8h, v6.8h, v2.h[1]\n"
- "sub x14, x14, #0x8\n"
+ "mov v7.d[1], x11\n"
"fmla v20.8h, v6.8h, v3.h[1]\n"
- "cmp x14, #0x10\n"
+ "ldr x11, [x17, #0x78]\n"
"fmla v24.8h, v6.8h, v4.h[1]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "ldr x23, [x24, #0x8]\n"
"fmla v28.8h, v6.8h, v5.h[1]\n"
"ldr d6, [x17, #0x60]\n"
"fmla v9.8h, v7.8h, v0.h[1]\n"
- "mov v6.d[1], x12\n"
+ "add x22, x22, #0x10\n"
"fmla v13.8h, v7.8h, v1.h[1]\n"
- "ldr x12, [x17, #0x88]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
"fmla v17.8h, v7.8h, v2.h[1]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "mov v6.d[1], x10\n"
"fmla v21.8h, v7.8h, v3.h[1]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "ldr x10, [x17, #0x88]\n"
"fmla v25.8h, v7.8h, v4.h[1]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr x21, [x22, #0x8]\n"
"fmla v29.8h, v7.8h, v5.h[1]\n"
"ldr d7, [x17, #0x70]\n"
- "mov v7.d[1], x11\n"
"fmla v10.8h, v6.8h, v0.h[1]\n"
+ "add x20, x20, #0x10\n"
"fmla v14.8h, v6.8h, v1.h[1]\n"
- "ldr x11, [x17, #0x98]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
"fmla v18.8h, v6.8h, v2.h[1]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "mov v7.d[1], x11\n"
"fmla v22.8h, v6.8h, v3.h[1]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
+ "ldr x11, [x17, #0x98]\n"
"fmla v26.8h, v6.8h, v4.h[1]\n"
+ "ldr x19, [x20, #0x8]\n"
"fmla v30.8h, v6.8h, v5.h[1]\n"
"ldr d6, [x17, #0x80]\n"
"fmla v11.8h, v7.8h, v0.h[1]\n"
- "mov v6.d[1], x12\n"
+ "sub x13, x13, #0x8\n"
"fmla v15.8h, v7.8h, v1.h[1]\n"
- "ldr x12, [x17, #0xa8]\n"
+ "cmp x13, #0x10\n"
"fmla v19.8h, v7.8h, v2.h[1]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.8h, v7.8h, v3.h[1]\n"
+ "ldr x10, [x17, #0xa8]\n"
"fmla v27.8h, v7.8h, v4.h[1]\n"
"fmla v31.8h, v7.8h, v5.h[1]\n"
"ldr d7, [x17, #0x90]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[2]\n"
"fmla v12.8h, v6.8h, v1.h[2]\n"
- "ldr x11, [x17, #0xb8]\n"
"fmla v16.8h, v6.8h, v2.h[2]\n"
+ "mov v7.d[1], x11\n"
"fmla v20.8h, v6.8h, v3.h[2]\n"
+ "ldr x11, [x17, #0xb8]\n"
"fmla v24.8h, v6.8h, v4.h[2]\n"
"fmla v28.8h, v6.8h, v5.h[2]\n"
"ldr d6, [x17, #0xa0]\n"
"fmla v9.8h, v7.8h, v0.h[2]\n"
- "mov v6.d[1], x12\n"
"fmla v13.8h, v7.8h, v1.h[2]\n"
- "ldr x12, [x17, #0xc8]\n"
"fmla v17.8h, v7.8h, v2.h[2]\n"
+ "mov v6.d[1], x10\n"
"fmla v21.8h, v7.8h, v3.h[2]\n"
+ "ldr x10, [x17, #0xc8]\n"
"fmla v25.8h, v7.8h, v4.h[2]\n"
"fmla v29.8h, v7.8h, v5.h[2]\n"
"ldr d7, [x17, #0xb0]\n"
- "mov v7.d[1], x11\n"
"fmla v10.8h, v6.8h, v0.h[2]\n"
"fmla v14.8h, v6.8h, v1.h[2]\n"
- "ldr x11, [x17, #0xd8]\n"
"fmla v18.8h, v6.8h, v2.h[2]\n"
+ "mov v7.d[1], x11\n"
"fmla v22.8h, v6.8h, v3.h[2]\n"
+ "ldr x11, [x17, #0xd8]\n"
"fmla v26.8h, v6.8h, v4.h[2]\n"
"fmla v30.8h, v6.8h, v5.h[2]\n"
"ldr d6, [x17, #0xc0]\n"
"fmla v11.8h, v7.8h, v0.h[2]\n"
- "mov v6.d[1], x12\n"
"fmla v15.8h, v7.8h, v1.h[2]\n"
- "ldr x12, [x17, #0xe8]\n"
"fmla v19.8h, v7.8h, v2.h[2]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.8h, v7.8h, v3.h[2]\n"
+ "ldr x10, [x17, #0xe8]\n"
"fmla v27.8h, v7.8h, v4.h[2]\n"
"fmla v31.8h, v7.8h, v5.h[2]\n"
"ldr d7, [x17, #0xd0]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[3]\n"
"fmla v12.8h, v6.8h, v1.h[3]\n"
- "ldr x11, [x17, #0xf8]\n"
"fmla v16.8h, v6.8h, v2.h[3]\n"
+ "mov v7.d[1], x11\n"
"fmla v20.8h, v6.8h, v3.h[3]\n"
+ "ldr x11, [x17, #0xf8]\n"
"fmla v24.8h, v6.8h, v4.h[3]\n"
"fmla v28.8h, v6.8h, v5.h[3]\n"
"ldr d6, [x17, #0xe0]\n"
"fmla v9.8h, v7.8h, v0.h[3]\n"
- "mov v6.d[1], x12\n"
"fmla v13.8h, v7.8h, v1.h[3]\n"
- "ldr x12, [x17, #0x108]\n"
"fmla v17.8h, v7.8h, v2.h[3]\n"
+ "mov v6.d[1], x10\n"
"fmla v21.8h, v7.8h, v3.h[3]\n"
+ "ldr x10, [x17, #0x108]\n"
"fmla v25.8h, v7.8h, v4.h[3]\n"
"fmla v29.8h, v7.8h, v5.h[3]\n"
"ldr d7, [x17, #0xf0]\n"
- "mov v7.d[1], x11\n"
"fmla v10.8h, v6.8h, v0.h[3]\n"
"fmla v14.8h, v6.8h, v1.h[3]\n"
- "ldr x11, [x17, #0x118]\n"
"fmla v18.8h, v6.8h, v2.h[3]\n"
+ "mov v7.d[1], x11\n"
"fmla v22.8h, v6.8h, v3.h[3]\n"
+ "ldr x11, [x17, #0x118]\n"
"fmla v26.8h, v6.8h, v4.h[3]\n"
"fmla v30.8h, v6.8h, v5.h[3]\n"
"ldr d6, [x17, #0x100]\n"
"fmla v11.8h, v7.8h, v0.h[3]\n"
- "mov v6.d[1], x12\n"
"fmla v15.8h, v7.8h, v1.h[3]\n"
- "ldr x12, [x17, #0x128]\n"
"fmla v19.8h, v7.8h, v2.h[3]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.8h, v7.8h, v3.h[3]\n"
+ "ldr x10, [x17, #0x128]\n"
"fmla v27.8h, v7.8h, v4.h[3]\n"
"fmla v31.8h, v7.8h, v5.h[3]\n"
"ldr d7, [x17, #0x110]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[4]\n"
"fmla v12.8h, v6.8h, v1.h[4]\n"
- "ldr x11, [x17, #0x138]\n"
"fmla v16.8h, v6.8h, v2.h[4]\n"
+ "mov v7.d[1], x11\n"
"fmla v20.8h, v6.8h, v3.h[4]\n"
+ "ldr x11, [x17, #0x138]\n"
"fmla v24.8h, v6.8h, v4.h[4]\n"
"fmla v28.8h, v6.8h, v5.h[4]\n"
"ldr d6, [x17, #0x120]\n"
"fmla v9.8h, v7.8h, v0.h[4]\n"
- "mov v6.d[1], x12\n"
"fmla v13.8h, v7.8h, v1.h[4]\n"
- "ldr x12, [x17, #0x148]\n"
"fmla v17.8h, v7.8h, v2.h[4]\n"
+ "mov v6.d[1], x10\n"
"fmla v21.8h, v7.8h, v3.h[4]\n"
+ "ldr x10, [x17, #0x148]\n"
"fmla v25.8h, v7.8h, v4.h[4]\n"
"fmla v29.8h, v7.8h, v5.h[4]\n"
"ldr d7, [x17, #0x130]\n"
- "mov v7.d[1], x11\n"
"fmla v10.8h, v6.8h, v0.h[4]\n"
"fmla v14.8h, v6.8h, v1.h[4]\n"
- "ldr x11, [x17, #0x158]\n"
"fmla v18.8h, v6.8h, v2.h[4]\n"
+ "mov v7.d[1], x11\n"
"fmla v22.8h, v6.8h, v3.h[4]\n"
+ "ldr x11, [x17, #0x158]\n"
"fmla v26.8h, v6.8h, v4.h[4]\n"
"fmla v30.8h, v6.8h, v5.h[4]\n"
"ldr d6, [x17, #0x140]\n"
"fmla v11.8h, v7.8h, v0.h[4]\n"
- "mov v6.d[1], x12\n"
"fmla v15.8h, v7.8h, v1.h[4]\n"
- "ldr x12, [x17, #0x168]\n"
"fmla v19.8h, v7.8h, v2.h[4]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.8h, v7.8h, v3.h[4]\n"
+ "ldr x10, [x17, #0x168]\n"
"fmla v27.8h, v7.8h, v4.h[4]\n"
"fmla v31.8h, v7.8h, v5.h[4]\n"
"ldr d7, [x17, #0x150]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[5]\n"
"fmla v12.8h, v6.8h, v1.h[5]\n"
- "ldr x11, [x17, #0x178]\n"
"fmla v16.8h, v6.8h, v2.h[5]\n"
+ "mov v7.d[1], x11\n"
"fmla v20.8h, v6.8h, v3.h[5]\n"
+ "ldr x11, [x17, #0x178]\n"
"fmla v24.8h, v6.8h, v4.h[5]\n"
"fmla v28.8h, v6.8h, v5.h[5]\n"
"ldr d6, [x17, #0x160]\n"
"fmla v9.8h, v7.8h, v0.h[5]\n"
- "mov v6.d[1], x12\n"
"fmla v13.8h, v7.8h, v1.h[5]\n"
- "ldr x12, [x17, #0x188]\n"
"fmla v17.8h, v7.8h, v2.h[5]\n"
+ "mov v6.d[1], x10\n"
"fmla v21.8h, v7.8h, v3.h[5]\n"
+ "ldr x10, [x17, #0x188]\n"
"fmla v25.8h, v7.8h, v4.h[5]\n"
"fmla v29.8h, v7.8h, v5.h[5]\n"
"ldr d7, [x17, #0x170]\n"
- "mov v7.d[1], x11\n"
"fmla v10.8h, v6.8h, v0.h[5]\n"
"fmla v14.8h, v6.8h, v1.h[5]\n"
- "ldr x11, [x17, #0x198]\n"
"fmla v18.8h, v6.8h, v2.h[5]\n"
+ "mov v7.d[1], x11\n"
"fmla v22.8h, v6.8h, v3.h[5]\n"
+ "ldr x11, [x17, #0x198]\n"
"fmla v26.8h, v6.8h, v4.h[5]\n"
"fmla v30.8h, v6.8h, v5.h[5]\n"
"ldr d6, [x17, #0x180]\n"
"fmla v11.8h, v7.8h, v0.h[5]\n"
- "mov v6.d[1], x12\n"
"fmla v15.8h, v7.8h, v1.h[5]\n"
- "ldr x12, [x17, #0x1a8]\n"
"fmla v19.8h, v7.8h, v2.h[5]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.8h, v7.8h, v3.h[5]\n"
+ "ldr x10, [x17, #0x1a8]\n"
"fmla v27.8h, v7.8h, v4.h[5]\n"
"fmla v31.8h, v7.8h, v5.h[5]\n"
"ldr d7, [x17, #0x190]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[6]\n"
"fmla v12.8h, v6.8h, v1.h[6]\n"
- "ldr x11, [x17, #0x1b8]\n"
"fmla v16.8h, v6.8h, v2.h[6]\n"
+ "mov v7.d[1], x11\n"
"fmla v20.8h, v6.8h, v3.h[6]\n"
+ "ldr x11, [x17, #0x1b8]\n"
"fmla v24.8h, v6.8h, v4.h[6]\n"
"fmla v28.8h, v6.8h, v5.h[6]\n"
"ldr d6, [x17, #0x1a0]\n"
"fmla v9.8h, v7.8h, v0.h[6]\n"
- "mov v6.d[1], x12\n"
"fmla v13.8h, v7.8h, v1.h[6]\n"
- "ldr x12, [x17, #0x1c8]\n"
"fmla v17.8h, v7.8h, v2.h[6]\n"
+ "mov v6.d[1], x10\n"
"fmla v21.8h, v7.8h, v3.h[6]\n"
+ "ldr x10, [x17, #0x1c8]\n"
"fmla v25.8h, v7.8h, v4.h[6]\n"
"fmla v29.8h, v7.8h, v5.h[6]\n"
"ldr d7, [x17, #0x1b0]\n"
- "mov v7.d[1], x11\n"
"fmla v10.8h, v6.8h, v0.h[6]\n"
"fmla v14.8h, v6.8h, v1.h[6]\n"
- "ldr x11, [x17, #0x1d8]\n"
"fmla v18.8h, v6.8h, v2.h[6]\n"
+ "mov v7.d[1], x11\n"
"fmla v22.8h, v6.8h, v3.h[6]\n"
+ "ldr x11, [x17, #0x1d8]\n"
"fmla v26.8h, v6.8h, v4.h[6]\n"
"fmla v30.8h, v6.8h, v5.h[6]\n"
"ldr d6, [x17, #0x1c0]\n"
"fmla v11.8h, v7.8h, v0.h[6]\n"
- "mov v6.d[1], x12\n"
"fmla v15.8h, v7.8h, v1.h[6]\n"
- "ldr x12, [x17, #0x1e8]\n"
"fmla v19.8h, v7.8h, v2.h[6]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.8h, v7.8h, v3.h[6]\n"
+ "ldr x10, [x17, #0x1e8]\n"
"fmla v27.8h, v7.8h, v4.h[6]\n"
"fmla v31.8h, v7.8h, v5.h[6]\n"
"ldr d7, [x17, #0x1d0]\n"
- "mov v7.d[1], x11\n"
"fmla v8.8h, v6.8h, v0.h[7]\n"
"fmla v12.8h, v6.8h, v1.h[7]\n"
- "ldr x11, [x17, #0x1f8]\n"
"fmla v16.8h, v6.8h, v2.h[7]\n"
+ "mov v7.d[1], x11\n"
"fmla v20.8h, v6.8h, v3.h[7]\n"
+ "ldr x11, [x17, #0x1f8]\n"
"fmla v24.8h, v6.8h, v4.h[7]\n"
"fmla v28.8h, v6.8h, v5.h[7]\n"
"ldr d6, [x17, #0x1e0]\n"
"fmla v9.8h, v7.8h, v0.h[7]\n"
- "mov v6.d[1], x12\n"
"fmla v13.8h, v7.8h, v1.h[7]\n"
"fmla v17.8h, v7.8h, v2.h[7]\n"
+ "mov v6.d[1], x10\n"
"fmla v21.8h, v7.8h, v3.h[7]\n"
"fmla v25.8h, v7.8h, v4.h[7]\n"
"fmla v29.8h, v7.8h, v5.h[7]\n"
"ldr d7, [x17, #0x1f0]\n"
- "mov v7.d[1], x11\n"
- "add x17, x17, #0x200\n"
"fmla v10.8h, v6.8h, v0.h[7]\n"
- "ldr x12, [x17, #0x8]\n"
+ "add x17, x17, #0x200\n"
"fmla v14.8h, v6.8h, v1.h[7]\n"
- "ldr x11, [x17, #0x18]\n"
+ "ldr x10, [x17, #0x8]\n"
"fmla v18.8h, v6.8h, v2.h[7]\n"
+ "mov v7.d[1], x11\n"
"fmla v22.8h, v6.8h, v3.h[7]\n"
"fmla v26.8h, v6.8h, v4.h[7]\n"
"fmla v30.8h, v6.8h, v5.h[7]\n"
"ldr d6, [x17, #0x0]\n"
"fmla v11.8h, v7.8h, v0.h[7]\n"
- "ldr d0, [x13, #0x0]\n"
+ "ldr d0, [x12, #0x0]\n"
"fmla v15.8h, v7.8h, v1.h[7]\n"
- "ldr d1, [x9, #0x0]\n"
+ "ldr d1, [x28, #0x0]\n"
"fmla v19.8h, v7.8h, v2.h[7]\n"
- "ldr d2, [x27, #0x0]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.8h, v7.8h, v3.h[7]\n"
- "ldr d3, [x25, #0x0]\n"
+ "mov v0.d[1], x9\n"
"fmla v27.8h, v7.8h, v4.h[7]\n"
- "ldr d4, [x23, #0x0]\n"
+ "mov v1.d[1], x27\n"
"fmla v31.8h, v7.8h, v5.h[7]\n"
- "ldr d5, [x21, #0x0]\n"
- "ldr d7, [x17, #0x10]\n"
- "mov v6.d[1], x12\n"
- "mov v0.d[1], x10\n"
- "mov v1.d[1], x28\n"
- "mov v2.d[1], x26\n"
- "mov v3.d[1], x24\n"
- "mov v4.d[1], x22\n"
- "mov v5.d[1], x20\n"
- "mov v7.d[1], x11\n"
+ "ldr d2, [x26, #0x0]\n"
+ "ldr d3, [x24, #0x0]\n"
+ "ldr d4, [x22, #0x0]\n"
+ "mov v2.d[1], x25\n"
+ "ldr d5, [x20, #0x0]\n"
+ "mov v3.d[1], x23\n"
+ "mov v4.d[1], x21\n"
+ "mov v5.d[1], x19\n"
"bge 271b\n"
"272:" // Height 6: Multiply loop: Single iteration only
"fmla v8.8h, v6.8h, v0.h[0]\n"
- "add x13, x13, #0x10\n"
+ "ldr q7, [x17, #0x10]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
- "add x9, x9, #0x10\n"
+ "sub x13, x13, #0x8\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
- "add x27, x27, #0x10\n"
+ "add x12, x12, #0x10\n"
"fmla v20.8h, v6.8h, v3.h[0]\n"
- "add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
"fmla v24.8h, v6.8h, v4.h[0]\n"
- "add x23, x23, #0x10\n"
+ "add x28, x28, #0x10\n"
"fmla v28.8h, v6.8h, v5.h[0]\n"
- "ldr q6, [x17, #0x20]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
- "add x21, x21, #0x10\n"
+ "ldr q6, [x17, #0x20]\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
- "sub x14, x14, #0x8\n"
+ "add x26, x26, #0x10\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"fmla v21.8h, v7.8h, v3.h[0]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "add x24, x24, #0x10\n"
"fmla v25.8h, v7.8h, v4.h[0]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v29.8h, v7.8h, v5.h[0]\n"
"ldr q7, [x17, #0x30]\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "add x22, x22, #0x10\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
"fmla v18.8h, v6.8h, v2.h[0]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
+ "add x20, x20, #0x10\n"
"fmla v22.8h, v6.8h, v3.h[0]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
"fmla v26.8h, v6.8h, v4.h[0]\n"
"fmla v30.8h, v6.8h, v5.h[0]\n"
"ldr q6, [x17, #0x40]\n"
@@ -5336,18 +5336,18 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmla v27.8h, v7.8h, v4.h[7]\n"
"fmla v31.8h, v7.8h, v5.h[7]\n"
"273:" // Height 6: Multiply loop: Main loop skip
- "cbz x14, 275f\n"
+ "cbz x13, 275f\n"
"274:" // Height 6: Multiply loop: Odd block loop
- "ldr h0, [x13], #0x2\n"
- "sub x14, x14, #0x1\n"
- "ldr h1, [x9], #0x2\n"
- "ldr h2, [x27], #0x2\n"
- "ldr h3, [x25], #0x2\n"
- "ldr h4, [x23], #0x2\n"
- "ldr h5, [x21], #0x2\n"
+ "ldr h0, [x12], #0x2\n"
+ "sub x13, x13, #0x1\n"
+ "ldr h1, [x28], #0x2\n"
+ "ldr h2, [x26], #0x2\n"
+ "ldr h3, [x24], #0x2\n"
+ "ldr h4, [x22], #0x2\n"
+ "ldr h5, [x20], #0x2\n"
"ldr q6, [x17, #0x0]\n"
- "fmla v8.8h, v6.8h, v0.h[0]\n"
"ldr q7, [x17, #0x10]\n"
+ "fmla v8.8h, v6.8h, v0.h[0]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
"fmla v20.8h, v6.8h, v3.h[0]\n"
@@ -5374,27 +5374,29 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmla v23.8h, v7.8h, v3.h[0]\n"
"fmla v27.8h, v7.8h, v4.h[0]\n"
"fmla v31.8h, v7.8h, v5.h[0]\n"
- "cbnz x14, 274b\n"
+ "cbnz x13, 274b\n"
"275:" // Height 6: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x15, x15, #0x1\n"
- "cmp x15, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x14, x14, #0x1\n"
+ "cmp x14, x19\n"
"bne 268b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
- "prfm pstl1keep, [x16, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x15, #0x0]\n"
+ "add x25, x15, x19, LSL #1\n"
"prfm pstl1keep, [x25, #0x0]\n"
+ "add x24, x25, x19, LSL #1\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x19, LSL #1\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #1\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #1\n"
"prfm pstl1keep, [x21, #0x0]\n"
"tbz %x[flags], #1, 276f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v0.8h }, [x20]\n"
+ "add x20, %x[args_ptr], %[offset_min]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.8h }, [x20]\n"
+ "ld1r { v0.8h }, [x19]\n"
"fmin v8.8h, v8.8h, v0.8h\n"
"fmin v9.8h, v9.8h, v0.8h\n"
"fmin v10.8h, v10.8h, v0.8h\n"
@@ -5405,6 +5407,16 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmin v15.8h, v15.8h, v0.8h\n"
"fmin v16.8h, v16.8h, v0.8h\n"
"fmin v17.8h, v17.8h, v0.8h\n"
+ "fmax v8.8h, v8.8h, v1.8h\n"
+ "fmax v9.8h, v9.8h, v1.8h\n"
+ "fmax v10.8h, v10.8h, v1.8h\n"
+ "fmax v11.8h, v11.8h, v1.8h\n"
+ "fmax v12.8h, v12.8h, v1.8h\n"
+ "fmax v13.8h, v13.8h, v1.8h\n"
+ "fmax v14.8h, v14.8h, v1.8h\n"
+ "fmax v15.8h, v15.8h, v1.8h\n"
+ "fmax v16.8h, v16.8h, v1.8h\n"
+ "fmax v17.8h, v17.8h, v1.8h\n"
"fmin v18.8h, v18.8h, v0.8h\n"
"fmin v19.8h, v19.8h, v0.8h\n"
"fmin v20.8h, v20.8h, v0.8h\n"
@@ -5415,42 +5427,30 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmin v25.8h, v25.8h, v0.8h\n"
"fmin v26.8h, v26.8h, v0.8h\n"
"fmin v27.8h, v27.8h, v0.8h\n"
+ "fmax v18.8h, v18.8h, v1.8h\n"
+ "fmax v19.8h, v19.8h, v1.8h\n"
+ "fmax v20.8h, v20.8h, v1.8h\n"
+ "fmax v21.8h, v21.8h, v1.8h\n"
+ "fmax v22.8h, v22.8h, v1.8h\n"
+ "fmax v23.8h, v23.8h, v1.8h\n"
+ "fmax v24.8h, v24.8h, v1.8h\n"
+ "fmax v25.8h, v25.8h, v1.8h\n"
+ "fmax v26.8h, v26.8h, v1.8h\n"
+ "fmax v27.8h, v27.8h, v1.8h\n"
"fmin v28.8h, v28.8h, v0.8h\n"
"fmin v29.8h, v29.8h, v0.8h\n"
"fmin v30.8h, v30.8h, v0.8h\n"
"fmin v31.8h, v31.8h, v0.8h\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.8h }, [x20]\n"
- "fmax v8.8h, v8.8h, v0.8h\n"
- "fmax v9.8h, v9.8h, v0.8h\n"
- "fmax v10.8h, v10.8h, v0.8h\n"
- "fmax v11.8h, v11.8h, v0.8h\n"
- "fmax v12.8h, v12.8h, v0.8h\n"
- "fmax v13.8h, v13.8h, v0.8h\n"
- "fmax v14.8h, v14.8h, v0.8h\n"
- "fmax v15.8h, v15.8h, v0.8h\n"
- "fmax v16.8h, v16.8h, v0.8h\n"
- "fmax v17.8h, v17.8h, v0.8h\n"
- "fmax v18.8h, v18.8h, v0.8h\n"
- "fmax v19.8h, v19.8h, v0.8h\n"
- "fmax v20.8h, v20.8h, v0.8h\n"
- "fmax v21.8h, v21.8h, v0.8h\n"
- "fmax v22.8h, v22.8h, v0.8h\n"
- "fmax v23.8h, v23.8h, v0.8h\n"
- "fmax v24.8h, v24.8h, v0.8h\n"
- "fmax v25.8h, v25.8h, v0.8h\n"
- "fmax v26.8h, v26.8h, v0.8h\n"
- "fmax v27.8h, v27.8h, v0.8h\n"
- "fmax v28.8h, v28.8h, v0.8h\n"
- "fmax v29.8h, v29.8h, v0.8h\n"
- "fmax v30.8h, v30.8h, v0.8h\n"
- "fmax v31.8h, v31.8h, v0.8h\n"
+ "fmax v28.8h, v28.8h, v1.8h\n"
+ "fmax v29.8h, v29.8h, v1.8h\n"
+ "fmax v30.8h, v30.8h, v1.8h\n"
+ "fmax v31.8h, v31.8h, v1.8h\n"
"276:" // Height 6: No activation
"cmp x8, #0x20\n"
"bge 293f\n"
"tbz x8, #4, 284f\n"
- "st1 { v8.8h }, [x16], #0x10\n"
- "st1 { v9.8h }, [x16], #0x10\n"
+ "st1 { v8.8h }, [x15], #0x10\n"
+ "st1 { v9.8h }, [x15], #0x10\n"
"st1 { v12.8h }, [x25], #0x10\n"
"st1 { v13.8h }, [x25], #0x10\n"
"st1 { v16.8h }, [x24], #0x10\n"
@@ -5462,28 +5462,28 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"st1 { v28.8h }, [x21], #0x10\n"
"st1 { v29.8h }, [x21], #0x10\n"
"tbz x8, #3, 280f\n"
- "st1 { v10.8h }, [x16], #0x10\n"
+ "st1 { v10.8h }, [x15], #0x10\n"
"st1 { v14.8h }, [x25], #0x10\n"
"st1 { v18.8h }, [x24], #0x10\n"
"st1 { v22.8h }, [x23], #0x10\n"
"st1 { v26.8h }, [x22], #0x10\n"
"st1 { v30.8h }, [x21], #0x10\n"
"tbz x8, #2, 278f\n"
- "str d11, [x16], #0x8\n"
+ "str d11, [x15], #0x8\n"
"str d15, [x25], #0x8\n"
"str d19, [x24], #0x8\n"
"str d23, [x23], #0x8\n"
"str d27, [x22], #0x8\n"
"str d31, [x21], #0x8\n"
"tbz x8, #1, 277f\n"
- "st1 { v11.s }[2], [x16], #0x4\n"
+ "st1 { v11.s }[2], [x15], #0x4\n"
"st1 { v15.s }[2], [x25], #0x4\n"
"st1 { v19.s }[2], [x24], #0x4\n"
"st1 { v23.s }[2], [x23], #0x4\n"
"st1 { v27.s }[2], [x22], #0x4\n"
"st1 { v31.s }[2], [x21], #0x4\n"
"tbz x8, #0, 292f\n"
- "st1 { v11.h }[6], [x16]\n"
+ "st1 { v11.h }[6], [x15]\n"
"st1 { v15.h }[6], [x25]\n"
"st1 { v19.h }[6], [x24]\n"
"st1 { v23.h }[6], [x23]\n"
@@ -5492,7 +5492,7 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 292f\n"
"277:" // Height 6: Partial direct writeback: partial_1_28
"tbz x8, #0, 292f\n"
- "st1 { v11.h }[4], [x16]\n"
+ "st1 { v11.h }[4], [x15]\n"
"st1 { v15.h }[4], [x25]\n"
"st1 { v19.h }[4], [x24]\n"
"st1 { v23.h }[4], [x23]\n"
@@ -5501,14 +5501,14 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 292f\n"
"278:" // Height 6: Partial direct writeback: partial_2_24
"tbz x8, #1, 279f\n"
- "str s11, [x16], #0x4\n"
+ "str s11, [x15], #0x4\n"
"str s15, [x25], #0x4\n"
"str s19, [x24], #0x4\n"
"str s23, [x23], #0x4\n"
"str s27, [x22], #0x4\n"
"str s31, [x21], #0x4\n"
"tbz x8, #0, 292f\n"
- "st1 { v11.h }[2], [x16]\n"
+ "st1 { v11.h }[2], [x15]\n"
"st1 { v15.h }[2], [x25]\n"
"st1 { v19.h }[2], [x24]\n"
"st1 { v23.h }[2], [x23]\n"
@@ -5517,7 +5517,7 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 292f\n"
"279:" // Height 6: Partial direct writeback: partial_1_24
"tbz x8, #0, 292f\n"
- "str h11, [x16, #0x0]\n"
+ "str h11, [x15, #0x0]\n"
"str h15, [x25, #0x0]\n"
"str h19, [x24, #0x0]\n"
"str h23, [x23, #0x0]\n"
@@ -5526,21 +5526,21 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 292f\n"
"280:" // Height 6: Partial direct writeback: partial_4_16
"tbz x8, #2, 282f\n"
- "str d10, [x16], #0x8\n"
+ "str d10, [x15], #0x8\n"
"str d14, [x25], #0x8\n"
"str d18, [x24], #0x8\n"
"str d22, [x23], #0x8\n"
"str d26, [x22], #0x8\n"
"str d30, [x21], #0x8\n"
"tbz x8, #1, 281f\n"
- "st1 { v10.s }[2], [x16], #0x4\n"
+ "st1 { v10.s }[2], [x15], #0x4\n"
"st1 { v14.s }[2], [x25], #0x4\n"
"st1 { v18.s }[2], [x24], #0x4\n"
"st1 { v22.s }[2], [x23], #0x4\n"
"st1 { v26.s }[2], [x22], #0x4\n"
"st1 { v30.s }[2], [x21], #0x4\n"
"tbz x8, #0, 292f\n"
- "st1 { v10.h }[6], [x16]\n"
+ "st1 { v10.h }[6], [x15]\n"
"st1 { v14.h }[6], [x25]\n"
"st1 { v18.h }[6], [x24]\n"
"st1 { v22.h }[6], [x23]\n"
@@ -5549,7 +5549,7 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 292f\n"
"281:" // Height 6: Partial direct writeback: partial_1_20
"tbz x8, #0, 292f\n"
- "st1 { v10.h }[4], [x16]\n"
+ "st1 { v10.h }[4], [x15]\n"
"st1 { v14.h }[4], [x25]\n"
"st1 { v18.h }[4], [x24]\n"
"st1 { v22.h }[4], [x23]\n"
@@ -5558,14 +5558,14 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 292f\n"
"282:" // Height 6: Partial direct writeback: partial_2_16
"tbz x8, #1, 283f\n"
- "str s10, [x16], #0x4\n"
+ "str s10, [x15], #0x4\n"
"str s14, [x25], #0x4\n"
"str s18, [x24], #0x4\n"
"str s22, [x23], #0x4\n"
"str s26, [x22], #0x4\n"
"str s30, [x21], #0x4\n"
"tbz x8, #0, 292f\n"
- "st1 { v10.h }[2], [x16]\n"
+ "st1 { v10.h }[2], [x15]\n"
"st1 { v14.h }[2], [x25]\n"
"st1 { v18.h }[2], [x24]\n"
"st1 { v22.h }[2], [x23]\n"
@@ -5574,7 +5574,7 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 292f\n"
"283:" // Height 6: Partial direct writeback: partial_1_16
"tbz x8, #0, 292f\n"
- "str h10, [x16, #0x0]\n"
+ "str h10, [x15, #0x0]\n"
"str h14, [x25, #0x0]\n"
"str h18, [x24, #0x0]\n"
"str h22, [x23, #0x0]\n"
@@ -5583,28 +5583,28 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 292f\n"
"284:" // Height 6: Partial direct writeback: partial_8_0
"tbz x8, #3, 288f\n"
- "st1 { v8.8h }, [x16], #0x10\n"
+ "st1 { v8.8h }, [x15], #0x10\n"
"st1 { v12.8h }, [x25], #0x10\n"
"st1 { v16.8h }, [x24], #0x10\n"
"st1 { v20.8h }, [x23], #0x10\n"
"st1 { v24.8h }, [x22], #0x10\n"
"st1 { v28.8h }, [x21], #0x10\n"
"tbz x8, #2, 286f\n"
- "str d9, [x16], #0x8\n"
+ "str d9, [x15], #0x8\n"
"str d13, [x25], #0x8\n"
"str d17, [x24], #0x8\n"
"str d21, [x23], #0x8\n"
"str d25, [x22], #0x8\n"
"str d29, [x21], #0x8\n"
"tbz x8, #1, 285f\n"
- "st1 { v9.s }[2], [x16], #0x4\n"
+ "st1 { v9.s }[2], [x15], #0x4\n"
"st1 { v13.s }[2], [x25], #0x4\n"
"st1 { v17.s }[2], [x24], #0x4\n"
"st1 { v21.s }[2], [x23], #0x4\n"
"st1 { v25.s }[2], [x22], #0x4\n"
"st1 { v29.s }[2], [x21], #0x4\n"
"tbz x8, #0, 292f\n"
- "st1 { v9.h }[6], [x16]\n"
+ "st1 { v9.h }[6], [x15]\n"
"st1 { v13.h }[6], [x25]\n"
"st1 { v17.h }[6], [x24]\n"
"st1 { v21.h }[6], [x23]\n"
@@ -5613,7 +5613,7 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 292f\n"
"285:" // Height 6: Partial direct writeback: partial_1_12
"tbz x8, #0, 292f\n"
- "st1 { v9.h }[4], [x16]\n"
+ "st1 { v9.h }[4], [x15]\n"
"st1 { v13.h }[4], [x25]\n"
"st1 { v17.h }[4], [x24]\n"
"st1 { v21.h }[4], [x23]\n"
@@ -5622,14 +5622,14 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 292f\n"
"286:" // Height 6: Partial direct writeback: partial_2_8
"tbz x8, #1, 287f\n"
- "str s9, [x16], #0x4\n"
+ "str s9, [x15], #0x4\n"
"str s13, [x25], #0x4\n"
"str s17, [x24], #0x4\n"
"str s21, [x23], #0x4\n"
"str s25, [x22], #0x4\n"
"str s29, [x21], #0x4\n"
"tbz x8, #0, 292f\n"
- "st1 { v9.h }[2], [x16]\n"
+ "st1 { v9.h }[2], [x15]\n"
"st1 { v13.h }[2], [x25]\n"
"st1 { v17.h }[2], [x24]\n"
"st1 { v21.h }[2], [x23]\n"
@@ -5638,7 +5638,7 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 292f\n"
"287:" // Height 6: Partial direct writeback: partial_1_8
"tbz x8, #0, 292f\n"
- "str h9, [x16, #0x0]\n"
+ "str h9, [x15, #0x0]\n"
"str h13, [x25, #0x0]\n"
"str h17, [x24, #0x0]\n"
"str h21, [x23, #0x0]\n"
@@ -5647,21 +5647,21 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 292f\n"
"288:" // Height 6: Partial direct writeback: partial_4_0
"tbz x8, #2, 290f\n"
- "str d8, [x16], #0x8\n"
+ "str d8, [x15], #0x8\n"
"str d12, [x25], #0x8\n"
"str d16, [x24], #0x8\n"
"str d20, [x23], #0x8\n"
"str d24, [x22], #0x8\n"
"str d28, [x21], #0x8\n"
"tbz x8, #1, 289f\n"
- "st1 { v8.s }[2], [x16], #0x4\n"
+ "st1 { v8.s }[2], [x15], #0x4\n"
"st1 { v12.s }[2], [x25], #0x4\n"
"st1 { v16.s }[2], [x24], #0x4\n"
"st1 { v20.s }[2], [x23], #0x4\n"
"st1 { v24.s }[2], [x22], #0x4\n"
"st1 { v28.s }[2], [x21], #0x4\n"
"tbz x8, #0, 292f\n"
- "st1 { v8.h }[6], [x16]\n"
+ "st1 { v8.h }[6], [x15]\n"
"st1 { v12.h }[6], [x25]\n"
"st1 { v16.h }[6], [x24]\n"
"st1 { v20.h }[6], [x23]\n"
@@ -5670,7 +5670,7 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 292f\n"
"289:" // Height 6: Partial direct writeback: partial_1_4
"tbz x8, #0, 292f\n"
- "st1 { v8.h }[4], [x16]\n"
+ "st1 { v8.h }[4], [x15]\n"
"st1 { v12.h }[4], [x25]\n"
"st1 { v16.h }[4], [x24]\n"
"st1 { v20.h }[4], [x23]\n"
@@ -5679,14 +5679,14 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"b 292f\n"
"290:" // Height 6: Partial direct writeback: partial_2_0
"tbz x8, #1, 291f\n"
- "str s8, [x16], #0x4\n"
+ "str s8, [x15], #0x4\n"
"str s12, [x25], #0x4\n"
"str s16, [x24], #0x4\n"
"str s20, [x23], #0x4\n"
"str s24, [x22], #0x4\n"
"str s28, [x21], #0x4\n"
"tbz x8, #0, 292f\n"
- "st1 { v8.h }[2], [x16]\n"
+ "st1 { v8.h }[2], [x15]\n"
"st1 { v12.h }[2], [x25]\n"
"st1 { v16.h }[2], [x24]\n"
"st1 { v20.h }[2], [x23]\n"
@@ -5694,7 +5694,7 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"st1 { v28.h }[2], [x21]\n"
"b 292f\n"
"291:" // Height 6: Partial direct writeback: partial_1_0
- "str h8, [x16, #0x0]\n"
+ "str h8, [x15, #0x0]\n"
"str h12, [x25, #0x0]\n"
"str h16, [x24, #0x0]\n"
"str h20, [x23, #0x0]\n"
@@ -5703,11 +5703,11 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"292:" // Height 6: Partial direct writeback: Done
"b 294f\n"
"293:" // Height 6: Full writeback
- "str q8, [x16, #0x0]\n"
- "str q9, [x16, #0x10]\n"
- "str q10, [x16, #0x20]\n"
- "str q11, [x16, #0x30]\n"
- "add x16, x16, #0x40\n"
+ "str q8, [x15, #0x0]\n"
+ "str q9, [x15, #0x10]\n"
+ "str q10, [x15, #0x20]\n"
+ "str q11, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
"str q12, [x25, #0x0]\n"
"str q13, [x25, #0x10]\n"
"str q14, [x25, #0x20]\n"
@@ -5733,20 +5733,20 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"bgt 247b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 296f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 295f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"295:" // Update direct input
- "mov x20, #0xc\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0xc\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"296:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32/generic.cpp
index 335308751f..8877306f40 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#if defined(__aarch64__) && (defined(FP16_KERNELS) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC))
@@ -101,138 +101,138 @@ void a64_hybrid_fp16_mla_6x32 (
"cmp %x[M], #0x2\n"
"bgt 99f\n"
"beq 50f\n"
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x9, %x[bias]\n"
+ "mov x28, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "cbz x12, 3f\n"
- "ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
- "add x12, x12, #0x40\n"
+ "cbz x9, 3f\n"
+ "ldr q8, [x9, #0x0]\n"
+ "ldr q9, [x9, #0x10]\n"
+ "ldr q10, [x9, #0x20]\n"
+ "ldr q11, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
"b 22f\n"
"3:" // Height 1: no bias
"tbz %x[flags], #0, 21f\n"
"cmp x11, #0x20\n"
"bge 20f\n"
"tbz x11, #4, 11f\n"
- "ld1 { v8.8h }, [x9], #0x10\n"
- "ld1 { v9.8h }, [x9], #0x10\n"
+ "ld1 { v8.8h }, [x28], #0x10\n"
+ "ld1 { v9.8h }, [x28], #0x10\n"
"tbz x11, #3, 7f\n"
- "ld1 { v10.8h }, [x9], #0x10\n"
+ "ld1 { v10.8h }, [x28], #0x10\n"
"tbz x11, #2, 5f\n"
- "ldr d11, [x9], #0x8\n"
+ "ldr d11, [x28], #0x8\n"
"tbz x11, #1, 4f\n"
- "ld1 { v11.s }[2], [x9], #0x4\n"
- "mov x20, #0x3c\n"
+ "mov x19, #0x3c\n"
+ "ld1 { v11.s }[2], [x28], #0x4\n"
"tbz x11, #0, 19f\n"
- "ld1 { v11.h }[6], [x9]\n"
+ "ld1 { v11.h }[6], [x28]\n"
"b 19f\n"
"4:" // Height 1: Partial accumulate: partial_1_28
- "mov x20, #0x38\n"
+ "mov x19, #0x38\n"
"tbz x11, #0, 19f\n"
- "ld1 { v11.h }[4], [x9]\n"
+ "ld1 { v11.h }[4], [x28]\n"
"b 19f\n"
"5:" // Height 1: Partial accumulate: partial_2_24
"tbz x11, #1, 6f\n"
- "ldr s11, [x9], #0x4\n"
- "mov x20, #0x34\n"
+ "ldr s11, [x28], #0x4\n"
+ "mov x19, #0x34\n"
"tbz x11, #0, 19f\n"
- "ld1 { v11.h }[2], [x9]\n"
+ "ld1 { v11.h }[2], [x28]\n"
"b 19f\n"
"6:" // Height 1: Partial accumulate: partial_1_24
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x11, #0, 19f\n"
- "ldr h11, [x9, #0x0]\n"
+ "ldr h11, [x28, #0x0]\n"
"b 19f\n"
"7:" // Height 1: Partial accumulate: partial_4_16
"tbz x11, #2, 9f\n"
- "ldr d10, [x9], #0x8\n"
+ "ldr d10, [x28], #0x8\n"
"tbz x11, #1, 8f\n"
- "ld1 { v10.s }[2], [x9], #0x4\n"
- "mov x20, #0x2c\n"
+ "ld1 { v10.s }[2], [x28], #0x4\n"
+ "mov x19, #0x2c\n"
"tbz x11, #0, 19f\n"
- "ld1 { v10.h }[6], [x9]\n"
+ "ld1 { v10.h }[6], [x28]\n"
"b 19f\n"
"8:" // Height 1: Partial accumulate: partial_1_20
- "mov x20, #0x28\n"
+ "mov x19, #0x28\n"
"tbz x11, #0, 19f\n"
- "ld1 { v10.h }[4], [x9]\n"
+ "ld1 { v10.h }[4], [x28]\n"
"b 19f\n"
"9:" // Height 1: Partial accumulate: partial_2_16
"tbz x11, #1, 10f\n"
- "ldr s10, [x9], #0x4\n"
- "mov x20, #0x24\n"
+ "ldr s10, [x28], #0x4\n"
+ "mov x19, #0x24\n"
"tbz x11, #0, 19f\n"
- "ld1 { v10.h }[2], [x9]\n"
+ "ld1 { v10.h }[2], [x28]\n"
"b 19f\n"
"10:" // Height 1: Partial accumulate: partial_1_16
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x11, #0, 19f\n"
- "ldr h10, [x9, #0x0]\n"
+ "ldr h10, [x28, #0x0]\n"
"b 19f\n"
"11:" // Height 1: Partial accumulate: partial_8_0
"tbz x11, #3, 15f\n"
- "ld1 { v8.8h }, [x9], #0x10\n"
+ "ld1 { v8.8h }, [x28], #0x10\n"
"tbz x11, #2, 13f\n"
- "ldr d9, [x9], #0x8\n"
+ "ldr d9, [x28], #0x8\n"
"tbz x11, #1, 12f\n"
- "ld1 { v9.s }[2], [x9], #0x4\n"
- "mov x20, #0x1c\n"
+ "mov x19, #0x1c\n"
+ "ld1 { v9.s }[2], [x28], #0x4\n"
"tbz x11, #0, 19f\n"
- "ld1 { v9.h }[6], [x9]\n"
+ "ld1 { v9.h }[6], [x28]\n"
"b 19f\n"
"12:" // Height 1: Partial accumulate: partial_1_12
- "mov x20, #0x18\n"
+ "mov x19, #0x18\n"
"tbz x11, #0, 19f\n"
- "ld1 { v9.h }[4], [x9]\n"
+ "ld1 { v9.h }[4], [x28]\n"
"b 19f\n"
"13:" // Height 1: Partial accumulate: partial_2_8
"tbz x11, #1, 14f\n"
- "ldr s9, [x9], #0x4\n"
- "mov x20, #0x14\n"
+ "ldr s9, [x28], #0x4\n"
+ "mov x19, #0x14\n"
"tbz x11, #0, 19f\n"
- "ld1 { v9.h }[2], [x9]\n"
+ "ld1 { v9.h }[2], [x28]\n"
"b 19f\n"
"14:" // Height 1: Partial accumulate: partial_1_8
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x11, #0, 19f\n"
- "ldr h9, [x9, #0x0]\n"
+ "ldr h9, [x28, #0x0]\n"
"b 19f\n"
"15:" // Height 1: Partial accumulate: partial_4_0
"tbz x11, #2, 17f\n"
- "ldr d8, [x9], #0x8\n"
+ "ldr d8, [x28], #0x8\n"
"tbz x11, #1, 16f\n"
- "ld1 { v8.s }[2], [x9], #0x4\n"
- "mov x20, #0xc\n"
+ "ld1 { v8.s }[2], [x28], #0x4\n"
+ "mov x19, #0xc\n"
"tbz x11, #0, 19f\n"
- "ld1 { v8.h }[6], [x9]\n"
+ "ld1 { v8.h }[6], [x28]\n"
"b 19f\n"
"16:" // Height 1: Partial accumulate: partial_1_4
- "mov x20, #0x8\n"
+ "mov x19, #0x8\n"
"tbz x11, #0, 19f\n"
- "ld1 { v8.h }[4], [x9]\n"
+ "ld1 { v8.h }[4], [x28]\n"
"b 19f\n"
"17:" // Height 1: Partial accumulate: partial_2_0
"tbz x11, #1, 18f\n"
- "ldr s8, [x9], #0x4\n"
- "mov x20, #0x4\n"
+ "ldr s8, [x28], #0x4\n"
+ "mov x19, #0x4\n"
"tbz x11, #0, 19f\n"
- "ld1 { v8.h }[2], [x9]\n"
+ "ld1 { v8.h }[2], [x28]\n"
"b 19f\n"
"18:" // Height 1: Partial accumulate: partial_1_0
- "ldr h8, [x9, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr h8, [x28, #0x0]\n"
+ "mov x19, #0x0\n"
"19:" // Height 1: Partial accumulate: Done
- "sub x9, x9, x20\n"
+ "sub x28, x28, x19\n"
"b 22f\n"
"20:" // Height 1: full accumulate
- "ldr q8, [x9, #0x0]\n"
- "ldr q9, [x9, #0x10]\n"
- "ldr q10, [x9, #0x20]\n"
- "ldr q11, [x9, #0x30]\n"
+ "ldr q8, [x28, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q10, [x28, #0x20]\n"
+ "ldr q11, [x28, #0x30]\n"
"b 22f\n"
"21:" // Height 1: no accumulate
"movi v8.16b, #0x0\n"
@@ -240,42 +240,46 @@ void a64_hybrid_fp16_mla_6x32 (
"movi v10.16b, #0x0\n"
"movi v11.16b, #0x0\n"
"22:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"23:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 24f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 25f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 25f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
"b 25f\n"
"24:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"25:" // Height 1: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"blt 28f\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
"ldr q6, [x10, #0x0]\n"
- "cmp x27, #0x10\n"
- "ldr q7, [x10, #0x10]\n"
+ "cmp x26, #0x10\n"
"blt 27f\n"
"26:" // Height 1: Multiply loop: Main loop head
"fmla v8.8h, v6.8h, v0.h[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "add x25, x25, #0x10\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
- "ldr q7, [x10, #0x30]\n"
+ "ldr q6, [x10, #0x20]\n"
+ "sub x26, x26, #0x8\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q7, [x10, #0x30]\n"
+ "cmp x26, #0x10\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
+ "ldr q6, [x10, #0x40]\n"
"ldr q7, [x10, #0x50]\n"
"fmla v8.8h, v6.8h, v0.h[1]\n"
"ldr q6, [x10, #0x60]\n"
"fmla v9.8h, v7.8h, v0.h[1]\n"
"ldr q7, [x10, #0x70]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"fmla v10.8h, v6.8h, v0.h[1]\n"
"ldr q6, [x10, #0x80]\n"
"fmla v11.8h, v7.8h, v0.h[1]\n"
@@ -324,28 +328,27 @@ void a64_hybrid_fp16_mla_6x32 (
"ldr q6, [x10, #0x1e0]\n"
"fmla v9.8h, v7.8h, v0.h[7]\n"
"ldr q7, [x10, #0x1f0]\n"
- "sub x27, x27, #0x8\n"
- "add x26, x26, #0x10\n"
- "fmla v10.8h, v6.8h, v0.h[7]\n"
- "fmla v11.8h, v7.8h, v0.h[7]\n"
- "ldr q0, [x26, #0x0]\n"
- "cmp x27, #0x10\n"
"add x10, x10, #0x200\n"
+ "fmla v10.8h, v6.8h, v0.h[7]\n"
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "fmla v11.8h, v7.8h, v0.h[7]\n"
+ "ldr q0, [x25, #0x0]\n"
"bge 26b\n"
"27:" // Height 1: Multiply loop: Single iteration only
"fmla v8.8h, v6.8h, v0.h[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "sub x26, x26, #0x8\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
- "ldr q7, [x10, #0x30]\n"
+ "ldr q6, [x10, #0x20]\n"
+ "add x25, x25, #0x10\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x30]\n"
"ldr q6, [x10, #0x40]\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
"ldr q7, [x10, #0x50]\n"
"fmla v8.8h, v6.8h, v0.h[1]\n"
"ldr q6, [x10, #0x60]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"fmla v9.8h, v7.8h, v0.h[1]\n"
"ldr q7, [x10, #0x70]\n"
"fmla v10.8h, v6.8h, v0.h[1]\n"
@@ -396,328 +399,325 @@ void a64_hybrid_fp16_mla_6x32 (
"ldr q6, [x10, #0x1e0]\n"
"fmla v9.8h, v7.8h, v0.h[7]\n"
"ldr q7, [x10, #0x1f0]\n"
- "add x26, x26, #0x10\n"
- "sub x27, x27, #0x8\n"
+ "add x10, x10, #0x200\n"
"fmla v10.8h, v6.8h, v0.h[7]\n"
"fmla v11.8h, v7.8h, v0.h[7]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "add x10, x10, #0x200\n"
"28:" // Height 1: Multiply loop: Main loop skip
- "cbz x27, 30f\n"
+ "cbz x26, 30f\n"
"29:" // Height 1: Multiply loop: Odd block loop
- "ldr h0, [x26], #0x2\n"
+ "ldr h0, [x25], #0x2\n"
+ "sub x26, x26, #0x1\n"
"ldr q6, [x10, #0x0]\n"
"fmla v8.8h, v6.8h, v0.h[0]\n"
- "sub x27, x27, #0x1\n"
"ldr q7, [x10, #0x10]\n"
"ldr q6, [x10, #0x20]\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
- "fmla v10.8h, v6.8h, v0.h[0]\n"
"ldr q7, [x10, #0x30]\n"
- "fmla v11.8h, v7.8h, v0.h[0]\n"
"add x10, x10, #0x40\n"
- "cbnz x27, 29b\n"
+ "fmla v10.8h, v6.8h, v0.h[0]\n"
+ "fmla v11.8h, v7.8h, v0.h[0]\n"
+ "cbnz x26, 29b\n"
"30:" // Height 1: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 23b\n"
- "prfm pstl1keep, [x9, #0x0]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
"tbz %x[flags], #1, 31f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.8h }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.8h }, [x20]\n"
- "fmin v8.8h, v8.8h, v1.8h\n"
- "fmin v9.8h, v9.8h, v1.8h\n"
- "fmin v10.8h, v10.8h, v1.8h\n"
- "fmin v11.8h, v11.8h, v1.8h\n"
- "fmax v8.8h, v8.8h, v0.8h\n"
- "fmax v9.8h, v9.8h, v0.8h\n"
- "fmax v10.8h, v10.8h, v0.8h\n"
- "fmax v11.8h, v11.8h, v0.8h\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.8h }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.8h }, [x19]\n"
+ "fmin v8.8h, v8.8h, v0.8h\n"
+ "fmin v9.8h, v9.8h, v0.8h\n"
+ "fmin v10.8h, v10.8h, v0.8h\n"
+ "fmin v11.8h, v11.8h, v0.8h\n"
+ "fmax v8.8h, v8.8h, v1.8h\n"
+ "fmax v9.8h, v9.8h, v1.8h\n"
+ "fmax v10.8h, v10.8h, v1.8h\n"
+ "fmax v11.8h, v11.8h, v1.8h\n"
"31:" // Height 1: No activation
"cmp x11, #0x20\n"
"bge 48f\n"
"tbz x11, #4, 39f\n"
- "st1 { v8.8h }, [x9], #0x10\n"
- "st1 { v9.8h }, [x9], #0x10\n"
+ "st1 { v8.8h }, [x28], #0x10\n"
+ "st1 { v9.8h }, [x28], #0x10\n"
"tbz x11, #3, 35f\n"
- "st1 { v10.8h }, [x9], #0x10\n"
+ "st1 { v10.8h }, [x28], #0x10\n"
"tbz x11, #2, 33f\n"
- "str d11, [x9], #0x8\n"
+ "str d11, [x28], #0x8\n"
"tbz x11, #1, 32f\n"
- "st1 { v11.s }[2], [x9], #0x4\n"
+ "st1 { v11.s }[2], [x28], #0x4\n"
"tbz x11, #0, 47f\n"
- "st1 { v11.h }[6], [x9]\n"
+ "st1 { v11.h }[6], [x28]\n"
"b 47f\n"
"32:" // Height 1: Partial direct writeback: partial_1_28
"tbz x11, #0, 47f\n"
- "st1 { v11.h }[4], [x9]\n"
+ "st1 { v11.h }[4], [x28]\n"
"b 47f\n"
"33:" // Height 1: Partial direct writeback: partial_2_24
"tbz x11, #1, 34f\n"
- "str s11, [x9], #0x4\n"
+ "str s11, [x28], #0x4\n"
"tbz x11, #0, 47f\n"
- "st1 { v11.h }[2], [x9]\n"
+ "st1 { v11.h }[2], [x28]\n"
"b 47f\n"
"34:" // Height 1: Partial direct writeback: partial_1_24
"tbz x11, #0, 47f\n"
- "str h11, [x9, #0x0]\n"
+ "str h11, [x28, #0x0]\n"
"b 47f\n"
"35:" // Height 1: Partial direct writeback: partial_4_16
"tbz x11, #2, 37f\n"
- "str d10, [x9], #0x8\n"
+ "str d10, [x28], #0x8\n"
"tbz x11, #1, 36f\n"
- "st1 { v10.s }[2], [x9], #0x4\n"
+ "st1 { v10.s }[2], [x28], #0x4\n"
"tbz x11, #0, 47f\n"
- "st1 { v10.h }[6], [x9]\n"
+ "st1 { v10.h }[6], [x28]\n"
"b 47f\n"
"36:" // Height 1: Partial direct writeback: partial_1_20
"tbz x11, #0, 47f\n"
- "st1 { v10.h }[4], [x9]\n"
+ "st1 { v10.h }[4], [x28]\n"
"b 47f\n"
"37:" // Height 1: Partial direct writeback: partial_2_16
"tbz x11, #1, 38f\n"
- "str s10, [x9], #0x4\n"
+ "str s10, [x28], #0x4\n"
"tbz x11, #0, 47f\n"
- "st1 { v10.h }[2], [x9]\n"
+ "st1 { v10.h }[2], [x28]\n"
"b 47f\n"
"38:" // Height 1: Partial direct writeback: partial_1_16
"tbz x11, #0, 47f\n"
- "str h10, [x9, #0x0]\n"
+ "str h10, [x28, #0x0]\n"
"b 47f\n"
"39:" // Height 1: Partial direct writeback: partial_8_0
"tbz x11, #3, 43f\n"
- "st1 { v8.8h }, [x9], #0x10\n"
+ "st1 { v8.8h }, [x28], #0x10\n"
"tbz x11, #2, 41f\n"
- "str d9, [x9], #0x8\n"
+ "str d9, [x28], #0x8\n"
"tbz x11, #1, 40f\n"
- "st1 { v9.s }[2], [x9], #0x4\n"
+ "st1 { v9.s }[2], [x28], #0x4\n"
"tbz x11, #0, 47f\n"
- "st1 { v9.h }[6], [x9]\n"
+ "st1 { v9.h }[6], [x28]\n"
"b 47f\n"
"40:" // Height 1: Partial direct writeback: partial_1_12
"tbz x11, #0, 47f\n"
- "st1 { v9.h }[4], [x9]\n"
+ "st1 { v9.h }[4], [x28]\n"
"b 47f\n"
"41:" // Height 1: Partial direct writeback: partial_2_8
"tbz x11, #1, 42f\n"
- "str s9, [x9], #0x4\n"
+ "str s9, [x28], #0x4\n"
"tbz x11, #0, 47f\n"
- "st1 { v9.h }[2], [x9]\n"
+ "st1 { v9.h }[2], [x28]\n"
"b 47f\n"
"42:" // Height 1: Partial direct writeback: partial_1_8
"tbz x11, #0, 47f\n"
- "str h9, [x9, #0x0]\n"
+ "str h9, [x28, #0x0]\n"
"b 47f\n"
"43:" // Height 1: Partial direct writeback: partial_4_0
"tbz x11, #2, 45f\n"
- "str d8, [x9], #0x8\n"
+ "str d8, [x28], #0x8\n"
"tbz x11, #1, 44f\n"
- "st1 { v8.s }[2], [x9], #0x4\n"
+ "st1 { v8.s }[2], [x28], #0x4\n"
"tbz x11, #0, 47f\n"
- "st1 { v8.h }[6], [x9]\n"
+ "st1 { v8.h }[6], [x28]\n"
"b 47f\n"
"44:" // Height 1: Partial direct writeback: partial_1_4
"tbz x11, #0, 47f\n"
- "st1 { v8.h }[4], [x9]\n"
+ "st1 { v8.h }[4], [x28]\n"
"b 47f\n"
"45:" // Height 1: Partial direct writeback: partial_2_0
"tbz x11, #1, 46f\n"
- "str s8, [x9], #0x4\n"
+ "str s8, [x28], #0x4\n"
"tbz x11, #0, 47f\n"
- "st1 { v8.h }[2], [x9]\n"
+ "st1 { v8.h }[2], [x28]\n"
"b 47f\n"
"46:" // Height 1: Partial direct writeback: partial_1_0
- "str h8, [x9, #0x0]\n"
+ "str h8, [x28, #0x0]\n"
"47:" // Height 1: Partial direct writeback: Done
"b 49f\n"
"48:" // Height 1: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
"49:" // Height 1: Writeback done
"subs x11, x11, #0x20\n"
"bgt 2b\n"
"b 296f\n"
"50:" // Height 2
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"51:" // Height 2: Column loop
- "cbz x12, 52f\n"
- "ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
+ "cbz x9, 52f\n"
+ "ldr q8, [x9, #0x0]\n"
"mov v12.16b, v8.16b\n"
+ "ldr q9, [x9, #0x10]\n"
+ "ldr q10, [x9, #0x20]\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
+ "ldr q11, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
"mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
- "add x12, x12, #0x40\n"
"b 71f\n"
"52:" // Height 2: no bias
"tbz %x[flags], #0, 70f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x11, #0x20\n"
- "add x25, x9, x20, LSL #1\n"
+ "add x24, x28, x19, LSL #1\n"
"bge 69f\n"
"tbz x11, #4, 60f\n"
- "ld1 { v8.8h }, [x9], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v9.8h }, [x9], #0x10\n"
- "ld1 { v13.8h }, [x25], #0x10\n"
+ "ld1 { v8.8h }, [x28], #0x10\n"
+ "ld1 { v12.8h }, [x24], #0x10\n"
+ "ld1 { v9.8h }, [x28], #0x10\n"
+ "ld1 { v13.8h }, [x24], #0x10\n"
"tbz x11, #3, 56f\n"
- "ld1 { v10.8h }, [x9], #0x10\n"
- "ld1 { v14.8h }, [x25], #0x10\n"
+ "ld1 { v10.8h }, [x28], #0x10\n"
+ "ld1 { v14.8h }, [x24], #0x10\n"
"tbz x11, #2, 54f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
"tbz x11, #1, 53f\n"
- "ld1 { v11.s }[2], [x9], #0x4\n"
- "ld1 { v15.s }[2], [x25], #0x4\n"
- "mov x20, #0x3c\n"
+ "mov x19, #0x3c\n"
+ "ld1 { v11.s }[2], [x28], #0x4\n"
+ "ld1 { v15.s }[2], [x24], #0x4\n"
"tbz x11, #0, 68f\n"
- "ld1 { v11.h }[6], [x9]\n"
- "ld1 { v15.h }[6], [x25]\n"
+ "ld1 { v11.h }[6], [x28]\n"
+ "ld1 { v15.h }[6], [x24]\n"
"b 68f\n"
"53:" // Height 2: Partial accumulate: partial_1_28
- "mov x20, #0x38\n"
+ "mov x19, #0x38\n"
"tbz x11, #0, 68f\n"
- "ld1 { v11.h }[4], [x9]\n"
- "ld1 { v15.h }[4], [x25]\n"
+ "ld1 { v11.h }[4], [x28]\n"
+ "ld1 { v15.h }[4], [x24]\n"
"b 68f\n"
"54:" // Height 2: Partial accumulate: partial_2_24
"tbz x11, #1, 55f\n"
- "ldr s11, [x9], #0x4\n"
- "ldr s15, [x25], #0x4\n"
- "mov x20, #0x34\n"
+ "ldr s11, [x28], #0x4\n"
+ "ldr s15, [x24], #0x4\n"
+ "mov x19, #0x34\n"
"tbz x11, #0, 68f\n"
- "ld1 { v11.h }[2], [x9]\n"
- "ld1 { v15.h }[2], [x25]\n"
+ "ld1 { v11.h }[2], [x28]\n"
+ "ld1 { v15.h }[2], [x24]\n"
"b 68f\n"
"55:" // Height 2: Partial accumulate: partial_1_24
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x11, #0, 68f\n"
- "ldr h11, [x9, #0x0]\n"
- "ldr h15, [x25, #0x0]\n"
+ "ldr h11, [x28, #0x0]\n"
+ "ldr h15, [x24, #0x0]\n"
"b 68f\n"
"56:" // Height 2: Partial accumulate: partial_4_16
"tbz x11, #2, 58f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
"tbz x11, #1, 57f\n"
- "ld1 { v10.s }[2], [x9], #0x4\n"
- "ld1 { v14.s }[2], [x25], #0x4\n"
- "mov x20, #0x2c\n"
+ "mov x19, #0x2c\n"
+ "ld1 { v10.s }[2], [x28], #0x4\n"
+ "ld1 { v14.s }[2], [x24], #0x4\n"
"tbz x11, #0, 68f\n"
- "ld1 { v10.h }[6], [x9]\n"
- "ld1 { v14.h }[6], [x25]\n"
+ "ld1 { v10.h }[6], [x28]\n"
+ "ld1 { v14.h }[6], [x24]\n"
"b 68f\n"
"57:" // Height 2: Partial accumulate: partial_1_20
- "mov x20, #0x28\n"
+ "mov x19, #0x28\n"
"tbz x11, #0, 68f\n"
- "ld1 { v10.h }[4], [x9]\n"
- "ld1 { v14.h }[4], [x25]\n"
+ "ld1 { v10.h }[4], [x28]\n"
+ "ld1 { v14.h }[4], [x24]\n"
"b 68f\n"
"58:" // Height 2: Partial accumulate: partial_2_16
"tbz x11, #1, 59f\n"
- "ldr s10, [x9], #0x4\n"
- "ldr s14, [x25], #0x4\n"
- "mov x20, #0x24\n"
+ "ldr s10, [x28], #0x4\n"
+ "ldr s14, [x24], #0x4\n"
+ "mov x19, #0x24\n"
"tbz x11, #0, 68f\n"
- "ld1 { v10.h }[2], [x9]\n"
- "ld1 { v14.h }[2], [x25]\n"
+ "ld1 { v10.h }[2], [x28]\n"
+ "ld1 { v14.h }[2], [x24]\n"
"b 68f\n"
"59:" // Height 2: Partial accumulate: partial_1_16
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x11, #0, 68f\n"
- "ldr h10, [x9, #0x0]\n"
- "ldr h14, [x25, #0x0]\n"
+ "ldr h10, [x28, #0x0]\n"
+ "ldr h14, [x24, #0x0]\n"
"b 68f\n"
"60:" // Height 2: Partial accumulate: partial_8_0
"tbz x11, #3, 64f\n"
- "ld1 { v8.8h }, [x9], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
+ "ld1 { v8.8h }, [x28], #0x10\n"
+ "ld1 { v12.8h }, [x24], #0x10\n"
"tbz x11, #2, 62f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
"tbz x11, #1, 61f\n"
- "ld1 { v9.s }[2], [x9], #0x4\n"
- "ld1 { v13.s }[2], [x25], #0x4\n"
- "mov x20, #0x1c\n"
+ "mov x19, #0x1c\n"
+ "ld1 { v9.s }[2], [x28], #0x4\n"
+ "ld1 { v13.s }[2], [x24], #0x4\n"
"tbz x11, #0, 68f\n"
- "ld1 { v9.h }[6], [x9]\n"
- "ld1 { v13.h }[6], [x25]\n"
+ "ld1 { v9.h }[6], [x28]\n"
+ "ld1 { v13.h }[6], [x24]\n"
"b 68f\n"
"61:" // Height 2: Partial accumulate: partial_1_12
- "mov x20, #0x18\n"
+ "mov x19, #0x18\n"
"tbz x11, #0, 68f\n"
- "ld1 { v9.h }[4], [x9]\n"
- "ld1 { v13.h }[4], [x25]\n"
+ "ld1 { v9.h }[4], [x28]\n"
+ "ld1 { v13.h }[4], [x24]\n"
"b 68f\n"
"62:" // Height 2: Partial accumulate: partial_2_8
"tbz x11, #1, 63f\n"
- "ldr s9, [x9], #0x4\n"
- "ldr s13, [x25], #0x4\n"
- "mov x20, #0x14\n"
+ "ldr s9, [x28], #0x4\n"
+ "ldr s13, [x24], #0x4\n"
+ "mov x19, #0x14\n"
"tbz x11, #0, 68f\n"
- "ld1 { v9.h }[2], [x9]\n"
- "ld1 { v13.h }[2], [x25]\n"
+ "ld1 { v9.h }[2], [x28]\n"
+ "ld1 { v13.h }[2], [x24]\n"
"b 68f\n"
"63:" // Height 2: Partial accumulate: partial_1_8
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x11, #0, 68f\n"
- "ldr h9, [x9, #0x0]\n"
- "ldr h13, [x25, #0x0]\n"
+ "ldr h9, [x28, #0x0]\n"
+ "ldr h13, [x24, #0x0]\n"
"b 68f\n"
"64:" // Height 2: Partial accumulate: partial_4_0
"tbz x11, #2, 66f\n"
- "ldr d8, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d8, [x28], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
"tbz x11, #1, 65f\n"
- "ld1 { v8.s }[2], [x9], #0x4\n"
- "ld1 { v12.s }[2], [x25], #0x4\n"
- "mov x20, #0xc\n"
+ "mov x19, #0xc\n"
+ "ld1 { v8.s }[2], [x28], #0x4\n"
+ "ld1 { v12.s }[2], [x24], #0x4\n"
"tbz x11, #0, 68f\n"
- "ld1 { v8.h }[6], [x9]\n"
- "ld1 { v12.h }[6], [x25]\n"
+ "ld1 { v8.h }[6], [x28]\n"
+ "ld1 { v12.h }[6], [x24]\n"
"b 68f\n"
"65:" // Height 2: Partial accumulate: partial_1_4
- "mov x20, #0x8\n"
+ "mov x19, #0x8\n"
"tbz x11, #0, 68f\n"
- "ld1 { v8.h }[4], [x9]\n"
- "ld1 { v12.h }[4], [x25]\n"
+ "ld1 { v8.h }[4], [x28]\n"
+ "ld1 { v12.h }[4], [x24]\n"
"b 68f\n"
"66:" // Height 2: Partial accumulate: partial_2_0
"tbz x11, #1, 67f\n"
- "ldr s8, [x9], #0x4\n"
- "ldr s12, [x25], #0x4\n"
- "mov x20, #0x4\n"
+ "ldr s8, [x28], #0x4\n"
+ "ldr s12, [x24], #0x4\n"
+ "mov x19, #0x4\n"
"tbz x11, #0, 68f\n"
- "ld1 { v8.h }[2], [x9]\n"
- "ld1 { v12.h }[2], [x25]\n"
+ "ld1 { v8.h }[2], [x28]\n"
+ "ld1 { v12.h }[2], [x24]\n"
"b 68f\n"
"67:" // Height 2: Partial accumulate: partial_1_0
- "ldr h8, [x9, #0x0]\n"
- "ldr h12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr h8, [x28, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr h12, [x24, #0x0]\n"
"68:" // Height 2: Partial accumulate: Done
- "sub x9, x9, x20\n"
+ "sub x28, x28, x19\n"
"b 71f\n"
"69:" // Height 2: full accumulate
- "ldr q8, [x9, #0x0]\n"
- "ldr q9, [x9, #0x10]\n"
- "ldr q10, [x9, #0x20]\n"
- "ldr q11, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
+ "ldr q8, [x28, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q10, [x28, #0x20]\n"
+ "ldr q11, [x28, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
"b 71f\n"
"70:" // Height 2: no accumulate
"movi v8.16b, #0x0\n"
@@ -729,58 +729,58 @@ void a64_hybrid_fp16_mla_6x32 (
"movi v14.16b, #0x0\n"
"movi v15.16b, #0x0\n"
"71:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"72:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 73f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 74f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 74f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
"b 74f\n"
"73:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
"74:" // Height 2: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"blt 77f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x10\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x10\n"
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
"blt 76f\n"
"75:" // Height 2: Multiply loop: Main loop head
"fmla v8.8h, v6.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "add x25, x25, #0x10\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
"ldr q6, [x10, #0x20]\n"
- "sub x27, x27, #0x8\n"
+ "add x24, x24, #0x10\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x26, x26, #0x8\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
"ldr q7, [x10, #0x30]\n"
- "add x26, x26, #0x10\n"
+ "cmp x26, #0x10\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
"ldr q6, [x10, #0x40]\n"
- "add x25, x25, #0x10\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
"fmla v15.8h, v7.8h, v1.h[0]\n"
"ldr q7, [x10, #0x50]\n"
- "cmp x27, #0x10\n"
"fmla v8.8h, v6.8h, v0.h[1]\n"
"fmla v12.8h, v6.8h, v1.h[1]\n"
"ldr q6, [x10, #0x60]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"fmla v9.8h, v7.8h, v0.h[1]\n"
"fmla v13.8h, v7.8h, v1.h[1]\n"
"ldr q7, [x10, #0x70]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"fmla v10.8h, v6.8h, v0.h[1]\n"
"fmla v14.8h, v6.8h, v1.h[1]\n"
"ldr q6, [x10, #0x80]\n"
@@ -858,32 +858,32 @@ void a64_hybrid_fp16_mla_6x32 (
"fmla v14.8h, v6.8h, v1.h[7]\n"
"ldr q6, [x10, #0x0]\n"
"fmla v11.8h, v7.8h, v0.h[7]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
"fmla v15.8h, v7.8h, v1.h[7]\n"
- "ldr q1, [x25, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q1, [x24, #0x0]\n"
"bge 75b\n"
"76:" // Height 2: Multiply loop: Single iteration only
"fmla v8.8h, v6.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "sub x26, x26, #0x8\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
"ldr q6, [x10, #0x20]\n"
- "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
"ldr q7, [x10, #0x30]\n"
- "add x25, x25, #0x10\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
"ldr q6, [x10, #0x40]\n"
- "sub x27, x27, #0x8\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
"fmla v15.8h, v7.8h, v1.h[0]\n"
"ldr q7, [x10, #0x50]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"fmla v8.8h, v6.8h, v0.h[1]\n"
"fmla v12.8h, v6.8h, v1.h[1]\n"
"ldr q6, [x10, #0x60]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"fmla v9.8h, v7.8h, v0.h[1]\n"
"fmla v13.8h, v7.8h, v1.h[1]\n"
"ldr q7, [x10, #0x70]\n"
@@ -965,414 +965,414 @@ void a64_hybrid_fp16_mla_6x32 (
"fmla v11.8h, v7.8h, v0.h[7]\n"
"fmla v15.8h, v7.8h, v1.h[7]\n"
"77:" // Height 2: Multiply loop: Main loop skip
- "cbz x27, 79f\n"
+ "cbz x26, 79f\n"
"78:" // Height 2: Multiply loop: Odd block loop
- "ldr h0, [x26], #0x2\n"
- "ldr h1, [x25], #0x2\n"
- "sub x27, x27, #0x1\n"
+ "ldr h0, [x25], #0x2\n"
+ "sub x26, x26, #0x1\n"
+ "ldr h1, [x24], #0x2\n"
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
"fmla v8.8h, v6.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
"ldr q6, [x10, #0x20]\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
"ldr q7, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
- "add x10, x10, #0x40\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
"fmla v15.8h, v7.8h, v1.h[0]\n"
- "cbnz x27, 78b\n"
+ "cbnz x26, 78b\n"
"79:" // Height 2: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 72b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x25, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x24, x28, x19, LSL #1\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"tbz %x[flags], #1, 80f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.8h }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.8h }, [x20]\n"
- "fmin v8.8h, v8.8h, v1.8h\n"
- "fmin v9.8h, v9.8h, v1.8h\n"
- "fmin v10.8h, v10.8h, v1.8h\n"
- "fmin v11.8h, v11.8h, v1.8h\n"
- "fmin v12.8h, v12.8h, v1.8h\n"
- "fmin v13.8h, v13.8h, v1.8h\n"
- "fmin v14.8h, v14.8h, v1.8h\n"
- "fmin v15.8h, v15.8h, v1.8h\n"
- "fmax v8.8h, v8.8h, v0.8h\n"
- "fmax v9.8h, v9.8h, v0.8h\n"
- "fmax v10.8h, v10.8h, v0.8h\n"
- "fmax v11.8h, v11.8h, v0.8h\n"
- "fmax v12.8h, v12.8h, v0.8h\n"
- "fmax v13.8h, v13.8h, v0.8h\n"
- "fmax v14.8h, v14.8h, v0.8h\n"
- "fmax v15.8h, v15.8h, v0.8h\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.8h }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.8h }, [x19]\n"
+ "fmin v8.8h, v8.8h, v0.8h\n"
+ "fmin v9.8h, v9.8h, v0.8h\n"
+ "fmin v10.8h, v10.8h, v0.8h\n"
+ "fmin v11.8h, v11.8h, v0.8h\n"
+ "fmax v8.8h, v8.8h, v1.8h\n"
+ "fmax v9.8h, v9.8h, v1.8h\n"
+ "fmax v10.8h, v10.8h, v1.8h\n"
+ "fmax v11.8h, v11.8h, v1.8h\n"
+ "fmin v12.8h, v12.8h, v0.8h\n"
+ "fmin v13.8h, v13.8h, v0.8h\n"
+ "fmin v14.8h, v14.8h, v0.8h\n"
+ "fmax v12.8h, v12.8h, v1.8h\n"
+ "fmax v13.8h, v13.8h, v1.8h\n"
+ "fmax v14.8h, v14.8h, v1.8h\n"
+ "fmin v15.8h, v15.8h, v0.8h\n"
+ "fmax v15.8h, v15.8h, v1.8h\n"
"80:" // Height 2: No activation
"cmp x11, #0x20\n"
"bge 97f\n"
"tbz x11, #4, 88f\n"
- "st1 { v8.8h }, [x9], #0x10\n"
- "st1 { v9.8h }, [x9], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v13.8h }, [x25], #0x10\n"
+ "st1 { v8.8h }, [x28], #0x10\n"
+ "st1 { v9.8h }, [x28], #0x10\n"
+ "st1 { v12.8h }, [x24], #0x10\n"
+ "st1 { v13.8h }, [x24], #0x10\n"
"tbz x11, #3, 84f\n"
- "st1 { v10.8h }, [x9], #0x10\n"
- "st1 { v14.8h }, [x25], #0x10\n"
+ "st1 { v10.8h }, [x28], #0x10\n"
+ "st1 { v14.8h }, [x24], #0x10\n"
"tbz x11, #2, 82f\n"
- "str d11, [x9], #0x8\n"
- "str d15, [x25], #0x8\n"
+ "str d11, [x28], #0x8\n"
+ "str d15, [x24], #0x8\n"
"tbz x11, #1, 81f\n"
- "st1 { v11.s }[2], [x9], #0x4\n"
- "st1 { v15.s }[2], [x25], #0x4\n"
+ "st1 { v11.s }[2], [x28], #0x4\n"
+ "st1 { v15.s }[2], [x24], #0x4\n"
"tbz x11, #0, 96f\n"
- "st1 { v11.h }[6], [x9]\n"
- "st1 { v15.h }[6], [x25]\n"
+ "st1 { v11.h }[6], [x28]\n"
+ "st1 { v15.h }[6], [x24]\n"
"b 96f\n"
"81:" // Height 2: Partial direct writeback: partial_1_28
"tbz x11, #0, 96f\n"
- "st1 { v11.h }[4], [x9]\n"
- "st1 { v15.h }[4], [x25]\n"
+ "st1 { v11.h }[4], [x28]\n"
+ "st1 { v15.h }[4], [x24]\n"
"b 96f\n"
"82:" // Height 2: Partial direct writeback: partial_2_24
"tbz x11, #1, 83f\n"
- "str s11, [x9], #0x4\n"
- "str s15, [x25], #0x4\n"
+ "str s11, [x28], #0x4\n"
+ "str s15, [x24], #0x4\n"
"tbz x11, #0, 96f\n"
- "st1 { v11.h }[2], [x9]\n"
- "st1 { v15.h }[2], [x25]\n"
+ "st1 { v11.h }[2], [x28]\n"
+ "st1 { v15.h }[2], [x24]\n"
"b 96f\n"
"83:" // Height 2: Partial direct writeback: partial_1_24
"tbz x11, #0, 96f\n"
- "str h11, [x9, #0x0]\n"
- "str h15, [x25, #0x0]\n"
+ "str h11, [x28, #0x0]\n"
+ "str h15, [x24, #0x0]\n"
"b 96f\n"
"84:" // Height 2: Partial direct writeback: partial_4_16
"tbz x11, #2, 86f\n"
- "str d10, [x9], #0x8\n"
- "str d14, [x25], #0x8\n"
+ "str d10, [x28], #0x8\n"
+ "str d14, [x24], #0x8\n"
"tbz x11, #1, 85f\n"
- "st1 { v10.s }[2], [x9], #0x4\n"
- "st1 { v14.s }[2], [x25], #0x4\n"
+ "st1 { v10.s }[2], [x28], #0x4\n"
+ "st1 { v14.s }[2], [x24], #0x4\n"
"tbz x11, #0, 96f\n"
- "st1 { v10.h }[6], [x9]\n"
- "st1 { v14.h }[6], [x25]\n"
+ "st1 { v10.h }[6], [x28]\n"
+ "st1 { v14.h }[6], [x24]\n"
"b 96f\n"
"85:" // Height 2: Partial direct writeback: partial_1_20
"tbz x11, #0, 96f\n"
- "st1 { v10.h }[4], [x9]\n"
- "st1 { v14.h }[4], [x25]\n"
+ "st1 { v10.h }[4], [x28]\n"
+ "st1 { v14.h }[4], [x24]\n"
"b 96f\n"
"86:" // Height 2: Partial direct writeback: partial_2_16
"tbz x11, #1, 87f\n"
- "str s10, [x9], #0x4\n"
- "str s14, [x25], #0x4\n"
+ "str s10, [x28], #0x4\n"
+ "str s14, [x24], #0x4\n"
"tbz x11, #0, 96f\n"
- "st1 { v10.h }[2], [x9]\n"
- "st1 { v14.h }[2], [x25]\n"
+ "st1 { v10.h }[2], [x28]\n"
+ "st1 { v14.h }[2], [x24]\n"
"b 96f\n"
"87:" // Height 2: Partial direct writeback: partial_1_16
"tbz x11, #0, 96f\n"
- "str h10, [x9, #0x0]\n"
- "str h14, [x25, #0x0]\n"
+ "str h10, [x28, #0x0]\n"
+ "str h14, [x24, #0x0]\n"
"b 96f\n"
"88:" // Height 2: Partial direct writeback: partial_8_0
"tbz x11, #3, 92f\n"
- "st1 { v8.8h }, [x9], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
+ "st1 { v8.8h }, [x28], #0x10\n"
+ "st1 { v12.8h }, [x24], #0x10\n"
"tbz x11, #2, 90f\n"
- "str d9, [x9], #0x8\n"
- "str d13, [x25], #0x8\n"
+ "str d9, [x28], #0x8\n"
+ "str d13, [x24], #0x8\n"
"tbz x11, #1, 89f\n"
- "st1 { v9.s }[2], [x9], #0x4\n"
- "st1 { v13.s }[2], [x25], #0x4\n"
+ "st1 { v9.s }[2], [x28], #0x4\n"
+ "st1 { v13.s }[2], [x24], #0x4\n"
"tbz x11, #0, 96f\n"
- "st1 { v9.h }[6], [x9]\n"
- "st1 { v13.h }[6], [x25]\n"
+ "st1 { v9.h }[6], [x28]\n"
+ "st1 { v13.h }[6], [x24]\n"
"b 96f\n"
"89:" // Height 2: Partial direct writeback: partial_1_12
"tbz x11, #0, 96f\n"
- "st1 { v9.h }[4], [x9]\n"
- "st1 { v13.h }[4], [x25]\n"
+ "st1 { v9.h }[4], [x28]\n"
+ "st1 { v13.h }[4], [x24]\n"
"b 96f\n"
"90:" // Height 2: Partial direct writeback: partial_2_8
"tbz x11, #1, 91f\n"
- "str s9, [x9], #0x4\n"
- "str s13, [x25], #0x4\n"
+ "str s9, [x28], #0x4\n"
+ "str s13, [x24], #0x4\n"
"tbz x11, #0, 96f\n"
- "st1 { v9.h }[2], [x9]\n"
- "st1 { v13.h }[2], [x25]\n"
+ "st1 { v9.h }[2], [x28]\n"
+ "st1 { v13.h }[2], [x24]\n"
"b 96f\n"
"91:" // Height 2: Partial direct writeback: partial_1_8
"tbz x11, #0, 96f\n"
- "str h9, [x9, #0x0]\n"
- "str h13, [x25, #0x0]\n"
+ "str h9, [x28, #0x0]\n"
+ "str h13, [x24, #0x0]\n"
"b 96f\n"
"92:" // Height 2: Partial direct writeback: partial_4_0
"tbz x11, #2, 94f\n"
- "str d8, [x9], #0x8\n"
- "str d12, [x25], #0x8\n"
+ "str d8, [x28], #0x8\n"
+ "str d12, [x24], #0x8\n"
"tbz x11, #1, 93f\n"
- "st1 { v8.s }[2], [x9], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
+ "st1 { v8.s }[2], [x28], #0x4\n"
+ "st1 { v12.s }[2], [x24], #0x4\n"
"tbz x11, #0, 96f\n"
- "st1 { v8.h }[6], [x9]\n"
- "st1 { v12.h }[6], [x25]\n"
+ "st1 { v8.h }[6], [x28]\n"
+ "st1 { v12.h }[6], [x24]\n"
"b 96f\n"
"93:" // Height 2: Partial direct writeback: partial_1_4
"tbz x11, #0, 96f\n"
- "st1 { v8.h }[4], [x9]\n"
- "st1 { v12.h }[4], [x25]\n"
+ "st1 { v8.h }[4], [x28]\n"
+ "st1 { v12.h }[4], [x24]\n"
"b 96f\n"
"94:" // Height 2: Partial direct writeback: partial_2_0
"tbz x11, #1, 95f\n"
- "str s8, [x9], #0x4\n"
- "str s12, [x25], #0x4\n"
+ "str s8, [x28], #0x4\n"
+ "str s12, [x24], #0x4\n"
"tbz x11, #0, 96f\n"
- "st1 { v8.h }[2], [x9]\n"
- "st1 { v12.h }[2], [x25]\n"
+ "st1 { v8.h }[2], [x28]\n"
+ "st1 { v12.h }[2], [x24]\n"
"b 96f\n"
"95:" // Height 2: Partial direct writeback: partial_1_0
- "str h8, [x9, #0x0]\n"
- "str h12, [x25, #0x0]\n"
+ "str h8, [x28, #0x0]\n"
+ "str h12, [x24, #0x0]\n"
"96:" // Height 2: Partial direct writeback: Done
"b 98f\n"
"97:" // Height 2: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
"98:" // Height 2: Writeback done
"subs x11, x11, #0x20\n"
"bgt 51b\n"
"b 296f\n"
"99:" // Height 3
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"100:" // Height 3: Column loop
- "cbz x12, 101f\n"
- "ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
+ "cbz x9, 101f\n"
+ "ldr q8, [x9, #0x0]\n"
"mov v12.16b, v8.16b\n"
+ "ldr q9, [x9, #0x10]\n"
+ "mov v16.16b, v8.16b\n"
+ "ldr q10, [x9, #0x20]\n"
+ "ldr q11, [x9, #0x30]\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "mov v17.16b, v9.16b\n"
"mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
- "mov v16.16b, v8.16b\n"
- "mov v17.16b, v9.16b\n"
- "add x12, x12, #0x40\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
"b 120f\n"
"101:" // Height 3: no bias
"tbz %x[flags], #0, 119f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x11, #0x20\n"
- "add x24, x25, x20, LSL #1\n"
+ "add x24, x28, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
"bge 118f\n"
"tbz x11, #4, 109f\n"
- "ld1 { v8.8h }, [x9], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v9.8h }, [x9], #0x10\n"
- "ld1 { v13.8h }, [x25], #0x10\n"
- "ld1 { v17.8h }, [x24], #0x10\n"
+ "ld1 { v8.8h }, [x28], #0x10\n"
+ "ld1 { v12.8h }, [x24], #0x10\n"
+ "ld1 { v16.8h }, [x23], #0x10\n"
+ "ld1 { v9.8h }, [x28], #0x10\n"
+ "ld1 { v13.8h }, [x24], #0x10\n"
+ "ld1 { v17.8h }, [x23], #0x10\n"
"tbz x11, #3, 105f\n"
- "ld1 { v10.8h }, [x9], #0x10\n"
- "ld1 { v14.8h }, [x25], #0x10\n"
- "ld1 { v18.8h }, [x24], #0x10\n"
+ "ld1 { v10.8h }, [x28], #0x10\n"
+ "ld1 { v14.8h }, [x24], #0x10\n"
+ "ld1 { v18.8h }, [x23], #0x10\n"
"tbz x11, #2, 103f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "ldr d19, [x24], #0x8\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "ldr d19, [x23], #0x8\n"
"tbz x11, #1, 102f\n"
- "ld1 { v11.s }[2], [x9], #0x4\n"
- "ld1 { v15.s }[2], [x25], #0x4\n"
- "mov x20, #0x3c\n"
- "ld1 { v19.s }[2], [x24], #0x4\n"
+ "mov x19, #0x3c\n"
+ "ld1 { v11.s }[2], [x28], #0x4\n"
+ "ld1 { v15.s }[2], [x24], #0x4\n"
+ "ld1 { v19.s }[2], [x23], #0x4\n"
"tbz x11, #0, 117f\n"
- "ld1 { v11.h }[6], [x9]\n"
- "ld1 { v15.h }[6], [x25]\n"
- "ld1 { v19.h }[6], [x24]\n"
+ "ld1 { v11.h }[6], [x28]\n"
+ "ld1 { v15.h }[6], [x24]\n"
+ "ld1 { v19.h }[6], [x23]\n"
"b 117f\n"
"102:" // Height 3: Partial accumulate: partial_1_28
- "mov x20, #0x38\n"
+ "mov x19, #0x38\n"
"tbz x11, #0, 117f\n"
- "ld1 { v11.h }[4], [x9]\n"
- "ld1 { v15.h }[4], [x25]\n"
- "ld1 { v19.h }[4], [x24]\n"
+ "ld1 { v11.h }[4], [x28]\n"
+ "ld1 { v15.h }[4], [x24]\n"
+ "ld1 { v19.h }[4], [x23]\n"
"b 117f\n"
"103:" // Height 3: Partial accumulate: partial_2_24
"tbz x11, #1, 104f\n"
- "ldr s11, [x9], #0x4\n"
- "ldr s15, [x25], #0x4\n"
- "mov x20, #0x34\n"
- "ldr s19, [x24], #0x4\n"
+ "ldr s11, [x28], #0x4\n"
+ "ldr s15, [x24], #0x4\n"
+ "mov x19, #0x34\n"
+ "ldr s19, [x23], #0x4\n"
"tbz x11, #0, 117f\n"
- "ld1 { v11.h }[2], [x9]\n"
- "ld1 { v15.h }[2], [x25]\n"
- "ld1 { v19.h }[2], [x24]\n"
+ "ld1 { v11.h }[2], [x28]\n"
+ "ld1 { v15.h }[2], [x24]\n"
+ "ld1 { v19.h }[2], [x23]\n"
"b 117f\n"
"104:" // Height 3: Partial accumulate: partial_1_24
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x11, #0, 117f\n"
- "ldr h11, [x9, #0x0]\n"
- "ldr h15, [x25, #0x0]\n"
- "ldr h19, [x24, #0x0]\n"
+ "ldr h11, [x28, #0x0]\n"
+ "ldr h15, [x24, #0x0]\n"
+ "ldr h19, [x23, #0x0]\n"
"b 117f\n"
"105:" // Height 3: Partial accumulate: partial_4_16
"tbz x11, #2, 107f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "ldr d18, [x24], #0x8\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "ldr d18, [x23], #0x8\n"
"tbz x11, #1, 106f\n"
- "ld1 { v10.s }[2], [x9], #0x4\n"
- "ld1 { v14.s }[2], [x25], #0x4\n"
- "mov x20, #0x2c\n"
- "ld1 { v18.s }[2], [x24], #0x4\n"
+ "mov x19, #0x2c\n"
+ "ld1 { v10.s }[2], [x28], #0x4\n"
+ "ld1 { v14.s }[2], [x24], #0x4\n"
+ "ld1 { v18.s }[2], [x23], #0x4\n"
"tbz x11, #0, 117f\n"
- "ld1 { v10.h }[6], [x9]\n"
- "ld1 { v14.h }[6], [x25]\n"
- "ld1 { v18.h }[6], [x24]\n"
+ "ld1 { v10.h }[6], [x28]\n"
+ "ld1 { v14.h }[6], [x24]\n"
+ "ld1 { v18.h }[6], [x23]\n"
"b 117f\n"
"106:" // Height 3: Partial accumulate: partial_1_20
- "mov x20, #0x28\n"
+ "mov x19, #0x28\n"
"tbz x11, #0, 117f\n"
- "ld1 { v10.h }[4], [x9]\n"
- "ld1 { v14.h }[4], [x25]\n"
- "ld1 { v18.h }[4], [x24]\n"
+ "ld1 { v10.h }[4], [x28]\n"
+ "ld1 { v14.h }[4], [x24]\n"
+ "ld1 { v18.h }[4], [x23]\n"
"b 117f\n"
"107:" // Height 3: Partial accumulate: partial_2_16
"tbz x11, #1, 108f\n"
- "ldr s10, [x9], #0x4\n"
- "ldr s14, [x25], #0x4\n"
- "mov x20, #0x24\n"
- "ldr s18, [x24], #0x4\n"
+ "ldr s10, [x28], #0x4\n"
+ "ldr s14, [x24], #0x4\n"
+ "mov x19, #0x24\n"
+ "ldr s18, [x23], #0x4\n"
"tbz x11, #0, 117f\n"
- "ld1 { v10.h }[2], [x9]\n"
- "ld1 { v14.h }[2], [x25]\n"
- "ld1 { v18.h }[2], [x24]\n"
+ "ld1 { v10.h }[2], [x28]\n"
+ "ld1 { v14.h }[2], [x24]\n"
+ "ld1 { v18.h }[2], [x23]\n"
"b 117f\n"
"108:" // Height 3: Partial accumulate: partial_1_16
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x11, #0, 117f\n"
- "ldr h10, [x9, #0x0]\n"
- "ldr h14, [x25, #0x0]\n"
- "ldr h18, [x24, #0x0]\n"
+ "ldr h10, [x28, #0x0]\n"
+ "ldr h14, [x24, #0x0]\n"
+ "ldr h18, [x23, #0x0]\n"
"b 117f\n"
"109:" // Height 3: Partial accumulate: partial_8_0
"tbz x11, #3, 113f\n"
- "ld1 { v8.8h }, [x9], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
+ "ld1 { v8.8h }, [x28], #0x10\n"
+ "ld1 { v12.8h }, [x24], #0x10\n"
+ "ld1 { v16.8h }, [x23], #0x10\n"
"tbz x11, #2, 111f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "ldr d17, [x24], #0x8\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "ldr d17, [x23], #0x8\n"
"tbz x11, #1, 110f\n"
- "ld1 { v9.s }[2], [x9], #0x4\n"
- "ld1 { v13.s }[2], [x25], #0x4\n"
- "mov x20, #0x1c\n"
- "ld1 { v17.s }[2], [x24], #0x4\n"
+ "mov x19, #0x1c\n"
+ "ld1 { v9.s }[2], [x28], #0x4\n"
+ "ld1 { v13.s }[2], [x24], #0x4\n"
+ "ld1 { v17.s }[2], [x23], #0x4\n"
"tbz x11, #0, 117f\n"
- "ld1 { v9.h }[6], [x9]\n"
- "ld1 { v13.h }[6], [x25]\n"
- "ld1 { v17.h }[6], [x24]\n"
+ "ld1 { v9.h }[6], [x28]\n"
+ "ld1 { v13.h }[6], [x24]\n"
+ "ld1 { v17.h }[6], [x23]\n"
"b 117f\n"
"110:" // Height 3: Partial accumulate: partial_1_12
- "mov x20, #0x18\n"
+ "mov x19, #0x18\n"
"tbz x11, #0, 117f\n"
- "ld1 { v9.h }[4], [x9]\n"
- "ld1 { v13.h }[4], [x25]\n"
- "ld1 { v17.h }[4], [x24]\n"
+ "ld1 { v9.h }[4], [x28]\n"
+ "ld1 { v13.h }[4], [x24]\n"
+ "ld1 { v17.h }[4], [x23]\n"
"b 117f\n"
"111:" // Height 3: Partial accumulate: partial_2_8
"tbz x11, #1, 112f\n"
- "ldr s9, [x9], #0x4\n"
- "ldr s13, [x25], #0x4\n"
- "mov x20, #0x14\n"
- "ldr s17, [x24], #0x4\n"
+ "ldr s9, [x28], #0x4\n"
+ "ldr s13, [x24], #0x4\n"
+ "mov x19, #0x14\n"
+ "ldr s17, [x23], #0x4\n"
"tbz x11, #0, 117f\n"
- "ld1 { v9.h }[2], [x9]\n"
- "ld1 { v13.h }[2], [x25]\n"
- "ld1 { v17.h }[2], [x24]\n"
+ "ld1 { v9.h }[2], [x28]\n"
+ "ld1 { v13.h }[2], [x24]\n"
+ "ld1 { v17.h }[2], [x23]\n"
"b 117f\n"
"112:" // Height 3: Partial accumulate: partial_1_8
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x11, #0, 117f\n"
- "ldr h9, [x9, #0x0]\n"
- "ldr h13, [x25, #0x0]\n"
- "ldr h17, [x24, #0x0]\n"
+ "ldr h9, [x28, #0x0]\n"
+ "ldr h13, [x24, #0x0]\n"
+ "ldr h17, [x23, #0x0]\n"
"b 117f\n"
"113:" // Height 3: Partial accumulate: partial_4_0
"tbz x11, #2, 115f\n"
- "ldr d8, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "ldr d16, [x24], #0x8\n"
+ "ldr d8, [x28], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "ldr d16, [x23], #0x8\n"
"tbz x11, #1, 114f\n"
- "ld1 { v8.s }[2], [x9], #0x4\n"
- "ld1 { v12.s }[2], [x25], #0x4\n"
- "mov x20, #0xc\n"
- "ld1 { v16.s }[2], [x24], #0x4\n"
+ "mov x19, #0xc\n"
+ "ld1 { v8.s }[2], [x28], #0x4\n"
+ "ld1 { v12.s }[2], [x24], #0x4\n"
+ "ld1 { v16.s }[2], [x23], #0x4\n"
"tbz x11, #0, 117f\n"
- "ld1 { v8.h }[6], [x9]\n"
- "ld1 { v12.h }[6], [x25]\n"
- "ld1 { v16.h }[6], [x24]\n"
+ "ld1 { v8.h }[6], [x28]\n"
+ "ld1 { v12.h }[6], [x24]\n"
+ "ld1 { v16.h }[6], [x23]\n"
"b 117f\n"
"114:" // Height 3: Partial accumulate: partial_1_4
- "mov x20, #0x8\n"
+ "mov x19, #0x8\n"
"tbz x11, #0, 117f\n"
- "ld1 { v8.h }[4], [x9]\n"
- "ld1 { v12.h }[4], [x25]\n"
- "ld1 { v16.h }[4], [x24]\n"
+ "ld1 { v8.h }[4], [x28]\n"
+ "ld1 { v12.h }[4], [x24]\n"
+ "ld1 { v16.h }[4], [x23]\n"
"b 117f\n"
"115:" // Height 3: Partial accumulate: partial_2_0
"tbz x11, #1, 116f\n"
- "ldr s8, [x9], #0x4\n"
- "ldr s12, [x25], #0x4\n"
- "mov x20, #0x4\n"
- "ldr s16, [x24], #0x4\n"
+ "ldr s8, [x28], #0x4\n"
+ "ldr s12, [x24], #0x4\n"
+ "mov x19, #0x4\n"
+ "ldr s16, [x23], #0x4\n"
"tbz x11, #0, 117f\n"
- "ld1 { v8.h }[2], [x9]\n"
- "ld1 { v12.h }[2], [x25]\n"
- "ld1 { v16.h }[2], [x24]\n"
+ "ld1 { v8.h }[2], [x28]\n"
+ "ld1 { v12.h }[2], [x24]\n"
+ "ld1 { v16.h }[2], [x23]\n"
"b 117f\n"
"116:" // Height 3: Partial accumulate: partial_1_0
- "ldr h8, [x9, #0x0]\n"
- "ldr h12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr h16, [x24, #0x0]\n"
+ "ldr h8, [x28, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr h12, [x24, #0x0]\n"
+ "ldr h16, [x23, #0x0]\n"
"117:" // Height 3: Partial accumulate: Done
- "sub x9, x9, x20\n"
+ "sub x28, x28, x19\n"
"b 120f\n"
"118:" // Height 3: full accumulate
- "ldr q8, [x9, #0x0]\n"
- "ldr q9, [x9, #0x10]\n"
- "ldr q10, [x9, #0x20]\n"
- "ldr q11, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
+ "ldr q8, [x28, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q10, [x28, #0x20]\n"
+ "ldr q11, [x28, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x0]\n"
+ "ldr q17, [x23, #0x10]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "ldr q19, [x23, #0x30]\n"
"b 120f\n"
"119:" // Height 3: no accumulate
"movi v8.16b, #0x0\n"
@@ -1388,62 +1388,62 @@ void a64_hybrid_fp16_mla_6x32 (
"movi v18.16b, #0x0\n"
"movi v19.16b, #0x0\n"
"120:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"121:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 122f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 123f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 123f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
"b 123f\n"
"122:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
"123:" // Height 3: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"blt 126f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x10\n"
- "ldr q2, [x24, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x10\n"
+ "ldr q2, [x23, #0x0]\n"
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
"blt 125f\n"
"124:" // Height 3: Multiply loop: Main loop head
"fmla v8.8h, v6.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "add x25, x25, #0x10\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
- "sub x27, x27, #0x8\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
"ldr q6, [x10, #0x20]\n"
+ "add x23, x23, #0x10\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
- "add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "sub x26, x26, #0x8\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "cmp x26, #0x10\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
"ldr q7, [x10, #0x30]\n"
- "add x24, x24, #0x10\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
- "cmp x27, #0x10\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"fmla v18.8h, v6.8h, v2.h[0]\n"
"ldr q6, [x10, #0x40]\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"fmla v15.8h, v7.8h, v1.h[0]\n"
"fmla v19.8h, v7.8h, v2.h[0]\n"
"ldr q7, [x10, #0x50]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"fmla v8.8h, v6.8h, v0.h[1]\n"
"fmla v12.8h, v6.8h, v1.h[1]\n"
"fmla v16.8h, v6.8h, v2.h[1]\n"
@@ -1554,34 +1554,34 @@ void a64_hybrid_fp16_mla_6x32 (
"fmla v18.8h, v6.8h, v2.h[7]\n"
"ldr q6, [x10, #0x0]\n"
"fmla v11.8h, v7.8h, v0.h[7]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
"fmla v15.8h, v7.8h, v1.h[7]\n"
- "ldr q1, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
"fmla v19.8h, v7.8h, v2.h[7]\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q2, [x23, #0x0]\n"
"bge 124b\n"
"125:" // Height 3: Multiply loop: Single iteration only
"fmla v8.8h, v6.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "sub x26, x26, #0x8\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
- "ldr q6, [x10, #0x20]\n"
- "fmla v9.8h, v7.8h, v0.h[0]\n"
"add x24, x24, #0x10\n"
+ "fmla v9.8h, v7.8h, v0.h[0]\n"
+ "ldr q6, [x10, #0x20]\n"
+ "add x23, x23, #0x10\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"ldr q7, [x10, #0x30]\n"
- "sub x27, x27, #0x8\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"fmla v18.8h, v6.8h, v2.h[0]\n"
"ldr q6, [x10, #0x40]\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"fmla v15.8h, v7.8h, v1.h[0]\n"
"fmla v19.8h, v7.8h, v2.h[0]\n"
"ldr q7, [x10, #0x50]\n"
@@ -1697,16 +1697,16 @@ void a64_hybrid_fp16_mla_6x32 (
"fmla v15.8h, v7.8h, v1.h[7]\n"
"fmla v19.8h, v7.8h, v2.h[7]\n"
"126:" // Height 3: Multiply loop: Main loop skip
- "cbz x27, 128f\n"
+ "cbz x26, 128f\n"
"127:" // Height 3: Multiply loop: Odd block loop
- "ldr h0, [x26], #0x2\n"
- "ldr h1, [x25], #0x2\n"
- "sub x27, x27, #0x1\n"
- "ldr h2, [x24], #0x2\n"
+ "ldr h0, [x25], #0x2\n"
+ "sub x26, x26, #0x1\n"
+ "ldr h1, [x24], #0x2\n"
+ "ldr h2, [x23], #0x2\n"
"ldr q6, [x10, #0x0]\n"
"fmla v8.8h, v6.8h, v0.h[0]\n"
- "fmla v12.8h, v6.8h, v1.h[0]\n"
"ldr q7, [x10, #0x10]\n"
+ "fmla v12.8h, v6.8h, v1.h[0]\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
"ldr q6, [x10, #0x20]\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
@@ -1720,483 +1720,483 @@ void a64_hybrid_fp16_mla_6x32 (
"fmla v11.8h, v7.8h, v0.h[0]\n"
"fmla v15.8h, v7.8h, v1.h[0]\n"
"fmla v19.8h, v7.8h, v2.h[0]\n"
- "cbnz x27, 127b\n"
+ "cbnz x26, 127b\n"
"128:" // Height 3: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 121b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x25, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x24, x28, x19, LSL #1\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x19, LSL #1\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"tbz %x[flags], #1, 129f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.8h }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.8h }, [x20]\n"
- "fmin v8.8h, v8.8h, v1.8h\n"
- "fmin v9.8h, v9.8h, v1.8h\n"
- "fmin v10.8h, v10.8h, v1.8h\n"
- "fmin v11.8h, v11.8h, v1.8h\n"
- "fmin v12.8h, v12.8h, v1.8h\n"
- "fmin v13.8h, v13.8h, v1.8h\n"
- "fmin v14.8h, v14.8h, v1.8h\n"
- "fmin v15.8h, v15.8h, v1.8h\n"
- "fmin v16.8h, v16.8h, v1.8h\n"
- "fmin v17.8h, v17.8h, v1.8h\n"
- "fmin v18.8h, v18.8h, v1.8h\n"
- "fmin v19.8h, v19.8h, v1.8h\n"
- "fmax v8.8h, v8.8h, v0.8h\n"
- "fmax v9.8h, v9.8h, v0.8h\n"
- "fmax v10.8h, v10.8h, v0.8h\n"
- "fmax v11.8h, v11.8h, v0.8h\n"
- "fmax v12.8h, v12.8h, v0.8h\n"
- "fmax v13.8h, v13.8h, v0.8h\n"
- "fmax v14.8h, v14.8h, v0.8h\n"
- "fmax v15.8h, v15.8h, v0.8h\n"
- "fmax v16.8h, v16.8h, v0.8h\n"
- "fmax v17.8h, v17.8h, v0.8h\n"
- "fmax v18.8h, v18.8h, v0.8h\n"
- "fmax v19.8h, v19.8h, v0.8h\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.8h }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.8h }, [x19]\n"
+ "fmin v8.8h, v8.8h, v0.8h\n"
+ "fmin v9.8h, v9.8h, v0.8h\n"
+ "fmin v10.8h, v10.8h, v0.8h\n"
+ "fmin v11.8h, v11.8h, v0.8h\n"
+ "fmax v8.8h, v8.8h, v1.8h\n"
+ "fmax v9.8h, v9.8h, v1.8h\n"
+ "fmax v10.8h, v10.8h, v1.8h\n"
+ "fmax v11.8h, v11.8h, v1.8h\n"
+ "fmin v12.8h, v12.8h, v0.8h\n"
+ "fmin v13.8h, v13.8h, v0.8h\n"
+ "fmin v14.8h, v14.8h, v0.8h\n"
+ "fmax v12.8h, v12.8h, v1.8h\n"
+ "fmax v13.8h, v13.8h, v1.8h\n"
+ "fmax v14.8h, v14.8h, v1.8h\n"
+ "fmin v15.8h, v15.8h, v0.8h\n"
+ "fmin v16.8h, v16.8h, v0.8h\n"
+ "fmin v17.8h, v17.8h, v0.8h\n"
+ "fmax v15.8h, v15.8h, v1.8h\n"
+ "fmax v16.8h, v16.8h, v1.8h\n"
+ "fmax v17.8h, v17.8h, v1.8h\n"
+ "fmin v18.8h, v18.8h, v0.8h\n"
+ "fmin v19.8h, v19.8h, v0.8h\n"
+ "fmax v18.8h, v18.8h, v1.8h\n"
+ "fmax v19.8h, v19.8h, v1.8h\n"
"129:" // Height 3: No activation
"cmp x11, #0x20\n"
"bge 146f\n"
"tbz x11, #4, 137f\n"
- "st1 { v8.8h }, [x9], #0x10\n"
- "st1 { v9.8h }, [x9], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v13.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v17.8h }, [x24], #0x10\n"
+ "st1 { v8.8h }, [x28], #0x10\n"
+ "st1 { v9.8h }, [x28], #0x10\n"
+ "st1 { v12.8h }, [x24], #0x10\n"
+ "st1 { v13.8h }, [x24], #0x10\n"
+ "st1 { v16.8h }, [x23], #0x10\n"
+ "st1 { v17.8h }, [x23], #0x10\n"
"tbz x11, #3, 133f\n"
- "st1 { v10.8h }, [x9], #0x10\n"
- "st1 { v14.8h }, [x25], #0x10\n"
- "st1 { v18.8h }, [x24], #0x10\n"
+ "st1 { v10.8h }, [x28], #0x10\n"
+ "st1 { v14.8h }, [x24], #0x10\n"
+ "st1 { v18.8h }, [x23], #0x10\n"
"tbz x11, #2, 131f\n"
- "str d11, [x9], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
+ "str d11, [x28], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
"tbz x11, #1, 130f\n"
- "st1 { v11.s }[2], [x9], #0x4\n"
- "st1 { v15.s }[2], [x25], #0x4\n"
- "st1 { v19.s }[2], [x24], #0x4\n"
+ "st1 { v11.s }[2], [x28], #0x4\n"
+ "st1 { v15.s }[2], [x24], #0x4\n"
+ "st1 { v19.s }[2], [x23], #0x4\n"
"tbz x11, #0, 145f\n"
- "st1 { v11.h }[6], [x9]\n"
- "st1 { v15.h }[6], [x25]\n"
- "st1 { v19.h }[6], [x24]\n"
+ "st1 { v11.h }[6], [x28]\n"
+ "st1 { v15.h }[6], [x24]\n"
+ "st1 { v19.h }[6], [x23]\n"
"b 145f\n"
"130:" // Height 3: Partial direct writeback: partial_1_28
"tbz x11, #0, 145f\n"
- "st1 { v11.h }[4], [x9]\n"
- "st1 { v15.h }[4], [x25]\n"
- "st1 { v19.h }[4], [x24]\n"
+ "st1 { v11.h }[4], [x28]\n"
+ "st1 { v15.h }[4], [x24]\n"
+ "st1 { v19.h }[4], [x23]\n"
"b 145f\n"
"131:" // Height 3: Partial direct writeback: partial_2_24
"tbz x11, #1, 132f\n"
- "str s11, [x9], #0x4\n"
- "str s15, [x25], #0x4\n"
- "str s19, [x24], #0x4\n"
+ "str s11, [x28], #0x4\n"
+ "str s15, [x24], #0x4\n"
+ "str s19, [x23], #0x4\n"
"tbz x11, #0, 145f\n"
- "st1 { v11.h }[2], [x9]\n"
- "st1 { v15.h }[2], [x25]\n"
- "st1 { v19.h }[2], [x24]\n"
+ "st1 { v11.h }[2], [x28]\n"
+ "st1 { v15.h }[2], [x24]\n"
+ "st1 { v19.h }[2], [x23]\n"
"b 145f\n"
"132:" // Height 3: Partial direct writeback: partial_1_24
"tbz x11, #0, 145f\n"
- "str h11, [x9, #0x0]\n"
- "str h15, [x25, #0x0]\n"
- "str h19, [x24, #0x0]\n"
+ "str h11, [x28, #0x0]\n"
+ "str h15, [x24, #0x0]\n"
+ "str h19, [x23, #0x0]\n"
"b 145f\n"
"133:" // Height 3: Partial direct writeback: partial_4_16
"tbz x11, #2, 135f\n"
- "str d10, [x9], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
+ "str d10, [x28], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
"tbz x11, #1, 134f\n"
- "st1 { v10.s }[2], [x9], #0x4\n"
- "st1 { v14.s }[2], [x25], #0x4\n"
- "st1 { v18.s }[2], [x24], #0x4\n"
+ "st1 { v10.s }[2], [x28], #0x4\n"
+ "st1 { v14.s }[2], [x24], #0x4\n"
+ "st1 { v18.s }[2], [x23], #0x4\n"
"tbz x11, #0, 145f\n"
- "st1 { v10.h }[6], [x9]\n"
- "st1 { v14.h }[6], [x25]\n"
- "st1 { v18.h }[6], [x24]\n"
+ "st1 { v10.h }[6], [x28]\n"
+ "st1 { v14.h }[6], [x24]\n"
+ "st1 { v18.h }[6], [x23]\n"
"b 145f\n"
"134:" // Height 3: Partial direct writeback: partial_1_20
"tbz x11, #0, 145f\n"
- "st1 { v10.h }[4], [x9]\n"
- "st1 { v14.h }[4], [x25]\n"
- "st1 { v18.h }[4], [x24]\n"
+ "st1 { v10.h }[4], [x28]\n"
+ "st1 { v14.h }[4], [x24]\n"
+ "st1 { v18.h }[4], [x23]\n"
"b 145f\n"
"135:" // Height 3: Partial direct writeback: partial_2_16
"tbz x11, #1, 136f\n"
- "str s10, [x9], #0x4\n"
- "str s14, [x25], #0x4\n"
- "str s18, [x24], #0x4\n"
+ "str s10, [x28], #0x4\n"
+ "str s14, [x24], #0x4\n"
+ "str s18, [x23], #0x4\n"
"tbz x11, #0, 145f\n"
- "st1 { v10.h }[2], [x9]\n"
- "st1 { v14.h }[2], [x25]\n"
- "st1 { v18.h }[2], [x24]\n"
+ "st1 { v10.h }[2], [x28]\n"
+ "st1 { v14.h }[2], [x24]\n"
+ "st1 { v18.h }[2], [x23]\n"
"b 145f\n"
"136:" // Height 3: Partial direct writeback: partial_1_16
"tbz x11, #0, 145f\n"
- "str h10, [x9, #0x0]\n"
- "str h14, [x25, #0x0]\n"
- "str h18, [x24, #0x0]\n"
+ "str h10, [x28, #0x0]\n"
+ "str h14, [x24, #0x0]\n"
+ "str h18, [x23, #0x0]\n"
"b 145f\n"
"137:" // Height 3: Partial direct writeback: partial_8_0
"tbz x11, #3, 141f\n"
- "st1 { v8.8h }, [x9], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
+ "st1 { v8.8h }, [x28], #0x10\n"
+ "st1 { v12.8h }, [x24], #0x10\n"
+ "st1 { v16.8h }, [x23], #0x10\n"
"tbz x11, #2, 139f\n"
- "str d9, [x9], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
+ "str d9, [x28], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
"tbz x11, #1, 138f\n"
- "st1 { v9.s }[2], [x9], #0x4\n"
- "st1 { v13.s }[2], [x25], #0x4\n"
- "st1 { v17.s }[2], [x24], #0x4\n"
+ "st1 { v9.s }[2], [x28], #0x4\n"
+ "st1 { v13.s }[2], [x24], #0x4\n"
+ "st1 { v17.s }[2], [x23], #0x4\n"
"tbz x11, #0, 145f\n"
- "st1 { v9.h }[6], [x9]\n"
- "st1 { v13.h }[6], [x25]\n"
- "st1 { v17.h }[6], [x24]\n"
+ "st1 { v9.h }[6], [x28]\n"
+ "st1 { v13.h }[6], [x24]\n"
+ "st1 { v17.h }[6], [x23]\n"
"b 145f\n"
"138:" // Height 3: Partial direct writeback: partial_1_12
"tbz x11, #0, 145f\n"
- "st1 { v9.h }[4], [x9]\n"
- "st1 { v13.h }[4], [x25]\n"
- "st1 { v17.h }[4], [x24]\n"
+ "st1 { v9.h }[4], [x28]\n"
+ "st1 { v13.h }[4], [x24]\n"
+ "st1 { v17.h }[4], [x23]\n"
"b 145f\n"
"139:" // Height 3: Partial direct writeback: partial_2_8
"tbz x11, #1, 140f\n"
- "str s9, [x9], #0x4\n"
- "str s13, [x25], #0x4\n"
- "str s17, [x24], #0x4\n"
+ "str s9, [x28], #0x4\n"
+ "str s13, [x24], #0x4\n"
+ "str s17, [x23], #0x4\n"
"tbz x11, #0, 145f\n"
- "st1 { v9.h }[2], [x9]\n"
- "st1 { v13.h }[2], [x25]\n"
- "st1 { v17.h }[2], [x24]\n"
+ "st1 { v9.h }[2], [x28]\n"
+ "st1 { v13.h }[2], [x24]\n"
+ "st1 { v17.h }[2], [x23]\n"
"b 145f\n"
"140:" // Height 3: Partial direct writeback: partial_1_8
"tbz x11, #0, 145f\n"
- "str h9, [x9, #0x0]\n"
- "str h13, [x25, #0x0]\n"
- "str h17, [x24, #0x0]\n"
+ "str h9, [x28, #0x0]\n"
+ "str h13, [x24, #0x0]\n"
+ "str h17, [x23, #0x0]\n"
"b 145f\n"
"141:" // Height 3: Partial direct writeback: partial_4_0
"tbz x11, #2, 143f\n"
- "str d8, [x9], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
+ "str d8, [x28], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
"tbz x11, #1, 142f\n"
- "st1 { v8.s }[2], [x9], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
- "st1 { v16.s }[2], [x24], #0x4\n"
+ "st1 { v8.s }[2], [x28], #0x4\n"
+ "st1 { v12.s }[2], [x24], #0x4\n"
+ "st1 { v16.s }[2], [x23], #0x4\n"
"tbz x11, #0, 145f\n"
- "st1 { v8.h }[6], [x9]\n"
- "st1 { v12.h }[6], [x25]\n"
- "st1 { v16.h }[6], [x24]\n"
+ "st1 { v8.h }[6], [x28]\n"
+ "st1 { v12.h }[6], [x24]\n"
+ "st1 { v16.h }[6], [x23]\n"
"b 145f\n"
"142:" // Height 3: Partial direct writeback: partial_1_4
"tbz x11, #0, 145f\n"
- "st1 { v8.h }[4], [x9]\n"
- "st1 { v12.h }[4], [x25]\n"
- "st1 { v16.h }[4], [x24]\n"
+ "st1 { v8.h }[4], [x28]\n"
+ "st1 { v12.h }[4], [x24]\n"
+ "st1 { v16.h }[4], [x23]\n"
"b 145f\n"
"143:" // Height 3: Partial direct writeback: partial_2_0
"tbz x11, #1, 144f\n"
- "str s8, [x9], #0x4\n"
- "str s12, [x25], #0x4\n"
- "str s16, [x24], #0x4\n"
+ "str s8, [x28], #0x4\n"
+ "str s12, [x24], #0x4\n"
+ "str s16, [x23], #0x4\n"
"tbz x11, #0, 145f\n"
- "st1 { v8.h }[2], [x9]\n"
- "st1 { v12.h }[2], [x25]\n"
- "st1 { v16.h }[2], [x24]\n"
+ "st1 { v8.h }[2], [x28]\n"
+ "st1 { v12.h }[2], [x24]\n"
+ "st1 { v16.h }[2], [x23]\n"
"b 145f\n"
"144:" // Height 3: Partial direct writeback: partial_1_0
- "str h8, [x9, #0x0]\n"
- "str h12, [x25, #0x0]\n"
- "str h16, [x24, #0x0]\n"
+ "str h8, [x28, #0x0]\n"
+ "str h12, [x24, #0x0]\n"
+ "str h16, [x23, #0x0]\n"
"145:" // Height 3: Partial direct writeback: Done
"b 147f\n"
"146:" // Height 3: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
"147:" // Height 3: Writeback done
"subs x11, x11, #0x20\n"
"bgt 100b\n"
"b 296f\n"
"148:" // Height 4
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"149:" // Height 4: Column loop
- "cbz x12, 150f\n"
- "ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
+ "cbz x9, 150f\n"
+ "ldr q8, [x9, #0x0]\n"
"mov v12.16b, v8.16b\n"
+ "ldr q9, [x9, #0x10]\n"
+ "mov v16.16b, v8.16b\n"
+ "ldr q10, [x9, #0x20]\n"
+ "mov v20.16b, v8.16b\n"
+ "ldr q11, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
+ "mov v17.16b, v9.16b\n"
"mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
- "mov v16.16b, v8.16b\n"
- "mov v17.16b, v9.16b\n"
- "add x12, x12, #0x40\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
- "mov v20.16b, v8.16b\n"
"mov v21.16b, v9.16b\n"
"mov v22.16b, v10.16b\n"
"mov v23.16b, v11.16b\n"
"b 169f\n"
"150:" // Height 4: no bias
"tbz %x[flags], #0, 168f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x11, #0x20\n"
- "add x23, x24, x20, LSL #1\n"
+ "add x24, x28, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
"bge 167f\n"
"tbz x11, #4, 158f\n"
- "ld1 { v8.8h }, [x9], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v20.8h }, [x23], #0x10\n"
- "ld1 { v9.8h }, [x9], #0x10\n"
- "ld1 { v13.8h }, [x25], #0x10\n"
- "ld1 { v17.8h }, [x24], #0x10\n"
- "ld1 { v21.8h }, [x23], #0x10\n"
+ "ld1 { v8.8h }, [x28], #0x10\n"
+ "ld1 { v12.8h }, [x24], #0x10\n"
+ "ld1 { v16.8h }, [x23], #0x10\n"
+ "ld1 { v20.8h }, [x22], #0x10\n"
+ "ld1 { v9.8h }, [x28], #0x10\n"
+ "ld1 { v13.8h }, [x24], #0x10\n"
+ "ld1 { v17.8h }, [x23], #0x10\n"
+ "ld1 { v21.8h }, [x22], #0x10\n"
"tbz x11, #3, 154f\n"
- "ld1 { v10.8h }, [x9], #0x10\n"
- "ld1 { v14.8h }, [x25], #0x10\n"
- "ld1 { v18.8h }, [x24], #0x10\n"
- "ld1 { v22.8h }, [x23], #0x10\n"
+ "ld1 { v10.8h }, [x28], #0x10\n"
+ "ld1 { v14.8h }, [x24], #0x10\n"
+ "ld1 { v18.8h }, [x23], #0x10\n"
+ "ld1 { v22.8h }, [x22], #0x10\n"
"tbz x11, #2, 152f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
"tbz x11, #1, 151f\n"
- "ld1 { v11.s }[2], [x9], #0x4\n"
- "ld1 { v15.s }[2], [x25], #0x4\n"
- "mov x20, #0x3c\n"
- "ld1 { v19.s }[2], [x24], #0x4\n"
- "ld1 { v23.s }[2], [x23], #0x4\n"
+ "mov x19, #0x3c\n"
+ "ld1 { v11.s }[2], [x28], #0x4\n"
+ "ld1 { v15.s }[2], [x24], #0x4\n"
+ "ld1 { v19.s }[2], [x23], #0x4\n"
+ "ld1 { v23.s }[2], [x22], #0x4\n"
"tbz x11, #0, 166f\n"
- "ld1 { v11.h }[6], [x9]\n"
- "ld1 { v15.h }[6], [x25]\n"
- "ld1 { v19.h }[6], [x24]\n"
- "ld1 { v23.h }[6], [x23]\n"
+ "ld1 { v11.h }[6], [x28]\n"
+ "ld1 { v15.h }[6], [x24]\n"
+ "ld1 { v19.h }[6], [x23]\n"
+ "ld1 { v23.h }[6], [x22]\n"
"b 166f\n"
"151:" // Height 4: Partial accumulate: partial_1_28
- "mov x20, #0x38\n"
+ "mov x19, #0x38\n"
"tbz x11, #0, 166f\n"
- "ld1 { v11.h }[4], [x9]\n"
- "ld1 { v15.h }[4], [x25]\n"
- "ld1 { v19.h }[4], [x24]\n"
- "ld1 { v23.h }[4], [x23]\n"
+ "ld1 { v11.h }[4], [x28]\n"
+ "ld1 { v15.h }[4], [x24]\n"
+ "ld1 { v19.h }[4], [x23]\n"
+ "ld1 { v23.h }[4], [x22]\n"
"b 166f\n"
"152:" // Height 4: Partial accumulate: partial_2_24
"tbz x11, #1, 153f\n"
- "ldr s11, [x9], #0x4\n"
- "ldr s15, [x25], #0x4\n"
- "mov x20, #0x34\n"
- "ldr s19, [x24], #0x4\n"
- "ldr s23, [x23], #0x4\n"
+ "ldr s11, [x28], #0x4\n"
+ "ldr s15, [x24], #0x4\n"
+ "mov x19, #0x34\n"
+ "ldr s19, [x23], #0x4\n"
+ "ldr s23, [x22], #0x4\n"
"tbz x11, #0, 166f\n"
- "ld1 { v11.h }[2], [x9]\n"
- "ld1 { v15.h }[2], [x25]\n"
- "ld1 { v19.h }[2], [x24]\n"
- "ld1 { v23.h }[2], [x23]\n"
+ "ld1 { v11.h }[2], [x28]\n"
+ "ld1 { v15.h }[2], [x24]\n"
+ "ld1 { v19.h }[2], [x23]\n"
+ "ld1 { v23.h }[2], [x22]\n"
"b 166f\n"
"153:" // Height 4: Partial accumulate: partial_1_24
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x11, #0, 166f\n"
- "ldr h11, [x9, #0x0]\n"
- "ldr h15, [x25, #0x0]\n"
- "ldr h19, [x24, #0x0]\n"
- "ldr h23, [x23, #0x0]\n"
+ "ldr h11, [x28, #0x0]\n"
+ "ldr h15, [x24, #0x0]\n"
+ "ldr h19, [x23, #0x0]\n"
+ "ldr h23, [x22, #0x0]\n"
"b 166f\n"
"154:" // Height 4: Partial accumulate: partial_4_16
"tbz x11, #2, 156f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
"tbz x11, #1, 155f\n"
- "ld1 { v10.s }[2], [x9], #0x4\n"
- "ld1 { v14.s }[2], [x25], #0x4\n"
- "mov x20, #0x2c\n"
- "ld1 { v18.s }[2], [x24], #0x4\n"
- "ld1 { v22.s }[2], [x23], #0x4\n"
+ "mov x19, #0x2c\n"
+ "ld1 { v10.s }[2], [x28], #0x4\n"
+ "ld1 { v14.s }[2], [x24], #0x4\n"
+ "ld1 { v18.s }[2], [x23], #0x4\n"
+ "ld1 { v22.s }[2], [x22], #0x4\n"
"tbz x11, #0, 166f\n"
- "ld1 { v10.h }[6], [x9]\n"
- "ld1 { v14.h }[6], [x25]\n"
- "ld1 { v18.h }[6], [x24]\n"
- "ld1 { v22.h }[6], [x23]\n"
+ "ld1 { v10.h }[6], [x28]\n"
+ "ld1 { v14.h }[6], [x24]\n"
+ "ld1 { v18.h }[6], [x23]\n"
+ "ld1 { v22.h }[6], [x22]\n"
"b 166f\n"
"155:" // Height 4: Partial accumulate: partial_1_20
- "mov x20, #0x28\n"
+ "mov x19, #0x28\n"
"tbz x11, #0, 166f\n"
- "ld1 { v10.h }[4], [x9]\n"
- "ld1 { v14.h }[4], [x25]\n"
- "ld1 { v18.h }[4], [x24]\n"
- "ld1 { v22.h }[4], [x23]\n"
+ "ld1 { v10.h }[4], [x28]\n"
+ "ld1 { v14.h }[4], [x24]\n"
+ "ld1 { v18.h }[4], [x23]\n"
+ "ld1 { v22.h }[4], [x22]\n"
"b 166f\n"
"156:" // Height 4: Partial accumulate: partial_2_16
"tbz x11, #1, 157f\n"
- "ldr s10, [x9], #0x4\n"
- "ldr s14, [x25], #0x4\n"
- "mov x20, #0x24\n"
- "ldr s18, [x24], #0x4\n"
- "ldr s22, [x23], #0x4\n"
+ "ldr s10, [x28], #0x4\n"
+ "ldr s14, [x24], #0x4\n"
+ "mov x19, #0x24\n"
+ "ldr s18, [x23], #0x4\n"
+ "ldr s22, [x22], #0x4\n"
"tbz x11, #0, 166f\n"
- "ld1 { v10.h }[2], [x9]\n"
- "ld1 { v14.h }[2], [x25]\n"
- "ld1 { v18.h }[2], [x24]\n"
- "ld1 { v22.h }[2], [x23]\n"
+ "ld1 { v10.h }[2], [x28]\n"
+ "ld1 { v14.h }[2], [x24]\n"
+ "ld1 { v18.h }[2], [x23]\n"
+ "ld1 { v22.h }[2], [x22]\n"
"b 166f\n"
"157:" // Height 4: Partial accumulate: partial_1_16
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x11, #0, 166f\n"
- "ldr h10, [x9, #0x0]\n"
- "ldr h14, [x25, #0x0]\n"
- "ldr h18, [x24, #0x0]\n"
- "ldr h22, [x23, #0x0]\n"
+ "ldr h10, [x28, #0x0]\n"
+ "ldr h14, [x24, #0x0]\n"
+ "ldr h18, [x23, #0x0]\n"
+ "ldr h22, [x22, #0x0]\n"
"b 166f\n"
"158:" // Height 4: Partial accumulate: partial_8_0
"tbz x11, #3, 162f\n"
- "ld1 { v8.8h }, [x9], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v20.8h }, [x23], #0x10\n"
+ "ld1 { v8.8h }, [x28], #0x10\n"
+ "ld1 { v12.8h }, [x24], #0x10\n"
+ "ld1 { v16.8h }, [x23], #0x10\n"
+ "ld1 { v20.8h }, [x22], #0x10\n"
"tbz x11, #2, 160f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
"tbz x11, #1, 159f\n"
- "ld1 { v9.s }[2], [x9], #0x4\n"
- "ld1 { v13.s }[2], [x25], #0x4\n"
- "mov x20, #0x1c\n"
- "ld1 { v17.s }[2], [x24], #0x4\n"
- "ld1 { v21.s }[2], [x23], #0x4\n"
+ "mov x19, #0x1c\n"
+ "ld1 { v9.s }[2], [x28], #0x4\n"
+ "ld1 { v13.s }[2], [x24], #0x4\n"
+ "ld1 { v17.s }[2], [x23], #0x4\n"
+ "ld1 { v21.s }[2], [x22], #0x4\n"
"tbz x11, #0, 166f\n"
- "ld1 { v9.h }[6], [x9]\n"
- "ld1 { v13.h }[6], [x25]\n"
- "ld1 { v17.h }[6], [x24]\n"
- "ld1 { v21.h }[6], [x23]\n"
+ "ld1 { v9.h }[6], [x28]\n"
+ "ld1 { v13.h }[6], [x24]\n"
+ "ld1 { v17.h }[6], [x23]\n"
+ "ld1 { v21.h }[6], [x22]\n"
"b 166f\n"
"159:" // Height 4: Partial accumulate: partial_1_12
- "mov x20, #0x18\n"
+ "mov x19, #0x18\n"
"tbz x11, #0, 166f\n"
- "ld1 { v9.h }[4], [x9]\n"
- "ld1 { v13.h }[4], [x25]\n"
- "ld1 { v17.h }[4], [x24]\n"
- "ld1 { v21.h }[4], [x23]\n"
+ "ld1 { v9.h }[4], [x28]\n"
+ "ld1 { v13.h }[4], [x24]\n"
+ "ld1 { v17.h }[4], [x23]\n"
+ "ld1 { v21.h }[4], [x22]\n"
"b 166f\n"
"160:" // Height 4: Partial accumulate: partial_2_8
"tbz x11, #1, 161f\n"
- "ldr s9, [x9], #0x4\n"
- "ldr s13, [x25], #0x4\n"
- "mov x20, #0x14\n"
- "ldr s17, [x24], #0x4\n"
- "ldr s21, [x23], #0x4\n"
+ "ldr s9, [x28], #0x4\n"
+ "ldr s13, [x24], #0x4\n"
+ "mov x19, #0x14\n"
+ "ldr s17, [x23], #0x4\n"
+ "ldr s21, [x22], #0x4\n"
"tbz x11, #0, 166f\n"
- "ld1 { v9.h }[2], [x9]\n"
- "ld1 { v13.h }[2], [x25]\n"
- "ld1 { v17.h }[2], [x24]\n"
- "ld1 { v21.h }[2], [x23]\n"
+ "ld1 { v9.h }[2], [x28]\n"
+ "ld1 { v13.h }[2], [x24]\n"
+ "ld1 { v17.h }[2], [x23]\n"
+ "ld1 { v21.h }[2], [x22]\n"
"b 166f\n"
"161:" // Height 4: Partial accumulate: partial_1_8
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x11, #0, 166f\n"
- "ldr h9, [x9, #0x0]\n"
- "ldr h13, [x25, #0x0]\n"
- "ldr h17, [x24, #0x0]\n"
- "ldr h21, [x23, #0x0]\n"
+ "ldr h9, [x28, #0x0]\n"
+ "ldr h13, [x24, #0x0]\n"
+ "ldr h17, [x23, #0x0]\n"
+ "ldr h21, [x22, #0x0]\n"
"b 166f\n"
"162:" // Height 4: Partial accumulate: partial_4_0
"tbz x11, #2, 164f\n"
- "ldr d8, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
+ "ldr d8, [x28], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "ldr d16, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
"tbz x11, #1, 163f\n"
- "ld1 { v8.s }[2], [x9], #0x4\n"
- "ld1 { v12.s }[2], [x25], #0x4\n"
- "mov x20, #0xc\n"
- "ld1 { v16.s }[2], [x24], #0x4\n"
- "ld1 { v20.s }[2], [x23], #0x4\n"
+ "mov x19, #0xc\n"
+ "ld1 { v8.s }[2], [x28], #0x4\n"
+ "ld1 { v12.s }[2], [x24], #0x4\n"
+ "ld1 { v16.s }[2], [x23], #0x4\n"
+ "ld1 { v20.s }[2], [x22], #0x4\n"
"tbz x11, #0, 166f\n"
- "ld1 { v8.h }[6], [x9]\n"
- "ld1 { v12.h }[6], [x25]\n"
- "ld1 { v16.h }[6], [x24]\n"
- "ld1 { v20.h }[6], [x23]\n"
+ "ld1 { v8.h }[6], [x28]\n"
+ "ld1 { v12.h }[6], [x24]\n"
+ "ld1 { v16.h }[6], [x23]\n"
+ "ld1 { v20.h }[6], [x22]\n"
"b 166f\n"
"163:" // Height 4: Partial accumulate: partial_1_4
- "mov x20, #0x8\n"
+ "mov x19, #0x8\n"
"tbz x11, #0, 166f\n"
- "ld1 { v8.h }[4], [x9]\n"
- "ld1 { v12.h }[4], [x25]\n"
- "ld1 { v16.h }[4], [x24]\n"
- "ld1 { v20.h }[4], [x23]\n"
+ "ld1 { v8.h }[4], [x28]\n"
+ "ld1 { v12.h }[4], [x24]\n"
+ "ld1 { v16.h }[4], [x23]\n"
+ "ld1 { v20.h }[4], [x22]\n"
"b 166f\n"
"164:" // Height 4: Partial accumulate: partial_2_0
"tbz x11, #1, 165f\n"
- "ldr s8, [x9], #0x4\n"
- "ldr s12, [x25], #0x4\n"
- "mov x20, #0x4\n"
- "ldr s16, [x24], #0x4\n"
- "ldr s20, [x23], #0x4\n"
+ "ldr s8, [x28], #0x4\n"
+ "ldr s12, [x24], #0x4\n"
+ "mov x19, #0x4\n"
+ "ldr s16, [x23], #0x4\n"
+ "ldr s20, [x22], #0x4\n"
"tbz x11, #0, 166f\n"
- "ld1 { v8.h }[2], [x9]\n"
- "ld1 { v12.h }[2], [x25]\n"
- "ld1 { v16.h }[2], [x24]\n"
- "ld1 { v20.h }[2], [x23]\n"
+ "ld1 { v8.h }[2], [x28]\n"
+ "ld1 { v12.h }[2], [x24]\n"
+ "ld1 { v16.h }[2], [x23]\n"
+ "ld1 { v20.h }[2], [x22]\n"
"b 166f\n"
"165:" // Height 4: Partial accumulate: partial_1_0
- "ldr h8, [x9, #0x0]\n"
- "ldr h12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr h16, [x24, #0x0]\n"
- "ldr h20, [x23, #0x0]\n"
+ "ldr h8, [x28, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr h12, [x24, #0x0]\n"
+ "ldr h16, [x23, #0x0]\n"
+ "ldr h20, [x22, #0x0]\n"
"166:" // Height 4: Partial accumulate: Done
- "sub x9, x9, x20\n"
+ "sub x28, x28, x19\n"
"b 169f\n"
"167:" // Height 4: full accumulate
- "ldr q8, [x9, #0x0]\n"
- "ldr q9, [x9, #0x10]\n"
- "ldr q10, [x9, #0x20]\n"
- "ldr q11, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
+ "ldr q8, [x28, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q10, [x28, #0x20]\n"
+ "ldr q11, [x28, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x0]\n"
+ "ldr q17, [x23, #0x10]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "ldr q19, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
"b 169f\n"
"168:" // Height 4: no accumulate
"movi v8.16b, #0x0\n"
@@ -2216,69 +2216,69 @@ void a64_hybrid_fp16_mla_6x32 (
"movi v22.16b, #0x0\n"
"movi v23.16b, #0x0\n"
"169:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"170:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 171f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 172f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 172f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
"b 172f\n"
"171:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
"172:" // Height 4: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"blt 175f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x10\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q3, [x23, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x10\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
"blt 174f\n"
"173:" // Height 4: Multiply loop: Main loop head
"fmla v8.8h, v6.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "add x25, x25, #0x10\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
- "sub x27, x27, #0x8\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "add x23, x23, #0x10\n"
"fmla v20.8h, v6.8h, v3.h[0]\n"
"ldr q6, [x10, #0x20]\n"
- "add x25, x25, #0x10\n"
+ "add x22, x22, #0x10\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "sub x26, x26, #0x8\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
- "add x24, x24, #0x10\n"
- "add x23, x23, #0x10\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "cmp x26, #0x10\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
"fmla v21.8h, v7.8h, v3.h[0]\n"
"ldr q7, [x10, #0x30]\n"
- "cmp x27, #0x10\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"fmla v18.8h, v6.8h, v2.h[0]\n"
"fmla v22.8h, v6.8h, v3.h[0]\n"
"ldr q6, [x10, #0x40]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
"fmla v15.8h, v7.8h, v1.h[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
"fmla v19.8h, v7.8h, v2.h[0]\n"
"fmla v23.8h, v7.8h, v3.h[0]\n"
"ldr q7, [x10, #0x50]\n"
@@ -2419,40 +2419,40 @@ void a64_hybrid_fp16_mla_6x32 (
"fmla v22.8h, v6.8h, v3.h[7]\n"
"ldr q6, [x10, #0x0]\n"
"fmla v11.8h, v7.8h, v0.h[7]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
"fmla v15.8h, v7.8h, v1.h[7]\n"
- "ldr q1, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
"fmla v19.8h, v7.8h, v2.h[7]\n"
- "ldr q2, [x24, #0x0]\n"
+ "ldr q2, [x23, #0x0]\n"
"fmla v23.8h, v7.8h, v3.h[7]\n"
- "ldr q3, [x23, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q3, [x22, #0x0]\n"
"bge 173b\n"
"174:" // Height 4: Multiply loop: Single iteration only
"fmla v8.8h, v6.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "sub x26, x26, #0x8\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
- "fmla v20.8h, v6.8h, v3.h[0]\n"
- "ldr q6, [x10, #0x20]\n"
"add x24, x24, #0x10\n"
+ "fmla v20.8h, v6.8h, v3.h[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "add x23, x23, #0x10\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
+ "ldr q6, [x10, #0x20]\n"
+ "add x22, x22, #0x10\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
- "add x23, x23, #0x10\n"
- "sub x27, x27, #0x8\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
"fmla v21.8h, v7.8h, v3.h[0]\n"
"ldr q7, [x10, #0x30]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"fmla v18.8h, v6.8h, v2.h[0]\n"
"fmla v22.8h, v6.8h, v3.h[0]\n"
"ldr q6, [x10, #0x40]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
"fmla v15.8h, v7.8h, v1.h[0]\n"
"fmla v19.8h, v7.8h, v2.h[0]\n"
@@ -2598,16 +2598,16 @@ void a64_hybrid_fp16_mla_6x32 (
"fmla v19.8h, v7.8h, v2.h[7]\n"
"fmla v23.8h, v7.8h, v3.h[7]\n"
"175:" // Height 4: Multiply loop: Main loop skip
- "cbz x27, 177f\n"
+ "cbz x26, 177f\n"
"176:" // Height 4: Multiply loop: Odd block loop
- "ldr h0, [x26], #0x2\n"
- "ldr h1, [x25], #0x2\n"
- "sub x27, x27, #0x1\n"
- "ldr h2, [x24], #0x2\n"
- "ldr h3, [x23], #0x2\n"
+ "ldr h0, [x25], #0x2\n"
+ "sub x26, x26, #0x1\n"
+ "ldr h1, [x24], #0x2\n"
+ "ldr h2, [x23], #0x2\n"
+ "ldr h3, [x22], #0x2\n"
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
"fmla v8.8h, v6.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
"fmla v20.8h, v6.8h, v3.h[0]\n"
@@ -2626,570 +2626,570 @@ void a64_hybrid_fp16_mla_6x32 (
"fmla v15.8h, v7.8h, v1.h[0]\n"
"fmla v19.8h, v7.8h, v2.h[0]\n"
"fmla v23.8h, v7.8h, v3.h[0]\n"
- "cbnz x27, 176b\n"
+ "cbnz x26, 176b\n"
"177:" // Height 4: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 170b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "add x23, x24, x20, LSL #1\n"
- "prfm pstl1keep, [x25, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x24, x28, x19, LSL #1\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x19, LSL #1\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #1\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
"tbz %x[flags], #1, 178f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.8h }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.8h }, [x20]\n"
- "fmin v8.8h, v8.8h, v1.8h\n"
- "fmin v9.8h, v9.8h, v1.8h\n"
- "fmin v10.8h, v10.8h, v1.8h\n"
- "fmin v11.8h, v11.8h, v1.8h\n"
- "fmin v12.8h, v12.8h, v1.8h\n"
- "fmin v13.8h, v13.8h, v1.8h\n"
- "fmin v14.8h, v14.8h, v1.8h\n"
- "fmin v15.8h, v15.8h, v1.8h\n"
- "fmin v16.8h, v16.8h, v1.8h\n"
- "fmin v17.8h, v17.8h, v1.8h\n"
- "fmin v18.8h, v18.8h, v1.8h\n"
- "fmin v19.8h, v19.8h, v1.8h\n"
- "fmin v20.8h, v20.8h, v1.8h\n"
- "fmin v21.8h, v21.8h, v1.8h\n"
- "fmin v22.8h, v22.8h, v1.8h\n"
- "fmin v23.8h, v23.8h, v1.8h\n"
- "fmax v8.8h, v8.8h, v0.8h\n"
- "fmax v9.8h, v9.8h, v0.8h\n"
- "fmax v10.8h, v10.8h, v0.8h\n"
- "fmax v11.8h, v11.8h, v0.8h\n"
- "fmax v12.8h, v12.8h, v0.8h\n"
- "fmax v13.8h, v13.8h, v0.8h\n"
- "fmax v14.8h, v14.8h, v0.8h\n"
- "fmax v15.8h, v15.8h, v0.8h\n"
- "fmax v16.8h, v16.8h, v0.8h\n"
- "fmax v17.8h, v17.8h, v0.8h\n"
- "fmax v18.8h, v18.8h, v0.8h\n"
- "fmax v19.8h, v19.8h, v0.8h\n"
- "fmax v20.8h, v20.8h, v0.8h\n"
- "fmax v21.8h, v21.8h, v0.8h\n"
- "fmax v22.8h, v22.8h, v0.8h\n"
- "fmax v23.8h, v23.8h, v0.8h\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.8h }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.8h }, [x19]\n"
+ "fmin v8.8h, v8.8h, v0.8h\n"
+ "fmin v9.8h, v9.8h, v0.8h\n"
+ "fmin v10.8h, v10.8h, v0.8h\n"
+ "fmin v11.8h, v11.8h, v0.8h\n"
+ "fmax v8.8h, v8.8h, v1.8h\n"
+ "fmax v9.8h, v9.8h, v1.8h\n"
+ "fmax v10.8h, v10.8h, v1.8h\n"
+ "fmax v11.8h, v11.8h, v1.8h\n"
+ "fmin v12.8h, v12.8h, v0.8h\n"
+ "fmin v13.8h, v13.8h, v0.8h\n"
+ "fmin v14.8h, v14.8h, v0.8h\n"
+ "fmax v12.8h, v12.8h, v1.8h\n"
+ "fmax v13.8h, v13.8h, v1.8h\n"
+ "fmax v14.8h, v14.8h, v1.8h\n"
+ "fmin v15.8h, v15.8h, v0.8h\n"
+ "fmin v16.8h, v16.8h, v0.8h\n"
+ "fmin v17.8h, v17.8h, v0.8h\n"
+ "fmax v15.8h, v15.8h, v1.8h\n"
+ "fmax v16.8h, v16.8h, v1.8h\n"
+ "fmax v17.8h, v17.8h, v1.8h\n"
+ "fmin v18.8h, v18.8h, v0.8h\n"
+ "fmin v19.8h, v19.8h, v0.8h\n"
+ "fmin v20.8h, v20.8h, v0.8h\n"
+ "fmax v18.8h, v18.8h, v1.8h\n"
+ "fmax v19.8h, v19.8h, v1.8h\n"
+ "fmax v20.8h, v20.8h, v1.8h\n"
+ "fmin v21.8h, v21.8h, v0.8h\n"
+ "fmin v22.8h, v22.8h, v0.8h\n"
+ "fmin v23.8h, v23.8h, v0.8h\n"
+ "fmax v21.8h, v21.8h, v1.8h\n"
+ "fmax v22.8h, v22.8h, v1.8h\n"
+ "fmax v23.8h, v23.8h, v1.8h\n"
"178:" // Height 4: No activation
"cmp x11, #0x20\n"
"bge 195f\n"
"tbz x11, #4, 186f\n"
- "st1 { v8.8h }, [x9], #0x10\n"
- "st1 { v9.8h }, [x9], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v13.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v17.8h }, [x24], #0x10\n"
- "st1 { v20.8h }, [x23], #0x10\n"
- "st1 { v21.8h }, [x23], #0x10\n"
+ "st1 { v8.8h }, [x28], #0x10\n"
+ "st1 { v9.8h }, [x28], #0x10\n"
+ "st1 { v12.8h }, [x24], #0x10\n"
+ "st1 { v13.8h }, [x24], #0x10\n"
+ "st1 { v16.8h }, [x23], #0x10\n"
+ "st1 { v17.8h }, [x23], #0x10\n"
+ "st1 { v20.8h }, [x22], #0x10\n"
+ "st1 { v21.8h }, [x22], #0x10\n"
"tbz x11, #3, 182f\n"
- "st1 { v10.8h }, [x9], #0x10\n"
- "st1 { v14.8h }, [x25], #0x10\n"
- "st1 { v18.8h }, [x24], #0x10\n"
- "st1 { v22.8h }, [x23], #0x10\n"
+ "st1 { v10.8h }, [x28], #0x10\n"
+ "st1 { v14.8h }, [x24], #0x10\n"
+ "st1 { v18.8h }, [x23], #0x10\n"
+ "st1 { v22.8h }, [x22], #0x10\n"
"tbz x11, #2, 180f\n"
- "str d11, [x9], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
+ "str d11, [x28], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "str d23, [x22], #0x8\n"
"tbz x11, #1, 179f\n"
- "st1 { v11.s }[2], [x9], #0x4\n"
- "st1 { v15.s }[2], [x25], #0x4\n"
- "st1 { v19.s }[2], [x24], #0x4\n"
- "st1 { v23.s }[2], [x23], #0x4\n"
+ "st1 { v11.s }[2], [x28], #0x4\n"
+ "st1 { v15.s }[2], [x24], #0x4\n"
+ "st1 { v19.s }[2], [x23], #0x4\n"
+ "st1 { v23.s }[2], [x22], #0x4\n"
"tbz x11, #0, 194f\n"
- "st1 { v11.h }[6], [x9]\n"
- "st1 { v15.h }[6], [x25]\n"
- "st1 { v19.h }[6], [x24]\n"
- "st1 { v23.h }[6], [x23]\n"
+ "st1 { v11.h }[6], [x28]\n"
+ "st1 { v15.h }[6], [x24]\n"
+ "st1 { v19.h }[6], [x23]\n"
+ "st1 { v23.h }[6], [x22]\n"
"b 194f\n"
"179:" // Height 4: Partial direct writeback: partial_1_28
"tbz x11, #0, 194f\n"
- "st1 { v11.h }[4], [x9]\n"
- "st1 { v15.h }[4], [x25]\n"
- "st1 { v19.h }[4], [x24]\n"
- "st1 { v23.h }[4], [x23]\n"
+ "st1 { v11.h }[4], [x28]\n"
+ "st1 { v15.h }[4], [x24]\n"
+ "st1 { v19.h }[4], [x23]\n"
+ "st1 { v23.h }[4], [x22]\n"
"b 194f\n"
"180:" // Height 4: Partial direct writeback: partial_2_24
"tbz x11, #1, 181f\n"
- "str s11, [x9], #0x4\n"
- "str s15, [x25], #0x4\n"
- "str s19, [x24], #0x4\n"
- "str s23, [x23], #0x4\n"
+ "str s11, [x28], #0x4\n"
+ "str s15, [x24], #0x4\n"
+ "str s19, [x23], #0x4\n"
+ "str s23, [x22], #0x4\n"
"tbz x11, #0, 194f\n"
- "st1 { v11.h }[2], [x9]\n"
- "st1 { v15.h }[2], [x25]\n"
- "st1 { v19.h }[2], [x24]\n"
- "st1 { v23.h }[2], [x23]\n"
+ "st1 { v11.h }[2], [x28]\n"
+ "st1 { v15.h }[2], [x24]\n"
+ "st1 { v19.h }[2], [x23]\n"
+ "st1 { v23.h }[2], [x22]\n"
"b 194f\n"
"181:" // Height 4: Partial direct writeback: partial_1_24
"tbz x11, #0, 194f\n"
- "str h11, [x9, #0x0]\n"
- "str h15, [x25, #0x0]\n"
- "str h19, [x24, #0x0]\n"
- "str h23, [x23, #0x0]\n"
+ "str h11, [x28, #0x0]\n"
+ "str h15, [x24, #0x0]\n"
+ "str h19, [x23, #0x0]\n"
+ "str h23, [x22, #0x0]\n"
"b 194f\n"
"182:" // Height 4: Partial direct writeback: partial_4_16
"tbz x11, #2, 184f\n"
- "str d10, [x9], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
+ "str d10, [x28], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
+ "str d22, [x22], #0x8\n"
"tbz x11, #1, 183f\n"
- "st1 { v10.s }[2], [x9], #0x4\n"
- "st1 { v14.s }[2], [x25], #0x4\n"
- "st1 { v18.s }[2], [x24], #0x4\n"
- "st1 { v22.s }[2], [x23], #0x4\n"
+ "st1 { v10.s }[2], [x28], #0x4\n"
+ "st1 { v14.s }[2], [x24], #0x4\n"
+ "st1 { v18.s }[2], [x23], #0x4\n"
+ "st1 { v22.s }[2], [x22], #0x4\n"
"tbz x11, #0, 194f\n"
- "st1 { v10.h }[6], [x9]\n"
- "st1 { v14.h }[6], [x25]\n"
- "st1 { v18.h }[6], [x24]\n"
- "st1 { v22.h }[6], [x23]\n"
+ "st1 { v10.h }[6], [x28]\n"
+ "st1 { v14.h }[6], [x24]\n"
+ "st1 { v18.h }[6], [x23]\n"
+ "st1 { v22.h }[6], [x22]\n"
"b 194f\n"
"183:" // Height 4: Partial direct writeback: partial_1_20
"tbz x11, #0, 194f\n"
- "st1 { v10.h }[4], [x9]\n"
- "st1 { v14.h }[4], [x25]\n"
- "st1 { v18.h }[4], [x24]\n"
- "st1 { v22.h }[4], [x23]\n"
+ "st1 { v10.h }[4], [x28]\n"
+ "st1 { v14.h }[4], [x24]\n"
+ "st1 { v18.h }[4], [x23]\n"
+ "st1 { v22.h }[4], [x22]\n"
"b 194f\n"
"184:" // Height 4: Partial direct writeback: partial_2_16
"tbz x11, #1, 185f\n"
- "str s10, [x9], #0x4\n"
- "str s14, [x25], #0x4\n"
- "str s18, [x24], #0x4\n"
- "str s22, [x23], #0x4\n"
+ "str s10, [x28], #0x4\n"
+ "str s14, [x24], #0x4\n"
+ "str s18, [x23], #0x4\n"
+ "str s22, [x22], #0x4\n"
"tbz x11, #0, 194f\n"
- "st1 { v10.h }[2], [x9]\n"
- "st1 { v14.h }[2], [x25]\n"
- "st1 { v18.h }[2], [x24]\n"
- "st1 { v22.h }[2], [x23]\n"
+ "st1 { v10.h }[2], [x28]\n"
+ "st1 { v14.h }[2], [x24]\n"
+ "st1 { v18.h }[2], [x23]\n"
+ "st1 { v22.h }[2], [x22]\n"
"b 194f\n"
"185:" // Height 4: Partial direct writeback: partial_1_16
"tbz x11, #0, 194f\n"
- "str h10, [x9, #0x0]\n"
- "str h14, [x25, #0x0]\n"
- "str h18, [x24, #0x0]\n"
- "str h22, [x23, #0x0]\n"
+ "str h10, [x28, #0x0]\n"
+ "str h14, [x24, #0x0]\n"
+ "str h18, [x23, #0x0]\n"
+ "str h22, [x22, #0x0]\n"
"b 194f\n"
"186:" // Height 4: Partial direct writeback: partial_8_0
"tbz x11, #3, 190f\n"
- "st1 { v8.8h }, [x9], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v20.8h }, [x23], #0x10\n"
+ "st1 { v8.8h }, [x28], #0x10\n"
+ "st1 { v12.8h }, [x24], #0x10\n"
+ "st1 { v16.8h }, [x23], #0x10\n"
+ "st1 { v20.8h }, [x22], #0x10\n"
"tbz x11, #2, 188f\n"
- "str d9, [x9], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
+ "str d9, [x28], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
+ "str d21, [x22], #0x8\n"
"tbz x11, #1, 187f\n"
- "st1 { v9.s }[2], [x9], #0x4\n"
- "st1 { v13.s }[2], [x25], #0x4\n"
- "st1 { v17.s }[2], [x24], #0x4\n"
- "st1 { v21.s }[2], [x23], #0x4\n"
+ "st1 { v9.s }[2], [x28], #0x4\n"
+ "st1 { v13.s }[2], [x24], #0x4\n"
+ "st1 { v17.s }[2], [x23], #0x4\n"
+ "st1 { v21.s }[2], [x22], #0x4\n"
"tbz x11, #0, 194f\n"
- "st1 { v9.h }[6], [x9]\n"
- "st1 { v13.h }[6], [x25]\n"
- "st1 { v17.h }[6], [x24]\n"
- "st1 { v21.h }[6], [x23]\n"
+ "st1 { v9.h }[6], [x28]\n"
+ "st1 { v13.h }[6], [x24]\n"
+ "st1 { v17.h }[6], [x23]\n"
+ "st1 { v21.h }[6], [x22]\n"
"b 194f\n"
"187:" // Height 4: Partial direct writeback: partial_1_12
"tbz x11, #0, 194f\n"
- "st1 { v9.h }[4], [x9]\n"
- "st1 { v13.h }[4], [x25]\n"
- "st1 { v17.h }[4], [x24]\n"
- "st1 { v21.h }[4], [x23]\n"
+ "st1 { v9.h }[4], [x28]\n"
+ "st1 { v13.h }[4], [x24]\n"
+ "st1 { v17.h }[4], [x23]\n"
+ "st1 { v21.h }[4], [x22]\n"
"b 194f\n"
"188:" // Height 4: Partial direct writeback: partial_2_8
"tbz x11, #1, 189f\n"
- "str s9, [x9], #0x4\n"
- "str s13, [x25], #0x4\n"
- "str s17, [x24], #0x4\n"
- "str s21, [x23], #0x4\n"
+ "str s9, [x28], #0x4\n"
+ "str s13, [x24], #0x4\n"
+ "str s17, [x23], #0x4\n"
+ "str s21, [x22], #0x4\n"
"tbz x11, #0, 194f\n"
- "st1 { v9.h }[2], [x9]\n"
- "st1 { v13.h }[2], [x25]\n"
- "st1 { v17.h }[2], [x24]\n"
- "st1 { v21.h }[2], [x23]\n"
+ "st1 { v9.h }[2], [x28]\n"
+ "st1 { v13.h }[2], [x24]\n"
+ "st1 { v17.h }[2], [x23]\n"
+ "st1 { v21.h }[2], [x22]\n"
"b 194f\n"
"189:" // Height 4: Partial direct writeback: partial_1_8
"tbz x11, #0, 194f\n"
- "str h9, [x9, #0x0]\n"
- "str h13, [x25, #0x0]\n"
- "str h17, [x24, #0x0]\n"
- "str h21, [x23, #0x0]\n"
+ "str h9, [x28, #0x0]\n"
+ "str h13, [x24, #0x0]\n"
+ "str h17, [x23, #0x0]\n"
+ "str h21, [x22, #0x0]\n"
"b 194f\n"
"190:" // Height 4: Partial direct writeback: partial_4_0
"tbz x11, #2, 192f\n"
- "str d8, [x9], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
+ "str d8, [x28], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
+ "str d20, [x22], #0x8\n"
"tbz x11, #1, 191f\n"
- "st1 { v8.s }[2], [x9], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
- "st1 { v16.s }[2], [x24], #0x4\n"
- "st1 { v20.s }[2], [x23], #0x4\n"
+ "st1 { v8.s }[2], [x28], #0x4\n"
+ "st1 { v12.s }[2], [x24], #0x4\n"
+ "st1 { v16.s }[2], [x23], #0x4\n"
+ "st1 { v20.s }[2], [x22], #0x4\n"
"tbz x11, #0, 194f\n"
- "st1 { v8.h }[6], [x9]\n"
- "st1 { v12.h }[6], [x25]\n"
- "st1 { v16.h }[6], [x24]\n"
- "st1 { v20.h }[6], [x23]\n"
+ "st1 { v8.h }[6], [x28]\n"
+ "st1 { v12.h }[6], [x24]\n"
+ "st1 { v16.h }[6], [x23]\n"
+ "st1 { v20.h }[6], [x22]\n"
"b 194f\n"
"191:" // Height 4: Partial direct writeback: partial_1_4
"tbz x11, #0, 194f\n"
- "st1 { v8.h }[4], [x9]\n"
- "st1 { v12.h }[4], [x25]\n"
- "st1 { v16.h }[4], [x24]\n"
- "st1 { v20.h }[4], [x23]\n"
+ "st1 { v8.h }[4], [x28]\n"
+ "st1 { v12.h }[4], [x24]\n"
+ "st1 { v16.h }[4], [x23]\n"
+ "st1 { v20.h }[4], [x22]\n"
"b 194f\n"
"192:" // Height 4: Partial direct writeback: partial_2_0
"tbz x11, #1, 193f\n"
- "str s8, [x9], #0x4\n"
- "str s12, [x25], #0x4\n"
- "str s16, [x24], #0x4\n"
- "str s20, [x23], #0x4\n"
+ "str s8, [x28], #0x4\n"
+ "str s12, [x24], #0x4\n"
+ "str s16, [x23], #0x4\n"
+ "str s20, [x22], #0x4\n"
"tbz x11, #0, 194f\n"
- "st1 { v8.h }[2], [x9]\n"
- "st1 { v12.h }[2], [x25]\n"
- "st1 { v16.h }[2], [x24]\n"
- "st1 { v20.h }[2], [x23]\n"
+ "st1 { v8.h }[2], [x28]\n"
+ "st1 { v12.h }[2], [x24]\n"
+ "st1 { v16.h }[2], [x23]\n"
+ "st1 { v20.h }[2], [x22]\n"
"b 194f\n"
"193:" // Height 4: Partial direct writeback: partial_1_0
- "str h8, [x9, #0x0]\n"
- "str h12, [x25, #0x0]\n"
- "str h16, [x24, #0x0]\n"
- "str h20, [x23, #0x0]\n"
+ "str h8, [x28, #0x0]\n"
+ "str h12, [x24, #0x0]\n"
+ "str h16, [x23, #0x0]\n"
+ "str h20, [x22, #0x0]\n"
"194:" // Height 4: Partial direct writeback: Done
"b 196f\n"
"195:" // Height 4: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "str q20, [x22, #0x0]\n"
+ "str q21, [x22, #0x10]\n"
+ "str q22, [x22, #0x20]\n"
+ "str q23, [x22, #0x30]\n"
"196:" // Height 4: Writeback done
"subs x11, x11, #0x20\n"
"bgt 149b\n"
"b 296f\n"
"197:" // Height 5
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"198:" // Height 5: Column loop
- "cbz x12, 199f\n"
- "ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
+ "cbz x9, 199f\n"
+ "ldr q8, [x9, #0x0]\n"
"mov v12.16b, v8.16b\n"
+ "ldr q9, [x9, #0x10]\n"
+ "mov v16.16b, v8.16b\n"
+ "ldr q10, [x9, #0x20]\n"
+ "mov v20.16b, v8.16b\n"
+ "ldr q11, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "mov v24.16b, v8.16b\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
+ "mov v17.16b, v9.16b\n"
"mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
- "mov v16.16b, v8.16b\n"
- "mov v17.16b, v9.16b\n"
- "add x12, x12, #0x40\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
- "mov v20.16b, v8.16b\n"
"mov v21.16b, v9.16b\n"
"mov v22.16b, v10.16b\n"
"mov v23.16b, v11.16b\n"
- "mov v24.16b, v8.16b\n"
"mov v25.16b, v9.16b\n"
"mov v26.16b, v10.16b\n"
"mov v27.16b, v11.16b\n"
"b 218f\n"
"199:" // Height 5: no bias
"tbz %x[flags], #0, 217f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x11, #0x20\n"
- "add x22, x23, x20, LSL #1\n"
+ "add x24, x28, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
"bge 216f\n"
"tbz x11, #4, 207f\n"
- "ld1 { v8.8h }, [x9], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v20.8h }, [x23], #0x10\n"
- "ld1 { v24.8h }, [x22], #0x10\n"
- "ld1 { v9.8h }, [x9], #0x10\n"
- "ld1 { v13.8h }, [x25], #0x10\n"
- "ld1 { v17.8h }, [x24], #0x10\n"
- "ld1 { v21.8h }, [x23], #0x10\n"
- "ld1 { v25.8h }, [x22], #0x10\n"
+ "ld1 { v8.8h }, [x28], #0x10\n"
+ "ld1 { v12.8h }, [x24], #0x10\n"
+ "ld1 { v16.8h }, [x23], #0x10\n"
+ "ld1 { v20.8h }, [x22], #0x10\n"
+ "ld1 { v24.8h }, [x21], #0x10\n"
+ "ld1 { v9.8h }, [x28], #0x10\n"
+ "ld1 { v13.8h }, [x24], #0x10\n"
+ "ld1 { v17.8h }, [x23], #0x10\n"
+ "ld1 { v21.8h }, [x22], #0x10\n"
+ "ld1 { v25.8h }, [x21], #0x10\n"
"tbz x11, #3, 203f\n"
- "ld1 { v10.8h }, [x9], #0x10\n"
- "ld1 { v14.8h }, [x25], #0x10\n"
- "ld1 { v18.8h }, [x24], #0x10\n"
- "ld1 { v22.8h }, [x23], #0x10\n"
- "ld1 { v26.8h }, [x22], #0x10\n"
+ "ld1 { v10.8h }, [x28], #0x10\n"
+ "ld1 { v14.8h }, [x24], #0x10\n"
+ "ld1 { v18.8h }, [x23], #0x10\n"
+ "ld1 { v22.8h }, [x22], #0x10\n"
+ "ld1 { v26.8h }, [x21], #0x10\n"
"tbz x11, #2, 201f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "ldr d27, [x21], #0x8\n"
"tbz x11, #1, 200f\n"
- "ld1 { v11.s }[2], [x9], #0x4\n"
- "ld1 { v15.s }[2], [x25], #0x4\n"
- "mov x20, #0x3c\n"
- "ld1 { v19.s }[2], [x24], #0x4\n"
- "ld1 { v23.s }[2], [x23], #0x4\n"
- "ld1 { v27.s }[2], [x22], #0x4\n"
+ "ld1 { v11.s }[2], [x28], #0x4\n"
+ "mov x19, #0x3c\n"
+ "ld1 { v15.s }[2], [x24], #0x4\n"
+ "ld1 { v19.s }[2], [x23], #0x4\n"
+ "ld1 { v23.s }[2], [x22], #0x4\n"
+ "ld1 { v27.s }[2], [x21], #0x4\n"
"tbz x11, #0, 215f\n"
- "ld1 { v11.h }[6], [x9]\n"
- "ld1 { v15.h }[6], [x25]\n"
- "ld1 { v19.h }[6], [x24]\n"
- "ld1 { v23.h }[6], [x23]\n"
- "ld1 { v27.h }[6], [x22]\n"
+ "ld1 { v11.h }[6], [x28]\n"
+ "ld1 { v15.h }[6], [x24]\n"
+ "ld1 { v19.h }[6], [x23]\n"
+ "ld1 { v23.h }[6], [x22]\n"
+ "ld1 { v27.h }[6], [x21]\n"
"b 215f\n"
"200:" // Height 5: Partial accumulate: partial_1_28
- "mov x20, #0x38\n"
+ "mov x19, #0x38\n"
"tbz x11, #0, 215f\n"
- "ld1 { v11.h }[4], [x9]\n"
- "ld1 { v15.h }[4], [x25]\n"
- "ld1 { v19.h }[4], [x24]\n"
- "ld1 { v23.h }[4], [x23]\n"
- "ld1 { v27.h }[4], [x22]\n"
+ "ld1 { v11.h }[4], [x28]\n"
+ "ld1 { v15.h }[4], [x24]\n"
+ "ld1 { v19.h }[4], [x23]\n"
+ "ld1 { v23.h }[4], [x22]\n"
+ "ld1 { v27.h }[4], [x21]\n"
"b 215f\n"
"201:" // Height 5: Partial accumulate: partial_2_24
"tbz x11, #1, 202f\n"
- "ldr s11, [x9], #0x4\n"
- "ldr s15, [x25], #0x4\n"
- "mov x20, #0x34\n"
- "ldr s19, [x24], #0x4\n"
- "ldr s23, [x23], #0x4\n"
- "ldr s27, [x22], #0x4\n"
+ "ldr s11, [x28], #0x4\n"
+ "ldr s15, [x24], #0x4\n"
+ "mov x19, #0x34\n"
+ "ldr s19, [x23], #0x4\n"
+ "ldr s23, [x22], #0x4\n"
+ "ldr s27, [x21], #0x4\n"
"tbz x11, #0, 215f\n"
- "ld1 { v11.h }[2], [x9]\n"
- "ld1 { v15.h }[2], [x25]\n"
- "ld1 { v19.h }[2], [x24]\n"
- "ld1 { v23.h }[2], [x23]\n"
- "ld1 { v27.h }[2], [x22]\n"
+ "ld1 { v11.h }[2], [x28]\n"
+ "ld1 { v15.h }[2], [x24]\n"
+ "ld1 { v19.h }[2], [x23]\n"
+ "ld1 { v23.h }[2], [x22]\n"
+ "ld1 { v27.h }[2], [x21]\n"
"b 215f\n"
"202:" // Height 5: Partial accumulate: partial_1_24
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x11, #0, 215f\n"
- "ldr h11, [x9, #0x0]\n"
- "ldr h15, [x25, #0x0]\n"
- "ldr h19, [x24, #0x0]\n"
- "ldr h23, [x23, #0x0]\n"
- "ldr h27, [x22, #0x0]\n"
+ "ldr h11, [x28, #0x0]\n"
+ "ldr h15, [x24, #0x0]\n"
+ "ldr h19, [x23, #0x0]\n"
+ "ldr h23, [x22, #0x0]\n"
+ "ldr h27, [x21, #0x0]\n"
"b 215f\n"
"203:" // Height 5: Partial accumulate: partial_4_16
"tbz x11, #2, 205f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "ldr d26, [x21], #0x8\n"
"tbz x11, #1, 204f\n"
- "ld1 { v10.s }[2], [x9], #0x4\n"
- "ld1 { v14.s }[2], [x25], #0x4\n"
- "mov x20, #0x2c\n"
- "ld1 { v18.s }[2], [x24], #0x4\n"
- "ld1 { v22.s }[2], [x23], #0x4\n"
- "ld1 { v26.s }[2], [x22], #0x4\n"
+ "ld1 { v10.s }[2], [x28], #0x4\n"
+ "mov x19, #0x2c\n"
+ "ld1 { v14.s }[2], [x24], #0x4\n"
+ "ld1 { v18.s }[2], [x23], #0x4\n"
+ "ld1 { v22.s }[2], [x22], #0x4\n"
+ "ld1 { v26.s }[2], [x21], #0x4\n"
"tbz x11, #0, 215f\n"
- "ld1 { v10.h }[6], [x9]\n"
- "ld1 { v14.h }[6], [x25]\n"
- "ld1 { v18.h }[6], [x24]\n"
- "ld1 { v22.h }[6], [x23]\n"
- "ld1 { v26.h }[6], [x22]\n"
+ "ld1 { v10.h }[6], [x28]\n"
+ "ld1 { v14.h }[6], [x24]\n"
+ "ld1 { v18.h }[6], [x23]\n"
+ "ld1 { v22.h }[6], [x22]\n"
+ "ld1 { v26.h }[6], [x21]\n"
"b 215f\n"
"204:" // Height 5: Partial accumulate: partial_1_20
- "mov x20, #0x28\n"
+ "mov x19, #0x28\n"
"tbz x11, #0, 215f\n"
- "ld1 { v10.h }[4], [x9]\n"
- "ld1 { v14.h }[4], [x25]\n"
- "ld1 { v18.h }[4], [x24]\n"
- "ld1 { v22.h }[4], [x23]\n"
- "ld1 { v26.h }[4], [x22]\n"
+ "ld1 { v10.h }[4], [x28]\n"
+ "ld1 { v14.h }[4], [x24]\n"
+ "ld1 { v18.h }[4], [x23]\n"
+ "ld1 { v22.h }[4], [x22]\n"
+ "ld1 { v26.h }[4], [x21]\n"
"b 215f\n"
"205:" // Height 5: Partial accumulate: partial_2_16
"tbz x11, #1, 206f\n"
- "ldr s10, [x9], #0x4\n"
- "ldr s14, [x25], #0x4\n"
- "mov x20, #0x24\n"
- "ldr s18, [x24], #0x4\n"
- "ldr s22, [x23], #0x4\n"
- "ldr s26, [x22], #0x4\n"
+ "ldr s10, [x28], #0x4\n"
+ "ldr s14, [x24], #0x4\n"
+ "mov x19, #0x24\n"
+ "ldr s18, [x23], #0x4\n"
+ "ldr s22, [x22], #0x4\n"
+ "ldr s26, [x21], #0x4\n"
"tbz x11, #0, 215f\n"
- "ld1 { v10.h }[2], [x9]\n"
- "ld1 { v14.h }[2], [x25]\n"
- "ld1 { v18.h }[2], [x24]\n"
- "ld1 { v22.h }[2], [x23]\n"
- "ld1 { v26.h }[2], [x22]\n"
+ "ld1 { v10.h }[2], [x28]\n"
+ "ld1 { v14.h }[2], [x24]\n"
+ "ld1 { v18.h }[2], [x23]\n"
+ "ld1 { v22.h }[2], [x22]\n"
+ "ld1 { v26.h }[2], [x21]\n"
"b 215f\n"
"206:" // Height 5: Partial accumulate: partial_1_16
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x11, #0, 215f\n"
- "ldr h10, [x9, #0x0]\n"
- "ldr h14, [x25, #0x0]\n"
- "ldr h18, [x24, #0x0]\n"
- "ldr h22, [x23, #0x0]\n"
- "ldr h26, [x22, #0x0]\n"
+ "ldr h10, [x28, #0x0]\n"
+ "ldr h14, [x24, #0x0]\n"
+ "ldr h18, [x23, #0x0]\n"
+ "ldr h22, [x22, #0x0]\n"
+ "ldr h26, [x21, #0x0]\n"
"b 215f\n"
"207:" // Height 5: Partial accumulate: partial_8_0
"tbz x11, #3, 211f\n"
- "ld1 { v8.8h }, [x9], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v20.8h }, [x23], #0x10\n"
- "ld1 { v24.8h }, [x22], #0x10\n"
+ "ld1 { v8.8h }, [x28], #0x10\n"
+ "ld1 { v12.8h }, [x24], #0x10\n"
+ "ld1 { v16.8h }, [x23], #0x10\n"
+ "ld1 { v20.8h }, [x22], #0x10\n"
+ "ld1 { v24.8h }, [x21], #0x10\n"
"tbz x11, #2, 209f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d25, [x21], #0x8\n"
"tbz x11, #1, 208f\n"
- "ld1 { v9.s }[2], [x9], #0x4\n"
- "ld1 { v13.s }[2], [x25], #0x4\n"
- "mov x20, #0x1c\n"
- "ld1 { v17.s }[2], [x24], #0x4\n"
- "ld1 { v21.s }[2], [x23], #0x4\n"
- "ld1 { v25.s }[2], [x22], #0x4\n"
+ "ld1 { v9.s }[2], [x28], #0x4\n"
+ "mov x19, #0x1c\n"
+ "ld1 { v13.s }[2], [x24], #0x4\n"
+ "ld1 { v17.s }[2], [x23], #0x4\n"
+ "ld1 { v21.s }[2], [x22], #0x4\n"
+ "ld1 { v25.s }[2], [x21], #0x4\n"
"tbz x11, #0, 215f\n"
- "ld1 { v9.h }[6], [x9]\n"
- "ld1 { v13.h }[6], [x25]\n"
- "ld1 { v17.h }[6], [x24]\n"
- "ld1 { v21.h }[6], [x23]\n"
- "ld1 { v25.h }[6], [x22]\n"
+ "ld1 { v9.h }[6], [x28]\n"
+ "ld1 { v13.h }[6], [x24]\n"
+ "ld1 { v17.h }[6], [x23]\n"
+ "ld1 { v21.h }[6], [x22]\n"
+ "ld1 { v25.h }[6], [x21]\n"
"b 215f\n"
"208:" // Height 5: Partial accumulate: partial_1_12
- "mov x20, #0x18\n"
+ "mov x19, #0x18\n"
"tbz x11, #0, 215f\n"
- "ld1 { v9.h }[4], [x9]\n"
- "ld1 { v13.h }[4], [x25]\n"
- "ld1 { v17.h }[4], [x24]\n"
- "ld1 { v21.h }[4], [x23]\n"
- "ld1 { v25.h }[4], [x22]\n"
+ "ld1 { v9.h }[4], [x28]\n"
+ "ld1 { v13.h }[4], [x24]\n"
+ "ld1 { v17.h }[4], [x23]\n"
+ "ld1 { v21.h }[4], [x22]\n"
+ "ld1 { v25.h }[4], [x21]\n"
"b 215f\n"
"209:" // Height 5: Partial accumulate: partial_2_8
"tbz x11, #1, 210f\n"
- "ldr s9, [x9], #0x4\n"
- "ldr s13, [x25], #0x4\n"
- "mov x20, #0x14\n"
- "ldr s17, [x24], #0x4\n"
- "ldr s21, [x23], #0x4\n"
- "ldr s25, [x22], #0x4\n"
+ "ldr s9, [x28], #0x4\n"
+ "ldr s13, [x24], #0x4\n"
+ "mov x19, #0x14\n"
+ "ldr s17, [x23], #0x4\n"
+ "ldr s21, [x22], #0x4\n"
+ "ldr s25, [x21], #0x4\n"
"tbz x11, #0, 215f\n"
- "ld1 { v9.h }[2], [x9]\n"
- "ld1 { v13.h }[2], [x25]\n"
- "ld1 { v17.h }[2], [x24]\n"
- "ld1 { v21.h }[2], [x23]\n"
- "ld1 { v25.h }[2], [x22]\n"
+ "ld1 { v9.h }[2], [x28]\n"
+ "ld1 { v13.h }[2], [x24]\n"
+ "ld1 { v17.h }[2], [x23]\n"
+ "ld1 { v21.h }[2], [x22]\n"
+ "ld1 { v25.h }[2], [x21]\n"
"b 215f\n"
"210:" // Height 5: Partial accumulate: partial_1_8
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x11, #0, 215f\n"
- "ldr h9, [x9, #0x0]\n"
- "ldr h13, [x25, #0x0]\n"
- "ldr h17, [x24, #0x0]\n"
- "ldr h21, [x23, #0x0]\n"
- "ldr h25, [x22, #0x0]\n"
+ "ldr h9, [x28, #0x0]\n"
+ "ldr h13, [x24, #0x0]\n"
+ "ldr h17, [x23, #0x0]\n"
+ "ldr h21, [x22, #0x0]\n"
+ "ldr h25, [x21, #0x0]\n"
"b 215f\n"
"211:" // Height 5: Partial accumulate: partial_4_0
"tbz x11, #2, 213f\n"
- "ldr d8, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d24, [x22], #0x8\n"
+ "ldr d8, [x28], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "ldr d16, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "ldr d24, [x21], #0x8\n"
"tbz x11, #1, 212f\n"
- "ld1 { v8.s }[2], [x9], #0x4\n"
- "ld1 { v12.s }[2], [x25], #0x4\n"
- "mov x20, #0xc\n"
- "ld1 { v16.s }[2], [x24], #0x4\n"
- "ld1 { v20.s }[2], [x23], #0x4\n"
- "ld1 { v24.s }[2], [x22], #0x4\n"
+ "ld1 { v8.s }[2], [x28], #0x4\n"
+ "mov x19, #0xc\n"
+ "ld1 { v12.s }[2], [x24], #0x4\n"
+ "ld1 { v16.s }[2], [x23], #0x4\n"
+ "ld1 { v20.s }[2], [x22], #0x4\n"
+ "ld1 { v24.s }[2], [x21], #0x4\n"
"tbz x11, #0, 215f\n"
- "ld1 { v8.h }[6], [x9]\n"
- "ld1 { v12.h }[6], [x25]\n"
- "ld1 { v16.h }[6], [x24]\n"
- "ld1 { v20.h }[6], [x23]\n"
- "ld1 { v24.h }[6], [x22]\n"
+ "ld1 { v8.h }[6], [x28]\n"
+ "ld1 { v12.h }[6], [x24]\n"
+ "ld1 { v16.h }[6], [x23]\n"
+ "ld1 { v20.h }[6], [x22]\n"
+ "ld1 { v24.h }[6], [x21]\n"
"b 215f\n"
"212:" // Height 5: Partial accumulate: partial_1_4
- "mov x20, #0x8\n"
+ "mov x19, #0x8\n"
"tbz x11, #0, 215f\n"
- "ld1 { v8.h }[4], [x9]\n"
- "ld1 { v12.h }[4], [x25]\n"
- "ld1 { v16.h }[4], [x24]\n"
- "ld1 { v20.h }[4], [x23]\n"
- "ld1 { v24.h }[4], [x22]\n"
+ "ld1 { v8.h }[4], [x28]\n"
+ "ld1 { v12.h }[4], [x24]\n"
+ "ld1 { v16.h }[4], [x23]\n"
+ "ld1 { v20.h }[4], [x22]\n"
+ "ld1 { v24.h }[4], [x21]\n"
"b 215f\n"
"213:" // Height 5: Partial accumulate: partial_2_0
"tbz x11, #1, 214f\n"
- "ldr s8, [x9], #0x4\n"
- "ldr s12, [x25], #0x4\n"
- "mov x20, #0x4\n"
- "ldr s16, [x24], #0x4\n"
- "ldr s20, [x23], #0x4\n"
- "ldr s24, [x22], #0x4\n"
+ "ldr s8, [x28], #0x4\n"
+ "ldr s12, [x24], #0x4\n"
+ "mov x19, #0x4\n"
+ "ldr s16, [x23], #0x4\n"
+ "ldr s20, [x22], #0x4\n"
+ "ldr s24, [x21], #0x4\n"
"tbz x11, #0, 215f\n"
- "ld1 { v8.h }[2], [x9]\n"
- "ld1 { v12.h }[2], [x25]\n"
- "ld1 { v16.h }[2], [x24]\n"
- "ld1 { v20.h }[2], [x23]\n"
- "ld1 { v24.h }[2], [x22]\n"
+ "ld1 { v8.h }[2], [x28]\n"
+ "ld1 { v12.h }[2], [x24]\n"
+ "ld1 { v16.h }[2], [x23]\n"
+ "ld1 { v20.h }[2], [x22]\n"
+ "ld1 { v24.h }[2], [x21]\n"
"b 215f\n"
"214:" // Height 5: Partial accumulate: partial_1_0
- "ldr h8, [x9, #0x0]\n"
- "ldr h12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr h16, [x24, #0x0]\n"
- "ldr h20, [x23, #0x0]\n"
- "ldr h24, [x22, #0x0]\n"
+ "ldr h8, [x28, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr h12, [x24, #0x0]\n"
+ "ldr h16, [x23, #0x0]\n"
+ "ldr h20, [x22, #0x0]\n"
+ "ldr h24, [x21, #0x0]\n"
"215:" // Height 5: Partial accumulate: Done
- "sub x9, x9, x20\n"
+ "sub x28, x28, x19\n"
"b 218f\n"
"216:" // Height 5: full accumulate
- "ldr q8, [x9, #0x0]\n"
- "ldr q9, [x9, #0x10]\n"
- "ldr q10, [x9, #0x20]\n"
- "ldr q11, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q24, [x22, #0x0]\n"
- "ldr q25, [x22, #0x10]\n"
- "ldr q26, [x22, #0x20]\n"
- "ldr q27, [x22, #0x30]\n"
+ "ldr q8, [x28, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q10, [x28, #0x20]\n"
+ "ldr q11, [x28, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x0]\n"
+ "ldr q17, [x23, #0x10]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "ldr q19, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
+ "ldr q24, [x21, #0x0]\n"
+ "ldr q25, [x21, #0x10]\n"
+ "ldr q26, [x21, #0x20]\n"
+ "ldr q27, [x21, #0x30]\n"
"b 218f\n"
"217:" // Height 5: no accumulate
"movi v8.16b, #0x0\n"
@@ -3213,74 +3213,74 @@ void a64_hybrid_fp16_mla_6x32 (
"movi v26.16b, #0x0\n"
"movi v27.16b, #0x0\n"
"218:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"219:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 220f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 221f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
- "add x22, x22, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 221f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
"b 221f\n"
"220:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
"221:" // Height 5: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"blt 224f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x10\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q3, [x23, #0x0]\n"
- "ldr q4, [x22, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x10\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
"blt 223f\n"
"222:" // Height 5: Multiply loop: Main loop head
"fmla v8.8h, v6.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "add x25, x25, #0x10\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
- "sub x27, x27, #0x8\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "add x23, x23, #0x10\n"
"fmla v20.8h, v6.8h, v3.h[0]\n"
- "add x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "add x22, x22, #0x10\n"
"fmla v24.8h, v6.8h, v4.h[0]\n"
"ldr q6, [x10, #0x20]\n"
+ "add x21, x21, #0x10\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
- "add x23, x23, #0x10\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "sub x26, x26, #0x8\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
+ "cmp x26, #0x10\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
- "add x22, x22, #0x10\n"
- "cmp x27, #0x10\n"
"fmla v21.8h, v7.8h, v3.h[0]\n"
"fmla v25.8h, v7.8h, v4.h[0]\n"
"ldr q7, [x10, #0x30]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"fmla v18.8h, v6.8h, v2.h[0]\n"
"fmla v22.8h, v6.8h, v3.h[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
"fmla v26.8h, v6.8h, v4.h[0]\n"
"ldr q6, [x10, #0x40]\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
@@ -3453,45 +3453,45 @@ void a64_hybrid_fp16_mla_6x32 (
"fmla v26.8h, v6.8h, v4.h[7]\n"
"ldr q6, [x10, #0x0]\n"
"fmla v11.8h, v7.8h, v0.h[7]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
"fmla v15.8h, v7.8h, v1.h[7]\n"
- "ldr q1, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
"fmla v19.8h, v7.8h, v2.h[7]\n"
- "ldr q2, [x24, #0x0]\n"
+ "ldr q2, [x23, #0x0]\n"
"fmla v23.8h, v7.8h, v3.h[7]\n"
- "ldr q3, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
"fmla v27.8h, v7.8h, v4.h[7]\n"
- "ldr q4, [x22, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q4, [x21, #0x0]\n"
"bge 222b\n"
"223:" // Height 5: Multiply loop: Single iteration only
"fmla v8.8h, v6.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "sub x26, x26, #0x8\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
- "fmla v20.8h, v6.8h, v3.h[0]\n"
"add x24, x24, #0x10\n"
+ "fmla v20.8h, v6.8h, v3.h[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"add x23, x23, #0x10\n"
"fmla v24.8h, v6.8h, v4.h[0]\n"
- "ldr q6, [x10, #0x20]\n"
- "fmla v9.8h, v7.8h, v0.h[0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"add x22, x22, #0x10\n"
+ "fmla v9.8h, v7.8h, v0.h[0]\n"
+ "ldr q6, [x10, #0x20]\n"
+ "add x21, x21, #0x10\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
- "sub x27, x27, #0x8\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
"fmla v21.8h, v7.8h, v3.h[0]\n"
"fmla v25.8h, v7.8h, v4.h[0]\n"
"ldr q7, [x10, #0x30]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
"fmla v18.8h, v6.8h, v2.h[0]\n"
"fmla v22.8h, v6.8h, v3.h[0]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
"fmla v26.8h, v6.8h, v4.h[0]\n"
"ldr q6, [x10, #0x40]\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
@@ -3668,18 +3668,18 @@ void a64_hybrid_fp16_mla_6x32 (
"fmla v23.8h, v7.8h, v3.h[7]\n"
"fmla v27.8h, v7.8h, v4.h[7]\n"
"224:" // Height 5: Multiply loop: Main loop skip
- "cbz x27, 226f\n"
+ "cbz x26, 226f\n"
"225:" // Height 5: Multiply loop: Odd block loop
- "ldr h0, [x26], #0x2\n"
- "ldr h1, [x25], #0x2\n"
- "sub x27, x27, #0x1\n"
- "ldr h2, [x24], #0x2\n"
- "ldr h3, [x23], #0x2\n"
- "ldr h4, [x22], #0x2\n"
+ "ldr h0, [x25], #0x2\n"
+ "sub x26, x26, #0x1\n"
+ "ldr h1, [x24], #0x2\n"
+ "ldr h2, [x23], #0x2\n"
+ "ldr h3, [x22], #0x2\n"
+ "ldr h4, [x21], #0x2\n"
"ldr q6, [x10, #0x0]\n"
"fmla v8.8h, v6.8h, v0.h[0]\n"
- "fmla v12.8h, v6.8h, v1.h[0]\n"
"ldr q7, [x10, #0x10]\n"
+ "fmla v12.8h, v6.8h, v1.h[0]\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
"fmla v20.8h, v6.8h, v3.h[0]\n"
"fmla v24.8h, v6.8h, v4.h[0]\n"
@@ -3701,660 +3701,660 @@ void a64_hybrid_fp16_mla_6x32 (
"fmla v19.8h, v7.8h, v2.h[0]\n"
"fmla v23.8h, v7.8h, v3.h[0]\n"
"fmla v27.8h, v7.8h, v4.h[0]\n"
- "cbnz x27, 225b\n"
+ "cbnz x26, 225b\n"
"226:" // Height 5: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 219b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
- "prfm pstl1keep, [x25, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x24, x28, x19, LSL #1\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x19, LSL #1\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #1\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #1\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
"tbz %x[flags], #1, 227f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.8h }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.8h }, [x20]\n"
- "fmin v8.8h, v8.8h, v1.8h\n"
- "fmin v9.8h, v9.8h, v1.8h\n"
- "fmin v10.8h, v10.8h, v1.8h\n"
- "fmin v11.8h, v11.8h, v1.8h\n"
- "fmin v12.8h, v12.8h, v1.8h\n"
- "fmin v13.8h, v13.8h, v1.8h\n"
- "fmin v14.8h, v14.8h, v1.8h\n"
- "fmin v15.8h, v15.8h, v1.8h\n"
- "fmin v16.8h, v16.8h, v1.8h\n"
- "fmin v17.8h, v17.8h, v1.8h\n"
- "fmin v18.8h, v18.8h, v1.8h\n"
- "fmin v19.8h, v19.8h, v1.8h\n"
- "fmin v20.8h, v20.8h, v1.8h\n"
- "fmin v21.8h, v21.8h, v1.8h\n"
- "fmin v22.8h, v22.8h, v1.8h\n"
- "fmin v23.8h, v23.8h, v1.8h\n"
- "fmin v24.8h, v24.8h, v1.8h\n"
- "fmin v25.8h, v25.8h, v1.8h\n"
- "fmin v26.8h, v26.8h, v1.8h\n"
- "fmin v27.8h, v27.8h, v1.8h\n"
- "fmax v8.8h, v8.8h, v0.8h\n"
- "fmax v9.8h, v9.8h, v0.8h\n"
- "fmax v10.8h, v10.8h, v0.8h\n"
- "fmax v11.8h, v11.8h, v0.8h\n"
- "fmax v12.8h, v12.8h, v0.8h\n"
- "fmax v13.8h, v13.8h, v0.8h\n"
- "fmax v14.8h, v14.8h, v0.8h\n"
- "fmax v15.8h, v15.8h, v0.8h\n"
- "fmax v16.8h, v16.8h, v0.8h\n"
- "fmax v17.8h, v17.8h, v0.8h\n"
- "fmax v18.8h, v18.8h, v0.8h\n"
- "fmax v19.8h, v19.8h, v0.8h\n"
- "fmax v20.8h, v20.8h, v0.8h\n"
- "fmax v21.8h, v21.8h, v0.8h\n"
- "fmax v22.8h, v22.8h, v0.8h\n"
- "fmax v23.8h, v23.8h, v0.8h\n"
- "fmax v24.8h, v24.8h, v0.8h\n"
- "fmax v25.8h, v25.8h, v0.8h\n"
- "fmax v26.8h, v26.8h, v0.8h\n"
- "fmax v27.8h, v27.8h, v0.8h\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.8h }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.8h }, [x19]\n"
+ "fmin v8.8h, v8.8h, v0.8h\n"
+ "fmin v9.8h, v9.8h, v0.8h\n"
+ "fmin v10.8h, v10.8h, v0.8h\n"
+ "fmin v11.8h, v11.8h, v0.8h\n"
+ "fmax v8.8h, v8.8h, v1.8h\n"
+ "fmax v9.8h, v9.8h, v1.8h\n"
+ "fmax v10.8h, v10.8h, v1.8h\n"
+ "fmax v11.8h, v11.8h, v1.8h\n"
+ "fmin v12.8h, v12.8h, v0.8h\n"
+ "fmin v13.8h, v13.8h, v0.8h\n"
+ "fmin v14.8h, v14.8h, v0.8h\n"
+ "fmax v12.8h, v12.8h, v1.8h\n"
+ "fmax v13.8h, v13.8h, v1.8h\n"
+ "fmax v14.8h, v14.8h, v1.8h\n"
+ "fmin v15.8h, v15.8h, v0.8h\n"
+ "fmin v16.8h, v16.8h, v0.8h\n"
+ "fmin v17.8h, v17.8h, v0.8h\n"
+ "fmax v15.8h, v15.8h, v1.8h\n"
+ "fmax v16.8h, v16.8h, v1.8h\n"
+ "fmax v17.8h, v17.8h, v1.8h\n"
+ "fmin v18.8h, v18.8h, v0.8h\n"
+ "fmin v19.8h, v19.8h, v0.8h\n"
+ "fmin v20.8h, v20.8h, v0.8h\n"
+ "fmax v18.8h, v18.8h, v1.8h\n"
+ "fmax v19.8h, v19.8h, v1.8h\n"
+ "fmax v20.8h, v20.8h, v1.8h\n"
+ "fmin v21.8h, v21.8h, v0.8h\n"
+ "fmin v22.8h, v22.8h, v0.8h\n"
+ "fmin v23.8h, v23.8h, v0.8h\n"
+ "fmax v21.8h, v21.8h, v1.8h\n"
+ "fmax v22.8h, v22.8h, v1.8h\n"
+ "fmax v23.8h, v23.8h, v1.8h\n"
+ "fmin v24.8h, v24.8h, v0.8h\n"
+ "fmin v25.8h, v25.8h, v0.8h\n"
+ "fmin v26.8h, v26.8h, v0.8h\n"
+ "fmax v24.8h, v24.8h, v1.8h\n"
+ "fmax v25.8h, v25.8h, v1.8h\n"
+ "fmax v26.8h, v26.8h, v1.8h\n"
+ "fmin v27.8h, v27.8h, v0.8h\n"
+ "fmax v27.8h, v27.8h, v1.8h\n"
"227:" // Height 5: No activation
"cmp x11, #0x20\n"
"bge 244f\n"
"tbz x11, #4, 235f\n"
- "st1 { v8.8h }, [x9], #0x10\n"
- "st1 { v9.8h }, [x9], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v13.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v17.8h }, [x24], #0x10\n"
- "st1 { v20.8h }, [x23], #0x10\n"
- "st1 { v21.8h }, [x23], #0x10\n"
- "st1 { v24.8h }, [x22], #0x10\n"
- "st1 { v25.8h }, [x22], #0x10\n"
+ "st1 { v8.8h }, [x28], #0x10\n"
+ "st1 { v9.8h }, [x28], #0x10\n"
+ "st1 { v12.8h }, [x24], #0x10\n"
+ "st1 { v13.8h }, [x24], #0x10\n"
+ "st1 { v16.8h }, [x23], #0x10\n"
+ "st1 { v17.8h }, [x23], #0x10\n"
+ "st1 { v20.8h }, [x22], #0x10\n"
+ "st1 { v21.8h }, [x22], #0x10\n"
+ "st1 { v24.8h }, [x21], #0x10\n"
+ "st1 { v25.8h }, [x21], #0x10\n"
"tbz x11, #3, 231f\n"
- "st1 { v10.8h }, [x9], #0x10\n"
- "st1 { v14.8h }, [x25], #0x10\n"
- "st1 { v18.8h }, [x24], #0x10\n"
- "st1 { v22.8h }, [x23], #0x10\n"
- "st1 { v26.8h }, [x22], #0x10\n"
+ "st1 { v10.8h }, [x28], #0x10\n"
+ "st1 { v14.8h }, [x24], #0x10\n"
+ "st1 { v18.8h }, [x23], #0x10\n"
+ "st1 { v22.8h }, [x22], #0x10\n"
+ "st1 { v26.8h }, [x21], #0x10\n"
"tbz x11, #2, 229f\n"
- "str d11, [x9], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
+ "str d11, [x28], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "str d23, [x22], #0x8\n"
+ "str d27, [x21], #0x8\n"
"tbz x11, #1, 228f\n"
- "st1 { v11.s }[2], [x9], #0x4\n"
- "st1 { v15.s }[2], [x25], #0x4\n"
- "st1 { v19.s }[2], [x24], #0x4\n"
- "st1 { v23.s }[2], [x23], #0x4\n"
- "st1 { v27.s }[2], [x22], #0x4\n"
+ "st1 { v11.s }[2], [x28], #0x4\n"
+ "st1 { v15.s }[2], [x24], #0x4\n"
+ "st1 { v19.s }[2], [x23], #0x4\n"
+ "st1 { v23.s }[2], [x22], #0x4\n"
+ "st1 { v27.s }[2], [x21], #0x4\n"
"tbz x11, #0, 243f\n"
- "st1 { v11.h }[6], [x9]\n"
- "st1 { v15.h }[6], [x25]\n"
- "st1 { v19.h }[6], [x24]\n"
- "st1 { v23.h }[6], [x23]\n"
- "st1 { v27.h }[6], [x22]\n"
+ "st1 { v11.h }[6], [x28]\n"
+ "st1 { v15.h }[6], [x24]\n"
+ "st1 { v19.h }[6], [x23]\n"
+ "st1 { v23.h }[6], [x22]\n"
+ "st1 { v27.h }[6], [x21]\n"
"b 243f\n"
"228:" // Height 5: Partial direct writeback: partial_1_28
"tbz x11, #0, 243f\n"
- "st1 { v11.h }[4], [x9]\n"
- "st1 { v15.h }[4], [x25]\n"
- "st1 { v19.h }[4], [x24]\n"
- "st1 { v23.h }[4], [x23]\n"
- "st1 { v27.h }[4], [x22]\n"
+ "st1 { v11.h }[4], [x28]\n"
+ "st1 { v15.h }[4], [x24]\n"
+ "st1 { v19.h }[4], [x23]\n"
+ "st1 { v23.h }[4], [x22]\n"
+ "st1 { v27.h }[4], [x21]\n"
"b 243f\n"
"229:" // Height 5: Partial direct writeback: partial_2_24
"tbz x11, #1, 230f\n"
- "str s11, [x9], #0x4\n"
- "str s15, [x25], #0x4\n"
- "str s19, [x24], #0x4\n"
- "str s23, [x23], #0x4\n"
- "str s27, [x22], #0x4\n"
+ "str s11, [x28], #0x4\n"
+ "str s15, [x24], #0x4\n"
+ "str s19, [x23], #0x4\n"
+ "str s23, [x22], #0x4\n"
+ "str s27, [x21], #0x4\n"
"tbz x11, #0, 243f\n"
- "st1 { v11.h }[2], [x9]\n"
- "st1 { v15.h }[2], [x25]\n"
- "st1 { v19.h }[2], [x24]\n"
- "st1 { v23.h }[2], [x23]\n"
- "st1 { v27.h }[2], [x22]\n"
+ "st1 { v11.h }[2], [x28]\n"
+ "st1 { v15.h }[2], [x24]\n"
+ "st1 { v19.h }[2], [x23]\n"
+ "st1 { v23.h }[2], [x22]\n"
+ "st1 { v27.h }[2], [x21]\n"
"b 243f\n"
"230:" // Height 5: Partial direct writeback: partial_1_24
"tbz x11, #0, 243f\n"
- "str h11, [x9, #0x0]\n"
- "str h15, [x25, #0x0]\n"
- "str h19, [x24, #0x0]\n"
- "str h23, [x23, #0x0]\n"
- "str h27, [x22, #0x0]\n"
+ "str h11, [x28, #0x0]\n"
+ "str h15, [x24, #0x0]\n"
+ "str h19, [x23, #0x0]\n"
+ "str h23, [x22, #0x0]\n"
+ "str h27, [x21, #0x0]\n"
"b 243f\n"
"231:" // Height 5: Partial direct writeback: partial_4_16
"tbz x11, #2, 233f\n"
- "str d10, [x9], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
+ "str d10, [x28], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
+ "str d22, [x22], #0x8\n"
+ "str d26, [x21], #0x8\n"
"tbz x11, #1, 232f\n"
- "st1 { v10.s }[2], [x9], #0x4\n"
- "st1 { v14.s }[2], [x25], #0x4\n"
- "st1 { v18.s }[2], [x24], #0x4\n"
- "st1 { v22.s }[2], [x23], #0x4\n"
- "st1 { v26.s }[2], [x22], #0x4\n"
+ "st1 { v10.s }[2], [x28], #0x4\n"
+ "st1 { v14.s }[2], [x24], #0x4\n"
+ "st1 { v18.s }[2], [x23], #0x4\n"
+ "st1 { v22.s }[2], [x22], #0x4\n"
+ "st1 { v26.s }[2], [x21], #0x4\n"
"tbz x11, #0, 243f\n"
- "st1 { v10.h }[6], [x9]\n"
- "st1 { v14.h }[6], [x25]\n"
- "st1 { v18.h }[6], [x24]\n"
- "st1 { v22.h }[6], [x23]\n"
- "st1 { v26.h }[6], [x22]\n"
+ "st1 { v10.h }[6], [x28]\n"
+ "st1 { v14.h }[6], [x24]\n"
+ "st1 { v18.h }[6], [x23]\n"
+ "st1 { v22.h }[6], [x22]\n"
+ "st1 { v26.h }[6], [x21]\n"
"b 243f\n"
"232:" // Height 5: Partial direct writeback: partial_1_20
"tbz x11, #0, 243f\n"
- "st1 { v10.h }[4], [x9]\n"
- "st1 { v14.h }[4], [x25]\n"
- "st1 { v18.h }[4], [x24]\n"
- "st1 { v22.h }[4], [x23]\n"
- "st1 { v26.h }[4], [x22]\n"
+ "st1 { v10.h }[4], [x28]\n"
+ "st1 { v14.h }[4], [x24]\n"
+ "st1 { v18.h }[4], [x23]\n"
+ "st1 { v22.h }[4], [x22]\n"
+ "st1 { v26.h }[4], [x21]\n"
"b 243f\n"
"233:" // Height 5: Partial direct writeback: partial_2_16
"tbz x11, #1, 234f\n"
- "str s10, [x9], #0x4\n"
- "str s14, [x25], #0x4\n"
- "str s18, [x24], #0x4\n"
- "str s22, [x23], #0x4\n"
- "str s26, [x22], #0x4\n"
+ "str s10, [x28], #0x4\n"
+ "str s14, [x24], #0x4\n"
+ "str s18, [x23], #0x4\n"
+ "str s22, [x22], #0x4\n"
+ "str s26, [x21], #0x4\n"
"tbz x11, #0, 243f\n"
- "st1 { v10.h }[2], [x9]\n"
- "st1 { v14.h }[2], [x25]\n"
- "st1 { v18.h }[2], [x24]\n"
- "st1 { v22.h }[2], [x23]\n"
- "st1 { v26.h }[2], [x22]\n"
+ "st1 { v10.h }[2], [x28]\n"
+ "st1 { v14.h }[2], [x24]\n"
+ "st1 { v18.h }[2], [x23]\n"
+ "st1 { v22.h }[2], [x22]\n"
+ "st1 { v26.h }[2], [x21]\n"
"b 243f\n"
"234:" // Height 5: Partial direct writeback: partial_1_16
"tbz x11, #0, 243f\n"
- "str h10, [x9, #0x0]\n"
- "str h14, [x25, #0x0]\n"
- "str h18, [x24, #0x0]\n"
- "str h22, [x23, #0x0]\n"
- "str h26, [x22, #0x0]\n"
+ "str h10, [x28, #0x0]\n"
+ "str h14, [x24, #0x0]\n"
+ "str h18, [x23, #0x0]\n"
+ "str h22, [x22, #0x0]\n"
+ "str h26, [x21, #0x0]\n"
"b 243f\n"
"235:" // Height 5: Partial direct writeback: partial_8_0
"tbz x11, #3, 239f\n"
- "st1 { v8.8h }, [x9], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v20.8h }, [x23], #0x10\n"
- "st1 { v24.8h }, [x22], #0x10\n"
+ "st1 { v8.8h }, [x28], #0x10\n"
+ "st1 { v12.8h }, [x24], #0x10\n"
+ "st1 { v16.8h }, [x23], #0x10\n"
+ "st1 { v20.8h }, [x22], #0x10\n"
+ "st1 { v24.8h }, [x21], #0x10\n"
"tbz x11, #2, 237f\n"
- "str d9, [x9], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
+ "str d9, [x28], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
+ "str d21, [x22], #0x8\n"
+ "str d25, [x21], #0x8\n"
"tbz x11, #1, 236f\n"
- "st1 { v9.s }[2], [x9], #0x4\n"
- "st1 { v13.s }[2], [x25], #0x4\n"
- "st1 { v17.s }[2], [x24], #0x4\n"
- "st1 { v21.s }[2], [x23], #0x4\n"
- "st1 { v25.s }[2], [x22], #0x4\n"
+ "st1 { v9.s }[2], [x28], #0x4\n"
+ "st1 { v13.s }[2], [x24], #0x4\n"
+ "st1 { v17.s }[2], [x23], #0x4\n"
+ "st1 { v21.s }[2], [x22], #0x4\n"
+ "st1 { v25.s }[2], [x21], #0x4\n"
"tbz x11, #0, 243f\n"
- "st1 { v9.h }[6], [x9]\n"
- "st1 { v13.h }[6], [x25]\n"
- "st1 { v17.h }[6], [x24]\n"
- "st1 { v21.h }[6], [x23]\n"
- "st1 { v25.h }[6], [x22]\n"
+ "st1 { v9.h }[6], [x28]\n"
+ "st1 { v13.h }[6], [x24]\n"
+ "st1 { v17.h }[6], [x23]\n"
+ "st1 { v21.h }[6], [x22]\n"
+ "st1 { v25.h }[6], [x21]\n"
"b 243f\n"
"236:" // Height 5: Partial direct writeback: partial_1_12
"tbz x11, #0, 243f\n"
- "st1 { v9.h }[4], [x9]\n"
- "st1 { v13.h }[4], [x25]\n"
- "st1 { v17.h }[4], [x24]\n"
- "st1 { v21.h }[4], [x23]\n"
- "st1 { v25.h }[4], [x22]\n"
+ "st1 { v9.h }[4], [x28]\n"
+ "st1 { v13.h }[4], [x24]\n"
+ "st1 { v17.h }[4], [x23]\n"
+ "st1 { v21.h }[4], [x22]\n"
+ "st1 { v25.h }[4], [x21]\n"
"b 243f\n"
"237:" // Height 5: Partial direct writeback: partial_2_8
"tbz x11, #1, 238f\n"
- "str s9, [x9], #0x4\n"
- "str s13, [x25], #0x4\n"
- "str s17, [x24], #0x4\n"
- "str s21, [x23], #0x4\n"
- "str s25, [x22], #0x4\n"
+ "str s9, [x28], #0x4\n"
+ "str s13, [x24], #0x4\n"
+ "str s17, [x23], #0x4\n"
+ "str s21, [x22], #0x4\n"
+ "str s25, [x21], #0x4\n"
"tbz x11, #0, 243f\n"
- "st1 { v9.h }[2], [x9]\n"
- "st1 { v13.h }[2], [x25]\n"
- "st1 { v17.h }[2], [x24]\n"
- "st1 { v21.h }[2], [x23]\n"
- "st1 { v25.h }[2], [x22]\n"
+ "st1 { v9.h }[2], [x28]\n"
+ "st1 { v13.h }[2], [x24]\n"
+ "st1 { v17.h }[2], [x23]\n"
+ "st1 { v21.h }[2], [x22]\n"
+ "st1 { v25.h }[2], [x21]\n"
"b 243f\n"
"238:" // Height 5: Partial direct writeback: partial_1_8
"tbz x11, #0, 243f\n"
- "str h9, [x9, #0x0]\n"
- "str h13, [x25, #0x0]\n"
- "str h17, [x24, #0x0]\n"
- "str h21, [x23, #0x0]\n"
- "str h25, [x22, #0x0]\n"
+ "str h9, [x28, #0x0]\n"
+ "str h13, [x24, #0x0]\n"
+ "str h17, [x23, #0x0]\n"
+ "str h21, [x22, #0x0]\n"
+ "str h25, [x21, #0x0]\n"
"b 243f\n"
"239:" // Height 5: Partial direct writeback: partial_4_0
"tbz x11, #2, 241f\n"
- "str d8, [x9], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
+ "str d8, [x28], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
+ "str d20, [x22], #0x8\n"
+ "str d24, [x21], #0x8\n"
"tbz x11, #1, 240f\n"
- "st1 { v8.s }[2], [x9], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
- "st1 { v16.s }[2], [x24], #0x4\n"
- "st1 { v20.s }[2], [x23], #0x4\n"
- "st1 { v24.s }[2], [x22], #0x4\n"
+ "st1 { v8.s }[2], [x28], #0x4\n"
+ "st1 { v12.s }[2], [x24], #0x4\n"
+ "st1 { v16.s }[2], [x23], #0x4\n"
+ "st1 { v20.s }[2], [x22], #0x4\n"
+ "st1 { v24.s }[2], [x21], #0x4\n"
"tbz x11, #0, 243f\n"
- "st1 { v8.h }[6], [x9]\n"
- "st1 { v12.h }[6], [x25]\n"
- "st1 { v16.h }[6], [x24]\n"
- "st1 { v20.h }[6], [x23]\n"
- "st1 { v24.h }[6], [x22]\n"
+ "st1 { v8.h }[6], [x28]\n"
+ "st1 { v12.h }[6], [x24]\n"
+ "st1 { v16.h }[6], [x23]\n"
+ "st1 { v20.h }[6], [x22]\n"
+ "st1 { v24.h }[6], [x21]\n"
"b 243f\n"
"240:" // Height 5: Partial direct writeback: partial_1_4
"tbz x11, #0, 243f\n"
- "st1 { v8.h }[4], [x9]\n"
- "st1 { v12.h }[4], [x25]\n"
- "st1 { v16.h }[4], [x24]\n"
- "st1 { v20.h }[4], [x23]\n"
- "st1 { v24.h }[4], [x22]\n"
+ "st1 { v8.h }[4], [x28]\n"
+ "st1 { v12.h }[4], [x24]\n"
+ "st1 { v16.h }[4], [x23]\n"
+ "st1 { v20.h }[4], [x22]\n"
+ "st1 { v24.h }[4], [x21]\n"
"b 243f\n"
"241:" // Height 5: Partial direct writeback: partial_2_0
"tbz x11, #1, 242f\n"
- "str s8, [x9], #0x4\n"
- "str s12, [x25], #0x4\n"
- "str s16, [x24], #0x4\n"
- "str s20, [x23], #0x4\n"
- "str s24, [x22], #0x4\n"
+ "str s8, [x28], #0x4\n"
+ "str s12, [x24], #0x4\n"
+ "str s16, [x23], #0x4\n"
+ "str s20, [x22], #0x4\n"
+ "str s24, [x21], #0x4\n"
"tbz x11, #0, 243f\n"
- "st1 { v8.h }[2], [x9]\n"
- "st1 { v12.h }[2], [x25]\n"
- "st1 { v16.h }[2], [x24]\n"
- "st1 { v20.h }[2], [x23]\n"
- "st1 { v24.h }[2], [x22]\n"
+ "st1 { v8.h }[2], [x28]\n"
+ "st1 { v12.h }[2], [x24]\n"
+ "st1 { v16.h }[2], [x23]\n"
+ "st1 { v20.h }[2], [x22]\n"
+ "st1 { v24.h }[2], [x21]\n"
"b 243f\n"
"242:" // Height 5: Partial direct writeback: partial_1_0
- "str h8, [x9, #0x0]\n"
- "str h12, [x25, #0x0]\n"
- "str h16, [x24, #0x0]\n"
- "str h20, [x23, #0x0]\n"
- "str h24, [x22, #0x0]\n"
+ "str h8, [x28, #0x0]\n"
+ "str h12, [x24, #0x0]\n"
+ "str h16, [x23, #0x0]\n"
+ "str h20, [x22, #0x0]\n"
+ "str h24, [x21, #0x0]\n"
"243:" // Height 5: Partial direct writeback: Done
"b 245f\n"
"244:" // Height 5: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
- "str q24, [x22, #0x0]\n"
- "str q25, [x22, #0x10]\n"
- "str q26, [x22, #0x20]\n"
- "str q27, [x22, #0x30]\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "str q20, [x22, #0x0]\n"
+ "str q21, [x22, #0x10]\n"
+ "str q22, [x22, #0x20]\n"
+ "str q23, [x22, #0x30]\n"
+ "str q24, [x21, #0x0]\n"
+ "str q25, [x21, #0x10]\n"
+ "str q26, [x21, #0x20]\n"
+ "str q27, [x21, #0x30]\n"
"245:" // Height 5: Writeback done
"subs x11, x11, #0x20\n"
"bgt 198b\n"
"b 296f\n"
"246:" // Height 6
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0xc\n"
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x19, #0xc\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"247:" // Height 6: Column loop
- "cbz x12, 248f\n"
- "ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
+ "cbz x9, 248f\n"
+ "ldr q8, [x9, #0x0]\n"
"mov v12.16b, v8.16b\n"
+ "ldr q9, [x9, #0x10]\n"
+ "mov v16.16b, v8.16b\n"
+ "ldr q10, [x9, #0x20]\n"
+ "mov v20.16b, v8.16b\n"
+ "ldr q11, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "mov v24.16b, v8.16b\n"
+ "mov v28.16b, v8.16b\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
"mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
- "mov v16.16b, v8.16b\n"
"mov v17.16b, v9.16b\n"
- "add x12, x12, #0x40\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
- "mov v20.16b, v8.16b\n"
"mov v21.16b, v9.16b\n"
"mov v22.16b, v10.16b\n"
"mov v23.16b, v11.16b\n"
- "mov v24.16b, v8.16b\n"
"mov v25.16b, v9.16b\n"
"mov v26.16b, v10.16b\n"
"mov v27.16b, v11.16b\n"
- "mov v28.16b, v8.16b\n"
"mov v29.16b, v9.16b\n"
"mov v30.16b, v10.16b\n"
"mov v31.16b, v11.16b\n"
"b 267f\n"
"248:" // Height 6: no bias
"tbz %x[flags], #0, 266f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x11, #0x20\n"
- "add x21, x22, x20, LSL #1\n"
+ "add x24, x28, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
"bge 265f\n"
"tbz x11, #4, 256f\n"
- "ld1 { v8.8h }, [x9], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v20.8h }, [x23], #0x10\n"
- "ld1 { v24.8h }, [x22], #0x10\n"
- "ld1 { v28.8h }, [x21], #0x10\n"
- "ld1 { v9.8h }, [x9], #0x10\n"
- "ld1 { v13.8h }, [x25], #0x10\n"
- "ld1 { v17.8h }, [x24], #0x10\n"
- "ld1 { v21.8h }, [x23], #0x10\n"
- "ld1 { v25.8h }, [x22], #0x10\n"
- "ld1 { v29.8h }, [x21], #0x10\n"
+ "ld1 { v8.8h }, [x28], #0x10\n"
+ "ld1 { v12.8h }, [x24], #0x10\n"
+ "ld1 { v16.8h }, [x23], #0x10\n"
+ "ld1 { v20.8h }, [x22], #0x10\n"
+ "ld1 { v24.8h }, [x21], #0x10\n"
+ "ld1 { v9.8h }, [x28], #0x10\n"
+ "ld1 { v13.8h }, [x24], #0x10\n"
+ "ld1 { v17.8h }, [x23], #0x10\n"
+ "ld1 { v21.8h }, [x22], #0x10\n"
+ "ld1 { v25.8h }, [x21], #0x10\n"
+ "ld1 { v28.8h }, [x20], #0x10\n"
+ "ld1 { v29.8h }, [x20], #0x10\n"
"tbz x11, #3, 252f\n"
- "ld1 { v10.8h }, [x9], #0x10\n"
- "ld1 { v14.8h }, [x25], #0x10\n"
- "ld1 { v18.8h }, [x24], #0x10\n"
- "ld1 { v22.8h }, [x23], #0x10\n"
- "ld1 { v26.8h }, [x22], #0x10\n"
- "ld1 { v30.8h }, [x21], #0x10\n"
+ "ld1 { v10.8h }, [x28], #0x10\n"
+ "ld1 { v14.8h }, [x24], #0x10\n"
+ "ld1 { v18.8h }, [x23], #0x10\n"
+ "ld1 { v22.8h }, [x22], #0x10\n"
+ "ld1 { v26.8h }, [x21], #0x10\n"
+ "ld1 { v30.8h }, [x20], #0x10\n"
"tbz x11, #2, 250f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
- "ldr d31, [x21], #0x8\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "ldr d27, [x21], #0x8\n"
+ "ldr d31, [x20], #0x8\n"
"tbz x11, #1, 249f\n"
- "ld1 { v11.s }[2], [x9], #0x4\n"
- "ld1 { v15.s }[2], [x25], #0x4\n"
- "mov x20, #0x3c\n"
- "ld1 { v19.s }[2], [x24], #0x4\n"
- "ld1 { v23.s }[2], [x23], #0x4\n"
- "ld1 { v27.s }[2], [x22], #0x4\n"
- "ld1 { v31.s }[2], [x21], #0x4\n"
+ "ld1 { v11.s }[2], [x28], #0x4\n"
+ "mov x19, #0x3c\n"
+ "ld1 { v15.s }[2], [x24], #0x4\n"
+ "ld1 { v19.s }[2], [x23], #0x4\n"
+ "ld1 { v23.s }[2], [x22], #0x4\n"
+ "ld1 { v27.s }[2], [x21], #0x4\n"
+ "ld1 { v31.s }[2], [x20], #0x4\n"
"tbz x11, #0, 264f\n"
- "ld1 { v11.h }[6], [x9]\n"
- "ld1 { v15.h }[6], [x25]\n"
- "ld1 { v19.h }[6], [x24]\n"
- "ld1 { v23.h }[6], [x23]\n"
- "ld1 { v27.h }[6], [x22]\n"
- "ld1 { v31.h }[6], [x21]\n"
+ "ld1 { v11.h }[6], [x28]\n"
+ "ld1 { v15.h }[6], [x24]\n"
+ "ld1 { v19.h }[6], [x23]\n"
+ "ld1 { v23.h }[6], [x22]\n"
+ "ld1 { v27.h }[6], [x21]\n"
+ "ld1 { v31.h }[6], [x20]\n"
"b 264f\n"
"249:" // Height 6: Partial accumulate: partial_1_28
- "mov x20, #0x38\n"
+ "mov x19, #0x38\n"
"tbz x11, #0, 264f\n"
- "ld1 { v11.h }[4], [x9]\n"
- "ld1 { v15.h }[4], [x25]\n"
- "ld1 { v19.h }[4], [x24]\n"
- "ld1 { v23.h }[4], [x23]\n"
- "ld1 { v27.h }[4], [x22]\n"
- "ld1 { v31.h }[4], [x21]\n"
+ "ld1 { v11.h }[4], [x28]\n"
+ "ld1 { v15.h }[4], [x24]\n"
+ "ld1 { v19.h }[4], [x23]\n"
+ "ld1 { v23.h }[4], [x22]\n"
+ "ld1 { v27.h }[4], [x21]\n"
+ "ld1 { v31.h }[4], [x20]\n"
"b 264f\n"
"250:" // Height 6: Partial accumulate: partial_2_24
"tbz x11, #1, 251f\n"
- "ldr s11, [x9], #0x4\n"
- "ldr s15, [x25], #0x4\n"
- "mov x20, #0x34\n"
- "ldr s19, [x24], #0x4\n"
- "ldr s23, [x23], #0x4\n"
- "ldr s27, [x22], #0x4\n"
- "ldr s31, [x21], #0x4\n"
+ "ldr s11, [x28], #0x4\n"
+ "ldr s15, [x24], #0x4\n"
+ "mov x19, #0x34\n"
+ "ldr s19, [x23], #0x4\n"
+ "ldr s23, [x22], #0x4\n"
+ "ldr s27, [x21], #0x4\n"
+ "ldr s31, [x20], #0x4\n"
"tbz x11, #0, 264f\n"
- "ld1 { v11.h }[2], [x9]\n"
- "ld1 { v15.h }[2], [x25]\n"
- "ld1 { v19.h }[2], [x24]\n"
- "ld1 { v23.h }[2], [x23]\n"
- "ld1 { v27.h }[2], [x22]\n"
- "ld1 { v31.h }[2], [x21]\n"
+ "ld1 { v11.h }[2], [x28]\n"
+ "ld1 { v15.h }[2], [x24]\n"
+ "ld1 { v19.h }[2], [x23]\n"
+ "ld1 { v23.h }[2], [x22]\n"
+ "ld1 { v27.h }[2], [x21]\n"
+ "ld1 { v31.h }[2], [x20]\n"
"b 264f\n"
"251:" // Height 6: Partial accumulate: partial_1_24
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x11, #0, 264f\n"
- "ldr h11, [x9, #0x0]\n"
- "ldr h15, [x25, #0x0]\n"
- "ldr h19, [x24, #0x0]\n"
- "ldr h23, [x23, #0x0]\n"
- "ldr h27, [x22, #0x0]\n"
- "ldr h31, [x21, #0x0]\n"
+ "ldr h11, [x28, #0x0]\n"
+ "ldr h15, [x24, #0x0]\n"
+ "ldr h19, [x23, #0x0]\n"
+ "ldr h23, [x22, #0x0]\n"
+ "ldr h27, [x21, #0x0]\n"
+ "ldr h31, [x20, #0x0]\n"
"b 264f\n"
"252:" // Height 6: Partial accumulate: partial_4_16
"tbz x11, #2, 254f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
- "ldr d30, [x21], #0x8\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "ldr d26, [x21], #0x8\n"
+ "ldr d30, [x20], #0x8\n"
"tbz x11, #1, 253f\n"
- "ld1 { v10.s }[2], [x9], #0x4\n"
- "ld1 { v14.s }[2], [x25], #0x4\n"
- "mov x20, #0x2c\n"
- "ld1 { v18.s }[2], [x24], #0x4\n"
- "ld1 { v22.s }[2], [x23], #0x4\n"
- "ld1 { v26.s }[2], [x22], #0x4\n"
- "ld1 { v30.s }[2], [x21], #0x4\n"
+ "ld1 { v10.s }[2], [x28], #0x4\n"
+ "mov x19, #0x2c\n"
+ "ld1 { v14.s }[2], [x24], #0x4\n"
+ "ld1 { v18.s }[2], [x23], #0x4\n"
+ "ld1 { v22.s }[2], [x22], #0x4\n"
+ "ld1 { v26.s }[2], [x21], #0x4\n"
+ "ld1 { v30.s }[2], [x20], #0x4\n"
"tbz x11, #0, 264f\n"
- "ld1 { v10.h }[6], [x9]\n"
- "ld1 { v14.h }[6], [x25]\n"
- "ld1 { v18.h }[6], [x24]\n"
- "ld1 { v22.h }[6], [x23]\n"
- "ld1 { v26.h }[6], [x22]\n"
- "ld1 { v30.h }[6], [x21]\n"
+ "ld1 { v10.h }[6], [x28]\n"
+ "ld1 { v14.h }[6], [x24]\n"
+ "ld1 { v18.h }[6], [x23]\n"
+ "ld1 { v22.h }[6], [x22]\n"
+ "ld1 { v26.h }[6], [x21]\n"
+ "ld1 { v30.h }[6], [x20]\n"
"b 264f\n"
"253:" // Height 6: Partial accumulate: partial_1_20
- "mov x20, #0x28\n"
+ "mov x19, #0x28\n"
"tbz x11, #0, 264f\n"
- "ld1 { v10.h }[4], [x9]\n"
- "ld1 { v14.h }[4], [x25]\n"
- "ld1 { v18.h }[4], [x24]\n"
- "ld1 { v22.h }[4], [x23]\n"
- "ld1 { v26.h }[4], [x22]\n"
- "ld1 { v30.h }[4], [x21]\n"
+ "ld1 { v10.h }[4], [x28]\n"
+ "ld1 { v14.h }[4], [x24]\n"
+ "ld1 { v18.h }[4], [x23]\n"
+ "ld1 { v22.h }[4], [x22]\n"
+ "ld1 { v26.h }[4], [x21]\n"
+ "ld1 { v30.h }[4], [x20]\n"
"b 264f\n"
"254:" // Height 6: Partial accumulate: partial_2_16
"tbz x11, #1, 255f\n"
- "ldr s10, [x9], #0x4\n"
- "ldr s14, [x25], #0x4\n"
- "mov x20, #0x24\n"
- "ldr s18, [x24], #0x4\n"
- "ldr s22, [x23], #0x4\n"
- "ldr s26, [x22], #0x4\n"
- "ldr s30, [x21], #0x4\n"
+ "ldr s10, [x28], #0x4\n"
+ "ldr s14, [x24], #0x4\n"
+ "mov x19, #0x24\n"
+ "ldr s18, [x23], #0x4\n"
+ "ldr s22, [x22], #0x4\n"
+ "ldr s26, [x21], #0x4\n"
+ "ldr s30, [x20], #0x4\n"
"tbz x11, #0, 264f\n"
- "ld1 { v10.h }[2], [x9]\n"
- "ld1 { v14.h }[2], [x25]\n"
- "ld1 { v18.h }[2], [x24]\n"
- "ld1 { v22.h }[2], [x23]\n"
- "ld1 { v26.h }[2], [x22]\n"
- "ld1 { v30.h }[2], [x21]\n"
+ "ld1 { v10.h }[2], [x28]\n"
+ "ld1 { v14.h }[2], [x24]\n"
+ "ld1 { v18.h }[2], [x23]\n"
+ "ld1 { v22.h }[2], [x22]\n"
+ "ld1 { v26.h }[2], [x21]\n"
+ "ld1 { v30.h }[2], [x20]\n"
"b 264f\n"
"255:" // Height 6: Partial accumulate: partial_1_16
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x11, #0, 264f\n"
- "ldr h10, [x9, #0x0]\n"
- "ldr h14, [x25, #0x0]\n"
- "ldr h18, [x24, #0x0]\n"
- "ldr h22, [x23, #0x0]\n"
- "ldr h26, [x22, #0x0]\n"
- "ldr h30, [x21, #0x0]\n"
+ "ldr h10, [x28, #0x0]\n"
+ "ldr h14, [x24, #0x0]\n"
+ "ldr h18, [x23, #0x0]\n"
+ "ldr h22, [x22, #0x0]\n"
+ "ldr h26, [x21, #0x0]\n"
+ "ldr h30, [x20, #0x0]\n"
"b 264f\n"
"256:" // Height 6: Partial accumulate: partial_8_0
"tbz x11, #3, 260f\n"
- "ld1 { v8.8h }, [x9], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v20.8h }, [x23], #0x10\n"
- "ld1 { v24.8h }, [x22], #0x10\n"
- "ld1 { v28.8h }, [x21], #0x10\n"
+ "ld1 { v8.8h }, [x28], #0x10\n"
+ "ld1 { v12.8h }, [x24], #0x10\n"
+ "ld1 { v16.8h }, [x23], #0x10\n"
+ "ld1 { v20.8h }, [x22], #0x10\n"
+ "ld1 { v24.8h }, [x21], #0x10\n"
+ "ld1 { v28.8h }, [x20], #0x10\n"
"tbz x11, #2, 258f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
- "ldr d29, [x21], #0x8\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d25, [x21], #0x8\n"
+ "ldr d29, [x20], #0x8\n"
"tbz x11, #1, 257f\n"
- "ld1 { v9.s }[2], [x9], #0x4\n"
- "ld1 { v13.s }[2], [x25], #0x4\n"
- "mov x20, #0x1c\n"
- "ld1 { v17.s }[2], [x24], #0x4\n"
- "ld1 { v21.s }[2], [x23], #0x4\n"
- "ld1 { v25.s }[2], [x22], #0x4\n"
- "ld1 { v29.s }[2], [x21], #0x4\n"
+ "ld1 { v9.s }[2], [x28], #0x4\n"
+ "mov x19, #0x1c\n"
+ "ld1 { v13.s }[2], [x24], #0x4\n"
+ "ld1 { v17.s }[2], [x23], #0x4\n"
+ "ld1 { v21.s }[2], [x22], #0x4\n"
+ "ld1 { v25.s }[2], [x21], #0x4\n"
+ "ld1 { v29.s }[2], [x20], #0x4\n"
"tbz x11, #0, 264f\n"
- "ld1 { v9.h }[6], [x9]\n"
- "ld1 { v13.h }[6], [x25]\n"
- "ld1 { v17.h }[6], [x24]\n"
- "ld1 { v21.h }[6], [x23]\n"
- "ld1 { v25.h }[6], [x22]\n"
- "ld1 { v29.h }[6], [x21]\n"
+ "ld1 { v9.h }[6], [x28]\n"
+ "ld1 { v13.h }[6], [x24]\n"
+ "ld1 { v17.h }[6], [x23]\n"
+ "ld1 { v21.h }[6], [x22]\n"
+ "ld1 { v25.h }[6], [x21]\n"
+ "ld1 { v29.h }[6], [x20]\n"
"b 264f\n"
"257:" // Height 6: Partial accumulate: partial_1_12
- "mov x20, #0x18\n"
+ "mov x19, #0x18\n"
"tbz x11, #0, 264f\n"
- "ld1 { v9.h }[4], [x9]\n"
- "ld1 { v13.h }[4], [x25]\n"
- "ld1 { v17.h }[4], [x24]\n"
- "ld1 { v21.h }[4], [x23]\n"
- "ld1 { v25.h }[4], [x22]\n"
- "ld1 { v29.h }[4], [x21]\n"
+ "ld1 { v9.h }[4], [x28]\n"
+ "ld1 { v13.h }[4], [x24]\n"
+ "ld1 { v17.h }[4], [x23]\n"
+ "ld1 { v21.h }[4], [x22]\n"
+ "ld1 { v25.h }[4], [x21]\n"
+ "ld1 { v29.h }[4], [x20]\n"
"b 264f\n"
"258:" // Height 6: Partial accumulate: partial_2_8
"tbz x11, #1, 259f\n"
- "ldr s9, [x9], #0x4\n"
- "ldr s13, [x25], #0x4\n"
- "mov x20, #0x14\n"
- "ldr s17, [x24], #0x4\n"
- "ldr s21, [x23], #0x4\n"
- "ldr s25, [x22], #0x4\n"
- "ldr s29, [x21], #0x4\n"
+ "ldr s9, [x28], #0x4\n"
+ "ldr s13, [x24], #0x4\n"
+ "mov x19, #0x14\n"
+ "ldr s17, [x23], #0x4\n"
+ "ldr s21, [x22], #0x4\n"
+ "ldr s25, [x21], #0x4\n"
+ "ldr s29, [x20], #0x4\n"
"tbz x11, #0, 264f\n"
- "ld1 { v9.h }[2], [x9]\n"
- "ld1 { v13.h }[2], [x25]\n"
- "ld1 { v17.h }[2], [x24]\n"
- "ld1 { v21.h }[2], [x23]\n"
- "ld1 { v25.h }[2], [x22]\n"
- "ld1 { v29.h }[2], [x21]\n"
+ "ld1 { v9.h }[2], [x28]\n"
+ "ld1 { v13.h }[2], [x24]\n"
+ "ld1 { v17.h }[2], [x23]\n"
+ "ld1 { v21.h }[2], [x22]\n"
+ "ld1 { v25.h }[2], [x21]\n"
+ "ld1 { v29.h }[2], [x20]\n"
"b 264f\n"
"259:" // Height 6: Partial accumulate: partial_1_8
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x11, #0, 264f\n"
- "ldr h9, [x9, #0x0]\n"
- "ldr h13, [x25, #0x0]\n"
- "ldr h17, [x24, #0x0]\n"
- "ldr h21, [x23, #0x0]\n"
- "ldr h25, [x22, #0x0]\n"
- "ldr h29, [x21, #0x0]\n"
+ "ldr h9, [x28, #0x0]\n"
+ "ldr h13, [x24, #0x0]\n"
+ "ldr h17, [x23, #0x0]\n"
+ "ldr h21, [x22, #0x0]\n"
+ "ldr h25, [x21, #0x0]\n"
+ "ldr h29, [x20, #0x0]\n"
"b 264f\n"
"260:" // Height 6: Partial accumulate: partial_4_0
"tbz x11, #2, 262f\n"
- "ldr d8, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d24, [x22], #0x8\n"
- "ldr d28, [x21], #0x8\n"
+ "ldr d8, [x28], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "ldr d16, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "ldr d24, [x21], #0x8\n"
+ "ldr d28, [x20], #0x8\n"
"tbz x11, #1, 261f\n"
- "ld1 { v8.s }[2], [x9], #0x4\n"
- "ld1 { v12.s }[2], [x25], #0x4\n"
- "mov x20, #0xc\n"
- "ld1 { v16.s }[2], [x24], #0x4\n"
- "ld1 { v20.s }[2], [x23], #0x4\n"
- "ld1 { v24.s }[2], [x22], #0x4\n"
- "ld1 { v28.s }[2], [x21], #0x4\n"
+ "ld1 { v8.s }[2], [x28], #0x4\n"
+ "mov x19, #0xc\n"
+ "ld1 { v12.s }[2], [x24], #0x4\n"
+ "ld1 { v16.s }[2], [x23], #0x4\n"
+ "ld1 { v20.s }[2], [x22], #0x4\n"
+ "ld1 { v24.s }[2], [x21], #0x4\n"
+ "ld1 { v28.s }[2], [x20], #0x4\n"
"tbz x11, #0, 264f\n"
- "ld1 { v8.h }[6], [x9]\n"
- "ld1 { v12.h }[6], [x25]\n"
- "ld1 { v16.h }[6], [x24]\n"
- "ld1 { v20.h }[6], [x23]\n"
- "ld1 { v24.h }[6], [x22]\n"
- "ld1 { v28.h }[6], [x21]\n"
+ "ld1 { v8.h }[6], [x28]\n"
+ "ld1 { v12.h }[6], [x24]\n"
+ "ld1 { v16.h }[6], [x23]\n"
+ "ld1 { v20.h }[6], [x22]\n"
+ "ld1 { v24.h }[6], [x21]\n"
+ "ld1 { v28.h }[6], [x20]\n"
"b 264f\n"
"261:" // Height 6: Partial accumulate: partial_1_4
- "mov x20, #0x8\n"
+ "mov x19, #0x8\n"
"tbz x11, #0, 264f\n"
- "ld1 { v8.h }[4], [x9]\n"
- "ld1 { v12.h }[4], [x25]\n"
- "ld1 { v16.h }[4], [x24]\n"
- "ld1 { v20.h }[4], [x23]\n"
- "ld1 { v24.h }[4], [x22]\n"
- "ld1 { v28.h }[4], [x21]\n"
+ "ld1 { v8.h }[4], [x28]\n"
+ "ld1 { v12.h }[4], [x24]\n"
+ "ld1 { v16.h }[4], [x23]\n"
+ "ld1 { v20.h }[4], [x22]\n"
+ "ld1 { v24.h }[4], [x21]\n"
+ "ld1 { v28.h }[4], [x20]\n"
"b 264f\n"
"262:" // Height 6: Partial accumulate: partial_2_0
"tbz x11, #1, 263f\n"
- "ldr s8, [x9], #0x4\n"
- "ldr s12, [x25], #0x4\n"
- "mov x20, #0x4\n"
- "ldr s16, [x24], #0x4\n"
- "ldr s20, [x23], #0x4\n"
- "ldr s24, [x22], #0x4\n"
- "ldr s28, [x21], #0x4\n"
+ "ldr s8, [x28], #0x4\n"
+ "ldr s12, [x24], #0x4\n"
+ "mov x19, #0x4\n"
+ "ldr s16, [x23], #0x4\n"
+ "ldr s20, [x22], #0x4\n"
+ "ldr s24, [x21], #0x4\n"
+ "ldr s28, [x20], #0x4\n"
"tbz x11, #0, 264f\n"
- "ld1 { v8.h }[2], [x9]\n"
- "ld1 { v12.h }[2], [x25]\n"
- "ld1 { v16.h }[2], [x24]\n"
- "ld1 { v20.h }[2], [x23]\n"
- "ld1 { v24.h }[2], [x22]\n"
- "ld1 { v28.h }[2], [x21]\n"
+ "ld1 { v8.h }[2], [x28]\n"
+ "ld1 { v12.h }[2], [x24]\n"
+ "ld1 { v16.h }[2], [x23]\n"
+ "ld1 { v20.h }[2], [x22]\n"
+ "ld1 { v24.h }[2], [x21]\n"
+ "ld1 { v28.h }[2], [x20]\n"
"b 264f\n"
"263:" // Height 6: Partial accumulate: partial_1_0
- "ldr h8, [x9, #0x0]\n"
- "ldr h12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr h16, [x24, #0x0]\n"
- "ldr h20, [x23, #0x0]\n"
- "ldr h24, [x22, #0x0]\n"
- "ldr h28, [x21, #0x0]\n"
+ "ldr h8, [x28, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr h12, [x24, #0x0]\n"
+ "ldr h16, [x23, #0x0]\n"
+ "ldr h20, [x22, #0x0]\n"
+ "ldr h24, [x21, #0x0]\n"
+ "ldr h28, [x20, #0x0]\n"
"264:" // Height 6: Partial accumulate: Done
- "sub x9, x9, x20\n"
+ "sub x28, x28, x19\n"
"b 267f\n"
"265:" // Height 6: full accumulate
- "ldr q8, [x9, #0x0]\n"
- "ldr q9, [x9, #0x10]\n"
- "ldr q10, [x9, #0x20]\n"
- "ldr q11, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q24, [x22, #0x0]\n"
- "ldr q25, [x22, #0x10]\n"
- "ldr q26, [x22, #0x20]\n"
- "ldr q27, [x22, #0x30]\n"
- "ldr q28, [x21, #0x0]\n"
- "ldr q29, [x21, #0x10]\n"
- "ldr q30, [x21, #0x20]\n"
- "ldr q31, [x21, #0x30]\n"
+ "ldr q8, [x28, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q10, [x28, #0x20]\n"
+ "ldr q11, [x28, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x0]\n"
+ "ldr q17, [x23, #0x10]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "ldr q19, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
+ "ldr q24, [x21, #0x0]\n"
+ "ldr q25, [x21, #0x10]\n"
+ "ldr q26, [x21, #0x20]\n"
+ "ldr q27, [x21, #0x30]\n"
+ "ldr q28, [x20, #0x0]\n"
+ "ldr q29, [x20, #0x10]\n"
+ "ldr q30, [x20, #0x20]\n"
+ "ldr q31, [x20, #0x30]\n"
"b 267f\n"
"266:" // Height 6: no accumulate
"movi v8.16b, #0x0\n"
@@ -4382,82 +4382,82 @@ void a64_hybrid_fp16_mla_6x32 (
"movi v30.16b, #0x0\n"
"movi v31.16b, #0x0\n"
"267:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"268:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 269f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 270f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
- "add x22, x22, x20, LSL #1\n"
- "add x21, x21, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 270f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
+ "add x20, x20, x19, LSL #1\n"
"b 270f\n"
"269:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
"270:" // Height 6: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"blt 273f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x10\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q3, [x23, #0x0]\n"
- "ldr q4, [x22, #0x0]\n"
- "ldr q5, [x21, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x10\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
+ "ldr q5, [x20, #0x0]\n"
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
"blt 272f\n"
"271:" // Height 6: Multiply loop: Main loop head
"fmla v8.8h, v6.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "add x25, x25, #0x10\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
- "sub x27, x27, #0x8\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "add x23, x23, #0x10\n"
"fmla v20.8h, v6.8h, v3.h[0]\n"
- "add x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "add x22, x22, #0x10\n"
"fmla v24.8h, v6.8h, v4.h[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "add x21, x21, #0x10\n"
"fmla v28.8h, v6.8h, v5.h[0]\n"
"ldr q6, [x10, #0x20]\n"
- "add x23, x23, #0x10\n"
+ "add x20, x20, #0x10\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
+ "sub x26, x26, #0x8\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
- "add x22, x22, #0x10\n"
- "add x21, x21, #0x10\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
+ "cmp x26, #0x10\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
"fmla v21.8h, v7.8h, v3.h[0]\n"
- "cmp x27, #0x10\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"fmla v25.8h, v7.8h, v4.h[0]\n"
"fmla v29.8h, v7.8h, v5.h[0]\n"
"ldr q7, [x10, #0x30]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
"fmla v18.8h, v6.8h, v2.h[0]\n"
"fmla v22.8h, v6.8h, v3.h[0]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
"fmla v26.8h, v6.8h, v4.h[0]\n"
"fmla v30.8h, v6.8h, v5.h[0]\n"
"ldr q6, [x10, #0x40]\n"
@@ -4659,51 +4659,51 @@ void a64_hybrid_fp16_mla_6x32 (
"fmla v30.8h, v6.8h, v5.h[7]\n"
"ldr q6, [x10, #0x0]\n"
"fmla v11.8h, v7.8h, v0.h[7]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
"fmla v15.8h, v7.8h, v1.h[7]\n"
- "ldr q1, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
"fmla v19.8h, v7.8h, v2.h[7]\n"
- "ldr q2, [x24, #0x0]\n"
+ "ldr q2, [x23, #0x0]\n"
"fmla v23.8h, v7.8h, v3.h[7]\n"
- "ldr q3, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
"fmla v27.8h, v7.8h, v4.h[7]\n"
- "ldr q4, [x22, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
"fmla v31.8h, v7.8h, v5.h[7]\n"
- "ldr q5, [x21, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q5, [x20, #0x0]\n"
"bge 271b\n"
"272:" // Height 6: Multiply loop: Single iteration only
"fmla v8.8h, v6.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "sub x26, x26, #0x8\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
- "fmla v20.8h, v6.8h, v3.h[0]\n"
"add x24, x24, #0x10\n"
+ "fmla v20.8h, v6.8h, v3.h[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"add x23, x23, #0x10\n"
"fmla v24.8h, v6.8h, v4.h[0]\n"
- "fmla v28.8h, v6.8h, v5.h[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"add x22, x22, #0x10\n"
+ "fmla v28.8h, v6.8h, v5.h[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "add x21, x21, #0x10\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
+ "ldr q6, [x10, #0x20]\n"
+ "add x20, x20, #0x10\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
- "add x21, x21, #0x10\n"
- "sub x27, x27, #0x8\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
"fmla v21.8h, v7.8h, v3.h[0]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"fmla v25.8h, v7.8h, v4.h[0]\n"
"fmla v29.8h, v7.8h, v5.h[0]\n"
"ldr q7, [x10, #0x30]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
"fmla v18.8h, v6.8h, v2.h[0]\n"
"fmla v22.8h, v6.8h, v3.h[0]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
"fmla v26.8h, v6.8h, v4.h[0]\n"
"fmla v30.8h, v6.8h, v5.h[0]\n"
"ldr q6, [x10, #0x40]\n"
@@ -4910,18 +4910,18 @@ void a64_hybrid_fp16_mla_6x32 (
"fmla v27.8h, v7.8h, v4.h[7]\n"
"fmla v31.8h, v7.8h, v5.h[7]\n"
"273:" // Height 6: Multiply loop: Main loop skip
- "cbz x27, 275f\n"
+ "cbz x26, 275f\n"
"274:" // Height 6: Multiply loop: Odd block loop
- "ldr h0, [x26], #0x2\n"
- "ldr h1, [x25], #0x2\n"
- "sub x27, x27, #0x1\n"
- "ldr h2, [x24], #0x2\n"
- "ldr h3, [x23], #0x2\n"
- "ldr h4, [x22], #0x2\n"
- "ldr h5, [x21], #0x2\n"
+ "ldr h0, [x25], #0x2\n"
+ "sub x26, x26, #0x1\n"
+ "ldr h1, [x24], #0x2\n"
+ "ldr h2, [x23], #0x2\n"
+ "ldr h3, [x22], #0x2\n"
+ "ldr h4, [x21], #0x2\n"
+ "ldr h5, [x20], #0x2\n"
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
"fmla v8.8h, v6.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
"fmla v20.8h, v6.8h, v3.h[0]\n"
@@ -4948,379 +4948,379 @@ void a64_hybrid_fp16_mla_6x32 (
"fmla v23.8h, v7.8h, v3.h[0]\n"
"fmla v27.8h, v7.8h, v4.h[0]\n"
"fmla v31.8h, v7.8h, v5.h[0]\n"
- "cbnz x27, 274b\n"
+ "cbnz x26, 274b\n"
"275:" // Height 6: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 268b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
- "prfm pstl1keep, [x25, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x24, x28, x19, LSL #1\n"
"prfm pstl1keep, [x24, #0x0]\n"
- "add x21, x22, x20, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #1\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #1\n"
"prfm pstl1keep, [x21, #0x0]\n"
+ "add x20, x21, x19, LSL #1\n"
+ "prfm pstl1keep, [x20, #0x0]\n"
"tbz %x[flags], #1, 276f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.8h }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.8h }, [x20]\n"
- "fmin v8.8h, v8.8h, v1.8h\n"
- "fmin v9.8h, v9.8h, v1.8h\n"
- "fmin v10.8h, v10.8h, v1.8h\n"
- "fmin v11.8h, v11.8h, v1.8h\n"
- "fmin v12.8h, v12.8h, v1.8h\n"
- "fmin v13.8h, v13.8h, v1.8h\n"
- "fmin v14.8h, v14.8h, v1.8h\n"
- "fmin v15.8h, v15.8h, v1.8h\n"
- "fmin v16.8h, v16.8h, v1.8h\n"
- "fmin v17.8h, v17.8h, v1.8h\n"
- "fmin v18.8h, v18.8h, v1.8h\n"
- "fmin v19.8h, v19.8h, v1.8h\n"
- "fmin v20.8h, v20.8h, v1.8h\n"
- "fmin v21.8h, v21.8h, v1.8h\n"
- "fmin v22.8h, v22.8h, v1.8h\n"
- "fmin v23.8h, v23.8h, v1.8h\n"
- "fmin v24.8h, v24.8h, v1.8h\n"
- "fmin v25.8h, v25.8h, v1.8h\n"
- "fmin v26.8h, v26.8h, v1.8h\n"
- "fmin v27.8h, v27.8h, v1.8h\n"
- "fmin v28.8h, v28.8h, v1.8h\n"
- "fmin v29.8h, v29.8h, v1.8h\n"
- "fmin v30.8h, v30.8h, v1.8h\n"
- "fmin v31.8h, v31.8h, v1.8h\n"
- "fmax v8.8h, v8.8h, v0.8h\n"
- "fmax v9.8h, v9.8h, v0.8h\n"
- "fmax v10.8h, v10.8h, v0.8h\n"
- "fmax v11.8h, v11.8h, v0.8h\n"
- "fmax v12.8h, v12.8h, v0.8h\n"
- "fmax v13.8h, v13.8h, v0.8h\n"
- "fmax v14.8h, v14.8h, v0.8h\n"
- "fmax v15.8h, v15.8h, v0.8h\n"
- "fmax v16.8h, v16.8h, v0.8h\n"
- "fmax v17.8h, v17.8h, v0.8h\n"
- "fmax v18.8h, v18.8h, v0.8h\n"
- "fmax v19.8h, v19.8h, v0.8h\n"
- "fmax v20.8h, v20.8h, v0.8h\n"
- "fmax v21.8h, v21.8h, v0.8h\n"
- "fmax v22.8h, v22.8h, v0.8h\n"
- "fmax v23.8h, v23.8h, v0.8h\n"
- "fmax v24.8h, v24.8h, v0.8h\n"
- "fmax v25.8h, v25.8h, v0.8h\n"
- "fmax v26.8h, v26.8h, v0.8h\n"
- "fmax v27.8h, v27.8h, v0.8h\n"
- "fmax v28.8h, v28.8h, v0.8h\n"
- "fmax v29.8h, v29.8h, v0.8h\n"
- "fmax v30.8h, v30.8h, v0.8h\n"
- "fmax v31.8h, v31.8h, v0.8h\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.8h }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.8h }, [x19]\n"
+ "fmin v8.8h, v8.8h, v0.8h\n"
+ "fmin v9.8h, v9.8h, v0.8h\n"
+ "fmin v10.8h, v10.8h, v0.8h\n"
+ "fmin v11.8h, v11.8h, v0.8h\n"
+ "fmax v8.8h, v8.8h, v1.8h\n"
+ "fmax v9.8h, v9.8h, v1.8h\n"
+ "fmax v10.8h, v10.8h, v1.8h\n"
+ "fmax v11.8h, v11.8h, v1.8h\n"
+ "fmin v12.8h, v12.8h, v0.8h\n"
+ "fmin v13.8h, v13.8h, v0.8h\n"
+ "fmin v14.8h, v14.8h, v0.8h\n"
+ "fmax v12.8h, v12.8h, v1.8h\n"
+ "fmax v13.8h, v13.8h, v1.8h\n"
+ "fmax v14.8h, v14.8h, v1.8h\n"
+ "fmin v15.8h, v15.8h, v0.8h\n"
+ "fmin v16.8h, v16.8h, v0.8h\n"
+ "fmin v17.8h, v17.8h, v0.8h\n"
+ "fmax v15.8h, v15.8h, v1.8h\n"
+ "fmax v16.8h, v16.8h, v1.8h\n"
+ "fmax v17.8h, v17.8h, v1.8h\n"
+ "fmin v18.8h, v18.8h, v0.8h\n"
+ "fmin v19.8h, v19.8h, v0.8h\n"
+ "fmin v20.8h, v20.8h, v0.8h\n"
+ "fmax v18.8h, v18.8h, v1.8h\n"
+ "fmax v19.8h, v19.8h, v1.8h\n"
+ "fmax v20.8h, v20.8h, v1.8h\n"
+ "fmin v21.8h, v21.8h, v0.8h\n"
+ "fmin v22.8h, v22.8h, v0.8h\n"
+ "fmin v23.8h, v23.8h, v0.8h\n"
+ "fmax v21.8h, v21.8h, v1.8h\n"
+ "fmax v22.8h, v22.8h, v1.8h\n"
+ "fmax v23.8h, v23.8h, v1.8h\n"
+ "fmin v24.8h, v24.8h, v0.8h\n"
+ "fmin v25.8h, v25.8h, v0.8h\n"
+ "fmin v26.8h, v26.8h, v0.8h\n"
+ "fmax v24.8h, v24.8h, v1.8h\n"
+ "fmax v25.8h, v25.8h, v1.8h\n"
+ "fmax v26.8h, v26.8h, v1.8h\n"
+ "fmin v27.8h, v27.8h, v0.8h\n"
+ "fmin v28.8h, v28.8h, v0.8h\n"
+ "fmin v29.8h, v29.8h, v0.8h\n"
+ "fmax v27.8h, v27.8h, v1.8h\n"
+ "fmax v28.8h, v28.8h, v1.8h\n"
+ "fmax v29.8h, v29.8h, v1.8h\n"
+ "fmin v30.8h, v30.8h, v0.8h\n"
+ "fmin v31.8h, v31.8h, v0.8h\n"
+ "fmax v30.8h, v30.8h, v1.8h\n"
+ "fmax v31.8h, v31.8h, v1.8h\n"
"276:" // Height 6: No activation
"cmp x11, #0x20\n"
"bge 293f\n"
"tbz x11, #4, 284f\n"
- "st1 { v8.8h }, [x9], #0x10\n"
- "st1 { v9.8h }, [x9], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v13.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v17.8h }, [x24], #0x10\n"
- "st1 { v20.8h }, [x23], #0x10\n"
- "st1 { v21.8h }, [x23], #0x10\n"
- "st1 { v24.8h }, [x22], #0x10\n"
- "st1 { v25.8h }, [x22], #0x10\n"
- "st1 { v28.8h }, [x21], #0x10\n"
- "st1 { v29.8h }, [x21], #0x10\n"
+ "st1 { v8.8h }, [x28], #0x10\n"
+ "st1 { v9.8h }, [x28], #0x10\n"
+ "st1 { v12.8h }, [x24], #0x10\n"
+ "st1 { v13.8h }, [x24], #0x10\n"
+ "st1 { v16.8h }, [x23], #0x10\n"
+ "st1 { v17.8h }, [x23], #0x10\n"
+ "st1 { v20.8h }, [x22], #0x10\n"
+ "st1 { v21.8h }, [x22], #0x10\n"
+ "st1 { v24.8h }, [x21], #0x10\n"
+ "st1 { v25.8h }, [x21], #0x10\n"
+ "st1 { v28.8h }, [x20], #0x10\n"
+ "st1 { v29.8h }, [x20], #0x10\n"
"tbz x11, #3, 280f\n"
- "st1 { v10.8h }, [x9], #0x10\n"
- "st1 { v14.8h }, [x25], #0x10\n"
- "st1 { v18.8h }, [x24], #0x10\n"
- "st1 { v22.8h }, [x23], #0x10\n"
- "st1 { v26.8h }, [x22], #0x10\n"
- "st1 { v30.8h }, [x21], #0x10\n"
+ "st1 { v10.8h }, [x28], #0x10\n"
+ "st1 { v14.8h }, [x24], #0x10\n"
+ "st1 { v18.8h }, [x23], #0x10\n"
+ "st1 { v22.8h }, [x22], #0x10\n"
+ "st1 { v26.8h }, [x21], #0x10\n"
+ "st1 { v30.8h }, [x20], #0x10\n"
"tbz x11, #2, 278f\n"
- "str d11, [x9], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
- "str d31, [x21], #0x8\n"
+ "str d11, [x28], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "str d23, [x22], #0x8\n"
+ "str d27, [x21], #0x8\n"
+ "str d31, [x20], #0x8\n"
"tbz x11, #1, 277f\n"
- "st1 { v11.s }[2], [x9], #0x4\n"
- "st1 { v15.s }[2], [x25], #0x4\n"
- "st1 { v19.s }[2], [x24], #0x4\n"
- "st1 { v23.s }[2], [x23], #0x4\n"
- "st1 { v27.s }[2], [x22], #0x4\n"
- "st1 { v31.s }[2], [x21], #0x4\n"
+ "st1 { v11.s }[2], [x28], #0x4\n"
+ "st1 { v15.s }[2], [x24], #0x4\n"
+ "st1 { v19.s }[2], [x23], #0x4\n"
+ "st1 { v23.s }[2], [x22], #0x4\n"
+ "st1 { v27.s }[2], [x21], #0x4\n"
+ "st1 { v31.s }[2], [x20], #0x4\n"
"tbz x11, #0, 292f\n"
- "st1 { v11.h }[6], [x9]\n"
- "st1 { v15.h }[6], [x25]\n"
- "st1 { v19.h }[6], [x24]\n"
- "st1 { v23.h }[6], [x23]\n"
- "st1 { v27.h }[6], [x22]\n"
- "st1 { v31.h }[6], [x21]\n"
+ "st1 { v11.h }[6], [x28]\n"
+ "st1 { v15.h }[6], [x24]\n"
+ "st1 { v19.h }[6], [x23]\n"
+ "st1 { v23.h }[6], [x22]\n"
+ "st1 { v27.h }[6], [x21]\n"
+ "st1 { v31.h }[6], [x20]\n"
"b 292f\n"
"277:" // Height 6: Partial direct writeback: partial_1_28
"tbz x11, #0, 292f\n"
- "st1 { v11.h }[4], [x9]\n"
- "st1 { v15.h }[4], [x25]\n"
- "st1 { v19.h }[4], [x24]\n"
- "st1 { v23.h }[4], [x23]\n"
- "st1 { v27.h }[4], [x22]\n"
- "st1 { v31.h }[4], [x21]\n"
+ "st1 { v11.h }[4], [x28]\n"
+ "st1 { v15.h }[4], [x24]\n"
+ "st1 { v19.h }[4], [x23]\n"
+ "st1 { v23.h }[4], [x22]\n"
+ "st1 { v27.h }[4], [x21]\n"
+ "st1 { v31.h }[4], [x20]\n"
"b 292f\n"
"278:" // Height 6: Partial direct writeback: partial_2_24
"tbz x11, #1, 279f\n"
- "str s11, [x9], #0x4\n"
- "str s15, [x25], #0x4\n"
- "str s19, [x24], #0x4\n"
- "str s23, [x23], #0x4\n"
- "str s27, [x22], #0x4\n"
- "str s31, [x21], #0x4\n"
+ "str s11, [x28], #0x4\n"
+ "str s15, [x24], #0x4\n"
+ "str s19, [x23], #0x4\n"
+ "str s23, [x22], #0x4\n"
+ "str s27, [x21], #0x4\n"
+ "str s31, [x20], #0x4\n"
"tbz x11, #0, 292f\n"
- "st1 { v11.h }[2], [x9]\n"
- "st1 { v15.h }[2], [x25]\n"
- "st1 { v19.h }[2], [x24]\n"
- "st1 { v23.h }[2], [x23]\n"
- "st1 { v27.h }[2], [x22]\n"
- "st1 { v31.h }[2], [x21]\n"
+ "st1 { v11.h }[2], [x28]\n"
+ "st1 { v15.h }[2], [x24]\n"
+ "st1 { v19.h }[2], [x23]\n"
+ "st1 { v23.h }[2], [x22]\n"
+ "st1 { v27.h }[2], [x21]\n"
+ "st1 { v31.h }[2], [x20]\n"
"b 292f\n"
"279:" // Height 6: Partial direct writeback: partial_1_24
"tbz x11, #0, 292f\n"
- "str h11, [x9, #0x0]\n"
- "str h15, [x25, #0x0]\n"
- "str h19, [x24, #0x0]\n"
- "str h23, [x23, #0x0]\n"
- "str h27, [x22, #0x0]\n"
- "str h31, [x21, #0x0]\n"
+ "str h11, [x28, #0x0]\n"
+ "str h15, [x24, #0x0]\n"
+ "str h19, [x23, #0x0]\n"
+ "str h23, [x22, #0x0]\n"
+ "str h27, [x21, #0x0]\n"
+ "str h31, [x20, #0x0]\n"
"b 292f\n"
"280:" // Height 6: Partial direct writeback: partial_4_16
"tbz x11, #2, 282f\n"
- "str d10, [x9], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
- "str d30, [x21], #0x8\n"
+ "str d10, [x28], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
+ "str d22, [x22], #0x8\n"
+ "str d26, [x21], #0x8\n"
+ "str d30, [x20], #0x8\n"
"tbz x11, #1, 281f\n"
- "st1 { v10.s }[2], [x9], #0x4\n"
- "st1 { v14.s }[2], [x25], #0x4\n"
- "st1 { v18.s }[2], [x24], #0x4\n"
- "st1 { v22.s }[2], [x23], #0x4\n"
- "st1 { v26.s }[2], [x22], #0x4\n"
- "st1 { v30.s }[2], [x21], #0x4\n"
+ "st1 { v10.s }[2], [x28], #0x4\n"
+ "st1 { v14.s }[2], [x24], #0x4\n"
+ "st1 { v18.s }[2], [x23], #0x4\n"
+ "st1 { v22.s }[2], [x22], #0x4\n"
+ "st1 { v26.s }[2], [x21], #0x4\n"
+ "st1 { v30.s }[2], [x20], #0x4\n"
"tbz x11, #0, 292f\n"
- "st1 { v10.h }[6], [x9]\n"
- "st1 { v14.h }[6], [x25]\n"
- "st1 { v18.h }[6], [x24]\n"
- "st1 { v22.h }[6], [x23]\n"
- "st1 { v26.h }[6], [x22]\n"
- "st1 { v30.h }[6], [x21]\n"
+ "st1 { v10.h }[6], [x28]\n"
+ "st1 { v14.h }[6], [x24]\n"
+ "st1 { v18.h }[6], [x23]\n"
+ "st1 { v22.h }[6], [x22]\n"
+ "st1 { v26.h }[6], [x21]\n"
+ "st1 { v30.h }[6], [x20]\n"
"b 292f\n"
"281:" // Height 6: Partial direct writeback: partial_1_20
"tbz x11, #0, 292f\n"
- "st1 { v10.h }[4], [x9]\n"
- "st1 { v14.h }[4], [x25]\n"
- "st1 { v18.h }[4], [x24]\n"
- "st1 { v22.h }[4], [x23]\n"
- "st1 { v26.h }[4], [x22]\n"
- "st1 { v30.h }[4], [x21]\n"
+ "st1 { v10.h }[4], [x28]\n"
+ "st1 { v14.h }[4], [x24]\n"
+ "st1 { v18.h }[4], [x23]\n"
+ "st1 { v22.h }[4], [x22]\n"
+ "st1 { v26.h }[4], [x21]\n"
+ "st1 { v30.h }[4], [x20]\n"
"b 292f\n"
"282:" // Height 6: Partial direct writeback: partial_2_16
"tbz x11, #1, 283f\n"
- "str s10, [x9], #0x4\n"
- "str s14, [x25], #0x4\n"
- "str s18, [x24], #0x4\n"
- "str s22, [x23], #0x4\n"
- "str s26, [x22], #0x4\n"
- "str s30, [x21], #0x4\n"
+ "str s10, [x28], #0x4\n"
+ "str s14, [x24], #0x4\n"
+ "str s18, [x23], #0x4\n"
+ "str s22, [x22], #0x4\n"
+ "str s26, [x21], #0x4\n"
+ "str s30, [x20], #0x4\n"
"tbz x11, #0, 292f\n"
- "st1 { v10.h }[2], [x9]\n"
- "st1 { v14.h }[2], [x25]\n"
- "st1 { v18.h }[2], [x24]\n"
- "st1 { v22.h }[2], [x23]\n"
- "st1 { v26.h }[2], [x22]\n"
- "st1 { v30.h }[2], [x21]\n"
+ "st1 { v10.h }[2], [x28]\n"
+ "st1 { v14.h }[2], [x24]\n"
+ "st1 { v18.h }[2], [x23]\n"
+ "st1 { v22.h }[2], [x22]\n"
+ "st1 { v26.h }[2], [x21]\n"
+ "st1 { v30.h }[2], [x20]\n"
"b 292f\n"
"283:" // Height 6: Partial direct writeback: partial_1_16
"tbz x11, #0, 292f\n"
- "str h10, [x9, #0x0]\n"
- "str h14, [x25, #0x0]\n"
- "str h18, [x24, #0x0]\n"
- "str h22, [x23, #0x0]\n"
- "str h26, [x22, #0x0]\n"
- "str h30, [x21, #0x0]\n"
+ "str h10, [x28, #0x0]\n"
+ "str h14, [x24, #0x0]\n"
+ "str h18, [x23, #0x0]\n"
+ "str h22, [x22, #0x0]\n"
+ "str h26, [x21, #0x0]\n"
+ "str h30, [x20, #0x0]\n"
"b 292f\n"
"284:" // Height 6: Partial direct writeback: partial_8_0
"tbz x11, #3, 288f\n"
- "st1 { v8.8h }, [x9], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v20.8h }, [x23], #0x10\n"
- "st1 { v24.8h }, [x22], #0x10\n"
- "st1 { v28.8h }, [x21], #0x10\n"
+ "st1 { v8.8h }, [x28], #0x10\n"
+ "st1 { v12.8h }, [x24], #0x10\n"
+ "st1 { v16.8h }, [x23], #0x10\n"
+ "st1 { v20.8h }, [x22], #0x10\n"
+ "st1 { v24.8h }, [x21], #0x10\n"
+ "st1 { v28.8h }, [x20], #0x10\n"
"tbz x11, #2, 286f\n"
- "str d9, [x9], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
- "str d29, [x21], #0x8\n"
+ "str d9, [x28], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
+ "str d21, [x22], #0x8\n"
+ "str d25, [x21], #0x8\n"
+ "str d29, [x20], #0x8\n"
"tbz x11, #1, 285f\n"
- "st1 { v9.s }[2], [x9], #0x4\n"
- "st1 { v13.s }[2], [x25], #0x4\n"
- "st1 { v17.s }[2], [x24], #0x4\n"
- "st1 { v21.s }[2], [x23], #0x4\n"
- "st1 { v25.s }[2], [x22], #0x4\n"
- "st1 { v29.s }[2], [x21], #0x4\n"
+ "st1 { v9.s }[2], [x28], #0x4\n"
+ "st1 { v13.s }[2], [x24], #0x4\n"
+ "st1 { v17.s }[2], [x23], #0x4\n"
+ "st1 { v21.s }[2], [x22], #0x4\n"
+ "st1 { v25.s }[2], [x21], #0x4\n"
+ "st1 { v29.s }[2], [x20], #0x4\n"
"tbz x11, #0, 292f\n"
- "st1 { v9.h }[6], [x9]\n"
- "st1 { v13.h }[6], [x25]\n"
- "st1 { v17.h }[6], [x24]\n"
- "st1 { v21.h }[6], [x23]\n"
- "st1 { v25.h }[6], [x22]\n"
- "st1 { v29.h }[6], [x21]\n"
+ "st1 { v9.h }[6], [x28]\n"
+ "st1 { v13.h }[6], [x24]\n"
+ "st1 { v17.h }[6], [x23]\n"
+ "st1 { v21.h }[6], [x22]\n"
+ "st1 { v25.h }[6], [x21]\n"
+ "st1 { v29.h }[6], [x20]\n"
"b 292f\n"
"285:" // Height 6: Partial direct writeback: partial_1_12
"tbz x11, #0, 292f\n"
- "st1 { v9.h }[4], [x9]\n"
- "st1 { v13.h }[4], [x25]\n"
- "st1 { v17.h }[4], [x24]\n"
- "st1 { v21.h }[4], [x23]\n"
- "st1 { v25.h }[4], [x22]\n"
- "st1 { v29.h }[4], [x21]\n"
+ "st1 { v9.h }[4], [x28]\n"
+ "st1 { v13.h }[4], [x24]\n"
+ "st1 { v17.h }[4], [x23]\n"
+ "st1 { v21.h }[4], [x22]\n"
+ "st1 { v25.h }[4], [x21]\n"
+ "st1 { v29.h }[4], [x20]\n"
"b 292f\n"
"286:" // Height 6: Partial direct writeback: partial_2_8
"tbz x11, #1, 287f\n"
- "str s9, [x9], #0x4\n"
- "str s13, [x25], #0x4\n"
- "str s17, [x24], #0x4\n"
- "str s21, [x23], #0x4\n"
- "str s25, [x22], #0x4\n"
- "str s29, [x21], #0x4\n"
+ "str s9, [x28], #0x4\n"
+ "str s13, [x24], #0x4\n"
+ "str s17, [x23], #0x4\n"
+ "str s21, [x22], #0x4\n"
+ "str s25, [x21], #0x4\n"
+ "str s29, [x20], #0x4\n"
"tbz x11, #0, 292f\n"
- "st1 { v9.h }[2], [x9]\n"
- "st1 { v13.h }[2], [x25]\n"
- "st1 { v17.h }[2], [x24]\n"
- "st1 { v21.h }[2], [x23]\n"
- "st1 { v25.h }[2], [x22]\n"
- "st1 { v29.h }[2], [x21]\n"
+ "st1 { v9.h }[2], [x28]\n"
+ "st1 { v13.h }[2], [x24]\n"
+ "st1 { v17.h }[2], [x23]\n"
+ "st1 { v21.h }[2], [x22]\n"
+ "st1 { v25.h }[2], [x21]\n"
+ "st1 { v29.h }[2], [x20]\n"
"b 292f\n"
"287:" // Height 6: Partial direct writeback: partial_1_8
"tbz x11, #0, 292f\n"
- "str h9, [x9, #0x0]\n"
- "str h13, [x25, #0x0]\n"
- "str h17, [x24, #0x0]\n"
- "str h21, [x23, #0x0]\n"
- "str h25, [x22, #0x0]\n"
- "str h29, [x21, #0x0]\n"
+ "str h9, [x28, #0x0]\n"
+ "str h13, [x24, #0x0]\n"
+ "str h17, [x23, #0x0]\n"
+ "str h21, [x22, #0x0]\n"
+ "str h25, [x21, #0x0]\n"
+ "str h29, [x20, #0x0]\n"
"b 292f\n"
"288:" // Height 6: Partial direct writeback: partial_4_0
"tbz x11, #2, 290f\n"
- "str d8, [x9], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
- "str d28, [x21], #0x8\n"
+ "str d8, [x28], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
+ "str d20, [x22], #0x8\n"
+ "str d24, [x21], #0x8\n"
+ "str d28, [x20], #0x8\n"
"tbz x11, #1, 289f\n"
- "st1 { v8.s }[2], [x9], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
- "st1 { v16.s }[2], [x24], #0x4\n"
- "st1 { v20.s }[2], [x23], #0x4\n"
- "st1 { v24.s }[2], [x22], #0x4\n"
- "st1 { v28.s }[2], [x21], #0x4\n"
+ "st1 { v8.s }[2], [x28], #0x4\n"
+ "st1 { v12.s }[2], [x24], #0x4\n"
+ "st1 { v16.s }[2], [x23], #0x4\n"
+ "st1 { v20.s }[2], [x22], #0x4\n"
+ "st1 { v24.s }[2], [x21], #0x4\n"
+ "st1 { v28.s }[2], [x20], #0x4\n"
"tbz x11, #0, 292f\n"
- "st1 { v8.h }[6], [x9]\n"
- "st1 { v12.h }[6], [x25]\n"
- "st1 { v16.h }[6], [x24]\n"
- "st1 { v20.h }[6], [x23]\n"
- "st1 { v24.h }[6], [x22]\n"
- "st1 { v28.h }[6], [x21]\n"
+ "st1 { v8.h }[6], [x28]\n"
+ "st1 { v12.h }[6], [x24]\n"
+ "st1 { v16.h }[6], [x23]\n"
+ "st1 { v20.h }[6], [x22]\n"
+ "st1 { v24.h }[6], [x21]\n"
+ "st1 { v28.h }[6], [x20]\n"
"b 292f\n"
"289:" // Height 6: Partial direct writeback: partial_1_4
"tbz x11, #0, 292f\n"
- "st1 { v8.h }[4], [x9]\n"
- "st1 { v12.h }[4], [x25]\n"
- "st1 { v16.h }[4], [x24]\n"
- "st1 { v20.h }[4], [x23]\n"
- "st1 { v24.h }[4], [x22]\n"
- "st1 { v28.h }[4], [x21]\n"
+ "st1 { v8.h }[4], [x28]\n"
+ "st1 { v12.h }[4], [x24]\n"
+ "st1 { v16.h }[4], [x23]\n"
+ "st1 { v20.h }[4], [x22]\n"
+ "st1 { v24.h }[4], [x21]\n"
+ "st1 { v28.h }[4], [x20]\n"
"b 292f\n"
"290:" // Height 6: Partial direct writeback: partial_2_0
"tbz x11, #1, 291f\n"
- "str s8, [x9], #0x4\n"
- "str s12, [x25], #0x4\n"
- "str s16, [x24], #0x4\n"
- "str s20, [x23], #0x4\n"
- "str s24, [x22], #0x4\n"
- "str s28, [x21], #0x4\n"
+ "str s8, [x28], #0x4\n"
+ "str s12, [x24], #0x4\n"
+ "str s16, [x23], #0x4\n"
+ "str s20, [x22], #0x4\n"
+ "str s24, [x21], #0x4\n"
+ "str s28, [x20], #0x4\n"
"tbz x11, #0, 292f\n"
- "st1 { v8.h }[2], [x9]\n"
- "st1 { v12.h }[2], [x25]\n"
- "st1 { v16.h }[2], [x24]\n"
- "st1 { v20.h }[2], [x23]\n"
- "st1 { v24.h }[2], [x22]\n"
- "st1 { v28.h }[2], [x21]\n"
+ "st1 { v8.h }[2], [x28]\n"
+ "st1 { v12.h }[2], [x24]\n"
+ "st1 { v16.h }[2], [x23]\n"
+ "st1 { v20.h }[2], [x22]\n"
+ "st1 { v24.h }[2], [x21]\n"
+ "st1 { v28.h }[2], [x20]\n"
"b 292f\n"
"291:" // Height 6: Partial direct writeback: partial_1_0
- "str h8, [x9, #0x0]\n"
- "str h12, [x25, #0x0]\n"
- "str h16, [x24, #0x0]\n"
- "str h20, [x23, #0x0]\n"
- "str h24, [x22, #0x0]\n"
- "str h28, [x21, #0x0]\n"
+ "str h8, [x28, #0x0]\n"
+ "str h12, [x24, #0x0]\n"
+ "str h16, [x23, #0x0]\n"
+ "str h20, [x22, #0x0]\n"
+ "str h24, [x21, #0x0]\n"
+ "str h28, [x20, #0x0]\n"
"292:" // Height 6: Partial direct writeback: Done
"b 294f\n"
"293:" // Height 6: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
- "str q24, [x22, #0x0]\n"
- "str q25, [x22, #0x10]\n"
- "str q26, [x22, #0x20]\n"
- "str q27, [x22, #0x30]\n"
- "str q28, [x21, #0x0]\n"
- "str q29, [x21, #0x10]\n"
- "str q30, [x21, #0x20]\n"
- "str q31, [x21, #0x30]\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "str q20, [x22, #0x0]\n"
+ "str q21, [x22, #0x10]\n"
+ "str q22, [x22, #0x20]\n"
+ "str q23, [x22, #0x30]\n"
+ "str q24, [x21, #0x0]\n"
+ "str q25, [x21, #0x10]\n"
+ "str q26, [x21, #0x20]\n"
+ "str q27, [x21, #0x30]\n"
+ "str q28, [x20, #0x0]\n"
+ "str q29, [x20, #0x10]\n"
+ "str q30, [x20, #0x20]\n"
+ "str q31, [x20, #0x30]\n"
"294:" // Height 6: Writeback done
"subs x11, x11, #0x20\n"
"bgt 247b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 296f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 295f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"295:" // Update direct input
- "mov x20, #0xc\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0xc\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"296:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x24/a55.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x24/a55.cpp
index 700d803f82..1fbc9232f0 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x24/a55.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x24/a55.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -99,117 +99,117 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"cmp %x[M], #0x2\n"
"bgt 83f\n"
"beq 42f\n"
- "mov x17, %x[bias]\n"
"ldr x16, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x15, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x14, %x[output_ptr]\n"
+ "mov x14, %x[bias]\n"
+ "mov x13, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "cbz x17, 3f\n"
- "ldr q8, [x17, #0x0]\n"
- "ldr q9, [x17, #0x10]\n"
- "ldr q10, [x17, #0x20]\n"
- "ldr q11, [x17, #0x30]\n"
- "ldr q12, [x17, #0x40]\n"
- "ldr q13, [x17, #0x50]\n"
- "add x17, x17, #0x60\n"
+ "cbz x14, 3f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
+ "ldr q12, [x14, #0x40]\n"
+ "ldr q13, [x14, #0x50]\n"
+ "add x14, x14, #0x60\n"
"b 18f\n"
"3:" // Height 1: no bias
"tbz %x[flags], #0, 17f\n"
"cmp x16, #0x18\n"
"bge 16f\n"
"tbz x16, #4, 7f\n"
- "ld1 { v8.4s }, [x14], #0x10\n"
- "ld1 { v9.4s }, [x14], #0x10\n"
- "ld1 { v10.4s }, [x14], #0x10\n"
- "ld1 { v11.4s }, [x14], #0x10\n"
+ "ld1 { v8.4s }, [x13], #0x10\n"
+ "ld1 { v9.4s }, [x13], #0x10\n"
+ "ld1 { v10.4s }, [x13], #0x10\n"
+ "ld1 { v11.4s }, [x13], #0x10\n"
"tbz x16, #2, 5f\n"
- "ld1 { v12.4s }, [x14], #0x10\n"
+ "ld1 { v12.4s }, [x13], #0x10\n"
"tbz x16, #1, 4f\n"
- "ldr d13, [x14], #0x8\n"
- "mov x20, #0x58\n"
+ "mov x19, #0x58\n"
+ "ldr d13, [x13], #0x8\n"
"tbz x16, #0, 15f\n"
- "ld1 { v13.s }[2], [x14]\n"
+ "ld1 { v13.s }[2], [x13]\n"
"b 15f\n"
"4:" // Height 1: Partial accumulate: partial_1_20
- "mov x20, #0x50\n"
+ "mov x19, #0x50\n"
"tbz x16, #0, 15f\n"
- "ldr s13, [x14, #0x0]\n"
+ "ldr s13, [x13, #0x0]\n"
"b 15f\n"
"5:" // Height 1: Partial accumulate: partial_2_16
"tbz x16, #1, 6f\n"
- "ldr d12, [x14], #0x8\n"
- "mov x20, #0x48\n"
+ "ldr d12, [x13], #0x8\n"
+ "mov x19, #0x48\n"
"tbz x16, #0, 15f\n"
- "ld1 { v12.s }[2], [x14]\n"
+ "ld1 { v12.s }[2], [x13]\n"
"b 15f\n"
"6:" // Height 1: Partial accumulate: partial_1_16
- "mov x20, #0x40\n"
+ "mov x19, #0x40\n"
"tbz x16, #0, 15f\n"
- "ldr s12, [x14, #0x0]\n"
+ "ldr s12, [x13, #0x0]\n"
"b 15f\n"
"7:" // Height 1: Partial accumulate: partial_8_0
"tbz x16, #3, 11f\n"
- "ld1 { v8.4s }, [x14], #0x10\n"
- "ld1 { v9.4s }, [x14], #0x10\n"
+ "ld1 { v8.4s }, [x13], #0x10\n"
+ "ld1 { v9.4s }, [x13], #0x10\n"
"tbz x16, #2, 9f\n"
- "ld1 { v10.4s }, [x14], #0x10\n"
+ "ld1 { v10.4s }, [x13], #0x10\n"
"tbz x16, #1, 8f\n"
- "ldr d11, [x14], #0x8\n"
- "mov x20, #0x38\n"
+ "mov x19, #0x38\n"
+ "ldr d11, [x13], #0x8\n"
"tbz x16, #0, 15f\n"
- "ld1 { v11.s }[2], [x14]\n"
+ "ld1 { v11.s }[2], [x13]\n"
"b 15f\n"
"8:" // Height 1: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x16, #0, 15f\n"
- "ldr s11, [x14, #0x0]\n"
+ "ldr s11, [x13, #0x0]\n"
"b 15f\n"
"9:" // Height 1: Partial accumulate: partial_2_8
"tbz x16, #1, 10f\n"
- "ldr d10, [x14], #0x8\n"
- "mov x20, #0x28\n"
+ "ldr d10, [x13], #0x8\n"
+ "mov x19, #0x28\n"
"tbz x16, #0, 15f\n"
- "ld1 { v10.s }[2], [x14]\n"
+ "ld1 { v10.s }[2], [x13]\n"
"b 15f\n"
"10:" // Height 1: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x16, #0, 15f\n"
- "ldr s10, [x14, #0x0]\n"
+ "ldr s10, [x13, #0x0]\n"
"b 15f\n"
"11:" // Height 1: Partial accumulate: partial_4_0
"tbz x16, #2, 13f\n"
- "ld1 { v8.4s }, [x14], #0x10\n"
+ "ld1 { v8.4s }, [x13], #0x10\n"
"tbz x16, #1, 12f\n"
- "ldr d9, [x14], #0x8\n"
- "mov x20, #0x18\n"
+ "mov x19, #0x18\n"
+ "ldr d9, [x13], #0x8\n"
"tbz x16, #0, 15f\n"
- "ld1 { v9.s }[2], [x14]\n"
+ "ld1 { v9.s }[2], [x13]\n"
"b 15f\n"
"12:" // Height 1: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x16, #0, 15f\n"
- "ldr s9, [x14, #0x0]\n"
+ "ldr s9, [x13, #0x0]\n"
"b 15f\n"
"13:" // Height 1: Partial accumulate: partial_2_0
"tbz x16, #1, 14f\n"
- "ldr d8, [x14], #0x8\n"
- "mov x20, #0x8\n"
+ "ldr d8, [x13], #0x8\n"
+ "mov x19, #0x8\n"
"tbz x16, #0, 15f\n"
- "ld1 { v8.s }[2], [x14]\n"
+ "ld1 { v8.s }[2], [x13]\n"
"b 15f\n"
"14:" // Height 1: Partial accumulate: partial_1_0
- "ldr s8, [x14, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr s8, [x13, #0x0]\n"
+ "mov x19, #0x0\n"
"15:" // Height 1: Partial accumulate: Done
- "sub x14, x14, x20\n"
+ "sub x13, x13, x19\n"
"b 18f\n"
"16:" // Height 1: full accumulate
- "ldr q8, [x14, #0x0]\n"
- "ldr q9, [x14, #0x10]\n"
- "ldr q10, [x14, #0x20]\n"
- "ldr q11, [x14, #0x30]\n"
- "ldr q12, [x14, #0x40]\n"
- "ldr q13, [x14, #0x50]\n"
+ "ldr q8, [x13, #0x0]\n"
+ "ldr q9, [x13, #0x10]\n"
+ "ldr q10, [x13, #0x20]\n"
+ "ldr q11, [x13, #0x30]\n"
+ "ldr q12, [x13, #0x40]\n"
+ "ldr q13, [x13, #0x50]\n"
"b 18f\n"
"17:" // Height 1: no accumulate
"movi v8.16b, #0x0\n"
@@ -219,463 +219,463 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"movi v12.16b, #0x0\n"
"movi v13.16b, #0x0\n"
"18:" // Height 1: setup done
- "mov x13, #0x0\n"
+ "mov x12, #0x0\n"
"19:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w12, [x20, x13, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w11, [x20, x12, LSL #0x2]\n"
"tbz %x[flags], #3, 20f\n"
- "ldr x21, [%x[input_ptr], x13, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x11, [x21, #0x0]\n"
- "cbnz x13, 21f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x11, x11, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x12, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x10, [x20, #0x0]\n"
+ "cbnz x12, 21f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x10, x10, x19, LSL #2\n"
"b 21f\n"
"20:" // Height 1: setup direct input
- "mov x11, %x[input_ptr]\n"
+ "mov x10, %x[input_ptr]\n"
"21:" // Height 1: input setup done
- "cmp x12, #0x4\n"
+ "cmp x11, #0x4\n"
"blt 24f\n"
- "ldr q0, [x11, #0x0]\n"
- "cmp x12, #0x8\n"
+ "ldr q0, [x10, #0x0]\n"
"ldr q4, [x15, #0x0]\n"
- "ldr q5, [x15, #0x10]\n"
- "ldr q6, [x15, #0x20]\n"
- "ldr q7, [x15, #0x30]\n"
+ "cmp x11, #0x8\n"
"blt 23f\n"
"22:" // Height 1: Multiply loop: Main loop head
"fmla v8.4s, v4.4s, v0.s[0]\n"
- "ldr d4, [x15, #0x40]\n"
- "ldr x10, [x15, #0x48]\n"
+ "ldr d5, [x15, #0x10]\n"
+ "ldr x9, [x15, #0x18]\n"
+ "add x10, x10, #0x10\n"
+ "ldr d6, [x15, #0x20]\n"
+ "sub x11, x11, #0x4\n"
+ "ldr x28, [x15, #0x28]\n"
+ "cmp x11, #0x8\n"
+ "mov v5.d[1], x9\n"
+ "ldr d7, [x15, #0x30]\n"
+ "ldr x27, [x15, #0x38]\n"
"fmla v9.4s, v5.4s, v0.s[0]\n"
- "ldr d5, [x15, #0x50]\n"
+ "mov v6.d[1], x28\n"
+ "ldr d4, [x15, #0x40]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
- "ldr d6, [x15, #0x60]\n"
+ "mov v7.d[1], x27\n"
+ "ldr x26, [x15, #0x48]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
- "ldr d7, [x15, #0x70]\n"
- "mov v4.d[1], x10\n"
+ "ldr d5, [x15, #0x50]\n"
"ldr x9, [x15, #0x58]\n"
- "mov v5.d[1], x9\n"
+ "mov v4.d[1], x26\n"
+ "ldr d6, [x15, #0x60]\n"
"ldr x28, [x15, #0x68]\n"
+ "fmla v12.4s, v4.4s, v0.s[0]\n"
+ "mov v5.d[1], x9\n"
+ "ldr d7, [x15, #0x70]\n"
+ "fmla v13.4s, v5.4s, v0.s[0]\n"
"mov v6.d[1], x28\n"
"ldr x27, [x15, #0x78]\n"
- "mov v7.d[1], x27\n"
- "fmla v12.4s, v4.4s, v0.s[0]\n"
+ "fmla v8.4s, v6.4s, v0.s[1]\n"
"ldr d4, [x15, #0x80]\n"
- "ldr x10, [x15, #0x88]\n"
- "fmla v13.4s, v5.4s, v0.s[0]\n"
+ "ldr x26, [x15, #0x88]\n"
+ "mov v7.d[1], x27\n"
"ldr d5, [x15, #0x90]\n"
- "fmla v8.4s, v6.4s, v0.s[1]\n"
- "ldr d6, [x15, #0xa0]\n"
- "fmla v9.4s, v7.4s, v0.s[1]\n"
- "ldr d7, [x15, #0xb0]\n"
- "mov v4.d[1], x10\n"
"ldr x9, [x15, #0x98]\n"
+ "fmla v9.4s, v7.4s, v0.s[1]\n"
+ "mov v4.d[1], x26\n"
+ "ldr d6, [x15, #0xa0]\n"
+ "fmla v10.4s, v4.4s, v0.s[1]\n"
"mov v5.d[1], x9\n"
"ldr x28, [x15, #0xa8]\n"
- "mov v6.d[1], x28\n"
+ "fmla v11.4s, v5.4s, v0.s[1]\n"
+ "ldr d7, [x15, #0xb0]\n"
"ldr x27, [x15, #0xb8]\n"
- "mov v7.d[1], x27\n"
- "fmla v10.4s, v4.4s, v0.s[1]\n"
+ "mov v6.d[1], x28\n"
"ldr d4, [x15, #0xc0]\n"
- "ldr x10, [x15, #0xc8]\n"
- "fmla v11.4s, v5.4s, v0.s[1]\n"
- "ldr d5, [x15, #0xd0]\n"
+ "ldr x26, [x15, #0xc8]\n"
"fmla v12.4s, v6.4s, v0.s[1]\n"
- "ldr d6, [x15, #0xe0]\n"
+ "mov v7.d[1], x27\n"
+ "ldr d5, [x15, #0xd0]\n"
"fmla v13.4s, v7.4s, v0.s[1]\n"
- "ldr d7, [x15, #0xf0]\n"
- "mov v4.d[1], x10\n"
+ "mov v4.d[1], x26\n"
"ldr x9, [x15, #0xd8]\n"
- "mov v5.d[1], x9\n"
+ "fmla v8.4s, v4.4s, v0.s[2]\n"
+ "ldr d6, [x15, #0xe0]\n"
"ldr x28, [x15, #0xe8]\n"
- "mov v6.d[1], x28\n"
+ "mov v5.d[1], x9\n"
+ "ldr d7, [x15, #0xf0]\n"
"ldr x27, [x15, #0xf8]\n"
- "mov v7.d[1], x27\n"
- "fmla v8.4s, v4.4s, v0.s[2]\n"
- "ldr d4, [x15, #0x100]\n"
- "ldr x10, [x15, #0x108]\n"
"fmla v9.4s, v5.4s, v0.s[2]\n"
- "ldr d5, [x15, #0x110]\n"
+ "mov v6.d[1], x28\n"
+ "ldr d4, [x15, #0x100]\n"
"fmla v10.4s, v6.4s, v0.s[2]\n"
- "ldr d6, [x15, #0x120]\n"
+ "mov v7.d[1], x27\n"
+ "ldr x26, [x15, #0x108]\n"
"fmla v11.4s, v7.4s, v0.s[2]\n"
- "ldr d7, [x15, #0x130]\n"
- "mov v4.d[1], x10\n"
+ "ldr d5, [x15, #0x110]\n"
"ldr x9, [x15, #0x118]\n"
- "mov v5.d[1], x9\n"
+ "mov v4.d[1], x26\n"
+ "ldr d6, [x15, #0x120]\n"
"ldr x28, [x15, #0x128]\n"
+ "fmla v12.4s, v4.4s, v0.s[2]\n"
+ "mov v5.d[1], x9\n"
+ "ldr d7, [x15, #0x130]\n"
+ "fmla v13.4s, v5.4s, v0.s[2]\n"
"mov v6.d[1], x28\n"
"ldr x27, [x15, #0x138]\n"
- "mov v7.d[1], x27\n"
- "fmla v12.4s, v4.4s, v0.s[2]\n"
+ "fmla v8.4s, v6.4s, v0.s[3]\n"
"ldr d4, [x15, #0x140]\n"
- "ldr x10, [x15, #0x148]\n"
- "fmla v13.4s, v5.4s, v0.s[2]\n"
+ "ldr x26, [x15, #0x148]\n"
+ "mov v7.d[1], x27\n"
"ldr d5, [x15, #0x150]\n"
- "fmla v8.4s, v6.4s, v0.s[3]\n"
- "ldr d6, [x15, #0x160]\n"
- "fmla v9.4s, v7.4s, v0.s[3]\n"
- "ldr d7, [x15, #0x170]\n"
- "mov v4.d[1], x10\n"
"ldr x9, [x15, #0x158]\n"
+ "fmla v9.4s, v7.4s, v0.s[3]\n"
+ "mov v4.d[1], x26\n"
+ "ldr d6, [x15, #0x160]\n"
+ "fmla v10.4s, v4.4s, v0.s[3]\n"
"mov v5.d[1], x9\n"
"ldr x28, [x15, #0x168]\n"
- "mov v6.d[1], x28\n"
+ "fmla v11.4s, v5.4s, v0.s[3]\n"
+ "ldr d7, [x15, #0x170]\n"
"ldr x27, [x15, #0x178]\n"
- "mov v7.d[1], x27\n"
- "add x11, x11, #0x10\n"
"add x15, x15, #0x180\n"
- "fmla v10.4s, v4.4s, v0.s[3]\n"
- "ldr d4, [x15, #0x0]\n"
- "ldr x10, [x15, #0x8]\n"
- "fmla v11.4s, v5.4s, v0.s[3]\n"
- "ldr d5, [x15, #0x10]\n"
- "fmla v12.4s, v6.4s, v0.s[3]\n"
- "ldr d6, [x15, #0x20]\n"
- "fmla v13.4s, v7.4s, v0.s[3]\n"
- "ldr d0, [x11, #0x0]\n"
- "sub x12, x12, #0x4\n"
- "ldr d7, [x15, #0x30]\n"
- "cmp x12, #0x8\n"
- "ldr x9, [x15, #0x18]\n"
- "mov v4.d[1], x10\n"
- "ldr x28, [x15, #0x28]\n"
- "mov v5.d[1], x9\n"
- "ldr x26, [x11, #0x8]\n"
"mov v6.d[1], x28\n"
- "ldr x27, [x15, #0x38]\n"
- "mov v0.d[1], x26\n"
+ "prfm pldl1keep, [x10, #0x80]\n"
+ "ldr x25, [x10, #0x8]\n"
+ "fmla v12.4s, v6.4s, v0.s[3]\n"
"mov v7.d[1], x27\n"
- "prfm pldl1keep, [x11, #0x80]\n"
+ "ldr d4, [x15, #0x0]\n"
+ "fmla v13.4s, v7.4s, v0.s[3]\n"
+ "ldr d0, [x10, #0x0]\n"
+ "ldr x26, [x15, #0x8]\n"
+ "mov v0.d[1], x25\n"
+ "mov v4.d[1], x26\n"
"bge 22b\n"
"23:" // Height 1: Multiply loop: Single iteration only
"fmla v8.4s, v4.4s, v0.s[0]\n"
- "ldr q4, [x15, #0x40]\n"
+ "ldr q5, [x15, #0x10]\n"
+ "ldr q6, [x15, #0x20]\n"
+ "sub x11, x11, #0x4\n"
+ "ldr q7, [x15, #0x30]\n"
+ "add x10, x10, #0x10\n"
"fmla v9.4s, v5.4s, v0.s[0]\n"
- "ldr q5, [x15, #0x50]\n"
+ "ldr q4, [x15, #0x40]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
- "ldr q6, [x15, #0x60]\n"
+ "ldr q5, [x15, #0x50]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
- "ldr q7, [x15, #0x70]\n"
+ "ldr q6, [x15, #0x60]\n"
"fmla v12.4s, v4.4s, v0.s[0]\n"
- "ldr q4, [x15, #0x80]\n"
+ "ldr q7, [x15, #0x70]\n"
"fmla v13.4s, v5.4s, v0.s[0]\n"
- "ldr q5, [x15, #0x90]\n"
+ "ldr q4, [x15, #0x80]\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
- "ldr q6, [x15, #0xa0]\n"
+ "ldr q5, [x15, #0x90]\n"
"fmla v9.4s, v7.4s, v0.s[1]\n"
- "ldr q7, [x15, #0xb0]\n"
+ "ldr q6, [x15, #0xa0]\n"
"fmla v10.4s, v4.4s, v0.s[1]\n"
- "ldr q4, [x15, #0xc0]\n"
+ "ldr q7, [x15, #0xb0]\n"
"fmla v11.4s, v5.4s, v0.s[1]\n"
- "ldr q5, [x15, #0xd0]\n"
+ "ldr q4, [x15, #0xc0]\n"
"fmla v12.4s, v6.4s, v0.s[1]\n"
- "ldr q6, [x15, #0xe0]\n"
+ "ldr q5, [x15, #0xd0]\n"
"fmla v13.4s, v7.4s, v0.s[1]\n"
- "ldr q7, [x15, #0xf0]\n"
+ "ldr q6, [x15, #0xe0]\n"
"fmla v8.4s, v4.4s, v0.s[2]\n"
- "ldr q4, [x15, #0x100]\n"
+ "ldr q7, [x15, #0xf0]\n"
"fmla v9.4s, v5.4s, v0.s[2]\n"
- "ldr q5, [x15, #0x110]\n"
+ "ldr q4, [x15, #0x100]\n"
"fmla v10.4s, v6.4s, v0.s[2]\n"
- "ldr q6, [x15, #0x120]\n"
+ "ldr q5, [x15, #0x110]\n"
"fmla v11.4s, v7.4s, v0.s[2]\n"
- "ldr q7, [x15, #0x130]\n"
+ "ldr q6, [x15, #0x120]\n"
"fmla v12.4s, v4.4s, v0.s[2]\n"
- "ldr q4, [x15, #0x140]\n"
+ "ldr q7, [x15, #0x130]\n"
"fmla v13.4s, v5.4s, v0.s[2]\n"
- "ldr q5, [x15, #0x150]\n"
+ "ldr q4, [x15, #0x140]\n"
"fmla v8.4s, v6.4s, v0.s[3]\n"
- "ldr q6, [x15, #0x160]\n"
+ "ldr q5, [x15, #0x150]\n"
"fmla v9.4s, v7.4s, v0.s[3]\n"
- "ldr q7, [x15, #0x170]\n"
- "add x11, x11, #0x10\n"
- "sub x12, x12, #0x4\n"
+ "ldr q6, [x15, #0x160]\n"
"fmla v10.4s, v4.4s, v0.s[3]\n"
- "prfm pldl1keep, [x11, #0x80]\n"
+ "ldr q7, [x15, #0x170]\n"
"fmla v11.4s, v5.4s, v0.s[3]\n"
- "add x15, x15, #0x180\n"
+ "prfm pldl1keep, [x10, #0x80]\n"
"fmla v12.4s, v6.4s, v0.s[3]\n"
+ "add x15, x15, #0x180\n"
"fmla v13.4s, v7.4s, v0.s[3]\n"
"24:" // Height 1: Multiply loop: Main loop skip
- "cbz x12, 26f\n"
+ "cbz x11, 26f\n"
"25:" // Height 1: Multiply loop: Odd block loop
- "ldr s0, [x11], #0x4\n"
- "sub x12, x12, #0x1\n"
+ "ldr s0, [x10], #0x4\n"
+ "sub x11, x11, #0x1\n"
"ldr q4, [x15, #0x0]\n"
- "fmla v8.4s, v4.4s, v0.s[0]\n"
"ldr q5, [x15, #0x10]\n"
- "fmla v9.4s, v5.4s, v0.s[0]\n"
"ldr q6, [x15, #0x20]\n"
- "fmla v10.4s, v6.4s, v0.s[0]\n"
+ "fmla v8.4s, v4.4s, v0.s[0]\n"
"ldr q7, [x15, #0x30]\n"
- "fmla v11.4s, v7.4s, v0.s[0]\n"
+ "fmla v9.4s, v5.4s, v0.s[0]\n"
"ldr q4, [x15, #0x40]\n"
- "fmla v12.4s, v4.4s, v0.s[0]\n"
+ "fmla v10.4s, v6.4s, v0.s[0]\n"
"ldr q5, [x15, #0x50]\n"
- "fmla v13.4s, v5.4s, v0.s[0]\n"
+ "fmla v11.4s, v7.4s, v0.s[0]\n"
"add x15, x15, #0x60\n"
- "cbnz x12, 25b\n"
+ "fmla v12.4s, v4.4s, v0.s[0]\n"
+ "fmla v13.4s, v5.4s, v0.s[0]\n"
+ "cbnz x11, 25b\n"
"26:" // Height 1: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x13, x13, #0x1\n"
- "cmp x13, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x12, x12, #0x1\n"
+ "cmp x12, x19\n"
"bne 19b\n"
- "prfm pstl1keep, [x14, #0x0]\n"
+ "prfm pstl1keep, [x13, #0x0]\n"
"tbz %x[flags], #1, 27f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
"fmin v8.4s, v8.4s, v0.4s\n"
"fmin v9.4s, v9.4s, v0.4s\n"
"fmin v10.4s, v10.4s, v0.4s\n"
"fmin v11.4s, v11.4s, v0.4s\n"
"fmin v12.4s, v12.4s, v0.4s\n"
"fmin v13.4s, v13.4s, v0.4s\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
"27:" // Height 1: No activation
"cmp x16, #0x18\n"
"bge 40f\n"
"tbz x16, #4, 31f\n"
- "st1 { v8.4s }, [x14], #0x10\n"
- "st1 { v9.4s }, [x14], #0x10\n"
- "st1 { v10.4s }, [x14], #0x10\n"
- "st1 { v11.4s }, [x14], #0x10\n"
+ "st1 { v8.4s }, [x13], #0x10\n"
+ "st1 { v9.4s }, [x13], #0x10\n"
+ "st1 { v10.4s }, [x13], #0x10\n"
+ "st1 { v11.4s }, [x13], #0x10\n"
"tbz x16, #2, 29f\n"
- "st1 { v12.4s }, [x14], #0x10\n"
+ "st1 { v12.4s }, [x13], #0x10\n"
"tbz x16, #1, 28f\n"
- "str d13, [x14], #0x8\n"
+ "str d13, [x13], #0x8\n"
"tbz x16, #0, 39f\n"
- "st1 { v13.s }[2], [x14]\n"
+ "st1 { v13.s }[2], [x13]\n"
"b 39f\n"
"28:" // Height 1: Partial direct writeback: partial_1_20
"tbz x16, #0, 39f\n"
- "str s13, [x14, #0x0]\n"
+ "str s13, [x13, #0x0]\n"
"b 39f\n"
"29:" // Height 1: Partial direct writeback: partial_2_16
"tbz x16, #1, 30f\n"
- "str d12, [x14], #0x8\n"
+ "str d12, [x13], #0x8\n"
"tbz x16, #0, 39f\n"
- "st1 { v12.s }[2], [x14]\n"
+ "st1 { v12.s }[2], [x13]\n"
"b 39f\n"
"30:" // Height 1: Partial direct writeback: partial_1_16
"tbz x16, #0, 39f\n"
- "str s12, [x14, #0x0]\n"
+ "str s12, [x13, #0x0]\n"
"b 39f\n"
"31:" // Height 1: Partial direct writeback: partial_8_0
"tbz x16, #3, 35f\n"
- "st1 { v8.4s }, [x14], #0x10\n"
- "st1 { v9.4s }, [x14], #0x10\n"
+ "st1 { v8.4s }, [x13], #0x10\n"
+ "st1 { v9.4s }, [x13], #0x10\n"
"tbz x16, #2, 33f\n"
- "st1 { v10.4s }, [x14], #0x10\n"
+ "st1 { v10.4s }, [x13], #0x10\n"
"tbz x16, #1, 32f\n"
- "str d11, [x14], #0x8\n"
+ "str d11, [x13], #0x8\n"
"tbz x16, #0, 39f\n"
- "st1 { v11.s }[2], [x14]\n"
+ "st1 { v11.s }[2], [x13]\n"
"b 39f\n"
"32:" // Height 1: Partial direct writeback: partial_1_12
"tbz x16, #0, 39f\n"
- "str s11, [x14, #0x0]\n"
+ "str s11, [x13, #0x0]\n"
"b 39f\n"
"33:" // Height 1: Partial direct writeback: partial_2_8
"tbz x16, #1, 34f\n"
- "str d10, [x14], #0x8\n"
+ "str d10, [x13], #0x8\n"
"tbz x16, #0, 39f\n"
- "st1 { v10.s }[2], [x14]\n"
+ "st1 { v10.s }[2], [x13]\n"
"b 39f\n"
"34:" // Height 1: Partial direct writeback: partial_1_8
"tbz x16, #0, 39f\n"
- "str s10, [x14, #0x0]\n"
+ "str s10, [x13, #0x0]\n"
"b 39f\n"
"35:" // Height 1: Partial direct writeback: partial_4_0
"tbz x16, #2, 37f\n"
- "st1 { v8.4s }, [x14], #0x10\n"
+ "st1 { v8.4s }, [x13], #0x10\n"
"tbz x16, #1, 36f\n"
- "str d9, [x14], #0x8\n"
+ "str d9, [x13], #0x8\n"
"tbz x16, #0, 39f\n"
- "st1 { v9.s }[2], [x14]\n"
+ "st1 { v9.s }[2], [x13]\n"
"b 39f\n"
"36:" // Height 1: Partial direct writeback: partial_1_4
"tbz x16, #0, 39f\n"
- "str s9, [x14, #0x0]\n"
+ "str s9, [x13, #0x0]\n"
"b 39f\n"
"37:" // Height 1: Partial direct writeback: partial_2_0
"tbz x16, #1, 38f\n"
- "str d8, [x14], #0x8\n"
+ "str d8, [x13], #0x8\n"
"tbz x16, #0, 39f\n"
- "st1 { v8.s }[2], [x14]\n"
+ "st1 { v8.s }[2], [x13]\n"
"b 39f\n"
"38:" // Height 1: Partial direct writeback: partial_1_0
- "str s8, [x14, #0x0]\n"
+ "str s8, [x13, #0x0]\n"
"39:" // Height 1: Partial direct writeback: Done
"b 41f\n"
"40:" // Height 1: Full writeback
- "str q8, [x14, #0x0]\n"
- "str q9, [x14, #0x10]\n"
- "str q10, [x14, #0x20]\n"
- "str q11, [x14, #0x30]\n"
- "str q12, [x14, #0x40]\n"
- "str q13, [x14, #0x50]\n"
- "add x14, x14, #0x60\n"
+ "str q8, [x13, #0x0]\n"
+ "str q9, [x13, #0x10]\n"
+ "str q10, [x13, #0x20]\n"
+ "str q11, [x13, #0x30]\n"
+ "str q12, [x13, #0x40]\n"
+ "str q13, [x13, #0x50]\n"
+ "add x13, x13, #0x60\n"
"41:" // Height 1: Writeback done
"subs x16, x16, #0x18\n"
"bgt 2b\n"
"b 166f\n"
"42:" // Height 2
- "mov x17, %x[bias]\n"
"ldr x16, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x14, %x[bias]\n"
"ldr x15, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x14, %x[output_ptr]\n"
+ "mov x13, %x[output_ptr]\n"
"43:" // Height 2: Column loop
- "cbz x17, 44f\n"
- "ldr q8, [x17, #0x0]\n"
+ "cbz x14, 44f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
+ "ldr q10, [x14, #0x20]\n"
"mov v14.16b, v8.16b\n"
- "ldr q9, [x17, #0x10]\n"
+ "ldr q11, [x14, #0x30]\n"
"mov v15.16b, v9.16b\n"
- "ldr q10, [x17, #0x20]\n"
+ "ldr q12, [x14, #0x40]\n"
"mov v16.16b, v10.16b\n"
- "ldr q11, [x17, #0x30]\n"
+ "ldr q13, [x14, #0x50]\n"
"mov v17.16b, v11.16b\n"
- "ldr q12, [x17, #0x40]\n"
+ "add x14, x14, #0x60\n"
"mov v18.16b, v12.16b\n"
- "ldr q13, [x17, #0x50]\n"
"mov v19.16b, v13.16b\n"
- "add x17, x17, #0x60\n"
"b 59f\n"
"44:" // Height 2: no bias
"tbz %x[flags], #0, 58f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x16, #0x18\n"
- "add x23, x14, x20, LSL #2\n"
+ "add x23, x13, x19, LSL #2\n"
"bge 57f\n"
"tbz x16, #4, 48f\n"
- "ld1 { v8.4s }, [x14], #0x10\n"
+ "ld1 { v8.4s }, [x13], #0x10\n"
"ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v9.4s }, [x14], #0x10\n"
+ "ld1 { v9.4s }, [x13], #0x10\n"
"ld1 { v15.4s }, [x23], #0x10\n"
- "ld1 { v10.4s }, [x14], #0x10\n"
+ "ld1 { v10.4s }, [x13], #0x10\n"
"ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v11.4s }, [x14], #0x10\n"
+ "ld1 { v11.4s }, [x13], #0x10\n"
"ld1 { v17.4s }, [x23], #0x10\n"
"tbz x16, #2, 46f\n"
- "ld1 { v12.4s }, [x14], #0x10\n"
+ "ld1 { v12.4s }, [x13], #0x10\n"
"ld1 { v18.4s }, [x23], #0x10\n"
"tbz x16, #1, 45f\n"
- "ldr d13, [x14], #0x8\n"
- "mov x20, #0x58\n"
+ "mov x19, #0x58\n"
+ "ldr d13, [x13], #0x8\n"
"ldr d19, [x23], #0x8\n"
"tbz x16, #0, 56f\n"
- "ld1 { v13.s }[2], [x14]\n"
+ "ld1 { v13.s }[2], [x13]\n"
"ld1 { v19.s }[2], [x23]\n"
"b 56f\n"
"45:" // Height 2: Partial accumulate: partial_1_20
- "mov x20, #0x50\n"
+ "mov x19, #0x50\n"
"tbz x16, #0, 56f\n"
- "ldr s13, [x14, #0x0]\n"
+ "ldr s13, [x13, #0x0]\n"
"ldr s19, [x23, #0x0]\n"
"b 56f\n"
"46:" // Height 2: Partial accumulate: partial_2_16
"tbz x16, #1, 47f\n"
- "ldr d12, [x14], #0x8\n"
- "mov x20, #0x48\n"
+ "ldr d12, [x13], #0x8\n"
"ldr d18, [x23], #0x8\n"
+ "mov x19, #0x48\n"
"tbz x16, #0, 56f\n"
- "ld1 { v12.s }[2], [x14]\n"
+ "ld1 { v12.s }[2], [x13]\n"
"ld1 { v18.s }[2], [x23]\n"
"b 56f\n"
"47:" // Height 2: Partial accumulate: partial_1_16
- "mov x20, #0x40\n"
+ "mov x19, #0x40\n"
"tbz x16, #0, 56f\n"
- "ldr s12, [x14, #0x0]\n"
+ "ldr s12, [x13, #0x0]\n"
"ldr s18, [x23, #0x0]\n"
"b 56f\n"
"48:" // Height 2: Partial accumulate: partial_8_0
"tbz x16, #3, 52f\n"
- "ld1 { v8.4s }, [x14], #0x10\n"
+ "ld1 { v8.4s }, [x13], #0x10\n"
"ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v9.4s }, [x14], #0x10\n"
+ "ld1 { v9.4s }, [x13], #0x10\n"
"ld1 { v15.4s }, [x23], #0x10\n"
"tbz x16, #2, 50f\n"
- "ld1 { v10.4s }, [x14], #0x10\n"
+ "ld1 { v10.4s }, [x13], #0x10\n"
"ld1 { v16.4s }, [x23], #0x10\n"
"tbz x16, #1, 49f\n"
- "ldr d11, [x14], #0x8\n"
- "mov x20, #0x38\n"
+ "mov x19, #0x38\n"
+ "ldr d11, [x13], #0x8\n"
"ldr d17, [x23], #0x8\n"
"tbz x16, #0, 56f\n"
- "ld1 { v11.s }[2], [x14]\n"
+ "ld1 { v11.s }[2], [x13]\n"
"ld1 { v17.s }[2], [x23]\n"
"b 56f\n"
"49:" // Height 2: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x16, #0, 56f\n"
- "ldr s11, [x14, #0x0]\n"
+ "ldr s11, [x13, #0x0]\n"
"ldr s17, [x23, #0x0]\n"
"b 56f\n"
"50:" // Height 2: Partial accumulate: partial_2_8
"tbz x16, #1, 51f\n"
- "ldr d10, [x14], #0x8\n"
- "mov x20, #0x28\n"
+ "ldr d10, [x13], #0x8\n"
"ldr d16, [x23], #0x8\n"
+ "mov x19, #0x28\n"
"tbz x16, #0, 56f\n"
- "ld1 { v10.s }[2], [x14]\n"
+ "ld1 { v10.s }[2], [x13]\n"
"ld1 { v16.s }[2], [x23]\n"
"b 56f\n"
"51:" // Height 2: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x16, #0, 56f\n"
- "ldr s10, [x14, #0x0]\n"
+ "ldr s10, [x13, #0x0]\n"
"ldr s16, [x23, #0x0]\n"
"b 56f\n"
"52:" // Height 2: Partial accumulate: partial_4_0
"tbz x16, #2, 54f\n"
- "ld1 { v8.4s }, [x14], #0x10\n"
+ "ld1 { v8.4s }, [x13], #0x10\n"
"ld1 { v14.4s }, [x23], #0x10\n"
"tbz x16, #1, 53f\n"
- "ldr d9, [x14], #0x8\n"
- "mov x20, #0x18\n"
+ "mov x19, #0x18\n"
+ "ldr d9, [x13], #0x8\n"
"ldr d15, [x23], #0x8\n"
"tbz x16, #0, 56f\n"
- "ld1 { v9.s }[2], [x14]\n"
+ "ld1 { v9.s }[2], [x13]\n"
"ld1 { v15.s }[2], [x23]\n"
"b 56f\n"
"53:" // Height 2: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x16, #0, 56f\n"
- "ldr s9, [x14, #0x0]\n"
+ "ldr s9, [x13, #0x0]\n"
"ldr s15, [x23, #0x0]\n"
"b 56f\n"
"54:" // Height 2: Partial accumulate: partial_2_0
"tbz x16, #1, 55f\n"
- "ldr d8, [x14], #0x8\n"
- "mov x20, #0x8\n"
+ "ldr d8, [x13], #0x8\n"
"ldr d14, [x23], #0x8\n"
+ "mov x19, #0x8\n"
"tbz x16, #0, 56f\n"
- "ld1 { v8.s }[2], [x14]\n"
+ "ld1 { v8.s }[2], [x13]\n"
"ld1 { v14.s }[2], [x23]\n"
"b 56f\n"
"55:" // Height 2: Partial accumulate: partial_1_0
- "ldr s8, [x14, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr s8, [x13, #0x0]\n"
+ "mov x19, #0x0\n"
"ldr s14, [x23, #0x0]\n"
"56:" // Height 2: Partial accumulate: Done
- "sub x14, x14, x20\n"
+ "sub x13, x13, x19\n"
"b 59f\n"
"57:" // Height 2: full accumulate
- "ldr q8, [x14, #0x0]\n"
- "ldr q9, [x14, #0x10]\n"
- "ldr q10, [x14, #0x20]\n"
- "ldr q11, [x14, #0x30]\n"
- "ldr q12, [x14, #0x40]\n"
- "ldr q13, [x14, #0x50]\n"
+ "ldr q8, [x13, #0x0]\n"
+ "ldr q9, [x13, #0x10]\n"
+ "ldr q10, [x13, #0x20]\n"
+ "ldr q11, [x13, #0x30]\n"
+ "ldr q12, [x13, #0x40]\n"
+ "ldr q13, [x13, #0x50]\n"
"ldr q14, [x23, #0x0]\n"
"ldr q15, [x23, #0x10]\n"
"ldr q16, [x23, #0x20]\n"
@@ -697,192 +697,192 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"movi v18.16b, #0x0\n"
"movi v19.16b, #0x0\n"
"59:" // Height 2: setup done
- "mov x13, #0x0\n"
+ "mov x12, #0x0\n"
"60:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w12, [x20, x13, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w11, [x20, x12, LSL #0x2]\n"
"tbz %x[flags], #3, 61f\n"
- "ldr x21, [%x[input_ptr], x13, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x11, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x13, 62f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x11, x11, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x12, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x10, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x12, 62f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x10, x10, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
"b 62f\n"
"61:" // Height 2: setup direct input
- "mov x11, %x[input_ptr]\n"
- "add x25, x11, x20, LSL #2\n"
+ "mov x10, %x[input_ptr]\n"
+ "add x24, x10, x19, LSL #2\n"
"62:" // Height 2: input setup done
- "cmp x12, #0x4\n"
+ "cmp x11, #0x4\n"
"blt 65f\n"
- "ldr q0, [x11, #0x0]\n"
- "cmp x12, #0x8\n"
- "ldr q1, [x25, #0x0]\n"
+ "ldr q0, [x10, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x11, #0x8\n"
"ldr q4, [x15, #0x0]\n"
- "ldr q5, [x15, #0x10]\n"
- "ldr q6, [x15, #0x20]\n"
- "ldr q7, [x15, #0x30]\n"
"blt 64f\n"
"63:" // Height 2: Multiply loop: Main loop head
"fmla v8.4s, v4.4s, v0.s[0]\n"
- "ldr x10, [x15, #0x48]\n"
+ "ldr d5, [x15, #0x10]\n"
"fmla v14.4s, v4.4s, v1.s[0]\n"
- "ldr d4, [x15, #0x40]\n"
+ "ldr x9, [x15, #0x18]\n"
+ "ldr d6, [x15, #0x20]\n"
+ "add x10, x10, #0x10\n"
+ "ldr x28, [x15, #0x28]\n"
+ "add x24, x24, #0x10\n"
+ "mov v5.d[1], x9\n"
+ "ldr d7, [x15, #0x30]\n"
+ "ldr x27, [x15, #0x38]\n"
+ "sub x11, x11, #0x4\n"
"fmla v9.4s, v5.4s, v0.s[0]\n"
- "ldr x9, [x15, #0x58]\n"
+ "mov v6.d[1], x28\n"
"fmla v15.4s, v5.4s, v1.s[0]\n"
- "ldr d5, [x15, #0x50]\n"
+ "ldr d4, [x15, #0x40]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
- "ldr x28, [x15, #0x68]\n"
+ "mov v7.d[1], x27\n"
"fmla v16.4s, v6.4s, v1.s[0]\n"
- "ldr d6, [x15, #0x60]\n"
+ "ldr x26, [x15, #0x48]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
- "ldr x27, [x15, #0x78]\n"
+ "prfm pldl1keep, [x10, #0x80]\n"
"fmla v17.4s, v7.4s, v1.s[0]\n"
- "ldr d7, [x15, #0x70]\n"
- "mov v4.d[1], x10\n"
+ "ldr d5, [x15, #0x50]\n"
+ "mov v4.d[1], x26\n"
+ "ldr x9, [x15, #0x58]\n"
+ "ldr d6, [x15, #0x60]\n"
+ "cmp x11, #0x8\n"
"fmla v12.4s, v4.4s, v0.s[0]\n"
- "mov v5.d[1], x9\n"
+ "ldr x28, [x15, #0x68]\n"
"fmla v18.4s, v4.4s, v1.s[0]\n"
- "ldr d4, [x15, #0x80]\n"
- "mov v6.d[1], x28\n"
- "mov v7.d[1], x27\n"
- "ldr x10, [x15, #0x88]\n"
+ "mov v5.d[1], x9\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v13.4s, v5.4s, v0.s[0]\n"
- "ldr x9, [x15, #0x98]\n"
+ "mov v6.d[1], x28\n"
"fmla v19.4s, v5.4s, v1.s[0]\n"
- "ldr d5, [x15, #0x90]\n"
+ "ldr d7, [x15, #0x70]\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
- "ldr x28, [x15, #0xa8]\n"
+ "ldr x27, [x15, #0x78]\n"
"fmla v14.4s, v6.4s, v1.s[1]\n"
- "ldr d6, [x15, #0xa0]\n"
+ "ldr d4, [x15, #0x80]\n"
+ "ldr x26, [x15, #0x88]\n"
+ "mov v7.d[1], x27\n"
+ "ldr d5, [x15, #0x90]\n"
+ "ldr x9, [x15, #0x98]\n"
"fmla v9.4s, v7.4s, v0.s[1]\n"
- "ldr x27, [x15, #0xb8]\n"
+ "mov v4.d[1], x26\n"
"fmla v15.4s, v7.4s, v1.s[1]\n"
- "ldr d7, [x15, #0xb0]\n"
- "mov v4.d[1], x10\n"
+ "ldr d6, [x15, #0xa0]\n"
"fmla v10.4s, v4.4s, v0.s[1]\n"
"mov v5.d[1], x9\n"
"fmla v16.4s, v4.4s, v1.s[1]\n"
- "ldr d4, [x15, #0xc0]\n"
- "mov v6.d[1], x28\n"
- "mov v7.d[1], x27\n"
- "ldr x10, [x15, #0xc8]\n"
+ "ldr x28, [x15, #0xa8]\n"
"fmla v11.4s, v5.4s, v0.s[1]\n"
- "ldr x9, [x15, #0xd8]\n"
+ "ldr d7, [x15, #0xb0]\n"
"fmla v17.4s, v5.4s, v1.s[1]\n"
- "ldr d5, [x15, #0xd0]\n"
+ "ldr x27, [x15, #0xb8]\n"
+ "mov v6.d[1], x28\n"
+ "ldr d4, [x15, #0xc0]\n"
+ "ldr x26, [x15, #0xc8]\n"
"fmla v12.4s, v6.4s, v0.s[1]\n"
- "ldr x28, [x15, #0xe8]\n"
+ "mov v7.d[1], x27\n"
"fmla v18.4s, v6.4s, v1.s[1]\n"
- "ldr d6, [x15, #0xe0]\n"
+ "ldr d5, [x15, #0xd0]\n"
"fmla v13.4s, v7.4s, v0.s[1]\n"
- "ldr x27, [x15, #0xf8]\n"
+ "mov v4.d[1], x26\n"
"fmla v19.4s, v7.4s, v1.s[1]\n"
- "ldr d7, [x15, #0xf0]\n"
- "mov v4.d[1], x10\n"
+ "ldr x9, [x15, #0xd8]\n"
"fmla v8.4s, v4.4s, v0.s[2]\n"
- "mov v5.d[1], x9\n"
+ "ldr d6, [x15, #0xe0]\n"
"fmla v14.4s, v4.4s, v1.s[2]\n"
- "ldr d4, [x15, #0x100]\n"
- "mov v6.d[1], x28\n"
- "mov v7.d[1], x27\n"
- "ldr x10, [x15, #0x108]\n"
+ "ldr x28, [x15, #0xe8]\n"
+ "mov v5.d[1], x9\n"
+ "ldr d7, [x15, #0xf0]\n"
+ "ldr x27, [x15, #0xf8]\n"
"fmla v9.4s, v5.4s, v0.s[2]\n"
- "ldr x9, [x15, #0x118]\n"
+ "mov v6.d[1], x28\n"
"fmla v15.4s, v5.4s, v1.s[2]\n"
- "ldr d5, [x15, #0x110]\n"
+ "ldr d4, [x15, #0x100]\n"
"fmla v10.4s, v6.4s, v0.s[2]\n"
- "ldr x28, [x15, #0x128]\n"
+ "mov v7.d[1], x27\n"
"fmla v16.4s, v6.4s, v1.s[2]\n"
- "ldr d6, [x15, #0x120]\n"
+ "ldr x26, [x15, #0x108]\n"
"fmla v11.4s, v7.4s, v0.s[2]\n"
- "ldr x27, [x15, #0x138]\n"
+ "ldr d5, [x15, #0x110]\n"
"fmla v17.4s, v7.4s, v1.s[2]\n"
- "ldr d7, [x15, #0x130]\n"
- "mov v4.d[1], x10\n"
+ "ldr x9, [x15, #0x118]\n"
+ "mov v4.d[1], x26\n"
+ "ldr d6, [x15, #0x120]\n"
+ "ldr x28, [x15, #0x128]\n"
"fmla v12.4s, v4.4s, v0.s[2]\n"
"mov v5.d[1], x9\n"
"fmla v18.4s, v4.4s, v1.s[2]\n"
- "ldr d4, [x15, #0x140]\n"
- "mov v6.d[1], x28\n"
- "mov v7.d[1], x27\n"
- "ldr x10, [x15, #0x148]\n"
+ "ldr d7, [x15, #0x130]\n"
"fmla v13.4s, v5.4s, v0.s[2]\n"
- "ldr x9, [x15, #0x158]\n"
+ "mov v6.d[1], x28\n"
"fmla v19.4s, v5.4s, v1.s[2]\n"
- "ldr d5, [x15, #0x150]\n"
+ "ldr x27, [x15, #0x138]\n"
"fmla v8.4s, v6.4s, v0.s[3]\n"
- "ldr x28, [x15, #0x168]\n"
+ "ldr d4, [x15, #0x140]\n"
"fmla v14.4s, v6.4s, v1.s[3]\n"
- "ldr d6, [x15, #0x160]\n"
+ "ldr x26, [x15, #0x148]\n"
+ "mov v7.d[1], x27\n"
+ "ldr d5, [x15, #0x150]\n"
+ "ldr x9, [x15, #0x158]\n"
"fmla v9.4s, v7.4s, v0.s[3]\n"
- "ldr x27, [x15, #0x178]\n"
+ "mov v4.d[1], x26\n"
"fmla v15.4s, v7.4s, v1.s[3]\n"
- "ldr d7, [x15, #0x170]\n"
- "mov v4.d[1], x10\n"
- "add x11, x11, #0x10\n"
- "mov v5.d[1], x9\n"
- "add x25, x25, #0x10\n"
- "mov v6.d[1], x28\n"
- "add x15, x15, #0x180\n"
- "mov v7.d[1], x27\n"
+ "ldr d6, [x15, #0x160]\n"
"fmla v10.4s, v4.4s, v0.s[3]\n"
+ "mov v5.d[1], x9\n"
"fmla v16.4s, v4.4s, v1.s[3]\n"
- "ldr d4, [x15, #0x0]\n"
- "ldr x10, [x15, #0x8]\n"
+ "ldr x28, [x15, #0x168]\n"
"fmla v11.4s, v5.4s, v0.s[3]\n"
+ "ldr d7, [x15, #0x170]\n"
"fmla v17.4s, v5.4s, v1.s[3]\n"
- "ldr d5, [x15, #0x10]\n"
- "ldr x9, [x15, #0x18]\n"
+ "ldr x27, [x15, #0x178]\n"
+ "mov v6.d[1], x28\n"
+ "ldr x25, [x10, #0x8]\n"
+ "ldr x23, [x24, #0x8]\n"
+ "add x15, x15, #0x180\n"
"fmla v12.4s, v6.4s, v0.s[3]\n"
+ "mov v7.d[1], x27\n"
"fmla v18.4s, v6.4s, v1.s[3]\n"
- "ldr d6, [x15, #0x20]\n"
- "ldr x28, [x15, #0x28]\n"
+ "ldr d4, [x15, #0x0]\n"
"fmla v13.4s, v7.4s, v0.s[3]\n"
- "ldr d0, [x11, #0x0]\n"
+ "ldr x26, [x15, #0x8]\n"
"fmla v19.4s, v7.4s, v1.s[3]\n"
- "ldr d1, [x25, #0x0]\n"
- "sub x12, x12, #0x4\n"
- "ldr d7, [x15, #0x30]\n"
- "cmp x12, #0x8\n"
- "ldr x26, [x11, #0x8]\n"
- "mov v4.d[1], x10\n"
- "ldr x24, [x25, #0x8]\n"
- "mov v5.d[1], x9\n"
- "ldr x27, [x15, #0x38]\n"
- "mov v6.d[1], x28\n"
- "prfm pldl1keep, [x11, #0x80]\n"
- "mov v0.d[1], x26\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "mov v1.d[1], x24\n"
- "mov v7.d[1], x27\n"
+ "ldr d0, [x10, #0x0]\n"
+ "ldr d1, [x24, #0x0]\n"
+ "mov v4.d[1], x26\n"
+ "mov v0.d[1], x25\n"
+ "mov v1.d[1], x23\n"
"bge 63b\n"
"64:" // Height 2: Multiply loop: Single iteration only
"fmla v8.4s, v4.4s, v0.s[0]\n"
- "add x11, x11, #0x10\n"
+ "ldr q5, [x15, #0x10]\n"
"fmla v14.4s, v4.4s, v1.s[0]\n"
- "ldr q4, [x15, #0x40]\n"
+ "ldr q6, [x15, #0x20]\n"
+ "ldr q7, [x15, #0x30]\n"
+ "sub x11, x11, #0x4\n"
"fmla v9.4s, v5.4s, v0.s[0]\n"
- "add x25, x25, #0x10\n"
+ "ldr q4, [x15, #0x40]\n"
"fmla v15.4s, v5.4s, v1.s[0]\n"
"ldr q5, [x15, #0x50]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
- "sub x12, x12, #0x4\n"
+ "add x10, x10, #0x10\n"
"fmla v16.4s, v6.4s, v1.s[0]\n"
- "ldr q6, [x15, #0x60]\n"
+ "prfm pldl1keep, [x10, #0x80]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
- "prfm pldl1keep, [x11, #0x80]\n"
+ "ldr q6, [x15, #0x60]\n"
"fmla v17.4s, v7.4s, v1.s[0]\n"
"ldr q7, [x15, #0x70]\n"
"fmla v12.4s, v4.4s, v0.s[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
"fmla v18.4s, v4.4s, v1.s[0]\n"
- "ldr q4, [x15, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v13.4s, v5.4s, v0.s[0]\n"
+ "ldr q4, [x15, #0x80]\n"
"fmla v19.4s, v5.4s, v1.s[0]\n"
"ldr q5, [x15, #0x90]\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
@@ -937,43 +937,45 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"fmla v13.4s, v7.4s, v0.s[3]\n"
"fmla v19.4s, v7.4s, v1.s[3]\n"
"65:" // Height 2: Multiply loop: Main loop skip
- "cbz x12, 67f\n"
+ "cbz x11, 67f\n"
"66:" // Height 2: Multiply loop: Odd block loop
- "ldr s0, [x11], #0x4\n"
- "sub x12, x12, #0x1\n"
- "ldr s1, [x25], #0x4\n"
+ "ldr s0, [x10], #0x4\n"
+ "sub x11, x11, #0x1\n"
+ "ldr s1, [x24], #0x4\n"
"ldr q4, [x15, #0x0]\n"
- "fmla v8.4s, v4.4s, v0.s[0]\n"
"ldr q5, [x15, #0x10]\n"
- "fmla v14.4s, v4.4s, v1.s[0]\n"
"ldr q6, [x15, #0x20]\n"
- "fmla v9.4s, v5.4s, v0.s[0]\n"
+ "fmla v8.4s, v4.4s, v0.s[0]\n"
"ldr q7, [x15, #0x30]\n"
- "fmla v15.4s, v5.4s, v1.s[0]\n"
+ "fmla v14.4s, v4.4s, v1.s[0]\n"
"ldr q4, [x15, #0x40]\n"
- "fmla v10.4s, v6.4s, v0.s[0]\n"
+ "fmla v9.4s, v5.4s, v0.s[0]\n"
+ "fmla v15.4s, v5.4s, v1.s[0]\n"
"ldr q5, [x15, #0x50]\n"
+ "fmla v10.4s, v6.4s, v0.s[0]\n"
+ "add x15, x15, #0x60\n"
"fmla v16.4s, v6.4s, v1.s[0]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
- "add x15, x15, #0x60\n"
"fmla v17.4s, v7.4s, v1.s[0]\n"
"fmla v12.4s, v4.4s, v0.s[0]\n"
"fmla v18.4s, v4.4s, v1.s[0]\n"
"fmla v13.4s, v5.4s, v0.s[0]\n"
"fmla v19.4s, v5.4s, v1.s[0]\n"
- "cbnz x12, 66b\n"
+ "cbnz x11, 66b\n"
"67:" // Height 2: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x13, x13, #0x1\n"
- "cmp x13, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x12, x12, #0x1\n"
+ "cmp x12, x19\n"
"bne 60b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x14, x20, LSL #2\n"
- "prfm pstl1keep, [x14, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x13, #0x0]\n"
+ "add x23, x13, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
"tbz %x[flags], #1, 68f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x20, %x[args_ptr], %[offset_min]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x20]\n"
+ "ld1r { v0.4s }, [x19]\n"
"fmin v8.4s, v8.4s, v0.4s\n"
"fmin v9.4s, v9.4s, v0.4s\n"
"fmin v10.4s, v10.4s, v0.4s\n"
@@ -984,133 +986,131 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"fmin v15.4s, v15.4s, v0.4s\n"
"fmin v16.4s, v16.4s, v0.4s\n"
"fmin v17.4s, v17.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmax v15.4s, v15.4s, v1.4s\n"
+ "fmax v16.4s, v16.4s, v1.4s\n"
+ "fmax v17.4s, v17.4s, v1.4s\n"
"fmin v18.4s, v18.4s, v0.4s\n"
"fmin v19.4s, v19.4s, v0.4s\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v15.4s, v15.4s, v0.4s\n"
- "fmax v16.4s, v16.4s, v0.4s\n"
- "fmax v17.4s, v17.4s, v0.4s\n"
- "fmax v18.4s, v18.4s, v0.4s\n"
- "fmax v19.4s, v19.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v1.4s\n"
+ "fmax v19.4s, v19.4s, v1.4s\n"
"68:" // Height 2: No activation
"cmp x16, #0x18\n"
"bge 81f\n"
"tbz x16, #4, 72f\n"
- "st1 { v8.4s }, [x14], #0x10\n"
- "st1 { v9.4s }, [x14], #0x10\n"
- "st1 { v10.4s }, [x14], #0x10\n"
- "st1 { v11.4s }, [x14], #0x10\n"
+ "st1 { v8.4s }, [x13], #0x10\n"
+ "st1 { v9.4s }, [x13], #0x10\n"
+ "st1 { v10.4s }, [x13], #0x10\n"
+ "st1 { v11.4s }, [x13], #0x10\n"
"st1 { v14.4s }, [x23], #0x10\n"
"st1 { v15.4s }, [x23], #0x10\n"
"st1 { v16.4s }, [x23], #0x10\n"
"st1 { v17.4s }, [x23], #0x10\n"
"tbz x16, #2, 70f\n"
- "st1 { v12.4s }, [x14], #0x10\n"
+ "st1 { v12.4s }, [x13], #0x10\n"
"st1 { v18.4s }, [x23], #0x10\n"
"tbz x16, #1, 69f\n"
- "str d13, [x14], #0x8\n"
+ "str d13, [x13], #0x8\n"
"str d19, [x23], #0x8\n"
"tbz x16, #0, 80f\n"
- "st1 { v13.s }[2], [x14]\n"
+ "st1 { v13.s }[2], [x13]\n"
"st1 { v19.s }[2], [x23]\n"
"b 80f\n"
"69:" // Height 2: Partial direct writeback: partial_1_20
"tbz x16, #0, 80f\n"
- "str s13, [x14, #0x0]\n"
+ "str s13, [x13, #0x0]\n"
"str s19, [x23, #0x0]\n"
"b 80f\n"
"70:" // Height 2: Partial direct writeback: partial_2_16
"tbz x16, #1, 71f\n"
- "str d12, [x14], #0x8\n"
+ "str d12, [x13], #0x8\n"
"str d18, [x23], #0x8\n"
"tbz x16, #0, 80f\n"
- "st1 { v12.s }[2], [x14]\n"
+ "st1 { v12.s }[2], [x13]\n"
"st1 { v18.s }[2], [x23]\n"
"b 80f\n"
"71:" // Height 2: Partial direct writeback: partial_1_16
"tbz x16, #0, 80f\n"
- "str s12, [x14, #0x0]\n"
+ "str s12, [x13, #0x0]\n"
"str s18, [x23, #0x0]\n"
"b 80f\n"
"72:" // Height 2: Partial direct writeback: partial_8_0
"tbz x16, #3, 76f\n"
- "st1 { v8.4s }, [x14], #0x10\n"
- "st1 { v9.4s }, [x14], #0x10\n"
+ "st1 { v8.4s }, [x13], #0x10\n"
+ "st1 { v9.4s }, [x13], #0x10\n"
"st1 { v14.4s }, [x23], #0x10\n"
"st1 { v15.4s }, [x23], #0x10\n"
"tbz x16, #2, 74f\n"
- "st1 { v10.4s }, [x14], #0x10\n"
+ "st1 { v10.4s }, [x13], #0x10\n"
"st1 { v16.4s }, [x23], #0x10\n"
"tbz x16, #1, 73f\n"
- "str d11, [x14], #0x8\n"
+ "str d11, [x13], #0x8\n"
"str d17, [x23], #0x8\n"
"tbz x16, #0, 80f\n"
- "st1 { v11.s }[2], [x14]\n"
+ "st1 { v11.s }[2], [x13]\n"
"st1 { v17.s }[2], [x23]\n"
"b 80f\n"
"73:" // Height 2: Partial direct writeback: partial_1_12
"tbz x16, #0, 80f\n"
- "str s11, [x14, #0x0]\n"
+ "str s11, [x13, #0x0]\n"
"str s17, [x23, #0x0]\n"
"b 80f\n"
"74:" // Height 2: Partial direct writeback: partial_2_8
"tbz x16, #1, 75f\n"
- "str d10, [x14], #0x8\n"
+ "str d10, [x13], #0x8\n"
"str d16, [x23], #0x8\n"
"tbz x16, #0, 80f\n"
- "st1 { v10.s }[2], [x14]\n"
+ "st1 { v10.s }[2], [x13]\n"
"st1 { v16.s }[2], [x23]\n"
"b 80f\n"
"75:" // Height 2: Partial direct writeback: partial_1_8
"tbz x16, #0, 80f\n"
- "str s10, [x14, #0x0]\n"
+ "str s10, [x13, #0x0]\n"
"str s16, [x23, #0x0]\n"
"b 80f\n"
"76:" // Height 2: Partial direct writeback: partial_4_0
"tbz x16, #2, 78f\n"
- "st1 { v8.4s }, [x14], #0x10\n"
+ "st1 { v8.4s }, [x13], #0x10\n"
"st1 { v14.4s }, [x23], #0x10\n"
"tbz x16, #1, 77f\n"
- "str d9, [x14], #0x8\n"
+ "str d9, [x13], #0x8\n"
"str d15, [x23], #0x8\n"
"tbz x16, #0, 80f\n"
- "st1 { v9.s }[2], [x14]\n"
+ "st1 { v9.s }[2], [x13]\n"
"st1 { v15.s }[2], [x23]\n"
"b 80f\n"
"77:" // Height 2: Partial direct writeback: partial_1_4
"tbz x16, #0, 80f\n"
- "str s9, [x14, #0x0]\n"
+ "str s9, [x13, #0x0]\n"
"str s15, [x23, #0x0]\n"
"b 80f\n"
"78:" // Height 2: Partial direct writeback: partial_2_0
"tbz x16, #1, 79f\n"
- "str d8, [x14], #0x8\n"
+ "str d8, [x13], #0x8\n"
"str d14, [x23], #0x8\n"
"tbz x16, #0, 80f\n"
- "st1 { v8.s }[2], [x14]\n"
+ "st1 { v8.s }[2], [x13]\n"
"st1 { v14.s }[2], [x23]\n"
"b 80f\n"
"79:" // Height 2: Partial direct writeback: partial_1_0
- "str s8, [x14, #0x0]\n"
+ "str s8, [x13, #0x0]\n"
"str s14, [x23, #0x0]\n"
"80:" // Height 2: Partial direct writeback: Done
"b 82f\n"
"81:" // Height 2: Full writeback
- "str q8, [x14, #0x0]\n"
- "str q9, [x14, #0x10]\n"
- "str q10, [x14, #0x20]\n"
- "str q11, [x14, #0x30]\n"
- "str q12, [x14, #0x40]\n"
- "str q13, [x14, #0x50]\n"
- "add x14, x14, #0x60\n"
+ "str q8, [x13, #0x0]\n"
+ "str q9, [x13, #0x10]\n"
+ "str q10, [x13, #0x20]\n"
+ "str q11, [x13, #0x30]\n"
+ "str q12, [x13, #0x40]\n"
+ "str q13, [x13, #0x50]\n"
+ "add x13, x13, #0x60\n"
"str q14, [x23, #0x0]\n"
"str q15, [x23, #0x10]\n"
"str q16, [x23, #0x20]\n"
@@ -1122,26 +1122,26 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"bgt 43b\n"
"b 166f\n"
"83:" // Height 3
- "mov x17, %x[bias]\n"
"ldr x16, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x14, %x[bias]\n"
"ldr x15, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x14, %x[output_ptr]\n"
+ "mov x13, %x[output_ptr]\n"
"84:" // Height 3: Column loop
- "cbz x17, 85f\n"
- "ldr q8, [x17, #0x0]\n"
+ "cbz x14, 85f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
+ "ldr q10, [x14, #0x20]\n"
"mov v14.16b, v8.16b\n"
- "ldr q9, [x17, #0x10]\n"
+ "ldr q11, [x14, #0x30]\n"
"mov v15.16b, v9.16b\n"
- "ldr q10, [x17, #0x20]\n"
+ "ldr q12, [x14, #0x40]\n"
"mov v16.16b, v10.16b\n"
- "ldr q11, [x17, #0x30]\n"
+ "ldr q13, [x14, #0x50]\n"
"mov v17.16b, v11.16b\n"
- "ldr q12, [x17, #0x40]\n"
+ "add x14, x14, #0x60\n"
"mov v18.16b, v12.16b\n"
- "ldr q13, [x17, #0x50]\n"
"mov v19.16b, v13.16b\n"
"mov v20.16b, v8.16b\n"
- "add x17, x17, #0x60\n"
"mov v21.16b, v9.16b\n"
"mov v22.16b, v10.16b\n"
"mov v23.16b, v11.16b\n"
@@ -1150,158 +1150,158 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"b 100f\n"
"85:" // Height 3: no bias
"tbz %x[flags], #0, 99f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x14, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x16, #0x18\n"
- "add x22, x23, x20, LSL #2\n"
+ "add x23, x13, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"bge 98f\n"
"tbz x16, #4, 89f\n"
- "ld1 { v8.4s }, [x14], #0x10\n"
+ "ld1 { v8.4s }, [x13], #0x10\n"
"ld1 { v14.4s }, [x23], #0x10\n"
"ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v9.4s }, [x14], #0x10\n"
+ "ld1 { v9.4s }, [x13], #0x10\n"
"ld1 { v15.4s }, [x23], #0x10\n"
"ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v10.4s }, [x14], #0x10\n"
+ "ld1 { v10.4s }, [x13], #0x10\n"
"ld1 { v16.4s }, [x23], #0x10\n"
"ld1 { v22.4s }, [x22], #0x10\n"
- "ld1 { v11.4s }, [x14], #0x10\n"
+ "ld1 { v11.4s }, [x13], #0x10\n"
"ld1 { v17.4s }, [x23], #0x10\n"
"ld1 { v23.4s }, [x22], #0x10\n"
"tbz x16, #2, 87f\n"
- "ld1 { v12.4s }, [x14], #0x10\n"
+ "ld1 { v12.4s }, [x13], #0x10\n"
"ld1 { v18.4s }, [x23], #0x10\n"
"ld1 { v24.4s }, [x22], #0x10\n"
"tbz x16, #1, 86f\n"
- "ldr d13, [x14], #0x8\n"
- "mov x20, #0x58\n"
+ "ldr d13, [x13], #0x8\n"
+ "mov x19, #0x58\n"
"ldr d19, [x23], #0x8\n"
"ldr d25, [x22], #0x8\n"
"tbz x16, #0, 97f\n"
- "ld1 { v13.s }[2], [x14]\n"
+ "ld1 { v13.s }[2], [x13]\n"
"ld1 { v19.s }[2], [x23]\n"
"ld1 { v25.s }[2], [x22]\n"
"b 97f\n"
"86:" // Height 3: Partial accumulate: partial_1_20
- "mov x20, #0x50\n"
+ "mov x19, #0x50\n"
"tbz x16, #0, 97f\n"
- "ldr s13, [x14, #0x0]\n"
+ "ldr s13, [x13, #0x0]\n"
"ldr s19, [x23, #0x0]\n"
"ldr s25, [x22, #0x0]\n"
"b 97f\n"
"87:" // Height 3: Partial accumulate: partial_2_16
"tbz x16, #1, 88f\n"
- "ldr d12, [x14], #0x8\n"
- "mov x20, #0x48\n"
+ "ldr d12, [x13], #0x8\n"
"ldr d18, [x23], #0x8\n"
+ "mov x19, #0x48\n"
"ldr d24, [x22], #0x8\n"
"tbz x16, #0, 97f\n"
- "ld1 { v12.s }[2], [x14]\n"
+ "ld1 { v12.s }[2], [x13]\n"
"ld1 { v18.s }[2], [x23]\n"
"ld1 { v24.s }[2], [x22]\n"
"b 97f\n"
"88:" // Height 3: Partial accumulate: partial_1_16
- "mov x20, #0x40\n"
+ "mov x19, #0x40\n"
"tbz x16, #0, 97f\n"
- "ldr s12, [x14, #0x0]\n"
+ "ldr s12, [x13, #0x0]\n"
"ldr s18, [x23, #0x0]\n"
"ldr s24, [x22, #0x0]\n"
"b 97f\n"
"89:" // Height 3: Partial accumulate: partial_8_0
"tbz x16, #3, 93f\n"
- "ld1 { v8.4s }, [x14], #0x10\n"
+ "ld1 { v8.4s }, [x13], #0x10\n"
"ld1 { v14.4s }, [x23], #0x10\n"
"ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v9.4s }, [x14], #0x10\n"
+ "ld1 { v9.4s }, [x13], #0x10\n"
"ld1 { v15.4s }, [x23], #0x10\n"
"ld1 { v21.4s }, [x22], #0x10\n"
"tbz x16, #2, 91f\n"
- "ld1 { v10.4s }, [x14], #0x10\n"
+ "ld1 { v10.4s }, [x13], #0x10\n"
"ld1 { v16.4s }, [x23], #0x10\n"
"ld1 { v22.4s }, [x22], #0x10\n"
"tbz x16, #1, 90f\n"
- "ldr d11, [x14], #0x8\n"
- "mov x20, #0x38\n"
+ "ldr d11, [x13], #0x8\n"
+ "mov x19, #0x38\n"
"ldr d17, [x23], #0x8\n"
"ldr d23, [x22], #0x8\n"
"tbz x16, #0, 97f\n"
- "ld1 { v11.s }[2], [x14]\n"
+ "ld1 { v11.s }[2], [x13]\n"
"ld1 { v17.s }[2], [x23]\n"
"ld1 { v23.s }[2], [x22]\n"
"b 97f\n"
"90:" // Height 3: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x16, #0, 97f\n"
- "ldr s11, [x14, #0x0]\n"
+ "ldr s11, [x13, #0x0]\n"
"ldr s17, [x23, #0x0]\n"
"ldr s23, [x22, #0x0]\n"
"b 97f\n"
"91:" // Height 3: Partial accumulate: partial_2_8
"tbz x16, #1, 92f\n"
- "ldr d10, [x14], #0x8\n"
- "mov x20, #0x28\n"
+ "ldr d10, [x13], #0x8\n"
"ldr d16, [x23], #0x8\n"
+ "mov x19, #0x28\n"
"ldr d22, [x22], #0x8\n"
"tbz x16, #0, 97f\n"
- "ld1 { v10.s }[2], [x14]\n"
+ "ld1 { v10.s }[2], [x13]\n"
"ld1 { v16.s }[2], [x23]\n"
"ld1 { v22.s }[2], [x22]\n"
"b 97f\n"
"92:" // Height 3: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x16, #0, 97f\n"
- "ldr s10, [x14, #0x0]\n"
+ "ldr s10, [x13, #0x0]\n"
"ldr s16, [x23, #0x0]\n"
"ldr s22, [x22, #0x0]\n"
"b 97f\n"
"93:" // Height 3: Partial accumulate: partial_4_0
"tbz x16, #2, 95f\n"
- "ld1 { v8.4s }, [x14], #0x10\n"
+ "ld1 { v8.4s }, [x13], #0x10\n"
"ld1 { v14.4s }, [x23], #0x10\n"
"ld1 { v20.4s }, [x22], #0x10\n"
"tbz x16, #1, 94f\n"
- "ldr d9, [x14], #0x8\n"
- "mov x20, #0x18\n"
+ "ldr d9, [x13], #0x8\n"
+ "mov x19, #0x18\n"
"ldr d15, [x23], #0x8\n"
"ldr d21, [x22], #0x8\n"
"tbz x16, #0, 97f\n"
- "ld1 { v9.s }[2], [x14]\n"
+ "ld1 { v9.s }[2], [x13]\n"
"ld1 { v15.s }[2], [x23]\n"
"ld1 { v21.s }[2], [x22]\n"
"b 97f\n"
"94:" // Height 3: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x16, #0, 97f\n"
- "ldr s9, [x14, #0x0]\n"
+ "ldr s9, [x13, #0x0]\n"
"ldr s15, [x23, #0x0]\n"
"ldr s21, [x22, #0x0]\n"
"b 97f\n"
"95:" // Height 3: Partial accumulate: partial_2_0
"tbz x16, #1, 96f\n"
- "ldr d8, [x14], #0x8\n"
- "mov x20, #0x8\n"
+ "ldr d8, [x13], #0x8\n"
"ldr d14, [x23], #0x8\n"
+ "mov x19, #0x8\n"
"ldr d20, [x22], #0x8\n"
"tbz x16, #0, 97f\n"
- "ld1 { v8.s }[2], [x14]\n"
+ "ld1 { v8.s }[2], [x13]\n"
"ld1 { v14.s }[2], [x23]\n"
"ld1 { v20.s }[2], [x22]\n"
"b 97f\n"
"96:" // Height 3: Partial accumulate: partial_1_0
- "ldr s8, [x14, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr s8, [x13, #0x0]\n"
+ "mov x19, #0x0\n"
"ldr s14, [x23, #0x0]\n"
"ldr s20, [x22, #0x0]\n"
"97:" // Height 3: Partial accumulate: Done
- "sub x14, x14, x20\n"
+ "sub x13, x13, x19\n"
"b 100f\n"
"98:" // Height 3: full accumulate
- "ldr q8, [x14, #0x0]\n"
- "ldr q9, [x14, #0x10]\n"
- "ldr q10, [x14, #0x20]\n"
- "ldr q11, [x14, #0x30]\n"
- "ldr q12, [x14, #0x40]\n"
- "ldr q13, [x14, #0x50]\n"
+ "ldr q8, [x13, #0x0]\n"
+ "ldr q9, [x13, #0x10]\n"
+ "ldr q10, [x13, #0x20]\n"
+ "ldr q11, [x13, #0x30]\n"
+ "ldr q12, [x13, #0x40]\n"
+ "ldr q13, [x13, #0x50]\n"
"ldr q14, [x23, #0x0]\n"
"ldr q15, [x23, #0x10]\n"
"ldr q16, [x23, #0x20]\n"
@@ -1335,229 +1335,229 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"movi v24.16b, #0x0\n"
"movi v25.16b, #0x0\n"
"100:" // Height 3: setup done
- "mov x13, #0x0\n"
+ "mov x12, #0x0\n"
"101:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w12, [x20, x13, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w11, [x20, x12, LSL #0x2]\n"
"tbz %x[flags], #3, 102f\n"
- "ldr x21, [%x[input_ptr], x13, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x11, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x23, [x21, #0x10]\n"
- "cbnz x13, 103f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x11, x11, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x12, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x10, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x22, [x20, #0x10]\n"
+ "cbnz x12, 103f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x10, x10, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
"b 103f\n"
"102:" // Height 3: setup direct input
- "mov x11, %x[input_ptr]\n"
- "add x25, x11, x20, LSL #2\n"
- "add x23, x25, x20, LSL #2\n"
+ "mov x10, %x[input_ptr]\n"
+ "add x24, x10, x19, LSL #2\n"
+ "add x22, x24, x19, LSL #2\n"
"103:" // Height 3: input setup done
- "cmp x12, #0x4\n"
+ "cmp x11, #0x4\n"
"blt 106f\n"
- "ldr q0, [x11, #0x0]\n"
- "cmp x12, #0x8\n"
- "ldr q1, [x25, #0x0]\n"
- "ldr q2, [x23, #0x0]\n"
+ "ldr q0, [x10, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x11, #0x8\n"
+ "ldr q2, [x22, #0x0]\n"
"ldr q4, [x15, #0x0]\n"
- "ldr q5, [x15, #0x10]\n"
- "ldr q6, [x15, #0x20]\n"
- "ldr q7, [x15, #0x30]\n"
"blt 105f\n"
"104:" // Height 3: Multiply loop: Main loop head
"fmla v8.4s, v4.4s, v0.s[0]\n"
- "ldr x10, [x15, #0x48]\n"
+ "ldr d5, [x15, #0x10]\n"
"fmla v14.4s, v4.4s, v1.s[0]\n"
- "ldr x9, [x15, #0x58]\n"
+ "ldr x9, [x15, #0x18]\n"
"fmla v20.4s, v4.4s, v2.s[0]\n"
- "ldr d4, [x15, #0x40]\n"
+ "ldr d6, [x15, #0x20]\n"
+ "ldr x28, [x15, #0x28]\n"
+ "add x10, x10, #0x10\n"
+ "mov v5.d[1], x9\n"
+ "ldr d7, [x15, #0x30]\n"
+ "ldr x27, [x15, #0x38]\n"
+ "add x24, x24, #0x10\n"
"fmla v9.4s, v5.4s, v0.s[0]\n"
- "ldr x28, [x15, #0x68]\n"
+ "mov v6.d[1], x28\n"
"fmla v15.4s, v5.4s, v1.s[0]\n"
- "ldr x27, [x15, #0x78]\n"
+ "ldr d4, [x15, #0x40]\n"
"fmla v21.4s, v5.4s, v2.s[0]\n"
- "ldr d5, [x15, #0x50]\n"
+ "mov v7.d[1], x27\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
- "mov v4.d[1], x10\n"
+ "ldr x26, [x15, #0x48]\n"
"fmla v16.4s, v6.4s, v1.s[0]\n"
- "mov v5.d[1], x9\n"
+ "prfm pldl1keep, [x10, #0x80]\n"
"fmla v22.4s, v6.4s, v2.s[0]\n"
- "ldr d6, [x15, #0x60]\n"
+ "ldr d5, [x15, #0x50]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
- "mov v6.d[1], x28\n"
+ "mov v4.d[1], x26\n"
"fmla v17.4s, v7.4s, v1.s[0]\n"
- "ldr x10, [x15, #0x88]\n"
+ "ldr x9, [x15, #0x58]\n"
"fmla v23.4s, v7.4s, v2.s[0]\n"
- "ldr d7, [x15, #0x70]\n"
- "mov v7.d[1], x27\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v12.4s, v4.4s, v0.s[0]\n"
+ "ldr d6, [x15, #0x60]\n"
"fmla v18.4s, v4.4s, v1.s[0]\n"
- "ldr x9, [x15, #0x98]\n"
+ "mov v5.d[1], x9\n"
"fmla v24.4s, v4.4s, v2.s[0]\n"
- "ldr d4, [x15, #0x80]\n"
+ "ldr x28, [x15, #0x68]\n"
"fmla v13.4s, v5.4s, v0.s[0]\n"
- "ldr x28, [x15, #0xa8]\n"
+ "ldr d7, [x15, #0x70]\n"
"fmla v19.4s, v5.4s, v1.s[0]\n"
- "ldr x27, [x15, #0xb8]\n"
+ "ldr x27, [x15, #0x78]\n"
"fmla v25.4s, v5.4s, v2.s[0]\n"
- "ldr d5, [x15, #0x90]\n"
+ "mov v6.d[1], x28\n"
+ "ldr d4, [x15, #0x80]\n"
+ "add x22, x22, #0x10\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
- "mov v4.d[1], x10\n"
+ "mov v7.d[1], x27\n"
"fmla v14.4s, v6.4s, v1.s[1]\n"
- "mov v5.d[1], x9\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
"fmla v20.4s, v6.4s, v2.s[1]\n"
- "ldr d6, [x15, #0xa0]\n"
+ "ldr x26, [x15, #0x88]\n"
"fmla v9.4s, v7.4s, v0.s[1]\n"
- "mov v6.d[1], x28\n"
+ "ldr d5, [x15, #0x90]\n"
"fmla v15.4s, v7.4s, v1.s[1]\n"
- "ldr x10, [x15, #0xc8]\n"
+ "ldr x9, [x15, #0x98]\n"
"fmla v21.4s, v7.4s, v2.s[1]\n"
- "ldr d7, [x15, #0xb0]\n"
- "mov v7.d[1], x27\n"
+ "mov v4.d[1], x26\n"
+ "ldr d6, [x15, #0xa0]\n"
+ "sub x11, x11, #0x4\n"
"fmla v10.4s, v4.4s, v0.s[1]\n"
+ "mov v5.d[1], x9\n"
"fmla v16.4s, v4.4s, v1.s[1]\n"
- "ldr x9, [x15, #0xd8]\n"
+ "ldr x28, [x15, #0xa8]\n"
"fmla v22.4s, v4.4s, v2.s[1]\n"
- "ldr d4, [x15, #0xc0]\n"
+ "ldr d7, [x15, #0xb0]\n"
"fmla v11.4s, v5.4s, v0.s[1]\n"
- "ldr x28, [x15, #0xe8]\n"
+ "ldr x27, [x15, #0xb8]\n"
"fmla v17.4s, v5.4s, v1.s[1]\n"
- "ldr x27, [x15, #0xf8]\n"
+ "mov v6.d[1], x28\n"
"fmla v23.4s, v5.4s, v2.s[1]\n"
- "ldr d5, [x15, #0xd0]\n"
+ "ldr d4, [x15, #0xc0]\n"
"fmla v12.4s, v6.4s, v0.s[1]\n"
- "mov v4.d[1], x10\n"
+ "mov v7.d[1], x27\n"
"fmla v18.4s, v6.4s, v1.s[1]\n"
- "mov v5.d[1], x9\n"
+ "ldr x26, [x15, #0xc8]\n"
"fmla v24.4s, v6.4s, v2.s[1]\n"
- "ldr d6, [x15, #0xe0]\n"
+ "ldr d5, [x15, #0xd0]\n"
"fmla v13.4s, v7.4s, v0.s[1]\n"
- "mov v6.d[1], x28\n"
+ "ldr x9, [x15, #0xd8]\n"
"fmla v19.4s, v7.4s, v1.s[1]\n"
- "ldr x10, [x15, #0x108]\n"
+ "mov v4.d[1], x26\n"
"fmla v25.4s, v7.4s, v2.s[1]\n"
- "ldr d7, [x15, #0xf0]\n"
- "mov v7.d[1], x27\n"
+ "ldr d6, [x15, #0xe0]\n"
"fmla v8.4s, v4.4s, v0.s[2]\n"
+ "mov v5.d[1], x9\n"
"fmla v14.4s, v4.4s, v1.s[2]\n"
- "ldr x9, [x15, #0x118]\n"
+ "ldr x28, [x15, #0xe8]\n"
"fmla v20.4s, v4.4s, v2.s[2]\n"
- "ldr d4, [x15, #0x100]\n"
+ "ldr d7, [x15, #0xf0]\n"
"fmla v9.4s, v5.4s, v0.s[2]\n"
- "ldr x28, [x15, #0x128]\n"
+ "ldr x27, [x15, #0xf8]\n"
"fmla v15.4s, v5.4s, v1.s[2]\n"
- "ldr x27, [x15, #0x138]\n"
+ "mov v6.d[1], x28\n"
"fmla v21.4s, v5.4s, v2.s[2]\n"
- "ldr d5, [x15, #0x110]\n"
+ "ldr d4, [x15, #0x100]\n"
"fmla v10.4s, v6.4s, v0.s[2]\n"
- "mov v4.d[1], x10\n"
+ "mov v7.d[1], x27\n"
"fmla v16.4s, v6.4s, v1.s[2]\n"
- "mov v5.d[1], x9\n"
+ "ldr x26, [x15, #0x108]\n"
"fmla v22.4s, v6.4s, v2.s[2]\n"
- "ldr d6, [x15, #0x120]\n"
+ "ldr d5, [x15, #0x110]\n"
"fmla v11.4s, v7.4s, v0.s[2]\n"
- "mov v6.d[1], x28\n"
+ "ldr x9, [x15, #0x118]\n"
"fmla v17.4s, v7.4s, v1.s[2]\n"
- "ldr x10, [x15, #0x148]\n"
+ "mov v4.d[1], x26\n"
"fmla v23.4s, v7.4s, v2.s[2]\n"
- "ldr d7, [x15, #0x130]\n"
- "mov v7.d[1], x27\n"
+ "ldr d6, [x15, #0x120]\n"
"fmla v12.4s, v4.4s, v0.s[2]\n"
+ "mov v5.d[1], x9\n"
"fmla v18.4s, v4.4s, v1.s[2]\n"
- "ldr x9, [x15, #0x158]\n"
+ "ldr x28, [x15, #0x128]\n"
"fmla v24.4s, v4.4s, v2.s[2]\n"
- "ldr d4, [x15, #0x140]\n"
+ "ldr d7, [x15, #0x130]\n"
"fmla v13.4s, v5.4s, v0.s[2]\n"
- "ldr x28, [x15, #0x168]\n"
+ "ldr x27, [x15, #0x138]\n"
"fmla v19.4s, v5.4s, v1.s[2]\n"
- "ldr x27, [x15, #0x178]\n"
+ "mov v6.d[1], x28\n"
"fmla v25.4s, v5.4s, v2.s[2]\n"
- "ldr d5, [x15, #0x150]\n"
+ "ldr d4, [x15, #0x140]\n"
"fmla v8.4s, v6.4s, v0.s[3]\n"
- "mov v4.d[1], x10\n"
+ "mov v7.d[1], x27\n"
"fmla v14.4s, v6.4s, v1.s[3]\n"
- "mov v5.d[1], x9\n"
+ "ldr x26, [x15, #0x148]\n"
"fmla v20.4s, v6.4s, v2.s[3]\n"
- "ldr d6, [x15, #0x160]\n"
+ "ldr d5, [x15, #0x150]\n"
"fmla v9.4s, v7.4s, v0.s[3]\n"
- "mov v6.d[1], x28\n"
+ "ldr x9, [x15, #0x158]\n"
"fmla v15.4s, v7.4s, v1.s[3]\n"
- "add x11, x11, #0x10\n"
+ "mov v4.d[1], x26\n"
"fmla v21.4s, v7.4s, v2.s[3]\n"
- "ldr d7, [x15, #0x170]\n"
- "mov v7.d[1], x27\n"
- "add x25, x25, #0x10\n"
- "add x23, x23, #0x10\n"
- "add x15, x15, #0x180\n"
+ "ldr d6, [x15, #0x160]\n"
"fmla v10.4s, v4.4s, v0.s[3]\n"
- "ldr x10, [x15, #0x8]\n"
+ "mov v5.d[1], x9\n"
"fmla v16.4s, v4.4s, v1.s[3]\n"
- "ldr x9, [x15, #0x18]\n"
+ "ldr x28, [x15, #0x168]\n"
"fmla v22.4s, v4.4s, v2.s[3]\n"
- "ldr d4, [x15, #0x0]\n"
+ "ldr d7, [x15, #0x170]\n"
"fmla v11.4s, v5.4s, v0.s[3]\n"
- "ldr x28, [x15, #0x28]\n"
+ "ldr x27, [x15, #0x178]\n"
"fmla v17.4s, v5.4s, v1.s[3]\n"
- "ldr x26, [x11, #0x8]\n"
+ "mov v6.d[1], x28\n"
"fmla v23.4s, v5.4s, v2.s[3]\n"
- "ldr d5, [x15, #0x10]\n"
+ "ldr x25, [x10, #0x8]\n"
"fmla v12.4s, v6.4s, v0.s[3]\n"
- "ldr x24, [x25, #0x8]\n"
+ "mov v7.d[1], x27\n"
"fmla v18.4s, v6.4s, v1.s[3]\n"
- "ldr x22, [x23, #0x8]\n"
+ "ldr x23, [x24, #0x8]\n"
"fmla v24.4s, v6.4s, v2.s[3]\n"
- "ldr d6, [x15, #0x20]\n"
+ "ldr x21, [x22, #0x8]\n"
"fmla v13.4s, v7.4s, v0.s[3]\n"
- "ldr d0, [x11, #0x0]\n"
+ "ldr d0, [x10, #0x0]\n"
"fmla v19.4s, v7.4s, v1.s[3]\n"
- "ldr d1, [x25, #0x0]\n"
+ "ldr d1, [x24, #0x0]\n"
"fmla v25.4s, v7.4s, v2.s[3]\n"
- "ldr d2, [x23, #0x0]\n"
- "ldr d7, [x15, #0x30]\n"
- "sub x12, x12, #0x4\n"
- "ldr x27, [x15, #0x38]\n"
- "cmp x12, #0x8\n"
- "prfm pldl1keep, [x11, #0x80]\n"
- "mov v4.d[1], x10\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "mov v5.d[1], x9\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "mov v6.d[1], x28\n"
- "mov v0.d[1], x26\n"
- "mov v1.d[1], x24\n"
- "mov v2.d[1], x22\n"
- "mov v7.d[1], x27\n"
+ "ldr d2, [x22, #0x0]\n"
+ "mov v0.d[1], x25\n"
+ "cmp x11, #0x8\n"
+ "mov v1.d[1], x23\n"
+ "add x15, x15, #0x180\n"
+ "mov v2.d[1], x21\n"
+ "ldr d4, [x15, #0x0]\n"
+ "ldr x26, [x15, #0x8]\n"
+ "mov v4.d[1], x26\n"
"bge 104b\n"
"105:" // Height 3: Multiply loop: Single iteration only
"fmla v8.4s, v4.4s, v0.s[0]\n"
- "add x11, x11, #0x10\n"
+ "ldr q5, [x15, #0x10]\n"
"fmla v14.4s, v4.4s, v1.s[0]\n"
- "add x25, x25, #0x10\n"
+ "ldr q6, [x15, #0x20]\n"
"fmla v20.4s, v4.4s, v2.s[0]\n"
- "ldr q4, [x15, #0x40]\n"
+ "ldr q7, [x15, #0x30]\n"
"fmla v9.4s, v5.4s, v0.s[0]\n"
- "add x23, x23, #0x10\n"
+ "ldr q4, [x15, #0x40]\n"
"fmla v15.4s, v5.4s, v1.s[0]\n"
- "sub x12, x12, #0x4\n"
+ "sub x11, x11, #0x4\n"
"fmla v21.4s, v5.4s, v2.s[0]\n"
"ldr q5, [x15, #0x50]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
- "prfm pldl1keep, [x11, #0x80]\n"
+ "add x10, x10, #0x10\n"
"fmla v16.4s, v6.4s, v1.s[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x10, #0x80]\n"
"fmla v22.4s, v6.4s, v2.s[0]\n"
"ldr q6, [x15, #0x60]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "add x24, x24, #0x10\n"
"fmla v17.4s, v7.4s, v1.s[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v23.4s, v7.4s, v2.s[0]\n"
"ldr q7, [x15, #0x70]\n"
"fmla v12.4s, v4.4s, v0.s[0]\n"
+ "add x22, x22, #0x10\n"
"fmla v18.4s, v4.4s, v1.s[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
"fmla v24.4s, v4.4s, v2.s[0]\n"
"ldr q4, [x15, #0x80]\n"
"fmla v13.4s, v5.4s, v0.s[0]\n"
@@ -1634,21 +1634,21 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"fmla v19.4s, v7.4s, v1.s[3]\n"
"fmla v25.4s, v7.4s, v2.s[3]\n"
"106:" // Height 3: Multiply loop: Main loop skip
- "cbz x12, 108f\n"
+ "cbz x11, 108f\n"
"107:" // Height 3: Multiply loop: Odd block loop
- "ldr s0, [x11], #0x4\n"
- "sub x12, x12, #0x1\n"
- "ldr s1, [x25], #0x4\n"
- "ldr s2, [x23], #0x4\n"
+ "ldr s0, [x10], #0x4\n"
+ "sub x11, x11, #0x1\n"
+ "ldr s1, [x24], #0x4\n"
+ "ldr s2, [x22], #0x4\n"
"ldr q4, [x15, #0x0]\n"
- "fmla v8.4s, v4.4s, v0.s[0]\n"
"ldr q5, [x15, #0x10]\n"
- "fmla v14.4s, v4.4s, v1.s[0]\n"
"ldr q6, [x15, #0x20]\n"
- "fmla v20.4s, v4.4s, v2.s[0]\n"
+ "fmla v8.4s, v4.4s, v0.s[0]\n"
"ldr q7, [x15, #0x30]\n"
- "fmla v9.4s, v5.4s, v0.s[0]\n"
+ "fmla v14.4s, v4.4s, v1.s[0]\n"
+ "fmla v20.4s, v4.4s, v2.s[0]\n"
"ldr q4, [x15, #0x40]\n"
+ "fmla v9.4s, v5.4s, v0.s[0]\n"
"fmla v15.4s, v5.4s, v1.s[0]\n"
"fmla v21.4s, v5.4s, v2.s[0]\n"
"ldr q5, [x15, #0x50]\n"
@@ -1665,21 +1665,23 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"fmla v13.4s, v5.4s, v0.s[0]\n"
"fmla v19.4s, v5.4s, v1.s[0]\n"
"fmla v25.4s, v5.4s, v2.s[0]\n"
- "cbnz x12, 107b\n"
+ "cbnz x11, 107b\n"
"108:" // Height 3: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x13, x13, #0x1\n"
- "cmp x13, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x12, x12, #0x1\n"
+ "cmp x12, x19\n"
"bne 101b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x14, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "prfm pstl1keep, [x14, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x13, #0x0]\n"
+ "add x23, x13, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
"tbz %x[flags], #1, 109f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x20, %x[args_ptr], %[offset_min]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x20]\n"
+ "ld1r { v0.4s }, [x19]\n"
"fmin v8.4s, v8.4s, v0.4s\n"
"fmin v9.4s, v9.4s, v0.4s\n"
"fmin v10.4s, v10.4s, v0.4s\n"
@@ -1690,6 +1692,16 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"fmin v15.4s, v15.4s, v0.4s\n"
"fmin v16.4s, v16.4s, v0.4s\n"
"fmin v17.4s, v17.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmax v15.4s, v15.4s, v1.4s\n"
+ "fmax v16.4s, v16.4s, v1.4s\n"
+ "fmax v17.4s, v17.4s, v1.4s\n"
"fmin v18.4s, v18.4s, v0.4s\n"
"fmin v19.4s, v19.4s, v0.4s\n"
"fmin v20.4s, v20.4s, v0.4s\n"
@@ -1698,34 +1710,22 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"fmin v23.4s, v23.4s, v0.4s\n"
"fmin v24.4s, v24.4s, v0.4s\n"
"fmin v25.4s, v25.4s, v0.4s\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v15.4s, v15.4s, v0.4s\n"
- "fmax v16.4s, v16.4s, v0.4s\n"
- "fmax v17.4s, v17.4s, v0.4s\n"
- "fmax v18.4s, v18.4s, v0.4s\n"
- "fmax v19.4s, v19.4s, v0.4s\n"
- "fmax v20.4s, v20.4s, v0.4s\n"
- "fmax v21.4s, v21.4s, v0.4s\n"
- "fmax v22.4s, v22.4s, v0.4s\n"
- "fmax v23.4s, v23.4s, v0.4s\n"
- "fmax v24.4s, v24.4s, v0.4s\n"
- "fmax v25.4s, v25.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v1.4s\n"
+ "fmax v19.4s, v19.4s, v1.4s\n"
+ "fmax v20.4s, v20.4s, v1.4s\n"
+ "fmax v21.4s, v21.4s, v1.4s\n"
+ "fmax v22.4s, v22.4s, v1.4s\n"
+ "fmax v23.4s, v23.4s, v1.4s\n"
+ "fmax v24.4s, v24.4s, v1.4s\n"
+ "fmax v25.4s, v25.4s, v1.4s\n"
"109:" // Height 3: No activation
"cmp x16, #0x18\n"
"bge 122f\n"
"tbz x16, #4, 113f\n"
- "st1 { v8.4s }, [x14], #0x10\n"
- "st1 { v9.4s }, [x14], #0x10\n"
- "st1 { v10.4s }, [x14], #0x10\n"
- "st1 { v11.4s }, [x14], #0x10\n"
+ "st1 { v8.4s }, [x13], #0x10\n"
+ "st1 { v9.4s }, [x13], #0x10\n"
+ "st1 { v10.4s }, [x13], #0x10\n"
+ "st1 { v11.4s }, [x13], #0x10\n"
"st1 { v14.4s }, [x23], #0x10\n"
"st1 { v15.4s }, [x23], #0x10\n"
"st1 { v16.4s }, [x23], #0x10\n"
@@ -1735,127 +1735,127 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"st1 { v22.4s }, [x22], #0x10\n"
"st1 { v23.4s }, [x22], #0x10\n"
"tbz x16, #2, 111f\n"
- "st1 { v12.4s }, [x14], #0x10\n"
+ "st1 { v12.4s }, [x13], #0x10\n"
"st1 { v18.4s }, [x23], #0x10\n"
"st1 { v24.4s }, [x22], #0x10\n"
"tbz x16, #1, 110f\n"
- "str d13, [x14], #0x8\n"
+ "str d13, [x13], #0x8\n"
"str d19, [x23], #0x8\n"
"str d25, [x22], #0x8\n"
"tbz x16, #0, 121f\n"
- "st1 { v13.s }[2], [x14]\n"
+ "st1 { v13.s }[2], [x13]\n"
"st1 { v19.s }[2], [x23]\n"
"st1 { v25.s }[2], [x22]\n"
"b 121f\n"
"110:" // Height 3: Partial direct writeback: partial_1_20
"tbz x16, #0, 121f\n"
- "str s13, [x14, #0x0]\n"
+ "str s13, [x13, #0x0]\n"
"str s19, [x23, #0x0]\n"
"str s25, [x22, #0x0]\n"
"b 121f\n"
"111:" // Height 3: Partial direct writeback: partial_2_16
"tbz x16, #1, 112f\n"
- "str d12, [x14], #0x8\n"
+ "str d12, [x13], #0x8\n"
"str d18, [x23], #0x8\n"
"str d24, [x22], #0x8\n"
"tbz x16, #0, 121f\n"
- "st1 { v12.s }[2], [x14]\n"
+ "st1 { v12.s }[2], [x13]\n"
"st1 { v18.s }[2], [x23]\n"
"st1 { v24.s }[2], [x22]\n"
"b 121f\n"
"112:" // Height 3: Partial direct writeback: partial_1_16
"tbz x16, #0, 121f\n"
- "str s12, [x14, #0x0]\n"
+ "str s12, [x13, #0x0]\n"
"str s18, [x23, #0x0]\n"
"str s24, [x22, #0x0]\n"
"b 121f\n"
"113:" // Height 3: Partial direct writeback: partial_8_0
"tbz x16, #3, 117f\n"
- "st1 { v8.4s }, [x14], #0x10\n"
- "st1 { v9.4s }, [x14], #0x10\n"
+ "st1 { v8.4s }, [x13], #0x10\n"
+ "st1 { v9.4s }, [x13], #0x10\n"
"st1 { v14.4s }, [x23], #0x10\n"
"st1 { v15.4s }, [x23], #0x10\n"
"st1 { v20.4s }, [x22], #0x10\n"
"st1 { v21.4s }, [x22], #0x10\n"
"tbz x16, #2, 115f\n"
- "st1 { v10.4s }, [x14], #0x10\n"
+ "st1 { v10.4s }, [x13], #0x10\n"
"st1 { v16.4s }, [x23], #0x10\n"
"st1 { v22.4s }, [x22], #0x10\n"
"tbz x16, #1, 114f\n"
- "str d11, [x14], #0x8\n"
+ "str d11, [x13], #0x8\n"
"str d17, [x23], #0x8\n"
"str d23, [x22], #0x8\n"
"tbz x16, #0, 121f\n"
- "st1 { v11.s }[2], [x14]\n"
+ "st1 { v11.s }[2], [x13]\n"
"st1 { v17.s }[2], [x23]\n"
"st1 { v23.s }[2], [x22]\n"
"b 121f\n"
"114:" // Height 3: Partial direct writeback: partial_1_12
"tbz x16, #0, 121f\n"
- "str s11, [x14, #0x0]\n"
+ "str s11, [x13, #0x0]\n"
"str s17, [x23, #0x0]\n"
"str s23, [x22, #0x0]\n"
"b 121f\n"
"115:" // Height 3: Partial direct writeback: partial_2_8
"tbz x16, #1, 116f\n"
- "str d10, [x14], #0x8\n"
+ "str d10, [x13], #0x8\n"
"str d16, [x23], #0x8\n"
"str d22, [x22], #0x8\n"
"tbz x16, #0, 121f\n"
- "st1 { v10.s }[2], [x14]\n"
+ "st1 { v10.s }[2], [x13]\n"
"st1 { v16.s }[2], [x23]\n"
"st1 { v22.s }[2], [x22]\n"
"b 121f\n"
"116:" // Height 3: Partial direct writeback: partial_1_8
"tbz x16, #0, 121f\n"
- "str s10, [x14, #0x0]\n"
+ "str s10, [x13, #0x0]\n"
"str s16, [x23, #0x0]\n"
"str s22, [x22, #0x0]\n"
"b 121f\n"
"117:" // Height 3: Partial direct writeback: partial_4_0
"tbz x16, #2, 119f\n"
- "st1 { v8.4s }, [x14], #0x10\n"
+ "st1 { v8.4s }, [x13], #0x10\n"
"st1 { v14.4s }, [x23], #0x10\n"
"st1 { v20.4s }, [x22], #0x10\n"
"tbz x16, #1, 118f\n"
- "str d9, [x14], #0x8\n"
+ "str d9, [x13], #0x8\n"
"str d15, [x23], #0x8\n"
"str d21, [x22], #0x8\n"
"tbz x16, #0, 121f\n"
- "st1 { v9.s }[2], [x14]\n"
+ "st1 { v9.s }[2], [x13]\n"
"st1 { v15.s }[2], [x23]\n"
"st1 { v21.s }[2], [x22]\n"
"b 121f\n"
"118:" // Height 3: Partial direct writeback: partial_1_4
"tbz x16, #0, 121f\n"
- "str s9, [x14, #0x0]\n"
+ "str s9, [x13, #0x0]\n"
"str s15, [x23, #0x0]\n"
"str s21, [x22, #0x0]\n"
"b 121f\n"
"119:" // Height 3: Partial direct writeback: partial_2_0
"tbz x16, #1, 120f\n"
- "str d8, [x14], #0x8\n"
+ "str d8, [x13], #0x8\n"
"str d14, [x23], #0x8\n"
"str d20, [x22], #0x8\n"
"tbz x16, #0, 121f\n"
- "st1 { v8.s }[2], [x14]\n"
+ "st1 { v8.s }[2], [x13]\n"
"st1 { v14.s }[2], [x23]\n"
"st1 { v20.s }[2], [x22]\n"
"b 121f\n"
"120:" // Height 3: Partial direct writeback: partial_1_0
- "str s8, [x14, #0x0]\n"
+ "str s8, [x13, #0x0]\n"
"str s14, [x23, #0x0]\n"
"str s20, [x22, #0x0]\n"
"121:" // Height 3: Partial direct writeback: Done
"b 123f\n"
"122:" // Height 3: Full writeback
- "str q8, [x14, #0x0]\n"
- "str q9, [x14, #0x10]\n"
- "str q10, [x14, #0x20]\n"
- "str q11, [x14, #0x30]\n"
- "str q12, [x14, #0x40]\n"
- "str q13, [x14, #0x50]\n"
- "add x14, x14, #0x60\n"
+ "str q8, [x13, #0x0]\n"
+ "str q9, [x13, #0x10]\n"
+ "str q10, [x13, #0x20]\n"
+ "str q11, [x13, #0x30]\n"
+ "str q12, [x13, #0x40]\n"
+ "str q13, [x13, #0x50]\n"
+ "add x13, x13, #0x60\n"
"str q14, [x23, #0x0]\n"
"str q15, [x23, #0x10]\n"
"str q16, [x23, #0x20]\n"
@@ -1873,29 +1873,29 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"bgt 84b\n"
"b 166f\n"
"124:" // Height 4
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x10\n"
- "mov x17, %x[bias]\n"
"ldr x16, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x14, %x[bias]\n"
"ldr x15, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x14, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "mov x13, %x[output_ptr]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x19, #0x10\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"125:" // Height 4: Column loop
- "cbz x17, 126f\n"
- "ldr q8, [x17, #0x0]\n"
+ "cbz x14, 126f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
+ "ldr q10, [x14, #0x20]\n"
"mov v14.16b, v8.16b\n"
- "ldr q9, [x17, #0x10]\n"
+ "ldr q11, [x14, #0x30]\n"
"mov v15.16b, v9.16b\n"
- "ldr q10, [x17, #0x20]\n"
+ "ldr q12, [x14, #0x40]\n"
"mov v16.16b, v10.16b\n"
- "ldr q11, [x17, #0x30]\n"
+ "ldr q13, [x14, #0x50]\n"
"mov v17.16b, v11.16b\n"
- "ldr q12, [x17, #0x40]\n"
+ "add x14, x14, #0x60\n"
"mov v18.16b, v12.16b\n"
- "ldr q13, [x17, #0x50]\n"
"mov v19.16b, v13.16b\n"
"mov v20.16b, v8.16b\n"
- "add x17, x17, #0x60\n"
"mov v21.16b, v9.16b\n"
"mov v22.16b, v10.16b\n"
"mov v23.16b, v11.16b\n"
@@ -1910,186 +1910,186 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"b 141f\n"
"126:" // Height 4: no bias
"tbz %x[flags], #0, 140f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x14, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x16, #0x18\n"
- "add x21, x22, x20, LSL #2\n"
+ "add x23, x13, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"bge 139f\n"
"tbz x16, #4, 130f\n"
- "ld1 { v8.4s }, [x14], #0x10\n"
+ "ld1 { v8.4s }, [x13], #0x10\n"
"ld1 { v14.4s }, [x23], #0x10\n"
"ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
- "ld1 { v9.4s }, [x14], #0x10\n"
+ "ld1 { v9.4s }, [x13], #0x10\n"
"ld1 { v15.4s }, [x23], #0x10\n"
"ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v27.4s }, [x21], #0x10\n"
- "ld1 { v10.4s }, [x14], #0x10\n"
+ "ld1 { v10.4s }, [x13], #0x10\n"
"ld1 { v16.4s }, [x23], #0x10\n"
"ld1 { v22.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
- "ld1 { v11.4s }, [x14], #0x10\n"
+ "ld1 { v11.4s }, [x13], #0x10\n"
"ld1 { v17.4s }, [x23], #0x10\n"
"ld1 { v23.4s }, [x22], #0x10\n"
+ "ld1 { v26.4s }, [x21], #0x10\n"
+ "ld1 { v27.4s }, [x21], #0x10\n"
+ "ld1 { v28.4s }, [x21], #0x10\n"
"ld1 { v29.4s }, [x21], #0x10\n"
"tbz x16, #2, 128f\n"
- "ld1 { v12.4s }, [x14], #0x10\n"
+ "ld1 { v12.4s }, [x13], #0x10\n"
"ld1 { v18.4s }, [x23], #0x10\n"
"ld1 { v24.4s }, [x22], #0x10\n"
"ld1 { v30.4s }, [x21], #0x10\n"
"tbz x16, #1, 127f\n"
- "ldr d13, [x14], #0x8\n"
- "mov x20, #0x58\n"
+ "ldr d13, [x13], #0x8\n"
+ "mov x19, #0x58\n"
"ldr d19, [x23], #0x8\n"
"ldr d25, [x22], #0x8\n"
"ldr d31, [x21], #0x8\n"
"tbz x16, #0, 138f\n"
- "ld1 { v13.s }[2], [x14]\n"
+ "ld1 { v13.s }[2], [x13]\n"
"ld1 { v19.s }[2], [x23]\n"
"ld1 { v25.s }[2], [x22]\n"
"ld1 { v31.s }[2], [x21]\n"
"b 138f\n"
"127:" // Height 4: Partial accumulate: partial_1_20
- "mov x20, #0x50\n"
+ "mov x19, #0x50\n"
"tbz x16, #0, 138f\n"
- "ldr s13, [x14, #0x0]\n"
+ "ldr s13, [x13, #0x0]\n"
"ldr s19, [x23, #0x0]\n"
"ldr s25, [x22, #0x0]\n"
"ldr s31, [x21, #0x0]\n"
"b 138f\n"
"128:" // Height 4: Partial accumulate: partial_2_16
"tbz x16, #1, 129f\n"
- "ldr d12, [x14], #0x8\n"
- "mov x20, #0x48\n"
+ "ldr d12, [x13], #0x8\n"
"ldr d18, [x23], #0x8\n"
+ "mov x19, #0x48\n"
"ldr d24, [x22], #0x8\n"
"ldr d30, [x21], #0x8\n"
"tbz x16, #0, 138f\n"
- "ld1 { v12.s }[2], [x14]\n"
+ "ld1 { v12.s }[2], [x13]\n"
"ld1 { v18.s }[2], [x23]\n"
"ld1 { v24.s }[2], [x22]\n"
"ld1 { v30.s }[2], [x21]\n"
"b 138f\n"
"129:" // Height 4: Partial accumulate: partial_1_16
- "mov x20, #0x40\n"
+ "mov x19, #0x40\n"
"tbz x16, #0, 138f\n"
- "ldr s12, [x14, #0x0]\n"
+ "ldr s12, [x13, #0x0]\n"
"ldr s18, [x23, #0x0]\n"
"ldr s24, [x22, #0x0]\n"
"ldr s30, [x21, #0x0]\n"
"b 138f\n"
"130:" // Height 4: Partial accumulate: partial_8_0
"tbz x16, #3, 134f\n"
- "ld1 { v8.4s }, [x14], #0x10\n"
+ "ld1 { v8.4s }, [x13], #0x10\n"
"ld1 { v14.4s }, [x23], #0x10\n"
"ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
- "ld1 { v9.4s }, [x14], #0x10\n"
+ "ld1 { v9.4s }, [x13], #0x10\n"
"ld1 { v15.4s }, [x23], #0x10\n"
"ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v26.4s }, [x21], #0x10\n"
"ld1 { v27.4s }, [x21], #0x10\n"
"tbz x16, #2, 132f\n"
- "ld1 { v10.4s }, [x14], #0x10\n"
+ "ld1 { v10.4s }, [x13], #0x10\n"
"ld1 { v16.4s }, [x23], #0x10\n"
"ld1 { v22.4s }, [x22], #0x10\n"
"ld1 { v28.4s }, [x21], #0x10\n"
"tbz x16, #1, 131f\n"
- "ldr d11, [x14], #0x8\n"
- "mov x20, #0x38\n"
+ "ldr d11, [x13], #0x8\n"
+ "mov x19, #0x38\n"
"ldr d17, [x23], #0x8\n"
"ldr d23, [x22], #0x8\n"
"ldr d29, [x21], #0x8\n"
"tbz x16, #0, 138f\n"
- "ld1 { v11.s }[2], [x14]\n"
+ "ld1 { v11.s }[2], [x13]\n"
"ld1 { v17.s }[2], [x23]\n"
"ld1 { v23.s }[2], [x22]\n"
"ld1 { v29.s }[2], [x21]\n"
"b 138f\n"
"131:" // Height 4: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x16, #0, 138f\n"
- "ldr s11, [x14, #0x0]\n"
+ "ldr s11, [x13, #0x0]\n"
"ldr s17, [x23, #0x0]\n"
"ldr s23, [x22, #0x0]\n"
"ldr s29, [x21, #0x0]\n"
"b 138f\n"
"132:" // Height 4: Partial accumulate: partial_2_8
"tbz x16, #1, 133f\n"
- "ldr d10, [x14], #0x8\n"
- "mov x20, #0x28\n"
+ "ldr d10, [x13], #0x8\n"
"ldr d16, [x23], #0x8\n"
+ "mov x19, #0x28\n"
"ldr d22, [x22], #0x8\n"
"ldr d28, [x21], #0x8\n"
"tbz x16, #0, 138f\n"
- "ld1 { v10.s }[2], [x14]\n"
+ "ld1 { v10.s }[2], [x13]\n"
"ld1 { v16.s }[2], [x23]\n"
"ld1 { v22.s }[2], [x22]\n"
"ld1 { v28.s }[2], [x21]\n"
"b 138f\n"
"133:" // Height 4: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x16, #0, 138f\n"
- "ldr s10, [x14, #0x0]\n"
+ "ldr s10, [x13, #0x0]\n"
"ldr s16, [x23, #0x0]\n"
"ldr s22, [x22, #0x0]\n"
"ldr s28, [x21, #0x0]\n"
"b 138f\n"
"134:" // Height 4: Partial accumulate: partial_4_0
"tbz x16, #2, 136f\n"
- "ld1 { v8.4s }, [x14], #0x10\n"
+ "ld1 { v8.4s }, [x13], #0x10\n"
"ld1 { v14.4s }, [x23], #0x10\n"
"ld1 { v20.4s }, [x22], #0x10\n"
"ld1 { v26.4s }, [x21], #0x10\n"
"tbz x16, #1, 135f\n"
- "ldr d9, [x14], #0x8\n"
- "mov x20, #0x18\n"
+ "ldr d9, [x13], #0x8\n"
+ "mov x19, #0x18\n"
"ldr d15, [x23], #0x8\n"
"ldr d21, [x22], #0x8\n"
"ldr d27, [x21], #0x8\n"
"tbz x16, #0, 138f\n"
- "ld1 { v9.s }[2], [x14]\n"
+ "ld1 { v9.s }[2], [x13]\n"
"ld1 { v15.s }[2], [x23]\n"
"ld1 { v21.s }[2], [x22]\n"
"ld1 { v27.s }[2], [x21]\n"
"b 138f\n"
"135:" // Height 4: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x16, #0, 138f\n"
- "ldr s9, [x14, #0x0]\n"
+ "ldr s9, [x13, #0x0]\n"
"ldr s15, [x23, #0x0]\n"
"ldr s21, [x22, #0x0]\n"
"ldr s27, [x21, #0x0]\n"
"b 138f\n"
"136:" // Height 4: Partial accumulate: partial_2_0
"tbz x16, #1, 137f\n"
- "ldr d8, [x14], #0x8\n"
- "mov x20, #0x8\n"
+ "ldr d8, [x13], #0x8\n"
"ldr d14, [x23], #0x8\n"
+ "mov x19, #0x8\n"
"ldr d20, [x22], #0x8\n"
"ldr d26, [x21], #0x8\n"
"tbz x16, #0, 138f\n"
- "ld1 { v8.s }[2], [x14]\n"
+ "ld1 { v8.s }[2], [x13]\n"
"ld1 { v14.s }[2], [x23]\n"
"ld1 { v20.s }[2], [x22]\n"
"ld1 { v26.s }[2], [x21]\n"
"b 138f\n"
"137:" // Height 4: Partial accumulate: partial_1_0
- "ldr s8, [x14, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr s8, [x13, #0x0]\n"
+ "mov x19, #0x0\n"
"ldr s14, [x23, #0x0]\n"
"ldr s20, [x22, #0x0]\n"
"ldr s26, [x21, #0x0]\n"
"138:" // Height 4: Partial accumulate: Done
- "sub x14, x14, x20\n"
+ "sub x13, x13, x19\n"
"b 141f\n"
"139:" // Height 4: full accumulate
- "ldr q8, [x14, #0x0]\n"
- "ldr q9, [x14, #0x10]\n"
- "ldr q10, [x14, #0x20]\n"
- "ldr q11, [x14, #0x30]\n"
- "ldr q12, [x14, #0x40]\n"
- "ldr q13, [x14, #0x50]\n"
+ "ldr q8, [x13, #0x0]\n"
+ "ldr q9, [x13, #0x10]\n"
+ "ldr q10, [x13, #0x20]\n"
+ "ldr q11, [x13, #0x30]\n"
+ "ldr q12, [x13, #0x40]\n"
+ "ldr q13, [x13, #0x50]\n"
"ldr q14, [x23, #0x0]\n"
"ldr q15, [x23, #0x10]\n"
"ldr q16, [x23, #0x20]\n"
@@ -2135,264 +2135,264 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"movi v30.16b, #0x0\n"
"movi v31.16b, #0x0\n"
"141:" // Height 4: setup done
- "mov x13, #0x0\n"
+ "mov x12, #0x0\n"
"142:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w12, [x20, x13, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w11, [x20, x12, LSL #0x2]\n"
"tbz %x[flags], #3, 143f\n"
- "ldr x21, [%x[input_ptr], x13, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x11, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x23, [x21, #0x10]\n"
- "ldr x21, [x21, #0x18]\n"
- "cbnz x13, 144f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x11, x11, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x21, x21, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x12, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x10, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x22, [x20, #0x10]\n"
+ "ldr x20, [x20, #0x18]\n"
+ "cbnz x12, 144f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x10, x10, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x20, x20, x19, LSL #2\n"
"b 144f\n"
"143:" // Height 4: setup direct input
- "mov x11, %x[input_ptr]\n"
- "add x25, x11, x20, LSL #2\n"
- "add x23, x25, x20, LSL #2\n"
- "add x21, x23, x20, LSL #2\n"
+ "mov x10, %x[input_ptr]\n"
+ "add x24, x10, x19, LSL #2\n"
+ "add x22, x24, x19, LSL #2\n"
+ "add x20, x22, x19, LSL #2\n"
"144:" // Height 4: input setup done
- "cmp x12, #0x4\n"
+ "cmp x11, #0x4\n"
"blt 147f\n"
- "ldr q0, [x11, #0x0]\n"
- "cmp x12, #0x8\n"
- "ldr q1, [x25, #0x0]\n"
- "ldr q2, [x23, #0x0]\n"
- "ldr q3, [x21, #0x0]\n"
+ "ldr q0, [x10, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x11, #0x8\n"
+ "ldr q2, [x22, #0x0]\n"
+ "ldr q3, [x20, #0x0]\n"
"ldr q4, [x15, #0x0]\n"
- "ldr q5, [x15, #0x10]\n"
- "ldr q6, [x15, #0x20]\n"
- "ldr q7, [x15, #0x30]\n"
"blt 146f\n"
"145:" // Height 4: Multiply loop: Main loop head
"fmla v8.4s, v4.4s, v0.s[0]\n"
- "ldr x10, [x15, #0x48]\n"
+ "ldr d5, [x15, #0x10]\n"
"fmla v14.4s, v4.4s, v1.s[0]\n"
- "ldr x9, [x15, #0x58]\n"
+ "ldr x9, [x15, #0x18]\n"
"fmla v20.4s, v4.4s, v2.s[0]\n"
- "ldr x28, [x15, #0x68]\n"
+ "ldr d6, [x15, #0x20]\n"
"fmla v26.4s, v4.4s, v3.s[0]\n"
- "ldr d4, [x15, #0x40]\n"
+ "ldr x28, [x15, #0x28]\n"
+ "mov v5.d[1], x9\n"
+ "ldr d7, [x15, #0x30]\n"
+ "ldr x27, [x15, #0x38]\n"
+ "add x10, x10, #0x10\n"
"fmla v9.4s, v5.4s, v0.s[0]\n"
- "ldr x27, [x15, #0x78]\n"
+ "mov v6.d[1], x28\n"
"fmla v15.4s, v5.4s, v1.s[0]\n"
- "mov v4.d[1], x10\n"
+ "ldr d4, [x15, #0x40]\n"
"fmla v21.4s, v5.4s, v2.s[0]\n"
- "ldr x10, [x15, #0x88]\n"
+ "mov v7.d[1], x27\n"
"fmla v27.4s, v5.4s, v3.s[0]\n"
- "ldr d5, [x15, #0x50]\n"
+ "ldr x26, [x15, #0x48]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
- "mov v5.d[1], x9\n"
+ "prfm pldl1keep, [x10, #0x80]\n"
"fmla v16.4s, v6.4s, v1.s[0]\n"
- "ldr x9, [x15, #0x98]\n"
+ "ldr d5, [x15, #0x50]\n"
"fmla v22.4s, v6.4s, v2.s[0]\n"
- "add x11, x11, #0x10\n"
+ "mov v4.d[1], x26\n"
"fmla v28.4s, v6.4s, v3.s[0]\n"
- "ldr d6, [x15, #0x60]\n"
+ "ldr x9, [x15, #0x58]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
- "mov v6.d[1], x28\n"
+ "ldr d6, [x15, #0x60]\n"
"fmla v17.4s, v7.4s, v1.s[0]\n"
- "ldr x28, [x15, #0xa8]\n"
+ "ldr x28, [x15, #0x68]\n"
"fmla v23.4s, v7.4s, v2.s[0]\n"
- "add x25, x25, #0x10\n"
+ "mov v5.d[1], x9\n"
"fmla v29.4s, v7.4s, v3.s[0]\n"
"ldr d7, [x15, #0x70]\n"
- "mov v7.d[1], x27\n"
"fmla v12.4s, v4.4s, v0.s[0]\n"
+ "mov v6.d[1], x28\n"
"fmla v18.4s, v4.4s, v1.s[0]\n"
- "ldr x27, [x15, #0xb8]\n"
+ "ldr x27, [x15, #0x78]\n"
"fmla v24.4s, v4.4s, v2.s[0]\n"
- "add x23, x23, #0x10\n"
+ "ldr x26, [x15, #0x88]\n"
"fmla v30.4s, v4.4s, v3.s[0]\n"
"ldr d4, [x15, #0x80]\n"
"fmla v13.4s, v5.4s, v0.s[0]\n"
- "mov v4.d[1], x10\n"
+ "mov v7.d[1], x27\n"
"fmla v19.4s, v5.4s, v1.s[0]\n"
- "ldr x10, [x15, #0xc8]\n"
+ "ldr x9, [x15, #0x98]\n"
"fmla v25.4s, v5.4s, v2.s[0]\n"
- "add x21, x21, #0x10\n"
+ "mov v4.d[1], x26\n"
"fmla v31.4s, v5.4s, v3.s[0]\n"
"ldr d5, [x15, #0x90]\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
- "mov v5.d[1], x9\n"
+ "ldr x28, [x15, #0xa8]\n"
"fmla v14.4s, v6.4s, v1.s[1]\n"
- "ldr x9, [x15, #0xd8]\n"
+ "ldr x27, [x15, #0xb8]\n"
"fmla v20.4s, v6.4s, v2.s[1]\n"
- "ldr x26, [x11, #0x8]\n"
+ "mov v5.d[1], x9\n"
"fmla v26.4s, v6.4s, v3.s[1]\n"
"ldr d6, [x15, #0xa0]\n"
"fmla v9.4s, v7.4s, v0.s[1]\n"
- "mov v6.d[1], x28\n"
+ "ldr x26, [x15, #0xc8]\n"
"fmla v15.4s, v7.4s, v1.s[1]\n"
- "ldr x28, [x15, #0xe8]\n"
+ "ldr x9, [x15, #0xd8]\n"
"fmla v21.4s, v7.4s, v2.s[1]\n"
- "ldr x24, [x25, #0x8]\n"
+ "mov v6.d[1], x28\n"
"fmla v27.4s, v7.4s, v3.s[1]\n"
"ldr d7, [x15, #0xb0]\n"
- "mov v7.d[1], x27\n"
"fmla v10.4s, v4.4s, v0.s[1]\n"
+ "ldr x28, [x15, #0xe8]\n"
"fmla v16.4s, v4.4s, v1.s[1]\n"
- "ldr x27, [x15, #0xf8]\n"
+ "ldr x25, [x10, #0x8]\n"
"fmla v22.4s, v4.4s, v2.s[1]\n"
- "ldr x22, [x23, #0x8]\n"
+ "mov v7.d[1], x27\n"
"fmla v28.4s, v4.4s, v3.s[1]\n"
"ldr d4, [x15, #0xc0]\n"
"fmla v11.4s, v5.4s, v0.s[1]\n"
- "mov v4.d[1], x10\n"
+ "ldr x27, [x15, #0xf8]\n"
"fmla v17.4s, v5.4s, v1.s[1]\n"
- "ldr x10, [x15, #0x108]\n"
+ "add x24, x24, #0x10\n"
"fmla v23.4s, v5.4s, v2.s[1]\n"
- "ldr x20, [x21, #0x8]\n"
+ "mov v4.d[1], x26\n"
"fmla v29.4s, v5.4s, v3.s[1]\n"
- "ldr d5, [x15, #0xd0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v12.4s, v6.4s, v0.s[1]\n"
- "mov v5.d[1], x9\n"
+ "ldr d5, [x15, #0xd0]\n"
"fmla v18.4s, v6.4s, v1.s[1]\n"
- "ldr x9, [x15, #0x118]\n"
+ "ldr x26, [x15, #0x108]\n"
"fmla v24.4s, v6.4s, v2.s[1]\n"
- "sub x12, x12, #0x4\n"
+ "ldr x23, [x24, #0x8]\n"
"fmla v30.4s, v6.4s, v3.s[1]\n"
- "ldr d6, [x15, #0xe0]\n"
+ "mov v5.d[1], x9\n"
"fmla v13.4s, v7.4s, v0.s[1]\n"
- "mov v6.d[1], x28\n"
+ "ldr d6, [x15, #0xe0]\n"
"fmla v19.4s, v7.4s, v1.s[1]\n"
- "ldr x28, [x15, #0x128]\n"
+ "ldr x9, [x15, #0x118]\n"
"fmla v25.4s, v7.4s, v2.s[1]\n"
- "cmp x12, #0x8\n"
+ "add x22, x22, #0x10\n"
"fmla v31.4s, v7.4s, v3.s[1]\n"
- "ldr d7, [x15, #0xf0]\n"
- "mov v7.d[1], x27\n"
+ "mov v6.d[1], x28\n"
"fmla v8.4s, v4.4s, v0.s[2]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
"fmla v14.4s, v4.4s, v1.s[2]\n"
- "ldr x27, [x15, #0x138]\n"
+ "ldr d7, [x15, #0xf0]\n"
"fmla v20.4s, v4.4s, v2.s[2]\n"
- "prfm pldl1keep, [x11, #0x80]\n"
+ "ldr x28, [x15, #0x128]\n"
"fmla v26.4s, v4.4s, v3.s[2]\n"
"ldr d4, [x15, #0x100]\n"
"fmla v9.4s, v5.4s, v0.s[2]\n"
- "mov v4.d[1], x10\n"
+ "mov v7.d[1], x27\n"
"fmla v15.4s, v5.4s, v1.s[2]\n"
- "ldr x10, [x15, #0x148]\n"
+ "ldr x27, [x15, #0x138]\n"
"fmla v21.4s, v5.4s, v2.s[2]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "mov v4.d[1], x26\n"
"fmla v27.4s, v5.4s, v3.s[2]\n"
"ldr d5, [x15, #0x110]\n"
"fmla v10.4s, v6.4s, v0.s[2]\n"
- "mov v5.d[1], x9\n"
+ "ldr x26, [x15, #0x148]\n"
"fmla v16.4s, v6.4s, v1.s[2]\n"
- "ldr x9, [x15, #0x158]\n"
+ "ldr x21, [x22, #0x8]\n"
"fmla v22.4s, v6.4s, v2.s[2]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "mov v5.d[1], x9\n"
"fmla v28.4s, v6.4s, v3.s[2]\n"
"ldr d6, [x15, #0x120]\n"
"fmla v11.4s, v7.4s, v0.s[2]\n"
- "mov v6.d[1], x28\n"
+ "ldr x9, [x15, #0x158]\n"
"fmla v17.4s, v7.4s, v1.s[2]\n"
- "ldr x28, [x15, #0x168]\n"
+ "add x20, x20, #0x10\n"
"fmla v23.4s, v7.4s, v2.s[2]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
+ "mov v6.d[1], x28\n"
"fmla v29.4s, v7.4s, v3.s[2]\n"
- "ldr d7, [x15, #0x130]\n"
- "mov v7.d[1], x27\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
"fmla v12.4s, v4.4s, v0.s[2]\n"
+ "ldr d7, [x15, #0x130]\n"
"fmla v18.4s, v4.4s, v1.s[2]\n"
- "ldr x27, [x15, #0x178]\n"
+ "ldr x28, [x15, #0x168]\n"
"fmla v24.4s, v4.4s, v2.s[2]\n"
+ "ldr x19, [x20, #0x8]\n"
"fmla v30.4s, v4.4s, v3.s[2]\n"
- "ldr d4, [x15, #0x140]\n"
+ "mov v7.d[1], x27\n"
"fmla v13.4s, v5.4s, v0.s[2]\n"
- "mov v4.d[1], x10\n"
+ "ldr d4, [x15, #0x140]\n"
"fmla v19.4s, v5.4s, v1.s[2]\n"
+ "ldr x27, [x15, #0x178]\n"
"fmla v25.4s, v5.4s, v2.s[2]\n"
+ "sub x11, x11, #0x4\n"
"fmla v31.4s, v5.4s, v3.s[2]\n"
- "ldr d5, [x15, #0x150]\n"
+ "mov v4.d[1], x26\n"
"fmla v8.4s, v6.4s, v0.s[3]\n"
- "mov v5.d[1], x9\n"
+ "ldr d5, [x15, #0x150]\n"
"fmla v14.4s, v6.4s, v1.s[3]\n"
+ "cmp x11, #0x8\n"
"fmla v20.4s, v6.4s, v2.s[3]\n"
"fmla v26.4s, v6.4s, v3.s[3]\n"
- "ldr d6, [x15, #0x160]\n"
+ "mov v5.d[1], x9\n"
"fmla v9.4s, v7.4s, v0.s[3]\n"
- "mov v6.d[1], x28\n"
+ "ldr d6, [x15, #0x160]\n"
"fmla v15.4s, v7.4s, v1.s[3]\n"
"fmla v21.4s, v7.4s, v2.s[3]\n"
"fmla v27.4s, v7.4s, v3.s[3]\n"
- "ldr d7, [x15, #0x170]\n"
- "mov v7.d[1], x27\n"
- "add x15, x15, #0x180\n"
+ "mov v6.d[1], x28\n"
"fmla v10.4s, v4.4s, v0.s[3]\n"
- "ldr x10, [x15, #0x8]\n"
+ "ldr d7, [x15, #0x170]\n"
"fmla v16.4s, v4.4s, v1.s[3]\n"
- "ldr x9, [x15, #0x18]\n"
+ "add x15, x15, #0x180\n"
"fmla v22.4s, v4.4s, v2.s[3]\n"
- "ldr x28, [x15, #0x28]\n"
+ "ldr x26, [x15, #0x8]\n"
"fmla v28.4s, v4.4s, v3.s[3]\n"
- "ldr d4, [x15, #0x0]\n"
+ "mov v7.d[1], x27\n"
"fmla v11.4s, v5.4s, v0.s[3]\n"
- "ldr x27, [x15, #0x38]\n"
+ "ldr d4, [x15, #0x0]\n"
"fmla v17.4s, v5.4s, v1.s[3]\n"
- "mov v4.d[1], x10\n"
"fmla v23.4s, v5.4s, v2.s[3]\n"
"fmla v29.4s, v5.4s, v3.s[3]\n"
- "ldr d5, [x15, #0x10]\n"
+ "mov v4.d[1], x26\n"
"fmla v12.4s, v6.4s, v0.s[3]\n"
- "mov v5.d[1], x9\n"
"fmla v18.4s, v6.4s, v1.s[3]\n"
"fmla v24.4s, v6.4s, v2.s[3]\n"
"fmla v30.4s, v6.4s, v3.s[3]\n"
- "ldr d6, [x15, #0x20]\n"
"fmla v13.4s, v7.4s, v0.s[3]\n"
- "ldr d0, [x11, #0x0]\n"
+ "ldr d0, [x10, #0x0]\n"
"fmla v19.4s, v7.4s, v1.s[3]\n"
- "ldr d1, [x25, #0x0]\n"
+ "ldr d1, [x24, #0x0]\n"
"fmla v25.4s, v7.4s, v2.s[3]\n"
- "ldr d2, [x23, #0x0]\n"
+ "ldr d2, [x22, #0x0]\n"
"fmla v31.4s, v7.4s, v3.s[3]\n"
- "ldr d3, [x21, #0x0]\n"
- "ldr d7, [x15, #0x30]\n"
- "mov v6.d[1], x28\n"
- "mov v0.d[1], x26\n"
- "mov v1.d[1], x24\n"
- "mov v2.d[1], x22\n"
- "mov v3.d[1], x20\n"
- "mov v7.d[1], x27\n"
+ "mov v0.d[1], x25\n"
+ "mov v1.d[1], x23\n"
+ "ldr d3, [x20, #0x0]\n"
+ "mov v2.d[1], x21\n"
+ "mov v3.d[1], x19\n"
"bge 145b\n"
"146:" // Height 4: Multiply loop: Single iteration only
"fmla v8.4s, v4.4s, v0.s[0]\n"
- "add x11, x11, #0x10\n"
+ "ldr q5, [x15, #0x10]\n"
"fmla v14.4s, v4.4s, v1.s[0]\n"
- "add x25, x25, #0x10\n"
+ "ldr q6, [x15, #0x20]\n"
"fmla v20.4s, v4.4s, v2.s[0]\n"
- "add x23, x23, #0x10\n"
+ "ldr q7, [x15, #0x30]\n"
"fmla v26.4s, v4.4s, v3.s[0]\n"
"ldr q4, [x15, #0x40]\n"
"fmla v9.4s, v5.4s, v0.s[0]\n"
- "add x21, x21, #0x10\n"
+ "sub x11, x11, #0x4\n"
"fmla v15.4s, v5.4s, v1.s[0]\n"
- "sub x12, x12, #0x4\n"
+ "add x10, x10, #0x10\n"
"fmla v21.4s, v5.4s, v2.s[0]\n"
- "prfm pldl1keep, [x11, #0x80]\n"
+ "prfm pldl1keep, [x10, #0x80]\n"
"fmla v27.4s, v5.4s, v3.s[0]\n"
"ldr q5, [x15, #0x50]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
"fmla v16.4s, v6.4s, v1.s[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v22.4s, v6.4s, v2.s[0]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
+ "add x22, x22, #0x10\n"
"fmla v28.4s, v6.4s, v3.s[0]\n"
- "ldr q6, [x15, #0x60]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
+ "ldr q6, [x15, #0x60]\n"
"fmla v17.4s, v7.4s, v1.s[0]\n"
+ "add x20, x20, #0x10\n"
"fmla v23.4s, v7.4s, v2.s[0]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
"fmla v29.4s, v7.4s, v3.s[0]\n"
"ldr q7, [x15, #0x70]\n"
"fmla v12.4s, v4.4s, v0.s[0]\n"
@@ -2493,20 +2493,20 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"fmla v25.4s, v7.4s, v2.s[3]\n"
"fmla v31.4s, v7.4s, v3.s[3]\n"
"147:" // Height 4: Multiply loop: Main loop skip
- "cbz x12, 149f\n"
+ "cbz x11, 149f\n"
"148:" // Height 4: Multiply loop: Odd block loop
- "ldr s0, [x11], #0x4\n"
- "sub x12, x12, #0x1\n"
- "ldr s1, [x25], #0x4\n"
- "ldr s2, [x23], #0x4\n"
- "ldr s3, [x21], #0x4\n"
+ "ldr s0, [x10], #0x4\n"
+ "sub x11, x11, #0x1\n"
+ "ldr s1, [x24], #0x4\n"
+ "ldr s2, [x22], #0x4\n"
+ "ldr s3, [x20], #0x4\n"
"ldr q4, [x15, #0x0]\n"
- "fmla v8.4s, v4.4s, v0.s[0]\n"
"ldr q5, [x15, #0x10]\n"
- "fmla v14.4s, v4.4s, v1.s[0]\n"
"ldr q6, [x15, #0x20]\n"
- "fmla v20.4s, v4.4s, v2.s[0]\n"
+ "fmla v8.4s, v4.4s, v0.s[0]\n"
"ldr q7, [x15, #0x30]\n"
+ "fmla v14.4s, v4.4s, v1.s[0]\n"
+ "fmla v20.4s, v4.4s, v2.s[0]\n"
"fmla v26.4s, v4.4s, v3.s[0]\n"
"ldr q4, [x15, #0x40]\n"
"fmla v9.4s, v5.4s, v0.s[0]\n"
@@ -2531,23 +2531,25 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"fmla v19.4s, v5.4s, v1.s[0]\n"
"fmla v25.4s, v5.4s, v2.s[0]\n"
"fmla v31.4s, v5.4s, v3.s[0]\n"
- "cbnz x12, 148b\n"
+ "cbnz x11, 148b\n"
"149:" // Height 4: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x13, x13, #0x1\n"
- "cmp x13, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x12, x12, #0x1\n"
+ "cmp x12, x19\n"
"bne 142b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x14, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "prfm pstl1keep, [x14, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x13, #0x0]\n"
+ "add x23, x13, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #2\n"
"prfm pstl1keep, [x21, #0x0]\n"
"tbz %x[flags], #1, 150f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x20, %x[args_ptr], %[offset_min]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x20]\n"
+ "ld1r { v0.4s }, [x19]\n"
"fmin v8.4s, v8.4s, v0.4s\n"
"fmin v9.4s, v9.4s, v0.4s\n"
"fmin v10.4s, v10.4s, v0.4s\n"
@@ -2558,6 +2560,16 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"fmin v15.4s, v15.4s, v0.4s\n"
"fmin v16.4s, v16.4s, v0.4s\n"
"fmin v17.4s, v17.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmax v15.4s, v15.4s, v1.4s\n"
+ "fmax v16.4s, v16.4s, v1.4s\n"
+ "fmax v17.4s, v17.4s, v1.4s\n"
"fmin v18.4s, v18.4s, v0.4s\n"
"fmin v19.4s, v19.4s, v0.4s\n"
"fmin v20.4s, v20.4s, v0.4s\n"
@@ -2568,44 +2580,32 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"fmin v25.4s, v25.4s, v0.4s\n"
"fmin v26.4s, v26.4s, v0.4s\n"
"fmin v27.4s, v27.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v1.4s\n"
+ "fmax v19.4s, v19.4s, v1.4s\n"
+ "fmax v20.4s, v20.4s, v1.4s\n"
+ "fmax v21.4s, v21.4s, v1.4s\n"
+ "fmax v22.4s, v22.4s, v1.4s\n"
+ "fmax v23.4s, v23.4s, v1.4s\n"
+ "fmax v24.4s, v24.4s, v1.4s\n"
+ "fmax v25.4s, v25.4s, v1.4s\n"
+ "fmax v26.4s, v26.4s, v1.4s\n"
+ "fmax v27.4s, v27.4s, v1.4s\n"
"fmin v28.4s, v28.4s, v0.4s\n"
"fmin v29.4s, v29.4s, v0.4s\n"
"fmin v30.4s, v30.4s, v0.4s\n"
"fmin v31.4s, v31.4s, v0.4s\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v15.4s, v15.4s, v0.4s\n"
- "fmax v16.4s, v16.4s, v0.4s\n"
- "fmax v17.4s, v17.4s, v0.4s\n"
- "fmax v18.4s, v18.4s, v0.4s\n"
- "fmax v19.4s, v19.4s, v0.4s\n"
- "fmax v20.4s, v20.4s, v0.4s\n"
- "fmax v21.4s, v21.4s, v0.4s\n"
- "fmax v22.4s, v22.4s, v0.4s\n"
- "fmax v23.4s, v23.4s, v0.4s\n"
- "fmax v24.4s, v24.4s, v0.4s\n"
- "fmax v25.4s, v25.4s, v0.4s\n"
- "fmax v26.4s, v26.4s, v0.4s\n"
- "fmax v27.4s, v27.4s, v0.4s\n"
- "fmax v28.4s, v28.4s, v0.4s\n"
- "fmax v29.4s, v29.4s, v0.4s\n"
- "fmax v30.4s, v30.4s, v0.4s\n"
- "fmax v31.4s, v31.4s, v0.4s\n"
+ "fmax v28.4s, v28.4s, v1.4s\n"
+ "fmax v29.4s, v29.4s, v1.4s\n"
+ "fmax v30.4s, v30.4s, v1.4s\n"
+ "fmax v31.4s, v31.4s, v1.4s\n"
"150:" // Height 4: No activation
"cmp x16, #0x18\n"
"bge 163f\n"
"tbz x16, #4, 154f\n"
- "st1 { v8.4s }, [x14], #0x10\n"
- "st1 { v9.4s }, [x14], #0x10\n"
- "st1 { v10.4s }, [x14], #0x10\n"
- "st1 { v11.4s }, [x14], #0x10\n"
+ "st1 { v8.4s }, [x13], #0x10\n"
+ "st1 { v9.4s }, [x13], #0x10\n"
+ "st1 { v10.4s }, [x13], #0x10\n"
+ "st1 { v11.4s }, [x13], #0x10\n"
"st1 { v14.4s }, [x23], #0x10\n"
"st1 { v15.4s }, [x23], #0x10\n"
"st1 { v16.4s }, [x23], #0x10\n"
@@ -2619,51 +2619,51 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"st1 { v28.4s }, [x21], #0x10\n"
"st1 { v29.4s }, [x21], #0x10\n"
"tbz x16, #2, 152f\n"
- "st1 { v12.4s }, [x14], #0x10\n"
+ "st1 { v12.4s }, [x13], #0x10\n"
"st1 { v18.4s }, [x23], #0x10\n"
"st1 { v24.4s }, [x22], #0x10\n"
"st1 { v30.4s }, [x21], #0x10\n"
"tbz x16, #1, 151f\n"
- "str d13, [x14], #0x8\n"
+ "str d13, [x13], #0x8\n"
"str d19, [x23], #0x8\n"
"str d25, [x22], #0x8\n"
"str d31, [x21], #0x8\n"
"tbz x16, #0, 162f\n"
- "st1 { v13.s }[2], [x14]\n"
+ "st1 { v13.s }[2], [x13]\n"
"st1 { v19.s }[2], [x23]\n"
"st1 { v25.s }[2], [x22]\n"
"st1 { v31.s }[2], [x21]\n"
"b 162f\n"
"151:" // Height 4: Partial direct writeback: partial_1_20
"tbz x16, #0, 162f\n"
- "str s13, [x14, #0x0]\n"
+ "str s13, [x13, #0x0]\n"
"str s19, [x23, #0x0]\n"
"str s25, [x22, #0x0]\n"
"str s31, [x21, #0x0]\n"
"b 162f\n"
"152:" // Height 4: Partial direct writeback: partial_2_16
"tbz x16, #1, 153f\n"
- "str d12, [x14], #0x8\n"
+ "str d12, [x13], #0x8\n"
"str d18, [x23], #0x8\n"
"str d24, [x22], #0x8\n"
"str d30, [x21], #0x8\n"
"tbz x16, #0, 162f\n"
- "st1 { v12.s }[2], [x14]\n"
+ "st1 { v12.s }[2], [x13]\n"
"st1 { v18.s }[2], [x23]\n"
"st1 { v24.s }[2], [x22]\n"
"st1 { v30.s }[2], [x21]\n"
"b 162f\n"
"153:" // Height 4: Partial direct writeback: partial_1_16
"tbz x16, #0, 162f\n"
- "str s12, [x14, #0x0]\n"
+ "str s12, [x13, #0x0]\n"
"str s18, [x23, #0x0]\n"
"str s24, [x22, #0x0]\n"
"str s30, [x21, #0x0]\n"
"b 162f\n"
"154:" // Height 4: Partial direct writeback: partial_8_0
"tbz x16, #3, 158f\n"
- "st1 { v8.4s }, [x14], #0x10\n"
- "st1 { v9.4s }, [x14], #0x10\n"
+ "st1 { v8.4s }, [x13], #0x10\n"
+ "st1 { v9.4s }, [x13], #0x10\n"
"st1 { v14.4s }, [x23], #0x10\n"
"st1 { v15.4s }, [x23], #0x10\n"
"st1 { v20.4s }, [x22], #0x10\n"
@@ -2671,98 +2671,98 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"st1 { v26.4s }, [x21], #0x10\n"
"st1 { v27.4s }, [x21], #0x10\n"
"tbz x16, #2, 156f\n"
- "st1 { v10.4s }, [x14], #0x10\n"
+ "st1 { v10.4s }, [x13], #0x10\n"
"st1 { v16.4s }, [x23], #0x10\n"
"st1 { v22.4s }, [x22], #0x10\n"
"st1 { v28.4s }, [x21], #0x10\n"
"tbz x16, #1, 155f\n"
- "str d11, [x14], #0x8\n"
+ "str d11, [x13], #0x8\n"
"str d17, [x23], #0x8\n"
"str d23, [x22], #0x8\n"
"str d29, [x21], #0x8\n"
"tbz x16, #0, 162f\n"
- "st1 { v11.s }[2], [x14]\n"
+ "st1 { v11.s }[2], [x13]\n"
"st1 { v17.s }[2], [x23]\n"
"st1 { v23.s }[2], [x22]\n"
"st1 { v29.s }[2], [x21]\n"
"b 162f\n"
"155:" // Height 4: Partial direct writeback: partial_1_12
"tbz x16, #0, 162f\n"
- "str s11, [x14, #0x0]\n"
+ "str s11, [x13, #0x0]\n"
"str s17, [x23, #0x0]\n"
"str s23, [x22, #0x0]\n"
"str s29, [x21, #0x0]\n"
"b 162f\n"
"156:" // Height 4: Partial direct writeback: partial_2_8
"tbz x16, #1, 157f\n"
- "str d10, [x14], #0x8\n"
+ "str d10, [x13], #0x8\n"
"str d16, [x23], #0x8\n"
"str d22, [x22], #0x8\n"
"str d28, [x21], #0x8\n"
"tbz x16, #0, 162f\n"
- "st1 { v10.s }[2], [x14]\n"
+ "st1 { v10.s }[2], [x13]\n"
"st1 { v16.s }[2], [x23]\n"
"st1 { v22.s }[2], [x22]\n"
"st1 { v28.s }[2], [x21]\n"
"b 162f\n"
"157:" // Height 4: Partial direct writeback: partial_1_8
"tbz x16, #0, 162f\n"
- "str s10, [x14, #0x0]\n"
+ "str s10, [x13, #0x0]\n"
"str s16, [x23, #0x0]\n"
"str s22, [x22, #0x0]\n"
"str s28, [x21, #0x0]\n"
"b 162f\n"
"158:" // Height 4: Partial direct writeback: partial_4_0
"tbz x16, #2, 160f\n"
- "st1 { v8.4s }, [x14], #0x10\n"
+ "st1 { v8.4s }, [x13], #0x10\n"
"st1 { v14.4s }, [x23], #0x10\n"
"st1 { v20.4s }, [x22], #0x10\n"
"st1 { v26.4s }, [x21], #0x10\n"
"tbz x16, #1, 159f\n"
- "str d9, [x14], #0x8\n"
+ "str d9, [x13], #0x8\n"
"str d15, [x23], #0x8\n"
"str d21, [x22], #0x8\n"
"str d27, [x21], #0x8\n"
"tbz x16, #0, 162f\n"
- "st1 { v9.s }[2], [x14]\n"
+ "st1 { v9.s }[2], [x13]\n"
"st1 { v15.s }[2], [x23]\n"
"st1 { v21.s }[2], [x22]\n"
"st1 { v27.s }[2], [x21]\n"
"b 162f\n"
"159:" // Height 4: Partial direct writeback: partial_1_4
"tbz x16, #0, 162f\n"
- "str s9, [x14, #0x0]\n"
+ "str s9, [x13, #0x0]\n"
"str s15, [x23, #0x0]\n"
"str s21, [x22, #0x0]\n"
"str s27, [x21, #0x0]\n"
"b 162f\n"
"160:" // Height 4: Partial direct writeback: partial_2_0
"tbz x16, #1, 161f\n"
- "str d8, [x14], #0x8\n"
+ "str d8, [x13], #0x8\n"
"str d14, [x23], #0x8\n"
"str d20, [x22], #0x8\n"
"str d26, [x21], #0x8\n"
"tbz x16, #0, 162f\n"
- "st1 { v8.s }[2], [x14]\n"
+ "st1 { v8.s }[2], [x13]\n"
"st1 { v14.s }[2], [x23]\n"
"st1 { v20.s }[2], [x22]\n"
"st1 { v26.s }[2], [x21]\n"
"b 162f\n"
"161:" // Height 4: Partial direct writeback: partial_1_0
- "str s8, [x14, #0x0]\n"
+ "str s8, [x13, #0x0]\n"
"str s14, [x23, #0x0]\n"
"str s20, [x22, #0x0]\n"
"str s26, [x21, #0x0]\n"
"162:" // Height 4: Partial direct writeback: Done
"b 164f\n"
"163:" // Height 4: Full writeback
- "str q8, [x14, #0x0]\n"
- "str q9, [x14, #0x10]\n"
- "str q10, [x14, #0x20]\n"
- "str q11, [x14, #0x30]\n"
- "str q12, [x14, #0x40]\n"
- "str q13, [x14, #0x50]\n"
- "add x14, x14, #0x60\n"
+ "str q8, [x13, #0x0]\n"
+ "str q9, [x13, #0x10]\n"
+ "str q10, [x13, #0x20]\n"
+ "str q11, [x13, #0x30]\n"
+ "str q12, [x13, #0x40]\n"
+ "str q13, [x13, #0x50]\n"
+ "add x13, x13, #0x60\n"
"str q14, [x23, #0x0]\n"
"str q15, [x23, #0x10]\n"
"str q16, [x23, #0x20]\n"
@@ -2786,20 +2786,20 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"bgt 125b\n"
"subs %x[M], %x[M], #0x4\n"
"beq 166f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 165f\n"
- "add x21, x21, #0x4\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x4\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"165:" // Update direct input
- "mov x20, #0x10\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x10\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"166:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x24/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x24/generic.cpp
index 5fb71c95b7..37d59cc327 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x24/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x24/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -99,117 +99,117 @@ void a64_hybrid_fp32_mla_4x24 (
"cmp %x[M], #0x2\n"
"bgt 83f\n"
"beq 42f\n"
- "mov x10, %x[bias]\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "mov x27, %x[bias]\n"
+ "mov x26, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "cbz x10, 3f\n"
- "ldr q8, [x10, #0x0]\n"
- "ldr q9, [x10, #0x10]\n"
- "ldr q10, [x10, #0x20]\n"
- "ldr q11, [x10, #0x30]\n"
- "ldr q12, [x10, #0x40]\n"
- "ldr q13, [x10, #0x50]\n"
- "add x10, x10, #0x60\n"
+ "cbz x27, 3f\n"
+ "ldr q8, [x27, #0x0]\n"
+ "ldr q9, [x27, #0x10]\n"
+ "ldr q10, [x27, #0x20]\n"
+ "ldr q11, [x27, #0x30]\n"
+ "ldr q12, [x27, #0x40]\n"
+ "ldr q13, [x27, #0x50]\n"
+ "add x27, x27, #0x60\n"
"b 18f\n"
"3:" // Height 1: no bias
"tbz %x[flags], #0, 17f\n"
"cmp x9, #0x18\n"
"bge 16f\n"
"tbz x9, #4, 7f\n"
- "ld1 { v8.4s }, [x27], #0x10\n"
- "ld1 { v9.4s }, [x27], #0x10\n"
- "ld1 { v10.4s }, [x27], #0x10\n"
- "ld1 { v11.4s }, [x27], #0x10\n"
+ "ld1 { v8.4s }, [x26], #0x10\n"
+ "ld1 { v9.4s }, [x26], #0x10\n"
+ "ld1 { v10.4s }, [x26], #0x10\n"
+ "ld1 { v11.4s }, [x26], #0x10\n"
"tbz x9, #2, 5f\n"
- "ld1 { v12.4s }, [x27], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
"tbz x9, #1, 4f\n"
- "ldr d13, [x27], #0x8\n"
- "mov x20, #0x58\n"
+ "mov x19, #0x58\n"
+ "ldr d13, [x26], #0x8\n"
"tbz x9, #0, 15f\n"
- "ld1 { v13.s }[2], [x27]\n"
+ "ld1 { v13.s }[2], [x26]\n"
"b 15f\n"
"4:" // Height 1: Partial accumulate: partial_1_20
- "mov x20, #0x50\n"
+ "mov x19, #0x50\n"
"tbz x9, #0, 15f\n"
- "ldr s13, [x27, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
"b 15f\n"
"5:" // Height 1: Partial accumulate: partial_2_16
"tbz x9, #1, 6f\n"
- "ldr d12, [x27], #0x8\n"
- "mov x20, #0x48\n"
+ "ldr d12, [x26], #0x8\n"
+ "mov x19, #0x48\n"
"tbz x9, #0, 15f\n"
- "ld1 { v12.s }[2], [x27]\n"
+ "ld1 { v12.s }[2], [x26]\n"
"b 15f\n"
"6:" // Height 1: Partial accumulate: partial_1_16
- "mov x20, #0x40\n"
+ "mov x19, #0x40\n"
"tbz x9, #0, 15f\n"
- "ldr s12, [x27, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
"b 15f\n"
"7:" // Height 1: Partial accumulate: partial_8_0
"tbz x9, #3, 11f\n"
- "ld1 { v8.4s }, [x27], #0x10\n"
- "ld1 { v9.4s }, [x27], #0x10\n"
+ "ld1 { v8.4s }, [x26], #0x10\n"
+ "ld1 { v9.4s }, [x26], #0x10\n"
"tbz x9, #2, 9f\n"
- "ld1 { v10.4s }, [x27], #0x10\n"
+ "ld1 { v10.4s }, [x26], #0x10\n"
"tbz x9, #1, 8f\n"
- "ldr d11, [x27], #0x8\n"
- "mov x20, #0x38\n"
+ "mov x19, #0x38\n"
+ "ldr d11, [x26], #0x8\n"
"tbz x9, #0, 15f\n"
- "ld1 { v11.s }[2], [x27]\n"
+ "ld1 { v11.s }[2], [x26]\n"
"b 15f\n"
"8:" // Height 1: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x9, #0, 15f\n"
- "ldr s11, [x27, #0x0]\n"
+ "ldr s11, [x26, #0x0]\n"
"b 15f\n"
"9:" // Height 1: Partial accumulate: partial_2_8
"tbz x9, #1, 10f\n"
- "ldr d10, [x27], #0x8\n"
- "mov x20, #0x28\n"
+ "ldr d10, [x26], #0x8\n"
+ "mov x19, #0x28\n"
"tbz x9, #0, 15f\n"
- "ld1 { v10.s }[2], [x27]\n"
+ "ld1 { v10.s }[2], [x26]\n"
"b 15f\n"
"10:" // Height 1: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x9, #0, 15f\n"
- "ldr s10, [x27, #0x0]\n"
+ "ldr s10, [x26, #0x0]\n"
"b 15f\n"
"11:" // Height 1: Partial accumulate: partial_4_0
"tbz x9, #2, 13f\n"
- "ld1 { v8.4s }, [x27], #0x10\n"
+ "ld1 { v8.4s }, [x26], #0x10\n"
"tbz x9, #1, 12f\n"
- "ldr d9, [x27], #0x8\n"
- "mov x20, #0x18\n"
+ "ldr d9, [x26], #0x8\n"
+ "mov x19, #0x18\n"
"tbz x9, #0, 15f\n"
- "ld1 { v9.s }[2], [x27]\n"
+ "ld1 { v9.s }[2], [x26]\n"
"b 15f\n"
"12:" // Height 1: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x9, #0, 15f\n"
- "ldr s9, [x27, #0x0]\n"
+ "ldr s9, [x26, #0x0]\n"
"b 15f\n"
"13:" // Height 1: Partial accumulate: partial_2_0
"tbz x9, #1, 14f\n"
- "ldr d8, [x27], #0x8\n"
- "mov x20, #0x8\n"
+ "ldr d8, [x26], #0x8\n"
+ "mov x19, #0x8\n"
"tbz x9, #0, 15f\n"
- "ld1 { v8.s }[2], [x27]\n"
+ "ld1 { v8.s }[2], [x26]\n"
"b 15f\n"
"14:" // Height 1: Partial accumulate: partial_1_0
- "ldr s8, [x27, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr s8, [x26, #0x0]\n"
+ "mov x19, #0x0\n"
"15:" // Height 1: Partial accumulate: Done
- "sub x27, x27, x20\n"
+ "sub x26, x26, x19\n"
"b 18f\n"
"16:" // Height 1: full accumulate
- "ldr q8, [x27, #0x0]\n"
- "ldr q9, [x27, #0x10]\n"
- "ldr q10, [x27, #0x20]\n"
- "ldr q11, [x27, #0x30]\n"
- "ldr q12, [x27, #0x40]\n"
- "ldr q13, [x27, #0x50]\n"
+ "ldr q8, [x26, #0x0]\n"
+ "ldr q9, [x26, #0x10]\n"
+ "ldr q10, [x26, #0x20]\n"
+ "ldr q11, [x26, #0x30]\n"
+ "ldr q12, [x26, #0x40]\n"
+ "ldr q13, [x26, #0x50]\n"
"b 18f\n"
"17:" // Height 1: no accumulate
"movi v8.16b, #0x0\n"
@@ -219,57 +219,61 @@ void a64_hybrid_fp32_mla_4x24 (
"movi v12.16b, #0x0\n"
"movi v13.16b, #0x0\n"
"18:" // Height 1: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"19:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 20f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "cbnz x26, 21f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "cbnz x25, 21f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19, LSL #2\n"
"b 21f\n"
"20:" // Height 1: setup direct input
- "mov x24, %x[input_ptr]\n"
+ "mov x23, %x[input_ptr]\n"
"21:" // Height 1: input setup done
- "cmp x25, #0x4\n"
+ "cmp x24, #0x4\n"
"blt 24f\n"
- "ldr q0, [x24, #0x0]\n"
+ "ldr q0, [x23, #0x0]\n"
"ldr q4, [x28, #0x0]\n"
- "cmp x25, #0x8\n"
- "ldr q5, [x28, #0x10]\n"
- "ldr q6, [x28, #0x20]\n"
- "ldr q7, [x28, #0x30]\n"
+ "cmp x24, #0x8\n"
"blt 23f\n"
"22:" // Height 1: Multiply loop: Main loop head
"fmla v8.4s, v4.4s, v0.s[0]\n"
- "ldr q4, [x28, #0x40]\n"
+ "ldr q5, [x28, #0x10]\n"
+ "add x23, x23, #0x10\n"
"fmla v9.4s, v5.4s, v0.s[0]\n"
- "ldr q5, [x28, #0x50]\n"
+ "ldr q6, [x28, #0x20]\n"
+ "sub x24, x24, #0x4\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
- "ldr q6, [x28, #0x60]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "cmp x24, #0x8\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
- "ldr q7, [x28, #0x70]\n"
+ "ldr q4, [x28, #0x40]\n"
+ "ldr q5, [x28, #0x50]\n"
"fmla v12.4s, v4.4s, v0.s[0]\n"
- "ldr q4, [x28, #0x80]\n"
+ "ldr q6, [x28, #0x60]\n"
"fmla v13.4s, v5.4s, v0.s[0]\n"
- "ldr q5, [x28, #0x90]\n"
+ "ldr q7, [x28, #0x70]\n"
+ "ldr q4, [x28, #0x80]\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
+ "ldr q5, [x28, #0x90]\n"
"ldr q6, [x28, #0xa0]\n"
"fmla v9.4s, v7.4s, v0.s[1]\n"
- "ldr q7, [x28, #0xb0]\n"
"fmla v10.4s, v4.4s, v0.s[1]\n"
+ "ldr q7, [x28, #0xb0]\n"
"ldr q4, [x28, #0xc0]\n"
"fmla v11.4s, v5.4s, v0.s[1]\n"
"ldr q5, [x28, #0xd0]\n"
"fmla v12.4s, v6.4s, v0.s[1]\n"
"ldr q6, [x28, #0xe0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"fmla v13.4s, v7.4s, v0.s[1]\n"
- "ldr q7, [x28, #0xf0]\n"
"fmla v8.4s, v4.4s, v0.s[2]\n"
+ "ldr q7, [x28, #0xf0]\n"
"ldr q4, [x28, #0x100]\n"
"fmla v9.4s, v5.4s, v0.s[2]\n"
"ldr q5, [x28, #0x110]\n"
@@ -285,45 +289,44 @@ void a64_hybrid_fp32_mla_4x24 (
"ldr q6, [x28, #0x160]\n"
"fmla v9.4s, v7.4s, v0.s[3]\n"
"ldr q7, [x28, #0x170]\n"
- "sub x25, x25, #0x4\n"
- "add x24, x24, #0x10\n"
- "fmla v10.4s, v4.4s, v0.s[3]\n"
- "fmla v11.4s, v5.4s, v0.s[3]\n"
- "cmp x25, #0x8\n"
"add x28, x28, #0x180\n"
+ "fmla v10.4s, v4.4s, v0.s[3]\n"
"ldr q4, [x28, #0x0]\n"
- "ldr q5, [x28, #0x10]\n"
+ "fmla v11.4s, v5.4s, v0.s[3]\n"
"fmla v12.4s, v6.4s, v0.s[3]\n"
- "ldr q6, [x28, #0x20]\n"
"fmla v13.4s, v7.4s, v0.s[3]\n"
- "ldr q0, [x24, #0x0]\n"
- "ldr q7, [x28, #0x30]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
+ "ldr q0, [x23, #0x0]\n"
"bge 22b\n"
"23:" // Height 1: Multiply loop: Single iteration only
"fmla v8.4s, v4.4s, v0.s[0]\n"
- "ldr q4, [x28, #0x40]\n"
+ "ldr q5, [x28, #0x10]\n"
+ "sub x24, x24, #0x4\n"
"fmla v9.4s, v5.4s, v0.s[0]\n"
- "ldr q5, [x28, #0x50]\n"
+ "ldr q6, [x28, #0x20]\n"
+ "add x23, x23, #0x10\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
- "ldr q6, [x28, #0x60]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "ldr q4, [x28, #0x40]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
- "ldr q7, [x28, #0x70]\n"
+ "ldr q5, [x28, #0x50]\n"
"fmla v12.4s, v4.4s, v0.s[0]\n"
- "ldr q4, [x28, #0x80]\n"
+ "ldr q6, [x28, #0x60]\n"
+ "ldr q7, [x28, #0x70]\n"
"fmla v13.4s, v5.4s, v0.s[0]\n"
+ "ldr q4, [x28, #0x80]\n"
"ldr q5, [x28, #0x90]\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
- "ldr q6, [x28, #0xa0]\n"
"fmla v9.4s, v7.4s, v0.s[1]\n"
+ "ldr q6, [x28, #0xa0]\n"
"ldr q7, [x28, #0xb0]\n"
"fmla v10.4s, v4.4s, v0.s[1]\n"
"ldr q4, [x28, #0xc0]\n"
"fmla v11.4s, v5.4s, v0.s[1]\n"
"ldr q5, [x28, #0xd0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"fmla v12.4s, v6.4s, v0.s[1]\n"
- "ldr q6, [x28, #0xe0]\n"
"fmla v13.4s, v7.4s, v0.s[1]\n"
+ "ldr q6, [x28, #0xe0]\n"
"ldr q7, [x28, #0xf0]\n"
"fmla v8.4s, v4.4s, v0.s[2]\n"
"ldr q4, [x28, #0x100]\n"
@@ -341,297 +344,294 @@ void a64_hybrid_fp32_mla_4x24 (
"ldr q6, [x28, #0x160]\n"
"fmla v9.4s, v7.4s, v0.s[3]\n"
"ldr q7, [x28, #0x170]\n"
- "add x24, x24, #0x10\n"
- "sub x25, x25, #0x4\n"
+ "add x28, x28, #0x180\n"
"fmla v10.4s, v4.4s, v0.s[3]\n"
"fmla v11.4s, v5.4s, v0.s[3]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "add x28, x28, #0x180\n"
"fmla v12.4s, v6.4s, v0.s[3]\n"
"fmla v13.4s, v7.4s, v0.s[3]\n"
"24:" // Height 1: Multiply loop: Main loop skip
- "cbz x25, 26f\n"
+ "cbz x24, 26f\n"
"25:" // Height 1: Multiply loop: Odd block loop
- "ldr s0, [x24], #0x4\n"
+ "ldr s0, [x23], #0x4\n"
+ "sub x24, x24, #0x1\n"
"ldr q4, [x28, #0x0]\n"
"fmla v8.4s, v4.4s, v0.s[0]\n"
- "sub x25, x25, #0x1\n"
"ldr q5, [x28, #0x10]\n"
"ldr q6, [x28, #0x20]\n"
"fmla v9.4s, v5.4s, v0.s[0]\n"
- "fmla v10.4s, v6.4s, v0.s[0]\n"
"ldr q7, [x28, #0x30]\n"
+ "fmla v10.4s, v6.4s, v0.s[0]\n"
"ldr q4, [x28, #0x40]\n"
+ "ldr q5, [x28, #0x50]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
+ "add x28, x28, #0x60\n"
"fmla v12.4s, v4.4s, v0.s[0]\n"
- "ldr q5, [x28, #0x50]\n"
"fmla v13.4s, v5.4s, v0.s[0]\n"
- "add x28, x28, #0x60\n"
- "cbnz x25, 25b\n"
+ "cbnz x24, 25b\n"
"26:" // Height 1: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 19b\n"
- "prfm pstl1keep, [x27, #0x0]\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
"tbz %x[flags], #1, 27f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v8.4s, v8.4s, v1.4s\n"
- "fmin v9.4s, v9.4s, v1.4s\n"
- "fmin v10.4s, v10.4s, v1.4s\n"
- "fmin v11.4s, v11.4s, v1.4s\n"
- "fmin v12.4s, v12.4s, v1.4s\n"
- "fmin v13.4s, v13.4s, v1.4s\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v8.4s, v8.4s, v0.4s\n"
+ "fmin v9.4s, v9.4s, v0.4s\n"
+ "fmin v10.4s, v10.4s, v0.4s\n"
+ "fmin v11.4s, v11.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v0.4s\n"
+ "fmin v13.4s, v13.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
"27:" // Height 1: No activation
"cmp x9, #0x18\n"
"bge 40f\n"
"tbz x9, #4, 31f\n"
- "st1 { v8.4s }, [x27], #0x10\n"
- "st1 { v9.4s }, [x27], #0x10\n"
- "st1 { v10.4s }, [x27], #0x10\n"
- "st1 { v11.4s }, [x27], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v9.4s }, [x26], #0x10\n"
+ "st1 { v10.4s }, [x26], #0x10\n"
+ "st1 { v11.4s }, [x26], #0x10\n"
"tbz x9, #2, 29f\n"
- "st1 { v12.4s }, [x27], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
"tbz x9, #1, 28f\n"
- "str d13, [x27], #0x8\n"
+ "str d13, [x26], #0x8\n"
"tbz x9, #0, 39f\n"
- "st1 { v13.s }[2], [x27]\n"
+ "st1 { v13.s }[2], [x26]\n"
"b 39f\n"
"28:" // Height 1: Partial direct writeback: partial_1_20
"tbz x9, #0, 39f\n"
- "str s13, [x27, #0x0]\n"
+ "str s13, [x26, #0x0]\n"
"b 39f\n"
"29:" // Height 1: Partial direct writeback: partial_2_16
"tbz x9, #1, 30f\n"
- "str d12, [x27], #0x8\n"
+ "str d12, [x26], #0x8\n"
"tbz x9, #0, 39f\n"
- "st1 { v12.s }[2], [x27]\n"
+ "st1 { v12.s }[2], [x26]\n"
"b 39f\n"
"30:" // Height 1: Partial direct writeback: partial_1_16
"tbz x9, #0, 39f\n"
- "str s12, [x27, #0x0]\n"
+ "str s12, [x26, #0x0]\n"
"b 39f\n"
"31:" // Height 1: Partial direct writeback: partial_8_0
"tbz x9, #3, 35f\n"
- "st1 { v8.4s }, [x27], #0x10\n"
- "st1 { v9.4s }, [x27], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v9.4s }, [x26], #0x10\n"
"tbz x9, #2, 33f\n"
- "st1 { v10.4s }, [x27], #0x10\n"
+ "st1 { v10.4s }, [x26], #0x10\n"
"tbz x9, #1, 32f\n"
- "str d11, [x27], #0x8\n"
+ "str d11, [x26], #0x8\n"
"tbz x9, #0, 39f\n"
- "st1 { v11.s }[2], [x27]\n"
+ "st1 { v11.s }[2], [x26]\n"
"b 39f\n"
"32:" // Height 1: Partial direct writeback: partial_1_12
"tbz x9, #0, 39f\n"
- "str s11, [x27, #0x0]\n"
+ "str s11, [x26, #0x0]\n"
"b 39f\n"
"33:" // Height 1: Partial direct writeback: partial_2_8
"tbz x9, #1, 34f\n"
- "str d10, [x27], #0x8\n"
+ "str d10, [x26], #0x8\n"
"tbz x9, #0, 39f\n"
- "st1 { v10.s }[2], [x27]\n"
+ "st1 { v10.s }[2], [x26]\n"
"b 39f\n"
"34:" // Height 1: Partial direct writeback: partial_1_8
"tbz x9, #0, 39f\n"
- "str s10, [x27, #0x0]\n"
+ "str s10, [x26, #0x0]\n"
"b 39f\n"
"35:" // Height 1: Partial direct writeback: partial_4_0
"tbz x9, #2, 37f\n"
- "st1 { v8.4s }, [x27], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
"tbz x9, #1, 36f\n"
- "str d9, [x27], #0x8\n"
+ "str d9, [x26], #0x8\n"
"tbz x9, #0, 39f\n"
- "st1 { v9.s }[2], [x27]\n"
+ "st1 { v9.s }[2], [x26]\n"
"b 39f\n"
"36:" // Height 1: Partial direct writeback: partial_1_4
"tbz x9, #0, 39f\n"
- "str s9, [x27, #0x0]\n"
+ "str s9, [x26, #0x0]\n"
"b 39f\n"
"37:" // Height 1: Partial direct writeback: partial_2_0
"tbz x9, #1, 38f\n"
- "str d8, [x27], #0x8\n"
+ "str d8, [x26], #0x8\n"
"tbz x9, #0, 39f\n"
- "st1 { v8.s }[2], [x27]\n"
+ "st1 { v8.s }[2], [x26]\n"
"b 39f\n"
"38:" // Height 1: Partial direct writeback: partial_1_0
- "str s8, [x27, #0x0]\n"
+ "str s8, [x26, #0x0]\n"
"39:" // Height 1: Partial direct writeback: Done
"b 41f\n"
"40:" // Height 1: Full writeback
- "str q8, [x27, #0x0]\n"
- "str q9, [x27, #0x10]\n"
- "str q10, [x27, #0x20]\n"
- "str q11, [x27, #0x30]\n"
- "str q12, [x27, #0x40]\n"
- "str q13, [x27, #0x50]\n"
- "add x27, x27, #0x60\n"
+ "str q8, [x26, #0x0]\n"
+ "str q9, [x26, #0x10]\n"
+ "str q10, [x26, #0x20]\n"
+ "str q11, [x26, #0x30]\n"
+ "str q12, [x26, #0x40]\n"
+ "str q13, [x26, #0x50]\n"
+ "add x26, x26, #0x60\n"
"41:" // Height 1: Writeback done
"subs x9, x9, #0x18\n"
"bgt 2b\n"
"b 166f\n"
"42:" // Height 2
- "mov x10, %x[bias]\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x27, %x[bias]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "mov x26, %x[output_ptr]\n"
"43:" // Height 2: Column loop
- "cbz x10, 44f\n"
- "ldr q8, [x10, #0x0]\n"
- "ldr q9, [x10, #0x10]\n"
+ "cbz x27, 44f\n"
+ "ldr q8, [x27, #0x0]\n"
"mov v14.16b, v8.16b\n"
+ "ldr q9, [x27, #0x10]\n"
+ "ldr q10, [x27, #0x20]\n"
"mov v15.16b, v9.16b\n"
- "ldr q10, [x10, #0x20]\n"
- "ldr q11, [x10, #0x30]\n"
+ "ldr q11, [x27, #0x30]\n"
"mov v16.16b, v10.16b\n"
+ "ldr q12, [x27, #0x40]\n"
+ "ldr q13, [x27, #0x50]\n"
"mov v17.16b, v11.16b\n"
- "ldr q12, [x10, #0x40]\n"
- "ldr q13, [x10, #0x50]\n"
+ "add x27, x27, #0x60\n"
"mov v18.16b, v12.16b\n"
"mov v19.16b, v13.16b\n"
- "add x10, x10, #0x60\n"
"b 59f\n"
"44:" // Height 2: no bias
"tbz %x[flags], #0, 58f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x9, #0x18\n"
- "add x23, x27, x20, LSL #2\n"
+ "add x22, x26, x19, LSL #2\n"
"bge 57f\n"
"tbz x9, #4, 48f\n"
- "ld1 { v8.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v9.4s }, [x27], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
- "ld1 { v10.4s }, [x27], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v11.4s }, [x27], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v8.4s }, [x26], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "ld1 { v9.4s }, [x26], #0x10\n"
+ "ld1 { v15.4s }, [x22], #0x10\n"
+ "ld1 { v10.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v11.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
"tbz x9, #2, 46f\n"
- "ld1 { v12.4s }, [x27], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
"tbz x9, #1, 45f\n"
- "ldr d13, [x27], #0x8\n"
- "ldr d19, [x23], #0x8\n"
- "mov x20, #0x58\n"
+ "mov x19, #0x58\n"
+ "ldr d13, [x26], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
"tbz x9, #0, 56f\n"
- "ld1 { v13.s }[2], [x27]\n"
- "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x22]\n"
"b 56f\n"
"45:" // Height 2: Partial accumulate: partial_1_20
- "mov x20, #0x50\n"
+ "mov x19, #0x50\n"
"tbz x9, #0, 56f\n"
- "ldr s13, [x27, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
"b 56f\n"
"46:" // Height 2: Partial accumulate: partial_2_16
"tbz x9, #1, 47f\n"
- "ldr d12, [x27], #0x8\n"
- "ldr d18, [x23], #0x8\n"
- "mov x20, #0x48\n"
+ "ldr d12, [x26], #0x8\n"
+ "ldr d18, [x22], #0x8\n"
+ "mov x19, #0x48\n"
"tbz x9, #0, 56f\n"
- "ld1 { v12.s }[2], [x27]\n"
- "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x22]\n"
"b 56f\n"
"47:" // Height 2: Partial accumulate: partial_1_16
- "mov x20, #0x40\n"
+ "mov x19, #0x40\n"
"tbz x9, #0, 56f\n"
- "ldr s12, [x27, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
"b 56f\n"
"48:" // Height 2: Partial accumulate: partial_8_0
"tbz x9, #3, 52f\n"
- "ld1 { v8.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v9.4s }, [x27], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
+ "ld1 { v8.4s }, [x26], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "ld1 { v9.4s }, [x26], #0x10\n"
+ "ld1 { v15.4s }, [x22], #0x10\n"
"tbz x9, #2, 50f\n"
- "ld1 { v10.4s }, [x27], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v10.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
"tbz x9, #1, 49f\n"
- "ldr d11, [x27], #0x8\n"
- "ldr d17, [x23], #0x8\n"
- "mov x20, #0x38\n"
+ "mov x19, #0x38\n"
+ "ldr d11, [x26], #0x8\n"
+ "ldr d17, [x22], #0x8\n"
"tbz x9, #0, 56f\n"
- "ld1 { v11.s }[2], [x27]\n"
- "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v11.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x22]\n"
"b 56f\n"
"49:" // Height 2: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x9, #0, 56f\n"
- "ldr s11, [x27, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
+ "ldr s11, [x26, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
"b 56f\n"
"50:" // Height 2: Partial accumulate: partial_2_8
"tbz x9, #1, 51f\n"
- "ldr d10, [x27], #0x8\n"
- "ldr d16, [x23], #0x8\n"
- "mov x20, #0x28\n"
+ "ldr d10, [x26], #0x8\n"
+ "ldr d16, [x22], #0x8\n"
+ "mov x19, #0x28\n"
"tbz x9, #0, 56f\n"
- "ld1 { v10.s }[2], [x27]\n"
- "ld1 { v16.s }[2], [x23]\n"
+ "ld1 { v10.s }[2], [x26]\n"
+ "ld1 { v16.s }[2], [x22]\n"
"b 56f\n"
"51:" // Height 2: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x9, #0, 56f\n"
- "ldr s10, [x27, #0x0]\n"
- "ldr s16, [x23, #0x0]\n"
+ "ldr s10, [x26, #0x0]\n"
+ "ldr s16, [x22, #0x0]\n"
"b 56f\n"
"52:" // Height 2: Partial accumulate: partial_4_0
"tbz x9, #2, 54f\n"
- "ld1 { v8.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v8.4s }, [x26], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
"tbz x9, #1, 53f\n"
- "ldr d9, [x27], #0x8\n"
- "ldr d15, [x23], #0x8\n"
- "mov x20, #0x18\n"
+ "mov x19, #0x18\n"
+ "ldr d9, [x26], #0x8\n"
+ "ldr d15, [x22], #0x8\n"
"tbz x9, #0, 56f\n"
- "ld1 { v9.s }[2], [x27]\n"
- "ld1 { v15.s }[2], [x23]\n"
+ "ld1 { v9.s }[2], [x26]\n"
+ "ld1 { v15.s }[2], [x22]\n"
"b 56f\n"
"53:" // Height 2: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x9, #0, 56f\n"
- "ldr s9, [x27, #0x0]\n"
- "ldr s15, [x23, #0x0]\n"
+ "ldr s9, [x26, #0x0]\n"
+ "ldr s15, [x22, #0x0]\n"
"b 56f\n"
"54:" // Height 2: Partial accumulate: partial_2_0
"tbz x9, #1, 55f\n"
- "ldr d8, [x27], #0x8\n"
- "ldr d14, [x23], #0x8\n"
- "mov x20, #0x8\n"
+ "ldr d8, [x26], #0x8\n"
+ "ldr d14, [x22], #0x8\n"
+ "mov x19, #0x8\n"
"tbz x9, #0, 56f\n"
- "ld1 { v8.s }[2], [x27]\n"
- "ld1 { v14.s }[2], [x23]\n"
+ "ld1 { v8.s }[2], [x26]\n"
+ "ld1 { v14.s }[2], [x22]\n"
"b 56f\n"
"55:" // Height 2: Partial accumulate: partial_1_0
- "ldr s8, [x27, #0x0]\n"
- "ldr s14, [x23, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr s8, [x26, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s14, [x22, #0x0]\n"
"56:" // Height 2: Partial accumulate: Done
- "sub x27, x27, x20\n"
+ "sub x26, x26, x19\n"
"b 59f\n"
"57:" // Height 2: full accumulate
- "ldr q8, [x27, #0x0]\n"
- "ldr q9, [x27, #0x10]\n"
- "ldr q10, [x27, #0x20]\n"
- "ldr q11, [x27, #0x30]\n"
- "ldr q12, [x27, #0x40]\n"
- "ldr q13, [x27, #0x50]\n"
- "ldr q14, [x23, #0x0]\n"
- "ldr q15, [x23, #0x10]\n"
- "ldr q16, [x23, #0x20]\n"
- "ldr q17, [x23, #0x30]\n"
- "ldr q18, [x23, #0x40]\n"
- "ldr q19, [x23, #0x50]\n"
+ "ldr q8, [x26, #0x0]\n"
+ "ldr q9, [x26, #0x10]\n"
+ "ldr q10, [x26, #0x20]\n"
+ "ldr q11, [x26, #0x30]\n"
+ "ldr q12, [x26, #0x40]\n"
+ "ldr q13, [x26, #0x50]\n"
+ "ldr q14, [x22, #0x0]\n"
+ "ldr q15, [x22, #0x10]\n"
+ "ldr q16, [x22, #0x20]\n"
+ "ldr q17, [x22, #0x30]\n"
+ "ldr q18, [x22, #0x40]\n"
+ "ldr q19, [x22, #0x50]\n"
"b 59f\n"
"58:" // Height 2: no accumulate
"movi v8.16b, #0x0\n"
@@ -647,60 +647,60 @@ void a64_hybrid_fp32_mla_4x24 (
"movi v18.16b, #0x0\n"
"movi v19.16b, #0x0\n"
"59:" // Height 2: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"60:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 61f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "cbnz x26, 62f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "cbnz x25, 62f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
"b 62f\n"
"61:" // Height 2: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20, LSL #2\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19, LSL #2\n"
"62:" // Height 2: input setup done
- "cmp x25, #0x4\n"
+ "cmp x24, #0x4\n"
"blt 65f\n"
- "ldr q0, [x24, #0x0]\n"
- "ldr q1, [x23, #0x0]\n"
- "cmp x25, #0x8\n"
+ "ldr q0, [x23, #0x0]\n"
+ "ldr q1, [x22, #0x0]\n"
+ "cmp x24, #0x8\n"
"ldr q4, [x28, #0x0]\n"
- "ldr q5, [x28, #0x10]\n"
- "ldr q6, [x28, #0x20]\n"
- "ldr q7, [x28, #0x30]\n"
"blt 64f\n"
"63:" // Height 2: Multiply loop: Main loop head
"fmla v8.4s, v4.4s, v0.s[0]\n"
+ "ldr q5, [x28, #0x10]\n"
+ "add x23, x23, #0x10\n"
"fmla v14.4s, v4.4s, v1.s[0]\n"
- "ldr q4, [x28, #0x40]\n"
- "sub x25, x25, #0x4\n"
+ "ldr q6, [x28, #0x20]\n"
+ "add x22, x22, #0x10\n"
"fmla v9.4s, v5.4s, v0.s[0]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "sub x24, x24, #0x4\n"
"fmla v15.4s, v5.4s, v1.s[0]\n"
- "ldr q5, [x28, #0x50]\n"
- "add x24, x24, #0x10\n"
+ "ldr q4, [x28, #0x40]\n"
+ "cmp x24, #0x8\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
+ "ldr q5, [x28, #0x50]\n"
"fmla v16.4s, v6.4s, v1.s[0]\n"
"ldr q6, [x28, #0x60]\n"
- "add x23, x23, #0x10\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
"fmla v17.4s, v7.4s, v1.s[0]\n"
"ldr q7, [x28, #0x70]\n"
- "cmp x25, #0x8\n"
"fmla v12.4s, v4.4s, v0.s[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
"fmla v18.4s, v4.4s, v1.s[0]\n"
"ldr q4, [x28, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"fmla v13.4s, v5.4s, v0.s[0]\n"
"fmla v19.4s, v5.4s, v1.s[0]\n"
"ldr q5, [x28, #0x90]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
"fmla v14.4s, v6.4s, v1.s[1]\n"
"ldr q6, [x28, #0xa0]\n"
@@ -749,37 +749,37 @@ void a64_hybrid_fp32_mla_4x24 (
"ldr q4, [x28, #0x0]\n"
"fmla v11.4s, v5.4s, v0.s[3]\n"
"fmla v17.4s, v5.4s, v1.s[3]\n"
- "ldr q5, [x28, #0x10]\n"
"fmla v12.4s, v6.4s, v0.s[3]\n"
"fmla v18.4s, v6.4s, v1.s[3]\n"
- "ldr q6, [x28, #0x20]\n"
"fmla v13.4s, v7.4s, v0.s[3]\n"
- "ldr q0, [x24, #0x0]\n"
+ "ldr q0, [x23, #0x0]\n"
"fmla v19.4s, v7.4s, v1.s[3]\n"
- "ldr q1, [x23, #0x0]\n"
- "ldr q7, [x28, #0x30]\n"
+ "ldr q1, [x22, #0x0]\n"
"bge 63b\n"
"64:" // Height 2: Multiply loop: Single iteration only
"fmla v8.4s, v4.4s, v0.s[0]\n"
+ "ldr q5, [x28, #0x10]\n"
+ "sub x24, x24, #0x4\n"
"fmla v14.4s, v4.4s, v1.s[0]\n"
- "ldr q4, [x28, #0x40]\n"
- "add x24, x24, #0x10\n"
+ "ldr q6, [x28, #0x20]\n"
+ "add x23, x23, #0x10\n"
"fmla v9.4s, v5.4s, v0.s[0]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x22, x22, #0x10\n"
"fmla v15.4s, v5.4s, v1.s[0]\n"
- "ldr q5, [x28, #0x50]\n"
- "add x23, x23, #0x10\n"
+ "ldr q4, [x28, #0x40]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
+ "ldr q5, [x28, #0x50]\n"
"fmla v16.4s, v6.4s, v1.s[0]\n"
"ldr q6, [x28, #0x60]\n"
- "sub x25, x25, #0x4\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
"fmla v17.4s, v7.4s, v1.s[0]\n"
"ldr q7, [x28, #0x70]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"fmla v12.4s, v4.4s, v0.s[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
"fmla v18.4s, v4.4s, v1.s[0]\n"
"ldr q4, [x28, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
"fmla v13.4s, v5.4s, v0.s[0]\n"
"fmla v19.4s, v5.4s, v1.s[0]\n"
"ldr q5, [x28, #0x90]\n"
@@ -835,383 +835,383 @@ void a64_hybrid_fp32_mla_4x24 (
"fmla v13.4s, v7.4s, v0.s[3]\n"
"fmla v19.4s, v7.4s, v1.s[3]\n"
"65:" // Height 2: Multiply loop: Main loop skip
- "cbz x25, 67f\n"
+ "cbz x24, 67f\n"
"66:" // Height 2: Multiply loop: Odd block loop
- "ldr s0, [x24], #0x4\n"
- "ldr s1, [x23], #0x4\n"
- "sub x25, x25, #0x1\n"
+ "ldr s0, [x23], #0x4\n"
+ "sub x24, x24, #0x1\n"
+ "ldr s1, [x22], #0x4\n"
"ldr q4, [x28, #0x0]\n"
- "ldr q5, [x28, #0x10]\n"
"fmla v8.4s, v4.4s, v0.s[0]\n"
+ "ldr q5, [x28, #0x10]\n"
"fmla v14.4s, v4.4s, v1.s[0]\n"
"ldr q6, [x28, #0x20]\n"
"ldr q7, [x28, #0x30]\n"
"fmla v9.4s, v5.4s, v0.s[0]\n"
- "fmla v15.4s, v5.4s, v1.s[0]\n"
"ldr q4, [x28, #0x40]\n"
+ "fmla v15.4s, v5.4s, v1.s[0]\n"
"ldr q5, [x28, #0x50]\n"
+ "add x28, x28, #0x60\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
"fmla v16.4s, v6.4s, v1.s[0]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
"fmla v17.4s, v7.4s, v1.s[0]\n"
- "add x28, x28, #0x60\n"
"fmla v12.4s, v4.4s, v0.s[0]\n"
"fmla v18.4s, v4.4s, v1.s[0]\n"
"fmla v13.4s, v5.4s, v0.s[0]\n"
"fmla v19.4s, v5.4s, v1.s[0]\n"
- "cbnz x25, 66b\n"
+ "cbnz x24, 66b\n"
"67:" // Height 2: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 60b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20, LSL #2\n"
- "prfm pstl1keep, [x27, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x22, x26, x19, LSL #2\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
"tbz %x[flags], #1, 68f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v8.4s, v8.4s, v1.4s\n"
- "fmin v9.4s, v9.4s, v1.4s\n"
- "fmin v10.4s, v10.4s, v1.4s\n"
- "fmin v11.4s, v11.4s, v1.4s\n"
- "fmin v12.4s, v12.4s, v1.4s\n"
- "fmin v13.4s, v13.4s, v1.4s\n"
- "fmin v14.4s, v14.4s, v1.4s\n"
- "fmin v15.4s, v15.4s, v1.4s\n"
- "fmin v16.4s, v16.4s, v1.4s\n"
- "fmin v17.4s, v17.4s, v1.4s\n"
- "fmin v18.4s, v18.4s, v1.4s\n"
- "fmin v19.4s, v19.4s, v1.4s\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v15.4s, v15.4s, v0.4s\n"
- "fmax v16.4s, v16.4s, v0.4s\n"
- "fmax v17.4s, v17.4s, v0.4s\n"
- "fmax v18.4s, v18.4s, v0.4s\n"
- "fmax v19.4s, v19.4s, v0.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v8.4s, v8.4s, v0.4s\n"
+ "fmin v9.4s, v9.4s, v0.4s\n"
+ "fmin v10.4s, v10.4s, v0.4s\n"
+ "fmin v11.4s, v11.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v0.4s\n"
+ "fmin v13.4s, v13.4s, v0.4s\n"
+ "fmin v14.4s, v14.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmin v15.4s, v15.4s, v0.4s\n"
+ "fmin v16.4s, v16.4s, v0.4s\n"
+ "fmin v17.4s, v17.4s, v0.4s\n"
+ "fmax v15.4s, v15.4s, v1.4s\n"
+ "fmax v16.4s, v16.4s, v1.4s\n"
+ "fmax v17.4s, v17.4s, v1.4s\n"
+ "fmin v18.4s, v18.4s, v0.4s\n"
+ "fmin v19.4s, v19.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v1.4s\n"
+ "fmax v19.4s, v19.4s, v1.4s\n"
"68:" // Height 2: No activation
"cmp x9, #0x18\n"
"bge 81f\n"
"tbz x9, #4, 72f\n"
- "st1 { v8.4s }, [x27], #0x10\n"
- "st1 { v9.4s }, [x27], #0x10\n"
- "st1 { v10.4s }, [x27], #0x10\n"
- "st1 { v11.4s }, [x27], #0x10\n"
- "st1 { v14.4s }, [x23], #0x10\n"
- "st1 { v15.4s }, [x23], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v9.4s }, [x26], #0x10\n"
+ "st1 { v10.4s }, [x26], #0x10\n"
+ "st1 { v11.4s }, [x26], #0x10\n"
+ "st1 { v14.4s }, [x22], #0x10\n"
+ "st1 { v15.4s }, [x22], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
"tbz x9, #2, 70f\n"
- "st1 { v12.4s }, [x27], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
"tbz x9, #1, 69f\n"
- "str d13, [x27], #0x8\n"
- "str d19, [x23], #0x8\n"
+ "str d13, [x26], #0x8\n"
+ "str d19, [x22], #0x8\n"
"tbz x9, #0, 80f\n"
- "st1 { v13.s }[2], [x27]\n"
- "st1 { v19.s }[2], [x23]\n"
+ "st1 { v13.s }[2], [x26]\n"
+ "st1 { v19.s }[2], [x22]\n"
"b 80f\n"
"69:" // Height 2: Partial direct writeback: partial_1_20
"tbz x9, #0, 80f\n"
- "str s13, [x27, #0x0]\n"
- "str s19, [x23, #0x0]\n"
+ "str s13, [x26, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
"b 80f\n"
"70:" // Height 2: Partial direct writeback: partial_2_16
"tbz x9, #1, 71f\n"
- "str d12, [x27], #0x8\n"
- "str d18, [x23], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d18, [x22], #0x8\n"
"tbz x9, #0, 80f\n"
- "st1 { v12.s }[2], [x27]\n"
- "st1 { v18.s }[2], [x23]\n"
+ "st1 { v12.s }[2], [x26]\n"
+ "st1 { v18.s }[2], [x22]\n"
"b 80f\n"
"71:" // Height 2: Partial direct writeback: partial_1_16
"tbz x9, #0, 80f\n"
- "str s12, [x27, #0x0]\n"
- "str s18, [x23, #0x0]\n"
+ "str s12, [x26, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
"b 80f\n"
"72:" // Height 2: Partial direct writeback: partial_8_0
"tbz x9, #3, 76f\n"
- "st1 { v8.4s }, [x27], #0x10\n"
- "st1 { v9.4s }, [x27], #0x10\n"
- "st1 { v14.4s }, [x23], #0x10\n"
- "st1 { v15.4s }, [x23], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v9.4s }, [x26], #0x10\n"
+ "st1 { v14.4s }, [x22], #0x10\n"
+ "st1 { v15.4s }, [x22], #0x10\n"
"tbz x9, #2, 74f\n"
- "st1 { v10.4s }, [x27], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v10.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
"tbz x9, #1, 73f\n"
- "str d11, [x27], #0x8\n"
- "str d17, [x23], #0x8\n"
+ "str d11, [x26], #0x8\n"
+ "str d17, [x22], #0x8\n"
"tbz x9, #0, 80f\n"
- "st1 { v11.s }[2], [x27]\n"
- "st1 { v17.s }[2], [x23]\n"
+ "st1 { v11.s }[2], [x26]\n"
+ "st1 { v17.s }[2], [x22]\n"
"b 80f\n"
"73:" // Height 2: Partial direct writeback: partial_1_12
"tbz x9, #0, 80f\n"
- "str s11, [x27, #0x0]\n"
- "str s17, [x23, #0x0]\n"
+ "str s11, [x26, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
"b 80f\n"
"74:" // Height 2: Partial direct writeback: partial_2_8
"tbz x9, #1, 75f\n"
- "str d10, [x27], #0x8\n"
- "str d16, [x23], #0x8\n"
+ "str d10, [x26], #0x8\n"
+ "str d16, [x22], #0x8\n"
"tbz x9, #0, 80f\n"
- "st1 { v10.s }[2], [x27]\n"
- "st1 { v16.s }[2], [x23]\n"
+ "st1 { v10.s }[2], [x26]\n"
+ "st1 { v16.s }[2], [x22]\n"
"b 80f\n"
"75:" // Height 2: Partial direct writeback: partial_1_8
"tbz x9, #0, 80f\n"
- "str s10, [x27, #0x0]\n"
- "str s16, [x23, #0x0]\n"
+ "str s10, [x26, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
"b 80f\n"
"76:" // Height 2: Partial direct writeback: partial_4_0
"tbz x9, #2, 78f\n"
- "st1 { v8.4s }, [x27], #0x10\n"
- "st1 { v14.4s }, [x23], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v14.4s }, [x22], #0x10\n"
"tbz x9, #1, 77f\n"
- "str d9, [x27], #0x8\n"
- "str d15, [x23], #0x8\n"
+ "str d9, [x26], #0x8\n"
+ "str d15, [x22], #0x8\n"
"tbz x9, #0, 80f\n"
- "st1 { v9.s }[2], [x27]\n"
- "st1 { v15.s }[2], [x23]\n"
+ "st1 { v9.s }[2], [x26]\n"
+ "st1 { v15.s }[2], [x22]\n"
"b 80f\n"
"77:" // Height 2: Partial direct writeback: partial_1_4
"tbz x9, #0, 80f\n"
- "str s9, [x27, #0x0]\n"
- "str s15, [x23, #0x0]\n"
+ "str s9, [x26, #0x0]\n"
+ "str s15, [x22, #0x0]\n"
"b 80f\n"
"78:" // Height 2: Partial direct writeback: partial_2_0
"tbz x9, #1, 79f\n"
- "str d8, [x27], #0x8\n"
- "str d14, [x23], #0x8\n"
+ "str d8, [x26], #0x8\n"
+ "str d14, [x22], #0x8\n"
"tbz x9, #0, 80f\n"
- "st1 { v8.s }[2], [x27]\n"
- "st1 { v14.s }[2], [x23]\n"
+ "st1 { v8.s }[2], [x26]\n"
+ "st1 { v14.s }[2], [x22]\n"
"b 80f\n"
"79:" // Height 2: Partial direct writeback: partial_1_0
- "str s8, [x27, #0x0]\n"
- "str s14, [x23, #0x0]\n"
+ "str s8, [x26, #0x0]\n"
+ "str s14, [x22, #0x0]\n"
"80:" // Height 2: Partial direct writeback: Done
"b 82f\n"
"81:" // Height 2: Full writeback
- "str q8, [x27, #0x0]\n"
- "str q9, [x27, #0x10]\n"
- "str q10, [x27, #0x20]\n"
- "str q11, [x27, #0x30]\n"
- "str q12, [x27, #0x40]\n"
- "str q13, [x27, #0x50]\n"
- "add x27, x27, #0x60\n"
- "str q14, [x23, #0x0]\n"
- "str q15, [x23, #0x10]\n"
- "str q16, [x23, #0x20]\n"
- "str q17, [x23, #0x30]\n"
- "str q18, [x23, #0x40]\n"
- "str q19, [x23, #0x50]\n"
+ "str q8, [x26, #0x0]\n"
+ "str q9, [x26, #0x10]\n"
+ "str q10, [x26, #0x20]\n"
+ "str q11, [x26, #0x30]\n"
+ "str q12, [x26, #0x40]\n"
+ "str q13, [x26, #0x50]\n"
+ "add x26, x26, #0x60\n"
+ "str q14, [x22, #0x0]\n"
+ "str q15, [x22, #0x10]\n"
+ "str q16, [x22, #0x20]\n"
+ "str q17, [x22, #0x30]\n"
+ "str q18, [x22, #0x40]\n"
+ "str q19, [x22, #0x50]\n"
"82:" // Height 2: Writeback done
"subs x9, x9, #0x18\n"
"bgt 43b\n"
"b 166f\n"
"83:" // Height 3
- "mov x10, %x[bias]\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x27, %x[bias]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "mov x26, %x[output_ptr]\n"
"84:" // Height 3: Column loop
- "cbz x10, 85f\n"
- "ldr q8, [x10, #0x0]\n"
- "ldr q9, [x10, #0x10]\n"
+ "cbz x27, 85f\n"
+ "ldr q8, [x27, #0x0]\n"
"mov v14.16b, v8.16b\n"
+ "ldr q9, [x27, #0x10]\n"
+ "mov v20.16b, v8.16b\n"
+ "ldr q10, [x27, #0x20]\n"
+ "ldr q11, [x27, #0x30]\n"
"mov v15.16b, v9.16b\n"
- "ldr q10, [x10, #0x20]\n"
- "ldr q11, [x10, #0x30]\n"
+ "ldr q12, [x27, #0x40]\n"
+ "mov v21.16b, v9.16b\n"
+ "ldr q13, [x27, #0x50]\n"
+ "add x27, x27, #0x60\n"
"mov v16.16b, v10.16b\n"
"mov v17.16b, v11.16b\n"
- "ldr q12, [x10, #0x40]\n"
- "ldr q13, [x10, #0x50]\n"
+ "mov v22.16b, v10.16b\n"
"mov v18.16b, v12.16b\n"
"mov v19.16b, v13.16b\n"
- "mov v20.16b, v8.16b\n"
- "mov v21.16b, v9.16b\n"
- "add x10, x10, #0x60\n"
- "mov v22.16b, v10.16b\n"
"mov v23.16b, v11.16b\n"
"mov v24.16b, v12.16b\n"
"mov v25.16b, v13.16b\n"
"b 100f\n"
"85:" // Height 3: no bias
"tbz %x[flags], #0, 99f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x9, #0x18\n"
- "add x22, x23, x20, LSL #2\n"
+ "add x22, x26, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"bge 98f\n"
"tbz x9, #4, 89f\n"
- "ld1 { v8.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v9.4s }, [x27], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v10.4s }, [x27], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "ld1 { v11.4s }, [x27], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v23.4s }, [x22], #0x10\n"
+ "ld1 { v8.4s }, [x26], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v9.4s }, [x26], #0x10\n"
+ "ld1 { v15.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "ld1 { v10.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
+ "ld1 { v11.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v23.4s }, [x21], #0x10\n"
"tbz x9, #2, 87f\n"
- "ld1 { v12.4s }, [x27], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "ld1 { v24.4s }, [x21], #0x10\n"
"tbz x9, #1, 86f\n"
- "ldr d13, [x27], #0x8\n"
- "ldr d19, [x23], #0x8\n"
- "mov x20, #0x58\n"
- "ldr d25, [x22], #0x8\n"
+ "mov x19, #0x58\n"
+ "ldr d13, [x26], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
+ "ldr d25, [x21], #0x8\n"
"tbz x9, #0, 97f\n"
- "ld1 { v13.s }[2], [x27]\n"
- "ld1 { v19.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x22]\n"
+ "ld1 { v25.s }[2], [x21]\n"
"b 97f\n"
"86:" // Height 3: Partial accumulate: partial_1_20
- "mov x20, #0x50\n"
+ "mov x19, #0x50\n"
"tbz x9, #0, 97f\n"
- "ldr s13, [x27, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
+ "ldr s25, [x21, #0x0]\n"
"b 97f\n"
"87:" // Height 3: Partial accumulate: partial_2_16
"tbz x9, #1, 88f\n"
- "ldr d12, [x27], #0x8\n"
- "ldr d18, [x23], #0x8\n"
- "mov x20, #0x48\n"
- "ldr d24, [x22], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
+ "ldr d18, [x22], #0x8\n"
+ "mov x19, #0x48\n"
+ "ldr d24, [x21], #0x8\n"
"tbz x9, #0, 97f\n"
- "ld1 { v12.s }[2], [x27]\n"
- "ld1 { v18.s }[2], [x23]\n"
- "ld1 { v24.s }[2], [x22]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x22]\n"
+ "ld1 { v24.s }[2], [x21]\n"
"b 97f\n"
"88:" // Height 3: Partial accumulate: partial_1_16
- "mov x20, #0x40\n"
+ "mov x19, #0x40\n"
"tbz x9, #0, 97f\n"
- "ldr s12, [x27, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
- "ldr s24, [x22, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
+ "ldr s24, [x21, #0x0]\n"
"b 97f\n"
"89:" // Height 3: Partial accumulate: partial_8_0
"tbz x9, #3, 93f\n"
- "ld1 { v8.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v9.4s }, [x27], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v8.4s }, [x26], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v9.4s }, [x26], #0x10\n"
+ "ld1 { v15.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
"tbz x9, #2, 91f\n"
- "ld1 { v10.4s }, [x27], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
+ "ld1 { v10.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
"tbz x9, #1, 90f\n"
- "ldr d11, [x27], #0x8\n"
- "ldr d17, [x23], #0x8\n"
- "mov x20, #0x38\n"
- "ldr d23, [x22], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d11, [x26], #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "ldr d23, [x21], #0x8\n"
"tbz x9, #0, 97f\n"
- "ld1 { v11.s }[2], [x27]\n"
- "ld1 { v17.s }[2], [x23]\n"
- "ld1 { v23.s }[2], [x22]\n"
+ "ld1 { v11.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x22]\n"
+ "ld1 { v23.s }[2], [x21]\n"
"b 97f\n"
"90:" // Height 3: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x9, #0, 97f\n"
- "ldr s11, [x27, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
- "ldr s23, [x22, #0x0]\n"
+ "ldr s11, [x26, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
+ "ldr s23, [x21, #0x0]\n"
"b 97f\n"
"91:" // Height 3: Partial accumulate: partial_2_8
"tbz x9, #1, 92f\n"
- "ldr d10, [x27], #0x8\n"
- "ldr d16, [x23], #0x8\n"
- "mov x20, #0x28\n"
- "ldr d22, [x22], #0x8\n"
+ "ldr d10, [x26], #0x8\n"
+ "ldr d16, [x22], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d22, [x21], #0x8\n"
"tbz x9, #0, 97f\n"
- "ld1 { v10.s }[2], [x27]\n"
- "ld1 { v16.s }[2], [x23]\n"
- "ld1 { v22.s }[2], [x22]\n"
+ "ld1 { v10.s }[2], [x26]\n"
+ "ld1 { v16.s }[2], [x22]\n"
+ "ld1 { v22.s }[2], [x21]\n"
"b 97f\n"
"92:" // Height 3: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x9, #0, 97f\n"
- "ldr s10, [x27, #0x0]\n"
- "ldr s16, [x23, #0x0]\n"
- "ldr s22, [x22, #0x0]\n"
+ "ldr s10, [x26, #0x0]\n"
+ "ldr s16, [x22, #0x0]\n"
+ "ldr s22, [x21, #0x0]\n"
"b 97f\n"
"93:" // Height 3: Partial accumulate: partial_4_0
"tbz x9, #2, 95f\n"
- "ld1 { v8.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v8.4s }, [x26], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
"tbz x9, #1, 94f\n"
- "ldr d9, [x27], #0x8\n"
- "ldr d15, [x23], #0x8\n"
- "mov x20, #0x18\n"
- "ldr d21, [x22], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d9, [x26], #0x8\n"
+ "ldr d15, [x22], #0x8\n"
+ "ldr d21, [x21], #0x8\n"
"tbz x9, #0, 97f\n"
- "ld1 { v9.s }[2], [x27]\n"
- "ld1 { v15.s }[2], [x23]\n"
- "ld1 { v21.s }[2], [x22]\n"
+ "ld1 { v9.s }[2], [x26]\n"
+ "ld1 { v15.s }[2], [x22]\n"
+ "ld1 { v21.s }[2], [x21]\n"
"b 97f\n"
"94:" // Height 3: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x9, #0, 97f\n"
- "ldr s9, [x27, #0x0]\n"
- "ldr s15, [x23, #0x0]\n"
- "ldr s21, [x22, #0x0]\n"
+ "ldr s9, [x26, #0x0]\n"
+ "ldr s15, [x22, #0x0]\n"
+ "ldr s21, [x21, #0x0]\n"
"b 97f\n"
"95:" // Height 3: Partial accumulate: partial_2_0
"tbz x9, #1, 96f\n"
- "ldr d8, [x27], #0x8\n"
- "ldr d14, [x23], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d20, [x22], #0x8\n"
+ "ldr d8, [x26], #0x8\n"
+ "ldr d14, [x22], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d20, [x21], #0x8\n"
"tbz x9, #0, 97f\n"
- "ld1 { v8.s }[2], [x27]\n"
- "ld1 { v14.s }[2], [x23]\n"
- "ld1 { v20.s }[2], [x22]\n"
+ "ld1 { v8.s }[2], [x26]\n"
+ "ld1 { v14.s }[2], [x22]\n"
+ "ld1 { v20.s }[2], [x21]\n"
"b 97f\n"
"96:" // Height 3: Partial accumulate: partial_1_0
- "ldr s8, [x27, #0x0]\n"
- "ldr s14, [x23, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s20, [x22, #0x0]\n"
+ "ldr s8, [x26, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s14, [x22, #0x0]\n"
+ "ldr s20, [x21, #0x0]\n"
"97:" // Height 3: Partial accumulate: Done
- "sub x27, x27, x20\n"
+ "sub x26, x26, x19\n"
"b 100f\n"
"98:" // Height 3: full accumulate
- "ldr q8, [x27, #0x0]\n"
- "ldr q9, [x27, #0x10]\n"
- "ldr q10, [x27, #0x20]\n"
- "ldr q11, [x27, #0x30]\n"
- "ldr q12, [x27, #0x40]\n"
- "ldr q13, [x27, #0x50]\n"
- "ldr q14, [x23, #0x0]\n"
- "ldr q15, [x23, #0x10]\n"
- "ldr q16, [x23, #0x20]\n"
- "ldr q17, [x23, #0x30]\n"
- "ldr q18, [x23, #0x40]\n"
- "ldr q19, [x23, #0x50]\n"
- "ldr q20, [x22, #0x0]\n"
- "ldr q21, [x22, #0x10]\n"
- "ldr q22, [x22, #0x20]\n"
- "ldr q23, [x22, #0x30]\n"
- "ldr q24, [x22, #0x40]\n"
- "ldr q25, [x22, #0x50]\n"
+ "ldr q8, [x26, #0x0]\n"
+ "ldr q9, [x26, #0x10]\n"
+ "ldr q10, [x26, #0x20]\n"
+ "ldr q11, [x26, #0x30]\n"
+ "ldr q12, [x26, #0x40]\n"
+ "ldr q13, [x26, #0x50]\n"
+ "ldr q14, [x22, #0x0]\n"
+ "ldr q15, [x22, #0x10]\n"
+ "ldr q16, [x22, #0x20]\n"
+ "ldr q17, [x22, #0x30]\n"
+ "ldr q18, [x22, #0x40]\n"
+ "ldr q19, [x22, #0x50]\n"
+ "ldr q20, [x21, #0x0]\n"
+ "ldr q21, [x21, #0x10]\n"
+ "ldr q22, [x21, #0x20]\n"
+ "ldr q23, [x21, #0x30]\n"
+ "ldr q24, [x21, #0x40]\n"
+ "ldr q25, [x21, #0x50]\n"
"b 100f\n"
"99:" // Height 3: no accumulate
"movi v8.16b, #0x0\n"
@@ -1233,64 +1233,64 @@ void a64_hybrid_fp32_mla_4x24 (
"movi v24.16b, #0x0\n"
"movi v25.16b, #0x0\n"
"100:" // Height 3: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"101:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 102f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "ldr x22, [x21, #0x10]\n"
- "cbnz x26, 103f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x22, x22, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "cbnz x25, 103f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
"b 103f\n"
"102:" // Height 3: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"103:" // Height 3: input setup done
- "cmp x25, #0x4\n"
+ "cmp x24, #0x4\n"
"blt 106f\n"
- "ldr q0, [x24, #0x0]\n"
- "ldr q1, [x23, #0x0]\n"
- "cmp x25, #0x8\n"
- "ldr q2, [x22, #0x0]\n"
+ "ldr q0, [x23, #0x0]\n"
+ "ldr q1, [x22, #0x0]\n"
+ "cmp x24, #0x8\n"
+ "ldr q2, [x21, #0x0]\n"
"ldr q4, [x28, #0x0]\n"
- "ldr q5, [x28, #0x10]\n"
- "ldr q6, [x28, #0x20]\n"
- "ldr q7, [x28, #0x30]\n"
"blt 105f\n"
"104:" // Height 3: Multiply loop: Main loop head
"fmla v8.4s, v4.4s, v0.s[0]\n"
+ "ldr q5, [x28, #0x10]\n"
+ "add x23, x23, #0x10\n"
"fmla v14.4s, v4.4s, v1.s[0]\n"
- "sub x25, x25, #0x4\n"
- "add x24, x24, #0x10\n"
+ "ldr q6, [x28, #0x20]\n"
+ "add x22, x22, #0x10\n"
"fmla v20.4s, v4.4s, v2.s[0]\n"
- "ldr q4, [x28, #0x40]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x21, x21, #0x10\n"
"fmla v9.4s, v5.4s, v0.s[0]\n"
- "add x23, x23, #0x10\n"
+ "ldr q4, [x28, #0x40]\n"
+ "sub x24, x24, #0x4\n"
"fmla v15.4s, v5.4s, v1.s[0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "cmp x24, #0x8\n"
"fmla v21.4s, v5.4s, v2.s[0]\n"
"ldr q5, [x28, #0x50]\n"
- "add x22, x22, #0x10\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
"fmla v16.4s, v6.4s, v1.s[0]\n"
- "cmp x25, #0x8\n"
- "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
"fmla v22.4s, v6.4s, v2.s[0]\n"
"ldr q6, [x28, #0x60]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
"fmla v17.4s, v7.4s, v1.s[0]\n"
"fmla v23.4s, v7.4s, v2.s[0]\n"
"ldr q7, [x28, #0x70]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
"fmla v12.4s, v4.4s, v0.s[0]\n"
"fmla v18.4s, v4.4s, v1.s[0]\n"
"fmla v24.4s, v4.4s, v2.s[0]\n"
@@ -1363,40 +1363,40 @@ void a64_hybrid_fp32_mla_4x24 (
"fmla v11.4s, v5.4s, v0.s[3]\n"
"fmla v17.4s, v5.4s, v1.s[3]\n"
"fmla v23.4s, v5.4s, v2.s[3]\n"
- "ldr q5, [x28, #0x10]\n"
"fmla v12.4s, v6.4s, v0.s[3]\n"
"fmla v18.4s, v6.4s, v1.s[3]\n"
"fmla v24.4s, v6.4s, v2.s[3]\n"
- "ldr q6, [x28, #0x20]\n"
"fmla v13.4s, v7.4s, v0.s[3]\n"
- "ldr q0, [x24, #0x0]\n"
+ "ldr q0, [x23, #0x0]\n"
"fmla v19.4s, v7.4s, v1.s[3]\n"
- "ldr q1, [x23, #0x0]\n"
+ "ldr q1, [x22, #0x0]\n"
"fmla v25.4s, v7.4s, v2.s[3]\n"
- "ldr q2, [x22, #0x0]\n"
- "ldr q7, [x28, #0x30]\n"
+ "ldr q2, [x21, #0x0]\n"
"bge 104b\n"
"105:" // Height 3: Multiply loop: Single iteration only
"fmla v8.4s, v4.4s, v0.s[0]\n"
+ "ldr q5, [x28, #0x10]\n"
+ "sub x24, x24, #0x4\n"
"fmla v14.4s, v4.4s, v1.s[0]\n"
- "add x24, x24, #0x10\n"
+ "ldr q6, [x28, #0x20]\n"
"add x23, x23, #0x10\n"
"fmla v20.4s, v4.4s, v2.s[0]\n"
- "ldr q4, [x28, #0x40]\n"
- "fmla v9.4s, v5.4s, v0.s[0]\n"
+ "ldr q7, [x28, #0x30]\n"
"add x22, x22, #0x10\n"
+ "fmla v9.4s, v5.4s, v0.s[0]\n"
+ "ldr q4, [x28, #0x40]\n"
+ "add x21, x21, #0x10\n"
"fmla v15.4s, v5.4s, v1.s[0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"fmla v21.4s, v5.4s, v2.s[0]\n"
- "ldr q5, [x28, #0x50]\n"
- "sub x25, x25, #0x4\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
+ "ldr q5, [x28, #0x50]\n"
"fmla v16.4s, v6.4s, v1.s[0]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
"fmla v22.4s, v6.4s, v2.s[0]\n"
"ldr q6, [x28, #0x60]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
"fmla v17.4s, v7.4s, v1.s[0]\n"
"fmla v23.4s, v7.4s, v2.s[0]\n"
"ldr q7, [x28, #0x70]\n"
@@ -1478,27 +1478,27 @@ void a64_hybrid_fp32_mla_4x24 (
"fmla v19.4s, v7.4s, v1.s[3]\n"
"fmla v25.4s, v7.4s, v2.s[3]\n"
"106:" // Height 3: Multiply loop: Main loop skip
- "cbz x25, 108f\n"
+ "cbz x24, 108f\n"
"107:" // Height 3: Multiply loop: Odd block loop
- "ldr s0, [x24], #0x4\n"
- "ldr s1, [x23], #0x4\n"
- "sub x25, x25, #0x1\n"
- "ldr s2, [x22], #0x4\n"
+ "ldr s0, [x23], #0x4\n"
+ "sub x24, x24, #0x1\n"
+ "ldr s1, [x22], #0x4\n"
+ "ldr s2, [x21], #0x4\n"
"ldr q4, [x28, #0x0]\n"
"fmla v8.4s, v4.4s, v0.s[0]\n"
- "fmla v14.4s, v4.4s, v1.s[0]\n"
"ldr q5, [x28, #0x10]\n"
+ "fmla v14.4s, v4.4s, v1.s[0]\n"
"ldr q6, [x28, #0x20]\n"
"fmla v20.4s, v4.4s, v2.s[0]\n"
- "fmla v9.4s, v5.4s, v0.s[0]\n"
"ldr q7, [x28, #0x30]\n"
"ldr q4, [x28, #0x40]\n"
+ "fmla v9.4s, v5.4s, v0.s[0]\n"
"fmla v15.4s, v5.4s, v1.s[0]\n"
"fmla v21.4s, v5.4s, v2.s[0]\n"
"ldr q5, [x28, #0x50]\n"
+ "add x28, x28, #0x60\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
"fmla v16.4s, v6.4s, v1.s[0]\n"
- "add x28, x28, #0x60\n"
"fmla v22.4s, v6.4s, v2.s[0]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
"fmla v17.4s, v7.4s, v1.s[0]\n"
@@ -1509,243 +1509,243 @@ void a64_hybrid_fp32_mla_4x24 (
"fmla v13.4s, v5.4s, v0.s[0]\n"
"fmla v19.4s, v5.4s, v1.s[0]\n"
"fmla v25.4s, v5.4s, v2.s[0]\n"
- "cbnz x25, 107b\n"
+ "cbnz x24, 107b\n"
"108:" // Height 3: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 101b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "prfm pstl1keep, [x27, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x22, x26, x19, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
"tbz %x[flags], #1, 109f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v8.4s, v8.4s, v1.4s\n"
- "fmin v9.4s, v9.4s, v1.4s\n"
- "fmin v10.4s, v10.4s, v1.4s\n"
- "fmin v11.4s, v11.4s, v1.4s\n"
- "fmin v12.4s, v12.4s, v1.4s\n"
- "fmin v13.4s, v13.4s, v1.4s\n"
- "fmin v14.4s, v14.4s, v1.4s\n"
- "fmin v15.4s, v15.4s, v1.4s\n"
- "fmin v16.4s, v16.4s, v1.4s\n"
- "fmin v17.4s, v17.4s, v1.4s\n"
- "fmin v18.4s, v18.4s, v1.4s\n"
- "fmin v19.4s, v19.4s, v1.4s\n"
- "fmin v20.4s, v20.4s, v1.4s\n"
- "fmin v21.4s, v21.4s, v1.4s\n"
- "fmin v22.4s, v22.4s, v1.4s\n"
- "fmin v23.4s, v23.4s, v1.4s\n"
- "fmin v24.4s, v24.4s, v1.4s\n"
- "fmin v25.4s, v25.4s, v1.4s\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v15.4s, v15.4s, v0.4s\n"
- "fmax v16.4s, v16.4s, v0.4s\n"
- "fmax v17.4s, v17.4s, v0.4s\n"
- "fmax v18.4s, v18.4s, v0.4s\n"
- "fmax v19.4s, v19.4s, v0.4s\n"
- "fmax v20.4s, v20.4s, v0.4s\n"
- "fmax v21.4s, v21.4s, v0.4s\n"
- "fmax v22.4s, v22.4s, v0.4s\n"
- "fmax v23.4s, v23.4s, v0.4s\n"
- "fmax v24.4s, v24.4s, v0.4s\n"
- "fmax v25.4s, v25.4s, v0.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v8.4s, v8.4s, v0.4s\n"
+ "fmin v9.4s, v9.4s, v0.4s\n"
+ "fmin v10.4s, v10.4s, v0.4s\n"
+ "fmin v11.4s, v11.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v0.4s\n"
+ "fmin v13.4s, v13.4s, v0.4s\n"
+ "fmin v14.4s, v14.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmin v15.4s, v15.4s, v0.4s\n"
+ "fmin v16.4s, v16.4s, v0.4s\n"
+ "fmin v17.4s, v17.4s, v0.4s\n"
+ "fmax v15.4s, v15.4s, v1.4s\n"
+ "fmax v16.4s, v16.4s, v1.4s\n"
+ "fmax v17.4s, v17.4s, v1.4s\n"
+ "fmin v18.4s, v18.4s, v0.4s\n"
+ "fmin v19.4s, v19.4s, v0.4s\n"
+ "fmin v20.4s, v20.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v1.4s\n"
+ "fmax v19.4s, v19.4s, v1.4s\n"
+ "fmax v20.4s, v20.4s, v1.4s\n"
+ "fmin v21.4s, v21.4s, v0.4s\n"
+ "fmin v22.4s, v22.4s, v0.4s\n"
+ "fmin v23.4s, v23.4s, v0.4s\n"
+ "fmax v21.4s, v21.4s, v1.4s\n"
+ "fmax v22.4s, v22.4s, v1.4s\n"
+ "fmax v23.4s, v23.4s, v1.4s\n"
+ "fmin v24.4s, v24.4s, v0.4s\n"
+ "fmin v25.4s, v25.4s, v0.4s\n"
+ "fmax v24.4s, v24.4s, v1.4s\n"
+ "fmax v25.4s, v25.4s, v1.4s\n"
"109:" // Height 3: No activation
"cmp x9, #0x18\n"
"bge 122f\n"
"tbz x9, #4, 113f\n"
- "st1 { v8.4s }, [x27], #0x10\n"
- "st1 { v9.4s }, [x27], #0x10\n"
- "st1 { v10.4s }, [x27], #0x10\n"
- "st1 { v11.4s }, [x27], #0x10\n"
- "st1 { v14.4s }, [x23], #0x10\n"
- "st1 { v15.4s }, [x23], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v21.4s }, [x22], #0x10\n"
- "st1 { v22.4s }, [x22], #0x10\n"
- "st1 { v23.4s }, [x22], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v9.4s }, [x26], #0x10\n"
+ "st1 { v10.4s }, [x26], #0x10\n"
+ "st1 { v11.4s }, [x26], #0x10\n"
+ "st1 { v14.4s }, [x22], #0x10\n"
+ "st1 { v15.4s }, [x22], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v21.4s }, [x21], #0x10\n"
+ "st1 { v22.4s }, [x21], #0x10\n"
+ "st1 { v23.4s }, [x21], #0x10\n"
"tbz x9, #2, 111f\n"
- "st1 { v12.4s }, [x27], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
"tbz x9, #1, 110f\n"
- "str d13, [x27], #0x8\n"
- "str d19, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
+ "str d13, [x26], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "str d25, [x21], #0x8\n"
"tbz x9, #0, 121f\n"
- "st1 { v13.s }[2], [x27]\n"
- "st1 { v19.s }[2], [x23]\n"
- "st1 { v25.s }[2], [x22]\n"
+ "st1 { v13.s }[2], [x26]\n"
+ "st1 { v19.s }[2], [x22]\n"
+ "st1 { v25.s }[2], [x21]\n"
"b 121f\n"
"110:" // Height 3: Partial direct writeback: partial_1_20
"tbz x9, #0, 121f\n"
- "str s13, [x27, #0x0]\n"
- "str s19, [x23, #0x0]\n"
- "str s25, [x22, #0x0]\n"
+ "str s13, [x26, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
+ "str s25, [x21, #0x0]\n"
"b 121f\n"
"111:" // Height 3: Partial direct writeback: partial_2_16
"tbz x9, #1, 112f\n"
- "str d12, [x27], #0x8\n"
- "str d18, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d18, [x22], #0x8\n"
+ "str d24, [x21], #0x8\n"
"tbz x9, #0, 121f\n"
- "st1 { v12.s }[2], [x27]\n"
- "st1 { v18.s }[2], [x23]\n"
- "st1 { v24.s }[2], [x22]\n"
+ "st1 { v12.s }[2], [x26]\n"
+ "st1 { v18.s }[2], [x22]\n"
+ "st1 { v24.s }[2], [x21]\n"
"b 121f\n"
"112:" // Height 3: Partial direct writeback: partial_1_16
"tbz x9, #0, 121f\n"
- "str s12, [x27, #0x0]\n"
- "str s18, [x23, #0x0]\n"
- "str s24, [x22, #0x0]\n"
+ "str s12, [x26, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
+ "str s24, [x21, #0x0]\n"
"b 121f\n"
"113:" // Height 3: Partial direct writeback: partial_8_0
"tbz x9, #3, 117f\n"
- "st1 { v8.4s }, [x27], #0x10\n"
- "st1 { v9.4s }, [x27], #0x10\n"
- "st1 { v14.4s }, [x23], #0x10\n"
- "st1 { v15.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v21.4s }, [x22], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v9.4s }, [x26], #0x10\n"
+ "st1 { v14.4s }, [x22], #0x10\n"
+ "st1 { v15.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v21.4s }, [x21], #0x10\n"
"tbz x9, #2, 115f\n"
- "st1 { v10.4s }, [x27], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v22.4s }, [x22], #0x10\n"
+ "st1 { v10.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v22.4s }, [x21], #0x10\n"
"tbz x9, #1, 114f\n"
- "str d11, [x27], #0x8\n"
- "str d17, [x23], #0x8\n"
- "str d23, [x22], #0x8\n"
+ "str d11, [x26], #0x8\n"
+ "str d17, [x22], #0x8\n"
+ "str d23, [x21], #0x8\n"
"tbz x9, #0, 121f\n"
- "st1 { v11.s }[2], [x27]\n"
- "st1 { v17.s }[2], [x23]\n"
- "st1 { v23.s }[2], [x22]\n"
+ "st1 { v11.s }[2], [x26]\n"
+ "st1 { v17.s }[2], [x22]\n"
+ "st1 { v23.s }[2], [x21]\n"
"b 121f\n"
"114:" // Height 3: Partial direct writeback: partial_1_12
"tbz x9, #0, 121f\n"
- "str s11, [x27, #0x0]\n"
- "str s17, [x23, #0x0]\n"
- "str s23, [x22, #0x0]\n"
+ "str s11, [x26, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
+ "str s23, [x21, #0x0]\n"
"b 121f\n"
"115:" // Height 3: Partial direct writeback: partial_2_8
"tbz x9, #1, 116f\n"
- "str d10, [x27], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d22, [x22], #0x8\n"
+ "str d10, [x26], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "str d22, [x21], #0x8\n"
"tbz x9, #0, 121f\n"
- "st1 { v10.s }[2], [x27]\n"
- "st1 { v16.s }[2], [x23]\n"
- "st1 { v22.s }[2], [x22]\n"
+ "st1 { v10.s }[2], [x26]\n"
+ "st1 { v16.s }[2], [x22]\n"
+ "st1 { v22.s }[2], [x21]\n"
"b 121f\n"
"116:" // Height 3: Partial direct writeback: partial_1_8
"tbz x9, #0, 121f\n"
- "str s10, [x27, #0x0]\n"
- "str s16, [x23, #0x0]\n"
- "str s22, [x22, #0x0]\n"
+ "str s10, [x26, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
+ "str s22, [x21, #0x0]\n"
"b 121f\n"
"117:" // Height 3: Partial direct writeback: partial_4_0
"tbz x9, #2, 119f\n"
- "st1 { v8.4s }, [x27], #0x10\n"
- "st1 { v14.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v14.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
"tbz x9, #1, 118f\n"
- "str d9, [x27], #0x8\n"
- "str d15, [x23], #0x8\n"
- "str d21, [x22], #0x8\n"
+ "str d9, [x26], #0x8\n"
+ "str d15, [x22], #0x8\n"
+ "str d21, [x21], #0x8\n"
"tbz x9, #0, 121f\n"
- "st1 { v9.s }[2], [x27]\n"
- "st1 { v15.s }[2], [x23]\n"
- "st1 { v21.s }[2], [x22]\n"
+ "st1 { v9.s }[2], [x26]\n"
+ "st1 { v15.s }[2], [x22]\n"
+ "st1 { v21.s }[2], [x21]\n"
"b 121f\n"
"118:" // Height 3: Partial direct writeback: partial_1_4
"tbz x9, #0, 121f\n"
- "str s9, [x27, #0x0]\n"
- "str s15, [x23, #0x0]\n"
- "str s21, [x22, #0x0]\n"
+ "str s9, [x26, #0x0]\n"
+ "str s15, [x22, #0x0]\n"
+ "str s21, [x21, #0x0]\n"
"b 121f\n"
"119:" // Height 3: Partial direct writeback: partial_2_0
"tbz x9, #1, 120f\n"
- "str d8, [x27], #0x8\n"
- "str d14, [x23], #0x8\n"
- "str d20, [x22], #0x8\n"
+ "str d8, [x26], #0x8\n"
+ "str d14, [x22], #0x8\n"
+ "str d20, [x21], #0x8\n"
"tbz x9, #0, 121f\n"
- "st1 { v8.s }[2], [x27]\n"
- "st1 { v14.s }[2], [x23]\n"
- "st1 { v20.s }[2], [x22]\n"
+ "st1 { v8.s }[2], [x26]\n"
+ "st1 { v14.s }[2], [x22]\n"
+ "st1 { v20.s }[2], [x21]\n"
"b 121f\n"
"120:" // Height 3: Partial direct writeback: partial_1_0
- "str s8, [x27, #0x0]\n"
- "str s14, [x23, #0x0]\n"
- "str s20, [x22, #0x0]\n"
+ "str s8, [x26, #0x0]\n"
+ "str s14, [x22, #0x0]\n"
+ "str s20, [x21, #0x0]\n"
"121:" // Height 3: Partial direct writeback: Done
"b 123f\n"
"122:" // Height 3: Full writeback
- "str q8, [x27, #0x0]\n"
- "str q9, [x27, #0x10]\n"
- "str q10, [x27, #0x20]\n"
- "str q11, [x27, #0x30]\n"
- "str q12, [x27, #0x40]\n"
- "str q13, [x27, #0x50]\n"
- "add x27, x27, #0x60\n"
- "str q14, [x23, #0x0]\n"
- "str q15, [x23, #0x10]\n"
- "str q16, [x23, #0x20]\n"
- "str q17, [x23, #0x30]\n"
- "str q18, [x23, #0x40]\n"
- "str q19, [x23, #0x50]\n"
- "str q20, [x22, #0x0]\n"
- "str q21, [x22, #0x10]\n"
- "str q22, [x22, #0x20]\n"
- "str q23, [x22, #0x30]\n"
- "str q24, [x22, #0x40]\n"
- "str q25, [x22, #0x50]\n"
+ "str q8, [x26, #0x0]\n"
+ "str q9, [x26, #0x10]\n"
+ "str q10, [x26, #0x20]\n"
+ "str q11, [x26, #0x30]\n"
+ "str q12, [x26, #0x40]\n"
+ "str q13, [x26, #0x50]\n"
+ "add x26, x26, #0x60\n"
+ "str q14, [x22, #0x0]\n"
+ "str q15, [x22, #0x10]\n"
+ "str q16, [x22, #0x20]\n"
+ "str q17, [x22, #0x30]\n"
+ "str q18, [x22, #0x40]\n"
+ "str q19, [x22, #0x50]\n"
+ "str q20, [x21, #0x0]\n"
+ "str q21, [x21, #0x10]\n"
+ "str q22, [x21, #0x20]\n"
+ "str q23, [x21, #0x30]\n"
+ "str q24, [x21, #0x40]\n"
+ "str q25, [x21, #0x50]\n"
"123:" // Height 3: Writeback done
"subs x9, x9, #0x18\n"
"bgt 84b\n"
"b 166f\n"
"124:" // Height 4
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x10\n"
- "mov x10, %x[bias]\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x27, %x[bias]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "mov x26, %x[output_ptr]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x19, #0x10\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"125:" // Height 4: Column loop
- "cbz x10, 126f\n"
- "ldr q8, [x10, #0x0]\n"
- "ldr q9, [x10, #0x10]\n"
+ "cbz x27, 126f\n"
+ "ldr q8, [x27, #0x0]\n"
"mov v14.16b, v8.16b\n"
+ "ldr q9, [x27, #0x10]\n"
+ "mov v20.16b, v8.16b\n"
+ "ldr q10, [x27, #0x20]\n"
+ "mov v26.16b, v8.16b\n"
+ "ldr q11, [x27, #0x30]\n"
+ "ldr q12, [x27, #0x40]\n"
"mov v15.16b, v9.16b\n"
- "ldr q10, [x10, #0x20]\n"
- "ldr q11, [x10, #0x30]\n"
+ "ldr q13, [x27, #0x50]\n"
+ "add x27, x27, #0x60\n"
"mov v16.16b, v10.16b\n"
+ "mov v21.16b, v9.16b\n"
"mov v17.16b, v11.16b\n"
- "ldr q12, [x10, #0x40]\n"
- "ldr q13, [x10, #0x50]\n"
"mov v18.16b, v12.16b\n"
"mov v19.16b, v13.16b\n"
- "mov v20.16b, v8.16b\n"
- "mov v21.16b, v9.16b\n"
- "add x10, x10, #0x60\n"
"mov v22.16b, v10.16b\n"
"mov v23.16b, v11.16b\n"
"mov v24.16b, v12.16b\n"
"mov v25.16b, v13.16b\n"
- "mov v26.16b, v8.16b\n"
"mov v27.16b, v9.16b\n"
"mov v28.16b, v10.16b\n"
"mov v29.16b, v11.16b\n"
@@ -1754,204 +1754,204 @@ void a64_hybrid_fp32_mla_4x24 (
"b 141f\n"
"126:" // Height 4: no bias
"tbz %x[flags], #0, 140f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x9, #0x18\n"
- "add x21, x22, x20, LSL #2\n"
+ "add x22, x26, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"bge 139f\n"
"tbz x9, #4, 130f\n"
- "ld1 { v8.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
- "ld1 { v9.4s }, [x27], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v27.4s }, [x21], #0x10\n"
- "ld1 { v10.4s }, [x27], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
- "ld1 { v11.4s }, [x27], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v23.4s }, [x22], #0x10\n"
- "ld1 { v29.4s }, [x21], #0x10\n"
+ "ld1 { v8.4s }, [x26], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v26.4s }, [x20], #0x10\n"
+ "ld1 { v9.4s }, [x26], #0x10\n"
+ "ld1 { v15.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "ld1 { v27.4s }, [x20], #0x10\n"
+ "ld1 { v10.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
+ "ld1 { v28.4s }, [x20], #0x10\n"
+ "ld1 { v11.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v23.4s }, [x21], #0x10\n"
+ "ld1 { v29.4s }, [x20], #0x10\n"
"tbz x9, #2, 128f\n"
- "ld1 { v12.4s }, [x27], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
- "ld1 { v30.4s }, [x21], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "ld1 { v24.4s }, [x21], #0x10\n"
+ "ld1 { v30.4s }, [x20], #0x10\n"
"tbz x9, #1, 127f\n"
- "ldr d13, [x27], #0x8\n"
- "ldr d19, [x23], #0x8\n"
- "mov x20, #0x58\n"
- "ldr d25, [x22], #0x8\n"
- "ldr d31, [x21], #0x8\n"
+ "mov x19, #0x58\n"
+ "ldr d13, [x26], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
+ "ldr d25, [x21], #0x8\n"
+ "ldr d31, [x20], #0x8\n"
"tbz x9, #0, 138f\n"
- "ld1 { v13.s }[2], [x27]\n"
- "ld1 { v19.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
- "ld1 { v31.s }[2], [x21]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x22]\n"
+ "ld1 { v25.s }[2], [x21]\n"
+ "ld1 { v31.s }[2], [x20]\n"
"b 138f\n"
"127:" // Height 4: Partial accumulate: partial_1_20
- "mov x20, #0x50\n"
+ "mov x19, #0x50\n"
"tbz x9, #0, 138f\n"
- "ldr s13, [x27, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
- "ldr s31, [x21, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
+ "ldr s25, [x21, #0x0]\n"
+ "ldr s31, [x20, #0x0]\n"
"b 138f\n"
"128:" // Height 4: Partial accumulate: partial_2_16
"tbz x9, #1, 129f\n"
- "ldr d12, [x27], #0x8\n"
- "ldr d18, [x23], #0x8\n"
- "mov x20, #0x48\n"
- "ldr d24, [x22], #0x8\n"
- "ldr d30, [x21], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
+ "ldr d18, [x22], #0x8\n"
+ "mov x19, #0x48\n"
+ "ldr d24, [x21], #0x8\n"
+ "ldr d30, [x20], #0x8\n"
"tbz x9, #0, 138f\n"
- "ld1 { v12.s }[2], [x27]\n"
- "ld1 { v18.s }[2], [x23]\n"
- "ld1 { v24.s }[2], [x22]\n"
- "ld1 { v30.s }[2], [x21]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x22]\n"
+ "ld1 { v24.s }[2], [x21]\n"
+ "ld1 { v30.s }[2], [x20]\n"
"b 138f\n"
"129:" // Height 4: Partial accumulate: partial_1_16
- "mov x20, #0x40\n"
+ "mov x19, #0x40\n"
"tbz x9, #0, 138f\n"
- "ldr s12, [x27, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
- "ldr s24, [x22, #0x0]\n"
- "ldr s30, [x21, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
+ "ldr s24, [x21, #0x0]\n"
+ "ldr s30, [x20, #0x0]\n"
"b 138f\n"
"130:" // Height 4: Partial accumulate: partial_8_0
"tbz x9, #3, 134f\n"
- "ld1 { v8.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
- "ld1 { v9.4s }, [x27], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v27.4s }, [x21], #0x10\n"
+ "ld1 { v8.4s }, [x26], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v26.4s }, [x20], #0x10\n"
+ "ld1 { v9.4s }, [x26], #0x10\n"
+ "ld1 { v15.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "ld1 { v27.4s }, [x20], #0x10\n"
"tbz x9, #2, 132f\n"
- "ld1 { v10.4s }, [x27], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
+ "ld1 { v10.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
+ "ld1 { v28.4s }, [x20], #0x10\n"
"tbz x9, #1, 131f\n"
- "ldr d11, [x27], #0x8\n"
- "ldr d17, [x23], #0x8\n"
- "mov x20, #0x38\n"
- "ldr d23, [x22], #0x8\n"
- "ldr d29, [x21], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d11, [x26], #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "ldr d23, [x21], #0x8\n"
+ "ldr d29, [x20], #0x8\n"
"tbz x9, #0, 138f\n"
- "ld1 { v11.s }[2], [x27]\n"
- "ld1 { v17.s }[2], [x23]\n"
- "ld1 { v23.s }[2], [x22]\n"
- "ld1 { v29.s }[2], [x21]\n"
+ "ld1 { v11.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x22]\n"
+ "ld1 { v23.s }[2], [x21]\n"
+ "ld1 { v29.s }[2], [x20]\n"
"b 138f\n"
"131:" // Height 4: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x9, #0, 138f\n"
- "ldr s11, [x27, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
- "ldr s23, [x22, #0x0]\n"
- "ldr s29, [x21, #0x0]\n"
+ "ldr s11, [x26, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
+ "ldr s23, [x21, #0x0]\n"
+ "ldr s29, [x20, #0x0]\n"
"b 138f\n"
"132:" // Height 4: Partial accumulate: partial_2_8
"tbz x9, #1, 133f\n"
- "ldr d10, [x27], #0x8\n"
- "ldr d16, [x23], #0x8\n"
- "mov x20, #0x28\n"
- "ldr d22, [x22], #0x8\n"
- "ldr d28, [x21], #0x8\n"
+ "ldr d10, [x26], #0x8\n"
+ "ldr d16, [x22], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d22, [x21], #0x8\n"
+ "ldr d28, [x20], #0x8\n"
"tbz x9, #0, 138f\n"
- "ld1 { v10.s }[2], [x27]\n"
- "ld1 { v16.s }[2], [x23]\n"
- "ld1 { v22.s }[2], [x22]\n"
- "ld1 { v28.s }[2], [x21]\n"
+ "ld1 { v10.s }[2], [x26]\n"
+ "ld1 { v16.s }[2], [x22]\n"
+ "ld1 { v22.s }[2], [x21]\n"
+ "ld1 { v28.s }[2], [x20]\n"
"b 138f\n"
"133:" // Height 4: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x9, #0, 138f\n"
- "ldr s10, [x27, #0x0]\n"
- "ldr s16, [x23, #0x0]\n"
- "ldr s22, [x22, #0x0]\n"
- "ldr s28, [x21, #0x0]\n"
+ "ldr s10, [x26, #0x0]\n"
+ "ldr s16, [x22, #0x0]\n"
+ "ldr s22, [x21, #0x0]\n"
+ "ldr s28, [x20, #0x0]\n"
"b 138f\n"
"134:" // Height 4: Partial accumulate: partial_4_0
"tbz x9, #2, 136f\n"
- "ld1 { v8.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
+ "ld1 { v8.4s }, [x26], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v26.4s }, [x20], #0x10\n"
"tbz x9, #1, 135f\n"
- "ldr d9, [x27], #0x8\n"
- "ldr d15, [x23], #0x8\n"
- "mov x20, #0x18\n"
- "ldr d21, [x22], #0x8\n"
- "ldr d27, [x21], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d9, [x26], #0x8\n"
+ "ldr d15, [x22], #0x8\n"
+ "ldr d21, [x21], #0x8\n"
+ "ldr d27, [x20], #0x8\n"
"tbz x9, #0, 138f\n"
- "ld1 { v9.s }[2], [x27]\n"
- "ld1 { v15.s }[2], [x23]\n"
- "ld1 { v21.s }[2], [x22]\n"
- "ld1 { v27.s }[2], [x21]\n"
+ "ld1 { v9.s }[2], [x26]\n"
+ "ld1 { v15.s }[2], [x22]\n"
+ "ld1 { v21.s }[2], [x21]\n"
+ "ld1 { v27.s }[2], [x20]\n"
"b 138f\n"
"135:" // Height 4: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x9, #0, 138f\n"
- "ldr s9, [x27, #0x0]\n"
- "ldr s15, [x23, #0x0]\n"
- "ldr s21, [x22, #0x0]\n"
- "ldr s27, [x21, #0x0]\n"
+ "ldr s9, [x26, #0x0]\n"
+ "ldr s15, [x22, #0x0]\n"
+ "ldr s21, [x21, #0x0]\n"
+ "ldr s27, [x20, #0x0]\n"
"b 138f\n"
"136:" // Height 4: Partial accumulate: partial_2_0
"tbz x9, #1, 137f\n"
- "ldr d8, [x27], #0x8\n"
- "ldr d14, [x23], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d20, [x22], #0x8\n"
- "ldr d26, [x21], #0x8\n"
+ "ldr d8, [x26], #0x8\n"
+ "ldr d14, [x22], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d20, [x21], #0x8\n"
+ "ldr d26, [x20], #0x8\n"
"tbz x9, #0, 138f\n"
- "ld1 { v8.s }[2], [x27]\n"
- "ld1 { v14.s }[2], [x23]\n"
- "ld1 { v20.s }[2], [x22]\n"
- "ld1 { v26.s }[2], [x21]\n"
+ "ld1 { v8.s }[2], [x26]\n"
+ "ld1 { v14.s }[2], [x22]\n"
+ "ld1 { v20.s }[2], [x21]\n"
+ "ld1 { v26.s }[2], [x20]\n"
"b 138f\n"
"137:" // Height 4: Partial accumulate: partial_1_0
- "ldr s8, [x27, #0x0]\n"
- "ldr s14, [x23, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s20, [x22, #0x0]\n"
- "ldr s26, [x21, #0x0]\n"
+ "ldr s8, [x26, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s14, [x22, #0x0]\n"
+ "ldr s20, [x21, #0x0]\n"
+ "ldr s26, [x20, #0x0]\n"
"138:" // Height 4: Partial accumulate: Done
- "sub x27, x27, x20\n"
+ "sub x26, x26, x19\n"
"b 141f\n"
"139:" // Height 4: full accumulate
- "ldr q8, [x27, #0x0]\n"
- "ldr q9, [x27, #0x10]\n"
- "ldr q10, [x27, #0x20]\n"
- "ldr q11, [x27, #0x30]\n"
- "ldr q12, [x27, #0x40]\n"
- "ldr q13, [x27, #0x50]\n"
- "ldr q14, [x23, #0x0]\n"
- "ldr q15, [x23, #0x10]\n"
- "ldr q16, [x23, #0x20]\n"
- "ldr q17, [x23, #0x30]\n"
- "ldr q18, [x23, #0x40]\n"
- "ldr q19, [x23, #0x50]\n"
- "ldr q20, [x22, #0x0]\n"
- "ldr q21, [x22, #0x10]\n"
- "ldr q22, [x22, #0x20]\n"
- "ldr q23, [x22, #0x30]\n"
- "ldr q24, [x22, #0x40]\n"
- "ldr q25, [x22, #0x50]\n"
- "ldr q26, [x21, #0x0]\n"
- "ldr q27, [x21, #0x10]\n"
- "ldr q28, [x21, #0x20]\n"
- "ldr q29, [x21, #0x30]\n"
- "ldr q30, [x21, #0x40]\n"
- "ldr q31, [x21, #0x50]\n"
+ "ldr q8, [x26, #0x0]\n"
+ "ldr q9, [x26, #0x10]\n"
+ "ldr q10, [x26, #0x20]\n"
+ "ldr q11, [x26, #0x30]\n"
+ "ldr q12, [x26, #0x40]\n"
+ "ldr q13, [x26, #0x50]\n"
+ "ldr q14, [x22, #0x0]\n"
+ "ldr q15, [x22, #0x10]\n"
+ "ldr q16, [x22, #0x20]\n"
+ "ldr q17, [x22, #0x30]\n"
+ "ldr q18, [x22, #0x40]\n"
+ "ldr q19, [x22, #0x50]\n"
+ "ldr q20, [x21, #0x0]\n"
+ "ldr q21, [x21, #0x10]\n"
+ "ldr q22, [x21, #0x20]\n"
+ "ldr q23, [x21, #0x30]\n"
+ "ldr q24, [x21, #0x40]\n"
+ "ldr q25, [x21, #0x50]\n"
+ "ldr q26, [x20, #0x0]\n"
+ "ldr q27, [x20, #0x10]\n"
+ "ldr q28, [x20, #0x20]\n"
+ "ldr q29, [x20, #0x30]\n"
+ "ldr q30, [x20, #0x40]\n"
+ "ldr q31, [x20, #0x50]\n"
"b 141f\n"
"140:" // Height 4: no accumulate
"movi v8.16b, #0x0\n"
@@ -1979,71 +1979,71 @@ void a64_hybrid_fp32_mla_4x24 (
"movi v30.16b, #0x0\n"
"movi v31.16b, #0x0\n"
"141:" // Height 4: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"142:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 143f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "ldr x22, [x21, #0x10]\n"
- "ldr x21, [x21, #0x18]\n"
- "cbnz x26, 144f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x22, x22, x20, LSL #2\n"
- "add x21, x21, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "ldr x20, [x20, #0x18]\n"
+ "cbnz x25, 144f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "add x20, x20, x19, LSL #2\n"
"b 144f\n"
"143:" // Height 4: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"144:" // Height 4: input setup done
- "cmp x25, #0x4\n"
+ "cmp x24, #0x4\n"
"blt 147f\n"
- "ldr q0, [x24, #0x0]\n"
- "ldr q1, [x23, #0x0]\n"
- "cmp x25, #0x8\n"
- "ldr q2, [x22, #0x0]\n"
- "ldr q3, [x21, #0x0]\n"
+ "ldr q0, [x23, #0x0]\n"
+ "ldr q1, [x22, #0x0]\n"
+ "cmp x24, #0x8\n"
+ "ldr q2, [x21, #0x0]\n"
+ "ldr q3, [x20, #0x0]\n"
"ldr q4, [x28, #0x0]\n"
- "ldr q5, [x28, #0x10]\n"
- "ldr q6, [x28, #0x20]\n"
- "ldr q7, [x28, #0x30]\n"
"blt 146f\n"
"145:" // Height 4: Multiply loop: Main loop head
"fmla v8.4s, v4.4s, v0.s[0]\n"
+ "ldr q5, [x28, #0x10]\n"
+ "add x23, x23, #0x10\n"
"fmla v14.4s, v4.4s, v1.s[0]\n"
- "sub x25, x25, #0x4\n"
- "add x24, x24, #0x10\n"
+ "ldr q6, [x28, #0x20]\n"
+ "add x22, x22, #0x10\n"
"fmla v20.4s, v4.4s, v2.s[0]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x21, x21, #0x10\n"
"fmla v26.4s, v4.4s, v3.s[0]\n"
"ldr q4, [x28, #0x40]\n"
- "add x23, x23, #0x10\n"
+ "add x20, x20, #0x10\n"
"fmla v9.4s, v5.4s, v0.s[0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "sub x24, x24, #0x4\n"
"fmla v15.4s, v5.4s, v1.s[0]\n"
- "add x22, x22, #0x10\n"
- "add x21, x21, #0x10\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "cmp x24, #0x8\n"
"fmla v21.4s, v5.4s, v2.s[0]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
"fmla v27.4s, v5.4s, v3.s[0]\n"
"ldr q5, [x28, #0x50]\n"
- "cmp x25, #0x8\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
"fmla v16.4s, v6.4s, v1.s[0]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
"fmla v22.4s, v6.4s, v2.s[0]\n"
"fmla v28.4s, v6.4s, v3.s[0]\n"
"ldr q6, [x28, #0x60]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
"fmla v17.4s, v7.4s, v1.s[0]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
"fmla v23.4s, v7.4s, v2.s[0]\n"
"fmla v29.4s, v7.4s, v3.s[0]\n"
"ldr q7, [x28, #0x70]\n"
@@ -2137,47 +2137,47 @@ void a64_hybrid_fp32_mla_4x24 (
"fmla v17.4s, v5.4s, v1.s[3]\n"
"fmla v23.4s, v5.4s, v2.s[3]\n"
"fmla v29.4s, v5.4s, v3.s[3]\n"
- "ldr q5, [x28, #0x10]\n"
"fmla v12.4s, v6.4s, v0.s[3]\n"
"fmla v18.4s, v6.4s, v1.s[3]\n"
"fmla v24.4s, v6.4s, v2.s[3]\n"
"fmla v30.4s, v6.4s, v3.s[3]\n"
- "ldr q6, [x28, #0x20]\n"
"fmla v13.4s, v7.4s, v0.s[3]\n"
- "ldr q0, [x24, #0x0]\n"
+ "ldr q0, [x23, #0x0]\n"
"fmla v19.4s, v7.4s, v1.s[3]\n"
- "ldr q1, [x23, #0x0]\n"
+ "ldr q1, [x22, #0x0]\n"
"fmla v25.4s, v7.4s, v2.s[3]\n"
- "ldr q2, [x22, #0x0]\n"
+ "ldr q2, [x21, #0x0]\n"
"fmla v31.4s, v7.4s, v3.s[3]\n"
- "ldr q3, [x21, #0x0]\n"
- "ldr q7, [x28, #0x30]\n"
+ "ldr q3, [x20, #0x0]\n"
"bge 145b\n"
"146:" // Height 4: Multiply loop: Single iteration only
"fmla v8.4s, v4.4s, v0.s[0]\n"
+ "ldr q5, [x28, #0x10]\n"
+ "sub x24, x24, #0x4\n"
"fmla v14.4s, v4.4s, v1.s[0]\n"
- "add x24, x24, #0x10\n"
+ "ldr q6, [x28, #0x20]\n"
"add x23, x23, #0x10\n"
"fmla v20.4s, v4.4s, v2.s[0]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x22, x22, #0x10\n"
"fmla v26.4s, v4.4s, v3.s[0]\n"
"ldr q4, [x28, #0x40]\n"
- "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
"fmla v9.4s, v5.4s, v0.s[0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "add x20, x20, #0x10\n"
"fmla v15.4s, v5.4s, v1.s[0]\n"
- "add x21, x21, #0x10\n"
- "sub x25, x25, #0x4\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
"fmla v21.4s, v5.4s, v2.s[0]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
"fmla v27.4s, v5.4s, v3.s[0]\n"
"ldr q5, [x28, #0x50]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
"fmla v16.4s, v6.4s, v1.s[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
"fmla v22.4s, v6.4s, v2.s[0]\n"
"fmla v28.4s, v6.4s, v3.s[0]\n"
"ldr q6, [x28, #0x60]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
"fmla v17.4s, v7.4s, v1.s[0]\n"
"fmla v23.4s, v7.4s, v2.s[0]\n"
@@ -2281,20 +2281,20 @@ void a64_hybrid_fp32_mla_4x24 (
"fmla v25.4s, v7.4s, v2.s[3]\n"
"fmla v31.4s, v7.4s, v3.s[3]\n"
"147:" // Height 4: Multiply loop: Main loop skip
- "cbz x25, 149f\n"
+ "cbz x24, 149f\n"
"148:" // Height 4: Multiply loop: Odd block loop
- "ldr s0, [x24], #0x4\n"
- "ldr s1, [x23], #0x4\n"
- "sub x25, x25, #0x1\n"
- "ldr s2, [x22], #0x4\n"
- "ldr s3, [x21], #0x4\n"
+ "ldr s0, [x23], #0x4\n"
+ "sub x24, x24, #0x1\n"
+ "ldr s1, [x22], #0x4\n"
+ "ldr s2, [x21], #0x4\n"
+ "ldr s3, [x20], #0x4\n"
"ldr q4, [x28, #0x0]\n"
- "ldr q5, [x28, #0x10]\n"
"fmla v8.4s, v4.4s, v0.s[0]\n"
+ "ldr q5, [x28, #0x10]\n"
"fmla v14.4s, v4.4s, v1.s[0]\n"
"ldr q6, [x28, #0x20]\n"
- "ldr q7, [x28, #0x30]\n"
"fmla v20.4s, v4.4s, v2.s[0]\n"
+ "ldr q7, [x28, #0x30]\n"
"fmla v26.4s, v4.4s, v3.s[0]\n"
"ldr q4, [x28, #0x40]\n"
"fmla v9.4s, v5.4s, v0.s[0]\n"
@@ -2319,275 +2319,275 @@ void a64_hybrid_fp32_mla_4x24 (
"fmla v19.4s, v5.4s, v1.s[0]\n"
"fmla v25.4s, v5.4s, v2.s[0]\n"
"fmla v31.4s, v5.4s, v3.s[0]\n"
- "cbnz x25, 148b\n"
+ "cbnz x24, 148b\n"
"149:" // Height 4: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 142b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "prfm pstl1keep, [x27, #0x0]\n"
- "add x21, x22, x20, LSL #2\n"
- "prfm pstl1keep, [x23, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x22, x26, x19, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #2\n"
"prfm pstl1keep, [x21, #0x0]\n"
+ "add x20, x21, x19, LSL #2\n"
+ "prfm pstl1keep, [x20, #0x0]\n"
"tbz %x[flags], #1, 150f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v8.4s, v8.4s, v1.4s\n"
- "fmin v9.4s, v9.4s, v1.4s\n"
- "fmin v10.4s, v10.4s, v1.4s\n"
- "fmin v11.4s, v11.4s, v1.4s\n"
- "fmin v12.4s, v12.4s, v1.4s\n"
- "fmin v13.4s, v13.4s, v1.4s\n"
- "fmin v14.4s, v14.4s, v1.4s\n"
- "fmin v15.4s, v15.4s, v1.4s\n"
- "fmin v16.4s, v16.4s, v1.4s\n"
- "fmin v17.4s, v17.4s, v1.4s\n"
- "fmin v18.4s, v18.4s, v1.4s\n"
- "fmin v19.4s, v19.4s, v1.4s\n"
- "fmin v20.4s, v20.4s, v1.4s\n"
- "fmin v21.4s, v21.4s, v1.4s\n"
- "fmin v22.4s, v22.4s, v1.4s\n"
- "fmin v23.4s, v23.4s, v1.4s\n"
- "fmin v24.4s, v24.4s, v1.4s\n"
- "fmin v25.4s, v25.4s, v1.4s\n"
- "fmin v26.4s, v26.4s, v1.4s\n"
- "fmin v27.4s, v27.4s, v1.4s\n"
- "fmin v28.4s, v28.4s, v1.4s\n"
- "fmin v29.4s, v29.4s, v1.4s\n"
- "fmin v30.4s, v30.4s, v1.4s\n"
- "fmin v31.4s, v31.4s, v1.4s\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v15.4s, v15.4s, v0.4s\n"
- "fmax v16.4s, v16.4s, v0.4s\n"
- "fmax v17.4s, v17.4s, v0.4s\n"
- "fmax v18.4s, v18.4s, v0.4s\n"
- "fmax v19.4s, v19.4s, v0.4s\n"
- "fmax v20.4s, v20.4s, v0.4s\n"
- "fmax v21.4s, v21.4s, v0.4s\n"
- "fmax v22.4s, v22.4s, v0.4s\n"
- "fmax v23.4s, v23.4s, v0.4s\n"
- "fmax v24.4s, v24.4s, v0.4s\n"
- "fmax v25.4s, v25.4s, v0.4s\n"
- "fmax v26.4s, v26.4s, v0.4s\n"
- "fmax v27.4s, v27.4s, v0.4s\n"
- "fmax v28.4s, v28.4s, v0.4s\n"
- "fmax v29.4s, v29.4s, v0.4s\n"
- "fmax v30.4s, v30.4s, v0.4s\n"
- "fmax v31.4s, v31.4s, v0.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v8.4s, v8.4s, v0.4s\n"
+ "fmin v9.4s, v9.4s, v0.4s\n"
+ "fmin v10.4s, v10.4s, v0.4s\n"
+ "fmin v11.4s, v11.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v0.4s\n"
+ "fmin v13.4s, v13.4s, v0.4s\n"
+ "fmin v14.4s, v14.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmin v15.4s, v15.4s, v0.4s\n"
+ "fmin v16.4s, v16.4s, v0.4s\n"
+ "fmin v17.4s, v17.4s, v0.4s\n"
+ "fmax v15.4s, v15.4s, v1.4s\n"
+ "fmax v16.4s, v16.4s, v1.4s\n"
+ "fmax v17.4s, v17.4s, v1.4s\n"
+ "fmin v18.4s, v18.4s, v0.4s\n"
+ "fmin v19.4s, v19.4s, v0.4s\n"
+ "fmin v20.4s, v20.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v1.4s\n"
+ "fmax v19.4s, v19.4s, v1.4s\n"
+ "fmax v20.4s, v20.4s, v1.4s\n"
+ "fmin v21.4s, v21.4s, v0.4s\n"
+ "fmin v22.4s, v22.4s, v0.4s\n"
+ "fmin v23.4s, v23.4s, v0.4s\n"
+ "fmax v21.4s, v21.4s, v1.4s\n"
+ "fmax v22.4s, v22.4s, v1.4s\n"
+ "fmax v23.4s, v23.4s, v1.4s\n"
+ "fmin v24.4s, v24.4s, v0.4s\n"
+ "fmin v25.4s, v25.4s, v0.4s\n"
+ "fmin v26.4s, v26.4s, v0.4s\n"
+ "fmax v24.4s, v24.4s, v1.4s\n"
+ "fmax v25.4s, v25.4s, v1.4s\n"
+ "fmax v26.4s, v26.4s, v1.4s\n"
+ "fmin v27.4s, v27.4s, v0.4s\n"
+ "fmin v28.4s, v28.4s, v0.4s\n"
+ "fmin v29.4s, v29.4s, v0.4s\n"
+ "fmax v27.4s, v27.4s, v1.4s\n"
+ "fmax v28.4s, v28.4s, v1.4s\n"
+ "fmax v29.4s, v29.4s, v1.4s\n"
+ "fmin v30.4s, v30.4s, v0.4s\n"
+ "fmin v31.4s, v31.4s, v0.4s\n"
+ "fmax v30.4s, v30.4s, v1.4s\n"
+ "fmax v31.4s, v31.4s, v1.4s\n"
"150:" // Height 4: No activation
"cmp x9, #0x18\n"
"bge 163f\n"
"tbz x9, #4, 154f\n"
- "st1 { v8.4s }, [x27], #0x10\n"
- "st1 { v9.4s }, [x27], #0x10\n"
- "st1 { v10.4s }, [x27], #0x10\n"
- "st1 { v11.4s }, [x27], #0x10\n"
- "st1 { v14.4s }, [x23], #0x10\n"
- "st1 { v15.4s }, [x23], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v21.4s }, [x22], #0x10\n"
- "st1 { v22.4s }, [x22], #0x10\n"
- "st1 { v23.4s }, [x22], #0x10\n"
- "st1 { v26.4s }, [x21], #0x10\n"
- "st1 { v27.4s }, [x21], #0x10\n"
- "st1 { v28.4s }, [x21], #0x10\n"
- "st1 { v29.4s }, [x21], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v9.4s }, [x26], #0x10\n"
+ "st1 { v10.4s }, [x26], #0x10\n"
+ "st1 { v11.4s }, [x26], #0x10\n"
+ "st1 { v14.4s }, [x22], #0x10\n"
+ "st1 { v15.4s }, [x22], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v21.4s }, [x21], #0x10\n"
+ "st1 { v22.4s }, [x21], #0x10\n"
+ "st1 { v23.4s }, [x21], #0x10\n"
+ "st1 { v26.4s }, [x20], #0x10\n"
+ "st1 { v27.4s }, [x20], #0x10\n"
+ "st1 { v28.4s }, [x20], #0x10\n"
+ "st1 { v29.4s }, [x20], #0x10\n"
"tbz x9, #2, 152f\n"
- "st1 { v12.4s }, [x27], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
- "st1 { v30.4s }, [x21], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
+ "st1 { v30.4s }, [x20], #0x10\n"
"tbz x9, #1, 151f\n"
- "str d13, [x27], #0x8\n"
- "str d19, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
- "str d31, [x21], #0x8\n"
+ "str d13, [x26], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "str d25, [x21], #0x8\n"
+ "str d31, [x20], #0x8\n"
"tbz x9, #0, 162f\n"
- "st1 { v13.s }[2], [x27]\n"
- "st1 { v19.s }[2], [x23]\n"
- "st1 { v25.s }[2], [x22]\n"
- "st1 { v31.s }[2], [x21]\n"
+ "st1 { v13.s }[2], [x26]\n"
+ "st1 { v19.s }[2], [x22]\n"
+ "st1 { v25.s }[2], [x21]\n"
+ "st1 { v31.s }[2], [x20]\n"
"b 162f\n"
"151:" // Height 4: Partial direct writeback: partial_1_20
"tbz x9, #0, 162f\n"
- "str s13, [x27, #0x0]\n"
- "str s19, [x23, #0x0]\n"
- "str s25, [x22, #0x0]\n"
- "str s31, [x21, #0x0]\n"
+ "str s13, [x26, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
+ "str s25, [x21, #0x0]\n"
+ "str s31, [x20, #0x0]\n"
"b 162f\n"
"152:" // Height 4: Partial direct writeback: partial_2_16
"tbz x9, #1, 153f\n"
- "str d12, [x27], #0x8\n"
- "str d18, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
- "str d30, [x21], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d18, [x22], #0x8\n"
+ "str d24, [x21], #0x8\n"
+ "str d30, [x20], #0x8\n"
"tbz x9, #0, 162f\n"
- "st1 { v12.s }[2], [x27]\n"
- "st1 { v18.s }[2], [x23]\n"
- "st1 { v24.s }[2], [x22]\n"
- "st1 { v30.s }[2], [x21]\n"
+ "st1 { v12.s }[2], [x26]\n"
+ "st1 { v18.s }[2], [x22]\n"
+ "st1 { v24.s }[2], [x21]\n"
+ "st1 { v30.s }[2], [x20]\n"
"b 162f\n"
"153:" // Height 4: Partial direct writeback: partial_1_16
"tbz x9, #0, 162f\n"
- "str s12, [x27, #0x0]\n"
- "str s18, [x23, #0x0]\n"
- "str s24, [x22, #0x0]\n"
- "str s30, [x21, #0x0]\n"
+ "str s12, [x26, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
+ "str s24, [x21, #0x0]\n"
+ "str s30, [x20, #0x0]\n"
"b 162f\n"
"154:" // Height 4: Partial direct writeback: partial_8_0
"tbz x9, #3, 158f\n"
- "st1 { v8.4s }, [x27], #0x10\n"
- "st1 { v9.4s }, [x27], #0x10\n"
- "st1 { v14.4s }, [x23], #0x10\n"
- "st1 { v15.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v21.4s }, [x22], #0x10\n"
- "st1 { v26.4s }, [x21], #0x10\n"
- "st1 { v27.4s }, [x21], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v9.4s }, [x26], #0x10\n"
+ "st1 { v14.4s }, [x22], #0x10\n"
+ "st1 { v15.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v21.4s }, [x21], #0x10\n"
+ "st1 { v26.4s }, [x20], #0x10\n"
+ "st1 { v27.4s }, [x20], #0x10\n"
"tbz x9, #2, 156f\n"
- "st1 { v10.4s }, [x27], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v22.4s }, [x22], #0x10\n"
- "st1 { v28.4s }, [x21], #0x10\n"
+ "st1 { v10.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v22.4s }, [x21], #0x10\n"
+ "st1 { v28.4s }, [x20], #0x10\n"
"tbz x9, #1, 155f\n"
- "str d11, [x27], #0x8\n"
- "str d17, [x23], #0x8\n"
- "str d23, [x22], #0x8\n"
- "str d29, [x21], #0x8\n"
+ "str d11, [x26], #0x8\n"
+ "str d17, [x22], #0x8\n"
+ "str d23, [x21], #0x8\n"
+ "str d29, [x20], #0x8\n"
"tbz x9, #0, 162f\n"
- "st1 { v11.s }[2], [x27]\n"
- "st1 { v17.s }[2], [x23]\n"
- "st1 { v23.s }[2], [x22]\n"
- "st1 { v29.s }[2], [x21]\n"
+ "st1 { v11.s }[2], [x26]\n"
+ "st1 { v17.s }[2], [x22]\n"
+ "st1 { v23.s }[2], [x21]\n"
+ "st1 { v29.s }[2], [x20]\n"
"b 162f\n"
"155:" // Height 4: Partial direct writeback: partial_1_12
"tbz x9, #0, 162f\n"
- "str s11, [x27, #0x0]\n"
- "str s17, [x23, #0x0]\n"
- "str s23, [x22, #0x0]\n"
- "str s29, [x21, #0x0]\n"
+ "str s11, [x26, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
+ "str s23, [x21, #0x0]\n"
+ "str s29, [x20, #0x0]\n"
"b 162f\n"
"156:" // Height 4: Partial direct writeback: partial_2_8
"tbz x9, #1, 157f\n"
- "str d10, [x27], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d22, [x22], #0x8\n"
- "str d28, [x21], #0x8\n"
+ "str d10, [x26], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "str d22, [x21], #0x8\n"
+ "str d28, [x20], #0x8\n"
"tbz x9, #0, 162f\n"
- "st1 { v10.s }[2], [x27]\n"
- "st1 { v16.s }[2], [x23]\n"
- "st1 { v22.s }[2], [x22]\n"
- "st1 { v28.s }[2], [x21]\n"
+ "st1 { v10.s }[2], [x26]\n"
+ "st1 { v16.s }[2], [x22]\n"
+ "st1 { v22.s }[2], [x21]\n"
+ "st1 { v28.s }[2], [x20]\n"
"b 162f\n"
"157:" // Height 4: Partial direct writeback: partial_1_8
"tbz x9, #0, 162f\n"
- "str s10, [x27, #0x0]\n"
- "str s16, [x23, #0x0]\n"
- "str s22, [x22, #0x0]\n"
- "str s28, [x21, #0x0]\n"
+ "str s10, [x26, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
+ "str s22, [x21, #0x0]\n"
+ "str s28, [x20, #0x0]\n"
"b 162f\n"
"158:" // Height 4: Partial direct writeback: partial_4_0
"tbz x9, #2, 160f\n"
- "st1 { v8.4s }, [x27], #0x10\n"
- "st1 { v14.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v26.4s }, [x21], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v14.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v26.4s }, [x20], #0x10\n"
"tbz x9, #1, 159f\n"
- "str d9, [x27], #0x8\n"
- "str d15, [x23], #0x8\n"
- "str d21, [x22], #0x8\n"
- "str d27, [x21], #0x8\n"
+ "str d9, [x26], #0x8\n"
+ "str d15, [x22], #0x8\n"
+ "str d21, [x21], #0x8\n"
+ "str d27, [x20], #0x8\n"
"tbz x9, #0, 162f\n"
- "st1 { v9.s }[2], [x27]\n"
- "st1 { v15.s }[2], [x23]\n"
- "st1 { v21.s }[2], [x22]\n"
- "st1 { v27.s }[2], [x21]\n"
+ "st1 { v9.s }[2], [x26]\n"
+ "st1 { v15.s }[2], [x22]\n"
+ "st1 { v21.s }[2], [x21]\n"
+ "st1 { v27.s }[2], [x20]\n"
"b 162f\n"
"159:" // Height 4: Partial direct writeback: partial_1_4
"tbz x9, #0, 162f\n"
- "str s9, [x27, #0x0]\n"
- "str s15, [x23, #0x0]\n"
- "str s21, [x22, #0x0]\n"
- "str s27, [x21, #0x0]\n"
+ "str s9, [x26, #0x0]\n"
+ "str s15, [x22, #0x0]\n"
+ "str s21, [x21, #0x0]\n"
+ "str s27, [x20, #0x0]\n"
"b 162f\n"
"160:" // Height 4: Partial direct writeback: partial_2_0
"tbz x9, #1, 161f\n"
- "str d8, [x27], #0x8\n"
- "str d14, [x23], #0x8\n"
- "str d20, [x22], #0x8\n"
- "str d26, [x21], #0x8\n"
+ "str d8, [x26], #0x8\n"
+ "str d14, [x22], #0x8\n"
+ "str d20, [x21], #0x8\n"
+ "str d26, [x20], #0x8\n"
"tbz x9, #0, 162f\n"
- "st1 { v8.s }[2], [x27]\n"
- "st1 { v14.s }[2], [x23]\n"
- "st1 { v20.s }[2], [x22]\n"
- "st1 { v26.s }[2], [x21]\n"
+ "st1 { v8.s }[2], [x26]\n"
+ "st1 { v14.s }[2], [x22]\n"
+ "st1 { v20.s }[2], [x21]\n"
+ "st1 { v26.s }[2], [x20]\n"
"b 162f\n"
"161:" // Height 4: Partial direct writeback: partial_1_0
- "str s8, [x27, #0x0]\n"
- "str s14, [x23, #0x0]\n"
- "str s20, [x22, #0x0]\n"
- "str s26, [x21, #0x0]\n"
+ "str s8, [x26, #0x0]\n"
+ "str s14, [x22, #0x0]\n"
+ "str s20, [x21, #0x0]\n"
+ "str s26, [x20, #0x0]\n"
"162:" // Height 4: Partial direct writeback: Done
"b 164f\n"
"163:" // Height 4: Full writeback
- "str q8, [x27, #0x0]\n"
- "str q9, [x27, #0x10]\n"
- "str q10, [x27, #0x20]\n"
- "str q11, [x27, #0x30]\n"
- "str q12, [x27, #0x40]\n"
- "str q13, [x27, #0x50]\n"
- "add x27, x27, #0x60\n"
- "str q14, [x23, #0x0]\n"
- "str q15, [x23, #0x10]\n"
- "str q16, [x23, #0x20]\n"
- "str q17, [x23, #0x30]\n"
- "str q18, [x23, #0x40]\n"
- "str q19, [x23, #0x50]\n"
- "str q20, [x22, #0x0]\n"
- "str q21, [x22, #0x10]\n"
- "str q22, [x22, #0x20]\n"
- "str q23, [x22, #0x30]\n"
- "str q24, [x22, #0x40]\n"
- "str q25, [x22, #0x50]\n"
- "str q26, [x21, #0x0]\n"
- "str q27, [x21, #0x10]\n"
- "str q28, [x21, #0x20]\n"
- "str q29, [x21, #0x30]\n"
- "str q30, [x21, #0x40]\n"
- "str q31, [x21, #0x50]\n"
+ "str q8, [x26, #0x0]\n"
+ "str q9, [x26, #0x10]\n"
+ "str q10, [x26, #0x20]\n"
+ "str q11, [x26, #0x30]\n"
+ "str q12, [x26, #0x40]\n"
+ "str q13, [x26, #0x50]\n"
+ "add x26, x26, #0x60\n"
+ "str q14, [x22, #0x0]\n"
+ "str q15, [x22, #0x10]\n"
+ "str q16, [x22, #0x20]\n"
+ "str q17, [x22, #0x30]\n"
+ "str q18, [x22, #0x40]\n"
+ "str q19, [x22, #0x50]\n"
+ "str q20, [x21, #0x0]\n"
+ "str q21, [x21, #0x10]\n"
+ "str q22, [x21, #0x20]\n"
+ "str q23, [x21, #0x30]\n"
+ "str q24, [x21, #0x40]\n"
+ "str q25, [x21, #0x50]\n"
+ "str q26, [x20, #0x0]\n"
+ "str q27, [x20, #0x10]\n"
+ "str q28, [x20, #0x20]\n"
+ "str q29, [x20, #0x30]\n"
+ "str q30, [x20, #0x40]\n"
+ "str q31, [x20, #0x50]\n"
"164:" // Height 4: Writeback done
"subs x9, x9, #0x18\n"
"bgt 125b\n"
"subs %x[M], %x[M], #0x4\n"
"beq 166f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 165f\n"
- "add x21, x21, #0x4\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x4\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"165:" // Update direct input
- "mov x20, #0x10\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x10\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"166:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_6x16/a55.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_6x16/a55.cpp
index 985d57d9b6..e8b7db21bd 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_6x16/a55.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_6x16/a55.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -102,82 +102,82 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"cmp %x[M], #0x2\n"
"bgt 67f\n"
"beq 34f\n"
- "mov x7, %x[bias]\n"
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x16, %x[output_ptr]\n"
+ "mov x16, %x[bias]\n"
+ "mov x15, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "cbz x7, 3f\n"
- "ldr q8, [x7, #0x0]\n"
- "ldr q9, [x7, #0x10]\n"
- "ldr q10, [x7, #0x20]\n"
- "ldr q11, [x7, #0x30]\n"
- "add x7, x7, #0x40\n"
+ "cbz x16, 3f\n"
+ "ldr q8, [x16, #0x0]\n"
+ "ldr q9, [x16, #0x10]\n"
+ "ldr q10, [x16, #0x20]\n"
+ "ldr q11, [x16, #0x30]\n"
+ "add x16, x16, #0x40\n"
"b 14f\n"
"3:" // Height 1: no bias
"tbz %x[flags], #0, 13f\n"
"cmp x8, #0x10\n"
"bge 12f\n"
"tbz x8, #3, 7f\n"
- "ld1 { v8.4s }, [x16], #0x10\n"
- "ld1 { v9.4s }, [x16], #0x10\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
+ "ld1 { v9.4s }, [x15], #0x10\n"
"tbz x8, #2, 5f\n"
- "ld1 { v10.4s }, [x16], #0x10\n"
+ "ld1 { v10.4s }, [x15], #0x10\n"
"tbz x8, #1, 4f\n"
- "ldr d11, [x16], #0x8\n"
- "mov x20, #0x38\n"
+ "mov x19, #0x38\n"
+ "ldr d11, [x15], #0x8\n"
"tbz x8, #0, 11f\n"
- "ld1 { v11.s }[2], [x16]\n"
+ "ld1 { v11.s }[2], [x15]\n"
"b 11f\n"
"4:" // Height 1: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x8, #0, 11f\n"
- "ldr s11, [x16, #0x0]\n"
+ "ldr s11, [x15, #0x0]\n"
"b 11f\n"
"5:" // Height 1: Partial accumulate: partial_2_8
"tbz x8, #1, 6f\n"
- "ldr d10, [x16], #0x8\n"
- "mov x20, #0x28\n"
+ "ldr d10, [x15], #0x8\n"
+ "mov x19, #0x28\n"
"tbz x8, #0, 11f\n"
- "ld1 { v10.s }[2], [x16]\n"
+ "ld1 { v10.s }[2], [x15]\n"
"b 11f\n"
"6:" // Height 1: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x8, #0, 11f\n"
- "ldr s10, [x16, #0x0]\n"
+ "ldr s10, [x15, #0x0]\n"
"b 11f\n"
"7:" // Height 1: Partial accumulate: partial_4_0
"tbz x8, #2, 9f\n"
- "ld1 { v8.4s }, [x16], #0x10\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
"tbz x8, #1, 8f\n"
- "ldr d9, [x16], #0x8\n"
- "mov x20, #0x18\n"
+ "mov x19, #0x18\n"
+ "ldr d9, [x15], #0x8\n"
"tbz x8, #0, 11f\n"
- "ld1 { v9.s }[2], [x16]\n"
+ "ld1 { v9.s }[2], [x15]\n"
"b 11f\n"
"8:" // Height 1: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x8, #0, 11f\n"
- "ldr s9, [x16, #0x0]\n"
+ "ldr s9, [x15, #0x0]\n"
"b 11f\n"
"9:" // Height 1: Partial accumulate: partial_2_0
"tbz x8, #1, 10f\n"
- "ldr d8, [x16], #0x8\n"
- "mov x20, #0x8\n"
+ "ldr d8, [x15], #0x8\n"
+ "mov x19, #0x8\n"
"tbz x8, #0, 11f\n"
- "ld1 { v8.s }[2], [x16]\n"
+ "ld1 { v8.s }[2], [x15]\n"
"b 11f\n"
"10:" // Height 1: Partial accumulate: partial_1_0
- "ldr s8, [x16, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr s8, [x15, #0x0]\n"
+ "mov x19, #0x0\n"
"11:" // Height 1: Partial accumulate: Done
- "sub x16, x16, x20\n"
+ "sub x15, x15, x19\n"
"b 14f\n"
"12:" // Height 1: full accumulate
- "ldr q8, [x16, #0x0]\n"
- "ldr q9, [x16, #0x10]\n"
- "ldr q10, [x16, #0x20]\n"
- "ldr q11, [x16, #0x30]\n"
+ "ldr q8, [x15, #0x0]\n"
+ "ldr q9, [x15, #0x10]\n"
+ "ldr q10, [x15, #0x20]\n"
+ "ldr q11, [x15, #0x30]\n"
"b 14f\n"
"13:" // Height 1: no accumulate
"movi v8.16b, #0x0\n"
@@ -185,109 +185,112 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"movi v10.16b, #0x0\n"
"movi v11.16b, #0x0\n"
"14:" // Height 1: setup done
- "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
"15:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w13, [x20, x14, LSL #0x2]\n"
"tbz %x[flags], #3, 16f\n"
- "ldr x21, [%x[input_ptr], x15, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x13, [x21, #0x0]\n"
- "cbnz x15, 17f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x13, x13, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x12, [x20, #0x0]\n"
+ "cbnz x14, 17f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x12, x12, x19, LSL #2\n"
"b 17f\n"
"16:" // Height 1: setup direct input
- "mov x13, %x[input_ptr]\n"
+ "mov x12, %x[input_ptr]\n"
"17:" // Height 1: input setup done
- "cmp x14, #0x4\n"
+ "cmp x13, #0x4\n"
"blt 20f\n"
- "ldr q0, [x13, #0x0]\n"
- "cmp x14, #0x8\n"
+ "ldr q0, [x12, #0x0]\n"
"ldr q6, [x17, #0x0]\n"
- "ldr q7, [x17, #0x10]\n"
+ "cmp x13, #0x8\n"
"blt 19f\n"
"18:" // Height 1: Multiply loop: Main loop head
"fmla v8.4s, v6.4s, v0.s[0]\n"
+ "ldr d7, [x17, #0x10]\n"
+ "ldr x11, [x17, #0x18]\n"
+ "add x12, x12, #0x10\n"
"ldr d6, [x17, #0x20]\n"
- "ldr x12, [x17, #0x28]\n"
+ "sub x13, x13, #0x4\n"
+ "ldr x10, [x17, #0x28]\n"
+ "cmp x13, #0x8\n"
+ "mov v7.d[1], x11\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
+ "ldr x11, [x17, #0x38]\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x30]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0x38]\n"
- "mov v7.d[1], x11\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
"ldr d6, [x17, #0x40]\n"
- "ldr x12, [x17, #0x48]\n"
+ "ldr x10, [x17, #0x48]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0x58]\n"
+ "ldr x9, [x12, #0x8]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x50]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0x58]\n"
- "mov v7.d[1], x11\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
"ldr d6, [x17, #0x60]\n"
- "ldr x12, [x17, #0x68]\n"
+ "ldr x10, [x17, #0x68]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0x78]\n"
"fmla v9.4s, v7.4s, v0.s[1]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x70]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0x78]\n"
- "mov v7.d[1], x11\n"
"fmla v10.4s, v6.4s, v0.s[1]\n"
"ldr d6, [x17, #0x80]\n"
- "ldr x12, [x17, #0x88]\n"
+ "ldr x10, [x17, #0x88]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0x98]\n"
"fmla v11.4s, v7.4s, v0.s[1]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x90]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0x98]\n"
- "mov v7.d[1], x11\n"
"fmla v8.4s, v6.4s, v0.s[2]\n"
"ldr d6, [x17, #0xa0]\n"
- "ldr x12, [x17, #0xa8]\n"
+ "ldr x10, [x17, #0xa8]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0xb8]\n"
"fmla v9.4s, v7.4s, v0.s[2]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0xb0]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0xb8]\n"
- "mov v7.d[1], x11\n"
"fmla v10.4s, v6.4s, v0.s[2]\n"
"ldr d6, [x17, #0xc0]\n"
- "ldr x12, [x17, #0xc8]\n"
+ "ldr x10, [x17, #0xc8]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0xd8]\n"
"fmla v11.4s, v7.4s, v0.s[2]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0xd0]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0xd8]\n"
- "mov v7.d[1], x11\n"
"fmla v8.4s, v6.4s, v0.s[3]\n"
"ldr d6, [x17, #0xe0]\n"
- "ldr x12, [x17, #0xe8]\n"
+ "ldr x10, [x17, #0xe8]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0xf8]\n"
"fmla v9.4s, v7.4s, v0.s[3]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0xf0]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0xf8]\n"
- "mov v7.d[1], x11\n"
- "add x13, x13, #0x10\n"
"add x17, x17, #0x100\n"
"fmla v10.4s, v6.4s, v0.s[3]\n"
"ldr d6, [x17, #0x0]\n"
- "ldr x12, [x17, #0x8]\n"
- "fmla v11.4s, v7.4s, v0.s[3]\n"
- "ldr d0, [x13, #0x0]\n"
- "sub x14, x14, #0x4\n"
- "ldr d7, [x17, #0x10]\n"
- "cmp x14, #0x8\n"
- "ldr x10, [x13, #0x8]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0x18]\n"
- "mov v0.d[1], x10\n"
+ "ldr x10, [x17, #0x8]\n"
"mov v7.d[1], x11\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "fmla v11.4s, v7.4s, v0.s[3]\n"
+ "mov v6.d[1], x10\n"
+ "ldr d0, [x12, #0x0]\n"
+ "mov v0.d[1], x9\n"
"bge 18b\n"
"19:" // Height 1: Multiply loop: Single iteration only
"fmla v8.4s, v6.4s, v0.s[0]\n"
+ "ldr q7, [x17, #0x10]\n"
"ldr q6, [x17, #0x20]\n"
+ "sub x13, x13, #0x4\n"
+ "add x12, x12, #0x10\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
- "ldr q7, [x17, #0x30]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
+ "ldr q7, [x17, #0x30]\n"
"ldr q6, [x17, #0x40]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
"ldr q7, [x17, #0x50]\n"
@@ -311,204 +314,201 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"ldr q6, [x17, #0xe0]\n"
"fmla v9.4s, v7.4s, v0.s[3]\n"
"ldr q7, [x17, #0xf0]\n"
- "add x13, x13, #0x10\n"
- "sub x14, x14, #0x4\n"
"fmla v10.4s, v6.4s, v0.s[3]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
- "fmla v11.4s, v7.4s, v0.s[3]\n"
"add x17, x17, #0x100\n"
+ "fmla v11.4s, v7.4s, v0.s[3]\n"
"20:" // Height 1: Multiply loop: Main loop skip
- "cbz x14, 22f\n"
+ "cbz x13, 22f\n"
"21:" // Height 1: Multiply loop: Odd block loop
- "ldr s0, [x13], #0x4\n"
- "sub x14, x14, #0x1\n"
+ "ldr s0, [x12], #0x4\n"
+ "sub x13, x13, #0x1\n"
"ldr q6, [x17, #0x0]\n"
- "fmla v8.4s, v6.4s, v0.s[0]\n"
"ldr q7, [x17, #0x10]\n"
- "fmla v9.4s, v7.4s, v0.s[0]\n"
+ "fmla v8.4s, v6.4s, v0.s[0]\n"
"ldr q6, [x17, #0x20]\n"
- "fmla v10.4s, v6.4s, v0.s[0]\n"
+ "fmla v9.4s, v7.4s, v0.s[0]\n"
"ldr q7, [x17, #0x30]\n"
- "fmla v11.4s, v7.4s, v0.s[0]\n"
"add x17, x17, #0x40\n"
- "cbnz x14, 21b\n"
+ "fmla v10.4s, v6.4s, v0.s[0]\n"
+ "fmla v11.4s, v7.4s, v0.s[0]\n"
+ "cbnz x13, 21b\n"
"22:" // Height 1: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x15, x15, #0x1\n"
- "cmp x15, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x14, x14, #0x1\n"
+ "cmp x14, x19\n"
"bne 15b\n"
- "prfm pstl1keep, [x16, #0x0]\n"
+ "prfm pstl1keep, [x15, #0x0]\n"
"tbz %x[flags], #1, 23f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
"fmin v8.4s, v8.4s, v0.4s\n"
"fmin v9.4s, v9.4s, v0.4s\n"
"fmin v10.4s, v10.4s, v0.4s\n"
"fmin v11.4s, v11.4s, v0.4s\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
"23:" // Height 1: No activation
"cmp x8, #0x10\n"
"bge 32f\n"
"tbz x8, #3, 27f\n"
- "st1 { v8.4s }, [x16], #0x10\n"
- "st1 { v9.4s }, [x16], #0x10\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
+ "st1 { v9.4s }, [x15], #0x10\n"
"tbz x8, #2, 25f\n"
- "st1 { v10.4s }, [x16], #0x10\n"
+ "st1 { v10.4s }, [x15], #0x10\n"
"tbz x8, #1, 24f\n"
- "str d11, [x16], #0x8\n"
+ "str d11, [x15], #0x8\n"
"tbz x8, #0, 31f\n"
- "st1 { v11.s }[2], [x16]\n"
+ "st1 { v11.s }[2], [x15]\n"
"b 31f\n"
"24:" // Height 1: Partial direct writeback: partial_1_12
"tbz x8, #0, 31f\n"
- "str s11, [x16, #0x0]\n"
+ "str s11, [x15, #0x0]\n"
"b 31f\n"
"25:" // Height 1: Partial direct writeback: partial_2_8
"tbz x8, #1, 26f\n"
- "str d10, [x16], #0x8\n"
+ "str d10, [x15], #0x8\n"
"tbz x8, #0, 31f\n"
- "st1 { v10.s }[2], [x16]\n"
+ "st1 { v10.s }[2], [x15]\n"
"b 31f\n"
"26:" // Height 1: Partial direct writeback: partial_1_8
"tbz x8, #0, 31f\n"
- "str s10, [x16, #0x0]\n"
+ "str s10, [x15, #0x0]\n"
"b 31f\n"
"27:" // Height 1: Partial direct writeback: partial_4_0
"tbz x8, #2, 29f\n"
- "st1 { v8.4s }, [x16], #0x10\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
"tbz x8, #1, 28f\n"
- "str d9, [x16], #0x8\n"
+ "str d9, [x15], #0x8\n"
"tbz x8, #0, 31f\n"
- "st1 { v9.s }[2], [x16]\n"
+ "st1 { v9.s }[2], [x15]\n"
"b 31f\n"
"28:" // Height 1: Partial direct writeback: partial_1_4
"tbz x8, #0, 31f\n"
- "str s9, [x16, #0x0]\n"
+ "str s9, [x15, #0x0]\n"
"b 31f\n"
"29:" // Height 1: Partial direct writeback: partial_2_0
"tbz x8, #1, 30f\n"
- "str d8, [x16], #0x8\n"
+ "str d8, [x15], #0x8\n"
"tbz x8, #0, 31f\n"
- "st1 { v8.s }[2], [x16]\n"
+ "st1 { v8.s }[2], [x15]\n"
"b 31f\n"
"30:" // Height 1: Partial direct writeback: partial_1_0
- "str s8, [x16, #0x0]\n"
+ "str s8, [x15, #0x0]\n"
"31:" // Height 1: Partial direct writeback: Done
"b 33f\n"
"32:" // Height 1: Full writeback
- "str q8, [x16, #0x0]\n"
- "str q9, [x16, #0x10]\n"
- "str q10, [x16, #0x20]\n"
- "str q11, [x16, #0x30]\n"
- "add x16, x16, #0x40\n"
+ "str q8, [x15, #0x0]\n"
+ "str q9, [x15, #0x10]\n"
+ "str q10, [x15, #0x20]\n"
+ "str q11, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
"33:" // Height 1: Writeback done
"subs x8, x8, #0x10\n"
"bgt 2b\n"
"b 200f\n"
"34:" // Height 2
- "mov x7, %x[bias]\n"
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x16, %x[bias]\n"
"ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x16, %x[output_ptr]\n"
+ "mov x15, %x[output_ptr]\n"
"35:" // Height 2: Column loop
- "cbz x7, 36f\n"
- "ldr q8, [x7, #0x0]\n"
+ "cbz x16, 36f\n"
+ "ldr q8, [x16, #0x0]\n"
+ "ldr q9, [x16, #0x10]\n"
+ "ldr q10, [x16, #0x20]\n"
"mov v12.16b, v8.16b\n"
- "ldr q9, [x7, #0x10]\n"
+ "ldr q11, [x16, #0x30]\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x7, #0x20]\n"
+ "add x16, x16, #0x40\n"
"mov v14.16b, v10.16b\n"
- "ldr q11, [x7, #0x30]\n"
"mov v15.16b, v11.16b\n"
- "add x7, x7, #0x40\n"
"b 47f\n"
"36:" // Height 2: no bias
"tbz %x[flags], #0, 46f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x8, #0x10\n"
- "add x25, x16, x20, LSL #2\n"
+ "add x25, x15, x19, LSL #2\n"
"bge 45f\n"
"tbz x8, #3, 40f\n"
- "ld1 { v8.4s }, [x16], #0x10\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
"ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v9.4s }, [x16], #0x10\n"
+ "ld1 { v9.4s }, [x15], #0x10\n"
"ld1 { v13.4s }, [x25], #0x10\n"
"tbz x8, #2, 38f\n"
- "ld1 { v10.4s }, [x16], #0x10\n"
+ "ld1 { v10.4s }, [x15], #0x10\n"
"ld1 { v14.4s }, [x25], #0x10\n"
"tbz x8, #1, 37f\n"
- "ldr d11, [x16], #0x8\n"
- "mov x20, #0x38\n"
+ "mov x19, #0x38\n"
+ "ldr d11, [x15], #0x8\n"
"ldr d15, [x25], #0x8\n"
"tbz x8, #0, 44f\n"
- "ld1 { v11.s }[2], [x16]\n"
+ "ld1 { v11.s }[2], [x15]\n"
"ld1 { v15.s }[2], [x25]\n"
"b 44f\n"
"37:" // Height 2: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x8, #0, 44f\n"
- "ldr s11, [x16, #0x0]\n"
+ "ldr s11, [x15, #0x0]\n"
"ldr s15, [x25, #0x0]\n"
"b 44f\n"
"38:" // Height 2: Partial accumulate: partial_2_8
"tbz x8, #1, 39f\n"
- "ldr d10, [x16], #0x8\n"
- "mov x20, #0x28\n"
+ "ldr d10, [x15], #0x8\n"
"ldr d14, [x25], #0x8\n"
+ "mov x19, #0x28\n"
"tbz x8, #0, 44f\n"
- "ld1 { v10.s }[2], [x16]\n"
+ "ld1 { v10.s }[2], [x15]\n"
"ld1 { v14.s }[2], [x25]\n"
"b 44f\n"
"39:" // Height 2: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x8, #0, 44f\n"
- "ldr s10, [x16, #0x0]\n"
+ "ldr s10, [x15, #0x0]\n"
"ldr s14, [x25, #0x0]\n"
"b 44f\n"
"40:" // Height 2: Partial accumulate: partial_4_0
"tbz x8, #2, 42f\n"
- "ld1 { v8.4s }, [x16], #0x10\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
"ld1 { v12.4s }, [x25], #0x10\n"
"tbz x8, #1, 41f\n"
- "ldr d9, [x16], #0x8\n"
- "mov x20, #0x18\n"
+ "mov x19, #0x18\n"
+ "ldr d9, [x15], #0x8\n"
"ldr d13, [x25], #0x8\n"
"tbz x8, #0, 44f\n"
- "ld1 { v9.s }[2], [x16]\n"
+ "ld1 { v9.s }[2], [x15]\n"
"ld1 { v13.s }[2], [x25]\n"
"b 44f\n"
"41:" // Height 2: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x8, #0, 44f\n"
- "ldr s9, [x16, #0x0]\n"
+ "ldr s9, [x15, #0x0]\n"
"ldr s13, [x25, #0x0]\n"
"b 44f\n"
"42:" // Height 2: Partial accumulate: partial_2_0
"tbz x8, #1, 43f\n"
- "ldr d8, [x16], #0x8\n"
- "mov x20, #0x8\n"
+ "ldr d8, [x15], #0x8\n"
"ldr d12, [x25], #0x8\n"
+ "mov x19, #0x8\n"
"tbz x8, #0, 44f\n"
- "ld1 { v8.s }[2], [x16]\n"
+ "ld1 { v8.s }[2], [x15]\n"
"ld1 { v12.s }[2], [x25]\n"
"b 44f\n"
"43:" // Height 2: Partial accumulate: partial_1_0
- "ldr s8, [x16, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr s8, [x15, #0x0]\n"
+ "mov x19, #0x0\n"
"ldr s12, [x25, #0x0]\n"
"44:" // Height 2: Partial accumulate: Done
- "sub x16, x16, x20\n"
+ "sub x15, x15, x19\n"
"b 47f\n"
"45:" // Height 2: full accumulate
- "ldr q8, [x16, #0x0]\n"
- "ldr q9, [x16, #0x10]\n"
- "ldr q10, [x16, #0x20]\n"
- "ldr q11, [x16, #0x30]\n"
+ "ldr q8, [x15, #0x0]\n"
+ "ldr q9, [x15, #0x10]\n"
+ "ldr q10, [x15, #0x20]\n"
+ "ldr q11, [x15, #0x30]\n"
"ldr q12, [x25, #0x0]\n"
"ldr q13, [x25, #0x10]\n"
"ldr q14, [x25, #0x20]\n"
@@ -524,147 +524,147 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"movi v14.16b, #0x0\n"
"movi v15.16b, #0x0\n"
"47:" // Height 2: setup done
- "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
"48:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w13, [x20, x14, LSL #0x2]\n"
"tbz %x[flags], #3, 49f\n"
- "ldr x21, [%x[input_ptr], x15, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x13, [x21, #0x0]\n"
- "ldr x9, [x21, #0x8]\n"
- "cbnz x15, 50f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x13, x13, x20, LSL #2\n"
- "add x9, x9, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x12, [x20, #0x0]\n"
+ "ldr x28, [x20, #0x8]\n"
+ "cbnz x14, 50f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x12, x12, x19, LSL #2\n"
+ "add x28, x28, x19, LSL #2\n"
"b 50f\n"
"49:" // Height 2: setup direct input
- "mov x13, %x[input_ptr]\n"
- "add x9, x13, x20, LSL #2\n"
+ "mov x12, %x[input_ptr]\n"
+ "add x28, x12, x19, LSL #2\n"
"50:" // Height 2: input setup done
- "cmp x14, #0x4\n"
+ "cmp x13, #0x4\n"
"blt 53f\n"
- "ldr q0, [x13, #0x0]\n"
- "cmp x14, #0x8\n"
- "ldr q1, [x9, #0x0]\n"
+ "ldr q0, [x12, #0x0]\n"
+ "ldr q1, [x28, #0x0]\n"
+ "cmp x13, #0x8\n"
"ldr q6, [x17, #0x0]\n"
- "ldr q7, [x17, #0x10]\n"
"blt 52f\n"
"51:" // Height 2: Multiply loop: Main loop head
"fmla v8.4s, v6.4s, v0.s[0]\n"
- "ldr x12, [x17, #0x28]\n"
+ "ldr d7, [x17, #0x10]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
+ "ldr x11, [x17, #0x18]\n"
"ldr d6, [x17, #0x20]\n"
+ "add x12, x12, #0x10\n"
+ "ldr x10, [x17, #0x28]\n"
+ "add x28, x28, #0x10\n"
+ "mov v7.d[1], x11\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
+ "sub x13, x13, #0x4\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
- "ldr x11, [x17, #0x38]\n"
+ "mov v6.d[1], x10\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
"ldr d7, [x17, #0x30]\n"
- "mov v6.d[1], x12\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
- "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0x38]\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
"ldr d6, [x17, #0x40]\n"
+ "ldr x10, [x17, #0x48]\n"
+ "cmp x13, #0x8\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0x58]\n"
+ "ldr x9, [x12, #0x8]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
- "ldr x12, [x17, #0x48]\n"
+ "mov v6.d[1], x10\n"
"fmla v15.4s, v7.4s, v1.s[0]\n"
"ldr d7, [x17, #0x50]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0x58]\n"
- "mov v7.d[1], x11\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
- "ldr x12, [x17, #0x68]\n"
+ "ldr x10, [x17, #0x68]\n"
"fmla v12.4s, v6.4s, v1.s[1]\n"
"ldr d6, [x17, #0x60]\n"
- "fmla v9.4s, v7.4s, v0.s[1]\n"
+ "mov v7.d[1], x11\n"
"ldr x11, [x17, #0x78]\n"
+ "ldr x27, [x28, #0x8]\n"
+ "fmla v9.4s, v7.4s, v0.s[1]\n"
+ "mov v6.d[1], x10\n"
"fmla v13.4s, v7.4s, v1.s[1]\n"
"ldr d7, [x17, #0x70]\n"
- "mov v6.d[1], x12\n"
"fmla v10.4s, v6.4s, v0.s[1]\n"
- "mov v7.d[1], x11\n"
+ "ldr x10, [x17, #0x88]\n"
"fmla v14.4s, v6.4s, v1.s[1]\n"
"ldr d6, [x17, #0x80]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0x98]\n"
"fmla v11.4s, v7.4s, v0.s[1]\n"
- "ldr x12, [x17, #0x88]\n"
+ "mov v6.d[1], x10\n"
"fmla v15.4s, v7.4s, v1.s[1]\n"
"ldr d7, [x17, #0x90]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0x98]\n"
- "mov v7.d[1], x11\n"
"fmla v8.4s, v6.4s, v0.s[2]\n"
- "ldr x12, [x17, #0xa8]\n"
+ "ldr x10, [x17, #0xa8]\n"
"fmla v12.4s, v6.4s, v1.s[2]\n"
"ldr d6, [x17, #0xa0]\n"
- "fmla v9.4s, v7.4s, v0.s[2]\n"
+ "mov v7.d[1], x11\n"
"ldr x11, [x17, #0xb8]\n"
+ "fmla v9.4s, v7.4s, v0.s[2]\n"
+ "mov v6.d[1], x10\n"
"fmla v13.4s, v7.4s, v1.s[2]\n"
"ldr d7, [x17, #0xb0]\n"
- "mov v6.d[1], x12\n"
"fmla v10.4s, v6.4s, v0.s[2]\n"
- "mov v7.d[1], x11\n"
+ "ldr x10, [x17, #0xc8]\n"
"fmla v14.4s, v6.4s, v1.s[2]\n"
"ldr d6, [x17, #0xc0]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0xd8]\n"
"fmla v11.4s, v7.4s, v0.s[2]\n"
- "ldr x12, [x17, #0xc8]\n"
+ "mov v6.d[1], x10\n"
"fmla v15.4s, v7.4s, v1.s[2]\n"
"ldr d7, [x17, #0xd0]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0xd8]\n"
- "mov v7.d[1], x11\n"
"fmla v8.4s, v6.4s, v0.s[3]\n"
- "ldr x12, [x17, #0xe8]\n"
+ "ldr x10, [x17, #0xe8]\n"
"fmla v12.4s, v6.4s, v1.s[3]\n"
"ldr d6, [x17, #0xe0]\n"
- "fmla v9.4s, v7.4s, v0.s[3]\n"
+ "mov v7.d[1], x11\n"
"ldr x11, [x17, #0xf8]\n"
+ "fmla v9.4s, v7.4s, v0.s[3]\n"
+ "mov v6.d[1], x10\n"
"fmla v13.4s, v7.4s, v1.s[3]\n"
"ldr d7, [x17, #0xf0]\n"
- "mov v6.d[1], x12\n"
- "add x13, x13, #0x10\n"
- "mov v7.d[1], x11\n"
- "add x9, x9, #0x10\n"
- "add x17, x17, #0x100\n"
"fmla v10.4s, v6.4s, v0.s[3]\n"
+ "add x17, x17, #0x100\n"
"fmla v14.4s, v6.4s, v1.s[3]\n"
"ldr d6, [x17, #0x0]\n"
- "ldr x12, [x17, #0x8]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x10, [x17, #0x8]\n"
"fmla v11.4s, v7.4s, v0.s[3]\n"
- "ldr d0, [x13, #0x0]\n"
+ "ldr d0, [x12, #0x0]\n"
"fmla v15.4s, v7.4s, v1.s[3]\n"
- "ldr d1, [x9, #0x0]\n"
- "sub x14, x14, #0x4\n"
- "ldr d7, [x17, #0x10]\n"
- "cmp x14, #0x8\n"
- "ldr x10, [x13, #0x8]\n"
- "mov v6.d[1], x12\n"
- "ldr x28, [x9, #0x8]\n"
- "mov v0.d[1], x10\n"
- "ldr x11, [x17, #0x18]\n"
- "mov v1.d[1], x28\n"
- "prfm pldl1keep, [x13, #0x80]\n"
- "mov v7.d[1], x11\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "mov v6.d[1], x10\n"
+ "ldr d1, [x28, #0x0]\n"
+ "mov v0.d[1], x9\n"
+ "mov v1.d[1], x27\n"
"bge 51b\n"
"52:" // Height 2: Multiply loop: Single iteration only
"fmla v8.4s, v6.4s, v0.s[0]\n"
- "add x13, x13, #0x10\n"
+ "ldr q7, [x17, #0x10]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
"ldr q6, [x17, #0x20]\n"
+ "sub x13, x13, #0x4\n"
+ "add x12, x12, #0x10\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
- "add x9, x9, #0x10\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
"ldr q7, [x17, #0x30]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
- "sub x14, x14, #0x4\n"
+ "add x28, x28, #0x10\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
- "ldr q6, [x17, #0x40]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "ldr q6, [x17, #0x40]\n"
"fmla v15.4s, v7.4s, v1.s[0]\n"
"ldr q7, [x17, #0x50]\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
"fmla v12.4s, v6.4s, v1.s[1]\n"
"ldr q6, [x17, #0x60]\n"
"fmla v9.4s, v7.4s, v0.s[1]\n"
@@ -700,14 +700,14 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"fmla v11.4s, v7.4s, v0.s[3]\n"
"fmla v15.4s, v7.4s, v1.s[3]\n"
"53:" // Height 2: Multiply loop: Main loop skip
- "cbz x14, 55f\n"
+ "cbz x13, 55f\n"
"54:" // Height 2: Multiply loop: Odd block loop
- "ldr s0, [x13], #0x4\n"
- "sub x14, x14, #0x1\n"
- "ldr s1, [x9], #0x4\n"
+ "ldr s0, [x12], #0x4\n"
+ "sub x13, x13, #0x1\n"
+ "ldr s1, [x28], #0x4\n"
"ldr q6, [x17, #0x0]\n"
- "fmla v8.4s, v6.4s, v0.s[0]\n"
"ldr q7, [x17, #0x10]\n"
+ "fmla v8.4s, v6.4s, v0.s[0]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
"ldr q6, [x17, #0x20]\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
@@ -718,19 +718,21 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"fmla v14.4s, v6.4s, v1.s[0]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
"fmla v15.4s, v7.4s, v1.s[0]\n"
- "cbnz x14, 54b\n"
+ "cbnz x13, 54b\n"
"55:" // Height 2: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x15, x15, #0x1\n"
- "cmp x15, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x14, x14, #0x1\n"
+ "cmp x14, x19\n"
"bne 48b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #2\n"
- "prfm pstl1keep, [x16, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x15, #0x0]\n"
+ "add x25, x15, x19, LSL #2\n"
"prfm pstl1keep, [x25, #0x0]\n"
"tbz %x[flags], #1, 56f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x20, %x[args_ptr], %[offset_min]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x20]\n"
+ "ld1r { v0.4s }, [x19]\n"
"fmin v8.4s, v8.4s, v0.4s\n"
"fmin v9.4s, v9.4s, v0.4s\n"
"fmin v10.4s, v10.4s, v0.4s\n"
@@ -739,87 +741,85 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"fmin v13.4s, v13.4s, v0.4s\n"
"fmin v14.4s, v14.4s, v0.4s\n"
"fmin v15.4s, v15.4s, v0.4s\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v15.4s, v15.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmax v15.4s, v15.4s, v1.4s\n"
"56:" // Height 2: No activation
"cmp x8, #0x10\n"
"bge 65f\n"
"tbz x8, #3, 60f\n"
- "st1 { v8.4s }, [x16], #0x10\n"
- "st1 { v9.4s }, [x16], #0x10\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
+ "st1 { v9.4s }, [x15], #0x10\n"
"st1 { v12.4s }, [x25], #0x10\n"
"st1 { v13.4s }, [x25], #0x10\n"
"tbz x8, #2, 58f\n"
- "st1 { v10.4s }, [x16], #0x10\n"
+ "st1 { v10.4s }, [x15], #0x10\n"
"st1 { v14.4s }, [x25], #0x10\n"
"tbz x8, #1, 57f\n"
- "str d11, [x16], #0x8\n"
+ "str d11, [x15], #0x8\n"
"str d15, [x25], #0x8\n"
"tbz x8, #0, 64f\n"
- "st1 { v11.s }[2], [x16]\n"
+ "st1 { v11.s }[2], [x15]\n"
"st1 { v15.s }[2], [x25]\n"
"b 64f\n"
"57:" // Height 2: Partial direct writeback: partial_1_12
"tbz x8, #0, 64f\n"
- "str s11, [x16, #0x0]\n"
+ "str s11, [x15, #0x0]\n"
"str s15, [x25, #0x0]\n"
"b 64f\n"
"58:" // Height 2: Partial direct writeback: partial_2_8
"tbz x8, #1, 59f\n"
- "str d10, [x16], #0x8\n"
+ "str d10, [x15], #0x8\n"
"str d14, [x25], #0x8\n"
"tbz x8, #0, 64f\n"
- "st1 { v10.s }[2], [x16]\n"
+ "st1 { v10.s }[2], [x15]\n"
"st1 { v14.s }[2], [x25]\n"
"b 64f\n"
"59:" // Height 2: Partial direct writeback: partial_1_8
"tbz x8, #0, 64f\n"
- "str s10, [x16, #0x0]\n"
+ "str s10, [x15, #0x0]\n"
"str s14, [x25, #0x0]\n"
"b 64f\n"
"60:" // Height 2: Partial direct writeback: partial_4_0
"tbz x8, #2, 62f\n"
- "st1 { v8.4s }, [x16], #0x10\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
"st1 { v12.4s }, [x25], #0x10\n"
"tbz x8, #1, 61f\n"
- "str d9, [x16], #0x8\n"
+ "str d9, [x15], #0x8\n"
"str d13, [x25], #0x8\n"
"tbz x8, #0, 64f\n"
- "st1 { v9.s }[2], [x16]\n"
+ "st1 { v9.s }[2], [x15]\n"
"st1 { v13.s }[2], [x25]\n"
"b 64f\n"
"61:" // Height 2: Partial direct writeback: partial_1_4
"tbz x8, #0, 64f\n"
- "str s9, [x16, #0x0]\n"
+ "str s9, [x15, #0x0]\n"
"str s13, [x25, #0x0]\n"
"b 64f\n"
"62:" // Height 2: Partial direct writeback: partial_2_0
"tbz x8, #1, 63f\n"
- "str d8, [x16], #0x8\n"
+ "str d8, [x15], #0x8\n"
"str d12, [x25], #0x8\n"
"tbz x8, #0, 64f\n"
- "st1 { v8.s }[2], [x16]\n"
+ "st1 { v8.s }[2], [x15]\n"
"st1 { v12.s }[2], [x25]\n"
"b 64f\n"
"63:" // Height 2: Partial direct writeback: partial_1_0
- "str s8, [x16, #0x0]\n"
+ "str s8, [x15, #0x0]\n"
"str s12, [x25, #0x0]\n"
"64:" // Height 2: Partial direct writeback: Done
"b 66f\n"
"65:" // Height 2: Full writeback
- "str q8, [x16, #0x0]\n"
- "str q9, [x16, #0x10]\n"
- "str q10, [x16, #0x20]\n"
- "str q11, [x16, #0x30]\n"
- "add x16, x16, #0x40\n"
+ "str q8, [x15, #0x0]\n"
+ "str q9, [x15, #0x10]\n"
+ "str q10, [x15, #0x20]\n"
+ "str q11, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
"str q12, [x25, #0x0]\n"
"str q13, [x25, #0x10]\n"
"str q14, [x25, #0x20]\n"
@@ -829,125 +829,125 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"bgt 35b\n"
"b 200f\n"
"67:" // Height 3
- "mov x7, %x[bias]\n"
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x16, %x[bias]\n"
"ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x16, %x[output_ptr]\n"
+ "mov x15, %x[output_ptr]\n"
"68:" // Height 3: Column loop
- "cbz x7, 69f\n"
- "ldr q8, [x7, #0x0]\n"
+ "cbz x16, 69f\n"
+ "ldr q8, [x16, #0x0]\n"
+ "ldr q9, [x16, #0x10]\n"
+ "ldr q10, [x16, #0x20]\n"
"mov v12.16b, v8.16b\n"
- "ldr q9, [x7, #0x10]\n"
+ "ldr q11, [x16, #0x30]\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x7, #0x20]\n"
+ "add x16, x16, #0x40\n"
"mov v14.16b, v10.16b\n"
- "ldr q11, [x7, #0x30]\n"
"mov v15.16b, v11.16b\n"
"mov v16.16b, v8.16b\n"
- "add x7, x7, #0x40\n"
"mov v17.16b, v9.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
"b 80f\n"
"69:" // Height 3: no bias
"tbz %x[flags], #0, 79f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x8, #0x10\n"
- "add x24, x25, x20, LSL #2\n"
+ "add x25, x15, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
"bge 78f\n"
"tbz x8, #3, 73f\n"
- "ld1 { v8.4s }, [x16], #0x10\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
"ld1 { v12.4s }, [x25], #0x10\n"
"ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v9.4s }, [x16], #0x10\n"
+ "ld1 { v9.4s }, [x15], #0x10\n"
"ld1 { v13.4s }, [x25], #0x10\n"
"ld1 { v17.4s }, [x24], #0x10\n"
"tbz x8, #2, 71f\n"
- "ld1 { v10.4s }, [x16], #0x10\n"
+ "ld1 { v10.4s }, [x15], #0x10\n"
"ld1 { v14.4s }, [x25], #0x10\n"
"ld1 { v18.4s }, [x24], #0x10\n"
"tbz x8, #1, 70f\n"
- "ldr d11, [x16], #0x8\n"
- "mov x20, #0x38\n"
+ "ldr d11, [x15], #0x8\n"
+ "mov x19, #0x38\n"
"ldr d15, [x25], #0x8\n"
"ldr d19, [x24], #0x8\n"
"tbz x8, #0, 77f\n"
- "ld1 { v11.s }[2], [x16]\n"
+ "ld1 { v11.s }[2], [x15]\n"
"ld1 { v15.s }[2], [x25]\n"
"ld1 { v19.s }[2], [x24]\n"
"b 77f\n"
"70:" // Height 3: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x8, #0, 77f\n"
- "ldr s11, [x16, #0x0]\n"
+ "ldr s11, [x15, #0x0]\n"
"ldr s15, [x25, #0x0]\n"
"ldr s19, [x24, #0x0]\n"
"b 77f\n"
"71:" // Height 3: Partial accumulate: partial_2_8
"tbz x8, #1, 72f\n"
- "ldr d10, [x16], #0x8\n"
- "mov x20, #0x28\n"
+ "ldr d10, [x15], #0x8\n"
"ldr d14, [x25], #0x8\n"
+ "mov x19, #0x28\n"
"ldr d18, [x24], #0x8\n"
"tbz x8, #0, 77f\n"
- "ld1 { v10.s }[2], [x16]\n"
+ "ld1 { v10.s }[2], [x15]\n"
"ld1 { v14.s }[2], [x25]\n"
"ld1 { v18.s }[2], [x24]\n"
"b 77f\n"
"72:" // Height 3: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x8, #0, 77f\n"
- "ldr s10, [x16, #0x0]\n"
+ "ldr s10, [x15, #0x0]\n"
"ldr s14, [x25, #0x0]\n"
"ldr s18, [x24, #0x0]\n"
"b 77f\n"
"73:" // Height 3: Partial accumulate: partial_4_0
"tbz x8, #2, 75f\n"
- "ld1 { v8.4s }, [x16], #0x10\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
"ld1 { v12.4s }, [x25], #0x10\n"
"ld1 { v16.4s }, [x24], #0x10\n"
"tbz x8, #1, 74f\n"
- "ldr d9, [x16], #0x8\n"
- "mov x20, #0x18\n"
+ "ldr d9, [x15], #0x8\n"
+ "mov x19, #0x18\n"
"ldr d13, [x25], #0x8\n"
"ldr d17, [x24], #0x8\n"
"tbz x8, #0, 77f\n"
- "ld1 { v9.s }[2], [x16]\n"
+ "ld1 { v9.s }[2], [x15]\n"
"ld1 { v13.s }[2], [x25]\n"
"ld1 { v17.s }[2], [x24]\n"
"b 77f\n"
"74:" // Height 3: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x8, #0, 77f\n"
- "ldr s9, [x16, #0x0]\n"
+ "ldr s9, [x15, #0x0]\n"
"ldr s13, [x25, #0x0]\n"
"ldr s17, [x24, #0x0]\n"
"b 77f\n"
"75:" // Height 3: Partial accumulate: partial_2_0
"tbz x8, #1, 76f\n"
- "ldr d8, [x16], #0x8\n"
- "mov x20, #0x8\n"
+ "ldr d8, [x15], #0x8\n"
"ldr d12, [x25], #0x8\n"
+ "mov x19, #0x8\n"
"ldr d16, [x24], #0x8\n"
"tbz x8, #0, 77f\n"
- "ld1 { v8.s }[2], [x16]\n"
+ "ld1 { v8.s }[2], [x15]\n"
"ld1 { v12.s }[2], [x25]\n"
"ld1 { v16.s }[2], [x24]\n"
"b 77f\n"
"76:" // Height 3: Partial accumulate: partial_1_0
- "ldr s8, [x16, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr s8, [x15, #0x0]\n"
+ "mov x19, #0x0\n"
"ldr s12, [x25, #0x0]\n"
"ldr s16, [x24, #0x0]\n"
"77:" // Height 3: Partial accumulate: Done
- "sub x16, x16, x20\n"
+ "sub x15, x15, x19\n"
"b 80f\n"
"78:" // Height 3: full accumulate
- "ldr q8, [x16, #0x0]\n"
- "ldr q9, [x16, #0x10]\n"
- "ldr q10, [x16, #0x20]\n"
- "ldr q11, [x16, #0x30]\n"
+ "ldr q8, [x15, #0x0]\n"
+ "ldr q9, [x15, #0x10]\n"
+ "ldr q10, [x15, #0x20]\n"
+ "ldr q11, [x15, #0x30]\n"
"ldr q12, [x25, #0x0]\n"
"ldr q13, [x25, #0x10]\n"
"ldr q14, [x25, #0x20]\n"
@@ -971,175 +971,175 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"movi v18.16b, #0x0\n"
"movi v19.16b, #0x0\n"
"80:" // Height 3: setup done
- "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
"81:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w13, [x20, x14, LSL #0x2]\n"
"tbz %x[flags], #3, 82f\n"
- "ldr x21, [%x[input_ptr], x15, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x13, [x21, #0x0]\n"
- "ldr x9, [x21, #0x8]\n"
- "ldr x27, [x21, #0x10]\n"
- "cbnz x15, 83f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x13, x13, x20, LSL #2\n"
- "add x9, x9, x20, LSL #2\n"
- "add x27, x27, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x12, [x20, #0x0]\n"
+ "ldr x28, [x20, #0x8]\n"
+ "ldr x26, [x20, #0x10]\n"
+ "cbnz x14, 83f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x12, x12, x19, LSL #2\n"
+ "add x28, x28, x19, LSL #2\n"
+ "add x26, x26, x19, LSL #2\n"
"b 83f\n"
"82:" // Height 3: setup direct input
- "mov x13, %x[input_ptr]\n"
- "add x9, x13, x20, LSL #2\n"
- "add x27, x9, x20, LSL #2\n"
+ "mov x12, %x[input_ptr]\n"
+ "add x28, x12, x19, LSL #2\n"
+ "add x26, x28, x19, LSL #2\n"
"83:" // Height 3: input setup done
- "cmp x14, #0x4\n"
+ "cmp x13, #0x4\n"
"blt 86f\n"
- "ldr q0, [x13, #0x0]\n"
- "cmp x14, #0x8\n"
- "ldr q1, [x9, #0x0]\n"
- "ldr q2, [x27, #0x0]\n"
+ "ldr q0, [x12, #0x0]\n"
+ "ldr q1, [x28, #0x0]\n"
+ "cmp x13, #0x8\n"
+ "ldr q2, [x26, #0x0]\n"
"ldr q6, [x17, #0x0]\n"
- "ldr q7, [x17, #0x10]\n"
"blt 85f\n"
"84:" // Height 3: Multiply loop: Main loop head
"fmla v8.4s, v6.4s, v0.s[0]\n"
- "ldr x12, [x17, #0x28]\n"
+ "ldr d7, [x17, #0x10]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
- "ldr x11, [x17, #0x38]\n"
+ "ldr x11, [x17, #0x18]\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
"ldr d6, [x17, #0x20]\n"
+ "ldr x10, [x17, #0x28]\n"
+ "add x12, x12, #0x10\n"
+ "mov v7.d[1], x11\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
+ "ldr x11, [x17, #0x38]\n"
+ "add x28, x28, #0x10\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
- "mov v6.d[1], x12\n"
+ "mov v6.d[1], x10\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
- "ldr x12, [x17, #0x48]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
"ldr d7, [x17, #0x30]\n"
- "mov v7.d[1], x11\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
+ "ldr x10, [x17, #0x48]\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
- "ldr x11, [x17, #0x58]\n"
+ "ldr x9, [x12, #0x8]\n"
"fmla v18.4s, v6.4s, v2.s[0]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x17, #0x40]\n"
+ "add x26, x26, #0x10\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
- "mov v6.d[1], x12\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"fmla v15.4s, v7.4s, v1.s[0]\n"
- "ldr x12, [x17, #0x68]\n"
+ "ldr x11, [x17, #0x58]\n"
"fmla v19.4s, v7.4s, v2.s[0]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x50]\n"
- "mov v7.d[1], x11\n"
+ "sub x13, x13, #0x4\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
+ "ldr x10, [x17, #0x68]\n"
"fmla v12.4s, v6.4s, v1.s[1]\n"
- "ldr x11, [x17, #0x78]\n"
+ "ldr x27, [x28, #0x8]\n"
"fmla v16.4s, v6.4s, v2.s[1]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x17, #0x60]\n"
+ "cmp x13, #0x8\n"
"fmla v9.4s, v7.4s, v0.s[1]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0x78]\n"
"fmla v13.4s, v7.4s, v1.s[1]\n"
- "ldr x12, [x17, #0x88]\n"
+ "ldr x25, [x26, #0x8]\n"
"fmla v17.4s, v7.4s, v2.s[1]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x70]\n"
- "mov v7.d[1], x11\n"
"fmla v10.4s, v6.4s, v0.s[1]\n"
+ "ldr x10, [x17, #0x88]\n"
"fmla v14.4s, v6.4s, v1.s[1]\n"
- "ldr x11, [x17, #0x98]\n"
"fmla v18.4s, v6.4s, v2.s[1]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x17, #0x80]\n"
"fmla v11.4s, v7.4s, v0.s[1]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0x98]\n"
"fmla v15.4s, v7.4s, v1.s[1]\n"
- "ldr x12, [x17, #0xa8]\n"
"fmla v19.4s, v7.4s, v2.s[1]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x90]\n"
- "mov v7.d[1], x11\n"
"fmla v8.4s, v6.4s, v0.s[2]\n"
+ "ldr x10, [x17, #0xa8]\n"
"fmla v12.4s, v6.4s, v1.s[2]\n"
- "ldr x11, [x17, #0xb8]\n"
"fmla v16.4s, v6.4s, v2.s[2]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x17, #0xa0]\n"
"fmla v9.4s, v7.4s, v0.s[2]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0xb8]\n"
"fmla v13.4s, v7.4s, v1.s[2]\n"
- "ldr x12, [x17, #0xc8]\n"
"fmla v17.4s, v7.4s, v2.s[2]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0xb0]\n"
- "mov v7.d[1], x11\n"
"fmla v10.4s, v6.4s, v0.s[2]\n"
+ "ldr x10, [x17, #0xc8]\n"
"fmla v14.4s, v6.4s, v1.s[2]\n"
- "ldr x11, [x17, #0xd8]\n"
"fmla v18.4s, v6.4s, v2.s[2]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x17, #0xc0]\n"
"fmla v11.4s, v7.4s, v0.s[2]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0xd8]\n"
"fmla v15.4s, v7.4s, v1.s[2]\n"
- "ldr x12, [x17, #0xe8]\n"
"fmla v19.4s, v7.4s, v2.s[2]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0xd0]\n"
- "mov v7.d[1], x11\n"
"fmla v8.4s, v6.4s, v0.s[3]\n"
+ "ldr x10, [x17, #0xe8]\n"
"fmla v12.4s, v6.4s, v1.s[3]\n"
- "ldr x11, [x17, #0xf8]\n"
"fmla v16.4s, v6.4s, v2.s[3]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x17, #0xe0]\n"
"fmla v9.4s, v7.4s, v0.s[3]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0xf8]\n"
"fmla v13.4s, v7.4s, v1.s[3]\n"
- "add x13, x13, #0x10\n"
"fmla v17.4s, v7.4s, v2.s[3]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0xf0]\n"
- "mov v7.d[1], x11\n"
- "add x9, x9, #0x10\n"
- "add x27, x27, #0x10\n"
"add x17, x17, #0x100\n"
"fmla v10.4s, v6.4s, v0.s[3]\n"
- "ldr x12, [x17, #0x8]\n"
+ "ldr x10, [x17, #0x8]\n"
"fmla v14.4s, v6.4s, v1.s[3]\n"
- "ldr x10, [x13, #0x8]\n"
"fmla v18.4s, v6.4s, v2.s[3]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x17, #0x0]\n"
"fmla v11.4s, v7.4s, v0.s[3]\n"
- "ldr d0, [x13, #0x0]\n"
+ "ldr d0, [x12, #0x0]\n"
"fmla v15.4s, v7.4s, v1.s[3]\n"
- "ldr d1, [x9, #0x0]\n"
- "ldr x28, [x9, #0x8]\n"
+ "ldr d1, [x28, #0x0]\n"
"fmla v19.4s, v7.4s, v2.s[3]\n"
- "ldr d2, [x27, #0x0]\n"
- "sub x14, x14, #0x4\n"
- "ldr d7, [x17, #0x10]\n"
- "cmp x14, #0x8\n"
- "ldr x26, [x27, #0x8]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x17, #0x18]\n"
- "mov v0.d[1], x10\n"
- "prfm pldl1keep, [x13, #0x80]\n"
- "mov v1.d[1], x28\n"
- "prfm pldl1keep, [x9, #0x80]\n"
- "mov v2.d[1], x26\n"
- "prfm pldl1keep, [x27, #0x80]\n"
- "mov v7.d[1], x11\n"
+ "mov v6.d[1], x10\n"
+ "mov v0.d[1], x9\n"
+ "ldr d2, [x26, #0x0]\n"
+ "mov v1.d[1], x27\n"
+ "mov v2.d[1], x25\n"
"bge 84b\n"
"85:" // Height 3: Multiply loop: Single iteration only
"fmla v8.4s, v6.4s, v0.s[0]\n"
- "add x13, x13, #0x10\n"
+ "ldr q7, [x17, #0x10]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
- "add x9, x9, #0x10\n"
+ "sub x13, x13, #0x4\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
"ldr q6, [x17, #0x20]\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
- "add x27, x27, #0x10\n"
+ "add x12, x12, #0x10\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
- "sub x14, x14, #0x4\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
"ldr q7, [x17, #0x30]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "add x28, x28, #0x10\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
"fmla v18.4s, v6.4s, v2.s[0]\n"
"ldr q6, [x17, #0x40]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "add x26, x26, #0x10\n"
"fmla v15.4s, v7.4s, v1.s[0]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"fmla v19.4s, v7.4s, v2.s[0]\n"
"ldr q7, [x17, #0x50]\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
@@ -1190,15 +1190,15 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"fmla v15.4s, v7.4s, v1.s[3]\n"
"fmla v19.4s, v7.4s, v2.s[3]\n"
"86:" // Height 3: Multiply loop: Main loop skip
- "cbz x14, 88f\n"
+ "cbz x13, 88f\n"
"87:" // Height 3: Multiply loop: Odd block loop
- "ldr s0, [x13], #0x4\n"
- "sub x14, x14, #0x1\n"
- "ldr s1, [x9], #0x4\n"
- "ldr s2, [x27], #0x4\n"
+ "ldr s0, [x12], #0x4\n"
+ "sub x13, x13, #0x1\n"
+ "ldr s1, [x28], #0x4\n"
+ "ldr s2, [x26], #0x4\n"
"ldr q6, [x17, #0x0]\n"
- "fmla v8.4s, v6.4s, v0.s[0]\n"
"ldr q7, [x17, #0x10]\n"
+ "fmla v8.4s, v6.4s, v0.s[0]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
"ldr q6, [x17, #0x20]\n"
@@ -1213,21 +1213,23 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"fmla v11.4s, v7.4s, v0.s[0]\n"
"fmla v15.4s, v7.4s, v1.s[0]\n"
"fmla v19.4s, v7.4s, v2.s[0]\n"
- "cbnz x14, 87b\n"
+ "cbnz x13, 87b\n"
"88:" // Height 3: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x15, x15, #0x1\n"
- "cmp x15, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x14, x14, #0x1\n"
+ "cmp x14, x19\n"
"bne 81b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "prfm pstl1keep, [x16, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x15, #0x0]\n"
+ "add x25, x15, x19, LSL #2\n"
"prfm pstl1keep, [x25, #0x0]\n"
+ "add x24, x25, x19, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
"tbz %x[flags], #1, 89f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x20, %x[args_ptr], %[offset_min]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x20]\n"
+ "ld1r { v0.4s }, [x19]\n"
"fmin v8.4s, v8.4s, v0.4s\n"
"fmin v9.4s, v9.4s, v0.4s\n"
"fmin v10.4s, v10.4s, v0.4s\n"
@@ -1238,109 +1240,107 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"fmin v15.4s, v15.4s, v0.4s\n"
"fmin v16.4s, v16.4s, v0.4s\n"
"fmin v17.4s, v17.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmax v15.4s, v15.4s, v1.4s\n"
+ "fmax v16.4s, v16.4s, v1.4s\n"
+ "fmax v17.4s, v17.4s, v1.4s\n"
"fmin v18.4s, v18.4s, v0.4s\n"
"fmin v19.4s, v19.4s, v0.4s\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v15.4s, v15.4s, v0.4s\n"
- "fmax v16.4s, v16.4s, v0.4s\n"
- "fmax v17.4s, v17.4s, v0.4s\n"
- "fmax v18.4s, v18.4s, v0.4s\n"
- "fmax v19.4s, v19.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v1.4s\n"
+ "fmax v19.4s, v19.4s, v1.4s\n"
"89:" // Height 3: No activation
"cmp x8, #0x10\n"
"bge 98f\n"
"tbz x8, #3, 93f\n"
- "st1 { v8.4s }, [x16], #0x10\n"
- "st1 { v9.4s }, [x16], #0x10\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
+ "st1 { v9.4s }, [x15], #0x10\n"
"st1 { v12.4s }, [x25], #0x10\n"
"st1 { v13.4s }, [x25], #0x10\n"
"st1 { v16.4s }, [x24], #0x10\n"
"st1 { v17.4s }, [x24], #0x10\n"
"tbz x8, #2, 91f\n"
- "st1 { v10.4s }, [x16], #0x10\n"
+ "st1 { v10.4s }, [x15], #0x10\n"
"st1 { v14.4s }, [x25], #0x10\n"
"st1 { v18.4s }, [x24], #0x10\n"
"tbz x8, #1, 90f\n"
- "str d11, [x16], #0x8\n"
+ "str d11, [x15], #0x8\n"
"str d15, [x25], #0x8\n"
"str d19, [x24], #0x8\n"
"tbz x8, #0, 97f\n"
- "st1 { v11.s }[2], [x16]\n"
+ "st1 { v11.s }[2], [x15]\n"
"st1 { v15.s }[2], [x25]\n"
"st1 { v19.s }[2], [x24]\n"
"b 97f\n"
"90:" // Height 3: Partial direct writeback: partial_1_12
"tbz x8, #0, 97f\n"
- "str s11, [x16, #0x0]\n"
+ "str s11, [x15, #0x0]\n"
"str s15, [x25, #0x0]\n"
"str s19, [x24, #0x0]\n"
"b 97f\n"
"91:" // Height 3: Partial direct writeback: partial_2_8
"tbz x8, #1, 92f\n"
- "str d10, [x16], #0x8\n"
+ "str d10, [x15], #0x8\n"
"str d14, [x25], #0x8\n"
"str d18, [x24], #0x8\n"
"tbz x8, #0, 97f\n"
- "st1 { v10.s }[2], [x16]\n"
+ "st1 { v10.s }[2], [x15]\n"
"st1 { v14.s }[2], [x25]\n"
"st1 { v18.s }[2], [x24]\n"
"b 97f\n"
"92:" // Height 3: Partial direct writeback: partial_1_8
"tbz x8, #0, 97f\n"
- "str s10, [x16, #0x0]\n"
+ "str s10, [x15, #0x0]\n"
"str s14, [x25, #0x0]\n"
"str s18, [x24, #0x0]\n"
"b 97f\n"
"93:" // Height 3: Partial direct writeback: partial_4_0
"tbz x8, #2, 95f\n"
- "st1 { v8.4s }, [x16], #0x10\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
"st1 { v12.4s }, [x25], #0x10\n"
"st1 { v16.4s }, [x24], #0x10\n"
"tbz x8, #1, 94f\n"
- "str d9, [x16], #0x8\n"
+ "str d9, [x15], #0x8\n"
"str d13, [x25], #0x8\n"
"str d17, [x24], #0x8\n"
"tbz x8, #0, 97f\n"
- "st1 { v9.s }[2], [x16]\n"
+ "st1 { v9.s }[2], [x15]\n"
"st1 { v13.s }[2], [x25]\n"
"st1 { v17.s }[2], [x24]\n"
"b 97f\n"
"94:" // Height 3: Partial direct writeback: partial_1_4
"tbz x8, #0, 97f\n"
- "str s9, [x16, #0x0]\n"
+ "str s9, [x15, #0x0]\n"
"str s13, [x25, #0x0]\n"
"str s17, [x24, #0x0]\n"
"b 97f\n"
"95:" // Height 3: Partial direct writeback: partial_2_0
"tbz x8, #1, 96f\n"
- "str d8, [x16], #0x8\n"
+ "str d8, [x15], #0x8\n"
"str d12, [x25], #0x8\n"
"str d16, [x24], #0x8\n"
"tbz x8, #0, 97f\n"
- "st1 { v8.s }[2], [x16]\n"
+ "st1 { v8.s }[2], [x15]\n"
"st1 { v12.s }[2], [x25]\n"
"st1 { v16.s }[2], [x24]\n"
"b 97f\n"
"96:" // Height 3: Partial direct writeback: partial_1_0
- "str s8, [x16, #0x0]\n"
+ "str s8, [x15, #0x0]\n"
"str s12, [x25, #0x0]\n"
"str s16, [x24, #0x0]\n"
"97:" // Height 3: Partial direct writeback: Done
"b 99f\n"
"98:" // Height 3: Full writeback
- "str q8, [x16, #0x0]\n"
- "str q9, [x16, #0x10]\n"
- "str q10, [x16, #0x20]\n"
- "str q11, [x16, #0x30]\n"
- "add x16, x16, #0x40\n"
+ "str q8, [x15, #0x0]\n"
+ "str q9, [x15, #0x10]\n"
+ "str q10, [x15, #0x20]\n"
+ "str q11, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
"str q12, [x25, #0x0]\n"
"str q13, [x25, #0x10]\n"
"str q14, [x25, #0x20]\n"
@@ -1354,22 +1354,22 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"bgt 68b\n"
"b 200f\n"
"100:" // Height 4
- "mov x7, %x[bias]\n"
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x16, %x[bias]\n"
"ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x16, %x[output_ptr]\n"
+ "mov x15, %x[output_ptr]\n"
"101:" // Height 4: Column loop
- "cbz x7, 102f\n"
- "ldr q8, [x7, #0x0]\n"
+ "cbz x16, 102f\n"
+ "ldr q8, [x16, #0x0]\n"
+ "ldr q9, [x16, #0x10]\n"
+ "ldr q10, [x16, #0x20]\n"
"mov v12.16b, v8.16b\n"
- "ldr q9, [x7, #0x10]\n"
+ "ldr q11, [x16, #0x30]\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x7, #0x20]\n"
+ "add x16, x16, #0x40\n"
"mov v14.16b, v10.16b\n"
- "ldr q11, [x7, #0x30]\n"
"mov v15.16b, v11.16b\n"
"mov v16.16b, v8.16b\n"
- "add x7, x7, #0x40\n"
"mov v17.16b, v9.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
@@ -1380,120 +1380,120 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"b 113f\n"
"102:" // Height 4: no bias
"tbz %x[flags], #0, 112f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x8, #0x10\n"
- "add x23, x24, x20, LSL #2\n"
+ "add x25, x15, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"bge 111f\n"
"tbz x8, #3, 106f\n"
- "ld1 { v8.4s }, [x16], #0x10\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
"ld1 { v12.4s }, [x25], #0x10\n"
"ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v9.4s }, [x16], #0x10\n"
+ "ld1 { v9.4s }, [x15], #0x10\n"
"ld1 { v13.4s }, [x25], #0x10\n"
"ld1 { v17.4s }, [x24], #0x10\n"
+ "ld1 { v20.4s }, [x23], #0x10\n"
"ld1 { v21.4s }, [x23], #0x10\n"
"tbz x8, #2, 104f\n"
- "ld1 { v10.4s }, [x16], #0x10\n"
+ "ld1 { v10.4s }, [x15], #0x10\n"
"ld1 { v14.4s }, [x25], #0x10\n"
"ld1 { v18.4s }, [x24], #0x10\n"
"ld1 { v22.4s }, [x23], #0x10\n"
"tbz x8, #1, 103f\n"
- "ldr d11, [x16], #0x8\n"
- "mov x20, #0x38\n"
+ "ldr d11, [x15], #0x8\n"
+ "mov x19, #0x38\n"
"ldr d15, [x25], #0x8\n"
"ldr d19, [x24], #0x8\n"
"ldr d23, [x23], #0x8\n"
"tbz x8, #0, 110f\n"
- "ld1 { v11.s }[2], [x16]\n"
+ "ld1 { v11.s }[2], [x15]\n"
"ld1 { v15.s }[2], [x25]\n"
"ld1 { v19.s }[2], [x24]\n"
"ld1 { v23.s }[2], [x23]\n"
"b 110f\n"
"103:" // Height 4: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x8, #0, 110f\n"
- "ldr s11, [x16, #0x0]\n"
+ "ldr s11, [x15, #0x0]\n"
"ldr s15, [x25, #0x0]\n"
"ldr s19, [x24, #0x0]\n"
"ldr s23, [x23, #0x0]\n"
"b 110f\n"
"104:" // Height 4: Partial accumulate: partial_2_8
"tbz x8, #1, 105f\n"
- "ldr d10, [x16], #0x8\n"
- "mov x20, #0x28\n"
+ "ldr d10, [x15], #0x8\n"
"ldr d14, [x25], #0x8\n"
+ "mov x19, #0x28\n"
"ldr d18, [x24], #0x8\n"
"ldr d22, [x23], #0x8\n"
"tbz x8, #0, 110f\n"
- "ld1 { v10.s }[2], [x16]\n"
+ "ld1 { v10.s }[2], [x15]\n"
"ld1 { v14.s }[2], [x25]\n"
"ld1 { v18.s }[2], [x24]\n"
"ld1 { v22.s }[2], [x23]\n"
"b 110f\n"
"105:" // Height 4: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x8, #0, 110f\n"
- "ldr s10, [x16, #0x0]\n"
+ "ldr s10, [x15, #0x0]\n"
"ldr s14, [x25, #0x0]\n"
"ldr s18, [x24, #0x0]\n"
"ldr s22, [x23, #0x0]\n"
"b 110f\n"
"106:" // Height 4: Partial accumulate: partial_4_0
"tbz x8, #2, 108f\n"
- "ld1 { v8.4s }, [x16], #0x10\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
"ld1 { v12.4s }, [x25], #0x10\n"
"ld1 { v16.4s }, [x24], #0x10\n"
"ld1 { v20.4s }, [x23], #0x10\n"
"tbz x8, #1, 107f\n"
- "ldr d9, [x16], #0x8\n"
- "mov x20, #0x18\n"
+ "ldr d9, [x15], #0x8\n"
+ "mov x19, #0x18\n"
"ldr d13, [x25], #0x8\n"
"ldr d17, [x24], #0x8\n"
"ldr d21, [x23], #0x8\n"
"tbz x8, #0, 110f\n"
- "ld1 { v9.s }[2], [x16]\n"
+ "ld1 { v9.s }[2], [x15]\n"
"ld1 { v13.s }[2], [x25]\n"
"ld1 { v17.s }[2], [x24]\n"
"ld1 { v21.s }[2], [x23]\n"
"b 110f\n"
"107:" // Height 4: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x8, #0, 110f\n"
- "ldr s9, [x16, #0x0]\n"
+ "ldr s9, [x15, #0x0]\n"
"ldr s13, [x25, #0x0]\n"
"ldr s17, [x24, #0x0]\n"
"ldr s21, [x23, #0x0]\n"
"b 110f\n"
"108:" // Height 4: Partial accumulate: partial_2_0
"tbz x8, #1, 109f\n"
- "ldr d8, [x16], #0x8\n"
- "mov x20, #0x8\n"
+ "ldr d8, [x15], #0x8\n"
"ldr d12, [x25], #0x8\n"
+ "mov x19, #0x8\n"
"ldr d16, [x24], #0x8\n"
"ldr d20, [x23], #0x8\n"
"tbz x8, #0, 110f\n"
- "ld1 { v8.s }[2], [x16]\n"
+ "ld1 { v8.s }[2], [x15]\n"
"ld1 { v12.s }[2], [x25]\n"
"ld1 { v16.s }[2], [x24]\n"
"ld1 { v20.s }[2], [x23]\n"
"b 110f\n"
"109:" // Height 4: Partial accumulate: partial_1_0
- "ldr s8, [x16, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr s8, [x15, #0x0]\n"
+ "mov x19, #0x0\n"
"ldr s12, [x25, #0x0]\n"
"ldr s16, [x24, #0x0]\n"
"ldr s20, [x23, #0x0]\n"
"110:" // Height 4: Partial accumulate: Done
- "sub x16, x16, x20\n"
+ "sub x15, x15, x19\n"
"b 113f\n"
"111:" // Height 4: full accumulate
- "ldr q8, [x16, #0x0]\n"
- "ldr q9, [x16, #0x10]\n"
- "ldr q10, [x16, #0x20]\n"
- "ldr q11, [x16, #0x30]\n"
+ "ldr q8, [x15, #0x0]\n"
+ "ldr q9, [x15, #0x10]\n"
+ "ldr q10, [x15, #0x20]\n"
+ "ldr q11, [x15, #0x30]\n"
"ldr q12, [x25, #0x0]\n"
"ldr q13, [x25, #0x10]\n"
"ldr q14, [x25, #0x20]\n"
@@ -1525,204 +1525,204 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"movi v22.16b, #0x0\n"
"movi v23.16b, #0x0\n"
"113:" // Height 4: setup done
- "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
"114:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w13, [x20, x14, LSL #0x2]\n"
"tbz %x[flags], #3, 115f\n"
- "ldr x21, [%x[input_ptr], x15, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x13, [x21, #0x0]\n"
- "ldr x9, [x21, #0x8]\n"
- "ldr x27, [x21, #0x10]\n"
- "ldr x25, [x21, #0x18]\n"
- "cbnz x15, 116f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x13, x13, x20, LSL #2\n"
- "add x9, x9, x20, LSL #2\n"
- "add x27, x27, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x12, [x20, #0x0]\n"
+ "ldr x28, [x20, #0x8]\n"
+ "ldr x26, [x20, #0x10]\n"
+ "ldr x24, [x20, #0x18]\n"
+ "cbnz x14, 116f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x12, x12, x19, LSL #2\n"
+ "add x28, x28, x19, LSL #2\n"
+ "add x26, x26, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
"b 116f\n"
"115:" // Height 4: setup direct input
- "mov x13, %x[input_ptr]\n"
- "add x9, x13, x20, LSL #2\n"
- "add x27, x9, x20, LSL #2\n"
- "add x25, x27, x20, LSL #2\n"
+ "mov x12, %x[input_ptr]\n"
+ "add x28, x12, x19, LSL #2\n"
+ "add x26, x28, x19, LSL #2\n"
+ "add x24, x26, x19, LSL #2\n"
"116:" // Height 4: input setup done
- "cmp x14, #0x4\n"
+ "cmp x13, #0x4\n"
"blt 119f\n"
- "ldr q0, [x13, #0x0]\n"
- "cmp x14, #0x8\n"
- "ldr q1, [x9, #0x0]\n"
- "ldr q2, [x27, #0x0]\n"
- "ldr q3, [x25, #0x0]\n"
+ "ldr q0, [x12, #0x0]\n"
+ "ldr q1, [x28, #0x0]\n"
+ "cmp x13, #0x8\n"
+ "ldr q2, [x26, #0x0]\n"
+ "ldr q3, [x24, #0x0]\n"
"ldr q6, [x17, #0x0]\n"
- "ldr q7, [x17, #0x10]\n"
"blt 118f\n"
"117:" // Height 4: Multiply loop: Main loop head
"fmla v8.4s, v6.4s, v0.s[0]\n"
- "ldr x12, [x17, #0x28]\n"
+ "ldr d7, [x17, #0x10]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
- "ldr x11, [x17, #0x38]\n"
+ "ldr x11, [x17, #0x18]\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
- "add x13, x13, #0x10\n"
+ "ldr x10, [x17, #0x28]\n"
"fmla v20.4s, v6.4s, v3.s[0]\n"
"ldr d6, [x17, #0x20]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x17, #0x38]\n"
+ "add x12, x12, #0x10\n"
+ "add x28, x28, #0x10\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
- "mov v6.d[1], x12\n"
+ "mov v6.d[1], x10\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
- "ldr x12, [x17, #0x48]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
- "add x9, x9, #0x10\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
"fmla v21.4s, v7.4s, v3.s[0]\n"
"ldr d7, [x17, #0x30]\n"
- "mov v7.d[1], x11\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
+ "ldr x10, [x17, #0x48]\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
- "ldr x11, [x17, #0x58]\n"
+ "ldr x9, [x12, #0x8]\n"
"fmla v18.4s, v6.4s, v2.s[0]\n"
- "add x27, x27, #0x10\n"
+ "mov v7.d[1], x11\n"
"fmla v22.4s, v6.4s, v3.s[0]\n"
"ldr d6, [x17, #0x40]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0x58]\n"
"fmla v15.4s, v7.4s, v1.s[0]\n"
- "ldr x12, [x17, #0x68]\n"
+ "ldr x27, [x28, #0x8]\n"
"fmla v19.4s, v7.4s, v2.s[0]\n"
- "add x25, x25, #0x10\n"
+ "mov v6.d[1], x10\n"
"fmla v23.4s, v7.4s, v3.s[0]\n"
"ldr d7, [x17, #0x50]\n"
- "mov v7.d[1], x11\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
+ "ldr x10, [x17, #0x68]\n"
"fmla v12.4s, v6.4s, v1.s[1]\n"
- "ldr x11, [x17, #0x78]\n"
+ "add x26, x26, #0x10\n"
"fmla v16.4s, v6.4s, v2.s[1]\n"
- "ldr x10, [x13, #0x8]\n"
+ "mov v7.d[1], x11\n"
"fmla v20.4s, v6.4s, v3.s[1]\n"
- "ldr d6, [x17, #0x60]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"fmla v9.4s, v7.4s, v0.s[1]\n"
- "mov v6.d[1], x12\n"
+ "ldr d6, [x17, #0x60]\n"
"fmla v13.4s, v7.4s, v1.s[1]\n"
- "ldr x12, [x17, #0x88]\n"
+ "ldr x11, [x17, #0x78]\n"
"fmla v17.4s, v7.4s, v2.s[1]\n"
- "ldr x28, [x9, #0x8]\n"
+ "ldr x25, [x26, #0x8]\n"
"fmla v21.4s, v7.4s, v3.s[1]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x70]\n"
- "mov v7.d[1], x11\n"
+ "add x24, x24, #0x10\n"
"fmla v10.4s, v6.4s, v0.s[1]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v14.4s, v6.4s, v1.s[1]\n"
- "ldr x11, [x17, #0x98]\n"
+ "ldr x10, [x17, #0x88]\n"
"fmla v18.4s, v6.4s, v2.s[1]\n"
- "ldr x26, [x27, #0x8]\n"
+ "mov v7.d[1], x11\n"
"fmla v22.4s, v6.4s, v3.s[1]\n"
"ldr d6, [x17, #0x80]\n"
"fmla v11.4s, v7.4s, v0.s[1]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0x98]\n"
"fmla v15.4s, v7.4s, v1.s[1]\n"
- "ldr x12, [x17, #0xa8]\n"
+ "ldr x23, [x24, #0x8]\n"
"fmla v19.4s, v7.4s, v2.s[1]\n"
- "ldr x24, [x25, #0x8]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.4s, v7.4s, v3.s[1]\n"
"ldr d7, [x17, #0x90]\n"
- "mov v7.d[1], x11\n"
"fmla v8.4s, v6.4s, v0.s[2]\n"
+ "ldr x10, [x17, #0xa8]\n"
"fmla v12.4s, v6.4s, v1.s[2]\n"
- "ldr x11, [x17, #0xb8]\n"
+ "sub x13, x13, #0x4\n"
"fmla v16.4s, v6.4s, v2.s[2]\n"
- "sub x14, x14, #0x4\n"
+ "mov v7.d[1], x11\n"
"fmla v20.4s, v6.4s, v3.s[2]\n"
"ldr d6, [x17, #0xa0]\n"
"fmla v9.4s, v7.4s, v0.s[2]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0xb8]\n"
"fmla v13.4s, v7.4s, v1.s[2]\n"
- "ldr x12, [x17, #0xc8]\n"
+ "cmp x13, #0x8\n"
"fmla v17.4s, v7.4s, v2.s[2]\n"
- "cmp x14, #0x8\n"
+ "mov v6.d[1], x10\n"
"fmla v21.4s, v7.4s, v3.s[2]\n"
"ldr d7, [x17, #0xb0]\n"
- "mov v7.d[1], x11\n"
"fmla v10.4s, v6.4s, v0.s[2]\n"
+ "ldr x10, [x17, #0xc8]\n"
"fmla v14.4s, v6.4s, v1.s[2]\n"
- "ldr x11, [x17, #0xd8]\n"
"fmla v18.4s, v6.4s, v2.s[2]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "mov v7.d[1], x11\n"
"fmla v22.4s, v6.4s, v3.s[2]\n"
"ldr d6, [x17, #0xc0]\n"
"fmla v11.4s, v7.4s, v0.s[2]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0xd8]\n"
"fmla v15.4s, v7.4s, v1.s[2]\n"
- "ldr x12, [x17, #0xe8]\n"
"fmla v19.4s, v7.4s, v2.s[2]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.4s, v7.4s, v3.s[2]\n"
"ldr d7, [x17, #0xd0]\n"
- "mov v7.d[1], x11\n"
"fmla v8.4s, v6.4s, v0.s[3]\n"
+ "ldr x10, [x17, #0xe8]\n"
"fmla v12.4s, v6.4s, v1.s[3]\n"
- "ldr x11, [x17, #0xf8]\n"
"fmla v16.4s, v6.4s, v2.s[3]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "mov v7.d[1], x11\n"
"fmla v20.4s, v6.4s, v3.s[3]\n"
"ldr d6, [x17, #0xe0]\n"
"fmla v9.4s, v7.4s, v0.s[3]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x17, #0xf8]\n"
"fmla v13.4s, v7.4s, v1.s[3]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"fmla v17.4s, v7.4s, v2.s[3]\n"
+ "mov v6.d[1], x10\n"
"fmla v21.4s, v7.4s, v3.s[3]\n"
"ldr d7, [x17, #0xf0]\n"
- "mov v7.d[1], x11\n"
- "add x17, x17, #0x100\n"
"fmla v10.4s, v6.4s, v0.s[3]\n"
- "ldr x12, [x17, #0x8]\n"
+ "add x17, x17, #0x100\n"
"fmla v14.4s, v6.4s, v1.s[3]\n"
- "ldr x11, [x17, #0x18]\n"
+ "ldr x10, [x17, #0x8]\n"
"fmla v18.4s, v6.4s, v2.s[3]\n"
+ "mov v7.d[1], x11\n"
"fmla v22.4s, v6.4s, v3.s[3]\n"
"ldr d6, [x17, #0x0]\n"
"fmla v11.4s, v7.4s, v0.s[3]\n"
- "ldr d0, [x13, #0x0]\n"
+ "ldr d0, [x12, #0x0]\n"
"fmla v15.4s, v7.4s, v1.s[3]\n"
- "ldr d1, [x9, #0x0]\n"
+ "ldr d1, [x28, #0x0]\n"
"fmla v19.4s, v7.4s, v2.s[3]\n"
- "ldr d2, [x27, #0x0]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.4s, v7.4s, v3.s[3]\n"
- "ldr d3, [x25, #0x0]\n"
- "ldr d7, [x17, #0x10]\n"
- "mov v6.d[1], x12\n"
- "mov v0.d[1], x10\n"
- "mov v1.d[1], x28\n"
- "mov v2.d[1], x26\n"
- "mov v3.d[1], x24\n"
- "mov v7.d[1], x11\n"
+ "mov v0.d[1], x9\n"
+ "mov v1.d[1], x27\n"
+ "ldr d2, [x26, #0x0]\n"
+ "ldr d3, [x24, #0x0]\n"
+ "mov v2.d[1], x25\n"
+ "mov v3.d[1], x23\n"
"bge 117b\n"
"118:" // Height 4: Multiply loop: Single iteration only
"fmla v8.4s, v6.4s, v0.s[0]\n"
- "add x13, x13, #0x10\n"
+ "ldr q7, [x17, #0x10]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
- "add x9, x9, #0x10\n"
+ "sub x13, x13, #0x4\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
- "add x27, x27, #0x10\n"
+ "add x12, x12, #0x10\n"
"fmla v20.4s, v6.4s, v3.s[0]\n"
- "ldr q6, [x17, #0x20]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
- "add x25, x25, #0x10\n"
+ "ldr q6, [x17, #0x20]\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
- "sub x14, x14, #0x4\n"
+ "add x28, x28, #0x10\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
"fmla v21.4s, v7.4s, v3.s[0]\n"
"ldr q7, [x17, #0x30]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "add x26, x26, #0x10\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"fmla v18.4s, v6.4s, v2.s[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
"fmla v22.4s, v6.4s, v3.s[0]\n"
- "ldr q6, [x17, #0x40]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
+ "ldr q6, [x17, #0x40]\n"
"fmla v15.4s, v7.4s, v1.s[0]\n"
"fmla v19.4s, v7.4s, v2.s[0]\n"
"fmla v23.4s, v7.4s, v3.s[0]\n"
@@ -1787,16 +1787,16 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"fmla v19.4s, v7.4s, v2.s[3]\n"
"fmla v23.4s, v7.4s, v3.s[3]\n"
"119:" // Height 4: Multiply loop: Main loop skip
- "cbz x14, 121f\n"
+ "cbz x13, 121f\n"
"120:" // Height 4: Multiply loop: Odd block loop
- "ldr s0, [x13], #0x4\n"
- "sub x14, x14, #0x1\n"
- "ldr s1, [x9], #0x4\n"
- "ldr s2, [x27], #0x4\n"
- "ldr s3, [x25], #0x4\n"
+ "ldr s0, [x12], #0x4\n"
+ "sub x13, x13, #0x1\n"
+ "ldr s1, [x28], #0x4\n"
+ "ldr s2, [x26], #0x4\n"
+ "ldr s3, [x24], #0x4\n"
"ldr q6, [x17, #0x0]\n"
- "fmla v8.4s, v6.4s, v0.s[0]\n"
"ldr q7, [x17, #0x10]\n"
+ "fmla v8.4s, v6.4s, v0.s[0]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
"fmla v20.4s, v6.4s, v3.s[0]\n"
@@ -1815,23 +1815,25 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"fmla v15.4s, v7.4s, v1.s[0]\n"
"fmla v19.4s, v7.4s, v2.s[0]\n"
"fmla v23.4s, v7.4s, v3.s[0]\n"
- "cbnz x14, 120b\n"
+ "cbnz x13, 120b\n"
"121:" // Height 4: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x15, x15, #0x1\n"
- "cmp x15, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x14, x14, #0x1\n"
+ "cmp x14, x19\n"
"bne 114b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "prfm pstl1keep, [x16, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x15, #0x0]\n"
+ "add x25, x15, x19, LSL #2\n"
"prfm pstl1keep, [x25, #0x0]\n"
+ "add x24, x25, x19, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
"tbz %x[flags], #1, 122f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x20, %x[args_ptr], %[offset_min]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x20]\n"
+ "ld1r { v0.4s }, [x19]\n"
"fmin v8.4s, v8.4s, v0.4s\n"
"fmin v9.4s, v9.4s, v0.4s\n"
"fmin v10.4s, v10.4s, v0.4s\n"
@@ -1842,36 +1844,34 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"fmin v15.4s, v15.4s, v0.4s\n"
"fmin v16.4s, v16.4s, v0.4s\n"
"fmin v17.4s, v17.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmax v15.4s, v15.4s, v1.4s\n"
+ "fmax v16.4s, v16.4s, v1.4s\n"
+ "fmax v17.4s, v17.4s, v1.4s\n"
"fmin v18.4s, v18.4s, v0.4s\n"
"fmin v19.4s, v19.4s, v0.4s\n"
"fmin v20.4s, v20.4s, v0.4s\n"
"fmin v21.4s, v21.4s, v0.4s\n"
"fmin v22.4s, v22.4s, v0.4s\n"
"fmin v23.4s, v23.4s, v0.4s\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v15.4s, v15.4s, v0.4s\n"
- "fmax v16.4s, v16.4s, v0.4s\n"
- "fmax v17.4s, v17.4s, v0.4s\n"
- "fmax v18.4s, v18.4s, v0.4s\n"
- "fmax v19.4s, v19.4s, v0.4s\n"
- "fmax v20.4s, v20.4s, v0.4s\n"
- "fmax v21.4s, v21.4s, v0.4s\n"
- "fmax v22.4s, v22.4s, v0.4s\n"
- "fmax v23.4s, v23.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v1.4s\n"
+ "fmax v19.4s, v19.4s, v1.4s\n"
+ "fmax v20.4s, v20.4s, v1.4s\n"
+ "fmax v21.4s, v21.4s, v1.4s\n"
+ "fmax v22.4s, v22.4s, v1.4s\n"
+ "fmax v23.4s, v23.4s, v1.4s\n"
"122:" // Height 4: No activation
"cmp x8, #0x10\n"
"bge 131f\n"
"tbz x8, #3, 126f\n"
- "st1 { v8.4s }, [x16], #0x10\n"
- "st1 { v9.4s }, [x16], #0x10\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
+ "st1 { v9.4s }, [x15], #0x10\n"
"st1 { v12.4s }, [x25], #0x10\n"
"st1 { v13.4s }, [x25], #0x10\n"
"st1 { v16.4s }, [x24], #0x10\n"
@@ -1879,96 +1879,96 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"st1 { v20.4s }, [x23], #0x10\n"
"st1 { v21.4s }, [x23], #0x10\n"
"tbz x8, #2, 124f\n"
- "st1 { v10.4s }, [x16], #0x10\n"
+ "st1 { v10.4s }, [x15], #0x10\n"
"st1 { v14.4s }, [x25], #0x10\n"
"st1 { v18.4s }, [x24], #0x10\n"
"st1 { v22.4s }, [x23], #0x10\n"
"tbz x8, #1, 123f\n"
- "str d11, [x16], #0x8\n"
+ "str d11, [x15], #0x8\n"
"str d15, [x25], #0x8\n"
"str d19, [x24], #0x8\n"
"str d23, [x23], #0x8\n"
"tbz x8, #0, 130f\n"
- "st1 { v11.s }[2], [x16]\n"
+ "st1 { v11.s }[2], [x15]\n"
"st1 { v15.s }[2], [x25]\n"
"st1 { v19.s }[2], [x24]\n"
"st1 { v23.s }[2], [x23]\n"
"b 130f\n"
"123:" // Height 4: Partial direct writeback: partial_1_12
"tbz x8, #0, 130f\n"
- "str s11, [x16, #0x0]\n"
+ "str s11, [x15, #0x0]\n"
"str s15, [x25, #0x0]\n"
"str s19, [x24, #0x0]\n"
"str s23, [x23, #0x0]\n"
"b 130f\n"
"124:" // Height 4: Partial direct writeback: partial_2_8
"tbz x8, #1, 125f\n"
- "str d10, [x16], #0x8\n"
+ "str d10, [x15], #0x8\n"
"str d14, [x25], #0x8\n"
"str d18, [x24], #0x8\n"
"str d22, [x23], #0x8\n"
"tbz x8, #0, 130f\n"
- "st1 { v10.s }[2], [x16]\n"
+ "st1 { v10.s }[2], [x15]\n"
"st1 { v14.s }[2], [x25]\n"
"st1 { v18.s }[2], [x24]\n"
"st1 { v22.s }[2], [x23]\n"
"b 130f\n"
"125:" // Height 4: Partial direct writeback: partial_1_8
"tbz x8, #0, 130f\n"
- "str s10, [x16, #0x0]\n"
+ "str s10, [x15, #0x0]\n"
"str s14, [x25, #0x0]\n"
"str s18, [x24, #0x0]\n"
"str s22, [x23, #0x0]\n"
"b 130f\n"
"126:" // Height 4: Partial direct writeback: partial_4_0
"tbz x8, #2, 128f\n"
- "st1 { v8.4s }, [x16], #0x10\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
"st1 { v12.4s }, [x25], #0x10\n"
"st1 { v16.4s }, [x24], #0x10\n"
"st1 { v20.4s }, [x23], #0x10\n"
"tbz x8, #1, 127f\n"
- "str d9, [x16], #0x8\n"
+ "str d9, [x15], #0x8\n"
"str d13, [x25], #0x8\n"
"str d17, [x24], #0x8\n"
"str d21, [x23], #0x8\n"
"tbz x8, #0, 130f\n"
- "st1 { v9.s }[2], [x16]\n"
+ "st1 { v9.s }[2], [x15]\n"
"st1 { v13.s }[2], [x25]\n"
"st1 { v17.s }[2], [x24]\n"
"st1 { v21.s }[2], [x23]\n"
"b 130f\n"
"127:" // Height 4: Partial direct writeback: partial_1_4
"tbz x8, #0, 130f\n"
- "str s9, [x16, #0x0]\n"
+ "str s9, [x15, #0x0]\n"
"str s13, [x25, #0x0]\n"
"str s17, [x24, #0x0]\n"
"str s21, [x23, #0x0]\n"
"b 130f\n"
"128:" // Height 4: Partial direct writeback: partial_2_0
"tbz x8, #1, 129f\n"
- "str d8, [x16], #0x8\n"
+ "str d8, [x15], #0x8\n"
"str d12, [x25], #0x8\n"
"str d16, [x24], #0x8\n"
"str d20, [x23], #0x8\n"
"tbz x8, #0, 130f\n"
- "st1 { v8.s }[2], [x16]\n"
+ "st1 { v8.s }[2], [x15]\n"
"st1 { v12.s }[2], [x25]\n"
"st1 { v16.s }[2], [x24]\n"
"st1 { v20.s }[2], [x23]\n"
"b 130f\n"
"129:" // Height 4: Partial direct writeback: partial_1_0
- "str s8, [x16, #0x0]\n"
+ "str s8, [x15, #0x0]\n"
"str s12, [x25, #0x0]\n"
"str s16, [x24, #0x0]\n"
"str s20, [x23, #0x0]\n"
"130:" // Height 4: Partial direct writeback: Done
"b 132f\n"
"131:" // Height 4: Full writeback
- "str q8, [x16, #0x0]\n"
- "str q9, [x16, #0x10]\n"
- "str q10, [x16, #0x20]\n"
- "str q11, [x16, #0x30]\n"
- "add x16, x16, #0x40\n"
+ "str q8, [x15, #0x0]\n"
+ "str q9, [x15, #0x10]\n"
+ "str q10, [x15, #0x20]\n"
+ "str q11, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
"str q12, [x25, #0x0]\n"
"str q13, [x25, #0x10]\n"
"str q14, [x25, #0x20]\n"
@@ -1986,22 +1986,22 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"bgt 101b\n"
"b 200f\n"
"133:" // Height 5
- "mov x7, %x[bias]\n"
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x16, %x[bias]\n"
"ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x16, %x[output_ptr]\n"
+ "mov x15, %x[output_ptr]\n"
"134:" // Height 5: Column loop
- "cbz x7, 135f\n"
- "ldr q8, [x7, #0x0]\n"
+ "cbz x16, 135f\n"
+ "ldr q8, [x16, #0x0]\n"
+ "ldr q9, [x16, #0x10]\n"
+ "ldr q10, [x16, #0x20]\n"
"mov v12.16b, v8.16b\n"
- "ldr q9, [x7, #0x10]\n"
+ "ldr q11, [x16, #0x30]\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x7, #0x20]\n"
+ "add x16, x16, #0x40\n"
"mov v14.16b, v10.16b\n"
- "ldr q11, [x7, #0x30]\n"
"mov v15.16b, v11.16b\n"
"mov v16.16b, v8.16b\n"
- "add x7, x7, #0x40\n"
"mov v17.16b, v9.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
@@ -2016,48 +2016,48 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"b 146f\n"
"135:" // Height 5: no bias
"tbz %x[flags], #0, 145f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x8, #0x10\n"
- "add x22, x23, x20, LSL #2\n"
+ "add x25, x15, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"bge 144f\n"
"tbz x8, #3, 139f\n"
- "ld1 { v8.4s }, [x16], #0x10\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
"ld1 { v12.4s }, [x25], #0x10\n"
"ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
- "ld1 { v9.4s }, [x16], #0x10\n"
+ "ld1 { v9.4s }, [x15], #0x10\n"
"ld1 { v13.4s }, [x25], #0x10\n"
"ld1 { v17.4s }, [x24], #0x10\n"
+ "ld1 { v20.4s }, [x23], #0x10\n"
+ "ld1 { v24.4s }, [x22], #0x10\n"
"ld1 { v21.4s }, [x23], #0x10\n"
"ld1 { v25.4s }, [x22], #0x10\n"
"tbz x8, #2, 137f\n"
- "ld1 { v10.4s }, [x16], #0x10\n"
+ "ld1 { v10.4s }, [x15], #0x10\n"
"ld1 { v14.4s }, [x25], #0x10\n"
"ld1 { v18.4s }, [x24], #0x10\n"
"ld1 { v22.4s }, [x23], #0x10\n"
"ld1 { v26.4s }, [x22], #0x10\n"
"tbz x8, #1, 136f\n"
- "ldr d11, [x16], #0x8\n"
- "mov x20, #0x38\n"
+ "ldr d11, [x15], #0x8\n"
+ "mov x19, #0x38\n"
"ldr d15, [x25], #0x8\n"
"ldr d19, [x24], #0x8\n"
"ldr d23, [x23], #0x8\n"
"ldr d27, [x22], #0x8\n"
"tbz x8, #0, 143f\n"
- "ld1 { v11.s }[2], [x16]\n"
+ "ld1 { v11.s }[2], [x15]\n"
"ld1 { v15.s }[2], [x25]\n"
"ld1 { v19.s }[2], [x24]\n"
"ld1 { v23.s }[2], [x23]\n"
"ld1 { v27.s }[2], [x22]\n"
"b 143f\n"
"136:" // Height 5: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x8, #0, 143f\n"
- "ldr s11, [x16, #0x0]\n"
+ "ldr s11, [x15, #0x0]\n"
"ldr s15, [x25, #0x0]\n"
"ldr s19, [x24, #0x0]\n"
"ldr s23, [x23, #0x0]\n"
@@ -2065,23 +2065,23 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"b 143f\n"
"137:" // Height 5: Partial accumulate: partial_2_8
"tbz x8, #1, 138f\n"
- "ldr d10, [x16], #0x8\n"
- "mov x20, #0x28\n"
+ "ldr d10, [x15], #0x8\n"
"ldr d14, [x25], #0x8\n"
+ "mov x19, #0x28\n"
"ldr d18, [x24], #0x8\n"
"ldr d22, [x23], #0x8\n"
"ldr d26, [x22], #0x8\n"
"tbz x8, #0, 143f\n"
- "ld1 { v10.s }[2], [x16]\n"
+ "ld1 { v10.s }[2], [x15]\n"
"ld1 { v14.s }[2], [x25]\n"
"ld1 { v18.s }[2], [x24]\n"
"ld1 { v22.s }[2], [x23]\n"
"ld1 { v26.s }[2], [x22]\n"
"b 143f\n"
"138:" // Height 5: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x8, #0, 143f\n"
- "ldr s10, [x16, #0x0]\n"
+ "ldr s10, [x15, #0x0]\n"
"ldr s14, [x25, #0x0]\n"
"ldr s18, [x24, #0x0]\n"
"ldr s22, [x23, #0x0]\n"
@@ -2089,29 +2089,29 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"b 143f\n"
"139:" // Height 5: Partial accumulate: partial_4_0
"tbz x8, #2, 141f\n"
- "ld1 { v8.4s }, [x16], #0x10\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
"ld1 { v12.4s }, [x25], #0x10\n"
"ld1 { v16.4s }, [x24], #0x10\n"
"ld1 { v20.4s }, [x23], #0x10\n"
"ld1 { v24.4s }, [x22], #0x10\n"
"tbz x8, #1, 140f\n"
- "ldr d9, [x16], #0x8\n"
- "mov x20, #0x18\n"
+ "ldr d9, [x15], #0x8\n"
+ "mov x19, #0x18\n"
"ldr d13, [x25], #0x8\n"
"ldr d17, [x24], #0x8\n"
"ldr d21, [x23], #0x8\n"
"ldr d25, [x22], #0x8\n"
"tbz x8, #0, 143f\n"
- "ld1 { v9.s }[2], [x16]\n"
+ "ld1 { v9.s }[2], [x15]\n"
"ld1 { v13.s }[2], [x25]\n"
"ld1 { v17.s }[2], [x24]\n"
"ld1 { v21.s }[2], [x23]\n"
"ld1 { v25.s }[2], [x22]\n"
"b 143f\n"
"140:" // Height 5: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x8, #0, 143f\n"
- "ldr s9, [x16, #0x0]\n"
+ "ldr s9, [x15, #0x0]\n"
"ldr s13, [x25, #0x0]\n"
"ldr s17, [x24, #0x0]\n"
"ldr s21, [x23, #0x0]\n"
@@ -2119,34 +2119,34 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"b 143f\n"
"141:" // Height 5: Partial accumulate: partial_2_0
"tbz x8, #1, 142f\n"
- "ldr d8, [x16], #0x8\n"
- "mov x20, #0x8\n"
+ "ldr d8, [x15], #0x8\n"
"ldr d12, [x25], #0x8\n"
+ "mov x19, #0x8\n"
"ldr d16, [x24], #0x8\n"
"ldr d20, [x23], #0x8\n"
"ldr d24, [x22], #0x8\n"
"tbz x8, #0, 143f\n"
- "ld1 { v8.s }[2], [x16]\n"
+ "ld1 { v8.s }[2], [x15]\n"
"ld1 { v12.s }[2], [x25]\n"
"ld1 { v16.s }[2], [x24]\n"
"ld1 { v20.s }[2], [x23]\n"
"ld1 { v24.s }[2], [x22]\n"
"b 143f\n"
"142:" // Height 5: Partial accumulate: partial_1_0
- "ldr s8, [x16, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr s8, [x15, #0x0]\n"
+ "mov x19, #0x0\n"
"ldr s12, [x25, #0x0]\n"
"ldr s16, [x24, #0x0]\n"
"ldr s20, [x23, #0x0]\n"
"ldr s24, [x22, #0x0]\n"
"143:" // Height 5: Partial accumulate: Done
- "sub x16, x16, x20\n"
+ "sub x15, x15, x19\n"
"b 146f\n"
"144:" // Height 5: full accumulate
- "ldr q8, [x16, #0x0]\n"
- "ldr q9, [x16, #0x10]\n"
- "ldr q10, [x16, #0x20]\n"
- "ldr q11, [x16, #0x30]\n"
+ "ldr q8, [x15, #0x0]\n"
+ "ldr q9, [x15, #0x10]\n"
+ "ldr q10, [x15, #0x20]\n"
+ "ldr q11, [x15, #0x30]\n"
"ldr q12, [x25, #0x0]\n"
"ldr q13, [x25, #0x10]\n"
"ldr q14, [x25, #0x20]\n"
@@ -2186,231 +2186,231 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"movi v26.16b, #0x0\n"
"movi v27.16b, #0x0\n"
"146:" // Height 5: setup done
- "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
"147:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w13, [x20, x14, LSL #0x2]\n"
"tbz %x[flags], #3, 148f\n"
- "ldr x21, [%x[input_ptr], x15, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x13, [x21, #0x0]\n"
- "ldr x9, [x21, #0x8]\n"
- "ldr x27, [x21, #0x10]\n"
- "ldr x25, [x21, #0x18]\n"
- "ldr x23, [x21, #0x20]\n"
- "cbnz x15, 149f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x13, x13, x20, LSL #2\n"
- "add x9, x9, x20, LSL #2\n"
- "add x27, x27, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x12, [x20, #0x0]\n"
+ "ldr x28, [x20, #0x8]\n"
+ "ldr x26, [x20, #0x10]\n"
+ "ldr x24, [x20, #0x18]\n"
+ "ldr x22, [x20, #0x20]\n"
+ "cbnz x14, 149f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x12, x12, x19, LSL #2\n"
+ "add x28, x28, x19, LSL #2\n"
+ "add x26, x26, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
"b 149f\n"
"148:" // Height 5: setup direct input
- "mov x13, %x[input_ptr]\n"
- "add x9, x13, x20, LSL #2\n"
- "add x27, x9, x20, LSL #2\n"
- "add x25, x27, x20, LSL #2\n"
- "add x23, x25, x20, LSL #2\n"
+ "mov x12, %x[input_ptr]\n"
+ "add x28, x12, x19, LSL #2\n"
+ "add x26, x28, x19, LSL #2\n"
+ "add x24, x26, x19, LSL #2\n"
+ "add x22, x24, x19, LSL #2\n"
"149:" // Height 5: input setup done
- "cmp x14, #0x4\n"
+ "cmp x13, #0x4\n"
"blt 152f\n"
- "ldr q0, [x13, #0x0]\n"
- "cmp x14, #0x8\n"
- "ldr q1, [x9, #0x0]\n"
- "ldr q2, [x27, #0x0]\n"
- "ldr q3, [x25, #0x0]\n"
- "ldr q4, [x23, #0x0]\n"
+ "ldr q0, [x12, #0x0]\n"
+ "ldr q1, [x28, #0x0]\n"
+ "cmp x13, #0x8\n"
+ "ldr q2, [x26, #0x0]\n"
+ "ldr q3, [x24, #0x0]\n"
+ "ldr q4, [x22, #0x0]\n"
"ldr q6, [x17, #0x0]\n"
- "ldr q7, [x17, #0x10]\n"
"blt 151f\n"
"150:" // Height 5: Multiply loop: Main loop head
"fmla v8.4s, v6.4s, v0.s[0]\n"
- "ldr x12, [x17, #0x28]\n"
+ "ldr d7, [x17, #0x10]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
- "ldr x11, [x17, #0x38]\n"
+ "ldr x11, [x17, #0x18]\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
- "add x13, x13, #0x10\n"
+ "ldr x10, [x17, #0x28]\n"
"fmla v20.4s, v6.4s, v3.s[0]\n"
- "add x9, x9, #0x10\n"
+ "add x12, x12, #0x10\n"
"fmla v24.4s, v6.4s, v4.s[0]\n"
- "ldr d6, [x17, #0x20]\n"
+ "mov v7.d[1], x11\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
+ "add x28, x28, #0x10\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
- "mov v6.d[1], x12\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
- "ldr x12, [x17, #0x48]\n"
+ "ldr d6, [x17, #0x20]\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
- "add x27, x27, #0x10\n"
+ "ldr x11, [x17, #0x38]\n"
"fmla v21.4s, v7.4s, v3.s[0]\n"
- "add x25, x25, #0x10\n"
+ "ldr x9, [x12, #0x8]\n"
"fmla v25.4s, v7.4s, v4.s[0]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x17, #0x30]\n"
- "mov v7.d[1], x11\n"
+ "add x26, x26, #0x10\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
- "ldr x11, [x17, #0x58]\n"
+ "ldr x10, [x17, #0x48]\n"
"fmla v18.4s, v6.4s, v2.s[0]\n"
- "add x23, x23, #0x10\n"
+ "mov v7.d[1], x11\n"
"fmla v22.4s, v6.4s, v3.s[0]\n"
- "ldr x10, [x13, #0x8]\n"
+ "ldr x11, [x17, #0x58]\n"
"fmla v26.4s, v6.4s, v4.s[0]\n"
"ldr d6, [x17, #0x40]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
- "mov v6.d[1], x12\n"
+ "ldr x27, [x28, #0x8]\n"
"fmla v15.4s, v7.4s, v1.s[0]\n"
- "ldr x12, [x17, #0x68]\n"
+ "ldr x25, [x26, #0x8]\n"
"fmla v19.4s, v7.4s, v2.s[0]\n"
- "ldr x28, [x9, #0x8]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.4s, v7.4s, v3.s[0]\n"
- "ldr x26, [x27, #0x8]\n"
+ "ldr x10, [x17, #0x68]\n"
"fmla v27.4s, v7.4s, v4.s[0]\n"
"ldr d7, [x17, #0x50]\n"
- "mov v7.d[1], x11\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
+ "add x24, x24, #0x10\n"
"fmla v12.4s, v6.4s, v1.s[1]\n"
- "ldr x11, [x17, #0x78]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v16.4s, v6.4s, v2.s[1]\n"
- "ldr x24, [x25, #0x8]\n"
+ "mov v7.d[1], x11\n"
"fmla v20.4s, v6.4s, v3.s[1]\n"
- "ldr x22, [x23, #0x8]\n"
+ "ldr x11, [x17, #0x78]\n"
"fmla v24.4s, v6.4s, v4.s[1]\n"
"ldr d6, [x17, #0x60]\n"
"fmla v9.4s, v7.4s, v0.s[1]\n"
- "mov v6.d[1], x12\n"
+ "ldr x23, [x24, #0x8]\n"
"fmla v13.4s, v7.4s, v1.s[1]\n"
- "ldr x12, [x17, #0x88]\n"
+ "add x22, x22, #0x10\n"
"fmla v17.4s, v7.4s, v2.s[1]\n"
- "sub x14, x14, #0x4\n"
+ "mov v6.d[1], x10\n"
"fmla v21.4s, v7.4s, v3.s[1]\n"
- "cmp x14, #0x8\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
"fmla v25.4s, v7.4s, v4.s[1]\n"
"ldr d7, [x17, #0x70]\n"
- "mov v7.d[1], x11\n"
"fmla v10.4s, v6.4s, v0.s[1]\n"
+ "ldr x10, [x17, #0x88]\n"
"fmla v14.4s, v6.4s, v1.s[1]\n"
- "ldr x11, [x17, #0x98]\n"
+ "ldr x21, [x22, #0x8]\n"
"fmla v18.4s, v6.4s, v2.s[1]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "mov v7.d[1], x11\n"
"fmla v22.4s, v6.4s, v3.s[1]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "ldr x11, [x17, #0x98]\n"
"fmla v26.4s, v6.4s, v4.s[1]\n"
"ldr d6, [x17, #0x80]\n"
"fmla v11.4s, v7.4s, v0.s[1]\n"
- "mov v6.d[1], x12\n"
+ "sub x13, x13, #0x4\n"
"fmla v15.4s, v7.4s, v1.s[1]\n"
- "ldr x12, [x17, #0xa8]\n"
+ "cmp x13, #0x8\n"
"fmla v19.4s, v7.4s, v2.s[1]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.4s, v7.4s, v3.s[1]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr x10, [x17, #0xa8]\n"
"fmla v27.4s, v7.4s, v4.s[1]\n"
"ldr d7, [x17, #0x90]\n"
- "mov v7.d[1], x11\n"
"fmla v8.4s, v6.4s, v0.s[2]\n"
"fmla v12.4s, v6.4s, v1.s[2]\n"
- "ldr x11, [x17, #0xb8]\n"
"fmla v16.4s, v6.4s, v2.s[2]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "mov v7.d[1], x11\n"
"fmla v20.4s, v6.4s, v3.s[2]\n"
+ "ldr x11, [x17, #0xb8]\n"
"fmla v24.4s, v6.4s, v4.s[2]\n"
"ldr d6, [x17, #0xa0]\n"
"fmla v9.4s, v7.4s, v0.s[2]\n"
- "mov v6.d[1], x12\n"
"fmla v13.4s, v7.4s, v1.s[2]\n"
- "ldr x12, [x17, #0xc8]\n"
"fmla v17.4s, v7.4s, v2.s[2]\n"
+ "mov v6.d[1], x10\n"
"fmla v21.4s, v7.4s, v3.s[2]\n"
+ "ldr x10, [x17, #0xc8]\n"
"fmla v25.4s, v7.4s, v4.s[2]\n"
"ldr d7, [x17, #0xb0]\n"
- "mov v7.d[1], x11\n"
"fmla v10.4s, v6.4s, v0.s[2]\n"
"fmla v14.4s, v6.4s, v1.s[2]\n"
- "ldr x11, [x17, #0xd8]\n"
"fmla v18.4s, v6.4s, v2.s[2]\n"
+ "mov v7.d[1], x11\n"
"fmla v22.4s, v6.4s, v3.s[2]\n"
+ "ldr x11, [x17, #0xd8]\n"
"fmla v26.4s, v6.4s, v4.s[2]\n"
"ldr d6, [x17, #0xc0]\n"
"fmla v11.4s, v7.4s, v0.s[2]\n"
- "mov v6.d[1], x12\n"
"fmla v15.4s, v7.4s, v1.s[2]\n"
- "ldr x12, [x17, #0xe8]\n"
"fmla v19.4s, v7.4s, v2.s[2]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.4s, v7.4s, v3.s[2]\n"
+ "ldr x10, [x17, #0xe8]\n"
"fmla v27.4s, v7.4s, v4.s[2]\n"
"ldr d7, [x17, #0xd0]\n"
- "mov v7.d[1], x11\n"
"fmla v8.4s, v6.4s, v0.s[3]\n"
"fmla v12.4s, v6.4s, v1.s[3]\n"
- "ldr x11, [x17, #0xf8]\n"
"fmla v16.4s, v6.4s, v2.s[3]\n"
+ "mov v7.d[1], x11\n"
"fmla v20.4s, v6.4s, v3.s[3]\n"
+ "ldr x11, [x17, #0xf8]\n"
"fmla v24.4s, v6.4s, v4.s[3]\n"
"ldr d6, [x17, #0xe0]\n"
"fmla v9.4s, v7.4s, v0.s[3]\n"
- "mov v6.d[1], x12\n"
"fmla v13.4s, v7.4s, v1.s[3]\n"
"fmla v17.4s, v7.4s, v2.s[3]\n"
+ "mov v6.d[1], x10\n"
"fmla v21.4s, v7.4s, v3.s[3]\n"
"fmla v25.4s, v7.4s, v4.s[3]\n"
"ldr d7, [x17, #0xf0]\n"
- "mov v7.d[1], x11\n"
- "add x17, x17, #0x100\n"
"fmla v10.4s, v6.4s, v0.s[3]\n"
- "ldr x12, [x17, #0x8]\n"
+ "add x17, x17, #0x100\n"
"fmla v14.4s, v6.4s, v1.s[3]\n"
- "ldr x11, [x17, #0x18]\n"
+ "ldr x10, [x17, #0x8]\n"
"fmla v18.4s, v6.4s, v2.s[3]\n"
+ "mov v7.d[1], x11\n"
"fmla v22.4s, v6.4s, v3.s[3]\n"
"fmla v26.4s, v6.4s, v4.s[3]\n"
"ldr d6, [x17, #0x0]\n"
"fmla v11.4s, v7.4s, v0.s[3]\n"
- "ldr d0, [x13, #0x0]\n"
+ "ldr d0, [x12, #0x0]\n"
"fmla v15.4s, v7.4s, v1.s[3]\n"
- "ldr d1, [x9, #0x0]\n"
+ "ldr d1, [x28, #0x0]\n"
"fmla v19.4s, v7.4s, v2.s[3]\n"
- "ldr d2, [x27, #0x0]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.4s, v7.4s, v3.s[3]\n"
- "ldr d3, [x25, #0x0]\n"
+ "mov v0.d[1], x9\n"
"fmla v27.4s, v7.4s, v4.s[3]\n"
- "ldr d4, [x23, #0x0]\n"
- "ldr d7, [x17, #0x10]\n"
- "mov v6.d[1], x12\n"
- "mov v0.d[1], x10\n"
- "mov v1.d[1], x28\n"
- "mov v2.d[1], x26\n"
- "mov v3.d[1], x24\n"
- "mov v4.d[1], x22\n"
- "mov v7.d[1], x11\n"
+ "mov v1.d[1], x27\n"
+ "ldr d2, [x26, #0x0]\n"
+ "ldr d3, [x24, #0x0]\n"
+ "ldr d4, [x22, #0x0]\n"
+ "mov v2.d[1], x25\n"
+ "mov v3.d[1], x23\n"
+ "mov v4.d[1], x21\n"
"bge 150b\n"
"151:" // Height 5: Multiply loop: Single iteration only
"fmla v8.4s, v6.4s, v0.s[0]\n"
- "add x13, x13, #0x10\n"
+ "ldr q7, [x17, #0x10]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
- "add x9, x9, #0x10\n"
+ "sub x13, x13, #0x4\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
- "add x27, x27, #0x10\n"
+ "add x12, x12, #0x10\n"
"fmla v20.4s, v6.4s, v3.s[0]\n"
- "add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
"fmla v24.4s, v6.4s, v4.s[0]\n"
"ldr q6, [x17, #0x20]\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
- "add x23, x23, #0x10\n"
+ "add x28, x28, #0x10\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
- "sub x14, x14, #0x4\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "add x26, x26, #0x10\n"
"fmla v21.4s, v7.4s, v3.s[0]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"fmla v25.4s, v7.4s, v4.s[0]\n"
"ldr q7, [x17, #0x30]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "add x24, x24, #0x10\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v18.4s, v6.4s, v2.s[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "add x22, x22, #0x10\n"
"fmla v22.4s, v6.4s, v3.s[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
"fmla v26.4s, v6.4s, v4.s[0]\n"
"ldr q6, [x17, #0x40]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
@@ -2491,17 +2491,17 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"fmla v23.4s, v7.4s, v3.s[3]\n"
"fmla v27.4s, v7.4s, v4.s[3]\n"
"152:" // Height 5: Multiply loop: Main loop skip
- "cbz x14, 154f\n"
+ "cbz x13, 154f\n"
"153:" // Height 5: Multiply loop: Odd block loop
- "ldr s0, [x13], #0x4\n"
- "sub x14, x14, #0x1\n"
- "ldr s1, [x9], #0x4\n"
- "ldr s2, [x27], #0x4\n"
- "ldr s3, [x25], #0x4\n"
- "ldr s4, [x23], #0x4\n"
+ "ldr s0, [x12], #0x4\n"
+ "sub x13, x13, #0x1\n"
+ "ldr s1, [x28], #0x4\n"
+ "ldr s2, [x26], #0x4\n"
+ "ldr s3, [x24], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
"ldr q6, [x17, #0x0]\n"
- "fmla v8.4s, v6.4s, v0.s[0]\n"
"ldr q7, [x17, #0x10]\n"
+ "fmla v8.4s, v6.4s, v0.s[0]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
"fmla v20.4s, v6.4s, v3.s[0]\n"
@@ -2524,25 +2524,27 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"fmla v19.4s, v7.4s, v2.s[0]\n"
"fmla v23.4s, v7.4s, v3.s[0]\n"
"fmla v27.4s, v7.4s, v4.s[0]\n"
- "cbnz x14, 153b\n"
+ "cbnz x13, 153b\n"
"154:" // Height 5: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x15, x15, #0x1\n"
- "cmp x15, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x14, x14, #0x1\n"
+ "cmp x14, x19\n"
"bne 147b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "prfm pstl1keep, [x16, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x15, #0x0]\n"
+ "add x25, x15, x19, LSL #2\n"
"prfm pstl1keep, [x25, #0x0]\n"
+ "add x24, x25, x19, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
"tbz %x[flags], #1, 155f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x20, %x[args_ptr], %[offset_min]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x20]\n"
+ "ld1r { v0.4s }, [x19]\n"
"fmin v8.4s, v8.4s, v0.4s\n"
"fmin v9.4s, v9.4s, v0.4s\n"
"fmin v10.4s, v10.4s, v0.4s\n"
@@ -2553,6 +2555,16 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"fmin v15.4s, v15.4s, v0.4s\n"
"fmin v16.4s, v16.4s, v0.4s\n"
"fmin v17.4s, v17.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmax v15.4s, v15.4s, v1.4s\n"
+ "fmax v16.4s, v16.4s, v1.4s\n"
+ "fmax v17.4s, v17.4s, v1.4s\n"
"fmin v18.4s, v18.4s, v0.4s\n"
"fmin v19.4s, v19.4s, v0.4s\n"
"fmin v20.4s, v20.4s, v0.4s\n"
@@ -2563,34 +2575,22 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"fmin v25.4s, v25.4s, v0.4s\n"
"fmin v26.4s, v26.4s, v0.4s\n"
"fmin v27.4s, v27.4s, v0.4s\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v15.4s, v15.4s, v0.4s\n"
- "fmax v16.4s, v16.4s, v0.4s\n"
- "fmax v17.4s, v17.4s, v0.4s\n"
- "fmax v18.4s, v18.4s, v0.4s\n"
- "fmax v19.4s, v19.4s, v0.4s\n"
- "fmax v20.4s, v20.4s, v0.4s\n"
- "fmax v21.4s, v21.4s, v0.4s\n"
- "fmax v22.4s, v22.4s, v0.4s\n"
- "fmax v23.4s, v23.4s, v0.4s\n"
- "fmax v24.4s, v24.4s, v0.4s\n"
- "fmax v25.4s, v25.4s, v0.4s\n"
- "fmax v26.4s, v26.4s, v0.4s\n"
- "fmax v27.4s, v27.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v1.4s\n"
+ "fmax v19.4s, v19.4s, v1.4s\n"
+ "fmax v20.4s, v20.4s, v1.4s\n"
+ "fmax v21.4s, v21.4s, v1.4s\n"
+ "fmax v22.4s, v22.4s, v1.4s\n"
+ "fmax v23.4s, v23.4s, v1.4s\n"
+ "fmax v24.4s, v24.4s, v1.4s\n"
+ "fmax v25.4s, v25.4s, v1.4s\n"
+ "fmax v26.4s, v26.4s, v1.4s\n"
+ "fmax v27.4s, v27.4s, v1.4s\n"
"155:" // Height 5: No activation
"cmp x8, #0x10\n"
"bge 164f\n"
"tbz x8, #3, 159f\n"
- "st1 { v8.4s }, [x16], #0x10\n"
- "st1 { v9.4s }, [x16], #0x10\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
+ "st1 { v9.4s }, [x15], #0x10\n"
"st1 { v12.4s }, [x25], #0x10\n"
"st1 { v13.4s }, [x25], #0x10\n"
"st1 { v16.4s }, [x24], #0x10\n"
@@ -2600,19 +2600,19 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"st1 { v24.4s }, [x22], #0x10\n"
"st1 { v25.4s }, [x22], #0x10\n"
"tbz x8, #2, 157f\n"
- "st1 { v10.4s }, [x16], #0x10\n"
+ "st1 { v10.4s }, [x15], #0x10\n"
"st1 { v14.4s }, [x25], #0x10\n"
"st1 { v18.4s }, [x24], #0x10\n"
"st1 { v22.4s }, [x23], #0x10\n"
"st1 { v26.4s }, [x22], #0x10\n"
"tbz x8, #1, 156f\n"
- "str d11, [x16], #0x8\n"
+ "str d11, [x15], #0x8\n"
"str d15, [x25], #0x8\n"
"str d19, [x24], #0x8\n"
"str d23, [x23], #0x8\n"
"str d27, [x22], #0x8\n"
"tbz x8, #0, 163f\n"
- "st1 { v11.s }[2], [x16]\n"
+ "st1 { v11.s }[2], [x15]\n"
"st1 { v15.s }[2], [x25]\n"
"st1 { v19.s }[2], [x24]\n"
"st1 { v23.s }[2], [x23]\n"
@@ -2620,7 +2620,7 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"b 163f\n"
"156:" // Height 5: Partial direct writeback: partial_1_12
"tbz x8, #0, 163f\n"
- "str s11, [x16, #0x0]\n"
+ "str s11, [x15, #0x0]\n"
"str s15, [x25, #0x0]\n"
"str s19, [x24, #0x0]\n"
"str s23, [x23, #0x0]\n"
@@ -2628,13 +2628,13 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"b 163f\n"
"157:" // Height 5: Partial direct writeback: partial_2_8
"tbz x8, #1, 158f\n"
- "str d10, [x16], #0x8\n"
+ "str d10, [x15], #0x8\n"
"str d14, [x25], #0x8\n"
"str d18, [x24], #0x8\n"
"str d22, [x23], #0x8\n"
"str d26, [x22], #0x8\n"
"tbz x8, #0, 163f\n"
- "st1 { v10.s }[2], [x16]\n"
+ "st1 { v10.s }[2], [x15]\n"
"st1 { v14.s }[2], [x25]\n"
"st1 { v18.s }[2], [x24]\n"
"st1 { v22.s }[2], [x23]\n"
@@ -2642,7 +2642,7 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"b 163f\n"
"158:" // Height 5: Partial direct writeback: partial_1_8
"tbz x8, #0, 163f\n"
- "str s10, [x16, #0x0]\n"
+ "str s10, [x15, #0x0]\n"
"str s14, [x25, #0x0]\n"
"str s18, [x24, #0x0]\n"
"str s22, [x23, #0x0]\n"
@@ -2650,19 +2650,19 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"b 163f\n"
"159:" // Height 5: Partial direct writeback: partial_4_0
"tbz x8, #2, 161f\n"
- "st1 { v8.4s }, [x16], #0x10\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
"st1 { v12.4s }, [x25], #0x10\n"
"st1 { v16.4s }, [x24], #0x10\n"
"st1 { v20.4s }, [x23], #0x10\n"
"st1 { v24.4s }, [x22], #0x10\n"
"tbz x8, #1, 160f\n"
- "str d9, [x16], #0x8\n"
+ "str d9, [x15], #0x8\n"
"str d13, [x25], #0x8\n"
"str d17, [x24], #0x8\n"
"str d21, [x23], #0x8\n"
"str d25, [x22], #0x8\n"
"tbz x8, #0, 163f\n"
- "st1 { v9.s }[2], [x16]\n"
+ "st1 { v9.s }[2], [x15]\n"
"st1 { v13.s }[2], [x25]\n"
"st1 { v17.s }[2], [x24]\n"
"st1 { v21.s }[2], [x23]\n"
@@ -2670,7 +2670,7 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"b 163f\n"
"160:" // Height 5: Partial direct writeback: partial_1_4
"tbz x8, #0, 163f\n"
- "str s9, [x16, #0x0]\n"
+ "str s9, [x15, #0x0]\n"
"str s13, [x25, #0x0]\n"
"str s17, [x24, #0x0]\n"
"str s21, [x23, #0x0]\n"
@@ -2678,20 +2678,20 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"b 163f\n"
"161:" // Height 5: Partial direct writeback: partial_2_0
"tbz x8, #1, 162f\n"
- "str d8, [x16], #0x8\n"
+ "str d8, [x15], #0x8\n"
"str d12, [x25], #0x8\n"
"str d16, [x24], #0x8\n"
"str d20, [x23], #0x8\n"
"str d24, [x22], #0x8\n"
"tbz x8, #0, 163f\n"
- "st1 { v8.s }[2], [x16]\n"
+ "st1 { v8.s }[2], [x15]\n"
"st1 { v12.s }[2], [x25]\n"
"st1 { v16.s }[2], [x24]\n"
"st1 { v20.s }[2], [x23]\n"
"st1 { v24.s }[2], [x22]\n"
"b 163f\n"
"162:" // Height 5: Partial direct writeback: partial_1_0
- "str s8, [x16, #0x0]\n"
+ "str s8, [x15, #0x0]\n"
"str s12, [x25, #0x0]\n"
"str s16, [x24, #0x0]\n"
"str s20, [x23, #0x0]\n"
@@ -2699,11 +2699,11 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"163:" // Height 5: Partial direct writeback: Done
"b 165f\n"
"164:" // Height 5: Full writeback
- "str q8, [x16, #0x0]\n"
- "str q9, [x16, #0x10]\n"
- "str q10, [x16, #0x20]\n"
- "str q11, [x16, #0x30]\n"
- "add x16, x16, #0x40\n"
+ "str q8, [x15, #0x0]\n"
+ "str q9, [x15, #0x10]\n"
+ "str q10, [x15, #0x20]\n"
+ "str q11, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
"str q12, [x25, #0x0]\n"
"str q13, [x25, #0x10]\n"
"str q14, [x25, #0x20]\n"
@@ -2725,25 +2725,25 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"bgt 134b\n"
"b 200f\n"
"166:" // Height 6
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x18\n"
- "mov x7, %x[bias]\n"
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x16, %x[bias]\n"
"ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x16, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "mov x15, %x[output_ptr]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x19, #0x18\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"167:" // Height 6: Column loop
- "cbz x7, 168f\n"
- "ldr q8, [x7, #0x0]\n"
+ "cbz x16, 168f\n"
+ "ldr q8, [x16, #0x0]\n"
+ "ldr q9, [x16, #0x10]\n"
+ "ldr q10, [x16, #0x20]\n"
"mov v12.16b, v8.16b\n"
- "ldr q9, [x7, #0x10]\n"
+ "ldr q11, [x16, #0x30]\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x7, #0x20]\n"
+ "add x16, x16, #0x40\n"
"mov v14.16b, v10.16b\n"
- "ldr q11, [x7, #0x30]\n"
"mov v15.16b, v11.16b\n"
"mov v16.16b, v8.16b\n"
- "add x7, x7, #0x40\n"
"mov v17.16b, v9.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
@@ -2762,44 +2762,44 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"b 179f\n"
"168:" // Height 6: no bias
"tbz %x[flags], #0, 178f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x8, #0x10\n"
- "add x21, x22, x20, LSL #2\n"
+ "add x25, x15, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"bge 177f\n"
"tbz x8, #3, 172f\n"
- "ld1 { v8.4s }, [x16], #0x10\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
"ld1 { v12.4s }, [x25], #0x10\n"
"ld1 { v16.4s }, [x24], #0x10\n"
+ "ld1 { v9.4s }, [x15], #0x10\n"
+ "ld1 { v13.4s }, [x25], #0x10\n"
+ "ld1 { v17.4s }, [x24], #0x10\n"
"ld1 { v20.4s }, [x23], #0x10\n"
"ld1 { v24.4s }, [x22], #0x10\n"
"ld1 { v28.4s }, [x21], #0x10\n"
- "ld1 { v9.4s }, [x16], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
"ld1 { v21.4s }, [x23], #0x10\n"
"ld1 { v25.4s }, [x22], #0x10\n"
"ld1 { v29.4s }, [x21], #0x10\n"
"tbz x8, #2, 170f\n"
- "ld1 { v10.4s }, [x16], #0x10\n"
+ "ld1 { v10.4s }, [x15], #0x10\n"
"ld1 { v14.4s }, [x25], #0x10\n"
"ld1 { v18.4s }, [x24], #0x10\n"
"ld1 { v22.4s }, [x23], #0x10\n"
"ld1 { v26.4s }, [x22], #0x10\n"
"ld1 { v30.4s }, [x21], #0x10\n"
"tbz x8, #1, 169f\n"
- "ldr d11, [x16], #0x8\n"
- "mov x20, #0x38\n"
+ "ldr d11, [x15], #0x8\n"
+ "mov x19, #0x38\n"
"ldr d15, [x25], #0x8\n"
"ldr d19, [x24], #0x8\n"
"ldr d23, [x23], #0x8\n"
"ldr d27, [x22], #0x8\n"
"ldr d31, [x21], #0x8\n"
"tbz x8, #0, 176f\n"
- "ld1 { v11.s }[2], [x16]\n"
+ "ld1 { v11.s }[2], [x15]\n"
"ld1 { v15.s }[2], [x25]\n"
"ld1 { v19.s }[2], [x24]\n"
"ld1 { v23.s }[2], [x23]\n"
@@ -2807,9 +2807,9 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"ld1 { v31.s }[2], [x21]\n"
"b 176f\n"
"169:" // Height 6: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x8, #0, 176f\n"
- "ldr s11, [x16, #0x0]\n"
+ "ldr s11, [x15, #0x0]\n"
"ldr s15, [x25, #0x0]\n"
"ldr s19, [x24, #0x0]\n"
"ldr s23, [x23, #0x0]\n"
@@ -2818,15 +2818,15 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"b 176f\n"
"170:" // Height 6: Partial accumulate: partial_2_8
"tbz x8, #1, 171f\n"
- "ldr d10, [x16], #0x8\n"
- "mov x20, #0x28\n"
+ "ldr d10, [x15], #0x8\n"
"ldr d14, [x25], #0x8\n"
+ "mov x19, #0x28\n"
"ldr d18, [x24], #0x8\n"
"ldr d22, [x23], #0x8\n"
"ldr d26, [x22], #0x8\n"
"ldr d30, [x21], #0x8\n"
"tbz x8, #0, 176f\n"
- "ld1 { v10.s }[2], [x16]\n"
+ "ld1 { v10.s }[2], [x15]\n"
"ld1 { v14.s }[2], [x25]\n"
"ld1 { v18.s }[2], [x24]\n"
"ld1 { v22.s }[2], [x23]\n"
@@ -2834,9 +2834,9 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"ld1 { v30.s }[2], [x21]\n"
"b 176f\n"
"171:" // Height 6: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x8, #0, 176f\n"
- "ldr s10, [x16, #0x0]\n"
+ "ldr s10, [x15, #0x0]\n"
"ldr s14, [x25, #0x0]\n"
"ldr s18, [x24, #0x0]\n"
"ldr s22, [x23, #0x0]\n"
@@ -2845,22 +2845,22 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"b 176f\n"
"172:" // Height 6: Partial accumulate: partial_4_0
"tbz x8, #2, 174f\n"
- "ld1 { v8.4s }, [x16], #0x10\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
"ld1 { v12.4s }, [x25], #0x10\n"
"ld1 { v16.4s }, [x24], #0x10\n"
"ld1 { v20.4s }, [x23], #0x10\n"
"ld1 { v24.4s }, [x22], #0x10\n"
"ld1 { v28.4s }, [x21], #0x10\n"
"tbz x8, #1, 173f\n"
- "ldr d9, [x16], #0x8\n"
- "mov x20, #0x18\n"
+ "ldr d9, [x15], #0x8\n"
+ "mov x19, #0x18\n"
"ldr d13, [x25], #0x8\n"
"ldr d17, [x24], #0x8\n"
"ldr d21, [x23], #0x8\n"
"ldr d25, [x22], #0x8\n"
"ldr d29, [x21], #0x8\n"
"tbz x8, #0, 176f\n"
- "ld1 { v9.s }[2], [x16]\n"
+ "ld1 { v9.s }[2], [x15]\n"
"ld1 { v13.s }[2], [x25]\n"
"ld1 { v17.s }[2], [x24]\n"
"ld1 { v21.s }[2], [x23]\n"
@@ -2868,9 +2868,9 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"ld1 { v29.s }[2], [x21]\n"
"b 176f\n"
"173:" // Height 6: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x8, #0, 176f\n"
- "ldr s9, [x16, #0x0]\n"
+ "ldr s9, [x15, #0x0]\n"
"ldr s13, [x25, #0x0]\n"
"ldr s17, [x24, #0x0]\n"
"ldr s21, [x23, #0x0]\n"
@@ -2879,15 +2879,15 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"b 176f\n"
"174:" // Height 6: Partial accumulate: partial_2_0
"tbz x8, #1, 175f\n"
- "ldr d8, [x16], #0x8\n"
- "mov x20, #0x8\n"
+ "ldr d8, [x15], #0x8\n"
"ldr d12, [x25], #0x8\n"
+ "mov x19, #0x8\n"
"ldr d16, [x24], #0x8\n"
"ldr d20, [x23], #0x8\n"
"ldr d24, [x22], #0x8\n"
"ldr d28, [x21], #0x8\n"
"tbz x8, #0, 176f\n"
- "ld1 { v8.s }[2], [x16]\n"
+ "ld1 { v8.s }[2], [x15]\n"
"ld1 { v12.s }[2], [x25]\n"
"ld1 { v16.s }[2], [x24]\n"
"ld1 { v20.s }[2], [x23]\n"
@@ -2895,21 +2895,21 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"ld1 { v28.s }[2], [x21]\n"
"b 176f\n"
"175:" // Height 6: Partial accumulate: partial_1_0
- "ldr s8, [x16, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr s8, [x15, #0x0]\n"
+ "mov x19, #0x0\n"
"ldr s12, [x25, #0x0]\n"
"ldr s16, [x24, #0x0]\n"
"ldr s20, [x23, #0x0]\n"
"ldr s24, [x22, #0x0]\n"
"ldr s28, [x21, #0x0]\n"
"176:" // Height 6: Partial accumulate: Done
- "sub x16, x16, x20\n"
+ "sub x15, x15, x19\n"
"b 179f\n"
"177:" // Height 6: full accumulate
- "ldr q8, [x16, #0x0]\n"
- "ldr q9, [x16, #0x10]\n"
- "ldr q10, [x16, #0x20]\n"
- "ldr q11, [x16, #0x30]\n"
+ "ldr q8, [x15, #0x0]\n"
+ "ldr q9, [x15, #0x10]\n"
+ "ldr q10, [x15, #0x20]\n"
+ "ldr q11, [x15, #0x30]\n"
"ldr q12, [x25, #0x0]\n"
"ldr q13, [x25, #0x10]\n"
"ldr q14, [x25, #0x20]\n"
@@ -2957,260 +2957,260 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"movi v30.16b, #0x0\n"
"movi v31.16b, #0x0\n"
"179:" // Height 6: setup done
- "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
"180:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w13, [x20, x14, LSL #0x2]\n"
"tbz %x[flags], #3, 181f\n"
- "ldr x21, [%x[input_ptr], x15, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x13, [x21, #0x0]\n"
- "ldr x9, [x21, #0x8]\n"
- "ldr x27, [x21, #0x10]\n"
- "ldr x25, [x21, #0x18]\n"
- "ldr x23, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x15, 182f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x13, x13, x20, LSL #2\n"
- "add x9, x9, x20, LSL #2\n"
- "add x27, x27, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x21, x21, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x12, [x20, #0x0]\n"
+ "ldr x28, [x20, #0x8]\n"
+ "ldr x26, [x20, #0x10]\n"
+ "ldr x24, [x20, #0x18]\n"
+ "ldr x22, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x14, 182f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x12, x12, x19, LSL #2\n"
+ "add x28, x28, x19, LSL #2\n"
+ "add x26, x26, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x20, x20, x19, LSL #2\n"
"b 182f\n"
"181:" // Height 6: setup direct input
- "mov x13, %x[input_ptr]\n"
- "add x9, x13, x20, LSL #2\n"
- "add x27, x9, x20, LSL #2\n"
- "add x25, x27, x20, LSL #2\n"
- "add x23, x25, x20, LSL #2\n"
- "add x21, x23, x20, LSL #2\n"
+ "mov x12, %x[input_ptr]\n"
+ "add x28, x12, x19, LSL #2\n"
+ "add x26, x28, x19, LSL #2\n"
+ "add x24, x26, x19, LSL #2\n"
+ "add x22, x24, x19, LSL #2\n"
+ "add x20, x22, x19, LSL #2\n"
"182:" // Height 6: input setup done
- "cmp x14, #0x4\n"
+ "cmp x13, #0x4\n"
"blt 185f\n"
- "ldr q0, [x13, #0x0]\n"
- "cmp x14, #0x8\n"
- "ldr q1, [x9, #0x0]\n"
- "ldr q2, [x27, #0x0]\n"
- "ldr q3, [x25, #0x0]\n"
- "ldr q4, [x23, #0x0]\n"
- "ldr q5, [x21, #0x0]\n"
+ "ldr q0, [x12, #0x0]\n"
+ "ldr q1, [x28, #0x0]\n"
+ "cmp x13, #0x8\n"
+ "ldr q2, [x26, #0x0]\n"
+ "ldr q3, [x24, #0x0]\n"
+ "ldr q4, [x22, #0x0]\n"
+ "ldr q5, [x20, #0x0]\n"
"ldr q6, [x17, #0x0]\n"
- "ldr q7, [x17, #0x10]\n"
"blt 184f\n"
"183:" // Height 6: Multiply loop: Main loop head
"fmla v8.4s, v6.4s, v0.s[0]\n"
- "ldr x12, [x17, #0x28]\n"
+ "ldr d7, [x17, #0x10]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
- "ldr x11, [x17, #0x38]\n"
+ "ldr x11, [x17, #0x18]\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
- "add x13, x13, #0x10\n"
+ "ldr x10, [x17, #0x28]\n"
"fmla v20.4s, v6.4s, v3.s[0]\n"
- "add x9, x9, #0x10\n"
+ "add x12, x12, #0x10\n"
"fmla v24.4s, v6.4s, v4.s[0]\n"
- "add x27, x27, #0x10\n"
+ "mov v7.d[1], x11\n"
"fmla v28.4s, v6.4s, v5.s[0]\n"
- "ldr d6, [x17, #0x20]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
- "mov v6.d[1], x12\n"
+ "ldr d6, [x17, #0x20]\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
- "ldr x12, [x17, #0x48]\n"
+ "ldr x11, [x17, #0x38]\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
- "add x25, x25, #0x10\n"
+ "ldr x9, [x12, #0x8]\n"
"fmla v21.4s, v7.4s, v3.s[0]\n"
- "add x23, x23, #0x10\n"
+ "mov v6.d[1], x10\n"
"fmla v25.4s, v7.4s, v4.s[0]\n"
- "add x21, x21, #0x10\n"
+ "ldr x10, [x17, #0x48]\n"
"fmla v29.4s, v7.4s, v5.s[0]\n"
"ldr d7, [x17, #0x30]\n"
- "mov v7.d[1], x11\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
+ "add x28, x28, #0x10\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
- "ldr x11, [x17, #0x58]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
"fmla v18.4s, v6.4s, v2.s[0]\n"
- "ldr x10, [x13, #0x8]\n"
+ "mov v7.d[1], x11\n"
"fmla v22.4s, v6.4s, v3.s[0]\n"
- "ldr x28, [x9, #0x8]\n"
+ "ldr x11, [x17, #0x58]\n"
"fmla v26.4s, v6.4s, v4.s[0]\n"
- "ldr x26, [x27, #0x8]\n"
+ "ldr x27, [x28, #0x8]\n"
"fmla v30.4s, v6.4s, v5.s[0]\n"
"ldr d6, [x17, #0x40]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
- "mov v6.d[1], x12\n"
+ "add x26, x26, #0x10\n"
"fmla v15.4s, v7.4s, v1.s[0]\n"
- "ldr x12, [x17, #0x68]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"fmla v19.4s, v7.4s, v2.s[0]\n"
- "ldr x24, [x25, #0x8]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.4s, v7.4s, v3.s[0]\n"
- "ldr x22, [x23, #0x8]\n"
+ "ldr x10, [x17, #0x68]\n"
"fmla v27.4s, v7.4s, v4.s[0]\n"
- "ldr x20, [x21, #0x8]\n"
+ "ldr x25, [x26, #0x8]\n"
"fmla v31.4s, v7.4s, v5.s[0]\n"
"ldr d7, [x17, #0x50]\n"
- "mov v7.d[1], x11\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
+ "add x24, x24, #0x10\n"
"fmla v12.4s, v6.4s, v1.s[1]\n"
- "ldr x11, [x17, #0x78]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v16.4s, v6.4s, v2.s[1]\n"
- "sub x14, x14, #0x4\n"
+ "mov v7.d[1], x11\n"
"fmla v20.4s, v6.4s, v3.s[1]\n"
- "cmp x14, #0x8\n"
+ "ldr x11, [x17, #0x78]\n"
"fmla v24.4s, v6.4s, v4.s[1]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "ldr x23, [x24, #0x8]\n"
"fmla v28.4s, v6.4s, v5.s[1]\n"
"ldr d6, [x17, #0x60]\n"
"fmla v9.4s, v7.4s, v0.s[1]\n"
- "mov v6.d[1], x12\n"
+ "add x22, x22, #0x10\n"
"fmla v13.4s, v7.4s, v1.s[1]\n"
- "ldr x12, [x17, #0x88]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
"fmla v17.4s, v7.4s, v2.s[1]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "mov v6.d[1], x10\n"
"fmla v21.4s, v7.4s, v3.s[1]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "ldr x10, [x17, #0x88]\n"
"fmla v25.4s, v7.4s, v4.s[1]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr x21, [x22, #0x8]\n"
"fmla v29.4s, v7.4s, v5.s[1]\n"
"ldr d7, [x17, #0x70]\n"
- "mov v7.d[1], x11\n"
"fmla v10.4s, v6.4s, v0.s[1]\n"
+ "add x20, x20, #0x10\n"
"fmla v14.4s, v6.4s, v1.s[1]\n"
- "ldr x11, [x17, #0x98]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
"fmla v18.4s, v6.4s, v2.s[1]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "mov v7.d[1], x11\n"
"fmla v22.4s, v6.4s, v3.s[1]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
+ "ldr x11, [x17, #0x98]\n"
"fmla v26.4s, v6.4s, v4.s[1]\n"
+ "ldr x19, [x20, #0x8]\n"
"fmla v30.4s, v6.4s, v5.s[1]\n"
"ldr d6, [x17, #0x80]\n"
"fmla v11.4s, v7.4s, v0.s[1]\n"
- "mov v6.d[1], x12\n"
+ "sub x13, x13, #0x4\n"
"fmla v15.4s, v7.4s, v1.s[1]\n"
- "ldr x12, [x17, #0xa8]\n"
+ "cmp x13, #0x8\n"
"fmla v19.4s, v7.4s, v2.s[1]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.4s, v7.4s, v3.s[1]\n"
+ "ldr x10, [x17, #0xa8]\n"
"fmla v27.4s, v7.4s, v4.s[1]\n"
"fmla v31.4s, v7.4s, v5.s[1]\n"
"ldr d7, [x17, #0x90]\n"
- "mov v7.d[1], x11\n"
"fmla v8.4s, v6.4s, v0.s[2]\n"
"fmla v12.4s, v6.4s, v1.s[2]\n"
- "ldr x11, [x17, #0xb8]\n"
"fmla v16.4s, v6.4s, v2.s[2]\n"
+ "mov v7.d[1], x11\n"
"fmla v20.4s, v6.4s, v3.s[2]\n"
+ "ldr x11, [x17, #0xb8]\n"
"fmla v24.4s, v6.4s, v4.s[2]\n"
"fmla v28.4s, v6.4s, v5.s[2]\n"
"ldr d6, [x17, #0xa0]\n"
"fmla v9.4s, v7.4s, v0.s[2]\n"
- "mov v6.d[1], x12\n"
"fmla v13.4s, v7.4s, v1.s[2]\n"
- "ldr x12, [x17, #0xc8]\n"
"fmla v17.4s, v7.4s, v2.s[2]\n"
+ "mov v6.d[1], x10\n"
"fmla v21.4s, v7.4s, v3.s[2]\n"
+ "ldr x10, [x17, #0xc8]\n"
"fmla v25.4s, v7.4s, v4.s[2]\n"
"fmla v29.4s, v7.4s, v5.s[2]\n"
"ldr d7, [x17, #0xb0]\n"
- "mov v7.d[1], x11\n"
"fmla v10.4s, v6.4s, v0.s[2]\n"
"fmla v14.4s, v6.4s, v1.s[2]\n"
- "ldr x11, [x17, #0xd8]\n"
"fmla v18.4s, v6.4s, v2.s[2]\n"
+ "mov v7.d[1], x11\n"
"fmla v22.4s, v6.4s, v3.s[2]\n"
+ "ldr x11, [x17, #0xd8]\n"
"fmla v26.4s, v6.4s, v4.s[2]\n"
"fmla v30.4s, v6.4s, v5.s[2]\n"
"ldr d6, [x17, #0xc0]\n"
"fmla v11.4s, v7.4s, v0.s[2]\n"
- "mov v6.d[1], x12\n"
"fmla v15.4s, v7.4s, v1.s[2]\n"
- "ldr x12, [x17, #0xe8]\n"
"fmla v19.4s, v7.4s, v2.s[2]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.4s, v7.4s, v3.s[2]\n"
+ "ldr x10, [x17, #0xe8]\n"
"fmla v27.4s, v7.4s, v4.s[2]\n"
"fmla v31.4s, v7.4s, v5.s[2]\n"
"ldr d7, [x17, #0xd0]\n"
- "mov v7.d[1], x11\n"
"fmla v8.4s, v6.4s, v0.s[3]\n"
"fmla v12.4s, v6.4s, v1.s[3]\n"
- "ldr x11, [x17, #0xf8]\n"
"fmla v16.4s, v6.4s, v2.s[3]\n"
+ "mov v7.d[1], x11\n"
"fmla v20.4s, v6.4s, v3.s[3]\n"
+ "ldr x11, [x17, #0xf8]\n"
"fmla v24.4s, v6.4s, v4.s[3]\n"
"fmla v28.4s, v6.4s, v5.s[3]\n"
"ldr d6, [x17, #0xe0]\n"
"fmla v9.4s, v7.4s, v0.s[3]\n"
- "mov v6.d[1], x12\n"
"fmla v13.4s, v7.4s, v1.s[3]\n"
"fmla v17.4s, v7.4s, v2.s[3]\n"
+ "mov v6.d[1], x10\n"
"fmla v21.4s, v7.4s, v3.s[3]\n"
"fmla v25.4s, v7.4s, v4.s[3]\n"
"fmla v29.4s, v7.4s, v5.s[3]\n"
"ldr d7, [x17, #0xf0]\n"
- "mov v7.d[1], x11\n"
- "add x17, x17, #0x100\n"
"fmla v10.4s, v6.4s, v0.s[3]\n"
- "ldr x12, [x17, #0x8]\n"
+ "add x17, x17, #0x100\n"
"fmla v14.4s, v6.4s, v1.s[3]\n"
- "ldr x11, [x17, #0x18]\n"
+ "ldr x10, [x17, #0x8]\n"
"fmla v18.4s, v6.4s, v2.s[3]\n"
+ "mov v7.d[1], x11\n"
"fmla v22.4s, v6.4s, v3.s[3]\n"
"fmla v26.4s, v6.4s, v4.s[3]\n"
"fmla v30.4s, v6.4s, v5.s[3]\n"
"ldr d6, [x17, #0x0]\n"
"fmla v11.4s, v7.4s, v0.s[3]\n"
- "ldr d0, [x13, #0x0]\n"
+ "ldr d0, [x12, #0x0]\n"
"fmla v15.4s, v7.4s, v1.s[3]\n"
- "ldr d1, [x9, #0x0]\n"
+ "ldr d1, [x28, #0x0]\n"
"fmla v19.4s, v7.4s, v2.s[3]\n"
- "ldr d2, [x27, #0x0]\n"
+ "mov v6.d[1], x10\n"
"fmla v23.4s, v7.4s, v3.s[3]\n"
- "ldr d3, [x25, #0x0]\n"
+ "mov v0.d[1], x9\n"
"fmla v27.4s, v7.4s, v4.s[3]\n"
- "ldr d4, [x23, #0x0]\n"
+ "mov v1.d[1], x27\n"
"fmla v31.4s, v7.4s, v5.s[3]\n"
- "ldr d5, [x21, #0x0]\n"
- "ldr d7, [x17, #0x10]\n"
- "mov v6.d[1], x12\n"
- "mov v0.d[1], x10\n"
- "mov v1.d[1], x28\n"
- "mov v2.d[1], x26\n"
- "mov v3.d[1], x24\n"
- "mov v4.d[1], x22\n"
- "mov v5.d[1], x20\n"
- "mov v7.d[1], x11\n"
+ "ldr d2, [x26, #0x0]\n"
+ "ldr d3, [x24, #0x0]\n"
+ "ldr d4, [x22, #0x0]\n"
+ "mov v2.d[1], x25\n"
+ "ldr d5, [x20, #0x0]\n"
+ "mov v3.d[1], x23\n"
+ "mov v4.d[1], x21\n"
+ "mov v5.d[1], x19\n"
"bge 183b\n"
"184:" // Height 6: Multiply loop: Single iteration only
"fmla v8.4s, v6.4s, v0.s[0]\n"
- "add x13, x13, #0x10\n"
+ "ldr q7, [x17, #0x10]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
- "add x9, x9, #0x10\n"
+ "sub x13, x13, #0x4\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
- "add x27, x27, #0x10\n"
+ "add x12, x12, #0x10\n"
"fmla v20.4s, v6.4s, v3.s[0]\n"
- "add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
"fmla v24.4s, v6.4s, v4.s[0]\n"
- "add x23, x23, #0x10\n"
+ "add x28, x28, #0x10\n"
"fmla v28.4s, v6.4s, v5.s[0]\n"
- "ldr q6, [x17, #0x20]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
- "add x21, x21, #0x10\n"
+ "ldr q6, [x17, #0x20]\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
- "sub x14, x14, #0x4\n"
+ "add x26, x26, #0x10\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"fmla v21.4s, v7.4s, v3.s[0]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "add x24, x24, #0x10\n"
"fmla v25.4s, v7.4s, v4.s[0]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v29.4s, v7.4s, v5.s[0]\n"
"ldr q7, [x17, #0x30]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "add x22, x22, #0x10\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
"fmla v18.4s, v6.4s, v2.s[0]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
+ "add x20, x20, #0x10\n"
"fmla v22.4s, v6.4s, v3.s[0]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
"fmla v26.4s, v6.4s, v4.s[0]\n"
"fmla v30.4s, v6.4s, v5.s[0]\n"
"ldr q6, [x17, #0x40]\n"
@@ -3305,18 +3305,18 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"fmla v27.4s, v7.4s, v4.s[3]\n"
"fmla v31.4s, v7.4s, v5.s[3]\n"
"185:" // Height 6: Multiply loop: Main loop skip
- "cbz x14, 187f\n"
+ "cbz x13, 187f\n"
"186:" // Height 6: Multiply loop: Odd block loop
- "ldr s0, [x13], #0x4\n"
- "sub x14, x14, #0x1\n"
- "ldr s1, [x9], #0x4\n"
- "ldr s2, [x27], #0x4\n"
- "ldr s3, [x25], #0x4\n"
- "ldr s4, [x23], #0x4\n"
- "ldr s5, [x21], #0x4\n"
+ "ldr s0, [x12], #0x4\n"
+ "sub x13, x13, #0x1\n"
+ "ldr s1, [x28], #0x4\n"
+ "ldr s2, [x26], #0x4\n"
+ "ldr s3, [x24], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
+ "ldr s5, [x20], #0x4\n"
"ldr q6, [x17, #0x0]\n"
- "fmla v8.4s, v6.4s, v0.s[0]\n"
"ldr q7, [x17, #0x10]\n"
+ "fmla v8.4s, v6.4s, v0.s[0]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
"fmla v20.4s, v6.4s, v3.s[0]\n"
@@ -3343,27 +3343,29 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"fmla v23.4s, v7.4s, v3.s[0]\n"
"fmla v27.4s, v7.4s, v4.s[0]\n"
"fmla v31.4s, v7.4s, v5.s[0]\n"
- "cbnz x14, 186b\n"
+ "cbnz x13, 186b\n"
"187:" // Height 6: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x15, x15, #0x1\n"
- "cmp x15, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x14, x14, #0x1\n"
+ "cmp x14, x19\n"
"bne 180b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "prfm pstl1keep, [x16, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x15, #0x0]\n"
+ "add x25, x15, x19, LSL #2\n"
"prfm pstl1keep, [x25, #0x0]\n"
+ "add x24, x25, x19, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #2\n"
"prfm pstl1keep, [x21, #0x0]\n"
"tbz %x[flags], #1, 188f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x20, %x[args_ptr], %[offset_min]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x20]\n"
+ "ld1r { v0.4s }, [x19]\n"
"fmin v8.4s, v8.4s, v0.4s\n"
"fmin v9.4s, v9.4s, v0.4s\n"
"fmin v10.4s, v10.4s, v0.4s\n"
@@ -3374,6 +3376,16 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"fmin v15.4s, v15.4s, v0.4s\n"
"fmin v16.4s, v16.4s, v0.4s\n"
"fmin v17.4s, v17.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmax v15.4s, v15.4s, v1.4s\n"
+ "fmax v16.4s, v16.4s, v1.4s\n"
+ "fmax v17.4s, v17.4s, v1.4s\n"
"fmin v18.4s, v18.4s, v0.4s\n"
"fmin v19.4s, v19.4s, v0.4s\n"
"fmin v20.4s, v20.4s, v0.4s\n"
@@ -3384,42 +3396,30 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"fmin v25.4s, v25.4s, v0.4s\n"
"fmin v26.4s, v26.4s, v0.4s\n"
"fmin v27.4s, v27.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v1.4s\n"
+ "fmax v19.4s, v19.4s, v1.4s\n"
+ "fmax v20.4s, v20.4s, v1.4s\n"
+ "fmax v21.4s, v21.4s, v1.4s\n"
+ "fmax v22.4s, v22.4s, v1.4s\n"
+ "fmax v23.4s, v23.4s, v1.4s\n"
+ "fmax v24.4s, v24.4s, v1.4s\n"
+ "fmax v25.4s, v25.4s, v1.4s\n"
+ "fmax v26.4s, v26.4s, v1.4s\n"
+ "fmax v27.4s, v27.4s, v1.4s\n"
"fmin v28.4s, v28.4s, v0.4s\n"
"fmin v29.4s, v29.4s, v0.4s\n"
"fmin v30.4s, v30.4s, v0.4s\n"
"fmin v31.4s, v31.4s, v0.4s\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v15.4s, v15.4s, v0.4s\n"
- "fmax v16.4s, v16.4s, v0.4s\n"
- "fmax v17.4s, v17.4s, v0.4s\n"
- "fmax v18.4s, v18.4s, v0.4s\n"
- "fmax v19.4s, v19.4s, v0.4s\n"
- "fmax v20.4s, v20.4s, v0.4s\n"
- "fmax v21.4s, v21.4s, v0.4s\n"
- "fmax v22.4s, v22.4s, v0.4s\n"
- "fmax v23.4s, v23.4s, v0.4s\n"
- "fmax v24.4s, v24.4s, v0.4s\n"
- "fmax v25.4s, v25.4s, v0.4s\n"
- "fmax v26.4s, v26.4s, v0.4s\n"
- "fmax v27.4s, v27.4s, v0.4s\n"
- "fmax v28.4s, v28.4s, v0.4s\n"
- "fmax v29.4s, v29.4s, v0.4s\n"
- "fmax v30.4s, v30.4s, v0.4s\n"
- "fmax v31.4s, v31.4s, v0.4s\n"
+ "fmax v28.4s, v28.4s, v1.4s\n"
+ "fmax v29.4s, v29.4s, v1.4s\n"
+ "fmax v30.4s, v30.4s, v1.4s\n"
+ "fmax v31.4s, v31.4s, v1.4s\n"
"188:" // Height 6: No activation
"cmp x8, #0x10\n"
"bge 197f\n"
"tbz x8, #3, 192f\n"
- "st1 { v8.4s }, [x16], #0x10\n"
- "st1 { v9.4s }, [x16], #0x10\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
+ "st1 { v9.4s }, [x15], #0x10\n"
"st1 { v12.4s }, [x25], #0x10\n"
"st1 { v13.4s }, [x25], #0x10\n"
"st1 { v16.4s }, [x24], #0x10\n"
@@ -3431,21 +3431,21 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"st1 { v28.4s }, [x21], #0x10\n"
"st1 { v29.4s }, [x21], #0x10\n"
"tbz x8, #2, 190f\n"
- "st1 { v10.4s }, [x16], #0x10\n"
+ "st1 { v10.4s }, [x15], #0x10\n"
"st1 { v14.4s }, [x25], #0x10\n"
"st1 { v18.4s }, [x24], #0x10\n"
"st1 { v22.4s }, [x23], #0x10\n"
"st1 { v26.4s }, [x22], #0x10\n"
"st1 { v30.4s }, [x21], #0x10\n"
"tbz x8, #1, 189f\n"
- "str d11, [x16], #0x8\n"
+ "str d11, [x15], #0x8\n"
"str d15, [x25], #0x8\n"
"str d19, [x24], #0x8\n"
"str d23, [x23], #0x8\n"
"str d27, [x22], #0x8\n"
"str d31, [x21], #0x8\n"
"tbz x8, #0, 196f\n"
- "st1 { v11.s }[2], [x16]\n"
+ "st1 { v11.s }[2], [x15]\n"
"st1 { v15.s }[2], [x25]\n"
"st1 { v19.s }[2], [x24]\n"
"st1 { v23.s }[2], [x23]\n"
@@ -3454,7 +3454,7 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"b 196f\n"
"189:" // Height 6: Partial direct writeback: partial_1_12
"tbz x8, #0, 196f\n"
- "str s11, [x16, #0x0]\n"
+ "str s11, [x15, #0x0]\n"
"str s15, [x25, #0x0]\n"
"str s19, [x24, #0x0]\n"
"str s23, [x23, #0x0]\n"
@@ -3463,14 +3463,14 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"b 196f\n"
"190:" // Height 6: Partial direct writeback: partial_2_8
"tbz x8, #1, 191f\n"
- "str d10, [x16], #0x8\n"
+ "str d10, [x15], #0x8\n"
"str d14, [x25], #0x8\n"
"str d18, [x24], #0x8\n"
"str d22, [x23], #0x8\n"
"str d26, [x22], #0x8\n"
"str d30, [x21], #0x8\n"
"tbz x8, #0, 196f\n"
- "st1 { v10.s }[2], [x16]\n"
+ "st1 { v10.s }[2], [x15]\n"
"st1 { v14.s }[2], [x25]\n"
"st1 { v18.s }[2], [x24]\n"
"st1 { v22.s }[2], [x23]\n"
@@ -3479,7 +3479,7 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"b 196f\n"
"191:" // Height 6: Partial direct writeback: partial_1_8
"tbz x8, #0, 196f\n"
- "str s10, [x16, #0x0]\n"
+ "str s10, [x15, #0x0]\n"
"str s14, [x25, #0x0]\n"
"str s18, [x24, #0x0]\n"
"str s22, [x23, #0x0]\n"
@@ -3488,21 +3488,21 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"b 196f\n"
"192:" // Height 6: Partial direct writeback: partial_4_0
"tbz x8, #2, 194f\n"
- "st1 { v8.4s }, [x16], #0x10\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
"st1 { v12.4s }, [x25], #0x10\n"
"st1 { v16.4s }, [x24], #0x10\n"
"st1 { v20.4s }, [x23], #0x10\n"
"st1 { v24.4s }, [x22], #0x10\n"
"st1 { v28.4s }, [x21], #0x10\n"
"tbz x8, #1, 193f\n"
- "str d9, [x16], #0x8\n"
+ "str d9, [x15], #0x8\n"
"str d13, [x25], #0x8\n"
"str d17, [x24], #0x8\n"
"str d21, [x23], #0x8\n"
"str d25, [x22], #0x8\n"
"str d29, [x21], #0x8\n"
"tbz x8, #0, 196f\n"
- "st1 { v9.s }[2], [x16]\n"
+ "st1 { v9.s }[2], [x15]\n"
"st1 { v13.s }[2], [x25]\n"
"st1 { v17.s }[2], [x24]\n"
"st1 { v21.s }[2], [x23]\n"
@@ -3511,7 +3511,7 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"b 196f\n"
"193:" // Height 6: Partial direct writeback: partial_1_4
"tbz x8, #0, 196f\n"
- "str s9, [x16, #0x0]\n"
+ "str s9, [x15, #0x0]\n"
"str s13, [x25, #0x0]\n"
"str s17, [x24, #0x0]\n"
"str s21, [x23, #0x0]\n"
@@ -3520,14 +3520,14 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"b 196f\n"
"194:" // Height 6: Partial direct writeback: partial_2_0
"tbz x8, #1, 195f\n"
- "str d8, [x16], #0x8\n"
+ "str d8, [x15], #0x8\n"
"str d12, [x25], #0x8\n"
"str d16, [x24], #0x8\n"
"str d20, [x23], #0x8\n"
"str d24, [x22], #0x8\n"
"str d28, [x21], #0x8\n"
"tbz x8, #0, 196f\n"
- "st1 { v8.s }[2], [x16]\n"
+ "st1 { v8.s }[2], [x15]\n"
"st1 { v12.s }[2], [x25]\n"
"st1 { v16.s }[2], [x24]\n"
"st1 { v20.s }[2], [x23]\n"
@@ -3535,7 +3535,7 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"st1 { v28.s }[2], [x21]\n"
"b 196f\n"
"195:" // Height 6: Partial direct writeback: partial_1_0
- "str s8, [x16, #0x0]\n"
+ "str s8, [x15, #0x0]\n"
"str s12, [x25, #0x0]\n"
"str s16, [x24, #0x0]\n"
"str s20, [x23, #0x0]\n"
@@ -3544,11 +3544,11 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"196:" // Height 6: Partial direct writeback: Done
"b 198f\n"
"197:" // Height 6: Full writeback
- "str q8, [x16, #0x0]\n"
- "str q9, [x16, #0x10]\n"
- "str q10, [x16, #0x20]\n"
- "str q11, [x16, #0x30]\n"
- "add x16, x16, #0x40\n"
+ "str q8, [x15, #0x0]\n"
+ "str q9, [x15, #0x10]\n"
+ "str q10, [x15, #0x20]\n"
+ "str q11, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
"str q12, [x25, #0x0]\n"
"str q13, [x25, #0x10]\n"
"str q14, [x25, #0x20]\n"
@@ -3574,20 +3574,20 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"bgt 167b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 200f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 199f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"199:" // Update direct input
- "mov x20, #0x18\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x18\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"200:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_6x16/generic.cpp
index c5e4388aa9..28e9be4cb7 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_6x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_6x16/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -102,82 +102,82 @@ void a64_hybrid_fp32_mla_6x16 (
"cmp %x[M], #0x2\n"
"bgt 67f\n"
"beq 34f\n"
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x9, %x[bias]\n"
+ "mov x28, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "cbz x12, 3f\n"
- "ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
- "add x12, x12, #0x40\n"
+ "cbz x9, 3f\n"
+ "ldr q8, [x9, #0x0]\n"
+ "ldr q9, [x9, #0x10]\n"
+ "ldr q10, [x9, #0x20]\n"
+ "ldr q11, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
"b 14f\n"
"3:" // Height 1: no bias
"tbz %x[flags], #0, 13f\n"
"cmp x11, #0x10\n"
"bge 12f\n"
"tbz x11, #3, 7f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
"tbz x11, #2, 5f\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
"tbz x11, #1, 4f\n"
- "ldr d11, [x9], #0x8\n"
- "mov x20, #0x38\n"
+ "mov x19, #0x38\n"
+ "ldr d11, [x28], #0x8\n"
"tbz x11, #0, 11f\n"
- "ld1 { v11.s }[2], [x9]\n"
+ "ld1 { v11.s }[2], [x28]\n"
"b 11f\n"
"4:" // Height 1: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x11, #0, 11f\n"
- "ldr s11, [x9, #0x0]\n"
+ "ldr s11, [x28, #0x0]\n"
"b 11f\n"
"5:" // Height 1: Partial accumulate: partial_2_8
"tbz x11, #1, 6f\n"
- "ldr d10, [x9], #0x8\n"
- "mov x20, #0x28\n"
+ "ldr d10, [x28], #0x8\n"
+ "mov x19, #0x28\n"
"tbz x11, #0, 11f\n"
- "ld1 { v10.s }[2], [x9]\n"
+ "ld1 { v10.s }[2], [x28]\n"
"b 11f\n"
"6:" // Height 1: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x11, #0, 11f\n"
- "ldr s10, [x9, #0x0]\n"
+ "ldr s10, [x28, #0x0]\n"
"b 11f\n"
"7:" // Height 1: Partial accumulate: partial_4_0
"tbz x11, #2, 9f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
"tbz x11, #1, 8f\n"
- "ldr d9, [x9], #0x8\n"
- "mov x20, #0x18\n"
+ "ldr d9, [x28], #0x8\n"
+ "mov x19, #0x18\n"
"tbz x11, #0, 11f\n"
- "ld1 { v9.s }[2], [x9]\n"
+ "ld1 { v9.s }[2], [x28]\n"
"b 11f\n"
"8:" // Height 1: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x11, #0, 11f\n"
- "ldr s9, [x9, #0x0]\n"
+ "ldr s9, [x28, #0x0]\n"
"b 11f\n"
"9:" // Height 1: Partial accumulate: partial_2_0
"tbz x11, #1, 10f\n"
- "ldr d8, [x9], #0x8\n"
- "mov x20, #0x8\n"
+ "ldr d8, [x28], #0x8\n"
+ "mov x19, #0x8\n"
"tbz x11, #0, 11f\n"
- "ld1 { v8.s }[2], [x9]\n"
+ "ld1 { v8.s }[2], [x28]\n"
"b 11f\n"
"10:" // Height 1: Partial accumulate: partial_1_0
- "ldr s8, [x9, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr s8, [x28, #0x0]\n"
+ "mov x19, #0x0\n"
"11:" // Height 1: Partial accumulate: Done
- "sub x9, x9, x20\n"
+ "sub x28, x28, x19\n"
"b 14f\n"
"12:" // Height 1: full accumulate
- "ldr q8, [x9, #0x0]\n"
- "ldr q9, [x9, #0x10]\n"
- "ldr q10, [x9, #0x20]\n"
- "ldr q11, [x9, #0x30]\n"
+ "ldr q8, [x28, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q10, [x28, #0x20]\n"
+ "ldr q11, [x28, #0x30]\n"
"b 14f\n"
"13:" // Height 1: no accumulate
"movi v8.16b, #0x0\n"
@@ -185,42 +185,46 @@ void a64_hybrid_fp32_mla_6x16 (
"movi v10.16b, #0x0\n"
"movi v11.16b, #0x0\n"
"14:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"15:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 16f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 17f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 17f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
"b 17f\n"
"16:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"17:" // Height 1: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"blt 20f\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
"ldr q6, [x10, #0x0]\n"
- "cmp x27, #0x8\n"
- "ldr q7, [x10, #0x10]\n"
+ "cmp x26, #0x8\n"
"blt 19f\n"
"18:" // Height 1: Multiply loop: Main loop head
"fmla v8.4s, v6.4s, v0.s[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "add x25, x25, #0x10\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
- "ldr q7, [x10, #0x30]\n"
+ "ldr q6, [x10, #0x20]\n"
+ "sub x26, x26, #0x4\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q7, [x10, #0x30]\n"
+ "cmp x26, #0x8\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
+ "ldr q6, [x10, #0x40]\n"
"ldr q7, [x10, #0x50]\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
"ldr q6, [x10, #0x60]\n"
"fmla v9.4s, v7.4s, v0.s[1]\n"
"ldr q7, [x10, #0x70]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"fmla v10.4s, v6.4s, v0.s[1]\n"
"ldr q6, [x10, #0x80]\n"
"fmla v11.4s, v7.4s, v0.s[1]\n"
@@ -237,28 +241,27 @@ void a64_hybrid_fp32_mla_6x16 (
"ldr q6, [x10, #0xe0]\n"
"fmla v9.4s, v7.4s, v0.s[3]\n"
"ldr q7, [x10, #0xf0]\n"
- "sub x27, x27, #0x4\n"
- "add x26, x26, #0x10\n"
- "fmla v10.4s, v6.4s, v0.s[3]\n"
- "fmla v11.4s, v7.4s, v0.s[3]\n"
- "ldr q0, [x26, #0x0]\n"
- "cmp x27, #0x8\n"
"add x10, x10, #0x100\n"
+ "fmla v10.4s, v6.4s, v0.s[3]\n"
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "fmla v11.4s, v7.4s, v0.s[3]\n"
+ "ldr q0, [x25, #0x0]\n"
"bge 18b\n"
"19:" // Height 1: Multiply loop: Single iteration only
"fmla v8.4s, v6.4s, v0.s[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "sub x26, x26, #0x4\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
- "ldr q7, [x10, #0x30]\n"
+ "ldr q6, [x10, #0x20]\n"
+ "add x25, x25, #0x10\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
+ "ldr q7, [x10, #0x30]\n"
"ldr q6, [x10, #0x40]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
"ldr q7, [x10, #0x50]\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
"ldr q6, [x10, #0x60]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"fmla v9.4s, v7.4s, v0.s[1]\n"
"ldr q7, [x10, #0x70]\n"
"fmla v10.4s, v6.4s, v0.s[1]\n"
@@ -277,208 +280,205 @@ void a64_hybrid_fp32_mla_6x16 (
"ldr q6, [x10, #0xe0]\n"
"fmla v9.4s, v7.4s, v0.s[3]\n"
"ldr q7, [x10, #0xf0]\n"
- "add x26, x26, #0x10\n"
- "sub x27, x27, #0x4\n"
+ "add x10, x10, #0x100\n"
"fmla v10.4s, v6.4s, v0.s[3]\n"
"fmla v11.4s, v7.4s, v0.s[3]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "add x10, x10, #0x100\n"
"20:" // Height 1: Multiply loop: Main loop skip
- "cbz x27, 22f\n"
+ "cbz x26, 22f\n"
"21:" // Height 1: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
+ "ldr s0, [x25], #0x4\n"
+ "sub x26, x26, #0x1\n"
"ldr q6, [x10, #0x0]\n"
"fmla v8.4s, v6.4s, v0.s[0]\n"
- "sub x27, x27, #0x1\n"
"ldr q7, [x10, #0x10]\n"
"ldr q6, [x10, #0x20]\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
- "fmla v10.4s, v6.4s, v0.s[0]\n"
"ldr q7, [x10, #0x30]\n"
- "fmla v11.4s, v7.4s, v0.s[0]\n"
"add x10, x10, #0x40\n"
- "cbnz x27, 21b\n"
+ "fmla v10.4s, v6.4s, v0.s[0]\n"
+ "fmla v11.4s, v7.4s, v0.s[0]\n"
+ "cbnz x26, 21b\n"
"22:" // Height 1: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 15b\n"
- "prfm pstl1keep, [x9, #0x0]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
"tbz %x[flags], #1, 23f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v8.4s, v8.4s, v1.4s\n"
- "fmin v9.4s, v9.4s, v1.4s\n"
- "fmin v10.4s, v10.4s, v1.4s\n"
- "fmin v11.4s, v11.4s, v1.4s\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v8.4s, v8.4s, v0.4s\n"
+ "fmin v9.4s, v9.4s, v0.4s\n"
+ "fmin v10.4s, v10.4s, v0.4s\n"
+ "fmin v11.4s, v11.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
"23:" // Height 1: No activation
"cmp x11, #0x10\n"
"bge 32f\n"
"tbz x11, #3, 27f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v9.4s }, [x9], #0x10\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v9.4s }, [x28], #0x10\n"
"tbz x11, #2, 25f\n"
- "st1 { v10.4s }, [x9], #0x10\n"
+ "st1 { v10.4s }, [x28], #0x10\n"
"tbz x11, #1, 24f\n"
- "str d11, [x9], #0x8\n"
+ "str d11, [x28], #0x8\n"
"tbz x11, #0, 31f\n"
- "st1 { v11.s }[2], [x9]\n"
+ "st1 { v11.s }[2], [x28]\n"
"b 31f\n"
"24:" // Height 1: Partial direct writeback: partial_1_12
"tbz x11, #0, 31f\n"
- "str s11, [x9, #0x0]\n"
+ "str s11, [x28, #0x0]\n"
"b 31f\n"
"25:" // Height 1: Partial direct writeback: partial_2_8
"tbz x11, #1, 26f\n"
- "str d10, [x9], #0x8\n"
+ "str d10, [x28], #0x8\n"
"tbz x11, #0, 31f\n"
- "st1 { v10.s }[2], [x9]\n"
+ "st1 { v10.s }[2], [x28]\n"
"b 31f\n"
"26:" // Height 1: Partial direct writeback: partial_1_8
"tbz x11, #0, 31f\n"
- "str s10, [x9, #0x0]\n"
+ "str s10, [x28, #0x0]\n"
"b 31f\n"
"27:" // Height 1: Partial direct writeback: partial_4_0
"tbz x11, #2, 29f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
"tbz x11, #1, 28f\n"
- "str d9, [x9], #0x8\n"
+ "str d9, [x28], #0x8\n"
"tbz x11, #0, 31f\n"
- "st1 { v9.s }[2], [x9]\n"
+ "st1 { v9.s }[2], [x28]\n"
"b 31f\n"
"28:" // Height 1: Partial direct writeback: partial_1_4
"tbz x11, #0, 31f\n"
- "str s9, [x9, #0x0]\n"
+ "str s9, [x28, #0x0]\n"
"b 31f\n"
"29:" // Height 1: Partial direct writeback: partial_2_0
"tbz x11, #1, 30f\n"
- "str d8, [x9], #0x8\n"
+ "str d8, [x28], #0x8\n"
"tbz x11, #0, 31f\n"
- "st1 { v8.s }[2], [x9]\n"
+ "st1 { v8.s }[2], [x28]\n"
"b 31f\n"
"30:" // Height 1: Partial direct writeback: partial_1_0
- "str s8, [x9, #0x0]\n"
+ "str s8, [x28, #0x0]\n"
"31:" // Height 1: Partial direct writeback: Done
"b 33f\n"
"32:" // Height 1: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
"33:" // Height 1: Writeback done
"subs x11, x11, #0x10\n"
"bgt 2b\n"
"b 200f\n"
"34:" // Height 2
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"35:" // Height 2: Column loop
- "cbz x12, 36f\n"
- "ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
+ "cbz x9, 36f\n"
+ "ldr q8, [x9, #0x0]\n"
"mov v12.16b, v8.16b\n"
+ "ldr q9, [x9, #0x10]\n"
+ "ldr q10, [x9, #0x20]\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
+ "ldr q11, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
"mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
- "add x12, x12, #0x40\n"
"b 47f\n"
"36:" // Height 2: no bias
"tbz %x[flags], #0, 46f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x11, #0x10\n"
- "add x25, x9, x20, LSL #2\n"
+ "add x24, x28, x19, LSL #2\n"
"bge 45f\n"
"tbz x11, #3, 40f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
"tbz x11, #2, 38f\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
"tbz x11, #1, 37f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "mov x20, #0x38\n"
+ "mov x19, #0x38\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
"tbz x11, #0, 44f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x24]\n"
"b 44f\n"
"37:" // Height 2: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x11, #0, 44f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
"b 44f\n"
"38:" // Height 2: Partial accumulate: partial_2_8
"tbz x11, #1, 39f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "mov x20, #0x28\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
"tbz x11, #0, 44f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x24]\n"
"b 44f\n"
"39:" // Height 2: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x11, #0, 44f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
"b 44f\n"
"40:" // Height 2: Partial accumulate: partial_4_0
"tbz x11, #2, 42f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
"tbz x11, #1, 41f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "mov x20, #0x18\n"
+ "mov x19, #0x18\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
"tbz x11, #0, 44f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x24]\n"
"b 44f\n"
"41:" // Height 2: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x11, #0, 44f\n"
- "ldr s9, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
+ "ldr s9, [x28, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
"b 44f\n"
"42:" // Height 2: Partial accumulate: partial_2_0
"tbz x11, #1, 43f\n"
- "ldr d8, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "mov x20, #0x8\n"
+ "ldr d8, [x28], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
"tbz x11, #0, 44f\n"
- "ld1 { v8.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
+ "ld1 { v8.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x24]\n"
"b 44f\n"
"43:" // Height 2: Partial accumulate: partial_1_0
- "ldr s8, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr s8, [x28, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s12, [x24, #0x0]\n"
"44:" // Height 2: Partial accumulate: Done
- "sub x9, x9, x20\n"
+ "sub x28, x28, x19\n"
"b 47f\n"
"45:" // Height 2: full accumulate
- "ldr q8, [x9, #0x0]\n"
- "ldr q9, [x9, #0x10]\n"
- "ldr q10, [x9, #0x20]\n"
- "ldr q11, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
+ "ldr q8, [x28, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q10, [x28, #0x20]\n"
+ "ldr q11, [x28, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
"b 47f\n"
"46:" // Height 2: no accumulate
"movi v8.16b, #0x0\n"
@@ -490,58 +490,58 @@ void a64_hybrid_fp32_mla_6x16 (
"movi v14.16b, #0x0\n"
"movi v15.16b, #0x0\n"
"47:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"48:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 49f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 50f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 50f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
"b 50f\n"
"49:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
"50:" // Height 2: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"blt 53f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x8\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x8\n"
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
"blt 52f\n"
"51:" // Height 2: Multiply loop: Main loop head
"fmla v8.4s, v6.4s, v0.s[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "add x25, x25, #0x10\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
"ldr q6, [x10, #0x20]\n"
- "sub x27, x27, #0x4\n"
+ "add x24, x24, #0x10\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x26, x26, #0x4\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
"ldr q7, [x10, #0x30]\n"
- "add x26, x26, #0x10\n"
+ "cmp x26, #0x8\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
"ldr q6, [x10, #0x40]\n"
- "add x25, x25, #0x10\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
"fmla v15.4s, v7.4s, v1.s[0]\n"
"ldr q7, [x10, #0x50]\n"
- "cmp x27, #0x8\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
"fmla v12.4s, v6.4s, v1.s[1]\n"
"ldr q6, [x10, #0x60]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"fmla v9.4s, v7.4s, v0.s[1]\n"
"fmla v13.4s, v7.4s, v1.s[1]\n"
"ldr q7, [x10, #0x70]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"fmla v10.4s, v6.4s, v0.s[1]\n"
"fmla v14.4s, v6.4s, v1.s[1]\n"
"ldr q6, [x10, #0x80]\n"
@@ -571,32 +571,32 @@ void a64_hybrid_fp32_mla_6x16 (
"fmla v14.4s, v6.4s, v1.s[3]\n"
"ldr q6, [x10, #0x0]\n"
"fmla v11.4s, v7.4s, v0.s[3]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
"fmla v15.4s, v7.4s, v1.s[3]\n"
- "ldr q1, [x25, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q1, [x24, #0x0]\n"
"bge 51b\n"
"52:" // Height 2: Multiply loop: Single iteration only
"fmla v8.4s, v6.4s, v0.s[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "sub x26, x26, #0x4\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
"ldr q6, [x10, #0x20]\n"
- "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
"ldr q7, [x10, #0x30]\n"
- "add x25, x25, #0x10\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
"ldr q6, [x10, #0x40]\n"
- "sub x27, x27, #0x4\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
"fmla v15.4s, v7.4s, v1.s[0]\n"
"ldr q7, [x10, #0x50]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
"fmla v12.4s, v6.4s, v1.s[1]\n"
"ldr q6, [x10, #0x60]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"fmla v9.4s, v7.4s, v0.s[1]\n"
"fmla v13.4s, v7.4s, v1.s[1]\n"
"ldr q7, [x10, #0x70]\n"
@@ -630,262 +630,262 @@ void a64_hybrid_fp32_mla_6x16 (
"fmla v11.4s, v7.4s, v0.s[3]\n"
"fmla v15.4s, v7.4s, v1.s[3]\n"
"53:" // Height 2: Multiply loop: Main loop skip
- "cbz x27, 55f\n"
+ "cbz x26, 55f\n"
"54:" // Height 2: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr s1, [x25], #0x4\n"
- "sub x27, x27, #0x1\n"
+ "ldr s0, [x25], #0x4\n"
+ "sub x26, x26, #0x1\n"
+ "ldr s1, [x24], #0x4\n"
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
"fmla v8.4s, v6.4s, v0.s[0]\n"
+ "ldr q7, [x10, #0x10]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
"ldr q6, [x10, #0x20]\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
"ldr q7, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
- "add x10, x10, #0x40\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
"fmla v15.4s, v7.4s, v1.s[0]\n"
- "cbnz x27, 54b\n"
+ "cbnz x26, 54b\n"
"55:" // Height 2: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 48b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x25, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"tbz %x[flags], #1, 56f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v8.4s, v8.4s, v1.4s\n"
- "fmin v9.4s, v9.4s, v1.4s\n"
- "fmin v10.4s, v10.4s, v1.4s\n"
- "fmin v11.4s, v11.4s, v1.4s\n"
- "fmin v12.4s, v12.4s, v1.4s\n"
- "fmin v13.4s, v13.4s, v1.4s\n"
- "fmin v14.4s, v14.4s, v1.4s\n"
- "fmin v15.4s, v15.4s, v1.4s\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v15.4s, v15.4s, v0.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v8.4s, v8.4s, v0.4s\n"
+ "fmin v9.4s, v9.4s, v0.4s\n"
+ "fmin v10.4s, v10.4s, v0.4s\n"
+ "fmin v11.4s, v11.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v0.4s\n"
+ "fmin v13.4s, v13.4s, v0.4s\n"
+ "fmin v14.4s, v14.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmin v15.4s, v15.4s, v0.4s\n"
+ "fmax v15.4s, v15.4s, v1.4s\n"
"56:" // Height 2: No activation
"cmp x11, #0x10\n"
"bge 65f\n"
"tbz x11, #3, 60f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v9.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v9.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v13.4s }, [x24], #0x10\n"
"tbz x11, #2, 58f\n"
- "st1 { v10.4s }, [x9], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
+ "st1 { v10.4s }, [x28], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
"tbz x11, #1, 57f\n"
- "str d11, [x9], #0x8\n"
- "str d15, [x25], #0x8\n"
+ "str d11, [x28], #0x8\n"
+ "str d15, [x24], #0x8\n"
"tbz x11, #0, 64f\n"
- "st1 { v11.s }[2], [x9]\n"
- "st1 { v15.s }[2], [x25]\n"
+ "st1 { v11.s }[2], [x28]\n"
+ "st1 { v15.s }[2], [x24]\n"
"b 64f\n"
"57:" // Height 2: Partial direct writeback: partial_1_12
"tbz x11, #0, 64f\n"
- "str s11, [x9, #0x0]\n"
- "str s15, [x25, #0x0]\n"
+ "str s11, [x28, #0x0]\n"
+ "str s15, [x24, #0x0]\n"
"b 64f\n"
"58:" // Height 2: Partial direct writeback: partial_2_8
"tbz x11, #1, 59f\n"
- "str d10, [x9], #0x8\n"
- "str d14, [x25], #0x8\n"
+ "str d10, [x28], #0x8\n"
+ "str d14, [x24], #0x8\n"
"tbz x11, #0, 64f\n"
- "st1 { v10.s }[2], [x9]\n"
- "st1 { v14.s }[2], [x25]\n"
+ "st1 { v10.s }[2], [x28]\n"
+ "st1 { v14.s }[2], [x24]\n"
"b 64f\n"
"59:" // Height 2: Partial direct writeback: partial_1_8
"tbz x11, #0, 64f\n"
- "str s10, [x9, #0x0]\n"
- "str s14, [x25, #0x0]\n"
+ "str s10, [x28, #0x0]\n"
+ "str s14, [x24, #0x0]\n"
"b 64f\n"
"60:" // Height 2: Partial direct writeback: partial_4_0
"tbz x11, #2, 62f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
"tbz x11, #1, 61f\n"
- "str d9, [x9], #0x8\n"
- "str d13, [x25], #0x8\n"
+ "str d9, [x28], #0x8\n"
+ "str d13, [x24], #0x8\n"
"tbz x11, #0, 64f\n"
- "st1 { v9.s }[2], [x9]\n"
- "st1 { v13.s }[2], [x25]\n"
+ "st1 { v9.s }[2], [x28]\n"
+ "st1 { v13.s }[2], [x24]\n"
"b 64f\n"
"61:" // Height 2: Partial direct writeback: partial_1_4
"tbz x11, #0, 64f\n"
- "str s9, [x9, #0x0]\n"
- "str s13, [x25, #0x0]\n"
+ "str s9, [x28, #0x0]\n"
+ "str s13, [x24, #0x0]\n"
"b 64f\n"
"62:" // Height 2: Partial direct writeback: partial_2_0
"tbz x11, #1, 63f\n"
- "str d8, [x9], #0x8\n"
- "str d12, [x25], #0x8\n"
+ "str d8, [x28], #0x8\n"
+ "str d12, [x24], #0x8\n"
"tbz x11, #0, 64f\n"
- "st1 { v8.s }[2], [x9]\n"
- "st1 { v12.s }[2], [x25]\n"
+ "st1 { v8.s }[2], [x28]\n"
+ "st1 { v12.s }[2], [x24]\n"
"b 64f\n"
"63:" // Height 2: Partial direct writeback: partial_1_0
- "str s8, [x9, #0x0]\n"
- "str s12, [x25, #0x0]\n"
+ "str s8, [x28, #0x0]\n"
+ "str s12, [x24, #0x0]\n"
"64:" // Height 2: Partial direct writeback: Done
"b 66f\n"
"65:" // Height 2: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
"66:" // Height 2: Writeback done
"subs x11, x11, #0x10\n"
"bgt 35b\n"
"b 200f\n"
"67:" // Height 3
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"68:" // Height 3: Column loop
- "cbz x12, 69f\n"
- "ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
+ "cbz x9, 69f\n"
+ "ldr q8, [x9, #0x0]\n"
"mov v12.16b, v8.16b\n"
+ "ldr q9, [x9, #0x10]\n"
+ "mov v16.16b, v8.16b\n"
+ "ldr q10, [x9, #0x20]\n"
+ "ldr q11, [x9, #0x30]\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "mov v17.16b, v9.16b\n"
"mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
- "mov v16.16b, v8.16b\n"
- "mov v17.16b, v9.16b\n"
- "add x12, x12, #0x40\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
"b 80f\n"
"69:" // Height 3: no bias
"tbz %x[flags], #0, 79f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x11, #0x10\n"
- "add x24, x25, x20, LSL #2\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"bge 78f\n"
"tbz x11, #3, 73f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
"tbz x11, #2, 71f\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
"tbz x11, #1, 70f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "mov x20, #0x38\n"
- "ldr d19, [x24], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "ldr d19, [x23], #0x8\n"
"tbz x11, #0, 77f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
"b 77f\n"
"70:" // Height 3: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x11, #0, 77f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
"b 77f\n"
"71:" // Height 3: Partial accumulate: partial_2_8
"tbz x11, #1, 72f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "mov x20, #0x28\n"
- "ldr d18, [x24], #0x8\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d18, [x23], #0x8\n"
"tbz x11, #0, 77f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
"b 77f\n"
"72:" // Height 3: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x11, #0, 77f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
"b 77f\n"
"73:" // Height 3: Partial accumulate: partial_4_0
"tbz x11, #2, 75f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
"tbz x11, #1, 74f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "mov x20, #0x18\n"
- "ldr d17, [x24], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "ldr d17, [x23], #0x8\n"
"tbz x11, #0, 77f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
"b 77f\n"
"74:" // Height 3: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x11, #0, 77f\n"
- "ldr s9, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s17, [x24, #0x0]\n"
+ "ldr s9, [x28, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s17, [x23, #0x0]\n"
"b 77f\n"
"75:" // Height 3: Partial accumulate: partial_2_0
"tbz x11, #1, 76f\n"
- "ldr d8, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d16, [x24], #0x8\n"
+ "ldr d8, [x28], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d16, [x23], #0x8\n"
"tbz x11, #0, 77f\n"
- "ld1 { v8.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v16.s }[2], [x24]\n"
+ "ld1 { v8.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v16.s }[2], [x23]\n"
"b 77f\n"
"76:" // Height 3: Partial accumulate: partial_1_0
- "ldr s8, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s16, [x24, #0x0]\n"
+ "ldr s8, [x28, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s12, [x24, #0x0]\n"
+ "ldr s16, [x23, #0x0]\n"
"77:" // Height 3: Partial accumulate: Done
- "sub x9, x9, x20\n"
+ "sub x28, x28, x19\n"
"b 80f\n"
"78:" // Height 3: full accumulate
- "ldr q8, [x9, #0x0]\n"
- "ldr q9, [x9, #0x10]\n"
- "ldr q10, [x9, #0x20]\n"
- "ldr q11, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
+ "ldr q8, [x28, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q10, [x28, #0x20]\n"
+ "ldr q11, [x28, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x0]\n"
+ "ldr q17, [x23, #0x10]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "ldr q19, [x23, #0x30]\n"
"b 80f\n"
"79:" // Height 3: no accumulate
"movi v8.16b, #0x0\n"
@@ -901,62 +901,62 @@ void a64_hybrid_fp32_mla_6x16 (
"movi v18.16b, #0x0\n"
"movi v19.16b, #0x0\n"
"80:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"81:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 82f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 83f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 83f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
"b 83f\n"
"82:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"83:" // Height 3: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"blt 86f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x8\n"
- "ldr q2, [x24, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x8\n"
+ "ldr q2, [x23, #0x0]\n"
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
"blt 85f\n"
"84:" // Height 3: Multiply loop: Main loop head
"fmla v8.4s, v6.4s, v0.s[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "add x25, x25, #0x10\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
- "sub x27, x27, #0x4\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
"ldr q6, [x10, #0x20]\n"
+ "add x23, x23, #0x10\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
- "add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "sub x26, x26, #0x4\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "cmp x26, #0x8\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
"ldr q7, [x10, #0x30]\n"
- "add x24, x24, #0x10\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
- "cmp x27, #0x8\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"fmla v18.4s, v6.4s, v2.s[0]\n"
"ldr q6, [x10, #0x40]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"fmla v15.4s, v7.4s, v1.s[0]\n"
"fmla v19.4s, v7.4s, v2.s[0]\n"
"ldr q7, [x10, #0x50]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
"fmla v12.4s, v6.4s, v1.s[1]\n"
"fmla v16.4s, v6.4s, v2.s[1]\n"
@@ -1003,34 +1003,34 @@ void a64_hybrid_fp32_mla_6x16 (
"fmla v18.4s, v6.4s, v2.s[3]\n"
"ldr q6, [x10, #0x0]\n"
"fmla v11.4s, v7.4s, v0.s[3]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
"fmla v15.4s, v7.4s, v1.s[3]\n"
- "ldr q1, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
"fmla v19.4s, v7.4s, v2.s[3]\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q2, [x23, #0x0]\n"
"bge 84b\n"
"85:" // Height 3: Multiply loop: Single iteration only
"fmla v8.4s, v6.4s, v0.s[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "sub x26, x26, #0x4\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
- "ldr q6, [x10, #0x20]\n"
- "fmla v9.4s, v7.4s, v0.s[0]\n"
"add x24, x24, #0x10\n"
+ "fmla v9.4s, v7.4s, v0.s[0]\n"
+ "ldr q6, [x10, #0x20]\n"
+ "add x23, x23, #0x10\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"ldr q7, [x10, #0x30]\n"
- "sub x27, x27, #0x4\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"fmla v18.4s, v6.4s, v2.s[0]\n"
"ldr q6, [x10, #0x40]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"fmla v15.4s, v7.4s, v1.s[0]\n"
"fmla v19.4s, v7.4s, v2.s[0]\n"
"ldr q7, [x10, #0x50]\n"
@@ -1082,16 +1082,16 @@ void a64_hybrid_fp32_mla_6x16 (
"fmla v15.4s, v7.4s, v1.s[3]\n"
"fmla v19.4s, v7.4s, v2.s[3]\n"
"86:" // Height 3: Multiply loop: Main loop skip
- "cbz x27, 88f\n"
+ "cbz x26, 88f\n"
"87:" // Height 3: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr s1, [x25], #0x4\n"
- "sub x27, x27, #0x1\n"
- "ldr s2, [x24], #0x4\n"
+ "ldr s0, [x25], #0x4\n"
+ "sub x26, x26, #0x1\n"
+ "ldr s1, [x24], #0x4\n"
+ "ldr s2, [x23], #0x4\n"
"ldr q6, [x10, #0x0]\n"
"fmla v8.4s, v6.4s, v0.s[0]\n"
- "fmla v12.4s, v6.4s, v1.s[0]\n"
"ldr q7, [x10, #0x10]\n"
+ "fmla v12.4s, v6.4s, v1.s[0]\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
"ldr q6, [x10, #0x20]\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
@@ -1105,299 +1105,299 @@ void a64_hybrid_fp32_mla_6x16 (
"fmla v11.4s, v7.4s, v0.s[0]\n"
"fmla v15.4s, v7.4s, v1.s[0]\n"
"fmla v19.4s, v7.4s, v2.s[0]\n"
- "cbnz x27, 87b\n"
+ "cbnz x26, 87b\n"
"88:" // Height 3: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 81b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x25, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x24, x28, x19, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x19, LSL #2\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"tbz %x[flags], #1, 89f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v8.4s, v8.4s, v1.4s\n"
- "fmin v9.4s, v9.4s, v1.4s\n"
- "fmin v10.4s, v10.4s, v1.4s\n"
- "fmin v11.4s, v11.4s, v1.4s\n"
- "fmin v12.4s, v12.4s, v1.4s\n"
- "fmin v13.4s, v13.4s, v1.4s\n"
- "fmin v14.4s, v14.4s, v1.4s\n"
- "fmin v15.4s, v15.4s, v1.4s\n"
- "fmin v16.4s, v16.4s, v1.4s\n"
- "fmin v17.4s, v17.4s, v1.4s\n"
- "fmin v18.4s, v18.4s, v1.4s\n"
- "fmin v19.4s, v19.4s, v1.4s\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v15.4s, v15.4s, v0.4s\n"
- "fmax v16.4s, v16.4s, v0.4s\n"
- "fmax v17.4s, v17.4s, v0.4s\n"
- "fmax v18.4s, v18.4s, v0.4s\n"
- "fmax v19.4s, v19.4s, v0.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v8.4s, v8.4s, v0.4s\n"
+ "fmin v9.4s, v9.4s, v0.4s\n"
+ "fmin v10.4s, v10.4s, v0.4s\n"
+ "fmin v11.4s, v11.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v0.4s\n"
+ "fmin v13.4s, v13.4s, v0.4s\n"
+ "fmin v14.4s, v14.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmin v15.4s, v15.4s, v0.4s\n"
+ "fmin v16.4s, v16.4s, v0.4s\n"
+ "fmin v17.4s, v17.4s, v0.4s\n"
+ "fmax v15.4s, v15.4s, v1.4s\n"
+ "fmax v16.4s, v16.4s, v1.4s\n"
+ "fmax v17.4s, v17.4s, v1.4s\n"
+ "fmin v18.4s, v18.4s, v0.4s\n"
+ "fmin v19.4s, v19.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v1.4s\n"
+ "fmax v19.4s, v19.4s, v1.4s\n"
"89:" // Height 3: No activation
"cmp x11, #0x10\n"
"bge 98f\n"
"tbz x11, #3, 93f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v9.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v9.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v13.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v17.4s }, [x23], #0x10\n"
"tbz x11, #2, 91f\n"
- "st1 { v10.4s }, [x9], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
+ "st1 { v10.4s }, [x28], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v18.4s }, [x23], #0x10\n"
"tbz x11, #1, 90f\n"
- "str d11, [x9], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
+ "str d11, [x28], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
"tbz x11, #0, 97f\n"
- "st1 { v11.s }[2], [x9]\n"
- "st1 { v15.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
+ "st1 { v11.s }[2], [x28]\n"
+ "st1 { v15.s }[2], [x24]\n"
+ "st1 { v19.s }[2], [x23]\n"
"b 97f\n"
"90:" // Height 3: Partial direct writeback: partial_1_12
"tbz x11, #0, 97f\n"
- "str s11, [x9, #0x0]\n"
- "str s15, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
+ "str s11, [x28, #0x0]\n"
+ "str s15, [x24, #0x0]\n"
+ "str s19, [x23, #0x0]\n"
"b 97f\n"
"91:" // Height 3: Partial direct writeback: partial_2_8
"tbz x11, #1, 92f\n"
- "str d10, [x9], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
+ "str d10, [x28], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
"tbz x11, #0, 97f\n"
- "st1 { v10.s }[2], [x9]\n"
- "st1 { v14.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
+ "st1 { v10.s }[2], [x28]\n"
+ "st1 { v14.s }[2], [x24]\n"
+ "st1 { v18.s }[2], [x23]\n"
"b 97f\n"
"92:" // Height 3: Partial direct writeback: partial_1_8
"tbz x11, #0, 97f\n"
- "str s10, [x9, #0x0]\n"
- "str s14, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
+ "str s10, [x28, #0x0]\n"
+ "str s14, [x24, #0x0]\n"
+ "str s18, [x23, #0x0]\n"
"b 97f\n"
"93:" // Height 3: Partial direct writeback: partial_4_0
"tbz x11, #2, 95f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
"tbz x11, #1, 94f\n"
- "str d9, [x9], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
+ "str d9, [x28], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
"tbz x11, #0, 97f\n"
- "st1 { v9.s }[2], [x9]\n"
- "st1 { v13.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
+ "st1 { v9.s }[2], [x28]\n"
+ "st1 { v13.s }[2], [x24]\n"
+ "st1 { v17.s }[2], [x23]\n"
"b 97f\n"
"94:" // Height 3: Partial direct writeback: partial_1_4
"tbz x11, #0, 97f\n"
- "str s9, [x9, #0x0]\n"
- "str s13, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
+ "str s9, [x28, #0x0]\n"
+ "str s13, [x24, #0x0]\n"
+ "str s17, [x23, #0x0]\n"
"b 97f\n"
"95:" // Height 3: Partial direct writeback: partial_2_0
"tbz x11, #1, 96f\n"
- "str d8, [x9], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
+ "str d8, [x28], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
"tbz x11, #0, 97f\n"
- "st1 { v8.s }[2], [x9]\n"
- "st1 { v12.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
+ "st1 { v8.s }[2], [x28]\n"
+ "st1 { v12.s }[2], [x24]\n"
+ "st1 { v16.s }[2], [x23]\n"
"b 97f\n"
"96:" // Height 3: Partial direct writeback: partial_1_0
- "str s8, [x9, #0x0]\n"
- "str s12, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
+ "str s8, [x28, #0x0]\n"
+ "str s12, [x24, #0x0]\n"
+ "str s16, [x23, #0x0]\n"
"97:" // Height 3: Partial direct writeback: Done
"b 99f\n"
"98:" // Height 3: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
"99:" // Height 3: Writeback done
"subs x11, x11, #0x10\n"
"bgt 68b\n"
"b 200f\n"
"100:" // Height 4
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"101:" // Height 4: Column loop
- "cbz x12, 102f\n"
- "ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
+ "cbz x9, 102f\n"
+ "ldr q8, [x9, #0x0]\n"
"mov v12.16b, v8.16b\n"
+ "ldr q9, [x9, #0x10]\n"
+ "mov v16.16b, v8.16b\n"
+ "ldr q10, [x9, #0x20]\n"
+ "mov v20.16b, v8.16b\n"
+ "ldr q11, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
+ "mov v17.16b, v9.16b\n"
"mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
- "mov v16.16b, v8.16b\n"
- "mov v17.16b, v9.16b\n"
- "add x12, x12, #0x40\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
- "mov v20.16b, v8.16b\n"
"mov v21.16b, v9.16b\n"
"mov v22.16b, v10.16b\n"
"mov v23.16b, v11.16b\n"
"b 113f\n"
"102:" // Height 4: no bias
"tbz %x[flags], #0, 112f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x11, #0x10\n"
- "add x23, x24, x20, LSL #2\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"bge 111f\n"
"tbz x11, #3, 106f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
"tbz x11, #2, 104f\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
"tbz x11, #1, 103f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "mov x20, #0x38\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
"tbz x11, #0, 110f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v23.s }[2], [x22]\n"
"b 110f\n"
"103:" // Height 4: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x11, #0, 110f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s23, [x22, #0x0]\n"
"b 110f\n"
"104:" // Height 4: Partial accumulate: partial_2_8
"tbz x11, #1, 105f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "mov x20, #0x28\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
"tbz x11, #0, 110f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v22.s }[2], [x22]\n"
"b 110f\n"
"105:" // Height 4: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x11, #0, 110f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "ldr s22, [x22, #0x0]\n"
"b 110f\n"
"106:" // Height 4: Partial accumulate: partial_4_0
"tbz x11, #2, 108f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
"tbz x11, #1, 107f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "mov x20, #0x18\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
"tbz x11, #0, 110f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v21.s }[2], [x22]\n"
"b 110f\n"
"107:" // Height 4: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x11, #0, 110f\n"
- "ldr s9, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
+ "ldr s9, [x28, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s17, [x23, #0x0]\n"
+ "ldr s21, [x22, #0x0]\n"
"b 110f\n"
"108:" // Height 4: Partial accumulate: partial_2_0
"tbz x11, #1, 109f\n"
- "ldr d8, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
+ "ldr d8, [x28], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d16, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
"tbz x11, #0, 110f\n"
- "ld1 { v8.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v16.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
+ "ld1 { v8.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v16.s }[2], [x23]\n"
+ "ld1 { v20.s }[2], [x22]\n"
"b 110f\n"
"109:" // Height 4: Partial accumulate: partial_1_0
- "ldr s8, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s16, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
+ "ldr s8, [x28, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s12, [x24, #0x0]\n"
+ "ldr s16, [x23, #0x0]\n"
+ "ldr s20, [x22, #0x0]\n"
"110:" // Height 4: Partial accumulate: Done
- "sub x9, x9, x20\n"
+ "sub x28, x28, x19\n"
"b 113f\n"
"111:" // Height 4: full accumulate
- "ldr q8, [x9, #0x0]\n"
- "ldr q9, [x9, #0x10]\n"
- "ldr q10, [x9, #0x20]\n"
- "ldr q11, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
+ "ldr q8, [x28, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q10, [x28, #0x20]\n"
+ "ldr q11, [x28, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x0]\n"
+ "ldr q17, [x23, #0x10]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "ldr q19, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
"b 113f\n"
"112:" // Height 4: no accumulate
"movi v8.16b, #0x0\n"
@@ -1417,69 +1417,69 @@ void a64_hybrid_fp32_mla_6x16 (
"movi v22.16b, #0x0\n"
"movi v23.16b, #0x0\n"
"113:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"114:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 115f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 116f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 116f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
"b 116f\n"
"115:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"116:" // Height 4: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"blt 119f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x8\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q3, [x23, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x8\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
"blt 118f\n"
"117:" // Height 4: Multiply loop: Main loop head
"fmla v8.4s, v6.4s, v0.s[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "add x25, x25, #0x10\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
- "sub x27, x27, #0x4\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "add x23, x23, #0x10\n"
"fmla v20.4s, v6.4s, v3.s[0]\n"
"ldr q6, [x10, #0x20]\n"
- "add x25, x25, #0x10\n"
+ "add x22, x22, #0x10\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "sub x26, x26, #0x4\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
- "add x24, x24, #0x10\n"
- "add x23, x23, #0x10\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "cmp x26, #0x8\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
"fmla v21.4s, v7.4s, v3.s[0]\n"
"ldr q7, [x10, #0x30]\n"
- "cmp x27, #0x8\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"fmla v18.4s, v6.4s, v2.s[0]\n"
"fmla v22.4s, v6.4s, v3.s[0]\n"
"ldr q6, [x10, #0x40]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
"fmla v15.4s, v7.4s, v1.s[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
"fmla v19.4s, v7.4s, v2.s[0]\n"
"fmla v23.4s, v7.4s, v3.s[0]\n"
"ldr q7, [x10, #0x50]\n"
@@ -1540,40 +1540,40 @@ void a64_hybrid_fp32_mla_6x16 (
"fmla v22.4s, v6.4s, v3.s[3]\n"
"ldr q6, [x10, #0x0]\n"
"fmla v11.4s, v7.4s, v0.s[3]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
"fmla v15.4s, v7.4s, v1.s[3]\n"
- "ldr q1, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
"fmla v19.4s, v7.4s, v2.s[3]\n"
- "ldr q2, [x24, #0x0]\n"
+ "ldr q2, [x23, #0x0]\n"
"fmla v23.4s, v7.4s, v3.s[3]\n"
- "ldr q3, [x23, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q3, [x22, #0x0]\n"
"bge 117b\n"
"118:" // Height 4: Multiply loop: Single iteration only
"fmla v8.4s, v6.4s, v0.s[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "sub x26, x26, #0x4\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
- "fmla v20.4s, v6.4s, v3.s[0]\n"
- "ldr q6, [x10, #0x20]\n"
"add x24, x24, #0x10\n"
+ "fmla v20.4s, v6.4s, v3.s[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "add x23, x23, #0x10\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
+ "ldr q6, [x10, #0x20]\n"
+ "add x22, x22, #0x10\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
- "add x23, x23, #0x10\n"
- "sub x27, x27, #0x4\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
"fmla v21.4s, v7.4s, v3.s[0]\n"
"ldr q7, [x10, #0x30]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"fmla v18.4s, v6.4s, v2.s[0]\n"
"fmla v22.4s, v6.4s, v3.s[0]\n"
"ldr q6, [x10, #0x40]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
"fmla v15.4s, v7.4s, v1.s[0]\n"
"fmla v19.4s, v7.4s, v2.s[0]\n"
@@ -1639,16 +1639,16 @@ void a64_hybrid_fp32_mla_6x16 (
"fmla v19.4s, v7.4s, v2.s[3]\n"
"fmla v23.4s, v7.4s, v3.s[3]\n"
"119:" // Height 4: Multiply loop: Main loop skip
- "cbz x27, 121f\n"
+ "cbz x26, 121f\n"
"120:" // Height 4: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr s1, [x25], #0x4\n"
- "sub x27, x27, #0x1\n"
- "ldr s2, [x24], #0x4\n"
- "ldr s3, [x23], #0x4\n"
+ "ldr s0, [x25], #0x4\n"
+ "sub x26, x26, #0x1\n"
+ "ldr s1, [x24], #0x4\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr s3, [x22], #0x4\n"
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
"fmla v8.4s, v6.4s, v0.s[0]\n"
+ "ldr q7, [x10, #0x10]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
"fmla v20.4s, v6.4s, v3.s[0]\n"
@@ -1667,354 +1667,354 @@ void a64_hybrid_fp32_mla_6x16 (
"fmla v15.4s, v7.4s, v1.s[0]\n"
"fmla v19.4s, v7.4s, v2.s[0]\n"
"fmla v23.4s, v7.4s, v3.s[0]\n"
- "cbnz x27, 120b\n"
+ "cbnz x26, 120b\n"
"121:" // Height 4: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 114b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "add x23, x24, x20, LSL #2\n"
- "prfm pstl1keep, [x25, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x24, x28, x19, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
"tbz %x[flags], #1, 122f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v8.4s, v8.4s, v1.4s\n"
- "fmin v9.4s, v9.4s, v1.4s\n"
- "fmin v10.4s, v10.4s, v1.4s\n"
- "fmin v11.4s, v11.4s, v1.4s\n"
- "fmin v12.4s, v12.4s, v1.4s\n"
- "fmin v13.4s, v13.4s, v1.4s\n"
- "fmin v14.4s, v14.4s, v1.4s\n"
- "fmin v15.4s, v15.4s, v1.4s\n"
- "fmin v16.4s, v16.4s, v1.4s\n"
- "fmin v17.4s, v17.4s, v1.4s\n"
- "fmin v18.4s, v18.4s, v1.4s\n"
- "fmin v19.4s, v19.4s, v1.4s\n"
- "fmin v20.4s, v20.4s, v1.4s\n"
- "fmin v21.4s, v21.4s, v1.4s\n"
- "fmin v22.4s, v22.4s, v1.4s\n"
- "fmin v23.4s, v23.4s, v1.4s\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v15.4s, v15.4s, v0.4s\n"
- "fmax v16.4s, v16.4s, v0.4s\n"
- "fmax v17.4s, v17.4s, v0.4s\n"
- "fmax v18.4s, v18.4s, v0.4s\n"
- "fmax v19.4s, v19.4s, v0.4s\n"
- "fmax v20.4s, v20.4s, v0.4s\n"
- "fmax v21.4s, v21.4s, v0.4s\n"
- "fmax v22.4s, v22.4s, v0.4s\n"
- "fmax v23.4s, v23.4s, v0.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v8.4s, v8.4s, v0.4s\n"
+ "fmin v9.4s, v9.4s, v0.4s\n"
+ "fmin v10.4s, v10.4s, v0.4s\n"
+ "fmin v11.4s, v11.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v0.4s\n"
+ "fmin v13.4s, v13.4s, v0.4s\n"
+ "fmin v14.4s, v14.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmin v15.4s, v15.4s, v0.4s\n"
+ "fmin v16.4s, v16.4s, v0.4s\n"
+ "fmin v17.4s, v17.4s, v0.4s\n"
+ "fmax v15.4s, v15.4s, v1.4s\n"
+ "fmax v16.4s, v16.4s, v1.4s\n"
+ "fmax v17.4s, v17.4s, v1.4s\n"
+ "fmin v18.4s, v18.4s, v0.4s\n"
+ "fmin v19.4s, v19.4s, v0.4s\n"
+ "fmin v20.4s, v20.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v1.4s\n"
+ "fmax v19.4s, v19.4s, v1.4s\n"
+ "fmax v20.4s, v20.4s, v1.4s\n"
+ "fmin v21.4s, v21.4s, v0.4s\n"
+ "fmin v22.4s, v22.4s, v0.4s\n"
+ "fmin v23.4s, v23.4s, v0.4s\n"
+ "fmax v21.4s, v21.4s, v1.4s\n"
+ "fmax v22.4s, v22.4s, v1.4s\n"
+ "fmax v23.4s, v23.4s, v1.4s\n"
"122:" // Height 4: No activation
"cmp x11, #0x10\n"
"bge 131f\n"
"tbz x11, #3, 126f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v9.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v21.4s }, [x23], #0x10\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v9.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v13.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v17.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v21.4s }, [x22], #0x10\n"
"tbz x11, #2, 124f\n"
- "st1 { v10.4s }, [x9], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
- "st1 { v22.4s }, [x23], #0x10\n"
+ "st1 { v10.4s }, [x28], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v18.4s }, [x23], #0x10\n"
+ "st1 { v22.4s }, [x22], #0x10\n"
"tbz x11, #1, 123f\n"
- "str d11, [x9], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
+ "str d11, [x28], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "str d23, [x22], #0x8\n"
"tbz x11, #0, 130f\n"
- "st1 { v11.s }[2], [x9]\n"
- "st1 { v15.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
- "st1 { v23.s }[2], [x23]\n"
+ "st1 { v11.s }[2], [x28]\n"
+ "st1 { v15.s }[2], [x24]\n"
+ "st1 { v19.s }[2], [x23]\n"
+ "st1 { v23.s }[2], [x22]\n"
"b 130f\n"
"123:" // Height 4: Partial direct writeback: partial_1_12
"tbz x11, #0, 130f\n"
- "str s11, [x9, #0x0]\n"
- "str s15, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
- "str s23, [x23, #0x0]\n"
+ "str s11, [x28, #0x0]\n"
+ "str s15, [x24, #0x0]\n"
+ "str s19, [x23, #0x0]\n"
+ "str s23, [x22, #0x0]\n"
"b 130f\n"
"124:" // Height 4: Partial direct writeback: partial_2_8
"tbz x11, #1, 125f\n"
- "str d10, [x9], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
+ "str d10, [x28], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
+ "str d22, [x22], #0x8\n"
"tbz x11, #0, 130f\n"
- "st1 { v10.s }[2], [x9]\n"
- "st1 { v14.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
- "st1 { v22.s }[2], [x23]\n"
+ "st1 { v10.s }[2], [x28]\n"
+ "st1 { v14.s }[2], [x24]\n"
+ "st1 { v18.s }[2], [x23]\n"
+ "st1 { v22.s }[2], [x22]\n"
"b 130f\n"
"125:" // Height 4: Partial direct writeback: partial_1_8
"tbz x11, #0, 130f\n"
- "str s10, [x9, #0x0]\n"
- "str s14, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
- "str s22, [x23, #0x0]\n"
+ "str s10, [x28, #0x0]\n"
+ "str s14, [x24, #0x0]\n"
+ "str s18, [x23, #0x0]\n"
+ "str s22, [x22, #0x0]\n"
"b 130f\n"
"126:" // Height 4: Partial direct writeback: partial_4_0
"tbz x11, #2, 128f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
"tbz x11, #1, 127f\n"
- "str d9, [x9], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
+ "str d9, [x28], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
+ "str d21, [x22], #0x8\n"
"tbz x11, #0, 130f\n"
- "st1 { v9.s }[2], [x9]\n"
- "st1 { v13.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
- "st1 { v21.s }[2], [x23]\n"
+ "st1 { v9.s }[2], [x28]\n"
+ "st1 { v13.s }[2], [x24]\n"
+ "st1 { v17.s }[2], [x23]\n"
+ "st1 { v21.s }[2], [x22]\n"
"b 130f\n"
"127:" // Height 4: Partial direct writeback: partial_1_4
"tbz x11, #0, 130f\n"
- "str s9, [x9, #0x0]\n"
- "str s13, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
- "str s21, [x23, #0x0]\n"
+ "str s9, [x28, #0x0]\n"
+ "str s13, [x24, #0x0]\n"
+ "str s17, [x23, #0x0]\n"
+ "str s21, [x22, #0x0]\n"
"b 130f\n"
"128:" // Height 4: Partial direct writeback: partial_2_0
"tbz x11, #1, 129f\n"
- "str d8, [x9], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
+ "str d8, [x28], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
+ "str d20, [x22], #0x8\n"
"tbz x11, #0, 130f\n"
- "st1 { v8.s }[2], [x9]\n"
- "st1 { v12.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
- "st1 { v20.s }[2], [x23]\n"
+ "st1 { v8.s }[2], [x28]\n"
+ "st1 { v12.s }[2], [x24]\n"
+ "st1 { v16.s }[2], [x23]\n"
+ "st1 { v20.s }[2], [x22]\n"
"b 130f\n"
"129:" // Height 4: Partial direct writeback: partial_1_0
- "str s8, [x9, #0x0]\n"
- "str s12, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
- "str s20, [x23, #0x0]\n"
+ "str s8, [x28, #0x0]\n"
+ "str s12, [x24, #0x0]\n"
+ "str s16, [x23, #0x0]\n"
+ "str s20, [x22, #0x0]\n"
"130:" // Height 4: Partial direct writeback: Done
"b 132f\n"
"131:" // Height 4: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "str q20, [x22, #0x0]\n"
+ "str q21, [x22, #0x10]\n"
+ "str q22, [x22, #0x20]\n"
+ "str q23, [x22, #0x30]\n"
"132:" // Height 4: Writeback done
"subs x11, x11, #0x10\n"
"bgt 101b\n"
"b 200f\n"
"133:" // Height 5
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"134:" // Height 5: Column loop
- "cbz x12, 135f\n"
- "ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
+ "cbz x9, 135f\n"
+ "ldr q8, [x9, #0x0]\n"
"mov v12.16b, v8.16b\n"
+ "ldr q9, [x9, #0x10]\n"
+ "mov v16.16b, v8.16b\n"
+ "ldr q10, [x9, #0x20]\n"
+ "mov v20.16b, v8.16b\n"
+ "ldr q11, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "mov v24.16b, v8.16b\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
+ "mov v17.16b, v9.16b\n"
"mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
- "mov v16.16b, v8.16b\n"
- "mov v17.16b, v9.16b\n"
- "add x12, x12, #0x40\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
- "mov v20.16b, v8.16b\n"
"mov v21.16b, v9.16b\n"
"mov v22.16b, v10.16b\n"
"mov v23.16b, v11.16b\n"
- "mov v24.16b, v8.16b\n"
"mov v25.16b, v9.16b\n"
"mov v26.16b, v10.16b\n"
"mov v27.16b, v11.16b\n"
"b 146f\n"
"135:" // Height 5: no bias
"tbz %x[flags], #0, 145f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x11, #0x10\n"
- "add x22, x23, x20, LSL #2\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"bge 144f\n"
"tbz x11, #3, 139f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v24.4s }, [x21], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
"tbz x11, #2, 137f\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
- "ld1 { v26.4s }, [x22], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
+ "ld1 { v26.4s }, [x21], #0x10\n"
"tbz x11, #1, 136f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "mov x20, #0x38\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
+ "ldr d11, [x28], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d15, [x24], #0x8\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "ldr d27, [x21], #0x8\n"
"tbz x11, #0, 143f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
- "ld1 { v27.s }[2], [x22]\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v23.s }[2], [x22]\n"
+ "ld1 { v27.s }[2], [x21]\n"
"b 143f\n"
"136:" // Height 5: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x11, #0, 143f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
- "ldr s27, [x22, #0x0]\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s23, [x22, #0x0]\n"
+ "ldr s27, [x21, #0x0]\n"
"b 143f\n"
"137:" // Height 5: Partial accumulate: partial_2_8
"tbz x11, #1, 138f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "mov x20, #0x28\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "ldr d26, [x21], #0x8\n"
"tbz x11, #0, 143f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
- "ld1 { v26.s }[2], [x22]\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v22.s }[2], [x22]\n"
+ "ld1 { v26.s }[2], [x21]\n"
"b 143f\n"
"138:" // Height 5: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x11, #0, 143f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
- "ldr s26, [x22, #0x0]\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "ldr s22, [x22, #0x0]\n"
+ "ldr s26, [x21, #0x0]\n"
"b 143f\n"
"139:" // Height 5: Partial accumulate: partial_4_0
"tbz x11, #2, 141f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v24.4s }, [x21], #0x10\n"
"tbz x11, #1, 140f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "mov x20, #0x18\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
+ "ldr d9, [x28], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d13, [x24], #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d25, [x21], #0x8\n"
"tbz x11, #0, 143f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v21.s }[2], [x22]\n"
+ "ld1 { v25.s }[2], [x21]\n"
"b 143f\n"
"140:" // Height 5: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x11, #0, 143f\n"
- "ldr s9, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
+ "ldr s9, [x28, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s17, [x23, #0x0]\n"
+ "ldr s21, [x22, #0x0]\n"
+ "ldr s25, [x21, #0x0]\n"
"b 143f\n"
"141:" // Height 5: Partial accumulate: partial_2_0
"tbz x11, #1, 142f\n"
- "ldr d8, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d24, [x22], #0x8\n"
+ "ldr d8, [x28], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d16, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "ldr d24, [x21], #0x8\n"
"tbz x11, #0, 143f\n"
- "ld1 { v8.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v16.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
- "ld1 { v24.s }[2], [x22]\n"
+ "ld1 { v8.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v16.s }[2], [x23]\n"
+ "ld1 { v20.s }[2], [x22]\n"
+ "ld1 { v24.s }[2], [x21]\n"
"b 143f\n"
"142:" // Height 5: Partial accumulate: partial_1_0
- "ldr s8, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s16, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
- "ldr s24, [x22, #0x0]\n"
+ "ldr s8, [x28, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s12, [x24, #0x0]\n"
+ "ldr s16, [x23, #0x0]\n"
+ "ldr s20, [x22, #0x0]\n"
+ "ldr s24, [x21, #0x0]\n"
"143:" // Height 5: Partial accumulate: Done
- "sub x9, x9, x20\n"
+ "sub x28, x28, x19\n"
"b 146f\n"
"144:" // Height 5: full accumulate
- "ldr q8, [x9, #0x0]\n"
- "ldr q9, [x9, #0x10]\n"
- "ldr q10, [x9, #0x20]\n"
- "ldr q11, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q24, [x22, #0x0]\n"
- "ldr q25, [x22, #0x10]\n"
- "ldr q26, [x22, #0x20]\n"
- "ldr q27, [x22, #0x30]\n"
+ "ldr q8, [x28, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q10, [x28, #0x20]\n"
+ "ldr q11, [x28, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x0]\n"
+ "ldr q17, [x23, #0x10]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "ldr q19, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
+ "ldr q24, [x21, #0x0]\n"
+ "ldr q25, [x21, #0x10]\n"
+ "ldr q26, [x21, #0x20]\n"
+ "ldr q27, [x21, #0x30]\n"
"b 146f\n"
"145:" // Height 5: no accumulate
"movi v8.16b, #0x0\n"
@@ -2038,74 +2038,74 @@ void a64_hybrid_fp32_mla_6x16 (
"movi v26.16b, #0x0\n"
"movi v27.16b, #0x0\n"
"146:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"147:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 148f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 149f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x22, x22, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 149f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
"b 149f\n"
"148:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"149:" // Height 5: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"blt 152f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x8\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q3, [x23, #0x0]\n"
- "ldr q4, [x22, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x8\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
"blt 151f\n"
"150:" // Height 5: Multiply loop: Main loop head
"fmla v8.4s, v6.4s, v0.s[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "add x25, x25, #0x10\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
- "sub x27, x27, #0x4\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "add x23, x23, #0x10\n"
"fmla v20.4s, v6.4s, v3.s[0]\n"
- "add x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "add x22, x22, #0x10\n"
"fmla v24.4s, v6.4s, v4.s[0]\n"
"ldr q6, [x10, #0x20]\n"
+ "add x21, x21, #0x10\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
- "add x23, x23, #0x10\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "sub x26, x26, #0x4\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
+ "cmp x26, #0x8\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
- "add x22, x22, #0x10\n"
- "cmp x27, #0x8\n"
"fmla v21.4s, v7.4s, v3.s[0]\n"
"fmla v25.4s, v7.4s, v4.s[0]\n"
"ldr q7, [x10, #0x30]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"fmla v18.4s, v6.4s, v2.s[0]\n"
"fmla v22.4s, v6.4s, v3.s[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
"fmla v26.4s, v6.4s, v4.s[0]\n"
"ldr q6, [x10, #0x40]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
@@ -2182,45 +2182,45 @@ void a64_hybrid_fp32_mla_6x16 (
"fmla v26.4s, v6.4s, v4.s[3]\n"
"ldr q6, [x10, #0x0]\n"
"fmla v11.4s, v7.4s, v0.s[3]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
"fmla v15.4s, v7.4s, v1.s[3]\n"
- "ldr q1, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
"fmla v19.4s, v7.4s, v2.s[3]\n"
- "ldr q2, [x24, #0x0]\n"
+ "ldr q2, [x23, #0x0]\n"
"fmla v23.4s, v7.4s, v3.s[3]\n"
- "ldr q3, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
"fmla v27.4s, v7.4s, v4.s[3]\n"
- "ldr q4, [x22, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q4, [x21, #0x0]\n"
"bge 150b\n"
"151:" // Height 5: Multiply loop: Single iteration only
"fmla v8.4s, v6.4s, v0.s[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "sub x26, x26, #0x4\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
- "fmla v20.4s, v6.4s, v3.s[0]\n"
"add x24, x24, #0x10\n"
+ "fmla v20.4s, v6.4s, v3.s[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"add x23, x23, #0x10\n"
"fmla v24.4s, v6.4s, v4.s[0]\n"
- "ldr q6, [x10, #0x20]\n"
- "fmla v9.4s, v7.4s, v0.s[0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"add x22, x22, #0x10\n"
+ "fmla v9.4s, v7.4s, v0.s[0]\n"
+ "ldr q6, [x10, #0x20]\n"
+ "add x21, x21, #0x10\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
- "sub x27, x27, #0x4\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
"fmla v21.4s, v7.4s, v3.s[0]\n"
"fmla v25.4s, v7.4s, v4.s[0]\n"
"ldr q7, [x10, #0x30]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
"fmla v18.4s, v6.4s, v2.s[0]\n"
"fmla v22.4s, v6.4s, v3.s[0]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
"fmla v26.4s, v6.4s, v4.s[0]\n"
"ldr q6, [x10, #0x40]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
@@ -2301,18 +2301,18 @@ void a64_hybrid_fp32_mla_6x16 (
"fmla v23.4s, v7.4s, v3.s[3]\n"
"fmla v27.4s, v7.4s, v4.s[3]\n"
"152:" // Height 5: Multiply loop: Main loop skip
- "cbz x27, 154f\n"
+ "cbz x26, 154f\n"
"153:" // Height 5: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr s1, [x25], #0x4\n"
- "sub x27, x27, #0x1\n"
- "ldr s2, [x24], #0x4\n"
- "ldr s3, [x23], #0x4\n"
- "ldr s4, [x22], #0x4\n"
+ "ldr s0, [x25], #0x4\n"
+ "sub x26, x26, #0x1\n"
+ "ldr s1, [x24], #0x4\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr s3, [x22], #0x4\n"
+ "ldr s4, [x21], #0x4\n"
"ldr q6, [x10, #0x0]\n"
"fmla v8.4s, v6.4s, v0.s[0]\n"
- "fmla v12.4s, v6.4s, v1.s[0]\n"
"ldr q7, [x10, #0x10]\n"
+ "fmla v12.4s, v6.4s, v1.s[0]\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
"fmla v20.4s, v6.4s, v3.s[0]\n"
"fmla v24.4s, v6.4s, v4.s[0]\n"
@@ -2334,412 +2334,412 @@ void a64_hybrid_fp32_mla_6x16 (
"fmla v19.4s, v7.4s, v2.s[0]\n"
"fmla v23.4s, v7.4s, v3.s[0]\n"
"fmla v27.4s, v7.4s, v4.s[0]\n"
- "cbnz x27, 153b\n"
+ "cbnz x26, 153b\n"
"154:" // Height 5: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 147b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "prfm pstl1keep, [x25, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x24, x28, x19, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
"tbz %x[flags], #1, 155f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v8.4s, v8.4s, v1.4s\n"
- "fmin v9.4s, v9.4s, v1.4s\n"
- "fmin v10.4s, v10.4s, v1.4s\n"
- "fmin v11.4s, v11.4s, v1.4s\n"
- "fmin v12.4s, v12.4s, v1.4s\n"
- "fmin v13.4s, v13.4s, v1.4s\n"
- "fmin v14.4s, v14.4s, v1.4s\n"
- "fmin v15.4s, v15.4s, v1.4s\n"
- "fmin v16.4s, v16.4s, v1.4s\n"
- "fmin v17.4s, v17.4s, v1.4s\n"
- "fmin v18.4s, v18.4s, v1.4s\n"
- "fmin v19.4s, v19.4s, v1.4s\n"
- "fmin v20.4s, v20.4s, v1.4s\n"
- "fmin v21.4s, v21.4s, v1.4s\n"
- "fmin v22.4s, v22.4s, v1.4s\n"
- "fmin v23.4s, v23.4s, v1.4s\n"
- "fmin v24.4s, v24.4s, v1.4s\n"
- "fmin v25.4s, v25.4s, v1.4s\n"
- "fmin v26.4s, v26.4s, v1.4s\n"
- "fmin v27.4s, v27.4s, v1.4s\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v15.4s, v15.4s, v0.4s\n"
- "fmax v16.4s, v16.4s, v0.4s\n"
- "fmax v17.4s, v17.4s, v0.4s\n"
- "fmax v18.4s, v18.4s, v0.4s\n"
- "fmax v19.4s, v19.4s, v0.4s\n"
- "fmax v20.4s, v20.4s, v0.4s\n"
- "fmax v21.4s, v21.4s, v0.4s\n"
- "fmax v22.4s, v22.4s, v0.4s\n"
- "fmax v23.4s, v23.4s, v0.4s\n"
- "fmax v24.4s, v24.4s, v0.4s\n"
- "fmax v25.4s, v25.4s, v0.4s\n"
- "fmax v26.4s, v26.4s, v0.4s\n"
- "fmax v27.4s, v27.4s, v0.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v8.4s, v8.4s, v0.4s\n"
+ "fmin v9.4s, v9.4s, v0.4s\n"
+ "fmin v10.4s, v10.4s, v0.4s\n"
+ "fmin v11.4s, v11.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v0.4s\n"
+ "fmin v13.4s, v13.4s, v0.4s\n"
+ "fmin v14.4s, v14.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmin v15.4s, v15.4s, v0.4s\n"
+ "fmin v16.4s, v16.4s, v0.4s\n"
+ "fmin v17.4s, v17.4s, v0.4s\n"
+ "fmax v15.4s, v15.4s, v1.4s\n"
+ "fmax v16.4s, v16.4s, v1.4s\n"
+ "fmax v17.4s, v17.4s, v1.4s\n"
+ "fmin v18.4s, v18.4s, v0.4s\n"
+ "fmin v19.4s, v19.4s, v0.4s\n"
+ "fmin v20.4s, v20.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v1.4s\n"
+ "fmax v19.4s, v19.4s, v1.4s\n"
+ "fmax v20.4s, v20.4s, v1.4s\n"
+ "fmin v21.4s, v21.4s, v0.4s\n"
+ "fmin v22.4s, v22.4s, v0.4s\n"
+ "fmin v23.4s, v23.4s, v0.4s\n"
+ "fmax v21.4s, v21.4s, v1.4s\n"
+ "fmax v22.4s, v22.4s, v1.4s\n"
+ "fmax v23.4s, v23.4s, v1.4s\n"
+ "fmin v24.4s, v24.4s, v0.4s\n"
+ "fmin v25.4s, v25.4s, v0.4s\n"
+ "fmin v26.4s, v26.4s, v0.4s\n"
+ "fmax v24.4s, v24.4s, v1.4s\n"
+ "fmax v25.4s, v25.4s, v1.4s\n"
+ "fmax v26.4s, v26.4s, v1.4s\n"
+ "fmin v27.4s, v27.4s, v0.4s\n"
+ "fmax v27.4s, v27.4s, v1.4s\n"
"155:" // Height 5: No activation
"cmp x11, #0x10\n"
"bge 164f\n"
"tbz x11, #3, 159f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v9.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v21.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
- "st1 { v25.4s }, [x22], #0x10\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v9.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v13.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v17.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v21.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
+ "st1 { v25.4s }, [x21], #0x10\n"
"tbz x11, #2, 157f\n"
- "st1 { v10.4s }, [x9], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
- "st1 { v22.4s }, [x23], #0x10\n"
- "st1 { v26.4s }, [x22], #0x10\n"
+ "st1 { v10.4s }, [x28], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v18.4s }, [x23], #0x10\n"
+ "st1 { v22.4s }, [x22], #0x10\n"
+ "st1 { v26.4s }, [x21], #0x10\n"
"tbz x11, #1, 156f\n"
- "str d11, [x9], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
+ "str d11, [x28], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "str d23, [x22], #0x8\n"
+ "str d27, [x21], #0x8\n"
"tbz x11, #0, 163f\n"
- "st1 { v11.s }[2], [x9]\n"
- "st1 { v15.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
- "st1 { v23.s }[2], [x23]\n"
- "st1 { v27.s }[2], [x22]\n"
+ "st1 { v11.s }[2], [x28]\n"
+ "st1 { v15.s }[2], [x24]\n"
+ "st1 { v19.s }[2], [x23]\n"
+ "st1 { v23.s }[2], [x22]\n"
+ "st1 { v27.s }[2], [x21]\n"
"b 163f\n"
"156:" // Height 5: Partial direct writeback: partial_1_12
"tbz x11, #0, 163f\n"
- "str s11, [x9, #0x0]\n"
- "str s15, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
- "str s23, [x23, #0x0]\n"
- "str s27, [x22, #0x0]\n"
+ "str s11, [x28, #0x0]\n"
+ "str s15, [x24, #0x0]\n"
+ "str s19, [x23, #0x0]\n"
+ "str s23, [x22, #0x0]\n"
+ "str s27, [x21, #0x0]\n"
"b 163f\n"
"157:" // Height 5: Partial direct writeback: partial_2_8
"tbz x11, #1, 158f\n"
- "str d10, [x9], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
+ "str d10, [x28], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
+ "str d22, [x22], #0x8\n"
+ "str d26, [x21], #0x8\n"
"tbz x11, #0, 163f\n"
- "st1 { v10.s }[2], [x9]\n"
- "st1 { v14.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
- "st1 { v22.s }[2], [x23]\n"
- "st1 { v26.s }[2], [x22]\n"
+ "st1 { v10.s }[2], [x28]\n"
+ "st1 { v14.s }[2], [x24]\n"
+ "st1 { v18.s }[2], [x23]\n"
+ "st1 { v22.s }[2], [x22]\n"
+ "st1 { v26.s }[2], [x21]\n"
"b 163f\n"
"158:" // Height 5: Partial direct writeback: partial_1_8
"tbz x11, #0, 163f\n"
- "str s10, [x9, #0x0]\n"
- "str s14, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
- "str s22, [x23, #0x0]\n"
- "str s26, [x22, #0x0]\n"
+ "str s10, [x28, #0x0]\n"
+ "str s14, [x24, #0x0]\n"
+ "str s18, [x23, #0x0]\n"
+ "str s22, [x22, #0x0]\n"
+ "str s26, [x21, #0x0]\n"
"b 163f\n"
"159:" // Height 5: Partial direct writeback: partial_4_0
"tbz x11, #2, 161f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
"tbz x11, #1, 160f\n"
- "str d9, [x9], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
+ "str d9, [x28], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
+ "str d21, [x22], #0x8\n"
+ "str d25, [x21], #0x8\n"
"tbz x11, #0, 163f\n"
- "st1 { v9.s }[2], [x9]\n"
- "st1 { v13.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
- "st1 { v21.s }[2], [x23]\n"
- "st1 { v25.s }[2], [x22]\n"
+ "st1 { v9.s }[2], [x28]\n"
+ "st1 { v13.s }[2], [x24]\n"
+ "st1 { v17.s }[2], [x23]\n"
+ "st1 { v21.s }[2], [x22]\n"
+ "st1 { v25.s }[2], [x21]\n"
"b 163f\n"
"160:" // Height 5: Partial direct writeback: partial_1_4
"tbz x11, #0, 163f\n"
- "str s9, [x9, #0x0]\n"
- "str s13, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
- "str s21, [x23, #0x0]\n"
- "str s25, [x22, #0x0]\n"
+ "str s9, [x28, #0x0]\n"
+ "str s13, [x24, #0x0]\n"
+ "str s17, [x23, #0x0]\n"
+ "str s21, [x22, #0x0]\n"
+ "str s25, [x21, #0x0]\n"
"b 163f\n"
"161:" // Height 5: Partial direct writeback: partial_2_0
"tbz x11, #1, 162f\n"
- "str d8, [x9], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
+ "str d8, [x28], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
+ "str d20, [x22], #0x8\n"
+ "str d24, [x21], #0x8\n"
"tbz x11, #0, 163f\n"
- "st1 { v8.s }[2], [x9]\n"
- "st1 { v12.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
- "st1 { v20.s }[2], [x23]\n"
- "st1 { v24.s }[2], [x22]\n"
+ "st1 { v8.s }[2], [x28]\n"
+ "st1 { v12.s }[2], [x24]\n"
+ "st1 { v16.s }[2], [x23]\n"
+ "st1 { v20.s }[2], [x22]\n"
+ "st1 { v24.s }[2], [x21]\n"
"b 163f\n"
"162:" // Height 5: Partial direct writeback: partial_1_0
- "str s8, [x9, #0x0]\n"
- "str s12, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
- "str s20, [x23, #0x0]\n"
- "str s24, [x22, #0x0]\n"
+ "str s8, [x28, #0x0]\n"
+ "str s12, [x24, #0x0]\n"
+ "str s16, [x23, #0x0]\n"
+ "str s20, [x22, #0x0]\n"
+ "str s24, [x21, #0x0]\n"
"163:" // Height 5: Partial direct writeback: Done
"b 165f\n"
"164:" // Height 5: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
- "str q24, [x22, #0x0]\n"
- "str q25, [x22, #0x10]\n"
- "str q26, [x22, #0x20]\n"
- "str q27, [x22, #0x30]\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "str q20, [x22, #0x0]\n"
+ "str q21, [x22, #0x10]\n"
+ "str q22, [x22, #0x20]\n"
+ "str q23, [x22, #0x30]\n"
+ "str q24, [x21, #0x0]\n"
+ "str q25, [x21, #0x10]\n"
+ "str q26, [x21, #0x20]\n"
+ "str q27, [x21, #0x30]\n"
"165:" // Height 5: Writeback done
"subs x11, x11, #0x10\n"
"bgt 134b\n"
"b 200f\n"
"166:" // Height 6
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x18\n"
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x19, #0x18\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"167:" // Height 6: Column loop
- "cbz x12, 168f\n"
- "ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
+ "cbz x9, 168f\n"
+ "ldr q8, [x9, #0x0]\n"
"mov v12.16b, v8.16b\n"
+ "ldr q9, [x9, #0x10]\n"
+ "mov v16.16b, v8.16b\n"
+ "ldr q10, [x9, #0x20]\n"
+ "mov v20.16b, v8.16b\n"
+ "ldr q11, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "mov v24.16b, v8.16b\n"
+ "mov v28.16b, v8.16b\n"
"mov v13.16b, v9.16b\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
"mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
- "mov v16.16b, v8.16b\n"
"mov v17.16b, v9.16b\n"
- "add x12, x12, #0x40\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
- "mov v20.16b, v8.16b\n"
"mov v21.16b, v9.16b\n"
"mov v22.16b, v10.16b\n"
"mov v23.16b, v11.16b\n"
- "mov v24.16b, v8.16b\n"
"mov v25.16b, v9.16b\n"
"mov v26.16b, v10.16b\n"
"mov v27.16b, v11.16b\n"
- "mov v28.16b, v8.16b\n"
"mov v29.16b, v9.16b\n"
"mov v30.16b, v10.16b\n"
"mov v31.16b, v11.16b\n"
"b 179f\n"
"168:" // Height 6: no bias
"tbz %x[flags], #0, 178f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x11, #0x10\n"
- "add x21, x22, x20, LSL #2\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"bge 177f\n"
"tbz x11, #3, 172f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
- "ld1 { v29.4s }, [x21], #0x10\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v24.4s }, [x21], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
+ "ld1 { v28.4s }, [x20], #0x10\n"
+ "ld1 { v29.4s }, [x20], #0x10\n"
"tbz x11, #2, 170f\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
- "ld1 { v26.4s }, [x22], #0x10\n"
- "ld1 { v30.4s }, [x21], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
+ "ld1 { v26.4s }, [x21], #0x10\n"
+ "ld1 { v30.4s }, [x20], #0x10\n"
"tbz x11, #1, 169f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "mov x20, #0x38\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
- "ldr d31, [x21], #0x8\n"
+ "ldr d11, [x28], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d15, [x24], #0x8\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "ldr d27, [x21], #0x8\n"
+ "ldr d31, [x20], #0x8\n"
"tbz x11, #0, 176f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
- "ld1 { v27.s }[2], [x22]\n"
- "ld1 { v31.s }[2], [x21]\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v23.s }[2], [x22]\n"
+ "ld1 { v27.s }[2], [x21]\n"
+ "ld1 { v31.s }[2], [x20]\n"
"b 176f\n"
"169:" // Height 6: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x11, #0, 176f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
- "ldr s27, [x22, #0x0]\n"
- "ldr s31, [x21, #0x0]\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s23, [x22, #0x0]\n"
+ "ldr s27, [x21, #0x0]\n"
+ "ldr s31, [x20, #0x0]\n"
"b 176f\n"
"170:" // Height 6: Partial accumulate: partial_2_8
"tbz x11, #1, 171f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "mov x20, #0x28\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
- "ldr d30, [x21], #0x8\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "ldr d26, [x21], #0x8\n"
+ "ldr d30, [x20], #0x8\n"
"tbz x11, #0, 176f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
- "ld1 { v26.s }[2], [x22]\n"
- "ld1 { v30.s }[2], [x21]\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v22.s }[2], [x22]\n"
+ "ld1 { v26.s }[2], [x21]\n"
+ "ld1 { v30.s }[2], [x20]\n"
"b 176f\n"
"171:" // Height 6: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x11, #0, 176f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
- "ldr s26, [x22, #0x0]\n"
- "ldr s30, [x21, #0x0]\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "ldr s22, [x22, #0x0]\n"
+ "ldr s26, [x21, #0x0]\n"
+ "ldr s30, [x20, #0x0]\n"
"b 176f\n"
"172:" // Height 6: Partial accumulate: partial_4_0
"tbz x11, #2, 174f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v24.4s }, [x21], #0x10\n"
+ "ld1 { v28.4s }, [x20], #0x10\n"
"tbz x11, #1, 173f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "mov x20, #0x18\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
- "ldr d29, [x21], #0x8\n"
+ "ldr d9, [x28], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d13, [x24], #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d25, [x21], #0x8\n"
+ "ldr d29, [x20], #0x8\n"
"tbz x11, #0, 176f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
- "ld1 { v29.s }[2], [x21]\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v21.s }[2], [x22]\n"
+ "ld1 { v25.s }[2], [x21]\n"
+ "ld1 { v29.s }[2], [x20]\n"
"b 176f\n"
"173:" // Height 6: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x11, #0, 176f\n"
- "ldr s9, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
- "ldr s29, [x21, #0x0]\n"
+ "ldr s9, [x28, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s17, [x23, #0x0]\n"
+ "ldr s21, [x22, #0x0]\n"
+ "ldr s25, [x21, #0x0]\n"
+ "ldr s29, [x20, #0x0]\n"
"b 176f\n"
"174:" // Height 6: Partial accumulate: partial_2_0
"tbz x11, #1, 175f\n"
- "ldr d8, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d24, [x22], #0x8\n"
- "ldr d28, [x21], #0x8\n"
+ "ldr d8, [x28], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d16, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "ldr d24, [x21], #0x8\n"
+ "ldr d28, [x20], #0x8\n"
"tbz x11, #0, 176f\n"
- "ld1 { v8.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v16.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
- "ld1 { v24.s }[2], [x22]\n"
- "ld1 { v28.s }[2], [x21]\n"
+ "ld1 { v8.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v16.s }[2], [x23]\n"
+ "ld1 { v20.s }[2], [x22]\n"
+ "ld1 { v24.s }[2], [x21]\n"
+ "ld1 { v28.s }[2], [x20]\n"
"b 176f\n"
"175:" // Height 6: Partial accumulate: partial_1_0
- "ldr s8, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s16, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
- "ldr s24, [x22, #0x0]\n"
- "ldr s28, [x21, #0x0]\n"
+ "ldr s8, [x28, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s12, [x24, #0x0]\n"
+ "ldr s16, [x23, #0x0]\n"
+ "ldr s20, [x22, #0x0]\n"
+ "ldr s24, [x21, #0x0]\n"
+ "ldr s28, [x20, #0x0]\n"
"176:" // Height 6: Partial accumulate: Done
- "sub x9, x9, x20\n"
+ "sub x28, x28, x19\n"
"b 179f\n"
"177:" // Height 6: full accumulate
- "ldr q8, [x9, #0x0]\n"
- "ldr q9, [x9, #0x10]\n"
- "ldr q10, [x9, #0x20]\n"
- "ldr q11, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q24, [x22, #0x0]\n"
- "ldr q25, [x22, #0x10]\n"
- "ldr q26, [x22, #0x20]\n"
- "ldr q27, [x22, #0x30]\n"
- "ldr q28, [x21, #0x0]\n"
- "ldr q29, [x21, #0x10]\n"
- "ldr q30, [x21, #0x20]\n"
- "ldr q31, [x21, #0x30]\n"
+ "ldr q8, [x28, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q10, [x28, #0x20]\n"
+ "ldr q11, [x28, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x0]\n"
+ "ldr q17, [x23, #0x10]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "ldr q19, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
+ "ldr q24, [x21, #0x0]\n"
+ "ldr q25, [x21, #0x10]\n"
+ "ldr q26, [x21, #0x20]\n"
+ "ldr q27, [x21, #0x30]\n"
+ "ldr q28, [x20, #0x0]\n"
+ "ldr q29, [x20, #0x10]\n"
+ "ldr q30, [x20, #0x20]\n"
+ "ldr q31, [x20, #0x30]\n"
"b 179f\n"
"178:" // Height 6: no accumulate
"movi v8.16b, #0x0\n"
@@ -2767,82 +2767,82 @@ void a64_hybrid_fp32_mla_6x16 (
"movi v30.16b, #0x0\n"
"movi v31.16b, #0x0\n"
"179:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"180:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 181f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 182f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x22, x22, x20, LSL #2\n"
- "add x21, x21, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 182f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "add x20, x20, x19, LSL #2\n"
"b 182f\n"
"181:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"182:" // Height 6: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"blt 185f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x8\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q3, [x23, #0x0]\n"
- "ldr q4, [x22, #0x0]\n"
- "ldr q5, [x21, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x8\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
+ "ldr q5, [x20, #0x0]\n"
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
"blt 184f\n"
"183:" // Height 6: Multiply loop: Main loop head
"fmla v8.4s, v6.4s, v0.s[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "add x25, x25, #0x10\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
- "sub x27, x27, #0x4\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "add x23, x23, #0x10\n"
"fmla v20.4s, v6.4s, v3.s[0]\n"
- "add x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "add x22, x22, #0x10\n"
"fmla v24.4s, v6.4s, v4.s[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "add x21, x21, #0x10\n"
"fmla v28.4s, v6.4s, v5.s[0]\n"
"ldr q6, [x10, #0x20]\n"
- "add x23, x23, #0x10\n"
+ "add x20, x20, #0x10\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
+ "sub x26, x26, #0x4\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
- "add x22, x22, #0x10\n"
- "add x21, x21, #0x10\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
+ "cmp x26, #0x8\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
"fmla v21.4s, v7.4s, v3.s[0]\n"
- "cmp x27, #0x8\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"fmla v25.4s, v7.4s, v4.s[0]\n"
"fmla v29.4s, v7.4s, v5.s[0]\n"
"ldr q7, [x10, #0x30]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
"fmla v18.4s, v6.4s, v2.s[0]\n"
"fmla v22.4s, v6.4s, v3.s[0]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
"fmla v26.4s, v6.4s, v4.s[0]\n"
"fmla v30.4s, v6.4s, v5.s[0]\n"
"ldr q6, [x10, #0x40]\n"
@@ -2932,51 +2932,51 @@ void a64_hybrid_fp32_mla_6x16 (
"fmla v30.4s, v6.4s, v5.s[3]\n"
"ldr q6, [x10, #0x0]\n"
"fmla v11.4s, v7.4s, v0.s[3]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
"fmla v15.4s, v7.4s, v1.s[3]\n"
- "ldr q1, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
"fmla v19.4s, v7.4s, v2.s[3]\n"
- "ldr q2, [x24, #0x0]\n"
+ "ldr q2, [x23, #0x0]\n"
"fmla v23.4s, v7.4s, v3.s[3]\n"
- "ldr q3, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
"fmla v27.4s, v7.4s, v4.s[3]\n"
- "ldr q4, [x22, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
"fmla v31.4s, v7.4s, v5.s[3]\n"
- "ldr q5, [x21, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q5, [x20, #0x0]\n"
"bge 183b\n"
"184:" // Height 6: Multiply loop: Single iteration only
"fmla v8.4s, v6.4s, v0.s[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "sub x26, x26, #0x4\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
- "fmla v20.4s, v6.4s, v3.s[0]\n"
"add x24, x24, #0x10\n"
+ "fmla v20.4s, v6.4s, v3.s[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"add x23, x23, #0x10\n"
"fmla v24.4s, v6.4s, v4.s[0]\n"
- "fmla v28.4s, v6.4s, v5.s[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"add x22, x22, #0x10\n"
+ "fmla v28.4s, v6.4s, v5.s[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "add x21, x21, #0x10\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
+ "ldr q6, [x10, #0x20]\n"
+ "add x20, x20, #0x10\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
- "add x21, x21, #0x10\n"
- "sub x27, x27, #0x4\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
"fmla v21.4s, v7.4s, v3.s[0]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"fmla v25.4s, v7.4s, v4.s[0]\n"
"fmla v29.4s, v7.4s, v5.s[0]\n"
"ldr q7, [x10, #0x30]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
"fmla v18.4s, v6.4s, v2.s[0]\n"
"fmla v22.4s, v6.4s, v3.s[0]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
"fmla v26.4s, v6.4s, v4.s[0]\n"
"fmla v30.4s, v6.4s, v5.s[0]\n"
"ldr q6, [x10, #0x40]\n"
@@ -3071,18 +3071,18 @@ void a64_hybrid_fp32_mla_6x16 (
"fmla v27.4s, v7.4s, v4.s[3]\n"
"fmla v31.4s, v7.4s, v5.s[3]\n"
"185:" // Height 6: Multiply loop: Main loop skip
- "cbz x27, 187f\n"
+ "cbz x26, 187f\n"
"186:" // Height 6: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr s1, [x25], #0x4\n"
- "sub x27, x27, #0x1\n"
- "ldr s2, [x24], #0x4\n"
- "ldr s3, [x23], #0x4\n"
- "ldr s4, [x22], #0x4\n"
- "ldr s5, [x21], #0x4\n"
+ "ldr s0, [x25], #0x4\n"
+ "sub x26, x26, #0x1\n"
+ "ldr s1, [x24], #0x4\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr s3, [x22], #0x4\n"
+ "ldr s4, [x21], #0x4\n"
+ "ldr s5, [x20], #0x4\n"
"ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
"fmla v8.4s, v6.4s, v0.s[0]\n"
+ "ldr q7, [x10, #0x10]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
"fmla v20.4s, v6.4s, v3.s[0]\n"
@@ -3109,251 +3109,251 @@ void a64_hybrid_fp32_mla_6x16 (
"fmla v23.4s, v7.4s, v3.s[0]\n"
"fmla v27.4s, v7.4s, v4.s[0]\n"
"fmla v31.4s, v7.4s, v5.s[0]\n"
- "cbnz x27, 186b\n"
+ "cbnz x26, 186b\n"
"187:" // Height 6: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 180b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "prfm pstl1keep, [x25, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x24, x28, x19, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
- "add x21, x22, x20, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #2\n"
"prfm pstl1keep, [x21, #0x0]\n"
+ "add x20, x21, x19, LSL #2\n"
+ "prfm pstl1keep, [x20, #0x0]\n"
"tbz %x[flags], #1, 188f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v8.4s, v8.4s, v1.4s\n"
- "fmin v9.4s, v9.4s, v1.4s\n"
- "fmin v10.4s, v10.4s, v1.4s\n"
- "fmin v11.4s, v11.4s, v1.4s\n"
- "fmin v12.4s, v12.4s, v1.4s\n"
- "fmin v13.4s, v13.4s, v1.4s\n"
- "fmin v14.4s, v14.4s, v1.4s\n"
- "fmin v15.4s, v15.4s, v1.4s\n"
- "fmin v16.4s, v16.4s, v1.4s\n"
- "fmin v17.4s, v17.4s, v1.4s\n"
- "fmin v18.4s, v18.4s, v1.4s\n"
- "fmin v19.4s, v19.4s, v1.4s\n"
- "fmin v20.4s, v20.4s, v1.4s\n"
- "fmin v21.4s, v21.4s, v1.4s\n"
- "fmin v22.4s, v22.4s, v1.4s\n"
- "fmin v23.4s, v23.4s, v1.4s\n"
- "fmin v24.4s, v24.4s, v1.4s\n"
- "fmin v25.4s, v25.4s, v1.4s\n"
- "fmin v26.4s, v26.4s, v1.4s\n"
- "fmin v27.4s, v27.4s, v1.4s\n"
- "fmin v28.4s, v28.4s, v1.4s\n"
- "fmin v29.4s, v29.4s, v1.4s\n"
- "fmin v30.4s, v30.4s, v1.4s\n"
- "fmin v31.4s, v31.4s, v1.4s\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v15.4s, v15.4s, v0.4s\n"
- "fmax v16.4s, v16.4s, v0.4s\n"
- "fmax v17.4s, v17.4s, v0.4s\n"
- "fmax v18.4s, v18.4s, v0.4s\n"
- "fmax v19.4s, v19.4s, v0.4s\n"
- "fmax v20.4s, v20.4s, v0.4s\n"
- "fmax v21.4s, v21.4s, v0.4s\n"
- "fmax v22.4s, v22.4s, v0.4s\n"
- "fmax v23.4s, v23.4s, v0.4s\n"
- "fmax v24.4s, v24.4s, v0.4s\n"
- "fmax v25.4s, v25.4s, v0.4s\n"
- "fmax v26.4s, v26.4s, v0.4s\n"
- "fmax v27.4s, v27.4s, v0.4s\n"
- "fmax v28.4s, v28.4s, v0.4s\n"
- "fmax v29.4s, v29.4s, v0.4s\n"
- "fmax v30.4s, v30.4s, v0.4s\n"
- "fmax v31.4s, v31.4s, v0.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v8.4s, v8.4s, v0.4s\n"
+ "fmin v9.4s, v9.4s, v0.4s\n"
+ "fmin v10.4s, v10.4s, v0.4s\n"
+ "fmin v11.4s, v11.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v0.4s\n"
+ "fmin v13.4s, v13.4s, v0.4s\n"
+ "fmin v14.4s, v14.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmin v15.4s, v15.4s, v0.4s\n"
+ "fmin v16.4s, v16.4s, v0.4s\n"
+ "fmin v17.4s, v17.4s, v0.4s\n"
+ "fmax v15.4s, v15.4s, v1.4s\n"
+ "fmax v16.4s, v16.4s, v1.4s\n"
+ "fmax v17.4s, v17.4s, v1.4s\n"
+ "fmin v18.4s, v18.4s, v0.4s\n"
+ "fmin v19.4s, v19.4s, v0.4s\n"
+ "fmin v20.4s, v20.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v1.4s\n"
+ "fmax v19.4s, v19.4s, v1.4s\n"
+ "fmax v20.4s, v20.4s, v1.4s\n"
+ "fmin v21.4s, v21.4s, v0.4s\n"
+ "fmin v22.4s, v22.4s, v0.4s\n"
+ "fmin v23.4s, v23.4s, v0.4s\n"
+ "fmax v21.4s, v21.4s, v1.4s\n"
+ "fmax v22.4s, v22.4s, v1.4s\n"
+ "fmax v23.4s, v23.4s, v1.4s\n"
+ "fmin v24.4s, v24.4s, v0.4s\n"
+ "fmin v25.4s, v25.4s, v0.4s\n"
+ "fmin v26.4s, v26.4s, v0.4s\n"
+ "fmax v24.4s, v24.4s, v1.4s\n"
+ "fmax v25.4s, v25.4s, v1.4s\n"
+ "fmax v26.4s, v26.4s, v1.4s\n"
+ "fmin v27.4s, v27.4s, v0.4s\n"
+ "fmin v28.4s, v28.4s, v0.4s\n"
+ "fmin v29.4s, v29.4s, v0.4s\n"
+ "fmax v27.4s, v27.4s, v1.4s\n"
+ "fmax v28.4s, v28.4s, v1.4s\n"
+ "fmax v29.4s, v29.4s, v1.4s\n"
+ "fmin v30.4s, v30.4s, v0.4s\n"
+ "fmin v31.4s, v31.4s, v0.4s\n"
+ "fmax v30.4s, v30.4s, v1.4s\n"
+ "fmax v31.4s, v31.4s, v1.4s\n"
"188:" // Height 6: No activation
"cmp x11, #0x10\n"
"bge 197f\n"
"tbz x11, #3, 192f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v9.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v21.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
- "st1 { v25.4s }, [x22], #0x10\n"
- "st1 { v28.4s }, [x21], #0x10\n"
- "st1 { v29.4s }, [x21], #0x10\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v9.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v13.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v17.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v21.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
+ "st1 { v25.4s }, [x21], #0x10\n"
+ "st1 { v28.4s }, [x20], #0x10\n"
+ "st1 { v29.4s }, [x20], #0x10\n"
"tbz x11, #2, 190f\n"
- "st1 { v10.4s }, [x9], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
- "st1 { v22.4s }, [x23], #0x10\n"
- "st1 { v26.4s }, [x22], #0x10\n"
- "st1 { v30.4s }, [x21], #0x10\n"
+ "st1 { v10.4s }, [x28], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v18.4s }, [x23], #0x10\n"
+ "st1 { v22.4s }, [x22], #0x10\n"
+ "st1 { v26.4s }, [x21], #0x10\n"
+ "st1 { v30.4s }, [x20], #0x10\n"
"tbz x11, #1, 189f\n"
- "str d11, [x9], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
- "str d31, [x21], #0x8\n"
+ "str d11, [x28], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "str d23, [x22], #0x8\n"
+ "str d27, [x21], #0x8\n"
+ "str d31, [x20], #0x8\n"
"tbz x11, #0, 196f\n"
- "st1 { v11.s }[2], [x9]\n"
- "st1 { v15.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
- "st1 { v23.s }[2], [x23]\n"
- "st1 { v27.s }[2], [x22]\n"
- "st1 { v31.s }[2], [x21]\n"
+ "st1 { v11.s }[2], [x28]\n"
+ "st1 { v15.s }[2], [x24]\n"
+ "st1 { v19.s }[2], [x23]\n"
+ "st1 { v23.s }[2], [x22]\n"
+ "st1 { v27.s }[2], [x21]\n"
+ "st1 { v31.s }[2], [x20]\n"
"b 196f\n"
"189:" // Height 6: Partial direct writeback: partial_1_12
"tbz x11, #0, 196f\n"
- "str s11, [x9, #0x0]\n"
- "str s15, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
- "str s23, [x23, #0x0]\n"
- "str s27, [x22, #0x0]\n"
- "str s31, [x21, #0x0]\n"
+ "str s11, [x28, #0x0]\n"
+ "str s15, [x24, #0x0]\n"
+ "str s19, [x23, #0x0]\n"
+ "str s23, [x22, #0x0]\n"
+ "str s27, [x21, #0x0]\n"
+ "str s31, [x20, #0x0]\n"
"b 196f\n"
"190:" // Height 6: Partial direct writeback: partial_2_8
"tbz x11, #1, 191f\n"
- "str d10, [x9], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
- "str d30, [x21], #0x8\n"
+ "str d10, [x28], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
+ "str d22, [x22], #0x8\n"
+ "str d26, [x21], #0x8\n"
+ "str d30, [x20], #0x8\n"
"tbz x11, #0, 196f\n"
- "st1 { v10.s }[2], [x9]\n"
- "st1 { v14.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
- "st1 { v22.s }[2], [x23]\n"
- "st1 { v26.s }[2], [x22]\n"
- "st1 { v30.s }[2], [x21]\n"
+ "st1 { v10.s }[2], [x28]\n"
+ "st1 { v14.s }[2], [x24]\n"
+ "st1 { v18.s }[2], [x23]\n"
+ "st1 { v22.s }[2], [x22]\n"
+ "st1 { v26.s }[2], [x21]\n"
+ "st1 { v30.s }[2], [x20]\n"
"b 196f\n"
"191:" // Height 6: Partial direct writeback: partial_1_8
"tbz x11, #0, 196f\n"
- "str s10, [x9, #0x0]\n"
- "str s14, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
- "str s22, [x23, #0x0]\n"
- "str s26, [x22, #0x0]\n"
- "str s30, [x21, #0x0]\n"
+ "str s10, [x28, #0x0]\n"
+ "str s14, [x24, #0x0]\n"
+ "str s18, [x23, #0x0]\n"
+ "str s22, [x22, #0x0]\n"
+ "str s26, [x21, #0x0]\n"
+ "str s30, [x20, #0x0]\n"
"b 196f\n"
"192:" // Height 6: Partial direct writeback: partial_4_0
"tbz x11, #2, 194f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
- "st1 { v28.4s }, [x21], #0x10\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
+ "st1 { v28.4s }, [x20], #0x10\n"
"tbz x11, #1, 193f\n"
- "str d9, [x9], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
- "str d29, [x21], #0x8\n"
+ "str d9, [x28], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
+ "str d21, [x22], #0x8\n"
+ "str d25, [x21], #0x8\n"
+ "str d29, [x20], #0x8\n"
"tbz x11, #0, 196f\n"
- "st1 { v9.s }[2], [x9]\n"
- "st1 { v13.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
- "st1 { v21.s }[2], [x23]\n"
- "st1 { v25.s }[2], [x22]\n"
- "st1 { v29.s }[2], [x21]\n"
+ "st1 { v9.s }[2], [x28]\n"
+ "st1 { v13.s }[2], [x24]\n"
+ "st1 { v17.s }[2], [x23]\n"
+ "st1 { v21.s }[2], [x22]\n"
+ "st1 { v25.s }[2], [x21]\n"
+ "st1 { v29.s }[2], [x20]\n"
"b 196f\n"
"193:" // Height 6: Partial direct writeback: partial_1_4
"tbz x11, #0, 196f\n"
- "str s9, [x9, #0x0]\n"
- "str s13, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
- "str s21, [x23, #0x0]\n"
- "str s25, [x22, #0x0]\n"
- "str s29, [x21, #0x0]\n"
+ "str s9, [x28, #0x0]\n"
+ "str s13, [x24, #0x0]\n"
+ "str s17, [x23, #0x0]\n"
+ "str s21, [x22, #0x0]\n"
+ "str s25, [x21, #0x0]\n"
+ "str s29, [x20, #0x0]\n"
"b 196f\n"
"194:" // Height 6: Partial direct writeback: partial_2_0
"tbz x11, #1, 195f\n"
- "str d8, [x9], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
- "str d28, [x21], #0x8\n"
+ "str d8, [x28], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
+ "str d20, [x22], #0x8\n"
+ "str d24, [x21], #0x8\n"
+ "str d28, [x20], #0x8\n"
"tbz x11, #0, 196f\n"
- "st1 { v8.s }[2], [x9]\n"
- "st1 { v12.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
- "st1 { v20.s }[2], [x23]\n"
- "st1 { v24.s }[2], [x22]\n"
- "st1 { v28.s }[2], [x21]\n"
+ "st1 { v8.s }[2], [x28]\n"
+ "st1 { v12.s }[2], [x24]\n"
+ "st1 { v16.s }[2], [x23]\n"
+ "st1 { v20.s }[2], [x22]\n"
+ "st1 { v24.s }[2], [x21]\n"
+ "st1 { v28.s }[2], [x20]\n"
"b 196f\n"
"195:" // Height 6: Partial direct writeback: partial_1_0
- "str s8, [x9, #0x0]\n"
- "str s12, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
- "str s20, [x23, #0x0]\n"
- "str s24, [x22, #0x0]\n"
- "str s28, [x21, #0x0]\n"
+ "str s8, [x28, #0x0]\n"
+ "str s12, [x24, #0x0]\n"
+ "str s16, [x23, #0x0]\n"
+ "str s20, [x22, #0x0]\n"
+ "str s24, [x21, #0x0]\n"
+ "str s28, [x20, #0x0]\n"
"196:" // Height 6: Partial direct writeback: Done
"b 198f\n"
"197:" // Height 6: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
- "str q24, [x22, #0x0]\n"
- "str q25, [x22, #0x10]\n"
- "str q26, [x22, #0x20]\n"
- "str q27, [x22, #0x30]\n"
- "str q28, [x21, #0x0]\n"
- "str q29, [x21, #0x10]\n"
- "str q30, [x21, #0x20]\n"
- "str q31, [x21, #0x30]\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "str q20, [x22, #0x0]\n"
+ "str q21, [x22, #0x10]\n"
+ "str q22, [x22, #0x20]\n"
+ "str q23, [x22, #0x30]\n"
+ "str q24, [x21, #0x0]\n"
+ "str q25, [x21, #0x10]\n"
+ "str q26, [x21, #0x20]\n"
+ "str q27, [x21, #0x30]\n"
+ "str q28, [x20, #0x0]\n"
+ "str q29, [x20, #0x10]\n"
+ "str q30, [x20, #0x20]\n"
+ "str q31, [x20, #0x30]\n"
"198:" // Height 6: Writeback done
"subs x11, x11, #0x10\n"
"bgt 167b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 200f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 199f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"199:" // Update direct input
- "mov x20, #0x18\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x18\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"200:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_8x4/a55.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_8x4/a55.cpp
index e2dae4b414..99920002b2 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_8x4/a55.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_8x4/a55.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -120,15 +120,15 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"bge 6f\n"
"tbz x17, #1, 4f\n"
"ldr d24, [x14], #0x8\n"
- "mov x8, #0x8\n"
+ "mov x19, #0x8\n"
"tbz x17, #0, 5f\n"
"ld1 { v24.s }[2], [x14]\n"
"b 5f\n"
"4:" // Height 1: Partial accumulate: partial_1_0
"ldr s24, [x14, #0x0]\n"
- "mov x8, #0x0\n"
+ "mov x19, #0x0\n"
"5:" // Height 1: Partial accumulate: Done
- "sub x14, x14, x8\n"
+ "sub x14, x14, x19\n"
"b 8f\n"
"6:" // Height 1: full accumulate
"ldr q24, [x14, #0x0]\n"
@@ -139,15 +139,15 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"mov x13, #0x0\n"
"9:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr x8, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"ldr w12, [x20, x13, LSL #0x2]\n"
"tbz %x[flags], #3, 10f\n"
"ldr x20, [%x[input_ptr], x13, LSL #0x3]\n"
- "add x20, x20, x8, LSL #3\n"
+ "add x20, x20, x19, LSL #3\n"
"ldr x11, [x20, #0x0]\n"
"cbnz x13, 11f\n"
- "ldr x8, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x11, x11, x8, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x11, x11, x19, LSL #2\n"
"b 11f\n"
"10:" // Height 1: setup direct input
"mov x11, %x[input_ptr]\n"
@@ -161,20 +161,20 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"12:" // Height 1: Multiply loop: Main loop head
"fmla v24.4s, v8.4s, v0.s[0]\n"
"ldr d9, [x16, #0x10]\n"
- "ldr x8, [x16, #0x18]\n"
+ "ldr x19, [x16, #0x18]\n"
"add x11, x11, #0x10\n"
"ldr d10, [x16, #0x20]\n"
"sub x12, x12, #0x4\n"
"ldr x21, [x16, #0x28]\n"
"cmp x12, #0x8\n"
- "mov v9.d[1], x8\n"
+ "mov v9.d[1], x19\n"
"ldr d11, [x16, #0x30]\n"
- "ldr x8, [x16, #0x38]\n"
+ "ldr x19, [x16, #0x38]\n"
"add x16, x16, #0x40\n"
"fmla v24.4s, v9.4s, v0.s[1]\n"
"mov v10.d[1], x21\n"
"prfm pldl1keep, [x11, #0x80]\n"
- "mov v11.d[1], x8\n"
+ "mov v11.d[1], x19\n"
"ldr d8, [x16, #0x0]\n"
"ldr x26, [x16, #0x8]\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
@@ -208,14 +208,14 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"16:" // Height 1: Multiply loop: No odd multiplies
"ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x13, x13, #0x1\n"
- "cmp x13, x8\n"
+ "cmp x13, x19\n"
"bne 9b\n"
"prfm pstl1keep, [x14, #0x0]\n"
"tbz %x[flags], #1, 17f\n"
- "add x8, %x[args_ptr], %[offset_min]\n"
- "ld1r { v17.4s }, [x8]\n"
- "add x8, %x[args_ptr], %[offset_max]\n"
- "ld1r { v16.4s }, [x8]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v16.4s }, [x19]\n"
"fmin v24.4s, v24.4s, v16.4s\n"
"fmax v24.4s, v24.4s, v17.4s\n"
"17:" // Height 1: No activation
@@ -250,24 +250,24 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"b 29f\n"
"24:" // Height 2: no bias
"tbz %x[flags], #0, 28f\n"
- "ldr x8, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x17, #0x4\n"
- "add x27, x14, x8, LSL #2\n"
+ "add x27, x14, x19, LSL #2\n"
"bge 27f\n"
"tbz x17, #1, 25f\n"
"ldr d24, [x14], #0x8\n"
"ldr d25, [x27], #0x8\n"
- "mov x8, #0x8\n"
+ "mov x19, #0x8\n"
"tbz x17, #0, 26f\n"
"ld1 { v24.s }[2], [x14]\n"
"ld1 { v25.s }[2], [x27]\n"
"b 26f\n"
"25:" // Height 2: Partial accumulate: partial_1_0
"ldr s24, [x14, #0x0]\n"
- "mov x8, #0x0\n"
+ "mov x19, #0x0\n"
"ldr s25, [x27, #0x0]\n"
"26:" // Height 2: Partial accumulate: Done
- "sub x14, x14, x8\n"
+ "sub x14, x14, x19\n"
"b 29f\n"
"27:" // Height 2: full accumulate
"ldr q24, [x14, #0x0]\n"
@@ -280,21 +280,21 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"mov x13, #0x0\n"
"30:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr x8, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"ldr w12, [x20, x13, LSL #0x2]\n"
"tbz %x[flags], #3, 31f\n"
"ldr x20, [%x[input_ptr], x13, LSL #0x3]\n"
- "add x20, x20, x8, LSL #3\n"
+ "add x20, x20, x19, LSL #3\n"
"ldr x11, [x20, #0x0]\n"
"ldr x9, [x20, #0x8]\n"
"cbnz x13, 32f\n"
- "ldr x8, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x11, x11, x8, LSL #2\n"
- "add x9, x9, x8, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x11, x11, x19, LSL #2\n"
+ "add x9, x9, x19, LSL #2\n"
"b 32f\n"
"31:" // Height 2: setup direct input
"mov x11, %x[input_ptr]\n"
- "add x9, x11, x8, LSL #2\n"
+ "add x9, x11, x19, LSL #2\n"
"32:" // Height 2: input setup done
"cmp x12, #0x4\n"
"blt 35f\n"
@@ -307,20 +307,20 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"fmla v24.4s, v8.4s, v0.s[0]\n"
"ldr d9, [x16, #0x10]\n"
"fmla v25.4s, v8.4s, v1.s[0]\n"
- "ldr x8, [x16, #0x18]\n"
+ "ldr x19, [x16, #0x18]\n"
"ldr d10, [x16, #0x20]\n"
"add x11, x11, #0x10\n"
"ldr x21, [x16, #0x28]\n"
"add x9, x9, #0x10\n"
- "mov v9.d[1], x8\n"
+ "mov v9.d[1], x19\n"
"ldr d11, [x16, #0x30]\n"
- "ldr x8, [x16, #0x38]\n"
+ "ldr x19, [x16, #0x38]\n"
"sub x12, x12, #0x4\n"
"fmla v24.4s, v9.4s, v0.s[1]\n"
"mov v10.d[1], x21\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
"prfm pldl1keep, [x11, #0x80]\n"
- "mov v11.d[1], x8\n"
+ "mov v11.d[1], x19\n"
"prfm pldl1keep, [x9, #0x80]\n"
"ldr x10, [x11, #0x8]\n"
"cmp x12, #0x8\n"
@@ -370,17 +370,17 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"37:" // Height 2: Multiply loop: No odd multiplies
"ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x13, x13, #0x1\n"
- "cmp x13, x8\n"
+ "cmp x13, x19\n"
"bne 30b\n"
- "ldr x8, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"prfm pstl1keep, [x14, #0x0]\n"
- "add x27, x14, x8, LSL #2\n"
+ "add x27, x14, x19, LSL #2\n"
"prfm pstl1keep, [x27, #0x0]\n"
"tbz %x[flags], #1, 38f\n"
"add x20, %x[args_ptr], %[offset_min]\n"
- "add x8, %x[args_ptr], %[offset_max]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
"ld1r { v17.4s }, [x20]\n"
- "ld1r { v16.4s }, [x8]\n"
+ "ld1r { v16.4s }, [x19]\n"
"fmin v24.4s, v24.4s, v16.4s\n"
"fmin v25.4s, v25.4s, v16.4s\n"
"fmax v24.4s, v24.4s, v17.4s\n"
@@ -422,15 +422,15 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"b 50f\n"
"45:" // Height 3: no bias
"tbz %x[flags], #0, 49f\n"
- "ldr x8, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x17, #0x4\n"
- "add x27, x14, x8, LSL #2\n"
- "add x26, x27, x8, LSL #2\n"
+ "add x27, x14, x19, LSL #2\n"
+ "add x26, x27, x19, LSL #2\n"
"bge 48f\n"
"tbz x17, #1, 46f\n"
"ldr d24, [x14], #0x8\n"
"ldr d25, [x27], #0x8\n"
- "mov x8, #0x8\n"
+ "mov x19, #0x8\n"
"ldr d26, [x26], #0x8\n"
"tbz x17, #0, 47f\n"
"ld1 { v24.s }[2], [x14]\n"
@@ -439,11 +439,11 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"b 47f\n"
"46:" // Height 3: Partial accumulate: partial_1_0
"ldr s24, [x14, #0x0]\n"
- "mov x8, #0x0\n"
+ "mov x19, #0x0\n"
"ldr s25, [x27, #0x0]\n"
"ldr s26, [x26, #0x0]\n"
"47:" // Height 3: Partial accumulate: Done
- "sub x14, x14, x8\n"
+ "sub x14, x14, x19\n"
"b 50f\n"
"48:" // Height 3: full accumulate
"ldr q24, [x14, #0x0]\n"
@@ -458,24 +458,24 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"mov x13, #0x0\n"
"51:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr x8, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"ldr w12, [x20, x13, LSL #0x2]\n"
"tbz %x[flags], #3, 52f\n"
"ldr x20, [%x[input_ptr], x13, LSL #0x3]\n"
- "add x20, x20, x8, LSL #3\n"
+ "add x20, x20, x19, LSL #3\n"
"ldr x11, [x20, #0x0]\n"
"ldr x9, [x20, #0x8]\n"
"ldr x27, [x20, #0x10]\n"
"cbnz x13, 53f\n"
- "ldr x8, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x11, x11, x8, LSL #2\n"
- "add x9, x9, x8, LSL #2\n"
- "add x27, x27, x8, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x11, x11, x19, LSL #2\n"
+ "add x9, x9, x19, LSL #2\n"
+ "add x27, x27, x19, LSL #2\n"
"b 53f\n"
"52:" // Height 3: setup direct input
"mov x11, %x[input_ptr]\n"
- "add x9, x11, x8, LSL #2\n"
- "add x27, x9, x8, LSL #2\n"
+ "add x9, x11, x19, LSL #2\n"
+ "add x27, x9, x19, LSL #2\n"
"53:" // Height 3: input setup done
"cmp x12, #0x4\n"
"blt 56f\n"
@@ -489,21 +489,21 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"fmla v24.4s, v8.4s, v0.s[0]\n"
"ldr d9, [x16, #0x10]\n"
"fmla v25.4s, v8.4s, v1.s[0]\n"
- "ldr x8, [x16, #0x18]\n"
+ "ldr x19, [x16, #0x18]\n"
"fmla v26.4s, v8.4s, v2.s[0]\n"
"ldr d10, [x16, #0x20]\n"
"ldr x21, [x16, #0x28]\n"
"add x11, x11, #0x10\n"
- "mov v9.d[1], x8\n"
+ "mov v9.d[1], x19\n"
"ldr d11, [x16, #0x30]\n"
- "ldr x8, [x16, #0x38]\n"
+ "ldr x19, [x16, #0x38]\n"
"add x9, x9, #0x10\n"
"fmla v24.4s, v9.4s, v0.s[1]\n"
"mov v10.d[1], x21\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
"prfm pldl1keep, [x11, #0x80]\n"
"fmla v26.4s, v9.4s, v2.s[1]\n"
- "mov v11.d[1], x8\n"
+ "mov v11.d[1], x19\n"
"prfm pldl1keep, [x9, #0x80]\n"
"add x27, x27, #0x10\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
@@ -569,19 +569,19 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"58:" // Height 3: Multiply loop: No odd multiplies
"ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x13, x13, #0x1\n"
- "cmp x13, x8\n"
+ "cmp x13, x19\n"
"bne 51b\n"
- "ldr x8, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"prfm pstl1keep, [x14, #0x0]\n"
- "add x27, x14, x8, LSL #2\n"
+ "add x27, x14, x19, LSL #2\n"
"prfm pstl1keep, [x27, #0x0]\n"
- "add x26, x27, x8, LSL #2\n"
+ "add x26, x27, x19, LSL #2\n"
"prfm pstl1keep, [x26, #0x0]\n"
"tbz %x[flags], #1, 59f\n"
"add x20, %x[args_ptr], %[offset_min]\n"
- "add x8, %x[args_ptr], %[offset_max]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
"ld1r { v17.4s }, [x20]\n"
- "ld1r { v16.4s }, [x8]\n"
+ "ld1r { v16.4s }, [x19]\n"
"fmin v24.4s, v24.4s, v16.4s\n"
"fmin v25.4s, v25.4s, v16.4s\n"
"fmin v26.4s, v26.4s, v16.4s\n"
@@ -630,16 +630,16 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"b 71f\n"
"66:" // Height 4: no bias
"tbz %x[flags], #0, 70f\n"
- "ldr x8, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x17, #0x4\n"
- "add x27, x14, x8, LSL #2\n"
- "add x26, x27, x8, LSL #2\n"
- "add x25, x26, x8, LSL #2\n"
+ "add x27, x14, x19, LSL #2\n"
+ "add x26, x27, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
"bge 69f\n"
"tbz x17, #1, 67f\n"
"ldr d24, [x14], #0x8\n"
"ldr d25, [x27], #0x8\n"
- "mov x8, #0x8\n"
+ "mov x19, #0x8\n"
"ldr d26, [x26], #0x8\n"
"ldr d27, [x25], #0x8\n"
"tbz x17, #0, 68f\n"
@@ -650,12 +650,12 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"b 68f\n"
"67:" // Height 4: Partial accumulate: partial_1_0
"ldr s24, [x14, #0x0]\n"
- "mov x8, #0x0\n"
+ "mov x19, #0x0\n"
"ldr s25, [x27, #0x0]\n"
"ldr s26, [x26, #0x0]\n"
"ldr s27, [x25, #0x0]\n"
"68:" // Height 4: Partial accumulate: Done
- "sub x14, x14, x8\n"
+ "sub x14, x14, x19\n"
"b 71f\n"
"69:" // Height 4: full accumulate
"ldr q24, [x14, #0x0]\n"
@@ -672,27 +672,27 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"mov x13, #0x0\n"
"72:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr x8, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"ldr w12, [x20, x13, LSL #0x2]\n"
"tbz %x[flags], #3, 73f\n"
"ldr x20, [%x[input_ptr], x13, LSL #0x3]\n"
- "add x20, x20, x8, LSL #3\n"
+ "add x20, x20, x19, LSL #3\n"
"ldr x11, [x20, #0x0]\n"
"ldr x9, [x20, #0x8]\n"
"ldr x27, [x20, #0x10]\n"
"ldr x25, [x20, #0x18]\n"
"cbnz x13, 74f\n"
- "ldr x8, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x11, x11, x8, LSL #2\n"
- "add x9, x9, x8, LSL #2\n"
- "add x27, x27, x8, LSL #2\n"
- "add x25, x25, x8, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x11, x11, x19, LSL #2\n"
+ "add x9, x9, x19, LSL #2\n"
+ "add x27, x27, x19, LSL #2\n"
+ "add x25, x25, x19, LSL #2\n"
"b 74f\n"
"73:" // Height 4: setup direct input
"mov x11, %x[input_ptr]\n"
- "add x9, x11, x8, LSL #2\n"
- "add x27, x9, x8, LSL #2\n"
- "add x25, x27, x8, LSL #2\n"
+ "add x9, x11, x19, LSL #2\n"
+ "add x27, x9, x19, LSL #2\n"
+ "add x25, x27, x19, LSL #2\n"
"74:" // Height 4: input setup done
"cmp x12, #0x4\n"
"blt 77f\n"
@@ -707,21 +707,21 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"fmla v24.4s, v8.4s, v0.s[0]\n"
"ldr d9, [x16, #0x10]\n"
"fmla v25.4s, v8.4s, v1.s[0]\n"
- "ldr x8, [x16, #0x18]\n"
+ "ldr x19, [x16, #0x18]\n"
"fmla v26.4s, v8.4s, v2.s[0]\n"
"ldr d10, [x16, #0x20]\n"
"fmla v27.4s, v8.4s, v3.s[0]\n"
"ldr x21, [x16, #0x28]\n"
- "mov v9.d[1], x8\n"
+ "mov v9.d[1], x19\n"
"ldr d11, [x16, #0x30]\n"
- "ldr x8, [x16, #0x38]\n"
+ "ldr x19, [x16, #0x38]\n"
"add x11, x11, #0x10\n"
"fmla v24.4s, v9.4s, v0.s[1]\n"
"mov v10.d[1], x21\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
"prfm pldl1keep, [x11, #0x80]\n"
"fmla v26.4s, v9.4s, v2.s[1]\n"
- "mov v11.d[1], x8\n"
+ "mov v11.d[1], x19\n"
"fmla v27.4s, v9.4s, v3.s[1]\n"
"ldr x10, [x11, #0x8]\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
@@ -748,11 +748,11 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"sub x12, x12, #0x4\n"
"mov v2.d[1], x26\n"
"ldr d3, [x25, #0x0]\n"
- "ldr x8, [x25, #0x8]\n"
+ "ldr x19, [x25, #0x8]\n"
"cmp x12, #0x8\n"
"add x16, x16, #0x40\n"
"ldr d8, [x16, #0x0]\n"
- "mov v3.d[1], x8\n"
+ "mov v3.d[1], x19\n"
"ldr x26, [x16, #0x8]\n"
"mov v8.d[1], x26\n"
"bge 75b\n"
@@ -804,21 +804,21 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"79:" // Height 4: Multiply loop: No odd multiplies
"ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x13, x13, #0x1\n"
- "cmp x13, x8\n"
+ "cmp x13, x19\n"
"bne 72b\n"
- "ldr x8, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"prfm pstl1keep, [x14, #0x0]\n"
- "add x27, x14, x8, LSL #2\n"
+ "add x27, x14, x19, LSL #2\n"
"prfm pstl1keep, [x27, #0x0]\n"
- "add x26, x27, x8, LSL #2\n"
+ "add x26, x27, x19, LSL #2\n"
"prfm pstl1keep, [x26, #0x0]\n"
- "add x25, x26, x8, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
"prfm pstl1keep, [x25, #0x0]\n"
"tbz %x[flags], #1, 80f\n"
"add x20, %x[args_ptr], %[offset_min]\n"
- "add x8, %x[args_ptr], %[offset_max]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
"ld1r { v17.4s }, [x20]\n"
- "ld1r { v16.4s }, [x8]\n"
+ "ld1r { v16.4s }, [x19]\n"
"fmin v24.4s, v24.4s, v16.4s\n"
"fmin v25.4s, v25.4s, v16.4s\n"
"fmin v26.4s, v26.4s, v16.4s\n"
@@ -874,17 +874,17 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"b 92f\n"
"87:" // Height 5: no bias
"tbz %x[flags], #0, 91f\n"
- "ldr x8, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x17, #0x4\n"
- "add x27, x14, x8, LSL #2\n"
- "add x26, x27, x8, LSL #2\n"
- "add x25, x26, x8, LSL #2\n"
- "add x24, x25, x8, LSL #2\n"
+ "add x27, x14, x19, LSL #2\n"
+ "add x26, x27, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
"bge 90f\n"
"tbz x17, #1, 88f\n"
"ldr d24, [x14], #0x8\n"
"ldr d25, [x27], #0x8\n"
- "mov x8, #0x8\n"
+ "mov x19, #0x8\n"
"ldr d26, [x26], #0x8\n"
"ldr d27, [x25], #0x8\n"
"ldr d28, [x24], #0x8\n"
@@ -897,13 +897,13 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"b 89f\n"
"88:" // Height 5: Partial accumulate: partial_1_0
"ldr s24, [x14, #0x0]\n"
- "mov x8, #0x0\n"
+ "mov x19, #0x0\n"
"ldr s25, [x27, #0x0]\n"
"ldr s26, [x26, #0x0]\n"
"ldr s27, [x25, #0x0]\n"
"ldr s28, [x24, #0x0]\n"
"89:" // Height 5: Partial accumulate: Done
- "sub x14, x14, x8\n"
+ "sub x14, x14, x19\n"
"b 92f\n"
"90:" // Height 5: full accumulate
"ldr q24, [x14, #0x0]\n"
@@ -922,30 +922,30 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"mov x13, #0x0\n"
"93:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr x8, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"ldr w12, [x20, x13, LSL #0x2]\n"
"tbz %x[flags], #3, 94f\n"
"ldr x20, [%x[input_ptr], x13, LSL #0x3]\n"
- "add x20, x20, x8, LSL #3\n"
+ "add x20, x20, x19, LSL #3\n"
"ldr x11, [x20, #0x0]\n"
"ldr x9, [x20, #0x8]\n"
"ldr x27, [x20, #0x10]\n"
"ldr x25, [x20, #0x18]\n"
"ldr x24, [x20, #0x20]\n"
"cbnz x13, 95f\n"
- "ldr x8, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x11, x11, x8, LSL #2\n"
- "add x9, x9, x8, LSL #2\n"
- "add x27, x27, x8, LSL #2\n"
- "add x25, x25, x8, LSL #2\n"
- "add x24, x24, x8, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x11, x11, x19, LSL #2\n"
+ "add x9, x9, x19, LSL #2\n"
+ "add x27, x27, x19, LSL #2\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
"b 95f\n"
"94:" // Height 5: setup direct input
"mov x11, %x[input_ptr]\n"
- "add x9, x11, x8, LSL #2\n"
- "add x27, x9, x8, LSL #2\n"
- "add x25, x27, x8, LSL #2\n"
- "add x24, x25, x8, LSL #2\n"
+ "add x9, x11, x19, LSL #2\n"
+ "add x27, x9, x19, LSL #2\n"
+ "add x25, x27, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
"95:" // Height 5: input setup done
"cmp x12, #0x4\n"
"blt 98f\n"
@@ -961,25 +961,25 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"fmla v24.4s, v8.4s, v0.s[0]\n"
"ldr d9, [x16, #0x10]\n"
"fmla v25.4s, v8.4s, v1.s[0]\n"
- "ldr x8, [x16, #0x18]\n"
+ "ldr x19, [x16, #0x18]\n"
"fmla v26.4s, v8.4s, v2.s[0]\n"
"ldr d10, [x16, #0x20]\n"
"fmla v27.4s, v8.4s, v3.s[0]\n"
"ldr x21, [x16, #0x28]\n"
"fmla v28.4s, v8.4s, v4.s[0]\n"
- "mov v9.d[1], x8\n"
+ "mov v9.d[1], x19\n"
"ldr d11, [x16, #0x30]\n"
"add x11, x11, #0x10\n"
"fmla v24.4s, v9.4s, v0.s[1]\n"
"mov v10.d[1], x21\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
- "ldr x8, [x16, #0x38]\n"
+ "ldr x19, [x16, #0x38]\n"
"fmla v26.4s, v9.4s, v2.s[1]\n"
"prfm pldl1keep, [x11, #0x80]\n"
"fmla v27.4s, v9.4s, v3.s[1]\n"
"ldr x10, [x11, #0x8]\n"
"fmla v28.4s, v9.4s, v4.s[1]\n"
- "mov v11.d[1], x8\n"
+ "mov v11.d[1], x19\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
"add x9, x9, #0x10\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
@@ -1008,12 +1008,12 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"sub x12, x12, #0x4\n"
"mov v2.d[1], x26\n"
"ldr d3, [x25, #0x0]\n"
- "ldr x8, [x25, #0x8]\n"
+ "ldr x19, [x25, #0x8]\n"
"cmp x12, #0x8\n"
"ldr d4, [x24, #0x0]\n"
"add x16, x16, #0x40\n"
"ldr x21, [x24, #0x8]\n"
- "mov v3.d[1], x8\n"
+ "mov v3.d[1], x19\n"
"ldr d8, [x16, #0x0]\n"
"ldr x26, [x16, #0x8]\n"
"mov v4.d[1], x21\n"
@@ -1075,23 +1075,23 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"100:" // Height 5: Multiply loop: No odd multiplies
"ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x13, x13, #0x1\n"
- "cmp x13, x8\n"
+ "cmp x13, x19\n"
"bne 93b\n"
- "ldr x8, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"prfm pstl1keep, [x14, #0x0]\n"
- "add x27, x14, x8, LSL #2\n"
+ "add x27, x14, x19, LSL #2\n"
"prfm pstl1keep, [x27, #0x0]\n"
- "add x26, x27, x8, LSL #2\n"
+ "add x26, x27, x19, LSL #2\n"
"prfm pstl1keep, [x26, #0x0]\n"
- "add x25, x26, x8, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
"prfm pstl1keep, [x25, #0x0]\n"
- "add x24, x25, x8, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
"tbz %x[flags], #1, 101f\n"
"add x20, %x[args_ptr], %[offset_min]\n"
- "add x8, %x[args_ptr], %[offset_max]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
"ld1r { v17.4s }, [x20]\n"
- "ld1r { v16.4s }, [x8]\n"
+ "ld1r { v16.4s }, [x19]\n"
"fmin v24.4s, v24.4s, v16.4s\n"
"fmin v25.4s, v25.4s, v16.4s\n"
"fmin v26.4s, v26.4s, v16.4s\n"
@@ -1154,18 +1154,18 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"b 113f\n"
"108:" // Height 6: no bias
"tbz %x[flags], #0, 112f\n"
- "ldr x8, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x17, #0x4\n"
- "add x27, x14, x8, LSL #2\n"
- "add x26, x27, x8, LSL #2\n"
- "add x25, x26, x8, LSL #2\n"
- "add x24, x25, x8, LSL #2\n"
- "add x23, x24, x8, LSL #2\n"
+ "add x27, x14, x19, LSL #2\n"
+ "add x26, x27, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"bge 111f\n"
"tbz x17, #1, 109f\n"
"ldr d24, [x14], #0x8\n"
"ldr d25, [x27], #0x8\n"
- "mov x8, #0x8\n"
+ "mov x19, #0x8\n"
"ldr d26, [x26], #0x8\n"
"ldr d27, [x25], #0x8\n"
"ldr d28, [x24], #0x8\n"
@@ -1180,14 +1180,14 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"b 110f\n"
"109:" // Height 6: Partial accumulate: partial_1_0
"ldr s24, [x14, #0x0]\n"
- "mov x8, #0x0\n"
+ "mov x19, #0x0\n"
"ldr s25, [x27, #0x0]\n"
"ldr s26, [x26, #0x0]\n"
"ldr s27, [x25, #0x0]\n"
"ldr s28, [x24, #0x0]\n"
"ldr s29, [x23, #0x0]\n"
"110:" // Height 6: Partial accumulate: Done
- "sub x14, x14, x8\n"
+ "sub x14, x14, x19\n"
"b 113f\n"
"111:" // Height 6: full accumulate
"ldr q24, [x14, #0x0]\n"
@@ -1208,11 +1208,11 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"mov x13, #0x0\n"
"114:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr x8, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"ldr w12, [x20, x13, LSL #0x2]\n"
"tbz %x[flags], #3, 115f\n"
"ldr x20, [%x[input_ptr], x13, LSL #0x3]\n"
- "add x20, x20, x8, LSL #3\n"
+ "add x20, x20, x19, LSL #3\n"
"ldr x11, [x20, #0x0]\n"
"ldr x9, [x20, #0x8]\n"
"ldr x27, [x20, #0x10]\n"
@@ -1220,21 +1220,21 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"ldr x24, [x20, #0x20]\n"
"ldr x23, [x20, #0x28]\n"
"cbnz x13, 116f\n"
- "ldr x8, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x11, x11, x8, LSL #2\n"
- "add x9, x9, x8, LSL #2\n"
- "add x27, x27, x8, LSL #2\n"
- "add x25, x25, x8, LSL #2\n"
- "add x24, x24, x8, LSL #2\n"
- "add x23, x23, x8, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x11, x11, x19, LSL #2\n"
+ "add x9, x9, x19, LSL #2\n"
+ "add x27, x27, x19, LSL #2\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
"b 116f\n"
"115:" // Height 6: setup direct input
"mov x11, %x[input_ptr]\n"
- "add x9, x11, x8, LSL #2\n"
- "add x27, x9, x8, LSL #2\n"
- "add x25, x27, x8, LSL #2\n"
- "add x24, x25, x8, LSL #2\n"
- "add x23, x24, x8, LSL #2\n"
+ "add x9, x11, x19, LSL #2\n"
+ "add x27, x9, x19, LSL #2\n"
+ "add x25, x27, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"116:" // Height 6: input setup done
"cmp x12, #0x4\n"
"blt 119f\n"
@@ -1251,25 +1251,25 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"fmla v24.4s, v8.4s, v0.s[0]\n"
"ldr d9, [x16, #0x10]\n"
"fmla v25.4s, v8.4s, v1.s[0]\n"
- "ldr x8, [x16, #0x18]\n"
+ "ldr x19, [x16, #0x18]\n"
"fmla v26.4s, v8.4s, v2.s[0]\n"
"ldr d10, [x16, #0x20]\n"
"fmla v27.4s, v8.4s, v3.s[0]\n"
"ldr x21, [x16, #0x28]\n"
"fmla v28.4s, v8.4s, v4.s[0]\n"
- "mov v9.d[1], x8\n"
+ "mov v9.d[1], x19\n"
"fmla v29.4s, v8.4s, v5.s[0]\n"
"ldr d11, [x16, #0x30]\n"
"fmla v24.4s, v9.4s, v0.s[1]\n"
"mov v10.d[1], x21\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
- "ldr x8, [x16, #0x38]\n"
+ "ldr x19, [x16, #0x38]\n"
"fmla v26.4s, v9.4s, v2.s[1]\n"
"add x11, x11, #0x10\n"
"fmla v27.4s, v9.4s, v3.s[1]\n"
"prfm pldl1keep, [x11, #0x80]\n"
"fmla v28.4s, v9.4s, v4.s[1]\n"
- "mov v11.d[1], x8\n"
+ "mov v11.d[1], x19\n"
"fmla v29.4s, v9.4s, v5.s[1]\n"
"ldr x10, [x11, #0x8]\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
@@ -1306,16 +1306,16 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"cmp x12, #0x8\n"
"ldr d3, [x25, #0x0]\n"
"add x16, x16, #0x40\n"
- "ldr x8, [x25, #0x8]\n"
+ "ldr x19, [x25, #0x8]\n"
"ldr d4, [x24, #0x0]\n"
"ldr x21, [x24, #0x8]\n"
- "mov v3.d[1], x8\n"
+ "mov v3.d[1], x19\n"
"ldr d5, [x23, #0x0]\n"
- "ldr x8, [x23, #0x8]\n"
+ "ldr x19, [x23, #0x8]\n"
"mov v4.d[1], x21\n"
"ldr d8, [x16, #0x0]\n"
"ldr x26, [x16, #0x8]\n"
- "mov v5.d[1], x8\n"
+ "mov v5.d[1], x19\n"
"mov v8.d[1], x26\n"
"bge 117b\n"
"118:" // Height 6: Multiply loop: Single iteration only
@@ -1382,25 +1382,25 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"121:" // Height 6: Multiply loop: No odd multiplies
"ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x13, x13, #0x1\n"
- "cmp x13, x8\n"
+ "cmp x13, x19\n"
"bne 114b\n"
- "ldr x8, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"prfm pstl1keep, [x14, #0x0]\n"
- "add x27, x14, x8, LSL #2\n"
+ "add x27, x14, x19, LSL #2\n"
"prfm pstl1keep, [x27, #0x0]\n"
- "add x26, x27, x8, LSL #2\n"
+ "add x26, x27, x19, LSL #2\n"
"prfm pstl1keep, [x26, #0x0]\n"
- "add x25, x26, x8, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
"prfm pstl1keep, [x25, #0x0]\n"
- "add x24, x25, x8, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
- "add x23, x24, x8, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
"tbz %x[flags], #1, 122f\n"
"add x20, %x[args_ptr], %[offset_min]\n"
- "add x8, %x[args_ptr], %[offset_max]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
"ld1r { v17.4s }, [x20]\n"
- "ld1r { v16.4s }, [x8]\n"
+ "ld1r { v16.4s }, [x19]\n"
"fmin v24.4s, v24.4s, v16.4s\n"
"fmin v25.4s, v25.4s, v16.4s\n"
"fmin v26.4s, v26.4s, v16.4s\n"
@@ -1470,19 +1470,19 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"b 134f\n"
"129:" // Height 7: no bias
"tbz %x[flags], #0, 133f\n"
- "ldr x8, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x17, #0x4\n"
- "add x27, x14, x8, LSL #2\n"
- "add x26, x27, x8, LSL #2\n"
- "add x25, x26, x8, LSL #2\n"
- "add x24, x25, x8, LSL #2\n"
- "add x23, x24, x8, LSL #2\n"
- "add x22, x23, x8, LSL #2\n"
+ "add x27, x14, x19, LSL #2\n"
+ "add x26, x27, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"bge 132f\n"
"tbz x17, #1, 130f\n"
"ldr d24, [x14], #0x8\n"
"ldr d25, [x27], #0x8\n"
- "mov x8, #0x8\n"
+ "mov x19, #0x8\n"
"ldr d26, [x26], #0x8\n"
"ldr d27, [x25], #0x8\n"
"ldr d28, [x24], #0x8\n"
@@ -1499,7 +1499,7 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"b 131f\n"
"130:" // Height 7: Partial accumulate: partial_1_0
"ldr s24, [x14, #0x0]\n"
- "mov x8, #0x0\n"
+ "mov x19, #0x0\n"
"ldr s25, [x27, #0x0]\n"
"ldr s26, [x26, #0x0]\n"
"ldr s27, [x25, #0x0]\n"
@@ -1507,7 +1507,7 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"ldr s29, [x23, #0x0]\n"
"ldr s30, [x22, #0x0]\n"
"131:" // Height 7: Partial accumulate: Done
- "sub x14, x14, x8\n"
+ "sub x14, x14, x19\n"
"b 134f\n"
"132:" // Height 7: full accumulate
"ldr q24, [x14, #0x0]\n"
@@ -1530,11 +1530,11 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"mov x13, #0x0\n"
"135:" // Height 7: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr x8, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"ldr w12, [x20, x13, LSL #0x2]\n"
"tbz %x[flags], #3, 136f\n"
"ldr x20, [%x[input_ptr], x13, LSL #0x3]\n"
- "add x20, x20, x8, LSL #3\n"
+ "add x20, x20, x19, LSL #3\n"
"ldr x11, [x20, #0x0]\n"
"ldr x9, [x20, #0x8]\n"
"ldr x27, [x20, #0x10]\n"
@@ -1543,23 +1543,23 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"ldr x23, [x20, #0x28]\n"
"ldr x22, [x20, #0x30]\n"
"cbnz x13, 137f\n"
- "ldr x8, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x11, x11, x8, LSL #2\n"
- "add x9, x9, x8, LSL #2\n"
- "add x27, x27, x8, LSL #2\n"
- "add x25, x25, x8, LSL #2\n"
- "add x24, x24, x8, LSL #2\n"
- "add x23, x23, x8, LSL #2\n"
- "add x22, x22, x8, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x11, x11, x19, LSL #2\n"
+ "add x9, x9, x19, LSL #2\n"
+ "add x27, x27, x19, LSL #2\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
"b 137f\n"
"136:" // Height 7: setup direct input
"mov x11, %x[input_ptr]\n"
- "add x9, x11, x8, LSL #2\n"
- "add x27, x9, x8, LSL #2\n"
- "add x25, x27, x8, LSL #2\n"
- "add x24, x25, x8, LSL #2\n"
- "add x23, x24, x8, LSL #2\n"
- "add x22, x23, x8, LSL #2\n"
+ "add x9, x11, x19, LSL #2\n"
+ "add x27, x9, x19, LSL #2\n"
+ "add x25, x27, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"137:" // Height 7: input setup done
"cmp x12, #0x4\n"
"blt 140f\n"
@@ -1577,25 +1577,25 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"fmla v24.4s, v8.4s, v0.s[0]\n"
"ldr d9, [x16, #0x10]\n"
"fmla v25.4s, v8.4s, v1.s[0]\n"
- "ldr x8, [x16, #0x18]\n"
+ "ldr x19, [x16, #0x18]\n"
"fmla v26.4s, v8.4s, v2.s[0]\n"
"ldr d10, [x16, #0x20]\n"
"fmla v27.4s, v8.4s, v3.s[0]\n"
"ldr x21, [x16, #0x28]\n"
"fmla v28.4s, v8.4s, v4.s[0]\n"
- "mov v9.d[1], x8\n"
+ "mov v9.d[1], x19\n"
"fmla v29.4s, v8.4s, v5.s[0]\n"
"ldr d11, [x16, #0x30]\n"
"fmla v30.4s, v8.4s, v6.s[0]\n"
"mov v10.d[1], x21\n"
"fmla v24.4s, v9.4s, v0.s[1]\n"
- "ldr x8, [x16, #0x38]\n"
+ "ldr x19, [x16, #0x38]\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
"add x11, x11, #0x10\n"
"fmla v26.4s, v9.4s, v2.s[1]\n"
"prfm pldl1keep, [x11, #0x80]\n"
"fmla v27.4s, v9.4s, v3.s[1]\n"
- "mov v11.d[1], x8\n"
+ "mov v11.d[1], x19\n"
"fmla v28.4s, v9.4s, v4.s[1]\n"
"ldr x10, [x11, #0x8]\n"
"fmla v29.4s, v9.4s, v5.s[1]\n"
@@ -1615,7 +1615,7 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"fmla v29.4s, v10.4s, v5.s[2]\n"
"prfm pldl1keep, [x25, #0x80]\n"
"fmla v30.4s, v10.4s, v6.s[2]\n"
- "ldr x8, [x25, #0x8]\n"
+ "ldr x19, [x25, #0x8]\n"
"fmla v24.4s, v11.4s, v0.s[3]\n"
"ldr d0, [x11, #0x0]\n"
"fmla v25.4s, v11.4s, v1.s[3]\n"
@@ -1634,7 +1634,7 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"add x23, x23, #0x10\n"
"prfm pldl1keep, [x24, #0x80]\n"
"add x22, x22, #0x10\n"
- "mov v3.d[1], x8\n"
+ "mov v3.d[1], x19\n"
"prfm pldl1keep, [x23, #0x80]\n"
"prfm pldl1keep, [x22, #0x80]\n"
"sub x12, x12, #0x4\n"
@@ -1646,11 +1646,11 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"ldr x26, [x16, #0x8]\n"
"mov v4.d[1], x21\n"
"ldr d5, [x23, #0x0]\n"
- "ldr x8, [x23, #0x8]\n"
+ "ldr x19, [x23, #0x8]\n"
"mov v8.d[1], x26\n"
"ldr d6, [x22, #0x0]\n"
"ldr x21, [x22, #0x8]\n"
- "mov v5.d[1], x8\n"
+ "mov v5.d[1], x19\n"
"mov v6.d[1], x21\n"
"bge 138b\n"
"139:" // Height 7: Multiply loop: Single iteration only
@@ -1725,27 +1725,27 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"142:" // Height 7: Multiply loop: No odd multiplies
"ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x13, x13, #0x1\n"
- "cmp x13, x8\n"
+ "cmp x13, x19\n"
"bne 135b\n"
- "ldr x8, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"prfm pstl1keep, [x14, #0x0]\n"
- "add x27, x14, x8, LSL #2\n"
+ "add x27, x14, x19, LSL #2\n"
"prfm pstl1keep, [x27, #0x0]\n"
- "add x26, x27, x8, LSL #2\n"
+ "add x26, x27, x19, LSL #2\n"
"prfm pstl1keep, [x26, #0x0]\n"
- "add x25, x26, x8, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
"prfm pstl1keep, [x25, #0x0]\n"
- "add x24, x25, x8, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
- "add x23, x24, x8, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
- "add x22, x23, x8, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
"tbz %x[flags], #1, 143f\n"
"add x20, %x[args_ptr], %[offset_min]\n"
- "add x8, %x[args_ptr], %[offset_max]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
"ld1r { v17.4s }, [x20]\n"
- "ld1r { v16.4s }, [x8]\n"
+ "ld1r { v16.4s }, [x19]\n"
"fmin v24.4s, v24.4s, v16.4s\n"
"fmin v25.4s, v25.4s, v16.4s\n"
"fmin v26.4s, v26.4s, v16.4s\n"
@@ -1809,8 +1809,8 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"ldr x16, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"mov x14, %x[output_ptr]\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x8, #0x20\n"
- "madd %x[output_ptr], x20, x8, %x[output_ptr]\n"
+ "mov x19, #0x20\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"149:" // Height 8: Column loop
"cbz x15, 150f\n"
"ldr q24, [x15, #0x0]\n"
@@ -1825,20 +1825,20 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"b 155f\n"
"150:" // Height 8: no bias
"tbz %x[flags], #0, 154f\n"
- "ldr x8, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x17, #0x4\n"
- "add x27, x14, x8, LSL #2\n"
- "add x26, x27, x8, LSL #2\n"
- "add x25, x26, x8, LSL #2\n"
- "add x24, x25, x8, LSL #2\n"
- "add x23, x24, x8, LSL #2\n"
- "add x22, x23, x8, LSL #2\n"
- "add x21, x22, x8, LSL #2\n"
+ "add x27, x14, x19, LSL #2\n"
+ "add x26, x27, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"bge 153f\n"
"tbz x17, #1, 151f\n"
"ldr d24, [x14], #0x8\n"
"ldr d25, [x27], #0x8\n"
- "mov x8, #0x8\n"
+ "mov x19, #0x8\n"
"ldr d26, [x26], #0x8\n"
"ldr d27, [x25], #0x8\n"
"ldr d28, [x24], #0x8\n"
@@ -1857,7 +1857,7 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"b 152f\n"
"151:" // Height 8: Partial accumulate: partial_1_0
"ldr s24, [x14, #0x0]\n"
- "mov x8, #0x0\n"
+ "mov x19, #0x0\n"
"ldr s25, [x27, #0x0]\n"
"ldr s26, [x26, #0x0]\n"
"ldr s27, [x25, #0x0]\n"
@@ -1866,7 +1866,7 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"ldr s30, [x22, #0x0]\n"
"ldr s31, [x21, #0x0]\n"
"152:" // Height 8: Partial accumulate: Done
- "sub x14, x14, x8\n"
+ "sub x14, x14, x19\n"
"b 155f\n"
"153:" // Height 8: full accumulate
"ldr q24, [x14, #0x0]\n"
@@ -1891,11 +1891,11 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"mov x13, #0x0\n"
"156:" // Height 8: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr x8, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"ldr w12, [x20, x13, LSL #0x2]\n"
"tbz %x[flags], #3, 157f\n"
"ldr x20, [%x[input_ptr], x13, LSL #0x3]\n"
- "add x20, x20, x8, LSL #3\n"
+ "add x20, x20, x19, LSL #3\n"
"ldr x11, [x20, #0x0]\n"
"ldr x9, [x20, #0x8]\n"
"ldr x27, [x20, #0x10]\n"
@@ -1905,25 +1905,25 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"ldr x22, [x20, #0x30]\n"
"ldr x20, [x20, #0x38]\n"
"cbnz x13, 158f\n"
- "ldr x8, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x11, x11, x8, LSL #2\n"
- "add x9, x9, x8, LSL #2\n"
- "add x27, x27, x8, LSL #2\n"
- "add x25, x25, x8, LSL #2\n"
- "add x24, x24, x8, LSL #2\n"
- "add x23, x23, x8, LSL #2\n"
- "add x22, x22, x8, LSL #2\n"
- "add x20, x20, x8, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x11, x11, x19, LSL #2\n"
+ "add x9, x9, x19, LSL #2\n"
+ "add x27, x27, x19, LSL #2\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x20, x20, x19, LSL #2\n"
"b 158f\n"
"157:" // Height 8: setup direct input
"mov x11, %x[input_ptr]\n"
- "add x9, x11, x8, LSL #2\n"
- "add x27, x9, x8, LSL #2\n"
- "add x25, x27, x8, LSL #2\n"
- "add x24, x25, x8, LSL #2\n"
- "add x23, x24, x8, LSL #2\n"
- "add x22, x23, x8, LSL #2\n"
- "add x20, x22, x8, LSL #2\n"
+ "add x9, x11, x19, LSL #2\n"
+ "add x27, x9, x19, LSL #2\n"
+ "add x25, x27, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x20, x22, x19, LSL #2\n"
"158:" // Height 8: input setup done
"cmp x12, #0x4\n"
"blt 161f\n"
@@ -1942,25 +1942,25 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"fmla v24.4s, v8.4s, v0.s[0]\n"
"ldr d9, [x16, #0x10]\n"
"fmla v25.4s, v8.4s, v1.s[0]\n"
- "ldr x8, [x16, #0x18]\n"
+ "ldr x19, [x16, #0x18]\n"
"fmla v26.4s, v8.4s, v2.s[0]\n"
"ldr d10, [x16, #0x20]\n"
"fmla v27.4s, v8.4s, v3.s[0]\n"
"ldr x21, [x16, #0x28]\n"
"fmla v28.4s, v8.4s, v4.s[0]\n"
- "mov v9.d[1], x8\n"
+ "mov v9.d[1], x19\n"
"fmla v29.4s, v8.4s, v5.s[0]\n"
"ldr d11, [x16, #0x30]\n"
"fmla v30.4s, v8.4s, v6.s[0]\n"
"mov v10.d[1], x21\n"
"fmla v31.4s, v8.4s, v7.s[0]\n"
- "ldr x8, [x16, #0x38]\n"
+ "ldr x19, [x16, #0x38]\n"
"fmla v24.4s, v9.4s, v0.s[1]\n"
"add x11, x11, #0x10\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
"prfm pldl1keep, [x11, #0x80]\n"
"fmla v26.4s, v9.4s, v2.s[1]\n"
- "mov v11.d[1], x8\n"
+ "mov v11.d[1], x19\n"
"fmla v27.4s, v9.4s, v3.s[1]\n"
"ldr x10, [x11, #0x8]\n"
"fmla v28.4s, v9.4s, v4.s[1]\n"
@@ -1980,7 +1980,7 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"fmla v27.4s, v10.4s, v3.s[2]\n"
"prfm pldl1keep, [x25, #0x80]\n"
"fmla v28.4s, v10.4s, v4.s[2]\n"
- "ldr x8, [x25, #0x8]\n"
+ "ldr x19, [x25, #0x8]\n"
"fmla v29.4s, v10.4s, v5.s[2]\n"
"add x24, x24, #0x10\n"
"fmla v30.4s, v10.4s, v6.s[2]\n"
@@ -2005,7 +2005,7 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"ldr d4, [x24, #0x0]\n"
"add x23, x23, #0x10\n"
"add x22, x22, #0x10\n"
- "mov v3.d[1], x8\n"
+ "mov v3.d[1], x19\n"
"prfm pldl1keep, [x23, #0x80]\n"
"mov v4.d[1], x21\n"
"prfm pldl1keep, [x22, #0x80]\n"
@@ -2013,19 +2013,19 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"add x20, x20, #0x10\n"
"prfm pldl1keep, [x20, #0x80]\n"
"sub x12, x12, #0x4\n"
- "ldr x8, [x23, #0x8]\n"
+ "ldr x19, [x23, #0x8]\n"
"cmp x12, #0x8\n"
"ldr d6, [x22, #0x0]\n"
"add x16, x16, #0x40\n"
"ldr d8, [x16, #0x0]\n"
- "mov v5.d[1], x8\n"
+ "mov v5.d[1], x19\n"
"ldr x26, [x16, #0x8]\n"
"ldr x21, [x22, #0x8]\n"
"ldr d7, [x20, #0x0]\n"
"mov v8.d[1], x26\n"
- "ldr x8, [x20, #0x8]\n"
+ "ldr x19, [x20, #0x8]\n"
"mov v6.d[1], x21\n"
- "mov v7.d[1], x8\n"
+ "mov v7.d[1], x19\n"
"bge 159b\n"
"160:" // Height 8: Multiply loop: Single iteration only
"fmla v24.4s, v8.4s, v0.s[0]\n"
@@ -2107,29 +2107,29 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"163:" // Height 8: Multiply loop: No odd multiplies
"ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x13, x13, #0x1\n"
- "cmp x13, x8\n"
+ "cmp x13, x19\n"
"bne 156b\n"
- "ldr x8, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"prfm pstl1keep, [x14, #0x0]\n"
- "add x27, x14, x8, LSL #2\n"
+ "add x27, x14, x19, LSL #2\n"
"prfm pstl1keep, [x27, #0x0]\n"
- "add x26, x27, x8, LSL #2\n"
+ "add x26, x27, x19, LSL #2\n"
"prfm pstl1keep, [x26, #0x0]\n"
- "add x25, x26, x8, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
"prfm pstl1keep, [x25, #0x0]\n"
- "add x24, x25, x8, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
- "add x23, x24, x8, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
- "add x22, x23, x8, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
- "add x21, x22, x8, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"prfm pstl1keep, [x21, #0x0]\n"
"tbz %x[flags], #1, 164f\n"
"add x20, %x[args_ptr], %[offset_min]\n"
- "add x8, %x[args_ptr], %[offset_max]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
"ld1r { v17.4s }, [x20]\n"
- "ld1r { v16.4s }, [x8]\n"
+ "ld1r { v16.4s }, [x19]\n"
"fmin v24.4s, v24.4s, v16.4s\n"
"fmin v25.4s, v25.4s, v16.4s\n"
"fmin v26.4s, v26.4s, v16.4s\n"
@@ -2200,14 +2200,14 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"169:" // Update direct input
- "mov x8, #0x20\n"
- "madd %x[input_ptr], x8, x20, %x[input_ptr]\n"
+ "mov x19, #0x20\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"170:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v16", "v17", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x8", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v16", "v17", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_8x4/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_8x4/generic.cpp
index bd22336c8d..9bed0213da 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_8x4/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_8x4/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -105,527 +105,527 @@ void a64_hybrid_fp32_mla_8x4 (
"cmp %x[M], #0x2\n"
"bgt 43f\n"
"beq 22f\n"
- "mov x14, %x[bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x11, %x[bias]\n"
+ "mov x10, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "cbz x14, 3f\n"
- "ldr q24, [x14, #0x0]\n"
- "add x14, x14, #0x10\n"
+ "cbz x11, 3f\n"
+ "ldr q24, [x11, #0x0]\n"
+ "add x11, x11, #0x10\n"
"b 8f\n"
"3:" // Height 1: no bias
"tbz %x[flags], #0, 7f\n"
"cmp x13, #0x4\n"
"bge 6f\n"
"tbz x13, #1, 4f\n"
- "ldr d24, [x11], #0x8\n"
- "mov x20, #0x8\n"
+ "ldr d24, [x10], #0x8\n"
+ "mov x19, #0x8\n"
"tbz x13, #0, 5f\n"
- "ld1 { v24.s }[2], [x11]\n"
+ "ld1 { v24.s }[2], [x10]\n"
"b 5f\n"
"4:" // Height 1: Partial accumulate: partial_1_0
- "ldr s24, [x11, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr s24, [x10, #0x0]\n"
+ "mov x19, #0x0\n"
"5:" // Height 1: Partial accumulate: Done
- "sub x11, x11, x20\n"
+ "sub x10, x10, x19\n"
"b 8f\n"
"6:" // Height 1: full accumulate
- "ldr q24, [x11, #0x0]\n"
+ "ldr q24, [x10, #0x0]\n"
"b 8f\n"
"7:" // Height 1: no accumulate
"movi v24.16b, #0x0\n"
"8:" // Height 1: setup done
- "mov x10, #0x0\n"
+ "mov x9, #0x0\n"
"9:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w28, [x20, x9, LSL #0x2]\n"
"tbz %x[flags], #3, 10f\n"
- "ldr x21, [%x[input_ptr], x10, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x28, [x21, #0x0]\n"
- "cbnz x10, 11f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x28, x28, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x27, [x20, #0x0]\n"
+ "cbnz x9, 11f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x27, x27, x19, LSL #2\n"
"b 11f\n"
"10:" // Height 1: setup direct input
- "mov x28, %x[input_ptr]\n"
+ "mov x27, %x[input_ptr]\n"
"11:" // Height 1: input setup done
- "cmp x9, #0x4\n"
+ "cmp x28, #0x4\n"
"blt 14f\n"
- "ldr q0, [x28, #0x0]\n"
+ "ldr q0, [x27, #0x0]\n"
"ldr q8, [x12, #0x0]\n"
- "cmp x9, #0x8\n"
- "ldr q9, [x12, #0x10]\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
+ "cmp x28, #0x8\n"
"blt 13f\n"
"12:" // Height 1: Multiply loop: Main loop head
"fmla v24.4s, v8.4s, v0.s[0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "add x27, x27, #0x10\n"
"fmla v24.4s, v9.4s, v0.s[1]\n"
- "sub x9, x9, #0x4\n"
- "add x28, x28, #0x10\n"
+ "ldr q10, [x12, #0x20]\n"
+ "sub x28, x28, #0x4\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
- "cmp x9, #0x8\n"
+ "ldr q11, [x12, #0x30]\n"
+ "cmp x28, #0x8\n"
+ "fmla v24.4s, v11.4s, v0.s[3]\n"
+ "prfm pldl1keep, [x27, #0x80]\n"
"add x12, x12, #0x40\n"
+ "ldr q0, [x27, #0x0]\n"
"ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
- "ldr q10, [x12, #0x20]\n"
- "fmla v24.4s, v11.4s, v0.s[3]\n"
- "ldr q0, [x28, #0x0]\n"
- "ldr q11, [x12, #0x30]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
"bge 12b\n"
"13:" // Height 1: Multiply loop: Single iteration only
"fmla v24.4s, v8.4s, v0.s[0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "sub x28, x28, #0x4\n"
"fmla v24.4s, v9.4s, v0.s[1]\n"
- "add x28, x28, #0x10\n"
- "sub x9, x9, #0x4\n"
+ "ldr q10, [x12, #0x20]\n"
+ "add x27, x27, #0x10\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
- "fmla v24.4s, v11.4s, v0.s[3]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
+ "ldr q11, [x12, #0x30]\n"
"add x12, x12, #0x40\n"
+ "fmla v24.4s, v11.4s, v0.s[3]\n"
+ "prfm pldl1keep, [x27, #0x80]\n"
"14:" // Height 1: Multiply loop: Main loop skip
- "cbz x9, 16f\n"
+ "cbz x28, 16f\n"
"15:" // Height 1: Multiply loop: Odd block loop
- "ldr s0, [x28], #0x4\n"
+ "ldr s0, [x27], #0x4\n"
+ "sub x28, x28, #0x1\n"
"ldr q12, [x12, #0x0]\n"
- "sub x9, x9, #0x1\n"
"fmla v24.4s, v12.4s, v0.s[0]\n"
"add x12, x12, #0x10\n"
- "cbnz x9, 15b\n"
+ "cbnz x28, 15b\n"
"16:" // Height 1: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x10, x10, #0x1\n"
- "cmp x10, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x9, x9, #0x1\n"
+ "cmp x9, x19\n"
"bne 9b\n"
- "prfm pstl1keep, [x11, #0x0]\n"
+ "prfm pstl1keep, [x10, #0x0]\n"
"tbz %x[flags], #1, 17f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v17.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v16.4s }, [x20]\n"
- "fmin v24.4s, v24.4s, v17.4s\n"
- "fmax v24.4s, v24.4s, v16.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v16.4s }, [x19]\n"
+ "fmin v24.4s, v24.4s, v16.4s\n"
+ "fmax v24.4s, v24.4s, v17.4s\n"
"17:" // Height 1: No activation
"cmp x13, #0x4\n"
"bge 20f\n"
"tbz x13, #1, 18f\n"
- "str d24, [x11], #0x8\n"
+ "str d24, [x10], #0x8\n"
"tbz x13, #0, 19f\n"
- "st1 { v24.s }[2], [x11]\n"
+ "st1 { v24.s }[2], [x10]\n"
"b 19f\n"
"18:" // Height 1: Partial direct writeback: partial_1_0
- "str s24, [x11, #0x0]\n"
+ "str s24, [x10, #0x0]\n"
"19:" // Height 1: Partial direct writeback: Done
"b 21f\n"
"20:" // Height 1: Full writeback
- "str q24, [x11, #0x0]\n"
- "add x11, x11, #0x10\n"
+ "str q24, [x10, #0x0]\n"
+ "add x10, x10, #0x10\n"
"21:" // Height 1: Writeback done
"subs x13, x13, #0x4\n"
"bgt 2b\n"
"b 170f\n"
"22:" // Height 2
- "mov x14, %x[bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x11, %x[bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x10, %x[output_ptr]\n"
"23:" // Height 2: Column loop
- "cbz x14, 24f\n"
- "ldr q24, [x14, #0x0]\n"
+ "cbz x11, 24f\n"
+ "ldr q24, [x11, #0x0]\n"
"mov v25.16b, v24.16b\n"
- "add x14, x14, #0x10\n"
+ "add x11, x11, #0x10\n"
"b 29f\n"
"24:" // Height 2: no bias
"tbz %x[flags], #0, 28f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x13, #0x4\n"
- "add x27, x11, x20, LSL #2\n"
+ "add x26, x10, x19, LSL #2\n"
"bge 27f\n"
"tbz x13, #1, 25f\n"
- "ldr d24, [x11], #0x8\n"
- "ldr d25, [x27], #0x8\n"
- "mov x20, #0x8\n"
+ "ldr d24, [x10], #0x8\n"
+ "ldr d25, [x26], #0x8\n"
+ "mov x19, #0x8\n"
"tbz x13, #0, 26f\n"
- "ld1 { v24.s }[2], [x11]\n"
- "ld1 { v25.s }[2], [x27]\n"
+ "ld1 { v24.s }[2], [x10]\n"
+ "ld1 { v25.s }[2], [x26]\n"
"b 26f\n"
"25:" // Height 2: Partial accumulate: partial_1_0
- "ldr s24, [x11, #0x0]\n"
- "ldr s25, [x27, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr s24, [x10, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s25, [x26, #0x0]\n"
"26:" // Height 2: Partial accumulate: Done
- "sub x11, x11, x20\n"
+ "sub x10, x10, x19\n"
"b 29f\n"
"27:" // Height 2: full accumulate
- "ldr q24, [x11, #0x0]\n"
- "ldr q25, [x27, #0x0]\n"
+ "ldr q24, [x10, #0x0]\n"
+ "ldr q25, [x26, #0x0]\n"
"b 29f\n"
"28:" // Height 2: no accumulate
"movi v24.16b, #0x0\n"
"movi v25.16b, #0x0\n"
"29:" // Height 2: setup done
- "mov x10, #0x0\n"
+ "mov x9, #0x0\n"
"30:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w28, [x20, x9, LSL #0x2]\n"
"tbz %x[flags], #3, 31f\n"
- "ldr x21, [%x[input_ptr], x10, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x28, [x21, #0x0]\n"
- "ldr x27, [x21, #0x8]\n"
- "cbnz x10, 32f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x28, x28, x20, LSL #2\n"
- "add x27, x27, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x27, [x20, #0x0]\n"
+ "ldr x26, [x20, #0x8]\n"
+ "cbnz x9, 32f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x27, x27, x19, LSL #2\n"
+ "add x26, x26, x19, LSL #2\n"
"b 32f\n"
"31:" // Height 2: setup direct input
- "mov x28, %x[input_ptr]\n"
- "add x27, x28, x20, LSL #2\n"
+ "mov x27, %x[input_ptr]\n"
+ "add x26, x27, x19, LSL #2\n"
"32:" // Height 2: input setup done
- "cmp x9, #0x4\n"
+ "cmp x28, #0x4\n"
"blt 35f\n"
- "ldr q0, [x28, #0x0]\n"
- "ldr q1, [x27, #0x0]\n"
- "cmp x9, #0x8\n"
+ "ldr q0, [x27, #0x0]\n"
+ "ldr q1, [x26, #0x0]\n"
+ "cmp x28, #0x8\n"
"ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
"blt 34f\n"
"33:" // Height 2: Multiply loop: Main loop head
"fmla v24.4s, v8.4s, v0.s[0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "add x27, x27, #0x10\n"
"fmla v25.4s, v8.4s, v1.s[0]\n"
- "sub x9, x9, #0x4\n"
- "add x28, x28, #0x10\n"
+ "ldr q10, [x12, #0x20]\n"
+ "add x26, x26, #0x10\n"
"fmla v24.4s, v9.4s, v0.s[1]\n"
+ "ldr q11, [x12, #0x30]\n"
+ "sub x28, x28, #0x4\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
- "add x27, x27, #0x10\n"
- "cmp x9, #0x8\n"
+ "prfm pldl1keep, [x27, #0x80]\n"
+ "cmp x28, #0x8\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
- "fmla v25.4s, v10.4s, v1.s[2]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"add x12, x12, #0x40\n"
+ "fmla v25.4s, v10.4s, v1.s[2]\n"
"ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
- "ldr q10, [x12, #0x20]\n"
"fmla v24.4s, v11.4s, v0.s[3]\n"
+ "ldr q0, [x27, #0x0]\n"
"fmla v25.4s, v11.4s, v1.s[3]\n"
- "ldr q0, [x28, #0x0]\n"
- "ldr q1, [x27, #0x0]\n"
- "ldr q11, [x12, #0x30]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "ldr q1, [x26, #0x0]\n"
"bge 33b\n"
"34:" // Height 2: Multiply loop: Single iteration only
"fmla v24.4s, v8.4s, v0.s[0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "sub x28, x28, #0x4\n"
"fmla v25.4s, v8.4s, v1.s[0]\n"
- "add x28, x28, #0x10\n"
+ "ldr q10, [x12, #0x20]\n"
"add x27, x27, #0x10\n"
"fmla v24.4s, v9.4s, v0.s[1]\n"
+ "ldr q11, [x12, #0x30]\n"
+ "add x26, x26, #0x10\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
- "sub x9, x9, #0x4\n"
- "prfm pldl1keep, [x28, #0x80]\n"
- "fmla v24.4s, v10.4s, v0.s[2]\n"
- "fmla v25.4s, v10.4s, v1.s[2]\n"
"prfm pldl1keep, [x27, #0x80]\n"
"add x12, x12, #0x40\n"
+ "fmla v24.4s, v10.4s, v0.s[2]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "fmla v25.4s, v10.4s, v1.s[2]\n"
"fmla v24.4s, v11.4s, v0.s[3]\n"
"fmla v25.4s, v11.4s, v1.s[3]\n"
"35:" // Height 2: Multiply loop: Main loop skip
- "cbz x9, 37f\n"
+ "cbz x28, 37f\n"
"36:" // Height 2: Multiply loop: Odd block loop
- "ldr s0, [x28], #0x4\n"
- "ldr s1, [x27], #0x4\n"
- "sub x9, x9, #0x1\n"
+ "ldr s0, [x27], #0x4\n"
+ "sub x28, x28, #0x1\n"
+ "ldr s1, [x26], #0x4\n"
"ldr q12, [x12, #0x0]\n"
"fmla v24.4s, v12.4s, v0.s[0]\n"
- "fmla v25.4s, v12.4s, v1.s[0]\n"
"add x12, x12, #0x10\n"
- "cbnz x9, 36b\n"
+ "fmla v25.4s, v12.4s, v1.s[0]\n"
+ "cbnz x28, 36b\n"
"37:" // Height 2: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x10, x10, #0x1\n"
- "cmp x10, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x9, x9, #0x1\n"
+ "cmp x9, x19\n"
"bne 30b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "prfm pstl1keep, [x11, #0x0]\n"
- "prfm pstl1keep, [x27, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x10, #0x0]\n"
+ "add x26, x10, x19, LSL #2\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
"tbz %x[flags], #1, 38f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v17.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v16.4s }, [x20]\n"
- "fmin v24.4s, v24.4s, v17.4s\n"
- "fmin v25.4s, v25.4s, v17.4s\n"
- "fmax v24.4s, v24.4s, v16.4s\n"
- "fmax v25.4s, v25.4s, v16.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v16.4s }, [x19]\n"
+ "fmin v24.4s, v24.4s, v16.4s\n"
+ "fmin v25.4s, v25.4s, v16.4s\n"
+ "fmax v24.4s, v24.4s, v17.4s\n"
+ "fmax v25.4s, v25.4s, v17.4s\n"
"38:" // Height 2: No activation
"cmp x13, #0x4\n"
"bge 41f\n"
"tbz x13, #1, 39f\n"
- "str d24, [x11], #0x8\n"
- "str d25, [x27], #0x8\n"
+ "str d24, [x10], #0x8\n"
+ "str d25, [x26], #0x8\n"
"tbz x13, #0, 40f\n"
- "st1 { v24.s }[2], [x11]\n"
- "st1 { v25.s }[2], [x27]\n"
+ "st1 { v24.s }[2], [x10]\n"
+ "st1 { v25.s }[2], [x26]\n"
"b 40f\n"
"39:" // Height 2: Partial direct writeback: partial_1_0
- "str s24, [x11, #0x0]\n"
- "str s25, [x27, #0x0]\n"
+ "str s24, [x10, #0x0]\n"
+ "str s25, [x26, #0x0]\n"
"40:" // Height 2: Partial direct writeback: Done
"b 42f\n"
"41:" // Height 2: Full writeback
- "str q24, [x11, #0x0]\n"
- "add x11, x11, #0x10\n"
- "str q25, [x27, #0x0]\n"
+ "str q24, [x10, #0x0]\n"
+ "add x10, x10, #0x10\n"
+ "str q25, [x26, #0x0]\n"
"42:" // Height 2: Writeback done
"subs x13, x13, #0x4\n"
"bgt 23b\n"
"b 170f\n"
"43:" // Height 3
- "mov x14, %x[bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x11, %x[bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x10, %x[output_ptr]\n"
"44:" // Height 3: Column loop
- "cbz x14, 45f\n"
- "ldr q24, [x14, #0x0]\n"
+ "cbz x11, 45f\n"
+ "ldr q24, [x11, #0x0]\n"
"mov v25.16b, v24.16b\n"
+ "add x11, x11, #0x10\n"
"mov v26.16b, v24.16b\n"
- "add x14, x14, #0x10\n"
"b 50f\n"
"45:" // Height 3: no bias
"tbz %x[flags], #0, 49f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x13, #0x4\n"
- "add x26, x27, x20, LSL #2\n"
+ "add x26, x10, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
"bge 48f\n"
"tbz x13, #1, 46f\n"
- "ldr d24, [x11], #0x8\n"
- "ldr d25, [x27], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d26, [x26], #0x8\n"
+ "ldr d24, [x10], #0x8\n"
+ "ldr d25, [x26], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d26, [x25], #0x8\n"
"tbz x13, #0, 47f\n"
- "ld1 { v24.s }[2], [x11]\n"
- "ld1 { v25.s }[2], [x27]\n"
- "ld1 { v26.s }[2], [x26]\n"
+ "ld1 { v24.s }[2], [x10]\n"
+ "ld1 { v25.s }[2], [x26]\n"
+ "ld1 { v26.s }[2], [x25]\n"
"b 47f\n"
"46:" // Height 3: Partial accumulate: partial_1_0
- "ldr s24, [x11, #0x0]\n"
- "ldr s25, [x27, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s26, [x26, #0x0]\n"
+ "ldr s24, [x10, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s25, [x26, #0x0]\n"
+ "ldr s26, [x25, #0x0]\n"
"47:" // Height 3: Partial accumulate: Done
- "sub x11, x11, x20\n"
+ "sub x10, x10, x19\n"
"b 50f\n"
"48:" // Height 3: full accumulate
- "ldr q24, [x11, #0x0]\n"
- "ldr q25, [x27, #0x0]\n"
- "ldr q26, [x26, #0x0]\n"
+ "ldr q24, [x10, #0x0]\n"
+ "ldr q25, [x26, #0x0]\n"
+ "ldr q26, [x25, #0x0]\n"
"b 50f\n"
"49:" // Height 3: no accumulate
"movi v24.16b, #0x0\n"
"movi v25.16b, #0x0\n"
"movi v26.16b, #0x0\n"
"50:" // Height 3: setup done
- "mov x10, #0x0\n"
+ "mov x9, #0x0\n"
"51:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w28, [x20, x9, LSL #0x2]\n"
"tbz %x[flags], #3, 52f\n"
- "ldr x21, [%x[input_ptr], x10, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x28, [x21, #0x0]\n"
- "ldr x27, [x21, #0x8]\n"
- "ldr x26, [x21, #0x10]\n"
- "cbnz x10, 53f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x28, x28, x20, LSL #2\n"
- "add x27, x27, x20, LSL #2\n"
- "add x26, x26, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x27, [x20, #0x0]\n"
+ "ldr x26, [x20, #0x8]\n"
+ "ldr x25, [x20, #0x10]\n"
+ "cbnz x9, 53f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x27, x27, x19, LSL #2\n"
+ "add x26, x26, x19, LSL #2\n"
+ "add x25, x25, x19, LSL #2\n"
"b 53f\n"
"52:" // Height 3: setup direct input
- "mov x28, %x[input_ptr]\n"
- "add x27, x28, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
+ "mov x27, %x[input_ptr]\n"
+ "add x26, x27, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
"53:" // Height 3: input setup done
- "cmp x9, #0x4\n"
+ "cmp x28, #0x4\n"
"blt 56f\n"
- "ldr q0, [x28, #0x0]\n"
- "ldr q1, [x27, #0x0]\n"
- "cmp x9, #0x8\n"
- "ldr q2, [x26, #0x0]\n"
+ "ldr q0, [x27, #0x0]\n"
+ "ldr q1, [x26, #0x0]\n"
+ "cmp x28, #0x8\n"
+ "ldr q2, [x25, #0x0]\n"
"ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
"blt 55f\n"
"54:" // Height 3: Multiply loop: Main loop head
"fmla v24.4s, v8.4s, v0.s[0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "add x27, x27, #0x10\n"
"fmla v25.4s, v8.4s, v1.s[0]\n"
- "sub x9, x9, #0x4\n"
- "add x28, x28, #0x10\n"
+ "ldr q10, [x12, #0x20]\n"
+ "add x26, x26, #0x10\n"
"fmla v26.4s, v8.4s, v2.s[0]\n"
+ "ldr q11, [x12, #0x30]\n"
+ "add x25, x25, #0x10\n"
"fmla v24.4s, v9.4s, v0.s[1]\n"
- "add x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x27, #0x80]\n"
+ "sub x28, x28, #0x4\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "cmp x28, #0x8\n"
"fmla v26.4s, v9.4s, v2.s[1]\n"
- "cmp x9, #0x8\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"add x12, x12, #0x40\n"
- "ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
+ "ldr q8, [x12, #0x0]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
"fmla v26.4s, v10.4s, v2.s[2]\n"
- "ldr q10, [x12, #0x20]\n"
"fmla v24.4s, v11.4s, v0.s[3]\n"
- "ldr q0, [x28, #0x0]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
+ "ldr q0, [x27, #0x0]\n"
"fmla v25.4s, v11.4s, v1.s[3]\n"
- "ldr q1, [x27, #0x0]\n"
+ "ldr q1, [x26, #0x0]\n"
"fmla v26.4s, v11.4s, v2.s[3]\n"
- "ldr q2, [x26, #0x0]\n"
- "ldr q11, [x12, #0x30]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q2, [x25, #0x0]\n"
"bge 54b\n"
"55:" // Height 3: Multiply loop: Single iteration only
"fmla v24.4s, v8.4s, v0.s[0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "sub x28, x28, #0x4\n"
"fmla v25.4s, v8.4s, v1.s[0]\n"
- "add x28, x28, #0x10\n"
+ "ldr q10, [x12, #0x20]\n"
"add x27, x27, #0x10\n"
"fmla v26.4s, v8.4s, v2.s[0]\n"
- "fmla v24.4s, v9.4s, v0.s[1]\n"
+ "ldr q11, [x12, #0x30]\n"
"add x26, x26, #0x10\n"
- "sub x9, x9, #0x4\n"
+ "fmla v24.4s, v9.4s, v0.s[1]\n"
+ "prfm pldl1keep, [x27, #0x80]\n"
+ "add x25, x25, #0x10\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "add x12, x12, #0x40\n"
"fmla v26.4s, v9.4s, v2.s[1]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "add x12, x12, #0x40\n"
"fmla v26.4s, v10.4s, v2.s[2]\n"
"fmla v24.4s, v11.4s, v0.s[3]\n"
"fmla v25.4s, v11.4s, v1.s[3]\n"
"fmla v26.4s, v11.4s, v2.s[3]\n"
"56:" // Height 3: Multiply loop: Main loop skip
- "cbz x9, 58f\n"
+ "cbz x28, 58f\n"
"57:" // Height 3: Multiply loop: Odd block loop
- "ldr s0, [x28], #0x4\n"
- "ldr s1, [x27], #0x4\n"
- "sub x9, x9, #0x1\n"
- "ldr s2, [x26], #0x4\n"
+ "ldr s0, [x27], #0x4\n"
+ "sub x28, x28, #0x1\n"
+ "ldr s1, [x26], #0x4\n"
+ "ldr s2, [x25], #0x4\n"
"ldr q12, [x12, #0x0]\n"
"fmla v24.4s, v12.4s, v0.s[0]\n"
+ "add x12, x12, #0x10\n"
"fmla v25.4s, v12.4s, v1.s[0]\n"
"fmla v26.4s, v12.4s, v2.s[0]\n"
- "add x12, x12, #0x10\n"
- "cbnz x9, 57b\n"
+ "cbnz x28, 57b\n"
"58:" // Height 3: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x10, x10, #0x1\n"
- "cmp x10, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x9, x9, #0x1\n"
+ "cmp x9, x19\n"
"bne 51b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "prfm pstl1keep, [x11, #0x0]\n"
- "prfm pstl1keep, [x27, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x10, #0x0]\n"
+ "add x26, x10, x19, LSL #2\n"
"prfm pstl1keep, [x26, #0x0]\n"
+ "add x25, x26, x19, LSL #2\n"
+ "prfm pstl1keep, [x25, #0x0]\n"
"tbz %x[flags], #1, 59f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v17.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v16.4s }, [x20]\n"
- "fmin v24.4s, v24.4s, v17.4s\n"
- "fmin v25.4s, v25.4s, v17.4s\n"
- "fmin v26.4s, v26.4s, v17.4s\n"
- "fmax v24.4s, v24.4s, v16.4s\n"
- "fmax v25.4s, v25.4s, v16.4s\n"
- "fmax v26.4s, v26.4s, v16.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v16.4s }, [x19]\n"
+ "fmin v24.4s, v24.4s, v16.4s\n"
+ "fmin v25.4s, v25.4s, v16.4s\n"
+ "fmin v26.4s, v26.4s, v16.4s\n"
+ "fmax v24.4s, v24.4s, v17.4s\n"
+ "fmax v25.4s, v25.4s, v17.4s\n"
+ "fmax v26.4s, v26.4s, v17.4s\n"
"59:" // Height 3: No activation
"cmp x13, #0x4\n"
"bge 62f\n"
"tbz x13, #1, 60f\n"
- "str d24, [x11], #0x8\n"
- "str d25, [x27], #0x8\n"
- "str d26, [x26], #0x8\n"
+ "str d24, [x10], #0x8\n"
+ "str d25, [x26], #0x8\n"
+ "str d26, [x25], #0x8\n"
"tbz x13, #0, 61f\n"
- "st1 { v24.s }[2], [x11]\n"
- "st1 { v25.s }[2], [x27]\n"
- "st1 { v26.s }[2], [x26]\n"
+ "st1 { v24.s }[2], [x10]\n"
+ "st1 { v25.s }[2], [x26]\n"
+ "st1 { v26.s }[2], [x25]\n"
"b 61f\n"
"60:" // Height 3: Partial direct writeback: partial_1_0
- "str s24, [x11, #0x0]\n"
- "str s25, [x27, #0x0]\n"
- "str s26, [x26, #0x0]\n"
+ "str s24, [x10, #0x0]\n"
+ "str s25, [x26, #0x0]\n"
+ "str s26, [x25, #0x0]\n"
"61:" // Height 3: Partial direct writeback: Done
"b 63f\n"
"62:" // Height 3: Full writeback
- "str q24, [x11, #0x0]\n"
- "add x11, x11, #0x10\n"
- "str q25, [x27, #0x0]\n"
- "str q26, [x26, #0x0]\n"
+ "str q24, [x10, #0x0]\n"
+ "add x10, x10, #0x10\n"
+ "str q25, [x26, #0x0]\n"
+ "str q26, [x25, #0x0]\n"
"63:" // Height 3: Writeback done
"subs x13, x13, #0x4\n"
"bgt 44b\n"
"b 170f\n"
"64:" // Height 4
- "mov x14, %x[bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x11, %x[bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x10, %x[output_ptr]\n"
"65:" // Height 4: Column loop
- "cbz x14, 66f\n"
- "ldr q24, [x14, #0x0]\n"
+ "cbz x11, 66f\n"
+ "ldr q24, [x11, #0x0]\n"
"mov v25.16b, v24.16b\n"
+ "add x11, x11, #0x10\n"
"mov v26.16b, v24.16b\n"
- "add x14, x14, #0x10\n"
"mov v27.16b, v24.16b\n"
"b 71f\n"
"66:" // Height 4: no bias
"tbz %x[flags], #0, 70f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x13, #0x4\n"
- "add x25, x26, x20, LSL #2\n"
+ "add x26, x10, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
"bge 69f\n"
"tbz x13, #1, 67f\n"
- "ldr d24, [x11], #0x8\n"
- "ldr d25, [x27], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d26, [x26], #0x8\n"
- "ldr d27, [x25], #0x8\n"
+ "ldr d24, [x10], #0x8\n"
+ "ldr d25, [x26], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d26, [x25], #0x8\n"
+ "ldr d27, [x24], #0x8\n"
"tbz x13, #0, 68f\n"
- "ld1 { v24.s }[2], [x11]\n"
- "ld1 { v25.s }[2], [x27]\n"
- "ld1 { v26.s }[2], [x26]\n"
- "ld1 { v27.s }[2], [x25]\n"
+ "ld1 { v24.s }[2], [x10]\n"
+ "ld1 { v25.s }[2], [x26]\n"
+ "ld1 { v26.s }[2], [x25]\n"
+ "ld1 { v27.s }[2], [x24]\n"
"b 68f\n"
"67:" // Height 4: Partial accumulate: partial_1_0
- "ldr s24, [x11, #0x0]\n"
- "ldr s25, [x27, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s26, [x26, #0x0]\n"
- "ldr s27, [x25, #0x0]\n"
+ "ldr s24, [x10, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s25, [x26, #0x0]\n"
+ "ldr s26, [x25, #0x0]\n"
+ "ldr s27, [x24, #0x0]\n"
"68:" // Height 4: Partial accumulate: Done
- "sub x11, x11, x20\n"
+ "sub x10, x10, x19\n"
"b 71f\n"
"69:" // Height 4: full accumulate
- "ldr q24, [x11, #0x0]\n"
- "ldr q25, [x27, #0x0]\n"
- "ldr q26, [x26, #0x0]\n"
- "ldr q27, [x25, #0x0]\n"
+ "ldr q24, [x10, #0x0]\n"
+ "ldr q25, [x26, #0x0]\n"
+ "ldr q26, [x25, #0x0]\n"
+ "ldr q27, [x24, #0x0]\n"
"b 71f\n"
"70:" // Height 4: no accumulate
"movi v24.16b, #0x0\n"
@@ -633,101 +633,101 @@ void a64_hybrid_fp32_mla_8x4 (
"movi v26.16b, #0x0\n"
"movi v27.16b, #0x0\n"
"71:" // Height 4: setup done
- "mov x10, #0x0\n"
+ "mov x9, #0x0\n"
"72:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w28, [x20, x9, LSL #0x2]\n"
"tbz %x[flags], #3, 73f\n"
- "ldr x21, [%x[input_ptr], x10, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x28, [x21, #0x0]\n"
- "ldr x27, [x21, #0x8]\n"
- "ldr x26, [x21, #0x10]\n"
- "ldr x25, [x21, #0x18]\n"
- "cbnz x10, 74f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x28, x28, x20, LSL #2\n"
- "add x27, x27, x20, LSL #2\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x27, [x20, #0x0]\n"
+ "ldr x26, [x20, #0x8]\n"
+ "ldr x25, [x20, #0x10]\n"
+ "ldr x24, [x20, #0x18]\n"
+ "cbnz x9, 74f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x27, x27, x19, LSL #2\n"
+ "add x26, x26, x19, LSL #2\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
"b 74f\n"
"73:" // Height 4: setup direct input
- "mov x28, %x[input_ptr]\n"
- "add x27, x28, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "add x25, x26, x20, LSL #2\n"
+ "mov x27, %x[input_ptr]\n"
+ "add x26, x27, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
"74:" // Height 4: input setup done
- "cmp x9, #0x4\n"
+ "cmp x28, #0x4\n"
"blt 77f\n"
- "ldr q0, [x28, #0x0]\n"
- "ldr q1, [x27, #0x0]\n"
- "cmp x9, #0x8\n"
- "ldr q2, [x26, #0x0]\n"
- "ldr q3, [x25, #0x0]\n"
+ "ldr q0, [x27, #0x0]\n"
+ "ldr q1, [x26, #0x0]\n"
+ "cmp x28, #0x8\n"
+ "ldr q2, [x25, #0x0]\n"
+ "ldr q3, [x24, #0x0]\n"
"ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
"blt 76f\n"
"75:" // Height 4: Multiply loop: Main loop head
"fmla v24.4s, v8.4s, v0.s[0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "add x27, x27, #0x10\n"
"fmla v25.4s, v8.4s, v1.s[0]\n"
- "sub x9, x9, #0x4\n"
- "add x28, x28, #0x10\n"
+ "ldr q10, [x12, #0x20]\n"
+ "add x26, x26, #0x10\n"
"fmla v26.4s, v8.4s, v2.s[0]\n"
+ "ldr q11, [x12, #0x30]\n"
+ "add x25, x25, #0x10\n"
"fmla v27.4s, v8.4s, v3.s[0]\n"
- "add x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x27, #0x80]\n"
+ "add x24, x24, #0x10\n"
"fmla v24.4s, v9.4s, v0.s[1]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "sub x28, x28, #0x4\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
- "add x25, x25, #0x10\n"
- "cmp x9, #0x8\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "cmp x28, #0x8\n"
"fmla v26.4s, v9.4s, v2.s[1]\n"
- "fmla v27.4s, v9.4s, v3.s[1]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"add x12, x12, #0x40\n"
+ "fmla v27.4s, v9.4s, v3.s[1]\n"
"ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
"fmla v26.4s, v10.4s, v2.s[2]\n"
"fmla v27.4s, v10.4s, v3.s[2]\n"
- "ldr q10, [x12, #0x20]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"fmla v24.4s, v11.4s, v0.s[3]\n"
- "ldr q0, [x28, #0x0]\n"
+ "ldr q0, [x27, #0x0]\n"
"fmla v25.4s, v11.4s, v1.s[3]\n"
- "ldr q1, [x27, #0x0]\n"
+ "ldr q1, [x26, #0x0]\n"
"fmla v26.4s, v11.4s, v2.s[3]\n"
- "ldr q2, [x26, #0x0]\n"
+ "ldr q2, [x25, #0x0]\n"
"fmla v27.4s, v11.4s, v3.s[3]\n"
- "ldr q3, [x25, #0x0]\n"
- "ldr q11, [x12, #0x30]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q3, [x24, #0x0]\n"
"bge 75b\n"
"76:" // Height 4: Multiply loop: Single iteration only
"fmla v24.4s, v8.4s, v0.s[0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "sub x28, x28, #0x4\n"
"fmla v25.4s, v8.4s, v1.s[0]\n"
- "add x28, x28, #0x10\n"
+ "ldr q10, [x12, #0x20]\n"
"add x27, x27, #0x10\n"
"fmla v26.4s, v8.4s, v2.s[0]\n"
- "fmla v27.4s, v8.4s, v3.s[0]\n"
+ "ldr q11, [x12, #0x30]\n"
"add x26, x26, #0x10\n"
+ "fmla v27.4s, v8.4s, v3.s[0]\n"
+ "prfm pldl1keep, [x27, #0x80]\n"
"add x25, x25, #0x10\n"
"fmla v24.4s, v9.4s, v0.s[1]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "add x24, x24, #0x10\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
- "sub x9, x9, #0x4\n"
- "prfm pldl1keep, [x28, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x12, x12, #0x40\n"
"fmla v26.4s, v9.4s, v2.s[1]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v27.4s, v9.4s, v3.s[1]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "add x12, x12, #0x40\n"
"fmla v26.4s, v10.4s, v2.s[2]\n"
"fmla v27.4s, v10.4s, v3.s[2]\n"
"fmla v24.4s, v11.4s, v0.s[3]\n"
@@ -735,130 +735,130 @@ void a64_hybrid_fp32_mla_8x4 (
"fmla v26.4s, v11.4s, v2.s[3]\n"
"fmla v27.4s, v11.4s, v3.s[3]\n"
"77:" // Height 4: Multiply loop: Main loop skip
- "cbz x9, 79f\n"
+ "cbz x28, 79f\n"
"78:" // Height 4: Multiply loop: Odd block loop
- "ldr s0, [x28], #0x4\n"
- "ldr s1, [x27], #0x4\n"
- "sub x9, x9, #0x1\n"
- "ldr s2, [x26], #0x4\n"
- "ldr s3, [x25], #0x4\n"
+ "ldr s0, [x27], #0x4\n"
+ "sub x28, x28, #0x1\n"
+ "ldr s1, [x26], #0x4\n"
+ "ldr s2, [x25], #0x4\n"
+ "ldr s3, [x24], #0x4\n"
"ldr q12, [x12, #0x0]\n"
"fmla v24.4s, v12.4s, v0.s[0]\n"
- "fmla v25.4s, v12.4s, v1.s[0]\n"
"add x12, x12, #0x10\n"
+ "fmla v25.4s, v12.4s, v1.s[0]\n"
"fmla v26.4s, v12.4s, v2.s[0]\n"
"fmla v27.4s, v12.4s, v3.s[0]\n"
- "cbnz x9, 78b\n"
+ "cbnz x28, 78b\n"
"79:" // Height 4: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x10, x10, #0x1\n"
- "cmp x10, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x9, x9, #0x1\n"
+ "cmp x9, x19\n"
"bne 72b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "prfm pstl1keep, [x11, #0x0]\n"
- "add x25, x26, x20, LSL #2\n"
- "prfm pstl1keep, [x27, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x10, #0x0]\n"
+ "add x26, x10, x19, LSL #2\n"
"prfm pstl1keep, [x26, #0x0]\n"
+ "add x25, x26, x19, LSL #2\n"
"prfm pstl1keep, [x25, #0x0]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"tbz %x[flags], #1, 80f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v17.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v16.4s }, [x20]\n"
- "fmin v24.4s, v24.4s, v17.4s\n"
- "fmin v25.4s, v25.4s, v17.4s\n"
- "fmin v26.4s, v26.4s, v17.4s\n"
- "fmin v27.4s, v27.4s, v17.4s\n"
- "fmax v24.4s, v24.4s, v16.4s\n"
- "fmax v25.4s, v25.4s, v16.4s\n"
- "fmax v26.4s, v26.4s, v16.4s\n"
- "fmax v27.4s, v27.4s, v16.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v16.4s }, [x19]\n"
+ "fmin v24.4s, v24.4s, v16.4s\n"
+ "fmin v25.4s, v25.4s, v16.4s\n"
+ "fmin v26.4s, v26.4s, v16.4s\n"
+ "fmin v27.4s, v27.4s, v16.4s\n"
+ "fmax v24.4s, v24.4s, v17.4s\n"
+ "fmax v25.4s, v25.4s, v17.4s\n"
+ "fmax v26.4s, v26.4s, v17.4s\n"
+ "fmax v27.4s, v27.4s, v17.4s\n"
"80:" // Height 4: No activation
"cmp x13, #0x4\n"
"bge 83f\n"
"tbz x13, #1, 81f\n"
- "str d24, [x11], #0x8\n"
- "str d25, [x27], #0x8\n"
- "str d26, [x26], #0x8\n"
- "str d27, [x25], #0x8\n"
+ "str d24, [x10], #0x8\n"
+ "str d25, [x26], #0x8\n"
+ "str d26, [x25], #0x8\n"
+ "str d27, [x24], #0x8\n"
"tbz x13, #0, 82f\n"
- "st1 { v24.s }[2], [x11]\n"
- "st1 { v25.s }[2], [x27]\n"
- "st1 { v26.s }[2], [x26]\n"
- "st1 { v27.s }[2], [x25]\n"
+ "st1 { v24.s }[2], [x10]\n"
+ "st1 { v25.s }[2], [x26]\n"
+ "st1 { v26.s }[2], [x25]\n"
+ "st1 { v27.s }[2], [x24]\n"
"b 82f\n"
"81:" // Height 4: Partial direct writeback: partial_1_0
- "str s24, [x11, #0x0]\n"
- "str s25, [x27, #0x0]\n"
- "str s26, [x26, #0x0]\n"
- "str s27, [x25, #0x0]\n"
+ "str s24, [x10, #0x0]\n"
+ "str s25, [x26, #0x0]\n"
+ "str s26, [x25, #0x0]\n"
+ "str s27, [x24, #0x0]\n"
"82:" // Height 4: Partial direct writeback: Done
"b 84f\n"
"83:" // Height 4: Full writeback
- "str q24, [x11, #0x0]\n"
- "add x11, x11, #0x10\n"
- "str q25, [x27, #0x0]\n"
- "str q26, [x26, #0x0]\n"
- "str q27, [x25, #0x0]\n"
+ "str q24, [x10, #0x0]\n"
+ "add x10, x10, #0x10\n"
+ "str q25, [x26, #0x0]\n"
+ "str q26, [x25, #0x0]\n"
+ "str q27, [x24, #0x0]\n"
"84:" // Height 4: Writeback done
"subs x13, x13, #0x4\n"
"bgt 65b\n"
"b 170f\n"
"85:" // Height 5
- "mov x14, %x[bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x11, %x[bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x10, %x[output_ptr]\n"
"86:" // Height 5: Column loop
- "cbz x14, 87f\n"
- "ldr q24, [x14, #0x0]\n"
+ "cbz x11, 87f\n"
+ "ldr q24, [x11, #0x0]\n"
"mov v25.16b, v24.16b\n"
+ "add x11, x11, #0x10\n"
"mov v26.16b, v24.16b\n"
- "add x14, x14, #0x10\n"
"mov v27.16b, v24.16b\n"
"mov v28.16b, v24.16b\n"
"b 92f\n"
"87:" // Height 5: no bias
"tbz %x[flags], #0, 91f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "add x25, x26, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x13, #0x4\n"
- "add x24, x25, x20, LSL #2\n"
+ "add x26, x10, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"bge 90f\n"
"tbz x13, #1, 88f\n"
- "ldr d24, [x11], #0x8\n"
- "ldr d25, [x27], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d26, [x26], #0x8\n"
- "ldr d27, [x25], #0x8\n"
- "ldr d28, [x24], #0x8\n"
+ "ldr d24, [x10], #0x8\n"
+ "ldr d25, [x26], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d26, [x25], #0x8\n"
+ "ldr d27, [x24], #0x8\n"
+ "ldr d28, [x23], #0x8\n"
"tbz x13, #0, 89f\n"
- "ld1 { v24.s }[2], [x11]\n"
- "ld1 { v25.s }[2], [x27]\n"
- "ld1 { v26.s }[2], [x26]\n"
- "ld1 { v27.s }[2], [x25]\n"
- "ld1 { v28.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x10]\n"
+ "ld1 { v25.s }[2], [x26]\n"
+ "ld1 { v26.s }[2], [x25]\n"
+ "ld1 { v27.s }[2], [x24]\n"
+ "ld1 { v28.s }[2], [x23]\n"
"b 89f\n"
"88:" // Height 5: Partial accumulate: partial_1_0
- "ldr s24, [x11, #0x0]\n"
- "ldr s25, [x27, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s26, [x26, #0x0]\n"
- "ldr s27, [x25, #0x0]\n"
- "ldr s28, [x24, #0x0]\n"
+ "ldr s24, [x10, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s25, [x26, #0x0]\n"
+ "ldr s26, [x25, #0x0]\n"
+ "ldr s27, [x24, #0x0]\n"
+ "ldr s28, [x23, #0x0]\n"
"89:" // Height 5: Partial accumulate: Done
- "sub x11, x11, x20\n"
+ "sub x10, x10, x19\n"
"b 92f\n"
"90:" // Height 5: full accumulate
- "ldr q24, [x11, #0x0]\n"
- "ldr q25, [x27, #0x0]\n"
- "ldr q26, [x26, #0x0]\n"
- "ldr q27, [x25, #0x0]\n"
- "ldr q28, [x24, #0x0]\n"
+ "ldr q24, [x10, #0x0]\n"
+ "ldr q25, [x26, #0x0]\n"
+ "ldr q26, [x25, #0x0]\n"
+ "ldr q27, [x24, #0x0]\n"
+ "ldr q28, [x23, #0x0]\n"
"b 92f\n"
"91:" // Height 5: no accumulate
"movi v24.16b, #0x0\n"
@@ -867,116 +867,116 @@ void a64_hybrid_fp32_mla_8x4 (
"movi v27.16b, #0x0\n"
"movi v28.16b, #0x0\n"
"92:" // Height 5: setup done
- "mov x10, #0x0\n"
+ "mov x9, #0x0\n"
"93:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w28, [x20, x9, LSL #0x2]\n"
"tbz %x[flags], #3, 94f\n"
- "ldr x21, [%x[input_ptr], x10, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x28, [x21, #0x0]\n"
- "ldr x27, [x21, #0x8]\n"
- "ldr x26, [x21, #0x10]\n"
- "ldr x25, [x21, #0x18]\n"
- "ldr x24, [x21, #0x20]\n"
- "cbnz x10, 95f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x28, x28, x20, LSL #2\n"
- "add x27, x27, x20, LSL #2\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x27, [x20, #0x0]\n"
+ "ldr x26, [x20, #0x8]\n"
+ "ldr x25, [x20, #0x10]\n"
+ "ldr x24, [x20, #0x18]\n"
+ "ldr x23, [x20, #0x20]\n"
+ "cbnz x9, 95f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x27, x27, x19, LSL #2\n"
+ "add x26, x26, x19, LSL #2\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
"b 95f\n"
"94:" // Height 5: setup direct input
- "mov x28, %x[input_ptr]\n"
- "add x27, x28, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "mov x27, %x[input_ptr]\n"
+ "add x26, x27, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"95:" // Height 5: input setup done
- "cmp x9, #0x4\n"
+ "cmp x28, #0x4\n"
"blt 98f\n"
- "ldr q0, [x28, #0x0]\n"
- "ldr q1, [x27, #0x0]\n"
- "cmp x9, #0x8\n"
- "ldr q2, [x26, #0x0]\n"
- "ldr q3, [x25, #0x0]\n"
- "ldr q4, [x24, #0x0]\n"
+ "ldr q0, [x27, #0x0]\n"
+ "ldr q1, [x26, #0x0]\n"
+ "cmp x28, #0x8\n"
+ "ldr q2, [x25, #0x0]\n"
+ "ldr q3, [x24, #0x0]\n"
+ "ldr q4, [x23, #0x0]\n"
"ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
"blt 97f\n"
"96:" // Height 5: Multiply loop: Main loop head
"fmla v24.4s, v8.4s, v0.s[0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "add x27, x27, #0x10\n"
"fmla v25.4s, v8.4s, v1.s[0]\n"
- "sub x9, x9, #0x4\n"
- "add x28, x28, #0x10\n"
+ "ldr q10, [x12, #0x20]\n"
+ "add x26, x26, #0x10\n"
"fmla v26.4s, v8.4s, v2.s[0]\n"
+ "ldr q11, [x12, #0x30]\n"
+ "add x25, x25, #0x10\n"
"fmla v27.4s, v8.4s, v3.s[0]\n"
- "add x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x27, #0x80]\n"
+ "add x24, x24, #0x10\n"
"fmla v28.4s, v8.4s, v4.s[0]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "add x23, x23, #0x10\n"
"fmla v24.4s, v9.4s, v0.s[1]\n"
- "add x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x28, x28, #0x4\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "cmp x28, #0x8\n"
"fmla v26.4s, v9.4s, v2.s[1]\n"
- "cmp x9, #0x8\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"add x12, x12, #0x40\n"
- "ldr q8, [x12, #0x0]\n"
"fmla v27.4s, v9.4s, v3.s[1]\n"
+ "ldr q8, [x12, #0x0]\n"
"fmla v28.4s, v9.4s, v4.s[1]\n"
- "ldr q9, [x12, #0x10]\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
"fmla v26.4s, v10.4s, v2.s[2]\n"
"fmla v27.4s, v10.4s, v3.s[2]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"fmla v28.4s, v10.4s, v4.s[2]\n"
- "ldr q10, [x12, #0x20]\n"
"fmla v24.4s, v11.4s, v0.s[3]\n"
- "ldr q0, [x28, #0x0]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
+ "ldr q0, [x27, #0x0]\n"
"fmla v25.4s, v11.4s, v1.s[3]\n"
- "ldr q1, [x27, #0x0]\n"
+ "ldr q1, [x26, #0x0]\n"
"fmla v26.4s, v11.4s, v2.s[3]\n"
- "ldr q2, [x26, #0x0]\n"
+ "ldr q2, [x25, #0x0]\n"
"fmla v27.4s, v11.4s, v3.s[3]\n"
- "ldr q3, [x25, #0x0]\n"
+ "ldr q3, [x24, #0x0]\n"
"fmla v28.4s, v11.4s, v4.s[3]\n"
- "ldr q4, [x24, #0x0]\n"
- "ldr q11, [x12, #0x30]\n"
+ "ldr q4, [x23, #0x0]\n"
"bge 96b\n"
"97:" // Height 5: Multiply loop: Single iteration only
"fmla v24.4s, v8.4s, v0.s[0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "sub x28, x28, #0x4\n"
"fmla v25.4s, v8.4s, v1.s[0]\n"
- "add x28, x28, #0x10\n"
+ "ldr q10, [x12, #0x20]\n"
"add x27, x27, #0x10\n"
"fmla v26.4s, v8.4s, v2.s[0]\n"
- "fmla v27.4s, v8.4s, v3.s[0]\n"
+ "ldr q11, [x12, #0x30]\n"
"add x26, x26, #0x10\n"
+ "fmla v27.4s, v8.4s, v3.s[0]\n"
+ "prfm pldl1keep, [x27, #0x80]\n"
"add x25, x25, #0x10\n"
"fmla v28.4s, v8.4s, v4.s[0]\n"
- "fmla v24.4s, v9.4s, v0.s[1]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"add x24, x24, #0x10\n"
- "sub x9, x9, #0x4\n"
+ "fmla v24.4s, v9.4s, v0.s[1]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x23, x23, #0x10\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "add x12, x12, #0x40\n"
"fmla v26.4s, v9.4s, v2.s[1]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"fmla v27.4s, v9.4s, v3.s[1]\n"
"fmla v28.4s, v9.4s, v4.s[1]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "add x12, x12, #0x40\n"
"fmla v26.4s, v10.4s, v2.s[2]\n"
"fmla v27.4s, v10.4s, v3.s[2]\n"
"fmla v28.4s, v10.4s, v4.s[2]\n"
@@ -986,146 +986,146 @@ void a64_hybrid_fp32_mla_8x4 (
"fmla v27.4s, v11.4s, v3.s[3]\n"
"fmla v28.4s, v11.4s, v4.s[3]\n"
"98:" // Height 5: Multiply loop: Main loop skip
- "cbz x9, 100f\n"
+ "cbz x28, 100f\n"
"99:" // Height 5: Multiply loop: Odd block loop
- "ldr s0, [x28], #0x4\n"
- "ldr s1, [x27], #0x4\n"
- "sub x9, x9, #0x1\n"
- "ldr s2, [x26], #0x4\n"
- "ldr s3, [x25], #0x4\n"
- "ldr s4, [x24], #0x4\n"
+ "ldr s0, [x27], #0x4\n"
+ "sub x28, x28, #0x1\n"
+ "ldr s1, [x26], #0x4\n"
+ "ldr s2, [x25], #0x4\n"
+ "ldr s3, [x24], #0x4\n"
+ "ldr s4, [x23], #0x4\n"
"ldr q12, [x12, #0x0]\n"
"fmla v24.4s, v12.4s, v0.s[0]\n"
+ "add x12, x12, #0x10\n"
"fmla v25.4s, v12.4s, v1.s[0]\n"
"fmla v26.4s, v12.4s, v2.s[0]\n"
"fmla v27.4s, v12.4s, v3.s[0]\n"
- "add x12, x12, #0x10\n"
"fmla v28.4s, v12.4s, v4.s[0]\n"
- "cbnz x9, 99b\n"
+ "cbnz x28, 99b\n"
"100:" // Height 5: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x10, x10, #0x1\n"
- "cmp x10, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x9, x9, #0x1\n"
+ "cmp x9, x19\n"
"bne 93b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "prfm pstl1keep, [x11, #0x0]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "prfm pstl1keep, [x27, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x10, #0x0]\n"
+ "add x26, x10, x19, LSL #2\n"
"prfm pstl1keep, [x26, #0x0]\n"
+ "add x25, x26, x19, LSL #2\n"
"prfm pstl1keep, [x25, #0x0]\n"
+ "add x24, x25, x19, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x19, LSL #2\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"tbz %x[flags], #1, 101f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v17.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v16.4s }, [x20]\n"
- "fmin v24.4s, v24.4s, v17.4s\n"
- "fmin v25.4s, v25.4s, v17.4s\n"
- "fmin v26.4s, v26.4s, v17.4s\n"
- "fmin v27.4s, v27.4s, v17.4s\n"
- "fmin v28.4s, v28.4s, v17.4s\n"
- "fmax v24.4s, v24.4s, v16.4s\n"
- "fmax v25.4s, v25.4s, v16.4s\n"
- "fmax v26.4s, v26.4s, v16.4s\n"
- "fmax v27.4s, v27.4s, v16.4s\n"
- "fmax v28.4s, v28.4s, v16.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v16.4s }, [x19]\n"
+ "fmin v24.4s, v24.4s, v16.4s\n"
+ "fmin v25.4s, v25.4s, v16.4s\n"
+ "fmin v26.4s, v26.4s, v16.4s\n"
+ "fmin v27.4s, v27.4s, v16.4s\n"
+ "fmax v24.4s, v24.4s, v17.4s\n"
+ "fmax v25.4s, v25.4s, v17.4s\n"
+ "fmax v26.4s, v26.4s, v17.4s\n"
+ "fmax v27.4s, v27.4s, v17.4s\n"
+ "fmin v28.4s, v28.4s, v16.4s\n"
+ "fmax v28.4s, v28.4s, v17.4s\n"
"101:" // Height 5: No activation
"cmp x13, #0x4\n"
"bge 104f\n"
"tbz x13, #1, 102f\n"
- "str d24, [x11], #0x8\n"
- "str d25, [x27], #0x8\n"
- "str d26, [x26], #0x8\n"
- "str d27, [x25], #0x8\n"
- "str d28, [x24], #0x8\n"
+ "str d24, [x10], #0x8\n"
+ "str d25, [x26], #0x8\n"
+ "str d26, [x25], #0x8\n"
+ "str d27, [x24], #0x8\n"
+ "str d28, [x23], #0x8\n"
"tbz x13, #0, 103f\n"
- "st1 { v24.s }[2], [x11]\n"
- "st1 { v25.s }[2], [x27]\n"
- "st1 { v26.s }[2], [x26]\n"
- "st1 { v27.s }[2], [x25]\n"
- "st1 { v28.s }[2], [x24]\n"
+ "st1 { v24.s }[2], [x10]\n"
+ "st1 { v25.s }[2], [x26]\n"
+ "st1 { v26.s }[2], [x25]\n"
+ "st1 { v27.s }[2], [x24]\n"
+ "st1 { v28.s }[2], [x23]\n"
"b 103f\n"
"102:" // Height 5: Partial direct writeback: partial_1_0
- "str s24, [x11, #0x0]\n"
- "str s25, [x27, #0x0]\n"
- "str s26, [x26, #0x0]\n"
- "str s27, [x25, #0x0]\n"
- "str s28, [x24, #0x0]\n"
+ "str s24, [x10, #0x0]\n"
+ "str s25, [x26, #0x0]\n"
+ "str s26, [x25, #0x0]\n"
+ "str s27, [x24, #0x0]\n"
+ "str s28, [x23, #0x0]\n"
"103:" // Height 5: Partial direct writeback: Done
"b 105f\n"
"104:" // Height 5: Full writeback
- "str q24, [x11, #0x0]\n"
- "add x11, x11, #0x10\n"
- "str q25, [x27, #0x0]\n"
- "str q26, [x26, #0x0]\n"
- "str q27, [x25, #0x0]\n"
- "str q28, [x24, #0x0]\n"
+ "str q24, [x10, #0x0]\n"
+ "add x10, x10, #0x10\n"
+ "str q25, [x26, #0x0]\n"
+ "str q26, [x25, #0x0]\n"
+ "str q27, [x24, #0x0]\n"
+ "str q28, [x23, #0x0]\n"
"105:" // Height 5: Writeback done
"subs x13, x13, #0x4\n"
"bgt 86b\n"
"b 170f\n"
"106:" // Height 6
- "mov x14, %x[bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x11, %x[bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x10, %x[output_ptr]\n"
"107:" // Height 6: Column loop
- "cbz x14, 108f\n"
- "ldr q24, [x14, #0x0]\n"
+ "cbz x11, 108f\n"
+ "ldr q24, [x11, #0x0]\n"
"mov v25.16b, v24.16b\n"
+ "add x11, x11, #0x10\n"
"mov v26.16b, v24.16b\n"
- "add x14, x14, #0x10\n"
"mov v27.16b, v24.16b\n"
"mov v28.16b, v24.16b\n"
"mov v29.16b, v24.16b\n"
"b 113f\n"
"108:" // Height 6: no bias
"tbz %x[flags], #0, 112f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x13, #0x4\n"
- "add x23, x24, x20, LSL #2\n"
+ "add x26, x10, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"bge 111f\n"
"tbz x13, #1, 109f\n"
- "ldr d24, [x11], #0x8\n"
- "ldr d25, [x27], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d26, [x26], #0x8\n"
- "ldr d27, [x25], #0x8\n"
- "ldr d28, [x24], #0x8\n"
- "ldr d29, [x23], #0x8\n"
+ "ldr d24, [x10], #0x8\n"
+ "ldr d25, [x26], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d26, [x25], #0x8\n"
+ "ldr d27, [x24], #0x8\n"
+ "ldr d28, [x23], #0x8\n"
+ "ldr d29, [x22], #0x8\n"
"tbz x13, #0, 110f\n"
- "ld1 { v24.s }[2], [x11]\n"
- "ld1 { v25.s }[2], [x27]\n"
- "ld1 { v26.s }[2], [x26]\n"
- "ld1 { v27.s }[2], [x25]\n"
- "ld1 { v28.s }[2], [x24]\n"
- "ld1 { v29.s }[2], [x23]\n"
+ "ld1 { v24.s }[2], [x10]\n"
+ "ld1 { v25.s }[2], [x26]\n"
+ "ld1 { v26.s }[2], [x25]\n"
+ "ld1 { v27.s }[2], [x24]\n"
+ "ld1 { v28.s }[2], [x23]\n"
+ "ld1 { v29.s }[2], [x22]\n"
"b 110f\n"
"109:" // Height 6: Partial accumulate: partial_1_0
- "ldr s24, [x11, #0x0]\n"
- "ldr s25, [x27, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s26, [x26, #0x0]\n"
- "ldr s27, [x25, #0x0]\n"
- "ldr s28, [x24, #0x0]\n"
- "ldr s29, [x23, #0x0]\n"
+ "ldr s24, [x10, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s25, [x26, #0x0]\n"
+ "ldr s26, [x25, #0x0]\n"
+ "ldr s27, [x24, #0x0]\n"
+ "ldr s28, [x23, #0x0]\n"
+ "ldr s29, [x22, #0x0]\n"
"110:" // Height 6: Partial accumulate: Done
- "sub x11, x11, x20\n"
+ "sub x10, x10, x19\n"
"b 113f\n"
"111:" // Height 6: full accumulate
- "ldr q24, [x11, #0x0]\n"
- "ldr q25, [x27, #0x0]\n"
- "ldr q26, [x26, #0x0]\n"
- "ldr q27, [x25, #0x0]\n"
- "ldr q28, [x24, #0x0]\n"
- "ldr q29, [x23, #0x0]\n"
+ "ldr q24, [x10, #0x0]\n"
+ "ldr q25, [x26, #0x0]\n"
+ "ldr q26, [x25, #0x0]\n"
+ "ldr q27, [x24, #0x0]\n"
+ "ldr q28, [x23, #0x0]\n"
+ "ldr q29, [x22, #0x0]\n"
"b 113f\n"
"112:" // Height 6: no accumulate
"movi v24.16b, #0x0\n"
@@ -1135,131 +1135,131 @@ void a64_hybrid_fp32_mla_8x4 (
"movi v28.16b, #0x0\n"
"movi v29.16b, #0x0\n"
"113:" // Height 6: setup done
- "mov x10, #0x0\n"
+ "mov x9, #0x0\n"
"114:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w28, [x20, x9, LSL #0x2]\n"
"tbz %x[flags], #3, 115f\n"
- "ldr x21, [%x[input_ptr], x10, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x28, [x21, #0x0]\n"
- "ldr x27, [x21, #0x8]\n"
- "ldr x26, [x21, #0x10]\n"
- "ldr x25, [x21, #0x18]\n"
- "ldr x24, [x21, #0x20]\n"
- "ldr x23, [x21, #0x28]\n"
- "cbnz x10, 116f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x28, x28, x20, LSL #2\n"
- "add x27, x27, x20, LSL #2\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x27, [x20, #0x0]\n"
+ "ldr x26, [x20, #0x8]\n"
+ "ldr x25, [x20, #0x10]\n"
+ "ldr x24, [x20, #0x18]\n"
+ "ldr x23, [x20, #0x20]\n"
+ "ldr x22, [x20, #0x28]\n"
+ "cbnz x9, 116f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x27, x27, x19, LSL #2\n"
+ "add x26, x26, x19, LSL #2\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
"b 116f\n"
"115:" // Height 6: setup direct input
- "mov x28, %x[input_ptr]\n"
- "add x27, x28, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "mov x27, %x[input_ptr]\n"
+ "add x26, x27, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"116:" // Height 6: input setup done
- "cmp x9, #0x4\n"
+ "cmp x28, #0x4\n"
"blt 119f\n"
- "ldr q0, [x28, #0x0]\n"
- "ldr q1, [x27, #0x0]\n"
- "cmp x9, #0x8\n"
- "ldr q2, [x26, #0x0]\n"
- "ldr q3, [x25, #0x0]\n"
- "ldr q4, [x24, #0x0]\n"
- "ldr q5, [x23, #0x0]\n"
+ "ldr q0, [x27, #0x0]\n"
+ "ldr q1, [x26, #0x0]\n"
+ "cmp x28, #0x8\n"
+ "ldr q2, [x25, #0x0]\n"
+ "ldr q3, [x24, #0x0]\n"
+ "ldr q4, [x23, #0x0]\n"
+ "ldr q5, [x22, #0x0]\n"
"ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
"blt 118f\n"
"117:" // Height 6: Multiply loop: Main loop head
"fmla v24.4s, v8.4s, v0.s[0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "add x27, x27, #0x10\n"
"fmla v25.4s, v8.4s, v1.s[0]\n"
- "sub x9, x9, #0x4\n"
- "add x28, x28, #0x10\n"
+ "ldr q10, [x12, #0x20]\n"
+ "add x26, x26, #0x10\n"
"fmla v26.4s, v8.4s, v2.s[0]\n"
+ "ldr q11, [x12, #0x30]\n"
+ "add x25, x25, #0x10\n"
"fmla v27.4s, v8.4s, v3.s[0]\n"
- "add x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x27, #0x80]\n"
+ "add x24, x24, #0x10\n"
"fmla v28.4s, v8.4s, v4.s[0]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "add x23, x23, #0x10\n"
"fmla v29.4s, v8.4s, v5.s[0]\n"
- "add x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x22, x22, #0x10\n"
"fmla v24.4s, v9.4s, v0.s[1]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "sub x28, x28, #0x4\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
- "add x23, x23, #0x10\n"
- "cmp x9, #0x8\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "cmp x28, #0x8\n"
"fmla v26.4s, v9.4s, v2.s[1]\n"
- "fmla v27.4s, v9.4s, v3.s[1]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
"add x12, x12, #0x40\n"
+ "fmla v27.4s, v9.4s, v3.s[1]\n"
"ldr q8, [x12, #0x0]\n"
"fmla v28.4s, v9.4s, v4.s[1]\n"
"fmla v29.4s, v9.4s, v5.s[1]\n"
- "ldr q9, [x12, #0x10]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"fmla v26.4s, v10.4s, v2.s[2]\n"
"fmla v27.4s, v10.4s, v3.s[2]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"fmla v28.4s, v10.4s, v4.s[2]\n"
"fmla v29.4s, v10.4s, v5.s[2]\n"
- "ldr q10, [x12, #0x20]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
"fmla v24.4s, v11.4s, v0.s[3]\n"
- "ldr q0, [x28, #0x0]\n"
+ "ldr q0, [x27, #0x0]\n"
"fmla v25.4s, v11.4s, v1.s[3]\n"
- "ldr q1, [x27, #0x0]\n"
+ "ldr q1, [x26, #0x0]\n"
"fmla v26.4s, v11.4s, v2.s[3]\n"
- "ldr q2, [x26, #0x0]\n"
+ "ldr q2, [x25, #0x0]\n"
"fmla v27.4s, v11.4s, v3.s[3]\n"
- "ldr q3, [x25, #0x0]\n"
+ "ldr q3, [x24, #0x0]\n"
"fmla v28.4s, v11.4s, v4.s[3]\n"
- "ldr q4, [x24, #0x0]\n"
+ "ldr q4, [x23, #0x0]\n"
"fmla v29.4s, v11.4s, v5.s[3]\n"
- "ldr q5, [x23, #0x0]\n"
- "ldr q11, [x12, #0x30]\n"
+ "ldr q5, [x22, #0x0]\n"
"bge 117b\n"
"118:" // Height 6: Multiply loop: Single iteration only
"fmla v24.4s, v8.4s, v0.s[0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "sub x28, x28, #0x4\n"
"fmla v25.4s, v8.4s, v1.s[0]\n"
- "add x28, x28, #0x10\n"
+ "ldr q10, [x12, #0x20]\n"
"add x27, x27, #0x10\n"
"fmla v26.4s, v8.4s, v2.s[0]\n"
- "fmla v27.4s, v8.4s, v3.s[0]\n"
+ "ldr q11, [x12, #0x30]\n"
"add x26, x26, #0x10\n"
+ "fmla v27.4s, v8.4s, v3.s[0]\n"
+ "prfm pldl1keep, [x27, #0x80]\n"
"add x25, x25, #0x10\n"
"fmla v28.4s, v8.4s, v4.s[0]\n"
- "fmla v29.4s, v8.4s, v5.s[0]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"add x24, x24, #0x10\n"
+ "fmla v29.4s, v8.4s, v5.s[0]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"add x23, x23, #0x10\n"
"fmla v24.4s, v9.4s, v0.s[1]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "add x22, x22, #0x10\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
- "sub x9, x9, #0x4\n"
- "prfm pldl1keep, [x28, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "add x12, x12, #0x40\n"
"fmla v26.4s, v9.4s, v2.s[1]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
"fmla v27.4s, v9.4s, v3.s[1]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"fmla v28.4s, v9.4s, v4.s[1]\n"
"fmla v29.4s, v9.4s, v5.s[1]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "add x12, x12, #0x40\n"
"fmla v26.4s, v10.4s, v2.s[2]\n"
"fmla v27.4s, v10.4s, v3.s[2]\n"
"fmla v28.4s, v10.4s, v4.s[2]\n"
@@ -1271,108 +1271,108 @@ void a64_hybrid_fp32_mla_8x4 (
"fmla v28.4s, v11.4s, v4.s[3]\n"
"fmla v29.4s, v11.4s, v5.s[3]\n"
"119:" // Height 6: Multiply loop: Main loop skip
- "cbz x9, 121f\n"
+ "cbz x28, 121f\n"
"120:" // Height 6: Multiply loop: Odd block loop
- "ldr s0, [x28], #0x4\n"
- "ldr s1, [x27], #0x4\n"
- "sub x9, x9, #0x1\n"
- "ldr s2, [x26], #0x4\n"
- "ldr s3, [x25], #0x4\n"
- "ldr s4, [x24], #0x4\n"
- "ldr s5, [x23], #0x4\n"
+ "ldr s0, [x27], #0x4\n"
+ "sub x28, x28, #0x1\n"
+ "ldr s1, [x26], #0x4\n"
+ "ldr s2, [x25], #0x4\n"
+ "ldr s3, [x24], #0x4\n"
+ "ldr s4, [x23], #0x4\n"
+ "ldr s5, [x22], #0x4\n"
"ldr q12, [x12, #0x0]\n"
"fmla v24.4s, v12.4s, v0.s[0]\n"
- "fmla v25.4s, v12.4s, v1.s[0]\n"
"add x12, x12, #0x10\n"
+ "fmla v25.4s, v12.4s, v1.s[0]\n"
"fmla v26.4s, v12.4s, v2.s[0]\n"
"fmla v27.4s, v12.4s, v3.s[0]\n"
"fmla v28.4s, v12.4s, v4.s[0]\n"
"fmla v29.4s, v12.4s, v5.s[0]\n"
- "cbnz x9, 120b\n"
+ "cbnz x28, 120b\n"
"121:" // Height 6: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x10, x10, #0x1\n"
- "cmp x10, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x9, x9, #0x1\n"
+ "cmp x9, x19\n"
"bne 114b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "prfm pstl1keep, [x11, #0x0]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "prfm pstl1keep, [x27, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x10, #0x0]\n"
+ "add x26, x10, x19, LSL #2\n"
"prfm pstl1keep, [x26, #0x0]\n"
- "add x23, x24, x20, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
"prfm pstl1keep, [x25, #0x0]\n"
+ "add x24, x25, x19, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
"tbz %x[flags], #1, 122f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v17.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v16.4s }, [x20]\n"
- "fmin v24.4s, v24.4s, v17.4s\n"
- "fmin v25.4s, v25.4s, v17.4s\n"
- "fmin v26.4s, v26.4s, v17.4s\n"
- "fmin v27.4s, v27.4s, v17.4s\n"
- "fmin v28.4s, v28.4s, v17.4s\n"
- "fmin v29.4s, v29.4s, v17.4s\n"
- "fmax v24.4s, v24.4s, v16.4s\n"
- "fmax v25.4s, v25.4s, v16.4s\n"
- "fmax v26.4s, v26.4s, v16.4s\n"
- "fmax v27.4s, v27.4s, v16.4s\n"
- "fmax v28.4s, v28.4s, v16.4s\n"
- "fmax v29.4s, v29.4s, v16.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v16.4s }, [x19]\n"
+ "fmin v24.4s, v24.4s, v16.4s\n"
+ "fmin v25.4s, v25.4s, v16.4s\n"
+ "fmin v26.4s, v26.4s, v16.4s\n"
+ "fmin v27.4s, v27.4s, v16.4s\n"
+ "fmax v24.4s, v24.4s, v17.4s\n"
+ "fmax v25.4s, v25.4s, v17.4s\n"
+ "fmax v26.4s, v26.4s, v17.4s\n"
+ "fmax v27.4s, v27.4s, v17.4s\n"
+ "fmin v28.4s, v28.4s, v16.4s\n"
+ "fmin v29.4s, v29.4s, v16.4s\n"
+ "fmax v28.4s, v28.4s, v17.4s\n"
+ "fmax v29.4s, v29.4s, v17.4s\n"
"122:" // Height 6: No activation
"cmp x13, #0x4\n"
"bge 125f\n"
"tbz x13, #1, 123f\n"
- "str d24, [x11], #0x8\n"
- "str d25, [x27], #0x8\n"
- "str d26, [x26], #0x8\n"
- "str d27, [x25], #0x8\n"
- "str d28, [x24], #0x8\n"
- "str d29, [x23], #0x8\n"
+ "str d24, [x10], #0x8\n"
+ "str d25, [x26], #0x8\n"
+ "str d26, [x25], #0x8\n"
+ "str d27, [x24], #0x8\n"
+ "str d28, [x23], #0x8\n"
+ "str d29, [x22], #0x8\n"
"tbz x13, #0, 124f\n"
- "st1 { v24.s }[2], [x11]\n"
- "st1 { v25.s }[2], [x27]\n"
- "st1 { v26.s }[2], [x26]\n"
- "st1 { v27.s }[2], [x25]\n"
- "st1 { v28.s }[2], [x24]\n"
- "st1 { v29.s }[2], [x23]\n"
+ "st1 { v24.s }[2], [x10]\n"
+ "st1 { v25.s }[2], [x26]\n"
+ "st1 { v26.s }[2], [x25]\n"
+ "st1 { v27.s }[2], [x24]\n"
+ "st1 { v28.s }[2], [x23]\n"
+ "st1 { v29.s }[2], [x22]\n"
"b 124f\n"
"123:" // Height 6: Partial direct writeback: partial_1_0
- "str s24, [x11, #0x0]\n"
- "str s25, [x27, #0x0]\n"
- "str s26, [x26, #0x0]\n"
- "str s27, [x25, #0x0]\n"
- "str s28, [x24, #0x0]\n"
- "str s29, [x23, #0x0]\n"
+ "str s24, [x10, #0x0]\n"
+ "str s25, [x26, #0x0]\n"
+ "str s26, [x25, #0x0]\n"
+ "str s27, [x24, #0x0]\n"
+ "str s28, [x23, #0x0]\n"
+ "str s29, [x22, #0x0]\n"
"124:" // Height 6: Partial direct writeback: Done
"b 126f\n"
"125:" // Height 6: Full writeback
- "str q24, [x11, #0x0]\n"
- "add x11, x11, #0x10\n"
- "str q25, [x27, #0x0]\n"
- "str q26, [x26, #0x0]\n"
- "str q27, [x25, #0x0]\n"
- "str q28, [x24, #0x0]\n"
- "str q29, [x23, #0x0]\n"
+ "str q24, [x10, #0x0]\n"
+ "add x10, x10, #0x10\n"
+ "str q25, [x26, #0x0]\n"
+ "str q26, [x25, #0x0]\n"
+ "str q27, [x24, #0x0]\n"
+ "str q28, [x23, #0x0]\n"
+ "str q29, [x22, #0x0]\n"
"126:" // Height 6: Writeback done
"subs x13, x13, #0x4\n"
"bgt 107b\n"
"b 170f\n"
"127:" // Height 7
- "mov x14, %x[bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x11, %x[bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x10, %x[output_ptr]\n"
"128:" // Height 7: Column loop
- "cbz x14, 129f\n"
- "ldr q24, [x14, #0x0]\n"
+ "cbz x11, 129f\n"
+ "ldr q24, [x11, #0x0]\n"
"mov v25.16b, v24.16b\n"
+ "add x11, x11, #0x10\n"
"mov v26.16b, v24.16b\n"
- "add x14, x14, #0x10\n"
"mov v27.16b, v24.16b\n"
"mov v28.16b, v24.16b\n"
"mov v29.16b, v24.16b\n"
@@ -1380,53 +1380,53 @@ void a64_hybrid_fp32_mla_8x4 (
"b 134f\n"
"129:" // Height 7: no bias
"tbz %x[flags], #0, 133f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x13, #0x4\n"
- "add x22, x23, x20, LSL #2\n"
+ "add x26, x10, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"bge 132f\n"
"tbz x13, #1, 130f\n"
- "ldr d24, [x11], #0x8\n"
- "ldr d25, [x27], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d26, [x26], #0x8\n"
- "ldr d27, [x25], #0x8\n"
- "ldr d28, [x24], #0x8\n"
- "ldr d29, [x23], #0x8\n"
- "ldr d30, [x22], #0x8\n"
+ "ldr d24, [x10], #0x8\n"
+ "ldr d25, [x26], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d26, [x25], #0x8\n"
+ "ldr d27, [x24], #0x8\n"
+ "ldr d28, [x23], #0x8\n"
+ "ldr d29, [x22], #0x8\n"
+ "ldr d30, [x21], #0x8\n"
"tbz x13, #0, 131f\n"
- "ld1 { v24.s }[2], [x11]\n"
- "ld1 { v25.s }[2], [x27]\n"
- "ld1 { v26.s }[2], [x26]\n"
- "ld1 { v27.s }[2], [x25]\n"
- "ld1 { v28.s }[2], [x24]\n"
- "ld1 { v29.s }[2], [x23]\n"
- "ld1 { v30.s }[2], [x22]\n"
+ "ld1 { v24.s }[2], [x10]\n"
+ "ld1 { v25.s }[2], [x26]\n"
+ "ld1 { v26.s }[2], [x25]\n"
+ "ld1 { v27.s }[2], [x24]\n"
+ "ld1 { v28.s }[2], [x23]\n"
+ "ld1 { v29.s }[2], [x22]\n"
+ "ld1 { v30.s }[2], [x21]\n"
"b 131f\n"
"130:" // Height 7: Partial accumulate: partial_1_0
- "ldr s24, [x11, #0x0]\n"
- "ldr s25, [x27, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s26, [x26, #0x0]\n"
- "ldr s27, [x25, #0x0]\n"
- "ldr s28, [x24, #0x0]\n"
- "ldr s29, [x23, #0x0]\n"
- "ldr s30, [x22, #0x0]\n"
+ "ldr s24, [x10, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s25, [x26, #0x0]\n"
+ "ldr s26, [x25, #0x0]\n"
+ "ldr s27, [x24, #0x0]\n"
+ "ldr s28, [x23, #0x0]\n"
+ "ldr s29, [x22, #0x0]\n"
+ "ldr s30, [x21, #0x0]\n"
"131:" // Height 7: Partial accumulate: Done
- "sub x11, x11, x20\n"
+ "sub x10, x10, x19\n"
"b 134f\n"
"132:" // Height 7: full accumulate
- "ldr q24, [x11, #0x0]\n"
- "ldr q25, [x27, #0x0]\n"
- "ldr q26, [x26, #0x0]\n"
- "ldr q27, [x25, #0x0]\n"
- "ldr q28, [x24, #0x0]\n"
- "ldr q29, [x23, #0x0]\n"
- "ldr q30, [x22, #0x0]\n"
+ "ldr q24, [x10, #0x0]\n"
+ "ldr q25, [x26, #0x0]\n"
+ "ldr q26, [x25, #0x0]\n"
+ "ldr q27, [x24, #0x0]\n"
+ "ldr q28, [x23, #0x0]\n"
+ "ldr q29, [x22, #0x0]\n"
+ "ldr q30, [x21, #0x0]\n"
"b 134f\n"
"133:" // Height 7: no accumulate
"movi v24.16b, #0x0\n"
@@ -1437,146 +1437,146 @@ void a64_hybrid_fp32_mla_8x4 (
"movi v29.16b, #0x0\n"
"movi v30.16b, #0x0\n"
"134:" // Height 7: setup done
- "mov x10, #0x0\n"
+ "mov x9, #0x0\n"
"135:" // Height 7: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w28, [x20, x9, LSL #0x2]\n"
"tbz %x[flags], #3, 136f\n"
- "ldr x21, [%x[input_ptr], x10, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x28, [x21, #0x0]\n"
- "ldr x27, [x21, #0x8]\n"
- "ldr x26, [x21, #0x10]\n"
- "ldr x25, [x21, #0x18]\n"
- "ldr x24, [x21, #0x20]\n"
- "ldr x23, [x21, #0x28]\n"
- "ldr x22, [x21, #0x30]\n"
- "cbnz x10, 137f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x28, x28, x20, LSL #2\n"
- "add x27, x27, x20, LSL #2\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x22, x22, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x27, [x20, #0x0]\n"
+ "ldr x26, [x20, #0x8]\n"
+ "ldr x25, [x20, #0x10]\n"
+ "ldr x24, [x20, #0x18]\n"
+ "ldr x23, [x20, #0x20]\n"
+ "ldr x22, [x20, #0x28]\n"
+ "ldr x21, [x20, #0x30]\n"
+ "cbnz x9, 137f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x27, x27, x19, LSL #2\n"
+ "add x26, x26, x19, LSL #2\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
"b 137f\n"
"136:" // Height 7: setup direct input
- "mov x28, %x[input_ptr]\n"
- "add x27, x28, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "mov x27, %x[input_ptr]\n"
+ "add x26, x27, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"137:" // Height 7: input setup done
- "cmp x9, #0x4\n"
+ "cmp x28, #0x4\n"
"blt 140f\n"
- "ldr q0, [x28, #0x0]\n"
- "ldr q1, [x27, #0x0]\n"
- "cmp x9, #0x8\n"
- "ldr q2, [x26, #0x0]\n"
- "ldr q3, [x25, #0x0]\n"
- "ldr q4, [x24, #0x0]\n"
- "ldr q5, [x23, #0x0]\n"
- "ldr q6, [x22, #0x0]\n"
+ "ldr q0, [x27, #0x0]\n"
+ "ldr q1, [x26, #0x0]\n"
+ "cmp x28, #0x8\n"
+ "ldr q2, [x25, #0x0]\n"
+ "ldr q3, [x24, #0x0]\n"
+ "ldr q4, [x23, #0x0]\n"
+ "ldr q5, [x22, #0x0]\n"
+ "ldr q6, [x21, #0x0]\n"
"ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
"blt 139f\n"
"138:" // Height 7: Multiply loop: Main loop head
"fmla v24.4s, v8.4s, v0.s[0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "add x27, x27, #0x10\n"
"fmla v25.4s, v8.4s, v1.s[0]\n"
- "sub x9, x9, #0x4\n"
- "add x28, x28, #0x10\n"
+ "ldr q10, [x12, #0x20]\n"
+ "add x26, x26, #0x10\n"
"fmla v26.4s, v8.4s, v2.s[0]\n"
+ "ldr q11, [x12, #0x30]\n"
+ "add x25, x25, #0x10\n"
"fmla v27.4s, v8.4s, v3.s[0]\n"
- "add x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x27, #0x80]\n"
+ "add x24, x24, #0x10\n"
"fmla v28.4s, v8.4s, v4.s[0]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "add x23, x23, #0x10\n"
"fmla v29.4s, v8.4s, v5.s[0]\n"
- "add x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x22, x22, #0x10\n"
"fmla v30.4s, v8.4s, v6.s[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "add x21, x21, #0x10\n"
"fmla v24.4s, v9.4s, v0.s[1]\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "sub x28, x28, #0x4\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "cmp x28, #0x8\n"
"fmla v26.4s, v9.4s, v2.s[1]\n"
- "cmp x9, #0x8\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
"add x12, x12, #0x40\n"
- "ldr q8, [x12, #0x0]\n"
"fmla v27.4s, v9.4s, v3.s[1]\n"
+ "ldr q8, [x12, #0x0]\n"
"fmla v28.4s, v9.4s, v4.s[1]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
"fmla v29.4s, v9.4s, v5.s[1]\n"
"fmla v30.4s, v9.4s, v6.s[1]\n"
- "ldr q9, [x12, #0x10]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"fmla v26.4s, v10.4s, v2.s[2]\n"
"fmla v27.4s, v10.4s, v3.s[2]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
"fmla v28.4s, v10.4s, v4.s[2]\n"
"fmla v29.4s, v10.4s, v5.s[2]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
"fmla v30.4s, v10.4s, v6.s[2]\n"
- "ldr q10, [x12, #0x20]\n"
"fmla v24.4s, v11.4s, v0.s[3]\n"
- "ldr q0, [x28, #0x0]\n"
+ "ldr q0, [x27, #0x0]\n"
"fmla v25.4s, v11.4s, v1.s[3]\n"
- "ldr q1, [x27, #0x0]\n"
+ "ldr q1, [x26, #0x0]\n"
"fmla v26.4s, v11.4s, v2.s[3]\n"
- "ldr q2, [x26, #0x0]\n"
+ "ldr q2, [x25, #0x0]\n"
"fmla v27.4s, v11.4s, v3.s[3]\n"
- "ldr q3, [x25, #0x0]\n"
+ "ldr q3, [x24, #0x0]\n"
"fmla v28.4s, v11.4s, v4.s[3]\n"
- "ldr q4, [x24, #0x0]\n"
+ "ldr q4, [x23, #0x0]\n"
"fmla v29.4s, v11.4s, v5.s[3]\n"
- "ldr q5, [x23, #0x0]\n"
+ "ldr q5, [x22, #0x0]\n"
"fmla v30.4s, v11.4s, v6.s[3]\n"
- "ldr q6, [x22, #0x0]\n"
- "ldr q11, [x12, #0x30]\n"
+ "ldr q6, [x21, #0x0]\n"
"bge 138b\n"
"139:" // Height 7: Multiply loop: Single iteration only
"fmla v24.4s, v8.4s, v0.s[0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "sub x28, x28, #0x4\n"
"fmla v25.4s, v8.4s, v1.s[0]\n"
- "add x28, x28, #0x10\n"
+ "ldr q10, [x12, #0x20]\n"
"add x27, x27, #0x10\n"
"fmla v26.4s, v8.4s, v2.s[0]\n"
- "fmla v27.4s, v8.4s, v3.s[0]\n"
+ "ldr q11, [x12, #0x30]\n"
"add x26, x26, #0x10\n"
+ "fmla v27.4s, v8.4s, v3.s[0]\n"
+ "prfm pldl1keep, [x27, #0x80]\n"
"add x25, x25, #0x10\n"
"fmla v28.4s, v8.4s, v4.s[0]\n"
- "fmla v29.4s, v8.4s, v5.s[0]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"add x24, x24, #0x10\n"
+ "fmla v29.4s, v8.4s, v5.s[0]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"add x23, x23, #0x10\n"
"fmla v30.4s, v8.4s, v6.s[0]\n"
- "fmla v24.4s, v9.4s, v0.s[1]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"add x22, x22, #0x10\n"
- "sub x9, x9, #0x4\n"
+ "fmla v24.4s, v9.4s, v0.s[1]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "add x21, x21, #0x10\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "add x12, x12, #0x40\n"
"fmla v26.4s, v9.4s, v2.s[1]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
"fmla v27.4s, v9.4s, v3.s[1]\n"
"fmla v28.4s, v9.4s, v4.s[1]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"fmla v29.4s, v9.4s, v5.s[1]\n"
"fmla v30.4s, v9.4s, v6.s[1]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
- "add x12, x12, #0x40\n"
"fmla v26.4s, v10.4s, v2.s[2]\n"
"fmla v27.4s, v10.4s, v3.s[2]\n"
"fmla v28.4s, v10.4s, v4.s[2]\n"
@@ -1590,121 +1590,121 @@ void a64_hybrid_fp32_mla_8x4 (
"fmla v29.4s, v11.4s, v5.s[3]\n"
"fmla v30.4s, v11.4s, v6.s[3]\n"
"140:" // Height 7: Multiply loop: Main loop skip
- "cbz x9, 142f\n"
+ "cbz x28, 142f\n"
"141:" // Height 7: Multiply loop: Odd block loop
- "ldr s0, [x28], #0x4\n"
- "ldr s1, [x27], #0x4\n"
- "sub x9, x9, #0x1\n"
- "ldr s2, [x26], #0x4\n"
- "ldr s3, [x25], #0x4\n"
- "ldr s4, [x24], #0x4\n"
- "ldr s5, [x23], #0x4\n"
- "ldr s6, [x22], #0x4\n"
+ "ldr s0, [x27], #0x4\n"
+ "sub x28, x28, #0x1\n"
+ "ldr s1, [x26], #0x4\n"
+ "ldr s2, [x25], #0x4\n"
+ "ldr s3, [x24], #0x4\n"
+ "ldr s4, [x23], #0x4\n"
+ "ldr s5, [x22], #0x4\n"
+ "ldr s6, [x21], #0x4\n"
"ldr q12, [x12, #0x0]\n"
"fmla v24.4s, v12.4s, v0.s[0]\n"
+ "add x12, x12, #0x10\n"
"fmla v25.4s, v12.4s, v1.s[0]\n"
"fmla v26.4s, v12.4s, v2.s[0]\n"
"fmla v27.4s, v12.4s, v3.s[0]\n"
- "add x12, x12, #0x10\n"
"fmla v28.4s, v12.4s, v4.s[0]\n"
"fmla v29.4s, v12.4s, v5.s[0]\n"
"fmla v30.4s, v12.4s, v6.s[0]\n"
- "cbnz x9, 141b\n"
+ "cbnz x28, 141b\n"
"142:" // Height 7: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x10, x10, #0x1\n"
- "cmp x10, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x9, x9, #0x1\n"
+ "cmp x9, x19\n"
"bne 135b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "prfm pstl1keep, [x11, #0x0]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "prfm pstl1keep, [x27, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x10, #0x0]\n"
+ "add x26, x10, x19, LSL #2\n"
"prfm pstl1keep, [x26, #0x0]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
"prfm pstl1keep, [x25, #0x0]\n"
+ "add x24, x25, x19, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
"tbz %x[flags], #1, 143f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v17.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v16.4s }, [x20]\n"
- "fmin v24.4s, v24.4s, v17.4s\n"
- "fmin v25.4s, v25.4s, v17.4s\n"
- "fmin v26.4s, v26.4s, v17.4s\n"
- "fmin v27.4s, v27.4s, v17.4s\n"
- "fmin v28.4s, v28.4s, v17.4s\n"
- "fmin v29.4s, v29.4s, v17.4s\n"
- "fmin v30.4s, v30.4s, v17.4s\n"
- "fmax v24.4s, v24.4s, v16.4s\n"
- "fmax v25.4s, v25.4s, v16.4s\n"
- "fmax v26.4s, v26.4s, v16.4s\n"
- "fmax v27.4s, v27.4s, v16.4s\n"
- "fmax v28.4s, v28.4s, v16.4s\n"
- "fmax v29.4s, v29.4s, v16.4s\n"
- "fmax v30.4s, v30.4s, v16.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v16.4s }, [x19]\n"
+ "fmin v24.4s, v24.4s, v16.4s\n"
+ "fmin v25.4s, v25.4s, v16.4s\n"
+ "fmin v26.4s, v26.4s, v16.4s\n"
+ "fmin v27.4s, v27.4s, v16.4s\n"
+ "fmax v24.4s, v24.4s, v17.4s\n"
+ "fmax v25.4s, v25.4s, v17.4s\n"
+ "fmax v26.4s, v26.4s, v17.4s\n"
+ "fmax v27.4s, v27.4s, v17.4s\n"
+ "fmin v28.4s, v28.4s, v16.4s\n"
+ "fmin v29.4s, v29.4s, v16.4s\n"
+ "fmin v30.4s, v30.4s, v16.4s\n"
+ "fmax v28.4s, v28.4s, v17.4s\n"
+ "fmax v29.4s, v29.4s, v17.4s\n"
+ "fmax v30.4s, v30.4s, v17.4s\n"
"143:" // Height 7: No activation
"cmp x13, #0x4\n"
"bge 146f\n"
"tbz x13, #1, 144f\n"
- "str d24, [x11], #0x8\n"
- "str d25, [x27], #0x8\n"
- "str d26, [x26], #0x8\n"
- "str d27, [x25], #0x8\n"
- "str d28, [x24], #0x8\n"
- "str d29, [x23], #0x8\n"
- "str d30, [x22], #0x8\n"
+ "str d24, [x10], #0x8\n"
+ "str d25, [x26], #0x8\n"
+ "str d26, [x25], #0x8\n"
+ "str d27, [x24], #0x8\n"
+ "str d28, [x23], #0x8\n"
+ "str d29, [x22], #0x8\n"
+ "str d30, [x21], #0x8\n"
"tbz x13, #0, 145f\n"
- "st1 { v24.s }[2], [x11]\n"
- "st1 { v25.s }[2], [x27]\n"
- "st1 { v26.s }[2], [x26]\n"
- "st1 { v27.s }[2], [x25]\n"
- "st1 { v28.s }[2], [x24]\n"
- "st1 { v29.s }[2], [x23]\n"
- "st1 { v30.s }[2], [x22]\n"
+ "st1 { v24.s }[2], [x10]\n"
+ "st1 { v25.s }[2], [x26]\n"
+ "st1 { v26.s }[2], [x25]\n"
+ "st1 { v27.s }[2], [x24]\n"
+ "st1 { v28.s }[2], [x23]\n"
+ "st1 { v29.s }[2], [x22]\n"
+ "st1 { v30.s }[2], [x21]\n"
"b 145f\n"
"144:" // Height 7: Partial direct writeback: partial_1_0
- "str s24, [x11, #0x0]\n"
- "str s25, [x27, #0x0]\n"
- "str s26, [x26, #0x0]\n"
- "str s27, [x25, #0x0]\n"
- "str s28, [x24, #0x0]\n"
- "str s29, [x23, #0x0]\n"
- "str s30, [x22, #0x0]\n"
+ "str s24, [x10, #0x0]\n"
+ "str s25, [x26, #0x0]\n"
+ "str s26, [x25, #0x0]\n"
+ "str s27, [x24, #0x0]\n"
+ "str s28, [x23, #0x0]\n"
+ "str s29, [x22, #0x0]\n"
+ "str s30, [x21, #0x0]\n"
"145:" // Height 7: Partial direct writeback: Done
"b 147f\n"
"146:" // Height 7: Full writeback
- "str q24, [x11, #0x0]\n"
- "add x11, x11, #0x10\n"
- "str q25, [x27, #0x0]\n"
- "str q26, [x26, #0x0]\n"
- "str q27, [x25, #0x0]\n"
- "str q28, [x24, #0x0]\n"
- "str q29, [x23, #0x0]\n"
- "str q30, [x22, #0x0]\n"
+ "str q24, [x10, #0x0]\n"
+ "add x10, x10, #0x10\n"
+ "str q25, [x26, #0x0]\n"
+ "str q26, [x25, #0x0]\n"
+ "str q27, [x24, #0x0]\n"
+ "str q28, [x23, #0x0]\n"
+ "str q29, [x22, #0x0]\n"
+ "str q30, [x21, #0x0]\n"
"147:" // Height 7: Writeback done
"subs x13, x13, #0x4\n"
"bgt 128b\n"
"b 170f\n"
"148:" // Height 8
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x20\n"
- "mov x14, %x[bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x11, %x[bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "mov x10, %x[output_ptr]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x19, #0x20\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"149:" // Height 8: Column loop
- "cbz x14, 150f\n"
- "ldr q24, [x14, #0x0]\n"
+ "cbz x11, 150f\n"
+ "ldr q24, [x11, #0x0]\n"
"mov v25.16b, v24.16b\n"
+ "add x11, x11, #0x10\n"
"mov v26.16b, v24.16b\n"
- "add x14, x14, #0x10\n"
"mov v27.16b, v24.16b\n"
"mov v28.16b, v24.16b\n"
"mov v29.16b, v24.16b\n"
@@ -1713,58 +1713,58 @@ void a64_hybrid_fp32_mla_8x4 (
"b 155f\n"
"150:" // Height 8: no bias
"tbz %x[flags], #0, 154f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x13, #0x4\n"
- "add x21, x22, x20, LSL #2\n"
+ "add x26, x10, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"bge 153f\n"
"tbz x13, #1, 151f\n"
- "ldr d24, [x11], #0x8\n"
- "ldr d25, [x27], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d26, [x26], #0x8\n"
- "ldr d27, [x25], #0x8\n"
- "ldr d28, [x24], #0x8\n"
- "ldr d29, [x23], #0x8\n"
- "ldr d30, [x22], #0x8\n"
- "ldr d31, [x21], #0x8\n"
+ "ldr d24, [x10], #0x8\n"
+ "ldr d25, [x26], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d26, [x25], #0x8\n"
+ "ldr d27, [x24], #0x8\n"
+ "ldr d28, [x23], #0x8\n"
+ "ldr d29, [x22], #0x8\n"
+ "ldr d30, [x21], #0x8\n"
+ "ldr d31, [x20], #0x8\n"
"tbz x13, #0, 152f\n"
- "ld1 { v24.s }[2], [x11]\n"
- "ld1 { v25.s }[2], [x27]\n"
- "ld1 { v26.s }[2], [x26]\n"
- "ld1 { v27.s }[2], [x25]\n"
- "ld1 { v28.s }[2], [x24]\n"
- "ld1 { v29.s }[2], [x23]\n"
- "ld1 { v30.s }[2], [x22]\n"
- "ld1 { v31.s }[2], [x21]\n"
+ "ld1 { v24.s }[2], [x10]\n"
+ "ld1 { v25.s }[2], [x26]\n"
+ "ld1 { v26.s }[2], [x25]\n"
+ "ld1 { v27.s }[2], [x24]\n"
+ "ld1 { v28.s }[2], [x23]\n"
+ "ld1 { v29.s }[2], [x22]\n"
+ "ld1 { v30.s }[2], [x21]\n"
+ "ld1 { v31.s }[2], [x20]\n"
"b 152f\n"
"151:" // Height 8: Partial accumulate: partial_1_0
- "ldr s24, [x11, #0x0]\n"
- "ldr s25, [x27, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s26, [x26, #0x0]\n"
- "ldr s27, [x25, #0x0]\n"
- "ldr s28, [x24, #0x0]\n"
- "ldr s29, [x23, #0x0]\n"
- "ldr s30, [x22, #0x0]\n"
- "ldr s31, [x21, #0x0]\n"
+ "ldr s24, [x10, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s25, [x26, #0x0]\n"
+ "ldr s26, [x25, #0x0]\n"
+ "ldr s27, [x24, #0x0]\n"
+ "ldr s28, [x23, #0x0]\n"
+ "ldr s29, [x22, #0x0]\n"
+ "ldr s30, [x21, #0x0]\n"
+ "ldr s31, [x20, #0x0]\n"
"152:" // Height 8: Partial accumulate: Done
- "sub x11, x11, x20\n"
+ "sub x10, x10, x19\n"
"b 155f\n"
"153:" // Height 8: full accumulate
- "ldr q24, [x11, #0x0]\n"
- "ldr q25, [x27, #0x0]\n"
- "ldr q26, [x26, #0x0]\n"
- "ldr q27, [x25, #0x0]\n"
- "ldr q28, [x24, #0x0]\n"
- "ldr q29, [x23, #0x0]\n"
- "ldr q30, [x22, #0x0]\n"
- "ldr q31, [x21, #0x0]\n"
+ "ldr q24, [x10, #0x0]\n"
+ "ldr q25, [x26, #0x0]\n"
+ "ldr q26, [x25, #0x0]\n"
+ "ldr q27, [x24, #0x0]\n"
+ "ldr q28, [x23, #0x0]\n"
+ "ldr q29, [x22, #0x0]\n"
+ "ldr q30, [x21, #0x0]\n"
+ "ldr q31, [x20, #0x0]\n"
"b 155f\n"
"154:" // Height 8: no accumulate
"movi v24.16b, #0x0\n"
@@ -1776,161 +1776,161 @@ void a64_hybrid_fp32_mla_8x4 (
"movi v30.16b, #0x0\n"
"movi v31.16b, #0x0\n"
"155:" // Height 8: setup done
- "mov x10, #0x0\n"
+ "mov x9, #0x0\n"
"156:" // Height 8: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w28, [x20, x9, LSL #0x2]\n"
"tbz %x[flags], #3, 157f\n"
- "ldr x21, [%x[input_ptr], x10, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x28, [x21, #0x0]\n"
- "ldr x27, [x21, #0x8]\n"
- "ldr x26, [x21, #0x10]\n"
- "ldr x25, [x21, #0x18]\n"
- "ldr x24, [x21, #0x20]\n"
- "ldr x23, [x21, #0x28]\n"
- "ldr x22, [x21, #0x30]\n"
- "ldr x21, [x21, #0x38]\n"
- "cbnz x10, 158f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x28, x28, x20, LSL #2\n"
- "add x27, x27, x20, LSL #2\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x22, x22, x20, LSL #2\n"
- "add x21, x21, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x27, [x20, #0x0]\n"
+ "ldr x26, [x20, #0x8]\n"
+ "ldr x25, [x20, #0x10]\n"
+ "ldr x24, [x20, #0x18]\n"
+ "ldr x23, [x20, #0x20]\n"
+ "ldr x22, [x20, #0x28]\n"
+ "ldr x21, [x20, #0x30]\n"
+ "ldr x20, [x20, #0x38]\n"
+ "cbnz x9, 158f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x27, x27, x19, LSL #2\n"
+ "add x26, x26, x19, LSL #2\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "add x20, x20, x19, LSL #2\n"
"b 158f\n"
"157:" // Height 8: setup direct input
- "mov x28, %x[input_ptr]\n"
- "add x27, x28, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "mov x27, %x[input_ptr]\n"
+ "add x26, x27, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"158:" // Height 8: input setup done
- "cmp x9, #0x4\n"
+ "cmp x28, #0x4\n"
"blt 161f\n"
- "ldr q0, [x28, #0x0]\n"
- "ldr q1, [x27, #0x0]\n"
- "cmp x9, #0x8\n"
- "ldr q2, [x26, #0x0]\n"
- "ldr q3, [x25, #0x0]\n"
- "ldr q4, [x24, #0x0]\n"
- "ldr q5, [x23, #0x0]\n"
- "ldr q6, [x22, #0x0]\n"
- "ldr q7, [x21, #0x0]\n"
+ "ldr q0, [x27, #0x0]\n"
+ "ldr q1, [x26, #0x0]\n"
+ "cmp x28, #0x8\n"
+ "ldr q2, [x25, #0x0]\n"
+ "ldr q3, [x24, #0x0]\n"
+ "ldr q4, [x23, #0x0]\n"
+ "ldr q5, [x22, #0x0]\n"
+ "ldr q6, [x21, #0x0]\n"
+ "ldr q7, [x20, #0x0]\n"
"ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
"blt 160f\n"
"159:" // Height 8: Multiply loop: Main loop head
"fmla v24.4s, v8.4s, v0.s[0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "add x27, x27, #0x10\n"
"fmla v25.4s, v8.4s, v1.s[0]\n"
- "sub x9, x9, #0x4\n"
- "add x28, x28, #0x10\n"
+ "ldr q10, [x12, #0x20]\n"
+ "add x26, x26, #0x10\n"
"fmla v26.4s, v8.4s, v2.s[0]\n"
+ "ldr q11, [x12, #0x30]\n"
+ "add x25, x25, #0x10\n"
"fmla v27.4s, v8.4s, v3.s[0]\n"
- "add x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x27, #0x80]\n"
+ "add x24, x24, #0x10\n"
"fmla v28.4s, v8.4s, v4.s[0]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "add x23, x23, #0x10\n"
"fmla v29.4s, v8.4s, v5.s[0]\n"
- "add x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x22, x22, #0x10\n"
"fmla v30.4s, v8.4s, v6.s[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "add x21, x21, #0x10\n"
"fmla v31.4s, v8.4s, v7.s[0]\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "add x20, x20, #0x10\n"
"fmla v24.4s, v9.4s, v0.s[1]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "sub x28, x28, #0x4\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
- "add x21, x21, #0x10\n"
- "cmp x9, #0x8\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
+ "cmp x28, #0x8\n"
"fmla v26.4s, v9.4s, v2.s[1]\n"
- "fmla v27.4s, v9.4s, v3.s[1]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
"add x12, x12, #0x40\n"
+ "fmla v27.4s, v9.4s, v3.s[1]\n"
"ldr q8, [x12, #0x0]\n"
"fmla v28.4s, v9.4s, v4.s[1]\n"
"fmla v29.4s, v9.4s, v5.s[1]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
"fmla v30.4s, v9.4s, v6.s[1]\n"
"fmla v31.4s, v9.4s, v7.s[1]\n"
- "ldr q9, [x12, #0x10]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"fmla v26.4s, v10.4s, v2.s[2]\n"
"fmla v27.4s, v10.4s, v3.s[2]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
"fmla v28.4s, v10.4s, v4.s[2]\n"
"fmla v29.4s, v10.4s, v5.s[2]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
"fmla v30.4s, v10.4s, v6.s[2]\n"
"fmla v31.4s, v10.4s, v7.s[2]\n"
- "ldr q10, [x12, #0x20]\n"
"fmla v24.4s, v11.4s, v0.s[3]\n"
- "ldr q0, [x28, #0x0]\n"
+ "ldr q0, [x27, #0x0]\n"
"fmla v25.4s, v11.4s, v1.s[3]\n"
- "ldr q1, [x27, #0x0]\n"
+ "ldr q1, [x26, #0x0]\n"
"fmla v26.4s, v11.4s, v2.s[3]\n"
- "ldr q2, [x26, #0x0]\n"
+ "ldr q2, [x25, #0x0]\n"
"fmla v27.4s, v11.4s, v3.s[3]\n"
- "ldr q3, [x25, #0x0]\n"
+ "ldr q3, [x24, #0x0]\n"
"fmla v28.4s, v11.4s, v4.s[3]\n"
- "ldr q4, [x24, #0x0]\n"
+ "ldr q4, [x23, #0x0]\n"
"fmla v29.4s, v11.4s, v5.s[3]\n"
- "ldr q5, [x23, #0x0]\n"
+ "ldr q5, [x22, #0x0]\n"
"fmla v30.4s, v11.4s, v6.s[3]\n"
- "ldr q6, [x22, #0x0]\n"
+ "ldr q6, [x21, #0x0]\n"
"fmla v31.4s, v11.4s, v7.s[3]\n"
- "ldr q7, [x21, #0x0]\n"
- "ldr q11, [x12, #0x30]\n"
+ "ldr q7, [x20, #0x0]\n"
"bge 159b\n"
"160:" // Height 8: Multiply loop: Single iteration only
"fmla v24.4s, v8.4s, v0.s[0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "sub x28, x28, #0x4\n"
"fmla v25.4s, v8.4s, v1.s[0]\n"
- "add x28, x28, #0x10\n"
+ "ldr q10, [x12, #0x20]\n"
"add x27, x27, #0x10\n"
"fmla v26.4s, v8.4s, v2.s[0]\n"
- "fmla v27.4s, v8.4s, v3.s[0]\n"
+ "ldr q11, [x12, #0x30]\n"
"add x26, x26, #0x10\n"
+ "fmla v27.4s, v8.4s, v3.s[0]\n"
+ "prfm pldl1keep, [x27, #0x80]\n"
"add x25, x25, #0x10\n"
"fmla v28.4s, v8.4s, v4.s[0]\n"
- "fmla v29.4s, v8.4s, v5.s[0]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"add x24, x24, #0x10\n"
+ "fmla v29.4s, v8.4s, v5.s[0]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"add x23, x23, #0x10\n"
"fmla v30.4s, v8.4s, v6.s[0]\n"
- "fmla v31.4s, v8.4s, v7.s[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"add x22, x22, #0x10\n"
+ "fmla v31.4s, v8.4s, v7.s[0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"add x21, x21, #0x10\n"
"fmla v24.4s, v9.4s, v0.s[1]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "add x20, x20, #0x10\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
- "sub x9, x9, #0x4\n"
- "prfm pldl1keep, [x28, #0x80]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
+ "add x12, x12, #0x40\n"
"fmla v26.4s, v9.4s, v2.s[1]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
"fmla v27.4s, v9.4s, v3.s[1]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"fmla v28.4s, v9.4s, v4.s[1]\n"
"fmla v29.4s, v9.4s, v5.s[1]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"fmla v30.4s, v9.4s, v6.s[1]\n"
"fmla v31.4s, v9.4s, v7.s[1]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
- "add x12, x12, #0x40\n"
"fmla v26.4s, v10.4s, v2.s[2]\n"
"fmla v27.4s, v10.4s, v3.s[2]\n"
"fmla v28.4s, v10.4s, v4.s[2]\n"
@@ -1946,132 +1946,132 @@ void a64_hybrid_fp32_mla_8x4 (
"fmla v30.4s, v11.4s, v6.s[3]\n"
"fmla v31.4s, v11.4s, v7.s[3]\n"
"161:" // Height 8: Multiply loop: Main loop skip
- "cbz x9, 163f\n"
+ "cbz x28, 163f\n"
"162:" // Height 8: Multiply loop: Odd block loop
- "ldr s0, [x28], #0x4\n"
- "ldr s1, [x27], #0x4\n"
- "sub x9, x9, #0x1\n"
- "ldr s2, [x26], #0x4\n"
- "ldr s3, [x25], #0x4\n"
- "ldr s4, [x24], #0x4\n"
- "ldr s5, [x23], #0x4\n"
- "ldr s6, [x22], #0x4\n"
- "ldr s7, [x21], #0x4\n"
+ "ldr s0, [x27], #0x4\n"
+ "sub x28, x28, #0x1\n"
+ "ldr s1, [x26], #0x4\n"
+ "ldr s2, [x25], #0x4\n"
+ "ldr s3, [x24], #0x4\n"
+ "ldr s4, [x23], #0x4\n"
+ "ldr s5, [x22], #0x4\n"
+ "ldr s6, [x21], #0x4\n"
+ "ldr s7, [x20], #0x4\n"
"ldr q12, [x12, #0x0]\n"
"fmla v24.4s, v12.4s, v0.s[0]\n"
- "fmla v25.4s, v12.4s, v1.s[0]\n"
"add x12, x12, #0x10\n"
+ "fmla v25.4s, v12.4s, v1.s[0]\n"
"fmla v26.4s, v12.4s, v2.s[0]\n"
"fmla v27.4s, v12.4s, v3.s[0]\n"
"fmla v28.4s, v12.4s, v4.s[0]\n"
"fmla v29.4s, v12.4s, v5.s[0]\n"
"fmla v30.4s, v12.4s, v6.s[0]\n"
"fmla v31.4s, v12.4s, v7.s[0]\n"
- "cbnz x9, 162b\n"
+ "cbnz x28, 162b\n"
"163:" // Height 8: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x10, x10, #0x1\n"
- "cmp x10, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x9, x9, #0x1\n"
+ "cmp x9, x19\n"
"bne 156b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "prfm pstl1keep, [x11, #0x0]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "prfm pstl1keep, [x27, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x10, #0x0]\n"
+ "add x26, x10, x19, LSL #2\n"
"prfm pstl1keep, [x26, #0x0]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
"prfm pstl1keep, [x25, #0x0]\n"
+ "add x24, x25, x19, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
- "add x21, x22, x20, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #2\n"
"prfm pstl1keep, [x21, #0x0]\n"
+ "add x20, x21, x19, LSL #2\n"
+ "prfm pstl1keep, [x20, #0x0]\n"
"tbz %x[flags], #1, 164f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v17.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v16.4s }, [x20]\n"
- "fmin v24.4s, v24.4s, v17.4s\n"
- "fmin v25.4s, v25.4s, v17.4s\n"
- "fmin v26.4s, v26.4s, v17.4s\n"
- "fmin v27.4s, v27.4s, v17.4s\n"
- "fmin v28.4s, v28.4s, v17.4s\n"
- "fmin v29.4s, v29.4s, v17.4s\n"
- "fmin v30.4s, v30.4s, v17.4s\n"
- "fmin v31.4s, v31.4s, v17.4s\n"
- "fmax v24.4s, v24.4s, v16.4s\n"
- "fmax v25.4s, v25.4s, v16.4s\n"
- "fmax v26.4s, v26.4s, v16.4s\n"
- "fmax v27.4s, v27.4s, v16.4s\n"
- "fmax v28.4s, v28.4s, v16.4s\n"
- "fmax v29.4s, v29.4s, v16.4s\n"
- "fmax v30.4s, v30.4s, v16.4s\n"
- "fmax v31.4s, v31.4s, v16.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v16.4s }, [x19]\n"
+ "fmin v24.4s, v24.4s, v16.4s\n"
+ "fmin v25.4s, v25.4s, v16.4s\n"
+ "fmin v26.4s, v26.4s, v16.4s\n"
+ "fmin v27.4s, v27.4s, v16.4s\n"
+ "fmax v24.4s, v24.4s, v17.4s\n"
+ "fmax v25.4s, v25.4s, v17.4s\n"
+ "fmax v26.4s, v26.4s, v17.4s\n"
+ "fmax v27.4s, v27.4s, v17.4s\n"
+ "fmin v28.4s, v28.4s, v16.4s\n"
+ "fmin v29.4s, v29.4s, v16.4s\n"
+ "fmin v30.4s, v30.4s, v16.4s\n"
+ "fmax v28.4s, v28.4s, v17.4s\n"
+ "fmax v29.4s, v29.4s, v17.4s\n"
+ "fmax v30.4s, v30.4s, v17.4s\n"
+ "fmin v31.4s, v31.4s, v16.4s\n"
+ "fmax v31.4s, v31.4s, v17.4s\n"
"164:" // Height 8: No activation
"cmp x13, #0x4\n"
"bge 167f\n"
"tbz x13, #1, 165f\n"
- "str d24, [x11], #0x8\n"
- "str d25, [x27], #0x8\n"
- "str d26, [x26], #0x8\n"
- "str d27, [x25], #0x8\n"
- "str d28, [x24], #0x8\n"
- "str d29, [x23], #0x8\n"
- "str d30, [x22], #0x8\n"
- "str d31, [x21], #0x8\n"
+ "str d24, [x10], #0x8\n"
+ "str d25, [x26], #0x8\n"
+ "str d26, [x25], #0x8\n"
+ "str d27, [x24], #0x8\n"
+ "str d28, [x23], #0x8\n"
+ "str d29, [x22], #0x8\n"
+ "str d30, [x21], #0x8\n"
+ "str d31, [x20], #0x8\n"
"tbz x13, #0, 166f\n"
- "st1 { v24.s }[2], [x11]\n"
- "st1 { v25.s }[2], [x27]\n"
- "st1 { v26.s }[2], [x26]\n"
- "st1 { v27.s }[2], [x25]\n"
- "st1 { v28.s }[2], [x24]\n"
- "st1 { v29.s }[2], [x23]\n"
- "st1 { v30.s }[2], [x22]\n"
- "st1 { v31.s }[2], [x21]\n"
+ "st1 { v24.s }[2], [x10]\n"
+ "st1 { v25.s }[2], [x26]\n"
+ "st1 { v26.s }[2], [x25]\n"
+ "st1 { v27.s }[2], [x24]\n"
+ "st1 { v28.s }[2], [x23]\n"
+ "st1 { v29.s }[2], [x22]\n"
+ "st1 { v30.s }[2], [x21]\n"
+ "st1 { v31.s }[2], [x20]\n"
"b 166f\n"
"165:" // Height 8: Partial direct writeback: partial_1_0
- "str s24, [x11, #0x0]\n"
- "str s25, [x27, #0x0]\n"
- "str s26, [x26, #0x0]\n"
- "str s27, [x25, #0x0]\n"
- "str s28, [x24, #0x0]\n"
- "str s29, [x23, #0x0]\n"
- "str s30, [x22, #0x0]\n"
- "str s31, [x21, #0x0]\n"
+ "str s24, [x10, #0x0]\n"
+ "str s25, [x26, #0x0]\n"
+ "str s26, [x25, #0x0]\n"
+ "str s27, [x24, #0x0]\n"
+ "str s28, [x23, #0x0]\n"
+ "str s29, [x22, #0x0]\n"
+ "str s30, [x21, #0x0]\n"
+ "str s31, [x20, #0x0]\n"
"166:" // Height 8: Partial direct writeback: Done
"b 168f\n"
"167:" // Height 8: Full writeback
- "str q24, [x11, #0x0]\n"
- "add x11, x11, #0x10\n"
- "str q25, [x27, #0x0]\n"
- "str q26, [x26, #0x0]\n"
- "str q27, [x25, #0x0]\n"
- "str q28, [x24, #0x0]\n"
- "str q29, [x23, #0x0]\n"
- "str q30, [x22, #0x0]\n"
- "str q31, [x21, #0x0]\n"
+ "str q24, [x10, #0x0]\n"
+ "add x10, x10, #0x10\n"
+ "str q25, [x26, #0x0]\n"
+ "str q26, [x25, #0x0]\n"
+ "str q27, [x24, #0x0]\n"
+ "str q28, [x23, #0x0]\n"
+ "str q29, [x22, #0x0]\n"
+ "str q30, [x21, #0x0]\n"
+ "str q31, [x20, #0x0]\n"
"168:" // Height 8: Writeback done
"subs x13, x13, #0x4\n"
"bgt 149b\n"
"subs %x[M], %x[M], #0x8\n"
"beq 170f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 169f\n"
- "add x21, x21, #0x8\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x8\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"169:" // Update direct input
- "mov x20, #0x20\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x20\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"170:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v16", "v17", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v16", "v17", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
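
The hunks above all follow one mechanical pattern: with x19 restored to the kernel's scratch set, every input/output pointer register shifts down by one (x28 becomes x27, ..., x20 becomes x19) and the inline-asm clobber list gains "x19" so the compiler preserves it. Below is a minimal illustrative sketch of that clobber-list discipline — not part of this patch and not taken from the library; toy_accumulate and its arguments are hypothetical:

#ifdef __aarch64__
#include <cstdint>

// Illustrative sketch only. Every general-purpose register the assembly
// writes (here x19) must be named as a clobber so the compiler saves and
// restores it around the block.
void toy_accumulate(const float *in, float *out, uint64_t n)
{
    __asm__ __volatile__(
        "cbz %x[n], 2f\n"                   // nothing to do for n == 0
        "mov x19, #0x0\n"                   // x19: element counter
        "1:\n"
        "ldr s0, [%x[in], x19, LSL #2]\n"   // load in[i]
        "ldr s1, [%x[out], x19, LSL #2]\n"  // load out[i]
        "fadd s1, s1, s0\n"                 // out[i] += in[i]
        "str s1, [%x[out], x19, LSL #2]\n"
        "add x19, x19, #0x1\n"
        "cmp x19, %x[n]\n"
        "blt 1b\n"
        "2:\n"
        :
        : [in] "r" (in), [out] "r" (out), [n] "r" (n)
        : "cc", "memory", "v0", "v1", "x19" // x19 is callee-saved (AAPCS64),
                                            // so listing it forces a spill
    );
}
#endif // __aarch64__

Because x19–x28 are callee-saved under AAPCS64, naming x19 in the clobber list is what makes the compiler preserve the caller's value around the asm block — which is exactly why each reverted kernel's clobber list above regains "x19" alongside the register renumbering.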
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32bf16fp32_mmla_4x24/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32bf16fp32_mmla_4x24/generic.cpp
index a0ea96822a..76c2688291 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32bf16fp32_mmla_4x24/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32bf16fp32_mmla_4x24/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -100,27 +100,27 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"cmp %x[M], #0x2\n"
"bgt 87f\n"
"beq 44f\n"
- "mov x10, %x[bias]\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "mov x27, %x[bias]\n"
+ "mov x26, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "cbz x10, 3f\n"
- "ldr q8, [x10, #0x0]\n"
- "ldr q9, [x10, #0x10]\n"
+ "cbz x27, 3f\n"
+ "ldr q8, [x27, #0x0]\n"
"zip2 v14.2d, v8.2d, v8.2d\n"
+ "ldr q9, [x27, #0x10]\n"
"zip1 v8.2d, v8.2d, v8.2d\n"
- "ldr q10, [x10, #0x20]\n"
- "ldr q11, [x10, #0x30]\n"
+ "ldr q10, [x27, #0x20]\n"
+ "ldr q11, [x27, #0x30]\n"
"zip2 v15.2d, v9.2d, v9.2d\n"
+ "ldr q12, [x27, #0x40]\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
- "ldr q12, [x10, #0x40]\n"
- "ldr q13, [x10, #0x50]\n"
+ "ldr q13, [x27, #0x50]\n"
+ "add x27, x27, #0x60\n"
"zip2 v16.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
"zip2 v17.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
- "add x10, x10, #0x60\n"
"zip2 v18.2d, v12.2d, v12.2d\n"
"zip1 v12.2d, v12.2d, v12.2d\n"
"zip2 v19.2d, v13.2d, v13.2d\n"
@@ -131,98 +131,98 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"cmp x9, #0x18\n"
"bge 16f\n"
"tbz x9, #4, 7f\n"
- "ld1 { v9.4s }, [x27], #0x10\n"
- "ld1 { v10.4s }, [x27], #0x10\n"
- "ld1 { v11.4s }, [x27], #0x10\n"
- "ld1 { v12.4s }, [x27], #0x10\n"
+ "ld1 { v9.4s }, [x26], #0x10\n"
+ "ld1 { v10.4s }, [x26], #0x10\n"
+ "ld1 { v11.4s }, [x26], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
"tbz x9, #2, 5f\n"
- "ld1 { v13.4s }, [x27], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
"tbz x9, #1, 4f\n"
- "ldr d20, [x27], #0x8\n"
- "mov x20, #0x58\n"
+ "mov x19, #0x58\n"
+ "ldr d20, [x26], #0x8\n"
"tbz x9, #0, 15f\n"
- "ld1 { v20.s }[2], [x27]\n"
+ "ld1 { v20.s }[2], [x26]\n"
"b 15f\n"
"4:" // Height 1: Partial accumulate: partial_1_20
- "mov x20, #0x50\n"
+ "mov x19, #0x50\n"
"tbz x9, #0, 15f\n"
- "ldr s20, [x27, #0x0]\n"
+ "ldr s20, [x26, #0x0]\n"
"b 15f\n"
"5:" // Height 1: Partial accumulate: partial_2_16
"tbz x9, #1, 6f\n"
- "ldr d13, [x27], #0x8\n"
- "mov x20, #0x48\n"
+ "ldr d13, [x26], #0x8\n"
+ "mov x19, #0x48\n"
"tbz x9, #0, 15f\n"
- "ld1 { v13.s }[2], [x27]\n"
+ "ld1 { v13.s }[2], [x26]\n"
"b 15f\n"
"6:" // Height 1: Partial accumulate: partial_1_16
- "mov x20, #0x40\n"
+ "mov x19, #0x40\n"
"tbz x9, #0, 15f\n"
- "ldr s13, [x27, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
"b 15f\n"
"7:" // Height 1: Partial accumulate: partial_8_0
"tbz x9, #3, 11f\n"
- "ld1 { v9.4s }, [x27], #0x10\n"
- "ld1 { v10.4s }, [x27], #0x10\n"
+ "ld1 { v9.4s }, [x26], #0x10\n"
+ "ld1 { v10.4s }, [x26], #0x10\n"
"tbz x9, #2, 9f\n"
- "ld1 { v11.4s }, [x27], #0x10\n"
+ "ld1 { v11.4s }, [x26], #0x10\n"
"tbz x9, #1, 8f\n"
- "ldr d12, [x27], #0x8\n"
- "mov x20, #0x38\n"
+ "mov x19, #0x38\n"
+ "ldr d12, [x26], #0x8\n"
"tbz x9, #0, 15f\n"
- "ld1 { v12.s }[2], [x27]\n"
+ "ld1 { v12.s }[2], [x26]\n"
"b 15f\n"
"8:" // Height 1: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x9, #0, 15f\n"
- "ldr s12, [x27, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
"b 15f\n"
"9:" // Height 1: Partial accumulate: partial_2_8
"tbz x9, #1, 10f\n"
- "ldr d11, [x27], #0x8\n"
- "mov x20, #0x28\n"
+ "ldr d11, [x26], #0x8\n"
+ "mov x19, #0x28\n"
"tbz x9, #0, 15f\n"
- "ld1 { v11.s }[2], [x27]\n"
+ "ld1 { v11.s }[2], [x26]\n"
"b 15f\n"
"10:" // Height 1: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x9, #0, 15f\n"
- "ldr s11, [x27, #0x0]\n"
+ "ldr s11, [x26, #0x0]\n"
"b 15f\n"
"11:" // Height 1: Partial accumulate: partial_4_0
"tbz x9, #2, 13f\n"
- "ld1 { v9.4s }, [x27], #0x10\n"
+ "ld1 { v9.4s }, [x26], #0x10\n"
"tbz x9, #1, 12f\n"
- "ldr d10, [x27], #0x8\n"
- "mov x20, #0x18\n"
+ "ldr d10, [x26], #0x8\n"
+ "mov x19, #0x18\n"
"tbz x9, #0, 15f\n"
- "ld1 { v10.s }[2], [x27]\n"
+ "ld1 { v10.s }[2], [x26]\n"
"b 15f\n"
"12:" // Height 1: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x9, #0, 15f\n"
- "ldr s10, [x27, #0x0]\n"
+ "ldr s10, [x26, #0x0]\n"
"b 15f\n"
"13:" // Height 1: Partial accumulate: partial_2_0
"tbz x9, #1, 14f\n"
- "ldr d9, [x27], #0x8\n"
- "mov x20, #0x8\n"
+ "ldr d9, [x26], #0x8\n"
+ "mov x19, #0x8\n"
"tbz x9, #0, 15f\n"
- "ld1 { v9.s }[2], [x27]\n"
+ "ld1 { v9.s }[2], [x26]\n"
"b 15f\n"
"14:" // Height 1: Partial accumulate: partial_1_0
- "ldr s9, [x27, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr s9, [x26, #0x0]\n"
+ "mov x19, #0x0\n"
"15:" // Height 1: Partial accumulate: Done
- "sub x27, x27, x20\n"
+ "sub x26, x26, x19\n"
"b 17f\n"
"16:" // Height 1: full accumulate
- "ldr q9, [x27, #0x0]\n"
- "ldr q10, [x27, #0x10]\n"
- "ldr q11, [x27, #0x20]\n"
- "ldr q12, [x27, #0x30]\n"
- "ldr q13, [x27, #0x40]\n"
- "ldr q20, [x27, #0x50]\n"
+ "ldr q9, [x26, #0x0]\n"
+ "ldr q10, [x26, #0x10]\n"
+ "ldr q11, [x26, #0x20]\n"
+ "ldr q12, [x26, #0x30]\n"
+ "ldr q13, [x26, #0x40]\n"
+ "ldr q20, [x26, #0x50]\n"
"17:" // Height 1: MMLA fixup
"zip1 v8.2d, v9.2d, v14.2d\n"
"zip2 v14.2d, v9.2d, v14.2d\n"
@@ -251,264 +251,264 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"movi v18.16b, #0x0\n"
"movi v19.16b, #0x0\n"
"19:" // Height 1: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"20:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 21f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "cbnz x26, 22f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "cbnz x25, 22f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19, LSL #2\n"
"b 22f\n"
"21:" // Height 1: setup direct input
- "mov x24, %x[input_ptr]\n"
+ "mov x23, %x[input_ptr]\n"
"22:" // Height 1: input setup done
- "cmp x25, #0x4\n"
+ "cmp x24, #0x4\n"
"blt 25f\n"
- "ld1 { v0.4s }, [x24], #0x10\n"
- "ldr q4, [x28, #0x0]\n"
- "cmp x25, #0x8\n"
- "ldr q5, [x28, #0x10]\n"
- "ldr q6, [x28, #0x20]\n"
- "ldr q7, [x28, #0x30]\n"
+ "ld1 { v0.4s }, [x23], #0x10\n"
+ "cmp x24, #0x8\n"
"blt 24f\n"
"23:" // Height 1: Multiply loop: Main loop head
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ "ldr q4, [x28, #0x0]\n"
+ "sub x24, x24, #0x4\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- "ldr q4, [x28, #0x40]\n"
+ "ldr q5, [x28, #0x10]\n"
+ "cmp x24, #0x8\n"
".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
- "ldr q5, [x28, #0x50]\n"
+ "ldr q6, [x28, #0x20]\n"
+ "ldr q7, [x28, #0x30]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- "ldr q6, [x28, #0x60]\n"
+ "ldr q4, [x28, #0x40]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
- "ldr q7, [x28, #0x70]\n"
+ "ldr q5, [x28, #0x50]\n"
+ "ldr q6, [x28, #0x60]\n"
".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
+ "ldr q7, [x28, #0x70]\n"
"ldr q4, [x28, #0x80]\n"
".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
- "ldr q5, [x28, #0x90]\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
+ "ldr q5, [x28, #0x90]\n"
"ldr q6, [x28, #0xa0]\n"
".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
"ldr q7, [x28, #0xb0]\n"
- "sub x25, x25, #0x4\n"
- "cmp x25, #0x8\n"
- ".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
"add x28, x28, #0xc0\n"
- "ldr q4, [x28, #0x0]\n"
+ ".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
- "ldr q5, [x28, #0x10]\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
- "ldr q6, [x28, #0x20]\n"
".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
- "ldr q7, [x28, #0x30]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "ld1 { v0.4s }, [x24], #0x10\n"
+ "ld1 { v0.4s }, [x23], #0x10\n"
"bge 23b\n"
"24:" // Height 1: Multiply loop: Single iteration only
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ "ldr q4, [x28, #0x0]\n"
+ "sub x24, x24, #0x4\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- "ldr q4, [x28, #0x40]\n"
+ "ldr q5, [x28, #0x10]\n"
+ "ldr q6, [x28, #0x20]\n"
".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
- "ldr q5, [x28, #0x50]\n"
+ "ldr q7, [x28, #0x30]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- "ldr q6, [x28, #0x60]\n"
+ "ldr q4, [x28, #0x40]\n"
+ "ldr q5, [x28, #0x50]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
+ "ldr q6, [x28, #0x60]\n"
"ldr q7, [x28, #0x70]\n"
".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
- "ldr q4, [x28, #0x80]\n"
".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
+ "ldr q4, [x28, #0x80]\n"
"ldr q5, [x28, #0x90]\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
"ldr q6, [x28, #0xa0]\n"
".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
"ldr q7, [x28, #0xb0]\n"
- "sub x25, x25, #0x4\n"
+ "add x28, x28, #0xc0\n"
".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "add x28, x28, #0xc0\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
"25:" // Height 1: Multiply loop: Main loop skip
- "cbz x25, 28f\n"
- "cbz x25, 28f\n"
- "tbz x25, #1, 26f\n"
- "ldr d0, [x24], #0x8\n"
- "tbz x25, #0, 27f\n"
- "ld1 { v0.s }[2], [x24]\n"
+ "cbz x24, 28f\n"
+ "cbz x24, 28f\n"
+ "tbz x24, #1, 26f\n"
+ "ldr d0, [x23], #0x8\n"
+ "tbz x24, #0, 27f\n"
+ "ld1 { v0.s }[2], [x23]\n"
"b 27f\n"
"26:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
- "ldr s0, [x24, #0x0]\n"
+ "ldr s0, [x23, #0x0]\n"
"27:" // Height 1: Multiply loop: Ragged operand read: Done
+ ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
"ldr q4, [x28, #0x0]\n"
"ldr q5, [x28, #0x10]\n"
- ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
"ldr q6, [x28, #0x20]\n"
- "ldr q7, [x28, #0x30]\n"
".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
- ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
+ "ldr q7, [x28, #0x30]\n"
"ldr q4, [x28, #0x40]\n"
+ ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
"ldr q5, [x28, #0x50]\n"
+ "ldr q6, [x28, #0x60]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
- "ldr q6, [x28, #0x60]\n"
"ldr q7, [x28, #0x70]\n"
- ".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
- ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
"ldr q4, [x28, #0x80]\n"
+ ".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
"ldr q5, [x28, #0x90]\n"
- ".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
- ".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
+ ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
"ldr q6, [x28, #0xa0]\n"
+ ".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
"ldr q7, [x28, #0xb0]\n"
+ "add x28, x28, #0xc0\n"
+ ".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
- "add x28, x28, #0xc0\n"
"28:" // Height 1: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 20b\n"
"uzp1 v8.2d, v8.2d, v14.2d\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
"uzp1 v9.2d, v9.2d, v15.2d\n"
- "prfm pstl1keep, [x27, #0x0]\n"
"uzp1 v10.2d, v10.2d, v16.2d\n"
"uzp1 v11.2d, v11.2d, v17.2d\n"
"uzp1 v12.2d, v12.2d, v18.2d\n"
"uzp1 v13.2d, v13.2d, v19.2d\n"
"tbz %x[flags], #1, 29f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v8.4s, v8.4s, v1.4s\n"
- "fmin v9.4s, v9.4s, v1.4s\n"
- "fmin v10.4s, v10.4s, v1.4s\n"
- "fmin v11.4s, v11.4s, v1.4s\n"
- "fmin v12.4s, v12.4s, v1.4s\n"
- "fmin v13.4s, v13.4s, v1.4s\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v8.4s, v8.4s, v0.4s\n"
+ "fmin v9.4s, v9.4s, v0.4s\n"
+ "fmin v10.4s, v10.4s, v0.4s\n"
+ "fmin v11.4s, v11.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v0.4s\n"
+ "fmin v13.4s, v13.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
"29:" // Height 1: No activation
"cmp x9, #0x18\n"
"bge 42f\n"
"tbz x9, #4, 33f\n"
- "st1 { v8.4s }, [x27], #0x10\n"
- "st1 { v9.4s }, [x27], #0x10\n"
- "st1 { v10.4s }, [x27], #0x10\n"
- "st1 { v11.4s }, [x27], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v9.4s }, [x26], #0x10\n"
+ "st1 { v10.4s }, [x26], #0x10\n"
+ "st1 { v11.4s }, [x26], #0x10\n"
"tbz x9, #2, 31f\n"
- "st1 { v12.4s }, [x27], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
"tbz x9, #1, 30f\n"
- "str d13, [x27], #0x8\n"
+ "str d13, [x26], #0x8\n"
"tbz x9, #0, 41f\n"
- "st1 { v13.s }[2], [x27]\n"
+ "st1 { v13.s }[2], [x26]\n"
"b 41f\n"
"30:" // Height 1: Partial direct writeback: partial_1_20
"tbz x9, #0, 41f\n"
- "str s13, [x27, #0x0]\n"
+ "str s13, [x26, #0x0]\n"
"b 41f\n"
"31:" // Height 1: Partial direct writeback: partial_2_16
"tbz x9, #1, 32f\n"
- "str d12, [x27], #0x8\n"
+ "str d12, [x26], #0x8\n"
"tbz x9, #0, 41f\n"
- "st1 { v12.s }[2], [x27]\n"
+ "st1 { v12.s }[2], [x26]\n"
"b 41f\n"
"32:" // Height 1: Partial direct writeback: partial_1_16
"tbz x9, #0, 41f\n"
- "str s12, [x27, #0x0]\n"
+ "str s12, [x26, #0x0]\n"
"b 41f\n"
"33:" // Height 1: Partial direct writeback: partial_8_0
"tbz x9, #3, 37f\n"
- "st1 { v8.4s }, [x27], #0x10\n"
- "st1 { v9.4s }, [x27], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v9.4s }, [x26], #0x10\n"
"tbz x9, #2, 35f\n"
- "st1 { v10.4s }, [x27], #0x10\n"
+ "st1 { v10.4s }, [x26], #0x10\n"
"tbz x9, #1, 34f\n"
- "str d11, [x27], #0x8\n"
+ "str d11, [x26], #0x8\n"
"tbz x9, #0, 41f\n"
- "st1 { v11.s }[2], [x27]\n"
+ "st1 { v11.s }[2], [x26]\n"
"b 41f\n"
"34:" // Height 1: Partial direct writeback: partial_1_12
"tbz x9, #0, 41f\n"
- "str s11, [x27, #0x0]\n"
+ "str s11, [x26, #0x0]\n"
"b 41f\n"
"35:" // Height 1: Partial direct writeback: partial_2_8
"tbz x9, #1, 36f\n"
- "str d10, [x27], #0x8\n"
+ "str d10, [x26], #0x8\n"
"tbz x9, #0, 41f\n"
- "st1 { v10.s }[2], [x27]\n"
+ "st1 { v10.s }[2], [x26]\n"
"b 41f\n"
"36:" // Height 1: Partial direct writeback: partial_1_8
"tbz x9, #0, 41f\n"
- "str s10, [x27, #0x0]\n"
+ "str s10, [x26, #0x0]\n"
"b 41f\n"
"37:" // Height 1: Partial direct writeback: partial_4_0
"tbz x9, #2, 39f\n"
- "st1 { v8.4s }, [x27], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
"tbz x9, #1, 38f\n"
- "str d9, [x27], #0x8\n"
+ "str d9, [x26], #0x8\n"
"tbz x9, #0, 41f\n"
- "st1 { v9.s }[2], [x27]\n"
+ "st1 { v9.s }[2], [x26]\n"
"b 41f\n"
"38:" // Height 1: Partial direct writeback: partial_1_4
"tbz x9, #0, 41f\n"
- "str s9, [x27, #0x0]\n"
+ "str s9, [x26, #0x0]\n"
"b 41f\n"
"39:" // Height 1: Partial direct writeback: partial_2_0
"tbz x9, #1, 40f\n"
- "str d8, [x27], #0x8\n"
+ "str d8, [x26], #0x8\n"
"tbz x9, #0, 41f\n"
- "st1 { v8.s }[2], [x27]\n"
+ "st1 { v8.s }[2], [x26]\n"
"b 41f\n"
"40:" // Height 1: Partial direct writeback: partial_1_0
- "str s8, [x27, #0x0]\n"
+ "str s8, [x26, #0x0]\n"
"41:" // Height 1: Partial direct writeback: Done
"b 43f\n"
"42:" // Height 1: Full writeback
- "str q8, [x27, #0x0]\n"
- "str q9, [x27, #0x10]\n"
- "str q10, [x27, #0x20]\n"
- "str q11, [x27, #0x30]\n"
- "str q12, [x27, #0x40]\n"
- "str q13, [x27, #0x50]\n"
- "add x27, x27, #0x60\n"
+ "str q8, [x26, #0x0]\n"
+ "str q9, [x26, #0x10]\n"
+ "str q10, [x26, #0x20]\n"
+ "str q11, [x26, #0x30]\n"
+ "str q12, [x26, #0x40]\n"
+ "str q13, [x26, #0x50]\n"
+ "add x26, x26, #0x60\n"
"43:" // Height 1: Writeback done
"subs x9, x9, #0x18\n"
"bgt 2b\n"
"b 174f\n"
"44:" // Height 2
- "mov x10, %x[bias]\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x27, %x[bias]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "mov x26, %x[output_ptr]\n"
"45:" // Height 2: Column loop
- "cbz x10, 46f\n"
- "ldr q8, [x10, #0x0]\n"
- "ldr q9, [x10, #0x10]\n"
+ "cbz x27, 46f\n"
+ "ldr q8, [x27, #0x0]\n"
"zip2 v14.2d, v8.2d, v8.2d\n"
+ "ldr q9, [x27, #0x10]\n"
"zip1 v8.2d, v8.2d, v8.2d\n"
- "ldr q10, [x10, #0x20]\n"
- "ldr q11, [x10, #0x30]\n"
+ "ldr q10, [x27, #0x20]\n"
+ "ldr q11, [x27, #0x30]\n"
"zip2 v15.2d, v9.2d, v9.2d\n"
+ "ldr q12, [x27, #0x40]\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
- "ldr q12, [x10, #0x40]\n"
- "ldr q13, [x10, #0x50]\n"
+ "ldr q13, [x27, #0x50]\n"
+ "add x27, x27, #0x60\n"
"zip2 v16.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
"zip2 v17.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
- "add x10, x10, #0x60\n"
"zip2 v18.2d, v12.2d, v12.2d\n"
"zip1 v12.2d, v12.2d, v12.2d\n"
"zip2 v19.2d, v13.2d, v13.2d\n"
@@ -516,136 +516,136 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"b 62f\n"
"46:" // Height 2: no bias
"tbz %x[flags], #0, 61f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x9, #0x18\n"
- "add x23, x27, x20, LSL #2\n"
+ "add x22, x26, x19, LSL #2\n"
"bge 59f\n"
"tbz x9, #4, 50f\n"
- "ld1 { v9.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v10.4s }, [x27], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
- "ld1 { v11.4s }, [x27], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v12.4s }, [x27], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v9.4s }, [x26], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "ld1 { v10.4s }, [x26], #0x10\n"
+ "ld1 { v15.4s }, [x22], #0x10\n"
+ "ld1 { v11.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
"tbz x9, #2, 48f\n"
- "ld1 { v13.4s }, [x27], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
"tbz x9, #1, 47f\n"
- "ldr d20, [x27], #0x8\n"
- "ldr d19, [x23], #0x8\n"
- "mov x20, #0x58\n"
+ "mov x19, #0x58\n"
+ "ldr d20, [x26], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
"tbz x9, #0, 58f\n"
- "ld1 { v20.s }[2], [x27]\n"
- "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v20.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x22]\n"
"b 58f\n"
"47:" // Height 2: Partial accumulate: partial_1_20
- "mov x20, #0x50\n"
+ "mov x19, #0x50\n"
"tbz x9, #0, 58f\n"
- "ldr s20, [x27, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
+ "ldr s20, [x26, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
"b 58f\n"
"48:" // Height 2: Partial accumulate: partial_2_16
"tbz x9, #1, 49f\n"
- "ldr d13, [x27], #0x8\n"
- "ldr d18, [x23], #0x8\n"
- "mov x20, #0x48\n"
+ "ldr d13, [x26], #0x8\n"
+ "ldr d18, [x22], #0x8\n"
+ "mov x19, #0x48\n"
"tbz x9, #0, 58f\n"
- "ld1 { v13.s }[2], [x27]\n"
- "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x22]\n"
"b 58f\n"
"49:" // Height 2: Partial accumulate: partial_1_16
- "mov x20, #0x40\n"
+ "mov x19, #0x40\n"
"tbz x9, #0, 58f\n"
- "ldr s13, [x27, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
"b 58f\n"
"50:" // Height 2: Partial accumulate: partial_8_0
"tbz x9, #3, 54f\n"
- "ld1 { v9.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v10.4s }, [x27], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
+ "ld1 { v9.4s }, [x26], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "ld1 { v10.4s }, [x26], #0x10\n"
+ "ld1 { v15.4s }, [x22], #0x10\n"
"tbz x9, #2, 52f\n"
- "ld1 { v11.4s }, [x27], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v11.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
"tbz x9, #1, 51f\n"
- "ldr d12, [x27], #0x8\n"
- "ldr d17, [x23], #0x8\n"
- "mov x20, #0x38\n"
+ "mov x19, #0x38\n"
+ "ldr d12, [x26], #0x8\n"
+ "ldr d17, [x22], #0x8\n"
"tbz x9, #0, 58f\n"
- "ld1 { v12.s }[2], [x27]\n"
- "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x22]\n"
"b 58f\n"
"51:" // Height 2: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x9, #0, 58f\n"
- "ldr s12, [x27, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
"b 58f\n"
"52:" // Height 2: Partial accumulate: partial_2_8
"tbz x9, #1, 53f\n"
- "ldr d11, [x27], #0x8\n"
- "ldr d16, [x23], #0x8\n"
- "mov x20, #0x28\n"
+ "ldr d11, [x26], #0x8\n"
+ "ldr d16, [x22], #0x8\n"
+ "mov x19, #0x28\n"
"tbz x9, #0, 58f\n"
- "ld1 { v11.s }[2], [x27]\n"
- "ld1 { v16.s }[2], [x23]\n"
+ "ld1 { v11.s }[2], [x26]\n"
+ "ld1 { v16.s }[2], [x22]\n"
"b 58f\n"
"53:" // Height 2: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x9, #0, 58f\n"
- "ldr s11, [x27, #0x0]\n"
- "ldr s16, [x23, #0x0]\n"
+ "ldr s11, [x26, #0x0]\n"
+ "ldr s16, [x22, #0x0]\n"
"b 58f\n"
"54:" // Height 2: Partial accumulate: partial_4_0
"tbz x9, #2, 56f\n"
- "ld1 { v9.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v9.4s }, [x26], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
"tbz x9, #1, 55f\n"
- "ldr d10, [x27], #0x8\n"
- "ldr d15, [x23], #0x8\n"
- "mov x20, #0x18\n"
+ "mov x19, #0x18\n"
+ "ldr d10, [x26], #0x8\n"
+ "ldr d15, [x22], #0x8\n"
"tbz x9, #0, 58f\n"
- "ld1 { v10.s }[2], [x27]\n"
- "ld1 { v15.s }[2], [x23]\n"
+ "ld1 { v10.s }[2], [x26]\n"
+ "ld1 { v15.s }[2], [x22]\n"
"b 58f\n"
"55:" // Height 2: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x9, #0, 58f\n"
- "ldr s10, [x27, #0x0]\n"
- "ldr s15, [x23, #0x0]\n"
+ "ldr s10, [x26, #0x0]\n"
+ "ldr s15, [x22, #0x0]\n"
"b 58f\n"
"56:" // Height 2: Partial accumulate: partial_2_0
"tbz x9, #1, 57f\n"
- "ldr d9, [x27], #0x8\n"
- "ldr d14, [x23], #0x8\n"
- "mov x20, #0x8\n"
+ "ldr d9, [x26], #0x8\n"
+ "ldr d14, [x22], #0x8\n"
+ "mov x19, #0x8\n"
"tbz x9, #0, 58f\n"
- "ld1 { v9.s }[2], [x27]\n"
- "ld1 { v14.s }[2], [x23]\n"
+ "ld1 { v9.s }[2], [x26]\n"
+ "ld1 { v14.s }[2], [x22]\n"
"b 58f\n"
"57:" // Height 2: Partial accumulate: partial_1_0
- "ldr s9, [x27, #0x0]\n"
- "ldr s14, [x23, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr s9, [x26, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s14, [x22, #0x0]\n"
"58:" // Height 2: Partial accumulate: Done
- "sub x27, x27, x20\n"
+ "sub x26, x26, x19\n"
"b 60f\n"
"59:" // Height 2: full accumulate
- "ldr q9, [x27, #0x0]\n"
- "ldr q10, [x27, #0x10]\n"
- "ldr q11, [x27, #0x20]\n"
- "ldr q12, [x27, #0x30]\n"
- "ldr q13, [x27, #0x40]\n"
- "ldr q20, [x27, #0x50]\n"
- "ldr q14, [x23, #0x0]\n"
- "ldr q15, [x23, #0x10]\n"
- "ldr q16, [x23, #0x20]\n"
- "ldr q17, [x23, #0x30]\n"
- "ldr q18, [x23, #0x40]\n"
- "ldr q19, [x23, #0x50]\n"
+ "ldr q9, [x26, #0x0]\n"
+ "ldr q10, [x26, #0x10]\n"
+ "ldr q11, [x26, #0x20]\n"
+ "ldr q12, [x26, #0x30]\n"
+ "ldr q13, [x26, #0x40]\n"
+ "ldr q20, [x26, #0x50]\n"
+ "ldr q14, [x22, #0x0]\n"
+ "ldr q15, [x22, #0x10]\n"
+ "ldr q16, [x22, #0x20]\n"
+ "ldr q17, [x22, #0x30]\n"
+ "ldr q18, [x22, #0x40]\n"
+ "ldr q19, [x22, #0x50]\n"
"60:" // Height 2: MMLA fixup
"zip1 v8.2d, v9.2d, v14.2d\n"
"zip2 v14.2d, v9.2d, v14.2d\n"
@@ -674,151 +674,151 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"movi v18.16b, #0x0\n"
"movi v19.16b, #0x0\n"
"62:" // Height 2: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"63:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 64f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "cbnz x26, 65f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "cbnz x25, 65f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
"b 65f\n"
"64:" // Height 2: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20, LSL #2\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19, LSL #2\n"
"65:" // Height 2: input setup done
- "cmp x25, #0x4\n"
+ "cmp x24, #0x4\n"
"blt 68f\n"
- "ld1 { v0.4s }, [x24], #0x10\n"
- "ld1 { v1.4s }, [x23], #0x10\n"
- "cmp x25, #0x8\n"
- "ldr q4, [x28, #0x0]\n"
- "ldr q5, [x28, #0x10]\n"
- "ldr q6, [x28, #0x20]\n"
- "ldr q7, [x28, #0x30]\n"
+ "ld1 { v0.4s }, [x23], #0x10\n"
+ "cmp x24, #0x8\n"
"blt 67f\n"
"66:" // Height 2: Multiply loop: Main loop head
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ "ld1 { v1.4s }, [x22], #0x10\n"
+ "sub x24, x24, #0x4\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
+ "ldr q4, [x28, #0x0]\n"
+ "cmp x24, #0x8\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- "ldr q4, [x28, #0x40]\n"
+ "ldr q5, [x28, #0x10]\n"
+ "ldr q6, [x28, #0x20]\n"
".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
- "ldr q5, [x28, #0x50]\n"
+ "ldr q7, [x28, #0x30]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- "ldr q6, [x28, #0x60]\n"
+ "ldr q4, [x28, #0x40]\n"
+ "ldr q5, [x28, #0x50]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
+ "ldr q6, [x28, #0x60]\n"
"ldr q7, [x28, #0x70]\n"
".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
- "ldr q4, [x28, #0x80]\n"
".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
+ "ldr q4, [x28, #0x80]\n"
"ldr q5, [x28, #0x90]\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
"ldr q6, [x28, #0xa0]\n"
".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
"ldr q7, [x28, #0xb0]\n"
- "sub x25, x25, #0x4\n"
- "cmp x25, #0x8\n"
"add x28, x28, #0xc0\n"
".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
- "ldr q4, [x28, #0x0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
- "ldr q5, [x28, #0x10]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
- "ldr q6, [x28, #0x20]\n"
".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
- "ldr q7, [x28, #0x30]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "ld1 { v0.4s }, [x24], #0x10\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "ld1 { v1.4s }, [x23], #0x10\n"
+ "ld1 { v0.4s }, [x23], #0x10\n"
"bge 66b\n"
"67:" // Height 2: Multiply loop: Single iteration only
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ "ld1 { v1.4s }, [x22], #0x10\n"
+ "sub x24, x24, #0x4\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
+ "ldr q4, [x28, #0x0]\n"
+ "ldr q5, [x28, #0x10]\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- "ldr q4, [x28, #0x40]\n"
+ "ldr q6, [x28, #0x20]\n"
".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
- "ldr q5, [x28, #0x50]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "ldr q4, [x28, #0x40]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
+ "ldr q5, [x28, #0x50]\n"
"ldr q6, [x28, #0x60]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
- "ldr q7, [x28, #0x70]\n"
".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
+ "ldr q7, [x28, #0x70]\n"
"ldr q4, [x28, #0x80]\n"
".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
"ldr q5, [x28, #0x90]\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
"ldr q6, [x28, #0xa0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
- "ldr q7, [x28, #0xb0]\n"
- "sub x25, x25, #0x4\n"
".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
+ "ldr q7, [x28, #0xb0]\n"
+ "add x28, x28, #0xc0\n"
".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
- "add x28, x28, #0xc0\n"
"68:" // Height 2: Multiply loop: Main loop skip
- "cbz x25, 71f\n"
- "cbz x25, 71f\n"
- "tbz x25, #1, 69f\n"
- "ldr d0, [x24], #0x8\n"
- "ldr d1, [x23], #0x8\n"
- "tbz x25, #0, 70f\n"
- "ld1 { v0.s }[2], [x24]\n"
- "ld1 { v1.s }[2], [x23]\n"
+ "cbz x24, 71f\n"
+ "cbz x24, 71f\n"
+ "tbz x24, #1, 69f\n"
+ "ldr d0, [x23], #0x8\n"
+ "ldr d1, [x22], #0x8\n"
+ "tbz x24, #0, 70f\n"
+ "ld1 { v0.s }[2], [x23]\n"
+ "ld1 { v1.s }[2], [x22]\n"
"b 70f\n"
"69:" // Height 2: Multiply loop: Ragged operand read: partial_1_0
- "ldr s0, [x24, #0x0]\n"
- "ldr s1, [x23, #0x0]\n"
+ "ldr s0, [x23, #0x0]\n"
+ "ldr s1, [x22, #0x0]\n"
"70:" // Height 2: Multiply loop: Ragged operand read: Done
+ ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
"ldr q4, [x28, #0x0]\n"
"ldr q5, [x28, #0x10]\n"
- ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
"ldr q6, [x28, #0x20]\n"
"ldr q7, [x28, #0x30]\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
"ldr q4, [x28, #0x40]\n"
+ ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
"ldr q5, [x28, #0x50]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
"ldr q6, [x28, #0x60]\n"
+ ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
"ldr q7, [x28, #0x70]\n"
".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
- ".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
"ldr q4, [x28, #0x80]\n"
+ ".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
"ldr q5, [x28, #0x90]\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
- ".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
"ldr q6, [x28, #0xa0]\n"
+ ".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
"ldr q7, [x28, #0xb0]\n"
+ "add x28, x28, #0xc0\n"
".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
- "add x28, x28, #0xc0\n"
"71:" // Height 2: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 63b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20, LSL #2\n"
"uzp1 v4.2d, v8.2d, v14.2d\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 v8.2d, v8.2d, v14.2d\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x22, x26, x19, LSL #2\n"
"uzp1 v14.2d, v9.2d, v15.2d\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
"uzp2 v9.2d, v9.2d, v15.2d\n"
- "prfm pstl1keep, [x27, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
"uzp1 v15.2d, v10.2d, v16.2d\n"
"uzp2 v10.2d, v10.2d, v16.2d\n"
"uzp1 v16.2d, v11.2d, v17.2d\n"
@@ -828,183 +828,183 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"uzp1 v18.2d, v13.2d, v19.2d\n"
"uzp2 v13.2d, v13.2d, v19.2d\n"
"tbz %x[flags], #1, 72f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v4.4s, v4.4s, v1.4s\n"
- "fmin v14.4s, v14.4s, v1.4s\n"
- "fmin v15.4s, v15.4s, v1.4s\n"
- "fmin v16.4s, v16.4s, v1.4s\n"
- "fmin v17.4s, v17.4s, v1.4s\n"
- "fmin v18.4s, v18.4s, v1.4s\n"
- "fmin v8.4s, v8.4s, v1.4s\n"
- "fmin v9.4s, v9.4s, v1.4s\n"
- "fmin v10.4s, v10.4s, v1.4s\n"
- "fmin v11.4s, v11.4s, v1.4s\n"
- "fmin v12.4s, v12.4s, v1.4s\n"
- "fmin v13.4s, v13.4s, v1.4s\n"
- "fmax v4.4s, v4.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v15.4s, v15.4s, v0.4s\n"
- "fmax v16.4s, v16.4s, v0.4s\n"
- "fmax v17.4s, v17.4s, v0.4s\n"
- "fmax v18.4s, v18.4s, v0.4s\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v4.4s, v4.4s, v0.4s\n"
+ "fmin v14.4s, v14.4s, v0.4s\n"
+ "fmin v15.4s, v15.4s, v0.4s\n"
+ "fmin v16.4s, v16.4s, v0.4s\n"
+ "fmax v4.4s, v4.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmax v15.4s, v15.4s, v1.4s\n"
+ "fmax v16.4s, v16.4s, v1.4s\n"
+ "fmin v17.4s, v17.4s, v0.4s\n"
+ "fmin v18.4s, v18.4s, v0.4s\n"
+ "fmin v8.4s, v8.4s, v0.4s\n"
+ "fmax v17.4s, v17.4s, v1.4s\n"
+ "fmax v18.4s, v18.4s, v1.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmin v9.4s, v9.4s, v0.4s\n"
+ "fmin v10.4s, v10.4s, v0.4s\n"
+ "fmin v11.4s, v11.4s, v0.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v0.4s\n"
+ "fmin v13.4s, v13.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
"72:" // Height 2: No activation
"cmp x9, #0x18\n"
"bge 85f\n"
"tbz x9, #4, 76f\n"
- "st1 { v4.4s }, [x27], #0x10\n"
- "st1 { v14.4s }, [x27], #0x10\n"
- "st1 { v15.4s }, [x27], #0x10\n"
- "st1 { v16.4s }, [x27], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
- "st1 { v9.4s }, [x23], #0x10\n"
- "st1 { v10.4s }, [x23], #0x10\n"
- "st1 { v11.4s }, [x23], #0x10\n"
+ "st1 { v4.4s }, [x26], #0x10\n"
+ "st1 { v14.4s }, [x26], #0x10\n"
+ "st1 { v15.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x26], #0x10\n"
+ "st1 { v8.4s }, [x22], #0x10\n"
+ "st1 { v9.4s }, [x22], #0x10\n"
+ "st1 { v10.4s }, [x22], #0x10\n"
+ "st1 { v11.4s }, [x22], #0x10\n"
"tbz x9, #2, 74f\n"
- "st1 { v17.4s }, [x27], #0x10\n"
- "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v17.4s }, [x26], #0x10\n"
+ "st1 { v12.4s }, [x22], #0x10\n"
"tbz x9, #1, 73f\n"
- "str d18, [x27], #0x8\n"
- "str d13, [x23], #0x8\n"
+ "str d18, [x26], #0x8\n"
+ "str d13, [x22], #0x8\n"
"tbz x9, #0, 84f\n"
- "st1 { v18.s }[2], [x27]\n"
- "st1 { v13.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x26]\n"
+ "st1 { v13.s }[2], [x22]\n"
"b 84f\n"
"73:" // Height 2: Partial direct writeback: partial_1_20
"tbz x9, #0, 84f\n"
- "str s18, [x27, #0x0]\n"
- "str s13, [x23, #0x0]\n"
+ "str s18, [x26, #0x0]\n"
+ "str s13, [x22, #0x0]\n"
"b 84f\n"
"74:" // Height 2: Partial direct writeback: partial_2_16
"tbz x9, #1, 75f\n"
- "str d17, [x27], #0x8\n"
- "str d12, [x23], #0x8\n"
+ "str d17, [x26], #0x8\n"
+ "str d12, [x22], #0x8\n"
"tbz x9, #0, 84f\n"
- "st1 { v17.s }[2], [x27]\n"
- "st1 { v12.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x26]\n"
+ "st1 { v12.s }[2], [x22]\n"
"b 84f\n"
"75:" // Height 2: Partial direct writeback: partial_1_16
"tbz x9, #0, 84f\n"
- "str s17, [x27, #0x0]\n"
- "str s12, [x23, #0x0]\n"
+ "str s17, [x26, #0x0]\n"
+ "str s12, [x22, #0x0]\n"
"b 84f\n"
"76:" // Height 2: Partial direct writeback: partial_8_0
"tbz x9, #3, 80f\n"
- "st1 { v4.4s }, [x27], #0x10\n"
- "st1 { v14.4s }, [x27], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
- "st1 { v9.4s }, [x23], #0x10\n"
+ "st1 { v4.4s }, [x26], #0x10\n"
+ "st1 { v14.4s }, [x26], #0x10\n"
+ "st1 { v8.4s }, [x22], #0x10\n"
+ "st1 { v9.4s }, [x22], #0x10\n"
"tbz x9, #2, 78f\n"
- "st1 { v15.4s }, [x27], #0x10\n"
- "st1 { v10.4s }, [x23], #0x10\n"
+ "st1 { v15.4s }, [x26], #0x10\n"
+ "st1 { v10.4s }, [x22], #0x10\n"
"tbz x9, #1, 77f\n"
- "str d16, [x27], #0x8\n"
- "str d11, [x23], #0x8\n"
+ "str d16, [x26], #0x8\n"
+ "str d11, [x22], #0x8\n"
"tbz x9, #0, 84f\n"
- "st1 { v16.s }[2], [x27]\n"
- "st1 { v11.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x26]\n"
+ "st1 { v11.s }[2], [x22]\n"
"b 84f\n"
"77:" // Height 2: Partial direct writeback: partial_1_12
"tbz x9, #0, 84f\n"
- "str s16, [x27, #0x0]\n"
- "str s11, [x23, #0x0]\n"
+ "str s16, [x26, #0x0]\n"
+ "str s11, [x22, #0x0]\n"
"b 84f\n"
"78:" // Height 2: Partial direct writeback: partial_2_8
"tbz x9, #1, 79f\n"
- "str d15, [x27], #0x8\n"
- "str d10, [x23], #0x8\n"
+ "str d15, [x26], #0x8\n"
+ "str d10, [x22], #0x8\n"
"tbz x9, #0, 84f\n"
- "st1 { v15.s }[2], [x27]\n"
- "st1 { v10.s }[2], [x23]\n"
+ "st1 { v15.s }[2], [x26]\n"
+ "st1 { v10.s }[2], [x22]\n"
"b 84f\n"
"79:" // Height 2: Partial direct writeback: partial_1_8
"tbz x9, #0, 84f\n"
- "str s15, [x27, #0x0]\n"
- "str s10, [x23, #0x0]\n"
+ "str s15, [x26, #0x0]\n"
+ "str s10, [x22, #0x0]\n"
"b 84f\n"
"80:" // Height 2: Partial direct writeback: partial_4_0
"tbz x9, #2, 82f\n"
- "st1 { v4.4s }, [x27], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
+ "st1 { v4.4s }, [x26], #0x10\n"
+ "st1 { v8.4s }, [x22], #0x10\n"
"tbz x9, #1, 81f\n"
- "str d14, [x27], #0x8\n"
- "str d9, [x23], #0x8\n"
+ "str d14, [x26], #0x8\n"
+ "str d9, [x22], #0x8\n"
"tbz x9, #0, 84f\n"
- "st1 { v14.s }[2], [x27]\n"
- "st1 { v9.s }[2], [x23]\n"
+ "st1 { v14.s }[2], [x26]\n"
+ "st1 { v9.s }[2], [x22]\n"
"b 84f\n"
"81:" // Height 2: Partial direct writeback: partial_1_4
"tbz x9, #0, 84f\n"
- "str s14, [x27, #0x0]\n"
- "str s9, [x23, #0x0]\n"
+ "str s14, [x26, #0x0]\n"
+ "str s9, [x22, #0x0]\n"
"b 84f\n"
"82:" // Height 2: Partial direct writeback: partial_2_0
"tbz x9, #1, 83f\n"
- "str d4, [x27], #0x8\n"
- "str d8, [x23], #0x8\n"
+ "str d4, [x26], #0x8\n"
+ "str d8, [x22], #0x8\n"
"tbz x9, #0, 84f\n"
- "st1 { v4.s }[2], [x27]\n"
- "st1 { v8.s }[2], [x23]\n"
+ "st1 { v4.s }[2], [x26]\n"
+ "st1 { v8.s }[2], [x22]\n"
"b 84f\n"
"83:" // Height 2: Partial direct writeback: partial_1_0
- "str s4, [x27, #0x0]\n"
- "str s8, [x23, #0x0]\n"
+ "str s4, [x26, #0x0]\n"
+ "str s8, [x22, #0x0]\n"
"84:" // Height 2: Partial direct writeback: Done
"b 86f\n"
"85:" // Height 2: Full writeback
- "str q4, [x27, #0x0]\n"
- "str q14, [x27, #0x10]\n"
- "str q15, [x27, #0x20]\n"
- "str q16, [x27, #0x30]\n"
- "str q17, [x27, #0x40]\n"
- "str q18, [x27, #0x50]\n"
- "add x27, x27, #0x60\n"
- "str q8, [x23, #0x0]\n"
- "str q9, [x23, #0x10]\n"
- "str q10, [x23, #0x20]\n"
- "str q11, [x23, #0x30]\n"
- "str q12, [x23, #0x40]\n"
- "str q13, [x23, #0x50]\n"
+ "str q4, [x26, #0x0]\n"
+ "str q14, [x26, #0x10]\n"
+ "str q15, [x26, #0x20]\n"
+ "str q16, [x26, #0x30]\n"
+ "str q17, [x26, #0x40]\n"
+ "str q18, [x26, #0x50]\n"
+ "add x26, x26, #0x60\n"
+ "str q8, [x22, #0x0]\n"
+ "str q9, [x22, #0x10]\n"
+ "str q10, [x22, #0x20]\n"
+ "str q11, [x22, #0x30]\n"
+ "str q12, [x22, #0x40]\n"
+ "str q13, [x22, #0x50]\n"
"86:" // Height 2: Writeback done
"subs x9, x9, #0x18\n"
"bgt 45b\n"
"b 174f\n"
"87:" // Height 3
- "mov x10, %x[bias]\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x27, %x[bias]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "mov x26, %x[output_ptr]\n"
"88:" // Height 3: Column loop
- "cbz x10, 89f\n"
- "ldr q8, [x10, #0x0]\n"
- "ldr q9, [x10, #0x10]\n"
+ "cbz x27, 89f\n"
+ "ldr q8, [x27, #0x0]\n"
"zip2 v14.2d, v8.2d, v8.2d\n"
+ "ldr q9, [x27, #0x10]\n"
"zip1 v8.2d, v8.2d, v8.2d\n"
- "ldr q10, [x10, #0x20]\n"
- "ldr q11, [x10, #0x30]\n"
+ "ldr q10, [x27, #0x20]\n"
+ "mov v20.16b, v8.16b\n"
+ "ldr q11, [x27, #0x30]\n"
+ "mov v26.16b, v14.16b\n"
+ "ldr q12, [x27, #0x40]\n"
+ "ldr q13, [x27, #0x50]\n"
"zip2 v15.2d, v9.2d, v9.2d\n"
+ "add x27, x27, #0x60\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
- "ldr q12, [x10, #0x40]\n"
- "ldr q13, [x10, #0x50]\n"
"zip2 v16.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
"zip2 v17.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
- "add x10, x10, #0x60\n"
"zip2 v18.2d, v12.2d, v12.2d\n"
"zip1 v12.2d, v12.2d, v12.2d\n"
"zip2 v19.2d, v13.2d, v13.2d\n"
"zip1 v13.2d, v13.2d, v13.2d\n"
- "mov v20.16b, v8.16b\n"
- "mov v26.16b, v14.16b\n"
"mov v21.16b, v9.16b\n"
"mov v27.16b, v15.16b\n"
"mov v22.16b, v10.16b\n"
@@ -1018,170 +1018,170 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"b 105f\n"
"89:" // Height 3: no bias
"tbz %x[flags], #0, 104f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x9, #0x18\n"
- "add x22, x23, x20, LSL #2\n"
+ "add x22, x26, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"bge 102f\n"
"tbz x9, #4, 93f\n"
- "ld1 { v9.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v10.4s }, [x27], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "ld1 { v11.4s }, [x27], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v23.4s }, [x22], #0x10\n"
- "ld1 { v12.4s }, [x27], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
+ "ld1 { v9.4s }, [x26], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "ld1 { v10.4s }, [x26], #0x10\n"
+ "ld1 { v15.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
+ "ld1 { v11.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v23.4s }, [x21], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v24.4s }, [x21], #0x10\n"
"tbz x9, #2, 91f\n"
- "ld1 { v13.4s }, [x27], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
"tbz x9, #1, 90f\n"
- "ldr d20, [x27], #0x8\n"
- "ldr d19, [x23], #0x8\n"
- "mov x20, #0x58\n"
- "ldr d4, [x22], #0x8\n"
+ "mov x19, #0x58\n"
+ "ldr d20, [x26], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
+ "ldr d4, [x21], #0x8\n"
"tbz x9, #0, 101f\n"
- "ld1 { v20.s }[2], [x27]\n"
- "ld1 { v19.s }[2], [x23]\n"
- "ld1 { v4.s }[2], [x22]\n"
+ "ld1 { v20.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x22]\n"
+ "ld1 { v4.s }[2], [x21]\n"
"b 101f\n"
"90:" // Height 3: Partial accumulate: partial_1_20
- "mov x20, #0x50\n"
+ "mov x19, #0x50\n"
"tbz x9, #0, 101f\n"
- "ldr s20, [x27, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s4, [x22, #0x0]\n"
+ "ldr s20, [x26, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
+ "ldr s4, [x21, #0x0]\n"
"b 101f\n"
"91:" // Height 3: Partial accumulate: partial_2_16
"tbz x9, #1, 92f\n"
- "ldr d13, [x27], #0x8\n"
- "ldr d18, [x23], #0x8\n"
- "mov x20, #0x48\n"
- "ldr d25, [x22], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
+ "ldr d18, [x22], #0x8\n"
+ "mov x19, #0x48\n"
+ "ldr d25, [x21], #0x8\n"
"tbz x9, #0, 101f\n"
- "ld1 { v13.s }[2], [x27]\n"
- "ld1 { v18.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x22]\n"
+ "ld1 { v25.s }[2], [x21]\n"
"b 101f\n"
"92:" // Height 3: Partial accumulate: partial_1_16
- "mov x20, #0x40\n"
+ "mov x19, #0x40\n"
"tbz x9, #0, 101f\n"
- "ldr s13, [x27, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
+ "ldr s25, [x21, #0x0]\n"
"b 101f\n"
"93:" // Height 3: Partial accumulate: partial_8_0
"tbz x9, #3, 97f\n"
- "ld1 { v9.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v10.4s }, [x27], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
+ "ld1 { v9.4s }, [x26], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "ld1 { v10.4s }, [x26], #0x10\n"
+ "ld1 { v15.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
"tbz x9, #2, 95f\n"
- "ld1 { v11.4s }, [x27], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v23.4s }, [x22], #0x10\n"
+ "ld1 { v11.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v23.4s }, [x21], #0x10\n"
"tbz x9, #1, 94f\n"
- "ldr d12, [x27], #0x8\n"
- "ldr d17, [x23], #0x8\n"
- "mov x20, #0x38\n"
- "ldr d24, [x22], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d12, [x26], #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "ldr d24, [x21], #0x8\n"
"tbz x9, #0, 101f\n"
- "ld1 { v12.s }[2], [x27]\n"
- "ld1 { v17.s }[2], [x23]\n"
- "ld1 { v24.s }[2], [x22]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x22]\n"
+ "ld1 { v24.s }[2], [x21]\n"
"b 101f\n"
"94:" // Height 3: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x9, #0, 101f\n"
- "ldr s12, [x27, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
- "ldr s24, [x22, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
+ "ldr s24, [x21, #0x0]\n"
"b 101f\n"
"95:" // Height 3: Partial accumulate: partial_2_8
"tbz x9, #1, 96f\n"
- "ldr d11, [x27], #0x8\n"
- "ldr d16, [x23], #0x8\n"
- "mov x20, #0x28\n"
- "ldr d23, [x22], #0x8\n"
+ "ldr d11, [x26], #0x8\n"
+ "ldr d16, [x22], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d23, [x21], #0x8\n"
"tbz x9, #0, 101f\n"
- "ld1 { v11.s }[2], [x27]\n"
- "ld1 { v16.s }[2], [x23]\n"
- "ld1 { v23.s }[2], [x22]\n"
+ "ld1 { v11.s }[2], [x26]\n"
+ "ld1 { v16.s }[2], [x22]\n"
+ "ld1 { v23.s }[2], [x21]\n"
"b 101f\n"
"96:" // Height 3: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x9, #0, 101f\n"
- "ldr s11, [x27, #0x0]\n"
- "ldr s16, [x23, #0x0]\n"
- "ldr s23, [x22, #0x0]\n"
+ "ldr s11, [x26, #0x0]\n"
+ "ldr s16, [x22, #0x0]\n"
+ "ldr s23, [x21, #0x0]\n"
"b 101f\n"
"97:" // Height 3: Partial accumulate: partial_4_0
"tbz x9, #2, 99f\n"
- "ld1 { v9.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v9.4s }, [x26], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
"tbz x9, #1, 98f\n"
- "ldr d10, [x27], #0x8\n"
- "ldr d15, [x23], #0x8\n"
- "mov x20, #0x18\n"
- "ldr d22, [x22], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d10, [x26], #0x8\n"
+ "ldr d15, [x22], #0x8\n"
+ "ldr d22, [x21], #0x8\n"
"tbz x9, #0, 101f\n"
- "ld1 { v10.s }[2], [x27]\n"
- "ld1 { v15.s }[2], [x23]\n"
- "ld1 { v22.s }[2], [x22]\n"
+ "ld1 { v10.s }[2], [x26]\n"
+ "ld1 { v15.s }[2], [x22]\n"
+ "ld1 { v22.s }[2], [x21]\n"
"b 101f\n"
"98:" // Height 3: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x9, #0, 101f\n"
- "ldr s10, [x27, #0x0]\n"
- "ldr s15, [x23, #0x0]\n"
- "ldr s22, [x22, #0x0]\n"
+ "ldr s10, [x26, #0x0]\n"
+ "ldr s15, [x22, #0x0]\n"
+ "ldr s22, [x21, #0x0]\n"
"b 101f\n"
"99:" // Height 3: Partial accumulate: partial_2_0
"tbz x9, #1, 100f\n"
- "ldr d9, [x27], #0x8\n"
- "ldr d14, [x23], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d21, [x22], #0x8\n"
+ "ldr d9, [x26], #0x8\n"
+ "ldr d14, [x22], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d21, [x21], #0x8\n"
"tbz x9, #0, 101f\n"
- "ld1 { v9.s }[2], [x27]\n"
- "ld1 { v14.s }[2], [x23]\n"
- "ld1 { v21.s }[2], [x22]\n"
+ "ld1 { v9.s }[2], [x26]\n"
+ "ld1 { v14.s }[2], [x22]\n"
+ "ld1 { v21.s }[2], [x21]\n"
"b 101f\n"
"100:" // Height 3: Partial accumulate: partial_1_0
- "ldr s9, [x27, #0x0]\n"
- "ldr s14, [x23, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s21, [x22, #0x0]\n"
+ "ldr s9, [x26, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s14, [x22, #0x0]\n"
+ "ldr s21, [x21, #0x0]\n"
"101:" // Height 3: Partial accumulate: Done
- "sub x27, x27, x20\n"
+ "sub x26, x26, x19\n"
"b 103f\n"
"102:" // Height 3: full accumulate
- "ldr q9, [x27, #0x0]\n"
- "ldr q10, [x27, #0x10]\n"
- "ldr q11, [x27, #0x20]\n"
- "ldr q12, [x27, #0x30]\n"
- "ldr q13, [x27, #0x40]\n"
- "ldr q20, [x27, #0x50]\n"
- "ldr q14, [x23, #0x0]\n"
- "ldr q15, [x23, #0x10]\n"
- "ldr q16, [x23, #0x20]\n"
- "ldr q17, [x23, #0x30]\n"
- "ldr q18, [x23, #0x40]\n"
- "ldr q19, [x23, #0x50]\n"
- "ldr q21, [x22, #0x0]\n"
- "ldr q22, [x22, #0x10]\n"
- "ldr q23, [x22, #0x20]\n"
- "ldr q24, [x22, #0x30]\n"
- "ldr q25, [x22, #0x40]\n"
- "ldr q4, [x22, #0x50]\n"
+ "ldr q9, [x26, #0x0]\n"
+ "ldr q10, [x26, #0x10]\n"
+ "ldr q11, [x26, #0x20]\n"
+ "ldr q12, [x26, #0x30]\n"
+ "ldr q13, [x26, #0x40]\n"
+ "ldr q20, [x26, #0x50]\n"
+ "ldr q14, [x22, #0x0]\n"
+ "ldr q15, [x22, #0x10]\n"
+ "ldr q16, [x22, #0x20]\n"
+ "ldr q17, [x22, #0x30]\n"
+ "ldr q18, [x22, #0x40]\n"
+ "ldr q19, [x22, #0x50]\n"
+ "ldr q21, [x21, #0x0]\n"
+ "ldr q22, [x21, #0x10]\n"
+ "ldr q23, [x21, #0x20]\n"
+ "ldr q24, [x21, #0x30]\n"
+ "ldr q25, [x21, #0x40]\n"
+ "ldr q4, [x21, #0x50]\n"
"103:" // Height 3: MMLA fixup
"zip1 v8.2d, v9.2d, v14.2d\n"
"zip2 v14.2d, v9.2d, v14.2d\n"
@@ -1234,68 +1234,67 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"movi v30.16b, #0x0\n"
"movi v31.16b, #0x0\n"
"105:" // Height 3: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"106:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 107f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "ldr x22, [x21, #0x10]\n"
- "cbnz x26, 108f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x22, x22, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "cbnz x25, 108f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
"b 108f\n"
"107:" // Height 3: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"108:" // Height 3: input setup done
- "cmp x25, #0x4\n"
+ "cmp x24, #0x4\n"
"blt 111f\n"
- "ld1 { v0.4s }, [x24], #0x10\n"
- "ld1 { v1.4s }, [x23], #0x10\n"
- "cmp x25, #0x8\n"
- "ld1 { v2.4s }, [x22], #0x10\n"
- "ldr q4, [x28, #0x0]\n"
- "ldr q5, [x28, #0x10]\n"
- "ldr q6, [x28, #0x20]\n"
- "ldr q7, [x28, #0x30]\n"
+ "ld1 { v0.4s }, [x23], #0x10\n"
+ "cmp x24, #0x8\n"
"blt 110f\n"
"109:" // Height 3: Multiply loop: Main loop head
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ "ld1 { v1.4s }, [x22], #0x10\n"
+ "sub x24, x24, #0x4\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
- ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- "sub x25, x25, #0x4\n"
+ "ld1 { v2.4s }, [x21], #0x10\n"
+ "cmp x24, #0x8\n"
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
+ "ldr q4, [x28, #0x0]\n"
+ "ldr q5, [x28, #0x10]\n"
+ ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
+ "ldr q6, [x28, #0x20]\n"
".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
- "ldr q4, [x28, #0x40]\n"
+ "ldr q7, [x28, #0x30]\n"
".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
+ "ldr q4, [x28, #0x40]\n"
".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
"ldr q5, [x28, #0x50]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- "cmp x25, #0x8\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
"ldr q6, [x28, #0x60]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
- "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
"ldr q7, [x28, #0x70]\n"
".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "ld1 { v1.4s }, [x23], #0x10\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x6e44ec56 // bfmmla v22.4s, v2.8h, v4.8h\n"
"ldr q4, [x28, #0x80]\n"
".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
".inst 0x6e45ec5c // bfmmla v28.4s, v2.8h, v5.8h\n"
"ldr q5, [x28, #0x90]\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
"ldr q6, [x28, #0xa0]\n"
".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
@@ -1304,40 +1303,41 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"add x28, x28, #0xc0\n"
".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
".inst 0x6e44ec58 // bfmmla v24.4s, v2.8h, v4.8h\n"
- "ldr q4, [x28, #0x0]\n"
".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
".inst 0x6e45ec5e // bfmmla v30.4s, v2.8h, v5.8h\n"
- "ldr q5, [x28, #0x10]\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec59 // bfmmla v25.4s, v2.8h, v6.8h\n"
- "ldr q6, [x28, #0x20]\n"
".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
- "ld1 { v0.4s }, [x24], #0x10\n"
+ "ld1 { v0.4s }, [x23], #0x10\n"
".inst 0x6e47ec5f // bfmmla v31.4s, v2.8h, v7.8h\n"
- "ld1 { v2.4s }, [x22], #0x10\n"
- "ldr q7, [x28, #0x30]\n"
"bge 109b\n"
"110:" // Height 3: Multiply loop: Single iteration only
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ "ld1 { v1.4s }, [x22], #0x10\n"
+ "sub x24, x24, #0x4\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
- ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- "sub x25, x25, #0x4\n"
+ "ld1 { v2.4s }, [x21], #0x10\n"
+ "ldr q4, [x28, #0x0]\n"
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
+ "ldr q5, [x28, #0x10]\n"
+ ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
+ "ldr q6, [x28, #0x20]\n"
+ "ldr q7, [x28, #0x30]\n"
".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
"ldr q4, [x28, #0x40]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
"ldr q5, [x28, #0x50]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
"ldr q6, [x28, #0x60]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
"ldr q7, [x28, #0x70]\n"
".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e44ec56 // bfmmla v22.4s, v2.8h, v4.8h\n"
"ldr q4, [x28, #0x80]\n"
".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
@@ -1359,37 +1359,37 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec5f // bfmmla v31.4s, v2.8h, v7.8h\n"
"111:" // Height 3: Multiply loop: Main loop skip
- "cbz x25, 114f\n"
- "cbz x25, 114f\n"
- "tbz x25, #1, 112f\n"
- "ldr d0, [x24], #0x8\n"
- "ldr d1, [x23], #0x8\n"
- "ldr d2, [x22], #0x8\n"
- "tbz x25, #0, 113f\n"
- "ld1 { v0.s }[2], [x24]\n"
- "ld1 { v1.s }[2], [x23]\n"
- "ld1 { v2.s }[2], [x22]\n"
+ "cbz x24, 114f\n"
+ "cbz x24, 114f\n"
+ "tbz x24, #1, 112f\n"
+ "ldr d0, [x23], #0x8\n"
+ "ldr d1, [x22], #0x8\n"
+ "ldr d2, [x21], #0x8\n"
+ "tbz x24, #0, 113f\n"
+ "ld1 { v0.s }[2], [x23]\n"
+ "ld1 { v1.s }[2], [x22]\n"
+ "ld1 { v2.s }[2], [x21]\n"
"b 113f\n"
"112:" // Height 3: Multiply loop: Ragged operand read: partial_1_0
- "ldr s0, [x24, #0x0]\n"
- "ldr s1, [x23, #0x0]\n"
- "ldr s2, [x22, #0x0]\n"
+ "ldr s0, [x23, #0x0]\n"
+ "ldr s1, [x22, #0x0]\n"
+ "ldr s2, [x21, #0x0]\n"
"113:" // Height 3: Multiply loop: Ragged operand read: Done
+ ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
"ldr q4, [x28, #0x0]\n"
+ ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
"ldr q5, [x28, #0x10]\n"
- ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
- ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
"ldr q6, [x28, #0x20]\n"
+ ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
"ldr q7, [x28, #0x30]\n"
- ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
- ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
+ ".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
+ ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
"ldr q4, [x28, #0x40]\n"
".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
- ".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
"ldr q5, [x28, #0x50]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
"ldr q6, [x28, #0x60]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
@@ -1406,8 +1406,8 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec5d // bfmmla v29.4s, v2.8h, v7.8h\n"
"ldr q7, [x28, #0xb0]\n"
- ".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
"add x28, x28, #0xc0\n"
+ ".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
".inst 0x6e44ec58 // bfmmla v24.4s, v2.8h, v4.8h\n"
".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
".inst 0x6e45ec5e // bfmmla v30.4s, v2.8h, v5.8h\n"
@@ -1416,21 +1416,21 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec5f // bfmmla v31.4s, v2.8h, v7.8h\n"
"114:" // Height 3: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 106b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"uzp1 v4.2d, v8.2d, v14.2d\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 v8.2d, v8.2d, v14.2d\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x22, x26, x19, LSL #2\n"
"uzp1 v14.2d, v9.2d, v15.2d\n"
- "prfm pstl1keep, [x27, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
"uzp2 v9.2d, v9.2d, v15.2d\n"
+ "add x21, x22, x19, LSL #2\n"
"uzp1 v15.2d, v10.2d, v16.2d\n"
- "prfm pstl1keep, [x22, #0x0]\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
"uzp2 v10.2d, v10.2d, v16.2d\n"
"uzp1 v16.2d, v11.2d, v17.2d\n"
"uzp2 v11.2d, v11.2d, v17.2d\n"
@@ -1445,231 +1445,231 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"uzp1 v24.2d, v24.2d, v30.2d\n"
"uzp1 v25.2d, v25.2d, v31.2d\n"
"tbz %x[flags], #1, 115f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v4.4s, v4.4s, v1.4s\n"
- "fmin v14.4s, v14.4s, v1.4s\n"
- "fmin v15.4s, v15.4s, v1.4s\n"
- "fmin v16.4s, v16.4s, v1.4s\n"
- "fmin v17.4s, v17.4s, v1.4s\n"
- "fmin v18.4s, v18.4s, v1.4s\n"
- "fmin v8.4s, v8.4s, v1.4s\n"
- "fmin v9.4s, v9.4s, v1.4s\n"
- "fmin v10.4s, v10.4s, v1.4s\n"
- "fmin v11.4s, v11.4s, v1.4s\n"
- "fmin v12.4s, v12.4s, v1.4s\n"
- "fmin v13.4s, v13.4s, v1.4s\n"
- "fmin v20.4s, v20.4s, v1.4s\n"
- "fmin v21.4s, v21.4s, v1.4s\n"
- "fmin v22.4s, v22.4s, v1.4s\n"
- "fmin v23.4s, v23.4s, v1.4s\n"
- "fmin v24.4s, v24.4s, v1.4s\n"
- "fmin v25.4s, v25.4s, v1.4s\n"
- "fmax v4.4s, v4.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v15.4s, v15.4s, v0.4s\n"
- "fmax v16.4s, v16.4s, v0.4s\n"
- "fmax v17.4s, v17.4s, v0.4s\n"
- "fmax v18.4s, v18.4s, v0.4s\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
- "fmax v20.4s, v20.4s, v0.4s\n"
- "fmax v21.4s, v21.4s, v0.4s\n"
- "fmax v22.4s, v22.4s, v0.4s\n"
- "fmax v23.4s, v23.4s, v0.4s\n"
- "fmax v24.4s, v24.4s, v0.4s\n"
- "fmax v25.4s, v25.4s, v0.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v4.4s, v4.4s, v0.4s\n"
+ "fmin v14.4s, v14.4s, v0.4s\n"
+ "fmin v15.4s, v15.4s, v0.4s\n"
+ "fmin v16.4s, v16.4s, v0.4s\n"
+ "fmax v4.4s, v4.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmax v15.4s, v15.4s, v1.4s\n"
+ "fmax v16.4s, v16.4s, v1.4s\n"
+ "fmin v17.4s, v17.4s, v0.4s\n"
+ "fmin v18.4s, v18.4s, v0.4s\n"
+ "fmin v8.4s, v8.4s, v0.4s\n"
+ "fmax v17.4s, v17.4s, v1.4s\n"
+ "fmax v18.4s, v18.4s, v1.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmin v9.4s, v9.4s, v0.4s\n"
+ "fmin v10.4s, v10.4s, v0.4s\n"
+ "fmin v11.4s, v11.4s, v0.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v0.4s\n"
+ "fmin v13.4s, v13.4s, v0.4s\n"
+ "fmin v20.4s, v20.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v20.4s, v20.4s, v1.4s\n"
+ "fmin v21.4s, v21.4s, v0.4s\n"
+ "fmin v22.4s, v22.4s, v0.4s\n"
+ "fmin v23.4s, v23.4s, v0.4s\n"
+ "fmax v21.4s, v21.4s, v1.4s\n"
+ "fmax v22.4s, v22.4s, v1.4s\n"
+ "fmax v23.4s, v23.4s, v1.4s\n"
+ "fmin v24.4s, v24.4s, v0.4s\n"
+ "fmin v25.4s, v25.4s, v0.4s\n"
+ "fmax v24.4s, v24.4s, v1.4s\n"
+ "fmax v25.4s, v25.4s, v1.4s\n"
"115:" // Height 3: No activation
"cmp x9, #0x18\n"
"bge 128f\n"
"tbz x9, #4, 119f\n"
- "st1 { v4.4s }, [x27], #0x10\n"
- "st1 { v14.4s }, [x27], #0x10\n"
- "st1 { v15.4s }, [x27], #0x10\n"
- "st1 { v16.4s }, [x27], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
- "st1 { v9.4s }, [x23], #0x10\n"
- "st1 { v10.4s }, [x23], #0x10\n"
- "st1 { v11.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v21.4s }, [x22], #0x10\n"
- "st1 { v22.4s }, [x22], #0x10\n"
- "st1 { v23.4s }, [x22], #0x10\n"
+ "st1 { v4.4s }, [x26], #0x10\n"
+ "st1 { v14.4s }, [x26], #0x10\n"
+ "st1 { v15.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x26], #0x10\n"
+ "st1 { v8.4s }, [x22], #0x10\n"
+ "st1 { v9.4s }, [x22], #0x10\n"
+ "st1 { v10.4s }, [x22], #0x10\n"
+ "st1 { v11.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v21.4s }, [x21], #0x10\n"
+ "st1 { v22.4s }, [x21], #0x10\n"
+ "st1 { v23.4s }, [x21], #0x10\n"
"tbz x9, #2, 117f\n"
- "st1 { v17.4s }, [x27], #0x10\n"
- "st1 { v12.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x26], #0x10\n"
+ "st1 { v12.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
"tbz x9, #1, 116f\n"
- "str d18, [x27], #0x8\n"
- "str d13, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
+ "str d18, [x26], #0x8\n"
+ "str d13, [x22], #0x8\n"
+ "str d25, [x21], #0x8\n"
"tbz x9, #0, 127f\n"
- "st1 { v18.s }[2], [x27]\n"
- "st1 { v13.s }[2], [x23]\n"
- "st1 { v25.s }[2], [x22]\n"
+ "st1 { v18.s }[2], [x26]\n"
+ "st1 { v13.s }[2], [x22]\n"
+ "st1 { v25.s }[2], [x21]\n"
"b 127f\n"
"116:" // Height 3: Partial direct writeback: partial_1_20
"tbz x9, #0, 127f\n"
- "str s18, [x27, #0x0]\n"
- "str s13, [x23, #0x0]\n"
- "str s25, [x22, #0x0]\n"
+ "str s18, [x26, #0x0]\n"
+ "str s13, [x22, #0x0]\n"
+ "str s25, [x21, #0x0]\n"
"b 127f\n"
"117:" // Height 3: Partial direct writeback: partial_2_16
"tbz x9, #1, 118f\n"
- "str d17, [x27], #0x8\n"
- "str d12, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
+ "str d17, [x26], #0x8\n"
+ "str d12, [x22], #0x8\n"
+ "str d24, [x21], #0x8\n"
"tbz x9, #0, 127f\n"
- "st1 { v17.s }[2], [x27]\n"
- "st1 { v12.s }[2], [x23]\n"
- "st1 { v24.s }[2], [x22]\n"
+ "st1 { v17.s }[2], [x26]\n"
+ "st1 { v12.s }[2], [x22]\n"
+ "st1 { v24.s }[2], [x21]\n"
"b 127f\n"
"118:" // Height 3: Partial direct writeback: partial_1_16
"tbz x9, #0, 127f\n"
- "str s17, [x27, #0x0]\n"
- "str s12, [x23, #0x0]\n"
- "str s24, [x22, #0x0]\n"
+ "str s17, [x26, #0x0]\n"
+ "str s12, [x22, #0x0]\n"
+ "str s24, [x21, #0x0]\n"
"b 127f\n"
"119:" // Height 3: Partial direct writeback: partial_8_0
"tbz x9, #3, 123f\n"
- "st1 { v4.4s }, [x27], #0x10\n"
- "st1 { v14.4s }, [x27], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
- "st1 { v9.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v21.4s }, [x22], #0x10\n"
+ "st1 { v4.4s }, [x26], #0x10\n"
+ "st1 { v14.4s }, [x26], #0x10\n"
+ "st1 { v8.4s }, [x22], #0x10\n"
+ "st1 { v9.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v21.4s }, [x21], #0x10\n"
"tbz x9, #2, 121f\n"
- "st1 { v15.4s }, [x27], #0x10\n"
- "st1 { v10.4s }, [x23], #0x10\n"
- "st1 { v22.4s }, [x22], #0x10\n"
+ "st1 { v15.4s }, [x26], #0x10\n"
+ "st1 { v10.4s }, [x22], #0x10\n"
+ "st1 { v22.4s }, [x21], #0x10\n"
"tbz x9, #1, 120f\n"
- "str d16, [x27], #0x8\n"
- "str d11, [x23], #0x8\n"
- "str d23, [x22], #0x8\n"
+ "str d16, [x26], #0x8\n"
+ "str d11, [x22], #0x8\n"
+ "str d23, [x21], #0x8\n"
"tbz x9, #0, 127f\n"
- "st1 { v16.s }[2], [x27]\n"
- "st1 { v11.s }[2], [x23]\n"
- "st1 { v23.s }[2], [x22]\n"
+ "st1 { v16.s }[2], [x26]\n"
+ "st1 { v11.s }[2], [x22]\n"
+ "st1 { v23.s }[2], [x21]\n"
"b 127f\n"
"120:" // Height 3: Partial direct writeback: partial_1_12
"tbz x9, #0, 127f\n"
- "str s16, [x27, #0x0]\n"
- "str s11, [x23, #0x0]\n"
- "str s23, [x22, #0x0]\n"
+ "str s16, [x26, #0x0]\n"
+ "str s11, [x22, #0x0]\n"
+ "str s23, [x21, #0x0]\n"
"b 127f\n"
"121:" // Height 3: Partial direct writeback: partial_2_8
"tbz x9, #1, 122f\n"
- "str d15, [x27], #0x8\n"
- "str d10, [x23], #0x8\n"
- "str d22, [x22], #0x8\n"
+ "str d15, [x26], #0x8\n"
+ "str d10, [x22], #0x8\n"
+ "str d22, [x21], #0x8\n"
"tbz x9, #0, 127f\n"
- "st1 { v15.s }[2], [x27]\n"
- "st1 { v10.s }[2], [x23]\n"
- "st1 { v22.s }[2], [x22]\n"
+ "st1 { v15.s }[2], [x26]\n"
+ "st1 { v10.s }[2], [x22]\n"
+ "st1 { v22.s }[2], [x21]\n"
"b 127f\n"
"122:" // Height 3: Partial direct writeback: partial_1_8
"tbz x9, #0, 127f\n"
- "str s15, [x27, #0x0]\n"
- "str s10, [x23, #0x0]\n"
- "str s22, [x22, #0x0]\n"
+ "str s15, [x26, #0x0]\n"
+ "str s10, [x22, #0x0]\n"
+ "str s22, [x21, #0x0]\n"
"b 127f\n"
"123:" // Height 3: Partial direct writeback: partial_4_0
"tbz x9, #2, 125f\n"
- "st1 { v4.4s }, [x27], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v4.4s }, [x26], #0x10\n"
+ "st1 { v8.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
"tbz x9, #1, 124f\n"
- "str d14, [x27], #0x8\n"
- "str d9, [x23], #0x8\n"
- "str d21, [x22], #0x8\n"
+ "str d14, [x26], #0x8\n"
+ "str d9, [x22], #0x8\n"
+ "str d21, [x21], #0x8\n"
"tbz x9, #0, 127f\n"
- "st1 { v14.s }[2], [x27]\n"
- "st1 { v9.s }[2], [x23]\n"
- "st1 { v21.s }[2], [x22]\n"
+ "st1 { v14.s }[2], [x26]\n"
+ "st1 { v9.s }[2], [x22]\n"
+ "st1 { v21.s }[2], [x21]\n"
"b 127f\n"
"124:" // Height 3: Partial direct writeback: partial_1_4
"tbz x9, #0, 127f\n"
- "str s14, [x27, #0x0]\n"
- "str s9, [x23, #0x0]\n"
- "str s21, [x22, #0x0]\n"
+ "str s14, [x26, #0x0]\n"
+ "str s9, [x22, #0x0]\n"
+ "str s21, [x21, #0x0]\n"
"b 127f\n"
"125:" // Height 3: Partial direct writeback: partial_2_0
"tbz x9, #1, 126f\n"
- "str d4, [x27], #0x8\n"
- "str d8, [x23], #0x8\n"
- "str d20, [x22], #0x8\n"
+ "str d4, [x26], #0x8\n"
+ "str d8, [x22], #0x8\n"
+ "str d20, [x21], #0x8\n"
"tbz x9, #0, 127f\n"
- "st1 { v4.s }[2], [x27]\n"
- "st1 { v8.s }[2], [x23]\n"
- "st1 { v20.s }[2], [x22]\n"
+ "st1 { v4.s }[2], [x26]\n"
+ "st1 { v8.s }[2], [x22]\n"
+ "st1 { v20.s }[2], [x21]\n"
"b 127f\n"
"126:" // Height 3: Partial direct writeback: partial_1_0
- "str s4, [x27, #0x0]\n"
- "str s8, [x23, #0x0]\n"
- "str s20, [x22, #0x0]\n"
+ "str s4, [x26, #0x0]\n"
+ "str s8, [x22, #0x0]\n"
+ "str s20, [x21, #0x0]\n"
"127:" // Height 3: Partial direct writeback: Done
"b 129f\n"
"128:" // Height 3: Full writeback
- "str q4, [x27, #0x0]\n"
- "str q14, [x27, #0x10]\n"
- "str q15, [x27, #0x20]\n"
- "str q16, [x27, #0x30]\n"
- "str q17, [x27, #0x40]\n"
- "str q18, [x27, #0x50]\n"
- "add x27, x27, #0x60\n"
- "str q8, [x23, #0x0]\n"
- "str q9, [x23, #0x10]\n"
- "str q10, [x23, #0x20]\n"
- "str q11, [x23, #0x30]\n"
- "str q12, [x23, #0x40]\n"
- "str q13, [x23, #0x50]\n"
- "str q20, [x22, #0x0]\n"
- "str q21, [x22, #0x10]\n"
- "str q22, [x22, #0x20]\n"
- "str q23, [x22, #0x30]\n"
- "str q24, [x22, #0x40]\n"
- "str q25, [x22, #0x50]\n"
+ "str q4, [x26, #0x0]\n"
+ "str q14, [x26, #0x10]\n"
+ "str q15, [x26, #0x20]\n"
+ "str q16, [x26, #0x30]\n"
+ "str q17, [x26, #0x40]\n"
+ "str q18, [x26, #0x50]\n"
+ "add x26, x26, #0x60\n"
+ "str q8, [x22, #0x0]\n"
+ "str q9, [x22, #0x10]\n"
+ "str q10, [x22, #0x20]\n"
+ "str q11, [x22, #0x30]\n"
+ "str q12, [x22, #0x40]\n"
+ "str q13, [x22, #0x50]\n"
+ "str q20, [x21, #0x0]\n"
+ "str q21, [x21, #0x10]\n"
+ "str q22, [x21, #0x20]\n"
+ "str q23, [x21, #0x30]\n"
+ "str q24, [x21, #0x40]\n"
+ "str q25, [x21, #0x50]\n"
"129:" // Height 3: Writeback done
"subs x9, x9, #0x18\n"
"bgt 88b\n"
"b 174f\n"
"130:" // Height 4
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x10\n"
- "mov x10, %x[bias]\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x27, %x[bias]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "mov x26, %x[output_ptr]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x19, #0x10\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"131:" // Height 4: Column loop
- "cbz x10, 132f\n"
- "ldr q8, [x10, #0x0]\n"
- "ldr q9, [x10, #0x10]\n"
+ "cbz x27, 132f\n"
+ "ldr q8, [x27, #0x0]\n"
"zip2 v14.2d, v8.2d, v8.2d\n"
+ "ldr q9, [x27, #0x10]\n"
"zip1 v8.2d, v8.2d, v8.2d\n"
- "ldr q10, [x10, #0x20]\n"
- "ldr q11, [x10, #0x30]\n"
+ "ldr q10, [x27, #0x20]\n"
+ "mov v20.16b, v8.16b\n"
+ "ldr q11, [x27, #0x30]\n"
+ "mov v26.16b, v14.16b\n"
+ "ldr q12, [x27, #0x40]\n"
+ "ldr q13, [x27, #0x50]\n"
"zip2 v15.2d, v9.2d, v9.2d\n"
+ "add x27, x27, #0x60\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
- "ldr q12, [x10, #0x40]\n"
- "ldr q13, [x10, #0x50]\n"
"zip2 v16.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
"zip2 v17.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
- "add x10, x10, #0x60\n"
"zip2 v18.2d, v12.2d, v12.2d\n"
"zip1 v12.2d, v12.2d, v12.2d\n"
"zip2 v19.2d, v13.2d, v13.2d\n"
"zip1 v13.2d, v13.2d, v13.2d\n"
- "mov v20.16b, v8.16b\n"
- "mov v26.16b, v14.16b\n"
"mov v21.16b, v9.16b\n"
"mov v27.16b, v15.16b\n"
"mov v22.16b, v10.16b\n"
@@ -1683,204 +1683,204 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"b 148f\n"
"132:" // Height 4: no bias
"tbz %x[flags], #0, 147f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x9, #0x18\n"
- "add x21, x22, x20, LSL #2\n"
+ "add x22, x26, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"bge 145f\n"
"tbz x9, #4, 136f\n"
- "ld1 { v9.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
- "ld1 { v10.4s }, [x27], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "ld1 { v27.4s }, [x21], #0x10\n"
- "ld1 { v11.4s }, [x27], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v23.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
- "ld1 { v12.4s }, [x27], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
- "ld1 { v29.4s }, [x21], #0x10\n"
+ "ld1 { v9.4s }, [x26], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "ld1 { v26.4s }, [x20], #0x10\n"
+ "ld1 { v10.4s }, [x26], #0x10\n"
+ "ld1 { v15.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
+ "ld1 { v27.4s }, [x20], #0x10\n"
+ "ld1 { v11.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v23.4s }, [x21], #0x10\n"
+ "ld1 { v28.4s }, [x20], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v24.4s }, [x21], #0x10\n"
+ "ld1 { v29.4s }, [x20], #0x10\n"
"tbz x9, #2, 134f\n"
- "ld1 { v13.4s }, [x27], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
- "ld1 { v30.4s }, [x21], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
+ "ld1 { v30.4s }, [x20], #0x10\n"
"tbz x9, #1, 133f\n"
- "ldr d20, [x27], #0x8\n"
- "ldr d19, [x23], #0x8\n"
- "mov x20, #0x58\n"
- "ldr d4, [x22], #0x8\n"
- "ldr d31, [x21], #0x8\n"
+ "mov x19, #0x58\n"
+ "ldr d20, [x26], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
+ "ldr d4, [x21], #0x8\n"
+ "ldr d31, [x20], #0x8\n"
"tbz x9, #0, 144f\n"
- "ld1 { v20.s }[2], [x27]\n"
- "ld1 { v19.s }[2], [x23]\n"
- "ld1 { v4.s }[2], [x22]\n"
- "ld1 { v31.s }[2], [x21]\n"
+ "ld1 { v20.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x22]\n"
+ "ld1 { v4.s }[2], [x21]\n"
+ "ld1 { v31.s }[2], [x20]\n"
"b 144f\n"
"133:" // Height 4: Partial accumulate: partial_1_20
- "mov x20, #0x50\n"
+ "mov x19, #0x50\n"
"tbz x9, #0, 144f\n"
- "ldr s20, [x27, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s4, [x22, #0x0]\n"
- "ldr s31, [x21, #0x0]\n"
+ "ldr s20, [x26, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
+ "ldr s4, [x21, #0x0]\n"
+ "ldr s31, [x20, #0x0]\n"
"b 144f\n"
"134:" // Height 4: Partial accumulate: partial_2_16
"tbz x9, #1, 135f\n"
- "ldr d13, [x27], #0x8\n"
- "ldr d18, [x23], #0x8\n"
- "mov x20, #0x48\n"
- "ldr d25, [x22], #0x8\n"
- "ldr d30, [x21], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
+ "ldr d18, [x22], #0x8\n"
+ "mov x19, #0x48\n"
+ "ldr d25, [x21], #0x8\n"
+ "ldr d30, [x20], #0x8\n"
"tbz x9, #0, 144f\n"
- "ld1 { v13.s }[2], [x27]\n"
- "ld1 { v18.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
- "ld1 { v30.s }[2], [x21]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x22]\n"
+ "ld1 { v25.s }[2], [x21]\n"
+ "ld1 { v30.s }[2], [x20]\n"
"b 144f\n"
"135:" // Height 4: Partial accumulate: partial_1_16
- "mov x20, #0x40\n"
+ "mov x19, #0x40\n"
"tbz x9, #0, 144f\n"
- "ldr s13, [x27, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
- "ldr s30, [x21, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
+ "ldr s25, [x21, #0x0]\n"
+ "ldr s30, [x20, #0x0]\n"
"b 144f\n"
"136:" // Height 4: Partial accumulate: partial_8_0
"tbz x9, #3, 140f\n"
- "ld1 { v9.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
- "ld1 { v10.4s }, [x27], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "ld1 { v27.4s }, [x21], #0x10\n"
+ "ld1 { v9.4s }, [x26], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "ld1 { v26.4s }, [x20], #0x10\n"
+ "ld1 { v10.4s }, [x26], #0x10\n"
+ "ld1 { v15.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
+ "ld1 { v27.4s }, [x20], #0x10\n"
"tbz x9, #2, 138f\n"
- "ld1 { v11.4s }, [x27], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v23.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
+ "ld1 { v11.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v23.4s }, [x21], #0x10\n"
+ "ld1 { v28.4s }, [x20], #0x10\n"
"tbz x9, #1, 137f\n"
- "ldr d12, [x27], #0x8\n"
- "ldr d17, [x23], #0x8\n"
- "mov x20, #0x38\n"
- "ldr d24, [x22], #0x8\n"
- "ldr d29, [x21], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d12, [x26], #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "ldr d24, [x21], #0x8\n"
+ "ldr d29, [x20], #0x8\n"
"tbz x9, #0, 144f\n"
- "ld1 { v12.s }[2], [x27]\n"
- "ld1 { v17.s }[2], [x23]\n"
- "ld1 { v24.s }[2], [x22]\n"
- "ld1 { v29.s }[2], [x21]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x22]\n"
+ "ld1 { v24.s }[2], [x21]\n"
+ "ld1 { v29.s }[2], [x20]\n"
"b 144f\n"
"137:" // Height 4: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x9, #0, 144f\n"
- "ldr s12, [x27, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
- "ldr s24, [x22, #0x0]\n"
- "ldr s29, [x21, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
+ "ldr s24, [x21, #0x0]\n"
+ "ldr s29, [x20, #0x0]\n"
"b 144f\n"
"138:" // Height 4: Partial accumulate: partial_2_8
"tbz x9, #1, 139f\n"
- "ldr d11, [x27], #0x8\n"
- "ldr d16, [x23], #0x8\n"
- "mov x20, #0x28\n"
- "ldr d23, [x22], #0x8\n"
- "ldr d28, [x21], #0x8\n"
+ "ldr d11, [x26], #0x8\n"
+ "ldr d16, [x22], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d23, [x21], #0x8\n"
+ "ldr d28, [x20], #0x8\n"
"tbz x9, #0, 144f\n"
- "ld1 { v11.s }[2], [x27]\n"
- "ld1 { v16.s }[2], [x23]\n"
- "ld1 { v23.s }[2], [x22]\n"
- "ld1 { v28.s }[2], [x21]\n"
+ "ld1 { v11.s }[2], [x26]\n"
+ "ld1 { v16.s }[2], [x22]\n"
+ "ld1 { v23.s }[2], [x21]\n"
+ "ld1 { v28.s }[2], [x20]\n"
"b 144f\n"
"139:" // Height 4: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x9, #0, 144f\n"
- "ldr s11, [x27, #0x0]\n"
- "ldr s16, [x23, #0x0]\n"
- "ldr s23, [x22, #0x0]\n"
- "ldr s28, [x21, #0x0]\n"
+ "ldr s11, [x26, #0x0]\n"
+ "ldr s16, [x22, #0x0]\n"
+ "ldr s23, [x21, #0x0]\n"
+ "ldr s28, [x20, #0x0]\n"
"b 144f\n"
"140:" // Height 4: Partial accumulate: partial_4_0
"tbz x9, #2, 142f\n"
- "ld1 { v9.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
+ "ld1 { v9.4s }, [x26], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "ld1 { v26.4s }, [x20], #0x10\n"
"tbz x9, #1, 141f\n"
- "ldr d10, [x27], #0x8\n"
- "ldr d15, [x23], #0x8\n"
- "mov x20, #0x18\n"
- "ldr d22, [x22], #0x8\n"
- "ldr d27, [x21], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d10, [x26], #0x8\n"
+ "ldr d15, [x22], #0x8\n"
+ "ldr d22, [x21], #0x8\n"
+ "ldr d27, [x20], #0x8\n"
"tbz x9, #0, 144f\n"
- "ld1 { v10.s }[2], [x27]\n"
- "ld1 { v15.s }[2], [x23]\n"
- "ld1 { v22.s }[2], [x22]\n"
- "ld1 { v27.s }[2], [x21]\n"
+ "ld1 { v10.s }[2], [x26]\n"
+ "ld1 { v15.s }[2], [x22]\n"
+ "ld1 { v22.s }[2], [x21]\n"
+ "ld1 { v27.s }[2], [x20]\n"
"b 144f\n"
"141:" // Height 4: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x9, #0, 144f\n"
- "ldr s10, [x27, #0x0]\n"
- "ldr s15, [x23, #0x0]\n"
- "ldr s22, [x22, #0x0]\n"
- "ldr s27, [x21, #0x0]\n"
+ "ldr s10, [x26, #0x0]\n"
+ "ldr s15, [x22, #0x0]\n"
+ "ldr s22, [x21, #0x0]\n"
+ "ldr s27, [x20, #0x0]\n"
"b 144f\n"
"142:" // Height 4: Partial accumulate: partial_2_0
"tbz x9, #1, 143f\n"
- "ldr d9, [x27], #0x8\n"
- "ldr d14, [x23], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d21, [x22], #0x8\n"
- "ldr d26, [x21], #0x8\n"
+ "ldr d9, [x26], #0x8\n"
+ "ldr d14, [x22], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d21, [x21], #0x8\n"
+ "ldr d26, [x20], #0x8\n"
"tbz x9, #0, 144f\n"
- "ld1 { v9.s }[2], [x27]\n"
- "ld1 { v14.s }[2], [x23]\n"
- "ld1 { v21.s }[2], [x22]\n"
- "ld1 { v26.s }[2], [x21]\n"
+ "ld1 { v9.s }[2], [x26]\n"
+ "ld1 { v14.s }[2], [x22]\n"
+ "ld1 { v21.s }[2], [x21]\n"
+ "ld1 { v26.s }[2], [x20]\n"
"b 144f\n"
"143:" // Height 4: Partial accumulate: partial_1_0
- "ldr s9, [x27, #0x0]\n"
- "ldr s14, [x23, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s21, [x22, #0x0]\n"
- "ldr s26, [x21, #0x0]\n"
+ "ldr s9, [x26, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s14, [x22, #0x0]\n"
+ "ldr s21, [x21, #0x0]\n"
+ "ldr s26, [x20, #0x0]\n"
"144:" // Height 4: Partial accumulate: Done
- "sub x27, x27, x20\n"
+ "sub x26, x26, x19\n"
"b 146f\n"
"145:" // Height 4: full accumulate
- "ldr q9, [x27, #0x0]\n"
- "ldr q10, [x27, #0x10]\n"
- "ldr q11, [x27, #0x20]\n"
- "ldr q12, [x27, #0x30]\n"
- "ldr q13, [x27, #0x40]\n"
- "ldr q20, [x27, #0x50]\n"
- "ldr q14, [x23, #0x0]\n"
- "ldr q15, [x23, #0x10]\n"
- "ldr q16, [x23, #0x20]\n"
- "ldr q17, [x23, #0x30]\n"
- "ldr q18, [x23, #0x40]\n"
- "ldr q19, [x23, #0x50]\n"
- "ldr q21, [x22, #0x0]\n"
- "ldr q22, [x22, #0x10]\n"
- "ldr q23, [x22, #0x20]\n"
- "ldr q24, [x22, #0x30]\n"
- "ldr q25, [x22, #0x40]\n"
- "ldr q4, [x22, #0x50]\n"
- "ldr q26, [x21, #0x0]\n"
- "ldr q27, [x21, #0x10]\n"
- "ldr q28, [x21, #0x20]\n"
- "ldr q29, [x21, #0x30]\n"
- "ldr q30, [x21, #0x40]\n"
- "ldr q31, [x21, #0x50]\n"
+ "ldr q9, [x26, #0x0]\n"
+ "ldr q10, [x26, #0x10]\n"
+ "ldr q11, [x26, #0x20]\n"
+ "ldr q12, [x26, #0x30]\n"
+ "ldr q13, [x26, #0x40]\n"
+ "ldr q20, [x26, #0x50]\n"
+ "ldr q14, [x22, #0x0]\n"
+ "ldr q15, [x22, #0x10]\n"
+ "ldr q16, [x22, #0x20]\n"
+ "ldr q17, [x22, #0x30]\n"
+ "ldr q18, [x22, #0x40]\n"
+ "ldr q19, [x22, #0x50]\n"
+ "ldr q21, [x21, #0x0]\n"
+ "ldr q22, [x21, #0x10]\n"
+ "ldr q23, [x21, #0x20]\n"
+ "ldr q24, [x21, #0x30]\n"
+ "ldr q25, [x21, #0x40]\n"
+ "ldr q4, [x21, #0x50]\n"
+ "ldr q26, [x20, #0x0]\n"
+ "ldr q27, [x20, #0x10]\n"
+ "ldr q28, [x20, #0x20]\n"
+ "ldr q29, [x20, #0x30]\n"
+ "ldr q30, [x20, #0x40]\n"
+ "ldr q31, [x20, #0x50]\n"
"146:" // Height 4: MMLA fixup
"zip1 v8.2d, v9.2d, v14.2d\n"
"zip2 v14.2d, v9.2d, v14.2d\n"
@@ -1933,69 +1933,67 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"movi v30.16b, #0x0\n"
"movi v31.16b, #0x0\n"
"148:" // Height 4: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"149:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 150f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "ldr x22, [x21, #0x10]\n"
- "ldr x21, [x21, #0x18]\n"
- "cbnz x26, 151f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x22, x22, x20, LSL #2\n"
- "add x21, x21, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "ldr x20, [x20, #0x18]\n"
+ "cbnz x25, 151f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "add x20, x20, x19, LSL #2\n"
"b 151f\n"
"150:" // Height 4: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"151:" // Height 4: input setup done
- "cmp x25, #0x4\n"
+ "cmp x24, #0x4\n"
"blt 154f\n"
- "ld1 { v0.4s }, [x24], #0x10\n"
- "ld1 { v2.4s }, [x22], #0x10\n"
- "cmp x25, #0x8\n"
- "ld1 { v1.4s }, [x23], #0x10\n"
- "ld1 { v3.4s }, [x21], #0x10\n"
- "ldr q4, [x28, #0x0]\n"
- "ldr q5, [x28, #0x10]\n"
- "ldr q6, [x28, #0x20]\n"
- "ldr q7, [x28, #0x30]\n"
+ "ld1 { v0.4s }, [x23], #0x10\n"
+ "cmp x24, #0x8\n"
"blt 153f\n"
"152:" // Height 4: Multiply loop: Main loop head
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
- ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
- "sub x25, x25, #0x4\n"
- "cmp x25, #0x8\n"
+ "ld1 { v1.4s }, [x22], #0x10\n"
+ "sub x24, x24, #0x4\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
+ "ld1 { v2.4s }, [x21], #0x10\n"
+ "cmp x24, #0x8\n"
+ ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
+ "ld1 { v3.4s }, [x20], #0x10\n"
+ "ldr q4, [x28, #0x0]\n"
".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
+ "ldr q5, [x28, #0x10]\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- "prfm pldl1keep, [x24, #0x80]\n"
+ "ldr q6, [x28, #0x20]\n"
+ "ldr q7, [x28, #0x30]\n"
".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
"ldr q4, [x28, #0x40]\n"
- ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
"prfm pldl1keep, [x23, #0x80]\n"
- "ld1 { v1.4s }, [x23], #0x10\n"
+ ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
"ldr q5, [x28, #0x50]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
"ldr q6, [x28, #0x60]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
- "prfm pldl1keep, [x22, #0x80]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
"ldr q7, [x28, #0x70]\n"
".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
- "prfm pldl1keep, [x21, #0x80]\n"
- "ld1 { v3.4s }, [x21], #0x10\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
".inst 0x6e44ec56 // bfmmla v22.4s, v2.8h, v4.8h\n"
"ldr q4, [x28, #0x80]\n"
".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
@@ -2010,42 +2008,44 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"add x28, x28, #0xc0\n"
".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
".inst 0x6e44ec58 // bfmmla v24.4s, v2.8h, v4.8h\n"
- "ldr q4, [x28, #0x0]\n"
".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
".inst 0x6e45ec5e // bfmmla v30.4s, v2.8h, v5.8h\n"
- "ldr q5, [x28, #0x10]\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec59 // bfmmla v25.4s, v2.8h, v6.8h\n"
- "ldr q6, [x28, #0x20]\n"
".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
- "ld1 { v0.4s }, [x24], #0x10\n"
+ "ld1 { v0.4s }, [x23], #0x10\n"
".inst 0x6e47ec5f // bfmmla v31.4s, v2.8h, v7.8h\n"
- "ld1 { v2.4s }, [x22], #0x10\n"
- "ldr q7, [x28, #0x30]\n"
"bge 152b\n"
"153:" // Height 4: Multiply loop: Single iteration only
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
- ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
- "sub x25, x25, #0x4\n"
- "prfm pldl1keep, [x24, #0x80]\n"
+ "ld1 { v1.4s }, [x22], #0x10\n"
+ "sub x24, x24, #0x4\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
+ "ld1 { v2.4s }, [x21], #0x10\n"
+ "ld1 { v3.4s }, [x20], #0x10\n"
+ ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
+ "ldr q4, [x28, #0x0]\n"
+ "ldr q5, [x28, #0x10]\n"
".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
+ "ldr q6, [x28, #0x20]\n"
+ "ldr q7, [x28, #0x30]\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- ".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
- "ldr q4, [x28, #0x40]\n"
".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"prfm pldl1keep, [x22, #0x80]\n"
+ ".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
- "ldr q5, [x28, #0x50]\n"
+ "ldr q4, [x28, #0x40]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- "prfm pldl1keep, [x21, #0x80]\n"
+ "ldr q5, [x28, #0x50]\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
"ldr q6, [x28, #0x60]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
"ldr q7, [x28, #0x70]\n"
".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
".inst 0x6e44ec56 // bfmmla v22.4s, v2.8h, v4.8h\n"
"ldr q4, [x28, #0x80]\n"
".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
@@ -2067,32 +2067,32 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec5f // bfmmla v31.4s, v2.8h, v7.8h\n"
"154:" // Height 4: Multiply loop: Main loop skip
- "cbz x25, 157f\n"
- "cbz x25, 157f\n"
- "tbz x25, #1, 155f\n"
- "ldr d0, [x24], #0x8\n"
- "ldr d1, [x23], #0x8\n"
- "ldr d2, [x22], #0x8\n"
- "ldr d3, [x21], #0x8\n"
- "tbz x25, #0, 156f\n"
- "ld1 { v0.s }[2], [x24]\n"
- "ld1 { v1.s }[2], [x23]\n"
- "ld1 { v2.s }[2], [x22]\n"
- "ld1 { v3.s }[2], [x21]\n"
+ "cbz x24, 157f\n"
+ "cbz x24, 157f\n"
+ "tbz x24, #1, 155f\n"
+ "ldr d0, [x23], #0x8\n"
+ "ldr d1, [x22], #0x8\n"
+ "ldr d2, [x21], #0x8\n"
+ "ldr d3, [x20], #0x8\n"
+ "tbz x24, #0, 156f\n"
+ "ld1 { v0.s }[2], [x23]\n"
+ "ld1 { v1.s }[2], [x22]\n"
+ "ld1 { v2.s }[2], [x21]\n"
+ "ld1 { v3.s }[2], [x20]\n"
"b 156f\n"
"155:" // Height 4: Multiply loop: Ragged operand read: partial_1_0
- "ldr s0, [x24, #0x0]\n"
- "ldr s1, [x23, #0x0]\n"
- "ldr s2, [x22, #0x0]\n"
- "ldr s3, [x21, #0x0]\n"
+ "ldr s0, [x23, #0x0]\n"
+ "ldr s1, [x22, #0x0]\n"
+ "ldr s2, [x21, #0x0]\n"
+ "ldr s3, [x20, #0x0]\n"
"156:" // Height 4: Multiply loop: Ragged operand read: Done
- "ldr q4, [x28, #0x0]\n"
- "ldr q5, [x28, #0x10]\n"
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ "ldr q4, [x28, #0x0]\n"
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
+ "ldr q5, [x28, #0x10]\n"
"ldr q6, [x28, #0x20]\n"
- "ldr q7, [x28, #0x30]\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
+ "ldr q7, [x28, #0x30]\n"
".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
@@ -2128,25 +2128,25 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec5f // bfmmla v31.4s, v2.8h, v7.8h\n"
"157:" // Height 4: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 149b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"uzp1 v4.2d, v8.2d, v14.2d\n"
- "add x21, x22, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 v8.2d, v8.2d, v14.2d\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x22, x26, x19, LSL #2\n"
"uzp1 v14.2d, v9.2d, v15.2d\n"
- "prfm pstl1keep, [x27, #0x0]\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
"uzp2 v9.2d, v9.2d, v15.2d\n"
+ "add x21, x22, x19, LSL #2\n"
"uzp1 v15.2d, v10.2d, v16.2d\n"
- "prfm pstl1keep, [x23, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
+ "add x20, x21, x19, LSL #2\n"
"uzp2 v10.2d, v10.2d, v16.2d\n"
+ "prfm pstl1keep, [x20, #0x0]\n"
"uzp1 v16.2d, v11.2d, v17.2d\n"
- "prfm pstl1keep, [x21, #0x0]\n"
"uzp2 v11.2d, v11.2d, v17.2d\n"
"uzp1 v17.2d, v12.2d, v18.2d\n"
"uzp2 v12.2d, v12.2d, v18.2d\n"
@@ -2165,260 +2165,260 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"uzp1 v30.2d, v25.2d, v31.2d\n"
"uzp2 v25.2d, v25.2d, v31.2d\n"
"tbz %x[flags], #1, 158f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v4.4s, v4.4s, v1.4s\n"
- "fmin v14.4s, v14.4s, v1.4s\n"
- "fmin v15.4s, v15.4s, v1.4s\n"
- "fmin v16.4s, v16.4s, v1.4s\n"
- "fmin v17.4s, v17.4s, v1.4s\n"
- "fmin v18.4s, v18.4s, v1.4s\n"
- "fmin v8.4s, v8.4s, v1.4s\n"
- "fmin v9.4s, v9.4s, v1.4s\n"
- "fmin v10.4s, v10.4s, v1.4s\n"
- "fmin v11.4s, v11.4s, v1.4s\n"
- "fmin v12.4s, v12.4s, v1.4s\n"
- "fmin v13.4s, v13.4s, v1.4s\n"
- "fmin v19.4s, v19.4s, v1.4s\n"
- "fmin v26.4s, v26.4s, v1.4s\n"
- "fmin v27.4s, v27.4s, v1.4s\n"
- "fmin v28.4s, v28.4s, v1.4s\n"
- "fmin v29.4s, v29.4s, v1.4s\n"
- "fmin v30.4s, v30.4s, v1.4s\n"
- "fmin v20.4s, v20.4s, v1.4s\n"
- "fmin v21.4s, v21.4s, v1.4s\n"
- "fmin v22.4s, v22.4s, v1.4s\n"
- "fmin v23.4s, v23.4s, v1.4s\n"
- "fmin v24.4s, v24.4s, v1.4s\n"
- "fmin v25.4s, v25.4s, v1.4s\n"
- "fmax v4.4s, v4.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v15.4s, v15.4s, v0.4s\n"
- "fmax v16.4s, v16.4s, v0.4s\n"
- "fmax v17.4s, v17.4s, v0.4s\n"
- "fmax v18.4s, v18.4s, v0.4s\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
- "fmax v19.4s, v19.4s, v0.4s\n"
- "fmax v26.4s, v26.4s, v0.4s\n"
- "fmax v27.4s, v27.4s, v0.4s\n"
- "fmax v28.4s, v28.4s, v0.4s\n"
- "fmax v29.4s, v29.4s, v0.4s\n"
- "fmax v30.4s, v30.4s, v0.4s\n"
- "fmax v20.4s, v20.4s, v0.4s\n"
- "fmax v21.4s, v21.4s, v0.4s\n"
- "fmax v22.4s, v22.4s, v0.4s\n"
- "fmax v23.4s, v23.4s, v0.4s\n"
- "fmax v24.4s, v24.4s, v0.4s\n"
- "fmax v25.4s, v25.4s, v0.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v4.4s, v4.4s, v0.4s\n"
+ "fmin v14.4s, v14.4s, v0.4s\n"
+ "fmin v15.4s, v15.4s, v0.4s\n"
+ "fmin v16.4s, v16.4s, v0.4s\n"
+ "fmax v4.4s, v4.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmax v15.4s, v15.4s, v1.4s\n"
+ "fmax v16.4s, v16.4s, v1.4s\n"
+ "fmin v17.4s, v17.4s, v0.4s\n"
+ "fmin v18.4s, v18.4s, v0.4s\n"
+ "fmin v8.4s, v8.4s, v0.4s\n"
+ "fmax v17.4s, v17.4s, v1.4s\n"
+ "fmax v18.4s, v18.4s, v1.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmin v9.4s, v9.4s, v0.4s\n"
+ "fmin v10.4s, v10.4s, v0.4s\n"
+ "fmin v11.4s, v11.4s, v0.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v0.4s\n"
+ "fmin v13.4s, v13.4s, v0.4s\n"
+ "fmin v19.4s, v19.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v19.4s, v19.4s, v1.4s\n"
+ "fmin v26.4s, v26.4s, v0.4s\n"
+ "fmin v27.4s, v27.4s, v0.4s\n"
+ "fmin v28.4s, v28.4s, v0.4s\n"
+ "fmax v26.4s, v26.4s, v1.4s\n"
+ "fmax v27.4s, v27.4s, v1.4s\n"
+ "fmax v28.4s, v28.4s, v1.4s\n"
+ "fmin v29.4s, v29.4s, v0.4s\n"
+ "fmin v30.4s, v30.4s, v0.4s\n"
+ "fmin v20.4s, v20.4s, v0.4s\n"
+ "fmax v29.4s, v29.4s, v1.4s\n"
+ "fmax v30.4s, v30.4s, v1.4s\n"
+ "fmax v20.4s, v20.4s, v1.4s\n"
+ "fmin v21.4s, v21.4s, v0.4s\n"
+ "fmin v22.4s, v22.4s, v0.4s\n"
+ "fmin v23.4s, v23.4s, v0.4s\n"
+ "fmax v21.4s, v21.4s, v1.4s\n"
+ "fmax v22.4s, v22.4s, v1.4s\n"
+ "fmax v23.4s, v23.4s, v1.4s\n"
+ "fmin v24.4s, v24.4s, v0.4s\n"
+ "fmin v25.4s, v25.4s, v0.4s\n"
+ "fmax v24.4s, v24.4s, v1.4s\n"
+ "fmax v25.4s, v25.4s, v1.4s\n"
"158:" // Height 4: No activation
"cmp x9, #0x18\n"
"bge 171f\n"
"tbz x9, #4, 162f\n"
- "st1 { v4.4s }, [x27], #0x10\n"
- "st1 { v14.4s }, [x27], #0x10\n"
- "st1 { v15.4s }, [x27], #0x10\n"
- "st1 { v16.4s }, [x27], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
- "st1 { v9.4s }, [x23], #0x10\n"
- "st1 { v10.4s }, [x23], #0x10\n"
- "st1 { v11.4s }, [x23], #0x10\n"
- "st1 { v19.4s }, [x22], #0x10\n"
- "st1 { v26.4s }, [x22], #0x10\n"
- "st1 { v27.4s }, [x22], #0x10\n"
- "st1 { v28.4s }, [x22], #0x10\n"
- "st1 { v20.4s }, [x21], #0x10\n"
- "st1 { v21.4s }, [x21], #0x10\n"
- "st1 { v22.4s }, [x21], #0x10\n"
- "st1 { v23.4s }, [x21], #0x10\n"
+ "st1 { v4.4s }, [x26], #0x10\n"
+ "st1 { v14.4s }, [x26], #0x10\n"
+ "st1 { v15.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x26], #0x10\n"
+ "st1 { v8.4s }, [x22], #0x10\n"
+ "st1 { v9.4s }, [x22], #0x10\n"
+ "st1 { v10.4s }, [x22], #0x10\n"
+ "st1 { v11.4s }, [x22], #0x10\n"
+ "st1 { v19.4s }, [x21], #0x10\n"
+ "st1 { v26.4s }, [x21], #0x10\n"
+ "st1 { v27.4s }, [x21], #0x10\n"
+ "st1 { v28.4s }, [x21], #0x10\n"
+ "st1 { v20.4s }, [x20], #0x10\n"
+ "st1 { v21.4s }, [x20], #0x10\n"
+ "st1 { v22.4s }, [x20], #0x10\n"
+ "st1 { v23.4s }, [x20], #0x10\n"
"tbz x9, #2, 160f\n"
- "st1 { v17.4s }, [x27], #0x10\n"
- "st1 { v12.4s }, [x23], #0x10\n"
- "st1 { v29.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
+ "st1 { v17.4s }, [x26], #0x10\n"
+ "st1 { v12.4s }, [x22], #0x10\n"
+ "st1 { v29.4s }, [x21], #0x10\n"
+ "st1 { v24.4s }, [x20], #0x10\n"
"tbz x9, #1, 159f\n"
- "str d18, [x27], #0x8\n"
- "str d13, [x23], #0x8\n"
- "str d30, [x22], #0x8\n"
- "str d25, [x21], #0x8\n"
+ "str d18, [x26], #0x8\n"
+ "str d13, [x22], #0x8\n"
+ "str d30, [x21], #0x8\n"
+ "str d25, [x20], #0x8\n"
"tbz x9, #0, 170f\n"
- "st1 { v18.s }[2], [x27]\n"
- "st1 { v13.s }[2], [x23]\n"
- "st1 { v30.s }[2], [x22]\n"
- "st1 { v25.s }[2], [x21]\n"
+ "st1 { v18.s }[2], [x26]\n"
+ "st1 { v13.s }[2], [x22]\n"
+ "st1 { v30.s }[2], [x21]\n"
+ "st1 { v25.s }[2], [x20]\n"
"b 170f\n"
"159:" // Height 4: Partial direct writeback: partial_1_20
"tbz x9, #0, 170f\n"
- "str s18, [x27, #0x0]\n"
- "str s13, [x23, #0x0]\n"
- "str s30, [x22, #0x0]\n"
- "str s25, [x21, #0x0]\n"
+ "str s18, [x26, #0x0]\n"
+ "str s13, [x22, #0x0]\n"
+ "str s30, [x21, #0x0]\n"
+ "str s25, [x20, #0x0]\n"
"b 170f\n"
"160:" // Height 4: Partial direct writeback: partial_2_16
"tbz x9, #1, 161f\n"
- "str d17, [x27], #0x8\n"
- "str d12, [x23], #0x8\n"
- "str d29, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
+ "str d17, [x26], #0x8\n"
+ "str d12, [x22], #0x8\n"
+ "str d29, [x21], #0x8\n"
+ "str d24, [x20], #0x8\n"
"tbz x9, #0, 170f\n"
- "st1 { v17.s }[2], [x27]\n"
- "st1 { v12.s }[2], [x23]\n"
- "st1 { v29.s }[2], [x22]\n"
- "st1 { v24.s }[2], [x21]\n"
+ "st1 { v17.s }[2], [x26]\n"
+ "st1 { v12.s }[2], [x22]\n"
+ "st1 { v29.s }[2], [x21]\n"
+ "st1 { v24.s }[2], [x20]\n"
"b 170f\n"
"161:" // Height 4: Partial direct writeback: partial_1_16
"tbz x9, #0, 170f\n"
- "str s17, [x27, #0x0]\n"
- "str s12, [x23, #0x0]\n"
- "str s29, [x22, #0x0]\n"
- "str s24, [x21, #0x0]\n"
+ "str s17, [x26, #0x0]\n"
+ "str s12, [x22, #0x0]\n"
+ "str s29, [x21, #0x0]\n"
+ "str s24, [x20, #0x0]\n"
"b 170f\n"
"162:" // Height 4: Partial direct writeback: partial_8_0
"tbz x9, #3, 166f\n"
- "st1 { v4.4s }, [x27], #0x10\n"
- "st1 { v14.4s }, [x27], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
- "st1 { v9.4s }, [x23], #0x10\n"
- "st1 { v19.4s }, [x22], #0x10\n"
- "st1 { v26.4s }, [x22], #0x10\n"
- "st1 { v20.4s }, [x21], #0x10\n"
- "st1 { v21.4s }, [x21], #0x10\n"
+ "st1 { v4.4s }, [x26], #0x10\n"
+ "st1 { v14.4s }, [x26], #0x10\n"
+ "st1 { v8.4s }, [x22], #0x10\n"
+ "st1 { v9.4s }, [x22], #0x10\n"
+ "st1 { v19.4s }, [x21], #0x10\n"
+ "st1 { v26.4s }, [x21], #0x10\n"
+ "st1 { v20.4s }, [x20], #0x10\n"
+ "st1 { v21.4s }, [x20], #0x10\n"
"tbz x9, #2, 164f\n"
- "st1 { v15.4s }, [x27], #0x10\n"
- "st1 { v10.4s }, [x23], #0x10\n"
- "st1 { v27.4s }, [x22], #0x10\n"
- "st1 { v22.4s }, [x21], #0x10\n"
+ "st1 { v15.4s }, [x26], #0x10\n"
+ "st1 { v10.4s }, [x22], #0x10\n"
+ "st1 { v27.4s }, [x21], #0x10\n"
+ "st1 { v22.4s }, [x20], #0x10\n"
"tbz x9, #1, 163f\n"
- "str d16, [x27], #0x8\n"
- "str d11, [x23], #0x8\n"
- "str d28, [x22], #0x8\n"
- "str d23, [x21], #0x8\n"
+ "str d16, [x26], #0x8\n"
+ "str d11, [x22], #0x8\n"
+ "str d28, [x21], #0x8\n"
+ "str d23, [x20], #0x8\n"
"tbz x9, #0, 170f\n"
- "st1 { v16.s }[2], [x27]\n"
- "st1 { v11.s }[2], [x23]\n"
- "st1 { v28.s }[2], [x22]\n"
- "st1 { v23.s }[2], [x21]\n"
+ "st1 { v16.s }[2], [x26]\n"
+ "st1 { v11.s }[2], [x22]\n"
+ "st1 { v28.s }[2], [x21]\n"
+ "st1 { v23.s }[2], [x20]\n"
"b 170f\n"
"163:" // Height 4: Partial direct writeback: partial_1_12
"tbz x9, #0, 170f\n"
- "str s16, [x27, #0x0]\n"
- "str s11, [x23, #0x0]\n"
- "str s28, [x22, #0x0]\n"
- "str s23, [x21, #0x0]\n"
+ "str s16, [x26, #0x0]\n"
+ "str s11, [x22, #0x0]\n"
+ "str s28, [x21, #0x0]\n"
+ "str s23, [x20, #0x0]\n"
"b 170f\n"
"164:" // Height 4: Partial direct writeback: partial_2_8
"tbz x9, #1, 165f\n"
- "str d15, [x27], #0x8\n"
- "str d10, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
- "str d22, [x21], #0x8\n"
+ "str d15, [x26], #0x8\n"
+ "str d10, [x22], #0x8\n"
+ "str d27, [x21], #0x8\n"
+ "str d22, [x20], #0x8\n"
"tbz x9, #0, 170f\n"
- "st1 { v15.s }[2], [x27]\n"
- "st1 { v10.s }[2], [x23]\n"
- "st1 { v27.s }[2], [x22]\n"
- "st1 { v22.s }[2], [x21]\n"
+ "st1 { v15.s }[2], [x26]\n"
+ "st1 { v10.s }[2], [x22]\n"
+ "st1 { v27.s }[2], [x21]\n"
+ "st1 { v22.s }[2], [x20]\n"
"b 170f\n"
"165:" // Height 4: Partial direct writeback: partial_1_8
"tbz x9, #0, 170f\n"
- "str s15, [x27, #0x0]\n"
- "str s10, [x23, #0x0]\n"
- "str s27, [x22, #0x0]\n"
- "str s22, [x21, #0x0]\n"
+ "str s15, [x26, #0x0]\n"
+ "str s10, [x22, #0x0]\n"
+ "str s27, [x21, #0x0]\n"
+ "str s22, [x20, #0x0]\n"
"b 170f\n"
"166:" // Height 4: Partial direct writeback: partial_4_0
"tbz x9, #2, 168f\n"
- "st1 { v4.4s }, [x27], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
- "st1 { v19.4s }, [x22], #0x10\n"
- "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v4.4s }, [x26], #0x10\n"
+ "st1 { v8.4s }, [x22], #0x10\n"
+ "st1 { v19.4s }, [x21], #0x10\n"
+ "st1 { v20.4s }, [x20], #0x10\n"
"tbz x9, #1, 167f\n"
- "str d14, [x27], #0x8\n"
- "str d9, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
- "str d21, [x21], #0x8\n"
+ "str d14, [x26], #0x8\n"
+ "str d9, [x22], #0x8\n"
+ "str d26, [x21], #0x8\n"
+ "str d21, [x20], #0x8\n"
"tbz x9, #0, 170f\n"
- "st1 { v14.s }[2], [x27]\n"
- "st1 { v9.s }[2], [x23]\n"
- "st1 { v26.s }[2], [x22]\n"
- "st1 { v21.s }[2], [x21]\n"
+ "st1 { v14.s }[2], [x26]\n"
+ "st1 { v9.s }[2], [x22]\n"
+ "st1 { v26.s }[2], [x21]\n"
+ "st1 { v21.s }[2], [x20]\n"
"b 170f\n"
"167:" // Height 4: Partial direct writeback: partial_1_4
"tbz x9, #0, 170f\n"
- "str s14, [x27, #0x0]\n"
- "str s9, [x23, #0x0]\n"
- "str s26, [x22, #0x0]\n"
- "str s21, [x21, #0x0]\n"
+ "str s14, [x26, #0x0]\n"
+ "str s9, [x22, #0x0]\n"
+ "str s26, [x21, #0x0]\n"
+ "str s21, [x20, #0x0]\n"
"b 170f\n"
"168:" // Height 4: Partial direct writeback: partial_2_0
"tbz x9, #1, 169f\n"
- "str d4, [x27], #0x8\n"
- "str d8, [x23], #0x8\n"
- "str d19, [x22], #0x8\n"
- "str d20, [x21], #0x8\n"
+ "str d4, [x26], #0x8\n"
+ "str d8, [x22], #0x8\n"
+ "str d19, [x21], #0x8\n"
+ "str d20, [x20], #0x8\n"
"tbz x9, #0, 170f\n"
- "st1 { v4.s }[2], [x27]\n"
- "st1 { v8.s }[2], [x23]\n"
- "st1 { v19.s }[2], [x22]\n"
- "st1 { v20.s }[2], [x21]\n"
+ "st1 { v4.s }[2], [x26]\n"
+ "st1 { v8.s }[2], [x22]\n"
+ "st1 { v19.s }[2], [x21]\n"
+ "st1 { v20.s }[2], [x20]\n"
"b 170f\n"
"169:" // Height 4: Partial direct writeback: partial_1_0
- "str s4, [x27, #0x0]\n"
- "str s8, [x23, #0x0]\n"
- "str s19, [x22, #0x0]\n"
- "str s20, [x21, #0x0]\n"
+ "str s4, [x26, #0x0]\n"
+ "str s8, [x22, #0x0]\n"
+ "str s19, [x21, #0x0]\n"
+ "str s20, [x20, #0x0]\n"
"170:" // Height 4: Partial direct writeback: Done
"b 172f\n"
"171:" // Height 4: Full writeback
- "str q4, [x27, #0x0]\n"
- "str q14, [x27, #0x10]\n"
- "str q15, [x27, #0x20]\n"
- "str q16, [x27, #0x30]\n"
- "str q17, [x27, #0x40]\n"
- "str q18, [x27, #0x50]\n"
- "add x27, x27, #0x60\n"
- "str q8, [x23, #0x0]\n"
- "str q9, [x23, #0x10]\n"
- "str q10, [x23, #0x20]\n"
- "str q11, [x23, #0x30]\n"
- "str q12, [x23, #0x40]\n"
- "str q13, [x23, #0x50]\n"
- "str q19, [x22, #0x0]\n"
- "str q26, [x22, #0x10]\n"
- "str q27, [x22, #0x20]\n"
- "str q28, [x22, #0x30]\n"
- "str q29, [x22, #0x40]\n"
- "str q30, [x22, #0x50]\n"
- "str q20, [x21, #0x0]\n"
- "str q21, [x21, #0x10]\n"
- "str q22, [x21, #0x20]\n"
- "str q23, [x21, #0x30]\n"
- "str q24, [x21, #0x40]\n"
- "str q25, [x21, #0x50]\n"
+ "str q4, [x26, #0x0]\n"
+ "str q14, [x26, #0x10]\n"
+ "str q15, [x26, #0x20]\n"
+ "str q16, [x26, #0x30]\n"
+ "str q17, [x26, #0x40]\n"
+ "str q18, [x26, #0x50]\n"
+ "add x26, x26, #0x60\n"
+ "str q8, [x22, #0x0]\n"
+ "str q9, [x22, #0x10]\n"
+ "str q10, [x22, #0x20]\n"
+ "str q11, [x22, #0x30]\n"
+ "str q12, [x22, #0x40]\n"
+ "str q13, [x22, #0x50]\n"
+ "str q19, [x21, #0x0]\n"
+ "str q26, [x21, #0x10]\n"
+ "str q27, [x21, #0x20]\n"
+ "str q28, [x21, #0x30]\n"
+ "str q29, [x21, #0x40]\n"
+ "str q30, [x21, #0x50]\n"
+ "str q20, [x20, #0x0]\n"
+ "str q21, [x20, #0x10]\n"
+ "str q22, [x20, #0x20]\n"
+ "str q23, [x20, #0x30]\n"
+ "str q24, [x20, #0x40]\n"
+ "str q25, [x20, #0x50]\n"
"172:" // Height 4: Writeback done
"subs x9, x9, #0x18\n"
"bgt 131b\n"
"subs %x[M], %x[M], #0x4\n"
"beq 174f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 173f\n"
- "add x21, x21, #0x4\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x4\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"173:" // Update direct input
- "mov x20, #0x10\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x10\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"174:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32bf16fp32_mmla_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32bf16fp32_mmla_6x16/generic.cpp
index 4993777d62..19dbf0588e 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32bf16fp32_mmla_6x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32bf16fp32_mmla_6x16/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -103,23 +103,23 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"cmp %x[M], #0x2\n"
"bgt 71f\n"
"beq 36f\n"
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x9, %x[bias]\n"
+ "mov x28, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "cbz x12, 3f\n"
- "ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
+ "cbz x9, 3f\n"
+ "ldr q8, [x9, #0x0]\n"
"zip2 v12.2d, v8.2d, v8.2d\n"
+ "ldr q9, [x9, #0x10]\n"
"zip1 v8.2d, v8.2d, v8.2d\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
+ "ldr q10, [x9, #0x20]\n"
+ "ldr q11, [x9, #0x30]\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
+ "add x9, x9, #0x40\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x12, x12, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
"b 15f\n"
@@ -128,65 +128,65 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"cmp x11, #0x10\n"
"bge 12f\n"
"tbz x11, #3, 7f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
"tbz x11, #2, 5f\n"
- "ld1 { v11.4s }, [x9], #0x10\n"
+ "ld1 { v11.4s }, [x28], #0x10\n"
"tbz x11, #1, 4f\n"
- "ldr d16, [x9], #0x8\n"
- "mov x20, #0x38\n"
+ "mov x19, #0x38\n"
+ "ldr d16, [x28], #0x8\n"
"tbz x11, #0, 11f\n"
- "ld1 { v16.s }[2], [x9]\n"
+ "ld1 { v16.s }[2], [x28]\n"
"b 11f\n"
"4:" // Height 1: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x11, #0, 11f\n"
- "ldr s16, [x9, #0x0]\n"
+ "ldr s16, [x28, #0x0]\n"
"b 11f\n"
"5:" // Height 1: Partial accumulate: partial_2_8
"tbz x11, #1, 6f\n"
- "ldr d11, [x9], #0x8\n"
- "mov x20, #0x28\n"
+ "ldr d11, [x28], #0x8\n"
+ "mov x19, #0x28\n"
"tbz x11, #0, 11f\n"
- "ld1 { v11.s }[2], [x9]\n"
+ "ld1 { v11.s }[2], [x28]\n"
"b 11f\n"
"6:" // Height 1: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x11, #0, 11f\n"
- "ldr s11, [x9, #0x0]\n"
+ "ldr s11, [x28, #0x0]\n"
"b 11f\n"
"7:" // Height 1: Partial accumulate: partial_4_0
"tbz x11, #2, 9f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
"tbz x11, #1, 8f\n"
- "ldr d10, [x9], #0x8\n"
- "mov x20, #0x18\n"
+ "ldr d10, [x28], #0x8\n"
+ "mov x19, #0x18\n"
"tbz x11, #0, 11f\n"
- "ld1 { v10.s }[2], [x9]\n"
+ "ld1 { v10.s }[2], [x28]\n"
"b 11f\n"
"8:" // Height 1: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x11, #0, 11f\n"
- "ldr s10, [x9, #0x0]\n"
+ "ldr s10, [x28, #0x0]\n"
"b 11f\n"
"9:" // Height 1: Partial accumulate: partial_2_0
"tbz x11, #1, 10f\n"
- "ldr d9, [x9], #0x8\n"
- "mov x20, #0x8\n"
+ "ldr d9, [x28], #0x8\n"
+ "mov x19, #0x8\n"
"tbz x11, #0, 11f\n"
- "ld1 { v9.s }[2], [x9]\n"
+ "ld1 { v9.s }[2], [x28]\n"
"b 11f\n"
"10:" // Height 1: Partial accumulate: partial_1_0
- "ldr s9, [x9, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr s9, [x28, #0x0]\n"
+ "mov x19, #0x0\n"
"11:" // Height 1: Partial accumulate: Done
- "sub x9, x9, x20\n"
+ "sub x28, x28, x19\n"
"b 13f\n"
"12:" // Height 1: full accumulate
- "ldr q9, [x9, #0x0]\n"
- "ldr q10, [x9, #0x10]\n"
- "ldr q11, [x9, #0x20]\n"
- "ldr q16, [x9, #0x30]\n"
+ "ldr q9, [x28, #0x0]\n"
+ "ldr q10, [x28, #0x10]\n"
+ "ldr q11, [x28, #0x20]\n"
+ "ldr q16, [x28, #0x30]\n"
"13:" // Height 1: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -207,86 +207,86 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"movi v14.16b, #0x0\n"
"movi v15.16b, #0x0\n"
"15:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"16:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 17f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 18f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 18f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
"b 18f\n"
"17:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"18:" // Height 1: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"blt 21f\n"
- "ld1 { v0.4s }, [x26], #0x10\n"
- "ldr q6, [x10, #0x0]\n"
- "cmp x27, #0x8\n"
- "ldr q7, [x10, #0x10]\n"
+ "ld1 { v0.4s }, [x25], #0x10\n"
+ "cmp x26, #0x8\n"
"blt 20f\n"
"19:" // Height 1: Multiply loop: Main loop head
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ "ldr q6, [x10, #0x0]\n"
+ "sub x26, x26, #0x4\n"
".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "cmp x26, #0x8\n"
".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
+ "ldr q6, [x10, #0x20]\n"
"ldr q7, [x10, #0x30]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
"ldr q6, [x10, #0x40]\n"
".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
"ldr q7, [x10, #0x50]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e46ec0a // bfmmla v10.4s, v0.8h, v6.8h\n"
"ldr q6, [x10, #0x60]\n"
".inst 0x6e47ec0e // bfmmla v14.4s, v0.8h, v7.8h\n"
"ldr q7, [x10, #0x70]\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x8\n"
- ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
"add x10, x10, #0x80\n"
- "ldr q6, [x10, #0x0]\n"
+ ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
- "ldr q7, [x10, #0x10]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "ld1 { v0.4s }, [x26], #0x10\n"
+ "ld1 { v0.4s }, [x25], #0x10\n"
"bge 19b\n"
"20:" // Height 1: Multiply loop: Single iteration only
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ "ldr q6, [x10, #0x0]\n"
+ "sub x26, x26, #0x4\n"
".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
+ "ldr q7, [x10, #0x10]\n"
"ldr q6, [x10, #0x20]\n"
".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
"ldr q7, [x10, #0x30]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
"ldr q6, [x10, #0x40]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
"ldr q7, [x10, #0x50]\n"
".inst 0x6e46ec0a // bfmmla v10.4s, v0.8h, v6.8h\n"
"ldr q6, [x10, #0x60]\n"
".inst 0x6e47ec0e // bfmmla v14.4s, v0.8h, v7.8h\n"
"ldr q7, [x10, #0x70]\n"
- "sub x27, x27, #0x4\n"
+ "add x10, x10, #0x80\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "add x10, x10, #0x80\n"
"21:" // Height 1: Multiply loop: Main loop skip
- "cbz x27, 24f\n"
- "cbz x27, 24f\n"
- "tbz x27, #1, 22f\n"
- "ldr d0, [x26], #0x8\n"
- "tbz x27, #0, 23f\n"
- "ld1 { v0.s }[2], [x26]\n"
+ "cbz x26, 24f\n"
+ "cbz x26, 24f\n"
+ "tbz x26, #1, 22f\n"
+ "ldr d0, [x25], #0x8\n"
+ "tbz x26, #0, 23f\n"
+ "ld1 { v0.s }[2], [x25]\n"
"b 23f\n"
"22:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
- "ldr s0, [x26, #0x0]\n"
+ "ldr s0, [x25, #0x0]\n"
"23:" // Height 1: Multiply loop: Ragged operand read: Done
+ ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
"ldr q6, [x10, #0x0]\n"
"ldr q7, [x10, #0x10]\n"
- ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
"ldr q6, [x10, #0x20]\n"
".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
@@ -299,198 +299,198 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"ldr q6, [x10, #0x60]\n"
".inst 0x6e47ec0e // bfmmla v14.4s, v0.8h, v7.8h\n"
"ldr q7, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
- "add x10, x10, #0x80\n"
"24:" // Height 1: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 16b\n"
"uzp1 v8.2d, v8.2d, v12.2d\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
"uzp1 v9.2d, v9.2d, v13.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
"uzp1 v10.2d, v10.2d, v14.2d\n"
"uzp1 v11.2d, v11.2d, v15.2d\n"
"tbz %x[flags], #1, 25f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v8.4s, v8.4s, v1.4s\n"
- "fmin v9.4s, v9.4s, v1.4s\n"
- "fmin v10.4s, v10.4s, v1.4s\n"
- "fmin v11.4s, v11.4s, v1.4s\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v8.4s, v8.4s, v0.4s\n"
+ "fmin v9.4s, v9.4s, v0.4s\n"
+ "fmin v10.4s, v10.4s, v0.4s\n"
+ "fmin v11.4s, v11.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
"25:" // Height 1: No activation
"cmp x11, #0x10\n"
"bge 34f\n"
"tbz x11, #3, 29f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v9.4s }, [x9], #0x10\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v9.4s }, [x28], #0x10\n"
"tbz x11, #2, 27f\n"
- "st1 { v10.4s }, [x9], #0x10\n"
+ "st1 { v10.4s }, [x28], #0x10\n"
"tbz x11, #1, 26f\n"
- "str d11, [x9], #0x8\n"
+ "str d11, [x28], #0x8\n"
"tbz x11, #0, 33f\n"
- "st1 { v11.s }[2], [x9]\n"
+ "st1 { v11.s }[2], [x28]\n"
"b 33f\n"
"26:" // Height 1: Partial direct writeback: partial_1_12
"tbz x11, #0, 33f\n"
- "str s11, [x9, #0x0]\n"
+ "str s11, [x28, #0x0]\n"
"b 33f\n"
"27:" // Height 1: Partial direct writeback: partial_2_8
"tbz x11, #1, 28f\n"
- "str d10, [x9], #0x8\n"
+ "str d10, [x28], #0x8\n"
"tbz x11, #0, 33f\n"
- "st1 { v10.s }[2], [x9]\n"
+ "st1 { v10.s }[2], [x28]\n"
"b 33f\n"
"28:" // Height 1: Partial direct writeback: partial_1_8
"tbz x11, #0, 33f\n"
- "str s10, [x9, #0x0]\n"
+ "str s10, [x28, #0x0]\n"
"b 33f\n"
"29:" // Height 1: Partial direct writeback: partial_4_0
"tbz x11, #2, 31f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
"tbz x11, #1, 30f\n"
- "str d9, [x9], #0x8\n"
+ "str d9, [x28], #0x8\n"
"tbz x11, #0, 33f\n"
- "st1 { v9.s }[2], [x9]\n"
+ "st1 { v9.s }[2], [x28]\n"
"b 33f\n"
"30:" // Height 1: Partial direct writeback: partial_1_4
"tbz x11, #0, 33f\n"
- "str s9, [x9, #0x0]\n"
+ "str s9, [x28, #0x0]\n"
"b 33f\n"
"31:" // Height 1: Partial direct writeback: partial_2_0
"tbz x11, #1, 32f\n"
- "str d8, [x9], #0x8\n"
+ "str d8, [x28], #0x8\n"
"tbz x11, #0, 33f\n"
- "st1 { v8.s }[2], [x9]\n"
+ "st1 { v8.s }[2], [x28]\n"
"b 33f\n"
"32:" // Height 1: Partial direct writeback: partial_1_0
- "str s8, [x9, #0x0]\n"
+ "str s8, [x28, #0x0]\n"
"33:" // Height 1: Partial direct writeback: Done
"b 35f\n"
"34:" // Height 1: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
"35:" // Height 1: Writeback done
"subs x11, x11, #0x10\n"
"bgt 2b\n"
"b 212f\n"
"36:" // Height 2
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"37:" // Height 2: Column loop
- "cbz x12, 38f\n"
- "ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
+ "cbz x9, 38f\n"
+ "ldr q8, [x9, #0x0]\n"
"zip2 v12.2d, v8.2d, v8.2d\n"
+ "ldr q9, [x9, #0x10]\n"
"zip1 v8.2d, v8.2d, v8.2d\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
+ "ldr q10, [x9, #0x20]\n"
+ "ldr q11, [x9, #0x30]\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
+ "add x9, x9, #0x40\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x12, x12, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
"b 50f\n"
"38:" // Height 2: no bias
"tbz %x[flags], #0, 49f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x11, #0x10\n"
- "add x25, x9, x20, LSL #2\n"
+ "add x24, x28, x19, LSL #2\n"
"bge 47f\n"
"tbz x11, #3, 42f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
"tbz x11, #2, 40f\n"
- "ld1 { v11.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
+ "ld1 { v11.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
"tbz x11, #1, 39f\n"
- "ldr d16, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "mov x20, #0x38\n"
+ "mov x19, #0x38\n"
+ "ldr d16, [x28], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
"tbz x11, #0, 46f\n"
- "ld1 { v16.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
+ "ld1 { v16.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x24]\n"
"b 46f\n"
"39:" // Height 2: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x11, #0, 46f\n"
- "ldr s16, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
+ "ldr s16, [x28, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
"b 46f\n"
"40:" // Height 2: Partial accumulate: partial_2_8
"tbz x11, #1, 41f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "mov x20, #0x28\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
"tbz x11, #0, 46f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x24]\n"
"b 46f\n"
"41:" // Height 2: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x11, #0, 46f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
"b 46f\n"
"42:" // Height 2: Partial accumulate: partial_4_0
"tbz x11, #2, 44f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
"tbz x11, #1, 43f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "mov x20, #0x18\n"
+ "mov x19, #0x18\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
"tbz x11, #0, 46f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x24]\n"
"b 46f\n"
"43:" // Height 2: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x11, #0, 46f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
"b 46f\n"
"44:" // Height 2: Partial accumulate: partial_2_0
"tbz x11, #1, 45f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "mov x20, #0x8\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
"tbz x11, #0, 46f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x24]\n"
"b 46f\n"
"45:" // Height 2: Partial accumulate: partial_1_0
- "ldr s9, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
+ "ldr s9, [x28, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s12, [x24, #0x0]\n"
"46:" // Height 2: Partial accumulate: Done
- "sub x9, x9, x20\n"
+ "sub x28, x28, x19\n"
"b 48f\n"
"47:" // Height 2: full accumulate
- "ldr q9, [x9, #0x0]\n"
- "ldr q10, [x9, #0x10]\n"
- "ldr q11, [x9, #0x20]\n"
- "ldr q16, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
+ "ldr q9, [x28, #0x0]\n"
+ "ldr q10, [x28, #0x10]\n"
+ "ldr q11, [x28, #0x20]\n"
+ "ldr q16, [x28, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
"48:" // Height 2: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -511,98 +511,98 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"movi v14.16b, #0x0\n"
"movi v15.16b, #0x0\n"
"50:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"51:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 52f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 53f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 53f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
"b 53f\n"
"52:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
"53:" // Height 2: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"blt 56f\n"
- "ld1 { v0.4s }, [x26], #0x10\n"
- "ld1 { v1.4s }, [x25], #0x10\n"
- "cmp x27, #0x8\n"
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ld1 { v0.4s }, [x25], #0x10\n"
+ "cmp x26, #0x8\n"
"blt 55f\n"
"54:" // Height 2: Multiply loop: Main loop head
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ "ld1 { v1.4s }, [x24], #0x10\n"
+ "sub x26, x26, #0x4\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
+ "ldr q6, [x10, #0x0]\n"
+ "cmp x26, #0x8\n"
".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
+ "ldr q7, [x10, #0x10]\n"
"ldr q6, [x10, #0x20]\n"
".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
"ldr q7, [x10, #0x30]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
"ldr q6, [x10, #0x40]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
"ldr q7, [x10, #0x50]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e46ec0a // bfmmla v10.4s, v0.8h, v6.8h\n"
"ldr q6, [x10, #0x60]\n"
".inst 0x6e47ec0e // bfmmla v14.4s, v0.8h, v7.8h\n"
"ldr q7, [x10, #0x70]\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x8\n"
"add x10, x10, #0x80\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
- "ldr q6, [x10, #0x0]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
- "ldr q7, [x10, #0x10]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "ld1 { v0.4s }, [x26], #0x10\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "ld1 { v1.4s }, [x25], #0x10\n"
+ "ld1 { v0.4s }, [x25], #0x10\n"
"bge 54b\n"
"55:" // Height 2: Multiply loop: Single iteration only
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ "ld1 { v1.4s }, [x24], #0x10\n"
+ "sub x26, x26, #0x4\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
+ "ldr q6, [x10, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
"ldr q6, [x10, #0x20]\n"
".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
"ldr q7, [x10, #0x30]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
"ldr q6, [x10, #0x40]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
"ldr q7, [x10, #0x50]\n"
".inst 0x6e46ec0a // bfmmla v10.4s, v0.8h, v6.8h\n"
"ldr q6, [x10, #0x60]\n"
".inst 0x6e47ec0e // bfmmla v14.4s, v0.8h, v7.8h\n"
"ldr q7, [x10, #0x70]\n"
- "sub x27, x27, #0x4\n"
+ "add x10, x10, #0x80\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "add x10, x10, #0x80\n"
"56:" // Height 2: Multiply loop: Main loop skip
- "cbz x27, 59f\n"
- "cbz x27, 59f\n"
- "tbz x27, #1, 57f\n"
- "ldr d0, [x26], #0x8\n"
- "ldr d1, [x25], #0x8\n"
- "tbz x27, #0, 58f\n"
- "ld1 { v0.s }[2], [x26]\n"
- "ld1 { v1.s }[2], [x25]\n"
+ "cbz x26, 59f\n"
+ "cbz x26, 59f\n"
+ "tbz x26, #1, 57f\n"
+ "ldr d0, [x25], #0x8\n"
+ "ldr d1, [x24], #0x8\n"
+ "tbz x26, #0, 58f\n"
+ "ld1 { v0.s }[2], [x25]\n"
+ "ld1 { v1.s }[2], [x24]\n"
"b 58f\n"
"57:" // Height 2: Multiply loop: Ragged operand read: partial_1_0
- "ldr s0, [x26, #0x0]\n"
- "ldr s1, [x25, #0x0]\n"
+ "ldr s0, [x25, #0x0]\n"
+ "ldr s1, [x24, #0x0]\n"
"58:" // Height 2: Multiply loop: Ragged operand read: Done
+ ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
"ldr q6, [x10, #0x0]\n"
"ldr q7, [x10, #0x10]\n"
- ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
"ldr q6, [x10, #0x20]\n"
@@ -616,148 +616,148 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"ldr q6, [x10, #0x60]\n"
".inst 0x6e47ec0e // bfmmla v14.4s, v0.8h, v7.8h\n"
"ldr q7, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
- "add x10, x10, #0x80\n"
"59:" // Height 2: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 51b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
"uzp1 v6.2d, v8.2d, v12.2d\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x24, x28, x19, LSL #2\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x25, #0x0]\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
"tbz %x[flags], #1, 60f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v6.4s, v6.4s, v1.4s\n"
- "fmin v12.4s, v12.4s, v1.4s\n"
- "fmin v13.4s, v13.4s, v1.4s\n"
- "fmin v14.4s, v14.4s, v1.4s\n"
- "fmin v8.4s, v8.4s, v1.4s\n"
- "fmin v9.4s, v9.4s, v1.4s\n"
- "fmin v10.4s, v10.4s, v1.4s\n"
- "fmin v11.4s, v11.4s, v1.4s\n"
- "fmax v6.4s, v6.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v6.4s, v6.4s, v0.4s\n"
+ "fmin v12.4s, v12.4s, v0.4s\n"
+ "fmin v13.4s, v13.4s, v0.4s\n"
+ "fmin v14.4s, v14.4s, v0.4s\n"
+ "fmax v6.4s, v6.4s, v1.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmin v8.4s, v8.4s, v0.4s\n"
+ "fmin v9.4s, v9.4s, v0.4s\n"
+ "fmin v10.4s, v10.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmin v11.4s, v11.4s, v0.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
"60:" // Height 2: No activation
"cmp x11, #0x10\n"
"bge 69f\n"
"tbz x11, #3, 64f\n"
- "st1 { v6.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v9.4s }, [x25], #0x10\n"
+ "st1 { v6.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
"tbz x11, #2, 62f\n"
- "st1 { v13.4s }, [x9], #0x10\n"
- "st1 { v10.4s }, [x25], #0x10\n"
+ "st1 { v13.4s }, [x28], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
"tbz x11, #1, 61f\n"
- "str d14, [x9], #0x8\n"
- "str d11, [x25], #0x8\n"
+ "str d14, [x28], #0x8\n"
+ "str d11, [x24], #0x8\n"
"tbz x11, #0, 68f\n"
- "st1 { v14.s }[2], [x9]\n"
- "st1 { v11.s }[2], [x25]\n"
+ "st1 { v14.s }[2], [x28]\n"
+ "st1 { v11.s }[2], [x24]\n"
"b 68f\n"
"61:" // Height 2: Partial direct writeback: partial_1_12
"tbz x11, #0, 68f\n"
- "str s14, [x9, #0x0]\n"
- "str s11, [x25, #0x0]\n"
+ "str s14, [x28, #0x0]\n"
+ "str s11, [x24, #0x0]\n"
"b 68f\n"
"62:" // Height 2: Partial direct writeback: partial_2_8
"tbz x11, #1, 63f\n"
- "str d13, [x9], #0x8\n"
- "str d10, [x25], #0x8\n"
+ "str d13, [x28], #0x8\n"
+ "str d10, [x24], #0x8\n"
"tbz x11, #0, 68f\n"
- "st1 { v13.s }[2], [x9]\n"
- "st1 { v10.s }[2], [x25]\n"
+ "st1 { v13.s }[2], [x28]\n"
+ "st1 { v10.s }[2], [x24]\n"
"b 68f\n"
"63:" // Height 2: Partial direct writeback: partial_1_8
"tbz x11, #0, 68f\n"
- "str s13, [x9, #0x0]\n"
- "str s10, [x25, #0x0]\n"
+ "str s13, [x28, #0x0]\n"
+ "str s10, [x24, #0x0]\n"
"b 68f\n"
"64:" // Height 2: Partial direct writeback: partial_4_0
"tbz x11, #2, 66f\n"
- "st1 { v6.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
+ "st1 { v6.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
"tbz x11, #1, 65f\n"
- "str d12, [x9], #0x8\n"
- "str d9, [x25], #0x8\n"
+ "str d12, [x28], #0x8\n"
+ "str d9, [x24], #0x8\n"
"tbz x11, #0, 68f\n"
- "st1 { v12.s }[2], [x9]\n"
- "st1 { v9.s }[2], [x25]\n"
+ "st1 { v12.s }[2], [x28]\n"
+ "st1 { v9.s }[2], [x24]\n"
"b 68f\n"
"65:" // Height 2: Partial direct writeback: partial_1_4
"tbz x11, #0, 68f\n"
- "str s12, [x9, #0x0]\n"
- "str s9, [x25, #0x0]\n"
+ "str s12, [x28, #0x0]\n"
+ "str s9, [x24, #0x0]\n"
"b 68f\n"
"66:" // Height 2: Partial direct writeback: partial_2_0
"tbz x11, #1, 67f\n"
- "str d6, [x9], #0x8\n"
- "str d8, [x25], #0x8\n"
+ "str d6, [x28], #0x8\n"
+ "str d8, [x24], #0x8\n"
"tbz x11, #0, 68f\n"
- "st1 { v6.s }[2], [x9]\n"
- "st1 { v8.s }[2], [x25]\n"
+ "st1 { v6.s }[2], [x28]\n"
+ "st1 { v8.s }[2], [x24]\n"
"b 68f\n"
"67:" // Height 2: Partial direct writeback: partial_1_0
- "str s6, [x9, #0x0]\n"
- "str s8, [x25, #0x0]\n"
+ "str s6, [x28, #0x0]\n"
+ "str s8, [x24, #0x0]\n"
"68:" // Height 2: Partial direct writeback: Done
"b 70f\n"
"69:" // Height 2: Full writeback
- "str q6, [x9, #0x0]\n"
- "str q12, [x9, #0x10]\n"
- "str q13, [x9, #0x20]\n"
- "str q14, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q8, [x25, #0x0]\n"
- "str q9, [x25, #0x10]\n"
- "str q10, [x25, #0x20]\n"
- "str q11, [x25, #0x30]\n"
+ "str q6, [x28, #0x0]\n"
+ "str q12, [x28, #0x10]\n"
+ "str q13, [x28, #0x20]\n"
+ "str q14, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q8, [x24, #0x0]\n"
+ "str q9, [x24, #0x10]\n"
+ "str q10, [x24, #0x20]\n"
+ "str q11, [x24, #0x30]\n"
"70:" // Height 2: Writeback done
"subs x11, x11, #0x10\n"
"bgt 37b\n"
"b 212f\n"
"71:" // Height 3
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"72:" // Height 3: Column loop
- "cbz x12, 73f\n"
- "ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
+ "cbz x9, 73f\n"
+ "ldr q8, [x9, #0x0]\n"
"zip2 v12.2d, v8.2d, v8.2d\n"
+ "ldr q9, [x9, #0x10]\n"
"zip1 v8.2d, v8.2d, v8.2d\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
+ "ldr q10, [x9, #0x20]\n"
+ "mov v16.16b, v8.16b\n"
+ "ldr q11, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "mov v20.16b, v12.16b\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x12, x12, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
- "mov v16.16b, v8.16b\n"
- "mov v20.16b, v12.16b\n"
"mov v17.16b, v9.16b\n"
"mov v21.16b, v13.16b\n"
"mov v18.16b, v10.16b\n"
@@ -767,111 +767,111 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"b 85f\n"
"73:" // Height 3: no bias
"tbz %x[flags], #0, 84f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x11, #0x10\n"
- "add x24, x25, x20, LSL #2\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"bge 82f\n"
"tbz x11, #3, 77f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
"tbz x11, #2, 75f\n"
- "ld1 { v11.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v19.4s }, [x24], #0x10\n"
+ "ld1 { v11.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v19.4s }, [x23], #0x10\n"
"tbz x11, #1, 74f\n"
- "ldr d16, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "mov x20, #0x38\n"
- "ldr d24, [x24], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d16, [x28], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
"tbz x11, #0, 81f\n"
- "ld1 { v16.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v24.s }[2], [x24]\n"
+ "ld1 { v16.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
"b 81f\n"
"74:" // Height 3: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x11, #0, 81f\n"
- "ldr s16, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s24, [x24, #0x0]\n"
+ "ldr s16, [x28, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
"b 81f\n"
"75:" // Height 3: Partial accumulate: partial_2_8
"tbz x11, #1, 76f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "mov x20, #0x28\n"
- "ldr d19, [x24], #0x8\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d19, [x23], #0x8\n"
"tbz x11, #0, 81f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
"b 81f\n"
"76:" // Height 3: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x11, #0, 81f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
"b 81f\n"
"77:" // Height 3: Partial accumulate: partial_4_0
"tbz x11, #2, 79f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
"tbz x11, #1, 78f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "mov x20, #0x18\n"
- "ldr d18, [x24], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "ldr d18, [x23], #0x8\n"
"tbz x11, #0, 81f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
"b 81f\n"
"78:" // Height 3: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x11, #0, 81f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
"b 81f\n"
"79:" // Height 3: Partial accumulate: partial_2_0
"tbz x11, #1, 80f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d17, [x24], #0x8\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d17, [x23], #0x8\n"
"tbz x11, #0, 81f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
"b 81f\n"
"80:" // Height 3: Partial accumulate: partial_1_0
- "ldr s9, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s17, [x24, #0x0]\n"
+ "ldr s9, [x28, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s12, [x24, #0x0]\n"
+ "ldr s17, [x23, #0x0]\n"
"81:" // Height 3: Partial accumulate: Done
- "sub x9, x9, x20\n"
+ "sub x28, x28, x19\n"
"b 83f\n"
"82:" // Height 3: full accumulate
- "ldr q9, [x9, #0x0]\n"
- "ldr q10, [x9, #0x10]\n"
- "ldr q11, [x9, #0x20]\n"
- "ldr q16, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q17, [x24, #0x0]\n"
- "ldr q18, [x24, #0x10]\n"
- "ldr q19, [x24, #0x20]\n"
- "ldr q24, [x24, #0x30]\n"
+ "ldr q9, [x28, #0x0]\n"
+ "ldr q10, [x28, #0x10]\n"
+ "ldr q11, [x28, #0x20]\n"
+ "ldr q16, [x28, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q17, [x23, #0x0]\n"
+ "ldr q18, [x23, #0x10]\n"
+ "ldr q19, [x23, #0x20]\n"
+ "ldr q24, [x23, #0x30]\n"
"83:" // Height 3: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -908,96 +908,96 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"movi v22.16b, #0x0\n"
"movi v23.16b, #0x0\n"
"85:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"86:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 87f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 88f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 88f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
"b 88f\n"
"87:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"88:" // Height 3: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"blt 91f\n"
- "ld1 { v0.4s }, [x26], #0x10\n"
- "ld1 { v1.4s }, [x25], #0x10\n"
- "cmp x27, #0x8\n"
- "ld1 { v2.4s }, [x24], #0x10\n"
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ld1 { v0.4s }, [x25], #0x10\n"
+ "cmp x26, #0x8\n"
"blt 90f\n"
"89:" // Height 3: Multiply loop: Main loop head
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ "ld1 { v1.4s }, [x24], #0x10\n"
+ "sub x26, x26, #0x4\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
- ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
- "sub x27, x27, #0x4\n"
+ "ld1 { v2.4s }, [x23], #0x10\n"
+ "cmp x26, #0x8\n"
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
+ "ldr q6, [x10, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
"ldr q6, [x10, #0x20]\n"
".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
"ldr q7, [x10, #0x30]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- "cmp x27, #0x8\n"
".inst 0x6e46ec51 // bfmmla v17.4s, v2.8h, v6.8h\n"
"ldr q6, [x10, #0x40]\n"
".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e47ec55 // bfmmla v21.4s, v2.8h, v7.8h\n"
"ldr q7, [x10, #0x50]\n"
".inst 0x6e46ec0a // bfmmla v10.4s, v0.8h, v6.8h\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "ld1 { v1.4s }, [x25], #0x10\n"
".inst 0x6e46ec52 // bfmmla v18.4s, v2.8h, v6.8h\n"
"ldr q6, [x10, #0x60]\n"
".inst 0x6e47ec0e // bfmmla v14.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec56 // bfmmla v22.4s, v2.8h, v7.8h\n"
"ldr q7, [x10, #0x70]\n"
"add x10, x10, #0x80\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec53 // bfmmla v19.4s, v2.8h, v6.8h\n"
- "ldr q6, [x10, #0x0]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
- "ld1 { v0.4s }, [x26], #0x10\n"
+ "ld1 { v0.4s }, [x25], #0x10\n"
".inst 0x6e47ec57 // bfmmla v23.4s, v2.8h, v7.8h\n"
- "ld1 { v2.4s }, [x24], #0x10\n"
- "ldr q7, [x10, #0x10]\n"
"bge 89b\n"
"90:" // Height 3: Multiply loop: Single iteration only
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ "ld1 { v1.4s }, [x24], #0x10\n"
+ "sub x26, x26, #0x4\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
- ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
- "sub x27, x27, #0x4\n"
+ "ld1 { v2.4s }, [x23], #0x10\n"
+ "ldr q6, [x10, #0x0]\n"
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
+ "ldr q7, [x10, #0x10]\n"
+ ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
"ldr q6, [x10, #0x20]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
"ldr q7, [x10, #0x30]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e46ec51 // bfmmla v17.4s, v2.8h, v6.8h\n"
"ldr q6, [x10, #0x40]\n"
".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e47ec55 // bfmmla v21.4s, v2.8h, v7.8h\n"
"ldr q7, [x10, #0x50]\n"
".inst 0x6e46ec0a // bfmmla v10.4s, v0.8h, v6.8h\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e46ec52 // bfmmla v18.4s, v2.8h, v6.8h\n"
"ldr q6, [x10, #0x60]\n"
".inst 0x6e47ec0e // bfmmla v14.4s, v0.8h, v7.8h\n"
@@ -1009,32 +1009,32 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec57 // bfmmla v23.4s, v2.8h, v7.8h\n"
"91:" // Height 3: Multiply loop: Main loop skip
- "cbz x27, 94f\n"
- "cbz x27, 94f\n"
- "tbz x27, #1, 92f\n"
- "ldr d0, [x26], #0x8\n"
- "ldr d1, [x25], #0x8\n"
- "ldr d2, [x24], #0x8\n"
- "tbz x27, #0, 93f\n"
- "ld1 { v0.s }[2], [x26]\n"
- "ld1 { v1.s }[2], [x25]\n"
- "ld1 { v2.s }[2], [x24]\n"
+ "cbz x26, 94f\n"
+ "cbz x26, 94f\n"
+ "tbz x26, #1, 92f\n"
+ "ldr d0, [x25], #0x8\n"
+ "ldr d1, [x24], #0x8\n"
+ "ldr d2, [x23], #0x8\n"
+ "tbz x26, #0, 93f\n"
+ "ld1 { v0.s }[2], [x25]\n"
+ "ld1 { v1.s }[2], [x24]\n"
+ "ld1 { v2.s }[2], [x23]\n"
"b 93f\n"
"92:" // Height 3: Multiply loop: Ragged operand read: partial_1_0
- "ldr s0, [x26, #0x0]\n"
- "ldr s1, [x25, #0x0]\n"
- "ldr s2, [x24, #0x0]\n"
+ "ldr s0, [x25, #0x0]\n"
+ "ldr s1, [x24, #0x0]\n"
+ "ldr s2, [x23, #0x0]\n"
"93:" // Height 3: Multiply loop: Ragged operand read: Done
+ ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
"ldr q6, [x10, #0x0]\n"
+ ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
"ldr q7, [x10, #0x10]\n"
- ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
- ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
- ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
"ldr q6, [x10, #0x20]\n"
".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
- ".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
"ldr q7, [x10, #0x30]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec51 // bfmmla v17.4s, v2.8h, v6.8h\n"
@@ -1054,21 +1054,21 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec57 // bfmmla v23.4s, v2.8h, v7.8h\n"
"94:" // Height 3: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 86b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"uzp1 v6.2d, v8.2d, v12.2d\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x24, x28, x19, LSL #2\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x25, #0x0]\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
+ "add x23, x24, x19, LSL #2\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
@@ -1077,155 +1077,155 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"uzp1 v18.2d, v18.2d, v22.2d\n"
"uzp1 v19.2d, v19.2d, v23.2d\n"
"tbz %x[flags], #1, 95f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v6.4s, v6.4s, v1.4s\n"
- "fmin v12.4s, v12.4s, v1.4s\n"
- "fmin v13.4s, v13.4s, v1.4s\n"
- "fmin v14.4s, v14.4s, v1.4s\n"
- "fmin v8.4s, v8.4s, v1.4s\n"
- "fmin v9.4s, v9.4s, v1.4s\n"
- "fmin v10.4s, v10.4s, v1.4s\n"
- "fmin v11.4s, v11.4s, v1.4s\n"
- "fmin v16.4s, v16.4s, v1.4s\n"
- "fmin v17.4s, v17.4s, v1.4s\n"
- "fmin v18.4s, v18.4s, v1.4s\n"
- "fmin v19.4s, v19.4s, v1.4s\n"
- "fmax v6.4s, v6.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v16.4s, v16.4s, v0.4s\n"
- "fmax v17.4s, v17.4s, v0.4s\n"
- "fmax v18.4s, v18.4s, v0.4s\n"
- "fmax v19.4s, v19.4s, v0.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v6.4s, v6.4s, v0.4s\n"
+ "fmin v12.4s, v12.4s, v0.4s\n"
+ "fmin v13.4s, v13.4s, v0.4s\n"
+ "fmin v14.4s, v14.4s, v0.4s\n"
+ "fmax v6.4s, v6.4s, v1.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmin v8.4s, v8.4s, v0.4s\n"
+ "fmin v9.4s, v9.4s, v0.4s\n"
+ "fmin v10.4s, v10.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmin v11.4s, v11.4s, v0.4s\n"
+ "fmin v16.4s, v16.4s, v0.4s\n"
+ "fmin v17.4s, v17.4s, v0.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmax v16.4s, v16.4s, v1.4s\n"
+ "fmax v17.4s, v17.4s, v1.4s\n"
+ "fmin v18.4s, v18.4s, v0.4s\n"
+ "fmin v19.4s, v19.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v1.4s\n"
+ "fmax v19.4s, v19.4s, v1.4s\n"
"95:" // Height 3: No activation
"cmp x11, #0x10\n"
"bge 104f\n"
"tbz x11, #3, 99f\n"
- "st1 { v6.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v9.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
+ "st1 { v6.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v17.4s }, [x23], #0x10\n"
"tbz x11, #2, 97f\n"
- "st1 { v13.4s }, [x9], #0x10\n"
- "st1 { v10.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
+ "st1 { v13.4s }, [x28], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
+ "st1 { v18.4s }, [x23], #0x10\n"
"tbz x11, #1, 96f\n"
- "str d14, [x9], #0x8\n"
- "str d11, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
+ "str d14, [x28], #0x8\n"
+ "str d11, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
"tbz x11, #0, 103f\n"
- "st1 { v14.s }[2], [x9]\n"
- "st1 { v11.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
+ "st1 { v14.s }[2], [x28]\n"
+ "st1 { v11.s }[2], [x24]\n"
+ "st1 { v19.s }[2], [x23]\n"
"b 103f\n"
"96:" // Height 3: Partial direct writeback: partial_1_12
"tbz x11, #0, 103f\n"
- "str s14, [x9, #0x0]\n"
- "str s11, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
+ "str s14, [x28, #0x0]\n"
+ "str s11, [x24, #0x0]\n"
+ "str s19, [x23, #0x0]\n"
"b 103f\n"
"97:" // Height 3: Partial direct writeback: partial_2_8
"tbz x11, #1, 98f\n"
- "str d13, [x9], #0x8\n"
- "str d10, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
+ "str d13, [x28], #0x8\n"
+ "str d10, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
"tbz x11, #0, 103f\n"
- "st1 { v13.s }[2], [x9]\n"
- "st1 { v10.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
+ "st1 { v13.s }[2], [x28]\n"
+ "st1 { v10.s }[2], [x24]\n"
+ "st1 { v18.s }[2], [x23]\n"
"b 103f\n"
"98:" // Height 3: Partial direct writeback: partial_1_8
"tbz x11, #0, 103f\n"
- "str s13, [x9, #0x0]\n"
- "str s10, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
+ "str s13, [x28, #0x0]\n"
+ "str s10, [x24, #0x0]\n"
+ "str s18, [x23, #0x0]\n"
"b 103f\n"
"99:" // Height 3: Partial direct writeback: partial_4_0
"tbz x11, #2, 101f\n"
- "st1 { v6.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v6.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
"tbz x11, #1, 100f\n"
- "str d12, [x9], #0x8\n"
- "str d9, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
+ "str d12, [x28], #0x8\n"
+ "str d9, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
"tbz x11, #0, 103f\n"
- "st1 { v12.s }[2], [x9]\n"
- "st1 { v9.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
+ "st1 { v12.s }[2], [x28]\n"
+ "st1 { v9.s }[2], [x24]\n"
+ "st1 { v17.s }[2], [x23]\n"
"b 103f\n"
"100:" // Height 3: Partial direct writeback: partial_1_4
"tbz x11, #0, 103f\n"
- "str s12, [x9, #0x0]\n"
- "str s9, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
+ "str s12, [x28, #0x0]\n"
+ "str s9, [x24, #0x0]\n"
+ "str s17, [x23, #0x0]\n"
"b 103f\n"
"101:" // Height 3: Partial direct writeback: partial_2_0
"tbz x11, #1, 102f\n"
- "str d6, [x9], #0x8\n"
- "str d8, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
+ "str d6, [x28], #0x8\n"
+ "str d8, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
"tbz x11, #0, 103f\n"
- "st1 { v6.s }[2], [x9]\n"
- "st1 { v8.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
+ "st1 { v6.s }[2], [x28]\n"
+ "st1 { v8.s }[2], [x24]\n"
+ "st1 { v16.s }[2], [x23]\n"
"b 103f\n"
"102:" // Height 3: Partial direct writeback: partial_1_0
- "str s6, [x9, #0x0]\n"
- "str s8, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
+ "str s6, [x28, #0x0]\n"
+ "str s8, [x24, #0x0]\n"
+ "str s16, [x23, #0x0]\n"
"103:" // Height 3: Partial direct writeback: Done
"b 105f\n"
"104:" // Height 3: Full writeback
- "str q6, [x9, #0x0]\n"
- "str q12, [x9, #0x10]\n"
- "str q13, [x9, #0x20]\n"
- "str q14, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q8, [x25, #0x0]\n"
- "str q9, [x25, #0x10]\n"
- "str q10, [x25, #0x20]\n"
- "str q11, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
+ "str q6, [x28, #0x0]\n"
+ "str q12, [x28, #0x10]\n"
+ "str q13, [x28, #0x20]\n"
+ "str q14, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q8, [x24, #0x0]\n"
+ "str q9, [x24, #0x10]\n"
+ "str q10, [x24, #0x20]\n"
+ "str q11, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
"105:" // Height 3: Writeback done
"subs x11, x11, #0x10\n"
"bgt 72b\n"
"b 212f\n"
"106:" // Height 4
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"107:" // Height 4: Column loop
- "cbz x12, 108f\n"
- "ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
+ "cbz x9, 108f\n"
+ "ldr q8, [x9, #0x0]\n"
"zip2 v12.2d, v8.2d, v8.2d\n"
+ "ldr q9, [x9, #0x10]\n"
"zip1 v8.2d, v8.2d, v8.2d\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
+ "ldr q10, [x9, #0x20]\n"
+ "mov v16.16b, v8.16b\n"
+ "ldr q11, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "mov v20.16b, v12.16b\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x12, x12, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
- "mov v16.16b, v8.16b\n"
- "mov v20.16b, v12.16b\n"
"mov v17.16b, v9.16b\n"
"mov v21.16b, v13.16b\n"
"mov v18.16b, v10.16b\n"
@@ -1235,132 +1235,132 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"b 120f\n"
"108:" // Height 4: no bias
"tbz %x[flags], #0, 119f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x11, #0x10\n"
- "add x23, x24, x20, LSL #2\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"bge 117f\n"
"tbz x11, #3, 112f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
"tbz x11, #2, 110f\n"
- "ld1 { v11.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v19.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
+ "ld1 { v11.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v19.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
"tbz x11, #1, 109f\n"
- "ldr d16, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "mov x20, #0x38\n"
- "ldr d24, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d16, [x28], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
"tbz x11, #0, 116f\n"
- "ld1 { v16.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v24.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
+ "ld1 { v16.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
+ "ld1 { v23.s }[2], [x22]\n"
"b 116f\n"
"109:" // Height 4: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x11, #0, 116f\n"
- "ldr s16, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s24, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
+ "ldr s16, [x28, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
+ "ldr s23, [x22, #0x0]\n"
"b 116f\n"
"110:" // Height 4: Partial accumulate: partial_2_8
"tbz x11, #1, 111f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "mov x20, #0x28\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
"tbz x11, #0, 116f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v22.s }[2], [x22]\n"
"b 116f\n"
"111:" // Height 4: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x11, #0, 116f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s22, [x22, #0x0]\n"
"b 116f\n"
"112:" // Height 4: Partial accumulate: partial_4_0
"tbz x11, #2, 114f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
"tbz x11, #1, 113f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "mov x20, #0x18\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
"tbz x11, #0, 116f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v21.s }[2], [x22]\n"
"b 116f\n"
"113:" // Height 4: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x11, #0, 116f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "ldr s21, [x22, #0x0]\n"
"b 116f\n"
"114:" // Height 4: Partial accumulate: partial_2_0
"tbz x11, #1, 115f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
"tbz x11, #0, 116f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v20.s }[2], [x22]\n"
"b 116f\n"
"115:" // Height 4: Partial accumulate: partial_1_0
- "ldr s9, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
+ "ldr s9, [x28, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s12, [x24, #0x0]\n"
+ "ldr s17, [x23, #0x0]\n"
+ "ldr s20, [x22, #0x0]\n"
"116:" // Height 4: Partial accumulate: Done
- "sub x9, x9, x20\n"
+ "sub x28, x28, x19\n"
"b 118f\n"
"117:" // Height 4: full accumulate
- "ldr q9, [x9, #0x0]\n"
- "ldr q10, [x9, #0x10]\n"
- "ldr q11, [x9, #0x20]\n"
- "ldr q16, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q17, [x24, #0x0]\n"
- "ldr q18, [x24, #0x10]\n"
- "ldr q19, [x24, #0x20]\n"
- "ldr q24, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
+ "ldr q9, [x28, #0x0]\n"
+ "ldr q10, [x28, #0x10]\n"
+ "ldr q11, [x28, #0x20]\n"
+ "ldr q16, [x28, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q17, [x23, #0x0]\n"
+ "ldr q18, [x23, #0x10]\n"
+ "ldr q19, [x23, #0x20]\n"
+ "ldr q24, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
"118:" // Height 4: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -1397,67 +1397,65 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"movi v22.16b, #0x0\n"
"movi v23.16b, #0x0\n"
"120:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"121:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 122f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 123f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 123f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
"b 123f\n"
"122:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"123:" // Height 4: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"blt 126f\n"
- "ld1 { v0.4s }, [x26], #0x10\n"
- "ld1 { v2.4s }, [x24], #0x10\n"
- "cmp x27, #0x8\n"
- "ld1 { v1.4s }, [x25], #0x10\n"
- "ld1 { v3.4s }, [x23], #0x10\n"
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ld1 { v0.4s }, [x25], #0x10\n"
+ "cmp x26, #0x8\n"
"blt 125f\n"
"124:" // Height 4: Multiply loop: Main loop head
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
- ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x8\n"
+ "ld1 { v1.4s }, [x24], #0x10\n"
+ "sub x26, x26, #0x4\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
+ "ld1 { v2.4s }, [x23], #0x10\n"
+ "cmp x26, #0x8\n"
+ ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
+ "ld1 { v3.4s }, [x22], #0x10\n"
+ "ldr q6, [x10, #0x0]\n"
".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
+ "ldr q7, [x10, #0x10]\n"
".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
"ldr q6, [x10, #0x20]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "ld1 { v1.4s }, [x25], #0x10\n"
".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
"ldr q7, [x10, #0x30]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec51 // bfmmla v17.4s, v2.8h, v6.8h\n"
"ldr q6, [x10, #0x40]\n"
".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e47ec55 // bfmmla v21.4s, v2.8h, v7.8h\n"
"ldr q7, [x10, #0x50]\n"
".inst 0x6e46ec0a // bfmmla v10.4s, v0.8h, v6.8h\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "ld1 { v3.4s }, [x23], #0x10\n"
".inst 0x6e46ec52 // bfmmla v18.4s, v2.8h, v6.8h\n"
"ldr q6, [x10, #0x60]\n"
".inst 0x6e47ec0e // bfmmla v14.4s, v0.8h, v7.8h\n"
@@ -1466,30 +1464,32 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"add x10, x10, #0x80\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec53 // bfmmla v19.4s, v2.8h, v6.8h\n"
- "ldr q6, [x10, #0x0]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
- "ld1 { v0.4s }, [x26], #0x10\n"
+ "ld1 { v0.4s }, [x25], #0x10\n"
".inst 0x6e47ec57 // bfmmla v23.4s, v2.8h, v7.8h\n"
- "ld1 { v2.4s }, [x24], #0x10\n"
- "ldr q7, [x10, #0x10]\n"
"bge 124b\n"
"125:" // Height 4: Multiply loop: Single iteration only
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
- ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
- "sub x27, x27, #0x4\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ld1 { v1.4s }, [x24], #0x10\n"
+ "sub x26, x26, #0x4\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
+ "ld1 { v2.4s }, [x23], #0x10\n"
+ "ld1 { v3.4s }, [x22], #0x10\n"
+ ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
+ "ldr q6, [x10, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
- ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
"prfm pldl1keep, [x25, #0x80]\n"
- ".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
- "ldr q6, [x10, #0x20]\n"
- ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
"prfm pldl1keep, [x24, #0x80]\n"
+ ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ ".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
+ "ldr q6, [x10, #0x20]\n"
"ldr q7, [x10, #0x30]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e46ec51 // bfmmla v17.4s, v2.8h, v6.8h\n"
"ldr q6, [x10, #0x40]\n"
".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
@@ -1507,29 +1507,29 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec57 // bfmmla v23.4s, v2.8h, v7.8h\n"
"126:" // Height 4: Multiply loop: Main loop skip
- "cbz x27, 129f\n"
- "cbz x27, 129f\n"
- "tbz x27, #1, 127f\n"
- "ldr d0, [x26], #0x8\n"
- "ldr d1, [x25], #0x8\n"
- "ldr d2, [x24], #0x8\n"
- "ldr d3, [x23], #0x8\n"
- "tbz x27, #0, 128f\n"
- "ld1 { v0.s }[2], [x26]\n"
- "ld1 { v1.s }[2], [x25]\n"
- "ld1 { v2.s }[2], [x24]\n"
- "ld1 { v3.s }[2], [x23]\n"
+ "cbz x26, 129f\n"
+ "cbz x26, 129f\n"
+ "tbz x26, #1, 127f\n"
+ "ldr d0, [x25], #0x8\n"
+ "ldr d1, [x24], #0x8\n"
+ "ldr d2, [x23], #0x8\n"
+ "ldr d3, [x22], #0x8\n"
+ "tbz x26, #0, 128f\n"
+ "ld1 { v0.s }[2], [x25]\n"
+ "ld1 { v1.s }[2], [x24]\n"
+ "ld1 { v2.s }[2], [x23]\n"
+ "ld1 { v3.s }[2], [x22]\n"
"b 128f\n"
"127:" // Height 4: Multiply loop: Ragged operand read: partial_1_0
- "ldr s0, [x26, #0x0]\n"
- "ldr s1, [x25, #0x0]\n"
- "ldr s2, [x24, #0x0]\n"
- "ldr s3, [x23, #0x0]\n"
+ "ldr s0, [x25, #0x0]\n"
+ "ldr s1, [x24, #0x0]\n"
+ "ldr s2, [x23, #0x0]\n"
+ "ldr s3, [x22, #0x0]\n"
"128:" // Height 4: Multiply loop: Ragged operand read: Done
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ "ldr q6, [x10, #0x0]\n"
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
+ "ldr q7, [x10, #0x10]\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
@@ -1550,31 +1550,31 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
".inst 0x6e47ec0e // bfmmla v14.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec56 // bfmmla v22.4s, v2.8h, v7.8h\n"
"ldr q7, [x10, #0x70]\n"
- ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
"add x10, x10, #0x80\n"
+ ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec53 // bfmmla v19.4s, v2.8h, v6.8h\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec57 // bfmmla v23.4s, v2.8h, v7.8h\n"
"129:" // Height 4: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 121b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"uzp1 v6.2d, v8.2d, v12.2d\n"
- "add x23, x24, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x24, x28, x19, LSL #2\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
+ "add x23, x24, x19, LSL #2\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
- "prfm pstl1keep, [x25, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
- "prfm pstl1keep, [x23, #0x0]\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
"uzp1 v15.2d, v16.2d, v20.2d\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
@@ -1585,190 +1585,190 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"uzp1 v22.2d, v19.2d, v23.2d\n"
"uzp2 v19.2d, v19.2d, v23.2d\n"
"tbz %x[flags], #1, 130f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v6.4s, v6.4s, v1.4s\n"
- "fmin v12.4s, v12.4s, v1.4s\n"
- "fmin v13.4s, v13.4s, v1.4s\n"
- "fmin v14.4s, v14.4s, v1.4s\n"
- "fmin v8.4s, v8.4s, v1.4s\n"
- "fmin v9.4s, v9.4s, v1.4s\n"
- "fmin v10.4s, v10.4s, v1.4s\n"
- "fmin v11.4s, v11.4s, v1.4s\n"
- "fmin v15.4s, v15.4s, v1.4s\n"
- "fmin v20.4s, v20.4s, v1.4s\n"
- "fmin v21.4s, v21.4s, v1.4s\n"
- "fmin v22.4s, v22.4s, v1.4s\n"
- "fmin v16.4s, v16.4s, v1.4s\n"
- "fmin v17.4s, v17.4s, v1.4s\n"
- "fmin v18.4s, v18.4s, v1.4s\n"
- "fmin v19.4s, v19.4s, v1.4s\n"
- "fmax v6.4s, v6.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v15.4s, v15.4s, v0.4s\n"
- "fmax v20.4s, v20.4s, v0.4s\n"
- "fmax v21.4s, v21.4s, v0.4s\n"
- "fmax v22.4s, v22.4s, v0.4s\n"
- "fmax v16.4s, v16.4s, v0.4s\n"
- "fmax v17.4s, v17.4s, v0.4s\n"
- "fmax v18.4s, v18.4s, v0.4s\n"
- "fmax v19.4s, v19.4s, v0.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v6.4s, v6.4s, v0.4s\n"
+ "fmin v12.4s, v12.4s, v0.4s\n"
+ "fmin v13.4s, v13.4s, v0.4s\n"
+ "fmin v14.4s, v14.4s, v0.4s\n"
+ "fmax v6.4s, v6.4s, v1.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmin v8.4s, v8.4s, v0.4s\n"
+ "fmin v9.4s, v9.4s, v0.4s\n"
+ "fmin v10.4s, v10.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmin v11.4s, v11.4s, v0.4s\n"
+ "fmin v15.4s, v15.4s, v0.4s\n"
+ "fmin v20.4s, v20.4s, v0.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmax v15.4s, v15.4s, v1.4s\n"
+ "fmax v20.4s, v20.4s, v1.4s\n"
+ "fmin v21.4s, v21.4s, v0.4s\n"
+ "fmin v22.4s, v22.4s, v0.4s\n"
+ "fmin v16.4s, v16.4s, v0.4s\n"
+ "fmax v21.4s, v21.4s, v1.4s\n"
+ "fmax v22.4s, v22.4s, v1.4s\n"
+ "fmax v16.4s, v16.4s, v1.4s\n"
+ "fmin v17.4s, v17.4s, v0.4s\n"
+ "fmin v18.4s, v18.4s, v0.4s\n"
+ "fmin v19.4s, v19.4s, v0.4s\n"
+ "fmax v17.4s, v17.4s, v1.4s\n"
+ "fmax v18.4s, v18.4s, v1.4s\n"
+ "fmax v19.4s, v19.4s, v1.4s\n"
"130:" // Height 4: No activation
"cmp x11, #0x10\n"
"bge 139f\n"
"tbz x11, #3, 134f\n"
- "st1 { v6.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v9.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
+ "st1 { v6.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
"tbz x11, #2, 132f\n"
- "st1 { v13.4s }, [x9], #0x10\n"
- "st1 { v10.4s }, [x25], #0x10\n"
- "st1 { v21.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
+ "st1 { v13.4s }, [x28], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
+ "st1 { v21.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
"tbz x11, #1, 131f\n"
- "str d14, [x9], #0x8\n"
- "str d11, [x25], #0x8\n"
- "str d22, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
+ "str d14, [x28], #0x8\n"
+ "str d11, [x24], #0x8\n"
+ "str d22, [x23], #0x8\n"
+ "str d19, [x22], #0x8\n"
"tbz x11, #0, 138f\n"
- "st1 { v14.s }[2], [x9]\n"
- "st1 { v11.s }[2], [x25]\n"
- "st1 { v22.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
+ "st1 { v14.s }[2], [x28]\n"
+ "st1 { v11.s }[2], [x24]\n"
+ "st1 { v22.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x22]\n"
"b 138f\n"
"131:" // Height 4: Partial direct writeback: partial_1_12
"tbz x11, #0, 138f\n"
- "str s14, [x9, #0x0]\n"
- "str s11, [x25, #0x0]\n"
- "str s22, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
+ "str s14, [x28, #0x0]\n"
+ "str s11, [x24, #0x0]\n"
+ "str s22, [x23, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
"b 138f\n"
"132:" // Height 4: Partial direct writeback: partial_2_8
"tbz x11, #1, 133f\n"
- "str d13, [x9], #0x8\n"
- "str d10, [x25], #0x8\n"
- "str d21, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
+ "str d13, [x28], #0x8\n"
+ "str d10, [x24], #0x8\n"
+ "str d21, [x23], #0x8\n"
+ "str d18, [x22], #0x8\n"
"tbz x11, #0, 138f\n"
- "st1 { v13.s }[2], [x9]\n"
- "st1 { v10.s }[2], [x25]\n"
- "st1 { v21.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
+ "st1 { v13.s }[2], [x28]\n"
+ "st1 { v10.s }[2], [x24]\n"
+ "st1 { v21.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x22]\n"
"b 138f\n"
"133:" // Height 4: Partial direct writeback: partial_1_8
"tbz x11, #0, 138f\n"
- "str s13, [x9, #0x0]\n"
- "str s10, [x25, #0x0]\n"
- "str s21, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
+ "str s13, [x28, #0x0]\n"
+ "str s10, [x24, #0x0]\n"
+ "str s21, [x23, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
"b 138f\n"
"134:" // Height 4: Partial direct writeback: partial_4_0
"tbz x11, #2, 136f\n"
- "st1 { v6.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v6.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
"tbz x11, #1, 135f\n"
- "str d12, [x9], #0x8\n"
- "str d9, [x25], #0x8\n"
- "str d20, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
+ "str d12, [x28], #0x8\n"
+ "str d9, [x24], #0x8\n"
+ "str d20, [x23], #0x8\n"
+ "str d17, [x22], #0x8\n"
"tbz x11, #0, 138f\n"
- "st1 { v12.s }[2], [x9]\n"
- "st1 { v9.s }[2], [x25]\n"
- "st1 { v20.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
+ "st1 { v12.s }[2], [x28]\n"
+ "st1 { v9.s }[2], [x24]\n"
+ "st1 { v20.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x22]\n"
"b 138f\n"
"135:" // Height 4: Partial direct writeback: partial_1_4
"tbz x11, #0, 138f\n"
- "str s12, [x9, #0x0]\n"
- "str s9, [x25, #0x0]\n"
- "str s20, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
+ "str s12, [x28, #0x0]\n"
+ "str s9, [x24, #0x0]\n"
+ "str s20, [x23, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
"b 138f\n"
"136:" // Height 4: Partial direct writeback: partial_2_0
"tbz x11, #1, 137f\n"
- "str d6, [x9], #0x8\n"
- "str d8, [x25], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
+ "str d6, [x28], #0x8\n"
+ "str d8, [x24], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
"tbz x11, #0, 138f\n"
- "st1 { v6.s }[2], [x9]\n"
- "st1 { v8.s }[2], [x25]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
+ "st1 { v6.s }[2], [x28]\n"
+ "st1 { v8.s }[2], [x24]\n"
+ "st1 { v15.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x22]\n"
"b 138f\n"
"137:" // Height 4: Partial direct writeback: partial_1_0
- "str s6, [x9, #0x0]\n"
- "str s8, [x25, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
+ "str s6, [x28, #0x0]\n"
+ "str s8, [x24, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
"138:" // Height 4: Partial direct writeback: Done
"b 140f\n"
"139:" // Height 4: Full writeback
- "str q6, [x9, #0x0]\n"
- "str q12, [x9, #0x10]\n"
- "str q13, [x9, #0x20]\n"
- "str q14, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q8, [x25, #0x0]\n"
- "str q9, [x25, #0x10]\n"
- "str q10, [x25, #0x20]\n"
- "str q11, [x25, #0x30]\n"
- "str q15, [x24, #0x0]\n"
- "str q20, [x24, #0x10]\n"
- "str q21, [x24, #0x20]\n"
- "str q22, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
+ "str q6, [x28, #0x0]\n"
+ "str q12, [x28, #0x10]\n"
+ "str q13, [x28, #0x20]\n"
+ "str q14, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q8, [x24, #0x0]\n"
+ "str q9, [x24, #0x10]\n"
+ "str q10, [x24, #0x20]\n"
+ "str q11, [x24, #0x30]\n"
+ "str q15, [x23, #0x0]\n"
+ "str q20, [x23, #0x10]\n"
+ "str q21, [x23, #0x20]\n"
+ "str q22, [x23, #0x30]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q17, [x22, #0x10]\n"
+ "str q18, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
"140:" // Height 4: Writeback done
"subs x11, x11, #0x10\n"
"bgt 107b\n"
"b 212f\n"
"141:" // Height 5
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"142:" // Height 5: Column loop
- "cbz x12, 143f\n"
- "ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
+ "cbz x9, 143f\n"
+ "ldr q8, [x9, #0x0]\n"
"zip2 v12.2d, v8.2d, v8.2d\n"
+ "ldr q9, [x9, #0x10]\n"
"zip1 v8.2d, v8.2d, v8.2d\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
+ "ldr q10, [x9, #0x20]\n"
+ "mov v16.16b, v8.16b\n"
+ "ldr q11, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "mov v20.16b, v12.16b\n"
+ "mov v24.16b, v8.16b\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x12, x12, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
- "mov v16.16b, v8.16b\n"
- "mov v20.16b, v12.16b\n"
"mov v17.16b, v9.16b\n"
"mov v21.16b, v13.16b\n"
"mov v18.16b, v10.16b\n"
"mov v22.16b, v14.16b\n"
"mov v19.16b, v11.16b\n"
"mov v23.16b, v15.16b\n"
- "mov v24.16b, v8.16b\n"
"mov v28.16b, v12.16b\n"
"mov v25.16b, v9.16b\n"
"mov v29.16b, v13.16b\n"
@@ -1779,153 +1779,153 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"b 155f\n"
"143:" // Height 5: no bias
"tbz %x[flags], #0, 154f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x11, #0x10\n"
- "add x22, x23, x20, LSL #2\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"bge 152f\n"
"tbz x11, #3, 147f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
- "ld1 { v26.4s }, [x22], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v26.4s }, [x21], #0x10\n"
"tbz x11, #2, 145f\n"
- "ld1 { v11.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v19.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
- "ld1 { v27.4s }, [x22], #0x10\n"
+ "ld1 { v11.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v19.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
+ "ld1 { v27.4s }, [x21], #0x10\n"
"tbz x11, #1, 144f\n"
- "ldr d16, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "mov x20, #0x38\n"
- "ldr d24, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d6, [x22], #0x8\n"
+ "ldr d16, [x28], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d15, [x24], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "ldr d6, [x21], #0x8\n"
"tbz x11, #0, 151f\n"
- "ld1 { v16.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v24.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
- "ld1 { v6.s }[2], [x22]\n"
+ "ld1 { v16.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
+ "ld1 { v23.s }[2], [x22]\n"
+ "ld1 { v6.s }[2], [x21]\n"
"b 151f\n"
"144:" // Height 5: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x11, #0, 151f\n"
- "ldr s16, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s24, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
- "ldr s6, [x22, #0x0]\n"
+ "ldr s16, [x28, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
+ "ldr s23, [x22, #0x0]\n"
+ "ldr s6, [x21, #0x0]\n"
"b 151f\n"
"145:" // Height 5: Partial accumulate: partial_2_8
"tbz x11, #1, 146f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "mov x20, #0x28\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "ldr d27, [x21], #0x8\n"
"tbz x11, #0, 151f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
- "ld1 { v27.s }[2], [x22]\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v22.s }[2], [x22]\n"
+ "ld1 { v27.s }[2], [x21]\n"
"b 151f\n"
"146:" // Height 5: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x11, #0, 151f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
- "ldr s27, [x22, #0x0]\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s22, [x22, #0x0]\n"
+ "ldr s27, [x21, #0x0]\n"
"b 151f\n"
"147:" // Height 5: Partial accumulate: partial_4_0
"tbz x11, #2, 149f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
"tbz x11, #1, 148f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "mov x20, #0x18\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
+ "ldr d10, [x28], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d13, [x24], #0x8\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d26, [x21], #0x8\n"
"tbz x11, #0, 151f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
- "ld1 { v26.s }[2], [x22]\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v21.s }[2], [x22]\n"
+ "ld1 { v26.s }[2], [x21]\n"
"b 151f\n"
"148:" // Height 5: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x11, #0, 151f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
- "ldr s26, [x22, #0x0]\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "ldr s21, [x22, #0x0]\n"
+ "ldr s26, [x21, #0x0]\n"
"b 151f\n"
"149:" // Height 5: Partial accumulate: partial_2_0
"tbz x11, #1, 150f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "ldr d25, [x21], #0x8\n"
"tbz x11, #0, 151f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v20.s }[2], [x22]\n"
+ "ld1 { v25.s }[2], [x21]\n"
"b 151f\n"
"150:" // Height 5: Partial accumulate: partial_1_0
- "ldr s9, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
+ "ldr s9, [x28, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s12, [x24, #0x0]\n"
+ "ldr s17, [x23, #0x0]\n"
+ "ldr s20, [x22, #0x0]\n"
+ "ldr s25, [x21, #0x0]\n"
"151:" // Height 5: Partial accumulate: Done
- "sub x9, x9, x20\n"
+ "sub x28, x28, x19\n"
"b 153f\n"
"152:" // Height 5: full accumulate
- "ldr q9, [x9, #0x0]\n"
- "ldr q10, [x9, #0x10]\n"
- "ldr q11, [x9, #0x20]\n"
- "ldr q16, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q17, [x24, #0x0]\n"
- "ldr q18, [x24, #0x10]\n"
- "ldr q19, [x24, #0x20]\n"
- "ldr q24, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q25, [x22, #0x0]\n"
- "ldr q26, [x22, #0x10]\n"
- "ldr q27, [x22, #0x20]\n"
- "ldr q6, [x22, #0x30]\n"
+ "ldr q9, [x28, #0x0]\n"
+ "ldr q10, [x28, #0x10]\n"
+ "ldr q11, [x28, #0x20]\n"
+ "ldr q16, [x28, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q17, [x23, #0x0]\n"
+ "ldr q18, [x23, #0x10]\n"
+ "ldr q19, [x23, #0x20]\n"
+ "ldr q24, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
+ "ldr q25, [x21, #0x0]\n"
+ "ldr q26, [x21, #0x10]\n"
+ "ldr q27, [x21, #0x20]\n"
+ "ldr q6, [x21, #0x30]\n"
"153:" // Height 5: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -1978,74 +1978,72 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"movi v30.16b, #0x0\n"
"movi v31.16b, #0x0\n"
"155:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"156:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 157f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 158f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x22, x22, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 158f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
"b 158f\n"
"157:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"158:" // Height 5: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"blt 161f\n"
- "ld1 { v0.4s }, [x26], #0x10\n"
- "ld1 { v2.4s }, [x24], #0x10\n"
- "cmp x27, #0x8\n"
- "ld1 { v1.4s }, [x25], #0x10\n"
- "ld1 { v3.4s }, [x23], #0x10\n"
- "ld1 { v4.4s }, [x22], #0x10\n"
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ld1 { v0.4s }, [x25], #0x10\n"
+ "cmp x26, #0x8\n"
"blt 160f\n"
"159:" // Height 5: Multiply loop: Main loop head
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
- ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x8\n"
+ "ld1 { v1.4s }, [x24], #0x10\n"
+ "sub x26, x26, #0x4\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
+ "ld1 { v2.4s }, [x23], #0x10\n"
+ "cmp x26, #0x8\n"
+ ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
+ "ld1 { v3.4s }, [x22], #0x10\n"
+ "ld1 { v4.4s }, [x21], #0x10\n"
".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
- ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q6, [x10, #0x0]\n"
".inst 0x0ea16884 // bfcvtn v4.4h, v4.4s\n"
+ "ldr q7, [x10, #0x10]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e46ec98 // bfmmla v24.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x20]\n"
".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "ld1 { v1.4s }, [x25], #0x10\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x6e47ec9c // bfmmla v28.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x30]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e46ec51 // bfmmla v17.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec99 // bfmmla v25.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x40]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "ld1 { v3.4s }, [x23], #0x10\n"
".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec55 // bfmmla v21.4s, v2.8h, v7.8h\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e47ec9d // bfmmla v29.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x50]\n"
".inst 0x6e46ec0a // bfmmla v10.4s, v0.8h, v6.8h\n"
@@ -2060,36 +2058,38 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec53 // bfmmla v19.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec9b // bfmmla v27.4s, v4.8h, v6.8h\n"
- "ldr q6, [x10, #0x0]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
- "ld1 { v0.4s }, [x26], #0x10\n"
+ "ld1 { v0.4s }, [x25], #0x10\n"
".inst 0x6e47ec57 // bfmmla v23.4s, v2.8h, v7.8h\n"
- "ld1 { v2.4s }, [x24], #0x10\n"
".inst 0x6e47ec9f // bfmmla v31.4s, v4.8h, v7.8h\n"
- "ld1 { v4.4s }, [x22], #0x10\n"
- "ldr q7, [x10, #0x10]\n"
"bge 159b\n"
"160:" // Height 5: Multiply loop: Single iteration only
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
- ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
- "sub x27, x27, #0x4\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ld1 { v1.4s }, [x24], #0x10\n"
+ "sub x26, x26, #0x4\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
+ "ld1 { v2.4s }, [x23], #0x10\n"
+ "ld1 { v3.4s }, [x22], #0x10\n"
+ ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
+ "ld1 { v4.4s }, [x21], #0x10\n"
+ "ldr q6, [x10, #0x0]\n"
".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
- ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
+ "ldr q7, [x10, #0x10]\n"
"prfm pldl1keep, [x25, #0x80]\n"
".inst 0x0ea16884 // bfcvtn v4.4h, v4.4s\n"
+ ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e46ec98 // bfmmla v24.4s, v4.8h, v6.8h\n"
- "ldr q6, [x10, #0x20]\n"
".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
+ "ldr q6, [x10, #0x20]\n"
".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x6e47ec9c // bfmmla v28.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x30]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e46ec51 // bfmmla v17.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec99 // bfmmla v25.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x40]\n"
@@ -2113,42 +2113,42 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
".inst 0x6e47ec57 // bfmmla v23.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9f // bfmmla v31.4s, v4.8h, v7.8h\n"
"161:" // Height 5: Multiply loop: Main loop skip
- "cbz x27, 164f\n"
- "cbz x27, 164f\n"
- "tbz x27, #1, 162f\n"
- "ldr d0, [x26], #0x8\n"
- "ldr d1, [x25], #0x8\n"
- "ldr d2, [x24], #0x8\n"
- "ldr d3, [x23], #0x8\n"
- "ldr d4, [x22], #0x8\n"
- "tbz x27, #0, 163f\n"
- "ld1 { v0.s }[2], [x26]\n"
- "ld1 { v1.s }[2], [x25]\n"
- "ld1 { v2.s }[2], [x24]\n"
- "ld1 { v3.s }[2], [x23]\n"
- "ld1 { v4.s }[2], [x22]\n"
+ "cbz x26, 164f\n"
+ "cbz x26, 164f\n"
+ "tbz x26, #1, 162f\n"
+ "ldr d0, [x25], #0x8\n"
+ "ldr d1, [x24], #0x8\n"
+ "ldr d2, [x23], #0x8\n"
+ "ldr d3, [x22], #0x8\n"
+ "ldr d4, [x21], #0x8\n"
+ "tbz x26, #0, 163f\n"
+ "ld1 { v0.s }[2], [x25]\n"
+ "ld1 { v1.s }[2], [x24]\n"
+ "ld1 { v2.s }[2], [x23]\n"
+ "ld1 { v3.s }[2], [x22]\n"
+ "ld1 { v4.s }[2], [x21]\n"
"b 163f\n"
"162:" // Height 5: Multiply loop: Ragged operand read: partial_1_0
- "ldr s0, [x26, #0x0]\n"
- "ldr s1, [x25, #0x0]\n"
- "ldr s2, [x24, #0x0]\n"
- "ldr s3, [x23, #0x0]\n"
- "ldr s4, [x22, #0x0]\n"
+ "ldr s0, [x25, #0x0]\n"
+ "ldr s1, [x24, #0x0]\n"
+ "ldr s2, [x23, #0x0]\n"
+ "ldr s3, [x22, #0x0]\n"
+ "ldr s4, [x21, #0x0]\n"
"163:" // Height 5: Multiply loop: Ragged operand read: Done
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ "ldr q6, [x10, #0x0]\n"
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
+ "ldr q7, [x10, #0x10]\n"
+ ".inst 0x0ea16884 // bfcvtn v4.4h, v4.4s\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
+ ".inst 0x6e46ec98 // bfmmla v24.4s, v4.8h, v6.8h\n"
+ ".inst 0x6e47ec9c // bfmmla v28.4s, v4.8h, v7.8h\n"
".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
- ".inst 0x0ea16884 // bfcvtn v4.4h, v4.4s\n"
".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
- ".inst 0x6e46ec98 // bfmmla v24.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x20]\n"
".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
- ".inst 0x6e47ec9c // bfmmla v28.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x30]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec51 // bfmmla v17.4s, v2.8h, v6.8h\n"
@@ -2166,37 +2166,37 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
".inst 0x6e47ec56 // bfmmla v22.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9e // bfmmla v30.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x70]\n"
- ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
"add x10, x10, #0x80\n"
+ ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec53 // bfmmla v19.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec9b // bfmmla v27.4s, v4.8h, v6.8h\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec57 // bfmmla v23.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9f // bfmmla v31.4s, v4.8h, v7.8h\n"
"164:" // Height 5: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 156b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"uzp1 v6.2d, v8.2d, v12.2d\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x24, x28, x19, LSL #2\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
+ "add x23, x24, x19, LSL #2\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x25, #0x0]\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #2\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
- "prfm pstl1keep, [x24, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
"uzp1 v15.2d, v16.2d, v20.2d\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
@@ -2209,221 +2209,221 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"uzp1 v26.2d, v26.2d, v30.2d\n"
"uzp1 v27.2d, v27.2d, v31.2d\n"
"tbz %x[flags], #1, 165f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v6.4s, v6.4s, v1.4s\n"
- "fmin v12.4s, v12.4s, v1.4s\n"
- "fmin v13.4s, v13.4s, v1.4s\n"
- "fmin v14.4s, v14.4s, v1.4s\n"
- "fmin v8.4s, v8.4s, v1.4s\n"
- "fmin v9.4s, v9.4s, v1.4s\n"
- "fmin v10.4s, v10.4s, v1.4s\n"
- "fmin v11.4s, v11.4s, v1.4s\n"
- "fmin v15.4s, v15.4s, v1.4s\n"
- "fmin v20.4s, v20.4s, v1.4s\n"
- "fmin v21.4s, v21.4s, v1.4s\n"
- "fmin v22.4s, v22.4s, v1.4s\n"
- "fmin v16.4s, v16.4s, v1.4s\n"
- "fmin v17.4s, v17.4s, v1.4s\n"
- "fmin v18.4s, v18.4s, v1.4s\n"
- "fmin v19.4s, v19.4s, v1.4s\n"
- "fmin v24.4s, v24.4s, v1.4s\n"
- "fmin v25.4s, v25.4s, v1.4s\n"
- "fmin v26.4s, v26.4s, v1.4s\n"
- "fmin v27.4s, v27.4s, v1.4s\n"
- "fmax v6.4s, v6.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v15.4s, v15.4s, v0.4s\n"
- "fmax v20.4s, v20.4s, v0.4s\n"
- "fmax v21.4s, v21.4s, v0.4s\n"
- "fmax v22.4s, v22.4s, v0.4s\n"
- "fmax v16.4s, v16.4s, v0.4s\n"
- "fmax v17.4s, v17.4s, v0.4s\n"
- "fmax v18.4s, v18.4s, v0.4s\n"
- "fmax v19.4s, v19.4s, v0.4s\n"
- "fmax v24.4s, v24.4s, v0.4s\n"
- "fmax v25.4s, v25.4s, v0.4s\n"
- "fmax v26.4s, v26.4s, v0.4s\n"
- "fmax v27.4s, v27.4s, v0.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v6.4s, v6.4s, v0.4s\n"
+ "fmin v12.4s, v12.4s, v0.4s\n"
+ "fmin v13.4s, v13.4s, v0.4s\n"
+ "fmin v14.4s, v14.4s, v0.4s\n"
+ "fmax v6.4s, v6.4s, v1.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmin v8.4s, v8.4s, v0.4s\n"
+ "fmin v9.4s, v9.4s, v0.4s\n"
+ "fmin v10.4s, v10.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmin v11.4s, v11.4s, v0.4s\n"
+ "fmin v15.4s, v15.4s, v0.4s\n"
+ "fmin v20.4s, v20.4s, v0.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmax v15.4s, v15.4s, v1.4s\n"
+ "fmax v20.4s, v20.4s, v1.4s\n"
+ "fmin v21.4s, v21.4s, v0.4s\n"
+ "fmin v22.4s, v22.4s, v0.4s\n"
+ "fmin v16.4s, v16.4s, v0.4s\n"
+ "fmax v21.4s, v21.4s, v1.4s\n"
+ "fmax v22.4s, v22.4s, v1.4s\n"
+ "fmax v16.4s, v16.4s, v1.4s\n"
+ "fmin v17.4s, v17.4s, v0.4s\n"
+ "fmin v18.4s, v18.4s, v0.4s\n"
+ "fmin v19.4s, v19.4s, v0.4s\n"
+ "fmax v17.4s, v17.4s, v1.4s\n"
+ "fmax v18.4s, v18.4s, v1.4s\n"
+ "fmax v19.4s, v19.4s, v1.4s\n"
+ "fmin v24.4s, v24.4s, v0.4s\n"
+ "fmin v25.4s, v25.4s, v0.4s\n"
+ "fmin v26.4s, v26.4s, v0.4s\n"
+ "fmax v24.4s, v24.4s, v1.4s\n"
+ "fmax v25.4s, v25.4s, v1.4s\n"
+ "fmax v26.4s, v26.4s, v1.4s\n"
+ "fmin v27.4s, v27.4s, v0.4s\n"
+ "fmax v27.4s, v27.4s, v1.4s\n"
"165:" // Height 5: No activation
"cmp x11, #0x10\n"
"bge 174f\n"
"tbz x11, #3, 169f\n"
- "st1 { v6.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v9.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
- "st1 { v25.4s }, [x22], #0x10\n"
+ "st1 { v6.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
+ "st1 { v25.4s }, [x21], #0x10\n"
"tbz x11, #2, 167f\n"
- "st1 { v13.4s }, [x9], #0x10\n"
- "st1 { v10.4s }, [x25], #0x10\n"
- "st1 { v21.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "st1 { v26.4s }, [x22], #0x10\n"
+ "st1 { v13.4s }, [x28], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
+ "st1 { v21.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
+ "st1 { v26.4s }, [x21], #0x10\n"
"tbz x11, #1, 166f\n"
- "str d14, [x9], #0x8\n"
- "str d11, [x25], #0x8\n"
- "str d22, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
+ "str d14, [x28], #0x8\n"
+ "str d11, [x24], #0x8\n"
+ "str d22, [x23], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "str d27, [x21], #0x8\n"
"tbz x11, #0, 173f\n"
- "st1 { v14.s }[2], [x9]\n"
- "st1 { v11.s }[2], [x25]\n"
- "st1 { v22.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
- "st1 { v27.s }[2], [x22]\n"
+ "st1 { v14.s }[2], [x28]\n"
+ "st1 { v11.s }[2], [x24]\n"
+ "st1 { v22.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x22]\n"
+ "st1 { v27.s }[2], [x21]\n"
"b 173f\n"
"166:" // Height 5: Partial direct writeback: partial_1_12
"tbz x11, #0, 173f\n"
- "str s14, [x9, #0x0]\n"
- "str s11, [x25, #0x0]\n"
- "str s22, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
- "str s27, [x22, #0x0]\n"
+ "str s14, [x28, #0x0]\n"
+ "str s11, [x24, #0x0]\n"
+ "str s22, [x23, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
+ "str s27, [x21, #0x0]\n"
"b 173f\n"
"167:" // Height 5: Partial direct writeback: partial_2_8
"tbz x11, #1, 168f\n"
- "str d13, [x9], #0x8\n"
- "str d10, [x25], #0x8\n"
- "str d21, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
+ "str d13, [x28], #0x8\n"
+ "str d10, [x24], #0x8\n"
+ "str d21, [x23], #0x8\n"
+ "str d18, [x22], #0x8\n"
+ "str d26, [x21], #0x8\n"
"tbz x11, #0, 173f\n"
- "st1 { v13.s }[2], [x9]\n"
- "st1 { v10.s }[2], [x25]\n"
- "st1 { v21.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
- "st1 { v26.s }[2], [x22]\n"
+ "st1 { v13.s }[2], [x28]\n"
+ "st1 { v10.s }[2], [x24]\n"
+ "st1 { v21.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x22]\n"
+ "st1 { v26.s }[2], [x21]\n"
"b 173f\n"
"168:" // Height 5: Partial direct writeback: partial_1_8
"tbz x11, #0, 173f\n"
- "str s13, [x9, #0x0]\n"
- "str s10, [x25, #0x0]\n"
- "str s21, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
- "str s26, [x22, #0x0]\n"
+ "str s13, [x28, #0x0]\n"
+ "str s10, [x24, #0x0]\n"
+ "str s21, [x23, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
+ "str s26, [x21, #0x0]\n"
"b 173f\n"
"169:" // Height 5: Partial direct writeback: partial_4_0
"tbz x11, #2, 171f\n"
- "st1 { v6.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
+ "st1 { v6.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
"tbz x11, #1, 170f\n"
- "str d12, [x9], #0x8\n"
- "str d9, [x25], #0x8\n"
- "str d20, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
+ "str d12, [x28], #0x8\n"
+ "str d9, [x24], #0x8\n"
+ "str d20, [x23], #0x8\n"
+ "str d17, [x22], #0x8\n"
+ "str d25, [x21], #0x8\n"
"tbz x11, #0, 173f\n"
- "st1 { v12.s }[2], [x9]\n"
- "st1 { v9.s }[2], [x25]\n"
- "st1 { v20.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
- "st1 { v25.s }[2], [x22]\n"
+ "st1 { v12.s }[2], [x28]\n"
+ "st1 { v9.s }[2], [x24]\n"
+ "st1 { v20.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x22]\n"
+ "st1 { v25.s }[2], [x21]\n"
"b 173f\n"
"170:" // Height 5: Partial direct writeback: partial_1_4
"tbz x11, #0, 173f\n"
- "str s12, [x9, #0x0]\n"
- "str s9, [x25, #0x0]\n"
- "str s20, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
- "str s25, [x22, #0x0]\n"
+ "str s12, [x28, #0x0]\n"
+ "str s9, [x24, #0x0]\n"
+ "str s20, [x23, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
+ "str s25, [x21, #0x0]\n"
"b 173f\n"
"171:" // Height 5: Partial direct writeback: partial_2_0
"tbz x11, #1, 172f\n"
- "str d6, [x9], #0x8\n"
- "str d8, [x25], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
+ "str d6, [x28], #0x8\n"
+ "str d8, [x24], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "str d24, [x21], #0x8\n"
"tbz x11, #0, 173f\n"
- "st1 { v6.s }[2], [x9]\n"
- "st1 { v8.s }[2], [x25]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
- "st1 { v24.s }[2], [x22]\n"
+ "st1 { v6.s }[2], [x28]\n"
+ "st1 { v8.s }[2], [x24]\n"
+ "st1 { v15.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x22]\n"
+ "st1 { v24.s }[2], [x21]\n"
"b 173f\n"
"172:" // Height 5: Partial direct writeback: partial_1_0
- "str s6, [x9, #0x0]\n"
- "str s8, [x25, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
- "str s24, [x22, #0x0]\n"
+ "str s6, [x28, #0x0]\n"
+ "str s8, [x24, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
+ "str s24, [x21, #0x0]\n"
"173:" // Height 5: Partial direct writeback: Done
"b 175f\n"
"174:" // Height 5: Full writeback
- "str q6, [x9, #0x0]\n"
- "str q12, [x9, #0x10]\n"
- "str q13, [x9, #0x20]\n"
- "str q14, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q8, [x25, #0x0]\n"
- "str q9, [x25, #0x10]\n"
- "str q10, [x25, #0x20]\n"
- "str q11, [x25, #0x30]\n"
- "str q15, [x24, #0x0]\n"
- "str q20, [x24, #0x10]\n"
- "str q21, [x24, #0x20]\n"
- "str q22, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
- "str q24, [x22, #0x0]\n"
- "str q25, [x22, #0x10]\n"
- "str q26, [x22, #0x20]\n"
- "str q27, [x22, #0x30]\n"
+ "str q6, [x28, #0x0]\n"
+ "str q12, [x28, #0x10]\n"
+ "str q13, [x28, #0x20]\n"
+ "str q14, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q8, [x24, #0x0]\n"
+ "str q9, [x24, #0x10]\n"
+ "str q10, [x24, #0x20]\n"
+ "str q11, [x24, #0x30]\n"
+ "str q15, [x23, #0x0]\n"
+ "str q20, [x23, #0x10]\n"
+ "str q21, [x23, #0x20]\n"
+ "str q22, [x23, #0x30]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q17, [x22, #0x10]\n"
+ "str q18, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
+ "str q24, [x21, #0x0]\n"
+ "str q25, [x21, #0x10]\n"
+ "str q26, [x21, #0x20]\n"
+ "str q27, [x21, #0x30]\n"
"175:" // Height 5: Writeback done
"subs x11, x11, #0x10\n"
"bgt 142b\n"
"b 212f\n"
"176:" // Height 6
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x18\n"
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x19, #0x18\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"177:" // Height 6: Column loop
- "cbz x12, 178f\n"
- "ldr q8, [x12, #0x0]\n"
- "ldr q9, [x12, #0x10]\n"
+ "cbz x9, 178f\n"
+ "ldr q8, [x9, #0x0]\n"
"zip2 v12.2d, v8.2d, v8.2d\n"
+ "ldr q9, [x9, #0x10]\n"
"zip1 v8.2d, v8.2d, v8.2d\n"
- "ldr q10, [x12, #0x20]\n"
- "ldr q11, [x12, #0x30]\n"
+ "ldr q10, [x9, #0x20]\n"
+ "mov v16.16b, v8.16b\n"
+ "ldr q11, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "mov v20.16b, v12.16b\n"
+ "mov v24.16b, v8.16b\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x12, x12, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
- "mov v16.16b, v8.16b\n"
- "mov v20.16b, v12.16b\n"
"mov v17.16b, v9.16b\n"
"mov v21.16b, v13.16b\n"
"mov v18.16b, v10.16b\n"
"mov v22.16b, v14.16b\n"
"mov v19.16b, v11.16b\n"
"mov v23.16b, v15.16b\n"
- "mov v24.16b, v8.16b\n"
"mov v28.16b, v12.16b\n"
"mov v25.16b, v9.16b\n"
"mov v29.16b, v13.16b\n"
@@ -2434,174 +2434,174 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"b 190f\n"
"178:" // Height 6: no bias
"tbz %x[flags], #0, 189f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x11, #0x10\n"
- "add x21, x22, x20, LSL #2\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"bge 187f\n"
"tbz x11, #3, 182f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
- "ld1 { v26.4s }, [x22], #0x10\n"
- "ld1 { v29.4s }, [x21], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v26.4s }, [x21], #0x10\n"
+ "ld1 { v28.4s }, [x20], #0x10\n"
+ "ld1 { v29.4s }, [x20], #0x10\n"
"tbz x11, #2, 180f\n"
- "ld1 { v11.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v19.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
- "ld1 { v27.4s }, [x22], #0x10\n"
- "ld1 { v30.4s }, [x21], #0x10\n"
+ "ld1 { v11.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v19.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
+ "ld1 { v27.4s }, [x21], #0x10\n"
+ "ld1 { v30.4s }, [x20], #0x10\n"
"tbz x11, #1, 179f\n"
- "ldr d16, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "mov x20, #0x38\n"
- "ldr d24, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d6, [x22], #0x8\n"
- "ldr d31, [x21], #0x8\n"
+ "ldr d16, [x28], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d15, [x24], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "ldr d6, [x21], #0x8\n"
+ "ldr d31, [x20], #0x8\n"
"tbz x11, #0, 186f\n"
- "ld1 { v16.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v24.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
- "ld1 { v6.s }[2], [x22]\n"
- "ld1 { v31.s }[2], [x21]\n"
+ "ld1 { v16.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
+ "ld1 { v23.s }[2], [x22]\n"
+ "ld1 { v6.s }[2], [x21]\n"
+ "ld1 { v31.s }[2], [x20]\n"
"b 186f\n"
"179:" // Height 6: Partial accumulate: partial_1_12
- "mov x20, #0x30\n"
+ "mov x19, #0x30\n"
"tbz x11, #0, 186f\n"
- "ldr s16, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s24, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
- "ldr s6, [x22, #0x0]\n"
- "ldr s31, [x21, #0x0]\n"
+ "ldr s16, [x28, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
+ "ldr s23, [x22, #0x0]\n"
+ "ldr s6, [x21, #0x0]\n"
+ "ldr s31, [x20, #0x0]\n"
"b 186f\n"
"180:" // Height 6: Partial accumulate: partial_2_8
"tbz x11, #1, 181f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "mov x20, #0x28\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
- "ldr d30, [x21], #0x8\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "ldr d27, [x21], #0x8\n"
+ "ldr d30, [x20], #0x8\n"
"tbz x11, #0, 186f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
- "ld1 { v27.s }[2], [x22]\n"
- "ld1 { v30.s }[2], [x21]\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v22.s }[2], [x22]\n"
+ "ld1 { v27.s }[2], [x21]\n"
+ "ld1 { v30.s }[2], [x20]\n"
"b 186f\n"
"181:" // Height 6: Partial accumulate: partial_1_8
- "mov x20, #0x20\n"
+ "mov x19, #0x20\n"
"tbz x11, #0, 186f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
- "ldr s27, [x22, #0x0]\n"
- "ldr s30, [x21, #0x0]\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s22, [x22, #0x0]\n"
+ "ldr s27, [x21, #0x0]\n"
+ "ldr s30, [x20, #0x0]\n"
"b 186f\n"
"182:" // Height 6: Partial accumulate: partial_4_0
"tbz x11, #2, 184f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
+ "ld1 { v28.4s }, [x20], #0x10\n"
"tbz x11, #1, 183f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "mov x20, #0x18\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
- "ldr d29, [x21], #0x8\n"
+ "ldr d10, [x28], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d13, [x24], #0x8\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d26, [x21], #0x8\n"
+ "ldr d29, [x20], #0x8\n"
"tbz x11, #0, 186f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
- "ld1 { v26.s }[2], [x22]\n"
- "ld1 { v29.s }[2], [x21]\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v21.s }[2], [x22]\n"
+ "ld1 { v26.s }[2], [x21]\n"
+ "ld1 { v29.s }[2], [x20]\n"
"b 186f\n"
"183:" // Height 6: Partial accumulate: partial_1_4
- "mov x20, #0x10\n"
+ "mov x19, #0x10\n"
"tbz x11, #0, 186f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
- "ldr s26, [x22, #0x0]\n"
- "ldr s29, [x21, #0x0]\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "ldr s21, [x22, #0x0]\n"
+ "ldr s26, [x21, #0x0]\n"
+ "ldr s29, [x20, #0x0]\n"
"b 186f\n"
"184:" // Height 6: Partial accumulate: partial_2_0
"tbz x11, #1, 185f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "mov x20, #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
- "ldr d28, [x21], #0x8\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "ldr d25, [x21], #0x8\n"
+ "ldr d28, [x20], #0x8\n"
"tbz x11, #0, 186f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
- "ld1 { v28.s }[2], [x21]\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v20.s }[2], [x22]\n"
+ "ld1 { v25.s }[2], [x21]\n"
+ "ld1 { v28.s }[2], [x20]\n"
"b 186f\n"
"185:" // Height 6: Partial accumulate: partial_1_0
- "ldr s9, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
- "mov x20, #0x0\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
- "ldr s28, [x21, #0x0]\n"
+ "ldr s9, [x28, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s12, [x24, #0x0]\n"
+ "ldr s17, [x23, #0x0]\n"
+ "ldr s20, [x22, #0x0]\n"
+ "ldr s25, [x21, #0x0]\n"
+ "ldr s28, [x20, #0x0]\n"
"186:" // Height 6: Partial accumulate: Done
- "sub x9, x9, x20\n"
+ "sub x28, x28, x19\n"
"b 188f\n"
"187:" // Height 6: full accumulate
- "ldr q9, [x9, #0x0]\n"
- "ldr q10, [x9, #0x10]\n"
- "ldr q11, [x9, #0x20]\n"
- "ldr q16, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q17, [x24, #0x0]\n"
- "ldr q18, [x24, #0x10]\n"
- "ldr q19, [x24, #0x20]\n"
- "ldr q24, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q25, [x22, #0x0]\n"
- "ldr q26, [x22, #0x10]\n"
- "ldr q27, [x22, #0x20]\n"
- "ldr q6, [x22, #0x30]\n"
- "ldr q28, [x21, #0x0]\n"
- "ldr q29, [x21, #0x10]\n"
- "ldr q30, [x21, #0x20]\n"
- "ldr q31, [x21, #0x30]\n"
+ "ldr q9, [x28, #0x0]\n"
+ "ldr q10, [x28, #0x10]\n"
+ "ldr q11, [x28, #0x20]\n"
+ "ldr q16, [x28, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q17, [x23, #0x0]\n"
+ "ldr q18, [x23, #0x10]\n"
+ "ldr q19, [x23, #0x20]\n"
+ "ldr q24, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
+ "ldr q25, [x21, #0x0]\n"
+ "ldr q26, [x21, #0x10]\n"
+ "ldr q27, [x21, #0x20]\n"
+ "ldr q6, [x21, #0x30]\n"
+ "ldr q28, [x20, #0x0]\n"
+ "ldr q29, [x20, #0x10]\n"
+ "ldr q30, [x20, #0x20]\n"
+ "ldr q31, [x20, #0x30]\n"
"188:" // Height 6: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -2654,83 +2654,80 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"movi v30.16b, #0x0\n"
"movi v31.16b, #0x0\n"
"190:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"191:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 192f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 193f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x22, x22, x20, LSL #2\n"
- "add x21, x21, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 193f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "add x20, x20, x19, LSL #2\n"
"b 193f\n"
"192:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"193:" // Height 6: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"blt 196f\n"
- "ld1 { v0.4s }, [x26], #0x10\n"
- "ld1 { v2.4s }, [x24], #0x10\n"
- "cmp x27, #0x8\n"
- "ld1 { v4.4s }, [x22], #0x10\n"
- "ld1 { v1.4s }, [x25], #0x10\n"
- "ld1 { v3.4s }, [x23], #0x10\n"
- "ld1 { v5.4s }, [x21], #0x10\n"
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ld1 { v0.4s }, [x25], #0x10\n"
+ "cmp x26, #0x8\n"
"blt 195f\n"
"194:" // Height 6: Multiply loop: Main loop head
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
- ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x8\n"
- ".inst 0x0ea16884 // bfcvtn v4.4h, v4.4s\n"
+ "ld1 { v1.4s }, [x24], #0x10\n"
+ "sub x26, x26, #0x4\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
- ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ld1 { v2.4s }, [x23], #0x10\n"
+ "cmp x26, #0x8\n"
+ ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
+ "ld1 { v3.4s }, [x22], #0x10\n"
+ "ld1 { v4.4s }, [x21], #0x10\n"
".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
+ "ld1 { v5.4s }, [x20], #0x10\n"
+ ".inst 0x0ea16884 // bfcvtn v4.4h, v4.4s\n"
+ "ldr q6, [x10, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
".inst 0x4ea168a4 // bfcvtn2 v4.8h, v5.4s\n"
- ".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
"prfm pldl1keep, [x25, #0x80]\n"
- "ld1 { v1.4s }, [x25], #0x10\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e46ec98 // bfmmla v24.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x20]\n"
- ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x6e47ec9c // bfmmla v28.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x30]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec51 // bfmmla v17.4s, v2.8h, v6.8h\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "ld1 { v3.4s }, [x23], #0x10\n"
".inst 0x6e46ec99 // bfmmla v25.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x40]\n"
".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e47ec55 // bfmmla v21.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9d // bfmmla v29.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x50]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
- "ld1 { v5.4s }, [x21], #0x10\n"
".inst 0x6e46ec0a // bfmmla v10.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec52 // bfmmla v18.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec9a // bfmmla v26.4s, v4.8h, v6.8h\n"
@@ -2743,39 +2740,42 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec53 // bfmmla v19.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec9b // bfmmla v27.4s, v4.8h, v6.8h\n"
- "ldr q6, [x10, #0x0]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
- "ld1 { v0.4s }, [x26], #0x10\n"
+ "ld1 { v0.4s }, [x25], #0x10\n"
".inst 0x6e47ec57 // bfmmla v23.4s, v2.8h, v7.8h\n"
- "ld1 { v2.4s }, [x24], #0x10\n"
".inst 0x6e47ec9f // bfmmla v31.4s, v4.8h, v7.8h\n"
- "ld1 { v4.4s }, [x22], #0x10\n"
- "ldr q7, [x10, #0x10]\n"
"bge 194b\n"
"195:" // Height 6: Multiply loop: Single iteration only
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ "ld1 { v1.4s }, [x24], #0x10\n"
+ "sub x26, x26, #0x4\n"
+ ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
+ "ld1 { v2.4s }, [x23], #0x10\n"
+ "ld1 { v3.4s }, [x22], #0x10\n"
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
- "sub x27, x27, #0x4\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ld1 { v4.4s }, [x21], #0x10\n"
+ "ld1 { v5.4s }, [x20], #0x10\n"
+ ".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
+ "ldr q6, [x10, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
".inst 0x0ea16884 // bfcvtn v4.4h, v4.4s\n"
- ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
- ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
"prfm pldl1keep, [x25, #0x80]\n"
- ".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
".inst 0x4ea168a4 // bfcvtn2 v4.8h, v5.4s\n"
- ".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
"prfm pldl1keep, [x24, #0x80]\n"
+ ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ ".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x6e46ec98 // bfmmla v24.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x20]\n"
- ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
".inst 0x6e47ec9c // bfmmla v28.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x30]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec51 // bfmmla v17.4s, v2.8h, v6.8h\n"
- "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x6e46ec99 // bfmmla v25.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x40]\n"
".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
@@ -2798,40 +2798,40 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
".inst 0x6e47ec57 // bfmmla v23.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9f // bfmmla v31.4s, v4.8h, v7.8h\n"
"196:" // Height 6: Multiply loop: Main loop skip
- "cbz x27, 199f\n"
- "cbz x27, 199f\n"
- "tbz x27, #1, 197f\n"
- "ldr d0, [x26], #0x8\n"
- "ldr d1, [x25], #0x8\n"
- "ldr d2, [x24], #0x8\n"
- "ldr d3, [x23], #0x8\n"
- "ldr d4, [x22], #0x8\n"
- "ldr d5, [x21], #0x8\n"
- "tbz x27, #0, 198f\n"
- "ld1 { v0.s }[2], [x26]\n"
- "ld1 { v1.s }[2], [x25]\n"
- "ld1 { v2.s }[2], [x24]\n"
- "ld1 { v3.s }[2], [x23]\n"
- "ld1 { v4.s }[2], [x22]\n"
- "ld1 { v5.s }[2], [x21]\n"
+ "cbz x26, 199f\n"
+ "cbz x26, 199f\n"
+ "tbz x26, #1, 197f\n"
+ "ldr d0, [x25], #0x8\n"
+ "ldr d1, [x24], #0x8\n"
+ "ldr d2, [x23], #0x8\n"
+ "ldr d3, [x22], #0x8\n"
+ "ldr d4, [x21], #0x8\n"
+ "ldr d5, [x20], #0x8\n"
+ "tbz x26, #0, 198f\n"
+ "ld1 { v0.s }[2], [x25]\n"
+ "ld1 { v1.s }[2], [x24]\n"
+ "ld1 { v2.s }[2], [x23]\n"
+ "ld1 { v3.s }[2], [x22]\n"
+ "ld1 { v4.s }[2], [x21]\n"
+ "ld1 { v5.s }[2], [x20]\n"
"b 198f\n"
"197:" // Height 6: Multiply loop: Ragged operand read: partial_1_0
- "ldr s0, [x26, #0x0]\n"
- "ldr s1, [x25, #0x0]\n"
- "ldr s2, [x24, #0x0]\n"
- "ldr s3, [x23, #0x0]\n"
- "ldr s4, [x22, #0x0]\n"
- "ldr s5, [x21, #0x0]\n"
+ "ldr s0, [x25, #0x0]\n"
+ "ldr s1, [x24, #0x0]\n"
+ "ldr s2, [x23, #0x0]\n"
+ "ldr s3, [x22, #0x0]\n"
+ "ldr s4, [x21, #0x0]\n"
+ "ldr s5, [x20, #0x0]\n"
"198:" // Height 6: Multiply loop: Ragged operand read: Done
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ "ldr q6, [x10, #0x0]\n"
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
+ "ldr q7, [x10, #0x10]\n"
".inst 0x0ea16884 // bfcvtn v4.4h, v4.4s\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
- ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
".inst 0x4ea168a4 // bfcvtn2 v4.8h, v5.4s\n"
+ ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec98 // bfmmla v24.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x20]\n"
@@ -2863,33 +2863,33 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
".inst 0x6e47ec57 // bfmmla v23.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9f // bfmmla v31.4s, v4.8h, v7.8h\n"
"199:" // Height 6: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 191b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"uzp1 v6.2d, v8.2d, v12.2d\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x24, x28, x19, LSL #2\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
- "add x21, x22, x20, LSL #2\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
+ "add x23, x24, x19, LSL #2\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #2\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
- "prfm pstl1keep, [x25, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
+ "add x20, x21, x19, LSL #2\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x20, #0x0]\n"
"uzp1 v15.2d, v16.2d, v20.2d\n"
- "prfm pstl1keep, [x23, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
- "prfm pstl1keep, [x21, #0x0]\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
"uzp2 v18.2d, v18.2d, v22.2d\n"
@@ -2904,232 +2904,232 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"uzp1 v30.2d, v27.2d, v31.2d\n"
"uzp2 v27.2d, v27.2d, v31.2d\n"
"tbz %x[flags], #1, 200f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v6.4s, v6.4s, v1.4s\n"
- "fmin v12.4s, v12.4s, v1.4s\n"
- "fmin v13.4s, v13.4s, v1.4s\n"
- "fmin v14.4s, v14.4s, v1.4s\n"
- "fmin v8.4s, v8.4s, v1.4s\n"
- "fmin v9.4s, v9.4s, v1.4s\n"
- "fmin v10.4s, v10.4s, v1.4s\n"
- "fmin v11.4s, v11.4s, v1.4s\n"
- "fmin v15.4s, v15.4s, v1.4s\n"
- "fmin v20.4s, v20.4s, v1.4s\n"
- "fmin v21.4s, v21.4s, v1.4s\n"
- "fmin v22.4s, v22.4s, v1.4s\n"
- "fmin v16.4s, v16.4s, v1.4s\n"
- "fmin v17.4s, v17.4s, v1.4s\n"
- "fmin v18.4s, v18.4s, v1.4s\n"
- "fmin v19.4s, v19.4s, v1.4s\n"
- "fmin v23.4s, v23.4s, v1.4s\n"
- "fmin v28.4s, v28.4s, v1.4s\n"
- "fmin v29.4s, v29.4s, v1.4s\n"
- "fmin v30.4s, v30.4s, v1.4s\n"
- "fmin v24.4s, v24.4s, v1.4s\n"
- "fmin v25.4s, v25.4s, v1.4s\n"
- "fmin v26.4s, v26.4s, v1.4s\n"
- "fmin v27.4s, v27.4s, v1.4s\n"
- "fmax v6.4s, v6.4s, v0.4s\n"
- "fmax v12.4s, v12.4s, v0.4s\n"
- "fmax v13.4s, v13.4s, v0.4s\n"
- "fmax v14.4s, v14.4s, v0.4s\n"
- "fmax v8.4s, v8.4s, v0.4s\n"
- "fmax v9.4s, v9.4s, v0.4s\n"
- "fmax v10.4s, v10.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v0.4s\n"
- "fmax v15.4s, v15.4s, v0.4s\n"
- "fmax v20.4s, v20.4s, v0.4s\n"
- "fmax v21.4s, v21.4s, v0.4s\n"
- "fmax v22.4s, v22.4s, v0.4s\n"
- "fmax v16.4s, v16.4s, v0.4s\n"
- "fmax v17.4s, v17.4s, v0.4s\n"
- "fmax v18.4s, v18.4s, v0.4s\n"
- "fmax v19.4s, v19.4s, v0.4s\n"
- "fmax v23.4s, v23.4s, v0.4s\n"
- "fmax v28.4s, v28.4s, v0.4s\n"
- "fmax v29.4s, v29.4s, v0.4s\n"
- "fmax v30.4s, v30.4s, v0.4s\n"
- "fmax v24.4s, v24.4s, v0.4s\n"
- "fmax v25.4s, v25.4s, v0.4s\n"
- "fmax v26.4s, v26.4s, v0.4s\n"
- "fmax v27.4s, v27.4s, v0.4s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v6.4s, v6.4s, v0.4s\n"
+ "fmin v12.4s, v12.4s, v0.4s\n"
+ "fmin v13.4s, v13.4s, v0.4s\n"
+ "fmin v14.4s, v14.4s, v0.4s\n"
+ "fmax v6.4s, v6.4s, v1.4s\n"
+ "fmax v12.4s, v12.4s, v1.4s\n"
+ "fmax v13.4s, v13.4s, v1.4s\n"
+ "fmax v14.4s, v14.4s, v1.4s\n"
+ "fmin v8.4s, v8.4s, v0.4s\n"
+ "fmin v9.4s, v9.4s, v0.4s\n"
+ "fmin v10.4s, v10.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v1.4s\n"
+ "fmax v9.4s, v9.4s, v1.4s\n"
+ "fmax v10.4s, v10.4s, v1.4s\n"
+ "fmin v11.4s, v11.4s, v0.4s\n"
+ "fmin v15.4s, v15.4s, v0.4s\n"
+ "fmin v20.4s, v20.4s, v0.4s\n"
+ "fmax v11.4s, v11.4s, v1.4s\n"
+ "fmax v15.4s, v15.4s, v1.4s\n"
+ "fmax v20.4s, v20.4s, v1.4s\n"
+ "fmin v21.4s, v21.4s, v0.4s\n"
+ "fmin v22.4s, v22.4s, v0.4s\n"
+ "fmin v16.4s, v16.4s, v0.4s\n"
+ "fmax v21.4s, v21.4s, v1.4s\n"
+ "fmax v22.4s, v22.4s, v1.4s\n"
+ "fmax v16.4s, v16.4s, v1.4s\n"
+ "fmin v17.4s, v17.4s, v0.4s\n"
+ "fmin v18.4s, v18.4s, v0.4s\n"
+ "fmin v19.4s, v19.4s, v0.4s\n"
+ "fmax v17.4s, v17.4s, v1.4s\n"
+ "fmax v18.4s, v18.4s, v1.4s\n"
+ "fmax v19.4s, v19.4s, v1.4s\n"
+ "fmin v23.4s, v23.4s, v0.4s\n"
+ "fmin v28.4s, v28.4s, v0.4s\n"
+ "fmin v29.4s, v29.4s, v0.4s\n"
+ "fmax v23.4s, v23.4s, v1.4s\n"
+ "fmax v28.4s, v28.4s, v1.4s\n"
+ "fmax v29.4s, v29.4s, v1.4s\n"
+ "fmin v30.4s, v30.4s, v0.4s\n"
+ "fmin v24.4s, v24.4s, v0.4s\n"
+ "fmin v25.4s, v25.4s, v0.4s\n"
+ "fmax v30.4s, v30.4s, v1.4s\n"
+ "fmax v24.4s, v24.4s, v1.4s\n"
+ "fmax v25.4s, v25.4s, v1.4s\n"
+ "fmin v26.4s, v26.4s, v0.4s\n"
+ "fmin v27.4s, v27.4s, v0.4s\n"
+ "fmax v26.4s, v26.4s, v1.4s\n"
+ "fmax v27.4s, v27.4s, v1.4s\n"
"200:" // Height 6: No activation
"cmp x11, #0x10\n"
"bge 209f\n"
"tbz x11, #3, 204f\n"
- "st1 { v6.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v9.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "st1 { v23.4s }, [x22], #0x10\n"
- "st1 { v28.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
- "st1 { v25.4s }, [x21], #0x10\n"
+ "st1 { v6.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
+ "st1 { v23.4s }, [x21], #0x10\n"
+ "st1 { v28.4s }, [x21], #0x10\n"
+ "st1 { v24.4s }, [x20], #0x10\n"
+ "st1 { v25.4s }, [x20], #0x10\n"
"tbz x11, #2, 202f\n"
- "st1 { v13.4s }, [x9], #0x10\n"
- "st1 { v10.4s }, [x25], #0x10\n"
- "st1 { v21.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "st1 { v29.4s }, [x22], #0x10\n"
- "st1 { v26.4s }, [x21], #0x10\n"
+ "st1 { v13.4s }, [x28], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
+ "st1 { v21.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
+ "st1 { v29.4s }, [x21], #0x10\n"
+ "st1 { v26.4s }, [x20], #0x10\n"
"tbz x11, #1, 201f\n"
- "str d14, [x9], #0x8\n"
- "str d11, [x25], #0x8\n"
- "str d22, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
- "str d30, [x22], #0x8\n"
- "str d27, [x21], #0x8\n"
+ "str d14, [x28], #0x8\n"
+ "str d11, [x24], #0x8\n"
+ "str d22, [x23], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "str d30, [x21], #0x8\n"
+ "str d27, [x20], #0x8\n"
"tbz x11, #0, 208f\n"
- "st1 { v14.s }[2], [x9]\n"
- "st1 { v11.s }[2], [x25]\n"
- "st1 { v22.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
- "st1 { v30.s }[2], [x22]\n"
- "st1 { v27.s }[2], [x21]\n"
+ "st1 { v14.s }[2], [x28]\n"
+ "st1 { v11.s }[2], [x24]\n"
+ "st1 { v22.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x22]\n"
+ "st1 { v30.s }[2], [x21]\n"
+ "st1 { v27.s }[2], [x20]\n"
"b 208f\n"
"201:" // Height 6: Partial direct writeback: partial_1_12
"tbz x11, #0, 208f\n"
- "str s14, [x9, #0x0]\n"
- "str s11, [x25, #0x0]\n"
- "str s22, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
- "str s30, [x22, #0x0]\n"
- "str s27, [x21, #0x0]\n"
+ "str s14, [x28, #0x0]\n"
+ "str s11, [x24, #0x0]\n"
+ "str s22, [x23, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
+ "str s30, [x21, #0x0]\n"
+ "str s27, [x20, #0x0]\n"
"b 208f\n"
"202:" // Height 6: Partial direct writeback: partial_2_8
"tbz x11, #1, 203f\n"
- "str d13, [x9], #0x8\n"
- "str d10, [x25], #0x8\n"
- "str d21, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
- "str d29, [x22], #0x8\n"
- "str d26, [x21], #0x8\n"
+ "str d13, [x28], #0x8\n"
+ "str d10, [x24], #0x8\n"
+ "str d21, [x23], #0x8\n"
+ "str d18, [x22], #0x8\n"
+ "str d29, [x21], #0x8\n"
+ "str d26, [x20], #0x8\n"
"tbz x11, #0, 208f\n"
- "st1 { v13.s }[2], [x9]\n"
- "st1 { v10.s }[2], [x25]\n"
- "st1 { v21.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
- "st1 { v29.s }[2], [x22]\n"
- "st1 { v26.s }[2], [x21]\n"
+ "st1 { v13.s }[2], [x28]\n"
+ "st1 { v10.s }[2], [x24]\n"
+ "st1 { v21.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x22]\n"
+ "st1 { v29.s }[2], [x21]\n"
+ "st1 { v26.s }[2], [x20]\n"
"b 208f\n"
"203:" // Height 6: Partial direct writeback: partial_1_8
"tbz x11, #0, 208f\n"
- "str s13, [x9, #0x0]\n"
- "str s10, [x25, #0x0]\n"
- "str s21, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
- "str s29, [x22, #0x0]\n"
- "str s26, [x21, #0x0]\n"
+ "str s13, [x28, #0x0]\n"
+ "str s10, [x24, #0x0]\n"
+ "str s21, [x23, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
+ "str s29, [x21, #0x0]\n"
+ "str s26, [x20, #0x0]\n"
"b 208f\n"
"204:" // Height 6: Partial direct writeback: partial_4_0
"tbz x11, #2, 206f\n"
- "st1 { v6.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v23.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
+ "st1 { v6.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v23.4s }, [x21], #0x10\n"
+ "st1 { v24.4s }, [x20], #0x10\n"
"tbz x11, #1, 205f\n"
- "str d12, [x9], #0x8\n"
- "str d9, [x25], #0x8\n"
- "str d20, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
- "str d28, [x22], #0x8\n"
- "str d25, [x21], #0x8\n"
+ "str d12, [x28], #0x8\n"
+ "str d9, [x24], #0x8\n"
+ "str d20, [x23], #0x8\n"
+ "str d17, [x22], #0x8\n"
+ "str d28, [x21], #0x8\n"
+ "str d25, [x20], #0x8\n"
"tbz x11, #0, 208f\n"
- "st1 { v12.s }[2], [x9]\n"
- "st1 { v9.s }[2], [x25]\n"
- "st1 { v20.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
- "st1 { v28.s }[2], [x22]\n"
- "st1 { v25.s }[2], [x21]\n"
+ "st1 { v12.s }[2], [x28]\n"
+ "st1 { v9.s }[2], [x24]\n"
+ "st1 { v20.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x22]\n"
+ "st1 { v28.s }[2], [x21]\n"
+ "st1 { v25.s }[2], [x20]\n"
"b 208f\n"
"205:" // Height 6: Partial direct writeback: partial_1_4
"tbz x11, #0, 208f\n"
- "str s12, [x9, #0x0]\n"
- "str s9, [x25, #0x0]\n"
- "str s20, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
- "str s28, [x22, #0x0]\n"
- "str s25, [x21, #0x0]\n"
+ "str s12, [x28, #0x0]\n"
+ "str s9, [x24, #0x0]\n"
+ "str s20, [x23, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
+ "str s28, [x21, #0x0]\n"
+ "str s25, [x20, #0x0]\n"
"b 208f\n"
"206:" // Height 6: Partial direct writeback: partial_2_0
"tbz x11, #1, 207f\n"
- "str d6, [x9], #0x8\n"
- "str d8, [x25], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d23, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
+ "str d6, [x28], #0x8\n"
+ "str d8, [x24], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "str d23, [x21], #0x8\n"
+ "str d24, [x20], #0x8\n"
"tbz x11, #0, 208f\n"
- "st1 { v6.s }[2], [x9]\n"
- "st1 { v8.s }[2], [x25]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
- "st1 { v23.s }[2], [x22]\n"
- "st1 { v24.s }[2], [x21]\n"
+ "st1 { v6.s }[2], [x28]\n"
+ "st1 { v8.s }[2], [x24]\n"
+ "st1 { v15.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x22]\n"
+ "st1 { v23.s }[2], [x21]\n"
+ "st1 { v24.s }[2], [x20]\n"
"b 208f\n"
"207:" // Height 6: Partial direct writeback: partial_1_0
- "str s6, [x9, #0x0]\n"
- "str s8, [x25, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
- "str s23, [x22, #0x0]\n"
- "str s24, [x21, #0x0]\n"
+ "str s6, [x28, #0x0]\n"
+ "str s8, [x24, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
+ "str s23, [x21, #0x0]\n"
+ "str s24, [x20, #0x0]\n"
"208:" // Height 6: Partial direct writeback: Done
"b 210f\n"
"209:" // Height 6: Full writeback
- "str q6, [x9, #0x0]\n"
- "str q12, [x9, #0x10]\n"
- "str q13, [x9, #0x20]\n"
- "str q14, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q8, [x25, #0x0]\n"
- "str q9, [x25, #0x10]\n"
- "str q10, [x25, #0x20]\n"
- "str q11, [x25, #0x30]\n"
- "str q15, [x24, #0x0]\n"
- "str q20, [x24, #0x10]\n"
- "str q21, [x24, #0x20]\n"
- "str q22, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
- "str q23, [x22, #0x0]\n"
- "str q28, [x22, #0x10]\n"
- "str q29, [x22, #0x20]\n"
- "str q30, [x22, #0x30]\n"
- "str q24, [x21, #0x0]\n"
- "str q25, [x21, #0x10]\n"
- "str q26, [x21, #0x20]\n"
- "str q27, [x21, #0x30]\n"
+ "str q6, [x28, #0x0]\n"
+ "str q12, [x28, #0x10]\n"
+ "str q13, [x28, #0x20]\n"
+ "str q14, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q8, [x24, #0x0]\n"
+ "str q9, [x24, #0x10]\n"
+ "str q10, [x24, #0x20]\n"
+ "str q11, [x24, #0x30]\n"
+ "str q15, [x23, #0x0]\n"
+ "str q20, [x23, #0x10]\n"
+ "str q21, [x23, #0x20]\n"
+ "str q22, [x23, #0x30]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q17, [x22, #0x10]\n"
+ "str q18, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
+ "str q23, [x21, #0x0]\n"
+ "str q28, [x21, #0x10]\n"
+ "str q29, [x21, #0x20]\n"
+ "str q30, [x21, #0x30]\n"
+ "str q24, [x20, #0x0]\n"
+ "str q25, [x20, #0x10]\n"
+ "str q26, [x20, #0x20]\n"
+ "str q27, [x20, #0x30]\n"
"210:" // Height 6: Writeback done
"subs x11, x11, #0x10\n"
"bgt 177b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 212f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 211f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"211:" // Update direct input
- "mov x20, #0x18\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x18\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"212:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
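
(Illustrative aside, not part of the patch: the hunk above restores x19 as a scratch register inside the asm block and, in the clobber list, swaps "x12" out for "x19" so the compiler knows x19 is written. The sketch below is a minimal, hypothetical example of that clobber-list convention; `double_in_place` and its operand names are invented for illustration. Since x19 is callee-saved under AAPCS64, naming it as a clobber makes the compiler save and restore it around the asm statement, which is what lets a kernel use it freely as a temporary.)

#ifdef __aarch64__
#include <cstdint>

// Hypothetical sketch: doubles each element of `data` in place, holding the
// temporary in x19. Listing "x19" in the clobber set tells the compiler the
// register is modified, so it preserves the callee-saved value around the
// block, matching the convention of the generated kernels above.
void double_in_place(int64_t *data, int64_t n)
{
    __asm__ __volatile__(
        "cbz %x[n], 2f\n"
        "1:\n"
        "ldr x19, [%x[ptr]]\n"     // load element into the x19 temporary
        "add x19, x19, x19\n"      // double it
        "str x19, [%x[ptr]], #8\n" // store back and advance the pointer
        "subs %x[n], %x[n], #1\n"
        "bne 1b\n"
        "2:\n"
        : [ptr] "+&r" (data), [n] "+&r" (n)
        :
        : "cc", "memory", "x19"
    );
}
#endif // __aarch64__
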
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16/a55.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16/a55.cpp
index b31b80586c..ee7e55f179 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16/a55.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16/a55.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -85,220 +85,232 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"cmp %x[M], #0x2\n"
"bgt 61f\n"
"beq 31f\n"
- "mov x16, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
"movi v15.16b, #0x1\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x9, %x[col_bias]\n"
"bic %x[flags], %x[flags], #0x80000000\n"
- "ldr x15, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x14, %x[output_ptr]\n"
- "ldr x13, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"2:" // Height 1: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
"movi v18.4s, #0x0\n"
"movi v19.4s, #0x0\n"
"3:" // Height 1: setup done
- "mov x12, #0x0\n"
+ "mov x27, #0x0\n"
"4:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w11, [x20, x12, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 5f\n"
- "ldr x21, [%x[input_ptr], x12, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x10, [x21, #0x0]\n"
- "cbnz x12, 6f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x10, x10, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 6f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
"b 6f\n"
"5:" // Height 1: setup direct input
- "mov x10, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"6:" // Height 1: input setup done
- "cmp x11, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 11f\n"
- "ldr q0, [x10, #0x0]\n"
- "cmp x11, #0x20\n"
- "ldr q4, [x13, #0x0]\n"
- "ldr q5, [x13, #0x10]\n"
- "ldr q6, [x13, #0x20]\n"
- "ldr q7, [x13, #0x30]\n"
- "ldr q8, [x13, #0x40]\n"
- "ldr q9, [x13, #0x50]\n"
- "ldr q10, [x13, #0x60]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q4, [x10, #0x0]\n"
+ "cmp x26, #0x20\n"
"blt 9f\n"
"7:" // Height 1: Multiply loop: Main loop head
".inst 0x4f80e090 // sdot v16.4s, v4.16b, v0.4b[0]\n"
- "ldr d4, [x13, #0x70]\n"
- "ldr x9, [x13, #0x78]\n"
+ "ldr d5, [x10, #0x10]\n"
+ "ldr x24, [x10, #0x18]\n"
+ "add x25, x25, #0x10\n"
+ "ldr d6, [x10, #0x20]\n"
+ "ldr x23, [x10, #0x28]\n"
+ "mov v5.d[1], x24\n"
+ "ldr d7, [x10, #0x30]\n"
+ "ldr x19, [x10, #0x38]\n"
".inst 0x4f80e0b1 // sdot v17.4s, v5.16b, v0.4b[0]\n"
- "ldr d5, [x13, #0x80]\n"
+ "mov v6.d[1], x23\n"
+ "ldr d8, [x10, #0x40]\n"
".inst 0x4f80e0d2 // sdot v18.4s, v6.16b, v0.4b[0]\n"
- "ldr d6, [x13, #0x90]\n"
+ "mov v7.d[1], x19\n"
+ "ldr x23, [x10, #0x48]\n"
".inst 0x4f80e0f3 // sdot v19.4s, v7.16b, v0.4b[0]\n"
- "ldr d7, [x13, #0xa0]\n"
- "mov v4.d[1], x9\n"
- "ldr x28, [x13, #0x88]\n"
+ "ldr d9, [x10, #0x50]\n"
+ "ldr x19, [x10, #0x58]\n"
+ "mov v8.d[1], x23\n"
+ "ldr d10, [x10, #0x60]\n"
+ "ldr x23, [x10, #0x68]\n"
".inst 0x4fa0e110 // sdot v16.4s, v8.16b, v0.4b[1]\n"
- "ldr d8, [x13, #0xb0]\n"
+ "mov v9.d[1], x19\n"
+ "ldr d4, [x10, #0x70]\n"
".inst 0x4fa0e131 // sdot v17.4s, v9.16b, v0.4b[1]\n"
- "ldr d9, [x13, #0xc0]\n"
+ "mov v10.d[1], x23\n"
+ "ldr x19, [x10, #0x78]\n"
".inst 0x4fa0e152 // sdot v18.4s, v10.16b, v0.4b[1]\n"
- "ldr d10, [x13, #0xd0]\n"
+ "ldr d5, [x10, #0x80]\n"
+ "ldr x24, [x10, #0x88]\n"
+ "mov v4.d[1], x19\n"
+ "ldr d6, [x10, #0x90]\n"
+ "ldr x23, [x10, #0x98]\n"
".inst 0x4fa0e093 // sdot v19.4s, v4.16b, v0.4b[1]\n"
- "ldr d4, [x13, #0xe0]\n"
- "mov v5.d[1], x28\n"
- "ldr x27, [x13, #0x98]\n"
- "mov v6.d[1], x27\n"
- "ldr x26, [x13, #0xa8]\n"
- "mov v7.d[1], x26\n"
- "ldr x25, [x13, #0xb8]\n"
- "mov v8.d[1], x25\n"
- "ldr x24, [x13, #0xc8]\n"
+ "mov v5.d[1], x24\n"
+ "ldr d7, [x10, #0xa0]\n"
".inst 0x4f80e8b0 // sdot v16.4s, v5.16b, v0.4b[2]\n"
- "ldr d5, [x13, #0xf0]\n"
+ "mov v6.d[1], x23\n"
+ "ldr x19, [x10, #0xa8]\n"
".inst 0x4f80e8d1 // sdot v17.4s, v6.16b, v0.4b[2]\n"
- "ldr x20, [x13, #0xd8]\n"
+ "ldr d8, [x10, #0xb0]\n"
+ "ldr x23, [x10, #0xb8]\n"
+ "mov v7.d[1], x19\n"
+ "ldr d9, [x10, #0xc0]\n"
+ "ldr x19, [x10, #0xc8]\n"
".inst 0x4f80e8f2 // sdot v18.4s, v7.16b, v0.4b[2]\n"
- "ldr x9, [x13, #0xe8]\n"
+ "mov v8.d[1], x23\n"
+ "ldr d10, [x10, #0xd0]\n"
".inst 0x4f80e913 // sdot v19.4s, v8.16b, v0.4b[2]\n"
- "ldr x28, [x13, #0xf8]\n"
- "mov v9.d[1], x24\n"
- "mov v10.d[1], x20\n"
- "add x10, x10, #0x10\n"
- "mov v4.d[1], x9\n"
- "add x13, x13, #0x100\n"
- "mov v5.d[1], x28\n"
+ "mov v9.d[1], x19\n"
+ "ldr x23, [x10, #0xd8]\n"
".inst 0x4fa0e930 // sdot v16.4s, v9.16b, v0.4b[3]\n"
+ "ldr d4, [x10, #0xe0]\n"
+ "ldr x19, [x10, #0xe8]\n"
+ "mov v10.d[1], x23\n"
+ "ldr d5, [x10, #0xf0]\n"
+ "ldr x24, [x10, #0xf8]\n"
+ "add x10, x10, #0x100\n"
".inst 0x4fa0e951 // sdot v17.4s, v10.16b, v0.4b[3]\n"
+ "mov v4.d[1], x19\n"
".inst 0x4fa0e892 // sdot v18.4s, v4.16b, v0.4b[3]\n"
+ "mov v5.d[1], x24\n"
".inst 0x4fa0e8b3 // sdot v19.4s, v5.16b, v0.4b[3]\n"
"tbnz %x[flags], #31, 8f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
"8:" // Height 1: Multiply loop: unique 1: skip row sum
- "ldr q0, [x10, #0x0]\n"
- "sub x11, x11, #0x10\n"
- "ldr q4, [x13, #0x0]\n"
- "cmp x11, #0x20\n"
- "ldr q5, [x13, #0x10]\n"
- "ldr q6, [x13, #0x20]\n"
- "ldr q7, [x13, #0x30]\n"
- "ldr q8, [x13, #0x40]\n"
- "ldr q9, [x13, #0x50]\n"
- "ldr q10, [x13, #0x60]\n"
- "prfm pldl1keep, [x10, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x26, x26, #0x10\n"
+ "ldr q0, [x25, #0x0]\n"
+ "cmp x26, #0x20\n"
+ "ldr q4, [x10, #0x0]\n"
"bge 7b\n"
"9:" // Height 1: Multiply loop: Single iteration only
".inst 0x4f80e090 // sdot v16.4s, v4.16b, v0.4b[0]\n"
- "ldr q4, [x13, #0x70]\n"
+ "ldr q5, [x10, #0x10]\n"
+ "ldr q6, [x10, #0x20]\n"
+ "sub x26, x26, #0x10\n"
+ "ldr q7, [x10, #0x30]\n"
+ "add x25, x25, #0x10\n"
".inst 0x4f80e0b1 // sdot v17.4s, v5.16b, v0.4b[0]\n"
- "ldr q5, [x13, #0x80]\n"
+ "ldr q8, [x10, #0x40]\n"
".inst 0x4f80e0d2 // sdot v18.4s, v6.16b, v0.4b[0]\n"
- "ldr q6, [x13, #0x90]\n"
+ "ldr q9, [x10, #0x50]\n"
".inst 0x4f80e0f3 // sdot v19.4s, v7.16b, v0.4b[0]\n"
- "ldr q7, [x13, #0xa0]\n"
+ "ldr q10, [x10, #0x60]\n"
".inst 0x4fa0e110 // sdot v16.4s, v8.16b, v0.4b[1]\n"
- "ldr q8, [x13, #0xb0]\n"
+ "ldr q4, [x10, #0x70]\n"
".inst 0x4fa0e131 // sdot v17.4s, v9.16b, v0.4b[1]\n"
- "ldr q9, [x13, #0xc0]\n"
+ "ldr q5, [x10, #0x80]\n"
".inst 0x4fa0e152 // sdot v18.4s, v10.16b, v0.4b[1]\n"
- "ldr q10, [x13, #0xd0]\n"
+ "ldr q6, [x10, #0x90]\n"
".inst 0x4fa0e093 // sdot v19.4s, v4.16b, v0.4b[1]\n"
- "ldr q4, [x13, #0xe0]\n"
+ "ldr q7, [x10, #0xa0]\n"
".inst 0x4f80e8b0 // sdot v16.4s, v5.16b, v0.4b[2]\n"
- "ldr q5, [x13, #0xf0]\n"
+ "ldr q8, [x10, #0xb0]\n"
".inst 0x4f80e8d1 // sdot v17.4s, v6.16b, v0.4b[2]\n"
- "sub x11, x11, #0x10\n"
+ "ldr q9, [x10, #0xc0]\n"
".inst 0x4f80e8f2 // sdot v18.4s, v7.16b, v0.4b[2]\n"
- "add x10, x10, #0x10\n"
+ "ldr q10, [x10, #0xd0]\n"
".inst 0x4f80e913 // sdot v19.4s, v8.16b, v0.4b[2]\n"
- "add x13, x13, #0x100\n"
+ "ldr q4, [x10, #0xe0]\n"
".inst 0x4fa0e930 // sdot v16.4s, v9.16b, v0.4b[3]\n"
+ "ldr q5, [x10, #0xf0]\n"
".inst 0x4fa0e951 // sdot v17.4s, v10.16b, v0.4b[3]\n"
+ "add x10, x10, #0x100\n"
".inst 0x4fa0e892 // sdot v18.4s, v4.16b, v0.4b[3]\n"
".inst 0x4fa0e8b3 // sdot v19.4s, v5.16b, v0.4b[3]\n"
"tbnz %x[flags], #31, 10f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
"10:" // Height 1: Multiply loop: unique 2: skip row sum
- "prfm pldl1keep, [x10, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"11:" // Height 1: Multiply loop: Main loop skip
- "cbz x11, 18f\n"
- "cmp x11, #0x4\n"
+ "cbz x26, 18f\n"
+ "cmp x26, #0x4\n"
"blt 14f\n"
"12:" // Height 1: Multiply loop: Odd block loop
- "ldr s0, [x10], #0x4\n"
+ "ldr s0, [x25], #0x4\n"
"tbnz %x[flags], #31, 13f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
"13:" // Height 1: Multiply loop: unique 3: skip row sum
- "ldr q6, [x13, #0x0]\n"
- "sub x11, x11, #0x4\n"
- "ldr q7, [x13, #0x10]\n"
- "cmp x11, #0x4\n"
- "ldr q8, [x13, #0x20]\n"
+ "ldr q6, [x10, #0x0]\n"
+ "sub x26, x26, #0x4\n"
+ "ldr q7, [x10, #0x10]\n"
+ "cmp x26, #0x4\n"
+ "ldr q8, [x10, #0x20]\n"
".inst 0x4f80e0d0 // sdot v16.4s, v6.16b, v0.4b[0]\n"
- "ldr q9, [x13, #0x30]\n"
+ "ldr q9, [x10, #0x30]\n"
".inst 0x4f80e0f1 // sdot v17.4s, v7.16b, v0.4b[0]\n"
+ "add x10, x10, #0x40\n"
".inst 0x4f80e112 // sdot v18.4s, v8.16b, v0.4b[0]\n"
- "add x13, x13, #0x40\n"
".inst 0x4f80e133 // sdot v19.4s, v9.16b, v0.4b[0]\n"
"bge 12b\n"
+ "cbz x26, 18f\n"
"14:" // Height 1: Multiply loop: Skip odd blocks
- "cbz x11, 18f\n"
- "tbz x11, #1, 15f\n"
- "ldr h0, [x10], #0x2\n"
- "tbz x11, #0, 16f\n"
- "ld1 { v0.b }[2], [x10]\n"
+ "tbz x26, #1, 15f\n"
+ "ldr h0, [x25], #0x2\n"
+ "tbz x26, #0, 16f\n"
+ "ld1 { v0.b }[2], [x25]\n"
"b 16f\n"
"15:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x10, #0x0]\n"
+ "ldr b0, [x25, #0x0]\n"
"16:" // Height 1: Multiply loop: Ragged operand read: Done
"tbnz %x[flags], #31, 17f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
"17:" // Height 1: Multiply loop: unique 4: skip row sum
- "ldr q10, [x13, #0x0]\n"
+ "ldr q10, [x10, #0x0]\n"
+ "ldr q4, [x10, #0x10]\n"
+ "ldr q5, [x10, #0x20]\n"
".inst 0x4f80e150 // sdot v16.4s, v10.16b, v0.4b[0]\n"
- "ldr q4, [x13, #0x10]\n"
+ "ldr q6, [x10, #0x30]\n"
".inst 0x4f80e091 // sdot v17.4s, v4.16b, v0.4b[0]\n"
- "ldr q5, [x13, #0x20]\n"
+ "add x10, x10, #0x40\n"
".inst 0x4f80e0b2 // sdot v18.4s, v5.16b, v0.4b[0]\n"
- "ldr q6, [x13, #0x30]\n"
".inst 0x4f80e0d3 // sdot v19.4s, v6.16b, v0.4b[0]\n"
- "add x13, x13, #0x40\n"
"18:" // Height 1: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x12, x12, #0x1\n"
- "cmp x12, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 4b\n"
- "prfm pstl1keep, [x14, #0x0]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
"tbnz %x[flags], #31, 19f\n"
"addp v11.4s, v11.4s, v11.4s\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1r { v1.4s }, [x23]\n"
+ "add x22, %x[qp], %[b_offset]\n"
+ "ld1r { v1.4s }, [x22]\n"
"neg v1.4s, v1.4s\n"
"addp v11.4s, v11.4s, v11.4s\n"
"mul v11.4s, v11.4s, v1.4s\n"
"19:" // Height 1: skip row sum fixup
- "ldr q0, [x16, #0x0]\n"
"add v16.4s, v16.4s, v11.4s\n"
- "ldr q1, [x16, #0x10]\n"
"add v17.4s, v17.4s, v11.4s\n"
- "ldr q2, [x16, #0x20]\n"
"add v18.4s, v18.4s, v11.4s\n"
- "ldr q3, [x16, #0x30]\n"
"add v19.4s, v19.4s, v11.4s\n"
+ "ldr q0, [x9, #0x0]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
+ "ldr q1, [x9, #0x10]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
+ "ldr q2, [x9, #0x20]\n"
+ "add x22, %x[qp], %[per_layer_mul]\n"
"add v16.4s, v16.4s, v0.4s\n"
+ "ldr q3, [x9, #0x30]\n"
"add v17.4s, v17.4s, v1.4s\n"
+ "ld1r { v0.4s }, [x23]\n"
"add v18.4s, v18.4s, v2.4s\n"
+ "ld1r { v4.4s }, [x22]\n"
"add v19.4s, v19.4s, v3.4s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x23]\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x23]\n"
+ "add x9, x9, #0x40\n"
"sqrdmulh v16.4s, v16.4s, v4.4s\n"
"sqrdmulh v17.4s, v17.4s, v4.4s\n"
"sqrdmulh v18.4s, v18.4s, v4.4s\n"
"sqrdmulh v19.4s, v19.4s, v4.4s\n"
- "add x16, x16, #0x40\n"
"tbz %x[flags], #5, 20f\n"
"and v4.16b, v16.16b, v0.16b\n"
"and v5.16b, v17.16b, v0.16b\n"
@@ -317,90 +329,90 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"srshl v17.4s, v17.4s, v0.4s\n"
"srshl v18.4s, v18.4s, v0.4s\n"
"srshl v19.4s, v19.4s, v0.4s\n"
- "add x23, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
+ "add x22, %x[qp], %[c_offset]\n"
+ "add x23, %x[qp], %[minval]\n"
+ "ld1r { v4.4s }, [x22]\n"
+ "add x22, %x[qp], %[maxval]\n"
+ "ld1r { v5.4s }, [x23]\n"
+ "cmp x11, #0x10\n"
+ "ld1r { v6.4s }, [x22]\n"
"add v16.4s, v16.4s, v4.4s\n"
"add v17.4s, v17.4s, v4.4s\n"
"add v18.4s, v18.4s, v4.4s\n"
"add v19.4s, v19.4s, v4.4s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x23]\n"
"smin v16.4s, v16.4s, v6.4s\n"
"smin v17.4s, v17.4s, v6.4s\n"
"smin v18.4s, v18.4s, v6.4s\n"
"smin v19.4s, v19.4s, v6.4s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x23]\n"
"smax v16.4s, v16.4s, v5.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
"smax v18.4s, v18.4s, v5.4s\n"
"smax v19.4s, v19.4s, v5.4s\n"
"uzp1 v16.8h, v16.8h, v17.8h\n"
"uzp1 v17.8h, v18.8h, v19.8h\n"
- "cmp x15, #0x10\n"
"uzp1 v16.16b, v16.16b, v17.16b\n"
"bge 29f\n"
- "tbz x15, #3, 24f\n"
- "str d16, [x14], #0x8\n"
- "tbz x15, #2, 22f\n"
- "st1 { v16.s }[2], [x14], #0x4\n"
- "tbz x15, #1, 21f\n"
- "st1 { v16.h }[6], [x14], #0x2\n"
- "tbz x15, #0, 28f\n"
- "st1 { v16.b }[14], [x14]\n"
+ "tbz x11, #3, 24f\n"
+ "str d16, [x28], #0x8\n"
+ "tbz x11, #2, 22f\n"
+ "st1 { v16.s }[2], [x28], #0x4\n"
+ "tbz x11, #1, 21f\n"
+ "st1 { v16.h }[6], [x28], #0x2\n"
+ "tbz x11, #0, 28f\n"
+ "st1 { v16.b }[14], [x28]\n"
"b 28f\n"
"21:" // Height 1: Partial direct writeback: partial_1_12
- "tbz x15, #0, 28f\n"
- "st1 { v16.b }[12], [x14]\n"
+ "tbz x11, #0, 28f\n"
+ "st1 { v16.b }[12], [x28]\n"
"b 28f\n"
"22:" // Height 1: Partial direct writeback: partial_2_8
- "tbz x15, #1, 23f\n"
- "st1 { v16.h }[4], [x14], #0x2\n"
- "tbz x15, #0, 28f\n"
- "st1 { v16.b }[10], [x14]\n"
+ "tbz x11, #1, 23f\n"
+ "st1 { v16.h }[4], [x28], #0x2\n"
+ "tbz x11, #0, 28f\n"
+ "st1 { v16.b }[10], [x28]\n"
"b 28f\n"
"23:" // Height 1: Partial direct writeback: partial_1_8
- "tbz x15, #0, 28f\n"
- "st1 { v16.b }[8], [x14]\n"
+ "tbz x11, #0, 28f\n"
+ "st1 { v16.b }[8], [x28]\n"
"b 28f\n"
"24:" // Height 1: Partial direct writeback: partial_4_0
- "tbz x15, #2, 26f\n"
- "str s16, [x14], #0x4\n"
- "tbz x15, #1, 25f\n"
- "st1 { v16.h }[2], [x14], #0x2\n"
- "tbz x15, #0, 28f\n"
- "st1 { v16.b }[6], [x14]\n"
+ "tbz x11, #2, 26f\n"
+ "str s16, [x28], #0x4\n"
+ "tbz x11, #1, 25f\n"
+ "st1 { v16.h }[2], [x28], #0x2\n"
+ "tbz x11, #0, 28f\n"
+ "st1 { v16.b }[6], [x28]\n"
"b 28f\n"
"25:" // Height 1: Partial direct writeback: partial_1_4
- "tbz x15, #0, 28f\n"
- "st1 { v16.b }[4], [x14]\n"
+ "tbz x11, #0, 28f\n"
+ "st1 { v16.b }[4], [x28]\n"
"b 28f\n"
"26:" // Height 1: Partial direct writeback: partial_2_0
- "tbz x15, #1, 27f\n"
- "str h16, [x14], #0x2\n"
- "tbz x15, #0, 28f\n"
- "st1 { v16.b }[2], [x14]\n"
+ "tbz x11, #1, 27f\n"
+ "str h16, [x28], #0x2\n"
+ "tbz x11, #0, 28f\n"
+ "st1 { v16.b }[2], [x28]\n"
"b 28f\n"
"27:" // Height 1: Partial direct writeback: partial_1_0
- "str b16, [x14, #0x0]\n"
+ "str b16, [x28, #0x0]\n"
"28:" // Height 1: Partial direct writeback: Done
"b 30f\n"
"29:" // Height 1: Full writeback
- "str q16, [x14, #0x0]\n"
- "add x14, x14, #0x10\n"
+ "str q16, [x28, #0x0]\n"
+ "add x28, x28, #0x10\n"
"30:" // Height 1: Writeback done
- "subs x15, x15, #0x10\n"
+ "subs x11, x11, #0x10\n"
"bgt 2b\n"
"b 122f\n"
"31:" // Height 2
- "mov x16, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"movi v12.4s, #0x0\n"
- "bic %x[flags], %x[flags], #0x80000000\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"movi v15.16b, #0x1\n"
- "ldr x15, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x13, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x14, %x[output_ptr]\n"
+ "mov x9, %x[col_bias]\n"
+ "bic %x[flags], %x[flags], #0x80000000\n"
+ "mov x28, %x[output_ptr]\n"
"32:" // Height 2: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -411,98 +423,110 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"movi v22.4s, #0x0\n"
"movi v23.4s, #0x0\n"
"33:" // Height 2: setup done
- "mov x12, #0x0\n"
+ "mov x27, #0x0\n"
"34:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w11, [x20, x12, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 35f\n"
- "ldr x21, [%x[input_ptr], x12, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x10, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "cbnz x12, 36f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x10, x10, x20\n"
- "add x23, x23, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "cbnz x27, 36f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x22, x22, x19\n"
"b 36f\n"
"35:" // Height 2: setup direct input
- "mov x10, %x[input_ptr]\n"
- "add x23, x10, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x22, x25, x19\n"
"36:" // Height 2: input setup done
- "cmp x11, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 41f\n"
- "ldr q0, [x10, #0x0]\n"
- "cmp x11, #0x20\n"
- "ldr q1, [x23, #0x0]\n"
- "ldr q4, [x13, #0x0]\n"
- "ldr q5, [x13, #0x10]\n"
- "ldr q6, [x13, #0x20]\n"
- "ldr q7, [x13, #0x30]\n"
- "ldr q8, [x13, #0x40]\n"
- "ldr q9, [x13, #0x50]\n"
- "ldr q10, [x13, #0x60]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x22, #0x0]\n"
+ "cmp x26, #0x20\n"
+ "ldr q4, [x10, #0x0]\n"
"blt 39f\n"
"37:" // Height 2: Multiply loop: Main loop head
".inst 0x4f80e090 // sdot v16.4s, v4.16b, v0.4b[0]\n"
- "ldr x9, [x13, #0x78]\n"
+ "ldr d5, [x10, #0x10]\n"
".inst 0x4f81e094 // sdot v20.4s, v4.16b, v1.4b[0]\n"
- "ldr d4, [x13, #0x70]\n"
+ "ldr x24, [x10, #0x18]\n"
+ "ldr d6, [x10, #0x20]\n"
+ "add x25, x25, #0x10\n"
+ "ldr x23, [x10, #0x28]\n"
+ "add x22, x22, #0x10\n"
+ "mov v5.d[1], x24\n"
+ "ldr d7, [x10, #0x30]\n"
+ "ldr x19, [x10, #0x38]\n"
".inst 0x4f80e0b1 // sdot v17.4s, v5.16b, v0.4b[0]\n"
- "mov v4.d[1], x9\n"
+ "mov v6.d[1], x23\n"
".inst 0x4f81e0b5 // sdot v21.4s, v5.16b, v1.4b[0]\n"
- "ldr d5, [x13, #0x80]\n"
+ "ldr d8, [x10, #0x40]\n"
".inst 0x4f80e0d2 // sdot v18.4s, v6.16b, v0.4b[0]\n"
- "ldr x28, [x13, #0x88]\n"
+ "mov v7.d[1], x19\n"
".inst 0x4f81e0d6 // sdot v22.4s, v6.16b, v1.4b[0]\n"
- "ldr d6, [x13, #0x90]\n"
+ "ldr x23, [x10, #0x48]\n"
".inst 0x4f80e0f3 // sdot v19.4s, v7.16b, v0.4b[0]\n"
- "ldr x27, [x13, #0x98]\n"
+ "ldr d9, [x10, #0x50]\n"
".inst 0x4f81e0f7 // sdot v23.4s, v7.16b, v1.4b[0]\n"
- "ldr d7, [x13, #0xa0]\n"
- "ldr x26, [x13, #0xa8]\n"
+ "ldr x19, [x10, #0x58]\n"
+ "mov v8.d[1], x23\n"
+ "ldr d10, [x10, #0x60]\n"
+ "ldr x23, [x10, #0x68]\n"
".inst 0x4fa0e110 // sdot v16.4s, v8.16b, v0.4b[1]\n"
+ "mov v9.d[1], x19\n"
".inst 0x4fa1e114 // sdot v20.4s, v8.16b, v1.4b[1]\n"
- "ldr d8, [x13, #0xb0]\n"
- "ldr x25, [x13, #0xb8]\n"
+ "ldr d4, [x10, #0x70]\n"
".inst 0x4fa0e131 // sdot v17.4s, v9.16b, v0.4b[1]\n"
+ "mov v10.d[1], x23\n"
".inst 0x4fa1e135 // sdot v21.4s, v9.16b, v1.4b[1]\n"
- "ldr d9, [x13, #0xc0]\n"
+ "ldr x19, [x10, #0x78]\n"
".inst 0x4fa0e152 // sdot v18.4s, v10.16b, v0.4b[1]\n"
- "mov v5.d[1], x28\n"
+ "ldr d5, [x10, #0x80]\n"
".inst 0x4fa1e156 // sdot v22.4s, v10.16b, v1.4b[1]\n"
- "ldr d10, [x13, #0xd0]\n"
+ "ldr x24, [x10, #0x88]\n"
+ "mov v4.d[1], x19\n"
+ "ldr d6, [x10, #0x90]\n"
+ "ldr x23, [x10, #0x98]\n"
".inst 0x4fa0e093 // sdot v19.4s, v4.16b, v0.4b[1]\n"
- "mov v6.d[1], x27\n"
+ "mov v5.d[1], x24\n"
".inst 0x4fa1e097 // sdot v23.4s, v4.16b, v1.4b[1]\n"
- "ldr d4, [x13, #0xe0]\n"
- "mov v7.d[1], x26\n"
- "ldr x24, [x13, #0xc8]\n"
- "mov v8.d[1], x25\n"
- "ldr x20, [x13, #0xd8]\n"
- "ldr x9, [x13, #0xe8]\n"
+ "ldr d7, [x10, #0xa0]\n"
".inst 0x4f80e8b0 // sdot v16.4s, v5.16b, v0.4b[2]\n"
+ "mov v6.d[1], x23\n"
".inst 0x4f81e8b4 // sdot v20.4s, v5.16b, v1.4b[2]\n"
- "ldr d5, [x13, #0xf0]\n"
- "ldr x28, [x13, #0xf8]\n"
+ "ldr x19, [x10, #0xa8]\n"
".inst 0x4f80e8d1 // sdot v17.4s, v6.16b, v0.4b[2]\n"
+ "ldr d8, [x10, #0xb0]\n"
".inst 0x4f81e8d5 // sdot v21.4s, v6.16b, v1.4b[2]\n"
- "mov v9.d[1], x24\n"
+ "ldr x23, [x10, #0xb8]\n"
+ "mov v7.d[1], x19\n"
+ "ldr d9, [x10, #0xc0]\n"
+ "ldr x19, [x10, #0xc8]\n"
".inst 0x4f80e8f2 // sdot v18.4s, v7.16b, v0.4b[2]\n"
- "mov v10.d[1], x20\n"
+ "mov v8.d[1], x23\n"
".inst 0x4f81e8f6 // sdot v22.4s, v7.16b, v1.4b[2]\n"
- "mov v4.d[1], x9\n"
+ "ldr d10, [x10, #0xd0]\n"
".inst 0x4f80e913 // sdot v19.4s, v8.16b, v0.4b[2]\n"
- "mov v5.d[1], x28\n"
+ "mov v9.d[1], x19\n"
".inst 0x4f81e917 // sdot v23.4s, v8.16b, v1.4b[2]\n"
- "add x10, x10, #0x10\n"
- "add x23, x23, #0x10\n"
- "add x13, x13, #0x100\n"
+ "ldr x23, [x10, #0xd8]\n"
".inst 0x4fa0e930 // sdot v16.4s, v9.16b, v0.4b[3]\n"
+ "ldr d4, [x10, #0xe0]\n"
".inst 0x4fa1e934 // sdot v20.4s, v9.16b, v1.4b[3]\n"
+ "ldr x19, [x10, #0xe8]\n"
+ "mov v10.d[1], x23\n"
+ "ldr d5, [x10, #0xf0]\n"
+ "ldr x24, [x10, #0xf8]\n"
+ "add x10, x10, #0x100\n"
".inst 0x4fa0e951 // sdot v17.4s, v10.16b, v0.4b[3]\n"
+ "mov v4.d[1], x19\n"
".inst 0x4fa1e955 // sdot v21.4s, v10.16b, v1.4b[3]\n"
".inst 0x4fa0e892 // sdot v18.4s, v4.16b, v0.4b[3]\n"
+ "mov v5.d[1], x24\n"
".inst 0x4fa1e896 // sdot v22.4s, v4.16b, v1.4b[3]\n"
".inst 0x4fa0e8b3 // sdot v19.4s, v5.16b, v0.4b[3]\n"
".inst 0x4fa1e8b7 // sdot v23.4s, v5.16b, v1.4b[3]\n"
@@ -510,53 +534,53 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
".inst 0x4e8f942c // sdot v12.4s, v1.16b, v15.16b\n"
"38:" // Height 2: Multiply loop: unique 5: skip row sum
- "ldr q0, [x10, #0x0]\n"
- "sub x11, x11, #0x10\n"
- "ldr q1, [x23, #0x0]\n"
- "cmp x11, #0x20\n"
- "ldr q4, [x13, #0x0]\n"
- "ldr q5, [x13, #0x10]\n"
- "ldr q6, [x13, #0x20]\n"
- "ldr q7, [x13, #0x30]\n"
- "ldr q8, [x13, #0x40]\n"
- "ldr q9, [x13, #0x50]\n"
- "ldr q10, [x13, #0x60]\n"
- "prfm pldl1keep, [x10, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x26, x26, #0x10\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "cmp x26, #0x20\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x22, #0x0]\n"
+ "ldr q4, [x10, #0x0]\n"
"bge 37b\n"
"39:" // Height 2: Multiply loop: Single iteration only
".inst 0x4f80e090 // sdot v16.4s, v4.16b, v0.4b[0]\n"
- "sub x11, x11, #0x10\n"
+ "ldr q5, [x10, #0x10]\n"
".inst 0x4f81e094 // sdot v20.4s, v4.16b, v1.4b[0]\n"
- "ldr q4, [x13, #0x70]\n"
+ "ldr q6, [x10, #0x20]\n"
+ "ldr q7, [x10, #0x30]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x4f80e0b1 // sdot v17.4s, v5.16b, v0.4b[0]\n"
- "add x10, x10, #0x10\n"
+ "ldr q8, [x10, #0x40]\n"
".inst 0x4f81e0b5 // sdot v21.4s, v5.16b, v1.4b[0]\n"
- "ldr q5, [x13, #0x80]\n"
+ "ldr q9, [x10, #0x50]\n"
".inst 0x4f80e0d2 // sdot v18.4s, v6.16b, v0.4b[0]\n"
- "add x23, x23, #0x10\n"
+ "ldr q10, [x10, #0x60]\n"
".inst 0x4f81e0d6 // sdot v22.4s, v6.16b, v1.4b[0]\n"
- "ldr q6, [x13, #0x90]\n"
+ "ldr q4, [x10, #0x70]\n"
".inst 0x4f80e0f3 // sdot v19.4s, v7.16b, v0.4b[0]\n"
+ "ldr q5, [x10, #0x80]\n"
".inst 0x4f81e0f7 // sdot v23.4s, v7.16b, v1.4b[0]\n"
- "ldr q7, [x13, #0xa0]\n"
+ "ldr q6, [x10, #0x90]\n"
".inst 0x4fa0e110 // sdot v16.4s, v8.16b, v0.4b[1]\n"
+ "ldr q7, [x10, #0xa0]\n"
".inst 0x4fa1e114 // sdot v20.4s, v8.16b, v1.4b[1]\n"
- "ldr q8, [x13, #0xb0]\n"
+ "ldr q8, [x10, #0xb0]\n"
".inst 0x4fa0e131 // sdot v17.4s, v9.16b, v0.4b[1]\n"
+ "add x25, x25, #0x10\n"
".inst 0x4fa1e135 // sdot v21.4s, v9.16b, v1.4b[1]\n"
- "ldr q9, [x13, #0xc0]\n"
+ "ldr q9, [x10, #0xc0]\n"
".inst 0x4fa0e152 // sdot v18.4s, v10.16b, v0.4b[1]\n"
+ "add x22, x22, #0x10\n"
".inst 0x4fa1e156 // sdot v22.4s, v10.16b, v1.4b[1]\n"
- "ldr q10, [x13, #0xd0]\n"
+ "ldr q10, [x10, #0xd0]\n"
".inst 0x4fa0e093 // sdot v19.4s, v4.16b, v0.4b[1]\n"
".inst 0x4fa1e097 // sdot v23.4s, v4.16b, v1.4b[1]\n"
- "ldr q4, [x13, #0xe0]\n"
+ "ldr q4, [x10, #0xe0]\n"
".inst 0x4f80e8b0 // sdot v16.4s, v5.16b, v0.4b[2]\n"
".inst 0x4f81e8b4 // sdot v20.4s, v5.16b, v1.4b[2]\n"
- "ldr q5, [x13, #0xf0]\n"
+ "ldr q5, [x10, #0xf0]\n"
".inst 0x4f80e8d1 // sdot v17.4s, v6.16b, v0.4b[2]\n"
- "add x13, x13, #0x100\n"
+ "add x10, x10, #0x100\n"
".inst 0x4f81e8d5 // sdot v21.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f80e8f2 // sdot v18.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8f6 // sdot v22.4s, v7.16b, v1.4b[2]\n"
@@ -574,143 +598,143 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
".inst 0x4e8f942c // sdot v12.4s, v1.16b, v15.16b\n"
"40:" // Height 2: Multiply loop: unique 6: skip row sum
- "prfm pldl1keep, [x10, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
"41:" // Height 2: Multiply loop: Main loop skip
- "cbz x11, 48f\n"
- "cmp x11, #0x4\n"
+ "cbz x26, 48f\n"
+ "cmp x26, #0x4\n"
"blt 44f\n"
"42:" // Height 2: Multiply loop: Odd block loop
- "ldr s0, [x10], #0x4\n"
- "ldr s1, [x23], #0x4\n"
+ "ldr s0, [x25], #0x4\n"
+ "ldr s1, [x22], #0x4\n"
"tbnz %x[flags], #31, 43f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
".inst 0x4e8f942c // sdot v12.4s, v1.16b, v15.16b\n"
"43:" // Height 2: Multiply loop: unique 7: skip row sum
- "ldr q6, [x13, #0x0]\n"
- "sub x11, x11, #0x4\n"
- "ldr q7, [x13, #0x10]\n"
- "cmp x11, #0x4\n"
- "ldr q8, [x13, #0x20]\n"
+ "ldr q6, [x10, #0x0]\n"
+ "sub x26, x26, #0x4\n"
+ "ldr q7, [x10, #0x10]\n"
+ "cmp x26, #0x4\n"
+ "ldr q8, [x10, #0x20]\n"
".inst 0x4f80e0d0 // sdot v16.4s, v6.16b, v0.4b[0]\n"
- "ldr q9, [x13, #0x30]\n"
+ "ldr q9, [x10, #0x30]\n"
".inst 0x4f81e0d4 // sdot v20.4s, v6.16b, v1.4b[0]\n"
+ "add x10, x10, #0x40\n"
".inst 0x4f80e0f1 // sdot v17.4s, v7.16b, v0.4b[0]\n"
- "add x13, x13, #0x40\n"
".inst 0x4f81e0f5 // sdot v21.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f80e112 // sdot v18.4s, v8.16b, v0.4b[0]\n"
".inst 0x4f81e116 // sdot v22.4s, v8.16b, v1.4b[0]\n"
".inst 0x4f80e133 // sdot v19.4s, v9.16b, v0.4b[0]\n"
".inst 0x4f81e137 // sdot v23.4s, v9.16b, v1.4b[0]\n"
"bge 42b\n"
+ "cbz x26, 48f\n"
"44:" // Height 2: Multiply loop: Skip odd blocks
- "cbz x11, 48f\n"
- "tbz x11, #1, 45f\n"
- "ldr h0, [x10], #0x2\n"
- "ldr h1, [x23], #0x2\n"
- "tbz x11, #0, 46f\n"
- "ld1 { v0.b }[2], [x10]\n"
- "ld1 { v1.b }[2], [x23]\n"
+ "tbz x26, #1, 45f\n"
+ "ldr h0, [x25], #0x2\n"
+ "ldr h1, [x22], #0x2\n"
+ "tbz x26, #0, 46f\n"
+ "ld1 { v0.b }[2], [x25]\n"
+ "ld1 { v1.b }[2], [x22]\n"
"b 46f\n"
"45:" // Height 2: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x10, #0x0]\n"
- "ldr b1, [x23, #0x0]\n"
+ "ldr b0, [x25, #0x0]\n"
+ "ldr b1, [x22, #0x0]\n"
"46:" // Height 2: Multiply loop: Ragged operand read: Done
"tbnz %x[flags], #31, 47f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
".inst 0x4e8f942c // sdot v12.4s, v1.16b, v15.16b\n"
"47:" // Height 2: Multiply loop: unique 8: skip row sum
- "ldr q10, [x13, #0x0]\n"
+ "ldr q10, [x10, #0x0]\n"
+ "ldr q4, [x10, #0x10]\n"
+ "ldr q5, [x10, #0x20]\n"
".inst 0x4f80e150 // sdot v16.4s, v10.16b, v0.4b[0]\n"
- "ldr q4, [x13, #0x10]\n"
+ "ldr q6, [x10, #0x30]\n"
".inst 0x4f81e154 // sdot v20.4s, v10.16b, v1.4b[0]\n"
- "ldr q5, [x13, #0x20]\n"
+ "add x10, x10, #0x40\n"
".inst 0x4f80e091 // sdot v17.4s, v4.16b, v0.4b[0]\n"
- "ldr q6, [x13, #0x30]\n"
".inst 0x4f81e095 // sdot v21.4s, v4.16b, v1.4b[0]\n"
".inst 0x4f80e0b2 // sdot v18.4s, v5.16b, v0.4b[0]\n"
- "add x13, x13, #0x40\n"
".inst 0x4f81e0b6 // sdot v22.4s, v5.16b, v1.4b[0]\n"
".inst 0x4f80e0d3 // sdot v19.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0d7 // sdot v23.4s, v6.16b, v1.4b[0]\n"
"48:" // Height 2: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x12, x12, #0x1\n"
- "cmp x12, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 34b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x14, x20\n"
- "prfm pstl1keep, [x14, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x21, x28, x19\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
"tbnz %x[flags], #31, 49f\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1r { v2.4s }, [x23]\n"
+ "add x22, %x[qp], %[b_offset]\n"
+ "ld1r { v2.4s }, [x22]\n"
"neg v2.4s, v2.4s\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
"mul v11.4s, v11.4s, v2.4s\n"
"mul v12.4s, v12.4s, v2.4s\n"
"49:" // Height 2: skip row sum fixup
- "ldr q0, [x16, #0x0]\n"
"add v16.4s, v16.4s, v11.4s\n"
- "ldr q1, [x16, #0x10]\n"
"add v17.4s, v17.4s, v11.4s\n"
- "ldr q2, [x16, #0x20]\n"
"add v18.4s, v18.4s, v11.4s\n"
- "ldr q3, [x16, #0x30]\n"
"add v19.4s, v19.4s, v11.4s\n"
"add v20.4s, v20.4s, v12.4s\n"
"add v21.4s, v21.4s, v12.4s\n"
"add v22.4s, v22.4s, v12.4s\n"
"add v23.4s, v23.4s, v12.4s\n"
+ "ldr q0, [x9, #0x0]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
+ "ldr q1, [x9, #0x10]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
+ "ldr q2, [x9, #0x20]\n"
+ "add x22, %x[qp], %[per_layer_mul]\n"
"add v16.4s, v16.4s, v0.4s\n"
- "add v17.4s, v17.4s, v1.4s\n"
- "add v18.4s, v18.4s, v2.4s\n"
- "add v19.4s, v19.4s, v3.4s\n"
"add v20.4s, v20.4s, v0.4s\n"
+ "add v17.4s, v17.4s, v1.4s\n"
"add v21.4s, v21.4s, v1.4s\n"
+ "add v18.4s, v18.4s, v2.4s\n"
"add v22.4s, v22.4s, v2.4s\n"
- "add v23.4s, v23.4s, v3.4s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x23]\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
+ "ldr q3, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
"ld1r { v0.4s }, [x23]\n"
+ "ld1r { v4.4s }, [x22]\n"
+ "add v19.4s, v19.4s, v3.4s\n"
+ "add v23.4s, v23.4s, v3.4s\n"
"sqrdmulh v16.4s, v16.4s, v4.4s\n"
"sqrdmulh v17.4s, v17.4s, v4.4s\n"
"sqrdmulh v18.4s, v18.4s, v4.4s\n"
- "sqrdmulh v19.4s, v19.4s, v4.4s\n"
"sqrdmulh v20.4s, v20.4s, v4.4s\n"
"sqrdmulh v21.4s, v21.4s, v4.4s\n"
"sqrdmulh v22.4s, v22.4s, v4.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v4.4s\n"
"sqrdmulh v23.4s, v23.4s, v4.4s\n"
- "add x16, x16, #0x40\n"
"tbz %x[flags], #5, 50f\n"
"and v4.16b, v16.16b, v0.16b\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v4.4s\n"
"and v5.16b, v17.16b, v0.16b\n"
"and v6.16b, v18.16b, v0.16b\n"
- "and v7.16b, v19.16b, v0.16b\n"
"and v8.16b, v20.16b, v0.16b\n"
"and v9.16b, v21.16b, v0.16b\n"
"and v10.16b, v22.16b, v0.16b\n"
- "and v4.16b, v23.16b, v0.16b\n"
+ "and v7.16b, v19.16b, v0.16b\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
"sshr v8.4s, v8.4s, #0x1f\n"
"sshr v9.4s, v9.4s, #0x1f\n"
"sshr v10.4s, v10.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
+ "sqadd v16.4s, v16.4s, v4.4s\n"
"sqadd v17.4s, v17.4s, v5.4s\n"
"sqadd v18.4s, v18.4s, v6.4s\n"
- "sqadd v19.4s, v19.4s, v7.4s\n"
"sqadd v20.4s, v20.4s, v8.4s\n"
"sqadd v21.4s, v21.4s, v9.4s\n"
"sqadd v22.4s, v22.4s, v10.4s\n"
+ "sqadd v19.4s, v19.4s, v7.4s\n"
+ "and v4.16b, v23.16b, v0.16b\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
"sqadd v23.4s, v23.4s, v4.4s\n"
"50:" // Height 2: no shift correction
"srshl v16.4s, v16.4s, v0.4s\n"
@@ -721,8 +745,13 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"srshl v21.4s, v21.4s, v0.4s\n"
"srshl v22.4s, v22.4s, v0.4s\n"
"srshl v23.4s, v23.4s, v0.4s\n"
- "add x23, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
+ "add x22, %x[qp], %[c_offset]\n"
+ "add x23, %x[qp], %[minval]\n"
+ "ld1r { v4.4s }, [x22]\n"
+ "add x22, %x[qp], %[maxval]\n"
+ "ld1r { v5.4s }, [x23]\n"
+ "cmp x11, #0x10\n"
+ "ld1r { v6.4s }, [x22]\n"
"add v16.4s, v16.4s, v4.4s\n"
"add v17.4s, v17.4s, v4.4s\n"
"add v18.4s, v18.4s, v4.4s\n"
@@ -731,8 +760,6 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"add v21.4s, v21.4s, v4.4s\n"
"add v22.4s, v22.4s, v4.4s\n"
"add v23.4s, v23.4s, v4.4s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x23]\n"
"smin v16.4s, v16.4s, v6.4s\n"
"smin v17.4s, v17.4s, v6.4s\n"
"smin v18.4s, v18.4s, v6.4s\n"
@@ -741,8 +768,6 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"smin v21.4s, v21.4s, v6.4s\n"
"smin v22.4s, v22.4s, v6.4s\n"
"smin v23.4s, v23.4s, v6.4s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x23]\n"
"smax v16.4s, v16.4s, v5.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
"smax v18.4s, v18.4s, v5.4s\n"
@@ -755,88 +780,87 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"uzp1 v17.8h, v18.8h, v19.8h\n"
"uzp1 v20.8h, v20.8h, v21.8h\n"
"uzp1 v21.8h, v22.8h, v23.8h\n"
- "cmp x15, #0x10\n"
"uzp1 v16.16b, v16.16b, v17.16b\n"
"uzp1 v20.16b, v20.16b, v21.16b\n"
"bge 59f\n"
- "tbz x15, #3, 54f\n"
- "str d16, [x14], #0x8\n"
- "str d20, [x22], #0x8\n"
- "tbz x15, #2, 52f\n"
- "st1 { v16.s }[2], [x14], #0x4\n"
- "st1 { v20.s }[2], [x22], #0x4\n"
- "tbz x15, #1, 51f\n"
- "st1 { v16.h }[6], [x14], #0x2\n"
- "st1 { v20.h }[6], [x22], #0x2\n"
- "tbz x15, #0, 58f\n"
- "st1 { v16.b }[14], [x14]\n"
- "st1 { v20.b }[14], [x22]\n"
+ "tbz x11, #3, 54f\n"
+ "str d16, [x28], #0x8\n"
+ "str d20, [x21], #0x8\n"
+ "tbz x11, #2, 52f\n"
+ "st1 { v16.s }[2], [x28], #0x4\n"
+ "st1 { v20.s }[2], [x21], #0x4\n"
+ "tbz x11, #1, 51f\n"
+ "st1 { v16.h }[6], [x28], #0x2\n"
+ "st1 { v20.h }[6], [x21], #0x2\n"
+ "tbz x11, #0, 58f\n"
+ "st1 { v16.b }[14], [x28]\n"
+ "st1 { v20.b }[14], [x21]\n"
"b 58f\n"
"51:" // Height 2: Partial direct writeback: partial_1_12
- "tbz x15, #0, 58f\n"
- "st1 { v16.b }[12], [x14]\n"
- "st1 { v20.b }[12], [x22]\n"
+ "tbz x11, #0, 58f\n"
+ "st1 { v16.b }[12], [x28]\n"
+ "st1 { v20.b }[12], [x21]\n"
"b 58f\n"
"52:" // Height 2: Partial direct writeback: partial_2_8
- "tbz x15, #1, 53f\n"
- "st1 { v16.h }[4], [x14], #0x2\n"
- "st1 { v20.h }[4], [x22], #0x2\n"
- "tbz x15, #0, 58f\n"
- "st1 { v16.b }[10], [x14]\n"
- "st1 { v20.b }[10], [x22]\n"
+ "tbz x11, #1, 53f\n"
+ "st1 { v16.h }[4], [x28], #0x2\n"
+ "st1 { v20.h }[4], [x21], #0x2\n"
+ "tbz x11, #0, 58f\n"
+ "st1 { v16.b }[10], [x28]\n"
+ "st1 { v20.b }[10], [x21]\n"
"b 58f\n"
"53:" // Height 2: Partial direct writeback: partial_1_8
- "tbz x15, #0, 58f\n"
- "st1 { v16.b }[8], [x14]\n"
- "st1 { v20.b }[8], [x22]\n"
+ "tbz x11, #0, 58f\n"
+ "st1 { v16.b }[8], [x28]\n"
+ "st1 { v20.b }[8], [x21]\n"
"b 58f\n"
"54:" // Height 2: Partial direct writeback: partial_4_0
- "tbz x15, #2, 56f\n"
- "str s16, [x14], #0x4\n"
- "str s20, [x22], #0x4\n"
- "tbz x15, #1, 55f\n"
- "st1 { v16.h }[2], [x14], #0x2\n"
- "st1 { v20.h }[2], [x22], #0x2\n"
- "tbz x15, #0, 58f\n"
- "st1 { v16.b }[6], [x14]\n"
- "st1 { v20.b }[6], [x22]\n"
+ "tbz x11, #2, 56f\n"
+ "str s16, [x28], #0x4\n"
+ "str s20, [x21], #0x4\n"
+ "tbz x11, #1, 55f\n"
+ "st1 { v16.h }[2], [x28], #0x2\n"
+ "st1 { v20.h }[2], [x21], #0x2\n"
+ "tbz x11, #0, 58f\n"
+ "st1 { v16.b }[6], [x28]\n"
+ "st1 { v20.b }[6], [x21]\n"
"b 58f\n"
"55:" // Height 2: Partial direct writeback: partial_1_4
- "tbz x15, #0, 58f\n"
- "st1 { v16.b }[4], [x14]\n"
- "st1 { v20.b }[4], [x22]\n"
+ "tbz x11, #0, 58f\n"
+ "st1 { v16.b }[4], [x28]\n"
+ "st1 { v20.b }[4], [x21]\n"
"b 58f\n"
"56:" // Height 2: Partial direct writeback: partial_2_0
- "tbz x15, #1, 57f\n"
- "str h16, [x14], #0x2\n"
- "str h20, [x22], #0x2\n"
- "tbz x15, #0, 58f\n"
- "st1 { v16.b }[2], [x14]\n"
- "st1 { v20.b }[2], [x22]\n"
+ "tbz x11, #1, 57f\n"
+ "str h16, [x28], #0x2\n"
+ "str h20, [x21], #0x2\n"
+ "tbz x11, #0, 58f\n"
+ "st1 { v16.b }[2], [x28]\n"
+ "st1 { v20.b }[2], [x21]\n"
"b 58f\n"
"57:" // Height 2: Partial direct writeback: partial_1_0
- "str b16, [x14, #0x0]\n"
- "str b20, [x22, #0x0]\n"
+ "str b16, [x28, #0x0]\n"
+ "str b20, [x21, #0x0]\n"
"58:" // Height 2: Partial direct writeback: Done
"b 60f\n"
"59:" // Height 2: Full writeback
- "str q16, [x14, #0x0]\n"
- "add x14, x14, #0x10\n"
- "str q20, [x22, #0x0]\n"
+ "str q16, [x28, #0x0]\n"
+ "add x28, x28, #0x10\n"
+ "str q20, [x21, #0x0]\n"
"60:" // Height 2: Writeback done
- "subs x15, x15, #0x10\n"
+ "subs x11, x11, #0x10\n"
"bgt 32b\n"
"b 122f\n"
"61:" // Height 3
- "mov x16, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"movi v12.4s, #0x0\n"
- "bic %x[flags], %x[flags], #0x80000000\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"movi v13.4s, #0x0\n"
- "ldr x15, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[col_bias]\n"
"movi v15.16b, #0x1\n"
- "ldr x13, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x14, %x[output_ptr]\n"
+ "bic %x[flags], %x[flags], #0x80000000\n"
+ "mov x28, %x[output_ptr]\n"
"62:" // Height 3: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -851,117 +875,129 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"movi v26.4s, #0x0\n"
"movi v27.4s, #0x0\n"
"63:" // Height 3: setup done
- "mov x12, #0x0\n"
+ "mov x27, #0x0\n"
"64:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w11, [x20, x12, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 65f\n"
- "ldr x21, [%x[input_ptr], x12, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x10, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "ldr x22, [x21, #0x10]\n"
- "cbnz x12, 66f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x10, x10, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "cbnz x27, 66f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
"b 66f\n"
"65:" // Height 3: setup direct input
- "mov x10, %x[input_ptr]\n"
- "add x23, x10, x20\n"
- "add x22, x23, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x22, x25, x19\n"
+ "add x21, x22, x19\n"
"66:" // Height 3: input setup done
- "cmp x11, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 71f\n"
- "ldr q0, [x10, #0x0]\n"
- "cmp x11, #0x20\n"
- "ldr q1, [x23, #0x0]\n"
- "ldr q2, [x22, #0x0]\n"
- "ldr q4, [x13, #0x0]\n"
- "ldr q5, [x13, #0x10]\n"
- "ldr q6, [x13, #0x20]\n"
- "ldr q7, [x13, #0x30]\n"
- "ldr q8, [x13, #0x40]\n"
- "ldr q9, [x13, #0x50]\n"
- "ldr q10, [x13, #0x60]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x22, #0x0]\n"
+ "cmp x26, #0x20\n"
+ "ldr q2, [x21, #0x0]\n"
+ "ldr q4, [x10, #0x0]\n"
"blt 69f\n"
"67:" // Height 3: Multiply loop: Main loop head
".inst 0x4f80e090 // sdot v16.4s, v4.16b, v0.4b[0]\n"
- "ldr x9, [x13, #0x78]\n"
+ "ldr d5, [x10, #0x10]\n"
".inst 0x4f81e094 // sdot v20.4s, v4.16b, v1.4b[0]\n"
- "ldr x28, [x13, #0x88]\n"
+ "ldr x24, [x10, #0x18]\n"
".inst 0x4f82e098 // sdot v24.4s, v4.16b, v2.4b[0]\n"
- "ldr d4, [x13, #0x70]\n"
+ "ldr d6, [x10, #0x20]\n"
+ "ldr x23, [x10, #0x28]\n"
+ "add x25, x25, #0x10\n"
+ "mov v5.d[1], x24\n"
+ "ldr d7, [x10, #0x30]\n"
+ "ldr x19, [x10, #0x38]\n"
+ "add x22, x22, #0x10\n"
".inst 0x4f80e0b1 // sdot v17.4s, v5.16b, v0.4b[0]\n"
- "mov v4.d[1], x9\n"
+ "mov v6.d[1], x23\n"
".inst 0x4f81e0b5 // sdot v21.4s, v5.16b, v1.4b[0]\n"
- "ldr x27, [x13, #0x98]\n"
+ "ldr d8, [x10, #0x40]\n"
".inst 0x4f82e0b9 // sdot v25.4s, v5.16b, v2.4b[0]\n"
- "ldr d5, [x13, #0x80]\n"
+ "mov v7.d[1], x19\n"
".inst 0x4f80e0d2 // sdot v18.4s, v6.16b, v0.4b[0]\n"
- "ldr x26, [x13, #0xa8]\n"
+ "ldr x23, [x10, #0x48]\n"
".inst 0x4f81e0d6 // sdot v22.4s, v6.16b, v1.4b[0]\n"
- "ldr x25, [x13, #0xb8]\n"
+ "ldr d9, [x10, #0x50]\n"
".inst 0x4f82e0da // sdot v26.4s, v6.16b, v2.4b[0]\n"
- "ldr d6, [x13, #0x90]\n"
+ "ldr x19, [x10, #0x58]\n"
".inst 0x4f80e0f3 // sdot v19.4s, v7.16b, v0.4b[0]\n"
- "mov v5.d[1], x28\n"
+ "mov v8.d[1], x23\n"
".inst 0x4f81e0f7 // sdot v23.4s, v7.16b, v1.4b[0]\n"
- "mov v6.d[1], x27\n"
+ "ldr d10, [x10, #0x60]\n"
".inst 0x4f82e0fb // sdot v27.4s, v7.16b, v2.4b[0]\n"
- "ldr d7, [x13, #0xa0]\n"
+ "mov v9.d[1], x19\n"
".inst 0x4fa0e110 // sdot v16.4s, v8.16b, v0.4b[1]\n"
- "mov v7.d[1], x26\n"
+ "ldr x23, [x10, #0x68]\n"
".inst 0x4fa1e114 // sdot v20.4s, v8.16b, v1.4b[1]\n"
- "ldr x24, [x13, #0xc8]\n"
+ "ldr d4, [x10, #0x70]\n"
".inst 0x4fa2e118 // sdot v24.4s, v8.16b, v2.4b[1]\n"
- "ldr d8, [x13, #0xb0]\n"
+ "ldr x19, [x10, #0x78]\n"
".inst 0x4fa0e131 // sdot v17.4s, v9.16b, v0.4b[1]\n"
- "mov v8.d[1], x25\n"
+ "mov v10.d[1], x23\n"
".inst 0x4fa1e135 // sdot v21.4s, v9.16b, v1.4b[1]\n"
- "ldr x20, [x13, #0xd8]\n"
+ "ldr d5, [x10, #0x80]\n"
".inst 0x4fa2e139 // sdot v25.4s, v9.16b, v2.4b[1]\n"
- "ldr d9, [x13, #0xc0]\n"
+ "mov v4.d[1], x19\n"
".inst 0x4fa0e152 // sdot v18.4s, v10.16b, v0.4b[1]\n"
- "ldr x9, [x13, #0xe8]\n"
+ "ldr x24, [x10, #0x88]\n"
".inst 0x4fa1e156 // sdot v22.4s, v10.16b, v1.4b[1]\n"
- "ldr x28, [x13, #0xf8]\n"
+ "ldr d6, [x10, #0x90]\n"
".inst 0x4fa2e15a // sdot v26.4s, v10.16b, v2.4b[1]\n"
- "ldr d10, [x13, #0xd0]\n"
+ "ldr x23, [x10, #0x98]\n"
".inst 0x4fa0e093 // sdot v19.4s, v4.16b, v0.4b[1]\n"
- "mov v9.d[1], x24\n"
+ "mov v5.d[1], x24\n"
".inst 0x4fa1e097 // sdot v23.4s, v4.16b, v1.4b[1]\n"
- "mov v10.d[1], x20\n"
+ "ldr d7, [x10, #0xa0]\n"
".inst 0x4fa2e09b // sdot v27.4s, v4.16b, v2.4b[1]\n"
- "ldr d4, [x13, #0xe0]\n"
+ "mov v6.d[1], x23\n"
".inst 0x4f80e8b0 // sdot v16.4s, v5.16b, v0.4b[2]\n"
- "mov v4.d[1], x9\n"
+ "ldr x19, [x10, #0xa8]\n"
".inst 0x4f81e8b4 // sdot v20.4s, v5.16b, v1.4b[2]\n"
- "add x10, x10, #0x10\n"
+ "ldr d8, [x10, #0xb0]\n"
".inst 0x4f82e8b8 // sdot v24.4s, v5.16b, v2.4b[2]\n"
- "ldr d5, [x13, #0xf0]\n"
+ "ldr x23, [x10, #0xb8]\n"
".inst 0x4f80e8d1 // sdot v17.4s, v6.16b, v0.4b[2]\n"
- "mov v5.d[1], x28\n"
+ "mov v7.d[1], x19\n"
".inst 0x4f81e8d5 // sdot v21.4s, v6.16b, v1.4b[2]\n"
- "add x23, x23, #0x10\n"
+ "ldr d9, [x10, #0xc0]\n"
".inst 0x4f82e8d9 // sdot v25.4s, v6.16b, v2.4b[2]\n"
- "add x22, x22, #0x10\n"
+ "mov v8.d[1], x23\n"
".inst 0x4f80e8f2 // sdot v18.4s, v7.16b, v0.4b[2]\n"
- "add x13, x13, #0x100\n"
+ "ldr x19, [x10, #0xc8]\n"
".inst 0x4f81e8f6 // sdot v22.4s, v7.16b, v1.4b[2]\n"
+ "ldr d10, [x10, #0xd0]\n"
".inst 0x4f82e8fa // sdot v26.4s, v7.16b, v2.4b[2]\n"
+ "ldr x23, [x10, #0xd8]\n"
".inst 0x4f80e913 // sdot v19.4s, v8.16b, v0.4b[2]\n"
+ "mov v9.d[1], x19\n"
".inst 0x4f81e917 // sdot v23.4s, v8.16b, v1.4b[2]\n"
+ "ldr d4, [x10, #0xe0]\n"
".inst 0x4f82e91b // sdot v27.4s, v8.16b, v2.4b[2]\n"
+ "mov v10.d[1], x23\n"
".inst 0x4fa0e930 // sdot v16.4s, v9.16b, v0.4b[3]\n"
+ "ldr x19, [x10, #0xe8]\n"
".inst 0x4fa1e934 // sdot v20.4s, v9.16b, v1.4b[3]\n"
+ "ldr d5, [x10, #0xf0]\n"
".inst 0x4fa2e938 // sdot v24.4s, v9.16b, v2.4b[3]\n"
+ "ldr x24, [x10, #0xf8]\n"
".inst 0x4fa0e951 // sdot v17.4s, v10.16b, v0.4b[3]\n"
+ "mov v4.d[1], x19\n"
".inst 0x4fa1e955 // sdot v21.4s, v10.16b, v1.4b[3]\n"
+ "add x21, x21, #0x10\n"
".inst 0x4fa2e959 // sdot v25.4s, v10.16b, v2.4b[3]\n"
+ "mov v5.d[1], x24\n"
".inst 0x4fa0e892 // sdot v18.4s, v4.16b, v0.4b[3]\n"
+ "add x10, x10, #0x100\n"
".inst 0x4fa1e896 // sdot v22.4s, v4.16b, v1.4b[3]\n"
".inst 0x4fa2e89a // sdot v26.4s, v4.16b, v2.4b[3]\n"
".inst 0x4fa0e8b3 // sdot v19.4s, v5.16b, v0.4b[3]\n"
@@ -972,65 +1008,65 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
".inst 0x4e8f942c // sdot v12.4s, v1.16b, v15.16b\n"
".inst 0x4e8f944d // sdot v13.4s, v2.16b, v15.16b\n"
"68:" // Height 3: Multiply loop: unique 9: skip row sum
- "ldr q0, [x10, #0x0]\n"
- "sub x11, x11, #0x10\n"
- "ldr q1, [x23, #0x0]\n"
- "cmp x11, #0x20\n"
- "ldr q2, [x22, #0x0]\n"
- "ldr q4, [x13, #0x0]\n"
- "ldr q5, [x13, #0x10]\n"
- "ldr q6, [x13, #0x20]\n"
- "ldr q7, [x13, #0x30]\n"
- "ldr q8, [x13, #0x40]\n"
- "ldr q9, [x13, #0x50]\n"
- "ldr q10, [x13, #0x60]\n"
- "prfm pldl1keep, [x10, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x26, x26, #0x10\n"
"prfm pldl1keep, [x22, #0x80]\n"
+ "cmp x26, #0x20\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x22, #0x0]\n"
+ "ldr q2, [x21, #0x0]\n"
+ "ldr q4, [x10, #0x0]\n"
"bge 67b\n"
"69:" // Height 3: Multiply loop: Single iteration only
".inst 0x4f80e090 // sdot v16.4s, v4.16b, v0.4b[0]\n"
- "sub x11, x11, #0x10\n"
+ "ldr q5, [x10, #0x10]\n"
".inst 0x4f81e094 // sdot v20.4s, v4.16b, v1.4b[0]\n"
- "add x10, x10, #0x10\n"
+ "ldr q6, [x10, #0x20]\n"
".inst 0x4f82e098 // sdot v24.4s, v4.16b, v2.4b[0]\n"
- "ldr q4, [x13, #0x70]\n"
+ "ldr q7, [x10, #0x30]\n"
".inst 0x4f80e0b1 // sdot v17.4s, v5.16b, v0.4b[0]\n"
- "add x23, x23, #0x10\n"
+ "ldr q8, [x10, #0x40]\n"
".inst 0x4f81e0b5 // sdot v21.4s, v5.16b, v1.4b[0]\n"
- "add x22, x22, #0x10\n"
+ "ldr q9, [x10, #0x50]\n"
".inst 0x4f82e0b9 // sdot v25.4s, v5.16b, v2.4b[0]\n"
- "ldr q5, [x13, #0x80]\n"
+ "ldr q10, [x10, #0x60]\n"
".inst 0x4f80e0d2 // sdot v18.4s, v6.16b, v0.4b[0]\n"
+ "ldr q4, [x10, #0x70]\n"
".inst 0x4f81e0d6 // sdot v22.4s, v6.16b, v1.4b[0]\n"
+ "ldr q5, [x10, #0x80]\n"
".inst 0x4f82e0da // sdot v26.4s, v6.16b, v2.4b[0]\n"
- "ldr q6, [x13, #0x90]\n"
+ "ldr q6, [x10, #0x90]\n"
".inst 0x4f80e0f3 // sdot v19.4s, v7.16b, v0.4b[0]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x4f81e0f7 // sdot v23.4s, v7.16b, v1.4b[0]\n"
+ "add x25, x25, #0x10\n"
".inst 0x4f82e0fb // sdot v27.4s, v7.16b, v2.4b[0]\n"
- "ldr q7, [x13, #0xa0]\n"
+ "ldr q7, [x10, #0xa0]\n"
".inst 0x4fa0e110 // sdot v16.4s, v8.16b, v0.4b[1]\n"
+ "add x22, x22, #0x10\n"
".inst 0x4fa1e114 // sdot v20.4s, v8.16b, v1.4b[1]\n"
+ "add x21, x21, #0x10\n"
".inst 0x4fa2e118 // sdot v24.4s, v8.16b, v2.4b[1]\n"
- "ldr q8, [x13, #0xb0]\n"
+ "ldr q8, [x10, #0xb0]\n"
".inst 0x4fa0e131 // sdot v17.4s, v9.16b, v0.4b[1]\n"
".inst 0x4fa1e135 // sdot v21.4s, v9.16b, v1.4b[1]\n"
".inst 0x4fa2e139 // sdot v25.4s, v9.16b, v2.4b[1]\n"
- "ldr q9, [x13, #0xc0]\n"
+ "ldr q9, [x10, #0xc0]\n"
".inst 0x4fa0e152 // sdot v18.4s, v10.16b, v0.4b[1]\n"
".inst 0x4fa1e156 // sdot v22.4s, v10.16b, v1.4b[1]\n"
".inst 0x4fa2e15a // sdot v26.4s, v10.16b, v2.4b[1]\n"
- "ldr q10, [x13, #0xd0]\n"
+ "ldr q10, [x10, #0xd0]\n"
".inst 0x4fa0e093 // sdot v19.4s, v4.16b, v0.4b[1]\n"
".inst 0x4fa1e097 // sdot v23.4s, v4.16b, v1.4b[1]\n"
".inst 0x4fa2e09b // sdot v27.4s, v4.16b, v2.4b[1]\n"
- "ldr q4, [x13, #0xe0]\n"
+ "ldr q4, [x10, #0xe0]\n"
".inst 0x4f80e8b0 // sdot v16.4s, v5.16b, v0.4b[2]\n"
".inst 0x4f81e8b4 // sdot v20.4s, v5.16b, v1.4b[2]\n"
".inst 0x4f82e8b8 // sdot v24.4s, v5.16b, v2.4b[2]\n"
- "ldr q5, [x13, #0xf0]\n"
+ "ldr q5, [x10, #0xf0]\n"
".inst 0x4f80e8d1 // sdot v17.4s, v6.16b, v0.4b[2]\n"
- "add x13, x13, #0x100\n"
+ "add x10, x10, #0x100\n"
".inst 0x4f81e8d5 // sdot v21.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d9 // sdot v25.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f80e8f2 // sdot v18.4s, v7.16b, v0.4b[2]\n"
@@ -1056,32 +1092,32 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
".inst 0x4e8f942c // sdot v12.4s, v1.16b, v15.16b\n"
".inst 0x4e8f944d // sdot v13.4s, v2.16b, v15.16b\n"
"70:" // Height 3: Multiply loop: unique 10: skip row sum
- "prfm pldl1keep, [x10, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"prfm pldl1keep, [x22, #0x80]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
"71:" // Height 3: Multiply loop: Main loop skip
- "cbz x11, 78f\n"
- "cmp x11, #0x4\n"
+ "cbz x26, 78f\n"
+ "cmp x26, #0x4\n"
"blt 74f\n"
"72:" // Height 3: Multiply loop: Odd block loop
- "ldr s0, [x10], #0x4\n"
- "ldr s1, [x23], #0x4\n"
- "ldr s2, [x22], #0x4\n"
+ "ldr s0, [x25], #0x4\n"
+ "ldr s1, [x22], #0x4\n"
+ "ldr s2, [x21], #0x4\n"
"tbnz %x[flags], #31, 73f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
".inst 0x4e8f942c // sdot v12.4s, v1.16b, v15.16b\n"
".inst 0x4e8f944d // sdot v13.4s, v2.16b, v15.16b\n"
"73:" // Height 3: Multiply loop: unique 11: skip row sum
- "ldr q6, [x13, #0x0]\n"
- "sub x11, x11, #0x4\n"
- "ldr q7, [x13, #0x10]\n"
- "cmp x11, #0x4\n"
- "ldr q8, [x13, #0x20]\n"
+ "ldr q6, [x10, #0x0]\n"
+ "sub x26, x26, #0x4\n"
+ "ldr q7, [x10, #0x10]\n"
+ "cmp x26, #0x4\n"
+ "ldr q8, [x10, #0x20]\n"
".inst 0x4f80e0d0 // sdot v16.4s, v6.16b, v0.4b[0]\n"
- "ldr q9, [x13, #0x30]\n"
+ "ldr q9, [x10, #0x30]\n"
".inst 0x4f81e0d4 // sdot v20.4s, v6.16b, v1.4b[0]\n"
+ "add x10, x10, #0x40\n"
".inst 0x4f82e0d8 // sdot v24.4s, v6.16b, v2.4b[0]\n"
- "add x13, x13, #0x40\n"
".inst 0x4f80e0f1 // sdot v17.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0f5 // sdot v21.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f9 // sdot v25.4s, v7.16b, v2.4b[0]\n"
@@ -1092,37 +1128,37 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
".inst 0x4f81e137 // sdot v23.4s, v9.16b, v1.4b[0]\n"
".inst 0x4f82e13b // sdot v27.4s, v9.16b, v2.4b[0]\n"
"bge 72b\n"
+ "cbz x26, 78f\n"
"74:" // Height 3: Multiply loop: Skip odd blocks
- "cbz x11, 78f\n"
- "tbz x11, #1, 75f\n"
- "ldr h0, [x10], #0x2\n"
- "ldr h1, [x23], #0x2\n"
- "ldr h2, [x22], #0x2\n"
- "tbz x11, #0, 76f\n"
- "ld1 { v0.b }[2], [x10]\n"
- "ld1 { v1.b }[2], [x23]\n"
- "ld1 { v2.b }[2], [x22]\n"
+ "tbz x26, #1, 75f\n"
+ "ldr h0, [x25], #0x2\n"
+ "ldr h1, [x22], #0x2\n"
+ "ldr h2, [x21], #0x2\n"
+ "tbz x26, #0, 76f\n"
+ "ld1 { v0.b }[2], [x25]\n"
+ "ld1 { v1.b }[2], [x22]\n"
+ "ld1 { v2.b }[2], [x21]\n"
"b 76f\n"
"75:" // Height 3: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x10, #0x0]\n"
- "ldr b1, [x23, #0x0]\n"
- "ldr b2, [x22, #0x0]\n"
+ "ldr b0, [x25, #0x0]\n"
+ "ldr b1, [x22, #0x0]\n"
+ "ldr b2, [x21, #0x0]\n"
"76:" // Height 3: Multiply loop: Ragged operand read: Done
"tbnz %x[flags], #31, 77f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
".inst 0x4e8f942c // sdot v12.4s, v1.16b, v15.16b\n"
".inst 0x4e8f944d // sdot v13.4s, v2.16b, v15.16b\n"
"77:" // Height 3: Multiply loop: unique 12: skip row sum
- "ldr q10, [x13, #0x0]\n"
+ "ldr q10, [x10, #0x0]\n"
+ "ldr q4, [x10, #0x10]\n"
+ "ldr q5, [x10, #0x20]\n"
".inst 0x4f80e150 // sdot v16.4s, v10.16b, v0.4b[0]\n"
- "ldr q4, [x13, #0x10]\n"
+ "ldr q6, [x10, #0x30]\n"
".inst 0x4f81e154 // sdot v20.4s, v10.16b, v1.4b[0]\n"
- "ldr q5, [x13, #0x20]\n"
+ "add x10, x10, #0x40\n"
".inst 0x4f82e158 // sdot v24.4s, v10.16b, v2.4b[0]\n"
- "ldr q6, [x13, #0x30]\n"
".inst 0x4f80e091 // sdot v17.4s, v4.16b, v0.4b[0]\n"
".inst 0x4f81e095 // sdot v21.4s, v4.16b, v1.4b[0]\n"
- "add x13, x13, #0x40\n"
".inst 0x4f82e099 // sdot v25.4s, v4.16b, v2.4b[0]\n"
".inst 0x4f80e0b2 // sdot v18.4s, v5.16b, v0.4b[0]\n"
".inst 0x4f81e0b6 // sdot v22.4s, v5.16b, v1.4b[0]\n"
@@ -1131,22 +1167,22 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
".inst 0x4f81e0d7 // sdot v23.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0db // sdot v27.4s, v6.16b, v2.4b[0]\n"
"78:" // Height 3: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x12, x12, #0x1\n"
- "cmp x12, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 64b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x14, x20\n"
- "add x21, x22, x20\n"
- "prfm pstl1keep, [x14, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x21, x28, x19\n"
"prfm pstl1keep, [x21, #0x0]\n"
+ "add x20, x21, x19\n"
+ "prfm pstl1keep, [x20, #0x0]\n"
"tbnz %x[flags], #31, 79f\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
"addp v13.4s, v13.4s, v13.4s\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1r { v3.4s }, [x23]\n"
+ "add x22, %x[qp], %[b_offset]\n"
+ "ld1r { v3.4s }, [x22]\n"
"neg v3.4s, v3.4s\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
@@ -1155,13 +1191,9 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"mul v12.4s, v12.4s, v3.4s\n"
"mul v13.4s, v13.4s, v3.4s\n"
"79:" // Height 3: skip row sum fixup
- "ldr q0, [x16, #0x0]\n"
"add v16.4s, v16.4s, v11.4s\n"
- "ldr q1, [x16, #0x10]\n"
"add v17.4s, v17.4s, v11.4s\n"
- "ldr q2, [x16, #0x20]\n"
"add v18.4s, v18.4s, v11.4s\n"
- "ldr q3, [x16, #0x30]\n"
"add v19.4s, v19.4s, v11.4s\n"
"add v20.4s, v20.4s, v12.4s\n"
"add v21.4s, v21.4s, v12.4s\n"
@@ -1171,69 +1203,73 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"add v25.4s, v25.4s, v13.4s\n"
"add v26.4s, v26.4s, v13.4s\n"
"add v27.4s, v27.4s, v13.4s\n"
+ "ldr q0, [x9, #0x0]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
+ "ldr q1, [x9, #0x10]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
+ "ldr q2, [x9, #0x20]\n"
+ "add x22, %x[qp], %[per_layer_mul]\n"
"add v16.4s, v16.4s, v0.4s\n"
- "add v17.4s, v17.4s, v1.4s\n"
- "add v18.4s, v18.4s, v2.4s\n"
- "add v19.4s, v19.4s, v3.4s\n"
"add v20.4s, v20.4s, v0.4s\n"
+ "add v17.4s, v17.4s, v1.4s\n"
"add v21.4s, v21.4s, v1.4s\n"
+ "add v18.4s, v18.4s, v2.4s\n"
"add v22.4s, v22.4s, v2.4s\n"
- "add v23.4s, v23.4s, v3.4s\n"
"add v24.4s, v24.4s, v0.4s\n"
"add v25.4s, v25.4s, v1.4s\n"
"add v26.4s, v26.4s, v2.4s\n"
- "add v27.4s, v27.4s, v3.4s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x23]\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
+ "ldr q3, [x9, #0x30]\n"
"ld1r { v0.4s }, [x23]\n"
+ "add x9, x9, #0x40\n"
+ "ld1r { v4.4s }, [x22]\n"
+ "add v19.4s, v19.4s, v3.4s\n"
+ "add v23.4s, v23.4s, v3.4s\n"
+ "add v27.4s, v27.4s, v3.4s\n"
"sqrdmulh v16.4s, v16.4s, v4.4s\n"
"sqrdmulh v17.4s, v17.4s, v4.4s\n"
"sqrdmulh v18.4s, v18.4s, v4.4s\n"
- "sqrdmulh v19.4s, v19.4s, v4.4s\n"
"sqrdmulh v20.4s, v20.4s, v4.4s\n"
"sqrdmulh v21.4s, v21.4s, v4.4s\n"
"sqrdmulh v22.4s, v22.4s, v4.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v4.4s\n"
"sqrdmulh v23.4s, v23.4s, v4.4s\n"
"sqrdmulh v24.4s, v24.4s, v4.4s\n"
"sqrdmulh v25.4s, v25.4s, v4.4s\n"
"sqrdmulh v26.4s, v26.4s, v4.4s\n"
"sqrdmulh v27.4s, v27.4s, v4.4s\n"
- "add x16, x16, #0x40\n"
"tbz %x[flags], #5, 80f\n"
"and v4.16b, v16.16b, v0.16b\n"
"and v5.16b, v17.16b, v0.16b\n"
"and v6.16b, v18.16b, v0.16b\n"
"and v7.16b, v19.16b, v0.16b\n"
"and v8.16b, v20.16b, v0.16b\n"
+ "and v9.16b, v21.16b, v0.16b\n"
+ "and v10.16b, v22.16b, v0.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sshr v8.4s, v8.4s, #0x1f\n"
+ "sshr v9.4s, v9.4s, #0x1f\n"
+ "sshr v10.4s, v10.4s, #0x1f\n"
"sqadd v16.4s, v16.4s, v4.4s\n"
+ "and v4.16b, v23.16b, v0.16b\n"
"sqadd v17.4s, v17.4s, v5.4s\n"
"sqadd v18.4s, v18.4s, v6.4s\n"
"sqadd v19.4s, v19.4s, v7.4s\n"
"sqadd v20.4s, v20.4s, v8.4s\n"
- "and v9.16b, v21.16b, v0.16b\n"
- "and v10.16b, v22.16b, v0.16b\n"
- "and v4.16b, v23.16b, v0.16b\n"
+ "sqadd v21.4s, v21.4s, v9.4s\n"
+ "sqadd v22.4s, v22.4s, v10.4s\n"
"and v5.16b, v24.16b, v0.16b\n"
"and v6.16b, v25.16b, v0.16b\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
"and v7.16b, v26.16b, v0.16b\n"
"and v8.16b, v27.16b, v0.16b\n"
- "sshr v9.4s, v9.4s, #0x1f\n"
- "sshr v10.4s, v10.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
+ "sqadd v23.4s, v23.4s, v4.4s\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sshr v8.4s, v8.4s, #0x1f\n"
- "sqadd v21.4s, v21.4s, v9.4s\n"
- "sqadd v22.4s, v22.4s, v10.4s\n"
- "sqadd v23.4s, v23.4s, v4.4s\n"
"sqadd v24.4s, v24.4s, v5.4s\n"
"sqadd v25.4s, v25.4s, v6.4s\n"
"sqadd v26.4s, v26.4s, v7.4s\n"
@@ -1251,8 +1287,13 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"srshl v25.4s, v25.4s, v0.4s\n"
"srshl v26.4s, v26.4s, v0.4s\n"
"srshl v27.4s, v27.4s, v0.4s\n"
- "add x23, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
+ "add x22, %x[qp], %[c_offset]\n"
+ "add x23, %x[qp], %[minval]\n"
+ "ld1r { v4.4s }, [x22]\n"
+ "add x22, %x[qp], %[maxval]\n"
+ "ld1r { v5.4s }, [x23]\n"
+ "cmp x11, #0x10\n"
+ "ld1r { v6.4s }, [x22]\n"
"add v16.4s, v16.4s, v4.4s\n"
"add v17.4s, v17.4s, v4.4s\n"
"add v18.4s, v18.4s, v4.4s\n"
@@ -1263,10 +1304,6 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"add v23.4s, v23.4s, v4.4s\n"
"add v24.4s, v24.4s, v4.4s\n"
"add v25.4s, v25.4s, v4.4s\n"
- "add v26.4s, v26.4s, v4.4s\n"
- "add v27.4s, v27.4s, v4.4s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x23]\n"
"smin v16.4s, v16.4s, v6.4s\n"
"smin v17.4s, v17.4s, v6.4s\n"
"smin v18.4s, v18.4s, v6.4s\n"
@@ -1277,10 +1314,6 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"smin v23.4s, v23.4s, v6.4s\n"
"smin v24.4s, v24.4s, v6.4s\n"
"smin v25.4s, v25.4s, v6.4s\n"
- "smin v26.4s, v26.4s, v6.4s\n"
- "smin v27.4s, v27.4s, v6.4s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x23]\n"
"smax v16.4s, v16.4s, v5.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
"smax v18.4s, v18.4s, v5.4s\n"
@@ -1291,117 +1324,120 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"smax v23.4s, v23.4s, v5.4s\n"
"smax v24.4s, v24.4s, v5.4s\n"
"smax v25.4s, v25.4s, v5.4s\n"
- "smax v26.4s, v26.4s, v5.4s\n"
- "smax v27.4s, v27.4s, v5.4s\n"
+ "add v26.4s, v26.4s, v4.4s\n"
+ "add v27.4s, v27.4s, v4.4s\n"
"uzp1 v16.8h, v16.8h, v17.8h\n"
"uzp1 v17.8h, v18.8h, v19.8h\n"
"uzp1 v20.8h, v20.8h, v21.8h\n"
"uzp1 v21.8h, v22.8h, v23.8h\n"
"uzp1 v24.8h, v24.8h, v25.8h\n"
- "uzp1 v25.8h, v26.8h, v27.8h\n"
- "cmp x15, #0x10\n"
+ "smin v26.4s, v26.4s, v6.4s\n"
+ "smin v27.4s, v27.4s, v6.4s\n"
"uzp1 v16.16b, v16.16b, v17.16b\n"
"uzp1 v20.16b, v20.16b, v21.16b\n"
+ "smax v26.4s, v26.4s, v5.4s\n"
+ "smax v27.4s, v27.4s, v5.4s\n"
+ "uzp1 v25.8h, v26.8h, v27.8h\n"
"uzp1 v24.16b, v24.16b, v25.16b\n"
"bge 89f\n"
- "tbz x15, #3, 84f\n"
- "str d16, [x14], #0x8\n"
- "str d20, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
- "tbz x15, #2, 82f\n"
- "st1 { v16.s }[2], [x14], #0x4\n"
- "st1 { v20.s }[2], [x22], #0x4\n"
- "st1 { v24.s }[2], [x21], #0x4\n"
- "tbz x15, #1, 81f\n"
- "st1 { v16.h }[6], [x14], #0x2\n"
- "st1 { v20.h }[6], [x22], #0x2\n"
- "st1 { v24.h }[6], [x21], #0x2\n"
- "tbz x15, #0, 88f\n"
- "st1 { v16.b }[14], [x14]\n"
- "st1 { v20.b }[14], [x22]\n"
- "st1 { v24.b }[14], [x21]\n"
+ "tbz x11, #3, 84f\n"
+ "str d16, [x28], #0x8\n"
+ "str d20, [x21], #0x8\n"
+ "str d24, [x20], #0x8\n"
+ "tbz x11, #2, 82f\n"
+ "st1 { v16.s }[2], [x28], #0x4\n"
+ "st1 { v20.s }[2], [x21], #0x4\n"
+ "st1 { v24.s }[2], [x20], #0x4\n"
+ "tbz x11, #1, 81f\n"
+ "st1 { v16.h }[6], [x28], #0x2\n"
+ "st1 { v20.h }[6], [x21], #0x2\n"
+ "st1 { v24.h }[6], [x20], #0x2\n"
+ "tbz x11, #0, 88f\n"
+ "st1 { v16.b }[14], [x28]\n"
+ "st1 { v20.b }[14], [x21]\n"
+ "st1 { v24.b }[14], [x20]\n"
"b 88f\n"
"81:" // Height 3: Partial direct writeback: partial_1_12
- "tbz x15, #0, 88f\n"
- "st1 { v16.b }[12], [x14]\n"
- "st1 { v20.b }[12], [x22]\n"
- "st1 { v24.b }[12], [x21]\n"
+ "tbz x11, #0, 88f\n"
+ "st1 { v16.b }[12], [x28]\n"
+ "st1 { v20.b }[12], [x21]\n"
+ "st1 { v24.b }[12], [x20]\n"
"b 88f\n"
"82:" // Height 3: Partial direct writeback: partial_2_8
- "tbz x15, #1, 83f\n"
- "st1 { v16.h }[4], [x14], #0x2\n"
- "st1 { v20.h }[4], [x22], #0x2\n"
- "st1 { v24.h }[4], [x21], #0x2\n"
- "tbz x15, #0, 88f\n"
- "st1 { v16.b }[10], [x14]\n"
- "st1 { v20.b }[10], [x22]\n"
- "st1 { v24.b }[10], [x21]\n"
+ "tbz x11, #1, 83f\n"
+ "st1 { v16.h }[4], [x28], #0x2\n"
+ "st1 { v20.h }[4], [x21], #0x2\n"
+ "st1 { v24.h }[4], [x20], #0x2\n"
+ "tbz x11, #0, 88f\n"
+ "st1 { v16.b }[10], [x28]\n"
+ "st1 { v20.b }[10], [x21]\n"
+ "st1 { v24.b }[10], [x20]\n"
"b 88f\n"
"83:" // Height 3: Partial direct writeback: partial_1_8
- "tbz x15, #0, 88f\n"
- "st1 { v16.b }[8], [x14]\n"
- "st1 { v20.b }[8], [x22]\n"
- "st1 { v24.b }[8], [x21]\n"
+ "tbz x11, #0, 88f\n"
+ "st1 { v16.b }[8], [x28]\n"
+ "st1 { v20.b }[8], [x21]\n"
+ "st1 { v24.b }[8], [x20]\n"
"b 88f\n"
"84:" // Height 3: Partial direct writeback: partial_4_0
- "tbz x15, #2, 86f\n"
- "str s16, [x14], #0x4\n"
- "str s20, [x22], #0x4\n"
- "str s24, [x21], #0x4\n"
- "tbz x15, #1, 85f\n"
- "st1 { v16.h }[2], [x14], #0x2\n"
- "st1 { v20.h }[2], [x22], #0x2\n"
- "st1 { v24.h }[2], [x21], #0x2\n"
- "tbz x15, #0, 88f\n"
- "st1 { v16.b }[6], [x14]\n"
- "st1 { v20.b }[6], [x22]\n"
- "st1 { v24.b }[6], [x21]\n"
+ "tbz x11, #2, 86f\n"
+ "str s16, [x28], #0x4\n"
+ "str s20, [x21], #0x4\n"
+ "str s24, [x20], #0x4\n"
+ "tbz x11, #1, 85f\n"
+ "st1 { v16.h }[2], [x28], #0x2\n"
+ "st1 { v20.h }[2], [x21], #0x2\n"
+ "st1 { v24.h }[2], [x20], #0x2\n"
+ "tbz x11, #0, 88f\n"
+ "st1 { v16.b }[6], [x28]\n"
+ "st1 { v20.b }[6], [x21]\n"
+ "st1 { v24.b }[6], [x20]\n"
"b 88f\n"
"85:" // Height 3: Partial direct writeback: partial_1_4
- "tbz x15, #0, 88f\n"
- "st1 { v16.b }[4], [x14]\n"
- "st1 { v20.b }[4], [x22]\n"
- "st1 { v24.b }[4], [x21]\n"
+ "tbz x11, #0, 88f\n"
+ "st1 { v16.b }[4], [x28]\n"
+ "st1 { v20.b }[4], [x21]\n"
+ "st1 { v24.b }[4], [x20]\n"
"b 88f\n"
"86:" // Height 3: Partial direct writeback: partial_2_0
- "tbz x15, #1, 87f\n"
- "str h16, [x14], #0x2\n"
- "str h20, [x22], #0x2\n"
- "str h24, [x21], #0x2\n"
- "tbz x15, #0, 88f\n"
- "st1 { v16.b }[2], [x14]\n"
- "st1 { v20.b }[2], [x22]\n"
- "st1 { v24.b }[2], [x21]\n"
+ "tbz x11, #1, 87f\n"
+ "str h16, [x28], #0x2\n"
+ "str h20, [x21], #0x2\n"
+ "str h24, [x20], #0x2\n"
+ "tbz x11, #0, 88f\n"
+ "st1 { v16.b }[2], [x28]\n"
+ "st1 { v20.b }[2], [x21]\n"
+ "st1 { v24.b }[2], [x20]\n"
"b 88f\n"
"87:" // Height 3: Partial direct writeback: partial_1_0
- "str b16, [x14, #0x0]\n"
- "str b20, [x22, #0x0]\n"
- "str b24, [x21, #0x0]\n"
+ "str b16, [x28, #0x0]\n"
+ "str b20, [x21, #0x0]\n"
+ "str b24, [x20, #0x0]\n"
"88:" // Height 3: Partial direct writeback: Done
"b 90f\n"
"89:" // Height 3: Full writeback
- "str q16, [x14, #0x0]\n"
- "add x14, x14, #0x10\n"
- "str q20, [x22, #0x0]\n"
- "str q24, [x21, #0x0]\n"
+ "str q16, [x28, #0x0]\n"
+ "add x28, x28, #0x10\n"
+ "str q20, [x21, #0x0]\n"
+ "str q24, [x20, #0x0]\n"
"90:" // Height 3: Writeback done
- "subs x15, x15, #0x10\n"
+ "subs x11, x11, #0x10\n"
"bgt 62b\n"
"b 122f\n"
"91:" // Height 4
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x4\n"
- "mov x16, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"movi v12.4s, #0x0\n"
- "bic %x[flags], %x[flags], #0x80000000\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"movi v13.4s, #0x0\n"
- "ldr x15, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"movi v14.4s, #0x0\n"
- "ldr x13, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x9, %x[col_bias]\n"
"movi v15.16b, #0x1\n"
- "mov x14, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "bic %x[flags], %x[flags], #0x80000000\n"
+ "mov x28, %x[output_ptr]\n"
+ "mov x19, #0x4\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"92:" // Height 4: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -1420,125 +1456,137 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"movi v30.4s, #0x0\n"
"movi v31.4s, #0x0\n"
"93:" // Height 4: setup done
- "mov x12, #0x0\n"
+ "mov x27, #0x0\n"
"94:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w11, [x20, x12, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 95f\n"
- "ldr x21, [%x[input_ptr], x12, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x10, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "ldr x22, [x21, #0x10]\n"
- "ldr x21, [x21, #0x18]\n"
- "cbnz x12, 96f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x10, x10, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
- "add x21, x21, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "ldr x20, [x20, #0x18]\n"
+ "cbnz x27, 96f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
+ "add x20, x20, x19\n"
"b 96f\n"
"95:" // Height 4: setup direct input
- "mov x10, %x[input_ptr]\n"
- "add x23, x10, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x22, x25, x19\n"
+ "add x21, x22, x19\n"
+ "add x20, x21, x19\n"
"96:" // Height 4: input setup done
- "cmp x11, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 101f\n"
- "ldr q0, [x10, #0x0]\n"
- "cmp x11, #0x20\n"
- "ldr q1, [x23, #0x0]\n"
- "ldr q2, [x22, #0x0]\n"
- "ldr q3, [x21, #0x0]\n"
- "ldr q4, [x13, #0x0]\n"
- "ldr q5, [x13, #0x10]\n"
- "ldr q6, [x13, #0x20]\n"
- "ldr q7, [x13, #0x30]\n"
- "ldr q8, [x13, #0x40]\n"
- "ldr q9, [x13, #0x50]\n"
- "ldr q10, [x13, #0x60]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x22, #0x0]\n"
+ "cmp x26, #0x20\n"
+ "ldr q2, [x21, #0x0]\n"
+ "ldr q3, [x20, #0x0]\n"
+ "ldr q4, [x10, #0x0]\n"
"blt 99f\n"
"97:" // Height 4: Multiply loop: Main loop head
".inst 0x4f80e090 // sdot v16.4s, v4.16b, v0.4b[0]\n"
- "ldr x9, [x13, #0x78]\n"
+ "ldr d5, [x10, #0x10]\n"
".inst 0x4f81e094 // sdot v20.4s, v4.16b, v1.4b[0]\n"
- "ldr x28, [x13, #0x88]\n"
+ "ldr x24, [x10, #0x18]\n"
".inst 0x4f82e098 // sdot v24.4s, v4.16b, v2.4b[0]\n"
- "ldr x27, [x13, #0x98]\n"
+ "ldr d6, [x10, #0x20]\n"
".inst 0x4f83e09c // sdot v28.4s, v4.16b, v3.4b[0]\n"
- "ldr d4, [x13, #0x70]\n"
+ "ldr x23, [x10, #0x28]\n"
+ "mov v5.d[1], x24\n"
+ "ldr d7, [x10, #0x30]\n"
+ "ldr x19, [x10, #0x38]\n"
+ "add x25, x25, #0x10\n"
".inst 0x4f80e0b1 // sdot v17.4s, v5.16b, v0.4b[0]\n"
- "mov v4.d[1], x9\n"
+ "mov v6.d[1], x23\n"
".inst 0x4f81e0b5 // sdot v21.4s, v5.16b, v1.4b[0]\n"
- "ldr x26, [x13, #0xa8]\n"
+ "ldr d8, [x10, #0x40]\n"
".inst 0x4f82e0b9 // sdot v25.4s, v5.16b, v2.4b[0]\n"
- "ldr x25, [x13, #0xb8]\n"
+ "mov v7.d[1], x19\n"
".inst 0x4f83e0bd // sdot v29.4s, v5.16b, v3.4b[0]\n"
- "ldr d5, [x13, #0x80]\n"
+ "ldr x23, [x10, #0x48]\n"
".inst 0x4f80e0d2 // sdot v18.4s, v6.16b, v0.4b[0]\n"
- "mov v5.d[1], x28\n"
+ "ldr d9, [x10, #0x50]\n"
".inst 0x4f81e0d6 // sdot v22.4s, v6.16b, v1.4b[0]\n"
- "ldr x24, [x13, #0xc8]\n"
+ "ldr x19, [x10, #0x58]\n"
".inst 0x4f82e0da // sdot v26.4s, v6.16b, v2.4b[0]\n"
- "ldr x20, [x13, #0xd8]\n"
+ "mov v8.d[1], x23\n"
".inst 0x4f83e0de // sdot v30.4s, v6.16b, v3.4b[0]\n"
- "ldr d6, [x13, #0x90]\n"
+ "ldr d10, [x10, #0x60]\n"
".inst 0x4f80e0f3 // sdot v19.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x27\n"
+ "mov v9.d[1], x19\n"
".inst 0x4f81e0f7 // sdot v23.4s, v7.16b, v1.4b[0]\n"
- "ldr x9, [x13, #0xe8]\n"
+ "ldr x23, [x10, #0x68]\n"
".inst 0x4f82e0fb // sdot v27.4s, v7.16b, v2.4b[0]\n"
- "ldr x28, [x13, #0xf8]\n"
+ "ldr d4, [x10, #0x70]\n"
".inst 0x4f83e0ff // sdot v31.4s, v7.16b, v3.4b[0]\n"
- "ldr d7, [x13, #0xa0]\n"
+ "ldr x19, [x10, #0x78]\n"
".inst 0x4fa0e110 // sdot v16.4s, v8.16b, v0.4b[1]\n"
- "mov v7.d[1], x26\n"
+ "mov v10.d[1], x23\n"
".inst 0x4fa1e114 // sdot v20.4s, v8.16b, v1.4b[1]\n"
- "add x10, x10, #0x10\n"
+ "ldr d5, [x10, #0x80]\n"
".inst 0x4fa2e118 // sdot v24.4s, v8.16b, v2.4b[1]\n"
- "add x23, x23, #0x10\n"
+ "mov v4.d[1], x19\n"
".inst 0x4fa3e11c // sdot v28.4s, v8.16b, v3.4b[1]\n"
- "ldr d8, [x13, #0xb0]\n"
+ "ldr x24, [x10, #0x88]\n"
".inst 0x4fa0e131 // sdot v17.4s, v9.16b, v0.4b[1]\n"
- "mov v8.d[1], x25\n"
+ "ldr d6, [x10, #0x90]\n"
".inst 0x4fa1e135 // sdot v21.4s, v9.16b, v1.4b[1]\n"
- "add x22, x22, #0x10\n"
+ "ldr x23, [x10, #0x98]\n"
".inst 0x4fa2e139 // sdot v25.4s, v9.16b, v2.4b[1]\n"
- "add x21, x21, #0x10\n"
+ "mov v5.d[1], x24\n"
".inst 0x4fa3e13d // sdot v29.4s, v9.16b, v3.4b[1]\n"
- "ldr d9, [x13, #0xc0]\n"
+ "ldr d7, [x10, #0xa0]\n"
".inst 0x4fa0e152 // sdot v18.4s, v10.16b, v0.4b[1]\n"
- "mov v9.d[1], x24\n"
+ "mov v6.d[1], x23\n"
".inst 0x4fa1e156 // sdot v22.4s, v10.16b, v1.4b[1]\n"
+ "ldr x19, [x10, #0xa8]\n"
".inst 0x4fa2e15a // sdot v26.4s, v10.16b, v2.4b[1]\n"
+ "ldr d8, [x10, #0xb0]\n"
".inst 0x4fa3e15e // sdot v30.4s, v10.16b, v3.4b[1]\n"
- "ldr d10, [x13, #0xd0]\n"
+ "ldr x23, [x10, #0xb8]\n"
".inst 0x4fa0e093 // sdot v19.4s, v4.16b, v0.4b[1]\n"
- "mov v10.d[1], x20\n"
+ "mov v7.d[1], x19\n"
".inst 0x4fa1e097 // sdot v23.4s, v4.16b, v1.4b[1]\n"
+ "ldr d9, [x10, #0xc0]\n"
".inst 0x4fa2e09b // sdot v27.4s, v4.16b, v2.4b[1]\n"
+ "mov v8.d[1], x23\n"
".inst 0x4fa3e09f // sdot v31.4s, v4.16b, v3.4b[1]\n"
- "ldr d4, [x13, #0xe0]\n"
+ "ldr x19, [x10, #0xc8]\n"
".inst 0x4f80e8b0 // sdot v16.4s, v5.16b, v0.4b[2]\n"
- "mov v4.d[1], x9\n"
+ "ldr d10, [x10, #0xd0]\n"
".inst 0x4f81e8b4 // sdot v20.4s, v5.16b, v1.4b[2]\n"
+ "ldr x23, [x10, #0xd8]\n"
".inst 0x4f82e8b8 // sdot v24.4s, v5.16b, v2.4b[2]\n"
+ "mov v9.d[1], x19\n"
".inst 0x4f83e8bc // sdot v28.4s, v5.16b, v3.4b[2]\n"
- "ldr d5, [x13, #0xf0]\n"
+ "ldr d4, [x10, #0xe0]\n"
".inst 0x4f80e8d1 // sdot v17.4s, v6.16b, v0.4b[2]\n"
- "mov v5.d[1], x28\n"
+ "mov v10.d[1], x23\n"
".inst 0x4f81e8d5 // sdot v21.4s, v6.16b, v1.4b[2]\n"
- "add x13, x13, #0x100\n"
+ "ldr x19, [x10, #0xe8]\n"
".inst 0x4f82e8d9 // sdot v25.4s, v6.16b, v2.4b[2]\n"
+ "ldr d5, [x10, #0xf0]\n"
".inst 0x4f83e8dd // sdot v29.4s, v6.16b, v3.4b[2]\n"
+ "ldr x24, [x10, #0xf8]\n"
".inst 0x4f80e8f2 // sdot v18.4s, v7.16b, v0.4b[2]\n"
+ "mov v4.d[1], x19\n"
".inst 0x4f81e8f6 // sdot v22.4s, v7.16b, v1.4b[2]\n"
+ "add x22, x22, #0x10\n"
".inst 0x4f82e8fa // sdot v26.4s, v7.16b, v2.4b[2]\n"
+ "mov v5.d[1], x24\n"
".inst 0x4f83e8fe // sdot v30.4s, v7.16b, v3.4b[2]\n"
+ "add x21, x21, #0x10\n"
".inst 0x4f80e913 // sdot v19.4s, v8.16b, v0.4b[2]\n"
+ "add x20, x20, #0x10\n"
".inst 0x4f81e917 // sdot v23.4s, v8.16b, v1.4b[2]\n"
+ "add x10, x10, #0x100\n"
".inst 0x4f82e91b // sdot v27.4s, v8.16b, v2.4b[2]\n"
".inst 0x4f83e91f // sdot v31.4s, v8.16b, v3.4b[2]\n"
".inst 0x4fa0e930 // sdot v16.4s, v9.16b, v0.4b[3]\n"
@@ -1563,77 +1611,77 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
".inst 0x4e8f944d // sdot v13.4s, v2.16b, v15.16b\n"
".inst 0x4e8f946e // sdot v14.4s, v3.16b, v15.16b\n"
"98:" // Height 4: Multiply loop: unique 13: skip row sum
- "ldr q0, [x10, #0x0]\n"
- "sub x11, x11, #0x10\n"
- "ldr q1, [x23, #0x0]\n"
- "cmp x11, #0x20\n"
- "ldr q2, [x22, #0x0]\n"
- "ldr q3, [x21, #0x0]\n"
- "ldr q4, [x13, #0x0]\n"
- "ldr q5, [x13, #0x10]\n"
- "ldr q6, [x13, #0x20]\n"
- "ldr q7, [x13, #0x30]\n"
- "ldr q8, [x13, #0x40]\n"
- "ldr q9, [x13, #0x50]\n"
- "ldr q10, [x13, #0x60]\n"
- "prfm pldl1keep, [x10, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x26, x26, #0x10\n"
"prfm pldl1keep, [x22, #0x80]\n"
+ "cmp x26, #0x20\n"
"prfm pldl1keep, [x21, #0x80]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x22, #0x0]\n"
+ "ldr q2, [x21, #0x0]\n"
+ "ldr q3, [x20, #0x0]\n"
+ "ldr q4, [x10, #0x0]\n"
"bge 97b\n"
"99:" // Height 4: Multiply loop: Single iteration only
".inst 0x4f80e090 // sdot v16.4s, v4.16b, v0.4b[0]\n"
- "sub x11, x11, #0x10\n"
+ "ldr q5, [x10, #0x10]\n"
".inst 0x4f81e094 // sdot v20.4s, v4.16b, v1.4b[0]\n"
- "add x10, x10, #0x10\n"
+ "ldr q6, [x10, #0x20]\n"
".inst 0x4f82e098 // sdot v24.4s, v4.16b, v2.4b[0]\n"
- "add x23, x23, #0x10\n"
+ "ldr q7, [x10, #0x30]\n"
".inst 0x4f83e09c // sdot v28.4s, v4.16b, v3.4b[0]\n"
- "ldr q4, [x13, #0x70]\n"
+ "ldr q8, [x10, #0x40]\n"
".inst 0x4f80e0b1 // sdot v17.4s, v5.16b, v0.4b[0]\n"
- "add x22, x22, #0x10\n"
+ "ldr q9, [x10, #0x50]\n"
".inst 0x4f81e0b5 // sdot v21.4s, v5.16b, v1.4b[0]\n"
- "add x21, x21, #0x10\n"
+ "ldr q10, [x10, #0x60]\n"
".inst 0x4f82e0b9 // sdot v25.4s, v5.16b, v2.4b[0]\n"
+ "ldr q4, [x10, #0x70]\n"
".inst 0x4f83e0bd // sdot v29.4s, v5.16b, v3.4b[0]\n"
- "ldr q5, [x13, #0x80]\n"
+ "ldr q5, [x10, #0x80]\n"
".inst 0x4f80e0d2 // sdot v18.4s, v6.16b, v0.4b[0]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x4f81e0d6 // sdot v22.4s, v6.16b, v1.4b[0]\n"
+ "add x25, x25, #0x10\n"
".inst 0x4f82e0da // sdot v26.4s, v6.16b, v2.4b[0]\n"
+ "add x22, x22, #0x10\n"
".inst 0x4f83e0de // sdot v30.4s, v6.16b, v3.4b[0]\n"
- "ldr q6, [x13, #0x90]\n"
+ "ldr q6, [x10, #0x90]\n"
".inst 0x4f80e0f3 // sdot v19.4s, v7.16b, v0.4b[0]\n"
+ "add x21, x21, #0x10\n"
".inst 0x4f81e0f7 // sdot v23.4s, v7.16b, v1.4b[0]\n"
+ "add x20, x20, #0x10\n"
".inst 0x4f82e0fb // sdot v27.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0ff // sdot v31.4s, v7.16b, v3.4b[0]\n"
- "ldr q7, [x13, #0xa0]\n"
+ "ldr q7, [x10, #0xa0]\n"
".inst 0x4fa0e110 // sdot v16.4s, v8.16b, v0.4b[1]\n"
".inst 0x4fa1e114 // sdot v20.4s, v8.16b, v1.4b[1]\n"
".inst 0x4fa2e118 // sdot v24.4s, v8.16b, v2.4b[1]\n"
".inst 0x4fa3e11c // sdot v28.4s, v8.16b, v3.4b[1]\n"
- "ldr q8, [x13, #0xb0]\n"
+ "ldr q8, [x10, #0xb0]\n"
".inst 0x4fa0e131 // sdot v17.4s, v9.16b, v0.4b[1]\n"
".inst 0x4fa1e135 // sdot v21.4s, v9.16b, v1.4b[1]\n"
".inst 0x4fa2e139 // sdot v25.4s, v9.16b, v2.4b[1]\n"
".inst 0x4fa3e13d // sdot v29.4s, v9.16b, v3.4b[1]\n"
- "ldr q9, [x13, #0xc0]\n"
+ "ldr q9, [x10, #0xc0]\n"
".inst 0x4fa0e152 // sdot v18.4s, v10.16b, v0.4b[1]\n"
".inst 0x4fa1e156 // sdot v22.4s, v10.16b, v1.4b[1]\n"
".inst 0x4fa2e15a // sdot v26.4s, v10.16b, v2.4b[1]\n"
".inst 0x4fa3e15e // sdot v30.4s, v10.16b, v3.4b[1]\n"
- "ldr q10, [x13, #0xd0]\n"
+ "ldr q10, [x10, #0xd0]\n"
".inst 0x4fa0e093 // sdot v19.4s, v4.16b, v0.4b[1]\n"
".inst 0x4fa1e097 // sdot v23.4s, v4.16b, v1.4b[1]\n"
".inst 0x4fa2e09b // sdot v27.4s, v4.16b, v2.4b[1]\n"
".inst 0x4fa3e09f // sdot v31.4s, v4.16b, v3.4b[1]\n"
- "ldr q4, [x13, #0xe0]\n"
+ "ldr q4, [x10, #0xe0]\n"
".inst 0x4f80e8b0 // sdot v16.4s, v5.16b, v0.4b[2]\n"
".inst 0x4f81e8b4 // sdot v20.4s, v5.16b, v1.4b[2]\n"
".inst 0x4f82e8b8 // sdot v24.4s, v5.16b, v2.4b[2]\n"
".inst 0x4f83e8bc // sdot v28.4s, v5.16b, v3.4b[2]\n"
- "ldr q5, [x13, #0xf0]\n"
+ "ldr q5, [x10, #0xf0]\n"
".inst 0x4f80e8d1 // sdot v17.4s, v6.16b, v0.4b[2]\n"
- "add x13, x13, #0x100\n"
+ "add x10, x10, #0x100\n"
".inst 0x4f81e8d5 // sdot v21.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d9 // sdot v25.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8dd // sdot v29.4s, v6.16b, v3.4b[2]\n"
@@ -1667,35 +1715,35 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
".inst 0x4e8f944d // sdot v13.4s, v2.16b, v15.16b\n"
".inst 0x4e8f946e // sdot v14.4s, v3.16b, v15.16b\n"
"100:" // Height 4: Multiply loop: unique 14: skip row sum
- "prfm pldl1keep, [x10, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"prfm pldl1keep, [x22, #0x80]\n"
"prfm pldl1keep, [x21, #0x80]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
"101:" // Height 4: Multiply loop: Main loop skip
- "cbz x11, 108f\n"
- "cmp x11, #0x4\n"
+ "cbz x26, 108f\n"
+ "cmp x26, #0x4\n"
"blt 104f\n"
"102:" // Height 4: Multiply loop: Odd block loop
- "ldr s0, [x10], #0x4\n"
- "ldr s1, [x23], #0x4\n"
- "ldr s2, [x22], #0x4\n"
- "ldr s3, [x21], #0x4\n"
+ "ldr s0, [x25], #0x4\n"
+ "ldr s1, [x22], #0x4\n"
+ "ldr s2, [x21], #0x4\n"
+ "ldr s3, [x20], #0x4\n"
"tbnz %x[flags], #31, 103f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
".inst 0x4e8f942c // sdot v12.4s, v1.16b, v15.16b\n"
".inst 0x4e8f944d // sdot v13.4s, v2.16b, v15.16b\n"
".inst 0x4e8f946e // sdot v14.4s, v3.16b, v15.16b\n"
"103:" // Height 4: Multiply loop: unique 15: skip row sum
- "ldr q6, [x13, #0x0]\n"
- "sub x11, x11, #0x4\n"
- "ldr q7, [x13, #0x10]\n"
- "cmp x11, #0x4\n"
- "ldr q8, [x13, #0x20]\n"
+ "ldr q6, [x10, #0x0]\n"
+ "sub x26, x26, #0x4\n"
+ "ldr q7, [x10, #0x10]\n"
+ "cmp x26, #0x4\n"
+ "ldr q8, [x10, #0x20]\n"
".inst 0x4f80e0d0 // sdot v16.4s, v6.16b, v0.4b[0]\n"
- "ldr q9, [x13, #0x30]\n"
+ "ldr q9, [x10, #0x30]\n"
".inst 0x4f81e0d4 // sdot v20.4s, v6.16b, v1.4b[0]\n"
+ "add x10, x10, #0x40\n"
".inst 0x4f82e0d8 // sdot v24.4s, v6.16b, v2.4b[0]\n"
- "add x13, x13, #0x40\n"
".inst 0x4f83e0dc // sdot v28.4s, v6.16b, v3.4b[0]\n"
".inst 0x4f80e0f1 // sdot v17.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0f5 // sdot v21.4s, v7.16b, v1.4b[0]\n"
@@ -1710,24 +1758,24 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
".inst 0x4f82e13b // sdot v27.4s, v9.16b, v2.4b[0]\n"
".inst 0x4f83e13f // sdot v31.4s, v9.16b, v3.4b[0]\n"
"bge 102b\n"
+ "cbz x26, 108f\n"
"104:" // Height 4: Multiply loop: Skip odd blocks
- "cbz x11, 108f\n"
- "tbz x11, #1, 105f\n"
- "ldr h0, [x10], #0x2\n"
- "ldr h1, [x23], #0x2\n"
- "ldr h2, [x22], #0x2\n"
- "ldr h3, [x21], #0x2\n"
- "tbz x11, #0, 106f\n"
- "ld1 { v0.b }[2], [x10]\n"
- "ld1 { v1.b }[2], [x23]\n"
- "ld1 { v2.b }[2], [x22]\n"
- "ld1 { v3.b }[2], [x21]\n"
+ "tbz x26, #1, 105f\n"
+ "ldr h0, [x25], #0x2\n"
+ "ldr h1, [x22], #0x2\n"
+ "ldr h2, [x21], #0x2\n"
+ "ldr h3, [x20], #0x2\n"
+ "tbz x26, #0, 106f\n"
+ "ld1 { v0.b }[2], [x25]\n"
+ "ld1 { v1.b }[2], [x22]\n"
+ "ld1 { v2.b }[2], [x21]\n"
+ "ld1 { v3.b }[2], [x20]\n"
"b 106f\n"
"105:" // Height 4: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x10, #0x0]\n"
- "ldr b1, [x23, #0x0]\n"
- "ldr b2, [x22, #0x0]\n"
- "ldr b3, [x21, #0x0]\n"
+ "ldr b0, [x25, #0x0]\n"
+ "ldr b1, [x22, #0x0]\n"
+ "ldr b2, [x21, #0x0]\n"
+ "ldr b3, [x20, #0x0]\n"
"106:" // Height 4: Multiply loop: Ragged operand read: Done
"tbnz %x[flags], #31, 107f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
@@ -1735,16 +1783,16 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
".inst 0x4e8f944d // sdot v13.4s, v2.16b, v15.16b\n"
".inst 0x4e8f946e // sdot v14.4s, v3.16b, v15.16b\n"
"107:" // Height 4: Multiply loop: unique 16: skip row sum
- "ldr q10, [x13, #0x0]\n"
+ "ldr q10, [x10, #0x0]\n"
+ "ldr q4, [x10, #0x10]\n"
+ "ldr q5, [x10, #0x20]\n"
".inst 0x4f80e150 // sdot v16.4s, v10.16b, v0.4b[0]\n"
- "ldr q4, [x13, #0x10]\n"
+ "ldr q6, [x10, #0x30]\n"
".inst 0x4f81e154 // sdot v20.4s, v10.16b, v1.4b[0]\n"
- "ldr q5, [x13, #0x20]\n"
+ "add x10, x10, #0x40\n"
".inst 0x4f82e158 // sdot v24.4s, v10.16b, v2.4b[0]\n"
- "ldr q6, [x13, #0x30]\n"
".inst 0x4f83e15c // sdot v28.4s, v10.16b, v3.4b[0]\n"
".inst 0x4f80e091 // sdot v17.4s, v4.16b, v0.4b[0]\n"
- "add x13, x13, #0x40\n"
".inst 0x4f81e095 // sdot v21.4s, v4.16b, v1.4b[0]\n"
".inst 0x4f82e099 // sdot v25.4s, v4.16b, v2.4b[0]\n"
".inst 0x4f83e09d // sdot v29.4s, v4.16b, v3.4b[0]\n"
@@ -1757,28 +1805,28 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
".inst 0x4f82e0db // sdot v27.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0df // sdot v31.4s, v6.16b, v3.4b[0]\n"
"108:" // Height 4: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x12, x12, #0x1\n"
- "cmp x12, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 94b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x14, x20\n"
- "add x21, x22, x20\n"
- "add x20, x21, x20\n"
- "prfm pstl1keep, [x14, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x21, x28, x19\n"
"prfm pstl1keep, [x21, #0x0]\n"
+ "add x20, x21, x19\n"
"prfm pstl1keep, [x20, #0x0]\n"
+ "add x19, x20, x19\n"
+ "prfm pstl1keep, [x19, #0x0]\n"
"tbnz %x[flags], #31, 109f\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
"addp v13.4s, v13.4s, v13.4s\n"
"addp v14.4s, v14.4s, v14.4s\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
- "neg v4.4s, v4.4s\n"
+ "add x22, %x[qp], %[b_offset]\n"
+ "ld1r { v4.4s }, [x22]\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
+ "neg v4.4s, v4.4s\n"
"addp v13.4s, v13.4s, v13.4s\n"
"addp v14.4s, v14.4s, v14.4s\n"
"mul v11.4s, v11.4s, v4.4s\n"
@@ -1786,13 +1834,9 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"mul v13.4s, v13.4s, v4.4s\n"
"mul v14.4s, v14.4s, v4.4s\n"
"109:" // Height 4: skip row sum fixup
- "ldr q0, [x16, #0x0]\n"
"add v16.4s, v16.4s, v11.4s\n"
- "ldr q1, [x16, #0x10]\n"
"add v17.4s, v17.4s, v11.4s\n"
- "ldr q2, [x16, #0x20]\n"
"add v18.4s, v18.4s, v11.4s\n"
- "ldr q3, [x16, #0x30]\n"
"add v19.4s, v19.4s, v11.4s\n"
"add v20.4s, v20.4s, v12.4s\n"
"add v21.4s, v21.4s, v12.4s\n"
@@ -1806,34 +1850,39 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"add v29.4s, v29.4s, v14.4s\n"
"add v30.4s, v30.4s, v14.4s\n"
"add v31.4s, v31.4s, v14.4s\n"
+ "ldr q0, [x9, #0x0]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
+ "ldr q1, [x9, #0x10]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
+ "ldr q2, [x9, #0x20]\n"
+ "add x22, %x[qp], %[per_layer_mul]\n"
"add v16.4s, v16.4s, v0.4s\n"
- "add v17.4s, v17.4s, v1.4s\n"
- "add v18.4s, v18.4s, v2.4s\n"
- "add v19.4s, v19.4s, v3.4s\n"
"add v20.4s, v20.4s, v0.4s\n"
+ "add v17.4s, v17.4s, v1.4s\n"
"add v21.4s, v21.4s, v1.4s\n"
+ "add v18.4s, v18.4s, v2.4s\n"
"add v22.4s, v22.4s, v2.4s\n"
- "add v23.4s, v23.4s, v3.4s\n"
"add v24.4s, v24.4s, v0.4s\n"
"add v25.4s, v25.4s, v1.4s\n"
"add v26.4s, v26.4s, v2.4s\n"
- "add v27.4s, v27.4s, v3.4s\n"
"add v28.4s, v28.4s, v0.4s\n"
"add v29.4s, v29.4s, v1.4s\n"
"add v30.4s, v30.4s, v2.4s\n"
- "add v31.4s, v31.4s, v3.4s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x23]\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
+ "ldr q3, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
"ld1r { v0.4s }, [x23]\n"
+ "ld1r { v4.4s }, [x22]\n"
+ "add v19.4s, v19.4s, v3.4s\n"
+ "add v23.4s, v23.4s, v3.4s\n"
+ "add v27.4s, v27.4s, v3.4s\n"
+ "add v31.4s, v31.4s, v3.4s\n"
"sqrdmulh v16.4s, v16.4s, v4.4s\n"
"sqrdmulh v17.4s, v17.4s, v4.4s\n"
"sqrdmulh v18.4s, v18.4s, v4.4s\n"
- "sqrdmulh v19.4s, v19.4s, v4.4s\n"
"sqrdmulh v20.4s, v20.4s, v4.4s\n"
"sqrdmulh v21.4s, v21.4s, v4.4s\n"
"sqrdmulh v22.4s, v22.4s, v4.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v4.4s\n"
"sqrdmulh v23.4s, v23.4s, v4.4s\n"
"sqrdmulh v24.4s, v24.4s, v4.4s\n"
"sqrdmulh v25.4s, v25.4s, v4.4s\n"
@@ -1843,54 +1892,53 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"sqrdmulh v29.4s, v29.4s, v4.4s\n"
"sqrdmulh v30.4s, v30.4s, v4.4s\n"
"sqrdmulh v31.4s, v31.4s, v4.4s\n"
- "add x16, x16, #0x40\n"
"tbz %x[flags], #5, 110f\n"
"and v4.16b, v16.16b, v0.16b\n"
"and v5.16b, v17.16b, v0.16b\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v4.4s\n"
- "sqadd v17.4s, v17.4s, v5.4s\n"
"and v6.16b, v18.16b, v0.16b\n"
"and v7.16b, v19.16b, v0.16b\n"
"and v8.16b, v20.16b, v0.16b\n"
"and v9.16b, v21.16b, v0.16b\n"
"and v10.16b, v22.16b, v0.16b\n"
- "and v4.16b, v23.16b, v0.16b\n"
- "and v5.16b, v24.16b, v0.16b\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sshr v8.4s, v8.4s, #0x1f\n"
"sshr v9.4s, v9.4s, #0x1f\n"
"sshr v10.4s, v10.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
+ "sqadd v16.4s, v16.4s, v4.4s\n"
+ "and v4.16b, v23.16b, v0.16b\n"
+ "sqadd v17.4s, v17.4s, v5.4s\n"
"sqadd v18.4s, v18.4s, v6.4s\n"
"sqadd v19.4s, v19.4s, v7.4s\n"
"sqadd v20.4s, v20.4s, v8.4s\n"
"sqadd v21.4s, v21.4s, v9.4s\n"
"sqadd v22.4s, v22.4s, v10.4s\n"
- "sqadd v23.4s, v23.4s, v4.4s\n"
- "sqadd v24.4s, v24.4s, v5.4s\n"
+ "and v5.16b, v24.16b, v0.16b\n"
"and v6.16b, v25.16b, v0.16b\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
"and v7.16b, v26.16b, v0.16b\n"
"and v8.16b, v27.16b, v0.16b\n"
"and v9.16b, v28.16b, v0.16b\n"
"and v10.16b, v29.16b, v0.16b\n"
- "and v4.16b, v30.16b, v0.16b\n"
- "and v5.16b, v31.16b, v0.16b\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
+ "sqadd v23.4s, v23.4s, v4.4s\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sshr v8.4s, v8.4s, #0x1f\n"
"sshr v9.4s, v9.4s, #0x1f\n"
"sshr v10.4s, v10.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
+ "and v4.16b, v30.16b, v0.16b\n"
+ "sqadd v24.4s, v24.4s, v5.4s\n"
"sqadd v25.4s, v25.4s, v6.4s\n"
"sqadd v26.4s, v26.4s, v7.4s\n"
+ "and v5.16b, v31.16b, v0.16b\n"
"sqadd v27.4s, v27.4s, v8.4s\n"
"sqadd v28.4s, v28.4s, v9.4s\n"
"sqadd v29.4s, v29.4s, v10.4s\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
"sqadd v30.4s, v30.4s, v4.4s\n"
"sqadd v31.4s, v31.4s, v5.4s\n"
"110:" // Height 4: no shift correction
@@ -1910,8 +1958,13 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"srshl v29.4s, v29.4s, v0.4s\n"
"srshl v30.4s, v30.4s, v0.4s\n"
"srshl v31.4s, v31.4s, v0.4s\n"
- "add x23, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
+ "add x22, %x[qp], %[c_offset]\n"
+ "add x23, %x[qp], %[minval]\n"
+ "ld1r { v4.4s }, [x22]\n"
+ "add x22, %x[qp], %[maxval]\n"
+ "ld1r { v5.4s }, [x23]\n"
+ "cmp x11, #0x10\n"
+ "ld1r { v6.4s }, [x22]\n"
"add v16.4s, v16.4s, v4.4s\n"
"add v17.4s, v17.4s, v4.4s\n"
"add v18.4s, v18.4s, v4.4s\n"
@@ -1922,14 +1975,6 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"add v23.4s, v23.4s, v4.4s\n"
"add v24.4s, v24.4s, v4.4s\n"
"add v25.4s, v25.4s, v4.4s\n"
- "add v26.4s, v26.4s, v4.4s\n"
- "add v27.4s, v27.4s, v4.4s\n"
- "add v28.4s, v28.4s, v4.4s\n"
- "add v29.4s, v29.4s, v4.4s\n"
- "add v30.4s, v30.4s, v4.4s\n"
- "add v31.4s, v31.4s, v4.4s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x23]\n"
"smin v16.4s, v16.4s, v6.4s\n"
"smin v17.4s, v17.4s, v6.4s\n"
"smin v18.4s, v18.4s, v6.4s\n"
@@ -1940,14 +1985,6 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"smin v23.4s, v23.4s, v6.4s\n"
"smin v24.4s, v24.4s, v6.4s\n"
"smin v25.4s, v25.4s, v6.4s\n"
- "smin v26.4s, v26.4s, v6.4s\n"
- "smin v27.4s, v27.4s, v6.4s\n"
- "smin v28.4s, v28.4s, v6.4s\n"
- "smin v29.4s, v29.4s, v6.4s\n"
- "smin v30.4s, v30.4s, v6.4s\n"
- "smin v31.4s, v31.4s, v6.4s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x23]\n"
"smax v16.4s, v16.4s, v5.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
"smax v18.4s, v18.4s, v5.4s\n"
@@ -1958,141 +1995,152 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"smax v23.4s, v23.4s, v5.4s\n"
"smax v24.4s, v24.4s, v5.4s\n"
"smax v25.4s, v25.4s, v5.4s\n"
+ "add v26.4s, v26.4s, v4.4s\n"
+ "add v27.4s, v27.4s, v4.4s\n"
+ "add v28.4s, v28.4s, v4.4s\n"
+ "add v29.4s, v29.4s, v4.4s\n"
+ "add v30.4s, v30.4s, v4.4s\n"
+ "add v31.4s, v31.4s, v4.4s\n"
+ "uzp1 v16.8h, v16.8h, v17.8h\n"
+ "uzp1 v17.8h, v18.8h, v19.8h\n"
+ "uzp1 v20.8h, v20.8h, v21.8h\n"
+ "uzp1 v21.8h, v22.8h, v23.8h\n"
+ "smin v26.4s, v26.4s, v6.4s\n"
+ "smin v27.4s, v27.4s, v6.4s\n"
+ "smin v28.4s, v28.4s, v6.4s\n"
+ "smin v29.4s, v29.4s, v6.4s\n"
+ "smin v30.4s, v30.4s, v6.4s\n"
+ "smin v31.4s, v31.4s, v6.4s\n"
+ "uzp1 v24.8h, v24.8h, v25.8h\n"
+ "uzp1 v16.16b, v16.16b, v17.16b\n"
+ "uzp1 v20.16b, v20.16b, v21.16b\n"
"smax v26.4s, v26.4s, v5.4s\n"
"smax v27.4s, v27.4s, v5.4s\n"
"smax v28.4s, v28.4s, v5.4s\n"
"smax v29.4s, v29.4s, v5.4s\n"
"smax v30.4s, v30.4s, v5.4s\n"
"smax v31.4s, v31.4s, v5.4s\n"
- "uzp1 v16.8h, v16.8h, v17.8h\n"
- "uzp1 v17.8h, v18.8h, v19.8h\n"
- "uzp1 v20.8h, v20.8h, v21.8h\n"
- "uzp1 v21.8h, v22.8h, v23.8h\n"
- "uzp1 v24.8h, v24.8h, v25.8h\n"
"uzp1 v25.8h, v26.8h, v27.8h\n"
"uzp1 v28.8h, v28.8h, v29.8h\n"
"uzp1 v29.8h, v30.8h, v31.8h\n"
- "cmp x15, #0x10\n"
- "uzp1 v16.16b, v16.16b, v17.16b\n"
- "uzp1 v20.16b, v20.16b, v21.16b\n"
"uzp1 v24.16b, v24.16b, v25.16b\n"
"uzp1 v28.16b, v28.16b, v29.16b\n"
"bge 119f\n"
- "tbz x15, #3, 114f\n"
- "str d16, [x14], #0x8\n"
- "str d20, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
- "str d28, [x20], #0x8\n"
- "tbz x15, #2, 112f\n"
- "st1 { v16.s }[2], [x14], #0x4\n"
- "st1 { v20.s }[2], [x22], #0x4\n"
- "st1 { v24.s }[2], [x21], #0x4\n"
- "st1 { v28.s }[2], [x20], #0x4\n"
- "tbz x15, #1, 111f\n"
- "st1 { v16.h }[6], [x14], #0x2\n"
- "st1 { v20.h }[6], [x22], #0x2\n"
- "st1 { v24.h }[6], [x21], #0x2\n"
- "st1 { v28.h }[6], [x20], #0x2\n"
- "tbz x15, #0, 118f\n"
- "st1 { v16.b }[14], [x14]\n"
- "st1 { v20.b }[14], [x22]\n"
- "st1 { v24.b }[14], [x21]\n"
- "st1 { v28.b }[14], [x20]\n"
+ "tbz x11, #3, 114f\n"
+ "str d16, [x28], #0x8\n"
+ "str d20, [x21], #0x8\n"
+ "str d24, [x20], #0x8\n"
+ "str d28, [x19], #0x8\n"
+ "tbz x11, #2, 112f\n"
+ "st1 { v16.s }[2], [x28], #0x4\n"
+ "st1 { v20.s }[2], [x21], #0x4\n"
+ "st1 { v24.s }[2], [x20], #0x4\n"
+ "st1 { v28.s }[2], [x19], #0x4\n"
+ "tbz x11, #1, 111f\n"
+ "st1 { v16.h }[6], [x28], #0x2\n"
+ "st1 { v20.h }[6], [x21], #0x2\n"
+ "st1 { v24.h }[6], [x20], #0x2\n"
+ "st1 { v28.h }[6], [x19], #0x2\n"
+ "tbz x11, #0, 118f\n"
+ "st1 { v16.b }[14], [x28]\n"
+ "st1 { v20.b }[14], [x21]\n"
+ "st1 { v24.b }[14], [x20]\n"
+ "st1 { v28.b }[14], [x19]\n"
"b 118f\n"
"111:" // Height 4: Partial direct writeback: partial_1_12
- "tbz x15, #0, 118f\n"
- "st1 { v16.b }[12], [x14]\n"
- "st1 { v20.b }[12], [x22]\n"
- "st1 { v24.b }[12], [x21]\n"
- "st1 { v28.b }[12], [x20]\n"
+ "tbz x11, #0, 118f\n"
+ "st1 { v16.b }[12], [x28]\n"
+ "st1 { v20.b }[12], [x21]\n"
+ "st1 { v24.b }[12], [x20]\n"
+ "st1 { v28.b }[12], [x19]\n"
"b 118f\n"
"112:" // Height 4: Partial direct writeback: partial_2_8
- "tbz x15, #1, 113f\n"
- "st1 { v16.h }[4], [x14], #0x2\n"
- "st1 { v20.h }[4], [x22], #0x2\n"
- "st1 { v24.h }[4], [x21], #0x2\n"
- "st1 { v28.h }[4], [x20], #0x2\n"
- "tbz x15, #0, 118f\n"
- "st1 { v16.b }[10], [x14]\n"
- "st1 { v20.b }[10], [x22]\n"
- "st1 { v24.b }[10], [x21]\n"
- "st1 { v28.b }[10], [x20]\n"
+ "tbz x11, #1, 113f\n"
+ "st1 { v16.h }[4], [x28], #0x2\n"
+ "st1 { v20.h }[4], [x21], #0x2\n"
+ "st1 { v24.h }[4], [x20], #0x2\n"
+ "st1 { v28.h }[4], [x19], #0x2\n"
+ "tbz x11, #0, 118f\n"
+ "st1 { v16.b }[10], [x28]\n"
+ "st1 { v20.b }[10], [x21]\n"
+ "st1 { v24.b }[10], [x20]\n"
+ "st1 { v28.b }[10], [x19]\n"
"b 118f\n"
"113:" // Height 4: Partial direct writeback: partial_1_8
- "tbz x15, #0, 118f\n"
- "st1 { v16.b }[8], [x14]\n"
- "st1 { v20.b }[8], [x22]\n"
- "st1 { v24.b }[8], [x21]\n"
- "st1 { v28.b }[8], [x20]\n"
+ "tbz x11, #0, 118f\n"
+ "st1 { v16.b }[8], [x28]\n"
+ "st1 { v20.b }[8], [x21]\n"
+ "st1 { v24.b }[8], [x20]\n"
+ "st1 { v28.b }[8], [x19]\n"
"b 118f\n"
"114:" // Height 4: Partial direct writeback: partial_4_0
- "tbz x15, #2, 116f\n"
- "str s16, [x14], #0x4\n"
- "str s20, [x22], #0x4\n"
- "str s24, [x21], #0x4\n"
- "str s28, [x20], #0x4\n"
- "tbz x15, #1, 115f\n"
- "st1 { v16.h }[2], [x14], #0x2\n"
- "st1 { v20.h }[2], [x22], #0x2\n"
- "st1 { v24.h }[2], [x21], #0x2\n"
- "st1 { v28.h }[2], [x20], #0x2\n"
- "tbz x15, #0, 118f\n"
- "st1 { v16.b }[6], [x14]\n"
- "st1 { v20.b }[6], [x22]\n"
- "st1 { v24.b }[6], [x21]\n"
- "st1 { v28.b }[6], [x20]\n"
+ "tbz x11, #2, 116f\n"
+ "str s16, [x28], #0x4\n"
+ "str s20, [x21], #0x4\n"
+ "str s24, [x20], #0x4\n"
+ "str s28, [x19], #0x4\n"
+ "tbz x11, #1, 115f\n"
+ "st1 { v16.h }[2], [x28], #0x2\n"
+ "st1 { v20.h }[2], [x21], #0x2\n"
+ "st1 { v24.h }[2], [x20], #0x2\n"
+ "st1 { v28.h }[2], [x19], #0x2\n"
+ "tbz x11, #0, 118f\n"
+ "st1 { v16.b }[6], [x28]\n"
+ "st1 { v20.b }[6], [x21]\n"
+ "st1 { v24.b }[6], [x20]\n"
+ "st1 { v28.b }[6], [x19]\n"
"b 118f\n"
"115:" // Height 4: Partial direct writeback: partial_1_4
- "tbz x15, #0, 118f\n"
- "st1 { v16.b }[4], [x14]\n"
- "st1 { v20.b }[4], [x22]\n"
- "st1 { v24.b }[4], [x21]\n"
- "st1 { v28.b }[4], [x20]\n"
+ "tbz x11, #0, 118f\n"
+ "st1 { v16.b }[4], [x28]\n"
+ "st1 { v20.b }[4], [x21]\n"
+ "st1 { v24.b }[4], [x20]\n"
+ "st1 { v28.b }[4], [x19]\n"
"b 118f\n"
"116:" // Height 4: Partial direct writeback: partial_2_0
- "tbz x15, #1, 117f\n"
- "str h16, [x14], #0x2\n"
- "str h20, [x22], #0x2\n"
- "str h24, [x21], #0x2\n"
- "str h28, [x20], #0x2\n"
- "tbz x15, #0, 118f\n"
- "st1 { v16.b }[2], [x14]\n"
- "st1 { v20.b }[2], [x22]\n"
- "st1 { v24.b }[2], [x21]\n"
- "st1 { v28.b }[2], [x20]\n"
+ "tbz x11, #1, 117f\n"
+ "str h16, [x28], #0x2\n"
+ "str h20, [x21], #0x2\n"
+ "str h24, [x20], #0x2\n"
+ "str h28, [x19], #0x2\n"
+ "tbz x11, #0, 118f\n"
+ "st1 { v16.b }[2], [x28]\n"
+ "st1 { v20.b }[2], [x21]\n"
+ "st1 { v24.b }[2], [x20]\n"
+ "st1 { v28.b }[2], [x19]\n"
"b 118f\n"
"117:" // Height 4: Partial direct writeback: partial_1_0
- "str b16, [x14, #0x0]\n"
- "str b20, [x22, #0x0]\n"
- "str b24, [x21, #0x0]\n"
- "str b28, [x20, #0x0]\n"
+ "str b16, [x28, #0x0]\n"
+ "str b20, [x21, #0x0]\n"
+ "str b24, [x20, #0x0]\n"
+ "str b28, [x19, #0x0]\n"
"118:" // Height 4: Partial direct writeback: Done
"b 120f\n"
"119:" // Height 4: Full writeback
- "str q16, [x14, #0x0]\n"
- "add x14, x14, #0x10\n"
- "str q20, [x22, #0x0]\n"
- "str q24, [x21, #0x0]\n"
- "str q28, [x20, #0x0]\n"
+ "str q16, [x28, #0x0]\n"
+ "add x28, x28, #0x10\n"
+ "str q20, [x21, #0x0]\n"
+ "str q24, [x20, #0x0]\n"
+ "str q28, [x19, #0x0]\n"
"120:" // Height 4: Writeback done
- "subs x15, x15, #0x10\n"
+ "subs x11, x11, #0x10\n"
"bgt 92b\n"
"subs %x[M], %x[M], #0x4\n"
"beq 122f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 121f\n"
- "add x21, x21, #0x4\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x4\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"121:" // Update direct input
- "mov x20, #0x4\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x4\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"122:" // Exit
: [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
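The hunks above are the whole of the revert for this file: the weight pointer returns from x13 to x10, the output pointer from x14 to x28, N from x15 to x11, the column-bias pointer from x16 to x9, and x19 rejoins the clobber list as a general scratch register. A minimal, self-contained sketch (hypothetical; not part of this patch) of why naming a callee-saved register such as x19 in a GCC/Clang extended-asm clobber list keeps this safe under AAPCS64:

#include <cstdint>

// Hypothetical sketch, not from the patch: use x19 as scratch inside
// extended asm. Because "x19" appears in the clobber list, the compiler
// excludes it from operand allocation and saves/restores it around the
// statement, so the enclosing function still preserves x19 for its
// caller as the AAPCS64 callee-saved contract requires.
static inline int64_t add_via_x19(int64_t a, int64_t b)
{
    int64_t out;
    __asm__ volatile(
        "mov x19, %x[a]\n"        // stash a in the clobbered scratch register
        "add %x[out], x19, %x[b]\n"
        : [out] "=r" (out)
        : [a] "r" (a), [b] "r" (b)
        : "x19"
    );
    return out;
}

With that guarantee in place, the generated kernel is free to renumber its address registers downward (here x25..x19 for the input rows) without changing the function's observable register state.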
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16/generic.cpp
index 485a47dc67..a1c4b34d38 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -85,69 +85,69 @@ void a64_hybrid_s8qa_dot_4x16 (
"cmp %x[M], #0x2\n"
"bgt 61f\n"
"beq 31f\n"
- "mov x10, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
- "movi v15.16b, #0x1\n"
- "bic %x[flags], %x[flags], #0x80000000\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "movi v15.16b, #0x1\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "mov x27, %x[col_bias]\n"
+ "bic %x[flags], %x[flags], #0x80000000\n"
+ "mov x26, %x[output_ptr]\n"
"2:" // Height 1: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
"movi v18.4s, #0x0\n"
"movi v19.4s, #0x0\n"
"3:" // Height 1: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"4:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 5f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "cbnz x26, 6f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "cbnz x25, 6f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19\n"
"b 6f\n"
"5:" // Height 1: setup direct input
- "mov x24, %x[input_ptr]\n"
+ "mov x23, %x[input_ptr]\n"
"6:" // Height 1: input setup done
- "cmp x25, #0x10\n"
+ "cmp x24, #0x10\n"
"blt 11f\n"
- "ldr q0, [x24, #0x0]\n"
+ "ldr q0, [x23, #0x0]\n"
"ldr q4, [x28, #0x0]\n"
- "cmp x25, #0x20\n"
+ "cmp x24, #0x20\n"
+ "blt 9f\n"
+ "7:" // Height 1: Multiply loop: Main loop head
+ ".inst 0x4f80e090 // sdot v16.4s, v4.16b, v0.4b[0]\n"
"ldr q5, [x28, #0x10]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x4f80e0b1 // sdot v17.4s, v5.16b, v0.4b[0]\n"
"ldr q6, [x28, #0x20]\n"
"ldr q7, [x28, #0x30]\n"
+ ".inst 0x4f80e0d2 // sdot v18.4s, v6.16b, v0.4b[0]\n"
"ldr q8, [x28, #0x40]\n"
+ ".inst 0x4f80e0f3 // sdot v19.4s, v7.16b, v0.4b[0]\n"
"ldr q9, [x28, #0x50]\n"
"ldr q10, [x28, #0x60]\n"
- "blt 9f\n"
- "7:" // Height 1: Multiply loop: Main loop head
- ".inst 0x4f80e090 // sdot v16.4s, v4.16b, v0.4b[0]\n"
+ ".inst 0x4fa0e110 // sdot v16.4s, v8.16b, v0.4b[1]\n"
"ldr q4, [x28, #0x70]\n"
- ".inst 0x4f80e0b1 // sdot v17.4s, v5.16b, v0.4b[0]\n"
"ldr q5, [x28, #0x80]\n"
- ".inst 0x4f80e0d2 // sdot v18.4s, v6.16b, v0.4b[0]\n"
+ ".inst 0x4fa0e131 // sdot v17.4s, v9.16b, v0.4b[1]\n"
+ ".inst 0x4fa0e152 // sdot v18.4s, v10.16b, v0.4b[1]\n"
"ldr q6, [x28, #0x90]\n"
- ".inst 0x4f80e0f3 // sdot v19.4s, v7.16b, v0.4b[0]\n"
"ldr q7, [x28, #0xa0]\n"
- ".inst 0x4fa0e110 // sdot v16.4s, v8.16b, v0.4b[1]\n"
+ ".inst 0x4fa0e093 // sdot v19.4s, v4.16b, v0.4b[1]\n"
"ldr q8, [x28, #0xb0]\n"
- ".inst 0x4fa0e131 // sdot v17.4s, v9.16b, v0.4b[1]\n"
+ ".inst 0x4f80e8b0 // sdot v16.4s, v5.16b, v0.4b[2]\n"
"ldr q9, [x28, #0xc0]\n"
- ".inst 0x4fa0e152 // sdot v18.4s, v10.16b, v0.4b[1]\n"
"ldr q10, [x28, #0xd0]\n"
- ".inst 0x4fa0e093 // sdot v19.4s, v4.16b, v0.4b[1]\n"
- "ldr q4, [x28, #0xe0]\n"
- ".inst 0x4f80e8b0 // sdot v16.4s, v5.16b, v0.4b[2]\n"
- "ldr q5, [x28, #0xf0]\n"
".inst 0x4f80e8d1 // sdot v17.4s, v6.16b, v0.4b[2]\n"
- "add x24, x24, #0x10\n"
".inst 0x4f80e8f2 // sdot v18.4s, v7.16b, v0.4b[2]\n"
+ "ldr q4, [x28, #0xe0]\n"
+ "ldr q5, [x28, #0xf0]\n"
".inst 0x4f80e913 // sdot v19.4s, v8.16b, v0.4b[2]\n"
"add x28, x28, #0x100\n"
".inst 0x4fa0e930 // sdot v16.4s, v9.16b, v0.4b[3]\n"
@@ -157,42 +157,42 @@ void a64_hybrid_s8qa_dot_4x16 (
"tbnz %x[flags], #31, 8f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
"8:" // Height 1: Multiply loop: unique 1: skip row sum
- "ldr q0, [x24, #0x0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "sub x24, x24, #0x10\n"
+ "ldr q0, [x23, #0x0]\n"
+ "cmp x24, #0x20\n"
"ldr q4, [x28, #0x0]\n"
- "sub x25, x25, #0x10\n"
- "cmp x25, #0x20\n"
+ "bge 7b\n"
+ "9:" // Height 1: Multiply loop: Single iteration only
+ ".inst 0x4f80e090 // sdot v16.4s, v4.16b, v0.4b[0]\n"
"ldr q5, [x28, #0x10]\n"
+ "sub x24, x24, #0x10\n"
+ ".inst 0x4f80e0b1 // sdot v17.4s, v5.16b, v0.4b[0]\n"
"ldr q6, [x28, #0x20]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x4f80e0d2 // sdot v18.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x28, #0x30]\n"
"ldr q8, [x28, #0x40]\n"
+ ".inst 0x4f80e0f3 // sdot v19.4s, v7.16b, v0.4b[0]\n"
"ldr q9, [x28, #0x50]\n"
+ ".inst 0x4fa0e110 // sdot v16.4s, v8.16b, v0.4b[1]\n"
"ldr q10, [x28, #0x60]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "bge 7b\n"
- "9:" // Height 1: Multiply loop: Single iteration only
- ".inst 0x4f80e090 // sdot v16.4s, v4.16b, v0.4b[0]\n"
"ldr q4, [x28, #0x70]\n"
- ".inst 0x4f80e0b1 // sdot v17.4s, v5.16b, v0.4b[0]\n"
+ ".inst 0x4fa0e131 // sdot v17.4s, v9.16b, v0.4b[1]\n"
"ldr q5, [x28, #0x80]\n"
- ".inst 0x4f80e0d2 // sdot v18.4s, v6.16b, v0.4b[0]\n"
"ldr q6, [x28, #0x90]\n"
- ".inst 0x4f80e0f3 // sdot v19.4s, v7.16b, v0.4b[0]\n"
+ ".inst 0x4fa0e152 // sdot v18.4s, v10.16b, v0.4b[1]\n"
+ ".inst 0x4fa0e093 // sdot v19.4s, v4.16b, v0.4b[1]\n"
"ldr q7, [x28, #0xa0]\n"
- ".inst 0x4fa0e110 // sdot v16.4s, v8.16b, v0.4b[1]\n"
"ldr q8, [x28, #0xb0]\n"
- ".inst 0x4fa0e131 // sdot v17.4s, v9.16b, v0.4b[1]\n"
+ ".inst 0x4f80e8b0 // sdot v16.4s, v5.16b, v0.4b[2]\n"
"ldr q9, [x28, #0xc0]\n"
- ".inst 0x4fa0e152 // sdot v18.4s, v10.16b, v0.4b[1]\n"
+ ".inst 0x4f80e8d1 // sdot v17.4s, v6.16b, v0.4b[2]\n"
"ldr q10, [x28, #0xd0]\n"
- ".inst 0x4fa0e093 // sdot v19.4s, v4.16b, v0.4b[1]\n"
"ldr q4, [x28, #0xe0]\n"
- ".inst 0x4f80e8b0 // sdot v16.4s, v5.16b, v0.4b[2]\n"
- "ldr q5, [x28, #0xf0]\n"
- ".inst 0x4f80e8d1 // sdot v17.4s, v6.16b, v0.4b[2]\n"
- "sub x25, x25, #0x10\n"
".inst 0x4f80e8f2 // sdot v18.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f80e913 // sdot v19.4s, v8.16b, v0.4b[2]\n"
- "add x24, x24, #0x10\n"
+ "ldr q5, [x28, #0xf0]\n"
"add x28, x28, #0x100\n"
".inst 0x4fa0e930 // sdot v16.4s, v9.16b, v0.4b[3]\n"
".inst 0x4fa0e951 // sdot v17.4s, v10.16b, v0.4b[3]\n"
@@ -201,83 +201,83 @@ void a64_hybrid_s8qa_dot_4x16 (
"tbnz %x[flags], #31, 10f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
"10:" // Height 1: Multiply loop: unique 2: skip row sum
- "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"11:" // Height 1: Multiply loop: Main loop skip
- "cbz x25, 18f\n"
- "cmp x25, #0x4\n"
+ "cbz x24, 18f\n"
+ "cmp x24, #0x4\n"
"blt 14f\n"
"12:" // Height 1: Multiply loop: Odd block loop
- "ldr s0, [x24], #0x4\n"
+ "ldr s0, [x23], #0x4\n"
"tbnz %x[flags], #31, 13f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
"13:" // Height 1: Multiply loop: unique 3: skip row sum
"ldr q6, [x28, #0x0]\n"
- "ldr q7, [x28, #0x10]\n"
- "sub x25, x25, #0x4\n"
- "cmp x25, #0x4\n"
- "ldr q8, [x28, #0x20]\n"
- "ldr q9, [x28, #0x30]\n"
".inst 0x4f80e0d0 // sdot v16.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "sub x24, x24, #0x4\n"
".inst 0x4f80e0f1 // sdot v17.4s, v7.16b, v0.4b[0]\n"
+ "ldr q8, [x28, #0x20]\n"
+ "cmp x24, #0x4\n"
".inst 0x4f80e112 // sdot v18.4s, v8.16b, v0.4b[0]\n"
- ".inst 0x4f80e133 // sdot v19.4s, v9.16b, v0.4b[0]\n"
+ "ldr q9, [x28, #0x30]\n"
"add x28, x28, #0x40\n"
+ ".inst 0x4f80e133 // sdot v19.4s, v9.16b, v0.4b[0]\n"
"bge 12b\n"
+ "cbz x24, 18f\n"
"14:" // Height 1: Multiply loop: Skip odd blocks
- "cbz x25, 18f\n"
- "tbz x25, #1, 15f\n"
- "ldr h0, [x24], #0x2\n"
- "tbz x25, #0, 16f\n"
- "ld1 { v0.b }[2], [x24]\n"
+ "tbz x24, #1, 15f\n"
+ "ldr h0, [x23], #0x2\n"
+ "tbz x24, #0, 16f\n"
+ "ld1 { v0.b }[2], [x23]\n"
"b 16f\n"
"15:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x24, #0x0]\n"
+ "ldr b0, [x23, #0x0]\n"
"16:" // Height 1: Multiply loop: Ragged operand read: Done
"tbnz %x[flags], #31, 17f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
"17:" // Height 1: Multiply loop: unique 4: skip row sum
"ldr q10, [x28, #0x0]\n"
- "ldr q4, [x28, #0x10]\n"
".inst 0x4f80e150 // sdot v16.4s, v10.16b, v0.4b[0]\n"
- ".inst 0x4f80e091 // sdot v17.4s, v4.16b, v0.4b[0]\n"
+ "ldr q4, [x28, #0x10]\n"
"ldr q5, [x28, #0x20]\n"
+ ".inst 0x4f80e091 // sdot v17.4s, v4.16b, v0.4b[0]\n"
"ldr q6, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
".inst 0x4f80e0b2 // sdot v18.4s, v5.16b, v0.4b[0]\n"
".inst 0x4f80e0d3 // sdot v19.4s, v6.16b, v0.4b[0]\n"
- "add x28, x28, #0x40\n"
"18:" // Height 1: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 4b\n"
- "prfm pstl1keep, [x27, #0x0]\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
"tbnz %x[flags], #31, 19f\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1r { v1.4s }, [x23]\n"
"addp v11.4s, v11.4s, v11.4s\n"
- "neg v1.4s, v1.4s\n"
+ "add x22, %x[qp], %[b_offset]\n"
"addp v11.4s, v11.4s, v11.4s\n"
+ "ld1r { v1.4s }, [x22]\n"
+ "neg v1.4s, v1.4s\n"
"mul v11.4s, v11.4s, v1.4s\n"
"19:" // Height 1: skip row sum fixup
- "ldr q0, [x10, #0x0]\n"
- "ldr q1, [x10, #0x10]\n"
"add v16.4s, v16.4s, v11.4s\n"
+ "ldr q0, [x27, #0x0]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"add v17.4s, v17.4s, v11.4s\n"
- "ldr q2, [x10, #0x20]\n"
- "ldr q3, [x10, #0x30]\n"
+ "ldr q1, [x27, #0x10]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
"add v18.4s, v18.4s, v11.4s\n"
+ "ldr q2, [x27, #0x20]\n"
+ "add x22, %x[qp], %[per_layer_mul]\n"
"add v19.4s, v19.4s, v11.4s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x23]\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
+ "ldr q3, [x27, #0x30]\n"
+ "add x27, x27, #0x40\n"
"add v16.4s, v16.4s, v0.4s\n"
+ "ld1r { v0.4s }, [x23]\n"
+ "ld1r { v4.4s }, [x22]\n"
"add v17.4s, v17.4s, v1.4s\n"
"add v18.4s, v18.4s, v2.4s\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x23]\n"
"add v19.4s, v19.4s, v3.4s\n"
"sqrdmulh v16.4s, v16.4s, v4.4s\n"
- "add x10, x10, #0x40\n"
"sqrdmulh v17.4s, v17.4s, v4.4s\n"
"sqrdmulh v18.4s, v18.4s, v4.4s\n"
"sqrdmulh v19.4s, v19.4s, v4.4s\n"
@@ -289,100 +289,100 @@ void a64_hybrid_s8qa_dot_4x16 (
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v16.4s, v16.4s, v4.4s\n"
"sqadd v17.4s, v17.4s, v5.4s\n"
"sqadd v18.4s, v18.4s, v6.4s\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v19.4s, v19.4s, v7.4s\n"
"20:" // Height 1: no shift correction
- "add x23, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
"srshl v16.4s, v16.4s, v0.4s\n"
+ "add x22, %x[qp], %[c_offset]\n"
+ "ld1r { v4.4s }, [x22]\n"
"srshl v17.4s, v17.4s, v0.4s\n"
+ "add x22, %x[qp], %[minval]\n"
"srshl v18.4s, v18.4s, v0.4s\n"
+ "ld1r { v5.4s }, [x22]\n"
+ "add x22, %x[qp], %[maxval]\n"
"srshl v19.4s, v19.4s, v0.4s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x23]\n"
+ "ld1r { v6.4s }, [x22]\n"
+ "cmp x9, #0x10\n"
"add v16.4s, v16.4s, v4.4s\n"
"add v17.4s, v17.4s, v4.4s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x23]\n"
"add v18.4s, v18.4s, v4.4s\n"
"add v19.4s, v19.4s, v4.4s\n"
- "cmp x9, #0x10\n"
"smin v16.4s, v16.4s, v6.4s\n"
"smin v17.4s, v17.4s, v6.4s\n"
"smin v18.4s, v18.4s, v6.4s\n"
- "smin v19.4s, v19.4s, v6.4s\n"
"smax v16.4s, v16.4s, v5.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
"smax v18.4s, v18.4s, v5.4s\n"
- "smax v19.4s, v19.4s, v5.4s\n"
+ "smin v19.4s, v19.4s, v6.4s\n"
"uzp1 v16.8h, v16.8h, v17.8h\n"
+ "smax v19.4s, v19.4s, v5.4s\n"
"uzp1 v17.8h, v18.8h, v19.8h\n"
"uzp1 v16.16b, v16.16b, v17.16b\n"
"bge 29f\n"
"tbz x9, #3, 24f\n"
- "str d16, [x27], #0x8\n"
+ "str d16, [x26], #0x8\n"
"tbz x9, #2, 22f\n"
- "st1 { v16.s }[2], [x27], #0x4\n"
+ "st1 { v16.s }[2], [x26], #0x4\n"
"tbz x9, #1, 21f\n"
- "st1 { v16.h }[6], [x27], #0x2\n"
+ "st1 { v16.h }[6], [x26], #0x2\n"
"tbz x9, #0, 28f\n"
- "st1 { v16.b }[14], [x27]\n"
+ "st1 { v16.b }[14], [x26]\n"
"b 28f\n"
"21:" // Height 1: Partial direct writeback: partial_1_12
"tbz x9, #0, 28f\n"
- "st1 { v16.b }[12], [x27]\n"
+ "st1 { v16.b }[12], [x26]\n"
"b 28f\n"
"22:" // Height 1: Partial direct writeback: partial_2_8
"tbz x9, #1, 23f\n"
- "st1 { v16.h }[4], [x27], #0x2\n"
+ "st1 { v16.h }[4], [x26], #0x2\n"
"tbz x9, #0, 28f\n"
- "st1 { v16.b }[10], [x27]\n"
+ "st1 { v16.b }[10], [x26]\n"
"b 28f\n"
"23:" // Height 1: Partial direct writeback: partial_1_8
"tbz x9, #0, 28f\n"
- "st1 { v16.b }[8], [x27]\n"
+ "st1 { v16.b }[8], [x26]\n"
"b 28f\n"
"24:" // Height 1: Partial direct writeback: partial_4_0
"tbz x9, #2, 26f\n"
- "str s16, [x27], #0x4\n"
+ "str s16, [x26], #0x4\n"
"tbz x9, #1, 25f\n"
- "st1 { v16.h }[2], [x27], #0x2\n"
+ "st1 { v16.h }[2], [x26], #0x2\n"
"tbz x9, #0, 28f\n"
- "st1 { v16.b }[6], [x27]\n"
+ "st1 { v16.b }[6], [x26]\n"
"b 28f\n"
"25:" // Height 1: Partial direct writeback: partial_1_4
"tbz x9, #0, 28f\n"
- "st1 { v16.b }[4], [x27]\n"
+ "st1 { v16.b }[4], [x26]\n"
"b 28f\n"
"26:" // Height 1: Partial direct writeback: partial_2_0
"tbz x9, #1, 27f\n"
- "str h16, [x27], #0x2\n"
+ "str h16, [x26], #0x2\n"
"tbz x9, #0, 28f\n"
- "st1 { v16.b }[2], [x27]\n"
+ "st1 { v16.b }[2], [x26]\n"
"b 28f\n"
"27:" // Height 1: Partial direct writeback: partial_1_0
- "str b16, [x27, #0x0]\n"
+ "str b16, [x26, #0x0]\n"
"28:" // Height 1: Partial direct writeback: Done
"b 30f\n"
"29:" // Height 1: Full writeback
- "str q16, [x27, #0x0]\n"
- "add x27, x27, #0x10\n"
+ "str q16, [x26, #0x0]\n"
+ "add x26, x26, #0x10\n"
"30:" // Height 1: Writeback done
"subs x9, x9, #0x10\n"
"bgt 2b\n"
"b 122f\n"
"31:" // Height 2
- "mov x10, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x27, %x[col_bias]\n"
"movi v12.4s, #0x0\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"bic %x[flags], %x[flags], #0x80000000\n"
"movi v15.16b, #0x1\n"
- "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "mov x26, %x[output_ptr]\n"
"32:" // Height 2: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -393,57 +393,57 @@ void a64_hybrid_s8qa_dot_4x16 (
"movi v22.4s, #0x0\n"
"movi v23.4s, #0x0\n"
"33:" // Height 2: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"34:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 35f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "cbnz x26, 36f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "cbnz x25, 36f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
"b 36f\n"
"35:" // Height 2: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19\n"
"36:" // Height 2: input setup done
- "cmp x25, #0x10\n"
+ "cmp x24, #0x10\n"
"blt 41f\n"
- "ldr q0, [x24, #0x0]\n"
- "ldr q1, [x23, #0x0]\n"
- "cmp x25, #0x20\n"
+ "ldr q0, [x23, #0x0]\n"
+ "ldr q1, [x22, #0x0]\n"
+ "cmp x24, #0x20\n"
"ldr q4, [x28, #0x0]\n"
- "ldr q5, [x28, #0x10]\n"
- "ldr q6, [x28, #0x20]\n"
- "ldr q7, [x28, #0x30]\n"
- "ldr q8, [x28, #0x40]\n"
- "ldr q9, [x28, #0x50]\n"
- "ldr q10, [x28, #0x60]\n"
"blt 39f\n"
"37:" // Height 2: Multiply loop: Main loop head
".inst 0x4f80e090 // sdot v16.4s, v4.16b, v0.4b[0]\n"
+ "ldr q5, [x28, #0x10]\n"
+ "add x23, x23, #0x10\n"
".inst 0x4f81e094 // sdot v20.4s, v4.16b, v1.4b[0]\n"
- "ldr q4, [x28, #0x70]\n"
- "add x24, x24, #0x10\n"
+ "ldr q6, [x28, #0x20]\n"
+ "add x22, x22, #0x10\n"
".inst 0x4f80e0b1 // sdot v17.4s, v5.16b, v0.4b[0]\n"
+ "ldr q7, [x28, #0x30]\n"
".inst 0x4f81e0b5 // sdot v21.4s, v5.16b, v1.4b[0]\n"
- "ldr q5, [x28, #0x80]\n"
- "add x23, x23, #0x10\n"
+ "ldr q8, [x28, #0x40]\n"
".inst 0x4f80e0d2 // sdot v18.4s, v6.16b, v0.4b[0]\n"
+ "ldr q9, [x28, #0x50]\n"
".inst 0x4f81e0d6 // sdot v22.4s, v6.16b, v1.4b[0]\n"
- "ldr q6, [x28, #0x90]\n"
+ "ldr q10, [x28, #0x60]\n"
+ "ldr q4, [x28, #0x70]\n"
".inst 0x4f80e0f3 // sdot v19.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0f7 // sdot v23.4s, v7.16b, v1.4b[0]\n"
- "ldr q7, [x28, #0xa0]\n"
+ "ldr q5, [x28, #0x80]\n"
".inst 0x4fa0e110 // sdot v16.4s, v8.16b, v0.4b[1]\n"
+ "ldr q6, [x28, #0x90]\n"
".inst 0x4fa1e114 // sdot v20.4s, v8.16b, v1.4b[1]\n"
- "ldr q8, [x28, #0xb0]\n"
+ "ldr q7, [x28, #0xa0]\n"
".inst 0x4fa0e131 // sdot v17.4s, v9.16b, v0.4b[1]\n"
+ "ldr q8, [x28, #0xb0]\n"
".inst 0x4fa1e135 // sdot v21.4s, v9.16b, v1.4b[1]\n"
"ldr q9, [x28, #0xc0]\n"
".inst 0x4fa0e152 // sdot v18.4s, v10.16b, v0.4b[1]\n"
@@ -474,40 +474,40 @@ void a64_hybrid_s8qa_dot_4x16 (
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
".inst 0x4e8f942c // sdot v12.4s, v1.16b, v15.16b\n"
"38:" // Height 2: Multiply loop: unique 5: skip row sum
- "ldr q0, [x24, #0x0]\n"
- "ldr q1, [x23, #0x0]\n"
- "sub x25, x25, #0x10\n"
- "cmp x25, #0x20\n"
- "ldr q4, [x28, #0x0]\n"
- "ldr q5, [x28, #0x10]\n"
- "ldr q6, [x28, #0x20]\n"
- "ldr q7, [x28, #0x30]\n"
- "ldr q8, [x28, #0x40]\n"
- "ldr q9, [x28, #0x50]\n"
- "ldr q10, [x28, #0x60]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"prfm pldl1keep, [x23, #0x80]\n"
+ "sub x24, x24, #0x10\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "cmp x24, #0x20\n"
+ "ldr q0, [x23, #0x0]\n"
+ "ldr q1, [x22, #0x0]\n"
+ "ldr q4, [x28, #0x0]\n"
"bge 37b\n"
"39:" // Height 2: Multiply loop: Single iteration only
".inst 0x4f80e090 // sdot v16.4s, v4.16b, v0.4b[0]\n"
+ "ldr q5, [x28, #0x10]\n"
+ "sub x24, x24, #0x10\n"
".inst 0x4f81e094 // sdot v20.4s, v4.16b, v1.4b[0]\n"
- "ldr q4, [x28, #0x70]\n"
- "sub x25, x25, #0x10\n"
+ "ldr q6, [x28, #0x20]\n"
+ "add x23, x23, #0x10\n"
".inst 0x4f80e0b1 // sdot v17.4s, v5.16b, v0.4b[0]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x22, x22, #0x10\n"
".inst 0x4f81e0b5 // sdot v21.4s, v5.16b, v1.4b[0]\n"
- "ldr q5, [x28, #0x80]\n"
- "add x24, x24, #0x10\n"
+ "ldr q8, [x28, #0x40]\n"
".inst 0x4f80e0d2 // sdot v18.4s, v6.16b, v0.4b[0]\n"
+ "ldr q9, [x28, #0x50]\n"
".inst 0x4f81e0d6 // sdot v22.4s, v6.16b, v1.4b[0]\n"
- "ldr q6, [x28, #0x90]\n"
- "add x23, x23, #0x10\n"
+ "ldr q10, [x28, #0x60]\n"
+ "ldr q4, [x28, #0x70]\n"
".inst 0x4f80e0f3 // sdot v19.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0f7 // sdot v23.4s, v7.16b, v1.4b[0]\n"
- "ldr q7, [x28, #0xa0]\n"
+ "ldr q5, [x28, #0x80]\n"
".inst 0x4fa0e110 // sdot v16.4s, v8.16b, v0.4b[1]\n"
+ "ldr q6, [x28, #0x90]\n"
".inst 0x4fa1e114 // sdot v20.4s, v8.16b, v1.4b[1]\n"
- "ldr q8, [x28, #0xb0]\n"
+ "ldr q7, [x28, #0xa0]\n"
".inst 0x4fa0e131 // sdot v17.4s, v9.16b, v0.4b[1]\n"
+ "ldr q8, [x28, #0xb0]\n"
".inst 0x4fa1e135 // sdot v21.4s, v9.16b, v1.4b[1]\n"
"ldr q9, [x28, #0xc0]\n"
".inst 0x4fa0e152 // sdot v18.4s, v10.16b, v0.4b[1]\n"
@@ -538,104 +538,104 @@ void a64_hybrid_s8qa_dot_4x16 (
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
".inst 0x4e8f942c // sdot v12.4s, v1.16b, v15.16b\n"
"40:" // Height 2: Multiply loop: unique 6: skip row sum
- "prfm pldl1keep, [x24, #0x80]\n"
"prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
"41:" // Height 2: Multiply loop: Main loop skip
- "cbz x25, 48f\n"
- "cmp x25, #0x4\n"
+ "cbz x24, 48f\n"
+ "cmp x24, #0x4\n"
"blt 44f\n"
"42:" // Height 2: Multiply loop: Odd block loop
- "ldr s0, [x24], #0x4\n"
- "ldr s1, [x23], #0x4\n"
+ "ldr s0, [x23], #0x4\n"
+ "ldr s1, [x22], #0x4\n"
"tbnz %x[flags], #31, 43f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
".inst 0x4e8f942c // sdot v12.4s, v1.16b, v15.16b\n"
"43:" // Height 2: Multiply loop: unique 7: skip row sum
"ldr q6, [x28, #0x0]\n"
- "ldr q7, [x28, #0x10]\n"
- "sub x25, x25, #0x4\n"
- "cmp x25, #0x4\n"
- "ldr q8, [x28, #0x20]\n"
- "ldr q9, [x28, #0x30]\n"
".inst 0x4f80e0d0 // sdot v16.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "sub x24, x24, #0x4\n"
".inst 0x4f81e0d4 // sdot v20.4s, v6.16b, v1.4b[0]\n"
+ "ldr q8, [x28, #0x20]\n"
+ "cmp x24, #0x4\n"
".inst 0x4f80e0f1 // sdot v17.4s, v7.16b, v0.4b[0]\n"
- ".inst 0x4f81e0f5 // sdot v21.4s, v7.16b, v1.4b[0]\n"
+ "ldr q9, [x28, #0x30]\n"
"add x28, x28, #0x40\n"
+ ".inst 0x4f81e0f5 // sdot v21.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f80e112 // sdot v18.4s, v8.16b, v0.4b[0]\n"
".inst 0x4f81e116 // sdot v22.4s, v8.16b, v1.4b[0]\n"
".inst 0x4f80e133 // sdot v19.4s, v9.16b, v0.4b[0]\n"
".inst 0x4f81e137 // sdot v23.4s, v9.16b, v1.4b[0]\n"
"bge 42b\n"
+ "cbz x24, 48f\n"
"44:" // Height 2: Multiply loop: Skip odd blocks
- "cbz x25, 48f\n"
- "tbz x25, #1, 45f\n"
- "ldr h0, [x24], #0x2\n"
- "ldr h1, [x23], #0x2\n"
- "tbz x25, #0, 46f\n"
- "ld1 { v0.b }[2], [x24]\n"
- "ld1 { v1.b }[2], [x23]\n"
+ "tbz x24, #1, 45f\n"
+ "ldr h0, [x23], #0x2\n"
+ "ldr h1, [x22], #0x2\n"
+ "tbz x24, #0, 46f\n"
+ "ld1 { v0.b }[2], [x23]\n"
+ "ld1 { v1.b }[2], [x22]\n"
"b 46f\n"
"45:" // Height 2: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x24, #0x0]\n"
- "ldr b1, [x23, #0x0]\n"
+ "ldr b0, [x23, #0x0]\n"
+ "ldr b1, [x22, #0x0]\n"
"46:" // Height 2: Multiply loop: Ragged operand read: Done
"tbnz %x[flags], #31, 47f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
".inst 0x4e8f942c // sdot v12.4s, v1.16b, v15.16b\n"
"47:" // Height 2: Multiply loop: unique 8: skip row sum
"ldr q10, [x28, #0x0]\n"
- "ldr q4, [x28, #0x10]\n"
".inst 0x4f80e150 // sdot v16.4s, v10.16b, v0.4b[0]\n"
+ "ldr q4, [x28, #0x10]\n"
".inst 0x4f81e154 // sdot v20.4s, v10.16b, v1.4b[0]\n"
"ldr q5, [x28, #0x20]\n"
"ldr q6, [x28, #0x30]\n"
".inst 0x4f80e091 // sdot v17.4s, v4.16b, v0.4b[0]\n"
+ "add x28, x28, #0x40\n"
".inst 0x4f81e095 // sdot v21.4s, v4.16b, v1.4b[0]\n"
".inst 0x4f80e0b2 // sdot v18.4s, v5.16b, v0.4b[0]\n"
".inst 0x4f81e0b6 // sdot v22.4s, v5.16b, v1.4b[0]\n"
- "add x28, x28, #0x40\n"
".inst 0x4f80e0d3 // sdot v19.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0d7 // sdot v23.4s, v6.16b, v1.4b[0]\n"
"48:" // Height 2: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 34b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x27, x20\n"
- "prfm pstl1keep, [x27, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x21, x26, x19\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
"tbnz %x[flags], #31, 49f\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1r { v2.4s }, [x23]\n"
"addp v11.4s, v11.4s, v11.4s\n"
+ "add x22, %x[qp], %[b_offset]\n"
+ "ld1r { v2.4s }, [x22]\n"
"addp v12.4s, v12.4s, v12.4s\n"
- "neg v2.4s, v2.4s\n"
"addp v11.4s, v11.4s, v11.4s\n"
+ "neg v2.4s, v2.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
"mul v11.4s, v11.4s, v2.4s\n"
"mul v12.4s, v12.4s, v2.4s\n"
"49:" // Height 2: skip row sum fixup
- "ldr q0, [x10, #0x0]\n"
- "ldr q1, [x10, #0x10]\n"
"add v16.4s, v16.4s, v11.4s\n"
+ "ldr q0, [x27, #0x0]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"add v17.4s, v17.4s, v11.4s\n"
- "ldr q2, [x10, #0x20]\n"
- "ldr q3, [x10, #0x30]\n"
+ "ldr q1, [x27, #0x10]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
"add v18.4s, v18.4s, v11.4s\n"
+ "ldr q2, [x27, #0x20]\n"
+ "add x22, %x[qp], %[per_layer_mul]\n"
"add v19.4s, v19.4s, v11.4s\n"
+ "ldr q3, [x27, #0x30]\n"
+ "add x27, x27, #0x40\n"
"add v20.4s, v20.4s, v12.4s\n"
+ "ld1r { v4.4s }, [x22]\n"
"add v21.4s, v21.4s, v12.4s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x23]\n"
"add v22.4s, v22.4s, v12.4s\n"
"add v23.4s, v23.4s, v12.4s\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
"add v16.4s, v16.4s, v0.4s\n"
"add v17.4s, v17.4s, v1.4s\n"
- "add x10, x10, #0x40\n"
"add v18.4s, v18.4s, v2.4s\n"
"add v19.4s, v19.4s, v3.4s\n"
"add v20.4s, v20.4s, v0.4s\n"
@@ -653,154 +653,154 @@ void a64_hybrid_s8qa_dot_4x16 (
"sqrdmulh v23.4s, v23.4s, v4.4s\n"
"tbz %x[flags], #5, 50f\n"
"and v4.16b, v16.16b, v0.16b\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v4.4s\n"
"and v5.16b, v17.16b, v0.16b\n"
"and v6.16b, v18.16b, v0.16b\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
+ "sshr v6.4s, v6.4s, #0x1f\n"
+ "sqadd v16.4s, v16.4s, v4.4s\n"
+ "sqadd v17.4s, v17.4s, v5.4s\n"
+ "sqadd v18.4s, v18.4s, v6.4s\n"
"and v7.16b, v19.16b, v0.16b\n"
"and v8.16b, v20.16b, v0.16b\n"
"and v9.16b, v21.16b, v0.16b\n"
- "and v10.16b, v22.16b, v0.16b\n"
- "and v4.16b, v23.16b, v0.16b\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sshr v8.4s, v8.4s, #0x1f\n"
"sshr v9.4s, v9.4s, #0x1f\n"
- "sshr v10.4s, v10.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sqadd v17.4s, v17.4s, v5.4s\n"
- "sqadd v18.4s, v18.4s, v6.4s\n"
"sqadd v19.4s, v19.4s, v7.4s\n"
"sqadd v20.4s, v20.4s, v8.4s\n"
"sqadd v21.4s, v21.4s, v9.4s\n"
+ "and v10.16b, v22.16b, v0.16b\n"
+ "and v4.16b, v23.16b, v0.16b\n"
+ "sshr v10.4s, v10.4s, #0x1f\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
"sqadd v22.4s, v22.4s, v10.4s\n"
"sqadd v23.4s, v23.4s, v4.4s\n"
"50:" // Height 2: no shift correction
- "add x23, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
"srshl v16.4s, v16.4s, v0.4s\n"
+ "add x22, %x[qp], %[c_offset]\n"
+ "ld1r { v4.4s }, [x22]\n"
"srshl v17.4s, v17.4s, v0.4s\n"
+ "add x22, %x[qp], %[minval]\n"
"srshl v18.4s, v18.4s, v0.4s\n"
+ "ld1r { v5.4s }, [x22]\n"
+ "add x22, %x[qp], %[maxval]\n"
"srshl v19.4s, v19.4s, v0.4s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x23]\n"
+ "ld1r { v6.4s }, [x22]\n"
+ "cmp x9, #0x10\n"
"srshl v20.4s, v20.4s, v0.4s\n"
"srshl v21.4s, v21.4s, v0.4s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x23]\n"
- "srshl v22.4s, v22.4s, v0.4s\n"
- "srshl v23.4s, v23.4s, v0.4s\n"
- "cmp x9, #0x10\n"
"add v16.4s, v16.4s, v4.4s\n"
"add v17.4s, v17.4s, v4.4s\n"
"add v18.4s, v18.4s, v4.4s\n"
- "add v19.4s, v19.4s, v4.4s\n"
- "add v20.4s, v20.4s, v4.4s\n"
- "add v21.4s, v21.4s, v4.4s\n"
- "add v22.4s, v22.4s, v4.4s\n"
- "add v23.4s, v23.4s, v4.4s\n"
"smin v16.4s, v16.4s, v6.4s\n"
"smin v17.4s, v17.4s, v6.4s\n"
"smin v18.4s, v18.4s, v6.4s\n"
- "smin v19.4s, v19.4s, v6.4s\n"
- "smin v20.4s, v20.4s, v6.4s\n"
- "smin v21.4s, v21.4s, v6.4s\n"
- "smin v22.4s, v22.4s, v6.4s\n"
- "smin v23.4s, v23.4s, v6.4s\n"
"smax v16.4s, v16.4s, v5.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
"smax v18.4s, v18.4s, v5.4s\n"
+ "add v19.4s, v19.4s, v4.4s\n"
+ "add v20.4s, v20.4s, v4.4s\n"
+ "add v21.4s, v21.4s, v4.4s\n"
+ "smin v19.4s, v19.4s, v6.4s\n"
+ "smin v20.4s, v20.4s, v6.4s\n"
+ "smin v21.4s, v21.4s, v6.4s\n"
"smax v19.4s, v19.4s, v5.4s\n"
"smax v20.4s, v20.4s, v5.4s\n"
"smax v21.4s, v21.4s, v5.4s\n"
- "smax v22.4s, v22.4s, v5.4s\n"
- "smax v23.4s, v23.4s, v5.4s\n"
+ "srshl v22.4s, v22.4s, v0.4s\n"
+ "srshl v23.4s, v23.4s, v0.4s\n"
"uzp1 v16.8h, v16.8h, v17.8h\n"
"uzp1 v17.8h, v18.8h, v19.8h\n"
+ "add v22.4s, v22.4s, v4.4s\n"
+ "add v23.4s, v23.4s, v4.4s\n"
"uzp1 v20.8h, v20.8h, v21.8h\n"
- "uzp1 v21.8h, v22.8h, v23.8h\n"
+ "smin v22.4s, v22.4s, v6.4s\n"
+ "smin v23.4s, v23.4s, v6.4s\n"
"uzp1 v16.16b, v16.16b, v17.16b\n"
+ "smax v22.4s, v22.4s, v5.4s\n"
+ "smax v23.4s, v23.4s, v5.4s\n"
+ "uzp1 v21.8h, v22.8h, v23.8h\n"
"uzp1 v20.16b, v20.16b, v21.16b\n"
"bge 59f\n"
"tbz x9, #3, 54f\n"
- "str d16, [x27], #0x8\n"
- "str d20, [x22], #0x8\n"
+ "str d16, [x26], #0x8\n"
+ "str d20, [x21], #0x8\n"
"tbz x9, #2, 52f\n"
- "st1 { v16.s }[2], [x27], #0x4\n"
- "st1 { v20.s }[2], [x22], #0x4\n"
+ "st1 { v16.s }[2], [x26], #0x4\n"
+ "st1 { v20.s }[2], [x21], #0x4\n"
"tbz x9, #1, 51f\n"
- "st1 { v16.h }[6], [x27], #0x2\n"
- "st1 { v20.h }[6], [x22], #0x2\n"
+ "st1 { v16.h }[6], [x26], #0x2\n"
+ "st1 { v20.h }[6], [x21], #0x2\n"
"tbz x9, #0, 58f\n"
- "st1 { v16.b }[14], [x27]\n"
- "st1 { v20.b }[14], [x22]\n"
+ "st1 { v16.b }[14], [x26]\n"
+ "st1 { v20.b }[14], [x21]\n"
"b 58f\n"
"51:" // Height 2: Partial direct writeback: partial_1_12
"tbz x9, #0, 58f\n"
- "st1 { v16.b }[12], [x27]\n"
- "st1 { v20.b }[12], [x22]\n"
+ "st1 { v16.b }[12], [x26]\n"
+ "st1 { v20.b }[12], [x21]\n"
"b 58f\n"
"52:" // Height 2: Partial direct writeback: partial_2_8
"tbz x9, #1, 53f\n"
- "st1 { v16.h }[4], [x27], #0x2\n"
- "st1 { v20.h }[4], [x22], #0x2\n"
+ "st1 { v16.h }[4], [x26], #0x2\n"
+ "st1 { v20.h }[4], [x21], #0x2\n"
"tbz x9, #0, 58f\n"
- "st1 { v16.b }[10], [x27]\n"
- "st1 { v20.b }[10], [x22]\n"
+ "st1 { v16.b }[10], [x26]\n"
+ "st1 { v20.b }[10], [x21]\n"
"b 58f\n"
"53:" // Height 2: Partial direct writeback: partial_1_8
"tbz x9, #0, 58f\n"
- "st1 { v16.b }[8], [x27]\n"
- "st1 { v20.b }[8], [x22]\n"
+ "st1 { v16.b }[8], [x26]\n"
+ "st1 { v20.b }[8], [x21]\n"
"b 58f\n"
"54:" // Height 2: Partial direct writeback: partial_4_0
"tbz x9, #2, 56f\n"
- "str s16, [x27], #0x4\n"
- "str s20, [x22], #0x4\n"
+ "str s16, [x26], #0x4\n"
+ "str s20, [x21], #0x4\n"
"tbz x9, #1, 55f\n"
- "st1 { v16.h }[2], [x27], #0x2\n"
- "st1 { v20.h }[2], [x22], #0x2\n"
+ "st1 { v16.h }[2], [x26], #0x2\n"
+ "st1 { v20.h }[2], [x21], #0x2\n"
"tbz x9, #0, 58f\n"
- "st1 { v16.b }[6], [x27]\n"
- "st1 { v20.b }[6], [x22]\n"
+ "st1 { v16.b }[6], [x26]\n"
+ "st1 { v20.b }[6], [x21]\n"
"b 58f\n"
"55:" // Height 2: Partial direct writeback: partial_1_4
"tbz x9, #0, 58f\n"
- "st1 { v16.b }[4], [x27]\n"
- "st1 { v20.b }[4], [x22]\n"
+ "st1 { v16.b }[4], [x26]\n"
+ "st1 { v20.b }[4], [x21]\n"
"b 58f\n"
"56:" // Height 2: Partial direct writeback: partial_2_0
"tbz x9, #1, 57f\n"
- "str h16, [x27], #0x2\n"
- "str h20, [x22], #0x2\n"
+ "str h16, [x26], #0x2\n"
+ "str h20, [x21], #0x2\n"
"tbz x9, #0, 58f\n"
- "st1 { v16.b }[2], [x27]\n"
- "st1 { v20.b }[2], [x22]\n"
+ "st1 { v16.b }[2], [x26]\n"
+ "st1 { v20.b }[2], [x21]\n"
"b 58f\n"
"57:" // Height 2: Partial direct writeback: partial_1_0
- "str b16, [x27, #0x0]\n"
- "str b20, [x22, #0x0]\n"
+ "str b16, [x26, #0x0]\n"
+ "str b20, [x21, #0x0]\n"
"58:" // Height 2: Partial direct writeback: Done
"b 60f\n"
"59:" // Height 2: Full writeback
- "str q16, [x27, #0x0]\n"
- "add x27, x27, #0x10\n"
- "str q20, [x22, #0x0]\n"
+ "str q16, [x26, #0x0]\n"
+ "add x26, x26, #0x10\n"
+ "str q20, [x21, #0x0]\n"
"60:" // Height 2: Writeback done
"subs x9, x9, #0x10\n"
"bgt 32b\n"
"b 122f\n"
"61:" // Height 3
- "mov x10, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x27, %x[col_bias]\n"
"movi v12.4s, #0x0\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"bic %x[flags], %x[flags], #0x80000000\n"
"movi v13.4s, #0x0\n"
+ "mov x26, %x[output_ptr]\n"
"movi v15.16b, #0x1\n"
- "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
"62:" // Height 3: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -815,56 +815,56 @@ void a64_hybrid_s8qa_dot_4x16 (
"movi v26.4s, #0x0\n"
"movi v27.4s, #0x0\n"
"63:" // Height 3: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"64:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 65f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "ldr x22, [x21, #0x10]\n"
- "cbnz x26, 66f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "cbnz x25, 66f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
"b 66f\n"
"65:" // Height 3: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
"66:" // Height 3: input setup done
- "cmp x25, #0x10\n"
+ "cmp x24, #0x10\n"
"blt 71f\n"
- "ldr q0, [x24, #0x0]\n"
- "ldr q1, [x23, #0x0]\n"
- "cmp x25, #0x20\n"
- "ldr q2, [x22, #0x0]\n"
+ "ldr q0, [x23, #0x0]\n"
+ "ldr q1, [x22, #0x0]\n"
+ "cmp x24, #0x20\n"
+ "ldr q2, [x21, #0x0]\n"
"ldr q4, [x28, #0x0]\n"
- "ldr q5, [x28, #0x10]\n"
- "ldr q6, [x28, #0x20]\n"
- "ldr q7, [x28, #0x30]\n"
- "ldr q8, [x28, #0x40]\n"
- "ldr q9, [x28, #0x50]\n"
- "ldr q10, [x28, #0x60]\n"
"blt 69f\n"
"67:" // Height 3: Multiply loop: Main loop head
".inst 0x4f80e090 // sdot v16.4s, v4.16b, v0.4b[0]\n"
- ".inst 0x4f81e094 // sdot v20.4s, v4.16b, v1.4b[0]\n"
- "add x24, x24, #0x10\n"
+ "ldr q5, [x28, #0x10]\n"
"add x23, x23, #0x10\n"
+ ".inst 0x4f81e094 // sdot v20.4s, v4.16b, v1.4b[0]\n"
+ "ldr q6, [x28, #0x20]\n"
+ "add x22, x22, #0x10\n"
".inst 0x4f82e098 // sdot v24.4s, v4.16b, v2.4b[0]\n"
- "ldr q4, [x28, #0x70]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x21, x21, #0x10\n"
".inst 0x4f80e0b1 // sdot v17.4s, v5.16b, v0.4b[0]\n"
- "add x22, x22, #0x10\n"
+ "ldr q8, [x28, #0x40]\n"
".inst 0x4f81e0b5 // sdot v21.4s, v5.16b, v1.4b[0]\n"
+ "ldr q9, [x28, #0x50]\n"
".inst 0x4f82e0b9 // sdot v25.4s, v5.16b, v2.4b[0]\n"
- "ldr q5, [x28, #0x80]\n"
+ "ldr q10, [x28, #0x60]\n"
".inst 0x4f80e0d2 // sdot v18.4s, v6.16b, v0.4b[0]\n"
+ "ldr q4, [x28, #0x70]\n"
".inst 0x4f81e0d6 // sdot v22.4s, v6.16b, v1.4b[0]\n"
+ "ldr q5, [x28, #0x80]\n"
".inst 0x4f82e0da // sdot v26.4s, v6.16b, v2.4b[0]\n"
"ldr q6, [x28, #0x90]\n"
".inst 0x4f80e0f3 // sdot v19.4s, v7.16b, v0.4b[0]\n"
@@ -891,8 +891,8 @@ void a64_hybrid_s8qa_dot_4x16 (
".inst 0x4f81e8b4 // sdot v20.4s, v5.16b, v1.4b[2]\n"
".inst 0x4f82e8b8 // sdot v24.4s, v5.16b, v2.4b[2]\n"
"ldr q5, [x28, #0xf0]\n"
- ".inst 0x4f80e8d1 // sdot v17.4s, v6.16b, v0.4b[2]\n"
"add x28, x28, #0x100\n"
+ ".inst 0x4f80e8d1 // sdot v17.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8d5 // sdot v21.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d9 // sdot v25.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f80e8f2 // sdot v18.4s, v7.16b, v0.4b[2]\n"
@@ -918,37 +918,37 @@ void a64_hybrid_s8qa_dot_4x16 (
".inst 0x4e8f942c // sdot v12.4s, v1.16b, v15.16b\n"
".inst 0x4e8f944d // sdot v13.4s, v2.16b, v15.16b\n"
"68:" // Height 3: Multiply loop: unique 9: skip row sum
- "ldr q0, [x24, #0x0]\n"
- "ldr q1, [x23, #0x0]\n"
- "sub x25, x25, #0x10\n"
- "cmp x25, #0x20\n"
- "ldr q2, [x22, #0x0]\n"
- "ldr q4, [x28, #0x0]\n"
- "ldr q5, [x28, #0x10]\n"
- "ldr q6, [x28, #0x20]\n"
- "ldr q7, [x28, #0x30]\n"
- "ldr q8, [x28, #0x40]\n"
- "ldr q9, [x28, #0x50]\n"
- "ldr q10, [x28, #0x60]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"prfm pldl1keep, [x23, #0x80]\n"
+ "sub x24, x24, #0x10\n"
"prfm pldl1keep, [x22, #0x80]\n"
+ "cmp x24, #0x20\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
+ "ldr q0, [x23, #0x0]\n"
+ "ldr q1, [x22, #0x0]\n"
+ "ldr q2, [x21, #0x0]\n"
+ "ldr q4, [x28, #0x0]\n"
"bge 67b\n"
"69:" // Height 3: Multiply loop: Single iteration only
".inst 0x4f80e090 // sdot v16.4s, v4.16b, v0.4b[0]\n"
+ "ldr q5, [x28, #0x10]\n"
+ "sub x24, x24, #0x10\n"
".inst 0x4f81e094 // sdot v20.4s, v4.16b, v1.4b[0]\n"
- "sub x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
+ "ldr q6, [x28, #0x20]\n"
+ "add x23, x23, #0x10\n"
".inst 0x4f82e098 // sdot v24.4s, v4.16b, v2.4b[0]\n"
- "ldr q4, [x28, #0x70]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x22, x22, #0x10\n"
".inst 0x4f80e0b1 // sdot v17.4s, v5.16b, v0.4b[0]\n"
- "add x23, x23, #0x10\n"
+ "ldr q8, [x28, #0x40]\n"
+ "add x21, x21, #0x10\n"
".inst 0x4f81e0b5 // sdot v21.4s, v5.16b, v1.4b[0]\n"
+ "ldr q9, [x28, #0x50]\n"
".inst 0x4f82e0b9 // sdot v25.4s, v5.16b, v2.4b[0]\n"
- "ldr q5, [x28, #0x80]\n"
- "add x22, x22, #0x10\n"
+ "ldr q10, [x28, #0x60]\n"
".inst 0x4f80e0d2 // sdot v18.4s, v6.16b, v0.4b[0]\n"
+ "ldr q4, [x28, #0x70]\n"
".inst 0x4f81e0d6 // sdot v22.4s, v6.16b, v1.4b[0]\n"
+ "ldr q5, [x28, #0x80]\n"
".inst 0x4f82e0da // sdot v26.4s, v6.16b, v2.4b[0]\n"
"ldr q6, [x28, #0x90]\n"
".inst 0x4f80e0f3 // sdot v19.4s, v7.16b, v0.4b[0]\n"
@@ -975,8 +975,8 @@ void a64_hybrid_s8qa_dot_4x16 (
".inst 0x4f81e8b4 // sdot v20.4s, v5.16b, v1.4b[2]\n"
".inst 0x4f82e8b8 // sdot v24.4s, v5.16b, v2.4b[2]\n"
"ldr q5, [x28, #0xf0]\n"
- ".inst 0x4f80e8d1 // sdot v17.4s, v6.16b, v0.4b[2]\n"
"add x28, x28, #0x100\n"
+ ".inst 0x4f80e8d1 // sdot v17.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8d5 // sdot v21.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d9 // sdot v25.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f80e8f2 // sdot v18.4s, v7.16b, v0.4b[2]\n"
@@ -1002,33 +1002,33 @@ void a64_hybrid_s8qa_dot_4x16 (
".inst 0x4e8f942c // sdot v12.4s, v1.16b, v15.16b\n"
".inst 0x4e8f944d // sdot v13.4s, v2.16b, v15.16b\n"
"70:" // Height 3: Multiply loop: unique 10: skip row sum
- "prfm pldl1keep, [x24, #0x80]\n"
"prfm pldl1keep, [x23, #0x80]\n"
"prfm pldl1keep, [x22, #0x80]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
"71:" // Height 3: Multiply loop: Main loop skip
- "cbz x25, 78f\n"
- "cmp x25, #0x4\n"
+ "cbz x24, 78f\n"
+ "cmp x24, #0x4\n"
"blt 74f\n"
"72:" // Height 3: Multiply loop: Odd block loop
- "ldr s0, [x24], #0x4\n"
- "ldr s1, [x23], #0x4\n"
- "ldr s2, [x22], #0x4\n"
+ "ldr s0, [x23], #0x4\n"
+ "ldr s1, [x22], #0x4\n"
+ "ldr s2, [x21], #0x4\n"
"tbnz %x[flags], #31, 73f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
".inst 0x4e8f942c // sdot v12.4s, v1.16b, v15.16b\n"
".inst 0x4e8f944d // sdot v13.4s, v2.16b, v15.16b\n"
"73:" // Height 3: Multiply loop: unique 11: skip row sum
"ldr q6, [x28, #0x0]\n"
- "ldr q7, [x28, #0x10]\n"
- "sub x25, x25, #0x4\n"
- "cmp x25, #0x4\n"
- "ldr q8, [x28, #0x20]\n"
- "ldr q9, [x28, #0x30]\n"
".inst 0x4f80e0d0 // sdot v16.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "sub x24, x24, #0x4\n"
".inst 0x4f81e0d4 // sdot v20.4s, v6.16b, v1.4b[0]\n"
+ "ldr q8, [x28, #0x20]\n"
+ "cmp x24, #0x4\n"
".inst 0x4f82e0d8 // sdot v24.4s, v6.16b, v2.4b[0]\n"
- ".inst 0x4f80e0f1 // sdot v17.4s, v7.16b, v0.4b[0]\n"
+ "ldr q9, [x28, #0x30]\n"
"add x28, x28, #0x40\n"
+ ".inst 0x4f80e0f1 // sdot v17.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0f5 // sdot v21.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f9 // sdot v25.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f80e112 // sdot v18.4s, v8.16b, v0.4b[0]\n"
@@ -1038,21 +1038,21 @@ void a64_hybrid_s8qa_dot_4x16 (
".inst 0x4f81e137 // sdot v23.4s, v9.16b, v1.4b[0]\n"
".inst 0x4f82e13b // sdot v27.4s, v9.16b, v2.4b[0]\n"
"bge 72b\n"
+ "cbz x24, 78f\n"
"74:" // Height 3: Multiply loop: Skip odd blocks
- "cbz x25, 78f\n"
- "tbz x25, #1, 75f\n"
- "ldr h0, [x24], #0x2\n"
- "ldr h1, [x23], #0x2\n"
- "ldr h2, [x22], #0x2\n"
- "tbz x25, #0, 76f\n"
- "ld1 { v0.b }[2], [x24]\n"
- "ld1 { v1.b }[2], [x23]\n"
- "ld1 { v2.b }[2], [x22]\n"
+ "tbz x24, #1, 75f\n"
+ "ldr h0, [x23], #0x2\n"
+ "ldr h1, [x22], #0x2\n"
+ "ldr h2, [x21], #0x2\n"
+ "tbz x24, #0, 76f\n"
+ "ld1 { v0.b }[2], [x23]\n"
+ "ld1 { v1.b }[2], [x22]\n"
+ "ld1 { v2.b }[2], [x21]\n"
"b 76f\n"
"75:" // Height 3: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x24, #0x0]\n"
- "ldr b1, [x23, #0x0]\n"
- "ldr b2, [x22, #0x0]\n"
+ "ldr b0, [x23, #0x0]\n"
+ "ldr b1, [x22, #0x0]\n"
+ "ldr b2, [x21, #0x0]\n"
"76:" // Height 3: Multiply loop: Ragged operand read: Done
"tbnz %x[flags], #31, 77f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
@@ -1060,16 +1060,16 @@ void a64_hybrid_s8qa_dot_4x16 (
".inst 0x4e8f944d // sdot v13.4s, v2.16b, v15.16b\n"
"77:" // Height 3: Multiply loop: unique 12: skip row sum
"ldr q10, [x28, #0x0]\n"
- "ldr q4, [x28, #0x10]\n"
".inst 0x4f80e150 // sdot v16.4s, v10.16b, v0.4b[0]\n"
+ "ldr q4, [x28, #0x10]\n"
".inst 0x4f81e154 // sdot v20.4s, v10.16b, v1.4b[0]\n"
"ldr q5, [x28, #0x20]\n"
- "ldr q6, [x28, #0x30]\n"
".inst 0x4f82e158 // sdot v24.4s, v10.16b, v2.4b[0]\n"
+ "ldr q6, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
".inst 0x4f80e091 // sdot v17.4s, v4.16b, v0.4b[0]\n"
".inst 0x4f81e095 // sdot v21.4s, v4.16b, v1.4b[0]\n"
".inst 0x4f82e099 // sdot v25.4s, v4.16b, v2.4b[0]\n"
- "add x28, x28, #0x40\n"
".inst 0x4f80e0b2 // sdot v18.4s, v5.16b, v0.4b[0]\n"
".inst 0x4f81e0b6 // sdot v22.4s, v5.16b, v1.4b[0]\n"
".inst 0x4f82e0ba // sdot v26.4s, v5.16b, v2.4b[0]\n"
@@ -1077,49 +1077,49 @@ void a64_hybrid_s8qa_dot_4x16 (
".inst 0x4f81e0d7 // sdot v23.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0db // sdot v27.4s, v6.16b, v2.4b[0]\n"
"78:" // Height 3: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 64b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x27, x20\n"
- "add x21, x22, x20\n"
- "prfm pstl1keep, [x27, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x21, x26, x19\n"
"prfm pstl1keep, [x21, #0x0]\n"
+ "add x20, x21, x19\n"
+ "prfm pstl1keep, [x20, #0x0]\n"
"tbnz %x[flags], #31, 79f\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1r { v3.4s }, [x23]\n"
"addp v11.4s, v11.4s, v11.4s\n"
+ "add x22, %x[qp], %[b_offset]\n"
+ "ld1r { v3.4s }, [x22]\n"
"addp v12.4s, v12.4s, v12.4s\n"
"addp v13.4s, v13.4s, v13.4s\n"
- "neg v3.4s, v3.4s\n"
"addp v11.4s, v11.4s, v11.4s\n"
+ "neg v3.4s, v3.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
"addp v13.4s, v13.4s, v13.4s\n"
"mul v11.4s, v11.4s, v3.4s\n"
"mul v12.4s, v12.4s, v3.4s\n"
"mul v13.4s, v13.4s, v3.4s\n"
"79:" // Height 3: skip row sum fixup
- "ldr q0, [x10, #0x0]\n"
- "ldr q1, [x10, #0x10]\n"
"add v16.4s, v16.4s, v11.4s\n"
+ "ldr q0, [x27, #0x0]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"add v17.4s, v17.4s, v11.4s\n"
- "ldr q2, [x10, #0x20]\n"
- "ldr q3, [x10, #0x30]\n"
+ "ldr q1, [x27, #0x10]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
"add v18.4s, v18.4s, v11.4s\n"
+ "ldr q2, [x27, #0x20]\n"
+ "add x22, %x[qp], %[per_layer_mul]\n"
"add v19.4s, v19.4s, v11.4s\n"
+ "ldr q3, [x27, #0x30]\n"
+ "add x27, x27, #0x40\n"
"add v20.4s, v20.4s, v12.4s\n"
+ "ld1r { v4.4s }, [x22]\n"
"add v21.4s, v21.4s, v12.4s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x23]\n"
"add v22.4s, v22.4s, v12.4s\n"
"add v23.4s, v23.4s, v12.4s\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
"add v24.4s, v24.4s, v13.4s\n"
"add v25.4s, v25.4s, v13.4s\n"
- "add x10, x10, #0x40\n"
"add v26.4s, v26.4s, v13.4s\n"
"add v27.4s, v27.4s, v13.4s\n"
"add v16.4s, v16.4s, v0.4s\n"
@@ -1151,98 +1151,98 @@ void a64_hybrid_s8qa_dot_4x16 (
"and v4.16b, v16.16b, v0.16b\n"
"and v5.16b, v17.16b, v0.16b\n"
"and v6.16b, v18.16b, v0.16b\n"
- "and v7.16b, v19.16b, v0.16b\n"
- "and v8.16b, v20.16b, v0.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
"sqadd v16.4s, v16.4s, v4.4s\n"
"sqadd v17.4s, v17.4s, v5.4s\n"
"sqadd v18.4s, v18.4s, v6.4s\n"
+ "and v7.16b, v19.16b, v0.16b\n"
+ "and v8.16b, v20.16b, v0.16b\n"
+ "and v9.16b, v21.16b, v0.16b\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
+ "sshr v8.4s, v8.4s, #0x1f\n"
+ "sshr v9.4s, v9.4s, #0x1f\n"
"sqadd v19.4s, v19.4s, v7.4s\n"
"sqadd v20.4s, v20.4s, v8.4s\n"
- "and v9.16b, v21.16b, v0.16b\n"
+ "sqadd v21.4s, v21.4s, v9.4s\n"
"and v10.16b, v22.16b, v0.16b\n"
"and v4.16b, v23.16b, v0.16b\n"
"and v5.16b, v24.16b, v0.16b\n"
- "and v6.16b, v25.16b, v0.16b\n"
- "and v7.16b, v26.16b, v0.16b\n"
- "and v8.16b, v27.16b, v0.16b\n"
- "sshr v9.4s, v9.4s, #0x1f\n"
"sshr v10.4s, v10.4s, #0x1f\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sqadd v21.4s, v21.4s, v9.4s\n"
"sqadd v22.4s, v22.4s, v10.4s\n"
"sqadd v23.4s, v23.4s, v4.4s\n"
"sqadd v24.4s, v24.4s, v5.4s\n"
+ "and v6.16b, v25.16b, v0.16b\n"
+ "and v7.16b, v26.16b, v0.16b\n"
+ "and v8.16b, v27.16b, v0.16b\n"
+ "sshr v6.4s, v6.4s, #0x1f\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
+ "sshr v8.4s, v8.4s, #0x1f\n"
"sqadd v25.4s, v25.4s, v6.4s\n"
"sqadd v26.4s, v26.4s, v7.4s\n"
"sqadd v27.4s, v27.4s, v8.4s\n"
"80:" // Height 3: no shift correction
- "add x23, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
"srshl v16.4s, v16.4s, v0.4s\n"
+ "add x22, %x[qp], %[c_offset]\n"
+ "ld1r { v4.4s }, [x22]\n"
"srshl v17.4s, v17.4s, v0.4s\n"
+ "add x22, %x[qp], %[minval]\n"
"srshl v18.4s, v18.4s, v0.4s\n"
+ "ld1r { v5.4s }, [x22]\n"
+ "add x22, %x[qp], %[maxval]\n"
"srshl v19.4s, v19.4s, v0.4s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x23]\n"
+ "ld1r { v6.4s }, [x22]\n"
+ "cmp x9, #0x10\n"
"srshl v20.4s, v20.4s, v0.4s\n"
"srshl v21.4s, v21.4s, v0.4s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x23]\n"
- "srshl v22.4s, v22.4s, v0.4s\n"
- "srshl v23.4s, v23.4s, v0.4s\n"
- "cmp x9, #0x10\n"
- "srshl v24.4s, v24.4s, v0.4s\n"
- "srshl v25.4s, v25.4s, v0.4s\n"
- "srshl v26.4s, v26.4s, v0.4s\n"
- "srshl v27.4s, v27.4s, v0.4s\n"
"add v16.4s, v16.4s, v4.4s\n"
"add v17.4s, v17.4s, v4.4s\n"
"add v18.4s, v18.4s, v4.4s\n"
- "add v19.4s, v19.4s, v4.4s\n"
- "add v20.4s, v20.4s, v4.4s\n"
- "add v21.4s, v21.4s, v4.4s\n"
- "add v22.4s, v22.4s, v4.4s\n"
- "add v23.4s, v23.4s, v4.4s\n"
- "add v24.4s, v24.4s, v4.4s\n"
- "add v25.4s, v25.4s, v4.4s\n"
- "add v26.4s, v26.4s, v4.4s\n"
- "add v27.4s, v27.4s, v4.4s\n"
"smin v16.4s, v16.4s, v6.4s\n"
"smin v17.4s, v17.4s, v6.4s\n"
"smin v18.4s, v18.4s, v6.4s\n"
- "smin v19.4s, v19.4s, v6.4s\n"
- "smin v20.4s, v20.4s, v6.4s\n"
- "smin v21.4s, v21.4s, v6.4s\n"
- "smin v22.4s, v22.4s, v6.4s\n"
- "smin v23.4s, v23.4s, v6.4s\n"
- "smin v24.4s, v24.4s, v6.4s\n"
- "smin v25.4s, v25.4s, v6.4s\n"
- "smin v26.4s, v26.4s, v6.4s\n"
- "smin v27.4s, v27.4s, v6.4s\n"
"smax v16.4s, v16.4s, v5.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
"smax v18.4s, v18.4s, v5.4s\n"
+ "add v19.4s, v19.4s, v4.4s\n"
+ "add v20.4s, v20.4s, v4.4s\n"
+ "add v21.4s, v21.4s, v4.4s\n"
+ "smin v19.4s, v19.4s, v6.4s\n"
+ "smin v20.4s, v20.4s, v6.4s\n"
+ "smin v21.4s, v21.4s, v6.4s\n"
"smax v19.4s, v19.4s, v5.4s\n"
"smax v20.4s, v20.4s, v5.4s\n"
"smax v21.4s, v21.4s, v5.4s\n"
+ "srshl v22.4s, v22.4s, v0.4s\n"
+ "srshl v23.4s, v23.4s, v0.4s\n"
+ "srshl v24.4s, v24.4s, v0.4s\n"
+ "srshl v25.4s, v25.4s, v0.4s\n"
+ "add v22.4s, v22.4s, v4.4s\n"
+ "add v23.4s, v23.4s, v4.4s\n"
+ "add v24.4s, v24.4s, v4.4s\n"
+ "smin v22.4s, v22.4s, v6.4s\n"
+ "smin v23.4s, v23.4s, v6.4s\n"
+ "smin v24.4s, v24.4s, v6.4s\n"
"smax v22.4s, v22.4s, v5.4s\n"
"smax v23.4s, v23.4s, v5.4s\n"
"smax v24.4s, v24.4s, v5.4s\n"
- "smax v25.4s, v25.4s, v5.4s\n"
- "smax v26.4s, v26.4s, v5.4s\n"
- "smax v27.4s, v27.4s, v5.4s\n"
+ "add v25.4s, v25.4s, v4.4s\n"
+ "srshl v26.4s, v26.4s, v0.4s\n"
+ "srshl v27.4s, v27.4s, v0.4s\n"
+ "smin v25.4s, v25.4s, v6.4s\n"
"uzp1 v16.8h, v16.8h, v17.8h\n"
+ "add v26.4s, v26.4s, v4.4s\n"
+ "smax v25.4s, v25.4s, v5.4s\n"
+ "add v27.4s, v27.4s, v4.4s\n"
+ "smin v26.4s, v26.4s, v6.4s\n"
"uzp1 v17.8h, v18.8h, v19.8h\n"
+ "smin v27.4s, v27.4s, v6.4s\n"
+ "smax v26.4s, v26.4s, v5.4s\n"
"uzp1 v20.8h, v20.8h, v21.8h\n"
+ "smax v27.4s, v27.4s, v5.4s\n"
"uzp1 v21.8h, v22.8h, v23.8h\n"
"uzp1 v24.8h, v24.8h, v25.8h\n"
"uzp1 v25.8h, v26.8h, v27.8h\n"
@@ -1251,103 +1251,103 @@ void a64_hybrid_s8qa_dot_4x16 (
"uzp1 v24.16b, v24.16b, v25.16b\n"
"bge 89f\n"
"tbz x9, #3, 84f\n"
- "str d16, [x27], #0x8\n"
- "str d20, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
+ "str d16, [x26], #0x8\n"
+ "str d20, [x21], #0x8\n"
+ "str d24, [x20], #0x8\n"
"tbz x9, #2, 82f\n"
- "st1 { v16.s }[2], [x27], #0x4\n"
- "st1 { v20.s }[2], [x22], #0x4\n"
- "st1 { v24.s }[2], [x21], #0x4\n"
+ "st1 { v16.s }[2], [x26], #0x4\n"
+ "st1 { v20.s }[2], [x21], #0x4\n"
+ "st1 { v24.s }[2], [x20], #0x4\n"
"tbz x9, #1, 81f\n"
- "st1 { v16.h }[6], [x27], #0x2\n"
- "st1 { v20.h }[6], [x22], #0x2\n"
- "st1 { v24.h }[6], [x21], #0x2\n"
+ "st1 { v16.h }[6], [x26], #0x2\n"
+ "st1 { v20.h }[6], [x21], #0x2\n"
+ "st1 { v24.h }[6], [x20], #0x2\n"
"tbz x9, #0, 88f\n"
- "st1 { v16.b }[14], [x27]\n"
- "st1 { v20.b }[14], [x22]\n"
- "st1 { v24.b }[14], [x21]\n"
+ "st1 { v16.b }[14], [x26]\n"
+ "st1 { v20.b }[14], [x21]\n"
+ "st1 { v24.b }[14], [x20]\n"
"b 88f\n"
"81:" // Height 3: Partial direct writeback: partial_1_12
"tbz x9, #0, 88f\n"
- "st1 { v16.b }[12], [x27]\n"
- "st1 { v20.b }[12], [x22]\n"
- "st1 { v24.b }[12], [x21]\n"
+ "st1 { v16.b }[12], [x26]\n"
+ "st1 { v20.b }[12], [x21]\n"
+ "st1 { v24.b }[12], [x20]\n"
"b 88f\n"
"82:" // Height 3: Partial direct writeback: partial_2_8
"tbz x9, #1, 83f\n"
- "st1 { v16.h }[4], [x27], #0x2\n"
- "st1 { v20.h }[4], [x22], #0x2\n"
- "st1 { v24.h }[4], [x21], #0x2\n"
+ "st1 { v16.h }[4], [x26], #0x2\n"
+ "st1 { v20.h }[4], [x21], #0x2\n"
+ "st1 { v24.h }[4], [x20], #0x2\n"
"tbz x9, #0, 88f\n"
- "st1 { v16.b }[10], [x27]\n"
- "st1 { v20.b }[10], [x22]\n"
- "st1 { v24.b }[10], [x21]\n"
+ "st1 { v16.b }[10], [x26]\n"
+ "st1 { v20.b }[10], [x21]\n"
+ "st1 { v24.b }[10], [x20]\n"
"b 88f\n"
"83:" // Height 3: Partial direct writeback: partial_1_8
"tbz x9, #0, 88f\n"
- "st1 { v16.b }[8], [x27]\n"
- "st1 { v20.b }[8], [x22]\n"
- "st1 { v24.b }[8], [x21]\n"
+ "st1 { v16.b }[8], [x26]\n"
+ "st1 { v20.b }[8], [x21]\n"
+ "st1 { v24.b }[8], [x20]\n"
"b 88f\n"
"84:" // Height 3: Partial direct writeback: partial_4_0
"tbz x9, #2, 86f\n"
- "str s16, [x27], #0x4\n"
- "str s20, [x22], #0x4\n"
- "str s24, [x21], #0x4\n"
+ "str s16, [x26], #0x4\n"
+ "str s20, [x21], #0x4\n"
+ "str s24, [x20], #0x4\n"
"tbz x9, #1, 85f\n"
- "st1 { v16.h }[2], [x27], #0x2\n"
- "st1 { v20.h }[2], [x22], #0x2\n"
- "st1 { v24.h }[2], [x21], #0x2\n"
+ "st1 { v16.h }[2], [x26], #0x2\n"
+ "st1 { v20.h }[2], [x21], #0x2\n"
+ "st1 { v24.h }[2], [x20], #0x2\n"
"tbz x9, #0, 88f\n"
- "st1 { v16.b }[6], [x27]\n"
- "st1 { v20.b }[6], [x22]\n"
- "st1 { v24.b }[6], [x21]\n"
+ "st1 { v16.b }[6], [x26]\n"
+ "st1 { v20.b }[6], [x21]\n"
+ "st1 { v24.b }[6], [x20]\n"
"b 88f\n"
"85:" // Height 3: Partial direct writeback: partial_1_4
"tbz x9, #0, 88f\n"
- "st1 { v16.b }[4], [x27]\n"
- "st1 { v20.b }[4], [x22]\n"
- "st1 { v24.b }[4], [x21]\n"
+ "st1 { v16.b }[4], [x26]\n"
+ "st1 { v20.b }[4], [x21]\n"
+ "st1 { v24.b }[4], [x20]\n"
"b 88f\n"
"86:" // Height 3: Partial direct writeback: partial_2_0
"tbz x9, #1, 87f\n"
- "str h16, [x27], #0x2\n"
- "str h20, [x22], #0x2\n"
- "str h24, [x21], #0x2\n"
+ "str h16, [x26], #0x2\n"
+ "str h20, [x21], #0x2\n"
+ "str h24, [x20], #0x2\n"
"tbz x9, #0, 88f\n"
- "st1 { v16.b }[2], [x27]\n"
- "st1 { v20.b }[2], [x22]\n"
- "st1 { v24.b }[2], [x21]\n"
+ "st1 { v16.b }[2], [x26]\n"
+ "st1 { v20.b }[2], [x21]\n"
+ "st1 { v24.b }[2], [x20]\n"
"b 88f\n"
"87:" // Height 3: Partial direct writeback: partial_1_0
- "str b16, [x27, #0x0]\n"
- "str b20, [x22, #0x0]\n"
- "str b24, [x21, #0x0]\n"
+ "str b16, [x26, #0x0]\n"
+ "str b20, [x21, #0x0]\n"
+ "str b24, [x20, #0x0]\n"
"88:" // Height 3: Partial direct writeback: Done
"b 90f\n"
"89:" // Height 3: Full writeback
- "str q16, [x27, #0x0]\n"
- "add x27, x27, #0x10\n"
- "str q20, [x22, #0x0]\n"
- "str q24, [x21, #0x0]\n"
+ "str q16, [x26, #0x0]\n"
+ "add x26, x26, #0x10\n"
+ "str q20, [x21, #0x0]\n"
+ "str q24, [x20, #0x0]\n"
"90:" // Height 3: Writeback done
"subs x9, x9, #0x10\n"
"bgt 62b\n"
"b 122f\n"
"91:" // Height 4
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x4\n"
- "mov x10, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x27, %x[col_bias]\n"
"movi v12.4s, #0x0\n"
- "movi v13.4s, #0x0\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"bic %x[flags], %x[flags], #0x80000000\n"
- "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "movi v13.4s, #0x0\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x26, %x[output_ptr]\n"
"movi v14.4s, #0x0\n"
+ "mov x19, #0x4\n"
"movi v15.16b, #0x1\n"
- "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"92:" // Height 4: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -1366,59 +1366,59 @@ void a64_hybrid_s8qa_dot_4x16 (
"movi v30.4s, #0x0\n"
"movi v31.4s, #0x0\n"
"93:" // Height 4: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"94:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 95f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "ldr x22, [x21, #0x10]\n"
- "ldr x21, [x21, #0x18]\n"
- "cbnz x26, 96f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
- "add x21, x21, x20\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "ldr x20, [x20, #0x18]\n"
+ "cbnz x25, 96f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
+ "add x20, x20, x19\n"
"b 96f\n"
"95:" // Height 4: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
+ "add x20, x21, x19\n"
"96:" // Height 4: input setup done
- "cmp x25, #0x10\n"
+ "cmp x24, #0x10\n"
"blt 101f\n"
- "ldr q0, [x24, #0x0]\n"
- "ldr q1, [x23, #0x0]\n"
- "cmp x25, #0x20\n"
- "ldr q2, [x22, #0x0]\n"
- "ldr q3, [x21, #0x0]\n"
+ "ldr q0, [x23, #0x0]\n"
+ "ldr q1, [x22, #0x0]\n"
+ "cmp x24, #0x20\n"
+ "ldr q2, [x21, #0x0]\n"
+ "ldr q3, [x20, #0x0]\n"
"ldr q4, [x28, #0x0]\n"
- "ldr q5, [x28, #0x10]\n"
- "ldr q6, [x28, #0x20]\n"
- "ldr q7, [x28, #0x30]\n"
- "ldr q8, [x28, #0x40]\n"
- "ldr q9, [x28, #0x50]\n"
- "ldr q10, [x28, #0x60]\n"
"blt 99f\n"
"97:" // Height 4: Multiply loop: Main loop head
".inst 0x4f80e090 // sdot v16.4s, v4.16b, v0.4b[0]\n"
- ".inst 0x4f81e094 // sdot v20.4s, v4.16b, v1.4b[0]\n"
- "add x24, x24, #0x10\n"
+ "ldr q5, [x28, #0x10]\n"
"add x23, x23, #0x10\n"
+ ".inst 0x4f81e094 // sdot v20.4s, v4.16b, v1.4b[0]\n"
+ "ldr q6, [x28, #0x20]\n"
+ "add x22, x22, #0x10\n"
".inst 0x4f82e098 // sdot v24.4s, v4.16b, v2.4b[0]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x21, x21, #0x10\n"
".inst 0x4f83e09c // sdot v28.4s, v4.16b, v3.4b[0]\n"
- "ldr q4, [x28, #0x70]\n"
- "add x22, x22, #0x10\n"
+ "ldr q8, [x28, #0x40]\n"
+ "add x20, x20, #0x10\n"
".inst 0x4f80e0b1 // sdot v17.4s, v5.16b, v0.4b[0]\n"
+ "ldr q9, [x28, #0x50]\n"
".inst 0x4f81e0b5 // sdot v21.4s, v5.16b, v1.4b[0]\n"
- "add x21, x21, #0x10\n"
+ "ldr q10, [x28, #0x60]\n"
".inst 0x4f82e0b9 // sdot v25.4s, v5.16b, v2.4b[0]\n"
+ "ldr q4, [x28, #0x70]\n"
".inst 0x4f83e0bd // sdot v29.4s, v5.16b, v3.4b[0]\n"
"ldr q5, [x28, #0x80]\n"
".inst 0x4f80e0d2 // sdot v18.4s, v6.16b, v0.4b[0]\n"
@@ -1491,38 +1491,38 @@ void a64_hybrid_s8qa_dot_4x16 (
".inst 0x4e8f944d // sdot v13.4s, v2.16b, v15.16b\n"
".inst 0x4e8f946e // sdot v14.4s, v3.16b, v15.16b\n"
"98:" // Height 4: Multiply loop: unique 13: skip row sum
- "ldr q0, [x24, #0x0]\n"
- "ldr q1, [x23, #0x0]\n"
- "sub x25, x25, #0x10\n"
- "cmp x25, #0x20\n"
- "ldr q2, [x22, #0x0]\n"
- "ldr q3, [x21, #0x0]\n"
- "ldr q4, [x28, #0x0]\n"
- "ldr q5, [x28, #0x10]\n"
- "ldr q6, [x28, #0x20]\n"
- "ldr q7, [x28, #0x30]\n"
- "ldr q8, [x28, #0x40]\n"
- "ldr q9, [x28, #0x50]\n"
- "ldr q10, [x28, #0x60]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"prfm pldl1keep, [x23, #0x80]\n"
+ "sub x24, x24, #0x10\n"
"prfm pldl1keep, [x22, #0x80]\n"
+ "cmp x24, #0x20\n"
"prfm pldl1keep, [x21, #0x80]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
+ "ldr q0, [x23, #0x0]\n"
+ "ldr q1, [x22, #0x0]\n"
+ "ldr q2, [x21, #0x0]\n"
+ "ldr q3, [x20, #0x0]\n"
+ "ldr q4, [x28, #0x0]\n"
"bge 97b\n"
"99:" // Height 4: Multiply loop: Single iteration only
".inst 0x4f80e090 // sdot v16.4s, v4.16b, v0.4b[0]\n"
+ "ldr q5, [x28, #0x10]\n"
+ "sub x24, x24, #0x10\n"
".inst 0x4f81e094 // sdot v20.4s, v4.16b, v1.4b[0]\n"
- "sub x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
+ "ldr q6, [x28, #0x20]\n"
+ "add x23, x23, #0x10\n"
".inst 0x4f82e098 // sdot v24.4s, v4.16b, v2.4b[0]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x22, x22, #0x10\n"
".inst 0x4f83e09c // sdot v28.4s, v4.16b, v3.4b[0]\n"
- "ldr q4, [x28, #0x70]\n"
- "add x23, x23, #0x10\n"
+ "ldr q8, [x28, #0x40]\n"
+ "add x21, x21, #0x10\n"
".inst 0x4f80e0b1 // sdot v17.4s, v5.16b, v0.4b[0]\n"
+ "ldr q9, [x28, #0x50]\n"
+ "add x20, x20, #0x10\n"
".inst 0x4f81e0b5 // sdot v21.4s, v5.16b, v1.4b[0]\n"
- "add x22, x22, #0x10\n"
- "add x21, x21, #0x10\n"
+ "ldr q10, [x28, #0x60]\n"
".inst 0x4f82e0b9 // sdot v25.4s, v5.16b, v2.4b[0]\n"
+ "ldr q4, [x28, #0x70]\n"
".inst 0x4f83e0bd // sdot v29.4s, v5.16b, v3.4b[0]\n"
"ldr q5, [x28, #0x80]\n"
".inst 0x4f80e0d2 // sdot v18.4s, v6.16b, v0.4b[0]\n"
@@ -1595,19 +1595,19 @@ void a64_hybrid_s8qa_dot_4x16 (
".inst 0x4e8f944d // sdot v13.4s, v2.16b, v15.16b\n"
".inst 0x4e8f946e // sdot v14.4s, v3.16b, v15.16b\n"
"100:" // Height 4: Multiply loop: unique 14: skip row sum
- "prfm pldl1keep, [x24, #0x80]\n"
"prfm pldl1keep, [x23, #0x80]\n"
"prfm pldl1keep, [x22, #0x80]\n"
"prfm pldl1keep, [x21, #0x80]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
"101:" // Height 4: Multiply loop: Main loop skip
- "cbz x25, 108f\n"
- "cmp x25, #0x4\n"
+ "cbz x24, 108f\n"
+ "cmp x24, #0x4\n"
"blt 104f\n"
"102:" // Height 4: Multiply loop: Odd block loop
- "ldr s0, [x24], #0x4\n"
- "ldr s1, [x23], #0x4\n"
- "ldr s2, [x22], #0x4\n"
- "ldr s3, [x21], #0x4\n"
+ "ldr s0, [x23], #0x4\n"
+ "ldr s1, [x22], #0x4\n"
+ "ldr s2, [x21], #0x4\n"
+ "ldr s3, [x20], #0x4\n"
"tbnz %x[flags], #31, 103f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
".inst 0x4e8f942c // sdot v12.4s, v1.16b, v15.16b\n"
@@ -1615,16 +1615,16 @@ void a64_hybrid_s8qa_dot_4x16 (
".inst 0x4e8f946e // sdot v14.4s, v3.16b, v15.16b\n"
"103:" // Height 4: Multiply loop: unique 15: skip row sum
"ldr q6, [x28, #0x0]\n"
- "ldr q7, [x28, #0x10]\n"
- "sub x25, x25, #0x4\n"
- "cmp x25, #0x4\n"
- "ldr q8, [x28, #0x20]\n"
- "ldr q9, [x28, #0x30]\n"
".inst 0x4f80e0d0 // sdot v16.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "sub x24, x24, #0x4\n"
".inst 0x4f81e0d4 // sdot v20.4s, v6.16b, v1.4b[0]\n"
+ "ldr q8, [x28, #0x20]\n"
+ "cmp x24, #0x4\n"
".inst 0x4f82e0d8 // sdot v24.4s, v6.16b, v2.4b[0]\n"
- ".inst 0x4f83e0dc // sdot v28.4s, v6.16b, v3.4b[0]\n"
+ "ldr q9, [x28, #0x30]\n"
"add x28, x28, #0x40\n"
+ ".inst 0x4f83e0dc // sdot v28.4s, v6.16b, v3.4b[0]\n"
".inst 0x4f80e0f1 // sdot v17.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0f5 // sdot v21.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f9 // sdot v25.4s, v7.16b, v2.4b[0]\n"
@@ -1638,24 +1638,24 @@ void a64_hybrid_s8qa_dot_4x16 (
".inst 0x4f82e13b // sdot v27.4s, v9.16b, v2.4b[0]\n"
".inst 0x4f83e13f // sdot v31.4s, v9.16b, v3.4b[0]\n"
"bge 102b\n"
+ "cbz x24, 108f\n"
"104:" // Height 4: Multiply loop: Skip odd blocks
- "cbz x25, 108f\n"
- "tbz x25, #1, 105f\n"
- "ldr h0, [x24], #0x2\n"
- "ldr h1, [x23], #0x2\n"
- "ldr h2, [x22], #0x2\n"
- "ldr h3, [x21], #0x2\n"
- "tbz x25, #0, 106f\n"
- "ld1 { v0.b }[2], [x24]\n"
- "ld1 { v1.b }[2], [x23]\n"
- "ld1 { v2.b }[2], [x22]\n"
- "ld1 { v3.b }[2], [x21]\n"
+ "tbz x24, #1, 105f\n"
+ "ldr h0, [x23], #0x2\n"
+ "ldr h1, [x22], #0x2\n"
+ "ldr h2, [x21], #0x2\n"
+ "ldr h3, [x20], #0x2\n"
+ "tbz x24, #0, 106f\n"
+ "ld1 { v0.b }[2], [x23]\n"
+ "ld1 { v1.b }[2], [x22]\n"
+ "ld1 { v2.b }[2], [x21]\n"
+ "ld1 { v3.b }[2], [x20]\n"
"b 106f\n"
"105:" // Height 4: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x24, #0x0]\n"
- "ldr b1, [x23, #0x0]\n"
- "ldr b2, [x22, #0x0]\n"
- "ldr b3, [x21, #0x0]\n"
+ "ldr b0, [x23, #0x0]\n"
+ "ldr b1, [x22, #0x0]\n"
+ "ldr b2, [x21, #0x0]\n"
+ "ldr b3, [x20, #0x0]\n"
"106:" // Height 4: Multiply loop: Ragged operand read: Done
"tbnz %x[flags], #31, 107f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
@@ -1664,16 +1664,16 @@ void a64_hybrid_s8qa_dot_4x16 (
".inst 0x4e8f946e // sdot v14.4s, v3.16b, v15.16b\n"
"107:" // Height 4: Multiply loop: unique 16: skip row sum
"ldr q10, [x28, #0x0]\n"
- "ldr q4, [x28, #0x10]\n"
".inst 0x4f80e150 // sdot v16.4s, v10.16b, v0.4b[0]\n"
+ "ldr q4, [x28, #0x10]\n"
".inst 0x4f81e154 // sdot v20.4s, v10.16b, v1.4b[0]\n"
"ldr q5, [x28, #0x20]\n"
- "ldr q6, [x28, #0x30]\n"
".inst 0x4f82e158 // sdot v24.4s, v10.16b, v2.4b[0]\n"
+ "ldr q6, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
".inst 0x4f83e15c // sdot v28.4s, v10.16b, v3.4b[0]\n"
".inst 0x4f80e091 // sdot v17.4s, v4.16b, v0.4b[0]\n"
".inst 0x4f81e095 // sdot v21.4s, v4.16b, v1.4b[0]\n"
- "add x28, x28, #0x40\n"
".inst 0x4f82e099 // sdot v25.4s, v4.16b, v2.4b[0]\n"
".inst 0x4f83e09d // sdot v29.4s, v4.16b, v3.4b[0]\n"
".inst 0x4f80e0b2 // sdot v18.4s, v5.16b, v0.4b[0]\n"
@@ -1685,27 +1685,27 @@ void a64_hybrid_s8qa_dot_4x16 (
".inst 0x4f82e0db // sdot v27.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0df // sdot v31.4s, v6.16b, v3.4b[0]\n"
"108:" // Height 4: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 94b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x27, x20\n"
- "add x21, x22, x20\n"
- "prfm pstl1keep, [x27, #0x0]\n"
- "add x20, x21, x20\n"
- "prfm pstl1keep, [x22, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x21, x26, x19\n"
"prfm pstl1keep, [x21, #0x0]\n"
+ "add x20, x21, x19\n"
"prfm pstl1keep, [x20, #0x0]\n"
+ "add x19, x20, x19\n"
+ "prfm pstl1keep, [x19, #0x0]\n"
"tbnz %x[flags], #31, 109f\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
"addp v11.4s, v11.4s, v11.4s\n"
+ "add x22, %x[qp], %[b_offset]\n"
+ "ld1r { v4.4s }, [x22]\n"
"addp v12.4s, v12.4s, v12.4s\n"
"addp v13.4s, v13.4s, v13.4s\n"
"addp v14.4s, v14.4s, v14.4s\n"
- "neg v4.4s, v4.4s\n"
"addp v11.4s, v11.4s, v11.4s\n"
+ "neg v4.4s, v4.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
"addp v13.4s, v13.4s, v13.4s\n"
"addp v14.4s, v14.4s, v14.4s\n"
@@ -1714,25 +1714,25 @@ void a64_hybrid_s8qa_dot_4x16 (
"mul v13.4s, v13.4s, v4.4s\n"
"mul v14.4s, v14.4s, v4.4s\n"
"109:" // Height 4: skip row sum fixup
- "ldr q0, [x10, #0x0]\n"
- "ldr q1, [x10, #0x10]\n"
"add v16.4s, v16.4s, v11.4s\n"
+ "ldr q0, [x27, #0x0]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"add v17.4s, v17.4s, v11.4s\n"
- "ldr q2, [x10, #0x20]\n"
- "ldr q3, [x10, #0x30]\n"
+ "ldr q1, [x27, #0x10]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
"add v18.4s, v18.4s, v11.4s\n"
+ "ldr q2, [x27, #0x20]\n"
+ "add x22, %x[qp], %[per_layer_mul]\n"
"add v19.4s, v19.4s, v11.4s\n"
+ "ldr q3, [x27, #0x30]\n"
+ "add x27, x27, #0x40\n"
"add v20.4s, v20.4s, v12.4s\n"
+ "ld1r { v4.4s }, [x22]\n"
"add v21.4s, v21.4s, v12.4s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x23]\n"
"add v22.4s, v22.4s, v12.4s\n"
"add v23.4s, v23.4s, v12.4s\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
"add v24.4s, v24.4s, v13.4s\n"
"add v25.4s, v25.4s, v13.4s\n"
- "add x10, x10, #0x40\n"
"add v26.4s, v26.4s, v13.4s\n"
"add v27.4s, v27.4s, v13.4s\n"
"add v28.4s, v28.4s, v14.4s\n"
@@ -1775,126 +1775,126 @@ void a64_hybrid_s8qa_dot_4x16 (
"tbz %x[flags], #5, 110f\n"
"and v4.16b, v16.16b, v0.16b\n"
"and v5.16b, v17.16b, v0.16b\n"
+ "and v6.16b, v18.16b, v0.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
+ "sshr v6.4s, v6.4s, #0x1f\n"
"sqadd v16.4s, v16.4s, v4.4s\n"
"sqadd v17.4s, v17.4s, v5.4s\n"
- "and v6.16b, v18.16b, v0.16b\n"
+ "sqadd v18.4s, v18.4s, v6.4s\n"
"and v7.16b, v19.16b, v0.16b\n"
"and v8.16b, v20.16b, v0.16b\n"
"and v9.16b, v21.16b, v0.16b\n"
- "and v10.16b, v22.16b, v0.16b\n"
- "and v4.16b, v23.16b, v0.16b\n"
- "and v5.16b, v24.16b, v0.16b\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sshr v8.4s, v8.4s, #0x1f\n"
"sshr v9.4s, v9.4s, #0x1f\n"
- "sshr v10.4s, v10.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sqadd v18.4s, v18.4s, v6.4s\n"
"sqadd v19.4s, v19.4s, v7.4s\n"
"sqadd v20.4s, v20.4s, v8.4s\n"
"sqadd v21.4s, v21.4s, v9.4s\n"
+ "and v10.16b, v22.16b, v0.16b\n"
+ "and v4.16b, v23.16b, v0.16b\n"
+ "and v5.16b, v24.16b, v0.16b\n"
+ "sshr v10.4s, v10.4s, #0x1f\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
"sqadd v22.4s, v22.4s, v10.4s\n"
"sqadd v23.4s, v23.4s, v4.4s\n"
"sqadd v24.4s, v24.4s, v5.4s\n"
"and v6.16b, v25.16b, v0.16b\n"
"and v7.16b, v26.16b, v0.16b\n"
"and v8.16b, v27.16b, v0.16b\n"
- "and v9.16b, v28.16b, v0.16b\n"
- "and v10.16b, v29.16b, v0.16b\n"
- "and v4.16b, v30.16b, v0.16b\n"
- "and v5.16b, v31.16b, v0.16b\n"
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sshr v8.4s, v8.4s, #0x1f\n"
- "sshr v9.4s, v9.4s, #0x1f\n"
- "sshr v10.4s, v10.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
"sqadd v25.4s, v25.4s, v6.4s\n"
"sqadd v26.4s, v26.4s, v7.4s\n"
"sqadd v27.4s, v27.4s, v8.4s\n"
+ "and v9.16b, v28.16b, v0.16b\n"
+ "and v10.16b, v29.16b, v0.16b\n"
+ "and v4.16b, v30.16b, v0.16b\n"
+ "sshr v9.4s, v9.4s, #0x1f\n"
+ "sshr v10.4s, v10.4s, #0x1f\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
"sqadd v28.4s, v28.4s, v9.4s\n"
"sqadd v29.4s, v29.4s, v10.4s\n"
"sqadd v30.4s, v30.4s, v4.4s\n"
+ "and v5.16b, v31.16b, v0.16b\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
"sqadd v31.4s, v31.4s, v5.4s\n"
"110:" // Height 4: no shift correction
- "add x23, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
"srshl v16.4s, v16.4s, v0.4s\n"
+ "add x22, %x[qp], %[c_offset]\n"
+ "ld1r { v4.4s }, [x22]\n"
"srshl v17.4s, v17.4s, v0.4s\n"
+ "add x22, %x[qp], %[minval]\n"
"srshl v18.4s, v18.4s, v0.4s\n"
+ "ld1r { v5.4s }, [x22]\n"
+ "add x22, %x[qp], %[maxval]\n"
"srshl v19.4s, v19.4s, v0.4s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x23]\n"
+ "ld1r { v6.4s }, [x22]\n"
+ "cmp x9, #0x10\n"
"srshl v20.4s, v20.4s, v0.4s\n"
"srshl v21.4s, v21.4s, v0.4s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x23]\n"
- "srshl v22.4s, v22.4s, v0.4s\n"
- "srshl v23.4s, v23.4s, v0.4s\n"
- "cmp x9, #0x10\n"
- "srshl v24.4s, v24.4s, v0.4s\n"
- "srshl v25.4s, v25.4s, v0.4s\n"
- "srshl v26.4s, v26.4s, v0.4s\n"
- "srshl v27.4s, v27.4s, v0.4s\n"
- "srshl v28.4s, v28.4s, v0.4s\n"
- "srshl v29.4s, v29.4s, v0.4s\n"
- "srshl v30.4s, v30.4s, v0.4s\n"
- "srshl v31.4s, v31.4s, v0.4s\n"
"add v16.4s, v16.4s, v4.4s\n"
"add v17.4s, v17.4s, v4.4s\n"
"add v18.4s, v18.4s, v4.4s\n"
- "add v19.4s, v19.4s, v4.4s\n"
- "add v20.4s, v20.4s, v4.4s\n"
- "add v21.4s, v21.4s, v4.4s\n"
- "add v22.4s, v22.4s, v4.4s\n"
- "add v23.4s, v23.4s, v4.4s\n"
- "add v24.4s, v24.4s, v4.4s\n"
- "add v25.4s, v25.4s, v4.4s\n"
- "add v26.4s, v26.4s, v4.4s\n"
- "add v27.4s, v27.4s, v4.4s\n"
- "add v28.4s, v28.4s, v4.4s\n"
- "add v29.4s, v29.4s, v4.4s\n"
- "add v30.4s, v30.4s, v4.4s\n"
- "add v31.4s, v31.4s, v4.4s\n"
"smin v16.4s, v16.4s, v6.4s\n"
"smin v17.4s, v17.4s, v6.4s\n"
"smin v18.4s, v18.4s, v6.4s\n"
- "smin v19.4s, v19.4s, v6.4s\n"
- "smin v20.4s, v20.4s, v6.4s\n"
- "smin v21.4s, v21.4s, v6.4s\n"
- "smin v22.4s, v22.4s, v6.4s\n"
- "smin v23.4s, v23.4s, v6.4s\n"
- "smin v24.4s, v24.4s, v6.4s\n"
- "smin v25.4s, v25.4s, v6.4s\n"
- "smin v26.4s, v26.4s, v6.4s\n"
- "smin v27.4s, v27.4s, v6.4s\n"
- "smin v28.4s, v28.4s, v6.4s\n"
- "smin v29.4s, v29.4s, v6.4s\n"
- "smin v30.4s, v30.4s, v6.4s\n"
- "smin v31.4s, v31.4s, v6.4s\n"
"smax v16.4s, v16.4s, v5.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
"smax v18.4s, v18.4s, v5.4s\n"
+ "add v19.4s, v19.4s, v4.4s\n"
+ "add v20.4s, v20.4s, v4.4s\n"
+ "add v21.4s, v21.4s, v4.4s\n"
+ "smin v19.4s, v19.4s, v6.4s\n"
+ "smin v20.4s, v20.4s, v6.4s\n"
+ "smin v21.4s, v21.4s, v6.4s\n"
"smax v19.4s, v19.4s, v5.4s\n"
"smax v20.4s, v20.4s, v5.4s\n"
"smax v21.4s, v21.4s, v5.4s\n"
+ "srshl v22.4s, v22.4s, v0.4s\n"
+ "srshl v23.4s, v23.4s, v0.4s\n"
+ "srshl v24.4s, v24.4s, v0.4s\n"
+ "srshl v25.4s, v25.4s, v0.4s\n"
+ "add v22.4s, v22.4s, v4.4s\n"
+ "add v23.4s, v23.4s, v4.4s\n"
+ "add v24.4s, v24.4s, v4.4s\n"
+ "smin v22.4s, v22.4s, v6.4s\n"
+ "smin v23.4s, v23.4s, v6.4s\n"
+ "smin v24.4s, v24.4s, v6.4s\n"
"smax v22.4s, v22.4s, v5.4s\n"
"smax v23.4s, v23.4s, v5.4s\n"
"smax v24.4s, v24.4s, v5.4s\n"
+ "add v25.4s, v25.4s, v4.4s\n"
+ "srshl v26.4s, v26.4s, v0.4s\n"
+ "srshl v27.4s, v27.4s, v0.4s\n"
+ "smin v25.4s, v25.4s, v6.4s\n"
+ "srshl v28.4s, v28.4s, v0.4s\n"
+ "add v26.4s, v26.4s, v4.4s\n"
"smax v25.4s, v25.4s, v5.4s\n"
+ "add v27.4s, v27.4s, v4.4s\n"
+ "smin v26.4s, v26.4s, v6.4s\n"
+ "add v28.4s, v28.4s, v4.4s\n"
+ "smin v27.4s, v27.4s, v6.4s\n"
"smax v26.4s, v26.4s, v5.4s\n"
+ "smin v28.4s, v28.4s, v6.4s\n"
"smax v27.4s, v27.4s, v5.4s\n"
+ "srshl v29.4s, v29.4s, v0.4s\n"
"smax v28.4s, v28.4s, v5.4s\n"
+ "srshl v30.4s, v30.4s, v0.4s\n"
+ "srshl v31.4s, v31.4s, v0.4s\n"
+ "add v29.4s, v29.4s, v4.4s\n"
+ "uzp1 v16.8h, v16.8h, v17.8h\n"
+ "add v30.4s, v30.4s, v4.4s\n"
+ "smin v29.4s, v29.4s, v6.4s\n"
+ "add v31.4s, v31.4s, v4.4s\n"
+ "smin v30.4s, v30.4s, v6.4s\n"
"smax v29.4s, v29.4s, v5.4s\n"
+ "smin v31.4s, v31.4s, v6.4s\n"
"smax v30.4s, v30.4s, v5.4s\n"
- "smax v31.4s, v31.4s, v5.4s\n"
- "uzp1 v16.8h, v16.8h, v17.8h\n"
"uzp1 v17.8h, v18.8h, v19.8h\n"
+ "smax v31.4s, v31.4s, v5.4s\n"
"uzp1 v20.8h, v20.8h, v21.8h\n"
"uzp1 v21.8h, v22.8h, v23.8h\n"
"uzp1 v24.8h, v24.8h, v25.8h\n"
@@ -1907,120 +1907,120 @@ void a64_hybrid_s8qa_dot_4x16 (
"uzp1 v28.16b, v28.16b, v29.16b\n"
"bge 119f\n"
"tbz x9, #3, 114f\n"
- "str d16, [x27], #0x8\n"
- "str d20, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
- "str d28, [x20], #0x8\n"
+ "str d16, [x26], #0x8\n"
+ "str d20, [x21], #0x8\n"
+ "str d24, [x20], #0x8\n"
+ "str d28, [x19], #0x8\n"
"tbz x9, #2, 112f\n"
- "st1 { v16.s }[2], [x27], #0x4\n"
- "st1 { v20.s }[2], [x22], #0x4\n"
- "st1 { v24.s }[2], [x21], #0x4\n"
- "st1 { v28.s }[2], [x20], #0x4\n"
+ "st1 { v16.s }[2], [x26], #0x4\n"
+ "st1 { v20.s }[2], [x21], #0x4\n"
+ "st1 { v24.s }[2], [x20], #0x4\n"
+ "st1 { v28.s }[2], [x19], #0x4\n"
"tbz x9, #1, 111f\n"
- "st1 { v16.h }[6], [x27], #0x2\n"
- "st1 { v20.h }[6], [x22], #0x2\n"
- "st1 { v24.h }[6], [x21], #0x2\n"
- "st1 { v28.h }[6], [x20], #0x2\n"
+ "st1 { v16.h }[6], [x26], #0x2\n"
+ "st1 { v20.h }[6], [x21], #0x2\n"
+ "st1 { v24.h }[6], [x20], #0x2\n"
+ "st1 { v28.h }[6], [x19], #0x2\n"
"tbz x9, #0, 118f\n"
- "st1 { v16.b }[14], [x27]\n"
- "st1 { v20.b }[14], [x22]\n"
- "st1 { v24.b }[14], [x21]\n"
- "st1 { v28.b }[14], [x20]\n"
+ "st1 { v16.b }[14], [x26]\n"
+ "st1 { v20.b }[14], [x21]\n"
+ "st1 { v24.b }[14], [x20]\n"
+ "st1 { v28.b }[14], [x19]\n"
"b 118f\n"
"111:" // Height 4: Partial direct writeback: partial_1_12
"tbz x9, #0, 118f\n"
- "st1 { v16.b }[12], [x27]\n"
- "st1 { v20.b }[12], [x22]\n"
- "st1 { v24.b }[12], [x21]\n"
- "st1 { v28.b }[12], [x20]\n"
+ "st1 { v16.b }[12], [x26]\n"
+ "st1 { v20.b }[12], [x21]\n"
+ "st1 { v24.b }[12], [x20]\n"
+ "st1 { v28.b }[12], [x19]\n"
"b 118f\n"
"112:" // Height 4: Partial direct writeback: partial_2_8
"tbz x9, #1, 113f\n"
- "st1 { v16.h }[4], [x27], #0x2\n"
- "st1 { v20.h }[4], [x22], #0x2\n"
- "st1 { v24.h }[4], [x21], #0x2\n"
- "st1 { v28.h }[4], [x20], #0x2\n"
+ "st1 { v16.h }[4], [x26], #0x2\n"
+ "st1 { v20.h }[4], [x21], #0x2\n"
+ "st1 { v24.h }[4], [x20], #0x2\n"
+ "st1 { v28.h }[4], [x19], #0x2\n"
"tbz x9, #0, 118f\n"
- "st1 { v16.b }[10], [x27]\n"
- "st1 { v20.b }[10], [x22]\n"
- "st1 { v24.b }[10], [x21]\n"
- "st1 { v28.b }[10], [x20]\n"
+ "st1 { v16.b }[10], [x26]\n"
+ "st1 { v20.b }[10], [x21]\n"
+ "st1 { v24.b }[10], [x20]\n"
+ "st1 { v28.b }[10], [x19]\n"
"b 118f\n"
"113:" // Height 4: Partial direct writeback: partial_1_8
"tbz x9, #0, 118f\n"
- "st1 { v16.b }[8], [x27]\n"
- "st1 { v20.b }[8], [x22]\n"
- "st1 { v24.b }[8], [x21]\n"
- "st1 { v28.b }[8], [x20]\n"
+ "st1 { v16.b }[8], [x26]\n"
+ "st1 { v20.b }[8], [x21]\n"
+ "st1 { v24.b }[8], [x20]\n"
+ "st1 { v28.b }[8], [x19]\n"
"b 118f\n"
"114:" // Height 4: Partial direct writeback: partial_4_0
"tbz x9, #2, 116f\n"
- "str s16, [x27], #0x4\n"
- "str s20, [x22], #0x4\n"
- "str s24, [x21], #0x4\n"
- "str s28, [x20], #0x4\n"
+ "str s16, [x26], #0x4\n"
+ "str s20, [x21], #0x4\n"
+ "str s24, [x20], #0x4\n"
+ "str s28, [x19], #0x4\n"
"tbz x9, #1, 115f\n"
- "st1 { v16.h }[2], [x27], #0x2\n"
- "st1 { v20.h }[2], [x22], #0x2\n"
- "st1 { v24.h }[2], [x21], #0x2\n"
- "st1 { v28.h }[2], [x20], #0x2\n"
+ "st1 { v16.h }[2], [x26], #0x2\n"
+ "st1 { v20.h }[2], [x21], #0x2\n"
+ "st1 { v24.h }[2], [x20], #0x2\n"
+ "st1 { v28.h }[2], [x19], #0x2\n"
"tbz x9, #0, 118f\n"
- "st1 { v16.b }[6], [x27]\n"
- "st1 { v20.b }[6], [x22]\n"
- "st1 { v24.b }[6], [x21]\n"
- "st1 { v28.b }[6], [x20]\n"
+ "st1 { v16.b }[6], [x26]\n"
+ "st1 { v20.b }[6], [x21]\n"
+ "st1 { v24.b }[6], [x20]\n"
+ "st1 { v28.b }[6], [x19]\n"
"b 118f\n"
"115:" // Height 4: Partial direct writeback: partial_1_4
"tbz x9, #0, 118f\n"
- "st1 { v16.b }[4], [x27]\n"
- "st1 { v20.b }[4], [x22]\n"
- "st1 { v24.b }[4], [x21]\n"
- "st1 { v28.b }[4], [x20]\n"
+ "st1 { v16.b }[4], [x26]\n"
+ "st1 { v20.b }[4], [x21]\n"
+ "st1 { v24.b }[4], [x20]\n"
+ "st1 { v28.b }[4], [x19]\n"
"b 118f\n"
"116:" // Height 4: Partial direct writeback: partial_2_0
"tbz x9, #1, 117f\n"
- "str h16, [x27], #0x2\n"
- "str h20, [x22], #0x2\n"
- "str h24, [x21], #0x2\n"
- "str h28, [x20], #0x2\n"
+ "str h16, [x26], #0x2\n"
+ "str h20, [x21], #0x2\n"
+ "str h24, [x20], #0x2\n"
+ "str h28, [x19], #0x2\n"
"tbz x9, #0, 118f\n"
- "st1 { v16.b }[2], [x27]\n"
- "st1 { v20.b }[2], [x22]\n"
- "st1 { v24.b }[2], [x21]\n"
- "st1 { v28.b }[2], [x20]\n"
+ "st1 { v16.b }[2], [x26]\n"
+ "st1 { v20.b }[2], [x21]\n"
+ "st1 { v24.b }[2], [x20]\n"
+ "st1 { v28.b }[2], [x19]\n"
"b 118f\n"
"117:" // Height 4: Partial direct writeback: partial_1_0
- "str b16, [x27, #0x0]\n"
- "str b20, [x22, #0x0]\n"
- "str b24, [x21, #0x0]\n"
- "str b28, [x20, #0x0]\n"
+ "str b16, [x26, #0x0]\n"
+ "str b20, [x21, #0x0]\n"
+ "str b24, [x20, #0x0]\n"
+ "str b28, [x19, #0x0]\n"
"118:" // Height 4: Partial direct writeback: Done
"b 120f\n"
"119:" // Height 4: Full writeback
- "str q16, [x27, #0x0]\n"
- "add x27, x27, #0x10\n"
- "str q20, [x22, #0x0]\n"
- "str q24, [x21, #0x0]\n"
- "str q28, [x20, #0x0]\n"
+ "str q16, [x26, #0x0]\n"
+ "add x26, x26, #0x10\n"
+ "str q20, [x21, #0x0]\n"
+ "str q24, [x20, #0x0]\n"
+ "str q28, [x19, #0x0]\n"
"120:" // Height 4: Writeback done
"subs x9, x9, #0x10\n"
"bgt 92b\n"
"subs %x[M], %x[M], #0x4\n"
"beq 122f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 121f\n"
- "add x21, x21, #0x4\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x4\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"121:" // Update direct input
- "mov x20, #0x4\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x4\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"122:" // Exit
: [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
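
The tbz ladders in the partial-writeback blocks above peel the remaining byte count into power-of-two stores: one str d if bit 3 of x9 is set, then st1 {.s}, st1 {.h} and st1 {.b} stores for bits 2 down to 0. A contiguous-memory sketch of the same decomposition (names are illustrative):

#include <cstdint>
#include <cstring>

// Store n (< 16) bytes of a 16-byte vector using the tbz bit tests:
// bit 3 selects an 8-byte chunk, bits 2..0 select 4/2/1-byte chunks.
// Storing in descending chunk size is what makes the fixed element
// indices in the st1 forms ([2].s, [6].h, [14].b, ...) line up.
void store_tail(uint8_t *dst, const uint8_t src[16], int n)
{
    int pos = 0;
    for (int bit = 3; bit >= 0; --bit)
    {
        int chunk = 1 << bit;            // 8, 4, 2, 1
        if (n & chunk)
        {
            std::memcpy(dst + pos, src + pos, chunk);
            pos += chunk;
        }
    }
}
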
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_mmla_4x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_mmla_4x16/generic.cpp
index 69d01a265e..4bc807cd8e 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_mmla_4x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_mmla_4x16/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -85,13 +85,13 @@ void a64_hybrid_s8qa_mmla_4x16 (
"cmp %x[M], #0x2\n"
"bgt 65f\n"
"beq 33f\n"
- "mov x10, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
- "movi v15.16b, #0x1\n"
- "bic %x[flags], %x[flags], #0x80000000\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "movi v15.16b, #0x1\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "mov x27, %x[col_bias]\n"
+ "bic %x[flags], %x[flags], #0x80000000\n"
+ "mov x26, %x[output_ptr]\n"
"2:" // Height 1: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -102,27 +102,27 @@ void a64_hybrid_s8qa_mmla_4x16 (
"movi v22.4s, #0x0\n"
"movi v23.4s, #0x0\n"
"3:" // Height 1: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"4:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 5f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "cbnz x26, 6f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "cbnz x25, 6f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19\n"
"b 6f\n"
"5:" // Height 1: setup direct input
- "mov x24, %x[input_ptr]\n"
+ "mov x23, %x[input_ptr]\n"
"6:" // Height 1: input setup done
- "cmp x25, #0x10\n"
+ "cmp x24, #0x10\n"
"blt 11f\n"
- "ldr q1, [x24, #0x0]\n"
+ "ldr q1, [x23, #0x0]\n"
"ldr q5, [x28, #0x0]\n"
- "cmp x25, #0x20\n"
+ "cmp x24, #0x20\n"
"ldr q6, [x28, #0x10]\n"
"ldr q7, [x28, #0x20]\n"
"ldr q8, [x28, #0x30]\n"
@@ -132,9 +132,10 @@ void a64_hybrid_s8qa_mmla_4x16 (
"blt 9f\n"
"7:" // Height 1: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "add x23, x23, #0x10\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
".inst 0x4e85a410 // smmla v16.4s, v0.16b, v5.16b\n"
"ldr q5, [x28, #0x70]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
".inst 0x4e86a414 // smmla v20.4s, v0.16b, v6.16b\n"
"ldr q6, [x28, #0x80]\n"
".inst 0x4e87a411 // smmla v17.4s, v0.16b, v7.16b\n"
@@ -151,10 +152,9 @@ void a64_hybrid_s8qa_mmla_4x16 (
"ldr q5, [x28, #0xe0]\n"
".inst 0x4e86a430 // smmla v16.4s, v1.16b, v6.16b\n"
"ldr q6, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
".inst 0x4e87a434 // smmla v20.4s, v1.16b, v7.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x4e88a431 // smmla v17.4s, v1.16b, v8.16b\n"
- "add x28, x28, #0x100\n"
".inst 0x4e89a435 // smmla v21.4s, v1.16b, v9.16b\n"
".inst 0x4e8aa432 // smmla v18.4s, v1.16b, v10.16b\n"
".inst 0x4e84a436 // smmla v22.4s, v1.16b, v4.16b\n"
@@ -164,23 +164,25 @@ void a64_hybrid_s8qa_mmla_4x16 (
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
".inst 0x4e8f942b // sdot v11.4s, v1.16b, v15.16b\n"
"8:" // Height 1: Multiply loop: unique 1: skip row sum
- "ldr q1, [x24, #0x0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "sub x24, x24, #0x10\n"
+ "ldr q1, [x23, #0x0]\n"
+ "cmp x24, #0x20\n"
"ldr q5, [x28, #0x0]\n"
- "sub x25, x25, #0x10\n"
- "cmp x25, #0x20\n"
"ldr q6, [x28, #0x10]\n"
"ldr q7, [x28, #0x20]\n"
"ldr q8, [x28, #0x30]\n"
"ldr q9, [x28, #0x40]\n"
"ldr q10, [x28, #0x50]\n"
"ldr q4, [x28, #0x60]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"bge 7b\n"
"9:" // Height 1: Multiply loop: Single iteration only
+ "sub x24, x24, #0x10\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "add x23, x23, #0x10\n"
".inst 0x4e85a410 // smmla v16.4s, v0.16b, v5.16b\n"
"ldr q5, [x28, #0x70]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
".inst 0x4e86a414 // smmla v20.4s, v0.16b, v6.16b\n"
"ldr q6, [x28, #0x80]\n"
".inst 0x4e87a411 // smmla v17.4s, v0.16b, v7.16b\n"
@@ -197,11 +199,9 @@ void a64_hybrid_s8qa_mmla_4x16 (
"ldr q5, [x28, #0xe0]\n"
".inst 0x4e86a430 // smmla v16.4s, v1.16b, v6.16b\n"
"ldr q6, [x28, #0xf0]\n"
- "sub x25, x25, #0x10\n"
+ "add x28, x28, #0x100\n"
".inst 0x4e87a434 // smmla v20.4s, v1.16b, v7.16b\n"
".inst 0x4e88a431 // smmla v17.4s, v1.16b, v8.16b\n"
- "add x24, x24, #0x10\n"
- "add x28, x28, #0x100\n"
".inst 0x4e89a435 // smmla v21.4s, v1.16b, v9.16b\n"
".inst 0x4e8aa432 // smmla v18.4s, v1.16b, v10.16b\n"
".inst 0x4e84a436 // smmla v22.4s, v1.16b, v4.16b\n"
@@ -211,118 +211,120 @@ void a64_hybrid_s8qa_mmla_4x16 (
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
".inst 0x4e8f942b // sdot v11.4s, v1.16b, v15.16b\n"
"10:" // Height 1: Multiply loop: unique 2: skip row sum
- "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"11:" // Height 1: Multiply loop: Main loop skip
- "cbz x25, 20f\n"
- "cmp x25, #0x8\n"
+ "cbz x24, 20f\n"
+ "cmp x24, #0x8\n"
"blt 14f\n"
"12:" // Height 1: Multiply loop: Odd block loop
- "ldr d1, [x24], #0x8\n"
+ "movi v2.16b, #0x0\n"
+ "ldr d1, [x23], #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
"tbnz %x[flags], #31, 13f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
"13:" // Height 1: Multiply loop: unique 3: skip row sum
"ldr q8, [x28, #0x0]\n"
- "ldr q9, [x28, #0x10]\n"
".inst 0x4e88a410 // smmla v16.4s, v0.16b, v8.16b\n"
- "sub x25, x25, #0x8\n"
+ "ldr q9, [x28, #0x10]\n"
+ "sub x24, x24, #0x8\n"
+ ".inst 0x4e89a414 // smmla v20.4s, v0.16b, v9.16b\n"
"ldr q10, [x28, #0x20]\n"
+ "cmp x24, #0x8\n"
+ ".inst 0x4e8aa411 // smmla v17.4s, v0.16b, v10.16b\n"
"ldr q4, [x28, #0x30]\n"
- "cmp x25, #0x8\n"
- ".inst 0x4e89a414 // smmla v20.4s, v0.16b, v9.16b\n"
"ldr q5, [x28, #0x40]\n"
- "ldr q6, [x28, #0x50]\n"
- ".inst 0x4e8aa411 // smmla v17.4s, v0.16b, v10.16b\n"
".inst 0x4e84a415 // smmla v21.4s, v0.16b, v4.16b\n"
+ "ldr q6, [x28, #0x50]\n"
+ ".inst 0x4e85a412 // smmla v18.4s, v0.16b, v5.16b\n"
"ldr q7, [x28, #0x60]\n"
"ldr q8, [x28, #0x70]\n"
- ".inst 0x4e85a412 // smmla v18.4s, v0.16b, v5.16b\n"
".inst 0x4e86a416 // smmla v22.4s, v0.16b, v6.16b\n"
+ "add x28, x28, #0x80\n"
".inst 0x4e87a413 // smmla v19.4s, v0.16b, v7.16b\n"
".inst 0x4e88a417 // smmla v23.4s, v0.16b, v8.16b\n"
- "add x28, x28, #0x80\n"
"bge 12b\n"
+ "cbz x24, 20f\n"
"14:" // Height 1: Multiply loop: Skip odd blocks
- "cbz x25, 20f\n"
- "tbz x25, #2, 16f\n"
- "ldr s1, [x24], #0x4\n"
- "tbz x25, #1, 15f\n"
- "ld1 { v1.h }[2], [x24], #0x2\n"
- "tbz x25, #0, 18f\n"
- "ld1 { v1.b }[6], [x24]\n"
+ "tbz x24, #2, 16f\n"
+ "ldr s1, [x23], #0x4\n"
+ "tbz x24, #1, 15f\n"
+ "ld1 { v1.h }[2], [x23], #0x2\n"
+ "tbz x24, #0, 18f\n"
+ "ld1 { v1.b }[6], [x23]\n"
"b 18f\n"
"15:" // Height 1: Multiply loop: Ragged operand read: partial_1_4
- "tbz x25, #0, 18f\n"
- "ld1 { v1.b }[4], [x24]\n"
+ "tbz x24, #0, 18f\n"
+ "ld1 { v1.b }[4], [x23]\n"
"b 18f\n"
"16:" // Height 1: Multiply loop: Ragged operand read: partial_2_0
- "tbz x25, #1, 17f\n"
- "ldr h1, [x24], #0x2\n"
- "tbz x25, #0, 18f\n"
- "ld1 { v1.b }[2], [x24]\n"
+ "tbz x24, #1, 17f\n"
+ "ldr h1, [x23], #0x2\n"
+ "tbz x24, #0, 18f\n"
+ "ld1 { v1.b }[2], [x23]\n"
"b 18f\n"
"17:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
- "ldr b1, [x24, #0x0]\n"
+ "ldr b1, [x23, #0x0]\n"
"18:" // Height 1: Multiply loop: Ragged operand read: Done
+ "movi v2.16b, #0x0\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
"tbnz %x[flags], #31, 19f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
"19:" // Height 1: Multiply loop: unique 4: skip row sum
"ldr q10, [x28, #0x0]\n"
- "ldr q4, [x28, #0x10]\n"
".inst 0x4e8aa410 // smmla v16.4s, v0.16b, v10.16b\n"
- ".inst 0x4e84a414 // smmla v20.4s, v0.16b, v4.16b\n"
+ "ldr q4, [x28, #0x10]\n"
"ldr q5, [x28, #0x20]\n"
+ ".inst 0x4e84a414 // smmla v20.4s, v0.16b, v4.16b\n"
"ldr q6, [x28, #0x30]\n"
".inst 0x4e85a411 // smmla v17.4s, v0.16b, v5.16b\n"
- ".inst 0x4e86a415 // smmla v21.4s, v0.16b, v6.16b\n"
"ldr q7, [x28, #0x40]\n"
"ldr q8, [x28, #0x50]\n"
- ".inst 0x4e87a412 // smmla v18.4s, v0.16b, v7.16b\n"
- ".inst 0x4e88a416 // smmla v22.4s, v0.16b, v8.16b\n"
+ ".inst 0x4e86a415 // smmla v21.4s, v0.16b, v6.16b\n"
"ldr q9, [x28, #0x60]\n"
"ldr q10, [x28, #0x70]\n"
+ ".inst 0x4e87a412 // smmla v18.4s, v0.16b, v7.16b\n"
+ "add x28, x28, #0x80\n"
+ ".inst 0x4e88a416 // smmla v22.4s, v0.16b, v8.16b\n"
".inst 0x4e89a413 // smmla v19.4s, v0.16b, v9.16b\n"
".inst 0x4e8aa417 // smmla v23.4s, v0.16b, v10.16b\n"
- "add x28, x28, #0x80\n"
"20:" // Height 1: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 4b\n"
"uzp1 v16.2d, v16.2d, v20.2d\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
"uzp1 v17.2d, v17.2d, v21.2d\n"
- "prfm pstl1keep, [x27, #0x0]\n"
"uzp1 v18.2d, v18.2d, v22.2d\n"
"uzp1 v19.2d, v19.2d, v23.2d\n"
"mov v23.16b, v16.16b\n"
"tbnz %x[flags], #31, 21f\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1r { v1.4s }, [x23]\n"
"addp v11.4s, v11.4s, v11.4s\n"
- "neg v1.4s, v1.4s\n"
+ "add x22, %x[qp], %[b_offset]\n"
+ "ld1r { v1.4s }, [x22]\n"
"dup v11.4s, v11.s[0]\n"
+ "neg v1.4s, v1.4s\n"
"mul v11.4s, v11.4s, v1.4s\n"
"21:" // Height 1: skip row sum fixup
- "ldr q0, [x10, #0x0]\n"
- "ldr q1, [x10, #0x10]\n"
"add v23.4s, v23.4s, v11.4s\n"
+ "ldr q0, [x27, #0x0]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"add v17.4s, v17.4s, v11.4s\n"
- "ldr q2, [x10, #0x20]\n"
- "ldr q3, [x10, #0x30]\n"
+ "ldr q1, [x27, #0x10]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
"add v18.4s, v18.4s, v11.4s\n"
+ "ldr q2, [x27, #0x20]\n"
+ "add x22, %x[qp], %[per_layer_mul]\n"
"add v19.4s, v19.4s, v11.4s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x23]\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
+ "ldr q3, [x27, #0x30]\n"
+ "add x27, x27, #0x40\n"
"add v23.4s, v23.4s, v0.4s\n"
+ "ld1r { v0.4s }, [x23]\n"
+ "ld1r { v4.4s }, [x22]\n"
"add v17.4s, v17.4s, v1.4s\n"
"add v18.4s, v18.4s, v2.4s\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x23]\n"
"add v19.4s, v19.4s, v3.4s\n"
"sqrdmulh v23.4s, v23.4s, v4.4s\n"
- "add x10, x10, #0x40\n"
"sqrdmulh v17.4s, v17.4s, v4.4s\n"
"sqrdmulh v18.4s, v18.4s, v4.4s\n"
"sqrdmulh v19.4s, v19.4s, v4.4s\n"
@@ -334,100 +336,100 @@ void a64_hybrid_s8qa_mmla_4x16 (
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v23.4s, v23.4s, v4.4s\n"
"sqadd v17.4s, v17.4s, v5.4s\n"
"sqadd v18.4s, v18.4s, v6.4s\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v19.4s, v19.4s, v7.4s\n"
"22:" // Height 1: no shift correction
- "add x23, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
"srshl v23.4s, v23.4s, v0.4s\n"
+ "add x22, %x[qp], %[c_offset]\n"
+ "ld1r { v4.4s }, [x22]\n"
"srshl v17.4s, v17.4s, v0.4s\n"
+ "add x22, %x[qp], %[minval]\n"
"srshl v18.4s, v18.4s, v0.4s\n"
+ "ld1r { v5.4s }, [x22]\n"
+ "add x22, %x[qp], %[maxval]\n"
"srshl v19.4s, v19.4s, v0.4s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x23]\n"
+ "ld1r { v6.4s }, [x22]\n"
+ "cmp x9, #0x10\n"
"add v23.4s, v23.4s, v4.4s\n"
"add v17.4s, v17.4s, v4.4s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x23]\n"
"add v18.4s, v18.4s, v4.4s\n"
"add v19.4s, v19.4s, v4.4s\n"
- "cmp x9, #0x10\n"
"smin v23.4s, v23.4s, v6.4s\n"
"smin v17.4s, v17.4s, v6.4s\n"
"smin v18.4s, v18.4s, v6.4s\n"
- "smin v19.4s, v19.4s, v6.4s\n"
"smax v23.4s, v23.4s, v5.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
"smax v18.4s, v18.4s, v5.4s\n"
- "smax v19.4s, v19.4s, v5.4s\n"
+ "smin v19.4s, v19.4s, v6.4s\n"
"uzp1 v23.8h, v23.8h, v17.8h\n"
+ "smax v19.4s, v19.4s, v5.4s\n"
"uzp1 v17.8h, v18.8h, v19.8h\n"
"uzp1 v23.16b, v23.16b, v17.16b\n"
"bge 31f\n"
"tbz x9, #3, 26f\n"
- "str d23, [x27], #0x8\n"
+ "str d23, [x26], #0x8\n"
"tbz x9, #2, 24f\n"
- "st1 { v23.s }[2], [x27], #0x4\n"
+ "st1 { v23.s }[2], [x26], #0x4\n"
"tbz x9, #1, 23f\n"
- "st1 { v23.h }[6], [x27], #0x2\n"
+ "st1 { v23.h }[6], [x26], #0x2\n"
"tbz x9, #0, 30f\n"
- "st1 { v23.b }[14], [x27]\n"
+ "st1 { v23.b }[14], [x26]\n"
"b 30f\n"
"23:" // Height 1: Partial direct writeback: partial_1_12
"tbz x9, #0, 30f\n"
- "st1 { v23.b }[12], [x27]\n"
+ "st1 { v23.b }[12], [x26]\n"
"b 30f\n"
"24:" // Height 1: Partial direct writeback: partial_2_8
"tbz x9, #1, 25f\n"
- "st1 { v23.h }[4], [x27], #0x2\n"
+ "st1 { v23.h }[4], [x26], #0x2\n"
"tbz x9, #0, 30f\n"
- "st1 { v23.b }[10], [x27]\n"
+ "st1 { v23.b }[10], [x26]\n"
"b 30f\n"
"25:" // Height 1: Partial direct writeback: partial_1_8
"tbz x9, #0, 30f\n"
- "st1 { v23.b }[8], [x27]\n"
+ "st1 { v23.b }[8], [x26]\n"
"b 30f\n"
"26:" // Height 1: Partial direct writeback: partial_4_0
"tbz x9, #2, 28f\n"
- "str s23, [x27], #0x4\n"
+ "str s23, [x26], #0x4\n"
"tbz x9, #1, 27f\n"
- "st1 { v23.h }[2], [x27], #0x2\n"
+ "st1 { v23.h }[2], [x26], #0x2\n"
"tbz x9, #0, 30f\n"
- "st1 { v23.b }[6], [x27]\n"
+ "st1 { v23.b }[6], [x26]\n"
"b 30f\n"
"27:" // Height 1: Partial direct writeback: partial_1_4
"tbz x9, #0, 30f\n"
- "st1 { v23.b }[4], [x27]\n"
+ "st1 { v23.b }[4], [x26]\n"
"b 30f\n"
"28:" // Height 1: Partial direct writeback: partial_2_0
"tbz x9, #1, 29f\n"
- "str h23, [x27], #0x2\n"
+ "str h23, [x26], #0x2\n"
"tbz x9, #0, 30f\n"
- "st1 { v23.b }[2], [x27]\n"
+ "st1 { v23.b }[2], [x26]\n"
"b 30f\n"
"29:" // Height 1: Partial direct writeback: partial_1_0
- "str b23, [x27, #0x0]\n"
+ "str b23, [x26, #0x0]\n"
"30:" // Height 1: Partial direct writeback: Done
"b 32f\n"
"31:" // Height 1: Full writeback
- "str q23, [x27, #0x0]\n"
- "add x27, x27, #0x10\n"
+ "str q23, [x26, #0x0]\n"
+ "add x26, x26, #0x10\n"
"32:" // Height 1: Writeback done
"subs x9, x9, #0x10\n"
"bgt 2b\n"
"b 130f\n"
"33:" // Height 2
- "mov x10, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x27, %x[col_bias]\n"
"movi v12.4s, #0x0\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"bic %x[flags], %x[flags], #0x80000000\n"
"movi v15.16b, #0x1\n"
- "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "mov x26, %x[output_ptr]\n"
"34:" // Height 2: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -438,64 +440,64 @@ void a64_hybrid_s8qa_mmla_4x16 (
"movi v22.4s, #0x0\n"
"movi v23.4s, #0x0\n"
"35:" // Height 2: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"36:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 37f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "cbnz x26, 38f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "cbnz x25, 38f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
"b 38f\n"
"37:" // Height 2: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19\n"
"38:" // Height 2: input setup done
- "cmp x25, #0x10\n"
+ "cmp x24, #0x10\n"
"blt 43f\n"
- "ldr q1, [x24, #0x0]\n"
- "ldr q2, [x23, #0x0]\n"
- "cmp x25, #0x20\n"
+ "ldr q1, [x23, #0x0]\n"
+ "ldr q2, [x22, #0x0]\n"
+ "cmp x24, #0x20\n"
+ "blt 41f\n"
+ "39:" // Height 2: Multiply loop: Main loop head
+ "trn1 v0.2d, v1.2d, v2.2d\n"
"ldr q5, [x28, #0x0]\n"
+ "add x23, x23, #0x10\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
"ldr q6, [x28, #0x10]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x4e85a410 // smmla v16.4s, v0.16b, v5.16b\n"
"ldr q7, [x28, #0x20]\n"
"ldr q8, [x28, #0x30]\n"
+ ".inst 0x4e86a414 // smmla v20.4s, v0.16b, v6.16b\n"
"ldr q9, [x28, #0x40]\n"
+ ".inst 0x4e87a411 // smmla v17.4s, v0.16b, v7.16b\n"
"ldr q10, [x28, #0x50]\n"
+ ".inst 0x4e88a415 // smmla v21.4s, v0.16b, v8.16b\n"
"ldr q4, [x28, #0x60]\n"
- "blt 41f\n"
- "39:" // Height 2: Multiply loop: Main loop head
- "trn1 v0.2d, v1.2d, v2.2d\n"
- ".inst 0x4e85a410 // smmla v16.4s, v0.16b, v5.16b\n"
+ ".inst 0x4e89a412 // smmla v18.4s, v0.16b, v9.16b\n"
"ldr q5, [x28, #0x70]\n"
- ".inst 0x4e86a414 // smmla v20.4s, v0.16b, v6.16b\n"
"ldr q6, [x28, #0x80]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a411 // smmla v17.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e8aa416 // smmla v22.4s, v0.16b, v10.16b\n"
"ldr q7, [x28, #0x90]\n"
- ".inst 0x4e88a415 // smmla v21.4s, v0.16b, v8.16b\n"
"ldr q8, [x28, #0xa0]\n"
- ".inst 0x4e89a412 // smmla v18.4s, v0.16b, v9.16b\n"
- "ldr q9, [x28, #0xb0]\n"
- ".inst 0x4e8aa416 // smmla v22.4s, v0.16b, v10.16b\n"
- "ldr q10, [x28, #0xc0]\n"
".inst 0x4e84a413 // smmla v19.4s, v0.16b, v4.16b\n"
- "ldr q4, [x28, #0xd0]\n"
+ "ldr q9, [x28, #0xb0]\n"
".inst 0x4e85a417 // smmla v23.4s, v0.16b, v5.16b\n"
- "ldr q5, [x28, #0xe0]\n"
".inst 0x4e86a430 // smmla v16.4s, v1.16b, v6.16b\n"
- "ldr q6, [x28, #0xf0]\n"
+ "ldr q10, [x28, #0xc0]\n"
+ "ldr q4, [x28, #0xd0]\n"
".inst 0x4e87a434 // smmla v20.4s, v1.16b, v7.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x4e88a431 // smmla v17.4s, v1.16b, v8.16b\n"
- "add x23, x23, #0x10\n"
+ "ldr q5, [x28, #0xe0]\n"
".inst 0x4e89a435 // smmla v21.4s, v1.16b, v9.16b\n"
+ "ldr q6, [x28, #0xf0]\n"
"add x28, x28, #0x100\n"
".inst 0x4e8aa432 // smmla v18.4s, v1.16b, v10.16b\n"
".inst 0x4e84a436 // smmla v22.4s, v1.16b, v4.16b\n"
@@ -505,49 +507,49 @@ void a64_hybrid_s8qa_mmla_4x16 (
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
".inst 0x4e8f942b // sdot v11.4s, v1.16b, v15.16b\n"
"40:" // Height 2: Multiply loop: unique 5: skip row sum
- "ldr q1, [x24, #0x0]\n"
- "ldr q2, [x23, #0x0]\n"
- "sub x25, x25, #0x10\n"
- "cmp x25, #0x20\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "sub x24, x24, #0x10\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "cmp x24, #0x20\n"
+ "ldr q1, [x23, #0x0]\n"
+ "ldr q2, [x22, #0x0]\n"
+ "bge 39b\n"
+ "41:" // Height 2: Multiply loop: Single iteration only
+ "trn1 v0.2d, v1.2d, v2.2d\n"
"ldr q5, [x28, #0x0]\n"
+ "sub x24, x24, #0x10\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
"ldr q6, [x28, #0x10]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x4e85a410 // smmla v16.4s, v0.16b, v5.16b\n"
"ldr q7, [x28, #0x20]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x4e86a414 // smmla v20.4s, v0.16b, v6.16b\n"
"ldr q8, [x28, #0x30]\n"
"ldr q9, [x28, #0x40]\n"
+ ".inst 0x4e87a411 // smmla v17.4s, v0.16b, v7.16b\n"
"ldr q10, [x28, #0x50]\n"
"ldr q4, [x28, #0x60]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "bge 39b\n"
- "41:" // Height 2: Multiply loop: Single iteration only
- "trn1 v0.2d, v1.2d, v2.2d\n"
- ".inst 0x4e85a410 // smmla v16.4s, v0.16b, v5.16b\n"
+ ".inst 0x4e88a415 // smmla v21.4s, v0.16b, v8.16b\n"
+ ".inst 0x4e89a412 // smmla v18.4s, v0.16b, v9.16b\n"
"ldr q5, [x28, #0x70]\n"
- ".inst 0x4e86a414 // smmla v20.4s, v0.16b, v6.16b\n"
"ldr q6, [x28, #0x80]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a411 // smmla v17.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e8aa416 // smmla v22.4s, v0.16b, v10.16b\n"
"ldr q7, [x28, #0x90]\n"
- ".inst 0x4e88a415 // smmla v21.4s, v0.16b, v8.16b\n"
+ ".inst 0x4e84a413 // smmla v19.4s, v0.16b, v4.16b\n"
"ldr q8, [x28, #0xa0]\n"
- ".inst 0x4e89a412 // smmla v18.4s, v0.16b, v9.16b\n"
"ldr q9, [x28, #0xb0]\n"
- ".inst 0x4e8aa416 // smmla v22.4s, v0.16b, v10.16b\n"
- "ldr q10, [x28, #0xc0]\n"
- ".inst 0x4e84a413 // smmla v19.4s, v0.16b, v4.16b\n"
- "ldr q4, [x28, #0xd0]\n"
".inst 0x4e85a417 // smmla v23.4s, v0.16b, v5.16b\n"
- "ldr q5, [x28, #0xe0]\n"
".inst 0x4e86a430 // smmla v16.4s, v1.16b, v6.16b\n"
- "ldr q6, [x28, #0xf0]\n"
- "sub x25, x25, #0x10\n"
+ "ldr q10, [x28, #0xc0]\n"
+ "ldr q4, [x28, #0xd0]\n"
".inst 0x4e87a434 // smmla v20.4s, v1.16b, v7.16b\n"
+ "ldr q5, [x28, #0xe0]\n"
".inst 0x4e88a431 // smmla v17.4s, v1.16b, v8.16b\n"
- "add x24, x24, #0x10\n"
- "add x23, x23, #0x10\n"
".inst 0x4e89a435 // smmla v21.4s, v1.16b, v9.16b\n"
- ".inst 0x4e8aa432 // smmla v18.4s, v1.16b, v10.16b\n"
+ "ldr q6, [x28, #0xf0]\n"
"add x28, x28, #0x100\n"
+ ".inst 0x4e8aa432 // smmla v18.4s, v1.16b, v10.16b\n"
".inst 0x4e84a436 // smmla v22.4s, v1.16b, v4.16b\n"
".inst 0x4e85a433 // smmla v19.4s, v1.16b, v5.16b\n"
".inst 0x4e86a437 // smmla v23.4s, v1.16b, v6.16b\n"
@@ -555,136 +557,136 @@ void a64_hybrid_s8qa_mmla_4x16 (
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
".inst 0x4e8f942b // sdot v11.4s, v1.16b, v15.16b\n"
"42:" // Height 2: Multiply loop: unique 6: skip row sum
- "prfm pldl1keep, [x24, #0x80]\n"
"prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
"43:" // Height 2: Multiply loop: Main loop skip
- "cbz x25, 52f\n"
- "cmp x25, #0x8\n"
+ "cbz x24, 52f\n"
+ "cmp x24, #0x8\n"
"blt 46f\n"
"44:" // Height 2: Multiply loop: Odd block loop
- "ldr d1, [x24], #0x8\n"
- "ldr d2, [x23], #0x8\n"
+ "ldr d1, [x23], #0x8\n"
+ "ldr d2, [x22], #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
"tbnz %x[flags], #31, 45f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
"45:" // Height 2: Multiply loop: unique 7: skip row sum
"ldr q8, [x28, #0x0]\n"
- "ldr q9, [x28, #0x10]\n"
".inst 0x4e88a410 // smmla v16.4s, v0.16b, v8.16b\n"
- "sub x25, x25, #0x8\n"
+ "ldr q9, [x28, #0x10]\n"
+ "sub x24, x24, #0x8\n"
+ ".inst 0x4e89a414 // smmla v20.4s, v0.16b, v9.16b\n"
"ldr q10, [x28, #0x20]\n"
+ "cmp x24, #0x8\n"
+ ".inst 0x4e8aa411 // smmla v17.4s, v0.16b, v10.16b\n"
"ldr q4, [x28, #0x30]\n"
- "cmp x25, #0x8\n"
- ".inst 0x4e89a414 // smmla v20.4s, v0.16b, v9.16b\n"
"ldr q5, [x28, #0x40]\n"
- "ldr q6, [x28, #0x50]\n"
- ".inst 0x4e8aa411 // smmla v17.4s, v0.16b, v10.16b\n"
".inst 0x4e84a415 // smmla v21.4s, v0.16b, v4.16b\n"
+ "ldr q6, [x28, #0x50]\n"
+ ".inst 0x4e85a412 // smmla v18.4s, v0.16b, v5.16b\n"
"ldr q7, [x28, #0x60]\n"
"ldr q8, [x28, #0x70]\n"
- ".inst 0x4e85a412 // smmla v18.4s, v0.16b, v5.16b\n"
".inst 0x4e86a416 // smmla v22.4s, v0.16b, v6.16b\n"
+ "add x28, x28, #0x80\n"
".inst 0x4e87a413 // smmla v19.4s, v0.16b, v7.16b\n"
".inst 0x4e88a417 // smmla v23.4s, v0.16b, v8.16b\n"
- "add x28, x28, #0x80\n"
"bge 44b\n"
+ "cbz x24, 52f\n"
"46:" // Height 2: Multiply loop: Skip odd blocks
- "cbz x25, 52f\n"
- "tbz x25, #2, 48f\n"
- "ldr s1, [x24], #0x4\n"
- "ldr s2, [x23], #0x4\n"
- "tbz x25, #1, 47f\n"
- "ld1 { v1.h }[2], [x24], #0x2\n"
- "ld1 { v2.h }[2], [x23], #0x2\n"
- "tbz x25, #0, 50f\n"
- "ld1 { v1.b }[6], [x24]\n"
- "ld1 { v2.b }[6], [x23]\n"
+ "tbz x24, #2, 48f\n"
+ "ldr s1, [x23], #0x4\n"
+ "ldr s2, [x22], #0x4\n"
+ "tbz x24, #1, 47f\n"
+ "ld1 { v1.h }[2], [x23], #0x2\n"
+ "ld1 { v2.h }[2], [x22], #0x2\n"
+ "tbz x24, #0, 50f\n"
+ "ld1 { v1.b }[6], [x23]\n"
+ "ld1 { v2.b }[6], [x22]\n"
"b 50f\n"
"47:" // Height 2: Multiply loop: Ragged operand read: partial_1_4
- "tbz x25, #0, 50f\n"
- "ld1 { v1.b }[4], [x24]\n"
- "ld1 { v2.b }[4], [x23]\n"
+ "tbz x24, #0, 50f\n"
+ "ld1 { v1.b }[4], [x23]\n"
+ "ld1 { v2.b }[4], [x22]\n"
"b 50f\n"
"48:" // Height 2: Multiply loop: Ragged operand read: partial_2_0
- "tbz x25, #1, 49f\n"
- "ldr h1, [x24], #0x2\n"
- "ldr h2, [x23], #0x2\n"
- "tbz x25, #0, 50f\n"
- "ld1 { v1.b }[2], [x24]\n"
- "ld1 { v2.b }[2], [x23]\n"
+ "tbz x24, #1, 49f\n"
+ "ldr h1, [x23], #0x2\n"
+ "ldr h2, [x22], #0x2\n"
+ "tbz x24, #0, 50f\n"
+ "ld1 { v1.b }[2], [x23]\n"
+ "ld1 { v2.b }[2], [x22]\n"
"b 50f\n"
"49:" // Height 2: Multiply loop: Ragged operand read: partial_1_0
- "ldr b1, [x24, #0x0]\n"
- "ldr b2, [x23, #0x0]\n"
+ "ldr b1, [x23, #0x0]\n"
+ "ldr b2, [x22, #0x0]\n"
"50:" // Height 2: Multiply loop: Ragged operand read: Done
"trn1 v0.2d, v1.2d, v2.2d\n"
"tbnz %x[flags], #31, 51f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
"51:" // Height 2: Multiply loop: unique 8: skip row sum
"ldr q10, [x28, #0x0]\n"
- "ldr q4, [x28, #0x10]\n"
".inst 0x4e8aa410 // smmla v16.4s, v0.16b, v10.16b\n"
- ".inst 0x4e84a414 // smmla v20.4s, v0.16b, v4.16b\n"
+ "ldr q4, [x28, #0x10]\n"
"ldr q5, [x28, #0x20]\n"
+ ".inst 0x4e84a414 // smmla v20.4s, v0.16b, v4.16b\n"
"ldr q6, [x28, #0x30]\n"
".inst 0x4e85a411 // smmla v17.4s, v0.16b, v5.16b\n"
- ".inst 0x4e86a415 // smmla v21.4s, v0.16b, v6.16b\n"
"ldr q7, [x28, #0x40]\n"
"ldr q8, [x28, #0x50]\n"
- ".inst 0x4e87a412 // smmla v18.4s, v0.16b, v7.16b\n"
- ".inst 0x4e88a416 // smmla v22.4s, v0.16b, v8.16b\n"
+ ".inst 0x4e86a415 // smmla v21.4s, v0.16b, v6.16b\n"
"ldr q9, [x28, #0x60]\n"
"ldr q10, [x28, #0x70]\n"
+ ".inst 0x4e87a412 // smmla v18.4s, v0.16b, v7.16b\n"
+ "add x28, x28, #0x80\n"
+ ".inst 0x4e88a416 // smmla v22.4s, v0.16b, v8.16b\n"
".inst 0x4e89a413 // smmla v19.4s, v0.16b, v9.16b\n"
".inst 0x4e8aa417 // smmla v23.4s, v0.16b, v10.16b\n"
- "add x28, x28, #0x80\n"
"52:" // Height 2: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 36b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 v4.2d, v16.2d, v20.2d\n"
- "add x22, x27, x20\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x21, x26, x19\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
- "prfm pstl1keep, [x27, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
"uzp2 v18.2d, v18.2d, v22.2d\n"
"uzp1 v22.2d, v19.2d, v23.2d\n"
"uzp2 v19.2d, v19.2d, v23.2d\n"
"mov v23.16b, v4.16b\n"
"tbnz %x[flags], #31, 53f\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1r { v2.4s }, [x23]\n"
"addp v11.4s, v11.4s, v11.4s\n"
- "neg v2.4s, v2.4s\n"
+ "add x22, %x[qp], %[b_offset]\n"
+ "ld1r { v2.4s }, [x22]\n"
"dup v12.4s, v11.s[3]\n"
"dup v11.4s, v11.s[0]\n"
+ "neg v2.4s, v2.4s\n"
"mul v11.4s, v11.4s, v2.4s\n"
"mul v12.4s, v12.4s, v2.4s\n"
"53:" // Height 2: skip row sum fixup
- "ldr q0, [x10, #0x0]\n"
- "ldr q1, [x10, #0x10]\n"
"add v23.4s, v23.4s, v11.4s\n"
+ "ldr q0, [x27, #0x0]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"add v20.4s, v20.4s, v11.4s\n"
- "ldr q2, [x10, #0x20]\n"
- "ldr q3, [x10, #0x30]\n"
+ "ldr q1, [x27, #0x10]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
"add v21.4s, v21.4s, v11.4s\n"
+ "ldr q2, [x27, #0x20]\n"
+ "add x22, %x[qp], %[per_layer_mul]\n"
"add v22.4s, v22.4s, v11.4s\n"
+ "ldr q3, [x27, #0x30]\n"
+ "add x27, x27, #0x40\n"
"add v16.4s, v16.4s, v12.4s\n"
+ "ld1r { v4.4s }, [x22]\n"
"add v17.4s, v17.4s, v12.4s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x23]\n"
"add v18.4s, v18.4s, v12.4s\n"
"add v19.4s, v19.4s, v12.4s\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
"add v23.4s, v23.4s, v0.4s\n"
"add v20.4s, v20.4s, v1.4s\n"
- "add x10, x10, #0x40\n"
"add v21.4s, v21.4s, v2.4s\n"
"add v22.4s, v22.4s, v3.4s\n"
"add v16.4s, v16.4s, v0.4s\n"
@@ -702,154 +704,154 @@ void a64_hybrid_s8qa_mmla_4x16 (
"sqrdmulh v19.4s, v19.4s, v4.4s\n"
"tbz %x[flags], #5, 54f\n"
"and v4.16b, v23.16b, v0.16b\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sqadd v23.4s, v23.4s, v4.4s\n"
"and v5.16b, v20.16b, v0.16b\n"
"and v6.16b, v21.16b, v0.16b\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
+ "sshr v6.4s, v6.4s, #0x1f\n"
+ "sqadd v23.4s, v23.4s, v4.4s\n"
+ "sqadd v20.4s, v20.4s, v5.4s\n"
+ "sqadd v21.4s, v21.4s, v6.4s\n"
"and v7.16b, v22.16b, v0.16b\n"
"and v8.16b, v16.16b, v0.16b\n"
"and v9.16b, v17.16b, v0.16b\n"
- "and v10.16b, v18.16b, v0.16b\n"
- "and v4.16b, v19.16b, v0.16b\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sshr v8.4s, v8.4s, #0x1f\n"
"sshr v9.4s, v9.4s, #0x1f\n"
- "sshr v10.4s, v10.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sqadd v20.4s, v20.4s, v5.4s\n"
- "sqadd v21.4s, v21.4s, v6.4s\n"
"sqadd v22.4s, v22.4s, v7.4s\n"
"sqadd v16.4s, v16.4s, v8.4s\n"
"sqadd v17.4s, v17.4s, v9.4s\n"
+ "and v10.16b, v18.16b, v0.16b\n"
+ "and v4.16b, v19.16b, v0.16b\n"
+ "sshr v10.4s, v10.4s, #0x1f\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
"sqadd v18.4s, v18.4s, v10.4s\n"
"sqadd v19.4s, v19.4s, v4.4s\n"
"54:" // Height 2: no shift correction
- "add x23, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
"srshl v23.4s, v23.4s, v0.4s\n"
+ "add x22, %x[qp], %[c_offset]\n"
+ "ld1r { v4.4s }, [x22]\n"
"srshl v20.4s, v20.4s, v0.4s\n"
+ "add x22, %x[qp], %[minval]\n"
"srshl v21.4s, v21.4s, v0.4s\n"
+ "ld1r { v5.4s }, [x22]\n"
+ "add x22, %x[qp], %[maxval]\n"
"srshl v22.4s, v22.4s, v0.4s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x23]\n"
+ "ld1r { v6.4s }, [x22]\n"
+ "cmp x9, #0x10\n"
"srshl v16.4s, v16.4s, v0.4s\n"
"srshl v17.4s, v17.4s, v0.4s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x23]\n"
- "srshl v18.4s, v18.4s, v0.4s\n"
- "srshl v19.4s, v19.4s, v0.4s\n"
- "cmp x9, #0x10\n"
"add v23.4s, v23.4s, v4.4s\n"
"add v20.4s, v20.4s, v4.4s\n"
"add v21.4s, v21.4s, v4.4s\n"
- "add v22.4s, v22.4s, v4.4s\n"
- "add v16.4s, v16.4s, v4.4s\n"
- "add v17.4s, v17.4s, v4.4s\n"
- "add v18.4s, v18.4s, v4.4s\n"
- "add v19.4s, v19.4s, v4.4s\n"
"smin v23.4s, v23.4s, v6.4s\n"
"smin v20.4s, v20.4s, v6.4s\n"
"smin v21.4s, v21.4s, v6.4s\n"
- "smin v22.4s, v22.4s, v6.4s\n"
- "smin v16.4s, v16.4s, v6.4s\n"
- "smin v17.4s, v17.4s, v6.4s\n"
- "smin v18.4s, v18.4s, v6.4s\n"
- "smin v19.4s, v19.4s, v6.4s\n"
"smax v23.4s, v23.4s, v5.4s\n"
"smax v20.4s, v20.4s, v5.4s\n"
"smax v21.4s, v21.4s, v5.4s\n"
+ "add v22.4s, v22.4s, v4.4s\n"
+ "add v16.4s, v16.4s, v4.4s\n"
+ "add v17.4s, v17.4s, v4.4s\n"
+ "smin v22.4s, v22.4s, v6.4s\n"
+ "smin v16.4s, v16.4s, v6.4s\n"
+ "smin v17.4s, v17.4s, v6.4s\n"
"smax v22.4s, v22.4s, v5.4s\n"
"smax v16.4s, v16.4s, v5.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
- "smax v18.4s, v18.4s, v5.4s\n"
- "smax v19.4s, v19.4s, v5.4s\n"
+ "srshl v18.4s, v18.4s, v0.4s\n"
+ "srshl v19.4s, v19.4s, v0.4s\n"
"uzp1 v23.8h, v23.8h, v20.8h\n"
"uzp1 v20.8h, v21.8h, v22.8h\n"
+ "add v18.4s, v18.4s, v4.4s\n"
+ "add v19.4s, v19.4s, v4.4s\n"
"uzp1 v16.8h, v16.8h, v17.8h\n"
- "uzp1 v17.8h, v18.8h, v19.8h\n"
+ "smin v18.4s, v18.4s, v6.4s\n"
+ "smin v19.4s, v19.4s, v6.4s\n"
"uzp1 v23.16b, v23.16b, v20.16b\n"
+ "smax v18.4s, v18.4s, v5.4s\n"
+ "smax v19.4s, v19.4s, v5.4s\n"
+ "uzp1 v17.8h, v18.8h, v19.8h\n"
"uzp1 v16.16b, v16.16b, v17.16b\n"
"bge 63f\n"
"tbz x9, #3, 58f\n"
- "str d23, [x27], #0x8\n"
- "str d16, [x22], #0x8\n"
+ "str d23, [x26], #0x8\n"
+ "str d16, [x21], #0x8\n"
"tbz x9, #2, 56f\n"
- "st1 { v23.s }[2], [x27], #0x4\n"
- "st1 { v16.s }[2], [x22], #0x4\n"
+ "st1 { v23.s }[2], [x26], #0x4\n"
+ "st1 { v16.s }[2], [x21], #0x4\n"
"tbz x9, #1, 55f\n"
- "st1 { v23.h }[6], [x27], #0x2\n"
- "st1 { v16.h }[6], [x22], #0x2\n"
+ "st1 { v23.h }[6], [x26], #0x2\n"
+ "st1 { v16.h }[6], [x21], #0x2\n"
"tbz x9, #0, 62f\n"
- "st1 { v23.b }[14], [x27]\n"
- "st1 { v16.b }[14], [x22]\n"
+ "st1 { v23.b }[14], [x26]\n"
+ "st1 { v16.b }[14], [x21]\n"
"b 62f\n"
"55:" // Height 2: Partial direct writeback: partial_1_12
"tbz x9, #0, 62f\n"
- "st1 { v23.b }[12], [x27]\n"
- "st1 { v16.b }[12], [x22]\n"
+ "st1 { v23.b }[12], [x26]\n"
+ "st1 { v16.b }[12], [x21]\n"
"b 62f\n"
"56:" // Height 2: Partial direct writeback: partial_2_8
"tbz x9, #1, 57f\n"
- "st1 { v23.h }[4], [x27], #0x2\n"
- "st1 { v16.h }[4], [x22], #0x2\n"
+ "st1 { v23.h }[4], [x26], #0x2\n"
+ "st1 { v16.h }[4], [x21], #0x2\n"
"tbz x9, #0, 62f\n"
- "st1 { v23.b }[10], [x27]\n"
- "st1 { v16.b }[10], [x22]\n"
+ "st1 { v23.b }[10], [x26]\n"
+ "st1 { v16.b }[10], [x21]\n"
"b 62f\n"
"57:" // Height 2: Partial direct writeback: partial_1_8
"tbz x9, #0, 62f\n"
- "st1 { v23.b }[8], [x27]\n"
- "st1 { v16.b }[8], [x22]\n"
+ "st1 { v23.b }[8], [x26]\n"
+ "st1 { v16.b }[8], [x21]\n"
"b 62f\n"
"58:" // Height 2: Partial direct writeback: partial_4_0
"tbz x9, #2, 60f\n"
- "str s23, [x27], #0x4\n"
- "str s16, [x22], #0x4\n"
+ "str s23, [x26], #0x4\n"
+ "str s16, [x21], #0x4\n"
"tbz x9, #1, 59f\n"
- "st1 { v23.h }[2], [x27], #0x2\n"
- "st1 { v16.h }[2], [x22], #0x2\n"
+ "st1 { v23.h }[2], [x26], #0x2\n"
+ "st1 { v16.h }[2], [x21], #0x2\n"
"tbz x9, #0, 62f\n"
- "st1 { v23.b }[6], [x27]\n"
- "st1 { v16.b }[6], [x22]\n"
+ "st1 { v23.b }[6], [x26]\n"
+ "st1 { v16.b }[6], [x21]\n"
"b 62f\n"
"59:" // Height 2: Partial direct writeback: partial_1_4
"tbz x9, #0, 62f\n"
- "st1 { v23.b }[4], [x27]\n"
- "st1 { v16.b }[4], [x22]\n"
+ "st1 { v23.b }[4], [x26]\n"
+ "st1 { v16.b }[4], [x21]\n"
"b 62f\n"
"60:" // Height 2: Partial direct writeback: partial_2_0
"tbz x9, #1, 61f\n"
- "str h23, [x27], #0x2\n"
- "str h16, [x22], #0x2\n"
+ "str h23, [x26], #0x2\n"
+ "str h16, [x21], #0x2\n"
"tbz x9, #0, 62f\n"
- "st1 { v23.b }[2], [x27]\n"
- "st1 { v16.b }[2], [x22]\n"
+ "st1 { v23.b }[2], [x26]\n"
+ "st1 { v16.b }[2], [x21]\n"
"b 62f\n"
"61:" // Height 2: Partial direct writeback: partial_1_0
- "str b23, [x27, #0x0]\n"
- "str b16, [x22, #0x0]\n"
+ "str b23, [x26, #0x0]\n"
+ "str b16, [x21, #0x0]\n"
"62:" // Height 2: Partial direct writeback: Done
"b 64f\n"
"63:" // Height 2: Full writeback
- "str q23, [x27, #0x0]\n"
- "add x27, x27, #0x10\n"
- "str q16, [x22, #0x0]\n"
+ "str q23, [x26, #0x0]\n"
+ "add x26, x26, #0x10\n"
+ "str q16, [x21, #0x0]\n"
"64:" // Height 2: Writeback done
"subs x9, x9, #0x10\n"
"bgt 34b\n"
"b 130f\n"
"65:" // Height 3
- "mov x10, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x27, %x[col_bias]\n"
"movi v12.4s, #0x0\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"bic %x[flags], %x[flags], #0x80000000\n"
"movi v13.4s, #0x0\n"
+ "mov x26, %x[output_ptr]\n"
"movi v15.16b, #0x1\n"
- "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
"66:" // Height 3: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -868,65 +870,65 @@ void a64_hybrid_s8qa_mmla_4x16 (
"movi v30.4s, #0x0\n"
"movi v31.4s, #0x0\n"
"67:" // Height 3: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"68:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 69f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "ldr x22, [x21, #0x10]\n"
- "cbnz x26, 70f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "cbnz x25, 70f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
"b 70f\n"
"69:" // Height 3: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
"70:" // Height 3: input setup done
- "cmp x25, #0x10\n"
+ "cmp x24, #0x10\n"
"blt 75f\n"
- "ldr q1, [x24, #0x0]\n"
- "ldr q2, [x23, #0x0]\n"
- "cmp x25, #0x20\n"
- "ldr q3, [x22, #0x0]\n"
- "ldr q5, [x28, #0x0]\n"
- "ldr q6, [x28, #0x10]\n"
- "ldr q7, [x28, #0x20]\n"
- "ldr q8, [x28, #0x30]\n"
- "ldr q9, [x28, #0x40]\n"
- "ldr q10, [x28, #0x50]\n"
+ "ldr q1, [x23, #0x0]\n"
+ "ldr q2, [x22, #0x0]\n"
+ "cmp x24, #0x20\n"
"blt 73f\n"
"71:" // Height 3: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x21, #0x0]\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e85a410 // smmla v16.4s, v0.16b, v5.16b\n"
+ "ldr q5, [x28, #0x0]\n"
+ "add x23, x23, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q6, [x28, #0x10]\n"
+ "add x22, x22, #0x10\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "ldr q7, [x28, #0x20]\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x4e85a410 // smmla v16.4s, v0.16b, v5.16b\n"
+ "ldr q8, [x28, #0x30]\n"
".inst 0x4e85a458 // smmla v24.4s, v2.16b, v5.16b\n"
- "ldr q5, [x28, #0x70]\n"
+ "ldr q9, [x28, #0x40]\n"
+ "ldr q10, [x28, #0x50]\n"
".inst 0x4e86a414 // smmla v20.4s, v0.16b, v6.16b\n"
- "trn2 v3.2d, v3.2d, v4.2d\n"
- "ldr q4, [x28, #0x60]\n"
".inst 0x4e86a45c // smmla v28.4s, v2.16b, v6.16b\n"
- "ldr q6, [x28, #0x80]\n"
+ "ldr q4, [x28, #0x60]\n"
".inst 0x4e87a411 // smmla v17.4s, v0.16b, v7.16b\n"
+ "ldr q5, [x28, #0x70]\n"
".inst 0x4e87a459 // smmla v25.4s, v2.16b, v7.16b\n"
- "ldr q7, [x28, #0x90]\n"
- "add x24, x24, #0x10\n"
+ "ldr q6, [x28, #0x80]\n"
".inst 0x4e88a415 // smmla v21.4s, v0.16b, v8.16b\n"
+ "ldr q7, [x28, #0x90]\n"
".inst 0x4e88a45d // smmla v29.4s, v2.16b, v8.16b\n"
"ldr q8, [x28, #0xa0]\n"
- "add x23, x23, #0x10\n"
".inst 0x4e89a412 // smmla v18.4s, v0.16b, v9.16b\n"
".inst 0x4e89a45a // smmla v26.4s, v2.16b, v9.16b\n"
"ldr q9, [x28, #0xb0]\n"
- "add x22, x22, #0x10\n"
".inst 0x4e8aa416 // smmla v22.4s, v0.16b, v10.16b\n"
".inst 0x4e8aa45e // smmla v30.4s, v2.16b, v10.16b\n"
"ldr q10, [x28, #0xc0]\n"
@@ -960,49 +962,49 @@ void a64_hybrid_s8qa_mmla_4x16 (
".inst 0x4e8f942b // sdot v11.4s, v1.16b, v15.16b\n"
".inst 0x4e8f946d // sdot v13.4s, v3.16b, v15.16b\n"
"72:" // Height 3: Multiply loop: unique 9: skip row sum
- "ldr q1, [x24, #0x0]\n"
- "ldr q2, [x23, #0x0]\n"
- "sub x25, x25, #0x10\n"
- "cmp x25, #0x20\n"
- "ldr q3, [x22, #0x0]\n"
- "ldr q5, [x28, #0x0]\n"
- "ldr q6, [x28, #0x10]\n"
- "ldr q7, [x28, #0x20]\n"
- "ldr q8, [x28, #0x30]\n"
- "ldr q9, [x28, #0x40]\n"
- "ldr q10, [x28, #0x50]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"prfm pldl1keep, [x23, #0x80]\n"
+ "sub x24, x24, #0x10\n"
"prfm pldl1keep, [x22, #0x80]\n"
+ "cmp x24, #0x20\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
+ "ldr q1, [x23, #0x0]\n"
+ "ldr q2, [x22, #0x0]\n"
"bge 71b\n"
"73:" // Height 3: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x21, #0x0]\n"
+ "sub x24, x24, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e85a410 // smmla v16.4s, v0.16b, v5.16b\n"
+ "ldr q5, [x28, #0x0]\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q6, [x28, #0x10]\n"
+ "add x23, x23, #0x10\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "ldr q7, [x28, #0x20]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x4e85a410 // smmla v16.4s, v0.16b, v5.16b\n"
+ "ldr q8, [x28, #0x30]\n"
+ "add x21, x21, #0x10\n"
".inst 0x4e85a458 // smmla v24.4s, v2.16b, v5.16b\n"
- "ldr q5, [x28, #0x70]\n"
+ "ldr q9, [x28, #0x40]\n"
+ "ldr q10, [x28, #0x50]\n"
".inst 0x4e86a414 // smmla v20.4s, v0.16b, v6.16b\n"
- "trn2 v3.2d, v3.2d, v4.2d\n"
- "ldr q4, [x28, #0x60]\n"
".inst 0x4e86a45c // smmla v28.4s, v2.16b, v6.16b\n"
- "ldr q6, [x28, #0x80]\n"
+ "ldr q4, [x28, #0x60]\n"
".inst 0x4e87a411 // smmla v17.4s, v0.16b, v7.16b\n"
+ "ldr q5, [x28, #0x70]\n"
".inst 0x4e87a459 // smmla v25.4s, v2.16b, v7.16b\n"
- "ldr q7, [x28, #0x90]\n"
- "sub x25, x25, #0x10\n"
+ "ldr q6, [x28, #0x80]\n"
".inst 0x4e88a415 // smmla v21.4s, v0.16b, v8.16b\n"
+ "ldr q7, [x28, #0x90]\n"
".inst 0x4e88a45d // smmla v29.4s, v2.16b, v8.16b\n"
"ldr q8, [x28, #0xa0]\n"
- "add x24, x24, #0x10\n"
".inst 0x4e89a412 // smmla v18.4s, v0.16b, v9.16b\n"
".inst 0x4e89a45a // smmla v26.4s, v2.16b, v9.16b\n"
"ldr q9, [x28, #0xb0]\n"
- "add x23, x23, #0x10\n"
".inst 0x4e8aa416 // smmla v22.4s, v0.16b, v10.16b\n"
".inst 0x4e8aa45e // smmla v30.4s, v2.16b, v10.16b\n"
"ldr q10, [x28, #0xc0]\n"
- "add x22, x22, #0x10\n"
".inst 0x4e84a413 // smmla v19.4s, v0.16b, v4.16b\n"
".inst 0x4e84a45b // smmla v27.4s, v2.16b, v4.16b\n"
"ldr q4, [x28, #0xd0]\n"
@@ -1033,42 +1035,43 @@ void a64_hybrid_s8qa_mmla_4x16 (
".inst 0x4e8f942b // sdot v11.4s, v1.16b, v15.16b\n"
".inst 0x4e8f946d // sdot v13.4s, v3.16b, v15.16b\n"
"74:" // Height 3: Multiply loop: unique 10: skip row sum
- "prfm pldl1keep, [x24, #0x80]\n"
"prfm pldl1keep, [x23, #0x80]\n"
"prfm pldl1keep, [x22, #0x80]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
"75:" // Height 3: Multiply loop: Main loop skip
- "cbz x25, 84f\n"
- "cmp x25, #0x8\n"
+ "cbz x24, 84f\n"
+ "cmp x24, #0x8\n"
"blt 78f\n"
"76:" // Height 3: Multiply loop: Odd block loop
- "ldr d1, [x24], #0x8\n"
- "ldr d2, [x23], #0x8\n"
+ "movi v7.16b, #0x0\n"
+ "ldr d1, [x23], #0x8\n"
+ "ldr d2, [x22], #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "ldr d3, [x22], #0x8\n"
+ "ldr d3, [x21], #0x8\n"
"trn1 v2.2d, v3.2d, v7.2d\n"
"tbnz %x[flags], #31, 77f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
".inst 0x4e8f944d // sdot v13.4s, v2.16b, v15.16b\n"
"77:" // Height 3: Multiply loop: unique 11: skip row sum
"ldr q8, [x28, #0x0]\n"
- "ldr q9, [x28, #0x10]\n"
".inst 0x4e88a410 // smmla v16.4s, v0.16b, v8.16b\n"
+ "ldr q9, [x28, #0x10]\n"
+ "sub x24, x24, #0x8\n"
".inst 0x4e88a458 // smmla v24.4s, v2.16b, v8.16b\n"
"ldr q10, [x28, #0x20]\n"
+ "cmp x24, #0x8\n"
+ ".inst 0x4e89a414 // smmla v20.4s, v0.16b, v9.16b\n"
"ldr q4, [x28, #0x30]\n"
- "sub x25, x25, #0x8\n"
- "cmp x25, #0x8\n"
+ ".inst 0x4e89a45c // smmla v28.4s, v2.16b, v9.16b\n"
"ldr q5, [x28, #0x40]\n"
+ ".inst 0x4e8aa411 // smmla v17.4s, v0.16b, v10.16b\n"
"ldr q6, [x28, #0x50]\n"
- ".inst 0x4e89a414 // smmla v20.4s, v0.16b, v9.16b\n"
- ".inst 0x4e89a45c // smmla v28.4s, v2.16b, v9.16b\n"
+ ".inst 0x4e8aa459 // smmla v25.4s, v2.16b, v10.16b\n"
"ldr q7, [x28, #0x60]\n"
"ldr q8, [x28, #0x70]\n"
- ".inst 0x4e8aa411 // smmla v17.4s, v0.16b, v10.16b\n"
- ".inst 0x4e8aa459 // smmla v25.4s, v2.16b, v10.16b\n"
".inst 0x4e84a415 // smmla v21.4s, v0.16b, v4.16b\n"
- ".inst 0x4e84a45d // smmla v29.4s, v2.16b, v4.16b\n"
"add x28, x28, #0x80\n"
+ ".inst 0x4e84a45d // smmla v29.4s, v2.16b, v4.16b\n"
".inst 0x4e85a412 // smmla v18.4s, v0.16b, v5.16b\n"
".inst 0x4e85a45a // smmla v26.4s, v2.16b, v5.16b\n"
".inst 0x4e86a416 // smmla v22.4s, v0.16b, v6.16b\n"
@@ -1078,42 +1081,43 @@ void a64_hybrid_s8qa_mmla_4x16 (
".inst 0x4e88a417 // smmla v23.4s, v0.16b, v8.16b\n"
".inst 0x4e88a45f // smmla v31.4s, v2.16b, v8.16b\n"
"bge 76b\n"
+ "cbz x24, 84f\n"
"78:" // Height 3: Multiply loop: Skip odd blocks
- "cbz x25, 84f\n"
- "tbz x25, #2, 80f\n"
- "ldr s1, [x24], #0x4\n"
- "ldr s2, [x23], #0x4\n"
- "ldr s3, [x22], #0x4\n"
- "tbz x25, #1, 79f\n"
- "ld1 { v1.h }[2], [x24], #0x2\n"
- "ld1 { v2.h }[2], [x23], #0x2\n"
- "ld1 { v3.h }[2], [x22], #0x2\n"
- "tbz x25, #0, 82f\n"
- "ld1 { v1.b }[6], [x24]\n"
- "ld1 { v2.b }[6], [x23]\n"
- "ld1 { v3.b }[6], [x22]\n"
+ "tbz x24, #2, 80f\n"
+ "ldr s1, [x23], #0x4\n"
+ "ldr s2, [x22], #0x4\n"
+ "ldr s3, [x21], #0x4\n"
+ "tbz x24, #1, 79f\n"
+ "ld1 { v1.h }[2], [x23], #0x2\n"
+ "ld1 { v2.h }[2], [x22], #0x2\n"
+ "ld1 { v3.h }[2], [x21], #0x2\n"
+ "tbz x24, #0, 82f\n"
+ "ld1 { v1.b }[6], [x23]\n"
+ "ld1 { v2.b }[6], [x22]\n"
+ "ld1 { v3.b }[6], [x21]\n"
"b 82f\n"
"79:" // Height 3: Multiply loop: Ragged operand read: partial_1_4
- "tbz x25, #0, 82f\n"
- "ld1 { v1.b }[4], [x24]\n"
- "ld1 { v2.b }[4], [x23]\n"
- "ld1 { v3.b }[4], [x22]\n"
+ "tbz x24, #0, 82f\n"
+ "ld1 { v1.b }[4], [x23]\n"
+ "ld1 { v2.b }[4], [x22]\n"
+ "ld1 { v3.b }[4], [x21]\n"
"b 82f\n"
"80:" // Height 3: Multiply loop: Ragged operand read: partial_2_0
- "tbz x25, #1, 81f\n"
- "ldr h1, [x24], #0x2\n"
- "ldr h2, [x23], #0x2\n"
- "ldr h3, [x22], #0x2\n"
- "tbz x25, #0, 82f\n"
- "ld1 { v1.b }[2], [x24]\n"
- "ld1 { v2.b }[2], [x23]\n"
- "ld1 { v3.b }[2], [x22]\n"
+ "tbz x24, #1, 81f\n"
+ "ldr h1, [x23], #0x2\n"
+ "ldr h2, [x22], #0x2\n"
+ "ldr h3, [x21], #0x2\n"
+ "tbz x24, #0, 82f\n"
+ "ld1 { v1.b }[2], [x23]\n"
+ "ld1 { v2.b }[2], [x22]\n"
+ "ld1 { v3.b }[2], [x21]\n"
"b 82f\n"
"81:" // Height 3: Multiply loop: Ragged operand read: partial_1_0
- "ldr b1, [x24, #0x0]\n"
- "ldr b2, [x23, #0x0]\n"
- "ldr b3, [x22, #0x0]\n"
+ "ldr b1, [x23, #0x0]\n"
+ "ldr b2, [x22, #0x0]\n"
+ "ldr b3, [x21, #0x0]\n"
"82:" // Height 3: Multiply loop: Ragged operand read: Done
+ "movi v9.16b, #0x0\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn1 v2.2d, v3.2d, v9.2d\n"
"tbnz %x[flags], #31, 83f\n"
@@ -1121,24 +1125,24 @@ void a64_hybrid_s8qa_mmla_4x16 (
".inst 0x4e8f944d // sdot v13.4s, v2.16b, v15.16b\n"
"83:" // Height 3: Multiply loop: unique 12: skip row sum
"ldr q10, [x28, #0x0]\n"
- "ldr q4, [x28, #0x10]\n"
".inst 0x4e8aa410 // smmla v16.4s, v0.16b, v10.16b\n"
+ "ldr q4, [x28, #0x10]\n"
".inst 0x4e8aa458 // smmla v24.4s, v2.16b, v10.16b\n"
"ldr q5, [x28, #0x20]\n"
"ldr q6, [x28, #0x30]\n"
".inst 0x4e84a414 // smmla v20.4s, v0.16b, v4.16b\n"
- ".inst 0x4e84a45c // smmla v28.4s, v2.16b, v4.16b\n"
"ldr q7, [x28, #0x40]\n"
+ ".inst 0x4e84a45c // smmla v28.4s, v2.16b, v4.16b\n"
"ldr q8, [x28, #0x50]\n"
".inst 0x4e85a411 // smmla v17.4s, v0.16b, v5.16b\n"
- ".inst 0x4e85a459 // smmla v25.4s, v2.16b, v5.16b\n"
"ldr q9, [x28, #0x60]\n"
+ ".inst 0x4e85a459 // smmla v25.4s, v2.16b, v5.16b\n"
"ldr q10, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
".inst 0x4e86a415 // smmla v21.4s, v0.16b, v6.16b\n"
".inst 0x4e86a45d // smmla v29.4s, v2.16b, v6.16b\n"
".inst 0x4e87a412 // smmla v18.4s, v0.16b, v7.16b\n"
".inst 0x4e87a45a // smmla v26.4s, v2.16b, v7.16b\n"
- "add x28, x28, #0x80\n"
".inst 0x4e88a416 // smmla v22.4s, v0.16b, v8.16b\n"
".inst 0x4e88a45e // smmla v30.4s, v2.16b, v8.16b\n"
".inst 0x4e89a413 // smmla v19.4s, v0.16b, v9.16b\n"
@@ -1146,21 +1150,21 @@ void a64_hybrid_s8qa_mmla_4x16 (
".inst 0x4e8aa417 // smmla v23.4s, v0.16b, v10.16b\n"
".inst 0x4e8aa45f // smmla v31.4s, v2.16b, v10.16b\n"
"84:" // Height 3: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 68b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 v4.2d, v16.2d, v20.2d\n"
- "add x22, x27, x20\n"
- "add x21, x22, x20\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x21, x26, x19\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
- "prfm pstl1keep, [x27, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
+ "add x20, x21, x19\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
- "prfm pstl1keep, [x21, #0x0]\n"
+ "prfm pstl1keep, [x20, #0x0]\n"
"uzp2 v18.2d, v18.2d, v22.2d\n"
"uzp1 v22.2d, v19.2d, v23.2d\n"
"uzp2 v19.2d, v19.2d, v23.2d\n"
@@ -1170,37 +1174,37 @@ void a64_hybrid_s8qa_mmla_4x16 (
"uzp1 v27.2d, v27.2d, v31.2d\n"
"mov v31.16b, v4.16b\n"
"tbnz %x[flags], #31, 85f\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1r { v3.4s }, [x23]\n"
"addp v11.4s, v11.4s, v11.4s\n"
+ "add x22, %x[qp], %[b_offset]\n"
+ "ld1r { v3.4s }, [x22]\n"
"addp v13.4s, v13.4s, v13.4s\n"
- "neg v3.4s, v3.4s\n"
"dup v12.4s, v11.s[3]\n"
"dup v11.4s, v11.s[0]\n"
+ "neg v3.4s, v3.4s\n"
"dup v13.4s, v13.s[0]\n"
"mul v11.4s, v11.4s, v3.4s\n"
"mul v12.4s, v12.4s, v3.4s\n"
"mul v13.4s, v13.4s, v3.4s\n"
"85:" // Height 3: skip row sum fixup
- "ldr q0, [x10, #0x0]\n"
- "ldr q1, [x10, #0x10]\n"
"add v31.4s, v31.4s, v11.4s\n"
+ "ldr q0, [x27, #0x0]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"add v20.4s, v20.4s, v11.4s\n"
- "ldr q2, [x10, #0x20]\n"
- "ldr q3, [x10, #0x30]\n"
+ "ldr q1, [x27, #0x10]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
"add v21.4s, v21.4s, v11.4s\n"
+ "ldr q2, [x27, #0x20]\n"
+ "add x22, %x[qp], %[per_layer_mul]\n"
"add v22.4s, v22.4s, v11.4s\n"
+ "ldr q3, [x27, #0x30]\n"
+ "add x27, x27, #0x40\n"
"add v16.4s, v16.4s, v12.4s\n"
+ "ld1r { v4.4s }, [x22]\n"
"add v17.4s, v17.4s, v12.4s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x23]\n"
"add v18.4s, v18.4s, v12.4s\n"
"add v19.4s, v19.4s, v12.4s\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
"add v24.4s, v24.4s, v13.4s\n"
"add v25.4s, v25.4s, v13.4s\n"
- "add x10, x10, #0x40\n"
"add v26.4s, v26.4s, v13.4s\n"
"add v27.4s, v27.4s, v13.4s\n"
"add v31.4s, v31.4s, v0.4s\n"
@@ -1232,98 +1236,98 @@ void a64_hybrid_s8qa_mmla_4x16 (
"and v4.16b, v31.16b, v0.16b\n"
"and v5.16b, v20.16b, v0.16b\n"
"and v6.16b, v21.16b, v0.16b\n"
- "and v7.16b, v22.16b, v0.16b\n"
- "and v8.16b, v16.16b, v0.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
"sqadd v31.4s, v31.4s, v4.4s\n"
"sqadd v20.4s, v20.4s, v5.4s\n"
"sqadd v21.4s, v21.4s, v6.4s\n"
+ "and v7.16b, v22.16b, v0.16b\n"
+ "and v8.16b, v16.16b, v0.16b\n"
+ "and v9.16b, v17.16b, v0.16b\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
+ "sshr v8.4s, v8.4s, #0x1f\n"
+ "sshr v9.4s, v9.4s, #0x1f\n"
"sqadd v22.4s, v22.4s, v7.4s\n"
"sqadd v16.4s, v16.4s, v8.4s\n"
- "and v9.16b, v17.16b, v0.16b\n"
+ "sqadd v17.4s, v17.4s, v9.4s\n"
"and v10.16b, v18.16b, v0.16b\n"
"and v4.16b, v19.16b, v0.16b\n"
"and v5.16b, v24.16b, v0.16b\n"
- "and v6.16b, v25.16b, v0.16b\n"
- "and v7.16b, v26.16b, v0.16b\n"
- "and v8.16b, v27.16b, v0.16b\n"
- "sshr v9.4s, v9.4s, #0x1f\n"
"sshr v10.4s, v10.4s, #0x1f\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sqadd v17.4s, v17.4s, v9.4s\n"
"sqadd v18.4s, v18.4s, v10.4s\n"
"sqadd v19.4s, v19.4s, v4.4s\n"
"sqadd v24.4s, v24.4s, v5.4s\n"
+ "and v6.16b, v25.16b, v0.16b\n"
+ "and v7.16b, v26.16b, v0.16b\n"
+ "and v8.16b, v27.16b, v0.16b\n"
+ "sshr v6.4s, v6.4s, #0x1f\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
+ "sshr v8.4s, v8.4s, #0x1f\n"
"sqadd v25.4s, v25.4s, v6.4s\n"
"sqadd v26.4s, v26.4s, v7.4s\n"
"sqadd v27.4s, v27.4s, v8.4s\n"
"86:" // Height 3: no shift correction
- "add x23, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
"srshl v31.4s, v31.4s, v0.4s\n"
+ "add x22, %x[qp], %[c_offset]\n"
+ "ld1r { v4.4s }, [x22]\n"
"srshl v20.4s, v20.4s, v0.4s\n"
+ "add x22, %x[qp], %[minval]\n"
"srshl v21.4s, v21.4s, v0.4s\n"
+ "ld1r { v5.4s }, [x22]\n"
+ "add x22, %x[qp], %[maxval]\n"
"srshl v22.4s, v22.4s, v0.4s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x23]\n"
+ "ld1r { v6.4s }, [x22]\n"
+ "cmp x9, #0x10\n"
"srshl v16.4s, v16.4s, v0.4s\n"
"srshl v17.4s, v17.4s, v0.4s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x23]\n"
- "srshl v18.4s, v18.4s, v0.4s\n"
- "srshl v19.4s, v19.4s, v0.4s\n"
- "cmp x9, #0x10\n"
- "srshl v24.4s, v24.4s, v0.4s\n"
- "srshl v25.4s, v25.4s, v0.4s\n"
- "srshl v26.4s, v26.4s, v0.4s\n"
- "srshl v27.4s, v27.4s, v0.4s\n"
"add v31.4s, v31.4s, v4.4s\n"
"add v20.4s, v20.4s, v4.4s\n"
"add v21.4s, v21.4s, v4.4s\n"
- "add v22.4s, v22.4s, v4.4s\n"
- "add v16.4s, v16.4s, v4.4s\n"
- "add v17.4s, v17.4s, v4.4s\n"
- "add v18.4s, v18.4s, v4.4s\n"
- "add v19.4s, v19.4s, v4.4s\n"
- "add v24.4s, v24.4s, v4.4s\n"
- "add v25.4s, v25.4s, v4.4s\n"
- "add v26.4s, v26.4s, v4.4s\n"
- "add v27.4s, v27.4s, v4.4s\n"
"smin v31.4s, v31.4s, v6.4s\n"
"smin v20.4s, v20.4s, v6.4s\n"
"smin v21.4s, v21.4s, v6.4s\n"
- "smin v22.4s, v22.4s, v6.4s\n"
- "smin v16.4s, v16.4s, v6.4s\n"
- "smin v17.4s, v17.4s, v6.4s\n"
- "smin v18.4s, v18.4s, v6.4s\n"
- "smin v19.4s, v19.4s, v6.4s\n"
- "smin v24.4s, v24.4s, v6.4s\n"
- "smin v25.4s, v25.4s, v6.4s\n"
- "smin v26.4s, v26.4s, v6.4s\n"
- "smin v27.4s, v27.4s, v6.4s\n"
"smax v31.4s, v31.4s, v5.4s\n"
"smax v20.4s, v20.4s, v5.4s\n"
"smax v21.4s, v21.4s, v5.4s\n"
+ "add v22.4s, v22.4s, v4.4s\n"
+ "add v16.4s, v16.4s, v4.4s\n"
+ "add v17.4s, v17.4s, v4.4s\n"
+ "smin v22.4s, v22.4s, v6.4s\n"
+ "smin v16.4s, v16.4s, v6.4s\n"
+ "smin v17.4s, v17.4s, v6.4s\n"
"smax v22.4s, v22.4s, v5.4s\n"
"smax v16.4s, v16.4s, v5.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
+ "srshl v18.4s, v18.4s, v0.4s\n"
+ "srshl v19.4s, v19.4s, v0.4s\n"
+ "srshl v24.4s, v24.4s, v0.4s\n"
+ "srshl v25.4s, v25.4s, v0.4s\n"
+ "add v18.4s, v18.4s, v4.4s\n"
+ "add v19.4s, v19.4s, v4.4s\n"
+ "add v24.4s, v24.4s, v4.4s\n"
+ "smin v18.4s, v18.4s, v6.4s\n"
+ "smin v19.4s, v19.4s, v6.4s\n"
+ "smin v24.4s, v24.4s, v6.4s\n"
"smax v18.4s, v18.4s, v5.4s\n"
"smax v19.4s, v19.4s, v5.4s\n"
"smax v24.4s, v24.4s, v5.4s\n"
- "smax v25.4s, v25.4s, v5.4s\n"
- "smax v26.4s, v26.4s, v5.4s\n"
- "smax v27.4s, v27.4s, v5.4s\n"
+ "add v25.4s, v25.4s, v4.4s\n"
+ "srshl v26.4s, v26.4s, v0.4s\n"
+ "srshl v27.4s, v27.4s, v0.4s\n"
+ "smin v25.4s, v25.4s, v6.4s\n"
"uzp1 v31.8h, v31.8h, v20.8h\n"
+ "add v26.4s, v26.4s, v4.4s\n"
+ "smax v25.4s, v25.4s, v5.4s\n"
+ "add v27.4s, v27.4s, v4.4s\n"
+ "smin v26.4s, v26.4s, v6.4s\n"
"uzp1 v20.8h, v21.8h, v22.8h\n"
+ "smin v27.4s, v27.4s, v6.4s\n"
+ "smax v26.4s, v26.4s, v5.4s\n"
"uzp1 v16.8h, v16.8h, v17.8h\n"
+ "smax v27.4s, v27.4s, v5.4s\n"
"uzp1 v17.8h, v18.8h, v19.8h\n"
"uzp1 v24.8h, v24.8h, v25.8h\n"
"uzp1 v25.8h, v26.8h, v27.8h\n"
@@ -1332,103 +1336,103 @@ void a64_hybrid_s8qa_mmla_4x16 (
"uzp1 v24.16b, v24.16b, v25.16b\n"
"bge 95f\n"
"tbz x9, #3, 90f\n"
- "str d31, [x27], #0x8\n"
- "str d16, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
+ "str d31, [x26], #0x8\n"
+ "str d16, [x21], #0x8\n"
+ "str d24, [x20], #0x8\n"
"tbz x9, #2, 88f\n"
- "st1 { v31.s }[2], [x27], #0x4\n"
- "st1 { v16.s }[2], [x22], #0x4\n"
- "st1 { v24.s }[2], [x21], #0x4\n"
+ "st1 { v31.s }[2], [x26], #0x4\n"
+ "st1 { v16.s }[2], [x21], #0x4\n"
+ "st1 { v24.s }[2], [x20], #0x4\n"
"tbz x9, #1, 87f\n"
- "st1 { v31.h }[6], [x27], #0x2\n"
- "st1 { v16.h }[6], [x22], #0x2\n"
- "st1 { v24.h }[6], [x21], #0x2\n"
+ "st1 { v31.h }[6], [x26], #0x2\n"
+ "st1 { v16.h }[6], [x21], #0x2\n"
+ "st1 { v24.h }[6], [x20], #0x2\n"
"tbz x9, #0, 94f\n"
- "st1 { v31.b }[14], [x27]\n"
- "st1 { v16.b }[14], [x22]\n"
- "st1 { v24.b }[14], [x21]\n"
+ "st1 { v31.b }[14], [x26]\n"
+ "st1 { v16.b }[14], [x21]\n"
+ "st1 { v24.b }[14], [x20]\n"
"b 94f\n"
"87:" // Height 3: Partial direct writeback: partial_1_12
"tbz x9, #0, 94f\n"
- "st1 { v31.b }[12], [x27]\n"
- "st1 { v16.b }[12], [x22]\n"
- "st1 { v24.b }[12], [x21]\n"
+ "st1 { v31.b }[12], [x26]\n"
+ "st1 { v16.b }[12], [x21]\n"
+ "st1 { v24.b }[12], [x20]\n"
"b 94f\n"
"88:" // Height 3: Partial direct writeback: partial_2_8
"tbz x9, #1, 89f\n"
- "st1 { v31.h }[4], [x27], #0x2\n"
- "st1 { v16.h }[4], [x22], #0x2\n"
- "st1 { v24.h }[4], [x21], #0x2\n"
+ "st1 { v31.h }[4], [x26], #0x2\n"
+ "st1 { v16.h }[4], [x21], #0x2\n"
+ "st1 { v24.h }[4], [x20], #0x2\n"
"tbz x9, #0, 94f\n"
- "st1 { v31.b }[10], [x27]\n"
- "st1 { v16.b }[10], [x22]\n"
- "st1 { v24.b }[10], [x21]\n"
+ "st1 { v31.b }[10], [x26]\n"
+ "st1 { v16.b }[10], [x21]\n"
+ "st1 { v24.b }[10], [x20]\n"
"b 94f\n"
"89:" // Height 3: Partial direct writeback: partial_1_8
"tbz x9, #0, 94f\n"
- "st1 { v31.b }[8], [x27]\n"
- "st1 { v16.b }[8], [x22]\n"
- "st1 { v24.b }[8], [x21]\n"
+ "st1 { v31.b }[8], [x26]\n"
+ "st1 { v16.b }[8], [x21]\n"
+ "st1 { v24.b }[8], [x20]\n"
"b 94f\n"
"90:" // Height 3: Partial direct writeback: partial_4_0
"tbz x9, #2, 92f\n"
- "str s31, [x27], #0x4\n"
- "str s16, [x22], #0x4\n"
- "str s24, [x21], #0x4\n"
+ "str s31, [x26], #0x4\n"
+ "str s16, [x21], #0x4\n"
+ "str s24, [x20], #0x4\n"
"tbz x9, #1, 91f\n"
- "st1 { v31.h }[2], [x27], #0x2\n"
- "st1 { v16.h }[2], [x22], #0x2\n"
- "st1 { v24.h }[2], [x21], #0x2\n"
+ "st1 { v31.h }[2], [x26], #0x2\n"
+ "st1 { v16.h }[2], [x21], #0x2\n"
+ "st1 { v24.h }[2], [x20], #0x2\n"
"tbz x9, #0, 94f\n"
- "st1 { v31.b }[6], [x27]\n"
- "st1 { v16.b }[6], [x22]\n"
- "st1 { v24.b }[6], [x21]\n"
+ "st1 { v31.b }[6], [x26]\n"
+ "st1 { v16.b }[6], [x21]\n"
+ "st1 { v24.b }[6], [x20]\n"
"b 94f\n"
"91:" // Height 3: Partial direct writeback: partial_1_4
"tbz x9, #0, 94f\n"
- "st1 { v31.b }[4], [x27]\n"
- "st1 { v16.b }[4], [x22]\n"
- "st1 { v24.b }[4], [x21]\n"
+ "st1 { v31.b }[4], [x26]\n"
+ "st1 { v16.b }[4], [x21]\n"
+ "st1 { v24.b }[4], [x20]\n"
"b 94f\n"
"92:" // Height 3: Partial direct writeback: partial_2_0
"tbz x9, #1, 93f\n"
- "str h31, [x27], #0x2\n"
- "str h16, [x22], #0x2\n"
- "str h24, [x21], #0x2\n"
+ "str h31, [x26], #0x2\n"
+ "str h16, [x21], #0x2\n"
+ "str h24, [x20], #0x2\n"
"tbz x9, #0, 94f\n"
- "st1 { v31.b }[2], [x27]\n"
- "st1 { v16.b }[2], [x22]\n"
- "st1 { v24.b }[2], [x21]\n"
+ "st1 { v31.b }[2], [x26]\n"
+ "st1 { v16.b }[2], [x21]\n"
+ "st1 { v24.b }[2], [x20]\n"
"b 94f\n"
"93:" // Height 3: Partial direct writeback: partial_1_0
- "str b31, [x27, #0x0]\n"
- "str b16, [x22, #0x0]\n"
- "str b24, [x21, #0x0]\n"
+ "str b31, [x26, #0x0]\n"
+ "str b16, [x21, #0x0]\n"
+ "str b24, [x20, #0x0]\n"
"94:" // Height 3: Partial direct writeback: Done
"b 96f\n"
"95:" // Height 3: Full writeback
- "str q31, [x27, #0x0]\n"
- "add x27, x27, #0x10\n"
- "str q16, [x22, #0x0]\n"
- "str q24, [x21, #0x0]\n"
+ "str q31, [x26, #0x0]\n"
+ "add x26, x26, #0x10\n"
+ "str q16, [x21, #0x0]\n"
+ "str q24, [x20, #0x0]\n"
"96:" // Height 3: Writeback done
"subs x9, x9, #0x10\n"
"bgt 66b\n"
"b 130f\n"
"97:" // Height 4
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x4\n"
- "mov x10, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x27, %x[col_bias]\n"
"movi v12.4s, #0x0\n"
- "movi v13.4s, #0x0\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"bic %x[flags], %x[flags], #0x80000000\n"
- "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "movi v13.4s, #0x0\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x26, %x[output_ptr]\n"
"movi v14.4s, #0x0\n"
+ "mov x19, #0x4\n"
"movi v15.16b, #0x1\n"
- "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"98:" // Height 4: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -1447,70 +1451,70 @@ void a64_hybrid_s8qa_mmla_4x16 (
"movi v30.4s, #0x0\n"
"movi v31.4s, #0x0\n"
"99:" // Height 4: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"100:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 101f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "ldr x22, [x21, #0x10]\n"
- "ldr x21, [x21, #0x18]\n"
- "cbnz x26, 102f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
- "add x21, x21, x20\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "ldr x20, [x20, #0x18]\n"
+ "cbnz x25, 102f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
+ "add x20, x20, x19\n"
"b 102f\n"
"101:" // Height 4: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
+ "add x20, x21, x19\n"
"102:" // Height 4: input setup done
- "cmp x25, #0x10\n"
+ "cmp x24, #0x10\n"
"blt 107f\n"
- "ldr q1, [x24, #0x0]\n"
- "ldr q2, [x23, #0x0]\n"
- "cmp x25, #0x20\n"
- "ldr q3, [x22, #0x0]\n"
- "ldr q4, [x21, #0x0]\n"
- "ldr q5, [x28, #0x0]\n"
- "ldr q6, [x28, #0x10]\n"
- "ldr q7, [x28, #0x20]\n"
- "ldr q8, [x28, #0x30]\n"
- "ldr q9, [x28, #0x40]\n"
- "ldr q10, [x28, #0x50]\n"
+ "ldr q1, [x23, #0x0]\n"
+ "ldr q2, [x22, #0x0]\n"
+ "cmp x24, #0x20\n"
"blt 105f\n"
"103:" // Height 4: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x21, #0x0]\n"
+ "add x23, x23, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e85a410 // smmla v16.4s, v0.16b, v5.16b\n"
- "add x24, x24, #0x10\n"
+ "ldr q4, [x20, #0x0]\n"
+ "add x22, x22, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q5, [x28, #0x0]\n"
+ "add x21, x21, #0x10\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- "ldr q4, [x28, #0x60]\n"
+ "ldr q6, [x28, #0x10]\n"
+ "add x20, x20, #0x10\n"
+ ".inst 0x4e85a410 // smmla v16.4s, v0.16b, v5.16b\n"
+ "ldr q7, [x28, #0x20]\n"
".inst 0x4e85a458 // smmla v24.4s, v2.16b, v5.16b\n"
- "ldr q5, [x28, #0x70]\n"
+ "ldr q8, [x28, #0x30]\n"
".inst 0x4e86a414 // smmla v20.4s, v0.16b, v6.16b\n"
+ "ldr q9, [x28, #0x40]\n"
".inst 0x4e86a45c // smmla v28.4s, v2.16b, v6.16b\n"
- "ldr q6, [x28, #0x80]\n"
+ "ldr q10, [x28, #0x50]\n"
+ "ldr q4, [x28, #0x60]\n"
".inst 0x4e87a411 // smmla v17.4s, v0.16b, v7.16b\n"
".inst 0x4e87a459 // smmla v25.4s, v2.16b, v7.16b\n"
- "ldr q7, [x28, #0x90]\n"
- "add x23, x23, #0x10\n"
+ "ldr q5, [x28, #0x70]\n"
".inst 0x4e88a415 // smmla v21.4s, v0.16b, v8.16b\n"
+ "ldr q6, [x28, #0x80]\n"
".inst 0x4e88a45d // smmla v29.4s, v2.16b, v8.16b\n"
- "ldr q8, [x28, #0xa0]\n"
- "add x22, x22, #0x10\n"
+ "ldr q7, [x28, #0x90]\n"
".inst 0x4e89a412 // smmla v18.4s, v0.16b, v9.16b\n"
+ "ldr q8, [x28, #0xa0]\n"
".inst 0x4e89a45a // smmla v26.4s, v2.16b, v9.16b\n"
"ldr q9, [x28, #0xb0]\n"
- "add x21, x21, #0x10\n"
".inst 0x4e8aa416 // smmla v22.4s, v0.16b, v10.16b\n"
".inst 0x4e8aa45e // smmla v30.4s, v2.16b, v10.16b\n"
"ldr q10, [x28, #0xc0]\n"
@@ -1544,52 +1548,52 @@ void a64_hybrid_s8qa_mmla_4x16 (
".inst 0x4e8f942b // sdot v11.4s, v1.16b, v15.16b\n"
".inst 0x4e8f946d // sdot v13.4s, v3.16b, v15.16b\n"
"104:" // Height 4: Multiply loop: unique 13: skip row sum
- "ldr q1, [x24, #0x0]\n"
- "ldr q2, [x23, #0x0]\n"
- "sub x25, x25, #0x10\n"
- "cmp x25, #0x20\n"
- "ldr q3, [x22, #0x0]\n"
- "ldr q4, [x21, #0x0]\n"
- "ldr q5, [x28, #0x0]\n"
- "ldr q6, [x28, #0x10]\n"
- "ldr q7, [x28, #0x20]\n"
- "ldr q8, [x28, #0x30]\n"
- "ldr q9, [x28, #0x40]\n"
- "ldr q10, [x28, #0x50]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"prfm pldl1keep, [x23, #0x80]\n"
+ "sub x24, x24, #0x10\n"
"prfm pldl1keep, [x22, #0x80]\n"
+ "cmp x24, #0x20\n"
"prfm pldl1keep, [x21, #0x80]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
+ "ldr q1, [x23, #0x0]\n"
+ "ldr q2, [x22, #0x0]\n"
"bge 103b\n"
"105:" // Height 4: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x21, #0x0]\n"
+ "sub x24, x24, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e85a410 // smmla v16.4s, v0.16b, v5.16b\n"
- "sub x25, x25, #0x10\n"
+ "ldr q4, [x20, #0x0]\n"
+ "add x23, x23, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q5, [x28, #0x0]\n"
+ "add x22, x22, #0x10\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- "ldr q4, [x28, #0x60]\n"
+ "ldr q6, [x28, #0x10]\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x4e85a410 // smmla v16.4s, v0.16b, v5.16b\n"
+ "ldr q7, [x28, #0x20]\n"
+ "add x20, x20, #0x10\n"
".inst 0x4e85a458 // smmla v24.4s, v2.16b, v5.16b\n"
- "ldr q5, [x28, #0x70]\n"
+ "ldr q8, [x28, #0x30]\n"
".inst 0x4e86a414 // smmla v20.4s, v0.16b, v6.16b\n"
+ "ldr q9, [x28, #0x40]\n"
".inst 0x4e86a45c // smmla v28.4s, v2.16b, v6.16b\n"
- "ldr q6, [x28, #0x80]\n"
+ "ldr q10, [x28, #0x50]\n"
+ "ldr q4, [x28, #0x60]\n"
".inst 0x4e87a411 // smmla v17.4s, v0.16b, v7.16b\n"
".inst 0x4e87a459 // smmla v25.4s, v2.16b, v7.16b\n"
- "ldr q7, [x28, #0x90]\n"
- "add x24, x24, #0x10\n"
+ "ldr q5, [x28, #0x70]\n"
".inst 0x4e88a415 // smmla v21.4s, v0.16b, v8.16b\n"
+ "ldr q6, [x28, #0x80]\n"
".inst 0x4e88a45d // smmla v29.4s, v2.16b, v8.16b\n"
- "ldr q8, [x28, #0xa0]\n"
- "add x23, x23, #0x10\n"
+ "ldr q7, [x28, #0x90]\n"
".inst 0x4e89a412 // smmla v18.4s, v0.16b, v9.16b\n"
+ "ldr q8, [x28, #0xa0]\n"
".inst 0x4e89a45a // smmla v26.4s, v2.16b, v9.16b\n"
"ldr q9, [x28, #0xb0]\n"
- "add x22, x22, #0x10\n"
".inst 0x4e8aa416 // smmla v22.4s, v0.16b, v10.16b\n"
".inst 0x4e8aa45e // smmla v30.4s, v2.16b, v10.16b\n"
"ldr q10, [x28, #0xc0]\n"
- "add x21, x21, #0x10\n"
".inst 0x4e84a413 // smmla v19.4s, v0.16b, v4.16b\n"
".inst 0x4e84a45b // smmla v27.4s, v2.16b, v4.16b\n"
"ldr q4, [x28, #0xd0]\n"
@@ -1620,44 +1624,44 @@ void a64_hybrid_s8qa_mmla_4x16 (
".inst 0x4e8f942b // sdot v11.4s, v1.16b, v15.16b\n"
".inst 0x4e8f946d // sdot v13.4s, v3.16b, v15.16b\n"
"106:" // Height 4: Multiply loop: unique 14: skip row sum
- "prfm pldl1keep, [x24, #0x80]\n"
"prfm pldl1keep, [x23, #0x80]\n"
"prfm pldl1keep, [x22, #0x80]\n"
"prfm pldl1keep, [x21, #0x80]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
"107:" // Height 4: Multiply loop: Main loop skip
- "cbz x25, 116f\n"
- "cmp x25, #0x8\n"
+ "cbz x24, 116f\n"
+ "cmp x24, #0x8\n"
"blt 110f\n"
"108:" // Height 4: Multiply loop: Odd block loop
- "ldr d1, [x24], #0x8\n"
- "ldr d2, [x23], #0x8\n"
+ "ldr d1, [x23], #0x8\n"
+ "ldr d2, [x22], #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "ldr d3, [x22], #0x8\n"
- "ldr d7, [x21], #0x8\n"
+ "ldr d3, [x21], #0x8\n"
+ "ldr d7, [x20], #0x8\n"
"trn1 v2.2d, v3.2d, v7.2d\n"
"tbnz %x[flags], #31, 109f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
".inst 0x4e8f944d // sdot v13.4s, v2.16b, v15.16b\n"
"109:" // Height 4: Multiply loop: unique 15: skip row sum
"ldr q8, [x28, #0x0]\n"
- "ldr q9, [x28, #0x10]\n"
".inst 0x4e88a410 // smmla v16.4s, v0.16b, v8.16b\n"
+ "ldr q9, [x28, #0x10]\n"
+ "sub x24, x24, #0x8\n"
".inst 0x4e88a458 // smmla v24.4s, v2.16b, v8.16b\n"
"ldr q10, [x28, #0x20]\n"
+ "cmp x24, #0x8\n"
+ ".inst 0x4e89a414 // smmla v20.4s, v0.16b, v9.16b\n"
"ldr q4, [x28, #0x30]\n"
- "sub x25, x25, #0x8\n"
- "cmp x25, #0x8\n"
+ ".inst 0x4e89a45c // smmla v28.4s, v2.16b, v9.16b\n"
"ldr q5, [x28, #0x40]\n"
+ ".inst 0x4e8aa411 // smmla v17.4s, v0.16b, v10.16b\n"
"ldr q6, [x28, #0x50]\n"
- ".inst 0x4e89a414 // smmla v20.4s, v0.16b, v9.16b\n"
- ".inst 0x4e89a45c // smmla v28.4s, v2.16b, v9.16b\n"
+ ".inst 0x4e8aa459 // smmla v25.4s, v2.16b, v10.16b\n"
"ldr q7, [x28, #0x60]\n"
"ldr q8, [x28, #0x70]\n"
- ".inst 0x4e8aa411 // smmla v17.4s, v0.16b, v10.16b\n"
- ".inst 0x4e8aa459 // smmla v25.4s, v2.16b, v10.16b\n"
".inst 0x4e84a415 // smmla v21.4s, v0.16b, v4.16b\n"
- ".inst 0x4e84a45d // smmla v29.4s, v2.16b, v4.16b\n"
"add x28, x28, #0x80\n"
+ ".inst 0x4e84a45d // smmla v29.4s, v2.16b, v4.16b\n"
".inst 0x4e85a412 // smmla v18.4s, v0.16b, v5.16b\n"
".inst 0x4e85a45a // smmla v26.4s, v2.16b, v5.16b\n"
".inst 0x4e86a416 // smmla v22.4s, v0.16b, v6.16b\n"
@@ -1667,48 +1671,48 @@ void a64_hybrid_s8qa_mmla_4x16 (
".inst 0x4e88a417 // smmla v23.4s, v0.16b, v8.16b\n"
".inst 0x4e88a45f // smmla v31.4s, v2.16b, v8.16b\n"
"bge 108b\n"
+ "cbz x24, 116f\n"
"110:" // Height 4: Multiply loop: Skip odd blocks
- "cbz x25, 116f\n"
- "tbz x25, #2, 112f\n"
- "ldr s1, [x24], #0x4\n"
- "ldr s2, [x23], #0x4\n"
- "ldr s3, [x22], #0x4\n"
- "ldr s9, [x21], #0x4\n"
- "tbz x25, #1, 111f\n"
- "ld1 { v1.h }[2], [x24], #0x2\n"
- "ld1 { v2.h }[2], [x23], #0x2\n"
- "ld1 { v3.h }[2], [x22], #0x2\n"
- "ld1 { v9.h }[2], [x21], #0x2\n"
- "tbz x25, #0, 114f\n"
- "ld1 { v1.b }[6], [x24]\n"
- "ld1 { v2.b }[6], [x23]\n"
- "ld1 { v3.b }[6], [x22]\n"
- "ld1 { v9.b }[6], [x21]\n"
+ "tbz x24, #2, 112f\n"
+ "ldr s1, [x23], #0x4\n"
+ "ldr s2, [x22], #0x4\n"
+ "ldr s3, [x21], #0x4\n"
+ "ldr s9, [x20], #0x4\n"
+ "tbz x24, #1, 111f\n"
+ "ld1 { v1.h }[2], [x23], #0x2\n"
+ "ld1 { v2.h }[2], [x22], #0x2\n"
+ "ld1 { v3.h }[2], [x21], #0x2\n"
+ "ld1 { v9.h }[2], [x20], #0x2\n"
+ "tbz x24, #0, 114f\n"
+ "ld1 { v1.b }[6], [x23]\n"
+ "ld1 { v2.b }[6], [x22]\n"
+ "ld1 { v3.b }[6], [x21]\n"
+ "ld1 { v9.b }[6], [x20]\n"
"b 114f\n"
"111:" // Height 4: Multiply loop: Ragged operand read: partial_1_4
- "tbz x25, #0, 114f\n"
- "ld1 { v1.b }[4], [x24]\n"
- "ld1 { v2.b }[4], [x23]\n"
- "ld1 { v3.b }[4], [x22]\n"
- "ld1 { v9.b }[4], [x21]\n"
+ "tbz x24, #0, 114f\n"
+ "ld1 { v1.b }[4], [x23]\n"
+ "ld1 { v2.b }[4], [x22]\n"
+ "ld1 { v3.b }[4], [x21]\n"
+ "ld1 { v9.b }[4], [x20]\n"
"b 114f\n"
"112:" // Height 4: Multiply loop: Ragged operand read: partial_2_0
- "tbz x25, #1, 113f\n"
- "ldr h1, [x24], #0x2\n"
- "ldr h2, [x23], #0x2\n"
- "ldr h3, [x22], #0x2\n"
- "ldr h9, [x21], #0x2\n"
- "tbz x25, #0, 114f\n"
- "ld1 { v1.b }[2], [x24]\n"
- "ld1 { v2.b }[2], [x23]\n"
- "ld1 { v3.b }[2], [x22]\n"
- "ld1 { v9.b }[2], [x21]\n"
+ "tbz x24, #1, 113f\n"
+ "ldr h1, [x23], #0x2\n"
+ "ldr h2, [x22], #0x2\n"
+ "ldr h3, [x21], #0x2\n"
+ "ldr h9, [x20], #0x2\n"
+ "tbz x24, #0, 114f\n"
+ "ld1 { v1.b }[2], [x23]\n"
+ "ld1 { v2.b }[2], [x22]\n"
+ "ld1 { v3.b }[2], [x21]\n"
+ "ld1 { v9.b }[2], [x20]\n"
"b 114f\n"
"113:" // Height 4: Multiply loop: Ragged operand read: partial_1_0
- "ldr b1, [x24, #0x0]\n"
- "ldr b2, [x23, #0x0]\n"
- "ldr b3, [x22, #0x0]\n"
- "ldr b9, [x21, #0x0]\n"
+ "ldr b1, [x23, #0x0]\n"
+ "ldr b2, [x22, #0x0]\n"
+ "ldr b3, [x21, #0x0]\n"
+ "ldr b9, [x20, #0x0]\n"
"114:" // Height 4: Multiply loop: Ragged operand read: Done
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn1 v2.2d, v3.2d, v9.2d\n"
@@ -1717,24 +1721,24 @@ void a64_hybrid_s8qa_mmla_4x16 (
".inst 0x4e8f944d // sdot v13.4s, v2.16b, v15.16b\n"
"115:" // Height 4: Multiply loop: unique 16: skip row sum
"ldr q10, [x28, #0x0]\n"
- "ldr q4, [x28, #0x10]\n"
".inst 0x4e8aa410 // smmla v16.4s, v0.16b, v10.16b\n"
+ "ldr q4, [x28, #0x10]\n"
".inst 0x4e8aa458 // smmla v24.4s, v2.16b, v10.16b\n"
"ldr q5, [x28, #0x20]\n"
"ldr q6, [x28, #0x30]\n"
".inst 0x4e84a414 // smmla v20.4s, v0.16b, v4.16b\n"
- ".inst 0x4e84a45c // smmla v28.4s, v2.16b, v4.16b\n"
"ldr q7, [x28, #0x40]\n"
+ ".inst 0x4e84a45c // smmla v28.4s, v2.16b, v4.16b\n"
"ldr q8, [x28, #0x50]\n"
".inst 0x4e85a411 // smmla v17.4s, v0.16b, v5.16b\n"
- ".inst 0x4e85a459 // smmla v25.4s, v2.16b, v5.16b\n"
"ldr q9, [x28, #0x60]\n"
+ ".inst 0x4e85a459 // smmla v25.4s, v2.16b, v5.16b\n"
"ldr q10, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
".inst 0x4e86a415 // smmla v21.4s, v0.16b, v6.16b\n"
".inst 0x4e86a45d // smmla v29.4s, v2.16b, v6.16b\n"
".inst 0x4e87a412 // smmla v18.4s, v0.16b, v7.16b\n"
".inst 0x4e87a45a // smmla v26.4s, v2.16b, v7.16b\n"
- "add x28, x28, #0x80\n"
".inst 0x4e88a416 // smmla v22.4s, v0.16b, v8.16b\n"
".inst 0x4e88a45e // smmla v30.4s, v2.16b, v8.16b\n"
".inst 0x4e89a413 // smmla v19.4s, v0.16b, v9.16b\n"
@@ -1742,25 +1746,25 @@ void a64_hybrid_s8qa_mmla_4x16 (
".inst 0x4e8aa417 // smmla v23.4s, v0.16b, v10.16b\n"
".inst 0x4e8aa45f // smmla v31.4s, v2.16b, v10.16b\n"
"116:" // Height 4: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 100b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 v4.2d, v16.2d, v20.2d\n"
- "add x22, x27, x20\n"
- "add x21, x22, x20\n"
- "add x20, x21, x20\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x21, x26, x19\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
- "prfm pstl1keep, [x27, #0x0]\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
+ "add x20, x21, x19\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
- "prfm pstl1keep, [x22, #0x0]\n"
- "prfm pstl1keep, [x21, #0x0]\n"
+ "prfm pstl1keep, [x20, #0x0]\n"
+ "add x19, x20, x19\n"
"uzp2 v18.2d, v18.2d, v22.2d\n"
+ "prfm pstl1keep, [x19, #0x0]\n"
"uzp1 v22.2d, v19.2d, v23.2d\n"
- "prfm pstl1keep, [x20, #0x0]\n"
"uzp2 v19.2d, v19.2d, v23.2d\n"
"uzp1 v23.2d, v24.2d, v28.2d\n"
"uzp2 v24.2d, v24.2d, v28.2d\n"
@@ -1772,13 +1776,13 @@ void a64_hybrid_s8qa_mmla_4x16 (
"uzp2 v27.2d, v27.2d, v31.2d\n"
"mov v31.16b, v4.16b\n"
"tbnz %x[flags], #31, 117f\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
"addp v11.4s, v11.4s, v11.4s\n"
+ "add x22, %x[qp], %[b_offset]\n"
+ "ld1r { v4.4s }, [x22]\n"
"addp v13.4s, v13.4s, v13.4s\n"
- "neg v4.4s, v4.4s\n"
"dup v12.4s, v11.s[3]\n"
"dup v11.4s, v11.s[0]\n"
+ "neg v4.4s, v4.4s\n"
"dup v14.4s, v13.s[3]\n"
"dup v13.4s, v13.s[0]\n"
"mul v11.4s, v11.4s, v4.4s\n"
@@ -1786,25 +1790,25 @@ void a64_hybrid_s8qa_mmla_4x16 (
"mul v13.4s, v13.4s, v4.4s\n"
"mul v14.4s, v14.4s, v4.4s\n"
"117:" // Height 4: skip row sum fixup
- "ldr q0, [x10, #0x0]\n"
- "ldr q1, [x10, #0x10]\n"
"add v31.4s, v31.4s, v11.4s\n"
+ "ldr q0, [x27, #0x0]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"add v20.4s, v20.4s, v11.4s\n"
- "ldr q2, [x10, #0x20]\n"
- "ldr q3, [x10, #0x30]\n"
+ "ldr q1, [x27, #0x10]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
"add v21.4s, v21.4s, v11.4s\n"
+ "ldr q2, [x27, #0x20]\n"
+ "add x22, %x[qp], %[per_layer_mul]\n"
"add v22.4s, v22.4s, v11.4s\n"
+ "ldr q3, [x27, #0x30]\n"
+ "add x27, x27, #0x40\n"
"add v16.4s, v16.4s, v12.4s\n"
+ "ld1r { v4.4s }, [x22]\n"
"add v17.4s, v17.4s, v12.4s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x23]\n"
"add v18.4s, v18.4s, v12.4s\n"
"add v19.4s, v19.4s, v12.4s\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
"add v23.4s, v23.4s, v13.4s\n"
"add v28.4s, v28.4s, v13.4s\n"
- "add x10, x10, #0x40\n"
"add v29.4s, v29.4s, v13.4s\n"
"add v30.4s, v30.4s, v13.4s\n"
"add v24.4s, v24.4s, v14.4s\n"
@@ -1847,126 +1851,126 @@ void a64_hybrid_s8qa_mmla_4x16 (
"tbz %x[flags], #5, 118f\n"
"and v4.16b, v31.16b, v0.16b\n"
"and v5.16b, v20.16b, v0.16b\n"
+ "and v6.16b, v21.16b, v0.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
+ "sshr v6.4s, v6.4s, #0x1f\n"
"sqadd v31.4s, v31.4s, v4.4s\n"
"sqadd v20.4s, v20.4s, v5.4s\n"
- "and v6.16b, v21.16b, v0.16b\n"
+ "sqadd v21.4s, v21.4s, v6.4s\n"
"and v7.16b, v22.16b, v0.16b\n"
"and v8.16b, v16.16b, v0.16b\n"
"and v9.16b, v17.16b, v0.16b\n"
- "and v10.16b, v18.16b, v0.16b\n"
- "and v4.16b, v19.16b, v0.16b\n"
- "and v5.16b, v23.16b, v0.16b\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sshr v8.4s, v8.4s, #0x1f\n"
"sshr v9.4s, v9.4s, #0x1f\n"
- "sshr v10.4s, v10.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sqadd v21.4s, v21.4s, v6.4s\n"
"sqadd v22.4s, v22.4s, v7.4s\n"
"sqadd v16.4s, v16.4s, v8.4s\n"
"sqadd v17.4s, v17.4s, v9.4s\n"
+ "and v10.16b, v18.16b, v0.16b\n"
+ "and v4.16b, v19.16b, v0.16b\n"
+ "and v5.16b, v23.16b, v0.16b\n"
+ "sshr v10.4s, v10.4s, #0x1f\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
"sqadd v18.4s, v18.4s, v10.4s\n"
"sqadd v19.4s, v19.4s, v4.4s\n"
"sqadd v23.4s, v23.4s, v5.4s\n"
"and v6.16b, v28.16b, v0.16b\n"
"and v7.16b, v29.16b, v0.16b\n"
"and v8.16b, v30.16b, v0.16b\n"
- "and v9.16b, v24.16b, v0.16b\n"
- "and v10.16b, v25.16b, v0.16b\n"
- "and v4.16b, v26.16b, v0.16b\n"
- "and v5.16b, v27.16b, v0.16b\n"
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sshr v8.4s, v8.4s, #0x1f\n"
- "sshr v9.4s, v9.4s, #0x1f\n"
- "sshr v10.4s, v10.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
"sqadd v28.4s, v28.4s, v6.4s\n"
"sqadd v29.4s, v29.4s, v7.4s\n"
"sqadd v30.4s, v30.4s, v8.4s\n"
+ "and v9.16b, v24.16b, v0.16b\n"
+ "and v10.16b, v25.16b, v0.16b\n"
+ "and v4.16b, v26.16b, v0.16b\n"
+ "sshr v9.4s, v9.4s, #0x1f\n"
+ "sshr v10.4s, v10.4s, #0x1f\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
"sqadd v24.4s, v24.4s, v9.4s\n"
"sqadd v25.4s, v25.4s, v10.4s\n"
"sqadd v26.4s, v26.4s, v4.4s\n"
+ "and v5.16b, v27.16b, v0.16b\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
"sqadd v27.4s, v27.4s, v5.4s\n"
"118:" // Height 4: no shift correction
- "add x23, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
"srshl v31.4s, v31.4s, v0.4s\n"
+ "add x22, %x[qp], %[c_offset]\n"
+ "ld1r { v4.4s }, [x22]\n"
"srshl v20.4s, v20.4s, v0.4s\n"
+ "add x22, %x[qp], %[minval]\n"
"srshl v21.4s, v21.4s, v0.4s\n"
+ "ld1r { v5.4s }, [x22]\n"
+ "add x22, %x[qp], %[maxval]\n"
"srshl v22.4s, v22.4s, v0.4s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x23]\n"
+ "ld1r { v6.4s }, [x22]\n"
+ "cmp x9, #0x10\n"
"srshl v16.4s, v16.4s, v0.4s\n"
"srshl v17.4s, v17.4s, v0.4s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x23]\n"
- "srshl v18.4s, v18.4s, v0.4s\n"
- "srshl v19.4s, v19.4s, v0.4s\n"
- "cmp x9, #0x10\n"
- "srshl v23.4s, v23.4s, v0.4s\n"
- "srshl v28.4s, v28.4s, v0.4s\n"
- "srshl v29.4s, v29.4s, v0.4s\n"
- "srshl v30.4s, v30.4s, v0.4s\n"
- "srshl v24.4s, v24.4s, v0.4s\n"
- "srshl v25.4s, v25.4s, v0.4s\n"
- "srshl v26.4s, v26.4s, v0.4s\n"
- "srshl v27.4s, v27.4s, v0.4s\n"
"add v31.4s, v31.4s, v4.4s\n"
"add v20.4s, v20.4s, v4.4s\n"
"add v21.4s, v21.4s, v4.4s\n"
- "add v22.4s, v22.4s, v4.4s\n"
- "add v16.4s, v16.4s, v4.4s\n"
- "add v17.4s, v17.4s, v4.4s\n"
- "add v18.4s, v18.4s, v4.4s\n"
- "add v19.4s, v19.4s, v4.4s\n"
- "add v23.4s, v23.4s, v4.4s\n"
- "add v28.4s, v28.4s, v4.4s\n"
- "add v29.4s, v29.4s, v4.4s\n"
- "add v30.4s, v30.4s, v4.4s\n"
- "add v24.4s, v24.4s, v4.4s\n"
- "add v25.4s, v25.4s, v4.4s\n"
- "add v26.4s, v26.4s, v4.4s\n"
- "add v27.4s, v27.4s, v4.4s\n"
"smin v31.4s, v31.4s, v6.4s\n"
"smin v20.4s, v20.4s, v6.4s\n"
"smin v21.4s, v21.4s, v6.4s\n"
- "smin v22.4s, v22.4s, v6.4s\n"
- "smin v16.4s, v16.4s, v6.4s\n"
- "smin v17.4s, v17.4s, v6.4s\n"
- "smin v18.4s, v18.4s, v6.4s\n"
- "smin v19.4s, v19.4s, v6.4s\n"
- "smin v23.4s, v23.4s, v6.4s\n"
- "smin v28.4s, v28.4s, v6.4s\n"
- "smin v29.4s, v29.4s, v6.4s\n"
- "smin v30.4s, v30.4s, v6.4s\n"
- "smin v24.4s, v24.4s, v6.4s\n"
- "smin v25.4s, v25.4s, v6.4s\n"
- "smin v26.4s, v26.4s, v6.4s\n"
- "smin v27.4s, v27.4s, v6.4s\n"
"smax v31.4s, v31.4s, v5.4s\n"
"smax v20.4s, v20.4s, v5.4s\n"
"smax v21.4s, v21.4s, v5.4s\n"
+ "add v22.4s, v22.4s, v4.4s\n"
+ "add v16.4s, v16.4s, v4.4s\n"
+ "add v17.4s, v17.4s, v4.4s\n"
+ "smin v22.4s, v22.4s, v6.4s\n"
+ "smin v16.4s, v16.4s, v6.4s\n"
+ "smin v17.4s, v17.4s, v6.4s\n"
"smax v22.4s, v22.4s, v5.4s\n"
"smax v16.4s, v16.4s, v5.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
+ "srshl v18.4s, v18.4s, v0.4s\n"
+ "srshl v19.4s, v19.4s, v0.4s\n"
+ "srshl v23.4s, v23.4s, v0.4s\n"
+ "srshl v28.4s, v28.4s, v0.4s\n"
+ "add v18.4s, v18.4s, v4.4s\n"
+ "add v19.4s, v19.4s, v4.4s\n"
+ "add v23.4s, v23.4s, v4.4s\n"
+ "smin v18.4s, v18.4s, v6.4s\n"
+ "smin v19.4s, v19.4s, v6.4s\n"
+ "smin v23.4s, v23.4s, v6.4s\n"
"smax v18.4s, v18.4s, v5.4s\n"
"smax v19.4s, v19.4s, v5.4s\n"
"smax v23.4s, v23.4s, v5.4s\n"
+ "add v28.4s, v28.4s, v4.4s\n"
+ "srshl v29.4s, v29.4s, v0.4s\n"
+ "srshl v30.4s, v30.4s, v0.4s\n"
+ "smin v28.4s, v28.4s, v6.4s\n"
+ "srshl v24.4s, v24.4s, v0.4s\n"
+ "add v29.4s, v29.4s, v4.4s\n"
"smax v28.4s, v28.4s, v5.4s\n"
+ "add v30.4s, v30.4s, v4.4s\n"
+ "smin v29.4s, v29.4s, v6.4s\n"
+ "add v24.4s, v24.4s, v4.4s\n"
+ "smin v30.4s, v30.4s, v6.4s\n"
"smax v29.4s, v29.4s, v5.4s\n"
+ "smin v24.4s, v24.4s, v6.4s\n"
"smax v30.4s, v30.4s, v5.4s\n"
+ "srshl v25.4s, v25.4s, v0.4s\n"
"smax v24.4s, v24.4s, v5.4s\n"
+ "srshl v26.4s, v26.4s, v0.4s\n"
+ "srshl v27.4s, v27.4s, v0.4s\n"
+ "add v25.4s, v25.4s, v4.4s\n"
+ "uzp1 v31.8h, v31.8h, v20.8h\n"
+ "add v26.4s, v26.4s, v4.4s\n"
+ "smin v25.4s, v25.4s, v6.4s\n"
+ "add v27.4s, v27.4s, v4.4s\n"
+ "smin v26.4s, v26.4s, v6.4s\n"
"smax v25.4s, v25.4s, v5.4s\n"
+ "smin v27.4s, v27.4s, v6.4s\n"
"smax v26.4s, v26.4s, v5.4s\n"
- "smax v27.4s, v27.4s, v5.4s\n"
- "uzp1 v31.8h, v31.8h, v20.8h\n"
"uzp1 v20.8h, v21.8h, v22.8h\n"
+ "smax v27.4s, v27.4s, v5.4s\n"
"uzp1 v16.8h, v16.8h, v17.8h\n"
"uzp1 v17.8h, v18.8h, v19.8h\n"
"uzp1 v23.8h, v23.8h, v28.8h\n"
@@ -1979,120 +1983,120 @@ void a64_hybrid_s8qa_mmla_4x16 (
"uzp1 v24.16b, v24.16b, v25.16b\n"
"bge 127f\n"
"tbz x9, #3, 122f\n"
- "str d31, [x27], #0x8\n"
- "str d16, [x22], #0x8\n"
- "str d23, [x21], #0x8\n"
- "str d24, [x20], #0x8\n"
+ "str d31, [x26], #0x8\n"
+ "str d16, [x21], #0x8\n"
+ "str d23, [x20], #0x8\n"
+ "str d24, [x19], #0x8\n"
"tbz x9, #2, 120f\n"
- "st1 { v31.s }[2], [x27], #0x4\n"
- "st1 { v16.s }[2], [x22], #0x4\n"
- "st1 { v23.s }[2], [x21], #0x4\n"
- "st1 { v24.s }[2], [x20], #0x4\n"
+ "st1 { v31.s }[2], [x26], #0x4\n"
+ "st1 { v16.s }[2], [x21], #0x4\n"
+ "st1 { v23.s }[2], [x20], #0x4\n"
+ "st1 { v24.s }[2], [x19], #0x4\n"
"tbz x9, #1, 119f\n"
- "st1 { v31.h }[6], [x27], #0x2\n"
- "st1 { v16.h }[6], [x22], #0x2\n"
- "st1 { v23.h }[6], [x21], #0x2\n"
- "st1 { v24.h }[6], [x20], #0x2\n"
+ "st1 { v31.h }[6], [x26], #0x2\n"
+ "st1 { v16.h }[6], [x21], #0x2\n"
+ "st1 { v23.h }[6], [x20], #0x2\n"
+ "st1 { v24.h }[6], [x19], #0x2\n"
"tbz x9, #0, 126f\n"
- "st1 { v31.b }[14], [x27]\n"
- "st1 { v16.b }[14], [x22]\n"
- "st1 { v23.b }[14], [x21]\n"
- "st1 { v24.b }[14], [x20]\n"
+ "st1 { v31.b }[14], [x26]\n"
+ "st1 { v16.b }[14], [x21]\n"
+ "st1 { v23.b }[14], [x20]\n"
+ "st1 { v24.b }[14], [x19]\n"
"b 126f\n"
"119:" // Height 4: Partial direct writeback: partial_1_12
"tbz x9, #0, 126f\n"
- "st1 { v31.b }[12], [x27]\n"
- "st1 { v16.b }[12], [x22]\n"
- "st1 { v23.b }[12], [x21]\n"
- "st1 { v24.b }[12], [x20]\n"
+ "st1 { v31.b }[12], [x26]\n"
+ "st1 { v16.b }[12], [x21]\n"
+ "st1 { v23.b }[12], [x20]\n"
+ "st1 { v24.b }[12], [x19]\n"
"b 126f\n"
"120:" // Height 4: Partial direct writeback: partial_2_8
"tbz x9, #1, 121f\n"
- "st1 { v31.h }[4], [x27], #0x2\n"
- "st1 { v16.h }[4], [x22], #0x2\n"
- "st1 { v23.h }[4], [x21], #0x2\n"
- "st1 { v24.h }[4], [x20], #0x2\n"
+ "st1 { v31.h }[4], [x26], #0x2\n"
+ "st1 { v16.h }[4], [x21], #0x2\n"
+ "st1 { v23.h }[4], [x20], #0x2\n"
+ "st1 { v24.h }[4], [x19], #0x2\n"
"tbz x9, #0, 126f\n"
- "st1 { v31.b }[10], [x27]\n"
- "st1 { v16.b }[10], [x22]\n"
- "st1 { v23.b }[10], [x21]\n"
- "st1 { v24.b }[10], [x20]\n"
+ "st1 { v31.b }[10], [x26]\n"
+ "st1 { v16.b }[10], [x21]\n"
+ "st1 { v23.b }[10], [x20]\n"
+ "st1 { v24.b }[10], [x19]\n"
"b 126f\n"
"121:" // Height 4: Partial direct writeback: partial_1_8
"tbz x9, #0, 126f\n"
- "st1 { v31.b }[8], [x27]\n"
- "st1 { v16.b }[8], [x22]\n"
- "st1 { v23.b }[8], [x21]\n"
- "st1 { v24.b }[8], [x20]\n"
+ "st1 { v31.b }[8], [x26]\n"
+ "st1 { v16.b }[8], [x21]\n"
+ "st1 { v23.b }[8], [x20]\n"
+ "st1 { v24.b }[8], [x19]\n"
"b 126f\n"
"122:" // Height 4: Partial direct writeback: partial_4_0
"tbz x9, #2, 124f\n"
- "str s31, [x27], #0x4\n"
- "str s16, [x22], #0x4\n"
- "str s23, [x21], #0x4\n"
- "str s24, [x20], #0x4\n"
+ "str s31, [x26], #0x4\n"
+ "str s16, [x21], #0x4\n"
+ "str s23, [x20], #0x4\n"
+ "str s24, [x19], #0x4\n"
"tbz x9, #1, 123f\n"
- "st1 { v31.h }[2], [x27], #0x2\n"
- "st1 { v16.h }[2], [x22], #0x2\n"
- "st1 { v23.h }[2], [x21], #0x2\n"
- "st1 { v24.h }[2], [x20], #0x2\n"
+ "st1 { v31.h }[2], [x26], #0x2\n"
+ "st1 { v16.h }[2], [x21], #0x2\n"
+ "st1 { v23.h }[2], [x20], #0x2\n"
+ "st1 { v24.h }[2], [x19], #0x2\n"
"tbz x9, #0, 126f\n"
- "st1 { v31.b }[6], [x27]\n"
- "st1 { v16.b }[6], [x22]\n"
- "st1 { v23.b }[6], [x21]\n"
- "st1 { v24.b }[6], [x20]\n"
+ "st1 { v31.b }[6], [x26]\n"
+ "st1 { v16.b }[6], [x21]\n"
+ "st1 { v23.b }[6], [x20]\n"
+ "st1 { v24.b }[6], [x19]\n"
"b 126f\n"
"123:" // Height 4: Partial direct writeback: partial_1_4
"tbz x9, #0, 126f\n"
- "st1 { v31.b }[4], [x27]\n"
- "st1 { v16.b }[4], [x22]\n"
- "st1 { v23.b }[4], [x21]\n"
- "st1 { v24.b }[4], [x20]\n"
+ "st1 { v31.b }[4], [x26]\n"
+ "st1 { v16.b }[4], [x21]\n"
+ "st1 { v23.b }[4], [x20]\n"
+ "st1 { v24.b }[4], [x19]\n"
"b 126f\n"
"124:" // Height 4: Partial direct writeback: partial_2_0
"tbz x9, #1, 125f\n"
- "str h31, [x27], #0x2\n"
- "str h16, [x22], #0x2\n"
- "str h23, [x21], #0x2\n"
- "str h24, [x20], #0x2\n"
+ "str h31, [x26], #0x2\n"
+ "str h16, [x21], #0x2\n"
+ "str h23, [x20], #0x2\n"
+ "str h24, [x19], #0x2\n"
"tbz x9, #0, 126f\n"
- "st1 { v31.b }[2], [x27]\n"
- "st1 { v16.b }[2], [x22]\n"
- "st1 { v23.b }[2], [x21]\n"
- "st1 { v24.b }[2], [x20]\n"
+ "st1 { v31.b }[2], [x26]\n"
+ "st1 { v16.b }[2], [x21]\n"
+ "st1 { v23.b }[2], [x20]\n"
+ "st1 { v24.b }[2], [x19]\n"
"b 126f\n"
"125:" // Height 4: Partial direct writeback: partial_1_0
- "str b31, [x27, #0x0]\n"
- "str b16, [x22, #0x0]\n"
- "str b23, [x21, #0x0]\n"
- "str b24, [x20, #0x0]\n"
+ "str b31, [x26, #0x0]\n"
+ "str b16, [x21, #0x0]\n"
+ "str b23, [x20, #0x0]\n"
+ "str b24, [x19, #0x0]\n"
"126:" // Height 4: Partial direct writeback: Done
"b 128f\n"
"127:" // Height 4: Full writeback
- "str q31, [x27, #0x0]\n"
- "add x27, x27, #0x10\n"
- "str q16, [x22, #0x0]\n"
- "str q23, [x21, #0x0]\n"
- "str q24, [x20, #0x0]\n"
+ "str q31, [x26, #0x0]\n"
+ "add x26, x26, #0x10\n"
+ "str q16, [x21, #0x0]\n"
+ "str q23, [x20, #0x0]\n"
+ "str q24, [x19, #0x0]\n"
"128:" // Height 4: Writeback done
"subs x9, x9, #0x10\n"
"bgt 98b\n"
"subs %x[M], %x[M], #0x4\n"
"beq 130f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 129f\n"
- "add x21, x21, #0x4\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x4\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"129:" // Update direct input
- "mov x20, #0x4\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x4\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"130:" // Exit
: [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16/a55.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16/a55.cpp
index b97b63cdce..ba8a2ccb1d 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16/a55.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16/a55.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -95,11 +95,11 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"cmp %x[M], #0x2\n"
"bgt 55f\n"
"beq 28f\n"
- "mov x6, %x[col_bias]\n"
- "ldr x7, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
- "ldr x8, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x17, %x[output_ptr]\n"
- "ldr x16, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x6, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
+ "ldr x7, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
+ "mov x8, %x[col_bias]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x16, %x[output_ptr]\n"
"ldr x15, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"2:" // Height 1: Column loop
"movi v8.4s, #0x0\n"
@@ -110,15 +110,15 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"mov x14, #0x0\n"
"4:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"ldr w13, [x20, x14, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 5f\n"
- "ldr x21, [%x[input_ptr], x14, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x12, [x21, #0x0]\n"
+ "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x12, [x20, #0x0]\n"
"cbnz x14, 6f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x12, x12, x20\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x12, x12, x19\n"
"b 6f\n"
"5:" // Height 1: setup direct input
"mov x12, %x[input_ptr]\n"
@@ -126,90 +126,93 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"cmp x13, #0x10\n"
"blt 9f\n"
"ldr q0, [x12, #0x0]\n"
- "cmp x13, #0x20\n"
"ldr q6, [x15, #0x0]\n"
- "ldr q7, [x15, #0x10]\n"
+ "cmp x13, #0x20\n"
"blt 8f\n"
"7:" // Height 1: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr d7, [x15, #0x10]\n"
+ "ldr x11, [x15, #0x18]\n"
+ "add x12, x12, #0x10\n"
"ldr d6, [x15, #0x20]\n"
- "ldr x20, [x15, #0x28]\n"
+ "sub x13, x13, #0x10\n"
+ "ldr x10, [x15, #0x28]\n"
+ "cmp x13, #0x20\n"
+ "mov v7.d[1], x11\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
+ "ldr x11, [x15, #0x38]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x15, #0x30]\n"
- "mov v6.d[1], x20\n"
- "ldr x11, [x15, #0x38]\n"
- "mov v7.d[1], x11\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
"ldr d6, [x15, #0x40]\n"
- "ldr x20, [x15, #0x48]\n"
+ "ldr x10, [x15, #0x48]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x15, #0x58]\n"
+ "ldr x9, [x12, #0x8]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x15, #0x50]\n"
- "mov v6.d[1], x20\n"
- "ldr x11, [x15, #0x58]\n"
- "mov v7.d[1], x11\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
"ldr d6, [x15, #0x60]\n"
- "ldr x20, [x15, #0x68]\n"
+ "ldr x10, [x15, #0x68]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x15, #0x78]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x15, #0x70]\n"
- "mov v6.d[1], x20\n"
- "ldr x11, [x15, #0x78]\n"
- "mov v7.d[1], x11\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
"ldr d6, [x15, #0x80]\n"
- "ldr x20, [x15, #0x88]\n"
+ "ldr x10, [x15, #0x88]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x15, #0x98]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x15, #0x90]\n"
- "mov v6.d[1], x20\n"
- "ldr x11, [x15, #0x98]\n"
- "mov v7.d[1], x11\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
"ldr d6, [x15, #0xa0]\n"
- "ldr x20, [x15, #0xa8]\n"
+ "ldr x10, [x15, #0xa8]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x15, #0xb8]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x15, #0xb0]\n"
- "mov v6.d[1], x20\n"
- "ldr x11, [x15, #0xb8]\n"
- "mov v7.d[1], x11\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
"ldr d6, [x15, #0xc0]\n"
- "ldr x20, [x15, #0xc8]\n"
+ "ldr x10, [x15, #0xc8]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x15, #0xd8]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x15, #0xd0]\n"
- "mov v6.d[1], x20\n"
- "ldr x11, [x15, #0xd8]\n"
- "mov v7.d[1], x11\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
"ldr d6, [x15, #0xe0]\n"
- "ldr x20, [x15, #0xe8]\n"
+ "ldr x10, [x15, #0xe8]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x15, #0xf8]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x15, #0xf0]\n"
- "mov v6.d[1], x20\n"
- "ldr x11, [x15, #0xf8]\n"
- "mov v7.d[1], x11\n"
- "add x12, x12, #0x10\n"
"add x15, x15, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
"ldr d6, [x15, #0x0]\n"
- "ldr x20, [x15, #0x8]\n"
+ "ldr x10, [x15, #0x8]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
+ "mov v6.d[1], x10\n"
"ldr d0, [x12, #0x0]\n"
- "sub x13, x13, #0x10\n"
- "ldr d7, [x15, #0x10]\n"
- "cmp x13, #0x20\n"
- "ldr x10, [x12, #0x8]\n"
- "mov v6.d[1], x20\n"
- "ldr x11, [x15, #0x18]\n"
- "mov v0.d[1], x10\n"
- "mov v7.d[1], x11\n"
- "prfm pldl1keep, [x12, #0x80]\n"
+ "mov v0.d[1], x9\n"
"bge 7b\n"
"8:" // Height 1: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x15, #0x10]\n"
"ldr q6, [x15, #0x20]\n"
+ "sub x13, x13, #0x10\n"
+ "add x12, x12, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "ldr q7, [x15, #0x30]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x15, #0x30]\n"
"ldr q6, [x15, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
"ldr q7, [x15, #0x50]\n"
@@ -233,12 +236,9 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"ldr q6, [x15, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
"ldr q7, [x15, #0xf0]\n"
- "add x12, x12, #0x10\n"
- "sub x13, x13, #0x10\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
- "prfm pldl1keep, [x12, #0x80]\n"
- ".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
"add x15, x15, #0x100\n"
+ ".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
"9:" // Height 1: Multiply loop: Main loop skip
"cbz x13, 14f\n"
"cmp x13, #0x4\n"
@@ -247,18 +247,18 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"ldr s0, [x12], #0x4\n"
"sub x13, x13, #0x4\n"
"ldr q6, [x15, #0x0]\n"
- ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "cmp x13, #0x4\n"
"ldr q7, [x15, #0x10]\n"
- ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
+ ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q6, [x15, #0x20]\n"
- "cmp x13, #0x4\n"
+ ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
"ldr q7, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
- "add x15, x15, #0x40\n"
"bge 10b\n"
- "11:" // Height 1: Multiply loop: Skip odd blocks
"cbz x13, 14f\n"
+ "11:" // Height 1: Multiply loop: Skip odd blocks
"tbz x13, #1, 12f\n"
"ldr h0, [x12], #0x2\n"
"tbz x13, #0, 13f\n"
@@ -268,46 +268,46 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"ldr b0, [x12, #0x0]\n"
"13:" // Height 1: Multiply loop: Ragged operand read: Done
"ldr q6, [x15, #0x0]\n"
- ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x15, #0x10]\n"
- ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
+ ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q6, [x15, #0x20]\n"
- ".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
+ ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
"ldr q7, [x15, #0x30]\n"
- ".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
"add x15, x15, #0x40\n"
+ ".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
+ ".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
"14:" // Height 1: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x14, x14, #0x1\n"
- "cmp x14, x20\n"
+ "cmp x14, x19\n"
"bne 4b\n"
- "ldr q0, [x6, #0x0]\n"
- "add v8.4s, v8.4s, v0.4s\n"
- "ldr q1, [x6, #0x10]\n"
- "add v9.4s, v9.4s, v1.4s\n"
- "ldr q2, [x6, #0x20]\n"
- "add v10.4s, v10.4s, v2.4s\n"
- "ldr q3, [x6, #0x30]\n"
- "add v11.4s, v11.4s, v3.4s\n"
- "prfm pstl1keep, [x17, #0x0]\n"
- "add x6, x6, #0x40\n"
- "tbz %x[flags], #4, 15f\n"
+ "prfm pstl1keep, [x16, #0x0]\n"
"ldr q0, [x8, #0x0]\n"
- "ldr q4, [x7, #0x0]\n"
"ldr q1, [x8, #0x10]\n"
- "ldr q5, [x7, #0x10]\n"
"ldr q2, [x8, #0x20]\n"
- "ldr q6, [x7, #0x20]\n"
+ "add v8.4s, v8.4s, v0.4s\n"
"ldr q3, [x8, #0x30]\n"
+ "add v9.4s, v9.4s, v1.4s\n"
"add x8, x8, #0x40\n"
- "ldr q7, [x7, #0x30]\n"
+ "add v10.4s, v10.4s, v2.4s\n"
+ "add v11.4s, v11.4s, v3.4s\n"
+ "tbz %x[flags], #4, 15f\n"
+ "ldr q0, [x7, #0x0]\n"
+ "ldr q4, [x6, #0x0]\n"
+ "ldr q1, [x7, #0x10]\n"
+ "ldr q5, [x6, #0x10]\n"
+ "ldr q2, [x7, #0x20]\n"
+ "ldr q6, [x6, #0x20]\n"
+ "ldr q3, [x7, #0x30]\n"
"add x7, x7, #0x40\n"
+ "ldr q7, [x6, #0x30]\n"
+ "add x6, x6, #0x40\n"
"b 16f\n"
"15:" // Height 1: per layer parameters
"add x25, %x[qp], %[per_layer_right_shift]\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
"ld1r { v0.4s }, [x25]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x25]\n"
+ "ld1r { v4.4s }, [x24]\n"
"mov v1.16b, v0.16b\n"
"mov v5.16b, v4.16b\n"
"mov v2.16b, v0.16b\n"
@@ -337,87 +337,87 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"srshl v9.4s, v9.4s, v1.4s\n"
"srshl v10.4s, v10.4s, v2.4s\n"
"srshl v11.4s, v11.4s, v3.4s\n"
- "add x25, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x25]\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "add x25, %x[qp], %[minval]\n"
+ "ld1r { v4.4s }, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
+ "ld1r { v5.4s }, [x25]\n"
+ "cmp x17, #0x10\n"
+ "ld1r { v6.4s }, [x24]\n"
"add v8.4s, v8.4s, v4.4s\n"
"add v9.4s, v9.4s, v4.4s\n"
"add v10.4s, v10.4s, v4.4s\n"
"add v11.4s, v11.4s, v4.4s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x25]\n"
"smin v8.4s, v8.4s, v6.4s\n"
"smin v9.4s, v9.4s, v6.4s\n"
"smin v10.4s, v10.4s, v6.4s\n"
"smin v11.4s, v11.4s, v6.4s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x25]\n"
"smax v8.4s, v8.4s, v5.4s\n"
"smax v9.4s, v9.4s, v5.4s\n"
"smax v10.4s, v10.4s, v5.4s\n"
"smax v11.4s, v11.4s, v5.4s\n"
"uzp1 v8.8h, v8.8h, v9.8h\n"
"uzp1 v9.8h, v10.8h, v11.8h\n"
- "cmp x16, #0x10\n"
"uzp1 v8.16b, v8.16b, v9.16b\n"
"bge 26f\n"
- "tbz x16, #3, 21f\n"
- "str d8, [x17], #0x8\n"
- "tbz x16, #2, 19f\n"
- "st1 { v8.s }[2], [x17], #0x4\n"
- "tbz x16, #1, 18f\n"
- "st1 { v8.h }[6], [x17], #0x2\n"
- "tbz x16, #0, 25f\n"
- "st1 { v8.b }[14], [x17]\n"
+ "tbz x17, #3, 21f\n"
+ "str d8, [x16], #0x8\n"
+ "tbz x17, #2, 19f\n"
+ "st1 { v8.s }[2], [x16], #0x4\n"
+ "tbz x17, #1, 18f\n"
+ "st1 { v8.h }[6], [x16], #0x2\n"
+ "tbz x17, #0, 25f\n"
+ "st1 { v8.b }[14], [x16]\n"
"b 25f\n"
"18:" // Height 1: Partial direct writeback: partial_1_12
- "tbz x16, #0, 25f\n"
- "st1 { v8.b }[12], [x17]\n"
+ "tbz x17, #0, 25f\n"
+ "st1 { v8.b }[12], [x16]\n"
"b 25f\n"
"19:" // Height 1: Partial direct writeback: partial_2_8
- "tbz x16, #1, 20f\n"
- "st1 { v8.h }[4], [x17], #0x2\n"
- "tbz x16, #0, 25f\n"
- "st1 { v8.b }[10], [x17]\n"
+ "tbz x17, #1, 20f\n"
+ "st1 { v8.h }[4], [x16], #0x2\n"
+ "tbz x17, #0, 25f\n"
+ "st1 { v8.b }[10], [x16]\n"
"b 25f\n"
"20:" // Height 1: Partial direct writeback: partial_1_8
- "tbz x16, #0, 25f\n"
- "st1 { v8.b }[8], [x17]\n"
+ "tbz x17, #0, 25f\n"
+ "st1 { v8.b }[8], [x16]\n"
"b 25f\n"
"21:" // Height 1: Partial direct writeback: partial_4_0
- "tbz x16, #2, 23f\n"
- "str s8, [x17], #0x4\n"
- "tbz x16, #1, 22f\n"
- "st1 { v8.h }[2], [x17], #0x2\n"
- "tbz x16, #0, 25f\n"
- "st1 { v8.b }[6], [x17]\n"
+ "tbz x17, #2, 23f\n"
+ "str s8, [x16], #0x4\n"
+ "tbz x17, #1, 22f\n"
+ "st1 { v8.h }[2], [x16], #0x2\n"
+ "tbz x17, #0, 25f\n"
+ "st1 { v8.b }[6], [x16]\n"
"b 25f\n"
"22:" // Height 1: Partial direct writeback: partial_1_4
- "tbz x16, #0, 25f\n"
- "st1 { v8.b }[4], [x17]\n"
+ "tbz x17, #0, 25f\n"
+ "st1 { v8.b }[4], [x16]\n"
"b 25f\n"
"23:" // Height 1: Partial direct writeback: partial_2_0
- "tbz x16, #1, 24f\n"
- "str h8, [x17], #0x2\n"
- "tbz x16, #0, 25f\n"
- "st1 { v8.b }[2], [x17]\n"
+ "tbz x17, #1, 24f\n"
+ "str h8, [x16], #0x2\n"
+ "tbz x17, #0, 25f\n"
+ "st1 { v8.b }[2], [x16]\n"
"b 25f\n"
"24:" // Height 1: Partial direct writeback: partial_1_0
- "str b8, [x17, #0x0]\n"
+ "str b8, [x16, #0x0]\n"
"25:" // Height 1: Partial direct writeback: Done
"b 27f\n"
"26:" // Height 1: Full writeback
- "str q8, [x17, #0x0]\n"
- "add x17, x17, #0x10\n"
+ "str q8, [x16, #0x0]\n"
+ "add x16, x16, #0x10\n"
"27:" // Height 1: Writeback done
- "subs x16, x16, #0x10\n"
+ "subs x17, x17, #0x10\n"
"bgt 2b\n"
"b 164f\n"
"28:" // Height 2
- "mov x6, %x[col_bias]\n"
- "ldr x7, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
- "ldr x8, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x17, %x[output_ptr]\n"
- "ldr x16, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x6, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
+ "mov x8, %x[col_bias]\n"
+ "ldr x7, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
+ "mov x16, %x[output_ptr]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x15, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"29:" // Height 2: Column loop
"movi v8.4s, #0x0\n"
@@ -432,144 +432,144 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"mov x14, #0x0\n"
"31:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"ldr w13, [x20, x14, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 32f\n"
- "ldr x21, [%x[input_ptr], x14, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x12, [x21, #0x0]\n"
- "ldr x9, [x21, #0x8]\n"
+ "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x12, [x20, #0x0]\n"
+ "ldr x28, [x20, #0x8]\n"
"cbnz x14, 33f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x12, x12, x20\n"
- "add x9, x9, x20\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x12, x12, x19\n"
+ "add x28, x28, x19\n"
"b 33f\n"
"32:" // Height 2: setup direct input
"mov x12, %x[input_ptr]\n"
- "add x9, x12, x20\n"
+ "add x28, x12, x19\n"
"33:" // Height 2: input setup done
"cmp x13, #0x10\n"
"blt 36f\n"
"ldr q0, [x12, #0x0]\n"
+ "ldr q1, [x28, #0x0]\n"
"cmp x13, #0x20\n"
- "ldr q1, [x9, #0x0]\n"
"ldr q6, [x15, #0x0]\n"
- "ldr q7, [x15, #0x10]\n"
"blt 35f\n"
"34:" // Height 2: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr x20, [x15, #0x28]\n"
+ "ldr d7, [x15, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
+ "ldr x11, [x15, #0x18]\n"
"ldr d6, [x15, #0x20]\n"
+ "add x12, x12, #0x10\n"
+ "ldr x10, [x15, #0x28]\n"
+ "add x28, x28, #0x10\n"
+ "mov v7.d[1], x11\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
+ "sub x13, x13, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "ldr x11, [x15, #0x38]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
"ldr d7, [x15, #0x30]\n"
- "mov v6.d[1], x20\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
- "mov v7.d[1], x11\n"
+ "ldr x11, [x15, #0x38]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
"ldr d6, [x15, #0x40]\n"
+ "ldr x10, [x15, #0x48]\n"
+ "cmp x13, #0x20\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x15, #0x58]\n"
+ "ldr x9, [x12, #0x8]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
- "ldr x20, [x15, #0x48]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
"ldr d7, [x15, #0x50]\n"
- "mov v6.d[1], x20\n"
- "ldr x11, [x15, #0x58]\n"
- "mov v7.d[1], x11\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
- "ldr x20, [x15, #0x68]\n"
+ "ldr x10, [x15, #0x68]\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
"ldr d6, [x15, #0x60]\n"
- ".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
+ "mov v7.d[1], x11\n"
"ldr x11, [x15, #0x78]\n"
+ "ldr x27, [x28, #0x8]\n"
+ ".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
"ldr d7, [x15, #0x70]\n"
- "mov v6.d[1], x20\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
- "mov v7.d[1], x11\n"
+ "ldr x10, [x15, #0x88]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
"ldr d6, [x15, #0x80]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x15, #0x98]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
- "ldr x20, [x15, #0x88]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
"ldr d7, [x15, #0x90]\n"
- "mov v6.d[1], x20\n"
- "ldr x11, [x15, #0x98]\n"
- "mov v7.d[1], x11\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
- "ldr x20, [x15, #0xa8]\n"
+ "ldr x10, [x15, #0xa8]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
"ldr d6, [x15, #0xa0]\n"
- ".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
+ "mov v7.d[1], x11\n"
"ldr x11, [x15, #0xb8]\n"
+ ".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
"ldr d7, [x15, #0xb0]\n"
- "mov v6.d[1], x20\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
- "mov v7.d[1], x11\n"
+ "ldr x10, [x15, #0xc8]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
"ldr d6, [x15, #0xc0]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x15, #0xd8]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
- "ldr x20, [x15, #0xc8]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
"ldr d7, [x15, #0xd0]\n"
- "mov v6.d[1], x20\n"
- "ldr x11, [x15, #0xd8]\n"
- "mov v7.d[1], x11\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
- "ldr x20, [x15, #0xe8]\n"
+ "ldr x10, [x15, #0xe8]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
"ldr d6, [x15, #0xe0]\n"
- ".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
+ "mov v7.d[1], x11\n"
"ldr x11, [x15, #0xf8]\n"
+ ".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
"ldr d7, [x15, #0xf0]\n"
- "mov v6.d[1], x20\n"
- "add x12, x12, #0x10\n"
- "mov v7.d[1], x11\n"
- "add x9, x9, #0x10\n"
- "add x15, x15, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
+ "add x15, x15, #0x100\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
"ldr d6, [x15, #0x0]\n"
- "ldr x20, [x15, #0x8]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x10, [x15, #0x8]\n"
".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
"ldr d0, [x12, #0x0]\n"
".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n"
- "ldr d1, [x9, #0x0]\n"
- "sub x13, x13, #0x10\n"
- "ldr d7, [x15, #0x10]\n"
- "cmp x13, #0x20\n"
- "ldr x10, [x12, #0x8]\n"
- "mov v6.d[1], x20\n"
- "ldr x28, [x9, #0x8]\n"
- "mov v0.d[1], x10\n"
- "ldr x11, [x15, #0x18]\n"
- "mov v1.d[1], x28\n"
- "prfm pldl1keep, [x12, #0x80]\n"
- "mov v7.d[1], x11\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "mov v6.d[1], x10\n"
+ "ldr d1, [x28, #0x0]\n"
+ "mov v0.d[1], x9\n"
+ "mov v1.d[1], x27\n"
"bge 34b\n"
"35:" // Height 2: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "add x12, x12, #0x10\n"
+ "ldr q7, [x15, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
"ldr q6, [x15, #0x20]\n"
+ "sub x13, x13, #0x10\n"
+ "add x12, x12, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
"ldr q7, [x15, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
- "sub x13, x13, #0x10\n"
+ "add x28, x28, #0x10\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "ldr q6, [x15, #0x40]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
- "prfm pldl1keep, [x12, #0x80]\n"
+ "ldr q6, [x15, #0x40]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
"ldr q7, [x15, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
"ldr q6, [x15, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
@@ -611,11 +611,11 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"37:" // Height 2: Multiply loop: Odd block loop
"ldr s0, [x12], #0x4\n"
"sub x13, x13, #0x4\n"
- "ldr s1, [x9], #0x4\n"
+ "ldr s1, [x28], #0x4\n"
"cmp x13, #0x4\n"
"ldr q6, [x15, #0x0]\n"
- ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x15, #0x10]\n"
+ ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
"ldr q6, [x15, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
@@ -627,22 +627,22 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
"bge 37b\n"
- "38:" // Height 2: Multiply loop: Skip odd blocks
"cbz x13, 41f\n"
+ "38:" // Height 2: Multiply loop: Skip odd blocks
"tbz x13, #1, 39f\n"
"ldr h0, [x12], #0x2\n"
- "ldr h1, [x9], #0x2\n"
+ "ldr h1, [x28], #0x2\n"
"tbz x13, #0, 40f\n"
"ld1 { v0.b }[2], [x12]\n"
- "ld1 { v1.b }[2], [x9]\n"
+ "ld1 { v1.b }[2], [x28]\n"
"b 40f\n"
"39:" // Height 2: Multiply loop: Ragged operand read: partial_1_0
"ldr b0, [x12, #0x0]\n"
- "ldr b1, [x9, #0x0]\n"
+ "ldr b1, [x28, #0x0]\n"
"40:" // Height 2: Multiply loop: Ragged operand read: Done
"ldr q6, [x15, #0x0]\n"
- ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x15, #0x10]\n"
+ ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
"ldr q6, [x15, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
@@ -654,44 +654,44 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
"41:" // Height 2: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x14, x14, #0x1\n"
- "cmp x14, x20\n"
+ "cmp x14, x19\n"
"bne 31b\n"
- "ldr q0, [x6, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x16, #0x0]\n"
+ "ldr q0, [x8, #0x0]\n"
+ "ldr q1, [x8, #0x10]\n"
+ "add x23, x16, x19\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"add v8.4s, v8.4s, v0.4s\n"
- "ldr q1, [x6, #0x10]\n"
- "add v9.4s, v9.4s, v1.4s\n"
- "ldr q2, [x6, #0x20]\n"
- "add v10.4s, v10.4s, v2.4s\n"
- "ldr q3, [x6, #0x30]\n"
- "add v11.4s, v11.4s, v3.4s\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20\n"
- "prfm pstl1keep, [x17, #0x0]\n"
"add v12.4s, v12.4s, v0.4s\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "add v9.4s, v9.4s, v1.4s\n"
"add v13.4s, v13.4s, v1.4s\n"
- "add v14.4s, v14.4s, v2.4s\n"
- "add v15.4s, v15.4s, v3.4s\n"
- "add x6, x6, #0x40\n"
- "tbz %x[flags], #4, 42f\n"
- "ldr q0, [x8, #0x0]\n"
- "ldr q4, [x7, #0x0]\n"
- "ldr q1, [x8, #0x10]\n"
- "ldr q5, [x7, #0x10]\n"
"ldr q2, [x8, #0x20]\n"
- "ldr q6, [x7, #0x20]\n"
"ldr q3, [x8, #0x30]\n"
"add x8, x8, #0x40\n"
- "ldr q7, [x7, #0x30]\n"
+ "add v10.4s, v10.4s, v2.4s\n"
+ "add v14.4s, v14.4s, v2.4s\n"
+ "add v11.4s, v11.4s, v3.4s\n"
+ "add v15.4s, v15.4s, v3.4s\n"
+ "tbz %x[flags], #4, 42f\n"
+ "ldr q0, [x7, #0x0]\n"
+ "ldr q4, [x6, #0x0]\n"
+ "ldr q1, [x7, #0x10]\n"
+ "ldr q5, [x6, #0x10]\n"
+ "ldr q2, [x7, #0x20]\n"
+ "ldr q6, [x6, #0x20]\n"
+ "ldr q3, [x7, #0x30]\n"
"add x7, x7, #0x40\n"
+ "ldr q7, [x6, #0x30]\n"
+ "add x6, x6, #0x40\n"
"b 43f\n"
"42:" // Height 2: per layer parameters
"add x25, %x[qp], %[per_layer_right_shift]\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
"ld1r { v0.4s }, [x25]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x25]\n"
+ "ld1r { v4.4s }, [x24]\n"
"mov v1.16b, v0.16b\n"
"mov v5.16b, v4.16b\n"
"mov v2.16b, v0.16b\n"
@@ -741,8 +741,13 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"srshl v13.4s, v13.4s, v1.4s\n"
"srshl v14.4s, v14.4s, v2.4s\n"
"srshl v15.4s, v15.4s, v3.4s\n"
- "add x25, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x25]\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "add x25, %x[qp], %[minval]\n"
+ "ld1r { v4.4s }, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
+ "ld1r { v5.4s }, [x25]\n"
+ "cmp x17, #0x10\n"
+ "ld1r { v6.4s }, [x24]\n"
"add v8.4s, v8.4s, v4.4s\n"
"add v9.4s, v9.4s, v4.4s\n"
"add v10.4s, v10.4s, v4.4s\n"
@@ -751,8 +756,6 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"add v13.4s, v13.4s, v4.4s\n"
"add v14.4s, v14.4s, v4.4s\n"
"add v15.4s, v15.4s, v4.4s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x25]\n"
"smin v8.4s, v8.4s, v6.4s\n"
"smin v9.4s, v9.4s, v6.4s\n"
"smin v10.4s, v10.4s, v6.4s\n"
@@ -761,8 +764,6 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"smin v13.4s, v13.4s, v6.4s\n"
"smin v14.4s, v14.4s, v6.4s\n"
"smin v15.4s, v15.4s, v6.4s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x25]\n"
"smax v8.4s, v8.4s, v5.4s\n"
"smax v9.4s, v9.4s, v5.4s\n"
"smax v10.4s, v10.4s, v5.4s\n"
@@ -775,84 +776,83 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"uzp1 v9.8h, v10.8h, v11.8h\n"
"uzp1 v12.8h, v12.8h, v13.8h\n"
"uzp1 v13.8h, v14.8h, v15.8h\n"
- "cmp x16, #0x10\n"
"uzp1 v8.16b, v8.16b, v9.16b\n"
"uzp1 v12.16b, v12.16b, v13.16b\n"
"bge 53f\n"
- "tbz x16, #3, 48f\n"
- "str d8, [x17], #0x8\n"
- "str d12, [x24], #0x8\n"
- "tbz x16, #2, 46f\n"
- "st1 { v8.s }[2], [x17], #0x4\n"
- "st1 { v12.s }[2], [x24], #0x4\n"
- "tbz x16, #1, 45f\n"
- "st1 { v8.h }[6], [x17], #0x2\n"
- "st1 { v12.h }[6], [x24], #0x2\n"
- "tbz x16, #0, 52f\n"
- "st1 { v8.b }[14], [x17]\n"
- "st1 { v12.b }[14], [x24]\n"
+ "tbz x17, #3, 48f\n"
+ "str d8, [x16], #0x8\n"
+ "str d12, [x23], #0x8\n"
+ "tbz x17, #2, 46f\n"
+ "st1 { v8.s }[2], [x16], #0x4\n"
+ "st1 { v12.s }[2], [x23], #0x4\n"
+ "tbz x17, #1, 45f\n"
+ "st1 { v8.h }[6], [x16], #0x2\n"
+ "st1 { v12.h }[6], [x23], #0x2\n"
+ "tbz x17, #0, 52f\n"
+ "st1 { v8.b }[14], [x16]\n"
+ "st1 { v12.b }[14], [x23]\n"
"b 52f\n"
"45:" // Height 2: Partial direct writeback: partial_1_12
- "tbz x16, #0, 52f\n"
- "st1 { v8.b }[12], [x17]\n"
- "st1 { v12.b }[12], [x24]\n"
+ "tbz x17, #0, 52f\n"
+ "st1 { v8.b }[12], [x16]\n"
+ "st1 { v12.b }[12], [x23]\n"
"b 52f\n"
"46:" // Height 2: Partial direct writeback: partial_2_8
- "tbz x16, #1, 47f\n"
- "st1 { v8.h }[4], [x17], #0x2\n"
- "st1 { v12.h }[4], [x24], #0x2\n"
- "tbz x16, #0, 52f\n"
- "st1 { v8.b }[10], [x17]\n"
- "st1 { v12.b }[10], [x24]\n"
+ "tbz x17, #1, 47f\n"
+ "st1 { v8.h }[4], [x16], #0x2\n"
+ "st1 { v12.h }[4], [x23], #0x2\n"
+ "tbz x17, #0, 52f\n"
+ "st1 { v8.b }[10], [x16]\n"
+ "st1 { v12.b }[10], [x23]\n"
"b 52f\n"
"47:" // Height 2: Partial direct writeback: partial_1_8
- "tbz x16, #0, 52f\n"
- "st1 { v8.b }[8], [x17]\n"
- "st1 { v12.b }[8], [x24]\n"
+ "tbz x17, #0, 52f\n"
+ "st1 { v8.b }[8], [x16]\n"
+ "st1 { v12.b }[8], [x23]\n"
"b 52f\n"
"48:" // Height 2: Partial direct writeback: partial_4_0
- "tbz x16, #2, 50f\n"
- "str s8, [x17], #0x4\n"
- "str s12, [x24], #0x4\n"
- "tbz x16, #1, 49f\n"
- "st1 { v8.h }[2], [x17], #0x2\n"
- "st1 { v12.h }[2], [x24], #0x2\n"
- "tbz x16, #0, 52f\n"
- "st1 { v8.b }[6], [x17]\n"
- "st1 { v12.b }[6], [x24]\n"
+ "tbz x17, #2, 50f\n"
+ "str s8, [x16], #0x4\n"
+ "str s12, [x23], #0x4\n"
+ "tbz x17, #1, 49f\n"
+ "st1 { v8.h }[2], [x16], #0x2\n"
+ "st1 { v12.h }[2], [x23], #0x2\n"
+ "tbz x17, #0, 52f\n"
+ "st1 { v8.b }[6], [x16]\n"
+ "st1 { v12.b }[6], [x23]\n"
"b 52f\n"
"49:" // Height 2: Partial direct writeback: partial_1_4
- "tbz x16, #0, 52f\n"
- "st1 { v8.b }[4], [x17]\n"
- "st1 { v12.b }[4], [x24]\n"
+ "tbz x17, #0, 52f\n"
+ "st1 { v8.b }[4], [x16]\n"
+ "st1 { v12.b }[4], [x23]\n"
"b 52f\n"
"50:" // Height 2: Partial direct writeback: partial_2_0
- "tbz x16, #1, 51f\n"
- "str h8, [x17], #0x2\n"
- "str h12, [x24], #0x2\n"
- "tbz x16, #0, 52f\n"
- "st1 { v8.b }[2], [x17]\n"
- "st1 { v12.b }[2], [x24]\n"
+ "tbz x17, #1, 51f\n"
+ "str h8, [x16], #0x2\n"
+ "str h12, [x23], #0x2\n"
+ "tbz x17, #0, 52f\n"
+ "st1 { v8.b }[2], [x16]\n"
+ "st1 { v12.b }[2], [x23]\n"
"b 52f\n"
"51:" // Height 2: Partial direct writeback: partial_1_0
- "str b8, [x17, #0x0]\n"
- "str b12, [x24, #0x0]\n"
+ "str b8, [x16, #0x0]\n"
+ "str b12, [x23, #0x0]\n"
"52:" // Height 2: Partial direct writeback: Done
"b 54f\n"
"53:" // Height 2: Full writeback
- "str q8, [x17, #0x0]\n"
- "add x17, x17, #0x10\n"
- "str q12, [x24, #0x0]\n"
+ "str q8, [x16, #0x0]\n"
+ "add x16, x16, #0x10\n"
+ "str q12, [x23, #0x0]\n"
"54:" // Height 2: Writeback done
- "subs x16, x16, #0x10\n"
+ "subs x17, x17, #0x10\n"
"bgt 29b\n"
"b 164f\n"
"55:" // Height 3
- "mov x6, %x[col_bias]\n"
- "ldr x7, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
- "ldr x8, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x17, %x[output_ptr]\n"
- "ldr x16, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x6, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
+ "mov x8, %x[col_bias]\n"
+ "ldr x7, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
+ "mov x16, %x[output_ptr]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x15, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"56:" // Height 3: Column loop
"movi v8.4s, #0x0\n"
@@ -871,172 +871,172 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"mov x14, #0x0\n"
"58:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"ldr w13, [x20, x14, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 59f\n"
- "ldr x21, [%x[input_ptr], x14, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x12, [x21, #0x0]\n"
- "ldr x9, [x21, #0x8]\n"
- "ldr x27, [x21, #0x10]\n"
+ "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x12, [x20, #0x0]\n"
+ "ldr x28, [x20, #0x8]\n"
+ "ldr x26, [x20, #0x10]\n"
"cbnz x14, 60f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x12, x12, x20\n"
- "add x9, x9, x20\n"
- "add x27, x27, x20\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x12, x12, x19\n"
+ "add x28, x28, x19\n"
+ "add x26, x26, x19\n"
"b 60f\n"
"59:" // Height 3: setup direct input
"mov x12, %x[input_ptr]\n"
- "add x9, x12, x20\n"
- "add x27, x9, x20\n"
+ "add x28, x12, x19\n"
+ "add x26, x28, x19\n"
"60:" // Height 3: input setup done
"cmp x13, #0x10\n"
"blt 63f\n"
"ldr q0, [x12, #0x0]\n"
+ "ldr q1, [x28, #0x0]\n"
"cmp x13, #0x20\n"
- "ldr q1, [x9, #0x0]\n"
- "ldr q2, [x27, #0x0]\n"
+ "ldr q2, [x26, #0x0]\n"
"ldr q6, [x15, #0x0]\n"
- "ldr q7, [x15, #0x10]\n"
"blt 62f\n"
"61:" // Height 3: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr x20, [x15, #0x28]\n"
+ "ldr d7, [x15, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr x11, [x15, #0x38]\n"
+ "ldr x11, [x15, #0x18]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
"ldr d6, [x15, #0x20]\n"
+ "ldr x10, [x15, #0x28]\n"
+ "add x12, x12, #0x10\n"
+ "mov v7.d[1], x11\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
+ "ldr x11, [x15, #0x38]\n"
+ "add x28, x28, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x20\n"
+ "mov v6.d[1], x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr x20, [x15, #0x48]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
"ldr d7, [x15, #0x30]\n"
- "mov v7.d[1], x11\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
+ "ldr x10, [x15, #0x48]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "ldr x11, [x15, #0x58]\n"
+ "ldr x9, [x12, #0x8]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x15, #0x40]\n"
+ "add x26, x26, #0x10\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x20\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
- "ldr x20, [x15, #0x68]\n"
+ "ldr x11, [x15, #0x58]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x15, #0x50]\n"
- "mov v7.d[1], x11\n"
+ "sub x13, x13, #0x10\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
+ "ldr x10, [x15, #0x68]\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
- "ldr x11, [x15, #0x78]\n"
+ "ldr x27, [x28, #0x8]\n"
".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x15, #0x60]\n"
+ "cmp x13, #0x20\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
- "mov v6.d[1], x20\n"
+ "ldr x11, [x15, #0x78]\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
- "ldr x20, [x15, #0x88]\n"
+ "ldr x25, [x26, #0x8]\n"
".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x15, #0x70]\n"
- "mov v7.d[1], x11\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
+ "ldr x10, [x15, #0x88]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
- "ldr x11, [x15, #0x98]\n"
".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x15, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
- "mov v6.d[1], x20\n"
+ "ldr x11, [x15, #0x98]\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
- "ldr x20, [x15, #0xa8]\n"
".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x15, #0x90]\n"
- "mov v7.d[1], x11\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
+ "ldr x10, [x15, #0xa8]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
- "ldr x11, [x15, #0xb8]\n"
".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x15, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
- "mov v6.d[1], x20\n"
+ "ldr x11, [x15, #0xb8]\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
- "ldr x20, [x15, #0xc8]\n"
".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x15, #0xb0]\n"
- "mov v7.d[1], x11\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
+ "ldr x10, [x15, #0xc8]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
- "ldr x11, [x15, #0xd8]\n"
".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x15, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
- "mov v6.d[1], x20\n"
+ "ldr x11, [x15, #0xd8]\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
- "ldr x20, [x15, #0xe8]\n"
".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x15, #0xd0]\n"
- "mov v7.d[1], x11\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
+ "ldr x10, [x15, #0xe8]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
- "ldr x11, [x15, #0xf8]\n"
".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x15, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
- "mov v6.d[1], x20\n"
+ "ldr x11, [x15, #0xf8]\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
- "add x12, x12, #0x10\n"
".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x15, #0xf0]\n"
- "mov v7.d[1], x11\n"
- "add x9, x9, #0x10\n"
- "add x27, x27, #0x10\n"
"add x15, x15, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
- "ldr x20, [x15, #0x8]\n"
+ "ldr x10, [x15, #0x8]\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
- "ldr x10, [x12, #0x8]\n"
".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x15, #0x0]\n"
".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
"ldr d0, [x12, #0x0]\n"
".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n"
- "ldr d1, [x9, #0x0]\n"
- "ldr x28, [x9, #0x8]\n"
+ "ldr d1, [x28, #0x0]\n"
".inst 0x4fa2e8f3 // sdot v19.4s, v7.16b, v2.4b[3]\n"
- "ldr d2, [x27, #0x0]\n"
- "sub x13, x13, #0x10\n"
- "ldr d7, [x15, #0x10]\n"
- "cmp x13, #0x20\n"
- "ldr x26, [x27, #0x8]\n"
- "mov v6.d[1], x20\n"
- "ldr x11, [x15, #0x18]\n"
- "mov v0.d[1], x10\n"
- "prfm pldl1keep, [x12, #0x80]\n"
- "mov v1.d[1], x28\n"
- "prfm pldl1keep, [x9, #0x80]\n"
- "mov v2.d[1], x26\n"
- "prfm pldl1keep, [x27, #0x80]\n"
- "mov v7.d[1], x11\n"
+ "mov v6.d[1], x10\n"
+ "mov v0.d[1], x9\n"
+ "ldr d2, [x26, #0x0]\n"
+ "mov v1.d[1], x27\n"
+ "mov v2.d[1], x25\n"
"bge 61b\n"
"62:" // Height 3: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "add x12, x12, #0x10\n"
+ "ldr q7, [x15, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "sub x13, x13, #0x10\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
"ldr q6, [x15, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "add x27, x27, #0x10\n"
+ "add x12, x12, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "sub x13, x13, #0x10\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
"ldr q7, [x15, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
- "prfm pldl1keep, [x12, #0x80]\n"
+ "add x28, x28, #0x10\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
"ldr q6, [x15, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "add x26, x26, #0x10\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
"ldr q7, [x15, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
@@ -1093,12 +1093,12 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"64:" // Height 3: Multiply loop: Odd block loop
"ldr s0, [x12], #0x4\n"
"sub x13, x13, #0x4\n"
- "ldr s1, [x9], #0x4\n"
+ "ldr s1, [x28], #0x4\n"
"cmp x13, #0x4\n"
- "ldr s2, [x27], #0x4\n"
+ "ldr s2, [x26], #0x4\n"
"ldr q6, [x15, #0x0]\n"
- ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x15, #0x10]\n"
+ ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
"ldr q6, [x15, #0x20]\n"
@@ -1114,25 +1114,25 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
"bge 64b\n"
- "65:" // Height 3: Multiply loop: Skip odd blocks
"cbz x13, 68f\n"
+ "65:" // Height 3: Multiply loop: Skip odd blocks
"tbz x13, #1, 66f\n"
"ldr h0, [x12], #0x2\n"
- "ldr h1, [x9], #0x2\n"
- "ldr h2, [x27], #0x2\n"
+ "ldr h1, [x28], #0x2\n"
+ "ldr h2, [x26], #0x2\n"
"tbz x13, #0, 67f\n"
"ld1 { v0.b }[2], [x12]\n"
- "ld1 { v1.b }[2], [x9]\n"
- "ld1 { v2.b }[2], [x27]\n"
+ "ld1 { v1.b }[2], [x28]\n"
+ "ld1 { v2.b }[2], [x26]\n"
"b 67f\n"
"66:" // Height 3: Multiply loop: Ragged operand read: partial_1_0
"ldr b0, [x12, #0x0]\n"
- "ldr b1, [x9, #0x0]\n"
- "ldr b2, [x27, #0x0]\n"
+ "ldr b1, [x28, #0x0]\n"
+ "ldr b2, [x26, #0x0]\n"
"67:" // Height 3: Multiply loop: Ragged operand read: Done
"ldr q6, [x15, #0x0]\n"
- ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x15, #0x10]\n"
+ ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
"ldr q6, [x15, #0x20]\n"
@@ -1148,50 +1148,50 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
"68:" // Height 3: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x14, x14, #0x1\n"
- "cmp x14, x20\n"
+ "cmp x14, x19\n"
"bne 58b\n"
- "ldr q0, [x6, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x16, #0x0]\n"
+ "ldr q0, [x8, #0x0]\n"
+ "ldr q1, [x8, #0x10]\n"
+ "add x23, x16, x19\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19\n"
"add v8.4s, v8.4s, v0.4s\n"
- "ldr q1, [x6, #0x10]\n"
- "add v9.4s, v9.4s, v1.4s\n"
- "ldr q2, [x6, #0x20]\n"
- "add v10.4s, v10.4s, v2.4s\n"
- "ldr q3, [x6, #0x30]\n"
- "add v11.4s, v11.4s, v3.4s\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20\n"
- "add x23, x24, x20\n"
- "prfm pstl1keep, [x17, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
"add v12.4s, v12.4s, v0.4s\n"
- "prfm pstl1keep, [x23, #0x0]\n"
+ "add v9.4s, v9.4s, v1.4s\n"
"add v13.4s, v13.4s, v1.4s\n"
- "add v14.4s, v14.4s, v2.4s\n"
- "add v15.4s, v15.4s, v3.4s\n"
"add v16.4s, v16.4s, v0.4s\n"
"add v17.4s, v17.4s, v1.4s\n"
- "add v18.4s, v18.4s, v2.4s\n"
- "add v19.4s, v19.4s, v3.4s\n"
- "add x6, x6, #0x40\n"
- "tbz %x[flags], #4, 69f\n"
- "ldr q0, [x8, #0x0]\n"
- "ldr q4, [x7, #0x0]\n"
- "ldr q1, [x8, #0x10]\n"
- "ldr q5, [x7, #0x10]\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
"ldr q2, [x8, #0x20]\n"
- "ldr q6, [x7, #0x20]\n"
"ldr q3, [x8, #0x30]\n"
"add x8, x8, #0x40\n"
- "ldr q7, [x7, #0x30]\n"
+ "add v10.4s, v10.4s, v2.4s\n"
+ "add v14.4s, v14.4s, v2.4s\n"
+ "add v11.4s, v11.4s, v3.4s\n"
+ "add v15.4s, v15.4s, v3.4s\n"
+ "add v18.4s, v18.4s, v2.4s\n"
+ "add v19.4s, v19.4s, v3.4s\n"
+ "tbz %x[flags], #4, 69f\n"
+ "ldr q0, [x7, #0x0]\n"
+ "ldr q4, [x6, #0x0]\n"
+ "ldr q1, [x7, #0x10]\n"
+ "ldr q5, [x6, #0x10]\n"
+ "ldr q2, [x7, #0x20]\n"
+ "ldr q6, [x6, #0x20]\n"
+ "ldr q3, [x7, #0x30]\n"
"add x7, x7, #0x40\n"
+ "ldr q7, [x6, #0x30]\n"
+ "add x6, x6, #0x40\n"
"b 70f\n"
"69:" // Height 3: per layer parameters
"add x25, %x[qp], %[per_layer_right_shift]\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
"ld1r { v0.4s }, [x25]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x25]\n"
+ "ld1r { v4.4s }, [x24]\n"
"mov v1.16b, v0.16b\n"
"mov v5.16b, v4.16b\n"
"mov v2.16b, v0.16b\n"
@@ -1221,10 +1221,10 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v8.4s, v8.4s, v4.4s\n"
+ "and v4.16b, v12.16b, v0.16b\n"
"sqadd v9.4s, v9.4s, v5.4s\n"
"sqadd v10.4s, v10.4s, v6.4s\n"
"sqadd v11.4s, v11.4s, v7.4s\n"
- "and v4.16b, v12.16b, v0.16b\n"
"and v5.16b, v13.16b, v1.16b\n"
"and v6.16b, v14.16b, v2.16b\n"
"and v7.16b, v15.16b, v3.16b\n"
@@ -1233,11 +1233,11 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v12.4s, v12.4s, v4.4s\n"
+ "and v4.16b, v16.16b, v0.16b\n"
"sqadd v13.4s, v13.4s, v5.4s\n"
+ "and v5.16b, v17.16b, v1.16b\n"
"sqadd v14.4s, v14.4s, v6.4s\n"
"sqadd v15.4s, v15.4s, v7.4s\n"
- "and v4.16b, v16.16b, v0.16b\n"
- "and v5.16b, v17.16b, v1.16b\n"
"and v6.16b, v18.16b, v2.16b\n"
"and v7.16b, v19.16b, v3.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
@@ -1261,8 +1261,13 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"srshl v17.4s, v17.4s, v1.4s\n"
"srshl v18.4s, v18.4s, v2.4s\n"
"srshl v19.4s, v19.4s, v3.4s\n"
- "add x25, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x25]\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "add x25, %x[qp], %[minval]\n"
+ "ld1r { v4.4s }, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
+ "ld1r { v5.4s }, [x25]\n"
+ "cmp x17, #0x10\n"
+ "ld1r { v6.4s }, [x24]\n"
"add v8.4s, v8.4s, v4.4s\n"
"add v9.4s, v9.4s, v4.4s\n"
"add v10.4s, v10.4s, v4.4s\n"
@@ -1273,10 +1278,6 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"add v15.4s, v15.4s, v4.4s\n"
"add v16.4s, v16.4s, v4.4s\n"
"add v17.4s, v17.4s, v4.4s\n"
- "add v18.4s, v18.4s, v4.4s\n"
- "add v19.4s, v19.4s, v4.4s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x25]\n"
"smin v8.4s, v8.4s, v6.4s\n"
"smin v9.4s, v9.4s, v6.4s\n"
"smin v10.4s, v10.4s, v6.4s\n"
@@ -1287,10 +1288,6 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"smin v15.4s, v15.4s, v6.4s\n"
"smin v16.4s, v16.4s, v6.4s\n"
"smin v17.4s, v17.4s, v6.4s\n"
- "smin v18.4s, v18.4s, v6.4s\n"
- "smin v19.4s, v19.4s, v6.4s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x25]\n"
"smax v8.4s, v8.4s, v5.4s\n"
"smax v9.4s, v9.4s, v5.4s\n"
"smax v10.4s, v10.4s, v5.4s\n"
@@ -1301,109 +1298,112 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"smax v15.4s, v15.4s, v5.4s\n"
"smax v16.4s, v16.4s, v5.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
- "smax v18.4s, v18.4s, v5.4s\n"
- "smax v19.4s, v19.4s, v5.4s\n"
+ "add v18.4s, v18.4s, v4.4s\n"
+ "add v19.4s, v19.4s, v4.4s\n"
"uzp1 v8.8h, v8.8h, v9.8h\n"
"uzp1 v9.8h, v10.8h, v11.8h\n"
"uzp1 v12.8h, v12.8h, v13.8h\n"
"uzp1 v13.8h, v14.8h, v15.8h\n"
"uzp1 v16.8h, v16.8h, v17.8h\n"
- "uzp1 v17.8h, v18.8h, v19.8h\n"
- "cmp x16, #0x10\n"
+ "smin v18.4s, v18.4s, v6.4s\n"
+ "smin v19.4s, v19.4s, v6.4s\n"
"uzp1 v8.16b, v8.16b, v9.16b\n"
"uzp1 v12.16b, v12.16b, v13.16b\n"
+ "smax v18.4s, v18.4s, v5.4s\n"
+ "smax v19.4s, v19.4s, v5.4s\n"
+ "uzp1 v17.8h, v18.8h, v19.8h\n"
"uzp1 v16.16b, v16.16b, v17.16b\n"
"bge 80f\n"
- "tbz x16, #3, 75f\n"
- "str d8, [x17], #0x8\n"
- "str d12, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "tbz x16, #2, 73f\n"
- "st1 { v8.s }[2], [x17], #0x4\n"
- "st1 { v12.s }[2], [x24], #0x4\n"
- "st1 { v16.s }[2], [x23], #0x4\n"
- "tbz x16, #1, 72f\n"
- "st1 { v8.h }[6], [x17], #0x2\n"
- "st1 { v12.h }[6], [x24], #0x2\n"
- "st1 { v16.h }[6], [x23], #0x2\n"
- "tbz x16, #0, 79f\n"
- "st1 { v8.b }[14], [x17]\n"
- "st1 { v12.b }[14], [x24]\n"
- "st1 { v16.b }[14], [x23]\n"
+ "tbz x17, #3, 75f\n"
+ "str d8, [x16], #0x8\n"
+ "str d12, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "tbz x17, #2, 73f\n"
+ "st1 { v8.s }[2], [x16], #0x4\n"
+ "st1 { v12.s }[2], [x23], #0x4\n"
+ "st1 { v16.s }[2], [x22], #0x4\n"
+ "tbz x17, #1, 72f\n"
+ "st1 { v8.h }[6], [x16], #0x2\n"
+ "st1 { v12.h }[6], [x23], #0x2\n"
+ "st1 { v16.h }[6], [x22], #0x2\n"
+ "tbz x17, #0, 79f\n"
+ "st1 { v8.b }[14], [x16]\n"
+ "st1 { v12.b }[14], [x23]\n"
+ "st1 { v16.b }[14], [x22]\n"
"b 79f\n"
"72:" // Height 3: Partial direct writeback: partial_1_12
- "tbz x16, #0, 79f\n"
- "st1 { v8.b }[12], [x17]\n"
- "st1 { v12.b }[12], [x24]\n"
- "st1 { v16.b }[12], [x23]\n"
+ "tbz x17, #0, 79f\n"
+ "st1 { v8.b }[12], [x16]\n"
+ "st1 { v12.b }[12], [x23]\n"
+ "st1 { v16.b }[12], [x22]\n"
"b 79f\n"
"73:" // Height 3: Partial direct writeback: partial_2_8
- "tbz x16, #1, 74f\n"
- "st1 { v8.h }[4], [x17], #0x2\n"
- "st1 { v12.h }[4], [x24], #0x2\n"
- "st1 { v16.h }[4], [x23], #0x2\n"
- "tbz x16, #0, 79f\n"
- "st1 { v8.b }[10], [x17]\n"
- "st1 { v12.b }[10], [x24]\n"
- "st1 { v16.b }[10], [x23]\n"
+ "tbz x17, #1, 74f\n"
+ "st1 { v8.h }[4], [x16], #0x2\n"
+ "st1 { v12.h }[4], [x23], #0x2\n"
+ "st1 { v16.h }[4], [x22], #0x2\n"
+ "tbz x17, #0, 79f\n"
+ "st1 { v8.b }[10], [x16]\n"
+ "st1 { v12.b }[10], [x23]\n"
+ "st1 { v16.b }[10], [x22]\n"
"b 79f\n"
"74:" // Height 3: Partial direct writeback: partial_1_8
- "tbz x16, #0, 79f\n"
- "st1 { v8.b }[8], [x17]\n"
- "st1 { v12.b }[8], [x24]\n"
- "st1 { v16.b }[8], [x23]\n"
+ "tbz x17, #0, 79f\n"
+ "st1 { v8.b }[8], [x16]\n"
+ "st1 { v12.b }[8], [x23]\n"
+ "st1 { v16.b }[8], [x22]\n"
"b 79f\n"
"75:" // Height 3: Partial direct writeback: partial_4_0
- "tbz x16, #2, 77f\n"
- "str s8, [x17], #0x4\n"
- "str s12, [x24], #0x4\n"
- "str s16, [x23], #0x4\n"
- "tbz x16, #1, 76f\n"
- "st1 { v8.h }[2], [x17], #0x2\n"
- "st1 { v12.h }[2], [x24], #0x2\n"
- "st1 { v16.h }[2], [x23], #0x2\n"
- "tbz x16, #0, 79f\n"
- "st1 { v8.b }[6], [x17]\n"
- "st1 { v12.b }[6], [x24]\n"
- "st1 { v16.b }[6], [x23]\n"
+ "tbz x17, #2, 77f\n"
+ "str s8, [x16], #0x4\n"
+ "str s12, [x23], #0x4\n"
+ "str s16, [x22], #0x4\n"
+ "tbz x17, #1, 76f\n"
+ "st1 { v8.h }[2], [x16], #0x2\n"
+ "st1 { v12.h }[2], [x23], #0x2\n"
+ "st1 { v16.h }[2], [x22], #0x2\n"
+ "tbz x17, #0, 79f\n"
+ "st1 { v8.b }[6], [x16]\n"
+ "st1 { v12.b }[6], [x23]\n"
+ "st1 { v16.b }[6], [x22]\n"
"b 79f\n"
"76:" // Height 3: Partial direct writeback: partial_1_4
- "tbz x16, #0, 79f\n"
- "st1 { v8.b }[4], [x17]\n"
- "st1 { v12.b }[4], [x24]\n"
- "st1 { v16.b }[4], [x23]\n"
+ "tbz x17, #0, 79f\n"
+ "st1 { v8.b }[4], [x16]\n"
+ "st1 { v12.b }[4], [x23]\n"
+ "st1 { v16.b }[4], [x22]\n"
"b 79f\n"
"77:" // Height 3: Partial direct writeback: partial_2_0
- "tbz x16, #1, 78f\n"
- "str h8, [x17], #0x2\n"
- "str h12, [x24], #0x2\n"
- "str h16, [x23], #0x2\n"
- "tbz x16, #0, 79f\n"
- "st1 { v8.b }[2], [x17]\n"
- "st1 { v12.b }[2], [x24]\n"
- "st1 { v16.b }[2], [x23]\n"
+ "tbz x17, #1, 78f\n"
+ "str h8, [x16], #0x2\n"
+ "str h12, [x23], #0x2\n"
+ "str h16, [x22], #0x2\n"
+ "tbz x17, #0, 79f\n"
+ "st1 { v8.b }[2], [x16]\n"
+ "st1 { v12.b }[2], [x23]\n"
+ "st1 { v16.b }[2], [x22]\n"
"b 79f\n"
"78:" // Height 3: Partial direct writeback: partial_1_0
- "str b8, [x17, #0x0]\n"
- "str b12, [x24, #0x0]\n"
- "str b16, [x23, #0x0]\n"
+ "str b8, [x16, #0x0]\n"
+ "str b12, [x23, #0x0]\n"
+ "str b16, [x22, #0x0]\n"
"79:" // Height 3: Partial direct writeback: Done
"b 81f\n"
"80:" // Height 3: Full writeback
- "str q8, [x17, #0x0]\n"
- "add x17, x17, #0x10\n"
- "str q12, [x24, #0x0]\n"
- "str q16, [x23, #0x0]\n"
+ "str q8, [x16, #0x0]\n"
+ "add x16, x16, #0x10\n"
+ "str q12, [x23, #0x0]\n"
+ "str q16, [x22, #0x0]\n"
"81:" // Height 3: Writeback done
- "subs x16, x16, #0x10\n"
+ "subs x17, x17, #0x10\n"
"bgt 56b\n"
"b 164f\n"
"82:" // Height 4
- "mov x6, %x[col_bias]\n"
- "ldr x7, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
- "ldr x8, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x17, %x[output_ptr]\n"
- "ldr x16, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x6, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
+ "mov x8, %x[col_bias]\n"
+ "ldr x7, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
+ "mov x16, %x[output_ptr]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x15, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"83:" // Height 4: Column loop
"movi v8.4s, #0x0\n"
@@ -1426,201 +1426,201 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"mov x14, #0x0\n"
"85:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"ldr w13, [x20, x14, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 86f\n"
- "ldr x21, [%x[input_ptr], x14, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x12, [x21, #0x0]\n"
- "ldr x9, [x21, #0x8]\n"
- "ldr x27, [x21, #0x10]\n"
- "ldr x25, [x21, #0x18]\n"
+ "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x12, [x20, #0x0]\n"
+ "ldr x28, [x20, #0x8]\n"
+ "ldr x26, [x20, #0x10]\n"
+ "ldr x24, [x20, #0x18]\n"
"cbnz x14, 87f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x12, x12, x20\n"
- "add x9, x9, x20\n"
- "add x27, x27, x20\n"
- "add x25, x25, x20\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x12, x12, x19\n"
+ "add x28, x28, x19\n"
+ "add x26, x26, x19\n"
+ "add x24, x24, x19\n"
"b 87f\n"
"86:" // Height 4: setup direct input
"mov x12, %x[input_ptr]\n"
- "add x9, x12, x20\n"
- "add x27, x9, x20\n"
- "add x25, x27, x20\n"
+ "add x28, x12, x19\n"
+ "add x26, x28, x19\n"
+ "add x24, x26, x19\n"
"87:" // Height 4: input setup done
"cmp x13, #0x10\n"
"blt 90f\n"
"ldr q0, [x12, #0x0]\n"
+ "ldr q1, [x28, #0x0]\n"
"cmp x13, #0x20\n"
- "ldr q1, [x9, #0x0]\n"
- "ldr q2, [x27, #0x0]\n"
- "ldr q3, [x25, #0x0]\n"
+ "ldr q2, [x26, #0x0]\n"
+ "ldr q3, [x24, #0x0]\n"
"ldr q6, [x15, #0x0]\n"
- "ldr q7, [x15, #0x10]\n"
"blt 89f\n"
"88:" // Height 4: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr x20, [x15, #0x28]\n"
+ "ldr d7, [x15, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr x11, [x15, #0x38]\n"
+ "ldr x11, [x15, #0x18]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
- "add x12, x12, #0x10\n"
+ "ldr x10, [x15, #0x28]\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
"ldr d6, [x15, #0x20]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x15, #0x38]\n"
+ "add x12, x12, #0x10\n"
+ "add x28, x28, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x20\n"
+ "mov v6.d[1], x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr x20, [x15, #0x48]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
"ldr d7, [x15, #0x30]\n"
- "mov v7.d[1], x11\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
+ "ldr x10, [x15, #0x48]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "ldr x11, [x15, #0x58]\n"
+ "ldr x9, [x12, #0x8]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
- "add x27, x27, #0x10\n"
+ "mov v7.d[1], x11\n"
".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n"
"ldr d6, [x15, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x20\n"
+ "ldr x11, [x15, #0x58]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
- "ldr x20, [x15, #0x68]\n"
+ "ldr x27, [x28, #0x8]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
- "add x25, x25, #0x10\n"
+ "mov v6.d[1], x10\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
"ldr d7, [x15, #0x50]\n"
- "mov v7.d[1], x11\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
+ "ldr x10, [x15, #0x68]\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
- "ldr x11, [x15, #0x78]\n"
+ "add x26, x26, #0x10\n"
".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n"
- "ldr x10, [x12, #0x8]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4fa3e0d4 // sdot v20.4s, v6.16b, v3.4b[1]\n"
- "ldr d6, [x15, #0x60]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
- "mov v6.d[1], x20\n"
+ "ldr d6, [x15, #0x60]\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
- "ldr x20, [x15, #0x88]\n"
+ "ldr x11, [x15, #0x78]\n"
".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n"
- "ldr x28, [x9, #0x8]\n"
+ "ldr x25, [x26, #0x8]\n"
".inst 0x4fa3e0f5 // sdot v21.4s, v7.16b, v3.4b[1]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x15, #0x70]\n"
- "mov v7.d[1], x11\n"
+ "add x24, x24, #0x10\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
- "ldr x11, [x15, #0x98]\n"
+ "ldr x10, [x15, #0x88]\n"
".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n"
- "ldr x26, [x27, #0x8]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4fa3e0d6 // sdot v22.4s, v6.16b, v3.4b[1]\n"
"ldr d6, [x15, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
- "mov v6.d[1], x20\n"
+ "ldr x11, [x15, #0x98]\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
- "ldr x20, [x15, #0xa8]\n"
+ "ldr x23, [x24, #0x8]\n"
".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n"
- "ldr x24, [x25, #0x8]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4fa3e0f7 // sdot v23.4s, v7.16b, v3.4b[1]\n"
"ldr d7, [x15, #0x90]\n"
- "mov v7.d[1], x11\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
+ "ldr x10, [x15, #0xa8]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
- "ldr x11, [x15, #0xb8]\n"
- ".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n"
"sub x13, x13, #0x10\n"
+ ".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4f83e8d4 // sdot v20.4s, v6.16b, v3.4b[2]\n"
"ldr d6, [x15, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
- "mov v6.d[1], x20\n"
+ "ldr x11, [x15, #0xb8]\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
- "ldr x20, [x15, #0xc8]\n"
- ".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n"
"cmp x13, #0x20\n"
+ ".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4f83e8f5 // sdot v21.4s, v7.16b, v3.4b[2]\n"
"ldr d7, [x15, #0xb0]\n"
- "mov v7.d[1], x11\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
+ "ldr x10, [x15, #0xc8]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
- "ldr x11, [x15, #0xd8]\n"
".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n"
- "prfm pldl1keep, [x12, #0x80]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4f83e8d6 // sdot v22.4s, v6.16b, v3.4b[2]\n"
"ldr d6, [x15, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
- "mov v6.d[1], x20\n"
+ "ldr x11, [x15, #0xd8]\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
- "ldr x20, [x15, #0xe8]\n"
".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4f83e8f7 // sdot v23.4s, v7.16b, v3.4b[2]\n"
"ldr d7, [x15, #0xd0]\n"
- "mov v7.d[1], x11\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
+ "ldr x10, [x15, #0xe8]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
- "ldr x11, [x15, #0xf8]\n"
".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4fa3e8d4 // sdot v20.4s, v6.16b, v3.4b[3]\n"
"ldr d6, [x15, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
- "mov v6.d[1], x20\n"
+ "ldr x11, [x15, #0xf8]\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4fa3e8f5 // sdot v21.4s, v7.16b, v3.4b[3]\n"
"ldr d7, [x15, #0xf0]\n"
- "mov v7.d[1], x11\n"
- "add x15, x15, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
- "ldr x20, [x15, #0x8]\n"
+ "add x15, x15, #0x100\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
- "ldr x11, [x15, #0x18]\n"
+ "ldr x10, [x15, #0x8]\n"
".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4fa3e8d6 // sdot v22.4s, v6.16b, v3.4b[3]\n"
"ldr d6, [x15, #0x0]\n"
".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
"ldr d0, [x12, #0x0]\n"
".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n"
- "ldr d1, [x9, #0x0]\n"
+ "ldr d1, [x28, #0x0]\n"
".inst 0x4fa2e8f3 // sdot v19.4s, v7.16b, v2.4b[3]\n"
- "ldr d2, [x27, #0x0]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4fa3e8f7 // sdot v23.4s, v7.16b, v3.4b[3]\n"
- "ldr d3, [x25, #0x0]\n"
- "ldr d7, [x15, #0x10]\n"
- "mov v6.d[1], x20\n"
- "mov v0.d[1], x10\n"
- "mov v1.d[1], x28\n"
- "mov v2.d[1], x26\n"
- "mov v3.d[1], x24\n"
- "mov v7.d[1], x11\n"
+ "mov v0.d[1], x9\n"
+ "mov v1.d[1], x27\n"
+ "ldr d2, [x26, #0x0]\n"
+ "ldr d3, [x24, #0x0]\n"
+ "mov v2.d[1], x25\n"
+ "mov v3.d[1], x23\n"
"bge 88b\n"
"89:" // Height 4: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "add x12, x12, #0x10\n"
+ "ldr q7, [x15, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "sub x13, x13, #0x10\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
- "add x27, x27, #0x10\n"
+ "add x12, x12, #0x10\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
- "ldr q6, [x15, #0x20]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "add x25, x25, #0x10\n"
+ "ldr q6, [x15, #0x20]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "sub x13, x13, #0x10\n"
+ "add x28, x28, #0x10\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "prfm pldl1keep, [x12, #0x80]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
"ldr q7, [x15, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "add x26, x26, #0x10\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n"
- "ldr q6, [x15, #0x40]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
+ "ldr q6, [x15, #0x40]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
@@ -1691,13 +1691,13 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"91:" // Height 4: Multiply loop: Odd block loop
"ldr s0, [x12], #0x4\n"
"sub x13, x13, #0x4\n"
- "ldr s1, [x9], #0x4\n"
+ "ldr s1, [x28], #0x4\n"
"cmp x13, #0x4\n"
- "ldr s2, [x27], #0x4\n"
- "ldr s3, [x25], #0x4\n"
+ "ldr s2, [x26], #0x4\n"
+ "ldr s3, [x24], #0x4\n"
"ldr q6, [x15, #0x0]\n"
- ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x15, #0x10]\n"
+ ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
@@ -1717,28 +1717,28 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
"bge 91b\n"
- "92:" // Height 4: Multiply loop: Skip odd blocks
"cbz x13, 95f\n"
+ "92:" // Height 4: Multiply loop: Skip odd blocks
"tbz x13, #1, 93f\n"
"ldr h0, [x12], #0x2\n"
- "ldr h1, [x9], #0x2\n"
- "ldr h2, [x27], #0x2\n"
- "ldr h3, [x25], #0x2\n"
+ "ldr h1, [x28], #0x2\n"
+ "ldr h2, [x26], #0x2\n"
+ "ldr h3, [x24], #0x2\n"
"tbz x13, #0, 94f\n"
"ld1 { v0.b }[2], [x12]\n"
- "ld1 { v1.b }[2], [x9]\n"
- "ld1 { v2.b }[2], [x27]\n"
- "ld1 { v3.b }[2], [x25]\n"
+ "ld1 { v1.b }[2], [x28]\n"
+ "ld1 { v2.b }[2], [x26]\n"
+ "ld1 { v3.b }[2], [x24]\n"
"b 94f\n"
"93:" // Height 4: Multiply loop: Ragged operand read: partial_1_0
"ldr b0, [x12, #0x0]\n"
- "ldr b1, [x9, #0x0]\n"
- "ldr b2, [x27, #0x0]\n"
- "ldr b3, [x25, #0x0]\n"
+ "ldr b1, [x28, #0x0]\n"
+ "ldr b2, [x26, #0x0]\n"
+ "ldr b3, [x24, #0x0]\n"
"94:" // Height 4: Multiply loop: Ragged operand read: Done
"ldr q6, [x15, #0x0]\n"
- ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x15, #0x10]\n"
+ ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
@@ -1758,56 +1758,56 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
"95:" // Height 4: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x14, x14, #0x1\n"
- "cmp x14, x20\n"
+ "cmp x14, x19\n"
"bne 85b\n"
- "ldr q0, [x6, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x16, #0x0]\n"
+ "ldr q0, [x8, #0x0]\n"
+ "ldr q1, [x8, #0x10]\n"
+ "add x23, x16, x19\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19\n"
"add v8.4s, v8.4s, v0.4s\n"
- "ldr q1, [x6, #0x10]\n"
- "add v9.4s, v9.4s, v1.4s\n"
- "ldr q2, [x6, #0x20]\n"
- "add v10.4s, v10.4s, v2.4s\n"
- "ldr q3, [x6, #0x30]\n"
- "add v11.4s, v11.4s, v3.4s\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
- "prfm pstl1keep, [x17, #0x0]\n"
"add v12.4s, v12.4s, v0.4s\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "add v9.4s, v9.4s, v1.4s\n"
"add v13.4s, v13.4s, v1.4s\n"
- "prfm pstl1keep, [x23, #0x0]\n"
- "add v14.4s, v14.4s, v2.4s\n"
- "prfm pstl1keep, [x22, #0x0]\n"
- "add v15.4s, v15.4s, v3.4s\n"
"add v16.4s, v16.4s, v0.4s\n"
"add v17.4s, v17.4s, v1.4s\n"
- "add v18.4s, v18.4s, v2.4s\n"
- "add v19.4s, v19.4s, v3.4s\n"
"add v20.4s, v20.4s, v0.4s\n"
"add v21.4s, v21.4s, v1.4s\n"
- "add v22.4s, v22.4s, v2.4s\n"
- "add v23.4s, v23.4s, v3.4s\n"
- "add x6, x6, #0x40\n"
- "tbz %x[flags], #4, 96f\n"
- "ldr q0, [x8, #0x0]\n"
- "ldr q4, [x7, #0x0]\n"
- "ldr q1, [x8, #0x10]\n"
- "ldr q5, [x7, #0x10]\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
"ldr q2, [x8, #0x20]\n"
- "ldr q6, [x7, #0x20]\n"
"ldr q3, [x8, #0x30]\n"
"add x8, x8, #0x40\n"
- "ldr q7, [x7, #0x30]\n"
+ "add v10.4s, v10.4s, v2.4s\n"
+ "add v14.4s, v14.4s, v2.4s\n"
+ "add v11.4s, v11.4s, v3.4s\n"
+ "add v15.4s, v15.4s, v3.4s\n"
+ "add v18.4s, v18.4s, v2.4s\n"
+ "add v19.4s, v19.4s, v3.4s\n"
+ "add v22.4s, v22.4s, v2.4s\n"
+ "add v23.4s, v23.4s, v3.4s\n"
+ "tbz %x[flags], #4, 96f\n"
+ "ldr q0, [x7, #0x0]\n"
+ "ldr q4, [x6, #0x0]\n"
+ "ldr q1, [x7, #0x10]\n"
+ "ldr q5, [x6, #0x10]\n"
+ "ldr q2, [x7, #0x20]\n"
+ "ldr q6, [x6, #0x20]\n"
+ "ldr q3, [x7, #0x30]\n"
"add x7, x7, #0x40\n"
+ "ldr q7, [x6, #0x30]\n"
+ "add x6, x6, #0x40\n"
"b 97f\n"
"96:" // Height 4: per layer parameters
"add x25, %x[qp], %[per_layer_right_shift]\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
"ld1r { v0.4s }, [x25]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x25]\n"
+ "ld1r { v4.4s }, [x24]\n"
"mov v1.16b, v0.16b\n"
"mov v5.16b, v4.16b\n"
"mov v2.16b, v0.16b\n"
@@ -1841,10 +1841,10 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v8.4s, v8.4s, v4.4s\n"
+ "and v4.16b, v12.16b, v0.16b\n"
"sqadd v9.4s, v9.4s, v5.4s\n"
"sqadd v10.4s, v10.4s, v6.4s\n"
"sqadd v11.4s, v11.4s, v7.4s\n"
- "and v4.16b, v12.16b, v0.16b\n"
"and v5.16b, v13.16b, v1.16b\n"
"and v6.16b, v14.16b, v2.16b\n"
"and v7.16b, v15.16b, v3.16b\n"
@@ -1853,11 +1853,11 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v12.4s, v12.4s, v4.4s\n"
+ "and v4.16b, v16.16b, v0.16b\n"
"sqadd v13.4s, v13.4s, v5.4s\n"
+ "and v5.16b, v17.16b, v1.16b\n"
"sqadd v14.4s, v14.4s, v6.4s\n"
"sqadd v15.4s, v15.4s, v7.4s\n"
- "and v4.16b, v16.16b, v0.16b\n"
- "and v5.16b, v17.16b, v1.16b\n"
"and v6.16b, v18.16b, v2.16b\n"
"and v7.16b, v19.16b, v3.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
@@ -1865,18 +1865,18 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v16.4s, v16.4s, v4.4s\n"
+ "and v4.16b, v20.16b, v0.16b\n"
"sqadd v17.4s, v17.4s, v5.4s\n"
+ "and v5.16b, v21.16b, v1.16b\n"
"sqadd v18.4s, v18.4s, v6.4s\n"
"sqadd v19.4s, v19.4s, v7.4s\n"
- "and v4.16b, v20.16b, v0.16b\n"
- "and v5.16b, v21.16b, v1.16b\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
"and v6.16b, v22.16b, v2.16b\n"
"and v7.16b, v23.16b, v3.16b\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
+ "sqadd v20.4s, v20.4s, v4.4s\n"
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
- "sqadd v20.4s, v20.4s, v4.4s\n"
"sqadd v21.4s, v21.4s, v5.4s\n"
"sqadd v22.4s, v22.4s, v6.4s\n"
"sqadd v23.4s, v23.4s, v7.4s\n"
@@ -1897,8 +1897,13 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"srshl v21.4s, v21.4s, v1.4s\n"
"srshl v22.4s, v22.4s, v2.4s\n"
"srshl v23.4s, v23.4s, v3.4s\n"
- "add x25, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x25]\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "add x25, %x[qp], %[minval]\n"
+ "ld1r { v4.4s }, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
+ "ld1r { v5.4s }, [x25]\n"
+ "cmp x17, #0x10\n"
+ "ld1r { v6.4s }, [x24]\n"
"add v8.4s, v8.4s, v4.4s\n"
"add v9.4s, v9.4s, v4.4s\n"
"add v10.4s, v10.4s, v4.4s\n"
@@ -1909,14 +1914,6 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"add v15.4s, v15.4s, v4.4s\n"
"add v16.4s, v16.4s, v4.4s\n"
"add v17.4s, v17.4s, v4.4s\n"
- "add v18.4s, v18.4s, v4.4s\n"
- "add v19.4s, v19.4s, v4.4s\n"
- "add v20.4s, v20.4s, v4.4s\n"
- "add v21.4s, v21.4s, v4.4s\n"
- "add v22.4s, v22.4s, v4.4s\n"
- "add v23.4s, v23.4s, v4.4s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x25]\n"
"smin v8.4s, v8.4s, v6.4s\n"
"smin v9.4s, v9.4s, v6.4s\n"
"smin v10.4s, v10.4s, v6.4s\n"
@@ -1927,14 +1924,6 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"smin v15.4s, v15.4s, v6.4s\n"
"smin v16.4s, v16.4s, v6.4s\n"
"smin v17.4s, v17.4s, v6.4s\n"
- "smin v18.4s, v18.4s, v6.4s\n"
- "smin v19.4s, v19.4s, v6.4s\n"
- "smin v20.4s, v20.4s, v6.4s\n"
- "smin v21.4s, v21.4s, v6.4s\n"
- "smin v22.4s, v22.4s, v6.4s\n"
- "smin v23.4s, v23.4s, v6.4s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x25]\n"
"smax v8.4s, v8.4s, v5.4s\n"
"smax v9.4s, v9.4s, v5.4s\n"
"smax v10.4s, v10.4s, v5.4s\n"
@@ -1945,132 +1934,143 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"smax v15.4s, v15.4s, v5.4s\n"
"smax v16.4s, v16.4s, v5.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
+ "add v18.4s, v18.4s, v4.4s\n"
+ "add v19.4s, v19.4s, v4.4s\n"
+ "add v20.4s, v20.4s, v4.4s\n"
+ "add v21.4s, v21.4s, v4.4s\n"
+ "add v22.4s, v22.4s, v4.4s\n"
+ "add v23.4s, v23.4s, v4.4s\n"
+ "uzp1 v8.8h, v8.8h, v9.8h\n"
+ "uzp1 v9.8h, v10.8h, v11.8h\n"
+ "uzp1 v12.8h, v12.8h, v13.8h\n"
+ "uzp1 v13.8h, v14.8h, v15.8h\n"
+ "smin v18.4s, v18.4s, v6.4s\n"
+ "smin v19.4s, v19.4s, v6.4s\n"
+ "smin v20.4s, v20.4s, v6.4s\n"
+ "smin v21.4s, v21.4s, v6.4s\n"
+ "smin v22.4s, v22.4s, v6.4s\n"
+ "smin v23.4s, v23.4s, v6.4s\n"
+ "uzp1 v16.8h, v16.8h, v17.8h\n"
+ "uzp1 v8.16b, v8.16b, v9.16b\n"
+ "uzp1 v12.16b, v12.16b, v13.16b\n"
"smax v18.4s, v18.4s, v5.4s\n"
"smax v19.4s, v19.4s, v5.4s\n"
"smax v20.4s, v20.4s, v5.4s\n"
"smax v21.4s, v21.4s, v5.4s\n"
"smax v22.4s, v22.4s, v5.4s\n"
"smax v23.4s, v23.4s, v5.4s\n"
- "uzp1 v8.8h, v8.8h, v9.8h\n"
- "uzp1 v9.8h, v10.8h, v11.8h\n"
- "uzp1 v12.8h, v12.8h, v13.8h\n"
- "uzp1 v13.8h, v14.8h, v15.8h\n"
- "uzp1 v16.8h, v16.8h, v17.8h\n"
"uzp1 v17.8h, v18.8h, v19.8h\n"
"uzp1 v20.8h, v20.8h, v21.8h\n"
"uzp1 v21.8h, v22.8h, v23.8h\n"
- "cmp x16, #0x10\n"
- "uzp1 v8.16b, v8.16b, v9.16b\n"
- "uzp1 v12.16b, v12.16b, v13.16b\n"
"uzp1 v16.16b, v16.16b, v17.16b\n"
"uzp1 v20.16b, v20.16b, v21.16b\n"
"bge 107f\n"
- "tbz x16, #3, 102f\n"
- "str d8, [x17], #0x8\n"
- "str d12, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d20, [x22], #0x8\n"
- "tbz x16, #2, 100f\n"
- "st1 { v8.s }[2], [x17], #0x4\n"
- "st1 { v12.s }[2], [x24], #0x4\n"
- "st1 { v16.s }[2], [x23], #0x4\n"
- "st1 { v20.s }[2], [x22], #0x4\n"
- "tbz x16, #1, 99f\n"
- "st1 { v8.h }[6], [x17], #0x2\n"
- "st1 { v12.h }[6], [x24], #0x2\n"
- "st1 { v16.h }[6], [x23], #0x2\n"
- "st1 { v20.h }[6], [x22], #0x2\n"
- "tbz x16, #0, 106f\n"
- "st1 { v8.b }[14], [x17]\n"
- "st1 { v12.b }[14], [x24]\n"
- "st1 { v16.b }[14], [x23]\n"
- "st1 { v20.b }[14], [x22]\n"
+ "tbz x17, #3, 102f\n"
+ "str d8, [x16], #0x8\n"
+ "str d12, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "str d20, [x21], #0x8\n"
+ "tbz x17, #2, 100f\n"
+ "st1 { v8.s }[2], [x16], #0x4\n"
+ "st1 { v12.s }[2], [x23], #0x4\n"
+ "st1 { v16.s }[2], [x22], #0x4\n"
+ "st1 { v20.s }[2], [x21], #0x4\n"
+ "tbz x17, #1, 99f\n"
+ "st1 { v8.h }[6], [x16], #0x2\n"
+ "st1 { v12.h }[6], [x23], #0x2\n"
+ "st1 { v16.h }[6], [x22], #0x2\n"
+ "st1 { v20.h }[6], [x21], #0x2\n"
+ "tbz x17, #0, 106f\n"
+ "st1 { v8.b }[14], [x16]\n"
+ "st1 { v12.b }[14], [x23]\n"
+ "st1 { v16.b }[14], [x22]\n"
+ "st1 { v20.b }[14], [x21]\n"
"b 106f\n"
"99:" // Height 4: Partial direct writeback: partial_1_12
- "tbz x16, #0, 106f\n"
- "st1 { v8.b }[12], [x17]\n"
- "st1 { v12.b }[12], [x24]\n"
- "st1 { v16.b }[12], [x23]\n"
- "st1 { v20.b }[12], [x22]\n"
+ "tbz x17, #0, 106f\n"
+ "st1 { v8.b }[12], [x16]\n"
+ "st1 { v12.b }[12], [x23]\n"
+ "st1 { v16.b }[12], [x22]\n"
+ "st1 { v20.b }[12], [x21]\n"
"b 106f\n"
"100:" // Height 4: Partial direct writeback: partial_2_8
- "tbz x16, #1, 101f\n"
- "st1 { v8.h }[4], [x17], #0x2\n"
- "st1 { v12.h }[4], [x24], #0x2\n"
- "st1 { v16.h }[4], [x23], #0x2\n"
- "st1 { v20.h }[4], [x22], #0x2\n"
- "tbz x16, #0, 106f\n"
- "st1 { v8.b }[10], [x17]\n"
- "st1 { v12.b }[10], [x24]\n"
- "st1 { v16.b }[10], [x23]\n"
- "st1 { v20.b }[10], [x22]\n"
+ "tbz x17, #1, 101f\n"
+ "st1 { v8.h }[4], [x16], #0x2\n"
+ "st1 { v12.h }[4], [x23], #0x2\n"
+ "st1 { v16.h }[4], [x22], #0x2\n"
+ "st1 { v20.h }[4], [x21], #0x2\n"
+ "tbz x17, #0, 106f\n"
+ "st1 { v8.b }[10], [x16]\n"
+ "st1 { v12.b }[10], [x23]\n"
+ "st1 { v16.b }[10], [x22]\n"
+ "st1 { v20.b }[10], [x21]\n"
"b 106f\n"
"101:" // Height 4: Partial direct writeback: partial_1_8
- "tbz x16, #0, 106f\n"
- "st1 { v8.b }[8], [x17]\n"
- "st1 { v12.b }[8], [x24]\n"
- "st1 { v16.b }[8], [x23]\n"
- "st1 { v20.b }[8], [x22]\n"
+ "tbz x17, #0, 106f\n"
+ "st1 { v8.b }[8], [x16]\n"
+ "st1 { v12.b }[8], [x23]\n"
+ "st1 { v16.b }[8], [x22]\n"
+ "st1 { v20.b }[8], [x21]\n"
"b 106f\n"
"102:" // Height 4: Partial direct writeback: partial_4_0
- "tbz x16, #2, 104f\n"
- "str s8, [x17], #0x4\n"
- "str s12, [x24], #0x4\n"
- "str s16, [x23], #0x4\n"
- "str s20, [x22], #0x4\n"
- "tbz x16, #1, 103f\n"
- "st1 { v8.h }[2], [x17], #0x2\n"
- "st1 { v12.h }[2], [x24], #0x2\n"
- "st1 { v16.h }[2], [x23], #0x2\n"
- "st1 { v20.h }[2], [x22], #0x2\n"
- "tbz x16, #0, 106f\n"
- "st1 { v8.b }[6], [x17]\n"
- "st1 { v12.b }[6], [x24]\n"
- "st1 { v16.b }[6], [x23]\n"
- "st1 { v20.b }[6], [x22]\n"
+ "tbz x17, #2, 104f\n"
+ "str s8, [x16], #0x4\n"
+ "str s12, [x23], #0x4\n"
+ "str s16, [x22], #0x4\n"
+ "str s20, [x21], #0x4\n"
+ "tbz x17, #1, 103f\n"
+ "st1 { v8.h }[2], [x16], #0x2\n"
+ "st1 { v12.h }[2], [x23], #0x2\n"
+ "st1 { v16.h }[2], [x22], #0x2\n"
+ "st1 { v20.h }[2], [x21], #0x2\n"
+ "tbz x17, #0, 106f\n"
+ "st1 { v8.b }[6], [x16]\n"
+ "st1 { v12.b }[6], [x23]\n"
+ "st1 { v16.b }[6], [x22]\n"
+ "st1 { v20.b }[6], [x21]\n"
"b 106f\n"
"103:" // Height 4: Partial direct writeback: partial_1_4
- "tbz x16, #0, 106f\n"
- "st1 { v8.b }[4], [x17]\n"
- "st1 { v12.b }[4], [x24]\n"
- "st1 { v16.b }[4], [x23]\n"
- "st1 { v20.b }[4], [x22]\n"
+ "tbz x17, #0, 106f\n"
+ "st1 { v8.b }[4], [x16]\n"
+ "st1 { v12.b }[4], [x23]\n"
+ "st1 { v16.b }[4], [x22]\n"
+ "st1 { v20.b }[4], [x21]\n"
"b 106f\n"
"104:" // Height 4: Partial direct writeback: partial_2_0
- "tbz x16, #1, 105f\n"
- "str h8, [x17], #0x2\n"
- "str h12, [x24], #0x2\n"
- "str h16, [x23], #0x2\n"
- "str h20, [x22], #0x2\n"
- "tbz x16, #0, 106f\n"
- "st1 { v8.b }[2], [x17]\n"
- "st1 { v12.b }[2], [x24]\n"
- "st1 { v16.b }[2], [x23]\n"
- "st1 { v20.b }[2], [x22]\n"
+ "tbz x17, #1, 105f\n"
+ "str h8, [x16], #0x2\n"
+ "str h12, [x23], #0x2\n"
+ "str h16, [x22], #0x2\n"
+ "str h20, [x21], #0x2\n"
+ "tbz x17, #0, 106f\n"
+ "st1 { v8.b }[2], [x16]\n"
+ "st1 { v12.b }[2], [x23]\n"
+ "st1 { v16.b }[2], [x22]\n"
+ "st1 { v20.b }[2], [x21]\n"
"b 106f\n"
"105:" // Height 4: Partial direct writeback: partial_1_0
- "str b8, [x17, #0x0]\n"
- "str b12, [x24, #0x0]\n"
- "str b16, [x23, #0x0]\n"
- "str b20, [x22, #0x0]\n"
+ "str b8, [x16, #0x0]\n"
+ "str b12, [x23, #0x0]\n"
+ "str b16, [x22, #0x0]\n"
+ "str b20, [x21, #0x0]\n"
"106:" // Height 4: Partial direct writeback: Done
"b 108f\n"
"107:" // Height 4: Full writeback
- "str q8, [x17, #0x0]\n"
- "add x17, x17, #0x10\n"
- "str q12, [x24, #0x0]\n"
- "str q16, [x23, #0x0]\n"
- "str q20, [x22, #0x0]\n"
+ "str q8, [x16, #0x0]\n"
+ "add x16, x16, #0x10\n"
+ "str q12, [x23, #0x0]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q20, [x21, #0x0]\n"
"108:" // Height 4: Writeback done
- "subs x16, x16, #0x10\n"
+ "subs x17, x17, #0x10\n"
"bgt 83b\n"
"b 164f\n"
"109:" // Height 5
- "mov x6, %x[col_bias]\n"
- "ldr x7, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
- "ldr x8, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x17, %x[output_ptr]\n"
- "ldr x16, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x6, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
+ "mov x8, %x[col_bias]\n"
+ "ldr x7, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
+ "mov x16, %x[output_ptr]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x15, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"110:" // Height 5: Column loop
"movi v8.4s, #0x0\n"
@@ -2097,228 +2097,228 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"mov x14, #0x0\n"
"112:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"ldr w13, [x20, x14, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 113f\n"
- "ldr x21, [%x[input_ptr], x14, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x12, [x21, #0x0]\n"
- "ldr x9, [x21, #0x8]\n"
- "ldr x27, [x21, #0x10]\n"
- "ldr x25, [x21, #0x18]\n"
- "ldr x23, [x21, #0x20]\n"
+ "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x12, [x20, #0x0]\n"
+ "ldr x28, [x20, #0x8]\n"
+ "ldr x26, [x20, #0x10]\n"
+ "ldr x24, [x20, #0x18]\n"
+ "ldr x22, [x20, #0x20]\n"
"cbnz x14, 114f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x12, x12, x20\n"
- "add x9, x9, x20\n"
- "add x27, x27, x20\n"
- "add x25, x25, x20\n"
- "add x23, x23, x20\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x12, x12, x19\n"
+ "add x28, x28, x19\n"
+ "add x26, x26, x19\n"
+ "add x24, x24, x19\n"
+ "add x22, x22, x19\n"
"b 114f\n"
"113:" // Height 5: setup direct input
"mov x12, %x[input_ptr]\n"
- "add x9, x12, x20\n"
- "add x27, x9, x20\n"
- "add x25, x27, x20\n"
- "add x23, x25, x20\n"
+ "add x28, x12, x19\n"
+ "add x26, x28, x19\n"
+ "add x24, x26, x19\n"
+ "add x22, x24, x19\n"
"114:" // Height 5: input setup done
"cmp x13, #0x10\n"
"blt 117f\n"
"ldr q0, [x12, #0x0]\n"
+ "ldr q1, [x28, #0x0]\n"
"cmp x13, #0x20\n"
- "ldr q1, [x9, #0x0]\n"
- "ldr q2, [x27, #0x0]\n"
- "ldr q3, [x25, #0x0]\n"
- "ldr q4, [x23, #0x0]\n"
+ "ldr q2, [x26, #0x0]\n"
+ "ldr q3, [x24, #0x0]\n"
+ "ldr q4, [x22, #0x0]\n"
"ldr q6, [x15, #0x0]\n"
- "ldr q7, [x15, #0x10]\n"
"blt 116f\n"
"115:" // Height 5: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr x20, [x15, #0x28]\n"
+ "ldr d7, [x15, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr x11, [x15, #0x38]\n"
+ "ldr x11, [x15, #0x18]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
- "add x12, x12, #0x10\n"
+ "ldr x10, [x15, #0x28]\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "add x12, x12, #0x10\n"
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
- "ldr d6, [x15, #0x20]\n"
+ "mov v7.d[1], x11\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
+ "add x28, x28, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x20\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr x20, [x15, #0x48]\n"
+ "ldr d6, [x15, #0x20]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "add x27, x27, #0x10\n"
+ "ldr x11, [x15, #0x38]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
- "add x25, x25, #0x10\n"
+ "ldr x9, [x12, #0x8]\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x15, #0x30]\n"
- "mov v7.d[1], x11\n"
+ "add x26, x26, #0x10\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "ldr x11, [x15, #0x58]\n"
+ "ldr x10, [x15, #0x48]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
- "add x23, x23, #0x10\n"
+ "mov v7.d[1], x11\n"
".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n"
- "ldr x10, [x12, #0x8]\n"
+ "ldr x11, [x15, #0x58]\n"
".inst 0x4f84e0da // sdot v26.4s, v6.16b, v4.4b[0]\n"
"ldr d6, [x15, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x20\n"
+ "ldr x27, [x28, #0x8]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
- "ldr x20, [x15, #0x68]\n"
+ "ldr x25, [x26, #0x8]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
- "ldr x28, [x9, #0x8]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
- "ldr x26, [x27, #0x8]\n"
+ "ldr x10, [x15, #0x68]\n"
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
"ldr d7, [x15, #0x50]\n"
- "mov v7.d[1], x11\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
+ "add x24, x24, #0x10\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
- "ldr x11, [x15, #0x78]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n"
- "ldr x24, [x25, #0x8]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4fa3e0d4 // sdot v20.4s, v6.16b, v3.4b[1]\n"
- "ldr x22, [x23, #0x8]\n"
+ "ldr x11, [x15, #0x78]\n"
".inst 0x4fa4e0d8 // sdot v24.4s, v6.16b, v4.4b[1]\n"
"ldr d6, [x15, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
- "mov v6.d[1], x20\n"
+ "ldr x23, [x24, #0x8]\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
- "ldr x20, [x15, #0x88]\n"
+ "add x22, x22, #0x10\n"
".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n"
- "sub x13, x13, #0x10\n"
+ "mov v6.d[1], x10\n"
".inst 0x4fa3e0f5 // sdot v21.4s, v7.16b, v3.4b[1]\n"
- "cmp x13, #0x20\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4fa4e0f9 // sdot v25.4s, v7.16b, v4.4b[1]\n"
"ldr d7, [x15, #0x70]\n"
- "mov v7.d[1], x11\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
+ "ldr x10, [x15, #0x88]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
- "ldr x11, [x15, #0x98]\n"
+ "ldr x21, [x22, #0x8]\n"
".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n"
- "prfm pldl1keep, [x12, #0x80]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4fa3e0d6 // sdot v22.4s, v6.16b, v3.4b[1]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "ldr x11, [x15, #0x98]\n"
".inst 0x4fa4e0da // sdot v26.4s, v6.16b, v4.4b[1]\n"
"ldr d6, [x15, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
- "mov v6.d[1], x20\n"
+ "sub x13, x13, #0x10\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
- "ldr x20, [x15, #0xa8]\n"
+ "cmp x13, #0x20\n"
".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4fa3e0f7 // sdot v23.4s, v7.16b, v3.4b[1]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr x10, [x15, #0xa8]\n"
".inst 0x4fa4e0fb // sdot v27.4s, v7.16b, v4.4b[1]\n"
"ldr d7, [x15, #0x90]\n"
- "mov v7.d[1], x11\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
- "ldr x11, [x15, #0xb8]\n"
".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4f83e8d4 // sdot v20.4s, v6.16b, v3.4b[2]\n"
+ "ldr x11, [x15, #0xb8]\n"
".inst 0x4f84e8d8 // sdot v24.4s, v6.16b, v4.4b[2]\n"
"ldr d6, [x15, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
- "mov v6.d[1], x20\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
- "ldr x20, [x15, #0xc8]\n"
".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4f83e8f5 // sdot v21.4s, v7.16b, v3.4b[2]\n"
+ "ldr x10, [x15, #0xc8]\n"
".inst 0x4f84e8f9 // sdot v25.4s, v7.16b, v4.4b[2]\n"
"ldr d7, [x15, #0xb0]\n"
- "mov v7.d[1], x11\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
- "ldr x11, [x15, #0xd8]\n"
".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4f83e8d6 // sdot v22.4s, v6.16b, v3.4b[2]\n"
+ "ldr x11, [x15, #0xd8]\n"
".inst 0x4f84e8da // sdot v26.4s, v6.16b, v4.4b[2]\n"
"ldr d6, [x15, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
- "mov v6.d[1], x20\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
- "ldr x20, [x15, #0xe8]\n"
".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4f83e8f7 // sdot v23.4s, v7.16b, v3.4b[2]\n"
+ "ldr x10, [x15, #0xe8]\n"
".inst 0x4f84e8fb // sdot v27.4s, v7.16b, v4.4b[2]\n"
"ldr d7, [x15, #0xd0]\n"
- "mov v7.d[1], x11\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
- "ldr x11, [x15, #0xf8]\n"
".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4fa3e8d4 // sdot v20.4s, v6.16b, v3.4b[3]\n"
+ "ldr x11, [x15, #0xf8]\n"
".inst 0x4fa4e8d8 // sdot v24.4s, v6.16b, v4.4b[3]\n"
"ldr d6, [x15, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
- "mov v6.d[1], x20\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4fa3e8f5 // sdot v21.4s, v7.16b, v3.4b[3]\n"
".inst 0x4fa4e8f9 // sdot v25.4s, v7.16b, v4.4b[3]\n"
"ldr d7, [x15, #0xf0]\n"
- "mov v7.d[1], x11\n"
- "add x15, x15, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
- "ldr x20, [x15, #0x8]\n"
+ "add x15, x15, #0x100\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
- "ldr x11, [x15, #0x18]\n"
+ "ldr x10, [x15, #0x8]\n"
".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4fa3e8d6 // sdot v22.4s, v6.16b, v3.4b[3]\n"
".inst 0x4fa4e8da // sdot v26.4s, v6.16b, v4.4b[3]\n"
"ldr d6, [x15, #0x0]\n"
".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
"ldr d0, [x12, #0x0]\n"
".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n"
- "ldr d1, [x9, #0x0]\n"
+ "ldr d1, [x28, #0x0]\n"
".inst 0x4fa2e8f3 // sdot v19.4s, v7.16b, v2.4b[3]\n"
- "ldr d2, [x27, #0x0]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4fa3e8f7 // sdot v23.4s, v7.16b, v3.4b[3]\n"
- "ldr d3, [x25, #0x0]\n"
+ "mov v0.d[1], x9\n"
".inst 0x4fa4e8fb // sdot v27.4s, v7.16b, v4.4b[3]\n"
- "ldr d4, [x23, #0x0]\n"
- "ldr d7, [x15, #0x10]\n"
- "mov v6.d[1], x20\n"
- "mov v0.d[1], x10\n"
- "mov v1.d[1], x28\n"
- "mov v2.d[1], x26\n"
- "mov v3.d[1], x24\n"
- "mov v4.d[1], x22\n"
- "mov v7.d[1], x11\n"
+ "mov v1.d[1], x27\n"
+ "ldr d2, [x26, #0x0]\n"
+ "ldr d3, [x24, #0x0]\n"
+ "ldr d4, [x22, #0x0]\n"
+ "mov v2.d[1], x25\n"
+ "mov v3.d[1], x23\n"
+ "mov v4.d[1], x21\n"
"bge 115b\n"
"116:" // Height 5: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "add x12, x12, #0x10\n"
+ "ldr q7, [x15, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "sub x13, x13, #0x10\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
- "add x27, x27, #0x10\n"
+ "add x12, x12, #0x10\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
- "add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
"ldr q6, [x15, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "add x23, x23, #0x10\n"
+ "add x28, x28, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "sub x13, x13, #0x10\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "prfm pldl1keep, [x12, #0x80]\n"
+ "add x26, x26, #0x10\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
"ldr q7, [x15, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "add x24, x24, #0x10\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "add x22, x22, #0x10\n"
".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4f84e0da // sdot v26.4s, v6.16b, v4.4b[0]\n"
"ldr q6, [x15, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
@@ -2405,14 +2405,14 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"118:" // Height 5: Multiply loop: Odd block loop
"ldr s0, [x12], #0x4\n"
"sub x13, x13, #0x4\n"
- "ldr s1, [x9], #0x4\n"
+ "ldr s1, [x28], #0x4\n"
"cmp x13, #0x4\n"
- "ldr s2, [x27], #0x4\n"
- "ldr s3, [x25], #0x4\n"
- "ldr s4, [x23], #0x4\n"
+ "ldr s2, [x26], #0x4\n"
+ "ldr s3, [x24], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
"ldr q6, [x15, #0x0]\n"
- ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x15, #0x10]\n"
+ ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
@@ -2436,31 +2436,31 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
"bge 118b\n"
- "119:" // Height 5: Multiply loop: Skip odd blocks
"cbz x13, 122f\n"
+ "119:" // Height 5: Multiply loop: Skip odd blocks
"tbz x13, #1, 120f\n"
"ldr h0, [x12], #0x2\n"
- "ldr h1, [x9], #0x2\n"
- "ldr h2, [x27], #0x2\n"
- "ldr h3, [x25], #0x2\n"
- "ldr h4, [x23], #0x2\n"
+ "ldr h1, [x28], #0x2\n"
+ "ldr h2, [x26], #0x2\n"
+ "ldr h3, [x24], #0x2\n"
+ "ldr h4, [x22], #0x2\n"
"tbz x13, #0, 121f\n"
"ld1 { v0.b }[2], [x12]\n"
- "ld1 { v1.b }[2], [x9]\n"
- "ld1 { v2.b }[2], [x27]\n"
- "ld1 { v3.b }[2], [x25]\n"
- "ld1 { v4.b }[2], [x23]\n"
+ "ld1 { v1.b }[2], [x28]\n"
+ "ld1 { v2.b }[2], [x26]\n"
+ "ld1 { v3.b }[2], [x24]\n"
+ "ld1 { v4.b }[2], [x22]\n"
"b 121f\n"
"120:" // Height 5: Multiply loop: Ragged operand read: partial_1_0
"ldr b0, [x12, #0x0]\n"
- "ldr b1, [x9, #0x0]\n"
- "ldr b2, [x27, #0x0]\n"
- "ldr b3, [x25, #0x0]\n"
- "ldr b4, [x23, #0x0]\n"
+ "ldr b1, [x28, #0x0]\n"
+ "ldr b2, [x26, #0x0]\n"
+ "ldr b3, [x24, #0x0]\n"
+ "ldr b4, [x22, #0x0]\n"
"121:" // Height 5: Multiply loop: Ragged operand read: Done
"ldr q6, [x15, #0x0]\n"
- ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x15, #0x10]\n"
+ ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
@@ -2484,62 +2484,62 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
"122:" // Height 5: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x14, x14, #0x1\n"
- "cmp x14, x20\n"
+ "cmp x14, x19\n"
"bne 112b\n"
- "ldr q0, [x6, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x16, #0x0]\n"
+ "ldr q0, [x8, #0x0]\n"
+ "ldr q1, [x8, #0x10]\n"
+ "add x23, x16, x19\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19\n"
"add v8.4s, v8.4s, v0.4s\n"
- "ldr q1, [x6, #0x10]\n"
- "add v9.4s, v9.4s, v1.4s\n"
- "ldr q2, [x6, #0x20]\n"
- "add v10.4s, v10.4s, v2.4s\n"
- "ldr q3, [x6, #0x30]\n"
- "add v11.4s, v11.4s, v3.4s\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
- "prfm pstl1keep, [x17, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
"add v12.4s, v12.4s, v0.4s\n"
- "prfm pstl1keep, [x23, #0x0]\n"
+ "add v9.4s, v9.4s, v1.4s\n"
"add v13.4s, v13.4s, v1.4s\n"
+ "add v16.4s, v16.4s, v0.4s\n"
+ "add v17.4s, v17.4s, v1.4s\n"
+ "add v20.4s, v20.4s, v0.4s\n"
+ "add v21.4s, v21.4s, v1.4s\n"
+ "add v24.4s, v24.4s, v0.4s\n"
+ "add v25.4s, v25.4s, v1.4s\n"
"prfm pstl1keep, [x22, #0x0]\n"
- "add v14.4s, v14.4s, v2.4s\n"
+ "add x21, x22, x19\n"
"prfm pstl1keep, [x21, #0x0]\n"
+ "add x20, x21, x19\n"
+ "prfm pstl1keep, [x20, #0x0]\n"
+ "ldr q2, [x8, #0x20]\n"
+ "ldr q3, [x8, #0x30]\n"
+ "add x8, x8, #0x40\n"
+ "add v10.4s, v10.4s, v2.4s\n"
+ "add v14.4s, v14.4s, v2.4s\n"
+ "add v11.4s, v11.4s, v3.4s\n"
"add v15.4s, v15.4s, v3.4s\n"
- "add v16.4s, v16.4s, v0.4s\n"
- "add v17.4s, v17.4s, v1.4s\n"
"add v18.4s, v18.4s, v2.4s\n"
"add v19.4s, v19.4s, v3.4s\n"
- "add v20.4s, v20.4s, v0.4s\n"
- "add v21.4s, v21.4s, v1.4s\n"
"add v22.4s, v22.4s, v2.4s\n"
"add v23.4s, v23.4s, v3.4s\n"
- "add v24.4s, v24.4s, v0.4s\n"
- "add v25.4s, v25.4s, v1.4s\n"
"add v26.4s, v26.4s, v2.4s\n"
"add v27.4s, v27.4s, v3.4s\n"
- "add x6, x6, #0x40\n"
"tbz %x[flags], #4, 123f\n"
- "ldr q0, [x8, #0x0]\n"
- "ldr q4, [x7, #0x0]\n"
- "ldr q1, [x8, #0x10]\n"
- "ldr q5, [x7, #0x10]\n"
- "ldr q2, [x8, #0x20]\n"
- "ldr q6, [x7, #0x20]\n"
- "ldr q3, [x8, #0x30]\n"
- "add x8, x8, #0x40\n"
- "ldr q7, [x7, #0x30]\n"
+ "ldr q0, [x7, #0x0]\n"
+ "ldr q4, [x6, #0x0]\n"
+ "ldr q1, [x7, #0x10]\n"
+ "ldr q5, [x6, #0x10]\n"
+ "ldr q2, [x7, #0x20]\n"
+ "ldr q6, [x6, #0x20]\n"
+ "ldr q3, [x7, #0x30]\n"
"add x7, x7, #0x40\n"
+ "ldr q7, [x6, #0x30]\n"
+ "add x6, x6, #0x40\n"
"b 124f\n"
"123:" // Height 5: per layer parameters
"add x25, %x[qp], %[per_layer_right_shift]\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
"ld1r { v0.4s }, [x25]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x25]\n"
+ "ld1r { v4.4s }, [x24]\n"
"mov v1.16b, v0.16b\n"
"mov v5.16b, v4.16b\n"
"mov v2.16b, v0.16b\n"
@@ -2577,10 +2577,10 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v8.4s, v8.4s, v4.4s\n"
+ "and v4.16b, v12.16b, v0.16b\n"
"sqadd v9.4s, v9.4s, v5.4s\n"
"sqadd v10.4s, v10.4s, v6.4s\n"
"sqadd v11.4s, v11.4s, v7.4s\n"
- "and v4.16b, v12.16b, v0.16b\n"
"and v5.16b, v13.16b, v1.16b\n"
"and v6.16b, v14.16b, v2.16b\n"
"and v7.16b, v15.16b, v3.16b\n"
@@ -2589,11 +2589,11 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v12.4s, v12.4s, v4.4s\n"
+ "and v4.16b, v16.16b, v0.16b\n"
"sqadd v13.4s, v13.4s, v5.4s\n"
+ "and v5.16b, v17.16b, v1.16b\n"
"sqadd v14.4s, v14.4s, v6.4s\n"
"sqadd v15.4s, v15.4s, v7.4s\n"
- "and v4.16b, v16.16b, v0.16b\n"
- "and v5.16b, v17.16b, v1.16b\n"
"and v6.16b, v18.16b, v2.16b\n"
"and v7.16b, v19.16b, v3.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
@@ -2601,31 +2601,31 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v16.4s, v16.4s, v4.4s\n"
+ "and v4.16b, v20.16b, v0.16b\n"
"sqadd v17.4s, v17.4s, v5.4s\n"
+ "and v5.16b, v21.16b, v1.16b\n"
"sqadd v18.4s, v18.4s, v6.4s\n"
"sqadd v19.4s, v19.4s, v7.4s\n"
- "and v4.16b, v20.16b, v0.16b\n"
- "and v5.16b, v21.16b, v1.16b\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
"and v6.16b, v22.16b, v2.16b\n"
"and v7.16b, v23.16b, v3.16b\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
+ "sqadd v20.4s, v20.4s, v4.4s\n"
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
- "sqadd v20.4s, v20.4s, v4.4s\n"
- "sqadd v21.4s, v21.4s, v5.4s\n"
- "sqadd v22.4s, v22.4s, v6.4s\n"
- "sqadd v23.4s, v23.4s, v7.4s\n"
"and v4.16b, v24.16b, v0.16b\n"
+ "sqadd v21.4s, v21.4s, v5.4s\n"
"and v5.16b, v25.16b, v1.16b\n"
+ "sqadd v22.4s, v22.4s, v6.4s\n"
"and v6.16b, v26.16b, v2.16b\n"
- "and v7.16b, v27.16b, v3.16b\n"
+ "sqadd v23.4s, v23.4s, v7.4s\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
+ "and v7.16b, v27.16b, v3.16b\n"
"sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v24.4s, v24.4s, v4.4s\n"
"sqadd v25.4s, v25.4s, v5.4s\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v26.4s, v26.4s, v6.4s\n"
"sqadd v27.4s, v27.4s, v7.4s\n"
"125:" // Height 5: no shift correction
@@ -2649,8 +2649,13 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"srshl v25.4s, v25.4s, v1.4s\n"
"srshl v26.4s, v26.4s, v2.4s\n"
"srshl v27.4s, v27.4s, v3.4s\n"
- "add x25, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x25]\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "add x25, %x[qp], %[minval]\n"
+ "ld1r { v4.4s }, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
+ "ld1r { v5.4s }, [x25]\n"
+ "cmp x17, #0x10\n"
+ "ld1r { v6.4s }, [x24]\n"
"add v8.4s, v8.4s, v4.4s\n"
"add v9.4s, v9.4s, v4.4s\n"
"add v10.4s, v10.4s, v4.4s\n"
@@ -2661,18 +2666,6 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"add v15.4s, v15.4s, v4.4s\n"
"add v16.4s, v16.4s, v4.4s\n"
"add v17.4s, v17.4s, v4.4s\n"
- "add v18.4s, v18.4s, v4.4s\n"
- "add v19.4s, v19.4s, v4.4s\n"
- "add v20.4s, v20.4s, v4.4s\n"
- "add v21.4s, v21.4s, v4.4s\n"
- "add v22.4s, v22.4s, v4.4s\n"
- "add v23.4s, v23.4s, v4.4s\n"
- "add v24.4s, v24.4s, v4.4s\n"
- "add v25.4s, v25.4s, v4.4s\n"
- "add v26.4s, v26.4s, v4.4s\n"
- "add v27.4s, v27.4s, v4.4s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x25]\n"
"smin v8.4s, v8.4s, v6.4s\n"
"smin v9.4s, v9.4s, v6.4s\n"
"smin v10.4s, v10.4s, v6.4s\n"
@@ -2683,18 +2676,6 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"smin v15.4s, v15.4s, v6.4s\n"
"smin v16.4s, v16.4s, v6.4s\n"
"smin v17.4s, v17.4s, v6.4s\n"
- "smin v18.4s, v18.4s, v6.4s\n"
- "smin v19.4s, v19.4s, v6.4s\n"
- "smin v20.4s, v20.4s, v6.4s\n"
- "smin v21.4s, v21.4s, v6.4s\n"
- "smin v22.4s, v22.4s, v6.4s\n"
- "smin v23.4s, v23.4s, v6.4s\n"
- "smin v24.4s, v24.4s, v6.4s\n"
- "smin v25.4s, v25.4s, v6.4s\n"
- "smin v26.4s, v26.4s, v6.4s\n"
- "smin v27.4s, v27.4s, v6.4s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x25]\n"
"smax v8.4s, v8.4s, v5.4s\n"
"smax v9.4s, v9.4s, v5.4s\n"
"smax v10.4s, v10.4s, v5.4s\n"
@@ -2705,6 +2686,26 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"smax v15.4s, v15.4s, v5.4s\n"
"smax v16.4s, v16.4s, v5.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
+ "add v18.4s, v18.4s, v4.4s\n"
+ "add v19.4s, v19.4s, v4.4s\n"
+ "add v20.4s, v20.4s, v4.4s\n"
+ "add v21.4s, v21.4s, v4.4s\n"
+ "add v22.4s, v22.4s, v4.4s\n"
+ "add v23.4s, v23.4s, v4.4s\n"
+ "add v24.4s, v24.4s, v4.4s\n"
+ "add v25.4s, v25.4s, v4.4s\n"
+ "add v26.4s, v26.4s, v4.4s\n"
+ "add v27.4s, v27.4s, v4.4s\n"
+ "smin v18.4s, v18.4s, v6.4s\n"
+ "smin v19.4s, v19.4s, v6.4s\n"
+ "smin v20.4s, v20.4s, v6.4s\n"
+ "smin v21.4s, v21.4s, v6.4s\n"
+ "smin v22.4s, v22.4s, v6.4s\n"
+ "smin v23.4s, v23.4s, v6.4s\n"
+ "smin v24.4s, v24.4s, v6.4s\n"
+ "smin v25.4s, v25.4s, v6.4s\n"
+ "smin v26.4s, v26.4s, v6.4s\n"
+ "smin v27.4s, v27.4s, v6.4s\n"
"smax v18.4s, v18.4s, v5.4s\n"
"smax v19.4s, v19.4s, v5.4s\n"
"smax v20.4s, v20.4s, v5.4s\n"
@@ -2725,139 +2726,138 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"uzp1 v21.8h, v22.8h, v23.8h\n"
"uzp1 v24.8h, v24.8h, v25.8h\n"
"uzp1 v25.8h, v26.8h, v27.8h\n"
- "cmp x16, #0x10\n"
"uzp1 v8.16b, v8.16b, v9.16b\n"
"uzp1 v12.16b, v12.16b, v13.16b\n"
"uzp1 v16.16b, v16.16b, v17.16b\n"
"uzp1 v20.16b, v20.16b, v21.16b\n"
"uzp1 v24.16b, v24.16b, v25.16b\n"
"bge 134f\n"
- "tbz x16, #3, 129f\n"
- "str d8, [x17], #0x8\n"
- "str d12, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d20, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
- "tbz x16, #2, 127f\n"
- "st1 { v8.s }[2], [x17], #0x4\n"
- "st1 { v12.s }[2], [x24], #0x4\n"
- "st1 { v16.s }[2], [x23], #0x4\n"
- "st1 { v20.s }[2], [x22], #0x4\n"
- "st1 { v24.s }[2], [x21], #0x4\n"
- "tbz x16, #1, 126f\n"
- "st1 { v8.h }[6], [x17], #0x2\n"
- "st1 { v12.h }[6], [x24], #0x2\n"
- "st1 { v16.h }[6], [x23], #0x2\n"
- "st1 { v20.h }[6], [x22], #0x2\n"
- "st1 { v24.h }[6], [x21], #0x2\n"
- "tbz x16, #0, 133f\n"
- "st1 { v8.b }[14], [x17]\n"
- "st1 { v12.b }[14], [x24]\n"
- "st1 { v16.b }[14], [x23]\n"
- "st1 { v20.b }[14], [x22]\n"
- "st1 { v24.b }[14], [x21]\n"
+ "tbz x17, #3, 129f\n"
+ "str d8, [x16], #0x8\n"
+ "str d12, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "str d20, [x21], #0x8\n"
+ "str d24, [x20], #0x8\n"
+ "tbz x17, #2, 127f\n"
+ "st1 { v8.s }[2], [x16], #0x4\n"
+ "st1 { v12.s }[2], [x23], #0x4\n"
+ "st1 { v16.s }[2], [x22], #0x4\n"
+ "st1 { v20.s }[2], [x21], #0x4\n"
+ "st1 { v24.s }[2], [x20], #0x4\n"
+ "tbz x17, #1, 126f\n"
+ "st1 { v8.h }[6], [x16], #0x2\n"
+ "st1 { v12.h }[6], [x23], #0x2\n"
+ "st1 { v16.h }[6], [x22], #0x2\n"
+ "st1 { v20.h }[6], [x21], #0x2\n"
+ "st1 { v24.h }[6], [x20], #0x2\n"
+ "tbz x17, #0, 133f\n"
+ "st1 { v8.b }[14], [x16]\n"
+ "st1 { v12.b }[14], [x23]\n"
+ "st1 { v16.b }[14], [x22]\n"
+ "st1 { v20.b }[14], [x21]\n"
+ "st1 { v24.b }[14], [x20]\n"
"b 133f\n"
"126:" // Height 5: Partial direct writeback: partial_1_12
- "tbz x16, #0, 133f\n"
- "st1 { v8.b }[12], [x17]\n"
- "st1 { v12.b }[12], [x24]\n"
- "st1 { v16.b }[12], [x23]\n"
- "st1 { v20.b }[12], [x22]\n"
- "st1 { v24.b }[12], [x21]\n"
+ "tbz x17, #0, 133f\n"
+ "st1 { v8.b }[12], [x16]\n"
+ "st1 { v12.b }[12], [x23]\n"
+ "st1 { v16.b }[12], [x22]\n"
+ "st1 { v20.b }[12], [x21]\n"
+ "st1 { v24.b }[12], [x20]\n"
"b 133f\n"
"127:" // Height 5: Partial direct writeback: partial_2_8
- "tbz x16, #1, 128f\n"
- "st1 { v8.h }[4], [x17], #0x2\n"
- "st1 { v12.h }[4], [x24], #0x2\n"
- "st1 { v16.h }[4], [x23], #0x2\n"
- "st1 { v20.h }[4], [x22], #0x2\n"
- "st1 { v24.h }[4], [x21], #0x2\n"
- "tbz x16, #0, 133f\n"
- "st1 { v8.b }[10], [x17]\n"
- "st1 { v12.b }[10], [x24]\n"
- "st1 { v16.b }[10], [x23]\n"
- "st1 { v20.b }[10], [x22]\n"
- "st1 { v24.b }[10], [x21]\n"
+ "tbz x17, #1, 128f\n"
+ "st1 { v8.h }[4], [x16], #0x2\n"
+ "st1 { v12.h }[4], [x23], #0x2\n"
+ "st1 { v16.h }[4], [x22], #0x2\n"
+ "st1 { v20.h }[4], [x21], #0x2\n"
+ "st1 { v24.h }[4], [x20], #0x2\n"
+ "tbz x17, #0, 133f\n"
+ "st1 { v8.b }[10], [x16]\n"
+ "st1 { v12.b }[10], [x23]\n"
+ "st1 { v16.b }[10], [x22]\n"
+ "st1 { v20.b }[10], [x21]\n"
+ "st1 { v24.b }[10], [x20]\n"
"b 133f\n"
"128:" // Height 5: Partial direct writeback: partial_1_8
- "tbz x16, #0, 133f\n"
- "st1 { v8.b }[8], [x17]\n"
- "st1 { v12.b }[8], [x24]\n"
- "st1 { v16.b }[8], [x23]\n"
- "st1 { v20.b }[8], [x22]\n"
- "st1 { v24.b }[8], [x21]\n"
+ "tbz x17, #0, 133f\n"
+ "st1 { v8.b }[8], [x16]\n"
+ "st1 { v12.b }[8], [x23]\n"
+ "st1 { v16.b }[8], [x22]\n"
+ "st1 { v20.b }[8], [x21]\n"
+ "st1 { v24.b }[8], [x20]\n"
"b 133f\n"
"129:" // Height 5: Partial direct writeback: partial_4_0
- "tbz x16, #2, 131f\n"
- "str s8, [x17], #0x4\n"
- "str s12, [x24], #0x4\n"
- "str s16, [x23], #0x4\n"
- "str s20, [x22], #0x4\n"
- "str s24, [x21], #0x4\n"
- "tbz x16, #1, 130f\n"
- "st1 { v8.h }[2], [x17], #0x2\n"
- "st1 { v12.h }[2], [x24], #0x2\n"
- "st1 { v16.h }[2], [x23], #0x2\n"
- "st1 { v20.h }[2], [x22], #0x2\n"
- "st1 { v24.h }[2], [x21], #0x2\n"
- "tbz x16, #0, 133f\n"
- "st1 { v8.b }[6], [x17]\n"
- "st1 { v12.b }[6], [x24]\n"
- "st1 { v16.b }[6], [x23]\n"
- "st1 { v20.b }[6], [x22]\n"
- "st1 { v24.b }[6], [x21]\n"
+ "tbz x17, #2, 131f\n"
+ "str s8, [x16], #0x4\n"
+ "str s12, [x23], #0x4\n"
+ "str s16, [x22], #0x4\n"
+ "str s20, [x21], #0x4\n"
+ "str s24, [x20], #0x4\n"
+ "tbz x17, #1, 130f\n"
+ "st1 { v8.h }[2], [x16], #0x2\n"
+ "st1 { v12.h }[2], [x23], #0x2\n"
+ "st1 { v16.h }[2], [x22], #0x2\n"
+ "st1 { v20.h }[2], [x21], #0x2\n"
+ "st1 { v24.h }[2], [x20], #0x2\n"
+ "tbz x17, #0, 133f\n"
+ "st1 { v8.b }[6], [x16]\n"
+ "st1 { v12.b }[6], [x23]\n"
+ "st1 { v16.b }[6], [x22]\n"
+ "st1 { v20.b }[6], [x21]\n"
+ "st1 { v24.b }[6], [x20]\n"
"b 133f\n"
"130:" // Height 5: Partial direct writeback: partial_1_4
- "tbz x16, #0, 133f\n"
- "st1 { v8.b }[4], [x17]\n"
- "st1 { v12.b }[4], [x24]\n"
- "st1 { v16.b }[4], [x23]\n"
- "st1 { v20.b }[4], [x22]\n"
- "st1 { v24.b }[4], [x21]\n"
+ "tbz x17, #0, 133f\n"
+ "st1 { v8.b }[4], [x16]\n"
+ "st1 { v12.b }[4], [x23]\n"
+ "st1 { v16.b }[4], [x22]\n"
+ "st1 { v20.b }[4], [x21]\n"
+ "st1 { v24.b }[4], [x20]\n"
"b 133f\n"
"131:" // Height 5: Partial direct writeback: partial_2_0
- "tbz x16, #1, 132f\n"
- "str h8, [x17], #0x2\n"
- "str h12, [x24], #0x2\n"
- "str h16, [x23], #0x2\n"
- "str h20, [x22], #0x2\n"
- "str h24, [x21], #0x2\n"
- "tbz x16, #0, 133f\n"
- "st1 { v8.b }[2], [x17]\n"
- "st1 { v12.b }[2], [x24]\n"
- "st1 { v16.b }[2], [x23]\n"
- "st1 { v20.b }[2], [x22]\n"
- "st1 { v24.b }[2], [x21]\n"
+ "tbz x17, #1, 132f\n"
+ "str h8, [x16], #0x2\n"
+ "str h12, [x23], #0x2\n"
+ "str h16, [x22], #0x2\n"
+ "str h20, [x21], #0x2\n"
+ "str h24, [x20], #0x2\n"
+ "tbz x17, #0, 133f\n"
+ "st1 { v8.b }[2], [x16]\n"
+ "st1 { v12.b }[2], [x23]\n"
+ "st1 { v16.b }[2], [x22]\n"
+ "st1 { v20.b }[2], [x21]\n"
+ "st1 { v24.b }[2], [x20]\n"
"b 133f\n"
"132:" // Height 5: Partial direct writeback: partial_1_0
- "str b8, [x17, #0x0]\n"
- "str b12, [x24, #0x0]\n"
- "str b16, [x23, #0x0]\n"
- "str b20, [x22, #0x0]\n"
- "str b24, [x21, #0x0]\n"
+ "str b8, [x16, #0x0]\n"
+ "str b12, [x23, #0x0]\n"
+ "str b16, [x22, #0x0]\n"
+ "str b20, [x21, #0x0]\n"
+ "str b24, [x20, #0x0]\n"
"133:" // Height 5: Partial direct writeback: Done
"b 135f\n"
"134:" // Height 5: Full writeback
- "str q8, [x17, #0x0]\n"
- "add x17, x17, #0x10\n"
- "str q12, [x24, #0x0]\n"
- "str q16, [x23, #0x0]\n"
- "str q20, [x22, #0x0]\n"
- "str q24, [x21, #0x0]\n"
+ "str q8, [x16, #0x0]\n"
+ "add x16, x16, #0x10\n"
+ "str q12, [x23, #0x0]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q20, [x21, #0x0]\n"
+ "str q24, [x20, #0x0]\n"
"135:" // Height 5: Writeback done
- "subs x16, x16, #0x10\n"
+ "subs x17, x17, #0x10\n"
"bgt 110b\n"
"b 164f\n"
"136:" // Height 6
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x6, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
+ "mov x8, %x[col_bias]\n"
+ "ldr x7, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
+ "mov x16, %x[output_ptr]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_N]]\n"
"mov x20, #0x6\n"
- "mov x6, %x[col_bias]\n"
- "ldr x7, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
- "ldr x8, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x17, %x[output_ptr]\n"
- "ldr x16, [%x[args_ptr], %[offsetof_N]]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
"ldr x15, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
"137:" // Height 6: Column loop
"movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
@@ -2887,257 +2887,257 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"mov x14, #0x0\n"
"139:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"ldr w13, [x20, x14, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 140f\n"
- "ldr x21, [%x[input_ptr], x14, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x12, [x21, #0x0]\n"
- "ldr x9, [x21, #0x8]\n"
- "ldr x27, [x21, #0x10]\n"
- "ldr x25, [x21, #0x18]\n"
- "ldr x23, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
+ "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x12, [x20, #0x0]\n"
+ "ldr x28, [x20, #0x8]\n"
+ "ldr x26, [x20, #0x10]\n"
+ "ldr x24, [x20, #0x18]\n"
+ "ldr x22, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
"cbnz x14, 141f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x12, x12, x20\n"
- "add x9, x9, x20\n"
- "add x27, x27, x20\n"
- "add x25, x25, x20\n"
- "add x23, x23, x20\n"
- "add x21, x21, x20\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x12, x12, x19\n"
+ "add x28, x28, x19\n"
+ "add x26, x26, x19\n"
+ "add x24, x24, x19\n"
+ "add x22, x22, x19\n"
+ "add x20, x20, x19\n"
"b 141f\n"
"140:" // Height 6: setup direct input
"mov x12, %x[input_ptr]\n"
- "add x9, x12, x20\n"
- "add x27, x9, x20\n"
- "add x25, x27, x20\n"
- "add x23, x25, x20\n"
- "add x21, x23, x20\n"
+ "add x28, x12, x19\n"
+ "add x26, x28, x19\n"
+ "add x24, x26, x19\n"
+ "add x22, x24, x19\n"
+ "add x20, x22, x19\n"
"141:" // Height 6: input setup done
"cmp x13, #0x10\n"
"blt 144f\n"
"ldr q0, [x12, #0x0]\n"
+ "ldr q1, [x28, #0x0]\n"
"cmp x13, #0x20\n"
- "ldr q1, [x9, #0x0]\n"
- "ldr q2, [x27, #0x0]\n"
- "ldr q3, [x25, #0x0]\n"
- "ldr q4, [x23, #0x0]\n"
- "ldr q5, [x21, #0x0]\n"
+ "ldr q2, [x26, #0x0]\n"
+ "ldr q3, [x24, #0x0]\n"
+ "ldr q4, [x22, #0x0]\n"
+ "ldr q5, [x20, #0x0]\n"
"ldr q6, [x15, #0x0]\n"
- "ldr q7, [x15, #0x10]\n"
"blt 143f\n"
"142:" // Height 6: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr x20, [x15, #0x28]\n"
+ "ldr d7, [x15, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr x11, [x15, #0x38]\n"
+ "ldr x11, [x15, #0x18]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
- "add x12, x12, #0x10\n"
+ "ldr x10, [x15, #0x28]\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "add x12, x12, #0x10\n"
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
- "add x27, x27, #0x10\n"
+ "mov v7.d[1], x11\n"
".inst 0x4f85e0dc // sdot v28.4s, v6.16b, v5.4b[0]\n"
- "ldr d6, [x15, #0x20]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x20\n"
+ "ldr d6, [x15, #0x20]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr x20, [x15, #0x48]\n"
+ "ldr x11, [x15, #0x38]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "add x25, x25, #0x10\n"
+ "ldr x9, [x12, #0x8]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
- "add x23, x23, #0x10\n"
+ "mov v6.d[1], x10\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
- "add x21, x21, #0x10\n"
+ "ldr x10, [x15, #0x48]\n"
".inst 0x4f85e0fd // sdot v29.4s, v7.16b, v5.4b[0]\n"
"ldr d7, [x15, #0x30]\n"
- "mov v7.d[1], x11\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
+ "add x28, x28, #0x10\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "ldr x11, [x15, #0x58]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
- "ldr x10, [x12, #0x8]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n"
- "ldr x28, [x9, #0x8]\n"
+ "ldr x11, [x15, #0x58]\n"
".inst 0x4f84e0da // sdot v26.4s, v6.16b, v4.4b[0]\n"
- "ldr x26, [x27, #0x8]\n"
+ "ldr x27, [x28, #0x8]\n"
".inst 0x4f85e0de // sdot v30.4s, v6.16b, v5.4b[0]\n"
"ldr d6, [x15, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x20\n"
+ "add x26, x26, #0x10\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
- "ldr x20, [x15, #0x68]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
- "ldr x24, [x25, #0x8]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
- "sub x13, x13, #0x10\n"
+ "ldr x10, [x15, #0x68]\n"
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
- "cmp x13, #0x20\n"
+ "ldr x25, [x26, #0x8]\n"
".inst 0x4f85e0ff // sdot v31.4s, v7.16b, v5.4b[0]\n"
"ldr d7, [x15, #0x50]\n"
- "mov v7.d[1], x11\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
+ "add x24, x24, #0x10\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
- "ldr x11, [x15, #0x78]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n"
- "prfm pldl1keep, [x12, #0x80]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4fa3e0d4 // sdot v20.4s, v6.16b, v3.4b[1]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "ldr x11, [x15, #0x78]\n"
".inst 0x4fa4e0d8 // sdot v24.4s, v6.16b, v4.4b[1]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "ldr x23, [x24, #0x8]\n"
".inst 0x4fa5e0dc // sdot v28.4s, v6.16b, v5.4b[1]\n"
"ldr d6, [x15, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
- "mov v6.d[1], x20\n"
+ "add x22, x22, #0x10\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
- "ldr x20, [x15, #0x88]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4fa3e0f5 // sdot v21.4s, v7.16b, v3.4b[1]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "ldr x10, [x15, #0x88]\n"
".inst 0x4fa4e0f9 // sdot v25.4s, v7.16b, v4.4b[1]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
+ "ldr x21, [x22, #0x8]\n"
".inst 0x4fa5e0fd // sdot v29.4s, v7.16b, v5.4b[1]\n"
"ldr d7, [x15, #0x70]\n"
- "mov v7.d[1], x11\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
+ "add x20, x20, #0x10\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
- "ldr x11, [x15, #0x98]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4fa3e0d6 // sdot v22.4s, v6.16b, v3.4b[1]\n"
+ "ldr x11, [x15, #0x98]\n"
".inst 0x4fa4e0da // sdot v26.4s, v6.16b, v4.4b[1]\n"
+ "sub x13, x13, #0x10\n"
".inst 0x4fa5e0de // sdot v30.4s, v6.16b, v5.4b[1]\n"
"ldr d6, [x15, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
- "mov v6.d[1], x20\n"
+ "cmp x13, #0x20\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
- "ldr x20, [x15, #0xa8]\n"
".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4fa3e0f7 // sdot v23.4s, v7.16b, v3.4b[1]\n"
+ "ldr x10, [x15, #0xa8]\n"
".inst 0x4fa4e0fb // sdot v27.4s, v7.16b, v4.4b[1]\n"
".inst 0x4fa5e0ff // sdot v31.4s, v7.16b, v5.4b[1]\n"
"ldr d7, [x15, #0x90]\n"
- "mov v7.d[1], x11\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
- "ldr x11, [x15, #0xb8]\n"
".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4f83e8d4 // sdot v20.4s, v6.16b, v3.4b[2]\n"
+ "ldr x11, [x15, #0xb8]\n"
".inst 0x4f84e8d8 // sdot v24.4s, v6.16b, v4.4b[2]\n"
".inst 0x4f85e8dc // sdot v28.4s, v6.16b, v5.4b[2]\n"
"ldr d6, [x15, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
- "mov v6.d[1], x20\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
- "ldr x20, [x15, #0xc8]\n"
".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4f83e8f5 // sdot v21.4s, v7.16b, v3.4b[2]\n"
+ "ldr x10, [x15, #0xc8]\n"
".inst 0x4f84e8f9 // sdot v25.4s, v7.16b, v4.4b[2]\n"
".inst 0x4f85e8fd // sdot v29.4s, v7.16b, v5.4b[2]\n"
"ldr d7, [x15, #0xb0]\n"
- "mov v7.d[1], x11\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
- "ldr x11, [x15, #0xd8]\n"
".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4f83e8d6 // sdot v22.4s, v6.16b, v3.4b[2]\n"
+ "ldr x11, [x15, #0xd8]\n"
".inst 0x4f84e8da // sdot v26.4s, v6.16b, v4.4b[2]\n"
".inst 0x4f85e8de // sdot v30.4s, v6.16b, v5.4b[2]\n"
"ldr d6, [x15, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
- "mov v6.d[1], x20\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
- "ldr x20, [x15, #0xe8]\n"
".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4f83e8f7 // sdot v23.4s, v7.16b, v3.4b[2]\n"
+ "ldr x10, [x15, #0xe8]\n"
".inst 0x4f84e8fb // sdot v27.4s, v7.16b, v4.4b[2]\n"
".inst 0x4f85e8ff // sdot v31.4s, v7.16b, v5.4b[2]\n"
"ldr d7, [x15, #0xd0]\n"
- "mov v7.d[1], x11\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
- "ldr x11, [x15, #0xf8]\n"
".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4fa3e8d4 // sdot v20.4s, v6.16b, v3.4b[3]\n"
+ "ldr x11, [x15, #0xf8]\n"
".inst 0x4fa4e8d8 // sdot v24.4s, v6.16b, v4.4b[3]\n"
".inst 0x4fa5e8dc // sdot v28.4s, v6.16b, v5.4b[3]\n"
"ldr d6, [x15, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
- "mov v6.d[1], x20\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
- "ldr x22, [x23, #0x8]\n"
".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4fa3e8f5 // sdot v21.4s, v7.16b, v3.4b[3]\n"
+ "ldr x19, [x20, #0x8]\n"
".inst 0x4fa4e8f9 // sdot v25.4s, v7.16b, v4.4b[3]\n"
".inst 0x4fa5e8fd // sdot v29.4s, v7.16b, v5.4b[3]\n"
"ldr d7, [x15, #0xf0]\n"
- "mov v7.d[1], x11\n"
- "add x15, x15, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
- "ldr x20, [x15, #0x8]\n"
+ "add x15, x15, #0x100\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4fa3e8d6 // sdot v22.4s, v6.16b, v3.4b[3]\n"
+ "ldr x10, [x15, #0x8]\n"
".inst 0x4fa4e8da // sdot v26.4s, v6.16b, v4.4b[3]\n"
".inst 0x4fa5e8de // sdot v30.4s, v6.16b, v5.4b[3]\n"
"ldr d6, [x15, #0x0]\n"
".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
"ldr d0, [x12, #0x0]\n"
".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n"
- "ldr d1, [x9, #0x0]\n"
+ "ldr d1, [x28, #0x0]\n"
".inst 0x4fa2e8f3 // sdot v19.4s, v7.16b, v2.4b[3]\n"
- "ldr d2, [x27, #0x0]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4fa3e8f7 // sdot v23.4s, v7.16b, v3.4b[3]\n"
- "ldr d3, [x25, #0x0]\n"
+ "mov v0.d[1], x9\n"
".inst 0x4fa4e8fb // sdot v27.4s, v7.16b, v4.4b[3]\n"
- "ldr d4, [x23, #0x0]\n"
+ "mov v1.d[1], x27\n"
".inst 0x4fa5e8ff // sdot v31.4s, v7.16b, v5.4b[3]\n"
- "ldr d5, [x21, #0x0]\n"
- "ldr d7, [x15, #0x10]\n"
- "mov v6.d[1], x20\n"
- "ldr x20, [x21, #0x8]\n"
- "mov v0.d[1], x10\n"
- "ldr x11, [x15, #0x18]\n"
- "mov v1.d[1], x28\n"
- "mov v2.d[1], x26\n"
- "mov v3.d[1], x24\n"
- "mov v4.d[1], x22\n"
- "mov v5.d[1], x20\n"
- "mov v7.d[1], x11\n"
+ "ldr d2, [x26, #0x0]\n"
+ "ldr d3, [x24, #0x0]\n"
+ "ldr d4, [x22, #0x0]\n"
+ "mov v2.d[1], x25\n"
+ "ldr d5, [x20, #0x0]\n"
+ "mov v3.d[1], x23\n"
+ "mov v4.d[1], x21\n"
+ "mov v5.d[1], x19\n"
"bge 142b\n"
"143:" // Height 6: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "add x12, x12, #0x10\n"
+ "ldr q7, [x15, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "sub x13, x13, #0x10\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
- "add x27, x27, #0x10\n"
+ "add x12, x12, #0x10\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
- "add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
- "add x23, x23, #0x10\n"
+ "add x28, x28, #0x10\n"
".inst 0x4f85e0dc // sdot v28.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x15, #0x20]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "add x21, x21, #0x10\n"
+ "ldr q6, [x15, #0x20]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "sub x13, x13, #0x10\n"
+ "add x26, x26, #0x10\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "prfm pldl1keep, [x12, #0x80]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "add x24, x24, #0x10\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f85e0fd // sdot v29.4s, v7.16b, v5.4b[0]\n"
"ldr q7, [x15, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "add x22, x22, #0x10\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
+ "add x20, x20, #0x10\n"
".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
".inst 0x4f84e0da // sdot v26.4s, v6.16b, v4.4b[0]\n"
".inst 0x4f85e0de // sdot v30.4s, v6.16b, v5.4b[0]\n"
"ldr q6, [x15, #0x40]\n"
@@ -3238,15 +3238,15 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"145:" // Height 6: Multiply loop: Odd block loop
"ldr s0, [x12], #0x4\n"
"sub x13, x13, #0x4\n"
- "ldr s1, [x9], #0x4\n"
+ "ldr s1, [x28], #0x4\n"
"cmp x13, #0x4\n"
- "ldr s2, [x27], #0x4\n"
- "ldr s3, [x25], #0x4\n"
- "ldr s4, [x23], #0x4\n"
- "ldr s5, [x21], #0x4\n"
+ "ldr s2, [x26], #0x4\n"
+ "ldr s3, [x24], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
+ "ldr s5, [x20], #0x4\n"
"ldr q6, [x15, #0x0]\n"
- ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x15, #0x10]\n"
+ ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
@@ -3274,34 +3274,34 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
".inst 0x4f85e0ff // sdot v31.4s, v7.16b, v5.4b[0]\n"
"bge 145b\n"
- "146:" // Height 6: Multiply loop: Skip odd blocks
"cbz x13, 149f\n"
+ "146:" // Height 6: Multiply loop: Skip odd blocks
"tbz x13, #1, 147f\n"
"ldr h0, [x12], #0x2\n"
- "ldr h1, [x9], #0x2\n"
- "ldr h2, [x27], #0x2\n"
- "ldr h3, [x25], #0x2\n"
- "ldr h4, [x23], #0x2\n"
- "ldr h5, [x21], #0x2\n"
+ "ldr h1, [x28], #0x2\n"
+ "ldr h2, [x26], #0x2\n"
+ "ldr h3, [x24], #0x2\n"
+ "ldr h4, [x22], #0x2\n"
+ "ldr h5, [x20], #0x2\n"
"tbz x13, #0, 148f\n"
"ld1 { v0.b }[2], [x12]\n"
- "ld1 { v1.b }[2], [x9]\n"
- "ld1 { v2.b }[2], [x27]\n"
- "ld1 { v3.b }[2], [x25]\n"
- "ld1 { v4.b }[2], [x23]\n"
- "ld1 { v5.b }[2], [x21]\n"
+ "ld1 { v1.b }[2], [x28]\n"
+ "ld1 { v2.b }[2], [x26]\n"
+ "ld1 { v3.b }[2], [x24]\n"
+ "ld1 { v4.b }[2], [x22]\n"
+ "ld1 { v5.b }[2], [x20]\n"
"b 148f\n"
"147:" // Height 6: Multiply loop: Ragged operand read: partial_1_0
"ldr b0, [x12, #0x0]\n"
- "ldr b1, [x9, #0x0]\n"
- "ldr b2, [x27, #0x0]\n"
- "ldr b3, [x25, #0x0]\n"
- "ldr b4, [x23, #0x0]\n"
- "ldr b5, [x21, #0x0]\n"
+ "ldr b1, [x28, #0x0]\n"
+ "ldr b2, [x26, #0x0]\n"
+ "ldr b3, [x24, #0x0]\n"
+ "ldr b4, [x22, #0x0]\n"
+ "ldr b5, [x20, #0x0]\n"
"148:" // Height 6: Multiply loop: Ragged operand read: Done
"ldr q6, [x15, #0x0]\n"
- ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x15, #0x10]\n"
+ ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
@@ -3329,68 +3329,68 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
".inst 0x4f85e0ff // sdot v31.4s, v7.16b, v5.4b[0]\n"
"149:" // Height 6: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x14, x14, #0x1\n"
- "cmp x14, x20\n"
+ "cmp x14, x19\n"
"bne 139b\n"
- "ldr q0, [x6, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x16, #0x0]\n"
+ "ldr q0, [x8, #0x0]\n"
+ "ldr q1, [x8, #0x10]\n"
+ "add x23, x16, x19\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19\n"
"add v8.4s, v8.4s, v0.4s\n"
- "ldr q1, [x6, #0x10]\n"
- "add v9.4s, v9.4s, v1.4s\n"
- "ldr q2, [x6, #0x20]\n"
- "add v10.4s, v10.4s, v2.4s\n"
- "ldr q3, [x6, #0x30]\n"
- "add v11.4s, v11.4s, v3.4s\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
- "add x20, x21, x20\n"
- "prfm pstl1keep, [x17, #0x0]\n"
"add v12.4s, v12.4s, v0.4s\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "add v9.4s, v9.4s, v1.4s\n"
"add v13.4s, v13.4s, v1.4s\n"
- "prfm pstl1keep, [x23, #0x0]\n"
- "add v14.4s, v14.4s, v2.4s\n"
+ "add v16.4s, v16.4s, v0.4s\n"
+ "add v17.4s, v17.4s, v1.4s\n"
+ "add v20.4s, v20.4s, v0.4s\n"
+ "add v21.4s, v21.4s, v1.4s\n"
+ "add v24.4s, v24.4s, v0.4s\n"
+ "add v25.4s, v25.4s, v1.4s\n"
+ "add v28.4s, v28.4s, v0.4s\n"
+ "add v29.4s, v29.4s, v1.4s\n"
"prfm pstl1keep, [x22, #0x0]\n"
- "add v15.4s, v15.4s, v3.4s\n"
+ "add x21, x22, x19\n"
"prfm pstl1keep, [x21, #0x0]\n"
- "add v16.4s, v16.4s, v0.4s\n"
+ "add x20, x21, x19\n"
"prfm pstl1keep, [x20, #0x0]\n"
- "add v17.4s, v17.4s, v1.4s\n"
+ "add x19, x20, x19\n"
+ "prfm pstl1keep, [x19, #0x0]\n"
+ "ldr q2, [x8, #0x20]\n"
+ "ldr q3, [x8, #0x30]\n"
+ "add x8, x8, #0x40\n"
+ "add v10.4s, v10.4s, v2.4s\n"
+ "add v14.4s, v14.4s, v2.4s\n"
+ "add v11.4s, v11.4s, v3.4s\n"
+ "add v15.4s, v15.4s, v3.4s\n"
"add v18.4s, v18.4s, v2.4s\n"
"add v19.4s, v19.4s, v3.4s\n"
- "add v20.4s, v20.4s, v0.4s\n"
- "add v21.4s, v21.4s, v1.4s\n"
"add v22.4s, v22.4s, v2.4s\n"
"add v23.4s, v23.4s, v3.4s\n"
- "add v24.4s, v24.4s, v0.4s\n"
- "add v25.4s, v25.4s, v1.4s\n"
"add v26.4s, v26.4s, v2.4s\n"
"add v27.4s, v27.4s, v3.4s\n"
- "add v28.4s, v28.4s, v0.4s\n"
- "add v29.4s, v29.4s, v1.4s\n"
"add v30.4s, v30.4s, v2.4s\n"
"add v31.4s, v31.4s, v3.4s\n"
- "add x6, x6, #0x40\n"
"tbz %x[flags], #4, 150f\n"
- "ldr q0, [x8, #0x0]\n"
- "ldr q4, [x7, #0x0]\n"
- "ldr q1, [x8, #0x10]\n"
- "ldr q5, [x7, #0x10]\n"
- "ldr q2, [x8, #0x20]\n"
- "ldr q6, [x7, #0x20]\n"
- "ldr q3, [x8, #0x30]\n"
- "add x8, x8, #0x40\n"
- "ldr q7, [x7, #0x30]\n"
+ "ldr q0, [x7, #0x0]\n"
+ "ldr q4, [x6, #0x0]\n"
+ "ldr q1, [x7, #0x10]\n"
+ "ldr q5, [x6, #0x10]\n"
+ "ldr q2, [x7, #0x20]\n"
+ "ldr q6, [x6, #0x20]\n"
+ "ldr q3, [x7, #0x30]\n"
"add x7, x7, #0x40\n"
+ "ldr q7, [x6, #0x30]\n"
+ "add x6, x6, #0x40\n"
"b 151f\n"
"150:" // Height 6: per layer parameters
"add x25, %x[qp], %[per_layer_right_shift]\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
"ld1r { v0.4s }, [x25]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x25]\n"
+ "ld1r { v4.4s }, [x24]\n"
"mov v1.16b, v0.16b\n"
"mov v5.16b, v4.16b\n"
"mov v2.16b, v0.16b\n"
@@ -3432,10 +3432,10 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v8.4s, v8.4s, v4.4s\n"
+ "and v4.16b, v12.16b, v0.16b\n"
"sqadd v9.4s, v9.4s, v5.4s\n"
"sqadd v10.4s, v10.4s, v6.4s\n"
"sqadd v11.4s, v11.4s, v7.4s\n"
- "and v4.16b, v12.16b, v0.16b\n"
"and v5.16b, v13.16b, v1.16b\n"
"and v6.16b, v14.16b, v2.16b\n"
"and v7.16b, v15.16b, v3.16b\n"
@@ -3444,11 +3444,11 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v12.4s, v12.4s, v4.4s\n"
+ "and v4.16b, v16.16b, v0.16b\n"
"sqadd v13.4s, v13.4s, v5.4s\n"
+ "and v5.16b, v17.16b, v1.16b\n"
"sqadd v14.4s, v14.4s, v6.4s\n"
"sqadd v15.4s, v15.4s, v7.4s\n"
- "and v4.16b, v16.16b, v0.16b\n"
- "and v5.16b, v17.16b, v1.16b\n"
"and v6.16b, v18.16b, v2.16b\n"
"and v7.16b, v19.16b, v3.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
@@ -3456,42 +3456,42 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v16.4s, v16.4s, v4.4s\n"
+ "and v4.16b, v20.16b, v0.16b\n"
"sqadd v17.4s, v17.4s, v5.4s\n"
+ "and v5.16b, v21.16b, v1.16b\n"
"sqadd v18.4s, v18.4s, v6.4s\n"
"sqadd v19.4s, v19.4s, v7.4s\n"
- "and v4.16b, v20.16b, v0.16b\n"
- "and v5.16b, v21.16b, v1.16b\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
"and v6.16b, v22.16b, v2.16b\n"
"and v7.16b, v23.16b, v3.16b\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
+ "sqadd v20.4s, v20.4s, v4.4s\n"
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
- "sqadd v20.4s, v20.4s, v4.4s\n"
- "sqadd v21.4s, v21.4s, v5.4s\n"
- "sqadd v22.4s, v22.4s, v6.4s\n"
- "sqadd v23.4s, v23.4s, v7.4s\n"
"and v4.16b, v24.16b, v0.16b\n"
+ "sqadd v21.4s, v21.4s, v5.4s\n"
"and v5.16b, v25.16b, v1.16b\n"
+ "sqadd v22.4s, v22.4s, v6.4s\n"
"and v6.16b, v26.16b, v2.16b\n"
- "and v7.16b, v27.16b, v3.16b\n"
+ "sqadd v23.4s, v23.4s, v7.4s\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
+ "and v7.16b, v27.16b, v3.16b\n"
"sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v24.4s, v24.4s, v4.4s\n"
- "sqadd v25.4s, v25.4s, v5.4s\n"
- "sqadd v26.4s, v26.4s, v6.4s\n"
- "sqadd v27.4s, v27.4s, v7.4s\n"
"and v4.16b, v28.16b, v0.16b\n"
+ "sqadd v25.4s, v25.4s, v5.4s\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
"and v5.16b, v29.16b, v1.16b\n"
+ "sqadd v26.4s, v26.4s, v6.4s\n"
"and v6.16b, v30.16b, v2.16b\n"
- "and v7.16b, v31.16b, v3.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
+ "sqadd v27.4s, v27.4s, v7.4s\n"
+ "and v7.16b, v31.16b, v3.16b\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v28.4s, v28.4s, v4.4s\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v29.4s, v29.4s, v5.4s\n"
"sqadd v30.4s, v30.4s, v6.4s\n"
"sqadd v31.4s, v31.4s, v7.4s\n"
@@ -3520,8 +3520,13 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"srshl v29.4s, v29.4s, v1.4s\n"
"srshl v30.4s, v30.4s, v2.4s\n"
"srshl v31.4s, v31.4s, v3.4s\n"
- "add x25, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x25]\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "add x25, %x[qp], %[minval]\n"
+ "ld1r { v4.4s }, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
+ "ld1r { v5.4s }, [x25]\n"
+ "cmp x17, #0x10\n"
+ "ld1r { v6.4s }, [x24]\n"
"add v8.4s, v8.4s, v4.4s\n"
"add v9.4s, v9.4s, v4.4s\n"
"add v10.4s, v10.4s, v4.4s\n"
@@ -3532,22 +3537,6 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"add v15.4s, v15.4s, v4.4s\n"
"add v16.4s, v16.4s, v4.4s\n"
"add v17.4s, v17.4s, v4.4s\n"
- "add v18.4s, v18.4s, v4.4s\n"
- "add v19.4s, v19.4s, v4.4s\n"
- "add v20.4s, v20.4s, v4.4s\n"
- "add v21.4s, v21.4s, v4.4s\n"
- "add v22.4s, v22.4s, v4.4s\n"
- "add v23.4s, v23.4s, v4.4s\n"
- "add v24.4s, v24.4s, v4.4s\n"
- "add v25.4s, v25.4s, v4.4s\n"
- "add v26.4s, v26.4s, v4.4s\n"
- "add v27.4s, v27.4s, v4.4s\n"
- "add v28.4s, v28.4s, v4.4s\n"
- "add v29.4s, v29.4s, v4.4s\n"
- "add v30.4s, v30.4s, v4.4s\n"
- "add v31.4s, v31.4s, v4.4s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x25]\n"
"smin v8.4s, v8.4s, v6.4s\n"
"smin v9.4s, v9.4s, v6.4s\n"
"smin v10.4s, v10.4s, v6.4s\n"
@@ -3558,22 +3547,6 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"smin v15.4s, v15.4s, v6.4s\n"
"smin v16.4s, v16.4s, v6.4s\n"
"smin v17.4s, v17.4s, v6.4s\n"
- "smin v18.4s, v18.4s, v6.4s\n"
- "smin v19.4s, v19.4s, v6.4s\n"
- "smin v20.4s, v20.4s, v6.4s\n"
- "smin v21.4s, v21.4s, v6.4s\n"
- "smin v22.4s, v22.4s, v6.4s\n"
- "smin v23.4s, v23.4s, v6.4s\n"
- "smin v24.4s, v24.4s, v6.4s\n"
- "smin v25.4s, v25.4s, v6.4s\n"
- "smin v26.4s, v26.4s, v6.4s\n"
- "smin v27.4s, v27.4s, v6.4s\n"
- "smin v28.4s, v28.4s, v6.4s\n"
- "smin v29.4s, v29.4s, v6.4s\n"
- "smin v30.4s, v30.4s, v6.4s\n"
- "smin v31.4s, v31.4s, v6.4s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x25]\n"
"smax v8.4s, v8.4s, v5.4s\n"
"smax v9.4s, v9.4s, v5.4s\n"
"smax v10.4s, v10.4s, v5.4s\n"
@@ -3584,6 +3557,26 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"smax v15.4s, v15.4s, v5.4s\n"
"smax v16.4s, v16.4s, v5.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
+ "add v18.4s, v18.4s, v4.4s\n"
+ "add v19.4s, v19.4s, v4.4s\n"
+ "add v20.4s, v20.4s, v4.4s\n"
+ "add v21.4s, v21.4s, v4.4s\n"
+ "add v22.4s, v22.4s, v4.4s\n"
+ "add v23.4s, v23.4s, v4.4s\n"
+ "add v24.4s, v24.4s, v4.4s\n"
+ "add v25.4s, v25.4s, v4.4s\n"
+ "add v26.4s, v26.4s, v4.4s\n"
+ "add v27.4s, v27.4s, v4.4s\n"
+ "smin v18.4s, v18.4s, v6.4s\n"
+ "smin v19.4s, v19.4s, v6.4s\n"
+ "smin v20.4s, v20.4s, v6.4s\n"
+ "smin v21.4s, v21.4s, v6.4s\n"
+ "smin v22.4s, v22.4s, v6.4s\n"
+ "smin v23.4s, v23.4s, v6.4s\n"
+ "smin v24.4s, v24.4s, v6.4s\n"
+ "smin v25.4s, v25.4s, v6.4s\n"
+ "smin v26.4s, v26.4s, v6.4s\n"
+ "smin v27.4s, v27.4s, v6.4s\n"
"smax v18.4s, v18.4s, v5.4s\n"
"smax v19.4s, v19.4s, v5.4s\n"
"smax v20.4s, v20.4s, v5.4s\n"
@@ -3594,177 +3587,184 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"smax v25.4s, v25.4s, v5.4s\n"
"smax v26.4s, v26.4s, v5.4s\n"
"smax v27.4s, v27.4s, v5.4s\n"
- "smax v28.4s, v28.4s, v5.4s\n"
- "smax v29.4s, v29.4s, v5.4s\n"
- "smax v30.4s, v30.4s, v5.4s\n"
- "smax v31.4s, v31.4s, v5.4s\n"
+ "add v28.4s, v28.4s, v4.4s\n"
+ "add v29.4s, v29.4s, v4.4s\n"
+ "add v30.4s, v30.4s, v4.4s\n"
+ "add v31.4s, v31.4s, v4.4s\n"
"uzp1 v8.8h, v8.8h, v9.8h\n"
"uzp1 v9.8h, v10.8h, v11.8h\n"
"uzp1 v12.8h, v12.8h, v13.8h\n"
"uzp1 v13.8h, v14.8h, v15.8h\n"
"uzp1 v16.8h, v16.8h, v17.8h\n"
"uzp1 v17.8h, v18.8h, v19.8h\n"
+ "smin v28.4s, v28.4s, v6.4s\n"
+ "smin v29.4s, v29.4s, v6.4s\n"
+ "smin v30.4s, v30.4s, v6.4s\n"
+ "smin v31.4s, v31.4s, v6.4s\n"
"uzp1 v20.8h, v20.8h, v21.8h\n"
"uzp1 v21.8h, v22.8h, v23.8h\n"
"uzp1 v24.8h, v24.8h, v25.8h\n"
"uzp1 v25.8h, v26.8h, v27.8h\n"
- "uzp1 v28.8h, v28.8h, v29.8h\n"
- "uzp1 v29.8h, v30.8h, v31.8h\n"
- "cmp x16, #0x10\n"
"uzp1 v8.16b, v8.16b, v9.16b\n"
"uzp1 v12.16b, v12.16b, v13.16b\n"
+ "smax v28.4s, v28.4s, v5.4s\n"
+ "smax v29.4s, v29.4s, v5.4s\n"
+ "smax v30.4s, v30.4s, v5.4s\n"
+ "smax v31.4s, v31.4s, v5.4s\n"
"uzp1 v16.16b, v16.16b, v17.16b\n"
"uzp1 v20.16b, v20.16b, v21.16b\n"
"uzp1 v24.16b, v24.16b, v25.16b\n"
+ "uzp1 v28.8h, v28.8h, v29.8h\n"
+ "uzp1 v29.8h, v30.8h, v31.8h\n"
"uzp1 v28.16b, v28.16b, v29.16b\n"
"bge 161f\n"
- "tbz x16, #3, 156f\n"
- "str d8, [x17], #0x8\n"
- "str d12, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d20, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
- "str d28, [x20], #0x8\n"
- "tbz x16, #2, 154f\n"
- "st1 { v8.s }[2], [x17], #0x4\n"
- "st1 { v12.s }[2], [x24], #0x4\n"
- "st1 { v16.s }[2], [x23], #0x4\n"
- "st1 { v20.s }[2], [x22], #0x4\n"
- "st1 { v24.s }[2], [x21], #0x4\n"
- "st1 { v28.s }[2], [x20], #0x4\n"
- "tbz x16, #1, 153f\n"
- "st1 { v8.h }[6], [x17], #0x2\n"
- "st1 { v12.h }[6], [x24], #0x2\n"
- "st1 { v16.h }[6], [x23], #0x2\n"
- "st1 { v20.h }[6], [x22], #0x2\n"
- "st1 { v24.h }[6], [x21], #0x2\n"
- "st1 { v28.h }[6], [x20], #0x2\n"
- "tbz x16, #0, 160f\n"
- "st1 { v8.b }[14], [x17]\n"
- "st1 { v12.b }[14], [x24]\n"
- "st1 { v16.b }[14], [x23]\n"
- "st1 { v20.b }[14], [x22]\n"
- "st1 { v24.b }[14], [x21]\n"
- "st1 { v28.b }[14], [x20]\n"
+ "tbz x17, #3, 156f\n"
+ "str d8, [x16], #0x8\n"
+ "str d12, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "str d20, [x21], #0x8\n"
+ "str d24, [x20], #0x8\n"
+ "str d28, [x19], #0x8\n"
+ "tbz x17, #2, 154f\n"
+ "st1 { v8.s }[2], [x16], #0x4\n"
+ "st1 { v12.s }[2], [x23], #0x4\n"
+ "st1 { v16.s }[2], [x22], #0x4\n"
+ "st1 { v20.s }[2], [x21], #0x4\n"
+ "st1 { v24.s }[2], [x20], #0x4\n"
+ "st1 { v28.s }[2], [x19], #0x4\n"
+ "tbz x17, #1, 153f\n"
+ "st1 { v8.h }[6], [x16], #0x2\n"
+ "st1 { v12.h }[6], [x23], #0x2\n"
+ "st1 { v16.h }[6], [x22], #0x2\n"
+ "st1 { v20.h }[6], [x21], #0x2\n"
+ "st1 { v24.h }[6], [x20], #0x2\n"
+ "st1 { v28.h }[6], [x19], #0x2\n"
+ "tbz x17, #0, 160f\n"
+ "st1 { v8.b }[14], [x16]\n"
+ "st1 { v12.b }[14], [x23]\n"
+ "st1 { v16.b }[14], [x22]\n"
+ "st1 { v20.b }[14], [x21]\n"
+ "st1 { v24.b }[14], [x20]\n"
+ "st1 { v28.b }[14], [x19]\n"
"b 160f\n"
"153:" // Height 6: Partial direct writeback: partial_1_12
- "tbz x16, #0, 160f\n"
- "st1 { v8.b }[12], [x17]\n"
- "st1 { v12.b }[12], [x24]\n"
- "st1 { v16.b }[12], [x23]\n"
- "st1 { v20.b }[12], [x22]\n"
- "st1 { v24.b }[12], [x21]\n"
- "st1 { v28.b }[12], [x20]\n"
+ "tbz x17, #0, 160f\n"
+ "st1 { v8.b }[12], [x16]\n"
+ "st1 { v12.b }[12], [x23]\n"
+ "st1 { v16.b }[12], [x22]\n"
+ "st1 { v20.b }[12], [x21]\n"
+ "st1 { v24.b }[12], [x20]\n"
+ "st1 { v28.b }[12], [x19]\n"
"b 160f\n"
"154:" // Height 6: Partial direct writeback: partial_2_8
- "tbz x16, #1, 155f\n"
- "st1 { v8.h }[4], [x17], #0x2\n"
- "st1 { v12.h }[4], [x24], #0x2\n"
- "st1 { v16.h }[4], [x23], #0x2\n"
- "st1 { v20.h }[4], [x22], #0x2\n"
- "st1 { v24.h }[4], [x21], #0x2\n"
- "st1 { v28.h }[4], [x20], #0x2\n"
- "tbz x16, #0, 160f\n"
- "st1 { v8.b }[10], [x17]\n"
- "st1 { v12.b }[10], [x24]\n"
- "st1 { v16.b }[10], [x23]\n"
- "st1 { v20.b }[10], [x22]\n"
- "st1 { v24.b }[10], [x21]\n"
- "st1 { v28.b }[10], [x20]\n"
+ "tbz x17, #1, 155f\n"
+ "st1 { v8.h }[4], [x16], #0x2\n"
+ "st1 { v12.h }[4], [x23], #0x2\n"
+ "st1 { v16.h }[4], [x22], #0x2\n"
+ "st1 { v20.h }[4], [x21], #0x2\n"
+ "st1 { v24.h }[4], [x20], #0x2\n"
+ "st1 { v28.h }[4], [x19], #0x2\n"
+ "tbz x17, #0, 160f\n"
+ "st1 { v8.b }[10], [x16]\n"
+ "st1 { v12.b }[10], [x23]\n"
+ "st1 { v16.b }[10], [x22]\n"
+ "st1 { v20.b }[10], [x21]\n"
+ "st1 { v24.b }[10], [x20]\n"
+ "st1 { v28.b }[10], [x19]\n"
"b 160f\n"
"155:" // Height 6: Partial direct writeback: partial_1_8
- "tbz x16, #0, 160f\n"
- "st1 { v8.b }[8], [x17]\n"
- "st1 { v12.b }[8], [x24]\n"
- "st1 { v16.b }[8], [x23]\n"
- "st1 { v20.b }[8], [x22]\n"
- "st1 { v24.b }[8], [x21]\n"
- "st1 { v28.b }[8], [x20]\n"
+ "tbz x17, #0, 160f\n"
+ "st1 { v8.b }[8], [x16]\n"
+ "st1 { v12.b }[8], [x23]\n"
+ "st1 { v16.b }[8], [x22]\n"
+ "st1 { v20.b }[8], [x21]\n"
+ "st1 { v24.b }[8], [x20]\n"
+ "st1 { v28.b }[8], [x19]\n"
"b 160f\n"
"156:" // Height 6: Partial direct writeback: partial_4_0
- "tbz x16, #2, 158f\n"
- "str s8, [x17], #0x4\n"
- "str s12, [x24], #0x4\n"
- "str s16, [x23], #0x4\n"
- "str s20, [x22], #0x4\n"
- "str s24, [x21], #0x4\n"
- "str s28, [x20], #0x4\n"
- "tbz x16, #1, 157f\n"
- "st1 { v8.h }[2], [x17], #0x2\n"
- "st1 { v12.h }[2], [x24], #0x2\n"
- "st1 { v16.h }[2], [x23], #0x2\n"
- "st1 { v20.h }[2], [x22], #0x2\n"
- "st1 { v24.h }[2], [x21], #0x2\n"
- "st1 { v28.h }[2], [x20], #0x2\n"
- "tbz x16, #0, 160f\n"
- "st1 { v8.b }[6], [x17]\n"
- "st1 { v12.b }[6], [x24]\n"
- "st1 { v16.b }[6], [x23]\n"
- "st1 { v20.b }[6], [x22]\n"
- "st1 { v24.b }[6], [x21]\n"
- "st1 { v28.b }[6], [x20]\n"
+ "tbz x17, #2, 158f\n"
+ "str s8, [x16], #0x4\n"
+ "str s12, [x23], #0x4\n"
+ "str s16, [x22], #0x4\n"
+ "str s20, [x21], #0x4\n"
+ "str s24, [x20], #0x4\n"
+ "str s28, [x19], #0x4\n"
+ "tbz x17, #1, 157f\n"
+ "st1 { v8.h }[2], [x16], #0x2\n"
+ "st1 { v12.h }[2], [x23], #0x2\n"
+ "st1 { v16.h }[2], [x22], #0x2\n"
+ "st1 { v20.h }[2], [x21], #0x2\n"
+ "st1 { v24.h }[2], [x20], #0x2\n"
+ "st1 { v28.h }[2], [x19], #0x2\n"
+ "tbz x17, #0, 160f\n"
+ "st1 { v8.b }[6], [x16]\n"
+ "st1 { v12.b }[6], [x23]\n"
+ "st1 { v16.b }[6], [x22]\n"
+ "st1 { v20.b }[6], [x21]\n"
+ "st1 { v24.b }[6], [x20]\n"
+ "st1 { v28.b }[6], [x19]\n"
"b 160f\n"
"157:" // Height 6: Partial direct writeback: partial_1_4
- "tbz x16, #0, 160f\n"
- "st1 { v8.b }[4], [x17]\n"
- "st1 { v12.b }[4], [x24]\n"
- "st1 { v16.b }[4], [x23]\n"
- "st1 { v20.b }[4], [x22]\n"
- "st1 { v24.b }[4], [x21]\n"
- "st1 { v28.b }[4], [x20]\n"
+ "tbz x17, #0, 160f\n"
+ "st1 { v8.b }[4], [x16]\n"
+ "st1 { v12.b }[4], [x23]\n"
+ "st1 { v16.b }[4], [x22]\n"
+ "st1 { v20.b }[4], [x21]\n"
+ "st1 { v24.b }[4], [x20]\n"
+ "st1 { v28.b }[4], [x19]\n"
"b 160f\n"
"158:" // Height 6: Partial direct writeback: partial_2_0
- "tbz x16, #1, 159f\n"
- "str h8, [x17], #0x2\n"
- "str h12, [x24], #0x2\n"
- "str h16, [x23], #0x2\n"
- "str h20, [x22], #0x2\n"
- "str h24, [x21], #0x2\n"
- "str h28, [x20], #0x2\n"
- "tbz x16, #0, 160f\n"
- "st1 { v8.b }[2], [x17]\n"
- "st1 { v12.b }[2], [x24]\n"
- "st1 { v16.b }[2], [x23]\n"
- "st1 { v20.b }[2], [x22]\n"
- "st1 { v24.b }[2], [x21]\n"
- "st1 { v28.b }[2], [x20]\n"
+ "tbz x17, #1, 159f\n"
+ "str h8, [x16], #0x2\n"
+ "str h12, [x23], #0x2\n"
+ "str h16, [x22], #0x2\n"
+ "str h20, [x21], #0x2\n"
+ "str h24, [x20], #0x2\n"
+ "str h28, [x19], #0x2\n"
+ "tbz x17, #0, 160f\n"
+ "st1 { v8.b }[2], [x16]\n"
+ "st1 { v12.b }[2], [x23]\n"
+ "st1 { v16.b }[2], [x22]\n"
+ "st1 { v20.b }[2], [x21]\n"
+ "st1 { v24.b }[2], [x20]\n"
+ "st1 { v28.b }[2], [x19]\n"
"b 160f\n"
"159:" // Height 6: Partial direct writeback: partial_1_0
- "str b8, [x17, #0x0]\n"
- "str b12, [x24, #0x0]\n"
- "str b16, [x23, #0x0]\n"
- "str b20, [x22, #0x0]\n"
- "str b24, [x21, #0x0]\n"
- "str b28, [x20, #0x0]\n"
+ "str b8, [x16, #0x0]\n"
+ "str b12, [x23, #0x0]\n"
+ "str b16, [x22, #0x0]\n"
+ "str b20, [x21, #0x0]\n"
+ "str b24, [x20, #0x0]\n"
+ "str b28, [x19, #0x0]\n"
"160:" // Height 6: Partial direct writeback: Done
"b 162f\n"
"161:" // Height 6: Full writeback
- "str q8, [x17, #0x0]\n"
- "add x17, x17, #0x10\n"
- "str q12, [x24, #0x0]\n"
- "str q16, [x23, #0x0]\n"
- "str q20, [x22, #0x0]\n"
- "str q24, [x21, #0x0]\n"
- "str q28, [x20, #0x0]\n"
+ "str q8, [x16, #0x0]\n"
+ "add x16, x16, #0x10\n"
+ "str q12, [x23, #0x0]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q20, [x21, #0x0]\n"
+ "str q24, [x20, #0x0]\n"
+ "str q28, [x19, #0x0]\n"
"162:" // Height 6: Writeback done
- "subs x16, x16, #0x10\n"
+ "subs x17, x17, #0x10\n"
"bgt 137b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 164f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 163f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"163:" // Update direct input
- "mov x20, #0x6\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x6\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"164:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [flags] "r" (flags), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_multiplier_ptr] "I" (offsetof(KernelArgs, multiplier_ptr)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_shift_ptr] "I" (offsetof(KernelArgs, shift_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
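
(Aside, not part of the diff: the hunks above only reorder the requantization epilogue of this kernel, the sqrdmulh / srshl / add / smin / smax / uzp1 run, back to the older x19-era scheduling; the values computed are unchanged. As a reading aid, the scalar C++ sketch below shows what one lane of that epilogue computes per output element. It is our own illustrative rendering, not code from the library: the helper names are invented, and the signed "shift correction" pass (the and / sshr / sqadd instructions that precede the "no shift correction" labels) is omitted for brevity.)

    #include <algorithm>
    #include <cstdint>

    // Saturating rounding doubling multiply-high, matching AArch64 SQRDMULH.
    static int32_t sqrdmulh_scalar(int32_t a, int32_t b)
    {
        if (a == INT32_MIN && b == INT32_MIN) return INT32_MAX; // the only saturating case
        const int64_t prod = static_cast<int64_t>(a) * static_cast<int64_t>(b);
        return static_cast<int32_t>((2 * prod + (INT64_C(1) << 30)) >> 31);
    }

    // Rounding shift right, matching SRSHL with a negative shift operand
    // (the kernel keeps the negated shift in v0..v3, so srshl shifts right).
    static int32_t rounding_shr_scalar(int32_t a, int32_t shift)
    {
        if (shift <= 0) return a;
        return static_cast<int32_t>((static_cast<int64_t>(a) + (INT64_C(1) << (shift - 1))) >> shift);
    }

    // One output element of the epilogue. minval/maxval come from Requantize32
    // and are assumed to lie within int8 range, so the final cast cannot clip.
    static int8_t requantize_scalar(int32_t acc, int32_t col_bias, int32_t mul,
                                    int32_t shift, int32_t c_offset,
                                    int32_t minval, int32_t maxval)
    {
        int32_t v = acc + col_bias;        // "add v8.4s, v8.4s, v0.4s"   (column bias)
        v = sqrdmulh_scalar(v, mul);       // "sqrdmulh v8.4s, v8.4s, v4.4s"
        v = rounding_shr_scalar(v, shift); // "srshl v8.4s, v8.4s, v0.4s"
        v += c_offset;                     // "add v8.4s, v8.4s, v4.4s"   (c_offset)
        v = std::min(v, maxval);           // "smin v8.4s, v8.4s, v6.4s"
        v = std::max(v, minval);           // "smax v8.4s, v8.4s, v5.4s"
        return static_cast<int8_t>(v);     // "uzp1 ... .8h" + "uzp1 ... .16b" narrowing
    }

(The interleaving visible in the diff, where the adds of one register block overlap the clamps of another, is presumably a latency-hiding choice for the in-order Cortex-A55 pipeline this variant targets; each lane still follows the sequence above.)
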
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16/generic.cpp
index 598d1524e8..f503f40b0c 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -95,168 +95,168 @@ void a64_hybrid_s8qs_dot_6x16 (
"cmp %x[M], #0x2\n"
"bgt 55f\n"
"beq 28f\n"
- "mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x11, %x[col_bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x9, %x[output_ptr]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"2:" // Height 1: Column loop
"movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
"movi v10.4s, #0x0\n"
"movi v11.4s, #0x0\n"
"3:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"4:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 5f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 6f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 6f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
"b 6f\n"
"5:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"6:" // Height 1: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 9f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q6, [x9, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q6, [x28, #0x0]\n"
+ "cmp x26, #0x20\n"
"blt 8f\n"
"7:" // Height 1: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr q6, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "add x25, x25, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "ldr q7, [x9, #0x30]\n"
+ "ldr q6, [x28, #0x20]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
- "ldr q6, [x9, #0x40]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "cmp x26, #0x20\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
- "ldr q7, [x9, #0x50]\n"
+ "ldr q6, [x28, #0x40]\n"
+ "ldr q7, [x28, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
- "ldr q6, [x9, #0x60]\n"
+ "ldr q6, [x28, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
- "ldr q7, [x9, #0x70]\n"
+ "ldr q7, [x28, #0x70]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
- "ldr q6, [x9, #0x80]\n"
+ "ldr q6, [x28, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
- "ldr q7, [x9, #0x90]\n"
+ "ldr q7, [x28, #0x90]\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
- "ldr q6, [x9, #0xa0]\n"
+ "ldr q6, [x28, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
- "ldr q7, [x9, #0xb0]\n"
+ "ldr q7, [x28, #0xb0]\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
- "ldr q6, [x9, #0xc0]\n"
+ "ldr q6, [x28, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
- "ldr q7, [x9, #0xd0]\n"
+ "ldr q7, [x28, #0xd0]\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
- "ldr q6, [x9, #0xe0]\n"
+ "ldr q6, [x28, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
- "ldr q7, [x9, #0xf0]\n"
- "sub x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
+ "ldr q7, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
+ "ldr q6, [x28, #0x0]\n"
".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
- "ldr q0, [x26, #0x0]\n"
- "cmp x27, #0x20\n"
- "add x9, x9, #0x100\n"
- "ldr q6, [x9, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q0, [x25, #0x0]\n"
"bge 7b\n"
"8:" // Height 1: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr q6, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "ldr q7, [x9, #0x30]\n"
+ "ldr q6, [x28, #0x20]\n"
+ "add x25, x25, #0x10\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
- "ldr q6, [x9, #0x40]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "ldr q6, [x28, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
- "ldr q7, [x9, #0x50]\n"
+ "ldr q7, [x28, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
- "ldr q6, [x9, #0x60]\n"
+ "ldr q6, [x28, #0x60]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
- "ldr q7, [x9, #0x70]\n"
+ "ldr q7, [x28, #0x70]\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
- "ldr q6, [x9, #0x80]\n"
+ "ldr q6, [x28, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
- "ldr q7, [x9, #0x90]\n"
+ "ldr q7, [x28, #0x90]\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
- "ldr q6, [x9, #0xa0]\n"
+ "ldr q6, [x28, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
- "ldr q7, [x9, #0xb0]\n"
+ "ldr q7, [x28, #0xb0]\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
- "ldr q6, [x9, #0xc0]\n"
+ "ldr q6, [x28, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
- "ldr q7, [x9, #0xd0]\n"
+ "ldr q7, [x28, #0xd0]\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
- "ldr q6, [x9, #0xe0]\n"
+ "ldr q6, [x28, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
- "ldr q7, [x9, #0xf0]\n"
- "add x26, x26, #0x10\n"
- "sub x27, x27, #0x10\n"
+ "ldr q7, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "add x9, x9, #0x100\n"
"9:" // Height 1: Multiply loop: Main loop skip
- "cbz x27, 14f\n"
- "cmp x27, #0x4\n"
+ "cbz x26, 14f\n"
+ "cmp x26, #0x4\n"
"blt 11f\n"
"10:" // Height 1: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr q6, [x9, #0x0]\n"
+ "ldr s0, [x25], #0x4\n"
+ "sub x26, x26, #0x4\n"
+ "ldr q6, [x28, #0x0]\n"
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "sub x27, x27, #0x4\n"
- "ldr q7, [x9, #0x10]\n"
- "ldr q6, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "cmp x26, #0x4\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "cmp x27, #0x4\n"
- "ldr q7, [x9, #0x30]\n"
+ "ldr q6, [x28, #0x20]\n"
+ "ldr q7, [x28, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
+ "add x28, x28, #0x40\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
- "add x9, x9, #0x40\n"
"bge 10b\n"
+ "cbz x26, 14f\n"
"11:" // Height 1: Multiply loop: Skip odd blocks
- "cbz x27, 14f\n"
- "tbz x27, #1, 12f\n"
- "ldr h0, [x26], #0x2\n"
- "tbz x27, #0, 13f\n"
- "ld1 { v0.b }[2], [x26]\n"
+ "tbz x26, #1, 12f\n"
+ "ldr h0, [x25], #0x2\n"
+ "tbz x26, #0, 13f\n"
+ "ld1 { v0.b }[2], [x25]\n"
"b 13f\n"
"12:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x26, #0x0]\n"
+ "ldr b0, [x25, #0x0]\n"
"13:" // Height 1: Multiply loop: Ragged operand read: Done
- "ldr q6, [x9, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q6, [x28, #0x0]\n"
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "ldr q6, [x28, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "ldr q6, [x9, #0x20]\n"
- "ldr q7, [x9, #0x30]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
- "add x9, x9, #0x40\n"
"14:" // Height 1: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 4b\n"
- "ldr q0, [x14, #0x0]\n"
- "ldr q1, [x14, #0x10]\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
+ "ldr q0, [x11, #0x0]\n"
"add v8.4s, v8.4s, v0.4s\n"
+ "ldr q1, [x11, #0x10]\n"
+ "ldr q2, [x11, #0x20]\n"
"add v9.4s, v9.4s, v1.4s\n"
- "ldr q2, [x14, #0x20]\n"
- "ldr q3, [x14, #0x30]\n"
+ "ldr q3, [x11, #0x30]\n"
+ "add x11, x11, #0x40\n"
"add v10.4s, v10.4s, v2.4s\n"
"add v11.4s, v11.4s, v3.4s\n"
- "prfm pstl1keep, [x11, #0x0]\n"
- "add x14, x14, #0x40\n"
"tbz %x[flags], #4, 15f\n"
"ldr q0, [x12, #0x0]\n"
"ldr q4, [x13, #0x0]\n"
@@ -265,20 +265,20 @@ void a64_hybrid_s8qs_dot_6x16 (
"ldr q2, [x12, #0x20]\n"
"ldr q6, [x13, #0x20]\n"
"ldr q3, [x12, #0x30]\n"
- "ldr q7, [x13, #0x30]\n"
"add x12, x12, #0x40\n"
+ "ldr q7, [x13, #0x30]\n"
"add x13, x13, #0x40\n"
"b 16f\n"
"15:" // Height 1: per layer parameters
- "add x25, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x25]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x25]\n"
+ "add x24, %x[qp], %[per_layer_right_shift]\n"
+ "ld1r { v0.4s }, [x24]\n"
"mov v1.16b, v0.16b\n"
- "mov v5.16b, v4.16b\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v4.4s }, [x24]\n"
"mov v2.16b, v0.16b\n"
- "mov v6.16b, v4.16b\n"
"mov v3.16b, v0.16b\n"
+ "mov v5.16b, v4.16b\n"
+ "mov v6.16b, v4.16b\n"
"mov v7.16b, v4.16b\n"
"16:" // Height 1: parameters loaded
"sqrdmulh v8.4s, v8.4s, v4.4s\n"
@@ -293,98 +293,98 @@ void a64_hybrid_s8qs_dot_6x16 (
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v8.4s, v8.4s, v4.4s\n"
"sqadd v9.4s, v9.4s, v5.4s\n"
"sqadd v10.4s, v10.4s, v6.4s\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v11.4s, v11.4s, v7.4s\n"
"17:" // Height 1: no shift correction
- "add x25, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x25]\n"
"srshl v8.4s, v8.4s, v0.4s\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "ld1r { v4.4s }, [x24]\n"
"srshl v9.4s, v9.4s, v1.4s\n"
+ "add x24, %x[qp], %[minval]\n"
"srshl v10.4s, v10.4s, v2.4s\n"
+ "ld1r { v5.4s }, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
"srshl v11.4s, v11.4s, v3.4s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x25]\n"
+ "ld1r { v6.4s }, [x24]\n"
+ "cmp x10, #0x10\n"
"add v8.4s, v8.4s, v4.4s\n"
"add v9.4s, v9.4s, v4.4s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x25]\n"
"add v10.4s, v10.4s, v4.4s\n"
"add v11.4s, v11.4s, v4.4s\n"
- "cmp x10, #0x10\n"
"smin v8.4s, v8.4s, v6.4s\n"
"smin v9.4s, v9.4s, v6.4s\n"
"smin v10.4s, v10.4s, v6.4s\n"
- "smin v11.4s, v11.4s, v6.4s\n"
"smax v8.4s, v8.4s, v5.4s\n"
"smax v9.4s, v9.4s, v5.4s\n"
"smax v10.4s, v10.4s, v5.4s\n"
- "smax v11.4s, v11.4s, v5.4s\n"
+ "smin v11.4s, v11.4s, v6.4s\n"
"uzp1 v8.8h, v8.8h, v9.8h\n"
+ "smax v11.4s, v11.4s, v5.4s\n"
"uzp1 v9.8h, v10.8h, v11.8h\n"
"uzp1 v8.16b, v8.16b, v9.16b\n"
"bge 26f\n"
"tbz x10, #3, 21f\n"
- "str d8, [x11], #0x8\n"
+ "str d8, [x9], #0x8\n"
"tbz x10, #2, 19f\n"
- "st1 { v8.s }[2], [x11], #0x4\n"
+ "st1 { v8.s }[2], [x9], #0x4\n"
"tbz x10, #1, 18f\n"
- "st1 { v8.h }[6], [x11], #0x2\n"
+ "st1 { v8.h }[6], [x9], #0x2\n"
"tbz x10, #0, 25f\n"
- "st1 { v8.b }[14], [x11]\n"
+ "st1 { v8.b }[14], [x9]\n"
"b 25f\n"
"18:" // Height 1: Partial direct writeback: partial_1_12
"tbz x10, #0, 25f\n"
- "st1 { v8.b }[12], [x11]\n"
+ "st1 { v8.b }[12], [x9]\n"
"b 25f\n"
"19:" // Height 1: Partial direct writeback: partial_2_8
"tbz x10, #1, 20f\n"
- "st1 { v8.h }[4], [x11], #0x2\n"
+ "st1 { v8.h }[4], [x9], #0x2\n"
"tbz x10, #0, 25f\n"
- "st1 { v8.b }[10], [x11]\n"
+ "st1 { v8.b }[10], [x9]\n"
"b 25f\n"
"20:" // Height 1: Partial direct writeback: partial_1_8
"tbz x10, #0, 25f\n"
- "st1 { v8.b }[8], [x11]\n"
+ "st1 { v8.b }[8], [x9]\n"
"b 25f\n"
"21:" // Height 1: Partial direct writeback: partial_4_0
"tbz x10, #2, 23f\n"
- "str s8, [x11], #0x4\n"
+ "str s8, [x9], #0x4\n"
"tbz x10, #1, 22f\n"
- "st1 { v8.h }[2], [x11], #0x2\n"
+ "st1 { v8.h }[2], [x9], #0x2\n"
"tbz x10, #0, 25f\n"
- "st1 { v8.b }[6], [x11]\n"
+ "st1 { v8.b }[6], [x9]\n"
"b 25f\n"
"22:" // Height 1: Partial direct writeback: partial_1_4
"tbz x10, #0, 25f\n"
- "st1 { v8.b }[4], [x11]\n"
+ "st1 { v8.b }[4], [x9]\n"
"b 25f\n"
"23:" // Height 1: Partial direct writeback: partial_2_0
"tbz x10, #1, 24f\n"
- "str h8, [x11], #0x2\n"
+ "str h8, [x9], #0x2\n"
"tbz x10, #0, 25f\n"
- "st1 { v8.b }[2], [x11]\n"
+ "st1 { v8.b }[2], [x9]\n"
"b 25f\n"
"24:" // Height 1: Partial direct writeback: partial_1_0
- "str b8, [x11, #0x0]\n"
+ "str b8, [x9, #0x0]\n"
"25:" // Height 1: Partial direct writeback: Done
"b 27f\n"
"26:" // Height 1: Full writeback
- "str q8, [x11, #0x0]\n"
- "add x11, x11, #0x10\n"
+ "str q8, [x9, #0x0]\n"
+ "add x9, x9, #0x10\n"
"27:" // Height 1: Writeback done
"subs x10, x10, #0x10\n"
"bgt 2b\n"
"b 164f\n"
"28:" // Height 2
- "mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
+ "mov x11, %x[col_bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"29:" // Height 2: Column loop
"movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
@@ -395,215 +395,215 @@ void a64_hybrid_s8qs_dot_6x16 (
"movi v14.4s, #0x0\n"
"movi v15.4s, #0x0\n"
"30:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"31:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 32f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 33f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 33f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
"b 33f\n"
"32:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
"33:" // Height 2: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 36f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q6, [x9, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x20\n"
+ "ldr q6, [x28, #0x0]\n"
"blt 35f\n"
"34:" // Height 2: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "add x25, x25, #0x10\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr q6, [x9, #0x20]\n"
- "sub x27, x27, #0x10\n"
+ "ldr q6, [x28, #0x20]\n"
+ "add x24, x24, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr q7, [x9, #0x30]\n"
- "add x26, x26, #0x10\n"
+ "ldr q7, [x28, #0x30]\n"
+ "cmp x26, #0x20\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "ldr q6, [x9, #0x40]\n"
- "add x25, x25, #0x10\n"
+ "ldr q6, [x28, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
- "ldr q7, [x9, #0x50]\n"
- "cmp x27, #0x20\n"
+ "ldr q7, [x28, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
- "ldr q6, [x9, #0x60]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q6, [x28, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
- "ldr q7, [x9, #0x70]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q7, [x28, #0x70]\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
- "ldr q6, [x9, #0x80]\n"
+ "ldr q6, [x28, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
- "ldr q7, [x9, #0x90]\n"
+ "ldr q7, [x28, #0x90]\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
- "ldr q6, [x9, #0xa0]\n"
+ "ldr q6, [x28, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
- "ldr q7, [x9, #0xb0]\n"
+ "ldr q7, [x28, #0xb0]\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
- "ldr q6, [x9, #0xc0]\n"
+ "ldr q6, [x28, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
- "ldr q7, [x9, #0xd0]\n"
+ "ldr q7, [x28, #0xd0]\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
- "ldr q6, [x9, #0xe0]\n"
+ "ldr q6, [x28, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
- "ldr q7, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q7, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
- "ldr q6, [x9, #0x0]\n"
+ "ldr q6, [x28, #0x0]\n"
".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n"
- "ldr q1, [x25, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q1, [x24, #0x0]\n"
"bge 34b\n"
"35:" // Height 2: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr q6, [x9, #0x20]\n"
- "add x26, x26, #0x10\n"
+ "ldr q6, [x28, #0x20]\n"
+ "add x25, x25, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr q7, [x9, #0x30]\n"
- "add x25, x25, #0x10\n"
+ "ldr q7, [x28, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "ldr q6, [x9, #0x40]\n"
- "sub x27, x27, #0x10\n"
+ "ldr q6, [x28, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
- "ldr q7, [x9, #0x50]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q7, [x28, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
- "ldr q6, [x9, #0x60]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q6, [x28, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
- "ldr q7, [x9, #0x70]\n"
+ "ldr q7, [x28, #0x70]\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
- "ldr q6, [x9, #0x80]\n"
+ "ldr q6, [x28, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
- "ldr q7, [x9, #0x90]\n"
+ "ldr q7, [x28, #0x90]\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
- "ldr q6, [x9, #0xa0]\n"
+ "ldr q6, [x28, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
- "ldr q7, [x9, #0xb0]\n"
+ "ldr q7, [x28, #0xb0]\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
- "ldr q6, [x9, #0xc0]\n"
+ "ldr q6, [x28, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
- "ldr q7, [x9, #0xd0]\n"
+ "ldr q7, [x28, #0xd0]\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
- "ldr q6, [x9, #0xe0]\n"
+ "ldr q6, [x28, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
- "ldr q7, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q7, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n"
"36:" // Height 2: Multiply loop: Main loop skip
- "cbz x27, 41f\n"
- "cmp x27, #0x4\n"
+ "cbz x26, 41f\n"
+ "cmp x26, #0x4\n"
"blt 38f\n"
"37:" // Height 2: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr s1, [x25], #0x4\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
- "ldr q6, [x9, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr s0, [x25], #0x4\n"
+ "sub x26, x26, #0x4\n"
+ "ldr s1, [x24], #0x4\n"
+ "cmp x26, #0x4\n"
+ "ldr q6, [x28, #0x0]\n"
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x28, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr q6, [x9, #0x20]\n"
+ "ldr q6, [x28, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr q7, [x9, #0x30]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "add x9, x9, #0x40\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
"bge 37b\n"
+ "cbz x26, 41f\n"
"38:" // Height 2: Multiply loop: Skip odd blocks
- "cbz x27, 41f\n"
- "tbz x27, #1, 39f\n"
- "ldr h0, [x26], #0x2\n"
- "ldr h1, [x25], #0x2\n"
- "tbz x27, #0, 40f\n"
- "ld1 { v0.b }[2], [x26]\n"
- "ld1 { v1.b }[2], [x25]\n"
+ "tbz x26, #1, 39f\n"
+ "ldr h0, [x25], #0x2\n"
+ "ldr h1, [x24], #0x2\n"
+ "tbz x26, #0, 40f\n"
+ "ld1 { v0.b }[2], [x25]\n"
+ "ld1 { v1.b }[2], [x24]\n"
"b 40f\n"
"39:" // Height 2: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x26, #0x0]\n"
- "ldr b1, [x25, #0x0]\n"
+ "ldr b0, [x25, #0x0]\n"
+ "ldr b1, [x24, #0x0]\n"
"40:" // Height 2: Multiply loop: Ragged operand read: Done
- "ldr q6, [x9, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q6, [x28, #0x0]\n"
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x28, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr q6, [x9, #0x20]\n"
+ "ldr q6, [x28, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr q7, [x9, #0x30]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "add x9, x9, #0x40\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
"41:" // Height 2: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 31b\n"
- "ldr q0, [x14, #0x0]\n"
- "ldr q1, [x14, #0x10]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
+ "add x23, x9, x19\n"
+ "ldr q0, [x11, #0x0]\n"
"add v8.4s, v8.4s, v0.4s\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "add v12.4s, v12.4s, v0.4s\n"
+ "ldr q1, [x11, #0x10]\n"
+ "ldr q2, [x11, #0x20]\n"
"add v9.4s, v9.4s, v1.4s\n"
- "ldr q2, [x14, #0x20]\n"
- "ldr q3, [x14, #0x30]\n"
+ "ldr q3, [x11, #0x30]\n"
+ "add x11, x11, #0x40\n"
"add v10.4s, v10.4s, v2.4s\n"
- "add v11.4s, v11.4s, v3.4s\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x11, x20\n"
- "prfm pstl1keep, [x11, #0x0]\n"
- "add v12.4s, v12.4s, v0.4s\n"
- "prfm pstl1keep, [x24, #0x0]\n"
"add v13.4s, v13.4s, v1.4s\n"
"add v14.4s, v14.4s, v2.4s\n"
- "add x14, x14, #0x40\n"
+ "add v11.4s, v11.4s, v3.4s\n"
"add v15.4s, v15.4s, v3.4s\n"
"tbz %x[flags], #4, 42f\n"
"ldr q0, [x12, #0x0]\n"
@@ -613,20 +613,20 @@ void a64_hybrid_s8qs_dot_6x16 (
"ldr q2, [x12, #0x20]\n"
"ldr q6, [x13, #0x20]\n"
"ldr q3, [x12, #0x30]\n"
- "ldr q7, [x13, #0x30]\n"
"add x12, x12, #0x40\n"
+ "ldr q7, [x13, #0x30]\n"
"add x13, x13, #0x40\n"
"b 43f\n"
"42:" // Height 2: per layer parameters
- "add x25, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x25]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x25]\n"
+ "add x24, %x[qp], %[per_layer_right_shift]\n"
+ "ld1r { v0.4s }, [x24]\n"
"mov v1.16b, v0.16b\n"
- "mov v5.16b, v4.16b\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v4.4s }, [x24]\n"
"mov v2.16b, v0.16b\n"
- "mov v6.16b, v4.16b\n"
"mov v3.16b, v0.16b\n"
+ "mov v5.16b, v4.16b\n"
+ "mov v6.16b, v4.16b\n"
"mov v7.16b, v4.16b\n"
"43:" // Height 2: parameters loaded
"sqrdmulh v8.4s, v8.4s, v4.4s\n"
@@ -641,149 +641,149 @@ void a64_hybrid_s8qs_dot_6x16 (
"and v4.16b, v8.16b, v0.16b\n"
"and v5.16b, v9.16b, v1.16b\n"
"and v6.16b, v10.16b, v2.16b\n"
- "and v7.16b, v11.16b, v3.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v8.4s, v8.4s, v4.4s\n"
"sqadd v9.4s, v9.4s, v5.4s\n"
"sqadd v10.4s, v10.4s, v6.4s\n"
- "sqadd v11.4s, v11.4s, v7.4s\n"
+ "and v7.16b, v11.16b, v3.16b\n"
"and v4.16b, v12.16b, v0.16b\n"
"and v5.16b, v13.16b, v1.16b\n"
- "and v6.16b, v14.16b, v2.16b\n"
- "and v7.16b, v15.16b, v3.16b\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
+ "sqadd v11.4s, v11.4s, v7.4s\n"
"sqadd v12.4s, v12.4s, v4.4s\n"
"sqadd v13.4s, v13.4s, v5.4s\n"
+ "and v6.16b, v14.16b, v2.16b\n"
+ "and v7.16b, v15.16b, v3.16b\n"
+ "sshr v6.4s, v6.4s, #0x1f\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v14.4s, v14.4s, v6.4s\n"
"sqadd v15.4s, v15.4s, v7.4s\n"
"44:" // Height 2: no shift correction
- "add x25, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x25]\n"
"srshl v8.4s, v8.4s, v0.4s\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "ld1r { v4.4s }, [x24]\n"
"srshl v9.4s, v9.4s, v1.4s\n"
+ "add x24, %x[qp], %[minval]\n"
"srshl v10.4s, v10.4s, v2.4s\n"
+ "ld1r { v5.4s }, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
"srshl v11.4s, v11.4s, v3.4s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x25]\n"
+ "ld1r { v6.4s }, [x24]\n"
+ "cmp x10, #0x10\n"
"srshl v12.4s, v12.4s, v0.4s\n"
"srshl v13.4s, v13.4s, v1.4s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x25]\n"
- "srshl v14.4s, v14.4s, v2.4s\n"
- "srshl v15.4s, v15.4s, v3.4s\n"
- "cmp x10, #0x10\n"
"add v8.4s, v8.4s, v4.4s\n"
"add v9.4s, v9.4s, v4.4s\n"
"add v10.4s, v10.4s, v4.4s\n"
- "add v11.4s, v11.4s, v4.4s\n"
- "add v12.4s, v12.4s, v4.4s\n"
- "add v13.4s, v13.4s, v4.4s\n"
- "add v14.4s, v14.4s, v4.4s\n"
- "add v15.4s, v15.4s, v4.4s\n"
"smin v8.4s, v8.4s, v6.4s\n"
"smin v9.4s, v9.4s, v6.4s\n"
"smin v10.4s, v10.4s, v6.4s\n"
- "smin v11.4s, v11.4s, v6.4s\n"
- "smin v12.4s, v12.4s, v6.4s\n"
- "smin v13.4s, v13.4s, v6.4s\n"
- "smin v14.4s, v14.4s, v6.4s\n"
- "smin v15.4s, v15.4s, v6.4s\n"
"smax v8.4s, v8.4s, v5.4s\n"
"smax v9.4s, v9.4s, v5.4s\n"
"smax v10.4s, v10.4s, v5.4s\n"
+ "add v11.4s, v11.4s, v4.4s\n"
+ "add v12.4s, v12.4s, v4.4s\n"
+ "add v13.4s, v13.4s, v4.4s\n"
+ "smin v11.4s, v11.4s, v6.4s\n"
+ "smin v12.4s, v12.4s, v6.4s\n"
+ "smin v13.4s, v13.4s, v6.4s\n"
"smax v11.4s, v11.4s, v5.4s\n"
"smax v12.4s, v12.4s, v5.4s\n"
"smax v13.4s, v13.4s, v5.4s\n"
- "smax v14.4s, v14.4s, v5.4s\n"
- "smax v15.4s, v15.4s, v5.4s\n"
+ "srshl v14.4s, v14.4s, v2.4s\n"
+ "srshl v15.4s, v15.4s, v3.4s\n"
"uzp1 v8.8h, v8.8h, v9.8h\n"
"uzp1 v9.8h, v10.8h, v11.8h\n"
+ "add v14.4s, v14.4s, v4.4s\n"
+ "add v15.4s, v15.4s, v4.4s\n"
"uzp1 v12.8h, v12.8h, v13.8h\n"
- "uzp1 v13.8h, v14.8h, v15.8h\n"
+ "smin v14.4s, v14.4s, v6.4s\n"
+ "smin v15.4s, v15.4s, v6.4s\n"
"uzp1 v8.16b, v8.16b, v9.16b\n"
+ "smax v14.4s, v14.4s, v5.4s\n"
+ "smax v15.4s, v15.4s, v5.4s\n"
+ "uzp1 v13.8h, v14.8h, v15.8h\n"
"uzp1 v12.16b, v12.16b, v13.16b\n"
"bge 53f\n"
"tbz x10, #3, 48f\n"
- "str d8, [x11], #0x8\n"
- "str d12, [x24], #0x8\n"
+ "str d8, [x9], #0x8\n"
+ "str d12, [x23], #0x8\n"
"tbz x10, #2, 46f\n"
- "st1 { v8.s }[2], [x11], #0x4\n"
- "st1 { v12.s }[2], [x24], #0x4\n"
+ "st1 { v8.s }[2], [x9], #0x4\n"
+ "st1 { v12.s }[2], [x23], #0x4\n"
"tbz x10, #1, 45f\n"
- "st1 { v8.h }[6], [x11], #0x2\n"
- "st1 { v12.h }[6], [x24], #0x2\n"
+ "st1 { v8.h }[6], [x9], #0x2\n"
+ "st1 { v12.h }[6], [x23], #0x2\n"
"tbz x10, #0, 52f\n"
- "st1 { v8.b }[14], [x11]\n"
- "st1 { v12.b }[14], [x24]\n"
+ "st1 { v8.b }[14], [x9]\n"
+ "st1 { v12.b }[14], [x23]\n"
"b 52f\n"
"45:" // Height 2: Partial direct writeback: partial_1_12
"tbz x10, #0, 52f\n"
- "st1 { v8.b }[12], [x11]\n"
- "st1 { v12.b }[12], [x24]\n"
+ "st1 { v8.b }[12], [x9]\n"
+ "st1 { v12.b }[12], [x23]\n"
"b 52f\n"
"46:" // Height 2: Partial direct writeback: partial_2_8
"tbz x10, #1, 47f\n"
- "st1 { v8.h }[4], [x11], #0x2\n"
- "st1 { v12.h }[4], [x24], #0x2\n"
+ "st1 { v8.h }[4], [x9], #0x2\n"
+ "st1 { v12.h }[4], [x23], #0x2\n"
"tbz x10, #0, 52f\n"
- "st1 { v8.b }[10], [x11]\n"
- "st1 { v12.b }[10], [x24]\n"
+ "st1 { v8.b }[10], [x9]\n"
+ "st1 { v12.b }[10], [x23]\n"
"b 52f\n"
"47:" // Height 2: Partial direct writeback: partial_1_8
"tbz x10, #0, 52f\n"
- "st1 { v8.b }[8], [x11]\n"
- "st1 { v12.b }[8], [x24]\n"
+ "st1 { v8.b }[8], [x9]\n"
+ "st1 { v12.b }[8], [x23]\n"
"b 52f\n"
"48:" // Height 2: Partial direct writeback: partial_4_0
"tbz x10, #2, 50f\n"
- "str s8, [x11], #0x4\n"
- "str s12, [x24], #0x4\n"
+ "str s8, [x9], #0x4\n"
+ "str s12, [x23], #0x4\n"
"tbz x10, #1, 49f\n"
- "st1 { v8.h }[2], [x11], #0x2\n"
- "st1 { v12.h }[2], [x24], #0x2\n"
+ "st1 { v8.h }[2], [x9], #0x2\n"
+ "st1 { v12.h }[2], [x23], #0x2\n"
"tbz x10, #0, 52f\n"
- "st1 { v8.b }[6], [x11]\n"
- "st1 { v12.b }[6], [x24]\n"
+ "st1 { v8.b }[6], [x9]\n"
+ "st1 { v12.b }[6], [x23]\n"
"b 52f\n"
"49:" // Height 2: Partial direct writeback: partial_1_4
"tbz x10, #0, 52f\n"
- "st1 { v8.b }[4], [x11]\n"
- "st1 { v12.b }[4], [x24]\n"
+ "st1 { v8.b }[4], [x9]\n"
+ "st1 { v12.b }[4], [x23]\n"
"b 52f\n"
"50:" // Height 2: Partial direct writeback: partial_2_0
"tbz x10, #1, 51f\n"
- "str h8, [x11], #0x2\n"
- "str h12, [x24], #0x2\n"
+ "str h8, [x9], #0x2\n"
+ "str h12, [x23], #0x2\n"
"tbz x10, #0, 52f\n"
- "st1 { v8.b }[2], [x11]\n"
- "st1 { v12.b }[2], [x24]\n"
+ "st1 { v8.b }[2], [x9]\n"
+ "st1 { v12.b }[2], [x23]\n"
"b 52f\n"
"51:" // Height 2: Partial direct writeback: partial_1_0
- "str b8, [x11, #0x0]\n"
- "str b12, [x24, #0x0]\n"
+ "str b8, [x9, #0x0]\n"
+ "str b12, [x23, #0x0]\n"
"52:" // Height 2: Partial direct writeback: Done
"b 54f\n"
"53:" // Height 2: Full writeback
- "str q8, [x11, #0x0]\n"
- "add x11, x11, #0x10\n"
- "str q12, [x24, #0x0]\n"
+ "str q8, [x9, #0x0]\n"
+ "add x9, x9, #0x10\n"
+ "str q12, [x23, #0x0]\n"
"54:" // Height 2: Writeback done
"subs x10, x10, #0x10\n"
"bgt 29b\n"
"b 164f\n"
"55:" // Height 3
- "mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
+ "mov x11, %x[col_bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"56:" // Height 3: Column loop
"movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
@@ -798,180 +798,180 @@ void a64_hybrid_s8qs_dot_6x16 (
"movi v18.4s, #0x0\n"
"movi v19.4s, #0x0\n"
"57:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"58:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 59f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 60f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 60f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
"b 60f\n"
"59:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
"60:" // Height 3: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 63f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q6, [x9, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x20\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q6, [x28, #0x0]\n"
"blt 62f\n"
"61:" // Height 3: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "add x25, x25, #0x10\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "sub x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
- "ldr q6, [x9, #0x20]\n"
+ "ldr q6, [x28, #0x20]\n"
+ "add x23, x23, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "cmp x26, #0x20\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "ldr q7, [x9, #0x30]\n"
- "add x24, x24, #0x10\n"
+ "ldr q7, [x28, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "cmp x27, #0x20\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
- "ldr q6, [x9, #0x40]\n"
+ "ldr q6, [x28, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
- "ldr q7, [x9, #0x50]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
+ "ldr q7, [x28, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n"
- "ldr q6, [x9, #0x60]\n"
+ "ldr q6, [x28, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n"
- "ldr q7, [x9, #0x70]\n"
+ "ldr q7, [x28, #0x70]\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n"
- "ldr q6, [x9, #0x80]\n"
+ "ldr q6, [x28, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n"
- "ldr q7, [x9, #0x90]\n"
+ "ldr q7, [x28, #0x90]\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n"
- "ldr q6, [x9, #0xa0]\n"
+ "ldr q6, [x28, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n"
- "ldr q7, [x9, #0xb0]\n"
+ "ldr q7, [x28, #0xb0]\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n"
- "ldr q6, [x9, #0xc0]\n"
+ "ldr q6, [x28, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n"
- "ldr q7, [x9, #0xd0]\n"
+ "ldr q7, [x28, #0xd0]\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n"
- "ldr q6, [x9, #0xe0]\n"
+ "ldr q6, [x28, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n"
- "ldr q7, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q7, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n"
- "ldr q6, [x9, #0x0]\n"
+ "ldr q6, [x28, #0x0]\n"
".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n"
- "ldr q1, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
".inst 0x4fa2e8f3 // sdot v19.4s, v7.16b, v2.4b[3]\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q2, [x23, #0x0]\n"
"bge 61b\n"
"62:" // Height 3: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
- "ldr q6, [x9, #0x20]\n"
- ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
"add x24, x24, #0x10\n"
+ ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
+ "ldr q6, [x28, #0x20]\n"
+ "add x23, x23, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "ldr q7, [x9, #0x30]\n"
- "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "ldr q7, [x28, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
- "ldr q6, [x9, #0x40]\n"
+ "ldr q6, [x28, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
- "ldr q7, [x9, #0x50]\n"
+ "ldr q7, [x28, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n"
- "ldr q6, [x9, #0x60]\n"
+ "ldr q6, [x28, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n"
- "ldr q7, [x9, #0x70]\n"
+ "ldr q7, [x28, #0x70]\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n"
- "ldr q6, [x9, #0x80]\n"
+ "ldr q6, [x28, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n"
- "ldr q7, [x9, #0x90]\n"
+ "ldr q7, [x28, #0x90]\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n"
- "ldr q6, [x9, #0xa0]\n"
+ "ldr q6, [x28, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n"
- "ldr q7, [x9, #0xb0]\n"
+ "ldr q7, [x28, #0xb0]\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n"
- "ldr q6, [x9, #0xc0]\n"
+ "ldr q6, [x28, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n"
- "ldr q7, [x9, #0xd0]\n"
+ "ldr q7, [x28, #0xd0]\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n"
- "ldr q6, [x9, #0xe0]\n"
+ "ldr q6, [x28, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n"
- "ldr q7, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q7, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n"
@@ -979,26 +979,26 @@ void a64_hybrid_s8qs_dot_6x16 (
".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n"
".inst 0x4fa2e8f3 // sdot v19.4s, v7.16b, v2.4b[3]\n"
"63:" // Height 3: Multiply loop: Main loop skip
- "cbz x27, 68f\n"
- "cmp x27, #0x4\n"
+ "cbz x26, 68f\n"
+ "cmp x26, #0x4\n"
"blt 65f\n"
"64:" // Height 3: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr s1, [x25], #0x4\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
- "ldr s2, [x24], #0x4\n"
- "ldr q6, [x9, #0x0]\n"
+ "ldr s0, [x25], #0x4\n"
+ "sub x26, x26, #0x4\n"
+ "ldr s1, [x24], #0x4\n"
+ "cmp x26, #0x4\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr q6, [x28, #0x0]\n"
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x28, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr q7, [x9, #0x10]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
- "ldr q6, [x9, #0x20]\n"
+ "ldr q6, [x28, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "ldr q7, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
@@ -1006,33 +1006,33 @@ void a64_hybrid_s8qs_dot_6x16 (
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
"bge 64b\n"
+ "cbz x26, 68f\n"
"65:" // Height 3: Multiply loop: Skip odd blocks
- "cbz x27, 68f\n"
- "tbz x27, #1, 66f\n"
- "ldr h0, [x26], #0x2\n"
- "ldr h1, [x25], #0x2\n"
- "ldr h2, [x24], #0x2\n"
- "tbz x27, #0, 67f\n"
- "ld1 { v0.b }[2], [x26]\n"
- "ld1 { v1.b }[2], [x25]\n"
- "ld1 { v2.b }[2], [x24]\n"
+ "tbz x26, #1, 66f\n"
+ "ldr h0, [x25], #0x2\n"
+ "ldr h1, [x24], #0x2\n"
+ "ldr h2, [x23], #0x2\n"
+ "tbz x26, #0, 67f\n"
+ "ld1 { v0.b }[2], [x25]\n"
+ "ld1 { v1.b }[2], [x24]\n"
+ "ld1 { v2.b }[2], [x23]\n"
"b 67f\n"
"66:" // Height 3: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x26, #0x0]\n"
- "ldr b1, [x25, #0x0]\n"
- "ldr b2, [x24, #0x0]\n"
+ "ldr b0, [x25, #0x0]\n"
+ "ldr b1, [x24, #0x0]\n"
+ "ldr b2, [x23, #0x0]\n"
"67:" // Height 3: Multiply loop: Ragged operand read: Done
- "ldr q6, [x9, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q6, [x28, #0x0]\n"
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x28, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
- "ldr q6, [x9, #0x20]\n"
+ "ldr q6, [x28, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "ldr q7, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
@@ -1040,31 +1040,31 @@ void a64_hybrid_s8qs_dot_6x16 (
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
"68:" // Height 3: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 58b\n"
- "ldr q0, [x14, #0x0]\n"
- "ldr q1, [x14, #0x10]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
+ "add x23, x9, x19\n"
+ "ldr q0, [x11, #0x0]\n"
"add v8.4s, v8.4s, v0.4s\n"
- "add v9.4s, v9.4s, v1.4s\n"
- "ldr q2, [x14, #0x20]\n"
- "ldr q3, [x14, #0x30]\n"
- "add v10.4s, v10.4s, v2.4s\n"
- "add v11.4s, v11.4s, v3.4s\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x11, x20\n"
- "add x23, x24, x20\n"
- "prfm pstl1keep, [x11, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19\n"
"add v12.4s, v12.4s, v0.4s\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
+ "add v16.4s, v16.4s, v0.4s\n"
+ "ldr q1, [x11, #0x10]\n"
+ "ldr q2, [x11, #0x20]\n"
+ "add v9.4s, v9.4s, v1.4s\n"
+ "ldr q3, [x11, #0x30]\n"
+ "add x11, x11, #0x40\n"
+ "add v10.4s, v10.4s, v2.4s\n"
"add v13.4s, v13.4s, v1.4s\n"
"add v14.4s, v14.4s, v2.4s\n"
- "add v15.4s, v15.4s, v3.4s\n"
- "add x14, x14, #0x40\n"
- "add v16.4s, v16.4s, v0.4s\n"
"add v17.4s, v17.4s, v1.4s\n"
+ "add v11.4s, v11.4s, v3.4s\n"
+ "add v15.4s, v15.4s, v3.4s\n"
"add v18.4s, v18.4s, v2.4s\n"
"add v19.4s, v19.4s, v3.4s\n"
"tbz %x[flags], #4, 69f\n"
@@ -1075,20 +1075,20 @@ void a64_hybrid_s8qs_dot_6x16 (
"ldr q2, [x12, #0x20]\n"
"ldr q6, [x13, #0x20]\n"
"ldr q3, [x12, #0x30]\n"
- "ldr q7, [x13, #0x30]\n"
"add x12, x12, #0x40\n"
+ "ldr q7, [x13, #0x30]\n"
"add x13, x13, #0x40\n"
"b 70f\n"
"69:" // Height 3: per layer parameters
- "add x25, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x25]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x25]\n"
+ "add x24, %x[qp], %[per_layer_right_shift]\n"
+ "ld1r { v0.4s }, [x24]\n"
"mov v1.16b, v0.16b\n"
- "mov v5.16b, v4.16b\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v4.4s }, [x24]\n"
"mov v2.16b, v0.16b\n"
- "mov v6.16b, v4.16b\n"
"mov v3.16b, v0.16b\n"
+ "mov v5.16b, v4.16b\n"
+ "mov v6.16b, v4.16b\n"
"mov v7.16b, v4.16b\n"
"70:" // Height 3: parameters loaded
"sqrdmulh v8.4s, v8.4s, v4.4s\n"
@@ -1107,98 +1107,98 @@ void a64_hybrid_s8qs_dot_6x16 (
"and v4.16b, v8.16b, v0.16b\n"
"and v5.16b, v9.16b, v1.16b\n"
"and v6.16b, v10.16b, v2.16b\n"
- "and v7.16b, v11.16b, v3.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v8.4s, v8.4s, v4.4s\n"
"sqadd v9.4s, v9.4s, v5.4s\n"
"sqadd v10.4s, v10.4s, v6.4s\n"
- "sqadd v11.4s, v11.4s, v7.4s\n"
+ "and v7.16b, v11.16b, v3.16b\n"
"and v4.16b, v12.16b, v0.16b\n"
"and v5.16b, v13.16b, v1.16b\n"
- "and v6.16b, v14.16b, v2.16b\n"
- "and v7.16b, v15.16b, v3.16b\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
+ "sqadd v11.4s, v11.4s, v7.4s\n"
"sqadd v12.4s, v12.4s, v4.4s\n"
"sqadd v13.4s, v13.4s, v5.4s\n"
+ "and v6.16b, v14.16b, v2.16b\n"
+ "and v7.16b, v15.16b, v3.16b\n"
+ "and v4.16b, v16.16b, v0.16b\n"
+ "sshr v6.4s, v6.4s, #0x1f\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
"sqadd v14.4s, v14.4s, v6.4s\n"
"sqadd v15.4s, v15.4s, v7.4s\n"
- "and v4.16b, v16.16b, v0.16b\n"
+ "sqadd v16.4s, v16.4s, v4.4s\n"
"and v5.16b, v17.16b, v1.16b\n"
"and v6.16b, v18.16b, v2.16b\n"
"and v7.16b, v19.16b, v3.16b\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v4.4s\n"
"sqadd v17.4s, v17.4s, v5.4s\n"
"sqadd v18.4s, v18.4s, v6.4s\n"
"sqadd v19.4s, v19.4s, v7.4s\n"
"71:" // Height 3: no shift correction
- "add x25, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x25]\n"
"srshl v8.4s, v8.4s, v0.4s\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "ld1r { v4.4s }, [x24]\n"
"srshl v9.4s, v9.4s, v1.4s\n"
+ "add x24, %x[qp], %[minval]\n"
"srshl v10.4s, v10.4s, v2.4s\n"
+ "ld1r { v5.4s }, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
"srshl v11.4s, v11.4s, v3.4s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x25]\n"
+ "ld1r { v6.4s }, [x24]\n"
+ "cmp x10, #0x10\n"
"srshl v12.4s, v12.4s, v0.4s\n"
"srshl v13.4s, v13.4s, v1.4s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x25]\n"
- "srshl v14.4s, v14.4s, v2.4s\n"
- "srshl v15.4s, v15.4s, v3.4s\n"
- "cmp x10, #0x10\n"
- "srshl v16.4s, v16.4s, v0.4s\n"
- "srshl v17.4s, v17.4s, v1.4s\n"
- "srshl v18.4s, v18.4s, v2.4s\n"
- "srshl v19.4s, v19.4s, v3.4s\n"
"add v8.4s, v8.4s, v4.4s\n"
"add v9.4s, v9.4s, v4.4s\n"
"add v10.4s, v10.4s, v4.4s\n"
- "add v11.4s, v11.4s, v4.4s\n"
- "add v12.4s, v12.4s, v4.4s\n"
- "add v13.4s, v13.4s, v4.4s\n"
- "add v14.4s, v14.4s, v4.4s\n"
- "add v15.4s, v15.4s, v4.4s\n"
- "add v16.4s, v16.4s, v4.4s\n"
- "add v17.4s, v17.4s, v4.4s\n"
- "add v18.4s, v18.4s, v4.4s\n"
- "add v19.4s, v19.4s, v4.4s\n"
"smin v8.4s, v8.4s, v6.4s\n"
"smin v9.4s, v9.4s, v6.4s\n"
"smin v10.4s, v10.4s, v6.4s\n"
- "smin v11.4s, v11.4s, v6.4s\n"
- "smin v12.4s, v12.4s, v6.4s\n"
- "smin v13.4s, v13.4s, v6.4s\n"
- "smin v14.4s, v14.4s, v6.4s\n"
- "smin v15.4s, v15.4s, v6.4s\n"
- "smin v16.4s, v16.4s, v6.4s\n"
- "smin v17.4s, v17.4s, v6.4s\n"
- "smin v18.4s, v18.4s, v6.4s\n"
- "smin v19.4s, v19.4s, v6.4s\n"
"smax v8.4s, v8.4s, v5.4s\n"
"smax v9.4s, v9.4s, v5.4s\n"
"smax v10.4s, v10.4s, v5.4s\n"
+ "add v11.4s, v11.4s, v4.4s\n"
+ "add v12.4s, v12.4s, v4.4s\n"
+ "add v13.4s, v13.4s, v4.4s\n"
+ "smin v11.4s, v11.4s, v6.4s\n"
+ "smin v12.4s, v12.4s, v6.4s\n"
+ "smin v13.4s, v13.4s, v6.4s\n"
"smax v11.4s, v11.4s, v5.4s\n"
"smax v12.4s, v12.4s, v5.4s\n"
"smax v13.4s, v13.4s, v5.4s\n"
+ "srshl v14.4s, v14.4s, v2.4s\n"
+ "srshl v15.4s, v15.4s, v3.4s\n"
+ "srshl v16.4s, v16.4s, v0.4s\n"
+ "srshl v17.4s, v17.4s, v1.4s\n"
+ "add v14.4s, v14.4s, v4.4s\n"
+ "add v15.4s, v15.4s, v4.4s\n"
+ "add v16.4s, v16.4s, v4.4s\n"
+ "smin v14.4s, v14.4s, v6.4s\n"
+ "smin v15.4s, v15.4s, v6.4s\n"
+ "smin v16.4s, v16.4s, v6.4s\n"
"smax v14.4s, v14.4s, v5.4s\n"
"smax v15.4s, v15.4s, v5.4s\n"
"smax v16.4s, v16.4s, v5.4s\n"
- "smax v17.4s, v17.4s, v5.4s\n"
- "smax v18.4s, v18.4s, v5.4s\n"
- "smax v19.4s, v19.4s, v5.4s\n"
+ "add v17.4s, v17.4s, v4.4s\n"
+ "srshl v18.4s, v18.4s, v2.4s\n"
+ "srshl v19.4s, v19.4s, v3.4s\n"
+ "smin v17.4s, v17.4s, v6.4s\n"
"uzp1 v8.8h, v8.8h, v9.8h\n"
+ "add v18.4s, v18.4s, v4.4s\n"
+ "smax v17.4s, v17.4s, v5.4s\n"
+ "add v19.4s, v19.4s, v4.4s\n"
+ "smin v18.4s, v18.4s, v6.4s\n"
"uzp1 v9.8h, v10.8h, v11.8h\n"
+ "smin v19.4s, v19.4s, v6.4s\n"
+ "smax v18.4s, v18.4s, v5.4s\n"
"uzp1 v12.8h, v12.8h, v13.8h\n"
+ "smax v19.4s, v19.4s, v5.4s\n"
"uzp1 v13.8h, v14.8h, v15.8h\n"
"uzp1 v16.8h, v16.8h, v17.8h\n"
"uzp1 v17.8h, v18.8h, v19.8h\n"
@@ -1207,96 +1207,96 @@ void a64_hybrid_s8qs_dot_6x16 (
"uzp1 v16.16b, v16.16b, v17.16b\n"
"bge 80f\n"
"tbz x10, #3, 75f\n"
- "str d8, [x11], #0x8\n"
- "str d12, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
+ "str d8, [x9], #0x8\n"
+ "str d12, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
"tbz x10, #2, 73f\n"
- "st1 { v8.s }[2], [x11], #0x4\n"
- "st1 { v12.s }[2], [x24], #0x4\n"
- "st1 { v16.s }[2], [x23], #0x4\n"
+ "st1 { v8.s }[2], [x9], #0x4\n"
+ "st1 { v12.s }[2], [x23], #0x4\n"
+ "st1 { v16.s }[2], [x22], #0x4\n"
"tbz x10, #1, 72f\n"
- "st1 { v8.h }[6], [x11], #0x2\n"
- "st1 { v12.h }[6], [x24], #0x2\n"
- "st1 { v16.h }[6], [x23], #0x2\n"
+ "st1 { v8.h }[6], [x9], #0x2\n"
+ "st1 { v12.h }[6], [x23], #0x2\n"
+ "st1 { v16.h }[6], [x22], #0x2\n"
"tbz x10, #0, 79f\n"
- "st1 { v8.b }[14], [x11]\n"
- "st1 { v12.b }[14], [x24]\n"
- "st1 { v16.b }[14], [x23]\n"
+ "st1 { v8.b }[14], [x9]\n"
+ "st1 { v12.b }[14], [x23]\n"
+ "st1 { v16.b }[14], [x22]\n"
"b 79f\n"
"72:" // Height 3: Partial direct writeback: partial_1_12
"tbz x10, #0, 79f\n"
- "st1 { v8.b }[12], [x11]\n"
- "st1 { v12.b }[12], [x24]\n"
- "st1 { v16.b }[12], [x23]\n"
+ "st1 { v8.b }[12], [x9]\n"
+ "st1 { v12.b }[12], [x23]\n"
+ "st1 { v16.b }[12], [x22]\n"
"b 79f\n"
"73:" // Height 3: Partial direct writeback: partial_2_8
"tbz x10, #1, 74f\n"
- "st1 { v8.h }[4], [x11], #0x2\n"
- "st1 { v12.h }[4], [x24], #0x2\n"
- "st1 { v16.h }[4], [x23], #0x2\n"
+ "st1 { v8.h }[4], [x9], #0x2\n"
+ "st1 { v12.h }[4], [x23], #0x2\n"
+ "st1 { v16.h }[4], [x22], #0x2\n"
"tbz x10, #0, 79f\n"
- "st1 { v8.b }[10], [x11]\n"
- "st1 { v12.b }[10], [x24]\n"
- "st1 { v16.b }[10], [x23]\n"
+ "st1 { v8.b }[10], [x9]\n"
+ "st1 { v12.b }[10], [x23]\n"
+ "st1 { v16.b }[10], [x22]\n"
"b 79f\n"
"74:" // Height 3: Partial direct writeback: partial_1_8
"tbz x10, #0, 79f\n"
- "st1 { v8.b }[8], [x11]\n"
- "st1 { v12.b }[8], [x24]\n"
- "st1 { v16.b }[8], [x23]\n"
+ "st1 { v8.b }[8], [x9]\n"
+ "st1 { v12.b }[8], [x23]\n"
+ "st1 { v16.b }[8], [x22]\n"
"b 79f\n"
"75:" // Height 3: Partial direct writeback: partial_4_0
"tbz x10, #2, 77f\n"
- "str s8, [x11], #0x4\n"
- "str s12, [x24], #0x4\n"
- "str s16, [x23], #0x4\n"
+ "str s8, [x9], #0x4\n"
+ "str s12, [x23], #0x4\n"
+ "str s16, [x22], #0x4\n"
"tbz x10, #1, 76f\n"
- "st1 { v8.h }[2], [x11], #0x2\n"
- "st1 { v12.h }[2], [x24], #0x2\n"
- "st1 { v16.h }[2], [x23], #0x2\n"
+ "st1 { v8.h }[2], [x9], #0x2\n"
+ "st1 { v12.h }[2], [x23], #0x2\n"
+ "st1 { v16.h }[2], [x22], #0x2\n"
"tbz x10, #0, 79f\n"
- "st1 { v8.b }[6], [x11]\n"
- "st1 { v12.b }[6], [x24]\n"
- "st1 { v16.b }[6], [x23]\n"
+ "st1 { v8.b }[6], [x9]\n"
+ "st1 { v12.b }[6], [x23]\n"
+ "st1 { v16.b }[6], [x22]\n"
"b 79f\n"
"76:" // Height 3: Partial direct writeback: partial_1_4
"tbz x10, #0, 79f\n"
- "st1 { v8.b }[4], [x11]\n"
- "st1 { v12.b }[4], [x24]\n"
- "st1 { v16.b }[4], [x23]\n"
+ "st1 { v8.b }[4], [x9]\n"
+ "st1 { v12.b }[4], [x23]\n"
+ "st1 { v16.b }[4], [x22]\n"
"b 79f\n"
"77:" // Height 3: Partial direct writeback: partial_2_0
"tbz x10, #1, 78f\n"
- "str h8, [x11], #0x2\n"
- "str h12, [x24], #0x2\n"
- "str h16, [x23], #0x2\n"
+ "str h8, [x9], #0x2\n"
+ "str h12, [x23], #0x2\n"
+ "str h16, [x22], #0x2\n"
"tbz x10, #0, 79f\n"
- "st1 { v8.b }[2], [x11]\n"
- "st1 { v12.b }[2], [x24]\n"
- "st1 { v16.b }[2], [x23]\n"
+ "st1 { v8.b }[2], [x9]\n"
+ "st1 { v12.b }[2], [x23]\n"
+ "st1 { v16.b }[2], [x22]\n"
"b 79f\n"
"78:" // Height 3: Partial direct writeback: partial_1_0
- "str b8, [x11, #0x0]\n"
- "str b12, [x24, #0x0]\n"
- "str b16, [x23, #0x0]\n"
+ "str b8, [x9, #0x0]\n"
+ "str b12, [x23, #0x0]\n"
+ "str b16, [x22, #0x0]\n"
"79:" // Height 3: Partial direct writeback: Done
"b 81f\n"
"80:" // Height 3: Full writeback
- "str q8, [x11, #0x0]\n"
- "add x11, x11, #0x10\n"
- "str q12, [x24, #0x0]\n"
- "str q16, [x23, #0x0]\n"
+ "str q8, [x9, #0x0]\n"
+ "add x9, x9, #0x10\n"
+ "str q12, [x23, #0x0]\n"
+ "str q16, [x22, #0x0]\n"
"81:" // Height 3: Writeback done
"subs x10, x10, #0x10\n"
"bgt 56b\n"
"b 164f\n"
"82:" // Height 4
- "mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
+ "mov x11, %x[col_bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"83:" // Height 4: Column loop
"movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
@@ -1315,219 +1315,219 @@ void a64_hybrid_s8qs_dot_6x16 (
"movi v22.4s, #0x0\n"
"movi v23.4s, #0x0\n"
"84:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"85:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 86f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 87f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 87f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
"b 87f\n"
"86:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
"87:" // Height 4: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 90f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q3, [x23, #0x0]\n"
- "ldr q6, [x9, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x20\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q6, [x28, #0x0]\n"
"blt 89f\n"
"88:" // Height 4: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "add x25, x25, #0x10\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "sub x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "add x23, x23, #0x10\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
- "ldr q6, [x9, #0x20]\n"
- "add x25, x25, #0x10\n"
+ "ldr q6, [x28, #0x20]\n"
+ "add x22, x22, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "add x24, x24, #0x10\n"
- "add x23, x23, #0x10\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "cmp x26, #0x20\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
- "ldr q7, [x9, #0x30]\n"
- "cmp x27, #0x20\n"
+ "ldr q7, [x28, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n"
- "ldr q6, [x9, #0x40]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
+ "ldr q6, [x28, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
- "ldr q7, [x9, #0x50]\n"
+ "ldr q7, [x28, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d4 // sdot v20.4s, v6.16b, v3.4b[1]\n"
- "ldr q6, [x9, #0x60]\n"
+ "ldr q6, [x28, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n"
".inst 0x4fa3e0f5 // sdot v21.4s, v7.16b, v3.4b[1]\n"
- "ldr q7, [x9, #0x70]\n"
+ "ldr q7, [x28, #0x70]\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d6 // sdot v22.4s, v6.16b, v3.4b[1]\n"
- "ldr q6, [x9, #0x80]\n"
+ "ldr q6, [x28, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n"
".inst 0x4fa3e0f7 // sdot v23.4s, v7.16b, v3.4b[1]\n"
- "ldr q7, [x9, #0x90]\n"
+ "ldr q7, [x28, #0x90]\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d4 // sdot v20.4s, v6.16b, v3.4b[2]\n"
- "ldr q6, [x9, #0xa0]\n"
+ "ldr q6, [x28, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n"
".inst 0x4f83e8f5 // sdot v21.4s, v7.16b, v3.4b[2]\n"
- "ldr q7, [x9, #0xb0]\n"
+ "ldr q7, [x28, #0xb0]\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d6 // sdot v22.4s, v6.16b, v3.4b[2]\n"
- "ldr q6, [x9, #0xc0]\n"
+ "ldr q6, [x28, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n"
".inst 0x4f83e8f7 // sdot v23.4s, v7.16b, v3.4b[2]\n"
- "ldr q7, [x9, #0xd0]\n"
+ "ldr q7, [x28, #0xd0]\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n"
".inst 0x4fa3e8d4 // sdot v20.4s, v6.16b, v3.4b[3]\n"
- "ldr q6, [x9, #0xe0]\n"
+ "ldr q6, [x28, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n"
".inst 0x4fa3e8f5 // sdot v21.4s, v7.16b, v3.4b[3]\n"
- "ldr q7, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q7, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n"
".inst 0x4fa3e8d6 // sdot v22.4s, v6.16b, v3.4b[3]\n"
- "ldr q6, [x9, #0x0]\n"
+ "ldr q6, [x28, #0x0]\n"
".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n"
- "ldr q1, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
".inst 0x4fa2e8f3 // sdot v19.4s, v7.16b, v2.4b[3]\n"
- "ldr q2, [x24, #0x0]\n"
+ "ldr q2, [x23, #0x0]\n"
".inst 0x4fa3e8f7 // sdot v23.4s, v7.16b, v3.4b[3]\n"
- "ldr q3, [x23, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q3, [x22, #0x0]\n"
"bge 88b\n"
"89:" // Height 4: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
- ".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
- "ldr q6, [x9, #0x20]\n"
"add x24, x24, #0x10\n"
+ ".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "add x23, x23, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
+ "ldr q6, [x28, #0x20]\n"
+ "add x22, x22, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "add x23, x23, #0x10\n"
- "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
- "ldr q7, [x9, #0x30]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q7, [x28, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n"
- "ldr q6, [x9, #0x40]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "ldr q6, [x28, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
- "ldr q7, [x9, #0x50]\n"
+ "ldr q7, [x28, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d4 // sdot v20.4s, v6.16b, v3.4b[1]\n"
- "ldr q6, [x9, #0x60]\n"
+ "ldr q6, [x28, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n"
".inst 0x4fa3e0f5 // sdot v21.4s, v7.16b, v3.4b[1]\n"
- "ldr q7, [x9, #0x70]\n"
+ "ldr q7, [x28, #0x70]\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d6 // sdot v22.4s, v6.16b, v3.4b[1]\n"
- "ldr q6, [x9, #0x80]\n"
+ "ldr q6, [x28, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n"
".inst 0x4fa3e0f7 // sdot v23.4s, v7.16b, v3.4b[1]\n"
- "ldr q7, [x9, #0x90]\n"
+ "ldr q7, [x28, #0x90]\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d4 // sdot v20.4s, v6.16b, v3.4b[2]\n"
- "ldr q6, [x9, #0xa0]\n"
+ "ldr q6, [x28, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n"
".inst 0x4f83e8f5 // sdot v21.4s, v7.16b, v3.4b[2]\n"
- "ldr q7, [x9, #0xb0]\n"
+ "ldr q7, [x28, #0xb0]\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d6 // sdot v22.4s, v6.16b, v3.4b[2]\n"
- "ldr q6, [x9, #0xc0]\n"
+ "ldr q6, [x28, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n"
".inst 0x4f83e8f7 // sdot v23.4s, v7.16b, v3.4b[2]\n"
- "ldr q7, [x9, #0xd0]\n"
+ "ldr q7, [x28, #0xd0]\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n"
".inst 0x4fa3e8d4 // sdot v20.4s, v6.16b, v3.4b[3]\n"
- "ldr q6, [x9, #0xe0]\n"
+ "ldr q6, [x28, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n"
".inst 0x4fa3e8f5 // sdot v21.4s, v7.16b, v3.4b[3]\n"
- "ldr q7, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q7, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n"
@@ -1537,29 +1537,29 @@ void a64_hybrid_s8qs_dot_6x16 (
".inst 0x4fa2e8f3 // sdot v19.4s, v7.16b, v2.4b[3]\n"
".inst 0x4fa3e8f7 // sdot v23.4s, v7.16b, v3.4b[3]\n"
"90:" // Height 4: Multiply loop: Main loop skip
- "cbz x27, 95f\n"
- "cmp x27, #0x4\n"
+ "cbz x26, 95f\n"
+ "cmp x26, #0x4\n"
"blt 92f\n"
"91:" // Height 4: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr s1, [x25], #0x4\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
- "ldr s2, [x24], #0x4\n"
- "ldr s3, [x23], #0x4\n"
- "ldr q6, [x9, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr s0, [x25], #0x4\n"
+ "sub x26, x26, #0x4\n"
+ "ldr s1, [x24], #0x4\n"
+ "cmp x26, #0x4\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr s3, [x22], #0x4\n"
+ "ldr q6, [x28, #0x0]\n"
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x28, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
- "ldr q6, [x9, #0x20]\n"
+ "ldr q6, [x28, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
- "ldr q7, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
@@ -1569,38 +1569,38 @@ void a64_hybrid_s8qs_dot_6x16 (
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
"bge 91b\n"
+ "cbz x26, 95f\n"
"92:" // Height 4: Multiply loop: Skip odd blocks
- "cbz x27, 95f\n"
- "tbz x27, #1, 93f\n"
- "ldr h0, [x26], #0x2\n"
- "ldr h1, [x25], #0x2\n"
- "ldr h2, [x24], #0x2\n"
- "ldr h3, [x23], #0x2\n"
- "tbz x27, #0, 94f\n"
- "ld1 { v0.b }[2], [x26]\n"
- "ld1 { v1.b }[2], [x25]\n"
- "ld1 { v2.b }[2], [x24]\n"
- "ld1 { v3.b }[2], [x23]\n"
+ "tbz x26, #1, 93f\n"
+ "ldr h0, [x25], #0x2\n"
+ "ldr h1, [x24], #0x2\n"
+ "ldr h2, [x23], #0x2\n"
+ "ldr h3, [x22], #0x2\n"
+ "tbz x26, #0, 94f\n"
+ "ld1 { v0.b }[2], [x25]\n"
+ "ld1 { v1.b }[2], [x24]\n"
+ "ld1 { v2.b }[2], [x23]\n"
+ "ld1 { v3.b }[2], [x22]\n"
"b 94f\n"
"93:" // Height 4: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x26, #0x0]\n"
- "ldr b1, [x25, #0x0]\n"
- "ldr b2, [x24, #0x0]\n"
- "ldr b3, [x23, #0x0]\n"
+ "ldr b0, [x25, #0x0]\n"
+ "ldr b1, [x24, #0x0]\n"
+ "ldr b2, [x23, #0x0]\n"
+ "ldr b3, [x22, #0x0]\n"
"94:" // Height 4: Multiply loop: Ragged operand read: Done
- "ldr q6, [x9, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q6, [x28, #0x0]\n"
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x28, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
- "ldr q6, [x9, #0x20]\n"
+ "ldr q6, [x28, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
- "ldr q7, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
@@ -1610,36 +1610,36 @@ void a64_hybrid_s8qs_dot_6x16 (
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
"95:" // Height 4: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 85b\n"
- "ldr q0, [x14, #0x0]\n"
- "ldr q1, [x14, #0x10]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
+ "add x23, x9, x19\n"
+ "ldr q0, [x11, #0x0]\n"
"add v8.4s, v8.4s, v0.4s\n"
- "add v9.4s, v9.4s, v1.4s\n"
- "ldr q2, [x14, #0x20]\n"
- "ldr q3, [x14, #0x30]\n"
- "add v10.4s, v10.4s, v2.4s\n"
- "add v11.4s, v11.4s, v3.4s\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x11, x20\n"
- "add x23, x24, x20\n"
- "prfm pstl1keep, [x11, #0x0]\n"
- "add x22, x23, x20\n"
- "prfm pstl1keep, [x24, #0x0]\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19\n"
"add v12.4s, v12.4s, v0.4s\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19\n"
+ "add v16.4s, v16.4s, v0.4s\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
+ "add v20.4s, v20.4s, v0.4s\n"
+ "ldr q1, [x11, #0x10]\n"
+ "ldr q2, [x11, #0x20]\n"
+ "add v9.4s, v9.4s, v1.4s\n"
+ "ldr q3, [x11, #0x30]\n"
+ "add x11, x11, #0x40\n"
+ "add v10.4s, v10.4s, v2.4s\n"
"add v13.4s, v13.4s, v1.4s\n"
"add v14.4s, v14.4s, v2.4s\n"
- "add x14, x14, #0x40\n"
- "add v15.4s, v15.4s, v3.4s\n"
- "add v16.4s, v16.4s, v0.4s\n"
"add v17.4s, v17.4s, v1.4s\n"
+ "add v11.4s, v11.4s, v3.4s\n"
+ "add v15.4s, v15.4s, v3.4s\n"
"add v18.4s, v18.4s, v2.4s\n"
"add v19.4s, v19.4s, v3.4s\n"
- "add v20.4s, v20.4s, v0.4s\n"
"add v21.4s, v21.4s, v1.4s\n"
"add v22.4s, v22.4s, v2.4s\n"
"add v23.4s, v23.4s, v3.4s\n"
@@ -1651,20 +1651,20 @@ void a64_hybrid_s8qs_dot_6x16 (
"ldr q2, [x12, #0x20]\n"
"ldr q6, [x13, #0x20]\n"
"ldr q3, [x12, #0x30]\n"
- "ldr q7, [x13, #0x30]\n"
"add x12, x12, #0x40\n"
+ "ldr q7, [x13, #0x30]\n"
"add x13, x13, #0x40\n"
"b 97f\n"
"96:" // Height 4: per layer parameters
- "add x25, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x25]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x25]\n"
+ "add x24, %x[qp], %[per_layer_right_shift]\n"
+ "ld1r { v0.4s }, [x24]\n"
"mov v1.16b, v0.16b\n"
- "mov v5.16b, v4.16b\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v4.4s }, [x24]\n"
"mov v2.16b, v0.16b\n"
- "mov v6.16b, v4.16b\n"
"mov v3.16b, v0.16b\n"
+ "mov v5.16b, v4.16b\n"
+ "mov v6.16b, v4.16b\n"
"mov v7.16b, v4.16b\n"
"97:" // Height 4: parameters loaded
"sqrdmulh v8.4s, v8.4s, v4.4s\n"
@@ -1687,125 +1687,125 @@ void a64_hybrid_s8qs_dot_6x16 (
"and v4.16b, v8.16b, v0.16b\n"
"and v5.16b, v9.16b, v1.16b\n"
"and v6.16b, v10.16b, v2.16b\n"
- "and v7.16b, v11.16b, v3.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v8.4s, v8.4s, v4.4s\n"
"sqadd v9.4s, v9.4s, v5.4s\n"
"sqadd v10.4s, v10.4s, v6.4s\n"
- "sqadd v11.4s, v11.4s, v7.4s\n"
+ "and v7.16b, v11.16b, v3.16b\n"
"and v4.16b, v12.16b, v0.16b\n"
"and v5.16b, v13.16b, v1.16b\n"
- "and v6.16b, v14.16b, v2.16b\n"
- "and v7.16b, v15.16b, v3.16b\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
+ "sqadd v11.4s, v11.4s, v7.4s\n"
"sqadd v12.4s, v12.4s, v4.4s\n"
"sqadd v13.4s, v13.4s, v5.4s\n"
+ "and v6.16b, v14.16b, v2.16b\n"
+ "and v7.16b, v15.16b, v3.16b\n"
+ "and v4.16b, v16.16b, v0.16b\n"
+ "sshr v6.4s, v6.4s, #0x1f\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
"sqadd v14.4s, v14.4s, v6.4s\n"
"sqadd v15.4s, v15.4s, v7.4s\n"
- "and v4.16b, v16.16b, v0.16b\n"
+ "sqadd v16.4s, v16.4s, v4.4s\n"
"and v5.16b, v17.16b, v1.16b\n"
"and v6.16b, v18.16b, v2.16b\n"
"and v7.16b, v19.16b, v3.16b\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v4.4s\n"
"sqadd v17.4s, v17.4s, v5.4s\n"
"sqadd v18.4s, v18.4s, v6.4s\n"
"sqadd v19.4s, v19.4s, v7.4s\n"
"and v4.16b, v20.16b, v0.16b\n"
"and v5.16b, v21.16b, v1.16b\n"
"and v6.16b, v22.16b, v2.16b\n"
- "and v7.16b, v23.16b, v3.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v20.4s, v20.4s, v4.4s\n"
"sqadd v21.4s, v21.4s, v5.4s\n"
"sqadd v22.4s, v22.4s, v6.4s\n"
+ "and v7.16b, v23.16b, v3.16b\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v23.4s, v23.4s, v7.4s\n"
"98:" // Height 4: no shift correction
- "add x25, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x25]\n"
"srshl v8.4s, v8.4s, v0.4s\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "ld1r { v4.4s }, [x24]\n"
"srshl v9.4s, v9.4s, v1.4s\n"
+ "add x24, %x[qp], %[minval]\n"
"srshl v10.4s, v10.4s, v2.4s\n"
+ "ld1r { v5.4s }, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
"srshl v11.4s, v11.4s, v3.4s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x25]\n"
+ "ld1r { v6.4s }, [x24]\n"
+ "cmp x10, #0x10\n"
"srshl v12.4s, v12.4s, v0.4s\n"
"srshl v13.4s, v13.4s, v1.4s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x25]\n"
- "srshl v14.4s, v14.4s, v2.4s\n"
- "srshl v15.4s, v15.4s, v3.4s\n"
- "cmp x10, #0x10\n"
- "srshl v16.4s, v16.4s, v0.4s\n"
- "srshl v17.4s, v17.4s, v1.4s\n"
- "srshl v18.4s, v18.4s, v2.4s\n"
- "srshl v19.4s, v19.4s, v3.4s\n"
- "srshl v20.4s, v20.4s, v0.4s\n"
- "srshl v21.4s, v21.4s, v1.4s\n"
- "srshl v22.4s, v22.4s, v2.4s\n"
- "srshl v23.4s, v23.4s, v3.4s\n"
"add v8.4s, v8.4s, v4.4s\n"
"add v9.4s, v9.4s, v4.4s\n"
"add v10.4s, v10.4s, v4.4s\n"
- "add v11.4s, v11.4s, v4.4s\n"
- "add v12.4s, v12.4s, v4.4s\n"
- "add v13.4s, v13.4s, v4.4s\n"
- "add v14.4s, v14.4s, v4.4s\n"
- "add v15.4s, v15.4s, v4.4s\n"
- "add v16.4s, v16.4s, v4.4s\n"
- "add v17.4s, v17.4s, v4.4s\n"
- "add v18.4s, v18.4s, v4.4s\n"
- "add v19.4s, v19.4s, v4.4s\n"
- "add v20.4s, v20.4s, v4.4s\n"
- "add v21.4s, v21.4s, v4.4s\n"
- "add v22.4s, v22.4s, v4.4s\n"
- "add v23.4s, v23.4s, v4.4s\n"
"smin v8.4s, v8.4s, v6.4s\n"
"smin v9.4s, v9.4s, v6.4s\n"
"smin v10.4s, v10.4s, v6.4s\n"
- "smin v11.4s, v11.4s, v6.4s\n"
- "smin v12.4s, v12.4s, v6.4s\n"
- "smin v13.4s, v13.4s, v6.4s\n"
- "smin v14.4s, v14.4s, v6.4s\n"
- "smin v15.4s, v15.4s, v6.4s\n"
- "smin v16.4s, v16.4s, v6.4s\n"
- "smin v17.4s, v17.4s, v6.4s\n"
- "smin v18.4s, v18.4s, v6.4s\n"
- "smin v19.4s, v19.4s, v6.4s\n"
- "smin v20.4s, v20.4s, v6.4s\n"
- "smin v21.4s, v21.4s, v6.4s\n"
- "smin v22.4s, v22.4s, v6.4s\n"
- "smin v23.4s, v23.4s, v6.4s\n"
"smax v8.4s, v8.4s, v5.4s\n"
"smax v9.4s, v9.4s, v5.4s\n"
"smax v10.4s, v10.4s, v5.4s\n"
+ "add v11.4s, v11.4s, v4.4s\n"
+ "add v12.4s, v12.4s, v4.4s\n"
+ "add v13.4s, v13.4s, v4.4s\n"
+ "smin v11.4s, v11.4s, v6.4s\n"
+ "smin v12.4s, v12.4s, v6.4s\n"
+ "smin v13.4s, v13.4s, v6.4s\n"
"smax v11.4s, v11.4s, v5.4s\n"
"smax v12.4s, v12.4s, v5.4s\n"
"smax v13.4s, v13.4s, v5.4s\n"
+ "srshl v14.4s, v14.4s, v2.4s\n"
+ "srshl v15.4s, v15.4s, v3.4s\n"
+ "srshl v16.4s, v16.4s, v0.4s\n"
+ "srshl v17.4s, v17.4s, v1.4s\n"
+ "add v14.4s, v14.4s, v4.4s\n"
+ "add v15.4s, v15.4s, v4.4s\n"
+ "add v16.4s, v16.4s, v4.4s\n"
+ "smin v14.4s, v14.4s, v6.4s\n"
+ "smin v15.4s, v15.4s, v6.4s\n"
+ "smin v16.4s, v16.4s, v6.4s\n"
"smax v14.4s, v14.4s, v5.4s\n"
"smax v15.4s, v15.4s, v5.4s\n"
"smax v16.4s, v16.4s, v5.4s\n"
+ "add v17.4s, v17.4s, v4.4s\n"
+ "srshl v18.4s, v18.4s, v2.4s\n"
+ "srshl v19.4s, v19.4s, v3.4s\n"
+ "smin v17.4s, v17.4s, v6.4s\n"
+ "srshl v20.4s, v20.4s, v0.4s\n"
+ "add v18.4s, v18.4s, v4.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
+ "add v19.4s, v19.4s, v4.4s\n"
+ "smin v18.4s, v18.4s, v6.4s\n"
+ "add v20.4s, v20.4s, v4.4s\n"
+ "smin v19.4s, v19.4s, v6.4s\n"
"smax v18.4s, v18.4s, v5.4s\n"
+ "smin v20.4s, v20.4s, v6.4s\n"
"smax v19.4s, v19.4s, v5.4s\n"
+ "srshl v21.4s, v21.4s, v1.4s\n"
"smax v20.4s, v20.4s, v5.4s\n"
+ "srshl v22.4s, v22.4s, v2.4s\n"
+ "srshl v23.4s, v23.4s, v3.4s\n"
+ "add v21.4s, v21.4s, v4.4s\n"
+ "uzp1 v8.8h, v8.8h, v9.8h\n"
+ "add v22.4s, v22.4s, v4.4s\n"
+ "smin v21.4s, v21.4s, v6.4s\n"
+ "add v23.4s, v23.4s, v4.4s\n"
+ "smin v22.4s, v22.4s, v6.4s\n"
"smax v21.4s, v21.4s, v5.4s\n"
+ "smin v23.4s, v23.4s, v6.4s\n"
"smax v22.4s, v22.4s, v5.4s\n"
- "smax v23.4s, v23.4s, v5.4s\n"
- "uzp1 v8.8h, v8.8h, v9.8h\n"
"uzp1 v9.8h, v10.8h, v11.8h\n"
+ "smax v23.4s, v23.4s, v5.4s\n"
"uzp1 v12.8h, v12.8h, v13.8h\n"
"uzp1 v13.8h, v14.8h, v15.8h\n"
"uzp1 v16.8h, v16.8h, v17.8h\n"
@@ -1818,112 +1818,112 @@ void a64_hybrid_s8qs_dot_6x16 (
"uzp1 v20.16b, v20.16b, v21.16b\n"
"bge 107f\n"
"tbz x10, #3, 102f\n"
- "str d8, [x11], #0x8\n"
- "str d12, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d20, [x22], #0x8\n"
+ "str d8, [x9], #0x8\n"
+ "str d12, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "str d20, [x21], #0x8\n"
"tbz x10, #2, 100f\n"
- "st1 { v8.s }[2], [x11], #0x4\n"
- "st1 { v12.s }[2], [x24], #0x4\n"
- "st1 { v16.s }[2], [x23], #0x4\n"
- "st1 { v20.s }[2], [x22], #0x4\n"
+ "st1 { v8.s }[2], [x9], #0x4\n"
+ "st1 { v12.s }[2], [x23], #0x4\n"
+ "st1 { v16.s }[2], [x22], #0x4\n"
+ "st1 { v20.s }[2], [x21], #0x4\n"
"tbz x10, #1, 99f\n"
- "st1 { v8.h }[6], [x11], #0x2\n"
- "st1 { v12.h }[6], [x24], #0x2\n"
- "st1 { v16.h }[6], [x23], #0x2\n"
- "st1 { v20.h }[6], [x22], #0x2\n"
+ "st1 { v8.h }[6], [x9], #0x2\n"
+ "st1 { v12.h }[6], [x23], #0x2\n"
+ "st1 { v16.h }[6], [x22], #0x2\n"
+ "st1 { v20.h }[6], [x21], #0x2\n"
"tbz x10, #0, 106f\n"
- "st1 { v8.b }[14], [x11]\n"
- "st1 { v12.b }[14], [x24]\n"
- "st1 { v16.b }[14], [x23]\n"
- "st1 { v20.b }[14], [x22]\n"
+ "st1 { v8.b }[14], [x9]\n"
+ "st1 { v12.b }[14], [x23]\n"
+ "st1 { v16.b }[14], [x22]\n"
+ "st1 { v20.b }[14], [x21]\n"
"b 106f\n"
"99:" // Height 4: Partial direct writeback: partial_1_12
"tbz x10, #0, 106f\n"
- "st1 { v8.b }[12], [x11]\n"
- "st1 { v12.b }[12], [x24]\n"
- "st1 { v16.b }[12], [x23]\n"
- "st1 { v20.b }[12], [x22]\n"
+ "st1 { v8.b }[12], [x9]\n"
+ "st1 { v12.b }[12], [x23]\n"
+ "st1 { v16.b }[12], [x22]\n"
+ "st1 { v20.b }[12], [x21]\n"
"b 106f\n"
"100:" // Height 4: Partial direct writeback: partial_2_8
"tbz x10, #1, 101f\n"
- "st1 { v8.h }[4], [x11], #0x2\n"
- "st1 { v12.h }[4], [x24], #0x2\n"
- "st1 { v16.h }[4], [x23], #0x2\n"
- "st1 { v20.h }[4], [x22], #0x2\n"
+ "st1 { v8.h }[4], [x9], #0x2\n"
+ "st1 { v12.h }[4], [x23], #0x2\n"
+ "st1 { v16.h }[4], [x22], #0x2\n"
+ "st1 { v20.h }[4], [x21], #0x2\n"
"tbz x10, #0, 106f\n"
- "st1 { v8.b }[10], [x11]\n"
- "st1 { v12.b }[10], [x24]\n"
- "st1 { v16.b }[10], [x23]\n"
- "st1 { v20.b }[10], [x22]\n"
+ "st1 { v8.b }[10], [x9]\n"
+ "st1 { v12.b }[10], [x23]\n"
+ "st1 { v16.b }[10], [x22]\n"
+ "st1 { v20.b }[10], [x21]\n"
"b 106f\n"
"101:" // Height 4: Partial direct writeback: partial_1_8
"tbz x10, #0, 106f\n"
- "st1 { v8.b }[8], [x11]\n"
- "st1 { v12.b }[8], [x24]\n"
- "st1 { v16.b }[8], [x23]\n"
- "st1 { v20.b }[8], [x22]\n"
+ "st1 { v8.b }[8], [x9]\n"
+ "st1 { v12.b }[8], [x23]\n"
+ "st1 { v16.b }[8], [x22]\n"
+ "st1 { v20.b }[8], [x21]\n"
"b 106f\n"
"102:" // Height 4: Partial direct writeback: partial_4_0
"tbz x10, #2, 104f\n"
- "str s8, [x11], #0x4\n"
- "str s12, [x24], #0x4\n"
- "str s16, [x23], #0x4\n"
- "str s20, [x22], #0x4\n"
+ "str s8, [x9], #0x4\n"
+ "str s12, [x23], #0x4\n"
+ "str s16, [x22], #0x4\n"
+ "str s20, [x21], #0x4\n"
"tbz x10, #1, 103f\n"
- "st1 { v8.h }[2], [x11], #0x2\n"
- "st1 { v12.h }[2], [x24], #0x2\n"
- "st1 { v16.h }[2], [x23], #0x2\n"
- "st1 { v20.h }[2], [x22], #0x2\n"
+ "st1 { v8.h }[2], [x9], #0x2\n"
+ "st1 { v12.h }[2], [x23], #0x2\n"
+ "st1 { v16.h }[2], [x22], #0x2\n"
+ "st1 { v20.h }[2], [x21], #0x2\n"
"tbz x10, #0, 106f\n"
- "st1 { v8.b }[6], [x11]\n"
- "st1 { v12.b }[6], [x24]\n"
- "st1 { v16.b }[6], [x23]\n"
- "st1 { v20.b }[6], [x22]\n"
+ "st1 { v8.b }[6], [x9]\n"
+ "st1 { v12.b }[6], [x23]\n"
+ "st1 { v16.b }[6], [x22]\n"
+ "st1 { v20.b }[6], [x21]\n"
"b 106f\n"
"103:" // Height 4: Partial direct writeback: partial_1_4
"tbz x10, #0, 106f\n"
- "st1 { v8.b }[4], [x11]\n"
- "st1 { v12.b }[4], [x24]\n"
- "st1 { v16.b }[4], [x23]\n"
- "st1 { v20.b }[4], [x22]\n"
+ "st1 { v8.b }[4], [x9]\n"
+ "st1 { v12.b }[4], [x23]\n"
+ "st1 { v16.b }[4], [x22]\n"
+ "st1 { v20.b }[4], [x21]\n"
"b 106f\n"
"104:" // Height 4: Partial direct writeback: partial_2_0
"tbz x10, #1, 105f\n"
- "str h8, [x11], #0x2\n"
- "str h12, [x24], #0x2\n"
- "str h16, [x23], #0x2\n"
- "str h20, [x22], #0x2\n"
+ "str h8, [x9], #0x2\n"
+ "str h12, [x23], #0x2\n"
+ "str h16, [x22], #0x2\n"
+ "str h20, [x21], #0x2\n"
"tbz x10, #0, 106f\n"
- "st1 { v8.b }[2], [x11]\n"
- "st1 { v12.b }[2], [x24]\n"
- "st1 { v16.b }[2], [x23]\n"
- "st1 { v20.b }[2], [x22]\n"
+ "st1 { v8.b }[2], [x9]\n"
+ "st1 { v12.b }[2], [x23]\n"
+ "st1 { v16.b }[2], [x22]\n"
+ "st1 { v20.b }[2], [x21]\n"
"b 106f\n"
"105:" // Height 4: Partial direct writeback: partial_1_0
- "str b8, [x11, #0x0]\n"
- "str b12, [x24, #0x0]\n"
- "str b16, [x23, #0x0]\n"
- "str b20, [x22, #0x0]\n"
+ "str b8, [x9, #0x0]\n"
+ "str b12, [x23, #0x0]\n"
+ "str b16, [x22, #0x0]\n"
+ "str b20, [x21, #0x0]\n"
"106:" // Height 4: Partial direct writeback: Done
"b 108f\n"
"107:" // Height 4: Full writeback
- "str q8, [x11, #0x0]\n"
- "add x11, x11, #0x10\n"
- "str q12, [x24, #0x0]\n"
- "str q16, [x23, #0x0]\n"
- "str q20, [x22, #0x0]\n"
+ "str q8, [x9, #0x0]\n"
+ "add x9, x9, #0x10\n"
+ "str q12, [x23, #0x0]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q20, [x21, #0x0]\n"
"108:" // Height 4: Writeback done
"subs x10, x10, #0x10\n"
"bgt 83b\n"
"b 164f\n"
"109:" // Height 5
- "mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
+ "mov x11, %x[col_bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"110:" // Height 5: Column loop
"movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
@@ -1946,258 +1946,258 @@ void a64_hybrid_s8qs_dot_6x16 (
"movi v26.4s, #0x0\n"
"movi v27.4s, #0x0\n"
"111:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"112:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 113f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 114f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 114f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
"b 114f\n"
"113:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
"114:" // Height 5: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 117f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q3, [x23, #0x0]\n"
- "ldr q4, [x22, #0x0]\n"
- "ldr q6, [x9, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x20\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
+ "ldr q6, [x28, #0x0]\n"
"blt 116f\n"
"115:" // Height 5: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "add x25, x25, #0x10\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "sub x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "add x23, x23, #0x10\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
- "add x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "add x22, x22, #0x10\n"
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
- "ldr q6, [x9, #0x20]\n"
+ "ldr q6, [x28, #0x20]\n"
+ "add x21, x21, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "add x23, x23, #0x10\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
+ "cmp x26, #0x20\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "add x22, x22, #0x10\n"
- "cmp x27, #0x20\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
- "ldr q7, [x9, #0x30]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q7, [x28, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4f84e0da // sdot v26.4s, v6.16b, v4.4b[0]\n"
- "ldr q6, [x9, #0x40]\n"
+ "ldr q6, [x28, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
- "ldr q7, [x9, #0x50]\n"
+ "ldr q7, [x28, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d4 // sdot v20.4s, v6.16b, v3.4b[1]\n"
".inst 0x4fa4e0d8 // sdot v24.4s, v6.16b, v4.4b[1]\n"
- "ldr q6, [x9, #0x60]\n"
+ "ldr q6, [x28, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n"
".inst 0x4fa3e0f5 // sdot v21.4s, v7.16b, v3.4b[1]\n"
".inst 0x4fa4e0f9 // sdot v25.4s, v7.16b, v4.4b[1]\n"
- "ldr q7, [x9, #0x70]\n"
+ "ldr q7, [x28, #0x70]\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d6 // sdot v22.4s, v6.16b, v3.4b[1]\n"
".inst 0x4fa4e0da // sdot v26.4s, v6.16b, v4.4b[1]\n"
- "ldr q6, [x9, #0x80]\n"
+ "ldr q6, [x28, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n"
".inst 0x4fa3e0f7 // sdot v23.4s, v7.16b, v3.4b[1]\n"
".inst 0x4fa4e0fb // sdot v27.4s, v7.16b, v4.4b[1]\n"
- "ldr q7, [x9, #0x90]\n"
+ "ldr q7, [x28, #0x90]\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d4 // sdot v20.4s, v6.16b, v3.4b[2]\n"
".inst 0x4f84e8d8 // sdot v24.4s, v6.16b, v4.4b[2]\n"
- "ldr q6, [x9, #0xa0]\n"
+ "ldr q6, [x28, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n"
".inst 0x4f83e8f5 // sdot v21.4s, v7.16b, v3.4b[2]\n"
".inst 0x4f84e8f9 // sdot v25.4s, v7.16b, v4.4b[2]\n"
- "ldr q7, [x9, #0xb0]\n"
+ "ldr q7, [x28, #0xb0]\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d6 // sdot v22.4s, v6.16b, v3.4b[2]\n"
".inst 0x4f84e8da // sdot v26.4s, v6.16b, v4.4b[2]\n"
- "ldr q6, [x9, #0xc0]\n"
+ "ldr q6, [x28, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n"
".inst 0x4f83e8f7 // sdot v23.4s, v7.16b, v3.4b[2]\n"
".inst 0x4f84e8fb // sdot v27.4s, v7.16b, v4.4b[2]\n"
- "ldr q7, [x9, #0xd0]\n"
+ "ldr q7, [x28, #0xd0]\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n"
".inst 0x4fa3e8d4 // sdot v20.4s, v6.16b, v3.4b[3]\n"
".inst 0x4fa4e8d8 // sdot v24.4s, v6.16b, v4.4b[3]\n"
- "ldr q6, [x9, #0xe0]\n"
+ "ldr q6, [x28, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n"
".inst 0x4fa3e8f5 // sdot v21.4s, v7.16b, v3.4b[3]\n"
".inst 0x4fa4e8f9 // sdot v25.4s, v7.16b, v4.4b[3]\n"
- "ldr q7, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q7, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n"
".inst 0x4fa3e8d6 // sdot v22.4s, v6.16b, v3.4b[3]\n"
".inst 0x4fa4e8da // sdot v26.4s, v6.16b, v4.4b[3]\n"
- "ldr q6, [x9, #0x0]\n"
+ "ldr q6, [x28, #0x0]\n"
".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n"
- "ldr q1, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
".inst 0x4fa2e8f3 // sdot v19.4s, v7.16b, v2.4b[3]\n"
- "ldr q2, [x24, #0x0]\n"
+ "ldr q2, [x23, #0x0]\n"
".inst 0x4fa3e8f7 // sdot v23.4s, v7.16b, v3.4b[3]\n"
- "ldr q3, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
".inst 0x4fa4e8fb // sdot v27.4s, v7.16b, v4.4b[3]\n"
- "ldr q4, [x22, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q4, [x21, #0x0]\n"
"bge 115b\n"
"116:" // Height 5: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
- ".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
"add x24, x24, #0x10\n"
+ ".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"add x23, x23, #0x10\n"
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
- "ldr q6, [x9, #0x20]\n"
- ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"add x22, x22, #0x10\n"
+ ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
+ "ldr q6, [x28, #0x20]\n"
+ "add x21, x21, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "sub x27, x27, #0x10\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
- "ldr q7, [x9, #0x30]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q7, [x28, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4f84e0da // sdot v26.4s, v6.16b, v4.4b[0]\n"
- "ldr q6, [x9, #0x40]\n"
+ "ldr q6, [x28, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
- "ldr q7, [x9, #0x50]\n"
+ "ldr q7, [x28, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d4 // sdot v20.4s, v6.16b, v3.4b[1]\n"
".inst 0x4fa4e0d8 // sdot v24.4s, v6.16b, v4.4b[1]\n"
- "ldr q6, [x9, #0x60]\n"
+ "ldr q6, [x28, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n"
".inst 0x4fa3e0f5 // sdot v21.4s, v7.16b, v3.4b[1]\n"
".inst 0x4fa4e0f9 // sdot v25.4s, v7.16b, v4.4b[1]\n"
- "ldr q7, [x9, #0x70]\n"
+ "ldr q7, [x28, #0x70]\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d6 // sdot v22.4s, v6.16b, v3.4b[1]\n"
".inst 0x4fa4e0da // sdot v26.4s, v6.16b, v4.4b[1]\n"
- "ldr q6, [x9, #0x80]\n"
+ "ldr q6, [x28, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n"
".inst 0x4fa3e0f7 // sdot v23.4s, v7.16b, v3.4b[1]\n"
".inst 0x4fa4e0fb // sdot v27.4s, v7.16b, v4.4b[1]\n"
- "ldr q7, [x9, #0x90]\n"
+ "ldr q7, [x28, #0x90]\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d4 // sdot v20.4s, v6.16b, v3.4b[2]\n"
".inst 0x4f84e8d8 // sdot v24.4s, v6.16b, v4.4b[2]\n"
- "ldr q6, [x9, #0xa0]\n"
+ "ldr q6, [x28, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n"
".inst 0x4f83e8f5 // sdot v21.4s, v7.16b, v3.4b[2]\n"
".inst 0x4f84e8f9 // sdot v25.4s, v7.16b, v4.4b[2]\n"
- "ldr q7, [x9, #0xb0]\n"
+ "ldr q7, [x28, #0xb0]\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d6 // sdot v22.4s, v6.16b, v3.4b[2]\n"
".inst 0x4f84e8da // sdot v26.4s, v6.16b, v4.4b[2]\n"
- "ldr q6, [x9, #0xc0]\n"
+ "ldr q6, [x28, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n"
".inst 0x4f83e8f7 // sdot v23.4s, v7.16b, v3.4b[2]\n"
".inst 0x4f84e8fb // sdot v27.4s, v7.16b, v4.4b[2]\n"
- "ldr q7, [x9, #0xd0]\n"
+ "ldr q7, [x28, #0xd0]\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n"
".inst 0x4fa3e8d4 // sdot v20.4s, v6.16b, v3.4b[3]\n"
".inst 0x4fa4e8d8 // sdot v24.4s, v6.16b, v4.4b[3]\n"
- "ldr q6, [x9, #0xe0]\n"
+ "ldr q6, [x28, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n"
".inst 0x4fa3e8f5 // sdot v21.4s, v7.16b, v3.4b[3]\n"
".inst 0x4fa4e8f9 // sdot v25.4s, v7.16b, v4.4b[3]\n"
- "ldr q7, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q7, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n"
@@ -2209,32 +2209,32 @@ void a64_hybrid_s8qs_dot_6x16 (
".inst 0x4fa3e8f7 // sdot v23.4s, v7.16b, v3.4b[3]\n"
".inst 0x4fa4e8fb // sdot v27.4s, v7.16b, v4.4b[3]\n"
"117:" // Height 5: Multiply loop: Main loop skip
- "cbz x27, 122f\n"
- "cmp x27, #0x4\n"
+ "cbz x26, 122f\n"
+ "cmp x26, #0x4\n"
"blt 119f\n"
"118:" // Height 5: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr s1, [x25], #0x4\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
- "ldr s2, [x24], #0x4\n"
- "ldr s3, [x23], #0x4\n"
- "ldr s4, [x22], #0x4\n"
- "ldr q6, [x9, #0x0]\n"
+ "ldr s0, [x25], #0x4\n"
+ "sub x26, x26, #0x4\n"
+ "ldr s1, [x24], #0x4\n"
+ "cmp x26, #0x4\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr s3, [x22], #0x4\n"
+ "ldr s4, [x21], #0x4\n"
+ "ldr q6, [x28, #0x0]\n"
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x28, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr q7, [x9, #0x10]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
- "ldr q6, [x9, #0x20]\n"
+ "ldr q6, [x28, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
- "ldr q7, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
@@ -2246,43 +2246,43 @@ void a64_hybrid_s8qs_dot_6x16 (
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
"bge 118b\n"
+ "cbz x26, 122f\n"
"119:" // Height 5: Multiply loop: Skip odd blocks
- "cbz x27, 122f\n"
- "tbz x27, #1, 120f\n"
- "ldr h0, [x26], #0x2\n"
- "ldr h1, [x25], #0x2\n"
- "ldr h2, [x24], #0x2\n"
- "ldr h3, [x23], #0x2\n"
- "ldr h4, [x22], #0x2\n"
- "tbz x27, #0, 121f\n"
- "ld1 { v0.b }[2], [x26]\n"
- "ld1 { v1.b }[2], [x25]\n"
- "ld1 { v2.b }[2], [x24]\n"
- "ld1 { v3.b }[2], [x23]\n"
- "ld1 { v4.b }[2], [x22]\n"
+ "tbz x26, #1, 120f\n"
+ "ldr h0, [x25], #0x2\n"
+ "ldr h1, [x24], #0x2\n"
+ "ldr h2, [x23], #0x2\n"
+ "ldr h3, [x22], #0x2\n"
+ "ldr h4, [x21], #0x2\n"
+ "tbz x26, #0, 121f\n"
+ "ld1 { v0.b }[2], [x25]\n"
+ "ld1 { v1.b }[2], [x24]\n"
+ "ld1 { v2.b }[2], [x23]\n"
+ "ld1 { v3.b }[2], [x22]\n"
+ "ld1 { v4.b }[2], [x21]\n"
"b 121f\n"
"120:" // Height 5: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x26, #0x0]\n"
- "ldr b1, [x25, #0x0]\n"
- "ldr b2, [x24, #0x0]\n"
- "ldr b3, [x23, #0x0]\n"
- "ldr b4, [x22, #0x0]\n"
+ "ldr b0, [x25, #0x0]\n"
+ "ldr b1, [x24, #0x0]\n"
+ "ldr b2, [x23, #0x0]\n"
+ "ldr b3, [x22, #0x0]\n"
+ "ldr b4, [x21, #0x0]\n"
"121:" // Height 5: Multiply loop: Ragged operand read: Done
- "ldr q6, [x9, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q6, [x28, #0x0]\n"
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x28, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
- "ldr q6, [x9, #0x20]\n"
+ "ldr q6, [x28, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
- "ldr q7, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
@@ -2294,42 +2294,42 @@ void a64_hybrid_s8qs_dot_6x16 (
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
"122:" // Height 5: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 112b\n"
- "ldr q0, [x14, #0x0]\n"
- "ldr q1, [x14, #0x10]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
+ "add x23, x9, x19\n"
+ "ldr q0, [x11, #0x0]\n"
"add v8.4s, v8.4s, v0.4s\n"
- "add v9.4s, v9.4s, v1.4s\n"
- "ldr q2, [x14, #0x20]\n"
- "ldr q3, [x14, #0x30]\n"
- "add v10.4s, v10.4s, v2.4s\n"
- "add v11.4s, v11.4s, v3.4s\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x11, x20\n"
- "add x23, x24, x20\n"
- "prfm pstl1keep, [x11, #0x0]\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
- "prfm pstl1keep, [x24, #0x0]\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19\n"
+ "add v12.4s, v12.4s, v0.4s\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19\n"
+ "add v16.4s, v16.4s, v0.4s\n"
"prfm pstl1keep, [x21, #0x0]\n"
- "add v12.4s, v12.4s, v0.4s\n"
+ "add x20, x21, x19\n"
+ "add v20.4s, v20.4s, v0.4s\n"
+ "prfm pstl1keep, [x20, #0x0]\n"
+ "add v24.4s, v24.4s, v0.4s\n"
+ "ldr q1, [x11, #0x10]\n"
+ "ldr q2, [x11, #0x20]\n"
+ "add v9.4s, v9.4s, v1.4s\n"
+ "ldr q3, [x11, #0x30]\n"
+ "add x11, x11, #0x40\n"
+ "add v10.4s, v10.4s, v2.4s\n"
"add v13.4s, v13.4s, v1.4s\n"
"add v14.4s, v14.4s, v2.4s\n"
- "add v15.4s, v15.4s, v3.4s\n"
- "add x14, x14, #0x40\n"
- "add v16.4s, v16.4s, v0.4s\n"
"add v17.4s, v17.4s, v1.4s\n"
+ "add v11.4s, v11.4s, v3.4s\n"
+ "add v15.4s, v15.4s, v3.4s\n"
"add v18.4s, v18.4s, v2.4s\n"
"add v19.4s, v19.4s, v3.4s\n"
- "add v20.4s, v20.4s, v0.4s\n"
"add v21.4s, v21.4s, v1.4s\n"
"add v22.4s, v22.4s, v2.4s\n"
"add v23.4s, v23.4s, v3.4s\n"
- "add v24.4s, v24.4s, v0.4s\n"
"add v25.4s, v25.4s, v1.4s\n"
"add v26.4s, v26.4s, v2.4s\n"
"add v27.4s, v27.4s, v3.4s\n"
@@ -2341,20 +2341,20 @@ void a64_hybrid_s8qs_dot_6x16 (
"ldr q2, [x12, #0x20]\n"
"ldr q6, [x13, #0x20]\n"
"ldr q3, [x12, #0x30]\n"
- "ldr q7, [x13, #0x30]\n"
"add x12, x12, #0x40\n"
+ "ldr q7, [x13, #0x30]\n"
"add x13, x13, #0x40\n"
"b 124f\n"
"123:" // Height 5: per layer parameters
- "add x25, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x25]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x25]\n"
+ "add x24, %x[qp], %[per_layer_right_shift]\n"
+ "ld1r { v0.4s }, [x24]\n"
"mov v1.16b, v0.16b\n"
- "mov v5.16b, v4.16b\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v4.4s }, [x24]\n"
"mov v2.16b, v0.16b\n"
- "mov v6.16b, v4.16b\n"
"mov v3.16b, v0.16b\n"
+ "mov v5.16b, v4.16b\n"
+ "mov v6.16b, v4.16b\n"
"mov v7.16b, v4.16b\n"
"124:" // Height 5: parameters loaded
"sqrdmulh v8.4s, v8.4s, v4.4s\n"
@@ -2381,148 +2381,148 @@ void a64_hybrid_s8qs_dot_6x16 (
"and v4.16b, v8.16b, v0.16b\n"
"and v5.16b, v9.16b, v1.16b\n"
"and v6.16b, v10.16b, v2.16b\n"
- "and v7.16b, v11.16b, v3.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v8.4s, v8.4s, v4.4s\n"
"sqadd v9.4s, v9.4s, v5.4s\n"
"sqadd v10.4s, v10.4s, v6.4s\n"
- "sqadd v11.4s, v11.4s, v7.4s\n"
+ "and v7.16b, v11.16b, v3.16b\n"
"and v4.16b, v12.16b, v0.16b\n"
"and v5.16b, v13.16b, v1.16b\n"
- "and v6.16b, v14.16b, v2.16b\n"
- "and v7.16b, v15.16b, v3.16b\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
+ "sqadd v11.4s, v11.4s, v7.4s\n"
"sqadd v12.4s, v12.4s, v4.4s\n"
"sqadd v13.4s, v13.4s, v5.4s\n"
+ "and v6.16b, v14.16b, v2.16b\n"
+ "and v7.16b, v15.16b, v3.16b\n"
+ "and v4.16b, v16.16b, v0.16b\n"
+ "sshr v6.4s, v6.4s, #0x1f\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
"sqadd v14.4s, v14.4s, v6.4s\n"
"sqadd v15.4s, v15.4s, v7.4s\n"
- "and v4.16b, v16.16b, v0.16b\n"
+ "sqadd v16.4s, v16.4s, v4.4s\n"
"and v5.16b, v17.16b, v1.16b\n"
"and v6.16b, v18.16b, v2.16b\n"
"and v7.16b, v19.16b, v3.16b\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v4.4s\n"
"sqadd v17.4s, v17.4s, v5.4s\n"
"sqadd v18.4s, v18.4s, v6.4s\n"
"sqadd v19.4s, v19.4s, v7.4s\n"
"and v4.16b, v20.16b, v0.16b\n"
"and v5.16b, v21.16b, v1.16b\n"
"and v6.16b, v22.16b, v2.16b\n"
- "and v7.16b, v23.16b, v3.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v20.4s, v20.4s, v4.4s\n"
"sqadd v21.4s, v21.4s, v5.4s\n"
"sqadd v22.4s, v22.4s, v6.4s\n"
- "sqadd v23.4s, v23.4s, v7.4s\n"
+ "and v7.16b, v23.16b, v3.16b\n"
"and v4.16b, v24.16b, v0.16b\n"
"and v5.16b, v25.16b, v1.16b\n"
- "and v6.16b, v26.16b, v2.16b\n"
- "and v7.16b, v27.16b, v3.16b\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
+ "sqadd v23.4s, v23.4s, v7.4s\n"
"sqadd v24.4s, v24.4s, v4.4s\n"
"sqadd v25.4s, v25.4s, v5.4s\n"
+ "and v6.16b, v26.16b, v2.16b\n"
+ "and v7.16b, v27.16b, v3.16b\n"
+ "sshr v6.4s, v6.4s, #0x1f\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v26.4s, v26.4s, v6.4s\n"
"sqadd v27.4s, v27.4s, v7.4s\n"
"125:" // Height 5: no shift correction
- "add x25, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x25]\n"
"srshl v8.4s, v8.4s, v0.4s\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "ld1r { v4.4s }, [x24]\n"
"srshl v9.4s, v9.4s, v1.4s\n"
+ "add x24, %x[qp], %[minval]\n"
"srshl v10.4s, v10.4s, v2.4s\n"
+ "ld1r { v5.4s }, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
"srshl v11.4s, v11.4s, v3.4s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x25]\n"
+ "ld1r { v6.4s }, [x24]\n"
+ "cmp x10, #0x10\n"
"srshl v12.4s, v12.4s, v0.4s\n"
"srshl v13.4s, v13.4s, v1.4s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x25]\n"
- "srshl v14.4s, v14.4s, v2.4s\n"
- "srshl v15.4s, v15.4s, v3.4s\n"
- "cmp x10, #0x10\n"
- "srshl v16.4s, v16.4s, v0.4s\n"
- "srshl v17.4s, v17.4s, v1.4s\n"
- "srshl v18.4s, v18.4s, v2.4s\n"
- "srshl v19.4s, v19.4s, v3.4s\n"
- "srshl v20.4s, v20.4s, v0.4s\n"
- "srshl v21.4s, v21.4s, v1.4s\n"
- "srshl v22.4s, v22.4s, v2.4s\n"
- "srshl v23.4s, v23.4s, v3.4s\n"
- "srshl v24.4s, v24.4s, v0.4s\n"
- "srshl v25.4s, v25.4s, v1.4s\n"
- "srshl v26.4s, v26.4s, v2.4s\n"
- "srshl v27.4s, v27.4s, v3.4s\n"
"add v8.4s, v8.4s, v4.4s\n"
"add v9.4s, v9.4s, v4.4s\n"
"add v10.4s, v10.4s, v4.4s\n"
- "add v11.4s, v11.4s, v4.4s\n"
- "add v12.4s, v12.4s, v4.4s\n"
- "add v13.4s, v13.4s, v4.4s\n"
- "add v14.4s, v14.4s, v4.4s\n"
- "add v15.4s, v15.4s, v4.4s\n"
- "add v16.4s, v16.4s, v4.4s\n"
- "add v17.4s, v17.4s, v4.4s\n"
- "add v18.4s, v18.4s, v4.4s\n"
- "add v19.4s, v19.4s, v4.4s\n"
- "add v20.4s, v20.4s, v4.4s\n"
- "add v21.4s, v21.4s, v4.4s\n"
- "add v22.4s, v22.4s, v4.4s\n"
- "add v23.4s, v23.4s, v4.4s\n"
- "add v24.4s, v24.4s, v4.4s\n"
- "add v25.4s, v25.4s, v4.4s\n"
- "add v26.4s, v26.4s, v4.4s\n"
- "add v27.4s, v27.4s, v4.4s\n"
"smin v8.4s, v8.4s, v6.4s\n"
"smin v9.4s, v9.4s, v6.4s\n"
"smin v10.4s, v10.4s, v6.4s\n"
- "smin v11.4s, v11.4s, v6.4s\n"
- "smin v12.4s, v12.4s, v6.4s\n"
- "smin v13.4s, v13.4s, v6.4s\n"
- "smin v14.4s, v14.4s, v6.4s\n"
- "smin v15.4s, v15.4s, v6.4s\n"
- "smin v16.4s, v16.4s, v6.4s\n"
- "smin v17.4s, v17.4s, v6.4s\n"
- "smin v18.4s, v18.4s, v6.4s\n"
- "smin v19.4s, v19.4s, v6.4s\n"
- "smin v20.4s, v20.4s, v6.4s\n"
- "smin v21.4s, v21.4s, v6.4s\n"
- "smin v22.4s, v22.4s, v6.4s\n"
- "smin v23.4s, v23.4s, v6.4s\n"
- "smin v24.4s, v24.4s, v6.4s\n"
- "smin v25.4s, v25.4s, v6.4s\n"
- "smin v26.4s, v26.4s, v6.4s\n"
- "smin v27.4s, v27.4s, v6.4s\n"
"smax v8.4s, v8.4s, v5.4s\n"
"smax v9.4s, v9.4s, v5.4s\n"
"smax v10.4s, v10.4s, v5.4s\n"
+ "add v11.4s, v11.4s, v4.4s\n"
+ "add v12.4s, v12.4s, v4.4s\n"
+ "add v13.4s, v13.4s, v4.4s\n"
+ "smin v11.4s, v11.4s, v6.4s\n"
+ "smin v12.4s, v12.4s, v6.4s\n"
+ "smin v13.4s, v13.4s, v6.4s\n"
"smax v11.4s, v11.4s, v5.4s\n"
"smax v12.4s, v12.4s, v5.4s\n"
"smax v13.4s, v13.4s, v5.4s\n"
+ "srshl v14.4s, v14.4s, v2.4s\n"
+ "srshl v15.4s, v15.4s, v3.4s\n"
+ "srshl v16.4s, v16.4s, v0.4s\n"
+ "srshl v17.4s, v17.4s, v1.4s\n"
+ "add v14.4s, v14.4s, v4.4s\n"
+ "add v15.4s, v15.4s, v4.4s\n"
+ "add v16.4s, v16.4s, v4.4s\n"
+ "smin v14.4s, v14.4s, v6.4s\n"
+ "smin v15.4s, v15.4s, v6.4s\n"
+ "smin v16.4s, v16.4s, v6.4s\n"
"smax v14.4s, v14.4s, v5.4s\n"
"smax v15.4s, v15.4s, v5.4s\n"
"smax v16.4s, v16.4s, v5.4s\n"
+ "add v17.4s, v17.4s, v4.4s\n"
+ "srshl v18.4s, v18.4s, v2.4s\n"
+ "srshl v19.4s, v19.4s, v3.4s\n"
+ "smin v17.4s, v17.4s, v6.4s\n"
+ "srshl v20.4s, v20.4s, v0.4s\n"
+ "add v18.4s, v18.4s, v4.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
+ "add v19.4s, v19.4s, v4.4s\n"
+ "smin v18.4s, v18.4s, v6.4s\n"
+ "add v20.4s, v20.4s, v4.4s\n"
+ "smin v19.4s, v19.4s, v6.4s\n"
"smax v18.4s, v18.4s, v5.4s\n"
+ "smin v20.4s, v20.4s, v6.4s\n"
"smax v19.4s, v19.4s, v5.4s\n"
+ "srshl v21.4s, v21.4s, v1.4s\n"
"smax v20.4s, v20.4s, v5.4s\n"
+ "srshl v22.4s, v22.4s, v2.4s\n"
+ "srshl v23.4s, v23.4s, v3.4s\n"
+ "add v21.4s, v21.4s, v4.4s\n"
+ "srshl v24.4s, v24.4s, v0.4s\n"
+ "add v22.4s, v22.4s, v4.4s\n"
+ "smin v21.4s, v21.4s, v6.4s\n"
+ "add v23.4s, v23.4s, v4.4s\n"
+ "smin v22.4s, v22.4s, v6.4s\n"
"smax v21.4s, v21.4s, v5.4s\n"
+ "smin v23.4s, v23.4s, v6.4s\n"
"smax v22.4s, v22.4s, v5.4s\n"
+ "add v24.4s, v24.4s, v4.4s\n"
"smax v23.4s, v23.4s, v5.4s\n"
+ "srshl v25.4s, v25.4s, v1.4s\n"
+ "smin v24.4s, v24.4s, v6.4s\n"
+ "srshl v26.4s, v26.4s, v2.4s\n"
+ "srshl v27.4s, v27.4s, v3.4s\n"
"smax v24.4s, v24.4s, v5.4s\n"
+ "add v25.4s, v25.4s, v4.4s\n"
+ "add v26.4s, v26.4s, v4.4s\n"
+ "add v27.4s, v27.4s, v4.4s\n"
+ "smin v25.4s, v25.4s, v6.4s\n"
+ "smin v26.4s, v26.4s, v6.4s\n"
+ "smin v27.4s, v27.4s, v6.4s\n"
"smax v25.4s, v25.4s, v5.4s\n"
"smax v26.4s, v26.4s, v5.4s\n"
"smax v27.4s, v27.4s, v5.4s\n"
@@ -2543,131 +2543,131 @@ void a64_hybrid_s8qs_dot_6x16 (
"uzp1 v24.16b, v24.16b, v25.16b\n"
"bge 134f\n"
"tbz x10, #3, 129f\n"
- "str d8, [x11], #0x8\n"
- "str d12, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d20, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
+ "str d8, [x9], #0x8\n"
+ "str d12, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "str d20, [x21], #0x8\n"
+ "str d24, [x20], #0x8\n"
"tbz x10, #2, 127f\n"
- "st1 { v8.s }[2], [x11], #0x4\n"
- "st1 { v12.s }[2], [x24], #0x4\n"
- "st1 { v16.s }[2], [x23], #0x4\n"
- "st1 { v20.s }[2], [x22], #0x4\n"
- "st1 { v24.s }[2], [x21], #0x4\n"
+ "st1 { v8.s }[2], [x9], #0x4\n"
+ "st1 { v12.s }[2], [x23], #0x4\n"
+ "st1 { v16.s }[2], [x22], #0x4\n"
+ "st1 { v20.s }[2], [x21], #0x4\n"
+ "st1 { v24.s }[2], [x20], #0x4\n"
"tbz x10, #1, 126f\n"
- "st1 { v8.h }[6], [x11], #0x2\n"
- "st1 { v12.h }[6], [x24], #0x2\n"
- "st1 { v16.h }[6], [x23], #0x2\n"
- "st1 { v20.h }[6], [x22], #0x2\n"
- "st1 { v24.h }[6], [x21], #0x2\n"
+ "st1 { v8.h }[6], [x9], #0x2\n"
+ "st1 { v12.h }[6], [x23], #0x2\n"
+ "st1 { v16.h }[6], [x22], #0x2\n"
+ "st1 { v20.h }[6], [x21], #0x2\n"
+ "st1 { v24.h }[6], [x20], #0x2\n"
"tbz x10, #0, 133f\n"
- "st1 { v8.b }[14], [x11]\n"
- "st1 { v12.b }[14], [x24]\n"
- "st1 { v16.b }[14], [x23]\n"
- "st1 { v20.b }[14], [x22]\n"
- "st1 { v24.b }[14], [x21]\n"
+ "st1 { v8.b }[14], [x9]\n"
+ "st1 { v12.b }[14], [x23]\n"
+ "st1 { v16.b }[14], [x22]\n"
+ "st1 { v20.b }[14], [x21]\n"
+ "st1 { v24.b }[14], [x20]\n"
"b 133f\n"
"126:" // Height 5: Partial direct writeback: partial_1_12
"tbz x10, #0, 133f\n"
- "st1 { v8.b }[12], [x11]\n"
- "st1 { v12.b }[12], [x24]\n"
- "st1 { v16.b }[12], [x23]\n"
- "st1 { v20.b }[12], [x22]\n"
- "st1 { v24.b }[12], [x21]\n"
+ "st1 { v8.b }[12], [x9]\n"
+ "st1 { v12.b }[12], [x23]\n"
+ "st1 { v16.b }[12], [x22]\n"
+ "st1 { v20.b }[12], [x21]\n"
+ "st1 { v24.b }[12], [x20]\n"
"b 133f\n"
"127:" // Height 5: Partial direct writeback: partial_2_8
"tbz x10, #1, 128f\n"
- "st1 { v8.h }[4], [x11], #0x2\n"
- "st1 { v12.h }[4], [x24], #0x2\n"
- "st1 { v16.h }[4], [x23], #0x2\n"
- "st1 { v20.h }[4], [x22], #0x2\n"
- "st1 { v24.h }[4], [x21], #0x2\n"
+ "st1 { v8.h }[4], [x9], #0x2\n"
+ "st1 { v12.h }[4], [x23], #0x2\n"
+ "st1 { v16.h }[4], [x22], #0x2\n"
+ "st1 { v20.h }[4], [x21], #0x2\n"
+ "st1 { v24.h }[4], [x20], #0x2\n"
"tbz x10, #0, 133f\n"
- "st1 { v8.b }[10], [x11]\n"
- "st1 { v12.b }[10], [x24]\n"
- "st1 { v16.b }[10], [x23]\n"
- "st1 { v20.b }[10], [x22]\n"
- "st1 { v24.b }[10], [x21]\n"
+ "st1 { v8.b }[10], [x9]\n"
+ "st1 { v12.b }[10], [x23]\n"
+ "st1 { v16.b }[10], [x22]\n"
+ "st1 { v20.b }[10], [x21]\n"
+ "st1 { v24.b }[10], [x20]\n"
"b 133f\n"
"128:" // Height 5: Partial direct writeback: partial_1_8
"tbz x10, #0, 133f\n"
- "st1 { v8.b }[8], [x11]\n"
- "st1 { v12.b }[8], [x24]\n"
- "st1 { v16.b }[8], [x23]\n"
- "st1 { v20.b }[8], [x22]\n"
- "st1 { v24.b }[8], [x21]\n"
+ "st1 { v8.b }[8], [x9]\n"
+ "st1 { v12.b }[8], [x23]\n"
+ "st1 { v16.b }[8], [x22]\n"
+ "st1 { v20.b }[8], [x21]\n"
+ "st1 { v24.b }[8], [x20]\n"
"b 133f\n"
"129:" // Height 5: Partial direct writeback: partial_4_0
"tbz x10, #2, 131f\n"
- "str s8, [x11], #0x4\n"
- "str s12, [x24], #0x4\n"
- "str s16, [x23], #0x4\n"
- "str s20, [x22], #0x4\n"
- "str s24, [x21], #0x4\n"
+ "str s8, [x9], #0x4\n"
+ "str s12, [x23], #0x4\n"
+ "str s16, [x22], #0x4\n"
+ "str s20, [x21], #0x4\n"
+ "str s24, [x20], #0x4\n"
"tbz x10, #1, 130f\n"
- "st1 { v8.h }[2], [x11], #0x2\n"
- "st1 { v12.h }[2], [x24], #0x2\n"
- "st1 { v16.h }[2], [x23], #0x2\n"
- "st1 { v20.h }[2], [x22], #0x2\n"
- "st1 { v24.h }[2], [x21], #0x2\n"
+ "st1 { v8.h }[2], [x9], #0x2\n"
+ "st1 { v12.h }[2], [x23], #0x2\n"
+ "st1 { v16.h }[2], [x22], #0x2\n"
+ "st1 { v20.h }[2], [x21], #0x2\n"
+ "st1 { v24.h }[2], [x20], #0x2\n"
"tbz x10, #0, 133f\n"
- "st1 { v8.b }[6], [x11]\n"
- "st1 { v12.b }[6], [x24]\n"
- "st1 { v16.b }[6], [x23]\n"
- "st1 { v20.b }[6], [x22]\n"
- "st1 { v24.b }[6], [x21]\n"
+ "st1 { v8.b }[6], [x9]\n"
+ "st1 { v12.b }[6], [x23]\n"
+ "st1 { v16.b }[6], [x22]\n"
+ "st1 { v20.b }[6], [x21]\n"
+ "st1 { v24.b }[6], [x20]\n"
"b 133f\n"
"130:" // Height 5: Partial direct writeback: partial_1_4
"tbz x10, #0, 133f\n"
- "st1 { v8.b }[4], [x11]\n"
- "st1 { v12.b }[4], [x24]\n"
- "st1 { v16.b }[4], [x23]\n"
- "st1 { v20.b }[4], [x22]\n"
- "st1 { v24.b }[4], [x21]\n"
+ "st1 { v8.b }[4], [x9]\n"
+ "st1 { v12.b }[4], [x23]\n"
+ "st1 { v16.b }[4], [x22]\n"
+ "st1 { v20.b }[4], [x21]\n"
+ "st1 { v24.b }[4], [x20]\n"
"b 133f\n"
"131:" // Height 5: Partial direct writeback: partial_2_0
"tbz x10, #1, 132f\n"
- "str h8, [x11], #0x2\n"
- "str h12, [x24], #0x2\n"
- "str h16, [x23], #0x2\n"
- "str h20, [x22], #0x2\n"
- "str h24, [x21], #0x2\n"
+ "str h8, [x9], #0x2\n"
+ "str h12, [x23], #0x2\n"
+ "str h16, [x22], #0x2\n"
+ "str h20, [x21], #0x2\n"
+ "str h24, [x20], #0x2\n"
"tbz x10, #0, 133f\n"
- "st1 { v8.b }[2], [x11]\n"
- "st1 { v12.b }[2], [x24]\n"
- "st1 { v16.b }[2], [x23]\n"
- "st1 { v20.b }[2], [x22]\n"
- "st1 { v24.b }[2], [x21]\n"
+ "st1 { v8.b }[2], [x9]\n"
+ "st1 { v12.b }[2], [x23]\n"
+ "st1 { v16.b }[2], [x22]\n"
+ "st1 { v20.b }[2], [x21]\n"
+ "st1 { v24.b }[2], [x20]\n"
"b 133f\n"
"132:" // Height 5: Partial direct writeback: partial_1_0
- "str b8, [x11, #0x0]\n"
- "str b12, [x24, #0x0]\n"
- "str b16, [x23, #0x0]\n"
- "str b20, [x22, #0x0]\n"
- "str b24, [x21, #0x0]\n"
+ "str b8, [x9, #0x0]\n"
+ "str b12, [x23, #0x0]\n"
+ "str b16, [x22, #0x0]\n"
+ "str b20, [x21, #0x0]\n"
+ "str b24, [x20, #0x0]\n"
"133:" // Height 5: Partial direct writeback: Done
"b 135f\n"
"134:" // Height 5: Full writeback
- "str q8, [x11, #0x0]\n"
- "add x11, x11, #0x10\n"
- "str q12, [x24, #0x0]\n"
- "str q16, [x23, #0x0]\n"
- "str q20, [x22, #0x0]\n"
- "str q24, [x21, #0x0]\n"
+ "str q8, [x9, #0x0]\n"
+ "add x9, x9, #0x10\n"
+ "str q12, [x23, #0x0]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q20, [x21, #0x0]\n"
+ "str q24, [x20, #0x0]\n"
"135:" // Height 5: Writeback done
"subs x10, x10, #0x10\n"
"bgt 110b\n"
"b 164f\n"
"136:" // Height 6
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x6\n"
- "mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
+ "mov x11, %x[col_bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
+ "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x11, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x20, #0x6\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
"137:" // Height 6: Column loop
"movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
@@ -2694,297 +2694,297 @@ void a64_hybrid_s8qs_dot_6x16 (
"movi v30.4s, #0x0\n"
"movi v31.4s, #0x0\n"
"138:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"139:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 140f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 141f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
- "add x21, x21, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 141f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
+ "add x20, x20, x19\n"
"b 141f\n"
"140:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
+ "add x20, x21, x19\n"
"141:" // Height 6: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 144f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q3, [x23, #0x0]\n"
- "ldr q4, [x22, #0x0]\n"
- "ldr q5, [x21, #0x0]\n"
- "ldr q6, [x9, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x20\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
+ "ldr q5, [x20, #0x0]\n"
+ "ldr q6, [x28, #0x0]\n"
"blt 143f\n"
"142:" // Height 6: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "add x25, x25, #0x10\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "sub x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "add x23, x23, #0x10\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
- "add x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "add x22, x22, #0x10\n"
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "add x21, x21, #0x10\n"
".inst 0x4f85e0dc // sdot v28.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x9, #0x20]\n"
- "add x23, x23, #0x10\n"
+ "ldr q6, [x28, #0x20]\n"
+ "add x20, x20, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "add x22, x22, #0x10\n"
- "add x21, x21, #0x10\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
+ "cmp x26, #0x20\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
- "cmp x27, #0x20\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
".inst 0x4f85e0fd // sdot v29.4s, v7.16b, v5.4b[0]\n"
- "ldr q7, [x9, #0x30]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q7, [x28, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x4f84e0da // sdot v26.4s, v6.16b, v4.4b[0]\n"
".inst 0x4f85e0de // sdot v30.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x9, #0x40]\n"
+ "ldr q6, [x28, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
".inst 0x4f85e0ff // sdot v31.4s, v7.16b, v5.4b[0]\n"
- "ldr q7, [x9, #0x50]\n"
+ "ldr q7, [x28, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d4 // sdot v20.4s, v6.16b, v3.4b[1]\n"
".inst 0x4fa4e0d8 // sdot v24.4s, v6.16b, v4.4b[1]\n"
".inst 0x4fa5e0dc // sdot v28.4s, v6.16b, v5.4b[1]\n"
- "ldr q6, [x9, #0x60]\n"
+ "ldr q6, [x28, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n"
".inst 0x4fa3e0f5 // sdot v21.4s, v7.16b, v3.4b[1]\n"
".inst 0x4fa4e0f9 // sdot v25.4s, v7.16b, v4.4b[1]\n"
".inst 0x4fa5e0fd // sdot v29.4s, v7.16b, v5.4b[1]\n"
- "ldr q7, [x9, #0x70]\n"
+ "ldr q7, [x28, #0x70]\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d6 // sdot v22.4s, v6.16b, v3.4b[1]\n"
".inst 0x4fa4e0da // sdot v26.4s, v6.16b, v4.4b[1]\n"
".inst 0x4fa5e0de // sdot v30.4s, v6.16b, v5.4b[1]\n"
- "ldr q6, [x9, #0x80]\n"
+ "ldr q6, [x28, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n"
".inst 0x4fa3e0f7 // sdot v23.4s, v7.16b, v3.4b[1]\n"
".inst 0x4fa4e0fb // sdot v27.4s, v7.16b, v4.4b[1]\n"
".inst 0x4fa5e0ff // sdot v31.4s, v7.16b, v5.4b[1]\n"
- "ldr q7, [x9, #0x90]\n"
+ "ldr q7, [x28, #0x90]\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d4 // sdot v20.4s, v6.16b, v3.4b[2]\n"
".inst 0x4f84e8d8 // sdot v24.4s, v6.16b, v4.4b[2]\n"
".inst 0x4f85e8dc // sdot v28.4s, v6.16b, v5.4b[2]\n"
- "ldr q6, [x9, #0xa0]\n"
+ "ldr q6, [x28, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n"
".inst 0x4f83e8f5 // sdot v21.4s, v7.16b, v3.4b[2]\n"
".inst 0x4f84e8f9 // sdot v25.4s, v7.16b, v4.4b[2]\n"
".inst 0x4f85e8fd // sdot v29.4s, v7.16b, v5.4b[2]\n"
- "ldr q7, [x9, #0xb0]\n"
+ "ldr q7, [x28, #0xb0]\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d6 // sdot v22.4s, v6.16b, v3.4b[2]\n"
".inst 0x4f84e8da // sdot v26.4s, v6.16b, v4.4b[2]\n"
".inst 0x4f85e8de // sdot v30.4s, v6.16b, v5.4b[2]\n"
- "ldr q6, [x9, #0xc0]\n"
+ "ldr q6, [x28, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n"
".inst 0x4f83e8f7 // sdot v23.4s, v7.16b, v3.4b[2]\n"
".inst 0x4f84e8fb // sdot v27.4s, v7.16b, v4.4b[2]\n"
".inst 0x4f85e8ff // sdot v31.4s, v7.16b, v5.4b[2]\n"
- "ldr q7, [x9, #0xd0]\n"
+ "ldr q7, [x28, #0xd0]\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n"
".inst 0x4fa3e8d4 // sdot v20.4s, v6.16b, v3.4b[3]\n"
".inst 0x4fa4e8d8 // sdot v24.4s, v6.16b, v4.4b[3]\n"
".inst 0x4fa5e8dc // sdot v28.4s, v6.16b, v5.4b[3]\n"
- "ldr q6, [x9, #0xe0]\n"
+ "ldr q6, [x28, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n"
".inst 0x4fa3e8f5 // sdot v21.4s, v7.16b, v3.4b[3]\n"
".inst 0x4fa4e8f9 // sdot v25.4s, v7.16b, v4.4b[3]\n"
".inst 0x4fa5e8fd // sdot v29.4s, v7.16b, v5.4b[3]\n"
- "ldr q7, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q7, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n"
".inst 0x4fa3e8d6 // sdot v22.4s, v6.16b, v3.4b[3]\n"
".inst 0x4fa4e8da // sdot v26.4s, v6.16b, v4.4b[3]\n"
".inst 0x4fa5e8de // sdot v30.4s, v6.16b, v5.4b[3]\n"
- "ldr q6, [x9, #0x0]\n"
+ "ldr q6, [x28, #0x0]\n"
".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n"
- "ldr q1, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
".inst 0x4fa2e8f3 // sdot v19.4s, v7.16b, v2.4b[3]\n"
- "ldr q2, [x24, #0x0]\n"
+ "ldr q2, [x23, #0x0]\n"
".inst 0x4fa3e8f7 // sdot v23.4s, v7.16b, v3.4b[3]\n"
- "ldr q3, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
".inst 0x4fa4e8fb // sdot v27.4s, v7.16b, v4.4b[3]\n"
- "ldr q4, [x22, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
".inst 0x4fa5e8ff // sdot v31.4s, v7.16b, v5.4b[3]\n"
- "ldr q5, [x21, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q5, [x20, #0x0]\n"
"bge 142b\n"
"143:" // Height 6: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
- ".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
"add x24, x24, #0x10\n"
+ ".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"add x23, x23, #0x10\n"
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
- ".inst 0x4f85e0dc // sdot v28.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x9, #0x20]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"add x22, x22, #0x10\n"
+ ".inst 0x4f85e0dc // sdot v28.4s, v6.16b, v5.4b[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "add x21, x21, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
+ "ldr q6, [x28, #0x20]\n"
+ "add x20, x20, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "add x21, x21, #0x10\n"
- "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
".inst 0x4f85e0fd // sdot v29.4s, v7.16b, v5.4b[0]\n"
- "ldr q7, [x9, #0x30]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
+ "ldr q7, [x28, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x4f84e0da // sdot v26.4s, v6.16b, v4.4b[0]\n"
".inst 0x4f85e0de // sdot v30.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x9, #0x40]\n"
+ "ldr q6, [x28, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
".inst 0x4f85e0ff // sdot v31.4s, v7.16b, v5.4b[0]\n"
- "ldr q7, [x9, #0x50]\n"
+ "ldr q7, [x28, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d4 // sdot v20.4s, v6.16b, v3.4b[1]\n"
".inst 0x4fa4e0d8 // sdot v24.4s, v6.16b, v4.4b[1]\n"
".inst 0x4fa5e0dc // sdot v28.4s, v6.16b, v5.4b[1]\n"
- "ldr q6, [x9, #0x60]\n"
+ "ldr q6, [x28, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n"
".inst 0x4fa3e0f5 // sdot v21.4s, v7.16b, v3.4b[1]\n"
".inst 0x4fa4e0f9 // sdot v25.4s, v7.16b, v4.4b[1]\n"
".inst 0x4fa5e0fd // sdot v29.4s, v7.16b, v5.4b[1]\n"
- "ldr q7, [x9, #0x70]\n"
+ "ldr q7, [x28, #0x70]\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d6 // sdot v22.4s, v6.16b, v3.4b[1]\n"
".inst 0x4fa4e0da // sdot v26.4s, v6.16b, v4.4b[1]\n"
".inst 0x4fa5e0de // sdot v30.4s, v6.16b, v5.4b[1]\n"
- "ldr q6, [x9, #0x80]\n"
+ "ldr q6, [x28, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n"
".inst 0x4fa3e0f7 // sdot v23.4s, v7.16b, v3.4b[1]\n"
".inst 0x4fa4e0fb // sdot v27.4s, v7.16b, v4.4b[1]\n"
".inst 0x4fa5e0ff // sdot v31.4s, v7.16b, v5.4b[1]\n"
- "ldr q7, [x9, #0x90]\n"
+ "ldr q7, [x28, #0x90]\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d4 // sdot v20.4s, v6.16b, v3.4b[2]\n"
".inst 0x4f84e8d8 // sdot v24.4s, v6.16b, v4.4b[2]\n"
".inst 0x4f85e8dc // sdot v28.4s, v6.16b, v5.4b[2]\n"
- "ldr q6, [x9, #0xa0]\n"
+ "ldr q6, [x28, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n"
".inst 0x4f83e8f5 // sdot v21.4s, v7.16b, v3.4b[2]\n"
".inst 0x4f84e8f9 // sdot v25.4s, v7.16b, v4.4b[2]\n"
".inst 0x4f85e8fd // sdot v29.4s, v7.16b, v5.4b[2]\n"
- "ldr q7, [x9, #0xb0]\n"
+ "ldr q7, [x28, #0xb0]\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d6 // sdot v22.4s, v6.16b, v3.4b[2]\n"
".inst 0x4f84e8da // sdot v26.4s, v6.16b, v4.4b[2]\n"
".inst 0x4f85e8de // sdot v30.4s, v6.16b, v5.4b[2]\n"
- "ldr q6, [x9, #0xc0]\n"
+ "ldr q6, [x28, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n"
".inst 0x4f83e8f7 // sdot v23.4s, v7.16b, v3.4b[2]\n"
".inst 0x4f84e8fb // sdot v27.4s, v7.16b, v4.4b[2]\n"
".inst 0x4f85e8ff // sdot v31.4s, v7.16b, v5.4b[2]\n"
- "ldr q7, [x9, #0xd0]\n"
+ "ldr q7, [x28, #0xd0]\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n"
".inst 0x4fa3e8d4 // sdot v20.4s, v6.16b, v3.4b[3]\n"
".inst 0x4fa4e8d8 // sdot v24.4s, v6.16b, v4.4b[3]\n"
".inst 0x4fa5e8dc // sdot v28.4s, v6.16b, v5.4b[3]\n"
- "ldr q6, [x9, #0xe0]\n"
+ "ldr q6, [x28, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n"
".inst 0x4fa3e8f5 // sdot v21.4s, v7.16b, v3.4b[3]\n"
".inst 0x4fa4e8f9 // sdot v25.4s, v7.16b, v4.4b[3]\n"
".inst 0x4fa5e8fd // sdot v29.4s, v7.16b, v5.4b[3]\n"
- "ldr q7, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q7, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n"
@@ -2998,35 +2998,35 @@ void a64_hybrid_s8qs_dot_6x16 (
".inst 0x4fa4e8fb // sdot v27.4s, v7.16b, v4.4b[3]\n"
".inst 0x4fa5e8ff // sdot v31.4s, v7.16b, v5.4b[3]\n"
"144:" // Height 6: Multiply loop: Main loop skip
- "cbz x27, 149f\n"
- "cmp x27, #0x4\n"
+ "cbz x26, 149f\n"
+ "cmp x26, #0x4\n"
"blt 146f\n"
"145:" // Height 6: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr s1, [x25], #0x4\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
- "ldr s2, [x24], #0x4\n"
- "ldr s3, [x23], #0x4\n"
- "ldr s4, [x22], #0x4\n"
- "ldr s5, [x21], #0x4\n"
- "ldr q6, [x9, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr s0, [x25], #0x4\n"
+ "sub x26, x26, #0x4\n"
+ "ldr s1, [x24], #0x4\n"
+ "cmp x26, #0x4\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr s3, [x22], #0x4\n"
+ "ldr s4, [x21], #0x4\n"
+ "ldr s5, [x20], #0x4\n"
+ "ldr q6, [x28, #0x0]\n"
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x28, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
".inst 0x4f85e0dc // sdot v28.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x9, #0x20]\n"
+ "ldr q6, [x28, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
".inst 0x4f85e0fd // sdot v29.4s, v7.16b, v5.4b[0]\n"
- "ldr q7, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
@@ -3040,48 +3040,48 @@ void a64_hybrid_s8qs_dot_6x16 (
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
".inst 0x4f85e0ff // sdot v31.4s, v7.16b, v5.4b[0]\n"
"bge 145b\n"
+ "cbz x26, 149f\n"
"146:" // Height 6: Multiply loop: Skip odd blocks
- "cbz x27, 149f\n"
- "tbz x27, #1, 147f\n"
- "ldr h0, [x26], #0x2\n"
- "ldr h1, [x25], #0x2\n"
- "ldr h2, [x24], #0x2\n"
- "ldr h3, [x23], #0x2\n"
- "ldr h4, [x22], #0x2\n"
- "ldr h5, [x21], #0x2\n"
- "tbz x27, #0, 148f\n"
- "ld1 { v0.b }[2], [x26]\n"
- "ld1 { v1.b }[2], [x25]\n"
- "ld1 { v2.b }[2], [x24]\n"
- "ld1 { v3.b }[2], [x23]\n"
- "ld1 { v4.b }[2], [x22]\n"
- "ld1 { v5.b }[2], [x21]\n"
+ "tbz x26, #1, 147f\n"
+ "ldr h0, [x25], #0x2\n"
+ "ldr h1, [x24], #0x2\n"
+ "ldr h2, [x23], #0x2\n"
+ "ldr h3, [x22], #0x2\n"
+ "ldr h4, [x21], #0x2\n"
+ "ldr h5, [x20], #0x2\n"
+ "tbz x26, #0, 148f\n"
+ "ld1 { v0.b }[2], [x25]\n"
+ "ld1 { v1.b }[2], [x24]\n"
+ "ld1 { v2.b }[2], [x23]\n"
+ "ld1 { v3.b }[2], [x22]\n"
+ "ld1 { v4.b }[2], [x21]\n"
+ "ld1 { v5.b }[2], [x20]\n"
"b 148f\n"
"147:" // Height 6: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x26, #0x0]\n"
- "ldr b1, [x25, #0x0]\n"
- "ldr b2, [x24, #0x0]\n"
- "ldr b3, [x23, #0x0]\n"
- "ldr b4, [x22, #0x0]\n"
- "ldr b5, [x21, #0x0]\n"
+ "ldr b0, [x25, #0x0]\n"
+ "ldr b1, [x24, #0x0]\n"
+ "ldr b2, [x23, #0x0]\n"
+ "ldr b3, [x22, #0x0]\n"
+ "ldr b4, [x21, #0x0]\n"
+ "ldr b5, [x20, #0x0]\n"
"148:" // Height 6: Multiply loop: Ragged operand read: Done
- "ldr q6, [x9, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q6, [x28, #0x0]\n"
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x28, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
".inst 0x4f85e0dc // sdot v28.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x9, #0x20]\n"
+ "ldr q6, [x28, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
".inst 0x4f85e0fd // sdot v29.4s, v7.16b, v5.4b[0]\n"
- "ldr q7, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
@@ -3095,48 +3095,48 @@ void a64_hybrid_s8qs_dot_6x16 (
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
".inst 0x4f85e0ff // sdot v31.4s, v7.16b, v5.4b[0]\n"
"149:" // Height 6: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 139b\n"
- "ldr q0, [x14, #0x0]\n"
- "ldr q1, [x14, #0x10]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
+ "add x23, x9, x19\n"
+ "ldr q0, [x11, #0x0]\n"
"add v8.4s, v8.4s, v0.4s\n"
- "add v9.4s, v9.4s, v1.4s\n"
- "ldr q2, [x14, #0x20]\n"
- "ldr q3, [x14, #0x30]\n"
- "add v10.4s, v10.4s, v2.4s\n"
- "add v11.4s, v11.4s, v3.4s\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x11, x20\n"
- "add x23, x24, x20\n"
- "prfm pstl1keep, [x11, #0x0]\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
- "prfm pstl1keep, [x24, #0x0]\n"
"prfm pstl1keep, [x23, #0x0]\n"
- "add x20, x21, x20\n"
+ "add x22, x23, x19\n"
+ "add v12.4s, v12.4s, v0.4s\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19\n"
+ "add v16.4s, v16.4s, v0.4s\n"
"prfm pstl1keep, [x21, #0x0]\n"
- "add v12.4s, v12.4s, v0.4s\n"
+ "add x20, x21, x19\n"
+ "add v20.4s, v20.4s, v0.4s\n"
"prfm pstl1keep, [x20, #0x0]\n"
+ "add x19, x20, x19\n"
+ "add v24.4s, v24.4s, v0.4s\n"
+ "prfm pstl1keep, [x19, #0x0]\n"
+ "add v28.4s, v28.4s, v0.4s\n"
+ "ldr q1, [x11, #0x10]\n"
+ "ldr q2, [x11, #0x20]\n"
+ "add v9.4s, v9.4s, v1.4s\n"
+ "ldr q3, [x11, #0x30]\n"
+ "add x11, x11, #0x40\n"
+ "add v10.4s, v10.4s, v2.4s\n"
"add v13.4s, v13.4s, v1.4s\n"
"add v14.4s, v14.4s, v2.4s\n"
- "add x14, x14, #0x40\n"
- "add v15.4s, v15.4s, v3.4s\n"
- "add v16.4s, v16.4s, v0.4s\n"
"add v17.4s, v17.4s, v1.4s\n"
+ "add v11.4s, v11.4s, v3.4s\n"
+ "add v15.4s, v15.4s, v3.4s\n"
"add v18.4s, v18.4s, v2.4s\n"
"add v19.4s, v19.4s, v3.4s\n"
- "add v20.4s, v20.4s, v0.4s\n"
"add v21.4s, v21.4s, v1.4s\n"
"add v22.4s, v22.4s, v2.4s\n"
"add v23.4s, v23.4s, v3.4s\n"
- "add v24.4s, v24.4s, v0.4s\n"
"add v25.4s, v25.4s, v1.4s\n"
"add v26.4s, v26.4s, v2.4s\n"
"add v27.4s, v27.4s, v3.4s\n"
- "add v28.4s, v28.4s, v0.4s\n"
"add v29.4s, v29.4s, v1.4s\n"
"add v30.4s, v30.4s, v2.4s\n"
"add v31.4s, v31.4s, v3.4s\n"
@@ -3148,20 +3148,20 @@ void a64_hybrid_s8qs_dot_6x16 (
"ldr q2, [x12, #0x20]\n"
"ldr q6, [x13, #0x20]\n"
"ldr q3, [x12, #0x30]\n"
- "ldr q7, [x13, #0x30]\n"
"add x12, x12, #0x40\n"
+ "ldr q7, [x13, #0x30]\n"
"add x13, x13, #0x40\n"
"b 151f\n"
"150:" // Height 6: per layer parameters
- "add x25, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x25]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x25]\n"
+ "add x24, %x[qp], %[per_layer_right_shift]\n"
+ "ld1r { v0.4s }, [x24]\n"
"mov v1.16b, v0.16b\n"
- "mov v5.16b, v4.16b\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v4.4s }, [x24]\n"
"mov v2.16b, v0.16b\n"
- "mov v6.16b, v4.16b\n"
"mov v3.16b, v0.16b\n"
+ "mov v5.16b, v4.16b\n"
+ "mov v6.16b, v4.16b\n"
"mov v7.16b, v4.16b\n"
"151:" // Height 6: parameters loaded
"sqrdmulh v8.4s, v8.4s, v4.4s\n"
@@ -3192,183 +3192,183 @@ void a64_hybrid_s8qs_dot_6x16 (
"and v4.16b, v8.16b, v0.16b\n"
"and v5.16b, v9.16b, v1.16b\n"
"and v6.16b, v10.16b, v2.16b\n"
- "and v7.16b, v11.16b, v3.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v8.4s, v8.4s, v4.4s\n"
"sqadd v9.4s, v9.4s, v5.4s\n"
"sqadd v10.4s, v10.4s, v6.4s\n"
- "sqadd v11.4s, v11.4s, v7.4s\n"
+ "and v7.16b, v11.16b, v3.16b\n"
"and v4.16b, v12.16b, v0.16b\n"
"and v5.16b, v13.16b, v1.16b\n"
- "and v6.16b, v14.16b, v2.16b\n"
- "and v7.16b, v15.16b, v3.16b\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
+ "sqadd v11.4s, v11.4s, v7.4s\n"
"sqadd v12.4s, v12.4s, v4.4s\n"
"sqadd v13.4s, v13.4s, v5.4s\n"
+ "and v6.16b, v14.16b, v2.16b\n"
+ "and v7.16b, v15.16b, v3.16b\n"
+ "and v4.16b, v16.16b, v0.16b\n"
+ "sshr v6.4s, v6.4s, #0x1f\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
"sqadd v14.4s, v14.4s, v6.4s\n"
"sqadd v15.4s, v15.4s, v7.4s\n"
- "and v4.16b, v16.16b, v0.16b\n"
+ "sqadd v16.4s, v16.4s, v4.4s\n"
"and v5.16b, v17.16b, v1.16b\n"
"and v6.16b, v18.16b, v2.16b\n"
"and v7.16b, v19.16b, v3.16b\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v4.4s\n"
"sqadd v17.4s, v17.4s, v5.4s\n"
"sqadd v18.4s, v18.4s, v6.4s\n"
"sqadd v19.4s, v19.4s, v7.4s\n"
"and v4.16b, v20.16b, v0.16b\n"
"and v5.16b, v21.16b, v1.16b\n"
"and v6.16b, v22.16b, v2.16b\n"
- "and v7.16b, v23.16b, v3.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v20.4s, v20.4s, v4.4s\n"
"sqadd v21.4s, v21.4s, v5.4s\n"
"sqadd v22.4s, v22.4s, v6.4s\n"
- "sqadd v23.4s, v23.4s, v7.4s\n"
+ "and v7.16b, v23.16b, v3.16b\n"
"and v4.16b, v24.16b, v0.16b\n"
"and v5.16b, v25.16b, v1.16b\n"
- "and v6.16b, v26.16b, v2.16b\n"
- "and v7.16b, v27.16b, v3.16b\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
+ "sqadd v23.4s, v23.4s, v7.4s\n"
"sqadd v24.4s, v24.4s, v4.4s\n"
"sqadd v25.4s, v25.4s, v5.4s\n"
+ "and v6.16b, v26.16b, v2.16b\n"
+ "and v7.16b, v27.16b, v3.16b\n"
+ "and v4.16b, v28.16b, v0.16b\n"
+ "sshr v6.4s, v6.4s, #0x1f\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
"sqadd v26.4s, v26.4s, v6.4s\n"
"sqadd v27.4s, v27.4s, v7.4s\n"
- "and v4.16b, v28.16b, v0.16b\n"
+ "sqadd v28.4s, v28.4s, v4.4s\n"
"and v5.16b, v29.16b, v1.16b\n"
"and v6.16b, v30.16b, v2.16b\n"
"and v7.16b, v31.16b, v3.16b\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
- "sqadd v28.4s, v28.4s, v4.4s\n"
"sqadd v29.4s, v29.4s, v5.4s\n"
"sqadd v30.4s, v30.4s, v6.4s\n"
"sqadd v31.4s, v31.4s, v7.4s\n"
"152:" // Height 6: no shift correction
- "add x25, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x25]\n"
"srshl v8.4s, v8.4s, v0.4s\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "ld1r { v4.4s }, [x24]\n"
"srshl v9.4s, v9.4s, v1.4s\n"
+ "add x24, %x[qp], %[minval]\n"
"srshl v10.4s, v10.4s, v2.4s\n"
+ "ld1r { v5.4s }, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
"srshl v11.4s, v11.4s, v3.4s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x25]\n"
+ "ld1r { v6.4s }, [x24]\n"
+ "cmp x10, #0x10\n"
"srshl v12.4s, v12.4s, v0.4s\n"
"srshl v13.4s, v13.4s, v1.4s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x25]\n"
- "srshl v14.4s, v14.4s, v2.4s\n"
- "srshl v15.4s, v15.4s, v3.4s\n"
- "cmp x10, #0x10\n"
- "srshl v16.4s, v16.4s, v0.4s\n"
- "srshl v17.4s, v17.4s, v1.4s\n"
- "srshl v18.4s, v18.4s, v2.4s\n"
- "srshl v19.4s, v19.4s, v3.4s\n"
- "srshl v20.4s, v20.4s, v0.4s\n"
- "srshl v21.4s, v21.4s, v1.4s\n"
- "srshl v22.4s, v22.4s, v2.4s\n"
- "srshl v23.4s, v23.4s, v3.4s\n"
- "srshl v24.4s, v24.4s, v0.4s\n"
- "srshl v25.4s, v25.4s, v1.4s\n"
- "srshl v26.4s, v26.4s, v2.4s\n"
- "srshl v27.4s, v27.4s, v3.4s\n"
- "srshl v28.4s, v28.4s, v0.4s\n"
- "srshl v29.4s, v29.4s, v1.4s\n"
- "srshl v30.4s, v30.4s, v2.4s\n"
- "srshl v31.4s, v31.4s, v3.4s\n"
"add v8.4s, v8.4s, v4.4s\n"
"add v9.4s, v9.4s, v4.4s\n"
"add v10.4s, v10.4s, v4.4s\n"
- "add v11.4s, v11.4s, v4.4s\n"
- "add v12.4s, v12.4s, v4.4s\n"
- "add v13.4s, v13.4s, v4.4s\n"
- "add v14.4s, v14.4s, v4.4s\n"
- "add v15.4s, v15.4s, v4.4s\n"
- "add v16.4s, v16.4s, v4.4s\n"
- "add v17.4s, v17.4s, v4.4s\n"
- "add v18.4s, v18.4s, v4.4s\n"
- "add v19.4s, v19.4s, v4.4s\n"
- "add v20.4s, v20.4s, v4.4s\n"
- "add v21.4s, v21.4s, v4.4s\n"
- "add v22.4s, v22.4s, v4.4s\n"
- "add v23.4s, v23.4s, v4.4s\n"
- "add v24.4s, v24.4s, v4.4s\n"
- "add v25.4s, v25.4s, v4.4s\n"
- "add v26.4s, v26.4s, v4.4s\n"
- "add v27.4s, v27.4s, v4.4s\n"
- "add v28.4s, v28.4s, v4.4s\n"
- "add v29.4s, v29.4s, v4.4s\n"
- "add v30.4s, v30.4s, v4.4s\n"
- "add v31.4s, v31.4s, v4.4s\n"
"smin v8.4s, v8.4s, v6.4s\n"
"smin v9.4s, v9.4s, v6.4s\n"
"smin v10.4s, v10.4s, v6.4s\n"
- "smin v11.4s, v11.4s, v6.4s\n"
- "smin v12.4s, v12.4s, v6.4s\n"
- "smin v13.4s, v13.4s, v6.4s\n"
- "smin v14.4s, v14.4s, v6.4s\n"
- "smin v15.4s, v15.4s, v6.4s\n"
- "smin v16.4s, v16.4s, v6.4s\n"
- "smin v17.4s, v17.4s, v6.4s\n"
- "smin v18.4s, v18.4s, v6.4s\n"
- "smin v19.4s, v19.4s, v6.4s\n"
- "smin v20.4s, v20.4s, v6.4s\n"
- "smin v21.4s, v21.4s, v6.4s\n"
- "smin v22.4s, v22.4s, v6.4s\n"
- "smin v23.4s, v23.4s, v6.4s\n"
- "smin v24.4s, v24.4s, v6.4s\n"
- "smin v25.4s, v25.4s, v6.4s\n"
- "smin v26.4s, v26.4s, v6.4s\n"
- "smin v27.4s, v27.4s, v6.4s\n"
- "smin v28.4s, v28.4s, v6.4s\n"
- "smin v29.4s, v29.4s, v6.4s\n"
- "smin v30.4s, v30.4s, v6.4s\n"
- "smin v31.4s, v31.4s, v6.4s\n"
"smax v8.4s, v8.4s, v5.4s\n"
"smax v9.4s, v9.4s, v5.4s\n"
"smax v10.4s, v10.4s, v5.4s\n"
+ "add v11.4s, v11.4s, v4.4s\n"
+ "add v12.4s, v12.4s, v4.4s\n"
+ "add v13.4s, v13.4s, v4.4s\n"
+ "smin v11.4s, v11.4s, v6.4s\n"
+ "smin v12.4s, v12.4s, v6.4s\n"
+ "smin v13.4s, v13.4s, v6.4s\n"
"smax v11.4s, v11.4s, v5.4s\n"
"smax v12.4s, v12.4s, v5.4s\n"
"smax v13.4s, v13.4s, v5.4s\n"
+ "srshl v14.4s, v14.4s, v2.4s\n"
+ "srshl v15.4s, v15.4s, v3.4s\n"
+ "srshl v16.4s, v16.4s, v0.4s\n"
+ "srshl v17.4s, v17.4s, v1.4s\n"
+ "add v14.4s, v14.4s, v4.4s\n"
+ "add v15.4s, v15.4s, v4.4s\n"
+ "add v16.4s, v16.4s, v4.4s\n"
+ "smin v14.4s, v14.4s, v6.4s\n"
+ "smin v15.4s, v15.4s, v6.4s\n"
+ "smin v16.4s, v16.4s, v6.4s\n"
"smax v14.4s, v14.4s, v5.4s\n"
"smax v15.4s, v15.4s, v5.4s\n"
"smax v16.4s, v16.4s, v5.4s\n"
+ "add v17.4s, v17.4s, v4.4s\n"
+ "srshl v18.4s, v18.4s, v2.4s\n"
+ "srshl v19.4s, v19.4s, v3.4s\n"
+ "smin v17.4s, v17.4s, v6.4s\n"
+ "srshl v20.4s, v20.4s, v0.4s\n"
+ "add v18.4s, v18.4s, v4.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
+ "add v19.4s, v19.4s, v4.4s\n"
+ "smin v18.4s, v18.4s, v6.4s\n"
+ "add v20.4s, v20.4s, v4.4s\n"
+ "smin v19.4s, v19.4s, v6.4s\n"
"smax v18.4s, v18.4s, v5.4s\n"
+ "smin v20.4s, v20.4s, v6.4s\n"
"smax v19.4s, v19.4s, v5.4s\n"
+ "srshl v21.4s, v21.4s, v1.4s\n"
"smax v20.4s, v20.4s, v5.4s\n"
+ "srshl v22.4s, v22.4s, v2.4s\n"
+ "srshl v23.4s, v23.4s, v3.4s\n"
+ "add v21.4s, v21.4s, v4.4s\n"
+ "srshl v24.4s, v24.4s, v0.4s\n"
+ "add v22.4s, v22.4s, v4.4s\n"
+ "smin v21.4s, v21.4s, v6.4s\n"
+ "add v23.4s, v23.4s, v4.4s\n"
+ "smin v22.4s, v22.4s, v6.4s\n"
"smax v21.4s, v21.4s, v5.4s\n"
+ "smin v23.4s, v23.4s, v6.4s\n"
"smax v22.4s, v22.4s, v5.4s\n"
+ "add v24.4s, v24.4s, v4.4s\n"
"smax v23.4s, v23.4s, v5.4s\n"
+ "srshl v25.4s, v25.4s, v1.4s\n"
+ "smin v24.4s, v24.4s, v6.4s\n"
+ "srshl v26.4s, v26.4s, v2.4s\n"
+ "srshl v27.4s, v27.4s, v3.4s\n"
"smax v24.4s, v24.4s, v5.4s\n"
+ "add v25.4s, v25.4s, v4.4s\n"
+ "add v26.4s, v26.4s, v4.4s\n"
+ "add v27.4s, v27.4s, v4.4s\n"
+ "smin v25.4s, v25.4s, v6.4s\n"
+ "smin v26.4s, v26.4s, v6.4s\n"
+ "smin v27.4s, v27.4s, v6.4s\n"
"smax v25.4s, v25.4s, v5.4s\n"
"smax v26.4s, v26.4s, v5.4s\n"
"smax v27.4s, v27.4s, v5.4s\n"
+ "srshl v28.4s, v28.4s, v0.4s\n"
+ "srshl v29.4s, v29.4s, v1.4s\n"
+ "srshl v30.4s, v30.4s, v2.4s\n"
+ "srshl v31.4s, v31.4s, v3.4s\n"
+ "add v28.4s, v28.4s, v4.4s\n"
+ "add v29.4s, v29.4s, v4.4s\n"
+ "add v30.4s, v30.4s, v4.4s\n"
+ "smin v28.4s, v28.4s, v6.4s\n"
+ "smin v29.4s, v29.4s, v6.4s\n"
+ "smin v30.4s, v30.4s, v6.4s\n"
"smax v28.4s, v28.4s, v5.4s\n"
"smax v29.4s, v29.4s, v5.4s\n"
"smax v30.4s, v30.4s, v5.4s\n"
- "smax v31.4s, v31.4s, v5.4s\n"
+ "add v31.4s, v31.4s, v4.4s\n"
"uzp1 v8.8h, v8.8h, v9.8h\n"
"uzp1 v9.8h, v10.8h, v11.8h\n"
+ "smin v31.4s, v31.4s, v6.4s\n"
"uzp1 v12.8h, v12.8h, v13.8h\n"
"uzp1 v13.8h, v14.8h, v15.8h\n"
+ "smax v31.4s, v31.4s, v5.4s\n"
"uzp1 v16.8h, v16.8h, v17.8h\n"
"uzp1 v17.8h, v18.8h, v19.8h\n"
"uzp1 v20.8h, v20.8h, v21.8h\n"
@@ -3385,152 +3385,152 @@ void a64_hybrid_s8qs_dot_6x16 (
"uzp1 v28.16b, v28.16b, v29.16b\n"
"bge 161f\n"
"tbz x10, #3, 156f\n"
- "str d8, [x11], #0x8\n"
- "str d12, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d20, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
- "str d28, [x20], #0x8\n"
+ "str d8, [x9], #0x8\n"
+ "str d12, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "str d20, [x21], #0x8\n"
+ "str d24, [x20], #0x8\n"
+ "str d28, [x19], #0x8\n"
"tbz x10, #2, 154f\n"
- "st1 { v8.s }[2], [x11], #0x4\n"
- "st1 { v12.s }[2], [x24], #0x4\n"
- "st1 { v16.s }[2], [x23], #0x4\n"
- "st1 { v20.s }[2], [x22], #0x4\n"
- "st1 { v24.s }[2], [x21], #0x4\n"
- "st1 { v28.s }[2], [x20], #0x4\n"
+ "st1 { v8.s }[2], [x9], #0x4\n"
+ "st1 { v12.s }[2], [x23], #0x4\n"
+ "st1 { v16.s }[2], [x22], #0x4\n"
+ "st1 { v20.s }[2], [x21], #0x4\n"
+ "st1 { v24.s }[2], [x20], #0x4\n"
+ "st1 { v28.s }[2], [x19], #0x4\n"
"tbz x10, #1, 153f\n"
- "st1 { v8.h }[6], [x11], #0x2\n"
- "st1 { v12.h }[6], [x24], #0x2\n"
- "st1 { v16.h }[6], [x23], #0x2\n"
- "st1 { v20.h }[6], [x22], #0x2\n"
- "st1 { v24.h }[6], [x21], #0x2\n"
- "st1 { v28.h }[6], [x20], #0x2\n"
+ "st1 { v8.h }[6], [x9], #0x2\n"
+ "st1 { v12.h }[6], [x23], #0x2\n"
+ "st1 { v16.h }[6], [x22], #0x2\n"
+ "st1 { v20.h }[6], [x21], #0x2\n"
+ "st1 { v24.h }[6], [x20], #0x2\n"
+ "st1 { v28.h }[6], [x19], #0x2\n"
"tbz x10, #0, 160f\n"
- "st1 { v8.b }[14], [x11]\n"
- "st1 { v12.b }[14], [x24]\n"
- "st1 { v16.b }[14], [x23]\n"
- "st1 { v20.b }[14], [x22]\n"
- "st1 { v24.b }[14], [x21]\n"
- "st1 { v28.b }[14], [x20]\n"
+ "st1 { v8.b }[14], [x9]\n"
+ "st1 { v12.b }[14], [x23]\n"
+ "st1 { v16.b }[14], [x22]\n"
+ "st1 { v20.b }[14], [x21]\n"
+ "st1 { v24.b }[14], [x20]\n"
+ "st1 { v28.b }[14], [x19]\n"
"b 160f\n"
"153:" // Height 6: Partial direct writeback: partial_1_12
"tbz x10, #0, 160f\n"
- "st1 { v8.b }[12], [x11]\n"
- "st1 { v12.b }[12], [x24]\n"
- "st1 { v16.b }[12], [x23]\n"
- "st1 { v20.b }[12], [x22]\n"
- "st1 { v24.b }[12], [x21]\n"
- "st1 { v28.b }[12], [x20]\n"
+ "st1 { v8.b }[12], [x9]\n"
+ "st1 { v12.b }[12], [x23]\n"
+ "st1 { v16.b }[12], [x22]\n"
+ "st1 { v20.b }[12], [x21]\n"
+ "st1 { v24.b }[12], [x20]\n"
+ "st1 { v28.b }[12], [x19]\n"
"b 160f\n"
"154:" // Height 6: Partial direct writeback: partial_2_8
"tbz x10, #1, 155f\n"
- "st1 { v8.h }[4], [x11], #0x2\n"
- "st1 { v12.h }[4], [x24], #0x2\n"
- "st1 { v16.h }[4], [x23], #0x2\n"
- "st1 { v20.h }[4], [x22], #0x2\n"
- "st1 { v24.h }[4], [x21], #0x2\n"
- "st1 { v28.h }[4], [x20], #0x2\n"
+ "st1 { v8.h }[4], [x9], #0x2\n"
+ "st1 { v12.h }[4], [x23], #0x2\n"
+ "st1 { v16.h }[4], [x22], #0x2\n"
+ "st1 { v20.h }[4], [x21], #0x2\n"
+ "st1 { v24.h }[4], [x20], #0x2\n"
+ "st1 { v28.h }[4], [x19], #0x2\n"
"tbz x10, #0, 160f\n"
- "st1 { v8.b }[10], [x11]\n"
- "st1 { v12.b }[10], [x24]\n"
- "st1 { v16.b }[10], [x23]\n"
- "st1 { v20.b }[10], [x22]\n"
- "st1 { v24.b }[10], [x21]\n"
- "st1 { v28.b }[10], [x20]\n"
+ "st1 { v8.b }[10], [x9]\n"
+ "st1 { v12.b }[10], [x23]\n"
+ "st1 { v16.b }[10], [x22]\n"
+ "st1 { v20.b }[10], [x21]\n"
+ "st1 { v24.b }[10], [x20]\n"
+ "st1 { v28.b }[10], [x19]\n"
"b 160f\n"
"155:" // Height 6: Partial direct writeback: partial_1_8
"tbz x10, #0, 160f\n"
- "st1 { v8.b }[8], [x11]\n"
- "st1 { v12.b }[8], [x24]\n"
- "st1 { v16.b }[8], [x23]\n"
- "st1 { v20.b }[8], [x22]\n"
- "st1 { v24.b }[8], [x21]\n"
- "st1 { v28.b }[8], [x20]\n"
+ "st1 { v8.b }[8], [x9]\n"
+ "st1 { v12.b }[8], [x23]\n"
+ "st1 { v16.b }[8], [x22]\n"
+ "st1 { v20.b }[8], [x21]\n"
+ "st1 { v24.b }[8], [x20]\n"
+ "st1 { v28.b }[8], [x19]\n"
"b 160f\n"
"156:" // Height 6: Partial direct writeback: partial_4_0
"tbz x10, #2, 158f\n"
- "str s8, [x11], #0x4\n"
- "str s12, [x24], #0x4\n"
- "str s16, [x23], #0x4\n"
- "str s20, [x22], #0x4\n"
- "str s24, [x21], #0x4\n"
- "str s28, [x20], #0x4\n"
+ "str s8, [x9], #0x4\n"
+ "str s12, [x23], #0x4\n"
+ "str s16, [x22], #0x4\n"
+ "str s20, [x21], #0x4\n"
+ "str s24, [x20], #0x4\n"
+ "str s28, [x19], #0x4\n"
"tbz x10, #1, 157f\n"
- "st1 { v8.h }[2], [x11], #0x2\n"
- "st1 { v12.h }[2], [x24], #0x2\n"
- "st1 { v16.h }[2], [x23], #0x2\n"
- "st1 { v20.h }[2], [x22], #0x2\n"
- "st1 { v24.h }[2], [x21], #0x2\n"
- "st1 { v28.h }[2], [x20], #0x2\n"
+ "st1 { v8.h }[2], [x9], #0x2\n"
+ "st1 { v12.h }[2], [x23], #0x2\n"
+ "st1 { v16.h }[2], [x22], #0x2\n"
+ "st1 { v20.h }[2], [x21], #0x2\n"
+ "st1 { v24.h }[2], [x20], #0x2\n"
+ "st1 { v28.h }[2], [x19], #0x2\n"
"tbz x10, #0, 160f\n"
- "st1 { v8.b }[6], [x11]\n"
- "st1 { v12.b }[6], [x24]\n"
- "st1 { v16.b }[6], [x23]\n"
- "st1 { v20.b }[6], [x22]\n"
- "st1 { v24.b }[6], [x21]\n"
- "st1 { v28.b }[6], [x20]\n"
+ "st1 { v8.b }[6], [x9]\n"
+ "st1 { v12.b }[6], [x23]\n"
+ "st1 { v16.b }[6], [x22]\n"
+ "st1 { v20.b }[6], [x21]\n"
+ "st1 { v24.b }[6], [x20]\n"
+ "st1 { v28.b }[6], [x19]\n"
"b 160f\n"
"157:" // Height 6: Partial direct writeback: partial_1_4
"tbz x10, #0, 160f\n"
- "st1 { v8.b }[4], [x11]\n"
- "st1 { v12.b }[4], [x24]\n"
- "st1 { v16.b }[4], [x23]\n"
- "st1 { v20.b }[4], [x22]\n"
- "st1 { v24.b }[4], [x21]\n"
- "st1 { v28.b }[4], [x20]\n"
+ "st1 { v8.b }[4], [x9]\n"
+ "st1 { v12.b }[4], [x23]\n"
+ "st1 { v16.b }[4], [x22]\n"
+ "st1 { v20.b }[4], [x21]\n"
+ "st1 { v24.b }[4], [x20]\n"
+ "st1 { v28.b }[4], [x19]\n"
"b 160f\n"
"158:" // Height 6: Partial direct writeback: partial_2_0
"tbz x10, #1, 159f\n"
- "str h8, [x11], #0x2\n"
- "str h12, [x24], #0x2\n"
- "str h16, [x23], #0x2\n"
- "str h20, [x22], #0x2\n"
- "str h24, [x21], #0x2\n"
- "str h28, [x20], #0x2\n"
+ "str h8, [x9], #0x2\n"
+ "str h12, [x23], #0x2\n"
+ "str h16, [x22], #0x2\n"
+ "str h20, [x21], #0x2\n"
+ "str h24, [x20], #0x2\n"
+ "str h28, [x19], #0x2\n"
"tbz x10, #0, 160f\n"
- "st1 { v8.b }[2], [x11]\n"
- "st1 { v12.b }[2], [x24]\n"
- "st1 { v16.b }[2], [x23]\n"
- "st1 { v20.b }[2], [x22]\n"
- "st1 { v24.b }[2], [x21]\n"
- "st1 { v28.b }[2], [x20]\n"
+ "st1 { v8.b }[2], [x9]\n"
+ "st1 { v12.b }[2], [x23]\n"
+ "st1 { v16.b }[2], [x22]\n"
+ "st1 { v20.b }[2], [x21]\n"
+ "st1 { v24.b }[2], [x20]\n"
+ "st1 { v28.b }[2], [x19]\n"
"b 160f\n"
"159:" // Height 6: Partial direct writeback: partial_1_0
- "str b8, [x11, #0x0]\n"
- "str b12, [x24, #0x0]\n"
- "str b16, [x23, #0x0]\n"
- "str b20, [x22, #0x0]\n"
- "str b24, [x21, #0x0]\n"
- "str b28, [x20, #0x0]\n"
+ "str b8, [x9, #0x0]\n"
+ "str b12, [x23, #0x0]\n"
+ "str b16, [x22, #0x0]\n"
+ "str b20, [x21, #0x0]\n"
+ "str b24, [x20, #0x0]\n"
+ "str b28, [x19, #0x0]\n"
"160:" // Height 6: Partial direct writeback: Done
"b 162f\n"
"161:" // Height 6: Full writeback
- "str q8, [x11, #0x0]\n"
- "add x11, x11, #0x10\n"
- "str q12, [x24, #0x0]\n"
- "str q16, [x23, #0x0]\n"
- "str q20, [x22, #0x0]\n"
- "str q24, [x21, #0x0]\n"
- "str q28, [x20, #0x0]\n"
+ "str q8, [x9, #0x0]\n"
+ "add x9, x9, #0x10\n"
+ "str q12, [x23, #0x0]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q20, [x21, #0x0]\n"
+ "str q24, [x20, #0x0]\n"
+ "str q28, [x19, #0x0]\n"
"162:" // Height 6: Writeback done
"subs x10, x10, #0x10\n"
"bgt 137b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 164f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 163f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"163:" // Update direct input
- "mov x20, #0x6\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x6\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"164:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [flags] "r" (flags), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_multiplier_ptr] "I" (offsetof(KernelArgs, multiplier_ptr)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_shift_ptr] "I" (offsetof(KernelArgs, shift_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_mmla_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_mmla_6x16/generic.cpp
index fc525531b2..8924492e41 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_mmla_6x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_mmla_6x16/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -95,12 +95,12 @@ void a64_hybrid_s8qs_mmla_6x16 (
"cmp %x[M], #0x2\n"
"bgt 59f\n"
"beq 30f\n"
- "mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x11, %x[col_bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x9, %x[output_ptr]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"2:" // Height 1: Column loop
"movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
@@ -111,192 +111,196 @@ void a64_hybrid_s8qs_mmla_6x16 (
"movi v14.4s, #0x0\n"
"movi v15.4s, #0x0\n"
"3:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"4:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 5f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 6f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 6f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
"b 6f\n"
"5:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"6:" // Height 1: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 9f\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q7, [x9, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "cmp x26, #0x20\n"
"blt 8f\n"
"7:" // Height 1: Multiply loop: Main loop head
+ "movi v2.16b, #0x0\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q6, [x28, #0x10]\n"
+ "sub x26, x26, #0x10\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "cmp x26, #0x20\n"
".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
- "ldr q6, [x9, #0x30]\n"
+ "ldr q6, [x28, #0x30]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "ldr q7, [x9, #0x40]\n"
+ "ldr q7, [x28, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
- "ldr q6, [x9, #0x50]\n"
+ "ldr q6, [x28, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "ldr q7, [x9, #0x60]\n"
+ "ldr q7, [x28, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
- "ldr q6, [x9, #0x70]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
+ "ldr q6, [x28, #0x70]\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "ldr q7, [x9, #0x80]\n"
+ "ldr q7, [x28, #0x80]\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
- "ldr q6, [x9, #0x90]\n"
+ "ldr q6, [x28, #0x90]\n"
".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
- "ldr q7, [x9, #0xa0]\n"
+ "ldr q7, [x28, #0xa0]\n"
".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
- "ldr q6, [x9, #0xb0]\n"
+ "ldr q6, [x28, #0xb0]\n"
".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
- "ldr q7, [x9, #0xc0]\n"
+ "ldr q7, [x28, #0xc0]\n"
".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
- "ldr q6, [x9, #0xd0]\n"
+ "ldr q6, [x28, #0xd0]\n"
".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
- "ldr q7, [x9, #0xe0]\n"
+ "ldr q7, [x28, #0xe0]\n"
".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
- "ldr q6, [x9, #0xf0]\n"
- "sub x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
- "cmp x27, #0x20\n"
+ "ldr q6, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
- "ldr q1, [x26, #0x0]\n"
- "add x9, x9, #0x100\n"
- "ldr q7, [x9, #0x0]\n"
- "ldr q6, [x9, #0x10]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q1, [x25, #0x0]\n"
"bge 7b\n"
"8:" // Height 1: Multiply loop: Single iteration only
+ "movi v2.16b, #0x0\n"
+ "ldr q7, [x28, #0x0]\n"
+ "sub x26, x26, #0x10\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q6, [x28, #0x10]\n"
+ "add x25, x25, #0x10\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
- "ldr q6, [x9, #0x30]\n"
+ "ldr q6, [x28, #0x30]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "ldr q7, [x9, #0x40]\n"
+ "ldr q7, [x28, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
- "ldr q6, [x9, #0x50]\n"
+ "ldr q6, [x28, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "ldr q7, [x9, #0x60]\n"
+ "ldr q7, [x28, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
- "ldr q6, [x9, #0x70]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
+ "ldr q6, [x28, #0x70]\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "ldr q7, [x9, #0x80]\n"
+ "ldr q7, [x28, #0x80]\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
- "ldr q6, [x9, #0x90]\n"
+ "ldr q6, [x28, #0x90]\n"
".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
- "ldr q7, [x9, #0xa0]\n"
+ "ldr q7, [x28, #0xa0]\n"
".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
- "ldr q6, [x9, #0xb0]\n"
+ "ldr q6, [x28, #0xb0]\n"
".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
- "ldr q7, [x9, #0xc0]\n"
+ "ldr q7, [x28, #0xc0]\n"
".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
- "ldr q6, [x9, #0xd0]\n"
+ "ldr q6, [x28, #0xd0]\n"
".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
- "ldr q7, [x9, #0xe0]\n"
+ "ldr q7, [x28, #0xe0]\n"
".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
- "ldr q6, [x9, #0xf0]\n"
- "add x26, x26, #0x10\n"
- "sub x27, x27, #0x10\n"
+ "ldr q6, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "add x9, x9, #0x100\n"
"9:" // Height 1: Multiply loop: Main loop skip
- "cbz x27, 16f\n"
- "cmp x27, #0x8\n"
+ "cbz x26, 16f\n"
+ "cmp x26, #0x8\n"
"blt 11f\n"
"10:" // Height 1: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr q6, [x9, #0x0]\n"
+ "movi v2.16b, #0x0\n"
+ "ldr d1, [x25], #0x8\n"
+ "sub x26, x26, #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q6, [x28, #0x0]\n"
+ "cmp x26, #0x8\n"
".inst 0x4e86a408 // smmla v8.4s, v0.16b, v6.16b\n"
- "ldr q6, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "ldr q6, [x28, #0x20]\n"
".inst 0x4e87a40c // smmla v12.4s, v0.16b, v7.16b\n"
- "ldr q7, [x9, #0x30]\n"
+ "ldr q7, [x28, #0x30]\n"
".inst 0x4e86a409 // smmla v9.4s, v0.16b, v6.16b\n"
- "ldr q6, [x9, #0x40]\n"
+ "ldr q6, [x28, #0x40]\n"
".inst 0x4e87a40d // smmla v13.4s, v0.16b, v7.16b\n"
- "ldr q7, [x9, #0x50]\n"
+ "ldr q7, [x28, #0x50]\n"
".inst 0x4e86a40a // smmla v10.4s, v0.16b, v6.16b\n"
- "ldr q6, [x9, #0x60]\n"
+ "ldr q6, [x28, #0x60]\n"
".inst 0x4e87a40e // smmla v14.4s, v0.16b, v7.16b\n"
- "ldr q7, [x9, #0x70]\n"
- "sub x27, x27, #0x8\n"
- "cmp x27, #0x8\n"
+ "ldr q7, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
".inst 0x4e86a40b // smmla v11.4s, v0.16b, v6.16b\n"
".inst 0x4e87a40f // smmla v15.4s, v0.16b, v7.16b\n"
- "add x9, x9, #0x80\n"
"bge 10b\n"
+ "cbz x26, 16f\n"
"11:" // Height 1: Multiply loop: Skip odd blocks
- "cbz x27, 16f\n"
- "tbz x27, #2, 13f\n"
- "ldr s1, [x26], #0x4\n"
- "tbz x27, #1, 12f\n"
- "ld1 { v1.h }[2], [x26], #0x2\n"
- "tbz x27, #0, 15f\n"
- "ld1 { v1.b }[6], [x26]\n"
+ "tbz x26, #2, 13f\n"
+ "ldr s1, [x25], #0x4\n"
+ "tbz x26, #1, 12f\n"
+ "ld1 { v1.h }[2], [x25], #0x2\n"
+ "tbz x26, #0, 15f\n"
+ "ld1 { v1.b }[6], [x25]\n"
"b 15f\n"
"12:" // Height 1: Multiply loop: Ragged operand read: partial_1_4
- "tbz x27, #0, 15f\n"
- "ld1 { v1.b }[4], [x26]\n"
+ "tbz x26, #0, 15f\n"
+ "ld1 { v1.b }[4], [x25]\n"
"b 15f\n"
"13:" // Height 1: Multiply loop: Ragged operand read: partial_2_0
- "tbz x27, #1, 14f\n"
- "ldr h1, [x26], #0x2\n"
- "tbz x27, #0, 15f\n"
- "ld1 { v1.b }[2], [x26]\n"
+ "tbz x26, #1, 14f\n"
+ "ldr h1, [x25], #0x2\n"
+ "tbz x26, #0, 15f\n"
+ "ld1 { v1.b }[2], [x25]\n"
"b 15f\n"
"14:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
- "ldr b1, [x26, #0x0]\n"
+ "ldr b1, [x25, #0x0]\n"
"15:" // Height 1: Multiply loop: Ragged operand read: Done
- "ldr q7, [x9, #0x0]\n"
- "ldr q6, [x9, #0x10]\n"
+ "movi v2.16b, #0x0\n"
+ "ldr q7, [x28, #0x0]\n"
+ "ldr q6, [x28, #0x10]\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
- "ldr q6, [x9, #0x30]\n"
+ "ldr q6, [x28, #0x30]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "ldr q7, [x9, #0x40]\n"
+ "ldr q7, [x28, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
- "ldr q6, [x9, #0x50]\n"
+ "ldr q6, [x28, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "ldr q7, [x9, #0x60]\n"
+ "ldr q7, [x28, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
- "ldr q6, [x9, #0x70]\n"
+ "ldr q6, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
- "add x9, x9, #0x80\n"
"16:" // Height 1: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 4b\n"
- "ldr q0, [x14, #0x0]\n"
- "ldr q1, [x14, #0x10]\n"
"uzp1 v8.2d, v8.2d, v12.2d\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
"uzp1 v9.2d, v9.2d, v13.2d\n"
- "ldr q2, [x14, #0x20]\n"
- "ldr q3, [x14, #0x30]\n"
+ "ldr q0, [x11, #0x0]\n"
"uzp1 v10.2d, v10.2d, v14.2d\n"
+ "ldr q1, [x11, #0x10]\n"
"uzp1 v11.2d, v11.2d, v15.2d\n"
+ "ldr q2, [x11, #0x20]\n"
"mov v15.16b, v8.16b\n"
- "prfm pstl1keep, [x11, #0x0]\n"
+ "ldr q3, [x11, #0x30]\n"
+ "add x11, x11, #0x40\n"
"add v15.4s, v15.4s, v0.4s\n"
- "add x14, x14, #0x40\n"
"add v9.4s, v9.4s, v1.4s\n"
"add v10.4s, v10.4s, v2.4s\n"
"add v11.4s, v11.4s, v3.4s\n"
@@ -308,20 +312,20 @@ void a64_hybrid_s8qs_mmla_6x16 (
"ldr q2, [x12, #0x20]\n"
"ldr q6, [x13, #0x20]\n"
"ldr q3, [x12, #0x30]\n"
- "ldr q7, [x13, #0x30]\n"
"add x12, x12, #0x40\n"
+ "ldr q7, [x13, #0x30]\n"
"add x13, x13, #0x40\n"
"b 18f\n"
"17:" // Height 1: per layer parameters
- "add x25, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x25]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x25]\n"
+ "add x24, %x[qp], %[per_layer_right_shift]\n"
+ "ld1r { v0.4s }, [x24]\n"
"mov v1.16b, v0.16b\n"
- "mov v5.16b, v4.16b\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v4.4s }, [x24]\n"
"mov v2.16b, v0.16b\n"
- "mov v6.16b, v4.16b\n"
"mov v3.16b, v0.16b\n"
+ "mov v5.16b, v4.16b\n"
+ "mov v6.16b, v4.16b\n"
"mov v7.16b, v4.16b\n"
"18:" // Height 1: parameters loaded
"sqrdmulh v15.4s, v15.4s, v4.4s\n"
@@ -336,98 +340,98 @@ void a64_hybrid_s8qs_mmla_6x16 (
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v15.4s, v15.4s, v4.4s\n"
"sqadd v9.4s, v9.4s, v5.4s\n"
"sqadd v10.4s, v10.4s, v6.4s\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v11.4s, v11.4s, v7.4s\n"
"19:" // Height 1: no shift correction
- "add x25, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x25]\n"
"srshl v15.4s, v15.4s, v0.4s\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "ld1r { v4.4s }, [x24]\n"
"srshl v9.4s, v9.4s, v1.4s\n"
+ "add x24, %x[qp], %[minval]\n"
"srshl v10.4s, v10.4s, v2.4s\n"
+ "ld1r { v5.4s }, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
"srshl v11.4s, v11.4s, v3.4s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x25]\n"
+ "ld1r { v6.4s }, [x24]\n"
+ "cmp x10, #0x10\n"
"add v15.4s, v15.4s, v4.4s\n"
"add v9.4s, v9.4s, v4.4s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x25]\n"
"add v10.4s, v10.4s, v4.4s\n"
"add v11.4s, v11.4s, v4.4s\n"
- "cmp x10, #0x10\n"
"smin v15.4s, v15.4s, v6.4s\n"
"smin v9.4s, v9.4s, v6.4s\n"
"smin v10.4s, v10.4s, v6.4s\n"
- "smin v11.4s, v11.4s, v6.4s\n"
"smax v15.4s, v15.4s, v5.4s\n"
"smax v9.4s, v9.4s, v5.4s\n"
"smax v10.4s, v10.4s, v5.4s\n"
- "smax v11.4s, v11.4s, v5.4s\n"
+ "smin v11.4s, v11.4s, v6.4s\n"
"uzp1 v15.8h, v15.8h, v9.8h\n"
+ "smax v11.4s, v11.4s, v5.4s\n"
"uzp1 v9.8h, v10.8h, v11.8h\n"
"uzp1 v15.16b, v15.16b, v9.16b\n"
"bge 28f\n"
"tbz x10, #3, 23f\n"
- "str d15, [x11], #0x8\n"
+ "str d15, [x9], #0x8\n"
"tbz x10, #2, 21f\n"
- "st1 { v15.s }[2], [x11], #0x4\n"
+ "st1 { v15.s }[2], [x9], #0x4\n"
"tbz x10, #1, 20f\n"
- "st1 { v15.h }[6], [x11], #0x2\n"
+ "st1 { v15.h }[6], [x9], #0x2\n"
"tbz x10, #0, 27f\n"
- "st1 { v15.b }[14], [x11]\n"
+ "st1 { v15.b }[14], [x9]\n"
"b 27f\n"
"20:" // Height 1: Partial direct writeback: partial_1_12
"tbz x10, #0, 27f\n"
- "st1 { v15.b }[12], [x11]\n"
+ "st1 { v15.b }[12], [x9]\n"
"b 27f\n"
"21:" // Height 1: Partial direct writeback: partial_2_8
"tbz x10, #1, 22f\n"
- "st1 { v15.h }[4], [x11], #0x2\n"
+ "st1 { v15.h }[4], [x9], #0x2\n"
"tbz x10, #0, 27f\n"
- "st1 { v15.b }[10], [x11]\n"
+ "st1 { v15.b }[10], [x9]\n"
"b 27f\n"
"22:" // Height 1: Partial direct writeback: partial_1_8
"tbz x10, #0, 27f\n"
- "st1 { v15.b }[8], [x11]\n"
+ "st1 { v15.b }[8], [x9]\n"
"b 27f\n"
"23:" // Height 1: Partial direct writeback: partial_4_0
"tbz x10, #2, 25f\n"
- "str s15, [x11], #0x4\n"
+ "str s15, [x9], #0x4\n"
"tbz x10, #1, 24f\n"
- "st1 { v15.h }[2], [x11], #0x2\n"
+ "st1 { v15.h }[2], [x9], #0x2\n"
"tbz x10, #0, 27f\n"
- "st1 { v15.b }[6], [x11]\n"
+ "st1 { v15.b }[6], [x9]\n"
"b 27f\n"
"24:" // Height 1: Partial direct writeback: partial_1_4
"tbz x10, #0, 27f\n"
- "st1 { v15.b }[4], [x11]\n"
+ "st1 { v15.b }[4], [x9]\n"
"b 27f\n"
"25:" // Height 1: Partial direct writeback: partial_2_0
"tbz x10, #1, 26f\n"
- "str h15, [x11], #0x2\n"
+ "str h15, [x9], #0x2\n"
"tbz x10, #0, 27f\n"
- "st1 { v15.b }[2], [x11]\n"
+ "st1 { v15.b }[2], [x9]\n"
"b 27f\n"
"26:" // Height 1: Partial direct writeback: partial_1_0
- "str b15, [x11, #0x0]\n"
+ "str b15, [x9, #0x0]\n"
"27:" // Height 1: Partial direct writeback: Done
"b 29f\n"
"28:" // Height 1: Full writeback
- "str q15, [x11, #0x0]\n"
- "add x11, x11, #0x10\n"
+ "str q15, [x9, #0x0]\n"
+ "add x9, x9, #0x10\n"
"29:" // Height 1: Writeback done
"subs x10, x10, #0x10\n"
"bgt 2b\n"
"b 176f\n"
"30:" // Height 2
- "mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
+ "mov x11, %x[col_bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"31:" // Height 2: Column loop
"movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
@@ -438,216 +442,216 @@ void a64_hybrid_s8qs_mmla_6x16 (
"movi v14.4s, #0x0\n"
"movi v15.4s, #0x0\n"
"32:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"33:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 34f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 35f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 35f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
"b 35f\n"
"34:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
"35:" // Height 2: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 38f\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q2, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q7, [x9, #0x0]\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "ldr q2, [x24, #0x0]\n"
+ "cmp x26, #0x20\n"
"blt 37f\n"
"36:" // Height 2: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x25, x25, #0x10\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "ldr q6, [x28, #0x10]\n"
+ "add x24, x24, #0x10\n"
".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
- "ldr q6, [x9, #0x30]\n"
+ "ldr q6, [x28, #0x30]\n"
+ "cmp x26, #0x20\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "ldr q7, [x9, #0x40]\n"
+ "ldr q7, [x28, #0x40]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
- "ldr q6, [x9, #0x50]\n"
+ "ldr q6, [x28, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "ldr q7, [x9, #0x60]\n"
+ "ldr q7, [x28, #0x60]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
- "ldr q6, [x9, #0x70]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
+ "ldr q6, [x28, #0x70]\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "ldr q7, [x9, #0x80]\n"
+ "ldr q7, [x28, #0x80]\n"
+ "ldr q2, [x24, #0x0]\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
- "ldr q6, [x9, #0x90]\n"
+ "ldr q6, [x28, #0x90]\n"
".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
- "ldr q7, [x9, #0xa0]\n"
+ "ldr q7, [x28, #0xa0]\n"
".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
- "ldr q6, [x9, #0xb0]\n"
+ "ldr q6, [x28, #0xb0]\n"
".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
- "ldr q7, [x9, #0xc0]\n"
+ "ldr q7, [x28, #0xc0]\n"
".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
- "ldr q6, [x9, #0xd0]\n"
+ "ldr q6, [x28, #0xd0]\n"
".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
- "ldr q7, [x9, #0xe0]\n"
+ "ldr q7, [x28, #0xe0]\n"
".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
- "ldr q6, [x9, #0xf0]\n"
- "sub x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
- "ldr q2, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
+ "ldr q6, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
- "add x9, x9, #0x100\n"
- "ldr q7, [x9, #0x0]\n"
".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q6, [x9, #0x10]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q1, [x25, #0x0]\n"
"bge 36b\n"
"37:" // Height 2: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q7, [x28, #0x0]\n"
+ "sub x26, x26, #0x10\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "ldr q6, [x28, #0x10]\n"
+ "add x25, x25, #0x10\n"
".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "add x24, x24, #0x10\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
- "ldr q6, [x9, #0x30]\n"
+ "ldr q6, [x28, #0x30]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "ldr q7, [x9, #0x40]\n"
+ "ldr q7, [x28, #0x40]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
- "ldr q6, [x9, #0x50]\n"
+ "ldr q6, [x28, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "ldr q7, [x9, #0x60]\n"
+ "ldr q7, [x28, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
- "ldr q6, [x9, #0x70]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
+ "ldr q6, [x28, #0x70]\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "ldr q7, [x9, #0x80]\n"
+ "ldr q7, [x28, #0x80]\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
- "ldr q6, [x9, #0x90]\n"
+ "ldr q6, [x28, #0x90]\n"
".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
- "ldr q7, [x9, #0xa0]\n"
+ "ldr q7, [x28, #0xa0]\n"
".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
- "ldr q6, [x9, #0xb0]\n"
+ "ldr q6, [x28, #0xb0]\n"
".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
- "ldr q7, [x9, #0xc0]\n"
+ "ldr q7, [x28, #0xc0]\n"
".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
- "ldr q6, [x9, #0xd0]\n"
+ "ldr q6, [x28, #0xd0]\n"
".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
- "ldr q7, [x9, #0xe0]\n"
+ "ldr q7, [x28, #0xe0]\n"
".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
- "ldr q6, [x9, #0xf0]\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
+ "ldr q6, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
- "sub x27, x27, #0x10\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "add x9, x9, #0x100\n"
"38:" // Height 2: Multiply loop: Main loop skip
- "cbz x27, 45f\n"
- "cmp x27, #0x8\n"
+ "cbz x26, 45f\n"
+ "cmp x26, #0x8\n"
"blt 40f\n"
"39:" // Height 2: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d2, [x25], #0x8\n"
+ "ldr d1, [x25], #0x8\n"
+ "sub x26, x26, #0x8\n"
+ "ldr d2, [x24], #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "sub x27, x27, #0x8\n"
- "ldr q6, [x9, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q6, [x28, #0x0]\n"
+ "cmp x26, #0x8\n"
".inst 0x4e86a408 // smmla v8.4s, v0.16b, v6.16b\n"
+ "ldr q7, [x28, #0x10]\n"
+ "ldr q6, [x28, #0x20]\n"
".inst 0x4e87a40c // smmla v12.4s, v0.16b, v7.16b\n"
- "ldr q6, [x9, #0x20]\n"
- "ldr q7, [x9, #0x30]\n"
+ "ldr q7, [x28, #0x30]\n"
".inst 0x4e86a409 // smmla v9.4s, v0.16b, v6.16b\n"
+ "ldr q6, [x28, #0x40]\n"
".inst 0x4e87a40d // smmla v13.4s, v0.16b, v7.16b\n"
- "ldr q6, [x9, #0x40]\n"
- "ldr q7, [x9, #0x50]\n"
+ "ldr q7, [x28, #0x50]\n"
".inst 0x4e86a40a // smmla v10.4s, v0.16b, v6.16b\n"
+ "ldr q6, [x28, #0x60]\n"
".inst 0x4e87a40e // smmla v14.4s, v0.16b, v7.16b\n"
- "ldr q6, [x9, #0x60]\n"
- "ldr q7, [x9, #0x70]\n"
- "cmp x27, #0x8\n"
+ "ldr q7, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
".inst 0x4e86a40b // smmla v11.4s, v0.16b, v6.16b\n"
".inst 0x4e87a40f // smmla v15.4s, v0.16b, v7.16b\n"
- "add x9, x9, #0x80\n"
"bge 39b\n"
+ "cbz x26, 45f\n"
"40:" // Height 2: Multiply loop: Skip odd blocks
- "cbz x27, 45f\n"
- "tbz x27, #2, 42f\n"
- "ldr s1, [x26], #0x4\n"
- "ldr s2, [x25], #0x4\n"
- "tbz x27, #1, 41f\n"
- "ld1 { v1.h }[2], [x26], #0x2\n"
- "ld1 { v2.h }[2], [x25], #0x2\n"
- "tbz x27, #0, 44f\n"
- "ld1 { v1.b }[6], [x26]\n"
- "ld1 { v2.b }[6], [x25]\n"
+ "tbz x26, #2, 42f\n"
+ "ldr s1, [x25], #0x4\n"
+ "ldr s2, [x24], #0x4\n"
+ "tbz x26, #1, 41f\n"
+ "ld1 { v1.h }[2], [x25], #0x2\n"
+ "ld1 { v2.h }[2], [x24], #0x2\n"
+ "tbz x26, #0, 44f\n"
+ "ld1 { v1.b }[6], [x25]\n"
+ "ld1 { v2.b }[6], [x24]\n"
"b 44f\n"
"41:" // Height 2: Multiply loop: Ragged operand read: partial_1_4
- "tbz x27, #0, 44f\n"
- "ld1 { v1.b }[4], [x26]\n"
- "ld1 { v2.b }[4], [x25]\n"
+ "tbz x26, #0, 44f\n"
+ "ld1 { v1.b }[4], [x25]\n"
+ "ld1 { v2.b }[4], [x24]\n"
"b 44f\n"
"42:" // Height 2: Multiply loop: Ragged operand read: partial_2_0
- "tbz x27, #1, 43f\n"
- "ldr h1, [x26], #0x2\n"
- "ldr h2, [x25], #0x2\n"
- "tbz x27, #0, 44f\n"
- "ld1 { v1.b }[2], [x26]\n"
- "ld1 { v2.b }[2], [x25]\n"
+ "tbz x26, #1, 43f\n"
+ "ldr h1, [x25], #0x2\n"
+ "ldr h2, [x24], #0x2\n"
+ "tbz x26, #0, 44f\n"
+ "ld1 { v1.b }[2], [x25]\n"
+ "ld1 { v2.b }[2], [x24]\n"
"b 44f\n"
"43:" // Height 2: Multiply loop: Ragged operand read: partial_1_0
- "ldr b1, [x26, #0x0]\n"
- "ldr b2, [x25, #0x0]\n"
+ "ldr b1, [x25, #0x0]\n"
+ "ldr b2, [x24, #0x0]\n"
"44:" // Height 2: Multiply loop: Ragged operand read: Done
- "ldr q7, [x9, #0x0]\n"
- "ldr q6, [x9, #0x10]\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q7, [x28, #0x0]\n"
+ "ldr q6, [x28, #0x10]\n"
".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
- "ldr q6, [x9, #0x30]\n"
+ "ldr q6, [x28, #0x30]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "ldr q7, [x9, #0x40]\n"
+ "ldr q7, [x28, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
- "ldr q6, [x9, #0x50]\n"
+ "ldr q6, [x28, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "ldr q7, [x9, #0x60]\n"
+ "ldr q7, [x28, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
- "ldr q6, [x9, #0x70]\n"
+ "ldr q6, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
- "add x9, x9, #0x80\n"
"45:" // Height 2: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 33b\n"
- "ldr q0, [x14, #0x0]\n"
- "ldr q1, [x14, #0x10]\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
- "ldr q2, [x14, #0x20]\n"
- "ldr q3, [x14, #0x30]\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
+ "add x23, x9, x19\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
+ "ldr q0, [x11, #0x0]\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
+ "ldr q1, [x11, #0x10]\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
- "add x24, x11, x20\n"
+ "ldr q2, [x11, #0x20]\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
+ "ldr q3, [x11, #0x30]\n"
+ "add x11, x11, #0x40\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
- "prfm pstl1keep, [x11, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
"mov v15.16b, v7.16b\n"
"add v15.4s, v15.4s, v0.4s\n"
- "add x14, x14, #0x40\n"
"add v12.4s, v12.4s, v1.4s\n"
"add v13.4s, v13.4s, v2.4s\n"
"add v14.4s, v14.4s, v3.4s\n"
@@ -663,20 +667,20 @@ void a64_hybrid_s8qs_mmla_6x16 (
"ldr q2, [x12, #0x20]\n"
"ldr q6, [x13, #0x20]\n"
"ldr q3, [x12, #0x30]\n"
- "ldr q7, [x13, #0x30]\n"
"add x12, x12, #0x40\n"
+ "ldr q7, [x13, #0x30]\n"
"add x13, x13, #0x40\n"
"b 47f\n"
"46:" // Height 2: per layer parameters
- "add x25, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x25]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x25]\n"
+ "add x24, %x[qp], %[per_layer_right_shift]\n"
+ "ld1r { v0.4s }, [x24]\n"
"mov v1.16b, v0.16b\n"
- "mov v5.16b, v4.16b\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v4.4s }, [x24]\n"
"mov v2.16b, v0.16b\n"
- "mov v6.16b, v4.16b\n"
"mov v3.16b, v0.16b\n"
+ "mov v5.16b, v4.16b\n"
+ "mov v6.16b, v4.16b\n"
"mov v7.16b, v4.16b\n"
"47:" // Height 2: parameters loaded
"sqrdmulh v15.4s, v15.4s, v4.4s\n"
@@ -691,149 +695,149 @@ void a64_hybrid_s8qs_mmla_6x16 (
"and v4.16b, v15.16b, v0.16b\n"
"and v5.16b, v12.16b, v1.16b\n"
"and v6.16b, v13.16b, v2.16b\n"
- "and v7.16b, v14.16b, v3.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v15.4s, v15.4s, v4.4s\n"
"sqadd v12.4s, v12.4s, v5.4s\n"
"sqadd v13.4s, v13.4s, v6.4s\n"
- "sqadd v14.4s, v14.4s, v7.4s\n"
+ "and v7.16b, v14.16b, v3.16b\n"
"and v4.16b, v8.16b, v0.16b\n"
"and v5.16b, v9.16b, v1.16b\n"
- "and v6.16b, v10.16b, v2.16b\n"
- "and v7.16b, v11.16b, v3.16b\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
+ "sqadd v14.4s, v14.4s, v7.4s\n"
"sqadd v8.4s, v8.4s, v4.4s\n"
"sqadd v9.4s, v9.4s, v5.4s\n"
+ "and v6.16b, v10.16b, v2.16b\n"
+ "and v7.16b, v11.16b, v3.16b\n"
+ "sshr v6.4s, v6.4s, #0x1f\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v10.4s, v10.4s, v6.4s\n"
"sqadd v11.4s, v11.4s, v7.4s\n"
"48:" // Height 2: no shift correction
- "add x25, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x25]\n"
"srshl v15.4s, v15.4s, v0.4s\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "ld1r { v4.4s }, [x24]\n"
"srshl v12.4s, v12.4s, v1.4s\n"
+ "add x24, %x[qp], %[minval]\n"
"srshl v13.4s, v13.4s, v2.4s\n"
+ "ld1r { v5.4s }, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
"srshl v14.4s, v14.4s, v3.4s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x25]\n"
+ "ld1r { v6.4s }, [x24]\n"
+ "cmp x10, #0x10\n"
"srshl v8.4s, v8.4s, v0.4s\n"
"srshl v9.4s, v9.4s, v1.4s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x25]\n"
- "srshl v10.4s, v10.4s, v2.4s\n"
- "srshl v11.4s, v11.4s, v3.4s\n"
- "cmp x10, #0x10\n"
"add v15.4s, v15.4s, v4.4s\n"
"add v12.4s, v12.4s, v4.4s\n"
"add v13.4s, v13.4s, v4.4s\n"
- "add v14.4s, v14.4s, v4.4s\n"
- "add v8.4s, v8.4s, v4.4s\n"
- "add v9.4s, v9.4s, v4.4s\n"
- "add v10.4s, v10.4s, v4.4s\n"
- "add v11.4s, v11.4s, v4.4s\n"
"smin v15.4s, v15.4s, v6.4s\n"
"smin v12.4s, v12.4s, v6.4s\n"
"smin v13.4s, v13.4s, v6.4s\n"
- "smin v14.4s, v14.4s, v6.4s\n"
- "smin v8.4s, v8.4s, v6.4s\n"
- "smin v9.4s, v9.4s, v6.4s\n"
- "smin v10.4s, v10.4s, v6.4s\n"
- "smin v11.4s, v11.4s, v6.4s\n"
"smax v15.4s, v15.4s, v5.4s\n"
"smax v12.4s, v12.4s, v5.4s\n"
"smax v13.4s, v13.4s, v5.4s\n"
+ "add v14.4s, v14.4s, v4.4s\n"
+ "add v8.4s, v8.4s, v4.4s\n"
+ "add v9.4s, v9.4s, v4.4s\n"
+ "smin v14.4s, v14.4s, v6.4s\n"
+ "smin v8.4s, v8.4s, v6.4s\n"
+ "smin v9.4s, v9.4s, v6.4s\n"
"smax v14.4s, v14.4s, v5.4s\n"
"smax v8.4s, v8.4s, v5.4s\n"
"smax v9.4s, v9.4s, v5.4s\n"
- "smax v10.4s, v10.4s, v5.4s\n"
- "smax v11.4s, v11.4s, v5.4s\n"
+ "srshl v10.4s, v10.4s, v2.4s\n"
+ "srshl v11.4s, v11.4s, v3.4s\n"
"uzp1 v15.8h, v15.8h, v12.8h\n"
"uzp1 v12.8h, v13.8h, v14.8h\n"
+ "add v10.4s, v10.4s, v4.4s\n"
+ "add v11.4s, v11.4s, v4.4s\n"
"uzp1 v8.8h, v8.8h, v9.8h\n"
- "uzp1 v9.8h, v10.8h, v11.8h\n"
+ "smin v10.4s, v10.4s, v6.4s\n"
+ "smin v11.4s, v11.4s, v6.4s\n"
"uzp1 v15.16b, v15.16b, v12.16b\n"
+ "smax v10.4s, v10.4s, v5.4s\n"
+ "smax v11.4s, v11.4s, v5.4s\n"
+ "uzp1 v9.8h, v10.8h, v11.8h\n"
"uzp1 v8.16b, v8.16b, v9.16b\n"
"bge 57f\n"
"tbz x10, #3, 52f\n"
- "str d15, [x11], #0x8\n"
- "str d8, [x24], #0x8\n"
+ "str d15, [x9], #0x8\n"
+ "str d8, [x23], #0x8\n"
"tbz x10, #2, 50f\n"
- "st1 { v15.s }[2], [x11], #0x4\n"
- "st1 { v8.s }[2], [x24], #0x4\n"
+ "st1 { v15.s }[2], [x9], #0x4\n"
+ "st1 { v8.s }[2], [x23], #0x4\n"
"tbz x10, #1, 49f\n"
- "st1 { v15.h }[6], [x11], #0x2\n"
- "st1 { v8.h }[6], [x24], #0x2\n"
+ "st1 { v15.h }[6], [x9], #0x2\n"
+ "st1 { v8.h }[6], [x23], #0x2\n"
"tbz x10, #0, 56f\n"
- "st1 { v15.b }[14], [x11]\n"
- "st1 { v8.b }[14], [x24]\n"
+ "st1 { v15.b }[14], [x9]\n"
+ "st1 { v8.b }[14], [x23]\n"
"b 56f\n"
"49:" // Height 2: Partial direct writeback: partial_1_12
"tbz x10, #0, 56f\n"
- "st1 { v15.b }[12], [x11]\n"
- "st1 { v8.b }[12], [x24]\n"
+ "st1 { v15.b }[12], [x9]\n"
+ "st1 { v8.b }[12], [x23]\n"
"b 56f\n"
"50:" // Height 2: Partial direct writeback: partial_2_8
"tbz x10, #1, 51f\n"
- "st1 { v15.h }[4], [x11], #0x2\n"
- "st1 { v8.h }[4], [x24], #0x2\n"
+ "st1 { v15.h }[4], [x9], #0x2\n"
+ "st1 { v8.h }[4], [x23], #0x2\n"
"tbz x10, #0, 56f\n"
- "st1 { v15.b }[10], [x11]\n"
- "st1 { v8.b }[10], [x24]\n"
+ "st1 { v15.b }[10], [x9]\n"
+ "st1 { v8.b }[10], [x23]\n"
"b 56f\n"
"51:" // Height 2: Partial direct writeback: partial_1_8
"tbz x10, #0, 56f\n"
- "st1 { v15.b }[8], [x11]\n"
- "st1 { v8.b }[8], [x24]\n"
+ "st1 { v15.b }[8], [x9]\n"
+ "st1 { v8.b }[8], [x23]\n"
"b 56f\n"
"52:" // Height 2: Partial direct writeback: partial_4_0
"tbz x10, #2, 54f\n"
- "str s15, [x11], #0x4\n"
- "str s8, [x24], #0x4\n"
+ "str s15, [x9], #0x4\n"
+ "str s8, [x23], #0x4\n"
"tbz x10, #1, 53f\n"
- "st1 { v15.h }[2], [x11], #0x2\n"
- "st1 { v8.h }[2], [x24], #0x2\n"
+ "st1 { v15.h }[2], [x9], #0x2\n"
+ "st1 { v8.h }[2], [x23], #0x2\n"
"tbz x10, #0, 56f\n"
- "st1 { v15.b }[6], [x11]\n"
- "st1 { v8.b }[6], [x24]\n"
+ "st1 { v15.b }[6], [x9]\n"
+ "st1 { v8.b }[6], [x23]\n"
"b 56f\n"
"53:" // Height 2: Partial direct writeback: partial_1_4
"tbz x10, #0, 56f\n"
- "st1 { v15.b }[4], [x11]\n"
- "st1 { v8.b }[4], [x24]\n"
+ "st1 { v15.b }[4], [x9]\n"
+ "st1 { v8.b }[4], [x23]\n"
"b 56f\n"
"54:" // Height 2: Partial direct writeback: partial_2_0
"tbz x10, #1, 55f\n"
- "str h15, [x11], #0x2\n"
- "str h8, [x24], #0x2\n"
+ "str h15, [x9], #0x2\n"
+ "str h8, [x23], #0x2\n"
"tbz x10, #0, 56f\n"
- "st1 { v15.b }[2], [x11]\n"
- "st1 { v8.b }[2], [x24]\n"
+ "st1 { v15.b }[2], [x9]\n"
+ "st1 { v8.b }[2], [x23]\n"
"b 56f\n"
"55:" // Height 2: Partial direct writeback: partial_1_0
- "str b15, [x11, #0x0]\n"
- "str b8, [x24, #0x0]\n"
+ "str b15, [x9, #0x0]\n"
+ "str b8, [x23, #0x0]\n"
"56:" // Height 2: Partial direct writeback: Done
"b 58f\n"
"57:" // Height 2: Full writeback
- "str q15, [x11, #0x0]\n"
- "add x11, x11, #0x10\n"
- "str q8, [x24, #0x0]\n"
+ "str q15, [x9, #0x0]\n"
+ "add x9, x9, #0x10\n"
+ "str q8, [x23, #0x0]\n"
"58:" // Height 2: Writeback done
"subs x10, x10, #0x10\n"
"bgt 31b\n"
"b 176f\n"
"59:" // Height 3
- "mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
+ "mov x11, %x[col_bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"60:" // Height 3: Column loop
"movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
@@ -852,291 +856,295 @@ void a64_hybrid_s8qs_mmla_6x16 (
"movi v22.4s, #0x0\n"
"movi v23.4s, #0x0\n"
"61:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"62:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 63f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 64f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 64f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
"b 64f\n"
"63:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
"64:" // Height 3: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 67f\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q2, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q3, [x24, #0x0]\n"
- "ldr q7, [x9, #0x0]\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "cmp x26, #0x20\n"
"blt 66f\n"
"65:" // Height 3: Multiply loop: Main loop head
+ "movi v4.16b, #0x0\n"
+ "ldr q2, [x24, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x23, #0x0]\n"
+ "add x24, x24, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x23, x23, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q6, [x28, #0x10]\n"
+ "sub x26, x26, #0x10\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "cmp x26, #0x20\n"
+ ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- "ldr q6, [x9, #0x30]\n"
+ "ldr q6, [x28, #0x30]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "trn2 v3.2d, v3.2d, v4.2d\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
- "ldr q7, [x9, #0x40]\n"
+ "ldr q7, [x28, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
- "sub x27, x27, #0x10\n"
".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- "ldr q6, [x9, #0x50]\n"
+ "ldr q6, [x28, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "add x26, x26, #0x10\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
- "ldr q7, [x9, #0x60]\n"
+ "ldr q7, [x28, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- "ldr q6, [x9, #0x70]\n"
+ "ldr q6, [x28, #0x70]\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
- "ldr q7, [x9, #0x80]\n"
+ "ldr q7, [x28, #0x80]\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
- "cmp x27, #0x20\n"
".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
- "ldr q6, [x9, #0x90]\n"
- "ldr q2, [x25, #0x0]\n"
+ "ldr q6, [x28, #0x90]\n"
".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
".inst 0x4e87a470 // smmla v16.4s, v3.16b, v7.16b\n"
- "ldr q7, [x9, #0xa0]\n"
+ "ldr q7, [x28, #0xa0]\n"
".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4e86a474 // smmla v20.4s, v3.16b, v6.16b\n"
- "ldr q6, [x9, #0xb0]\n"
+ "ldr q6, [x28, #0xb0]\n"
".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4e87a471 // smmla v17.4s, v3.16b, v7.16b\n"
- "ldr q7, [x9, #0xc0]\n"
+ "ldr q7, [x28, #0xc0]\n"
".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e86a475 // smmla v21.4s, v3.16b, v6.16b\n"
- "ldr q6, [x9, #0xd0]\n"
+ "ldr q6, [x28, #0xd0]\n"
".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
".inst 0x4e87a472 // smmla v18.4s, v3.16b, v7.16b\n"
- "ldr q7, [x9, #0xe0]\n"
+ "ldr q7, [x28, #0xe0]\n"
".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
".inst 0x4e86a476 // smmla v22.4s, v3.16b, v6.16b\n"
- "ldr q6, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q6, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
".inst 0x4e87a473 // smmla v19.4s, v3.16b, v7.16b\n"
- "ldr q7, [x9, #0x0]\n"
".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
- "ldr q1, [x26, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
".inst 0x4e86a477 // smmla v23.4s, v3.16b, v6.16b\n"
- "ldr q3, [x24, #0x0]\n"
- "ldr q6, [x9, #0x10]\n"
"bge 65b\n"
"66:" // Height 3: Multiply loop: Single iteration only
+ "movi v4.16b, #0x0\n"
+ "ldr q2, [x24, #0x0]\n"
+ "sub x26, x26, #0x10\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x23, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x24, x24, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q6, [x28, #0x10]\n"
+ "add x23, x23, #0x10\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- "ldr q6, [x9, #0x30]\n"
+ "ldr q6, [x28, #0x30]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "trn2 v3.2d, v3.2d, v4.2d\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
- "ldr q7, [x9, #0x40]\n"
+ "ldr q7, [x28, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
- "add x26, x26, #0x10\n"
".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- "ldr q6, [x9, #0x50]\n"
+ "ldr q6, [x28, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
- "ldr q7, [x9, #0x60]\n"
+ "ldr q7, [x28, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- "ldr q6, [x9, #0x70]\n"
+ "ldr q6, [x28, #0x70]\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "sub x27, x27, #0x10\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
- "ldr q7, [x9, #0x80]\n"
+ "ldr q7, [x28, #0x80]\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
- "ldr q6, [x9, #0x90]\n"
+ "ldr q6, [x28, #0x90]\n"
".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4e87a470 // smmla v16.4s, v3.16b, v7.16b\n"
- "ldr q7, [x9, #0xa0]\n"
+ "ldr q7, [x28, #0xa0]\n"
".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e86a474 // smmla v20.4s, v3.16b, v6.16b\n"
- "ldr q6, [x9, #0xb0]\n"
+ "ldr q6, [x28, #0xb0]\n"
".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
".inst 0x4e87a471 // smmla v17.4s, v3.16b, v7.16b\n"
- "ldr q7, [x9, #0xc0]\n"
+ "ldr q7, [x28, #0xc0]\n"
".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
".inst 0x4e86a475 // smmla v21.4s, v3.16b, v6.16b\n"
- "ldr q6, [x9, #0xd0]\n"
+ "ldr q6, [x28, #0xd0]\n"
".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
".inst 0x4e87a472 // smmla v18.4s, v3.16b, v7.16b\n"
- "ldr q7, [x9, #0xe0]\n"
+ "ldr q7, [x28, #0xe0]\n"
".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
".inst 0x4e86a476 // smmla v22.4s, v3.16b, v6.16b\n"
- "ldr q6, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q6, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
".inst 0x4e87a473 // smmla v19.4s, v3.16b, v7.16b\n"
".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
".inst 0x4e86a477 // smmla v23.4s, v3.16b, v6.16b\n"
"67:" // Height 3: Multiply loop: Main loop skip
- "cbz x27, 74f\n"
- "cmp x27, #0x8\n"
+ "cbz x26, 74f\n"
+ "cmp x26, #0x8\n"
"blt 69f\n"
"68:" // Height 3: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d2, [x25], #0x8\n"
+ "movi v4.16b, #0x0\n"
+ "ldr d1, [x25], #0x8\n"
+ "sub x26, x26, #0x8\n"
+ "ldr d2, [x24], #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "ldr d3, [x24], #0x8\n"
- "ldr q6, [x9, #0x0]\n"
+ "ldr d3, [x23], #0x8\n"
+ "cmp x26, #0x8\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q6, [x28, #0x0]\n"
+ "ldr q7, [x28, #0x10]\n"
".inst 0x4e86a408 // smmla v8.4s, v0.16b, v6.16b\n"
- "ldr q7, [x9, #0x10]\n"
".inst 0x4e86a450 // smmla v16.4s, v2.16b, v6.16b\n"
- "ldr q6, [x9, #0x20]\n"
+ "ldr q6, [x28, #0x20]\n"
".inst 0x4e87a40c // smmla v12.4s, v0.16b, v7.16b\n"
".inst 0x4e87a454 // smmla v20.4s, v2.16b, v7.16b\n"
- "ldr q7, [x9, #0x30]\n"
+ "ldr q7, [x28, #0x30]\n"
".inst 0x4e86a409 // smmla v9.4s, v0.16b, v6.16b\n"
- "sub x27, x27, #0x8\n"
".inst 0x4e86a451 // smmla v17.4s, v2.16b, v6.16b\n"
- "ldr q6, [x9, #0x40]\n"
+ "ldr q6, [x28, #0x40]\n"
".inst 0x4e87a40d // smmla v13.4s, v0.16b, v7.16b\n"
- "cmp x27, #0x8\n"
".inst 0x4e87a455 // smmla v21.4s, v2.16b, v7.16b\n"
- "ldr q7, [x9, #0x50]\n"
+ "ldr q7, [x28, #0x50]\n"
".inst 0x4e86a40a // smmla v10.4s, v0.16b, v6.16b\n"
".inst 0x4e86a452 // smmla v18.4s, v2.16b, v6.16b\n"
- "ldr q6, [x9, #0x60]\n"
+ "ldr q6, [x28, #0x60]\n"
".inst 0x4e87a40e // smmla v14.4s, v0.16b, v7.16b\n"
".inst 0x4e87a456 // smmla v22.4s, v2.16b, v7.16b\n"
- "ldr q7, [x9, #0x70]\n"
+ "ldr q7, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
".inst 0x4e86a40b // smmla v11.4s, v0.16b, v6.16b\n"
- "add x9, x9, #0x80\n"
".inst 0x4e86a453 // smmla v19.4s, v2.16b, v6.16b\n"
".inst 0x4e87a40f // smmla v15.4s, v0.16b, v7.16b\n"
".inst 0x4e87a457 // smmla v23.4s, v2.16b, v7.16b\n"
"bge 68b\n"
+ "cbz x26, 74f\n"
"69:" // Height 3: Multiply loop: Skip odd blocks
- "cbz x27, 74f\n"
- "tbz x27, #2, 71f\n"
- "ldr s1, [x26], #0x4\n"
- "ldr s2, [x25], #0x4\n"
- "ldr s3, [x24], #0x4\n"
- "tbz x27, #1, 70f\n"
- "ld1 { v1.h }[2], [x26], #0x2\n"
- "ld1 { v2.h }[2], [x25], #0x2\n"
- "ld1 { v3.h }[2], [x24], #0x2\n"
- "tbz x27, #0, 73f\n"
- "ld1 { v1.b }[6], [x26]\n"
- "ld1 { v2.b }[6], [x25]\n"
- "ld1 { v3.b }[6], [x24]\n"
+ "tbz x26, #2, 71f\n"
+ "ldr s1, [x25], #0x4\n"
+ "ldr s2, [x24], #0x4\n"
+ "ldr s3, [x23], #0x4\n"
+ "tbz x26, #1, 70f\n"
+ "ld1 { v1.h }[2], [x25], #0x2\n"
+ "ld1 { v2.h }[2], [x24], #0x2\n"
+ "ld1 { v3.h }[2], [x23], #0x2\n"
+ "tbz x26, #0, 73f\n"
+ "ld1 { v1.b }[6], [x25]\n"
+ "ld1 { v2.b }[6], [x24]\n"
+ "ld1 { v3.b }[6], [x23]\n"
"b 73f\n"
"70:" // Height 3: Multiply loop: Ragged operand read: partial_1_4
- "tbz x27, #0, 73f\n"
- "ld1 { v1.b }[4], [x26]\n"
- "ld1 { v2.b }[4], [x25]\n"
- "ld1 { v3.b }[4], [x24]\n"
+ "tbz x26, #0, 73f\n"
+ "ld1 { v1.b }[4], [x25]\n"
+ "ld1 { v2.b }[4], [x24]\n"
+ "ld1 { v3.b }[4], [x23]\n"
"b 73f\n"
"71:" // Height 3: Multiply loop: Ragged operand read: partial_2_0
- "tbz x27, #1, 72f\n"
- "ldr h1, [x26], #0x2\n"
- "ldr h2, [x25], #0x2\n"
- "ldr h3, [x24], #0x2\n"
- "tbz x27, #0, 73f\n"
- "ld1 { v1.b }[2], [x26]\n"
- "ld1 { v2.b }[2], [x25]\n"
- "ld1 { v3.b }[2], [x24]\n"
+ "tbz x26, #1, 72f\n"
+ "ldr h1, [x25], #0x2\n"
+ "ldr h2, [x24], #0x2\n"
+ "ldr h3, [x23], #0x2\n"
+ "tbz x26, #0, 73f\n"
+ "ld1 { v1.b }[2], [x25]\n"
+ "ld1 { v2.b }[2], [x24]\n"
+ "ld1 { v3.b }[2], [x23]\n"
"b 73f\n"
"72:" // Height 3: Multiply loop: Ragged operand read: partial_1_0
- "ldr b1, [x26, #0x0]\n"
- "ldr b2, [x25, #0x0]\n"
- "ldr b3, [x24, #0x0]\n"
+ "ldr b1, [x25, #0x0]\n"
+ "ldr b2, [x24, #0x0]\n"
+ "ldr b3, [x23, #0x0]\n"
"73:" // Height 3: Multiply loop: Ragged operand read: Done
- "ldr q7, [x9, #0x0]\n"
- "ldr q6, [x9, #0x10]\n"
+ "movi v4.16b, #0x0\n"
+ "ldr q7, [x28, #0x0]\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q6, [x28, #0x10]\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- "ldr q6, [x9, #0x30]\n"
+ "ldr q6, [x28, #0x30]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
- "ldr q7, [x9, #0x40]\n"
+ "ldr q7, [x28, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- "ldr q6, [x9, #0x50]\n"
+ "ldr q6, [x28, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
- "ldr q7, [x9, #0x60]\n"
+ "ldr q7, [x28, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- "ldr q6, [x9, #0x70]\n"
- "add x9, x9, #0x80\n"
+ "ldr q6, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
"74:" // Height 3: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 62b\n"
- "ldr q0, [x14, #0x0]\n"
- "ldr q1, [x14, #0x10]\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
- "ldr q2, [x14, #0x20]\n"
- "ldr q3, [x14, #0x30]\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
+ "add x23, x9, x19\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
+ "ldr q0, [x11, #0x0]\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
- "add x24, x11, x20\n"
+ "ldr q1, [x11, #0x10]\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
+ "ldr q2, [x11, #0x20]\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
- "add x23, x24, x20\n"
- "prfm pstl1keep, [x11, #0x0]\n"
+ "ldr q3, [x11, #0x30]\n"
+ "add x11, x11, #0x40\n"
"uzp1 v16.2d, v16.2d, v20.2d\n"
"uzp1 v17.2d, v17.2d, v21.2d\n"
- "prfm pstl1keep, [x24, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
"uzp1 v18.2d, v18.2d, v22.2d\n"
"uzp1 v19.2d, v19.2d, v23.2d\n"
- "add x14, x14, #0x40\n"
"mov v23.16b, v7.16b\n"
"add v23.4s, v23.4s, v0.4s\n"
"add v12.4s, v12.4s, v1.4s\n"
@@ -1158,20 +1166,20 @@ void a64_hybrid_s8qs_mmla_6x16 (
"ldr q2, [x12, #0x20]\n"
"ldr q6, [x13, #0x20]\n"
"ldr q3, [x12, #0x30]\n"
- "ldr q7, [x13, #0x30]\n"
"add x12, x12, #0x40\n"
+ "ldr q7, [x13, #0x30]\n"
"add x13, x13, #0x40\n"
"b 76f\n"
"75:" // Height 3: per layer parameters
- "add x25, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x25]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x25]\n"
+ "add x24, %x[qp], %[per_layer_right_shift]\n"
+ "ld1r { v0.4s }, [x24]\n"
"mov v1.16b, v0.16b\n"
- "mov v5.16b, v4.16b\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v4.4s }, [x24]\n"
"mov v2.16b, v0.16b\n"
- "mov v6.16b, v4.16b\n"
"mov v3.16b, v0.16b\n"
+ "mov v5.16b, v4.16b\n"
+ "mov v6.16b, v4.16b\n"
"mov v7.16b, v4.16b\n"
"76:" // Height 3: parameters loaded
"sqrdmulh v23.4s, v23.4s, v4.4s\n"
@@ -1190,98 +1198,98 @@ void a64_hybrid_s8qs_mmla_6x16 (
"and v4.16b, v23.16b, v0.16b\n"
"and v5.16b, v12.16b, v1.16b\n"
"and v6.16b, v13.16b, v2.16b\n"
- "and v7.16b, v14.16b, v3.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v23.4s, v23.4s, v4.4s\n"
"sqadd v12.4s, v12.4s, v5.4s\n"
"sqadd v13.4s, v13.4s, v6.4s\n"
- "sqadd v14.4s, v14.4s, v7.4s\n"
+ "and v7.16b, v14.16b, v3.16b\n"
"and v4.16b, v8.16b, v0.16b\n"
"and v5.16b, v9.16b, v1.16b\n"
- "and v6.16b, v10.16b, v2.16b\n"
- "and v7.16b, v11.16b, v3.16b\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
+ "sqadd v14.4s, v14.4s, v7.4s\n"
"sqadd v8.4s, v8.4s, v4.4s\n"
"sqadd v9.4s, v9.4s, v5.4s\n"
+ "and v6.16b, v10.16b, v2.16b\n"
+ "and v7.16b, v11.16b, v3.16b\n"
+ "and v4.16b, v16.16b, v0.16b\n"
+ "sshr v6.4s, v6.4s, #0x1f\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
"sqadd v10.4s, v10.4s, v6.4s\n"
"sqadd v11.4s, v11.4s, v7.4s\n"
- "and v4.16b, v16.16b, v0.16b\n"
+ "sqadd v16.4s, v16.4s, v4.4s\n"
"and v5.16b, v17.16b, v1.16b\n"
"and v6.16b, v18.16b, v2.16b\n"
"and v7.16b, v19.16b, v3.16b\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v4.4s\n"
"sqadd v17.4s, v17.4s, v5.4s\n"
"sqadd v18.4s, v18.4s, v6.4s\n"
"sqadd v19.4s, v19.4s, v7.4s\n"
"77:" // Height 3: no shift correction
- "add x25, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x25]\n"
"srshl v23.4s, v23.4s, v0.4s\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "ld1r { v4.4s }, [x24]\n"
"srshl v12.4s, v12.4s, v1.4s\n"
+ "add x24, %x[qp], %[minval]\n"
"srshl v13.4s, v13.4s, v2.4s\n"
+ "ld1r { v5.4s }, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
"srshl v14.4s, v14.4s, v3.4s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x25]\n"
+ "ld1r { v6.4s }, [x24]\n"
+ "cmp x10, #0x10\n"
"srshl v8.4s, v8.4s, v0.4s\n"
"srshl v9.4s, v9.4s, v1.4s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x25]\n"
- "srshl v10.4s, v10.4s, v2.4s\n"
- "srshl v11.4s, v11.4s, v3.4s\n"
- "cmp x10, #0x10\n"
- "srshl v16.4s, v16.4s, v0.4s\n"
- "srshl v17.4s, v17.4s, v1.4s\n"
- "srshl v18.4s, v18.4s, v2.4s\n"
- "srshl v19.4s, v19.4s, v3.4s\n"
"add v23.4s, v23.4s, v4.4s\n"
"add v12.4s, v12.4s, v4.4s\n"
"add v13.4s, v13.4s, v4.4s\n"
- "add v14.4s, v14.4s, v4.4s\n"
- "add v8.4s, v8.4s, v4.4s\n"
- "add v9.4s, v9.4s, v4.4s\n"
- "add v10.4s, v10.4s, v4.4s\n"
- "add v11.4s, v11.4s, v4.4s\n"
- "add v16.4s, v16.4s, v4.4s\n"
- "add v17.4s, v17.4s, v4.4s\n"
- "add v18.4s, v18.4s, v4.4s\n"
- "add v19.4s, v19.4s, v4.4s\n"
"smin v23.4s, v23.4s, v6.4s\n"
"smin v12.4s, v12.4s, v6.4s\n"
"smin v13.4s, v13.4s, v6.4s\n"
- "smin v14.4s, v14.4s, v6.4s\n"
- "smin v8.4s, v8.4s, v6.4s\n"
- "smin v9.4s, v9.4s, v6.4s\n"
- "smin v10.4s, v10.4s, v6.4s\n"
- "smin v11.4s, v11.4s, v6.4s\n"
- "smin v16.4s, v16.4s, v6.4s\n"
- "smin v17.4s, v17.4s, v6.4s\n"
- "smin v18.4s, v18.4s, v6.4s\n"
- "smin v19.4s, v19.4s, v6.4s\n"
"smax v23.4s, v23.4s, v5.4s\n"
"smax v12.4s, v12.4s, v5.4s\n"
"smax v13.4s, v13.4s, v5.4s\n"
+ "add v14.4s, v14.4s, v4.4s\n"
+ "add v8.4s, v8.4s, v4.4s\n"
+ "add v9.4s, v9.4s, v4.4s\n"
+ "smin v14.4s, v14.4s, v6.4s\n"
+ "smin v8.4s, v8.4s, v6.4s\n"
+ "smin v9.4s, v9.4s, v6.4s\n"
"smax v14.4s, v14.4s, v5.4s\n"
"smax v8.4s, v8.4s, v5.4s\n"
"smax v9.4s, v9.4s, v5.4s\n"
+ "srshl v10.4s, v10.4s, v2.4s\n"
+ "srshl v11.4s, v11.4s, v3.4s\n"
+ "srshl v16.4s, v16.4s, v0.4s\n"
+ "srshl v17.4s, v17.4s, v1.4s\n"
+ "add v10.4s, v10.4s, v4.4s\n"
+ "add v11.4s, v11.4s, v4.4s\n"
+ "add v16.4s, v16.4s, v4.4s\n"
+ "smin v10.4s, v10.4s, v6.4s\n"
+ "smin v11.4s, v11.4s, v6.4s\n"
+ "smin v16.4s, v16.4s, v6.4s\n"
"smax v10.4s, v10.4s, v5.4s\n"
"smax v11.4s, v11.4s, v5.4s\n"
"smax v16.4s, v16.4s, v5.4s\n"
- "smax v17.4s, v17.4s, v5.4s\n"
- "smax v18.4s, v18.4s, v5.4s\n"
- "smax v19.4s, v19.4s, v5.4s\n"
+ "add v17.4s, v17.4s, v4.4s\n"
+ "srshl v18.4s, v18.4s, v2.4s\n"
+ "srshl v19.4s, v19.4s, v3.4s\n"
+ "smin v17.4s, v17.4s, v6.4s\n"
"uzp1 v23.8h, v23.8h, v12.8h\n"
+ "add v18.4s, v18.4s, v4.4s\n"
+ "smax v17.4s, v17.4s, v5.4s\n"
+ "add v19.4s, v19.4s, v4.4s\n"
+ "smin v18.4s, v18.4s, v6.4s\n"
"uzp1 v12.8h, v13.8h, v14.8h\n"
+ "smin v19.4s, v19.4s, v6.4s\n"
+ "smax v18.4s, v18.4s, v5.4s\n"
"uzp1 v8.8h, v8.8h, v9.8h\n"
+ "smax v19.4s, v19.4s, v5.4s\n"
"uzp1 v9.8h, v10.8h, v11.8h\n"
"uzp1 v16.8h, v16.8h, v17.8h\n"
"uzp1 v17.8h, v18.8h, v19.8h\n"
@@ -1290,96 +1298,96 @@ void a64_hybrid_s8qs_mmla_6x16 (
"uzp1 v16.16b, v16.16b, v17.16b\n"
"bge 86f\n"
"tbz x10, #3, 81f\n"
- "str d23, [x11], #0x8\n"
- "str d8, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
+ "str d23, [x9], #0x8\n"
+ "str d8, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
"tbz x10, #2, 79f\n"
- "st1 { v23.s }[2], [x11], #0x4\n"
- "st1 { v8.s }[2], [x24], #0x4\n"
- "st1 { v16.s }[2], [x23], #0x4\n"
+ "st1 { v23.s }[2], [x9], #0x4\n"
+ "st1 { v8.s }[2], [x23], #0x4\n"
+ "st1 { v16.s }[2], [x22], #0x4\n"
"tbz x10, #1, 78f\n"
- "st1 { v23.h }[6], [x11], #0x2\n"
- "st1 { v8.h }[6], [x24], #0x2\n"
- "st1 { v16.h }[6], [x23], #0x2\n"
+ "st1 { v23.h }[6], [x9], #0x2\n"
+ "st1 { v8.h }[6], [x23], #0x2\n"
+ "st1 { v16.h }[6], [x22], #0x2\n"
"tbz x10, #0, 85f\n"
- "st1 { v23.b }[14], [x11]\n"
- "st1 { v8.b }[14], [x24]\n"
- "st1 { v16.b }[14], [x23]\n"
+ "st1 { v23.b }[14], [x9]\n"
+ "st1 { v8.b }[14], [x23]\n"
+ "st1 { v16.b }[14], [x22]\n"
"b 85f\n"
"78:" // Height 3: Partial direct writeback: partial_1_12
"tbz x10, #0, 85f\n"
- "st1 { v23.b }[12], [x11]\n"
- "st1 { v8.b }[12], [x24]\n"
- "st1 { v16.b }[12], [x23]\n"
+ "st1 { v23.b }[12], [x9]\n"
+ "st1 { v8.b }[12], [x23]\n"
+ "st1 { v16.b }[12], [x22]\n"
"b 85f\n"
"79:" // Height 3: Partial direct writeback: partial_2_8
"tbz x10, #1, 80f\n"
- "st1 { v23.h }[4], [x11], #0x2\n"
- "st1 { v8.h }[4], [x24], #0x2\n"
- "st1 { v16.h }[4], [x23], #0x2\n"
+ "st1 { v23.h }[4], [x9], #0x2\n"
+ "st1 { v8.h }[4], [x23], #0x2\n"
+ "st1 { v16.h }[4], [x22], #0x2\n"
"tbz x10, #0, 85f\n"
- "st1 { v23.b }[10], [x11]\n"
- "st1 { v8.b }[10], [x24]\n"
- "st1 { v16.b }[10], [x23]\n"
+ "st1 { v23.b }[10], [x9]\n"
+ "st1 { v8.b }[10], [x23]\n"
+ "st1 { v16.b }[10], [x22]\n"
"b 85f\n"
"80:" // Height 3: Partial direct writeback: partial_1_8
"tbz x10, #0, 85f\n"
- "st1 { v23.b }[8], [x11]\n"
- "st1 { v8.b }[8], [x24]\n"
- "st1 { v16.b }[8], [x23]\n"
+ "st1 { v23.b }[8], [x9]\n"
+ "st1 { v8.b }[8], [x23]\n"
+ "st1 { v16.b }[8], [x22]\n"
"b 85f\n"
"81:" // Height 3: Partial direct writeback: partial_4_0
"tbz x10, #2, 83f\n"
- "str s23, [x11], #0x4\n"
- "str s8, [x24], #0x4\n"
- "str s16, [x23], #0x4\n"
+ "str s23, [x9], #0x4\n"
+ "str s8, [x23], #0x4\n"
+ "str s16, [x22], #0x4\n"
"tbz x10, #1, 82f\n"
- "st1 { v23.h }[2], [x11], #0x2\n"
- "st1 { v8.h }[2], [x24], #0x2\n"
- "st1 { v16.h }[2], [x23], #0x2\n"
+ "st1 { v23.h }[2], [x9], #0x2\n"
+ "st1 { v8.h }[2], [x23], #0x2\n"
+ "st1 { v16.h }[2], [x22], #0x2\n"
"tbz x10, #0, 85f\n"
- "st1 { v23.b }[6], [x11]\n"
- "st1 { v8.b }[6], [x24]\n"
- "st1 { v16.b }[6], [x23]\n"
+ "st1 { v23.b }[6], [x9]\n"
+ "st1 { v8.b }[6], [x23]\n"
+ "st1 { v16.b }[6], [x22]\n"
"b 85f\n"
"82:" // Height 3: Partial direct writeback: partial_1_4
"tbz x10, #0, 85f\n"
- "st1 { v23.b }[4], [x11]\n"
- "st1 { v8.b }[4], [x24]\n"
- "st1 { v16.b }[4], [x23]\n"
+ "st1 { v23.b }[4], [x9]\n"
+ "st1 { v8.b }[4], [x23]\n"
+ "st1 { v16.b }[4], [x22]\n"
"b 85f\n"
"83:" // Height 3: Partial direct writeback: partial_2_0
"tbz x10, #1, 84f\n"
- "str h23, [x11], #0x2\n"
- "str h8, [x24], #0x2\n"
- "str h16, [x23], #0x2\n"
+ "str h23, [x9], #0x2\n"
+ "str h8, [x23], #0x2\n"
+ "str h16, [x22], #0x2\n"
"tbz x10, #0, 85f\n"
- "st1 { v23.b }[2], [x11]\n"
- "st1 { v8.b }[2], [x24]\n"
- "st1 { v16.b }[2], [x23]\n"
+ "st1 { v23.b }[2], [x9]\n"
+ "st1 { v8.b }[2], [x23]\n"
+ "st1 { v16.b }[2], [x22]\n"
"b 85f\n"
"84:" // Height 3: Partial direct writeback: partial_1_0
- "str b23, [x11, #0x0]\n"
- "str b8, [x24, #0x0]\n"
- "str b16, [x23, #0x0]\n"
+ "str b23, [x9, #0x0]\n"
+ "str b8, [x23, #0x0]\n"
+ "str b16, [x22, #0x0]\n"
"85:" // Height 3: Partial direct writeback: Done
"b 87f\n"
"86:" // Height 3: Full writeback
- "str q23, [x11, #0x0]\n"
- "add x11, x11, #0x10\n"
- "str q8, [x24, #0x0]\n"
- "str q16, [x23, #0x0]\n"
+ "str q23, [x9, #0x0]\n"
+ "add x9, x9, #0x10\n"
+ "str q8, [x23, #0x0]\n"
+ "str q16, [x22, #0x0]\n"
"87:" // Height 3: Writeback done
"subs x10, x10, #0x10\n"
"bgt 60b\n"
"b 176f\n"
"88:" // Height 4
- "mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
+ "mov x11, %x[col_bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"89:" // Height 4: Column loop
"movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
@@ -1398,312 +1406,312 @@ void a64_hybrid_s8qs_mmla_6x16 (
"movi v22.4s, #0x0\n"
"movi v23.4s, #0x0\n"
"90:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"91:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 92f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 93f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 93f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
"b 93f\n"
"92:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
"93:" // Height 4: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 96f\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q2, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q3, [x24, #0x0]\n"
- "ldr q4, [x23, #0x0]\n"
- "ldr q7, [x9, #0x0]\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "ldr q2, [x24, #0x0]\n"
+ "cmp x26, #0x20\n"
"blt 95f\n"
"94:" // Height 4: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x23, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
- "sub x27, x27, #0x10\n"
+ "ldr q4, [x22, #0x0]\n"
+ "add x24, x24, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x23, x23, #0x10\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "ldr q6, [x28, #0x10]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "cmp x26, #0x20\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- "ldr q6, [x9, #0x30]\n"
+ "ldr q6, [x28, #0x30]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "trn2 v3.2d, v3.2d, v4.2d\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
- "ldr q7, [x9, #0x40]\n"
+ "ldr q7, [x28, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
- "add x26, x26, #0x10\n"
".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- "ldr q6, [x9, #0x50]\n"
+ "ldr q6, [x28, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
- "ldr q7, [x9, #0x60]\n"
+ "ldr q7, [x28, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- "ldr q6, [x9, #0x70]\n"
+ "ldr q6, [x28, #0x70]\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "add x23, x23, #0x10\n"
- "ldr q4, [x23, #0x0]\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
- "ldr q7, [x9, #0x80]\n"
+ "ldr q7, [x28, #0x80]\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
- "ldr q6, [x9, #0x90]\n"
- "ldr q2, [x25, #0x0]\n"
+ "ldr q6, [x28, #0x90]\n"
+ "ldr q2, [x24, #0x0]\n"
".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
".inst 0x4e87a470 // smmla v16.4s, v3.16b, v7.16b\n"
- "ldr q7, [x9, #0xa0]\n"
+ "ldr q7, [x28, #0xa0]\n"
".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
- "cmp x27, #0x20\n"
".inst 0x4e86a474 // smmla v20.4s, v3.16b, v6.16b\n"
- "ldr q6, [x9, #0xb0]\n"
+ "ldr q6, [x28, #0xb0]\n"
".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4e87a471 // smmla v17.4s, v3.16b, v7.16b\n"
- "ldr q7, [x9, #0xc0]\n"
+ "ldr q7, [x28, #0xc0]\n"
".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4e86a475 // smmla v21.4s, v3.16b, v6.16b\n"
- "ldr q6, [x9, #0xd0]\n"
+ "ldr q6, [x28, #0xd0]\n"
".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e87a472 // smmla v18.4s, v3.16b, v7.16b\n"
- "ldr q7, [x9, #0xe0]\n"
+ "ldr q7, [x28, #0xe0]\n"
".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e86a476 // smmla v22.4s, v3.16b, v6.16b\n"
- "ldr q6, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q6, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
".inst 0x4e87a473 // smmla v19.4s, v3.16b, v7.16b\n"
- "ldr q7, [x9, #0x0]\n"
".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
- "ldr q1, [x26, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
".inst 0x4e86a477 // smmla v23.4s, v3.16b, v6.16b\n"
- "ldr q3, [x24, #0x0]\n"
- "ldr q6, [x9, #0x10]\n"
"bge 94b\n"
"95:" // Height 4: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x23, #0x0]\n"
+ "sub x26, x26, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
- "add x26, x26, #0x10\n"
+ "ldr q4, [x22, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x24, x24, #0x10\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "ldr q6, [x28, #0x10]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x22, x22, #0x10\n"
".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- "ldr q6, [x9, #0x30]\n"
+ "ldr q6, [x28, #0x30]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "trn2 v3.2d, v3.2d, v4.2d\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
- "ldr q7, [x9, #0x40]\n"
+ "ldr q7, [x28, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- "ldr q6, [x9, #0x50]\n"
+ "ldr q6, [x28, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
- "ldr q7, [x9, #0x60]\n"
+ "ldr q7, [x28, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
- "add x23, x23, #0x10\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- "ldr q6, [x9, #0x70]\n"
+ "ldr q6, [x28, #0x70]\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "sub x27, x27, #0x10\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
- "ldr q7, [x9, #0x80]\n"
+ "ldr q7, [x28, #0x80]\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
- "ldr q6, [x9, #0x90]\n"
+ "ldr q6, [x28, #0x90]\n"
".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4e87a470 // smmla v16.4s, v3.16b, v7.16b\n"
- "ldr q7, [x9, #0xa0]\n"
+ "ldr q7, [x28, #0xa0]\n"
".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e86a474 // smmla v20.4s, v3.16b, v6.16b\n"
- "ldr q6, [x9, #0xb0]\n"
+ "ldr q6, [x28, #0xb0]\n"
".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e87a471 // smmla v17.4s, v3.16b, v7.16b\n"
- "ldr q7, [x9, #0xc0]\n"
+ "ldr q7, [x28, #0xc0]\n"
".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
".inst 0x4e86a475 // smmla v21.4s, v3.16b, v6.16b\n"
- "ldr q6, [x9, #0xd0]\n"
+ "ldr q6, [x28, #0xd0]\n"
".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
".inst 0x4e87a472 // smmla v18.4s, v3.16b, v7.16b\n"
- "ldr q7, [x9, #0xe0]\n"
+ "ldr q7, [x28, #0xe0]\n"
".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
".inst 0x4e86a476 // smmla v22.4s, v3.16b, v6.16b\n"
- "ldr q6, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q6, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
".inst 0x4e87a473 // smmla v19.4s, v3.16b, v7.16b\n"
".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
".inst 0x4e86a477 // smmla v23.4s, v3.16b, v6.16b\n"
"96:" // Height 4: Multiply loop: Main loop skip
- "cbz x27, 103f\n"
- "cmp x27, #0x8\n"
+ "cbz x26, 103f\n"
+ "cmp x26, #0x8\n"
"blt 98f\n"
"97:" // Height 4: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d2, [x25], #0x8\n"
+ "ldr d1, [x25], #0x8\n"
+ "sub x26, x26, #0x8\n"
+ "ldr d2, [x24], #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "sub x27, x27, #0x8\n"
- "ldr d3, [x24], #0x8\n"
- "ldr d4, [x23], #0x8\n"
+ "ldr d3, [x23], #0x8\n"
+ "cmp x26, #0x8\n"
+ "ldr d4, [x22], #0x8\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
- "cmp x27, #0x8\n"
- "ldr q6, [x9, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q6, [x28, #0x0]\n"
+ "ldr q7, [x28, #0x10]\n"
".inst 0x4e86a408 // smmla v8.4s, v0.16b, v6.16b\n"
".inst 0x4e86a450 // smmla v16.4s, v2.16b, v6.16b\n"
- "ldr q6, [x9, #0x20]\n"
+ "ldr q6, [x28, #0x20]\n"
".inst 0x4e87a40c // smmla v12.4s, v0.16b, v7.16b\n"
".inst 0x4e87a454 // smmla v20.4s, v2.16b, v7.16b\n"
- "ldr q7, [x9, #0x30]\n"
+ "ldr q7, [x28, #0x30]\n"
".inst 0x4e86a409 // smmla v9.4s, v0.16b, v6.16b\n"
".inst 0x4e86a451 // smmla v17.4s, v2.16b, v6.16b\n"
- "ldr q6, [x9, #0x40]\n"
+ "ldr q6, [x28, #0x40]\n"
".inst 0x4e87a40d // smmla v13.4s, v0.16b, v7.16b\n"
".inst 0x4e87a455 // smmla v21.4s, v2.16b, v7.16b\n"
- "ldr q7, [x9, #0x50]\n"
+ "ldr q7, [x28, #0x50]\n"
".inst 0x4e86a40a // smmla v10.4s, v0.16b, v6.16b\n"
".inst 0x4e86a452 // smmla v18.4s, v2.16b, v6.16b\n"
- "ldr q6, [x9, #0x60]\n"
+ "ldr q6, [x28, #0x60]\n"
".inst 0x4e87a40e // smmla v14.4s, v0.16b, v7.16b\n"
".inst 0x4e87a456 // smmla v22.4s, v2.16b, v7.16b\n"
- "ldr q7, [x9, #0x70]\n"
- "add x9, x9, #0x80\n"
+ "ldr q7, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
".inst 0x4e86a40b // smmla v11.4s, v0.16b, v6.16b\n"
".inst 0x4e86a453 // smmla v19.4s, v2.16b, v6.16b\n"
".inst 0x4e87a40f // smmla v15.4s, v0.16b, v7.16b\n"
".inst 0x4e87a457 // smmla v23.4s, v2.16b, v7.16b\n"
"bge 97b\n"
+ "cbz x26, 103f\n"
"98:" // Height 4: Multiply loop: Skip odd blocks
- "cbz x27, 103f\n"
- "tbz x27, #2, 100f\n"
- "ldr s1, [x26], #0x4\n"
- "ldr s2, [x25], #0x4\n"
- "ldr s3, [x24], #0x4\n"
- "ldr s4, [x23], #0x4\n"
- "tbz x27, #1, 99f\n"
- "ld1 { v1.h }[2], [x26], #0x2\n"
- "ld1 { v2.h }[2], [x25], #0x2\n"
- "ld1 { v3.h }[2], [x24], #0x2\n"
- "ld1 { v4.h }[2], [x23], #0x2\n"
- "tbz x27, #0, 102f\n"
- "ld1 { v1.b }[6], [x26]\n"
- "ld1 { v2.b }[6], [x25]\n"
- "ld1 { v3.b }[6], [x24]\n"
- "ld1 { v4.b }[6], [x23]\n"
+ "tbz x26, #2, 100f\n"
+ "ldr s1, [x25], #0x4\n"
+ "ldr s2, [x24], #0x4\n"
+ "ldr s3, [x23], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
+ "tbz x26, #1, 99f\n"
+ "ld1 { v1.h }[2], [x25], #0x2\n"
+ "ld1 { v2.h }[2], [x24], #0x2\n"
+ "ld1 { v3.h }[2], [x23], #0x2\n"
+ "ld1 { v4.h }[2], [x22], #0x2\n"
+ "tbz x26, #0, 102f\n"
+ "ld1 { v1.b }[6], [x25]\n"
+ "ld1 { v2.b }[6], [x24]\n"
+ "ld1 { v3.b }[6], [x23]\n"
+ "ld1 { v4.b }[6], [x22]\n"
"b 102f\n"
"99:" // Height 4: Multiply loop: Ragged operand read: partial_1_4
- "tbz x27, #0, 102f\n"
- "ld1 { v1.b }[4], [x26]\n"
- "ld1 { v2.b }[4], [x25]\n"
- "ld1 { v3.b }[4], [x24]\n"
- "ld1 { v4.b }[4], [x23]\n"
+ "tbz x26, #0, 102f\n"
+ "ld1 { v1.b }[4], [x25]\n"
+ "ld1 { v2.b }[4], [x24]\n"
+ "ld1 { v3.b }[4], [x23]\n"
+ "ld1 { v4.b }[4], [x22]\n"
"b 102f\n"
"100:" // Height 4: Multiply loop: Ragged operand read: partial_2_0
- "tbz x27, #1, 101f\n"
- "ldr h1, [x26], #0x2\n"
- "ldr h2, [x25], #0x2\n"
- "ldr h3, [x24], #0x2\n"
- "ldr h4, [x23], #0x2\n"
- "tbz x27, #0, 102f\n"
- "ld1 { v1.b }[2], [x26]\n"
- "ld1 { v2.b }[2], [x25]\n"
- "ld1 { v3.b }[2], [x24]\n"
- "ld1 { v4.b }[2], [x23]\n"
+ "tbz x26, #1, 101f\n"
+ "ldr h1, [x25], #0x2\n"
+ "ldr h2, [x24], #0x2\n"
+ "ldr h3, [x23], #0x2\n"
+ "ldr h4, [x22], #0x2\n"
+ "tbz x26, #0, 102f\n"
+ "ld1 { v1.b }[2], [x25]\n"
+ "ld1 { v2.b }[2], [x24]\n"
+ "ld1 { v3.b }[2], [x23]\n"
+ "ld1 { v4.b }[2], [x22]\n"
"b 102f\n"
"101:" // Height 4: Multiply loop: Ragged operand read: partial_1_0
- "ldr b1, [x26, #0x0]\n"
- "ldr b2, [x25, #0x0]\n"
- "ldr b3, [x24, #0x0]\n"
- "ldr b4, [x23, #0x0]\n"
+ "ldr b1, [x25, #0x0]\n"
+ "ldr b2, [x24, #0x0]\n"
+ "ldr b3, [x23, #0x0]\n"
+ "ldr b4, [x22, #0x0]\n"
"102:" // Height 4: Multiply loop: Ragged operand read: Done
- "ldr q7, [x9, #0x0]\n"
- "ldr q6, [x9, #0x10]\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q7, [x28, #0x0]\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q6, [x28, #0x10]\n"
".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- "ldr q6, [x9, #0x30]\n"
+ "ldr q6, [x28, #0x30]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
- "ldr q7, [x9, #0x40]\n"
+ "ldr q7, [x28, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- "ldr q6, [x9, #0x50]\n"
+ "ldr q6, [x28, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
- "ldr q7, [x9, #0x60]\n"
+ "ldr q7, [x28, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- "ldr q6, [x9, #0x70]\n"
- "add x9, x9, #0x80\n"
+ "ldr q6, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
"103:" // Height 4: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 91b\n"
- "ldr q0, [x14, #0x0]\n"
- "ldr q1, [x14, #0x10]\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
- "ldr q2, [x14, #0x20]\n"
- "ldr q3, [x14, #0x30]\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
+ "add x23, x9, x19\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
+ "ldr q0, [x11, #0x0]\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
- "add x24, x11, x20\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
+ "ldr q1, [x11, #0x10]\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
+ "ldr q2, [x11, #0x20]\n"
"uzp1 v15.2d, v16.2d, v20.2d\n"
+ "ldr q3, [x11, #0x30]\n"
+ "add x11, x11, #0x40\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
- "prfm pstl1keep, [x11, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
- "prfm pstl1keep, [x23, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
"uzp2 v18.2d, v18.2d, v22.2d\n"
- "add x14, x14, #0x40\n"
"uzp1 v22.2d, v19.2d, v23.2d\n"
"uzp2 v19.2d, v19.2d, v23.2d\n"
"mov v23.16b, v7.16b\n"
@@ -1731,20 +1739,20 @@ void a64_hybrid_s8qs_mmla_6x16 (
"ldr q2, [x12, #0x20]\n"
"ldr q6, [x13, #0x20]\n"
"ldr q3, [x12, #0x30]\n"
- "ldr q7, [x13, #0x30]\n"
"add x12, x12, #0x40\n"
+ "ldr q7, [x13, #0x30]\n"
"add x13, x13, #0x40\n"
"b 105f\n"
"104:" // Height 4: per layer parameters
- "add x25, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x25]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x25]\n"
+ "add x24, %x[qp], %[per_layer_right_shift]\n"
+ "ld1r { v0.4s }, [x24]\n"
"mov v1.16b, v0.16b\n"
- "mov v5.16b, v4.16b\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v4.4s }, [x24]\n"
"mov v2.16b, v0.16b\n"
- "mov v6.16b, v4.16b\n"
"mov v3.16b, v0.16b\n"
+ "mov v5.16b, v4.16b\n"
+ "mov v6.16b, v4.16b\n"
"mov v7.16b, v4.16b\n"
"105:" // Height 4: parameters loaded
"sqrdmulh v23.4s, v23.4s, v4.4s\n"
@@ -1767,125 +1775,125 @@ void a64_hybrid_s8qs_mmla_6x16 (
"and v4.16b, v23.16b, v0.16b\n"
"and v5.16b, v12.16b, v1.16b\n"
"and v6.16b, v13.16b, v2.16b\n"
- "and v7.16b, v14.16b, v3.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v23.4s, v23.4s, v4.4s\n"
"sqadd v12.4s, v12.4s, v5.4s\n"
"sqadd v13.4s, v13.4s, v6.4s\n"
- "sqadd v14.4s, v14.4s, v7.4s\n"
+ "and v7.16b, v14.16b, v3.16b\n"
"and v4.16b, v8.16b, v0.16b\n"
"and v5.16b, v9.16b, v1.16b\n"
- "and v6.16b, v10.16b, v2.16b\n"
- "and v7.16b, v11.16b, v3.16b\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
+ "sqadd v14.4s, v14.4s, v7.4s\n"
"sqadd v8.4s, v8.4s, v4.4s\n"
"sqadd v9.4s, v9.4s, v5.4s\n"
+ "and v6.16b, v10.16b, v2.16b\n"
+ "and v7.16b, v11.16b, v3.16b\n"
+ "and v4.16b, v15.16b, v0.16b\n"
+ "sshr v6.4s, v6.4s, #0x1f\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
"sqadd v10.4s, v10.4s, v6.4s\n"
"sqadd v11.4s, v11.4s, v7.4s\n"
- "and v4.16b, v15.16b, v0.16b\n"
+ "sqadd v15.4s, v15.4s, v4.4s\n"
"and v5.16b, v20.16b, v1.16b\n"
"and v6.16b, v21.16b, v2.16b\n"
"and v7.16b, v22.16b, v3.16b\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
- "sqadd v15.4s, v15.4s, v4.4s\n"
"sqadd v20.4s, v20.4s, v5.4s\n"
"sqadd v21.4s, v21.4s, v6.4s\n"
"sqadd v22.4s, v22.4s, v7.4s\n"
"and v4.16b, v16.16b, v0.16b\n"
"and v5.16b, v17.16b, v1.16b\n"
"and v6.16b, v18.16b, v2.16b\n"
- "and v7.16b, v19.16b, v3.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v16.4s, v16.4s, v4.4s\n"
"sqadd v17.4s, v17.4s, v5.4s\n"
"sqadd v18.4s, v18.4s, v6.4s\n"
+ "and v7.16b, v19.16b, v3.16b\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v19.4s, v19.4s, v7.4s\n"
"106:" // Height 4: no shift correction
- "add x25, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x25]\n"
"srshl v23.4s, v23.4s, v0.4s\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "ld1r { v4.4s }, [x24]\n"
"srshl v12.4s, v12.4s, v1.4s\n"
+ "add x24, %x[qp], %[minval]\n"
"srshl v13.4s, v13.4s, v2.4s\n"
+ "ld1r { v5.4s }, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
"srshl v14.4s, v14.4s, v3.4s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x25]\n"
+ "ld1r { v6.4s }, [x24]\n"
+ "cmp x10, #0x10\n"
"srshl v8.4s, v8.4s, v0.4s\n"
"srshl v9.4s, v9.4s, v1.4s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x25]\n"
- "srshl v10.4s, v10.4s, v2.4s\n"
- "srshl v11.4s, v11.4s, v3.4s\n"
- "cmp x10, #0x10\n"
- "srshl v15.4s, v15.4s, v0.4s\n"
- "srshl v20.4s, v20.4s, v1.4s\n"
- "srshl v21.4s, v21.4s, v2.4s\n"
- "srshl v22.4s, v22.4s, v3.4s\n"
- "srshl v16.4s, v16.4s, v0.4s\n"
- "srshl v17.4s, v17.4s, v1.4s\n"
- "srshl v18.4s, v18.4s, v2.4s\n"
- "srshl v19.4s, v19.4s, v3.4s\n"
"add v23.4s, v23.4s, v4.4s\n"
"add v12.4s, v12.4s, v4.4s\n"
"add v13.4s, v13.4s, v4.4s\n"
- "add v14.4s, v14.4s, v4.4s\n"
- "add v8.4s, v8.4s, v4.4s\n"
- "add v9.4s, v9.4s, v4.4s\n"
- "add v10.4s, v10.4s, v4.4s\n"
- "add v11.4s, v11.4s, v4.4s\n"
- "add v15.4s, v15.4s, v4.4s\n"
- "add v20.4s, v20.4s, v4.4s\n"
- "add v21.4s, v21.4s, v4.4s\n"
- "add v22.4s, v22.4s, v4.4s\n"
- "add v16.4s, v16.4s, v4.4s\n"
- "add v17.4s, v17.4s, v4.4s\n"
- "add v18.4s, v18.4s, v4.4s\n"
- "add v19.4s, v19.4s, v4.4s\n"
"smin v23.4s, v23.4s, v6.4s\n"
"smin v12.4s, v12.4s, v6.4s\n"
"smin v13.4s, v13.4s, v6.4s\n"
- "smin v14.4s, v14.4s, v6.4s\n"
- "smin v8.4s, v8.4s, v6.4s\n"
- "smin v9.4s, v9.4s, v6.4s\n"
- "smin v10.4s, v10.4s, v6.4s\n"
- "smin v11.4s, v11.4s, v6.4s\n"
- "smin v15.4s, v15.4s, v6.4s\n"
- "smin v20.4s, v20.4s, v6.4s\n"
- "smin v21.4s, v21.4s, v6.4s\n"
- "smin v22.4s, v22.4s, v6.4s\n"
- "smin v16.4s, v16.4s, v6.4s\n"
- "smin v17.4s, v17.4s, v6.4s\n"
- "smin v18.4s, v18.4s, v6.4s\n"
- "smin v19.4s, v19.4s, v6.4s\n"
"smax v23.4s, v23.4s, v5.4s\n"
"smax v12.4s, v12.4s, v5.4s\n"
"smax v13.4s, v13.4s, v5.4s\n"
+ "add v14.4s, v14.4s, v4.4s\n"
+ "add v8.4s, v8.4s, v4.4s\n"
+ "add v9.4s, v9.4s, v4.4s\n"
+ "smin v14.4s, v14.4s, v6.4s\n"
+ "smin v8.4s, v8.4s, v6.4s\n"
+ "smin v9.4s, v9.4s, v6.4s\n"
"smax v14.4s, v14.4s, v5.4s\n"
"smax v8.4s, v8.4s, v5.4s\n"
"smax v9.4s, v9.4s, v5.4s\n"
+ "srshl v10.4s, v10.4s, v2.4s\n"
+ "srshl v11.4s, v11.4s, v3.4s\n"
+ "srshl v15.4s, v15.4s, v0.4s\n"
+ "srshl v20.4s, v20.4s, v1.4s\n"
+ "add v10.4s, v10.4s, v4.4s\n"
+ "add v11.4s, v11.4s, v4.4s\n"
+ "add v15.4s, v15.4s, v4.4s\n"
+ "smin v10.4s, v10.4s, v6.4s\n"
+ "smin v11.4s, v11.4s, v6.4s\n"
+ "smin v15.4s, v15.4s, v6.4s\n"
"smax v10.4s, v10.4s, v5.4s\n"
"smax v11.4s, v11.4s, v5.4s\n"
"smax v15.4s, v15.4s, v5.4s\n"
+ "add v20.4s, v20.4s, v4.4s\n"
+ "srshl v21.4s, v21.4s, v2.4s\n"
+ "srshl v22.4s, v22.4s, v3.4s\n"
+ "smin v20.4s, v20.4s, v6.4s\n"
+ "srshl v16.4s, v16.4s, v0.4s\n"
+ "add v21.4s, v21.4s, v4.4s\n"
"smax v20.4s, v20.4s, v5.4s\n"
+ "add v22.4s, v22.4s, v4.4s\n"
+ "smin v21.4s, v21.4s, v6.4s\n"
+ "add v16.4s, v16.4s, v4.4s\n"
+ "smin v22.4s, v22.4s, v6.4s\n"
"smax v21.4s, v21.4s, v5.4s\n"
+ "smin v16.4s, v16.4s, v6.4s\n"
"smax v22.4s, v22.4s, v5.4s\n"
+ "srshl v17.4s, v17.4s, v1.4s\n"
"smax v16.4s, v16.4s, v5.4s\n"
+ "srshl v18.4s, v18.4s, v2.4s\n"
+ "srshl v19.4s, v19.4s, v3.4s\n"
+ "add v17.4s, v17.4s, v4.4s\n"
+ "uzp1 v23.8h, v23.8h, v12.8h\n"
+ "add v18.4s, v18.4s, v4.4s\n"
+ "smin v17.4s, v17.4s, v6.4s\n"
+ "add v19.4s, v19.4s, v4.4s\n"
+ "smin v18.4s, v18.4s, v6.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
+ "smin v19.4s, v19.4s, v6.4s\n"
"smax v18.4s, v18.4s, v5.4s\n"
- "smax v19.4s, v19.4s, v5.4s\n"
- "uzp1 v23.8h, v23.8h, v12.8h\n"
"uzp1 v12.8h, v13.8h, v14.8h\n"
+ "smax v19.4s, v19.4s, v5.4s\n"
"uzp1 v8.8h, v8.8h, v9.8h\n"
"uzp1 v9.8h, v10.8h, v11.8h\n"
"uzp1 v15.8h, v15.8h, v20.8h\n"
@@ -1898,112 +1906,112 @@ void a64_hybrid_s8qs_mmla_6x16 (
"uzp1 v16.16b, v16.16b, v17.16b\n"
"bge 115f\n"
"tbz x10, #3, 110f\n"
- "str d23, [x11], #0x8\n"
- "str d8, [x24], #0x8\n"
- "str d15, [x23], #0x8\n"
- "str d16, [x22], #0x8\n"
+ "str d23, [x9], #0x8\n"
+ "str d8, [x23], #0x8\n"
+ "str d15, [x22], #0x8\n"
+ "str d16, [x21], #0x8\n"
"tbz x10, #2, 108f\n"
- "st1 { v23.s }[2], [x11], #0x4\n"
- "st1 { v8.s }[2], [x24], #0x4\n"
- "st1 { v15.s }[2], [x23], #0x4\n"
- "st1 { v16.s }[2], [x22], #0x4\n"
+ "st1 { v23.s }[2], [x9], #0x4\n"
+ "st1 { v8.s }[2], [x23], #0x4\n"
+ "st1 { v15.s }[2], [x22], #0x4\n"
+ "st1 { v16.s }[2], [x21], #0x4\n"
"tbz x10, #1, 107f\n"
- "st1 { v23.h }[6], [x11], #0x2\n"
- "st1 { v8.h }[6], [x24], #0x2\n"
- "st1 { v15.h }[6], [x23], #0x2\n"
- "st1 { v16.h }[6], [x22], #0x2\n"
+ "st1 { v23.h }[6], [x9], #0x2\n"
+ "st1 { v8.h }[6], [x23], #0x2\n"
+ "st1 { v15.h }[6], [x22], #0x2\n"
+ "st1 { v16.h }[6], [x21], #0x2\n"
"tbz x10, #0, 114f\n"
- "st1 { v23.b }[14], [x11]\n"
- "st1 { v8.b }[14], [x24]\n"
- "st1 { v15.b }[14], [x23]\n"
- "st1 { v16.b }[14], [x22]\n"
+ "st1 { v23.b }[14], [x9]\n"
+ "st1 { v8.b }[14], [x23]\n"
+ "st1 { v15.b }[14], [x22]\n"
+ "st1 { v16.b }[14], [x21]\n"
"b 114f\n"
"107:" // Height 4: Partial direct writeback: partial_1_12
"tbz x10, #0, 114f\n"
- "st1 { v23.b }[12], [x11]\n"
- "st1 { v8.b }[12], [x24]\n"
- "st1 { v15.b }[12], [x23]\n"
- "st1 { v16.b }[12], [x22]\n"
+ "st1 { v23.b }[12], [x9]\n"
+ "st1 { v8.b }[12], [x23]\n"
+ "st1 { v15.b }[12], [x22]\n"
+ "st1 { v16.b }[12], [x21]\n"
"b 114f\n"
"108:" // Height 4: Partial direct writeback: partial_2_8
"tbz x10, #1, 109f\n"
- "st1 { v23.h }[4], [x11], #0x2\n"
- "st1 { v8.h }[4], [x24], #0x2\n"
- "st1 { v15.h }[4], [x23], #0x2\n"
- "st1 { v16.h }[4], [x22], #0x2\n"
+ "st1 { v23.h }[4], [x9], #0x2\n"
+ "st1 { v8.h }[4], [x23], #0x2\n"
+ "st1 { v15.h }[4], [x22], #0x2\n"
+ "st1 { v16.h }[4], [x21], #0x2\n"
"tbz x10, #0, 114f\n"
- "st1 { v23.b }[10], [x11]\n"
- "st1 { v8.b }[10], [x24]\n"
- "st1 { v15.b }[10], [x23]\n"
- "st1 { v16.b }[10], [x22]\n"
+ "st1 { v23.b }[10], [x9]\n"
+ "st1 { v8.b }[10], [x23]\n"
+ "st1 { v15.b }[10], [x22]\n"
+ "st1 { v16.b }[10], [x21]\n"
"b 114f\n"
"109:" // Height 4: Partial direct writeback: partial_1_8
"tbz x10, #0, 114f\n"
- "st1 { v23.b }[8], [x11]\n"
- "st1 { v8.b }[8], [x24]\n"
- "st1 { v15.b }[8], [x23]\n"
- "st1 { v16.b }[8], [x22]\n"
+ "st1 { v23.b }[8], [x9]\n"
+ "st1 { v8.b }[8], [x23]\n"
+ "st1 { v15.b }[8], [x22]\n"
+ "st1 { v16.b }[8], [x21]\n"
"b 114f\n"
"110:" // Height 4: Partial direct writeback: partial_4_0
"tbz x10, #2, 112f\n"
- "str s23, [x11], #0x4\n"
- "str s8, [x24], #0x4\n"
- "str s15, [x23], #0x4\n"
- "str s16, [x22], #0x4\n"
+ "str s23, [x9], #0x4\n"
+ "str s8, [x23], #0x4\n"
+ "str s15, [x22], #0x4\n"
+ "str s16, [x21], #0x4\n"
"tbz x10, #1, 111f\n"
- "st1 { v23.h }[2], [x11], #0x2\n"
- "st1 { v8.h }[2], [x24], #0x2\n"
- "st1 { v15.h }[2], [x23], #0x2\n"
- "st1 { v16.h }[2], [x22], #0x2\n"
+ "st1 { v23.h }[2], [x9], #0x2\n"
+ "st1 { v8.h }[2], [x23], #0x2\n"
+ "st1 { v15.h }[2], [x22], #0x2\n"
+ "st1 { v16.h }[2], [x21], #0x2\n"
"tbz x10, #0, 114f\n"
- "st1 { v23.b }[6], [x11]\n"
- "st1 { v8.b }[6], [x24]\n"
- "st1 { v15.b }[6], [x23]\n"
- "st1 { v16.b }[6], [x22]\n"
+ "st1 { v23.b }[6], [x9]\n"
+ "st1 { v8.b }[6], [x23]\n"
+ "st1 { v15.b }[6], [x22]\n"
+ "st1 { v16.b }[6], [x21]\n"
"b 114f\n"
"111:" // Height 4: Partial direct writeback: partial_1_4
"tbz x10, #0, 114f\n"
- "st1 { v23.b }[4], [x11]\n"
- "st1 { v8.b }[4], [x24]\n"
- "st1 { v15.b }[4], [x23]\n"
- "st1 { v16.b }[4], [x22]\n"
+ "st1 { v23.b }[4], [x9]\n"
+ "st1 { v8.b }[4], [x23]\n"
+ "st1 { v15.b }[4], [x22]\n"
+ "st1 { v16.b }[4], [x21]\n"
"b 114f\n"
"112:" // Height 4: Partial direct writeback: partial_2_0
"tbz x10, #1, 113f\n"
- "str h23, [x11], #0x2\n"
- "str h8, [x24], #0x2\n"
- "str h15, [x23], #0x2\n"
- "str h16, [x22], #0x2\n"
+ "str h23, [x9], #0x2\n"
+ "str h8, [x23], #0x2\n"
+ "str h15, [x22], #0x2\n"
+ "str h16, [x21], #0x2\n"
"tbz x10, #0, 114f\n"
- "st1 { v23.b }[2], [x11]\n"
- "st1 { v8.b }[2], [x24]\n"
- "st1 { v15.b }[2], [x23]\n"
- "st1 { v16.b }[2], [x22]\n"
+ "st1 { v23.b }[2], [x9]\n"
+ "st1 { v8.b }[2], [x23]\n"
+ "st1 { v15.b }[2], [x22]\n"
+ "st1 { v16.b }[2], [x21]\n"
"b 114f\n"
"113:" // Height 4: Partial direct writeback: partial_1_0
- "str b23, [x11, #0x0]\n"
- "str b8, [x24, #0x0]\n"
- "str b15, [x23, #0x0]\n"
- "str b16, [x22, #0x0]\n"
+ "str b23, [x9, #0x0]\n"
+ "str b8, [x23, #0x0]\n"
+ "str b15, [x22, #0x0]\n"
+ "str b16, [x21, #0x0]\n"
"114:" // Height 4: Partial direct writeback: Done
"b 116f\n"
"115:" // Height 4: Full writeback
- "str q23, [x11, #0x0]\n"
- "add x11, x11, #0x10\n"
- "str q8, [x24, #0x0]\n"
- "str q15, [x23, #0x0]\n"
- "str q16, [x22, #0x0]\n"
+ "str q23, [x9, #0x0]\n"
+ "add x9, x9, #0x10\n"
+ "str q8, [x23, #0x0]\n"
+ "str q15, [x22, #0x0]\n"
+ "str q16, [x21, #0x0]\n"
"116:" // Height 4: Writeback done
"subs x10, x10, #0x10\n"
"bgt 89b\n"
"b 176f\n"
"117:" // Height 5
- "mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
+ "mov x11, %x[col_bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"118:" // Height 5: Column loop
"movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
@@ -2030,210 +2038,212 @@ void a64_hybrid_s8qs_mmla_6x16 (
"movi v30.4s, #0x0\n"
"movi v31.4s, #0x0\n"
"119:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"120:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 121f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 122f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 122f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
"b 122f\n"
"121:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
"122:" // Height 5: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 125f\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q2, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q3, [x24, #0x0]\n"
- "ldr q4, [x23, #0x0]\n"
- "ldr q5, [x22, #0x0]\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "cmp x26, #0x20\n"
"blt 124f\n"
"123:" // Height 5: Multiply loop: Main loop head
+ "movi v6.16b, #0x0\n"
+ "ldr q2, [x24, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x23, #0x0]\n"
+ "add x24, x24, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ "ldr q4, [x22, #0x0]\n"
+ "add x23, x23, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q5, [x21, #0x0]\n"
+ "add x22, x22, #0x10\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "sub x27, x27, #0x10\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x21, x21, #0x10\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x26, x26, #0x10\n"
"trn2 v5.2d, v5.2d, v6.2d\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q6, [x28, #0x10]\n"
+ "cmp x26, #0x20\n"
+ ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e87a498 // smmla v24.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x4e86a49c // smmla v28.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x30]\n"
+ "ldr q6, [x28, #0x30]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
".inst 0x4e87a499 // smmla v25.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x40]\n"
- "add x24, x24, #0x10\n"
+ "ldr q7, [x28, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
".inst 0x4e86a49d // smmla v29.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x50]\n"
+ "ldr q6, [x28, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "cmp x27, #0x20\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49a // smmla v26.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x60]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q7, [x28, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e86a49e // smmla v30.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x70]\n"
+ "ldr q6, [x28, #0x70]\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49b // smmla v27.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
+ "ldr q7, [x28, #0x80]\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
- "ldr q2, [x25, #0x0]\n"
".inst 0x4e86a49f // smmla v31.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x90]\n"
- "ldr q4, [x23, #0x0]\n"
+ "ldr q6, [x28, #0x90]\n"
".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
".inst 0x4e87a470 // smmla v16.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4b8 // smmla v24.4s, v5.16b, v7.16b\n"
- "ldr q7, [x9, #0xa0]\n"
+ "ldr q7, [x28, #0xa0]\n"
".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
".inst 0x4e86a474 // smmla v20.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4bc // smmla v28.4s, v5.16b, v6.16b\n"
- "ldr q6, [x9, #0xb0]\n"
+ "ldr q6, [x28, #0xb0]\n"
".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
".inst 0x4e87a471 // smmla v17.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4b9 // smmla v25.4s, v5.16b, v7.16b\n"
- "ldr q7, [x9, #0xc0]\n"
+ "ldr q7, [x28, #0xc0]\n"
".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
".inst 0x4e86a475 // smmla v21.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4bd // smmla v29.4s, v5.16b, v6.16b\n"
- "ldr q6, [x9, #0xd0]\n"
+ "ldr q6, [x28, #0xd0]\n"
".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
".inst 0x4e87a472 // smmla v18.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4ba // smmla v26.4s, v5.16b, v7.16b\n"
- "ldr q7, [x9, #0xe0]\n"
+ "ldr q7, [x28, #0xe0]\n"
".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
".inst 0x4e86a476 // smmla v22.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4be // smmla v30.4s, v5.16b, v6.16b\n"
- "ldr q6, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q6, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
".inst 0x4e87a473 // smmla v19.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4bb // smmla v27.4s, v5.16b, v7.16b\n"
- "ldr q7, [x9, #0x0]\n"
".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
- "ldr q1, [x26, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
".inst 0x4e86a477 // smmla v23.4s, v3.16b, v6.16b\n"
- "ldr q3, [x24, #0x0]\n"
".inst 0x4e86a4bf // smmla v31.4s, v5.16b, v6.16b\n"
- "ldr q5, [x22, #0x0]\n"
"bge 123b\n"
"124:" // Height 5: Multiply loop: Single iteration only
+ "movi v6.16b, #0x0\n"
+ "ldr q2, [x24, #0x0]\n"
+ "sub x26, x26, #0x10\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x23, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ "ldr q4, [x22, #0x0]\n"
+ "add x24, x24, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q5, [x21, #0x0]\n"
+ "add x23, x23, #0x10\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "add x26, x26, #0x10\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x22, x22, #0x10\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x21, x21, #0x10\n"
"trn2 v5.2d, v5.2d, v6.2d\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q6, [x28, #0x10]\n"
+ ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e87a498 // smmla v24.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- "add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x4e86a49c // smmla v28.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x30]\n"
+ "ldr q6, [x28, #0x30]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
".inst 0x4e87a499 // smmla v25.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x40]\n"
- "add x23, x23, #0x10\n"
+ "ldr q7, [x28, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- "add x22, x22, #0x10\n"
- "sub x27, x27, #0x10\n"
".inst 0x4e86a49d // smmla v29.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x50]\n"
+ "ldr q6, [x28, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49a // smmla v26.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x60]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q7, [x28, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e86a49e // smmla v30.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x70]\n"
+ "ldr q6, [x28, #0x70]\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49b // smmla v27.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x80]\n"
+ "ldr q7, [x28, #0x80]\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
".inst 0x4e86a49f // smmla v31.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x90]\n"
+ "ldr q6, [x28, #0x90]\n"
".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
".inst 0x4e87a470 // smmla v16.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4b8 // smmla v24.4s, v5.16b, v7.16b\n"
- "ldr q7, [x9, #0xa0]\n"
+ "ldr q7, [x28, #0xa0]\n"
".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
".inst 0x4e86a474 // smmla v20.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4bc // smmla v28.4s, v5.16b, v6.16b\n"
- "ldr q6, [x9, #0xb0]\n"
+ "ldr q6, [x28, #0xb0]\n"
".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
".inst 0x4e87a471 // smmla v17.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4b9 // smmla v25.4s, v5.16b, v7.16b\n"
- "ldr q7, [x9, #0xc0]\n"
+ "ldr q7, [x28, #0xc0]\n"
".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
".inst 0x4e86a475 // smmla v21.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4bd // smmla v29.4s, v5.16b, v6.16b\n"
- "ldr q6, [x9, #0xd0]\n"
+ "ldr q6, [x28, #0xd0]\n"
".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
".inst 0x4e87a472 // smmla v18.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4ba // smmla v26.4s, v5.16b, v7.16b\n"
- "ldr q7, [x9, #0xe0]\n"
+ "ldr q7, [x28, #0xe0]\n"
".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
".inst 0x4e86a476 // smmla v22.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4be // smmla v30.4s, v5.16b, v6.16b\n"
- "ldr q6, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q6, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
".inst 0x4e87a473 // smmla v19.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4bb // smmla v27.4s, v5.16b, v7.16b\n"
@@ -2241,134 +2251,136 @@ void a64_hybrid_s8qs_mmla_6x16 (
".inst 0x4e86a477 // smmla v23.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4bf // smmla v31.4s, v5.16b, v6.16b\n"
"125:" // Height 5: Multiply loop: Main loop skip
- "cbz x27, 132f\n"
- "cmp x27, #0x8\n"
+ "cbz x26, 132f\n"
+ "cmp x26, #0x8\n"
"blt 127f\n"
"126:" // Height 5: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d2, [x25], #0x8\n"
+ "movi v7.4s, #0x0\n"
+ "ldr d1, [x25], #0x8\n"
+ "sub x26, x26, #0x8\n"
+ "ldr d2, [x24], #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "ldr d3, [x24], #0x8\n"
- "ldr d4, [x23], #0x8\n"
+ "ldr d3, [x23], #0x8\n"
+ "cmp x26, #0x8\n"
+ "ldr d4, [x22], #0x8\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
- "sub x27, x27, #0x8\n"
- "ldr d5, [x22], #0x8\n"
- "ldr q6, [x9, #0x0]\n"
+ "ldr d5, [x21], #0x8\n"
+ "ldr q6, [x28, #0x0]\n"
"trn1 v4.2d, v5.2d, v7.2d\n"
+ "ldr q7, [x28, #0x10]\n"
".inst 0x4e86a408 // smmla v8.4s, v0.16b, v6.16b\n"
- "ldr q7, [x9, #0x10]\n"
".inst 0x4e86a450 // smmla v16.4s, v2.16b, v6.16b\n"
".inst 0x4e86a498 // smmla v24.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x20]\n"
+ "ldr q6, [x28, #0x20]\n"
".inst 0x4e87a40c // smmla v12.4s, v0.16b, v7.16b\n"
".inst 0x4e87a454 // smmla v20.4s, v2.16b, v7.16b\n"
- "cmp x27, #0x8\n"
".inst 0x4e87a49c // smmla v28.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x30]\n"
+ "ldr q7, [x28, #0x30]\n"
".inst 0x4e86a409 // smmla v9.4s, v0.16b, v6.16b\n"
".inst 0x4e86a451 // smmla v17.4s, v2.16b, v6.16b\n"
".inst 0x4e86a499 // smmla v25.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x40]\n"
+ "ldr q6, [x28, #0x40]\n"
".inst 0x4e87a40d // smmla v13.4s, v0.16b, v7.16b\n"
".inst 0x4e87a455 // smmla v21.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49d // smmla v29.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x50]\n"
+ "ldr q7, [x28, #0x50]\n"
".inst 0x4e86a40a // smmla v10.4s, v0.16b, v6.16b\n"
".inst 0x4e86a452 // smmla v18.4s, v2.16b, v6.16b\n"
".inst 0x4e86a49a // smmla v26.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x60]\n"
+ "ldr q6, [x28, #0x60]\n"
".inst 0x4e87a40e // smmla v14.4s, v0.16b, v7.16b\n"
".inst 0x4e87a456 // smmla v22.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49e // smmla v30.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x70]\n"
+ "ldr q7, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
".inst 0x4e86a40b // smmla v11.4s, v0.16b, v6.16b\n"
- "add x9, x9, #0x80\n"
".inst 0x4e86a453 // smmla v19.4s, v2.16b, v6.16b\n"
".inst 0x4e86a49b // smmla v27.4s, v4.16b, v6.16b\n"
".inst 0x4e87a40f // smmla v15.4s, v0.16b, v7.16b\n"
".inst 0x4e87a457 // smmla v23.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49f // smmla v31.4s, v4.16b, v7.16b\n"
"bge 126b\n"
+ "cbz x26, 132f\n"
"127:" // Height 5: Multiply loop: Skip odd blocks
- "cbz x27, 132f\n"
- "tbz x27, #2, 129f\n"
- "ldr s1, [x26], #0x4\n"
- "ldr s2, [x25], #0x4\n"
- "ldr s3, [x24], #0x4\n"
- "ldr s4, [x23], #0x4\n"
- "ldr s5, [x22], #0x4\n"
- "tbz x27, #1, 128f\n"
- "ld1 { v1.h }[2], [x26], #0x2\n"
- "ld1 { v2.h }[2], [x25], #0x2\n"
- "ld1 { v3.h }[2], [x24], #0x2\n"
- "ld1 { v4.h }[2], [x23], #0x2\n"
- "ld1 { v5.h }[2], [x22], #0x2\n"
- "tbz x27, #0, 131f\n"
- "ld1 { v1.b }[6], [x26]\n"
- "ld1 { v2.b }[6], [x25]\n"
- "ld1 { v3.b }[6], [x24]\n"
- "ld1 { v4.b }[6], [x23]\n"
- "ld1 { v5.b }[6], [x22]\n"
+ "tbz x26, #2, 129f\n"
+ "ldr s1, [x25], #0x4\n"
+ "ldr s2, [x24], #0x4\n"
+ "ldr s3, [x23], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
+ "ldr s5, [x21], #0x4\n"
+ "tbz x26, #1, 128f\n"
+ "ld1 { v1.h }[2], [x25], #0x2\n"
+ "ld1 { v2.h }[2], [x24], #0x2\n"
+ "ld1 { v3.h }[2], [x23], #0x2\n"
+ "ld1 { v4.h }[2], [x22], #0x2\n"
+ "ld1 { v5.h }[2], [x21], #0x2\n"
+ "tbz x26, #0, 131f\n"
+ "ld1 { v1.b }[6], [x25]\n"
+ "ld1 { v2.b }[6], [x24]\n"
+ "ld1 { v3.b }[6], [x23]\n"
+ "ld1 { v4.b }[6], [x22]\n"
+ "ld1 { v5.b }[6], [x21]\n"
"b 131f\n"
"128:" // Height 5: Multiply loop: Ragged operand read: partial_1_4
- "tbz x27, #0, 131f\n"
- "ld1 { v1.b }[4], [x26]\n"
- "ld1 { v2.b }[4], [x25]\n"
- "ld1 { v3.b }[4], [x24]\n"
- "ld1 { v4.b }[4], [x23]\n"
- "ld1 { v5.b }[4], [x22]\n"
+ "tbz x26, #0, 131f\n"
+ "ld1 { v1.b }[4], [x25]\n"
+ "ld1 { v2.b }[4], [x24]\n"
+ "ld1 { v3.b }[4], [x23]\n"
+ "ld1 { v4.b }[4], [x22]\n"
+ "ld1 { v5.b }[4], [x21]\n"
"b 131f\n"
"129:" // Height 5: Multiply loop: Ragged operand read: partial_2_0
- "tbz x27, #1, 130f\n"
- "ldr h1, [x26], #0x2\n"
- "ldr h2, [x25], #0x2\n"
- "ldr h3, [x24], #0x2\n"
- "ldr h4, [x23], #0x2\n"
- "ldr h5, [x22], #0x2\n"
- "tbz x27, #0, 131f\n"
- "ld1 { v1.b }[2], [x26]\n"
- "ld1 { v2.b }[2], [x25]\n"
- "ld1 { v3.b }[2], [x24]\n"
- "ld1 { v4.b }[2], [x23]\n"
- "ld1 { v5.b }[2], [x22]\n"
+ "tbz x26, #1, 130f\n"
+ "ldr h1, [x25], #0x2\n"
+ "ldr h2, [x24], #0x2\n"
+ "ldr h3, [x23], #0x2\n"
+ "ldr h4, [x22], #0x2\n"
+ "ldr h5, [x21], #0x2\n"
+ "tbz x26, #0, 131f\n"
+ "ld1 { v1.b }[2], [x25]\n"
+ "ld1 { v2.b }[2], [x24]\n"
+ "ld1 { v3.b }[2], [x23]\n"
+ "ld1 { v4.b }[2], [x22]\n"
+ "ld1 { v5.b }[2], [x21]\n"
"b 131f\n"
"130:" // Height 5: Multiply loop: Ragged operand read: partial_1_0
- "ldr b1, [x26, #0x0]\n"
- "ldr b2, [x25, #0x0]\n"
- "ldr b3, [x24, #0x0]\n"
- "ldr b4, [x23, #0x0]\n"
- "ldr b5, [x22, #0x0]\n"
+ "ldr b1, [x25, #0x0]\n"
+ "ldr b2, [x24, #0x0]\n"
+ "ldr b3, [x23, #0x0]\n"
+ "ldr b4, [x22, #0x0]\n"
+ "ldr b5, [x21, #0x0]\n"
"131:" // Height 5: Multiply loop: Ragged operand read: Done
- "ldr q7, [x9, #0x0]\n"
+ "movi v6.4s, #0x0\n"
+ "ldr q7, [x28, #0x0]\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q6, [x28, #0x10]\n"
".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
".inst 0x4e87a498 // smmla v24.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
".inst 0x4e86a49c // smmla v28.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x30]\n"
+ "ldr q6, [x28, #0x30]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
".inst 0x4e87a499 // smmla v25.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x40]\n"
+ "ldr q7, [x28, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
".inst 0x4e86a49d // smmla v29.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x50]\n"
+ "ldr q6, [x28, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49a // smmla v26.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x60]\n"
+ "ldr q7, [x28, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
".inst 0x4e86a49e // smmla v30.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x70]\n"
- "add x9, x9, #0x80\n"
+ "ldr q6, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49b // smmla v27.4s, v4.16b, v7.16b\n"
@@ -2376,41 +2388,41 @@ void a64_hybrid_s8qs_mmla_6x16 (
".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
".inst 0x4e86a49f // smmla v31.4s, v4.16b, v6.16b\n"
"132:" // Height 5: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 120b\n"
- "ldr q0, [x14, #0x0]\n"
- "ldr q1, [x14, #0x10]\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
- "ldr q2, [x14, #0x20]\n"
- "ldr q3, [x14, #0x30]\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
+ "add x23, x9, x19\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
+ "ldr q0, [x11, #0x0]\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x11, x20\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
+ "add x20, x21, x19\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x20, #0x0]\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
+ "ldr q1, [x11, #0x10]\n"
"uzp1 v15.2d, v16.2d, v20.2d\n"
+ "ldr q2, [x11, #0x20]\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
- "add x21, x22, x20\n"
- "prfm pstl1keep, [x11, #0x0]\n"
+ "ldr q3, [x11, #0x30]\n"
+ "add x11, x11, #0x40\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
- "prfm pstl1keep, [x24, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
"uzp2 v18.2d, v18.2d, v22.2d\n"
- "prfm pstl1keep, [x22, #0x0]\n"
- "prfm pstl1keep, [x21, #0x0]\n"
"uzp1 v22.2d, v19.2d, v23.2d\n"
"uzp2 v19.2d, v19.2d, v23.2d\n"
- "add x14, x14, #0x40\n"
"uzp1 v24.2d, v24.2d, v28.2d\n"
"uzp1 v25.2d, v25.2d, v29.2d\n"
"uzp1 v26.2d, v26.2d, v30.2d\n"
@@ -2444,20 +2456,20 @@ void a64_hybrid_s8qs_mmla_6x16 (
"ldr q2, [x12, #0x20]\n"
"ldr q6, [x13, #0x20]\n"
"ldr q3, [x12, #0x30]\n"
- "ldr q7, [x13, #0x30]\n"
"add x12, x12, #0x40\n"
+ "ldr q7, [x13, #0x30]\n"
"add x13, x13, #0x40\n"
"b 134f\n"
"133:" // Height 5: per layer parameters
- "add x25, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x25]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x25]\n"
+ "add x24, %x[qp], %[per_layer_right_shift]\n"
+ "ld1r { v0.4s }, [x24]\n"
"mov v1.16b, v0.16b\n"
- "mov v5.16b, v4.16b\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v4.4s }, [x24]\n"
"mov v2.16b, v0.16b\n"
- "mov v6.16b, v4.16b\n"
"mov v3.16b, v0.16b\n"
+ "mov v5.16b, v4.16b\n"
+ "mov v6.16b, v4.16b\n"
"mov v7.16b, v4.16b\n"
"134:" // Height 5: parameters loaded
"sqrdmulh v31.4s, v31.4s, v4.4s\n"
@@ -2484,148 +2496,148 @@ void a64_hybrid_s8qs_mmla_6x16 (
"and v4.16b, v31.16b, v0.16b\n"
"and v5.16b, v12.16b, v1.16b\n"
"and v6.16b, v13.16b, v2.16b\n"
- "and v7.16b, v14.16b, v3.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v31.4s, v31.4s, v4.4s\n"
"sqadd v12.4s, v12.4s, v5.4s\n"
"sqadd v13.4s, v13.4s, v6.4s\n"
- "sqadd v14.4s, v14.4s, v7.4s\n"
+ "and v7.16b, v14.16b, v3.16b\n"
"and v4.16b, v8.16b, v0.16b\n"
"and v5.16b, v9.16b, v1.16b\n"
- "and v6.16b, v10.16b, v2.16b\n"
- "and v7.16b, v11.16b, v3.16b\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
+ "sqadd v14.4s, v14.4s, v7.4s\n"
"sqadd v8.4s, v8.4s, v4.4s\n"
"sqadd v9.4s, v9.4s, v5.4s\n"
+ "and v6.16b, v10.16b, v2.16b\n"
+ "and v7.16b, v11.16b, v3.16b\n"
+ "and v4.16b, v15.16b, v0.16b\n"
+ "sshr v6.4s, v6.4s, #0x1f\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
"sqadd v10.4s, v10.4s, v6.4s\n"
"sqadd v11.4s, v11.4s, v7.4s\n"
- "and v4.16b, v15.16b, v0.16b\n"
+ "sqadd v15.4s, v15.4s, v4.4s\n"
"and v5.16b, v20.16b, v1.16b\n"
"and v6.16b, v21.16b, v2.16b\n"
"and v7.16b, v22.16b, v3.16b\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
- "sqadd v15.4s, v15.4s, v4.4s\n"
"sqadd v20.4s, v20.4s, v5.4s\n"
"sqadd v21.4s, v21.4s, v6.4s\n"
"sqadd v22.4s, v22.4s, v7.4s\n"
"and v4.16b, v16.16b, v0.16b\n"
"and v5.16b, v17.16b, v1.16b\n"
"and v6.16b, v18.16b, v2.16b\n"
- "and v7.16b, v19.16b, v3.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v16.4s, v16.4s, v4.4s\n"
"sqadd v17.4s, v17.4s, v5.4s\n"
"sqadd v18.4s, v18.4s, v6.4s\n"
- "sqadd v19.4s, v19.4s, v7.4s\n"
+ "and v7.16b, v19.16b, v3.16b\n"
"and v4.16b, v24.16b, v0.16b\n"
"and v5.16b, v25.16b, v1.16b\n"
- "and v6.16b, v26.16b, v2.16b\n"
- "and v7.16b, v27.16b, v3.16b\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
+ "sqadd v19.4s, v19.4s, v7.4s\n"
"sqadd v24.4s, v24.4s, v4.4s\n"
"sqadd v25.4s, v25.4s, v5.4s\n"
+ "and v6.16b, v26.16b, v2.16b\n"
+ "and v7.16b, v27.16b, v3.16b\n"
+ "sshr v6.4s, v6.4s, #0x1f\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v26.4s, v26.4s, v6.4s\n"
"sqadd v27.4s, v27.4s, v7.4s\n"
"135:" // Height 5: no shift correction
- "add x25, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x25]\n"
"srshl v31.4s, v31.4s, v0.4s\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "ld1r { v4.4s }, [x24]\n"
"srshl v12.4s, v12.4s, v1.4s\n"
+ "add x24, %x[qp], %[minval]\n"
"srshl v13.4s, v13.4s, v2.4s\n"
+ "ld1r { v5.4s }, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
"srshl v14.4s, v14.4s, v3.4s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x25]\n"
+ "ld1r { v6.4s }, [x24]\n"
+ "cmp x10, #0x10\n"
"srshl v8.4s, v8.4s, v0.4s\n"
"srshl v9.4s, v9.4s, v1.4s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x25]\n"
- "srshl v10.4s, v10.4s, v2.4s\n"
- "srshl v11.4s, v11.4s, v3.4s\n"
- "cmp x10, #0x10\n"
- "srshl v15.4s, v15.4s, v0.4s\n"
- "srshl v20.4s, v20.4s, v1.4s\n"
- "srshl v21.4s, v21.4s, v2.4s\n"
- "srshl v22.4s, v22.4s, v3.4s\n"
- "srshl v16.4s, v16.4s, v0.4s\n"
- "srshl v17.4s, v17.4s, v1.4s\n"
- "srshl v18.4s, v18.4s, v2.4s\n"
- "srshl v19.4s, v19.4s, v3.4s\n"
- "srshl v24.4s, v24.4s, v0.4s\n"
- "srshl v25.4s, v25.4s, v1.4s\n"
- "srshl v26.4s, v26.4s, v2.4s\n"
- "srshl v27.4s, v27.4s, v3.4s\n"
"add v31.4s, v31.4s, v4.4s\n"
"add v12.4s, v12.4s, v4.4s\n"
"add v13.4s, v13.4s, v4.4s\n"
- "add v14.4s, v14.4s, v4.4s\n"
- "add v8.4s, v8.4s, v4.4s\n"
- "add v9.4s, v9.4s, v4.4s\n"
- "add v10.4s, v10.4s, v4.4s\n"
- "add v11.4s, v11.4s, v4.4s\n"
- "add v15.4s, v15.4s, v4.4s\n"
- "add v20.4s, v20.4s, v4.4s\n"
- "add v21.4s, v21.4s, v4.4s\n"
- "add v22.4s, v22.4s, v4.4s\n"
- "add v16.4s, v16.4s, v4.4s\n"
- "add v17.4s, v17.4s, v4.4s\n"
- "add v18.4s, v18.4s, v4.4s\n"
- "add v19.4s, v19.4s, v4.4s\n"
- "add v24.4s, v24.4s, v4.4s\n"
- "add v25.4s, v25.4s, v4.4s\n"
- "add v26.4s, v26.4s, v4.4s\n"
- "add v27.4s, v27.4s, v4.4s\n"
"smin v31.4s, v31.4s, v6.4s\n"
"smin v12.4s, v12.4s, v6.4s\n"
"smin v13.4s, v13.4s, v6.4s\n"
- "smin v14.4s, v14.4s, v6.4s\n"
- "smin v8.4s, v8.4s, v6.4s\n"
- "smin v9.4s, v9.4s, v6.4s\n"
- "smin v10.4s, v10.4s, v6.4s\n"
- "smin v11.4s, v11.4s, v6.4s\n"
- "smin v15.4s, v15.4s, v6.4s\n"
- "smin v20.4s, v20.4s, v6.4s\n"
- "smin v21.4s, v21.4s, v6.4s\n"
- "smin v22.4s, v22.4s, v6.4s\n"
- "smin v16.4s, v16.4s, v6.4s\n"
- "smin v17.4s, v17.4s, v6.4s\n"
- "smin v18.4s, v18.4s, v6.4s\n"
- "smin v19.4s, v19.4s, v6.4s\n"
- "smin v24.4s, v24.4s, v6.4s\n"
- "smin v25.4s, v25.4s, v6.4s\n"
- "smin v26.4s, v26.4s, v6.4s\n"
- "smin v27.4s, v27.4s, v6.4s\n"
"smax v31.4s, v31.4s, v5.4s\n"
"smax v12.4s, v12.4s, v5.4s\n"
"smax v13.4s, v13.4s, v5.4s\n"
+ "add v14.4s, v14.4s, v4.4s\n"
+ "add v8.4s, v8.4s, v4.4s\n"
+ "add v9.4s, v9.4s, v4.4s\n"
+ "smin v14.4s, v14.4s, v6.4s\n"
+ "smin v8.4s, v8.4s, v6.4s\n"
+ "smin v9.4s, v9.4s, v6.4s\n"
"smax v14.4s, v14.4s, v5.4s\n"
"smax v8.4s, v8.4s, v5.4s\n"
"smax v9.4s, v9.4s, v5.4s\n"
+ "srshl v10.4s, v10.4s, v2.4s\n"
+ "srshl v11.4s, v11.4s, v3.4s\n"
+ "srshl v15.4s, v15.4s, v0.4s\n"
+ "srshl v20.4s, v20.4s, v1.4s\n"
+ "add v10.4s, v10.4s, v4.4s\n"
+ "add v11.4s, v11.4s, v4.4s\n"
+ "add v15.4s, v15.4s, v4.4s\n"
+ "smin v10.4s, v10.4s, v6.4s\n"
+ "smin v11.4s, v11.4s, v6.4s\n"
+ "smin v15.4s, v15.4s, v6.4s\n"
"smax v10.4s, v10.4s, v5.4s\n"
"smax v11.4s, v11.4s, v5.4s\n"
"smax v15.4s, v15.4s, v5.4s\n"
+ "add v20.4s, v20.4s, v4.4s\n"
+ "srshl v21.4s, v21.4s, v2.4s\n"
+ "srshl v22.4s, v22.4s, v3.4s\n"
+ "smin v20.4s, v20.4s, v6.4s\n"
+ "srshl v16.4s, v16.4s, v0.4s\n"
+ "add v21.4s, v21.4s, v4.4s\n"
"smax v20.4s, v20.4s, v5.4s\n"
+ "add v22.4s, v22.4s, v4.4s\n"
+ "smin v21.4s, v21.4s, v6.4s\n"
+ "add v16.4s, v16.4s, v4.4s\n"
+ "smin v22.4s, v22.4s, v6.4s\n"
"smax v21.4s, v21.4s, v5.4s\n"
+ "smin v16.4s, v16.4s, v6.4s\n"
"smax v22.4s, v22.4s, v5.4s\n"
+ "srshl v17.4s, v17.4s, v1.4s\n"
"smax v16.4s, v16.4s, v5.4s\n"
+ "srshl v18.4s, v18.4s, v2.4s\n"
+ "srshl v19.4s, v19.4s, v3.4s\n"
+ "add v17.4s, v17.4s, v4.4s\n"
+ "srshl v24.4s, v24.4s, v0.4s\n"
+ "add v18.4s, v18.4s, v4.4s\n"
+ "smin v17.4s, v17.4s, v6.4s\n"
+ "add v19.4s, v19.4s, v4.4s\n"
+ "smin v18.4s, v18.4s, v6.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
+ "smin v19.4s, v19.4s, v6.4s\n"
"smax v18.4s, v18.4s, v5.4s\n"
+ "add v24.4s, v24.4s, v4.4s\n"
"smax v19.4s, v19.4s, v5.4s\n"
+ "srshl v25.4s, v25.4s, v1.4s\n"
+ "smin v24.4s, v24.4s, v6.4s\n"
+ "srshl v26.4s, v26.4s, v2.4s\n"
+ "srshl v27.4s, v27.4s, v3.4s\n"
"smax v24.4s, v24.4s, v5.4s\n"
+ "add v25.4s, v25.4s, v4.4s\n"
+ "add v26.4s, v26.4s, v4.4s\n"
+ "add v27.4s, v27.4s, v4.4s\n"
+ "smin v25.4s, v25.4s, v6.4s\n"
+ "smin v26.4s, v26.4s, v6.4s\n"
+ "smin v27.4s, v27.4s, v6.4s\n"
"smax v25.4s, v25.4s, v5.4s\n"
"smax v26.4s, v26.4s, v5.4s\n"
"smax v27.4s, v27.4s, v5.4s\n"
@@ -2646,131 +2658,131 @@ void a64_hybrid_s8qs_mmla_6x16 (
"uzp1 v24.16b, v24.16b, v25.16b\n"
"bge 144f\n"
"tbz x10, #3, 139f\n"
- "str d31, [x11], #0x8\n"
- "str d8, [x24], #0x8\n"
- "str d15, [x23], #0x8\n"
- "str d16, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
+ "str d31, [x9], #0x8\n"
+ "str d8, [x23], #0x8\n"
+ "str d15, [x22], #0x8\n"
+ "str d16, [x21], #0x8\n"
+ "str d24, [x20], #0x8\n"
"tbz x10, #2, 137f\n"
- "st1 { v31.s }[2], [x11], #0x4\n"
- "st1 { v8.s }[2], [x24], #0x4\n"
- "st1 { v15.s }[2], [x23], #0x4\n"
- "st1 { v16.s }[2], [x22], #0x4\n"
- "st1 { v24.s }[2], [x21], #0x4\n"
+ "st1 { v31.s }[2], [x9], #0x4\n"
+ "st1 { v8.s }[2], [x23], #0x4\n"
+ "st1 { v15.s }[2], [x22], #0x4\n"
+ "st1 { v16.s }[2], [x21], #0x4\n"
+ "st1 { v24.s }[2], [x20], #0x4\n"
"tbz x10, #1, 136f\n"
- "st1 { v31.h }[6], [x11], #0x2\n"
- "st1 { v8.h }[6], [x24], #0x2\n"
- "st1 { v15.h }[6], [x23], #0x2\n"
- "st1 { v16.h }[6], [x22], #0x2\n"
- "st1 { v24.h }[6], [x21], #0x2\n"
+ "st1 { v31.h }[6], [x9], #0x2\n"
+ "st1 { v8.h }[6], [x23], #0x2\n"
+ "st1 { v15.h }[6], [x22], #0x2\n"
+ "st1 { v16.h }[6], [x21], #0x2\n"
+ "st1 { v24.h }[6], [x20], #0x2\n"
"tbz x10, #0, 143f\n"
- "st1 { v31.b }[14], [x11]\n"
- "st1 { v8.b }[14], [x24]\n"
- "st1 { v15.b }[14], [x23]\n"
- "st1 { v16.b }[14], [x22]\n"
- "st1 { v24.b }[14], [x21]\n"
+ "st1 { v31.b }[14], [x9]\n"
+ "st1 { v8.b }[14], [x23]\n"
+ "st1 { v15.b }[14], [x22]\n"
+ "st1 { v16.b }[14], [x21]\n"
+ "st1 { v24.b }[14], [x20]\n"
"b 143f\n"
"136:" // Height 5: Partial direct writeback: partial_1_12
"tbz x10, #0, 143f\n"
- "st1 { v31.b }[12], [x11]\n"
- "st1 { v8.b }[12], [x24]\n"
- "st1 { v15.b }[12], [x23]\n"
- "st1 { v16.b }[12], [x22]\n"
- "st1 { v24.b }[12], [x21]\n"
+ "st1 { v31.b }[12], [x9]\n"
+ "st1 { v8.b }[12], [x23]\n"
+ "st1 { v15.b }[12], [x22]\n"
+ "st1 { v16.b }[12], [x21]\n"
+ "st1 { v24.b }[12], [x20]\n"
"b 143f\n"
"137:" // Height 5: Partial direct writeback: partial_2_8
"tbz x10, #1, 138f\n"
- "st1 { v31.h }[4], [x11], #0x2\n"
- "st1 { v8.h }[4], [x24], #0x2\n"
- "st1 { v15.h }[4], [x23], #0x2\n"
- "st1 { v16.h }[4], [x22], #0x2\n"
- "st1 { v24.h }[4], [x21], #0x2\n"
+ "st1 { v31.h }[4], [x9], #0x2\n"
+ "st1 { v8.h }[4], [x23], #0x2\n"
+ "st1 { v15.h }[4], [x22], #0x2\n"
+ "st1 { v16.h }[4], [x21], #0x2\n"
+ "st1 { v24.h }[4], [x20], #0x2\n"
"tbz x10, #0, 143f\n"
- "st1 { v31.b }[10], [x11]\n"
- "st1 { v8.b }[10], [x24]\n"
- "st1 { v15.b }[10], [x23]\n"
- "st1 { v16.b }[10], [x22]\n"
- "st1 { v24.b }[10], [x21]\n"
+ "st1 { v31.b }[10], [x9]\n"
+ "st1 { v8.b }[10], [x23]\n"
+ "st1 { v15.b }[10], [x22]\n"
+ "st1 { v16.b }[10], [x21]\n"
+ "st1 { v24.b }[10], [x20]\n"
"b 143f\n"
"138:" // Height 5: Partial direct writeback: partial_1_8
"tbz x10, #0, 143f\n"
- "st1 { v31.b }[8], [x11]\n"
- "st1 { v8.b }[8], [x24]\n"
- "st1 { v15.b }[8], [x23]\n"
- "st1 { v16.b }[8], [x22]\n"
- "st1 { v24.b }[8], [x21]\n"
+ "st1 { v31.b }[8], [x9]\n"
+ "st1 { v8.b }[8], [x23]\n"
+ "st1 { v15.b }[8], [x22]\n"
+ "st1 { v16.b }[8], [x21]\n"
+ "st1 { v24.b }[8], [x20]\n"
"b 143f\n"
"139:" // Height 5: Partial direct writeback: partial_4_0
"tbz x10, #2, 141f\n"
- "str s31, [x11], #0x4\n"
- "str s8, [x24], #0x4\n"
- "str s15, [x23], #0x4\n"
- "str s16, [x22], #0x4\n"
- "str s24, [x21], #0x4\n"
+ "str s31, [x9], #0x4\n"
+ "str s8, [x23], #0x4\n"
+ "str s15, [x22], #0x4\n"
+ "str s16, [x21], #0x4\n"
+ "str s24, [x20], #0x4\n"
"tbz x10, #1, 140f\n"
- "st1 { v31.h }[2], [x11], #0x2\n"
- "st1 { v8.h }[2], [x24], #0x2\n"
- "st1 { v15.h }[2], [x23], #0x2\n"
- "st1 { v16.h }[2], [x22], #0x2\n"
- "st1 { v24.h }[2], [x21], #0x2\n"
+ "st1 { v31.h }[2], [x9], #0x2\n"
+ "st1 { v8.h }[2], [x23], #0x2\n"
+ "st1 { v15.h }[2], [x22], #0x2\n"
+ "st1 { v16.h }[2], [x21], #0x2\n"
+ "st1 { v24.h }[2], [x20], #0x2\n"
"tbz x10, #0, 143f\n"
- "st1 { v31.b }[6], [x11]\n"
- "st1 { v8.b }[6], [x24]\n"
- "st1 { v15.b }[6], [x23]\n"
- "st1 { v16.b }[6], [x22]\n"
- "st1 { v24.b }[6], [x21]\n"
+ "st1 { v31.b }[6], [x9]\n"
+ "st1 { v8.b }[6], [x23]\n"
+ "st1 { v15.b }[6], [x22]\n"
+ "st1 { v16.b }[6], [x21]\n"
+ "st1 { v24.b }[6], [x20]\n"
"b 143f\n"
"140:" // Height 5: Partial direct writeback: partial_1_4
"tbz x10, #0, 143f\n"
- "st1 { v31.b }[4], [x11]\n"
- "st1 { v8.b }[4], [x24]\n"
- "st1 { v15.b }[4], [x23]\n"
- "st1 { v16.b }[4], [x22]\n"
- "st1 { v24.b }[4], [x21]\n"
+ "st1 { v31.b }[4], [x9]\n"
+ "st1 { v8.b }[4], [x23]\n"
+ "st1 { v15.b }[4], [x22]\n"
+ "st1 { v16.b }[4], [x21]\n"
+ "st1 { v24.b }[4], [x20]\n"
"b 143f\n"
"141:" // Height 5: Partial direct writeback: partial_2_0
"tbz x10, #1, 142f\n"
- "str h31, [x11], #0x2\n"
- "str h8, [x24], #0x2\n"
- "str h15, [x23], #0x2\n"
- "str h16, [x22], #0x2\n"
- "str h24, [x21], #0x2\n"
+ "str h31, [x9], #0x2\n"
+ "str h8, [x23], #0x2\n"
+ "str h15, [x22], #0x2\n"
+ "str h16, [x21], #0x2\n"
+ "str h24, [x20], #0x2\n"
"tbz x10, #0, 143f\n"
- "st1 { v31.b }[2], [x11]\n"
- "st1 { v8.b }[2], [x24]\n"
- "st1 { v15.b }[2], [x23]\n"
- "st1 { v16.b }[2], [x22]\n"
- "st1 { v24.b }[2], [x21]\n"
+ "st1 { v31.b }[2], [x9]\n"
+ "st1 { v8.b }[2], [x23]\n"
+ "st1 { v15.b }[2], [x22]\n"
+ "st1 { v16.b }[2], [x21]\n"
+ "st1 { v24.b }[2], [x20]\n"
"b 143f\n"
"142:" // Height 5: Partial direct writeback: partial_1_0
- "str b31, [x11, #0x0]\n"
- "str b8, [x24, #0x0]\n"
- "str b15, [x23, #0x0]\n"
- "str b16, [x22, #0x0]\n"
- "str b24, [x21, #0x0]\n"
+ "str b31, [x9, #0x0]\n"
+ "str b8, [x23, #0x0]\n"
+ "str b15, [x22, #0x0]\n"
+ "str b16, [x21, #0x0]\n"
+ "str b24, [x20, #0x0]\n"
"143:" // Height 5: Partial direct writeback: Done
"b 145f\n"
"144:" // Height 5: Full writeback
- "str q31, [x11, #0x0]\n"
- "add x11, x11, #0x10\n"
- "str q8, [x24, #0x0]\n"
- "str q15, [x23, #0x0]\n"
- "str q16, [x22, #0x0]\n"
- "str q24, [x21, #0x0]\n"
+ "str q31, [x9, #0x0]\n"
+ "add x9, x9, #0x10\n"
+ "str q8, [x23, #0x0]\n"
+ "str q15, [x22, #0x0]\n"
+ "str q16, [x21, #0x0]\n"
+ "str q24, [x20, #0x0]\n"
"145:" // Height 5: Writeback done
"subs x10, x10, #0x10\n"
"bgt 118b\n"
"b 176f\n"
"146:" // Height 6
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x6\n"
- "mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
+ "mov x11, %x[col_bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
+ "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x11, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x20, #0x6\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
"147:" // Height 6: Column loop
"movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
@@ -2797,219 +2809,219 @@ void a64_hybrid_s8qs_mmla_6x16 (
"movi v30.4s, #0x0\n"
"movi v31.4s, #0x0\n"
"148:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"149:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 150f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 151f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
- "add x21, x21, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 151f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
+ "add x20, x20, x19\n"
"b 151f\n"
"150:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
+ "add x20, x21, x19\n"
"151:" // Height 6: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 154f\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q2, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q3, [x24, #0x0]\n"
- "ldr q4, [x23, #0x0]\n"
- "ldr q5, [x22, #0x0]\n"
- "ldr q6, [x21, #0x0]\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "ldr q2, [x24, #0x0]\n"
+ "cmp x26, #0x20\n"
"blt 153f\n"
"152:" // Height 6: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x23, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
- "sub x27, x27, #0x10\n"
+ "ldr q4, [x22, #0x0]\n"
+ "add x24, x24, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q5, [x21, #0x0]\n"
+ "add x23, x23, #0x10\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "add x26, x26, #0x10\n"
+ "ldr q6, [x20, #0x0]\n"
+ "add x22, x22, #0x10\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x21, x21, #0x10\n"
"trn2 v5.2d, v5.2d, v6.2d\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q6, [x28, #0x10]\n"
+ "add x20, x20, #0x10\n"
+ ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x26, x26, #0x10\n"
+ ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "cmp x26, #0x20\n"
".inst 0x4e87a498 // smmla v24.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- "add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4e86a49c // smmla v28.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x30]\n"
+ "ldr q6, [x28, #0x30]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
".inst 0x4e87a499 // smmla v25.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x40]\n"
- "add x23, x23, #0x10\n"
+ "ldr q7, [x28, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- "add x22, x22, #0x10\n"
- "add x21, x21, #0x10\n"
".inst 0x4e86a49d // smmla v29.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x50]\n"
+ "ldr q6, [x28, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "cmp x27, #0x20\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49a // smmla v26.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x60]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q7, [x28, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e86a49e // smmla v30.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x70]\n"
+ "ldr q6, [x28, #0x70]\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49b // smmla v27.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
+ "ldr q7, [x28, #0x80]\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
- "ldr q2, [x25, #0x0]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
+ "ldr q2, [x24, #0x0]\n"
".inst 0x4e86a49f // smmla v31.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x90]\n"
- "ldr q4, [x23, #0x0]\n"
+ "ldr q6, [x28, #0x90]\n"
".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
".inst 0x4e87a470 // smmla v16.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4b8 // smmla v24.4s, v5.16b, v7.16b\n"
- "ldr q7, [x9, #0xa0]\n"
+ "ldr q7, [x28, #0xa0]\n"
".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
".inst 0x4e86a474 // smmla v20.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4bc // smmla v28.4s, v5.16b, v6.16b\n"
- "ldr q6, [x9, #0xb0]\n"
+ "ldr q6, [x28, #0xb0]\n"
".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
".inst 0x4e87a471 // smmla v17.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4b9 // smmla v25.4s, v5.16b, v7.16b\n"
- "ldr q7, [x9, #0xc0]\n"
+ "ldr q7, [x28, #0xc0]\n"
".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
".inst 0x4e86a475 // smmla v21.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4bd // smmla v29.4s, v5.16b, v6.16b\n"
- "ldr q6, [x9, #0xd0]\n"
+ "ldr q6, [x28, #0xd0]\n"
".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
".inst 0x4e87a472 // smmla v18.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4ba // smmla v26.4s, v5.16b, v7.16b\n"
- "ldr q7, [x9, #0xe0]\n"
+ "ldr q7, [x28, #0xe0]\n"
".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
".inst 0x4e86a476 // smmla v22.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4be // smmla v30.4s, v5.16b, v6.16b\n"
- "ldr q6, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q6, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
".inst 0x4e87a473 // smmla v19.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4bb // smmla v27.4s, v5.16b, v7.16b\n"
- "ldr q7, [x9, #0x0]\n"
".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
- "ldr q1, [x26, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
".inst 0x4e86a477 // smmla v23.4s, v3.16b, v6.16b\n"
- "ldr q3, [x24, #0x0]\n"
".inst 0x4e86a4bf // smmla v31.4s, v5.16b, v6.16b\n"
- "ldr q5, [x22, #0x0]\n"
- "ldr q6, [x21, #0x0]\n"
"bge 152b\n"
"153:" // Height 6: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x23, #0x0]\n"
+ "sub x26, x26, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
- "add x26, x26, #0x10\n"
+ "ldr q4, [x22, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q5, [x21, #0x0]\n"
+ "add x24, x24, #0x10\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "add x25, x25, #0x10\n"
+ "ldr q6, [x20, #0x0]\n"
+ "add x23, x23, #0x10\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x22, x22, #0x10\n"
"trn2 v5.2d, v5.2d, v6.2d\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q6, [x28, #0x10]\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x20, x20, #0x10\n"
+ ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e87a498 // smmla v24.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- "add x24, x24, #0x10\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4e86a49c // smmla v28.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x30]\n"
+ "ldr q6, [x28, #0x30]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "add x23, x23, #0x10\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
".inst 0x4e87a499 // smmla v25.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x40]\n"
- "add x22, x22, #0x10\n"
+ "ldr q7, [x28, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- "add x21, x21, #0x10\n"
- "sub x27, x27, #0x10\n"
".inst 0x4e86a49d // smmla v29.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x50]\n"
+ "ldr q6, [x28, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49a // smmla v26.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x60]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q7, [x28, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e86a49e // smmla v30.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x70]\n"
+ "ldr q6, [x28, #0x70]\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49b // smmla v27.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x80]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
+ "ldr q7, [x28, #0x80]\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
".inst 0x4e86a49f // smmla v31.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x90]\n"
+ "ldr q6, [x28, #0x90]\n"
".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
".inst 0x4e87a470 // smmla v16.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4b8 // smmla v24.4s, v5.16b, v7.16b\n"
- "ldr q7, [x9, #0xa0]\n"
+ "ldr q7, [x28, #0xa0]\n"
".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
".inst 0x4e86a474 // smmla v20.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4bc // smmla v28.4s, v5.16b, v6.16b\n"
- "ldr q6, [x9, #0xb0]\n"
+ "ldr q6, [x28, #0xb0]\n"
".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
".inst 0x4e87a471 // smmla v17.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4b9 // smmla v25.4s, v5.16b, v7.16b\n"
- "ldr q7, [x9, #0xc0]\n"
+ "ldr q7, [x28, #0xc0]\n"
".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
".inst 0x4e86a475 // smmla v21.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4bd // smmla v29.4s, v5.16b, v6.16b\n"
- "ldr q6, [x9, #0xd0]\n"
+ "ldr q6, [x28, #0xd0]\n"
".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
".inst 0x4e87a472 // smmla v18.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4ba // smmla v26.4s, v5.16b, v7.16b\n"
- "ldr q7, [x9, #0xe0]\n"
+ "ldr q7, [x28, #0xe0]\n"
".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
".inst 0x4e86a476 // smmla v22.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4be // smmla v30.4s, v5.16b, v6.16b\n"
- "ldr q6, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q6, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
".inst 0x4e87a473 // smmla v19.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4bb // smmla v27.4s, v5.16b, v7.16b\n"
@@ -3017,48 +3029,48 @@ void a64_hybrid_s8qs_mmla_6x16 (
".inst 0x4e86a477 // smmla v23.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4bf // smmla v31.4s, v5.16b, v6.16b\n"
"154:" // Height 6: Multiply loop: Main loop skip
- "cbz x27, 161f\n"
- "cmp x27, #0x8\n"
+ "cbz x26, 161f\n"
+ "cmp x26, #0x8\n"
"blt 156f\n"
"155:" // Height 6: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d2, [x25], #0x8\n"
+ "ldr d1, [x25], #0x8\n"
+ "sub x26, x26, #0x8\n"
+ "ldr d2, [x24], #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "sub x27, x27, #0x8\n"
- "ldr d3, [x24], #0x8\n"
- "ldr d4, [x23], #0x8\n"
+ "ldr d3, [x23], #0x8\n"
+ "cmp x26, #0x8\n"
+ "ldr d4, [x22], #0x8\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
- "cmp x27, #0x8\n"
- "ldr d5, [x22], #0x8\n"
- "ldr d7, [x21], #0x8\n"
+ "ldr d5, [x21], #0x8\n"
+ "ldr d7, [x20], #0x8\n"
"trn1 v4.2d, v5.2d, v7.2d\n"
- "ldr q6, [x9, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q6, [x28, #0x0]\n"
+ "ldr q7, [x28, #0x10]\n"
".inst 0x4e86a408 // smmla v8.4s, v0.16b, v6.16b\n"
".inst 0x4e86a450 // smmla v16.4s, v2.16b, v6.16b\n"
".inst 0x4e86a498 // smmla v24.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x20]\n"
+ "ldr q6, [x28, #0x20]\n"
".inst 0x4e87a40c // smmla v12.4s, v0.16b, v7.16b\n"
".inst 0x4e87a454 // smmla v20.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49c // smmla v28.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x30]\n"
+ "ldr q7, [x28, #0x30]\n"
".inst 0x4e86a409 // smmla v9.4s, v0.16b, v6.16b\n"
".inst 0x4e86a451 // smmla v17.4s, v2.16b, v6.16b\n"
".inst 0x4e86a499 // smmla v25.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x40]\n"
+ "ldr q6, [x28, #0x40]\n"
".inst 0x4e87a40d // smmla v13.4s, v0.16b, v7.16b\n"
".inst 0x4e87a455 // smmla v21.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49d // smmla v29.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x50]\n"
+ "ldr q7, [x28, #0x50]\n"
".inst 0x4e86a40a // smmla v10.4s, v0.16b, v6.16b\n"
".inst 0x4e86a452 // smmla v18.4s, v2.16b, v6.16b\n"
".inst 0x4e86a49a // smmla v26.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x60]\n"
+ "ldr q6, [x28, #0x60]\n"
".inst 0x4e87a40e // smmla v14.4s, v0.16b, v7.16b\n"
".inst 0x4e87a456 // smmla v22.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49e // smmla v30.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x70]\n"
- "add x9, x9, #0x80\n"
+ "ldr q7, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
".inst 0x4e86a40b // smmla v11.4s, v0.16b, v6.16b\n"
".inst 0x4e86a453 // smmla v19.4s, v2.16b, v6.16b\n"
".inst 0x4e86a49b // smmla v27.4s, v4.16b, v6.16b\n"
@@ -3066,136 +3078,136 @@ void a64_hybrid_s8qs_mmla_6x16 (
".inst 0x4e87a457 // smmla v23.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49f // smmla v31.4s, v4.16b, v7.16b\n"
"bge 155b\n"
+ "cbz x26, 161f\n"
"156:" // Height 6: Multiply loop: Skip odd blocks
- "cbz x27, 161f\n"
- "tbz x27, #2, 158f\n"
- "ldr s1, [x26], #0x4\n"
- "ldr s2, [x25], #0x4\n"
- "ldr s3, [x24], #0x4\n"
- "ldr s4, [x23], #0x4\n"
- "ldr s5, [x22], #0x4\n"
- "ldr s6, [x21], #0x4\n"
- "tbz x27, #1, 157f\n"
- "ld1 { v1.h }[2], [x26], #0x2\n"
- "ld1 { v2.h }[2], [x25], #0x2\n"
- "ld1 { v3.h }[2], [x24], #0x2\n"
- "ld1 { v4.h }[2], [x23], #0x2\n"
- "ld1 { v5.h }[2], [x22], #0x2\n"
- "ld1 { v6.h }[2], [x21], #0x2\n"
- "tbz x27, #0, 160f\n"
- "ld1 { v1.b }[6], [x26]\n"
- "ld1 { v2.b }[6], [x25]\n"
- "ld1 { v3.b }[6], [x24]\n"
- "ld1 { v4.b }[6], [x23]\n"
- "ld1 { v5.b }[6], [x22]\n"
- "ld1 { v6.b }[6], [x21]\n"
+ "tbz x26, #2, 158f\n"
+ "ldr s1, [x25], #0x4\n"
+ "ldr s2, [x24], #0x4\n"
+ "ldr s3, [x23], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
+ "ldr s5, [x21], #0x4\n"
+ "ldr s6, [x20], #0x4\n"
+ "tbz x26, #1, 157f\n"
+ "ld1 { v1.h }[2], [x25], #0x2\n"
+ "ld1 { v2.h }[2], [x24], #0x2\n"
+ "ld1 { v3.h }[2], [x23], #0x2\n"
+ "ld1 { v4.h }[2], [x22], #0x2\n"
+ "ld1 { v5.h }[2], [x21], #0x2\n"
+ "ld1 { v6.h }[2], [x20], #0x2\n"
+ "tbz x26, #0, 160f\n"
+ "ld1 { v1.b }[6], [x25]\n"
+ "ld1 { v2.b }[6], [x24]\n"
+ "ld1 { v3.b }[6], [x23]\n"
+ "ld1 { v4.b }[6], [x22]\n"
+ "ld1 { v5.b }[6], [x21]\n"
+ "ld1 { v6.b }[6], [x20]\n"
"b 160f\n"
"157:" // Height 6: Multiply loop: Ragged operand read: partial_1_4
- "tbz x27, #0, 160f\n"
- "ld1 { v1.b }[4], [x26]\n"
- "ld1 { v2.b }[4], [x25]\n"
- "ld1 { v3.b }[4], [x24]\n"
- "ld1 { v4.b }[4], [x23]\n"
- "ld1 { v5.b }[4], [x22]\n"
- "ld1 { v6.b }[4], [x21]\n"
+ "tbz x26, #0, 160f\n"
+ "ld1 { v1.b }[4], [x25]\n"
+ "ld1 { v2.b }[4], [x24]\n"
+ "ld1 { v3.b }[4], [x23]\n"
+ "ld1 { v4.b }[4], [x22]\n"
+ "ld1 { v5.b }[4], [x21]\n"
+ "ld1 { v6.b }[4], [x20]\n"
"b 160f\n"
"158:" // Height 6: Multiply loop: Ragged operand read: partial_2_0
- "tbz x27, #1, 159f\n"
- "ldr h1, [x26], #0x2\n"
- "ldr h2, [x25], #0x2\n"
- "ldr h3, [x24], #0x2\n"
- "ldr h4, [x23], #0x2\n"
- "ldr h5, [x22], #0x2\n"
- "ldr h6, [x21], #0x2\n"
- "tbz x27, #0, 160f\n"
- "ld1 { v1.b }[2], [x26]\n"
- "ld1 { v2.b }[2], [x25]\n"
- "ld1 { v3.b }[2], [x24]\n"
- "ld1 { v4.b }[2], [x23]\n"
- "ld1 { v5.b }[2], [x22]\n"
- "ld1 { v6.b }[2], [x21]\n"
+ "tbz x26, #1, 159f\n"
+ "ldr h1, [x25], #0x2\n"
+ "ldr h2, [x24], #0x2\n"
+ "ldr h3, [x23], #0x2\n"
+ "ldr h4, [x22], #0x2\n"
+ "ldr h5, [x21], #0x2\n"
+ "ldr h6, [x20], #0x2\n"
+ "tbz x26, #0, 160f\n"
+ "ld1 { v1.b }[2], [x25]\n"
+ "ld1 { v2.b }[2], [x24]\n"
+ "ld1 { v3.b }[2], [x23]\n"
+ "ld1 { v4.b }[2], [x22]\n"
+ "ld1 { v5.b }[2], [x21]\n"
+ "ld1 { v6.b }[2], [x20]\n"
"b 160f\n"
"159:" // Height 6: Multiply loop: Ragged operand read: partial_1_0
- "ldr b1, [x26, #0x0]\n"
- "ldr b2, [x25, #0x0]\n"
- "ldr b3, [x24, #0x0]\n"
- "ldr b4, [x23, #0x0]\n"
- "ldr b5, [x22, #0x0]\n"
- "ldr b6, [x21, #0x0]\n"
+ "ldr b1, [x25, #0x0]\n"
+ "ldr b2, [x24, #0x0]\n"
+ "ldr b3, [x23, #0x0]\n"
+ "ldr b4, [x22, #0x0]\n"
+ "ldr b5, [x21, #0x0]\n"
+ "ldr b6, [x20, #0x0]\n"
"160:" // Height 6: Multiply loop: Ragged operand read: Done
- "ldr q7, [x9, #0x0]\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q7, [x28, #0x0]\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q6, [x28, #0x10]\n"
+ ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
".inst 0x4e87a498 // smmla v24.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x28, #0x20]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
".inst 0x4e86a49c // smmla v28.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x30]\n"
+ "ldr q6, [x28, #0x30]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
".inst 0x4e87a499 // smmla v25.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x40]\n"
+ "ldr q7, [x28, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
".inst 0x4e86a49d // smmla v29.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x50]\n"
+ "ldr q6, [x28, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49a // smmla v26.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x60]\n"
+ "ldr q7, [x28, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
".inst 0x4e86a49e // smmla v30.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x70]\n"
+ "ldr q6, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "add x9, x9, #0x80\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49b // smmla v27.4s, v4.16b, v7.16b\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
".inst 0x4e86a49f // smmla v31.4s, v4.16b, v6.16b\n"
"161:" // Height 6: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 149b\n"
- "ldr q0, [x14, #0x0]\n"
- "ldr q1, [x14, #0x10]\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
- "ldr q2, [x14, #0x20]\n"
- "ldr q3, [x14, #0x30]\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
+ "add x23, x9, x19\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
+ "ldr q0, [x11, #0x0]\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x11, x20\n"
- "add x23, x24, x20\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
+ "add x20, x21, x19\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
+ "prfm pstl1keep, [x20, #0x0]\n"
+ "add x19, x20, x19\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x19, #0x0]\n"
"uzp1 v15.2d, v16.2d, v20.2d\n"
- "add x20, x21, x20\n"
- "prfm pstl1keep, [x11, #0x0]\n"
+ "ldr q1, [x11, #0x10]\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
+ "ldr q2, [x11, #0x20]\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
- "prfm pstl1keep, [x24, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
+ "ldr q3, [x11, #0x30]\n"
+ "add x11, x11, #0x40\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
- "prfm pstl1keep, [x22, #0x0]\n"
- "prfm pstl1keep, [x21, #0x0]\n"
"uzp2 v18.2d, v18.2d, v22.2d\n"
"uzp1 v22.2d, v19.2d, v23.2d\n"
- "prfm pstl1keep, [x20, #0x0]\n"
- "add x14, x14, #0x40\n"
"uzp2 v19.2d, v19.2d, v23.2d\n"
"uzp1 v23.2d, v24.2d, v28.2d\n"
"uzp2 v24.2d, v24.2d, v28.2d\n"
@@ -3238,20 +3250,20 @@ void a64_hybrid_s8qs_mmla_6x16 (
"ldr q2, [x12, #0x20]\n"
"ldr q6, [x13, #0x20]\n"
"ldr q3, [x12, #0x30]\n"
- "ldr q7, [x13, #0x30]\n"
"add x12, x12, #0x40\n"
+ "ldr q7, [x13, #0x30]\n"
"add x13, x13, #0x40\n"
"b 163f\n"
"162:" // Height 6: per layer parameters
- "add x25, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x25]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x25]\n"
+ "add x24, %x[qp], %[per_layer_right_shift]\n"
+ "ld1r { v0.4s }, [x24]\n"
"mov v1.16b, v0.16b\n"
- "mov v5.16b, v4.16b\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v4.4s }, [x24]\n"
"mov v2.16b, v0.16b\n"
- "mov v6.16b, v4.16b\n"
"mov v3.16b, v0.16b\n"
+ "mov v5.16b, v4.16b\n"
+ "mov v6.16b, v4.16b\n"
"mov v7.16b, v4.16b\n"
"163:" // Height 6: parameters loaded
"sqrdmulh v31.4s, v31.4s, v4.4s\n"
@@ -3282,183 +3294,183 @@ void a64_hybrid_s8qs_mmla_6x16 (
"and v4.16b, v31.16b, v0.16b\n"
"and v5.16b, v12.16b, v1.16b\n"
"and v6.16b, v13.16b, v2.16b\n"
- "and v7.16b, v14.16b, v3.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v31.4s, v31.4s, v4.4s\n"
"sqadd v12.4s, v12.4s, v5.4s\n"
"sqadd v13.4s, v13.4s, v6.4s\n"
- "sqadd v14.4s, v14.4s, v7.4s\n"
+ "and v7.16b, v14.16b, v3.16b\n"
"and v4.16b, v8.16b, v0.16b\n"
"and v5.16b, v9.16b, v1.16b\n"
- "and v6.16b, v10.16b, v2.16b\n"
- "and v7.16b, v11.16b, v3.16b\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
+ "sqadd v14.4s, v14.4s, v7.4s\n"
"sqadd v8.4s, v8.4s, v4.4s\n"
"sqadd v9.4s, v9.4s, v5.4s\n"
+ "and v6.16b, v10.16b, v2.16b\n"
+ "and v7.16b, v11.16b, v3.16b\n"
+ "and v4.16b, v15.16b, v0.16b\n"
+ "sshr v6.4s, v6.4s, #0x1f\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
"sqadd v10.4s, v10.4s, v6.4s\n"
"sqadd v11.4s, v11.4s, v7.4s\n"
- "and v4.16b, v15.16b, v0.16b\n"
+ "sqadd v15.4s, v15.4s, v4.4s\n"
"and v5.16b, v20.16b, v1.16b\n"
"and v6.16b, v21.16b, v2.16b\n"
"and v7.16b, v22.16b, v3.16b\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
- "sqadd v15.4s, v15.4s, v4.4s\n"
"sqadd v20.4s, v20.4s, v5.4s\n"
"sqadd v21.4s, v21.4s, v6.4s\n"
"sqadd v22.4s, v22.4s, v7.4s\n"
"and v4.16b, v16.16b, v0.16b\n"
"and v5.16b, v17.16b, v1.16b\n"
"and v6.16b, v18.16b, v2.16b\n"
- "and v7.16b, v19.16b, v3.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v16.4s, v16.4s, v4.4s\n"
"sqadd v17.4s, v17.4s, v5.4s\n"
"sqadd v18.4s, v18.4s, v6.4s\n"
- "sqadd v19.4s, v19.4s, v7.4s\n"
+ "and v7.16b, v19.16b, v3.16b\n"
"and v4.16b, v23.16b, v0.16b\n"
"and v5.16b, v28.16b, v1.16b\n"
- "and v6.16b, v29.16b, v2.16b\n"
- "and v7.16b, v30.16b, v3.16b\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
+ "sqadd v19.4s, v19.4s, v7.4s\n"
"sqadd v23.4s, v23.4s, v4.4s\n"
"sqadd v28.4s, v28.4s, v5.4s\n"
+ "and v6.16b, v29.16b, v2.16b\n"
+ "and v7.16b, v30.16b, v3.16b\n"
+ "and v4.16b, v24.16b, v0.16b\n"
+ "sshr v6.4s, v6.4s, #0x1f\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
"sqadd v29.4s, v29.4s, v6.4s\n"
"sqadd v30.4s, v30.4s, v7.4s\n"
- "and v4.16b, v24.16b, v0.16b\n"
+ "sqadd v24.4s, v24.4s, v4.4s\n"
"and v5.16b, v25.16b, v1.16b\n"
"and v6.16b, v26.16b, v2.16b\n"
"and v7.16b, v27.16b, v3.16b\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
- "sqadd v24.4s, v24.4s, v4.4s\n"
"sqadd v25.4s, v25.4s, v5.4s\n"
"sqadd v26.4s, v26.4s, v6.4s\n"
"sqadd v27.4s, v27.4s, v7.4s\n"
"164:" // Height 6: no shift correction
- "add x25, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x25]\n"
"srshl v31.4s, v31.4s, v0.4s\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "ld1r { v4.4s }, [x24]\n"
"srshl v12.4s, v12.4s, v1.4s\n"
+ "add x24, %x[qp], %[minval]\n"
"srshl v13.4s, v13.4s, v2.4s\n"
+ "ld1r { v5.4s }, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
"srshl v14.4s, v14.4s, v3.4s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x25]\n"
+ "ld1r { v6.4s }, [x24]\n"
+ "cmp x10, #0x10\n"
"srshl v8.4s, v8.4s, v0.4s\n"
"srshl v9.4s, v9.4s, v1.4s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x25]\n"
- "srshl v10.4s, v10.4s, v2.4s\n"
- "srshl v11.4s, v11.4s, v3.4s\n"
- "cmp x10, #0x10\n"
- "srshl v15.4s, v15.4s, v0.4s\n"
- "srshl v20.4s, v20.4s, v1.4s\n"
- "srshl v21.4s, v21.4s, v2.4s\n"
- "srshl v22.4s, v22.4s, v3.4s\n"
- "srshl v16.4s, v16.4s, v0.4s\n"
- "srshl v17.4s, v17.4s, v1.4s\n"
- "srshl v18.4s, v18.4s, v2.4s\n"
- "srshl v19.4s, v19.4s, v3.4s\n"
- "srshl v23.4s, v23.4s, v0.4s\n"
- "srshl v28.4s, v28.4s, v1.4s\n"
- "srshl v29.4s, v29.4s, v2.4s\n"
- "srshl v30.4s, v30.4s, v3.4s\n"
- "srshl v24.4s, v24.4s, v0.4s\n"
- "srshl v25.4s, v25.4s, v1.4s\n"
- "srshl v26.4s, v26.4s, v2.4s\n"
- "srshl v27.4s, v27.4s, v3.4s\n"
"add v31.4s, v31.4s, v4.4s\n"
"add v12.4s, v12.4s, v4.4s\n"
"add v13.4s, v13.4s, v4.4s\n"
- "add v14.4s, v14.4s, v4.4s\n"
- "add v8.4s, v8.4s, v4.4s\n"
- "add v9.4s, v9.4s, v4.4s\n"
- "add v10.4s, v10.4s, v4.4s\n"
- "add v11.4s, v11.4s, v4.4s\n"
- "add v15.4s, v15.4s, v4.4s\n"
- "add v20.4s, v20.4s, v4.4s\n"
- "add v21.4s, v21.4s, v4.4s\n"
- "add v22.4s, v22.4s, v4.4s\n"
- "add v16.4s, v16.4s, v4.4s\n"
- "add v17.4s, v17.4s, v4.4s\n"
- "add v18.4s, v18.4s, v4.4s\n"
- "add v19.4s, v19.4s, v4.4s\n"
- "add v23.4s, v23.4s, v4.4s\n"
- "add v28.4s, v28.4s, v4.4s\n"
- "add v29.4s, v29.4s, v4.4s\n"
- "add v30.4s, v30.4s, v4.4s\n"
- "add v24.4s, v24.4s, v4.4s\n"
- "add v25.4s, v25.4s, v4.4s\n"
- "add v26.4s, v26.4s, v4.4s\n"
- "add v27.4s, v27.4s, v4.4s\n"
"smin v31.4s, v31.4s, v6.4s\n"
"smin v12.4s, v12.4s, v6.4s\n"
"smin v13.4s, v13.4s, v6.4s\n"
- "smin v14.4s, v14.4s, v6.4s\n"
- "smin v8.4s, v8.4s, v6.4s\n"
- "smin v9.4s, v9.4s, v6.4s\n"
- "smin v10.4s, v10.4s, v6.4s\n"
- "smin v11.4s, v11.4s, v6.4s\n"
- "smin v15.4s, v15.4s, v6.4s\n"
- "smin v20.4s, v20.4s, v6.4s\n"
- "smin v21.4s, v21.4s, v6.4s\n"
- "smin v22.4s, v22.4s, v6.4s\n"
- "smin v16.4s, v16.4s, v6.4s\n"
- "smin v17.4s, v17.4s, v6.4s\n"
- "smin v18.4s, v18.4s, v6.4s\n"
- "smin v19.4s, v19.4s, v6.4s\n"
- "smin v23.4s, v23.4s, v6.4s\n"
- "smin v28.4s, v28.4s, v6.4s\n"
- "smin v29.4s, v29.4s, v6.4s\n"
- "smin v30.4s, v30.4s, v6.4s\n"
- "smin v24.4s, v24.4s, v6.4s\n"
- "smin v25.4s, v25.4s, v6.4s\n"
- "smin v26.4s, v26.4s, v6.4s\n"
- "smin v27.4s, v27.4s, v6.4s\n"
"smax v31.4s, v31.4s, v5.4s\n"
"smax v12.4s, v12.4s, v5.4s\n"
"smax v13.4s, v13.4s, v5.4s\n"
+ "add v14.4s, v14.4s, v4.4s\n"
+ "add v8.4s, v8.4s, v4.4s\n"
+ "add v9.4s, v9.4s, v4.4s\n"
+ "smin v14.4s, v14.4s, v6.4s\n"
+ "smin v8.4s, v8.4s, v6.4s\n"
+ "smin v9.4s, v9.4s, v6.4s\n"
"smax v14.4s, v14.4s, v5.4s\n"
"smax v8.4s, v8.4s, v5.4s\n"
"smax v9.4s, v9.4s, v5.4s\n"
+ "srshl v10.4s, v10.4s, v2.4s\n"
+ "srshl v11.4s, v11.4s, v3.4s\n"
+ "srshl v15.4s, v15.4s, v0.4s\n"
+ "srshl v20.4s, v20.4s, v1.4s\n"
+ "add v10.4s, v10.4s, v4.4s\n"
+ "add v11.4s, v11.4s, v4.4s\n"
+ "add v15.4s, v15.4s, v4.4s\n"
+ "smin v10.4s, v10.4s, v6.4s\n"
+ "smin v11.4s, v11.4s, v6.4s\n"
+ "smin v15.4s, v15.4s, v6.4s\n"
"smax v10.4s, v10.4s, v5.4s\n"
"smax v11.4s, v11.4s, v5.4s\n"
"smax v15.4s, v15.4s, v5.4s\n"
+ "add v20.4s, v20.4s, v4.4s\n"
+ "srshl v21.4s, v21.4s, v2.4s\n"
+ "srshl v22.4s, v22.4s, v3.4s\n"
+ "smin v20.4s, v20.4s, v6.4s\n"
+ "srshl v16.4s, v16.4s, v0.4s\n"
+ "add v21.4s, v21.4s, v4.4s\n"
"smax v20.4s, v20.4s, v5.4s\n"
+ "add v22.4s, v22.4s, v4.4s\n"
+ "smin v21.4s, v21.4s, v6.4s\n"
+ "add v16.4s, v16.4s, v4.4s\n"
+ "smin v22.4s, v22.4s, v6.4s\n"
"smax v21.4s, v21.4s, v5.4s\n"
+ "smin v16.4s, v16.4s, v6.4s\n"
"smax v22.4s, v22.4s, v5.4s\n"
+ "srshl v17.4s, v17.4s, v1.4s\n"
"smax v16.4s, v16.4s, v5.4s\n"
+ "srshl v18.4s, v18.4s, v2.4s\n"
+ "srshl v19.4s, v19.4s, v3.4s\n"
+ "add v17.4s, v17.4s, v4.4s\n"
+ "srshl v23.4s, v23.4s, v0.4s\n"
+ "add v18.4s, v18.4s, v4.4s\n"
+ "smin v17.4s, v17.4s, v6.4s\n"
+ "add v19.4s, v19.4s, v4.4s\n"
+ "smin v18.4s, v18.4s, v6.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
+ "smin v19.4s, v19.4s, v6.4s\n"
"smax v18.4s, v18.4s, v5.4s\n"
+ "add v23.4s, v23.4s, v4.4s\n"
"smax v19.4s, v19.4s, v5.4s\n"
+ "srshl v28.4s, v28.4s, v1.4s\n"
+ "smin v23.4s, v23.4s, v6.4s\n"
+ "srshl v29.4s, v29.4s, v2.4s\n"
+ "srshl v30.4s, v30.4s, v3.4s\n"
"smax v23.4s, v23.4s, v5.4s\n"
+ "add v28.4s, v28.4s, v4.4s\n"
+ "add v29.4s, v29.4s, v4.4s\n"
+ "add v30.4s, v30.4s, v4.4s\n"
+ "smin v28.4s, v28.4s, v6.4s\n"
+ "smin v29.4s, v29.4s, v6.4s\n"
+ "smin v30.4s, v30.4s, v6.4s\n"
"smax v28.4s, v28.4s, v5.4s\n"
"smax v29.4s, v29.4s, v5.4s\n"
"smax v30.4s, v30.4s, v5.4s\n"
+ "srshl v24.4s, v24.4s, v0.4s\n"
+ "srshl v25.4s, v25.4s, v1.4s\n"
+ "srshl v26.4s, v26.4s, v2.4s\n"
+ "srshl v27.4s, v27.4s, v3.4s\n"
+ "add v24.4s, v24.4s, v4.4s\n"
+ "add v25.4s, v25.4s, v4.4s\n"
+ "add v26.4s, v26.4s, v4.4s\n"
+ "smin v24.4s, v24.4s, v6.4s\n"
+ "smin v25.4s, v25.4s, v6.4s\n"
+ "smin v26.4s, v26.4s, v6.4s\n"
"smax v24.4s, v24.4s, v5.4s\n"
"smax v25.4s, v25.4s, v5.4s\n"
"smax v26.4s, v26.4s, v5.4s\n"
- "smax v27.4s, v27.4s, v5.4s\n"
+ "add v27.4s, v27.4s, v4.4s\n"
"uzp1 v31.8h, v31.8h, v12.8h\n"
"uzp1 v12.8h, v13.8h, v14.8h\n"
+ "smin v27.4s, v27.4s, v6.4s\n"
"uzp1 v8.8h, v8.8h, v9.8h\n"
"uzp1 v9.8h, v10.8h, v11.8h\n"
+ "smax v27.4s, v27.4s, v5.4s\n"
"uzp1 v15.8h, v15.8h, v20.8h\n"
"uzp1 v20.8h, v21.8h, v22.8h\n"
"uzp1 v16.8h, v16.8h, v17.8h\n"
@@ -3475,152 +3487,152 @@ void a64_hybrid_s8qs_mmla_6x16 (
"uzp1 v24.16b, v24.16b, v25.16b\n"
"bge 173f\n"
"tbz x10, #3, 168f\n"
- "str d31, [x11], #0x8\n"
- "str d8, [x24], #0x8\n"
- "str d15, [x23], #0x8\n"
- "str d16, [x22], #0x8\n"
- "str d23, [x21], #0x8\n"
- "str d24, [x20], #0x8\n"
+ "str d31, [x9], #0x8\n"
+ "str d8, [x23], #0x8\n"
+ "str d15, [x22], #0x8\n"
+ "str d16, [x21], #0x8\n"
+ "str d23, [x20], #0x8\n"
+ "str d24, [x19], #0x8\n"
"tbz x10, #2, 166f\n"
- "st1 { v31.s }[2], [x11], #0x4\n"
- "st1 { v8.s }[2], [x24], #0x4\n"
- "st1 { v15.s }[2], [x23], #0x4\n"
- "st1 { v16.s }[2], [x22], #0x4\n"
- "st1 { v23.s }[2], [x21], #0x4\n"
- "st1 { v24.s }[2], [x20], #0x4\n"
+ "st1 { v31.s }[2], [x9], #0x4\n"
+ "st1 { v8.s }[2], [x23], #0x4\n"
+ "st1 { v15.s }[2], [x22], #0x4\n"
+ "st1 { v16.s }[2], [x21], #0x4\n"
+ "st1 { v23.s }[2], [x20], #0x4\n"
+ "st1 { v24.s }[2], [x19], #0x4\n"
"tbz x10, #1, 165f\n"
- "st1 { v31.h }[6], [x11], #0x2\n"
- "st1 { v8.h }[6], [x24], #0x2\n"
- "st1 { v15.h }[6], [x23], #0x2\n"
- "st1 { v16.h }[6], [x22], #0x2\n"
- "st1 { v23.h }[6], [x21], #0x2\n"
- "st1 { v24.h }[6], [x20], #0x2\n"
+ "st1 { v31.h }[6], [x9], #0x2\n"
+ "st1 { v8.h }[6], [x23], #0x2\n"
+ "st1 { v15.h }[6], [x22], #0x2\n"
+ "st1 { v16.h }[6], [x21], #0x2\n"
+ "st1 { v23.h }[6], [x20], #0x2\n"
+ "st1 { v24.h }[6], [x19], #0x2\n"
"tbz x10, #0, 172f\n"
- "st1 { v31.b }[14], [x11]\n"
- "st1 { v8.b }[14], [x24]\n"
- "st1 { v15.b }[14], [x23]\n"
- "st1 { v16.b }[14], [x22]\n"
- "st1 { v23.b }[14], [x21]\n"
- "st1 { v24.b }[14], [x20]\n"
+ "st1 { v31.b }[14], [x9]\n"
+ "st1 { v8.b }[14], [x23]\n"
+ "st1 { v15.b }[14], [x22]\n"
+ "st1 { v16.b }[14], [x21]\n"
+ "st1 { v23.b }[14], [x20]\n"
+ "st1 { v24.b }[14], [x19]\n"
"b 172f\n"
"165:" // Height 6: Partial direct writeback: partial_1_12
"tbz x10, #0, 172f\n"
- "st1 { v31.b }[12], [x11]\n"
- "st1 { v8.b }[12], [x24]\n"
- "st1 { v15.b }[12], [x23]\n"
- "st1 { v16.b }[12], [x22]\n"
- "st1 { v23.b }[12], [x21]\n"
- "st1 { v24.b }[12], [x20]\n"
+ "st1 { v31.b }[12], [x9]\n"
+ "st1 { v8.b }[12], [x23]\n"
+ "st1 { v15.b }[12], [x22]\n"
+ "st1 { v16.b }[12], [x21]\n"
+ "st1 { v23.b }[12], [x20]\n"
+ "st1 { v24.b }[12], [x19]\n"
"b 172f\n"
"166:" // Height 6: Partial direct writeback: partial_2_8
"tbz x10, #1, 167f\n"
- "st1 { v31.h }[4], [x11], #0x2\n"
- "st1 { v8.h }[4], [x24], #0x2\n"
- "st1 { v15.h }[4], [x23], #0x2\n"
- "st1 { v16.h }[4], [x22], #0x2\n"
- "st1 { v23.h }[4], [x21], #0x2\n"
- "st1 { v24.h }[4], [x20], #0x2\n"
+ "st1 { v31.h }[4], [x9], #0x2\n"
+ "st1 { v8.h }[4], [x23], #0x2\n"
+ "st1 { v15.h }[4], [x22], #0x2\n"
+ "st1 { v16.h }[4], [x21], #0x2\n"
+ "st1 { v23.h }[4], [x20], #0x2\n"
+ "st1 { v24.h }[4], [x19], #0x2\n"
"tbz x10, #0, 172f\n"
- "st1 { v31.b }[10], [x11]\n"
- "st1 { v8.b }[10], [x24]\n"
- "st1 { v15.b }[10], [x23]\n"
- "st1 { v16.b }[10], [x22]\n"
- "st1 { v23.b }[10], [x21]\n"
- "st1 { v24.b }[10], [x20]\n"
+ "st1 { v31.b }[10], [x9]\n"
+ "st1 { v8.b }[10], [x23]\n"
+ "st1 { v15.b }[10], [x22]\n"
+ "st1 { v16.b }[10], [x21]\n"
+ "st1 { v23.b }[10], [x20]\n"
+ "st1 { v24.b }[10], [x19]\n"
"b 172f\n"
"167:" // Height 6: Partial direct writeback: partial_1_8
"tbz x10, #0, 172f\n"
- "st1 { v31.b }[8], [x11]\n"
- "st1 { v8.b }[8], [x24]\n"
- "st1 { v15.b }[8], [x23]\n"
- "st1 { v16.b }[8], [x22]\n"
- "st1 { v23.b }[8], [x21]\n"
- "st1 { v24.b }[8], [x20]\n"
+ "st1 { v31.b }[8], [x9]\n"
+ "st1 { v8.b }[8], [x23]\n"
+ "st1 { v15.b }[8], [x22]\n"
+ "st1 { v16.b }[8], [x21]\n"
+ "st1 { v23.b }[8], [x20]\n"
+ "st1 { v24.b }[8], [x19]\n"
"b 172f\n"
"168:" // Height 6: Partial direct writeback: partial_4_0
"tbz x10, #2, 170f\n"
- "str s31, [x11], #0x4\n"
- "str s8, [x24], #0x4\n"
- "str s15, [x23], #0x4\n"
- "str s16, [x22], #0x4\n"
- "str s23, [x21], #0x4\n"
- "str s24, [x20], #0x4\n"
+ "str s31, [x9], #0x4\n"
+ "str s8, [x23], #0x4\n"
+ "str s15, [x22], #0x4\n"
+ "str s16, [x21], #0x4\n"
+ "str s23, [x20], #0x4\n"
+ "str s24, [x19], #0x4\n"
"tbz x10, #1, 169f\n"
- "st1 { v31.h }[2], [x11], #0x2\n"
- "st1 { v8.h }[2], [x24], #0x2\n"
- "st1 { v15.h }[2], [x23], #0x2\n"
- "st1 { v16.h }[2], [x22], #0x2\n"
- "st1 { v23.h }[2], [x21], #0x2\n"
- "st1 { v24.h }[2], [x20], #0x2\n"
+ "st1 { v31.h }[2], [x9], #0x2\n"
+ "st1 { v8.h }[2], [x23], #0x2\n"
+ "st1 { v15.h }[2], [x22], #0x2\n"
+ "st1 { v16.h }[2], [x21], #0x2\n"
+ "st1 { v23.h }[2], [x20], #0x2\n"
+ "st1 { v24.h }[2], [x19], #0x2\n"
"tbz x10, #0, 172f\n"
- "st1 { v31.b }[6], [x11]\n"
- "st1 { v8.b }[6], [x24]\n"
- "st1 { v15.b }[6], [x23]\n"
- "st1 { v16.b }[6], [x22]\n"
- "st1 { v23.b }[6], [x21]\n"
- "st1 { v24.b }[6], [x20]\n"
+ "st1 { v31.b }[6], [x9]\n"
+ "st1 { v8.b }[6], [x23]\n"
+ "st1 { v15.b }[6], [x22]\n"
+ "st1 { v16.b }[6], [x21]\n"
+ "st1 { v23.b }[6], [x20]\n"
+ "st1 { v24.b }[6], [x19]\n"
"b 172f\n"
"169:" // Height 6: Partial direct writeback: partial_1_4
"tbz x10, #0, 172f\n"
- "st1 { v31.b }[4], [x11]\n"
- "st1 { v8.b }[4], [x24]\n"
- "st1 { v15.b }[4], [x23]\n"
- "st1 { v16.b }[4], [x22]\n"
- "st1 { v23.b }[4], [x21]\n"
- "st1 { v24.b }[4], [x20]\n"
+ "st1 { v31.b }[4], [x9]\n"
+ "st1 { v8.b }[4], [x23]\n"
+ "st1 { v15.b }[4], [x22]\n"
+ "st1 { v16.b }[4], [x21]\n"
+ "st1 { v23.b }[4], [x20]\n"
+ "st1 { v24.b }[4], [x19]\n"
"b 172f\n"
"170:" // Height 6: Partial direct writeback: partial_2_0
"tbz x10, #1, 171f\n"
- "str h31, [x11], #0x2\n"
- "str h8, [x24], #0x2\n"
- "str h15, [x23], #0x2\n"
- "str h16, [x22], #0x2\n"
- "str h23, [x21], #0x2\n"
- "str h24, [x20], #0x2\n"
+ "str h31, [x9], #0x2\n"
+ "str h8, [x23], #0x2\n"
+ "str h15, [x22], #0x2\n"
+ "str h16, [x21], #0x2\n"
+ "str h23, [x20], #0x2\n"
+ "str h24, [x19], #0x2\n"
"tbz x10, #0, 172f\n"
- "st1 { v31.b }[2], [x11]\n"
- "st1 { v8.b }[2], [x24]\n"
- "st1 { v15.b }[2], [x23]\n"
- "st1 { v16.b }[2], [x22]\n"
- "st1 { v23.b }[2], [x21]\n"
- "st1 { v24.b }[2], [x20]\n"
+ "st1 { v31.b }[2], [x9]\n"
+ "st1 { v8.b }[2], [x23]\n"
+ "st1 { v15.b }[2], [x22]\n"
+ "st1 { v16.b }[2], [x21]\n"
+ "st1 { v23.b }[2], [x20]\n"
+ "st1 { v24.b }[2], [x19]\n"
"b 172f\n"
"171:" // Height 6: Partial direct writeback: partial_1_0
- "str b31, [x11, #0x0]\n"
- "str b8, [x24, #0x0]\n"
- "str b15, [x23, #0x0]\n"
- "str b16, [x22, #0x0]\n"
- "str b23, [x21, #0x0]\n"
- "str b24, [x20, #0x0]\n"
+ "str b31, [x9, #0x0]\n"
+ "str b8, [x23, #0x0]\n"
+ "str b15, [x22, #0x0]\n"
+ "str b16, [x21, #0x0]\n"
+ "str b23, [x20, #0x0]\n"
+ "str b24, [x19, #0x0]\n"
"172:" // Height 6: Partial direct writeback: Done
"b 174f\n"
"173:" // Height 6: Full writeback
- "str q31, [x11, #0x0]\n"
- "add x11, x11, #0x10\n"
- "str q8, [x24, #0x0]\n"
- "str q15, [x23, #0x0]\n"
- "str q16, [x22, #0x0]\n"
- "str q23, [x21, #0x0]\n"
- "str q24, [x20, #0x0]\n"
+ "str q31, [x9, #0x0]\n"
+ "add x9, x9, #0x10\n"
+ "str q8, [x23, #0x0]\n"
+ "str q15, [x22, #0x0]\n"
+ "str q16, [x21, #0x0]\n"
+ "str q23, [x20, #0x0]\n"
+ "str q24, [x19, #0x0]\n"
"174:" // Height 6: Writeback done
"subs x10, x10, #0x10\n"
"bgt 147b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 176f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 175f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"175:" // Update direct input
- "mov x20, #0x6\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x6\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"176:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [flags] "r" (flags), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_multiplier_ptr] "I" (offsetof(KernelArgs, multiplier_ptr)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_shift_ptr] "I" (offsetof(KernelArgs, shift_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16/a55.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16/a55.cpp
index 8046b2ebb0..3817785a79 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16/a55.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16/a55.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -87,73 +87,73 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"cmp %x[M], #0x2\n"
"bgt 69f\n"
"beq 35f\n"
- "ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x17, %x[output_ptr]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x16, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x15, %x[output_ptr]\n"
"2:" // Height 1: Column loop
"tbz %x[flags], #0, 12f\n"
- "cmp x8, #0x10\n"
+ "cmp x17, #0x10\n"
"bge 11f\n"
- "tbz x8, #3, 6f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
- "ld1 { v9.4s }, [x17], #0x10\n"
- "tbz x8, #2, 4f\n"
- "ld1 { v10.4s }, [x17], #0x10\n"
- "tbz x8, #1, 3f\n"
- "ldr d11, [x17], #0x8\n"
- "mov x25, #0x38\n"
- "tbz x8, #0, 10f\n"
- "ld1 { v11.s }[2], [x17]\n"
+ "tbz x17, #3, 6f\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
+ "ld1 { v9.4s }, [x15], #0x10\n"
+ "tbz x17, #2, 4f\n"
+ "ld1 { v10.4s }, [x15], #0x10\n"
+ "tbz x17, #1, 3f\n"
+ "mov x24, #0x38\n"
+ "ldr d11, [x15], #0x8\n"
+ "tbz x17, #0, 10f\n"
+ "ld1 { v11.s }[2], [x15]\n"
"b 10f\n"
"3:" // Height 1: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x8, #0, 10f\n"
- "ldr s11, [x17, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x17, #0, 10f\n"
+ "ldr s11, [x15, #0x0]\n"
"b 10f\n"
"4:" // Height 1: Partial accumulate: partial_2_8
- "tbz x8, #1, 5f\n"
- "ldr d10, [x17], #0x8\n"
- "mov x25, #0x28\n"
- "tbz x8, #0, 10f\n"
- "ld1 { v10.s }[2], [x17]\n"
+ "tbz x17, #1, 5f\n"
+ "ldr d10, [x15], #0x8\n"
+ "mov x24, #0x28\n"
+ "tbz x17, #0, 10f\n"
+ "ld1 { v10.s }[2], [x15]\n"
"b 10f\n"
"5:" // Height 1: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x8, #0, 10f\n"
- "ldr s10, [x17, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x17, #0, 10f\n"
+ "ldr s10, [x15, #0x0]\n"
"b 10f\n"
"6:" // Height 1: Partial accumulate: partial_4_0
- "tbz x8, #2, 8f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
- "tbz x8, #1, 7f\n"
- "ldr d9, [x17], #0x8\n"
- "mov x25, #0x18\n"
- "tbz x8, #0, 10f\n"
- "ld1 { v9.s }[2], [x17]\n"
+ "tbz x17, #2, 8f\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
+ "tbz x17, #1, 7f\n"
+ "mov x24, #0x18\n"
+ "ldr d9, [x15], #0x8\n"
+ "tbz x17, #0, 10f\n"
+ "ld1 { v9.s }[2], [x15]\n"
"b 10f\n"
"7:" // Height 1: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x8, #0, 10f\n"
- "ldr s9, [x17, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x17, #0, 10f\n"
+ "ldr s9, [x15, #0x0]\n"
"b 10f\n"
"8:" // Height 1: Partial accumulate: partial_2_0
- "tbz x8, #1, 9f\n"
- "ldr d8, [x17], #0x8\n"
- "mov x25, #0x8\n"
- "tbz x8, #0, 10f\n"
- "ld1 { v8.s }[2], [x17]\n"
+ "tbz x17, #1, 9f\n"
+ "ldr d8, [x15], #0x8\n"
+ "mov x24, #0x8\n"
+ "tbz x17, #0, 10f\n"
+ "ld1 { v8.s }[2], [x15]\n"
"b 10f\n"
"9:" // Height 1: Partial accumulate: partial_1_0
- "ldr s8, [x17, #0x0]\n"
- "mov x25, #0x0\n"
+ "ldr s8, [x15, #0x0]\n"
+ "mov x24, #0x0\n"
"10:" // Height 1: Partial accumulate: Done
- "sub x17, x17, x25\n"
+ "sub x15, x15, x24\n"
"b 13f\n"
"11:" // Height 1: full accumulate
- "ldr q8, [x17, #0x0]\n"
- "ldr q9, [x17, #0x10]\n"
- "ldr q10, [x17, #0x20]\n"
- "ldr q11, [x17, #0x30]\n"
+ "ldr q8, [x15, #0x0]\n"
+ "ldr q9, [x15, #0x10]\n"
+ "ldr q10, [x15, #0x20]\n"
+ "ldr q11, [x15, #0x30]\n"
"b 13f\n"
"12:" // Height 1: no accumulate
"movi v8.4s, #0x0\n"
@@ -161,109 +161,112 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"movi v10.4s, #0x0\n"
"movi v11.4s, #0x0\n"
"13:" // Height 1: setup done
- "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
"14:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w13, [x20, x14, LSL #0x2]\n"
"tbz %x[flags], #3, 15f\n"
- "ldr x21, [%x[input_ptr], x15, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x13, [x21, #0x0]\n"
- "cbnz x15, 16f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x13, x13, x20\n"
+ "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x12, [x20, #0x0]\n"
+ "cbnz x14, 16f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x12, x12, x19\n"
"b 16f\n"
"15:" // Height 1: setup direct input
- "mov x13, %x[input_ptr]\n"
+ "mov x12, %x[input_ptr]\n"
"16:" // Height 1: input setup done
- "cmp x14, #0x10\n"
+ "cmp x13, #0x10\n"
"blt 19f\n"
- "ldr q0, [x13, #0x0]\n"
- "cmp x14, #0x20\n"
+ "ldr q0, [x12, #0x0]\n"
"ldr q6, [x16, #0x0]\n"
- "ldr q7, [x16, #0x10]\n"
+ "cmp x13, #0x20\n"
"blt 18f\n"
"17:" // Height 1: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr d7, [x16, #0x10]\n"
+ "ldr x11, [x16, #0x18]\n"
+ "add x12, x12, #0x10\n"
"ldr d6, [x16, #0x20]\n"
- "ldr x12, [x16, #0x28]\n"
+ "sub x13, x13, #0x10\n"
+ "ldr x10, [x16, #0x28]\n"
+ "cmp x13, #0x20\n"
+ "mov v7.d[1], x11\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
+ "ldr x11, [x16, #0x38]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x16, #0x30]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x16, #0x38]\n"
- "mov v7.d[1], x11\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
"ldr d6, [x16, #0x40]\n"
- "ldr x12, [x16, #0x48]\n"
+ "ldr x10, [x16, #0x48]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x16, #0x58]\n"
+ "ldr x9, [x12, #0x8]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x16, #0x50]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x16, #0x58]\n"
- "mov v7.d[1], x11\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
"ldr d6, [x16, #0x60]\n"
- "ldr x12, [x16, #0x68]\n"
+ "ldr x10, [x16, #0x68]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x16, #0x78]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x16, #0x70]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x16, #0x78]\n"
- "mov v7.d[1], x11\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
"ldr d6, [x16, #0x80]\n"
- "ldr x12, [x16, #0x88]\n"
+ "ldr x10, [x16, #0x88]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x16, #0x98]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x16, #0x90]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x16, #0x98]\n"
- "mov v7.d[1], x11\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
"ldr d6, [x16, #0xa0]\n"
- "ldr x12, [x16, #0xa8]\n"
+ "ldr x10, [x16, #0xa8]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x16, #0xb8]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x16, #0xb0]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x16, #0xb8]\n"
- "mov v7.d[1], x11\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
"ldr d6, [x16, #0xc0]\n"
- "ldr x12, [x16, #0xc8]\n"
+ "ldr x10, [x16, #0xc8]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x16, #0xd8]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x16, #0xd0]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x16, #0xd8]\n"
- "mov v7.d[1], x11\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
"ldr d6, [x16, #0xe0]\n"
- "ldr x12, [x16, #0xe8]\n"
+ "ldr x10, [x16, #0xe8]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x16, #0xf8]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x16, #0xf0]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x16, #0xf8]\n"
- "mov v7.d[1], x11\n"
- "add x13, x13, #0x10\n"
"add x16, x16, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
"ldr d6, [x16, #0x0]\n"
- "ldr x12, [x16, #0x8]\n"
- ".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
- "ldr d0, [x13, #0x0]\n"
- "sub x14, x14, #0x10\n"
- "ldr d7, [x16, #0x10]\n"
- "cmp x14, #0x20\n"
- "ldr x10, [x13, #0x8]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x16, #0x18]\n"
- "mov v0.d[1], x10\n"
+ "ldr x10, [x16, #0x8]\n"
"mov v7.d[1], x11\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ ".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
+ "mov v6.d[1], x10\n"
+ "ldr d0, [x12, #0x0]\n"
+ "mov v0.d[1], x9\n"
"bge 17b\n"
"18:" // Height 1: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x16, #0x10]\n"
"ldr q6, [x16, #0x20]\n"
+ "sub x13, x13, #0x10\n"
+ "add x12, x12, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "ldr q7, [x16, #0x30]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x16, #0x30]\n"
"ldr q6, [x16, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
"ldr q7, [x16, #0x50]\n"
@@ -287,203 +290,200 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"ldr q6, [x16, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
"ldr q7, [x16, #0xf0]\n"
- "add x13, x13, #0x10\n"
- "sub x14, x14, #0x10\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
- ".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
"add x16, x16, #0x100\n"
+ ".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
"19:" // Height 1: Multiply loop: Main loop skip
- "cbz x14, 24f\n"
- "cmp x14, #0x4\n"
+ "cbz x13, 24f\n"
+ "cmp x13, #0x4\n"
"blt 21f\n"
"20:" // Height 1: Multiply loop: Odd block loop
- "ldr s0, [x13], #0x4\n"
- "sub x14, x14, #0x4\n"
+ "ldr s0, [x12], #0x4\n"
+ "sub x13, x13, #0x4\n"
"ldr q6, [x16, #0x0]\n"
- ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "cmp x13, #0x4\n"
"ldr q7, [x16, #0x10]\n"
- ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
+ ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q6, [x16, #0x20]\n"
- "cmp x14, #0x4\n"
+ ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
"ldr q7, [x16, #0x30]\n"
+ "add x16, x16, #0x40\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
- "add x16, x16, #0x40\n"
"bge 20b\n"
+ "cbz x13, 24f\n"
"21:" // Height 1: Multiply loop: Skip odd blocks
- "cbz x14, 24f\n"
- "tbz x14, #1, 22f\n"
- "ldr h0, [x13], #0x2\n"
- "tbz x14, #0, 23f\n"
- "ld1 { v0.b }[2], [x13]\n"
+ "tbz x13, #1, 22f\n"
+ "ldr h0, [x12], #0x2\n"
+ "tbz x13, #0, 23f\n"
+ "ld1 { v0.b }[2], [x12]\n"
"b 23f\n"
"22:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x13, #0x0]\n"
+ "ldr b0, [x12, #0x0]\n"
"23:" // Height 1: Multiply loop: Ragged operand read: Done
"ldr q6, [x16, #0x0]\n"
- ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x16, #0x10]\n"
- ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
+ ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q6, [x16, #0x20]\n"
- ".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
+ ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
"ldr q7, [x16, #0x30]\n"
- ".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
"add x16, x16, #0x40\n"
+ ".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
+ ".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
"24:" // Height 1: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x15, x15, #0x1\n"
- "cmp x15, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x14, x14, #0x1\n"
+ "cmp x14, x19\n"
"bne 14b\n"
- "cmp x8, #0x10\n"
- "prfm pstl1keep, [x17, #0x0]\n"
+ "prfm pstl1keep, [x15, #0x0]\n"
+ "cmp x17, #0x10\n"
"bge 33f\n"
- "tbz x8, #3, 28f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v9.4s }, [x17], #0x10\n"
- "tbz x8, #2, 26f\n"
- "st1 { v10.4s }, [x17], #0x10\n"
- "tbz x8, #1, 25f\n"
- "str d11, [x17], #0x8\n"
- "tbz x8, #0, 32f\n"
- "st1 { v11.s }[2], [x17]\n"
+ "tbz x17, #3, 28f\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
+ "st1 { v9.4s }, [x15], #0x10\n"
+ "tbz x17, #2, 26f\n"
+ "st1 { v10.4s }, [x15], #0x10\n"
+ "tbz x17, #1, 25f\n"
+ "str d11, [x15], #0x8\n"
+ "tbz x17, #0, 32f\n"
+ "st1 { v11.s }[2], [x15]\n"
"b 32f\n"
"25:" // Height 1: Partial direct writeback: partial_1_12
- "tbz x8, #0, 32f\n"
- "str s11, [x17, #0x0]\n"
+ "tbz x17, #0, 32f\n"
+ "str s11, [x15, #0x0]\n"
"b 32f\n"
"26:" // Height 1: Partial direct writeback: partial_2_8
- "tbz x8, #1, 27f\n"
- "str d10, [x17], #0x8\n"
- "tbz x8, #0, 32f\n"
- "st1 { v10.s }[2], [x17]\n"
+ "tbz x17, #1, 27f\n"
+ "str d10, [x15], #0x8\n"
+ "tbz x17, #0, 32f\n"
+ "st1 { v10.s }[2], [x15]\n"
"b 32f\n"
"27:" // Height 1: Partial direct writeback: partial_1_8
- "tbz x8, #0, 32f\n"
- "str s10, [x17, #0x0]\n"
+ "tbz x17, #0, 32f\n"
+ "str s10, [x15, #0x0]\n"
"b 32f\n"
"28:" // Height 1: Partial direct writeback: partial_4_0
- "tbz x8, #2, 30f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "tbz x8, #1, 29f\n"
- "str d9, [x17], #0x8\n"
- "tbz x8, #0, 32f\n"
- "st1 { v9.s }[2], [x17]\n"
+ "tbz x17, #2, 30f\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
+ "tbz x17, #1, 29f\n"
+ "str d9, [x15], #0x8\n"
+ "tbz x17, #0, 32f\n"
+ "st1 { v9.s }[2], [x15]\n"
"b 32f\n"
"29:" // Height 1: Partial direct writeback: partial_1_4
- "tbz x8, #0, 32f\n"
- "str s9, [x17, #0x0]\n"
+ "tbz x17, #0, 32f\n"
+ "str s9, [x15, #0x0]\n"
"b 32f\n"
"30:" // Height 1: Partial direct writeback: partial_2_0
- "tbz x8, #1, 31f\n"
- "str d8, [x17], #0x8\n"
- "tbz x8, #0, 32f\n"
- "st1 { v8.s }[2], [x17]\n"
+ "tbz x17, #1, 31f\n"
+ "str d8, [x15], #0x8\n"
+ "tbz x17, #0, 32f\n"
+ "st1 { v8.s }[2], [x15]\n"
"b 32f\n"
"31:" // Height 1: Partial direct writeback: partial_1_0
- "str s8, [x17, #0x0]\n"
+ "str s8, [x15, #0x0]\n"
"32:" // Height 1: Partial direct writeback: Done
"b 34f\n"
"33:" // Height 1: Full writeback
- "str q8, [x17, #0x0]\n"
- "str q9, [x17, #0x10]\n"
- "str q10, [x17, #0x20]\n"
- "str q11, [x17, #0x30]\n"
- "add x17, x17, #0x40\n"
+ "str q8, [x15, #0x0]\n"
+ "str q9, [x15, #0x10]\n"
+ "str q10, [x15, #0x20]\n"
+ "str q11, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
"34:" // Height 1: Writeback done
- "subs x8, x8, #0x10\n"
+ "subs x17, x17, #0x10\n"
"bgt 2b\n"
"b 206f\n"
"35:" // Height 2
- "ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x17, %x[output_ptr]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x15, %x[output_ptr]\n"
"ldr x16, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"36:" // Height 2: Column loop
"tbz %x[flags], #0, 46f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "cmp x8, #0x10\n"
- "add x24, x17, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x17, #0x10\n"
+ "add x23, x15, x19, LSL #2\n"
"bge 45f\n"
- "tbz x8, #3, 40f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v9.4s }, [x17], #0x10\n"
- "ld1 { v13.4s }, [x24], #0x10\n"
- "tbz x8, #2, 38f\n"
- "ld1 { v10.4s }, [x17], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "tbz x8, #1, 37f\n"
- "ldr d11, [x17], #0x8\n"
- "mov x25, #0x38\n"
- "ldr d15, [x24], #0x8\n"
- "tbz x8, #0, 44f\n"
- "ld1 { v11.s }[2], [x17]\n"
- "ld1 { v15.s }[2], [x24]\n"
+ "tbz x17, #3, 40f\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v9.4s }, [x15], #0x10\n"
+ "ld1 { v13.4s }, [x23], #0x10\n"
+ "tbz x17, #2, 38f\n"
+ "ld1 { v10.4s }, [x15], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "tbz x17, #1, 37f\n"
+ "mov x24, #0x38\n"
+ "ldr d11, [x15], #0x8\n"
+ "ldr d15, [x23], #0x8\n"
+ "tbz x17, #0, 44f\n"
+ "ld1 { v11.s }[2], [x15]\n"
+ "ld1 { v15.s }[2], [x23]\n"
"b 44f\n"
"37:" // Height 2: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x8, #0, 44f\n"
- "ldr s11, [x17, #0x0]\n"
- "ldr s15, [x24, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x17, #0, 44f\n"
+ "ldr s11, [x15, #0x0]\n"
+ "ldr s15, [x23, #0x0]\n"
"b 44f\n"
"38:" // Height 2: Partial accumulate: partial_2_8
- "tbz x8, #1, 39f\n"
- "ldr d10, [x17], #0x8\n"
- "mov x25, #0x28\n"
- "ldr d14, [x24], #0x8\n"
- "tbz x8, #0, 44f\n"
- "ld1 { v10.s }[2], [x17]\n"
- "ld1 { v14.s }[2], [x24]\n"
+ "tbz x17, #1, 39f\n"
+ "ldr d10, [x15], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
+ "mov x24, #0x28\n"
+ "tbz x17, #0, 44f\n"
+ "ld1 { v10.s }[2], [x15]\n"
+ "ld1 { v14.s }[2], [x23]\n"
"b 44f\n"
"39:" // Height 2: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x8, #0, 44f\n"
- "ldr s10, [x17, #0x0]\n"
- "ldr s14, [x24, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x17, #0, 44f\n"
+ "ldr s10, [x15, #0x0]\n"
+ "ldr s14, [x23, #0x0]\n"
"b 44f\n"
"40:" // Height 2: Partial accumulate: partial_4_0
- "tbz x8, #2, 42f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "tbz x8, #1, 41f\n"
- "ldr d9, [x17], #0x8\n"
- "mov x25, #0x18\n"
- "ldr d13, [x24], #0x8\n"
- "tbz x8, #0, 44f\n"
- "ld1 { v9.s }[2], [x17]\n"
- "ld1 { v13.s }[2], [x24]\n"
+ "tbz x17, #2, 42f\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "tbz x17, #1, 41f\n"
+ "mov x24, #0x18\n"
+ "ldr d9, [x15], #0x8\n"
+ "ldr d13, [x23], #0x8\n"
+ "tbz x17, #0, 44f\n"
+ "ld1 { v9.s }[2], [x15]\n"
+ "ld1 { v13.s }[2], [x23]\n"
"b 44f\n"
"41:" // Height 2: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x8, #0, 44f\n"
- "ldr s9, [x17, #0x0]\n"
- "ldr s13, [x24, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x17, #0, 44f\n"
+ "ldr s9, [x15, #0x0]\n"
+ "ldr s13, [x23, #0x0]\n"
"b 44f\n"
"42:" // Height 2: Partial accumulate: partial_2_0
- "tbz x8, #1, 43f\n"
- "ldr d8, [x17], #0x8\n"
- "mov x25, #0x8\n"
- "ldr d12, [x24], #0x8\n"
- "tbz x8, #0, 44f\n"
- "ld1 { v8.s }[2], [x17]\n"
- "ld1 { v12.s }[2], [x24]\n"
+ "tbz x17, #1, 43f\n"
+ "ldr d8, [x15], #0x8\n"
+ "ldr d12, [x23], #0x8\n"
+ "mov x24, #0x8\n"
+ "tbz x17, #0, 44f\n"
+ "ld1 { v8.s }[2], [x15]\n"
+ "ld1 { v12.s }[2], [x23]\n"
"b 44f\n"
"43:" // Height 2: Partial accumulate: partial_1_0
- "ldr s8, [x17, #0x0]\n"
- "mov x25, #0x0\n"
- "ldr s12, [x24, #0x0]\n"
+ "ldr s8, [x15, #0x0]\n"
+ "mov x24, #0x0\n"
+ "ldr s12, [x23, #0x0]\n"
"44:" // Height 2: Partial accumulate: Done
- "sub x17, x17, x25\n"
+ "sub x15, x15, x24\n"
"b 47f\n"
"45:" // Height 2: full accumulate
- "ldr q8, [x17, #0x0]\n"
- "ldr q9, [x17, #0x10]\n"
- "ldr q10, [x17, #0x20]\n"
- "ldr q11, [x17, #0x30]\n"
- "ldr q12, [x24, #0x0]\n"
- "ldr q13, [x24, #0x10]\n"
- "ldr q14, [x24, #0x20]\n"
- "ldr q15, [x24, #0x30]\n"
+ "ldr q8, [x15, #0x0]\n"
+ "ldr q9, [x15, #0x10]\n"
+ "ldr q10, [x15, #0x20]\n"
+ "ldr q11, [x15, #0x30]\n"
+ "ldr q12, [x23, #0x0]\n"
+ "ldr q13, [x23, #0x10]\n"
+ "ldr q14, [x23, #0x20]\n"
+ "ldr q15, [x23, #0x30]\n"
"b 47f\n"
"46:" // Height 2: no accumulate
"movi v8.4s, #0x0\n"
@@ -495,147 +495,147 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"movi v14.4s, #0x0\n"
"movi v15.4s, #0x0\n"
"47:" // Height 2: setup done
- "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
"48:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w13, [x20, x14, LSL #0x2]\n"
"tbz %x[flags], #3, 49f\n"
- "ldr x21, [%x[input_ptr], x15, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x13, [x21, #0x0]\n"
- "ldr x9, [x21, #0x8]\n"
- "cbnz x15, 50f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x13, x13, x20\n"
- "add x9, x9, x20\n"
+ "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x12, [x20, #0x0]\n"
+ "ldr x28, [x20, #0x8]\n"
+ "cbnz x14, 50f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x12, x12, x19\n"
+ "add x28, x28, x19\n"
"b 50f\n"
"49:" // Height 2: setup direct input
- "mov x13, %x[input_ptr]\n"
- "add x9, x13, x20\n"
+ "mov x12, %x[input_ptr]\n"
+ "add x28, x12, x19\n"
"50:" // Height 2: input setup done
- "cmp x14, #0x10\n"
+ "cmp x13, #0x10\n"
"blt 53f\n"
- "ldr q0, [x13, #0x0]\n"
- "cmp x14, #0x20\n"
- "ldr q1, [x9, #0x0]\n"
+ "ldr q0, [x12, #0x0]\n"
+ "ldr q1, [x28, #0x0]\n"
+ "cmp x13, #0x20\n"
"ldr q6, [x16, #0x0]\n"
- "ldr q7, [x16, #0x10]\n"
"blt 52f\n"
"51:" // Height 2: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr x12, [x16, #0x28]\n"
+ "ldr d7, [x16, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
+ "ldr x11, [x16, #0x18]\n"
"ldr d6, [x16, #0x20]\n"
+ "add x12, x12, #0x10\n"
+ "ldr x10, [x16, #0x28]\n"
+ "add x28, x28, #0x10\n"
+ "mov v7.d[1], x11\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
+ "sub x13, x13, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "ldr x11, [x16, #0x38]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
"ldr d7, [x16, #0x30]\n"
- "mov v6.d[1], x12\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
- "mov v7.d[1], x11\n"
+ "ldr x11, [x16, #0x38]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
"ldr d6, [x16, #0x40]\n"
+ "ldr x10, [x16, #0x48]\n"
+ "cmp x13, #0x20\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x16, #0x58]\n"
+ "ldr x9, [x12, #0x8]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
- "ldr x12, [x16, #0x48]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
"ldr d7, [x16, #0x50]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x16, #0x58]\n"
- "mov v7.d[1], x11\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
- "ldr x12, [x16, #0x68]\n"
+ "ldr x10, [x16, #0x68]\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
"ldr d6, [x16, #0x60]\n"
- ".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
+ "mov v7.d[1], x11\n"
"ldr x11, [x16, #0x78]\n"
+ "ldr x27, [x28, #0x8]\n"
+ ".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
"ldr d7, [x16, #0x70]\n"
- "mov v6.d[1], x12\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
- "mov v7.d[1], x11\n"
+ "ldr x10, [x16, #0x88]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
"ldr d6, [x16, #0x80]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x16, #0x98]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
- "ldr x12, [x16, #0x88]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
"ldr d7, [x16, #0x90]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x16, #0x98]\n"
- "mov v7.d[1], x11\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
- "ldr x12, [x16, #0xa8]\n"
+ "ldr x10, [x16, #0xa8]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
"ldr d6, [x16, #0xa0]\n"
- ".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
+ "mov v7.d[1], x11\n"
"ldr x11, [x16, #0xb8]\n"
+ ".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
"ldr d7, [x16, #0xb0]\n"
- "mov v6.d[1], x12\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
- "mov v7.d[1], x11\n"
+ "ldr x10, [x16, #0xc8]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
"ldr d6, [x16, #0xc0]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x16, #0xd8]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
- "ldr x12, [x16, #0xc8]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
"ldr d7, [x16, #0xd0]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x16, #0xd8]\n"
- "mov v7.d[1], x11\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
- "ldr x12, [x16, #0xe8]\n"
+ "ldr x10, [x16, #0xe8]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
"ldr d6, [x16, #0xe0]\n"
- ".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
+ "mov v7.d[1], x11\n"
"ldr x11, [x16, #0xf8]\n"
+ ".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
"ldr d7, [x16, #0xf0]\n"
- "mov v6.d[1], x12\n"
- "add x13, x13, #0x10\n"
- "mov v7.d[1], x11\n"
- "add x9, x9, #0x10\n"
- "add x16, x16, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
+ "add x16, x16, #0x100\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
"ldr d6, [x16, #0x0]\n"
- "ldr x12, [x16, #0x8]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x10, [x16, #0x8]\n"
".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
- "ldr d0, [x13, #0x0]\n"
+ "ldr d0, [x12, #0x0]\n"
".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n"
- "ldr d1, [x9, #0x0]\n"
- "sub x14, x14, #0x10\n"
- "ldr d7, [x16, #0x10]\n"
- "cmp x14, #0x20\n"
- "ldr x10, [x13, #0x8]\n"
- "mov v6.d[1], x12\n"
- "ldr x28, [x9, #0x8]\n"
- "mov v0.d[1], x10\n"
- "ldr x11, [x16, #0x18]\n"
- "mov v1.d[1], x28\n"
- "prfm pldl1keep, [x13, #0x80]\n"
- "mov v7.d[1], x11\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "mov v6.d[1], x10\n"
+ "ldr d1, [x28, #0x0]\n"
+ "mov v0.d[1], x9\n"
+ "mov v1.d[1], x27\n"
"bge 51b\n"
"52:" // Height 2: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "add x13, x13, #0x10\n"
+ "ldr q7, [x16, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
"ldr q6, [x16, #0x20]\n"
+ "sub x13, x13, #0x10\n"
+ "add x12, x12, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
"ldr q7, [x16, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
- "sub x14, x14, #0x10\n"
+ "add x28, x28, #0x10\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "ldr q6, [x16, #0x40]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "ldr q6, [x16, #0x40]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
"ldr q7, [x16, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
"ldr q6, [x16, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
@@ -671,17 +671,17 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n"
"53:" // Height 2: Multiply loop: Main loop skip
- "cbz x14, 58f\n"
- "cmp x14, #0x4\n"
+ "cbz x13, 58f\n"
+ "cmp x13, #0x4\n"
"blt 55f\n"
"54:" // Height 2: Multiply loop: Odd block loop
- "ldr s0, [x13], #0x4\n"
- "sub x14, x14, #0x4\n"
- "ldr s1, [x9], #0x4\n"
- "cmp x14, #0x4\n"
+ "ldr s0, [x12], #0x4\n"
+ "sub x13, x13, #0x4\n"
+ "ldr s1, [x28], #0x4\n"
+ "cmp x13, #0x4\n"
"ldr q6, [x16, #0x0]\n"
- ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x16, #0x10]\n"
+ ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
"ldr q6, [x16, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
@@ -693,22 +693,22 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
"bge 54b\n"
+ "cbz x13, 58f\n"
"55:" // Height 2: Multiply loop: Skip odd blocks
- "cbz x14, 58f\n"
- "tbz x14, #1, 56f\n"
- "ldr h0, [x13], #0x2\n"
- "ldr h1, [x9], #0x2\n"
- "tbz x14, #0, 57f\n"
- "ld1 { v0.b }[2], [x13]\n"
- "ld1 { v1.b }[2], [x9]\n"
+ "tbz x13, #1, 56f\n"
+ "ldr h0, [x12], #0x2\n"
+ "ldr h1, [x28], #0x2\n"
+ "tbz x13, #0, 57f\n"
+ "ld1 { v0.b }[2], [x12]\n"
+ "ld1 { v1.b }[2], [x28]\n"
"b 57f\n"
"56:" // Height 2: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x13, #0x0]\n"
- "ldr b1, [x9, #0x0]\n"
+ "ldr b0, [x12, #0x0]\n"
+ "ldr b1, [x28, #0x0]\n"
"57:" // Height 2: Multiply loop: Ragged operand read: Done
"ldr q6, [x16, #0x0]\n"
- ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x16, #0x10]\n"
+ ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
"ldr q6, [x16, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
@@ -720,203 +720,203 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
"58:" // Height 2: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x15, x15, #0x1\n"
- "cmp x15, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x14, x14, #0x1\n"
+ "cmp x14, x19\n"
"bne 48b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
- "cmp x8, #0x10\n"
- "prfm pstl1keep, [x17, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x15, #0x0]\n"
+ "cmp x17, #0x10\n"
+ "add x23, x15, x19, LSL #2\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"bge 67f\n"
- "tbz x8, #3, 62f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v9.4s }, [x17], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v13.4s }, [x24], #0x10\n"
- "tbz x8, #2, 60f\n"
- "st1 { v10.4s }, [x17], #0x10\n"
- "st1 { v14.4s }, [x24], #0x10\n"
- "tbz x8, #1, 59f\n"
- "str d11, [x17], #0x8\n"
- "str d15, [x24], #0x8\n"
- "tbz x8, #0, 66f\n"
- "st1 { v11.s }[2], [x17]\n"
- "st1 { v15.s }[2], [x24]\n"
+ "tbz x17, #3, 62f\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
+ "st1 { v9.4s }, [x15], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v13.4s }, [x23], #0x10\n"
+ "tbz x17, #2, 60f\n"
+ "st1 { v10.4s }, [x15], #0x10\n"
+ "st1 { v14.4s }, [x23], #0x10\n"
+ "tbz x17, #1, 59f\n"
+ "str d11, [x15], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "tbz x17, #0, 66f\n"
+ "st1 { v11.s }[2], [x15]\n"
+ "st1 { v15.s }[2], [x23]\n"
"b 66f\n"
"59:" // Height 2: Partial direct writeback: partial_1_12
- "tbz x8, #0, 66f\n"
- "str s11, [x17, #0x0]\n"
- "str s15, [x24, #0x0]\n"
+ "tbz x17, #0, 66f\n"
+ "str s11, [x15, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
"b 66f\n"
"60:" // Height 2: Partial direct writeback: partial_2_8
- "tbz x8, #1, 61f\n"
- "str d10, [x17], #0x8\n"
- "str d14, [x24], #0x8\n"
- "tbz x8, #0, 66f\n"
- "st1 { v10.s }[2], [x17]\n"
- "st1 { v14.s }[2], [x24]\n"
+ "tbz x17, #1, 61f\n"
+ "str d10, [x15], #0x8\n"
+ "str d14, [x23], #0x8\n"
+ "tbz x17, #0, 66f\n"
+ "st1 { v10.s }[2], [x15]\n"
+ "st1 { v14.s }[2], [x23]\n"
"b 66f\n"
"61:" // Height 2: Partial direct writeback: partial_1_8
- "tbz x8, #0, 66f\n"
- "str s10, [x17, #0x0]\n"
- "str s14, [x24, #0x0]\n"
+ "tbz x17, #0, 66f\n"
+ "str s10, [x15, #0x0]\n"
+ "str s14, [x23, #0x0]\n"
"b 66f\n"
"62:" // Height 2: Partial direct writeback: partial_4_0
- "tbz x8, #2, 64f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "tbz x8, #1, 63f\n"
- "str d9, [x17], #0x8\n"
- "str d13, [x24], #0x8\n"
- "tbz x8, #0, 66f\n"
- "st1 { v9.s }[2], [x17]\n"
- "st1 { v13.s }[2], [x24]\n"
+ "tbz x17, #2, 64f\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "tbz x17, #1, 63f\n"
+ "str d9, [x15], #0x8\n"
+ "str d13, [x23], #0x8\n"
+ "tbz x17, #0, 66f\n"
+ "st1 { v9.s }[2], [x15]\n"
+ "st1 { v13.s }[2], [x23]\n"
"b 66f\n"
"63:" // Height 2: Partial direct writeback: partial_1_4
- "tbz x8, #0, 66f\n"
- "str s9, [x17, #0x0]\n"
- "str s13, [x24, #0x0]\n"
+ "tbz x17, #0, 66f\n"
+ "str s9, [x15, #0x0]\n"
+ "str s13, [x23, #0x0]\n"
"b 66f\n"
"64:" // Height 2: Partial direct writeback: partial_2_0
- "tbz x8, #1, 65f\n"
- "str d8, [x17], #0x8\n"
- "str d12, [x24], #0x8\n"
- "tbz x8, #0, 66f\n"
- "st1 { v8.s }[2], [x17]\n"
- "st1 { v12.s }[2], [x24]\n"
+ "tbz x17, #1, 65f\n"
+ "str d8, [x15], #0x8\n"
+ "str d12, [x23], #0x8\n"
+ "tbz x17, #0, 66f\n"
+ "st1 { v8.s }[2], [x15]\n"
+ "st1 { v12.s }[2], [x23]\n"
"b 66f\n"
"65:" // Height 2: Partial direct writeback: partial_1_0
- "str s8, [x17, #0x0]\n"
- "str s12, [x24, #0x0]\n"
+ "str s8, [x15, #0x0]\n"
+ "str s12, [x23, #0x0]\n"
"66:" // Height 2: Partial direct writeback: Done
"b 68f\n"
"67:" // Height 2: Full writeback
- "str q8, [x17, #0x0]\n"
- "str q9, [x17, #0x10]\n"
- "str q10, [x17, #0x20]\n"
- "str q11, [x17, #0x30]\n"
- "add x17, x17, #0x40\n"
- "str q12, [x24, #0x0]\n"
- "str q13, [x24, #0x10]\n"
- "str q14, [x24, #0x20]\n"
- "str q15, [x24, #0x30]\n"
+ "str q8, [x15, #0x0]\n"
+ "str q9, [x15, #0x10]\n"
+ "str q10, [x15, #0x20]\n"
+ "str q11, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
+ "str q12, [x23, #0x0]\n"
+ "str q13, [x23, #0x10]\n"
+ "str q14, [x23, #0x20]\n"
+ "str q15, [x23, #0x30]\n"
"68:" // Height 2: Writeback done
- "subs x8, x8, #0x10\n"
+ "subs x17, x17, #0x10\n"
"bgt 36b\n"
"b 206f\n"
"69:" // Height 3
- "ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x17, %x[output_ptr]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x15, %x[output_ptr]\n"
"ldr x16, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"70:" // Height 3: Column loop
"tbz %x[flags], #0, 80f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
- "cmp x8, #0x10\n"
- "add x23, x24, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x17, #0x10\n"
+ "add x23, x15, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"bge 79f\n"
- "tbz x8, #3, 74f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v9.4s }, [x17], #0x10\n"
- "ld1 { v13.4s }, [x24], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "tbz x8, #2, 72f\n"
- "ld1 { v10.4s }, [x17], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "tbz x8, #1, 71f\n"
- "ldr d11, [x17], #0x8\n"
- "mov x25, #0x38\n"
- "ldr d15, [x24], #0x8\n"
- "ldr d19, [x23], #0x8\n"
- "tbz x8, #0, 78f\n"
- "ld1 { v11.s }[2], [x17]\n"
- "ld1 { v15.s }[2], [x24]\n"
- "ld1 { v19.s }[2], [x23]\n"
+ "tbz x17, #3, 74f\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v9.4s }, [x15], #0x10\n"
+ "ld1 { v13.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "tbz x17, #2, 72f\n"
+ "ld1 { v10.4s }, [x15], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "tbz x17, #1, 71f\n"
+ "ldr d11, [x15], #0x8\n"
+ "mov x24, #0x38\n"
+ "ldr d15, [x23], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
+ "tbz x17, #0, 78f\n"
+ "ld1 { v11.s }[2], [x15]\n"
+ "ld1 { v15.s }[2], [x23]\n"
+ "ld1 { v19.s }[2], [x22]\n"
"b 78f\n"
"71:" // Height 3: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x8, #0, 78f\n"
- "ldr s11, [x17, #0x0]\n"
- "ldr s15, [x24, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x17, #0, 78f\n"
+ "ldr s11, [x15, #0x0]\n"
+ "ldr s15, [x23, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
"b 78f\n"
"72:" // Height 3: Partial accumulate: partial_2_8
- "tbz x8, #1, 73f\n"
- "ldr d10, [x17], #0x8\n"
- "mov x25, #0x28\n"
- "ldr d14, [x24], #0x8\n"
- "ldr d18, [x23], #0x8\n"
- "tbz x8, #0, 78f\n"
- "ld1 { v10.s }[2], [x17]\n"
- "ld1 { v14.s }[2], [x24]\n"
- "ld1 { v18.s }[2], [x23]\n"
+ "tbz x17, #1, 73f\n"
+ "ldr d10, [x15], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
+ "mov x24, #0x28\n"
+ "ldr d18, [x22], #0x8\n"
+ "tbz x17, #0, 78f\n"
+ "ld1 { v10.s }[2], [x15]\n"
+ "ld1 { v14.s }[2], [x23]\n"
+ "ld1 { v18.s }[2], [x22]\n"
"b 78f\n"
"73:" // Height 3: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x8, #0, 78f\n"
- "ldr s10, [x17, #0x0]\n"
- "ldr s14, [x24, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x17, #0, 78f\n"
+ "ldr s10, [x15, #0x0]\n"
+ "ldr s14, [x23, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
"b 78f\n"
"74:" // Height 3: Partial accumulate: partial_4_0
- "tbz x8, #2, 76f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "tbz x8, #1, 75f\n"
- "ldr d9, [x17], #0x8\n"
- "mov x25, #0x18\n"
- "ldr d13, [x24], #0x8\n"
- "ldr d17, [x23], #0x8\n"
- "tbz x8, #0, 78f\n"
- "ld1 { v9.s }[2], [x17]\n"
- "ld1 { v13.s }[2], [x24]\n"
- "ld1 { v17.s }[2], [x23]\n"
+ "tbz x17, #2, 76f\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "tbz x17, #1, 75f\n"
+ "ldr d9, [x15], #0x8\n"
+ "mov x24, #0x18\n"
+ "ldr d13, [x23], #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "tbz x17, #0, 78f\n"
+ "ld1 { v9.s }[2], [x15]\n"
+ "ld1 { v13.s }[2], [x23]\n"
+ "ld1 { v17.s }[2], [x22]\n"
"b 78f\n"
"75:" // Height 3: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x8, #0, 78f\n"
- "ldr s9, [x17, #0x0]\n"
- "ldr s13, [x24, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x17, #0, 78f\n"
+ "ldr s9, [x15, #0x0]\n"
+ "ldr s13, [x23, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
"b 78f\n"
"76:" // Height 3: Partial accumulate: partial_2_0
- "tbz x8, #1, 77f\n"
- "ldr d8, [x17], #0x8\n"
- "mov x25, #0x8\n"
- "ldr d12, [x24], #0x8\n"
- "ldr d16, [x23], #0x8\n"
- "tbz x8, #0, 78f\n"
- "ld1 { v8.s }[2], [x17]\n"
- "ld1 { v12.s }[2], [x24]\n"
- "ld1 { v16.s }[2], [x23]\n"
+ "tbz x17, #1, 77f\n"
+ "ldr d8, [x15], #0x8\n"
+ "ldr d12, [x23], #0x8\n"
+ "mov x24, #0x8\n"
+ "ldr d16, [x22], #0x8\n"
+ "tbz x17, #0, 78f\n"
+ "ld1 { v8.s }[2], [x15]\n"
+ "ld1 { v12.s }[2], [x23]\n"
+ "ld1 { v16.s }[2], [x22]\n"
"b 78f\n"
"77:" // Height 3: Partial accumulate: partial_1_0
- "ldr s8, [x17, #0x0]\n"
- "mov x25, #0x0\n"
- "ldr s12, [x24, #0x0]\n"
- "ldr s16, [x23, #0x0]\n"
+ "ldr s8, [x15, #0x0]\n"
+ "mov x24, #0x0\n"
+ "ldr s12, [x23, #0x0]\n"
+ "ldr s16, [x22, #0x0]\n"
"78:" // Height 3: Partial accumulate: Done
- "sub x17, x17, x25\n"
+ "sub x15, x15, x24\n"
"b 81f\n"
"79:" // Height 3: full accumulate
- "ldr q8, [x17, #0x0]\n"
- "ldr q9, [x17, #0x10]\n"
- "ldr q10, [x17, #0x20]\n"
- "ldr q11, [x17, #0x30]\n"
- "ldr q12, [x24, #0x0]\n"
- "ldr q13, [x24, #0x10]\n"
- "ldr q14, [x24, #0x20]\n"
- "ldr q15, [x24, #0x30]\n"
- "ldr q16, [x23, #0x0]\n"
- "ldr q17, [x23, #0x10]\n"
- "ldr q18, [x23, #0x20]\n"
- "ldr q19, [x23, #0x30]\n"
+ "ldr q8, [x15, #0x0]\n"
+ "ldr q9, [x15, #0x10]\n"
+ "ldr q10, [x15, #0x20]\n"
+ "ldr q11, [x15, #0x30]\n"
+ "ldr q12, [x23, #0x0]\n"
+ "ldr q13, [x23, #0x10]\n"
+ "ldr q14, [x23, #0x20]\n"
+ "ldr q15, [x23, #0x30]\n"
+ "ldr q16, [x22, #0x0]\n"
+ "ldr q17, [x22, #0x10]\n"
+ "ldr q18, [x22, #0x20]\n"
+ "ldr q19, [x22, #0x30]\n"
"b 81f\n"
"80:" // Height 3: no accumulate
"movi v8.4s, #0x0\n"
@@ -932,175 +932,175 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"movi v18.4s, #0x0\n"
"movi v19.4s, #0x0\n"
"81:" // Height 3: setup done
- "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
"82:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w13, [x20, x14, LSL #0x2]\n"
"tbz %x[flags], #3, 83f\n"
- "ldr x21, [%x[input_ptr], x15, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x13, [x21, #0x0]\n"
- "ldr x9, [x21, #0x8]\n"
- "ldr x27, [x21, #0x10]\n"
- "cbnz x15, 84f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x13, x13, x20\n"
- "add x9, x9, x20\n"
- "add x27, x27, x20\n"
+ "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x12, [x20, #0x0]\n"
+ "ldr x28, [x20, #0x8]\n"
+ "ldr x26, [x20, #0x10]\n"
+ "cbnz x14, 84f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x12, x12, x19\n"
+ "add x28, x28, x19\n"
+ "add x26, x26, x19\n"
"b 84f\n"
"83:" // Height 3: setup direct input
- "mov x13, %x[input_ptr]\n"
- "add x9, x13, x20\n"
- "add x27, x9, x20\n"
+ "mov x12, %x[input_ptr]\n"
+ "add x28, x12, x19\n"
+ "add x26, x28, x19\n"
"84:" // Height 3: input setup done
- "cmp x14, #0x10\n"
+ "cmp x13, #0x10\n"
"blt 87f\n"
- "ldr q0, [x13, #0x0]\n"
- "cmp x14, #0x20\n"
- "ldr q1, [x9, #0x0]\n"
- "ldr q2, [x27, #0x0]\n"
+ "ldr q0, [x12, #0x0]\n"
+ "ldr q1, [x28, #0x0]\n"
+ "cmp x13, #0x20\n"
+ "ldr q2, [x26, #0x0]\n"
"ldr q6, [x16, #0x0]\n"
- "ldr q7, [x16, #0x10]\n"
"blt 86f\n"
"85:" // Height 3: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr x12, [x16, #0x28]\n"
+ "ldr d7, [x16, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr x11, [x16, #0x38]\n"
+ "ldr x11, [x16, #0x18]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
"ldr d6, [x16, #0x20]\n"
+ "ldr x10, [x16, #0x28]\n"
+ "add x12, x12, #0x10\n"
+ "mov v7.d[1], x11\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
+ "ldr x11, [x16, #0x38]\n"
+ "add x28, x28, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x12\n"
+ "mov v6.d[1], x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr x12, [x16, #0x48]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
"ldr d7, [x16, #0x30]\n"
- "mov v7.d[1], x11\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
+ "ldr x10, [x16, #0x48]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "ldr x11, [x16, #0x58]\n"
+ "ldr x9, [x12, #0x8]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x16, #0x40]\n"
+ "add x26, x26, #0x10\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x12\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
- "ldr x12, [x16, #0x68]\n"
+ "ldr x11, [x16, #0x58]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x16, #0x50]\n"
- "mov v7.d[1], x11\n"
+ "sub x13, x13, #0x10\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
+ "ldr x10, [x16, #0x68]\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
- "ldr x11, [x16, #0x78]\n"
+ "ldr x27, [x28, #0x8]\n"
".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x16, #0x60]\n"
+ "cmp x13, #0x20\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x16, #0x78]\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
- "ldr x12, [x16, #0x88]\n"
+ "ldr x25, [x26, #0x8]\n"
".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x16, #0x70]\n"
- "mov v7.d[1], x11\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
+ "ldr x10, [x16, #0x88]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
- "ldr x11, [x16, #0x98]\n"
".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x16, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x16, #0x98]\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
- "ldr x12, [x16, #0xa8]\n"
".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x16, #0x90]\n"
- "mov v7.d[1], x11\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
+ "ldr x10, [x16, #0xa8]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
- "ldr x11, [x16, #0xb8]\n"
".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x16, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x16, #0xb8]\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
- "ldr x12, [x16, #0xc8]\n"
".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x16, #0xb0]\n"
- "mov v7.d[1], x11\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
+ "ldr x10, [x16, #0xc8]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
- "ldr x11, [x16, #0xd8]\n"
".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x16, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x16, #0xd8]\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
- "ldr x12, [x16, #0xe8]\n"
".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x16, #0xd0]\n"
- "mov v7.d[1], x11\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
+ "ldr x10, [x16, #0xe8]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
- "ldr x11, [x16, #0xf8]\n"
".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x16, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x16, #0xf8]\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
- "add x13, x13, #0x10\n"
".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x16, #0xf0]\n"
- "mov v7.d[1], x11\n"
- "add x9, x9, #0x10\n"
- "add x27, x27, #0x10\n"
"add x16, x16, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
- "ldr x12, [x16, #0x8]\n"
+ "ldr x10, [x16, #0x8]\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
- "ldr x10, [x13, #0x8]\n"
".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x16, #0x0]\n"
".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
- "ldr d0, [x13, #0x0]\n"
+ "ldr d0, [x12, #0x0]\n"
".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n"
- "ldr d1, [x9, #0x0]\n"
- "ldr x28, [x9, #0x8]\n"
+ "ldr d1, [x28, #0x0]\n"
".inst 0x4fa2e8f3 // sdot v19.4s, v7.16b, v2.4b[3]\n"
- "ldr d2, [x27, #0x0]\n"
- "sub x14, x14, #0x10\n"
- "ldr d7, [x16, #0x10]\n"
- "cmp x14, #0x20\n"
- "ldr x26, [x27, #0x8]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x16, #0x18]\n"
- "mov v0.d[1], x10\n"
- "prfm pldl1keep, [x13, #0x80]\n"
- "mov v1.d[1], x28\n"
- "prfm pldl1keep, [x9, #0x80]\n"
- "mov v2.d[1], x26\n"
- "prfm pldl1keep, [x27, #0x80]\n"
- "mov v7.d[1], x11\n"
+ "mov v6.d[1], x10\n"
+ "mov v0.d[1], x9\n"
+ "ldr d2, [x26, #0x0]\n"
+ "mov v1.d[1], x27\n"
+ "mov v2.d[1], x25\n"
"bge 85b\n"
"86:" // Height 3: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "add x13, x13, #0x10\n"
+ "ldr q7, [x16, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "sub x13, x13, #0x10\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
"ldr q6, [x16, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "add x27, x27, #0x10\n"
+ "add x12, x12, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "sub x14, x14, #0x10\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
"ldr q7, [x16, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "add x28, x28, #0x10\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
"ldr q6, [x16, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "add x26, x26, #0x10\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
"ldr q7, [x16, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
@@ -1151,18 +1151,18 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n"
".inst 0x4fa2e8f3 // sdot v19.4s, v7.16b, v2.4b[3]\n"
"87:" // Height 3: Multiply loop: Main loop skip
- "cbz x14, 92f\n"
- "cmp x14, #0x4\n"
+ "cbz x13, 92f\n"
+ "cmp x13, #0x4\n"
"blt 89f\n"
"88:" // Height 3: Multiply loop: Odd block loop
- "ldr s0, [x13], #0x4\n"
- "sub x14, x14, #0x4\n"
- "ldr s1, [x9], #0x4\n"
- "cmp x14, #0x4\n"
- "ldr s2, [x27], #0x4\n"
+ "ldr s0, [x12], #0x4\n"
+ "sub x13, x13, #0x4\n"
+ "ldr s1, [x28], #0x4\n"
+ "cmp x13, #0x4\n"
+ "ldr s2, [x26], #0x4\n"
"ldr q6, [x16, #0x0]\n"
- ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x16, #0x10]\n"
+ ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
"ldr q6, [x16, #0x20]\n"
@@ -1178,25 +1178,25 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
"bge 88b\n"
+ "cbz x13, 92f\n"
"89:" // Height 3: Multiply loop: Skip odd blocks
- "cbz x14, 92f\n"
- "tbz x14, #1, 90f\n"
- "ldr h0, [x13], #0x2\n"
- "ldr h1, [x9], #0x2\n"
- "ldr h2, [x27], #0x2\n"
- "tbz x14, #0, 91f\n"
- "ld1 { v0.b }[2], [x13]\n"
- "ld1 { v1.b }[2], [x9]\n"
- "ld1 { v2.b }[2], [x27]\n"
+ "tbz x13, #1, 90f\n"
+ "ldr h0, [x12], #0x2\n"
+ "ldr h1, [x28], #0x2\n"
+ "ldr h2, [x26], #0x2\n"
+ "tbz x13, #0, 91f\n"
+ "ld1 { v0.b }[2], [x12]\n"
+ "ld1 { v1.b }[2], [x28]\n"
+ "ld1 { v2.b }[2], [x26]\n"
"b 91f\n"
"90:" // Height 3: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x13, #0x0]\n"
- "ldr b1, [x9, #0x0]\n"
- "ldr b2, [x27, #0x0]\n"
+ "ldr b0, [x12, #0x0]\n"
+ "ldr b1, [x28, #0x0]\n"
+ "ldr b2, [x26, #0x0]\n"
"91:" // Height 3: Multiply loop: Ragged operand read: Done
"ldr q6, [x16, #0x0]\n"
- ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x16, #0x10]\n"
+ ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
"ldr q6, [x16, #0x20]\n"
@@ -1212,246 +1212,246 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
"92:" // Height 3: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x15, x15, #0x1\n"
- "cmp x15, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x14, x14, #0x1\n"
+ "cmp x14, x19\n"
"bne 82b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "cmp x8, #0x10\n"
- "prfm pstl1keep, [x17, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x15, #0x0]\n"
+ "cmp x17, #0x10\n"
+ "add x23, x15, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
"bge 101f\n"
- "tbz x8, #3, 96f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v9.4s }, [x17], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v13.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "tbz x8, #2, 94f\n"
- "st1 { v10.4s }, [x17], #0x10\n"
- "st1 { v14.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "tbz x8, #1, 93f\n"
- "str d11, [x17], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
- "tbz x8, #0, 100f\n"
- "st1 { v11.s }[2], [x17]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
+ "tbz x17, #3, 96f\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
+ "st1 { v9.4s }, [x15], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v13.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
+ "tbz x17, #2, 94f\n"
+ "st1 { v10.4s }, [x15], #0x10\n"
+ "st1 { v14.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
+ "tbz x17, #1, 93f\n"
+ "str d11, [x15], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "tbz x17, #0, 100f\n"
+ "st1 { v11.s }[2], [x15]\n"
+ "st1 { v15.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x22]\n"
"b 100f\n"
"93:" // Height 3: Partial direct writeback: partial_1_12
- "tbz x8, #0, 100f\n"
- "str s11, [x17, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
+ "tbz x17, #0, 100f\n"
+ "str s11, [x15, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
"b 100f\n"
"94:" // Height 3: Partial direct writeback: partial_2_8
- "tbz x8, #1, 95f\n"
- "str d10, [x17], #0x8\n"
- "str d14, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
- "tbz x8, #0, 100f\n"
- "st1 { v10.s }[2], [x17]\n"
- "st1 { v14.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
+ "tbz x17, #1, 95f\n"
+ "str d10, [x15], #0x8\n"
+ "str d14, [x23], #0x8\n"
+ "str d18, [x22], #0x8\n"
+ "tbz x17, #0, 100f\n"
+ "st1 { v10.s }[2], [x15]\n"
+ "st1 { v14.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x22]\n"
"b 100f\n"
"95:" // Height 3: Partial direct writeback: partial_1_8
- "tbz x8, #0, 100f\n"
- "str s10, [x17, #0x0]\n"
- "str s14, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
+ "tbz x17, #0, 100f\n"
+ "str s10, [x15, #0x0]\n"
+ "str s14, [x23, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
"b 100f\n"
"96:" // Height 3: Partial direct writeback: partial_4_0
- "tbz x8, #2, 98f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "tbz x8, #1, 97f\n"
- "str d9, [x17], #0x8\n"
- "str d13, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
- "tbz x8, #0, 100f\n"
- "st1 { v9.s }[2], [x17]\n"
- "st1 { v13.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
+ "tbz x17, #2, 98f\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "tbz x17, #1, 97f\n"
+ "str d9, [x15], #0x8\n"
+ "str d13, [x23], #0x8\n"
+ "str d17, [x22], #0x8\n"
+ "tbz x17, #0, 100f\n"
+ "st1 { v9.s }[2], [x15]\n"
+ "st1 { v13.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x22]\n"
"b 100f\n"
"97:" // Height 3: Partial direct writeback: partial_1_4
- "tbz x8, #0, 100f\n"
- "str s9, [x17, #0x0]\n"
- "str s13, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
+ "tbz x17, #0, 100f\n"
+ "str s9, [x15, #0x0]\n"
+ "str s13, [x23, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
"b 100f\n"
"98:" // Height 3: Partial direct writeback: partial_2_0
- "tbz x8, #1, 99f\n"
- "str d8, [x17], #0x8\n"
- "str d12, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "tbz x8, #0, 100f\n"
- "st1 { v8.s }[2], [x17]\n"
- "st1 { v12.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
+ "tbz x17, #1, 99f\n"
+ "str d8, [x15], #0x8\n"
+ "str d12, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "tbz x17, #0, 100f\n"
+ "st1 { v8.s }[2], [x15]\n"
+ "st1 { v12.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x22]\n"
"b 100f\n"
"99:" // Height 3: Partial direct writeback: partial_1_0
- "str s8, [x17, #0x0]\n"
- "str s12, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
+ "str s8, [x15, #0x0]\n"
+ "str s12, [x23, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
"100:" // Height 3: Partial direct writeback: Done
"b 102f\n"
"101:" // Height 3: Full writeback
- "str q8, [x17, #0x0]\n"
- "str q9, [x17, #0x10]\n"
- "str q10, [x17, #0x20]\n"
- "str q11, [x17, #0x30]\n"
- "add x17, x17, #0x40\n"
- "str q12, [x24, #0x0]\n"
- "str q13, [x24, #0x10]\n"
- "str q14, [x24, #0x20]\n"
- "str q15, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
+ "str q8, [x15, #0x0]\n"
+ "str q9, [x15, #0x10]\n"
+ "str q10, [x15, #0x20]\n"
+ "str q11, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
+ "str q12, [x23, #0x0]\n"
+ "str q13, [x23, #0x10]\n"
+ "str q14, [x23, #0x20]\n"
+ "str q15, [x23, #0x30]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q17, [x22, #0x10]\n"
+ "str q18, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
"102:" // Height 3: Writeback done
- "subs x8, x8, #0x10\n"
+ "subs x17, x17, #0x10\n"
"bgt 70b\n"
"b 206f\n"
"103:" // Height 4
- "ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x17, %x[output_ptr]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x15, %x[output_ptr]\n"
"ldr x16, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"104:" // Height 4: Column loop
"tbz %x[flags], #0, 114f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "cmp x8, #0x10\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x17, #0x10\n"
+ "add x23, x15, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"bge 113f\n"
- "tbz x8, #3, 108f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v9.4s }, [x17], #0x10\n"
- "ld1 { v13.4s }, [x24], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "tbz x8, #2, 106f\n"
- "ld1 { v10.4s }, [x17], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "tbz x8, #1, 105f\n"
- "ldr d11, [x17], #0x8\n"
- "mov x25, #0x38\n"
- "ldr d15, [x24], #0x8\n"
- "ldr d19, [x23], #0x8\n"
- "ldr d23, [x22], #0x8\n"
- "tbz x8, #0, 112f\n"
- "ld1 { v11.s }[2], [x17]\n"
- "ld1 { v15.s }[2], [x24]\n"
- "ld1 { v19.s }[2], [x23]\n"
- "ld1 { v23.s }[2], [x22]\n"
+ "tbz x17, #3, 108f\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v9.4s }, [x15], #0x10\n"
+ "ld1 { v13.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "tbz x17, #2, 106f\n"
+ "ld1 { v10.4s }, [x15], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
+ "tbz x17, #1, 105f\n"
+ "ldr d11, [x15], #0x8\n"
+ "mov x24, #0x38\n"
+ "ldr d15, [x23], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
+ "ldr d23, [x21], #0x8\n"
+ "tbz x17, #0, 112f\n"
+ "ld1 { v11.s }[2], [x15]\n"
+ "ld1 { v15.s }[2], [x23]\n"
+ "ld1 { v19.s }[2], [x22]\n"
+ "ld1 { v23.s }[2], [x21]\n"
"b 112f\n"
"105:" // Height 4: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x8, #0, 112f\n"
- "ldr s11, [x17, #0x0]\n"
- "ldr s15, [x24, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s23, [x22, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x17, #0, 112f\n"
+ "ldr s11, [x15, #0x0]\n"
+ "ldr s15, [x23, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
+ "ldr s23, [x21, #0x0]\n"
"b 112f\n"
"106:" // Height 4: Partial accumulate: partial_2_8
- "tbz x8, #1, 107f\n"
- "ldr d10, [x17], #0x8\n"
- "mov x25, #0x28\n"
- "ldr d14, [x24], #0x8\n"
- "ldr d18, [x23], #0x8\n"
- "ldr d22, [x22], #0x8\n"
- "tbz x8, #0, 112f\n"
- "ld1 { v10.s }[2], [x17]\n"
- "ld1 { v14.s }[2], [x24]\n"
- "ld1 { v18.s }[2], [x23]\n"
- "ld1 { v22.s }[2], [x22]\n"
+ "tbz x17, #1, 107f\n"
+ "ldr d10, [x15], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
+ "mov x24, #0x28\n"
+ "ldr d18, [x22], #0x8\n"
+ "ldr d22, [x21], #0x8\n"
+ "tbz x17, #0, 112f\n"
+ "ld1 { v10.s }[2], [x15]\n"
+ "ld1 { v14.s }[2], [x23]\n"
+ "ld1 { v18.s }[2], [x22]\n"
+ "ld1 { v22.s }[2], [x21]\n"
"b 112f\n"
"107:" // Height 4: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x8, #0, 112f\n"
- "ldr s10, [x17, #0x0]\n"
- "ldr s14, [x24, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
- "ldr s22, [x22, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x17, #0, 112f\n"
+ "ldr s10, [x15, #0x0]\n"
+ "ldr s14, [x23, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
+ "ldr s22, [x21, #0x0]\n"
"b 112f\n"
"108:" // Height 4: Partial accumulate: partial_4_0
- "tbz x8, #2, 110f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "tbz x8, #1, 109f\n"
- "ldr d9, [x17], #0x8\n"
- "mov x25, #0x18\n"
- "ldr d13, [x24], #0x8\n"
- "ldr d17, [x23], #0x8\n"
- "ldr d21, [x22], #0x8\n"
- "tbz x8, #0, 112f\n"
- "ld1 { v9.s }[2], [x17]\n"
- "ld1 { v13.s }[2], [x24]\n"
- "ld1 { v17.s }[2], [x23]\n"
- "ld1 { v21.s }[2], [x22]\n"
+ "tbz x17, #2, 110f\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "tbz x17, #1, 109f\n"
+ "ldr d9, [x15], #0x8\n"
+ "mov x24, #0x18\n"
+ "ldr d13, [x23], #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "ldr d21, [x21], #0x8\n"
+ "tbz x17, #0, 112f\n"
+ "ld1 { v9.s }[2], [x15]\n"
+ "ld1 { v13.s }[2], [x23]\n"
+ "ld1 { v17.s }[2], [x22]\n"
+ "ld1 { v21.s }[2], [x21]\n"
"b 112f\n"
"109:" // Height 4: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x8, #0, 112f\n"
- "ldr s9, [x17, #0x0]\n"
- "ldr s13, [x24, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
- "ldr s21, [x22, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x17, #0, 112f\n"
+ "ldr s9, [x15, #0x0]\n"
+ "ldr s13, [x23, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
+ "ldr s21, [x21, #0x0]\n"
"b 112f\n"
"110:" // Height 4: Partial accumulate: partial_2_0
- "tbz x8, #1, 111f\n"
- "ldr d8, [x17], #0x8\n"
- "mov x25, #0x8\n"
- "ldr d12, [x24], #0x8\n"
- "ldr d16, [x23], #0x8\n"
- "ldr d20, [x22], #0x8\n"
- "tbz x8, #0, 112f\n"
- "ld1 { v8.s }[2], [x17]\n"
- "ld1 { v12.s }[2], [x24]\n"
- "ld1 { v16.s }[2], [x23]\n"
- "ld1 { v20.s }[2], [x22]\n"
+ "tbz x17, #1, 111f\n"
+ "ldr d8, [x15], #0x8\n"
+ "ldr d12, [x23], #0x8\n"
+ "mov x24, #0x8\n"
+ "ldr d16, [x22], #0x8\n"
+ "ldr d20, [x21], #0x8\n"
+ "tbz x17, #0, 112f\n"
+ "ld1 { v8.s }[2], [x15]\n"
+ "ld1 { v12.s }[2], [x23]\n"
+ "ld1 { v16.s }[2], [x22]\n"
+ "ld1 { v20.s }[2], [x21]\n"
"b 112f\n"
"111:" // Height 4: Partial accumulate: partial_1_0
- "ldr s8, [x17, #0x0]\n"
- "mov x25, #0x0\n"
- "ldr s12, [x24, #0x0]\n"
- "ldr s16, [x23, #0x0]\n"
- "ldr s20, [x22, #0x0]\n"
+ "ldr s8, [x15, #0x0]\n"
+ "mov x24, #0x0\n"
+ "ldr s12, [x23, #0x0]\n"
+ "ldr s16, [x22, #0x0]\n"
+ "ldr s20, [x21, #0x0]\n"
"112:" // Height 4: Partial accumulate: Done
- "sub x17, x17, x25\n"
+ "sub x15, x15, x24\n"
"b 115f\n"
"113:" // Height 4: full accumulate
- "ldr q8, [x17, #0x0]\n"
- "ldr q9, [x17, #0x10]\n"
- "ldr q10, [x17, #0x20]\n"
- "ldr q11, [x17, #0x30]\n"
- "ldr q12, [x24, #0x0]\n"
- "ldr q13, [x24, #0x10]\n"
- "ldr q14, [x24, #0x20]\n"
- "ldr q15, [x24, #0x30]\n"
- "ldr q16, [x23, #0x0]\n"
- "ldr q17, [x23, #0x10]\n"
- "ldr q18, [x23, #0x20]\n"
- "ldr q19, [x23, #0x30]\n"
- "ldr q20, [x22, #0x0]\n"
- "ldr q21, [x22, #0x10]\n"
- "ldr q22, [x22, #0x20]\n"
- "ldr q23, [x22, #0x30]\n"
+ "ldr q8, [x15, #0x0]\n"
+ "ldr q9, [x15, #0x10]\n"
+ "ldr q10, [x15, #0x20]\n"
+ "ldr q11, [x15, #0x30]\n"
+ "ldr q12, [x23, #0x0]\n"
+ "ldr q13, [x23, #0x10]\n"
+ "ldr q14, [x23, #0x20]\n"
+ "ldr q15, [x23, #0x30]\n"
+ "ldr q16, [x22, #0x0]\n"
+ "ldr q17, [x22, #0x10]\n"
+ "ldr q18, [x22, #0x20]\n"
+ "ldr q19, [x22, #0x30]\n"
+ "ldr q20, [x21, #0x0]\n"
+ "ldr q21, [x21, #0x10]\n"
+ "ldr q22, [x21, #0x20]\n"
+ "ldr q23, [x21, #0x30]\n"
"b 115f\n"
"114:" // Height 4: no accumulate
"movi v8.4s, #0x0\n"
@@ -1471,204 +1471,204 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"movi v22.4s, #0x0\n"
"movi v23.4s, #0x0\n"
"115:" // Height 4: setup done
- "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
"116:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w13, [x20, x14, LSL #0x2]\n"
"tbz %x[flags], #3, 117f\n"
- "ldr x21, [%x[input_ptr], x15, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x13, [x21, #0x0]\n"
- "ldr x9, [x21, #0x8]\n"
- "ldr x27, [x21, #0x10]\n"
- "ldr x25, [x21, #0x18]\n"
- "cbnz x15, 118f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x13, x13, x20\n"
- "add x9, x9, x20\n"
- "add x27, x27, x20\n"
- "add x25, x25, x20\n"
+ "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x12, [x20, #0x0]\n"
+ "ldr x28, [x20, #0x8]\n"
+ "ldr x26, [x20, #0x10]\n"
+ "ldr x24, [x20, #0x18]\n"
+ "cbnz x14, 118f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x12, x12, x19\n"
+ "add x28, x28, x19\n"
+ "add x26, x26, x19\n"
+ "add x24, x24, x19\n"
"b 118f\n"
"117:" // Height 4: setup direct input
- "mov x13, %x[input_ptr]\n"
- "add x9, x13, x20\n"
- "add x27, x9, x20\n"
- "add x25, x27, x20\n"
+ "mov x12, %x[input_ptr]\n"
+ "add x28, x12, x19\n"
+ "add x26, x28, x19\n"
+ "add x24, x26, x19\n"
"118:" // Height 4: input setup done
- "cmp x14, #0x10\n"
+ "cmp x13, #0x10\n"
"blt 121f\n"
- "ldr q0, [x13, #0x0]\n"
- "cmp x14, #0x20\n"
- "ldr q1, [x9, #0x0]\n"
- "ldr q2, [x27, #0x0]\n"
- "ldr q3, [x25, #0x0]\n"
+ "ldr q0, [x12, #0x0]\n"
+ "ldr q1, [x28, #0x0]\n"
+ "cmp x13, #0x20\n"
+ "ldr q2, [x26, #0x0]\n"
+ "ldr q3, [x24, #0x0]\n"
"ldr q6, [x16, #0x0]\n"
- "ldr q7, [x16, #0x10]\n"
"blt 120f\n"
"119:" // Height 4: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr x12, [x16, #0x28]\n"
+ "ldr d7, [x16, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr x11, [x16, #0x38]\n"
+ "ldr x11, [x16, #0x18]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
- "add x13, x13, #0x10\n"
+ "ldr x10, [x16, #0x28]\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
"ldr d6, [x16, #0x20]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x16, #0x38]\n"
+ "add x12, x12, #0x10\n"
+ "add x28, x28, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x12\n"
+ "mov v6.d[1], x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr x12, [x16, #0x48]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
"ldr d7, [x16, #0x30]\n"
- "mov v7.d[1], x11\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
+ "ldr x10, [x16, #0x48]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "ldr x11, [x16, #0x58]\n"
+ "ldr x9, [x12, #0x8]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
- "add x27, x27, #0x10\n"
+ "mov v7.d[1], x11\n"
".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n"
"ldr d6, [x16, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x16, #0x58]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
- "ldr x12, [x16, #0x68]\n"
+ "ldr x27, [x28, #0x8]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
- "add x25, x25, #0x10\n"
+ "mov v6.d[1], x10\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
"ldr d7, [x16, #0x50]\n"
- "mov v7.d[1], x11\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
+ "ldr x10, [x16, #0x68]\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
- "ldr x11, [x16, #0x78]\n"
+ "add x26, x26, #0x10\n"
".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n"
- "ldr x10, [x13, #0x8]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4fa3e0d4 // sdot v20.4s, v6.16b, v3.4b[1]\n"
- "ldr d6, [x16, #0x60]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
- "mov v6.d[1], x12\n"
+ "ldr d6, [x16, #0x60]\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
- "ldr x12, [x16, #0x88]\n"
+ "ldr x11, [x16, #0x78]\n"
".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n"
- "ldr x28, [x9, #0x8]\n"
+ "ldr x25, [x26, #0x8]\n"
".inst 0x4fa3e0f5 // sdot v21.4s, v7.16b, v3.4b[1]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x16, #0x70]\n"
- "mov v7.d[1], x11\n"
+ "add x24, x24, #0x10\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
- "ldr x11, [x16, #0x98]\n"
+ "ldr x10, [x16, #0x88]\n"
".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n"
- "ldr x26, [x27, #0x8]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4fa3e0d6 // sdot v22.4s, v6.16b, v3.4b[1]\n"
"ldr d6, [x16, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x16, #0x98]\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
- "ldr x12, [x16, #0xa8]\n"
+ "ldr x23, [x24, #0x8]\n"
".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n"
- "ldr x24, [x25, #0x8]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4fa3e0f7 // sdot v23.4s, v7.16b, v3.4b[1]\n"
"ldr d7, [x16, #0x90]\n"
- "mov v7.d[1], x11\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
+ "ldr x10, [x16, #0xa8]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
- "ldr x11, [x16, #0xb8]\n"
+ "sub x13, x13, #0x10\n"
".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n"
- "sub x14, x14, #0x10\n"
+ "mov v7.d[1], x11\n"
".inst 0x4f83e8d4 // sdot v20.4s, v6.16b, v3.4b[2]\n"
"ldr d6, [x16, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x16, #0xb8]\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
- "ldr x12, [x16, #0xc8]\n"
+ "cmp x13, #0x20\n"
".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n"
- "cmp x14, #0x20\n"
+ "mov v6.d[1], x10\n"
".inst 0x4f83e8f5 // sdot v21.4s, v7.16b, v3.4b[2]\n"
"ldr d7, [x16, #0xb0]\n"
- "mov v7.d[1], x11\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
+ "ldr x10, [x16, #0xc8]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
- "ldr x11, [x16, #0xd8]\n"
".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4f83e8d6 // sdot v22.4s, v6.16b, v3.4b[2]\n"
"ldr d6, [x16, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x16, #0xd8]\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
- "ldr x12, [x16, #0xe8]\n"
".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4f83e8f7 // sdot v23.4s, v7.16b, v3.4b[2]\n"
"ldr d7, [x16, #0xd0]\n"
- "mov v7.d[1], x11\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
+ "ldr x10, [x16, #0xe8]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
- "ldr x11, [x16, #0xf8]\n"
".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4fa3e8d4 // sdot v20.4s, v6.16b, v3.4b[3]\n"
"ldr d6, [x16, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x16, #0xf8]\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4fa3e8f5 // sdot v21.4s, v7.16b, v3.4b[3]\n"
"ldr d7, [x16, #0xf0]\n"
- "mov v7.d[1], x11\n"
- "add x16, x16, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
- "ldr x12, [x16, #0x8]\n"
+ "add x16, x16, #0x100\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
- "ldr x11, [x16, #0x18]\n"
+ "ldr x10, [x16, #0x8]\n"
".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4fa3e8d6 // sdot v22.4s, v6.16b, v3.4b[3]\n"
"ldr d6, [x16, #0x0]\n"
".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
- "ldr d0, [x13, #0x0]\n"
+ "ldr d0, [x12, #0x0]\n"
".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n"
- "ldr d1, [x9, #0x0]\n"
+ "ldr d1, [x28, #0x0]\n"
".inst 0x4fa2e8f3 // sdot v19.4s, v7.16b, v2.4b[3]\n"
- "ldr d2, [x27, #0x0]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4fa3e8f7 // sdot v23.4s, v7.16b, v3.4b[3]\n"
- "ldr d3, [x25, #0x0]\n"
- "ldr d7, [x16, #0x10]\n"
- "mov v6.d[1], x12\n"
- "mov v0.d[1], x10\n"
- "mov v1.d[1], x28\n"
- "mov v2.d[1], x26\n"
- "mov v3.d[1], x24\n"
- "mov v7.d[1], x11\n"
+ "mov v0.d[1], x9\n"
+ "mov v1.d[1], x27\n"
+ "ldr d2, [x26, #0x0]\n"
+ "ldr d3, [x24, #0x0]\n"
+ "mov v2.d[1], x25\n"
+ "mov v3.d[1], x23\n"
"bge 119b\n"
"120:" // Height 4: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "add x13, x13, #0x10\n"
+ "ldr q7, [x16, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "sub x13, x13, #0x10\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
- "add x27, x27, #0x10\n"
+ "add x12, x12, #0x10\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
- "ldr q6, [x16, #0x20]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "add x25, x25, #0x10\n"
+ "ldr q6, [x16, #0x20]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "sub x14, x14, #0x10\n"
+ "add x28, x28, #0x10\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
"ldr q7, [x16, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "add x26, x26, #0x10\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n"
- "ldr q6, [x16, #0x40]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
+ "ldr q6, [x16, #0x40]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
@@ -1733,19 +1733,19 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
".inst 0x4fa2e8f3 // sdot v19.4s, v7.16b, v2.4b[3]\n"
".inst 0x4fa3e8f7 // sdot v23.4s, v7.16b, v3.4b[3]\n"
"121:" // Height 4: Multiply loop: Main loop skip
- "cbz x14, 126f\n"
- "cmp x14, #0x4\n"
+ "cbz x13, 126f\n"
+ "cmp x13, #0x4\n"
"blt 123f\n"
"122:" // Height 4: Multiply loop: Odd block loop
- "ldr s0, [x13], #0x4\n"
- "sub x14, x14, #0x4\n"
- "ldr s1, [x9], #0x4\n"
- "cmp x14, #0x4\n"
- "ldr s2, [x27], #0x4\n"
- "ldr s3, [x25], #0x4\n"
+ "ldr s0, [x12], #0x4\n"
+ "sub x13, x13, #0x4\n"
+ "ldr s1, [x28], #0x4\n"
+ "cmp x13, #0x4\n"
+ "ldr s2, [x26], #0x4\n"
+ "ldr s3, [x24], #0x4\n"
"ldr q6, [x16, #0x0]\n"
- ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x16, #0x10]\n"
+ ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
@@ -1765,28 +1765,28 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
"bge 122b\n"
+ "cbz x13, 126f\n"
"123:" // Height 4: Multiply loop: Skip odd blocks
- "cbz x14, 126f\n"
- "tbz x14, #1, 124f\n"
- "ldr h0, [x13], #0x2\n"
- "ldr h1, [x9], #0x2\n"
- "ldr h2, [x27], #0x2\n"
- "ldr h3, [x25], #0x2\n"
- "tbz x14, #0, 125f\n"
- "ld1 { v0.b }[2], [x13]\n"
- "ld1 { v1.b }[2], [x9]\n"
- "ld1 { v2.b }[2], [x27]\n"
- "ld1 { v3.b }[2], [x25]\n"
+ "tbz x13, #1, 124f\n"
+ "ldr h0, [x12], #0x2\n"
+ "ldr h1, [x28], #0x2\n"
+ "ldr h2, [x26], #0x2\n"
+ "ldr h3, [x24], #0x2\n"
+ "tbz x13, #0, 125f\n"
+ "ld1 { v0.b }[2], [x12]\n"
+ "ld1 { v1.b }[2], [x28]\n"
+ "ld1 { v2.b }[2], [x26]\n"
+ "ld1 { v3.b }[2], [x24]\n"
"b 125f\n"
"124:" // Height 4: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x13, #0x0]\n"
- "ldr b1, [x9, #0x0]\n"
- "ldr b2, [x27, #0x0]\n"
- "ldr b3, [x25, #0x0]\n"
+ "ldr b0, [x12, #0x0]\n"
+ "ldr b1, [x28, #0x0]\n"
+ "ldr b2, [x26, #0x0]\n"
+ "ldr b3, [x24, #0x0]\n"
"125:" // Height 4: Multiply loop: Ragged operand read: Done
"ldr q6, [x16, #0x0]\n"
- ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x16, #0x10]\n"
+ ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
@@ -1806,289 +1806,289 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
"126:" // Height 4: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x15, x15, #0x1\n"
- "cmp x15, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x14, x14, #0x1\n"
+ "cmp x14, x19\n"
"bne 116b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "cmp x8, #0x10\n"
- "prfm pstl1keep, [x17, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x15, #0x0]\n"
+ "cmp x17, #0x10\n"
+ "add x23, x15, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
"bge 135f\n"
- "tbz x8, #3, 130f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v9.4s }, [x17], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v13.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v21.4s }, [x22], #0x10\n"
- "tbz x8, #2, 128f\n"
- "st1 { v10.4s }, [x17], #0x10\n"
- "st1 { v14.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "st1 { v22.4s }, [x22], #0x10\n"
- "tbz x8, #1, 127f\n"
- "str d11, [x17], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
- "str d23, [x22], #0x8\n"
- "tbz x8, #0, 134f\n"
- "st1 { v11.s }[2], [x17]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
- "st1 { v23.s }[2], [x22]\n"
+ "tbz x17, #3, 130f\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
+ "st1 { v9.4s }, [x15], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v13.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v21.4s }, [x21], #0x10\n"
+ "tbz x17, #2, 128f\n"
+ "st1 { v10.4s }, [x15], #0x10\n"
+ "st1 { v14.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
+ "st1 { v22.4s }, [x21], #0x10\n"
+ "tbz x17, #1, 127f\n"
+ "str d11, [x15], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "str d23, [x21], #0x8\n"
+ "tbz x17, #0, 134f\n"
+ "st1 { v11.s }[2], [x15]\n"
+ "st1 { v15.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x22]\n"
+ "st1 { v23.s }[2], [x21]\n"
"b 134f\n"
"127:" // Height 4: Partial direct writeback: partial_1_12
- "tbz x8, #0, 134f\n"
- "str s11, [x17, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
- "str s23, [x22, #0x0]\n"
+ "tbz x17, #0, 134f\n"
+ "str s11, [x15, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
+ "str s23, [x21, #0x0]\n"
"b 134f\n"
"128:" // Height 4: Partial direct writeback: partial_2_8
- "tbz x8, #1, 129f\n"
- "str d10, [x17], #0x8\n"
- "str d14, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
- "str d22, [x22], #0x8\n"
- "tbz x8, #0, 134f\n"
- "st1 { v10.s }[2], [x17]\n"
- "st1 { v14.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
- "st1 { v22.s }[2], [x22]\n"
+ "tbz x17, #1, 129f\n"
+ "str d10, [x15], #0x8\n"
+ "str d14, [x23], #0x8\n"
+ "str d18, [x22], #0x8\n"
+ "str d22, [x21], #0x8\n"
+ "tbz x17, #0, 134f\n"
+ "st1 { v10.s }[2], [x15]\n"
+ "st1 { v14.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x22]\n"
+ "st1 { v22.s }[2], [x21]\n"
"b 134f\n"
"129:" // Height 4: Partial direct writeback: partial_1_8
- "tbz x8, #0, 134f\n"
- "str s10, [x17, #0x0]\n"
- "str s14, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
- "str s22, [x22, #0x0]\n"
+ "tbz x17, #0, 134f\n"
+ "str s10, [x15, #0x0]\n"
+ "str s14, [x23, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
+ "str s22, [x21, #0x0]\n"
"b 134f\n"
"130:" // Height 4: Partial direct writeback: partial_4_0
- "tbz x8, #2, 132f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "tbz x8, #1, 131f\n"
- "str d9, [x17], #0x8\n"
- "str d13, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
- "str d21, [x22], #0x8\n"
- "tbz x8, #0, 134f\n"
- "st1 { v9.s }[2], [x17]\n"
- "st1 { v13.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
- "st1 { v21.s }[2], [x22]\n"
+ "tbz x17, #2, 132f\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "tbz x17, #1, 131f\n"
+ "str d9, [x15], #0x8\n"
+ "str d13, [x23], #0x8\n"
+ "str d17, [x22], #0x8\n"
+ "str d21, [x21], #0x8\n"
+ "tbz x17, #0, 134f\n"
+ "st1 { v9.s }[2], [x15]\n"
+ "st1 { v13.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x22]\n"
+ "st1 { v21.s }[2], [x21]\n"
"b 134f\n"
"131:" // Height 4: Partial direct writeback: partial_1_4
- "tbz x8, #0, 134f\n"
- "str s9, [x17, #0x0]\n"
- "str s13, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
- "str s21, [x22, #0x0]\n"
+ "tbz x17, #0, 134f\n"
+ "str s9, [x15, #0x0]\n"
+ "str s13, [x23, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
+ "str s21, [x21, #0x0]\n"
"b 134f\n"
"132:" // Height 4: Partial direct writeback: partial_2_0
- "tbz x8, #1, 133f\n"
- "str d8, [x17], #0x8\n"
- "str d12, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d20, [x22], #0x8\n"
- "tbz x8, #0, 134f\n"
- "st1 { v8.s }[2], [x17]\n"
- "st1 { v12.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
- "st1 { v20.s }[2], [x22]\n"
+ "tbz x17, #1, 133f\n"
+ "str d8, [x15], #0x8\n"
+ "str d12, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "str d20, [x21], #0x8\n"
+ "tbz x17, #0, 134f\n"
+ "st1 { v8.s }[2], [x15]\n"
+ "st1 { v12.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x22]\n"
+ "st1 { v20.s }[2], [x21]\n"
"b 134f\n"
"133:" // Height 4: Partial direct writeback: partial_1_0
- "str s8, [x17, #0x0]\n"
- "str s12, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
- "str s20, [x22, #0x0]\n"
+ "str s8, [x15, #0x0]\n"
+ "str s12, [x23, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
+ "str s20, [x21, #0x0]\n"
"134:" // Height 4: Partial direct writeback: Done
"b 136f\n"
"135:" // Height 4: Full writeback
- "str q8, [x17, #0x0]\n"
- "str q9, [x17, #0x10]\n"
- "str q10, [x17, #0x20]\n"
- "str q11, [x17, #0x30]\n"
- "add x17, x17, #0x40\n"
- "str q12, [x24, #0x0]\n"
- "str q13, [x24, #0x10]\n"
- "str q14, [x24, #0x20]\n"
- "str q15, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
- "str q20, [x22, #0x0]\n"
- "str q21, [x22, #0x10]\n"
- "str q22, [x22, #0x20]\n"
- "str q23, [x22, #0x30]\n"
+ "str q8, [x15, #0x0]\n"
+ "str q9, [x15, #0x10]\n"
+ "str q10, [x15, #0x20]\n"
+ "str q11, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
+ "str q12, [x23, #0x0]\n"
+ "str q13, [x23, #0x10]\n"
+ "str q14, [x23, #0x20]\n"
+ "str q15, [x23, #0x30]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q17, [x22, #0x10]\n"
+ "str q18, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
+ "str q20, [x21, #0x0]\n"
+ "str q21, [x21, #0x10]\n"
+ "str q22, [x21, #0x20]\n"
+ "str q23, [x21, #0x30]\n"
"136:" // Height 4: Writeback done
- "subs x8, x8, #0x10\n"
+ "subs x17, x17, #0x10\n"
"bgt 104b\n"
"b 206f\n"
"137:" // Height 5
- "ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x17, %x[output_ptr]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x15, %x[output_ptr]\n"
"ldr x16, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"138:" // Height 5: Column loop
"tbz %x[flags], #0, 148f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "cmp x8, #0x10\n"
- "add x21, x22, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x17, #0x10\n"
+ "add x23, x15, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"bge 147f\n"
- "tbz x8, #3, 142f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v24.4s }, [x21], #0x10\n"
- "ld1 { v9.4s }, [x17], #0x10\n"
- "ld1 { v13.4s }, [x24], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v25.4s }, [x21], #0x10\n"
- "tbz x8, #2, 140f\n"
- "ld1 { v10.4s }, [x17], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
- "tbz x8, #1, 139f\n"
- "ldr d11, [x17], #0x8\n"
- "mov x25, #0x38\n"
- "ldr d15, [x24], #0x8\n"
- "ldr d19, [x23], #0x8\n"
- "ldr d23, [x22], #0x8\n"
- "ldr d27, [x21], #0x8\n"
- "tbz x8, #0, 146f\n"
- "ld1 { v11.s }[2], [x17]\n"
- "ld1 { v15.s }[2], [x24]\n"
- "ld1 { v19.s }[2], [x23]\n"
- "ld1 { v23.s }[2], [x22]\n"
- "ld1 { v27.s }[2], [x21]\n"
+ "tbz x17, #3, 142f\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v9.4s }, [x15], #0x10\n"
+ "ld1 { v13.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v24.4s }, [x20], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "ld1 { v25.4s }, [x20], #0x10\n"
+ "tbz x17, #2, 140f\n"
+ "ld1 { v10.4s }, [x15], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
+ "ld1 { v26.4s }, [x20], #0x10\n"
+ "tbz x17, #1, 139f\n"
+ "ldr d11, [x15], #0x8\n"
+ "mov x24, #0x38\n"
+ "ldr d15, [x23], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
+ "ldr d23, [x21], #0x8\n"
+ "ldr d27, [x20], #0x8\n"
+ "tbz x17, #0, 146f\n"
+ "ld1 { v11.s }[2], [x15]\n"
+ "ld1 { v15.s }[2], [x23]\n"
+ "ld1 { v19.s }[2], [x22]\n"
+ "ld1 { v23.s }[2], [x21]\n"
+ "ld1 { v27.s }[2], [x20]\n"
"b 146f\n"
"139:" // Height 5: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x8, #0, 146f\n"
- "ldr s11, [x17, #0x0]\n"
- "ldr s15, [x24, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s23, [x22, #0x0]\n"
- "ldr s27, [x21, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x17, #0, 146f\n"
+ "ldr s11, [x15, #0x0]\n"
+ "ldr s15, [x23, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
+ "ldr s23, [x21, #0x0]\n"
+ "ldr s27, [x20, #0x0]\n"
"b 146f\n"
"140:" // Height 5: Partial accumulate: partial_2_8
- "tbz x8, #1, 141f\n"
- "ldr d10, [x17], #0x8\n"
- "mov x25, #0x28\n"
- "ldr d14, [x24], #0x8\n"
- "ldr d18, [x23], #0x8\n"
- "ldr d22, [x22], #0x8\n"
- "ldr d26, [x21], #0x8\n"
- "tbz x8, #0, 146f\n"
- "ld1 { v10.s }[2], [x17]\n"
- "ld1 { v14.s }[2], [x24]\n"
- "ld1 { v18.s }[2], [x23]\n"
- "ld1 { v22.s }[2], [x22]\n"
- "ld1 { v26.s }[2], [x21]\n"
+ "tbz x17, #1, 141f\n"
+ "ldr d10, [x15], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
+ "mov x24, #0x28\n"
+ "ldr d18, [x22], #0x8\n"
+ "ldr d22, [x21], #0x8\n"
+ "ldr d26, [x20], #0x8\n"
+ "tbz x17, #0, 146f\n"
+ "ld1 { v10.s }[2], [x15]\n"
+ "ld1 { v14.s }[2], [x23]\n"
+ "ld1 { v18.s }[2], [x22]\n"
+ "ld1 { v22.s }[2], [x21]\n"
+ "ld1 { v26.s }[2], [x20]\n"
"b 146f\n"
"141:" // Height 5: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x8, #0, 146f\n"
- "ldr s10, [x17, #0x0]\n"
- "ldr s14, [x24, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
- "ldr s22, [x22, #0x0]\n"
- "ldr s26, [x21, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x17, #0, 146f\n"
+ "ldr s10, [x15, #0x0]\n"
+ "ldr s14, [x23, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
+ "ldr s22, [x21, #0x0]\n"
+ "ldr s26, [x20, #0x0]\n"
"b 146f\n"
"142:" // Height 5: Partial accumulate: partial_4_0
- "tbz x8, #2, 144f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v24.4s }, [x21], #0x10\n"
- "tbz x8, #1, 143f\n"
- "ldr d9, [x17], #0x8\n"
- "mov x25, #0x18\n"
- "ldr d13, [x24], #0x8\n"
- "ldr d17, [x23], #0x8\n"
- "ldr d21, [x22], #0x8\n"
- "ldr d25, [x21], #0x8\n"
- "tbz x8, #0, 146f\n"
- "ld1 { v9.s }[2], [x17]\n"
- "ld1 { v13.s }[2], [x24]\n"
- "ld1 { v17.s }[2], [x23]\n"
- "ld1 { v21.s }[2], [x22]\n"
- "ld1 { v25.s }[2], [x21]\n"
+ "tbz x17, #2, 144f\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v24.4s }, [x20], #0x10\n"
+ "tbz x17, #1, 143f\n"
+ "ldr d9, [x15], #0x8\n"
+ "mov x24, #0x18\n"
+ "ldr d13, [x23], #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "ldr d21, [x21], #0x8\n"
+ "ldr d25, [x20], #0x8\n"
+ "tbz x17, #0, 146f\n"
+ "ld1 { v9.s }[2], [x15]\n"
+ "ld1 { v13.s }[2], [x23]\n"
+ "ld1 { v17.s }[2], [x22]\n"
+ "ld1 { v21.s }[2], [x21]\n"
+ "ld1 { v25.s }[2], [x20]\n"
"b 146f\n"
"143:" // Height 5: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x8, #0, 146f\n"
- "ldr s9, [x17, #0x0]\n"
- "ldr s13, [x24, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
- "ldr s21, [x22, #0x0]\n"
- "ldr s25, [x21, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x17, #0, 146f\n"
+ "ldr s9, [x15, #0x0]\n"
+ "ldr s13, [x23, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
+ "ldr s21, [x21, #0x0]\n"
+ "ldr s25, [x20, #0x0]\n"
"b 146f\n"
"144:" // Height 5: Partial accumulate: partial_2_0
- "tbz x8, #1, 145f\n"
- "ldr d8, [x17], #0x8\n"
- "mov x25, #0x8\n"
- "ldr d12, [x24], #0x8\n"
- "ldr d16, [x23], #0x8\n"
- "ldr d20, [x22], #0x8\n"
- "ldr d24, [x21], #0x8\n"
- "tbz x8, #0, 146f\n"
- "ld1 { v8.s }[2], [x17]\n"
- "ld1 { v12.s }[2], [x24]\n"
- "ld1 { v16.s }[2], [x23]\n"
- "ld1 { v20.s }[2], [x22]\n"
- "ld1 { v24.s }[2], [x21]\n"
+ "tbz x17, #1, 145f\n"
+ "ldr d8, [x15], #0x8\n"
+ "ldr d12, [x23], #0x8\n"
+ "mov x24, #0x8\n"
+ "ldr d16, [x22], #0x8\n"
+ "ldr d20, [x21], #0x8\n"
+ "ldr d24, [x20], #0x8\n"
+ "tbz x17, #0, 146f\n"
+ "ld1 { v8.s }[2], [x15]\n"
+ "ld1 { v12.s }[2], [x23]\n"
+ "ld1 { v16.s }[2], [x22]\n"
+ "ld1 { v20.s }[2], [x21]\n"
+ "ld1 { v24.s }[2], [x20]\n"
"b 146f\n"
"145:" // Height 5: Partial accumulate: partial_1_0
- "ldr s8, [x17, #0x0]\n"
- "mov x25, #0x0\n"
- "ldr s12, [x24, #0x0]\n"
- "ldr s16, [x23, #0x0]\n"
- "ldr s20, [x22, #0x0]\n"
- "ldr s24, [x21, #0x0]\n"
+ "ldr s8, [x15, #0x0]\n"
+ "mov x24, #0x0\n"
+ "ldr s12, [x23, #0x0]\n"
+ "ldr s16, [x22, #0x0]\n"
+ "ldr s20, [x21, #0x0]\n"
+ "ldr s24, [x20, #0x0]\n"
"146:" // Height 5: Partial accumulate: Done
- "sub x17, x17, x25\n"
+ "sub x15, x15, x24\n"
"b 149f\n"
"147:" // Height 5: full accumulate
- "ldr q8, [x17, #0x0]\n"
- "ldr q9, [x17, #0x10]\n"
- "ldr q10, [x17, #0x20]\n"
- "ldr q11, [x17, #0x30]\n"
- "ldr q12, [x24, #0x0]\n"
- "ldr q13, [x24, #0x10]\n"
- "ldr q14, [x24, #0x20]\n"
- "ldr q15, [x24, #0x30]\n"
- "ldr q16, [x23, #0x0]\n"
- "ldr q17, [x23, #0x10]\n"
- "ldr q18, [x23, #0x20]\n"
- "ldr q19, [x23, #0x30]\n"
- "ldr q20, [x22, #0x0]\n"
- "ldr q21, [x22, #0x10]\n"
- "ldr q22, [x22, #0x20]\n"
- "ldr q23, [x22, #0x30]\n"
- "ldr q24, [x21, #0x0]\n"
- "ldr q25, [x21, #0x10]\n"
- "ldr q26, [x21, #0x20]\n"
- "ldr q27, [x21, #0x30]\n"
+ "ldr q8, [x15, #0x0]\n"
+ "ldr q9, [x15, #0x10]\n"
+ "ldr q10, [x15, #0x20]\n"
+ "ldr q11, [x15, #0x30]\n"
+ "ldr q12, [x23, #0x0]\n"
+ "ldr q13, [x23, #0x10]\n"
+ "ldr q14, [x23, #0x20]\n"
+ "ldr q15, [x23, #0x30]\n"
+ "ldr q16, [x22, #0x0]\n"
+ "ldr q17, [x22, #0x10]\n"
+ "ldr q18, [x22, #0x20]\n"
+ "ldr q19, [x22, #0x30]\n"
+ "ldr q20, [x21, #0x0]\n"
+ "ldr q21, [x21, #0x10]\n"
+ "ldr q22, [x21, #0x20]\n"
+ "ldr q23, [x21, #0x30]\n"
+ "ldr q24, [x20, #0x0]\n"
+ "ldr q25, [x20, #0x10]\n"
+ "ldr q26, [x20, #0x20]\n"
+ "ldr q27, [x20, #0x30]\n"
"b 149f\n"
"148:" // Height 5: no accumulate
"movi v8.4s, #0x0\n"
@@ -2112,231 +2112,231 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"movi v26.4s, #0x0\n"
"movi v27.4s, #0x0\n"
"149:" // Height 5: setup done
- "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
"150:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w13, [x20, x14, LSL #0x2]\n"
"tbz %x[flags], #3, 151f\n"
- "ldr x21, [%x[input_ptr], x15, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x13, [x21, #0x0]\n"
- "ldr x9, [x21, #0x8]\n"
- "ldr x27, [x21, #0x10]\n"
- "ldr x25, [x21, #0x18]\n"
- "ldr x23, [x21, #0x20]\n"
- "cbnz x15, 152f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x13, x13, x20\n"
- "add x9, x9, x20\n"
- "add x27, x27, x20\n"
- "add x25, x25, x20\n"
- "add x23, x23, x20\n"
+ "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x12, [x20, #0x0]\n"
+ "ldr x28, [x20, #0x8]\n"
+ "ldr x26, [x20, #0x10]\n"
+ "ldr x24, [x20, #0x18]\n"
+ "ldr x22, [x20, #0x20]\n"
+ "cbnz x14, 152f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x12, x12, x19\n"
+ "add x28, x28, x19\n"
+ "add x26, x26, x19\n"
+ "add x24, x24, x19\n"
+ "add x22, x22, x19\n"
"b 152f\n"
"151:" // Height 5: setup direct input
- "mov x13, %x[input_ptr]\n"
- "add x9, x13, x20\n"
- "add x27, x9, x20\n"
- "add x25, x27, x20\n"
- "add x23, x25, x20\n"
+ "mov x12, %x[input_ptr]\n"
+ "add x28, x12, x19\n"
+ "add x26, x28, x19\n"
+ "add x24, x26, x19\n"
+ "add x22, x24, x19\n"
"152:" // Height 5: input setup done
- "cmp x14, #0x10\n"
+ "cmp x13, #0x10\n"
"blt 155f\n"
- "ldr q0, [x13, #0x0]\n"
- "cmp x14, #0x20\n"
- "ldr q1, [x9, #0x0]\n"
- "ldr q2, [x27, #0x0]\n"
- "ldr q3, [x25, #0x0]\n"
- "ldr q4, [x23, #0x0]\n"
+ "ldr q0, [x12, #0x0]\n"
+ "ldr q1, [x28, #0x0]\n"
+ "cmp x13, #0x20\n"
+ "ldr q2, [x26, #0x0]\n"
+ "ldr q3, [x24, #0x0]\n"
+ "ldr q4, [x22, #0x0]\n"
"ldr q6, [x16, #0x0]\n"
- "ldr q7, [x16, #0x10]\n"
"blt 154f\n"
"153:" // Height 5: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr x12, [x16, #0x28]\n"
+ "ldr d7, [x16, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr x11, [x16, #0x38]\n"
+ "ldr x11, [x16, #0x18]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
- "add x13, x13, #0x10\n"
+ "ldr x10, [x16, #0x28]\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "add x12, x12, #0x10\n"
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
- "ldr d6, [x16, #0x20]\n"
+ "mov v7.d[1], x11\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
+ "add x28, x28, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x12\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr x12, [x16, #0x48]\n"
+ "ldr d6, [x16, #0x20]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "add x27, x27, #0x10\n"
+ "ldr x11, [x16, #0x38]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
- "add x25, x25, #0x10\n"
+ "ldr x9, [x12, #0x8]\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x16, #0x30]\n"
- "mov v7.d[1], x11\n"
+ "add x26, x26, #0x10\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "ldr x11, [x16, #0x58]\n"
+ "ldr x10, [x16, #0x48]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
- "add x23, x23, #0x10\n"
+ "mov v7.d[1], x11\n"
".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n"
- "ldr x10, [x13, #0x8]\n"
+ "ldr x11, [x16, #0x58]\n"
".inst 0x4f84e0da // sdot v26.4s, v6.16b, v4.4b[0]\n"
"ldr d6, [x16, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x12\n"
+ "ldr x27, [x28, #0x8]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
- "ldr x12, [x16, #0x68]\n"
+ "ldr x25, [x26, #0x8]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
- "ldr x28, [x9, #0x8]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
- "ldr x26, [x27, #0x8]\n"
+ "ldr x10, [x16, #0x68]\n"
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
"ldr d7, [x16, #0x50]\n"
- "mov v7.d[1], x11\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
+ "add x24, x24, #0x10\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
- "ldr x11, [x16, #0x78]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n"
- "ldr x24, [x25, #0x8]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4fa3e0d4 // sdot v20.4s, v6.16b, v3.4b[1]\n"
- "ldr x22, [x23, #0x8]\n"
+ "ldr x11, [x16, #0x78]\n"
".inst 0x4fa4e0d8 // sdot v24.4s, v6.16b, v4.4b[1]\n"
"ldr d6, [x16, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
- "mov v6.d[1], x12\n"
+ "ldr x23, [x24, #0x8]\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
- "ldr x12, [x16, #0x88]\n"
+ "add x22, x22, #0x10\n"
".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n"
- "sub x14, x14, #0x10\n"
+ "mov v6.d[1], x10\n"
".inst 0x4fa3e0f5 // sdot v21.4s, v7.16b, v3.4b[1]\n"
- "cmp x14, #0x20\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4fa4e0f9 // sdot v25.4s, v7.16b, v4.4b[1]\n"
"ldr d7, [x16, #0x70]\n"
- "mov v7.d[1], x11\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
+ "ldr x10, [x16, #0x88]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
- "ldr x11, [x16, #0x98]\n"
+ "ldr x21, [x22, #0x8]\n"
".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4fa3e0d6 // sdot v22.4s, v6.16b, v3.4b[1]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "ldr x11, [x16, #0x98]\n"
".inst 0x4fa4e0da // sdot v26.4s, v6.16b, v4.4b[1]\n"
"ldr d6, [x16, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
- "mov v6.d[1], x12\n"
+ "sub x13, x13, #0x10\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
- "ldr x12, [x16, #0xa8]\n"
+ "cmp x13, #0x20\n"
".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4fa3e0f7 // sdot v23.4s, v7.16b, v3.4b[1]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr x10, [x16, #0xa8]\n"
".inst 0x4fa4e0fb // sdot v27.4s, v7.16b, v4.4b[1]\n"
"ldr d7, [x16, #0x90]\n"
- "mov v7.d[1], x11\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
- "ldr x11, [x16, #0xb8]\n"
".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4f83e8d4 // sdot v20.4s, v6.16b, v3.4b[2]\n"
+ "ldr x11, [x16, #0xb8]\n"
".inst 0x4f84e8d8 // sdot v24.4s, v6.16b, v4.4b[2]\n"
"ldr d6, [x16, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
- "mov v6.d[1], x12\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
- "ldr x12, [x16, #0xc8]\n"
".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4f83e8f5 // sdot v21.4s, v7.16b, v3.4b[2]\n"
+ "ldr x10, [x16, #0xc8]\n"
".inst 0x4f84e8f9 // sdot v25.4s, v7.16b, v4.4b[2]\n"
"ldr d7, [x16, #0xb0]\n"
- "mov v7.d[1], x11\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
- "ldr x11, [x16, #0xd8]\n"
".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4f83e8d6 // sdot v22.4s, v6.16b, v3.4b[2]\n"
+ "ldr x11, [x16, #0xd8]\n"
".inst 0x4f84e8da // sdot v26.4s, v6.16b, v4.4b[2]\n"
"ldr d6, [x16, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
- "mov v6.d[1], x12\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
- "ldr x12, [x16, #0xe8]\n"
".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4f83e8f7 // sdot v23.4s, v7.16b, v3.4b[2]\n"
+ "ldr x10, [x16, #0xe8]\n"
".inst 0x4f84e8fb // sdot v27.4s, v7.16b, v4.4b[2]\n"
"ldr d7, [x16, #0xd0]\n"
- "mov v7.d[1], x11\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
- "ldr x11, [x16, #0xf8]\n"
".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4fa3e8d4 // sdot v20.4s, v6.16b, v3.4b[3]\n"
+ "ldr x11, [x16, #0xf8]\n"
".inst 0x4fa4e8d8 // sdot v24.4s, v6.16b, v4.4b[3]\n"
"ldr d6, [x16, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
- "mov v6.d[1], x12\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4fa3e8f5 // sdot v21.4s, v7.16b, v3.4b[3]\n"
".inst 0x4fa4e8f9 // sdot v25.4s, v7.16b, v4.4b[3]\n"
"ldr d7, [x16, #0xf0]\n"
- "mov v7.d[1], x11\n"
- "add x16, x16, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
- "ldr x12, [x16, #0x8]\n"
+ "add x16, x16, #0x100\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
- "ldr x11, [x16, #0x18]\n"
+ "ldr x10, [x16, #0x8]\n"
".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4fa3e8d6 // sdot v22.4s, v6.16b, v3.4b[3]\n"
".inst 0x4fa4e8da // sdot v26.4s, v6.16b, v4.4b[3]\n"
"ldr d6, [x16, #0x0]\n"
".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
- "ldr d0, [x13, #0x0]\n"
+ "ldr d0, [x12, #0x0]\n"
".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n"
- "ldr d1, [x9, #0x0]\n"
+ "ldr d1, [x28, #0x0]\n"
".inst 0x4fa2e8f3 // sdot v19.4s, v7.16b, v2.4b[3]\n"
- "ldr d2, [x27, #0x0]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4fa3e8f7 // sdot v23.4s, v7.16b, v3.4b[3]\n"
- "ldr d3, [x25, #0x0]\n"
+ "mov v0.d[1], x9\n"
".inst 0x4fa4e8fb // sdot v27.4s, v7.16b, v4.4b[3]\n"
- "ldr d4, [x23, #0x0]\n"
- "ldr d7, [x16, #0x10]\n"
- "mov v6.d[1], x12\n"
- "mov v0.d[1], x10\n"
- "mov v1.d[1], x28\n"
- "mov v2.d[1], x26\n"
- "mov v3.d[1], x24\n"
- "mov v4.d[1], x22\n"
- "mov v7.d[1], x11\n"
+ "mov v1.d[1], x27\n"
+ "ldr d2, [x26, #0x0]\n"
+ "ldr d3, [x24, #0x0]\n"
+ "ldr d4, [x22, #0x0]\n"
+ "mov v2.d[1], x25\n"
+ "mov v3.d[1], x23\n"
+ "mov v4.d[1], x21\n"
"bge 153b\n"
"154:" // Height 5: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "add x13, x13, #0x10\n"
+ "ldr q7, [x16, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "sub x13, x13, #0x10\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
- "add x27, x27, #0x10\n"
+ "add x12, x12, #0x10\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
- "add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
"ldr q6, [x16, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "add x23, x23, #0x10\n"
+ "add x28, x28, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "sub x14, x14, #0x10\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "add x26, x26, #0x10\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
"ldr q7, [x16, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "add x24, x24, #0x10\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "add x22, x22, #0x10\n"
".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4f84e0da // sdot v26.4s, v6.16b, v4.4b[0]\n"
"ldr q6, [x16, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
@@ -2417,20 +2417,20 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
".inst 0x4fa3e8f7 // sdot v23.4s, v7.16b, v3.4b[3]\n"
".inst 0x4fa4e8fb // sdot v27.4s, v7.16b, v4.4b[3]\n"
"155:" // Height 5: Multiply loop: Main loop skip
- "cbz x14, 160f\n"
- "cmp x14, #0x4\n"
+ "cbz x13, 160f\n"
+ "cmp x13, #0x4\n"
"blt 157f\n"
"156:" // Height 5: Multiply loop: Odd block loop
- "ldr s0, [x13], #0x4\n"
- "sub x14, x14, #0x4\n"
- "ldr s1, [x9], #0x4\n"
- "cmp x14, #0x4\n"
- "ldr s2, [x27], #0x4\n"
- "ldr s3, [x25], #0x4\n"
- "ldr s4, [x23], #0x4\n"
+ "ldr s0, [x12], #0x4\n"
+ "sub x13, x13, #0x4\n"
+ "ldr s1, [x28], #0x4\n"
+ "cmp x13, #0x4\n"
+ "ldr s2, [x26], #0x4\n"
+ "ldr s3, [x24], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
"ldr q6, [x16, #0x0]\n"
- ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x16, #0x10]\n"
+ ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
@@ -2454,31 +2454,31 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
"bge 156b\n"
+ "cbz x13, 160f\n"
"157:" // Height 5: Multiply loop: Skip odd blocks
- "cbz x14, 160f\n"
- "tbz x14, #1, 158f\n"
- "ldr h0, [x13], #0x2\n"
- "ldr h1, [x9], #0x2\n"
- "ldr h2, [x27], #0x2\n"
- "ldr h3, [x25], #0x2\n"
- "ldr h4, [x23], #0x2\n"
- "tbz x14, #0, 159f\n"
- "ld1 { v0.b }[2], [x13]\n"
- "ld1 { v1.b }[2], [x9]\n"
- "ld1 { v2.b }[2], [x27]\n"
- "ld1 { v3.b }[2], [x25]\n"
- "ld1 { v4.b }[2], [x23]\n"
+ "tbz x13, #1, 158f\n"
+ "ldr h0, [x12], #0x2\n"
+ "ldr h1, [x28], #0x2\n"
+ "ldr h2, [x26], #0x2\n"
+ "ldr h3, [x24], #0x2\n"
+ "ldr h4, [x22], #0x2\n"
+ "tbz x13, #0, 159f\n"
+ "ld1 { v0.b }[2], [x12]\n"
+ "ld1 { v1.b }[2], [x28]\n"
+ "ld1 { v2.b }[2], [x26]\n"
+ "ld1 { v3.b }[2], [x24]\n"
+ "ld1 { v4.b }[2], [x22]\n"
"b 159f\n"
"158:" // Height 5: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x13, #0x0]\n"
- "ldr b1, [x9, #0x0]\n"
- "ldr b2, [x27, #0x0]\n"
- "ldr b3, [x25, #0x0]\n"
- "ldr b4, [x23, #0x0]\n"
+ "ldr b0, [x12, #0x0]\n"
+ "ldr b1, [x28, #0x0]\n"
+ "ldr b2, [x26, #0x0]\n"
+ "ldr b3, [x24, #0x0]\n"
+ "ldr b4, [x22, #0x0]\n"
"159:" // Height 5: Multiply loop: Ragged operand read: Done
"ldr q6, [x16, #0x0]\n"
- ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x16, #0x10]\n"
+ ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
@@ -2502,335 +2502,335 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
"160:" // Height 5: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x15, x15, #0x1\n"
- "cmp x15, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x14, x14, #0x1\n"
+ "cmp x14, x19\n"
"bne 150b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "cmp x8, #0x10\n"
- "prfm pstl1keep, [x17, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x15, #0x0]\n"
+ "cmp x17, #0x10\n"
+ "add x23, x15, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #2\n"
"prfm pstl1keep, [x21, #0x0]\n"
+ "add x20, x21, x19, LSL #2\n"
+ "prfm pstl1keep, [x20, #0x0]\n"
"bge 169f\n"
- "tbz x8, #3, 164f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v9.4s }, [x17], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v13.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v21.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
- "st1 { v25.4s }, [x21], #0x10\n"
- "tbz x8, #2, 162f\n"
- "st1 { v10.4s }, [x17], #0x10\n"
- "st1 { v14.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "st1 { v22.4s }, [x22], #0x10\n"
- "st1 { v26.4s }, [x21], #0x10\n"
- "tbz x8, #1, 161f\n"
- "str d11, [x17], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
- "str d23, [x22], #0x8\n"
- "str d27, [x21], #0x8\n"
- "tbz x8, #0, 168f\n"
- "st1 { v11.s }[2], [x17]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
- "st1 { v23.s }[2], [x22]\n"
- "st1 { v27.s }[2], [x21]\n"
+ "tbz x17, #3, 164f\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
+ "st1 { v9.4s }, [x15], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v13.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v21.4s }, [x21], #0x10\n"
+ "st1 { v24.4s }, [x20], #0x10\n"
+ "st1 { v25.4s }, [x20], #0x10\n"
+ "tbz x17, #2, 162f\n"
+ "st1 { v10.4s }, [x15], #0x10\n"
+ "st1 { v14.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
+ "st1 { v22.4s }, [x21], #0x10\n"
+ "st1 { v26.4s }, [x20], #0x10\n"
+ "tbz x17, #1, 161f\n"
+ "str d11, [x15], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "str d23, [x21], #0x8\n"
+ "str d27, [x20], #0x8\n"
+ "tbz x17, #0, 168f\n"
+ "st1 { v11.s }[2], [x15]\n"
+ "st1 { v15.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x22]\n"
+ "st1 { v23.s }[2], [x21]\n"
+ "st1 { v27.s }[2], [x20]\n"
"b 168f\n"
"161:" // Height 5: Partial direct writeback: partial_1_12
- "tbz x8, #0, 168f\n"
- "str s11, [x17, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
- "str s23, [x22, #0x0]\n"
- "str s27, [x21, #0x0]\n"
+ "tbz x17, #0, 168f\n"
+ "str s11, [x15, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
+ "str s23, [x21, #0x0]\n"
+ "str s27, [x20, #0x0]\n"
"b 168f\n"
"162:" // Height 5: Partial direct writeback: partial_2_8
- "tbz x8, #1, 163f\n"
- "str d10, [x17], #0x8\n"
- "str d14, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
- "str d22, [x22], #0x8\n"
- "str d26, [x21], #0x8\n"
- "tbz x8, #0, 168f\n"
- "st1 { v10.s }[2], [x17]\n"
- "st1 { v14.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
- "st1 { v22.s }[2], [x22]\n"
- "st1 { v26.s }[2], [x21]\n"
+ "tbz x17, #1, 163f\n"
+ "str d10, [x15], #0x8\n"
+ "str d14, [x23], #0x8\n"
+ "str d18, [x22], #0x8\n"
+ "str d22, [x21], #0x8\n"
+ "str d26, [x20], #0x8\n"
+ "tbz x17, #0, 168f\n"
+ "st1 { v10.s }[2], [x15]\n"
+ "st1 { v14.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x22]\n"
+ "st1 { v22.s }[2], [x21]\n"
+ "st1 { v26.s }[2], [x20]\n"
"b 168f\n"
"163:" // Height 5: Partial direct writeback: partial_1_8
- "tbz x8, #0, 168f\n"
- "str s10, [x17, #0x0]\n"
- "str s14, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
- "str s22, [x22, #0x0]\n"
- "str s26, [x21, #0x0]\n"
+ "tbz x17, #0, 168f\n"
+ "str s10, [x15, #0x0]\n"
+ "str s14, [x23, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
+ "str s22, [x21, #0x0]\n"
+ "str s26, [x20, #0x0]\n"
"b 168f\n"
"164:" // Height 5: Partial direct writeback: partial_4_0
- "tbz x8, #2, 166f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
- "tbz x8, #1, 165f\n"
- "str d9, [x17], #0x8\n"
- "str d13, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
- "str d21, [x22], #0x8\n"
- "str d25, [x21], #0x8\n"
- "tbz x8, #0, 168f\n"
- "st1 { v9.s }[2], [x17]\n"
- "st1 { v13.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
- "st1 { v21.s }[2], [x22]\n"
- "st1 { v25.s }[2], [x21]\n"
+ "tbz x17, #2, 166f\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v24.4s }, [x20], #0x10\n"
+ "tbz x17, #1, 165f\n"
+ "str d9, [x15], #0x8\n"
+ "str d13, [x23], #0x8\n"
+ "str d17, [x22], #0x8\n"
+ "str d21, [x21], #0x8\n"
+ "str d25, [x20], #0x8\n"
+ "tbz x17, #0, 168f\n"
+ "st1 { v9.s }[2], [x15]\n"
+ "st1 { v13.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x22]\n"
+ "st1 { v21.s }[2], [x21]\n"
+ "st1 { v25.s }[2], [x20]\n"
"b 168f\n"
"165:" // Height 5: Partial direct writeback: partial_1_4
- "tbz x8, #0, 168f\n"
- "str s9, [x17, #0x0]\n"
- "str s13, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
- "str s21, [x22, #0x0]\n"
- "str s25, [x21, #0x0]\n"
+ "tbz x17, #0, 168f\n"
+ "str s9, [x15, #0x0]\n"
+ "str s13, [x23, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
+ "str s21, [x21, #0x0]\n"
+ "str s25, [x20, #0x0]\n"
"b 168f\n"
"166:" // Height 5: Partial direct writeback: partial_2_0
- "tbz x8, #1, 167f\n"
- "str d8, [x17], #0x8\n"
- "str d12, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d20, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
- "tbz x8, #0, 168f\n"
- "st1 { v8.s }[2], [x17]\n"
- "st1 { v12.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
- "st1 { v20.s }[2], [x22]\n"
- "st1 { v24.s }[2], [x21]\n"
+ "tbz x17, #1, 167f\n"
+ "str d8, [x15], #0x8\n"
+ "str d12, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "str d20, [x21], #0x8\n"
+ "str d24, [x20], #0x8\n"
+ "tbz x17, #0, 168f\n"
+ "st1 { v8.s }[2], [x15]\n"
+ "st1 { v12.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x22]\n"
+ "st1 { v20.s }[2], [x21]\n"
+ "st1 { v24.s }[2], [x20]\n"
"b 168f\n"
"167:" // Height 5: Partial direct writeback: partial_1_0
- "str s8, [x17, #0x0]\n"
- "str s12, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
- "str s20, [x22, #0x0]\n"
- "str s24, [x21, #0x0]\n"
+ "str s8, [x15, #0x0]\n"
+ "str s12, [x23, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
+ "str s20, [x21, #0x0]\n"
+ "str s24, [x20, #0x0]\n"
"168:" // Height 5: Partial direct writeback: Done
"b 170f\n"
"169:" // Height 5: Full writeback
- "str q8, [x17, #0x0]\n"
- "str q9, [x17, #0x10]\n"
- "str q10, [x17, #0x20]\n"
- "str q11, [x17, #0x30]\n"
- "add x17, x17, #0x40\n"
- "str q12, [x24, #0x0]\n"
- "str q13, [x24, #0x10]\n"
- "str q14, [x24, #0x20]\n"
- "str q15, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
- "str q20, [x22, #0x0]\n"
- "str q21, [x22, #0x10]\n"
- "str q22, [x22, #0x20]\n"
- "str q23, [x22, #0x30]\n"
- "str q24, [x21, #0x0]\n"
- "str q25, [x21, #0x10]\n"
- "str q26, [x21, #0x20]\n"
- "str q27, [x21, #0x30]\n"
+ "str q8, [x15, #0x0]\n"
+ "str q9, [x15, #0x10]\n"
+ "str q10, [x15, #0x20]\n"
+ "str q11, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
+ "str q12, [x23, #0x0]\n"
+ "str q13, [x23, #0x10]\n"
+ "str q14, [x23, #0x20]\n"
+ "str q15, [x23, #0x30]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q17, [x22, #0x10]\n"
+ "str q18, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
+ "str q20, [x21, #0x0]\n"
+ "str q21, [x21, #0x10]\n"
+ "str q22, [x21, #0x20]\n"
+ "str q23, [x21, #0x30]\n"
+ "str q24, [x20, #0x0]\n"
+ "str q25, [x20, #0x10]\n"
+ "str q26, [x20, #0x20]\n"
+ "str q27, [x20, #0x30]\n"
"170:" // Height 5: Writeback done
- "subs x8, x8, #0x10\n"
+ "subs x17, x17, #0x10\n"
"bgt 138b\n"
"b 206f\n"
"171:" // Height 6
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x18\n"
- "ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x17, %x[output_ptr]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x15, %x[output_ptr]\n"
"ldr x16, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "mov x20, #0x18\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
"172:" // Height 6: Column loop
"tbz %x[flags], #0, 182f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "cmp x8, #0x10\n"
- "add x20, x21, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x17, #0x10\n"
+ "add x23, x15, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
+ "add x19, x20, x19, LSL #2\n"
"bge 181f\n"
- "tbz x8, #3, 176f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v24.4s }, [x21], #0x10\n"
- "ld1 { v28.4s }, [x20], #0x10\n"
- "ld1 { v9.4s }, [x17], #0x10\n"
- "ld1 { v13.4s }, [x24], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v25.4s }, [x21], #0x10\n"
- "ld1 { v29.4s }, [x20], #0x10\n"
- "tbz x8, #2, 174f\n"
- "ld1 { v10.4s }, [x17], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
- "ld1 { v30.4s }, [x20], #0x10\n"
- "tbz x8, #1, 173f\n"
- "ldr d11, [x17], #0x8\n"
- "mov x25, #0x38\n"
- "ldr d15, [x24], #0x8\n"
- "ldr d19, [x23], #0x8\n"
- "ldr d23, [x22], #0x8\n"
- "ldr d27, [x21], #0x8\n"
- "ldr d31, [x20], #0x8\n"
- "tbz x8, #0, 180f\n"
- "ld1 { v11.s }[2], [x17]\n"
- "ld1 { v15.s }[2], [x24]\n"
- "ld1 { v19.s }[2], [x23]\n"
- "ld1 { v23.s }[2], [x22]\n"
- "ld1 { v27.s }[2], [x21]\n"
- "ld1 { v31.s }[2], [x20]\n"
+ "tbz x17, #3, 176f\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v9.4s }, [x15], #0x10\n"
+ "ld1 { v13.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v24.4s }, [x20], #0x10\n"
+ "ld1 { v28.4s }, [x19], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "ld1 { v25.4s }, [x20], #0x10\n"
+ "ld1 { v29.4s }, [x19], #0x10\n"
+ "tbz x17, #2, 174f\n"
+ "ld1 { v10.4s }, [x15], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
+ "ld1 { v26.4s }, [x20], #0x10\n"
+ "ld1 { v30.4s }, [x19], #0x10\n"
+ "tbz x17, #1, 173f\n"
+ "ldr d11, [x15], #0x8\n"
+ "mov x24, #0x38\n"
+ "ldr d15, [x23], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
+ "ldr d23, [x21], #0x8\n"
+ "ldr d27, [x20], #0x8\n"
+ "ldr d31, [x19], #0x8\n"
+ "tbz x17, #0, 180f\n"
+ "ld1 { v11.s }[2], [x15]\n"
+ "ld1 { v15.s }[2], [x23]\n"
+ "ld1 { v19.s }[2], [x22]\n"
+ "ld1 { v23.s }[2], [x21]\n"
+ "ld1 { v27.s }[2], [x20]\n"
+ "ld1 { v31.s }[2], [x19]\n"
"b 180f\n"
"173:" // Height 6: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x8, #0, 180f\n"
- "ldr s11, [x17, #0x0]\n"
- "ldr s15, [x24, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s23, [x22, #0x0]\n"
- "ldr s27, [x21, #0x0]\n"
- "ldr s31, [x20, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x17, #0, 180f\n"
+ "ldr s11, [x15, #0x0]\n"
+ "ldr s15, [x23, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
+ "ldr s23, [x21, #0x0]\n"
+ "ldr s27, [x20, #0x0]\n"
+ "ldr s31, [x19, #0x0]\n"
"b 180f\n"
"174:" // Height 6: Partial accumulate: partial_2_8
- "tbz x8, #1, 175f\n"
- "ldr d10, [x17], #0x8\n"
- "mov x25, #0x28\n"
- "ldr d14, [x24], #0x8\n"
- "ldr d18, [x23], #0x8\n"
- "ldr d22, [x22], #0x8\n"
- "ldr d26, [x21], #0x8\n"
- "ldr d30, [x20], #0x8\n"
- "tbz x8, #0, 180f\n"
- "ld1 { v10.s }[2], [x17]\n"
- "ld1 { v14.s }[2], [x24]\n"
- "ld1 { v18.s }[2], [x23]\n"
- "ld1 { v22.s }[2], [x22]\n"
- "ld1 { v26.s }[2], [x21]\n"
- "ld1 { v30.s }[2], [x20]\n"
+ "tbz x17, #1, 175f\n"
+ "ldr d10, [x15], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
+ "mov x24, #0x28\n"
+ "ldr d18, [x22], #0x8\n"
+ "ldr d22, [x21], #0x8\n"
+ "ldr d26, [x20], #0x8\n"
+ "ldr d30, [x19], #0x8\n"
+ "tbz x17, #0, 180f\n"
+ "ld1 { v10.s }[2], [x15]\n"
+ "ld1 { v14.s }[2], [x23]\n"
+ "ld1 { v18.s }[2], [x22]\n"
+ "ld1 { v22.s }[2], [x21]\n"
+ "ld1 { v26.s }[2], [x20]\n"
+ "ld1 { v30.s }[2], [x19]\n"
"b 180f\n"
"175:" // Height 6: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x8, #0, 180f\n"
- "ldr s10, [x17, #0x0]\n"
- "ldr s14, [x24, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
- "ldr s22, [x22, #0x0]\n"
- "ldr s26, [x21, #0x0]\n"
- "ldr s30, [x20, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x17, #0, 180f\n"
+ "ldr s10, [x15, #0x0]\n"
+ "ldr s14, [x23, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
+ "ldr s22, [x21, #0x0]\n"
+ "ldr s26, [x20, #0x0]\n"
+ "ldr s30, [x19, #0x0]\n"
"b 180f\n"
"176:" // Height 6: Partial accumulate: partial_4_0
- "tbz x8, #2, 178f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v24.4s }, [x21], #0x10\n"
- "ld1 { v28.4s }, [x20], #0x10\n"
- "tbz x8, #1, 177f\n"
- "ldr d9, [x17], #0x8\n"
- "mov x25, #0x18\n"
- "ldr d13, [x24], #0x8\n"
- "ldr d17, [x23], #0x8\n"
- "ldr d21, [x22], #0x8\n"
- "ldr d25, [x21], #0x8\n"
- "ldr d29, [x20], #0x8\n"
- "tbz x8, #0, 180f\n"
- "ld1 { v9.s }[2], [x17]\n"
- "ld1 { v13.s }[2], [x24]\n"
- "ld1 { v17.s }[2], [x23]\n"
- "ld1 { v21.s }[2], [x22]\n"
- "ld1 { v25.s }[2], [x21]\n"
- "ld1 { v29.s }[2], [x20]\n"
+ "tbz x17, #2, 178f\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v24.4s }, [x20], #0x10\n"
+ "ld1 { v28.4s }, [x19], #0x10\n"
+ "tbz x17, #1, 177f\n"
+ "ldr d9, [x15], #0x8\n"
+ "mov x24, #0x18\n"
+ "ldr d13, [x23], #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "ldr d21, [x21], #0x8\n"
+ "ldr d25, [x20], #0x8\n"
+ "ldr d29, [x19], #0x8\n"
+ "tbz x17, #0, 180f\n"
+ "ld1 { v9.s }[2], [x15]\n"
+ "ld1 { v13.s }[2], [x23]\n"
+ "ld1 { v17.s }[2], [x22]\n"
+ "ld1 { v21.s }[2], [x21]\n"
+ "ld1 { v25.s }[2], [x20]\n"
+ "ld1 { v29.s }[2], [x19]\n"
"b 180f\n"
"177:" // Height 6: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x8, #0, 180f\n"
- "ldr s9, [x17, #0x0]\n"
- "ldr s13, [x24, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
- "ldr s21, [x22, #0x0]\n"
- "ldr s25, [x21, #0x0]\n"
- "ldr s29, [x20, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x17, #0, 180f\n"
+ "ldr s9, [x15, #0x0]\n"
+ "ldr s13, [x23, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
+ "ldr s21, [x21, #0x0]\n"
+ "ldr s25, [x20, #0x0]\n"
+ "ldr s29, [x19, #0x0]\n"
"b 180f\n"
"178:" // Height 6: Partial accumulate: partial_2_0
- "tbz x8, #1, 179f\n"
- "ldr d8, [x17], #0x8\n"
- "mov x25, #0x8\n"
- "ldr d12, [x24], #0x8\n"
- "ldr d16, [x23], #0x8\n"
- "ldr d20, [x22], #0x8\n"
- "ldr d24, [x21], #0x8\n"
- "ldr d28, [x20], #0x8\n"
- "tbz x8, #0, 180f\n"
- "ld1 { v8.s }[2], [x17]\n"
- "ld1 { v12.s }[2], [x24]\n"
- "ld1 { v16.s }[2], [x23]\n"
- "ld1 { v20.s }[2], [x22]\n"
- "ld1 { v24.s }[2], [x21]\n"
- "ld1 { v28.s }[2], [x20]\n"
+ "tbz x17, #1, 179f\n"
+ "ldr d8, [x15], #0x8\n"
+ "ldr d12, [x23], #0x8\n"
+ "mov x24, #0x8\n"
+ "ldr d16, [x22], #0x8\n"
+ "ldr d20, [x21], #0x8\n"
+ "ldr d24, [x20], #0x8\n"
+ "ldr d28, [x19], #0x8\n"
+ "tbz x17, #0, 180f\n"
+ "ld1 { v8.s }[2], [x15]\n"
+ "ld1 { v12.s }[2], [x23]\n"
+ "ld1 { v16.s }[2], [x22]\n"
+ "ld1 { v20.s }[2], [x21]\n"
+ "ld1 { v24.s }[2], [x20]\n"
+ "ld1 { v28.s }[2], [x19]\n"
"b 180f\n"
"179:" // Height 6: Partial accumulate: partial_1_0
- "ldr s8, [x17, #0x0]\n"
- "mov x25, #0x0\n"
- "ldr s12, [x24, #0x0]\n"
- "ldr s16, [x23, #0x0]\n"
- "ldr s20, [x22, #0x0]\n"
- "ldr s24, [x21, #0x0]\n"
- "ldr s28, [x20, #0x0]\n"
+ "ldr s8, [x15, #0x0]\n"
+ "mov x24, #0x0\n"
+ "ldr s12, [x23, #0x0]\n"
+ "ldr s16, [x22, #0x0]\n"
+ "ldr s20, [x21, #0x0]\n"
+ "ldr s24, [x20, #0x0]\n"
+ "ldr s28, [x19, #0x0]\n"
"180:" // Height 6: Partial accumulate: Done
- "sub x17, x17, x25\n"
+ "sub x15, x15, x24\n"
"b 183f\n"
"181:" // Height 6: full accumulate
- "ldr q8, [x17, #0x0]\n"
- "ldr q9, [x17, #0x10]\n"
- "ldr q10, [x17, #0x20]\n"
- "ldr q11, [x17, #0x30]\n"
- "ldr q12, [x24, #0x0]\n"
- "ldr q13, [x24, #0x10]\n"
- "ldr q14, [x24, #0x20]\n"
- "ldr q15, [x24, #0x30]\n"
- "ldr q16, [x23, #0x0]\n"
- "ldr q17, [x23, #0x10]\n"
- "ldr q18, [x23, #0x20]\n"
- "ldr q19, [x23, #0x30]\n"
- "ldr q20, [x22, #0x0]\n"
- "ldr q21, [x22, #0x10]\n"
- "ldr q22, [x22, #0x20]\n"
- "ldr q23, [x22, #0x30]\n"
- "ldr q24, [x21, #0x0]\n"
- "ldr q25, [x21, #0x10]\n"
- "ldr q26, [x21, #0x20]\n"
- "ldr q27, [x21, #0x30]\n"
- "ldr q28, [x20, #0x0]\n"
- "ldr q29, [x20, #0x10]\n"
- "ldr q30, [x20, #0x20]\n"
- "ldr q31, [x20, #0x30]\n"
+ "ldr q8, [x15, #0x0]\n"
+ "ldr q9, [x15, #0x10]\n"
+ "ldr q10, [x15, #0x20]\n"
+ "ldr q11, [x15, #0x30]\n"
+ "ldr q12, [x23, #0x0]\n"
+ "ldr q13, [x23, #0x10]\n"
+ "ldr q14, [x23, #0x20]\n"
+ "ldr q15, [x23, #0x30]\n"
+ "ldr q16, [x22, #0x0]\n"
+ "ldr q17, [x22, #0x10]\n"
+ "ldr q18, [x22, #0x20]\n"
+ "ldr q19, [x22, #0x30]\n"
+ "ldr q20, [x21, #0x0]\n"
+ "ldr q21, [x21, #0x10]\n"
+ "ldr q22, [x21, #0x20]\n"
+ "ldr q23, [x21, #0x30]\n"
+ "ldr q24, [x20, #0x0]\n"
+ "ldr q25, [x20, #0x10]\n"
+ "ldr q26, [x20, #0x20]\n"
+ "ldr q27, [x20, #0x30]\n"
+ "ldr q28, [x19, #0x0]\n"
+ "ldr q29, [x19, #0x10]\n"
+ "ldr q30, [x19, #0x20]\n"
+ "ldr q31, [x19, #0x30]\n"
"b 183f\n"
"182:" // Height 6: no accumulate
"movi v8.4s, #0x0\n"
@@ -2858,260 +2858,260 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"movi v30.4s, #0x0\n"
"movi v31.4s, #0x0\n"
"183:" // Height 6: setup done
- "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
"184:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w13, [x20, x14, LSL #0x2]\n"
"tbz %x[flags], #3, 185f\n"
- "ldr x21, [%x[input_ptr], x15, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x13, [x21, #0x0]\n"
- "ldr x9, [x21, #0x8]\n"
- "ldr x27, [x21, #0x10]\n"
- "ldr x25, [x21, #0x18]\n"
- "ldr x23, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x15, 186f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x13, x13, x20\n"
- "add x9, x9, x20\n"
- "add x27, x27, x20\n"
- "add x25, x25, x20\n"
- "add x23, x23, x20\n"
- "add x21, x21, x20\n"
+ "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x12, [x20, #0x0]\n"
+ "ldr x28, [x20, #0x8]\n"
+ "ldr x26, [x20, #0x10]\n"
+ "ldr x24, [x20, #0x18]\n"
+ "ldr x22, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x14, 186f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x12, x12, x19\n"
+ "add x28, x28, x19\n"
+ "add x26, x26, x19\n"
+ "add x24, x24, x19\n"
+ "add x22, x22, x19\n"
+ "add x20, x20, x19\n"
"b 186f\n"
"185:" // Height 6: setup direct input
- "mov x13, %x[input_ptr]\n"
- "add x9, x13, x20\n"
- "add x27, x9, x20\n"
- "add x25, x27, x20\n"
- "add x23, x25, x20\n"
- "add x21, x23, x20\n"
+ "mov x12, %x[input_ptr]\n"
+ "add x28, x12, x19\n"
+ "add x26, x28, x19\n"
+ "add x24, x26, x19\n"
+ "add x22, x24, x19\n"
+ "add x20, x22, x19\n"
"186:" // Height 6: input setup done
- "cmp x14, #0x10\n"
+ "cmp x13, #0x10\n"
"blt 189f\n"
- "ldr q0, [x13, #0x0]\n"
- "cmp x14, #0x20\n"
- "ldr q1, [x9, #0x0]\n"
- "ldr q2, [x27, #0x0]\n"
- "ldr q3, [x25, #0x0]\n"
- "ldr q4, [x23, #0x0]\n"
- "ldr q5, [x21, #0x0]\n"
+ "ldr q0, [x12, #0x0]\n"
+ "ldr q1, [x28, #0x0]\n"
+ "cmp x13, #0x20\n"
+ "ldr q2, [x26, #0x0]\n"
+ "ldr q3, [x24, #0x0]\n"
+ "ldr q4, [x22, #0x0]\n"
+ "ldr q5, [x20, #0x0]\n"
"ldr q6, [x16, #0x0]\n"
- "ldr q7, [x16, #0x10]\n"
"blt 188f\n"
"187:" // Height 6: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr x12, [x16, #0x28]\n"
+ "ldr d7, [x16, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr x11, [x16, #0x38]\n"
+ "ldr x11, [x16, #0x18]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
- "add x13, x13, #0x10\n"
+ "ldr x10, [x16, #0x28]\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "add x12, x12, #0x10\n"
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
- "add x27, x27, #0x10\n"
+ "mov v7.d[1], x11\n"
".inst 0x4f85e0dc // sdot v28.4s, v6.16b, v5.4b[0]\n"
- "ldr d6, [x16, #0x20]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x12\n"
+ "ldr d6, [x16, #0x20]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr x12, [x16, #0x48]\n"
+ "ldr x11, [x16, #0x38]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "add x25, x25, #0x10\n"
+ "ldr x9, [x12, #0x8]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
- "add x23, x23, #0x10\n"
+ "mov v6.d[1], x10\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
- "add x21, x21, #0x10\n"
+ "ldr x10, [x16, #0x48]\n"
".inst 0x4f85e0fd // sdot v29.4s, v7.16b, v5.4b[0]\n"
"ldr d7, [x16, #0x30]\n"
- "mov v7.d[1], x11\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
+ "add x28, x28, #0x10\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "ldr x11, [x16, #0x58]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
- "ldr x10, [x13, #0x8]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n"
- "ldr x28, [x9, #0x8]\n"
+ "ldr x11, [x16, #0x58]\n"
".inst 0x4f84e0da // sdot v26.4s, v6.16b, v4.4b[0]\n"
- "ldr x26, [x27, #0x8]\n"
+ "ldr x27, [x28, #0x8]\n"
".inst 0x4f85e0de // sdot v30.4s, v6.16b, v5.4b[0]\n"
"ldr d6, [x16, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x12\n"
+ "add x26, x26, #0x10\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
- "ldr x12, [x16, #0x68]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
- "ldr x24, [x25, #0x8]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
- "ldr x22, [x23, #0x8]\n"
+ "ldr x10, [x16, #0x68]\n"
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
- "ldr x20, [x21, #0x8]\n"
+ "ldr x25, [x26, #0x8]\n"
".inst 0x4f85e0ff // sdot v31.4s, v7.16b, v5.4b[0]\n"
"ldr d7, [x16, #0x50]\n"
- "mov v7.d[1], x11\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
+ "add x24, x24, #0x10\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
- "ldr x11, [x16, #0x78]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n"
- "sub x14, x14, #0x10\n"
+ "mov v7.d[1], x11\n"
".inst 0x4fa3e0d4 // sdot v20.4s, v6.16b, v3.4b[1]\n"
- "cmp x14, #0x20\n"
+ "ldr x11, [x16, #0x78]\n"
".inst 0x4fa4e0d8 // sdot v24.4s, v6.16b, v4.4b[1]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "ldr x23, [x24, #0x8]\n"
".inst 0x4fa5e0dc // sdot v28.4s, v6.16b, v5.4b[1]\n"
"ldr d6, [x16, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
- "mov v6.d[1], x12\n"
+ "add x22, x22, #0x10\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
- "ldr x12, [x16, #0x88]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4fa3e0f5 // sdot v21.4s, v7.16b, v3.4b[1]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "ldr x10, [x16, #0x88]\n"
".inst 0x4fa4e0f9 // sdot v25.4s, v7.16b, v4.4b[1]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr x21, [x22, #0x8]\n"
".inst 0x4fa5e0fd // sdot v29.4s, v7.16b, v5.4b[1]\n"
"ldr d7, [x16, #0x70]\n"
- "mov v7.d[1], x11\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
+ "add x20, x20, #0x10\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
- "ldr x11, [x16, #0x98]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4fa3e0d6 // sdot v22.4s, v6.16b, v3.4b[1]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
+ "ldr x11, [x16, #0x98]\n"
".inst 0x4fa4e0da // sdot v26.4s, v6.16b, v4.4b[1]\n"
+ "ldr x19, [x20, #0x8]\n"
".inst 0x4fa5e0de // sdot v30.4s, v6.16b, v5.4b[1]\n"
"ldr d6, [x16, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
- "mov v6.d[1], x12\n"
+ "sub x13, x13, #0x10\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
- "ldr x12, [x16, #0xa8]\n"
+ "cmp x13, #0x20\n"
".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4fa3e0f7 // sdot v23.4s, v7.16b, v3.4b[1]\n"
+ "ldr x10, [x16, #0xa8]\n"
".inst 0x4fa4e0fb // sdot v27.4s, v7.16b, v4.4b[1]\n"
".inst 0x4fa5e0ff // sdot v31.4s, v7.16b, v5.4b[1]\n"
"ldr d7, [x16, #0x90]\n"
- "mov v7.d[1], x11\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
- "ldr x11, [x16, #0xb8]\n"
".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4f83e8d4 // sdot v20.4s, v6.16b, v3.4b[2]\n"
+ "ldr x11, [x16, #0xb8]\n"
".inst 0x4f84e8d8 // sdot v24.4s, v6.16b, v4.4b[2]\n"
".inst 0x4f85e8dc // sdot v28.4s, v6.16b, v5.4b[2]\n"
"ldr d6, [x16, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
- "mov v6.d[1], x12\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
- "ldr x12, [x16, #0xc8]\n"
".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4f83e8f5 // sdot v21.4s, v7.16b, v3.4b[2]\n"
+ "ldr x10, [x16, #0xc8]\n"
".inst 0x4f84e8f9 // sdot v25.4s, v7.16b, v4.4b[2]\n"
".inst 0x4f85e8fd // sdot v29.4s, v7.16b, v5.4b[2]\n"
"ldr d7, [x16, #0xb0]\n"
- "mov v7.d[1], x11\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
- "ldr x11, [x16, #0xd8]\n"
".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4f83e8d6 // sdot v22.4s, v6.16b, v3.4b[2]\n"
+ "ldr x11, [x16, #0xd8]\n"
".inst 0x4f84e8da // sdot v26.4s, v6.16b, v4.4b[2]\n"
".inst 0x4f85e8de // sdot v30.4s, v6.16b, v5.4b[2]\n"
"ldr d6, [x16, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
- "mov v6.d[1], x12\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
- "ldr x12, [x16, #0xe8]\n"
".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4f83e8f7 // sdot v23.4s, v7.16b, v3.4b[2]\n"
+ "ldr x10, [x16, #0xe8]\n"
".inst 0x4f84e8fb // sdot v27.4s, v7.16b, v4.4b[2]\n"
".inst 0x4f85e8ff // sdot v31.4s, v7.16b, v5.4b[2]\n"
"ldr d7, [x16, #0xd0]\n"
- "mov v7.d[1], x11\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
- "ldr x11, [x16, #0xf8]\n"
".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4fa3e8d4 // sdot v20.4s, v6.16b, v3.4b[3]\n"
+ "ldr x11, [x16, #0xf8]\n"
".inst 0x4fa4e8d8 // sdot v24.4s, v6.16b, v4.4b[3]\n"
".inst 0x4fa5e8dc // sdot v28.4s, v6.16b, v5.4b[3]\n"
"ldr d6, [x16, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
- "mov v6.d[1], x12\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4fa3e8f5 // sdot v21.4s, v7.16b, v3.4b[3]\n"
".inst 0x4fa4e8f9 // sdot v25.4s, v7.16b, v4.4b[3]\n"
".inst 0x4fa5e8fd // sdot v29.4s, v7.16b, v5.4b[3]\n"
"ldr d7, [x16, #0xf0]\n"
- "mov v7.d[1], x11\n"
- "add x16, x16, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
- "ldr x12, [x16, #0x8]\n"
+ "add x16, x16, #0x100\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
- "ldr x11, [x16, #0x18]\n"
+ "ldr x10, [x16, #0x8]\n"
".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n"
+ "mov v7.d[1], x11\n"
".inst 0x4fa3e8d6 // sdot v22.4s, v6.16b, v3.4b[3]\n"
".inst 0x4fa4e8da // sdot v26.4s, v6.16b, v4.4b[3]\n"
".inst 0x4fa5e8de // sdot v30.4s, v6.16b, v5.4b[3]\n"
"ldr d6, [x16, #0x0]\n"
".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
- "ldr d0, [x13, #0x0]\n"
+ "ldr d0, [x12, #0x0]\n"
".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n"
- "ldr d1, [x9, #0x0]\n"
+ "ldr d1, [x28, #0x0]\n"
".inst 0x4fa2e8f3 // sdot v19.4s, v7.16b, v2.4b[3]\n"
- "ldr d2, [x27, #0x0]\n"
+ "mov v6.d[1], x10\n"
".inst 0x4fa3e8f7 // sdot v23.4s, v7.16b, v3.4b[3]\n"
- "ldr d3, [x25, #0x0]\n"
+ "mov v0.d[1], x9\n"
".inst 0x4fa4e8fb // sdot v27.4s, v7.16b, v4.4b[3]\n"
- "ldr d4, [x23, #0x0]\n"
+ "mov v1.d[1], x27\n"
".inst 0x4fa5e8ff // sdot v31.4s, v7.16b, v5.4b[3]\n"
- "ldr d5, [x21, #0x0]\n"
- "ldr d7, [x16, #0x10]\n"
- "mov v6.d[1], x12\n"
- "mov v0.d[1], x10\n"
- "mov v1.d[1], x28\n"
- "mov v2.d[1], x26\n"
- "mov v3.d[1], x24\n"
- "mov v4.d[1], x22\n"
- "mov v5.d[1], x20\n"
- "mov v7.d[1], x11\n"
+ "ldr d2, [x26, #0x0]\n"
+ "ldr d3, [x24, #0x0]\n"
+ "ldr d4, [x22, #0x0]\n"
+ "mov v2.d[1], x25\n"
+ "ldr d5, [x20, #0x0]\n"
+ "mov v3.d[1], x23\n"
+ "mov v4.d[1], x21\n"
+ "mov v5.d[1], x19\n"
"bge 187b\n"
"188:" // Height 6: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "add x13, x13, #0x10\n"
+ "ldr q7, [x16, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "sub x13, x13, #0x10\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
- "add x27, x27, #0x10\n"
+ "add x12, x12, #0x10\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
- "add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
- "add x23, x23, #0x10\n"
+ "add x28, x28, #0x10\n"
".inst 0x4f85e0dc // sdot v28.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x16, #0x20]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "add x21, x21, #0x10\n"
+ "ldr q6, [x16, #0x20]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "sub x14, x14, #0x10\n"
+ "add x26, x26, #0x10\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "add x24, x24, #0x10\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f85e0fd // sdot v29.4s, v7.16b, v5.4b[0]\n"
"ldr q7, [x16, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "add x22, x22, #0x10\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
+ "add x20, x20, #0x10\n"
".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
".inst 0x4f84e0da // sdot v26.4s, v6.16b, v4.4b[0]\n"
".inst 0x4f85e0de // sdot v30.4s, v6.16b, v5.4b[0]\n"
"ldr q6, [x16, #0x40]\n"
@@ -3206,21 +3206,21 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
".inst 0x4fa4e8fb // sdot v27.4s, v7.16b, v4.4b[3]\n"
".inst 0x4fa5e8ff // sdot v31.4s, v7.16b, v5.4b[3]\n"
"189:" // Height 6: Multiply loop: Main loop skip
- "cbz x14, 194f\n"
- "cmp x14, #0x4\n"
+ "cbz x13, 194f\n"
+ "cmp x13, #0x4\n"
"blt 191f\n"
"190:" // Height 6: Multiply loop: Odd block loop
- "ldr s0, [x13], #0x4\n"
- "sub x14, x14, #0x4\n"
- "ldr s1, [x9], #0x4\n"
- "cmp x14, #0x4\n"
- "ldr s2, [x27], #0x4\n"
- "ldr s3, [x25], #0x4\n"
- "ldr s4, [x23], #0x4\n"
- "ldr s5, [x21], #0x4\n"
+ "ldr s0, [x12], #0x4\n"
+ "sub x13, x13, #0x4\n"
+ "ldr s1, [x28], #0x4\n"
+ "cmp x13, #0x4\n"
+ "ldr s2, [x26], #0x4\n"
+ "ldr s3, [x24], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
+ "ldr s5, [x20], #0x4\n"
"ldr q6, [x16, #0x0]\n"
- ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x16, #0x10]\n"
+ ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
@@ -3248,34 +3248,34 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
".inst 0x4f85e0ff // sdot v31.4s, v7.16b, v5.4b[0]\n"
"bge 190b\n"
+ "cbz x13, 194f\n"
"191:" // Height 6: Multiply loop: Skip odd blocks
- "cbz x14, 194f\n"
- "tbz x14, #1, 192f\n"
- "ldr h0, [x13], #0x2\n"
- "ldr h1, [x9], #0x2\n"
- "ldr h2, [x27], #0x2\n"
- "ldr h3, [x25], #0x2\n"
- "ldr h4, [x23], #0x2\n"
- "ldr h5, [x21], #0x2\n"
- "tbz x14, #0, 193f\n"
- "ld1 { v0.b }[2], [x13]\n"
- "ld1 { v1.b }[2], [x9]\n"
- "ld1 { v2.b }[2], [x27]\n"
- "ld1 { v3.b }[2], [x25]\n"
- "ld1 { v4.b }[2], [x23]\n"
- "ld1 { v5.b }[2], [x21]\n"
+ "tbz x13, #1, 192f\n"
+ "ldr h0, [x12], #0x2\n"
+ "ldr h1, [x28], #0x2\n"
+ "ldr h2, [x26], #0x2\n"
+ "ldr h3, [x24], #0x2\n"
+ "ldr h4, [x22], #0x2\n"
+ "ldr h5, [x20], #0x2\n"
+ "tbz x13, #0, 193f\n"
+ "ld1 { v0.b }[2], [x12]\n"
+ "ld1 { v1.b }[2], [x28]\n"
+ "ld1 { v2.b }[2], [x26]\n"
+ "ld1 { v3.b }[2], [x24]\n"
+ "ld1 { v4.b }[2], [x22]\n"
+ "ld1 { v5.b }[2], [x20]\n"
"b 193f\n"
"192:" // Height 6: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x13, #0x0]\n"
- "ldr b1, [x9, #0x0]\n"
- "ldr b2, [x27, #0x0]\n"
- "ldr b3, [x25, #0x0]\n"
- "ldr b4, [x23, #0x0]\n"
- "ldr b5, [x21, #0x0]\n"
+ "ldr b0, [x12, #0x0]\n"
+ "ldr b1, [x28, #0x0]\n"
+ "ldr b2, [x26, #0x0]\n"
+ "ldr b3, [x24, #0x0]\n"
+ "ldr b4, [x22, #0x0]\n"
+ "ldr b5, [x20, #0x0]\n"
"193:" // Height 6: Multiply loop: Ragged operand read: Done
"ldr q6, [x16, #0x0]\n"
- ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x16, #0x10]\n"
+ ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
@@ -3303,195 +3303,195 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
".inst 0x4f85e0ff // sdot v31.4s, v7.16b, v5.4b[0]\n"
"194:" // Height 6: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x15, x15, #0x1\n"
- "cmp x15, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x14, x14, #0x1\n"
+ "cmp x14, x19\n"
"bne 184b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
- "cmp x8, #0x10\n"
- "prfm pstl1keep, [x17, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x15, #0x0]\n"
+ "cmp x17, #0x10\n"
+ "add x23, x15, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #2\n"
"prfm pstl1keep, [x21, #0x0]\n"
+ "add x20, x21, x19, LSL #2\n"
"prfm pstl1keep, [x20, #0x0]\n"
+ "add x19, x20, x19, LSL #2\n"
+ "prfm pstl1keep, [x19, #0x0]\n"
"bge 203f\n"
- "tbz x8, #3, 198f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v9.4s }, [x17], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v13.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v21.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
- "st1 { v25.4s }, [x21], #0x10\n"
- "st1 { v28.4s }, [x20], #0x10\n"
- "st1 { v29.4s }, [x20], #0x10\n"
- "tbz x8, #2, 196f\n"
- "st1 { v10.4s }, [x17], #0x10\n"
- "st1 { v14.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "st1 { v22.4s }, [x22], #0x10\n"
- "st1 { v26.4s }, [x21], #0x10\n"
- "st1 { v30.4s }, [x20], #0x10\n"
- "tbz x8, #1, 195f\n"
- "str d11, [x17], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
- "str d23, [x22], #0x8\n"
- "str d27, [x21], #0x8\n"
- "str d31, [x20], #0x8\n"
- "tbz x8, #0, 202f\n"
- "st1 { v11.s }[2], [x17]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
- "st1 { v23.s }[2], [x22]\n"
- "st1 { v27.s }[2], [x21]\n"
- "st1 { v31.s }[2], [x20]\n"
+ "tbz x17, #3, 198f\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
+ "st1 { v9.4s }, [x15], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v13.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v21.4s }, [x21], #0x10\n"
+ "st1 { v24.4s }, [x20], #0x10\n"
+ "st1 { v25.4s }, [x20], #0x10\n"
+ "st1 { v28.4s }, [x19], #0x10\n"
+ "st1 { v29.4s }, [x19], #0x10\n"
+ "tbz x17, #2, 196f\n"
+ "st1 { v10.4s }, [x15], #0x10\n"
+ "st1 { v14.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
+ "st1 { v22.4s }, [x21], #0x10\n"
+ "st1 { v26.4s }, [x20], #0x10\n"
+ "st1 { v30.4s }, [x19], #0x10\n"
+ "tbz x17, #1, 195f\n"
+ "str d11, [x15], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "str d23, [x21], #0x8\n"
+ "str d27, [x20], #0x8\n"
+ "str d31, [x19], #0x8\n"
+ "tbz x17, #0, 202f\n"
+ "st1 { v11.s }[2], [x15]\n"
+ "st1 { v15.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x22]\n"
+ "st1 { v23.s }[2], [x21]\n"
+ "st1 { v27.s }[2], [x20]\n"
+ "st1 { v31.s }[2], [x19]\n"
"b 202f\n"
"195:" // Height 6: Partial direct writeback: partial_1_12
- "tbz x8, #0, 202f\n"
- "str s11, [x17, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
- "str s23, [x22, #0x0]\n"
- "str s27, [x21, #0x0]\n"
- "str s31, [x20, #0x0]\n"
+ "tbz x17, #0, 202f\n"
+ "str s11, [x15, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
+ "str s23, [x21, #0x0]\n"
+ "str s27, [x20, #0x0]\n"
+ "str s31, [x19, #0x0]\n"
"b 202f\n"
"196:" // Height 6: Partial direct writeback: partial_2_8
- "tbz x8, #1, 197f\n"
- "str d10, [x17], #0x8\n"
- "str d14, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
- "str d22, [x22], #0x8\n"
- "str d26, [x21], #0x8\n"
- "str d30, [x20], #0x8\n"
- "tbz x8, #0, 202f\n"
- "st1 { v10.s }[2], [x17]\n"
- "st1 { v14.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
- "st1 { v22.s }[2], [x22]\n"
- "st1 { v26.s }[2], [x21]\n"
- "st1 { v30.s }[2], [x20]\n"
+ "tbz x17, #1, 197f\n"
+ "str d10, [x15], #0x8\n"
+ "str d14, [x23], #0x8\n"
+ "str d18, [x22], #0x8\n"
+ "str d22, [x21], #0x8\n"
+ "str d26, [x20], #0x8\n"
+ "str d30, [x19], #0x8\n"
+ "tbz x17, #0, 202f\n"
+ "st1 { v10.s }[2], [x15]\n"
+ "st1 { v14.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x22]\n"
+ "st1 { v22.s }[2], [x21]\n"
+ "st1 { v26.s }[2], [x20]\n"
+ "st1 { v30.s }[2], [x19]\n"
"b 202f\n"
"197:" // Height 6: Partial direct writeback: partial_1_8
- "tbz x8, #0, 202f\n"
- "str s10, [x17, #0x0]\n"
- "str s14, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
- "str s22, [x22, #0x0]\n"
- "str s26, [x21, #0x0]\n"
- "str s30, [x20, #0x0]\n"
+ "tbz x17, #0, 202f\n"
+ "str s10, [x15, #0x0]\n"
+ "str s14, [x23, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
+ "str s22, [x21, #0x0]\n"
+ "str s26, [x20, #0x0]\n"
+ "str s30, [x19, #0x0]\n"
"b 202f\n"
"198:" // Height 6: Partial direct writeback: partial_4_0
- "tbz x8, #2, 200f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
- "st1 { v28.4s }, [x20], #0x10\n"
- "tbz x8, #1, 199f\n"
- "str d9, [x17], #0x8\n"
- "str d13, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
- "str d21, [x22], #0x8\n"
- "str d25, [x21], #0x8\n"
- "str d29, [x20], #0x8\n"
- "tbz x8, #0, 202f\n"
- "st1 { v9.s }[2], [x17]\n"
- "st1 { v13.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
- "st1 { v21.s }[2], [x22]\n"
- "st1 { v25.s }[2], [x21]\n"
- "st1 { v29.s }[2], [x20]\n"
+ "tbz x17, #2, 200f\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v24.4s }, [x20], #0x10\n"
+ "st1 { v28.4s }, [x19], #0x10\n"
+ "tbz x17, #1, 199f\n"
+ "str d9, [x15], #0x8\n"
+ "str d13, [x23], #0x8\n"
+ "str d17, [x22], #0x8\n"
+ "str d21, [x21], #0x8\n"
+ "str d25, [x20], #0x8\n"
+ "str d29, [x19], #0x8\n"
+ "tbz x17, #0, 202f\n"
+ "st1 { v9.s }[2], [x15]\n"
+ "st1 { v13.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x22]\n"
+ "st1 { v21.s }[2], [x21]\n"
+ "st1 { v25.s }[2], [x20]\n"
+ "st1 { v29.s }[2], [x19]\n"
"b 202f\n"
"199:" // Height 6: Partial direct writeback: partial_1_4
- "tbz x8, #0, 202f\n"
- "str s9, [x17, #0x0]\n"
- "str s13, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
- "str s21, [x22, #0x0]\n"
- "str s25, [x21, #0x0]\n"
- "str s29, [x20, #0x0]\n"
+ "tbz x17, #0, 202f\n"
+ "str s9, [x15, #0x0]\n"
+ "str s13, [x23, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
+ "str s21, [x21, #0x0]\n"
+ "str s25, [x20, #0x0]\n"
+ "str s29, [x19, #0x0]\n"
"b 202f\n"
"200:" // Height 6: Partial direct writeback: partial_2_0
- "tbz x8, #1, 201f\n"
- "str d8, [x17], #0x8\n"
- "str d12, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d20, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
- "str d28, [x20], #0x8\n"
- "tbz x8, #0, 202f\n"
- "st1 { v8.s }[2], [x17]\n"
- "st1 { v12.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
- "st1 { v20.s }[2], [x22]\n"
- "st1 { v24.s }[2], [x21]\n"
- "st1 { v28.s }[2], [x20]\n"
+ "tbz x17, #1, 201f\n"
+ "str d8, [x15], #0x8\n"
+ "str d12, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "str d20, [x21], #0x8\n"
+ "str d24, [x20], #0x8\n"
+ "str d28, [x19], #0x8\n"
+ "tbz x17, #0, 202f\n"
+ "st1 { v8.s }[2], [x15]\n"
+ "st1 { v12.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x22]\n"
+ "st1 { v20.s }[2], [x21]\n"
+ "st1 { v24.s }[2], [x20]\n"
+ "st1 { v28.s }[2], [x19]\n"
"b 202f\n"
"201:" // Height 6: Partial direct writeback: partial_1_0
- "str s8, [x17, #0x0]\n"
- "str s12, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
- "str s20, [x22, #0x0]\n"
- "str s24, [x21, #0x0]\n"
- "str s28, [x20, #0x0]\n"
+ "str s8, [x15, #0x0]\n"
+ "str s12, [x23, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
+ "str s20, [x21, #0x0]\n"
+ "str s24, [x20, #0x0]\n"
+ "str s28, [x19, #0x0]\n"
"202:" // Height 6: Partial direct writeback: Done
"b 204f\n"
"203:" // Height 6: Full writeback
- "str q8, [x17, #0x0]\n"
- "str q9, [x17, #0x10]\n"
- "str q10, [x17, #0x20]\n"
- "str q11, [x17, #0x30]\n"
- "add x17, x17, #0x40\n"
- "str q12, [x24, #0x0]\n"
- "str q13, [x24, #0x10]\n"
- "str q14, [x24, #0x20]\n"
- "str q15, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
- "str q20, [x22, #0x0]\n"
- "str q21, [x22, #0x10]\n"
- "str q22, [x22, #0x20]\n"
- "str q23, [x22, #0x30]\n"
- "str q24, [x21, #0x0]\n"
- "str q25, [x21, #0x10]\n"
- "str q26, [x21, #0x20]\n"
- "str q27, [x21, #0x30]\n"
- "str q28, [x20, #0x0]\n"
- "str q29, [x20, #0x10]\n"
- "str q30, [x20, #0x20]\n"
- "str q31, [x20, #0x30]\n"
+ "str q8, [x15, #0x0]\n"
+ "str q9, [x15, #0x10]\n"
+ "str q10, [x15, #0x20]\n"
+ "str q11, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
+ "str q12, [x23, #0x0]\n"
+ "str q13, [x23, #0x10]\n"
+ "str q14, [x23, #0x20]\n"
+ "str q15, [x23, #0x30]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q17, [x22, #0x10]\n"
+ "str q18, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
+ "str q20, [x21, #0x0]\n"
+ "str q21, [x21, #0x10]\n"
+ "str q22, [x21, #0x20]\n"
+ "str q23, [x21, #0x30]\n"
+ "str q24, [x20, #0x0]\n"
+ "str q25, [x20, #0x10]\n"
+ "str q26, [x20, #0x20]\n"
+ "str q27, [x20, #0x30]\n"
+ "str q28, [x19, #0x0]\n"
+ "str q29, [x19, #0x10]\n"
+ "str q30, [x19, #0x20]\n"
+ "str q31, [x19, #0x30]\n"
"204:" // Height 6: Writeback done
- "subs x8, x8, #0x10\n"
+ "subs x17, x17, #0x10\n"
"bgt 172b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 206f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 205f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"205:" // Update direct input
- "mov x20, #0x6\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x6\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"206:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
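
// Editor's note on the hunk above: the revert returns the a55 kernel variant to
// using x19 as general-purpose scratch inside the inline assembly, so "x19" is
// restored to the clobber list while "x8" drops out (see the final constraint-line
// change above); x18 stays unused throughout, consistent with its role as the
// AAPCS64 platform register. A minimal sketch of the same pattern follows --
// the function, pointer argument, and register choices are hypothetical and not
// part of this patch; it only illustrates why every general-purpose register the
// asm template writes must be named as clobbered.
//
//     #include <cstdint>
//
//     int64_t sum_pair(const int64_t *p) {
//         int64_t result;
//         __asm__ volatile(
//             "ldr x19, [%[src], #0x0]\n"  // first element into x19
//             "ldr x20, [%[src], #0x8]\n"  // second element into x20
//             "add %[res], x19, x20\n"     // result = p[0] + p[1]
//             : [res] "=r" (result)
//             : [src] "r" (p)
//             : "cc", "memory", "x19", "x20");  // declare every written GPR
//         return result;
//     }
//
// Omitting "x19" or "x20" from that list would let the compiler keep live values
// in those registers across the asm statement, which is exactly the hazard the
// clobber-list edit in this revert guards against.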
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16/generic.cpp
index ddf776107a..e47295a766 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -87,73 +87,73 @@ void a64_hybrid_s8s32_dot_6x16 (
"cmp %x[M], #0x2\n"
"bgt 69f\n"
"beq 35f\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"2:" // Height 1: Column loop
"tbz %x[flags], #0, 12f\n"
- "cmp x11, #0x10\n"
+ "cmp x10, #0x10\n"
"bge 11f\n"
- "tbz x11, #3, 6f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "tbz x11, #2, 4f\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "tbz x11, #1, 3f\n"
- "ldr d11, [x9], #0x8\n"
- "mov x25, #0x38\n"
- "tbz x11, #0, 10f\n"
- "ld1 { v11.s }[2], [x9]\n"
+ "tbz x10, #3, 6f\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "tbz x10, #2, 4f\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "tbz x10, #1, 3f\n"
+ "mov x24, #0x38\n"
+ "ldr d11, [x28], #0x8\n"
+ "tbz x10, #0, 10f\n"
+ "ld1 { v11.s }[2], [x28]\n"
"b 10f\n"
"3:" // Height 1: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x11, #0, 10f\n"
- "ldr s11, [x9, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x10, #0, 10f\n"
+ "ldr s11, [x28, #0x0]\n"
"b 10f\n"
"4:" // Height 1: Partial accumulate: partial_2_8
- "tbz x11, #1, 5f\n"
- "ldr d10, [x9], #0x8\n"
- "mov x25, #0x28\n"
- "tbz x11, #0, 10f\n"
- "ld1 { v10.s }[2], [x9]\n"
+ "tbz x10, #1, 5f\n"
+ "ldr d10, [x28], #0x8\n"
+ "mov x24, #0x28\n"
+ "tbz x10, #0, 10f\n"
+ "ld1 { v10.s }[2], [x28]\n"
"b 10f\n"
"5:" // Height 1: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x11, #0, 10f\n"
- "ldr s10, [x9, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x10, #0, 10f\n"
+ "ldr s10, [x28, #0x0]\n"
"b 10f\n"
"6:" // Height 1: Partial accumulate: partial_4_0
- "tbz x11, #2, 8f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "tbz x11, #1, 7f\n"
- "ldr d9, [x9], #0x8\n"
- "mov x25, #0x18\n"
- "tbz x11, #0, 10f\n"
- "ld1 { v9.s }[2], [x9]\n"
+ "tbz x10, #2, 8f\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "tbz x10, #1, 7f\n"
+ "ldr d9, [x28], #0x8\n"
+ "mov x24, #0x18\n"
+ "tbz x10, #0, 10f\n"
+ "ld1 { v9.s }[2], [x28]\n"
"b 10f\n"
"7:" // Height 1: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x11, #0, 10f\n"
- "ldr s9, [x9, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x10, #0, 10f\n"
+ "ldr s9, [x28, #0x0]\n"
"b 10f\n"
"8:" // Height 1: Partial accumulate: partial_2_0
- "tbz x11, #1, 9f\n"
- "ldr d8, [x9], #0x8\n"
- "mov x25, #0x8\n"
- "tbz x11, #0, 10f\n"
- "ld1 { v8.s }[2], [x9]\n"
+ "tbz x10, #1, 9f\n"
+ "ldr d8, [x28], #0x8\n"
+ "mov x24, #0x8\n"
+ "tbz x10, #0, 10f\n"
+ "ld1 { v8.s }[2], [x28]\n"
"b 10f\n"
"9:" // Height 1: Partial accumulate: partial_1_0
- "ldr s8, [x9, #0x0]\n"
- "mov x25, #0x0\n"
+ "ldr s8, [x28, #0x0]\n"
+ "mov x24, #0x0\n"
"10:" // Height 1: Partial accumulate: Done
- "sub x9, x9, x25\n"
+ "sub x28, x28, x24\n"
"b 13f\n"
"11:" // Height 1: full accumulate
- "ldr q8, [x9, #0x0]\n"
- "ldr q9, [x9, #0x10]\n"
- "ldr q10, [x9, #0x20]\n"
- "ldr q11, [x9, #0x30]\n"
+ "ldr q8, [x28, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q10, [x28, #0x20]\n"
+ "ldr q11, [x28, #0x30]\n"
"b 13f\n"
"12:" // Height 1: no accumulate
"movi v8.4s, #0x0\n"
@@ -161,295 +161,295 @@ void a64_hybrid_s8s32_dot_6x16 (
"movi v10.4s, #0x0\n"
"movi v11.4s, #0x0\n"
"13:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"14:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 15f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 16f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 16f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
"b 16f\n"
"15:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"16:" // Height 1: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 19f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q6, [x10, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "cmp x26, #0x20\n"
"blt 18f\n"
"17:" // Height 1: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x10]\n"
+ "add x25, x25, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q7, [x9, #0x30]\n"
+ "cmp x26, #0x20\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
- "ldr q7, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x40]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
- "ldr q7, [x10, #0x70]\n"
+ "ldr q7, [x9, #0x70]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
- "ldr q6, [x10, #0x80]\n"
+ "ldr q6, [x9, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
- "ldr q7, [x10, #0x90]\n"
+ "ldr q7, [x9, #0x90]\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
- "ldr q6, [x10, #0xa0]\n"
+ "ldr q6, [x9, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
- "ldr q7, [x10, #0xb0]\n"
+ "ldr q7, [x9, #0xb0]\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
- "ldr q6, [x10, #0xc0]\n"
+ "ldr q6, [x9, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
- "ldr q7, [x10, #0xd0]\n"
+ "ldr q7, [x9, #0xd0]\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
- "ldr q6, [x10, #0xe0]\n"
+ "ldr q6, [x9, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
- "ldr q7, [x10, #0xf0]\n"
- "sub x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
+ "ldr q7, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
- "ldr q0, [x26, #0x0]\n"
- "cmp x27, #0x20\n"
- "add x10, x10, #0x100\n"
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q0, [x25, #0x0]\n"
"bge 17b\n"
"18:" // Height 1: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x10]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "add x25, x25, #0x10\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q7, [x9, #0x30]\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
- "ldr q7, [x10, #0x50]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
- "ldr q7, [x10, #0x70]\n"
+ "ldr q7, [x9, #0x70]\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
- "ldr q6, [x10, #0x80]\n"
+ "ldr q6, [x9, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
- "ldr q7, [x10, #0x90]\n"
+ "ldr q7, [x9, #0x90]\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
- "ldr q6, [x10, #0xa0]\n"
+ "ldr q6, [x9, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
- "ldr q7, [x10, #0xb0]\n"
+ "ldr q7, [x9, #0xb0]\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
- "ldr q6, [x10, #0xc0]\n"
+ "ldr q6, [x9, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
- "ldr q7, [x10, #0xd0]\n"
+ "ldr q7, [x9, #0xd0]\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
- "ldr q6, [x10, #0xe0]\n"
+ "ldr q6, [x9, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
- "ldr q7, [x10, #0xf0]\n"
- "add x26, x26, #0x10\n"
- "sub x27, x27, #0x10\n"
+ "ldr q7, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "add x10, x10, #0x100\n"
"19:" // Height 1: Multiply loop: Main loop skip
- "cbz x27, 24f\n"
- "cmp x27, #0x4\n"
+ "cbz x26, 24f\n"
+ "cmp x26, #0x4\n"
"blt 21f\n"
"20:" // Height 1: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr s0, [x25], #0x4\n"
+ "sub x26, x26, #0x4\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "sub x27, x27, #0x4\n"
- "ldr q7, [x10, #0x10]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x10]\n"
+ "cmp x26, #0x4\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "cmp x27, #0x4\n"
- "ldr q7, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "ldr q7, [x9, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
+ "add x9, x9, #0x40\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
- "add x10, x10, #0x40\n"
"bge 20b\n"
+ "cbz x26, 24f\n"
"21:" // Height 1: Multiply loop: Skip odd blocks
- "cbz x27, 24f\n"
- "tbz x27, #1, 22f\n"
- "ldr h0, [x26], #0x2\n"
- "tbz x27, #0, 23f\n"
- "ld1 { v0.b }[2], [x26]\n"
+ "tbz x26, #1, 22f\n"
+ "ldr h0, [x25], #0x2\n"
+ "tbz x26, #0, 23f\n"
+ "ld1 { v0.b }[2], [x25]\n"
"b 23f\n"
"22:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x26, #0x0]\n"
+ "ldr b0, [x25, #0x0]\n"
"23:" // Height 1: Multiply loop: Ragged operand read: Done
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
- "ldr q7, [x10, #0x30]\n"
+ "ldr q7, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
- "add x10, x10, #0x40\n"
"24:" // Height 1: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 14b\n"
- "cmp x11, #0x10\n"
- "prfm pstl1keep, [x9, #0x0]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "cmp x10, #0x10\n"
"bge 33f\n"
- "tbz x11, #3, 28f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v9.4s }, [x9], #0x10\n"
- "tbz x11, #2, 26f\n"
- "st1 { v10.4s }, [x9], #0x10\n"
- "tbz x11, #1, 25f\n"
- "str d11, [x9], #0x8\n"
- "tbz x11, #0, 32f\n"
- "st1 { v11.s }[2], [x9]\n"
+ "tbz x10, #3, 28f\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v9.4s }, [x28], #0x10\n"
+ "tbz x10, #2, 26f\n"
+ "st1 { v10.4s }, [x28], #0x10\n"
+ "tbz x10, #1, 25f\n"
+ "str d11, [x28], #0x8\n"
+ "tbz x10, #0, 32f\n"
+ "st1 { v11.s }[2], [x28]\n"
"b 32f\n"
"25:" // Height 1: Partial direct writeback: partial_1_12
- "tbz x11, #0, 32f\n"
- "str s11, [x9, #0x0]\n"
+ "tbz x10, #0, 32f\n"
+ "str s11, [x28, #0x0]\n"
"b 32f\n"
"26:" // Height 1: Partial direct writeback: partial_2_8
- "tbz x11, #1, 27f\n"
- "str d10, [x9], #0x8\n"
- "tbz x11, #0, 32f\n"
- "st1 { v10.s }[2], [x9]\n"
+ "tbz x10, #1, 27f\n"
+ "str d10, [x28], #0x8\n"
+ "tbz x10, #0, 32f\n"
+ "st1 { v10.s }[2], [x28]\n"
"b 32f\n"
"27:" // Height 1: Partial direct writeback: partial_1_8
- "tbz x11, #0, 32f\n"
- "str s10, [x9, #0x0]\n"
+ "tbz x10, #0, 32f\n"
+ "str s10, [x28, #0x0]\n"
"b 32f\n"
"28:" // Height 1: Partial direct writeback: partial_4_0
- "tbz x11, #2, 30f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "tbz x11, #1, 29f\n"
- "str d9, [x9], #0x8\n"
- "tbz x11, #0, 32f\n"
- "st1 { v9.s }[2], [x9]\n"
+ "tbz x10, #2, 30f\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "tbz x10, #1, 29f\n"
+ "str d9, [x28], #0x8\n"
+ "tbz x10, #0, 32f\n"
+ "st1 { v9.s }[2], [x28]\n"
"b 32f\n"
"29:" // Height 1: Partial direct writeback: partial_1_4
- "tbz x11, #0, 32f\n"
- "str s9, [x9, #0x0]\n"
+ "tbz x10, #0, 32f\n"
+ "str s9, [x28, #0x0]\n"
"b 32f\n"
"30:" // Height 1: Partial direct writeback: partial_2_0
- "tbz x11, #1, 31f\n"
- "str d8, [x9], #0x8\n"
- "tbz x11, #0, 32f\n"
- "st1 { v8.s }[2], [x9]\n"
+ "tbz x10, #1, 31f\n"
+ "str d8, [x28], #0x8\n"
+ "tbz x10, #0, 32f\n"
+ "st1 { v8.s }[2], [x28]\n"
"b 32f\n"
"31:" // Height 1: Partial direct writeback: partial_1_0
- "str s8, [x9, #0x0]\n"
+ "str s8, [x28, #0x0]\n"
"32:" // Height 1: Partial direct writeback: Done
"b 34f\n"
"33:" // Height 1: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
"34:" // Height 1: Writeback done
- "subs x11, x11, #0x10\n"
+ "subs x10, x10, #0x10\n"
"bgt 2b\n"
"b 206f\n"
"35:" // Height 2
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"36:" // Height 2: Column loop
"tbz %x[flags], #0, 46f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "cmp x11, #0x10\n"
- "add x24, x9, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x10, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
"bge 45f\n"
- "tbz x11, #3, 40f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x24], #0x10\n"
- "tbz x11, #2, 38f\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "tbz x11, #1, 37f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d15, [x24], #0x8\n"
- "mov x25, #0x38\n"
- "tbz x11, #0, 44f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x24]\n"
+ "tbz x10, #3, 40f\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x23], #0x10\n"
+ "tbz x10, #2, 38f\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "tbz x10, #1, 37f\n"
+ "mov x24, #0x38\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d15, [x23], #0x8\n"
+ "tbz x10, #0, 44f\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x23]\n"
"b 44f\n"
"37:" // Height 2: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x11, #0, 44f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s15, [x24, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x10, #0, 44f\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s15, [x23, #0x0]\n"
"b 44f\n"
"38:" // Height 2: Partial accumulate: partial_2_8
- "tbz x11, #1, 39f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d14, [x24], #0x8\n"
- "mov x25, #0x28\n"
- "tbz x11, #0, 44f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x24]\n"
+ "tbz x10, #1, 39f\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
+ "mov x24, #0x28\n"
+ "tbz x10, #0, 44f\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x23]\n"
"b 44f\n"
"39:" // Height 2: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x11, #0, 44f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s14, [x24, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x10, #0, 44f\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s14, [x23, #0x0]\n"
"b 44f\n"
"40:" // Height 2: Partial accumulate: partial_4_0
- "tbz x11, #2, 42f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "tbz x11, #1, 41f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d13, [x24], #0x8\n"
- "mov x25, #0x18\n"
- "tbz x11, #0, 44f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x24]\n"
+ "tbz x10, #2, 42f\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "tbz x10, #1, 41f\n"
+ "mov x24, #0x18\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d13, [x23], #0x8\n"
+ "tbz x10, #0, 44f\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x23]\n"
"b 44f\n"
"41:" // Height 2: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x11, #0, 44f\n"
- "ldr s9, [x9, #0x0]\n"
- "ldr s13, [x24, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x10, #0, 44f\n"
+ "ldr s9, [x28, #0x0]\n"
+ "ldr s13, [x23, #0x0]\n"
"b 44f\n"
"42:" // Height 2: Partial accumulate: partial_2_0
- "tbz x11, #1, 43f\n"
- "ldr d8, [x9], #0x8\n"
- "ldr d12, [x24], #0x8\n"
- "mov x25, #0x8\n"
- "tbz x11, #0, 44f\n"
- "ld1 { v8.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x24]\n"
+ "tbz x10, #1, 43f\n"
+ "ldr d8, [x28], #0x8\n"
+ "ldr d12, [x23], #0x8\n"
+ "mov x24, #0x8\n"
+ "tbz x10, #0, 44f\n"
+ "ld1 { v8.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x23]\n"
"b 44f\n"
"43:" // Height 2: Partial accumulate: partial_1_0
- "ldr s8, [x9, #0x0]\n"
- "ldr s12, [x24, #0x0]\n"
- "mov x25, #0x0\n"
+ "ldr s8, [x28, #0x0]\n"
+ "mov x24, #0x0\n"
+ "ldr s12, [x23, #0x0]\n"
"44:" // Height 2: Partial accumulate: Done
- "sub x9, x9, x25\n"
+ "sub x28, x28, x24\n"
"b 47f\n"
"45:" // Height 2: full accumulate
- "ldr q8, [x9, #0x0]\n"
- "ldr q9, [x9, #0x10]\n"
- "ldr q10, [x9, #0x20]\n"
- "ldr q11, [x9, #0x30]\n"
- "ldr q12, [x24, #0x0]\n"
- "ldr q13, [x24, #0x10]\n"
- "ldr q14, [x24, #0x20]\n"
- "ldr q15, [x24, #0x30]\n"
+ "ldr q8, [x28, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q10, [x28, #0x20]\n"
+ "ldr q11, [x28, #0x30]\n"
+ "ldr q12, [x23, #0x0]\n"
+ "ldr q13, [x23, #0x10]\n"
+ "ldr q14, [x23, #0x20]\n"
+ "ldr q15, [x23, #0x30]\n"
"b 47f\n"
"46:" // Height 2: no accumulate
"movi v8.4s, #0x0\n"
@@ -461,392 +461,392 @@ void a64_hybrid_s8s32_dot_6x16 (
"movi v14.4s, #0x0\n"
"movi v15.4s, #0x0\n"
"47:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"48:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 49f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 50f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 50f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
"b 50f\n"
"49:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
"50:" // Height 2: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 53f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x20\n"
+ "ldr q6, [x9, #0x0]\n"
"blt 52f\n"
"51:" // Height 2: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
+ "add x25, x25, #0x10\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
- "sub x27, x27, #0x10\n"
+ "ldr q6, [x9, #0x20]\n"
+ "add x24, x24, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "add x26, x26, #0x10\n"
+ "ldr q7, [x9, #0x30]\n"
+ "cmp x26, #0x20\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "ldr q6, [x10, #0x40]\n"
- "add x25, x25, #0x10\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
- "ldr q7, [x10, #0x50]\n"
- "cmp x27, #0x20\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
- "ldr q6, [x10, #0x60]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
- "ldr q7, [x10, #0x70]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q7, [x9, #0x70]\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
- "ldr q6, [x10, #0x80]\n"
+ "ldr q6, [x9, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
- "ldr q7, [x10, #0x90]\n"
+ "ldr q7, [x9, #0x90]\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
- "ldr q6, [x10, #0xa0]\n"
+ "ldr q6, [x9, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
- "ldr q7, [x10, #0xb0]\n"
+ "ldr q7, [x9, #0xb0]\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
- "ldr q6, [x10, #0xc0]\n"
+ "ldr q6, [x9, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
- "ldr q7, [x10, #0xd0]\n"
+ "ldr q7, [x9, #0xd0]\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
- "ldr q6, [x10, #0xe0]\n"
+ "ldr q6, [x9, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
- "ldr q7, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q7, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n"
- "ldr q1, [x25, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q1, [x24, #0x0]\n"
"bge 51b\n"
"52:" // Height 2: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
- "add x26, x26, #0x10\n"
+ "ldr q6, [x9, #0x20]\n"
+ "add x25, x25, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "add x25, x25, #0x10\n"
+ "ldr q7, [x9, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "ldr q6, [x10, #0x40]\n"
- "sub x27, x27, #0x10\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
- "ldr q7, [x10, #0x50]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
- "ldr q6, [x10, #0x60]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
- "ldr q7, [x10, #0x70]\n"
+ "ldr q7, [x9, #0x70]\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
- "ldr q6, [x10, #0x80]\n"
+ "ldr q6, [x9, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
- "ldr q7, [x10, #0x90]\n"
+ "ldr q7, [x9, #0x90]\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
- "ldr q6, [x10, #0xa0]\n"
+ "ldr q6, [x9, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
- "ldr q7, [x10, #0xb0]\n"
+ "ldr q7, [x9, #0xb0]\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
- "ldr q6, [x10, #0xc0]\n"
+ "ldr q6, [x9, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
- "ldr q7, [x10, #0xd0]\n"
+ "ldr q7, [x9, #0xd0]\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
- "ldr q6, [x10, #0xe0]\n"
+ "ldr q6, [x9, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
- "ldr q7, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q7, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n"
"53:" // Height 2: Multiply loop: Main loop skip
- "cbz x27, 58f\n"
- "cmp x27, #0x4\n"
+ "cbz x26, 58f\n"
+ "cmp x26, #0x4\n"
"blt 55f\n"
"54:" // Height 2: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr s1, [x25], #0x4\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr s0, [x25], #0x4\n"
+ "sub x26, x26, #0x4\n"
+ "ldr s1, [x24], #0x4\n"
+ "cmp x26, #0x4\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
+ "ldr q7, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "add x10, x10, #0x40\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
"bge 54b\n"
+ "cbz x26, 58f\n"
"55:" // Height 2: Multiply loop: Skip odd blocks
- "cbz x27, 58f\n"
- "tbz x27, #1, 56f\n"
- "ldr h0, [x26], #0x2\n"
- "ldr h1, [x25], #0x2\n"
- "tbz x27, #0, 57f\n"
- "ld1 { v0.b }[2], [x26]\n"
- "ld1 { v1.b }[2], [x25]\n"
+ "tbz x26, #1, 56f\n"
+ "ldr h0, [x25], #0x2\n"
+ "ldr h1, [x24], #0x2\n"
+ "tbz x26, #0, 57f\n"
+ "ld1 { v0.b }[2], [x25]\n"
+ "ld1 { v1.b }[2], [x24]\n"
"b 57f\n"
"56:" // Height 2: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x26, #0x0]\n"
- "ldr b1, [x25, #0x0]\n"
+ "ldr b0, [x25, #0x0]\n"
+ "ldr b1, [x24, #0x0]\n"
"57:" // Height 2: Multiply loop: Ragged operand read: Done
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
+ "ldr q7, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "add x10, x10, #0x40\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
"58:" // Height 2: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 48b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "cmp x11, #0x10\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "cmp x10, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"bge 67f\n"
- "tbz x11, #3, 62f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v9.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v13.4s }, [x24], #0x10\n"
- "tbz x11, #2, 60f\n"
- "st1 { v10.4s }, [x9], #0x10\n"
- "st1 { v14.4s }, [x24], #0x10\n"
- "tbz x11, #1, 59f\n"
- "str d11, [x9], #0x8\n"
- "str d15, [x24], #0x8\n"
- "tbz x11, #0, 66f\n"
- "st1 { v11.s }[2], [x9]\n"
- "st1 { v15.s }[2], [x24]\n"
+ "tbz x10, #3, 62f\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v9.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v13.4s }, [x23], #0x10\n"
+ "tbz x10, #2, 60f\n"
+ "st1 { v10.4s }, [x28], #0x10\n"
+ "st1 { v14.4s }, [x23], #0x10\n"
+ "tbz x10, #1, 59f\n"
+ "str d11, [x28], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "tbz x10, #0, 66f\n"
+ "st1 { v11.s }[2], [x28]\n"
+ "st1 { v15.s }[2], [x23]\n"
"b 66f\n"
"59:" // Height 2: Partial direct writeback: partial_1_12
- "tbz x11, #0, 66f\n"
- "str s11, [x9, #0x0]\n"
- "str s15, [x24, #0x0]\n"
+ "tbz x10, #0, 66f\n"
+ "str s11, [x28, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
"b 66f\n"
"60:" // Height 2: Partial direct writeback: partial_2_8
- "tbz x11, #1, 61f\n"
- "str d10, [x9], #0x8\n"
- "str d14, [x24], #0x8\n"
- "tbz x11, #0, 66f\n"
- "st1 { v10.s }[2], [x9]\n"
- "st1 { v14.s }[2], [x24]\n"
+ "tbz x10, #1, 61f\n"
+ "str d10, [x28], #0x8\n"
+ "str d14, [x23], #0x8\n"
+ "tbz x10, #0, 66f\n"
+ "st1 { v10.s }[2], [x28]\n"
+ "st1 { v14.s }[2], [x23]\n"
"b 66f\n"
"61:" // Height 2: Partial direct writeback: partial_1_8
- "tbz x11, #0, 66f\n"
- "str s10, [x9, #0x0]\n"
- "str s14, [x24, #0x0]\n"
+ "tbz x10, #0, 66f\n"
+ "str s10, [x28, #0x0]\n"
+ "str s14, [x23, #0x0]\n"
"b 66f\n"
"62:" // Height 2: Partial direct writeback: partial_4_0
- "tbz x11, #2, 64f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "tbz x11, #1, 63f\n"
- "str d9, [x9], #0x8\n"
- "str d13, [x24], #0x8\n"
- "tbz x11, #0, 66f\n"
- "st1 { v9.s }[2], [x9]\n"
- "st1 { v13.s }[2], [x24]\n"
+ "tbz x10, #2, 64f\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "tbz x10, #1, 63f\n"
+ "str d9, [x28], #0x8\n"
+ "str d13, [x23], #0x8\n"
+ "tbz x10, #0, 66f\n"
+ "st1 { v9.s }[2], [x28]\n"
+ "st1 { v13.s }[2], [x23]\n"
"b 66f\n"
"63:" // Height 2: Partial direct writeback: partial_1_4
- "tbz x11, #0, 66f\n"
- "str s9, [x9, #0x0]\n"
- "str s13, [x24, #0x0]\n"
+ "tbz x10, #0, 66f\n"
+ "str s9, [x28, #0x0]\n"
+ "str s13, [x23, #0x0]\n"
"b 66f\n"
"64:" // Height 2: Partial direct writeback: partial_2_0
- "tbz x11, #1, 65f\n"
- "str d8, [x9], #0x8\n"
- "str d12, [x24], #0x8\n"
- "tbz x11, #0, 66f\n"
- "st1 { v8.s }[2], [x9]\n"
- "st1 { v12.s }[2], [x24]\n"
+ "tbz x10, #1, 65f\n"
+ "str d8, [x28], #0x8\n"
+ "str d12, [x23], #0x8\n"
+ "tbz x10, #0, 66f\n"
+ "st1 { v8.s }[2], [x28]\n"
+ "st1 { v12.s }[2], [x23]\n"
"b 66f\n"
"65:" // Height 2: Partial direct writeback: partial_1_0
- "str s8, [x9, #0x0]\n"
- "str s12, [x24, #0x0]\n"
+ "str s8, [x28, #0x0]\n"
+ "str s12, [x23, #0x0]\n"
"66:" // Height 2: Partial direct writeback: Done
"b 68f\n"
"67:" // Height 2: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q12, [x24, #0x0]\n"
- "str q13, [x24, #0x10]\n"
- "str q14, [x24, #0x20]\n"
- "str q15, [x24, #0x30]\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q12, [x23, #0x0]\n"
+ "str q13, [x23, #0x10]\n"
+ "str q14, [x23, #0x20]\n"
+ "str q15, [x23, #0x30]\n"
"68:" // Height 2: Writeback done
- "subs x11, x11, #0x10\n"
+ "subs x10, x10, #0x10\n"
"bgt 36b\n"
"b 206f\n"
"69:" // Height 3
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"70:" // Height 3: Column loop
"tbz %x[flags], #0, 80f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "cmp x11, #0x10\n"
- "add x23, x24, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x10, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"bge 79f\n"
- "tbz x11, #3, 74f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x24], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "tbz x11, #2, 72f\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "tbz x11, #1, 71f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d15, [x24], #0x8\n"
- "mov x25, #0x38\n"
- "ldr d19, [x23], #0x8\n"
- "tbz x11, #0, 78f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x24]\n"
- "ld1 { v19.s }[2], [x23]\n"
+ "tbz x10, #3, 74f\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "tbz x10, #2, 72f\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "tbz x10, #1, 71f\n"
+ "mov x24, #0x38\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d15, [x23], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
+ "tbz x10, #0, 78f\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x23]\n"
+ "ld1 { v19.s }[2], [x22]\n"
"b 78f\n"
"71:" // Height 3: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x11, #0, 78f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s15, [x24, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x10, #0, 78f\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s15, [x23, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
"b 78f\n"
"72:" // Height 3: Partial accumulate: partial_2_8
- "tbz x11, #1, 73f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d14, [x24], #0x8\n"
- "mov x25, #0x28\n"
- "ldr d18, [x23], #0x8\n"
- "tbz x11, #0, 78f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x24]\n"
- "ld1 { v18.s }[2], [x23]\n"
+ "tbz x10, #1, 73f\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
+ "mov x24, #0x28\n"
+ "ldr d18, [x22], #0x8\n"
+ "tbz x10, #0, 78f\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x23]\n"
+ "ld1 { v18.s }[2], [x22]\n"
"b 78f\n"
"73:" // Height 3: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x11, #0, 78f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s14, [x24, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x10, #0, 78f\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s14, [x23, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
"b 78f\n"
"74:" // Height 3: Partial accumulate: partial_4_0
- "tbz x11, #2, 76f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "tbz x11, #1, 75f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d13, [x24], #0x8\n"
- "mov x25, #0x18\n"
- "ldr d17, [x23], #0x8\n"
- "tbz x11, #0, 78f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x24]\n"
- "ld1 { v17.s }[2], [x23]\n"
+ "tbz x10, #2, 76f\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "tbz x10, #1, 75f\n"
+ "mov x24, #0x18\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d13, [x23], #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "tbz x10, #0, 78f\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x23]\n"
+ "ld1 { v17.s }[2], [x22]\n"
"b 78f\n"
"75:" // Height 3: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x11, #0, 78f\n"
- "ldr s9, [x9, #0x0]\n"
- "ldr s13, [x24, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x10, #0, 78f\n"
+ "ldr s9, [x28, #0x0]\n"
+ "ldr s13, [x23, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
"b 78f\n"
"76:" // Height 3: Partial accumulate: partial_2_0
- "tbz x11, #1, 77f\n"
- "ldr d8, [x9], #0x8\n"
- "ldr d12, [x24], #0x8\n"
- "mov x25, #0x8\n"
- "ldr d16, [x23], #0x8\n"
- "tbz x11, #0, 78f\n"
- "ld1 { v8.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x24]\n"
- "ld1 { v16.s }[2], [x23]\n"
+ "tbz x10, #1, 77f\n"
+ "ldr d8, [x28], #0x8\n"
+ "ldr d12, [x23], #0x8\n"
+ "mov x24, #0x8\n"
+ "ldr d16, [x22], #0x8\n"
+ "tbz x10, #0, 78f\n"
+ "ld1 { v8.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x23]\n"
+ "ld1 { v16.s }[2], [x22]\n"
"b 78f\n"
"77:" // Height 3: Partial accumulate: partial_1_0
- "ldr s8, [x9, #0x0]\n"
- "ldr s12, [x24, #0x0]\n"
- "mov x25, #0x0\n"
- "ldr s16, [x23, #0x0]\n"
+ "ldr s8, [x28, #0x0]\n"
+ "mov x24, #0x0\n"
+ "ldr s12, [x23, #0x0]\n"
+ "ldr s16, [x22, #0x0]\n"
"78:" // Height 3: Partial accumulate: Done
- "sub x9, x9, x25\n"
+ "sub x28, x28, x24\n"
"b 81f\n"
"79:" // Height 3: full accumulate
- "ldr q8, [x9, #0x0]\n"
- "ldr q9, [x9, #0x10]\n"
- "ldr q10, [x9, #0x20]\n"
- "ldr q11, [x9, #0x30]\n"
- "ldr q12, [x24, #0x0]\n"
- "ldr q13, [x24, #0x10]\n"
- "ldr q14, [x24, #0x20]\n"
- "ldr q15, [x24, #0x30]\n"
- "ldr q16, [x23, #0x0]\n"
- "ldr q17, [x23, #0x10]\n"
- "ldr q18, [x23, #0x20]\n"
- "ldr q19, [x23, #0x30]\n"
+ "ldr q8, [x28, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q10, [x28, #0x20]\n"
+ "ldr q11, [x28, #0x30]\n"
+ "ldr q12, [x23, #0x0]\n"
+ "ldr q13, [x23, #0x10]\n"
+ "ldr q14, [x23, #0x20]\n"
+ "ldr q15, [x23, #0x30]\n"
+ "ldr q16, [x22, #0x0]\n"
+ "ldr q17, [x22, #0x10]\n"
+ "ldr q18, [x22, #0x20]\n"
+ "ldr q19, [x22, #0x30]\n"
"b 81f\n"
"80:" // Height 3: no accumulate
"movi v8.4s, #0x0\n"
@@ -862,180 +862,180 @@ void a64_hybrid_s8s32_dot_6x16 (
"movi v18.4s, #0x0\n"
"movi v19.4s, #0x0\n"
"81:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"82:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 83f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 84f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 84f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
"b 84f\n"
"83:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
"84:" // Height 3: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 87f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x20\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
"blt 86f\n"
"85:" // Height 3: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
+ "add x25, x25, #0x10\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "sub x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "add x23, x23, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "cmp x26, #0x20\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "add x24, x24, #0x10\n"
+ "ldr q7, [x9, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "cmp x27, #0x20\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
- "ldr q7, [x10, #0x50]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n"
- "ldr q7, [x10, #0x70]\n"
+ "ldr q7, [x9, #0x70]\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n"
- "ldr q6, [x10, #0x80]\n"
+ "ldr q6, [x9, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n"
- "ldr q7, [x10, #0x90]\n"
+ "ldr q7, [x9, #0x90]\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n"
- "ldr q6, [x10, #0xa0]\n"
+ "ldr q6, [x9, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n"
- "ldr q7, [x10, #0xb0]\n"
+ "ldr q7, [x9, #0xb0]\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n"
- "ldr q6, [x10, #0xc0]\n"
+ "ldr q6, [x9, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n"
- "ldr q7, [x10, #0xd0]\n"
+ "ldr q7, [x9, #0xd0]\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n"
- "ldr q6, [x10, #0xe0]\n"
+ "ldr q6, [x9, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n"
- "ldr q7, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q7, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n"
- "ldr q1, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
".inst 0x4fa2e8f3 // sdot v19.4s, v7.16b, v2.4b[3]\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q2, [x23, #0x0]\n"
"bge 85b\n"
"86:" // Height 3: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
- ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
"add x24, x24, #0x10\n"
+ ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "add x23, x23, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "ldr q7, [x9, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
- "ldr q7, [x10, #0x50]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n"
- "ldr q7, [x10, #0x70]\n"
+ "ldr q7, [x9, #0x70]\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n"
- "ldr q6, [x10, #0x80]\n"
+ "ldr q6, [x9, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n"
- "ldr q7, [x10, #0x90]\n"
+ "ldr q7, [x9, #0x90]\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n"
- "ldr q6, [x10, #0xa0]\n"
+ "ldr q6, [x9, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n"
- "ldr q7, [x10, #0xb0]\n"
+ "ldr q7, [x9, #0xb0]\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n"
- "ldr q6, [x10, #0xc0]\n"
+ "ldr q6, [x9, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n"
- "ldr q7, [x10, #0xd0]\n"
+ "ldr q7, [x9, #0xd0]\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n"
- "ldr q6, [x10, #0xe0]\n"
+ "ldr q6, [x9, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n"
- "ldr q7, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q7, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n"
@@ -1043,26 +1043,26 @@ void a64_hybrid_s8s32_dot_6x16 (
".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n"
".inst 0x4fa2e8f3 // sdot v19.4s, v7.16b, v2.4b[3]\n"
"87:" // Height 3: Multiply loop: Main loop skip
- "cbz x27, 92f\n"
- "cmp x27, #0x4\n"
+ "cbz x26, 92f\n"
+ "cmp x26, #0x4\n"
"blt 89f\n"
"88:" // Height 3: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr s1, [x25], #0x4\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
- "ldr s2, [x24], #0x4\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr s0, [x25], #0x4\n"
+ "sub x26, x26, #0x4\n"
+ "ldr s1, [x24], #0x4\n"
+ "cmp x26, #0x4\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr q7, [x10, #0x10]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "add x10, x10, #0x40\n"
+ "ldr q7, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
@@ -1070,33 +1070,33 @@ void a64_hybrid_s8s32_dot_6x16 (
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
"bge 88b\n"
+ "cbz x26, 92f\n"
"89:" // Height 3: Multiply loop: Skip odd blocks
- "cbz x27, 92f\n"
- "tbz x27, #1, 90f\n"
- "ldr h0, [x26], #0x2\n"
- "ldr h1, [x25], #0x2\n"
- "ldr h2, [x24], #0x2\n"
- "tbz x27, #0, 91f\n"
- "ld1 { v0.b }[2], [x26]\n"
- "ld1 { v1.b }[2], [x25]\n"
- "ld1 { v2.b }[2], [x24]\n"
+ "tbz x26, #1, 90f\n"
+ "ldr h0, [x25], #0x2\n"
+ "ldr h1, [x24], #0x2\n"
+ "ldr h2, [x23], #0x2\n"
+ "tbz x26, #0, 91f\n"
+ "ld1 { v0.b }[2], [x25]\n"
+ "ld1 { v1.b }[2], [x24]\n"
+ "ld1 { v2.b }[2], [x23]\n"
"b 91f\n"
"90:" // Height 3: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x26, #0x0]\n"
- "ldr b1, [x25, #0x0]\n"
- "ldr b2, [x24, #0x0]\n"
+ "ldr b0, [x25, #0x0]\n"
+ "ldr b1, [x24, #0x0]\n"
+ "ldr b2, [x23, #0x0]\n"
"91:" // Height 3: Multiply loop: Ragged operand read: Done
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "add x10, x10, #0x40\n"
+ "ldr q7, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
@@ -1104,246 +1104,246 @@ void a64_hybrid_s8s32_dot_6x16 (
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
"92:" // Height 3: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 82b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "cmp x11, #0x10\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "cmp x10, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
"bge 101f\n"
- "tbz x11, #3, 96f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v9.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v13.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "tbz x11, #2, 94f\n"
- "st1 { v10.4s }, [x9], #0x10\n"
- "st1 { v14.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "tbz x11, #1, 93f\n"
- "str d11, [x9], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
- "tbz x11, #0, 100f\n"
- "st1 { v11.s }[2], [x9]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
+ "tbz x10, #3, 96f\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v9.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v13.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
+ "tbz x10, #2, 94f\n"
+ "st1 { v10.4s }, [x28], #0x10\n"
+ "st1 { v14.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
+ "tbz x10, #1, 93f\n"
+ "str d11, [x28], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "tbz x10, #0, 100f\n"
+ "st1 { v11.s }[2], [x28]\n"
+ "st1 { v15.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x22]\n"
"b 100f\n"
"93:" // Height 3: Partial direct writeback: partial_1_12
- "tbz x11, #0, 100f\n"
- "str s11, [x9, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
+ "tbz x10, #0, 100f\n"
+ "str s11, [x28, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
"b 100f\n"
"94:" // Height 3: Partial direct writeback: partial_2_8
- "tbz x11, #1, 95f\n"
- "str d10, [x9], #0x8\n"
- "str d14, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
- "tbz x11, #0, 100f\n"
- "st1 { v10.s }[2], [x9]\n"
- "st1 { v14.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
+ "tbz x10, #1, 95f\n"
+ "str d10, [x28], #0x8\n"
+ "str d14, [x23], #0x8\n"
+ "str d18, [x22], #0x8\n"
+ "tbz x10, #0, 100f\n"
+ "st1 { v10.s }[2], [x28]\n"
+ "st1 { v14.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x22]\n"
"b 100f\n"
"95:" // Height 3: Partial direct writeback: partial_1_8
- "tbz x11, #0, 100f\n"
- "str s10, [x9, #0x0]\n"
- "str s14, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
+ "tbz x10, #0, 100f\n"
+ "str s10, [x28, #0x0]\n"
+ "str s14, [x23, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
"b 100f\n"
"96:" // Height 3: Partial direct writeback: partial_4_0
- "tbz x11, #2, 98f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "tbz x11, #1, 97f\n"
- "str d9, [x9], #0x8\n"
- "str d13, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
- "tbz x11, #0, 100f\n"
- "st1 { v9.s }[2], [x9]\n"
- "st1 { v13.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
+ "tbz x10, #2, 98f\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "tbz x10, #1, 97f\n"
+ "str d9, [x28], #0x8\n"
+ "str d13, [x23], #0x8\n"
+ "str d17, [x22], #0x8\n"
+ "tbz x10, #0, 100f\n"
+ "st1 { v9.s }[2], [x28]\n"
+ "st1 { v13.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x22]\n"
"b 100f\n"
"97:" // Height 3: Partial direct writeback: partial_1_4
- "tbz x11, #0, 100f\n"
- "str s9, [x9, #0x0]\n"
- "str s13, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
+ "tbz x10, #0, 100f\n"
+ "str s9, [x28, #0x0]\n"
+ "str s13, [x23, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
"b 100f\n"
"98:" // Height 3: Partial direct writeback: partial_2_0
- "tbz x11, #1, 99f\n"
- "str d8, [x9], #0x8\n"
- "str d12, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "tbz x11, #0, 100f\n"
- "st1 { v8.s }[2], [x9]\n"
- "st1 { v12.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
+ "tbz x10, #1, 99f\n"
+ "str d8, [x28], #0x8\n"
+ "str d12, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "tbz x10, #0, 100f\n"
+ "st1 { v8.s }[2], [x28]\n"
+ "st1 { v12.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x22]\n"
"b 100f\n"
"99:" // Height 3: Partial direct writeback: partial_1_0
- "str s8, [x9, #0x0]\n"
- "str s12, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
+ "str s8, [x28, #0x0]\n"
+ "str s12, [x23, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
"100:" // Height 3: Partial direct writeback: Done
"b 102f\n"
"101:" // Height 3: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q12, [x24, #0x0]\n"
- "str q13, [x24, #0x10]\n"
- "str q14, [x24, #0x20]\n"
- "str q15, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q12, [x23, #0x0]\n"
+ "str q13, [x23, #0x10]\n"
+ "str q14, [x23, #0x20]\n"
+ "str q15, [x23, #0x30]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q17, [x22, #0x10]\n"
+ "str q18, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
"102:" // Height 3: Writeback done
- "subs x11, x11, #0x10\n"
+ "subs x10, x10, #0x10\n"
"bgt 70b\n"
"b 206f\n"
"103:" // Height 4
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"104:" // Height 4: Column loop
"tbz %x[flags], #0, 114f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "cmp x11, #0x10\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x10, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"bge 113f\n"
- "tbz x11, #3, 108f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x24], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "tbz x11, #2, 106f\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "tbz x11, #1, 105f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d15, [x24], #0x8\n"
- "mov x25, #0x38\n"
- "ldr d19, [x23], #0x8\n"
- "ldr d23, [x22], #0x8\n"
- "tbz x11, #0, 112f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x24]\n"
- "ld1 { v19.s }[2], [x23]\n"
- "ld1 { v23.s }[2], [x22]\n"
+ "tbz x10, #3, 108f\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "tbz x10, #2, 106f\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
+ "tbz x10, #1, 105f\n"
+ "mov x24, #0x38\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d15, [x23], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
+ "ldr d23, [x21], #0x8\n"
+ "tbz x10, #0, 112f\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x23]\n"
+ "ld1 { v19.s }[2], [x22]\n"
+ "ld1 { v23.s }[2], [x21]\n"
"b 112f\n"
"105:" // Height 4: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x11, #0, 112f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s15, [x24, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s23, [x22, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x10, #0, 112f\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s15, [x23, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
+ "ldr s23, [x21, #0x0]\n"
"b 112f\n"
"106:" // Height 4: Partial accumulate: partial_2_8
- "tbz x11, #1, 107f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d14, [x24], #0x8\n"
- "mov x25, #0x28\n"
- "ldr d18, [x23], #0x8\n"
- "ldr d22, [x22], #0x8\n"
- "tbz x11, #0, 112f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x24]\n"
- "ld1 { v18.s }[2], [x23]\n"
- "ld1 { v22.s }[2], [x22]\n"
+ "tbz x10, #1, 107f\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
+ "mov x24, #0x28\n"
+ "ldr d18, [x22], #0x8\n"
+ "ldr d22, [x21], #0x8\n"
+ "tbz x10, #0, 112f\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x23]\n"
+ "ld1 { v18.s }[2], [x22]\n"
+ "ld1 { v22.s }[2], [x21]\n"
"b 112f\n"
"107:" // Height 4: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x11, #0, 112f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s14, [x24, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
- "ldr s22, [x22, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x10, #0, 112f\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s14, [x23, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
+ "ldr s22, [x21, #0x0]\n"
"b 112f\n"
"108:" // Height 4: Partial accumulate: partial_4_0
- "tbz x11, #2, 110f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "tbz x11, #1, 109f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d13, [x24], #0x8\n"
- "mov x25, #0x18\n"
- "ldr d17, [x23], #0x8\n"
- "ldr d21, [x22], #0x8\n"
- "tbz x11, #0, 112f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x24]\n"
- "ld1 { v17.s }[2], [x23]\n"
- "ld1 { v21.s }[2], [x22]\n"
+ "tbz x10, #2, 110f\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "tbz x10, #1, 109f\n"
+ "mov x24, #0x18\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d13, [x23], #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "ldr d21, [x21], #0x8\n"
+ "tbz x10, #0, 112f\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x23]\n"
+ "ld1 { v17.s }[2], [x22]\n"
+ "ld1 { v21.s }[2], [x21]\n"
"b 112f\n"
"109:" // Height 4: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x11, #0, 112f\n"
- "ldr s9, [x9, #0x0]\n"
- "ldr s13, [x24, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
- "ldr s21, [x22, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x10, #0, 112f\n"
+ "ldr s9, [x28, #0x0]\n"
+ "ldr s13, [x23, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
+ "ldr s21, [x21, #0x0]\n"
"b 112f\n"
"110:" // Height 4: Partial accumulate: partial_2_0
- "tbz x11, #1, 111f\n"
- "ldr d8, [x9], #0x8\n"
- "ldr d12, [x24], #0x8\n"
- "mov x25, #0x8\n"
- "ldr d16, [x23], #0x8\n"
- "ldr d20, [x22], #0x8\n"
- "tbz x11, #0, 112f\n"
- "ld1 { v8.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x24]\n"
- "ld1 { v16.s }[2], [x23]\n"
- "ld1 { v20.s }[2], [x22]\n"
+ "tbz x10, #1, 111f\n"
+ "ldr d8, [x28], #0x8\n"
+ "ldr d12, [x23], #0x8\n"
+ "mov x24, #0x8\n"
+ "ldr d16, [x22], #0x8\n"
+ "ldr d20, [x21], #0x8\n"
+ "tbz x10, #0, 112f\n"
+ "ld1 { v8.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x23]\n"
+ "ld1 { v16.s }[2], [x22]\n"
+ "ld1 { v20.s }[2], [x21]\n"
"b 112f\n"
"111:" // Height 4: Partial accumulate: partial_1_0
- "ldr s8, [x9, #0x0]\n"
- "ldr s12, [x24, #0x0]\n"
- "mov x25, #0x0\n"
- "ldr s16, [x23, #0x0]\n"
- "ldr s20, [x22, #0x0]\n"
+ "ldr s8, [x28, #0x0]\n"
+ "mov x24, #0x0\n"
+ "ldr s12, [x23, #0x0]\n"
+ "ldr s16, [x22, #0x0]\n"
+ "ldr s20, [x21, #0x0]\n"
"112:" // Height 4: Partial accumulate: Done
- "sub x9, x9, x25\n"
+ "sub x28, x28, x24\n"
"b 115f\n"
"113:" // Height 4: full accumulate
- "ldr q8, [x9, #0x0]\n"
- "ldr q9, [x9, #0x10]\n"
- "ldr q10, [x9, #0x20]\n"
- "ldr q11, [x9, #0x30]\n"
- "ldr q12, [x24, #0x0]\n"
- "ldr q13, [x24, #0x10]\n"
- "ldr q14, [x24, #0x20]\n"
- "ldr q15, [x24, #0x30]\n"
- "ldr q16, [x23, #0x0]\n"
- "ldr q17, [x23, #0x10]\n"
- "ldr q18, [x23, #0x20]\n"
- "ldr q19, [x23, #0x30]\n"
- "ldr q20, [x22, #0x0]\n"
- "ldr q21, [x22, #0x10]\n"
- "ldr q22, [x22, #0x20]\n"
- "ldr q23, [x22, #0x30]\n"
+ "ldr q8, [x28, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q10, [x28, #0x20]\n"
+ "ldr q11, [x28, #0x30]\n"
+ "ldr q12, [x23, #0x0]\n"
+ "ldr q13, [x23, #0x10]\n"
+ "ldr q14, [x23, #0x20]\n"
+ "ldr q15, [x23, #0x30]\n"
+ "ldr q16, [x22, #0x0]\n"
+ "ldr q17, [x22, #0x10]\n"
+ "ldr q18, [x22, #0x20]\n"
+ "ldr q19, [x22, #0x30]\n"
+ "ldr q20, [x21, #0x0]\n"
+ "ldr q21, [x21, #0x10]\n"
+ "ldr q22, [x21, #0x20]\n"
+ "ldr q23, [x21, #0x30]\n"
"b 115f\n"
"114:" // Height 4: no accumulate
"movi v8.4s, #0x0\n"
@@ -1363,219 +1363,219 @@ void a64_hybrid_s8s32_dot_6x16 (
"movi v22.4s, #0x0\n"
"movi v23.4s, #0x0\n"
"115:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"116:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 117f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 118f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 118f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
"b 118f\n"
"117:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
"118:" // Height 4: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 121f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q3, [x23, #0x0]\n"
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x20\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
"blt 120f\n"
"119:" // Height 4: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
+ "add x25, x25, #0x10\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "sub x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "add x23, x23, #0x10\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
- "add x25, x25, #0x10\n"
+ "ldr q6, [x9, #0x20]\n"
+ "add x22, x22, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "add x24, x24, #0x10\n"
- "add x23, x23, #0x10\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "cmp x26, #0x20\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "cmp x27, #0x20\n"
+ "ldr q7, [x9, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n"
- "ldr q6, [x10, #0x40]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
- "ldr q7, [x10, #0x50]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d4 // sdot v20.4s, v6.16b, v3.4b[1]\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n"
".inst 0x4fa3e0f5 // sdot v21.4s, v7.16b, v3.4b[1]\n"
- "ldr q7, [x10, #0x70]\n"
+ "ldr q7, [x9, #0x70]\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d6 // sdot v22.4s, v6.16b, v3.4b[1]\n"
- "ldr q6, [x10, #0x80]\n"
+ "ldr q6, [x9, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n"
".inst 0x4fa3e0f7 // sdot v23.4s, v7.16b, v3.4b[1]\n"
- "ldr q7, [x10, #0x90]\n"
+ "ldr q7, [x9, #0x90]\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d4 // sdot v20.4s, v6.16b, v3.4b[2]\n"
- "ldr q6, [x10, #0xa0]\n"
+ "ldr q6, [x9, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n"
".inst 0x4f83e8f5 // sdot v21.4s, v7.16b, v3.4b[2]\n"
- "ldr q7, [x10, #0xb0]\n"
+ "ldr q7, [x9, #0xb0]\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d6 // sdot v22.4s, v6.16b, v3.4b[2]\n"
- "ldr q6, [x10, #0xc0]\n"
+ "ldr q6, [x9, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n"
".inst 0x4f83e8f7 // sdot v23.4s, v7.16b, v3.4b[2]\n"
- "ldr q7, [x10, #0xd0]\n"
+ "ldr q7, [x9, #0xd0]\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n"
".inst 0x4fa3e8d4 // sdot v20.4s, v6.16b, v3.4b[3]\n"
- "ldr q6, [x10, #0xe0]\n"
+ "ldr q6, [x9, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n"
".inst 0x4fa3e8f5 // sdot v21.4s, v7.16b, v3.4b[3]\n"
- "ldr q7, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q7, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n"
".inst 0x4fa3e8d6 // sdot v22.4s, v6.16b, v3.4b[3]\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n"
- "ldr q1, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
".inst 0x4fa2e8f3 // sdot v19.4s, v7.16b, v2.4b[3]\n"
- "ldr q2, [x24, #0x0]\n"
+ "ldr q2, [x23, #0x0]\n"
".inst 0x4fa3e8f7 // sdot v23.4s, v7.16b, v3.4b[3]\n"
- "ldr q3, [x23, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q3, [x22, #0x0]\n"
"bge 119b\n"
"120:" // Height 4: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
- ".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
"add x24, x24, #0x10\n"
+ ".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "add x23, x23, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "add x22, x22, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "add x23, x23, #0x10\n"
- "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q7, [x9, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n"
- "ldr q6, [x10, #0x40]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
- "ldr q7, [x10, #0x50]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d4 // sdot v20.4s, v6.16b, v3.4b[1]\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n"
".inst 0x4fa3e0f5 // sdot v21.4s, v7.16b, v3.4b[1]\n"
- "ldr q7, [x10, #0x70]\n"
+ "ldr q7, [x9, #0x70]\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d6 // sdot v22.4s, v6.16b, v3.4b[1]\n"
- "ldr q6, [x10, #0x80]\n"
+ "ldr q6, [x9, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n"
".inst 0x4fa3e0f7 // sdot v23.4s, v7.16b, v3.4b[1]\n"
- "ldr q7, [x10, #0x90]\n"
+ "ldr q7, [x9, #0x90]\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d4 // sdot v20.4s, v6.16b, v3.4b[2]\n"
- "ldr q6, [x10, #0xa0]\n"
+ "ldr q6, [x9, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n"
".inst 0x4f83e8f5 // sdot v21.4s, v7.16b, v3.4b[2]\n"
- "ldr q7, [x10, #0xb0]\n"
+ "ldr q7, [x9, #0xb0]\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d6 // sdot v22.4s, v6.16b, v3.4b[2]\n"
- "ldr q6, [x10, #0xc0]\n"
+ "ldr q6, [x9, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n"
".inst 0x4f83e8f7 // sdot v23.4s, v7.16b, v3.4b[2]\n"
- "ldr q7, [x10, #0xd0]\n"
+ "ldr q7, [x9, #0xd0]\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n"
".inst 0x4fa3e8d4 // sdot v20.4s, v6.16b, v3.4b[3]\n"
- "ldr q6, [x10, #0xe0]\n"
+ "ldr q6, [x9, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n"
".inst 0x4fa3e8f5 // sdot v21.4s, v7.16b, v3.4b[3]\n"
- "ldr q7, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q7, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n"
@@ -1585,29 +1585,29 @@ void a64_hybrid_s8s32_dot_6x16 (
".inst 0x4fa2e8f3 // sdot v19.4s, v7.16b, v2.4b[3]\n"
".inst 0x4fa3e8f7 // sdot v23.4s, v7.16b, v3.4b[3]\n"
"121:" // Height 4: Multiply loop: Main loop skip
- "cbz x27, 126f\n"
- "cmp x27, #0x4\n"
+ "cbz x26, 126f\n"
+ "cmp x26, #0x4\n"
"blt 123f\n"
"122:" // Height 4: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr s1, [x25], #0x4\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
- "ldr s2, [x24], #0x4\n"
- "ldr s3, [x23], #0x4\n"
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr s0, [x25], #0x4\n"
+ "sub x26, x26, #0x4\n"
+ "ldr s1, [x24], #0x4\n"
+ "cmp x26, #0x4\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr s3, [x22], #0x4\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "add x10, x10, #0x40\n"
+ "ldr q7, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
@@ -1617,38 +1617,38 @@ void a64_hybrid_s8s32_dot_6x16 (
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
"bge 122b\n"
+ "cbz x26, 126f\n"
"123:" // Height 4: Multiply loop: Skip odd blocks
- "cbz x27, 126f\n"
- "tbz x27, #1, 124f\n"
- "ldr h0, [x26], #0x2\n"
- "ldr h1, [x25], #0x2\n"
- "ldr h2, [x24], #0x2\n"
- "ldr h3, [x23], #0x2\n"
- "tbz x27, #0, 125f\n"
- "ld1 { v0.b }[2], [x26]\n"
- "ld1 { v1.b }[2], [x25]\n"
- "ld1 { v2.b }[2], [x24]\n"
- "ld1 { v3.b }[2], [x23]\n"
+ "tbz x26, #1, 124f\n"
+ "ldr h0, [x25], #0x2\n"
+ "ldr h1, [x24], #0x2\n"
+ "ldr h2, [x23], #0x2\n"
+ "ldr h3, [x22], #0x2\n"
+ "tbz x26, #0, 125f\n"
+ "ld1 { v0.b }[2], [x25]\n"
+ "ld1 { v1.b }[2], [x24]\n"
+ "ld1 { v2.b }[2], [x23]\n"
+ "ld1 { v3.b }[2], [x22]\n"
"b 125f\n"
"124:" // Height 4: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x26, #0x0]\n"
- "ldr b1, [x25, #0x0]\n"
- "ldr b2, [x24, #0x0]\n"
- "ldr b3, [x23, #0x0]\n"
+ "ldr b0, [x25, #0x0]\n"
+ "ldr b1, [x24, #0x0]\n"
+ "ldr b2, [x23, #0x0]\n"
+ "ldr b3, [x22, #0x0]\n"
"125:" // Height 4: Multiply loop: Ragged operand read: Done
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "add x10, x10, #0x40\n"
+ "ldr q7, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
@@ -1658,289 +1658,289 @@ void a64_hybrid_s8s32_dot_6x16 (
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
"126:" // Height 4: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 116b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "add x22, x23, x20, LSL #2\n"
- "cmp x11, #0x10\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "cmp x10, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
"bge 135f\n"
- "tbz x11, #3, 130f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v9.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v13.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v21.4s }, [x22], #0x10\n"
- "tbz x11, #2, 128f\n"
- "st1 { v10.4s }, [x9], #0x10\n"
- "st1 { v14.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "st1 { v22.4s }, [x22], #0x10\n"
- "tbz x11, #1, 127f\n"
- "str d11, [x9], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
- "str d23, [x22], #0x8\n"
- "tbz x11, #0, 134f\n"
- "st1 { v11.s }[2], [x9]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
- "st1 { v23.s }[2], [x22]\n"
+ "tbz x10, #3, 130f\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v9.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v13.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v21.4s }, [x21], #0x10\n"
+ "tbz x10, #2, 128f\n"
+ "st1 { v10.4s }, [x28], #0x10\n"
+ "st1 { v14.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
+ "st1 { v22.4s }, [x21], #0x10\n"
+ "tbz x10, #1, 127f\n"
+ "str d11, [x28], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "str d23, [x21], #0x8\n"
+ "tbz x10, #0, 134f\n"
+ "st1 { v11.s }[2], [x28]\n"
+ "st1 { v15.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x22]\n"
+ "st1 { v23.s }[2], [x21]\n"
"b 134f\n"
"127:" // Height 4: Partial direct writeback: partial_1_12
- "tbz x11, #0, 134f\n"
- "str s11, [x9, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
- "str s23, [x22, #0x0]\n"
+ "tbz x10, #0, 134f\n"
+ "str s11, [x28, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
+ "str s23, [x21, #0x0]\n"
"b 134f\n"
"128:" // Height 4: Partial direct writeback: partial_2_8
- "tbz x11, #1, 129f\n"
- "str d10, [x9], #0x8\n"
- "str d14, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
- "str d22, [x22], #0x8\n"
- "tbz x11, #0, 134f\n"
- "st1 { v10.s }[2], [x9]\n"
- "st1 { v14.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
- "st1 { v22.s }[2], [x22]\n"
+ "tbz x10, #1, 129f\n"
+ "str d10, [x28], #0x8\n"
+ "str d14, [x23], #0x8\n"
+ "str d18, [x22], #0x8\n"
+ "str d22, [x21], #0x8\n"
+ "tbz x10, #0, 134f\n"
+ "st1 { v10.s }[2], [x28]\n"
+ "st1 { v14.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x22]\n"
+ "st1 { v22.s }[2], [x21]\n"
"b 134f\n"
"129:" // Height 4: Partial direct writeback: partial_1_8
- "tbz x11, #0, 134f\n"
- "str s10, [x9, #0x0]\n"
- "str s14, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
- "str s22, [x22, #0x0]\n"
+ "tbz x10, #0, 134f\n"
+ "str s10, [x28, #0x0]\n"
+ "str s14, [x23, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
+ "str s22, [x21, #0x0]\n"
"b 134f\n"
"130:" // Height 4: Partial direct writeback: partial_4_0
- "tbz x11, #2, 132f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "tbz x11, #1, 131f\n"
- "str d9, [x9], #0x8\n"
- "str d13, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
- "str d21, [x22], #0x8\n"
- "tbz x11, #0, 134f\n"
- "st1 { v9.s }[2], [x9]\n"
- "st1 { v13.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
- "st1 { v21.s }[2], [x22]\n"
+ "tbz x10, #2, 132f\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "tbz x10, #1, 131f\n"
+ "str d9, [x28], #0x8\n"
+ "str d13, [x23], #0x8\n"
+ "str d17, [x22], #0x8\n"
+ "str d21, [x21], #0x8\n"
+ "tbz x10, #0, 134f\n"
+ "st1 { v9.s }[2], [x28]\n"
+ "st1 { v13.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x22]\n"
+ "st1 { v21.s }[2], [x21]\n"
"b 134f\n"
"131:" // Height 4: Partial direct writeback: partial_1_4
- "tbz x11, #0, 134f\n"
- "str s9, [x9, #0x0]\n"
- "str s13, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
- "str s21, [x22, #0x0]\n"
+ "tbz x10, #0, 134f\n"
+ "str s9, [x28, #0x0]\n"
+ "str s13, [x23, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
+ "str s21, [x21, #0x0]\n"
"b 134f\n"
"132:" // Height 4: Partial direct writeback: partial_2_0
- "tbz x11, #1, 133f\n"
- "str d8, [x9], #0x8\n"
- "str d12, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d20, [x22], #0x8\n"
- "tbz x11, #0, 134f\n"
- "st1 { v8.s }[2], [x9]\n"
- "st1 { v12.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
- "st1 { v20.s }[2], [x22]\n"
+ "tbz x10, #1, 133f\n"
+ "str d8, [x28], #0x8\n"
+ "str d12, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "str d20, [x21], #0x8\n"
+ "tbz x10, #0, 134f\n"
+ "st1 { v8.s }[2], [x28]\n"
+ "st1 { v12.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x22]\n"
+ "st1 { v20.s }[2], [x21]\n"
"b 134f\n"
"133:" // Height 4: Partial direct writeback: partial_1_0
- "str s8, [x9, #0x0]\n"
- "str s12, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
- "str s20, [x22, #0x0]\n"
+ "str s8, [x28, #0x0]\n"
+ "str s12, [x23, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
+ "str s20, [x21, #0x0]\n"
"134:" // Height 4: Partial direct writeback: Done
"b 136f\n"
"135:" // Height 4: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q12, [x24, #0x0]\n"
- "str q13, [x24, #0x10]\n"
- "str q14, [x24, #0x20]\n"
- "str q15, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
- "str q20, [x22, #0x0]\n"
- "str q21, [x22, #0x10]\n"
- "str q22, [x22, #0x20]\n"
- "str q23, [x22, #0x30]\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q12, [x23, #0x0]\n"
+ "str q13, [x23, #0x10]\n"
+ "str q14, [x23, #0x20]\n"
+ "str q15, [x23, #0x30]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q17, [x22, #0x10]\n"
+ "str q18, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
+ "str q20, [x21, #0x0]\n"
+ "str q21, [x21, #0x10]\n"
+ "str q22, [x21, #0x20]\n"
+ "str q23, [x21, #0x30]\n"
"136:" // Height 4: Writeback done
- "subs x11, x11, #0x10\n"
+ "subs x10, x10, #0x10\n"
"bgt 104b\n"
"b 206f\n"
"137:" // Height 5
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"138:" // Height 5: Column loop
"tbz %x[flags], #0, 148f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "cmp x11, #0x10\n"
- "add x21, x22, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x10, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"bge 147f\n"
- "tbz x11, #3, 142f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v24.4s }, [x21], #0x10\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x24], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v25.4s }, [x21], #0x10\n"
- "tbz x11, #2, 140f\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
- "tbz x11, #1, 139f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d15, [x24], #0x8\n"
- "mov x25, #0x38\n"
- "ldr d19, [x23], #0x8\n"
- "ldr d23, [x22], #0x8\n"
- "ldr d27, [x21], #0x8\n"
- "tbz x11, #0, 146f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x24]\n"
- "ld1 { v19.s }[2], [x23]\n"
- "ld1 { v23.s }[2], [x22]\n"
- "ld1 { v27.s }[2], [x21]\n"
+ "tbz x10, #3, 142f\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v24.4s }, [x20], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "ld1 { v25.4s }, [x20], #0x10\n"
+ "tbz x10, #2, 140f\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
+ "ld1 { v26.4s }, [x20], #0x10\n"
+ "tbz x10, #1, 139f\n"
+ "ldr d11, [x28], #0x8\n"
+ "mov x24, #0x38\n"
+ "ldr d15, [x23], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
+ "ldr d23, [x21], #0x8\n"
+ "ldr d27, [x20], #0x8\n"
+ "tbz x10, #0, 146f\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x23]\n"
+ "ld1 { v19.s }[2], [x22]\n"
+ "ld1 { v23.s }[2], [x21]\n"
+ "ld1 { v27.s }[2], [x20]\n"
"b 146f\n"
"139:" // Height 5: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x11, #0, 146f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s15, [x24, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s23, [x22, #0x0]\n"
- "ldr s27, [x21, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x10, #0, 146f\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s15, [x23, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
+ "ldr s23, [x21, #0x0]\n"
+ "ldr s27, [x20, #0x0]\n"
"b 146f\n"
"140:" // Height 5: Partial accumulate: partial_2_8
- "tbz x11, #1, 141f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d14, [x24], #0x8\n"
- "mov x25, #0x28\n"
- "ldr d18, [x23], #0x8\n"
- "ldr d22, [x22], #0x8\n"
- "ldr d26, [x21], #0x8\n"
- "tbz x11, #0, 146f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x24]\n"
- "ld1 { v18.s }[2], [x23]\n"
- "ld1 { v22.s }[2], [x22]\n"
- "ld1 { v26.s }[2], [x21]\n"
+ "tbz x10, #1, 141f\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
+ "mov x24, #0x28\n"
+ "ldr d18, [x22], #0x8\n"
+ "ldr d22, [x21], #0x8\n"
+ "ldr d26, [x20], #0x8\n"
+ "tbz x10, #0, 146f\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x23]\n"
+ "ld1 { v18.s }[2], [x22]\n"
+ "ld1 { v22.s }[2], [x21]\n"
+ "ld1 { v26.s }[2], [x20]\n"
"b 146f\n"
"141:" // Height 5: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x11, #0, 146f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s14, [x24, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
- "ldr s22, [x22, #0x0]\n"
- "ldr s26, [x21, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x10, #0, 146f\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s14, [x23, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
+ "ldr s22, [x21, #0x0]\n"
+ "ldr s26, [x20, #0x0]\n"
"b 146f\n"
"142:" // Height 5: Partial accumulate: partial_4_0
- "tbz x11, #2, 144f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v24.4s }, [x21], #0x10\n"
- "tbz x11, #1, 143f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d13, [x24], #0x8\n"
- "mov x25, #0x18\n"
- "ldr d17, [x23], #0x8\n"
- "ldr d21, [x22], #0x8\n"
- "ldr d25, [x21], #0x8\n"
- "tbz x11, #0, 146f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x24]\n"
- "ld1 { v17.s }[2], [x23]\n"
- "ld1 { v21.s }[2], [x22]\n"
- "ld1 { v25.s }[2], [x21]\n"
+ "tbz x10, #2, 144f\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v24.4s }, [x20], #0x10\n"
+ "tbz x10, #1, 143f\n"
+ "ldr d9, [x28], #0x8\n"
+ "mov x24, #0x18\n"
+ "ldr d13, [x23], #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "ldr d21, [x21], #0x8\n"
+ "ldr d25, [x20], #0x8\n"
+ "tbz x10, #0, 146f\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x23]\n"
+ "ld1 { v17.s }[2], [x22]\n"
+ "ld1 { v21.s }[2], [x21]\n"
+ "ld1 { v25.s }[2], [x20]\n"
"b 146f\n"
"143:" // Height 5: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x11, #0, 146f\n"
- "ldr s9, [x9, #0x0]\n"
- "ldr s13, [x24, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
- "ldr s21, [x22, #0x0]\n"
- "ldr s25, [x21, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x10, #0, 146f\n"
+ "ldr s9, [x28, #0x0]\n"
+ "ldr s13, [x23, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
+ "ldr s21, [x21, #0x0]\n"
+ "ldr s25, [x20, #0x0]\n"
"b 146f\n"
"144:" // Height 5: Partial accumulate: partial_2_0
- "tbz x11, #1, 145f\n"
- "ldr d8, [x9], #0x8\n"
- "ldr d12, [x24], #0x8\n"
- "mov x25, #0x8\n"
- "ldr d16, [x23], #0x8\n"
- "ldr d20, [x22], #0x8\n"
- "ldr d24, [x21], #0x8\n"
- "tbz x11, #0, 146f\n"
- "ld1 { v8.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x24]\n"
- "ld1 { v16.s }[2], [x23]\n"
- "ld1 { v20.s }[2], [x22]\n"
- "ld1 { v24.s }[2], [x21]\n"
+ "tbz x10, #1, 145f\n"
+ "ldr d8, [x28], #0x8\n"
+ "ldr d12, [x23], #0x8\n"
+ "mov x24, #0x8\n"
+ "ldr d16, [x22], #0x8\n"
+ "ldr d20, [x21], #0x8\n"
+ "ldr d24, [x20], #0x8\n"
+ "tbz x10, #0, 146f\n"
+ "ld1 { v8.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x23]\n"
+ "ld1 { v16.s }[2], [x22]\n"
+ "ld1 { v20.s }[2], [x21]\n"
+ "ld1 { v24.s }[2], [x20]\n"
"b 146f\n"
"145:" // Height 5: Partial accumulate: partial_1_0
- "ldr s8, [x9, #0x0]\n"
- "ldr s12, [x24, #0x0]\n"
- "mov x25, #0x0\n"
- "ldr s16, [x23, #0x0]\n"
- "ldr s20, [x22, #0x0]\n"
- "ldr s24, [x21, #0x0]\n"
+ "ldr s8, [x28, #0x0]\n"
+ "mov x24, #0x0\n"
+ "ldr s12, [x23, #0x0]\n"
+ "ldr s16, [x22, #0x0]\n"
+ "ldr s20, [x21, #0x0]\n"
+ "ldr s24, [x20, #0x0]\n"
"146:" // Height 5: Partial accumulate: Done
- "sub x9, x9, x25\n"
+ "sub x28, x28, x24\n"
"b 149f\n"
"147:" // Height 5: full accumulate
- "ldr q8, [x9, #0x0]\n"
- "ldr q9, [x9, #0x10]\n"
- "ldr q10, [x9, #0x20]\n"
- "ldr q11, [x9, #0x30]\n"
- "ldr q12, [x24, #0x0]\n"
- "ldr q13, [x24, #0x10]\n"
- "ldr q14, [x24, #0x20]\n"
- "ldr q15, [x24, #0x30]\n"
- "ldr q16, [x23, #0x0]\n"
- "ldr q17, [x23, #0x10]\n"
- "ldr q18, [x23, #0x20]\n"
- "ldr q19, [x23, #0x30]\n"
- "ldr q20, [x22, #0x0]\n"
- "ldr q21, [x22, #0x10]\n"
- "ldr q22, [x22, #0x20]\n"
- "ldr q23, [x22, #0x30]\n"
- "ldr q24, [x21, #0x0]\n"
- "ldr q25, [x21, #0x10]\n"
- "ldr q26, [x21, #0x20]\n"
- "ldr q27, [x21, #0x30]\n"
+ "ldr q8, [x28, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q10, [x28, #0x20]\n"
+ "ldr q11, [x28, #0x30]\n"
+ "ldr q12, [x23, #0x0]\n"
+ "ldr q13, [x23, #0x10]\n"
+ "ldr q14, [x23, #0x20]\n"
+ "ldr q15, [x23, #0x30]\n"
+ "ldr q16, [x22, #0x0]\n"
+ "ldr q17, [x22, #0x10]\n"
+ "ldr q18, [x22, #0x20]\n"
+ "ldr q19, [x22, #0x30]\n"
+ "ldr q20, [x21, #0x0]\n"
+ "ldr q21, [x21, #0x10]\n"
+ "ldr q22, [x21, #0x20]\n"
+ "ldr q23, [x21, #0x30]\n"
+ "ldr q24, [x20, #0x0]\n"
+ "ldr q25, [x20, #0x10]\n"
+ "ldr q26, [x20, #0x20]\n"
+ "ldr q27, [x20, #0x30]\n"
"b 149f\n"
"148:" // Height 5: no accumulate
"movi v8.4s, #0x0\n"
@@ -1964,258 +1964,258 @@ void a64_hybrid_s8s32_dot_6x16 (
"movi v26.4s, #0x0\n"
"movi v27.4s, #0x0\n"
"149:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"150:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 151f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 152f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 152f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
"b 152f\n"
"151:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
"152:" // Height 5: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 155f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q3, [x23, #0x0]\n"
- "ldr q4, [x22, #0x0]\n"
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x20\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
"blt 154f\n"
"153:" // Height 5: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
+ "add x25, x25, #0x10\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "sub x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "add x23, x23, #0x10\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
- "add x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "add x22, x22, #0x10\n"
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "add x21, x21, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "add x23, x23, #0x10\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
+ "cmp x26, #0x20\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "add x22, x22, #0x10\n"
- "cmp x27, #0x20\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q7, [x9, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4f84e0da // sdot v26.4s, v6.16b, v4.4b[0]\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
- "ldr q7, [x10, #0x50]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d4 // sdot v20.4s, v6.16b, v3.4b[1]\n"
".inst 0x4fa4e0d8 // sdot v24.4s, v6.16b, v4.4b[1]\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n"
".inst 0x4fa3e0f5 // sdot v21.4s, v7.16b, v3.4b[1]\n"
".inst 0x4fa4e0f9 // sdot v25.4s, v7.16b, v4.4b[1]\n"
- "ldr q7, [x10, #0x70]\n"
+ "ldr q7, [x9, #0x70]\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d6 // sdot v22.4s, v6.16b, v3.4b[1]\n"
".inst 0x4fa4e0da // sdot v26.4s, v6.16b, v4.4b[1]\n"
- "ldr q6, [x10, #0x80]\n"
+ "ldr q6, [x9, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n"
".inst 0x4fa3e0f7 // sdot v23.4s, v7.16b, v3.4b[1]\n"
".inst 0x4fa4e0fb // sdot v27.4s, v7.16b, v4.4b[1]\n"
- "ldr q7, [x10, #0x90]\n"
+ "ldr q7, [x9, #0x90]\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d4 // sdot v20.4s, v6.16b, v3.4b[2]\n"
".inst 0x4f84e8d8 // sdot v24.4s, v6.16b, v4.4b[2]\n"
- "ldr q6, [x10, #0xa0]\n"
+ "ldr q6, [x9, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n"
".inst 0x4f83e8f5 // sdot v21.4s, v7.16b, v3.4b[2]\n"
".inst 0x4f84e8f9 // sdot v25.4s, v7.16b, v4.4b[2]\n"
- "ldr q7, [x10, #0xb0]\n"
+ "ldr q7, [x9, #0xb0]\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d6 // sdot v22.4s, v6.16b, v3.4b[2]\n"
".inst 0x4f84e8da // sdot v26.4s, v6.16b, v4.4b[2]\n"
- "ldr q6, [x10, #0xc0]\n"
+ "ldr q6, [x9, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n"
".inst 0x4f83e8f7 // sdot v23.4s, v7.16b, v3.4b[2]\n"
".inst 0x4f84e8fb // sdot v27.4s, v7.16b, v4.4b[2]\n"
- "ldr q7, [x10, #0xd0]\n"
+ "ldr q7, [x9, #0xd0]\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n"
".inst 0x4fa3e8d4 // sdot v20.4s, v6.16b, v3.4b[3]\n"
".inst 0x4fa4e8d8 // sdot v24.4s, v6.16b, v4.4b[3]\n"
- "ldr q6, [x10, #0xe0]\n"
+ "ldr q6, [x9, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n"
".inst 0x4fa3e8f5 // sdot v21.4s, v7.16b, v3.4b[3]\n"
".inst 0x4fa4e8f9 // sdot v25.4s, v7.16b, v4.4b[3]\n"
- "ldr q7, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q7, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n"
".inst 0x4fa3e8d6 // sdot v22.4s, v6.16b, v3.4b[3]\n"
".inst 0x4fa4e8da // sdot v26.4s, v6.16b, v4.4b[3]\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n"
- "ldr q1, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
".inst 0x4fa2e8f3 // sdot v19.4s, v7.16b, v2.4b[3]\n"
- "ldr q2, [x24, #0x0]\n"
+ "ldr q2, [x23, #0x0]\n"
".inst 0x4fa3e8f7 // sdot v23.4s, v7.16b, v3.4b[3]\n"
- "ldr q3, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
".inst 0x4fa4e8fb // sdot v27.4s, v7.16b, v4.4b[3]\n"
- "ldr q4, [x22, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q4, [x21, #0x0]\n"
"bge 153b\n"
"154:" // Height 5: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
- ".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
"add x24, x24, #0x10\n"
+ ".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"add x23, x23, #0x10\n"
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
- ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"add x22, x22, #0x10\n"
+ ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "add x21, x21, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "sub x27, x27, #0x10\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q7, [x9, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4f84e0da // sdot v26.4s, v6.16b, v4.4b[0]\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
- "ldr q7, [x10, #0x50]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d4 // sdot v20.4s, v6.16b, v3.4b[1]\n"
".inst 0x4fa4e0d8 // sdot v24.4s, v6.16b, v4.4b[1]\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n"
".inst 0x4fa3e0f5 // sdot v21.4s, v7.16b, v3.4b[1]\n"
".inst 0x4fa4e0f9 // sdot v25.4s, v7.16b, v4.4b[1]\n"
- "ldr q7, [x10, #0x70]\n"
+ "ldr q7, [x9, #0x70]\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d6 // sdot v22.4s, v6.16b, v3.4b[1]\n"
".inst 0x4fa4e0da // sdot v26.4s, v6.16b, v4.4b[1]\n"
- "ldr q6, [x10, #0x80]\n"
+ "ldr q6, [x9, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n"
".inst 0x4fa3e0f7 // sdot v23.4s, v7.16b, v3.4b[1]\n"
".inst 0x4fa4e0fb // sdot v27.4s, v7.16b, v4.4b[1]\n"
- "ldr q7, [x10, #0x90]\n"
+ "ldr q7, [x9, #0x90]\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d4 // sdot v20.4s, v6.16b, v3.4b[2]\n"
".inst 0x4f84e8d8 // sdot v24.4s, v6.16b, v4.4b[2]\n"
- "ldr q6, [x10, #0xa0]\n"
+ "ldr q6, [x9, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n"
".inst 0x4f83e8f5 // sdot v21.4s, v7.16b, v3.4b[2]\n"
".inst 0x4f84e8f9 // sdot v25.4s, v7.16b, v4.4b[2]\n"
- "ldr q7, [x10, #0xb0]\n"
+ "ldr q7, [x9, #0xb0]\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d6 // sdot v22.4s, v6.16b, v3.4b[2]\n"
".inst 0x4f84e8da // sdot v26.4s, v6.16b, v4.4b[2]\n"
- "ldr q6, [x10, #0xc0]\n"
+ "ldr q6, [x9, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n"
".inst 0x4f83e8f7 // sdot v23.4s, v7.16b, v3.4b[2]\n"
".inst 0x4f84e8fb // sdot v27.4s, v7.16b, v4.4b[2]\n"
- "ldr q7, [x10, #0xd0]\n"
+ "ldr q7, [x9, #0xd0]\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n"
".inst 0x4fa3e8d4 // sdot v20.4s, v6.16b, v3.4b[3]\n"
".inst 0x4fa4e8d8 // sdot v24.4s, v6.16b, v4.4b[3]\n"
- "ldr q6, [x10, #0xe0]\n"
+ "ldr q6, [x9, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n"
".inst 0x4fa3e8f5 // sdot v21.4s, v7.16b, v3.4b[3]\n"
".inst 0x4fa4e8f9 // sdot v25.4s, v7.16b, v4.4b[3]\n"
- "ldr q7, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q7, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n"
@@ -2227,32 +2227,32 @@ void a64_hybrid_s8s32_dot_6x16 (
".inst 0x4fa3e8f7 // sdot v23.4s, v7.16b, v3.4b[3]\n"
".inst 0x4fa4e8fb // sdot v27.4s, v7.16b, v4.4b[3]\n"
"155:" // Height 5: Multiply loop: Main loop skip
- "cbz x27, 160f\n"
- "cmp x27, #0x4\n"
+ "cbz x26, 160f\n"
+ "cmp x26, #0x4\n"
"blt 157f\n"
"156:" // Height 5: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr s1, [x25], #0x4\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
- "ldr s2, [x24], #0x4\n"
- "ldr s3, [x23], #0x4\n"
- "ldr s4, [x22], #0x4\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr s0, [x25], #0x4\n"
+ "sub x26, x26, #0x4\n"
+ "ldr s1, [x24], #0x4\n"
+ "cmp x26, #0x4\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr s3, [x22], #0x4\n"
+ "ldr s4, [x21], #0x4\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr q7, [x10, #0x10]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "add x10, x10, #0x40\n"
+ "ldr q7, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
@@ -2264,43 +2264,43 @@ void a64_hybrid_s8s32_dot_6x16 (
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
"bge 156b\n"
+ "cbz x26, 160f\n"
"157:" // Height 5: Multiply loop: Skip odd blocks
- "cbz x27, 160f\n"
- "tbz x27, #1, 158f\n"
- "ldr h0, [x26], #0x2\n"
- "ldr h1, [x25], #0x2\n"
- "ldr h2, [x24], #0x2\n"
- "ldr h3, [x23], #0x2\n"
- "ldr h4, [x22], #0x2\n"
- "tbz x27, #0, 159f\n"
- "ld1 { v0.b }[2], [x26]\n"
- "ld1 { v1.b }[2], [x25]\n"
- "ld1 { v2.b }[2], [x24]\n"
- "ld1 { v3.b }[2], [x23]\n"
- "ld1 { v4.b }[2], [x22]\n"
+ "tbz x26, #1, 158f\n"
+ "ldr h0, [x25], #0x2\n"
+ "ldr h1, [x24], #0x2\n"
+ "ldr h2, [x23], #0x2\n"
+ "ldr h3, [x22], #0x2\n"
+ "ldr h4, [x21], #0x2\n"
+ "tbz x26, #0, 159f\n"
+ "ld1 { v0.b }[2], [x25]\n"
+ "ld1 { v1.b }[2], [x24]\n"
+ "ld1 { v2.b }[2], [x23]\n"
+ "ld1 { v3.b }[2], [x22]\n"
+ "ld1 { v4.b }[2], [x21]\n"
"b 159f\n"
"158:" // Height 5: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x26, #0x0]\n"
- "ldr b1, [x25, #0x0]\n"
- "ldr b2, [x24, #0x0]\n"
- "ldr b3, [x23, #0x0]\n"
- "ldr b4, [x22, #0x0]\n"
+ "ldr b0, [x25, #0x0]\n"
+ "ldr b1, [x24, #0x0]\n"
+ "ldr b2, [x23, #0x0]\n"
+ "ldr b3, [x22, #0x0]\n"
+ "ldr b4, [x21, #0x0]\n"
"159:" // Height 5: Multiply loop: Ragged operand read: Done
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "add x10, x10, #0x40\n"
+ "ldr q7, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
@@ -2312,335 +2312,335 @@ void a64_hybrid_s8s32_dot_6x16 (
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
"160:" // Height 5: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 150b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "cmp x10, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
- "cmp x11, #0x10\n"
+ "add x22, x23, x19, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #2\n"
"prfm pstl1keep, [x21, #0x0]\n"
+ "add x20, x21, x19, LSL #2\n"
+ "prfm pstl1keep, [x20, #0x0]\n"
"bge 169f\n"
- "tbz x11, #3, 164f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v9.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v13.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v21.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
- "st1 { v25.4s }, [x21], #0x10\n"
- "tbz x11, #2, 162f\n"
- "st1 { v10.4s }, [x9], #0x10\n"
- "st1 { v14.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "st1 { v22.4s }, [x22], #0x10\n"
- "st1 { v26.4s }, [x21], #0x10\n"
- "tbz x11, #1, 161f\n"
- "str d11, [x9], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
- "str d23, [x22], #0x8\n"
- "str d27, [x21], #0x8\n"
- "tbz x11, #0, 168f\n"
- "st1 { v11.s }[2], [x9]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
- "st1 { v23.s }[2], [x22]\n"
- "st1 { v27.s }[2], [x21]\n"
+ "tbz x10, #3, 164f\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v9.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v13.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v21.4s }, [x21], #0x10\n"
+ "st1 { v24.4s }, [x20], #0x10\n"
+ "st1 { v25.4s }, [x20], #0x10\n"
+ "tbz x10, #2, 162f\n"
+ "st1 { v10.4s }, [x28], #0x10\n"
+ "st1 { v14.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
+ "st1 { v22.4s }, [x21], #0x10\n"
+ "st1 { v26.4s }, [x20], #0x10\n"
+ "tbz x10, #1, 161f\n"
+ "str d11, [x28], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "str d23, [x21], #0x8\n"
+ "str d27, [x20], #0x8\n"
+ "tbz x10, #0, 168f\n"
+ "st1 { v11.s }[2], [x28]\n"
+ "st1 { v15.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x22]\n"
+ "st1 { v23.s }[2], [x21]\n"
+ "st1 { v27.s }[2], [x20]\n"
"b 168f\n"
"161:" // Height 5: Partial direct writeback: partial_1_12
- "tbz x11, #0, 168f\n"
- "str s11, [x9, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
- "str s23, [x22, #0x0]\n"
- "str s27, [x21, #0x0]\n"
+ "tbz x10, #0, 168f\n"
+ "str s11, [x28, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
+ "str s23, [x21, #0x0]\n"
+ "str s27, [x20, #0x0]\n"
"b 168f\n"
"162:" // Height 5: Partial direct writeback: partial_2_8
- "tbz x11, #1, 163f\n"
- "str d10, [x9], #0x8\n"
- "str d14, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
- "str d22, [x22], #0x8\n"
- "str d26, [x21], #0x8\n"
- "tbz x11, #0, 168f\n"
- "st1 { v10.s }[2], [x9]\n"
- "st1 { v14.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
- "st1 { v22.s }[2], [x22]\n"
- "st1 { v26.s }[2], [x21]\n"
+ "tbz x10, #1, 163f\n"
+ "str d10, [x28], #0x8\n"
+ "str d14, [x23], #0x8\n"
+ "str d18, [x22], #0x8\n"
+ "str d22, [x21], #0x8\n"
+ "str d26, [x20], #0x8\n"
+ "tbz x10, #0, 168f\n"
+ "st1 { v10.s }[2], [x28]\n"
+ "st1 { v14.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x22]\n"
+ "st1 { v22.s }[2], [x21]\n"
+ "st1 { v26.s }[2], [x20]\n"
"b 168f\n"
"163:" // Height 5: Partial direct writeback: partial_1_8
- "tbz x11, #0, 168f\n"
- "str s10, [x9, #0x0]\n"
- "str s14, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
- "str s22, [x22, #0x0]\n"
- "str s26, [x21, #0x0]\n"
+ "tbz x10, #0, 168f\n"
+ "str s10, [x28, #0x0]\n"
+ "str s14, [x23, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
+ "str s22, [x21, #0x0]\n"
+ "str s26, [x20, #0x0]\n"
"b 168f\n"
"164:" // Height 5: Partial direct writeback: partial_4_0
- "tbz x11, #2, 166f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
- "tbz x11, #1, 165f\n"
- "str d9, [x9], #0x8\n"
- "str d13, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
- "str d21, [x22], #0x8\n"
- "str d25, [x21], #0x8\n"
- "tbz x11, #0, 168f\n"
- "st1 { v9.s }[2], [x9]\n"
- "st1 { v13.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
- "st1 { v21.s }[2], [x22]\n"
- "st1 { v25.s }[2], [x21]\n"
+ "tbz x10, #2, 166f\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v24.4s }, [x20], #0x10\n"
+ "tbz x10, #1, 165f\n"
+ "str d9, [x28], #0x8\n"
+ "str d13, [x23], #0x8\n"
+ "str d17, [x22], #0x8\n"
+ "str d21, [x21], #0x8\n"
+ "str d25, [x20], #0x8\n"
+ "tbz x10, #0, 168f\n"
+ "st1 { v9.s }[2], [x28]\n"
+ "st1 { v13.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x22]\n"
+ "st1 { v21.s }[2], [x21]\n"
+ "st1 { v25.s }[2], [x20]\n"
"b 168f\n"
"165:" // Height 5: Partial direct writeback: partial_1_4
- "tbz x11, #0, 168f\n"
- "str s9, [x9, #0x0]\n"
- "str s13, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
- "str s21, [x22, #0x0]\n"
- "str s25, [x21, #0x0]\n"
+ "tbz x10, #0, 168f\n"
+ "str s9, [x28, #0x0]\n"
+ "str s13, [x23, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
+ "str s21, [x21, #0x0]\n"
+ "str s25, [x20, #0x0]\n"
"b 168f\n"
"166:" // Height 5: Partial direct writeback: partial_2_0
- "tbz x11, #1, 167f\n"
- "str d8, [x9], #0x8\n"
- "str d12, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d20, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
- "tbz x11, #0, 168f\n"
- "st1 { v8.s }[2], [x9]\n"
- "st1 { v12.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
- "st1 { v20.s }[2], [x22]\n"
- "st1 { v24.s }[2], [x21]\n"
+ "tbz x10, #1, 167f\n"
+ "str d8, [x28], #0x8\n"
+ "str d12, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "str d20, [x21], #0x8\n"
+ "str d24, [x20], #0x8\n"
+ "tbz x10, #0, 168f\n"
+ "st1 { v8.s }[2], [x28]\n"
+ "st1 { v12.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x22]\n"
+ "st1 { v20.s }[2], [x21]\n"
+ "st1 { v24.s }[2], [x20]\n"
"b 168f\n"
"167:" // Height 5: Partial direct writeback: partial_1_0
- "str s8, [x9, #0x0]\n"
- "str s12, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
- "str s20, [x22, #0x0]\n"
- "str s24, [x21, #0x0]\n"
+ "str s8, [x28, #0x0]\n"
+ "str s12, [x23, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
+ "str s20, [x21, #0x0]\n"
+ "str s24, [x20, #0x0]\n"
"168:" // Height 5: Partial direct writeback: Done
"b 170f\n"
"169:" // Height 5: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q12, [x24, #0x0]\n"
- "str q13, [x24, #0x10]\n"
- "str q14, [x24, #0x20]\n"
- "str q15, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
- "str q20, [x22, #0x0]\n"
- "str q21, [x22, #0x10]\n"
- "str q22, [x22, #0x20]\n"
- "str q23, [x22, #0x30]\n"
- "str q24, [x21, #0x0]\n"
- "str q25, [x21, #0x10]\n"
- "str q26, [x21, #0x20]\n"
- "str q27, [x21, #0x30]\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q12, [x23, #0x0]\n"
+ "str q13, [x23, #0x10]\n"
+ "str q14, [x23, #0x20]\n"
+ "str q15, [x23, #0x30]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q17, [x22, #0x10]\n"
+ "str q18, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
+ "str q20, [x21, #0x0]\n"
+ "str q21, [x21, #0x10]\n"
+ "str q22, [x21, #0x20]\n"
+ "str q23, [x21, #0x30]\n"
+ "str q24, [x20, #0x0]\n"
+ "str q25, [x20, #0x10]\n"
+ "str q26, [x20, #0x20]\n"
+ "str q27, [x20, #0x30]\n"
"170:" // Height 5: Writeback done
- "subs x11, x11, #0x10\n"
+ "subs x10, x10, #0x10\n"
"bgt 138b\n"
"b 206f\n"
"171:" // Height 6
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"mov x20, #0x18\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x9, %x[output_ptr]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
"172:" // Height 6: Column loop
"tbz %x[flags], #0, 182f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "cmp x11, #0x10\n"
- "add x20, x21, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x10, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
+ "add x19, x20, x19, LSL #2\n"
"bge 181f\n"
- "tbz x11, #3, 176f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v24.4s }, [x21], #0x10\n"
- "ld1 { v28.4s }, [x20], #0x10\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x24], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v25.4s }, [x21], #0x10\n"
- "ld1 { v29.4s }, [x20], #0x10\n"
- "tbz x11, #2, 174f\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
- "ld1 { v30.4s }, [x20], #0x10\n"
- "tbz x11, #1, 173f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d15, [x24], #0x8\n"
- "mov x25, #0x38\n"
- "ldr d19, [x23], #0x8\n"
- "ldr d23, [x22], #0x8\n"
- "ldr d27, [x21], #0x8\n"
- "ldr d31, [x20], #0x8\n"
- "tbz x11, #0, 180f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x24]\n"
- "ld1 { v19.s }[2], [x23]\n"
- "ld1 { v23.s }[2], [x22]\n"
- "ld1 { v27.s }[2], [x21]\n"
- "ld1 { v31.s }[2], [x20]\n"
+ "tbz x10, #3, 176f\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v24.4s }, [x20], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "ld1 { v25.4s }, [x20], #0x10\n"
+ "ld1 { v28.4s }, [x19], #0x10\n"
+ "ld1 { v29.4s }, [x19], #0x10\n"
+ "tbz x10, #2, 174f\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
+ "ld1 { v26.4s }, [x20], #0x10\n"
+ "ld1 { v30.4s }, [x19], #0x10\n"
+ "tbz x10, #1, 173f\n"
+ "ldr d11, [x28], #0x8\n"
+ "mov x24, #0x38\n"
+ "ldr d15, [x23], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
+ "ldr d23, [x21], #0x8\n"
+ "ldr d27, [x20], #0x8\n"
+ "ldr d31, [x19], #0x8\n"
+ "tbz x10, #0, 180f\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x23]\n"
+ "ld1 { v19.s }[2], [x22]\n"
+ "ld1 { v23.s }[2], [x21]\n"
+ "ld1 { v27.s }[2], [x20]\n"
+ "ld1 { v31.s }[2], [x19]\n"
"b 180f\n"
"173:" // Height 6: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x11, #0, 180f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s15, [x24, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s23, [x22, #0x0]\n"
- "ldr s27, [x21, #0x0]\n"
- "ldr s31, [x20, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x10, #0, 180f\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s15, [x23, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
+ "ldr s23, [x21, #0x0]\n"
+ "ldr s27, [x20, #0x0]\n"
+ "ldr s31, [x19, #0x0]\n"
"b 180f\n"
"174:" // Height 6: Partial accumulate: partial_2_8
- "tbz x11, #1, 175f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d14, [x24], #0x8\n"
- "mov x25, #0x28\n"
- "ldr d18, [x23], #0x8\n"
- "ldr d22, [x22], #0x8\n"
- "ldr d26, [x21], #0x8\n"
- "ldr d30, [x20], #0x8\n"
- "tbz x11, #0, 180f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x24]\n"
- "ld1 { v18.s }[2], [x23]\n"
- "ld1 { v22.s }[2], [x22]\n"
- "ld1 { v26.s }[2], [x21]\n"
- "ld1 { v30.s }[2], [x20]\n"
+ "tbz x10, #1, 175f\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
+ "mov x24, #0x28\n"
+ "ldr d18, [x22], #0x8\n"
+ "ldr d22, [x21], #0x8\n"
+ "ldr d26, [x20], #0x8\n"
+ "ldr d30, [x19], #0x8\n"
+ "tbz x10, #0, 180f\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x23]\n"
+ "ld1 { v18.s }[2], [x22]\n"
+ "ld1 { v22.s }[2], [x21]\n"
+ "ld1 { v26.s }[2], [x20]\n"
+ "ld1 { v30.s }[2], [x19]\n"
"b 180f\n"
"175:" // Height 6: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x11, #0, 180f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s14, [x24, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
- "ldr s22, [x22, #0x0]\n"
- "ldr s26, [x21, #0x0]\n"
- "ldr s30, [x20, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x10, #0, 180f\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s14, [x23, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
+ "ldr s22, [x21, #0x0]\n"
+ "ldr s26, [x20, #0x0]\n"
+ "ldr s30, [x19, #0x0]\n"
"b 180f\n"
"176:" // Height 6: Partial accumulate: partial_4_0
- "tbz x11, #2, 178f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v24.4s }, [x21], #0x10\n"
- "ld1 { v28.4s }, [x20], #0x10\n"
- "tbz x11, #1, 177f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d13, [x24], #0x8\n"
- "mov x25, #0x18\n"
- "ldr d17, [x23], #0x8\n"
- "ldr d21, [x22], #0x8\n"
- "ldr d25, [x21], #0x8\n"
- "ldr d29, [x20], #0x8\n"
- "tbz x11, #0, 180f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x24]\n"
- "ld1 { v17.s }[2], [x23]\n"
- "ld1 { v21.s }[2], [x22]\n"
- "ld1 { v25.s }[2], [x21]\n"
- "ld1 { v29.s }[2], [x20]\n"
+ "tbz x10, #2, 178f\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v24.4s }, [x20], #0x10\n"
+ "ld1 { v28.4s }, [x19], #0x10\n"
+ "tbz x10, #1, 177f\n"
+ "ldr d9, [x28], #0x8\n"
+ "mov x24, #0x18\n"
+ "ldr d13, [x23], #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "ldr d21, [x21], #0x8\n"
+ "ldr d25, [x20], #0x8\n"
+ "ldr d29, [x19], #0x8\n"
+ "tbz x10, #0, 180f\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x23]\n"
+ "ld1 { v17.s }[2], [x22]\n"
+ "ld1 { v21.s }[2], [x21]\n"
+ "ld1 { v25.s }[2], [x20]\n"
+ "ld1 { v29.s }[2], [x19]\n"
"b 180f\n"
"177:" // Height 6: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x11, #0, 180f\n"
- "ldr s9, [x9, #0x0]\n"
- "ldr s13, [x24, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
- "ldr s21, [x22, #0x0]\n"
- "ldr s25, [x21, #0x0]\n"
- "ldr s29, [x20, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x10, #0, 180f\n"
+ "ldr s9, [x28, #0x0]\n"
+ "ldr s13, [x23, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
+ "ldr s21, [x21, #0x0]\n"
+ "ldr s25, [x20, #0x0]\n"
+ "ldr s29, [x19, #0x0]\n"
"b 180f\n"
"178:" // Height 6: Partial accumulate: partial_2_0
- "tbz x11, #1, 179f\n"
- "ldr d8, [x9], #0x8\n"
- "ldr d12, [x24], #0x8\n"
- "mov x25, #0x8\n"
- "ldr d16, [x23], #0x8\n"
- "ldr d20, [x22], #0x8\n"
- "ldr d24, [x21], #0x8\n"
- "ldr d28, [x20], #0x8\n"
- "tbz x11, #0, 180f\n"
- "ld1 { v8.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x24]\n"
- "ld1 { v16.s }[2], [x23]\n"
- "ld1 { v20.s }[2], [x22]\n"
- "ld1 { v24.s }[2], [x21]\n"
- "ld1 { v28.s }[2], [x20]\n"
+ "tbz x10, #1, 179f\n"
+ "ldr d8, [x28], #0x8\n"
+ "ldr d12, [x23], #0x8\n"
+ "mov x24, #0x8\n"
+ "ldr d16, [x22], #0x8\n"
+ "ldr d20, [x21], #0x8\n"
+ "ldr d24, [x20], #0x8\n"
+ "ldr d28, [x19], #0x8\n"
+ "tbz x10, #0, 180f\n"
+ "ld1 { v8.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x23]\n"
+ "ld1 { v16.s }[2], [x22]\n"
+ "ld1 { v20.s }[2], [x21]\n"
+ "ld1 { v24.s }[2], [x20]\n"
+ "ld1 { v28.s }[2], [x19]\n"
"b 180f\n"
"179:" // Height 6: Partial accumulate: partial_1_0
- "ldr s8, [x9, #0x0]\n"
- "ldr s12, [x24, #0x0]\n"
- "mov x25, #0x0\n"
- "ldr s16, [x23, #0x0]\n"
- "ldr s20, [x22, #0x0]\n"
- "ldr s24, [x21, #0x0]\n"
- "ldr s28, [x20, #0x0]\n"
+ "ldr s8, [x28, #0x0]\n"
+ "mov x24, #0x0\n"
+ "ldr s12, [x23, #0x0]\n"
+ "ldr s16, [x22, #0x0]\n"
+ "ldr s20, [x21, #0x0]\n"
+ "ldr s24, [x20, #0x0]\n"
+ "ldr s28, [x19, #0x0]\n"
"180:" // Height 6: Partial accumulate: Done
- "sub x9, x9, x25\n"
+ "sub x28, x28, x24\n"
"b 183f\n"
"181:" // Height 6: full accumulate
- "ldr q8, [x9, #0x0]\n"
- "ldr q9, [x9, #0x10]\n"
- "ldr q10, [x9, #0x20]\n"
- "ldr q11, [x9, #0x30]\n"
- "ldr q12, [x24, #0x0]\n"
- "ldr q13, [x24, #0x10]\n"
- "ldr q14, [x24, #0x20]\n"
- "ldr q15, [x24, #0x30]\n"
- "ldr q16, [x23, #0x0]\n"
- "ldr q17, [x23, #0x10]\n"
- "ldr q18, [x23, #0x20]\n"
- "ldr q19, [x23, #0x30]\n"
- "ldr q20, [x22, #0x0]\n"
- "ldr q21, [x22, #0x10]\n"
- "ldr q22, [x22, #0x20]\n"
- "ldr q23, [x22, #0x30]\n"
- "ldr q24, [x21, #0x0]\n"
- "ldr q25, [x21, #0x10]\n"
- "ldr q26, [x21, #0x20]\n"
- "ldr q27, [x21, #0x30]\n"
- "ldr q28, [x20, #0x0]\n"
- "ldr q29, [x20, #0x10]\n"
- "ldr q30, [x20, #0x20]\n"
- "ldr q31, [x20, #0x30]\n"
+ "ldr q8, [x28, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q10, [x28, #0x20]\n"
+ "ldr q11, [x28, #0x30]\n"
+ "ldr q12, [x23, #0x0]\n"
+ "ldr q13, [x23, #0x10]\n"
+ "ldr q14, [x23, #0x20]\n"
+ "ldr q15, [x23, #0x30]\n"
+ "ldr q16, [x22, #0x0]\n"
+ "ldr q17, [x22, #0x10]\n"
+ "ldr q18, [x22, #0x20]\n"
+ "ldr q19, [x22, #0x30]\n"
+ "ldr q20, [x21, #0x0]\n"
+ "ldr q21, [x21, #0x10]\n"
+ "ldr q22, [x21, #0x20]\n"
+ "ldr q23, [x21, #0x30]\n"
+ "ldr q24, [x20, #0x0]\n"
+ "ldr q25, [x20, #0x10]\n"
+ "ldr q26, [x20, #0x20]\n"
+ "ldr q27, [x20, #0x30]\n"
+ "ldr q28, [x19, #0x0]\n"
+ "ldr q29, [x19, #0x10]\n"
+ "ldr q30, [x19, #0x20]\n"
+ "ldr q31, [x19, #0x30]\n"
"b 183f\n"
"182:" // Height 6: no accumulate
"movi v8.4s, #0x0\n"
@@ -2668,297 +2668,297 @@ void a64_hybrid_s8s32_dot_6x16 (
"movi v30.4s, #0x0\n"
"movi v31.4s, #0x0\n"
"183:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"184:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 185f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 186f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
- "add x21, x21, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 186f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
+ "add x20, x20, x19\n"
"b 186f\n"
"185:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
+ "add x20, x21, x19\n"
"186:" // Height 6: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 189f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q3, [x23, #0x0]\n"
- "ldr q4, [x22, #0x0]\n"
- "ldr q5, [x21, #0x0]\n"
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x20\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
+ "ldr q5, [x20, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
"blt 188f\n"
"187:" // Height 6: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
+ "add x25, x25, #0x10\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "sub x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "add x23, x23, #0x10\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
- "add x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "add x22, x22, #0x10\n"
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "add x21, x21, #0x10\n"
".inst 0x4f85e0dc // sdot v28.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
- "add x23, x23, #0x10\n"
+ "ldr q6, [x9, #0x20]\n"
+ "add x20, x20, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "add x22, x22, #0x10\n"
- "add x21, x21, #0x10\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
+ "cmp x26, #0x20\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
- "cmp x27, #0x20\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
".inst 0x4f85e0fd // sdot v29.4s, v7.16b, v5.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q7, [x9, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x4f84e0da // sdot v26.4s, v6.16b, v4.4b[0]\n"
".inst 0x4f85e0de // sdot v30.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
".inst 0x4f85e0ff // sdot v31.4s, v7.16b, v5.4b[0]\n"
- "ldr q7, [x10, #0x50]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d4 // sdot v20.4s, v6.16b, v3.4b[1]\n"
".inst 0x4fa4e0d8 // sdot v24.4s, v6.16b, v4.4b[1]\n"
".inst 0x4fa5e0dc // sdot v28.4s, v6.16b, v5.4b[1]\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n"
".inst 0x4fa3e0f5 // sdot v21.4s, v7.16b, v3.4b[1]\n"
".inst 0x4fa4e0f9 // sdot v25.4s, v7.16b, v4.4b[1]\n"
".inst 0x4fa5e0fd // sdot v29.4s, v7.16b, v5.4b[1]\n"
- "ldr q7, [x10, #0x70]\n"
+ "ldr q7, [x9, #0x70]\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d6 // sdot v22.4s, v6.16b, v3.4b[1]\n"
".inst 0x4fa4e0da // sdot v26.4s, v6.16b, v4.4b[1]\n"
".inst 0x4fa5e0de // sdot v30.4s, v6.16b, v5.4b[1]\n"
- "ldr q6, [x10, #0x80]\n"
+ "ldr q6, [x9, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n"
".inst 0x4fa3e0f7 // sdot v23.4s, v7.16b, v3.4b[1]\n"
".inst 0x4fa4e0fb // sdot v27.4s, v7.16b, v4.4b[1]\n"
".inst 0x4fa5e0ff // sdot v31.4s, v7.16b, v5.4b[1]\n"
- "ldr q7, [x10, #0x90]\n"
+ "ldr q7, [x9, #0x90]\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d4 // sdot v20.4s, v6.16b, v3.4b[2]\n"
".inst 0x4f84e8d8 // sdot v24.4s, v6.16b, v4.4b[2]\n"
".inst 0x4f85e8dc // sdot v28.4s, v6.16b, v5.4b[2]\n"
- "ldr q6, [x10, #0xa0]\n"
+ "ldr q6, [x9, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n"
".inst 0x4f83e8f5 // sdot v21.4s, v7.16b, v3.4b[2]\n"
".inst 0x4f84e8f9 // sdot v25.4s, v7.16b, v4.4b[2]\n"
".inst 0x4f85e8fd // sdot v29.4s, v7.16b, v5.4b[2]\n"
- "ldr q7, [x10, #0xb0]\n"
+ "ldr q7, [x9, #0xb0]\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d6 // sdot v22.4s, v6.16b, v3.4b[2]\n"
".inst 0x4f84e8da // sdot v26.4s, v6.16b, v4.4b[2]\n"
".inst 0x4f85e8de // sdot v30.4s, v6.16b, v5.4b[2]\n"
- "ldr q6, [x10, #0xc0]\n"
+ "ldr q6, [x9, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n"
".inst 0x4f83e8f7 // sdot v23.4s, v7.16b, v3.4b[2]\n"
".inst 0x4f84e8fb // sdot v27.4s, v7.16b, v4.4b[2]\n"
".inst 0x4f85e8ff // sdot v31.4s, v7.16b, v5.4b[2]\n"
- "ldr q7, [x10, #0xd0]\n"
+ "ldr q7, [x9, #0xd0]\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n"
".inst 0x4fa3e8d4 // sdot v20.4s, v6.16b, v3.4b[3]\n"
".inst 0x4fa4e8d8 // sdot v24.4s, v6.16b, v4.4b[3]\n"
".inst 0x4fa5e8dc // sdot v28.4s, v6.16b, v5.4b[3]\n"
- "ldr q6, [x10, #0xe0]\n"
+ "ldr q6, [x9, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n"
".inst 0x4fa3e8f5 // sdot v21.4s, v7.16b, v3.4b[3]\n"
".inst 0x4fa4e8f9 // sdot v25.4s, v7.16b, v4.4b[3]\n"
".inst 0x4fa5e8fd // sdot v29.4s, v7.16b, v5.4b[3]\n"
- "ldr q7, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q7, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n"
".inst 0x4fa3e8d6 // sdot v22.4s, v6.16b, v3.4b[3]\n"
".inst 0x4fa4e8da // sdot v26.4s, v6.16b, v4.4b[3]\n"
".inst 0x4fa5e8de // sdot v30.4s, v6.16b, v5.4b[3]\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n"
- "ldr q1, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
".inst 0x4fa2e8f3 // sdot v19.4s, v7.16b, v2.4b[3]\n"
- "ldr q2, [x24, #0x0]\n"
+ "ldr q2, [x23, #0x0]\n"
".inst 0x4fa3e8f7 // sdot v23.4s, v7.16b, v3.4b[3]\n"
- "ldr q3, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
".inst 0x4fa4e8fb // sdot v27.4s, v7.16b, v4.4b[3]\n"
- "ldr q4, [x22, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
".inst 0x4fa5e8ff // sdot v31.4s, v7.16b, v5.4b[3]\n"
- "ldr q5, [x21, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q5, [x20, #0x0]\n"
"bge 187b\n"
"188:" // Height 6: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
- ".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
"add x24, x24, #0x10\n"
+ ".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"add x23, x23, #0x10\n"
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
- ".inst 0x4f85e0dc // sdot v28.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"add x22, x22, #0x10\n"
+ ".inst 0x4f85e0dc // sdot v28.4s, v6.16b, v5.4b[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "add x21, x21, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "add x20, x20, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "add x21, x21, #0x10\n"
- "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
".inst 0x4f85e0fd // sdot v29.4s, v7.16b, v5.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
+ "ldr q7, [x9, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x4f84e0da // sdot v26.4s, v6.16b, v4.4b[0]\n"
".inst 0x4f85e0de // sdot v30.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
".inst 0x4f85e0ff // sdot v31.4s, v7.16b, v5.4b[0]\n"
- "ldr q7, [x10, #0x50]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d4 // sdot v20.4s, v6.16b, v3.4b[1]\n"
".inst 0x4fa4e0d8 // sdot v24.4s, v6.16b, v4.4b[1]\n"
".inst 0x4fa5e0dc // sdot v28.4s, v6.16b, v5.4b[1]\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n"
".inst 0x4fa3e0f5 // sdot v21.4s, v7.16b, v3.4b[1]\n"
".inst 0x4fa4e0f9 // sdot v25.4s, v7.16b, v4.4b[1]\n"
".inst 0x4fa5e0fd // sdot v29.4s, v7.16b, v5.4b[1]\n"
- "ldr q7, [x10, #0x70]\n"
+ "ldr q7, [x9, #0x70]\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d6 // sdot v22.4s, v6.16b, v3.4b[1]\n"
".inst 0x4fa4e0da // sdot v26.4s, v6.16b, v4.4b[1]\n"
".inst 0x4fa5e0de // sdot v30.4s, v6.16b, v5.4b[1]\n"
- "ldr q6, [x10, #0x80]\n"
+ "ldr q6, [x9, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n"
".inst 0x4fa3e0f7 // sdot v23.4s, v7.16b, v3.4b[1]\n"
".inst 0x4fa4e0fb // sdot v27.4s, v7.16b, v4.4b[1]\n"
".inst 0x4fa5e0ff // sdot v31.4s, v7.16b, v5.4b[1]\n"
- "ldr q7, [x10, #0x90]\n"
+ "ldr q7, [x9, #0x90]\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d4 // sdot v20.4s, v6.16b, v3.4b[2]\n"
".inst 0x4f84e8d8 // sdot v24.4s, v6.16b, v4.4b[2]\n"
".inst 0x4f85e8dc // sdot v28.4s, v6.16b, v5.4b[2]\n"
- "ldr q6, [x10, #0xa0]\n"
+ "ldr q6, [x9, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n"
".inst 0x4f83e8f5 // sdot v21.4s, v7.16b, v3.4b[2]\n"
".inst 0x4f84e8f9 // sdot v25.4s, v7.16b, v4.4b[2]\n"
".inst 0x4f85e8fd // sdot v29.4s, v7.16b, v5.4b[2]\n"
- "ldr q7, [x10, #0xb0]\n"
+ "ldr q7, [x9, #0xb0]\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d6 // sdot v22.4s, v6.16b, v3.4b[2]\n"
".inst 0x4f84e8da // sdot v26.4s, v6.16b, v4.4b[2]\n"
".inst 0x4f85e8de // sdot v30.4s, v6.16b, v5.4b[2]\n"
- "ldr q6, [x10, #0xc0]\n"
+ "ldr q6, [x9, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n"
".inst 0x4f83e8f7 // sdot v23.4s, v7.16b, v3.4b[2]\n"
".inst 0x4f84e8fb // sdot v27.4s, v7.16b, v4.4b[2]\n"
".inst 0x4f85e8ff // sdot v31.4s, v7.16b, v5.4b[2]\n"
- "ldr q7, [x10, #0xd0]\n"
+ "ldr q7, [x9, #0xd0]\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n"
".inst 0x4fa3e8d4 // sdot v20.4s, v6.16b, v3.4b[3]\n"
".inst 0x4fa4e8d8 // sdot v24.4s, v6.16b, v4.4b[3]\n"
".inst 0x4fa5e8dc // sdot v28.4s, v6.16b, v5.4b[3]\n"
- "ldr q6, [x10, #0xe0]\n"
+ "ldr q6, [x9, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n"
".inst 0x4fa3e8f5 // sdot v21.4s, v7.16b, v3.4b[3]\n"
".inst 0x4fa4e8f9 // sdot v25.4s, v7.16b, v4.4b[3]\n"
".inst 0x4fa5e8fd // sdot v29.4s, v7.16b, v5.4b[3]\n"
- "ldr q7, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q7, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n"
@@ -2972,35 +2972,35 @@ void a64_hybrid_s8s32_dot_6x16 (
".inst 0x4fa4e8fb // sdot v27.4s, v7.16b, v4.4b[3]\n"
".inst 0x4fa5e8ff // sdot v31.4s, v7.16b, v5.4b[3]\n"
"189:" // Height 6: Multiply loop: Main loop skip
- "cbz x27, 194f\n"
- "cmp x27, #0x4\n"
+ "cbz x26, 194f\n"
+ "cmp x26, #0x4\n"
"blt 191f\n"
"190:" // Height 6: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr s1, [x25], #0x4\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
- "ldr s2, [x24], #0x4\n"
- "ldr s3, [x23], #0x4\n"
- "ldr s4, [x22], #0x4\n"
- "ldr s5, [x21], #0x4\n"
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr s0, [x25], #0x4\n"
+ "sub x26, x26, #0x4\n"
+ "ldr s1, [x24], #0x4\n"
+ "cmp x26, #0x4\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr s3, [x22], #0x4\n"
+ "ldr s4, [x21], #0x4\n"
+ "ldr s5, [x20], #0x4\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
".inst 0x4f85e0dc // sdot v28.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
".inst 0x4f85e0fd // sdot v29.4s, v7.16b, v5.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "add x10, x10, #0x40\n"
+ "ldr q7, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
@@ -3014,48 +3014,48 @@ void a64_hybrid_s8s32_dot_6x16 (
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
".inst 0x4f85e0ff // sdot v31.4s, v7.16b, v5.4b[0]\n"
"bge 190b\n"
+ "cbz x26, 194f\n"
"191:" // Height 6: Multiply loop: Skip odd blocks
- "cbz x27, 194f\n"
- "tbz x27, #1, 192f\n"
- "ldr h0, [x26], #0x2\n"
- "ldr h1, [x25], #0x2\n"
- "ldr h2, [x24], #0x2\n"
- "ldr h3, [x23], #0x2\n"
- "ldr h4, [x22], #0x2\n"
- "ldr h5, [x21], #0x2\n"
- "tbz x27, #0, 193f\n"
- "ld1 { v0.b }[2], [x26]\n"
- "ld1 { v1.b }[2], [x25]\n"
- "ld1 { v2.b }[2], [x24]\n"
- "ld1 { v3.b }[2], [x23]\n"
- "ld1 { v4.b }[2], [x22]\n"
- "ld1 { v5.b }[2], [x21]\n"
+ "tbz x26, #1, 192f\n"
+ "ldr h0, [x25], #0x2\n"
+ "ldr h1, [x24], #0x2\n"
+ "ldr h2, [x23], #0x2\n"
+ "ldr h3, [x22], #0x2\n"
+ "ldr h4, [x21], #0x2\n"
+ "ldr h5, [x20], #0x2\n"
+ "tbz x26, #0, 193f\n"
+ "ld1 { v0.b }[2], [x25]\n"
+ "ld1 { v1.b }[2], [x24]\n"
+ "ld1 { v2.b }[2], [x23]\n"
+ "ld1 { v3.b }[2], [x22]\n"
+ "ld1 { v4.b }[2], [x21]\n"
+ "ld1 { v5.b }[2], [x20]\n"
"b 193f\n"
"192:" // Height 6: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x26, #0x0]\n"
- "ldr b1, [x25, #0x0]\n"
- "ldr b2, [x24, #0x0]\n"
- "ldr b3, [x23, #0x0]\n"
- "ldr b4, [x22, #0x0]\n"
- "ldr b5, [x21, #0x0]\n"
+ "ldr b0, [x25, #0x0]\n"
+ "ldr b1, [x24, #0x0]\n"
+ "ldr b2, [x23, #0x0]\n"
+ "ldr b3, [x22, #0x0]\n"
+ "ldr b4, [x21, #0x0]\n"
+ "ldr b5, [x20, #0x0]\n"
"193:" // Height 6: Multiply loop: Ragged operand read: Done
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
".inst 0x4f85e0dc // sdot v28.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
".inst 0x4f85e0fd // sdot v29.4s, v7.16b, v5.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "add x10, x10, #0x40\n"
+ "ldr q7, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
@@ -3069,195 +3069,195 @@ void a64_hybrid_s8s32_dot_6x16 (
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
".inst 0x4f85e0ff // sdot v31.4s, v7.16b, v5.4b[0]\n"
"194:" // Height 6: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 184b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "cmp x10, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
- "add x20, x21, x20, LSL #2\n"
- "cmp x11, #0x10\n"
+ "add x22, x23, x19, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #2\n"
"prfm pstl1keep, [x21, #0x0]\n"
+ "add x20, x21, x19, LSL #2\n"
"prfm pstl1keep, [x20, #0x0]\n"
+ "add x19, x20, x19, LSL #2\n"
+ "prfm pstl1keep, [x19, #0x0]\n"
"bge 203f\n"
- "tbz x11, #3, 198f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v9.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v13.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v21.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
- "st1 { v25.4s }, [x21], #0x10\n"
- "st1 { v28.4s }, [x20], #0x10\n"
- "st1 { v29.4s }, [x20], #0x10\n"
- "tbz x11, #2, 196f\n"
- "st1 { v10.4s }, [x9], #0x10\n"
- "st1 { v14.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "st1 { v22.4s }, [x22], #0x10\n"
- "st1 { v26.4s }, [x21], #0x10\n"
- "st1 { v30.4s }, [x20], #0x10\n"
- "tbz x11, #1, 195f\n"
- "str d11, [x9], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
- "str d23, [x22], #0x8\n"
- "str d27, [x21], #0x8\n"
- "str d31, [x20], #0x8\n"
- "tbz x11, #0, 202f\n"
- "st1 { v11.s }[2], [x9]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
- "st1 { v23.s }[2], [x22]\n"
- "st1 { v27.s }[2], [x21]\n"
- "st1 { v31.s }[2], [x20]\n"
+ "tbz x10, #3, 198f\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v9.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v13.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v21.4s }, [x21], #0x10\n"
+ "st1 { v24.4s }, [x20], #0x10\n"
+ "st1 { v25.4s }, [x20], #0x10\n"
+ "st1 { v28.4s }, [x19], #0x10\n"
+ "st1 { v29.4s }, [x19], #0x10\n"
+ "tbz x10, #2, 196f\n"
+ "st1 { v10.4s }, [x28], #0x10\n"
+ "st1 { v14.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
+ "st1 { v22.4s }, [x21], #0x10\n"
+ "st1 { v26.4s }, [x20], #0x10\n"
+ "st1 { v30.4s }, [x19], #0x10\n"
+ "tbz x10, #1, 195f\n"
+ "str d11, [x28], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "str d23, [x21], #0x8\n"
+ "str d27, [x20], #0x8\n"
+ "str d31, [x19], #0x8\n"
+ "tbz x10, #0, 202f\n"
+ "st1 { v11.s }[2], [x28]\n"
+ "st1 { v15.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x22]\n"
+ "st1 { v23.s }[2], [x21]\n"
+ "st1 { v27.s }[2], [x20]\n"
+ "st1 { v31.s }[2], [x19]\n"
"b 202f\n"
"195:" // Height 6: Partial direct writeback: partial_1_12
- "tbz x11, #0, 202f\n"
- "str s11, [x9, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
- "str s23, [x22, #0x0]\n"
- "str s27, [x21, #0x0]\n"
- "str s31, [x20, #0x0]\n"
+ "tbz x10, #0, 202f\n"
+ "str s11, [x28, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
+ "str s23, [x21, #0x0]\n"
+ "str s27, [x20, #0x0]\n"
+ "str s31, [x19, #0x0]\n"
"b 202f\n"
"196:" // Height 6: Partial direct writeback: partial_2_8
- "tbz x11, #1, 197f\n"
- "str d10, [x9], #0x8\n"
- "str d14, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
- "str d22, [x22], #0x8\n"
- "str d26, [x21], #0x8\n"
- "str d30, [x20], #0x8\n"
- "tbz x11, #0, 202f\n"
- "st1 { v10.s }[2], [x9]\n"
- "st1 { v14.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
- "st1 { v22.s }[2], [x22]\n"
- "st1 { v26.s }[2], [x21]\n"
- "st1 { v30.s }[2], [x20]\n"
+ "tbz x10, #1, 197f\n"
+ "str d10, [x28], #0x8\n"
+ "str d14, [x23], #0x8\n"
+ "str d18, [x22], #0x8\n"
+ "str d22, [x21], #0x8\n"
+ "str d26, [x20], #0x8\n"
+ "str d30, [x19], #0x8\n"
+ "tbz x10, #0, 202f\n"
+ "st1 { v10.s }[2], [x28]\n"
+ "st1 { v14.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x22]\n"
+ "st1 { v22.s }[2], [x21]\n"
+ "st1 { v26.s }[2], [x20]\n"
+ "st1 { v30.s }[2], [x19]\n"
"b 202f\n"
"197:" // Height 6: Partial direct writeback: partial_1_8
- "tbz x11, #0, 202f\n"
- "str s10, [x9, #0x0]\n"
- "str s14, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
- "str s22, [x22, #0x0]\n"
- "str s26, [x21, #0x0]\n"
- "str s30, [x20, #0x0]\n"
+ "tbz x10, #0, 202f\n"
+ "str s10, [x28, #0x0]\n"
+ "str s14, [x23, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
+ "str s22, [x21, #0x0]\n"
+ "str s26, [x20, #0x0]\n"
+ "str s30, [x19, #0x0]\n"
"b 202f\n"
"198:" // Height 6: Partial direct writeback: partial_4_0
- "tbz x11, #2, 200f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
- "st1 { v28.4s }, [x20], #0x10\n"
- "tbz x11, #1, 199f\n"
- "str d9, [x9], #0x8\n"
- "str d13, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
- "str d21, [x22], #0x8\n"
- "str d25, [x21], #0x8\n"
- "str d29, [x20], #0x8\n"
- "tbz x11, #0, 202f\n"
- "st1 { v9.s }[2], [x9]\n"
- "st1 { v13.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
- "st1 { v21.s }[2], [x22]\n"
- "st1 { v25.s }[2], [x21]\n"
- "st1 { v29.s }[2], [x20]\n"
+ "tbz x10, #2, 200f\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v24.4s }, [x20], #0x10\n"
+ "st1 { v28.4s }, [x19], #0x10\n"
+ "tbz x10, #1, 199f\n"
+ "str d9, [x28], #0x8\n"
+ "str d13, [x23], #0x8\n"
+ "str d17, [x22], #0x8\n"
+ "str d21, [x21], #0x8\n"
+ "str d25, [x20], #0x8\n"
+ "str d29, [x19], #0x8\n"
+ "tbz x10, #0, 202f\n"
+ "st1 { v9.s }[2], [x28]\n"
+ "st1 { v13.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x22]\n"
+ "st1 { v21.s }[2], [x21]\n"
+ "st1 { v25.s }[2], [x20]\n"
+ "st1 { v29.s }[2], [x19]\n"
"b 202f\n"
"199:" // Height 6: Partial direct writeback: partial_1_4
- "tbz x11, #0, 202f\n"
- "str s9, [x9, #0x0]\n"
- "str s13, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
- "str s21, [x22, #0x0]\n"
- "str s25, [x21, #0x0]\n"
- "str s29, [x20, #0x0]\n"
+ "tbz x10, #0, 202f\n"
+ "str s9, [x28, #0x0]\n"
+ "str s13, [x23, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
+ "str s21, [x21, #0x0]\n"
+ "str s25, [x20, #0x0]\n"
+ "str s29, [x19, #0x0]\n"
"b 202f\n"
"200:" // Height 6: Partial direct writeback: partial_2_0
- "tbz x11, #1, 201f\n"
- "str d8, [x9], #0x8\n"
- "str d12, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d20, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
- "str d28, [x20], #0x8\n"
- "tbz x11, #0, 202f\n"
- "st1 { v8.s }[2], [x9]\n"
- "st1 { v12.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
- "st1 { v20.s }[2], [x22]\n"
- "st1 { v24.s }[2], [x21]\n"
- "st1 { v28.s }[2], [x20]\n"
+ "tbz x10, #1, 201f\n"
+ "str d8, [x28], #0x8\n"
+ "str d12, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "str d20, [x21], #0x8\n"
+ "str d24, [x20], #0x8\n"
+ "str d28, [x19], #0x8\n"
+ "tbz x10, #0, 202f\n"
+ "st1 { v8.s }[2], [x28]\n"
+ "st1 { v12.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x22]\n"
+ "st1 { v20.s }[2], [x21]\n"
+ "st1 { v24.s }[2], [x20]\n"
+ "st1 { v28.s }[2], [x19]\n"
"b 202f\n"
"201:" // Height 6: Partial direct writeback: partial_1_0
- "str s8, [x9, #0x0]\n"
- "str s12, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
- "str s20, [x22, #0x0]\n"
- "str s24, [x21, #0x0]\n"
- "str s28, [x20, #0x0]\n"
+ "str s8, [x28, #0x0]\n"
+ "str s12, [x23, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
+ "str s20, [x21, #0x0]\n"
+ "str s24, [x20, #0x0]\n"
+ "str s28, [x19, #0x0]\n"
"202:" // Height 6: Partial direct writeback: Done
"b 204f\n"
"203:" // Height 6: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q12, [x24, #0x0]\n"
- "str q13, [x24, #0x10]\n"
- "str q14, [x24, #0x20]\n"
- "str q15, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
- "str q20, [x22, #0x0]\n"
- "str q21, [x22, #0x10]\n"
- "str q22, [x22, #0x20]\n"
- "str q23, [x22, #0x30]\n"
- "str q24, [x21, #0x0]\n"
- "str q25, [x21, #0x10]\n"
- "str q26, [x21, #0x20]\n"
- "str q27, [x21, #0x30]\n"
- "str q28, [x20, #0x0]\n"
- "str q29, [x20, #0x10]\n"
- "str q30, [x20, #0x20]\n"
- "str q31, [x20, #0x30]\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q12, [x23, #0x0]\n"
+ "str q13, [x23, #0x10]\n"
+ "str q14, [x23, #0x20]\n"
+ "str q15, [x23, #0x30]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q17, [x22, #0x10]\n"
+ "str q18, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
+ "str q20, [x21, #0x0]\n"
+ "str q21, [x21, #0x10]\n"
+ "str q22, [x21, #0x20]\n"
+ "str q23, [x21, #0x30]\n"
+ "str q24, [x20, #0x0]\n"
+ "str q25, [x20, #0x10]\n"
+ "str q26, [x20, #0x20]\n"
+ "str q27, [x20, #0x30]\n"
+ "str q28, [x19, #0x0]\n"
+ "str q29, [x19, #0x10]\n"
+ "str q30, [x19, #0x20]\n"
+ "str q31, [x19, #0x30]\n"
"204:" // Height 6: Writeback done
- "subs x11, x11, #0x10\n"
+ "subs x10, x10, #0x10\n"
"bgt 172b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 206f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 205f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"205:" // Update direct input
- "mov x20, #0x6\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x6\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"206:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_mmla_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_mmla_6x16/generic.cpp
index f48623e129..a9f6b06ae1 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_mmla_6x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_mmla_6x16/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -87,73 +87,73 @@ void a64_hybrid_s8s32_mmla_6x16 (
"cmp %x[M], #0x2\n"
"bgt 75f\n"
"beq 38f\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"2:" // Height 1: Column loop
"tbz %x[flags], #0, 13f\n"
- "cmp x11, #0x10\n"
+ "cmp x10, #0x10\n"
"bge 11f\n"
- "tbz x11, #3, 6f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "tbz x11, #2, 4f\n"
- "ld1 { v11.4s }, [x9], #0x10\n"
- "tbz x11, #1, 3f\n"
- "ldr d16, [x9], #0x8\n"
- "mov x25, #0x38\n"
- "tbz x11, #0, 10f\n"
- "ld1 { v16.s }[2], [x9]\n"
+ "tbz x10, #3, 6f\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "tbz x10, #2, 4f\n"
+ "ld1 { v11.4s }, [x28], #0x10\n"
+ "tbz x10, #1, 3f\n"
+ "mov x24, #0x38\n"
+ "ldr d16, [x28], #0x8\n"
+ "tbz x10, #0, 10f\n"
+ "ld1 { v16.s }[2], [x28]\n"
"b 10f\n"
"3:" // Height 1: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x11, #0, 10f\n"
- "ldr s16, [x9, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x10, #0, 10f\n"
+ "ldr s16, [x28, #0x0]\n"
"b 10f\n"
"4:" // Height 1: Partial accumulate: partial_2_8
- "tbz x11, #1, 5f\n"
- "ldr d11, [x9], #0x8\n"
- "mov x25, #0x28\n"
- "tbz x11, #0, 10f\n"
- "ld1 { v11.s }[2], [x9]\n"
+ "tbz x10, #1, 5f\n"
+ "ldr d11, [x28], #0x8\n"
+ "mov x24, #0x28\n"
+ "tbz x10, #0, 10f\n"
+ "ld1 { v11.s }[2], [x28]\n"
"b 10f\n"
"5:" // Height 1: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x11, #0, 10f\n"
- "ldr s11, [x9, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x10, #0, 10f\n"
+ "ldr s11, [x28, #0x0]\n"
"b 10f\n"
"6:" // Height 1: Partial accumulate: partial_4_0
- "tbz x11, #2, 8f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "tbz x11, #1, 7f\n"
- "ldr d10, [x9], #0x8\n"
- "mov x25, #0x18\n"
- "tbz x11, #0, 10f\n"
- "ld1 { v10.s }[2], [x9]\n"
+ "tbz x10, #2, 8f\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "tbz x10, #1, 7f\n"
+ "ldr d10, [x28], #0x8\n"
+ "mov x24, #0x18\n"
+ "tbz x10, #0, 10f\n"
+ "ld1 { v10.s }[2], [x28]\n"
"b 10f\n"
"7:" // Height 1: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x11, #0, 10f\n"
- "ldr s10, [x9, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x10, #0, 10f\n"
+ "ldr s10, [x28, #0x0]\n"
"b 10f\n"
"8:" // Height 1: Partial accumulate: partial_2_0
- "tbz x11, #1, 9f\n"
- "ldr d9, [x9], #0x8\n"
- "mov x25, #0x8\n"
- "tbz x11, #0, 10f\n"
- "ld1 { v9.s }[2], [x9]\n"
+ "tbz x10, #1, 9f\n"
+ "ldr d9, [x28], #0x8\n"
+ "mov x24, #0x8\n"
+ "tbz x10, #0, 10f\n"
+ "ld1 { v9.s }[2], [x28]\n"
"b 10f\n"
"9:" // Height 1: Partial accumulate: partial_1_0
- "ldr s9, [x9, #0x0]\n"
- "mov x25, #0x0\n"
+ "ldr s9, [x28, #0x0]\n"
+ "mov x24, #0x0\n"
"10:" // Height 1: Partial accumulate: Done
- "sub x9, x9, x25\n"
+ "sub x28, x28, x24\n"
"b 12f\n"
"11:" // Height 1: full accumulate
- "ldr q9, [x9, #0x0]\n"
- "ldr q10, [x9, #0x10]\n"
- "ldr q11, [x9, #0x20]\n"
- "ldr q16, [x9, #0x30]\n"
+ "ldr q9, [x28, #0x0]\n"
+ "ldr q10, [x28, #0x10]\n"
+ "ldr q11, [x28, #0x20]\n"
+ "ldr q16, [x28, #0x30]\n"
"12:" // Height 1: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -174,333 +174,337 @@ void a64_hybrid_s8s32_mmla_6x16 (
"movi v14.4s, #0x0\n"
"movi v15.4s, #0x0\n"
"14:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"15:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 16f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 17f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 17f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
"b 17f\n"
"16:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"17:" // Height 1: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 20f\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q7, [x10, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "cmp x26, #0x20\n"
"blt 19f\n"
"18:" // Height 1: Multiply loop: Main loop head
+ "movi v2.16b, #0x0\n"
+ "ldr q7, [x9, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q6, [x9, #0x10]\n"
+ "sub x26, x26, #0x10\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "cmp x26, #0x20\n"
".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
+ "ldr q7, [x9, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
+ "ldr q6, [x9, #0x70]\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x80]\n"
+ "ldr q7, [x9, #0x80]\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
+ "ldr q6, [x9, #0x90]\n"
".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
+ "ldr q7, [x9, #0xa0]\n"
".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
+ "ldr q6, [x9, #0xb0]\n"
".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
+ "ldr q7, [x9, #0xc0]\n"
".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
+ "ldr q6, [x9, #0xd0]\n"
".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
+ "ldr q7, [x9, #0xe0]\n"
".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
- "sub x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
- "cmp x27, #0x20\n"
+ "ldr q6, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
- "ldr q1, [x26, #0x0]\n"
- "add x10, x10, #0x100\n"
- "ldr q7, [x10, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q1, [x25, #0x0]\n"
"bge 18b\n"
"19:" // Height 1: Multiply loop: Single iteration only
+ "movi v2.16b, #0x0\n"
+ "ldr q7, [x9, #0x0]\n"
+ "sub x26, x26, #0x10\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q6, [x9, #0x10]\n"
+ "add x25, x25, #0x10\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
+ "ldr q7, [x9, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
+ "ldr q6, [x9, #0x70]\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x80]\n"
+ "ldr q7, [x9, #0x80]\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
+ "ldr q6, [x9, #0x90]\n"
".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
+ "ldr q7, [x9, #0xa0]\n"
".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
+ "ldr q6, [x9, #0xb0]\n"
".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
+ "ldr q7, [x9, #0xc0]\n"
".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
+ "ldr q6, [x9, #0xd0]\n"
".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
+ "ldr q7, [x9, #0xe0]\n"
".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
- "add x26, x26, #0x10\n"
- "sub x27, x27, #0x10\n"
+ "ldr q6, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "add x10, x10, #0x100\n"
"20:" // Height 1: Multiply loop: Main loop skip
- "cbz x27, 27f\n"
- "cmp x27, #0x8\n"
+ "cbz x26, 27f\n"
+ "cmp x26, #0x8\n"
"blt 22f\n"
"21:" // Height 1: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr q6, [x10, #0x0]\n"
+ "movi v2.16b, #0x0\n"
+ "ldr d1, [x25], #0x8\n"
+ "sub x26, x26, #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "cmp x26, #0x8\n"
".inst 0x4e86a408 // smmla v8.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x10]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x4e87a40c // smmla v12.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x30]\n"
+ "ldr q7, [x9, #0x30]\n"
".inst 0x4e86a409 // smmla v9.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x4e87a40d // smmla v13.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x50]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x4e86a40a // smmla v10.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x4e87a40e // smmla v14.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x70]\n"
- "sub x27, x27, #0x8\n"
- "cmp x27, #0x8\n"
+ "ldr q7, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
".inst 0x4e86a40b // smmla v11.4s, v0.16b, v6.16b\n"
".inst 0x4e87a40f // smmla v15.4s, v0.16b, v7.16b\n"
- "add x10, x10, #0x80\n"
"bge 21b\n"
+ "cbz x26, 27f\n"
"22:" // Height 1: Multiply loop: Skip odd blocks
- "cbz x27, 27f\n"
- "tbz x27, #2, 24f\n"
- "ldr s1, [x26], #0x4\n"
- "tbz x27, #1, 23f\n"
- "ld1 { v1.h }[2], [x26], #0x2\n"
- "tbz x27, #0, 26f\n"
- "ld1 { v1.b }[6], [x26]\n"
+ "tbz x26, #2, 24f\n"
+ "ldr s1, [x25], #0x4\n"
+ "tbz x26, #1, 23f\n"
+ "ld1 { v1.h }[2], [x25], #0x2\n"
+ "tbz x26, #0, 26f\n"
+ "ld1 { v1.b }[6], [x25]\n"
"b 26f\n"
"23:" // Height 1: Multiply loop: Ragged operand read: partial_1_4
- "tbz x27, #0, 26f\n"
- "ld1 { v1.b }[4], [x26]\n"
+ "tbz x26, #0, 26f\n"
+ "ld1 { v1.b }[4], [x25]\n"
"b 26f\n"
"24:" // Height 1: Multiply loop: Ragged operand read: partial_2_0
- "tbz x27, #1, 25f\n"
- "ldr h1, [x26], #0x2\n"
- "tbz x27, #0, 26f\n"
- "ld1 { v1.b }[2], [x26]\n"
+ "tbz x26, #1, 25f\n"
+ "ldr h1, [x25], #0x2\n"
+ "tbz x26, #0, 26f\n"
+ "ld1 { v1.b }[2], [x25]\n"
"b 26f\n"
"25:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
- "ldr b1, [x26, #0x0]\n"
+ "ldr b1, [x25, #0x0]\n"
"26:" // Height 1: Multiply loop: Ragged operand read: Done
- "ldr q7, [x10, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
+ "movi v2.16b, #0x0\n"
+ "ldr q7, [x9, #0x0]\n"
+ "ldr q6, [x9, #0x10]\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
+ "ldr q7, [x9, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
+ "ldr q6, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
- "add x10, x10, #0x80\n"
"27:" // Height 1: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 15b\n"
- "cmp x11, #0x10\n"
"uzp1 v8.2d, v8.2d, v12.2d\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
"uzp1 v9.2d, v9.2d, v13.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
+ "cmp x10, #0x10\n"
"uzp1 v10.2d, v10.2d, v14.2d\n"
"uzp1 v11.2d, v11.2d, v15.2d\n"
"bge 36f\n"
- "tbz x11, #3, 31f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v9.4s }, [x9], #0x10\n"
- "tbz x11, #2, 29f\n"
- "st1 { v10.4s }, [x9], #0x10\n"
- "tbz x11, #1, 28f\n"
- "str d11, [x9], #0x8\n"
- "tbz x11, #0, 35f\n"
- "st1 { v11.s }[2], [x9]\n"
+ "tbz x10, #3, 31f\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v9.4s }, [x28], #0x10\n"
+ "tbz x10, #2, 29f\n"
+ "st1 { v10.4s }, [x28], #0x10\n"
+ "tbz x10, #1, 28f\n"
+ "str d11, [x28], #0x8\n"
+ "tbz x10, #0, 35f\n"
+ "st1 { v11.s }[2], [x28]\n"
"b 35f\n"
"28:" // Height 1: Partial direct writeback: partial_1_12
- "tbz x11, #0, 35f\n"
- "str s11, [x9, #0x0]\n"
+ "tbz x10, #0, 35f\n"
+ "str s11, [x28, #0x0]\n"
"b 35f\n"
"29:" // Height 1: Partial direct writeback: partial_2_8
- "tbz x11, #1, 30f\n"
- "str d10, [x9], #0x8\n"
- "tbz x11, #0, 35f\n"
- "st1 { v10.s }[2], [x9]\n"
+ "tbz x10, #1, 30f\n"
+ "str d10, [x28], #0x8\n"
+ "tbz x10, #0, 35f\n"
+ "st1 { v10.s }[2], [x28]\n"
"b 35f\n"
"30:" // Height 1: Partial direct writeback: partial_1_8
- "tbz x11, #0, 35f\n"
- "str s10, [x9, #0x0]\n"
+ "tbz x10, #0, 35f\n"
+ "str s10, [x28, #0x0]\n"
"b 35f\n"
"31:" // Height 1: Partial direct writeback: partial_4_0
- "tbz x11, #2, 33f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "tbz x11, #1, 32f\n"
- "str d9, [x9], #0x8\n"
- "tbz x11, #0, 35f\n"
- "st1 { v9.s }[2], [x9]\n"
+ "tbz x10, #2, 33f\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "tbz x10, #1, 32f\n"
+ "str d9, [x28], #0x8\n"
+ "tbz x10, #0, 35f\n"
+ "st1 { v9.s }[2], [x28]\n"
"b 35f\n"
"32:" // Height 1: Partial direct writeback: partial_1_4
- "tbz x11, #0, 35f\n"
- "str s9, [x9, #0x0]\n"
+ "tbz x10, #0, 35f\n"
+ "str s9, [x28, #0x0]\n"
"b 35f\n"
"33:" // Height 1: Partial direct writeback: partial_2_0
- "tbz x11, #1, 34f\n"
- "str d8, [x9], #0x8\n"
- "tbz x11, #0, 35f\n"
- "st1 { v8.s }[2], [x9]\n"
+ "tbz x10, #1, 34f\n"
+ "str d8, [x28], #0x8\n"
+ "tbz x10, #0, 35f\n"
+ "st1 { v8.s }[2], [x28]\n"
"b 35f\n"
"34:" // Height 1: Partial direct writeback: partial_1_0
- "str s8, [x9, #0x0]\n"
+ "str s8, [x28, #0x0]\n"
"35:" // Height 1: Partial direct writeback: Done
"b 37f\n"
"36:" // Height 1: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
"37:" // Height 1: Writeback done
- "subs x11, x11, #0x10\n"
+ "subs x10, x10, #0x10\n"
"bgt 2b\n"
"b 224f\n"
"38:" // Height 2
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"39:" // Height 2: Column loop
"tbz %x[flags], #0, 50f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "cmp x11, #0x10\n"
- "add x24, x9, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x10, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
"bge 48f\n"
- "tbz x11, #3, 43f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x24], #0x10\n"
- "tbz x11, #2, 41f\n"
- "ld1 { v11.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "tbz x11, #1, 40f\n"
- "ldr d16, [x9], #0x8\n"
- "ldr d15, [x24], #0x8\n"
- "mov x25, #0x38\n"
- "tbz x11, #0, 47f\n"
- "ld1 { v16.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x24]\n"
+ "tbz x10, #3, 43f\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x23], #0x10\n"
+ "tbz x10, #2, 41f\n"
+ "ld1 { v11.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "tbz x10, #1, 40f\n"
+ "mov x24, #0x38\n"
+ "ldr d16, [x28], #0x8\n"
+ "ldr d15, [x23], #0x8\n"
+ "tbz x10, #0, 47f\n"
+ "ld1 { v16.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x23]\n"
"b 47f\n"
"40:" // Height 2: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x11, #0, 47f\n"
- "ldr s16, [x9, #0x0]\n"
- "ldr s15, [x24, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x10, #0, 47f\n"
+ "ldr s16, [x28, #0x0]\n"
+ "ldr s15, [x23, #0x0]\n"
"b 47f\n"
"41:" // Height 2: Partial accumulate: partial_2_8
- "tbz x11, #1, 42f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d14, [x24], #0x8\n"
- "mov x25, #0x28\n"
- "tbz x11, #0, 47f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x24]\n"
+ "tbz x10, #1, 42f\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
+ "mov x24, #0x28\n"
+ "tbz x10, #0, 47f\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x23]\n"
"b 47f\n"
"42:" // Height 2: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x11, #0, 47f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s14, [x24, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x10, #0, 47f\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s14, [x23, #0x0]\n"
"b 47f\n"
"43:" // Height 2: Partial accumulate: partial_4_0
- "tbz x11, #2, 45f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "tbz x11, #1, 44f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d13, [x24], #0x8\n"
- "mov x25, #0x18\n"
- "tbz x11, #0, 47f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x24]\n"
+ "tbz x10, #2, 45f\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "tbz x10, #1, 44f\n"
+ "mov x24, #0x18\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d13, [x23], #0x8\n"
+ "tbz x10, #0, 47f\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x23]\n"
"b 47f\n"
"44:" // Height 2: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x11, #0, 47f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s13, [x24, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x10, #0, 47f\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s13, [x23, #0x0]\n"
"b 47f\n"
"45:" // Height 2: Partial accumulate: partial_2_0
- "tbz x11, #1, 46f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d12, [x24], #0x8\n"
- "mov x25, #0x8\n"
- "tbz x11, #0, 47f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x24]\n"
+ "tbz x10, #1, 46f\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d12, [x23], #0x8\n"
+ "mov x24, #0x8\n"
+ "tbz x10, #0, 47f\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x23]\n"
"b 47f\n"
"46:" // Height 2: Partial accumulate: partial_1_0
- "ldr s9, [x9, #0x0]\n"
- "ldr s12, [x24, #0x0]\n"
- "mov x25, #0x0\n"
+ "ldr s9, [x28, #0x0]\n"
+ "mov x24, #0x0\n"
+ "ldr s12, [x23, #0x0]\n"
"47:" // Height 2: Partial accumulate: Done
- "sub x9, x9, x25\n"
+ "sub x28, x28, x24\n"
"b 49f\n"
"48:" // Height 2: full accumulate
- "ldr q9, [x9, #0x0]\n"
- "ldr q10, [x9, #0x10]\n"
- "ldr q11, [x9, #0x20]\n"
- "ldr q16, [x9, #0x30]\n"
- "ldr q12, [x24, #0x0]\n"
- "ldr q13, [x24, #0x10]\n"
- "ldr q14, [x24, #0x20]\n"
- "ldr q15, [x24, #0x30]\n"
+ "ldr q9, [x28, #0x0]\n"
+ "ldr q10, [x28, #0x10]\n"
+ "ldr q11, [x28, #0x20]\n"
+ "ldr q16, [x28, #0x30]\n"
+ "ldr q12, [x23, #0x0]\n"
+ "ldr q13, [x23, #0x10]\n"
+ "ldr q14, [x23, #0x20]\n"
+ "ldr q15, [x23, #0x30]\n"
"49:" // Height 2: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -521,398 +525,398 @@ void a64_hybrid_s8s32_mmla_6x16 (
"movi v14.4s, #0x0\n"
"movi v15.4s, #0x0\n"
"51:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"52:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 53f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 54f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 54f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
"b 54f\n"
"53:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
"54:" // Height 2: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 57f\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q2, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q7, [x10, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "ldr q2, [x24, #0x0]\n"
+ "cmp x26, #0x20\n"
"blt 56f\n"
"55:" // Height 2: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q7, [x9, #0x0]\n"
+ "add x25, x25, #0x10\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "ldr q6, [x9, #0x10]\n"
+ "add x24, x24, #0x10\n"
".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "cmp x26, #0x20\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
+ "ldr q7, [x9, #0x40]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
+ "ldr q7, [x9, #0x60]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
+ "ldr q6, [x9, #0x70]\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x80]\n"
+ "ldr q7, [x9, #0x80]\n"
+ "ldr q2, [x24, #0x0]\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
+ "ldr q6, [x9, #0x90]\n"
".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
+ "ldr q7, [x9, #0xa0]\n"
".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
+ "ldr q6, [x9, #0xb0]\n"
".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
+ "ldr q7, [x9, #0xc0]\n"
".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
+ "ldr q6, [x9, #0xd0]\n"
".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
+ "ldr q7, [x9, #0xe0]\n"
".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
- "sub x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
- "ldr q2, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
+ "ldr q6, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
- "add x10, x10, #0x100\n"
- "ldr q7, [x10, #0x0]\n"
".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q1, [x25, #0x0]\n"
"bge 55b\n"
"56:" // Height 2: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q7, [x9, #0x0]\n"
+ "sub x26, x26, #0x10\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "ldr q6, [x9, #0x10]\n"
+ "add x25, x25, #0x10\n"
".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
+ "add x24, x24, #0x10\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
+ "ldr q7, [x9, #0x40]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
+ "ldr q6, [x9, #0x70]\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x80]\n"
+ "ldr q7, [x9, #0x80]\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
+ "ldr q6, [x9, #0x90]\n"
".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
+ "ldr q7, [x9, #0xa0]\n"
".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
+ "ldr q6, [x9, #0xb0]\n"
".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
+ "ldr q7, [x9, #0xc0]\n"
".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
+ "ldr q6, [x9, #0xd0]\n"
".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
+ "ldr q7, [x9, #0xe0]\n"
".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
+ "ldr q6, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
- "sub x27, x27, #0x10\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "add x10, x10, #0x100\n"
"57:" // Height 2: Multiply loop: Main loop skip
- "cbz x27, 64f\n"
- "cmp x27, #0x8\n"
+ "cbz x26, 64f\n"
+ "cmp x26, #0x8\n"
"blt 59f\n"
"58:" // Height 2: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d2, [x25], #0x8\n"
+ "ldr d1, [x25], #0x8\n"
+ "sub x26, x26, #0x8\n"
+ "ldr d2, [x24], #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "sub x27, x27, #0x8\n"
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "cmp x26, #0x8\n"
".inst 0x4e86a408 // smmla v8.4s, v0.16b, v6.16b\n"
+ "ldr q7, [x9, #0x10]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x4e87a40c // smmla v12.4s, v0.16b, v7.16b\n"
- "ldr q6, [x10, #0x20]\n"
- "ldr q7, [x10, #0x30]\n"
+ "ldr q7, [x9, #0x30]\n"
".inst 0x4e86a409 // smmla v9.4s, v0.16b, v6.16b\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x4e87a40d // smmla v13.4s, v0.16b, v7.16b\n"
- "ldr q6, [x10, #0x40]\n"
- "ldr q7, [x10, #0x50]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x4e86a40a // smmla v10.4s, v0.16b, v6.16b\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x4e87a40e // smmla v14.4s, v0.16b, v7.16b\n"
- "ldr q6, [x10, #0x60]\n"
- "ldr q7, [x10, #0x70]\n"
- "cmp x27, #0x8\n"
+ "ldr q7, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
".inst 0x4e86a40b // smmla v11.4s, v0.16b, v6.16b\n"
".inst 0x4e87a40f // smmla v15.4s, v0.16b, v7.16b\n"
- "add x10, x10, #0x80\n"
"bge 58b\n"
+ "cbz x26, 64f\n"
"59:" // Height 2: Multiply loop: Skip odd blocks
- "cbz x27, 64f\n"
- "tbz x27, #2, 61f\n"
- "ldr s1, [x26], #0x4\n"
- "ldr s2, [x25], #0x4\n"
- "tbz x27, #1, 60f\n"
- "ld1 { v1.h }[2], [x26], #0x2\n"
- "ld1 { v2.h }[2], [x25], #0x2\n"
- "tbz x27, #0, 63f\n"
- "ld1 { v1.b }[6], [x26]\n"
- "ld1 { v2.b }[6], [x25]\n"
+ "tbz x26, #2, 61f\n"
+ "ldr s1, [x25], #0x4\n"
+ "ldr s2, [x24], #0x4\n"
+ "tbz x26, #1, 60f\n"
+ "ld1 { v1.h }[2], [x25], #0x2\n"
+ "ld1 { v2.h }[2], [x24], #0x2\n"
+ "tbz x26, #0, 63f\n"
+ "ld1 { v1.b }[6], [x25]\n"
+ "ld1 { v2.b }[6], [x24]\n"
"b 63f\n"
"60:" // Height 2: Multiply loop: Ragged operand read: partial_1_4
- "tbz x27, #0, 63f\n"
- "ld1 { v1.b }[4], [x26]\n"
- "ld1 { v2.b }[4], [x25]\n"
+ "tbz x26, #0, 63f\n"
+ "ld1 { v1.b }[4], [x25]\n"
+ "ld1 { v2.b }[4], [x24]\n"
"b 63f\n"
"61:" // Height 2: Multiply loop: Ragged operand read: partial_2_0
- "tbz x27, #1, 62f\n"
- "ldr h1, [x26], #0x2\n"
- "ldr h2, [x25], #0x2\n"
- "tbz x27, #0, 63f\n"
- "ld1 { v1.b }[2], [x26]\n"
- "ld1 { v2.b }[2], [x25]\n"
+ "tbz x26, #1, 62f\n"
+ "ldr h1, [x25], #0x2\n"
+ "ldr h2, [x24], #0x2\n"
+ "tbz x26, #0, 63f\n"
+ "ld1 { v1.b }[2], [x25]\n"
+ "ld1 { v2.b }[2], [x24]\n"
"b 63f\n"
"62:" // Height 2: Multiply loop: Ragged operand read: partial_1_0
- "ldr b1, [x26, #0x0]\n"
- "ldr b2, [x25, #0x0]\n"
+ "ldr b1, [x25, #0x0]\n"
+ "ldr b2, [x24, #0x0]\n"
"63:" // Height 2: Multiply loop: Ragged operand read: Done
- "ldr q7, [x10, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q7, [x9, #0x0]\n"
+ "ldr q6, [x9, #0x10]\n"
".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
+ "ldr q7, [x9, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
+ "ldr q6, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
- "add x10, x10, #0x80\n"
"64:" // Height 2: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 52b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "cmp x11, #0x10\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "cmp x10, #0x10\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x28, x19, LSL #2\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
"bge 73f\n"
- "tbz x11, #3, 68f\n"
- "st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x24], #0x10\n"
- "st1 { v9.4s }, [x24], #0x10\n"
- "tbz x11, #2, 66f\n"
- "st1 { v13.4s }, [x9], #0x10\n"
- "st1 { v10.4s }, [x24], #0x10\n"
- "tbz x11, #1, 65f\n"
- "str d14, [x9], #0x8\n"
- "str d11, [x24], #0x8\n"
- "tbz x11, #0, 72f\n"
- "st1 { v14.s }[2], [x9]\n"
- "st1 { v11.s }[2], [x24]\n"
+ "tbz x10, #3, 68f\n"
+ "st1 { v7.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x23], #0x10\n"
+ "st1 { v9.4s }, [x23], #0x10\n"
+ "tbz x10, #2, 66f\n"
+ "st1 { v13.4s }, [x28], #0x10\n"
+ "st1 { v10.4s }, [x23], #0x10\n"
+ "tbz x10, #1, 65f\n"
+ "str d14, [x28], #0x8\n"
+ "str d11, [x23], #0x8\n"
+ "tbz x10, #0, 72f\n"
+ "st1 { v14.s }[2], [x28]\n"
+ "st1 { v11.s }[2], [x23]\n"
"b 72f\n"
"65:" // Height 2: Partial direct writeback: partial_1_12
- "tbz x11, #0, 72f\n"
- "str s14, [x9, #0x0]\n"
- "str s11, [x24, #0x0]\n"
+ "tbz x10, #0, 72f\n"
+ "str s14, [x28, #0x0]\n"
+ "str s11, [x23, #0x0]\n"
"b 72f\n"
"66:" // Height 2: Partial direct writeback: partial_2_8
- "tbz x11, #1, 67f\n"
- "str d13, [x9], #0x8\n"
- "str d10, [x24], #0x8\n"
- "tbz x11, #0, 72f\n"
- "st1 { v13.s }[2], [x9]\n"
- "st1 { v10.s }[2], [x24]\n"
+ "tbz x10, #1, 67f\n"
+ "str d13, [x28], #0x8\n"
+ "str d10, [x23], #0x8\n"
+ "tbz x10, #0, 72f\n"
+ "st1 { v13.s }[2], [x28]\n"
+ "st1 { v10.s }[2], [x23]\n"
"b 72f\n"
"67:" // Height 2: Partial direct writeback: partial_1_8
- "tbz x11, #0, 72f\n"
- "str s13, [x9, #0x0]\n"
- "str s10, [x24, #0x0]\n"
+ "tbz x10, #0, 72f\n"
+ "str s13, [x28, #0x0]\n"
+ "str s10, [x23, #0x0]\n"
"b 72f\n"
"68:" // Height 2: Partial direct writeback: partial_4_0
- "tbz x11, #2, 70f\n"
- "st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x24], #0x10\n"
- "tbz x11, #1, 69f\n"
- "str d12, [x9], #0x8\n"
- "str d9, [x24], #0x8\n"
- "tbz x11, #0, 72f\n"
- "st1 { v12.s }[2], [x9]\n"
- "st1 { v9.s }[2], [x24]\n"
+ "tbz x10, #2, 70f\n"
+ "st1 { v7.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x23], #0x10\n"
+ "tbz x10, #1, 69f\n"
+ "str d12, [x28], #0x8\n"
+ "str d9, [x23], #0x8\n"
+ "tbz x10, #0, 72f\n"
+ "st1 { v12.s }[2], [x28]\n"
+ "st1 { v9.s }[2], [x23]\n"
"b 72f\n"
"69:" // Height 2: Partial direct writeback: partial_1_4
- "tbz x11, #0, 72f\n"
- "str s12, [x9, #0x0]\n"
- "str s9, [x24, #0x0]\n"
+ "tbz x10, #0, 72f\n"
+ "str s12, [x28, #0x0]\n"
+ "str s9, [x23, #0x0]\n"
"b 72f\n"
"70:" // Height 2: Partial direct writeback: partial_2_0
- "tbz x11, #1, 71f\n"
- "str d7, [x9], #0x8\n"
- "str d8, [x24], #0x8\n"
- "tbz x11, #0, 72f\n"
- "st1 { v7.s }[2], [x9]\n"
- "st1 { v8.s }[2], [x24]\n"
+ "tbz x10, #1, 71f\n"
+ "str d7, [x28], #0x8\n"
+ "str d8, [x23], #0x8\n"
+ "tbz x10, #0, 72f\n"
+ "st1 { v7.s }[2], [x28]\n"
+ "st1 { v8.s }[2], [x23]\n"
"b 72f\n"
"71:" // Height 2: Partial direct writeback: partial_1_0
- "str s7, [x9, #0x0]\n"
- "str s8, [x24, #0x0]\n"
+ "str s7, [x28, #0x0]\n"
+ "str s8, [x23, #0x0]\n"
"72:" // Height 2: Partial direct writeback: Done
"b 74f\n"
"73:" // Height 2: Full writeback
- "str q7, [x9, #0x0]\n"
- "str q12, [x9, #0x10]\n"
- "str q13, [x9, #0x20]\n"
- "str q14, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q8, [x24, #0x0]\n"
- "str q9, [x24, #0x10]\n"
- "str q10, [x24, #0x20]\n"
- "str q11, [x24, #0x30]\n"
+ "str q7, [x28, #0x0]\n"
+ "str q12, [x28, #0x10]\n"
+ "str q13, [x28, #0x20]\n"
+ "str q14, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q8, [x23, #0x0]\n"
+ "str q9, [x23, #0x10]\n"
+ "str q10, [x23, #0x20]\n"
+ "str q11, [x23, #0x30]\n"
"74:" // Height 2: Writeback done
- "subs x11, x11, #0x10\n"
+ "subs x10, x10, #0x10\n"
"bgt 39b\n"
"b 224f\n"
"75:" // Height 3
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"76:" // Height 3: Column loop
"tbz %x[flags], #0, 87f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "cmp x11, #0x10\n"
- "add x23, x24, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x10, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"bge 85f\n"
- "tbz x11, #3, 80f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x24], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "tbz x11, #2, 78f\n"
- "ld1 { v11.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "ld1 { v19.4s }, [x23], #0x10\n"
- "tbz x11, #1, 77f\n"
- "ldr d16, [x9], #0x8\n"
- "ldr d15, [x24], #0x8\n"
- "mov x25, #0x38\n"
- "ldr d24, [x23], #0x8\n"
- "tbz x11, #0, 84f\n"
- "ld1 { v16.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x24]\n"
- "ld1 { v24.s }[2], [x23]\n"
+ "tbz x10, #3, 80f\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x23], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "tbz x10, #2, 78f\n"
+ "ld1 { v11.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v19.4s }, [x22], #0x10\n"
+ "tbz x10, #1, 77f\n"
+ "mov x24, #0x38\n"
+ "ldr d16, [x28], #0x8\n"
+ "ldr d15, [x23], #0x8\n"
+ "ldr d24, [x22], #0x8\n"
+ "tbz x10, #0, 84f\n"
+ "ld1 { v16.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x23]\n"
+ "ld1 { v24.s }[2], [x22]\n"
"b 84f\n"
"77:" // Height 3: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x11, #0, 84f\n"
- "ldr s16, [x9, #0x0]\n"
- "ldr s15, [x24, #0x0]\n"
- "ldr s24, [x23, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x10, #0, 84f\n"
+ "ldr s16, [x28, #0x0]\n"
+ "ldr s15, [x23, #0x0]\n"
+ "ldr s24, [x22, #0x0]\n"
"b 84f\n"
"78:" // Height 3: Partial accumulate: partial_2_8
- "tbz x11, #1, 79f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d14, [x24], #0x8\n"
- "mov x25, #0x28\n"
- "ldr d19, [x23], #0x8\n"
- "tbz x11, #0, 84f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x24]\n"
- "ld1 { v19.s }[2], [x23]\n"
+ "tbz x10, #1, 79f\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
+ "mov x24, #0x28\n"
+ "ldr d19, [x22], #0x8\n"
+ "tbz x10, #0, 84f\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x23]\n"
+ "ld1 { v19.s }[2], [x22]\n"
"b 84f\n"
"79:" // Height 3: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x11, #0, 84f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s14, [x24, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x10, #0, 84f\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s14, [x23, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
"b 84f\n"
"80:" // Height 3: Partial accumulate: partial_4_0
- "tbz x11, #2, 82f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "tbz x11, #1, 81f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d13, [x24], #0x8\n"
- "mov x25, #0x18\n"
- "ldr d18, [x23], #0x8\n"
- "tbz x11, #0, 84f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x24]\n"
- "ld1 { v18.s }[2], [x23]\n"
+ "tbz x10, #2, 82f\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "tbz x10, #1, 81f\n"
+ "mov x24, #0x18\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d13, [x23], #0x8\n"
+ "ldr d18, [x22], #0x8\n"
+ "tbz x10, #0, 84f\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x23]\n"
+ "ld1 { v18.s }[2], [x22]\n"
"b 84f\n"
"81:" // Height 3: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x11, #0, 84f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s13, [x24, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x10, #0, 84f\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s13, [x23, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
"b 84f\n"
"82:" // Height 3: Partial accumulate: partial_2_0
- "tbz x11, #1, 83f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d12, [x24], #0x8\n"
- "mov x25, #0x8\n"
- "ldr d17, [x23], #0x8\n"
- "tbz x11, #0, 84f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x24]\n"
- "ld1 { v17.s }[2], [x23]\n"
+ "tbz x10, #1, 83f\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d12, [x23], #0x8\n"
+ "mov x24, #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "tbz x10, #0, 84f\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x23]\n"
+ "ld1 { v17.s }[2], [x22]\n"
"b 84f\n"
"83:" // Height 3: Partial accumulate: partial_1_0
- "ldr s9, [x9, #0x0]\n"
- "ldr s12, [x24, #0x0]\n"
- "mov x25, #0x0\n"
- "ldr s17, [x23, #0x0]\n"
+ "ldr s9, [x28, #0x0]\n"
+ "mov x24, #0x0\n"
+ "ldr s12, [x23, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
"84:" // Height 3: Partial accumulate: Done
- "sub x9, x9, x25\n"
+ "sub x28, x28, x24\n"
"b 86f\n"
"85:" // Height 3: full accumulate
- "ldr q9, [x9, #0x0]\n"
- "ldr q10, [x9, #0x10]\n"
- "ldr q11, [x9, #0x20]\n"
- "ldr q16, [x9, #0x30]\n"
- "ldr q12, [x24, #0x0]\n"
- "ldr q13, [x24, #0x10]\n"
- "ldr q14, [x24, #0x20]\n"
- "ldr q15, [x24, #0x30]\n"
- "ldr q17, [x23, #0x0]\n"
- "ldr q18, [x23, #0x10]\n"
- "ldr q19, [x23, #0x20]\n"
- "ldr q24, [x23, #0x30]\n"
+ "ldr q9, [x28, #0x0]\n"
+ "ldr q10, [x28, #0x10]\n"
+ "ldr q11, [x28, #0x20]\n"
+ "ldr q16, [x28, #0x30]\n"
+ "ldr q12, [x23, #0x0]\n"
+ "ldr q13, [x23, #0x10]\n"
+ "ldr q14, [x23, #0x20]\n"
+ "ldr q15, [x23, #0x30]\n"
+ "ldr q17, [x22, #0x0]\n"
+ "ldr q18, [x22, #0x10]\n"
+ "ldr q19, [x22, #0x20]\n"
+ "ldr q24, [x22, #0x30]\n"
"86:" // Height 3: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -949,281 +953,285 @@ void a64_hybrid_s8s32_mmla_6x16 (
"movi v22.4s, #0x0\n"
"movi v23.4s, #0x0\n"
"88:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"89:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 90f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 91f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 91f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
"b 91f\n"
"90:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
"91:" // Height 3: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 94f\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q2, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q3, [x24, #0x0]\n"
- "ldr q7, [x10, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "cmp x26, #0x20\n"
"blt 93f\n"
"92:" // Height 3: Multiply loop: Main loop head
+ "movi v4.16b, #0x0\n"
+ "ldr q2, [x24, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x23, #0x0]\n"
+ "add x24, x24, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ "ldr q7, [x9, #0x0]\n"
+ "add x23, x23, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q6, [x9, #0x10]\n"
+ "sub x26, x26, #0x10\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "cmp x26, #0x20\n"
+ ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "trn2 v3.2d, v3.2d, v4.2d\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
+ "ldr q7, [x9, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
- "sub x27, x27, #0x10\n"
".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "add x26, x26, #0x10\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
+ "ldr q6, [x9, #0x70]\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x80]\n"
+ "ldr q7, [x9, #0x80]\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
- "cmp x27, #0x20\n"
".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
- "ldr q2, [x25, #0x0]\n"
+ "ldr q6, [x9, #0x90]\n"
".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
".inst 0x4e87a470 // smmla v16.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
+ "ldr q7, [x9, #0xa0]\n"
".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4e86a474 // smmla v20.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
+ "ldr q6, [x9, #0xb0]\n"
".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4e87a471 // smmla v17.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
+ "ldr q7, [x9, #0xc0]\n"
".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e86a475 // smmla v21.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
+ "ldr q6, [x9, #0xd0]\n"
".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
".inst 0x4e87a472 // smmla v18.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
+ "ldr q7, [x9, #0xe0]\n"
".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
".inst 0x4e86a476 // smmla v22.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q6, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
".inst 0x4e87a473 // smmla v19.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0x0]\n"
".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
- "ldr q1, [x26, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
".inst 0x4e86a477 // smmla v23.4s, v3.16b, v6.16b\n"
- "ldr q3, [x24, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
"bge 92b\n"
"93:" // Height 3: Multiply loop: Single iteration only
+ "movi v4.16b, #0x0\n"
+ "ldr q2, [x24, #0x0]\n"
+ "sub x26, x26, #0x10\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x23, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ "ldr q7, [x9, #0x0]\n"
+ "add x24, x24, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q6, [x9, #0x10]\n"
+ "add x23, x23, #0x10\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "trn2 v3.2d, v3.2d, v4.2d\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
+ "ldr q7, [x9, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
- "add x26, x26, #0x10\n"
".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
+ "ldr q6, [x9, #0x70]\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "sub x27, x27, #0x10\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x80]\n"
+ "ldr q7, [x9, #0x80]\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
+ "ldr q6, [x9, #0x90]\n"
".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4e87a470 // smmla v16.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
+ "ldr q7, [x9, #0xa0]\n"
".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e86a474 // smmla v20.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
+ "ldr q6, [x9, #0xb0]\n"
".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
".inst 0x4e87a471 // smmla v17.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
+ "ldr q7, [x9, #0xc0]\n"
".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
".inst 0x4e86a475 // smmla v21.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
+ "ldr q6, [x9, #0xd0]\n"
".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
".inst 0x4e87a472 // smmla v18.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
+ "ldr q7, [x9, #0xe0]\n"
".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
".inst 0x4e86a476 // smmla v22.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q6, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
".inst 0x4e87a473 // smmla v19.4s, v3.16b, v7.16b\n"
".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
".inst 0x4e86a477 // smmla v23.4s, v3.16b, v6.16b\n"
"94:" // Height 3: Multiply loop: Main loop skip
- "cbz x27, 101f\n"
- "cmp x27, #0x8\n"
+ "cbz x26, 101f\n"
+ "cmp x26, #0x8\n"
"blt 96f\n"
"95:" // Height 3: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d2, [x25], #0x8\n"
+ "movi v4.16b, #0x0\n"
+ "ldr d1, [x25], #0x8\n"
+ "sub x26, x26, #0x8\n"
+ "ldr d2, [x24], #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "ldr d3, [x24], #0x8\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr d3, [x23], #0x8\n"
+ "cmp x26, #0x8\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q6, [x9, #0x0]\n"
+ "ldr q7, [x9, #0x10]\n"
".inst 0x4e86a408 // smmla v8.4s, v0.16b, v6.16b\n"
- "ldr q7, [x10, #0x10]\n"
".inst 0x4e86a450 // smmla v16.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x4e87a40c // smmla v12.4s, v0.16b, v7.16b\n"
".inst 0x4e87a454 // smmla v20.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x30]\n"
+ "ldr q7, [x9, #0x30]\n"
".inst 0x4e86a409 // smmla v9.4s, v0.16b, v6.16b\n"
- "sub x27, x27, #0x8\n"
".inst 0x4e86a451 // smmla v17.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x4e87a40d // smmla v13.4s, v0.16b, v7.16b\n"
- "cmp x27, #0x8\n"
".inst 0x4e87a455 // smmla v21.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x50]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x4e86a40a // smmla v10.4s, v0.16b, v6.16b\n"
".inst 0x4e86a452 // smmla v18.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x4e87a40e // smmla v14.4s, v0.16b, v7.16b\n"
".inst 0x4e87a456 // smmla v22.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x70]\n"
+ "ldr q7, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
".inst 0x4e86a40b // smmla v11.4s, v0.16b, v6.16b\n"
- "add x10, x10, #0x80\n"
".inst 0x4e86a453 // smmla v19.4s, v2.16b, v6.16b\n"
".inst 0x4e87a40f // smmla v15.4s, v0.16b, v7.16b\n"
".inst 0x4e87a457 // smmla v23.4s, v2.16b, v7.16b\n"
"bge 95b\n"
+ "cbz x26, 101f\n"
"96:" // Height 3: Multiply loop: Skip odd blocks
- "cbz x27, 101f\n"
- "tbz x27, #2, 98f\n"
- "ldr s1, [x26], #0x4\n"
- "ldr s2, [x25], #0x4\n"
- "ldr s3, [x24], #0x4\n"
- "tbz x27, #1, 97f\n"
- "ld1 { v1.h }[2], [x26], #0x2\n"
- "ld1 { v2.h }[2], [x25], #0x2\n"
- "ld1 { v3.h }[2], [x24], #0x2\n"
- "tbz x27, #0, 100f\n"
- "ld1 { v1.b }[6], [x26]\n"
- "ld1 { v2.b }[6], [x25]\n"
- "ld1 { v3.b }[6], [x24]\n"
+ "tbz x26, #2, 98f\n"
+ "ldr s1, [x25], #0x4\n"
+ "ldr s2, [x24], #0x4\n"
+ "ldr s3, [x23], #0x4\n"
+ "tbz x26, #1, 97f\n"
+ "ld1 { v1.h }[2], [x25], #0x2\n"
+ "ld1 { v2.h }[2], [x24], #0x2\n"
+ "ld1 { v3.h }[2], [x23], #0x2\n"
+ "tbz x26, #0, 100f\n"
+ "ld1 { v1.b }[6], [x25]\n"
+ "ld1 { v2.b }[6], [x24]\n"
+ "ld1 { v3.b }[6], [x23]\n"
"b 100f\n"
"97:" // Height 3: Multiply loop: Ragged operand read: partial_1_4
- "tbz x27, #0, 100f\n"
- "ld1 { v1.b }[4], [x26]\n"
- "ld1 { v2.b }[4], [x25]\n"
- "ld1 { v3.b }[4], [x24]\n"
+ "tbz x26, #0, 100f\n"
+ "ld1 { v1.b }[4], [x25]\n"
+ "ld1 { v2.b }[4], [x24]\n"
+ "ld1 { v3.b }[4], [x23]\n"
"b 100f\n"
"98:" // Height 3: Multiply loop: Ragged operand read: partial_2_0
- "tbz x27, #1, 99f\n"
- "ldr h1, [x26], #0x2\n"
- "ldr h2, [x25], #0x2\n"
- "ldr h3, [x24], #0x2\n"
- "tbz x27, #0, 100f\n"
- "ld1 { v1.b }[2], [x26]\n"
- "ld1 { v2.b }[2], [x25]\n"
- "ld1 { v3.b }[2], [x24]\n"
+ "tbz x26, #1, 99f\n"
+ "ldr h1, [x25], #0x2\n"
+ "ldr h2, [x24], #0x2\n"
+ "ldr h3, [x23], #0x2\n"
+ "tbz x26, #0, 100f\n"
+ "ld1 { v1.b }[2], [x25]\n"
+ "ld1 { v2.b }[2], [x24]\n"
+ "ld1 { v3.b }[2], [x23]\n"
"b 100f\n"
"99:" // Height 3: Multiply loop: Ragged operand read: partial_1_0
- "ldr b1, [x26, #0x0]\n"
- "ldr b2, [x25, #0x0]\n"
- "ldr b3, [x24, #0x0]\n"
+ "ldr b1, [x25, #0x0]\n"
+ "ldr b2, [x24, #0x0]\n"
+ "ldr b3, [x23, #0x0]\n"
"100:" // Height 3: Multiply loop: Ragged operand read: Done
- "ldr q7, [x10, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
+ "movi v4.16b, #0x0\n"
+ "ldr q7, [x9, #0x0]\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q6, [x9, #0x10]\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
+ "ldr q7, [x9, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
- "add x10, x10, #0x80\n"
+ "ldr q6, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
"101:" // Height 3: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 89b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
- "cmp x11, #0x10\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "cmp x10, #0x10\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
+ "add x23, x28, x19, LSL #2\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
- "uzp1 v13.2d, v10.2d, v14.2d\n"
- "prfm pstl1keep, [x24, #0x0]\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "uzp1 v13.2d, v10.2d, v14.2d\n"
+ "add x22, x23, x19, LSL #2\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
"uzp1 v16.2d, v16.2d, v20.2d\n"
@@ -1231,234 +1239,234 @@ void a64_hybrid_s8s32_mmla_6x16 (
"uzp1 v18.2d, v18.2d, v22.2d\n"
"uzp1 v19.2d, v19.2d, v23.2d\n"
"bge 110f\n"
- "tbz x11, #3, 105f\n"
- "st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x24], #0x10\n"
- "st1 { v9.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "tbz x11, #2, 103f\n"
- "st1 { v13.4s }, [x9], #0x10\n"
- "st1 { v10.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "tbz x11, #1, 102f\n"
- "str d14, [x9], #0x8\n"
- "str d11, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
- "tbz x11, #0, 109f\n"
- "st1 { v14.s }[2], [x9]\n"
- "st1 { v11.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
+ "tbz x10, #3, 105f\n"
+ "st1 { v7.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x23], #0x10\n"
+ "st1 { v9.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
+ "tbz x10, #2, 103f\n"
+ "st1 { v13.4s }, [x28], #0x10\n"
+ "st1 { v10.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
+ "tbz x10, #1, 102f\n"
+ "str d14, [x28], #0x8\n"
+ "str d11, [x23], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "tbz x10, #0, 109f\n"
+ "st1 { v14.s }[2], [x28]\n"
+ "st1 { v11.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x22]\n"
"b 109f\n"
"102:" // Height 3: Partial direct writeback: partial_1_12
- "tbz x11, #0, 109f\n"
- "str s14, [x9, #0x0]\n"
- "str s11, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
+ "tbz x10, #0, 109f\n"
+ "str s14, [x28, #0x0]\n"
+ "str s11, [x23, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
"b 109f\n"
"103:" // Height 3: Partial direct writeback: partial_2_8
- "tbz x11, #1, 104f\n"
- "str d13, [x9], #0x8\n"
- "str d10, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
- "tbz x11, #0, 109f\n"
- "st1 { v13.s }[2], [x9]\n"
- "st1 { v10.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
+ "tbz x10, #1, 104f\n"
+ "str d13, [x28], #0x8\n"
+ "str d10, [x23], #0x8\n"
+ "str d18, [x22], #0x8\n"
+ "tbz x10, #0, 109f\n"
+ "st1 { v13.s }[2], [x28]\n"
+ "st1 { v10.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x22]\n"
"b 109f\n"
"104:" // Height 3: Partial direct writeback: partial_1_8
- "tbz x11, #0, 109f\n"
- "str s13, [x9, #0x0]\n"
- "str s10, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
+ "tbz x10, #0, 109f\n"
+ "str s13, [x28, #0x0]\n"
+ "str s10, [x23, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
"b 109f\n"
"105:" // Height 3: Partial direct writeback: partial_4_0
- "tbz x11, #2, 107f\n"
- "st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "tbz x11, #1, 106f\n"
- "str d12, [x9], #0x8\n"
- "str d9, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
- "tbz x11, #0, 109f\n"
- "st1 { v12.s }[2], [x9]\n"
- "st1 { v9.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
+ "tbz x10, #2, 107f\n"
+ "st1 { v7.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "tbz x10, #1, 106f\n"
+ "str d12, [x28], #0x8\n"
+ "str d9, [x23], #0x8\n"
+ "str d17, [x22], #0x8\n"
+ "tbz x10, #0, 109f\n"
+ "st1 { v12.s }[2], [x28]\n"
+ "st1 { v9.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x22]\n"
"b 109f\n"
"106:" // Height 3: Partial direct writeback: partial_1_4
- "tbz x11, #0, 109f\n"
- "str s12, [x9, #0x0]\n"
- "str s9, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
+ "tbz x10, #0, 109f\n"
+ "str s12, [x28, #0x0]\n"
+ "str s9, [x23, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
"b 109f\n"
"107:" // Height 3: Partial direct writeback: partial_2_0
- "tbz x11, #1, 108f\n"
- "str d7, [x9], #0x8\n"
- "str d8, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "tbz x11, #0, 109f\n"
- "st1 { v7.s }[2], [x9]\n"
- "st1 { v8.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
+ "tbz x10, #1, 108f\n"
+ "str d7, [x28], #0x8\n"
+ "str d8, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "tbz x10, #0, 109f\n"
+ "st1 { v7.s }[2], [x28]\n"
+ "st1 { v8.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x22]\n"
"b 109f\n"
"108:" // Height 3: Partial direct writeback: partial_1_0
- "str s7, [x9, #0x0]\n"
- "str s8, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
+ "str s7, [x28, #0x0]\n"
+ "str s8, [x23, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
"109:" // Height 3: Partial direct writeback: Done
"b 111f\n"
"110:" // Height 3: Full writeback
- "str q7, [x9, #0x0]\n"
- "str q12, [x9, #0x10]\n"
- "str q13, [x9, #0x20]\n"
- "str q14, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q8, [x24, #0x0]\n"
- "str q9, [x24, #0x10]\n"
- "str q10, [x24, #0x20]\n"
- "str q11, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
+ "str q7, [x28, #0x0]\n"
+ "str q12, [x28, #0x10]\n"
+ "str q13, [x28, #0x20]\n"
+ "str q14, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q8, [x23, #0x0]\n"
+ "str q9, [x23, #0x10]\n"
+ "str q10, [x23, #0x20]\n"
+ "str q11, [x23, #0x30]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q17, [x22, #0x10]\n"
+ "str q18, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
"111:" // Height 3: Writeback done
- "subs x11, x11, #0x10\n"
+ "subs x10, x10, #0x10\n"
"bgt 76b\n"
"b 224f\n"
"112:" // Height 4
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"113:" // Height 4: Column loop
"tbz %x[flags], #0, 124f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "cmp x11, #0x10\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x10, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"bge 122f\n"
- "tbz x11, #3, 117f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x24], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "tbz x11, #2, 115f\n"
- "ld1 { v11.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "ld1 { v19.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "tbz x11, #1, 114f\n"
- "ldr d16, [x9], #0x8\n"
- "ldr d15, [x24], #0x8\n"
- "mov x25, #0x38\n"
- "ldr d24, [x23], #0x8\n"
- "ldr d23, [x22], #0x8\n"
- "tbz x11, #0, 121f\n"
- "ld1 { v16.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x24]\n"
- "ld1 { v24.s }[2], [x23]\n"
- "ld1 { v23.s }[2], [x22]\n"
+ "tbz x10, #3, 117f\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x23], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "tbz x10, #2, 115f\n"
+ "ld1 { v11.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v19.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
+ "tbz x10, #1, 114f\n"
+ "mov x24, #0x38\n"
+ "ldr d16, [x28], #0x8\n"
+ "ldr d15, [x23], #0x8\n"
+ "ldr d24, [x22], #0x8\n"
+ "ldr d23, [x21], #0x8\n"
+ "tbz x10, #0, 121f\n"
+ "ld1 { v16.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x23]\n"
+ "ld1 { v24.s }[2], [x22]\n"
+ "ld1 { v23.s }[2], [x21]\n"
"b 121f\n"
"114:" // Height 4: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x11, #0, 121f\n"
- "ldr s16, [x9, #0x0]\n"
- "ldr s15, [x24, #0x0]\n"
- "ldr s24, [x23, #0x0]\n"
- "ldr s23, [x22, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x10, #0, 121f\n"
+ "ldr s16, [x28, #0x0]\n"
+ "ldr s15, [x23, #0x0]\n"
+ "ldr s24, [x22, #0x0]\n"
+ "ldr s23, [x21, #0x0]\n"
"b 121f\n"
"115:" // Height 4: Partial accumulate: partial_2_8
- "tbz x11, #1, 116f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d14, [x24], #0x8\n"
- "mov x25, #0x28\n"
- "ldr d19, [x23], #0x8\n"
- "ldr d22, [x22], #0x8\n"
- "tbz x11, #0, 121f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x24]\n"
- "ld1 { v19.s }[2], [x23]\n"
- "ld1 { v22.s }[2], [x22]\n"
+ "tbz x10, #1, 116f\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
+ "mov x24, #0x28\n"
+ "ldr d19, [x22], #0x8\n"
+ "ldr d22, [x21], #0x8\n"
+ "tbz x10, #0, 121f\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x23]\n"
+ "ld1 { v19.s }[2], [x22]\n"
+ "ld1 { v22.s }[2], [x21]\n"
"b 121f\n"
"116:" // Height 4: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x11, #0, 121f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s14, [x24, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s22, [x22, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x10, #0, 121f\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s14, [x23, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
+ "ldr s22, [x21, #0x0]\n"
"b 121f\n"
"117:" // Height 4: Partial accumulate: partial_4_0
- "tbz x11, #2, 119f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "tbz x11, #1, 118f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d13, [x24], #0x8\n"
- "mov x25, #0x18\n"
- "ldr d18, [x23], #0x8\n"
- "ldr d21, [x22], #0x8\n"
- "tbz x11, #0, 121f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x24]\n"
- "ld1 { v18.s }[2], [x23]\n"
- "ld1 { v21.s }[2], [x22]\n"
+ "tbz x10, #2, 119f\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "tbz x10, #1, 118f\n"
+ "mov x24, #0x18\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d13, [x23], #0x8\n"
+ "ldr d18, [x22], #0x8\n"
+ "ldr d21, [x21], #0x8\n"
+ "tbz x10, #0, 121f\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x23]\n"
+ "ld1 { v18.s }[2], [x22]\n"
+ "ld1 { v21.s }[2], [x21]\n"
"b 121f\n"
"118:" // Height 4: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x11, #0, 121f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s13, [x24, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
- "ldr s21, [x22, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x10, #0, 121f\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s13, [x23, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
+ "ldr s21, [x21, #0x0]\n"
"b 121f\n"
"119:" // Height 4: Partial accumulate: partial_2_0
- "tbz x11, #1, 120f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d12, [x24], #0x8\n"
- "mov x25, #0x8\n"
- "ldr d17, [x23], #0x8\n"
- "ldr d20, [x22], #0x8\n"
- "tbz x11, #0, 121f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x24]\n"
- "ld1 { v17.s }[2], [x23]\n"
- "ld1 { v20.s }[2], [x22]\n"
+ "tbz x10, #1, 120f\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d12, [x23], #0x8\n"
+ "mov x24, #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "ldr d20, [x21], #0x8\n"
+ "tbz x10, #0, 121f\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x23]\n"
+ "ld1 { v17.s }[2], [x22]\n"
+ "ld1 { v20.s }[2], [x21]\n"
"b 121f\n"
"120:" // Height 4: Partial accumulate: partial_1_0
- "ldr s9, [x9, #0x0]\n"
- "ldr s12, [x24, #0x0]\n"
- "mov x25, #0x0\n"
- "ldr s17, [x23, #0x0]\n"
- "ldr s20, [x22, #0x0]\n"
+ "ldr s9, [x28, #0x0]\n"
+ "mov x24, #0x0\n"
+ "ldr s12, [x23, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
+ "ldr s20, [x21, #0x0]\n"
"121:" // Height 4: Partial accumulate: Done
- "sub x9, x9, x25\n"
+ "sub x28, x28, x24\n"
"b 123f\n"
"122:" // Height 4: full accumulate
- "ldr q9, [x9, #0x0]\n"
- "ldr q10, [x9, #0x10]\n"
- "ldr q11, [x9, #0x20]\n"
- "ldr q16, [x9, #0x30]\n"
- "ldr q12, [x24, #0x0]\n"
- "ldr q13, [x24, #0x10]\n"
- "ldr q14, [x24, #0x20]\n"
- "ldr q15, [x24, #0x30]\n"
- "ldr q17, [x23, #0x0]\n"
- "ldr q18, [x23, #0x10]\n"
- "ldr q19, [x23, #0x20]\n"
- "ldr q24, [x23, #0x30]\n"
- "ldr q20, [x22, #0x0]\n"
- "ldr q21, [x22, #0x10]\n"
- "ldr q22, [x22, #0x20]\n"
- "ldr q23, [x22, #0x30]\n"
+ "ldr q9, [x28, #0x0]\n"
+ "ldr q10, [x28, #0x10]\n"
+ "ldr q11, [x28, #0x20]\n"
+ "ldr q16, [x28, #0x30]\n"
+ "ldr q12, [x23, #0x0]\n"
+ "ldr q13, [x23, #0x10]\n"
+ "ldr q14, [x23, #0x20]\n"
+ "ldr q15, [x23, #0x30]\n"
+ "ldr q17, [x22, #0x0]\n"
+ "ldr q18, [x22, #0x10]\n"
+ "ldr q19, [x22, #0x20]\n"
+ "ldr q24, [x22, #0x30]\n"
+ "ldr q20, [x21, #0x0]\n"
+ "ldr q21, [x21, #0x10]\n"
+ "ldr q22, [x21, #0x20]\n"
+ "ldr q23, [x21, #0x30]\n"
"123:" // Height 4: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -1495,301 +1503,301 @@ void a64_hybrid_s8s32_mmla_6x16 (
"movi v22.4s, #0x0\n"
"movi v23.4s, #0x0\n"
"125:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"126:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 127f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 128f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 128f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
"b 128f\n"
"127:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
"128:" // Height 4: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 131f\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q2, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q3, [x24, #0x0]\n"
- "ldr q4, [x23, #0x0]\n"
- "ldr q7, [x10, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "ldr q2, [x24, #0x0]\n"
+ "cmp x26, #0x20\n"
"blt 130f\n"
"129:" // Height 4: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x23, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
- "sub x27, x27, #0x10\n"
+ "ldr q4, [x22, #0x0]\n"
+ "add x24, x24, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q7, [x9, #0x0]\n"
+ "add x23, x23, #0x10\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "ldr q6, [x9, #0x10]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
+ "cmp x26, #0x20\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "trn2 v3.2d, v3.2d, v4.2d\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
+ "ldr q7, [x9, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
- "add x26, x26, #0x10\n"
".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
+ "ldr q6, [x9, #0x70]\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "add x23, x23, #0x10\n"
- "ldr q4, [x23, #0x0]\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x80]\n"
+ "ldr q7, [x9, #0x80]\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
- "ldr q2, [x25, #0x0]\n"
+ "ldr q6, [x9, #0x90]\n"
+ "ldr q2, [x24, #0x0]\n"
".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
".inst 0x4e87a470 // smmla v16.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
+ "ldr q7, [x9, #0xa0]\n"
".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
- "cmp x27, #0x20\n"
".inst 0x4e86a474 // smmla v20.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
+ "ldr q6, [x9, #0xb0]\n"
".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4e87a471 // smmla v17.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
+ "ldr q7, [x9, #0xc0]\n"
".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4e86a475 // smmla v21.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
+ "ldr q6, [x9, #0xd0]\n"
".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e87a472 // smmla v18.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
+ "ldr q7, [x9, #0xe0]\n"
".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e86a476 // smmla v22.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q6, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
".inst 0x4e87a473 // smmla v19.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0x0]\n"
".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
- "ldr q1, [x26, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
".inst 0x4e86a477 // smmla v23.4s, v3.16b, v6.16b\n"
- "ldr q3, [x24, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
"bge 129b\n"
"130:" // Height 4: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x23, #0x0]\n"
+ "sub x26, x26, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
- "add x26, x26, #0x10\n"
+ "ldr q4, [x22, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q7, [x9, #0x0]\n"
+ "add x24, x24, #0x10\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "ldr q6, [x9, #0x10]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x22, x22, #0x10\n"
".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "trn2 v3.2d, v3.2d, v4.2d\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
+ "ldr q7, [x9, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
- "add x23, x23, #0x10\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
+ "ldr q6, [x9, #0x70]\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "sub x27, x27, #0x10\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x80]\n"
+ "ldr q7, [x9, #0x80]\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
+ "ldr q6, [x9, #0x90]\n"
".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4e87a470 // smmla v16.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
+ "ldr q7, [x9, #0xa0]\n"
".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e86a474 // smmla v20.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
+ "ldr q6, [x9, #0xb0]\n"
".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e87a471 // smmla v17.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
+ "ldr q7, [x9, #0xc0]\n"
".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
".inst 0x4e86a475 // smmla v21.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
+ "ldr q6, [x9, #0xd0]\n"
".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
".inst 0x4e87a472 // smmla v18.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
+ "ldr q7, [x9, #0xe0]\n"
".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
".inst 0x4e86a476 // smmla v22.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q6, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
".inst 0x4e87a473 // smmla v19.4s, v3.16b, v7.16b\n"
".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
".inst 0x4e86a477 // smmla v23.4s, v3.16b, v6.16b\n"
"131:" // Height 4: Multiply loop: Main loop skip
- "cbz x27, 138f\n"
- "cmp x27, #0x8\n"
+ "cbz x26, 138f\n"
+ "cmp x26, #0x8\n"
"blt 133f\n"
"132:" // Height 4: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d2, [x25], #0x8\n"
+ "ldr d1, [x25], #0x8\n"
+ "sub x26, x26, #0x8\n"
+ "ldr d2, [x24], #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "sub x27, x27, #0x8\n"
- "ldr d3, [x24], #0x8\n"
- "ldr d4, [x23], #0x8\n"
+ "ldr d3, [x23], #0x8\n"
+ "cmp x26, #0x8\n"
+ "ldr d4, [x22], #0x8\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
- "cmp x27, #0x8\n"
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "ldr q7, [x9, #0x10]\n"
".inst 0x4e86a408 // smmla v8.4s, v0.16b, v6.16b\n"
".inst 0x4e86a450 // smmla v16.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x4e87a40c // smmla v12.4s, v0.16b, v7.16b\n"
".inst 0x4e87a454 // smmla v20.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x30]\n"
+ "ldr q7, [x9, #0x30]\n"
".inst 0x4e86a409 // smmla v9.4s, v0.16b, v6.16b\n"
".inst 0x4e86a451 // smmla v17.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x4e87a40d // smmla v13.4s, v0.16b, v7.16b\n"
".inst 0x4e87a455 // smmla v21.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x50]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x4e86a40a // smmla v10.4s, v0.16b, v6.16b\n"
".inst 0x4e86a452 // smmla v18.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x4e87a40e // smmla v14.4s, v0.16b, v7.16b\n"
".inst 0x4e87a456 // smmla v22.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x70]\n"
- "add x10, x10, #0x80\n"
+ "ldr q7, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
".inst 0x4e86a40b // smmla v11.4s, v0.16b, v6.16b\n"
".inst 0x4e86a453 // smmla v19.4s, v2.16b, v6.16b\n"
".inst 0x4e87a40f // smmla v15.4s, v0.16b, v7.16b\n"
".inst 0x4e87a457 // smmla v23.4s, v2.16b, v7.16b\n"
"bge 132b\n"
+ "cbz x26, 138f\n"
"133:" // Height 4: Multiply loop: Skip odd blocks
- "cbz x27, 138f\n"
- "tbz x27, #2, 135f\n"
- "ldr s1, [x26], #0x4\n"
- "ldr s2, [x25], #0x4\n"
- "ldr s3, [x24], #0x4\n"
- "ldr s4, [x23], #0x4\n"
- "tbz x27, #1, 134f\n"
- "ld1 { v1.h }[2], [x26], #0x2\n"
- "ld1 { v2.h }[2], [x25], #0x2\n"
- "ld1 { v3.h }[2], [x24], #0x2\n"
- "ld1 { v4.h }[2], [x23], #0x2\n"
- "tbz x27, #0, 137f\n"
- "ld1 { v1.b }[6], [x26]\n"
- "ld1 { v2.b }[6], [x25]\n"
- "ld1 { v3.b }[6], [x24]\n"
- "ld1 { v4.b }[6], [x23]\n"
+ "tbz x26, #2, 135f\n"
+ "ldr s1, [x25], #0x4\n"
+ "ldr s2, [x24], #0x4\n"
+ "ldr s3, [x23], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
+ "tbz x26, #1, 134f\n"
+ "ld1 { v1.h }[2], [x25], #0x2\n"
+ "ld1 { v2.h }[2], [x24], #0x2\n"
+ "ld1 { v3.h }[2], [x23], #0x2\n"
+ "ld1 { v4.h }[2], [x22], #0x2\n"
+ "tbz x26, #0, 137f\n"
+ "ld1 { v1.b }[6], [x25]\n"
+ "ld1 { v2.b }[6], [x24]\n"
+ "ld1 { v3.b }[6], [x23]\n"
+ "ld1 { v4.b }[6], [x22]\n"
"b 137f\n"
"134:" // Height 4: Multiply loop: Ragged operand read: partial_1_4
- "tbz x27, #0, 137f\n"
- "ld1 { v1.b }[4], [x26]\n"
- "ld1 { v2.b }[4], [x25]\n"
- "ld1 { v3.b }[4], [x24]\n"
- "ld1 { v4.b }[4], [x23]\n"
+ "tbz x26, #0, 137f\n"
+ "ld1 { v1.b }[4], [x25]\n"
+ "ld1 { v2.b }[4], [x24]\n"
+ "ld1 { v3.b }[4], [x23]\n"
+ "ld1 { v4.b }[4], [x22]\n"
"b 137f\n"
"135:" // Height 4: Multiply loop: Ragged operand read: partial_2_0
- "tbz x27, #1, 136f\n"
- "ldr h1, [x26], #0x2\n"
- "ldr h2, [x25], #0x2\n"
- "ldr h3, [x24], #0x2\n"
- "ldr h4, [x23], #0x2\n"
- "tbz x27, #0, 137f\n"
- "ld1 { v1.b }[2], [x26]\n"
- "ld1 { v2.b }[2], [x25]\n"
- "ld1 { v3.b }[2], [x24]\n"
- "ld1 { v4.b }[2], [x23]\n"
+ "tbz x26, #1, 136f\n"
+ "ldr h1, [x25], #0x2\n"
+ "ldr h2, [x24], #0x2\n"
+ "ldr h3, [x23], #0x2\n"
+ "ldr h4, [x22], #0x2\n"
+ "tbz x26, #0, 137f\n"
+ "ld1 { v1.b }[2], [x25]\n"
+ "ld1 { v2.b }[2], [x24]\n"
+ "ld1 { v3.b }[2], [x23]\n"
+ "ld1 { v4.b }[2], [x22]\n"
"b 137f\n"
"136:" // Height 4: Multiply loop: Ragged operand read: partial_1_0
- "ldr b1, [x26, #0x0]\n"
- "ldr b2, [x25, #0x0]\n"
- "ldr b3, [x24, #0x0]\n"
- "ldr b4, [x23, #0x0]\n"
+ "ldr b1, [x25, #0x0]\n"
+ "ldr b2, [x24, #0x0]\n"
+ "ldr b3, [x23, #0x0]\n"
+ "ldr b4, [x22, #0x0]\n"
"137:" // Height 4: Multiply loop: Ragged operand read: Done
- "ldr q7, [x10, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q7, [x9, #0x0]\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q6, [x9, #0x10]\n"
".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
+ "ldr q7, [x9, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
- "add x10, x10, #0x80\n"
+ "ldr q6, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
"138:" // Height 4: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 126b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
- "add x22, x23, x20, LSL #2\n"
- "cmp x11, #0x10\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "cmp x10, #0x10\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
+ "add x23, x28, x19, LSL #2\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
- "uzp1 v14.2d, v11.2d, v15.2d\n"
- "prfm pstl1keep, [x23, #0x0]\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "uzp1 v14.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
"uzp1 v15.2d, v16.2d, v20.2d\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
@@ -1800,275 +1808,275 @@ void a64_hybrid_s8s32_mmla_6x16 (
"uzp1 v22.2d, v19.2d, v23.2d\n"
"uzp2 v19.2d, v19.2d, v23.2d\n"
"bge 147f\n"
- "tbz x11, #3, 142f\n"
- "st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x24], #0x10\n"
- "st1 { v9.4s }, [x24], #0x10\n"
- "st1 { v15.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v16.4s }, [x22], #0x10\n"
- "st1 { v17.4s }, [x22], #0x10\n"
- "tbz x11, #2, 140f\n"
- "st1 { v13.4s }, [x9], #0x10\n"
- "st1 { v10.4s }, [x24], #0x10\n"
- "st1 { v21.4s }, [x23], #0x10\n"
- "st1 { v18.4s }, [x22], #0x10\n"
- "tbz x11, #1, 139f\n"
- "str d14, [x9], #0x8\n"
- "str d11, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
- "str d19, [x22], #0x8\n"
- "tbz x11, #0, 146f\n"
- "st1 { v14.s }[2], [x9]\n"
- "st1 { v11.s }[2], [x24]\n"
- "st1 { v22.s }[2], [x23]\n"
- "st1 { v19.s }[2], [x22]\n"
+ "tbz x10, #3, 142f\n"
+ "st1 { v7.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x23], #0x10\n"
+ "st1 { v9.4s }, [x23], #0x10\n"
+ "st1 { v15.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v16.4s }, [x21], #0x10\n"
+ "st1 { v17.4s }, [x21], #0x10\n"
+ "tbz x10, #2, 140f\n"
+ "st1 { v13.4s }, [x28], #0x10\n"
+ "st1 { v10.4s }, [x23], #0x10\n"
+ "st1 { v21.4s }, [x22], #0x10\n"
+ "st1 { v18.4s }, [x21], #0x10\n"
+ "tbz x10, #1, 139f\n"
+ "str d14, [x28], #0x8\n"
+ "str d11, [x23], #0x8\n"
+ "str d22, [x22], #0x8\n"
+ "str d19, [x21], #0x8\n"
+ "tbz x10, #0, 146f\n"
+ "st1 { v14.s }[2], [x28]\n"
+ "st1 { v11.s }[2], [x23]\n"
+ "st1 { v22.s }[2], [x22]\n"
+ "st1 { v19.s }[2], [x21]\n"
"b 146f\n"
"139:" // Height 4: Partial direct writeback: partial_1_12
- "tbz x11, #0, 146f\n"
- "str s14, [x9, #0x0]\n"
- "str s11, [x24, #0x0]\n"
- "str s22, [x23, #0x0]\n"
- "str s19, [x22, #0x0]\n"
+ "tbz x10, #0, 146f\n"
+ "str s14, [x28, #0x0]\n"
+ "str s11, [x23, #0x0]\n"
+ "str s22, [x22, #0x0]\n"
+ "str s19, [x21, #0x0]\n"
"b 146f\n"
"140:" // Height 4: Partial direct writeback: partial_2_8
- "tbz x11, #1, 141f\n"
- "str d13, [x9], #0x8\n"
- "str d10, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
- "str d18, [x22], #0x8\n"
- "tbz x11, #0, 146f\n"
- "st1 { v13.s }[2], [x9]\n"
- "st1 { v10.s }[2], [x24]\n"
- "st1 { v21.s }[2], [x23]\n"
- "st1 { v18.s }[2], [x22]\n"
+ "tbz x10, #1, 141f\n"
+ "str d13, [x28], #0x8\n"
+ "str d10, [x23], #0x8\n"
+ "str d21, [x22], #0x8\n"
+ "str d18, [x21], #0x8\n"
+ "tbz x10, #0, 146f\n"
+ "st1 { v13.s }[2], [x28]\n"
+ "st1 { v10.s }[2], [x23]\n"
+ "st1 { v21.s }[2], [x22]\n"
+ "st1 { v18.s }[2], [x21]\n"
"b 146f\n"
"141:" // Height 4: Partial direct writeback: partial_1_8
- "tbz x11, #0, 146f\n"
- "str s13, [x9, #0x0]\n"
- "str s10, [x24, #0x0]\n"
- "str s21, [x23, #0x0]\n"
- "str s18, [x22, #0x0]\n"
+ "tbz x10, #0, 146f\n"
+ "str s13, [x28, #0x0]\n"
+ "str s10, [x23, #0x0]\n"
+ "str s21, [x22, #0x0]\n"
+ "str s18, [x21, #0x0]\n"
"b 146f\n"
"142:" // Height 4: Partial direct writeback: partial_4_0
- "tbz x11, #2, 144f\n"
- "st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x24], #0x10\n"
- "st1 { v15.4s }, [x23], #0x10\n"
- "st1 { v16.4s }, [x22], #0x10\n"
- "tbz x11, #1, 143f\n"
- "str d12, [x9], #0x8\n"
- "str d9, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d17, [x22], #0x8\n"
- "tbz x11, #0, 146f\n"
- "st1 { v12.s }[2], [x9]\n"
- "st1 { v9.s }[2], [x24]\n"
- "st1 { v20.s }[2], [x23]\n"
- "st1 { v17.s }[2], [x22]\n"
+ "tbz x10, #2, 144f\n"
+ "st1 { v7.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x23], #0x10\n"
+ "st1 { v15.4s }, [x22], #0x10\n"
+ "st1 { v16.4s }, [x21], #0x10\n"
+ "tbz x10, #1, 143f\n"
+ "str d12, [x28], #0x8\n"
+ "str d9, [x23], #0x8\n"
+ "str d20, [x22], #0x8\n"
+ "str d17, [x21], #0x8\n"
+ "tbz x10, #0, 146f\n"
+ "st1 { v12.s }[2], [x28]\n"
+ "st1 { v9.s }[2], [x23]\n"
+ "st1 { v20.s }[2], [x22]\n"
+ "st1 { v17.s }[2], [x21]\n"
"b 146f\n"
"143:" // Height 4: Partial direct writeback: partial_1_4
- "tbz x11, #0, 146f\n"
- "str s12, [x9, #0x0]\n"
- "str s9, [x24, #0x0]\n"
- "str s20, [x23, #0x0]\n"
- "str s17, [x22, #0x0]\n"
+ "tbz x10, #0, 146f\n"
+ "str s12, [x28, #0x0]\n"
+ "str s9, [x23, #0x0]\n"
+ "str s20, [x22, #0x0]\n"
+ "str s17, [x21, #0x0]\n"
"b 146f\n"
"144:" // Height 4: Partial direct writeback: partial_2_0
- "tbz x11, #1, 145f\n"
- "str d7, [x9], #0x8\n"
- "str d8, [x24], #0x8\n"
- "str d15, [x23], #0x8\n"
- "str d16, [x22], #0x8\n"
- "tbz x11, #0, 146f\n"
- "st1 { v7.s }[2], [x9]\n"
- "st1 { v8.s }[2], [x24]\n"
- "st1 { v15.s }[2], [x23]\n"
- "st1 { v16.s }[2], [x22]\n"
+ "tbz x10, #1, 145f\n"
+ "str d7, [x28], #0x8\n"
+ "str d8, [x23], #0x8\n"
+ "str d15, [x22], #0x8\n"
+ "str d16, [x21], #0x8\n"
+ "tbz x10, #0, 146f\n"
+ "st1 { v7.s }[2], [x28]\n"
+ "st1 { v8.s }[2], [x23]\n"
+ "st1 { v15.s }[2], [x22]\n"
+ "st1 { v16.s }[2], [x21]\n"
"b 146f\n"
"145:" // Height 4: Partial direct writeback: partial_1_0
- "str s7, [x9, #0x0]\n"
- "str s8, [x24, #0x0]\n"
- "str s15, [x23, #0x0]\n"
- "str s16, [x22, #0x0]\n"
+ "str s7, [x28, #0x0]\n"
+ "str s8, [x23, #0x0]\n"
+ "str s15, [x22, #0x0]\n"
+ "str s16, [x21, #0x0]\n"
"146:" // Height 4: Partial direct writeback: Done
"b 148f\n"
"147:" // Height 4: Full writeback
- "str q7, [x9, #0x0]\n"
- "str q12, [x9, #0x10]\n"
- "str q13, [x9, #0x20]\n"
- "str q14, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q8, [x24, #0x0]\n"
- "str q9, [x24, #0x10]\n"
- "str q10, [x24, #0x20]\n"
- "str q11, [x24, #0x30]\n"
- "str q15, [x23, #0x0]\n"
- "str q20, [x23, #0x10]\n"
- "str q21, [x23, #0x20]\n"
- "str q22, [x23, #0x30]\n"
- "str q16, [x22, #0x0]\n"
- "str q17, [x22, #0x10]\n"
- "str q18, [x22, #0x20]\n"
- "str q19, [x22, #0x30]\n"
+ "str q7, [x28, #0x0]\n"
+ "str q12, [x28, #0x10]\n"
+ "str q13, [x28, #0x20]\n"
+ "str q14, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q8, [x23, #0x0]\n"
+ "str q9, [x23, #0x10]\n"
+ "str q10, [x23, #0x20]\n"
+ "str q11, [x23, #0x30]\n"
+ "str q15, [x22, #0x0]\n"
+ "str q20, [x22, #0x10]\n"
+ "str q21, [x22, #0x20]\n"
+ "str q22, [x22, #0x30]\n"
+ "str q16, [x21, #0x0]\n"
+ "str q17, [x21, #0x10]\n"
+ "str q18, [x21, #0x20]\n"
+ "str q19, [x21, #0x30]\n"
"148:" // Height 4: Writeback done
- "subs x11, x11, #0x10\n"
+ "subs x10, x10, #0x10\n"
"bgt 113b\n"
"b 224f\n"
"149:" // Height 5
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"150:" // Height 5: Column loop
"tbz %x[flags], #0, 161f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "cmp x11, #0x10\n"
- "add x21, x22, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x10, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"bge 159f\n"
- "tbz x11, #3, 154f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v25.4s }, [x21], #0x10\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x24], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
- "tbz x11, #2, 152f\n"
- "ld1 { v11.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "ld1 { v19.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "ld1 { v27.4s }, [x21], #0x10\n"
- "tbz x11, #1, 151f\n"
- "ldr d16, [x9], #0x8\n"
- "ldr d15, [x24], #0x8\n"
- "mov x25, #0x38\n"
- "ldr d24, [x23], #0x8\n"
- "ldr d23, [x22], #0x8\n"
- "ldr d6, [x21], #0x8\n"
- "tbz x11, #0, 158f\n"
- "ld1 { v16.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x24]\n"
- "ld1 { v24.s }[2], [x23]\n"
- "ld1 { v23.s }[2], [x22]\n"
- "ld1 { v6.s }[2], [x21]\n"
+ "tbz x10, #3, 154f\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v25.4s }, [x20], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x23], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "ld1 { v26.4s }, [x20], #0x10\n"
+ "tbz x10, #2, 152f\n"
+ "ld1 { v11.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v19.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
+ "ld1 { v27.4s }, [x20], #0x10\n"
+ "tbz x10, #1, 151f\n"
+ "ldr d16, [x28], #0x8\n"
+ "mov x24, #0x38\n"
+ "ldr d15, [x23], #0x8\n"
+ "ldr d24, [x22], #0x8\n"
+ "ldr d23, [x21], #0x8\n"
+ "ldr d6, [x20], #0x8\n"
+ "tbz x10, #0, 158f\n"
+ "ld1 { v16.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x23]\n"
+ "ld1 { v24.s }[2], [x22]\n"
+ "ld1 { v23.s }[2], [x21]\n"
+ "ld1 { v6.s }[2], [x20]\n"
"b 158f\n"
"151:" // Height 5: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x11, #0, 158f\n"
- "ldr s16, [x9, #0x0]\n"
- "ldr s15, [x24, #0x0]\n"
- "ldr s24, [x23, #0x0]\n"
- "ldr s23, [x22, #0x0]\n"
- "ldr s6, [x21, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x10, #0, 158f\n"
+ "ldr s16, [x28, #0x0]\n"
+ "ldr s15, [x23, #0x0]\n"
+ "ldr s24, [x22, #0x0]\n"
+ "ldr s23, [x21, #0x0]\n"
+ "ldr s6, [x20, #0x0]\n"
"b 158f\n"
"152:" // Height 5: Partial accumulate: partial_2_8
- "tbz x11, #1, 153f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d14, [x24], #0x8\n"
- "mov x25, #0x28\n"
- "ldr d19, [x23], #0x8\n"
- "ldr d22, [x22], #0x8\n"
- "ldr d27, [x21], #0x8\n"
- "tbz x11, #0, 158f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x24]\n"
- "ld1 { v19.s }[2], [x23]\n"
- "ld1 { v22.s }[2], [x22]\n"
- "ld1 { v27.s }[2], [x21]\n"
+ "tbz x10, #1, 153f\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
+ "mov x24, #0x28\n"
+ "ldr d19, [x22], #0x8\n"
+ "ldr d22, [x21], #0x8\n"
+ "ldr d27, [x20], #0x8\n"
+ "tbz x10, #0, 158f\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x23]\n"
+ "ld1 { v19.s }[2], [x22]\n"
+ "ld1 { v22.s }[2], [x21]\n"
+ "ld1 { v27.s }[2], [x20]\n"
"b 158f\n"
"153:" // Height 5: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x11, #0, 158f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s14, [x24, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s22, [x22, #0x0]\n"
- "ldr s27, [x21, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x10, #0, 158f\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s14, [x23, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
+ "ldr s22, [x21, #0x0]\n"
+ "ldr s27, [x20, #0x0]\n"
"b 158f\n"
"154:" // Height 5: Partial accumulate: partial_4_0
- "tbz x11, #2, 156f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v25.4s }, [x21], #0x10\n"
- "tbz x11, #1, 155f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d13, [x24], #0x8\n"
- "mov x25, #0x18\n"
- "ldr d18, [x23], #0x8\n"
- "ldr d21, [x22], #0x8\n"
- "ldr d26, [x21], #0x8\n"
- "tbz x11, #0, 158f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x24]\n"
- "ld1 { v18.s }[2], [x23]\n"
- "ld1 { v21.s }[2], [x22]\n"
- "ld1 { v26.s }[2], [x21]\n"
+ "tbz x10, #2, 156f\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v25.4s }, [x20], #0x10\n"
+ "tbz x10, #1, 155f\n"
+ "ldr d10, [x28], #0x8\n"
+ "mov x24, #0x18\n"
+ "ldr d13, [x23], #0x8\n"
+ "ldr d18, [x22], #0x8\n"
+ "ldr d21, [x21], #0x8\n"
+ "ldr d26, [x20], #0x8\n"
+ "tbz x10, #0, 158f\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x23]\n"
+ "ld1 { v18.s }[2], [x22]\n"
+ "ld1 { v21.s }[2], [x21]\n"
+ "ld1 { v26.s }[2], [x20]\n"
"b 158f\n"
"155:" // Height 5: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x11, #0, 158f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s13, [x24, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
- "ldr s21, [x22, #0x0]\n"
- "ldr s26, [x21, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x10, #0, 158f\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s13, [x23, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
+ "ldr s21, [x21, #0x0]\n"
+ "ldr s26, [x20, #0x0]\n"
"b 158f\n"
"156:" // Height 5: Partial accumulate: partial_2_0
- "tbz x11, #1, 157f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d12, [x24], #0x8\n"
- "mov x25, #0x8\n"
- "ldr d17, [x23], #0x8\n"
- "ldr d20, [x22], #0x8\n"
- "ldr d25, [x21], #0x8\n"
- "tbz x11, #0, 158f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x24]\n"
- "ld1 { v17.s }[2], [x23]\n"
- "ld1 { v20.s }[2], [x22]\n"
- "ld1 { v25.s }[2], [x21]\n"
+ "tbz x10, #1, 157f\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d12, [x23], #0x8\n"
+ "mov x24, #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "ldr d20, [x21], #0x8\n"
+ "ldr d25, [x20], #0x8\n"
+ "tbz x10, #0, 158f\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x23]\n"
+ "ld1 { v17.s }[2], [x22]\n"
+ "ld1 { v20.s }[2], [x21]\n"
+ "ld1 { v25.s }[2], [x20]\n"
"b 158f\n"
"157:" // Height 5: Partial accumulate: partial_1_0
- "ldr s9, [x9, #0x0]\n"
- "ldr s12, [x24, #0x0]\n"
- "mov x25, #0x0\n"
- "ldr s17, [x23, #0x0]\n"
- "ldr s20, [x22, #0x0]\n"
- "ldr s25, [x21, #0x0]\n"
+ "ldr s9, [x28, #0x0]\n"
+ "mov x24, #0x0\n"
+ "ldr s12, [x23, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
+ "ldr s20, [x21, #0x0]\n"
+ "ldr s25, [x20, #0x0]\n"
"158:" // Height 5: Partial accumulate: Done
- "sub x9, x9, x25\n"
+ "sub x28, x28, x24\n"
"b 160f\n"
"159:" // Height 5: full accumulate
- "ldr q9, [x9, #0x0]\n"
- "ldr q10, [x9, #0x10]\n"
- "ldr q11, [x9, #0x20]\n"
- "ldr q16, [x9, #0x30]\n"
- "ldr q12, [x24, #0x0]\n"
- "ldr q13, [x24, #0x10]\n"
- "ldr q14, [x24, #0x20]\n"
- "ldr q15, [x24, #0x30]\n"
- "ldr q17, [x23, #0x0]\n"
- "ldr q18, [x23, #0x10]\n"
- "ldr q19, [x23, #0x20]\n"
- "ldr q24, [x23, #0x30]\n"
- "ldr q20, [x22, #0x0]\n"
- "ldr q21, [x22, #0x10]\n"
- "ldr q22, [x22, #0x20]\n"
- "ldr q23, [x22, #0x30]\n"
- "ldr q25, [x21, #0x0]\n"
- "ldr q26, [x21, #0x10]\n"
- "ldr q27, [x21, #0x20]\n"
- "ldr q6, [x21, #0x30]\n"
+ "ldr q9, [x28, #0x0]\n"
+ "ldr q10, [x28, #0x10]\n"
+ "ldr q11, [x28, #0x20]\n"
+ "ldr q16, [x28, #0x30]\n"
+ "ldr q12, [x23, #0x0]\n"
+ "ldr q13, [x23, #0x10]\n"
+ "ldr q14, [x23, #0x20]\n"
+ "ldr q15, [x23, #0x30]\n"
+ "ldr q17, [x22, #0x0]\n"
+ "ldr q18, [x22, #0x10]\n"
+ "ldr q19, [x22, #0x20]\n"
+ "ldr q24, [x22, #0x30]\n"
+ "ldr q20, [x21, #0x0]\n"
+ "ldr q21, [x21, #0x10]\n"
+ "ldr q22, [x21, #0x20]\n"
+ "ldr q23, [x21, #0x30]\n"
+ "ldr q25, [x20, #0x0]\n"
+ "ldr q26, [x20, #0x10]\n"
+ "ldr q27, [x20, #0x20]\n"
+ "ldr q6, [x20, #0x30]\n"
"160:" // Height 5: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -2121,210 +2129,212 @@ void a64_hybrid_s8s32_mmla_6x16 (
"movi v30.4s, #0x0\n"
"movi v31.4s, #0x0\n"
"162:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"163:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 164f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 165f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 165f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
"b 165f\n"
"164:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
"165:" // Height 5: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 168f\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q2, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q3, [x24, #0x0]\n"
- "ldr q4, [x23, #0x0]\n"
- "ldr q5, [x22, #0x0]\n"
- "ldr q7, [x10, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "cmp x26, #0x20\n"
"blt 167f\n"
"166:" // Height 5: Multiply loop: Main loop head
+ "movi v6.4s, #0x0\n"
+ "ldr q2, [x24, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x23, #0x0]\n"
+ "add x24, x24, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ "ldr q4, [x22, #0x0]\n"
+ "add x23, x23, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q5, [x21, #0x0]\n"
+ "add x22, x22, #0x10\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "sub x27, x27, #0x10\n"
+ "ldr q7, [x9, #0x0]\n"
+ "add x21, x21, #0x10\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x26, x26, #0x10\n"
"trn2 v5.2d, v5.2d, v6.2d\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
+ "cmp x26, #0x20\n"
+ ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e87a498 // smmla v24.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x4e86a49c // smmla v28.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
".inst 0x4e87a499 // smmla v25.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
- "add x24, x24, #0x10\n"
+ "ldr q7, [x9, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
".inst 0x4e86a49d // smmla v29.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "cmp x27, #0x20\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49a // smmla v26.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e86a49e // smmla v30.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
+ "ldr q6, [x9, #0x70]\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49b // smmla v27.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
+ "ldr q7, [x9, #0x80]\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
- "ldr q2, [x25, #0x0]\n"
".inst 0x4e86a49f // smmla v31.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
- "ldr q4, [x23, #0x0]\n"
+ "ldr q6, [x9, #0x90]\n"
".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
".inst 0x4e87a470 // smmla v16.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4b8 // smmla v24.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
+ "ldr q7, [x9, #0xa0]\n"
".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
".inst 0x4e86a474 // smmla v20.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4bc // smmla v28.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
+ "ldr q6, [x9, #0xb0]\n"
".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
".inst 0x4e87a471 // smmla v17.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4b9 // smmla v25.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
+ "ldr q7, [x9, #0xc0]\n"
".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
".inst 0x4e86a475 // smmla v21.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4bd // smmla v29.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
+ "ldr q6, [x9, #0xd0]\n"
".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
".inst 0x4e87a472 // smmla v18.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4ba // smmla v26.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
+ "ldr q7, [x9, #0xe0]\n"
".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
".inst 0x4e86a476 // smmla v22.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4be // smmla v30.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q6, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
".inst 0x4e87a473 // smmla v19.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4bb // smmla v27.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0x0]\n"
".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
- "ldr q1, [x26, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
".inst 0x4e86a477 // smmla v23.4s, v3.16b, v6.16b\n"
- "ldr q3, [x24, #0x0]\n"
".inst 0x4e86a4bf // smmla v31.4s, v5.16b, v6.16b\n"
- "ldr q5, [x22, #0x0]\n"
"bge 166b\n"
"167:" // Height 5: Multiply loop: Single iteration only
+ "movi v6.4s, #0x0\n"
+ "ldr q2, [x24, #0x0]\n"
+ "sub x26, x26, #0x10\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x23, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ "ldr q4, [x22, #0x0]\n"
+ "add x24, x24, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q5, [x21, #0x0]\n"
+ "add x23, x23, #0x10\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "add x26, x26, #0x10\n"
+ "ldr q7, [x9, #0x0]\n"
+ "add x22, x22, #0x10\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x21, x21, #0x10\n"
"trn2 v5.2d, v5.2d, v6.2d\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
+ ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e87a498 // smmla v24.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- "add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x4e86a49c // smmla v28.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
".inst 0x4e87a499 // smmla v25.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
- "add x23, x23, #0x10\n"
+ "ldr q7, [x9, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- "add x22, x22, #0x10\n"
- "sub x27, x27, #0x10\n"
".inst 0x4e86a49d // smmla v29.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49a // smmla v26.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e86a49e // smmla v30.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
+ "ldr q6, [x9, #0x70]\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49b // smmla v27.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x80]\n"
+ "ldr q7, [x9, #0x80]\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
".inst 0x4e86a49f // smmla v31.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
+ "ldr q6, [x9, #0x90]\n"
".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
".inst 0x4e87a470 // smmla v16.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4b8 // smmla v24.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
+ "ldr q7, [x9, #0xa0]\n"
".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
".inst 0x4e86a474 // smmla v20.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4bc // smmla v28.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
+ "ldr q6, [x9, #0xb0]\n"
".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
".inst 0x4e87a471 // smmla v17.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4b9 // smmla v25.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
+ "ldr q7, [x9, #0xc0]\n"
".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
".inst 0x4e86a475 // smmla v21.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4bd // smmla v29.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
+ "ldr q6, [x9, #0xd0]\n"
".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
".inst 0x4e87a472 // smmla v18.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4ba // smmla v26.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
+ "ldr q7, [x9, #0xe0]\n"
".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
".inst 0x4e86a476 // smmla v22.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4be // smmla v30.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q6, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
".inst 0x4e87a473 // smmla v19.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4bb // smmla v27.4s, v5.16b, v7.16b\n"
@@ -2332,134 +2342,136 @@ void a64_hybrid_s8s32_mmla_6x16 (
".inst 0x4e86a477 // smmla v23.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4bf // smmla v31.4s, v5.16b, v6.16b\n"
"168:" // Height 5: Multiply loop: Main loop skip
- "cbz x27, 175f\n"
- "cmp x27, #0x8\n"
+ "cbz x26, 175f\n"
+ "cmp x26, #0x8\n"
"blt 170f\n"
"169:" // Height 5: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d2, [x25], #0x8\n"
+ "movi v7.4s, #0x0\n"
+ "ldr d1, [x25], #0x8\n"
+ "sub x26, x26, #0x8\n"
+ "ldr d2, [x24], #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "ldr d3, [x24], #0x8\n"
- "ldr d4, [x23], #0x8\n"
+ "ldr d3, [x23], #0x8\n"
+ "cmp x26, #0x8\n"
+ "ldr d4, [x22], #0x8\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
- "sub x27, x27, #0x8\n"
- "ldr d5, [x22], #0x8\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr d5, [x21], #0x8\n"
+ "ldr q6, [x9, #0x0]\n"
"trn1 v4.2d, v5.2d, v7.2d\n"
+ "ldr q7, [x9, #0x10]\n"
".inst 0x4e86a408 // smmla v8.4s, v0.16b, v6.16b\n"
- "ldr q7, [x10, #0x10]\n"
".inst 0x4e86a450 // smmla v16.4s, v2.16b, v6.16b\n"
".inst 0x4e86a498 // smmla v24.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x4e87a40c // smmla v12.4s, v0.16b, v7.16b\n"
".inst 0x4e87a454 // smmla v20.4s, v2.16b, v7.16b\n"
- "cmp x27, #0x8\n"
".inst 0x4e87a49c // smmla v28.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x30]\n"
+ "ldr q7, [x9, #0x30]\n"
".inst 0x4e86a409 // smmla v9.4s, v0.16b, v6.16b\n"
".inst 0x4e86a451 // smmla v17.4s, v2.16b, v6.16b\n"
".inst 0x4e86a499 // smmla v25.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x4e87a40d // smmla v13.4s, v0.16b, v7.16b\n"
".inst 0x4e87a455 // smmla v21.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49d // smmla v29.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x50]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x4e86a40a // smmla v10.4s, v0.16b, v6.16b\n"
".inst 0x4e86a452 // smmla v18.4s, v2.16b, v6.16b\n"
".inst 0x4e86a49a // smmla v26.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x4e87a40e // smmla v14.4s, v0.16b, v7.16b\n"
".inst 0x4e87a456 // smmla v22.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49e // smmla v30.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x70]\n"
+ "ldr q7, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
".inst 0x4e86a40b // smmla v11.4s, v0.16b, v6.16b\n"
- "add x10, x10, #0x80\n"
".inst 0x4e86a453 // smmla v19.4s, v2.16b, v6.16b\n"
".inst 0x4e86a49b // smmla v27.4s, v4.16b, v6.16b\n"
".inst 0x4e87a40f // smmla v15.4s, v0.16b, v7.16b\n"
".inst 0x4e87a457 // smmla v23.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49f // smmla v31.4s, v4.16b, v7.16b\n"
"bge 169b\n"
+ "cbz x26, 175f\n"
"170:" // Height 5: Multiply loop: Skip odd blocks
- "cbz x27, 175f\n"
- "tbz x27, #2, 172f\n"
- "ldr s1, [x26], #0x4\n"
- "ldr s2, [x25], #0x4\n"
- "ldr s3, [x24], #0x4\n"
- "ldr s4, [x23], #0x4\n"
- "ldr s5, [x22], #0x4\n"
- "tbz x27, #1, 171f\n"
- "ld1 { v1.h }[2], [x26], #0x2\n"
- "ld1 { v2.h }[2], [x25], #0x2\n"
- "ld1 { v3.h }[2], [x24], #0x2\n"
- "ld1 { v4.h }[2], [x23], #0x2\n"
- "ld1 { v5.h }[2], [x22], #0x2\n"
- "tbz x27, #0, 174f\n"
- "ld1 { v1.b }[6], [x26]\n"
- "ld1 { v2.b }[6], [x25]\n"
- "ld1 { v3.b }[6], [x24]\n"
- "ld1 { v4.b }[6], [x23]\n"
- "ld1 { v5.b }[6], [x22]\n"
+ "tbz x26, #2, 172f\n"
+ "ldr s1, [x25], #0x4\n"
+ "ldr s2, [x24], #0x4\n"
+ "ldr s3, [x23], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
+ "ldr s5, [x21], #0x4\n"
+ "tbz x26, #1, 171f\n"
+ "ld1 { v1.h }[2], [x25], #0x2\n"
+ "ld1 { v2.h }[2], [x24], #0x2\n"
+ "ld1 { v3.h }[2], [x23], #0x2\n"
+ "ld1 { v4.h }[2], [x22], #0x2\n"
+ "ld1 { v5.h }[2], [x21], #0x2\n"
+ "tbz x26, #0, 174f\n"
+ "ld1 { v1.b }[6], [x25]\n"
+ "ld1 { v2.b }[6], [x24]\n"
+ "ld1 { v3.b }[6], [x23]\n"
+ "ld1 { v4.b }[6], [x22]\n"
+ "ld1 { v5.b }[6], [x21]\n"
"b 174f\n"
"171:" // Height 5: Multiply loop: Ragged operand read: partial_1_4
- "tbz x27, #0, 174f\n"
- "ld1 { v1.b }[4], [x26]\n"
- "ld1 { v2.b }[4], [x25]\n"
- "ld1 { v3.b }[4], [x24]\n"
- "ld1 { v4.b }[4], [x23]\n"
- "ld1 { v5.b }[4], [x22]\n"
+ "tbz x26, #0, 174f\n"
+ "ld1 { v1.b }[4], [x25]\n"
+ "ld1 { v2.b }[4], [x24]\n"
+ "ld1 { v3.b }[4], [x23]\n"
+ "ld1 { v4.b }[4], [x22]\n"
+ "ld1 { v5.b }[4], [x21]\n"
"b 174f\n"
"172:" // Height 5: Multiply loop: Ragged operand read: partial_2_0
- "tbz x27, #1, 173f\n"
- "ldr h1, [x26], #0x2\n"
- "ldr h2, [x25], #0x2\n"
- "ldr h3, [x24], #0x2\n"
- "ldr h4, [x23], #0x2\n"
- "ldr h5, [x22], #0x2\n"
- "tbz x27, #0, 174f\n"
- "ld1 { v1.b }[2], [x26]\n"
- "ld1 { v2.b }[2], [x25]\n"
- "ld1 { v3.b }[2], [x24]\n"
- "ld1 { v4.b }[2], [x23]\n"
- "ld1 { v5.b }[2], [x22]\n"
+ "tbz x26, #1, 173f\n"
+ "ldr h1, [x25], #0x2\n"
+ "ldr h2, [x24], #0x2\n"
+ "ldr h3, [x23], #0x2\n"
+ "ldr h4, [x22], #0x2\n"
+ "ldr h5, [x21], #0x2\n"
+ "tbz x26, #0, 174f\n"
+ "ld1 { v1.b }[2], [x25]\n"
+ "ld1 { v2.b }[2], [x24]\n"
+ "ld1 { v3.b }[2], [x23]\n"
+ "ld1 { v4.b }[2], [x22]\n"
+ "ld1 { v5.b }[2], [x21]\n"
"b 174f\n"
"173:" // Height 5: Multiply loop: Ragged operand read: partial_1_0
- "ldr b1, [x26, #0x0]\n"
- "ldr b2, [x25, #0x0]\n"
- "ldr b3, [x24, #0x0]\n"
- "ldr b4, [x23, #0x0]\n"
- "ldr b5, [x22, #0x0]\n"
+ "ldr b1, [x25, #0x0]\n"
+ "ldr b2, [x24, #0x0]\n"
+ "ldr b3, [x23, #0x0]\n"
+ "ldr b4, [x22, #0x0]\n"
+ "ldr b5, [x21, #0x0]\n"
"174:" // Height 5: Multiply loop: Ragged operand read: Done
- "ldr q7, [x10, #0x0]\n"
+ "movi v6.4s, #0x0\n"
+ "ldr q7, [x9, #0x0]\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
".inst 0x4e87a498 // smmla v24.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
".inst 0x4e86a49c // smmla v28.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
".inst 0x4e87a499 // smmla v25.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
+ "ldr q7, [x9, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
".inst 0x4e86a49d // smmla v29.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49a // smmla v26.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
".inst 0x4e86a49e // smmla v30.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
- "add x10, x10, #0x80\n"
+ "ldr q6, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49b // smmla v27.4s, v4.16b, v7.16b\n"
@@ -2467,30 +2479,30 @@ void a64_hybrid_s8s32_mmla_6x16 (
".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
".inst 0x4e86a49f // smmla v31.4s, v4.16b, v6.16b\n"
"175:" // Height 5: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 163b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "cmp x10, #0x10\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
- "cmp x11, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #2\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
- "prfm pstl1keep, [x24, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
+ "add x20, x21, x19, LSL #2\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x20, #0x0]\n"
"uzp1 v15.2d, v16.2d, v20.2d\n"
- "prfm pstl1keep, [x22, #0x0]\n"
- "prfm pstl1keep, [x21, #0x0]\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
@@ -2503,319 +2515,319 @@ void a64_hybrid_s8s32_mmla_6x16 (
"uzp1 v26.2d, v26.2d, v30.2d\n"
"uzp1 v27.2d, v27.2d, v31.2d\n"
"bge 184f\n"
- "tbz x11, #3, 179f\n"
- "st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x24], #0x10\n"
- "st1 { v9.4s }, [x24], #0x10\n"
- "st1 { v15.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v16.4s }, [x22], #0x10\n"
- "st1 { v17.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
- "st1 { v25.4s }, [x21], #0x10\n"
- "tbz x11, #2, 177f\n"
- "st1 { v13.4s }, [x9], #0x10\n"
- "st1 { v10.4s }, [x24], #0x10\n"
- "st1 { v21.4s }, [x23], #0x10\n"
- "st1 { v18.4s }, [x22], #0x10\n"
- "st1 { v26.4s }, [x21], #0x10\n"
- "tbz x11, #1, 176f\n"
- "str d14, [x9], #0x8\n"
- "str d11, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
- "str d19, [x22], #0x8\n"
- "str d27, [x21], #0x8\n"
- "tbz x11, #0, 183f\n"
- "st1 { v14.s }[2], [x9]\n"
- "st1 { v11.s }[2], [x24]\n"
- "st1 { v22.s }[2], [x23]\n"
- "st1 { v19.s }[2], [x22]\n"
- "st1 { v27.s }[2], [x21]\n"
+ "tbz x10, #3, 179f\n"
+ "st1 { v7.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x23], #0x10\n"
+ "st1 { v9.4s }, [x23], #0x10\n"
+ "st1 { v15.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v16.4s }, [x21], #0x10\n"
+ "st1 { v17.4s }, [x21], #0x10\n"
+ "st1 { v24.4s }, [x20], #0x10\n"
+ "st1 { v25.4s }, [x20], #0x10\n"
+ "tbz x10, #2, 177f\n"
+ "st1 { v13.4s }, [x28], #0x10\n"
+ "st1 { v10.4s }, [x23], #0x10\n"
+ "st1 { v21.4s }, [x22], #0x10\n"
+ "st1 { v18.4s }, [x21], #0x10\n"
+ "st1 { v26.4s }, [x20], #0x10\n"
+ "tbz x10, #1, 176f\n"
+ "str d14, [x28], #0x8\n"
+ "str d11, [x23], #0x8\n"
+ "str d22, [x22], #0x8\n"
+ "str d19, [x21], #0x8\n"
+ "str d27, [x20], #0x8\n"
+ "tbz x10, #0, 183f\n"
+ "st1 { v14.s }[2], [x28]\n"
+ "st1 { v11.s }[2], [x23]\n"
+ "st1 { v22.s }[2], [x22]\n"
+ "st1 { v19.s }[2], [x21]\n"
+ "st1 { v27.s }[2], [x20]\n"
"b 183f\n"
"176:" // Height 5: Partial direct writeback: partial_1_12
- "tbz x11, #0, 183f\n"
- "str s14, [x9, #0x0]\n"
- "str s11, [x24, #0x0]\n"
- "str s22, [x23, #0x0]\n"
- "str s19, [x22, #0x0]\n"
- "str s27, [x21, #0x0]\n"
+ "tbz x10, #0, 183f\n"
+ "str s14, [x28, #0x0]\n"
+ "str s11, [x23, #0x0]\n"
+ "str s22, [x22, #0x0]\n"
+ "str s19, [x21, #0x0]\n"
+ "str s27, [x20, #0x0]\n"
"b 183f\n"
"177:" // Height 5: Partial direct writeback: partial_2_8
- "tbz x11, #1, 178f\n"
- "str d13, [x9], #0x8\n"
- "str d10, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
- "str d18, [x22], #0x8\n"
- "str d26, [x21], #0x8\n"
- "tbz x11, #0, 183f\n"
- "st1 { v13.s }[2], [x9]\n"
- "st1 { v10.s }[2], [x24]\n"
- "st1 { v21.s }[2], [x23]\n"
- "st1 { v18.s }[2], [x22]\n"
- "st1 { v26.s }[2], [x21]\n"
+ "tbz x10, #1, 178f\n"
+ "str d13, [x28], #0x8\n"
+ "str d10, [x23], #0x8\n"
+ "str d21, [x22], #0x8\n"
+ "str d18, [x21], #0x8\n"
+ "str d26, [x20], #0x8\n"
+ "tbz x10, #0, 183f\n"
+ "st1 { v13.s }[2], [x28]\n"
+ "st1 { v10.s }[2], [x23]\n"
+ "st1 { v21.s }[2], [x22]\n"
+ "st1 { v18.s }[2], [x21]\n"
+ "st1 { v26.s }[2], [x20]\n"
"b 183f\n"
"178:" // Height 5: Partial direct writeback: partial_1_8
- "tbz x11, #0, 183f\n"
- "str s13, [x9, #0x0]\n"
- "str s10, [x24, #0x0]\n"
- "str s21, [x23, #0x0]\n"
- "str s18, [x22, #0x0]\n"
- "str s26, [x21, #0x0]\n"
+ "tbz x10, #0, 183f\n"
+ "str s13, [x28, #0x0]\n"
+ "str s10, [x23, #0x0]\n"
+ "str s21, [x22, #0x0]\n"
+ "str s18, [x21, #0x0]\n"
+ "str s26, [x20, #0x0]\n"
"b 183f\n"
"179:" // Height 5: Partial direct writeback: partial_4_0
- "tbz x11, #2, 181f\n"
- "st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x24], #0x10\n"
- "st1 { v15.4s }, [x23], #0x10\n"
- "st1 { v16.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
- "tbz x11, #1, 180f\n"
- "str d12, [x9], #0x8\n"
- "str d9, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d17, [x22], #0x8\n"
- "str d25, [x21], #0x8\n"
- "tbz x11, #0, 183f\n"
- "st1 { v12.s }[2], [x9]\n"
- "st1 { v9.s }[2], [x24]\n"
- "st1 { v20.s }[2], [x23]\n"
- "st1 { v17.s }[2], [x22]\n"
- "st1 { v25.s }[2], [x21]\n"
+ "tbz x10, #2, 181f\n"
+ "st1 { v7.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x23], #0x10\n"
+ "st1 { v15.4s }, [x22], #0x10\n"
+ "st1 { v16.4s }, [x21], #0x10\n"
+ "st1 { v24.4s }, [x20], #0x10\n"
+ "tbz x10, #1, 180f\n"
+ "str d12, [x28], #0x8\n"
+ "str d9, [x23], #0x8\n"
+ "str d20, [x22], #0x8\n"
+ "str d17, [x21], #0x8\n"
+ "str d25, [x20], #0x8\n"
+ "tbz x10, #0, 183f\n"
+ "st1 { v12.s }[2], [x28]\n"
+ "st1 { v9.s }[2], [x23]\n"
+ "st1 { v20.s }[2], [x22]\n"
+ "st1 { v17.s }[2], [x21]\n"
+ "st1 { v25.s }[2], [x20]\n"
"b 183f\n"
"180:" // Height 5: Partial direct writeback: partial_1_4
- "tbz x11, #0, 183f\n"
- "str s12, [x9, #0x0]\n"
- "str s9, [x24, #0x0]\n"
- "str s20, [x23, #0x0]\n"
- "str s17, [x22, #0x0]\n"
- "str s25, [x21, #0x0]\n"
+ "tbz x10, #0, 183f\n"
+ "str s12, [x28, #0x0]\n"
+ "str s9, [x23, #0x0]\n"
+ "str s20, [x22, #0x0]\n"
+ "str s17, [x21, #0x0]\n"
+ "str s25, [x20, #0x0]\n"
"b 183f\n"
"181:" // Height 5: Partial direct writeback: partial_2_0
- "tbz x11, #1, 182f\n"
- "str d7, [x9], #0x8\n"
- "str d8, [x24], #0x8\n"
- "str d15, [x23], #0x8\n"
- "str d16, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
- "tbz x11, #0, 183f\n"
- "st1 { v7.s }[2], [x9]\n"
- "st1 { v8.s }[2], [x24]\n"
- "st1 { v15.s }[2], [x23]\n"
- "st1 { v16.s }[2], [x22]\n"
- "st1 { v24.s }[2], [x21]\n"
+ "tbz x10, #1, 182f\n"
+ "str d7, [x28], #0x8\n"
+ "str d8, [x23], #0x8\n"
+ "str d15, [x22], #0x8\n"
+ "str d16, [x21], #0x8\n"
+ "str d24, [x20], #0x8\n"
+ "tbz x10, #0, 183f\n"
+ "st1 { v7.s }[2], [x28]\n"
+ "st1 { v8.s }[2], [x23]\n"
+ "st1 { v15.s }[2], [x22]\n"
+ "st1 { v16.s }[2], [x21]\n"
+ "st1 { v24.s }[2], [x20]\n"
"b 183f\n"
"182:" // Height 5: Partial direct writeback: partial_1_0
- "str s7, [x9, #0x0]\n"
- "str s8, [x24, #0x0]\n"
- "str s15, [x23, #0x0]\n"
- "str s16, [x22, #0x0]\n"
- "str s24, [x21, #0x0]\n"
+ "str s7, [x28, #0x0]\n"
+ "str s8, [x23, #0x0]\n"
+ "str s15, [x22, #0x0]\n"
+ "str s16, [x21, #0x0]\n"
+ "str s24, [x20, #0x0]\n"
"183:" // Height 5: Partial direct writeback: Done
"b 185f\n"
"184:" // Height 5: Full writeback
- "str q7, [x9, #0x0]\n"
- "str q12, [x9, #0x10]\n"
- "str q13, [x9, #0x20]\n"
- "str q14, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q8, [x24, #0x0]\n"
- "str q9, [x24, #0x10]\n"
- "str q10, [x24, #0x20]\n"
- "str q11, [x24, #0x30]\n"
- "str q15, [x23, #0x0]\n"
- "str q20, [x23, #0x10]\n"
- "str q21, [x23, #0x20]\n"
- "str q22, [x23, #0x30]\n"
- "str q16, [x22, #0x0]\n"
- "str q17, [x22, #0x10]\n"
- "str q18, [x22, #0x20]\n"
- "str q19, [x22, #0x30]\n"
- "str q24, [x21, #0x0]\n"
- "str q25, [x21, #0x10]\n"
- "str q26, [x21, #0x20]\n"
- "str q27, [x21, #0x30]\n"
+ "str q7, [x28, #0x0]\n"
+ "str q12, [x28, #0x10]\n"
+ "str q13, [x28, #0x20]\n"
+ "str q14, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q8, [x23, #0x0]\n"
+ "str q9, [x23, #0x10]\n"
+ "str q10, [x23, #0x20]\n"
+ "str q11, [x23, #0x30]\n"
+ "str q15, [x22, #0x0]\n"
+ "str q20, [x22, #0x10]\n"
+ "str q21, [x22, #0x20]\n"
+ "str q22, [x22, #0x30]\n"
+ "str q16, [x21, #0x0]\n"
+ "str q17, [x21, #0x10]\n"
+ "str q18, [x21, #0x20]\n"
+ "str q19, [x21, #0x30]\n"
+ "str q24, [x20, #0x0]\n"
+ "str q25, [x20, #0x10]\n"
+ "str q26, [x20, #0x20]\n"
+ "str q27, [x20, #0x30]\n"
"185:" // Height 5: Writeback done
- "subs x11, x11, #0x10\n"
+ "subs x10, x10, #0x10\n"
"bgt 150b\n"
"b 224f\n"
"186:" // Height 6
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"mov x20, #0x18\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x9, %x[output_ptr]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
"187:" // Height 6: Column loop
"tbz %x[flags], #0, 198f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "cmp x11, #0x10\n"
- "add x20, x21, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x10, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
+ "add x19, x20, x19, LSL #2\n"
"bge 196f\n"
- "tbz x11, #3, 191f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v25.4s }, [x21], #0x10\n"
- "ld1 { v28.4s }, [x20], #0x10\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x24], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
- "ld1 { v29.4s }, [x20], #0x10\n"
- "tbz x11, #2, 189f\n"
- "ld1 { v11.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "ld1 { v19.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "ld1 { v27.4s }, [x21], #0x10\n"
- "ld1 { v30.4s }, [x20], #0x10\n"
- "tbz x11, #1, 188f\n"
- "ldr d16, [x9], #0x8\n"
- "ldr d15, [x24], #0x8\n"
- "mov x25, #0x38\n"
- "ldr d24, [x23], #0x8\n"
- "ldr d23, [x22], #0x8\n"
- "ldr d6, [x21], #0x8\n"
- "ldr d31, [x20], #0x8\n"
- "tbz x11, #0, 195f\n"
- "ld1 { v16.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x24]\n"
- "ld1 { v24.s }[2], [x23]\n"
- "ld1 { v23.s }[2], [x22]\n"
- "ld1 { v6.s }[2], [x21]\n"
- "ld1 { v31.s }[2], [x20]\n"
+ "tbz x10, #3, 191f\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v25.4s }, [x20], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x23], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "ld1 { v26.4s }, [x20], #0x10\n"
+ "ld1 { v28.4s }, [x19], #0x10\n"
+ "ld1 { v29.4s }, [x19], #0x10\n"
+ "tbz x10, #2, 189f\n"
+ "ld1 { v11.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v19.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
+ "ld1 { v27.4s }, [x20], #0x10\n"
+ "ld1 { v30.4s }, [x19], #0x10\n"
+ "tbz x10, #1, 188f\n"
+ "ldr d16, [x28], #0x8\n"
+ "mov x24, #0x38\n"
+ "ldr d15, [x23], #0x8\n"
+ "ldr d24, [x22], #0x8\n"
+ "ldr d23, [x21], #0x8\n"
+ "ldr d6, [x20], #0x8\n"
+ "ldr d31, [x19], #0x8\n"
+ "tbz x10, #0, 195f\n"
+ "ld1 { v16.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x23]\n"
+ "ld1 { v24.s }[2], [x22]\n"
+ "ld1 { v23.s }[2], [x21]\n"
+ "ld1 { v6.s }[2], [x20]\n"
+ "ld1 { v31.s }[2], [x19]\n"
"b 195f\n"
"188:" // Height 6: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x11, #0, 195f\n"
- "ldr s16, [x9, #0x0]\n"
- "ldr s15, [x24, #0x0]\n"
- "ldr s24, [x23, #0x0]\n"
- "ldr s23, [x22, #0x0]\n"
- "ldr s6, [x21, #0x0]\n"
- "ldr s31, [x20, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x10, #0, 195f\n"
+ "ldr s16, [x28, #0x0]\n"
+ "ldr s15, [x23, #0x0]\n"
+ "ldr s24, [x22, #0x0]\n"
+ "ldr s23, [x21, #0x0]\n"
+ "ldr s6, [x20, #0x0]\n"
+ "ldr s31, [x19, #0x0]\n"
"b 195f\n"
"189:" // Height 6: Partial accumulate: partial_2_8
- "tbz x11, #1, 190f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d14, [x24], #0x8\n"
- "mov x25, #0x28\n"
- "ldr d19, [x23], #0x8\n"
- "ldr d22, [x22], #0x8\n"
- "ldr d27, [x21], #0x8\n"
- "ldr d30, [x20], #0x8\n"
- "tbz x11, #0, 195f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x24]\n"
- "ld1 { v19.s }[2], [x23]\n"
- "ld1 { v22.s }[2], [x22]\n"
- "ld1 { v27.s }[2], [x21]\n"
- "ld1 { v30.s }[2], [x20]\n"
+ "tbz x10, #1, 190f\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
+ "mov x24, #0x28\n"
+ "ldr d19, [x22], #0x8\n"
+ "ldr d22, [x21], #0x8\n"
+ "ldr d27, [x20], #0x8\n"
+ "ldr d30, [x19], #0x8\n"
+ "tbz x10, #0, 195f\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x23]\n"
+ "ld1 { v19.s }[2], [x22]\n"
+ "ld1 { v22.s }[2], [x21]\n"
+ "ld1 { v27.s }[2], [x20]\n"
+ "ld1 { v30.s }[2], [x19]\n"
"b 195f\n"
"190:" // Height 6: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x11, #0, 195f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s14, [x24, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s22, [x22, #0x0]\n"
- "ldr s27, [x21, #0x0]\n"
- "ldr s30, [x20, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x10, #0, 195f\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s14, [x23, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
+ "ldr s22, [x21, #0x0]\n"
+ "ldr s27, [x20, #0x0]\n"
+ "ldr s30, [x19, #0x0]\n"
"b 195f\n"
"191:" // Height 6: Partial accumulate: partial_4_0
- "tbz x11, #2, 193f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v25.4s }, [x21], #0x10\n"
- "ld1 { v28.4s }, [x20], #0x10\n"
- "tbz x11, #1, 192f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d13, [x24], #0x8\n"
- "mov x25, #0x18\n"
- "ldr d18, [x23], #0x8\n"
- "ldr d21, [x22], #0x8\n"
- "ldr d26, [x21], #0x8\n"
- "ldr d29, [x20], #0x8\n"
- "tbz x11, #0, 195f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x24]\n"
- "ld1 { v18.s }[2], [x23]\n"
- "ld1 { v21.s }[2], [x22]\n"
- "ld1 { v26.s }[2], [x21]\n"
- "ld1 { v29.s }[2], [x20]\n"
+ "tbz x10, #2, 193f\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v25.4s }, [x20], #0x10\n"
+ "ld1 { v28.4s }, [x19], #0x10\n"
+ "tbz x10, #1, 192f\n"
+ "ldr d10, [x28], #0x8\n"
+ "mov x24, #0x18\n"
+ "ldr d13, [x23], #0x8\n"
+ "ldr d18, [x22], #0x8\n"
+ "ldr d21, [x21], #0x8\n"
+ "ldr d26, [x20], #0x8\n"
+ "ldr d29, [x19], #0x8\n"
+ "tbz x10, #0, 195f\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x23]\n"
+ "ld1 { v18.s }[2], [x22]\n"
+ "ld1 { v21.s }[2], [x21]\n"
+ "ld1 { v26.s }[2], [x20]\n"
+ "ld1 { v29.s }[2], [x19]\n"
"b 195f\n"
"192:" // Height 6: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x11, #0, 195f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s13, [x24, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
- "ldr s21, [x22, #0x0]\n"
- "ldr s26, [x21, #0x0]\n"
- "ldr s29, [x20, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x10, #0, 195f\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s13, [x23, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
+ "ldr s21, [x21, #0x0]\n"
+ "ldr s26, [x20, #0x0]\n"
+ "ldr s29, [x19, #0x0]\n"
"b 195f\n"
"193:" // Height 6: Partial accumulate: partial_2_0
- "tbz x11, #1, 194f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d12, [x24], #0x8\n"
- "mov x25, #0x8\n"
- "ldr d17, [x23], #0x8\n"
- "ldr d20, [x22], #0x8\n"
- "ldr d25, [x21], #0x8\n"
- "ldr d28, [x20], #0x8\n"
- "tbz x11, #0, 195f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x24]\n"
- "ld1 { v17.s }[2], [x23]\n"
- "ld1 { v20.s }[2], [x22]\n"
- "ld1 { v25.s }[2], [x21]\n"
- "ld1 { v28.s }[2], [x20]\n"
+ "tbz x10, #1, 194f\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d12, [x23], #0x8\n"
+ "mov x24, #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "ldr d20, [x21], #0x8\n"
+ "ldr d25, [x20], #0x8\n"
+ "ldr d28, [x19], #0x8\n"
+ "tbz x10, #0, 195f\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x23]\n"
+ "ld1 { v17.s }[2], [x22]\n"
+ "ld1 { v20.s }[2], [x21]\n"
+ "ld1 { v25.s }[2], [x20]\n"
+ "ld1 { v28.s }[2], [x19]\n"
"b 195f\n"
"194:" // Height 6: Partial accumulate: partial_1_0
- "ldr s9, [x9, #0x0]\n"
- "ldr s12, [x24, #0x0]\n"
- "mov x25, #0x0\n"
- "ldr s17, [x23, #0x0]\n"
- "ldr s20, [x22, #0x0]\n"
- "ldr s25, [x21, #0x0]\n"
- "ldr s28, [x20, #0x0]\n"
+ "ldr s9, [x28, #0x0]\n"
+ "mov x24, #0x0\n"
+ "ldr s12, [x23, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
+ "ldr s20, [x21, #0x0]\n"
+ "ldr s25, [x20, #0x0]\n"
+ "ldr s28, [x19, #0x0]\n"
"195:" // Height 6: Partial accumulate: Done
- "sub x9, x9, x25\n"
+ "sub x28, x28, x24\n"
"b 197f\n"
"196:" // Height 6: full accumulate
- "ldr q9, [x9, #0x0]\n"
- "ldr q10, [x9, #0x10]\n"
- "ldr q11, [x9, #0x20]\n"
- "ldr q16, [x9, #0x30]\n"
- "ldr q12, [x24, #0x0]\n"
- "ldr q13, [x24, #0x10]\n"
- "ldr q14, [x24, #0x20]\n"
- "ldr q15, [x24, #0x30]\n"
- "ldr q17, [x23, #0x0]\n"
- "ldr q18, [x23, #0x10]\n"
- "ldr q19, [x23, #0x20]\n"
- "ldr q24, [x23, #0x30]\n"
- "ldr q20, [x22, #0x0]\n"
- "ldr q21, [x22, #0x10]\n"
- "ldr q22, [x22, #0x20]\n"
- "ldr q23, [x22, #0x30]\n"
- "ldr q25, [x21, #0x0]\n"
- "ldr q26, [x21, #0x10]\n"
- "ldr q27, [x21, #0x20]\n"
- "ldr q6, [x21, #0x30]\n"
- "ldr q28, [x20, #0x0]\n"
- "ldr q29, [x20, #0x10]\n"
- "ldr q30, [x20, #0x20]\n"
- "ldr q31, [x20, #0x30]\n"
+ "ldr q9, [x28, #0x0]\n"
+ "ldr q10, [x28, #0x10]\n"
+ "ldr q11, [x28, #0x20]\n"
+ "ldr q16, [x28, #0x30]\n"
+ "ldr q12, [x23, #0x0]\n"
+ "ldr q13, [x23, #0x10]\n"
+ "ldr q14, [x23, #0x20]\n"
+ "ldr q15, [x23, #0x30]\n"
+ "ldr q17, [x22, #0x0]\n"
+ "ldr q18, [x22, #0x10]\n"
+ "ldr q19, [x22, #0x20]\n"
+ "ldr q24, [x22, #0x30]\n"
+ "ldr q20, [x21, #0x0]\n"
+ "ldr q21, [x21, #0x10]\n"
+ "ldr q22, [x21, #0x20]\n"
+ "ldr q23, [x21, #0x30]\n"
+ "ldr q25, [x20, #0x0]\n"
+ "ldr q26, [x20, #0x10]\n"
+ "ldr q27, [x20, #0x20]\n"
+ "ldr q6, [x20, #0x30]\n"
+ "ldr q28, [x19, #0x0]\n"
+ "ldr q29, [x19, #0x10]\n"
+ "ldr q30, [x19, #0x20]\n"
+ "ldr q31, [x19, #0x30]\n"
"197:" // Height 6: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -2868,219 +2880,219 @@ void a64_hybrid_s8s32_mmla_6x16 (
"movi v30.4s, #0x0\n"
"movi v31.4s, #0x0\n"
"199:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"200:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 201f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 202f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
- "add x21, x21, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 202f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
+ "add x20, x20, x19\n"
"b 202f\n"
"201:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
+ "add x20, x21, x19\n"
"202:" // Height 6: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 205f\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q2, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q3, [x24, #0x0]\n"
- "ldr q4, [x23, #0x0]\n"
- "ldr q5, [x22, #0x0]\n"
- "ldr q6, [x21, #0x0]\n"
- "ldr q7, [x10, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "ldr q2, [x24, #0x0]\n"
+ "cmp x26, #0x20\n"
"blt 204f\n"
"203:" // Height 6: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x23, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
- "sub x27, x27, #0x10\n"
+ "ldr q4, [x22, #0x0]\n"
+ "add x24, x24, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q5, [x21, #0x0]\n"
+ "add x23, x23, #0x10\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "add x26, x26, #0x10\n"
+ "ldr q6, [x20, #0x0]\n"
+ "add x22, x22, #0x10\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
+ "ldr q7, [x9, #0x0]\n"
+ "add x21, x21, #0x10\n"
"trn2 v5.2d, v5.2d, v6.2d\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
+ "add x20, x20, #0x10\n"
+ ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x26, x26, #0x10\n"
+ ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "cmp x26, #0x20\n"
".inst 0x4e87a498 // smmla v24.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- "add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4e86a49c // smmla v28.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
".inst 0x4e87a499 // smmla v25.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
- "add x23, x23, #0x10\n"
+ "ldr q7, [x9, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- "add x22, x22, #0x10\n"
- "add x21, x21, #0x10\n"
".inst 0x4e86a49d // smmla v29.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "cmp x27, #0x20\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49a // smmla v26.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e86a49e // smmla v30.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
+ "ldr q6, [x9, #0x70]\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49b // smmla v27.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
+ "ldr q7, [x9, #0x80]\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
- "ldr q2, [x25, #0x0]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
+ "ldr q2, [x24, #0x0]\n"
".inst 0x4e86a49f // smmla v31.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
- "ldr q4, [x23, #0x0]\n"
+ "ldr q6, [x9, #0x90]\n"
".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
".inst 0x4e87a470 // smmla v16.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4b8 // smmla v24.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
+ "ldr q7, [x9, #0xa0]\n"
".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
".inst 0x4e86a474 // smmla v20.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4bc // smmla v28.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
+ "ldr q6, [x9, #0xb0]\n"
".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
".inst 0x4e87a471 // smmla v17.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4b9 // smmla v25.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
+ "ldr q7, [x9, #0xc0]\n"
".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
".inst 0x4e86a475 // smmla v21.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4bd // smmla v29.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
+ "ldr q6, [x9, #0xd0]\n"
".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
".inst 0x4e87a472 // smmla v18.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4ba // smmla v26.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
+ "ldr q7, [x9, #0xe0]\n"
".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
".inst 0x4e86a476 // smmla v22.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4be // smmla v30.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q6, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
".inst 0x4e87a473 // smmla v19.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4bb // smmla v27.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0x0]\n"
".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
- "ldr q1, [x26, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
".inst 0x4e86a477 // smmla v23.4s, v3.16b, v6.16b\n"
- "ldr q3, [x24, #0x0]\n"
".inst 0x4e86a4bf // smmla v31.4s, v5.16b, v6.16b\n"
- "ldr q5, [x22, #0x0]\n"
- "ldr q6, [x21, #0x0]\n"
"bge 203b\n"
"204:" // Height 6: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x23, #0x0]\n"
+ "sub x26, x26, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
- "add x26, x26, #0x10\n"
+ "ldr q4, [x22, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q5, [x21, #0x0]\n"
+ "add x24, x24, #0x10\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "add x25, x25, #0x10\n"
+ "ldr q6, [x20, #0x0]\n"
+ "add x23, x23, #0x10\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
+ "ldr q7, [x9, #0x0]\n"
+ "add x22, x22, #0x10\n"
"trn2 v5.2d, v5.2d, v6.2d\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x20, x20, #0x10\n"
+ ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e87a498 // smmla v24.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- "add x24, x24, #0x10\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4e86a49c // smmla v28.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "add x23, x23, #0x10\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
".inst 0x4e87a499 // smmla v25.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
- "add x22, x22, #0x10\n"
+ "ldr q7, [x9, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- "add x21, x21, #0x10\n"
- "sub x27, x27, #0x10\n"
".inst 0x4e86a49d // smmla v29.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49a // smmla v26.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e86a49e // smmla v30.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
+ "ldr q6, [x9, #0x70]\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49b // smmla v27.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x80]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
+ "ldr q7, [x9, #0x80]\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
".inst 0x4e86a49f // smmla v31.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
+ "ldr q6, [x9, #0x90]\n"
".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
".inst 0x4e87a470 // smmla v16.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4b8 // smmla v24.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
+ "ldr q7, [x9, #0xa0]\n"
".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
".inst 0x4e86a474 // smmla v20.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4bc // smmla v28.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
+ "ldr q6, [x9, #0xb0]\n"
".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
".inst 0x4e87a471 // smmla v17.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4b9 // smmla v25.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
+ "ldr q7, [x9, #0xc0]\n"
".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
".inst 0x4e86a475 // smmla v21.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4bd // smmla v29.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
+ "ldr q6, [x9, #0xd0]\n"
".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
".inst 0x4e87a472 // smmla v18.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4ba // smmla v26.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
+ "ldr q7, [x9, #0xe0]\n"
".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
".inst 0x4e86a476 // smmla v22.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4be // smmla v30.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q6, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
".inst 0x4e87a473 // smmla v19.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4bb // smmla v27.4s, v5.16b, v7.16b\n"
@@ -3088,48 +3100,48 @@ void a64_hybrid_s8s32_mmla_6x16 (
".inst 0x4e86a477 // smmla v23.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4bf // smmla v31.4s, v5.16b, v6.16b\n"
"205:" // Height 6: Multiply loop: Main loop skip
- "cbz x27, 212f\n"
- "cmp x27, #0x8\n"
+ "cbz x26, 212f\n"
+ "cmp x26, #0x8\n"
"blt 207f\n"
"206:" // Height 6: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d2, [x25], #0x8\n"
+ "ldr d1, [x25], #0x8\n"
+ "sub x26, x26, #0x8\n"
+ "ldr d2, [x24], #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "sub x27, x27, #0x8\n"
- "ldr d3, [x24], #0x8\n"
- "ldr d4, [x23], #0x8\n"
+ "ldr d3, [x23], #0x8\n"
+ "cmp x26, #0x8\n"
+ "ldr d4, [x22], #0x8\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
- "cmp x27, #0x8\n"
- "ldr d5, [x22], #0x8\n"
- "ldr d7, [x21], #0x8\n"
+ "ldr d5, [x21], #0x8\n"
+ "ldr d7, [x20], #0x8\n"
"trn1 v4.2d, v5.2d, v7.2d\n"
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "ldr q7, [x9, #0x10]\n"
".inst 0x4e86a408 // smmla v8.4s, v0.16b, v6.16b\n"
".inst 0x4e86a450 // smmla v16.4s, v2.16b, v6.16b\n"
".inst 0x4e86a498 // smmla v24.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x4e87a40c // smmla v12.4s, v0.16b, v7.16b\n"
".inst 0x4e87a454 // smmla v20.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49c // smmla v28.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x30]\n"
+ "ldr q7, [x9, #0x30]\n"
".inst 0x4e86a409 // smmla v9.4s, v0.16b, v6.16b\n"
".inst 0x4e86a451 // smmla v17.4s, v2.16b, v6.16b\n"
".inst 0x4e86a499 // smmla v25.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x4e87a40d // smmla v13.4s, v0.16b, v7.16b\n"
".inst 0x4e87a455 // smmla v21.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49d // smmla v29.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x50]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x4e86a40a // smmla v10.4s, v0.16b, v6.16b\n"
".inst 0x4e86a452 // smmla v18.4s, v2.16b, v6.16b\n"
".inst 0x4e86a49a // smmla v26.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x4e87a40e // smmla v14.4s, v0.16b, v7.16b\n"
".inst 0x4e87a456 // smmla v22.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49e // smmla v30.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x70]\n"
- "add x10, x10, #0x80\n"
+ "ldr q7, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
".inst 0x4e86a40b // smmla v11.4s, v0.16b, v6.16b\n"
".inst 0x4e86a453 // smmla v19.4s, v2.16b, v6.16b\n"
".inst 0x4e86a49b // smmla v27.4s, v4.16b, v6.16b\n"
@@ -3137,128 +3149,128 @@ void a64_hybrid_s8s32_mmla_6x16 (
".inst 0x4e87a457 // smmla v23.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49f // smmla v31.4s, v4.16b, v7.16b\n"
"bge 206b\n"
+ "cbz x26, 212f\n"
"207:" // Height 6: Multiply loop: Skip odd blocks
- "cbz x27, 212f\n"
- "tbz x27, #2, 209f\n"
- "ldr s1, [x26], #0x4\n"
- "ldr s2, [x25], #0x4\n"
- "ldr s3, [x24], #0x4\n"
- "ldr s4, [x23], #0x4\n"
- "ldr s5, [x22], #0x4\n"
- "ldr s6, [x21], #0x4\n"
- "tbz x27, #1, 208f\n"
- "ld1 { v1.h }[2], [x26], #0x2\n"
- "ld1 { v2.h }[2], [x25], #0x2\n"
- "ld1 { v3.h }[2], [x24], #0x2\n"
- "ld1 { v4.h }[2], [x23], #0x2\n"
- "ld1 { v5.h }[2], [x22], #0x2\n"
- "ld1 { v6.h }[2], [x21], #0x2\n"
- "tbz x27, #0, 211f\n"
- "ld1 { v1.b }[6], [x26]\n"
- "ld1 { v2.b }[6], [x25]\n"
- "ld1 { v3.b }[6], [x24]\n"
- "ld1 { v4.b }[6], [x23]\n"
- "ld1 { v5.b }[6], [x22]\n"
- "ld1 { v6.b }[6], [x21]\n"
+ "tbz x26, #2, 209f\n"
+ "ldr s1, [x25], #0x4\n"
+ "ldr s2, [x24], #0x4\n"
+ "ldr s3, [x23], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
+ "ldr s5, [x21], #0x4\n"
+ "ldr s6, [x20], #0x4\n"
+ "tbz x26, #1, 208f\n"
+ "ld1 { v1.h }[2], [x25], #0x2\n"
+ "ld1 { v2.h }[2], [x24], #0x2\n"
+ "ld1 { v3.h }[2], [x23], #0x2\n"
+ "ld1 { v4.h }[2], [x22], #0x2\n"
+ "ld1 { v5.h }[2], [x21], #0x2\n"
+ "ld1 { v6.h }[2], [x20], #0x2\n"
+ "tbz x26, #0, 211f\n"
+ "ld1 { v1.b }[6], [x25]\n"
+ "ld1 { v2.b }[6], [x24]\n"
+ "ld1 { v3.b }[6], [x23]\n"
+ "ld1 { v4.b }[6], [x22]\n"
+ "ld1 { v5.b }[6], [x21]\n"
+ "ld1 { v6.b }[6], [x20]\n"
"b 211f\n"
"208:" // Height 6: Multiply loop: Ragged operand read: partial_1_4
- "tbz x27, #0, 211f\n"
- "ld1 { v1.b }[4], [x26]\n"
- "ld1 { v2.b }[4], [x25]\n"
- "ld1 { v3.b }[4], [x24]\n"
- "ld1 { v4.b }[4], [x23]\n"
- "ld1 { v5.b }[4], [x22]\n"
- "ld1 { v6.b }[4], [x21]\n"
+ "tbz x26, #0, 211f\n"
+ "ld1 { v1.b }[4], [x25]\n"
+ "ld1 { v2.b }[4], [x24]\n"
+ "ld1 { v3.b }[4], [x23]\n"
+ "ld1 { v4.b }[4], [x22]\n"
+ "ld1 { v5.b }[4], [x21]\n"
+ "ld1 { v6.b }[4], [x20]\n"
"b 211f\n"
"209:" // Height 6: Multiply loop: Ragged operand read: partial_2_0
- "tbz x27, #1, 210f\n"
- "ldr h1, [x26], #0x2\n"
- "ldr h2, [x25], #0x2\n"
- "ldr h3, [x24], #0x2\n"
- "ldr h4, [x23], #0x2\n"
- "ldr h5, [x22], #0x2\n"
- "ldr h6, [x21], #0x2\n"
- "tbz x27, #0, 211f\n"
- "ld1 { v1.b }[2], [x26]\n"
- "ld1 { v2.b }[2], [x25]\n"
- "ld1 { v3.b }[2], [x24]\n"
- "ld1 { v4.b }[2], [x23]\n"
- "ld1 { v5.b }[2], [x22]\n"
- "ld1 { v6.b }[2], [x21]\n"
+ "tbz x26, #1, 210f\n"
+ "ldr h1, [x25], #0x2\n"
+ "ldr h2, [x24], #0x2\n"
+ "ldr h3, [x23], #0x2\n"
+ "ldr h4, [x22], #0x2\n"
+ "ldr h5, [x21], #0x2\n"
+ "ldr h6, [x20], #0x2\n"
+ "tbz x26, #0, 211f\n"
+ "ld1 { v1.b }[2], [x25]\n"
+ "ld1 { v2.b }[2], [x24]\n"
+ "ld1 { v3.b }[2], [x23]\n"
+ "ld1 { v4.b }[2], [x22]\n"
+ "ld1 { v5.b }[2], [x21]\n"
+ "ld1 { v6.b }[2], [x20]\n"
"b 211f\n"
"210:" // Height 6: Multiply loop: Ragged operand read: partial_1_0
- "ldr b1, [x26, #0x0]\n"
- "ldr b2, [x25, #0x0]\n"
- "ldr b3, [x24, #0x0]\n"
- "ldr b4, [x23, #0x0]\n"
- "ldr b5, [x22, #0x0]\n"
- "ldr b6, [x21, #0x0]\n"
+ "ldr b1, [x25, #0x0]\n"
+ "ldr b2, [x24, #0x0]\n"
+ "ldr b3, [x23, #0x0]\n"
+ "ldr b4, [x22, #0x0]\n"
+ "ldr b5, [x21, #0x0]\n"
+ "ldr b6, [x20, #0x0]\n"
"211:" // Height 6: Multiply loop: Ragged operand read: Done
- "ldr q7, [x10, #0x0]\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q7, [x9, #0x0]\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
+ ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
".inst 0x4e87a498 // smmla v24.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
".inst 0x4e86a49c // smmla v28.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
".inst 0x4e87a499 // smmla v25.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
+ "ldr q7, [x9, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
".inst 0x4e86a49d // smmla v29.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49a // smmla v26.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
".inst 0x4e86a49e // smmla v30.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
+ "ldr q6, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "add x10, x10, #0x80\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49b // smmla v27.4s, v4.16b, v7.16b\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
".inst 0x4e86a49f // smmla v31.4s, v4.16b, v6.16b\n"
"212:" // Height 6: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 200b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "cmp x10, #0x10\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
- "add x20, x21, x20, LSL #2\n"
- "cmp x11, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
+ "add x22, x23, x19, LSL #2\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #2\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
+ "add x20, x21, x19, LSL #2\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x20, #0x0]\n"
+ "add x19, x20, x19, LSL #2\n"
"uzp1 v15.2d, v16.2d, v20.2d\n"
- "prfm pstl1keep, [x23, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
+ "prfm pstl1keep, [x19, #0x0]\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
- "prfm pstl1keep, [x21, #0x0]\n"
- "prfm pstl1keep, [x20, #0x0]\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
"uzp2 v18.2d, v18.2d, v22.2d\n"
@@ -3273,177 +3285,177 @@ void a64_hybrid_s8s32_mmla_6x16 (
"uzp1 v30.2d, v27.2d, v31.2d\n"
"uzp2 v27.2d, v27.2d, v31.2d\n"
"bge 221f\n"
- "tbz x11, #3, 216f\n"
- "st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x24], #0x10\n"
- "st1 { v9.4s }, [x24], #0x10\n"
- "st1 { v15.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v16.4s }, [x22], #0x10\n"
- "st1 { v17.4s }, [x22], #0x10\n"
- "st1 { v23.4s }, [x21], #0x10\n"
- "st1 { v28.4s }, [x21], #0x10\n"
- "st1 { v24.4s }, [x20], #0x10\n"
- "st1 { v25.4s }, [x20], #0x10\n"
- "tbz x11, #2, 214f\n"
- "st1 { v13.4s }, [x9], #0x10\n"
- "st1 { v10.4s }, [x24], #0x10\n"
- "st1 { v21.4s }, [x23], #0x10\n"
- "st1 { v18.4s }, [x22], #0x10\n"
- "st1 { v29.4s }, [x21], #0x10\n"
- "st1 { v26.4s }, [x20], #0x10\n"
- "tbz x11, #1, 213f\n"
- "str d14, [x9], #0x8\n"
- "str d11, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
- "str d19, [x22], #0x8\n"
- "str d30, [x21], #0x8\n"
- "str d27, [x20], #0x8\n"
- "tbz x11, #0, 220f\n"
- "st1 { v14.s }[2], [x9]\n"
- "st1 { v11.s }[2], [x24]\n"
- "st1 { v22.s }[2], [x23]\n"
- "st1 { v19.s }[2], [x22]\n"
- "st1 { v30.s }[2], [x21]\n"
- "st1 { v27.s }[2], [x20]\n"
+ "tbz x10, #3, 216f\n"
+ "st1 { v7.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x23], #0x10\n"
+ "st1 { v9.4s }, [x23], #0x10\n"
+ "st1 { v15.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v16.4s }, [x21], #0x10\n"
+ "st1 { v17.4s }, [x21], #0x10\n"
+ "st1 { v23.4s }, [x20], #0x10\n"
+ "st1 { v28.4s }, [x20], #0x10\n"
+ "st1 { v24.4s }, [x19], #0x10\n"
+ "st1 { v25.4s }, [x19], #0x10\n"
+ "tbz x10, #2, 214f\n"
+ "st1 { v13.4s }, [x28], #0x10\n"
+ "st1 { v10.4s }, [x23], #0x10\n"
+ "st1 { v21.4s }, [x22], #0x10\n"
+ "st1 { v18.4s }, [x21], #0x10\n"
+ "st1 { v29.4s }, [x20], #0x10\n"
+ "st1 { v26.4s }, [x19], #0x10\n"
+ "tbz x10, #1, 213f\n"
+ "str d14, [x28], #0x8\n"
+ "str d11, [x23], #0x8\n"
+ "str d22, [x22], #0x8\n"
+ "str d19, [x21], #0x8\n"
+ "str d30, [x20], #0x8\n"
+ "str d27, [x19], #0x8\n"
+ "tbz x10, #0, 220f\n"
+ "st1 { v14.s }[2], [x28]\n"
+ "st1 { v11.s }[2], [x23]\n"
+ "st1 { v22.s }[2], [x22]\n"
+ "st1 { v19.s }[2], [x21]\n"
+ "st1 { v30.s }[2], [x20]\n"
+ "st1 { v27.s }[2], [x19]\n"
"b 220f\n"
"213:" // Height 6: Partial direct writeback: partial_1_12
- "tbz x11, #0, 220f\n"
- "str s14, [x9, #0x0]\n"
- "str s11, [x24, #0x0]\n"
- "str s22, [x23, #0x0]\n"
- "str s19, [x22, #0x0]\n"
- "str s30, [x21, #0x0]\n"
- "str s27, [x20, #0x0]\n"
+ "tbz x10, #0, 220f\n"
+ "str s14, [x28, #0x0]\n"
+ "str s11, [x23, #0x0]\n"
+ "str s22, [x22, #0x0]\n"
+ "str s19, [x21, #0x0]\n"
+ "str s30, [x20, #0x0]\n"
+ "str s27, [x19, #0x0]\n"
"b 220f\n"
"214:" // Height 6: Partial direct writeback: partial_2_8
- "tbz x11, #1, 215f\n"
- "str d13, [x9], #0x8\n"
- "str d10, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
- "str d18, [x22], #0x8\n"
- "str d29, [x21], #0x8\n"
- "str d26, [x20], #0x8\n"
- "tbz x11, #0, 220f\n"
- "st1 { v13.s }[2], [x9]\n"
- "st1 { v10.s }[2], [x24]\n"
- "st1 { v21.s }[2], [x23]\n"
- "st1 { v18.s }[2], [x22]\n"
- "st1 { v29.s }[2], [x21]\n"
- "st1 { v26.s }[2], [x20]\n"
+ "tbz x10, #1, 215f\n"
+ "str d13, [x28], #0x8\n"
+ "str d10, [x23], #0x8\n"
+ "str d21, [x22], #0x8\n"
+ "str d18, [x21], #0x8\n"
+ "str d29, [x20], #0x8\n"
+ "str d26, [x19], #0x8\n"
+ "tbz x10, #0, 220f\n"
+ "st1 { v13.s }[2], [x28]\n"
+ "st1 { v10.s }[2], [x23]\n"
+ "st1 { v21.s }[2], [x22]\n"
+ "st1 { v18.s }[2], [x21]\n"
+ "st1 { v29.s }[2], [x20]\n"
+ "st1 { v26.s }[2], [x19]\n"
"b 220f\n"
"215:" // Height 6: Partial direct writeback: partial_1_8
- "tbz x11, #0, 220f\n"
- "str s13, [x9, #0x0]\n"
- "str s10, [x24, #0x0]\n"
- "str s21, [x23, #0x0]\n"
- "str s18, [x22, #0x0]\n"
- "str s29, [x21, #0x0]\n"
- "str s26, [x20, #0x0]\n"
+ "tbz x10, #0, 220f\n"
+ "str s13, [x28, #0x0]\n"
+ "str s10, [x23, #0x0]\n"
+ "str s21, [x22, #0x0]\n"
+ "str s18, [x21, #0x0]\n"
+ "str s29, [x20, #0x0]\n"
+ "str s26, [x19, #0x0]\n"
"b 220f\n"
"216:" // Height 6: Partial direct writeback: partial_4_0
- "tbz x11, #2, 218f\n"
- "st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x24], #0x10\n"
- "st1 { v15.4s }, [x23], #0x10\n"
- "st1 { v16.4s }, [x22], #0x10\n"
- "st1 { v23.4s }, [x21], #0x10\n"
- "st1 { v24.4s }, [x20], #0x10\n"
- "tbz x11, #1, 217f\n"
- "str d12, [x9], #0x8\n"
- "str d9, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d17, [x22], #0x8\n"
- "str d28, [x21], #0x8\n"
- "str d25, [x20], #0x8\n"
- "tbz x11, #0, 220f\n"
- "st1 { v12.s }[2], [x9]\n"
- "st1 { v9.s }[2], [x24]\n"
- "st1 { v20.s }[2], [x23]\n"
- "st1 { v17.s }[2], [x22]\n"
- "st1 { v28.s }[2], [x21]\n"
- "st1 { v25.s }[2], [x20]\n"
+ "tbz x10, #2, 218f\n"
+ "st1 { v7.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x23], #0x10\n"
+ "st1 { v15.4s }, [x22], #0x10\n"
+ "st1 { v16.4s }, [x21], #0x10\n"
+ "st1 { v23.4s }, [x20], #0x10\n"
+ "st1 { v24.4s }, [x19], #0x10\n"
+ "tbz x10, #1, 217f\n"
+ "str d12, [x28], #0x8\n"
+ "str d9, [x23], #0x8\n"
+ "str d20, [x22], #0x8\n"
+ "str d17, [x21], #0x8\n"
+ "str d28, [x20], #0x8\n"
+ "str d25, [x19], #0x8\n"
+ "tbz x10, #0, 220f\n"
+ "st1 { v12.s }[2], [x28]\n"
+ "st1 { v9.s }[2], [x23]\n"
+ "st1 { v20.s }[2], [x22]\n"
+ "st1 { v17.s }[2], [x21]\n"
+ "st1 { v28.s }[2], [x20]\n"
+ "st1 { v25.s }[2], [x19]\n"
"b 220f\n"
"217:" // Height 6: Partial direct writeback: partial_1_4
- "tbz x11, #0, 220f\n"
- "str s12, [x9, #0x0]\n"
- "str s9, [x24, #0x0]\n"
- "str s20, [x23, #0x0]\n"
- "str s17, [x22, #0x0]\n"
- "str s28, [x21, #0x0]\n"
- "str s25, [x20, #0x0]\n"
+ "tbz x10, #0, 220f\n"
+ "str s12, [x28, #0x0]\n"
+ "str s9, [x23, #0x0]\n"
+ "str s20, [x22, #0x0]\n"
+ "str s17, [x21, #0x0]\n"
+ "str s28, [x20, #0x0]\n"
+ "str s25, [x19, #0x0]\n"
"b 220f\n"
"218:" // Height 6: Partial direct writeback: partial_2_0
- "tbz x11, #1, 219f\n"
- "str d7, [x9], #0x8\n"
- "str d8, [x24], #0x8\n"
- "str d15, [x23], #0x8\n"
- "str d16, [x22], #0x8\n"
- "str d23, [x21], #0x8\n"
- "str d24, [x20], #0x8\n"
- "tbz x11, #0, 220f\n"
- "st1 { v7.s }[2], [x9]\n"
- "st1 { v8.s }[2], [x24]\n"
- "st1 { v15.s }[2], [x23]\n"
- "st1 { v16.s }[2], [x22]\n"
- "st1 { v23.s }[2], [x21]\n"
- "st1 { v24.s }[2], [x20]\n"
+ "tbz x10, #1, 219f\n"
+ "str d7, [x28], #0x8\n"
+ "str d8, [x23], #0x8\n"
+ "str d15, [x22], #0x8\n"
+ "str d16, [x21], #0x8\n"
+ "str d23, [x20], #0x8\n"
+ "str d24, [x19], #0x8\n"
+ "tbz x10, #0, 220f\n"
+ "st1 { v7.s }[2], [x28]\n"
+ "st1 { v8.s }[2], [x23]\n"
+ "st1 { v15.s }[2], [x22]\n"
+ "st1 { v16.s }[2], [x21]\n"
+ "st1 { v23.s }[2], [x20]\n"
+ "st1 { v24.s }[2], [x19]\n"
"b 220f\n"
"219:" // Height 6: Partial direct writeback: partial_1_0
- "str s7, [x9, #0x0]\n"
- "str s8, [x24, #0x0]\n"
- "str s15, [x23, #0x0]\n"
- "str s16, [x22, #0x0]\n"
- "str s23, [x21, #0x0]\n"
- "str s24, [x20, #0x0]\n"
+ "str s7, [x28, #0x0]\n"
+ "str s8, [x23, #0x0]\n"
+ "str s15, [x22, #0x0]\n"
+ "str s16, [x21, #0x0]\n"
+ "str s23, [x20, #0x0]\n"
+ "str s24, [x19, #0x0]\n"
"220:" // Height 6: Partial direct writeback: Done
"b 222f\n"
"221:" // Height 6: Full writeback
- "str q7, [x9, #0x0]\n"
- "str q12, [x9, #0x10]\n"
- "str q13, [x9, #0x20]\n"
- "str q14, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q8, [x24, #0x0]\n"
- "str q9, [x24, #0x10]\n"
- "str q10, [x24, #0x20]\n"
- "str q11, [x24, #0x30]\n"
- "str q15, [x23, #0x0]\n"
- "str q20, [x23, #0x10]\n"
- "str q21, [x23, #0x20]\n"
- "str q22, [x23, #0x30]\n"
- "str q16, [x22, #0x0]\n"
- "str q17, [x22, #0x10]\n"
- "str q18, [x22, #0x20]\n"
- "str q19, [x22, #0x30]\n"
- "str q23, [x21, #0x0]\n"
- "str q28, [x21, #0x10]\n"
- "str q29, [x21, #0x20]\n"
- "str q30, [x21, #0x30]\n"
- "str q24, [x20, #0x0]\n"
- "str q25, [x20, #0x10]\n"
- "str q26, [x20, #0x20]\n"
- "str q27, [x20, #0x30]\n"
+ "str q7, [x28, #0x0]\n"
+ "str q12, [x28, #0x10]\n"
+ "str q13, [x28, #0x20]\n"
+ "str q14, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q8, [x23, #0x0]\n"
+ "str q9, [x23, #0x10]\n"
+ "str q10, [x23, #0x20]\n"
+ "str q11, [x23, #0x30]\n"
+ "str q15, [x22, #0x0]\n"
+ "str q20, [x22, #0x10]\n"
+ "str q21, [x22, #0x20]\n"
+ "str q22, [x22, #0x30]\n"
+ "str q16, [x21, #0x0]\n"
+ "str q17, [x21, #0x10]\n"
+ "str q18, [x21, #0x20]\n"
+ "str q19, [x21, #0x30]\n"
+ "str q23, [x20, #0x0]\n"
+ "str q28, [x20, #0x10]\n"
+ "str q29, [x20, #0x20]\n"
+ "str q30, [x20, #0x30]\n"
+ "str q24, [x19, #0x0]\n"
+ "str q25, [x19, #0x10]\n"
+ "str q26, [x19, #0x20]\n"
+ "str q27, [x19, #0x30]\n"
"222:" // Height 6: Writeback done
- "subs x11, x11, #0x10\n"
+ "subs x10, x10, #0x10\n"
"bgt 187b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 224f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 223f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"223:" // Update direct input
- "mov x20, #0x6\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x6\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"224:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
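
The hunks above renumber the kernel's scratch registers so that x19 is back in use, and the asm clobber list gains "x19" to match. Under AAPCS64, x19-x28 are callee-saved, so any such register that hand-written assembly overwrites must be declared clobbered for the compiler to preserve it around the block. Below is a minimal, self-contained sketch of that convention; the function, loop, and names are illustrative only and are not part of ComputeLibrary or this patch.

#ifdef __aarch64__
#include <cstdint>

// Illustrative only: x19 is callee-saved under AAPCS64, so because the asm
// block writes it, it is named in the clobber list and the compiler will
// save and restore it around this statement.
int64_t sum_i64(const int64_t *data, int64_t n) {
    int64_t acc = 0;
    const int64_t *p = data;
    __asm__ volatile(
        "cbz %x[n], 2f\n"
        "1:\n"
        "ldr x19, [%x[p]], #8\n"        // use x19 as a scratch register
        "add %x[acc], %x[acc], x19\n"
        "subs %x[n], %x[n], #1\n"
        "bne 1b\n"
        "2:\n"
        : [acc] "+r" (acc), [p] "+r" (p), [n] "+r" (n)
        :
        : "cc", "memory", "x19");       // x19 declared, as in the list above
    return acc;
}
#endif // __aarch64__
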
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/a55.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/a55.cpp
index b9caf545f1..c410374357 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/a55.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/a55.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -85,220 +85,232 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"cmp %x[M], #0x2\n"
"bgt 61f\n"
"beq 31f\n"
- "mov x16, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
"movi v15.16b, #0x1\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x9, %x[col_bias]\n"
"bic %x[flags], %x[flags], #0x80000000\n"
- "ldr x15, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x14, %x[output_ptr]\n"
- "ldr x13, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"2:" // Height 1: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
"movi v18.4s, #0x0\n"
"movi v19.4s, #0x0\n"
"3:" // Height 1: setup done
- "mov x12, #0x0\n"
+ "mov x27, #0x0\n"
"4:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w11, [x20, x12, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 5f\n"
- "ldr x21, [%x[input_ptr], x12, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x10, [x21, #0x0]\n"
- "cbnz x12, 6f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x10, x10, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 6f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
"b 6f\n"
"5:" // Height 1: setup direct input
- "mov x10, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"6:" // Height 1: input setup done
- "cmp x11, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 11f\n"
- "ldr q0, [x10, #0x0]\n"
- "cmp x11, #0x20\n"
- "ldr q4, [x13, #0x0]\n"
- "ldr q5, [x13, #0x10]\n"
- "ldr q6, [x13, #0x20]\n"
- "ldr q7, [x13, #0x30]\n"
- "ldr q8, [x13, #0x40]\n"
- "ldr q9, [x13, #0x50]\n"
- "ldr q10, [x13, #0x60]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q4, [x10, #0x0]\n"
+ "cmp x26, #0x20\n"
"blt 9f\n"
"7:" // Height 1: Multiply loop: Main loop head
".inst 0x6f80e090 // udot v16.4s, v4.16b, v0.4b[0]\n"
- "ldr d4, [x13, #0x70]\n"
- "ldr x9, [x13, #0x78]\n"
+ "ldr d5, [x10, #0x10]\n"
+ "ldr x24, [x10, #0x18]\n"
+ "add x25, x25, #0x10\n"
+ "ldr d6, [x10, #0x20]\n"
+ "ldr x23, [x10, #0x28]\n"
+ "mov v5.d[1], x24\n"
+ "ldr d7, [x10, #0x30]\n"
+ "ldr x19, [x10, #0x38]\n"
".inst 0x6f80e0b1 // udot v17.4s, v5.16b, v0.4b[0]\n"
- "ldr d5, [x13, #0x80]\n"
+ "mov v6.d[1], x23\n"
+ "ldr d8, [x10, #0x40]\n"
".inst 0x6f80e0d2 // udot v18.4s, v6.16b, v0.4b[0]\n"
- "ldr d6, [x13, #0x90]\n"
+ "mov v7.d[1], x19\n"
+ "ldr x23, [x10, #0x48]\n"
".inst 0x6f80e0f3 // udot v19.4s, v7.16b, v0.4b[0]\n"
- "ldr d7, [x13, #0xa0]\n"
- "mov v4.d[1], x9\n"
- "ldr x28, [x13, #0x88]\n"
+ "ldr d9, [x10, #0x50]\n"
+ "ldr x19, [x10, #0x58]\n"
+ "mov v8.d[1], x23\n"
+ "ldr d10, [x10, #0x60]\n"
+ "ldr x23, [x10, #0x68]\n"
".inst 0x6fa0e110 // udot v16.4s, v8.16b, v0.4b[1]\n"
- "ldr d8, [x13, #0xb0]\n"
+ "mov v9.d[1], x19\n"
+ "ldr d4, [x10, #0x70]\n"
".inst 0x6fa0e131 // udot v17.4s, v9.16b, v0.4b[1]\n"
- "ldr d9, [x13, #0xc0]\n"
+ "mov v10.d[1], x23\n"
+ "ldr x19, [x10, #0x78]\n"
".inst 0x6fa0e152 // udot v18.4s, v10.16b, v0.4b[1]\n"
- "ldr d10, [x13, #0xd0]\n"
+ "ldr d5, [x10, #0x80]\n"
+ "ldr x24, [x10, #0x88]\n"
+ "mov v4.d[1], x19\n"
+ "ldr d6, [x10, #0x90]\n"
+ "ldr x23, [x10, #0x98]\n"
".inst 0x6fa0e093 // udot v19.4s, v4.16b, v0.4b[1]\n"
- "ldr d4, [x13, #0xe0]\n"
- "mov v5.d[1], x28\n"
- "ldr x27, [x13, #0x98]\n"
- "mov v6.d[1], x27\n"
- "ldr x26, [x13, #0xa8]\n"
- "mov v7.d[1], x26\n"
- "ldr x25, [x13, #0xb8]\n"
- "mov v8.d[1], x25\n"
- "ldr x24, [x13, #0xc8]\n"
+ "mov v5.d[1], x24\n"
+ "ldr d7, [x10, #0xa0]\n"
".inst 0x6f80e8b0 // udot v16.4s, v5.16b, v0.4b[2]\n"
- "ldr d5, [x13, #0xf0]\n"
+ "mov v6.d[1], x23\n"
+ "ldr x19, [x10, #0xa8]\n"
".inst 0x6f80e8d1 // udot v17.4s, v6.16b, v0.4b[2]\n"
- "ldr x20, [x13, #0xd8]\n"
+ "ldr d8, [x10, #0xb0]\n"
+ "ldr x23, [x10, #0xb8]\n"
+ "mov v7.d[1], x19\n"
+ "ldr d9, [x10, #0xc0]\n"
+ "ldr x19, [x10, #0xc8]\n"
".inst 0x6f80e8f2 // udot v18.4s, v7.16b, v0.4b[2]\n"
- "ldr x9, [x13, #0xe8]\n"
+ "mov v8.d[1], x23\n"
+ "ldr d10, [x10, #0xd0]\n"
".inst 0x6f80e913 // udot v19.4s, v8.16b, v0.4b[2]\n"
- "ldr x28, [x13, #0xf8]\n"
- "mov v9.d[1], x24\n"
- "mov v10.d[1], x20\n"
- "add x10, x10, #0x10\n"
- "mov v4.d[1], x9\n"
- "add x13, x13, #0x100\n"
- "mov v5.d[1], x28\n"
+ "mov v9.d[1], x19\n"
+ "ldr x23, [x10, #0xd8]\n"
".inst 0x6fa0e930 // udot v16.4s, v9.16b, v0.4b[3]\n"
+ "ldr d4, [x10, #0xe0]\n"
+ "ldr x19, [x10, #0xe8]\n"
+ "mov v10.d[1], x23\n"
+ "ldr d5, [x10, #0xf0]\n"
+ "ldr x24, [x10, #0xf8]\n"
+ "add x10, x10, #0x100\n"
".inst 0x6fa0e951 // udot v17.4s, v10.16b, v0.4b[3]\n"
+ "mov v4.d[1], x19\n"
".inst 0x6fa0e892 // udot v18.4s, v4.16b, v0.4b[3]\n"
+ "mov v5.d[1], x24\n"
".inst 0x6fa0e8b3 // udot v19.4s, v5.16b, v0.4b[3]\n"
"tbnz %x[flags], #31, 8f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
"8:" // Height 1: Multiply loop: unique 1: skip row sum
- "ldr q0, [x10, #0x0]\n"
- "sub x11, x11, #0x10\n"
- "ldr q4, [x13, #0x0]\n"
- "cmp x11, #0x20\n"
- "ldr q5, [x13, #0x10]\n"
- "ldr q6, [x13, #0x20]\n"
- "ldr q7, [x13, #0x30]\n"
- "ldr q8, [x13, #0x40]\n"
- "ldr q9, [x13, #0x50]\n"
- "ldr q10, [x13, #0x60]\n"
- "prfm pldl1keep, [x10, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x26, x26, #0x10\n"
+ "ldr q0, [x25, #0x0]\n"
+ "cmp x26, #0x20\n"
+ "ldr q4, [x10, #0x0]\n"
"bge 7b\n"
"9:" // Height 1: Multiply loop: Single iteration only
".inst 0x6f80e090 // udot v16.4s, v4.16b, v0.4b[0]\n"
- "ldr q4, [x13, #0x70]\n"
+ "ldr q5, [x10, #0x10]\n"
+ "ldr q6, [x10, #0x20]\n"
+ "sub x26, x26, #0x10\n"
+ "ldr q7, [x10, #0x30]\n"
+ "add x25, x25, #0x10\n"
".inst 0x6f80e0b1 // udot v17.4s, v5.16b, v0.4b[0]\n"
- "ldr q5, [x13, #0x80]\n"
+ "ldr q8, [x10, #0x40]\n"
".inst 0x6f80e0d2 // udot v18.4s, v6.16b, v0.4b[0]\n"
- "ldr q6, [x13, #0x90]\n"
+ "ldr q9, [x10, #0x50]\n"
".inst 0x6f80e0f3 // udot v19.4s, v7.16b, v0.4b[0]\n"
- "ldr q7, [x13, #0xa0]\n"
+ "ldr q10, [x10, #0x60]\n"
".inst 0x6fa0e110 // udot v16.4s, v8.16b, v0.4b[1]\n"
- "ldr q8, [x13, #0xb0]\n"
+ "ldr q4, [x10, #0x70]\n"
".inst 0x6fa0e131 // udot v17.4s, v9.16b, v0.4b[1]\n"
- "ldr q9, [x13, #0xc0]\n"
+ "ldr q5, [x10, #0x80]\n"
".inst 0x6fa0e152 // udot v18.4s, v10.16b, v0.4b[1]\n"
- "ldr q10, [x13, #0xd0]\n"
+ "ldr q6, [x10, #0x90]\n"
".inst 0x6fa0e093 // udot v19.4s, v4.16b, v0.4b[1]\n"
- "ldr q4, [x13, #0xe0]\n"
+ "ldr q7, [x10, #0xa0]\n"
".inst 0x6f80e8b0 // udot v16.4s, v5.16b, v0.4b[2]\n"
- "ldr q5, [x13, #0xf0]\n"
+ "ldr q8, [x10, #0xb0]\n"
".inst 0x6f80e8d1 // udot v17.4s, v6.16b, v0.4b[2]\n"
- "sub x11, x11, #0x10\n"
+ "ldr q9, [x10, #0xc0]\n"
".inst 0x6f80e8f2 // udot v18.4s, v7.16b, v0.4b[2]\n"
- "add x10, x10, #0x10\n"
+ "ldr q10, [x10, #0xd0]\n"
".inst 0x6f80e913 // udot v19.4s, v8.16b, v0.4b[2]\n"
- "add x13, x13, #0x100\n"
+ "ldr q4, [x10, #0xe0]\n"
".inst 0x6fa0e930 // udot v16.4s, v9.16b, v0.4b[3]\n"
+ "ldr q5, [x10, #0xf0]\n"
".inst 0x6fa0e951 // udot v17.4s, v10.16b, v0.4b[3]\n"
+ "add x10, x10, #0x100\n"
".inst 0x6fa0e892 // udot v18.4s, v4.16b, v0.4b[3]\n"
".inst 0x6fa0e8b3 // udot v19.4s, v5.16b, v0.4b[3]\n"
"tbnz %x[flags], #31, 10f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
"10:" // Height 1: Multiply loop: unique 2: skip row sum
- "prfm pldl1keep, [x10, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"11:" // Height 1: Multiply loop: Main loop skip
- "cbz x11, 18f\n"
- "cmp x11, #0x4\n"
+ "cbz x26, 18f\n"
+ "cmp x26, #0x4\n"
"blt 14f\n"
"12:" // Height 1: Multiply loop: Odd block loop
- "ldr s0, [x10], #0x4\n"
+ "ldr s0, [x25], #0x4\n"
"tbnz %x[flags], #31, 13f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
"13:" // Height 1: Multiply loop: unique 3: skip row sum
- "ldr q6, [x13, #0x0]\n"
- "sub x11, x11, #0x4\n"
- "ldr q7, [x13, #0x10]\n"
- "cmp x11, #0x4\n"
- "ldr q8, [x13, #0x20]\n"
+ "ldr q6, [x10, #0x0]\n"
+ "sub x26, x26, #0x4\n"
+ "ldr q7, [x10, #0x10]\n"
+ "cmp x26, #0x4\n"
+ "ldr q8, [x10, #0x20]\n"
".inst 0x6f80e0d0 // udot v16.4s, v6.16b, v0.4b[0]\n"
- "ldr q9, [x13, #0x30]\n"
+ "ldr q9, [x10, #0x30]\n"
".inst 0x6f80e0f1 // udot v17.4s, v7.16b, v0.4b[0]\n"
+ "add x10, x10, #0x40\n"
".inst 0x6f80e112 // udot v18.4s, v8.16b, v0.4b[0]\n"
- "add x13, x13, #0x40\n"
".inst 0x6f80e133 // udot v19.4s, v9.16b, v0.4b[0]\n"
"bge 12b\n"
+ "cbz x26, 18f\n"
"14:" // Height 1: Multiply loop: Skip odd blocks
- "cbz x11, 18f\n"
- "tbz x11, #1, 15f\n"
- "ldr h0, [x10], #0x2\n"
- "tbz x11, #0, 16f\n"
- "ld1 { v0.b }[2], [x10]\n"
+ "tbz x26, #1, 15f\n"
+ "ldr h0, [x25], #0x2\n"
+ "tbz x26, #0, 16f\n"
+ "ld1 { v0.b }[2], [x25]\n"
"b 16f\n"
"15:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x10, #0x0]\n"
+ "ldr b0, [x25, #0x0]\n"
"16:" // Height 1: Multiply loop: Ragged operand read: Done
"tbnz %x[flags], #31, 17f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
"17:" // Height 1: Multiply loop: unique 4: skip row sum
- "ldr q10, [x13, #0x0]\n"
+ "ldr q10, [x10, #0x0]\n"
+ "ldr q4, [x10, #0x10]\n"
+ "ldr q5, [x10, #0x20]\n"
".inst 0x6f80e150 // udot v16.4s, v10.16b, v0.4b[0]\n"
- "ldr q4, [x13, #0x10]\n"
+ "ldr q6, [x10, #0x30]\n"
".inst 0x6f80e091 // udot v17.4s, v4.16b, v0.4b[0]\n"
- "ldr q5, [x13, #0x20]\n"
+ "add x10, x10, #0x40\n"
".inst 0x6f80e0b2 // udot v18.4s, v5.16b, v0.4b[0]\n"
- "ldr q6, [x13, #0x30]\n"
".inst 0x6f80e0d3 // udot v19.4s, v6.16b, v0.4b[0]\n"
- "add x13, x13, #0x40\n"
"18:" // Height 1: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x12, x12, #0x1\n"
- "cmp x12, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 4b\n"
- "prfm pstl1keep, [x14, #0x0]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
"tbnz %x[flags], #31, 19f\n"
"addp v11.4s, v11.4s, v11.4s\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1r { v1.4s }, [x23]\n"
+ "add x22, %x[qp], %[b_offset]\n"
+ "ld1r { v1.4s }, [x22]\n"
"neg v1.4s, v1.4s\n"
"addp v11.4s, v11.4s, v11.4s\n"
"mul v11.4s, v11.4s, v1.4s\n"
"19:" // Height 1: skip row sum fixup
- "ldr q0, [x16, #0x0]\n"
"add v16.4s, v16.4s, v11.4s\n"
- "ldr q1, [x16, #0x10]\n"
"add v17.4s, v17.4s, v11.4s\n"
- "ldr q2, [x16, #0x20]\n"
"add v18.4s, v18.4s, v11.4s\n"
- "ldr q3, [x16, #0x30]\n"
"add v19.4s, v19.4s, v11.4s\n"
+ "ldr q0, [x9, #0x0]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
+ "ldr q1, [x9, #0x10]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
+ "ldr q2, [x9, #0x20]\n"
+ "add x22, %x[qp], %[per_layer_mul]\n"
"add v16.4s, v16.4s, v0.4s\n"
+ "ldr q3, [x9, #0x30]\n"
"add v17.4s, v17.4s, v1.4s\n"
+ "ld1r { v0.4s }, [x23]\n"
"add v18.4s, v18.4s, v2.4s\n"
+ "ld1r { v4.4s }, [x22]\n"
"add v19.4s, v19.4s, v3.4s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x23]\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x23]\n"
+ "add x9, x9, #0x40\n"
"sqrdmulh v16.4s, v16.4s, v4.4s\n"
"sqrdmulh v17.4s, v17.4s, v4.4s\n"
"sqrdmulh v18.4s, v18.4s, v4.4s\n"
"sqrdmulh v19.4s, v19.4s, v4.4s\n"
- "add x16, x16, #0x40\n"
"tbz %x[flags], #5, 20f\n"
"and v4.16b, v16.16b, v0.16b\n"
"and v5.16b, v17.16b, v0.16b\n"
@@ -317,90 +329,90 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"srshl v17.4s, v17.4s, v0.4s\n"
"srshl v18.4s, v18.4s, v0.4s\n"
"srshl v19.4s, v19.4s, v0.4s\n"
- "add x23, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
+ "add x22, %x[qp], %[c_offset]\n"
+ "add x23, %x[qp], %[minval]\n"
+ "ld1r { v4.4s }, [x22]\n"
+ "add x22, %x[qp], %[maxval]\n"
+ "ld1r { v5.4s }, [x23]\n"
+ "cmp x11, #0x10\n"
+ "ld1r { v6.4s }, [x22]\n"
"add v16.4s, v16.4s, v4.4s\n"
"add v17.4s, v17.4s, v4.4s\n"
"add v18.4s, v18.4s, v4.4s\n"
"add v19.4s, v19.4s, v4.4s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x23]\n"
"smin v16.4s, v16.4s, v6.4s\n"
"smin v17.4s, v17.4s, v6.4s\n"
"smin v18.4s, v18.4s, v6.4s\n"
"smin v19.4s, v19.4s, v6.4s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x23]\n"
"smax v16.4s, v16.4s, v5.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
"smax v18.4s, v18.4s, v5.4s\n"
"smax v19.4s, v19.4s, v5.4s\n"
"uzp1 v16.8h, v16.8h, v17.8h\n"
"uzp1 v17.8h, v18.8h, v19.8h\n"
- "cmp x15, #0x10\n"
"uzp1 v16.16b, v16.16b, v17.16b\n"
"bge 29f\n"
- "tbz x15, #3, 24f\n"
- "str d16, [x14], #0x8\n"
- "tbz x15, #2, 22f\n"
- "st1 { v16.s }[2], [x14], #0x4\n"
- "tbz x15, #1, 21f\n"
- "st1 { v16.h }[6], [x14], #0x2\n"
- "tbz x15, #0, 28f\n"
- "st1 { v16.b }[14], [x14]\n"
+ "tbz x11, #3, 24f\n"
+ "str d16, [x28], #0x8\n"
+ "tbz x11, #2, 22f\n"
+ "st1 { v16.s }[2], [x28], #0x4\n"
+ "tbz x11, #1, 21f\n"
+ "st1 { v16.h }[6], [x28], #0x2\n"
+ "tbz x11, #0, 28f\n"
+ "st1 { v16.b }[14], [x28]\n"
"b 28f\n"
"21:" // Height 1: Partial direct writeback: partial_1_12
- "tbz x15, #0, 28f\n"
- "st1 { v16.b }[12], [x14]\n"
+ "tbz x11, #0, 28f\n"
+ "st1 { v16.b }[12], [x28]\n"
"b 28f\n"
"22:" // Height 1: Partial direct writeback: partial_2_8
- "tbz x15, #1, 23f\n"
- "st1 { v16.h }[4], [x14], #0x2\n"
- "tbz x15, #0, 28f\n"
- "st1 { v16.b }[10], [x14]\n"
+ "tbz x11, #1, 23f\n"
+ "st1 { v16.h }[4], [x28], #0x2\n"
+ "tbz x11, #0, 28f\n"
+ "st1 { v16.b }[10], [x28]\n"
"b 28f\n"
"23:" // Height 1: Partial direct writeback: partial_1_8
- "tbz x15, #0, 28f\n"
- "st1 { v16.b }[8], [x14]\n"
+ "tbz x11, #0, 28f\n"
+ "st1 { v16.b }[8], [x28]\n"
"b 28f\n"
"24:" // Height 1: Partial direct writeback: partial_4_0
- "tbz x15, #2, 26f\n"
- "str s16, [x14], #0x4\n"
- "tbz x15, #1, 25f\n"
- "st1 { v16.h }[2], [x14], #0x2\n"
- "tbz x15, #0, 28f\n"
- "st1 { v16.b }[6], [x14]\n"
+ "tbz x11, #2, 26f\n"
+ "str s16, [x28], #0x4\n"
+ "tbz x11, #1, 25f\n"
+ "st1 { v16.h }[2], [x28], #0x2\n"
+ "tbz x11, #0, 28f\n"
+ "st1 { v16.b }[6], [x28]\n"
"b 28f\n"
"25:" // Height 1: Partial direct writeback: partial_1_4
- "tbz x15, #0, 28f\n"
- "st1 { v16.b }[4], [x14]\n"
+ "tbz x11, #0, 28f\n"
+ "st1 { v16.b }[4], [x28]\n"
"b 28f\n"
"26:" // Height 1: Partial direct writeback: partial_2_0
- "tbz x15, #1, 27f\n"
- "str h16, [x14], #0x2\n"
- "tbz x15, #0, 28f\n"
- "st1 { v16.b }[2], [x14]\n"
+ "tbz x11, #1, 27f\n"
+ "str h16, [x28], #0x2\n"
+ "tbz x11, #0, 28f\n"
+ "st1 { v16.b }[2], [x28]\n"
"b 28f\n"
"27:" // Height 1: Partial direct writeback: partial_1_0
- "str b16, [x14, #0x0]\n"
+ "str b16, [x28, #0x0]\n"
"28:" // Height 1: Partial direct writeback: Done
"b 30f\n"
"29:" // Height 1: Full writeback
- "str q16, [x14, #0x0]\n"
- "add x14, x14, #0x10\n"
+ "str q16, [x28, #0x0]\n"
+ "add x28, x28, #0x10\n"
"30:" // Height 1: Writeback done
- "subs x15, x15, #0x10\n"
+ "subs x11, x11, #0x10\n"
"bgt 2b\n"
"b 122f\n"
"31:" // Height 2
- "mov x16, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"movi v12.4s, #0x0\n"
- "bic %x[flags], %x[flags], #0x80000000\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"movi v15.16b, #0x1\n"
- "ldr x15, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x13, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x14, %x[output_ptr]\n"
+ "mov x9, %x[col_bias]\n"
+ "bic %x[flags], %x[flags], #0x80000000\n"
+ "mov x28, %x[output_ptr]\n"
"32:" // Height 2: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -411,98 +423,110 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"movi v22.4s, #0x0\n"
"movi v23.4s, #0x0\n"
"33:" // Height 2: setup done
- "mov x12, #0x0\n"
+ "mov x27, #0x0\n"
"34:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w11, [x20, x12, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 35f\n"
- "ldr x21, [%x[input_ptr], x12, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x10, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "cbnz x12, 36f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x10, x10, x20\n"
- "add x23, x23, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "cbnz x27, 36f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x22, x22, x19\n"
"b 36f\n"
"35:" // Height 2: setup direct input
- "mov x10, %x[input_ptr]\n"
- "add x23, x10, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x22, x25, x19\n"
"36:" // Height 2: input setup done
- "cmp x11, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 41f\n"
- "ldr q0, [x10, #0x0]\n"
- "cmp x11, #0x20\n"
- "ldr q1, [x23, #0x0]\n"
- "ldr q4, [x13, #0x0]\n"
- "ldr q5, [x13, #0x10]\n"
- "ldr q6, [x13, #0x20]\n"
- "ldr q7, [x13, #0x30]\n"
- "ldr q8, [x13, #0x40]\n"
- "ldr q9, [x13, #0x50]\n"
- "ldr q10, [x13, #0x60]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x22, #0x0]\n"
+ "cmp x26, #0x20\n"
+ "ldr q4, [x10, #0x0]\n"
"blt 39f\n"
"37:" // Height 2: Multiply loop: Main loop head
".inst 0x6f80e090 // udot v16.4s, v4.16b, v0.4b[0]\n"
- "ldr x9, [x13, #0x78]\n"
+ "ldr d5, [x10, #0x10]\n"
".inst 0x6f81e094 // udot v20.4s, v4.16b, v1.4b[0]\n"
- "ldr d4, [x13, #0x70]\n"
+ "ldr x24, [x10, #0x18]\n"
+ "ldr d6, [x10, #0x20]\n"
+ "add x25, x25, #0x10\n"
+ "ldr x23, [x10, #0x28]\n"
+ "add x22, x22, #0x10\n"
+ "mov v5.d[1], x24\n"
+ "ldr d7, [x10, #0x30]\n"
+ "ldr x19, [x10, #0x38]\n"
".inst 0x6f80e0b1 // udot v17.4s, v5.16b, v0.4b[0]\n"
- "mov v4.d[1], x9\n"
+ "mov v6.d[1], x23\n"
".inst 0x6f81e0b5 // udot v21.4s, v5.16b, v1.4b[0]\n"
- "ldr d5, [x13, #0x80]\n"
+ "ldr d8, [x10, #0x40]\n"
".inst 0x6f80e0d2 // udot v18.4s, v6.16b, v0.4b[0]\n"
- "ldr x28, [x13, #0x88]\n"
+ "mov v7.d[1], x19\n"
".inst 0x6f81e0d6 // udot v22.4s, v6.16b, v1.4b[0]\n"
- "ldr d6, [x13, #0x90]\n"
+ "ldr x23, [x10, #0x48]\n"
".inst 0x6f80e0f3 // udot v19.4s, v7.16b, v0.4b[0]\n"
- "ldr x27, [x13, #0x98]\n"
+ "ldr d9, [x10, #0x50]\n"
".inst 0x6f81e0f7 // udot v23.4s, v7.16b, v1.4b[0]\n"
- "ldr d7, [x13, #0xa0]\n"
- "ldr x26, [x13, #0xa8]\n"
+ "ldr x19, [x10, #0x58]\n"
+ "mov v8.d[1], x23\n"
+ "ldr d10, [x10, #0x60]\n"
+ "ldr x23, [x10, #0x68]\n"
".inst 0x6fa0e110 // udot v16.4s, v8.16b, v0.4b[1]\n"
+ "mov v9.d[1], x19\n"
".inst 0x6fa1e114 // udot v20.4s, v8.16b, v1.4b[1]\n"
- "ldr d8, [x13, #0xb0]\n"
- "ldr x25, [x13, #0xb8]\n"
+ "ldr d4, [x10, #0x70]\n"
".inst 0x6fa0e131 // udot v17.4s, v9.16b, v0.4b[1]\n"
+ "mov v10.d[1], x23\n"
".inst 0x6fa1e135 // udot v21.4s, v9.16b, v1.4b[1]\n"
- "ldr d9, [x13, #0xc0]\n"
+ "ldr x19, [x10, #0x78]\n"
".inst 0x6fa0e152 // udot v18.4s, v10.16b, v0.4b[1]\n"
- "mov v5.d[1], x28\n"
+ "ldr d5, [x10, #0x80]\n"
".inst 0x6fa1e156 // udot v22.4s, v10.16b, v1.4b[1]\n"
- "ldr d10, [x13, #0xd0]\n"
+ "ldr x24, [x10, #0x88]\n"
+ "mov v4.d[1], x19\n"
+ "ldr d6, [x10, #0x90]\n"
+ "ldr x23, [x10, #0x98]\n"
".inst 0x6fa0e093 // udot v19.4s, v4.16b, v0.4b[1]\n"
- "mov v6.d[1], x27\n"
+ "mov v5.d[1], x24\n"
".inst 0x6fa1e097 // udot v23.4s, v4.16b, v1.4b[1]\n"
- "ldr d4, [x13, #0xe0]\n"
- "mov v7.d[1], x26\n"
- "ldr x24, [x13, #0xc8]\n"
- "mov v8.d[1], x25\n"
- "ldr x20, [x13, #0xd8]\n"
- "ldr x9, [x13, #0xe8]\n"
+ "ldr d7, [x10, #0xa0]\n"
".inst 0x6f80e8b0 // udot v16.4s, v5.16b, v0.4b[2]\n"
+ "mov v6.d[1], x23\n"
".inst 0x6f81e8b4 // udot v20.4s, v5.16b, v1.4b[2]\n"
- "ldr d5, [x13, #0xf0]\n"
- "ldr x28, [x13, #0xf8]\n"
+ "ldr x19, [x10, #0xa8]\n"
".inst 0x6f80e8d1 // udot v17.4s, v6.16b, v0.4b[2]\n"
+ "ldr d8, [x10, #0xb0]\n"
".inst 0x6f81e8d5 // udot v21.4s, v6.16b, v1.4b[2]\n"
- "mov v9.d[1], x24\n"
+ "ldr x23, [x10, #0xb8]\n"
+ "mov v7.d[1], x19\n"
+ "ldr d9, [x10, #0xc0]\n"
+ "ldr x19, [x10, #0xc8]\n"
".inst 0x6f80e8f2 // udot v18.4s, v7.16b, v0.4b[2]\n"
- "mov v10.d[1], x20\n"
+ "mov v8.d[1], x23\n"
".inst 0x6f81e8f6 // udot v22.4s, v7.16b, v1.4b[2]\n"
- "mov v4.d[1], x9\n"
+ "ldr d10, [x10, #0xd0]\n"
".inst 0x6f80e913 // udot v19.4s, v8.16b, v0.4b[2]\n"
- "mov v5.d[1], x28\n"
+ "mov v9.d[1], x19\n"
".inst 0x6f81e917 // udot v23.4s, v8.16b, v1.4b[2]\n"
- "add x10, x10, #0x10\n"
- "add x23, x23, #0x10\n"
- "add x13, x13, #0x100\n"
+ "ldr x23, [x10, #0xd8]\n"
".inst 0x6fa0e930 // udot v16.4s, v9.16b, v0.4b[3]\n"
+ "ldr d4, [x10, #0xe0]\n"
".inst 0x6fa1e934 // udot v20.4s, v9.16b, v1.4b[3]\n"
+ "ldr x19, [x10, #0xe8]\n"
+ "mov v10.d[1], x23\n"
+ "ldr d5, [x10, #0xf0]\n"
+ "ldr x24, [x10, #0xf8]\n"
+ "add x10, x10, #0x100\n"
".inst 0x6fa0e951 // udot v17.4s, v10.16b, v0.4b[3]\n"
+ "mov v4.d[1], x19\n"
".inst 0x6fa1e955 // udot v21.4s, v10.16b, v1.4b[3]\n"
".inst 0x6fa0e892 // udot v18.4s, v4.16b, v0.4b[3]\n"
+ "mov v5.d[1], x24\n"
".inst 0x6fa1e896 // udot v22.4s, v4.16b, v1.4b[3]\n"
".inst 0x6fa0e8b3 // udot v19.4s, v5.16b, v0.4b[3]\n"
".inst 0x6fa1e8b7 // udot v23.4s, v5.16b, v1.4b[3]\n"
@@ -510,53 +534,53 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
"38:" // Height 2: Multiply loop: unique 5: skip row sum
- "ldr q0, [x10, #0x0]\n"
- "sub x11, x11, #0x10\n"
- "ldr q1, [x23, #0x0]\n"
- "cmp x11, #0x20\n"
- "ldr q4, [x13, #0x0]\n"
- "ldr q5, [x13, #0x10]\n"
- "ldr q6, [x13, #0x20]\n"
- "ldr q7, [x13, #0x30]\n"
- "ldr q8, [x13, #0x40]\n"
- "ldr q9, [x13, #0x50]\n"
- "ldr q10, [x13, #0x60]\n"
- "prfm pldl1keep, [x10, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x26, x26, #0x10\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "cmp x26, #0x20\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x22, #0x0]\n"
+ "ldr q4, [x10, #0x0]\n"
"bge 37b\n"
"39:" // Height 2: Multiply loop: Single iteration only
".inst 0x6f80e090 // udot v16.4s, v4.16b, v0.4b[0]\n"
- "sub x11, x11, #0x10\n"
+ "ldr q5, [x10, #0x10]\n"
".inst 0x6f81e094 // udot v20.4s, v4.16b, v1.4b[0]\n"
- "ldr q4, [x13, #0x70]\n"
+ "ldr q6, [x10, #0x20]\n"
+ "ldr q7, [x10, #0x30]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x6f80e0b1 // udot v17.4s, v5.16b, v0.4b[0]\n"
- "add x10, x10, #0x10\n"
+ "ldr q8, [x10, #0x40]\n"
".inst 0x6f81e0b5 // udot v21.4s, v5.16b, v1.4b[0]\n"
- "ldr q5, [x13, #0x80]\n"
+ "ldr q9, [x10, #0x50]\n"
".inst 0x6f80e0d2 // udot v18.4s, v6.16b, v0.4b[0]\n"
- "add x23, x23, #0x10\n"
+ "ldr q10, [x10, #0x60]\n"
".inst 0x6f81e0d6 // udot v22.4s, v6.16b, v1.4b[0]\n"
- "ldr q6, [x13, #0x90]\n"
+ "ldr q4, [x10, #0x70]\n"
".inst 0x6f80e0f3 // udot v19.4s, v7.16b, v0.4b[0]\n"
+ "ldr q5, [x10, #0x80]\n"
".inst 0x6f81e0f7 // udot v23.4s, v7.16b, v1.4b[0]\n"
- "ldr q7, [x13, #0xa0]\n"
+ "ldr q6, [x10, #0x90]\n"
".inst 0x6fa0e110 // udot v16.4s, v8.16b, v0.4b[1]\n"
+ "ldr q7, [x10, #0xa0]\n"
".inst 0x6fa1e114 // udot v20.4s, v8.16b, v1.4b[1]\n"
- "ldr q8, [x13, #0xb0]\n"
+ "ldr q8, [x10, #0xb0]\n"
".inst 0x6fa0e131 // udot v17.4s, v9.16b, v0.4b[1]\n"
+ "add x25, x25, #0x10\n"
".inst 0x6fa1e135 // udot v21.4s, v9.16b, v1.4b[1]\n"
- "ldr q9, [x13, #0xc0]\n"
+ "ldr q9, [x10, #0xc0]\n"
".inst 0x6fa0e152 // udot v18.4s, v10.16b, v0.4b[1]\n"
+ "add x22, x22, #0x10\n"
".inst 0x6fa1e156 // udot v22.4s, v10.16b, v1.4b[1]\n"
- "ldr q10, [x13, #0xd0]\n"
+ "ldr q10, [x10, #0xd0]\n"
".inst 0x6fa0e093 // udot v19.4s, v4.16b, v0.4b[1]\n"
".inst 0x6fa1e097 // udot v23.4s, v4.16b, v1.4b[1]\n"
- "ldr q4, [x13, #0xe0]\n"
+ "ldr q4, [x10, #0xe0]\n"
".inst 0x6f80e8b0 // udot v16.4s, v5.16b, v0.4b[2]\n"
".inst 0x6f81e8b4 // udot v20.4s, v5.16b, v1.4b[2]\n"
- "ldr q5, [x13, #0xf0]\n"
+ "ldr q5, [x10, #0xf0]\n"
".inst 0x6f80e8d1 // udot v17.4s, v6.16b, v0.4b[2]\n"
- "add x13, x13, #0x100\n"
+ "add x10, x10, #0x100\n"
".inst 0x6f81e8d5 // udot v21.4s, v6.16b, v1.4b[2]\n"
".inst 0x6f80e8f2 // udot v18.4s, v7.16b, v0.4b[2]\n"
".inst 0x6f81e8f6 // udot v22.4s, v7.16b, v1.4b[2]\n"
@@ -574,143 +598,143 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
"40:" // Height 2: Multiply loop: unique 6: skip row sum
- "prfm pldl1keep, [x10, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
"41:" // Height 2: Multiply loop: Main loop skip
- "cbz x11, 48f\n"
- "cmp x11, #0x4\n"
+ "cbz x26, 48f\n"
+ "cmp x26, #0x4\n"
"blt 44f\n"
"42:" // Height 2: Multiply loop: Odd block loop
- "ldr s0, [x10], #0x4\n"
- "ldr s1, [x23], #0x4\n"
+ "ldr s0, [x25], #0x4\n"
+ "ldr s1, [x22], #0x4\n"
"tbnz %x[flags], #31, 43f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
"43:" // Height 2: Multiply loop: unique 7: skip row sum
- "ldr q6, [x13, #0x0]\n"
- "sub x11, x11, #0x4\n"
- "ldr q7, [x13, #0x10]\n"
- "cmp x11, #0x4\n"
- "ldr q8, [x13, #0x20]\n"
+ "ldr q6, [x10, #0x0]\n"
+ "sub x26, x26, #0x4\n"
+ "ldr q7, [x10, #0x10]\n"
+ "cmp x26, #0x4\n"
+ "ldr q8, [x10, #0x20]\n"
".inst 0x6f80e0d0 // udot v16.4s, v6.16b, v0.4b[0]\n"
- "ldr q9, [x13, #0x30]\n"
+ "ldr q9, [x10, #0x30]\n"
".inst 0x6f81e0d4 // udot v20.4s, v6.16b, v1.4b[0]\n"
+ "add x10, x10, #0x40\n"
".inst 0x6f80e0f1 // udot v17.4s, v7.16b, v0.4b[0]\n"
- "add x13, x13, #0x40\n"
".inst 0x6f81e0f5 // udot v21.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f80e112 // udot v18.4s, v8.16b, v0.4b[0]\n"
".inst 0x6f81e116 // udot v22.4s, v8.16b, v1.4b[0]\n"
".inst 0x6f80e133 // udot v19.4s, v9.16b, v0.4b[0]\n"
".inst 0x6f81e137 // udot v23.4s, v9.16b, v1.4b[0]\n"
"bge 42b\n"
+ "cbz x26, 48f\n"
"44:" // Height 2: Multiply loop: Skip odd blocks
- "cbz x11, 48f\n"
- "tbz x11, #1, 45f\n"
- "ldr h0, [x10], #0x2\n"
- "ldr h1, [x23], #0x2\n"
- "tbz x11, #0, 46f\n"
- "ld1 { v0.b }[2], [x10]\n"
- "ld1 { v1.b }[2], [x23]\n"
+ "tbz x26, #1, 45f\n"
+ "ldr h0, [x25], #0x2\n"
+ "ldr h1, [x22], #0x2\n"
+ "tbz x26, #0, 46f\n"
+ "ld1 { v0.b }[2], [x25]\n"
+ "ld1 { v1.b }[2], [x22]\n"
"b 46f\n"
"45:" // Height 2: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x10, #0x0]\n"
- "ldr b1, [x23, #0x0]\n"
+ "ldr b0, [x25, #0x0]\n"
+ "ldr b1, [x22, #0x0]\n"
"46:" // Height 2: Multiply loop: Ragged operand read: Done
"tbnz %x[flags], #31, 47f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
"47:" // Height 2: Multiply loop: unique 8: skip row sum
- "ldr q10, [x13, #0x0]\n"
+ "ldr q10, [x10, #0x0]\n"
+ "ldr q4, [x10, #0x10]\n"
+ "ldr q5, [x10, #0x20]\n"
".inst 0x6f80e150 // udot v16.4s, v10.16b, v0.4b[0]\n"
- "ldr q4, [x13, #0x10]\n"
+ "ldr q6, [x10, #0x30]\n"
".inst 0x6f81e154 // udot v20.4s, v10.16b, v1.4b[0]\n"
- "ldr q5, [x13, #0x20]\n"
+ "add x10, x10, #0x40\n"
".inst 0x6f80e091 // udot v17.4s, v4.16b, v0.4b[0]\n"
- "ldr q6, [x13, #0x30]\n"
".inst 0x6f81e095 // udot v21.4s, v4.16b, v1.4b[0]\n"
".inst 0x6f80e0b2 // udot v18.4s, v5.16b, v0.4b[0]\n"
- "add x13, x13, #0x40\n"
".inst 0x6f81e0b6 // udot v22.4s, v5.16b, v1.4b[0]\n"
".inst 0x6f80e0d3 // udot v19.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0d7 // udot v23.4s, v6.16b, v1.4b[0]\n"
"48:" // Height 2: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x12, x12, #0x1\n"
- "cmp x12, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 34b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x14, x20\n"
- "prfm pstl1keep, [x14, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x21, x28, x19\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
"tbnz %x[flags], #31, 49f\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1r { v2.4s }, [x23]\n"
+ "add x22, %x[qp], %[b_offset]\n"
+ "ld1r { v2.4s }, [x22]\n"
"neg v2.4s, v2.4s\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
"mul v11.4s, v11.4s, v2.4s\n"
"mul v12.4s, v12.4s, v2.4s\n"
"49:" // Height 2: skip row sum fixup
- "ldr q0, [x16, #0x0]\n"
"add v16.4s, v16.4s, v11.4s\n"
- "ldr q1, [x16, #0x10]\n"
"add v17.4s, v17.4s, v11.4s\n"
- "ldr q2, [x16, #0x20]\n"
"add v18.4s, v18.4s, v11.4s\n"
- "ldr q3, [x16, #0x30]\n"
"add v19.4s, v19.4s, v11.4s\n"
"add v20.4s, v20.4s, v12.4s\n"
"add v21.4s, v21.4s, v12.4s\n"
"add v22.4s, v22.4s, v12.4s\n"
"add v23.4s, v23.4s, v12.4s\n"
+ "ldr q0, [x9, #0x0]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
+ "ldr q1, [x9, #0x10]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
+ "ldr q2, [x9, #0x20]\n"
+ "add x22, %x[qp], %[per_layer_mul]\n"
"add v16.4s, v16.4s, v0.4s\n"
- "add v17.4s, v17.4s, v1.4s\n"
- "add v18.4s, v18.4s, v2.4s\n"
- "add v19.4s, v19.4s, v3.4s\n"
"add v20.4s, v20.4s, v0.4s\n"
+ "add v17.4s, v17.4s, v1.4s\n"
"add v21.4s, v21.4s, v1.4s\n"
+ "add v18.4s, v18.4s, v2.4s\n"
"add v22.4s, v22.4s, v2.4s\n"
- "add v23.4s, v23.4s, v3.4s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x23]\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
+ "ldr q3, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
"ld1r { v0.4s }, [x23]\n"
+ "ld1r { v4.4s }, [x22]\n"
+ "add v19.4s, v19.4s, v3.4s\n"
+ "add v23.4s, v23.4s, v3.4s\n"
"sqrdmulh v16.4s, v16.4s, v4.4s\n"
"sqrdmulh v17.4s, v17.4s, v4.4s\n"
"sqrdmulh v18.4s, v18.4s, v4.4s\n"
- "sqrdmulh v19.4s, v19.4s, v4.4s\n"
"sqrdmulh v20.4s, v20.4s, v4.4s\n"
"sqrdmulh v21.4s, v21.4s, v4.4s\n"
"sqrdmulh v22.4s, v22.4s, v4.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v4.4s\n"
"sqrdmulh v23.4s, v23.4s, v4.4s\n"
- "add x16, x16, #0x40\n"
"tbz %x[flags], #5, 50f\n"
"and v4.16b, v16.16b, v0.16b\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v4.4s\n"
"and v5.16b, v17.16b, v0.16b\n"
"and v6.16b, v18.16b, v0.16b\n"
- "and v7.16b, v19.16b, v0.16b\n"
"and v8.16b, v20.16b, v0.16b\n"
"and v9.16b, v21.16b, v0.16b\n"
"and v10.16b, v22.16b, v0.16b\n"
- "and v4.16b, v23.16b, v0.16b\n"
+ "and v7.16b, v19.16b, v0.16b\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
"sshr v8.4s, v8.4s, #0x1f\n"
"sshr v9.4s, v9.4s, #0x1f\n"
"sshr v10.4s, v10.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
+ "sqadd v16.4s, v16.4s, v4.4s\n"
"sqadd v17.4s, v17.4s, v5.4s\n"
"sqadd v18.4s, v18.4s, v6.4s\n"
- "sqadd v19.4s, v19.4s, v7.4s\n"
"sqadd v20.4s, v20.4s, v8.4s\n"
"sqadd v21.4s, v21.4s, v9.4s\n"
"sqadd v22.4s, v22.4s, v10.4s\n"
+ "sqadd v19.4s, v19.4s, v7.4s\n"
+ "and v4.16b, v23.16b, v0.16b\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
"sqadd v23.4s, v23.4s, v4.4s\n"
"50:" // Height 2: no shift correction
"srshl v16.4s, v16.4s, v0.4s\n"
@@ -721,8 +745,13 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"srshl v21.4s, v21.4s, v0.4s\n"
"srshl v22.4s, v22.4s, v0.4s\n"
"srshl v23.4s, v23.4s, v0.4s\n"
- "add x23, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
+ "add x22, %x[qp], %[c_offset]\n"
+ "add x23, %x[qp], %[minval]\n"
+ "ld1r { v4.4s }, [x22]\n"
+ "add x22, %x[qp], %[maxval]\n"
+ "ld1r { v5.4s }, [x23]\n"
+ "cmp x11, #0x10\n"
+ "ld1r { v6.4s }, [x22]\n"
"add v16.4s, v16.4s, v4.4s\n"
"add v17.4s, v17.4s, v4.4s\n"
"add v18.4s, v18.4s, v4.4s\n"
@@ -731,8 +760,6 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"add v21.4s, v21.4s, v4.4s\n"
"add v22.4s, v22.4s, v4.4s\n"
"add v23.4s, v23.4s, v4.4s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x23]\n"
"smin v16.4s, v16.4s, v6.4s\n"
"smin v17.4s, v17.4s, v6.4s\n"
"smin v18.4s, v18.4s, v6.4s\n"
@@ -741,8 +768,6 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"smin v21.4s, v21.4s, v6.4s\n"
"smin v22.4s, v22.4s, v6.4s\n"
"smin v23.4s, v23.4s, v6.4s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x23]\n"
"smax v16.4s, v16.4s, v5.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
"smax v18.4s, v18.4s, v5.4s\n"
@@ -755,88 +780,87 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"uzp1 v17.8h, v18.8h, v19.8h\n"
"uzp1 v20.8h, v20.8h, v21.8h\n"
"uzp1 v21.8h, v22.8h, v23.8h\n"
- "cmp x15, #0x10\n"
"uzp1 v16.16b, v16.16b, v17.16b\n"
"uzp1 v20.16b, v20.16b, v21.16b\n"
"bge 59f\n"
- "tbz x15, #3, 54f\n"
- "str d16, [x14], #0x8\n"
- "str d20, [x22], #0x8\n"
- "tbz x15, #2, 52f\n"
- "st1 { v16.s }[2], [x14], #0x4\n"
- "st1 { v20.s }[2], [x22], #0x4\n"
- "tbz x15, #1, 51f\n"
- "st1 { v16.h }[6], [x14], #0x2\n"
- "st1 { v20.h }[6], [x22], #0x2\n"
- "tbz x15, #0, 58f\n"
- "st1 { v16.b }[14], [x14]\n"
- "st1 { v20.b }[14], [x22]\n"
+ "tbz x11, #3, 54f\n"
+ "str d16, [x28], #0x8\n"
+ "str d20, [x21], #0x8\n"
+ "tbz x11, #2, 52f\n"
+ "st1 { v16.s }[2], [x28], #0x4\n"
+ "st1 { v20.s }[2], [x21], #0x4\n"
+ "tbz x11, #1, 51f\n"
+ "st1 { v16.h }[6], [x28], #0x2\n"
+ "st1 { v20.h }[6], [x21], #0x2\n"
+ "tbz x11, #0, 58f\n"
+ "st1 { v16.b }[14], [x28]\n"
+ "st1 { v20.b }[14], [x21]\n"
"b 58f\n"
"51:" // Height 2: Partial direct writeback: partial_1_12
- "tbz x15, #0, 58f\n"
- "st1 { v16.b }[12], [x14]\n"
- "st1 { v20.b }[12], [x22]\n"
+ "tbz x11, #0, 58f\n"
+ "st1 { v16.b }[12], [x28]\n"
+ "st1 { v20.b }[12], [x21]\n"
"b 58f\n"
"52:" // Height 2: Partial direct writeback: partial_2_8
- "tbz x15, #1, 53f\n"
- "st1 { v16.h }[4], [x14], #0x2\n"
- "st1 { v20.h }[4], [x22], #0x2\n"
- "tbz x15, #0, 58f\n"
- "st1 { v16.b }[10], [x14]\n"
- "st1 { v20.b }[10], [x22]\n"
+ "tbz x11, #1, 53f\n"
+ "st1 { v16.h }[4], [x28], #0x2\n"
+ "st1 { v20.h }[4], [x21], #0x2\n"
+ "tbz x11, #0, 58f\n"
+ "st1 { v16.b }[10], [x28]\n"
+ "st1 { v20.b }[10], [x21]\n"
"b 58f\n"
"53:" // Height 2: Partial direct writeback: partial_1_8
- "tbz x15, #0, 58f\n"
- "st1 { v16.b }[8], [x14]\n"
- "st1 { v20.b }[8], [x22]\n"
+ "tbz x11, #0, 58f\n"
+ "st1 { v16.b }[8], [x28]\n"
+ "st1 { v20.b }[8], [x21]\n"
"b 58f\n"
"54:" // Height 2: Partial direct writeback: partial_4_0
- "tbz x15, #2, 56f\n"
- "str s16, [x14], #0x4\n"
- "str s20, [x22], #0x4\n"
- "tbz x15, #1, 55f\n"
- "st1 { v16.h }[2], [x14], #0x2\n"
- "st1 { v20.h }[2], [x22], #0x2\n"
- "tbz x15, #0, 58f\n"
- "st1 { v16.b }[6], [x14]\n"
- "st1 { v20.b }[6], [x22]\n"
+ "tbz x11, #2, 56f\n"
+ "str s16, [x28], #0x4\n"
+ "str s20, [x21], #0x4\n"
+ "tbz x11, #1, 55f\n"
+ "st1 { v16.h }[2], [x28], #0x2\n"
+ "st1 { v20.h }[2], [x21], #0x2\n"
+ "tbz x11, #0, 58f\n"
+ "st1 { v16.b }[6], [x28]\n"
+ "st1 { v20.b }[6], [x21]\n"
"b 58f\n"
"55:" // Height 2: Partial direct writeback: partial_1_4
- "tbz x15, #0, 58f\n"
- "st1 { v16.b }[4], [x14]\n"
- "st1 { v20.b }[4], [x22]\n"
+ "tbz x11, #0, 58f\n"
+ "st1 { v16.b }[4], [x28]\n"
+ "st1 { v20.b }[4], [x21]\n"
"b 58f\n"
"56:" // Height 2: Partial direct writeback: partial_2_0
- "tbz x15, #1, 57f\n"
- "str h16, [x14], #0x2\n"
- "str h20, [x22], #0x2\n"
- "tbz x15, #0, 58f\n"
- "st1 { v16.b }[2], [x14]\n"
- "st1 { v20.b }[2], [x22]\n"
+ "tbz x11, #1, 57f\n"
+ "str h16, [x28], #0x2\n"
+ "str h20, [x21], #0x2\n"
+ "tbz x11, #0, 58f\n"
+ "st1 { v16.b }[2], [x28]\n"
+ "st1 { v20.b }[2], [x21]\n"
"b 58f\n"
"57:" // Height 2: Partial direct writeback: partial_1_0
- "str b16, [x14, #0x0]\n"
- "str b20, [x22, #0x0]\n"
+ "str b16, [x28, #0x0]\n"
+ "str b20, [x21, #0x0]\n"
"58:" // Height 2: Partial direct writeback: Done
"b 60f\n"
"59:" // Height 2: Full writeback
- "str q16, [x14, #0x0]\n"
- "add x14, x14, #0x10\n"
- "str q20, [x22, #0x0]\n"
+ "str q16, [x28, #0x0]\n"
+ "add x28, x28, #0x10\n"
+ "str q20, [x21, #0x0]\n"
"60:" // Height 2: Writeback done
- "subs x15, x15, #0x10\n"
+ "subs x11, x11, #0x10\n"
"bgt 32b\n"
"b 122f\n"
"61:" // Height 3
- "mov x16, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"movi v12.4s, #0x0\n"
- "bic %x[flags], %x[flags], #0x80000000\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"movi v13.4s, #0x0\n"
- "ldr x15, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[col_bias]\n"
"movi v15.16b, #0x1\n"
- "ldr x13, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x14, %x[output_ptr]\n"
+ "bic %x[flags], %x[flags], #0x80000000\n"
+ "mov x28, %x[output_ptr]\n"
"62:" // Height 3: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -851,117 +875,129 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"movi v26.4s, #0x0\n"
"movi v27.4s, #0x0\n"
"63:" // Height 3: setup done
- "mov x12, #0x0\n"
+ "mov x27, #0x0\n"
"64:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w11, [x20, x12, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 65f\n"
- "ldr x21, [%x[input_ptr], x12, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x10, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "ldr x22, [x21, #0x10]\n"
- "cbnz x12, 66f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x10, x10, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "cbnz x27, 66f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
"b 66f\n"
"65:" // Height 3: setup direct input
- "mov x10, %x[input_ptr]\n"
- "add x23, x10, x20\n"
- "add x22, x23, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x22, x25, x19\n"
+ "add x21, x22, x19\n"
"66:" // Height 3: input setup done
- "cmp x11, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 71f\n"
- "ldr q0, [x10, #0x0]\n"
- "cmp x11, #0x20\n"
- "ldr q1, [x23, #0x0]\n"
- "ldr q2, [x22, #0x0]\n"
- "ldr q4, [x13, #0x0]\n"
- "ldr q5, [x13, #0x10]\n"
- "ldr q6, [x13, #0x20]\n"
- "ldr q7, [x13, #0x30]\n"
- "ldr q8, [x13, #0x40]\n"
- "ldr q9, [x13, #0x50]\n"
- "ldr q10, [x13, #0x60]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x22, #0x0]\n"
+ "cmp x26, #0x20\n"
+ "ldr q2, [x21, #0x0]\n"
+ "ldr q4, [x10, #0x0]\n"
"blt 69f\n"
"67:" // Height 3: Multiply loop: Main loop head
".inst 0x6f80e090 // udot v16.4s, v4.16b, v0.4b[0]\n"
- "ldr x9, [x13, #0x78]\n"
+ "ldr d5, [x10, #0x10]\n"
".inst 0x6f81e094 // udot v20.4s, v4.16b, v1.4b[0]\n"
- "ldr x28, [x13, #0x88]\n"
+ "ldr x24, [x10, #0x18]\n"
".inst 0x6f82e098 // udot v24.4s, v4.16b, v2.4b[0]\n"
- "ldr d4, [x13, #0x70]\n"
+ "ldr d6, [x10, #0x20]\n"
+ "ldr x23, [x10, #0x28]\n"
+ "add x25, x25, #0x10\n"
+ "mov v5.d[1], x24\n"
+ "ldr d7, [x10, #0x30]\n"
+ "ldr x19, [x10, #0x38]\n"
+ "add x22, x22, #0x10\n"
".inst 0x6f80e0b1 // udot v17.4s, v5.16b, v0.4b[0]\n"
- "mov v4.d[1], x9\n"
+ "mov v6.d[1], x23\n"
".inst 0x6f81e0b5 // udot v21.4s, v5.16b, v1.4b[0]\n"
- "ldr x27, [x13, #0x98]\n"
+ "ldr d8, [x10, #0x40]\n"
".inst 0x6f82e0b9 // udot v25.4s, v5.16b, v2.4b[0]\n"
- "ldr d5, [x13, #0x80]\n"
+ "mov v7.d[1], x19\n"
".inst 0x6f80e0d2 // udot v18.4s, v6.16b, v0.4b[0]\n"
- "ldr x26, [x13, #0xa8]\n"
+ "ldr x23, [x10, #0x48]\n"
".inst 0x6f81e0d6 // udot v22.4s, v6.16b, v1.4b[0]\n"
- "ldr x25, [x13, #0xb8]\n"
+ "ldr d9, [x10, #0x50]\n"
".inst 0x6f82e0da // udot v26.4s, v6.16b, v2.4b[0]\n"
- "ldr d6, [x13, #0x90]\n"
+ "ldr x19, [x10, #0x58]\n"
".inst 0x6f80e0f3 // udot v19.4s, v7.16b, v0.4b[0]\n"
- "mov v5.d[1], x28\n"
+ "mov v8.d[1], x23\n"
".inst 0x6f81e0f7 // udot v23.4s, v7.16b, v1.4b[0]\n"
- "mov v6.d[1], x27\n"
+ "ldr d10, [x10, #0x60]\n"
".inst 0x6f82e0fb // udot v27.4s, v7.16b, v2.4b[0]\n"
- "ldr d7, [x13, #0xa0]\n"
+ "mov v9.d[1], x19\n"
".inst 0x6fa0e110 // udot v16.4s, v8.16b, v0.4b[1]\n"
- "mov v7.d[1], x26\n"
+ "ldr x23, [x10, #0x68]\n"
".inst 0x6fa1e114 // udot v20.4s, v8.16b, v1.4b[1]\n"
- "ldr x24, [x13, #0xc8]\n"
+ "ldr d4, [x10, #0x70]\n"
".inst 0x6fa2e118 // udot v24.4s, v8.16b, v2.4b[1]\n"
- "ldr d8, [x13, #0xb0]\n"
+ "ldr x19, [x10, #0x78]\n"
".inst 0x6fa0e131 // udot v17.4s, v9.16b, v0.4b[1]\n"
- "mov v8.d[1], x25\n"
+ "mov v10.d[1], x23\n"
".inst 0x6fa1e135 // udot v21.4s, v9.16b, v1.4b[1]\n"
- "ldr x20, [x13, #0xd8]\n"
+ "ldr d5, [x10, #0x80]\n"
".inst 0x6fa2e139 // udot v25.4s, v9.16b, v2.4b[1]\n"
- "ldr d9, [x13, #0xc0]\n"
+ "mov v4.d[1], x19\n"
".inst 0x6fa0e152 // udot v18.4s, v10.16b, v0.4b[1]\n"
- "ldr x9, [x13, #0xe8]\n"
+ "ldr x24, [x10, #0x88]\n"
".inst 0x6fa1e156 // udot v22.4s, v10.16b, v1.4b[1]\n"
- "ldr x28, [x13, #0xf8]\n"
+ "ldr d6, [x10, #0x90]\n"
".inst 0x6fa2e15a // udot v26.4s, v10.16b, v2.4b[1]\n"
- "ldr d10, [x13, #0xd0]\n"
+ "ldr x23, [x10, #0x98]\n"
".inst 0x6fa0e093 // udot v19.4s, v4.16b, v0.4b[1]\n"
- "mov v9.d[1], x24\n"
+ "mov v5.d[1], x24\n"
".inst 0x6fa1e097 // udot v23.4s, v4.16b, v1.4b[1]\n"
- "mov v10.d[1], x20\n"
+ "ldr d7, [x10, #0xa0]\n"
".inst 0x6fa2e09b // udot v27.4s, v4.16b, v2.4b[1]\n"
- "ldr d4, [x13, #0xe0]\n"
+ "mov v6.d[1], x23\n"
".inst 0x6f80e8b0 // udot v16.4s, v5.16b, v0.4b[2]\n"
- "mov v4.d[1], x9\n"
+ "ldr x19, [x10, #0xa8]\n"
".inst 0x6f81e8b4 // udot v20.4s, v5.16b, v1.4b[2]\n"
- "add x10, x10, #0x10\n"
+ "ldr d8, [x10, #0xb0]\n"
".inst 0x6f82e8b8 // udot v24.4s, v5.16b, v2.4b[2]\n"
- "ldr d5, [x13, #0xf0]\n"
+ "ldr x23, [x10, #0xb8]\n"
".inst 0x6f80e8d1 // udot v17.4s, v6.16b, v0.4b[2]\n"
- "mov v5.d[1], x28\n"
+ "mov v7.d[1], x19\n"
".inst 0x6f81e8d5 // udot v21.4s, v6.16b, v1.4b[2]\n"
- "add x23, x23, #0x10\n"
+ "ldr d9, [x10, #0xc0]\n"
".inst 0x6f82e8d9 // udot v25.4s, v6.16b, v2.4b[2]\n"
- "add x22, x22, #0x10\n"
+ "mov v8.d[1], x23\n"
".inst 0x6f80e8f2 // udot v18.4s, v7.16b, v0.4b[2]\n"
- "add x13, x13, #0x100\n"
+ "ldr x19, [x10, #0xc8]\n"
".inst 0x6f81e8f6 // udot v22.4s, v7.16b, v1.4b[2]\n"
+ "ldr d10, [x10, #0xd0]\n"
".inst 0x6f82e8fa // udot v26.4s, v7.16b, v2.4b[2]\n"
+ "ldr x23, [x10, #0xd8]\n"
".inst 0x6f80e913 // udot v19.4s, v8.16b, v0.4b[2]\n"
+ "mov v9.d[1], x19\n"
".inst 0x6f81e917 // udot v23.4s, v8.16b, v1.4b[2]\n"
+ "ldr d4, [x10, #0xe0]\n"
".inst 0x6f82e91b // udot v27.4s, v8.16b, v2.4b[2]\n"
+ "mov v10.d[1], x23\n"
".inst 0x6fa0e930 // udot v16.4s, v9.16b, v0.4b[3]\n"
+ "ldr x19, [x10, #0xe8]\n"
".inst 0x6fa1e934 // udot v20.4s, v9.16b, v1.4b[3]\n"
+ "ldr d5, [x10, #0xf0]\n"
".inst 0x6fa2e938 // udot v24.4s, v9.16b, v2.4b[3]\n"
+ "ldr x24, [x10, #0xf8]\n"
".inst 0x6fa0e951 // udot v17.4s, v10.16b, v0.4b[3]\n"
+ "mov v4.d[1], x19\n"
".inst 0x6fa1e955 // udot v21.4s, v10.16b, v1.4b[3]\n"
+ "add x21, x21, #0x10\n"
".inst 0x6fa2e959 // udot v25.4s, v10.16b, v2.4b[3]\n"
+ "mov v5.d[1], x24\n"
".inst 0x6fa0e892 // udot v18.4s, v4.16b, v0.4b[3]\n"
+ "add x10, x10, #0x100\n"
".inst 0x6fa1e896 // udot v22.4s, v4.16b, v1.4b[3]\n"
".inst 0x6fa2e89a // udot v26.4s, v4.16b, v2.4b[3]\n"
".inst 0x6fa0e8b3 // udot v19.4s, v5.16b, v0.4b[3]\n"
@@ -972,65 +1008,65 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
"68:" // Height 3: Multiply loop: unique 9: skip row sum
- "ldr q0, [x10, #0x0]\n"
- "sub x11, x11, #0x10\n"
- "ldr q1, [x23, #0x0]\n"
- "cmp x11, #0x20\n"
- "ldr q2, [x22, #0x0]\n"
- "ldr q4, [x13, #0x0]\n"
- "ldr q5, [x13, #0x10]\n"
- "ldr q6, [x13, #0x20]\n"
- "ldr q7, [x13, #0x30]\n"
- "ldr q8, [x13, #0x40]\n"
- "ldr q9, [x13, #0x50]\n"
- "ldr q10, [x13, #0x60]\n"
- "prfm pldl1keep, [x10, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x26, x26, #0x10\n"
"prfm pldl1keep, [x22, #0x80]\n"
+ "cmp x26, #0x20\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x22, #0x0]\n"
+ "ldr q2, [x21, #0x0]\n"
+ "ldr q4, [x10, #0x0]\n"
"bge 67b\n"
"69:" // Height 3: Multiply loop: Single iteration only
".inst 0x6f80e090 // udot v16.4s, v4.16b, v0.4b[0]\n"
- "sub x11, x11, #0x10\n"
+ "ldr q5, [x10, #0x10]\n"
".inst 0x6f81e094 // udot v20.4s, v4.16b, v1.4b[0]\n"
- "add x10, x10, #0x10\n"
+ "ldr q6, [x10, #0x20]\n"
".inst 0x6f82e098 // udot v24.4s, v4.16b, v2.4b[0]\n"
- "ldr q4, [x13, #0x70]\n"
+ "ldr q7, [x10, #0x30]\n"
".inst 0x6f80e0b1 // udot v17.4s, v5.16b, v0.4b[0]\n"
- "add x23, x23, #0x10\n"
+ "ldr q8, [x10, #0x40]\n"
".inst 0x6f81e0b5 // udot v21.4s, v5.16b, v1.4b[0]\n"
- "add x22, x22, #0x10\n"
+ "ldr q9, [x10, #0x50]\n"
".inst 0x6f82e0b9 // udot v25.4s, v5.16b, v2.4b[0]\n"
- "ldr q5, [x13, #0x80]\n"
+ "ldr q10, [x10, #0x60]\n"
".inst 0x6f80e0d2 // udot v18.4s, v6.16b, v0.4b[0]\n"
+ "ldr q4, [x10, #0x70]\n"
".inst 0x6f81e0d6 // udot v22.4s, v6.16b, v1.4b[0]\n"
+ "ldr q5, [x10, #0x80]\n"
".inst 0x6f82e0da // udot v26.4s, v6.16b, v2.4b[0]\n"
- "ldr q6, [x13, #0x90]\n"
+ "ldr q6, [x10, #0x90]\n"
".inst 0x6f80e0f3 // udot v19.4s, v7.16b, v0.4b[0]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x6f81e0f7 // udot v23.4s, v7.16b, v1.4b[0]\n"
+ "add x25, x25, #0x10\n"
".inst 0x6f82e0fb // udot v27.4s, v7.16b, v2.4b[0]\n"
- "ldr q7, [x13, #0xa0]\n"
+ "ldr q7, [x10, #0xa0]\n"
".inst 0x6fa0e110 // udot v16.4s, v8.16b, v0.4b[1]\n"
+ "add x22, x22, #0x10\n"
".inst 0x6fa1e114 // udot v20.4s, v8.16b, v1.4b[1]\n"
+ "add x21, x21, #0x10\n"
".inst 0x6fa2e118 // udot v24.4s, v8.16b, v2.4b[1]\n"
- "ldr q8, [x13, #0xb0]\n"
+ "ldr q8, [x10, #0xb0]\n"
".inst 0x6fa0e131 // udot v17.4s, v9.16b, v0.4b[1]\n"
".inst 0x6fa1e135 // udot v21.4s, v9.16b, v1.4b[1]\n"
".inst 0x6fa2e139 // udot v25.4s, v9.16b, v2.4b[1]\n"
- "ldr q9, [x13, #0xc0]\n"
+ "ldr q9, [x10, #0xc0]\n"
".inst 0x6fa0e152 // udot v18.4s, v10.16b, v0.4b[1]\n"
".inst 0x6fa1e156 // udot v22.4s, v10.16b, v1.4b[1]\n"
".inst 0x6fa2e15a // udot v26.4s, v10.16b, v2.4b[1]\n"
- "ldr q10, [x13, #0xd0]\n"
+ "ldr q10, [x10, #0xd0]\n"
".inst 0x6fa0e093 // udot v19.4s, v4.16b, v0.4b[1]\n"
".inst 0x6fa1e097 // udot v23.4s, v4.16b, v1.4b[1]\n"
".inst 0x6fa2e09b // udot v27.4s, v4.16b, v2.4b[1]\n"
- "ldr q4, [x13, #0xe0]\n"
+ "ldr q4, [x10, #0xe0]\n"
".inst 0x6f80e8b0 // udot v16.4s, v5.16b, v0.4b[2]\n"
".inst 0x6f81e8b4 // udot v20.4s, v5.16b, v1.4b[2]\n"
".inst 0x6f82e8b8 // udot v24.4s, v5.16b, v2.4b[2]\n"
- "ldr q5, [x13, #0xf0]\n"
+ "ldr q5, [x10, #0xf0]\n"
".inst 0x6f80e8d1 // udot v17.4s, v6.16b, v0.4b[2]\n"
- "add x13, x13, #0x100\n"
+ "add x10, x10, #0x100\n"
".inst 0x6f81e8d5 // udot v21.4s, v6.16b, v1.4b[2]\n"
".inst 0x6f82e8d9 // udot v25.4s, v6.16b, v2.4b[2]\n"
".inst 0x6f80e8f2 // udot v18.4s, v7.16b, v0.4b[2]\n"
@@ -1056,32 +1092,32 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
"70:" // Height 3: Multiply loop: unique 10: skip row sum
- "prfm pldl1keep, [x10, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"prfm pldl1keep, [x22, #0x80]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
"71:" // Height 3: Multiply loop: Main loop skip
- "cbz x11, 78f\n"
- "cmp x11, #0x4\n"
+ "cbz x26, 78f\n"
+ "cmp x26, #0x4\n"
"blt 74f\n"
"72:" // Height 3: Multiply loop: Odd block loop
- "ldr s0, [x10], #0x4\n"
- "ldr s1, [x23], #0x4\n"
- "ldr s2, [x22], #0x4\n"
+ "ldr s0, [x25], #0x4\n"
+ "ldr s1, [x22], #0x4\n"
+ "ldr s2, [x21], #0x4\n"
"tbnz %x[flags], #31, 73f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
"73:" // Height 3: Multiply loop: unique 11: skip row sum
- "ldr q6, [x13, #0x0]\n"
- "sub x11, x11, #0x4\n"
- "ldr q7, [x13, #0x10]\n"
- "cmp x11, #0x4\n"
- "ldr q8, [x13, #0x20]\n"
+ "ldr q6, [x10, #0x0]\n"
+ "sub x26, x26, #0x4\n"
+ "ldr q7, [x10, #0x10]\n"
+ "cmp x26, #0x4\n"
+ "ldr q8, [x10, #0x20]\n"
".inst 0x6f80e0d0 // udot v16.4s, v6.16b, v0.4b[0]\n"
- "ldr q9, [x13, #0x30]\n"
+ "ldr q9, [x10, #0x30]\n"
".inst 0x6f81e0d4 // udot v20.4s, v6.16b, v1.4b[0]\n"
+ "add x10, x10, #0x40\n"
".inst 0x6f82e0d8 // udot v24.4s, v6.16b, v2.4b[0]\n"
- "add x13, x13, #0x40\n"
".inst 0x6f80e0f1 // udot v17.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0f5 // udot v21.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f82e0f9 // udot v25.4s, v7.16b, v2.4b[0]\n"
@@ -1092,37 +1128,37 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
".inst 0x6f81e137 // udot v23.4s, v9.16b, v1.4b[0]\n"
".inst 0x6f82e13b // udot v27.4s, v9.16b, v2.4b[0]\n"
"bge 72b\n"
+ "cbz x26, 78f\n"
"74:" // Height 3: Multiply loop: Skip odd blocks
- "cbz x11, 78f\n"
- "tbz x11, #1, 75f\n"
- "ldr h0, [x10], #0x2\n"
- "ldr h1, [x23], #0x2\n"
- "ldr h2, [x22], #0x2\n"
- "tbz x11, #0, 76f\n"
- "ld1 { v0.b }[2], [x10]\n"
- "ld1 { v1.b }[2], [x23]\n"
- "ld1 { v2.b }[2], [x22]\n"
+ "tbz x26, #1, 75f\n"
+ "ldr h0, [x25], #0x2\n"
+ "ldr h1, [x22], #0x2\n"
+ "ldr h2, [x21], #0x2\n"
+ "tbz x26, #0, 76f\n"
+ "ld1 { v0.b }[2], [x25]\n"
+ "ld1 { v1.b }[2], [x22]\n"
+ "ld1 { v2.b }[2], [x21]\n"
"b 76f\n"
"75:" // Height 3: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x10, #0x0]\n"
- "ldr b1, [x23, #0x0]\n"
- "ldr b2, [x22, #0x0]\n"
+ "ldr b0, [x25, #0x0]\n"
+ "ldr b1, [x22, #0x0]\n"
+ "ldr b2, [x21, #0x0]\n"
"76:" // Height 3: Multiply loop: Ragged operand read: Done
"tbnz %x[flags], #31, 77f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
"77:" // Height 3: Multiply loop: unique 12: skip row sum
- "ldr q10, [x13, #0x0]\n"
+ "ldr q10, [x10, #0x0]\n"
+ "ldr q4, [x10, #0x10]\n"
+ "ldr q5, [x10, #0x20]\n"
".inst 0x6f80e150 // udot v16.4s, v10.16b, v0.4b[0]\n"
- "ldr q4, [x13, #0x10]\n"
+ "ldr q6, [x10, #0x30]\n"
".inst 0x6f81e154 // udot v20.4s, v10.16b, v1.4b[0]\n"
- "ldr q5, [x13, #0x20]\n"
+ "add x10, x10, #0x40\n"
".inst 0x6f82e158 // udot v24.4s, v10.16b, v2.4b[0]\n"
- "ldr q6, [x13, #0x30]\n"
".inst 0x6f80e091 // udot v17.4s, v4.16b, v0.4b[0]\n"
".inst 0x6f81e095 // udot v21.4s, v4.16b, v1.4b[0]\n"
- "add x13, x13, #0x40\n"
".inst 0x6f82e099 // udot v25.4s, v4.16b, v2.4b[0]\n"
".inst 0x6f80e0b2 // udot v18.4s, v5.16b, v0.4b[0]\n"
".inst 0x6f81e0b6 // udot v22.4s, v5.16b, v1.4b[0]\n"
@@ -1131,22 +1167,22 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
".inst 0x6f81e0d7 // udot v23.4s, v6.16b, v1.4b[0]\n"
".inst 0x6f82e0db // udot v27.4s, v6.16b, v2.4b[0]\n"
"78:" // Height 3: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x12, x12, #0x1\n"
- "cmp x12, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 64b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x14, x20\n"
- "add x21, x22, x20\n"
- "prfm pstl1keep, [x14, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x21, x28, x19\n"
"prfm pstl1keep, [x21, #0x0]\n"
+ "add x20, x21, x19\n"
+ "prfm pstl1keep, [x20, #0x0]\n"
"tbnz %x[flags], #31, 79f\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
"addp v13.4s, v13.4s, v13.4s\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1r { v3.4s }, [x23]\n"
+ "add x22, %x[qp], %[b_offset]\n"
+ "ld1r { v3.4s }, [x22]\n"
"neg v3.4s, v3.4s\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
@@ -1155,13 +1191,9 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"mul v12.4s, v12.4s, v3.4s\n"
"mul v13.4s, v13.4s, v3.4s\n"
"79:" // Height 3: skip row sum fixup
- "ldr q0, [x16, #0x0]\n"
"add v16.4s, v16.4s, v11.4s\n"
- "ldr q1, [x16, #0x10]\n"
"add v17.4s, v17.4s, v11.4s\n"
- "ldr q2, [x16, #0x20]\n"
"add v18.4s, v18.4s, v11.4s\n"
- "ldr q3, [x16, #0x30]\n"
"add v19.4s, v19.4s, v11.4s\n"
"add v20.4s, v20.4s, v12.4s\n"
"add v21.4s, v21.4s, v12.4s\n"
@@ -1171,69 +1203,73 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"add v25.4s, v25.4s, v13.4s\n"
"add v26.4s, v26.4s, v13.4s\n"
"add v27.4s, v27.4s, v13.4s\n"
+ "ldr q0, [x9, #0x0]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
+ "ldr q1, [x9, #0x10]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
+ "ldr q2, [x9, #0x20]\n"
+ "add x22, %x[qp], %[per_layer_mul]\n"
"add v16.4s, v16.4s, v0.4s\n"
- "add v17.4s, v17.4s, v1.4s\n"
- "add v18.4s, v18.4s, v2.4s\n"
- "add v19.4s, v19.4s, v3.4s\n"
"add v20.4s, v20.4s, v0.4s\n"
+ "add v17.4s, v17.4s, v1.4s\n"
"add v21.4s, v21.4s, v1.4s\n"
+ "add v18.4s, v18.4s, v2.4s\n"
"add v22.4s, v22.4s, v2.4s\n"
- "add v23.4s, v23.4s, v3.4s\n"
"add v24.4s, v24.4s, v0.4s\n"
"add v25.4s, v25.4s, v1.4s\n"
"add v26.4s, v26.4s, v2.4s\n"
- "add v27.4s, v27.4s, v3.4s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x23]\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
+ "ldr q3, [x9, #0x30]\n"
"ld1r { v0.4s }, [x23]\n"
+ "add x9, x9, #0x40\n"
+ "ld1r { v4.4s }, [x22]\n"
+ "add v19.4s, v19.4s, v3.4s\n"
+ "add v23.4s, v23.4s, v3.4s\n"
+ "add v27.4s, v27.4s, v3.4s\n"
"sqrdmulh v16.4s, v16.4s, v4.4s\n"
"sqrdmulh v17.4s, v17.4s, v4.4s\n"
"sqrdmulh v18.4s, v18.4s, v4.4s\n"
- "sqrdmulh v19.4s, v19.4s, v4.4s\n"
"sqrdmulh v20.4s, v20.4s, v4.4s\n"
"sqrdmulh v21.4s, v21.4s, v4.4s\n"
"sqrdmulh v22.4s, v22.4s, v4.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v4.4s\n"
"sqrdmulh v23.4s, v23.4s, v4.4s\n"
"sqrdmulh v24.4s, v24.4s, v4.4s\n"
"sqrdmulh v25.4s, v25.4s, v4.4s\n"
"sqrdmulh v26.4s, v26.4s, v4.4s\n"
"sqrdmulh v27.4s, v27.4s, v4.4s\n"
- "add x16, x16, #0x40\n"
"tbz %x[flags], #5, 80f\n"
"and v4.16b, v16.16b, v0.16b\n"
"and v5.16b, v17.16b, v0.16b\n"
"and v6.16b, v18.16b, v0.16b\n"
"and v7.16b, v19.16b, v0.16b\n"
"and v8.16b, v20.16b, v0.16b\n"
+ "and v9.16b, v21.16b, v0.16b\n"
+ "and v10.16b, v22.16b, v0.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sshr v8.4s, v8.4s, #0x1f\n"
+ "sshr v9.4s, v9.4s, #0x1f\n"
+ "sshr v10.4s, v10.4s, #0x1f\n"
"sqadd v16.4s, v16.4s, v4.4s\n"
+ "and v4.16b, v23.16b, v0.16b\n"
"sqadd v17.4s, v17.4s, v5.4s\n"
"sqadd v18.4s, v18.4s, v6.4s\n"
"sqadd v19.4s, v19.4s, v7.4s\n"
"sqadd v20.4s, v20.4s, v8.4s\n"
- "and v9.16b, v21.16b, v0.16b\n"
- "and v10.16b, v22.16b, v0.16b\n"
- "and v4.16b, v23.16b, v0.16b\n"
+ "sqadd v21.4s, v21.4s, v9.4s\n"
+ "sqadd v22.4s, v22.4s, v10.4s\n"
"and v5.16b, v24.16b, v0.16b\n"
"and v6.16b, v25.16b, v0.16b\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
"and v7.16b, v26.16b, v0.16b\n"
"and v8.16b, v27.16b, v0.16b\n"
- "sshr v9.4s, v9.4s, #0x1f\n"
- "sshr v10.4s, v10.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
+ "sqadd v23.4s, v23.4s, v4.4s\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sshr v8.4s, v8.4s, #0x1f\n"
- "sqadd v21.4s, v21.4s, v9.4s\n"
- "sqadd v22.4s, v22.4s, v10.4s\n"
- "sqadd v23.4s, v23.4s, v4.4s\n"
"sqadd v24.4s, v24.4s, v5.4s\n"
"sqadd v25.4s, v25.4s, v6.4s\n"
"sqadd v26.4s, v26.4s, v7.4s\n"
@@ -1251,8 +1287,13 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"srshl v25.4s, v25.4s, v0.4s\n"
"srshl v26.4s, v26.4s, v0.4s\n"
"srshl v27.4s, v27.4s, v0.4s\n"
- "add x23, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
+ "add x22, %x[qp], %[c_offset]\n"
+ "add x23, %x[qp], %[minval]\n"
+ "ld1r { v4.4s }, [x22]\n"
+ "add x22, %x[qp], %[maxval]\n"
+ "ld1r { v5.4s }, [x23]\n"
+ "cmp x11, #0x10\n"
+ "ld1r { v6.4s }, [x22]\n"
"add v16.4s, v16.4s, v4.4s\n"
"add v17.4s, v17.4s, v4.4s\n"
"add v18.4s, v18.4s, v4.4s\n"
@@ -1263,10 +1304,6 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"add v23.4s, v23.4s, v4.4s\n"
"add v24.4s, v24.4s, v4.4s\n"
"add v25.4s, v25.4s, v4.4s\n"
- "add v26.4s, v26.4s, v4.4s\n"
- "add v27.4s, v27.4s, v4.4s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x23]\n"
"smin v16.4s, v16.4s, v6.4s\n"
"smin v17.4s, v17.4s, v6.4s\n"
"smin v18.4s, v18.4s, v6.4s\n"
@@ -1277,10 +1314,6 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"smin v23.4s, v23.4s, v6.4s\n"
"smin v24.4s, v24.4s, v6.4s\n"
"smin v25.4s, v25.4s, v6.4s\n"
- "smin v26.4s, v26.4s, v6.4s\n"
- "smin v27.4s, v27.4s, v6.4s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x23]\n"
"smax v16.4s, v16.4s, v5.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
"smax v18.4s, v18.4s, v5.4s\n"
@@ -1291,117 +1324,120 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"smax v23.4s, v23.4s, v5.4s\n"
"smax v24.4s, v24.4s, v5.4s\n"
"smax v25.4s, v25.4s, v5.4s\n"
- "smax v26.4s, v26.4s, v5.4s\n"
- "smax v27.4s, v27.4s, v5.4s\n"
+ "add v26.4s, v26.4s, v4.4s\n"
+ "add v27.4s, v27.4s, v4.4s\n"
"uzp1 v16.8h, v16.8h, v17.8h\n"
"uzp1 v17.8h, v18.8h, v19.8h\n"
"uzp1 v20.8h, v20.8h, v21.8h\n"
"uzp1 v21.8h, v22.8h, v23.8h\n"
"uzp1 v24.8h, v24.8h, v25.8h\n"
- "uzp1 v25.8h, v26.8h, v27.8h\n"
- "cmp x15, #0x10\n"
+ "smin v26.4s, v26.4s, v6.4s\n"
+ "smin v27.4s, v27.4s, v6.4s\n"
"uzp1 v16.16b, v16.16b, v17.16b\n"
"uzp1 v20.16b, v20.16b, v21.16b\n"
+ "smax v26.4s, v26.4s, v5.4s\n"
+ "smax v27.4s, v27.4s, v5.4s\n"
+ "uzp1 v25.8h, v26.8h, v27.8h\n"
"uzp1 v24.16b, v24.16b, v25.16b\n"
"bge 89f\n"
- "tbz x15, #3, 84f\n"
- "str d16, [x14], #0x8\n"
- "str d20, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
- "tbz x15, #2, 82f\n"
- "st1 { v16.s }[2], [x14], #0x4\n"
- "st1 { v20.s }[2], [x22], #0x4\n"
- "st1 { v24.s }[2], [x21], #0x4\n"
- "tbz x15, #1, 81f\n"
- "st1 { v16.h }[6], [x14], #0x2\n"
- "st1 { v20.h }[6], [x22], #0x2\n"
- "st1 { v24.h }[6], [x21], #0x2\n"
- "tbz x15, #0, 88f\n"
- "st1 { v16.b }[14], [x14]\n"
- "st1 { v20.b }[14], [x22]\n"
- "st1 { v24.b }[14], [x21]\n"
+ "tbz x11, #3, 84f\n"
+ "str d16, [x28], #0x8\n"
+ "str d20, [x21], #0x8\n"
+ "str d24, [x20], #0x8\n"
+ "tbz x11, #2, 82f\n"
+ "st1 { v16.s }[2], [x28], #0x4\n"
+ "st1 { v20.s }[2], [x21], #0x4\n"
+ "st1 { v24.s }[2], [x20], #0x4\n"
+ "tbz x11, #1, 81f\n"
+ "st1 { v16.h }[6], [x28], #0x2\n"
+ "st1 { v20.h }[6], [x21], #0x2\n"
+ "st1 { v24.h }[6], [x20], #0x2\n"
+ "tbz x11, #0, 88f\n"
+ "st1 { v16.b }[14], [x28]\n"
+ "st1 { v20.b }[14], [x21]\n"
+ "st1 { v24.b }[14], [x20]\n"
"b 88f\n"
"81:" // Height 3: Partial direct writeback: partial_1_12
- "tbz x15, #0, 88f\n"
- "st1 { v16.b }[12], [x14]\n"
- "st1 { v20.b }[12], [x22]\n"
- "st1 { v24.b }[12], [x21]\n"
+ "tbz x11, #0, 88f\n"
+ "st1 { v16.b }[12], [x28]\n"
+ "st1 { v20.b }[12], [x21]\n"
+ "st1 { v24.b }[12], [x20]\n"
"b 88f\n"
"82:" // Height 3: Partial direct writeback: partial_2_8
- "tbz x15, #1, 83f\n"
- "st1 { v16.h }[4], [x14], #0x2\n"
- "st1 { v20.h }[4], [x22], #0x2\n"
- "st1 { v24.h }[4], [x21], #0x2\n"
- "tbz x15, #0, 88f\n"
- "st1 { v16.b }[10], [x14]\n"
- "st1 { v20.b }[10], [x22]\n"
- "st1 { v24.b }[10], [x21]\n"
+ "tbz x11, #1, 83f\n"
+ "st1 { v16.h }[4], [x28], #0x2\n"
+ "st1 { v20.h }[4], [x21], #0x2\n"
+ "st1 { v24.h }[4], [x20], #0x2\n"
+ "tbz x11, #0, 88f\n"
+ "st1 { v16.b }[10], [x28]\n"
+ "st1 { v20.b }[10], [x21]\n"
+ "st1 { v24.b }[10], [x20]\n"
"b 88f\n"
"83:" // Height 3: Partial direct writeback: partial_1_8
- "tbz x15, #0, 88f\n"
- "st1 { v16.b }[8], [x14]\n"
- "st1 { v20.b }[8], [x22]\n"
- "st1 { v24.b }[8], [x21]\n"
+ "tbz x11, #0, 88f\n"
+ "st1 { v16.b }[8], [x28]\n"
+ "st1 { v20.b }[8], [x21]\n"
+ "st1 { v24.b }[8], [x20]\n"
"b 88f\n"
"84:" // Height 3: Partial direct writeback: partial_4_0
- "tbz x15, #2, 86f\n"
- "str s16, [x14], #0x4\n"
- "str s20, [x22], #0x4\n"
- "str s24, [x21], #0x4\n"
- "tbz x15, #1, 85f\n"
- "st1 { v16.h }[2], [x14], #0x2\n"
- "st1 { v20.h }[2], [x22], #0x2\n"
- "st1 { v24.h }[2], [x21], #0x2\n"
- "tbz x15, #0, 88f\n"
- "st1 { v16.b }[6], [x14]\n"
- "st1 { v20.b }[6], [x22]\n"
- "st1 { v24.b }[6], [x21]\n"
+ "tbz x11, #2, 86f\n"
+ "str s16, [x28], #0x4\n"
+ "str s20, [x21], #0x4\n"
+ "str s24, [x20], #0x4\n"
+ "tbz x11, #1, 85f\n"
+ "st1 { v16.h }[2], [x28], #0x2\n"
+ "st1 { v20.h }[2], [x21], #0x2\n"
+ "st1 { v24.h }[2], [x20], #0x2\n"
+ "tbz x11, #0, 88f\n"
+ "st1 { v16.b }[6], [x28]\n"
+ "st1 { v20.b }[6], [x21]\n"
+ "st1 { v24.b }[6], [x20]\n"
"b 88f\n"
"85:" // Height 3: Partial direct writeback: partial_1_4
- "tbz x15, #0, 88f\n"
- "st1 { v16.b }[4], [x14]\n"
- "st1 { v20.b }[4], [x22]\n"
- "st1 { v24.b }[4], [x21]\n"
+ "tbz x11, #0, 88f\n"
+ "st1 { v16.b }[4], [x28]\n"
+ "st1 { v20.b }[4], [x21]\n"
+ "st1 { v24.b }[4], [x20]\n"
"b 88f\n"
"86:" // Height 3: Partial direct writeback: partial_2_0
- "tbz x15, #1, 87f\n"
- "str h16, [x14], #0x2\n"
- "str h20, [x22], #0x2\n"
- "str h24, [x21], #0x2\n"
- "tbz x15, #0, 88f\n"
- "st1 { v16.b }[2], [x14]\n"
- "st1 { v20.b }[2], [x22]\n"
- "st1 { v24.b }[2], [x21]\n"
+ "tbz x11, #1, 87f\n"
+ "str h16, [x28], #0x2\n"
+ "str h20, [x21], #0x2\n"
+ "str h24, [x20], #0x2\n"
+ "tbz x11, #0, 88f\n"
+ "st1 { v16.b }[2], [x28]\n"
+ "st1 { v20.b }[2], [x21]\n"
+ "st1 { v24.b }[2], [x20]\n"
"b 88f\n"
"87:" // Height 3: Partial direct writeback: partial_1_0
- "str b16, [x14, #0x0]\n"
- "str b20, [x22, #0x0]\n"
- "str b24, [x21, #0x0]\n"
+ "str b16, [x28, #0x0]\n"
+ "str b20, [x21, #0x0]\n"
+ "str b24, [x20, #0x0]\n"
"88:" // Height 3: Partial direct writeback: Done
"b 90f\n"
"89:" // Height 3: Full writeback
- "str q16, [x14, #0x0]\n"
- "add x14, x14, #0x10\n"
- "str q20, [x22, #0x0]\n"
- "str q24, [x21, #0x0]\n"
+ "str q16, [x28, #0x0]\n"
+ "add x28, x28, #0x10\n"
+ "str q20, [x21, #0x0]\n"
+ "str q24, [x20, #0x0]\n"
"90:" // Height 3: Writeback done
- "subs x15, x15, #0x10\n"
+ "subs x11, x11, #0x10\n"
"bgt 62b\n"
"b 122f\n"
"91:" // Height 4
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x4\n"
- "mov x16, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"movi v12.4s, #0x0\n"
- "bic %x[flags], %x[flags], #0x80000000\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"movi v13.4s, #0x0\n"
- "ldr x15, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"movi v14.4s, #0x0\n"
- "ldr x13, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x9, %x[col_bias]\n"
"movi v15.16b, #0x1\n"
- "mov x14, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "bic %x[flags], %x[flags], #0x80000000\n"
+ "mov x28, %x[output_ptr]\n"
+ "mov x19, #0x4\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"92:" // Height 4: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -1420,125 +1456,137 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"movi v30.4s, #0x0\n"
"movi v31.4s, #0x0\n"
"93:" // Height 4: setup done
- "mov x12, #0x0\n"
+ "mov x27, #0x0\n"
"94:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w11, [x20, x12, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 95f\n"
- "ldr x21, [%x[input_ptr], x12, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x10, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "ldr x22, [x21, #0x10]\n"
- "ldr x21, [x21, #0x18]\n"
- "cbnz x12, 96f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x10, x10, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
- "add x21, x21, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "ldr x20, [x20, #0x18]\n"
+ "cbnz x27, 96f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
+ "add x20, x20, x19\n"
"b 96f\n"
"95:" // Height 4: setup direct input
- "mov x10, %x[input_ptr]\n"
- "add x23, x10, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x22, x25, x19\n"
+ "add x21, x22, x19\n"
+ "add x20, x21, x19\n"
"96:" // Height 4: input setup done
- "cmp x11, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 101f\n"
- "ldr q0, [x10, #0x0]\n"
- "cmp x11, #0x20\n"
- "ldr q1, [x23, #0x0]\n"
- "ldr q2, [x22, #0x0]\n"
- "ldr q3, [x21, #0x0]\n"
- "ldr q4, [x13, #0x0]\n"
- "ldr q5, [x13, #0x10]\n"
- "ldr q6, [x13, #0x20]\n"
- "ldr q7, [x13, #0x30]\n"
- "ldr q8, [x13, #0x40]\n"
- "ldr q9, [x13, #0x50]\n"
- "ldr q10, [x13, #0x60]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x22, #0x0]\n"
+ "cmp x26, #0x20\n"
+ "ldr q2, [x21, #0x0]\n"
+ "ldr q3, [x20, #0x0]\n"
+ "ldr q4, [x10, #0x0]\n"
"blt 99f\n"
"97:" // Height 4: Multiply loop: Main loop head
".inst 0x6f80e090 // udot v16.4s, v4.16b, v0.4b[0]\n"
- "ldr x9, [x13, #0x78]\n"
+ "ldr d5, [x10, #0x10]\n"
".inst 0x6f81e094 // udot v20.4s, v4.16b, v1.4b[0]\n"
- "ldr x28, [x13, #0x88]\n"
+ "ldr x24, [x10, #0x18]\n"
".inst 0x6f82e098 // udot v24.4s, v4.16b, v2.4b[0]\n"
- "ldr x27, [x13, #0x98]\n"
+ "ldr d6, [x10, #0x20]\n"
".inst 0x6f83e09c // udot v28.4s, v4.16b, v3.4b[0]\n"
- "ldr d4, [x13, #0x70]\n"
+ "ldr x23, [x10, #0x28]\n"
+ "mov v5.d[1], x24\n"
+ "ldr d7, [x10, #0x30]\n"
+ "ldr x19, [x10, #0x38]\n"
+ "add x25, x25, #0x10\n"
".inst 0x6f80e0b1 // udot v17.4s, v5.16b, v0.4b[0]\n"
- "mov v4.d[1], x9\n"
+ "mov v6.d[1], x23\n"
".inst 0x6f81e0b5 // udot v21.4s, v5.16b, v1.4b[0]\n"
- "ldr x26, [x13, #0xa8]\n"
+ "ldr d8, [x10, #0x40]\n"
".inst 0x6f82e0b9 // udot v25.4s, v5.16b, v2.4b[0]\n"
- "ldr x25, [x13, #0xb8]\n"
+ "mov v7.d[1], x19\n"
".inst 0x6f83e0bd // udot v29.4s, v5.16b, v3.4b[0]\n"
- "ldr d5, [x13, #0x80]\n"
+ "ldr x23, [x10, #0x48]\n"
".inst 0x6f80e0d2 // udot v18.4s, v6.16b, v0.4b[0]\n"
- "mov v5.d[1], x28\n"
+ "ldr d9, [x10, #0x50]\n"
".inst 0x6f81e0d6 // udot v22.4s, v6.16b, v1.4b[0]\n"
- "ldr x24, [x13, #0xc8]\n"
+ "ldr x19, [x10, #0x58]\n"
".inst 0x6f82e0da // udot v26.4s, v6.16b, v2.4b[0]\n"
- "ldr x20, [x13, #0xd8]\n"
+ "mov v8.d[1], x23\n"
".inst 0x6f83e0de // udot v30.4s, v6.16b, v3.4b[0]\n"
- "ldr d6, [x13, #0x90]\n"
+ "ldr d10, [x10, #0x60]\n"
".inst 0x6f80e0f3 // udot v19.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x27\n"
+ "mov v9.d[1], x19\n"
".inst 0x6f81e0f7 // udot v23.4s, v7.16b, v1.4b[0]\n"
- "ldr x9, [x13, #0xe8]\n"
+ "ldr x23, [x10, #0x68]\n"
".inst 0x6f82e0fb // udot v27.4s, v7.16b, v2.4b[0]\n"
- "ldr x28, [x13, #0xf8]\n"
+ "ldr d4, [x10, #0x70]\n"
".inst 0x6f83e0ff // udot v31.4s, v7.16b, v3.4b[0]\n"
- "ldr d7, [x13, #0xa0]\n"
+ "ldr x19, [x10, #0x78]\n"
".inst 0x6fa0e110 // udot v16.4s, v8.16b, v0.4b[1]\n"
- "mov v7.d[1], x26\n"
+ "mov v10.d[1], x23\n"
".inst 0x6fa1e114 // udot v20.4s, v8.16b, v1.4b[1]\n"
- "add x10, x10, #0x10\n"
+ "ldr d5, [x10, #0x80]\n"
".inst 0x6fa2e118 // udot v24.4s, v8.16b, v2.4b[1]\n"
- "add x23, x23, #0x10\n"
+ "mov v4.d[1], x19\n"
".inst 0x6fa3e11c // udot v28.4s, v8.16b, v3.4b[1]\n"
- "ldr d8, [x13, #0xb0]\n"
+ "ldr x24, [x10, #0x88]\n"
".inst 0x6fa0e131 // udot v17.4s, v9.16b, v0.4b[1]\n"
- "mov v8.d[1], x25\n"
+ "ldr d6, [x10, #0x90]\n"
".inst 0x6fa1e135 // udot v21.4s, v9.16b, v1.4b[1]\n"
- "add x22, x22, #0x10\n"
+ "ldr x23, [x10, #0x98]\n"
".inst 0x6fa2e139 // udot v25.4s, v9.16b, v2.4b[1]\n"
- "add x21, x21, #0x10\n"
+ "mov v5.d[1], x24\n"
".inst 0x6fa3e13d // udot v29.4s, v9.16b, v3.4b[1]\n"
- "ldr d9, [x13, #0xc0]\n"
+ "ldr d7, [x10, #0xa0]\n"
".inst 0x6fa0e152 // udot v18.4s, v10.16b, v0.4b[1]\n"
- "mov v9.d[1], x24\n"
+ "mov v6.d[1], x23\n"
".inst 0x6fa1e156 // udot v22.4s, v10.16b, v1.4b[1]\n"
+ "ldr x19, [x10, #0xa8]\n"
".inst 0x6fa2e15a // udot v26.4s, v10.16b, v2.4b[1]\n"
+ "ldr d8, [x10, #0xb0]\n"
".inst 0x6fa3e15e // udot v30.4s, v10.16b, v3.4b[1]\n"
- "ldr d10, [x13, #0xd0]\n"
+ "ldr x23, [x10, #0xb8]\n"
".inst 0x6fa0e093 // udot v19.4s, v4.16b, v0.4b[1]\n"
- "mov v10.d[1], x20\n"
+ "mov v7.d[1], x19\n"
".inst 0x6fa1e097 // udot v23.4s, v4.16b, v1.4b[1]\n"
+ "ldr d9, [x10, #0xc0]\n"
".inst 0x6fa2e09b // udot v27.4s, v4.16b, v2.4b[1]\n"
+ "mov v8.d[1], x23\n"
".inst 0x6fa3e09f // udot v31.4s, v4.16b, v3.4b[1]\n"
- "ldr d4, [x13, #0xe0]\n"
+ "ldr x19, [x10, #0xc8]\n"
".inst 0x6f80e8b0 // udot v16.4s, v5.16b, v0.4b[2]\n"
- "mov v4.d[1], x9\n"
+ "ldr d10, [x10, #0xd0]\n"
".inst 0x6f81e8b4 // udot v20.4s, v5.16b, v1.4b[2]\n"
+ "ldr x23, [x10, #0xd8]\n"
".inst 0x6f82e8b8 // udot v24.4s, v5.16b, v2.4b[2]\n"
+ "mov v9.d[1], x19\n"
".inst 0x6f83e8bc // udot v28.4s, v5.16b, v3.4b[2]\n"
- "ldr d5, [x13, #0xf0]\n"
+ "ldr d4, [x10, #0xe0]\n"
".inst 0x6f80e8d1 // udot v17.4s, v6.16b, v0.4b[2]\n"
- "mov v5.d[1], x28\n"
+ "mov v10.d[1], x23\n"
".inst 0x6f81e8d5 // udot v21.4s, v6.16b, v1.4b[2]\n"
- "add x13, x13, #0x100\n"
+ "ldr x19, [x10, #0xe8]\n"
".inst 0x6f82e8d9 // udot v25.4s, v6.16b, v2.4b[2]\n"
+ "ldr d5, [x10, #0xf0]\n"
".inst 0x6f83e8dd // udot v29.4s, v6.16b, v3.4b[2]\n"
+ "ldr x24, [x10, #0xf8]\n"
".inst 0x6f80e8f2 // udot v18.4s, v7.16b, v0.4b[2]\n"
+ "mov v4.d[1], x19\n"
".inst 0x6f81e8f6 // udot v22.4s, v7.16b, v1.4b[2]\n"
+ "add x22, x22, #0x10\n"
".inst 0x6f82e8fa // udot v26.4s, v7.16b, v2.4b[2]\n"
+ "mov v5.d[1], x24\n"
".inst 0x6f83e8fe // udot v30.4s, v7.16b, v3.4b[2]\n"
+ "add x21, x21, #0x10\n"
".inst 0x6f80e913 // udot v19.4s, v8.16b, v0.4b[2]\n"
+ "add x20, x20, #0x10\n"
".inst 0x6f81e917 // udot v23.4s, v8.16b, v1.4b[2]\n"
+ "add x10, x10, #0x100\n"
".inst 0x6f82e91b // udot v27.4s, v8.16b, v2.4b[2]\n"
".inst 0x6f83e91f // udot v31.4s, v8.16b, v3.4b[2]\n"
".inst 0x6fa0e930 // udot v16.4s, v9.16b, v0.4b[3]\n"
@@ -1563,77 +1611,77 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
".inst 0x6e8f946e // udot v14.4s, v3.16b, v15.16b\n"
"98:" // Height 4: Multiply loop: unique 13: skip row sum
- "ldr q0, [x10, #0x0]\n"
- "sub x11, x11, #0x10\n"
- "ldr q1, [x23, #0x0]\n"
- "cmp x11, #0x20\n"
- "ldr q2, [x22, #0x0]\n"
- "ldr q3, [x21, #0x0]\n"
- "ldr q4, [x13, #0x0]\n"
- "ldr q5, [x13, #0x10]\n"
- "ldr q6, [x13, #0x20]\n"
- "ldr q7, [x13, #0x30]\n"
- "ldr q8, [x13, #0x40]\n"
- "ldr q9, [x13, #0x50]\n"
- "ldr q10, [x13, #0x60]\n"
- "prfm pldl1keep, [x10, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x26, x26, #0x10\n"
"prfm pldl1keep, [x22, #0x80]\n"
+ "cmp x26, #0x20\n"
"prfm pldl1keep, [x21, #0x80]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x22, #0x0]\n"
+ "ldr q2, [x21, #0x0]\n"
+ "ldr q3, [x20, #0x0]\n"
+ "ldr q4, [x10, #0x0]\n"
"bge 97b\n"
"99:" // Height 4: Multiply loop: Single iteration only
".inst 0x6f80e090 // udot v16.4s, v4.16b, v0.4b[0]\n"
- "sub x11, x11, #0x10\n"
+ "ldr q5, [x10, #0x10]\n"
".inst 0x6f81e094 // udot v20.4s, v4.16b, v1.4b[0]\n"
- "add x10, x10, #0x10\n"
+ "ldr q6, [x10, #0x20]\n"
".inst 0x6f82e098 // udot v24.4s, v4.16b, v2.4b[0]\n"
- "add x23, x23, #0x10\n"
+ "ldr q7, [x10, #0x30]\n"
".inst 0x6f83e09c // udot v28.4s, v4.16b, v3.4b[0]\n"
- "ldr q4, [x13, #0x70]\n"
+ "ldr q8, [x10, #0x40]\n"
".inst 0x6f80e0b1 // udot v17.4s, v5.16b, v0.4b[0]\n"
- "add x22, x22, #0x10\n"
+ "ldr q9, [x10, #0x50]\n"
".inst 0x6f81e0b5 // udot v21.4s, v5.16b, v1.4b[0]\n"
- "add x21, x21, #0x10\n"
+ "ldr q10, [x10, #0x60]\n"
".inst 0x6f82e0b9 // udot v25.4s, v5.16b, v2.4b[0]\n"
+ "ldr q4, [x10, #0x70]\n"
".inst 0x6f83e0bd // udot v29.4s, v5.16b, v3.4b[0]\n"
- "ldr q5, [x13, #0x80]\n"
+ "ldr q5, [x10, #0x80]\n"
".inst 0x6f80e0d2 // udot v18.4s, v6.16b, v0.4b[0]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x6f81e0d6 // udot v22.4s, v6.16b, v1.4b[0]\n"
+ "add x25, x25, #0x10\n"
".inst 0x6f82e0da // udot v26.4s, v6.16b, v2.4b[0]\n"
+ "add x22, x22, #0x10\n"
".inst 0x6f83e0de // udot v30.4s, v6.16b, v3.4b[0]\n"
- "ldr q6, [x13, #0x90]\n"
+ "ldr q6, [x10, #0x90]\n"
".inst 0x6f80e0f3 // udot v19.4s, v7.16b, v0.4b[0]\n"
+ "add x21, x21, #0x10\n"
".inst 0x6f81e0f7 // udot v23.4s, v7.16b, v1.4b[0]\n"
+ "add x20, x20, #0x10\n"
".inst 0x6f82e0fb // udot v27.4s, v7.16b, v2.4b[0]\n"
".inst 0x6f83e0ff // udot v31.4s, v7.16b, v3.4b[0]\n"
- "ldr q7, [x13, #0xa0]\n"
+ "ldr q7, [x10, #0xa0]\n"
".inst 0x6fa0e110 // udot v16.4s, v8.16b, v0.4b[1]\n"
".inst 0x6fa1e114 // udot v20.4s, v8.16b, v1.4b[1]\n"
".inst 0x6fa2e118 // udot v24.4s, v8.16b, v2.4b[1]\n"
".inst 0x6fa3e11c // udot v28.4s, v8.16b, v3.4b[1]\n"
- "ldr q8, [x13, #0xb0]\n"
+ "ldr q8, [x10, #0xb0]\n"
".inst 0x6fa0e131 // udot v17.4s, v9.16b, v0.4b[1]\n"
".inst 0x6fa1e135 // udot v21.4s, v9.16b, v1.4b[1]\n"
".inst 0x6fa2e139 // udot v25.4s, v9.16b, v2.4b[1]\n"
".inst 0x6fa3e13d // udot v29.4s, v9.16b, v3.4b[1]\n"
- "ldr q9, [x13, #0xc0]\n"
+ "ldr q9, [x10, #0xc0]\n"
".inst 0x6fa0e152 // udot v18.4s, v10.16b, v0.4b[1]\n"
".inst 0x6fa1e156 // udot v22.4s, v10.16b, v1.4b[1]\n"
".inst 0x6fa2e15a // udot v26.4s, v10.16b, v2.4b[1]\n"
".inst 0x6fa3e15e // udot v30.4s, v10.16b, v3.4b[1]\n"
- "ldr q10, [x13, #0xd0]\n"
+ "ldr q10, [x10, #0xd0]\n"
".inst 0x6fa0e093 // udot v19.4s, v4.16b, v0.4b[1]\n"
".inst 0x6fa1e097 // udot v23.4s, v4.16b, v1.4b[1]\n"
".inst 0x6fa2e09b // udot v27.4s, v4.16b, v2.4b[1]\n"
".inst 0x6fa3e09f // udot v31.4s, v4.16b, v3.4b[1]\n"
- "ldr q4, [x13, #0xe0]\n"
+ "ldr q4, [x10, #0xe0]\n"
".inst 0x6f80e8b0 // udot v16.4s, v5.16b, v0.4b[2]\n"
".inst 0x6f81e8b4 // udot v20.4s, v5.16b, v1.4b[2]\n"
".inst 0x6f82e8b8 // udot v24.4s, v5.16b, v2.4b[2]\n"
".inst 0x6f83e8bc // udot v28.4s, v5.16b, v3.4b[2]\n"
- "ldr q5, [x13, #0xf0]\n"
+ "ldr q5, [x10, #0xf0]\n"
".inst 0x6f80e8d1 // udot v17.4s, v6.16b, v0.4b[2]\n"
- "add x13, x13, #0x100\n"
+ "add x10, x10, #0x100\n"
".inst 0x6f81e8d5 // udot v21.4s, v6.16b, v1.4b[2]\n"
".inst 0x6f82e8d9 // udot v25.4s, v6.16b, v2.4b[2]\n"
".inst 0x6f83e8dd // udot v29.4s, v6.16b, v3.4b[2]\n"
@@ -1667,35 +1715,35 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
".inst 0x6e8f946e // udot v14.4s, v3.16b, v15.16b\n"
"100:" // Height 4: Multiply loop: unique 14: skip row sum
- "prfm pldl1keep, [x10, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"prfm pldl1keep, [x22, #0x80]\n"
"prfm pldl1keep, [x21, #0x80]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
"101:" // Height 4: Multiply loop: Main loop skip
- "cbz x11, 108f\n"
- "cmp x11, #0x4\n"
+ "cbz x26, 108f\n"
+ "cmp x26, #0x4\n"
"blt 104f\n"
"102:" // Height 4: Multiply loop: Odd block loop
- "ldr s0, [x10], #0x4\n"
- "ldr s1, [x23], #0x4\n"
- "ldr s2, [x22], #0x4\n"
- "ldr s3, [x21], #0x4\n"
+ "ldr s0, [x25], #0x4\n"
+ "ldr s1, [x22], #0x4\n"
+ "ldr s2, [x21], #0x4\n"
+ "ldr s3, [x20], #0x4\n"
"tbnz %x[flags], #31, 103f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
".inst 0x6e8f946e // udot v14.4s, v3.16b, v15.16b\n"
"103:" // Height 4: Multiply loop: unique 15: skip row sum
- "ldr q6, [x13, #0x0]\n"
- "sub x11, x11, #0x4\n"
- "ldr q7, [x13, #0x10]\n"
- "cmp x11, #0x4\n"
- "ldr q8, [x13, #0x20]\n"
+ "ldr q6, [x10, #0x0]\n"
+ "sub x26, x26, #0x4\n"
+ "ldr q7, [x10, #0x10]\n"
+ "cmp x26, #0x4\n"
+ "ldr q8, [x10, #0x20]\n"
".inst 0x6f80e0d0 // udot v16.4s, v6.16b, v0.4b[0]\n"
- "ldr q9, [x13, #0x30]\n"
+ "ldr q9, [x10, #0x30]\n"
".inst 0x6f81e0d4 // udot v20.4s, v6.16b, v1.4b[0]\n"
+ "add x10, x10, #0x40\n"
".inst 0x6f82e0d8 // udot v24.4s, v6.16b, v2.4b[0]\n"
- "add x13, x13, #0x40\n"
".inst 0x6f83e0dc // udot v28.4s, v6.16b, v3.4b[0]\n"
".inst 0x6f80e0f1 // udot v17.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0f5 // udot v21.4s, v7.16b, v1.4b[0]\n"
@@ -1710,24 +1758,24 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
".inst 0x6f82e13b // udot v27.4s, v9.16b, v2.4b[0]\n"
".inst 0x6f83e13f // udot v31.4s, v9.16b, v3.4b[0]\n"
"bge 102b\n"
+ "cbz x26, 108f\n"
"104:" // Height 4: Multiply loop: Skip odd blocks
- "cbz x11, 108f\n"
- "tbz x11, #1, 105f\n"
- "ldr h0, [x10], #0x2\n"
- "ldr h1, [x23], #0x2\n"
- "ldr h2, [x22], #0x2\n"
- "ldr h3, [x21], #0x2\n"
- "tbz x11, #0, 106f\n"
- "ld1 { v0.b }[2], [x10]\n"
- "ld1 { v1.b }[2], [x23]\n"
- "ld1 { v2.b }[2], [x22]\n"
- "ld1 { v3.b }[2], [x21]\n"
+ "tbz x26, #1, 105f\n"
+ "ldr h0, [x25], #0x2\n"
+ "ldr h1, [x22], #0x2\n"
+ "ldr h2, [x21], #0x2\n"
+ "ldr h3, [x20], #0x2\n"
+ "tbz x26, #0, 106f\n"
+ "ld1 { v0.b }[2], [x25]\n"
+ "ld1 { v1.b }[2], [x22]\n"
+ "ld1 { v2.b }[2], [x21]\n"
+ "ld1 { v3.b }[2], [x20]\n"
"b 106f\n"
"105:" // Height 4: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x10, #0x0]\n"
- "ldr b1, [x23, #0x0]\n"
- "ldr b2, [x22, #0x0]\n"
- "ldr b3, [x21, #0x0]\n"
+ "ldr b0, [x25, #0x0]\n"
+ "ldr b1, [x22, #0x0]\n"
+ "ldr b2, [x21, #0x0]\n"
+ "ldr b3, [x20, #0x0]\n"
"106:" // Height 4: Multiply loop: Ragged operand read: Done
"tbnz %x[flags], #31, 107f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
@@ -1735,16 +1783,16 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
".inst 0x6e8f946e // udot v14.4s, v3.16b, v15.16b\n"
"107:" // Height 4: Multiply loop: unique 16: skip row sum
- "ldr q10, [x13, #0x0]\n"
+ "ldr q10, [x10, #0x0]\n"
+ "ldr q4, [x10, #0x10]\n"
+ "ldr q5, [x10, #0x20]\n"
".inst 0x6f80e150 // udot v16.4s, v10.16b, v0.4b[0]\n"
- "ldr q4, [x13, #0x10]\n"
+ "ldr q6, [x10, #0x30]\n"
".inst 0x6f81e154 // udot v20.4s, v10.16b, v1.4b[0]\n"
- "ldr q5, [x13, #0x20]\n"
+ "add x10, x10, #0x40\n"
".inst 0x6f82e158 // udot v24.4s, v10.16b, v2.4b[0]\n"
- "ldr q6, [x13, #0x30]\n"
".inst 0x6f83e15c // udot v28.4s, v10.16b, v3.4b[0]\n"
".inst 0x6f80e091 // udot v17.4s, v4.16b, v0.4b[0]\n"
- "add x13, x13, #0x40\n"
".inst 0x6f81e095 // udot v21.4s, v4.16b, v1.4b[0]\n"
".inst 0x6f82e099 // udot v25.4s, v4.16b, v2.4b[0]\n"
".inst 0x6f83e09d // udot v29.4s, v4.16b, v3.4b[0]\n"
@@ -1757,28 +1805,28 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
".inst 0x6f82e0db // udot v27.4s, v6.16b, v2.4b[0]\n"
".inst 0x6f83e0df // udot v31.4s, v6.16b, v3.4b[0]\n"
"108:" // Height 4: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x12, x12, #0x1\n"
- "cmp x12, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 94b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x14, x20\n"
- "add x21, x22, x20\n"
- "add x20, x21, x20\n"
- "prfm pstl1keep, [x14, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x21, x28, x19\n"
"prfm pstl1keep, [x21, #0x0]\n"
+ "add x20, x21, x19\n"
"prfm pstl1keep, [x20, #0x0]\n"
+ "add x19, x20, x19\n"
+ "prfm pstl1keep, [x19, #0x0]\n"
"tbnz %x[flags], #31, 109f\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
"addp v13.4s, v13.4s, v13.4s\n"
"addp v14.4s, v14.4s, v14.4s\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
- "neg v4.4s, v4.4s\n"
+ "add x22, %x[qp], %[b_offset]\n"
+ "ld1r { v4.4s }, [x22]\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
+ "neg v4.4s, v4.4s\n"
"addp v13.4s, v13.4s, v13.4s\n"
"addp v14.4s, v14.4s, v14.4s\n"
"mul v11.4s, v11.4s, v4.4s\n"
@@ -1786,13 +1834,9 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"mul v13.4s, v13.4s, v4.4s\n"
"mul v14.4s, v14.4s, v4.4s\n"
"109:" // Height 4: skip row sum fixup
- "ldr q0, [x16, #0x0]\n"
"add v16.4s, v16.4s, v11.4s\n"
- "ldr q1, [x16, #0x10]\n"
"add v17.4s, v17.4s, v11.4s\n"
- "ldr q2, [x16, #0x20]\n"
"add v18.4s, v18.4s, v11.4s\n"
- "ldr q3, [x16, #0x30]\n"
"add v19.4s, v19.4s, v11.4s\n"
"add v20.4s, v20.4s, v12.4s\n"
"add v21.4s, v21.4s, v12.4s\n"
@@ -1806,34 +1850,39 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"add v29.4s, v29.4s, v14.4s\n"
"add v30.4s, v30.4s, v14.4s\n"
"add v31.4s, v31.4s, v14.4s\n"
+ "ldr q0, [x9, #0x0]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
+ "ldr q1, [x9, #0x10]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
+ "ldr q2, [x9, #0x20]\n"
+ "add x22, %x[qp], %[per_layer_mul]\n"
"add v16.4s, v16.4s, v0.4s\n"
- "add v17.4s, v17.4s, v1.4s\n"
- "add v18.4s, v18.4s, v2.4s\n"
- "add v19.4s, v19.4s, v3.4s\n"
"add v20.4s, v20.4s, v0.4s\n"
+ "add v17.4s, v17.4s, v1.4s\n"
"add v21.4s, v21.4s, v1.4s\n"
+ "add v18.4s, v18.4s, v2.4s\n"
"add v22.4s, v22.4s, v2.4s\n"
- "add v23.4s, v23.4s, v3.4s\n"
"add v24.4s, v24.4s, v0.4s\n"
"add v25.4s, v25.4s, v1.4s\n"
"add v26.4s, v26.4s, v2.4s\n"
- "add v27.4s, v27.4s, v3.4s\n"
"add v28.4s, v28.4s, v0.4s\n"
"add v29.4s, v29.4s, v1.4s\n"
"add v30.4s, v30.4s, v2.4s\n"
- "add v31.4s, v31.4s, v3.4s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x23]\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
+ "ldr q3, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
"ld1r { v0.4s }, [x23]\n"
+ "ld1r { v4.4s }, [x22]\n"
+ "add v19.4s, v19.4s, v3.4s\n"
+ "add v23.4s, v23.4s, v3.4s\n"
+ "add v27.4s, v27.4s, v3.4s\n"
+ "add v31.4s, v31.4s, v3.4s\n"
"sqrdmulh v16.4s, v16.4s, v4.4s\n"
"sqrdmulh v17.4s, v17.4s, v4.4s\n"
"sqrdmulh v18.4s, v18.4s, v4.4s\n"
- "sqrdmulh v19.4s, v19.4s, v4.4s\n"
"sqrdmulh v20.4s, v20.4s, v4.4s\n"
"sqrdmulh v21.4s, v21.4s, v4.4s\n"
"sqrdmulh v22.4s, v22.4s, v4.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v4.4s\n"
"sqrdmulh v23.4s, v23.4s, v4.4s\n"
"sqrdmulh v24.4s, v24.4s, v4.4s\n"
"sqrdmulh v25.4s, v25.4s, v4.4s\n"
@@ -1843,54 +1892,53 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"sqrdmulh v29.4s, v29.4s, v4.4s\n"
"sqrdmulh v30.4s, v30.4s, v4.4s\n"
"sqrdmulh v31.4s, v31.4s, v4.4s\n"
- "add x16, x16, #0x40\n"
"tbz %x[flags], #5, 110f\n"
"and v4.16b, v16.16b, v0.16b\n"
"and v5.16b, v17.16b, v0.16b\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v4.4s\n"
- "sqadd v17.4s, v17.4s, v5.4s\n"
"and v6.16b, v18.16b, v0.16b\n"
"and v7.16b, v19.16b, v0.16b\n"
"and v8.16b, v20.16b, v0.16b\n"
"and v9.16b, v21.16b, v0.16b\n"
"and v10.16b, v22.16b, v0.16b\n"
- "and v4.16b, v23.16b, v0.16b\n"
- "and v5.16b, v24.16b, v0.16b\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sshr v8.4s, v8.4s, #0x1f\n"
"sshr v9.4s, v9.4s, #0x1f\n"
"sshr v10.4s, v10.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
+ "sqadd v16.4s, v16.4s, v4.4s\n"
+ "and v4.16b, v23.16b, v0.16b\n"
+ "sqadd v17.4s, v17.4s, v5.4s\n"
"sqadd v18.4s, v18.4s, v6.4s\n"
"sqadd v19.4s, v19.4s, v7.4s\n"
"sqadd v20.4s, v20.4s, v8.4s\n"
"sqadd v21.4s, v21.4s, v9.4s\n"
"sqadd v22.4s, v22.4s, v10.4s\n"
- "sqadd v23.4s, v23.4s, v4.4s\n"
- "sqadd v24.4s, v24.4s, v5.4s\n"
+ "and v5.16b, v24.16b, v0.16b\n"
"and v6.16b, v25.16b, v0.16b\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
"and v7.16b, v26.16b, v0.16b\n"
"and v8.16b, v27.16b, v0.16b\n"
"and v9.16b, v28.16b, v0.16b\n"
"and v10.16b, v29.16b, v0.16b\n"
- "and v4.16b, v30.16b, v0.16b\n"
- "and v5.16b, v31.16b, v0.16b\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
+ "sqadd v23.4s, v23.4s, v4.4s\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sshr v8.4s, v8.4s, #0x1f\n"
"sshr v9.4s, v9.4s, #0x1f\n"
"sshr v10.4s, v10.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
+ "and v4.16b, v30.16b, v0.16b\n"
+ "sqadd v24.4s, v24.4s, v5.4s\n"
"sqadd v25.4s, v25.4s, v6.4s\n"
"sqadd v26.4s, v26.4s, v7.4s\n"
+ "and v5.16b, v31.16b, v0.16b\n"
"sqadd v27.4s, v27.4s, v8.4s\n"
"sqadd v28.4s, v28.4s, v9.4s\n"
"sqadd v29.4s, v29.4s, v10.4s\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
"sqadd v30.4s, v30.4s, v4.4s\n"
"sqadd v31.4s, v31.4s, v5.4s\n"
"110:" // Height 4: no shift correction
@@ -1910,8 +1958,13 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"srshl v29.4s, v29.4s, v0.4s\n"
"srshl v30.4s, v30.4s, v0.4s\n"
"srshl v31.4s, v31.4s, v0.4s\n"
- "add x23, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
+ "add x22, %x[qp], %[c_offset]\n"
+ "add x23, %x[qp], %[minval]\n"
+ "ld1r { v4.4s }, [x22]\n"
+ "add x22, %x[qp], %[maxval]\n"
+ "ld1r { v5.4s }, [x23]\n"
+ "cmp x11, #0x10\n"
+ "ld1r { v6.4s }, [x22]\n"
"add v16.4s, v16.4s, v4.4s\n"
"add v17.4s, v17.4s, v4.4s\n"
"add v18.4s, v18.4s, v4.4s\n"
@@ -1922,14 +1975,6 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"add v23.4s, v23.4s, v4.4s\n"
"add v24.4s, v24.4s, v4.4s\n"
"add v25.4s, v25.4s, v4.4s\n"
- "add v26.4s, v26.4s, v4.4s\n"
- "add v27.4s, v27.4s, v4.4s\n"
- "add v28.4s, v28.4s, v4.4s\n"
- "add v29.4s, v29.4s, v4.4s\n"
- "add v30.4s, v30.4s, v4.4s\n"
- "add v31.4s, v31.4s, v4.4s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x23]\n"
"smin v16.4s, v16.4s, v6.4s\n"
"smin v17.4s, v17.4s, v6.4s\n"
"smin v18.4s, v18.4s, v6.4s\n"
@@ -1940,14 +1985,6 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"smin v23.4s, v23.4s, v6.4s\n"
"smin v24.4s, v24.4s, v6.4s\n"
"smin v25.4s, v25.4s, v6.4s\n"
- "smin v26.4s, v26.4s, v6.4s\n"
- "smin v27.4s, v27.4s, v6.4s\n"
- "smin v28.4s, v28.4s, v6.4s\n"
- "smin v29.4s, v29.4s, v6.4s\n"
- "smin v30.4s, v30.4s, v6.4s\n"
- "smin v31.4s, v31.4s, v6.4s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x23]\n"
"smax v16.4s, v16.4s, v5.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
"smax v18.4s, v18.4s, v5.4s\n"
@@ -1958,141 +1995,152 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"smax v23.4s, v23.4s, v5.4s\n"
"smax v24.4s, v24.4s, v5.4s\n"
"smax v25.4s, v25.4s, v5.4s\n"
+ "add v26.4s, v26.4s, v4.4s\n"
+ "add v27.4s, v27.4s, v4.4s\n"
+ "add v28.4s, v28.4s, v4.4s\n"
+ "add v29.4s, v29.4s, v4.4s\n"
+ "add v30.4s, v30.4s, v4.4s\n"
+ "add v31.4s, v31.4s, v4.4s\n"
+ "uzp1 v16.8h, v16.8h, v17.8h\n"
+ "uzp1 v17.8h, v18.8h, v19.8h\n"
+ "uzp1 v20.8h, v20.8h, v21.8h\n"
+ "uzp1 v21.8h, v22.8h, v23.8h\n"
+ "smin v26.4s, v26.4s, v6.4s\n"
+ "smin v27.4s, v27.4s, v6.4s\n"
+ "smin v28.4s, v28.4s, v6.4s\n"
+ "smin v29.4s, v29.4s, v6.4s\n"
+ "smin v30.4s, v30.4s, v6.4s\n"
+ "smin v31.4s, v31.4s, v6.4s\n"
+ "uzp1 v24.8h, v24.8h, v25.8h\n"
+ "uzp1 v16.16b, v16.16b, v17.16b\n"
+ "uzp1 v20.16b, v20.16b, v21.16b\n"
"smax v26.4s, v26.4s, v5.4s\n"
"smax v27.4s, v27.4s, v5.4s\n"
"smax v28.4s, v28.4s, v5.4s\n"
"smax v29.4s, v29.4s, v5.4s\n"
"smax v30.4s, v30.4s, v5.4s\n"
"smax v31.4s, v31.4s, v5.4s\n"
- "uzp1 v16.8h, v16.8h, v17.8h\n"
- "uzp1 v17.8h, v18.8h, v19.8h\n"
- "uzp1 v20.8h, v20.8h, v21.8h\n"
- "uzp1 v21.8h, v22.8h, v23.8h\n"
- "uzp1 v24.8h, v24.8h, v25.8h\n"
"uzp1 v25.8h, v26.8h, v27.8h\n"
"uzp1 v28.8h, v28.8h, v29.8h\n"
"uzp1 v29.8h, v30.8h, v31.8h\n"
- "cmp x15, #0x10\n"
- "uzp1 v16.16b, v16.16b, v17.16b\n"
- "uzp1 v20.16b, v20.16b, v21.16b\n"
"uzp1 v24.16b, v24.16b, v25.16b\n"
"uzp1 v28.16b, v28.16b, v29.16b\n"
"bge 119f\n"
- "tbz x15, #3, 114f\n"
- "str d16, [x14], #0x8\n"
- "str d20, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
- "str d28, [x20], #0x8\n"
- "tbz x15, #2, 112f\n"
- "st1 { v16.s }[2], [x14], #0x4\n"
- "st1 { v20.s }[2], [x22], #0x4\n"
- "st1 { v24.s }[2], [x21], #0x4\n"
- "st1 { v28.s }[2], [x20], #0x4\n"
- "tbz x15, #1, 111f\n"
- "st1 { v16.h }[6], [x14], #0x2\n"
- "st1 { v20.h }[6], [x22], #0x2\n"
- "st1 { v24.h }[6], [x21], #0x2\n"
- "st1 { v28.h }[6], [x20], #0x2\n"
- "tbz x15, #0, 118f\n"
- "st1 { v16.b }[14], [x14]\n"
- "st1 { v20.b }[14], [x22]\n"
- "st1 { v24.b }[14], [x21]\n"
- "st1 { v28.b }[14], [x20]\n"
+ "tbz x11, #3, 114f\n"
+ "str d16, [x28], #0x8\n"
+ "str d20, [x21], #0x8\n"
+ "str d24, [x20], #0x8\n"
+ "str d28, [x19], #0x8\n"
+ "tbz x11, #2, 112f\n"
+ "st1 { v16.s }[2], [x28], #0x4\n"
+ "st1 { v20.s }[2], [x21], #0x4\n"
+ "st1 { v24.s }[2], [x20], #0x4\n"
+ "st1 { v28.s }[2], [x19], #0x4\n"
+ "tbz x11, #1, 111f\n"
+ "st1 { v16.h }[6], [x28], #0x2\n"
+ "st1 { v20.h }[6], [x21], #0x2\n"
+ "st1 { v24.h }[6], [x20], #0x2\n"
+ "st1 { v28.h }[6], [x19], #0x2\n"
+ "tbz x11, #0, 118f\n"
+ "st1 { v16.b }[14], [x28]\n"
+ "st1 { v20.b }[14], [x21]\n"
+ "st1 { v24.b }[14], [x20]\n"
+ "st1 { v28.b }[14], [x19]\n"
"b 118f\n"
"111:" // Height 4: Partial direct writeback: partial_1_12
- "tbz x15, #0, 118f\n"
- "st1 { v16.b }[12], [x14]\n"
- "st1 { v20.b }[12], [x22]\n"
- "st1 { v24.b }[12], [x21]\n"
- "st1 { v28.b }[12], [x20]\n"
+ "tbz x11, #0, 118f\n"
+ "st1 { v16.b }[12], [x28]\n"
+ "st1 { v20.b }[12], [x21]\n"
+ "st1 { v24.b }[12], [x20]\n"
+ "st1 { v28.b }[12], [x19]\n"
"b 118f\n"
"112:" // Height 4: Partial direct writeback: partial_2_8
- "tbz x15, #1, 113f\n"
- "st1 { v16.h }[4], [x14], #0x2\n"
- "st1 { v20.h }[4], [x22], #0x2\n"
- "st1 { v24.h }[4], [x21], #0x2\n"
- "st1 { v28.h }[4], [x20], #0x2\n"
- "tbz x15, #0, 118f\n"
- "st1 { v16.b }[10], [x14]\n"
- "st1 { v20.b }[10], [x22]\n"
- "st1 { v24.b }[10], [x21]\n"
- "st1 { v28.b }[10], [x20]\n"
+ "tbz x11, #1, 113f\n"
+ "st1 { v16.h }[4], [x28], #0x2\n"
+ "st1 { v20.h }[4], [x21], #0x2\n"
+ "st1 { v24.h }[4], [x20], #0x2\n"
+ "st1 { v28.h }[4], [x19], #0x2\n"
+ "tbz x11, #0, 118f\n"
+ "st1 { v16.b }[10], [x28]\n"
+ "st1 { v20.b }[10], [x21]\n"
+ "st1 { v24.b }[10], [x20]\n"
+ "st1 { v28.b }[10], [x19]\n"
"b 118f\n"
"113:" // Height 4: Partial direct writeback: partial_1_8
- "tbz x15, #0, 118f\n"
- "st1 { v16.b }[8], [x14]\n"
- "st1 { v20.b }[8], [x22]\n"
- "st1 { v24.b }[8], [x21]\n"
- "st1 { v28.b }[8], [x20]\n"
+ "tbz x11, #0, 118f\n"
+ "st1 { v16.b }[8], [x28]\n"
+ "st1 { v20.b }[8], [x21]\n"
+ "st1 { v24.b }[8], [x20]\n"
+ "st1 { v28.b }[8], [x19]\n"
"b 118f\n"
"114:" // Height 4: Partial direct writeback: partial_4_0
- "tbz x15, #2, 116f\n"
- "str s16, [x14], #0x4\n"
- "str s20, [x22], #0x4\n"
- "str s24, [x21], #0x4\n"
- "str s28, [x20], #0x4\n"
- "tbz x15, #1, 115f\n"
- "st1 { v16.h }[2], [x14], #0x2\n"
- "st1 { v20.h }[2], [x22], #0x2\n"
- "st1 { v24.h }[2], [x21], #0x2\n"
- "st1 { v28.h }[2], [x20], #0x2\n"
- "tbz x15, #0, 118f\n"
- "st1 { v16.b }[6], [x14]\n"
- "st1 { v20.b }[6], [x22]\n"
- "st1 { v24.b }[6], [x21]\n"
- "st1 { v28.b }[6], [x20]\n"
+ "tbz x11, #2, 116f\n"
+ "str s16, [x28], #0x4\n"
+ "str s20, [x21], #0x4\n"
+ "str s24, [x20], #0x4\n"
+ "str s28, [x19], #0x4\n"
+ "tbz x11, #1, 115f\n"
+ "st1 { v16.h }[2], [x28], #0x2\n"
+ "st1 { v20.h }[2], [x21], #0x2\n"
+ "st1 { v24.h }[2], [x20], #0x2\n"
+ "st1 { v28.h }[2], [x19], #0x2\n"
+ "tbz x11, #0, 118f\n"
+ "st1 { v16.b }[6], [x28]\n"
+ "st1 { v20.b }[6], [x21]\n"
+ "st1 { v24.b }[6], [x20]\n"
+ "st1 { v28.b }[6], [x19]\n"
"b 118f\n"
"115:" // Height 4: Partial direct writeback: partial_1_4
- "tbz x15, #0, 118f\n"
- "st1 { v16.b }[4], [x14]\n"
- "st1 { v20.b }[4], [x22]\n"
- "st1 { v24.b }[4], [x21]\n"
- "st1 { v28.b }[4], [x20]\n"
+ "tbz x11, #0, 118f\n"
+ "st1 { v16.b }[4], [x28]\n"
+ "st1 { v20.b }[4], [x21]\n"
+ "st1 { v24.b }[4], [x20]\n"
+ "st1 { v28.b }[4], [x19]\n"
"b 118f\n"
"116:" // Height 4: Partial direct writeback: partial_2_0
- "tbz x15, #1, 117f\n"
- "str h16, [x14], #0x2\n"
- "str h20, [x22], #0x2\n"
- "str h24, [x21], #0x2\n"
- "str h28, [x20], #0x2\n"
- "tbz x15, #0, 118f\n"
- "st1 { v16.b }[2], [x14]\n"
- "st1 { v20.b }[2], [x22]\n"
- "st1 { v24.b }[2], [x21]\n"
- "st1 { v28.b }[2], [x20]\n"
+ "tbz x11, #1, 117f\n"
+ "str h16, [x28], #0x2\n"
+ "str h20, [x21], #0x2\n"
+ "str h24, [x20], #0x2\n"
+ "str h28, [x19], #0x2\n"
+ "tbz x11, #0, 118f\n"
+ "st1 { v16.b }[2], [x28]\n"
+ "st1 { v20.b }[2], [x21]\n"
+ "st1 { v24.b }[2], [x20]\n"
+ "st1 { v28.b }[2], [x19]\n"
"b 118f\n"
"117:" // Height 4: Partial direct writeback: partial_1_0
- "str b16, [x14, #0x0]\n"
- "str b20, [x22, #0x0]\n"
- "str b24, [x21, #0x0]\n"
- "str b28, [x20, #0x0]\n"
+ "str b16, [x28, #0x0]\n"
+ "str b20, [x21, #0x0]\n"
+ "str b24, [x20, #0x0]\n"
+ "str b28, [x19, #0x0]\n"
"118:" // Height 4: Partial direct writeback: Done
"b 120f\n"
"119:" // Height 4: Full writeback
- "str q16, [x14, #0x0]\n"
- "add x14, x14, #0x10\n"
- "str q20, [x22, #0x0]\n"
- "str q24, [x21, #0x0]\n"
- "str q28, [x20, #0x0]\n"
+ "str q16, [x28, #0x0]\n"
+ "add x28, x28, #0x10\n"
+ "str q20, [x21, #0x0]\n"
+ "str q24, [x20, #0x0]\n"
+ "str q28, [x19, #0x0]\n"
"120:" // Height 4: Writeback done
- "subs x15, x15, #0x10\n"
+ "subs x11, x11, #0x10\n"
"bgt 92b\n"
"subs %x[M], %x[M], #0x4\n"
"beq 122f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 121f\n"
- "add x21, x21, #0x4\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x4\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"121:" // Update direct input
- "mov x20, #0x4\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x4\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"122:" // Exit
: [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/generic.cpp
index 31fbf88603..4fc680c45b 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -85,69 +85,69 @@ void a64_hybrid_u8qa_dot_4x16 (
"cmp %x[M], #0x2\n"
"bgt 61f\n"
"beq 31f\n"
- "mov x10, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
- "movi v15.16b, #0x1\n"
- "bic %x[flags], %x[flags], #0x80000000\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "movi v15.16b, #0x1\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "mov x27, %x[col_bias]\n"
+ "bic %x[flags], %x[flags], #0x80000000\n"
+ "mov x26, %x[output_ptr]\n"
"2:" // Height 1: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
"movi v18.4s, #0x0\n"
"movi v19.4s, #0x0\n"
"3:" // Height 1: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"4:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 5f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "cbnz x26, 6f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "cbnz x25, 6f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19\n"
"b 6f\n"
"5:" // Height 1: setup direct input
- "mov x24, %x[input_ptr]\n"
+ "mov x23, %x[input_ptr]\n"
"6:" // Height 1: input setup done
- "cmp x25, #0x10\n"
+ "cmp x24, #0x10\n"
"blt 11f\n"
- "ldr q0, [x24, #0x0]\n"
+ "ldr q0, [x23, #0x0]\n"
"ldr q4, [x28, #0x0]\n"
- "cmp x25, #0x20\n"
+ "cmp x24, #0x20\n"
+ "blt 9f\n"
+ "7:" // Height 1: Multiply loop: Main loop head
+ ".inst 0x6f80e090 // udot v16.4s, v4.16b, v0.4b[0]\n"
"ldr q5, [x28, #0x10]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x6f80e0b1 // udot v17.4s, v5.16b, v0.4b[0]\n"
"ldr q6, [x28, #0x20]\n"
"ldr q7, [x28, #0x30]\n"
+ ".inst 0x6f80e0d2 // udot v18.4s, v6.16b, v0.4b[0]\n"
"ldr q8, [x28, #0x40]\n"
+ ".inst 0x6f80e0f3 // udot v19.4s, v7.16b, v0.4b[0]\n"
"ldr q9, [x28, #0x50]\n"
"ldr q10, [x28, #0x60]\n"
- "blt 9f\n"
- "7:" // Height 1: Multiply loop: Main loop head
- ".inst 0x6f80e090 // udot v16.4s, v4.16b, v0.4b[0]\n"
+ ".inst 0x6fa0e110 // udot v16.4s, v8.16b, v0.4b[1]\n"
"ldr q4, [x28, #0x70]\n"
- ".inst 0x6f80e0b1 // udot v17.4s, v5.16b, v0.4b[0]\n"
"ldr q5, [x28, #0x80]\n"
- ".inst 0x6f80e0d2 // udot v18.4s, v6.16b, v0.4b[0]\n"
+ ".inst 0x6fa0e131 // udot v17.4s, v9.16b, v0.4b[1]\n"
+ ".inst 0x6fa0e152 // udot v18.4s, v10.16b, v0.4b[1]\n"
"ldr q6, [x28, #0x90]\n"
- ".inst 0x6f80e0f3 // udot v19.4s, v7.16b, v0.4b[0]\n"
"ldr q7, [x28, #0xa0]\n"
- ".inst 0x6fa0e110 // udot v16.4s, v8.16b, v0.4b[1]\n"
+ ".inst 0x6fa0e093 // udot v19.4s, v4.16b, v0.4b[1]\n"
"ldr q8, [x28, #0xb0]\n"
- ".inst 0x6fa0e131 // udot v17.4s, v9.16b, v0.4b[1]\n"
+ ".inst 0x6f80e8b0 // udot v16.4s, v5.16b, v0.4b[2]\n"
"ldr q9, [x28, #0xc0]\n"
- ".inst 0x6fa0e152 // udot v18.4s, v10.16b, v0.4b[1]\n"
"ldr q10, [x28, #0xd0]\n"
- ".inst 0x6fa0e093 // udot v19.4s, v4.16b, v0.4b[1]\n"
- "ldr q4, [x28, #0xe0]\n"
- ".inst 0x6f80e8b0 // udot v16.4s, v5.16b, v0.4b[2]\n"
- "ldr q5, [x28, #0xf0]\n"
".inst 0x6f80e8d1 // udot v17.4s, v6.16b, v0.4b[2]\n"
- "add x24, x24, #0x10\n"
".inst 0x6f80e8f2 // udot v18.4s, v7.16b, v0.4b[2]\n"
+ "ldr q4, [x28, #0xe0]\n"
+ "ldr q5, [x28, #0xf0]\n"
".inst 0x6f80e913 // udot v19.4s, v8.16b, v0.4b[2]\n"
"add x28, x28, #0x100\n"
".inst 0x6fa0e930 // udot v16.4s, v9.16b, v0.4b[3]\n"
@@ -157,42 +157,42 @@ void a64_hybrid_u8qa_dot_4x16 (
"tbnz %x[flags], #31, 8f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
"8:" // Height 1: Multiply loop: unique 1: skip row sum
- "ldr q0, [x24, #0x0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "sub x24, x24, #0x10\n"
+ "ldr q0, [x23, #0x0]\n"
+ "cmp x24, #0x20\n"
"ldr q4, [x28, #0x0]\n"
- "sub x25, x25, #0x10\n"
- "cmp x25, #0x20\n"
+ "bge 7b\n"
+ "9:" // Height 1: Multiply loop: Single iteration only
+ ".inst 0x6f80e090 // udot v16.4s, v4.16b, v0.4b[0]\n"
"ldr q5, [x28, #0x10]\n"
+ "sub x24, x24, #0x10\n"
+ ".inst 0x6f80e0b1 // udot v17.4s, v5.16b, v0.4b[0]\n"
"ldr q6, [x28, #0x20]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x6f80e0d2 // udot v18.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x28, #0x30]\n"
"ldr q8, [x28, #0x40]\n"
+ ".inst 0x6f80e0f3 // udot v19.4s, v7.16b, v0.4b[0]\n"
"ldr q9, [x28, #0x50]\n"
+ ".inst 0x6fa0e110 // udot v16.4s, v8.16b, v0.4b[1]\n"
"ldr q10, [x28, #0x60]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "bge 7b\n"
- "9:" // Height 1: Multiply loop: Single iteration only
- ".inst 0x6f80e090 // udot v16.4s, v4.16b, v0.4b[0]\n"
"ldr q4, [x28, #0x70]\n"
- ".inst 0x6f80e0b1 // udot v17.4s, v5.16b, v0.4b[0]\n"
+ ".inst 0x6fa0e131 // udot v17.4s, v9.16b, v0.4b[1]\n"
"ldr q5, [x28, #0x80]\n"
- ".inst 0x6f80e0d2 // udot v18.4s, v6.16b, v0.4b[0]\n"
"ldr q6, [x28, #0x90]\n"
- ".inst 0x6f80e0f3 // udot v19.4s, v7.16b, v0.4b[0]\n"
+ ".inst 0x6fa0e152 // udot v18.4s, v10.16b, v0.4b[1]\n"
+ ".inst 0x6fa0e093 // udot v19.4s, v4.16b, v0.4b[1]\n"
"ldr q7, [x28, #0xa0]\n"
- ".inst 0x6fa0e110 // udot v16.4s, v8.16b, v0.4b[1]\n"
"ldr q8, [x28, #0xb0]\n"
- ".inst 0x6fa0e131 // udot v17.4s, v9.16b, v0.4b[1]\n"
+ ".inst 0x6f80e8b0 // udot v16.4s, v5.16b, v0.4b[2]\n"
"ldr q9, [x28, #0xc0]\n"
- ".inst 0x6fa0e152 // udot v18.4s, v10.16b, v0.4b[1]\n"
+ ".inst 0x6f80e8d1 // udot v17.4s, v6.16b, v0.4b[2]\n"
"ldr q10, [x28, #0xd0]\n"
- ".inst 0x6fa0e093 // udot v19.4s, v4.16b, v0.4b[1]\n"
"ldr q4, [x28, #0xe0]\n"
- ".inst 0x6f80e8b0 // udot v16.4s, v5.16b, v0.4b[2]\n"
- "ldr q5, [x28, #0xf0]\n"
- ".inst 0x6f80e8d1 // udot v17.4s, v6.16b, v0.4b[2]\n"
- "sub x25, x25, #0x10\n"
".inst 0x6f80e8f2 // udot v18.4s, v7.16b, v0.4b[2]\n"
".inst 0x6f80e913 // udot v19.4s, v8.16b, v0.4b[2]\n"
- "add x24, x24, #0x10\n"
+ "ldr q5, [x28, #0xf0]\n"
"add x28, x28, #0x100\n"
".inst 0x6fa0e930 // udot v16.4s, v9.16b, v0.4b[3]\n"
".inst 0x6fa0e951 // udot v17.4s, v10.16b, v0.4b[3]\n"
@@ -201,83 +201,83 @@ void a64_hybrid_u8qa_dot_4x16 (
"tbnz %x[flags], #31, 10f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
"10:" // Height 1: Multiply loop: unique 2: skip row sum
- "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"11:" // Height 1: Multiply loop: Main loop skip
- "cbz x25, 18f\n"
- "cmp x25, #0x4\n"
+ "cbz x24, 18f\n"
+ "cmp x24, #0x4\n"
"blt 14f\n"
"12:" // Height 1: Multiply loop: Odd block loop
- "ldr s0, [x24], #0x4\n"
+ "ldr s0, [x23], #0x4\n"
"tbnz %x[flags], #31, 13f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
"13:" // Height 1: Multiply loop: unique 3: skip row sum
"ldr q6, [x28, #0x0]\n"
- "ldr q7, [x28, #0x10]\n"
- "sub x25, x25, #0x4\n"
- "cmp x25, #0x4\n"
- "ldr q8, [x28, #0x20]\n"
- "ldr q9, [x28, #0x30]\n"
".inst 0x6f80e0d0 // udot v16.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "sub x24, x24, #0x4\n"
".inst 0x6f80e0f1 // udot v17.4s, v7.16b, v0.4b[0]\n"
+ "ldr q8, [x28, #0x20]\n"
+ "cmp x24, #0x4\n"
".inst 0x6f80e112 // udot v18.4s, v8.16b, v0.4b[0]\n"
- ".inst 0x6f80e133 // udot v19.4s, v9.16b, v0.4b[0]\n"
+ "ldr q9, [x28, #0x30]\n"
"add x28, x28, #0x40\n"
+ ".inst 0x6f80e133 // udot v19.4s, v9.16b, v0.4b[0]\n"
"bge 12b\n"
+ "cbz x24, 18f\n"
"14:" // Height 1: Multiply loop: Skip odd blocks
- "cbz x25, 18f\n"
- "tbz x25, #1, 15f\n"
- "ldr h0, [x24], #0x2\n"
- "tbz x25, #0, 16f\n"
- "ld1 { v0.b }[2], [x24]\n"
+ "tbz x24, #1, 15f\n"
+ "ldr h0, [x23], #0x2\n"
+ "tbz x24, #0, 16f\n"
+ "ld1 { v0.b }[2], [x23]\n"
"b 16f\n"
"15:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x24, #0x0]\n"
+ "ldr b0, [x23, #0x0]\n"
"16:" // Height 1: Multiply loop: Ragged operand read: Done
"tbnz %x[flags], #31, 17f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
"17:" // Height 1: Multiply loop: unique 4: skip row sum
"ldr q10, [x28, #0x0]\n"
- "ldr q4, [x28, #0x10]\n"
".inst 0x6f80e150 // udot v16.4s, v10.16b, v0.4b[0]\n"
- ".inst 0x6f80e091 // udot v17.4s, v4.16b, v0.4b[0]\n"
+ "ldr q4, [x28, #0x10]\n"
"ldr q5, [x28, #0x20]\n"
+ ".inst 0x6f80e091 // udot v17.4s, v4.16b, v0.4b[0]\n"
"ldr q6, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
".inst 0x6f80e0b2 // udot v18.4s, v5.16b, v0.4b[0]\n"
".inst 0x6f80e0d3 // udot v19.4s, v6.16b, v0.4b[0]\n"
- "add x28, x28, #0x40\n"
"18:" // Height 1: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 4b\n"
- "prfm pstl1keep, [x27, #0x0]\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
"tbnz %x[flags], #31, 19f\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1r { v1.4s }, [x23]\n"
"addp v11.4s, v11.4s, v11.4s\n"
- "neg v1.4s, v1.4s\n"
+ "add x22, %x[qp], %[b_offset]\n"
"addp v11.4s, v11.4s, v11.4s\n"
+ "ld1r { v1.4s }, [x22]\n"
+ "neg v1.4s, v1.4s\n"
"mul v11.4s, v11.4s, v1.4s\n"
"19:" // Height 1: skip row sum fixup
- "ldr q0, [x10, #0x0]\n"
- "ldr q1, [x10, #0x10]\n"
"add v16.4s, v16.4s, v11.4s\n"
+ "ldr q0, [x27, #0x0]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"add v17.4s, v17.4s, v11.4s\n"
- "ldr q2, [x10, #0x20]\n"
- "ldr q3, [x10, #0x30]\n"
+ "ldr q1, [x27, #0x10]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
"add v18.4s, v18.4s, v11.4s\n"
+ "ldr q2, [x27, #0x20]\n"
+ "add x22, %x[qp], %[per_layer_mul]\n"
"add v19.4s, v19.4s, v11.4s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x23]\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
+ "ldr q3, [x27, #0x30]\n"
+ "add x27, x27, #0x40\n"
"add v16.4s, v16.4s, v0.4s\n"
+ "ld1r { v0.4s }, [x23]\n"
+ "ld1r { v4.4s }, [x22]\n"
"add v17.4s, v17.4s, v1.4s\n"
"add v18.4s, v18.4s, v2.4s\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x23]\n"
"add v19.4s, v19.4s, v3.4s\n"
"sqrdmulh v16.4s, v16.4s, v4.4s\n"
- "add x10, x10, #0x40\n"
"sqrdmulh v17.4s, v17.4s, v4.4s\n"
"sqrdmulh v18.4s, v18.4s, v4.4s\n"
"sqrdmulh v19.4s, v19.4s, v4.4s\n"
@@ -289,100 +289,100 @@ void a64_hybrid_u8qa_dot_4x16 (
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v16.4s, v16.4s, v4.4s\n"
"sqadd v17.4s, v17.4s, v5.4s\n"
"sqadd v18.4s, v18.4s, v6.4s\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v19.4s, v19.4s, v7.4s\n"
"20:" // Height 1: no shift correction
- "add x23, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
"srshl v16.4s, v16.4s, v0.4s\n"
+ "add x22, %x[qp], %[c_offset]\n"
+ "ld1r { v4.4s }, [x22]\n"
"srshl v17.4s, v17.4s, v0.4s\n"
+ "add x22, %x[qp], %[minval]\n"
"srshl v18.4s, v18.4s, v0.4s\n"
+ "ld1r { v5.4s }, [x22]\n"
+ "add x22, %x[qp], %[maxval]\n"
"srshl v19.4s, v19.4s, v0.4s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x23]\n"
+ "ld1r { v6.4s }, [x22]\n"
+ "cmp x9, #0x10\n"
"add v16.4s, v16.4s, v4.4s\n"
"add v17.4s, v17.4s, v4.4s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x23]\n"
"add v18.4s, v18.4s, v4.4s\n"
"add v19.4s, v19.4s, v4.4s\n"
- "cmp x9, #0x10\n"
"smin v16.4s, v16.4s, v6.4s\n"
"smin v17.4s, v17.4s, v6.4s\n"
"smin v18.4s, v18.4s, v6.4s\n"
- "smin v19.4s, v19.4s, v6.4s\n"
"smax v16.4s, v16.4s, v5.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
"smax v18.4s, v18.4s, v5.4s\n"
- "smax v19.4s, v19.4s, v5.4s\n"
+ "smin v19.4s, v19.4s, v6.4s\n"
"uzp1 v16.8h, v16.8h, v17.8h\n"
+ "smax v19.4s, v19.4s, v5.4s\n"
"uzp1 v17.8h, v18.8h, v19.8h\n"
"uzp1 v16.16b, v16.16b, v17.16b\n"
"bge 29f\n"
"tbz x9, #3, 24f\n"
- "str d16, [x27], #0x8\n"
+ "str d16, [x26], #0x8\n"
"tbz x9, #2, 22f\n"
- "st1 { v16.s }[2], [x27], #0x4\n"
+ "st1 { v16.s }[2], [x26], #0x4\n"
"tbz x9, #1, 21f\n"
- "st1 { v16.h }[6], [x27], #0x2\n"
+ "st1 { v16.h }[6], [x26], #0x2\n"
"tbz x9, #0, 28f\n"
- "st1 { v16.b }[14], [x27]\n"
+ "st1 { v16.b }[14], [x26]\n"
"b 28f\n"
"21:" // Height 1: Partial direct writeback: partial_1_12
"tbz x9, #0, 28f\n"
- "st1 { v16.b }[12], [x27]\n"
+ "st1 { v16.b }[12], [x26]\n"
"b 28f\n"
"22:" // Height 1: Partial direct writeback: partial_2_8
"tbz x9, #1, 23f\n"
- "st1 { v16.h }[4], [x27], #0x2\n"
+ "st1 { v16.h }[4], [x26], #0x2\n"
"tbz x9, #0, 28f\n"
- "st1 { v16.b }[10], [x27]\n"
+ "st1 { v16.b }[10], [x26]\n"
"b 28f\n"
"23:" // Height 1: Partial direct writeback: partial_1_8
"tbz x9, #0, 28f\n"
- "st1 { v16.b }[8], [x27]\n"
+ "st1 { v16.b }[8], [x26]\n"
"b 28f\n"
"24:" // Height 1: Partial direct writeback: partial_4_0
"tbz x9, #2, 26f\n"
- "str s16, [x27], #0x4\n"
+ "str s16, [x26], #0x4\n"
"tbz x9, #1, 25f\n"
- "st1 { v16.h }[2], [x27], #0x2\n"
+ "st1 { v16.h }[2], [x26], #0x2\n"
"tbz x9, #0, 28f\n"
- "st1 { v16.b }[6], [x27]\n"
+ "st1 { v16.b }[6], [x26]\n"
"b 28f\n"
"25:" // Height 1: Partial direct writeback: partial_1_4
"tbz x9, #0, 28f\n"
- "st1 { v16.b }[4], [x27]\n"
+ "st1 { v16.b }[4], [x26]\n"
"b 28f\n"
"26:" // Height 1: Partial direct writeback: partial_2_0
"tbz x9, #1, 27f\n"
- "str h16, [x27], #0x2\n"
+ "str h16, [x26], #0x2\n"
"tbz x9, #0, 28f\n"
- "st1 { v16.b }[2], [x27]\n"
+ "st1 { v16.b }[2], [x26]\n"
"b 28f\n"
"27:" // Height 1: Partial direct writeback: partial_1_0
- "str b16, [x27, #0x0]\n"
+ "str b16, [x26, #0x0]\n"
"28:" // Height 1: Partial direct writeback: Done
"b 30f\n"
"29:" // Height 1: Full writeback
- "str q16, [x27, #0x0]\n"
- "add x27, x27, #0x10\n"
+ "str q16, [x26, #0x0]\n"
+ "add x26, x26, #0x10\n"
"30:" // Height 1: Writeback done
"subs x9, x9, #0x10\n"
"bgt 2b\n"
"b 122f\n"
"31:" // Height 2
- "mov x10, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x27, %x[col_bias]\n"
"movi v12.4s, #0x0\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"bic %x[flags], %x[flags], #0x80000000\n"
"movi v15.16b, #0x1\n"
- "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "mov x26, %x[output_ptr]\n"
"32:" // Height 2: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -393,57 +393,57 @@ void a64_hybrid_u8qa_dot_4x16 (
"movi v22.4s, #0x0\n"
"movi v23.4s, #0x0\n"
"33:" // Height 2: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"34:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 35f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "cbnz x26, 36f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "cbnz x25, 36f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
"b 36f\n"
"35:" // Height 2: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19\n"
"36:" // Height 2: input setup done
- "cmp x25, #0x10\n"
+ "cmp x24, #0x10\n"
"blt 41f\n"
- "ldr q0, [x24, #0x0]\n"
- "ldr q1, [x23, #0x0]\n"
- "cmp x25, #0x20\n"
+ "ldr q0, [x23, #0x0]\n"
+ "ldr q1, [x22, #0x0]\n"
+ "cmp x24, #0x20\n"
"ldr q4, [x28, #0x0]\n"
- "ldr q5, [x28, #0x10]\n"
- "ldr q6, [x28, #0x20]\n"
- "ldr q7, [x28, #0x30]\n"
- "ldr q8, [x28, #0x40]\n"
- "ldr q9, [x28, #0x50]\n"
- "ldr q10, [x28, #0x60]\n"
"blt 39f\n"
"37:" // Height 2: Multiply loop: Main loop head
".inst 0x6f80e090 // udot v16.4s, v4.16b, v0.4b[0]\n"
+ "ldr q5, [x28, #0x10]\n"
+ "add x23, x23, #0x10\n"
".inst 0x6f81e094 // udot v20.4s, v4.16b, v1.4b[0]\n"
- "ldr q4, [x28, #0x70]\n"
- "add x24, x24, #0x10\n"
+ "ldr q6, [x28, #0x20]\n"
+ "add x22, x22, #0x10\n"
".inst 0x6f80e0b1 // udot v17.4s, v5.16b, v0.4b[0]\n"
+ "ldr q7, [x28, #0x30]\n"
".inst 0x6f81e0b5 // udot v21.4s, v5.16b, v1.4b[0]\n"
- "ldr q5, [x28, #0x80]\n"
- "add x23, x23, #0x10\n"
+ "ldr q8, [x28, #0x40]\n"
".inst 0x6f80e0d2 // udot v18.4s, v6.16b, v0.4b[0]\n"
+ "ldr q9, [x28, #0x50]\n"
".inst 0x6f81e0d6 // udot v22.4s, v6.16b, v1.4b[0]\n"
- "ldr q6, [x28, #0x90]\n"
+ "ldr q10, [x28, #0x60]\n"
+ "ldr q4, [x28, #0x70]\n"
".inst 0x6f80e0f3 // udot v19.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0f7 // udot v23.4s, v7.16b, v1.4b[0]\n"
- "ldr q7, [x28, #0xa0]\n"
+ "ldr q5, [x28, #0x80]\n"
".inst 0x6fa0e110 // udot v16.4s, v8.16b, v0.4b[1]\n"
+ "ldr q6, [x28, #0x90]\n"
".inst 0x6fa1e114 // udot v20.4s, v8.16b, v1.4b[1]\n"
- "ldr q8, [x28, #0xb0]\n"
+ "ldr q7, [x28, #0xa0]\n"
".inst 0x6fa0e131 // udot v17.4s, v9.16b, v0.4b[1]\n"
+ "ldr q8, [x28, #0xb0]\n"
".inst 0x6fa1e135 // udot v21.4s, v9.16b, v1.4b[1]\n"
"ldr q9, [x28, #0xc0]\n"
".inst 0x6fa0e152 // udot v18.4s, v10.16b, v0.4b[1]\n"
@@ -474,40 +474,40 @@ void a64_hybrid_u8qa_dot_4x16 (
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
"38:" // Height 2: Multiply loop: unique 5: skip row sum
- "ldr q0, [x24, #0x0]\n"
- "ldr q1, [x23, #0x0]\n"
- "sub x25, x25, #0x10\n"
- "cmp x25, #0x20\n"
- "ldr q4, [x28, #0x0]\n"
- "ldr q5, [x28, #0x10]\n"
- "ldr q6, [x28, #0x20]\n"
- "ldr q7, [x28, #0x30]\n"
- "ldr q8, [x28, #0x40]\n"
- "ldr q9, [x28, #0x50]\n"
- "ldr q10, [x28, #0x60]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"prfm pldl1keep, [x23, #0x80]\n"
+ "sub x24, x24, #0x10\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "cmp x24, #0x20\n"
+ "ldr q0, [x23, #0x0]\n"
+ "ldr q1, [x22, #0x0]\n"
+ "ldr q4, [x28, #0x0]\n"
"bge 37b\n"
"39:" // Height 2: Multiply loop: Single iteration only
".inst 0x6f80e090 // udot v16.4s, v4.16b, v0.4b[0]\n"
+ "ldr q5, [x28, #0x10]\n"
+ "sub x24, x24, #0x10\n"
".inst 0x6f81e094 // udot v20.4s, v4.16b, v1.4b[0]\n"
- "ldr q4, [x28, #0x70]\n"
- "sub x25, x25, #0x10\n"
+ "ldr q6, [x28, #0x20]\n"
+ "add x23, x23, #0x10\n"
".inst 0x6f80e0b1 // udot v17.4s, v5.16b, v0.4b[0]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x22, x22, #0x10\n"
".inst 0x6f81e0b5 // udot v21.4s, v5.16b, v1.4b[0]\n"
- "ldr q5, [x28, #0x80]\n"
- "add x24, x24, #0x10\n"
+ "ldr q8, [x28, #0x40]\n"
".inst 0x6f80e0d2 // udot v18.4s, v6.16b, v0.4b[0]\n"
+ "ldr q9, [x28, #0x50]\n"
".inst 0x6f81e0d6 // udot v22.4s, v6.16b, v1.4b[0]\n"
- "ldr q6, [x28, #0x90]\n"
- "add x23, x23, #0x10\n"
+ "ldr q10, [x28, #0x60]\n"
+ "ldr q4, [x28, #0x70]\n"
".inst 0x6f80e0f3 // udot v19.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0f7 // udot v23.4s, v7.16b, v1.4b[0]\n"
- "ldr q7, [x28, #0xa0]\n"
+ "ldr q5, [x28, #0x80]\n"
".inst 0x6fa0e110 // udot v16.4s, v8.16b, v0.4b[1]\n"
+ "ldr q6, [x28, #0x90]\n"
".inst 0x6fa1e114 // udot v20.4s, v8.16b, v1.4b[1]\n"
- "ldr q8, [x28, #0xb0]\n"
+ "ldr q7, [x28, #0xa0]\n"
".inst 0x6fa0e131 // udot v17.4s, v9.16b, v0.4b[1]\n"
+ "ldr q8, [x28, #0xb0]\n"
".inst 0x6fa1e135 // udot v21.4s, v9.16b, v1.4b[1]\n"
"ldr q9, [x28, #0xc0]\n"
".inst 0x6fa0e152 // udot v18.4s, v10.16b, v0.4b[1]\n"
@@ -538,104 +538,104 @@ void a64_hybrid_u8qa_dot_4x16 (
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
"40:" // Height 2: Multiply loop: unique 6: skip row sum
- "prfm pldl1keep, [x24, #0x80]\n"
"prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
"41:" // Height 2: Multiply loop: Main loop skip
- "cbz x25, 48f\n"
- "cmp x25, #0x4\n"
+ "cbz x24, 48f\n"
+ "cmp x24, #0x4\n"
"blt 44f\n"
"42:" // Height 2: Multiply loop: Odd block loop
- "ldr s0, [x24], #0x4\n"
- "ldr s1, [x23], #0x4\n"
+ "ldr s0, [x23], #0x4\n"
+ "ldr s1, [x22], #0x4\n"
"tbnz %x[flags], #31, 43f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
"43:" // Height 2: Multiply loop: unique 7: skip row sum
"ldr q6, [x28, #0x0]\n"
- "ldr q7, [x28, #0x10]\n"
- "sub x25, x25, #0x4\n"
- "cmp x25, #0x4\n"
- "ldr q8, [x28, #0x20]\n"
- "ldr q9, [x28, #0x30]\n"
".inst 0x6f80e0d0 // udot v16.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "sub x24, x24, #0x4\n"
".inst 0x6f81e0d4 // udot v20.4s, v6.16b, v1.4b[0]\n"
+ "ldr q8, [x28, #0x20]\n"
+ "cmp x24, #0x4\n"
".inst 0x6f80e0f1 // udot v17.4s, v7.16b, v0.4b[0]\n"
- ".inst 0x6f81e0f5 // udot v21.4s, v7.16b, v1.4b[0]\n"
+ "ldr q9, [x28, #0x30]\n"
"add x28, x28, #0x40\n"
+ ".inst 0x6f81e0f5 // udot v21.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f80e112 // udot v18.4s, v8.16b, v0.4b[0]\n"
".inst 0x6f81e116 // udot v22.4s, v8.16b, v1.4b[0]\n"
".inst 0x6f80e133 // udot v19.4s, v9.16b, v0.4b[0]\n"
".inst 0x6f81e137 // udot v23.4s, v9.16b, v1.4b[0]\n"
"bge 42b\n"
+ "cbz x24, 48f\n"
"44:" // Height 2: Multiply loop: Skip odd blocks
- "cbz x25, 48f\n"
- "tbz x25, #1, 45f\n"
- "ldr h0, [x24], #0x2\n"
- "ldr h1, [x23], #0x2\n"
- "tbz x25, #0, 46f\n"
- "ld1 { v0.b }[2], [x24]\n"
- "ld1 { v1.b }[2], [x23]\n"
+ "tbz x24, #1, 45f\n"
+ "ldr h0, [x23], #0x2\n"
+ "ldr h1, [x22], #0x2\n"
+ "tbz x24, #0, 46f\n"
+ "ld1 { v0.b }[2], [x23]\n"
+ "ld1 { v1.b }[2], [x22]\n"
"b 46f\n"
"45:" // Height 2: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x24, #0x0]\n"
- "ldr b1, [x23, #0x0]\n"
+ "ldr b0, [x23, #0x0]\n"
+ "ldr b1, [x22, #0x0]\n"
"46:" // Height 2: Multiply loop: Ragged operand read: Done
"tbnz %x[flags], #31, 47f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
"47:" // Height 2: Multiply loop: unique 8: skip row sum
"ldr q10, [x28, #0x0]\n"
- "ldr q4, [x28, #0x10]\n"
".inst 0x6f80e150 // udot v16.4s, v10.16b, v0.4b[0]\n"
+ "ldr q4, [x28, #0x10]\n"
".inst 0x6f81e154 // udot v20.4s, v10.16b, v1.4b[0]\n"
"ldr q5, [x28, #0x20]\n"
"ldr q6, [x28, #0x30]\n"
".inst 0x6f80e091 // udot v17.4s, v4.16b, v0.4b[0]\n"
+ "add x28, x28, #0x40\n"
".inst 0x6f81e095 // udot v21.4s, v4.16b, v1.4b[0]\n"
".inst 0x6f80e0b2 // udot v18.4s, v5.16b, v0.4b[0]\n"
".inst 0x6f81e0b6 // udot v22.4s, v5.16b, v1.4b[0]\n"
- "add x28, x28, #0x40\n"
".inst 0x6f80e0d3 // udot v19.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0d7 // udot v23.4s, v6.16b, v1.4b[0]\n"
"48:" // Height 2: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 34b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x27, x20\n"
- "prfm pstl1keep, [x27, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x21, x26, x19\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
"tbnz %x[flags], #31, 49f\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1r { v2.4s }, [x23]\n"
"addp v11.4s, v11.4s, v11.4s\n"
+ "add x22, %x[qp], %[b_offset]\n"
+ "ld1r { v2.4s }, [x22]\n"
"addp v12.4s, v12.4s, v12.4s\n"
- "neg v2.4s, v2.4s\n"
"addp v11.4s, v11.4s, v11.4s\n"
+ "neg v2.4s, v2.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
"mul v11.4s, v11.4s, v2.4s\n"
"mul v12.4s, v12.4s, v2.4s\n"
"49:" // Height 2: skip row sum fixup
- "ldr q0, [x10, #0x0]\n"
- "ldr q1, [x10, #0x10]\n"
"add v16.4s, v16.4s, v11.4s\n"
+ "ldr q0, [x27, #0x0]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"add v17.4s, v17.4s, v11.4s\n"
- "ldr q2, [x10, #0x20]\n"
- "ldr q3, [x10, #0x30]\n"
+ "ldr q1, [x27, #0x10]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
"add v18.4s, v18.4s, v11.4s\n"
+ "ldr q2, [x27, #0x20]\n"
+ "add x22, %x[qp], %[per_layer_mul]\n"
"add v19.4s, v19.4s, v11.4s\n"
+ "ldr q3, [x27, #0x30]\n"
+ "add x27, x27, #0x40\n"
"add v20.4s, v20.4s, v12.4s\n"
+ "ld1r { v4.4s }, [x22]\n"
"add v21.4s, v21.4s, v12.4s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x23]\n"
"add v22.4s, v22.4s, v12.4s\n"
"add v23.4s, v23.4s, v12.4s\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
"add v16.4s, v16.4s, v0.4s\n"
"add v17.4s, v17.4s, v1.4s\n"
- "add x10, x10, #0x40\n"
"add v18.4s, v18.4s, v2.4s\n"
"add v19.4s, v19.4s, v3.4s\n"
"add v20.4s, v20.4s, v0.4s\n"
@@ -653,154 +653,154 @@ void a64_hybrid_u8qa_dot_4x16 (
"sqrdmulh v23.4s, v23.4s, v4.4s\n"
"tbz %x[flags], #5, 50f\n"
"and v4.16b, v16.16b, v0.16b\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v4.4s\n"
"and v5.16b, v17.16b, v0.16b\n"
"and v6.16b, v18.16b, v0.16b\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
+ "sshr v6.4s, v6.4s, #0x1f\n"
+ "sqadd v16.4s, v16.4s, v4.4s\n"
+ "sqadd v17.4s, v17.4s, v5.4s\n"
+ "sqadd v18.4s, v18.4s, v6.4s\n"
"and v7.16b, v19.16b, v0.16b\n"
"and v8.16b, v20.16b, v0.16b\n"
"and v9.16b, v21.16b, v0.16b\n"
- "and v10.16b, v22.16b, v0.16b\n"
- "and v4.16b, v23.16b, v0.16b\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sshr v8.4s, v8.4s, #0x1f\n"
"sshr v9.4s, v9.4s, #0x1f\n"
- "sshr v10.4s, v10.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sqadd v17.4s, v17.4s, v5.4s\n"
- "sqadd v18.4s, v18.4s, v6.4s\n"
"sqadd v19.4s, v19.4s, v7.4s\n"
"sqadd v20.4s, v20.4s, v8.4s\n"
"sqadd v21.4s, v21.4s, v9.4s\n"
+ "and v10.16b, v22.16b, v0.16b\n"
+ "and v4.16b, v23.16b, v0.16b\n"
+ "sshr v10.4s, v10.4s, #0x1f\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
"sqadd v22.4s, v22.4s, v10.4s\n"
"sqadd v23.4s, v23.4s, v4.4s\n"
"50:" // Height 2: no shift correction
- "add x23, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
"srshl v16.4s, v16.4s, v0.4s\n"
+ "add x22, %x[qp], %[c_offset]\n"
+ "ld1r { v4.4s }, [x22]\n"
"srshl v17.4s, v17.4s, v0.4s\n"
+ "add x22, %x[qp], %[minval]\n"
"srshl v18.4s, v18.4s, v0.4s\n"
+ "ld1r { v5.4s }, [x22]\n"
+ "add x22, %x[qp], %[maxval]\n"
"srshl v19.4s, v19.4s, v0.4s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x23]\n"
+ "ld1r { v6.4s }, [x22]\n"
+ "cmp x9, #0x10\n"
"srshl v20.4s, v20.4s, v0.4s\n"
"srshl v21.4s, v21.4s, v0.4s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x23]\n"
- "srshl v22.4s, v22.4s, v0.4s\n"
- "srshl v23.4s, v23.4s, v0.4s\n"
- "cmp x9, #0x10\n"
"add v16.4s, v16.4s, v4.4s\n"
"add v17.4s, v17.4s, v4.4s\n"
"add v18.4s, v18.4s, v4.4s\n"
- "add v19.4s, v19.4s, v4.4s\n"
- "add v20.4s, v20.4s, v4.4s\n"
- "add v21.4s, v21.4s, v4.4s\n"
- "add v22.4s, v22.4s, v4.4s\n"
- "add v23.4s, v23.4s, v4.4s\n"
"smin v16.4s, v16.4s, v6.4s\n"
"smin v17.4s, v17.4s, v6.4s\n"
"smin v18.4s, v18.4s, v6.4s\n"
- "smin v19.4s, v19.4s, v6.4s\n"
- "smin v20.4s, v20.4s, v6.4s\n"
- "smin v21.4s, v21.4s, v6.4s\n"
- "smin v22.4s, v22.4s, v6.4s\n"
- "smin v23.4s, v23.4s, v6.4s\n"
"smax v16.4s, v16.4s, v5.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
"smax v18.4s, v18.4s, v5.4s\n"
+ "add v19.4s, v19.4s, v4.4s\n"
+ "add v20.4s, v20.4s, v4.4s\n"
+ "add v21.4s, v21.4s, v4.4s\n"
+ "smin v19.4s, v19.4s, v6.4s\n"
+ "smin v20.4s, v20.4s, v6.4s\n"
+ "smin v21.4s, v21.4s, v6.4s\n"
"smax v19.4s, v19.4s, v5.4s\n"
"smax v20.4s, v20.4s, v5.4s\n"
"smax v21.4s, v21.4s, v5.4s\n"
- "smax v22.4s, v22.4s, v5.4s\n"
- "smax v23.4s, v23.4s, v5.4s\n"
+ "srshl v22.4s, v22.4s, v0.4s\n"
+ "srshl v23.4s, v23.4s, v0.4s\n"
"uzp1 v16.8h, v16.8h, v17.8h\n"
"uzp1 v17.8h, v18.8h, v19.8h\n"
+ "add v22.4s, v22.4s, v4.4s\n"
+ "add v23.4s, v23.4s, v4.4s\n"
"uzp1 v20.8h, v20.8h, v21.8h\n"
- "uzp1 v21.8h, v22.8h, v23.8h\n"
+ "smin v22.4s, v22.4s, v6.4s\n"
+ "smin v23.4s, v23.4s, v6.4s\n"
"uzp1 v16.16b, v16.16b, v17.16b\n"
+ "smax v22.4s, v22.4s, v5.4s\n"
+ "smax v23.4s, v23.4s, v5.4s\n"
+ "uzp1 v21.8h, v22.8h, v23.8h\n"
"uzp1 v20.16b, v20.16b, v21.16b\n"
"bge 59f\n"
"tbz x9, #3, 54f\n"
- "str d16, [x27], #0x8\n"
- "str d20, [x22], #0x8\n"
+ "str d16, [x26], #0x8\n"
+ "str d20, [x21], #0x8\n"
"tbz x9, #2, 52f\n"
- "st1 { v16.s }[2], [x27], #0x4\n"
- "st1 { v20.s }[2], [x22], #0x4\n"
+ "st1 { v16.s }[2], [x26], #0x4\n"
+ "st1 { v20.s }[2], [x21], #0x4\n"
"tbz x9, #1, 51f\n"
- "st1 { v16.h }[6], [x27], #0x2\n"
- "st1 { v20.h }[6], [x22], #0x2\n"
+ "st1 { v16.h }[6], [x26], #0x2\n"
+ "st1 { v20.h }[6], [x21], #0x2\n"
"tbz x9, #0, 58f\n"
- "st1 { v16.b }[14], [x27]\n"
- "st1 { v20.b }[14], [x22]\n"
+ "st1 { v16.b }[14], [x26]\n"
+ "st1 { v20.b }[14], [x21]\n"
"b 58f\n"
"51:" // Height 2: Partial direct writeback: partial_1_12
"tbz x9, #0, 58f\n"
- "st1 { v16.b }[12], [x27]\n"
- "st1 { v20.b }[12], [x22]\n"
+ "st1 { v16.b }[12], [x26]\n"
+ "st1 { v20.b }[12], [x21]\n"
"b 58f\n"
"52:" // Height 2: Partial direct writeback: partial_2_8
"tbz x9, #1, 53f\n"
- "st1 { v16.h }[4], [x27], #0x2\n"
- "st1 { v20.h }[4], [x22], #0x2\n"
+ "st1 { v16.h }[4], [x26], #0x2\n"
+ "st1 { v20.h }[4], [x21], #0x2\n"
"tbz x9, #0, 58f\n"
- "st1 { v16.b }[10], [x27]\n"
- "st1 { v20.b }[10], [x22]\n"
+ "st1 { v16.b }[10], [x26]\n"
+ "st1 { v20.b }[10], [x21]\n"
"b 58f\n"
"53:" // Height 2: Partial direct writeback: partial_1_8
"tbz x9, #0, 58f\n"
- "st1 { v16.b }[8], [x27]\n"
- "st1 { v20.b }[8], [x22]\n"
+ "st1 { v16.b }[8], [x26]\n"
+ "st1 { v20.b }[8], [x21]\n"
"b 58f\n"
"54:" // Height 2: Partial direct writeback: partial_4_0
"tbz x9, #2, 56f\n"
- "str s16, [x27], #0x4\n"
- "str s20, [x22], #0x4\n"
+ "str s16, [x26], #0x4\n"
+ "str s20, [x21], #0x4\n"
"tbz x9, #1, 55f\n"
- "st1 { v16.h }[2], [x27], #0x2\n"
- "st1 { v20.h }[2], [x22], #0x2\n"
+ "st1 { v16.h }[2], [x26], #0x2\n"
+ "st1 { v20.h }[2], [x21], #0x2\n"
"tbz x9, #0, 58f\n"
- "st1 { v16.b }[6], [x27]\n"
- "st1 { v20.b }[6], [x22]\n"
+ "st1 { v16.b }[6], [x26]\n"
+ "st1 { v20.b }[6], [x21]\n"
"b 58f\n"
"55:" // Height 2: Partial direct writeback: partial_1_4
"tbz x9, #0, 58f\n"
- "st1 { v16.b }[4], [x27]\n"
- "st1 { v20.b }[4], [x22]\n"
+ "st1 { v16.b }[4], [x26]\n"
+ "st1 { v20.b }[4], [x21]\n"
"b 58f\n"
"56:" // Height 2: Partial direct writeback: partial_2_0
"tbz x9, #1, 57f\n"
- "str h16, [x27], #0x2\n"
- "str h20, [x22], #0x2\n"
+ "str h16, [x26], #0x2\n"
+ "str h20, [x21], #0x2\n"
"tbz x9, #0, 58f\n"
- "st1 { v16.b }[2], [x27]\n"
- "st1 { v20.b }[2], [x22]\n"
+ "st1 { v16.b }[2], [x26]\n"
+ "st1 { v20.b }[2], [x21]\n"
"b 58f\n"
"57:" // Height 2: Partial direct writeback: partial_1_0
- "str b16, [x27, #0x0]\n"
- "str b20, [x22, #0x0]\n"
+ "str b16, [x26, #0x0]\n"
+ "str b20, [x21, #0x0]\n"
"58:" // Height 2: Partial direct writeback: Done
"b 60f\n"
"59:" // Height 2: Full writeback
- "str q16, [x27, #0x0]\n"
- "add x27, x27, #0x10\n"
- "str q20, [x22, #0x0]\n"
+ "str q16, [x26, #0x0]\n"
+ "add x26, x26, #0x10\n"
+ "str q20, [x21, #0x0]\n"
"60:" // Height 2: Writeback done
"subs x9, x9, #0x10\n"
"bgt 32b\n"
"b 122f\n"
"61:" // Height 3
- "mov x10, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x27, %x[col_bias]\n"
"movi v12.4s, #0x0\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"bic %x[flags], %x[flags], #0x80000000\n"
"movi v13.4s, #0x0\n"
+ "mov x26, %x[output_ptr]\n"
"movi v15.16b, #0x1\n"
- "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
"62:" // Height 3: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -815,56 +815,56 @@ void a64_hybrid_u8qa_dot_4x16 (
"movi v26.4s, #0x0\n"
"movi v27.4s, #0x0\n"
"63:" // Height 3: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"64:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 65f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "ldr x22, [x21, #0x10]\n"
- "cbnz x26, 66f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "cbnz x25, 66f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
"b 66f\n"
"65:" // Height 3: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
"66:" // Height 3: input setup done
- "cmp x25, #0x10\n"
+ "cmp x24, #0x10\n"
"blt 71f\n"
- "ldr q0, [x24, #0x0]\n"
- "ldr q1, [x23, #0x0]\n"
- "cmp x25, #0x20\n"
- "ldr q2, [x22, #0x0]\n"
+ "ldr q0, [x23, #0x0]\n"
+ "ldr q1, [x22, #0x0]\n"
+ "cmp x24, #0x20\n"
+ "ldr q2, [x21, #0x0]\n"
"ldr q4, [x28, #0x0]\n"
- "ldr q5, [x28, #0x10]\n"
- "ldr q6, [x28, #0x20]\n"
- "ldr q7, [x28, #0x30]\n"
- "ldr q8, [x28, #0x40]\n"
- "ldr q9, [x28, #0x50]\n"
- "ldr q10, [x28, #0x60]\n"
"blt 69f\n"
"67:" // Height 3: Multiply loop: Main loop head
".inst 0x6f80e090 // udot v16.4s, v4.16b, v0.4b[0]\n"
- ".inst 0x6f81e094 // udot v20.4s, v4.16b, v1.4b[0]\n"
- "add x24, x24, #0x10\n"
+ "ldr q5, [x28, #0x10]\n"
"add x23, x23, #0x10\n"
+ ".inst 0x6f81e094 // udot v20.4s, v4.16b, v1.4b[0]\n"
+ "ldr q6, [x28, #0x20]\n"
+ "add x22, x22, #0x10\n"
".inst 0x6f82e098 // udot v24.4s, v4.16b, v2.4b[0]\n"
- "ldr q4, [x28, #0x70]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x21, x21, #0x10\n"
".inst 0x6f80e0b1 // udot v17.4s, v5.16b, v0.4b[0]\n"
- "add x22, x22, #0x10\n"
+ "ldr q8, [x28, #0x40]\n"
".inst 0x6f81e0b5 // udot v21.4s, v5.16b, v1.4b[0]\n"
+ "ldr q9, [x28, #0x50]\n"
".inst 0x6f82e0b9 // udot v25.4s, v5.16b, v2.4b[0]\n"
- "ldr q5, [x28, #0x80]\n"
+ "ldr q10, [x28, #0x60]\n"
".inst 0x6f80e0d2 // udot v18.4s, v6.16b, v0.4b[0]\n"
+ "ldr q4, [x28, #0x70]\n"
".inst 0x6f81e0d6 // udot v22.4s, v6.16b, v1.4b[0]\n"
+ "ldr q5, [x28, #0x80]\n"
".inst 0x6f82e0da // udot v26.4s, v6.16b, v2.4b[0]\n"
"ldr q6, [x28, #0x90]\n"
".inst 0x6f80e0f3 // udot v19.4s, v7.16b, v0.4b[0]\n"
@@ -891,8 +891,8 @@ void a64_hybrid_u8qa_dot_4x16 (
".inst 0x6f81e8b4 // udot v20.4s, v5.16b, v1.4b[2]\n"
".inst 0x6f82e8b8 // udot v24.4s, v5.16b, v2.4b[2]\n"
"ldr q5, [x28, #0xf0]\n"
- ".inst 0x6f80e8d1 // udot v17.4s, v6.16b, v0.4b[2]\n"
"add x28, x28, #0x100\n"
+ ".inst 0x6f80e8d1 // udot v17.4s, v6.16b, v0.4b[2]\n"
".inst 0x6f81e8d5 // udot v21.4s, v6.16b, v1.4b[2]\n"
".inst 0x6f82e8d9 // udot v25.4s, v6.16b, v2.4b[2]\n"
".inst 0x6f80e8f2 // udot v18.4s, v7.16b, v0.4b[2]\n"
@@ -918,37 +918,37 @@ void a64_hybrid_u8qa_dot_4x16 (
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
"68:" // Height 3: Multiply loop: unique 9: skip row sum
- "ldr q0, [x24, #0x0]\n"
- "ldr q1, [x23, #0x0]\n"
- "sub x25, x25, #0x10\n"
- "cmp x25, #0x20\n"
- "ldr q2, [x22, #0x0]\n"
- "ldr q4, [x28, #0x0]\n"
- "ldr q5, [x28, #0x10]\n"
- "ldr q6, [x28, #0x20]\n"
- "ldr q7, [x28, #0x30]\n"
- "ldr q8, [x28, #0x40]\n"
- "ldr q9, [x28, #0x50]\n"
- "ldr q10, [x28, #0x60]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"prfm pldl1keep, [x23, #0x80]\n"
+ "sub x24, x24, #0x10\n"
"prfm pldl1keep, [x22, #0x80]\n"
+ "cmp x24, #0x20\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
+ "ldr q0, [x23, #0x0]\n"
+ "ldr q1, [x22, #0x0]\n"
+ "ldr q2, [x21, #0x0]\n"
+ "ldr q4, [x28, #0x0]\n"
"bge 67b\n"
"69:" // Height 3: Multiply loop: Single iteration only
".inst 0x6f80e090 // udot v16.4s, v4.16b, v0.4b[0]\n"
+ "ldr q5, [x28, #0x10]\n"
+ "sub x24, x24, #0x10\n"
".inst 0x6f81e094 // udot v20.4s, v4.16b, v1.4b[0]\n"
- "sub x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
+ "ldr q6, [x28, #0x20]\n"
+ "add x23, x23, #0x10\n"
".inst 0x6f82e098 // udot v24.4s, v4.16b, v2.4b[0]\n"
- "ldr q4, [x28, #0x70]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x22, x22, #0x10\n"
".inst 0x6f80e0b1 // udot v17.4s, v5.16b, v0.4b[0]\n"
- "add x23, x23, #0x10\n"
+ "ldr q8, [x28, #0x40]\n"
+ "add x21, x21, #0x10\n"
".inst 0x6f81e0b5 // udot v21.4s, v5.16b, v1.4b[0]\n"
+ "ldr q9, [x28, #0x50]\n"
".inst 0x6f82e0b9 // udot v25.4s, v5.16b, v2.4b[0]\n"
- "ldr q5, [x28, #0x80]\n"
- "add x22, x22, #0x10\n"
+ "ldr q10, [x28, #0x60]\n"
".inst 0x6f80e0d2 // udot v18.4s, v6.16b, v0.4b[0]\n"
+ "ldr q4, [x28, #0x70]\n"
".inst 0x6f81e0d6 // udot v22.4s, v6.16b, v1.4b[0]\n"
+ "ldr q5, [x28, #0x80]\n"
".inst 0x6f82e0da // udot v26.4s, v6.16b, v2.4b[0]\n"
"ldr q6, [x28, #0x90]\n"
".inst 0x6f80e0f3 // udot v19.4s, v7.16b, v0.4b[0]\n"
@@ -975,8 +975,8 @@ void a64_hybrid_u8qa_dot_4x16 (
".inst 0x6f81e8b4 // udot v20.4s, v5.16b, v1.4b[2]\n"
".inst 0x6f82e8b8 // udot v24.4s, v5.16b, v2.4b[2]\n"
"ldr q5, [x28, #0xf0]\n"
- ".inst 0x6f80e8d1 // udot v17.4s, v6.16b, v0.4b[2]\n"
"add x28, x28, #0x100\n"
+ ".inst 0x6f80e8d1 // udot v17.4s, v6.16b, v0.4b[2]\n"
".inst 0x6f81e8d5 // udot v21.4s, v6.16b, v1.4b[2]\n"
".inst 0x6f82e8d9 // udot v25.4s, v6.16b, v2.4b[2]\n"
".inst 0x6f80e8f2 // udot v18.4s, v7.16b, v0.4b[2]\n"
@@ -1002,33 +1002,33 @@ void a64_hybrid_u8qa_dot_4x16 (
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
"70:" // Height 3: Multiply loop: unique 10: skip row sum
- "prfm pldl1keep, [x24, #0x80]\n"
"prfm pldl1keep, [x23, #0x80]\n"
"prfm pldl1keep, [x22, #0x80]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
"71:" // Height 3: Multiply loop: Main loop skip
- "cbz x25, 78f\n"
- "cmp x25, #0x4\n"
+ "cbz x24, 78f\n"
+ "cmp x24, #0x4\n"
"blt 74f\n"
"72:" // Height 3: Multiply loop: Odd block loop
- "ldr s0, [x24], #0x4\n"
- "ldr s1, [x23], #0x4\n"
- "ldr s2, [x22], #0x4\n"
+ "ldr s0, [x23], #0x4\n"
+ "ldr s1, [x22], #0x4\n"
+ "ldr s2, [x21], #0x4\n"
"tbnz %x[flags], #31, 73f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
"73:" // Height 3: Multiply loop: unique 11: skip row sum
"ldr q6, [x28, #0x0]\n"
- "ldr q7, [x28, #0x10]\n"
- "sub x25, x25, #0x4\n"
- "cmp x25, #0x4\n"
- "ldr q8, [x28, #0x20]\n"
- "ldr q9, [x28, #0x30]\n"
".inst 0x6f80e0d0 // udot v16.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "sub x24, x24, #0x4\n"
".inst 0x6f81e0d4 // udot v20.4s, v6.16b, v1.4b[0]\n"
+ "ldr q8, [x28, #0x20]\n"
+ "cmp x24, #0x4\n"
".inst 0x6f82e0d8 // udot v24.4s, v6.16b, v2.4b[0]\n"
- ".inst 0x6f80e0f1 // udot v17.4s, v7.16b, v0.4b[0]\n"
+ "ldr q9, [x28, #0x30]\n"
"add x28, x28, #0x40\n"
+ ".inst 0x6f80e0f1 // udot v17.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0f5 // udot v21.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f82e0f9 // udot v25.4s, v7.16b, v2.4b[0]\n"
".inst 0x6f80e112 // udot v18.4s, v8.16b, v0.4b[0]\n"
@@ -1038,21 +1038,21 @@ void a64_hybrid_u8qa_dot_4x16 (
".inst 0x6f81e137 // udot v23.4s, v9.16b, v1.4b[0]\n"
".inst 0x6f82e13b // udot v27.4s, v9.16b, v2.4b[0]\n"
"bge 72b\n"
+ "cbz x24, 78f\n"
"74:" // Height 3: Multiply loop: Skip odd blocks
- "cbz x25, 78f\n"
- "tbz x25, #1, 75f\n"
- "ldr h0, [x24], #0x2\n"
- "ldr h1, [x23], #0x2\n"
- "ldr h2, [x22], #0x2\n"
- "tbz x25, #0, 76f\n"
- "ld1 { v0.b }[2], [x24]\n"
- "ld1 { v1.b }[2], [x23]\n"
- "ld1 { v2.b }[2], [x22]\n"
+ "tbz x24, #1, 75f\n"
+ "ldr h0, [x23], #0x2\n"
+ "ldr h1, [x22], #0x2\n"
+ "ldr h2, [x21], #0x2\n"
+ "tbz x24, #0, 76f\n"
+ "ld1 { v0.b }[2], [x23]\n"
+ "ld1 { v1.b }[2], [x22]\n"
+ "ld1 { v2.b }[2], [x21]\n"
"b 76f\n"
"75:" // Height 3: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x24, #0x0]\n"
- "ldr b1, [x23, #0x0]\n"
- "ldr b2, [x22, #0x0]\n"
+ "ldr b0, [x23, #0x0]\n"
+ "ldr b1, [x22, #0x0]\n"
+ "ldr b2, [x21, #0x0]\n"
"76:" // Height 3: Multiply loop: Ragged operand read: Done
"tbnz %x[flags], #31, 77f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
@@ -1060,16 +1060,16 @@ void a64_hybrid_u8qa_dot_4x16 (
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
"77:" // Height 3: Multiply loop: unique 12: skip row sum
"ldr q10, [x28, #0x0]\n"
- "ldr q4, [x28, #0x10]\n"
".inst 0x6f80e150 // udot v16.4s, v10.16b, v0.4b[0]\n"
+ "ldr q4, [x28, #0x10]\n"
".inst 0x6f81e154 // udot v20.4s, v10.16b, v1.4b[0]\n"
"ldr q5, [x28, #0x20]\n"
- "ldr q6, [x28, #0x30]\n"
".inst 0x6f82e158 // udot v24.4s, v10.16b, v2.4b[0]\n"
+ "ldr q6, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
".inst 0x6f80e091 // udot v17.4s, v4.16b, v0.4b[0]\n"
".inst 0x6f81e095 // udot v21.4s, v4.16b, v1.4b[0]\n"
".inst 0x6f82e099 // udot v25.4s, v4.16b, v2.4b[0]\n"
- "add x28, x28, #0x40\n"
".inst 0x6f80e0b2 // udot v18.4s, v5.16b, v0.4b[0]\n"
".inst 0x6f81e0b6 // udot v22.4s, v5.16b, v1.4b[0]\n"
".inst 0x6f82e0ba // udot v26.4s, v5.16b, v2.4b[0]\n"
@@ -1077,49 +1077,49 @@ void a64_hybrid_u8qa_dot_4x16 (
".inst 0x6f81e0d7 // udot v23.4s, v6.16b, v1.4b[0]\n"
".inst 0x6f82e0db // udot v27.4s, v6.16b, v2.4b[0]\n"
"78:" // Height 3: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 64b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x27, x20\n"
- "add x21, x22, x20\n"
- "prfm pstl1keep, [x27, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x21, x26, x19\n"
"prfm pstl1keep, [x21, #0x0]\n"
+ "add x20, x21, x19\n"
+ "prfm pstl1keep, [x20, #0x0]\n"
"tbnz %x[flags], #31, 79f\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1r { v3.4s }, [x23]\n"
"addp v11.4s, v11.4s, v11.4s\n"
+ "add x22, %x[qp], %[b_offset]\n"
+ "ld1r { v3.4s }, [x22]\n"
"addp v12.4s, v12.4s, v12.4s\n"
"addp v13.4s, v13.4s, v13.4s\n"
- "neg v3.4s, v3.4s\n"
"addp v11.4s, v11.4s, v11.4s\n"
+ "neg v3.4s, v3.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
"addp v13.4s, v13.4s, v13.4s\n"
"mul v11.4s, v11.4s, v3.4s\n"
"mul v12.4s, v12.4s, v3.4s\n"
"mul v13.4s, v13.4s, v3.4s\n"
"79:" // Height 3: skip row sum fixup
- "ldr q0, [x10, #0x0]\n"
- "ldr q1, [x10, #0x10]\n"
"add v16.4s, v16.4s, v11.4s\n"
+ "ldr q0, [x27, #0x0]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"add v17.4s, v17.4s, v11.4s\n"
- "ldr q2, [x10, #0x20]\n"
- "ldr q3, [x10, #0x30]\n"
+ "ldr q1, [x27, #0x10]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
"add v18.4s, v18.4s, v11.4s\n"
+ "ldr q2, [x27, #0x20]\n"
+ "add x22, %x[qp], %[per_layer_mul]\n"
"add v19.4s, v19.4s, v11.4s\n"
+ "ldr q3, [x27, #0x30]\n"
+ "add x27, x27, #0x40\n"
"add v20.4s, v20.4s, v12.4s\n"
+ "ld1r { v4.4s }, [x22]\n"
"add v21.4s, v21.4s, v12.4s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x23]\n"
"add v22.4s, v22.4s, v12.4s\n"
"add v23.4s, v23.4s, v12.4s\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
"add v24.4s, v24.4s, v13.4s\n"
"add v25.4s, v25.4s, v13.4s\n"
- "add x10, x10, #0x40\n"
"add v26.4s, v26.4s, v13.4s\n"
"add v27.4s, v27.4s, v13.4s\n"
"add v16.4s, v16.4s, v0.4s\n"
@@ -1151,98 +1151,98 @@ void a64_hybrid_u8qa_dot_4x16 (
"and v4.16b, v16.16b, v0.16b\n"
"and v5.16b, v17.16b, v0.16b\n"
"and v6.16b, v18.16b, v0.16b\n"
- "and v7.16b, v19.16b, v0.16b\n"
- "and v8.16b, v20.16b, v0.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
"sqadd v16.4s, v16.4s, v4.4s\n"
"sqadd v17.4s, v17.4s, v5.4s\n"
"sqadd v18.4s, v18.4s, v6.4s\n"
+ "and v7.16b, v19.16b, v0.16b\n"
+ "and v8.16b, v20.16b, v0.16b\n"
+ "and v9.16b, v21.16b, v0.16b\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
+ "sshr v8.4s, v8.4s, #0x1f\n"
+ "sshr v9.4s, v9.4s, #0x1f\n"
"sqadd v19.4s, v19.4s, v7.4s\n"
"sqadd v20.4s, v20.4s, v8.4s\n"
- "and v9.16b, v21.16b, v0.16b\n"
+ "sqadd v21.4s, v21.4s, v9.4s\n"
"and v10.16b, v22.16b, v0.16b\n"
"and v4.16b, v23.16b, v0.16b\n"
"and v5.16b, v24.16b, v0.16b\n"
- "and v6.16b, v25.16b, v0.16b\n"
- "and v7.16b, v26.16b, v0.16b\n"
- "and v8.16b, v27.16b, v0.16b\n"
- "sshr v9.4s, v9.4s, #0x1f\n"
"sshr v10.4s, v10.4s, #0x1f\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sqadd v21.4s, v21.4s, v9.4s\n"
"sqadd v22.4s, v22.4s, v10.4s\n"
"sqadd v23.4s, v23.4s, v4.4s\n"
"sqadd v24.4s, v24.4s, v5.4s\n"
+ "and v6.16b, v25.16b, v0.16b\n"
+ "and v7.16b, v26.16b, v0.16b\n"
+ "and v8.16b, v27.16b, v0.16b\n"
+ "sshr v6.4s, v6.4s, #0x1f\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
+ "sshr v8.4s, v8.4s, #0x1f\n"
"sqadd v25.4s, v25.4s, v6.4s\n"
"sqadd v26.4s, v26.4s, v7.4s\n"
"sqadd v27.4s, v27.4s, v8.4s\n"
"80:" // Height 3: no shift correction
- "add x23, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
"srshl v16.4s, v16.4s, v0.4s\n"
+ "add x22, %x[qp], %[c_offset]\n"
+ "ld1r { v4.4s }, [x22]\n"
"srshl v17.4s, v17.4s, v0.4s\n"
+ "add x22, %x[qp], %[minval]\n"
"srshl v18.4s, v18.4s, v0.4s\n"
+ "ld1r { v5.4s }, [x22]\n"
+ "add x22, %x[qp], %[maxval]\n"
"srshl v19.4s, v19.4s, v0.4s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x23]\n"
+ "ld1r { v6.4s }, [x22]\n"
+ "cmp x9, #0x10\n"
"srshl v20.4s, v20.4s, v0.4s\n"
"srshl v21.4s, v21.4s, v0.4s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x23]\n"
- "srshl v22.4s, v22.4s, v0.4s\n"
- "srshl v23.4s, v23.4s, v0.4s\n"
- "cmp x9, #0x10\n"
- "srshl v24.4s, v24.4s, v0.4s\n"
- "srshl v25.4s, v25.4s, v0.4s\n"
- "srshl v26.4s, v26.4s, v0.4s\n"
- "srshl v27.4s, v27.4s, v0.4s\n"
"add v16.4s, v16.4s, v4.4s\n"
"add v17.4s, v17.4s, v4.4s\n"
"add v18.4s, v18.4s, v4.4s\n"
- "add v19.4s, v19.4s, v4.4s\n"
- "add v20.4s, v20.4s, v4.4s\n"
- "add v21.4s, v21.4s, v4.4s\n"
- "add v22.4s, v22.4s, v4.4s\n"
- "add v23.4s, v23.4s, v4.4s\n"
- "add v24.4s, v24.4s, v4.4s\n"
- "add v25.4s, v25.4s, v4.4s\n"
- "add v26.4s, v26.4s, v4.4s\n"
- "add v27.4s, v27.4s, v4.4s\n"
"smin v16.4s, v16.4s, v6.4s\n"
"smin v17.4s, v17.4s, v6.4s\n"
"smin v18.4s, v18.4s, v6.4s\n"
- "smin v19.4s, v19.4s, v6.4s\n"
- "smin v20.4s, v20.4s, v6.4s\n"
- "smin v21.4s, v21.4s, v6.4s\n"
- "smin v22.4s, v22.4s, v6.4s\n"
- "smin v23.4s, v23.4s, v6.4s\n"
- "smin v24.4s, v24.4s, v6.4s\n"
- "smin v25.4s, v25.4s, v6.4s\n"
- "smin v26.4s, v26.4s, v6.4s\n"
- "smin v27.4s, v27.4s, v6.4s\n"
"smax v16.4s, v16.4s, v5.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
"smax v18.4s, v18.4s, v5.4s\n"
+ "add v19.4s, v19.4s, v4.4s\n"
+ "add v20.4s, v20.4s, v4.4s\n"
+ "add v21.4s, v21.4s, v4.4s\n"
+ "smin v19.4s, v19.4s, v6.4s\n"
+ "smin v20.4s, v20.4s, v6.4s\n"
+ "smin v21.4s, v21.4s, v6.4s\n"
"smax v19.4s, v19.4s, v5.4s\n"
"smax v20.4s, v20.4s, v5.4s\n"
"smax v21.4s, v21.4s, v5.4s\n"
+ "srshl v22.4s, v22.4s, v0.4s\n"
+ "srshl v23.4s, v23.4s, v0.4s\n"
+ "srshl v24.4s, v24.4s, v0.4s\n"
+ "srshl v25.4s, v25.4s, v0.4s\n"
+ "add v22.4s, v22.4s, v4.4s\n"
+ "add v23.4s, v23.4s, v4.4s\n"
+ "add v24.4s, v24.4s, v4.4s\n"
+ "smin v22.4s, v22.4s, v6.4s\n"
+ "smin v23.4s, v23.4s, v6.4s\n"
+ "smin v24.4s, v24.4s, v6.4s\n"
"smax v22.4s, v22.4s, v5.4s\n"
"smax v23.4s, v23.4s, v5.4s\n"
"smax v24.4s, v24.4s, v5.4s\n"
- "smax v25.4s, v25.4s, v5.4s\n"
- "smax v26.4s, v26.4s, v5.4s\n"
- "smax v27.4s, v27.4s, v5.4s\n"
+ "add v25.4s, v25.4s, v4.4s\n"
+ "srshl v26.4s, v26.4s, v0.4s\n"
+ "srshl v27.4s, v27.4s, v0.4s\n"
+ "smin v25.4s, v25.4s, v6.4s\n"
"uzp1 v16.8h, v16.8h, v17.8h\n"
+ "add v26.4s, v26.4s, v4.4s\n"
+ "smax v25.4s, v25.4s, v5.4s\n"
+ "add v27.4s, v27.4s, v4.4s\n"
+ "smin v26.4s, v26.4s, v6.4s\n"
"uzp1 v17.8h, v18.8h, v19.8h\n"
+ "smin v27.4s, v27.4s, v6.4s\n"
+ "smax v26.4s, v26.4s, v5.4s\n"
"uzp1 v20.8h, v20.8h, v21.8h\n"
+ "smax v27.4s, v27.4s, v5.4s\n"
"uzp1 v21.8h, v22.8h, v23.8h\n"
"uzp1 v24.8h, v24.8h, v25.8h\n"
"uzp1 v25.8h, v26.8h, v27.8h\n"
@@ -1251,103 +1251,103 @@ void a64_hybrid_u8qa_dot_4x16 (
"uzp1 v24.16b, v24.16b, v25.16b\n"
"bge 89f\n"
"tbz x9, #3, 84f\n"
- "str d16, [x27], #0x8\n"
- "str d20, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
+ "str d16, [x26], #0x8\n"
+ "str d20, [x21], #0x8\n"
+ "str d24, [x20], #0x8\n"
"tbz x9, #2, 82f\n"
- "st1 { v16.s }[2], [x27], #0x4\n"
- "st1 { v20.s }[2], [x22], #0x4\n"
- "st1 { v24.s }[2], [x21], #0x4\n"
+ "st1 { v16.s }[2], [x26], #0x4\n"
+ "st1 { v20.s }[2], [x21], #0x4\n"
+ "st1 { v24.s }[2], [x20], #0x4\n"
"tbz x9, #1, 81f\n"
- "st1 { v16.h }[6], [x27], #0x2\n"
- "st1 { v20.h }[6], [x22], #0x2\n"
- "st1 { v24.h }[6], [x21], #0x2\n"
+ "st1 { v16.h }[6], [x26], #0x2\n"
+ "st1 { v20.h }[6], [x21], #0x2\n"
+ "st1 { v24.h }[6], [x20], #0x2\n"
"tbz x9, #0, 88f\n"
- "st1 { v16.b }[14], [x27]\n"
- "st1 { v20.b }[14], [x22]\n"
- "st1 { v24.b }[14], [x21]\n"
+ "st1 { v16.b }[14], [x26]\n"
+ "st1 { v20.b }[14], [x21]\n"
+ "st1 { v24.b }[14], [x20]\n"
"b 88f\n"
"81:" // Height 3: Partial direct writeback: partial_1_12
"tbz x9, #0, 88f\n"
- "st1 { v16.b }[12], [x27]\n"
- "st1 { v20.b }[12], [x22]\n"
- "st1 { v24.b }[12], [x21]\n"
+ "st1 { v16.b }[12], [x26]\n"
+ "st1 { v20.b }[12], [x21]\n"
+ "st1 { v24.b }[12], [x20]\n"
"b 88f\n"
"82:" // Height 3: Partial direct writeback: partial_2_8
"tbz x9, #1, 83f\n"
- "st1 { v16.h }[4], [x27], #0x2\n"
- "st1 { v20.h }[4], [x22], #0x2\n"
- "st1 { v24.h }[4], [x21], #0x2\n"
+ "st1 { v16.h }[4], [x26], #0x2\n"
+ "st1 { v20.h }[4], [x21], #0x2\n"
+ "st1 { v24.h }[4], [x20], #0x2\n"
"tbz x9, #0, 88f\n"
- "st1 { v16.b }[10], [x27]\n"
- "st1 { v20.b }[10], [x22]\n"
- "st1 { v24.b }[10], [x21]\n"
+ "st1 { v16.b }[10], [x26]\n"
+ "st1 { v20.b }[10], [x21]\n"
+ "st1 { v24.b }[10], [x20]\n"
"b 88f\n"
"83:" // Height 3: Partial direct writeback: partial_1_8
"tbz x9, #0, 88f\n"
- "st1 { v16.b }[8], [x27]\n"
- "st1 { v20.b }[8], [x22]\n"
- "st1 { v24.b }[8], [x21]\n"
+ "st1 { v16.b }[8], [x26]\n"
+ "st1 { v20.b }[8], [x21]\n"
+ "st1 { v24.b }[8], [x20]\n"
"b 88f\n"
"84:" // Height 3: Partial direct writeback: partial_4_0
"tbz x9, #2, 86f\n"
- "str s16, [x27], #0x4\n"
- "str s20, [x22], #0x4\n"
- "str s24, [x21], #0x4\n"
+ "str s16, [x26], #0x4\n"
+ "str s20, [x21], #0x4\n"
+ "str s24, [x20], #0x4\n"
"tbz x9, #1, 85f\n"
- "st1 { v16.h }[2], [x27], #0x2\n"
- "st1 { v20.h }[2], [x22], #0x2\n"
- "st1 { v24.h }[2], [x21], #0x2\n"
+ "st1 { v16.h }[2], [x26], #0x2\n"
+ "st1 { v20.h }[2], [x21], #0x2\n"
+ "st1 { v24.h }[2], [x20], #0x2\n"
"tbz x9, #0, 88f\n"
- "st1 { v16.b }[6], [x27]\n"
- "st1 { v20.b }[6], [x22]\n"
- "st1 { v24.b }[6], [x21]\n"
+ "st1 { v16.b }[6], [x26]\n"
+ "st1 { v20.b }[6], [x21]\n"
+ "st1 { v24.b }[6], [x20]\n"
"b 88f\n"
"85:" // Height 3: Partial direct writeback: partial_1_4
"tbz x9, #0, 88f\n"
- "st1 { v16.b }[4], [x27]\n"
- "st1 { v20.b }[4], [x22]\n"
- "st1 { v24.b }[4], [x21]\n"
+ "st1 { v16.b }[4], [x26]\n"
+ "st1 { v20.b }[4], [x21]\n"
+ "st1 { v24.b }[4], [x20]\n"
"b 88f\n"
"86:" // Height 3: Partial direct writeback: partial_2_0
"tbz x9, #1, 87f\n"
- "str h16, [x27], #0x2\n"
- "str h20, [x22], #0x2\n"
- "str h24, [x21], #0x2\n"
+ "str h16, [x26], #0x2\n"
+ "str h20, [x21], #0x2\n"
+ "str h24, [x20], #0x2\n"
"tbz x9, #0, 88f\n"
- "st1 { v16.b }[2], [x27]\n"
- "st1 { v20.b }[2], [x22]\n"
- "st1 { v24.b }[2], [x21]\n"
+ "st1 { v16.b }[2], [x26]\n"
+ "st1 { v20.b }[2], [x21]\n"
+ "st1 { v24.b }[2], [x20]\n"
"b 88f\n"
"87:" // Height 3: Partial direct writeback: partial_1_0
- "str b16, [x27, #0x0]\n"
- "str b20, [x22, #0x0]\n"
- "str b24, [x21, #0x0]\n"
+ "str b16, [x26, #0x0]\n"
+ "str b20, [x21, #0x0]\n"
+ "str b24, [x20, #0x0]\n"
"88:" // Height 3: Partial direct writeback: Done
"b 90f\n"
"89:" // Height 3: Full writeback
- "str q16, [x27, #0x0]\n"
- "add x27, x27, #0x10\n"
- "str q20, [x22, #0x0]\n"
- "str q24, [x21, #0x0]\n"
+ "str q16, [x26, #0x0]\n"
+ "add x26, x26, #0x10\n"
+ "str q20, [x21, #0x0]\n"
+ "str q24, [x20, #0x0]\n"
"90:" // Height 3: Writeback done
"subs x9, x9, #0x10\n"
"bgt 62b\n"
"b 122f\n"
"91:" // Height 4
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x4\n"
- "mov x10, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x27, %x[col_bias]\n"
"movi v12.4s, #0x0\n"
- "movi v13.4s, #0x0\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"bic %x[flags], %x[flags], #0x80000000\n"
- "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "movi v13.4s, #0x0\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x26, %x[output_ptr]\n"
"movi v14.4s, #0x0\n"
+ "mov x19, #0x4\n"
"movi v15.16b, #0x1\n"
- "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"92:" // Height 4: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -1366,59 +1366,59 @@ void a64_hybrid_u8qa_dot_4x16 (
"movi v30.4s, #0x0\n"
"movi v31.4s, #0x0\n"
"93:" // Height 4: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"94:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 95f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "ldr x22, [x21, #0x10]\n"
- "ldr x21, [x21, #0x18]\n"
- "cbnz x26, 96f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
- "add x21, x21, x20\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "ldr x20, [x20, #0x18]\n"
+ "cbnz x25, 96f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
+ "add x20, x20, x19\n"
"b 96f\n"
"95:" // Height 4: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
+ "add x20, x21, x19\n"
"96:" // Height 4: input setup done
- "cmp x25, #0x10\n"
+ "cmp x24, #0x10\n"
"blt 101f\n"
- "ldr q0, [x24, #0x0]\n"
- "ldr q1, [x23, #0x0]\n"
- "cmp x25, #0x20\n"
- "ldr q2, [x22, #0x0]\n"
- "ldr q3, [x21, #0x0]\n"
+ "ldr q0, [x23, #0x0]\n"
+ "ldr q1, [x22, #0x0]\n"
+ "cmp x24, #0x20\n"
+ "ldr q2, [x21, #0x0]\n"
+ "ldr q3, [x20, #0x0]\n"
"ldr q4, [x28, #0x0]\n"
- "ldr q5, [x28, #0x10]\n"
- "ldr q6, [x28, #0x20]\n"
- "ldr q7, [x28, #0x30]\n"
- "ldr q8, [x28, #0x40]\n"
- "ldr q9, [x28, #0x50]\n"
- "ldr q10, [x28, #0x60]\n"
"blt 99f\n"
"97:" // Height 4: Multiply loop: Main loop head
".inst 0x6f80e090 // udot v16.4s, v4.16b, v0.4b[0]\n"
- ".inst 0x6f81e094 // udot v20.4s, v4.16b, v1.4b[0]\n"
- "add x24, x24, #0x10\n"
+ "ldr q5, [x28, #0x10]\n"
"add x23, x23, #0x10\n"
+ ".inst 0x6f81e094 // udot v20.4s, v4.16b, v1.4b[0]\n"
+ "ldr q6, [x28, #0x20]\n"
+ "add x22, x22, #0x10\n"
".inst 0x6f82e098 // udot v24.4s, v4.16b, v2.4b[0]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x21, x21, #0x10\n"
".inst 0x6f83e09c // udot v28.4s, v4.16b, v3.4b[0]\n"
- "ldr q4, [x28, #0x70]\n"
- "add x22, x22, #0x10\n"
+ "ldr q8, [x28, #0x40]\n"
+ "add x20, x20, #0x10\n"
".inst 0x6f80e0b1 // udot v17.4s, v5.16b, v0.4b[0]\n"
+ "ldr q9, [x28, #0x50]\n"
".inst 0x6f81e0b5 // udot v21.4s, v5.16b, v1.4b[0]\n"
- "add x21, x21, #0x10\n"
+ "ldr q10, [x28, #0x60]\n"
".inst 0x6f82e0b9 // udot v25.4s, v5.16b, v2.4b[0]\n"
+ "ldr q4, [x28, #0x70]\n"
".inst 0x6f83e0bd // udot v29.4s, v5.16b, v3.4b[0]\n"
"ldr q5, [x28, #0x80]\n"
".inst 0x6f80e0d2 // udot v18.4s, v6.16b, v0.4b[0]\n"
@@ -1491,38 +1491,38 @@ void a64_hybrid_u8qa_dot_4x16 (
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
".inst 0x6e8f946e // udot v14.4s, v3.16b, v15.16b\n"
"98:" // Height 4: Multiply loop: unique 13: skip row sum
- "ldr q0, [x24, #0x0]\n"
- "ldr q1, [x23, #0x0]\n"
- "sub x25, x25, #0x10\n"
- "cmp x25, #0x20\n"
- "ldr q2, [x22, #0x0]\n"
- "ldr q3, [x21, #0x0]\n"
- "ldr q4, [x28, #0x0]\n"
- "ldr q5, [x28, #0x10]\n"
- "ldr q6, [x28, #0x20]\n"
- "ldr q7, [x28, #0x30]\n"
- "ldr q8, [x28, #0x40]\n"
- "ldr q9, [x28, #0x50]\n"
- "ldr q10, [x28, #0x60]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"prfm pldl1keep, [x23, #0x80]\n"
+ "sub x24, x24, #0x10\n"
"prfm pldl1keep, [x22, #0x80]\n"
+ "cmp x24, #0x20\n"
"prfm pldl1keep, [x21, #0x80]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
+ "ldr q0, [x23, #0x0]\n"
+ "ldr q1, [x22, #0x0]\n"
+ "ldr q2, [x21, #0x0]\n"
+ "ldr q3, [x20, #0x0]\n"
+ "ldr q4, [x28, #0x0]\n"
"bge 97b\n"
"99:" // Height 4: Multiply loop: Single iteration only
".inst 0x6f80e090 // udot v16.4s, v4.16b, v0.4b[0]\n"
+ "ldr q5, [x28, #0x10]\n"
+ "sub x24, x24, #0x10\n"
".inst 0x6f81e094 // udot v20.4s, v4.16b, v1.4b[0]\n"
- "sub x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
+ "ldr q6, [x28, #0x20]\n"
+ "add x23, x23, #0x10\n"
".inst 0x6f82e098 // udot v24.4s, v4.16b, v2.4b[0]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x22, x22, #0x10\n"
".inst 0x6f83e09c // udot v28.4s, v4.16b, v3.4b[0]\n"
- "ldr q4, [x28, #0x70]\n"
- "add x23, x23, #0x10\n"
+ "ldr q8, [x28, #0x40]\n"
+ "add x21, x21, #0x10\n"
".inst 0x6f80e0b1 // udot v17.4s, v5.16b, v0.4b[0]\n"
+ "ldr q9, [x28, #0x50]\n"
+ "add x20, x20, #0x10\n"
".inst 0x6f81e0b5 // udot v21.4s, v5.16b, v1.4b[0]\n"
- "add x22, x22, #0x10\n"
- "add x21, x21, #0x10\n"
+ "ldr q10, [x28, #0x60]\n"
".inst 0x6f82e0b9 // udot v25.4s, v5.16b, v2.4b[0]\n"
+ "ldr q4, [x28, #0x70]\n"
".inst 0x6f83e0bd // udot v29.4s, v5.16b, v3.4b[0]\n"
"ldr q5, [x28, #0x80]\n"
".inst 0x6f80e0d2 // udot v18.4s, v6.16b, v0.4b[0]\n"
@@ -1595,19 +1595,19 @@ void a64_hybrid_u8qa_dot_4x16 (
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
".inst 0x6e8f946e // udot v14.4s, v3.16b, v15.16b\n"
"100:" // Height 4: Multiply loop: unique 14: skip row sum
- "prfm pldl1keep, [x24, #0x80]\n"
"prfm pldl1keep, [x23, #0x80]\n"
"prfm pldl1keep, [x22, #0x80]\n"
"prfm pldl1keep, [x21, #0x80]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
"101:" // Height 4: Multiply loop: Main loop skip
- "cbz x25, 108f\n"
- "cmp x25, #0x4\n"
+ "cbz x24, 108f\n"
+ "cmp x24, #0x4\n"
"blt 104f\n"
"102:" // Height 4: Multiply loop: Odd block loop
- "ldr s0, [x24], #0x4\n"
- "ldr s1, [x23], #0x4\n"
- "ldr s2, [x22], #0x4\n"
- "ldr s3, [x21], #0x4\n"
+ "ldr s0, [x23], #0x4\n"
+ "ldr s1, [x22], #0x4\n"
+ "ldr s2, [x21], #0x4\n"
+ "ldr s3, [x20], #0x4\n"
"tbnz %x[flags], #31, 103f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
@@ -1615,16 +1615,16 @@ void a64_hybrid_u8qa_dot_4x16 (
".inst 0x6e8f946e // udot v14.4s, v3.16b, v15.16b\n"
"103:" // Height 4: Multiply loop: unique 15: skip row sum
"ldr q6, [x28, #0x0]\n"
- "ldr q7, [x28, #0x10]\n"
- "sub x25, x25, #0x4\n"
- "cmp x25, #0x4\n"
- "ldr q8, [x28, #0x20]\n"
- "ldr q9, [x28, #0x30]\n"
".inst 0x6f80e0d0 // udot v16.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "sub x24, x24, #0x4\n"
".inst 0x6f81e0d4 // udot v20.4s, v6.16b, v1.4b[0]\n"
+ "ldr q8, [x28, #0x20]\n"
+ "cmp x24, #0x4\n"
".inst 0x6f82e0d8 // udot v24.4s, v6.16b, v2.4b[0]\n"
- ".inst 0x6f83e0dc // udot v28.4s, v6.16b, v3.4b[0]\n"
+ "ldr q9, [x28, #0x30]\n"
"add x28, x28, #0x40\n"
+ ".inst 0x6f83e0dc // udot v28.4s, v6.16b, v3.4b[0]\n"
".inst 0x6f80e0f1 // udot v17.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0f5 // udot v21.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f82e0f9 // udot v25.4s, v7.16b, v2.4b[0]\n"
@@ -1638,24 +1638,24 @@ void a64_hybrid_u8qa_dot_4x16 (
".inst 0x6f82e13b // udot v27.4s, v9.16b, v2.4b[0]\n"
".inst 0x6f83e13f // udot v31.4s, v9.16b, v3.4b[0]\n"
"bge 102b\n"
+ "cbz x24, 108f\n"
"104:" // Height 4: Multiply loop: Skip odd blocks
- "cbz x25, 108f\n"
- "tbz x25, #1, 105f\n"
- "ldr h0, [x24], #0x2\n"
- "ldr h1, [x23], #0x2\n"
- "ldr h2, [x22], #0x2\n"
- "ldr h3, [x21], #0x2\n"
- "tbz x25, #0, 106f\n"
- "ld1 { v0.b }[2], [x24]\n"
- "ld1 { v1.b }[2], [x23]\n"
- "ld1 { v2.b }[2], [x22]\n"
- "ld1 { v3.b }[2], [x21]\n"
+ "tbz x24, #1, 105f\n"
+ "ldr h0, [x23], #0x2\n"
+ "ldr h1, [x22], #0x2\n"
+ "ldr h2, [x21], #0x2\n"
+ "ldr h3, [x20], #0x2\n"
+ "tbz x24, #0, 106f\n"
+ "ld1 { v0.b }[2], [x23]\n"
+ "ld1 { v1.b }[2], [x22]\n"
+ "ld1 { v2.b }[2], [x21]\n"
+ "ld1 { v3.b }[2], [x20]\n"
"b 106f\n"
"105:" // Height 4: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x24, #0x0]\n"
- "ldr b1, [x23, #0x0]\n"
- "ldr b2, [x22, #0x0]\n"
- "ldr b3, [x21, #0x0]\n"
+ "ldr b0, [x23, #0x0]\n"
+ "ldr b1, [x22, #0x0]\n"
+ "ldr b2, [x21, #0x0]\n"
+ "ldr b3, [x20, #0x0]\n"
"106:" // Height 4: Multiply loop: Ragged operand read: Done
"tbnz %x[flags], #31, 107f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
@@ -1664,16 +1664,16 @@ void a64_hybrid_u8qa_dot_4x16 (
".inst 0x6e8f946e // udot v14.4s, v3.16b, v15.16b\n"
"107:" // Height 4: Multiply loop: unique 16: skip row sum
"ldr q10, [x28, #0x0]\n"
- "ldr q4, [x28, #0x10]\n"
".inst 0x6f80e150 // udot v16.4s, v10.16b, v0.4b[0]\n"
+ "ldr q4, [x28, #0x10]\n"
".inst 0x6f81e154 // udot v20.4s, v10.16b, v1.4b[0]\n"
"ldr q5, [x28, #0x20]\n"
- "ldr q6, [x28, #0x30]\n"
".inst 0x6f82e158 // udot v24.4s, v10.16b, v2.4b[0]\n"
+ "ldr q6, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
".inst 0x6f83e15c // udot v28.4s, v10.16b, v3.4b[0]\n"
".inst 0x6f80e091 // udot v17.4s, v4.16b, v0.4b[0]\n"
".inst 0x6f81e095 // udot v21.4s, v4.16b, v1.4b[0]\n"
- "add x28, x28, #0x40\n"
".inst 0x6f82e099 // udot v25.4s, v4.16b, v2.4b[0]\n"
".inst 0x6f83e09d // udot v29.4s, v4.16b, v3.4b[0]\n"
".inst 0x6f80e0b2 // udot v18.4s, v5.16b, v0.4b[0]\n"
@@ -1685,27 +1685,27 @@ void a64_hybrid_u8qa_dot_4x16 (
".inst 0x6f82e0db // udot v27.4s, v6.16b, v2.4b[0]\n"
".inst 0x6f83e0df // udot v31.4s, v6.16b, v3.4b[0]\n"
"108:" // Height 4: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 94b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x27, x20\n"
- "add x21, x22, x20\n"
- "prfm pstl1keep, [x27, #0x0]\n"
- "add x20, x21, x20\n"
- "prfm pstl1keep, [x22, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x21, x26, x19\n"
"prfm pstl1keep, [x21, #0x0]\n"
+ "add x20, x21, x19\n"
"prfm pstl1keep, [x20, #0x0]\n"
+ "add x19, x20, x19\n"
+ "prfm pstl1keep, [x19, #0x0]\n"
"tbnz %x[flags], #31, 109f\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
"addp v11.4s, v11.4s, v11.4s\n"
+ "add x22, %x[qp], %[b_offset]\n"
+ "ld1r { v4.4s }, [x22]\n"
"addp v12.4s, v12.4s, v12.4s\n"
"addp v13.4s, v13.4s, v13.4s\n"
"addp v14.4s, v14.4s, v14.4s\n"
- "neg v4.4s, v4.4s\n"
"addp v11.4s, v11.4s, v11.4s\n"
+ "neg v4.4s, v4.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
"addp v13.4s, v13.4s, v13.4s\n"
"addp v14.4s, v14.4s, v14.4s\n"
@@ -1714,25 +1714,25 @@ void a64_hybrid_u8qa_dot_4x16 (
"mul v13.4s, v13.4s, v4.4s\n"
"mul v14.4s, v14.4s, v4.4s\n"
"109:" // Height 4: skip row sum fixup
- "ldr q0, [x10, #0x0]\n"
- "ldr q1, [x10, #0x10]\n"
"add v16.4s, v16.4s, v11.4s\n"
+ "ldr q0, [x27, #0x0]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"add v17.4s, v17.4s, v11.4s\n"
- "ldr q2, [x10, #0x20]\n"
- "ldr q3, [x10, #0x30]\n"
+ "ldr q1, [x27, #0x10]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
"add v18.4s, v18.4s, v11.4s\n"
+ "ldr q2, [x27, #0x20]\n"
+ "add x22, %x[qp], %[per_layer_mul]\n"
"add v19.4s, v19.4s, v11.4s\n"
+ "ldr q3, [x27, #0x30]\n"
+ "add x27, x27, #0x40\n"
"add v20.4s, v20.4s, v12.4s\n"
+ "ld1r { v4.4s }, [x22]\n"
"add v21.4s, v21.4s, v12.4s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x23]\n"
"add v22.4s, v22.4s, v12.4s\n"
"add v23.4s, v23.4s, v12.4s\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
"add v24.4s, v24.4s, v13.4s\n"
"add v25.4s, v25.4s, v13.4s\n"
- "add x10, x10, #0x40\n"
"add v26.4s, v26.4s, v13.4s\n"
"add v27.4s, v27.4s, v13.4s\n"
"add v28.4s, v28.4s, v14.4s\n"
@@ -1775,126 +1775,126 @@ void a64_hybrid_u8qa_dot_4x16 (
"tbz %x[flags], #5, 110f\n"
"and v4.16b, v16.16b, v0.16b\n"
"and v5.16b, v17.16b, v0.16b\n"
+ "and v6.16b, v18.16b, v0.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
+ "sshr v6.4s, v6.4s, #0x1f\n"
"sqadd v16.4s, v16.4s, v4.4s\n"
"sqadd v17.4s, v17.4s, v5.4s\n"
- "and v6.16b, v18.16b, v0.16b\n"
+ "sqadd v18.4s, v18.4s, v6.4s\n"
"and v7.16b, v19.16b, v0.16b\n"
"and v8.16b, v20.16b, v0.16b\n"
"and v9.16b, v21.16b, v0.16b\n"
- "and v10.16b, v22.16b, v0.16b\n"
- "and v4.16b, v23.16b, v0.16b\n"
- "and v5.16b, v24.16b, v0.16b\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sshr v8.4s, v8.4s, #0x1f\n"
"sshr v9.4s, v9.4s, #0x1f\n"
- "sshr v10.4s, v10.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sqadd v18.4s, v18.4s, v6.4s\n"
"sqadd v19.4s, v19.4s, v7.4s\n"
"sqadd v20.4s, v20.4s, v8.4s\n"
"sqadd v21.4s, v21.4s, v9.4s\n"
+ "and v10.16b, v22.16b, v0.16b\n"
+ "and v4.16b, v23.16b, v0.16b\n"
+ "and v5.16b, v24.16b, v0.16b\n"
+ "sshr v10.4s, v10.4s, #0x1f\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
"sqadd v22.4s, v22.4s, v10.4s\n"
"sqadd v23.4s, v23.4s, v4.4s\n"
"sqadd v24.4s, v24.4s, v5.4s\n"
"and v6.16b, v25.16b, v0.16b\n"
"and v7.16b, v26.16b, v0.16b\n"
"and v8.16b, v27.16b, v0.16b\n"
- "and v9.16b, v28.16b, v0.16b\n"
- "and v10.16b, v29.16b, v0.16b\n"
- "and v4.16b, v30.16b, v0.16b\n"
- "and v5.16b, v31.16b, v0.16b\n"
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sshr v8.4s, v8.4s, #0x1f\n"
- "sshr v9.4s, v9.4s, #0x1f\n"
- "sshr v10.4s, v10.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
"sqadd v25.4s, v25.4s, v6.4s\n"
"sqadd v26.4s, v26.4s, v7.4s\n"
"sqadd v27.4s, v27.4s, v8.4s\n"
+ "and v9.16b, v28.16b, v0.16b\n"
+ "and v10.16b, v29.16b, v0.16b\n"
+ "and v4.16b, v30.16b, v0.16b\n"
+ "sshr v9.4s, v9.4s, #0x1f\n"
+ "sshr v10.4s, v10.4s, #0x1f\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
"sqadd v28.4s, v28.4s, v9.4s\n"
"sqadd v29.4s, v29.4s, v10.4s\n"
"sqadd v30.4s, v30.4s, v4.4s\n"
+ "and v5.16b, v31.16b, v0.16b\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
"sqadd v31.4s, v31.4s, v5.4s\n"
"110:" // Height 4: no shift correction
- "add x23, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
"srshl v16.4s, v16.4s, v0.4s\n"
+ "add x22, %x[qp], %[c_offset]\n"
+ "ld1r { v4.4s }, [x22]\n"
"srshl v17.4s, v17.4s, v0.4s\n"
+ "add x22, %x[qp], %[minval]\n"
"srshl v18.4s, v18.4s, v0.4s\n"
+ "ld1r { v5.4s }, [x22]\n"
+ "add x22, %x[qp], %[maxval]\n"
"srshl v19.4s, v19.4s, v0.4s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x23]\n"
+ "ld1r { v6.4s }, [x22]\n"
+ "cmp x9, #0x10\n"
"srshl v20.4s, v20.4s, v0.4s\n"
"srshl v21.4s, v21.4s, v0.4s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x23]\n"
- "srshl v22.4s, v22.4s, v0.4s\n"
- "srshl v23.4s, v23.4s, v0.4s\n"
- "cmp x9, #0x10\n"
- "srshl v24.4s, v24.4s, v0.4s\n"
- "srshl v25.4s, v25.4s, v0.4s\n"
- "srshl v26.4s, v26.4s, v0.4s\n"
- "srshl v27.4s, v27.4s, v0.4s\n"
- "srshl v28.4s, v28.4s, v0.4s\n"
- "srshl v29.4s, v29.4s, v0.4s\n"
- "srshl v30.4s, v30.4s, v0.4s\n"
- "srshl v31.4s, v31.4s, v0.4s\n"
"add v16.4s, v16.4s, v4.4s\n"
"add v17.4s, v17.4s, v4.4s\n"
"add v18.4s, v18.4s, v4.4s\n"
- "add v19.4s, v19.4s, v4.4s\n"
- "add v20.4s, v20.4s, v4.4s\n"
- "add v21.4s, v21.4s, v4.4s\n"
- "add v22.4s, v22.4s, v4.4s\n"
- "add v23.4s, v23.4s, v4.4s\n"
- "add v24.4s, v24.4s, v4.4s\n"
- "add v25.4s, v25.4s, v4.4s\n"
- "add v26.4s, v26.4s, v4.4s\n"
- "add v27.4s, v27.4s, v4.4s\n"
- "add v28.4s, v28.4s, v4.4s\n"
- "add v29.4s, v29.4s, v4.4s\n"
- "add v30.4s, v30.4s, v4.4s\n"
- "add v31.4s, v31.4s, v4.4s\n"
"smin v16.4s, v16.4s, v6.4s\n"
"smin v17.4s, v17.4s, v6.4s\n"
"smin v18.4s, v18.4s, v6.4s\n"
- "smin v19.4s, v19.4s, v6.4s\n"
- "smin v20.4s, v20.4s, v6.4s\n"
- "smin v21.4s, v21.4s, v6.4s\n"
- "smin v22.4s, v22.4s, v6.4s\n"
- "smin v23.4s, v23.4s, v6.4s\n"
- "smin v24.4s, v24.4s, v6.4s\n"
- "smin v25.4s, v25.4s, v6.4s\n"
- "smin v26.4s, v26.4s, v6.4s\n"
- "smin v27.4s, v27.4s, v6.4s\n"
- "smin v28.4s, v28.4s, v6.4s\n"
- "smin v29.4s, v29.4s, v6.4s\n"
- "smin v30.4s, v30.4s, v6.4s\n"
- "smin v31.4s, v31.4s, v6.4s\n"
"smax v16.4s, v16.4s, v5.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
"smax v18.4s, v18.4s, v5.4s\n"
+ "add v19.4s, v19.4s, v4.4s\n"
+ "add v20.4s, v20.4s, v4.4s\n"
+ "add v21.4s, v21.4s, v4.4s\n"
+ "smin v19.4s, v19.4s, v6.4s\n"
+ "smin v20.4s, v20.4s, v6.4s\n"
+ "smin v21.4s, v21.4s, v6.4s\n"
"smax v19.4s, v19.4s, v5.4s\n"
"smax v20.4s, v20.4s, v5.4s\n"
"smax v21.4s, v21.4s, v5.4s\n"
+ "srshl v22.4s, v22.4s, v0.4s\n"
+ "srshl v23.4s, v23.4s, v0.4s\n"
+ "srshl v24.4s, v24.4s, v0.4s\n"
+ "srshl v25.4s, v25.4s, v0.4s\n"
+ "add v22.4s, v22.4s, v4.4s\n"
+ "add v23.4s, v23.4s, v4.4s\n"
+ "add v24.4s, v24.4s, v4.4s\n"
+ "smin v22.4s, v22.4s, v6.4s\n"
+ "smin v23.4s, v23.4s, v6.4s\n"
+ "smin v24.4s, v24.4s, v6.4s\n"
"smax v22.4s, v22.4s, v5.4s\n"
"smax v23.4s, v23.4s, v5.4s\n"
"smax v24.4s, v24.4s, v5.4s\n"
+ "add v25.4s, v25.4s, v4.4s\n"
+ "srshl v26.4s, v26.4s, v0.4s\n"
+ "srshl v27.4s, v27.4s, v0.4s\n"
+ "smin v25.4s, v25.4s, v6.4s\n"
+ "srshl v28.4s, v28.4s, v0.4s\n"
+ "add v26.4s, v26.4s, v4.4s\n"
"smax v25.4s, v25.4s, v5.4s\n"
+ "add v27.4s, v27.4s, v4.4s\n"
+ "smin v26.4s, v26.4s, v6.4s\n"
+ "add v28.4s, v28.4s, v4.4s\n"
+ "smin v27.4s, v27.4s, v6.4s\n"
"smax v26.4s, v26.4s, v5.4s\n"
+ "smin v28.4s, v28.4s, v6.4s\n"
"smax v27.4s, v27.4s, v5.4s\n"
+ "srshl v29.4s, v29.4s, v0.4s\n"
"smax v28.4s, v28.4s, v5.4s\n"
+ "srshl v30.4s, v30.4s, v0.4s\n"
+ "srshl v31.4s, v31.4s, v0.4s\n"
+ "add v29.4s, v29.4s, v4.4s\n"
+ "uzp1 v16.8h, v16.8h, v17.8h\n"
+ "add v30.4s, v30.4s, v4.4s\n"
+ "smin v29.4s, v29.4s, v6.4s\n"
+ "add v31.4s, v31.4s, v4.4s\n"
+ "smin v30.4s, v30.4s, v6.4s\n"
"smax v29.4s, v29.4s, v5.4s\n"
+ "smin v31.4s, v31.4s, v6.4s\n"
"smax v30.4s, v30.4s, v5.4s\n"
- "smax v31.4s, v31.4s, v5.4s\n"
- "uzp1 v16.8h, v16.8h, v17.8h\n"
"uzp1 v17.8h, v18.8h, v19.8h\n"
+ "smax v31.4s, v31.4s, v5.4s\n"
"uzp1 v20.8h, v20.8h, v21.8h\n"
"uzp1 v21.8h, v22.8h, v23.8h\n"
"uzp1 v24.8h, v24.8h, v25.8h\n"
@@ -1907,120 +1907,120 @@ void a64_hybrid_u8qa_dot_4x16 (
"uzp1 v28.16b, v28.16b, v29.16b\n"
"bge 119f\n"
"tbz x9, #3, 114f\n"
- "str d16, [x27], #0x8\n"
- "str d20, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
- "str d28, [x20], #0x8\n"
+ "str d16, [x26], #0x8\n"
+ "str d20, [x21], #0x8\n"
+ "str d24, [x20], #0x8\n"
+ "str d28, [x19], #0x8\n"
"tbz x9, #2, 112f\n"
- "st1 { v16.s }[2], [x27], #0x4\n"
- "st1 { v20.s }[2], [x22], #0x4\n"
- "st1 { v24.s }[2], [x21], #0x4\n"
- "st1 { v28.s }[2], [x20], #0x4\n"
+ "st1 { v16.s }[2], [x26], #0x4\n"
+ "st1 { v20.s }[2], [x21], #0x4\n"
+ "st1 { v24.s }[2], [x20], #0x4\n"
+ "st1 { v28.s }[2], [x19], #0x4\n"
"tbz x9, #1, 111f\n"
- "st1 { v16.h }[6], [x27], #0x2\n"
- "st1 { v20.h }[6], [x22], #0x2\n"
- "st1 { v24.h }[6], [x21], #0x2\n"
- "st1 { v28.h }[6], [x20], #0x2\n"
+ "st1 { v16.h }[6], [x26], #0x2\n"
+ "st1 { v20.h }[6], [x21], #0x2\n"
+ "st1 { v24.h }[6], [x20], #0x2\n"
+ "st1 { v28.h }[6], [x19], #0x2\n"
"tbz x9, #0, 118f\n"
- "st1 { v16.b }[14], [x27]\n"
- "st1 { v20.b }[14], [x22]\n"
- "st1 { v24.b }[14], [x21]\n"
- "st1 { v28.b }[14], [x20]\n"
+ "st1 { v16.b }[14], [x26]\n"
+ "st1 { v20.b }[14], [x21]\n"
+ "st1 { v24.b }[14], [x20]\n"
+ "st1 { v28.b }[14], [x19]\n"
"b 118f\n"
"111:" // Height 4: Partial direct writeback: partial_1_12
"tbz x9, #0, 118f\n"
- "st1 { v16.b }[12], [x27]\n"
- "st1 { v20.b }[12], [x22]\n"
- "st1 { v24.b }[12], [x21]\n"
- "st1 { v28.b }[12], [x20]\n"
+ "st1 { v16.b }[12], [x26]\n"
+ "st1 { v20.b }[12], [x21]\n"
+ "st1 { v24.b }[12], [x20]\n"
+ "st1 { v28.b }[12], [x19]\n"
"b 118f\n"
"112:" // Height 4: Partial direct writeback: partial_2_8
"tbz x9, #1, 113f\n"
- "st1 { v16.h }[4], [x27], #0x2\n"
- "st1 { v20.h }[4], [x22], #0x2\n"
- "st1 { v24.h }[4], [x21], #0x2\n"
- "st1 { v28.h }[4], [x20], #0x2\n"
+ "st1 { v16.h }[4], [x26], #0x2\n"
+ "st1 { v20.h }[4], [x21], #0x2\n"
+ "st1 { v24.h }[4], [x20], #0x2\n"
+ "st1 { v28.h }[4], [x19], #0x2\n"
"tbz x9, #0, 118f\n"
- "st1 { v16.b }[10], [x27]\n"
- "st1 { v20.b }[10], [x22]\n"
- "st1 { v24.b }[10], [x21]\n"
- "st1 { v28.b }[10], [x20]\n"
+ "st1 { v16.b }[10], [x26]\n"
+ "st1 { v20.b }[10], [x21]\n"
+ "st1 { v24.b }[10], [x20]\n"
+ "st1 { v28.b }[10], [x19]\n"
"b 118f\n"
"113:" // Height 4: Partial direct writeback: partial_1_8
"tbz x9, #0, 118f\n"
- "st1 { v16.b }[8], [x27]\n"
- "st1 { v20.b }[8], [x22]\n"
- "st1 { v24.b }[8], [x21]\n"
- "st1 { v28.b }[8], [x20]\n"
+ "st1 { v16.b }[8], [x26]\n"
+ "st1 { v20.b }[8], [x21]\n"
+ "st1 { v24.b }[8], [x20]\n"
+ "st1 { v28.b }[8], [x19]\n"
"b 118f\n"
"114:" // Height 4: Partial direct writeback: partial_4_0
"tbz x9, #2, 116f\n"
- "str s16, [x27], #0x4\n"
- "str s20, [x22], #0x4\n"
- "str s24, [x21], #0x4\n"
- "str s28, [x20], #0x4\n"
+ "str s16, [x26], #0x4\n"
+ "str s20, [x21], #0x4\n"
+ "str s24, [x20], #0x4\n"
+ "str s28, [x19], #0x4\n"
"tbz x9, #1, 115f\n"
- "st1 { v16.h }[2], [x27], #0x2\n"
- "st1 { v20.h }[2], [x22], #0x2\n"
- "st1 { v24.h }[2], [x21], #0x2\n"
- "st1 { v28.h }[2], [x20], #0x2\n"
+ "st1 { v16.h }[2], [x26], #0x2\n"
+ "st1 { v20.h }[2], [x21], #0x2\n"
+ "st1 { v24.h }[2], [x20], #0x2\n"
+ "st1 { v28.h }[2], [x19], #0x2\n"
"tbz x9, #0, 118f\n"
- "st1 { v16.b }[6], [x27]\n"
- "st1 { v20.b }[6], [x22]\n"
- "st1 { v24.b }[6], [x21]\n"
- "st1 { v28.b }[6], [x20]\n"
+ "st1 { v16.b }[6], [x26]\n"
+ "st1 { v20.b }[6], [x21]\n"
+ "st1 { v24.b }[6], [x20]\n"
+ "st1 { v28.b }[6], [x19]\n"
"b 118f\n"
"115:" // Height 4: Partial direct writeback: partial_1_4
"tbz x9, #0, 118f\n"
- "st1 { v16.b }[4], [x27]\n"
- "st1 { v20.b }[4], [x22]\n"
- "st1 { v24.b }[4], [x21]\n"
- "st1 { v28.b }[4], [x20]\n"
+ "st1 { v16.b }[4], [x26]\n"
+ "st1 { v20.b }[4], [x21]\n"
+ "st1 { v24.b }[4], [x20]\n"
+ "st1 { v28.b }[4], [x19]\n"
"b 118f\n"
"116:" // Height 4: Partial direct writeback: partial_2_0
"tbz x9, #1, 117f\n"
- "str h16, [x27], #0x2\n"
- "str h20, [x22], #0x2\n"
- "str h24, [x21], #0x2\n"
- "str h28, [x20], #0x2\n"
+ "str h16, [x26], #0x2\n"
+ "str h20, [x21], #0x2\n"
+ "str h24, [x20], #0x2\n"
+ "str h28, [x19], #0x2\n"
"tbz x9, #0, 118f\n"
- "st1 { v16.b }[2], [x27]\n"
- "st1 { v20.b }[2], [x22]\n"
- "st1 { v24.b }[2], [x21]\n"
- "st1 { v28.b }[2], [x20]\n"
+ "st1 { v16.b }[2], [x26]\n"
+ "st1 { v20.b }[2], [x21]\n"
+ "st1 { v24.b }[2], [x20]\n"
+ "st1 { v28.b }[2], [x19]\n"
"b 118f\n"
"117:" // Height 4: Partial direct writeback: partial_1_0
- "str b16, [x27, #0x0]\n"
- "str b20, [x22, #0x0]\n"
- "str b24, [x21, #0x0]\n"
- "str b28, [x20, #0x0]\n"
+ "str b16, [x26, #0x0]\n"
+ "str b20, [x21, #0x0]\n"
+ "str b24, [x20, #0x0]\n"
+ "str b28, [x19, #0x0]\n"
"118:" // Height 4: Partial direct writeback: Done
"b 120f\n"
"119:" // Height 4: Full writeback
- "str q16, [x27, #0x0]\n"
- "add x27, x27, #0x10\n"
- "str q20, [x22, #0x0]\n"
- "str q24, [x21, #0x0]\n"
- "str q28, [x20, #0x0]\n"
+ "str q16, [x26, #0x0]\n"
+ "add x26, x26, #0x10\n"
+ "str q20, [x21, #0x0]\n"
+ "str q24, [x20, #0x0]\n"
+ "str q28, [x19, #0x0]\n"
"120:" // Height 4: Writeback done
"subs x9, x9, #0x10\n"
"bgt 92b\n"
"subs %x[M], %x[M], #0x4\n"
"beq 122f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 121f\n"
- "add x21, x21, #0x4\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x4\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"121:" // Update direct input
- "mov x20, #0x4\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x4\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"122:" // Exit
: [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_mmla_4x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_mmla_4x16/generic.cpp
index f808cb199d..daeb986529 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_mmla_4x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_mmla_4x16/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -85,13 +85,13 @@ void a64_hybrid_u8qa_mmla_4x16 (
"cmp %x[M], #0x2\n"
"bgt 65f\n"
"beq 33f\n"
- "mov x10, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
- "movi v15.16b, #0x1\n"
- "bic %x[flags], %x[flags], #0x80000000\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "movi v15.16b, #0x1\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "mov x27, %x[col_bias]\n"
+ "bic %x[flags], %x[flags], #0x80000000\n"
+ "mov x26, %x[output_ptr]\n"
"2:" // Height 1: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -102,27 +102,27 @@ void a64_hybrid_u8qa_mmla_4x16 (
"movi v22.4s, #0x0\n"
"movi v23.4s, #0x0\n"
"3:" // Height 1: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"4:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 5f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "cbnz x26, 6f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "cbnz x25, 6f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19\n"
"b 6f\n"
"5:" // Height 1: setup direct input
- "mov x24, %x[input_ptr]\n"
+ "mov x23, %x[input_ptr]\n"
"6:" // Height 1: input setup done
- "cmp x25, #0x10\n"
+ "cmp x24, #0x10\n"
"blt 11f\n"
- "ldr q1, [x24, #0x0]\n"
+ "ldr q1, [x23, #0x0]\n"
"ldr q5, [x28, #0x0]\n"
- "cmp x25, #0x20\n"
+ "cmp x24, #0x20\n"
"ldr q6, [x28, #0x10]\n"
"ldr q7, [x28, #0x20]\n"
"ldr q8, [x28, #0x30]\n"
@@ -132,9 +132,10 @@ void a64_hybrid_u8qa_mmla_4x16 (
"blt 9f\n"
"7:" // Height 1: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "add x23, x23, #0x10\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
".inst 0x6e85a410 // ummla v16.4s, v0.16b, v5.16b\n"
"ldr q5, [x28, #0x70]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
".inst 0x6e86a414 // ummla v20.4s, v0.16b, v6.16b\n"
"ldr q6, [x28, #0x80]\n"
".inst 0x6e87a411 // ummla v17.4s, v0.16b, v7.16b\n"
@@ -151,10 +152,9 @@ void a64_hybrid_u8qa_mmla_4x16 (
"ldr q5, [x28, #0xe0]\n"
".inst 0x6e86a430 // ummla v16.4s, v1.16b, v6.16b\n"
"ldr q6, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
".inst 0x6e87a434 // ummla v20.4s, v1.16b, v7.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x6e88a431 // ummla v17.4s, v1.16b, v8.16b\n"
- "add x28, x28, #0x100\n"
".inst 0x6e89a435 // ummla v21.4s, v1.16b, v9.16b\n"
".inst 0x6e8aa432 // ummla v18.4s, v1.16b, v10.16b\n"
".inst 0x6e84a436 // ummla v22.4s, v1.16b, v4.16b\n"
@@ -164,23 +164,25 @@ void a64_hybrid_u8qa_mmla_4x16 (
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f942b // udot v11.4s, v1.16b, v15.16b\n"
"8:" // Height 1: Multiply loop: unique 1: skip row sum
- "ldr q1, [x24, #0x0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "sub x24, x24, #0x10\n"
+ "ldr q1, [x23, #0x0]\n"
+ "cmp x24, #0x20\n"
"ldr q5, [x28, #0x0]\n"
- "sub x25, x25, #0x10\n"
- "cmp x25, #0x20\n"
"ldr q6, [x28, #0x10]\n"
"ldr q7, [x28, #0x20]\n"
"ldr q8, [x28, #0x30]\n"
"ldr q9, [x28, #0x40]\n"
"ldr q10, [x28, #0x50]\n"
"ldr q4, [x28, #0x60]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"bge 7b\n"
"9:" // Height 1: Multiply loop: Single iteration only
+ "sub x24, x24, #0x10\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "add x23, x23, #0x10\n"
".inst 0x6e85a410 // ummla v16.4s, v0.16b, v5.16b\n"
"ldr q5, [x28, #0x70]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
".inst 0x6e86a414 // ummla v20.4s, v0.16b, v6.16b\n"
"ldr q6, [x28, #0x80]\n"
".inst 0x6e87a411 // ummla v17.4s, v0.16b, v7.16b\n"
@@ -197,11 +199,9 @@ void a64_hybrid_u8qa_mmla_4x16 (
"ldr q5, [x28, #0xe0]\n"
".inst 0x6e86a430 // ummla v16.4s, v1.16b, v6.16b\n"
"ldr q6, [x28, #0xf0]\n"
- "sub x25, x25, #0x10\n"
+ "add x28, x28, #0x100\n"
".inst 0x6e87a434 // ummla v20.4s, v1.16b, v7.16b\n"
".inst 0x6e88a431 // ummla v17.4s, v1.16b, v8.16b\n"
- "add x24, x24, #0x10\n"
- "add x28, x28, #0x100\n"
".inst 0x6e89a435 // ummla v21.4s, v1.16b, v9.16b\n"
".inst 0x6e8aa432 // ummla v18.4s, v1.16b, v10.16b\n"
".inst 0x6e84a436 // ummla v22.4s, v1.16b, v4.16b\n"
@@ -211,118 +211,120 @@ void a64_hybrid_u8qa_mmla_4x16 (
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f942b // udot v11.4s, v1.16b, v15.16b\n"
"10:" // Height 1: Multiply loop: unique 2: skip row sum
- "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"11:" // Height 1: Multiply loop: Main loop skip
- "cbz x25, 20f\n"
- "cmp x25, #0x8\n"
+ "cbz x24, 20f\n"
+ "cmp x24, #0x8\n"
"blt 14f\n"
"12:" // Height 1: Multiply loop: Odd block loop
- "ldr d1, [x24], #0x8\n"
+ "movi v2.16b, #0x0\n"
+ "ldr d1, [x23], #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
"tbnz %x[flags], #31, 13f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
"13:" // Height 1: Multiply loop: unique 3: skip row sum
"ldr q8, [x28, #0x0]\n"
- "ldr q9, [x28, #0x10]\n"
".inst 0x6e88a410 // ummla v16.4s, v0.16b, v8.16b\n"
- "sub x25, x25, #0x8\n"
+ "ldr q9, [x28, #0x10]\n"
+ "sub x24, x24, #0x8\n"
+ ".inst 0x6e89a414 // ummla v20.4s, v0.16b, v9.16b\n"
"ldr q10, [x28, #0x20]\n"
+ "cmp x24, #0x8\n"
+ ".inst 0x6e8aa411 // ummla v17.4s, v0.16b, v10.16b\n"
"ldr q4, [x28, #0x30]\n"
- "cmp x25, #0x8\n"
- ".inst 0x6e89a414 // ummla v20.4s, v0.16b, v9.16b\n"
"ldr q5, [x28, #0x40]\n"
- "ldr q6, [x28, #0x50]\n"
- ".inst 0x6e8aa411 // ummla v17.4s, v0.16b, v10.16b\n"
".inst 0x6e84a415 // ummla v21.4s, v0.16b, v4.16b\n"
+ "ldr q6, [x28, #0x50]\n"
+ ".inst 0x6e85a412 // ummla v18.4s, v0.16b, v5.16b\n"
"ldr q7, [x28, #0x60]\n"
"ldr q8, [x28, #0x70]\n"
- ".inst 0x6e85a412 // ummla v18.4s, v0.16b, v5.16b\n"
".inst 0x6e86a416 // ummla v22.4s, v0.16b, v6.16b\n"
+ "add x28, x28, #0x80\n"
".inst 0x6e87a413 // ummla v19.4s, v0.16b, v7.16b\n"
".inst 0x6e88a417 // ummla v23.4s, v0.16b, v8.16b\n"
- "add x28, x28, #0x80\n"
"bge 12b\n"
+ "cbz x24, 20f\n"
"14:" // Height 1: Multiply loop: Skip odd blocks
- "cbz x25, 20f\n"
- "tbz x25, #2, 16f\n"
- "ldr s1, [x24], #0x4\n"
- "tbz x25, #1, 15f\n"
- "ld1 { v1.h }[2], [x24], #0x2\n"
- "tbz x25, #0, 18f\n"
- "ld1 { v1.b }[6], [x24]\n"
+ "tbz x24, #2, 16f\n"
+ "ldr s1, [x23], #0x4\n"
+ "tbz x24, #1, 15f\n"
+ "ld1 { v1.h }[2], [x23], #0x2\n"
+ "tbz x24, #0, 18f\n"
+ "ld1 { v1.b }[6], [x23]\n"
"b 18f\n"
"15:" // Height 1: Multiply loop: Ragged operand read: partial_1_4
- "tbz x25, #0, 18f\n"
- "ld1 { v1.b }[4], [x24]\n"
+ "tbz x24, #0, 18f\n"
+ "ld1 { v1.b }[4], [x23]\n"
"b 18f\n"
"16:" // Height 1: Multiply loop: Ragged operand read: partial_2_0
- "tbz x25, #1, 17f\n"
- "ldr h1, [x24], #0x2\n"
- "tbz x25, #0, 18f\n"
- "ld1 { v1.b }[2], [x24]\n"
+ "tbz x24, #1, 17f\n"
+ "ldr h1, [x23], #0x2\n"
+ "tbz x24, #0, 18f\n"
+ "ld1 { v1.b }[2], [x23]\n"
"b 18f\n"
"17:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
- "ldr b1, [x24, #0x0]\n"
+ "ldr b1, [x23, #0x0]\n"
"18:" // Height 1: Multiply loop: Ragged operand read: Done
+ "movi v2.16b, #0x0\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
"tbnz %x[flags], #31, 19f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
"19:" // Height 1: Multiply loop: unique 4: skip row sum
"ldr q10, [x28, #0x0]\n"
- "ldr q4, [x28, #0x10]\n"
".inst 0x6e8aa410 // ummla v16.4s, v0.16b, v10.16b\n"
- ".inst 0x6e84a414 // ummla v20.4s, v0.16b, v4.16b\n"
+ "ldr q4, [x28, #0x10]\n"
"ldr q5, [x28, #0x20]\n"
+ ".inst 0x6e84a414 // ummla v20.4s, v0.16b, v4.16b\n"
"ldr q6, [x28, #0x30]\n"
".inst 0x6e85a411 // ummla v17.4s, v0.16b, v5.16b\n"
- ".inst 0x6e86a415 // ummla v21.4s, v0.16b, v6.16b\n"
"ldr q7, [x28, #0x40]\n"
"ldr q8, [x28, #0x50]\n"
- ".inst 0x6e87a412 // ummla v18.4s, v0.16b, v7.16b\n"
- ".inst 0x6e88a416 // ummla v22.4s, v0.16b, v8.16b\n"
+ ".inst 0x6e86a415 // ummla v21.4s, v0.16b, v6.16b\n"
"ldr q9, [x28, #0x60]\n"
"ldr q10, [x28, #0x70]\n"
+ ".inst 0x6e87a412 // ummla v18.4s, v0.16b, v7.16b\n"
+ "add x28, x28, #0x80\n"
+ ".inst 0x6e88a416 // ummla v22.4s, v0.16b, v8.16b\n"
".inst 0x6e89a413 // ummla v19.4s, v0.16b, v9.16b\n"
".inst 0x6e8aa417 // ummla v23.4s, v0.16b, v10.16b\n"
- "add x28, x28, #0x80\n"
"20:" // Height 1: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 4b\n"
"uzp1 v16.2d, v16.2d, v20.2d\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
"uzp1 v17.2d, v17.2d, v21.2d\n"
- "prfm pstl1keep, [x27, #0x0]\n"
"uzp1 v18.2d, v18.2d, v22.2d\n"
"uzp1 v19.2d, v19.2d, v23.2d\n"
"mov v23.16b, v16.16b\n"
"tbnz %x[flags], #31, 21f\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1r { v1.4s }, [x23]\n"
"addp v11.4s, v11.4s, v11.4s\n"
- "neg v1.4s, v1.4s\n"
+ "add x22, %x[qp], %[b_offset]\n"
+ "ld1r { v1.4s }, [x22]\n"
"dup v11.4s, v11.s[0]\n"
+ "neg v1.4s, v1.4s\n"
"mul v11.4s, v11.4s, v1.4s\n"
"21:" // Height 1: skip row sum fixup
- "ldr q0, [x10, #0x0]\n"
- "ldr q1, [x10, #0x10]\n"
"add v23.4s, v23.4s, v11.4s\n"
+ "ldr q0, [x27, #0x0]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"add v17.4s, v17.4s, v11.4s\n"
- "ldr q2, [x10, #0x20]\n"
- "ldr q3, [x10, #0x30]\n"
+ "ldr q1, [x27, #0x10]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
"add v18.4s, v18.4s, v11.4s\n"
+ "ldr q2, [x27, #0x20]\n"
+ "add x22, %x[qp], %[per_layer_mul]\n"
"add v19.4s, v19.4s, v11.4s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x23]\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
+ "ldr q3, [x27, #0x30]\n"
+ "add x27, x27, #0x40\n"
"add v23.4s, v23.4s, v0.4s\n"
+ "ld1r { v0.4s }, [x23]\n"
+ "ld1r { v4.4s }, [x22]\n"
"add v17.4s, v17.4s, v1.4s\n"
"add v18.4s, v18.4s, v2.4s\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x23]\n"
"add v19.4s, v19.4s, v3.4s\n"
"sqrdmulh v23.4s, v23.4s, v4.4s\n"
- "add x10, x10, #0x40\n"
"sqrdmulh v17.4s, v17.4s, v4.4s\n"
"sqrdmulh v18.4s, v18.4s, v4.4s\n"
"sqrdmulh v19.4s, v19.4s, v4.4s\n"
@@ -334,100 +336,100 @@ void a64_hybrid_u8qa_mmla_4x16 (
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v23.4s, v23.4s, v4.4s\n"
"sqadd v17.4s, v17.4s, v5.4s\n"
"sqadd v18.4s, v18.4s, v6.4s\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
"sqadd v19.4s, v19.4s, v7.4s\n"
"22:" // Height 1: no shift correction
- "add x23, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
"srshl v23.4s, v23.4s, v0.4s\n"
+ "add x22, %x[qp], %[c_offset]\n"
+ "ld1r { v4.4s }, [x22]\n"
"srshl v17.4s, v17.4s, v0.4s\n"
+ "add x22, %x[qp], %[minval]\n"
"srshl v18.4s, v18.4s, v0.4s\n"
+ "ld1r { v5.4s }, [x22]\n"
+ "add x22, %x[qp], %[maxval]\n"
"srshl v19.4s, v19.4s, v0.4s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x23]\n"
+ "ld1r { v6.4s }, [x22]\n"
+ "cmp x9, #0x10\n"
"add v23.4s, v23.4s, v4.4s\n"
"add v17.4s, v17.4s, v4.4s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x23]\n"
"add v18.4s, v18.4s, v4.4s\n"
"add v19.4s, v19.4s, v4.4s\n"
- "cmp x9, #0x10\n"
"smin v23.4s, v23.4s, v6.4s\n"
"smin v17.4s, v17.4s, v6.4s\n"
"smin v18.4s, v18.4s, v6.4s\n"
- "smin v19.4s, v19.4s, v6.4s\n"
"smax v23.4s, v23.4s, v5.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
"smax v18.4s, v18.4s, v5.4s\n"
- "smax v19.4s, v19.4s, v5.4s\n"
+ "smin v19.4s, v19.4s, v6.4s\n"
"uzp1 v23.8h, v23.8h, v17.8h\n"
+ "smax v19.4s, v19.4s, v5.4s\n"
"uzp1 v17.8h, v18.8h, v19.8h\n"
"uzp1 v23.16b, v23.16b, v17.16b\n"
"bge 31f\n"
"tbz x9, #3, 26f\n"
- "str d23, [x27], #0x8\n"
+ "str d23, [x26], #0x8\n"
"tbz x9, #2, 24f\n"
- "st1 { v23.s }[2], [x27], #0x4\n"
+ "st1 { v23.s }[2], [x26], #0x4\n"
"tbz x9, #1, 23f\n"
- "st1 { v23.h }[6], [x27], #0x2\n"
+ "st1 { v23.h }[6], [x26], #0x2\n"
"tbz x9, #0, 30f\n"
- "st1 { v23.b }[14], [x27]\n"
+ "st1 { v23.b }[14], [x26]\n"
"b 30f\n"
"23:" // Height 1: Partial direct writeback: partial_1_12
"tbz x9, #0, 30f\n"
- "st1 { v23.b }[12], [x27]\n"
+ "st1 { v23.b }[12], [x26]\n"
"b 30f\n"
"24:" // Height 1: Partial direct writeback: partial_2_8
"tbz x9, #1, 25f\n"
- "st1 { v23.h }[4], [x27], #0x2\n"
+ "st1 { v23.h }[4], [x26], #0x2\n"
"tbz x9, #0, 30f\n"
- "st1 { v23.b }[10], [x27]\n"
+ "st1 { v23.b }[10], [x26]\n"
"b 30f\n"
"25:" // Height 1: Partial direct writeback: partial_1_8
"tbz x9, #0, 30f\n"
- "st1 { v23.b }[8], [x27]\n"
+ "st1 { v23.b }[8], [x26]\n"
"b 30f\n"
"26:" // Height 1: Partial direct writeback: partial_4_0
"tbz x9, #2, 28f\n"
- "str s23, [x27], #0x4\n"
+ "str s23, [x26], #0x4\n"
"tbz x9, #1, 27f\n"
- "st1 { v23.h }[2], [x27], #0x2\n"
+ "st1 { v23.h }[2], [x26], #0x2\n"
"tbz x9, #0, 30f\n"
- "st1 { v23.b }[6], [x27]\n"
+ "st1 { v23.b }[6], [x26]\n"
"b 30f\n"
"27:" // Height 1: Partial direct writeback: partial_1_4
"tbz x9, #0, 30f\n"
- "st1 { v23.b }[4], [x27]\n"
+ "st1 { v23.b }[4], [x26]\n"
"b 30f\n"
"28:" // Height 1: Partial direct writeback: partial_2_0
"tbz x9, #1, 29f\n"
- "str h23, [x27], #0x2\n"
+ "str h23, [x26], #0x2\n"
"tbz x9, #0, 30f\n"
- "st1 { v23.b }[2], [x27]\n"
+ "st1 { v23.b }[2], [x26]\n"
"b 30f\n"
"29:" // Height 1: Partial direct writeback: partial_1_0
- "str b23, [x27, #0x0]\n"
+ "str b23, [x26, #0x0]\n"
"30:" // Height 1: Partial direct writeback: Done
"b 32f\n"
"31:" // Height 1: Full writeback
- "str q23, [x27, #0x0]\n"
- "add x27, x27, #0x10\n"
+ "str q23, [x26, #0x0]\n"
+ "add x26, x26, #0x10\n"
"32:" // Height 1: Writeback done
"subs x9, x9, #0x10\n"
"bgt 2b\n"
"b 130f\n"
"33:" // Height 2
- "mov x10, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x27, %x[col_bias]\n"
"movi v12.4s, #0x0\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"bic %x[flags], %x[flags], #0x80000000\n"
"movi v15.16b, #0x1\n"
- "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "mov x26, %x[output_ptr]\n"
"34:" // Height 2: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -438,64 +440,64 @@ void a64_hybrid_u8qa_mmla_4x16 (
"movi v22.4s, #0x0\n"
"movi v23.4s, #0x0\n"
"35:" // Height 2: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"36:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 37f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "cbnz x26, 38f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "cbnz x25, 38f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
"b 38f\n"
"37:" // Height 2: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19\n"
"38:" // Height 2: input setup done
- "cmp x25, #0x10\n"
+ "cmp x24, #0x10\n"
"blt 43f\n"
- "ldr q1, [x24, #0x0]\n"
- "ldr q2, [x23, #0x0]\n"
- "cmp x25, #0x20\n"
+ "ldr q1, [x23, #0x0]\n"
+ "ldr q2, [x22, #0x0]\n"
+ "cmp x24, #0x20\n"
+ "blt 41f\n"
+ "39:" // Height 2: Multiply loop: Main loop head
+ "trn1 v0.2d, v1.2d, v2.2d\n"
"ldr q5, [x28, #0x0]\n"
+ "add x23, x23, #0x10\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
"ldr q6, [x28, #0x10]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x6e85a410 // ummla v16.4s, v0.16b, v5.16b\n"
"ldr q7, [x28, #0x20]\n"
"ldr q8, [x28, #0x30]\n"
+ ".inst 0x6e86a414 // ummla v20.4s, v0.16b, v6.16b\n"
"ldr q9, [x28, #0x40]\n"
+ ".inst 0x6e87a411 // ummla v17.4s, v0.16b, v7.16b\n"
"ldr q10, [x28, #0x50]\n"
+ ".inst 0x6e88a415 // ummla v21.4s, v0.16b, v8.16b\n"
"ldr q4, [x28, #0x60]\n"
- "blt 41f\n"
- "39:" // Height 2: Multiply loop: Main loop head
- "trn1 v0.2d, v1.2d, v2.2d\n"
- ".inst 0x6e85a410 // ummla v16.4s, v0.16b, v5.16b\n"
+ ".inst 0x6e89a412 // ummla v18.4s, v0.16b, v9.16b\n"
"ldr q5, [x28, #0x70]\n"
- ".inst 0x6e86a414 // ummla v20.4s, v0.16b, v6.16b\n"
"ldr q6, [x28, #0x80]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e87a411 // ummla v17.4s, v0.16b, v7.16b\n"
+ ".inst 0x6e8aa416 // ummla v22.4s, v0.16b, v10.16b\n"
"ldr q7, [x28, #0x90]\n"
- ".inst 0x6e88a415 // ummla v21.4s, v0.16b, v8.16b\n"
"ldr q8, [x28, #0xa0]\n"
- ".inst 0x6e89a412 // ummla v18.4s, v0.16b, v9.16b\n"
- "ldr q9, [x28, #0xb0]\n"
- ".inst 0x6e8aa416 // ummla v22.4s, v0.16b, v10.16b\n"
- "ldr q10, [x28, #0xc0]\n"
".inst 0x6e84a413 // ummla v19.4s, v0.16b, v4.16b\n"
- "ldr q4, [x28, #0xd0]\n"
+ "ldr q9, [x28, #0xb0]\n"
".inst 0x6e85a417 // ummla v23.4s, v0.16b, v5.16b\n"
- "ldr q5, [x28, #0xe0]\n"
".inst 0x6e86a430 // ummla v16.4s, v1.16b, v6.16b\n"
- "ldr q6, [x28, #0xf0]\n"
+ "ldr q10, [x28, #0xc0]\n"
+ "ldr q4, [x28, #0xd0]\n"
".inst 0x6e87a434 // ummla v20.4s, v1.16b, v7.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x6e88a431 // ummla v17.4s, v1.16b, v8.16b\n"
- "add x23, x23, #0x10\n"
+ "ldr q5, [x28, #0xe0]\n"
".inst 0x6e89a435 // ummla v21.4s, v1.16b, v9.16b\n"
+ "ldr q6, [x28, #0xf0]\n"
"add x28, x28, #0x100\n"
".inst 0x6e8aa432 // ummla v18.4s, v1.16b, v10.16b\n"
".inst 0x6e84a436 // ummla v22.4s, v1.16b, v4.16b\n"
@@ -505,49 +507,49 @@ void a64_hybrid_u8qa_mmla_4x16 (
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f942b // udot v11.4s, v1.16b, v15.16b\n"
"40:" // Height 2: Multiply loop: unique 5: skip row sum
- "ldr q1, [x24, #0x0]\n"
- "ldr q2, [x23, #0x0]\n"
- "sub x25, x25, #0x10\n"
- "cmp x25, #0x20\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "sub x24, x24, #0x10\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "cmp x24, #0x20\n"
+ "ldr q1, [x23, #0x0]\n"
+ "ldr q2, [x22, #0x0]\n"
+ "bge 39b\n"
+ "41:" // Height 2: Multiply loop: Single iteration only
+ "trn1 v0.2d, v1.2d, v2.2d\n"
"ldr q5, [x28, #0x0]\n"
+ "sub x24, x24, #0x10\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
"ldr q6, [x28, #0x10]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x6e85a410 // ummla v16.4s, v0.16b, v5.16b\n"
"ldr q7, [x28, #0x20]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x6e86a414 // ummla v20.4s, v0.16b, v6.16b\n"
"ldr q8, [x28, #0x30]\n"
"ldr q9, [x28, #0x40]\n"
+ ".inst 0x6e87a411 // ummla v17.4s, v0.16b, v7.16b\n"
"ldr q10, [x28, #0x50]\n"
"ldr q4, [x28, #0x60]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "bge 39b\n"
- "41:" // Height 2: Multiply loop: Single iteration only
- "trn1 v0.2d, v1.2d, v2.2d\n"
- ".inst 0x6e85a410 // ummla v16.4s, v0.16b, v5.16b\n"
+ ".inst 0x6e88a415 // ummla v21.4s, v0.16b, v8.16b\n"
+ ".inst 0x6e89a412 // ummla v18.4s, v0.16b, v9.16b\n"
"ldr q5, [x28, #0x70]\n"
- ".inst 0x6e86a414 // ummla v20.4s, v0.16b, v6.16b\n"
"ldr q6, [x28, #0x80]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e87a411 // ummla v17.4s, v0.16b, v7.16b\n"
+ ".inst 0x6e8aa416 // ummla v22.4s, v0.16b, v10.16b\n"
"ldr q7, [x28, #0x90]\n"
- ".inst 0x6e88a415 // ummla v21.4s, v0.16b, v8.16b\n"
+ ".inst 0x6e84a413 // ummla v19.4s, v0.16b, v4.16b\n"
"ldr q8, [x28, #0xa0]\n"
- ".inst 0x6e89a412 // ummla v18.4s, v0.16b, v9.16b\n"
"ldr q9, [x28, #0xb0]\n"
- ".inst 0x6e8aa416 // ummla v22.4s, v0.16b, v10.16b\n"
- "ldr q10, [x28, #0xc0]\n"
- ".inst 0x6e84a413 // ummla v19.4s, v0.16b, v4.16b\n"
- "ldr q4, [x28, #0xd0]\n"
".inst 0x6e85a417 // ummla v23.4s, v0.16b, v5.16b\n"
- "ldr q5, [x28, #0xe0]\n"
".inst 0x6e86a430 // ummla v16.4s, v1.16b, v6.16b\n"
- "ldr q6, [x28, #0xf0]\n"
- "sub x25, x25, #0x10\n"
+ "ldr q10, [x28, #0xc0]\n"
+ "ldr q4, [x28, #0xd0]\n"
".inst 0x6e87a434 // ummla v20.4s, v1.16b, v7.16b\n"
+ "ldr q5, [x28, #0xe0]\n"
".inst 0x6e88a431 // ummla v17.4s, v1.16b, v8.16b\n"
- "add x24, x24, #0x10\n"
- "add x23, x23, #0x10\n"
".inst 0x6e89a435 // ummla v21.4s, v1.16b, v9.16b\n"
- ".inst 0x6e8aa432 // ummla v18.4s, v1.16b, v10.16b\n"
+ "ldr q6, [x28, #0xf0]\n"
"add x28, x28, #0x100\n"
+ ".inst 0x6e8aa432 // ummla v18.4s, v1.16b, v10.16b\n"
".inst 0x6e84a436 // ummla v22.4s, v1.16b, v4.16b\n"
".inst 0x6e85a433 // ummla v19.4s, v1.16b, v5.16b\n"
".inst 0x6e86a437 // ummla v23.4s, v1.16b, v6.16b\n"
@@ -555,136 +557,136 @@ void a64_hybrid_u8qa_mmla_4x16 (
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f942b // udot v11.4s, v1.16b, v15.16b\n"
"42:" // Height 2: Multiply loop: unique 6: skip row sum
- "prfm pldl1keep, [x24, #0x80]\n"
"prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
"43:" // Height 2: Multiply loop: Main loop skip
- "cbz x25, 52f\n"
- "cmp x25, #0x8\n"
+ "cbz x24, 52f\n"
+ "cmp x24, #0x8\n"
"blt 46f\n"
"44:" // Height 2: Multiply loop: Odd block loop
- "ldr d1, [x24], #0x8\n"
- "ldr d2, [x23], #0x8\n"
+ "ldr d1, [x23], #0x8\n"
+ "ldr d2, [x22], #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
"tbnz %x[flags], #31, 45f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
"45:" // Height 2: Multiply loop: unique 7: skip row sum
"ldr q8, [x28, #0x0]\n"
- "ldr q9, [x28, #0x10]\n"
".inst 0x6e88a410 // ummla v16.4s, v0.16b, v8.16b\n"
- "sub x25, x25, #0x8\n"
+ "ldr q9, [x28, #0x10]\n"
+ "sub x24, x24, #0x8\n"
+ ".inst 0x6e89a414 // ummla v20.4s, v0.16b, v9.16b\n"
"ldr q10, [x28, #0x20]\n"
+ "cmp x24, #0x8\n"
+ ".inst 0x6e8aa411 // ummla v17.4s, v0.16b, v10.16b\n"
"ldr q4, [x28, #0x30]\n"
- "cmp x25, #0x8\n"
- ".inst 0x6e89a414 // ummla v20.4s, v0.16b, v9.16b\n"
"ldr q5, [x28, #0x40]\n"
- "ldr q6, [x28, #0x50]\n"
- ".inst 0x6e8aa411 // ummla v17.4s, v0.16b, v10.16b\n"
".inst 0x6e84a415 // ummla v21.4s, v0.16b, v4.16b\n"
+ "ldr q6, [x28, #0x50]\n"
+ ".inst 0x6e85a412 // ummla v18.4s, v0.16b, v5.16b\n"
"ldr q7, [x28, #0x60]\n"
"ldr q8, [x28, #0x70]\n"
- ".inst 0x6e85a412 // ummla v18.4s, v0.16b, v5.16b\n"
".inst 0x6e86a416 // ummla v22.4s, v0.16b, v6.16b\n"
+ "add x28, x28, #0x80\n"
".inst 0x6e87a413 // ummla v19.4s, v0.16b, v7.16b\n"
".inst 0x6e88a417 // ummla v23.4s, v0.16b, v8.16b\n"
- "add x28, x28, #0x80\n"
"bge 44b\n"
+ "cbz x24, 52f\n"
"46:" // Height 2: Multiply loop: Skip odd blocks
- "cbz x25, 52f\n"
- "tbz x25, #2, 48f\n"
- "ldr s1, [x24], #0x4\n"
- "ldr s2, [x23], #0x4\n"
- "tbz x25, #1, 47f\n"
- "ld1 { v1.h }[2], [x24], #0x2\n"
- "ld1 { v2.h }[2], [x23], #0x2\n"
- "tbz x25, #0, 50f\n"
- "ld1 { v1.b }[6], [x24]\n"
- "ld1 { v2.b }[6], [x23]\n"
+ "tbz x24, #2, 48f\n"
+ "ldr s1, [x23], #0x4\n"
+ "ldr s2, [x22], #0x4\n"
+ "tbz x24, #1, 47f\n"
+ "ld1 { v1.h }[2], [x23], #0x2\n"
+ "ld1 { v2.h }[2], [x22], #0x2\n"
+ "tbz x24, #0, 50f\n"
+ "ld1 { v1.b }[6], [x23]\n"
+ "ld1 { v2.b }[6], [x22]\n"
"b 50f\n"
"47:" // Height 2: Multiply loop: Ragged operand read: partial_1_4
- "tbz x25, #0, 50f\n"
- "ld1 { v1.b }[4], [x24]\n"
- "ld1 { v2.b }[4], [x23]\n"
+ "tbz x24, #0, 50f\n"
+ "ld1 { v1.b }[4], [x23]\n"
+ "ld1 { v2.b }[4], [x22]\n"
"b 50f\n"
"48:" // Height 2: Multiply loop: Ragged operand read: partial_2_0
- "tbz x25, #1, 49f\n"
- "ldr h1, [x24], #0x2\n"
- "ldr h2, [x23], #0x2\n"
- "tbz x25, #0, 50f\n"
- "ld1 { v1.b }[2], [x24]\n"
- "ld1 { v2.b }[2], [x23]\n"
+ "tbz x24, #1, 49f\n"
+ "ldr h1, [x23], #0x2\n"
+ "ldr h2, [x22], #0x2\n"
+ "tbz x24, #0, 50f\n"
+ "ld1 { v1.b }[2], [x23]\n"
+ "ld1 { v2.b }[2], [x22]\n"
"b 50f\n"
"49:" // Height 2: Multiply loop: Ragged operand read: partial_1_0
- "ldr b1, [x24, #0x0]\n"
- "ldr b2, [x23, #0x0]\n"
+ "ldr b1, [x23, #0x0]\n"
+ "ldr b2, [x22, #0x0]\n"
"50:" // Height 2: Multiply loop: Ragged operand read: Done
"trn1 v0.2d, v1.2d, v2.2d\n"
"tbnz %x[flags], #31, 51f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
"51:" // Height 2: Multiply loop: unique 8: skip row sum
"ldr q10, [x28, #0x0]\n"
- "ldr q4, [x28, #0x10]\n"
".inst 0x6e8aa410 // ummla v16.4s, v0.16b, v10.16b\n"
- ".inst 0x6e84a414 // ummla v20.4s, v0.16b, v4.16b\n"
+ "ldr q4, [x28, #0x10]\n"
"ldr q5, [x28, #0x20]\n"
+ ".inst 0x6e84a414 // ummla v20.4s, v0.16b, v4.16b\n"
"ldr q6, [x28, #0x30]\n"
".inst 0x6e85a411 // ummla v17.4s, v0.16b, v5.16b\n"
- ".inst 0x6e86a415 // ummla v21.4s, v0.16b, v6.16b\n"
"ldr q7, [x28, #0x40]\n"
"ldr q8, [x28, #0x50]\n"
- ".inst 0x6e87a412 // ummla v18.4s, v0.16b, v7.16b\n"
- ".inst 0x6e88a416 // ummla v22.4s, v0.16b, v8.16b\n"
+ ".inst 0x6e86a415 // ummla v21.4s, v0.16b, v6.16b\n"
"ldr q9, [x28, #0x60]\n"
"ldr q10, [x28, #0x70]\n"
+ ".inst 0x6e87a412 // ummla v18.4s, v0.16b, v7.16b\n"
+ "add x28, x28, #0x80\n"
+ ".inst 0x6e88a416 // ummla v22.4s, v0.16b, v8.16b\n"
".inst 0x6e89a413 // ummla v19.4s, v0.16b, v9.16b\n"
".inst 0x6e8aa417 // ummla v23.4s, v0.16b, v10.16b\n"
- "add x28, x28, #0x80\n"
"52:" // Height 2: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 36b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 v4.2d, v16.2d, v20.2d\n"
- "add x22, x27, x20\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x21, x26, x19\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
- "prfm pstl1keep, [x27, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
"uzp2 v18.2d, v18.2d, v22.2d\n"
"uzp1 v22.2d, v19.2d, v23.2d\n"
"uzp2 v19.2d, v19.2d, v23.2d\n"
"mov v23.16b, v4.16b\n"
"tbnz %x[flags], #31, 53f\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1r { v2.4s }, [x23]\n"
"addp v11.4s, v11.4s, v11.4s\n"
- "neg v2.4s, v2.4s\n"
+ "add x22, %x[qp], %[b_offset]\n"
+ "ld1r { v2.4s }, [x22]\n"
"dup v12.4s, v11.s[3]\n"
"dup v11.4s, v11.s[0]\n"
+ "neg v2.4s, v2.4s\n"
"mul v11.4s, v11.4s, v2.4s\n"
"mul v12.4s, v12.4s, v2.4s\n"
"53:" // Height 2: skip row sum fixup
- "ldr q0, [x10, #0x0]\n"
- "ldr q1, [x10, #0x10]\n"
"add v23.4s, v23.4s, v11.4s\n"
+ "ldr q0, [x27, #0x0]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"add v20.4s, v20.4s, v11.4s\n"
- "ldr q2, [x10, #0x20]\n"
- "ldr q3, [x10, #0x30]\n"
+ "ldr q1, [x27, #0x10]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
"add v21.4s, v21.4s, v11.4s\n"
+ "ldr q2, [x27, #0x20]\n"
+ "add x22, %x[qp], %[per_layer_mul]\n"
"add v22.4s, v22.4s, v11.4s\n"
+ "ldr q3, [x27, #0x30]\n"
+ "add x27, x27, #0x40\n"
"add v16.4s, v16.4s, v12.4s\n"
+ "ld1r { v4.4s }, [x22]\n"
"add v17.4s, v17.4s, v12.4s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x23]\n"
"add v18.4s, v18.4s, v12.4s\n"
"add v19.4s, v19.4s, v12.4s\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
"add v23.4s, v23.4s, v0.4s\n"
"add v20.4s, v20.4s, v1.4s\n"
- "add x10, x10, #0x40\n"
"add v21.4s, v21.4s, v2.4s\n"
"add v22.4s, v22.4s, v3.4s\n"
"add v16.4s, v16.4s, v0.4s\n"
@@ -702,154 +704,154 @@ void a64_hybrid_u8qa_mmla_4x16 (
"sqrdmulh v19.4s, v19.4s, v4.4s\n"
"tbz %x[flags], #5, 54f\n"
"and v4.16b, v23.16b, v0.16b\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sqadd v23.4s, v23.4s, v4.4s\n"
"and v5.16b, v20.16b, v0.16b\n"
"and v6.16b, v21.16b, v0.16b\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
+ "sshr v6.4s, v6.4s, #0x1f\n"
+ "sqadd v23.4s, v23.4s, v4.4s\n"
+ "sqadd v20.4s, v20.4s, v5.4s\n"
+ "sqadd v21.4s, v21.4s, v6.4s\n"
"and v7.16b, v22.16b, v0.16b\n"
"and v8.16b, v16.16b, v0.16b\n"
"and v9.16b, v17.16b, v0.16b\n"
- "and v10.16b, v18.16b, v0.16b\n"
- "and v4.16b, v19.16b, v0.16b\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sshr v8.4s, v8.4s, #0x1f\n"
"sshr v9.4s, v9.4s, #0x1f\n"
- "sshr v10.4s, v10.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sqadd v20.4s, v20.4s, v5.4s\n"
- "sqadd v21.4s, v21.4s, v6.4s\n"
"sqadd v22.4s, v22.4s, v7.4s\n"
"sqadd v16.4s, v16.4s, v8.4s\n"
"sqadd v17.4s, v17.4s, v9.4s\n"
+ "and v10.16b, v18.16b, v0.16b\n"
+ "and v4.16b, v19.16b, v0.16b\n"
+ "sshr v10.4s, v10.4s, #0x1f\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
"sqadd v18.4s, v18.4s, v10.4s\n"
"sqadd v19.4s, v19.4s, v4.4s\n"
"54:" // Height 2: no shift correction
- "add x23, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
"srshl v23.4s, v23.4s, v0.4s\n"
+ "add x22, %x[qp], %[c_offset]\n"
+ "ld1r { v4.4s }, [x22]\n"
"srshl v20.4s, v20.4s, v0.4s\n"
+ "add x22, %x[qp], %[minval]\n"
"srshl v21.4s, v21.4s, v0.4s\n"
+ "ld1r { v5.4s }, [x22]\n"
+ "add x22, %x[qp], %[maxval]\n"
"srshl v22.4s, v22.4s, v0.4s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x23]\n"
+ "ld1r { v6.4s }, [x22]\n"
+ "cmp x9, #0x10\n"
"srshl v16.4s, v16.4s, v0.4s\n"
"srshl v17.4s, v17.4s, v0.4s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x23]\n"
- "srshl v18.4s, v18.4s, v0.4s\n"
- "srshl v19.4s, v19.4s, v0.4s\n"
- "cmp x9, #0x10\n"
"add v23.4s, v23.4s, v4.4s\n"
"add v20.4s, v20.4s, v4.4s\n"
"add v21.4s, v21.4s, v4.4s\n"
- "add v22.4s, v22.4s, v4.4s\n"
- "add v16.4s, v16.4s, v4.4s\n"
- "add v17.4s, v17.4s, v4.4s\n"
- "add v18.4s, v18.4s, v4.4s\n"
- "add v19.4s, v19.4s, v4.4s\n"
"smin v23.4s, v23.4s, v6.4s\n"
"smin v20.4s, v20.4s, v6.4s\n"
"smin v21.4s, v21.4s, v6.4s\n"
- "smin v22.4s, v22.4s, v6.4s\n"
- "smin v16.4s, v16.4s, v6.4s\n"
- "smin v17.4s, v17.4s, v6.4s\n"
- "smin v18.4s, v18.4s, v6.4s\n"
- "smin v19.4s, v19.4s, v6.4s\n"
"smax v23.4s, v23.4s, v5.4s\n"
"smax v20.4s, v20.4s, v5.4s\n"
"smax v21.4s, v21.4s, v5.4s\n"
+ "add v22.4s, v22.4s, v4.4s\n"
+ "add v16.4s, v16.4s, v4.4s\n"
+ "add v17.4s, v17.4s, v4.4s\n"
+ "smin v22.4s, v22.4s, v6.4s\n"
+ "smin v16.4s, v16.4s, v6.4s\n"
+ "smin v17.4s, v17.4s, v6.4s\n"
"smax v22.4s, v22.4s, v5.4s\n"
"smax v16.4s, v16.4s, v5.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
- "smax v18.4s, v18.4s, v5.4s\n"
- "smax v19.4s, v19.4s, v5.4s\n"
+ "srshl v18.4s, v18.4s, v0.4s\n"
+ "srshl v19.4s, v19.4s, v0.4s\n"
"uzp1 v23.8h, v23.8h, v20.8h\n"
"uzp1 v20.8h, v21.8h, v22.8h\n"
+ "add v18.4s, v18.4s, v4.4s\n"
+ "add v19.4s, v19.4s, v4.4s\n"
"uzp1 v16.8h, v16.8h, v17.8h\n"
- "uzp1 v17.8h, v18.8h, v19.8h\n"
+ "smin v18.4s, v18.4s, v6.4s\n"
+ "smin v19.4s, v19.4s, v6.4s\n"
"uzp1 v23.16b, v23.16b, v20.16b\n"
+ "smax v18.4s, v18.4s, v5.4s\n"
+ "smax v19.4s, v19.4s, v5.4s\n"
+ "uzp1 v17.8h, v18.8h, v19.8h\n"
"uzp1 v16.16b, v16.16b, v17.16b\n"
"bge 63f\n"
"tbz x9, #3, 58f\n"
- "str d23, [x27], #0x8\n"
- "str d16, [x22], #0x8\n"
+ "str d23, [x26], #0x8\n"
+ "str d16, [x21], #0x8\n"
"tbz x9, #2, 56f\n"
- "st1 { v23.s }[2], [x27], #0x4\n"
- "st1 { v16.s }[2], [x22], #0x4\n"
+ "st1 { v23.s }[2], [x26], #0x4\n"
+ "st1 { v16.s }[2], [x21], #0x4\n"
"tbz x9, #1, 55f\n"
- "st1 { v23.h }[6], [x27], #0x2\n"
- "st1 { v16.h }[6], [x22], #0x2\n"
+ "st1 { v23.h }[6], [x26], #0x2\n"
+ "st1 { v16.h }[6], [x21], #0x2\n"
"tbz x9, #0, 62f\n"
- "st1 { v23.b }[14], [x27]\n"
- "st1 { v16.b }[14], [x22]\n"
+ "st1 { v23.b }[14], [x26]\n"
+ "st1 { v16.b }[14], [x21]\n"
"b 62f\n"
"55:" // Height 2: Partial direct writeback: partial_1_12
"tbz x9, #0, 62f\n"
- "st1 { v23.b }[12], [x27]\n"
- "st1 { v16.b }[12], [x22]\n"
+ "st1 { v23.b }[12], [x26]\n"
+ "st1 { v16.b }[12], [x21]\n"
"b 62f\n"
"56:" // Height 2: Partial direct writeback: partial_2_8
"tbz x9, #1, 57f\n"
- "st1 { v23.h }[4], [x27], #0x2\n"
- "st1 { v16.h }[4], [x22], #0x2\n"
+ "st1 { v23.h }[4], [x26], #0x2\n"
+ "st1 { v16.h }[4], [x21], #0x2\n"
"tbz x9, #0, 62f\n"
- "st1 { v23.b }[10], [x27]\n"
- "st1 { v16.b }[10], [x22]\n"
+ "st1 { v23.b }[10], [x26]\n"
+ "st1 { v16.b }[10], [x21]\n"
"b 62f\n"
"57:" // Height 2: Partial direct writeback: partial_1_8
"tbz x9, #0, 62f\n"
- "st1 { v23.b }[8], [x27]\n"
- "st1 { v16.b }[8], [x22]\n"
+ "st1 { v23.b }[8], [x26]\n"
+ "st1 { v16.b }[8], [x21]\n"
"b 62f\n"
"58:" // Height 2: Partial direct writeback: partial_4_0
"tbz x9, #2, 60f\n"
- "str s23, [x27], #0x4\n"
- "str s16, [x22], #0x4\n"
+ "str s23, [x26], #0x4\n"
+ "str s16, [x21], #0x4\n"
"tbz x9, #1, 59f\n"
- "st1 { v23.h }[2], [x27], #0x2\n"
- "st1 { v16.h }[2], [x22], #0x2\n"
+ "st1 { v23.h }[2], [x26], #0x2\n"
+ "st1 { v16.h }[2], [x21], #0x2\n"
"tbz x9, #0, 62f\n"
- "st1 { v23.b }[6], [x27]\n"
- "st1 { v16.b }[6], [x22]\n"
+ "st1 { v23.b }[6], [x26]\n"
+ "st1 { v16.b }[6], [x21]\n"
"b 62f\n"
"59:" // Height 2: Partial direct writeback: partial_1_4
"tbz x9, #0, 62f\n"
- "st1 { v23.b }[4], [x27]\n"
- "st1 { v16.b }[4], [x22]\n"
+ "st1 { v23.b }[4], [x26]\n"
+ "st1 { v16.b }[4], [x21]\n"
"b 62f\n"
"60:" // Height 2: Partial direct writeback: partial_2_0
"tbz x9, #1, 61f\n"
- "str h23, [x27], #0x2\n"
- "str h16, [x22], #0x2\n"
+ "str h23, [x26], #0x2\n"
+ "str h16, [x21], #0x2\n"
"tbz x9, #0, 62f\n"
- "st1 { v23.b }[2], [x27]\n"
- "st1 { v16.b }[2], [x22]\n"
+ "st1 { v23.b }[2], [x26]\n"
+ "st1 { v16.b }[2], [x21]\n"
"b 62f\n"
"61:" // Height 2: Partial direct writeback: partial_1_0
- "str b23, [x27, #0x0]\n"
- "str b16, [x22, #0x0]\n"
+ "str b23, [x26, #0x0]\n"
+ "str b16, [x21, #0x0]\n"
"62:" // Height 2: Partial direct writeback: Done
"b 64f\n"
"63:" // Height 2: Full writeback
- "str q23, [x27, #0x0]\n"
- "add x27, x27, #0x10\n"
- "str q16, [x22, #0x0]\n"
+ "str q23, [x26, #0x0]\n"
+ "add x26, x26, #0x10\n"
+ "str q16, [x21, #0x0]\n"
"64:" // Height 2: Writeback done
"subs x9, x9, #0x10\n"
"bgt 34b\n"
"b 130f\n"
"65:" // Height 3
- "mov x10, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x27, %x[col_bias]\n"
"movi v12.4s, #0x0\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"bic %x[flags], %x[flags], #0x80000000\n"
"movi v13.4s, #0x0\n"
+ "mov x26, %x[output_ptr]\n"
"movi v15.16b, #0x1\n"
- "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
"66:" // Height 3: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -868,65 +870,65 @@ void a64_hybrid_u8qa_mmla_4x16 (
"movi v30.4s, #0x0\n"
"movi v31.4s, #0x0\n"
"67:" // Height 3: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"68:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 69f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "ldr x22, [x21, #0x10]\n"
- "cbnz x26, 70f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "cbnz x25, 70f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
"b 70f\n"
"69:" // Height 3: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
"70:" // Height 3: input setup done
- "cmp x25, #0x10\n"
+ "cmp x24, #0x10\n"
"blt 75f\n"
- "ldr q1, [x24, #0x0]\n"
- "ldr q2, [x23, #0x0]\n"
- "cmp x25, #0x20\n"
- "ldr q3, [x22, #0x0]\n"
- "ldr q5, [x28, #0x0]\n"
- "ldr q6, [x28, #0x10]\n"
- "ldr q7, [x28, #0x20]\n"
- "ldr q8, [x28, #0x30]\n"
- "ldr q9, [x28, #0x40]\n"
- "ldr q10, [x28, #0x50]\n"
+ "ldr q1, [x23, #0x0]\n"
+ "ldr q2, [x22, #0x0]\n"
+ "cmp x24, #0x20\n"
"blt 73f\n"
"71:" // Height 3: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x21, #0x0]\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e85a410 // ummla v16.4s, v0.16b, v5.16b\n"
+ "ldr q5, [x28, #0x0]\n"
+ "add x23, x23, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q6, [x28, #0x10]\n"
+ "add x22, x22, #0x10\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "ldr q7, [x28, #0x20]\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x6e85a410 // ummla v16.4s, v0.16b, v5.16b\n"
+ "ldr q8, [x28, #0x30]\n"
".inst 0x6e85a458 // ummla v24.4s, v2.16b, v5.16b\n"
- "ldr q5, [x28, #0x70]\n"
+ "ldr q9, [x28, #0x40]\n"
+ "ldr q10, [x28, #0x50]\n"
".inst 0x6e86a414 // ummla v20.4s, v0.16b, v6.16b\n"
- "trn2 v3.2d, v3.2d, v4.2d\n"
- "ldr q4, [x28, #0x60]\n"
".inst 0x6e86a45c // ummla v28.4s, v2.16b, v6.16b\n"
- "ldr q6, [x28, #0x80]\n"
+ "ldr q4, [x28, #0x60]\n"
".inst 0x6e87a411 // ummla v17.4s, v0.16b, v7.16b\n"
+ "ldr q5, [x28, #0x70]\n"
".inst 0x6e87a459 // ummla v25.4s, v2.16b, v7.16b\n"
- "ldr q7, [x28, #0x90]\n"
- "add x24, x24, #0x10\n"
+ "ldr q6, [x28, #0x80]\n"
".inst 0x6e88a415 // ummla v21.4s, v0.16b, v8.16b\n"
+ "ldr q7, [x28, #0x90]\n"
".inst 0x6e88a45d // ummla v29.4s, v2.16b, v8.16b\n"
"ldr q8, [x28, #0xa0]\n"
- "add x23, x23, #0x10\n"
".inst 0x6e89a412 // ummla v18.4s, v0.16b, v9.16b\n"
".inst 0x6e89a45a // ummla v26.4s, v2.16b, v9.16b\n"
"ldr q9, [x28, #0xb0]\n"
- "add x22, x22, #0x10\n"
".inst 0x6e8aa416 // ummla v22.4s, v0.16b, v10.16b\n"
".inst 0x6e8aa45e // ummla v30.4s, v2.16b, v10.16b\n"
"ldr q10, [x28, #0xc0]\n"
@@ -960,49 +962,49 @@ void a64_hybrid_u8qa_mmla_4x16 (
".inst 0x6e8f942b // udot v11.4s, v1.16b, v15.16b\n"
".inst 0x6e8f946d // udot v13.4s, v3.16b, v15.16b\n"
"72:" // Height 3: Multiply loop: unique 9: skip row sum
- "ldr q1, [x24, #0x0]\n"
- "ldr q2, [x23, #0x0]\n"
- "sub x25, x25, #0x10\n"
- "cmp x25, #0x20\n"
- "ldr q3, [x22, #0x0]\n"
- "ldr q5, [x28, #0x0]\n"
- "ldr q6, [x28, #0x10]\n"
- "ldr q7, [x28, #0x20]\n"
- "ldr q8, [x28, #0x30]\n"
- "ldr q9, [x28, #0x40]\n"
- "ldr q10, [x28, #0x50]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"prfm pldl1keep, [x23, #0x80]\n"
+ "sub x24, x24, #0x10\n"
"prfm pldl1keep, [x22, #0x80]\n"
+ "cmp x24, #0x20\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
+ "ldr q1, [x23, #0x0]\n"
+ "ldr q2, [x22, #0x0]\n"
"bge 71b\n"
"73:" // Height 3: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x21, #0x0]\n"
+ "sub x24, x24, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e85a410 // ummla v16.4s, v0.16b, v5.16b\n"
+ "ldr q5, [x28, #0x0]\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q6, [x28, #0x10]\n"
+ "add x23, x23, #0x10\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "ldr q7, [x28, #0x20]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x6e85a410 // ummla v16.4s, v0.16b, v5.16b\n"
+ "ldr q8, [x28, #0x30]\n"
+ "add x21, x21, #0x10\n"
".inst 0x6e85a458 // ummla v24.4s, v2.16b, v5.16b\n"
- "ldr q5, [x28, #0x70]\n"
+ "ldr q9, [x28, #0x40]\n"
+ "ldr q10, [x28, #0x50]\n"
".inst 0x6e86a414 // ummla v20.4s, v0.16b, v6.16b\n"
- "trn2 v3.2d, v3.2d, v4.2d\n"
- "ldr q4, [x28, #0x60]\n"
".inst 0x6e86a45c // ummla v28.4s, v2.16b, v6.16b\n"
- "ldr q6, [x28, #0x80]\n"
+ "ldr q4, [x28, #0x60]\n"
".inst 0x6e87a411 // ummla v17.4s, v0.16b, v7.16b\n"
+ "ldr q5, [x28, #0x70]\n"
".inst 0x6e87a459 // ummla v25.4s, v2.16b, v7.16b\n"
- "ldr q7, [x28, #0x90]\n"
- "sub x25, x25, #0x10\n"
+ "ldr q6, [x28, #0x80]\n"
".inst 0x6e88a415 // ummla v21.4s, v0.16b, v8.16b\n"
+ "ldr q7, [x28, #0x90]\n"
".inst 0x6e88a45d // ummla v29.4s, v2.16b, v8.16b\n"
"ldr q8, [x28, #0xa0]\n"
- "add x24, x24, #0x10\n"
".inst 0x6e89a412 // ummla v18.4s, v0.16b, v9.16b\n"
".inst 0x6e89a45a // ummla v26.4s, v2.16b, v9.16b\n"
"ldr q9, [x28, #0xb0]\n"
- "add x23, x23, #0x10\n"
".inst 0x6e8aa416 // ummla v22.4s, v0.16b, v10.16b\n"
".inst 0x6e8aa45e // ummla v30.4s, v2.16b, v10.16b\n"
"ldr q10, [x28, #0xc0]\n"
- "add x22, x22, #0x10\n"
".inst 0x6e84a413 // ummla v19.4s, v0.16b, v4.16b\n"
".inst 0x6e84a45b // ummla v27.4s, v2.16b, v4.16b\n"
"ldr q4, [x28, #0xd0]\n"
@@ -1033,42 +1035,43 @@ void a64_hybrid_u8qa_mmla_4x16 (
".inst 0x6e8f942b // udot v11.4s, v1.16b, v15.16b\n"
".inst 0x6e8f946d // udot v13.4s, v3.16b, v15.16b\n"
"74:" // Height 3: Multiply loop: unique 10: skip row sum
- "prfm pldl1keep, [x24, #0x80]\n"
"prfm pldl1keep, [x23, #0x80]\n"
"prfm pldl1keep, [x22, #0x80]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
"75:" // Height 3: Multiply loop: Main loop skip
- "cbz x25, 84f\n"
- "cmp x25, #0x8\n"
+ "cbz x24, 84f\n"
+ "cmp x24, #0x8\n"
"blt 78f\n"
"76:" // Height 3: Multiply loop: Odd block loop
- "ldr d1, [x24], #0x8\n"
- "ldr d2, [x23], #0x8\n"
+ "movi v7.16b, #0x0\n"
+ "ldr d1, [x23], #0x8\n"
+ "ldr d2, [x22], #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "ldr d3, [x22], #0x8\n"
+ "ldr d3, [x21], #0x8\n"
"trn1 v2.2d, v3.2d, v7.2d\n"
"tbnz %x[flags], #31, 77f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
"77:" // Height 3: Multiply loop: unique 11: skip row sum
"ldr q8, [x28, #0x0]\n"
- "ldr q9, [x28, #0x10]\n"
".inst 0x6e88a410 // ummla v16.4s, v0.16b, v8.16b\n"
+ "ldr q9, [x28, #0x10]\n"
+ "sub x24, x24, #0x8\n"
".inst 0x6e88a458 // ummla v24.4s, v2.16b, v8.16b\n"
"ldr q10, [x28, #0x20]\n"
+ "cmp x24, #0x8\n"
+ ".inst 0x6e89a414 // ummla v20.4s, v0.16b, v9.16b\n"
"ldr q4, [x28, #0x30]\n"
- "sub x25, x25, #0x8\n"
- "cmp x25, #0x8\n"
+ ".inst 0x6e89a45c // ummla v28.4s, v2.16b, v9.16b\n"
"ldr q5, [x28, #0x40]\n"
+ ".inst 0x6e8aa411 // ummla v17.4s, v0.16b, v10.16b\n"
"ldr q6, [x28, #0x50]\n"
- ".inst 0x6e89a414 // ummla v20.4s, v0.16b, v9.16b\n"
- ".inst 0x6e89a45c // ummla v28.4s, v2.16b, v9.16b\n"
+ ".inst 0x6e8aa459 // ummla v25.4s, v2.16b, v10.16b\n"
"ldr q7, [x28, #0x60]\n"
"ldr q8, [x28, #0x70]\n"
- ".inst 0x6e8aa411 // ummla v17.4s, v0.16b, v10.16b\n"
- ".inst 0x6e8aa459 // ummla v25.4s, v2.16b, v10.16b\n"
".inst 0x6e84a415 // ummla v21.4s, v0.16b, v4.16b\n"
- ".inst 0x6e84a45d // ummla v29.4s, v2.16b, v4.16b\n"
"add x28, x28, #0x80\n"
+ ".inst 0x6e84a45d // ummla v29.4s, v2.16b, v4.16b\n"
".inst 0x6e85a412 // ummla v18.4s, v0.16b, v5.16b\n"
".inst 0x6e85a45a // ummla v26.4s, v2.16b, v5.16b\n"
".inst 0x6e86a416 // ummla v22.4s, v0.16b, v6.16b\n"
@@ -1078,42 +1081,43 @@ void a64_hybrid_u8qa_mmla_4x16 (
".inst 0x6e88a417 // ummla v23.4s, v0.16b, v8.16b\n"
".inst 0x6e88a45f // ummla v31.4s, v2.16b, v8.16b\n"
"bge 76b\n"
+ "cbz x24, 84f\n"
"78:" // Height 3: Multiply loop: Skip odd blocks
- "cbz x25, 84f\n"
- "tbz x25, #2, 80f\n"
- "ldr s1, [x24], #0x4\n"
- "ldr s2, [x23], #0x4\n"
- "ldr s3, [x22], #0x4\n"
- "tbz x25, #1, 79f\n"
- "ld1 { v1.h }[2], [x24], #0x2\n"
- "ld1 { v2.h }[2], [x23], #0x2\n"
- "ld1 { v3.h }[2], [x22], #0x2\n"
- "tbz x25, #0, 82f\n"
- "ld1 { v1.b }[6], [x24]\n"
- "ld1 { v2.b }[6], [x23]\n"
- "ld1 { v3.b }[6], [x22]\n"
+ "tbz x24, #2, 80f\n"
+ "ldr s1, [x23], #0x4\n"
+ "ldr s2, [x22], #0x4\n"
+ "ldr s3, [x21], #0x4\n"
+ "tbz x24, #1, 79f\n"
+ "ld1 { v1.h }[2], [x23], #0x2\n"
+ "ld1 { v2.h }[2], [x22], #0x2\n"
+ "ld1 { v3.h }[2], [x21], #0x2\n"
+ "tbz x24, #0, 82f\n"
+ "ld1 { v1.b }[6], [x23]\n"
+ "ld1 { v2.b }[6], [x22]\n"
+ "ld1 { v3.b }[6], [x21]\n"
"b 82f\n"
"79:" // Height 3: Multiply loop: Ragged operand read: partial_1_4
- "tbz x25, #0, 82f\n"
- "ld1 { v1.b }[4], [x24]\n"
- "ld1 { v2.b }[4], [x23]\n"
- "ld1 { v3.b }[4], [x22]\n"
+ "tbz x24, #0, 82f\n"
+ "ld1 { v1.b }[4], [x23]\n"
+ "ld1 { v2.b }[4], [x22]\n"
+ "ld1 { v3.b }[4], [x21]\n"
"b 82f\n"
"80:" // Height 3: Multiply loop: Ragged operand read: partial_2_0
- "tbz x25, #1, 81f\n"
- "ldr h1, [x24], #0x2\n"
- "ldr h2, [x23], #0x2\n"
- "ldr h3, [x22], #0x2\n"
- "tbz x25, #0, 82f\n"
- "ld1 { v1.b }[2], [x24]\n"
- "ld1 { v2.b }[2], [x23]\n"
- "ld1 { v3.b }[2], [x22]\n"
+ "tbz x24, #1, 81f\n"
+ "ldr h1, [x23], #0x2\n"
+ "ldr h2, [x22], #0x2\n"
+ "ldr h3, [x21], #0x2\n"
+ "tbz x24, #0, 82f\n"
+ "ld1 { v1.b }[2], [x23]\n"
+ "ld1 { v2.b }[2], [x22]\n"
+ "ld1 { v3.b }[2], [x21]\n"
"b 82f\n"
"81:" // Height 3: Multiply loop: Ragged operand read: partial_1_0
- "ldr b1, [x24, #0x0]\n"
- "ldr b2, [x23, #0x0]\n"
- "ldr b3, [x22, #0x0]\n"
+ "ldr b1, [x23, #0x0]\n"
+ "ldr b2, [x22, #0x0]\n"
+ "ldr b3, [x21, #0x0]\n"
"82:" // Height 3: Multiply loop: Ragged operand read: Done
+ "movi v9.16b, #0x0\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn1 v2.2d, v3.2d, v9.2d\n"
"tbnz %x[flags], #31, 83f\n"
@@ -1121,24 +1125,24 @@ void a64_hybrid_u8qa_mmla_4x16 (
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
"83:" // Height 3: Multiply loop: unique 12: skip row sum
"ldr q10, [x28, #0x0]\n"
- "ldr q4, [x28, #0x10]\n"
".inst 0x6e8aa410 // ummla v16.4s, v0.16b, v10.16b\n"
+ "ldr q4, [x28, #0x10]\n"
".inst 0x6e8aa458 // ummla v24.4s, v2.16b, v10.16b\n"
"ldr q5, [x28, #0x20]\n"
"ldr q6, [x28, #0x30]\n"
".inst 0x6e84a414 // ummla v20.4s, v0.16b, v4.16b\n"
- ".inst 0x6e84a45c // ummla v28.4s, v2.16b, v4.16b\n"
"ldr q7, [x28, #0x40]\n"
+ ".inst 0x6e84a45c // ummla v28.4s, v2.16b, v4.16b\n"
"ldr q8, [x28, #0x50]\n"
".inst 0x6e85a411 // ummla v17.4s, v0.16b, v5.16b\n"
- ".inst 0x6e85a459 // ummla v25.4s, v2.16b, v5.16b\n"
"ldr q9, [x28, #0x60]\n"
+ ".inst 0x6e85a459 // ummla v25.4s, v2.16b, v5.16b\n"
"ldr q10, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
".inst 0x6e86a415 // ummla v21.4s, v0.16b, v6.16b\n"
".inst 0x6e86a45d // ummla v29.4s, v2.16b, v6.16b\n"
".inst 0x6e87a412 // ummla v18.4s, v0.16b, v7.16b\n"
".inst 0x6e87a45a // ummla v26.4s, v2.16b, v7.16b\n"
- "add x28, x28, #0x80\n"
".inst 0x6e88a416 // ummla v22.4s, v0.16b, v8.16b\n"
".inst 0x6e88a45e // ummla v30.4s, v2.16b, v8.16b\n"
".inst 0x6e89a413 // ummla v19.4s, v0.16b, v9.16b\n"
@@ -1146,21 +1150,21 @@ void a64_hybrid_u8qa_mmla_4x16 (
".inst 0x6e8aa417 // ummla v23.4s, v0.16b, v10.16b\n"
".inst 0x6e8aa45f // ummla v31.4s, v2.16b, v10.16b\n"
"84:" // Height 3: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 68b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 v4.2d, v16.2d, v20.2d\n"
- "add x22, x27, x20\n"
- "add x21, x22, x20\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x21, x26, x19\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
- "prfm pstl1keep, [x27, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
+ "add x20, x21, x19\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
- "prfm pstl1keep, [x21, #0x0]\n"
+ "prfm pstl1keep, [x20, #0x0]\n"
"uzp2 v18.2d, v18.2d, v22.2d\n"
"uzp1 v22.2d, v19.2d, v23.2d\n"
"uzp2 v19.2d, v19.2d, v23.2d\n"
@@ -1170,37 +1174,37 @@ void a64_hybrid_u8qa_mmla_4x16 (
"uzp1 v27.2d, v27.2d, v31.2d\n"
"mov v31.16b, v4.16b\n"
"tbnz %x[flags], #31, 85f\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1r { v3.4s }, [x23]\n"
"addp v11.4s, v11.4s, v11.4s\n"
+ "add x22, %x[qp], %[b_offset]\n"
+ "ld1r { v3.4s }, [x22]\n"
"addp v13.4s, v13.4s, v13.4s\n"
- "neg v3.4s, v3.4s\n"
"dup v12.4s, v11.s[3]\n"
"dup v11.4s, v11.s[0]\n"
+ "neg v3.4s, v3.4s\n"
"dup v13.4s, v13.s[0]\n"
"mul v11.4s, v11.4s, v3.4s\n"
"mul v12.4s, v12.4s, v3.4s\n"
"mul v13.4s, v13.4s, v3.4s\n"
"85:" // Height 3: skip row sum fixup
- "ldr q0, [x10, #0x0]\n"
- "ldr q1, [x10, #0x10]\n"
"add v31.4s, v31.4s, v11.4s\n"
+ "ldr q0, [x27, #0x0]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"add v20.4s, v20.4s, v11.4s\n"
- "ldr q2, [x10, #0x20]\n"
- "ldr q3, [x10, #0x30]\n"
+ "ldr q1, [x27, #0x10]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
"add v21.4s, v21.4s, v11.4s\n"
+ "ldr q2, [x27, #0x20]\n"
+ "add x22, %x[qp], %[per_layer_mul]\n"
"add v22.4s, v22.4s, v11.4s\n"
+ "ldr q3, [x27, #0x30]\n"
+ "add x27, x27, #0x40\n"
"add v16.4s, v16.4s, v12.4s\n"
+ "ld1r { v4.4s }, [x22]\n"
"add v17.4s, v17.4s, v12.4s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x23]\n"
"add v18.4s, v18.4s, v12.4s\n"
"add v19.4s, v19.4s, v12.4s\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
"add v24.4s, v24.4s, v13.4s\n"
"add v25.4s, v25.4s, v13.4s\n"
- "add x10, x10, #0x40\n"
"add v26.4s, v26.4s, v13.4s\n"
"add v27.4s, v27.4s, v13.4s\n"
"add v31.4s, v31.4s, v0.4s\n"
@@ -1232,98 +1236,98 @@ void a64_hybrid_u8qa_mmla_4x16 (
"and v4.16b, v31.16b, v0.16b\n"
"and v5.16b, v20.16b, v0.16b\n"
"and v6.16b, v21.16b, v0.16b\n"
- "and v7.16b, v22.16b, v0.16b\n"
- "and v8.16b, v16.16b, v0.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
"sqadd v31.4s, v31.4s, v4.4s\n"
"sqadd v20.4s, v20.4s, v5.4s\n"
"sqadd v21.4s, v21.4s, v6.4s\n"
+ "and v7.16b, v22.16b, v0.16b\n"
+ "and v8.16b, v16.16b, v0.16b\n"
+ "and v9.16b, v17.16b, v0.16b\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
+ "sshr v8.4s, v8.4s, #0x1f\n"
+ "sshr v9.4s, v9.4s, #0x1f\n"
"sqadd v22.4s, v22.4s, v7.4s\n"
"sqadd v16.4s, v16.4s, v8.4s\n"
- "and v9.16b, v17.16b, v0.16b\n"
+ "sqadd v17.4s, v17.4s, v9.4s\n"
"and v10.16b, v18.16b, v0.16b\n"
"and v4.16b, v19.16b, v0.16b\n"
"and v5.16b, v24.16b, v0.16b\n"
- "and v6.16b, v25.16b, v0.16b\n"
- "and v7.16b, v26.16b, v0.16b\n"
- "and v8.16b, v27.16b, v0.16b\n"
- "sshr v9.4s, v9.4s, #0x1f\n"
"sshr v10.4s, v10.4s, #0x1f\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sqadd v17.4s, v17.4s, v9.4s\n"
"sqadd v18.4s, v18.4s, v10.4s\n"
"sqadd v19.4s, v19.4s, v4.4s\n"
"sqadd v24.4s, v24.4s, v5.4s\n"
+ "and v6.16b, v25.16b, v0.16b\n"
+ "and v7.16b, v26.16b, v0.16b\n"
+ "and v8.16b, v27.16b, v0.16b\n"
+ "sshr v6.4s, v6.4s, #0x1f\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
+ "sshr v8.4s, v8.4s, #0x1f\n"
"sqadd v25.4s, v25.4s, v6.4s\n"
"sqadd v26.4s, v26.4s, v7.4s\n"
"sqadd v27.4s, v27.4s, v8.4s\n"
"86:" // Height 3: no shift correction
- "add x23, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
"srshl v31.4s, v31.4s, v0.4s\n"
+ "add x22, %x[qp], %[c_offset]\n"
+ "ld1r { v4.4s }, [x22]\n"
"srshl v20.4s, v20.4s, v0.4s\n"
+ "add x22, %x[qp], %[minval]\n"
"srshl v21.4s, v21.4s, v0.4s\n"
+ "ld1r { v5.4s }, [x22]\n"
+ "add x22, %x[qp], %[maxval]\n"
"srshl v22.4s, v22.4s, v0.4s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x23]\n"
+ "ld1r { v6.4s }, [x22]\n"
+ "cmp x9, #0x10\n"
"srshl v16.4s, v16.4s, v0.4s\n"
"srshl v17.4s, v17.4s, v0.4s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x23]\n"
- "srshl v18.4s, v18.4s, v0.4s\n"
- "srshl v19.4s, v19.4s, v0.4s\n"
- "cmp x9, #0x10\n"
- "srshl v24.4s, v24.4s, v0.4s\n"
- "srshl v25.4s, v25.4s, v0.4s\n"
- "srshl v26.4s, v26.4s, v0.4s\n"
- "srshl v27.4s, v27.4s, v0.4s\n"
"add v31.4s, v31.4s, v4.4s\n"
"add v20.4s, v20.4s, v4.4s\n"
"add v21.4s, v21.4s, v4.4s\n"
- "add v22.4s, v22.4s, v4.4s\n"
- "add v16.4s, v16.4s, v4.4s\n"
- "add v17.4s, v17.4s, v4.4s\n"
- "add v18.4s, v18.4s, v4.4s\n"
- "add v19.4s, v19.4s, v4.4s\n"
- "add v24.4s, v24.4s, v4.4s\n"
- "add v25.4s, v25.4s, v4.4s\n"
- "add v26.4s, v26.4s, v4.4s\n"
- "add v27.4s, v27.4s, v4.4s\n"
"smin v31.4s, v31.4s, v6.4s\n"
"smin v20.4s, v20.4s, v6.4s\n"
"smin v21.4s, v21.4s, v6.4s\n"
- "smin v22.4s, v22.4s, v6.4s\n"
- "smin v16.4s, v16.4s, v6.4s\n"
- "smin v17.4s, v17.4s, v6.4s\n"
- "smin v18.4s, v18.4s, v6.4s\n"
- "smin v19.4s, v19.4s, v6.4s\n"
- "smin v24.4s, v24.4s, v6.4s\n"
- "smin v25.4s, v25.4s, v6.4s\n"
- "smin v26.4s, v26.4s, v6.4s\n"
- "smin v27.4s, v27.4s, v6.4s\n"
"smax v31.4s, v31.4s, v5.4s\n"
"smax v20.4s, v20.4s, v5.4s\n"
"smax v21.4s, v21.4s, v5.4s\n"
+ "add v22.4s, v22.4s, v4.4s\n"
+ "add v16.4s, v16.4s, v4.4s\n"
+ "add v17.4s, v17.4s, v4.4s\n"
+ "smin v22.4s, v22.4s, v6.4s\n"
+ "smin v16.4s, v16.4s, v6.4s\n"
+ "smin v17.4s, v17.4s, v6.4s\n"
"smax v22.4s, v22.4s, v5.4s\n"
"smax v16.4s, v16.4s, v5.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
+ "srshl v18.4s, v18.4s, v0.4s\n"
+ "srshl v19.4s, v19.4s, v0.4s\n"
+ "srshl v24.4s, v24.4s, v0.4s\n"
+ "srshl v25.4s, v25.4s, v0.4s\n"
+ "add v18.4s, v18.4s, v4.4s\n"
+ "add v19.4s, v19.4s, v4.4s\n"
+ "add v24.4s, v24.4s, v4.4s\n"
+ "smin v18.4s, v18.4s, v6.4s\n"
+ "smin v19.4s, v19.4s, v6.4s\n"
+ "smin v24.4s, v24.4s, v6.4s\n"
"smax v18.4s, v18.4s, v5.4s\n"
"smax v19.4s, v19.4s, v5.4s\n"
"smax v24.4s, v24.4s, v5.4s\n"
- "smax v25.4s, v25.4s, v5.4s\n"
- "smax v26.4s, v26.4s, v5.4s\n"
- "smax v27.4s, v27.4s, v5.4s\n"
+ "add v25.4s, v25.4s, v4.4s\n"
+ "srshl v26.4s, v26.4s, v0.4s\n"
+ "srshl v27.4s, v27.4s, v0.4s\n"
+ "smin v25.4s, v25.4s, v6.4s\n"
"uzp1 v31.8h, v31.8h, v20.8h\n"
+ "add v26.4s, v26.4s, v4.4s\n"
+ "smax v25.4s, v25.4s, v5.4s\n"
+ "add v27.4s, v27.4s, v4.4s\n"
+ "smin v26.4s, v26.4s, v6.4s\n"
"uzp1 v20.8h, v21.8h, v22.8h\n"
+ "smin v27.4s, v27.4s, v6.4s\n"
+ "smax v26.4s, v26.4s, v5.4s\n"
"uzp1 v16.8h, v16.8h, v17.8h\n"
+ "smax v27.4s, v27.4s, v5.4s\n"
"uzp1 v17.8h, v18.8h, v19.8h\n"
"uzp1 v24.8h, v24.8h, v25.8h\n"
"uzp1 v25.8h, v26.8h, v27.8h\n"
@@ -1332,103 +1336,103 @@ void a64_hybrid_u8qa_mmla_4x16 (
"uzp1 v24.16b, v24.16b, v25.16b\n"
"bge 95f\n"
"tbz x9, #3, 90f\n"
- "str d31, [x27], #0x8\n"
- "str d16, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
+ "str d31, [x26], #0x8\n"
+ "str d16, [x21], #0x8\n"
+ "str d24, [x20], #0x8\n"
"tbz x9, #2, 88f\n"
- "st1 { v31.s }[2], [x27], #0x4\n"
- "st1 { v16.s }[2], [x22], #0x4\n"
- "st1 { v24.s }[2], [x21], #0x4\n"
+ "st1 { v31.s }[2], [x26], #0x4\n"
+ "st1 { v16.s }[2], [x21], #0x4\n"
+ "st1 { v24.s }[2], [x20], #0x4\n"
"tbz x9, #1, 87f\n"
- "st1 { v31.h }[6], [x27], #0x2\n"
- "st1 { v16.h }[6], [x22], #0x2\n"
- "st1 { v24.h }[6], [x21], #0x2\n"
+ "st1 { v31.h }[6], [x26], #0x2\n"
+ "st1 { v16.h }[6], [x21], #0x2\n"
+ "st1 { v24.h }[6], [x20], #0x2\n"
"tbz x9, #0, 94f\n"
- "st1 { v31.b }[14], [x27]\n"
- "st1 { v16.b }[14], [x22]\n"
- "st1 { v24.b }[14], [x21]\n"
+ "st1 { v31.b }[14], [x26]\n"
+ "st1 { v16.b }[14], [x21]\n"
+ "st1 { v24.b }[14], [x20]\n"
"b 94f\n"
"87:" // Height 3: Partial direct writeback: partial_1_12
"tbz x9, #0, 94f\n"
- "st1 { v31.b }[12], [x27]\n"
- "st1 { v16.b }[12], [x22]\n"
- "st1 { v24.b }[12], [x21]\n"
+ "st1 { v31.b }[12], [x26]\n"
+ "st1 { v16.b }[12], [x21]\n"
+ "st1 { v24.b }[12], [x20]\n"
"b 94f\n"
"88:" // Height 3: Partial direct writeback: partial_2_8
"tbz x9, #1, 89f\n"
- "st1 { v31.h }[4], [x27], #0x2\n"
- "st1 { v16.h }[4], [x22], #0x2\n"
- "st1 { v24.h }[4], [x21], #0x2\n"
+ "st1 { v31.h }[4], [x26], #0x2\n"
+ "st1 { v16.h }[4], [x21], #0x2\n"
+ "st1 { v24.h }[4], [x20], #0x2\n"
"tbz x9, #0, 94f\n"
- "st1 { v31.b }[10], [x27]\n"
- "st1 { v16.b }[10], [x22]\n"
- "st1 { v24.b }[10], [x21]\n"
+ "st1 { v31.b }[10], [x26]\n"
+ "st1 { v16.b }[10], [x21]\n"
+ "st1 { v24.b }[10], [x20]\n"
"b 94f\n"
"89:" // Height 3: Partial direct writeback: partial_1_8
"tbz x9, #0, 94f\n"
- "st1 { v31.b }[8], [x27]\n"
- "st1 { v16.b }[8], [x22]\n"
- "st1 { v24.b }[8], [x21]\n"
+ "st1 { v31.b }[8], [x26]\n"
+ "st1 { v16.b }[8], [x21]\n"
+ "st1 { v24.b }[8], [x20]\n"
"b 94f\n"
"90:" // Height 3: Partial direct writeback: partial_4_0
"tbz x9, #2, 92f\n"
- "str s31, [x27], #0x4\n"
- "str s16, [x22], #0x4\n"
- "str s24, [x21], #0x4\n"
+ "str s31, [x26], #0x4\n"
+ "str s16, [x21], #0x4\n"
+ "str s24, [x20], #0x4\n"
"tbz x9, #1, 91f\n"
- "st1 { v31.h }[2], [x27], #0x2\n"
- "st1 { v16.h }[2], [x22], #0x2\n"
- "st1 { v24.h }[2], [x21], #0x2\n"
+ "st1 { v31.h }[2], [x26], #0x2\n"
+ "st1 { v16.h }[2], [x21], #0x2\n"
+ "st1 { v24.h }[2], [x20], #0x2\n"
"tbz x9, #0, 94f\n"
- "st1 { v31.b }[6], [x27]\n"
- "st1 { v16.b }[6], [x22]\n"
- "st1 { v24.b }[6], [x21]\n"
+ "st1 { v31.b }[6], [x26]\n"
+ "st1 { v16.b }[6], [x21]\n"
+ "st1 { v24.b }[6], [x20]\n"
"b 94f\n"
"91:" // Height 3: Partial direct writeback: partial_1_4
"tbz x9, #0, 94f\n"
- "st1 { v31.b }[4], [x27]\n"
- "st1 { v16.b }[4], [x22]\n"
- "st1 { v24.b }[4], [x21]\n"
+ "st1 { v31.b }[4], [x26]\n"
+ "st1 { v16.b }[4], [x21]\n"
+ "st1 { v24.b }[4], [x20]\n"
"b 94f\n"
"92:" // Height 3: Partial direct writeback: partial_2_0
"tbz x9, #1, 93f\n"
- "str h31, [x27], #0x2\n"
- "str h16, [x22], #0x2\n"
- "str h24, [x21], #0x2\n"
+ "str h31, [x26], #0x2\n"
+ "str h16, [x21], #0x2\n"
+ "str h24, [x20], #0x2\n"
"tbz x9, #0, 94f\n"
- "st1 { v31.b }[2], [x27]\n"
- "st1 { v16.b }[2], [x22]\n"
- "st1 { v24.b }[2], [x21]\n"
+ "st1 { v31.b }[2], [x26]\n"
+ "st1 { v16.b }[2], [x21]\n"
+ "st1 { v24.b }[2], [x20]\n"
"b 94f\n"
"93:" // Height 3: Partial direct writeback: partial_1_0
- "str b31, [x27, #0x0]\n"
- "str b16, [x22, #0x0]\n"
- "str b24, [x21, #0x0]\n"
+ "str b31, [x26, #0x0]\n"
+ "str b16, [x21, #0x0]\n"
+ "str b24, [x20, #0x0]\n"
"94:" // Height 3: Partial direct writeback: Done
"b 96f\n"
"95:" // Height 3: Full writeback
- "str q31, [x27, #0x0]\n"
- "add x27, x27, #0x10\n"
- "str q16, [x22, #0x0]\n"
- "str q24, [x21, #0x0]\n"
+ "str q31, [x26, #0x0]\n"
+ "add x26, x26, #0x10\n"
+ "str q16, [x21, #0x0]\n"
+ "str q24, [x20, #0x0]\n"
"96:" // Height 3: Writeback done
"subs x9, x9, #0x10\n"
"bgt 66b\n"
"b 130f\n"
"97:" // Height 4
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x4\n"
- "mov x10, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x27, %x[col_bias]\n"
"movi v12.4s, #0x0\n"
- "movi v13.4s, #0x0\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"bic %x[flags], %x[flags], #0x80000000\n"
- "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "movi v13.4s, #0x0\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x26, %x[output_ptr]\n"
"movi v14.4s, #0x0\n"
+ "mov x19, #0x4\n"
"movi v15.16b, #0x1\n"
- "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"98:" // Height 4: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -1447,70 +1451,70 @@ void a64_hybrid_u8qa_mmla_4x16 (
"movi v30.4s, #0x0\n"
"movi v31.4s, #0x0\n"
"99:" // Height 4: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"100:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 101f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "ldr x22, [x21, #0x10]\n"
- "ldr x21, [x21, #0x18]\n"
- "cbnz x26, 102f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
- "add x21, x21, x20\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "ldr x20, [x20, #0x18]\n"
+ "cbnz x25, 102f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
+ "add x20, x20, x19\n"
"b 102f\n"
"101:" // Height 4: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
+ "add x20, x21, x19\n"
"102:" // Height 4: input setup done
- "cmp x25, #0x10\n"
+ "cmp x24, #0x10\n"
"blt 107f\n"
- "ldr q1, [x24, #0x0]\n"
- "ldr q2, [x23, #0x0]\n"
- "cmp x25, #0x20\n"
- "ldr q3, [x22, #0x0]\n"
- "ldr q4, [x21, #0x0]\n"
- "ldr q5, [x28, #0x0]\n"
- "ldr q6, [x28, #0x10]\n"
- "ldr q7, [x28, #0x20]\n"
- "ldr q8, [x28, #0x30]\n"
- "ldr q9, [x28, #0x40]\n"
- "ldr q10, [x28, #0x50]\n"
+ "ldr q1, [x23, #0x0]\n"
+ "ldr q2, [x22, #0x0]\n"
+ "cmp x24, #0x20\n"
"blt 105f\n"
"103:" // Height 4: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x21, #0x0]\n"
+ "add x23, x23, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e85a410 // ummla v16.4s, v0.16b, v5.16b\n"
- "add x24, x24, #0x10\n"
+ "ldr q4, [x20, #0x0]\n"
+ "add x22, x22, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q5, [x28, #0x0]\n"
+ "add x21, x21, #0x10\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- "ldr q4, [x28, #0x60]\n"
+ "ldr q6, [x28, #0x10]\n"
+ "add x20, x20, #0x10\n"
+ ".inst 0x6e85a410 // ummla v16.4s, v0.16b, v5.16b\n"
+ "ldr q7, [x28, #0x20]\n"
".inst 0x6e85a458 // ummla v24.4s, v2.16b, v5.16b\n"
- "ldr q5, [x28, #0x70]\n"
+ "ldr q8, [x28, #0x30]\n"
".inst 0x6e86a414 // ummla v20.4s, v0.16b, v6.16b\n"
+ "ldr q9, [x28, #0x40]\n"
".inst 0x6e86a45c // ummla v28.4s, v2.16b, v6.16b\n"
- "ldr q6, [x28, #0x80]\n"
+ "ldr q10, [x28, #0x50]\n"
+ "ldr q4, [x28, #0x60]\n"
".inst 0x6e87a411 // ummla v17.4s, v0.16b, v7.16b\n"
".inst 0x6e87a459 // ummla v25.4s, v2.16b, v7.16b\n"
- "ldr q7, [x28, #0x90]\n"
- "add x23, x23, #0x10\n"
+ "ldr q5, [x28, #0x70]\n"
".inst 0x6e88a415 // ummla v21.4s, v0.16b, v8.16b\n"
+ "ldr q6, [x28, #0x80]\n"
".inst 0x6e88a45d // ummla v29.4s, v2.16b, v8.16b\n"
- "ldr q8, [x28, #0xa0]\n"
- "add x22, x22, #0x10\n"
+ "ldr q7, [x28, #0x90]\n"
".inst 0x6e89a412 // ummla v18.4s, v0.16b, v9.16b\n"
+ "ldr q8, [x28, #0xa0]\n"
".inst 0x6e89a45a // ummla v26.4s, v2.16b, v9.16b\n"
"ldr q9, [x28, #0xb0]\n"
- "add x21, x21, #0x10\n"
".inst 0x6e8aa416 // ummla v22.4s, v0.16b, v10.16b\n"
".inst 0x6e8aa45e // ummla v30.4s, v2.16b, v10.16b\n"
"ldr q10, [x28, #0xc0]\n"
@@ -1544,52 +1548,52 @@ void a64_hybrid_u8qa_mmla_4x16 (
".inst 0x6e8f942b // udot v11.4s, v1.16b, v15.16b\n"
".inst 0x6e8f946d // udot v13.4s, v3.16b, v15.16b\n"
"104:" // Height 4: Multiply loop: unique 13: skip row sum
- "ldr q1, [x24, #0x0]\n"
- "ldr q2, [x23, #0x0]\n"
- "sub x25, x25, #0x10\n"
- "cmp x25, #0x20\n"
- "ldr q3, [x22, #0x0]\n"
- "ldr q4, [x21, #0x0]\n"
- "ldr q5, [x28, #0x0]\n"
- "ldr q6, [x28, #0x10]\n"
- "ldr q7, [x28, #0x20]\n"
- "ldr q8, [x28, #0x30]\n"
- "ldr q9, [x28, #0x40]\n"
- "ldr q10, [x28, #0x50]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"prfm pldl1keep, [x23, #0x80]\n"
+ "sub x24, x24, #0x10\n"
"prfm pldl1keep, [x22, #0x80]\n"
+ "cmp x24, #0x20\n"
"prfm pldl1keep, [x21, #0x80]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
+ "ldr q1, [x23, #0x0]\n"
+ "ldr q2, [x22, #0x0]\n"
"bge 103b\n"
"105:" // Height 4: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x21, #0x0]\n"
+ "sub x24, x24, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e85a410 // ummla v16.4s, v0.16b, v5.16b\n"
- "sub x25, x25, #0x10\n"
+ "ldr q4, [x20, #0x0]\n"
+ "add x23, x23, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q5, [x28, #0x0]\n"
+ "add x22, x22, #0x10\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- "ldr q4, [x28, #0x60]\n"
+ "ldr q6, [x28, #0x10]\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x6e85a410 // ummla v16.4s, v0.16b, v5.16b\n"
+ "ldr q7, [x28, #0x20]\n"
+ "add x20, x20, #0x10\n"
".inst 0x6e85a458 // ummla v24.4s, v2.16b, v5.16b\n"
- "ldr q5, [x28, #0x70]\n"
+ "ldr q8, [x28, #0x30]\n"
".inst 0x6e86a414 // ummla v20.4s, v0.16b, v6.16b\n"
+ "ldr q9, [x28, #0x40]\n"
".inst 0x6e86a45c // ummla v28.4s, v2.16b, v6.16b\n"
- "ldr q6, [x28, #0x80]\n"
+ "ldr q10, [x28, #0x50]\n"
+ "ldr q4, [x28, #0x60]\n"
".inst 0x6e87a411 // ummla v17.4s, v0.16b, v7.16b\n"
".inst 0x6e87a459 // ummla v25.4s, v2.16b, v7.16b\n"
- "ldr q7, [x28, #0x90]\n"
- "add x24, x24, #0x10\n"
+ "ldr q5, [x28, #0x70]\n"
".inst 0x6e88a415 // ummla v21.4s, v0.16b, v8.16b\n"
+ "ldr q6, [x28, #0x80]\n"
".inst 0x6e88a45d // ummla v29.4s, v2.16b, v8.16b\n"
- "ldr q8, [x28, #0xa0]\n"
- "add x23, x23, #0x10\n"
+ "ldr q7, [x28, #0x90]\n"
".inst 0x6e89a412 // ummla v18.4s, v0.16b, v9.16b\n"
+ "ldr q8, [x28, #0xa0]\n"
".inst 0x6e89a45a // ummla v26.4s, v2.16b, v9.16b\n"
"ldr q9, [x28, #0xb0]\n"
- "add x22, x22, #0x10\n"
".inst 0x6e8aa416 // ummla v22.4s, v0.16b, v10.16b\n"
".inst 0x6e8aa45e // ummla v30.4s, v2.16b, v10.16b\n"
"ldr q10, [x28, #0xc0]\n"
- "add x21, x21, #0x10\n"
".inst 0x6e84a413 // ummla v19.4s, v0.16b, v4.16b\n"
".inst 0x6e84a45b // ummla v27.4s, v2.16b, v4.16b\n"
"ldr q4, [x28, #0xd0]\n"
@@ -1620,44 +1624,44 @@ void a64_hybrid_u8qa_mmla_4x16 (
".inst 0x6e8f942b // udot v11.4s, v1.16b, v15.16b\n"
".inst 0x6e8f946d // udot v13.4s, v3.16b, v15.16b\n"
"106:" // Height 4: Multiply loop: unique 14: skip row sum
- "prfm pldl1keep, [x24, #0x80]\n"
"prfm pldl1keep, [x23, #0x80]\n"
"prfm pldl1keep, [x22, #0x80]\n"
"prfm pldl1keep, [x21, #0x80]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
"107:" // Height 4: Multiply loop: Main loop skip
- "cbz x25, 116f\n"
- "cmp x25, #0x8\n"
+ "cbz x24, 116f\n"
+ "cmp x24, #0x8\n"
"blt 110f\n"
"108:" // Height 4: Multiply loop: Odd block loop
- "ldr d1, [x24], #0x8\n"
- "ldr d2, [x23], #0x8\n"
+ "ldr d1, [x23], #0x8\n"
+ "ldr d2, [x22], #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "ldr d3, [x22], #0x8\n"
- "ldr d7, [x21], #0x8\n"
+ "ldr d3, [x21], #0x8\n"
+ "ldr d7, [x20], #0x8\n"
"trn1 v2.2d, v3.2d, v7.2d\n"
"tbnz %x[flags], #31, 109f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
"109:" // Height 4: Multiply loop: unique 15: skip row sum
"ldr q8, [x28, #0x0]\n"
- "ldr q9, [x28, #0x10]\n"
".inst 0x6e88a410 // ummla v16.4s, v0.16b, v8.16b\n"
+ "ldr q9, [x28, #0x10]\n"
+ "sub x24, x24, #0x8\n"
".inst 0x6e88a458 // ummla v24.4s, v2.16b, v8.16b\n"
"ldr q10, [x28, #0x20]\n"
+ "cmp x24, #0x8\n"
+ ".inst 0x6e89a414 // ummla v20.4s, v0.16b, v9.16b\n"
"ldr q4, [x28, #0x30]\n"
- "sub x25, x25, #0x8\n"
- "cmp x25, #0x8\n"
+ ".inst 0x6e89a45c // ummla v28.4s, v2.16b, v9.16b\n"
"ldr q5, [x28, #0x40]\n"
+ ".inst 0x6e8aa411 // ummla v17.4s, v0.16b, v10.16b\n"
"ldr q6, [x28, #0x50]\n"
- ".inst 0x6e89a414 // ummla v20.4s, v0.16b, v9.16b\n"
- ".inst 0x6e89a45c // ummla v28.4s, v2.16b, v9.16b\n"
+ ".inst 0x6e8aa459 // ummla v25.4s, v2.16b, v10.16b\n"
"ldr q7, [x28, #0x60]\n"
"ldr q8, [x28, #0x70]\n"
- ".inst 0x6e8aa411 // ummla v17.4s, v0.16b, v10.16b\n"
- ".inst 0x6e8aa459 // ummla v25.4s, v2.16b, v10.16b\n"
".inst 0x6e84a415 // ummla v21.4s, v0.16b, v4.16b\n"
- ".inst 0x6e84a45d // ummla v29.4s, v2.16b, v4.16b\n"
"add x28, x28, #0x80\n"
+ ".inst 0x6e84a45d // ummla v29.4s, v2.16b, v4.16b\n"
".inst 0x6e85a412 // ummla v18.4s, v0.16b, v5.16b\n"
".inst 0x6e85a45a // ummla v26.4s, v2.16b, v5.16b\n"
".inst 0x6e86a416 // ummla v22.4s, v0.16b, v6.16b\n"
@@ -1667,48 +1671,48 @@ void a64_hybrid_u8qa_mmla_4x16 (
".inst 0x6e88a417 // ummla v23.4s, v0.16b, v8.16b\n"
".inst 0x6e88a45f // ummla v31.4s, v2.16b, v8.16b\n"
"bge 108b\n"
+ "cbz x24, 116f\n"
"110:" // Height 4: Multiply loop: Skip odd blocks
- "cbz x25, 116f\n"
- "tbz x25, #2, 112f\n"
- "ldr s1, [x24], #0x4\n"
- "ldr s2, [x23], #0x4\n"
- "ldr s3, [x22], #0x4\n"
- "ldr s9, [x21], #0x4\n"
- "tbz x25, #1, 111f\n"
- "ld1 { v1.h }[2], [x24], #0x2\n"
- "ld1 { v2.h }[2], [x23], #0x2\n"
- "ld1 { v3.h }[2], [x22], #0x2\n"
- "ld1 { v9.h }[2], [x21], #0x2\n"
- "tbz x25, #0, 114f\n"
- "ld1 { v1.b }[6], [x24]\n"
- "ld1 { v2.b }[6], [x23]\n"
- "ld1 { v3.b }[6], [x22]\n"
- "ld1 { v9.b }[6], [x21]\n"
+ "tbz x24, #2, 112f\n"
+ "ldr s1, [x23], #0x4\n"
+ "ldr s2, [x22], #0x4\n"
+ "ldr s3, [x21], #0x4\n"
+ "ldr s9, [x20], #0x4\n"
+ "tbz x24, #1, 111f\n"
+ "ld1 { v1.h }[2], [x23], #0x2\n"
+ "ld1 { v2.h }[2], [x22], #0x2\n"
+ "ld1 { v3.h }[2], [x21], #0x2\n"
+ "ld1 { v9.h }[2], [x20], #0x2\n"
+ "tbz x24, #0, 114f\n"
+ "ld1 { v1.b }[6], [x23]\n"
+ "ld1 { v2.b }[6], [x22]\n"
+ "ld1 { v3.b }[6], [x21]\n"
+ "ld1 { v9.b }[6], [x20]\n"
"b 114f\n"
"111:" // Height 4: Multiply loop: Ragged operand read: partial_1_4
- "tbz x25, #0, 114f\n"
- "ld1 { v1.b }[4], [x24]\n"
- "ld1 { v2.b }[4], [x23]\n"
- "ld1 { v3.b }[4], [x22]\n"
- "ld1 { v9.b }[4], [x21]\n"
+ "tbz x24, #0, 114f\n"
+ "ld1 { v1.b }[4], [x23]\n"
+ "ld1 { v2.b }[4], [x22]\n"
+ "ld1 { v3.b }[4], [x21]\n"
+ "ld1 { v9.b }[4], [x20]\n"
"b 114f\n"
"112:" // Height 4: Multiply loop: Ragged operand read: partial_2_0
- "tbz x25, #1, 113f\n"
- "ldr h1, [x24], #0x2\n"
- "ldr h2, [x23], #0x2\n"
- "ldr h3, [x22], #0x2\n"
- "ldr h9, [x21], #0x2\n"
- "tbz x25, #0, 114f\n"
- "ld1 { v1.b }[2], [x24]\n"
- "ld1 { v2.b }[2], [x23]\n"
- "ld1 { v3.b }[2], [x22]\n"
- "ld1 { v9.b }[2], [x21]\n"
+ "tbz x24, #1, 113f\n"
+ "ldr h1, [x23], #0x2\n"
+ "ldr h2, [x22], #0x2\n"
+ "ldr h3, [x21], #0x2\n"
+ "ldr h9, [x20], #0x2\n"
+ "tbz x24, #0, 114f\n"
+ "ld1 { v1.b }[2], [x23]\n"
+ "ld1 { v2.b }[2], [x22]\n"
+ "ld1 { v3.b }[2], [x21]\n"
+ "ld1 { v9.b }[2], [x20]\n"
"b 114f\n"
"113:" // Height 4: Multiply loop: Ragged operand read: partial_1_0
- "ldr b1, [x24, #0x0]\n"
- "ldr b2, [x23, #0x0]\n"
- "ldr b3, [x22, #0x0]\n"
- "ldr b9, [x21, #0x0]\n"
+ "ldr b1, [x23, #0x0]\n"
+ "ldr b2, [x22, #0x0]\n"
+ "ldr b3, [x21, #0x0]\n"
+ "ldr b9, [x20, #0x0]\n"
"114:" // Height 4: Multiply loop: Ragged operand read: Done
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn1 v2.2d, v3.2d, v9.2d\n"
@@ -1717,24 +1721,24 @@ void a64_hybrid_u8qa_mmla_4x16 (
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
"115:" // Height 4: Multiply loop: unique 16: skip row sum
"ldr q10, [x28, #0x0]\n"
- "ldr q4, [x28, #0x10]\n"
".inst 0x6e8aa410 // ummla v16.4s, v0.16b, v10.16b\n"
+ "ldr q4, [x28, #0x10]\n"
".inst 0x6e8aa458 // ummla v24.4s, v2.16b, v10.16b\n"
"ldr q5, [x28, #0x20]\n"
"ldr q6, [x28, #0x30]\n"
".inst 0x6e84a414 // ummla v20.4s, v0.16b, v4.16b\n"
- ".inst 0x6e84a45c // ummla v28.4s, v2.16b, v4.16b\n"
"ldr q7, [x28, #0x40]\n"
+ ".inst 0x6e84a45c // ummla v28.4s, v2.16b, v4.16b\n"
"ldr q8, [x28, #0x50]\n"
".inst 0x6e85a411 // ummla v17.4s, v0.16b, v5.16b\n"
- ".inst 0x6e85a459 // ummla v25.4s, v2.16b, v5.16b\n"
"ldr q9, [x28, #0x60]\n"
+ ".inst 0x6e85a459 // ummla v25.4s, v2.16b, v5.16b\n"
"ldr q10, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
".inst 0x6e86a415 // ummla v21.4s, v0.16b, v6.16b\n"
".inst 0x6e86a45d // ummla v29.4s, v2.16b, v6.16b\n"
".inst 0x6e87a412 // ummla v18.4s, v0.16b, v7.16b\n"
".inst 0x6e87a45a // ummla v26.4s, v2.16b, v7.16b\n"
- "add x28, x28, #0x80\n"
".inst 0x6e88a416 // ummla v22.4s, v0.16b, v8.16b\n"
".inst 0x6e88a45e // ummla v30.4s, v2.16b, v8.16b\n"
".inst 0x6e89a413 // ummla v19.4s, v0.16b, v9.16b\n"
@@ -1742,25 +1746,25 @@ void a64_hybrid_u8qa_mmla_4x16 (
".inst 0x6e8aa417 // ummla v23.4s, v0.16b, v10.16b\n"
".inst 0x6e8aa45f // ummla v31.4s, v2.16b, v10.16b\n"
"116:" // Height 4: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 100b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 v4.2d, v16.2d, v20.2d\n"
- "add x22, x27, x20\n"
- "add x21, x22, x20\n"
- "add x20, x21, x20\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x21, x26, x19\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
- "prfm pstl1keep, [x27, #0x0]\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
+ "add x20, x21, x19\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
- "prfm pstl1keep, [x22, #0x0]\n"
- "prfm pstl1keep, [x21, #0x0]\n"
+ "prfm pstl1keep, [x20, #0x0]\n"
+ "add x19, x20, x19\n"
"uzp2 v18.2d, v18.2d, v22.2d\n"
+ "prfm pstl1keep, [x19, #0x0]\n"
"uzp1 v22.2d, v19.2d, v23.2d\n"
- "prfm pstl1keep, [x20, #0x0]\n"
"uzp2 v19.2d, v19.2d, v23.2d\n"
"uzp1 v23.2d, v24.2d, v28.2d\n"
"uzp2 v24.2d, v24.2d, v28.2d\n"
@@ -1772,13 +1776,13 @@ void a64_hybrid_u8qa_mmla_4x16 (
"uzp2 v27.2d, v27.2d, v31.2d\n"
"mov v31.16b, v4.16b\n"
"tbnz %x[flags], #31, 117f\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
"addp v11.4s, v11.4s, v11.4s\n"
+ "add x22, %x[qp], %[b_offset]\n"
+ "ld1r { v4.4s }, [x22]\n"
"addp v13.4s, v13.4s, v13.4s\n"
- "neg v4.4s, v4.4s\n"
"dup v12.4s, v11.s[3]\n"
"dup v11.4s, v11.s[0]\n"
+ "neg v4.4s, v4.4s\n"
"dup v14.4s, v13.s[3]\n"
"dup v13.4s, v13.s[0]\n"
"mul v11.4s, v11.4s, v4.4s\n"
@@ -1786,25 +1790,25 @@ void a64_hybrid_u8qa_mmla_4x16 (
"mul v13.4s, v13.4s, v4.4s\n"
"mul v14.4s, v14.4s, v4.4s\n"
"117:" // Height 4: skip row sum fixup
- "ldr q0, [x10, #0x0]\n"
- "ldr q1, [x10, #0x10]\n"
"add v31.4s, v31.4s, v11.4s\n"
+ "ldr q0, [x27, #0x0]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"add v20.4s, v20.4s, v11.4s\n"
- "ldr q2, [x10, #0x20]\n"
- "ldr q3, [x10, #0x30]\n"
+ "ldr q1, [x27, #0x10]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
"add v21.4s, v21.4s, v11.4s\n"
+ "ldr q2, [x27, #0x20]\n"
+ "add x22, %x[qp], %[per_layer_mul]\n"
"add v22.4s, v22.4s, v11.4s\n"
+ "ldr q3, [x27, #0x30]\n"
+ "add x27, x27, #0x40\n"
"add v16.4s, v16.4s, v12.4s\n"
+ "ld1r { v4.4s }, [x22]\n"
"add v17.4s, v17.4s, v12.4s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x23]\n"
"add v18.4s, v18.4s, v12.4s\n"
"add v19.4s, v19.4s, v12.4s\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
"add v23.4s, v23.4s, v13.4s\n"
"add v28.4s, v28.4s, v13.4s\n"
- "add x10, x10, #0x40\n"
"add v29.4s, v29.4s, v13.4s\n"
"add v30.4s, v30.4s, v13.4s\n"
"add v24.4s, v24.4s, v14.4s\n"
@@ -1847,126 +1851,126 @@ void a64_hybrid_u8qa_mmla_4x16 (
"tbz %x[flags], #5, 118f\n"
"and v4.16b, v31.16b, v0.16b\n"
"and v5.16b, v20.16b, v0.16b\n"
+ "and v6.16b, v21.16b, v0.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
+ "sshr v6.4s, v6.4s, #0x1f\n"
"sqadd v31.4s, v31.4s, v4.4s\n"
"sqadd v20.4s, v20.4s, v5.4s\n"
- "and v6.16b, v21.16b, v0.16b\n"
+ "sqadd v21.4s, v21.4s, v6.4s\n"
"and v7.16b, v22.16b, v0.16b\n"
"and v8.16b, v16.16b, v0.16b\n"
"and v9.16b, v17.16b, v0.16b\n"
- "and v10.16b, v18.16b, v0.16b\n"
- "and v4.16b, v19.16b, v0.16b\n"
- "and v5.16b, v23.16b, v0.16b\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sshr v8.4s, v8.4s, #0x1f\n"
"sshr v9.4s, v9.4s, #0x1f\n"
- "sshr v10.4s, v10.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sqadd v21.4s, v21.4s, v6.4s\n"
"sqadd v22.4s, v22.4s, v7.4s\n"
"sqadd v16.4s, v16.4s, v8.4s\n"
"sqadd v17.4s, v17.4s, v9.4s\n"
+ "and v10.16b, v18.16b, v0.16b\n"
+ "and v4.16b, v19.16b, v0.16b\n"
+ "and v5.16b, v23.16b, v0.16b\n"
+ "sshr v10.4s, v10.4s, #0x1f\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
"sqadd v18.4s, v18.4s, v10.4s\n"
"sqadd v19.4s, v19.4s, v4.4s\n"
"sqadd v23.4s, v23.4s, v5.4s\n"
"and v6.16b, v28.16b, v0.16b\n"
"and v7.16b, v29.16b, v0.16b\n"
"and v8.16b, v30.16b, v0.16b\n"
- "and v9.16b, v24.16b, v0.16b\n"
- "and v10.16b, v25.16b, v0.16b\n"
- "and v4.16b, v26.16b, v0.16b\n"
- "and v5.16b, v27.16b, v0.16b\n"
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sshr v8.4s, v8.4s, #0x1f\n"
- "sshr v9.4s, v9.4s, #0x1f\n"
- "sshr v10.4s, v10.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
"sqadd v28.4s, v28.4s, v6.4s\n"
"sqadd v29.4s, v29.4s, v7.4s\n"
"sqadd v30.4s, v30.4s, v8.4s\n"
+ "and v9.16b, v24.16b, v0.16b\n"
+ "and v10.16b, v25.16b, v0.16b\n"
+ "and v4.16b, v26.16b, v0.16b\n"
+ "sshr v9.4s, v9.4s, #0x1f\n"
+ "sshr v10.4s, v10.4s, #0x1f\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
"sqadd v24.4s, v24.4s, v9.4s\n"
"sqadd v25.4s, v25.4s, v10.4s\n"
"sqadd v26.4s, v26.4s, v4.4s\n"
+ "and v5.16b, v27.16b, v0.16b\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
"sqadd v27.4s, v27.4s, v5.4s\n"
"118:" // Height 4: no shift correction
- "add x23, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
"srshl v31.4s, v31.4s, v0.4s\n"
+ "add x22, %x[qp], %[c_offset]\n"
+ "ld1r { v4.4s }, [x22]\n"
"srshl v20.4s, v20.4s, v0.4s\n"
+ "add x22, %x[qp], %[minval]\n"
"srshl v21.4s, v21.4s, v0.4s\n"
+ "ld1r { v5.4s }, [x22]\n"
+ "add x22, %x[qp], %[maxval]\n"
"srshl v22.4s, v22.4s, v0.4s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x23]\n"
+ "ld1r { v6.4s }, [x22]\n"
+ "cmp x9, #0x10\n"
"srshl v16.4s, v16.4s, v0.4s\n"
"srshl v17.4s, v17.4s, v0.4s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x23]\n"
- "srshl v18.4s, v18.4s, v0.4s\n"
- "srshl v19.4s, v19.4s, v0.4s\n"
- "cmp x9, #0x10\n"
- "srshl v23.4s, v23.4s, v0.4s\n"
- "srshl v28.4s, v28.4s, v0.4s\n"
- "srshl v29.4s, v29.4s, v0.4s\n"
- "srshl v30.4s, v30.4s, v0.4s\n"
- "srshl v24.4s, v24.4s, v0.4s\n"
- "srshl v25.4s, v25.4s, v0.4s\n"
- "srshl v26.4s, v26.4s, v0.4s\n"
- "srshl v27.4s, v27.4s, v0.4s\n"
"add v31.4s, v31.4s, v4.4s\n"
"add v20.4s, v20.4s, v4.4s\n"
"add v21.4s, v21.4s, v4.4s\n"
- "add v22.4s, v22.4s, v4.4s\n"
- "add v16.4s, v16.4s, v4.4s\n"
- "add v17.4s, v17.4s, v4.4s\n"
- "add v18.4s, v18.4s, v4.4s\n"
- "add v19.4s, v19.4s, v4.4s\n"
- "add v23.4s, v23.4s, v4.4s\n"
- "add v28.4s, v28.4s, v4.4s\n"
- "add v29.4s, v29.4s, v4.4s\n"
- "add v30.4s, v30.4s, v4.4s\n"
- "add v24.4s, v24.4s, v4.4s\n"
- "add v25.4s, v25.4s, v4.4s\n"
- "add v26.4s, v26.4s, v4.4s\n"
- "add v27.4s, v27.4s, v4.4s\n"
"smin v31.4s, v31.4s, v6.4s\n"
"smin v20.4s, v20.4s, v6.4s\n"
"smin v21.4s, v21.4s, v6.4s\n"
- "smin v22.4s, v22.4s, v6.4s\n"
- "smin v16.4s, v16.4s, v6.4s\n"
- "smin v17.4s, v17.4s, v6.4s\n"
- "smin v18.4s, v18.4s, v6.4s\n"
- "smin v19.4s, v19.4s, v6.4s\n"
- "smin v23.4s, v23.4s, v6.4s\n"
- "smin v28.4s, v28.4s, v6.4s\n"
- "smin v29.4s, v29.4s, v6.4s\n"
- "smin v30.4s, v30.4s, v6.4s\n"
- "smin v24.4s, v24.4s, v6.4s\n"
- "smin v25.4s, v25.4s, v6.4s\n"
- "smin v26.4s, v26.4s, v6.4s\n"
- "smin v27.4s, v27.4s, v6.4s\n"
"smax v31.4s, v31.4s, v5.4s\n"
"smax v20.4s, v20.4s, v5.4s\n"
"smax v21.4s, v21.4s, v5.4s\n"
+ "add v22.4s, v22.4s, v4.4s\n"
+ "add v16.4s, v16.4s, v4.4s\n"
+ "add v17.4s, v17.4s, v4.4s\n"
+ "smin v22.4s, v22.4s, v6.4s\n"
+ "smin v16.4s, v16.4s, v6.4s\n"
+ "smin v17.4s, v17.4s, v6.4s\n"
"smax v22.4s, v22.4s, v5.4s\n"
"smax v16.4s, v16.4s, v5.4s\n"
"smax v17.4s, v17.4s, v5.4s\n"
+ "srshl v18.4s, v18.4s, v0.4s\n"
+ "srshl v19.4s, v19.4s, v0.4s\n"
+ "srshl v23.4s, v23.4s, v0.4s\n"
+ "srshl v28.4s, v28.4s, v0.4s\n"
+ "add v18.4s, v18.4s, v4.4s\n"
+ "add v19.4s, v19.4s, v4.4s\n"
+ "add v23.4s, v23.4s, v4.4s\n"
+ "smin v18.4s, v18.4s, v6.4s\n"
+ "smin v19.4s, v19.4s, v6.4s\n"
+ "smin v23.4s, v23.4s, v6.4s\n"
"smax v18.4s, v18.4s, v5.4s\n"
"smax v19.4s, v19.4s, v5.4s\n"
"smax v23.4s, v23.4s, v5.4s\n"
+ "add v28.4s, v28.4s, v4.4s\n"
+ "srshl v29.4s, v29.4s, v0.4s\n"
+ "srshl v30.4s, v30.4s, v0.4s\n"
+ "smin v28.4s, v28.4s, v6.4s\n"
+ "srshl v24.4s, v24.4s, v0.4s\n"
+ "add v29.4s, v29.4s, v4.4s\n"
"smax v28.4s, v28.4s, v5.4s\n"
+ "add v30.4s, v30.4s, v4.4s\n"
+ "smin v29.4s, v29.4s, v6.4s\n"
+ "add v24.4s, v24.4s, v4.4s\n"
+ "smin v30.4s, v30.4s, v6.4s\n"
"smax v29.4s, v29.4s, v5.4s\n"
+ "smin v24.4s, v24.4s, v6.4s\n"
"smax v30.4s, v30.4s, v5.4s\n"
+ "srshl v25.4s, v25.4s, v0.4s\n"
"smax v24.4s, v24.4s, v5.4s\n"
+ "srshl v26.4s, v26.4s, v0.4s\n"
+ "srshl v27.4s, v27.4s, v0.4s\n"
+ "add v25.4s, v25.4s, v4.4s\n"
+ "uzp1 v31.8h, v31.8h, v20.8h\n"
+ "add v26.4s, v26.4s, v4.4s\n"
+ "smin v25.4s, v25.4s, v6.4s\n"
+ "add v27.4s, v27.4s, v4.4s\n"
+ "smin v26.4s, v26.4s, v6.4s\n"
"smax v25.4s, v25.4s, v5.4s\n"
+ "smin v27.4s, v27.4s, v6.4s\n"
"smax v26.4s, v26.4s, v5.4s\n"
- "smax v27.4s, v27.4s, v5.4s\n"
- "uzp1 v31.8h, v31.8h, v20.8h\n"
"uzp1 v20.8h, v21.8h, v22.8h\n"
+ "smax v27.4s, v27.4s, v5.4s\n"
"uzp1 v16.8h, v16.8h, v17.8h\n"
"uzp1 v17.8h, v18.8h, v19.8h\n"
"uzp1 v23.8h, v23.8h, v28.8h\n"
@@ -1979,120 +1983,120 @@ void a64_hybrid_u8qa_mmla_4x16 (
"uzp1 v24.16b, v24.16b, v25.16b\n"
"bge 127f\n"
"tbz x9, #3, 122f\n"
- "str d31, [x27], #0x8\n"
- "str d16, [x22], #0x8\n"
- "str d23, [x21], #0x8\n"
- "str d24, [x20], #0x8\n"
+ "str d31, [x26], #0x8\n"
+ "str d16, [x21], #0x8\n"
+ "str d23, [x20], #0x8\n"
+ "str d24, [x19], #0x8\n"
"tbz x9, #2, 120f\n"
- "st1 { v31.s }[2], [x27], #0x4\n"
- "st1 { v16.s }[2], [x22], #0x4\n"
- "st1 { v23.s }[2], [x21], #0x4\n"
- "st1 { v24.s }[2], [x20], #0x4\n"
+ "st1 { v31.s }[2], [x26], #0x4\n"
+ "st1 { v16.s }[2], [x21], #0x4\n"
+ "st1 { v23.s }[2], [x20], #0x4\n"
+ "st1 { v24.s }[2], [x19], #0x4\n"
"tbz x9, #1, 119f\n"
- "st1 { v31.h }[6], [x27], #0x2\n"
- "st1 { v16.h }[6], [x22], #0x2\n"
- "st1 { v23.h }[6], [x21], #0x2\n"
- "st1 { v24.h }[6], [x20], #0x2\n"
+ "st1 { v31.h }[6], [x26], #0x2\n"
+ "st1 { v16.h }[6], [x21], #0x2\n"
+ "st1 { v23.h }[6], [x20], #0x2\n"
+ "st1 { v24.h }[6], [x19], #0x2\n"
"tbz x9, #0, 126f\n"
- "st1 { v31.b }[14], [x27]\n"
- "st1 { v16.b }[14], [x22]\n"
- "st1 { v23.b }[14], [x21]\n"
- "st1 { v24.b }[14], [x20]\n"
+ "st1 { v31.b }[14], [x26]\n"
+ "st1 { v16.b }[14], [x21]\n"
+ "st1 { v23.b }[14], [x20]\n"
+ "st1 { v24.b }[14], [x19]\n"
"b 126f\n"
"119:" // Height 4: Partial direct writeback: partial_1_12
"tbz x9, #0, 126f\n"
- "st1 { v31.b }[12], [x27]\n"
- "st1 { v16.b }[12], [x22]\n"
- "st1 { v23.b }[12], [x21]\n"
- "st1 { v24.b }[12], [x20]\n"
+ "st1 { v31.b }[12], [x26]\n"
+ "st1 { v16.b }[12], [x21]\n"
+ "st1 { v23.b }[12], [x20]\n"
+ "st1 { v24.b }[12], [x19]\n"
"b 126f\n"
"120:" // Height 4: Partial direct writeback: partial_2_8
"tbz x9, #1, 121f\n"
- "st1 { v31.h }[4], [x27], #0x2\n"
- "st1 { v16.h }[4], [x22], #0x2\n"
- "st1 { v23.h }[4], [x21], #0x2\n"
- "st1 { v24.h }[4], [x20], #0x2\n"
+ "st1 { v31.h }[4], [x26], #0x2\n"
+ "st1 { v16.h }[4], [x21], #0x2\n"
+ "st1 { v23.h }[4], [x20], #0x2\n"
+ "st1 { v24.h }[4], [x19], #0x2\n"
"tbz x9, #0, 126f\n"
- "st1 { v31.b }[10], [x27]\n"
- "st1 { v16.b }[10], [x22]\n"
- "st1 { v23.b }[10], [x21]\n"
- "st1 { v24.b }[10], [x20]\n"
+ "st1 { v31.b }[10], [x26]\n"
+ "st1 { v16.b }[10], [x21]\n"
+ "st1 { v23.b }[10], [x20]\n"
+ "st1 { v24.b }[10], [x19]\n"
"b 126f\n"
"121:" // Height 4: Partial direct writeback: partial_1_8
"tbz x9, #0, 126f\n"
- "st1 { v31.b }[8], [x27]\n"
- "st1 { v16.b }[8], [x22]\n"
- "st1 { v23.b }[8], [x21]\n"
- "st1 { v24.b }[8], [x20]\n"
+ "st1 { v31.b }[8], [x26]\n"
+ "st1 { v16.b }[8], [x21]\n"
+ "st1 { v23.b }[8], [x20]\n"
+ "st1 { v24.b }[8], [x19]\n"
"b 126f\n"
"122:" // Height 4: Partial direct writeback: partial_4_0
"tbz x9, #2, 124f\n"
- "str s31, [x27], #0x4\n"
- "str s16, [x22], #0x4\n"
- "str s23, [x21], #0x4\n"
- "str s24, [x20], #0x4\n"
+ "str s31, [x26], #0x4\n"
+ "str s16, [x21], #0x4\n"
+ "str s23, [x20], #0x4\n"
+ "str s24, [x19], #0x4\n"
"tbz x9, #1, 123f\n"
- "st1 { v31.h }[2], [x27], #0x2\n"
- "st1 { v16.h }[2], [x22], #0x2\n"
- "st1 { v23.h }[2], [x21], #0x2\n"
- "st1 { v24.h }[2], [x20], #0x2\n"
+ "st1 { v31.h }[2], [x26], #0x2\n"
+ "st1 { v16.h }[2], [x21], #0x2\n"
+ "st1 { v23.h }[2], [x20], #0x2\n"
+ "st1 { v24.h }[2], [x19], #0x2\n"
"tbz x9, #0, 126f\n"
- "st1 { v31.b }[6], [x27]\n"
- "st1 { v16.b }[6], [x22]\n"
- "st1 { v23.b }[6], [x21]\n"
- "st1 { v24.b }[6], [x20]\n"
+ "st1 { v31.b }[6], [x26]\n"
+ "st1 { v16.b }[6], [x21]\n"
+ "st1 { v23.b }[6], [x20]\n"
+ "st1 { v24.b }[6], [x19]\n"
"b 126f\n"
"123:" // Height 4: Partial direct writeback: partial_1_4
"tbz x9, #0, 126f\n"
- "st1 { v31.b }[4], [x27]\n"
- "st1 { v16.b }[4], [x22]\n"
- "st1 { v23.b }[4], [x21]\n"
- "st1 { v24.b }[4], [x20]\n"
+ "st1 { v31.b }[4], [x26]\n"
+ "st1 { v16.b }[4], [x21]\n"
+ "st1 { v23.b }[4], [x20]\n"
+ "st1 { v24.b }[4], [x19]\n"
"b 126f\n"
"124:" // Height 4: Partial direct writeback: partial_2_0
"tbz x9, #1, 125f\n"
- "str h31, [x27], #0x2\n"
- "str h16, [x22], #0x2\n"
- "str h23, [x21], #0x2\n"
- "str h24, [x20], #0x2\n"
+ "str h31, [x26], #0x2\n"
+ "str h16, [x21], #0x2\n"
+ "str h23, [x20], #0x2\n"
+ "str h24, [x19], #0x2\n"
"tbz x9, #0, 126f\n"
- "st1 { v31.b }[2], [x27]\n"
- "st1 { v16.b }[2], [x22]\n"
- "st1 { v23.b }[2], [x21]\n"
- "st1 { v24.b }[2], [x20]\n"
+ "st1 { v31.b }[2], [x26]\n"
+ "st1 { v16.b }[2], [x21]\n"
+ "st1 { v23.b }[2], [x20]\n"
+ "st1 { v24.b }[2], [x19]\n"
"b 126f\n"
"125:" // Height 4: Partial direct writeback: partial_1_0
- "str b31, [x27, #0x0]\n"
- "str b16, [x22, #0x0]\n"
- "str b23, [x21, #0x0]\n"
- "str b24, [x20, #0x0]\n"
+ "str b31, [x26, #0x0]\n"
+ "str b16, [x21, #0x0]\n"
+ "str b23, [x20, #0x0]\n"
+ "str b24, [x19, #0x0]\n"
"126:" // Height 4: Partial direct writeback: Done
"b 128f\n"
"127:" // Height 4: Full writeback
- "str q31, [x27, #0x0]\n"
- "add x27, x27, #0x10\n"
- "str q16, [x22, #0x0]\n"
- "str q23, [x21, #0x0]\n"
- "str q24, [x20, #0x0]\n"
+ "str q31, [x26, #0x0]\n"
+ "add x26, x26, #0x10\n"
+ "str q16, [x21, #0x0]\n"
+ "str q23, [x20, #0x0]\n"
+ "str q24, [x19, #0x0]\n"
"128:" // Height 4: Writeback done
"subs x9, x9, #0x10\n"
"bgt 98b\n"
"subs %x[M], %x[M], #0x4\n"
"beq 130f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 129f\n"
- "add x21, x21, #0x4\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x4\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"129:" // Update direct input
- "mov x20, #0x4\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x4\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"130:" // Exit
: [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
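The clobber-list change above (x19 re-added at the end of the asm statement) is the visible effect of reverting the x19 removal: the restored kernel again uses x19 as a general scratch register, so it must be declared clobbered or the compiler could keep a live value in it across the asm block. A minimal sketch of that pattern follows; it is illustrative only, not code from this patch, and assumes GCC/Clang extended inline asm on AArch64, where x19 is callee-saved and the clobber declaration forces the compiler to save and restore it around the statement.

    #include <cstdint>

    // Minimal illustration: any general-purpose register the asm body
    // writes, such as x19 here, must be named in the clobber list so the
    // compiler does not assume it still holds a live value afterwards.
    static inline uint64_t add_one(uint64_t v) {
        uint64_t out;
        __asm__ volatile(
            "mov x19, %x[in]\n"   // use x19 as scratch, as the kernel does
            "add x19, x19, #1\n"
            "mov %x[res], x19\n"
            : [res] "=r"(out)
            : [in] "r"(v)
            : "cc", "x19"         // declare x19 clobbered
        );
        return out;
    }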
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16/a55.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16/a55.cpp
index 705f6525b6..8833651768 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16/a55.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16/a55.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -87,73 +87,73 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"cmp %x[M], #0x2\n"
"bgt 69f\n"
"beq 35f\n"
- "ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x17, %x[output_ptr]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x16, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x15, %x[output_ptr]\n"
"2:" // Height 1: Column loop
"tbz %x[flags], #0, 12f\n"
- "cmp x8, #0x10\n"
+ "cmp x17, #0x10\n"
"bge 11f\n"
- "tbz x8, #3, 6f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
- "ld1 { v9.4s }, [x17], #0x10\n"
- "tbz x8, #2, 4f\n"
- "ld1 { v10.4s }, [x17], #0x10\n"
- "tbz x8, #1, 3f\n"
- "ldr d11, [x17], #0x8\n"
- "mov x25, #0x38\n"
- "tbz x8, #0, 10f\n"
- "ld1 { v11.s }[2], [x17]\n"
+ "tbz x17, #3, 6f\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
+ "ld1 { v9.4s }, [x15], #0x10\n"
+ "tbz x17, #2, 4f\n"
+ "ld1 { v10.4s }, [x15], #0x10\n"
+ "tbz x17, #1, 3f\n"
+ "mov x24, #0x38\n"
+ "ldr d11, [x15], #0x8\n"
+ "tbz x17, #0, 10f\n"
+ "ld1 { v11.s }[2], [x15]\n"
"b 10f\n"
"3:" // Height 1: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x8, #0, 10f\n"
- "ldr s11, [x17, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x17, #0, 10f\n"
+ "ldr s11, [x15, #0x0]\n"
"b 10f\n"
"4:" // Height 1: Partial accumulate: partial_2_8
- "tbz x8, #1, 5f\n"
- "ldr d10, [x17], #0x8\n"
- "mov x25, #0x28\n"
- "tbz x8, #0, 10f\n"
- "ld1 { v10.s }[2], [x17]\n"
+ "tbz x17, #1, 5f\n"
+ "ldr d10, [x15], #0x8\n"
+ "mov x24, #0x28\n"
+ "tbz x17, #0, 10f\n"
+ "ld1 { v10.s }[2], [x15]\n"
"b 10f\n"
"5:" // Height 1: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x8, #0, 10f\n"
- "ldr s10, [x17, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x17, #0, 10f\n"
+ "ldr s10, [x15, #0x0]\n"
"b 10f\n"
"6:" // Height 1: Partial accumulate: partial_4_0
- "tbz x8, #2, 8f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
- "tbz x8, #1, 7f\n"
- "ldr d9, [x17], #0x8\n"
- "mov x25, #0x18\n"
- "tbz x8, #0, 10f\n"
- "ld1 { v9.s }[2], [x17]\n"
+ "tbz x17, #2, 8f\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
+ "tbz x17, #1, 7f\n"
+ "mov x24, #0x18\n"
+ "ldr d9, [x15], #0x8\n"
+ "tbz x17, #0, 10f\n"
+ "ld1 { v9.s }[2], [x15]\n"
"b 10f\n"
"7:" // Height 1: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x8, #0, 10f\n"
- "ldr s9, [x17, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x17, #0, 10f\n"
+ "ldr s9, [x15, #0x0]\n"
"b 10f\n"
"8:" // Height 1: Partial accumulate: partial_2_0
- "tbz x8, #1, 9f\n"
- "ldr d8, [x17], #0x8\n"
- "mov x25, #0x8\n"
- "tbz x8, #0, 10f\n"
- "ld1 { v8.s }[2], [x17]\n"
+ "tbz x17, #1, 9f\n"
+ "ldr d8, [x15], #0x8\n"
+ "mov x24, #0x8\n"
+ "tbz x17, #0, 10f\n"
+ "ld1 { v8.s }[2], [x15]\n"
"b 10f\n"
"9:" // Height 1: Partial accumulate: partial_1_0
- "ldr s8, [x17, #0x0]\n"
- "mov x25, #0x0\n"
+ "ldr s8, [x15, #0x0]\n"
+ "mov x24, #0x0\n"
"10:" // Height 1: Partial accumulate: Done
- "sub x17, x17, x25\n"
+ "sub x15, x15, x24\n"
"b 13f\n"
"11:" // Height 1: full accumulate
- "ldr q8, [x17, #0x0]\n"
- "ldr q9, [x17, #0x10]\n"
- "ldr q10, [x17, #0x20]\n"
- "ldr q11, [x17, #0x30]\n"
+ "ldr q8, [x15, #0x0]\n"
+ "ldr q9, [x15, #0x10]\n"
+ "ldr q10, [x15, #0x20]\n"
+ "ldr q11, [x15, #0x30]\n"
"b 13f\n"
"12:" // Height 1: no accumulate
"movi v8.4s, #0x0\n"
@@ -161,109 +161,112 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"movi v10.4s, #0x0\n"
"movi v11.4s, #0x0\n"
"13:" // Height 1: setup done
- "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
"14:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w13, [x20, x14, LSL #0x2]\n"
"tbz %x[flags], #3, 15f\n"
- "ldr x21, [%x[input_ptr], x15, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x13, [x21, #0x0]\n"
- "cbnz x15, 16f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x13, x13, x20\n"
+ "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x12, [x20, #0x0]\n"
+ "cbnz x14, 16f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x12, x12, x19\n"
"b 16f\n"
"15:" // Height 1: setup direct input
- "mov x13, %x[input_ptr]\n"
+ "mov x12, %x[input_ptr]\n"
"16:" // Height 1: input setup done
- "cmp x14, #0x10\n"
+ "cmp x13, #0x10\n"
"blt 19f\n"
- "ldr q0, [x13, #0x0]\n"
- "cmp x14, #0x20\n"
+ "ldr q0, [x12, #0x0]\n"
"ldr q6, [x16, #0x0]\n"
- "ldr q7, [x16, #0x10]\n"
+ "cmp x13, #0x20\n"
"blt 18f\n"
"17:" // Height 1: Multiply loop: Main loop head
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr d7, [x16, #0x10]\n"
+ "ldr x11, [x16, #0x18]\n"
+ "add x12, x12, #0x10\n"
"ldr d6, [x16, #0x20]\n"
- "ldr x12, [x16, #0x28]\n"
+ "sub x13, x13, #0x10\n"
+ "ldr x10, [x16, #0x28]\n"
+ "cmp x13, #0x20\n"
+ "mov v7.d[1], x11\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
+ "ldr x11, [x16, #0x38]\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x16, #0x30]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x16, #0x38]\n"
- "mov v7.d[1], x11\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
"ldr d6, [x16, #0x40]\n"
- "ldr x12, [x16, #0x48]\n"
+ "ldr x10, [x16, #0x48]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x16, #0x58]\n"
+ "ldr x9, [x12, #0x8]\n"
".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x16, #0x50]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x16, #0x58]\n"
- "mov v7.d[1], x11\n"
".inst 0x6fa0e0c8 // udot v8.4s, v6.16b, v0.4b[1]\n"
"ldr d6, [x16, #0x60]\n"
- "ldr x12, [x16, #0x68]\n"
+ "ldr x10, [x16, #0x68]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x16, #0x78]\n"
".inst 0x6fa0e0e9 // udot v9.4s, v7.16b, v0.4b[1]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x16, #0x70]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x16, #0x78]\n"
- "mov v7.d[1], x11\n"
".inst 0x6fa0e0ca // udot v10.4s, v6.16b, v0.4b[1]\n"
"ldr d6, [x16, #0x80]\n"
- "ldr x12, [x16, #0x88]\n"
+ "ldr x10, [x16, #0x88]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x16, #0x98]\n"
".inst 0x6fa0e0eb // udot v11.4s, v7.16b, v0.4b[1]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x16, #0x90]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x16, #0x98]\n"
- "mov v7.d[1], x11\n"
".inst 0x6f80e8c8 // udot v8.4s, v6.16b, v0.4b[2]\n"
"ldr d6, [x16, #0xa0]\n"
- "ldr x12, [x16, #0xa8]\n"
+ "ldr x10, [x16, #0xa8]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x16, #0xb8]\n"
".inst 0x6f80e8e9 // udot v9.4s, v7.16b, v0.4b[2]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x16, #0xb0]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x16, #0xb8]\n"
- "mov v7.d[1], x11\n"
".inst 0x6f80e8ca // udot v10.4s, v6.16b, v0.4b[2]\n"
"ldr d6, [x16, #0xc0]\n"
- "ldr x12, [x16, #0xc8]\n"
+ "ldr x10, [x16, #0xc8]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x16, #0xd8]\n"
".inst 0x6f80e8eb // udot v11.4s, v7.16b, v0.4b[2]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x16, #0xd0]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x16, #0xd8]\n"
- "mov v7.d[1], x11\n"
".inst 0x6fa0e8c8 // udot v8.4s, v6.16b, v0.4b[3]\n"
"ldr d6, [x16, #0xe0]\n"
- "ldr x12, [x16, #0xe8]\n"
+ "ldr x10, [x16, #0xe8]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x16, #0xf8]\n"
".inst 0x6fa0e8e9 // udot v9.4s, v7.16b, v0.4b[3]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x16, #0xf0]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x16, #0xf8]\n"
- "mov v7.d[1], x11\n"
- "add x13, x13, #0x10\n"
"add x16, x16, #0x100\n"
".inst 0x6fa0e8ca // udot v10.4s, v6.16b, v0.4b[3]\n"
"ldr d6, [x16, #0x0]\n"
- "ldr x12, [x16, #0x8]\n"
- ".inst 0x6fa0e8eb // udot v11.4s, v7.16b, v0.4b[3]\n"
- "ldr d0, [x13, #0x0]\n"
- "sub x14, x14, #0x10\n"
- "ldr d7, [x16, #0x10]\n"
- "cmp x14, #0x20\n"
- "ldr x10, [x13, #0x8]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x16, #0x18]\n"
- "mov v0.d[1], x10\n"
+ "ldr x10, [x16, #0x8]\n"
"mov v7.d[1], x11\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ ".inst 0x6fa0e8eb // udot v11.4s, v7.16b, v0.4b[3]\n"
+ "mov v6.d[1], x10\n"
+ "ldr d0, [x12, #0x0]\n"
+ "mov v0.d[1], x9\n"
"bge 17b\n"
"18:" // Height 1: Multiply loop: Single iteration only
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x16, #0x10]\n"
"ldr q6, [x16, #0x20]\n"
+ "sub x13, x13, #0x10\n"
+ "add x12, x12, #0x10\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
- "ldr q7, [x16, #0x30]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x16, #0x30]\n"
"ldr q6, [x16, #0x40]\n"
".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
"ldr q7, [x16, #0x50]\n"
@@ -287,203 +290,200 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"ldr q6, [x16, #0xe0]\n"
".inst 0x6fa0e8e9 // udot v9.4s, v7.16b, v0.4b[3]\n"
"ldr q7, [x16, #0xf0]\n"
- "add x13, x13, #0x10\n"
- "sub x14, x14, #0x10\n"
".inst 0x6fa0e8ca // udot v10.4s, v6.16b, v0.4b[3]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
- ".inst 0x6fa0e8eb // udot v11.4s, v7.16b, v0.4b[3]\n"
"add x16, x16, #0x100\n"
+ ".inst 0x6fa0e8eb // udot v11.4s, v7.16b, v0.4b[3]\n"
"19:" // Height 1: Multiply loop: Main loop skip
- "cbz x14, 24f\n"
- "cmp x14, #0x4\n"
+ "cbz x13, 24f\n"
+ "cmp x13, #0x4\n"
"blt 21f\n"
"20:" // Height 1: Multiply loop: Odd block loop
- "ldr s0, [x13], #0x4\n"
- "sub x14, x14, #0x4\n"
+ "ldr s0, [x12], #0x4\n"
+ "sub x13, x13, #0x4\n"
"ldr q6, [x16, #0x0]\n"
- ".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
+ "cmp x13, #0x4\n"
"ldr q7, [x16, #0x10]\n"
- ".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
+ ".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q6, [x16, #0x20]\n"
- "cmp x14, #0x4\n"
+ ".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
"ldr q7, [x16, #0x30]\n"
+ "add x16, x16, #0x40\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
- "add x16, x16, #0x40\n"
"bge 20b\n"
+ "cbz x13, 24f\n"
"21:" // Height 1: Multiply loop: Skip odd blocks
- "cbz x14, 24f\n"
- "tbz x14, #1, 22f\n"
- "ldr h0, [x13], #0x2\n"
- "tbz x14, #0, 23f\n"
- "ld1 { v0.b }[2], [x13]\n"
+ "tbz x13, #1, 22f\n"
+ "ldr h0, [x12], #0x2\n"
+ "tbz x13, #0, 23f\n"
+ "ld1 { v0.b }[2], [x12]\n"
"b 23f\n"
"22:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x13, #0x0]\n"
+ "ldr b0, [x12, #0x0]\n"
"23:" // Height 1: Multiply loop: Ragged operand read: Done
"ldr q6, [x16, #0x0]\n"
- ".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x16, #0x10]\n"
- ".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
+ ".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q6, [x16, #0x20]\n"
- ".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
+ ".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
"ldr q7, [x16, #0x30]\n"
- ".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
"add x16, x16, #0x40\n"
+ ".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
+ ".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
"24:" // Height 1: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x15, x15, #0x1\n"
- "cmp x15, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x14, x14, #0x1\n"
+ "cmp x14, x19\n"
"bne 14b\n"
- "cmp x8, #0x10\n"
- "prfm pstl1keep, [x17, #0x0]\n"
+ "prfm pstl1keep, [x15, #0x0]\n"
+ "cmp x17, #0x10\n"
"bge 33f\n"
- "tbz x8, #3, 28f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v9.4s }, [x17], #0x10\n"
- "tbz x8, #2, 26f\n"
- "st1 { v10.4s }, [x17], #0x10\n"
- "tbz x8, #1, 25f\n"
- "str d11, [x17], #0x8\n"
- "tbz x8, #0, 32f\n"
- "st1 { v11.s }[2], [x17]\n"
+ "tbz x17, #3, 28f\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
+ "st1 { v9.4s }, [x15], #0x10\n"
+ "tbz x17, #2, 26f\n"
+ "st1 { v10.4s }, [x15], #0x10\n"
+ "tbz x17, #1, 25f\n"
+ "str d11, [x15], #0x8\n"
+ "tbz x17, #0, 32f\n"
+ "st1 { v11.s }[2], [x15]\n"
"b 32f\n"
"25:" // Height 1: Partial direct writeback: partial_1_12
- "tbz x8, #0, 32f\n"
- "str s11, [x17, #0x0]\n"
+ "tbz x17, #0, 32f\n"
+ "str s11, [x15, #0x0]\n"
"b 32f\n"
"26:" // Height 1: Partial direct writeback: partial_2_8
- "tbz x8, #1, 27f\n"
- "str d10, [x17], #0x8\n"
- "tbz x8, #0, 32f\n"
- "st1 { v10.s }[2], [x17]\n"
+ "tbz x17, #1, 27f\n"
+ "str d10, [x15], #0x8\n"
+ "tbz x17, #0, 32f\n"
+ "st1 { v10.s }[2], [x15]\n"
"b 32f\n"
"27:" // Height 1: Partial direct writeback: partial_1_8
- "tbz x8, #0, 32f\n"
- "str s10, [x17, #0x0]\n"
+ "tbz x17, #0, 32f\n"
+ "str s10, [x15, #0x0]\n"
"b 32f\n"
"28:" // Height 1: Partial direct writeback: partial_4_0
- "tbz x8, #2, 30f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "tbz x8, #1, 29f\n"
- "str d9, [x17], #0x8\n"
- "tbz x8, #0, 32f\n"
- "st1 { v9.s }[2], [x17]\n"
+ "tbz x17, #2, 30f\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
+ "tbz x17, #1, 29f\n"
+ "str d9, [x15], #0x8\n"
+ "tbz x17, #0, 32f\n"
+ "st1 { v9.s }[2], [x15]\n"
"b 32f\n"
"29:" // Height 1: Partial direct writeback: partial_1_4
- "tbz x8, #0, 32f\n"
- "str s9, [x17, #0x0]\n"
+ "tbz x17, #0, 32f\n"
+ "str s9, [x15, #0x0]\n"
"b 32f\n"
"30:" // Height 1: Partial direct writeback: partial_2_0
- "tbz x8, #1, 31f\n"
- "str d8, [x17], #0x8\n"
- "tbz x8, #0, 32f\n"
- "st1 { v8.s }[2], [x17]\n"
+ "tbz x17, #1, 31f\n"
+ "str d8, [x15], #0x8\n"
+ "tbz x17, #0, 32f\n"
+ "st1 { v8.s }[2], [x15]\n"
"b 32f\n"
"31:" // Height 1: Partial direct writeback: partial_1_0
- "str s8, [x17, #0x0]\n"
+ "str s8, [x15, #0x0]\n"
"32:" // Height 1: Partial direct writeback: Done
"b 34f\n"
"33:" // Height 1: Full writeback
- "str q8, [x17, #0x0]\n"
- "str q9, [x17, #0x10]\n"
- "str q10, [x17, #0x20]\n"
- "str q11, [x17, #0x30]\n"
- "add x17, x17, #0x40\n"
+ "str q8, [x15, #0x0]\n"
+ "str q9, [x15, #0x10]\n"
+ "str q10, [x15, #0x20]\n"
+ "str q11, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
"34:" // Height 1: Writeback done
- "subs x8, x8, #0x10\n"
+ "subs x17, x17, #0x10\n"
"bgt 2b\n"
"b 206f\n"
"35:" // Height 2
- "ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x17, %x[output_ptr]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x15, %x[output_ptr]\n"
"ldr x16, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"36:" // Height 2: Column loop
"tbz %x[flags], #0, 46f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "cmp x8, #0x10\n"
- "add x24, x17, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x17, #0x10\n"
+ "add x23, x15, x19, LSL #2\n"
"bge 45f\n"
- "tbz x8, #3, 40f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v9.4s }, [x17], #0x10\n"
- "ld1 { v13.4s }, [x24], #0x10\n"
- "tbz x8, #2, 38f\n"
- "ld1 { v10.4s }, [x17], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "tbz x8, #1, 37f\n"
- "ldr d11, [x17], #0x8\n"
- "mov x25, #0x38\n"
- "ldr d15, [x24], #0x8\n"
- "tbz x8, #0, 44f\n"
- "ld1 { v11.s }[2], [x17]\n"
- "ld1 { v15.s }[2], [x24]\n"
+ "tbz x17, #3, 40f\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v9.4s }, [x15], #0x10\n"
+ "ld1 { v13.4s }, [x23], #0x10\n"
+ "tbz x17, #2, 38f\n"
+ "ld1 { v10.4s }, [x15], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "tbz x17, #1, 37f\n"
+ "mov x24, #0x38\n"
+ "ldr d11, [x15], #0x8\n"
+ "ldr d15, [x23], #0x8\n"
+ "tbz x17, #0, 44f\n"
+ "ld1 { v11.s }[2], [x15]\n"
+ "ld1 { v15.s }[2], [x23]\n"
"b 44f\n"
"37:" // Height 2: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x8, #0, 44f\n"
- "ldr s11, [x17, #0x0]\n"
- "ldr s15, [x24, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x17, #0, 44f\n"
+ "ldr s11, [x15, #0x0]\n"
+ "ldr s15, [x23, #0x0]\n"
"b 44f\n"
"38:" // Height 2: Partial accumulate: partial_2_8
- "tbz x8, #1, 39f\n"
- "ldr d10, [x17], #0x8\n"
- "mov x25, #0x28\n"
- "ldr d14, [x24], #0x8\n"
- "tbz x8, #0, 44f\n"
- "ld1 { v10.s }[2], [x17]\n"
- "ld1 { v14.s }[2], [x24]\n"
+ "tbz x17, #1, 39f\n"
+ "ldr d10, [x15], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
+ "mov x24, #0x28\n"
+ "tbz x17, #0, 44f\n"
+ "ld1 { v10.s }[2], [x15]\n"
+ "ld1 { v14.s }[2], [x23]\n"
"b 44f\n"
"39:" // Height 2: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x8, #0, 44f\n"
- "ldr s10, [x17, #0x0]\n"
- "ldr s14, [x24, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x17, #0, 44f\n"
+ "ldr s10, [x15, #0x0]\n"
+ "ldr s14, [x23, #0x0]\n"
"b 44f\n"
"40:" // Height 2: Partial accumulate: partial_4_0
- "tbz x8, #2, 42f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "tbz x8, #1, 41f\n"
- "ldr d9, [x17], #0x8\n"
- "mov x25, #0x18\n"
- "ldr d13, [x24], #0x8\n"
- "tbz x8, #0, 44f\n"
- "ld1 { v9.s }[2], [x17]\n"
- "ld1 { v13.s }[2], [x24]\n"
+ "tbz x17, #2, 42f\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "tbz x17, #1, 41f\n"
+ "mov x24, #0x18\n"
+ "ldr d9, [x15], #0x8\n"
+ "ldr d13, [x23], #0x8\n"
+ "tbz x17, #0, 44f\n"
+ "ld1 { v9.s }[2], [x15]\n"
+ "ld1 { v13.s }[2], [x23]\n"
"b 44f\n"
"41:" // Height 2: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x8, #0, 44f\n"
- "ldr s9, [x17, #0x0]\n"
- "ldr s13, [x24, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x17, #0, 44f\n"
+ "ldr s9, [x15, #0x0]\n"
+ "ldr s13, [x23, #0x0]\n"
"b 44f\n"
"42:" // Height 2: Partial accumulate: partial_2_0
- "tbz x8, #1, 43f\n"
- "ldr d8, [x17], #0x8\n"
- "mov x25, #0x8\n"
- "ldr d12, [x24], #0x8\n"
- "tbz x8, #0, 44f\n"
- "ld1 { v8.s }[2], [x17]\n"
- "ld1 { v12.s }[2], [x24]\n"
+ "tbz x17, #1, 43f\n"
+ "ldr d8, [x15], #0x8\n"
+ "ldr d12, [x23], #0x8\n"
+ "mov x24, #0x8\n"
+ "tbz x17, #0, 44f\n"
+ "ld1 { v8.s }[2], [x15]\n"
+ "ld1 { v12.s }[2], [x23]\n"
"b 44f\n"
"43:" // Height 2: Partial accumulate: partial_1_0
- "ldr s8, [x17, #0x0]\n"
- "mov x25, #0x0\n"
- "ldr s12, [x24, #0x0]\n"
+ "ldr s8, [x15, #0x0]\n"
+ "mov x24, #0x0\n"
+ "ldr s12, [x23, #0x0]\n"
"44:" // Height 2: Partial accumulate: Done
- "sub x17, x17, x25\n"
+ "sub x15, x15, x24\n"
"b 47f\n"
"45:" // Height 2: full accumulate
- "ldr q8, [x17, #0x0]\n"
- "ldr q9, [x17, #0x10]\n"
- "ldr q10, [x17, #0x20]\n"
- "ldr q11, [x17, #0x30]\n"
- "ldr q12, [x24, #0x0]\n"
- "ldr q13, [x24, #0x10]\n"
- "ldr q14, [x24, #0x20]\n"
- "ldr q15, [x24, #0x30]\n"
+ "ldr q8, [x15, #0x0]\n"
+ "ldr q9, [x15, #0x10]\n"
+ "ldr q10, [x15, #0x20]\n"
+ "ldr q11, [x15, #0x30]\n"
+ "ldr q12, [x23, #0x0]\n"
+ "ldr q13, [x23, #0x10]\n"
+ "ldr q14, [x23, #0x20]\n"
+ "ldr q15, [x23, #0x30]\n"
"b 47f\n"
"46:" // Height 2: no accumulate
"movi v8.4s, #0x0\n"
@@ -495,147 +495,147 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"movi v14.4s, #0x0\n"
"movi v15.4s, #0x0\n"
"47:" // Height 2: setup done
- "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
"48:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w13, [x20, x14, LSL #0x2]\n"
"tbz %x[flags], #3, 49f\n"
- "ldr x21, [%x[input_ptr], x15, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x13, [x21, #0x0]\n"
- "ldr x9, [x21, #0x8]\n"
- "cbnz x15, 50f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x13, x13, x20\n"
- "add x9, x9, x20\n"
+ "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x12, [x20, #0x0]\n"
+ "ldr x28, [x20, #0x8]\n"
+ "cbnz x14, 50f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x12, x12, x19\n"
+ "add x28, x28, x19\n"
"b 50f\n"
"49:" // Height 2: setup direct input
- "mov x13, %x[input_ptr]\n"
- "add x9, x13, x20\n"
+ "mov x12, %x[input_ptr]\n"
+ "add x28, x12, x19\n"
"50:" // Height 2: input setup done
- "cmp x14, #0x10\n"
+ "cmp x13, #0x10\n"
"blt 53f\n"
- "ldr q0, [x13, #0x0]\n"
- "cmp x14, #0x20\n"
- "ldr q1, [x9, #0x0]\n"
+ "ldr q0, [x12, #0x0]\n"
+ "ldr q1, [x28, #0x0]\n"
+ "cmp x13, #0x20\n"
"ldr q6, [x16, #0x0]\n"
- "ldr q7, [x16, #0x10]\n"
"blt 52f\n"
"51:" // Height 2: Multiply loop: Main loop head
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr x12, [x16, #0x28]\n"
+ "ldr d7, [x16, #0x10]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
+ "ldr x11, [x16, #0x18]\n"
"ldr d6, [x16, #0x20]\n"
+ "add x12, x12, #0x10\n"
+ "ldr x10, [x16, #0x28]\n"
+ "add x28, x28, #0x10\n"
+ "mov v7.d[1], x11\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
+ "sub x13, x13, #0x10\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
- "ldr x11, [x16, #0x38]\n"
+ "mov v6.d[1], x10\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
"ldr d7, [x16, #0x30]\n"
- "mov v6.d[1], x12\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
- "mov v7.d[1], x11\n"
+ "ldr x11, [x16, #0x38]\n"
".inst 0x6f81e0ce // udot v14.4s, v6.16b, v1.4b[0]\n"
"ldr d6, [x16, #0x40]\n"
+ "ldr x10, [x16, #0x48]\n"
+ "cmp x13, #0x20\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x16, #0x58]\n"
+ "ldr x9, [x12, #0x8]\n"
".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
- "ldr x12, [x16, #0x48]\n"
+ "mov v6.d[1], x10\n"
".inst 0x6f81e0ef // udot v15.4s, v7.16b, v1.4b[0]\n"
"ldr d7, [x16, #0x50]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x16, #0x58]\n"
- "mov v7.d[1], x11\n"
".inst 0x6fa0e0c8 // udot v8.4s, v6.16b, v0.4b[1]\n"
- "ldr x12, [x16, #0x68]\n"
+ "ldr x10, [x16, #0x68]\n"
".inst 0x6fa1e0cc // udot v12.4s, v6.16b, v1.4b[1]\n"
"ldr d6, [x16, #0x60]\n"
- ".inst 0x6fa0e0e9 // udot v9.4s, v7.16b, v0.4b[1]\n"
+ "mov v7.d[1], x11\n"
"ldr x11, [x16, #0x78]\n"
+ "ldr x27, [x28, #0x8]\n"
+ ".inst 0x6fa0e0e9 // udot v9.4s, v7.16b, v0.4b[1]\n"
+ "mov v6.d[1], x10\n"
".inst 0x6fa1e0ed // udot v13.4s, v7.16b, v1.4b[1]\n"
"ldr d7, [x16, #0x70]\n"
- "mov v6.d[1], x12\n"
".inst 0x6fa0e0ca // udot v10.4s, v6.16b, v0.4b[1]\n"
- "mov v7.d[1], x11\n"
+ "ldr x10, [x16, #0x88]\n"
".inst 0x6fa1e0ce // udot v14.4s, v6.16b, v1.4b[1]\n"
"ldr d6, [x16, #0x80]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x16, #0x98]\n"
".inst 0x6fa0e0eb // udot v11.4s, v7.16b, v0.4b[1]\n"
- "ldr x12, [x16, #0x88]\n"
+ "mov v6.d[1], x10\n"
".inst 0x6fa1e0ef // udot v15.4s, v7.16b, v1.4b[1]\n"
"ldr d7, [x16, #0x90]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x16, #0x98]\n"
- "mov v7.d[1], x11\n"
".inst 0x6f80e8c8 // udot v8.4s, v6.16b, v0.4b[2]\n"
- "ldr x12, [x16, #0xa8]\n"
+ "ldr x10, [x16, #0xa8]\n"
".inst 0x6f81e8cc // udot v12.4s, v6.16b, v1.4b[2]\n"
"ldr d6, [x16, #0xa0]\n"
- ".inst 0x6f80e8e9 // udot v9.4s, v7.16b, v0.4b[2]\n"
+ "mov v7.d[1], x11\n"
"ldr x11, [x16, #0xb8]\n"
+ ".inst 0x6f80e8e9 // udot v9.4s, v7.16b, v0.4b[2]\n"
+ "mov v6.d[1], x10\n"
".inst 0x6f81e8ed // udot v13.4s, v7.16b, v1.4b[2]\n"
"ldr d7, [x16, #0xb0]\n"
- "mov v6.d[1], x12\n"
".inst 0x6f80e8ca // udot v10.4s, v6.16b, v0.4b[2]\n"
- "mov v7.d[1], x11\n"
+ "ldr x10, [x16, #0xc8]\n"
".inst 0x6f81e8ce // udot v14.4s, v6.16b, v1.4b[2]\n"
"ldr d6, [x16, #0xc0]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x16, #0xd8]\n"
".inst 0x6f80e8eb // udot v11.4s, v7.16b, v0.4b[2]\n"
- "ldr x12, [x16, #0xc8]\n"
+ "mov v6.d[1], x10\n"
".inst 0x6f81e8ef // udot v15.4s, v7.16b, v1.4b[2]\n"
"ldr d7, [x16, #0xd0]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x16, #0xd8]\n"
- "mov v7.d[1], x11\n"
".inst 0x6fa0e8c8 // udot v8.4s, v6.16b, v0.4b[3]\n"
- "ldr x12, [x16, #0xe8]\n"
+ "ldr x10, [x16, #0xe8]\n"
".inst 0x6fa1e8cc // udot v12.4s, v6.16b, v1.4b[3]\n"
"ldr d6, [x16, #0xe0]\n"
- ".inst 0x6fa0e8e9 // udot v9.4s, v7.16b, v0.4b[3]\n"
+ "mov v7.d[1], x11\n"
"ldr x11, [x16, #0xf8]\n"
+ ".inst 0x6fa0e8e9 // udot v9.4s, v7.16b, v0.4b[3]\n"
+ "mov v6.d[1], x10\n"
".inst 0x6fa1e8ed // udot v13.4s, v7.16b, v1.4b[3]\n"
"ldr d7, [x16, #0xf0]\n"
- "mov v6.d[1], x12\n"
- "add x13, x13, #0x10\n"
- "mov v7.d[1], x11\n"
- "add x9, x9, #0x10\n"
- "add x16, x16, #0x100\n"
".inst 0x6fa0e8ca // udot v10.4s, v6.16b, v0.4b[3]\n"
+ "add x16, x16, #0x100\n"
".inst 0x6fa1e8ce // udot v14.4s, v6.16b, v1.4b[3]\n"
"ldr d6, [x16, #0x0]\n"
- "ldr x12, [x16, #0x8]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x10, [x16, #0x8]\n"
".inst 0x6fa0e8eb // udot v11.4s, v7.16b, v0.4b[3]\n"
- "ldr d0, [x13, #0x0]\n"
+ "ldr d0, [x12, #0x0]\n"
".inst 0x6fa1e8ef // udot v15.4s, v7.16b, v1.4b[3]\n"
- "ldr d1, [x9, #0x0]\n"
- "sub x14, x14, #0x10\n"
- "ldr d7, [x16, #0x10]\n"
- "cmp x14, #0x20\n"
- "ldr x10, [x13, #0x8]\n"
- "mov v6.d[1], x12\n"
- "ldr x28, [x9, #0x8]\n"
- "mov v0.d[1], x10\n"
- "ldr x11, [x16, #0x18]\n"
- "mov v1.d[1], x28\n"
- "prfm pldl1keep, [x13, #0x80]\n"
- "mov v7.d[1], x11\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "mov v6.d[1], x10\n"
+ "ldr d1, [x28, #0x0]\n"
+ "mov v0.d[1], x9\n"
+ "mov v1.d[1], x27\n"
"bge 51b\n"
"52:" // Height 2: Multiply loop: Single iteration only
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
- "add x13, x13, #0x10\n"
+ "ldr q7, [x16, #0x10]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
"ldr q6, [x16, #0x20]\n"
+ "sub x13, x13, #0x10\n"
+ "add x12, x12, #0x10\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
"ldr q7, [x16, #0x30]\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
- "sub x14, x14, #0x10\n"
+ "add x28, x28, #0x10\n"
".inst 0x6f81e0ce // udot v14.4s, v6.16b, v1.4b[0]\n"
- "ldr q6, [x16, #0x40]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "ldr q6, [x16, #0x40]\n"
".inst 0x6f81e0ef // udot v15.4s, v7.16b, v1.4b[0]\n"
"ldr q7, [x16, #0x50]\n"
".inst 0x6fa0e0c8 // udot v8.4s, v6.16b, v0.4b[1]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
".inst 0x6fa1e0cc // udot v12.4s, v6.16b, v1.4b[1]\n"
"ldr q6, [x16, #0x60]\n"
".inst 0x6fa0e0e9 // udot v9.4s, v7.16b, v0.4b[1]\n"
@@ -671,17 +671,17 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
".inst 0x6fa0e8eb // udot v11.4s, v7.16b, v0.4b[3]\n"
".inst 0x6fa1e8ef // udot v15.4s, v7.16b, v1.4b[3]\n"
"53:" // Height 2: Multiply loop: Main loop skip
- "cbz x14, 58f\n"
- "cmp x14, #0x4\n"
+ "cbz x13, 58f\n"
+ "cmp x13, #0x4\n"
"blt 55f\n"
"54:" // Height 2: Multiply loop: Odd block loop
- "ldr s0, [x13], #0x4\n"
- "sub x14, x14, #0x4\n"
- "ldr s1, [x9], #0x4\n"
- "cmp x14, #0x4\n"
+ "ldr s0, [x12], #0x4\n"
+ "sub x13, x13, #0x4\n"
+ "ldr s1, [x28], #0x4\n"
+ "cmp x13, #0x4\n"
"ldr q6, [x16, #0x0]\n"
- ".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x16, #0x10]\n"
+ ".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
"ldr q6, [x16, #0x20]\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
@@ -693,22 +693,22 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0ef // udot v15.4s, v7.16b, v1.4b[0]\n"
"bge 54b\n"
+ "cbz x13, 58f\n"
"55:" // Height 2: Multiply loop: Skip odd blocks
- "cbz x14, 58f\n"
- "tbz x14, #1, 56f\n"
- "ldr h0, [x13], #0x2\n"
- "ldr h1, [x9], #0x2\n"
- "tbz x14, #0, 57f\n"
- "ld1 { v0.b }[2], [x13]\n"
- "ld1 { v1.b }[2], [x9]\n"
+ "tbz x13, #1, 56f\n"
+ "ldr h0, [x12], #0x2\n"
+ "ldr h1, [x28], #0x2\n"
+ "tbz x13, #0, 57f\n"
+ "ld1 { v0.b }[2], [x12]\n"
+ "ld1 { v1.b }[2], [x28]\n"
"b 57f\n"
"56:" // Height 2: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x13, #0x0]\n"
- "ldr b1, [x9, #0x0]\n"
+ "ldr b0, [x12, #0x0]\n"
+ "ldr b1, [x28, #0x0]\n"
"57:" // Height 2: Multiply loop: Ragged operand read: Done
"ldr q6, [x16, #0x0]\n"
- ".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x16, #0x10]\n"
+ ".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
"ldr q6, [x16, #0x20]\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
@@ -720,203 +720,203 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0ef // udot v15.4s, v7.16b, v1.4b[0]\n"
"58:" // Height 2: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x15, x15, #0x1\n"
- "cmp x15, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x14, x14, #0x1\n"
+ "cmp x14, x19\n"
"bne 48b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
- "cmp x8, #0x10\n"
- "prfm pstl1keep, [x17, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x15, #0x0]\n"
+ "cmp x17, #0x10\n"
+ "add x23, x15, x19, LSL #2\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"bge 67f\n"
- "tbz x8, #3, 62f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v9.4s }, [x17], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v13.4s }, [x24], #0x10\n"
- "tbz x8, #2, 60f\n"
- "st1 { v10.4s }, [x17], #0x10\n"
- "st1 { v14.4s }, [x24], #0x10\n"
- "tbz x8, #1, 59f\n"
- "str d11, [x17], #0x8\n"
- "str d15, [x24], #0x8\n"
- "tbz x8, #0, 66f\n"
- "st1 { v11.s }[2], [x17]\n"
- "st1 { v15.s }[2], [x24]\n"
+ "tbz x17, #3, 62f\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
+ "st1 { v9.4s }, [x15], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v13.4s }, [x23], #0x10\n"
+ "tbz x17, #2, 60f\n"
+ "st1 { v10.4s }, [x15], #0x10\n"
+ "st1 { v14.4s }, [x23], #0x10\n"
+ "tbz x17, #1, 59f\n"
+ "str d11, [x15], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "tbz x17, #0, 66f\n"
+ "st1 { v11.s }[2], [x15]\n"
+ "st1 { v15.s }[2], [x23]\n"
"b 66f\n"
"59:" // Height 2: Partial direct writeback: partial_1_12
- "tbz x8, #0, 66f\n"
- "str s11, [x17, #0x0]\n"
- "str s15, [x24, #0x0]\n"
+ "tbz x17, #0, 66f\n"
+ "str s11, [x15, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
"b 66f\n"
"60:" // Height 2: Partial direct writeback: partial_2_8
- "tbz x8, #1, 61f\n"
- "str d10, [x17], #0x8\n"
- "str d14, [x24], #0x8\n"
- "tbz x8, #0, 66f\n"
- "st1 { v10.s }[2], [x17]\n"
- "st1 { v14.s }[2], [x24]\n"
+ "tbz x17, #1, 61f\n"
+ "str d10, [x15], #0x8\n"
+ "str d14, [x23], #0x8\n"
+ "tbz x17, #0, 66f\n"
+ "st1 { v10.s }[2], [x15]\n"
+ "st1 { v14.s }[2], [x23]\n"
"b 66f\n"
"61:" // Height 2: Partial direct writeback: partial_1_8
- "tbz x8, #0, 66f\n"
- "str s10, [x17, #0x0]\n"
- "str s14, [x24, #0x0]\n"
+ "tbz x17, #0, 66f\n"
+ "str s10, [x15, #0x0]\n"
+ "str s14, [x23, #0x0]\n"
"b 66f\n"
"62:" // Height 2: Partial direct writeback: partial_4_0
- "tbz x8, #2, 64f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "tbz x8, #1, 63f\n"
- "str d9, [x17], #0x8\n"
- "str d13, [x24], #0x8\n"
- "tbz x8, #0, 66f\n"
- "st1 { v9.s }[2], [x17]\n"
- "st1 { v13.s }[2], [x24]\n"
+ "tbz x17, #2, 64f\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "tbz x17, #1, 63f\n"
+ "str d9, [x15], #0x8\n"
+ "str d13, [x23], #0x8\n"
+ "tbz x17, #0, 66f\n"
+ "st1 { v9.s }[2], [x15]\n"
+ "st1 { v13.s }[2], [x23]\n"
"b 66f\n"
"63:" // Height 2: Partial direct writeback: partial_1_4
- "tbz x8, #0, 66f\n"
- "str s9, [x17, #0x0]\n"
- "str s13, [x24, #0x0]\n"
+ "tbz x17, #0, 66f\n"
+ "str s9, [x15, #0x0]\n"
+ "str s13, [x23, #0x0]\n"
"b 66f\n"
"64:" // Height 2: Partial direct writeback: partial_2_0
- "tbz x8, #1, 65f\n"
- "str d8, [x17], #0x8\n"
- "str d12, [x24], #0x8\n"
- "tbz x8, #0, 66f\n"
- "st1 { v8.s }[2], [x17]\n"
- "st1 { v12.s }[2], [x24]\n"
+ "tbz x17, #1, 65f\n"
+ "str d8, [x15], #0x8\n"
+ "str d12, [x23], #0x8\n"
+ "tbz x17, #0, 66f\n"
+ "st1 { v8.s }[2], [x15]\n"
+ "st1 { v12.s }[2], [x23]\n"
"b 66f\n"
"65:" // Height 2: Partial direct writeback: partial_1_0
- "str s8, [x17, #0x0]\n"
- "str s12, [x24, #0x0]\n"
+ "str s8, [x15, #0x0]\n"
+ "str s12, [x23, #0x0]\n"
"66:" // Height 2: Partial direct writeback: Done
"b 68f\n"
"67:" // Height 2: Full writeback
- "str q8, [x17, #0x0]\n"
- "str q9, [x17, #0x10]\n"
- "str q10, [x17, #0x20]\n"
- "str q11, [x17, #0x30]\n"
- "add x17, x17, #0x40\n"
- "str q12, [x24, #0x0]\n"
- "str q13, [x24, #0x10]\n"
- "str q14, [x24, #0x20]\n"
- "str q15, [x24, #0x30]\n"
+ "str q8, [x15, #0x0]\n"
+ "str q9, [x15, #0x10]\n"
+ "str q10, [x15, #0x20]\n"
+ "str q11, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
+ "str q12, [x23, #0x0]\n"
+ "str q13, [x23, #0x10]\n"
+ "str q14, [x23, #0x20]\n"
+ "str q15, [x23, #0x30]\n"
"68:" // Height 2: Writeback done
- "subs x8, x8, #0x10\n"
+ "subs x17, x17, #0x10\n"
"bgt 36b\n"
"b 206f\n"
"69:" // Height 3
- "ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x17, %x[output_ptr]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x15, %x[output_ptr]\n"
"ldr x16, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"70:" // Height 3: Column loop
"tbz %x[flags], #0, 80f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
- "cmp x8, #0x10\n"
- "add x23, x24, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x17, #0x10\n"
+ "add x23, x15, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"bge 79f\n"
- "tbz x8, #3, 74f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v9.4s }, [x17], #0x10\n"
- "ld1 { v13.4s }, [x24], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "tbz x8, #2, 72f\n"
- "ld1 { v10.4s }, [x17], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "tbz x8, #1, 71f\n"
- "ldr d11, [x17], #0x8\n"
- "mov x25, #0x38\n"
- "ldr d15, [x24], #0x8\n"
- "ldr d19, [x23], #0x8\n"
- "tbz x8, #0, 78f\n"
- "ld1 { v11.s }[2], [x17]\n"
- "ld1 { v15.s }[2], [x24]\n"
- "ld1 { v19.s }[2], [x23]\n"
+ "tbz x17, #3, 74f\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v9.4s }, [x15], #0x10\n"
+ "ld1 { v13.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "tbz x17, #2, 72f\n"
+ "ld1 { v10.4s }, [x15], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "tbz x17, #1, 71f\n"
+ "ldr d11, [x15], #0x8\n"
+ "mov x24, #0x38\n"
+ "ldr d15, [x23], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
+ "tbz x17, #0, 78f\n"
+ "ld1 { v11.s }[2], [x15]\n"
+ "ld1 { v15.s }[2], [x23]\n"
+ "ld1 { v19.s }[2], [x22]\n"
"b 78f\n"
"71:" // Height 3: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x8, #0, 78f\n"
- "ldr s11, [x17, #0x0]\n"
- "ldr s15, [x24, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x17, #0, 78f\n"
+ "ldr s11, [x15, #0x0]\n"
+ "ldr s15, [x23, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
"b 78f\n"
"72:" // Height 3: Partial accumulate: partial_2_8
- "tbz x8, #1, 73f\n"
- "ldr d10, [x17], #0x8\n"
- "mov x25, #0x28\n"
- "ldr d14, [x24], #0x8\n"
- "ldr d18, [x23], #0x8\n"
- "tbz x8, #0, 78f\n"
- "ld1 { v10.s }[2], [x17]\n"
- "ld1 { v14.s }[2], [x24]\n"
- "ld1 { v18.s }[2], [x23]\n"
+ "tbz x17, #1, 73f\n"
+ "ldr d10, [x15], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
+ "mov x24, #0x28\n"
+ "ldr d18, [x22], #0x8\n"
+ "tbz x17, #0, 78f\n"
+ "ld1 { v10.s }[2], [x15]\n"
+ "ld1 { v14.s }[2], [x23]\n"
+ "ld1 { v18.s }[2], [x22]\n"
"b 78f\n"
"73:" // Height 3: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x8, #0, 78f\n"
- "ldr s10, [x17, #0x0]\n"
- "ldr s14, [x24, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x17, #0, 78f\n"
+ "ldr s10, [x15, #0x0]\n"
+ "ldr s14, [x23, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
"b 78f\n"
"74:" // Height 3: Partial accumulate: partial_4_0
- "tbz x8, #2, 76f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "tbz x8, #1, 75f\n"
- "ldr d9, [x17], #0x8\n"
- "mov x25, #0x18\n"
- "ldr d13, [x24], #0x8\n"
- "ldr d17, [x23], #0x8\n"
- "tbz x8, #0, 78f\n"
- "ld1 { v9.s }[2], [x17]\n"
- "ld1 { v13.s }[2], [x24]\n"
- "ld1 { v17.s }[2], [x23]\n"
+ "tbz x17, #2, 76f\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "tbz x17, #1, 75f\n"
+ "ldr d9, [x15], #0x8\n"
+ "mov x24, #0x18\n"
+ "ldr d13, [x23], #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "tbz x17, #0, 78f\n"
+ "ld1 { v9.s }[2], [x15]\n"
+ "ld1 { v13.s }[2], [x23]\n"
+ "ld1 { v17.s }[2], [x22]\n"
"b 78f\n"
"75:" // Height 3: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x8, #0, 78f\n"
- "ldr s9, [x17, #0x0]\n"
- "ldr s13, [x24, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x17, #0, 78f\n"
+ "ldr s9, [x15, #0x0]\n"
+ "ldr s13, [x23, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
"b 78f\n"
"76:" // Height 3: Partial accumulate: partial_2_0
- "tbz x8, #1, 77f\n"
- "ldr d8, [x17], #0x8\n"
- "mov x25, #0x8\n"
- "ldr d12, [x24], #0x8\n"
- "ldr d16, [x23], #0x8\n"
- "tbz x8, #0, 78f\n"
- "ld1 { v8.s }[2], [x17]\n"
- "ld1 { v12.s }[2], [x24]\n"
- "ld1 { v16.s }[2], [x23]\n"
+ "tbz x17, #1, 77f\n"
+ "ldr d8, [x15], #0x8\n"
+ "ldr d12, [x23], #0x8\n"
+ "mov x24, #0x8\n"
+ "ldr d16, [x22], #0x8\n"
+ "tbz x17, #0, 78f\n"
+ "ld1 { v8.s }[2], [x15]\n"
+ "ld1 { v12.s }[2], [x23]\n"
+ "ld1 { v16.s }[2], [x22]\n"
"b 78f\n"
"77:" // Height 3: Partial accumulate: partial_1_0
- "ldr s8, [x17, #0x0]\n"
- "mov x25, #0x0\n"
- "ldr s12, [x24, #0x0]\n"
- "ldr s16, [x23, #0x0]\n"
+ "ldr s8, [x15, #0x0]\n"
+ "mov x24, #0x0\n"
+ "ldr s12, [x23, #0x0]\n"
+ "ldr s16, [x22, #0x0]\n"
"78:" // Height 3: Partial accumulate: Done
- "sub x17, x17, x25\n"
+ "sub x15, x15, x24\n"
"b 81f\n"
"79:" // Height 3: full accumulate
- "ldr q8, [x17, #0x0]\n"
- "ldr q9, [x17, #0x10]\n"
- "ldr q10, [x17, #0x20]\n"
- "ldr q11, [x17, #0x30]\n"
- "ldr q12, [x24, #0x0]\n"
- "ldr q13, [x24, #0x10]\n"
- "ldr q14, [x24, #0x20]\n"
- "ldr q15, [x24, #0x30]\n"
- "ldr q16, [x23, #0x0]\n"
- "ldr q17, [x23, #0x10]\n"
- "ldr q18, [x23, #0x20]\n"
- "ldr q19, [x23, #0x30]\n"
+ "ldr q8, [x15, #0x0]\n"
+ "ldr q9, [x15, #0x10]\n"
+ "ldr q10, [x15, #0x20]\n"
+ "ldr q11, [x15, #0x30]\n"
+ "ldr q12, [x23, #0x0]\n"
+ "ldr q13, [x23, #0x10]\n"
+ "ldr q14, [x23, #0x20]\n"
+ "ldr q15, [x23, #0x30]\n"
+ "ldr q16, [x22, #0x0]\n"
+ "ldr q17, [x22, #0x10]\n"
+ "ldr q18, [x22, #0x20]\n"
+ "ldr q19, [x22, #0x30]\n"
"b 81f\n"
"80:" // Height 3: no accumulate
"movi v8.4s, #0x0\n"
@@ -932,175 +932,175 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"movi v18.4s, #0x0\n"
"movi v19.4s, #0x0\n"
"81:" // Height 3: setup done
- "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
"82:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w13, [x20, x14, LSL #0x2]\n"
"tbz %x[flags], #3, 83f\n"
- "ldr x21, [%x[input_ptr], x15, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x13, [x21, #0x0]\n"
- "ldr x9, [x21, #0x8]\n"
- "ldr x27, [x21, #0x10]\n"
- "cbnz x15, 84f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x13, x13, x20\n"
- "add x9, x9, x20\n"
- "add x27, x27, x20\n"
+ "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x12, [x20, #0x0]\n"
+ "ldr x28, [x20, #0x8]\n"
+ "ldr x26, [x20, #0x10]\n"
+ "cbnz x14, 84f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x12, x12, x19\n"
+ "add x28, x28, x19\n"
+ "add x26, x26, x19\n"
"b 84f\n"
"83:" // Height 3: setup direct input
- "mov x13, %x[input_ptr]\n"
- "add x9, x13, x20\n"
- "add x27, x9, x20\n"
+ "mov x12, %x[input_ptr]\n"
+ "add x28, x12, x19\n"
+ "add x26, x28, x19\n"
"84:" // Height 3: input setup done
- "cmp x14, #0x10\n"
+ "cmp x13, #0x10\n"
"blt 87f\n"
- "ldr q0, [x13, #0x0]\n"
- "cmp x14, #0x20\n"
- "ldr q1, [x9, #0x0]\n"
- "ldr q2, [x27, #0x0]\n"
+ "ldr q0, [x12, #0x0]\n"
+ "ldr q1, [x28, #0x0]\n"
+ "cmp x13, #0x20\n"
+ "ldr q2, [x26, #0x0]\n"
"ldr q6, [x16, #0x0]\n"
- "ldr q7, [x16, #0x10]\n"
"blt 86f\n"
"85:" // Height 3: Multiply loop: Main loop head
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr x12, [x16, #0x28]\n"
+ "ldr d7, [x16, #0x10]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr x11, [x16, #0x38]\n"
+ "ldr x11, [x16, #0x18]\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
"ldr d6, [x16, #0x20]\n"
+ "ldr x10, [x16, #0x28]\n"
+ "add x12, x12, #0x10\n"
+ "mov v7.d[1], x11\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
+ "ldr x11, [x16, #0x38]\n"
+ "add x28, x28, #0x10\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x12\n"
+ "mov v6.d[1], x10\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr x12, [x16, #0x48]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
"ldr d7, [x16, #0x30]\n"
- "mov v7.d[1], x11\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
+ "ldr x10, [x16, #0x48]\n"
".inst 0x6f81e0ce // udot v14.4s, v6.16b, v1.4b[0]\n"
- "ldr x11, [x16, #0x58]\n"
+ "ldr x9, [x12, #0x8]\n"
".inst 0x6f82e0d2 // udot v18.4s, v6.16b, v2.4b[0]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x16, #0x40]\n"
+ "add x26, x26, #0x10\n"
".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x12\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6f81e0ef // udot v15.4s, v7.16b, v1.4b[0]\n"
- "ldr x12, [x16, #0x68]\n"
+ "ldr x11, [x16, #0x58]\n"
".inst 0x6f82e0f3 // udot v19.4s, v7.16b, v2.4b[0]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x16, #0x50]\n"
- "mov v7.d[1], x11\n"
+ "sub x13, x13, #0x10\n"
".inst 0x6fa0e0c8 // udot v8.4s, v6.16b, v0.4b[1]\n"
+ "ldr x10, [x16, #0x68]\n"
".inst 0x6fa1e0cc // udot v12.4s, v6.16b, v1.4b[1]\n"
- "ldr x11, [x16, #0x78]\n"
+ "ldr x27, [x28, #0x8]\n"
".inst 0x6fa2e0d0 // udot v16.4s, v6.16b, v2.4b[1]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x16, #0x60]\n"
+ "cmp x13, #0x20\n"
".inst 0x6fa0e0e9 // udot v9.4s, v7.16b, v0.4b[1]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x16, #0x78]\n"
".inst 0x6fa1e0ed // udot v13.4s, v7.16b, v1.4b[1]\n"
- "ldr x12, [x16, #0x88]\n"
+ "ldr x25, [x26, #0x8]\n"
".inst 0x6fa2e0f1 // udot v17.4s, v7.16b, v2.4b[1]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x16, #0x70]\n"
- "mov v7.d[1], x11\n"
".inst 0x6fa0e0ca // udot v10.4s, v6.16b, v0.4b[1]\n"
+ "ldr x10, [x16, #0x88]\n"
".inst 0x6fa1e0ce // udot v14.4s, v6.16b, v1.4b[1]\n"
- "ldr x11, [x16, #0x98]\n"
".inst 0x6fa2e0d2 // udot v18.4s, v6.16b, v2.4b[1]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x16, #0x80]\n"
".inst 0x6fa0e0eb // udot v11.4s, v7.16b, v0.4b[1]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x16, #0x98]\n"
".inst 0x6fa1e0ef // udot v15.4s, v7.16b, v1.4b[1]\n"
- "ldr x12, [x16, #0xa8]\n"
".inst 0x6fa2e0f3 // udot v19.4s, v7.16b, v2.4b[1]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x16, #0x90]\n"
- "mov v7.d[1], x11\n"
".inst 0x6f80e8c8 // udot v8.4s, v6.16b, v0.4b[2]\n"
+ "ldr x10, [x16, #0xa8]\n"
".inst 0x6f81e8cc // udot v12.4s, v6.16b, v1.4b[2]\n"
- "ldr x11, [x16, #0xb8]\n"
".inst 0x6f82e8d0 // udot v16.4s, v6.16b, v2.4b[2]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x16, #0xa0]\n"
".inst 0x6f80e8e9 // udot v9.4s, v7.16b, v0.4b[2]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x16, #0xb8]\n"
".inst 0x6f81e8ed // udot v13.4s, v7.16b, v1.4b[2]\n"
- "ldr x12, [x16, #0xc8]\n"
".inst 0x6f82e8f1 // udot v17.4s, v7.16b, v2.4b[2]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x16, #0xb0]\n"
- "mov v7.d[1], x11\n"
".inst 0x6f80e8ca // udot v10.4s, v6.16b, v0.4b[2]\n"
+ "ldr x10, [x16, #0xc8]\n"
".inst 0x6f81e8ce // udot v14.4s, v6.16b, v1.4b[2]\n"
- "ldr x11, [x16, #0xd8]\n"
".inst 0x6f82e8d2 // udot v18.4s, v6.16b, v2.4b[2]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x16, #0xc0]\n"
".inst 0x6f80e8eb // udot v11.4s, v7.16b, v0.4b[2]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x16, #0xd8]\n"
".inst 0x6f81e8ef // udot v15.4s, v7.16b, v1.4b[2]\n"
- "ldr x12, [x16, #0xe8]\n"
".inst 0x6f82e8f3 // udot v19.4s, v7.16b, v2.4b[2]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x16, #0xd0]\n"
- "mov v7.d[1], x11\n"
".inst 0x6fa0e8c8 // udot v8.4s, v6.16b, v0.4b[3]\n"
+ "ldr x10, [x16, #0xe8]\n"
".inst 0x6fa1e8cc // udot v12.4s, v6.16b, v1.4b[3]\n"
- "ldr x11, [x16, #0xf8]\n"
".inst 0x6fa2e8d0 // udot v16.4s, v6.16b, v2.4b[3]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x16, #0xe0]\n"
".inst 0x6fa0e8e9 // udot v9.4s, v7.16b, v0.4b[3]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x16, #0xf8]\n"
".inst 0x6fa1e8ed // udot v13.4s, v7.16b, v1.4b[3]\n"
- "add x13, x13, #0x10\n"
".inst 0x6fa2e8f1 // udot v17.4s, v7.16b, v2.4b[3]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x16, #0xf0]\n"
- "mov v7.d[1], x11\n"
- "add x9, x9, #0x10\n"
- "add x27, x27, #0x10\n"
"add x16, x16, #0x100\n"
".inst 0x6fa0e8ca // udot v10.4s, v6.16b, v0.4b[3]\n"
- "ldr x12, [x16, #0x8]\n"
+ "ldr x10, [x16, #0x8]\n"
".inst 0x6fa1e8ce // udot v14.4s, v6.16b, v1.4b[3]\n"
- "ldr x10, [x13, #0x8]\n"
".inst 0x6fa2e8d2 // udot v18.4s, v6.16b, v2.4b[3]\n"
+ "mov v7.d[1], x11\n"
"ldr d6, [x16, #0x0]\n"
".inst 0x6fa0e8eb // udot v11.4s, v7.16b, v0.4b[3]\n"
- "ldr d0, [x13, #0x0]\n"
+ "ldr d0, [x12, #0x0]\n"
".inst 0x6fa1e8ef // udot v15.4s, v7.16b, v1.4b[3]\n"
- "ldr d1, [x9, #0x0]\n"
- "ldr x28, [x9, #0x8]\n"
+ "ldr d1, [x28, #0x0]\n"
".inst 0x6fa2e8f3 // udot v19.4s, v7.16b, v2.4b[3]\n"
- "ldr d2, [x27, #0x0]\n"
- "sub x14, x14, #0x10\n"
- "ldr d7, [x16, #0x10]\n"
- "cmp x14, #0x20\n"
- "ldr x26, [x27, #0x8]\n"
- "mov v6.d[1], x12\n"
- "ldr x11, [x16, #0x18]\n"
- "mov v0.d[1], x10\n"
- "prfm pldl1keep, [x13, #0x80]\n"
- "mov v1.d[1], x28\n"
- "prfm pldl1keep, [x9, #0x80]\n"
- "mov v2.d[1], x26\n"
- "prfm pldl1keep, [x27, #0x80]\n"
- "mov v7.d[1], x11\n"
+ "mov v6.d[1], x10\n"
+ "mov v0.d[1], x9\n"
+ "ldr d2, [x26, #0x0]\n"
+ "mov v1.d[1], x27\n"
+ "mov v2.d[1], x25\n"
"bge 85b\n"
"86:" // Height 3: Multiply loop: Single iteration only
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
- "add x13, x13, #0x10\n"
+ "ldr q7, [x16, #0x10]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "sub x13, x13, #0x10\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
"ldr q6, [x16, #0x20]\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
- "add x27, x27, #0x10\n"
+ "add x12, x12, #0x10\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
- "sub x14, x14, #0x10\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
"ldr q7, [x16, #0x30]\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "add x28, x28, #0x10\n"
".inst 0x6f81e0ce // udot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
".inst 0x6f82e0d2 // udot v18.4s, v6.16b, v2.4b[0]\n"
"ldr q6, [x16, #0x40]\n"
".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "add x26, x26, #0x10\n"
".inst 0x6f81e0ef // udot v15.4s, v7.16b, v1.4b[0]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6f82e0f3 // udot v19.4s, v7.16b, v2.4b[0]\n"
"ldr q7, [x16, #0x50]\n"
".inst 0x6fa0e0c8 // udot v8.4s, v6.16b, v0.4b[1]\n"
@@ -1151,18 +1151,18 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
".inst 0x6fa1e8ef // udot v15.4s, v7.16b, v1.4b[3]\n"
".inst 0x6fa2e8f3 // udot v19.4s, v7.16b, v2.4b[3]\n"
"87:" // Height 3: Multiply loop: Main loop skip
- "cbz x14, 92f\n"
- "cmp x14, #0x4\n"
+ "cbz x13, 92f\n"
+ "cmp x13, #0x4\n"
"blt 89f\n"
"88:" // Height 3: Multiply loop: Odd block loop
- "ldr s0, [x13], #0x4\n"
- "sub x14, x14, #0x4\n"
- "ldr s1, [x9], #0x4\n"
- "cmp x14, #0x4\n"
- "ldr s2, [x27], #0x4\n"
+ "ldr s0, [x12], #0x4\n"
+ "sub x13, x13, #0x4\n"
+ "ldr s1, [x28], #0x4\n"
+ "cmp x13, #0x4\n"
+ "ldr s2, [x26], #0x4\n"
"ldr q6, [x16, #0x0]\n"
- ".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x16, #0x10]\n"
+ ".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
"ldr q6, [x16, #0x20]\n"
@@ -1178,25 +1178,25 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
".inst 0x6f81e0ef // udot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f82e0f3 // udot v19.4s, v7.16b, v2.4b[0]\n"
"bge 88b\n"
+ "cbz x13, 92f\n"
"89:" // Height 3: Multiply loop: Skip odd blocks
- "cbz x14, 92f\n"
- "tbz x14, #1, 90f\n"
- "ldr h0, [x13], #0x2\n"
- "ldr h1, [x9], #0x2\n"
- "ldr h2, [x27], #0x2\n"
- "tbz x14, #0, 91f\n"
- "ld1 { v0.b }[2], [x13]\n"
- "ld1 { v1.b }[2], [x9]\n"
- "ld1 { v2.b }[2], [x27]\n"
+ "tbz x13, #1, 90f\n"
+ "ldr h0, [x12], #0x2\n"
+ "ldr h1, [x28], #0x2\n"
+ "ldr h2, [x26], #0x2\n"
+ "tbz x13, #0, 91f\n"
+ "ld1 { v0.b }[2], [x12]\n"
+ "ld1 { v1.b }[2], [x28]\n"
+ "ld1 { v2.b }[2], [x26]\n"
"b 91f\n"
"90:" // Height 3: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x13, #0x0]\n"
- "ldr b1, [x9, #0x0]\n"
- "ldr b2, [x27, #0x0]\n"
+ "ldr b0, [x12, #0x0]\n"
+ "ldr b1, [x28, #0x0]\n"
+ "ldr b2, [x26, #0x0]\n"
"91:" // Height 3: Multiply loop: Ragged operand read: Done
"ldr q6, [x16, #0x0]\n"
- ".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x16, #0x10]\n"
+ ".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
"ldr q6, [x16, #0x20]\n"
@@ -1212,246 +1212,246 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
".inst 0x6f81e0ef // udot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f82e0f3 // udot v19.4s, v7.16b, v2.4b[0]\n"
"92:" // Height 3: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x15, x15, #0x1\n"
- "cmp x15, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x14, x14, #0x1\n"
+ "cmp x14, x19\n"
"bne 82b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "cmp x8, #0x10\n"
- "prfm pstl1keep, [x17, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x15, #0x0]\n"
+ "cmp x17, #0x10\n"
+ "add x23, x15, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
"bge 101f\n"
- "tbz x8, #3, 96f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v9.4s }, [x17], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v13.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "tbz x8, #2, 94f\n"
- "st1 { v10.4s }, [x17], #0x10\n"
- "st1 { v14.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "tbz x8, #1, 93f\n"
- "str d11, [x17], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
- "tbz x8, #0, 100f\n"
- "st1 { v11.s }[2], [x17]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
+ "tbz x17, #3, 96f\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
+ "st1 { v9.4s }, [x15], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v13.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
+ "tbz x17, #2, 94f\n"
+ "st1 { v10.4s }, [x15], #0x10\n"
+ "st1 { v14.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
+ "tbz x17, #1, 93f\n"
+ "str d11, [x15], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "tbz x17, #0, 100f\n"
+ "st1 { v11.s }[2], [x15]\n"
+ "st1 { v15.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x22]\n"
"b 100f\n"
"93:" // Height 3: Partial direct writeback: partial_1_12
- "tbz x8, #0, 100f\n"
- "str s11, [x17, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
+ "tbz x17, #0, 100f\n"
+ "str s11, [x15, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
"b 100f\n"
"94:" // Height 3: Partial direct writeback: partial_2_8
- "tbz x8, #1, 95f\n"
- "str d10, [x17], #0x8\n"
- "str d14, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
- "tbz x8, #0, 100f\n"
- "st1 { v10.s }[2], [x17]\n"
- "st1 { v14.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
+ "tbz x17, #1, 95f\n"
+ "str d10, [x15], #0x8\n"
+ "str d14, [x23], #0x8\n"
+ "str d18, [x22], #0x8\n"
+ "tbz x17, #0, 100f\n"
+ "st1 { v10.s }[2], [x15]\n"
+ "st1 { v14.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x22]\n"
"b 100f\n"
"95:" // Height 3: Partial direct writeback: partial_1_8
- "tbz x8, #0, 100f\n"
- "str s10, [x17, #0x0]\n"
- "str s14, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
+ "tbz x17, #0, 100f\n"
+ "str s10, [x15, #0x0]\n"
+ "str s14, [x23, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
"b 100f\n"
"96:" // Height 3: Partial direct writeback: partial_4_0
- "tbz x8, #2, 98f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "tbz x8, #1, 97f\n"
- "str d9, [x17], #0x8\n"
- "str d13, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
- "tbz x8, #0, 100f\n"
- "st1 { v9.s }[2], [x17]\n"
- "st1 { v13.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
+ "tbz x17, #2, 98f\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "tbz x17, #1, 97f\n"
+ "str d9, [x15], #0x8\n"
+ "str d13, [x23], #0x8\n"
+ "str d17, [x22], #0x8\n"
+ "tbz x17, #0, 100f\n"
+ "st1 { v9.s }[2], [x15]\n"
+ "st1 { v13.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x22]\n"
"b 100f\n"
"97:" // Height 3: Partial direct writeback: partial_1_4
- "tbz x8, #0, 100f\n"
- "str s9, [x17, #0x0]\n"
- "str s13, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
+ "tbz x17, #0, 100f\n"
+ "str s9, [x15, #0x0]\n"
+ "str s13, [x23, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
"b 100f\n"
"98:" // Height 3: Partial direct writeback: partial_2_0
- "tbz x8, #1, 99f\n"
- "str d8, [x17], #0x8\n"
- "str d12, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "tbz x8, #0, 100f\n"
- "st1 { v8.s }[2], [x17]\n"
- "st1 { v12.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
+ "tbz x17, #1, 99f\n"
+ "str d8, [x15], #0x8\n"
+ "str d12, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "tbz x17, #0, 100f\n"
+ "st1 { v8.s }[2], [x15]\n"
+ "st1 { v12.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x22]\n"
"b 100f\n"
"99:" // Height 3: Partial direct writeback: partial_1_0
- "str s8, [x17, #0x0]\n"
- "str s12, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
+ "str s8, [x15, #0x0]\n"
+ "str s12, [x23, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
"100:" // Height 3: Partial direct writeback: Done
"b 102f\n"
"101:" // Height 3: Full writeback
- "str q8, [x17, #0x0]\n"
- "str q9, [x17, #0x10]\n"
- "str q10, [x17, #0x20]\n"
- "str q11, [x17, #0x30]\n"
- "add x17, x17, #0x40\n"
- "str q12, [x24, #0x0]\n"
- "str q13, [x24, #0x10]\n"
- "str q14, [x24, #0x20]\n"
- "str q15, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
+ "str q8, [x15, #0x0]\n"
+ "str q9, [x15, #0x10]\n"
+ "str q10, [x15, #0x20]\n"
+ "str q11, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
+ "str q12, [x23, #0x0]\n"
+ "str q13, [x23, #0x10]\n"
+ "str q14, [x23, #0x20]\n"
+ "str q15, [x23, #0x30]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q17, [x22, #0x10]\n"
+ "str q18, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
"102:" // Height 3: Writeback done
- "subs x8, x8, #0x10\n"
+ "subs x17, x17, #0x10\n"
"bgt 70b\n"
"b 206f\n"
"103:" // Height 4
- "ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x17, %x[output_ptr]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x15, %x[output_ptr]\n"
"ldr x16, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"104:" // Height 4: Column loop
"tbz %x[flags], #0, 114f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "cmp x8, #0x10\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x17, #0x10\n"
+ "add x23, x15, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"bge 113f\n"
- "tbz x8, #3, 108f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v9.4s }, [x17], #0x10\n"
- "ld1 { v13.4s }, [x24], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "tbz x8, #2, 106f\n"
- "ld1 { v10.4s }, [x17], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "tbz x8, #1, 105f\n"
- "ldr d11, [x17], #0x8\n"
- "mov x25, #0x38\n"
- "ldr d15, [x24], #0x8\n"
- "ldr d19, [x23], #0x8\n"
- "ldr d23, [x22], #0x8\n"
- "tbz x8, #0, 112f\n"
- "ld1 { v11.s }[2], [x17]\n"
- "ld1 { v15.s }[2], [x24]\n"
- "ld1 { v19.s }[2], [x23]\n"
- "ld1 { v23.s }[2], [x22]\n"
+ "tbz x17, #3, 108f\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v9.4s }, [x15], #0x10\n"
+ "ld1 { v13.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "tbz x17, #2, 106f\n"
+ "ld1 { v10.4s }, [x15], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
+ "tbz x17, #1, 105f\n"
+ "ldr d11, [x15], #0x8\n"
+ "mov x24, #0x38\n"
+ "ldr d15, [x23], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
+ "ldr d23, [x21], #0x8\n"
+ "tbz x17, #0, 112f\n"
+ "ld1 { v11.s }[2], [x15]\n"
+ "ld1 { v15.s }[2], [x23]\n"
+ "ld1 { v19.s }[2], [x22]\n"
+ "ld1 { v23.s }[2], [x21]\n"
"b 112f\n"
"105:" // Height 4: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x8, #0, 112f\n"
- "ldr s11, [x17, #0x0]\n"
- "ldr s15, [x24, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s23, [x22, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x17, #0, 112f\n"
+ "ldr s11, [x15, #0x0]\n"
+ "ldr s15, [x23, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
+ "ldr s23, [x21, #0x0]\n"
"b 112f\n"
"106:" // Height 4: Partial accumulate: partial_2_8
- "tbz x8, #1, 107f\n"
- "ldr d10, [x17], #0x8\n"
- "mov x25, #0x28\n"
- "ldr d14, [x24], #0x8\n"
- "ldr d18, [x23], #0x8\n"
- "ldr d22, [x22], #0x8\n"
- "tbz x8, #0, 112f\n"
- "ld1 { v10.s }[2], [x17]\n"
- "ld1 { v14.s }[2], [x24]\n"
- "ld1 { v18.s }[2], [x23]\n"
- "ld1 { v22.s }[2], [x22]\n"
+ "tbz x17, #1, 107f\n"
+ "ldr d10, [x15], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
+ "mov x24, #0x28\n"
+ "ldr d18, [x22], #0x8\n"
+ "ldr d22, [x21], #0x8\n"
+ "tbz x17, #0, 112f\n"
+ "ld1 { v10.s }[2], [x15]\n"
+ "ld1 { v14.s }[2], [x23]\n"
+ "ld1 { v18.s }[2], [x22]\n"
+ "ld1 { v22.s }[2], [x21]\n"
"b 112f\n"
"107:" // Height 4: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x8, #0, 112f\n"
- "ldr s10, [x17, #0x0]\n"
- "ldr s14, [x24, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
- "ldr s22, [x22, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x17, #0, 112f\n"
+ "ldr s10, [x15, #0x0]\n"
+ "ldr s14, [x23, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
+ "ldr s22, [x21, #0x0]\n"
"b 112f\n"
"108:" // Height 4: Partial accumulate: partial_4_0
- "tbz x8, #2, 110f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "tbz x8, #1, 109f\n"
- "ldr d9, [x17], #0x8\n"
- "mov x25, #0x18\n"
- "ldr d13, [x24], #0x8\n"
- "ldr d17, [x23], #0x8\n"
- "ldr d21, [x22], #0x8\n"
- "tbz x8, #0, 112f\n"
- "ld1 { v9.s }[2], [x17]\n"
- "ld1 { v13.s }[2], [x24]\n"
- "ld1 { v17.s }[2], [x23]\n"
- "ld1 { v21.s }[2], [x22]\n"
+ "tbz x17, #2, 110f\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "tbz x17, #1, 109f\n"
+ "ldr d9, [x15], #0x8\n"
+ "mov x24, #0x18\n"
+ "ldr d13, [x23], #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "ldr d21, [x21], #0x8\n"
+ "tbz x17, #0, 112f\n"
+ "ld1 { v9.s }[2], [x15]\n"
+ "ld1 { v13.s }[2], [x23]\n"
+ "ld1 { v17.s }[2], [x22]\n"
+ "ld1 { v21.s }[2], [x21]\n"
"b 112f\n"
"109:" // Height 4: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x8, #0, 112f\n"
- "ldr s9, [x17, #0x0]\n"
- "ldr s13, [x24, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
- "ldr s21, [x22, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x17, #0, 112f\n"
+ "ldr s9, [x15, #0x0]\n"
+ "ldr s13, [x23, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
+ "ldr s21, [x21, #0x0]\n"
"b 112f\n"
"110:" // Height 4: Partial accumulate: partial_2_0
- "tbz x8, #1, 111f\n"
- "ldr d8, [x17], #0x8\n"
- "mov x25, #0x8\n"
- "ldr d12, [x24], #0x8\n"
- "ldr d16, [x23], #0x8\n"
- "ldr d20, [x22], #0x8\n"
- "tbz x8, #0, 112f\n"
- "ld1 { v8.s }[2], [x17]\n"
- "ld1 { v12.s }[2], [x24]\n"
- "ld1 { v16.s }[2], [x23]\n"
- "ld1 { v20.s }[2], [x22]\n"
+ "tbz x17, #1, 111f\n"
+ "ldr d8, [x15], #0x8\n"
+ "ldr d12, [x23], #0x8\n"
+ "mov x24, #0x8\n"
+ "ldr d16, [x22], #0x8\n"
+ "ldr d20, [x21], #0x8\n"
+ "tbz x17, #0, 112f\n"
+ "ld1 { v8.s }[2], [x15]\n"
+ "ld1 { v12.s }[2], [x23]\n"
+ "ld1 { v16.s }[2], [x22]\n"
+ "ld1 { v20.s }[2], [x21]\n"
"b 112f\n"
"111:" // Height 4: Partial accumulate: partial_1_0
- "ldr s8, [x17, #0x0]\n"
- "mov x25, #0x0\n"
- "ldr s12, [x24, #0x0]\n"
- "ldr s16, [x23, #0x0]\n"
- "ldr s20, [x22, #0x0]\n"
+ "ldr s8, [x15, #0x0]\n"
+ "mov x24, #0x0\n"
+ "ldr s12, [x23, #0x0]\n"
+ "ldr s16, [x22, #0x0]\n"
+ "ldr s20, [x21, #0x0]\n"
"112:" // Height 4: Partial accumulate: Done
- "sub x17, x17, x25\n"
+ "sub x15, x15, x24\n"
"b 115f\n"
"113:" // Height 4: full accumulate
- "ldr q8, [x17, #0x0]\n"
- "ldr q9, [x17, #0x10]\n"
- "ldr q10, [x17, #0x20]\n"
- "ldr q11, [x17, #0x30]\n"
- "ldr q12, [x24, #0x0]\n"
- "ldr q13, [x24, #0x10]\n"
- "ldr q14, [x24, #0x20]\n"
- "ldr q15, [x24, #0x30]\n"
- "ldr q16, [x23, #0x0]\n"
- "ldr q17, [x23, #0x10]\n"
- "ldr q18, [x23, #0x20]\n"
- "ldr q19, [x23, #0x30]\n"
- "ldr q20, [x22, #0x0]\n"
- "ldr q21, [x22, #0x10]\n"
- "ldr q22, [x22, #0x20]\n"
- "ldr q23, [x22, #0x30]\n"
+ "ldr q8, [x15, #0x0]\n"
+ "ldr q9, [x15, #0x10]\n"
+ "ldr q10, [x15, #0x20]\n"
+ "ldr q11, [x15, #0x30]\n"
+ "ldr q12, [x23, #0x0]\n"
+ "ldr q13, [x23, #0x10]\n"
+ "ldr q14, [x23, #0x20]\n"
+ "ldr q15, [x23, #0x30]\n"
+ "ldr q16, [x22, #0x0]\n"
+ "ldr q17, [x22, #0x10]\n"
+ "ldr q18, [x22, #0x20]\n"
+ "ldr q19, [x22, #0x30]\n"
+ "ldr q20, [x21, #0x0]\n"
+ "ldr q21, [x21, #0x10]\n"
+ "ldr q22, [x21, #0x20]\n"
+ "ldr q23, [x21, #0x30]\n"
"b 115f\n"
"114:" // Height 4: no accumulate
"movi v8.4s, #0x0\n"
@@ -1471,204 +1471,204 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"movi v22.4s, #0x0\n"
"movi v23.4s, #0x0\n"
"115:" // Height 4: setup done
- "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
"116:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w13, [x20, x14, LSL #0x2]\n"
"tbz %x[flags], #3, 117f\n"
- "ldr x21, [%x[input_ptr], x15, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x13, [x21, #0x0]\n"
- "ldr x9, [x21, #0x8]\n"
- "ldr x27, [x21, #0x10]\n"
- "ldr x25, [x21, #0x18]\n"
- "cbnz x15, 118f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x13, x13, x20\n"
- "add x9, x9, x20\n"
- "add x27, x27, x20\n"
- "add x25, x25, x20\n"
+ "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x12, [x20, #0x0]\n"
+ "ldr x28, [x20, #0x8]\n"
+ "ldr x26, [x20, #0x10]\n"
+ "ldr x24, [x20, #0x18]\n"
+ "cbnz x14, 118f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x12, x12, x19\n"
+ "add x28, x28, x19\n"
+ "add x26, x26, x19\n"
+ "add x24, x24, x19\n"
"b 118f\n"
"117:" // Height 4: setup direct input
- "mov x13, %x[input_ptr]\n"
- "add x9, x13, x20\n"
- "add x27, x9, x20\n"
- "add x25, x27, x20\n"
+ "mov x12, %x[input_ptr]\n"
+ "add x28, x12, x19\n"
+ "add x26, x28, x19\n"
+ "add x24, x26, x19\n"
"118:" // Height 4: input setup done
- "cmp x14, #0x10\n"
+ "cmp x13, #0x10\n"
"blt 121f\n"
- "ldr q0, [x13, #0x0]\n"
- "cmp x14, #0x20\n"
- "ldr q1, [x9, #0x0]\n"
- "ldr q2, [x27, #0x0]\n"
- "ldr q3, [x25, #0x0]\n"
+ "ldr q0, [x12, #0x0]\n"
+ "ldr q1, [x28, #0x0]\n"
+ "cmp x13, #0x20\n"
+ "ldr q2, [x26, #0x0]\n"
+ "ldr q3, [x24, #0x0]\n"
"ldr q6, [x16, #0x0]\n"
- "ldr q7, [x16, #0x10]\n"
"blt 120f\n"
"119:" // Height 4: Multiply loop: Main loop head
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr x12, [x16, #0x28]\n"
+ "ldr d7, [x16, #0x10]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr x11, [x16, #0x38]\n"
+ "ldr x11, [x16, #0x18]\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
- "add x13, x13, #0x10\n"
+ "ldr x10, [x16, #0x28]\n"
".inst 0x6f83e0d4 // udot v20.4s, v6.16b, v3.4b[0]\n"
"ldr d6, [x16, #0x20]\n"
+ "mov v7.d[1], x11\n"
+ "ldr x11, [x16, #0x38]\n"
+ "add x12, x12, #0x10\n"
+ "add x28, x28, #0x10\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x12\n"
+ "mov v6.d[1], x10\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr x12, [x16, #0x48]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
".inst 0x6f83e0f5 // udot v21.4s, v7.16b, v3.4b[0]\n"
"ldr d7, [x16, #0x30]\n"
- "mov v7.d[1], x11\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
+ "ldr x10, [x16, #0x48]\n"
".inst 0x6f81e0ce // udot v14.4s, v6.16b, v1.4b[0]\n"
- "ldr x11, [x16, #0x58]\n"
+ "ldr x9, [x12, #0x8]\n"
".inst 0x6f82e0d2 // udot v18.4s, v6.16b, v2.4b[0]\n"
- "add x27, x27, #0x10\n"
+ "mov v7.d[1], x11\n"
".inst 0x6f83e0d6 // udot v22.4s, v6.16b, v3.4b[0]\n"
"ldr d6, [x16, #0x40]\n"
".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x16, #0x58]\n"
".inst 0x6f81e0ef // udot v15.4s, v7.16b, v1.4b[0]\n"
- "ldr x12, [x16, #0x68]\n"
+ "ldr x27, [x28, #0x8]\n"
".inst 0x6f82e0f3 // udot v19.4s, v7.16b, v2.4b[0]\n"
- "add x25, x25, #0x10\n"
+ "mov v6.d[1], x10\n"
".inst 0x6f83e0f7 // udot v23.4s, v7.16b, v3.4b[0]\n"
"ldr d7, [x16, #0x50]\n"
- "mov v7.d[1], x11\n"
".inst 0x6fa0e0c8 // udot v8.4s, v6.16b, v0.4b[1]\n"
+ "ldr x10, [x16, #0x68]\n"
".inst 0x6fa1e0cc // udot v12.4s, v6.16b, v1.4b[1]\n"
- "ldr x11, [x16, #0x78]\n"
+ "add x26, x26, #0x10\n"
".inst 0x6fa2e0d0 // udot v16.4s, v6.16b, v2.4b[1]\n"
- "ldr x10, [x13, #0x8]\n"
+ "mov v7.d[1], x11\n"
".inst 0x6fa3e0d4 // udot v20.4s, v6.16b, v3.4b[1]\n"
- "ldr d6, [x16, #0x60]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6fa0e0e9 // udot v9.4s, v7.16b, v0.4b[1]\n"
- "mov v6.d[1], x12\n"
+ "ldr d6, [x16, #0x60]\n"
".inst 0x6fa1e0ed // udot v13.4s, v7.16b, v1.4b[1]\n"
- "ldr x12, [x16, #0x88]\n"
+ "ldr x11, [x16, #0x78]\n"
".inst 0x6fa2e0f1 // udot v17.4s, v7.16b, v2.4b[1]\n"
- "ldr x28, [x9, #0x8]\n"
+ "ldr x25, [x26, #0x8]\n"
".inst 0x6fa3e0f5 // udot v21.4s, v7.16b, v3.4b[1]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x16, #0x70]\n"
- "mov v7.d[1], x11\n"
+ "add x24, x24, #0x10\n"
".inst 0x6fa0e0ca // udot v10.4s, v6.16b, v0.4b[1]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6fa1e0ce // udot v14.4s, v6.16b, v1.4b[1]\n"
- "ldr x11, [x16, #0x98]\n"
+ "ldr x10, [x16, #0x88]\n"
".inst 0x6fa2e0d2 // udot v18.4s, v6.16b, v2.4b[1]\n"
- "ldr x26, [x27, #0x8]\n"
+ "mov v7.d[1], x11\n"
".inst 0x6fa3e0d6 // udot v22.4s, v6.16b, v3.4b[1]\n"
"ldr d6, [x16, #0x80]\n"
".inst 0x6fa0e0eb // udot v11.4s, v7.16b, v0.4b[1]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x16, #0x98]\n"
".inst 0x6fa1e0ef // udot v15.4s, v7.16b, v1.4b[1]\n"
- "ldr x12, [x16, #0xa8]\n"
+ "ldr x23, [x24, #0x8]\n"
".inst 0x6fa2e0f3 // udot v19.4s, v7.16b, v2.4b[1]\n"
- "ldr x24, [x25, #0x8]\n"
+ "mov v6.d[1], x10\n"
".inst 0x6fa3e0f7 // udot v23.4s, v7.16b, v3.4b[1]\n"
"ldr d7, [x16, #0x90]\n"
- "mov v7.d[1], x11\n"
".inst 0x6f80e8c8 // udot v8.4s, v6.16b, v0.4b[2]\n"
+ "ldr x10, [x16, #0xa8]\n"
".inst 0x6f81e8cc // udot v12.4s, v6.16b, v1.4b[2]\n"
- "ldr x11, [x16, #0xb8]\n"
+ "sub x13, x13, #0x10\n"
".inst 0x6f82e8d0 // udot v16.4s, v6.16b, v2.4b[2]\n"
- "sub x14, x14, #0x10\n"
+ "mov v7.d[1], x11\n"
".inst 0x6f83e8d4 // udot v20.4s, v6.16b, v3.4b[2]\n"
"ldr d6, [x16, #0xa0]\n"
".inst 0x6f80e8e9 // udot v9.4s, v7.16b, v0.4b[2]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x16, #0xb8]\n"
".inst 0x6f81e8ed // udot v13.4s, v7.16b, v1.4b[2]\n"
- "ldr x12, [x16, #0xc8]\n"
+ "cmp x13, #0x20\n"
".inst 0x6f82e8f1 // udot v17.4s, v7.16b, v2.4b[2]\n"
- "cmp x14, #0x20\n"
+ "mov v6.d[1], x10\n"
".inst 0x6f83e8f5 // udot v21.4s, v7.16b, v3.4b[2]\n"
"ldr d7, [x16, #0xb0]\n"
- "mov v7.d[1], x11\n"
".inst 0x6f80e8ca // udot v10.4s, v6.16b, v0.4b[2]\n"
+ "ldr x10, [x16, #0xc8]\n"
".inst 0x6f81e8ce // udot v14.4s, v6.16b, v1.4b[2]\n"
- "ldr x11, [x16, #0xd8]\n"
".inst 0x6f82e8d2 // udot v18.4s, v6.16b, v2.4b[2]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "mov v7.d[1], x11\n"
".inst 0x6f83e8d6 // udot v22.4s, v6.16b, v3.4b[2]\n"
"ldr d6, [x16, #0xc0]\n"
".inst 0x6f80e8eb // udot v11.4s, v7.16b, v0.4b[2]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x16, #0xd8]\n"
".inst 0x6f81e8ef // udot v15.4s, v7.16b, v1.4b[2]\n"
- "ldr x12, [x16, #0xe8]\n"
".inst 0x6f82e8f3 // udot v19.4s, v7.16b, v2.4b[2]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "mov v6.d[1], x10\n"
".inst 0x6f83e8f7 // udot v23.4s, v7.16b, v3.4b[2]\n"
"ldr d7, [x16, #0xd0]\n"
- "mov v7.d[1], x11\n"
".inst 0x6fa0e8c8 // udot v8.4s, v6.16b, v0.4b[3]\n"
+ "ldr x10, [x16, #0xe8]\n"
".inst 0x6fa1e8cc // udot v12.4s, v6.16b, v1.4b[3]\n"
- "ldr x11, [x16, #0xf8]\n"
".inst 0x6fa2e8d0 // udot v16.4s, v6.16b, v2.4b[3]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "mov v7.d[1], x11\n"
".inst 0x6fa3e8d4 // udot v20.4s, v6.16b, v3.4b[3]\n"
"ldr d6, [x16, #0xe0]\n"
".inst 0x6fa0e8e9 // udot v9.4s, v7.16b, v0.4b[3]\n"
- "mov v6.d[1], x12\n"
+ "ldr x11, [x16, #0xf8]\n"
".inst 0x6fa1e8ed // udot v13.4s, v7.16b, v1.4b[3]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6fa2e8f1 // udot v17.4s, v7.16b, v2.4b[3]\n"
+ "mov v6.d[1], x10\n"
".inst 0x6fa3e8f5 // udot v21.4s, v7.16b, v3.4b[3]\n"
"ldr d7, [x16, #0xf0]\n"
- "mov v7.d[1], x11\n"
- "add x16, x16, #0x100\n"
".inst 0x6fa0e8ca // udot v10.4s, v6.16b, v0.4b[3]\n"
- "ldr x12, [x16, #0x8]\n"
+ "add x16, x16, #0x100\n"
".inst 0x6fa1e8ce // udot v14.4s, v6.16b, v1.4b[3]\n"
- "ldr x11, [x16, #0x18]\n"
+ "ldr x10, [x16, #0x8]\n"
".inst 0x6fa2e8d2 // udot v18.4s, v6.16b, v2.4b[3]\n"
+ "mov v7.d[1], x11\n"
".inst 0x6fa3e8d6 // udot v22.4s, v6.16b, v3.4b[3]\n"
"ldr d6, [x16, #0x0]\n"
".inst 0x6fa0e8eb // udot v11.4s, v7.16b, v0.4b[3]\n"
- "ldr d0, [x13, #0x0]\n"
+ "ldr d0, [x12, #0x0]\n"
".inst 0x6fa1e8ef // udot v15.4s, v7.16b, v1.4b[3]\n"
- "ldr d1, [x9, #0x0]\n"
+ "ldr d1, [x28, #0x0]\n"
".inst 0x6fa2e8f3 // udot v19.4s, v7.16b, v2.4b[3]\n"
- "ldr d2, [x27, #0x0]\n"
+ "mov v6.d[1], x10\n"
".inst 0x6fa3e8f7 // udot v23.4s, v7.16b, v3.4b[3]\n"
- "ldr d3, [x25, #0x0]\n"
- "ldr d7, [x16, #0x10]\n"
- "mov v6.d[1], x12\n"
- "mov v0.d[1], x10\n"
- "mov v1.d[1], x28\n"
- "mov v2.d[1], x26\n"
- "mov v3.d[1], x24\n"
- "mov v7.d[1], x11\n"
+ "mov v0.d[1], x9\n"
+ "mov v1.d[1], x27\n"
+ "ldr d2, [x26, #0x0]\n"
+ "ldr d3, [x24, #0x0]\n"
+ "mov v2.d[1], x25\n"
+ "mov v3.d[1], x23\n"
"bge 119b\n"
"120:" // Height 4: Multiply loop: Single iteration only
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
- "add x13, x13, #0x10\n"
+ "ldr q7, [x16, #0x10]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "sub x13, x13, #0x10\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
- "add x27, x27, #0x10\n"
+ "add x12, x12, #0x10\n"
".inst 0x6f83e0d4 // udot v20.4s, v6.16b, v3.4b[0]\n"
- "ldr q6, [x16, #0x20]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
- "add x25, x25, #0x10\n"
+ "ldr q6, [x16, #0x20]\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
- "sub x14, x14, #0x10\n"
+ "add x28, x28, #0x10\n"
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
".inst 0x6f83e0f5 // udot v21.4s, v7.16b, v3.4b[0]\n"
"ldr q7, [x16, #0x30]\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "add x26, x26, #0x10\n"
".inst 0x6f81e0ce // udot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6f82e0d2 // udot v18.4s, v6.16b, v2.4b[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
".inst 0x6f83e0d6 // udot v22.4s, v6.16b, v3.4b[0]\n"
- "ldr q6, [x16, #0x40]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
+ "ldr q6, [x16, #0x40]\n"
".inst 0x6f81e0ef // udot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f82e0f3 // udot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x6f83e0f7 // udot v23.4s, v7.16b, v3.4b[0]\n"
@@ -1733,19 +1733,19 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
".inst 0x6fa2e8f3 // udot v19.4s, v7.16b, v2.4b[3]\n"
".inst 0x6fa3e8f7 // udot v23.4s, v7.16b, v3.4b[3]\n"
"121:" // Height 4: Multiply loop: Main loop skip
- "cbz x14, 126f\n"
- "cmp x14, #0x4\n"
+ "cbz x13, 126f\n"
+ "cmp x13, #0x4\n"
"blt 123f\n"
"122:" // Height 4: Multiply loop: Odd block loop
- "ldr s0, [x13], #0x4\n"
- "sub x14, x14, #0x4\n"
- "ldr s1, [x9], #0x4\n"
- "cmp x14, #0x4\n"
- "ldr s2, [x27], #0x4\n"
- "ldr s3, [x25], #0x4\n"
+ "ldr s0, [x12], #0x4\n"
+ "sub x13, x13, #0x4\n"
+ "ldr s1, [x28], #0x4\n"
+ "cmp x13, #0x4\n"
+ "ldr s2, [x26], #0x4\n"
+ "ldr s3, [x24], #0x4\n"
"ldr q6, [x16, #0x0]\n"
- ".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x16, #0x10]\n"
+ ".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x6f83e0d4 // udot v20.4s, v6.16b, v3.4b[0]\n"
@@ -1765,28 +1765,28 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
".inst 0x6f82e0f3 // udot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x6f83e0f7 // udot v23.4s, v7.16b, v3.4b[0]\n"
"bge 122b\n"
+ "cbz x13, 126f\n"
"123:" // Height 4: Multiply loop: Skip odd blocks
- "cbz x14, 126f\n"
- "tbz x14, #1, 124f\n"
- "ldr h0, [x13], #0x2\n"
- "ldr h1, [x9], #0x2\n"
- "ldr h2, [x27], #0x2\n"
- "ldr h3, [x25], #0x2\n"
- "tbz x14, #0, 125f\n"
- "ld1 { v0.b }[2], [x13]\n"
- "ld1 { v1.b }[2], [x9]\n"
- "ld1 { v2.b }[2], [x27]\n"
- "ld1 { v3.b }[2], [x25]\n"
+ "tbz x13, #1, 124f\n"
+ "ldr h0, [x12], #0x2\n"
+ "ldr h1, [x28], #0x2\n"
+ "ldr h2, [x26], #0x2\n"
+ "ldr h3, [x24], #0x2\n"
+ "tbz x13, #0, 125f\n"
+ "ld1 { v0.b }[2], [x12]\n"
+ "ld1 { v1.b }[2], [x28]\n"
+ "ld1 { v2.b }[2], [x26]\n"
+ "ld1 { v3.b }[2], [x24]\n"
"b 125f\n"
"124:" // Height 4: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x13, #0x0]\n"
- "ldr b1, [x9, #0x0]\n"
- "ldr b2, [x27, #0x0]\n"
- "ldr b3, [x25, #0x0]\n"
+ "ldr b0, [x12, #0x0]\n"
+ "ldr b1, [x28, #0x0]\n"
+ "ldr b2, [x26, #0x0]\n"
+ "ldr b3, [x24, #0x0]\n"
"125:" // Height 4: Multiply loop: Ragged operand read: Done
"ldr q6, [x16, #0x0]\n"
- ".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x16, #0x10]\n"
+ ".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x6f83e0d4 // udot v20.4s, v6.16b, v3.4b[0]\n"
@@ -1806,289 +1806,289 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
".inst 0x6f82e0f3 // udot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x6f83e0f7 // udot v23.4s, v7.16b, v3.4b[0]\n"
"126:" // Height 4: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x15, x15, #0x1\n"
- "cmp x15, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x14, x14, #0x1\n"
+ "cmp x14, x19\n"
"bne 116b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "cmp x8, #0x10\n"
- "prfm pstl1keep, [x17, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x15, #0x0]\n"
+ "cmp x17, #0x10\n"
+ "add x23, x15, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
"bge 135f\n"
- "tbz x8, #3, 130f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v9.4s }, [x17], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v13.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v21.4s }, [x22], #0x10\n"
- "tbz x8, #2, 128f\n"
- "st1 { v10.4s }, [x17], #0x10\n"
- "st1 { v14.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "st1 { v22.4s }, [x22], #0x10\n"
- "tbz x8, #1, 127f\n"
- "str d11, [x17], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
- "str d23, [x22], #0x8\n"
- "tbz x8, #0, 134f\n"
- "st1 { v11.s }[2], [x17]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
- "st1 { v23.s }[2], [x22]\n"
+ "tbz x17, #3, 130f\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
+ "st1 { v9.4s }, [x15], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v13.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v21.4s }, [x21], #0x10\n"
+ "tbz x17, #2, 128f\n"
+ "st1 { v10.4s }, [x15], #0x10\n"
+ "st1 { v14.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
+ "st1 { v22.4s }, [x21], #0x10\n"
+ "tbz x17, #1, 127f\n"
+ "str d11, [x15], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "str d23, [x21], #0x8\n"
+ "tbz x17, #0, 134f\n"
+ "st1 { v11.s }[2], [x15]\n"
+ "st1 { v15.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x22]\n"
+ "st1 { v23.s }[2], [x21]\n"
"b 134f\n"
"127:" // Height 4: Partial direct writeback: partial_1_12
- "tbz x8, #0, 134f\n"
- "str s11, [x17, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
- "str s23, [x22, #0x0]\n"
+ "tbz x17, #0, 134f\n"
+ "str s11, [x15, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
+ "str s23, [x21, #0x0]\n"
"b 134f\n"
"128:" // Height 4: Partial direct writeback: partial_2_8
- "tbz x8, #1, 129f\n"
- "str d10, [x17], #0x8\n"
- "str d14, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
- "str d22, [x22], #0x8\n"
- "tbz x8, #0, 134f\n"
- "st1 { v10.s }[2], [x17]\n"
- "st1 { v14.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
- "st1 { v22.s }[2], [x22]\n"
+ "tbz x17, #1, 129f\n"
+ "str d10, [x15], #0x8\n"
+ "str d14, [x23], #0x8\n"
+ "str d18, [x22], #0x8\n"
+ "str d22, [x21], #0x8\n"
+ "tbz x17, #0, 134f\n"
+ "st1 { v10.s }[2], [x15]\n"
+ "st1 { v14.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x22]\n"
+ "st1 { v22.s }[2], [x21]\n"
"b 134f\n"
"129:" // Height 4: Partial direct writeback: partial_1_8
- "tbz x8, #0, 134f\n"
- "str s10, [x17, #0x0]\n"
- "str s14, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
- "str s22, [x22, #0x0]\n"
+ "tbz x17, #0, 134f\n"
+ "str s10, [x15, #0x0]\n"
+ "str s14, [x23, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
+ "str s22, [x21, #0x0]\n"
"b 134f\n"
"130:" // Height 4: Partial direct writeback: partial_4_0
- "tbz x8, #2, 132f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "tbz x8, #1, 131f\n"
- "str d9, [x17], #0x8\n"
- "str d13, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
- "str d21, [x22], #0x8\n"
- "tbz x8, #0, 134f\n"
- "st1 { v9.s }[2], [x17]\n"
- "st1 { v13.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
- "st1 { v21.s }[2], [x22]\n"
+ "tbz x17, #2, 132f\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "tbz x17, #1, 131f\n"
+ "str d9, [x15], #0x8\n"
+ "str d13, [x23], #0x8\n"
+ "str d17, [x22], #0x8\n"
+ "str d21, [x21], #0x8\n"
+ "tbz x17, #0, 134f\n"
+ "st1 { v9.s }[2], [x15]\n"
+ "st1 { v13.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x22]\n"
+ "st1 { v21.s }[2], [x21]\n"
"b 134f\n"
"131:" // Height 4: Partial direct writeback: partial_1_4
- "tbz x8, #0, 134f\n"
- "str s9, [x17, #0x0]\n"
- "str s13, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
- "str s21, [x22, #0x0]\n"
+ "tbz x17, #0, 134f\n"
+ "str s9, [x15, #0x0]\n"
+ "str s13, [x23, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
+ "str s21, [x21, #0x0]\n"
"b 134f\n"
"132:" // Height 4: Partial direct writeback: partial_2_0
- "tbz x8, #1, 133f\n"
- "str d8, [x17], #0x8\n"
- "str d12, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d20, [x22], #0x8\n"
- "tbz x8, #0, 134f\n"
- "st1 { v8.s }[2], [x17]\n"
- "st1 { v12.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
- "st1 { v20.s }[2], [x22]\n"
+ "tbz x17, #1, 133f\n"
+ "str d8, [x15], #0x8\n"
+ "str d12, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "str d20, [x21], #0x8\n"
+ "tbz x17, #0, 134f\n"
+ "st1 { v8.s }[2], [x15]\n"
+ "st1 { v12.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x22]\n"
+ "st1 { v20.s }[2], [x21]\n"
"b 134f\n"
"133:" // Height 4: Partial direct writeback: partial_1_0
- "str s8, [x17, #0x0]\n"
- "str s12, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
- "str s20, [x22, #0x0]\n"
+ "str s8, [x15, #0x0]\n"
+ "str s12, [x23, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
+ "str s20, [x21, #0x0]\n"
"134:" // Height 4: Partial direct writeback: Done
"b 136f\n"
"135:" // Height 4: Full writeback
- "str q8, [x17, #0x0]\n"
- "str q9, [x17, #0x10]\n"
- "str q10, [x17, #0x20]\n"
- "str q11, [x17, #0x30]\n"
- "add x17, x17, #0x40\n"
- "str q12, [x24, #0x0]\n"
- "str q13, [x24, #0x10]\n"
- "str q14, [x24, #0x20]\n"
- "str q15, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
- "str q20, [x22, #0x0]\n"
- "str q21, [x22, #0x10]\n"
- "str q22, [x22, #0x20]\n"
- "str q23, [x22, #0x30]\n"
+ "str q8, [x15, #0x0]\n"
+ "str q9, [x15, #0x10]\n"
+ "str q10, [x15, #0x20]\n"
+ "str q11, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
+ "str q12, [x23, #0x0]\n"
+ "str q13, [x23, #0x10]\n"
+ "str q14, [x23, #0x20]\n"
+ "str q15, [x23, #0x30]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q17, [x22, #0x10]\n"
+ "str q18, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
+ "str q20, [x21, #0x0]\n"
+ "str q21, [x21, #0x10]\n"
+ "str q22, [x21, #0x20]\n"
+ "str q23, [x21, #0x30]\n"
"136:" // Height 4: Writeback done
- "subs x8, x8, #0x10\n"
+ "subs x17, x17, #0x10\n"
"bgt 104b\n"
"b 206f\n"
"137:" // Height 5
- "ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x17, %x[output_ptr]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x15, %x[output_ptr]\n"
"ldr x16, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"138:" // Height 5: Column loop
"tbz %x[flags], #0, 148f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "cmp x8, #0x10\n"
- "add x21, x22, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x17, #0x10\n"
+ "add x23, x15, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"bge 147f\n"
- "tbz x8, #3, 142f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v24.4s }, [x21], #0x10\n"
- "ld1 { v9.4s }, [x17], #0x10\n"
- "ld1 { v13.4s }, [x24], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v25.4s }, [x21], #0x10\n"
- "tbz x8, #2, 140f\n"
- "ld1 { v10.4s }, [x17], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
- "tbz x8, #1, 139f\n"
- "ldr d11, [x17], #0x8\n"
- "mov x25, #0x38\n"
- "ldr d15, [x24], #0x8\n"
- "ldr d19, [x23], #0x8\n"
- "ldr d23, [x22], #0x8\n"
- "ldr d27, [x21], #0x8\n"
- "tbz x8, #0, 146f\n"
- "ld1 { v11.s }[2], [x17]\n"
- "ld1 { v15.s }[2], [x24]\n"
- "ld1 { v19.s }[2], [x23]\n"
- "ld1 { v23.s }[2], [x22]\n"
- "ld1 { v27.s }[2], [x21]\n"
+ "tbz x17, #3, 142f\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v9.4s }, [x15], #0x10\n"
+ "ld1 { v13.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v24.4s }, [x20], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "ld1 { v25.4s }, [x20], #0x10\n"
+ "tbz x17, #2, 140f\n"
+ "ld1 { v10.4s }, [x15], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
+ "ld1 { v26.4s }, [x20], #0x10\n"
+ "tbz x17, #1, 139f\n"
+ "ldr d11, [x15], #0x8\n"
+ "mov x24, #0x38\n"
+ "ldr d15, [x23], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
+ "ldr d23, [x21], #0x8\n"
+ "ldr d27, [x20], #0x8\n"
+ "tbz x17, #0, 146f\n"
+ "ld1 { v11.s }[2], [x15]\n"
+ "ld1 { v15.s }[2], [x23]\n"
+ "ld1 { v19.s }[2], [x22]\n"
+ "ld1 { v23.s }[2], [x21]\n"
+ "ld1 { v27.s }[2], [x20]\n"
"b 146f\n"
"139:" // Height 5: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x8, #0, 146f\n"
- "ldr s11, [x17, #0x0]\n"
- "ldr s15, [x24, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s23, [x22, #0x0]\n"
- "ldr s27, [x21, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x17, #0, 146f\n"
+ "ldr s11, [x15, #0x0]\n"
+ "ldr s15, [x23, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
+ "ldr s23, [x21, #0x0]\n"
+ "ldr s27, [x20, #0x0]\n"
"b 146f\n"
"140:" // Height 5: Partial accumulate: partial_2_8
- "tbz x8, #1, 141f\n"
- "ldr d10, [x17], #0x8\n"
- "mov x25, #0x28\n"
- "ldr d14, [x24], #0x8\n"
- "ldr d18, [x23], #0x8\n"
- "ldr d22, [x22], #0x8\n"
- "ldr d26, [x21], #0x8\n"
- "tbz x8, #0, 146f\n"
- "ld1 { v10.s }[2], [x17]\n"
- "ld1 { v14.s }[2], [x24]\n"
- "ld1 { v18.s }[2], [x23]\n"
- "ld1 { v22.s }[2], [x22]\n"
- "ld1 { v26.s }[2], [x21]\n"
+ "tbz x17, #1, 141f\n"
+ "ldr d10, [x15], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
+ "mov x24, #0x28\n"
+ "ldr d18, [x22], #0x8\n"
+ "ldr d22, [x21], #0x8\n"
+ "ldr d26, [x20], #0x8\n"
+ "tbz x17, #0, 146f\n"
+ "ld1 { v10.s }[2], [x15]\n"
+ "ld1 { v14.s }[2], [x23]\n"
+ "ld1 { v18.s }[2], [x22]\n"
+ "ld1 { v22.s }[2], [x21]\n"
+ "ld1 { v26.s }[2], [x20]\n"
"b 146f\n"
"141:" // Height 5: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x8, #0, 146f\n"
- "ldr s10, [x17, #0x0]\n"
- "ldr s14, [x24, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
- "ldr s22, [x22, #0x0]\n"
- "ldr s26, [x21, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x17, #0, 146f\n"
+ "ldr s10, [x15, #0x0]\n"
+ "ldr s14, [x23, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
+ "ldr s22, [x21, #0x0]\n"
+ "ldr s26, [x20, #0x0]\n"
"b 146f\n"
"142:" // Height 5: Partial accumulate: partial_4_0
- "tbz x8, #2, 144f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v24.4s }, [x21], #0x10\n"
- "tbz x8, #1, 143f\n"
- "ldr d9, [x17], #0x8\n"
- "mov x25, #0x18\n"
- "ldr d13, [x24], #0x8\n"
- "ldr d17, [x23], #0x8\n"
- "ldr d21, [x22], #0x8\n"
- "ldr d25, [x21], #0x8\n"
- "tbz x8, #0, 146f\n"
- "ld1 { v9.s }[2], [x17]\n"
- "ld1 { v13.s }[2], [x24]\n"
- "ld1 { v17.s }[2], [x23]\n"
- "ld1 { v21.s }[2], [x22]\n"
- "ld1 { v25.s }[2], [x21]\n"
+ "tbz x17, #2, 144f\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v24.4s }, [x20], #0x10\n"
+ "tbz x17, #1, 143f\n"
+ "ldr d9, [x15], #0x8\n"
+ "mov x24, #0x18\n"
+ "ldr d13, [x23], #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "ldr d21, [x21], #0x8\n"
+ "ldr d25, [x20], #0x8\n"
+ "tbz x17, #0, 146f\n"
+ "ld1 { v9.s }[2], [x15]\n"
+ "ld1 { v13.s }[2], [x23]\n"
+ "ld1 { v17.s }[2], [x22]\n"
+ "ld1 { v21.s }[2], [x21]\n"
+ "ld1 { v25.s }[2], [x20]\n"
"b 146f\n"
"143:" // Height 5: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x8, #0, 146f\n"
- "ldr s9, [x17, #0x0]\n"
- "ldr s13, [x24, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
- "ldr s21, [x22, #0x0]\n"
- "ldr s25, [x21, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x17, #0, 146f\n"
+ "ldr s9, [x15, #0x0]\n"
+ "ldr s13, [x23, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
+ "ldr s21, [x21, #0x0]\n"
+ "ldr s25, [x20, #0x0]\n"
"b 146f\n"
"144:" // Height 5: Partial accumulate: partial_2_0
- "tbz x8, #1, 145f\n"
- "ldr d8, [x17], #0x8\n"
- "mov x25, #0x8\n"
- "ldr d12, [x24], #0x8\n"
- "ldr d16, [x23], #0x8\n"
- "ldr d20, [x22], #0x8\n"
- "ldr d24, [x21], #0x8\n"
- "tbz x8, #0, 146f\n"
- "ld1 { v8.s }[2], [x17]\n"
- "ld1 { v12.s }[2], [x24]\n"
- "ld1 { v16.s }[2], [x23]\n"
- "ld1 { v20.s }[2], [x22]\n"
- "ld1 { v24.s }[2], [x21]\n"
+ "tbz x17, #1, 145f\n"
+ "ldr d8, [x15], #0x8\n"
+ "ldr d12, [x23], #0x8\n"
+ "mov x24, #0x8\n"
+ "ldr d16, [x22], #0x8\n"
+ "ldr d20, [x21], #0x8\n"
+ "ldr d24, [x20], #0x8\n"
+ "tbz x17, #0, 146f\n"
+ "ld1 { v8.s }[2], [x15]\n"
+ "ld1 { v12.s }[2], [x23]\n"
+ "ld1 { v16.s }[2], [x22]\n"
+ "ld1 { v20.s }[2], [x21]\n"
+ "ld1 { v24.s }[2], [x20]\n"
"b 146f\n"
"145:" // Height 5: Partial accumulate: partial_1_0
- "ldr s8, [x17, #0x0]\n"
- "mov x25, #0x0\n"
- "ldr s12, [x24, #0x0]\n"
- "ldr s16, [x23, #0x0]\n"
- "ldr s20, [x22, #0x0]\n"
- "ldr s24, [x21, #0x0]\n"
+ "ldr s8, [x15, #0x0]\n"
+ "mov x24, #0x0\n"
+ "ldr s12, [x23, #0x0]\n"
+ "ldr s16, [x22, #0x0]\n"
+ "ldr s20, [x21, #0x0]\n"
+ "ldr s24, [x20, #0x0]\n"
"146:" // Height 5: Partial accumulate: Done
- "sub x17, x17, x25\n"
+ "sub x15, x15, x24\n"
"b 149f\n"
"147:" // Height 5: full accumulate
- "ldr q8, [x17, #0x0]\n"
- "ldr q9, [x17, #0x10]\n"
- "ldr q10, [x17, #0x20]\n"
- "ldr q11, [x17, #0x30]\n"
- "ldr q12, [x24, #0x0]\n"
- "ldr q13, [x24, #0x10]\n"
- "ldr q14, [x24, #0x20]\n"
- "ldr q15, [x24, #0x30]\n"
- "ldr q16, [x23, #0x0]\n"
- "ldr q17, [x23, #0x10]\n"
- "ldr q18, [x23, #0x20]\n"
- "ldr q19, [x23, #0x30]\n"
- "ldr q20, [x22, #0x0]\n"
- "ldr q21, [x22, #0x10]\n"
- "ldr q22, [x22, #0x20]\n"
- "ldr q23, [x22, #0x30]\n"
- "ldr q24, [x21, #0x0]\n"
- "ldr q25, [x21, #0x10]\n"
- "ldr q26, [x21, #0x20]\n"
- "ldr q27, [x21, #0x30]\n"
+ "ldr q8, [x15, #0x0]\n"
+ "ldr q9, [x15, #0x10]\n"
+ "ldr q10, [x15, #0x20]\n"
+ "ldr q11, [x15, #0x30]\n"
+ "ldr q12, [x23, #0x0]\n"
+ "ldr q13, [x23, #0x10]\n"
+ "ldr q14, [x23, #0x20]\n"
+ "ldr q15, [x23, #0x30]\n"
+ "ldr q16, [x22, #0x0]\n"
+ "ldr q17, [x22, #0x10]\n"
+ "ldr q18, [x22, #0x20]\n"
+ "ldr q19, [x22, #0x30]\n"
+ "ldr q20, [x21, #0x0]\n"
+ "ldr q21, [x21, #0x10]\n"
+ "ldr q22, [x21, #0x20]\n"
+ "ldr q23, [x21, #0x30]\n"
+ "ldr q24, [x20, #0x0]\n"
+ "ldr q25, [x20, #0x10]\n"
+ "ldr q26, [x20, #0x20]\n"
+ "ldr q27, [x20, #0x30]\n"
"b 149f\n"
"148:" // Height 5: no accumulate
"movi v8.4s, #0x0\n"
@@ -2112,231 +2112,231 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"movi v26.4s, #0x0\n"
"movi v27.4s, #0x0\n"
"149:" // Height 5: setup done
- "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
"150:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w13, [x20, x14, LSL #0x2]\n"
"tbz %x[flags], #3, 151f\n"
- "ldr x21, [%x[input_ptr], x15, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x13, [x21, #0x0]\n"
- "ldr x9, [x21, #0x8]\n"
- "ldr x27, [x21, #0x10]\n"
- "ldr x25, [x21, #0x18]\n"
- "ldr x23, [x21, #0x20]\n"
- "cbnz x15, 152f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x13, x13, x20\n"
- "add x9, x9, x20\n"
- "add x27, x27, x20\n"
- "add x25, x25, x20\n"
- "add x23, x23, x20\n"
+ "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x12, [x20, #0x0]\n"
+ "ldr x28, [x20, #0x8]\n"
+ "ldr x26, [x20, #0x10]\n"
+ "ldr x24, [x20, #0x18]\n"
+ "ldr x22, [x20, #0x20]\n"
+ "cbnz x14, 152f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x12, x12, x19\n"
+ "add x28, x28, x19\n"
+ "add x26, x26, x19\n"
+ "add x24, x24, x19\n"
+ "add x22, x22, x19\n"
"b 152f\n"
"151:" // Height 5: setup direct input
- "mov x13, %x[input_ptr]\n"
- "add x9, x13, x20\n"
- "add x27, x9, x20\n"
- "add x25, x27, x20\n"
- "add x23, x25, x20\n"
+ "mov x12, %x[input_ptr]\n"
+ "add x28, x12, x19\n"
+ "add x26, x28, x19\n"
+ "add x24, x26, x19\n"
+ "add x22, x24, x19\n"
"152:" // Height 5: input setup done
- "cmp x14, #0x10\n"
+ "cmp x13, #0x10\n"
"blt 155f\n"
- "ldr q0, [x13, #0x0]\n"
- "cmp x14, #0x20\n"
- "ldr q1, [x9, #0x0]\n"
- "ldr q2, [x27, #0x0]\n"
- "ldr q3, [x25, #0x0]\n"
- "ldr q4, [x23, #0x0]\n"
+ "ldr q0, [x12, #0x0]\n"
+ "ldr q1, [x28, #0x0]\n"
+ "cmp x13, #0x20\n"
+ "ldr q2, [x26, #0x0]\n"
+ "ldr q3, [x24, #0x0]\n"
+ "ldr q4, [x22, #0x0]\n"
"ldr q6, [x16, #0x0]\n"
- "ldr q7, [x16, #0x10]\n"
"blt 154f\n"
"153:" // Height 5: Multiply loop: Main loop head
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr x12, [x16, #0x28]\n"
+ "ldr d7, [x16, #0x10]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr x11, [x16, #0x38]\n"
+ "ldr x11, [x16, #0x18]\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
- "add x13, x13, #0x10\n"
+ "ldr x10, [x16, #0x28]\n"
".inst 0x6f83e0d4 // udot v20.4s, v6.16b, v3.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "add x12, x12, #0x10\n"
".inst 0x6f84e0d8 // udot v24.4s, v6.16b, v4.4b[0]\n"
- "ldr d6, [x16, #0x20]\n"
+ "mov v7.d[1], x11\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
+ "add x28, x28, #0x10\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x12\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr x12, [x16, #0x48]\n"
+ "ldr d6, [x16, #0x20]\n"
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
- "add x27, x27, #0x10\n"
+ "ldr x11, [x16, #0x38]\n"
".inst 0x6f83e0f5 // udot v21.4s, v7.16b, v3.4b[0]\n"
- "add x25, x25, #0x10\n"
+ "ldr x9, [x12, #0x8]\n"
".inst 0x6f84e0f9 // udot v25.4s, v7.16b, v4.4b[0]\n"
+ "mov v6.d[1], x10\n"
"ldr d7, [x16, #0x30]\n"
- "mov v7.d[1], x11\n"
+ "add x26, x26, #0x10\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6f81e0ce // udot v14.4s, v6.16b, v1.4b[0]\n"
- "ldr x11, [x16, #0x58]\n"
+ "ldr x10, [x16, #0x48]\n"
".inst 0x6f82e0d2 // udot v18.4s, v6.16b, v2.4b[0]\n"
- "add x23, x23, #0x10\n"
+ "mov v7.d[1], x11\n"
".inst 0x6f83e0d6 // udot v22.4s, v6.16b, v3.4b[0]\n"
- "ldr x10, [x13, #0x8]\n"
+ "ldr x11, [x16, #0x58]\n"
".inst 0x6f84e0da // udot v26.4s, v6.16b, v4.4b[0]\n"
"ldr d6, [x16, #0x40]\n"
".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x12\n"
+ "ldr x27, [x28, #0x8]\n"
".inst 0x6f81e0ef // udot v15.4s, v7.16b, v1.4b[0]\n"
- "ldr x12, [x16, #0x68]\n"
+ "ldr x25, [x26, #0x8]\n"
".inst 0x6f82e0f3 // udot v19.4s, v7.16b, v2.4b[0]\n"
- "ldr x28, [x9, #0x8]\n"
+ "mov v6.d[1], x10\n"
".inst 0x6f83e0f7 // udot v23.4s, v7.16b, v3.4b[0]\n"
- "ldr x26, [x27, #0x8]\n"
+ "ldr x10, [x16, #0x68]\n"
".inst 0x6f84e0fb // udot v27.4s, v7.16b, v4.4b[0]\n"
"ldr d7, [x16, #0x50]\n"
- "mov v7.d[1], x11\n"
".inst 0x6fa0e0c8 // udot v8.4s, v6.16b, v0.4b[1]\n"
+ "add x24, x24, #0x10\n"
".inst 0x6fa1e0cc // udot v12.4s, v6.16b, v1.4b[1]\n"
- "ldr x11, [x16, #0x78]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6fa2e0d0 // udot v16.4s, v6.16b, v2.4b[1]\n"
- "ldr x24, [x25, #0x8]\n"
+ "mov v7.d[1], x11\n"
".inst 0x6fa3e0d4 // udot v20.4s, v6.16b, v3.4b[1]\n"
- "ldr x22, [x23, #0x8]\n"
+ "ldr x11, [x16, #0x78]\n"
".inst 0x6fa4e0d8 // udot v24.4s, v6.16b, v4.4b[1]\n"
"ldr d6, [x16, #0x60]\n"
".inst 0x6fa0e0e9 // udot v9.4s, v7.16b, v0.4b[1]\n"
- "mov v6.d[1], x12\n"
+ "ldr x23, [x24, #0x8]\n"
".inst 0x6fa1e0ed // udot v13.4s, v7.16b, v1.4b[1]\n"
- "ldr x12, [x16, #0x88]\n"
+ "add x22, x22, #0x10\n"
".inst 0x6fa2e0f1 // udot v17.4s, v7.16b, v2.4b[1]\n"
- "sub x14, x14, #0x10\n"
+ "mov v6.d[1], x10\n"
".inst 0x6fa3e0f5 // udot v21.4s, v7.16b, v3.4b[1]\n"
- "cmp x14, #0x20\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6fa4e0f9 // udot v25.4s, v7.16b, v4.4b[1]\n"
"ldr d7, [x16, #0x70]\n"
- "mov v7.d[1], x11\n"
".inst 0x6fa0e0ca // udot v10.4s, v6.16b, v0.4b[1]\n"
+ "ldr x10, [x16, #0x88]\n"
".inst 0x6fa1e0ce // udot v14.4s, v6.16b, v1.4b[1]\n"
- "ldr x11, [x16, #0x98]\n"
+ "ldr x21, [x22, #0x8]\n"
".inst 0x6fa2e0d2 // udot v18.4s, v6.16b, v2.4b[1]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "mov v7.d[1], x11\n"
".inst 0x6fa3e0d6 // udot v22.4s, v6.16b, v3.4b[1]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "ldr x11, [x16, #0x98]\n"
".inst 0x6fa4e0da // udot v26.4s, v6.16b, v4.4b[1]\n"
"ldr d6, [x16, #0x80]\n"
".inst 0x6fa0e0eb // udot v11.4s, v7.16b, v0.4b[1]\n"
- "mov v6.d[1], x12\n"
+ "sub x13, x13, #0x10\n"
".inst 0x6fa1e0ef // udot v15.4s, v7.16b, v1.4b[1]\n"
- "ldr x12, [x16, #0xa8]\n"
+ "cmp x13, #0x20\n"
".inst 0x6fa2e0f3 // udot v19.4s, v7.16b, v2.4b[1]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "mov v6.d[1], x10\n"
".inst 0x6fa3e0f7 // udot v23.4s, v7.16b, v3.4b[1]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr x10, [x16, #0xa8]\n"
".inst 0x6fa4e0fb // udot v27.4s, v7.16b, v4.4b[1]\n"
"ldr d7, [x16, #0x90]\n"
- "mov v7.d[1], x11\n"
".inst 0x6f80e8c8 // udot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x6f81e8cc // udot v12.4s, v6.16b, v1.4b[2]\n"
- "ldr x11, [x16, #0xb8]\n"
".inst 0x6f82e8d0 // udot v16.4s, v6.16b, v2.4b[2]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "mov v7.d[1], x11\n"
".inst 0x6f83e8d4 // udot v20.4s, v6.16b, v3.4b[2]\n"
+ "ldr x11, [x16, #0xb8]\n"
".inst 0x6f84e8d8 // udot v24.4s, v6.16b, v4.4b[2]\n"
"ldr d6, [x16, #0xa0]\n"
".inst 0x6f80e8e9 // udot v9.4s, v7.16b, v0.4b[2]\n"
- "mov v6.d[1], x12\n"
".inst 0x6f81e8ed // udot v13.4s, v7.16b, v1.4b[2]\n"
- "ldr x12, [x16, #0xc8]\n"
".inst 0x6f82e8f1 // udot v17.4s, v7.16b, v2.4b[2]\n"
+ "mov v6.d[1], x10\n"
".inst 0x6f83e8f5 // udot v21.4s, v7.16b, v3.4b[2]\n"
+ "ldr x10, [x16, #0xc8]\n"
".inst 0x6f84e8f9 // udot v25.4s, v7.16b, v4.4b[2]\n"
"ldr d7, [x16, #0xb0]\n"
- "mov v7.d[1], x11\n"
".inst 0x6f80e8ca // udot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x6f81e8ce // udot v14.4s, v6.16b, v1.4b[2]\n"
- "ldr x11, [x16, #0xd8]\n"
".inst 0x6f82e8d2 // udot v18.4s, v6.16b, v2.4b[2]\n"
+ "mov v7.d[1], x11\n"
".inst 0x6f83e8d6 // udot v22.4s, v6.16b, v3.4b[2]\n"
+ "ldr x11, [x16, #0xd8]\n"
".inst 0x6f84e8da // udot v26.4s, v6.16b, v4.4b[2]\n"
"ldr d6, [x16, #0xc0]\n"
".inst 0x6f80e8eb // udot v11.4s, v7.16b, v0.4b[2]\n"
- "mov v6.d[1], x12\n"
".inst 0x6f81e8ef // udot v15.4s, v7.16b, v1.4b[2]\n"
- "ldr x12, [x16, #0xe8]\n"
".inst 0x6f82e8f3 // udot v19.4s, v7.16b, v2.4b[2]\n"
+ "mov v6.d[1], x10\n"
".inst 0x6f83e8f7 // udot v23.4s, v7.16b, v3.4b[2]\n"
+ "ldr x10, [x16, #0xe8]\n"
".inst 0x6f84e8fb // udot v27.4s, v7.16b, v4.4b[2]\n"
"ldr d7, [x16, #0xd0]\n"
- "mov v7.d[1], x11\n"
".inst 0x6fa0e8c8 // udot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x6fa1e8cc // udot v12.4s, v6.16b, v1.4b[3]\n"
- "ldr x11, [x16, #0xf8]\n"
".inst 0x6fa2e8d0 // udot v16.4s, v6.16b, v2.4b[3]\n"
+ "mov v7.d[1], x11\n"
".inst 0x6fa3e8d4 // udot v20.4s, v6.16b, v3.4b[3]\n"
+ "ldr x11, [x16, #0xf8]\n"
".inst 0x6fa4e8d8 // udot v24.4s, v6.16b, v4.4b[3]\n"
"ldr d6, [x16, #0xe0]\n"
".inst 0x6fa0e8e9 // udot v9.4s, v7.16b, v0.4b[3]\n"
- "mov v6.d[1], x12\n"
".inst 0x6fa1e8ed // udot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x6fa2e8f1 // udot v17.4s, v7.16b, v2.4b[3]\n"
+ "mov v6.d[1], x10\n"
".inst 0x6fa3e8f5 // udot v21.4s, v7.16b, v3.4b[3]\n"
".inst 0x6fa4e8f9 // udot v25.4s, v7.16b, v4.4b[3]\n"
"ldr d7, [x16, #0xf0]\n"
- "mov v7.d[1], x11\n"
- "add x16, x16, #0x100\n"
".inst 0x6fa0e8ca // udot v10.4s, v6.16b, v0.4b[3]\n"
- "ldr x12, [x16, #0x8]\n"
+ "add x16, x16, #0x100\n"
".inst 0x6fa1e8ce // udot v14.4s, v6.16b, v1.4b[3]\n"
- "ldr x11, [x16, #0x18]\n"
+ "ldr x10, [x16, #0x8]\n"
".inst 0x6fa2e8d2 // udot v18.4s, v6.16b, v2.4b[3]\n"
+ "mov v7.d[1], x11\n"
".inst 0x6fa3e8d6 // udot v22.4s, v6.16b, v3.4b[3]\n"
".inst 0x6fa4e8da // udot v26.4s, v6.16b, v4.4b[3]\n"
"ldr d6, [x16, #0x0]\n"
".inst 0x6fa0e8eb // udot v11.4s, v7.16b, v0.4b[3]\n"
- "ldr d0, [x13, #0x0]\n"
+ "ldr d0, [x12, #0x0]\n"
".inst 0x6fa1e8ef // udot v15.4s, v7.16b, v1.4b[3]\n"
- "ldr d1, [x9, #0x0]\n"
+ "ldr d1, [x28, #0x0]\n"
".inst 0x6fa2e8f3 // udot v19.4s, v7.16b, v2.4b[3]\n"
- "ldr d2, [x27, #0x0]\n"
+ "mov v6.d[1], x10\n"
".inst 0x6fa3e8f7 // udot v23.4s, v7.16b, v3.4b[3]\n"
- "ldr d3, [x25, #0x0]\n"
+ "mov v0.d[1], x9\n"
".inst 0x6fa4e8fb // udot v27.4s, v7.16b, v4.4b[3]\n"
- "ldr d4, [x23, #0x0]\n"
- "ldr d7, [x16, #0x10]\n"
- "mov v6.d[1], x12\n"
- "mov v0.d[1], x10\n"
- "mov v1.d[1], x28\n"
- "mov v2.d[1], x26\n"
- "mov v3.d[1], x24\n"
- "mov v4.d[1], x22\n"
- "mov v7.d[1], x11\n"
+ "mov v1.d[1], x27\n"
+ "ldr d2, [x26, #0x0]\n"
+ "ldr d3, [x24, #0x0]\n"
+ "ldr d4, [x22, #0x0]\n"
+ "mov v2.d[1], x25\n"
+ "mov v3.d[1], x23\n"
+ "mov v4.d[1], x21\n"
"bge 153b\n"
"154:" // Height 5: Multiply loop: Single iteration only
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
- "add x13, x13, #0x10\n"
+ "ldr q7, [x16, #0x10]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "sub x13, x13, #0x10\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
- "add x27, x27, #0x10\n"
+ "add x12, x12, #0x10\n"
".inst 0x6f83e0d4 // udot v20.4s, v6.16b, v3.4b[0]\n"
- "add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x6f84e0d8 // udot v24.4s, v6.16b, v4.4b[0]\n"
"ldr q6, [x16, #0x20]\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
- "add x23, x23, #0x10\n"
+ "add x28, x28, #0x10\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
- "sub x14, x14, #0x10\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "add x26, x26, #0x10\n"
".inst 0x6f83e0f5 // udot v21.4s, v7.16b, v3.4b[0]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6f84e0f9 // udot v25.4s, v7.16b, v4.4b[0]\n"
"ldr q7, [x16, #0x30]\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "add x24, x24, #0x10\n"
".inst 0x6f81e0ce // udot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6f82e0d2 // udot v18.4s, v6.16b, v2.4b[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "add x22, x22, #0x10\n"
".inst 0x6f83e0d6 // udot v22.4s, v6.16b, v3.4b[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6f84e0da // udot v26.4s, v6.16b, v4.4b[0]\n"
"ldr q6, [x16, #0x40]\n"
".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
@@ -2417,20 +2417,20 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
".inst 0x6fa3e8f7 // udot v23.4s, v7.16b, v3.4b[3]\n"
".inst 0x6fa4e8fb // udot v27.4s, v7.16b, v4.4b[3]\n"
"155:" // Height 5: Multiply loop: Main loop skip
- "cbz x14, 160f\n"
- "cmp x14, #0x4\n"
+ "cbz x13, 160f\n"
+ "cmp x13, #0x4\n"
"blt 157f\n"
"156:" // Height 5: Multiply loop: Odd block loop
- "ldr s0, [x13], #0x4\n"
- "sub x14, x14, #0x4\n"
- "ldr s1, [x9], #0x4\n"
- "cmp x14, #0x4\n"
- "ldr s2, [x27], #0x4\n"
- "ldr s3, [x25], #0x4\n"
- "ldr s4, [x23], #0x4\n"
+ "ldr s0, [x12], #0x4\n"
+ "sub x13, x13, #0x4\n"
+ "ldr s1, [x28], #0x4\n"
+ "cmp x13, #0x4\n"
+ "ldr s2, [x26], #0x4\n"
+ "ldr s3, [x24], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
"ldr q6, [x16, #0x0]\n"
- ".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x16, #0x10]\n"
+ ".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x6f83e0d4 // udot v20.4s, v6.16b, v3.4b[0]\n"
@@ -2454,31 +2454,31 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
".inst 0x6f83e0f7 // udot v23.4s, v7.16b, v3.4b[0]\n"
".inst 0x6f84e0fb // udot v27.4s, v7.16b, v4.4b[0]\n"
"bge 156b\n"
+ "cbz x13, 160f\n"
"157:" // Height 5: Multiply loop: Skip odd blocks
- "cbz x14, 160f\n"
- "tbz x14, #1, 158f\n"
- "ldr h0, [x13], #0x2\n"
- "ldr h1, [x9], #0x2\n"
- "ldr h2, [x27], #0x2\n"
- "ldr h3, [x25], #0x2\n"
- "ldr h4, [x23], #0x2\n"
- "tbz x14, #0, 159f\n"
- "ld1 { v0.b }[2], [x13]\n"
- "ld1 { v1.b }[2], [x9]\n"
- "ld1 { v2.b }[2], [x27]\n"
- "ld1 { v3.b }[2], [x25]\n"
- "ld1 { v4.b }[2], [x23]\n"
+ "tbz x13, #1, 158f\n"
+ "ldr h0, [x12], #0x2\n"
+ "ldr h1, [x28], #0x2\n"
+ "ldr h2, [x26], #0x2\n"
+ "ldr h3, [x24], #0x2\n"
+ "ldr h4, [x22], #0x2\n"
+ "tbz x13, #0, 159f\n"
+ "ld1 { v0.b }[2], [x12]\n"
+ "ld1 { v1.b }[2], [x28]\n"
+ "ld1 { v2.b }[2], [x26]\n"
+ "ld1 { v3.b }[2], [x24]\n"
+ "ld1 { v4.b }[2], [x22]\n"
"b 159f\n"
"158:" // Height 5: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x13, #0x0]\n"
- "ldr b1, [x9, #0x0]\n"
- "ldr b2, [x27, #0x0]\n"
- "ldr b3, [x25, #0x0]\n"
- "ldr b4, [x23, #0x0]\n"
+ "ldr b0, [x12, #0x0]\n"
+ "ldr b1, [x28, #0x0]\n"
+ "ldr b2, [x26, #0x0]\n"
+ "ldr b3, [x24, #0x0]\n"
+ "ldr b4, [x22, #0x0]\n"
"159:" // Height 5: Multiply loop: Ragged operand read: Done
"ldr q6, [x16, #0x0]\n"
- ".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x16, #0x10]\n"
+ ".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x6f83e0d4 // udot v20.4s, v6.16b, v3.4b[0]\n"
@@ -2502,335 +2502,335 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
".inst 0x6f83e0f7 // udot v23.4s, v7.16b, v3.4b[0]\n"
".inst 0x6f84e0fb // udot v27.4s, v7.16b, v4.4b[0]\n"
"160:" // Height 5: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x15, x15, #0x1\n"
- "cmp x15, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x14, x14, #0x1\n"
+ "cmp x14, x19\n"
"bne 150b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "cmp x8, #0x10\n"
- "prfm pstl1keep, [x17, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x15, #0x0]\n"
+ "cmp x17, #0x10\n"
+ "add x23, x15, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #2\n"
"prfm pstl1keep, [x21, #0x0]\n"
+ "add x20, x21, x19, LSL #2\n"
+ "prfm pstl1keep, [x20, #0x0]\n"
"bge 169f\n"
- "tbz x8, #3, 164f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v9.4s }, [x17], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v13.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v21.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
- "st1 { v25.4s }, [x21], #0x10\n"
- "tbz x8, #2, 162f\n"
- "st1 { v10.4s }, [x17], #0x10\n"
- "st1 { v14.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "st1 { v22.4s }, [x22], #0x10\n"
- "st1 { v26.4s }, [x21], #0x10\n"
- "tbz x8, #1, 161f\n"
- "str d11, [x17], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
- "str d23, [x22], #0x8\n"
- "str d27, [x21], #0x8\n"
- "tbz x8, #0, 168f\n"
- "st1 { v11.s }[2], [x17]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
- "st1 { v23.s }[2], [x22]\n"
- "st1 { v27.s }[2], [x21]\n"
+ "tbz x17, #3, 164f\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
+ "st1 { v9.4s }, [x15], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v13.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v21.4s }, [x21], #0x10\n"
+ "st1 { v24.4s }, [x20], #0x10\n"
+ "st1 { v25.4s }, [x20], #0x10\n"
+ "tbz x17, #2, 162f\n"
+ "st1 { v10.4s }, [x15], #0x10\n"
+ "st1 { v14.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
+ "st1 { v22.4s }, [x21], #0x10\n"
+ "st1 { v26.4s }, [x20], #0x10\n"
+ "tbz x17, #1, 161f\n"
+ "str d11, [x15], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "str d23, [x21], #0x8\n"
+ "str d27, [x20], #0x8\n"
+ "tbz x17, #0, 168f\n"
+ "st1 { v11.s }[2], [x15]\n"
+ "st1 { v15.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x22]\n"
+ "st1 { v23.s }[2], [x21]\n"
+ "st1 { v27.s }[2], [x20]\n"
"b 168f\n"
"161:" // Height 5: Partial direct writeback: partial_1_12
- "tbz x8, #0, 168f\n"
- "str s11, [x17, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
- "str s23, [x22, #0x0]\n"
- "str s27, [x21, #0x0]\n"
+ "tbz x17, #0, 168f\n"
+ "str s11, [x15, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
+ "str s23, [x21, #0x0]\n"
+ "str s27, [x20, #0x0]\n"
"b 168f\n"
"162:" // Height 5: Partial direct writeback: partial_2_8
- "tbz x8, #1, 163f\n"
- "str d10, [x17], #0x8\n"
- "str d14, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
- "str d22, [x22], #0x8\n"
- "str d26, [x21], #0x8\n"
- "tbz x8, #0, 168f\n"
- "st1 { v10.s }[2], [x17]\n"
- "st1 { v14.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
- "st1 { v22.s }[2], [x22]\n"
- "st1 { v26.s }[2], [x21]\n"
+ "tbz x17, #1, 163f\n"
+ "str d10, [x15], #0x8\n"
+ "str d14, [x23], #0x8\n"
+ "str d18, [x22], #0x8\n"
+ "str d22, [x21], #0x8\n"
+ "str d26, [x20], #0x8\n"
+ "tbz x17, #0, 168f\n"
+ "st1 { v10.s }[2], [x15]\n"
+ "st1 { v14.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x22]\n"
+ "st1 { v22.s }[2], [x21]\n"
+ "st1 { v26.s }[2], [x20]\n"
"b 168f\n"
"163:" // Height 5: Partial direct writeback: partial_1_8
- "tbz x8, #0, 168f\n"
- "str s10, [x17, #0x0]\n"
- "str s14, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
- "str s22, [x22, #0x0]\n"
- "str s26, [x21, #0x0]\n"
+ "tbz x17, #0, 168f\n"
+ "str s10, [x15, #0x0]\n"
+ "str s14, [x23, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
+ "str s22, [x21, #0x0]\n"
+ "str s26, [x20, #0x0]\n"
"b 168f\n"
"164:" // Height 5: Partial direct writeback: partial_4_0
- "tbz x8, #2, 166f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
- "tbz x8, #1, 165f\n"
- "str d9, [x17], #0x8\n"
- "str d13, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
- "str d21, [x22], #0x8\n"
- "str d25, [x21], #0x8\n"
- "tbz x8, #0, 168f\n"
- "st1 { v9.s }[2], [x17]\n"
- "st1 { v13.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
- "st1 { v21.s }[2], [x22]\n"
- "st1 { v25.s }[2], [x21]\n"
+ "tbz x17, #2, 166f\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v24.4s }, [x20], #0x10\n"
+ "tbz x17, #1, 165f\n"
+ "str d9, [x15], #0x8\n"
+ "str d13, [x23], #0x8\n"
+ "str d17, [x22], #0x8\n"
+ "str d21, [x21], #0x8\n"
+ "str d25, [x20], #0x8\n"
+ "tbz x17, #0, 168f\n"
+ "st1 { v9.s }[2], [x15]\n"
+ "st1 { v13.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x22]\n"
+ "st1 { v21.s }[2], [x21]\n"
+ "st1 { v25.s }[2], [x20]\n"
"b 168f\n"
"165:" // Height 5: Partial direct writeback: partial_1_4
- "tbz x8, #0, 168f\n"
- "str s9, [x17, #0x0]\n"
- "str s13, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
- "str s21, [x22, #0x0]\n"
- "str s25, [x21, #0x0]\n"
+ "tbz x17, #0, 168f\n"
+ "str s9, [x15, #0x0]\n"
+ "str s13, [x23, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
+ "str s21, [x21, #0x0]\n"
+ "str s25, [x20, #0x0]\n"
"b 168f\n"
"166:" // Height 5: Partial direct writeback: partial_2_0
- "tbz x8, #1, 167f\n"
- "str d8, [x17], #0x8\n"
- "str d12, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d20, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
- "tbz x8, #0, 168f\n"
- "st1 { v8.s }[2], [x17]\n"
- "st1 { v12.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
- "st1 { v20.s }[2], [x22]\n"
- "st1 { v24.s }[2], [x21]\n"
+ "tbz x17, #1, 167f\n"
+ "str d8, [x15], #0x8\n"
+ "str d12, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "str d20, [x21], #0x8\n"
+ "str d24, [x20], #0x8\n"
+ "tbz x17, #0, 168f\n"
+ "st1 { v8.s }[2], [x15]\n"
+ "st1 { v12.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x22]\n"
+ "st1 { v20.s }[2], [x21]\n"
+ "st1 { v24.s }[2], [x20]\n"
"b 168f\n"
"167:" // Height 5: Partial direct writeback: partial_1_0
- "str s8, [x17, #0x0]\n"
- "str s12, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
- "str s20, [x22, #0x0]\n"
- "str s24, [x21, #0x0]\n"
+ "str s8, [x15, #0x0]\n"
+ "str s12, [x23, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
+ "str s20, [x21, #0x0]\n"
+ "str s24, [x20, #0x0]\n"
"168:" // Height 5: Partial direct writeback: Done
"b 170f\n"
"169:" // Height 5: Full writeback
- "str q8, [x17, #0x0]\n"
- "str q9, [x17, #0x10]\n"
- "str q10, [x17, #0x20]\n"
- "str q11, [x17, #0x30]\n"
- "add x17, x17, #0x40\n"
- "str q12, [x24, #0x0]\n"
- "str q13, [x24, #0x10]\n"
- "str q14, [x24, #0x20]\n"
- "str q15, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
- "str q20, [x22, #0x0]\n"
- "str q21, [x22, #0x10]\n"
- "str q22, [x22, #0x20]\n"
- "str q23, [x22, #0x30]\n"
- "str q24, [x21, #0x0]\n"
- "str q25, [x21, #0x10]\n"
- "str q26, [x21, #0x20]\n"
- "str q27, [x21, #0x30]\n"
+ "str q8, [x15, #0x0]\n"
+ "str q9, [x15, #0x10]\n"
+ "str q10, [x15, #0x20]\n"
+ "str q11, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
+ "str q12, [x23, #0x0]\n"
+ "str q13, [x23, #0x10]\n"
+ "str q14, [x23, #0x20]\n"
+ "str q15, [x23, #0x30]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q17, [x22, #0x10]\n"
+ "str q18, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
+ "str q20, [x21, #0x0]\n"
+ "str q21, [x21, #0x10]\n"
+ "str q22, [x21, #0x20]\n"
+ "str q23, [x21, #0x30]\n"
+ "str q24, [x20, #0x0]\n"
+ "str q25, [x20, #0x10]\n"
+ "str q26, [x20, #0x20]\n"
+ "str q27, [x20, #0x30]\n"
"170:" // Height 5: Writeback done
- "subs x8, x8, #0x10\n"
+ "subs x17, x17, #0x10\n"
"bgt 138b\n"
"b 206f\n"
"171:" // Height 6
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x18\n"
- "ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x17, %x[output_ptr]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x15, %x[output_ptr]\n"
"ldr x16, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "mov x20, #0x18\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
"172:" // Height 6: Column loop
"tbz %x[flags], #0, 182f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "cmp x8, #0x10\n"
- "add x20, x21, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x17, #0x10\n"
+ "add x23, x15, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
+ "add x19, x20, x19, LSL #2\n"
"bge 181f\n"
- "tbz x8, #3, 176f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v24.4s }, [x21], #0x10\n"
- "ld1 { v28.4s }, [x20], #0x10\n"
- "ld1 { v9.4s }, [x17], #0x10\n"
- "ld1 { v13.4s }, [x24], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v25.4s }, [x21], #0x10\n"
- "ld1 { v29.4s }, [x20], #0x10\n"
- "tbz x8, #2, 174f\n"
- "ld1 { v10.4s }, [x17], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
- "ld1 { v30.4s }, [x20], #0x10\n"
- "tbz x8, #1, 173f\n"
- "ldr d11, [x17], #0x8\n"
- "mov x25, #0x38\n"
- "ldr d15, [x24], #0x8\n"
- "ldr d19, [x23], #0x8\n"
- "ldr d23, [x22], #0x8\n"
- "ldr d27, [x21], #0x8\n"
- "ldr d31, [x20], #0x8\n"
- "tbz x8, #0, 180f\n"
- "ld1 { v11.s }[2], [x17]\n"
- "ld1 { v15.s }[2], [x24]\n"
- "ld1 { v19.s }[2], [x23]\n"
- "ld1 { v23.s }[2], [x22]\n"
- "ld1 { v27.s }[2], [x21]\n"
- "ld1 { v31.s }[2], [x20]\n"
+ "tbz x17, #3, 176f\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v9.4s }, [x15], #0x10\n"
+ "ld1 { v13.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v24.4s }, [x20], #0x10\n"
+ "ld1 { v28.4s }, [x19], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "ld1 { v25.4s }, [x20], #0x10\n"
+ "ld1 { v29.4s }, [x19], #0x10\n"
+ "tbz x17, #2, 174f\n"
+ "ld1 { v10.4s }, [x15], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
+ "ld1 { v26.4s }, [x20], #0x10\n"
+ "ld1 { v30.4s }, [x19], #0x10\n"
+ "tbz x17, #1, 173f\n"
+ "ldr d11, [x15], #0x8\n"
+ "mov x24, #0x38\n"
+ "ldr d15, [x23], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
+ "ldr d23, [x21], #0x8\n"
+ "ldr d27, [x20], #0x8\n"
+ "ldr d31, [x19], #0x8\n"
+ "tbz x17, #0, 180f\n"
+ "ld1 { v11.s }[2], [x15]\n"
+ "ld1 { v15.s }[2], [x23]\n"
+ "ld1 { v19.s }[2], [x22]\n"
+ "ld1 { v23.s }[2], [x21]\n"
+ "ld1 { v27.s }[2], [x20]\n"
+ "ld1 { v31.s }[2], [x19]\n"
"b 180f\n"
"173:" // Height 6: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x8, #0, 180f\n"
- "ldr s11, [x17, #0x0]\n"
- "ldr s15, [x24, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s23, [x22, #0x0]\n"
- "ldr s27, [x21, #0x0]\n"
- "ldr s31, [x20, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x17, #0, 180f\n"
+ "ldr s11, [x15, #0x0]\n"
+ "ldr s15, [x23, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
+ "ldr s23, [x21, #0x0]\n"
+ "ldr s27, [x20, #0x0]\n"
+ "ldr s31, [x19, #0x0]\n"
"b 180f\n"
"174:" // Height 6: Partial accumulate: partial_2_8
- "tbz x8, #1, 175f\n"
- "ldr d10, [x17], #0x8\n"
- "mov x25, #0x28\n"
- "ldr d14, [x24], #0x8\n"
- "ldr d18, [x23], #0x8\n"
- "ldr d22, [x22], #0x8\n"
- "ldr d26, [x21], #0x8\n"
- "ldr d30, [x20], #0x8\n"
- "tbz x8, #0, 180f\n"
- "ld1 { v10.s }[2], [x17]\n"
- "ld1 { v14.s }[2], [x24]\n"
- "ld1 { v18.s }[2], [x23]\n"
- "ld1 { v22.s }[2], [x22]\n"
- "ld1 { v26.s }[2], [x21]\n"
- "ld1 { v30.s }[2], [x20]\n"
+ "tbz x17, #1, 175f\n"
+ "ldr d10, [x15], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
+ "mov x24, #0x28\n"
+ "ldr d18, [x22], #0x8\n"
+ "ldr d22, [x21], #0x8\n"
+ "ldr d26, [x20], #0x8\n"
+ "ldr d30, [x19], #0x8\n"
+ "tbz x17, #0, 180f\n"
+ "ld1 { v10.s }[2], [x15]\n"
+ "ld1 { v14.s }[2], [x23]\n"
+ "ld1 { v18.s }[2], [x22]\n"
+ "ld1 { v22.s }[2], [x21]\n"
+ "ld1 { v26.s }[2], [x20]\n"
+ "ld1 { v30.s }[2], [x19]\n"
"b 180f\n"
"175:" // Height 6: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x8, #0, 180f\n"
- "ldr s10, [x17, #0x0]\n"
- "ldr s14, [x24, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
- "ldr s22, [x22, #0x0]\n"
- "ldr s26, [x21, #0x0]\n"
- "ldr s30, [x20, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x17, #0, 180f\n"
+ "ldr s10, [x15, #0x0]\n"
+ "ldr s14, [x23, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
+ "ldr s22, [x21, #0x0]\n"
+ "ldr s26, [x20, #0x0]\n"
+ "ldr s30, [x19, #0x0]\n"
"b 180f\n"
"176:" // Height 6: Partial accumulate: partial_4_0
- "tbz x8, #2, 178f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v24.4s }, [x21], #0x10\n"
- "ld1 { v28.4s }, [x20], #0x10\n"
- "tbz x8, #1, 177f\n"
- "ldr d9, [x17], #0x8\n"
- "mov x25, #0x18\n"
- "ldr d13, [x24], #0x8\n"
- "ldr d17, [x23], #0x8\n"
- "ldr d21, [x22], #0x8\n"
- "ldr d25, [x21], #0x8\n"
- "ldr d29, [x20], #0x8\n"
- "tbz x8, #0, 180f\n"
- "ld1 { v9.s }[2], [x17]\n"
- "ld1 { v13.s }[2], [x24]\n"
- "ld1 { v17.s }[2], [x23]\n"
- "ld1 { v21.s }[2], [x22]\n"
- "ld1 { v25.s }[2], [x21]\n"
- "ld1 { v29.s }[2], [x20]\n"
+ "tbz x17, #2, 178f\n"
+ "ld1 { v8.4s }, [x15], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v24.4s }, [x20], #0x10\n"
+ "ld1 { v28.4s }, [x19], #0x10\n"
+ "tbz x17, #1, 177f\n"
+ "ldr d9, [x15], #0x8\n"
+ "mov x24, #0x18\n"
+ "ldr d13, [x23], #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "ldr d21, [x21], #0x8\n"
+ "ldr d25, [x20], #0x8\n"
+ "ldr d29, [x19], #0x8\n"
+ "tbz x17, #0, 180f\n"
+ "ld1 { v9.s }[2], [x15]\n"
+ "ld1 { v13.s }[2], [x23]\n"
+ "ld1 { v17.s }[2], [x22]\n"
+ "ld1 { v21.s }[2], [x21]\n"
+ "ld1 { v25.s }[2], [x20]\n"
+ "ld1 { v29.s }[2], [x19]\n"
"b 180f\n"
"177:" // Height 6: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x8, #0, 180f\n"
- "ldr s9, [x17, #0x0]\n"
- "ldr s13, [x24, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
- "ldr s21, [x22, #0x0]\n"
- "ldr s25, [x21, #0x0]\n"
- "ldr s29, [x20, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x17, #0, 180f\n"
+ "ldr s9, [x15, #0x0]\n"
+ "ldr s13, [x23, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
+ "ldr s21, [x21, #0x0]\n"
+ "ldr s25, [x20, #0x0]\n"
+ "ldr s29, [x19, #0x0]\n"
"b 180f\n"
"178:" // Height 6: Partial accumulate: partial_2_0
- "tbz x8, #1, 179f\n"
- "ldr d8, [x17], #0x8\n"
- "mov x25, #0x8\n"
- "ldr d12, [x24], #0x8\n"
- "ldr d16, [x23], #0x8\n"
- "ldr d20, [x22], #0x8\n"
- "ldr d24, [x21], #0x8\n"
- "ldr d28, [x20], #0x8\n"
- "tbz x8, #0, 180f\n"
- "ld1 { v8.s }[2], [x17]\n"
- "ld1 { v12.s }[2], [x24]\n"
- "ld1 { v16.s }[2], [x23]\n"
- "ld1 { v20.s }[2], [x22]\n"
- "ld1 { v24.s }[2], [x21]\n"
- "ld1 { v28.s }[2], [x20]\n"
+ "tbz x17, #1, 179f\n"
+ "ldr d8, [x15], #0x8\n"
+ "ldr d12, [x23], #0x8\n"
+ "mov x24, #0x8\n"
+ "ldr d16, [x22], #0x8\n"
+ "ldr d20, [x21], #0x8\n"
+ "ldr d24, [x20], #0x8\n"
+ "ldr d28, [x19], #0x8\n"
+ "tbz x17, #0, 180f\n"
+ "ld1 { v8.s }[2], [x15]\n"
+ "ld1 { v12.s }[2], [x23]\n"
+ "ld1 { v16.s }[2], [x22]\n"
+ "ld1 { v20.s }[2], [x21]\n"
+ "ld1 { v24.s }[2], [x20]\n"
+ "ld1 { v28.s }[2], [x19]\n"
"b 180f\n"
"179:" // Height 6: Partial accumulate: partial_1_0
- "ldr s8, [x17, #0x0]\n"
- "mov x25, #0x0\n"
- "ldr s12, [x24, #0x0]\n"
- "ldr s16, [x23, #0x0]\n"
- "ldr s20, [x22, #0x0]\n"
- "ldr s24, [x21, #0x0]\n"
- "ldr s28, [x20, #0x0]\n"
+ "ldr s8, [x15, #0x0]\n"
+ "mov x24, #0x0\n"
+ "ldr s12, [x23, #0x0]\n"
+ "ldr s16, [x22, #0x0]\n"
+ "ldr s20, [x21, #0x0]\n"
+ "ldr s24, [x20, #0x0]\n"
+ "ldr s28, [x19, #0x0]\n"
"180:" // Height 6: Partial accumulate: Done
- "sub x17, x17, x25\n"
+ "sub x15, x15, x24\n"
"b 183f\n"
"181:" // Height 6: full accumulate
- "ldr q8, [x17, #0x0]\n"
- "ldr q9, [x17, #0x10]\n"
- "ldr q10, [x17, #0x20]\n"
- "ldr q11, [x17, #0x30]\n"
- "ldr q12, [x24, #0x0]\n"
- "ldr q13, [x24, #0x10]\n"
- "ldr q14, [x24, #0x20]\n"
- "ldr q15, [x24, #0x30]\n"
- "ldr q16, [x23, #0x0]\n"
- "ldr q17, [x23, #0x10]\n"
- "ldr q18, [x23, #0x20]\n"
- "ldr q19, [x23, #0x30]\n"
- "ldr q20, [x22, #0x0]\n"
- "ldr q21, [x22, #0x10]\n"
- "ldr q22, [x22, #0x20]\n"
- "ldr q23, [x22, #0x30]\n"
- "ldr q24, [x21, #0x0]\n"
- "ldr q25, [x21, #0x10]\n"
- "ldr q26, [x21, #0x20]\n"
- "ldr q27, [x21, #0x30]\n"
- "ldr q28, [x20, #0x0]\n"
- "ldr q29, [x20, #0x10]\n"
- "ldr q30, [x20, #0x20]\n"
- "ldr q31, [x20, #0x30]\n"
+ "ldr q8, [x15, #0x0]\n"
+ "ldr q9, [x15, #0x10]\n"
+ "ldr q10, [x15, #0x20]\n"
+ "ldr q11, [x15, #0x30]\n"
+ "ldr q12, [x23, #0x0]\n"
+ "ldr q13, [x23, #0x10]\n"
+ "ldr q14, [x23, #0x20]\n"
+ "ldr q15, [x23, #0x30]\n"
+ "ldr q16, [x22, #0x0]\n"
+ "ldr q17, [x22, #0x10]\n"
+ "ldr q18, [x22, #0x20]\n"
+ "ldr q19, [x22, #0x30]\n"
+ "ldr q20, [x21, #0x0]\n"
+ "ldr q21, [x21, #0x10]\n"
+ "ldr q22, [x21, #0x20]\n"
+ "ldr q23, [x21, #0x30]\n"
+ "ldr q24, [x20, #0x0]\n"
+ "ldr q25, [x20, #0x10]\n"
+ "ldr q26, [x20, #0x20]\n"
+ "ldr q27, [x20, #0x30]\n"
+ "ldr q28, [x19, #0x0]\n"
+ "ldr q29, [x19, #0x10]\n"
+ "ldr q30, [x19, #0x20]\n"
+ "ldr q31, [x19, #0x30]\n"
"b 183f\n"
"182:" // Height 6: no accumulate
"movi v8.4s, #0x0\n"
@@ -2858,260 +2858,260 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"movi v30.4s, #0x0\n"
"movi v31.4s, #0x0\n"
"183:" // Height 6: setup done
- "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
"184:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w13, [x20, x14, LSL #0x2]\n"
"tbz %x[flags], #3, 185f\n"
- "ldr x21, [%x[input_ptr], x15, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x13, [x21, #0x0]\n"
- "ldr x9, [x21, #0x8]\n"
- "ldr x27, [x21, #0x10]\n"
- "ldr x25, [x21, #0x18]\n"
- "ldr x23, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x15, 186f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x13, x13, x20\n"
- "add x9, x9, x20\n"
- "add x27, x27, x20\n"
- "add x25, x25, x20\n"
- "add x23, x23, x20\n"
- "add x21, x21, x20\n"
+ "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x12, [x20, #0x0]\n"
+ "ldr x28, [x20, #0x8]\n"
+ "ldr x26, [x20, #0x10]\n"
+ "ldr x24, [x20, #0x18]\n"
+ "ldr x22, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x14, 186f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x12, x12, x19\n"
+ "add x28, x28, x19\n"
+ "add x26, x26, x19\n"
+ "add x24, x24, x19\n"
+ "add x22, x22, x19\n"
+ "add x20, x20, x19\n"
"b 186f\n"
"185:" // Height 6: setup direct input
- "mov x13, %x[input_ptr]\n"
- "add x9, x13, x20\n"
- "add x27, x9, x20\n"
- "add x25, x27, x20\n"
- "add x23, x25, x20\n"
- "add x21, x23, x20\n"
+ "mov x12, %x[input_ptr]\n"
+ "add x28, x12, x19\n"
+ "add x26, x28, x19\n"
+ "add x24, x26, x19\n"
+ "add x22, x24, x19\n"
+ "add x20, x22, x19\n"
"186:" // Height 6: input setup done
- "cmp x14, #0x10\n"
+ "cmp x13, #0x10\n"
"blt 189f\n"
- "ldr q0, [x13, #0x0]\n"
- "cmp x14, #0x20\n"
- "ldr q1, [x9, #0x0]\n"
- "ldr q2, [x27, #0x0]\n"
- "ldr q3, [x25, #0x0]\n"
- "ldr q4, [x23, #0x0]\n"
- "ldr q5, [x21, #0x0]\n"
+ "ldr q0, [x12, #0x0]\n"
+ "ldr q1, [x28, #0x0]\n"
+ "cmp x13, #0x20\n"
+ "ldr q2, [x26, #0x0]\n"
+ "ldr q3, [x24, #0x0]\n"
+ "ldr q4, [x22, #0x0]\n"
+ "ldr q5, [x20, #0x0]\n"
"ldr q6, [x16, #0x0]\n"
- "ldr q7, [x16, #0x10]\n"
"blt 188f\n"
"187:" // Height 6: Multiply loop: Main loop head
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr x12, [x16, #0x28]\n"
+ "ldr d7, [x16, #0x10]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr x11, [x16, #0x38]\n"
+ "ldr x11, [x16, #0x18]\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
- "add x13, x13, #0x10\n"
+ "ldr x10, [x16, #0x28]\n"
".inst 0x6f83e0d4 // udot v20.4s, v6.16b, v3.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "add x12, x12, #0x10\n"
".inst 0x6f84e0d8 // udot v24.4s, v6.16b, v4.4b[0]\n"
- "add x27, x27, #0x10\n"
+ "mov v7.d[1], x11\n"
".inst 0x6f85e0dc // udot v28.4s, v6.16b, v5.4b[0]\n"
- "ldr d6, [x16, #0x20]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x12\n"
+ "ldr d6, [x16, #0x20]\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr x12, [x16, #0x48]\n"
+ "ldr x11, [x16, #0x38]\n"
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
- "add x25, x25, #0x10\n"
+ "ldr x9, [x12, #0x8]\n"
".inst 0x6f83e0f5 // udot v21.4s, v7.16b, v3.4b[0]\n"
- "add x23, x23, #0x10\n"
+ "mov v6.d[1], x10\n"
".inst 0x6f84e0f9 // udot v25.4s, v7.16b, v4.4b[0]\n"
- "add x21, x21, #0x10\n"
+ "ldr x10, [x16, #0x48]\n"
".inst 0x6f85e0fd // udot v29.4s, v7.16b, v5.4b[0]\n"
"ldr d7, [x16, #0x30]\n"
- "mov v7.d[1], x11\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
+ "add x28, x28, #0x10\n"
".inst 0x6f81e0ce // udot v14.4s, v6.16b, v1.4b[0]\n"
- "ldr x11, [x16, #0x58]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
".inst 0x6f82e0d2 // udot v18.4s, v6.16b, v2.4b[0]\n"
- "ldr x10, [x13, #0x8]\n"
+ "mov v7.d[1], x11\n"
".inst 0x6f83e0d6 // udot v22.4s, v6.16b, v3.4b[0]\n"
- "ldr x28, [x9, #0x8]\n"
+ "ldr x11, [x16, #0x58]\n"
".inst 0x6f84e0da // udot v26.4s, v6.16b, v4.4b[0]\n"
- "ldr x26, [x27, #0x8]\n"
+ "ldr x27, [x28, #0x8]\n"
".inst 0x6f85e0de // udot v30.4s, v6.16b, v5.4b[0]\n"
"ldr d6, [x16, #0x40]\n"
".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x12\n"
+ "add x26, x26, #0x10\n"
".inst 0x6f81e0ef // udot v15.4s, v7.16b, v1.4b[0]\n"
- "ldr x12, [x16, #0x68]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6f82e0f3 // udot v19.4s, v7.16b, v2.4b[0]\n"
- "ldr x24, [x25, #0x8]\n"
+ "mov v6.d[1], x10\n"
".inst 0x6f83e0f7 // udot v23.4s, v7.16b, v3.4b[0]\n"
- "ldr x22, [x23, #0x8]\n"
+ "ldr x10, [x16, #0x68]\n"
".inst 0x6f84e0fb // udot v27.4s, v7.16b, v4.4b[0]\n"
- "ldr x20, [x21, #0x8]\n"
+ "ldr x25, [x26, #0x8]\n"
".inst 0x6f85e0ff // udot v31.4s, v7.16b, v5.4b[0]\n"
"ldr d7, [x16, #0x50]\n"
- "mov v7.d[1], x11\n"
".inst 0x6fa0e0c8 // udot v8.4s, v6.16b, v0.4b[1]\n"
+ "add x24, x24, #0x10\n"
".inst 0x6fa1e0cc // udot v12.4s, v6.16b, v1.4b[1]\n"
- "ldr x11, [x16, #0x78]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6fa2e0d0 // udot v16.4s, v6.16b, v2.4b[1]\n"
- "sub x14, x14, #0x10\n"
+ "mov v7.d[1], x11\n"
".inst 0x6fa3e0d4 // udot v20.4s, v6.16b, v3.4b[1]\n"
- "cmp x14, #0x20\n"
+ "ldr x11, [x16, #0x78]\n"
".inst 0x6fa4e0d8 // udot v24.4s, v6.16b, v4.4b[1]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "ldr x23, [x24, #0x8]\n"
".inst 0x6fa5e0dc // udot v28.4s, v6.16b, v5.4b[1]\n"
"ldr d6, [x16, #0x60]\n"
".inst 0x6fa0e0e9 // udot v9.4s, v7.16b, v0.4b[1]\n"
- "mov v6.d[1], x12\n"
+ "add x22, x22, #0x10\n"
".inst 0x6fa1e0ed // udot v13.4s, v7.16b, v1.4b[1]\n"
- "ldr x12, [x16, #0x88]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6fa2e0f1 // udot v17.4s, v7.16b, v2.4b[1]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "mov v6.d[1], x10\n"
".inst 0x6fa3e0f5 // udot v21.4s, v7.16b, v3.4b[1]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "ldr x10, [x16, #0x88]\n"
".inst 0x6fa4e0f9 // udot v25.4s, v7.16b, v4.4b[1]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr x21, [x22, #0x8]\n"
".inst 0x6fa5e0fd // udot v29.4s, v7.16b, v5.4b[1]\n"
"ldr d7, [x16, #0x70]\n"
- "mov v7.d[1], x11\n"
".inst 0x6fa0e0ca // udot v10.4s, v6.16b, v0.4b[1]\n"
+ "add x20, x20, #0x10\n"
".inst 0x6fa1e0ce // udot v14.4s, v6.16b, v1.4b[1]\n"
- "ldr x11, [x16, #0x98]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
".inst 0x6fa2e0d2 // udot v18.4s, v6.16b, v2.4b[1]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "mov v7.d[1], x11\n"
".inst 0x6fa3e0d6 // udot v22.4s, v6.16b, v3.4b[1]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
+ "ldr x11, [x16, #0x98]\n"
".inst 0x6fa4e0da // udot v26.4s, v6.16b, v4.4b[1]\n"
+ "ldr x19, [x20, #0x8]\n"
".inst 0x6fa5e0de // udot v30.4s, v6.16b, v5.4b[1]\n"
"ldr d6, [x16, #0x80]\n"
".inst 0x6fa0e0eb // udot v11.4s, v7.16b, v0.4b[1]\n"
- "mov v6.d[1], x12\n"
+ "sub x13, x13, #0x10\n"
".inst 0x6fa1e0ef // udot v15.4s, v7.16b, v1.4b[1]\n"
- "ldr x12, [x16, #0xa8]\n"
+ "cmp x13, #0x20\n"
".inst 0x6fa2e0f3 // udot v19.4s, v7.16b, v2.4b[1]\n"
+ "mov v6.d[1], x10\n"
".inst 0x6fa3e0f7 // udot v23.4s, v7.16b, v3.4b[1]\n"
+ "ldr x10, [x16, #0xa8]\n"
".inst 0x6fa4e0fb // udot v27.4s, v7.16b, v4.4b[1]\n"
".inst 0x6fa5e0ff // udot v31.4s, v7.16b, v5.4b[1]\n"
"ldr d7, [x16, #0x90]\n"
- "mov v7.d[1], x11\n"
".inst 0x6f80e8c8 // udot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x6f81e8cc // udot v12.4s, v6.16b, v1.4b[2]\n"
- "ldr x11, [x16, #0xb8]\n"
".inst 0x6f82e8d0 // udot v16.4s, v6.16b, v2.4b[2]\n"
+ "mov v7.d[1], x11\n"
".inst 0x6f83e8d4 // udot v20.4s, v6.16b, v3.4b[2]\n"
+ "ldr x11, [x16, #0xb8]\n"
".inst 0x6f84e8d8 // udot v24.4s, v6.16b, v4.4b[2]\n"
".inst 0x6f85e8dc // udot v28.4s, v6.16b, v5.4b[2]\n"
"ldr d6, [x16, #0xa0]\n"
".inst 0x6f80e8e9 // udot v9.4s, v7.16b, v0.4b[2]\n"
- "mov v6.d[1], x12\n"
".inst 0x6f81e8ed // udot v13.4s, v7.16b, v1.4b[2]\n"
- "ldr x12, [x16, #0xc8]\n"
".inst 0x6f82e8f1 // udot v17.4s, v7.16b, v2.4b[2]\n"
+ "mov v6.d[1], x10\n"
".inst 0x6f83e8f5 // udot v21.4s, v7.16b, v3.4b[2]\n"
+ "ldr x10, [x16, #0xc8]\n"
".inst 0x6f84e8f9 // udot v25.4s, v7.16b, v4.4b[2]\n"
".inst 0x6f85e8fd // udot v29.4s, v7.16b, v5.4b[2]\n"
"ldr d7, [x16, #0xb0]\n"
- "mov v7.d[1], x11\n"
".inst 0x6f80e8ca // udot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x6f81e8ce // udot v14.4s, v6.16b, v1.4b[2]\n"
- "ldr x11, [x16, #0xd8]\n"
".inst 0x6f82e8d2 // udot v18.4s, v6.16b, v2.4b[2]\n"
+ "mov v7.d[1], x11\n"
".inst 0x6f83e8d6 // udot v22.4s, v6.16b, v3.4b[2]\n"
+ "ldr x11, [x16, #0xd8]\n"
".inst 0x6f84e8da // udot v26.4s, v6.16b, v4.4b[2]\n"
".inst 0x6f85e8de // udot v30.4s, v6.16b, v5.4b[2]\n"
"ldr d6, [x16, #0xc0]\n"
".inst 0x6f80e8eb // udot v11.4s, v7.16b, v0.4b[2]\n"
- "mov v6.d[1], x12\n"
".inst 0x6f81e8ef // udot v15.4s, v7.16b, v1.4b[2]\n"
- "ldr x12, [x16, #0xe8]\n"
".inst 0x6f82e8f3 // udot v19.4s, v7.16b, v2.4b[2]\n"
+ "mov v6.d[1], x10\n"
".inst 0x6f83e8f7 // udot v23.4s, v7.16b, v3.4b[2]\n"
+ "ldr x10, [x16, #0xe8]\n"
".inst 0x6f84e8fb // udot v27.4s, v7.16b, v4.4b[2]\n"
".inst 0x6f85e8ff // udot v31.4s, v7.16b, v5.4b[2]\n"
"ldr d7, [x16, #0xd0]\n"
- "mov v7.d[1], x11\n"
".inst 0x6fa0e8c8 // udot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x6fa1e8cc // udot v12.4s, v6.16b, v1.4b[3]\n"
- "ldr x11, [x16, #0xf8]\n"
".inst 0x6fa2e8d0 // udot v16.4s, v6.16b, v2.4b[3]\n"
+ "mov v7.d[1], x11\n"
".inst 0x6fa3e8d4 // udot v20.4s, v6.16b, v3.4b[3]\n"
+ "ldr x11, [x16, #0xf8]\n"
".inst 0x6fa4e8d8 // udot v24.4s, v6.16b, v4.4b[3]\n"
".inst 0x6fa5e8dc // udot v28.4s, v6.16b, v5.4b[3]\n"
"ldr d6, [x16, #0xe0]\n"
".inst 0x6fa0e8e9 // udot v9.4s, v7.16b, v0.4b[3]\n"
- "mov v6.d[1], x12\n"
".inst 0x6fa1e8ed // udot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x6fa2e8f1 // udot v17.4s, v7.16b, v2.4b[3]\n"
+ "mov v6.d[1], x10\n"
".inst 0x6fa3e8f5 // udot v21.4s, v7.16b, v3.4b[3]\n"
".inst 0x6fa4e8f9 // udot v25.4s, v7.16b, v4.4b[3]\n"
".inst 0x6fa5e8fd // udot v29.4s, v7.16b, v5.4b[3]\n"
"ldr d7, [x16, #0xf0]\n"
- "mov v7.d[1], x11\n"
- "add x16, x16, #0x100\n"
".inst 0x6fa0e8ca // udot v10.4s, v6.16b, v0.4b[3]\n"
- "ldr x12, [x16, #0x8]\n"
+ "add x16, x16, #0x100\n"
".inst 0x6fa1e8ce // udot v14.4s, v6.16b, v1.4b[3]\n"
- "ldr x11, [x16, #0x18]\n"
+ "ldr x10, [x16, #0x8]\n"
".inst 0x6fa2e8d2 // udot v18.4s, v6.16b, v2.4b[3]\n"
+ "mov v7.d[1], x11\n"
".inst 0x6fa3e8d6 // udot v22.4s, v6.16b, v3.4b[3]\n"
".inst 0x6fa4e8da // udot v26.4s, v6.16b, v4.4b[3]\n"
".inst 0x6fa5e8de // udot v30.4s, v6.16b, v5.4b[3]\n"
"ldr d6, [x16, #0x0]\n"
".inst 0x6fa0e8eb // udot v11.4s, v7.16b, v0.4b[3]\n"
- "ldr d0, [x13, #0x0]\n"
+ "ldr d0, [x12, #0x0]\n"
".inst 0x6fa1e8ef // udot v15.4s, v7.16b, v1.4b[3]\n"
- "ldr d1, [x9, #0x0]\n"
+ "ldr d1, [x28, #0x0]\n"
".inst 0x6fa2e8f3 // udot v19.4s, v7.16b, v2.4b[3]\n"
- "ldr d2, [x27, #0x0]\n"
+ "mov v6.d[1], x10\n"
".inst 0x6fa3e8f7 // udot v23.4s, v7.16b, v3.4b[3]\n"
- "ldr d3, [x25, #0x0]\n"
+ "mov v0.d[1], x9\n"
".inst 0x6fa4e8fb // udot v27.4s, v7.16b, v4.4b[3]\n"
- "ldr d4, [x23, #0x0]\n"
+ "mov v1.d[1], x27\n"
".inst 0x6fa5e8ff // udot v31.4s, v7.16b, v5.4b[3]\n"
- "ldr d5, [x21, #0x0]\n"
- "ldr d7, [x16, #0x10]\n"
- "mov v6.d[1], x12\n"
- "mov v0.d[1], x10\n"
- "mov v1.d[1], x28\n"
- "mov v2.d[1], x26\n"
- "mov v3.d[1], x24\n"
- "mov v4.d[1], x22\n"
- "mov v5.d[1], x20\n"
- "mov v7.d[1], x11\n"
+ "ldr d2, [x26, #0x0]\n"
+ "ldr d3, [x24, #0x0]\n"
+ "ldr d4, [x22, #0x0]\n"
+ "mov v2.d[1], x25\n"
+ "ldr d5, [x20, #0x0]\n"
+ "mov v3.d[1], x23\n"
+ "mov v4.d[1], x21\n"
+ "mov v5.d[1], x19\n"
"bge 187b\n"
"188:" // Height 6: Multiply loop: Single iteration only
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
- "add x13, x13, #0x10\n"
+ "ldr q7, [x16, #0x10]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "sub x13, x13, #0x10\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
- "add x27, x27, #0x10\n"
+ "add x12, x12, #0x10\n"
".inst 0x6f83e0d4 // udot v20.4s, v6.16b, v3.4b[0]\n"
- "add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x6f84e0d8 // udot v24.4s, v6.16b, v4.4b[0]\n"
- "add x23, x23, #0x10\n"
+ "add x28, x28, #0x10\n"
".inst 0x6f85e0dc // udot v28.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x16, #0x20]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
- "add x21, x21, #0x10\n"
+ "ldr q6, [x16, #0x20]\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
- "sub x14, x14, #0x10\n"
+ "add x26, x26, #0x10\n"
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6f83e0f5 // udot v21.4s, v7.16b, v3.4b[0]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "add x24, x24, #0x10\n"
".inst 0x6f84e0f9 // udot v25.4s, v7.16b, v4.4b[0]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6f85e0fd // udot v29.4s, v7.16b, v5.4b[0]\n"
"ldr q7, [x16, #0x30]\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "add x22, x22, #0x10\n"
".inst 0x6f81e0ce // udot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6f82e0d2 // udot v18.4s, v6.16b, v2.4b[0]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
+ "add x20, x20, #0x10\n"
".inst 0x6f83e0d6 // udot v22.4s, v6.16b, v3.4b[0]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
".inst 0x6f84e0da // udot v26.4s, v6.16b, v4.4b[0]\n"
".inst 0x6f85e0de // udot v30.4s, v6.16b, v5.4b[0]\n"
"ldr q6, [x16, #0x40]\n"
@@ -3206,21 +3206,21 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
".inst 0x6fa4e8fb // udot v27.4s, v7.16b, v4.4b[3]\n"
".inst 0x6fa5e8ff // udot v31.4s, v7.16b, v5.4b[3]\n"
"189:" // Height 6: Multiply loop: Main loop skip
- "cbz x14, 194f\n"
- "cmp x14, #0x4\n"
+ "cbz x13, 194f\n"
+ "cmp x13, #0x4\n"
"blt 191f\n"
"190:" // Height 6: Multiply loop: Odd block loop
- "ldr s0, [x13], #0x4\n"
- "sub x14, x14, #0x4\n"
- "ldr s1, [x9], #0x4\n"
- "cmp x14, #0x4\n"
- "ldr s2, [x27], #0x4\n"
- "ldr s3, [x25], #0x4\n"
- "ldr s4, [x23], #0x4\n"
- "ldr s5, [x21], #0x4\n"
+ "ldr s0, [x12], #0x4\n"
+ "sub x13, x13, #0x4\n"
+ "ldr s1, [x28], #0x4\n"
+ "cmp x13, #0x4\n"
+ "ldr s2, [x26], #0x4\n"
+ "ldr s3, [x24], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
+ "ldr s5, [x20], #0x4\n"
"ldr q6, [x16, #0x0]\n"
- ".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x16, #0x10]\n"
+ ".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x6f83e0d4 // udot v20.4s, v6.16b, v3.4b[0]\n"
@@ -3248,34 +3248,34 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
".inst 0x6f84e0fb // udot v27.4s, v7.16b, v4.4b[0]\n"
".inst 0x6f85e0ff // udot v31.4s, v7.16b, v5.4b[0]\n"
"bge 190b\n"
+ "cbz x13, 194f\n"
"191:" // Height 6: Multiply loop: Skip odd blocks
- "cbz x14, 194f\n"
- "tbz x14, #1, 192f\n"
- "ldr h0, [x13], #0x2\n"
- "ldr h1, [x9], #0x2\n"
- "ldr h2, [x27], #0x2\n"
- "ldr h3, [x25], #0x2\n"
- "ldr h4, [x23], #0x2\n"
- "ldr h5, [x21], #0x2\n"
- "tbz x14, #0, 193f\n"
- "ld1 { v0.b }[2], [x13]\n"
- "ld1 { v1.b }[2], [x9]\n"
- "ld1 { v2.b }[2], [x27]\n"
- "ld1 { v3.b }[2], [x25]\n"
- "ld1 { v4.b }[2], [x23]\n"
- "ld1 { v5.b }[2], [x21]\n"
+ "tbz x13, #1, 192f\n"
+ "ldr h0, [x12], #0x2\n"
+ "ldr h1, [x28], #0x2\n"
+ "ldr h2, [x26], #0x2\n"
+ "ldr h3, [x24], #0x2\n"
+ "ldr h4, [x22], #0x2\n"
+ "ldr h5, [x20], #0x2\n"
+ "tbz x13, #0, 193f\n"
+ "ld1 { v0.b }[2], [x12]\n"
+ "ld1 { v1.b }[2], [x28]\n"
+ "ld1 { v2.b }[2], [x26]\n"
+ "ld1 { v3.b }[2], [x24]\n"
+ "ld1 { v4.b }[2], [x22]\n"
+ "ld1 { v5.b }[2], [x20]\n"
"b 193f\n"
"192:" // Height 6: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x13, #0x0]\n"
- "ldr b1, [x9, #0x0]\n"
- "ldr b2, [x27, #0x0]\n"
- "ldr b3, [x25, #0x0]\n"
- "ldr b4, [x23, #0x0]\n"
- "ldr b5, [x21, #0x0]\n"
+ "ldr b0, [x12, #0x0]\n"
+ "ldr b1, [x28, #0x0]\n"
+ "ldr b2, [x26, #0x0]\n"
+ "ldr b3, [x24, #0x0]\n"
+ "ldr b4, [x22, #0x0]\n"
+ "ldr b5, [x20, #0x0]\n"
"193:" // Height 6: Multiply loop: Ragged operand read: Done
"ldr q6, [x16, #0x0]\n"
- ".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q7, [x16, #0x10]\n"
+ ".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x6f83e0d4 // udot v20.4s, v6.16b, v3.4b[0]\n"
@@ -3303,195 +3303,195 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
".inst 0x6f84e0fb // udot v27.4s, v7.16b, v4.4b[0]\n"
".inst 0x6f85e0ff // udot v31.4s, v7.16b, v5.4b[0]\n"
"194:" // Height 6: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x15, x15, #0x1\n"
- "cmp x15, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x14, x14, #0x1\n"
+ "cmp x14, x19\n"
"bne 184b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
- "cmp x8, #0x10\n"
- "prfm pstl1keep, [x17, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x15, #0x0]\n"
+ "cmp x17, #0x10\n"
+ "add x23, x15, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #2\n"
"prfm pstl1keep, [x21, #0x0]\n"
+ "add x20, x21, x19, LSL #2\n"
"prfm pstl1keep, [x20, #0x0]\n"
+ "add x19, x20, x19, LSL #2\n"
+ "prfm pstl1keep, [x19, #0x0]\n"
"bge 203f\n"
- "tbz x8, #3, 198f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v9.4s }, [x17], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v13.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v21.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
- "st1 { v25.4s }, [x21], #0x10\n"
- "st1 { v28.4s }, [x20], #0x10\n"
- "st1 { v29.4s }, [x20], #0x10\n"
- "tbz x8, #2, 196f\n"
- "st1 { v10.4s }, [x17], #0x10\n"
- "st1 { v14.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "st1 { v22.4s }, [x22], #0x10\n"
- "st1 { v26.4s }, [x21], #0x10\n"
- "st1 { v30.4s }, [x20], #0x10\n"
- "tbz x8, #1, 195f\n"
- "str d11, [x17], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
- "str d23, [x22], #0x8\n"
- "str d27, [x21], #0x8\n"
- "str d31, [x20], #0x8\n"
- "tbz x8, #0, 202f\n"
- "st1 { v11.s }[2], [x17]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
- "st1 { v23.s }[2], [x22]\n"
- "st1 { v27.s }[2], [x21]\n"
- "st1 { v31.s }[2], [x20]\n"
+ "tbz x17, #3, 198f\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
+ "st1 { v9.4s }, [x15], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v13.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v21.4s }, [x21], #0x10\n"
+ "st1 { v24.4s }, [x20], #0x10\n"
+ "st1 { v25.4s }, [x20], #0x10\n"
+ "st1 { v28.4s }, [x19], #0x10\n"
+ "st1 { v29.4s }, [x19], #0x10\n"
+ "tbz x17, #2, 196f\n"
+ "st1 { v10.4s }, [x15], #0x10\n"
+ "st1 { v14.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
+ "st1 { v22.4s }, [x21], #0x10\n"
+ "st1 { v26.4s }, [x20], #0x10\n"
+ "st1 { v30.4s }, [x19], #0x10\n"
+ "tbz x17, #1, 195f\n"
+ "str d11, [x15], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "str d23, [x21], #0x8\n"
+ "str d27, [x20], #0x8\n"
+ "str d31, [x19], #0x8\n"
+ "tbz x17, #0, 202f\n"
+ "st1 { v11.s }[2], [x15]\n"
+ "st1 { v15.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x22]\n"
+ "st1 { v23.s }[2], [x21]\n"
+ "st1 { v27.s }[2], [x20]\n"
+ "st1 { v31.s }[2], [x19]\n"
"b 202f\n"
"195:" // Height 6: Partial direct writeback: partial_1_12
- "tbz x8, #0, 202f\n"
- "str s11, [x17, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
- "str s23, [x22, #0x0]\n"
- "str s27, [x21, #0x0]\n"
- "str s31, [x20, #0x0]\n"
+ "tbz x17, #0, 202f\n"
+ "str s11, [x15, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
+ "str s23, [x21, #0x0]\n"
+ "str s27, [x20, #0x0]\n"
+ "str s31, [x19, #0x0]\n"
"b 202f\n"
"196:" // Height 6: Partial direct writeback: partial_2_8
- "tbz x8, #1, 197f\n"
- "str d10, [x17], #0x8\n"
- "str d14, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
- "str d22, [x22], #0x8\n"
- "str d26, [x21], #0x8\n"
- "str d30, [x20], #0x8\n"
- "tbz x8, #0, 202f\n"
- "st1 { v10.s }[2], [x17]\n"
- "st1 { v14.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
- "st1 { v22.s }[2], [x22]\n"
- "st1 { v26.s }[2], [x21]\n"
- "st1 { v30.s }[2], [x20]\n"
+ "tbz x17, #1, 197f\n"
+ "str d10, [x15], #0x8\n"
+ "str d14, [x23], #0x8\n"
+ "str d18, [x22], #0x8\n"
+ "str d22, [x21], #0x8\n"
+ "str d26, [x20], #0x8\n"
+ "str d30, [x19], #0x8\n"
+ "tbz x17, #0, 202f\n"
+ "st1 { v10.s }[2], [x15]\n"
+ "st1 { v14.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x22]\n"
+ "st1 { v22.s }[2], [x21]\n"
+ "st1 { v26.s }[2], [x20]\n"
+ "st1 { v30.s }[2], [x19]\n"
"b 202f\n"
"197:" // Height 6: Partial direct writeback: partial_1_8
- "tbz x8, #0, 202f\n"
- "str s10, [x17, #0x0]\n"
- "str s14, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
- "str s22, [x22, #0x0]\n"
- "str s26, [x21, #0x0]\n"
- "str s30, [x20, #0x0]\n"
+ "tbz x17, #0, 202f\n"
+ "str s10, [x15, #0x0]\n"
+ "str s14, [x23, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
+ "str s22, [x21, #0x0]\n"
+ "str s26, [x20, #0x0]\n"
+ "str s30, [x19, #0x0]\n"
"b 202f\n"
"198:" // Height 6: Partial direct writeback: partial_4_0
- "tbz x8, #2, 200f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
- "st1 { v28.4s }, [x20], #0x10\n"
- "tbz x8, #1, 199f\n"
- "str d9, [x17], #0x8\n"
- "str d13, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
- "str d21, [x22], #0x8\n"
- "str d25, [x21], #0x8\n"
- "str d29, [x20], #0x8\n"
- "tbz x8, #0, 202f\n"
- "st1 { v9.s }[2], [x17]\n"
- "st1 { v13.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
- "st1 { v21.s }[2], [x22]\n"
- "st1 { v25.s }[2], [x21]\n"
- "st1 { v29.s }[2], [x20]\n"
+ "tbz x17, #2, 200f\n"
+ "st1 { v8.4s }, [x15], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v24.4s }, [x20], #0x10\n"
+ "st1 { v28.4s }, [x19], #0x10\n"
+ "tbz x17, #1, 199f\n"
+ "str d9, [x15], #0x8\n"
+ "str d13, [x23], #0x8\n"
+ "str d17, [x22], #0x8\n"
+ "str d21, [x21], #0x8\n"
+ "str d25, [x20], #0x8\n"
+ "str d29, [x19], #0x8\n"
+ "tbz x17, #0, 202f\n"
+ "st1 { v9.s }[2], [x15]\n"
+ "st1 { v13.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x22]\n"
+ "st1 { v21.s }[2], [x21]\n"
+ "st1 { v25.s }[2], [x20]\n"
+ "st1 { v29.s }[2], [x19]\n"
"b 202f\n"
"199:" // Height 6: Partial direct writeback: partial_1_4
- "tbz x8, #0, 202f\n"
- "str s9, [x17, #0x0]\n"
- "str s13, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
- "str s21, [x22, #0x0]\n"
- "str s25, [x21, #0x0]\n"
- "str s29, [x20, #0x0]\n"
+ "tbz x17, #0, 202f\n"
+ "str s9, [x15, #0x0]\n"
+ "str s13, [x23, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
+ "str s21, [x21, #0x0]\n"
+ "str s25, [x20, #0x0]\n"
+ "str s29, [x19, #0x0]\n"
"b 202f\n"
"200:" // Height 6: Partial direct writeback: partial_2_0
- "tbz x8, #1, 201f\n"
- "str d8, [x17], #0x8\n"
- "str d12, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d20, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
- "str d28, [x20], #0x8\n"
- "tbz x8, #0, 202f\n"
- "st1 { v8.s }[2], [x17]\n"
- "st1 { v12.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
- "st1 { v20.s }[2], [x22]\n"
- "st1 { v24.s }[2], [x21]\n"
- "st1 { v28.s }[2], [x20]\n"
+ "tbz x17, #1, 201f\n"
+ "str d8, [x15], #0x8\n"
+ "str d12, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "str d20, [x21], #0x8\n"
+ "str d24, [x20], #0x8\n"
+ "str d28, [x19], #0x8\n"
+ "tbz x17, #0, 202f\n"
+ "st1 { v8.s }[2], [x15]\n"
+ "st1 { v12.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x22]\n"
+ "st1 { v20.s }[2], [x21]\n"
+ "st1 { v24.s }[2], [x20]\n"
+ "st1 { v28.s }[2], [x19]\n"
"b 202f\n"
"201:" // Height 6: Partial direct writeback: partial_1_0
- "str s8, [x17, #0x0]\n"
- "str s12, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
- "str s20, [x22, #0x0]\n"
- "str s24, [x21, #0x0]\n"
- "str s28, [x20, #0x0]\n"
+ "str s8, [x15, #0x0]\n"
+ "str s12, [x23, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
+ "str s20, [x21, #0x0]\n"
+ "str s24, [x20, #0x0]\n"
+ "str s28, [x19, #0x0]\n"
"202:" // Height 6: Partial direct writeback: Done
"b 204f\n"
"203:" // Height 6: Full writeback
- "str q8, [x17, #0x0]\n"
- "str q9, [x17, #0x10]\n"
- "str q10, [x17, #0x20]\n"
- "str q11, [x17, #0x30]\n"
- "add x17, x17, #0x40\n"
- "str q12, [x24, #0x0]\n"
- "str q13, [x24, #0x10]\n"
- "str q14, [x24, #0x20]\n"
- "str q15, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
- "str q20, [x22, #0x0]\n"
- "str q21, [x22, #0x10]\n"
- "str q22, [x22, #0x20]\n"
- "str q23, [x22, #0x30]\n"
- "str q24, [x21, #0x0]\n"
- "str q25, [x21, #0x10]\n"
- "str q26, [x21, #0x20]\n"
- "str q27, [x21, #0x30]\n"
- "str q28, [x20, #0x0]\n"
- "str q29, [x20, #0x10]\n"
- "str q30, [x20, #0x20]\n"
- "str q31, [x20, #0x30]\n"
+ "str q8, [x15, #0x0]\n"
+ "str q9, [x15, #0x10]\n"
+ "str q10, [x15, #0x20]\n"
+ "str q11, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
+ "str q12, [x23, #0x0]\n"
+ "str q13, [x23, #0x10]\n"
+ "str q14, [x23, #0x20]\n"
+ "str q15, [x23, #0x30]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q17, [x22, #0x10]\n"
+ "str q18, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
+ "str q20, [x21, #0x0]\n"
+ "str q21, [x21, #0x10]\n"
+ "str q22, [x21, #0x20]\n"
+ "str q23, [x21, #0x30]\n"
+ "str q24, [x20, #0x0]\n"
+ "str q25, [x20, #0x10]\n"
+ "str q26, [x20, #0x20]\n"
+ "str q27, [x20, #0x30]\n"
+ "str q28, [x19, #0x0]\n"
+ "str q29, [x19, #0x10]\n"
+ "str q30, [x19, #0x20]\n"
+ "str q31, [x19, #0x30]\n"
"204:" // Height 6: Writeback done
- "subs x8, x8, #0x10\n"
+ "subs x17, x17, #0x10\n"
"bgt 172b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 206f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 205f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"205:" // Update direct input
- "mov x20, #0x6\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x6\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"206:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
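
Note: the ".inst 0x6f80e0c8  // udot v8.4s, v6.16b, v0.4b[0]" encodings used throughout these hunks are the Armv8.2-A dot-product instruction, which accumulates groups of four u8 x u8 products into u32 lanes; the A55 variant above additionally splits each 128-bit weight load into a 64-bit "ldr d" plus a "mov v.d[1], xN" insert from a separately loaded GPR, a pairing that issues more smoothly on the in-order Cortex-A55 pipeline. A minimal sketch of one udot-by-element step via ACLE intrinsics follows; the helper name udot_step is illustrative only, and it assumes a compiler targeting armv8.2-a+dotprod:

#if defined(__ARM_FEATURE_DOTPROD)
#include <arm_neon.h>

// One udot-by-element step: each 32-bit lane of acc gains the dot product of
// the corresponding 4-byte group of b_col with byte group 0 of a_row, i.e.
// "udot acc.4s, b_col.16b, a_row.4b[0]" as in the kernels above.
static inline uint32x4_t udot_step(uint32x4_t acc, uint8x16_t b_col, uint8x16_t a_row)
{
    return vdotq_laneq_u32(acc, b_col, a_row, 0);
}
#endif
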
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16/generic.cpp
index 38131cfd4b..ab0c88a3b2 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -87,73 +87,73 @@ void a64_hybrid_u8u32_dot_6x16 (
"cmp %x[M], #0x2\n"
"bgt 69f\n"
"beq 35f\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"2:" // Height 1: Column loop
"tbz %x[flags], #0, 12f\n"
- "cmp x11, #0x10\n"
+ "cmp x10, #0x10\n"
"bge 11f\n"
- "tbz x11, #3, 6f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "tbz x11, #2, 4f\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "tbz x11, #1, 3f\n"
- "ldr d11, [x9], #0x8\n"
- "mov x25, #0x38\n"
- "tbz x11, #0, 10f\n"
- "ld1 { v11.s }[2], [x9]\n"
+ "tbz x10, #3, 6f\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "tbz x10, #2, 4f\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "tbz x10, #1, 3f\n"
+ "mov x24, #0x38\n"
+ "ldr d11, [x28], #0x8\n"
+ "tbz x10, #0, 10f\n"
+ "ld1 { v11.s }[2], [x28]\n"
"b 10f\n"
"3:" // Height 1: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x11, #0, 10f\n"
- "ldr s11, [x9, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x10, #0, 10f\n"
+ "ldr s11, [x28, #0x0]\n"
"b 10f\n"
"4:" // Height 1: Partial accumulate: partial_2_8
- "tbz x11, #1, 5f\n"
- "ldr d10, [x9], #0x8\n"
- "mov x25, #0x28\n"
- "tbz x11, #0, 10f\n"
- "ld1 { v10.s }[2], [x9]\n"
+ "tbz x10, #1, 5f\n"
+ "ldr d10, [x28], #0x8\n"
+ "mov x24, #0x28\n"
+ "tbz x10, #0, 10f\n"
+ "ld1 { v10.s }[2], [x28]\n"
"b 10f\n"
"5:" // Height 1: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x11, #0, 10f\n"
- "ldr s10, [x9, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x10, #0, 10f\n"
+ "ldr s10, [x28, #0x0]\n"
"b 10f\n"
"6:" // Height 1: Partial accumulate: partial_4_0
- "tbz x11, #2, 8f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "tbz x11, #1, 7f\n"
- "ldr d9, [x9], #0x8\n"
- "mov x25, #0x18\n"
- "tbz x11, #0, 10f\n"
- "ld1 { v9.s }[2], [x9]\n"
+ "tbz x10, #2, 8f\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "tbz x10, #1, 7f\n"
+ "ldr d9, [x28], #0x8\n"
+ "mov x24, #0x18\n"
+ "tbz x10, #0, 10f\n"
+ "ld1 { v9.s }[2], [x28]\n"
"b 10f\n"
"7:" // Height 1: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x11, #0, 10f\n"
- "ldr s9, [x9, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x10, #0, 10f\n"
+ "ldr s9, [x28, #0x0]\n"
"b 10f\n"
"8:" // Height 1: Partial accumulate: partial_2_0
- "tbz x11, #1, 9f\n"
- "ldr d8, [x9], #0x8\n"
- "mov x25, #0x8\n"
- "tbz x11, #0, 10f\n"
- "ld1 { v8.s }[2], [x9]\n"
+ "tbz x10, #1, 9f\n"
+ "ldr d8, [x28], #0x8\n"
+ "mov x24, #0x8\n"
+ "tbz x10, #0, 10f\n"
+ "ld1 { v8.s }[2], [x28]\n"
"b 10f\n"
"9:" // Height 1: Partial accumulate: partial_1_0
- "ldr s8, [x9, #0x0]\n"
- "mov x25, #0x0\n"
+ "ldr s8, [x28, #0x0]\n"
+ "mov x24, #0x0\n"
"10:" // Height 1: Partial accumulate: Done
- "sub x9, x9, x25\n"
+ "sub x28, x28, x24\n"
"b 13f\n"
"11:" // Height 1: full accumulate
- "ldr q8, [x9, #0x0]\n"
- "ldr q9, [x9, #0x10]\n"
- "ldr q10, [x9, #0x20]\n"
- "ldr q11, [x9, #0x30]\n"
+ "ldr q8, [x28, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q10, [x28, #0x20]\n"
+ "ldr q11, [x28, #0x30]\n"
"b 13f\n"
"12:" // Height 1: no accumulate
"movi v8.4s, #0x0\n"
@@ -161,295 +161,295 @@ void a64_hybrid_u8u32_dot_6x16 (
"movi v10.4s, #0x0\n"
"movi v11.4s, #0x0\n"
"13:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"14:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 15f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 16f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 16f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
"b 16f\n"
"15:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"16:" // Height 1: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 19f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q6, [x10, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "cmp x26, #0x20\n"
"blt 18f\n"
"17:" // Height 1: Multiply loop: Main loop head
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x10]\n"
+ "add x25, x25, #0x10\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q7, [x9, #0x30]\n"
+ "cmp x26, #0x20\n"
".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
- "ldr q7, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x40]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x6fa0e0c8 // udot v8.4s, v6.16b, v0.4b[1]\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x6fa0e0e9 // udot v9.4s, v7.16b, v0.4b[1]\n"
- "ldr q7, [x10, #0x70]\n"
+ "ldr q7, [x9, #0x70]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6fa0e0ca // udot v10.4s, v6.16b, v0.4b[1]\n"
- "ldr q6, [x10, #0x80]\n"
+ "ldr q6, [x9, #0x80]\n"
".inst 0x6fa0e0eb // udot v11.4s, v7.16b, v0.4b[1]\n"
- "ldr q7, [x10, #0x90]\n"
+ "ldr q7, [x9, #0x90]\n"
".inst 0x6f80e8c8 // udot v8.4s, v6.16b, v0.4b[2]\n"
- "ldr q6, [x10, #0xa0]\n"
+ "ldr q6, [x9, #0xa0]\n"
".inst 0x6f80e8e9 // udot v9.4s, v7.16b, v0.4b[2]\n"
- "ldr q7, [x10, #0xb0]\n"
+ "ldr q7, [x9, #0xb0]\n"
".inst 0x6f80e8ca // udot v10.4s, v6.16b, v0.4b[2]\n"
- "ldr q6, [x10, #0xc0]\n"
+ "ldr q6, [x9, #0xc0]\n"
".inst 0x6f80e8eb // udot v11.4s, v7.16b, v0.4b[2]\n"
- "ldr q7, [x10, #0xd0]\n"
+ "ldr q7, [x9, #0xd0]\n"
".inst 0x6fa0e8c8 // udot v8.4s, v6.16b, v0.4b[3]\n"
- "ldr q6, [x10, #0xe0]\n"
+ "ldr q6, [x9, #0xe0]\n"
".inst 0x6fa0e8e9 // udot v9.4s, v7.16b, v0.4b[3]\n"
- "ldr q7, [x10, #0xf0]\n"
- "sub x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
+ "ldr q7, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x6fa0e8ca // udot v10.4s, v6.16b, v0.4b[3]\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x6fa0e8eb // udot v11.4s, v7.16b, v0.4b[3]\n"
- "ldr q0, [x26, #0x0]\n"
- "cmp x27, #0x20\n"
- "add x10, x10, #0x100\n"
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q0, [x25, #0x0]\n"
"bge 17b\n"
"18:" // Height 1: Multiply loop: Single iteration only
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x10]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "add x25, x25, #0x10\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q7, [x9, #0x30]\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
- "ldr q7, [x10, #0x50]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x6fa0e0c8 // udot v8.4s, v6.16b, v0.4b[1]\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6fa0e0e9 // udot v9.4s, v7.16b, v0.4b[1]\n"
- "ldr q7, [x10, #0x70]\n"
+ "ldr q7, [x9, #0x70]\n"
".inst 0x6fa0e0ca // udot v10.4s, v6.16b, v0.4b[1]\n"
- "ldr q6, [x10, #0x80]\n"
+ "ldr q6, [x9, #0x80]\n"
".inst 0x6fa0e0eb // udot v11.4s, v7.16b, v0.4b[1]\n"
- "ldr q7, [x10, #0x90]\n"
+ "ldr q7, [x9, #0x90]\n"
".inst 0x6f80e8c8 // udot v8.4s, v6.16b, v0.4b[2]\n"
- "ldr q6, [x10, #0xa0]\n"
+ "ldr q6, [x9, #0xa0]\n"
".inst 0x6f80e8e9 // udot v9.4s, v7.16b, v0.4b[2]\n"
- "ldr q7, [x10, #0xb0]\n"
+ "ldr q7, [x9, #0xb0]\n"
".inst 0x6f80e8ca // udot v10.4s, v6.16b, v0.4b[2]\n"
- "ldr q6, [x10, #0xc0]\n"
+ "ldr q6, [x9, #0xc0]\n"
".inst 0x6f80e8eb // udot v11.4s, v7.16b, v0.4b[2]\n"
- "ldr q7, [x10, #0xd0]\n"
+ "ldr q7, [x9, #0xd0]\n"
".inst 0x6fa0e8c8 // udot v8.4s, v6.16b, v0.4b[3]\n"
- "ldr q6, [x10, #0xe0]\n"
+ "ldr q6, [x9, #0xe0]\n"
".inst 0x6fa0e8e9 // udot v9.4s, v7.16b, v0.4b[3]\n"
- "ldr q7, [x10, #0xf0]\n"
- "add x26, x26, #0x10\n"
- "sub x27, x27, #0x10\n"
+ "ldr q7, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x6fa0e8ca // udot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x6fa0e8eb // udot v11.4s, v7.16b, v0.4b[3]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "add x10, x10, #0x100\n"
"19:" // Height 1: Multiply loop: Main loop skip
- "cbz x27, 24f\n"
- "cmp x27, #0x4\n"
+ "cbz x26, 24f\n"
+ "cmp x26, #0x4\n"
"blt 21f\n"
"20:" // Height 1: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr s0, [x25], #0x4\n"
+ "sub x26, x26, #0x4\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
- "sub x27, x27, #0x4\n"
- "ldr q7, [x10, #0x10]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x10]\n"
+ "cmp x26, #0x4\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
- "cmp x27, #0x4\n"
- "ldr q7, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "ldr q7, [x9, #0x30]\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
+ "add x9, x9, #0x40\n"
".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
- "add x10, x10, #0x40\n"
"bge 20b\n"
+ "cbz x26, 24f\n"
"21:" // Height 1: Multiply loop: Skip odd blocks
- "cbz x27, 24f\n"
- "tbz x27, #1, 22f\n"
- "ldr h0, [x26], #0x2\n"
- "tbz x27, #0, 23f\n"
- "ld1 { v0.b }[2], [x26]\n"
+ "tbz x26, #1, 22f\n"
+ "ldr h0, [x25], #0x2\n"
+ "tbz x26, #0, 23f\n"
+ "ld1 { v0.b }[2], [x25]\n"
"b 23f\n"
"22:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x26, #0x0]\n"
+ "ldr b0, [x25, #0x0]\n"
"23:" // Height 1: Multiply loop: Ragged operand read: Done
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
- "ldr q7, [x10, #0x30]\n"
+ "ldr q7, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
- "add x10, x10, #0x40\n"
"24:" // Height 1: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 14b\n"
- "cmp x11, #0x10\n"
- "prfm pstl1keep, [x9, #0x0]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "cmp x10, #0x10\n"
"bge 33f\n"
- "tbz x11, #3, 28f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v9.4s }, [x9], #0x10\n"
- "tbz x11, #2, 26f\n"
- "st1 { v10.4s }, [x9], #0x10\n"
- "tbz x11, #1, 25f\n"
- "str d11, [x9], #0x8\n"
- "tbz x11, #0, 32f\n"
- "st1 { v11.s }[2], [x9]\n"
+ "tbz x10, #3, 28f\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v9.4s }, [x28], #0x10\n"
+ "tbz x10, #2, 26f\n"
+ "st1 { v10.4s }, [x28], #0x10\n"
+ "tbz x10, #1, 25f\n"
+ "str d11, [x28], #0x8\n"
+ "tbz x10, #0, 32f\n"
+ "st1 { v11.s }[2], [x28]\n"
"b 32f\n"
"25:" // Height 1: Partial direct writeback: partial_1_12
- "tbz x11, #0, 32f\n"
- "str s11, [x9, #0x0]\n"
+ "tbz x10, #0, 32f\n"
+ "str s11, [x28, #0x0]\n"
"b 32f\n"
"26:" // Height 1: Partial direct writeback: partial_2_8
- "tbz x11, #1, 27f\n"
- "str d10, [x9], #0x8\n"
- "tbz x11, #0, 32f\n"
- "st1 { v10.s }[2], [x9]\n"
+ "tbz x10, #1, 27f\n"
+ "str d10, [x28], #0x8\n"
+ "tbz x10, #0, 32f\n"
+ "st1 { v10.s }[2], [x28]\n"
"b 32f\n"
"27:" // Height 1: Partial direct writeback: partial_1_8
- "tbz x11, #0, 32f\n"
- "str s10, [x9, #0x0]\n"
+ "tbz x10, #0, 32f\n"
+ "str s10, [x28, #0x0]\n"
"b 32f\n"
"28:" // Height 1: Partial direct writeback: partial_4_0
- "tbz x11, #2, 30f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "tbz x11, #1, 29f\n"
- "str d9, [x9], #0x8\n"
- "tbz x11, #0, 32f\n"
- "st1 { v9.s }[2], [x9]\n"
+ "tbz x10, #2, 30f\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "tbz x10, #1, 29f\n"
+ "str d9, [x28], #0x8\n"
+ "tbz x10, #0, 32f\n"
+ "st1 { v9.s }[2], [x28]\n"
"b 32f\n"
"29:" // Height 1: Partial direct writeback: partial_1_4
- "tbz x11, #0, 32f\n"
- "str s9, [x9, #0x0]\n"
+ "tbz x10, #0, 32f\n"
+ "str s9, [x28, #0x0]\n"
"b 32f\n"
"30:" // Height 1: Partial direct writeback: partial_2_0
- "tbz x11, #1, 31f\n"
- "str d8, [x9], #0x8\n"
- "tbz x11, #0, 32f\n"
- "st1 { v8.s }[2], [x9]\n"
+ "tbz x10, #1, 31f\n"
+ "str d8, [x28], #0x8\n"
+ "tbz x10, #0, 32f\n"
+ "st1 { v8.s }[2], [x28]\n"
"b 32f\n"
"31:" // Height 1: Partial direct writeback: partial_1_0
- "str s8, [x9, #0x0]\n"
+ "str s8, [x28, #0x0]\n"
"32:" // Height 1: Partial direct writeback: Done
"b 34f\n"
"33:" // Height 1: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
"34:" // Height 1: Writeback done
- "subs x11, x11, #0x10\n"
+ "subs x10, x10, #0x10\n"
"bgt 2b\n"
"b 206f\n"
"35:" // Height 2
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"36:" // Height 2: Column loop
"tbz %x[flags], #0, 46f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "cmp x11, #0x10\n"
- "add x24, x9, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x10, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
"bge 45f\n"
- "tbz x11, #3, 40f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x24], #0x10\n"
- "tbz x11, #2, 38f\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "tbz x11, #1, 37f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d15, [x24], #0x8\n"
- "mov x25, #0x38\n"
- "tbz x11, #0, 44f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x24]\n"
+ "tbz x10, #3, 40f\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x23], #0x10\n"
+ "tbz x10, #2, 38f\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "tbz x10, #1, 37f\n"
+ "mov x24, #0x38\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d15, [x23], #0x8\n"
+ "tbz x10, #0, 44f\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x23]\n"
"b 44f\n"
"37:" // Height 2: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x11, #0, 44f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s15, [x24, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x10, #0, 44f\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s15, [x23, #0x0]\n"
"b 44f\n"
"38:" // Height 2: Partial accumulate: partial_2_8
- "tbz x11, #1, 39f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d14, [x24], #0x8\n"
- "mov x25, #0x28\n"
- "tbz x11, #0, 44f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x24]\n"
+ "tbz x10, #1, 39f\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
+ "mov x24, #0x28\n"
+ "tbz x10, #0, 44f\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x23]\n"
"b 44f\n"
"39:" // Height 2: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x11, #0, 44f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s14, [x24, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x10, #0, 44f\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s14, [x23, #0x0]\n"
"b 44f\n"
"40:" // Height 2: Partial accumulate: partial_4_0
- "tbz x11, #2, 42f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "tbz x11, #1, 41f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d13, [x24], #0x8\n"
- "mov x25, #0x18\n"
- "tbz x11, #0, 44f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x24]\n"
+ "tbz x10, #2, 42f\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "tbz x10, #1, 41f\n"
+ "mov x24, #0x18\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d13, [x23], #0x8\n"
+ "tbz x10, #0, 44f\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x23]\n"
"b 44f\n"
"41:" // Height 2: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x11, #0, 44f\n"
- "ldr s9, [x9, #0x0]\n"
- "ldr s13, [x24, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x10, #0, 44f\n"
+ "ldr s9, [x28, #0x0]\n"
+ "ldr s13, [x23, #0x0]\n"
"b 44f\n"
"42:" // Height 2: Partial accumulate: partial_2_0
- "tbz x11, #1, 43f\n"
- "ldr d8, [x9], #0x8\n"
- "ldr d12, [x24], #0x8\n"
- "mov x25, #0x8\n"
- "tbz x11, #0, 44f\n"
- "ld1 { v8.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x24]\n"
+ "tbz x10, #1, 43f\n"
+ "ldr d8, [x28], #0x8\n"
+ "ldr d12, [x23], #0x8\n"
+ "mov x24, #0x8\n"
+ "tbz x10, #0, 44f\n"
+ "ld1 { v8.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x23]\n"
"b 44f\n"
"43:" // Height 2: Partial accumulate: partial_1_0
- "ldr s8, [x9, #0x0]\n"
- "ldr s12, [x24, #0x0]\n"
- "mov x25, #0x0\n"
+ "ldr s8, [x28, #0x0]\n"
+ "mov x24, #0x0\n"
+ "ldr s12, [x23, #0x0]\n"
"44:" // Height 2: Partial accumulate: Done
- "sub x9, x9, x25\n"
+ "sub x28, x28, x24\n"
"b 47f\n"
"45:" // Height 2: full accumulate
- "ldr q8, [x9, #0x0]\n"
- "ldr q9, [x9, #0x10]\n"
- "ldr q10, [x9, #0x20]\n"
- "ldr q11, [x9, #0x30]\n"
- "ldr q12, [x24, #0x0]\n"
- "ldr q13, [x24, #0x10]\n"
- "ldr q14, [x24, #0x20]\n"
- "ldr q15, [x24, #0x30]\n"
+ "ldr q8, [x28, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q10, [x28, #0x20]\n"
+ "ldr q11, [x28, #0x30]\n"
+ "ldr q12, [x23, #0x0]\n"
+ "ldr q13, [x23, #0x10]\n"
+ "ldr q14, [x23, #0x20]\n"
+ "ldr q15, [x23, #0x30]\n"
"b 47f\n"
"46:" // Height 2: no accumulate
"movi v8.4s, #0x0\n"
@@ -461,392 +461,392 @@ void a64_hybrid_u8u32_dot_6x16 (
"movi v14.4s, #0x0\n"
"movi v15.4s, #0x0\n"
"47:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"48:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 49f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 50f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 50f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
"b 50f\n"
"49:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
"50:" // Height 2: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 53f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x20\n"
+ "ldr q6, [x9, #0x0]\n"
"blt 52f\n"
"51:" // Height 2: Multiply loop: Main loop head
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
+ "add x25, x25, #0x10\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
- "sub x27, x27, #0x10\n"
+ "ldr q6, [x9, #0x20]\n"
+ "add x24, x24, #0x10\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "add x26, x26, #0x10\n"
+ "ldr q7, [x9, #0x30]\n"
+ "cmp x26, #0x20\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6f81e0ce // udot v14.4s, v6.16b, v1.4b[0]\n"
- "ldr q6, [x10, #0x40]\n"
- "add x25, x25, #0x10\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0ef // udot v15.4s, v7.16b, v1.4b[0]\n"
- "ldr q7, [x10, #0x50]\n"
- "cmp x27, #0x20\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x6fa0e0c8 // udot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x6fa1e0cc // udot v12.4s, v6.16b, v1.4b[1]\n"
- "ldr q6, [x10, #0x60]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x6fa0e0e9 // udot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x6fa1e0ed // udot v13.4s, v7.16b, v1.4b[1]\n"
- "ldr q7, [x10, #0x70]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q7, [x9, #0x70]\n"
".inst 0x6fa0e0ca // udot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x6fa1e0ce // udot v14.4s, v6.16b, v1.4b[1]\n"
- "ldr q6, [x10, #0x80]\n"
+ "ldr q6, [x9, #0x80]\n"
".inst 0x6fa0e0eb // udot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x6fa1e0ef // udot v15.4s, v7.16b, v1.4b[1]\n"
- "ldr q7, [x10, #0x90]\n"
+ "ldr q7, [x9, #0x90]\n"
".inst 0x6f80e8c8 // udot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x6f81e8cc // udot v12.4s, v6.16b, v1.4b[2]\n"
- "ldr q6, [x10, #0xa0]\n"
+ "ldr q6, [x9, #0xa0]\n"
".inst 0x6f80e8e9 // udot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x6f81e8ed // udot v13.4s, v7.16b, v1.4b[2]\n"
- "ldr q7, [x10, #0xb0]\n"
+ "ldr q7, [x9, #0xb0]\n"
".inst 0x6f80e8ca // udot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x6f81e8ce // udot v14.4s, v6.16b, v1.4b[2]\n"
- "ldr q6, [x10, #0xc0]\n"
+ "ldr q6, [x9, #0xc0]\n"
".inst 0x6f80e8eb // udot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x6f81e8ef // udot v15.4s, v7.16b, v1.4b[2]\n"
- "ldr q7, [x10, #0xd0]\n"
+ "ldr q7, [x9, #0xd0]\n"
".inst 0x6fa0e8c8 // udot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x6fa1e8cc // udot v12.4s, v6.16b, v1.4b[3]\n"
- "ldr q6, [x10, #0xe0]\n"
+ "ldr q6, [x9, #0xe0]\n"
".inst 0x6fa0e8e9 // udot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x6fa1e8ed // udot v13.4s, v7.16b, v1.4b[3]\n"
- "ldr q7, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q7, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x6fa0e8ca // udot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x6fa1e8ce // udot v14.4s, v6.16b, v1.4b[3]\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x6fa0e8eb // udot v11.4s, v7.16b, v0.4b[3]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
".inst 0x6fa1e8ef // udot v15.4s, v7.16b, v1.4b[3]\n"
- "ldr q1, [x25, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q1, [x24, #0x0]\n"
"bge 51b\n"
"52:" // Height 2: Multiply loop: Single iteration only
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
- "add x26, x26, #0x10\n"
+ "ldr q6, [x9, #0x20]\n"
+ "add x25, x25, #0x10\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "add x25, x25, #0x10\n"
+ "ldr q7, [x9, #0x30]\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6f81e0ce // udot v14.4s, v6.16b, v1.4b[0]\n"
- "ldr q6, [x10, #0x40]\n"
- "sub x27, x27, #0x10\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0ef // udot v15.4s, v7.16b, v1.4b[0]\n"
- "ldr q7, [x10, #0x50]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x6fa0e0c8 // udot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x6fa1e0cc // udot v12.4s, v6.16b, v1.4b[1]\n"
- "ldr q6, [x10, #0x60]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x6fa0e0e9 // udot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x6fa1e0ed // udot v13.4s, v7.16b, v1.4b[1]\n"
- "ldr q7, [x10, #0x70]\n"
+ "ldr q7, [x9, #0x70]\n"
".inst 0x6fa0e0ca // udot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x6fa1e0ce // udot v14.4s, v6.16b, v1.4b[1]\n"
- "ldr q6, [x10, #0x80]\n"
+ "ldr q6, [x9, #0x80]\n"
".inst 0x6fa0e0eb // udot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x6fa1e0ef // udot v15.4s, v7.16b, v1.4b[1]\n"
- "ldr q7, [x10, #0x90]\n"
+ "ldr q7, [x9, #0x90]\n"
".inst 0x6f80e8c8 // udot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x6f81e8cc // udot v12.4s, v6.16b, v1.4b[2]\n"
- "ldr q6, [x10, #0xa0]\n"
+ "ldr q6, [x9, #0xa0]\n"
".inst 0x6f80e8e9 // udot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x6f81e8ed // udot v13.4s, v7.16b, v1.4b[2]\n"
- "ldr q7, [x10, #0xb0]\n"
+ "ldr q7, [x9, #0xb0]\n"
".inst 0x6f80e8ca // udot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x6f81e8ce // udot v14.4s, v6.16b, v1.4b[2]\n"
- "ldr q6, [x10, #0xc0]\n"
+ "ldr q6, [x9, #0xc0]\n"
".inst 0x6f80e8eb // udot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x6f81e8ef // udot v15.4s, v7.16b, v1.4b[2]\n"
- "ldr q7, [x10, #0xd0]\n"
+ "ldr q7, [x9, #0xd0]\n"
".inst 0x6fa0e8c8 // udot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x6fa1e8cc // udot v12.4s, v6.16b, v1.4b[3]\n"
- "ldr q6, [x10, #0xe0]\n"
+ "ldr q6, [x9, #0xe0]\n"
".inst 0x6fa0e8e9 // udot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x6fa1e8ed // udot v13.4s, v7.16b, v1.4b[3]\n"
- "ldr q7, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q7, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x6fa0e8ca // udot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x6fa1e8ce // udot v14.4s, v6.16b, v1.4b[3]\n"
".inst 0x6fa0e8eb // udot v11.4s, v7.16b, v0.4b[3]\n"
".inst 0x6fa1e8ef // udot v15.4s, v7.16b, v1.4b[3]\n"
"53:" // Height 2: Multiply loop: Main loop skip
- "cbz x27, 58f\n"
- "cmp x27, #0x4\n"
+ "cbz x26, 58f\n"
+ "cmp x26, #0x4\n"
"blt 55f\n"
"54:" // Height 2: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr s1, [x25], #0x4\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr s0, [x25], #0x4\n"
+ "sub x26, x26, #0x4\n"
+ "ldr s1, [x24], #0x4\n"
+ "cmp x26, #0x4\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
+ "ldr q7, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0ce // udot v14.4s, v6.16b, v1.4b[0]\n"
- "add x10, x10, #0x40\n"
".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0ef // udot v15.4s, v7.16b, v1.4b[0]\n"
"bge 54b\n"
+ "cbz x26, 58f\n"
"55:" // Height 2: Multiply loop: Skip odd blocks
- "cbz x27, 58f\n"
- "tbz x27, #1, 56f\n"
- "ldr h0, [x26], #0x2\n"
- "ldr h1, [x25], #0x2\n"
- "tbz x27, #0, 57f\n"
- "ld1 { v0.b }[2], [x26]\n"
- "ld1 { v1.b }[2], [x25]\n"
+ "tbz x26, #1, 56f\n"
+ "ldr h0, [x25], #0x2\n"
+ "ldr h1, [x24], #0x2\n"
+ "tbz x26, #0, 57f\n"
+ "ld1 { v0.b }[2], [x25]\n"
+ "ld1 { v1.b }[2], [x24]\n"
"b 57f\n"
"56:" // Height 2: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x26, #0x0]\n"
- "ldr b1, [x25, #0x0]\n"
+ "ldr b0, [x25, #0x0]\n"
+ "ldr b1, [x24, #0x0]\n"
"57:" // Height 2: Multiply loop: Ragged operand read: Done
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
+ "ldr q7, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0ce // udot v14.4s, v6.16b, v1.4b[0]\n"
- "add x10, x10, #0x40\n"
".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0ef // udot v15.4s, v7.16b, v1.4b[0]\n"
"58:" // Height 2: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 48b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "cmp x11, #0x10\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "cmp x10, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"bge 67f\n"
- "tbz x11, #3, 62f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v9.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v13.4s }, [x24], #0x10\n"
- "tbz x11, #2, 60f\n"
- "st1 { v10.4s }, [x9], #0x10\n"
- "st1 { v14.4s }, [x24], #0x10\n"
- "tbz x11, #1, 59f\n"
- "str d11, [x9], #0x8\n"
- "str d15, [x24], #0x8\n"
- "tbz x11, #0, 66f\n"
- "st1 { v11.s }[2], [x9]\n"
- "st1 { v15.s }[2], [x24]\n"
+ "tbz x10, #3, 62f\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v9.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v13.4s }, [x23], #0x10\n"
+ "tbz x10, #2, 60f\n"
+ "st1 { v10.4s }, [x28], #0x10\n"
+ "st1 { v14.4s }, [x23], #0x10\n"
+ "tbz x10, #1, 59f\n"
+ "str d11, [x28], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "tbz x10, #0, 66f\n"
+ "st1 { v11.s }[2], [x28]\n"
+ "st1 { v15.s }[2], [x23]\n"
"b 66f\n"
"59:" // Height 2: Partial direct writeback: partial_1_12
- "tbz x11, #0, 66f\n"
- "str s11, [x9, #0x0]\n"
- "str s15, [x24, #0x0]\n"
+ "tbz x10, #0, 66f\n"
+ "str s11, [x28, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
"b 66f\n"
"60:" // Height 2: Partial direct writeback: partial_2_8
- "tbz x11, #1, 61f\n"
- "str d10, [x9], #0x8\n"
- "str d14, [x24], #0x8\n"
- "tbz x11, #0, 66f\n"
- "st1 { v10.s }[2], [x9]\n"
- "st1 { v14.s }[2], [x24]\n"
+ "tbz x10, #1, 61f\n"
+ "str d10, [x28], #0x8\n"
+ "str d14, [x23], #0x8\n"
+ "tbz x10, #0, 66f\n"
+ "st1 { v10.s }[2], [x28]\n"
+ "st1 { v14.s }[2], [x23]\n"
"b 66f\n"
"61:" // Height 2: Partial direct writeback: partial_1_8
- "tbz x11, #0, 66f\n"
- "str s10, [x9, #0x0]\n"
- "str s14, [x24, #0x0]\n"
+ "tbz x10, #0, 66f\n"
+ "str s10, [x28, #0x0]\n"
+ "str s14, [x23, #0x0]\n"
"b 66f\n"
"62:" // Height 2: Partial direct writeback: partial_4_0
- "tbz x11, #2, 64f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "tbz x11, #1, 63f\n"
- "str d9, [x9], #0x8\n"
- "str d13, [x24], #0x8\n"
- "tbz x11, #0, 66f\n"
- "st1 { v9.s }[2], [x9]\n"
- "st1 { v13.s }[2], [x24]\n"
+ "tbz x10, #2, 64f\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "tbz x10, #1, 63f\n"
+ "str d9, [x28], #0x8\n"
+ "str d13, [x23], #0x8\n"
+ "tbz x10, #0, 66f\n"
+ "st1 { v9.s }[2], [x28]\n"
+ "st1 { v13.s }[2], [x23]\n"
"b 66f\n"
"63:" // Height 2: Partial direct writeback: partial_1_4
- "tbz x11, #0, 66f\n"
- "str s9, [x9, #0x0]\n"
- "str s13, [x24, #0x0]\n"
+ "tbz x10, #0, 66f\n"
+ "str s9, [x28, #0x0]\n"
+ "str s13, [x23, #0x0]\n"
"b 66f\n"
"64:" // Height 2: Partial direct writeback: partial_2_0
- "tbz x11, #1, 65f\n"
- "str d8, [x9], #0x8\n"
- "str d12, [x24], #0x8\n"
- "tbz x11, #0, 66f\n"
- "st1 { v8.s }[2], [x9]\n"
- "st1 { v12.s }[2], [x24]\n"
+ "tbz x10, #1, 65f\n"
+ "str d8, [x28], #0x8\n"
+ "str d12, [x23], #0x8\n"
+ "tbz x10, #0, 66f\n"
+ "st1 { v8.s }[2], [x28]\n"
+ "st1 { v12.s }[2], [x23]\n"
"b 66f\n"
"65:" // Height 2: Partial direct writeback: partial_1_0
- "str s8, [x9, #0x0]\n"
- "str s12, [x24, #0x0]\n"
+ "str s8, [x28, #0x0]\n"
+ "str s12, [x23, #0x0]\n"
"66:" // Height 2: Partial direct writeback: Done
"b 68f\n"
"67:" // Height 2: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q12, [x24, #0x0]\n"
- "str q13, [x24, #0x10]\n"
- "str q14, [x24, #0x20]\n"
- "str q15, [x24, #0x30]\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q12, [x23, #0x0]\n"
+ "str q13, [x23, #0x10]\n"
+ "str q14, [x23, #0x20]\n"
+ "str q15, [x23, #0x30]\n"
"68:" // Height 2: Writeback done
- "subs x11, x11, #0x10\n"
+ "subs x10, x10, #0x10\n"
"bgt 36b\n"
"b 206f\n"
"69:" // Height 3
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"70:" // Height 3: Column loop
"tbz %x[flags], #0, 80f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "cmp x11, #0x10\n"
- "add x23, x24, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x10, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"bge 79f\n"
- "tbz x11, #3, 74f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x24], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "tbz x11, #2, 72f\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "tbz x11, #1, 71f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d15, [x24], #0x8\n"
- "mov x25, #0x38\n"
- "ldr d19, [x23], #0x8\n"
- "tbz x11, #0, 78f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x24]\n"
- "ld1 { v19.s }[2], [x23]\n"
+ "tbz x10, #3, 74f\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "tbz x10, #2, 72f\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "tbz x10, #1, 71f\n"
+ "mov x24, #0x38\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d15, [x23], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
+ "tbz x10, #0, 78f\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x23]\n"
+ "ld1 { v19.s }[2], [x22]\n"
"b 78f\n"
"71:" // Height 3: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x11, #0, 78f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s15, [x24, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x10, #0, 78f\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s15, [x23, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
"b 78f\n"
"72:" // Height 3: Partial accumulate: partial_2_8
- "tbz x11, #1, 73f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d14, [x24], #0x8\n"
- "mov x25, #0x28\n"
- "ldr d18, [x23], #0x8\n"
- "tbz x11, #0, 78f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x24]\n"
- "ld1 { v18.s }[2], [x23]\n"
+ "tbz x10, #1, 73f\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
+ "mov x24, #0x28\n"
+ "ldr d18, [x22], #0x8\n"
+ "tbz x10, #0, 78f\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x23]\n"
+ "ld1 { v18.s }[2], [x22]\n"
"b 78f\n"
"73:" // Height 3: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x11, #0, 78f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s14, [x24, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x10, #0, 78f\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s14, [x23, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
"b 78f\n"
"74:" // Height 3: Partial accumulate: partial_4_0
- "tbz x11, #2, 76f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "tbz x11, #1, 75f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d13, [x24], #0x8\n"
- "mov x25, #0x18\n"
- "ldr d17, [x23], #0x8\n"
- "tbz x11, #0, 78f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x24]\n"
- "ld1 { v17.s }[2], [x23]\n"
+ "tbz x10, #2, 76f\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "tbz x10, #1, 75f\n"
+ "mov x24, #0x18\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d13, [x23], #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "tbz x10, #0, 78f\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x23]\n"
+ "ld1 { v17.s }[2], [x22]\n"
"b 78f\n"
"75:" // Height 3: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x11, #0, 78f\n"
- "ldr s9, [x9, #0x0]\n"
- "ldr s13, [x24, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x10, #0, 78f\n"
+ "ldr s9, [x28, #0x0]\n"
+ "ldr s13, [x23, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
"b 78f\n"
"76:" // Height 3: Partial accumulate: partial_2_0
- "tbz x11, #1, 77f\n"
- "ldr d8, [x9], #0x8\n"
- "ldr d12, [x24], #0x8\n"
- "mov x25, #0x8\n"
- "ldr d16, [x23], #0x8\n"
- "tbz x11, #0, 78f\n"
- "ld1 { v8.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x24]\n"
- "ld1 { v16.s }[2], [x23]\n"
+ "tbz x10, #1, 77f\n"
+ "ldr d8, [x28], #0x8\n"
+ "ldr d12, [x23], #0x8\n"
+ "mov x24, #0x8\n"
+ "ldr d16, [x22], #0x8\n"
+ "tbz x10, #0, 78f\n"
+ "ld1 { v8.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x23]\n"
+ "ld1 { v16.s }[2], [x22]\n"
"b 78f\n"
"77:" // Height 3: Partial accumulate: partial_1_0
- "ldr s8, [x9, #0x0]\n"
- "ldr s12, [x24, #0x0]\n"
- "mov x25, #0x0\n"
- "ldr s16, [x23, #0x0]\n"
+ "ldr s8, [x28, #0x0]\n"
+ "mov x24, #0x0\n"
+ "ldr s12, [x23, #0x0]\n"
+ "ldr s16, [x22, #0x0]\n"
"78:" // Height 3: Partial accumulate: Done
- "sub x9, x9, x25\n"
+ "sub x28, x28, x24\n"
"b 81f\n"
"79:" // Height 3: full accumulate
- "ldr q8, [x9, #0x0]\n"
- "ldr q9, [x9, #0x10]\n"
- "ldr q10, [x9, #0x20]\n"
- "ldr q11, [x9, #0x30]\n"
- "ldr q12, [x24, #0x0]\n"
- "ldr q13, [x24, #0x10]\n"
- "ldr q14, [x24, #0x20]\n"
- "ldr q15, [x24, #0x30]\n"
- "ldr q16, [x23, #0x0]\n"
- "ldr q17, [x23, #0x10]\n"
- "ldr q18, [x23, #0x20]\n"
- "ldr q19, [x23, #0x30]\n"
+ "ldr q8, [x28, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q10, [x28, #0x20]\n"
+ "ldr q11, [x28, #0x30]\n"
+ "ldr q12, [x23, #0x0]\n"
+ "ldr q13, [x23, #0x10]\n"
+ "ldr q14, [x23, #0x20]\n"
+ "ldr q15, [x23, #0x30]\n"
+ "ldr q16, [x22, #0x0]\n"
+ "ldr q17, [x22, #0x10]\n"
+ "ldr q18, [x22, #0x20]\n"
+ "ldr q19, [x22, #0x30]\n"
"b 81f\n"
"80:" // Height 3: no accumulate
"movi v8.4s, #0x0\n"
@@ -862,180 +862,180 @@ void a64_hybrid_u8u32_dot_6x16 (
"movi v18.4s, #0x0\n"
"movi v19.4s, #0x0\n"
"81:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"82:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 83f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 84f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 84f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
"b 84f\n"
"83:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
"84:" // Height 3: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 87f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x20\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
"blt 86f\n"
"85:" // Height 3: Multiply loop: Main loop head
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
+ "add x25, x25, #0x10\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
- "sub x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "add x23, x23, #0x10\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
- "add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "cmp x26, #0x20\n"
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "add x24, x24, #0x10\n"
+ "ldr q7, [x9, #0x30]\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0ce // udot v14.4s, v6.16b, v1.4b[0]\n"
- "cmp x27, #0x20\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6f82e0d2 // udot v18.4s, v6.16b, v2.4b[0]\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6f81e0ef // udot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f82e0f3 // udot v19.4s, v7.16b, v2.4b[0]\n"
- "ldr q7, [x10, #0x50]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x6fa0e0c8 // udot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x6fa1e0cc // udot v12.4s, v6.16b, v1.4b[1]\n"
".inst 0x6fa2e0d0 // udot v16.4s, v6.16b, v2.4b[1]\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x6fa0e0e9 // udot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x6fa1e0ed // udot v13.4s, v7.16b, v1.4b[1]\n"
".inst 0x6fa2e0f1 // udot v17.4s, v7.16b, v2.4b[1]\n"
- "ldr q7, [x10, #0x70]\n"
+ "ldr q7, [x9, #0x70]\n"
".inst 0x6fa0e0ca // udot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x6fa1e0ce // udot v14.4s, v6.16b, v1.4b[1]\n"
".inst 0x6fa2e0d2 // udot v18.4s, v6.16b, v2.4b[1]\n"
- "ldr q6, [x10, #0x80]\n"
+ "ldr q6, [x9, #0x80]\n"
".inst 0x6fa0e0eb // udot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x6fa1e0ef // udot v15.4s, v7.16b, v1.4b[1]\n"
".inst 0x6fa2e0f3 // udot v19.4s, v7.16b, v2.4b[1]\n"
- "ldr q7, [x10, #0x90]\n"
+ "ldr q7, [x9, #0x90]\n"
".inst 0x6f80e8c8 // udot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x6f81e8cc // udot v12.4s, v6.16b, v1.4b[2]\n"
".inst 0x6f82e8d0 // udot v16.4s, v6.16b, v2.4b[2]\n"
- "ldr q6, [x10, #0xa0]\n"
+ "ldr q6, [x9, #0xa0]\n"
".inst 0x6f80e8e9 // udot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x6f81e8ed // udot v13.4s, v7.16b, v1.4b[2]\n"
".inst 0x6f82e8f1 // udot v17.4s, v7.16b, v2.4b[2]\n"
- "ldr q7, [x10, #0xb0]\n"
+ "ldr q7, [x9, #0xb0]\n"
".inst 0x6f80e8ca // udot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x6f81e8ce // udot v14.4s, v6.16b, v1.4b[2]\n"
".inst 0x6f82e8d2 // udot v18.4s, v6.16b, v2.4b[2]\n"
- "ldr q6, [x10, #0xc0]\n"
+ "ldr q6, [x9, #0xc0]\n"
".inst 0x6f80e8eb // udot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x6f81e8ef // udot v15.4s, v7.16b, v1.4b[2]\n"
".inst 0x6f82e8f3 // udot v19.4s, v7.16b, v2.4b[2]\n"
- "ldr q7, [x10, #0xd0]\n"
+ "ldr q7, [x9, #0xd0]\n"
".inst 0x6fa0e8c8 // udot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x6fa1e8cc // udot v12.4s, v6.16b, v1.4b[3]\n"
".inst 0x6fa2e8d0 // udot v16.4s, v6.16b, v2.4b[3]\n"
- "ldr q6, [x10, #0xe0]\n"
+ "ldr q6, [x9, #0xe0]\n"
".inst 0x6fa0e8e9 // udot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x6fa1e8ed // udot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x6fa2e8f1 // udot v17.4s, v7.16b, v2.4b[3]\n"
- "ldr q7, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q7, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x6fa0e8ca // udot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x6fa1e8ce // udot v14.4s, v6.16b, v1.4b[3]\n"
".inst 0x6fa2e8d2 // udot v18.4s, v6.16b, v2.4b[3]\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x6fa0e8eb // udot v11.4s, v7.16b, v0.4b[3]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
".inst 0x6fa1e8ef // udot v15.4s, v7.16b, v1.4b[3]\n"
- "ldr q1, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
".inst 0x6fa2e8f3 // udot v19.4s, v7.16b, v2.4b[3]\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q2, [x23, #0x0]\n"
"bge 85b\n"
"86:" // Height 3: Multiply loop: Single iteration only
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
- ".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
"add x24, x24, #0x10\n"
+ ".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "add x23, x23, #0x10\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "ldr q7, [x9, #0x30]\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0ce // udot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6f82e0d2 // udot v18.4s, v6.16b, v2.4b[0]\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6f81e0ef // udot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f82e0f3 // udot v19.4s, v7.16b, v2.4b[0]\n"
- "ldr q7, [x10, #0x50]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x6fa0e0c8 // udot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x6fa1e0cc // udot v12.4s, v6.16b, v1.4b[1]\n"
".inst 0x6fa2e0d0 // udot v16.4s, v6.16b, v2.4b[1]\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x6fa0e0e9 // udot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x6fa1e0ed // udot v13.4s, v7.16b, v1.4b[1]\n"
".inst 0x6fa2e0f1 // udot v17.4s, v7.16b, v2.4b[1]\n"
- "ldr q7, [x10, #0x70]\n"
+ "ldr q7, [x9, #0x70]\n"
".inst 0x6fa0e0ca // udot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x6fa1e0ce // udot v14.4s, v6.16b, v1.4b[1]\n"
".inst 0x6fa2e0d2 // udot v18.4s, v6.16b, v2.4b[1]\n"
- "ldr q6, [x10, #0x80]\n"
+ "ldr q6, [x9, #0x80]\n"
".inst 0x6fa0e0eb // udot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x6fa1e0ef // udot v15.4s, v7.16b, v1.4b[1]\n"
".inst 0x6fa2e0f3 // udot v19.4s, v7.16b, v2.4b[1]\n"
- "ldr q7, [x10, #0x90]\n"
+ "ldr q7, [x9, #0x90]\n"
".inst 0x6f80e8c8 // udot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x6f81e8cc // udot v12.4s, v6.16b, v1.4b[2]\n"
".inst 0x6f82e8d0 // udot v16.4s, v6.16b, v2.4b[2]\n"
- "ldr q6, [x10, #0xa0]\n"
+ "ldr q6, [x9, #0xa0]\n"
".inst 0x6f80e8e9 // udot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x6f81e8ed // udot v13.4s, v7.16b, v1.4b[2]\n"
".inst 0x6f82e8f1 // udot v17.4s, v7.16b, v2.4b[2]\n"
- "ldr q7, [x10, #0xb0]\n"
+ "ldr q7, [x9, #0xb0]\n"
".inst 0x6f80e8ca // udot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x6f81e8ce // udot v14.4s, v6.16b, v1.4b[2]\n"
".inst 0x6f82e8d2 // udot v18.4s, v6.16b, v2.4b[2]\n"
- "ldr q6, [x10, #0xc0]\n"
+ "ldr q6, [x9, #0xc0]\n"
".inst 0x6f80e8eb // udot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x6f81e8ef // udot v15.4s, v7.16b, v1.4b[2]\n"
".inst 0x6f82e8f3 // udot v19.4s, v7.16b, v2.4b[2]\n"
- "ldr q7, [x10, #0xd0]\n"
+ "ldr q7, [x9, #0xd0]\n"
".inst 0x6fa0e8c8 // udot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x6fa1e8cc // udot v12.4s, v6.16b, v1.4b[3]\n"
".inst 0x6fa2e8d0 // udot v16.4s, v6.16b, v2.4b[3]\n"
- "ldr q6, [x10, #0xe0]\n"
+ "ldr q6, [x9, #0xe0]\n"
".inst 0x6fa0e8e9 // udot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x6fa1e8ed // udot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x6fa2e8f1 // udot v17.4s, v7.16b, v2.4b[3]\n"
- "ldr q7, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q7, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x6fa0e8ca // udot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x6fa1e8ce // udot v14.4s, v6.16b, v1.4b[3]\n"
".inst 0x6fa2e8d2 // udot v18.4s, v6.16b, v2.4b[3]\n"
@@ -1043,26 +1043,26 @@ void a64_hybrid_u8u32_dot_6x16 (
".inst 0x6fa1e8ef // udot v15.4s, v7.16b, v1.4b[3]\n"
".inst 0x6fa2e8f3 // udot v19.4s, v7.16b, v2.4b[3]\n"
"87:" // Height 3: Multiply loop: Main loop skip
- "cbz x27, 92f\n"
- "cmp x27, #0x4\n"
+ "cbz x26, 92f\n"
+ "cmp x26, #0x4\n"
"blt 89f\n"
"88:" // Height 3: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr s1, [x25], #0x4\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
- "ldr s2, [x24], #0x4\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr s0, [x25], #0x4\n"
+ "sub x26, x26, #0x4\n"
+ "ldr s1, [x24], #0x4\n"
+ "cmp x26, #0x4\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr q7, [x10, #0x10]\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "add x10, x10, #0x40\n"
+ "ldr q7, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0ce // udot v14.4s, v6.16b, v1.4b[0]\n"
".inst 0x6f82e0d2 // udot v18.4s, v6.16b, v2.4b[0]\n"
@@ -1070,33 +1070,33 @@ void a64_hybrid_u8u32_dot_6x16 (
".inst 0x6f81e0ef // udot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f82e0f3 // udot v19.4s, v7.16b, v2.4b[0]\n"
"bge 88b\n"
+ "cbz x26, 92f\n"
"89:" // Height 3: Multiply loop: Skip odd blocks
- "cbz x27, 92f\n"
- "tbz x27, #1, 90f\n"
- "ldr h0, [x26], #0x2\n"
- "ldr h1, [x25], #0x2\n"
- "ldr h2, [x24], #0x2\n"
- "tbz x27, #0, 91f\n"
- "ld1 { v0.b }[2], [x26]\n"
- "ld1 { v1.b }[2], [x25]\n"
- "ld1 { v2.b }[2], [x24]\n"
+ "tbz x26, #1, 90f\n"
+ "ldr h0, [x25], #0x2\n"
+ "ldr h1, [x24], #0x2\n"
+ "ldr h2, [x23], #0x2\n"
+ "tbz x26, #0, 91f\n"
+ "ld1 { v0.b }[2], [x25]\n"
+ "ld1 { v1.b }[2], [x24]\n"
+ "ld1 { v2.b }[2], [x23]\n"
"b 91f\n"
"90:" // Height 3: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x26, #0x0]\n"
- "ldr b1, [x25, #0x0]\n"
- "ldr b2, [x24, #0x0]\n"
+ "ldr b0, [x25, #0x0]\n"
+ "ldr b1, [x24, #0x0]\n"
+ "ldr b2, [x23, #0x0]\n"
"91:" // Height 3: Multiply loop: Ragged operand read: Done
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "add x10, x10, #0x40\n"
+ "ldr q7, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0ce // udot v14.4s, v6.16b, v1.4b[0]\n"
".inst 0x6f82e0d2 // udot v18.4s, v6.16b, v2.4b[0]\n"
@@ -1104,246 +1104,246 @@ void a64_hybrid_u8u32_dot_6x16 (
".inst 0x6f81e0ef // udot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f82e0f3 // udot v19.4s, v7.16b, v2.4b[0]\n"
"92:" // Height 3: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 82b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "cmp x11, #0x10\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "cmp x10, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
"bge 101f\n"
- "tbz x11, #3, 96f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v9.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v13.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "tbz x11, #2, 94f\n"
- "st1 { v10.4s }, [x9], #0x10\n"
- "st1 { v14.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "tbz x11, #1, 93f\n"
- "str d11, [x9], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
- "tbz x11, #0, 100f\n"
- "st1 { v11.s }[2], [x9]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
+ "tbz x10, #3, 96f\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v9.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v13.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
+ "tbz x10, #2, 94f\n"
+ "st1 { v10.4s }, [x28], #0x10\n"
+ "st1 { v14.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
+ "tbz x10, #1, 93f\n"
+ "str d11, [x28], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "tbz x10, #0, 100f\n"
+ "st1 { v11.s }[2], [x28]\n"
+ "st1 { v15.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x22]\n"
"b 100f\n"
"93:" // Height 3: Partial direct writeback: partial_1_12
- "tbz x11, #0, 100f\n"
- "str s11, [x9, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
+ "tbz x10, #0, 100f\n"
+ "str s11, [x28, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
"b 100f\n"
"94:" // Height 3: Partial direct writeback: partial_2_8
- "tbz x11, #1, 95f\n"
- "str d10, [x9], #0x8\n"
- "str d14, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
- "tbz x11, #0, 100f\n"
- "st1 { v10.s }[2], [x9]\n"
- "st1 { v14.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
+ "tbz x10, #1, 95f\n"
+ "str d10, [x28], #0x8\n"
+ "str d14, [x23], #0x8\n"
+ "str d18, [x22], #0x8\n"
+ "tbz x10, #0, 100f\n"
+ "st1 { v10.s }[2], [x28]\n"
+ "st1 { v14.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x22]\n"
"b 100f\n"
"95:" // Height 3: Partial direct writeback: partial_1_8
- "tbz x11, #0, 100f\n"
- "str s10, [x9, #0x0]\n"
- "str s14, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
+ "tbz x10, #0, 100f\n"
+ "str s10, [x28, #0x0]\n"
+ "str s14, [x23, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
"b 100f\n"
"96:" // Height 3: Partial direct writeback: partial_4_0
- "tbz x11, #2, 98f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "tbz x11, #1, 97f\n"
- "str d9, [x9], #0x8\n"
- "str d13, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
- "tbz x11, #0, 100f\n"
- "st1 { v9.s }[2], [x9]\n"
- "st1 { v13.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
+ "tbz x10, #2, 98f\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "tbz x10, #1, 97f\n"
+ "str d9, [x28], #0x8\n"
+ "str d13, [x23], #0x8\n"
+ "str d17, [x22], #0x8\n"
+ "tbz x10, #0, 100f\n"
+ "st1 { v9.s }[2], [x28]\n"
+ "st1 { v13.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x22]\n"
"b 100f\n"
"97:" // Height 3: Partial direct writeback: partial_1_4
- "tbz x11, #0, 100f\n"
- "str s9, [x9, #0x0]\n"
- "str s13, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
+ "tbz x10, #0, 100f\n"
+ "str s9, [x28, #0x0]\n"
+ "str s13, [x23, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
"b 100f\n"
"98:" // Height 3: Partial direct writeback: partial_2_0
- "tbz x11, #1, 99f\n"
- "str d8, [x9], #0x8\n"
- "str d12, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "tbz x11, #0, 100f\n"
- "st1 { v8.s }[2], [x9]\n"
- "st1 { v12.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
+ "tbz x10, #1, 99f\n"
+ "str d8, [x28], #0x8\n"
+ "str d12, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "tbz x10, #0, 100f\n"
+ "st1 { v8.s }[2], [x28]\n"
+ "st1 { v12.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x22]\n"
"b 100f\n"
"99:" // Height 3: Partial direct writeback: partial_1_0
- "str s8, [x9, #0x0]\n"
- "str s12, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
+ "str s8, [x28, #0x0]\n"
+ "str s12, [x23, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
"100:" // Height 3: Partial direct writeback: Done
"b 102f\n"
"101:" // Height 3: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q12, [x24, #0x0]\n"
- "str q13, [x24, #0x10]\n"
- "str q14, [x24, #0x20]\n"
- "str q15, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q12, [x23, #0x0]\n"
+ "str q13, [x23, #0x10]\n"
+ "str q14, [x23, #0x20]\n"
+ "str q15, [x23, #0x30]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q17, [x22, #0x10]\n"
+ "str q18, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
"102:" // Height 3: Writeback done
- "subs x11, x11, #0x10\n"
+ "subs x10, x10, #0x10\n"
"bgt 70b\n"
"b 206f\n"
"103:" // Height 4
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"104:" // Height 4: Column loop
"tbz %x[flags], #0, 114f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "cmp x11, #0x10\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x10, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"bge 113f\n"
- "tbz x11, #3, 108f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x24], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "tbz x11, #2, 106f\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "tbz x11, #1, 105f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d15, [x24], #0x8\n"
- "mov x25, #0x38\n"
- "ldr d19, [x23], #0x8\n"
- "ldr d23, [x22], #0x8\n"
- "tbz x11, #0, 112f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x24]\n"
- "ld1 { v19.s }[2], [x23]\n"
- "ld1 { v23.s }[2], [x22]\n"
+ "tbz x10, #3, 108f\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "tbz x10, #2, 106f\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
+ "tbz x10, #1, 105f\n"
+ "mov x24, #0x38\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d15, [x23], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
+ "ldr d23, [x21], #0x8\n"
+ "tbz x10, #0, 112f\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x23]\n"
+ "ld1 { v19.s }[2], [x22]\n"
+ "ld1 { v23.s }[2], [x21]\n"
"b 112f\n"
"105:" // Height 4: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x11, #0, 112f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s15, [x24, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s23, [x22, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x10, #0, 112f\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s15, [x23, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
+ "ldr s23, [x21, #0x0]\n"
"b 112f\n"
"106:" // Height 4: Partial accumulate: partial_2_8
- "tbz x11, #1, 107f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d14, [x24], #0x8\n"
- "mov x25, #0x28\n"
- "ldr d18, [x23], #0x8\n"
- "ldr d22, [x22], #0x8\n"
- "tbz x11, #0, 112f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x24]\n"
- "ld1 { v18.s }[2], [x23]\n"
- "ld1 { v22.s }[2], [x22]\n"
+ "tbz x10, #1, 107f\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
+ "mov x24, #0x28\n"
+ "ldr d18, [x22], #0x8\n"
+ "ldr d22, [x21], #0x8\n"
+ "tbz x10, #0, 112f\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x23]\n"
+ "ld1 { v18.s }[2], [x22]\n"
+ "ld1 { v22.s }[2], [x21]\n"
"b 112f\n"
"107:" // Height 4: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x11, #0, 112f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s14, [x24, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
- "ldr s22, [x22, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x10, #0, 112f\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s14, [x23, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
+ "ldr s22, [x21, #0x0]\n"
"b 112f\n"
"108:" // Height 4: Partial accumulate: partial_4_0
- "tbz x11, #2, 110f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "tbz x11, #1, 109f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d13, [x24], #0x8\n"
- "mov x25, #0x18\n"
- "ldr d17, [x23], #0x8\n"
- "ldr d21, [x22], #0x8\n"
- "tbz x11, #0, 112f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x24]\n"
- "ld1 { v17.s }[2], [x23]\n"
- "ld1 { v21.s }[2], [x22]\n"
+ "tbz x10, #2, 110f\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "tbz x10, #1, 109f\n"
+ "mov x24, #0x18\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d13, [x23], #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "ldr d21, [x21], #0x8\n"
+ "tbz x10, #0, 112f\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x23]\n"
+ "ld1 { v17.s }[2], [x22]\n"
+ "ld1 { v21.s }[2], [x21]\n"
"b 112f\n"
"109:" // Height 4: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x11, #0, 112f\n"
- "ldr s9, [x9, #0x0]\n"
- "ldr s13, [x24, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
- "ldr s21, [x22, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x10, #0, 112f\n"
+ "ldr s9, [x28, #0x0]\n"
+ "ldr s13, [x23, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
+ "ldr s21, [x21, #0x0]\n"
"b 112f\n"
"110:" // Height 4: Partial accumulate: partial_2_0
- "tbz x11, #1, 111f\n"
- "ldr d8, [x9], #0x8\n"
- "ldr d12, [x24], #0x8\n"
- "mov x25, #0x8\n"
- "ldr d16, [x23], #0x8\n"
- "ldr d20, [x22], #0x8\n"
- "tbz x11, #0, 112f\n"
- "ld1 { v8.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x24]\n"
- "ld1 { v16.s }[2], [x23]\n"
- "ld1 { v20.s }[2], [x22]\n"
+ "tbz x10, #1, 111f\n"
+ "ldr d8, [x28], #0x8\n"
+ "ldr d12, [x23], #0x8\n"
+ "mov x24, #0x8\n"
+ "ldr d16, [x22], #0x8\n"
+ "ldr d20, [x21], #0x8\n"
+ "tbz x10, #0, 112f\n"
+ "ld1 { v8.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x23]\n"
+ "ld1 { v16.s }[2], [x22]\n"
+ "ld1 { v20.s }[2], [x21]\n"
"b 112f\n"
"111:" // Height 4: Partial accumulate: partial_1_0
- "ldr s8, [x9, #0x0]\n"
- "ldr s12, [x24, #0x0]\n"
- "mov x25, #0x0\n"
- "ldr s16, [x23, #0x0]\n"
- "ldr s20, [x22, #0x0]\n"
+ "ldr s8, [x28, #0x0]\n"
+ "mov x24, #0x0\n"
+ "ldr s12, [x23, #0x0]\n"
+ "ldr s16, [x22, #0x0]\n"
+ "ldr s20, [x21, #0x0]\n"
"112:" // Height 4: Partial accumulate: Done
- "sub x9, x9, x25\n"
+ "sub x28, x28, x24\n"
"b 115f\n"
"113:" // Height 4: full accumulate
- "ldr q8, [x9, #0x0]\n"
- "ldr q9, [x9, #0x10]\n"
- "ldr q10, [x9, #0x20]\n"
- "ldr q11, [x9, #0x30]\n"
- "ldr q12, [x24, #0x0]\n"
- "ldr q13, [x24, #0x10]\n"
- "ldr q14, [x24, #0x20]\n"
- "ldr q15, [x24, #0x30]\n"
- "ldr q16, [x23, #0x0]\n"
- "ldr q17, [x23, #0x10]\n"
- "ldr q18, [x23, #0x20]\n"
- "ldr q19, [x23, #0x30]\n"
- "ldr q20, [x22, #0x0]\n"
- "ldr q21, [x22, #0x10]\n"
- "ldr q22, [x22, #0x20]\n"
- "ldr q23, [x22, #0x30]\n"
+ "ldr q8, [x28, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q10, [x28, #0x20]\n"
+ "ldr q11, [x28, #0x30]\n"
+ "ldr q12, [x23, #0x0]\n"
+ "ldr q13, [x23, #0x10]\n"
+ "ldr q14, [x23, #0x20]\n"
+ "ldr q15, [x23, #0x30]\n"
+ "ldr q16, [x22, #0x0]\n"
+ "ldr q17, [x22, #0x10]\n"
+ "ldr q18, [x22, #0x20]\n"
+ "ldr q19, [x22, #0x30]\n"
+ "ldr q20, [x21, #0x0]\n"
+ "ldr q21, [x21, #0x10]\n"
+ "ldr q22, [x21, #0x20]\n"
+ "ldr q23, [x21, #0x30]\n"
"b 115f\n"
"114:" // Height 4: no accumulate
"movi v8.4s, #0x0\n"
@@ -1363,219 +1363,219 @@ void a64_hybrid_u8u32_dot_6x16 (
"movi v22.4s, #0x0\n"
"movi v23.4s, #0x0\n"
"115:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"116:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 117f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 118f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 118f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
"b 118f\n"
"117:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
"118:" // Height 4: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 121f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q3, [x23, #0x0]\n"
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x20\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
"blt 120f\n"
"119:" // Height 4: Multiply loop: Main loop head
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
+ "add x25, x25, #0x10\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
- "sub x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "add x23, x23, #0x10\n"
".inst 0x6f83e0d4 // udot v20.4s, v6.16b, v3.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
- "add x25, x25, #0x10\n"
+ "ldr q6, [x9, #0x20]\n"
+ "add x22, x22, #0x10\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
- "add x24, x24, #0x10\n"
- "add x23, x23, #0x10\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "cmp x26, #0x20\n"
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
".inst 0x6f83e0f5 // udot v21.4s, v7.16b, v3.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "cmp x27, #0x20\n"
+ "ldr q7, [x9, #0x30]\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0ce // udot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6f82e0d2 // udot v18.4s, v6.16b, v2.4b[0]\n"
".inst 0x6f83e0d6 // udot v22.4s, v6.16b, v3.4b[0]\n"
- "ldr q6, [x10, #0x40]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0ef // udot v15.4s, v7.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6f82e0f3 // udot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x6f83e0f7 // udot v23.4s, v7.16b, v3.4b[0]\n"
- "ldr q7, [x10, #0x50]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x6fa0e0c8 // udot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x6fa1e0cc // udot v12.4s, v6.16b, v1.4b[1]\n"
".inst 0x6fa2e0d0 // udot v16.4s, v6.16b, v2.4b[1]\n"
".inst 0x6fa3e0d4 // udot v20.4s, v6.16b, v3.4b[1]\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x6fa0e0e9 // udot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x6fa1e0ed // udot v13.4s, v7.16b, v1.4b[1]\n"
".inst 0x6fa2e0f1 // udot v17.4s, v7.16b, v2.4b[1]\n"
".inst 0x6fa3e0f5 // udot v21.4s, v7.16b, v3.4b[1]\n"
- "ldr q7, [x10, #0x70]\n"
+ "ldr q7, [x9, #0x70]\n"
".inst 0x6fa0e0ca // udot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x6fa1e0ce // udot v14.4s, v6.16b, v1.4b[1]\n"
".inst 0x6fa2e0d2 // udot v18.4s, v6.16b, v2.4b[1]\n"
".inst 0x6fa3e0d6 // udot v22.4s, v6.16b, v3.4b[1]\n"
- "ldr q6, [x10, #0x80]\n"
+ "ldr q6, [x9, #0x80]\n"
".inst 0x6fa0e0eb // udot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x6fa1e0ef // udot v15.4s, v7.16b, v1.4b[1]\n"
".inst 0x6fa2e0f3 // udot v19.4s, v7.16b, v2.4b[1]\n"
".inst 0x6fa3e0f7 // udot v23.4s, v7.16b, v3.4b[1]\n"
- "ldr q7, [x10, #0x90]\n"
+ "ldr q7, [x9, #0x90]\n"
".inst 0x6f80e8c8 // udot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x6f81e8cc // udot v12.4s, v6.16b, v1.4b[2]\n"
".inst 0x6f82e8d0 // udot v16.4s, v6.16b, v2.4b[2]\n"
".inst 0x6f83e8d4 // udot v20.4s, v6.16b, v3.4b[2]\n"
- "ldr q6, [x10, #0xa0]\n"
+ "ldr q6, [x9, #0xa0]\n"
".inst 0x6f80e8e9 // udot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x6f81e8ed // udot v13.4s, v7.16b, v1.4b[2]\n"
".inst 0x6f82e8f1 // udot v17.4s, v7.16b, v2.4b[2]\n"
".inst 0x6f83e8f5 // udot v21.4s, v7.16b, v3.4b[2]\n"
- "ldr q7, [x10, #0xb0]\n"
+ "ldr q7, [x9, #0xb0]\n"
".inst 0x6f80e8ca // udot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x6f81e8ce // udot v14.4s, v6.16b, v1.4b[2]\n"
".inst 0x6f82e8d2 // udot v18.4s, v6.16b, v2.4b[2]\n"
".inst 0x6f83e8d6 // udot v22.4s, v6.16b, v3.4b[2]\n"
- "ldr q6, [x10, #0xc0]\n"
+ "ldr q6, [x9, #0xc0]\n"
".inst 0x6f80e8eb // udot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x6f81e8ef // udot v15.4s, v7.16b, v1.4b[2]\n"
".inst 0x6f82e8f3 // udot v19.4s, v7.16b, v2.4b[2]\n"
".inst 0x6f83e8f7 // udot v23.4s, v7.16b, v3.4b[2]\n"
- "ldr q7, [x10, #0xd0]\n"
+ "ldr q7, [x9, #0xd0]\n"
".inst 0x6fa0e8c8 // udot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x6fa1e8cc // udot v12.4s, v6.16b, v1.4b[3]\n"
".inst 0x6fa2e8d0 // udot v16.4s, v6.16b, v2.4b[3]\n"
".inst 0x6fa3e8d4 // udot v20.4s, v6.16b, v3.4b[3]\n"
- "ldr q6, [x10, #0xe0]\n"
+ "ldr q6, [x9, #0xe0]\n"
".inst 0x6fa0e8e9 // udot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x6fa1e8ed // udot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x6fa2e8f1 // udot v17.4s, v7.16b, v2.4b[3]\n"
".inst 0x6fa3e8f5 // udot v21.4s, v7.16b, v3.4b[3]\n"
- "ldr q7, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q7, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x6fa0e8ca // udot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x6fa1e8ce // udot v14.4s, v6.16b, v1.4b[3]\n"
".inst 0x6fa2e8d2 // udot v18.4s, v6.16b, v2.4b[3]\n"
".inst 0x6fa3e8d6 // udot v22.4s, v6.16b, v3.4b[3]\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x6fa0e8eb // udot v11.4s, v7.16b, v0.4b[3]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
".inst 0x6fa1e8ef // udot v15.4s, v7.16b, v1.4b[3]\n"
- "ldr q1, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
".inst 0x6fa2e8f3 // udot v19.4s, v7.16b, v2.4b[3]\n"
- "ldr q2, [x24, #0x0]\n"
+ "ldr q2, [x23, #0x0]\n"
".inst 0x6fa3e8f7 // udot v23.4s, v7.16b, v3.4b[3]\n"
- "ldr q3, [x23, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q3, [x22, #0x0]\n"
"bge 119b\n"
"120:" // Height 4: Multiply loop: Single iteration only
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
- ".inst 0x6f83e0d4 // udot v20.4s, v6.16b, v3.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
"add x24, x24, #0x10\n"
+ ".inst 0x6f83e0d4 // udot v20.4s, v6.16b, v3.4b[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "add x23, x23, #0x10\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "add x22, x22, #0x10\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
- "add x23, x23, #0x10\n"
- "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6f83e0f5 // udot v21.4s, v7.16b, v3.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q7, [x9, #0x30]\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0ce // udot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6f82e0d2 // udot v18.4s, v6.16b, v2.4b[0]\n"
".inst 0x6f83e0d6 // udot v22.4s, v6.16b, v3.4b[0]\n"
- "ldr q6, [x10, #0x40]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0ef // udot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f82e0f3 // udot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x6f83e0f7 // udot v23.4s, v7.16b, v3.4b[0]\n"
- "ldr q7, [x10, #0x50]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x6fa0e0c8 // udot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x6fa1e0cc // udot v12.4s, v6.16b, v1.4b[1]\n"
".inst 0x6fa2e0d0 // udot v16.4s, v6.16b, v2.4b[1]\n"
".inst 0x6fa3e0d4 // udot v20.4s, v6.16b, v3.4b[1]\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x6fa0e0e9 // udot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x6fa1e0ed // udot v13.4s, v7.16b, v1.4b[1]\n"
".inst 0x6fa2e0f1 // udot v17.4s, v7.16b, v2.4b[1]\n"
".inst 0x6fa3e0f5 // udot v21.4s, v7.16b, v3.4b[1]\n"
- "ldr q7, [x10, #0x70]\n"
+ "ldr q7, [x9, #0x70]\n"
".inst 0x6fa0e0ca // udot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x6fa1e0ce // udot v14.4s, v6.16b, v1.4b[1]\n"
".inst 0x6fa2e0d2 // udot v18.4s, v6.16b, v2.4b[1]\n"
".inst 0x6fa3e0d6 // udot v22.4s, v6.16b, v3.4b[1]\n"
- "ldr q6, [x10, #0x80]\n"
+ "ldr q6, [x9, #0x80]\n"
".inst 0x6fa0e0eb // udot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x6fa1e0ef // udot v15.4s, v7.16b, v1.4b[1]\n"
".inst 0x6fa2e0f3 // udot v19.4s, v7.16b, v2.4b[1]\n"
".inst 0x6fa3e0f7 // udot v23.4s, v7.16b, v3.4b[1]\n"
- "ldr q7, [x10, #0x90]\n"
+ "ldr q7, [x9, #0x90]\n"
".inst 0x6f80e8c8 // udot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x6f81e8cc // udot v12.4s, v6.16b, v1.4b[2]\n"
".inst 0x6f82e8d0 // udot v16.4s, v6.16b, v2.4b[2]\n"
".inst 0x6f83e8d4 // udot v20.4s, v6.16b, v3.4b[2]\n"
- "ldr q6, [x10, #0xa0]\n"
+ "ldr q6, [x9, #0xa0]\n"
".inst 0x6f80e8e9 // udot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x6f81e8ed // udot v13.4s, v7.16b, v1.4b[2]\n"
".inst 0x6f82e8f1 // udot v17.4s, v7.16b, v2.4b[2]\n"
".inst 0x6f83e8f5 // udot v21.4s, v7.16b, v3.4b[2]\n"
- "ldr q7, [x10, #0xb0]\n"
+ "ldr q7, [x9, #0xb0]\n"
".inst 0x6f80e8ca // udot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x6f81e8ce // udot v14.4s, v6.16b, v1.4b[2]\n"
".inst 0x6f82e8d2 // udot v18.4s, v6.16b, v2.4b[2]\n"
".inst 0x6f83e8d6 // udot v22.4s, v6.16b, v3.4b[2]\n"
- "ldr q6, [x10, #0xc0]\n"
+ "ldr q6, [x9, #0xc0]\n"
".inst 0x6f80e8eb // udot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x6f81e8ef // udot v15.4s, v7.16b, v1.4b[2]\n"
".inst 0x6f82e8f3 // udot v19.4s, v7.16b, v2.4b[2]\n"
".inst 0x6f83e8f7 // udot v23.4s, v7.16b, v3.4b[2]\n"
- "ldr q7, [x10, #0xd0]\n"
+ "ldr q7, [x9, #0xd0]\n"
".inst 0x6fa0e8c8 // udot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x6fa1e8cc // udot v12.4s, v6.16b, v1.4b[3]\n"
".inst 0x6fa2e8d0 // udot v16.4s, v6.16b, v2.4b[3]\n"
".inst 0x6fa3e8d4 // udot v20.4s, v6.16b, v3.4b[3]\n"
- "ldr q6, [x10, #0xe0]\n"
+ "ldr q6, [x9, #0xe0]\n"
".inst 0x6fa0e8e9 // udot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x6fa1e8ed // udot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x6fa2e8f1 // udot v17.4s, v7.16b, v2.4b[3]\n"
".inst 0x6fa3e8f5 // udot v21.4s, v7.16b, v3.4b[3]\n"
- "ldr q7, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q7, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x6fa0e8ca // udot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x6fa1e8ce // udot v14.4s, v6.16b, v1.4b[3]\n"
".inst 0x6fa2e8d2 // udot v18.4s, v6.16b, v2.4b[3]\n"
@@ -1585,29 +1585,29 @@ void a64_hybrid_u8u32_dot_6x16 (
".inst 0x6fa2e8f3 // udot v19.4s, v7.16b, v2.4b[3]\n"
".inst 0x6fa3e8f7 // udot v23.4s, v7.16b, v3.4b[3]\n"
"121:" // Height 4: Multiply loop: Main loop skip
- "cbz x27, 126f\n"
- "cmp x27, #0x4\n"
+ "cbz x26, 126f\n"
+ "cmp x26, #0x4\n"
"blt 123f\n"
"122:" // Height 4: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr s1, [x25], #0x4\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
- "ldr s2, [x24], #0x4\n"
- "ldr s3, [x23], #0x4\n"
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr s0, [x25], #0x4\n"
+ "sub x26, x26, #0x4\n"
+ "ldr s1, [x24], #0x4\n"
+ "cmp x26, #0x4\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr s3, [x22], #0x4\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x6f83e0d4 // udot v20.4s, v6.16b, v3.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
".inst 0x6f83e0f5 // udot v21.4s, v7.16b, v3.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "add x10, x10, #0x40\n"
+ "ldr q7, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0ce // udot v14.4s, v6.16b, v1.4b[0]\n"
".inst 0x6f82e0d2 // udot v18.4s, v6.16b, v2.4b[0]\n"
@@ -1617,38 +1617,38 @@ void a64_hybrid_u8u32_dot_6x16 (
".inst 0x6f82e0f3 // udot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x6f83e0f7 // udot v23.4s, v7.16b, v3.4b[0]\n"
"bge 122b\n"
+ "cbz x26, 126f\n"
"123:" // Height 4: Multiply loop: Skip odd blocks
- "cbz x27, 126f\n"
- "tbz x27, #1, 124f\n"
- "ldr h0, [x26], #0x2\n"
- "ldr h1, [x25], #0x2\n"
- "ldr h2, [x24], #0x2\n"
- "ldr h3, [x23], #0x2\n"
- "tbz x27, #0, 125f\n"
- "ld1 { v0.b }[2], [x26]\n"
- "ld1 { v1.b }[2], [x25]\n"
- "ld1 { v2.b }[2], [x24]\n"
- "ld1 { v3.b }[2], [x23]\n"
+ "tbz x26, #1, 124f\n"
+ "ldr h0, [x25], #0x2\n"
+ "ldr h1, [x24], #0x2\n"
+ "ldr h2, [x23], #0x2\n"
+ "ldr h3, [x22], #0x2\n"
+ "tbz x26, #0, 125f\n"
+ "ld1 { v0.b }[2], [x25]\n"
+ "ld1 { v1.b }[2], [x24]\n"
+ "ld1 { v2.b }[2], [x23]\n"
+ "ld1 { v3.b }[2], [x22]\n"
"b 125f\n"
"124:" // Height 4: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x26, #0x0]\n"
- "ldr b1, [x25, #0x0]\n"
- "ldr b2, [x24, #0x0]\n"
- "ldr b3, [x23, #0x0]\n"
+ "ldr b0, [x25, #0x0]\n"
+ "ldr b1, [x24, #0x0]\n"
+ "ldr b2, [x23, #0x0]\n"
+ "ldr b3, [x22, #0x0]\n"
"125:" // Height 4: Multiply loop: Ragged operand read: Done
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x6f83e0d4 // udot v20.4s, v6.16b, v3.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
".inst 0x6f83e0f5 // udot v21.4s, v7.16b, v3.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "add x10, x10, #0x40\n"
+ "ldr q7, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0ce // udot v14.4s, v6.16b, v1.4b[0]\n"
".inst 0x6f82e0d2 // udot v18.4s, v6.16b, v2.4b[0]\n"
@@ -1658,289 +1658,289 @@ void a64_hybrid_u8u32_dot_6x16 (
".inst 0x6f82e0f3 // udot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x6f83e0f7 // udot v23.4s, v7.16b, v3.4b[0]\n"
"126:" // Height 4: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 116b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "add x22, x23, x20, LSL #2\n"
- "cmp x11, #0x10\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "cmp x10, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
"bge 135f\n"
- "tbz x11, #3, 130f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v9.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v13.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v21.4s }, [x22], #0x10\n"
- "tbz x11, #2, 128f\n"
- "st1 { v10.4s }, [x9], #0x10\n"
- "st1 { v14.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "st1 { v22.4s }, [x22], #0x10\n"
- "tbz x11, #1, 127f\n"
- "str d11, [x9], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
- "str d23, [x22], #0x8\n"
- "tbz x11, #0, 134f\n"
- "st1 { v11.s }[2], [x9]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
- "st1 { v23.s }[2], [x22]\n"
+ "tbz x10, #3, 130f\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v9.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v13.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v21.4s }, [x21], #0x10\n"
+ "tbz x10, #2, 128f\n"
+ "st1 { v10.4s }, [x28], #0x10\n"
+ "st1 { v14.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
+ "st1 { v22.4s }, [x21], #0x10\n"
+ "tbz x10, #1, 127f\n"
+ "str d11, [x28], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "str d23, [x21], #0x8\n"
+ "tbz x10, #0, 134f\n"
+ "st1 { v11.s }[2], [x28]\n"
+ "st1 { v15.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x22]\n"
+ "st1 { v23.s }[2], [x21]\n"
"b 134f\n"
"127:" // Height 4: Partial direct writeback: partial_1_12
- "tbz x11, #0, 134f\n"
- "str s11, [x9, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
- "str s23, [x22, #0x0]\n"
+ "tbz x10, #0, 134f\n"
+ "str s11, [x28, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
+ "str s23, [x21, #0x0]\n"
"b 134f\n"
"128:" // Height 4: Partial direct writeback: partial_2_8
- "tbz x11, #1, 129f\n"
- "str d10, [x9], #0x8\n"
- "str d14, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
- "str d22, [x22], #0x8\n"
- "tbz x11, #0, 134f\n"
- "st1 { v10.s }[2], [x9]\n"
- "st1 { v14.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
- "st1 { v22.s }[2], [x22]\n"
+ "tbz x10, #1, 129f\n"
+ "str d10, [x28], #0x8\n"
+ "str d14, [x23], #0x8\n"
+ "str d18, [x22], #0x8\n"
+ "str d22, [x21], #0x8\n"
+ "tbz x10, #0, 134f\n"
+ "st1 { v10.s }[2], [x28]\n"
+ "st1 { v14.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x22]\n"
+ "st1 { v22.s }[2], [x21]\n"
"b 134f\n"
"129:" // Height 4: Partial direct writeback: partial_1_8
- "tbz x11, #0, 134f\n"
- "str s10, [x9, #0x0]\n"
- "str s14, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
- "str s22, [x22, #0x0]\n"
+ "tbz x10, #0, 134f\n"
+ "str s10, [x28, #0x0]\n"
+ "str s14, [x23, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
+ "str s22, [x21, #0x0]\n"
"b 134f\n"
"130:" // Height 4: Partial direct writeback: partial_4_0
- "tbz x11, #2, 132f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "tbz x11, #1, 131f\n"
- "str d9, [x9], #0x8\n"
- "str d13, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
- "str d21, [x22], #0x8\n"
- "tbz x11, #0, 134f\n"
- "st1 { v9.s }[2], [x9]\n"
- "st1 { v13.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
- "st1 { v21.s }[2], [x22]\n"
+ "tbz x10, #2, 132f\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "tbz x10, #1, 131f\n"
+ "str d9, [x28], #0x8\n"
+ "str d13, [x23], #0x8\n"
+ "str d17, [x22], #0x8\n"
+ "str d21, [x21], #0x8\n"
+ "tbz x10, #0, 134f\n"
+ "st1 { v9.s }[2], [x28]\n"
+ "st1 { v13.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x22]\n"
+ "st1 { v21.s }[2], [x21]\n"
"b 134f\n"
"131:" // Height 4: Partial direct writeback: partial_1_4
- "tbz x11, #0, 134f\n"
- "str s9, [x9, #0x0]\n"
- "str s13, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
- "str s21, [x22, #0x0]\n"
+ "tbz x10, #0, 134f\n"
+ "str s9, [x28, #0x0]\n"
+ "str s13, [x23, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
+ "str s21, [x21, #0x0]\n"
"b 134f\n"
"132:" // Height 4: Partial direct writeback: partial_2_0
- "tbz x11, #1, 133f\n"
- "str d8, [x9], #0x8\n"
- "str d12, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d20, [x22], #0x8\n"
- "tbz x11, #0, 134f\n"
- "st1 { v8.s }[2], [x9]\n"
- "st1 { v12.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
- "st1 { v20.s }[2], [x22]\n"
+ "tbz x10, #1, 133f\n"
+ "str d8, [x28], #0x8\n"
+ "str d12, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "str d20, [x21], #0x8\n"
+ "tbz x10, #0, 134f\n"
+ "st1 { v8.s }[2], [x28]\n"
+ "st1 { v12.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x22]\n"
+ "st1 { v20.s }[2], [x21]\n"
"b 134f\n"
"133:" // Height 4: Partial direct writeback: partial_1_0
- "str s8, [x9, #0x0]\n"
- "str s12, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
- "str s20, [x22, #0x0]\n"
+ "str s8, [x28, #0x0]\n"
+ "str s12, [x23, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
+ "str s20, [x21, #0x0]\n"
"134:" // Height 4: Partial direct writeback: Done
"b 136f\n"
"135:" // Height 4: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q12, [x24, #0x0]\n"
- "str q13, [x24, #0x10]\n"
- "str q14, [x24, #0x20]\n"
- "str q15, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
- "str q20, [x22, #0x0]\n"
- "str q21, [x22, #0x10]\n"
- "str q22, [x22, #0x20]\n"
- "str q23, [x22, #0x30]\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q12, [x23, #0x0]\n"
+ "str q13, [x23, #0x10]\n"
+ "str q14, [x23, #0x20]\n"
+ "str q15, [x23, #0x30]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q17, [x22, #0x10]\n"
+ "str q18, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
+ "str q20, [x21, #0x0]\n"
+ "str q21, [x21, #0x10]\n"
+ "str q22, [x21, #0x20]\n"
+ "str q23, [x21, #0x30]\n"
"136:" // Height 4: Writeback done
- "subs x11, x11, #0x10\n"
+ "subs x10, x10, #0x10\n"
"bgt 104b\n"
"b 206f\n"
"137:" // Height 5
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"138:" // Height 5: Column loop
"tbz %x[flags], #0, 148f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "cmp x11, #0x10\n"
- "add x21, x22, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x10, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"bge 147f\n"
- "tbz x11, #3, 142f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v24.4s }, [x21], #0x10\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x24], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v25.4s }, [x21], #0x10\n"
- "tbz x11, #2, 140f\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
- "tbz x11, #1, 139f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d15, [x24], #0x8\n"
- "mov x25, #0x38\n"
- "ldr d19, [x23], #0x8\n"
- "ldr d23, [x22], #0x8\n"
- "ldr d27, [x21], #0x8\n"
- "tbz x11, #0, 146f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x24]\n"
- "ld1 { v19.s }[2], [x23]\n"
- "ld1 { v23.s }[2], [x22]\n"
- "ld1 { v27.s }[2], [x21]\n"
+ "tbz x10, #3, 142f\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v24.4s }, [x20], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "ld1 { v25.4s }, [x20], #0x10\n"
+ "tbz x10, #2, 140f\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
+ "ld1 { v26.4s }, [x20], #0x10\n"
+ "tbz x10, #1, 139f\n"
+ "ldr d11, [x28], #0x8\n"
+ "mov x24, #0x38\n"
+ "ldr d15, [x23], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
+ "ldr d23, [x21], #0x8\n"
+ "ldr d27, [x20], #0x8\n"
+ "tbz x10, #0, 146f\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x23]\n"
+ "ld1 { v19.s }[2], [x22]\n"
+ "ld1 { v23.s }[2], [x21]\n"
+ "ld1 { v27.s }[2], [x20]\n"
"b 146f\n"
"139:" // Height 5: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x11, #0, 146f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s15, [x24, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s23, [x22, #0x0]\n"
- "ldr s27, [x21, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x10, #0, 146f\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s15, [x23, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
+ "ldr s23, [x21, #0x0]\n"
+ "ldr s27, [x20, #0x0]\n"
"b 146f\n"
"140:" // Height 5: Partial accumulate: partial_2_8
- "tbz x11, #1, 141f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d14, [x24], #0x8\n"
- "mov x25, #0x28\n"
- "ldr d18, [x23], #0x8\n"
- "ldr d22, [x22], #0x8\n"
- "ldr d26, [x21], #0x8\n"
- "tbz x11, #0, 146f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x24]\n"
- "ld1 { v18.s }[2], [x23]\n"
- "ld1 { v22.s }[2], [x22]\n"
- "ld1 { v26.s }[2], [x21]\n"
+ "tbz x10, #1, 141f\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
+ "mov x24, #0x28\n"
+ "ldr d18, [x22], #0x8\n"
+ "ldr d22, [x21], #0x8\n"
+ "ldr d26, [x20], #0x8\n"
+ "tbz x10, #0, 146f\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x23]\n"
+ "ld1 { v18.s }[2], [x22]\n"
+ "ld1 { v22.s }[2], [x21]\n"
+ "ld1 { v26.s }[2], [x20]\n"
"b 146f\n"
"141:" // Height 5: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x11, #0, 146f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s14, [x24, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
- "ldr s22, [x22, #0x0]\n"
- "ldr s26, [x21, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x10, #0, 146f\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s14, [x23, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
+ "ldr s22, [x21, #0x0]\n"
+ "ldr s26, [x20, #0x0]\n"
"b 146f\n"
"142:" // Height 5: Partial accumulate: partial_4_0
- "tbz x11, #2, 144f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v24.4s }, [x21], #0x10\n"
- "tbz x11, #1, 143f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d13, [x24], #0x8\n"
- "mov x25, #0x18\n"
- "ldr d17, [x23], #0x8\n"
- "ldr d21, [x22], #0x8\n"
- "ldr d25, [x21], #0x8\n"
- "tbz x11, #0, 146f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x24]\n"
- "ld1 { v17.s }[2], [x23]\n"
- "ld1 { v21.s }[2], [x22]\n"
- "ld1 { v25.s }[2], [x21]\n"
+ "tbz x10, #2, 144f\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v24.4s }, [x20], #0x10\n"
+ "tbz x10, #1, 143f\n"
+ "ldr d9, [x28], #0x8\n"
+ "mov x24, #0x18\n"
+ "ldr d13, [x23], #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "ldr d21, [x21], #0x8\n"
+ "ldr d25, [x20], #0x8\n"
+ "tbz x10, #0, 146f\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x23]\n"
+ "ld1 { v17.s }[2], [x22]\n"
+ "ld1 { v21.s }[2], [x21]\n"
+ "ld1 { v25.s }[2], [x20]\n"
"b 146f\n"
"143:" // Height 5: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x11, #0, 146f\n"
- "ldr s9, [x9, #0x0]\n"
- "ldr s13, [x24, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
- "ldr s21, [x22, #0x0]\n"
- "ldr s25, [x21, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x10, #0, 146f\n"
+ "ldr s9, [x28, #0x0]\n"
+ "ldr s13, [x23, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
+ "ldr s21, [x21, #0x0]\n"
+ "ldr s25, [x20, #0x0]\n"
"b 146f\n"
"144:" // Height 5: Partial accumulate: partial_2_0
- "tbz x11, #1, 145f\n"
- "ldr d8, [x9], #0x8\n"
- "ldr d12, [x24], #0x8\n"
- "mov x25, #0x8\n"
- "ldr d16, [x23], #0x8\n"
- "ldr d20, [x22], #0x8\n"
- "ldr d24, [x21], #0x8\n"
- "tbz x11, #0, 146f\n"
- "ld1 { v8.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x24]\n"
- "ld1 { v16.s }[2], [x23]\n"
- "ld1 { v20.s }[2], [x22]\n"
- "ld1 { v24.s }[2], [x21]\n"
+ "tbz x10, #1, 145f\n"
+ "ldr d8, [x28], #0x8\n"
+ "ldr d12, [x23], #0x8\n"
+ "mov x24, #0x8\n"
+ "ldr d16, [x22], #0x8\n"
+ "ldr d20, [x21], #0x8\n"
+ "ldr d24, [x20], #0x8\n"
+ "tbz x10, #0, 146f\n"
+ "ld1 { v8.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x23]\n"
+ "ld1 { v16.s }[2], [x22]\n"
+ "ld1 { v20.s }[2], [x21]\n"
+ "ld1 { v24.s }[2], [x20]\n"
"b 146f\n"
"145:" // Height 5: Partial accumulate: partial_1_0
- "ldr s8, [x9, #0x0]\n"
- "ldr s12, [x24, #0x0]\n"
- "mov x25, #0x0\n"
- "ldr s16, [x23, #0x0]\n"
- "ldr s20, [x22, #0x0]\n"
- "ldr s24, [x21, #0x0]\n"
+ "ldr s8, [x28, #0x0]\n"
+ "mov x24, #0x0\n"
+ "ldr s12, [x23, #0x0]\n"
+ "ldr s16, [x22, #0x0]\n"
+ "ldr s20, [x21, #0x0]\n"
+ "ldr s24, [x20, #0x0]\n"
"146:" // Height 5: Partial accumulate: Done
- "sub x9, x9, x25\n"
+ "sub x28, x28, x24\n"
"b 149f\n"
"147:" // Height 5: full accumulate
- "ldr q8, [x9, #0x0]\n"
- "ldr q9, [x9, #0x10]\n"
- "ldr q10, [x9, #0x20]\n"
- "ldr q11, [x9, #0x30]\n"
- "ldr q12, [x24, #0x0]\n"
- "ldr q13, [x24, #0x10]\n"
- "ldr q14, [x24, #0x20]\n"
- "ldr q15, [x24, #0x30]\n"
- "ldr q16, [x23, #0x0]\n"
- "ldr q17, [x23, #0x10]\n"
- "ldr q18, [x23, #0x20]\n"
- "ldr q19, [x23, #0x30]\n"
- "ldr q20, [x22, #0x0]\n"
- "ldr q21, [x22, #0x10]\n"
- "ldr q22, [x22, #0x20]\n"
- "ldr q23, [x22, #0x30]\n"
- "ldr q24, [x21, #0x0]\n"
- "ldr q25, [x21, #0x10]\n"
- "ldr q26, [x21, #0x20]\n"
- "ldr q27, [x21, #0x30]\n"
+ "ldr q8, [x28, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q10, [x28, #0x20]\n"
+ "ldr q11, [x28, #0x30]\n"
+ "ldr q12, [x23, #0x0]\n"
+ "ldr q13, [x23, #0x10]\n"
+ "ldr q14, [x23, #0x20]\n"
+ "ldr q15, [x23, #0x30]\n"
+ "ldr q16, [x22, #0x0]\n"
+ "ldr q17, [x22, #0x10]\n"
+ "ldr q18, [x22, #0x20]\n"
+ "ldr q19, [x22, #0x30]\n"
+ "ldr q20, [x21, #0x0]\n"
+ "ldr q21, [x21, #0x10]\n"
+ "ldr q22, [x21, #0x20]\n"
+ "ldr q23, [x21, #0x30]\n"
+ "ldr q24, [x20, #0x0]\n"
+ "ldr q25, [x20, #0x10]\n"
+ "ldr q26, [x20, #0x20]\n"
+ "ldr q27, [x20, #0x30]\n"
"b 149f\n"
"148:" // Height 5: no accumulate
"movi v8.4s, #0x0\n"
@@ -1964,258 +1964,258 @@ void a64_hybrid_u8u32_dot_6x16 (
"movi v26.4s, #0x0\n"
"movi v27.4s, #0x0\n"
"149:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"150:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 151f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 152f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 152f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
"b 152f\n"
"151:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
"152:" // Height 5: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 155f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q3, [x23, #0x0]\n"
- "ldr q4, [x22, #0x0]\n"
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x20\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
"blt 154f\n"
"153:" // Height 5: Multiply loop: Main loop head
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
+ "add x25, x25, #0x10\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
- "sub x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "add x23, x23, #0x10\n"
".inst 0x6f83e0d4 // udot v20.4s, v6.16b, v3.4b[0]\n"
- "add x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "add x22, x22, #0x10\n"
".inst 0x6f84e0d8 // udot v24.4s, v6.16b, v4.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "add x21, x21, #0x10\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
- "add x23, x23, #0x10\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
+ "cmp x26, #0x20\n"
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
- "add x22, x22, #0x10\n"
- "cmp x27, #0x20\n"
".inst 0x6f83e0f5 // udot v21.4s, v7.16b, v3.4b[0]\n"
".inst 0x6f84e0f9 // udot v25.4s, v7.16b, v4.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q7, [x9, #0x30]\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0ce // udot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6f82e0d2 // udot v18.4s, v6.16b, v2.4b[0]\n"
".inst 0x6f83e0d6 // udot v22.4s, v6.16b, v3.4b[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6f84e0da // udot v26.4s, v6.16b, v4.4b[0]\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0ef // udot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f82e0f3 // udot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x6f83e0f7 // udot v23.4s, v7.16b, v3.4b[0]\n"
".inst 0x6f84e0fb // udot v27.4s, v7.16b, v4.4b[0]\n"
- "ldr q7, [x10, #0x50]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x6fa0e0c8 // udot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x6fa1e0cc // udot v12.4s, v6.16b, v1.4b[1]\n"
".inst 0x6fa2e0d0 // udot v16.4s, v6.16b, v2.4b[1]\n"
".inst 0x6fa3e0d4 // udot v20.4s, v6.16b, v3.4b[1]\n"
".inst 0x6fa4e0d8 // udot v24.4s, v6.16b, v4.4b[1]\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x6fa0e0e9 // udot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x6fa1e0ed // udot v13.4s, v7.16b, v1.4b[1]\n"
".inst 0x6fa2e0f1 // udot v17.4s, v7.16b, v2.4b[1]\n"
".inst 0x6fa3e0f5 // udot v21.4s, v7.16b, v3.4b[1]\n"
".inst 0x6fa4e0f9 // udot v25.4s, v7.16b, v4.4b[1]\n"
- "ldr q7, [x10, #0x70]\n"
+ "ldr q7, [x9, #0x70]\n"
".inst 0x6fa0e0ca // udot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x6fa1e0ce // udot v14.4s, v6.16b, v1.4b[1]\n"
".inst 0x6fa2e0d2 // udot v18.4s, v6.16b, v2.4b[1]\n"
".inst 0x6fa3e0d6 // udot v22.4s, v6.16b, v3.4b[1]\n"
".inst 0x6fa4e0da // udot v26.4s, v6.16b, v4.4b[1]\n"
- "ldr q6, [x10, #0x80]\n"
+ "ldr q6, [x9, #0x80]\n"
".inst 0x6fa0e0eb // udot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x6fa1e0ef // udot v15.4s, v7.16b, v1.4b[1]\n"
".inst 0x6fa2e0f3 // udot v19.4s, v7.16b, v2.4b[1]\n"
".inst 0x6fa3e0f7 // udot v23.4s, v7.16b, v3.4b[1]\n"
".inst 0x6fa4e0fb // udot v27.4s, v7.16b, v4.4b[1]\n"
- "ldr q7, [x10, #0x90]\n"
+ "ldr q7, [x9, #0x90]\n"
".inst 0x6f80e8c8 // udot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x6f81e8cc // udot v12.4s, v6.16b, v1.4b[2]\n"
".inst 0x6f82e8d0 // udot v16.4s, v6.16b, v2.4b[2]\n"
".inst 0x6f83e8d4 // udot v20.4s, v6.16b, v3.4b[2]\n"
".inst 0x6f84e8d8 // udot v24.4s, v6.16b, v4.4b[2]\n"
- "ldr q6, [x10, #0xa0]\n"
+ "ldr q6, [x9, #0xa0]\n"
".inst 0x6f80e8e9 // udot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x6f81e8ed // udot v13.4s, v7.16b, v1.4b[2]\n"
".inst 0x6f82e8f1 // udot v17.4s, v7.16b, v2.4b[2]\n"
".inst 0x6f83e8f5 // udot v21.4s, v7.16b, v3.4b[2]\n"
".inst 0x6f84e8f9 // udot v25.4s, v7.16b, v4.4b[2]\n"
- "ldr q7, [x10, #0xb0]\n"
+ "ldr q7, [x9, #0xb0]\n"
".inst 0x6f80e8ca // udot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x6f81e8ce // udot v14.4s, v6.16b, v1.4b[2]\n"
".inst 0x6f82e8d2 // udot v18.4s, v6.16b, v2.4b[2]\n"
".inst 0x6f83e8d6 // udot v22.4s, v6.16b, v3.4b[2]\n"
".inst 0x6f84e8da // udot v26.4s, v6.16b, v4.4b[2]\n"
- "ldr q6, [x10, #0xc0]\n"
+ "ldr q6, [x9, #0xc0]\n"
".inst 0x6f80e8eb // udot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x6f81e8ef // udot v15.4s, v7.16b, v1.4b[2]\n"
".inst 0x6f82e8f3 // udot v19.4s, v7.16b, v2.4b[2]\n"
".inst 0x6f83e8f7 // udot v23.4s, v7.16b, v3.4b[2]\n"
".inst 0x6f84e8fb // udot v27.4s, v7.16b, v4.4b[2]\n"
- "ldr q7, [x10, #0xd0]\n"
+ "ldr q7, [x9, #0xd0]\n"
".inst 0x6fa0e8c8 // udot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x6fa1e8cc // udot v12.4s, v6.16b, v1.4b[3]\n"
".inst 0x6fa2e8d0 // udot v16.4s, v6.16b, v2.4b[3]\n"
".inst 0x6fa3e8d4 // udot v20.4s, v6.16b, v3.4b[3]\n"
".inst 0x6fa4e8d8 // udot v24.4s, v6.16b, v4.4b[3]\n"
- "ldr q6, [x10, #0xe0]\n"
+ "ldr q6, [x9, #0xe0]\n"
".inst 0x6fa0e8e9 // udot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x6fa1e8ed // udot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x6fa2e8f1 // udot v17.4s, v7.16b, v2.4b[3]\n"
".inst 0x6fa3e8f5 // udot v21.4s, v7.16b, v3.4b[3]\n"
".inst 0x6fa4e8f9 // udot v25.4s, v7.16b, v4.4b[3]\n"
- "ldr q7, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q7, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x6fa0e8ca // udot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x6fa1e8ce // udot v14.4s, v6.16b, v1.4b[3]\n"
".inst 0x6fa2e8d2 // udot v18.4s, v6.16b, v2.4b[3]\n"
".inst 0x6fa3e8d6 // udot v22.4s, v6.16b, v3.4b[3]\n"
".inst 0x6fa4e8da // udot v26.4s, v6.16b, v4.4b[3]\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x6fa0e8eb // udot v11.4s, v7.16b, v0.4b[3]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
".inst 0x6fa1e8ef // udot v15.4s, v7.16b, v1.4b[3]\n"
- "ldr q1, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
".inst 0x6fa2e8f3 // udot v19.4s, v7.16b, v2.4b[3]\n"
- "ldr q2, [x24, #0x0]\n"
+ "ldr q2, [x23, #0x0]\n"
".inst 0x6fa3e8f7 // udot v23.4s, v7.16b, v3.4b[3]\n"
- "ldr q3, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
".inst 0x6fa4e8fb // udot v27.4s, v7.16b, v4.4b[3]\n"
- "ldr q4, [x22, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q4, [x21, #0x0]\n"
"bge 153b\n"
"154:" // Height 5: Multiply loop: Single iteration only
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
- ".inst 0x6f83e0d4 // udot v20.4s, v6.16b, v3.4b[0]\n"
"add x24, x24, #0x10\n"
+ ".inst 0x6f83e0d4 // udot v20.4s, v6.16b, v3.4b[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"add x23, x23, #0x10\n"
".inst 0x6f84e0d8 // udot v24.4s, v6.16b, v4.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
- ".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"add x22, x22, #0x10\n"
+ ".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "add x21, x21, #0x10\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
- "sub x27, x27, #0x10\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x6f83e0f5 // udot v21.4s, v7.16b, v3.4b[0]\n"
".inst 0x6f84e0f9 // udot v25.4s, v7.16b, v4.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q7, [x9, #0x30]\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0ce // udot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6f82e0d2 // udot v18.4s, v6.16b, v2.4b[0]\n"
".inst 0x6f83e0d6 // udot v22.4s, v6.16b, v3.4b[0]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6f84e0da // udot v26.4s, v6.16b, v4.4b[0]\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0ef // udot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f82e0f3 // udot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x6f83e0f7 // udot v23.4s, v7.16b, v3.4b[0]\n"
".inst 0x6f84e0fb // udot v27.4s, v7.16b, v4.4b[0]\n"
- "ldr q7, [x10, #0x50]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x6fa0e0c8 // udot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x6fa1e0cc // udot v12.4s, v6.16b, v1.4b[1]\n"
".inst 0x6fa2e0d0 // udot v16.4s, v6.16b, v2.4b[1]\n"
".inst 0x6fa3e0d4 // udot v20.4s, v6.16b, v3.4b[1]\n"
".inst 0x6fa4e0d8 // udot v24.4s, v6.16b, v4.4b[1]\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x6fa0e0e9 // udot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x6fa1e0ed // udot v13.4s, v7.16b, v1.4b[1]\n"
".inst 0x6fa2e0f1 // udot v17.4s, v7.16b, v2.4b[1]\n"
".inst 0x6fa3e0f5 // udot v21.4s, v7.16b, v3.4b[1]\n"
".inst 0x6fa4e0f9 // udot v25.4s, v7.16b, v4.4b[1]\n"
- "ldr q7, [x10, #0x70]\n"
+ "ldr q7, [x9, #0x70]\n"
".inst 0x6fa0e0ca // udot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x6fa1e0ce // udot v14.4s, v6.16b, v1.4b[1]\n"
".inst 0x6fa2e0d2 // udot v18.4s, v6.16b, v2.4b[1]\n"
".inst 0x6fa3e0d6 // udot v22.4s, v6.16b, v3.4b[1]\n"
".inst 0x6fa4e0da // udot v26.4s, v6.16b, v4.4b[1]\n"
- "ldr q6, [x10, #0x80]\n"
+ "ldr q6, [x9, #0x80]\n"
".inst 0x6fa0e0eb // udot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x6fa1e0ef // udot v15.4s, v7.16b, v1.4b[1]\n"
".inst 0x6fa2e0f3 // udot v19.4s, v7.16b, v2.4b[1]\n"
".inst 0x6fa3e0f7 // udot v23.4s, v7.16b, v3.4b[1]\n"
".inst 0x6fa4e0fb // udot v27.4s, v7.16b, v4.4b[1]\n"
- "ldr q7, [x10, #0x90]\n"
+ "ldr q7, [x9, #0x90]\n"
".inst 0x6f80e8c8 // udot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x6f81e8cc // udot v12.4s, v6.16b, v1.4b[2]\n"
".inst 0x6f82e8d0 // udot v16.4s, v6.16b, v2.4b[2]\n"
".inst 0x6f83e8d4 // udot v20.4s, v6.16b, v3.4b[2]\n"
".inst 0x6f84e8d8 // udot v24.4s, v6.16b, v4.4b[2]\n"
- "ldr q6, [x10, #0xa0]\n"
+ "ldr q6, [x9, #0xa0]\n"
".inst 0x6f80e8e9 // udot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x6f81e8ed // udot v13.4s, v7.16b, v1.4b[2]\n"
".inst 0x6f82e8f1 // udot v17.4s, v7.16b, v2.4b[2]\n"
".inst 0x6f83e8f5 // udot v21.4s, v7.16b, v3.4b[2]\n"
".inst 0x6f84e8f9 // udot v25.4s, v7.16b, v4.4b[2]\n"
- "ldr q7, [x10, #0xb0]\n"
+ "ldr q7, [x9, #0xb0]\n"
".inst 0x6f80e8ca // udot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x6f81e8ce // udot v14.4s, v6.16b, v1.4b[2]\n"
".inst 0x6f82e8d2 // udot v18.4s, v6.16b, v2.4b[2]\n"
".inst 0x6f83e8d6 // udot v22.4s, v6.16b, v3.4b[2]\n"
".inst 0x6f84e8da // udot v26.4s, v6.16b, v4.4b[2]\n"
- "ldr q6, [x10, #0xc0]\n"
+ "ldr q6, [x9, #0xc0]\n"
".inst 0x6f80e8eb // udot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x6f81e8ef // udot v15.4s, v7.16b, v1.4b[2]\n"
".inst 0x6f82e8f3 // udot v19.4s, v7.16b, v2.4b[2]\n"
".inst 0x6f83e8f7 // udot v23.4s, v7.16b, v3.4b[2]\n"
".inst 0x6f84e8fb // udot v27.4s, v7.16b, v4.4b[2]\n"
- "ldr q7, [x10, #0xd0]\n"
+ "ldr q7, [x9, #0xd0]\n"
".inst 0x6fa0e8c8 // udot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x6fa1e8cc // udot v12.4s, v6.16b, v1.4b[3]\n"
".inst 0x6fa2e8d0 // udot v16.4s, v6.16b, v2.4b[3]\n"
".inst 0x6fa3e8d4 // udot v20.4s, v6.16b, v3.4b[3]\n"
".inst 0x6fa4e8d8 // udot v24.4s, v6.16b, v4.4b[3]\n"
- "ldr q6, [x10, #0xe0]\n"
+ "ldr q6, [x9, #0xe0]\n"
".inst 0x6fa0e8e9 // udot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x6fa1e8ed // udot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x6fa2e8f1 // udot v17.4s, v7.16b, v2.4b[3]\n"
".inst 0x6fa3e8f5 // udot v21.4s, v7.16b, v3.4b[3]\n"
".inst 0x6fa4e8f9 // udot v25.4s, v7.16b, v4.4b[3]\n"
- "ldr q7, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q7, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x6fa0e8ca // udot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x6fa1e8ce // udot v14.4s, v6.16b, v1.4b[3]\n"
".inst 0x6fa2e8d2 // udot v18.4s, v6.16b, v2.4b[3]\n"
@@ -2227,32 +2227,32 @@ void a64_hybrid_u8u32_dot_6x16 (
".inst 0x6fa3e8f7 // udot v23.4s, v7.16b, v3.4b[3]\n"
".inst 0x6fa4e8fb // udot v27.4s, v7.16b, v4.4b[3]\n"
"155:" // Height 5: Multiply loop: Main loop skip
- "cbz x27, 160f\n"
- "cmp x27, #0x4\n"
+ "cbz x26, 160f\n"
+ "cmp x26, #0x4\n"
"blt 157f\n"
"156:" // Height 5: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr s1, [x25], #0x4\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
- "ldr s2, [x24], #0x4\n"
- "ldr s3, [x23], #0x4\n"
- "ldr s4, [x22], #0x4\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr s0, [x25], #0x4\n"
+ "sub x26, x26, #0x4\n"
+ "ldr s1, [x24], #0x4\n"
+ "cmp x26, #0x4\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr s3, [x22], #0x4\n"
+ "ldr s4, [x21], #0x4\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr q7, [x10, #0x10]\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x6f83e0d4 // udot v20.4s, v6.16b, v3.4b[0]\n"
".inst 0x6f84e0d8 // udot v24.4s, v6.16b, v4.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
".inst 0x6f83e0f5 // udot v21.4s, v7.16b, v3.4b[0]\n"
".inst 0x6f84e0f9 // udot v25.4s, v7.16b, v4.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "add x10, x10, #0x40\n"
+ "ldr q7, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0ce // udot v14.4s, v6.16b, v1.4b[0]\n"
".inst 0x6f82e0d2 // udot v18.4s, v6.16b, v2.4b[0]\n"
@@ -2264,43 +2264,43 @@ void a64_hybrid_u8u32_dot_6x16 (
".inst 0x6f83e0f7 // udot v23.4s, v7.16b, v3.4b[0]\n"
".inst 0x6f84e0fb // udot v27.4s, v7.16b, v4.4b[0]\n"
"bge 156b\n"
+ "cbz x26, 160f\n"
"157:" // Height 5: Multiply loop: Skip odd blocks
- "cbz x27, 160f\n"
- "tbz x27, #1, 158f\n"
- "ldr h0, [x26], #0x2\n"
- "ldr h1, [x25], #0x2\n"
- "ldr h2, [x24], #0x2\n"
- "ldr h3, [x23], #0x2\n"
- "ldr h4, [x22], #0x2\n"
- "tbz x27, #0, 159f\n"
- "ld1 { v0.b }[2], [x26]\n"
- "ld1 { v1.b }[2], [x25]\n"
- "ld1 { v2.b }[2], [x24]\n"
- "ld1 { v3.b }[2], [x23]\n"
- "ld1 { v4.b }[2], [x22]\n"
+ "tbz x26, #1, 158f\n"
+ "ldr h0, [x25], #0x2\n"
+ "ldr h1, [x24], #0x2\n"
+ "ldr h2, [x23], #0x2\n"
+ "ldr h3, [x22], #0x2\n"
+ "ldr h4, [x21], #0x2\n"
+ "tbz x26, #0, 159f\n"
+ "ld1 { v0.b }[2], [x25]\n"
+ "ld1 { v1.b }[2], [x24]\n"
+ "ld1 { v2.b }[2], [x23]\n"
+ "ld1 { v3.b }[2], [x22]\n"
+ "ld1 { v4.b }[2], [x21]\n"
"b 159f\n"
"158:" // Height 5: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x26, #0x0]\n"
- "ldr b1, [x25, #0x0]\n"
- "ldr b2, [x24, #0x0]\n"
- "ldr b3, [x23, #0x0]\n"
- "ldr b4, [x22, #0x0]\n"
+ "ldr b0, [x25, #0x0]\n"
+ "ldr b1, [x24, #0x0]\n"
+ "ldr b2, [x23, #0x0]\n"
+ "ldr b3, [x22, #0x0]\n"
+ "ldr b4, [x21, #0x0]\n"
"159:" // Height 5: Multiply loop: Ragged operand read: Done
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x6f83e0d4 // udot v20.4s, v6.16b, v3.4b[0]\n"
".inst 0x6f84e0d8 // udot v24.4s, v6.16b, v4.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
".inst 0x6f83e0f5 // udot v21.4s, v7.16b, v3.4b[0]\n"
".inst 0x6f84e0f9 // udot v25.4s, v7.16b, v4.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "add x10, x10, #0x40\n"
+ "ldr q7, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0ce // udot v14.4s, v6.16b, v1.4b[0]\n"
".inst 0x6f82e0d2 // udot v18.4s, v6.16b, v2.4b[0]\n"
@@ -2312,335 +2312,335 @@ void a64_hybrid_u8u32_dot_6x16 (
".inst 0x6f83e0f7 // udot v23.4s, v7.16b, v3.4b[0]\n"
".inst 0x6f84e0fb // udot v27.4s, v7.16b, v4.4b[0]\n"
"160:" // Height 5: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 150b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "cmp x10, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
- "cmp x11, #0x10\n"
+ "add x22, x23, x19, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #2\n"
"prfm pstl1keep, [x21, #0x0]\n"
+ "add x20, x21, x19, LSL #2\n"
+ "prfm pstl1keep, [x20, #0x0]\n"
"bge 169f\n"
- "tbz x11, #3, 164f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v9.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v13.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v21.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
- "st1 { v25.4s }, [x21], #0x10\n"
- "tbz x11, #2, 162f\n"
- "st1 { v10.4s }, [x9], #0x10\n"
- "st1 { v14.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "st1 { v22.4s }, [x22], #0x10\n"
- "st1 { v26.4s }, [x21], #0x10\n"
- "tbz x11, #1, 161f\n"
- "str d11, [x9], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
- "str d23, [x22], #0x8\n"
- "str d27, [x21], #0x8\n"
- "tbz x11, #0, 168f\n"
- "st1 { v11.s }[2], [x9]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
- "st1 { v23.s }[2], [x22]\n"
- "st1 { v27.s }[2], [x21]\n"
+ "tbz x10, #3, 164f\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v9.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v13.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v21.4s }, [x21], #0x10\n"
+ "st1 { v24.4s }, [x20], #0x10\n"
+ "st1 { v25.4s }, [x20], #0x10\n"
+ "tbz x10, #2, 162f\n"
+ "st1 { v10.4s }, [x28], #0x10\n"
+ "st1 { v14.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
+ "st1 { v22.4s }, [x21], #0x10\n"
+ "st1 { v26.4s }, [x20], #0x10\n"
+ "tbz x10, #1, 161f\n"
+ "str d11, [x28], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "str d23, [x21], #0x8\n"
+ "str d27, [x20], #0x8\n"
+ "tbz x10, #0, 168f\n"
+ "st1 { v11.s }[2], [x28]\n"
+ "st1 { v15.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x22]\n"
+ "st1 { v23.s }[2], [x21]\n"
+ "st1 { v27.s }[2], [x20]\n"
"b 168f\n"
"161:" // Height 5: Partial direct writeback: partial_1_12
- "tbz x11, #0, 168f\n"
- "str s11, [x9, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
- "str s23, [x22, #0x0]\n"
- "str s27, [x21, #0x0]\n"
+ "tbz x10, #0, 168f\n"
+ "str s11, [x28, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
+ "str s23, [x21, #0x0]\n"
+ "str s27, [x20, #0x0]\n"
"b 168f\n"
"162:" // Height 5: Partial direct writeback: partial_2_8
- "tbz x11, #1, 163f\n"
- "str d10, [x9], #0x8\n"
- "str d14, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
- "str d22, [x22], #0x8\n"
- "str d26, [x21], #0x8\n"
- "tbz x11, #0, 168f\n"
- "st1 { v10.s }[2], [x9]\n"
- "st1 { v14.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
- "st1 { v22.s }[2], [x22]\n"
- "st1 { v26.s }[2], [x21]\n"
+ "tbz x10, #1, 163f\n"
+ "str d10, [x28], #0x8\n"
+ "str d14, [x23], #0x8\n"
+ "str d18, [x22], #0x8\n"
+ "str d22, [x21], #0x8\n"
+ "str d26, [x20], #0x8\n"
+ "tbz x10, #0, 168f\n"
+ "st1 { v10.s }[2], [x28]\n"
+ "st1 { v14.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x22]\n"
+ "st1 { v22.s }[2], [x21]\n"
+ "st1 { v26.s }[2], [x20]\n"
"b 168f\n"
"163:" // Height 5: Partial direct writeback: partial_1_8
- "tbz x11, #0, 168f\n"
- "str s10, [x9, #0x0]\n"
- "str s14, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
- "str s22, [x22, #0x0]\n"
- "str s26, [x21, #0x0]\n"
+ "tbz x10, #0, 168f\n"
+ "str s10, [x28, #0x0]\n"
+ "str s14, [x23, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
+ "str s22, [x21, #0x0]\n"
+ "str s26, [x20, #0x0]\n"
"b 168f\n"
"164:" // Height 5: Partial direct writeback: partial_4_0
- "tbz x11, #2, 166f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
- "tbz x11, #1, 165f\n"
- "str d9, [x9], #0x8\n"
- "str d13, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
- "str d21, [x22], #0x8\n"
- "str d25, [x21], #0x8\n"
- "tbz x11, #0, 168f\n"
- "st1 { v9.s }[2], [x9]\n"
- "st1 { v13.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
- "st1 { v21.s }[2], [x22]\n"
- "st1 { v25.s }[2], [x21]\n"
+ "tbz x10, #2, 166f\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v24.4s }, [x20], #0x10\n"
+ "tbz x10, #1, 165f\n"
+ "str d9, [x28], #0x8\n"
+ "str d13, [x23], #0x8\n"
+ "str d17, [x22], #0x8\n"
+ "str d21, [x21], #0x8\n"
+ "str d25, [x20], #0x8\n"
+ "tbz x10, #0, 168f\n"
+ "st1 { v9.s }[2], [x28]\n"
+ "st1 { v13.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x22]\n"
+ "st1 { v21.s }[2], [x21]\n"
+ "st1 { v25.s }[2], [x20]\n"
"b 168f\n"
"165:" // Height 5: Partial direct writeback: partial_1_4
- "tbz x11, #0, 168f\n"
- "str s9, [x9, #0x0]\n"
- "str s13, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
- "str s21, [x22, #0x0]\n"
- "str s25, [x21, #0x0]\n"
+ "tbz x10, #0, 168f\n"
+ "str s9, [x28, #0x0]\n"
+ "str s13, [x23, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
+ "str s21, [x21, #0x0]\n"
+ "str s25, [x20, #0x0]\n"
"b 168f\n"
"166:" // Height 5: Partial direct writeback: partial_2_0
- "tbz x11, #1, 167f\n"
- "str d8, [x9], #0x8\n"
- "str d12, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d20, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
- "tbz x11, #0, 168f\n"
- "st1 { v8.s }[2], [x9]\n"
- "st1 { v12.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
- "st1 { v20.s }[2], [x22]\n"
- "st1 { v24.s }[2], [x21]\n"
+ "tbz x10, #1, 167f\n"
+ "str d8, [x28], #0x8\n"
+ "str d12, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "str d20, [x21], #0x8\n"
+ "str d24, [x20], #0x8\n"
+ "tbz x10, #0, 168f\n"
+ "st1 { v8.s }[2], [x28]\n"
+ "st1 { v12.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x22]\n"
+ "st1 { v20.s }[2], [x21]\n"
+ "st1 { v24.s }[2], [x20]\n"
"b 168f\n"
"167:" // Height 5: Partial direct writeback: partial_1_0
- "str s8, [x9, #0x0]\n"
- "str s12, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
- "str s20, [x22, #0x0]\n"
- "str s24, [x21, #0x0]\n"
+ "str s8, [x28, #0x0]\n"
+ "str s12, [x23, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
+ "str s20, [x21, #0x0]\n"
+ "str s24, [x20, #0x0]\n"
"168:" // Height 5: Partial direct writeback: Done
"b 170f\n"
"169:" // Height 5: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q12, [x24, #0x0]\n"
- "str q13, [x24, #0x10]\n"
- "str q14, [x24, #0x20]\n"
- "str q15, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
- "str q20, [x22, #0x0]\n"
- "str q21, [x22, #0x10]\n"
- "str q22, [x22, #0x20]\n"
- "str q23, [x22, #0x30]\n"
- "str q24, [x21, #0x0]\n"
- "str q25, [x21, #0x10]\n"
- "str q26, [x21, #0x20]\n"
- "str q27, [x21, #0x30]\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q12, [x23, #0x0]\n"
+ "str q13, [x23, #0x10]\n"
+ "str q14, [x23, #0x20]\n"
+ "str q15, [x23, #0x30]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q17, [x22, #0x10]\n"
+ "str q18, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
+ "str q20, [x21, #0x0]\n"
+ "str q21, [x21, #0x10]\n"
+ "str q22, [x21, #0x20]\n"
+ "str q23, [x21, #0x30]\n"
+ "str q24, [x20, #0x0]\n"
+ "str q25, [x20, #0x10]\n"
+ "str q26, [x20, #0x20]\n"
+ "str q27, [x20, #0x30]\n"
"170:" // Height 5: Writeback done
- "subs x11, x11, #0x10\n"
+ "subs x10, x10, #0x10\n"
"bgt 138b\n"
"b 206f\n"
"171:" // Height 6
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"mov x20, #0x18\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x9, %x[output_ptr]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
"172:" // Height 6: Column loop
"tbz %x[flags], #0, 182f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "cmp x11, #0x10\n"
- "add x20, x21, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x10, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
+ "add x19, x20, x19, LSL #2\n"
"bge 181f\n"
- "tbz x11, #3, 176f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v24.4s }, [x21], #0x10\n"
- "ld1 { v28.4s }, [x20], #0x10\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x24], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v25.4s }, [x21], #0x10\n"
- "ld1 { v29.4s }, [x20], #0x10\n"
- "tbz x11, #2, 174f\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
- "ld1 { v30.4s }, [x20], #0x10\n"
- "tbz x11, #1, 173f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d15, [x24], #0x8\n"
- "mov x25, #0x38\n"
- "ldr d19, [x23], #0x8\n"
- "ldr d23, [x22], #0x8\n"
- "ldr d27, [x21], #0x8\n"
- "ldr d31, [x20], #0x8\n"
- "tbz x11, #0, 180f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x24]\n"
- "ld1 { v19.s }[2], [x23]\n"
- "ld1 { v23.s }[2], [x22]\n"
- "ld1 { v27.s }[2], [x21]\n"
- "ld1 { v31.s }[2], [x20]\n"
+ "tbz x10, #3, 176f\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v24.4s }, [x20], #0x10\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "ld1 { v25.4s }, [x20], #0x10\n"
+ "ld1 { v28.4s }, [x19], #0x10\n"
+ "ld1 { v29.4s }, [x19], #0x10\n"
+ "tbz x10, #2, 174f\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
+ "ld1 { v26.4s }, [x20], #0x10\n"
+ "ld1 { v30.4s }, [x19], #0x10\n"
+ "tbz x10, #1, 173f\n"
+ "ldr d11, [x28], #0x8\n"
+ "mov x24, #0x38\n"
+ "ldr d15, [x23], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
+ "ldr d23, [x21], #0x8\n"
+ "ldr d27, [x20], #0x8\n"
+ "ldr d31, [x19], #0x8\n"
+ "tbz x10, #0, 180f\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x23]\n"
+ "ld1 { v19.s }[2], [x22]\n"
+ "ld1 { v23.s }[2], [x21]\n"
+ "ld1 { v27.s }[2], [x20]\n"
+ "ld1 { v31.s }[2], [x19]\n"
"b 180f\n"
"173:" // Height 6: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x11, #0, 180f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s15, [x24, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s23, [x22, #0x0]\n"
- "ldr s27, [x21, #0x0]\n"
- "ldr s31, [x20, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x10, #0, 180f\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s15, [x23, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
+ "ldr s23, [x21, #0x0]\n"
+ "ldr s27, [x20, #0x0]\n"
+ "ldr s31, [x19, #0x0]\n"
"b 180f\n"
"174:" // Height 6: Partial accumulate: partial_2_8
- "tbz x11, #1, 175f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d14, [x24], #0x8\n"
- "mov x25, #0x28\n"
- "ldr d18, [x23], #0x8\n"
- "ldr d22, [x22], #0x8\n"
- "ldr d26, [x21], #0x8\n"
- "ldr d30, [x20], #0x8\n"
- "tbz x11, #0, 180f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x24]\n"
- "ld1 { v18.s }[2], [x23]\n"
- "ld1 { v22.s }[2], [x22]\n"
- "ld1 { v26.s }[2], [x21]\n"
- "ld1 { v30.s }[2], [x20]\n"
+ "tbz x10, #1, 175f\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
+ "mov x24, #0x28\n"
+ "ldr d18, [x22], #0x8\n"
+ "ldr d22, [x21], #0x8\n"
+ "ldr d26, [x20], #0x8\n"
+ "ldr d30, [x19], #0x8\n"
+ "tbz x10, #0, 180f\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x23]\n"
+ "ld1 { v18.s }[2], [x22]\n"
+ "ld1 { v22.s }[2], [x21]\n"
+ "ld1 { v26.s }[2], [x20]\n"
+ "ld1 { v30.s }[2], [x19]\n"
"b 180f\n"
"175:" // Height 6: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x11, #0, 180f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s14, [x24, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
- "ldr s22, [x22, #0x0]\n"
- "ldr s26, [x21, #0x0]\n"
- "ldr s30, [x20, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x10, #0, 180f\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s14, [x23, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
+ "ldr s22, [x21, #0x0]\n"
+ "ldr s26, [x20, #0x0]\n"
+ "ldr s30, [x19, #0x0]\n"
"b 180f\n"
"176:" // Height 6: Partial accumulate: partial_4_0
- "tbz x11, #2, 178f\n"
- "ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v24.4s }, [x21], #0x10\n"
- "ld1 { v28.4s }, [x20], #0x10\n"
- "tbz x11, #1, 177f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d13, [x24], #0x8\n"
- "mov x25, #0x18\n"
- "ldr d17, [x23], #0x8\n"
- "ldr d21, [x22], #0x8\n"
- "ldr d25, [x21], #0x8\n"
- "ldr d29, [x20], #0x8\n"
- "tbz x11, #0, 180f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x24]\n"
- "ld1 { v17.s }[2], [x23]\n"
- "ld1 { v21.s }[2], [x22]\n"
- "ld1 { v25.s }[2], [x21]\n"
- "ld1 { v29.s }[2], [x20]\n"
+ "tbz x10, #2, 178f\n"
+ "ld1 { v8.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v24.4s }, [x20], #0x10\n"
+ "ld1 { v28.4s }, [x19], #0x10\n"
+ "tbz x10, #1, 177f\n"
+ "ldr d9, [x28], #0x8\n"
+ "mov x24, #0x18\n"
+ "ldr d13, [x23], #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "ldr d21, [x21], #0x8\n"
+ "ldr d25, [x20], #0x8\n"
+ "ldr d29, [x19], #0x8\n"
+ "tbz x10, #0, 180f\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x23]\n"
+ "ld1 { v17.s }[2], [x22]\n"
+ "ld1 { v21.s }[2], [x21]\n"
+ "ld1 { v25.s }[2], [x20]\n"
+ "ld1 { v29.s }[2], [x19]\n"
"b 180f\n"
"177:" // Height 6: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x11, #0, 180f\n"
- "ldr s9, [x9, #0x0]\n"
- "ldr s13, [x24, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
- "ldr s21, [x22, #0x0]\n"
- "ldr s25, [x21, #0x0]\n"
- "ldr s29, [x20, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x10, #0, 180f\n"
+ "ldr s9, [x28, #0x0]\n"
+ "ldr s13, [x23, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
+ "ldr s21, [x21, #0x0]\n"
+ "ldr s25, [x20, #0x0]\n"
+ "ldr s29, [x19, #0x0]\n"
"b 180f\n"
"178:" // Height 6: Partial accumulate: partial_2_0
- "tbz x11, #1, 179f\n"
- "ldr d8, [x9], #0x8\n"
- "ldr d12, [x24], #0x8\n"
- "mov x25, #0x8\n"
- "ldr d16, [x23], #0x8\n"
- "ldr d20, [x22], #0x8\n"
- "ldr d24, [x21], #0x8\n"
- "ldr d28, [x20], #0x8\n"
- "tbz x11, #0, 180f\n"
- "ld1 { v8.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x24]\n"
- "ld1 { v16.s }[2], [x23]\n"
- "ld1 { v20.s }[2], [x22]\n"
- "ld1 { v24.s }[2], [x21]\n"
- "ld1 { v28.s }[2], [x20]\n"
+ "tbz x10, #1, 179f\n"
+ "ldr d8, [x28], #0x8\n"
+ "ldr d12, [x23], #0x8\n"
+ "mov x24, #0x8\n"
+ "ldr d16, [x22], #0x8\n"
+ "ldr d20, [x21], #0x8\n"
+ "ldr d24, [x20], #0x8\n"
+ "ldr d28, [x19], #0x8\n"
+ "tbz x10, #0, 180f\n"
+ "ld1 { v8.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x23]\n"
+ "ld1 { v16.s }[2], [x22]\n"
+ "ld1 { v20.s }[2], [x21]\n"
+ "ld1 { v24.s }[2], [x20]\n"
+ "ld1 { v28.s }[2], [x19]\n"
"b 180f\n"
"179:" // Height 6: Partial accumulate: partial_1_0
- "ldr s8, [x9, #0x0]\n"
- "ldr s12, [x24, #0x0]\n"
- "mov x25, #0x0\n"
- "ldr s16, [x23, #0x0]\n"
- "ldr s20, [x22, #0x0]\n"
- "ldr s24, [x21, #0x0]\n"
- "ldr s28, [x20, #0x0]\n"
+ "ldr s8, [x28, #0x0]\n"
+ "mov x24, #0x0\n"
+ "ldr s12, [x23, #0x0]\n"
+ "ldr s16, [x22, #0x0]\n"
+ "ldr s20, [x21, #0x0]\n"
+ "ldr s24, [x20, #0x0]\n"
+ "ldr s28, [x19, #0x0]\n"
"180:" // Height 6: Partial accumulate: Done
- "sub x9, x9, x25\n"
+ "sub x28, x28, x24\n"
"b 183f\n"
"181:" // Height 6: full accumulate
- "ldr q8, [x9, #0x0]\n"
- "ldr q9, [x9, #0x10]\n"
- "ldr q10, [x9, #0x20]\n"
- "ldr q11, [x9, #0x30]\n"
- "ldr q12, [x24, #0x0]\n"
- "ldr q13, [x24, #0x10]\n"
- "ldr q14, [x24, #0x20]\n"
- "ldr q15, [x24, #0x30]\n"
- "ldr q16, [x23, #0x0]\n"
- "ldr q17, [x23, #0x10]\n"
- "ldr q18, [x23, #0x20]\n"
- "ldr q19, [x23, #0x30]\n"
- "ldr q20, [x22, #0x0]\n"
- "ldr q21, [x22, #0x10]\n"
- "ldr q22, [x22, #0x20]\n"
- "ldr q23, [x22, #0x30]\n"
- "ldr q24, [x21, #0x0]\n"
- "ldr q25, [x21, #0x10]\n"
- "ldr q26, [x21, #0x20]\n"
- "ldr q27, [x21, #0x30]\n"
- "ldr q28, [x20, #0x0]\n"
- "ldr q29, [x20, #0x10]\n"
- "ldr q30, [x20, #0x20]\n"
- "ldr q31, [x20, #0x30]\n"
+ "ldr q8, [x28, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q10, [x28, #0x20]\n"
+ "ldr q11, [x28, #0x30]\n"
+ "ldr q12, [x23, #0x0]\n"
+ "ldr q13, [x23, #0x10]\n"
+ "ldr q14, [x23, #0x20]\n"
+ "ldr q15, [x23, #0x30]\n"
+ "ldr q16, [x22, #0x0]\n"
+ "ldr q17, [x22, #0x10]\n"
+ "ldr q18, [x22, #0x20]\n"
+ "ldr q19, [x22, #0x30]\n"
+ "ldr q20, [x21, #0x0]\n"
+ "ldr q21, [x21, #0x10]\n"
+ "ldr q22, [x21, #0x20]\n"
+ "ldr q23, [x21, #0x30]\n"
+ "ldr q24, [x20, #0x0]\n"
+ "ldr q25, [x20, #0x10]\n"
+ "ldr q26, [x20, #0x20]\n"
+ "ldr q27, [x20, #0x30]\n"
+ "ldr q28, [x19, #0x0]\n"
+ "ldr q29, [x19, #0x10]\n"
+ "ldr q30, [x19, #0x20]\n"
+ "ldr q31, [x19, #0x30]\n"
"b 183f\n"
"182:" // Height 6: no accumulate
"movi v8.4s, #0x0\n"
@@ -2668,297 +2668,297 @@ void a64_hybrid_u8u32_dot_6x16 (
"movi v30.4s, #0x0\n"
"movi v31.4s, #0x0\n"
"183:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"184:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 185f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 186f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
- "add x21, x21, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 186f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
+ "add x20, x20, x19\n"
"b 186f\n"
"185:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
+ "add x20, x21, x19\n"
"186:" // Height 6: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 189f\n"
- "ldr q0, [x26, #0x0]\n"
- "ldr q1, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q2, [x24, #0x0]\n"
- "ldr q3, [x23, #0x0]\n"
- "ldr q4, [x22, #0x0]\n"
- "ldr q5, [x21, #0x0]\n"
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x20\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
+ "ldr q5, [x20, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
"blt 188f\n"
"187:" // Height 6: Multiply loop: Main loop head
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
+ "add x25, x25, #0x10\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
- "sub x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "add x23, x23, #0x10\n"
".inst 0x6f83e0d4 // udot v20.4s, v6.16b, v3.4b[0]\n"
- "add x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "add x22, x22, #0x10\n"
".inst 0x6f84e0d8 // udot v24.4s, v6.16b, v4.4b[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "add x21, x21, #0x10\n"
".inst 0x6f85e0dc // udot v28.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
- "add x23, x23, #0x10\n"
+ "ldr q6, [x9, #0x20]\n"
+ "add x20, x20, #0x10\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
- "add x22, x22, #0x10\n"
- "add x21, x21, #0x10\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
+ "cmp x26, #0x20\n"
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
".inst 0x6f83e0f5 // udot v21.4s, v7.16b, v3.4b[0]\n"
- "cmp x27, #0x20\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6f84e0f9 // udot v25.4s, v7.16b, v4.4b[0]\n"
".inst 0x6f85e0fd // udot v29.4s, v7.16b, v5.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q7, [x9, #0x30]\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0ce // udot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6f82e0d2 // udot v18.4s, v6.16b, v2.4b[0]\n"
".inst 0x6f83e0d6 // udot v22.4s, v6.16b, v3.4b[0]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x6f84e0da // udot v26.4s, v6.16b, v4.4b[0]\n"
".inst 0x6f85e0de // udot v30.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0ef // udot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f82e0f3 // udot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x6f83e0f7 // udot v23.4s, v7.16b, v3.4b[0]\n"
".inst 0x6f84e0fb // udot v27.4s, v7.16b, v4.4b[0]\n"
".inst 0x6f85e0ff // udot v31.4s, v7.16b, v5.4b[0]\n"
- "ldr q7, [x10, #0x50]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x6fa0e0c8 // udot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x6fa1e0cc // udot v12.4s, v6.16b, v1.4b[1]\n"
".inst 0x6fa2e0d0 // udot v16.4s, v6.16b, v2.4b[1]\n"
".inst 0x6fa3e0d4 // udot v20.4s, v6.16b, v3.4b[1]\n"
".inst 0x6fa4e0d8 // udot v24.4s, v6.16b, v4.4b[1]\n"
".inst 0x6fa5e0dc // udot v28.4s, v6.16b, v5.4b[1]\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x6fa0e0e9 // udot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x6fa1e0ed // udot v13.4s, v7.16b, v1.4b[1]\n"
".inst 0x6fa2e0f1 // udot v17.4s, v7.16b, v2.4b[1]\n"
".inst 0x6fa3e0f5 // udot v21.4s, v7.16b, v3.4b[1]\n"
".inst 0x6fa4e0f9 // udot v25.4s, v7.16b, v4.4b[1]\n"
".inst 0x6fa5e0fd // udot v29.4s, v7.16b, v5.4b[1]\n"
- "ldr q7, [x10, #0x70]\n"
+ "ldr q7, [x9, #0x70]\n"
".inst 0x6fa0e0ca // udot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x6fa1e0ce // udot v14.4s, v6.16b, v1.4b[1]\n"
".inst 0x6fa2e0d2 // udot v18.4s, v6.16b, v2.4b[1]\n"
".inst 0x6fa3e0d6 // udot v22.4s, v6.16b, v3.4b[1]\n"
".inst 0x6fa4e0da // udot v26.4s, v6.16b, v4.4b[1]\n"
".inst 0x6fa5e0de // udot v30.4s, v6.16b, v5.4b[1]\n"
- "ldr q6, [x10, #0x80]\n"
+ "ldr q6, [x9, #0x80]\n"
".inst 0x6fa0e0eb // udot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x6fa1e0ef // udot v15.4s, v7.16b, v1.4b[1]\n"
".inst 0x6fa2e0f3 // udot v19.4s, v7.16b, v2.4b[1]\n"
".inst 0x6fa3e0f7 // udot v23.4s, v7.16b, v3.4b[1]\n"
".inst 0x6fa4e0fb // udot v27.4s, v7.16b, v4.4b[1]\n"
".inst 0x6fa5e0ff // udot v31.4s, v7.16b, v5.4b[1]\n"
- "ldr q7, [x10, #0x90]\n"
+ "ldr q7, [x9, #0x90]\n"
".inst 0x6f80e8c8 // udot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x6f81e8cc // udot v12.4s, v6.16b, v1.4b[2]\n"
".inst 0x6f82e8d0 // udot v16.4s, v6.16b, v2.4b[2]\n"
".inst 0x6f83e8d4 // udot v20.4s, v6.16b, v3.4b[2]\n"
".inst 0x6f84e8d8 // udot v24.4s, v6.16b, v4.4b[2]\n"
".inst 0x6f85e8dc // udot v28.4s, v6.16b, v5.4b[2]\n"
- "ldr q6, [x10, #0xa0]\n"
+ "ldr q6, [x9, #0xa0]\n"
".inst 0x6f80e8e9 // udot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x6f81e8ed // udot v13.4s, v7.16b, v1.4b[2]\n"
".inst 0x6f82e8f1 // udot v17.4s, v7.16b, v2.4b[2]\n"
".inst 0x6f83e8f5 // udot v21.4s, v7.16b, v3.4b[2]\n"
".inst 0x6f84e8f9 // udot v25.4s, v7.16b, v4.4b[2]\n"
".inst 0x6f85e8fd // udot v29.4s, v7.16b, v5.4b[2]\n"
- "ldr q7, [x10, #0xb0]\n"
+ "ldr q7, [x9, #0xb0]\n"
".inst 0x6f80e8ca // udot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x6f81e8ce // udot v14.4s, v6.16b, v1.4b[2]\n"
".inst 0x6f82e8d2 // udot v18.4s, v6.16b, v2.4b[2]\n"
".inst 0x6f83e8d6 // udot v22.4s, v6.16b, v3.4b[2]\n"
".inst 0x6f84e8da // udot v26.4s, v6.16b, v4.4b[2]\n"
".inst 0x6f85e8de // udot v30.4s, v6.16b, v5.4b[2]\n"
- "ldr q6, [x10, #0xc0]\n"
+ "ldr q6, [x9, #0xc0]\n"
".inst 0x6f80e8eb // udot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x6f81e8ef // udot v15.4s, v7.16b, v1.4b[2]\n"
".inst 0x6f82e8f3 // udot v19.4s, v7.16b, v2.4b[2]\n"
".inst 0x6f83e8f7 // udot v23.4s, v7.16b, v3.4b[2]\n"
".inst 0x6f84e8fb // udot v27.4s, v7.16b, v4.4b[2]\n"
".inst 0x6f85e8ff // udot v31.4s, v7.16b, v5.4b[2]\n"
- "ldr q7, [x10, #0xd0]\n"
+ "ldr q7, [x9, #0xd0]\n"
".inst 0x6fa0e8c8 // udot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x6fa1e8cc // udot v12.4s, v6.16b, v1.4b[3]\n"
".inst 0x6fa2e8d0 // udot v16.4s, v6.16b, v2.4b[3]\n"
".inst 0x6fa3e8d4 // udot v20.4s, v6.16b, v3.4b[3]\n"
".inst 0x6fa4e8d8 // udot v24.4s, v6.16b, v4.4b[3]\n"
".inst 0x6fa5e8dc // udot v28.4s, v6.16b, v5.4b[3]\n"
- "ldr q6, [x10, #0xe0]\n"
+ "ldr q6, [x9, #0xe0]\n"
".inst 0x6fa0e8e9 // udot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x6fa1e8ed // udot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x6fa2e8f1 // udot v17.4s, v7.16b, v2.4b[3]\n"
".inst 0x6fa3e8f5 // udot v21.4s, v7.16b, v3.4b[3]\n"
".inst 0x6fa4e8f9 // udot v25.4s, v7.16b, v4.4b[3]\n"
".inst 0x6fa5e8fd // udot v29.4s, v7.16b, v5.4b[3]\n"
- "ldr q7, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q7, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x6fa0e8ca // udot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x6fa1e8ce // udot v14.4s, v6.16b, v1.4b[3]\n"
".inst 0x6fa2e8d2 // udot v18.4s, v6.16b, v2.4b[3]\n"
".inst 0x6fa3e8d6 // udot v22.4s, v6.16b, v3.4b[3]\n"
".inst 0x6fa4e8da // udot v26.4s, v6.16b, v4.4b[3]\n"
".inst 0x6fa5e8de // udot v30.4s, v6.16b, v5.4b[3]\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x6fa0e8eb // udot v11.4s, v7.16b, v0.4b[3]\n"
- "ldr q0, [x26, #0x0]\n"
+ "ldr q0, [x25, #0x0]\n"
".inst 0x6fa1e8ef // udot v15.4s, v7.16b, v1.4b[3]\n"
- "ldr q1, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
".inst 0x6fa2e8f3 // udot v19.4s, v7.16b, v2.4b[3]\n"
- "ldr q2, [x24, #0x0]\n"
+ "ldr q2, [x23, #0x0]\n"
".inst 0x6fa3e8f7 // udot v23.4s, v7.16b, v3.4b[3]\n"
- "ldr q3, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
".inst 0x6fa4e8fb // udot v27.4s, v7.16b, v4.4b[3]\n"
- "ldr q4, [x22, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
".inst 0x6fa5e8ff // udot v31.4s, v7.16b, v5.4b[3]\n"
- "ldr q5, [x21, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q5, [x20, #0x0]\n"
"bge 187b\n"
"188:" // Height 6: Multiply loop: Single iteration only
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
- ".inst 0x6f83e0d4 // udot v20.4s, v6.16b, v3.4b[0]\n"
"add x24, x24, #0x10\n"
+ ".inst 0x6f83e0d4 // udot v20.4s, v6.16b, v3.4b[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"add x23, x23, #0x10\n"
".inst 0x6f84e0d8 // udot v24.4s, v6.16b, v4.4b[0]\n"
- ".inst 0x6f85e0dc // udot v28.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"add x22, x22, #0x10\n"
+ ".inst 0x6f85e0dc // udot v28.4s, v6.16b, v5.4b[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "add x21, x21, #0x10\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "add x20, x20, #0x10\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
- "add x21, x21, #0x10\n"
- "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
".inst 0x6f83e0f5 // udot v21.4s, v7.16b, v3.4b[0]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6f84e0f9 // udot v25.4s, v7.16b, v4.4b[0]\n"
".inst 0x6f85e0fd // udot v29.4s, v7.16b, v5.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
+ "ldr q7, [x9, #0x30]\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0ce // udot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6f82e0d2 // udot v18.4s, v6.16b, v2.4b[0]\n"
".inst 0x6f83e0d6 // udot v22.4s, v6.16b, v3.4b[0]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x6f84e0da // udot v26.4s, v6.16b, v4.4b[0]\n"
".inst 0x6f85e0de // udot v30.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0ef // udot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f82e0f3 // udot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x6f83e0f7 // udot v23.4s, v7.16b, v3.4b[0]\n"
".inst 0x6f84e0fb // udot v27.4s, v7.16b, v4.4b[0]\n"
".inst 0x6f85e0ff // udot v31.4s, v7.16b, v5.4b[0]\n"
- "ldr q7, [x10, #0x50]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x6fa0e0c8 // udot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x6fa1e0cc // udot v12.4s, v6.16b, v1.4b[1]\n"
".inst 0x6fa2e0d0 // udot v16.4s, v6.16b, v2.4b[1]\n"
".inst 0x6fa3e0d4 // udot v20.4s, v6.16b, v3.4b[1]\n"
".inst 0x6fa4e0d8 // udot v24.4s, v6.16b, v4.4b[1]\n"
".inst 0x6fa5e0dc // udot v28.4s, v6.16b, v5.4b[1]\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x6fa0e0e9 // udot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x6fa1e0ed // udot v13.4s, v7.16b, v1.4b[1]\n"
".inst 0x6fa2e0f1 // udot v17.4s, v7.16b, v2.4b[1]\n"
".inst 0x6fa3e0f5 // udot v21.4s, v7.16b, v3.4b[1]\n"
".inst 0x6fa4e0f9 // udot v25.4s, v7.16b, v4.4b[1]\n"
".inst 0x6fa5e0fd // udot v29.4s, v7.16b, v5.4b[1]\n"
- "ldr q7, [x10, #0x70]\n"
+ "ldr q7, [x9, #0x70]\n"
".inst 0x6fa0e0ca // udot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x6fa1e0ce // udot v14.4s, v6.16b, v1.4b[1]\n"
".inst 0x6fa2e0d2 // udot v18.4s, v6.16b, v2.4b[1]\n"
".inst 0x6fa3e0d6 // udot v22.4s, v6.16b, v3.4b[1]\n"
".inst 0x6fa4e0da // udot v26.4s, v6.16b, v4.4b[1]\n"
".inst 0x6fa5e0de // udot v30.4s, v6.16b, v5.4b[1]\n"
- "ldr q6, [x10, #0x80]\n"
+ "ldr q6, [x9, #0x80]\n"
".inst 0x6fa0e0eb // udot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x6fa1e0ef // udot v15.4s, v7.16b, v1.4b[1]\n"
".inst 0x6fa2e0f3 // udot v19.4s, v7.16b, v2.4b[1]\n"
".inst 0x6fa3e0f7 // udot v23.4s, v7.16b, v3.4b[1]\n"
".inst 0x6fa4e0fb // udot v27.4s, v7.16b, v4.4b[1]\n"
".inst 0x6fa5e0ff // udot v31.4s, v7.16b, v5.4b[1]\n"
- "ldr q7, [x10, #0x90]\n"
+ "ldr q7, [x9, #0x90]\n"
".inst 0x6f80e8c8 // udot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x6f81e8cc // udot v12.4s, v6.16b, v1.4b[2]\n"
".inst 0x6f82e8d0 // udot v16.4s, v6.16b, v2.4b[2]\n"
".inst 0x6f83e8d4 // udot v20.4s, v6.16b, v3.4b[2]\n"
".inst 0x6f84e8d8 // udot v24.4s, v6.16b, v4.4b[2]\n"
".inst 0x6f85e8dc // udot v28.4s, v6.16b, v5.4b[2]\n"
- "ldr q6, [x10, #0xa0]\n"
+ "ldr q6, [x9, #0xa0]\n"
".inst 0x6f80e8e9 // udot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x6f81e8ed // udot v13.4s, v7.16b, v1.4b[2]\n"
".inst 0x6f82e8f1 // udot v17.4s, v7.16b, v2.4b[2]\n"
".inst 0x6f83e8f5 // udot v21.4s, v7.16b, v3.4b[2]\n"
".inst 0x6f84e8f9 // udot v25.4s, v7.16b, v4.4b[2]\n"
".inst 0x6f85e8fd // udot v29.4s, v7.16b, v5.4b[2]\n"
- "ldr q7, [x10, #0xb0]\n"
+ "ldr q7, [x9, #0xb0]\n"
".inst 0x6f80e8ca // udot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x6f81e8ce // udot v14.4s, v6.16b, v1.4b[2]\n"
".inst 0x6f82e8d2 // udot v18.4s, v6.16b, v2.4b[2]\n"
".inst 0x6f83e8d6 // udot v22.4s, v6.16b, v3.4b[2]\n"
".inst 0x6f84e8da // udot v26.4s, v6.16b, v4.4b[2]\n"
".inst 0x6f85e8de // udot v30.4s, v6.16b, v5.4b[2]\n"
- "ldr q6, [x10, #0xc0]\n"
+ "ldr q6, [x9, #0xc0]\n"
".inst 0x6f80e8eb // udot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x6f81e8ef // udot v15.4s, v7.16b, v1.4b[2]\n"
".inst 0x6f82e8f3 // udot v19.4s, v7.16b, v2.4b[2]\n"
".inst 0x6f83e8f7 // udot v23.4s, v7.16b, v3.4b[2]\n"
".inst 0x6f84e8fb // udot v27.4s, v7.16b, v4.4b[2]\n"
".inst 0x6f85e8ff // udot v31.4s, v7.16b, v5.4b[2]\n"
- "ldr q7, [x10, #0xd0]\n"
+ "ldr q7, [x9, #0xd0]\n"
".inst 0x6fa0e8c8 // udot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x6fa1e8cc // udot v12.4s, v6.16b, v1.4b[3]\n"
".inst 0x6fa2e8d0 // udot v16.4s, v6.16b, v2.4b[3]\n"
".inst 0x6fa3e8d4 // udot v20.4s, v6.16b, v3.4b[3]\n"
".inst 0x6fa4e8d8 // udot v24.4s, v6.16b, v4.4b[3]\n"
".inst 0x6fa5e8dc // udot v28.4s, v6.16b, v5.4b[3]\n"
- "ldr q6, [x10, #0xe0]\n"
+ "ldr q6, [x9, #0xe0]\n"
".inst 0x6fa0e8e9 // udot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x6fa1e8ed // udot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x6fa2e8f1 // udot v17.4s, v7.16b, v2.4b[3]\n"
".inst 0x6fa3e8f5 // udot v21.4s, v7.16b, v3.4b[3]\n"
".inst 0x6fa4e8f9 // udot v25.4s, v7.16b, v4.4b[3]\n"
".inst 0x6fa5e8fd // udot v29.4s, v7.16b, v5.4b[3]\n"
- "ldr q7, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q7, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x6fa0e8ca // udot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x6fa1e8ce // udot v14.4s, v6.16b, v1.4b[3]\n"
".inst 0x6fa2e8d2 // udot v18.4s, v6.16b, v2.4b[3]\n"
@@ -2972,35 +2972,35 @@ void a64_hybrid_u8u32_dot_6x16 (
".inst 0x6fa4e8fb // udot v27.4s, v7.16b, v4.4b[3]\n"
".inst 0x6fa5e8ff // udot v31.4s, v7.16b, v5.4b[3]\n"
"189:" // Height 6: Multiply loop: Main loop skip
- "cbz x27, 194f\n"
- "cmp x27, #0x4\n"
+ "cbz x26, 194f\n"
+ "cmp x26, #0x4\n"
"blt 191f\n"
"190:" // Height 6: Multiply loop: Odd block loop
- "ldr s0, [x26], #0x4\n"
- "ldr s1, [x25], #0x4\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
- "ldr s2, [x24], #0x4\n"
- "ldr s3, [x23], #0x4\n"
- "ldr s4, [x22], #0x4\n"
- "ldr s5, [x21], #0x4\n"
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr s0, [x25], #0x4\n"
+ "sub x26, x26, #0x4\n"
+ "ldr s1, [x24], #0x4\n"
+ "cmp x26, #0x4\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr s3, [x22], #0x4\n"
+ "ldr s4, [x21], #0x4\n"
+ "ldr s5, [x20], #0x4\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x6f83e0d4 // udot v20.4s, v6.16b, v3.4b[0]\n"
".inst 0x6f84e0d8 // udot v24.4s, v6.16b, v4.4b[0]\n"
".inst 0x6f85e0dc // udot v28.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
".inst 0x6f83e0f5 // udot v21.4s, v7.16b, v3.4b[0]\n"
".inst 0x6f84e0f9 // udot v25.4s, v7.16b, v4.4b[0]\n"
".inst 0x6f85e0fd // udot v29.4s, v7.16b, v5.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "add x10, x10, #0x40\n"
+ "ldr q7, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0ce // udot v14.4s, v6.16b, v1.4b[0]\n"
".inst 0x6f82e0d2 // udot v18.4s, v6.16b, v2.4b[0]\n"
@@ -3014,48 +3014,48 @@ void a64_hybrid_u8u32_dot_6x16 (
".inst 0x6f84e0fb // udot v27.4s, v7.16b, v4.4b[0]\n"
".inst 0x6f85e0ff // udot v31.4s, v7.16b, v5.4b[0]\n"
"bge 190b\n"
+ "cbz x26, 194f\n"
"191:" // Height 6: Multiply loop: Skip odd blocks
- "cbz x27, 194f\n"
- "tbz x27, #1, 192f\n"
- "ldr h0, [x26], #0x2\n"
- "ldr h1, [x25], #0x2\n"
- "ldr h2, [x24], #0x2\n"
- "ldr h3, [x23], #0x2\n"
- "ldr h4, [x22], #0x2\n"
- "ldr h5, [x21], #0x2\n"
- "tbz x27, #0, 193f\n"
- "ld1 { v0.b }[2], [x26]\n"
- "ld1 { v1.b }[2], [x25]\n"
- "ld1 { v2.b }[2], [x24]\n"
- "ld1 { v3.b }[2], [x23]\n"
- "ld1 { v4.b }[2], [x22]\n"
- "ld1 { v5.b }[2], [x21]\n"
+ "tbz x26, #1, 192f\n"
+ "ldr h0, [x25], #0x2\n"
+ "ldr h1, [x24], #0x2\n"
+ "ldr h2, [x23], #0x2\n"
+ "ldr h3, [x22], #0x2\n"
+ "ldr h4, [x21], #0x2\n"
+ "ldr h5, [x20], #0x2\n"
+ "tbz x26, #0, 193f\n"
+ "ld1 { v0.b }[2], [x25]\n"
+ "ld1 { v1.b }[2], [x24]\n"
+ "ld1 { v2.b }[2], [x23]\n"
+ "ld1 { v3.b }[2], [x22]\n"
+ "ld1 { v4.b }[2], [x21]\n"
+ "ld1 { v5.b }[2], [x20]\n"
"b 193f\n"
"192:" // Height 6: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x26, #0x0]\n"
- "ldr b1, [x25, #0x0]\n"
- "ldr b2, [x24, #0x0]\n"
- "ldr b3, [x23, #0x0]\n"
- "ldr b4, [x22, #0x0]\n"
- "ldr b5, [x21, #0x0]\n"
+ "ldr b0, [x25, #0x0]\n"
+ "ldr b1, [x24, #0x0]\n"
+ "ldr b2, [x23, #0x0]\n"
+ "ldr b3, [x22, #0x0]\n"
+ "ldr b4, [x21, #0x0]\n"
+ "ldr b5, [x20, #0x0]\n"
"193:" // Height 6: Multiply loop: Ragged operand read: Done
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x0]\n"
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q7, [x9, #0x10]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x6f83e0d4 // udot v20.4s, v6.16b, v3.4b[0]\n"
".inst 0x6f84e0d8 // udot v24.4s, v6.16b, v4.4b[0]\n"
".inst 0x6f85e0dc // udot v28.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
".inst 0x6f83e0f5 // udot v21.4s, v7.16b, v3.4b[0]\n"
".inst 0x6f84e0f9 // udot v25.4s, v7.16b, v4.4b[0]\n"
".inst 0x6f85e0fd // udot v29.4s, v7.16b, v5.4b[0]\n"
- "ldr q7, [x10, #0x30]\n"
- "add x10, x10, #0x40\n"
+ "ldr q7, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0ce // udot v14.4s, v6.16b, v1.4b[0]\n"
".inst 0x6f82e0d2 // udot v18.4s, v6.16b, v2.4b[0]\n"
@@ -3069,195 +3069,195 @@ void a64_hybrid_u8u32_dot_6x16 (
".inst 0x6f84e0fb // udot v27.4s, v7.16b, v4.4b[0]\n"
".inst 0x6f85e0ff // udot v31.4s, v7.16b, v5.4b[0]\n"
"194:" // Height 6: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 184b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "cmp x10, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
- "add x20, x21, x20, LSL #2\n"
- "cmp x11, #0x10\n"
+ "add x22, x23, x19, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #2\n"
"prfm pstl1keep, [x21, #0x0]\n"
+ "add x20, x21, x19, LSL #2\n"
"prfm pstl1keep, [x20, #0x0]\n"
+ "add x19, x20, x19, LSL #2\n"
+ "prfm pstl1keep, [x19, #0x0]\n"
"bge 203f\n"
- "tbz x11, #3, 198f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v9.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v13.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v21.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
- "st1 { v25.4s }, [x21], #0x10\n"
- "st1 { v28.4s }, [x20], #0x10\n"
- "st1 { v29.4s }, [x20], #0x10\n"
- "tbz x11, #2, 196f\n"
- "st1 { v10.4s }, [x9], #0x10\n"
- "st1 { v14.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "st1 { v22.4s }, [x22], #0x10\n"
- "st1 { v26.4s }, [x21], #0x10\n"
- "st1 { v30.4s }, [x20], #0x10\n"
- "tbz x11, #1, 195f\n"
- "str d11, [x9], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
- "str d23, [x22], #0x8\n"
- "str d27, [x21], #0x8\n"
- "str d31, [x20], #0x8\n"
- "tbz x11, #0, 202f\n"
- "st1 { v11.s }[2], [x9]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
- "st1 { v23.s }[2], [x22]\n"
- "st1 { v27.s }[2], [x21]\n"
- "st1 { v31.s }[2], [x20]\n"
+ "tbz x10, #3, 198f\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v9.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v13.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v21.4s }, [x21], #0x10\n"
+ "st1 { v24.4s }, [x20], #0x10\n"
+ "st1 { v25.4s }, [x20], #0x10\n"
+ "st1 { v28.4s }, [x19], #0x10\n"
+ "st1 { v29.4s }, [x19], #0x10\n"
+ "tbz x10, #2, 196f\n"
+ "st1 { v10.4s }, [x28], #0x10\n"
+ "st1 { v14.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
+ "st1 { v22.4s }, [x21], #0x10\n"
+ "st1 { v26.4s }, [x20], #0x10\n"
+ "st1 { v30.4s }, [x19], #0x10\n"
+ "tbz x10, #1, 195f\n"
+ "str d11, [x28], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "str d23, [x21], #0x8\n"
+ "str d27, [x20], #0x8\n"
+ "str d31, [x19], #0x8\n"
+ "tbz x10, #0, 202f\n"
+ "st1 { v11.s }[2], [x28]\n"
+ "st1 { v15.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x22]\n"
+ "st1 { v23.s }[2], [x21]\n"
+ "st1 { v27.s }[2], [x20]\n"
+ "st1 { v31.s }[2], [x19]\n"
"b 202f\n"
"195:" // Height 6: Partial direct writeback: partial_1_12
- "tbz x11, #0, 202f\n"
- "str s11, [x9, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
- "str s23, [x22, #0x0]\n"
- "str s27, [x21, #0x0]\n"
- "str s31, [x20, #0x0]\n"
+ "tbz x10, #0, 202f\n"
+ "str s11, [x28, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
+ "str s23, [x21, #0x0]\n"
+ "str s27, [x20, #0x0]\n"
+ "str s31, [x19, #0x0]\n"
"b 202f\n"
"196:" // Height 6: Partial direct writeback: partial_2_8
- "tbz x11, #1, 197f\n"
- "str d10, [x9], #0x8\n"
- "str d14, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
- "str d22, [x22], #0x8\n"
- "str d26, [x21], #0x8\n"
- "str d30, [x20], #0x8\n"
- "tbz x11, #0, 202f\n"
- "st1 { v10.s }[2], [x9]\n"
- "st1 { v14.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
- "st1 { v22.s }[2], [x22]\n"
- "st1 { v26.s }[2], [x21]\n"
- "st1 { v30.s }[2], [x20]\n"
+ "tbz x10, #1, 197f\n"
+ "str d10, [x28], #0x8\n"
+ "str d14, [x23], #0x8\n"
+ "str d18, [x22], #0x8\n"
+ "str d22, [x21], #0x8\n"
+ "str d26, [x20], #0x8\n"
+ "str d30, [x19], #0x8\n"
+ "tbz x10, #0, 202f\n"
+ "st1 { v10.s }[2], [x28]\n"
+ "st1 { v14.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x22]\n"
+ "st1 { v22.s }[2], [x21]\n"
+ "st1 { v26.s }[2], [x20]\n"
+ "st1 { v30.s }[2], [x19]\n"
"b 202f\n"
"197:" // Height 6: Partial direct writeback: partial_1_8
- "tbz x11, #0, 202f\n"
- "str s10, [x9, #0x0]\n"
- "str s14, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
- "str s22, [x22, #0x0]\n"
- "str s26, [x21, #0x0]\n"
- "str s30, [x20, #0x0]\n"
+ "tbz x10, #0, 202f\n"
+ "str s10, [x28, #0x0]\n"
+ "str s14, [x23, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
+ "str s22, [x21, #0x0]\n"
+ "str s26, [x20, #0x0]\n"
+ "str s30, [x19, #0x0]\n"
"b 202f\n"
"198:" // Height 6: Partial direct writeback: partial_4_0
- "tbz x11, #2, 200f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
- "st1 { v28.4s }, [x20], #0x10\n"
- "tbz x11, #1, 199f\n"
- "str d9, [x9], #0x8\n"
- "str d13, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
- "str d21, [x22], #0x8\n"
- "str d25, [x21], #0x8\n"
- "str d29, [x20], #0x8\n"
- "tbz x11, #0, 202f\n"
- "st1 { v9.s }[2], [x9]\n"
- "st1 { v13.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
- "st1 { v21.s }[2], [x22]\n"
- "st1 { v25.s }[2], [x21]\n"
- "st1 { v29.s }[2], [x20]\n"
+ "tbz x10, #2, 200f\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v24.4s }, [x20], #0x10\n"
+ "st1 { v28.4s }, [x19], #0x10\n"
+ "tbz x10, #1, 199f\n"
+ "str d9, [x28], #0x8\n"
+ "str d13, [x23], #0x8\n"
+ "str d17, [x22], #0x8\n"
+ "str d21, [x21], #0x8\n"
+ "str d25, [x20], #0x8\n"
+ "str d29, [x19], #0x8\n"
+ "tbz x10, #0, 202f\n"
+ "st1 { v9.s }[2], [x28]\n"
+ "st1 { v13.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x22]\n"
+ "st1 { v21.s }[2], [x21]\n"
+ "st1 { v25.s }[2], [x20]\n"
+ "st1 { v29.s }[2], [x19]\n"
"b 202f\n"
"199:" // Height 6: Partial direct writeback: partial_1_4
- "tbz x11, #0, 202f\n"
- "str s9, [x9, #0x0]\n"
- "str s13, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
- "str s21, [x22, #0x0]\n"
- "str s25, [x21, #0x0]\n"
- "str s29, [x20, #0x0]\n"
+ "tbz x10, #0, 202f\n"
+ "str s9, [x28, #0x0]\n"
+ "str s13, [x23, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
+ "str s21, [x21, #0x0]\n"
+ "str s25, [x20, #0x0]\n"
+ "str s29, [x19, #0x0]\n"
"b 202f\n"
"200:" // Height 6: Partial direct writeback: partial_2_0
- "tbz x11, #1, 201f\n"
- "str d8, [x9], #0x8\n"
- "str d12, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d20, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
- "str d28, [x20], #0x8\n"
- "tbz x11, #0, 202f\n"
- "st1 { v8.s }[2], [x9]\n"
- "st1 { v12.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
- "st1 { v20.s }[2], [x22]\n"
- "st1 { v24.s }[2], [x21]\n"
- "st1 { v28.s }[2], [x20]\n"
+ "tbz x10, #1, 201f\n"
+ "str d8, [x28], #0x8\n"
+ "str d12, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "str d20, [x21], #0x8\n"
+ "str d24, [x20], #0x8\n"
+ "str d28, [x19], #0x8\n"
+ "tbz x10, #0, 202f\n"
+ "st1 { v8.s }[2], [x28]\n"
+ "st1 { v12.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x22]\n"
+ "st1 { v20.s }[2], [x21]\n"
+ "st1 { v24.s }[2], [x20]\n"
+ "st1 { v28.s }[2], [x19]\n"
"b 202f\n"
"201:" // Height 6: Partial direct writeback: partial_1_0
- "str s8, [x9, #0x0]\n"
- "str s12, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
- "str s20, [x22, #0x0]\n"
- "str s24, [x21, #0x0]\n"
- "str s28, [x20, #0x0]\n"
+ "str s8, [x28, #0x0]\n"
+ "str s12, [x23, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
+ "str s20, [x21, #0x0]\n"
+ "str s24, [x20, #0x0]\n"
+ "str s28, [x19, #0x0]\n"
"202:" // Height 6: Partial direct writeback: Done
"b 204f\n"
"203:" // Height 6: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q12, [x24, #0x0]\n"
- "str q13, [x24, #0x10]\n"
- "str q14, [x24, #0x20]\n"
- "str q15, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
- "str q20, [x22, #0x0]\n"
- "str q21, [x22, #0x10]\n"
- "str q22, [x22, #0x20]\n"
- "str q23, [x22, #0x30]\n"
- "str q24, [x21, #0x0]\n"
- "str q25, [x21, #0x10]\n"
- "str q26, [x21, #0x20]\n"
- "str q27, [x21, #0x30]\n"
- "str q28, [x20, #0x0]\n"
- "str q29, [x20, #0x10]\n"
- "str q30, [x20, #0x20]\n"
- "str q31, [x20, #0x30]\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q12, [x23, #0x0]\n"
+ "str q13, [x23, #0x10]\n"
+ "str q14, [x23, #0x20]\n"
+ "str q15, [x23, #0x30]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q17, [x22, #0x10]\n"
+ "str q18, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
+ "str q20, [x21, #0x0]\n"
+ "str q21, [x21, #0x10]\n"
+ "str q22, [x21, #0x20]\n"
+ "str q23, [x21, #0x30]\n"
+ "str q24, [x20, #0x0]\n"
+ "str q25, [x20, #0x10]\n"
+ "str q26, [x20, #0x20]\n"
+ "str q27, [x20, #0x30]\n"
+ "str q28, [x19, #0x0]\n"
+ "str q29, [x19, #0x10]\n"
+ "str q30, [x19, #0x20]\n"
+ "str q31, [x19, #0x30]\n"
"204:" // Height 6: Writeback done
- "subs x11, x11, #0x10\n"
+ "subs x10, x10, #0x10\n"
"bgt 172b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 206f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 205f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"205:" // Update direct input
- "mov x20, #0x6\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x6\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"206:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_mmla_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_mmla_6x16/generic.cpp
index dd0c46e4dc..fabb3f3efd 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_mmla_6x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_mmla_6x16/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -87,73 +87,73 @@ void a64_hybrid_u8u32_mmla_6x16 (
"cmp %x[M], #0x2\n"
"bgt 75f\n"
"beq 38f\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"2:" // Height 1: Column loop
"tbz %x[flags], #0, 13f\n"
- "cmp x11, #0x10\n"
+ "cmp x10, #0x10\n"
"bge 11f\n"
- "tbz x11, #3, 6f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "tbz x11, #2, 4f\n"
- "ld1 { v11.4s }, [x9], #0x10\n"
- "tbz x11, #1, 3f\n"
- "ldr d16, [x9], #0x8\n"
- "mov x25, #0x38\n"
- "tbz x11, #0, 10f\n"
- "ld1 { v16.s }[2], [x9]\n"
+ "tbz x10, #3, 6f\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "tbz x10, #2, 4f\n"
+ "ld1 { v11.4s }, [x28], #0x10\n"
+ "tbz x10, #1, 3f\n"
+ "mov x24, #0x38\n"
+ "ldr d16, [x28], #0x8\n"
+ "tbz x10, #0, 10f\n"
+ "ld1 { v16.s }[2], [x28]\n"
"b 10f\n"
"3:" // Height 1: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x11, #0, 10f\n"
- "ldr s16, [x9, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x10, #0, 10f\n"
+ "ldr s16, [x28, #0x0]\n"
"b 10f\n"
"4:" // Height 1: Partial accumulate: partial_2_8
- "tbz x11, #1, 5f\n"
- "ldr d11, [x9], #0x8\n"
- "mov x25, #0x28\n"
- "tbz x11, #0, 10f\n"
- "ld1 { v11.s }[2], [x9]\n"
+ "tbz x10, #1, 5f\n"
+ "ldr d11, [x28], #0x8\n"
+ "mov x24, #0x28\n"
+ "tbz x10, #0, 10f\n"
+ "ld1 { v11.s }[2], [x28]\n"
"b 10f\n"
"5:" // Height 1: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x11, #0, 10f\n"
- "ldr s11, [x9, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x10, #0, 10f\n"
+ "ldr s11, [x28, #0x0]\n"
"b 10f\n"
"6:" // Height 1: Partial accumulate: partial_4_0
- "tbz x11, #2, 8f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "tbz x11, #1, 7f\n"
- "ldr d10, [x9], #0x8\n"
- "mov x25, #0x18\n"
- "tbz x11, #0, 10f\n"
- "ld1 { v10.s }[2], [x9]\n"
+ "tbz x10, #2, 8f\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "tbz x10, #1, 7f\n"
+ "ldr d10, [x28], #0x8\n"
+ "mov x24, #0x18\n"
+ "tbz x10, #0, 10f\n"
+ "ld1 { v10.s }[2], [x28]\n"
"b 10f\n"
"7:" // Height 1: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x11, #0, 10f\n"
- "ldr s10, [x9, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x10, #0, 10f\n"
+ "ldr s10, [x28, #0x0]\n"
"b 10f\n"
"8:" // Height 1: Partial accumulate: partial_2_0
- "tbz x11, #1, 9f\n"
- "ldr d9, [x9], #0x8\n"
- "mov x25, #0x8\n"
- "tbz x11, #0, 10f\n"
- "ld1 { v9.s }[2], [x9]\n"
+ "tbz x10, #1, 9f\n"
+ "ldr d9, [x28], #0x8\n"
+ "mov x24, #0x8\n"
+ "tbz x10, #0, 10f\n"
+ "ld1 { v9.s }[2], [x28]\n"
"b 10f\n"
"9:" // Height 1: Partial accumulate: partial_1_0
- "ldr s9, [x9, #0x0]\n"
- "mov x25, #0x0\n"
+ "ldr s9, [x28, #0x0]\n"
+ "mov x24, #0x0\n"
"10:" // Height 1: Partial accumulate: Done
- "sub x9, x9, x25\n"
+ "sub x28, x28, x24\n"
"b 12f\n"
"11:" // Height 1: full accumulate
- "ldr q9, [x9, #0x0]\n"
- "ldr q10, [x9, #0x10]\n"
- "ldr q11, [x9, #0x20]\n"
- "ldr q16, [x9, #0x30]\n"
+ "ldr q9, [x28, #0x0]\n"
+ "ldr q10, [x28, #0x10]\n"
+ "ldr q11, [x28, #0x20]\n"
+ "ldr q16, [x28, #0x30]\n"
"12:" // Height 1: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -174,333 +174,337 @@ void a64_hybrid_u8u32_mmla_6x16 (
"movi v14.4s, #0x0\n"
"movi v15.4s, #0x0\n"
"14:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"15:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 16f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 17f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 17f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
"b 17f\n"
"16:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"17:" // Height 1: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 20f\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q7, [x10, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "cmp x26, #0x20\n"
"blt 19f\n"
"18:" // Height 1: Multiply loop: Main loop head
+ "movi v2.16b, #0x0\n"
+ "ldr q7, [x9, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q6, [x9, #0x10]\n"
+ "sub x26, x26, #0x10\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "cmp x26, #0x20\n"
".inst 0x6e87a408 // ummla v8.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x6e86a40c // ummla v12.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x6e87a409 // ummla v9.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
+ "ldr q7, [x9, #0x40]\n"
".inst 0x6e86a40d // ummla v13.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x6e87a40a // ummla v10.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x6e86a40e // ummla v14.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
+ "ldr q6, [x9, #0x70]\n"
".inst 0x6e87a40b // ummla v11.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x80]\n"
+ "ldr q7, [x9, #0x80]\n"
".inst 0x6e86a40f // ummla v15.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
+ "ldr q6, [x9, #0x90]\n"
".inst 0x6e87a428 // ummla v8.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
+ "ldr q7, [x9, #0xa0]\n"
".inst 0x6e86a42c // ummla v12.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
+ "ldr q6, [x9, #0xb0]\n"
".inst 0x6e87a429 // ummla v9.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
+ "ldr q7, [x9, #0xc0]\n"
".inst 0x6e86a42d // ummla v13.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
+ "ldr q6, [x9, #0xd0]\n"
".inst 0x6e87a42a // ummla v10.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
+ "ldr q7, [x9, #0xe0]\n"
".inst 0x6e86a42e // ummla v14.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
- "sub x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
- "cmp x27, #0x20\n"
+ "ldr q6, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x6e87a42b // ummla v11.4s, v1.16b, v7.16b\n"
".inst 0x6e86a42f // ummla v15.4s, v1.16b, v6.16b\n"
- "ldr q1, [x26, #0x0]\n"
- "add x10, x10, #0x100\n"
- "ldr q7, [x10, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q1, [x25, #0x0]\n"
"bge 18b\n"
"19:" // Height 1: Multiply loop: Single iteration only
+ "movi v2.16b, #0x0\n"
+ "ldr q7, [x9, #0x0]\n"
+ "sub x26, x26, #0x10\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q6, [x9, #0x10]\n"
+ "add x25, x25, #0x10\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e87a408 // ummla v8.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x6e86a40c // ummla v12.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x6e87a409 // ummla v9.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
+ "ldr q7, [x9, #0x40]\n"
".inst 0x6e86a40d // ummla v13.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x6e87a40a // ummla v10.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x6e86a40e // ummla v14.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
+ "ldr q6, [x9, #0x70]\n"
".inst 0x6e87a40b // ummla v11.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x80]\n"
+ "ldr q7, [x9, #0x80]\n"
".inst 0x6e86a40f // ummla v15.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
+ "ldr q6, [x9, #0x90]\n"
".inst 0x6e87a428 // ummla v8.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
+ "ldr q7, [x9, #0xa0]\n"
".inst 0x6e86a42c // ummla v12.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
+ "ldr q6, [x9, #0xb0]\n"
".inst 0x6e87a429 // ummla v9.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
+ "ldr q7, [x9, #0xc0]\n"
".inst 0x6e86a42d // ummla v13.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
+ "ldr q6, [x9, #0xd0]\n"
".inst 0x6e87a42a // ummla v10.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
+ "ldr q7, [x9, #0xe0]\n"
".inst 0x6e86a42e // ummla v14.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
- "add x26, x26, #0x10\n"
- "sub x27, x27, #0x10\n"
+ "ldr q6, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x6e87a42b // ummla v11.4s, v1.16b, v7.16b\n"
".inst 0x6e86a42f // ummla v15.4s, v1.16b, v6.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "add x10, x10, #0x100\n"
"20:" // Height 1: Multiply loop: Main loop skip
- "cbz x27, 27f\n"
- "cmp x27, #0x8\n"
+ "cbz x26, 27f\n"
+ "cmp x26, #0x8\n"
"blt 22f\n"
"21:" // Height 1: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr q6, [x10, #0x0]\n"
+ "movi v2.16b, #0x0\n"
+ "ldr d1, [x25], #0x8\n"
+ "sub x26, x26, #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "cmp x26, #0x8\n"
".inst 0x6e86a408 // ummla v8.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x10]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x6e87a40c // ummla v12.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x30]\n"
+ "ldr q7, [x9, #0x30]\n"
".inst 0x6e86a409 // ummla v9.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x6e87a40d // ummla v13.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x50]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x6e86a40a // ummla v10.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x6e87a40e // ummla v14.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x70]\n"
- "sub x27, x27, #0x8\n"
- "cmp x27, #0x8\n"
+ "ldr q7, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
".inst 0x6e86a40b // ummla v11.4s, v0.16b, v6.16b\n"
".inst 0x6e87a40f // ummla v15.4s, v0.16b, v7.16b\n"
- "add x10, x10, #0x80\n"
"bge 21b\n"
+ "cbz x26, 27f\n"
"22:" // Height 1: Multiply loop: Skip odd blocks
- "cbz x27, 27f\n"
- "tbz x27, #2, 24f\n"
- "ldr s1, [x26], #0x4\n"
- "tbz x27, #1, 23f\n"
- "ld1 { v1.h }[2], [x26], #0x2\n"
- "tbz x27, #0, 26f\n"
- "ld1 { v1.b }[6], [x26]\n"
+ "tbz x26, #2, 24f\n"
+ "ldr s1, [x25], #0x4\n"
+ "tbz x26, #1, 23f\n"
+ "ld1 { v1.h }[2], [x25], #0x2\n"
+ "tbz x26, #0, 26f\n"
+ "ld1 { v1.b }[6], [x25]\n"
"b 26f\n"
"23:" // Height 1: Multiply loop: Ragged operand read: partial_1_4
- "tbz x27, #0, 26f\n"
- "ld1 { v1.b }[4], [x26]\n"
+ "tbz x26, #0, 26f\n"
+ "ld1 { v1.b }[4], [x25]\n"
"b 26f\n"
"24:" // Height 1: Multiply loop: Ragged operand read: partial_2_0
- "tbz x27, #1, 25f\n"
- "ldr h1, [x26], #0x2\n"
- "tbz x27, #0, 26f\n"
- "ld1 { v1.b }[2], [x26]\n"
+ "tbz x26, #1, 25f\n"
+ "ldr h1, [x25], #0x2\n"
+ "tbz x26, #0, 26f\n"
+ "ld1 { v1.b }[2], [x25]\n"
"b 26f\n"
"25:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
- "ldr b1, [x26, #0x0]\n"
+ "ldr b1, [x25, #0x0]\n"
"26:" // Height 1: Multiply loop: Ragged operand read: Done
- "ldr q7, [x10, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
+ "movi v2.16b, #0x0\n"
+ "ldr q7, [x9, #0x0]\n"
+ "ldr q6, [x9, #0x10]\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
".inst 0x6e87a408 // ummla v8.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x6e86a40c // ummla v12.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x6e87a409 // ummla v9.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
+ "ldr q7, [x9, #0x40]\n"
".inst 0x6e86a40d // ummla v13.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x6e87a40a // ummla v10.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x6e86a40e // ummla v14.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
+ "ldr q6, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
".inst 0x6e87a40b // ummla v11.4s, v0.16b, v7.16b\n"
".inst 0x6e86a40f // ummla v15.4s, v0.16b, v6.16b\n"
- "add x10, x10, #0x80\n"
"27:" // Height 1: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 15b\n"
- "cmp x11, #0x10\n"
"uzp1 v8.2d, v8.2d, v12.2d\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
"uzp1 v9.2d, v9.2d, v13.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
+ "cmp x10, #0x10\n"
"uzp1 v10.2d, v10.2d, v14.2d\n"
"uzp1 v11.2d, v11.2d, v15.2d\n"
"bge 36f\n"
- "tbz x11, #3, 31f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v9.4s }, [x9], #0x10\n"
- "tbz x11, #2, 29f\n"
- "st1 { v10.4s }, [x9], #0x10\n"
- "tbz x11, #1, 28f\n"
- "str d11, [x9], #0x8\n"
- "tbz x11, #0, 35f\n"
- "st1 { v11.s }[2], [x9]\n"
+ "tbz x10, #3, 31f\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "st1 { v9.4s }, [x28], #0x10\n"
+ "tbz x10, #2, 29f\n"
+ "st1 { v10.4s }, [x28], #0x10\n"
+ "tbz x10, #1, 28f\n"
+ "str d11, [x28], #0x8\n"
+ "tbz x10, #0, 35f\n"
+ "st1 { v11.s }[2], [x28]\n"
"b 35f\n"
"28:" // Height 1: Partial direct writeback: partial_1_12
- "tbz x11, #0, 35f\n"
- "str s11, [x9, #0x0]\n"
+ "tbz x10, #0, 35f\n"
+ "str s11, [x28, #0x0]\n"
"b 35f\n"
"29:" // Height 1: Partial direct writeback: partial_2_8
- "tbz x11, #1, 30f\n"
- "str d10, [x9], #0x8\n"
- "tbz x11, #0, 35f\n"
- "st1 { v10.s }[2], [x9]\n"
+ "tbz x10, #1, 30f\n"
+ "str d10, [x28], #0x8\n"
+ "tbz x10, #0, 35f\n"
+ "st1 { v10.s }[2], [x28]\n"
"b 35f\n"
"30:" // Height 1: Partial direct writeback: partial_1_8
- "tbz x11, #0, 35f\n"
- "str s10, [x9, #0x0]\n"
+ "tbz x10, #0, 35f\n"
+ "str s10, [x28, #0x0]\n"
"b 35f\n"
"31:" // Height 1: Partial direct writeback: partial_4_0
- "tbz x11, #2, 33f\n"
- "st1 { v8.4s }, [x9], #0x10\n"
- "tbz x11, #1, 32f\n"
- "str d9, [x9], #0x8\n"
- "tbz x11, #0, 35f\n"
- "st1 { v9.s }[2], [x9]\n"
+ "tbz x10, #2, 33f\n"
+ "st1 { v8.4s }, [x28], #0x10\n"
+ "tbz x10, #1, 32f\n"
+ "str d9, [x28], #0x8\n"
+ "tbz x10, #0, 35f\n"
+ "st1 { v9.s }[2], [x28]\n"
"b 35f\n"
"32:" // Height 1: Partial direct writeback: partial_1_4
- "tbz x11, #0, 35f\n"
- "str s9, [x9, #0x0]\n"
+ "tbz x10, #0, 35f\n"
+ "str s9, [x28, #0x0]\n"
"b 35f\n"
"33:" // Height 1: Partial direct writeback: partial_2_0
- "tbz x11, #1, 34f\n"
- "str d8, [x9], #0x8\n"
- "tbz x11, #0, 35f\n"
- "st1 { v8.s }[2], [x9]\n"
+ "tbz x10, #1, 34f\n"
+ "str d8, [x28], #0x8\n"
+ "tbz x10, #0, 35f\n"
+ "st1 { v8.s }[2], [x28]\n"
"b 35f\n"
"34:" // Height 1: Partial direct writeback: partial_1_0
- "str s8, [x9, #0x0]\n"
+ "str s8, [x28, #0x0]\n"
"35:" // Height 1: Partial direct writeback: Done
"b 37f\n"
"36:" // Height 1: Full writeback
- "str q8, [x9, #0x0]\n"
- "str q9, [x9, #0x10]\n"
- "str q10, [x9, #0x20]\n"
- "str q11, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "str q8, [x28, #0x0]\n"
+ "str q9, [x28, #0x10]\n"
+ "str q10, [x28, #0x20]\n"
+ "str q11, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
"37:" // Height 1: Writeback done
- "subs x11, x11, #0x10\n"
+ "subs x10, x10, #0x10\n"
"bgt 2b\n"
"b 224f\n"
"38:" // Height 2
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"39:" // Height 2: Column loop
"tbz %x[flags], #0, 50f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "cmp x11, #0x10\n"
- "add x24, x9, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x10, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
"bge 48f\n"
- "tbz x11, #3, 43f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x24], #0x10\n"
- "tbz x11, #2, 41f\n"
- "ld1 { v11.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "tbz x11, #1, 40f\n"
- "ldr d16, [x9], #0x8\n"
- "ldr d15, [x24], #0x8\n"
- "mov x25, #0x38\n"
- "tbz x11, #0, 47f\n"
- "ld1 { v16.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x24]\n"
+ "tbz x10, #3, 43f\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x23], #0x10\n"
+ "tbz x10, #2, 41f\n"
+ "ld1 { v11.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "tbz x10, #1, 40f\n"
+ "mov x24, #0x38\n"
+ "ldr d16, [x28], #0x8\n"
+ "ldr d15, [x23], #0x8\n"
+ "tbz x10, #0, 47f\n"
+ "ld1 { v16.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x23]\n"
"b 47f\n"
"40:" // Height 2: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x11, #0, 47f\n"
- "ldr s16, [x9, #0x0]\n"
- "ldr s15, [x24, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x10, #0, 47f\n"
+ "ldr s16, [x28, #0x0]\n"
+ "ldr s15, [x23, #0x0]\n"
"b 47f\n"
"41:" // Height 2: Partial accumulate: partial_2_8
- "tbz x11, #1, 42f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d14, [x24], #0x8\n"
- "mov x25, #0x28\n"
- "tbz x11, #0, 47f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x24]\n"
+ "tbz x10, #1, 42f\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
+ "mov x24, #0x28\n"
+ "tbz x10, #0, 47f\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x23]\n"
"b 47f\n"
"42:" // Height 2: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x11, #0, 47f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s14, [x24, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x10, #0, 47f\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s14, [x23, #0x0]\n"
"b 47f\n"
"43:" // Height 2: Partial accumulate: partial_4_0
- "tbz x11, #2, 45f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "tbz x11, #1, 44f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d13, [x24], #0x8\n"
- "mov x25, #0x18\n"
- "tbz x11, #0, 47f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x24]\n"
+ "tbz x10, #2, 45f\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "tbz x10, #1, 44f\n"
+ "mov x24, #0x18\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d13, [x23], #0x8\n"
+ "tbz x10, #0, 47f\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x23]\n"
"b 47f\n"
"44:" // Height 2: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x11, #0, 47f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s13, [x24, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x10, #0, 47f\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s13, [x23, #0x0]\n"
"b 47f\n"
"45:" // Height 2: Partial accumulate: partial_2_0
- "tbz x11, #1, 46f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d12, [x24], #0x8\n"
- "mov x25, #0x8\n"
- "tbz x11, #0, 47f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x24]\n"
+ "tbz x10, #1, 46f\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d12, [x23], #0x8\n"
+ "mov x24, #0x8\n"
+ "tbz x10, #0, 47f\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x23]\n"
"b 47f\n"
"46:" // Height 2: Partial accumulate: partial_1_0
- "ldr s9, [x9, #0x0]\n"
- "ldr s12, [x24, #0x0]\n"
- "mov x25, #0x0\n"
+ "ldr s9, [x28, #0x0]\n"
+ "mov x24, #0x0\n"
+ "ldr s12, [x23, #0x0]\n"
"47:" // Height 2: Partial accumulate: Done
- "sub x9, x9, x25\n"
+ "sub x28, x28, x24\n"
"b 49f\n"
"48:" // Height 2: full accumulate
- "ldr q9, [x9, #0x0]\n"
- "ldr q10, [x9, #0x10]\n"
- "ldr q11, [x9, #0x20]\n"
- "ldr q16, [x9, #0x30]\n"
- "ldr q12, [x24, #0x0]\n"
- "ldr q13, [x24, #0x10]\n"
- "ldr q14, [x24, #0x20]\n"
- "ldr q15, [x24, #0x30]\n"
+ "ldr q9, [x28, #0x0]\n"
+ "ldr q10, [x28, #0x10]\n"
+ "ldr q11, [x28, #0x20]\n"
+ "ldr q16, [x28, #0x30]\n"
+ "ldr q12, [x23, #0x0]\n"
+ "ldr q13, [x23, #0x10]\n"
+ "ldr q14, [x23, #0x20]\n"
+ "ldr q15, [x23, #0x30]\n"
"49:" // Height 2: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -521,398 +525,398 @@ void a64_hybrid_u8u32_mmla_6x16 (
"movi v14.4s, #0x0\n"
"movi v15.4s, #0x0\n"
"51:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"52:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 53f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 54f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 54f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
"b 54f\n"
"53:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
"54:" // Height 2: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 57f\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q2, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q7, [x10, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "ldr q2, [x24, #0x0]\n"
+ "cmp x26, #0x20\n"
"blt 56f\n"
"55:" // Height 2: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q7, [x9, #0x0]\n"
+ "add x25, x25, #0x10\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "ldr q6, [x9, #0x10]\n"
+ "add x24, x24, #0x10\n"
".inst 0x6e87a408 // ummla v8.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x6e86a40c // ummla v12.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "cmp x26, #0x20\n"
".inst 0x6e87a409 // ummla v9.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
+ "ldr q7, [x9, #0x40]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e86a40d // ummla v13.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x6e87a40a // ummla v10.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
+ "ldr q7, [x9, #0x60]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e86a40e // ummla v14.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
+ "ldr q6, [x9, #0x70]\n"
".inst 0x6e87a40b // ummla v11.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x80]\n"
+ "ldr q7, [x9, #0x80]\n"
+ "ldr q2, [x24, #0x0]\n"
".inst 0x6e86a40f // ummla v15.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
+ "ldr q6, [x9, #0x90]\n"
".inst 0x6e87a428 // ummla v8.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
+ "ldr q7, [x9, #0xa0]\n"
".inst 0x6e86a42c // ummla v12.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
+ "ldr q6, [x9, #0xb0]\n"
".inst 0x6e87a429 // ummla v9.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
+ "ldr q7, [x9, #0xc0]\n"
".inst 0x6e86a42d // ummla v13.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
+ "ldr q6, [x9, #0xd0]\n"
".inst 0x6e87a42a // ummla v10.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
+ "ldr q7, [x9, #0xe0]\n"
".inst 0x6e86a42e // ummla v14.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
- "sub x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
- "ldr q2, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
+ "ldr q6, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x6e87a42b // ummla v11.4s, v1.16b, v7.16b\n"
- "add x10, x10, #0x100\n"
- "ldr q7, [x10, #0x0]\n"
".inst 0x6e86a42f // ummla v15.4s, v1.16b, v6.16b\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q1, [x25, #0x0]\n"
"bge 55b\n"
"56:" // Height 2: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q7, [x9, #0x0]\n"
+ "sub x26, x26, #0x10\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "ldr q6, [x9, #0x10]\n"
+ "add x25, x25, #0x10\n"
".inst 0x6e87a408 // ummla v8.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
+ "add x24, x24, #0x10\n"
".inst 0x6e86a40c // ummla v12.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e87a409 // ummla v9.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
+ "ldr q7, [x9, #0x40]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e86a40d // ummla v13.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x6e87a40a // ummla v10.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x6e86a40e // ummla v14.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
+ "ldr q6, [x9, #0x70]\n"
".inst 0x6e87a40b // ummla v11.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x80]\n"
+ "ldr q7, [x9, #0x80]\n"
".inst 0x6e86a40f // ummla v15.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
+ "ldr q6, [x9, #0x90]\n"
".inst 0x6e87a428 // ummla v8.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
+ "ldr q7, [x9, #0xa0]\n"
".inst 0x6e86a42c // ummla v12.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
+ "ldr q6, [x9, #0xb0]\n"
".inst 0x6e87a429 // ummla v9.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
+ "ldr q7, [x9, #0xc0]\n"
".inst 0x6e86a42d // ummla v13.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
+ "ldr q6, [x9, #0xd0]\n"
".inst 0x6e87a42a // ummla v10.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
+ "ldr q7, [x9, #0xe0]\n"
".inst 0x6e86a42e // ummla v14.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
+ "ldr q6, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x6e87a42b // ummla v11.4s, v1.16b, v7.16b\n"
".inst 0x6e86a42f // ummla v15.4s, v1.16b, v6.16b\n"
- "sub x27, x27, #0x10\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "add x10, x10, #0x100\n"
"57:" // Height 2: Multiply loop: Main loop skip
- "cbz x27, 64f\n"
- "cmp x27, #0x8\n"
+ "cbz x26, 64f\n"
+ "cmp x26, #0x8\n"
"blt 59f\n"
"58:" // Height 2: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d2, [x25], #0x8\n"
+ "ldr d1, [x25], #0x8\n"
+ "sub x26, x26, #0x8\n"
+ "ldr d2, [x24], #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "sub x27, x27, #0x8\n"
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "cmp x26, #0x8\n"
".inst 0x6e86a408 // ummla v8.4s, v0.16b, v6.16b\n"
+ "ldr q7, [x9, #0x10]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x6e87a40c // ummla v12.4s, v0.16b, v7.16b\n"
- "ldr q6, [x10, #0x20]\n"
- "ldr q7, [x10, #0x30]\n"
+ "ldr q7, [x9, #0x30]\n"
".inst 0x6e86a409 // ummla v9.4s, v0.16b, v6.16b\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x6e87a40d // ummla v13.4s, v0.16b, v7.16b\n"
- "ldr q6, [x10, #0x40]\n"
- "ldr q7, [x10, #0x50]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x6e86a40a // ummla v10.4s, v0.16b, v6.16b\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x6e87a40e // ummla v14.4s, v0.16b, v7.16b\n"
- "ldr q6, [x10, #0x60]\n"
- "ldr q7, [x10, #0x70]\n"
- "cmp x27, #0x8\n"
+ "ldr q7, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
".inst 0x6e86a40b // ummla v11.4s, v0.16b, v6.16b\n"
".inst 0x6e87a40f // ummla v15.4s, v0.16b, v7.16b\n"
- "add x10, x10, #0x80\n"
"bge 58b\n"
+ "cbz x26, 64f\n"
"59:" // Height 2: Multiply loop: Skip odd blocks
- "cbz x27, 64f\n"
- "tbz x27, #2, 61f\n"
- "ldr s1, [x26], #0x4\n"
- "ldr s2, [x25], #0x4\n"
- "tbz x27, #1, 60f\n"
- "ld1 { v1.h }[2], [x26], #0x2\n"
- "ld1 { v2.h }[2], [x25], #0x2\n"
- "tbz x27, #0, 63f\n"
- "ld1 { v1.b }[6], [x26]\n"
- "ld1 { v2.b }[6], [x25]\n"
+ "tbz x26, #2, 61f\n"
+ "ldr s1, [x25], #0x4\n"
+ "ldr s2, [x24], #0x4\n"
+ "tbz x26, #1, 60f\n"
+ "ld1 { v1.h }[2], [x25], #0x2\n"
+ "ld1 { v2.h }[2], [x24], #0x2\n"
+ "tbz x26, #0, 63f\n"
+ "ld1 { v1.b }[6], [x25]\n"
+ "ld1 { v2.b }[6], [x24]\n"
"b 63f\n"
"60:" // Height 2: Multiply loop: Ragged operand read: partial_1_4
- "tbz x27, #0, 63f\n"
- "ld1 { v1.b }[4], [x26]\n"
- "ld1 { v2.b }[4], [x25]\n"
+ "tbz x26, #0, 63f\n"
+ "ld1 { v1.b }[4], [x25]\n"
+ "ld1 { v2.b }[4], [x24]\n"
"b 63f\n"
"61:" // Height 2: Multiply loop: Ragged operand read: partial_2_0
- "tbz x27, #1, 62f\n"
- "ldr h1, [x26], #0x2\n"
- "ldr h2, [x25], #0x2\n"
- "tbz x27, #0, 63f\n"
- "ld1 { v1.b }[2], [x26]\n"
- "ld1 { v2.b }[2], [x25]\n"
+ "tbz x26, #1, 62f\n"
+ "ldr h1, [x25], #0x2\n"
+ "ldr h2, [x24], #0x2\n"
+ "tbz x26, #0, 63f\n"
+ "ld1 { v1.b }[2], [x25]\n"
+ "ld1 { v2.b }[2], [x24]\n"
"b 63f\n"
"62:" // Height 2: Multiply loop: Ragged operand read: partial_1_0
- "ldr b1, [x26, #0x0]\n"
- "ldr b2, [x25, #0x0]\n"
+ "ldr b1, [x25, #0x0]\n"
+ "ldr b2, [x24, #0x0]\n"
"63:" // Height 2: Multiply loop: Ragged operand read: Done
- "ldr q7, [x10, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q7, [x9, #0x0]\n"
+ "ldr q6, [x9, #0x10]\n"
".inst 0x6e87a408 // ummla v8.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x6e86a40c // ummla v12.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x6e87a409 // ummla v9.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
+ "ldr q7, [x9, #0x40]\n"
".inst 0x6e86a40d // ummla v13.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x6e87a40a // ummla v10.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x6e86a40e // ummla v14.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
+ "ldr q6, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
".inst 0x6e87a40b // ummla v11.4s, v0.16b, v7.16b\n"
".inst 0x6e86a40f // ummla v15.4s, v0.16b, v6.16b\n"
- "add x10, x10, #0x80\n"
"64:" // Height 2: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 52b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "cmp x11, #0x10\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "cmp x10, #0x10\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x28, x19, LSL #2\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
"bge 73f\n"
- "tbz x11, #3, 68f\n"
- "st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x24], #0x10\n"
- "st1 { v9.4s }, [x24], #0x10\n"
- "tbz x11, #2, 66f\n"
- "st1 { v13.4s }, [x9], #0x10\n"
- "st1 { v10.4s }, [x24], #0x10\n"
- "tbz x11, #1, 65f\n"
- "str d14, [x9], #0x8\n"
- "str d11, [x24], #0x8\n"
- "tbz x11, #0, 72f\n"
- "st1 { v14.s }[2], [x9]\n"
- "st1 { v11.s }[2], [x24]\n"
+ "tbz x10, #3, 68f\n"
+ "st1 { v7.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x23], #0x10\n"
+ "st1 { v9.4s }, [x23], #0x10\n"
+ "tbz x10, #2, 66f\n"
+ "st1 { v13.4s }, [x28], #0x10\n"
+ "st1 { v10.4s }, [x23], #0x10\n"
+ "tbz x10, #1, 65f\n"
+ "str d14, [x28], #0x8\n"
+ "str d11, [x23], #0x8\n"
+ "tbz x10, #0, 72f\n"
+ "st1 { v14.s }[2], [x28]\n"
+ "st1 { v11.s }[2], [x23]\n"
"b 72f\n"
"65:" // Height 2: Partial direct writeback: partial_1_12
- "tbz x11, #0, 72f\n"
- "str s14, [x9, #0x0]\n"
- "str s11, [x24, #0x0]\n"
+ "tbz x10, #0, 72f\n"
+ "str s14, [x28, #0x0]\n"
+ "str s11, [x23, #0x0]\n"
"b 72f\n"
"66:" // Height 2: Partial direct writeback: partial_2_8
- "tbz x11, #1, 67f\n"
- "str d13, [x9], #0x8\n"
- "str d10, [x24], #0x8\n"
- "tbz x11, #0, 72f\n"
- "st1 { v13.s }[2], [x9]\n"
- "st1 { v10.s }[2], [x24]\n"
+ "tbz x10, #1, 67f\n"
+ "str d13, [x28], #0x8\n"
+ "str d10, [x23], #0x8\n"
+ "tbz x10, #0, 72f\n"
+ "st1 { v13.s }[2], [x28]\n"
+ "st1 { v10.s }[2], [x23]\n"
"b 72f\n"
"67:" // Height 2: Partial direct writeback: partial_1_8
- "tbz x11, #0, 72f\n"
- "str s13, [x9, #0x0]\n"
- "str s10, [x24, #0x0]\n"
+ "tbz x10, #0, 72f\n"
+ "str s13, [x28, #0x0]\n"
+ "str s10, [x23, #0x0]\n"
"b 72f\n"
"68:" // Height 2: Partial direct writeback: partial_4_0
- "tbz x11, #2, 70f\n"
- "st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x24], #0x10\n"
- "tbz x11, #1, 69f\n"
- "str d12, [x9], #0x8\n"
- "str d9, [x24], #0x8\n"
- "tbz x11, #0, 72f\n"
- "st1 { v12.s }[2], [x9]\n"
- "st1 { v9.s }[2], [x24]\n"
+ "tbz x10, #2, 70f\n"
+ "st1 { v7.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x23], #0x10\n"
+ "tbz x10, #1, 69f\n"
+ "str d12, [x28], #0x8\n"
+ "str d9, [x23], #0x8\n"
+ "tbz x10, #0, 72f\n"
+ "st1 { v12.s }[2], [x28]\n"
+ "st1 { v9.s }[2], [x23]\n"
"b 72f\n"
"69:" // Height 2: Partial direct writeback: partial_1_4
- "tbz x11, #0, 72f\n"
- "str s12, [x9, #0x0]\n"
- "str s9, [x24, #0x0]\n"
+ "tbz x10, #0, 72f\n"
+ "str s12, [x28, #0x0]\n"
+ "str s9, [x23, #0x0]\n"
"b 72f\n"
"70:" // Height 2: Partial direct writeback: partial_2_0
- "tbz x11, #1, 71f\n"
- "str d7, [x9], #0x8\n"
- "str d8, [x24], #0x8\n"
- "tbz x11, #0, 72f\n"
- "st1 { v7.s }[2], [x9]\n"
- "st1 { v8.s }[2], [x24]\n"
+ "tbz x10, #1, 71f\n"
+ "str d7, [x28], #0x8\n"
+ "str d8, [x23], #0x8\n"
+ "tbz x10, #0, 72f\n"
+ "st1 { v7.s }[2], [x28]\n"
+ "st1 { v8.s }[2], [x23]\n"
"b 72f\n"
"71:" // Height 2: Partial direct writeback: partial_1_0
- "str s7, [x9, #0x0]\n"
- "str s8, [x24, #0x0]\n"
+ "str s7, [x28, #0x0]\n"
+ "str s8, [x23, #0x0]\n"
"72:" // Height 2: Partial direct writeback: Done
"b 74f\n"
"73:" // Height 2: Full writeback
- "str q7, [x9, #0x0]\n"
- "str q12, [x9, #0x10]\n"
- "str q13, [x9, #0x20]\n"
- "str q14, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q8, [x24, #0x0]\n"
- "str q9, [x24, #0x10]\n"
- "str q10, [x24, #0x20]\n"
- "str q11, [x24, #0x30]\n"
+ "str q7, [x28, #0x0]\n"
+ "str q12, [x28, #0x10]\n"
+ "str q13, [x28, #0x20]\n"
+ "str q14, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q8, [x23, #0x0]\n"
+ "str q9, [x23, #0x10]\n"
+ "str q10, [x23, #0x20]\n"
+ "str q11, [x23, #0x30]\n"
"74:" // Height 2: Writeback done
- "subs x11, x11, #0x10\n"
+ "subs x10, x10, #0x10\n"
"bgt 39b\n"
"b 224f\n"
"75:" // Height 3
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"76:" // Height 3: Column loop
"tbz %x[flags], #0, 87f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "cmp x11, #0x10\n"
- "add x23, x24, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x10, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"bge 85f\n"
- "tbz x11, #3, 80f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x24], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "tbz x11, #2, 78f\n"
- "ld1 { v11.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "ld1 { v19.4s }, [x23], #0x10\n"
- "tbz x11, #1, 77f\n"
- "ldr d16, [x9], #0x8\n"
- "ldr d15, [x24], #0x8\n"
- "mov x25, #0x38\n"
- "ldr d24, [x23], #0x8\n"
- "tbz x11, #0, 84f\n"
- "ld1 { v16.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x24]\n"
- "ld1 { v24.s }[2], [x23]\n"
+ "tbz x10, #3, 80f\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x23], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "tbz x10, #2, 78f\n"
+ "ld1 { v11.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v19.4s }, [x22], #0x10\n"
+ "tbz x10, #1, 77f\n"
+ "mov x24, #0x38\n"
+ "ldr d16, [x28], #0x8\n"
+ "ldr d15, [x23], #0x8\n"
+ "ldr d24, [x22], #0x8\n"
+ "tbz x10, #0, 84f\n"
+ "ld1 { v16.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x23]\n"
+ "ld1 { v24.s }[2], [x22]\n"
"b 84f\n"
"77:" // Height 3: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x11, #0, 84f\n"
- "ldr s16, [x9, #0x0]\n"
- "ldr s15, [x24, #0x0]\n"
- "ldr s24, [x23, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x10, #0, 84f\n"
+ "ldr s16, [x28, #0x0]\n"
+ "ldr s15, [x23, #0x0]\n"
+ "ldr s24, [x22, #0x0]\n"
"b 84f\n"
"78:" // Height 3: Partial accumulate: partial_2_8
- "tbz x11, #1, 79f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d14, [x24], #0x8\n"
- "mov x25, #0x28\n"
- "ldr d19, [x23], #0x8\n"
- "tbz x11, #0, 84f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x24]\n"
- "ld1 { v19.s }[2], [x23]\n"
+ "tbz x10, #1, 79f\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
+ "mov x24, #0x28\n"
+ "ldr d19, [x22], #0x8\n"
+ "tbz x10, #0, 84f\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x23]\n"
+ "ld1 { v19.s }[2], [x22]\n"
"b 84f\n"
"79:" // Height 3: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x11, #0, 84f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s14, [x24, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x10, #0, 84f\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s14, [x23, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
"b 84f\n"
"80:" // Height 3: Partial accumulate: partial_4_0
- "tbz x11, #2, 82f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "tbz x11, #1, 81f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d13, [x24], #0x8\n"
- "mov x25, #0x18\n"
- "ldr d18, [x23], #0x8\n"
- "tbz x11, #0, 84f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x24]\n"
- "ld1 { v18.s }[2], [x23]\n"
+ "tbz x10, #2, 82f\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "tbz x10, #1, 81f\n"
+ "mov x24, #0x18\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d13, [x23], #0x8\n"
+ "ldr d18, [x22], #0x8\n"
+ "tbz x10, #0, 84f\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x23]\n"
+ "ld1 { v18.s }[2], [x22]\n"
"b 84f\n"
"81:" // Height 3: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x11, #0, 84f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s13, [x24, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x10, #0, 84f\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s13, [x23, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
"b 84f\n"
"82:" // Height 3: Partial accumulate: partial_2_0
- "tbz x11, #1, 83f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d12, [x24], #0x8\n"
- "mov x25, #0x8\n"
- "ldr d17, [x23], #0x8\n"
- "tbz x11, #0, 84f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x24]\n"
- "ld1 { v17.s }[2], [x23]\n"
+ "tbz x10, #1, 83f\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d12, [x23], #0x8\n"
+ "mov x24, #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "tbz x10, #0, 84f\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x23]\n"
+ "ld1 { v17.s }[2], [x22]\n"
"b 84f\n"
"83:" // Height 3: Partial accumulate: partial_1_0
- "ldr s9, [x9, #0x0]\n"
- "ldr s12, [x24, #0x0]\n"
- "mov x25, #0x0\n"
- "ldr s17, [x23, #0x0]\n"
+ "ldr s9, [x28, #0x0]\n"
+ "mov x24, #0x0\n"
+ "ldr s12, [x23, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
"84:" // Height 3: Partial accumulate: Done
- "sub x9, x9, x25\n"
+ "sub x28, x28, x24\n"
"b 86f\n"
"85:" // Height 3: full accumulate
- "ldr q9, [x9, #0x0]\n"
- "ldr q10, [x9, #0x10]\n"
- "ldr q11, [x9, #0x20]\n"
- "ldr q16, [x9, #0x30]\n"
- "ldr q12, [x24, #0x0]\n"
- "ldr q13, [x24, #0x10]\n"
- "ldr q14, [x24, #0x20]\n"
- "ldr q15, [x24, #0x30]\n"
- "ldr q17, [x23, #0x0]\n"
- "ldr q18, [x23, #0x10]\n"
- "ldr q19, [x23, #0x20]\n"
- "ldr q24, [x23, #0x30]\n"
+ "ldr q9, [x28, #0x0]\n"
+ "ldr q10, [x28, #0x10]\n"
+ "ldr q11, [x28, #0x20]\n"
+ "ldr q16, [x28, #0x30]\n"
+ "ldr q12, [x23, #0x0]\n"
+ "ldr q13, [x23, #0x10]\n"
+ "ldr q14, [x23, #0x20]\n"
+ "ldr q15, [x23, #0x30]\n"
+ "ldr q17, [x22, #0x0]\n"
+ "ldr q18, [x22, #0x10]\n"
+ "ldr q19, [x22, #0x20]\n"
+ "ldr q24, [x22, #0x30]\n"
"86:" // Height 3: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -949,281 +953,285 @@ void a64_hybrid_u8u32_mmla_6x16 (
"movi v22.4s, #0x0\n"
"movi v23.4s, #0x0\n"
"88:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"89:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 90f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 91f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 91f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
"b 91f\n"
"90:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
"91:" // Height 3: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 94f\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q2, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q3, [x24, #0x0]\n"
- "ldr q7, [x10, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "cmp x26, #0x20\n"
"blt 93f\n"
"92:" // Height 3: Multiply loop: Main loop head
+ "movi v4.16b, #0x0\n"
+ "ldr q2, [x24, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x23, #0x0]\n"
+ "add x24, x24, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e87a408 // ummla v8.4s, v0.16b, v7.16b\n"
+ "ldr q7, [x9, #0x0]\n"
+ "add x23, x23, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q6, [x9, #0x10]\n"
+ "sub x26, x26, #0x10\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "cmp x26, #0x20\n"
+ ".inst 0x6e87a408 // ummla v8.4s, v0.16b, v7.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e87a450 // ummla v16.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e86a40c // ummla v12.4s, v0.16b, v6.16b\n"
".inst 0x6e86a454 // ummla v20.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x6e87a409 // ummla v9.4s, v0.16b, v7.16b\n"
- "trn2 v3.2d, v3.2d, v4.2d\n"
".inst 0x6e87a451 // ummla v17.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
+ "ldr q7, [x9, #0x40]\n"
".inst 0x6e86a40d // ummla v13.4s, v0.16b, v6.16b\n"
- "sub x27, x27, #0x10\n"
".inst 0x6e86a455 // ummla v21.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x6e87a40a // ummla v10.4s, v0.16b, v7.16b\n"
- "add x26, x26, #0x10\n"
".inst 0x6e87a452 // ummla v18.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x6e86a40e // ummla v14.4s, v0.16b, v6.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x6e86a456 // ummla v22.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
+ "ldr q6, [x9, #0x70]\n"
".inst 0x6e87a40b // ummla v11.4s, v0.16b, v7.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x6e87a453 // ummla v19.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x80]\n"
+ "ldr q7, [x9, #0x80]\n"
".inst 0x6e86a40f // ummla v15.4s, v0.16b, v6.16b\n"
- "cmp x27, #0x20\n"
".inst 0x6e86a457 // ummla v23.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
- "ldr q2, [x25, #0x0]\n"
+ "ldr q6, [x9, #0x90]\n"
".inst 0x6e87a428 // ummla v8.4s, v1.16b, v7.16b\n"
".inst 0x6e87a470 // ummla v16.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
+ "ldr q7, [x9, #0xa0]\n"
".inst 0x6e86a42c // ummla v12.4s, v1.16b, v6.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e86a474 // ummla v20.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
+ "ldr q6, [x9, #0xb0]\n"
".inst 0x6e87a429 // ummla v9.4s, v1.16b, v7.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e87a471 // ummla v17.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
+ "ldr q7, [x9, #0xc0]\n"
".inst 0x6e86a42d // ummla v13.4s, v1.16b, v6.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e86a475 // ummla v21.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
+ "ldr q6, [x9, #0xd0]\n"
".inst 0x6e87a42a // ummla v10.4s, v1.16b, v7.16b\n"
".inst 0x6e87a472 // ummla v18.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
+ "ldr q7, [x9, #0xe0]\n"
".inst 0x6e86a42e // ummla v14.4s, v1.16b, v6.16b\n"
".inst 0x6e86a476 // ummla v22.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q6, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x6e87a42b // ummla v11.4s, v1.16b, v7.16b\n"
".inst 0x6e87a473 // ummla v19.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0x0]\n"
".inst 0x6e86a42f // ummla v15.4s, v1.16b, v6.16b\n"
- "ldr q1, [x26, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
".inst 0x6e86a477 // ummla v23.4s, v3.16b, v6.16b\n"
- "ldr q3, [x24, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
"bge 92b\n"
"93:" // Height 3: Multiply loop: Single iteration only
+ "movi v4.16b, #0x0\n"
+ "ldr q2, [x24, #0x0]\n"
+ "sub x26, x26, #0x10\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x23, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e87a408 // ummla v8.4s, v0.16b, v7.16b\n"
+ "ldr q7, [x9, #0x0]\n"
+ "add x24, x24, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q6, [x9, #0x10]\n"
+ "add x23, x23, #0x10\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ ".inst 0x6e87a408 // ummla v8.4s, v0.16b, v7.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e87a450 // ummla v16.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e86a40c // ummla v12.4s, v0.16b, v6.16b\n"
".inst 0x6e86a454 // ummla v20.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x6e87a409 // ummla v9.4s, v0.16b, v7.16b\n"
- "trn2 v3.2d, v3.2d, v4.2d\n"
".inst 0x6e87a451 // ummla v17.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
+ "ldr q7, [x9, #0x40]\n"
".inst 0x6e86a40d // ummla v13.4s, v0.16b, v6.16b\n"
- "add x26, x26, #0x10\n"
".inst 0x6e86a455 // ummla v21.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x6e87a40a // ummla v10.4s, v0.16b, v7.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x6e87a452 // ummla v18.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x6e86a40e // ummla v14.4s, v0.16b, v6.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x6e86a456 // ummla v22.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
+ "ldr q6, [x9, #0x70]\n"
".inst 0x6e87a40b // ummla v11.4s, v0.16b, v7.16b\n"
- "sub x27, x27, #0x10\n"
".inst 0x6e87a453 // ummla v19.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x80]\n"
+ "ldr q7, [x9, #0x80]\n"
".inst 0x6e86a40f // ummla v15.4s, v0.16b, v6.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e86a457 // ummla v23.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
+ "ldr q6, [x9, #0x90]\n"
".inst 0x6e87a428 // ummla v8.4s, v1.16b, v7.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e87a470 // ummla v16.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
+ "ldr q7, [x9, #0xa0]\n"
".inst 0x6e86a42c // ummla v12.4s, v1.16b, v6.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e86a474 // ummla v20.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
+ "ldr q6, [x9, #0xb0]\n"
".inst 0x6e87a429 // ummla v9.4s, v1.16b, v7.16b\n"
".inst 0x6e87a471 // ummla v17.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
+ "ldr q7, [x9, #0xc0]\n"
".inst 0x6e86a42d // ummla v13.4s, v1.16b, v6.16b\n"
".inst 0x6e86a475 // ummla v21.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
+ "ldr q6, [x9, #0xd0]\n"
".inst 0x6e87a42a // ummla v10.4s, v1.16b, v7.16b\n"
".inst 0x6e87a472 // ummla v18.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
+ "ldr q7, [x9, #0xe0]\n"
".inst 0x6e86a42e // ummla v14.4s, v1.16b, v6.16b\n"
".inst 0x6e86a476 // ummla v22.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q6, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x6e87a42b // ummla v11.4s, v1.16b, v7.16b\n"
".inst 0x6e87a473 // ummla v19.4s, v3.16b, v7.16b\n"
".inst 0x6e86a42f // ummla v15.4s, v1.16b, v6.16b\n"
".inst 0x6e86a477 // ummla v23.4s, v3.16b, v6.16b\n"
"94:" // Height 3: Multiply loop: Main loop skip
- "cbz x27, 101f\n"
- "cmp x27, #0x8\n"
+ "cbz x26, 101f\n"
+ "cmp x26, #0x8\n"
"blt 96f\n"
"95:" // Height 3: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d2, [x25], #0x8\n"
+ "movi v4.16b, #0x0\n"
+ "ldr d1, [x25], #0x8\n"
+ "sub x26, x26, #0x8\n"
+ "ldr d2, [x24], #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "ldr d3, [x24], #0x8\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr d3, [x23], #0x8\n"
+ "cmp x26, #0x8\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q6, [x9, #0x0]\n"
+ "ldr q7, [x9, #0x10]\n"
".inst 0x6e86a408 // ummla v8.4s, v0.16b, v6.16b\n"
- "ldr q7, [x10, #0x10]\n"
".inst 0x6e86a450 // ummla v16.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x6e87a40c // ummla v12.4s, v0.16b, v7.16b\n"
".inst 0x6e87a454 // ummla v20.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x30]\n"
+ "ldr q7, [x9, #0x30]\n"
".inst 0x6e86a409 // ummla v9.4s, v0.16b, v6.16b\n"
- "sub x27, x27, #0x8\n"
".inst 0x6e86a451 // ummla v17.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x6e87a40d // ummla v13.4s, v0.16b, v7.16b\n"
- "cmp x27, #0x8\n"
".inst 0x6e87a455 // ummla v21.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x50]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x6e86a40a // ummla v10.4s, v0.16b, v6.16b\n"
".inst 0x6e86a452 // ummla v18.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x6e87a40e // ummla v14.4s, v0.16b, v7.16b\n"
".inst 0x6e87a456 // ummla v22.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x70]\n"
+ "ldr q7, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
".inst 0x6e86a40b // ummla v11.4s, v0.16b, v6.16b\n"
- "add x10, x10, #0x80\n"
".inst 0x6e86a453 // ummla v19.4s, v2.16b, v6.16b\n"
".inst 0x6e87a40f // ummla v15.4s, v0.16b, v7.16b\n"
".inst 0x6e87a457 // ummla v23.4s, v2.16b, v7.16b\n"
"bge 95b\n"
+ "cbz x26, 101f\n"
"96:" // Height 3: Multiply loop: Skip odd blocks
- "cbz x27, 101f\n"
- "tbz x27, #2, 98f\n"
- "ldr s1, [x26], #0x4\n"
- "ldr s2, [x25], #0x4\n"
- "ldr s3, [x24], #0x4\n"
- "tbz x27, #1, 97f\n"
- "ld1 { v1.h }[2], [x26], #0x2\n"
- "ld1 { v2.h }[2], [x25], #0x2\n"
- "ld1 { v3.h }[2], [x24], #0x2\n"
- "tbz x27, #0, 100f\n"
- "ld1 { v1.b }[6], [x26]\n"
- "ld1 { v2.b }[6], [x25]\n"
- "ld1 { v3.b }[6], [x24]\n"
+ "tbz x26, #2, 98f\n"
+ "ldr s1, [x25], #0x4\n"
+ "ldr s2, [x24], #0x4\n"
+ "ldr s3, [x23], #0x4\n"
+ "tbz x26, #1, 97f\n"
+ "ld1 { v1.h }[2], [x25], #0x2\n"
+ "ld1 { v2.h }[2], [x24], #0x2\n"
+ "ld1 { v3.h }[2], [x23], #0x2\n"
+ "tbz x26, #0, 100f\n"
+ "ld1 { v1.b }[6], [x25]\n"
+ "ld1 { v2.b }[6], [x24]\n"
+ "ld1 { v3.b }[6], [x23]\n"
"b 100f\n"
"97:" // Height 3: Multiply loop: Ragged operand read: partial_1_4
- "tbz x27, #0, 100f\n"
- "ld1 { v1.b }[4], [x26]\n"
- "ld1 { v2.b }[4], [x25]\n"
- "ld1 { v3.b }[4], [x24]\n"
+ "tbz x26, #0, 100f\n"
+ "ld1 { v1.b }[4], [x25]\n"
+ "ld1 { v2.b }[4], [x24]\n"
+ "ld1 { v3.b }[4], [x23]\n"
"b 100f\n"
"98:" // Height 3: Multiply loop: Ragged operand read: partial_2_0
- "tbz x27, #1, 99f\n"
- "ldr h1, [x26], #0x2\n"
- "ldr h2, [x25], #0x2\n"
- "ldr h3, [x24], #0x2\n"
- "tbz x27, #0, 100f\n"
- "ld1 { v1.b }[2], [x26]\n"
- "ld1 { v2.b }[2], [x25]\n"
- "ld1 { v3.b }[2], [x24]\n"
+ "tbz x26, #1, 99f\n"
+ "ldr h1, [x25], #0x2\n"
+ "ldr h2, [x24], #0x2\n"
+ "ldr h3, [x23], #0x2\n"
+ "tbz x26, #0, 100f\n"
+ "ld1 { v1.b }[2], [x25]\n"
+ "ld1 { v2.b }[2], [x24]\n"
+ "ld1 { v3.b }[2], [x23]\n"
"b 100f\n"
"99:" // Height 3: Multiply loop: Ragged operand read: partial_1_0
- "ldr b1, [x26, #0x0]\n"
- "ldr b2, [x25, #0x0]\n"
- "ldr b3, [x24, #0x0]\n"
+ "ldr b1, [x25, #0x0]\n"
+ "ldr b2, [x24, #0x0]\n"
+ "ldr b3, [x23, #0x0]\n"
"100:" // Height 3: Multiply loop: Ragged operand read: Done
- "ldr q7, [x10, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
+ "movi v4.16b, #0x0\n"
+ "ldr q7, [x9, #0x0]\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q6, [x9, #0x10]\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
".inst 0x6e87a408 // ummla v8.4s, v0.16b, v7.16b\n"
".inst 0x6e87a450 // ummla v16.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x6e86a40c // ummla v12.4s, v0.16b, v6.16b\n"
".inst 0x6e86a454 // ummla v20.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x6e87a409 // ummla v9.4s, v0.16b, v7.16b\n"
".inst 0x6e87a451 // ummla v17.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
+ "ldr q7, [x9, #0x40]\n"
".inst 0x6e86a40d // ummla v13.4s, v0.16b, v6.16b\n"
".inst 0x6e86a455 // ummla v21.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x6e87a40a // ummla v10.4s, v0.16b, v7.16b\n"
".inst 0x6e87a452 // ummla v18.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x6e86a40e // ummla v14.4s, v0.16b, v6.16b\n"
".inst 0x6e86a456 // ummla v22.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
- "add x10, x10, #0x80\n"
+ "ldr q6, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
".inst 0x6e87a40b // ummla v11.4s, v0.16b, v7.16b\n"
".inst 0x6e87a453 // ummla v19.4s, v2.16b, v7.16b\n"
".inst 0x6e86a40f // ummla v15.4s, v0.16b, v6.16b\n"
".inst 0x6e86a457 // ummla v23.4s, v2.16b, v6.16b\n"
"101:" // Height 3: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 89b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
- "cmp x11, #0x10\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "cmp x10, #0x10\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
+ "add x23, x28, x19, LSL #2\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
- "uzp1 v13.2d, v10.2d, v14.2d\n"
- "prfm pstl1keep, [x24, #0x0]\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "uzp1 v13.2d, v10.2d, v14.2d\n"
+ "add x22, x23, x19, LSL #2\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
"uzp1 v16.2d, v16.2d, v20.2d\n"
@@ -1231,234 +1239,234 @@ void a64_hybrid_u8u32_mmla_6x16 (
"uzp1 v18.2d, v18.2d, v22.2d\n"
"uzp1 v19.2d, v19.2d, v23.2d\n"
"bge 110f\n"
- "tbz x11, #3, 105f\n"
- "st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x24], #0x10\n"
- "st1 { v9.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "tbz x11, #2, 103f\n"
- "st1 { v13.4s }, [x9], #0x10\n"
- "st1 { v10.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "tbz x11, #1, 102f\n"
- "str d14, [x9], #0x8\n"
- "str d11, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
- "tbz x11, #0, 109f\n"
- "st1 { v14.s }[2], [x9]\n"
- "st1 { v11.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
+ "tbz x10, #3, 105f\n"
+ "st1 { v7.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x23], #0x10\n"
+ "st1 { v9.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
+ "tbz x10, #2, 103f\n"
+ "st1 { v13.4s }, [x28], #0x10\n"
+ "st1 { v10.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
+ "tbz x10, #1, 102f\n"
+ "str d14, [x28], #0x8\n"
+ "str d11, [x23], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "tbz x10, #0, 109f\n"
+ "st1 { v14.s }[2], [x28]\n"
+ "st1 { v11.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x22]\n"
"b 109f\n"
"102:" // Height 3: Partial direct writeback: partial_1_12
- "tbz x11, #0, 109f\n"
- "str s14, [x9, #0x0]\n"
- "str s11, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
+ "tbz x10, #0, 109f\n"
+ "str s14, [x28, #0x0]\n"
+ "str s11, [x23, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
"b 109f\n"
"103:" // Height 3: Partial direct writeback: partial_2_8
- "tbz x11, #1, 104f\n"
- "str d13, [x9], #0x8\n"
- "str d10, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
- "tbz x11, #0, 109f\n"
- "st1 { v13.s }[2], [x9]\n"
- "st1 { v10.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
+ "tbz x10, #1, 104f\n"
+ "str d13, [x28], #0x8\n"
+ "str d10, [x23], #0x8\n"
+ "str d18, [x22], #0x8\n"
+ "tbz x10, #0, 109f\n"
+ "st1 { v13.s }[2], [x28]\n"
+ "st1 { v10.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x22]\n"
"b 109f\n"
"104:" // Height 3: Partial direct writeback: partial_1_8
- "tbz x11, #0, 109f\n"
- "str s13, [x9, #0x0]\n"
- "str s10, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
+ "tbz x10, #0, 109f\n"
+ "str s13, [x28, #0x0]\n"
+ "str s10, [x23, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
"b 109f\n"
"105:" // Height 3: Partial direct writeback: partial_4_0
- "tbz x11, #2, 107f\n"
- "st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "tbz x11, #1, 106f\n"
- "str d12, [x9], #0x8\n"
- "str d9, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
- "tbz x11, #0, 109f\n"
- "st1 { v12.s }[2], [x9]\n"
- "st1 { v9.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
+ "tbz x10, #2, 107f\n"
+ "st1 { v7.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "tbz x10, #1, 106f\n"
+ "str d12, [x28], #0x8\n"
+ "str d9, [x23], #0x8\n"
+ "str d17, [x22], #0x8\n"
+ "tbz x10, #0, 109f\n"
+ "st1 { v12.s }[2], [x28]\n"
+ "st1 { v9.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x22]\n"
"b 109f\n"
"106:" // Height 3: Partial direct writeback: partial_1_4
- "tbz x11, #0, 109f\n"
- "str s12, [x9, #0x0]\n"
- "str s9, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
+ "tbz x10, #0, 109f\n"
+ "str s12, [x28, #0x0]\n"
+ "str s9, [x23, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
"b 109f\n"
"107:" // Height 3: Partial direct writeback: partial_2_0
- "tbz x11, #1, 108f\n"
- "str d7, [x9], #0x8\n"
- "str d8, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "tbz x11, #0, 109f\n"
- "st1 { v7.s }[2], [x9]\n"
- "st1 { v8.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
+ "tbz x10, #1, 108f\n"
+ "str d7, [x28], #0x8\n"
+ "str d8, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "tbz x10, #0, 109f\n"
+ "st1 { v7.s }[2], [x28]\n"
+ "st1 { v8.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x22]\n"
"b 109f\n"
"108:" // Height 3: Partial direct writeback: partial_1_0
- "str s7, [x9, #0x0]\n"
- "str s8, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
+ "str s7, [x28, #0x0]\n"
+ "str s8, [x23, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
"109:" // Height 3: Partial direct writeback: Done
"b 111f\n"
"110:" // Height 3: Full writeback
- "str q7, [x9, #0x0]\n"
- "str q12, [x9, #0x10]\n"
- "str q13, [x9, #0x20]\n"
- "str q14, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q8, [x24, #0x0]\n"
- "str q9, [x24, #0x10]\n"
- "str q10, [x24, #0x20]\n"
- "str q11, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
+ "str q7, [x28, #0x0]\n"
+ "str q12, [x28, #0x10]\n"
+ "str q13, [x28, #0x20]\n"
+ "str q14, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q8, [x23, #0x0]\n"
+ "str q9, [x23, #0x10]\n"
+ "str q10, [x23, #0x20]\n"
+ "str q11, [x23, #0x30]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q17, [x22, #0x10]\n"
+ "str q18, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
"111:" // Height 3: Writeback done
- "subs x11, x11, #0x10\n"
+ "subs x10, x10, #0x10\n"
"bgt 76b\n"
"b 224f\n"
"112:" // Height 4
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"113:" // Height 4: Column loop
"tbz %x[flags], #0, 124f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "cmp x11, #0x10\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x10, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"bge 122f\n"
- "tbz x11, #3, 117f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x24], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "tbz x11, #2, 115f\n"
- "ld1 { v11.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "ld1 { v19.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "tbz x11, #1, 114f\n"
- "ldr d16, [x9], #0x8\n"
- "ldr d15, [x24], #0x8\n"
- "mov x25, #0x38\n"
- "ldr d24, [x23], #0x8\n"
- "ldr d23, [x22], #0x8\n"
- "tbz x11, #0, 121f\n"
- "ld1 { v16.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x24]\n"
- "ld1 { v24.s }[2], [x23]\n"
- "ld1 { v23.s }[2], [x22]\n"
+ "tbz x10, #3, 117f\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x23], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "tbz x10, #2, 115f\n"
+ "ld1 { v11.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v19.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
+ "tbz x10, #1, 114f\n"
+ "mov x24, #0x38\n"
+ "ldr d16, [x28], #0x8\n"
+ "ldr d15, [x23], #0x8\n"
+ "ldr d24, [x22], #0x8\n"
+ "ldr d23, [x21], #0x8\n"
+ "tbz x10, #0, 121f\n"
+ "ld1 { v16.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x23]\n"
+ "ld1 { v24.s }[2], [x22]\n"
+ "ld1 { v23.s }[2], [x21]\n"
"b 121f\n"
"114:" // Height 4: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x11, #0, 121f\n"
- "ldr s16, [x9, #0x0]\n"
- "ldr s15, [x24, #0x0]\n"
- "ldr s24, [x23, #0x0]\n"
- "ldr s23, [x22, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x10, #0, 121f\n"
+ "ldr s16, [x28, #0x0]\n"
+ "ldr s15, [x23, #0x0]\n"
+ "ldr s24, [x22, #0x0]\n"
+ "ldr s23, [x21, #0x0]\n"
"b 121f\n"
"115:" // Height 4: Partial accumulate: partial_2_8
- "tbz x11, #1, 116f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d14, [x24], #0x8\n"
- "mov x25, #0x28\n"
- "ldr d19, [x23], #0x8\n"
- "ldr d22, [x22], #0x8\n"
- "tbz x11, #0, 121f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x24]\n"
- "ld1 { v19.s }[2], [x23]\n"
- "ld1 { v22.s }[2], [x22]\n"
+ "tbz x10, #1, 116f\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
+ "mov x24, #0x28\n"
+ "ldr d19, [x22], #0x8\n"
+ "ldr d22, [x21], #0x8\n"
+ "tbz x10, #0, 121f\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x23]\n"
+ "ld1 { v19.s }[2], [x22]\n"
+ "ld1 { v22.s }[2], [x21]\n"
"b 121f\n"
"116:" // Height 4: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x11, #0, 121f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s14, [x24, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s22, [x22, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x10, #0, 121f\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s14, [x23, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
+ "ldr s22, [x21, #0x0]\n"
"b 121f\n"
"117:" // Height 4: Partial accumulate: partial_4_0
- "tbz x11, #2, 119f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "tbz x11, #1, 118f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d13, [x24], #0x8\n"
- "mov x25, #0x18\n"
- "ldr d18, [x23], #0x8\n"
- "ldr d21, [x22], #0x8\n"
- "tbz x11, #0, 121f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x24]\n"
- "ld1 { v18.s }[2], [x23]\n"
- "ld1 { v21.s }[2], [x22]\n"
+ "tbz x10, #2, 119f\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "tbz x10, #1, 118f\n"
+ "mov x24, #0x18\n"
+ "ldr d10, [x28], #0x8\n"
+ "ldr d13, [x23], #0x8\n"
+ "ldr d18, [x22], #0x8\n"
+ "ldr d21, [x21], #0x8\n"
+ "tbz x10, #0, 121f\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x23]\n"
+ "ld1 { v18.s }[2], [x22]\n"
+ "ld1 { v21.s }[2], [x21]\n"
"b 121f\n"
"118:" // Height 4: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x11, #0, 121f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s13, [x24, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
- "ldr s21, [x22, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x10, #0, 121f\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s13, [x23, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
+ "ldr s21, [x21, #0x0]\n"
"b 121f\n"
"119:" // Height 4: Partial accumulate: partial_2_0
- "tbz x11, #1, 120f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d12, [x24], #0x8\n"
- "mov x25, #0x8\n"
- "ldr d17, [x23], #0x8\n"
- "ldr d20, [x22], #0x8\n"
- "tbz x11, #0, 121f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x24]\n"
- "ld1 { v17.s }[2], [x23]\n"
- "ld1 { v20.s }[2], [x22]\n"
+ "tbz x10, #1, 120f\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d12, [x23], #0x8\n"
+ "mov x24, #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "ldr d20, [x21], #0x8\n"
+ "tbz x10, #0, 121f\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x23]\n"
+ "ld1 { v17.s }[2], [x22]\n"
+ "ld1 { v20.s }[2], [x21]\n"
"b 121f\n"
"120:" // Height 4: Partial accumulate: partial_1_0
- "ldr s9, [x9, #0x0]\n"
- "ldr s12, [x24, #0x0]\n"
- "mov x25, #0x0\n"
- "ldr s17, [x23, #0x0]\n"
- "ldr s20, [x22, #0x0]\n"
+ "ldr s9, [x28, #0x0]\n"
+ "mov x24, #0x0\n"
+ "ldr s12, [x23, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
+ "ldr s20, [x21, #0x0]\n"
"121:" // Height 4: Partial accumulate: Done
- "sub x9, x9, x25\n"
+ "sub x28, x28, x24\n"
"b 123f\n"
"122:" // Height 4: full accumulate
- "ldr q9, [x9, #0x0]\n"
- "ldr q10, [x9, #0x10]\n"
- "ldr q11, [x9, #0x20]\n"
- "ldr q16, [x9, #0x30]\n"
- "ldr q12, [x24, #0x0]\n"
- "ldr q13, [x24, #0x10]\n"
- "ldr q14, [x24, #0x20]\n"
- "ldr q15, [x24, #0x30]\n"
- "ldr q17, [x23, #0x0]\n"
- "ldr q18, [x23, #0x10]\n"
- "ldr q19, [x23, #0x20]\n"
- "ldr q24, [x23, #0x30]\n"
- "ldr q20, [x22, #0x0]\n"
- "ldr q21, [x22, #0x10]\n"
- "ldr q22, [x22, #0x20]\n"
- "ldr q23, [x22, #0x30]\n"
+ "ldr q9, [x28, #0x0]\n"
+ "ldr q10, [x28, #0x10]\n"
+ "ldr q11, [x28, #0x20]\n"
+ "ldr q16, [x28, #0x30]\n"
+ "ldr q12, [x23, #0x0]\n"
+ "ldr q13, [x23, #0x10]\n"
+ "ldr q14, [x23, #0x20]\n"
+ "ldr q15, [x23, #0x30]\n"
+ "ldr q17, [x22, #0x0]\n"
+ "ldr q18, [x22, #0x10]\n"
+ "ldr q19, [x22, #0x20]\n"
+ "ldr q24, [x22, #0x30]\n"
+ "ldr q20, [x21, #0x0]\n"
+ "ldr q21, [x21, #0x10]\n"
+ "ldr q22, [x21, #0x20]\n"
+ "ldr q23, [x21, #0x30]\n"
"123:" // Height 4: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -1495,301 +1503,301 @@ void a64_hybrid_u8u32_mmla_6x16 (
"movi v22.4s, #0x0\n"
"movi v23.4s, #0x0\n"
"125:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"126:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 127f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 128f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 128f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
"b 128f\n"
"127:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
"128:" // Height 4: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 131f\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q2, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q3, [x24, #0x0]\n"
- "ldr q4, [x23, #0x0]\n"
- "ldr q7, [x10, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "ldr q2, [x24, #0x0]\n"
+ "cmp x26, #0x20\n"
"blt 130f\n"
"129:" // Height 4: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x23, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e87a408 // ummla v8.4s, v0.16b, v7.16b\n"
- "sub x27, x27, #0x10\n"
+ "ldr q4, [x22, #0x0]\n"
+ "add x24, x24, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q7, [x9, #0x0]\n"
+ "add x23, x23, #0x10\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "ldr q6, [x9, #0x10]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x6e87a408 // ummla v8.4s, v0.16b, v7.16b\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x26, x26, #0x10\n"
".inst 0x6e87a450 // ummla v16.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
+ "cmp x26, #0x20\n"
".inst 0x6e86a40c // ummla v12.4s, v0.16b, v6.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e86a454 // ummla v20.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e87a409 // ummla v9.4s, v0.16b, v7.16b\n"
- "trn2 v3.2d, v3.2d, v4.2d\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e87a451 // ummla v17.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
+ "ldr q7, [x9, #0x40]\n"
".inst 0x6e86a40d // ummla v13.4s, v0.16b, v6.16b\n"
- "add x26, x26, #0x10\n"
".inst 0x6e86a455 // ummla v21.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x6e87a40a // ummla v10.4s, v0.16b, v7.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x6e87a452 // ummla v18.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x6e86a40e // ummla v14.4s, v0.16b, v6.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x6e86a456 // ummla v22.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
+ "ldr q6, [x9, #0x70]\n"
".inst 0x6e87a40b // ummla v11.4s, v0.16b, v7.16b\n"
- "add x23, x23, #0x10\n"
- "ldr q4, [x23, #0x0]\n"
".inst 0x6e87a453 // ummla v19.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x80]\n"
+ "ldr q7, [x9, #0x80]\n"
".inst 0x6e86a40f // ummla v15.4s, v0.16b, v6.16b\n"
".inst 0x6e86a457 // ummla v23.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
- "ldr q2, [x25, #0x0]\n"
+ "ldr q6, [x9, #0x90]\n"
+ "ldr q2, [x24, #0x0]\n"
".inst 0x6e87a428 // ummla v8.4s, v1.16b, v7.16b\n"
".inst 0x6e87a470 // ummla v16.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
+ "ldr q7, [x9, #0xa0]\n"
".inst 0x6e86a42c // ummla v12.4s, v1.16b, v6.16b\n"
- "cmp x27, #0x20\n"
".inst 0x6e86a474 // ummla v20.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
+ "ldr q6, [x9, #0xb0]\n"
".inst 0x6e87a429 // ummla v9.4s, v1.16b, v7.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e87a471 // ummla v17.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
+ "ldr q7, [x9, #0xc0]\n"
".inst 0x6e86a42d // ummla v13.4s, v1.16b, v6.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e86a475 // ummla v21.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
+ "ldr q6, [x9, #0xd0]\n"
".inst 0x6e87a42a // ummla v10.4s, v1.16b, v7.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e87a472 // ummla v18.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
+ "ldr q7, [x9, #0xe0]\n"
".inst 0x6e86a42e // ummla v14.4s, v1.16b, v6.16b\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e86a476 // ummla v22.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q6, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x6e87a42b // ummla v11.4s, v1.16b, v7.16b\n"
".inst 0x6e87a473 // ummla v19.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0x0]\n"
".inst 0x6e86a42f // ummla v15.4s, v1.16b, v6.16b\n"
- "ldr q1, [x26, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
".inst 0x6e86a477 // ummla v23.4s, v3.16b, v6.16b\n"
- "ldr q3, [x24, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
"bge 129b\n"
"130:" // Height 4: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x23, #0x0]\n"
+ "sub x26, x26, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e87a408 // ummla v8.4s, v0.16b, v7.16b\n"
- "add x26, x26, #0x10\n"
+ "ldr q4, [x22, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q7, [x9, #0x0]\n"
+ "add x24, x24, #0x10\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "ldr q6, [x9, #0x10]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x6e87a408 // ummla v8.4s, v0.16b, v7.16b\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x22, x22, #0x10\n"
".inst 0x6e87a450 // ummla v16.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x6e86a40c // ummla v12.4s, v0.16b, v6.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e86a454 // ummla v20.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e87a409 // ummla v9.4s, v0.16b, v7.16b\n"
- "trn2 v3.2d, v3.2d, v4.2d\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e87a451 // ummla v17.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
+ "ldr q7, [x9, #0x40]\n"
".inst 0x6e86a40d // ummla v13.4s, v0.16b, v6.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x6e86a455 // ummla v21.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x6e87a40a // ummla v10.4s, v0.16b, v7.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x6e87a452 // ummla v18.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x6e86a40e // ummla v14.4s, v0.16b, v6.16b\n"
- "add x23, x23, #0x10\n"
".inst 0x6e86a456 // ummla v22.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
+ "ldr q6, [x9, #0x70]\n"
".inst 0x6e87a40b // ummla v11.4s, v0.16b, v7.16b\n"
- "sub x27, x27, #0x10\n"
".inst 0x6e87a453 // ummla v19.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x80]\n"
+ "ldr q7, [x9, #0x80]\n"
".inst 0x6e86a40f // ummla v15.4s, v0.16b, v6.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e86a457 // ummla v23.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
+ "ldr q6, [x9, #0x90]\n"
".inst 0x6e87a428 // ummla v8.4s, v1.16b, v7.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e87a470 // ummla v16.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
+ "ldr q7, [x9, #0xa0]\n"
".inst 0x6e86a42c // ummla v12.4s, v1.16b, v6.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e86a474 // ummla v20.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
+ "ldr q6, [x9, #0xb0]\n"
".inst 0x6e87a429 // ummla v9.4s, v1.16b, v7.16b\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e87a471 // ummla v17.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
+ "ldr q7, [x9, #0xc0]\n"
".inst 0x6e86a42d // ummla v13.4s, v1.16b, v6.16b\n"
".inst 0x6e86a475 // ummla v21.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
+ "ldr q6, [x9, #0xd0]\n"
".inst 0x6e87a42a // ummla v10.4s, v1.16b, v7.16b\n"
".inst 0x6e87a472 // ummla v18.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
+ "ldr q7, [x9, #0xe0]\n"
".inst 0x6e86a42e // ummla v14.4s, v1.16b, v6.16b\n"
".inst 0x6e86a476 // ummla v22.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q6, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x6e87a42b // ummla v11.4s, v1.16b, v7.16b\n"
".inst 0x6e87a473 // ummla v19.4s, v3.16b, v7.16b\n"
".inst 0x6e86a42f // ummla v15.4s, v1.16b, v6.16b\n"
".inst 0x6e86a477 // ummla v23.4s, v3.16b, v6.16b\n"
"131:" // Height 4: Multiply loop: Main loop skip
- "cbz x27, 138f\n"
- "cmp x27, #0x8\n"
+ "cbz x26, 138f\n"
+ "cmp x26, #0x8\n"
"blt 133f\n"
"132:" // Height 4: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d2, [x25], #0x8\n"
+ "ldr d1, [x25], #0x8\n"
+ "sub x26, x26, #0x8\n"
+ "ldr d2, [x24], #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "sub x27, x27, #0x8\n"
- "ldr d3, [x24], #0x8\n"
- "ldr d4, [x23], #0x8\n"
+ "ldr d3, [x23], #0x8\n"
+ "cmp x26, #0x8\n"
+ "ldr d4, [x22], #0x8\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
- "cmp x27, #0x8\n"
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "ldr q7, [x9, #0x10]\n"
".inst 0x6e86a408 // ummla v8.4s, v0.16b, v6.16b\n"
".inst 0x6e86a450 // ummla v16.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x6e87a40c // ummla v12.4s, v0.16b, v7.16b\n"
".inst 0x6e87a454 // ummla v20.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x30]\n"
+ "ldr q7, [x9, #0x30]\n"
".inst 0x6e86a409 // ummla v9.4s, v0.16b, v6.16b\n"
".inst 0x6e86a451 // ummla v17.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x6e87a40d // ummla v13.4s, v0.16b, v7.16b\n"
".inst 0x6e87a455 // ummla v21.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x50]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x6e86a40a // ummla v10.4s, v0.16b, v6.16b\n"
".inst 0x6e86a452 // ummla v18.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x6e87a40e // ummla v14.4s, v0.16b, v7.16b\n"
".inst 0x6e87a456 // ummla v22.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x70]\n"
- "add x10, x10, #0x80\n"
+ "ldr q7, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
".inst 0x6e86a40b // ummla v11.4s, v0.16b, v6.16b\n"
".inst 0x6e86a453 // ummla v19.4s, v2.16b, v6.16b\n"
".inst 0x6e87a40f // ummla v15.4s, v0.16b, v7.16b\n"
".inst 0x6e87a457 // ummla v23.4s, v2.16b, v7.16b\n"
"bge 132b\n"
+ "cbz x26, 138f\n"
"133:" // Height 4: Multiply loop: Skip odd blocks
- "cbz x27, 138f\n"
- "tbz x27, #2, 135f\n"
- "ldr s1, [x26], #0x4\n"
- "ldr s2, [x25], #0x4\n"
- "ldr s3, [x24], #0x4\n"
- "ldr s4, [x23], #0x4\n"
- "tbz x27, #1, 134f\n"
- "ld1 { v1.h }[2], [x26], #0x2\n"
- "ld1 { v2.h }[2], [x25], #0x2\n"
- "ld1 { v3.h }[2], [x24], #0x2\n"
- "ld1 { v4.h }[2], [x23], #0x2\n"
- "tbz x27, #0, 137f\n"
- "ld1 { v1.b }[6], [x26]\n"
- "ld1 { v2.b }[6], [x25]\n"
- "ld1 { v3.b }[6], [x24]\n"
- "ld1 { v4.b }[6], [x23]\n"
+ "tbz x26, #2, 135f\n"
+ "ldr s1, [x25], #0x4\n"
+ "ldr s2, [x24], #0x4\n"
+ "ldr s3, [x23], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
+ "tbz x26, #1, 134f\n"
+ "ld1 { v1.h }[2], [x25], #0x2\n"
+ "ld1 { v2.h }[2], [x24], #0x2\n"
+ "ld1 { v3.h }[2], [x23], #0x2\n"
+ "ld1 { v4.h }[2], [x22], #0x2\n"
+ "tbz x26, #0, 137f\n"
+ "ld1 { v1.b }[6], [x25]\n"
+ "ld1 { v2.b }[6], [x24]\n"
+ "ld1 { v3.b }[6], [x23]\n"
+ "ld1 { v4.b }[6], [x22]\n"
"b 137f\n"
"134:" // Height 4: Multiply loop: Ragged operand read: partial_1_4
- "tbz x27, #0, 137f\n"
- "ld1 { v1.b }[4], [x26]\n"
- "ld1 { v2.b }[4], [x25]\n"
- "ld1 { v3.b }[4], [x24]\n"
- "ld1 { v4.b }[4], [x23]\n"
+ "tbz x26, #0, 137f\n"
+ "ld1 { v1.b }[4], [x25]\n"
+ "ld1 { v2.b }[4], [x24]\n"
+ "ld1 { v3.b }[4], [x23]\n"
+ "ld1 { v4.b }[4], [x22]\n"
"b 137f\n"
"135:" // Height 4: Multiply loop: Ragged operand read: partial_2_0
- "tbz x27, #1, 136f\n"
- "ldr h1, [x26], #0x2\n"
- "ldr h2, [x25], #0x2\n"
- "ldr h3, [x24], #0x2\n"
- "ldr h4, [x23], #0x2\n"
- "tbz x27, #0, 137f\n"
- "ld1 { v1.b }[2], [x26]\n"
- "ld1 { v2.b }[2], [x25]\n"
- "ld1 { v3.b }[2], [x24]\n"
- "ld1 { v4.b }[2], [x23]\n"
+ "tbz x26, #1, 136f\n"
+ "ldr h1, [x25], #0x2\n"
+ "ldr h2, [x24], #0x2\n"
+ "ldr h3, [x23], #0x2\n"
+ "ldr h4, [x22], #0x2\n"
+ "tbz x26, #0, 137f\n"
+ "ld1 { v1.b }[2], [x25]\n"
+ "ld1 { v2.b }[2], [x24]\n"
+ "ld1 { v3.b }[2], [x23]\n"
+ "ld1 { v4.b }[2], [x22]\n"
"b 137f\n"
"136:" // Height 4: Multiply loop: Ragged operand read: partial_1_0
- "ldr b1, [x26, #0x0]\n"
- "ldr b2, [x25, #0x0]\n"
- "ldr b3, [x24, #0x0]\n"
- "ldr b4, [x23, #0x0]\n"
+ "ldr b1, [x25, #0x0]\n"
+ "ldr b2, [x24, #0x0]\n"
+ "ldr b3, [x23, #0x0]\n"
+ "ldr b4, [x22, #0x0]\n"
"137:" // Height 4: Multiply loop: Ragged operand read: Done
- "ldr q7, [x10, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q7, [x9, #0x0]\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q6, [x9, #0x10]\n"
".inst 0x6e87a408 // ummla v8.4s, v0.16b, v7.16b\n"
".inst 0x6e87a450 // ummla v16.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x6e86a40c // ummla v12.4s, v0.16b, v6.16b\n"
".inst 0x6e86a454 // ummla v20.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x6e87a409 // ummla v9.4s, v0.16b, v7.16b\n"
".inst 0x6e87a451 // ummla v17.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
+ "ldr q7, [x9, #0x40]\n"
".inst 0x6e86a40d // ummla v13.4s, v0.16b, v6.16b\n"
".inst 0x6e86a455 // ummla v21.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x6e87a40a // ummla v10.4s, v0.16b, v7.16b\n"
".inst 0x6e87a452 // ummla v18.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x6e86a40e // ummla v14.4s, v0.16b, v6.16b\n"
".inst 0x6e86a456 // ummla v22.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
- "add x10, x10, #0x80\n"
+ "ldr q6, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
".inst 0x6e87a40b // ummla v11.4s, v0.16b, v7.16b\n"
".inst 0x6e87a453 // ummla v19.4s, v2.16b, v7.16b\n"
".inst 0x6e86a40f // ummla v15.4s, v0.16b, v6.16b\n"
".inst 0x6e86a457 // ummla v23.4s, v2.16b, v6.16b\n"
"138:" // Height 4: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 126b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
- "add x22, x23, x20, LSL #2\n"
- "cmp x11, #0x10\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "cmp x10, #0x10\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
+ "add x23, x28, x19, LSL #2\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
- "uzp1 v14.2d, v11.2d, v15.2d\n"
- "prfm pstl1keep, [x23, #0x0]\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "uzp1 v14.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
"uzp1 v15.2d, v16.2d, v20.2d\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
@@ -1800,275 +1808,275 @@ void a64_hybrid_u8u32_mmla_6x16 (
"uzp1 v22.2d, v19.2d, v23.2d\n"
"uzp2 v19.2d, v19.2d, v23.2d\n"
"bge 147f\n"
- "tbz x11, #3, 142f\n"
- "st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x24], #0x10\n"
- "st1 { v9.4s }, [x24], #0x10\n"
- "st1 { v15.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v16.4s }, [x22], #0x10\n"
- "st1 { v17.4s }, [x22], #0x10\n"
- "tbz x11, #2, 140f\n"
- "st1 { v13.4s }, [x9], #0x10\n"
- "st1 { v10.4s }, [x24], #0x10\n"
- "st1 { v21.4s }, [x23], #0x10\n"
- "st1 { v18.4s }, [x22], #0x10\n"
- "tbz x11, #1, 139f\n"
- "str d14, [x9], #0x8\n"
- "str d11, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
- "str d19, [x22], #0x8\n"
- "tbz x11, #0, 146f\n"
- "st1 { v14.s }[2], [x9]\n"
- "st1 { v11.s }[2], [x24]\n"
- "st1 { v22.s }[2], [x23]\n"
- "st1 { v19.s }[2], [x22]\n"
+ "tbz x10, #3, 142f\n"
+ "st1 { v7.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x23], #0x10\n"
+ "st1 { v9.4s }, [x23], #0x10\n"
+ "st1 { v15.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v16.4s }, [x21], #0x10\n"
+ "st1 { v17.4s }, [x21], #0x10\n"
+ "tbz x10, #2, 140f\n"
+ "st1 { v13.4s }, [x28], #0x10\n"
+ "st1 { v10.4s }, [x23], #0x10\n"
+ "st1 { v21.4s }, [x22], #0x10\n"
+ "st1 { v18.4s }, [x21], #0x10\n"
+ "tbz x10, #1, 139f\n"
+ "str d14, [x28], #0x8\n"
+ "str d11, [x23], #0x8\n"
+ "str d22, [x22], #0x8\n"
+ "str d19, [x21], #0x8\n"
+ "tbz x10, #0, 146f\n"
+ "st1 { v14.s }[2], [x28]\n"
+ "st1 { v11.s }[2], [x23]\n"
+ "st1 { v22.s }[2], [x22]\n"
+ "st1 { v19.s }[2], [x21]\n"
"b 146f\n"
"139:" // Height 4: Partial direct writeback: partial_1_12
- "tbz x11, #0, 146f\n"
- "str s14, [x9, #0x0]\n"
- "str s11, [x24, #0x0]\n"
- "str s22, [x23, #0x0]\n"
- "str s19, [x22, #0x0]\n"
+ "tbz x10, #0, 146f\n"
+ "str s14, [x28, #0x0]\n"
+ "str s11, [x23, #0x0]\n"
+ "str s22, [x22, #0x0]\n"
+ "str s19, [x21, #0x0]\n"
"b 146f\n"
"140:" // Height 4: Partial direct writeback: partial_2_8
- "tbz x11, #1, 141f\n"
- "str d13, [x9], #0x8\n"
- "str d10, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
- "str d18, [x22], #0x8\n"
- "tbz x11, #0, 146f\n"
- "st1 { v13.s }[2], [x9]\n"
- "st1 { v10.s }[2], [x24]\n"
- "st1 { v21.s }[2], [x23]\n"
- "st1 { v18.s }[2], [x22]\n"
+ "tbz x10, #1, 141f\n"
+ "str d13, [x28], #0x8\n"
+ "str d10, [x23], #0x8\n"
+ "str d21, [x22], #0x8\n"
+ "str d18, [x21], #0x8\n"
+ "tbz x10, #0, 146f\n"
+ "st1 { v13.s }[2], [x28]\n"
+ "st1 { v10.s }[2], [x23]\n"
+ "st1 { v21.s }[2], [x22]\n"
+ "st1 { v18.s }[2], [x21]\n"
"b 146f\n"
"141:" // Height 4: Partial direct writeback: partial_1_8
- "tbz x11, #0, 146f\n"
- "str s13, [x9, #0x0]\n"
- "str s10, [x24, #0x0]\n"
- "str s21, [x23, #0x0]\n"
- "str s18, [x22, #0x0]\n"
+ "tbz x10, #0, 146f\n"
+ "str s13, [x28, #0x0]\n"
+ "str s10, [x23, #0x0]\n"
+ "str s21, [x22, #0x0]\n"
+ "str s18, [x21, #0x0]\n"
"b 146f\n"
"142:" // Height 4: Partial direct writeback: partial_4_0
- "tbz x11, #2, 144f\n"
- "st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x24], #0x10\n"
- "st1 { v15.4s }, [x23], #0x10\n"
- "st1 { v16.4s }, [x22], #0x10\n"
- "tbz x11, #1, 143f\n"
- "str d12, [x9], #0x8\n"
- "str d9, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d17, [x22], #0x8\n"
- "tbz x11, #0, 146f\n"
- "st1 { v12.s }[2], [x9]\n"
- "st1 { v9.s }[2], [x24]\n"
- "st1 { v20.s }[2], [x23]\n"
- "st1 { v17.s }[2], [x22]\n"
+ "tbz x10, #2, 144f\n"
+ "st1 { v7.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x23], #0x10\n"
+ "st1 { v15.4s }, [x22], #0x10\n"
+ "st1 { v16.4s }, [x21], #0x10\n"
+ "tbz x10, #1, 143f\n"
+ "str d12, [x28], #0x8\n"
+ "str d9, [x23], #0x8\n"
+ "str d20, [x22], #0x8\n"
+ "str d17, [x21], #0x8\n"
+ "tbz x10, #0, 146f\n"
+ "st1 { v12.s }[2], [x28]\n"
+ "st1 { v9.s }[2], [x23]\n"
+ "st1 { v20.s }[2], [x22]\n"
+ "st1 { v17.s }[2], [x21]\n"
"b 146f\n"
"143:" // Height 4: Partial direct writeback: partial_1_4
- "tbz x11, #0, 146f\n"
- "str s12, [x9, #0x0]\n"
- "str s9, [x24, #0x0]\n"
- "str s20, [x23, #0x0]\n"
- "str s17, [x22, #0x0]\n"
+ "tbz x10, #0, 146f\n"
+ "str s12, [x28, #0x0]\n"
+ "str s9, [x23, #0x0]\n"
+ "str s20, [x22, #0x0]\n"
+ "str s17, [x21, #0x0]\n"
"b 146f\n"
"144:" // Height 4: Partial direct writeback: partial_2_0
- "tbz x11, #1, 145f\n"
- "str d7, [x9], #0x8\n"
- "str d8, [x24], #0x8\n"
- "str d15, [x23], #0x8\n"
- "str d16, [x22], #0x8\n"
- "tbz x11, #0, 146f\n"
- "st1 { v7.s }[2], [x9]\n"
- "st1 { v8.s }[2], [x24]\n"
- "st1 { v15.s }[2], [x23]\n"
- "st1 { v16.s }[2], [x22]\n"
+ "tbz x10, #1, 145f\n"
+ "str d7, [x28], #0x8\n"
+ "str d8, [x23], #0x8\n"
+ "str d15, [x22], #0x8\n"
+ "str d16, [x21], #0x8\n"
+ "tbz x10, #0, 146f\n"
+ "st1 { v7.s }[2], [x28]\n"
+ "st1 { v8.s }[2], [x23]\n"
+ "st1 { v15.s }[2], [x22]\n"
+ "st1 { v16.s }[2], [x21]\n"
"b 146f\n"
"145:" // Height 4: Partial direct writeback: partial_1_0
- "str s7, [x9, #0x0]\n"
- "str s8, [x24, #0x0]\n"
- "str s15, [x23, #0x0]\n"
- "str s16, [x22, #0x0]\n"
+ "str s7, [x28, #0x0]\n"
+ "str s8, [x23, #0x0]\n"
+ "str s15, [x22, #0x0]\n"
+ "str s16, [x21, #0x0]\n"
"146:" // Height 4: Partial direct writeback: Done
"b 148f\n"
"147:" // Height 4: Full writeback
- "str q7, [x9, #0x0]\n"
- "str q12, [x9, #0x10]\n"
- "str q13, [x9, #0x20]\n"
- "str q14, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q8, [x24, #0x0]\n"
- "str q9, [x24, #0x10]\n"
- "str q10, [x24, #0x20]\n"
- "str q11, [x24, #0x30]\n"
- "str q15, [x23, #0x0]\n"
- "str q20, [x23, #0x10]\n"
- "str q21, [x23, #0x20]\n"
- "str q22, [x23, #0x30]\n"
- "str q16, [x22, #0x0]\n"
- "str q17, [x22, #0x10]\n"
- "str q18, [x22, #0x20]\n"
- "str q19, [x22, #0x30]\n"
+ "str q7, [x28, #0x0]\n"
+ "str q12, [x28, #0x10]\n"
+ "str q13, [x28, #0x20]\n"
+ "str q14, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q8, [x23, #0x0]\n"
+ "str q9, [x23, #0x10]\n"
+ "str q10, [x23, #0x20]\n"
+ "str q11, [x23, #0x30]\n"
+ "str q15, [x22, #0x0]\n"
+ "str q20, [x22, #0x10]\n"
+ "str q21, [x22, #0x20]\n"
+ "str q22, [x22, #0x30]\n"
+ "str q16, [x21, #0x0]\n"
+ "str q17, [x21, #0x10]\n"
+ "str q18, [x21, #0x20]\n"
+ "str q19, [x21, #0x30]\n"
"148:" // Height 4: Writeback done
- "subs x11, x11, #0x10\n"
+ "subs x10, x10, #0x10\n"
"bgt 113b\n"
"b 224f\n"
"149:" // Height 5
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"150:" // Height 5: Column loop
"tbz %x[flags], #0, 161f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "cmp x11, #0x10\n"
- "add x21, x22, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x10, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"bge 159f\n"
- "tbz x11, #3, 154f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v25.4s }, [x21], #0x10\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x24], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
- "tbz x11, #2, 152f\n"
- "ld1 { v11.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "ld1 { v19.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "ld1 { v27.4s }, [x21], #0x10\n"
- "tbz x11, #1, 151f\n"
- "ldr d16, [x9], #0x8\n"
- "ldr d15, [x24], #0x8\n"
- "mov x25, #0x38\n"
- "ldr d24, [x23], #0x8\n"
- "ldr d23, [x22], #0x8\n"
- "ldr d6, [x21], #0x8\n"
- "tbz x11, #0, 158f\n"
- "ld1 { v16.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x24]\n"
- "ld1 { v24.s }[2], [x23]\n"
- "ld1 { v23.s }[2], [x22]\n"
- "ld1 { v6.s }[2], [x21]\n"
+ "tbz x10, #3, 154f\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v25.4s }, [x20], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x23], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "ld1 { v26.4s }, [x20], #0x10\n"
+ "tbz x10, #2, 152f\n"
+ "ld1 { v11.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v19.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
+ "ld1 { v27.4s }, [x20], #0x10\n"
+ "tbz x10, #1, 151f\n"
+ "ldr d16, [x28], #0x8\n"
+ "mov x24, #0x38\n"
+ "ldr d15, [x23], #0x8\n"
+ "ldr d24, [x22], #0x8\n"
+ "ldr d23, [x21], #0x8\n"
+ "ldr d6, [x20], #0x8\n"
+ "tbz x10, #0, 158f\n"
+ "ld1 { v16.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x23]\n"
+ "ld1 { v24.s }[2], [x22]\n"
+ "ld1 { v23.s }[2], [x21]\n"
+ "ld1 { v6.s }[2], [x20]\n"
"b 158f\n"
"151:" // Height 5: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x11, #0, 158f\n"
- "ldr s16, [x9, #0x0]\n"
- "ldr s15, [x24, #0x0]\n"
- "ldr s24, [x23, #0x0]\n"
- "ldr s23, [x22, #0x0]\n"
- "ldr s6, [x21, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x10, #0, 158f\n"
+ "ldr s16, [x28, #0x0]\n"
+ "ldr s15, [x23, #0x0]\n"
+ "ldr s24, [x22, #0x0]\n"
+ "ldr s23, [x21, #0x0]\n"
+ "ldr s6, [x20, #0x0]\n"
"b 158f\n"
"152:" // Height 5: Partial accumulate: partial_2_8
- "tbz x11, #1, 153f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d14, [x24], #0x8\n"
- "mov x25, #0x28\n"
- "ldr d19, [x23], #0x8\n"
- "ldr d22, [x22], #0x8\n"
- "ldr d27, [x21], #0x8\n"
- "tbz x11, #0, 158f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x24]\n"
- "ld1 { v19.s }[2], [x23]\n"
- "ld1 { v22.s }[2], [x22]\n"
- "ld1 { v27.s }[2], [x21]\n"
+ "tbz x10, #1, 153f\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
+ "mov x24, #0x28\n"
+ "ldr d19, [x22], #0x8\n"
+ "ldr d22, [x21], #0x8\n"
+ "ldr d27, [x20], #0x8\n"
+ "tbz x10, #0, 158f\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x23]\n"
+ "ld1 { v19.s }[2], [x22]\n"
+ "ld1 { v22.s }[2], [x21]\n"
+ "ld1 { v27.s }[2], [x20]\n"
"b 158f\n"
"153:" // Height 5: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x11, #0, 158f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s14, [x24, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s22, [x22, #0x0]\n"
- "ldr s27, [x21, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x10, #0, 158f\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s14, [x23, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
+ "ldr s22, [x21, #0x0]\n"
+ "ldr s27, [x20, #0x0]\n"
"b 158f\n"
"154:" // Height 5: Partial accumulate: partial_4_0
- "tbz x11, #2, 156f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v25.4s }, [x21], #0x10\n"
- "tbz x11, #1, 155f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d13, [x24], #0x8\n"
- "mov x25, #0x18\n"
- "ldr d18, [x23], #0x8\n"
- "ldr d21, [x22], #0x8\n"
- "ldr d26, [x21], #0x8\n"
- "tbz x11, #0, 158f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x24]\n"
- "ld1 { v18.s }[2], [x23]\n"
- "ld1 { v21.s }[2], [x22]\n"
- "ld1 { v26.s }[2], [x21]\n"
+ "tbz x10, #2, 156f\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v25.4s }, [x20], #0x10\n"
+ "tbz x10, #1, 155f\n"
+ "ldr d10, [x28], #0x8\n"
+ "mov x24, #0x18\n"
+ "ldr d13, [x23], #0x8\n"
+ "ldr d18, [x22], #0x8\n"
+ "ldr d21, [x21], #0x8\n"
+ "ldr d26, [x20], #0x8\n"
+ "tbz x10, #0, 158f\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x23]\n"
+ "ld1 { v18.s }[2], [x22]\n"
+ "ld1 { v21.s }[2], [x21]\n"
+ "ld1 { v26.s }[2], [x20]\n"
"b 158f\n"
"155:" // Height 5: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x11, #0, 158f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s13, [x24, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
- "ldr s21, [x22, #0x0]\n"
- "ldr s26, [x21, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x10, #0, 158f\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s13, [x23, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
+ "ldr s21, [x21, #0x0]\n"
+ "ldr s26, [x20, #0x0]\n"
"b 158f\n"
"156:" // Height 5: Partial accumulate: partial_2_0
- "tbz x11, #1, 157f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d12, [x24], #0x8\n"
- "mov x25, #0x8\n"
- "ldr d17, [x23], #0x8\n"
- "ldr d20, [x22], #0x8\n"
- "ldr d25, [x21], #0x8\n"
- "tbz x11, #0, 158f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x24]\n"
- "ld1 { v17.s }[2], [x23]\n"
- "ld1 { v20.s }[2], [x22]\n"
- "ld1 { v25.s }[2], [x21]\n"
+ "tbz x10, #1, 157f\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d12, [x23], #0x8\n"
+ "mov x24, #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "ldr d20, [x21], #0x8\n"
+ "ldr d25, [x20], #0x8\n"
+ "tbz x10, #0, 158f\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x23]\n"
+ "ld1 { v17.s }[2], [x22]\n"
+ "ld1 { v20.s }[2], [x21]\n"
+ "ld1 { v25.s }[2], [x20]\n"
"b 158f\n"
"157:" // Height 5: Partial accumulate: partial_1_0
- "ldr s9, [x9, #0x0]\n"
- "ldr s12, [x24, #0x0]\n"
- "mov x25, #0x0\n"
- "ldr s17, [x23, #0x0]\n"
- "ldr s20, [x22, #0x0]\n"
- "ldr s25, [x21, #0x0]\n"
+ "ldr s9, [x28, #0x0]\n"
+ "mov x24, #0x0\n"
+ "ldr s12, [x23, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
+ "ldr s20, [x21, #0x0]\n"
+ "ldr s25, [x20, #0x0]\n"
"158:" // Height 5: Partial accumulate: Done
- "sub x9, x9, x25\n"
+ "sub x28, x28, x24\n"
"b 160f\n"
"159:" // Height 5: full accumulate
- "ldr q9, [x9, #0x0]\n"
- "ldr q10, [x9, #0x10]\n"
- "ldr q11, [x9, #0x20]\n"
- "ldr q16, [x9, #0x30]\n"
- "ldr q12, [x24, #0x0]\n"
- "ldr q13, [x24, #0x10]\n"
- "ldr q14, [x24, #0x20]\n"
- "ldr q15, [x24, #0x30]\n"
- "ldr q17, [x23, #0x0]\n"
- "ldr q18, [x23, #0x10]\n"
- "ldr q19, [x23, #0x20]\n"
- "ldr q24, [x23, #0x30]\n"
- "ldr q20, [x22, #0x0]\n"
- "ldr q21, [x22, #0x10]\n"
- "ldr q22, [x22, #0x20]\n"
- "ldr q23, [x22, #0x30]\n"
- "ldr q25, [x21, #0x0]\n"
- "ldr q26, [x21, #0x10]\n"
- "ldr q27, [x21, #0x20]\n"
- "ldr q6, [x21, #0x30]\n"
+ "ldr q9, [x28, #0x0]\n"
+ "ldr q10, [x28, #0x10]\n"
+ "ldr q11, [x28, #0x20]\n"
+ "ldr q16, [x28, #0x30]\n"
+ "ldr q12, [x23, #0x0]\n"
+ "ldr q13, [x23, #0x10]\n"
+ "ldr q14, [x23, #0x20]\n"
+ "ldr q15, [x23, #0x30]\n"
+ "ldr q17, [x22, #0x0]\n"
+ "ldr q18, [x22, #0x10]\n"
+ "ldr q19, [x22, #0x20]\n"
+ "ldr q24, [x22, #0x30]\n"
+ "ldr q20, [x21, #0x0]\n"
+ "ldr q21, [x21, #0x10]\n"
+ "ldr q22, [x21, #0x20]\n"
+ "ldr q23, [x21, #0x30]\n"
+ "ldr q25, [x20, #0x0]\n"
+ "ldr q26, [x20, #0x10]\n"
+ "ldr q27, [x20, #0x20]\n"
+ "ldr q6, [x20, #0x30]\n"
"160:" // Height 5: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -2121,210 +2129,212 @@ void a64_hybrid_u8u32_mmla_6x16 (
"movi v30.4s, #0x0\n"
"movi v31.4s, #0x0\n"
"162:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"163:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 164f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 165f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 165f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
"b 165f\n"
"164:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
"165:" // Height 5: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 168f\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q2, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q3, [x24, #0x0]\n"
- "ldr q4, [x23, #0x0]\n"
- "ldr q5, [x22, #0x0]\n"
- "ldr q7, [x10, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "cmp x26, #0x20\n"
"blt 167f\n"
"166:" // Height 5: Multiply loop: Main loop head
+ "movi v6.4s, #0x0\n"
+ "ldr q2, [x24, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x23, #0x0]\n"
+ "add x24, x24, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e87a408 // ummla v8.4s, v0.16b, v7.16b\n"
+ "ldr q4, [x22, #0x0]\n"
+ "add x23, x23, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q5, [x21, #0x0]\n"
+ "add x22, x22, #0x10\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x6e87a450 // ummla v16.4s, v2.16b, v7.16b\n"
- "sub x27, x27, #0x10\n"
+ "ldr q7, [x9, #0x0]\n"
+ "add x21, x21, #0x10\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x26, x26, #0x10\n"
"trn2 v5.2d, v5.2d, v6.2d\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
+ "cmp x26, #0x20\n"
+ ".inst 0x6e87a408 // ummla v8.4s, v0.16b, v7.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ ".inst 0x6e87a450 // ummla v16.4s, v2.16b, v7.16b\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e87a498 // ummla v24.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e86a40c // ummla v12.4s, v0.16b, v6.16b\n"
".inst 0x6e86a454 // ummla v20.4s, v2.16b, v6.16b\n"
- "add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x6e86a49c // ummla v28.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x6e87a409 // ummla v9.4s, v0.16b, v7.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x6e87a451 // ummla v17.4s, v2.16b, v7.16b\n"
".inst 0x6e87a499 // ummla v25.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
- "add x24, x24, #0x10\n"
+ "ldr q7, [x9, #0x40]\n"
".inst 0x6e86a40d // ummla v13.4s, v0.16b, v6.16b\n"
".inst 0x6e86a455 // ummla v21.4s, v2.16b, v6.16b\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
".inst 0x6e86a49d // ummla v29.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x6e87a40a // ummla v10.4s, v0.16b, v7.16b\n"
- "cmp x27, #0x20\n"
".inst 0x6e87a452 // ummla v18.4s, v2.16b, v7.16b\n"
".inst 0x6e87a49a // ummla v26.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x6e86a40e // ummla v14.4s, v0.16b, v6.16b\n"
".inst 0x6e86a456 // ummla v22.4s, v2.16b, v6.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e86a49e // ummla v30.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
+ "ldr q6, [x9, #0x70]\n"
".inst 0x6e87a40b // ummla v11.4s, v0.16b, v7.16b\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e87a453 // ummla v19.4s, v2.16b, v7.16b\n"
".inst 0x6e87a49b // ummla v27.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
+ "ldr q7, [x9, #0x80]\n"
".inst 0x6e86a40f // ummla v15.4s, v0.16b, v6.16b\n"
".inst 0x6e86a457 // ummla v23.4s, v2.16b, v6.16b\n"
- "ldr q2, [x25, #0x0]\n"
".inst 0x6e86a49f // ummla v31.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
- "ldr q4, [x23, #0x0]\n"
+ "ldr q6, [x9, #0x90]\n"
".inst 0x6e87a428 // ummla v8.4s, v1.16b, v7.16b\n"
".inst 0x6e87a470 // ummla v16.4s, v3.16b, v7.16b\n"
".inst 0x6e87a4b8 // ummla v24.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
+ "ldr q7, [x9, #0xa0]\n"
".inst 0x6e86a42c // ummla v12.4s, v1.16b, v6.16b\n"
".inst 0x6e86a474 // ummla v20.4s, v3.16b, v6.16b\n"
".inst 0x6e86a4bc // ummla v28.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
+ "ldr q6, [x9, #0xb0]\n"
".inst 0x6e87a429 // ummla v9.4s, v1.16b, v7.16b\n"
".inst 0x6e87a471 // ummla v17.4s, v3.16b, v7.16b\n"
".inst 0x6e87a4b9 // ummla v25.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
+ "ldr q7, [x9, #0xc0]\n"
".inst 0x6e86a42d // ummla v13.4s, v1.16b, v6.16b\n"
".inst 0x6e86a475 // ummla v21.4s, v3.16b, v6.16b\n"
".inst 0x6e86a4bd // ummla v29.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
+ "ldr q6, [x9, #0xd0]\n"
".inst 0x6e87a42a // ummla v10.4s, v1.16b, v7.16b\n"
".inst 0x6e87a472 // ummla v18.4s, v3.16b, v7.16b\n"
".inst 0x6e87a4ba // ummla v26.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
+ "ldr q7, [x9, #0xe0]\n"
".inst 0x6e86a42e // ummla v14.4s, v1.16b, v6.16b\n"
".inst 0x6e86a476 // ummla v22.4s, v3.16b, v6.16b\n"
".inst 0x6e86a4be // ummla v30.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q6, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x6e87a42b // ummla v11.4s, v1.16b, v7.16b\n"
".inst 0x6e87a473 // ummla v19.4s, v3.16b, v7.16b\n"
".inst 0x6e87a4bb // ummla v27.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0x0]\n"
".inst 0x6e86a42f // ummla v15.4s, v1.16b, v6.16b\n"
- "ldr q1, [x26, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
".inst 0x6e86a477 // ummla v23.4s, v3.16b, v6.16b\n"
- "ldr q3, [x24, #0x0]\n"
".inst 0x6e86a4bf // ummla v31.4s, v5.16b, v6.16b\n"
- "ldr q5, [x22, #0x0]\n"
"bge 166b\n"
"167:" // Height 5: Multiply loop: Single iteration only
+ "movi v6.4s, #0x0\n"
+ "ldr q2, [x24, #0x0]\n"
+ "sub x26, x26, #0x10\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x23, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e87a408 // ummla v8.4s, v0.16b, v7.16b\n"
+ "ldr q4, [x22, #0x0]\n"
+ "add x24, x24, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q5, [x21, #0x0]\n"
+ "add x23, x23, #0x10\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x6e87a450 // ummla v16.4s, v2.16b, v7.16b\n"
- "add x26, x26, #0x10\n"
+ "ldr q7, [x9, #0x0]\n"
+ "add x22, x22, #0x10\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x21, x21, #0x10\n"
"trn2 v5.2d, v5.2d, v6.2d\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
+ ".inst 0x6e87a408 // ummla v8.4s, v0.16b, v7.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ ".inst 0x6e87a450 // ummla v16.4s, v2.16b, v7.16b\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e87a498 // ummla v24.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e86a40c // ummla v12.4s, v0.16b, v6.16b\n"
".inst 0x6e86a454 // ummla v20.4s, v2.16b, v6.16b\n"
- "add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x6e86a49c // ummla v28.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x6e87a409 // ummla v9.4s, v0.16b, v7.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x6e87a451 // ummla v17.4s, v2.16b, v7.16b\n"
".inst 0x6e87a499 // ummla v25.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
- "add x23, x23, #0x10\n"
+ "ldr q7, [x9, #0x40]\n"
".inst 0x6e86a40d // ummla v13.4s, v0.16b, v6.16b\n"
".inst 0x6e86a455 // ummla v21.4s, v2.16b, v6.16b\n"
- "add x22, x22, #0x10\n"
- "sub x27, x27, #0x10\n"
".inst 0x6e86a49d // ummla v29.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x6e87a40a // ummla v10.4s, v0.16b, v7.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e87a452 // ummla v18.4s, v2.16b, v7.16b\n"
".inst 0x6e87a49a // ummla v26.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x6e86a40e // ummla v14.4s, v0.16b, v6.16b\n"
".inst 0x6e86a456 // ummla v22.4s, v2.16b, v6.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e86a49e // ummla v30.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
+ "ldr q6, [x9, #0x70]\n"
".inst 0x6e87a40b // ummla v11.4s, v0.16b, v7.16b\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e87a453 // ummla v19.4s, v2.16b, v7.16b\n"
".inst 0x6e87a49b // ummla v27.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x80]\n"
+ "ldr q7, [x9, #0x80]\n"
".inst 0x6e86a40f // ummla v15.4s, v0.16b, v6.16b\n"
".inst 0x6e86a457 // ummla v23.4s, v2.16b, v6.16b\n"
".inst 0x6e86a49f // ummla v31.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
+ "ldr q6, [x9, #0x90]\n"
".inst 0x6e87a428 // ummla v8.4s, v1.16b, v7.16b\n"
".inst 0x6e87a470 // ummla v16.4s, v3.16b, v7.16b\n"
".inst 0x6e87a4b8 // ummla v24.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
+ "ldr q7, [x9, #0xa0]\n"
".inst 0x6e86a42c // ummla v12.4s, v1.16b, v6.16b\n"
".inst 0x6e86a474 // ummla v20.4s, v3.16b, v6.16b\n"
".inst 0x6e86a4bc // ummla v28.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
+ "ldr q6, [x9, #0xb0]\n"
".inst 0x6e87a429 // ummla v9.4s, v1.16b, v7.16b\n"
".inst 0x6e87a471 // ummla v17.4s, v3.16b, v7.16b\n"
".inst 0x6e87a4b9 // ummla v25.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
+ "ldr q7, [x9, #0xc0]\n"
".inst 0x6e86a42d // ummla v13.4s, v1.16b, v6.16b\n"
".inst 0x6e86a475 // ummla v21.4s, v3.16b, v6.16b\n"
".inst 0x6e86a4bd // ummla v29.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
+ "ldr q6, [x9, #0xd0]\n"
".inst 0x6e87a42a // ummla v10.4s, v1.16b, v7.16b\n"
".inst 0x6e87a472 // ummla v18.4s, v3.16b, v7.16b\n"
".inst 0x6e87a4ba // ummla v26.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
+ "ldr q7, [x9, #0xe0]\n"
".inst 0x6e86a42e // ummla v14.4s, v1.16b, v6.16b\n"
".inst 0x6e86a476 // ummla v22.4s, v3.16b, v6.16b\n"
".inst 0x6e86a4be // ummla v30.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q6, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x6e87a42b // ummla v11.4s, v1.16b, v7.16b\n"
".inst 0x6e87a473 // ummla v19.4s, v3.16b, v7.16b\n"
".inst 0x6e87a4bb // ummla v27.4s, v5.16b, v7.16b\n"
@@ -2332,134 +2342,136 @@ void a64_hybrid_u8u32_mmla_6x16 (
".inst 0x6e86a477 // ummla v23.4s, v3.16b, v6.16b\n"
".inst 0x6e86a4bf // ummla v31.4s, v5.16b, v6.16b\n"
"168:" // Height 5: Multiply loop: Main loop skip
- "cbz x27, 175f\n"
- "cmp x27, #0x8\n"
+ "cbz x26, 175f\n"
+ "cmp x26, #0x8\n"
"blt 170f\n"
"169:" // Height 5: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d2, [x25], #0x8\n"
+ "movi v7.4s, #0x0\n"
+ "ldr d1, [x25], #0x8\n"
+ "sub x26, x26, #0x8\n"
+ "ldr d2, [x24], #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "ldr d3, [x24], #0x8\n"
- "ldr d4, [x23], #0x8\n"
+ "ldr d3, [x23], #0x8\n"
+ "cmp x26, #0x8\n"
+ "ldr d4, [x22], #0x8\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
- "sub x27, x27, #0x8\n"
- "ldr d5, [x22], #0x8\n"
- "ldr q6, [x10, #0x0]\n"
+ "ldr d5, [x21], #0x8\n"
+ "ldr q6, [x9, #0x0]\n"
"trn1 v4.2d, v5.2d, v7.2d\n"
+ "ldr q7, [x9, #0x10]\n"
".inst 0x6e86a408 // ummla v8.4s, v0.16b, v6.16b\n"
- "ldr q7, [x10, #0x10]\n"
".inst 0x6e86a450 // ummla v16.4s, v2.16b, v6.16b\n"
".inst 0x6e86a498 // ummla v24.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x6e87a40c // ummla v12.4s, v0.16b, v7.16b\n"
".inst 0x6e87a454 // ummla v20.4s, v2.16b, v7.16b\n"
- "cmp x27, #0x8\n"
".inst 0x6e87a49c // ummla v28.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x30]\n"
+ "ldr q7, [x9, #0x30]\n"
".inst 0x6e86a409 // ummla v9.4s, v0.16b, v6.16b\n"
".inst 0x6e86a451 // ummla v17.4s, v2.16b, v6.16b\n"
".inst 0x6e86a499 // ummla v25.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x6e87a40d // ummla v13.4s, v0.16b, v7.16b\n"
".inst 0x6e87a455 // ummla v21.4s, v2.16b, v7.16b\n"
".inst 0x6e87a49d // ummla v29.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x50]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x6e86a40a // ummla v10.4s, v0.16b, v6.16b\n"
".inst 0x6e86a452 // ummla v18.4s, v2.16b, v6.16b\n"
".inst 0x6e86a49a // ummla v26.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x6e87a40e // ummla v14.4s, v0.16b, v7.16b\n"
".inst 0x6e87a456 // ummla v22.4s, v2.16b, v7.16b\n"
".inst 0x6e87a49e // ummla v30.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x70]\n"
+ "ldr q7, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
".inst 0x6e86a40b // ummla v11.4s, v0.16b, v6.16b\n"
- "add x10, x10, #0x80\n"
".inst 0x6e86a453 // ummla v19.4s, v2.16b, v6.16b\n"
".inst 0x6e86a49b // ummla v27.4s, v4.16b, v6.16b\n"
".inst 0x6e87a40f // ummla v15.4s, v0.16b, v7.16b\n"
".inst 0x6e87a457 // ummla v23.4s, v2.16b, v7.16b\n"
".inst 0x6e87a49f // ummla v31.4s, v4.16b, v7.16b\n"
"bge 169b\n"
+ "cbz x26, 175f\n"
"170:" // Height 5: Multiply loop: Skip odd blocks
- "cbz x27, 175f\n"
- "tbz x27, #2, 172f\n"
- "ldr s1, [x26], #0x4\n"
- "ldr s2, [x25], #0x4\n"
- "ldr s3, [x24], #0x4\n"
- "ldr s4, [x23], #0x4\n"
- "ldr s5, [x22], #0x4\n"
- "tbz x27, #1, 171f\n"
- "ld1 { v1.h }[2], [x26], #0x2\n"
- "ld1 { v2.h }[2], [x25], #0x2\n"
- "ld1 { v3.h }[2], [x24], #0x2\n"
- "ld1 { v4.h }[2], [x23], #0x2\n"
- "ld1 { v5.h }[2], [x22], #0x2\n"
- "tbz x27, #0, 174f\n"
- "ld1 { v1.b }[6], [x26]\n"
- "ld1 { v2.b }[6], [x25]\n"
- "ld1 { v3.b }[6], [x24]\n"
- "ld1 { v4.b }[6], [x23]\n"
- "ld1 { v5.b }[6], [x22]\n"
+ "tbz x26, #2, 172f\n"
+ "ldr s1, [x25], #0x4\n"
+ "ldr s2, [x24], #0x4\n"
+ "ldr s3, [x23], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
+ "ldr s5, [x21], #0x4\n"
+ "tbz x26, #1, 171f\n"
+ "ld1 { v1.h }[2], [x25], #0x2\n"
+ "ld1 { v2.h }[2], [x24], #0x2\n"
+ "ld1 { v3.h }[2], [x23], #0x2\n"
+ "ld1 { v4.h }[2], [x22], #0x2\n"
+ "ld1 { v5.h }[2], [x21], #0x2\n"
+ "tbz x26, #0, 174f\n"
+ "ld1 { v1.b }[6], [x25]\n"
+ "ld1 { v2.b }[6], [x24]\n"
+ "ld1 { v3.b }[6], [x23]\n"
+ "ld1 { v4.b }[6], [x22]\n"
+ "ld1 { v5.b }[6], [x21]\n"
"b 174f\n"
"171:" // Height 5: Multiply loop: Ragged operand read: partial_1_4
- "tbz x27, #0, 174f\n"
- "ld1 { v1.b }[4], [x26]\n"
- "ld1 { v2.b }[4], [x25]\n"
- "ld1 { v3.b }[4], [x24]\n"
- "ld1 { v4.b }[4], [x23]\n"
- "ld1 { v5.b }[4], [x22]\n"
+ "tbz x26, #0, 174f\n"
+ "ld1 { v1.b }[4], [x25]\n"
+ "ld1 { v2.b }[4], [x24]\n"
+ "ld1 { v3.b }[4], [x23]\n"
+ "ld1 { v4.b }[4], [x22]\n"
+ "ld1 { v5.b }[4], [x21]\n"
"b 174f\n"
"172:" // Height 5: Multiply loop: Ragged operand read: partial_2_0
- "tbz x27, #1, 173f\n"
- "ldr h1, [x26], #0x2\n"
- "ldr h2, [x25], #0x2\n"
- "ldr h3, [x24], #0x2\n"
- "ldr h4, [x23], #0x2\n"
- "ldr h5, [x22], #0x2\n"
- "tbz x27, #0, 174f\n"
- "ld1 { v1.b }[2], [x26]\n"
- "ld1 { v2.b }[2], [x25]\n"
- "ld1 { v3.b }[2], [x24]\n"
- "ld1 { v4.b }[2], [x23]\n"
- "ld1 { v5.b }[2], [x22]\n"
+ "tbz x26, #1, 173f\n"
+ "ldr h1, [x25], #0x2\n"
+ "ldr h2, [x24], #0x2\n"
+ "ldr h3, [x23], #0x2\n"
+ "ldr h4, [x22], #0x2\n"
+ "ldr h5, [x21], #0x2\n"
+ "tbz x26, #0, 174f\n"
+ "ld1 { v1.b }[2], [x25]\n"
+ "ld1 { v2.b }[2], [x24]\n"
+ "ld1 { v3.b }[2], [x23]\n"
+ "ld1 { v4.b }[2], [x22]\n"
+ "ld1 { v5.b }[2], [x21]\n"
"b 174f\n"
"173:" // Height 5: Multiply loop: Ragged operand read: partial_1_0
- "ldr b1, [x26, #0x0]\n"
- "ldr b2, [x25, #0x0]\n"
- "ldr b3, [x24, #0x0]\n"
- "ldr b4, [x23, #0x0]\n"
- "ldr b5, [x22, #0x0]\n"
+ "ldr b1, [x25, #0x0]\n"
+ "ldr b2, [x24, #0x0]\n"
+ "ldr b3, [x23, #0x0]\n"
+ "ldr b4, [x22, #0x0]\n"
+ "ldr b5, [x21, #0x0]\n"
"174:" // Height 5: Multiply loop: Ragged operand read: Done
- "ldr q7, [x10, #0x0]\n"
+ "movi v6.4s, #0x0\n"
+ "ldr q7, [x9, #0x0]\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
".inst 0x6e87a408 // ummla v8.4s, v0.16b, v7.16b\n"
".inst 0x6e87a450 // ummla v16.4s, v2.16b, v7.16b\n"
".inst 0x6e87a498 // ummla v24.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x6e86a40c // ummla v12.4s, v0.16b, v6.16b\n"
".inst 0x6e86a454 // ummla v20.4s, v2.16b, v6.16b\n"
".inst 0x6e86a49c // ummla v28.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x6e87a409 // ummla v9.4s, v0.16b, v7.16b\n"
".inst 0x6e87a451 // ummla v17.4s, v2.16b, v7.16b\n"
".inst 0x6e87a499 // ummla v25.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
+ "ldr q7, [x9, #0x40]\n"
".inst 0x6e86a40d // ummla v13.4s, v0.16b, v6.16b\n"
".inst 0x6e86a455 // ummla v21.4s, v2.16b, v6.16b\n"
".inst 0x6e86a49d // ummla v29.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x6e87a40a // ummla v10.4s, v0.16b, v7.16b\n"
".inst 0x6e87a452 // ummla v18.4s, v2.16b, v7.16b\n"
".inst 0x6e87a49a // ummla v26.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x6e86a40e // ummla v14.4s, v0.16b, v6.16b\n"
".inst 0x6e86a456 // ummla v22.4s, v2.16b, v6.16b\n"
".inst 0x6e86a49e // ummla v30.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
- "add x10, x10, #0x80\n"
+ "ldr q6, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
".inst 0x6e87a40b // ummla v11.4s, v0.16b, v7.16b\n"
".inst 0x6e87a453 // ummla v19.4s, v2.16b, v7.16b\n"
".inst 0x6e87a49b // ummla v27.4s, v4.16b, v7.16b\n"
@@ -2467,30 +2479,30 @@ void a64_hybrid_u8u32_mmla_6x16 (
".inst 0x6e86a457 // ummla v23.4s, v2.16b, v6.16b\n"
".inst 0x6e86a49f // ummla v31.4s, v4.16b, v6.16b\n"
"175:" // Height 5: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 163b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "cmp x10, #0x10\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
- "cmp x11, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
+ "add x22, x23, x19, LSL #2\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #2\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
- "prfm pstl1keep, [x24, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
+ "add x20, x21, x19, LSL #2\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x20, #0x0]\n"
"uzp1 v15.2d, v16.2d, v20.2d\n"
- "prfm pstl1keep, [x22, #0x0]\n"
- "prfm pstl1keep, [x21, #0x0]\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
@@ -2503,319 +2515,319 @@ void a64_hybrid_u8u32_mmla_6x16 (
"uzp1 v26.2d, v26.2d, v30.2d\n"
"uzp1 v27.2d, v27.2d, v31.2d\n"
"bge 184f\n"
- "tbz x11, #3, 179f\n"
- "st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x24], #0x10\n"
- "st1 { v9.4s }, [x24], #0x10\n"
- "st1 { v15.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v16.4s }, [x22], #0x10\n"
- "st1 { v17.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
- "st1 { v25.4s }, [x21], #0x10\n"
- "tbz x11, #2, 177f\n"
- "st1 { v13.4s }, [x9], #0x10\n"
- "st1 { v10.4s }, [x24], #0x10\n"
- "st1 { v21.4s }, [x23], #0x10\n"
- "st1 { v18.4s }, [x22], #0x10\n"
- "st1 { v26.4s }, [x21], #0x10\n"
- "tbz x11, #1, 176f\n"
- "str d14, [x9], #0x8\n"
- "str d11, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
- "str d19, [x22], #0x8\n"
- "str d27, [x21], #0x8\n"
- "tbz x11, #0, 183f\n"
- "st1 { v14.s }[2], [x9]\n"
- "st1 { v11.s }[2], [x24]\n"
- "st1 { v22.s }[2], [x23]\n"
- "st1 { v19.s }[2], [x22]\n"
- "st1 { v27.s }[2], [x21]\n"
+ "tbz x10, #3, 179f\n"
+ "st1 { v7.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x23], #0x10\n"
+ "st1 { v9.4s }, [x23], #0x10\n"
+ "st1 { v15.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v16.4s }, [x21], #0x10\n"
+ "st1 { v17.4s }, [x21], #0x10\n"
+ "st1 { v24.4s }, [x20], #0x10\n"
+ "st1 { v25.4s }, [x20], #0x10\n"
+ "tbz x10, #2, 177f\n"
+ "st1 { v13.4s }, [x28], #0x10\n"
+ "st1 { v10.4s }, [x23], #0x10\n"
+ "st1 { v21.4s }, [x22], #0x10\n"
+ "st1 { v18.4s }, [x21], #0x10\n"
+ "st1 { v26.4s }, [x20], #0x10\n"
+ "tbz x10, #1, 176f\n"
+ "str d14, [x28], #0x8\n"
+ "str d11, [x23], #0x8\n"
+ "str d22, [x22], #0x8\n"
+ "str d19, [x21], #0x8\n"
+ "str d27, [x20], #0x8\n"
+ "tbz x10, #0, 183f\n"
+ "st1 { v14.s }[2], [x28]\n"
+ "st1 { v11.s }[2], [x23]\n"
+ "st1 { v22.s }[2], [x22]\n"
+ "st1 { v19.s }[2], [x21]\n"
+ "st1 { v27.s }[2], [x20]\n"
"b 183f\n"
"176:" // Height 5: Partial direct writeback: partial_1_12
- "tbz x11, #0, 183f\n"
- "str s14, [x9, #0x0]\n"
- "str s11, [x24, #0x0]\n"
- "str s22, [x23, #0x0]\n"
- "str s19, [x22, #0x0]\n"
- "str s27, [x21, #0x0]\n"
+ "tbz x10, #0, 183f\n"
+ "str s14, [x28, #0x0]\n"
+ "str s11, [x23, #0x0]\n"
+ "str s22, [x22, #0x0]\n"
+ "str s19, [x21, #0x0]\n"
+ "str s27, [x20, #0x0]\n"
"b 183f\n"
"177:" // Height 5: Partial direct writeback: partial_2_8
- "tbz x11, #1, 178f\n"
- "str d13, [x9], #0x8\n"
- "str d10, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
- "str d18, [x22], #0x8\n"
- "str d26, [x21], #0x8\n"
- "tbz x11, #0, 183f\n"
- "st1 { v13.s }[2], [x9]\n"
- "st1 { v10.s }[2], [x24]\n"
- "st1 { v21.s }[2], [x23]\n"
- "st1 { v18.s }[2], [x22]\n"
- "st1 { v26.s }[2], [x21]\n"
+ "tbz x10, #1, 178f\n"
+ "str d13, [x28], #0x8\n"
+ "str d10, [x23], #0x8\n"
+ "str d21, [x22], #0x8\n"
+ "str d18, [x21], #0x8\n"
+ "str d26, [x20], #0x8\n"
+ "tbz x10, #0, 183f\n"
+ "st1 { v13.s }[2], [x28]\n"
+ "st1 { v10.s }[2], [x23]\n"
+ "st1 { v21.s }[2], [x22]\n"
+ "st1 { v18.s }[2], [x21]\n"
+ "st1 { v26.s }[2], [x20]\n"
"b 183f\n"
"178:" // Height 5: Partial direct writeback: partial_1_8
- "tbz x11, #0, 183f\n"
- "str s13, [x9, #0x0]\n"
- "str s10, [x24, #0x0]\n"
- "str s21, [x23, #0x0]\n"
- "str s18, [x22, #0x0]\n"
- "str s26, [x21, #0x0]\n"
+ "tbz x10, #0, 183f\n"
+ "str s13, [x28, #0x0]\n"
+ "str s10, [x23, #0x0]\n"
+ "str s21, [x22, #0x0]\n"
+ "str s18, [x21, #0x0]\n"
+ "str s26, [x20, #0x0]\n"
"b 183f\n"
"179:" // Height 5: Partial direct writeback: partial_4_0
- "tbz x11, #2, 181f\n"
- "st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x24], #0x10\n"
- "st1 { v15.4s }, [x23], #0x10\n"
- "st1 { v16.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
- "tbz x11, #1, 180f\n"
- "str d12, [x9], #0x8\n"
- "str d9, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d17, [x22], #0x8\n"
- "str d25, [x21], #0x8\n"
- "tbz x11, #0, 183f\n"
- "st1 { v12.s }[2], [x9]\n"
- "st1 { v9.s }[2], [x24]\n"
- "st1 { v20.s }[2], [x23]\n"
- "st1 { v17.s }[2], [x22]\n"
- "st1 { v25.s }[2], [x21]\n"
+ "tbz x10, #2, 181f\n"
+ "st1 { v7.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x23], #0x10\n"
+ "st1 { v15.4s }, [x22], #0x10\n"
+ "st1 { v16.4s }, [x21], #0x10\n"
+ "st1 { v24.4s }, [x20], #0x10\n"
+ "tbz x10, #1, 180f\n"
+ "str d12, [x28], #0x8\n"
+ "str d9, [x23], #0x8\n"
+ "str d20, [x22], #0x8\n"
+ "str d17, [x21], #0x8\n"
+ "str d25, [x20], #0x8\n"
+ "tbz x10, #0, 183f\n"
+ "st1 { v12.s }[2], [x28]\n"
+ "st1 { v9.s }[2], [x23]\n"
+ "st1 { v20.s }[2], [x22]\n"
+ "st1 { v17.s }[2], [x21]\n"
+ "st1 { v25.s }[2], [x20]\n"
"b 183f\n"
"180:" // Height 5: Partial direct writeback: partial_1_4
- "tbz x11, #0, 183f\n"
- "str s12, [x9, #0x0]\n"
- "str s9, [x24, #0x0]\n"
- "str s20, [x23, #0x0]\n"
- "str s17, [x22, #0x0]\n"
- "str s25, [x21, #0x0]\n"
+ "tbz x10, #0, 183f\n"
+ "str s12, [x28, #0x0]\n"
+ "str s9, [x23, #0x0]\n"
+ "str s20, [x22, #0x0]\n"
+ "str s17, [x21, #0x0]\n"
+ "str s25, [x20, #0x0]\n"
"b 183f\n"
"181:" // Height 5: Partial direct writeback: partial_2_0
- "tbz x11, #1, 182f\n"
- "str d7, [x9], #0x8\n"
- "str d8, [x24], #0x8\n"
- "str d15, [x23], #0x8\n"
- "str d16, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
- "tbz x11, #0, 183f\n"
- "st1 { v7.s }[2], [x9]\n"
- "st1 { v8.s }[2], [x24]\n"
- "st1 { v15.s }[2], [x23]\n"
- "st1 { v16.s }[2], [x22]\n"
- "st1 { v24.s }[2], [x21]\n"
+ "tbz x10, #1, 182f\n"
+ "str d7, [x28], #0x8\n"
+ "str d8, [x23], #0x8\n"
+ "str d15, [x22], #0x8\n"
+ "str d16, [x21], #0x8\n"
+ "str d24, [x20], #0x8\n"
+ "tbz x10, #0, 183f\n"
+ "st1 { v7.s }[2], [x28]\n"
+ "st1 { v8.s }[2], [x23]\n"
+ "st1 { v15.s }[2], [x22]\n"
+ "st1 { v16.s }[2], [x21]\n"
+ "st1 { v24.s }[2], [x20]\n"
"b 183f\n"
"182:" // Height 5: Partial direct writeback: partial_1_0
- "str s7, [x9, #0x0]\n"
- "str s8, [x24, #0x0]\n"
- "str s15, [x23, #0x0]\n"
- "str s16, [x22, #0x0]\n"
- "str s24, [x21, #0x0]\n"
+ "str s7, [x28, #0x0]\n"
+ "str s8, [x23, #0x0]\n"
+ "str s15, [x22, #0x0]\n"
+ "str s16, [x21, #0x0]\n"
+ "str s24, [x20, #0x0]\n"
"183:" // Height 5: Partial direct writeback: Done
"b 185f\n"
"184:" // Height 5: Full writeback
- "str q7, [x9, #0x0]\n"
- "str q12, [x9, #0x10]\n"
- "str q13, [x9, #0x20]\n"
- "str q14, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q8, [x24, #0x0]\n"
- "str q9, [x24, #0x10]\n"
- "str q10, [x24, #0x20]\n"
- "str q11, [x24, #0x30]\n"
- "str q15, [x23, #0x0]\n"
- "str q20, [x23, #0x10]\n"
- "str q21, [x23, #0x20]\n"
- "str q22, [x23, #0x30]\n"
- "str q16, [x22, #0x0]\n"
- "str q17, [x22, #0x10]\n"
- "str q18, [x22, #0x20]\n"
- "str q19, [x22, #0x30]\n"
- "str q24, [x21, #0x0]\n"
- "str q25, [x21, #0x10]\n"
- "str q26, [x21, #0x20]\n"
- "str q27, [x21, #0x30]\n"
+ "str q7, [x28, #0x0]\n"
+ "str q12, [x28, #0x10]\n"
+ "str q13, [x28, #0x20]\n"
+ "str q14, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q8, [x23, #0x0]\n"
+ "str q9, [x23, #0x10]\n"
+ "str q10, [x23, #0x20]\n"
+ "str q11, [x23, #0x30]\n"
+ "str q15, [x22, #0x0]\n"
+ "str q20, [x22, #0x10]\n"
+ "str q21, [x22, #0x20]\n"
+ "str q22, [x22, #0x30]\n"
+ "str q16, [x21, #0x0]\n"
+ "str q17, [x21, #0x10]\n"
+ "str q18, [x21, #0x20]\n"
+ "str q19, [x21, #0x30]\n"
+ "str q24, [x20, #0x0]\n"
+ "str q25, [x20, #0x10]\n"
+ "str q26, [x20, #0x20]\n"
+ "str q27, [x20, #0x30]\n"
"185:" // Height 5: Writeback done
- "subs x11, x11, #0x10\n"
+ "subs x10, x10, #0x10\n"
"bgt 150b\n"
"b 224f\n"
"186:" // Height 6
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"mov x20, #0x18\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x9, %x[output_ptr]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
"187:" // Height 6: Column loop
"tbz %x[flags], #0, 198f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "cmp x11, #0x10\n"
- "add x20, x21, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x10, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
+ "add x19, x20, x19, LSL #2\n"
"bge 196f\n"
- "tbz x11, #3, 191f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v25.4s }, [x21], #0x10\n"
- "ld1 { v28.4s }, [x20], #0x10\n"
- "ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x24], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
- "ld1 { v29.4s }, [x20], #0x10\n"
- "tbz x11, #2, 189f\n"
- "ld1 { v11.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "ld1 { v19.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "ld1 { v27.4s }, [x21], #0x10\n"
- "ld1 { v30.4s }, [x20], #0x10\n"
- "tbz x11, #1, 188f\n"
- "ldr d16, [x9], #0x8\n"
- "ldr d15, [x24], #0x8\n"
- "mov x25, #0x38\n"
- "ldr d24, [x23], #0x8\n"
- "ldr d23, [x22], #0x8\n"
- "ldr d6, [x21], #0x8\n"
- "ldr d31, [x20], #0x8\n"
- "tbz x11, #0, 195f\n"
- "ld1 { v16.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x24]\n"
- "ld1 { v24.s }[2], [x23]\n"
- "ld1 { v23.s }[2], [x22]\n"
- "ld1 { v6.s }[2], [x21]\n"
- "ld1 { v31.s }[2], [x20]\n"
+ "tbz x10, #3, 191f\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v25.4s }, [x20], #0x10\n"
+ "ld1 { v10.4s }, [x28], #0x10\n"
+ "ld1 { v13.4s }, [x23], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "ld1 { v26.4s }, [x20], #0x10\n"
+ "ld1 { v28.4s }, [x19], #0x10\n"
+ "ld1 { v29.4s }, [x19], #0x10\n"
+ "tbz x10, #2, 189f\n"
+ "ld1 { v11.4s }, [x28], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v19.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
+ "ld1 { v27.4s }, [x20], #0x10\n"
+ "ld1 { v30.4s }, [x19], #0x10\n"
+ "tbz x10, #1, 188f\n"
+ "ldr d16, [x28], #0x8\n"
+ "mov x24, #0x38\n"
+ "ldr d15, [x23], #0x8\n"
+ "ldr d24, [x22], #0x8\n"
+ "ldr d23, [x21], #0x8\n"
+ "ldr d6, [x20], #0x8\n"
+ "ldr d31, [x19], #0x8\n"
+ "tbz x10, #0, 195f\n"
+ "ld1 { v16.s }[2], [x28]\n"
+ "ld1 { v15.s }[2], [x23]\n"
+ "ld1 { v24.s }[2], [x22]\n"
+ "ld1 { v23.s }[2], [x21]\n"
+ "ld1 { v6.s }[2], [x20]\n"
+ "ld1 { v31.s }[2], [x19]\n"
"b 195f\n"
"188:" // Height 6: Partial accumulate: partial_1_12
- "mov x25, #0x30\n"
- "tbz x11, #0, 195f\n"
- "ldr s16, [x9, #0x0]\n"
- "ldr s15, [x24, #0x0]\n"
- "ldr s24, [x23, #0x0]\n"
- "ldr s23, [x22, #0x0]\n"
- "ldr s6, [x21, #0x0]\n"
- "ldr s31, [x20, #0x0]\n"
+ "mov x24, #0x30\n"
+ "tbz x10, #0, 195f\n"
+ "ldr s16, [x28, #0x0]\n"
+ "ldr s15, [x23, #0x0]\n"
+ "ldr s24, [x22, #0x0]\n"
+ "ldr s23, [x21, #0x0]\n"
+ "ldr s6, [x20, #0x0]\n"
+ "ldr s31, [x19, #0x0]\n"
"b 195f\n"
"189:" // Height 6: Partial accumulate: partial_2_8
- "tbz x11, #1, 190f\n"
- "ldr d11, [x9], #0x8\n"
- "ldr d14, [x24], #0x8\n"
- "mov x25, #0x28\n"
- "ldr d19, [x23], #0x8\n"
- "ldr d22, [x22], #0x8\n"
- "ldr d27, [x21], #0x8\n"
- "ldr d30, [x20], #0x8\n"
- "tbz x11, #0, 195f\n"
- "ld1 { v11.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x24]\n"
- "ld1 { v19.s }[2], [x23]\n"
- "ld1 { v22.s }[2], [x22]\n"
- "ld1 { v27.s }[2], [x21]\n"
- "ld1 { v30.s }[2], [x20]\n"
+ "tbz x10, #1, 190f\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
+ "mov x24, #0x28\n"
+ "ldr d19, [x22], #0x8\n"
+ "ldr d22, [x21], #0x8\n"
+ "ldr d27, [x20], #0x8\n"
+ "ldr d30, [x19], #0x8\n"
+ "tbz x10, #0, 195f\n"
+ "ld1 { v11.s }[2], [x28]\n"
+ "ld1 { v14.s }[2], [x23]\n"
+ "ld1 { v19.s }[2], [x22]\n"
+ "ld1 { v22.s }[2], [x21]\n"
+ "ld1 { v27.s }[2], [x20]\n"
+ "ld1 { v30.s }[2], [x19]\n"
"b 195f\n"
"190:" // Height 6: Partial accumulate: partial_1_8
- "mov x25, #0x20\n"
- "tbz x11, #0, 195f\n"
- "ldr s11, [x9, #0x0]\n"
- "ldr s14, [x24, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s22, [x22, #0x0]\n"
- "ldr s27, [x21, #0x0]\n"
- "ldr s30, [x20, #0x0]\n"
+ "mov x24, #0x20\n"
+ "tbz x10, #0, 195f\n"
+ "ldr s11, [x28, #0x0]\n"
+ "ldr s14, [x23, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
+ "ldr s22, [x21, #0x0]\n"
+ "ldr s27, [x20, #0x0]\n"
+ "ldr s30, [x19, #0x0]\n"
"b 195f\n"
"191:" // Height 6: Partial accumulate: partial_4_0
- "tbz x11, #2, 193f\n"
- "ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v25.4s }, [x21], #0x10\n"
- "ld1 { v28.4s }, [x20], #0x10\n"
- "tbz x11, #1, 192f\n"
- "ldr d10, [x9], #0x8\n"
- "ldr d13, [x24], #0x8\n"
- "mov x25, #0x18\n"
- "ldr d18, [x23], #0x8\n"
- "ldr d21, [x22], #0x8\n"
- "ldr d26, [x21], #0x8\n"
- "ldr d29, [x20], #0x8\n"
- "tbz x11, #0, 195f\n"
- "ld1 { v10.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x24]\n"
- "ld1 { v18.s }[2], [x23]\n"
- "ld1 { v21.s }[2], [x22]\n"
- "ld1 { v26.s }[2], [x21]\n"
- "ld1 { v29.s }[2], [x20]\n"
+ "tbz x10, #2, 193f\n"
+ "ld1 { v9.4s }, [x28], #0x10\n"
+ "ld1 { v12.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v20.4s }, [x21], #0x10\n"
+ "ld1 { v25.4s }, [x20], #0x10\n"
+ "ld1 { v28.4s }, [x19], #0x10\n"
+ "tbz x10, #1, 192f\n"
+ "ldr d10, [x28], #0x8\n"
+ "mov x24, #0x18\n"
+ "ldr d13, [x23], #0x8\n"
+ "ldr d18, [x22], #0x8\n"
+ "ldr d21, [x21], #0x8\n"
+ "ldr d26, [x20], #0x8\n"
+ "ldr d29, [x19], #0x8\n"
+ "tbz x10, #0, 195f\n"
+ "ld1 { v10.s }[2], [x28]\n"
+ "ld1 { v13.s }[2], [x23]\n"
+ "ld1 { v18.s }[2], [x22]\n"
+ "ld1 { v21.s }[2], [x21]\n"
+ "ld1 { v26.s }[2], [x20]\n"
+ "ld1 { v29.s }[2], [x19]\n"
"b 195f\n"
"192:" // Height 6: Partial accumulate: partial_1_4
- "mov x25, #0x10\n"
- "tbz x11, #0, 195f\n"
- "ldr s10, [x9, #0x0]\n"
- "ldr s13, [x24, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
- "ldr s21, [x22, #0x0]\n"
- "ldr s26, [x21, #0x0]\n"
- "ldr s29, [x20, #0x0]\n"
+ "mov x24, #0x10\n"
+ "tbz x10, #0, 195f\n"
+ "ldr s10, [x28, #0x0]\n"
+ "ldr s13, [x23, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
+ "ldr s21, [x21, #0x0]\n"
+ "ldr s26, [x20, #0x0]\n"
+ "ldr s29, [x19, #0x0]\n"
"b 195f\n"
"193:" // Height 6: Partial accumulate: partial_2_0
- "tbz x11, #1, 194f\n"
- "ldr d9, [x9], #0x8\n"
- "ldr d12, [x24], #0x8\n"
- "mov x25, #0x8\n"
- "ldr d17, [x23], #0x8\n"
- "ldr d20, [x22], #0x8\n"
- "ldr d25, [x21], #0x8\n"
- "ldr d28, [x20], #0x8\n"
- "tbz x11, #0, 195f\n"
- "ld1 { v9.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x24]\n"
- "ld1 { v17.s }[2], [x23]\n"
- "ld1 { v20.s }[2], [x22]\n"
- "ld1 { v25.s }[2], [x21]\n"
- "ld1 { v28.s }[2], [x20]\n"
+ "tbz x10, #1, 194f\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d12, [x23], #0x8\n"
+ "mov x24, #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "ldr d20, [x21], #0x8\n"
+ "ldr d25, [x20], #0x8\n"
+ "ldr d28, [x19], #0x8\n"
+ "tbz x10, #0, 195f\n"
+ "ld1 { v9.s }[2], [x28]\n"
+ "ld1 { v12.s }[2], [x23]\n"
+ "ld1 { v17.s }[2], [x22]\n"
+ "ld1 { v20.s }[2], [x21]\n"
+ "ld1 { v25.s }[2], [x20]\n"
+ "ld1 { v28.s }[2], [x19]\n"
"b 195f\n"
"194:" // Height 6: Partial accumulate: partial_1_0
- "ldr s9, [x9, #0x0]\n"
- "ldr s12, [x24, #0x0]\n"
- "mov x25, #0x0\n"
- "ldr s17, [x23, #0x0]\n"
- "ldr s20, [x22, #0x0]\n"
- "ldr s25, [x21, #0x0]\n"
- "ldr s28, [x20, #0x0]\n"
+ "ldr s9, [x28, #0x0]\n"
+ "mov x24, #0x0\n"
+ "ldr s12, [x23, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
+ "ldr s20, [x21, #0x0]\n"
+ "ldr s25, [x20, #0x0]\n"
+ "ldr s28, [x19, #0x0]\n"
"195:" // Height 6: Partial accumulate: Done
- "sub x9, x9, x25\n"
+ "sub x28, x28, x24\n"
"b 197f\n"
"196:" // Height 6: full accumulate
- "ldr q9, [x9, #0x0]\n"
- "ldr q10, [x9, #0x10]\n"
- "ldr q11, [x9, #0x20]\n"
- "ldr q16, [x9, #0x30]\n"
- "ldr q12, [x24, #0x0]\n"
- "ldr q13, [x24, #0x10]\n"
- "ldr q14, [x24, #0x20]\n"
- "ldr q15, [x24, #0x30]\n"
- "ldr q17, [x23, #0x0]\n"
- "ldr q18, [x23, #0x10]\n"
- "ldr q19, [x23, #0x20]\n"
- "ldr q24, [x23, #0x30]\n"
- "ldr q20, [x22, #0x0]\n"
- "ldr q21, [x22, #0x10]\n"
- "ldr q22, [x22, #0x20]\n"
- "ldr q23, [x22, #0x30]\n"
- "ldr q25, [x21, #0x0]\n"
- "ldr q26, [x21, #0x10]\n"
- "ldr q27, [x21, #0x20]\n"
- "ldr q6, [x21, #0x30]\n"
- "ldr q28, [x20, #0x0]\n"
- "ldr q29, [x20, #0x10]\n"
- "ldr q30, [x20, #0x20]\n"
- "ldr q31, [x20, #0x30]\n"
+ "ldr q9, [x28, #0x0]\n"
+ "ldr q10, [x28, #0x10]\n"
+ "ldr q11, [x28, #0x20]\n"
+ "ldr q16, [x28, #0x30]\n"
+ "ldr q12, [x23, #0x0]\n"
+ "ldr q13, [x23, #0x10]\n"
+ "ldr q14, [x23, #0x20]\n"
+ "ldr q15, [x23, #0x30]\n"
+ "ldr q17, [x22, #0x0]\n"
+ "ldr q18, [x22, #0x10]\n"
+ "ldr q19, [x22, #0x20]\n"
+ "ldr q24, [x22, #0x30]\n"
+ "ldr q20, [x21, #0x0]\n"
+ "ldr q21, [x21, #0x10]\n"
+ "ldr q22, [x21, #0x20]\n"
+ "ldr q23, [x21, #0x30]\n"
+ "ldr q25, [x20, #0x0]\n"
+ "ldr q26, [x20, #0x10]\n"
+ "ldr q27, [x20, #0x20]\n"
+ "ldr q6, [x20, #0x30]\n"
+ "ldr q28, [x19, #0x0]\n"
+ "ldr q29, [x19, #0x10]\n"
+ "ldr q30, [x19, #0x20]\n"
+ "ldr q31, [x19, #0x30]\n"
"197:" // Height 6: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -2868,219 +2880,219 @@ void a64_hybrid_u8u32_mmla_6x16 (
"movi v30.4s, #0x0\n"
"movi v31.4s, #0x0\n"
"199:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"200:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 201f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 202f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
- "add x21, x21, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 202f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
+ "add x20, x20, x19\n"
"b 202f\n"
"201:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
+ "add x20, x21, x19\n"
"202:" // Height 6: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"blt 205f\n"
- "ldr q1, [x26, #0x0]\n"
- "ldr q2, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
- "ldr q3, [x24, #0x0]\n"
- "ldr q4, [x23, #0x0]\n"
- "ldr q5, [x22, #0x0]\n"
- "ldr q6, [x21, #0x0]\n"
- "ldr q7, [x10, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "ldr q2, [x24, #0x0]\n"
+ "cmp x26, #0x20\n"
"blt 204f\n"
"203:" // Height 6: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x23, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e87a408 // ummla v8.4s, v0.16b, v7.16b\n"
- "sub x27, x27, #0x10\n"
+ "ldr q4, [x22, #0x0]\n"
+ "add x24, x24, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q5, [x21, #0x0]\n"
+ "add x23, x23, #0x10\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x6e87a450 // ummla v16.4s, v2.16b, v7.16b\n"
- "add x26, x26, #0x10\n"
+ "ldr q6, [x20, #0x0]\n"
+ "add x22, x22, #0x10\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
+ "ldr q7, [x9, #0x0]\n"
+ "add x21, x21, #0x10\n"
"trn2 v5.2d, v5.2d, v6.2d\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
+ "add x20, x20, #0x10\n"
+ ".inst 0x6e87a408 // ummla v8.4s, v0.16b, v7.16b\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x26, x26, #0x10\n"
+ ".inst 0x6e87a450 // ummla v16.4s, v2.16b, v7.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "cmp x26, #0x20\n"
".inst 0x6e87a498 // ummla v24.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x6e86a40c // ummla v12.4s, v0.16b, v6.16b\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e86a454 // ummla v20.4s, v2.16b, v6.16b\n"
- "add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e86a49c // ummla v28.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x6e87a409 // ummla v9.4s, v0.16b, v7.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x6e87a451 // ummla v17.4s, v2.16b, v7.16b\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
".inst 0x6e87a499 // ummla v25.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
- "add x23, x23, #0x10\n"
+ "ldr q7, [x9, #0x40]\n"
".inst 0x6e86a40d // ummla v13.4s, v0.16b, v6.16b\n"
".inst 0x6e86a455 // ummla v21.4s, v2.16b, v6.16b\n"
- "add x22, x22, #0x10\n"
- "add x21, x21, #0x10\n"
".inst 0x6e86a49d // ummla v29.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x6e87a40a // ummla v10.4s, v0.16b, v7.16b\n"
- "cmp x27, #0x20\n"
".inst 0x6e87a452 // ummla v18.4s, v2.16b, v7.16b\n"
".inst 0x6e87a49a // ummla v26.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x6e86a40e // ummla v14.4s, v0.16b, v6.16b\n"
".inst 0x6e86a456 // ummla v22.4s, v2.16b, v6.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e86a49e // ummla v30.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
+ "ldr q6, [x9, #0x70]\n"
".inst 0x6e87a40b // ummla v11.4s, v0.16b, v7.16b\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e87a453 // ummla v19.4s, v2.16b, v7.16b\n"
".inst 0x6e87a49b // ummla v27.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
+ "ldr q7, [x9, #0x80]\n"
".inst 0x6e86a40f // ummla v15.4s, v0.16b, v6.16b\n"
".inst 0x6e86a457 // ummla v23.4s, v2.16b, v6.16b\n"
- "ldr q2, [x25, #0x0]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
+ "ldr q2, [x24, #0x0]\n"
".inst 0x6e86a49f // ummla v31.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
- "ldr q4, [x23, #0x0]\n"
+ "ldr q6, [x9, #0x90]\n"
".inst 0x6e87a428 // ummla v8.4s, v1.16b, v7.16b\n"
".inst 0x6e87a470 // ummla v16.4s, v3.16b, v7.16b\n"
".inst 0x6e87a4b8 // ummla v24.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
+ "ldr q7, [x9, #0xa0]\n"
".inst 0x6e86a42c // ummla v12.4s, v1.16b, v6.16b\n"
".inst 0x6e86a474 // ummla v20.4s, v3.16b, v6.16b\n"
".inst 0x6e86a4bc // ummla v28.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
+ "ldr q6, [x9, #0xb0]\n"
".inst 0x6e87a429 // ummla v9.4s, v1.16b, v7.16b\n"
".inst 0x6e87a471 // ummla v17.4s, v3.16b, v7.16b\n"
".inst 0x6e87a4b9 // ummla v25.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
+ "ldr q7, [x9, #0xc0]\n"
".inst 0x6e86a42d // ummla v13.4s, v1.16b, v6.16b\n"
".inst 0x6e86a475 // ummla v21.4s, v3.16b, v6.16b\n"
".inst 0x6e86a4bd // ummla v29.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
+ "ldr q6, [x9, #0xd0]\n"
".inst 0x6e87a42a // ummla v10.4s, v1.16b, v7.16b\n"
".inst 0x6e87a472 // ummla v18.4s, v3.16b, v7.16b\n"
".inst 0x6e87a4ba // ummla v26.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
+ "ldr q7, [x9, #0xe0]\n"
".inst 0x6e86a42e // ummla v14.4s, v1.16b, v6.16b\n"
".inst 0x6e86a476 // ummla v22.4s, v3.16b, v6.16b\n"
".inst 0x6e86a4be // ummla v30.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q6, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x6e87a42b // ummla v11.4s, v1.16b, v7.16b\n"
".inst 0x6e87a473 // ummla v19.4s, v3.16b, v7.16b\n"
".inst 0x6e87a4bb // ummla v27.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0x0]\n"
".inst 0x6e86a42f // ummla v15.4s, v1.16b, v6.16b\n"
- "ldr q1, [x26, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
".inst 0x6e86a477 // ummla v23.4s, v3.16b, v6.16b\n"
- "ldr q3, [x24, #0x0]\n"
".inst 0x6e86a4bf // ummla v31.4s, v5.16b, v6.16b\n"
- "ldr q5, [x22, #0x0]\n"
- "ldr q6, [x21, #0x0]\n"
"bge 203b\n"
"204:" // Height 6: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q3, [x23, #0x0]\n"
+ "sub x26, x26, #0x10\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e87a408 // ummla v8.4s, v0.16b, v7.16b\n"
- "add x26, x26, #0x10\n"
+ "ldr q4, [x22, #0x0]\n"
+ "add x25, x25, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr q5, [x21, #0x0]\n"
+ "add x24, x24, #0x10\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x6e87a450 // ummla v16.4s, v2.16b, v7.16b\n"
- "add x25, x25, #0x10\n"
+ "ldr q6, [x20, #0x0]\n"
+ "add x23, x23, #0x10\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
+ "ldr q7, [x9, #0x0]\n"
+ "add x22, x22, #0x10\n"
"trn2 v5.2d, v5.2d, v6.2d\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x6e87a408 // ummla v8.4s, v0.16b, v7.16b\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "add x20, x20, #0x10\n"
+ ".inst 0x6e87a450 // ummla v16.4s, v2.16b, v7.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e87a498 // ummla v24.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x6e86a40c // ummla v12.4s, v0.16b, v6.16b\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e86a454 // ummla v20.4s, v2.16b, v6.16b\n"
- "add x24, x24, #0x10\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e86a49c // ummla v28.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x6e87a409 // ummla v9.4s, v0.16b, v7.16b\n"
- "add x23, x23, #0x10\n"
".inst 0x6e87a451 // ummla v17.4s, v2.16b, v7.16b\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
".inst 0x6e87a499 // ummla v25.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
- "add x22, x22, #0x10\n"
+ "ldr q7, [x9, #0x40]\n"
".inst 0x6e86a40d // ummla v13.4s, v0.16b, v6.16b\n"
".inst 0x6e86a455 // ummla v21.4s, v2.16b, v6.16b\n"
- "add x21, x21, #0x10\n"
- "sub x27, x27, #0x10\n"
".inst 0x6e86a49d // ummla v29.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x6e87a40a // ummla v10.4s, v0.16b, v7.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e87a452 // ummla v18.4s, v2.16b, v7.16b\n"
".inst 0x6e87a49a // ummla v26.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x6e86a40e // ummla v14.4s, v0.16b, v6.16b\n"
".inst 0x6e86a456 // ummla v22.4s, v2.16b, v6.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e86a49e // ummla v30.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
+ "ldr q6, [x9, #0x70]\n"
".inst 0x6e87a40b // ummla v11.4s, v0.16b, v7.16b\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e87a453 // ummla v19.4s, v2.16b, v7.16b\n"
".inst 0x6e87a49b // ummla v27.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x80]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
+ "ldr q7, [x9, #0x80]\n"
".inst 0x6e86a40f // ummla v15.4s, v0.16b, v6.16b\n"
".inst 0x6e86a457 // ummla v23.4s, v2.16b, v6.16b\n"
".inst 0x6e86a49f // ummla v31.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
+ "ldr q6, [x9, #0x90]\n"
".inst 0x6e87a428 // ummla v8.4s, v1.16b, v7.16b\n"
".inst 0x6e87a470 // ummla v16.4s, v3.16b, v7.16b\n"
".inst 0x6e87a4b8 // ummla v24.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
+ "ldr q7, [x9, #0xa0]\n"
".inst 0x6e86a42c // ummla v12.4s, v1.16b, v6.16b\n"
".inst 0x6e86a474 // ummla v20.4s, v3.16b, v6.16b\n"
".inst 0x6e86a4bc // ummla v28.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
+ "ldr q6, [x9, #0xb0]\n"
".inst 0x6e87a429 // ummla v9.4s, v1.16b, v7.16b\n"
".inst 0x6e87a471 // ummla v17.4s, v3.16b, v7.16b\n"
".inst 0x6e87a4b9 // ummla v25.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
+ "ldr q7, [x9, #0xc0]\n"
".inst 0x6e86a42d // ummla v13.4s, v1.16b, v6.16b\n"
".inst 0x6e86a475 // ummla v21.4s, v3.16b, v6.16b\n"
".inst 0x6e86a4bd // ummla v29.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
+ "ldr q6, [x9, #0xd0]\n"
".inst 0x6e87a42a // ummla v10.4s, v1.16b, v7.16b\n"
".inst 0x6e87a472 // ummla v18.4s, v3.16b, v7.16b\n"
".inst 0x6e87a4ba // ummla v26.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
+ "ldr q7, [x9, #0xe0]\n"
".inst 0x6e86a42e // ummla v14.4s, v1.16b, v6.16b\n"
".inst 0x6e86a476 // ummla v22.4s, v3.16b, v6.16b\n"
".inst 0x6e86a4be // ummla v30.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
- "add x10, x10, #0x100\n"
+ "ldr q6, [x9, #0xf0]\n"
+ "add x9, x9, #0x100\n"
".inst 0x6e87a42b // ummla v11.4s, v1.16b, v7.16b\n"
".inst 0x6e87a473 // ummla v19.4s, v3.16b, v7.16b\n"
".inst 0x6e87a4bb // ummla v27.4s, v5.16b, v7.16b\n"
@@ -3088,48 +3100,48 @@ void a64_hybrid_u8u32_mmla_6x16 (
".inst 0x6e86a477 // ummla v23.4s, v3.16b, v6.16b\n"
".inst 0x6e86a4bf // ummla v31.4s, v5.16b, v6.16b\n"
"205:" // Height 6: Multiply loop: Main loop skip
- "cbz x27, 212f\n"
- "cmp x27, #0x8\n"
+ "cbz x26, 212f\n"
+ "cmp x26, #0x8\n"
"blt 207f\n"
"206:" // Height 6: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d2, [x25], #0x8\n"
+ "ldr d1, [x25], #0x8\n"
+ "sub x26, x26, #0x8\n"
+ "ldr d2, [x24], #0x8\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
- "sub x27, x27, #0x8\n"
- "ldr d3, [x24], #0x8\n"
- "ldr d4, [x23], #0x8\n"
+ "ldr d3, [x23], #0x8\n"
+ "cmp x26, #0x8\n"
+ "ldr d4, [x22], #0x8\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
- "cmp x27, #0x8\n"
- "ldr d5, [x22], #0x8\n"
- "ldr d7, [x21], #0x8\n"
+ "ldr d5, [x21], #0x8\n"
+ "ldr d7, [x20], #0x8\n"
"trn1 v4.2d, v5.2d, v7.2d\n"
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "ldr q7, [x9, #0x10]\n"
".inst 0x6e86a408 // ummla v8.4s, v0.16b, v6.16b\n"
".inst 0x6e86a450 // ummla v16.4s, v2.16b, v6.16b\n"
".inst 0x6e86a498 // ummla v24.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x20]\n"
+ "ldr q6, [x9, #0x20]\n"
".inst 0x6e87a40c // ummla v12.4s, v0.16b, v7.16b\n"
".inst 0x6e87a454 // ummla v20.4s, v2.16b, v7.16b\n"
".inst 0x6e87a49c // ummla v28.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x30]\n"
+ "ldr q7, [x9, #0x30]\n"
".inst 0x6e86a409 // ummla v9.4s, v0.16b, v6.16b\n"
".inst 0x6e86a451 // ummla v17.4s, v2.16b, v6.16b\n"
".inst 0x6e86a499 // ummla v25.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x40]\n"
+ "ldr q6, [x9, #0x40]\n"
".inst 0x6e87a40d // ummla v13.4s, v0.16b, v7.16b\n"
".inst 0x6e87a455 // ummla v21.4s, v2.16b, v7.16b\n"
".inst 0x6e87a49d // ummla v29.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x50]\n"
+ "ldr q7, [x9, #0x50]\n"
".inst 0x6e86a40a // ummla v10.4s, v0.16b, v6.16b\n"
".inst 0x6e86a452 // ummla v18.4s, v2.16b, v6.16b\n"
".inst 0x6e86a49a // ummla v26.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x60]\n"
+ "ldr q6, [x9, #0x60]\n"
".inst 0x6e87a40e // ummla v14.4s, v0.16b, v7.16b\n"
".inst 0x6e87a456 // ummla v22.4s, v2.16b, v7.16b\n"
".inst 0x6e87a49e // ummla v30.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x70]\n"
- "add x10, x10, #0x80\n"
+ "ldr q7, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
".inst 0x6e86a40b // ummla v11.4s, v0.16b, v6.16b\n"
".inst 0x6e86a453 // ummla v19.4s, v2.16b, v6.16b\n"
".inst 0x6e86a49b // ummla v27.4s, v4.16b, v6.16b\n"
@@ -3137,128 +3149,128 @@ void a64_hybrid_u8u32_mmla_6x16 (
".inst 0x6e87a457 // ummla v23.4s, v2.16b, v7.16b\n"
".inst 0x6e87a49f // ummla v31.4s, v4.16b, v7.16b\n"
"bge 206b\n"
+ "cbz x26, 212f\n"
"207:" // Height 6: Multiply loop: Skip odd blocks
- "cbz x27, 212f\n"
- "tbz x27, #2, 209f\n"
- "ldr s1, [x26], #0x4\n"
- "ldr s2, [x25], #0x4\n"
- "ldr s3, [x24], #0x4\n"
- "ldr s4, [x23], #0x4\n"
- "ldr s5, [x22], #0x4\n"
- "ldr s6, [x21], #0x4\n"
- "tbz x27, #1, 208f\n"
- "ld1 { v1.h }[2], [x26], #0x2\n"
- "ld1 { v2.h }[2], [x25], #0x2\n"
- "ld1 { v3.h }[2], [x24], #0x2\n"
- "ld1 { v4.h }[2], [x23], #0x2\n"
- "ld1 { v5.h }[2], [x22], #0x2\n"
- "ld1 { v6.h }[2], [x21], #0x2\n"
- "tbz x27, #0, 211f\n"
- "ld1 { v1.b }[6], [x26]\n"
- "ld1 { v2.b }[6], [x25]\n"
- "ld1 { v3.b }[6], [x24]\n"
- "ld1 { v4.b }[6], [x23]\n"
- "ld1 { v5.b }[6], [x22]\n"
- "ld1 { v6.b }[6], [x21]\n"
+ "tbz x26, #2, 209f\n"
+ "ldr s1, [x25], #0x4\n"
+ "ldr s2, [x24], #0x4\n"
+ "ldr s3, [x23], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
+ "ldr s5, [x21], #0x4\n"
+ "ldr s6, [x20], #0x4\n"
+ "tbz x26, #1, 208f\n"
+ "ld1 { v1.h }[2], [x25], #0x2\n"
+ "ld1 { v2.h }[2], [x24], #0x2\n"
+ "ld1 { v3.h }[2], [x23], #0x2\n"
+ "ld1 { v4.h }[2], [x22], #0x2\n"
+ "ld1 { v5.h }[2], [x21], #0x2\n"
+ "ld1 { v6.h }[2], [x20], #0x2\n"
+ "tbz x26, #0, 211f\n"
+ "ld1 { v1.b }[6], [x25]\n"
+ "ld1 { v2.b }[6], [x24]\n"
+ "ld1 { v3.b }[6], [x23]\n"
+ "ld1 { v4.b }[6], [x22]\n"
+ "ld1 { v5.b }[6], [x21]\n"
+ "ld1 { v6.b }[6], [x20]\n"
"b 211f\n"
"208:" // Height 6: Multiply loop: Ragged operand read: partial_1_4
- "tbz x27, #0, 211f\n"
- "ld1 { v1.b }[4], [x26]\n"
- "ld1 { v2.b }[4], [x25]\n"
- "ld1 { v3.b }[4], [x24]\n"
- "ld1 { v4.b }[4], [x23]\n"
- "ld1 { v5.b }[4], [x22]\n"
- "ld1 { v6.b }[4], [x21]\n"
+ "tbz x26, #0, 211f\n"
+ "ld1 { v1.b }[4], [x25]\n"
+ "ld1 { v2.b }[4], [x24]\n"
+ "ld1 { v3.b }[4], [x23]\n"
+ "ld1 { v4.b }[4], [x22]\n"
+ "ld1 { v5.b }[4], [x21]\n"
+ "ld1 { v6.b }[4], [x20]\n"
"b 211f\n"
"209:" // Height 6: Multiply loop: Ragged operand read: partial_2_0
- "tbz x27, #1, 210f\n"
- "ldr h1, [x26], #0x2\n"
- "ldr h2, [x25], #0x2\n"
- "ldr h3, [x24], #0x2\n"
- "ldr h4, [x23], #0x2\n"
- "ldr h5, [x22], #0x2\n"
- "ldr h6, [x21], #0x2\n"
- "tbz x27, #0, 211f\n"
- "ld1 { v1.b }[2], [x26]\n"
- "ld1 { v2.b }[2], [x25]\n"
- "ld1 { v3.b }[2], [x24]\n"
- "ld1 { v4.b }[2], [x23]\n"
- "ld1 { v5.b }[2], [x22]\n"
- "ld1 { v6.b }[2], [x21]\n"
+ "tbz x26, #1, 210f\n"
+ "ldr h1, [x25], #0x2\n"
+ "ldr h2, [x24], #0x2\n"
+ "ldr h3, [x23], #0x2\n"
+ "ldr h4, [x22], #0x2\n"
+ "ldr h5, [x21], #0x2\n"
+ "ldr h6, [x20], #0x2\n"
+ "tbz x26, #0, 211f\n"
+ "ld1 { v1.b }[2], [x25]\n"
+ "ld1 { v2.b }[2], [x24]\n"
+ "ld1 { v3.b }[2], [x23]\n"
+ "ld1 { v4.b }[2], [x22]\n"
+ "ld1 { v5.b }[2], [x21]\n"
+ "ld1 { v6.b }[2], [x20]\n"
"b 211f\n"
"210:" // Height 6: Multiply loop: Ragged operand read: partial_1_0
- "ldr b1, [x26, #0x0]\n"
- "ldr b2, [x25, #0x0]\n"
- "ldr b3, [x24, #0x0]\n"
- "ldr b4, [x23, #0x0]\n"
- "ldr b5, [x22, #0x0]\n"
- "ldr b6, [x21, #0x0]\n"
+ "ldr b1, [x25, #0x0]\n"
+ "ldr b2, [x24, #0x0]\n"
+ "ldr b3, [x23, #0x0]\n"
+ "ldr b4, [x22, #0x0]\n"
+ "ldr b5, [x21, #0x0]\n"
+ "ldr b6, [x20, #0x0]\n"
"211:" // Height 6: Multiply loop: Ragged operand read: Done
- "ldr q7, [x10, #0x0]\n"
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q7, [x9, #0x0]\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
- ".inst 0x6e87a408 // ummla v8.4s, v0.16b, v7.16b\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q6, [x9, #0x10]\n"
+ ".inst 0x6e87a408 // ummla v8.4s, v0.16b, v7.16b\n"
".inst 0x6e87a450 // ummla v16.4s, v2.16b, v7.16b\n"
".inst 0x6e87a498 // ummla v24.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
+ "ldr q7, [x9, #0x20]\n"
".inst 0x6e86a40c // ummla v12.4s, v0.16b, v6.16b\n"
".inst 0x6e86a454 // ummla v20.4s, v2.16b, v6.16b\n"
".inst 0x6e86a49c // ummla v28.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
+ "ldr q6, [x9, #0x30]\n"
".inst 0x6e87a409 // ummla v9.4s, v0.16b, v7.16b\n"
".inst 0x6e87a451 // ummla v17.4s, v2.16b, v7.16b\n"
".inst 0x6e87a499 // ummla v25.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
+ "ldr q7, [x9, #0x40]\n"
".inst 0x6e86a40d // ummla v13.4s, v0.16b, v6.16b\n"
".inst 0x6e86a455 // ummla v21.4s, v2.16b, v6.16b\n"
".inst 0x6e86a49d // ummla v29.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
+ "ldr q6, [x9, #0x50]\n"
".inst 0x6e87a40a // ummla v10.4s, v0.16b, v7.16b\n"
".inst 0x6e87a452 // ummla v18.4s, v2.16b, v7.16b\n"
".inst 0x6e87a49a // ummla v26.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
+ "ldr q7, [x9, #0x60]\n"
".inst 0x6e86a40e // ummla v14.4s, v0.16b, v6.16b\n"
".inst 0x6e86a456 // ummla v22.4s, v2.16b, v6.16b\n"
".inst 0x6e86a49e // ummla v30.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
+ "ldr q6, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
".inst 0x6e87a40b // ummla v11.4s, v0.16b, v7.16b\n"
- "add x10, x10, #0x80\n"
".inst 0x6e87a453 // ummla v19.4s, v2.16b, v7.16b\n"
".inst 0x6e87a49b // ummla v27.4s, v4.16b, v7.16b\n"
".inst 0x6e86a40f // ummla v15.4s, v0.16b, v6.16b\n"
".inst 0x6e86a457 // ummla v23.4s, v2.16b, v6.16b\n"
".inst 0x6e86a49f // ummla v31.4s, v4.16b, v6.16b\n"
"212:" // Height 6: Multiply loop: No odd multiplies
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 200b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "cmp x10, #0x10\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
- "add x20, x21, x20, LSL #2\n"
- "cmp x11, #0x10\n"
+ "add x23, x28, x19, LSL #2\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
+ "add x22, x23, x19, LSL #2\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x19, LSL #2\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
+ "add x20, x21, x19, LSL #2\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x20, #0x0]\n"
+ "add x19, x20, x19, LSL #2\n"
"uzp1 v15.2d, v16.2d, v20.2d\n"
- "prfm pstl1keep, [x23, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
+ "prfm pstl1keep, [x19, #0x0]\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
- "prfm pstl1keep, [x21, #0x0]\n"
- "prfm pstl1keep, [x20, #0x0]\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
"uzp2 v18.2d, v18.2d, v22.2d\n"
@@ -3273,177 +3285,177 @@ void a64_hybrid_u8u32_mmla_6x16 (
"uzp1 v30.2d, v27.2d, v31.2d\n"
"uzp2 v27.2d, v27.2d, v31.2d\n"
"bge 221f\n"
- "tbz x11, #3, 216f\n"
- "st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x24], #0x10\n"
- "st1 { v9.4s }, [x24], #0x10\n"
- "st1 { v15.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v16.4s }, [x22], #0x10\n"
- "st1 { v17.4s }, [x22], #0x10\n"
- "st1 { v23.4s }, [x21], #0x10\n"
- "st1 { v28.4s }, [x21], #0x10\n"
- "st1 { v24.4s }, [x20], #0x10\n"
- "st1 { v25.4s }, [x20], #0x10\n"
- "tbz x11, #2, 214f\n"
- "st1 { v13.4s }, [x9], #0x10\n"
- "st1 { v10.4s }, [x24], #0x10\n"
- "st1 { v21.4s }, [x23], #0x10\n"
- "st1 { v18.4s }, [x22], #0x10\n"
- "st1 { v29.4s }, [x21], #0x10\n"
- "st1 { v26.4s }, [x20], #0x10\n"
- "tbz x11, #1, 213f\n"
- "str d14, [x9], #0x8\n"
- "str d11, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
- "str d19, [x22], #0x8\n"
- "str d30, [x21], #0x8\n"
- "str d27, [x20], #0x8\n"
- "tbz x11, #0, 220f\n"
- "st1 { v14.s }[2], [x9]\n"
- "st1 { v11.s }[2], [x24]\n"
- "st1 { v22.s }[2], [x23]\n"
- "st1 { v19.s }[2], [x22]\n"
- "st1 { v30.s }[2], [x21]\n"
- "st1 { v27.s }[2], [x20]\n"
+ "tbz x10, #3, 216f\n"
+ "st1 { v7.4s }, [x28], #0x10\n"
+ "st1 { v12.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x23], #0x10\n"
+ "st1 { v9.4s }, [x23], #0x10\n"
+ "st1 { v15.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v16.4s }, [x21], #0x10\n"
+ "st1 { v17.4s }, [x21], #0x10\n"
+ "st1 { v23.4s }, [x20], #0x10\n"
+ "st1 { v28.4s }, [x20], #0x10\n"
+ "st1 { v24.4s }, [x19], #0x10\n"
+ "st1 { v25.4s }, [x19], #0x10\n"
+ "tbz x10, #2, 214f\n"
+ "st1 { v13.4s }, [x28], #0x10\n"
+ "st1 { v10.4s }, [x23], #0x10\n"
+ "st1 { v21.4s }, [x22], #0x10\n"
+ "st1 { v18.4s }, [x21], #0x10\n"
+ "st1 { v29.4s }, [x20], #0x10\n"
+ "st1 { v26.4s }, [x19], #0x10\n"
+ "tbz x10, #1, 213f\n"
+ "str d14, [x28], #0x8\n"
+ "str d11, [x23], #0x8\n"
+ "str d22, [x22], #0x8\n"
+ "str d19, [x21], #0x8\n"
+ "str d30, [x20], #0x8\n"
+ "str d27, [x19], #0x8\n"
+ "tbz x10, #0, 220f\n"
+ "st1 { v14.s }[2], [x28]\n"
+ "st1 { v11.s }[2], [x23]\n"
+ "st1 { v22.s }[2], [x22]\n"
+ "st1 { v19.s }[2], [x21]\n"
+ "st1 { v30.s }[2], [x20]\n"
+ "st1 { v27.s }[2], [x19]\n"
"b 220f\n"
"213:" // Height 6: Partial direct writeback: partial_1_12
- "tbz x11, #0, 220f\n"
- "str s14, [x9, #0x0]\n"
- "str s11, [x24, #0x0]\n"
- "str s22, [x23, #0x0]\n"
- "str s19, [x22, #0x0]\n"
- "str s30, [x21, #0x0]\n"
- "str s27, [x20, #0x0]\n"
+ "tbz x10, #0, 220f\n"
+ "str s14, [x28, #0x0]\n"
+ "str s11, [x23, #0x0]\n"
+ "str s22, [x22, #0x0]\n"
+ "str s19, [x21, #0x0]\n"
+ "str s30, [x20, #0x0]\n"
+ "str s27, [x19, #0x0]\n"
"b 220f\n"
"214:" // Height 6: Partial direct writeback: partial_2_8
- "tbz x11, #1, 215f\n"
- "str d13, [x9], #0x8\n"
- "str d10, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
- "str d18, [x22], #0x8\n"
- "str d29, [x21], #0x8\n"
- "str d26, [x20], #0x8\n"
- "tbz x11, #0, 220f\n"
- "st1 { v13.s }[2], [x9]\n"
- "st1 { v10.s }[2], [x24]\n"
- "st1 { v21.s }[2], [x23]\n"
- "st1 { v18.s }[2], [x22]\n"
- "st1 { v29.s }[2], [x21]\n"
- "st1 { v26.s }[2], [x20]\n"
+ "tbz x10, #1, 215f\n"
+ "str d13, [x28], #0x8\n"
+ "str d10, [x23], #0x8\n"
+ "str d21, [x22], #0x8\n"
+ "str d18, [x21], #0x8\n"
+ "str d29, [x20], #0x8\n"
+ "str d26, [x19], #0x8\n"
+ "tbz x10, #0, 220f\n"
+ "st1 { v13.s }[2], [x28]\n"
+ "st1 { v10.s }[2], [x23]\n"
+ "st1 { v21.s }[2], [x22]\n"
+ "st1 { v18.s }[2], [x21]\n"
+ "st1 { v29.s }[2], [x20]\n"
+ "st1 { v26.s }[2], [x19]\n"
"b 220f\n"
"215:" // Height 6: Partial direct writeback: partial_1_8
- "tbz x11, #0, 220f\n"
- "str s13, [x9, #0x0]\n"
- "str s10, [x24, #0x0]\n"
- "str s21, [x23, #0x0]\n"
- "str s18, [x22, #0x0]\n"
- "str s29, [x21, #0x0]\n"
- "str s26, [x20, #0x0]\n"
+ "tbz x10, #0, 220f\n"
+ "str s13, [x28, #0x0]\n"
+ "str s10, [x23, #0x0]\n"
+ "str s21, [x22, #0x0]\n"
+ "str s18, [x21, #0x0]\n"
+ "str s29, [x20, #0x0]\n"
+ "str s26, [x19, #0x0]\n"
"b 220f\n"
"216:" // Height 6: Partial direct writeback: partial_4_0
- "tbz x11, #2, 218f\n"
- "st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x24], #0x10\n"
- "st1 { v15.4s }, [x23], #0x10\n"
- "st1 { v16.4s }, [x22], #0x10\n"
- "st1 { v23.4s }, [x21], #0x10\n"
- "st1 { v24.4s }, [x20], #0x10\n"
- "tbz x11, #1, 217f\n"
- "str d12, [x9], #0x8\n"
- "str d9, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d17, [x22], #0x8\n"
- "str d28, [x21], #0x8\n"
- "str d25, [x20], #0x8\n"
- "tbz x11, #0, 220f\n"
- "st1 { v12.s }[2], [x9]\n"
- "st1 { v9.s }[2], [x24]\n"
- "st1 { v20.s }[2], [x23]\n"
- "st1 { v17.s }[2], [x22]\n"
- "st1 { v28.s }[2], [x21]\n"
- "st1 { v25.s }[2], [x20]\n"
+ "tbz x10, #2, 218f\n"
+ "st1 { v7.4s }, [x28], #0x10\n"
+ "st1 { v8.4s }, [x23], #0x10\n"
+ "st1 { v15.4s }, [x22], #0x10\n"
+ "st1 { v16.4s }, [x21], #0x10\n"
+ "st1 { v23.4s }, [x20], #0x10\n"
+ "st1 { v24.4s }, [x19], #0x10\n"
+ "tbz x10, #1, 217f\n"
+ "str d12, [x28], #0x8\n"
+ "str d9, [x23], #0x8\n"
+ "str d20, [x22], #0x8\n"
+ "str d17, [x21], #0x8\n"
+ "str d28, [x20], #0x8\n"
+ "str d25, [x19], #0x8\n"
+ "tbz x10, #0, 220f\n"
+ "st1 { v12.s }[2], [x28]\n"
+ "st1 { v9.s }[2], [x23]\n"
+ "st1 { v20.s }[2], [x22]\n"
+ "st1 { v17.s }[2], [x21]\n"
+ "st1 { v28.s }[2], [x20]\n"
+ "st1 { v25.s }[2], [x19]\n"
"b 220f\n"
"217:" // Height 6: Partial direct writeback: partial_1_4
- "tbz x11, #0, 220f\n"
- "str s12, [x9, #0x0]\n"
- "str s9, [x24, #0x0]\n"
- "str s20, [x23, #0x0]\n"
- "str s17, [x22, #0x0]\n"
- "str s28, [x21, #0x0]\n"
- "str s25, [x20, #0x0]\n"
+ "tbz x10, #0, 220f\n"
+ "str s12, [x28, #0x0]\n"
+ "str s9, [x23, #0x0]\n"
+ "str s20, [x22, #0x0]\n"
+ "str s17, [x21, #0x0]\n"
+ "str s28, [x20, #0x0]\n"
+ "str s25, [x19, #0x0]\n"
"b 220f\n"
"218:" // Height 6: Partial direct writeback: partial_2_0
- "tbz x11, #1, 219f\n"
- "str d7, [x9], #0x8\n"
- "str d8, [x24], #0x8\n"
- "str d15, [x23], #0x8\n"
- "str d16, [x22], #0x8\n"
- "str d23, [x21], #0x8\n"
- "str d24, [x20], #0x8\n"
- "tbz x11, #0, 220f\n"
- "st1 { v7.s }[2], [x9]\n"
- "st1 { v8.s }[2], [x24]\n"
- "st1 { v15.s }[2], [x23]\n"
- "st1 { v16.s }[2], [x22]\n"
- "st1 { v23.s }[2], [x21]\n"
- "st1 { v24.s }[2], [x20]\n"
+ "tbz x10, #1, 219f\n"
+ "str d7, [x28], #0x8\n"
+ "str d8, [x23], #0x8\n"
+ "str d15, [x22], #0x8\n"
+ "str d16, [x21], #0x8\n"
+ "str d23, [x20], #0x8\n"
+ "str d24, [x19], #0x8\n"
+ "tbz x10, #0, 220f\n"
+ "st1 { v7.s }[2], [x28]\n"
+ "st1 { v8.s }[2], [x23]\n"
+ "st1 { v15.s }[2], [x22]\n"
+ "st1 { v16.s }[2], [x21]\n"
+ "st1 { v23.s }[2], [x20]\n"
+ "st1 { v24.s }[2], [x19]\n"
"b 220f\n"
"219:" // Height 6: Partial direct writeback: partial_1_0
- "str s7, [x9, #0x0]\n"
- "str s8, [x24, #0x0]\n"
- "str s15, [x23, #0x0]\n"
- "str s16, [x22, #0x0]\n"
- "str s23, [x21, #0x0]\n"
- "str s24, [x20, #0x0]\n"
+ "str s7, [x28, #0x0]\n"
+ "str s8, [x23, #0x0]\n"
+ "str s15, [x22, #0x0]\n"
+ "str s16, [x21, #0x0]\n"
+ "str s23, [x20, #0x0]\n"
+ "str s24, [x19, #0x0]\n"
"220:" // Height 6: Partial direct writeback: Done
"b 222f\n"
"221:" // Height 6: Full writeback
- "str q7, [x9, #0x0]\n"
- "str q12, [x9, #0x10]\n"
- "str q13, [x9, #0x20]\n"
- "str q14, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
- "str q8, [x24, #0x0]\n"
- "str q9, [x24, #0x10]\n"
- "str q10, [x24, #0x20]\n"
- "str q11, [x24, #0x30]\n"
- "str q15, [x23, #0x0]\n"
- "str q20, [x23, #0x10]\n"
- "str q21, [x23, #0x20]\n"
- "str q22, [x23, #0x30]\n"
- "str q16, [x22, #0x0]\n"
- "str q17, [x22, #0x10]\n"
- "str q18, [x22, #0x20]\n"
- "str q19, [x22, #0x30]\n"
- "str q23, [x21, #0x0]\n"
- "str q28, [x21, #0x10]\n"
- "str q29, [x21, #0x20]\n"
- "str q30, [x21, #0x30]\n"
- "str q24, [x20, #0x0]\n"
- "str q25, [x20, #0x10]\n"
- "str q26, [x20, #0x20]\n"
- "str q27, [x20, #0x30]\n"
+ "str q7, [x28, #0x0]\n"
+ "str q12, [x28, #0x10]\n"
+ "str q13, [x28, #0x20]\n"
+ "str q14, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "str q8, [x23, #0x0]\n"
+ "str q9, [x23, #0x10]\n"
+ "str q10, [x23, #0x20]\n"
+ "str q11, [x23, #0x30]\n"
+ "str q15, [x22, #0x0]\n"
+ "str q20, [x22, #0x10]\n"
+ "str q21, [x22, #0x20]\n"
+ "str q22, [x22, #0x30]\n"
+ "str q16, [x21, #0x0]\n"
+ "str q17, [x21, #0x10]\n"
+ "str q18, [x21, #0x20]\n"
+ "str q19, [x21, #0x30]\n"
+ "str q23, [x20, #0x0]\n"
+ "str q28, [x20, #0x10]\n"
+ "str q29, [x20, #0x20]\n"
+ "str q30, [x20, #0x30]\n"
+ "str q24, [x19, #0x0]\n"
+ "str q25, [x19, #0x10]\n"
+ "str q26, [x19, #0x20]\n"
+ "str q27, [x19, #0x30]\n"
"222:" // Height 6: Writeback done
- "subs x11, x11, #0x10\n"
+ "subs x10, x10, #0x10\n"
"bgt 187b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 224f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 223f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"223:" // Update direct input
- "mov x20, #0x6\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x6\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"224:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_dot_8x12/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_dot_8x12/generic.cpp
index b3bde74635..5689f89781 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_dot_8x12/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_dot_8x12/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -33,108 +33,108 @@ void a64_interleaved_bf16fp32_dot_8x12(
float *Cpanel, int ablocks, int bblocks, int K) {
struct KernelArgs {
+ size_t bblocks = {};
size_t K = {};
const bfloat16 *Bpanel = {};
- size_t bblocks = {};
} ka;
+ ka.bblocks = bblocks;
ka.K = (K/2) - 1;
ka.Bpanel = Bpanel;
- ka.bblocks = bblocks;
__asm__ __volatile__(
"1:" // Height loop
- "ldr x23, [%x[args_ptr], %[offsetof_bblocks]]\n"
- "ldr x22, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x22, [%x[args_ptr], %[offsetof_bblocks]]\n"
"mov x21, %x[Apanel]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_Bpanel]]\n"
"2:" // Width loop
- "ldr q4, [x22, #0x0]\n"
- "ldr q5, [x22, #0x10]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
"mov %x[Apanel], x21\n"
- "ldr q0, [%x[Apanel], #0x0]\n"
- "ldr q1, [%x[Apanel], #0x10]\n"
+ "cmp x19, #0x2\n"
"movi v8.16b, #0x0\n"
- "ldr q6, [x22, #0x20]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "cmp x20, #0x2\n"
"movi v9.16b, #0x0\n"
"prfm pldl1keep, [%x[Apanel], #0x0]\n"
"movi v10.16b, #0x0\n"
"movi v11.16b, #0x0\n"
- "prfm pldl1keep, [x22, #0x0]\n"
+ "prfm pldl1keep, [x20, #0x0]\n"
"movi v12.16b, #0x0\n"
"movi v13.16b, #0x0\n"
- "prfm pldl1keep, [x22, #0x40]\n"
+ "prfm pldl1keep, [x20, #0x40]\n"
"movi v14.16b, #0x0\n"
"movi v15.16b, #0x0\n"
"prfm pldl1keep, [%x[Apanel], #0x40]\n"
"movi v16.16b, #0x0\n"
"movi v17.16b, #0x0\n"
- "prfm pldl1keep, [x22, #0x80]\n"
+ "prfm pldl1keep, [x20, #0x80]\n"
"movi v18.16b, #0x0\n"
"movi v19.16b, #0x0\n"
+ "ldr q0, [%x[Apanel], #0x0]\n"
"movi v20.16b, #0x0\n"
"movi v21.16b, #0x0\n"
+ "ldr q1, [%x[Apanel], #0x10]\n"
"movi v22.16b, #0x0\n"
"movi v23.16b, #0x0\n"
+ "ldr q4, [x20, #0x0]\n"
"movi v24.16b, #0x0\n"
"movi v25.16b, #0x0\n"
+ "ldr q5, [x20, #0x10]\n"
"movi v26.16b, #0x0\n"
"movi v27.16b, #0x0\n"
+ "ldr q6, [x20, #0x20]\n"
"movi v28.16b, #0x0\n"
"movi v29.16b, #0x0\n"
"movi v30.16b, #0x0\n"
"movi v31.16b, #0x0\n"
"blt 4f\n"
"3:" // main loop head
- "ldr q2, [%x[Apanel], #0x20]\n"
- "ldr q3, [%x[Apanel], #0x30]\n"
".inst 0x4f40f088 // bfdot v8.4s, v4.8h, v0.h[0]\n"
".inst 0x4f60f08b // bfdot v11.4s, v4.8h, v0.h[1]\n"
+ "ldr q2, [%x[Apanel], #0x20]\n"
".inst 0x4f40f88e // bfdot v14.4s, v4.8h, v0.h[2]\n"
- "sub x20, x20, #0x2\n"
".inst 0x4f60f891 // bfdot v17.4s, v4.8h, v0.h[3]\n"
+ "ldr q3, [%x[Apanel], #0x30]\n"
".inst 0x4f41f094 // bfdot v20.4s, v4.8h, v1.h[0]\n"
- "cmp x20, #0x2\n"
".inst 0x4f61f097 // bfdot v23.4s, v4.8h, v1.h[1]\n"
+ "sub x19, x19, #0x2\n"
".inst 0x4f41f89a // bfdot v26.4s, v4.8h, v1.h[2]\n"
- "prfm pldl1keep, [%x[Apanel], #0x80]\n"
".inst 0x4f61f89d // bfdot v29.4s, v4.8h, v1.h[3]\n"
- "ldr q4, [x22, #0x30]\n"
+ "ldr q4, [x20, #0x30]\n"
".inst 0x4f40f0a9 // bfdot v9.4s, v5.8h, v0.h[0]\n"
".inst 0x4f60f0ac // bfdot v12.4s, v5.8h, v0.h[1]\n"
+ "cmp x19, #0x2\n"
".inst 0x4f40f8af // bfdot v15.4s, v5.8h, v0.h[2]\n"
- "add %x[Apanel], %x[Apanel], #0x40\n"
".inst 0x4f60f8b2 // bfdot v18.4s, v5.8h, v0.h[3]\n"
+ "prfm pldl1keep, [%x[Apanel], #0x80]\n"
".inst 0x4f41f0b5 // bfdot v21.4s, v5.8h, v1.h[0]\n"
- "prfm pldl1keep, [x22, #0x100]\n"
".inst 0x4f61f0b8 // bfdot v24.4s, v5.8h, v1.h[1]\n"
+ "add %x[Apanel], %x[Apanel], #0x40\n"
".inst 0x4f41f8bb // bfdot v27.4s, v5.8h, v1.h[2]\n"
- "prfm pldl1keep, [x22, #0x140]\n"
".inst 0x4f61f8be // bfdot v30.4s, v5.8h, v1.h[3]\n"
- "ldr q5, [x22, #0x40]\n"
+ "ldr q5, [x20, #0x40]\n"
".inst 0x4f40f0ca // bfdot v10.4s, v6.8h, v0.h[0]\n"
".inst 0x4f60f0cd // bfdot v13.4s, v6.8h, v0.h[1]\n"
+ "prfm pldl1keep, [x20, #0x100]\n"
".inst 0x4f40f8d0 // bfdot v16.4s, v6.8h, v0.h[2]\n"
".inst 0x4f60f8d3 // bfdot v19.4s, v6.8h, v0.h[3]\n"
- "ldr q0, [%x[Apanel], #0x0]\n"
+ "prfm pldl1keep, [x20, #0x140]\n"
".inst 0x4f41f0d6 // bfdot v22.4s, v6.8h, v1.h[0]\n"
".inst 0x4f61f0d9 // bfdot v25.4s, v6.8h, v1.h[1]\n"
+ "ldr q0, [%x[Apanel], #0x0]\n"
".inst 0x4f41f8dc // bfdot v28.4s, v6.8h, v1.h[2]\n"
".inst 0x4f61f8df // bfdot v31.4s, v6.8h, v1.h[3]\n"
- "ldr q6, [x22, #0x50]\n"
- "ldr q1, [%x[Apanel], #0x10]\n"
- "add x22, x22, #0x60\n"
+ "ldr q6, [x20, #0x50]\n"
+ "add x20, x20, #0x60\n"
".inst 0x4f42f088 // bfdot v8.4s, v4.8h, v2.h[0]\n"
".inst 0x4f62f08b // bfdot v11.4s, v4.8h, v2.h[1]\n"
+ "ldr q1, [%x[Apanel], #0x10]\n"
".inst 0x4f42f88e // bfdot v14.4s, v4.8h, v2.h[2]\n"
".inst 0x4f62f891 // bfdot v17.4s, v4.8h, v2.h[3]\n"
".inst 0x4f43f094 // bfdot v20.4s, v4.8h, v3.h[0]\n"
".inst 0x4f63f097 // bfdot v23.4s, v4.8h, v3.h[1]\n"
".inst 0x4f43f89a // bfdot v26.4s, v4.8h, v3.h[2]\n"
".inst 0x4f63f89d // bfdot v29.4s, v4.8h, v3.h[3]\n"
- "ldr q4, [x22, #0x0]\n"
+ "ldr q4, [x20, #0x0]\n"
".inst 0x4f42f0a9 // bfdot v9.4s, v5.8h, v2.h[0]\n"
".inst 0x4f62f0ac // bfdot v12.4s, v5.8h, v2.h[1]\n"
".inst 0x4f42f8af // bfdot v15.4s, v5.8h, v2.h[2]\n"
@@ -143,7 +143,7 @@ void a64_interleaved_bf16fp32_dot_8x12(
".inst 0x4f63f0b8 // bfdot v24.4s, v5.8h, v3.h[1]\n"
".inst 0x4f43f8bb // bfdot v27.4s, v5.8h, v3.h[2]\n"
".inst 0x4f63f8be // bfdot v30.4s, v5.8h, v3.h[3]\n"
- "ldr q5, [x22, #0x10]\n"
+ "ldr q5, [x20, #0x10]\n"
".inst 0x4f42f0ca // bfdot v10.4s, v6.8h, v2.h[0]\n"
".inst 0x4f62f0cd // bfdot v13.4s, v6.8h, v2.h[1]\n"
".inst 0x4f42f8d0 // bfdot v16.4s, v6.8h, v2.h[2]\n"
@@ -152,13 +152,13 @@ void a64_interleaved_bf16fp32_dot_8x12(
".inst 0x4f63f0d9 // bfdot v25.4s, v6.8h, v3.h[1]\n"
".inst 0x4f43f8dc // bfdot v28.4s, v6.8h, v3.h[2]\n"
".inst 0x4f63f8df // bfdot v31.4s, v6.8h, v3.h[3]\n"
- "ldr q6, [x22, #0x20]\n"
+ "ldr q6, [x20, #0x20]\n"
"bge 3b\n"
"4:" // main loop skip
"add %x[Apanel], %x[Apanel], #0x20\n"
".inst 0x4f40f088 // bfdot v8.4s, v4.8h, v0.h[0]\n"
".inst 0x4f60f08b // bfdot v11.4s, v4.8h, v0.h[1]\n"
- "add x22, x22, #0x30\n"
+ "add x20, x20, #0x30\n"
".inst 0x4f40f88e // bfdot v14.4s, v4.8h, v0.h[2]\n"
".inst 0x4f60f891 // bfdot v17.4s, v4.8h, v0.h[3]\n"
".inst 0x4f41f094 // bfdot v20.4s, v4.8h, v1.h[0]\n"
@@ -181,19 +181,19 @@ void a64_interleaved_bf16fp32_dot_8x12(
".inst 0x4f61f0d9 // bfdot v25.4s, v6.8h, v1.h[1]\n"
".inst 0x4f41f8dc // bfdot v28.4s, v6.8h, v1.h[2]\n"
".inst 0x4f61f8df // bfdot v31.4s, v6.8h, v1.h[3]\n"
- "cbz x20, 5f\n"
+ "cbz x19, 5f\n"
"ldr q0, [%x[Apanel], #0x0]\n"
"ldr q1, [%x[Apanel], #0x10]\n"
"add %x[Apanel], %x[Apanel], #0x20\n"
- "ldr q7, [x22, #0x0]\n"
- "ldr q4, [x22, #0x10]\n"
+ "ldr q7, [x20, #0x0]\n"
+ "ldr q4, [x20, #0x10]\n"
".inst 0x4f40f0e8 // bfdot v8.4s, v7.8h, v0.h[0]\n"
- "ldr q5, [x22, #0x20]\n"
+ "ldr q5, [x20, #0x20]\n"
".inst 0x4f60f0eb // bfdot v11.4s, v7.8h, v0.h[1]\n"
".inst 0x4f40f8ee // bfdot v14.4s, v7.8h, v0.h[2]\n"
+ "add x20, x20, #0x30\n"
".inst 0x4f60f8f1 // bfdot v17.4s, v7.8h, v0.h[3]\n"
".inst 0x4f41f0f4 // bfdot v20.4s, v7.8h, v1.h[0]\n"
- "add x22, x22, #0x30\n"
".inst 0x4f61f0f7 // bfdot v23.4s, v7.8h, v1.h[1]\n"
".inst 0x4f41f8fa // bfdot v26.4s, v7.8h, v1.h[2]\n"
".inst 0x4f61f8fd // bfdot v29.4s, v7.8h, v1.h[3]\n"
@@ -214,7 +214,7 @@ void a64_interleaved_bf16fp32_dot_8x12(
".inst 0x4f41f8bc // bfdot v28.4s, v5.8h, v1.h[2]\n"
".inst 0x4f61f8bf // bfdot v31.4s, v5.8h, v1.h[3]\n"
"5:" // multiply loop done
- "subs x23, x23, #0x1\n"
+ "subs x22, x22, #0x1\n"
"str q8, [%x[Cpanel], #0x0]\n"
"str q9, [%x[Cpanel], #0x10]\n"
"str q10, [%x[Cpanel], #0x20]\n"
@@ -245,7 +245,7 @@ void a64_interleaved_bf16fp32_dot_8x12(
"bne 1b\n"
: [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
: [args_ptr] "r" (&ka), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_bblocks] "I" (offsetof(KernelArgs, bblocks))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_mmla_8x12/a510.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_mmla_8x12/a510.cpp
index cba29bc572..0235e91bfe 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_mmla_8x12/a510.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_mmla_8x12/a510.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -33,33 +33,34 @@ void a64_interleaved_bf16fp32_mmla_8x12_a510(
float *Cpanel, int ablocks, int bblocks, int K) {
struct KernelArgs {
+ size_t bblocks = {};
size_t K = {};
const bfloat16 *Bpanel = {};
- size_t bblocks = {};
} ka;
+ ka.bblocks = bblocks;
ka.K = (K/4) - 1;
ka.Bpanel = Bpanel;
- ka.bblocks = bblocks;
__asm__ __volatile__(
"1:" // Height loop
- "ldr x23, [%x[args_ptr], %[offsetof_bblocks]]\n"
- "ldr x22, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x22, [%x[args_ptr], %[offsetof_bblocks]]\n"
"mov x21, %x[Apanel]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_Bpanel]]\n"
"2:" // Width loop
- "ldp q4, q5, [x22], #0x20\n"
"mov %x[Apanel], x21\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
"ld1 { v0.8h }, [%x[Apanel]], #0x10\n"
+ "ld1 { v4.8h }, [x20], #0x10\n"
"ld1 { v1.8h }, [%x[Apanel]], #0x10\n"
+ "cmp x19, #0x2\n"
"movi v8.16b, #0x0\n"
- "ld1 { v2.8h }, [%x[Apanel]], #0x10\n"
- "ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "cmp x20, #0x2\n"
"movi v9.16b, #0x0\n"
+ "ld1 { v5.8h }, [x20], #0x10\n"
"movi v10.16b, #0x0\n"
"movi v11.16b, #0x0\n"
+ "ld1 { v2.8h }, [%x[Apanel]], #0x10\n"
"movi v12.16b, #0x0\n"
"movi v13.16b, #0x0\n"
"movi v14.16b, #0x0\n"
@@ -84,63 +85,65 @@ void a64_interleaved_bf16fp32_mmla_8x12_a510(
"3:" // main loop head
"ld1 { v3.8h }, [%x[Apanel]], #0x10\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- ".inst 0x6e45ec0b // bfmmla v11.4s, v0.8h, v5.8h\n"
- "ldp q6, q7, [x22], #0x20\n"
".inst 0x6e44ec2e // bfmmla v14.4s, v1.8h, v4.8h\n"
+ "ldp q6, q7, [x20], #0x20\n"
+ ".inst 0x6e45ec0b // bfmmla v11.4s, v0.8h, v5.8h\n"
".inst 0x6e45ec31 // bfmmla v17.4s, v1.8h, v5.8h\n"
".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
- "sub x20, x20, #0x2\n"
+ "sub x19, x19, #0x2\n"
".inst 0x6e45ec57 // bfmmla v23.4s, v2.8h, v5.8h\n"
".inst 0x6e44ec7a // bfmmla v26.4s, v3.8h, v4.8h\n"
- "cmp x20, #0x2\n"
+ "cmp x19, #0x2\n"
".inst 0x6e45ec7d // bfmmla v29.4s, v3.8h, v5.8h\n"
- "ldp q4, q5, [x22], #0x20\n"
+ "ldp q4, q5, [x20], #0x20\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
- ".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
- ".inst 0x6e47ec32 // bfmmla v18.4s, v1.8h, v7.8h\n"
- ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
- ".inst 0x6e47ec58 // bfmmla v24.4s, v2.8h, v7.8h\n"
- ".inst 0x6e46ec7b // bfmmla v27.4s, v3.8h, v6.8h\n"
- ".inst 0x6e47ec7e // bfmmla v30.4s, v3.8h, v7.8h\n"
- "ldp q6, q7, [x22], #0x20\n"
".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
".inst 0x6e45ec0d // bfmmla v13.4s, v0.8h, v5.8h\n"
"ld1 { v0.8h }, [%x[Apanel]], #0x10\n"
+ ".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
+ ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e46ec7b // bfmmla v27.4s, v3.8h, v6.8h\n"
+ ".inst 0x6e47ec32 // bfmmla v18.4s, v1.8h, v7.8h\n"
+ "ld1 { v6.8h }, [x20], #0x10\n"
".inst 0x6e44ec30 // bfmmla v16.4s, v1.8h, v4.8h\n"
".inst 0x6e45ec33 // bfmmla v19.4s, v1.8h, v5.8h\n"
"ld1 { v1.8h }, [%x[Apanel]], #0x10\n"
+ ".inst 0x6e47ec58 // bfmmla v24.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e47ec7e // bfmmla v30.4s, v3.8h, v7.8h\n"
+ "ld1 { v7.8h }, [x20], #0x10\n"
".inst 0x6e44ec56 // bfmmla v22.4s, v2.8h, v4.8h\n"
".inst 0x6e45ec59 // bfmmla v25.4s, v2.8h, v5.8h\n"
"ld1 { v2.8h }, [%x[Apanel]], #0x10\n"
".inst 0x6e44ec7c // bfmmla v28.4s, v3.8h, v4.8h\n"
".inst 0x6e45ec7f // bfmmla v31.4s, v3.8h, v5.8h\n"
"ld1 { v3.8h }, [%x[Apanel]], #0x10\n"
+ "ldp q4, q5, [x20], #0x20\n"
".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
- "ldp q4, q5, [x22], #0x20\n"
- ".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
+ ".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec31 // bfmmla v17.4s, v1.8h, v7.8h\n"
".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
".inst 0x6e47ec57 // bfmmla v23.4s, v2.8h, v7.8h\n"
".inst 0x6e46ec7a // bfmmla v26.4s, v3.8h, v6.8h\n"
".inst 0x6e47ec7d // bfmmla v29.4s, v3.8h, v7.8h\n"
- "ldp q6, q7, [x22], #0x20\n"
+ "ldp q6, q7, [x20], #0x20\n"
".inst 0x6e44ec09 // bfmmla v9.4s, v0.8h, v4.8h\n"
".inst 0x6e45ec0c // bfmmla v12.4s, v0.8h, v5.8h\n"
- ".inst 0x6e44ec2f // bfmmla v15.4s, v1.8h, v4.8h\n"
- ".inst 0x6e45ec32 // bfmmla v18.4s, v1.8h, v5.8h\n"
- ".inst 0x6e44ec55 // bfmmla v21.4s, v2.8h, v4.8h\n"
- ".inst 0x6e45ec58 // bfmmla v24.4s, v2.8h, v5.8h\n"
- ".inst 0x6e44ec7b // bfmmla v27.4s, v3.8h, v4.8h\n"
- ".inst 0x6e45ec7e // bfmmla v30.4s, v3.8h, v5.8h\n"
- "ldp q4, q5, [x22], #0x20\n"
".inst 0x6e46ec0a // bfmmla v10.4s, v0.8h, v6.8h\n"
".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
"ld1 { v0.8h }, [%x[Apanel]], #0x10\n"
+ ".inst 0x6e44ec2f // bfmmla v15.4s, v1.8h, v4.8h\n"
+ ".inst 0x6e44ec55 // bfmmla v21.4s, v2.8h, v4.8h\n"
+ ".inst 0x6e44ec7b // bfmmla v27.4s, v3.8h, v4.8h\n"
+ ".inst 0x6e45ec32 // bfmmla v18.4s, v1.8h, v5.8h\n"
+ "ld1 { v4.8h }, [x20], #0x10\n"
".inst 0x6e46ec30 // bfmmla v16.4s, v1.8h, v6.8h\n"
".inst 0x6e47ec33 // bfmmla v19.4s, v1.8h, v7.8h\n"
"ld1 { v1.8h }, [%x[Apanel]], #0x10\n"
+ ".inst 0x6e45ec58 // bfmmla v24.4s, v2.8h, v5.8h\n"
+ ".inst 0x6e45ec7e // bfmmla v30.4s, v3.8h, v5.8h\n"
+ "ld1 { v5.8h }, [x20], #0x10\n"
".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
".inst 0x6e47ec59 // bfmmla v25.4s, v2.8h, v7.8h\n"
"ld1 { v2.8h }, [%x[Apanel]], #0x10\n"
@@ -150,65 +153,66 @@ void a64_interleaved_bf16fp32_mmla_8x12_a510(
"4:" // main loop skip
"ld1 { v3.8h }, [%x[Apanel]], #0x10\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- ".inst 0x6e45ec0b // bfmmla v11.4s, v0.8h, v5.8h\n"
- "ldp q6, q7, [x22], #0x20\n"
".inst 0x6e44ec2e // bfmmla v14.4s, v1.8h, v4.8h\n"
+ "ldp q6, q7, [x20], #0x20\n"
+ ".inst 0x6e45ec0b // bfmmla v11.4s, v0.8h, v5.8h\n"
".inst 0x6e45ec31 // bfmmla v17.4s, v1.8h, v5.8h\n"
".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
".inst 0x6e45ec57 // bfmmla v23.4s, v2.8h, v5.8h\n"
".inst 0x6e44ec7a // bfmmla v26.4s, v3.8h, v4.8h\n"
".inst 0x6e45ec7d // bfmmla v29.4s, v3.8h, v5.8h\n"
- "ldp q4, q5, [x22], #0x20\n"
+ "ldp q4, q5, [x20], #0x20\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
- ".inst 0x6e47ec32 // bfmmla v18.4s, v1.8h, v7.8h\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
- ".inst 0x6e47ec58 // bfmmla v24.4s, v2.8h, v7.8h\n"
".inst 0x6e46ec7b // bfmmla v27.4s, v3.8h, v6.8h\n"
- ".inst 0x6e47ec7e // bfmmla v30.4s, v3.8h, v7.8h\n"
+ ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
".inst 0x6e45ec0d // bfmmla v13.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e47ec32 // bfmmla v18.4s, v1.8h, v7.8h\n"
".inst 0x6e44ec30 // bfmmla v16.4s, v1.8h, v4.8h\n"
".inst 0x6e45ec33 // bfmmla v19.4s, v1.8h, v5.8h\n"
+ ".inst 0x6e47ec58 // bfmmla v24.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e47ec7e // bfmmla v30.4s, v3.8h, v7.8h\n"
".inst 0x6e44ec56 // bfmmla v22.4s, v2.8h, v4.8h\n"
".inst 0x6e45ec59 // bfmmla v25.4s, v2.8h, v5.8h\n"
".inst 0x6e44ec7c // bfmmla v28.4s, v3.8h, v4.8h\n"
".inst 0x6e45ec7f // bfmmla v31.4s, v3.8h, v5.8h\n"
- "cbz x20, 5f\n"
- "ldp q6, q7, [x22], #0x20\n"
+ "cbz x19, 5f\n"
"ld1 { v0.8h }, [%x[Apanel]], #0x10\n"
- "ld1 { v1.8h }, [%x[Apanel]], #0x10\n"
+ "ld1 { v6.8h }, [x20], #0x10\n"
".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
+ "ld1 { v1.8h }, [%x[Apanel]], #0x10\n"
+ "ld1 { v7.8h }, [x20], #0x10\n"
+ ".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
"ld1 { v2.8h }, [%x[Apanel]], #0x10\n"
"ld1 { v3.8h }, [%x[Apanel]], #0x10\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
- "ldp q4, q5, [x22], #0x20\n"
- ".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
+ "ldp q4, q5, [x20], #0x20\n"
".inst 0x6e47ec31 // bfmmla v17.4s, v1.8h, v7.8h\n"
".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
".inst 0x6e47ec57 // bfmmla v23.4s, v2.8h, v7.8h\n"
".inst 0x6e46ec7a // bfmmla v26.4s, v3.8h, v6.8h\n"
".inst 0x6e47ec7d // bfmmla v29.4s, v3.8h, v7.8h\n"
- "ldp q6, q7, [x22], #0x20\n"
+ "ldp q6, q7, [x20], #0x20\n"
".inst 0x6e44ec09 // bfmmla v9.4s, v0.8h, v4.8h\n"
- ".inst 0x6e45ec0c // bfmmla v12.4s, v0.8h, v5.8h\n"
".inst 0x6e44ec2f // bfmmla v15.4s, v1.8h, v4.8h\n"
- ".inst 0x6e45ec32 // bfmmla v18.4s, v1.8h, v5.8h\n"
".inst 0x6e44ec55 // bfmmla v21.4s, v2.8h, v4.8h\n"
- ".inst 0x6e45ec58 // bfmmla v24.4s, v2.8h, v5.8h\n"
".inst 0x6e44ec7b // bfmmla v27.4s, v3.8h, v4.8h\n"
- ".inst 0x6e45ec7e // bfmmla v30.4s, v3.8h, v5.8h\n"
+ ".inst 0x6e45ec0c // bfmmla v12.4s, v0.8h, v5.8h\n"
".inst 0x6e46ec0a // bfmmla v10.4s, v0.8h, v6.8h\n"
".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e45ec32 // bfmmla v18.4s, v1.8h, v5.8h\n"
".inst 0x6e46ec30 // bfmmla v16.4s, v1.8h, v6.8h\n"
".inst 0x6e47ec33 // bfmmla v19.4s, v1.8h, v7.8h\n"
+ ".inst 0x6e45ec58 // bfmmla v24.4s, v2.8h, v5.8h\n"
+ ".inst 0x6e45ec7e // bfmmla v30.4s, v3.8h, v5.8h\n"
".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
".inst 0x6e47ec59 // bfmmla v25.4s, v2.8h, v7.8h\n"
".inst 0x6e46ec7c // bfmmla v28.4s, v3.8h, v6.8h\n"
".inst 0x6e47ec7f // bfmmla v31.4s, v3.8h, v7.8h\n"
"5:" // multiply loop done
- "subs x23, x23, #0x1\n"
+ "subs x22, x22, #0x1\n"
"uzp1 v4.2d, v8.2d, v11.2d\n"
"uzp2 v8.2d, v8.2d, v11.2d\n"
"uzp1 v11.2d, v9.2d, v12.2d\n"
@@ -263,7 +267,7 @@ void a64_interleaved_bf16fp32_mmla_8x12_a510(
"bne 1b\n"
: [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
: [args_ptr] "r" (&ka), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_bblocks] "I" (offsetof(KernelArgs, bblocks))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_mmla_8x12/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_mmla_8x12/generic.cpp
index 2938639048..94c72a31c9 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_mmla_8x12/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_mmla_8x12/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -33,46 +33,46 @@ void a64_interleaved_bf16fp32_mmla_8x12(
float *Cpanel, int ablocks, int bblocks, int K) {
struct KernelArgs {
+ size_t bblocks = {};
size_t K = {};
const bfloat16 *Bpanel = {};
- size_t bblocks = {};
} ka;
+ ka.bblocks = bblocks;
ka.K = (K/4) - 1;
ka.Bpanel = Bpanel;
- ka.bblocks = bblocks;
__asm__ __volatile__(
"1:" // Height loop
- "ldr x23, [%x[args_ptr], %[offsetof_bblocks]]\n"
- "ldr x22, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x22, [%x[args_ptr], %[offsetof_bblocks]]\n"
"mov x21, %x[Apanel]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_Bpanel]]\n"
"2:" // Width loop
- "ldr q4, [x22, #0x0]\n"
- "ldr q5, [x22, #0x10]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
"mov %x[Apanel], x21\n"
- "ldr q0, [%x[Apanel], #0x0]\n"
- "ldr q1, [%x[Apanel], #0x10]\n"
+ "cmp x19, #0x2\n"
"movi v8.16b, #0x0\n"
- "ldr q2, [%x[Apanel], #0x20]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "cmp x20, #0x2\n"
"movi v9.16b, #0x0\n"
+ "ldr q4, [x20, #0x0]\n"
"movi v10.16b, #0x0\n"
- "add x22, x22, #0x20\n"
"movi v11.16b, #0x0\n"
+ "ldr q0, [%x[Apanel], #0x0]\n"
"movi v12.16b, #0x0\n"
- "add %x[Apanel], %x[Apanel], #0x30\n"
"movi v13.16b, #0x0\n"
+ "ldr q1, [%x[Apanel], #0x10]\n"
"movi v14.16b, #0x0\n"
"movi v15.16b, #0x0\n"
+ "ldr q5, [x20, #0x10]\n"
"movi v16.16b, #0x0\n"
"movi v17.16b, #0x0\n"
+ "ldr q2, [%x[Apanel], #0x20]\n"
"movi v18.16b, #0x0\n"
"movi v19.16b, #0x0\n"
+ "add x20, x20, #0x20\n"
"movi v20.16b, #0x0\n"
"movi v21.16b, #0x0\n"
+ "add %x[Apanel], %x[Apanel], #0x30\n"
"movi v22.16b, #0x0\n"
"movi v23.16b, #0x0\n"
"movi v24.16b, #0x0\n"
@@ -86,149 +86,149 @@ void a64_interleaved_bf16fp32_mmla_8x12(
"blt 4f\n"
"3:" // main loop head
"ldr q3, [%x[Apanel], #0x0]\n"
- "ldr q6, [x22, #0x0]\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- "ldr q7, [x22, #0x10]\n"
- ".inst 0x6e45ec0b // bfmmla v11.4s, v0.8h, v5.8h\n"
".inst 0x6e44ec2e // bfmmla v14.4s, v1.8h, v4.8h\n"
+ ".inst 0x6e45ec0b // bfmmla v11.4s, v0.8h, v5.8h\n"
".inst 0x6e45ec31 // bfmmla v17.4s, v1.8h, v5.8h\n"
+ "ldr q6, [x20, #0x0]\n"
".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
- "sub x20, x20, #0x2\n"
".inst 0x6e45ec57 // bfmmla v23.4s, v2.8h, v5.8h\n"
+ "ldr q7, [x20, #0x10]\n"
".inst 0x6e44ec7a // bfmmla v26.4s, v3.8h, v4.8h\n"
- "ldr q4, [x22, #0x20]\n"
".inst 0x6e45ec7d // bfmmla v29.4s, v3.8h, v5.8h\n"
- "ldr q5, [x22, #0x30]\n"
+ "ldr q4, [x20, #0x20]\n"
+ "ldr q5, [x20, #0x30]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
- "cmp x20, #0x2\n"
- ".inst 0x6e47ec32 // bfmmla v18.4s, v1.8h, v7.8h\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
- ".inst 0x6e47ec58 // bfmmla v24.4s, v2.8h, v7.8h\n"
".inst 0x6e46ec7b // bfmmla v27.4s, v3.8h, v6.8h\n"
- "ldr q6, [x22, #0x40]\n"
- ".inst 0x6e47ec7e // bfmmla v30.4s, v3.8h, v7.8h\n"
- "ldr q7, [x22, #0x50]\n"
+ "ldr q6, [x20, #0x40]\n"
+ ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
+ "sub x19, x19, #0x2\n"
".inst 0x6e45ec0d // bfmmla v13.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e47ec32 // bfmmla v18.4s, v1.8h, v7.8h\n"
"ldr q0, [%x[Apanel], #0x10]\n"
".inst 0x6e44ec30 // bfmmla v16.4s, v1.8h, v4.8h\n"
".inst 0x6e45ec33 // bfmmla v19.4s, v1.8h, v5.8h\n"
"ldr q1, [%x[Apanel], #0x20]\n"
+ ".inst 0x6e47ec58 // bfmmla v24.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e47ec7e // bfmmla v30.4s, v3.8h, v7.8h\n"
+ "ldr q7, [x20, #0x50]\n"
".inst 0x6e44ec56 // bfmmla v22.4s, v2.8h, v4.8h\n"
".inst 0x6e45ec59 // bfmmla v25.4s, v2.8h, v5.8h\n"
"ldr q2, [%x[Apanel], #0x30]\n"
".inst 0x6e44ec7c // bfmmla v28.4s, v3.8h, v4.8h\n"
- "ldr q4, [x22, #0x60]\n"
".inst 0x6e45ec7f // bfmmla v31.4s, v3.8h, v5.8h\n"
"ldr q3, [%x[Apanel], #0x40]\n"
- "ldr q5, [x22, #0x70]\n"
".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
- ".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
+ "ldr q4, [x20, #0x60]\n"
+ ".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec31 // bfmmla v17.4s, v1.8h, v7.8h\n"
+ "ldr q5, [x20, #0x70]\n"
".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
".inst 0x6e47ec57 // bfmmla v23.4s, v2.8h, v7.8h\n"
+ "cmp x19, #0x2\n"
".inst 0x6e46ec7a // bfmmla v26.4s, v3.8h, v6.8h\n"
- "ldr q6, [x22, #0x80]\n"
".inst 0x6e47ec7d // bfmmla v29.4s, v3.8h, v7.8h\n"
- "ldr q7, [x22, #0x90]\n"
+ "ldr q6, [x20, #0x80]\n"
+ "ldr q7, [x20, #0x90]\n"
".inst 0x6e44ec09 // bfmmla v9.4s, v0.8h, v4.8h\n"
- ".inst 0x6e45ec0c // bfmmla v12.4s, v0.8h, v5.8h\n"
".inst 0x6e44ec2f // bfmmla v15.4s, v1.8h, v4.8h\n"
- ".inst 0x6e45ec32 // bfmmla v18.4s, v1.8h, v5.8h\n"
".inst 0x6e44ec55 // bfmmla v21.4s, v2.8h, v4.8h\n"
- ".inst 0x6e45ec58 // bfmmla v24.4s, v2.8h, v5.8h\n"
".inst 0x6e44ec7b // bfmmla v27.4s, v3.8h, v4.8h\n"
- "ldr q4, [x22, #0xa0]\n"
- ".inst 0x6e45ec7e // bfmmla v30.4s, v3.8h, v5.8h\n"
- "ldr q5, [x22, #0xb0]\n"
+ "ldr q4, [x20, #0xa0]\n"
+ ".inst 0x6e45ec0c // bfmmla v12.4s, v0.8h, v5.8h\n"
".inst 0x6e46ec0a // bfmmla v10.4s, v0.8h, v6.8h\n"
".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e45ec32 // bfmmla v18.4s, v1.8h, v5.8h\n"
"ldr q0, [%x[Apanel], #0x50]\n"
".inst 0x6e46ec30 // bfmmla v16.4s, v1.8h, v6.8h\n"
".inst 0x6e47ec33 // bfmmla v19.4s, v1.8h, v7.8h\n"
"ldr q1, [%x[Apanel], #0x60]\n"
+ ".inst 0x6e45ec58 // bfmmla v24.4s, v2.8h, v5.8h\n"
+ ".inst 0x6e45ec7e // bfmmla v30.4s, v3.8h, v5.8h\n"
+ "ldr q5, [x20, #0xb0]\n"
".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
".inst 0x6e47ec59 // bfmmla v25.4s, v2.8h, v7.8h\n"
"ldr q2, [%x[Apanel], #0x70]\n"
".inst 0x6e46ec7c // bfmmla v28.4s, v3.8h, v6.8h\n"
".inst 0x6e47ec7f // bfmmla v31.4s, v3.8h, v7.8h\n"
"add %x[Apanel], %x[Apanel], #0x80\n"
- "add x22, x22, #0xc0\n"
+ "add x20, x20, #0xc0\n"
"bge 3b\n"
"4:" // main loop skip
"ldr q3, [%x[Apanel], #0x0]\n"
- "ldr q6, [x22, #0x0]\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- "ldr q7, [x22, #0x10]\n"
- ".inst 0x6e45ec0b // bfmmla v11.4s, v0.8h, v5.8h\n"
".inst 0x6e44ec2e // bfmmla v14.4s, v1.8h, v4.8h\n"
+ ".inst 0x6e45ec0b // bfmmla v11.4s, v0.8h, v5.8h\n"
".inst 0x6e45ec31 // bfmmla v17.4s, v1.8h, v5.8h\n"
+ "ldr q6, [x20, #0x0]\n"
".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
- "add %x[Apanel], %x[Apanel], #0x10\n"
".inst 0x6e45ec57 // bfmmla v23.4s, v2.8h, v5.8h\n"
+ "ldr q7, [x20, #0x10]\n"
".inst 0x6e44ec7a // bfmmla v26.4s, v3.8h, v4.8h\n"
- "ldr q4, [x22, #0x20]\n"
".inst 0x6e45ec7d // bfmmla v29.4s, v3.8h, v5.8h\n"
- "ldr q5, [x22, #0x30]\n"
+ "ldr q4, [x20, #0x20]\n"
+ "ldr q5, [x20, #0x30]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
- "add x22, x22, #0x40\n"
- ".inst 0x6e47ec32 // bfmmla v18.4s, v1.8h, v7.8h\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
- ".inst 0x6e47ec58 // bfmmla v24.4s, v2.8h, v7.8h\n"
".inst 0x6e46ec7b // bfmmla v27.4s, v3.8h, v6.8h\n"
- ".inst 0x6e47ec7e // bfmmla v30.4s, v3.8h, v7.8h\n"
+ "add %x[Apanel], %x[Apanel], #0x10\n"
+ ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
+ "add x20, x20, #0x40\n"
".inst 0x6e45ec0d // bfmmla v13.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e47ec32 // bfmmla v18.4s, v1.8h, v7.8h\n"
".inst 0x6e44ec30 // bfmmla v16.4s, v1.8h, v4.8h\n"
".inst 0x6e45ec33 // bfmmla v19.4s, v1.8h, v5.8h\n"
+ ".inst 0x6e47ec58 // bfmmla v24.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e47ec7e // bfmmla v30.4s, v3.8h, v7.8h\n"
".inst 0x6e44ec56 // bfmmla v22.4s, v2.8h, v4.8h\n"
".inst 0x6e45ec59 // bfmmla v25.4s, v2.8h, v5.8h\n"
".inst 0x6e44ec7c // bfmmla v28.4s, v3.8h, v4.8h\n"
".inst 0x6e45ec7f // bfmmla v31.4s, v3.8h, v5.8h\n"
- "cbz x20, 5f\n"
- "ldr q6, [x22, #0x0]\n"
+ "cbz x19, 5f\n"
+ "ldr q6, [x20, #0x0]\n"
"ldr q0, [%x[Apanel], #0x0]\n"
".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
"ldr q1, [%x[Apanel], #0x10]\n"
- "ldr q7, [x22, #0x10]\n"
- ".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x20, #0x10]\n"
+ ".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
"ldr q2, [%x[Apanel], #0x20]\n"
"ldr q3, [%x[Apanel], #0x30]\n"
- ".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
- "ldr q4, [x22, #0x20]\n"
- "ldr q5, [x22, #0x30]\n"
+ ".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec31 // bfmmla v17.4s, v1.8h, v7.8h\n"
".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
+ "ldr q4, [x20, #0x20]\n"
".inst 0x6e47ec57 // bfmmla v23.4s, v2.8h, v7.8h\n"
- "add %x[Apanel], %x[Apanel], #0x40\n"
".inst 0x6e46ec7a // bfmmla v26.4s, v3.8h, v6.8h\n"
- "ldr q6, [x22, #0x40]\n"
+ "ldr q5, [x20, #0x30]\n"
".inst 0x6e47ec7d // bfmmla v29.4s, v3.8h, v7.8h\n"
- "ldr q7, [x22, #0x50]\n"
+ "ldr q6, [x20, #0x40]\n"
+ "ldr q7, [x20, #0x50]\n"
".inst 0x6e44ec09 // bfmmla v9.4s, v0.8h, v4.8h\n"
- ".inst 0x6e45ec0c // bfmmla v12.4s, v0.8h, v5.8h\n"
".inst 0x6e44ec2f // bfmmla v15.4s, v1.8h, v4.8h\n"
- ".inst 0x6e45ec32 // bfmmla v18.4s, v1.8h, v5.8h\n"
- "add x22, x22, #0x60\n"
+ "add x20, x20, #0x60\n"
".inst 0x6e44ec55 // bfmmla v21.4s, v2.8h, v4.8h\n"
- ".inst 0x6e45ec58 // bfmmla v24.4s, v2.8h, v5.8h\n"
".inst 0x6e44ec7b // bfmmla v27.4s, v3.8h, v4.8h\n"
- ".inst 0x6e45ec7e // bfmmla v30.4s, v3.8h, v5.8h\n"
+ "add %x[Apanel], %x[Apanel], #0x40\n"
+ ".inst 0x6e45ec0c // bfmmla v12.4s, v0.8h, v5.8h\n"
".inst 0x6e46ec0a // bfmmla v10.4s, v0.8h, v6.8h\n"
".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e45ec32 // bfmmla v18.4s, v1.8h, v5.8h\n"
".inst 0x6e46ec30 // bfmmla v16.4s, v1.8h, v6.8h\n"
".inst 0x6e47ec33 // bfmmla v19.4s, v1.8h, v7.8h\n"
+ ".inst 0x6e45ec58 // bfmmla v24.4s, v2.8h, v5.8h\n"
+ ".inst 0x6e45ec7e // bfmmla v30.4s, v3.8h, v5.8h\n"
".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
".inst 0x6e47ec59 // bfmmla v25.4s, v2.8h, v7.8h\n"
".inst 0x6e46ec7c // bfmmla v28.4s, v3.8h, v6.8h\n"
".inst 0x6e47ec7f // bfmmla v31.4s, v3.8h, v7.8h\n"
"5:" // multiply loop done
- "subs x23, x23, #0x1\n"
+ "subs x22, x22, #0x1\n"
"uzp1 v4.2d, v8.2d, v11.2d\n"
"uzp2 v8.2d, v8.2d, v11.2d\n"
"uzp1 v11.2d, v9.2d, v12.2d\n"
@@ -283,7 +283,7 @@ void a64_interleaved_bf16fp32_mmla_8x12(
"bne 1b\n"
: [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
: [args_ptr] "r" (&ka), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_bblocks] "I" (offsetof(KernelArgs, bblocks))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_s8s32_mmla_8x12/a510.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_s8s32_mmla_8x12/a510.cpp
index e46cb8a67a..a4d8c0ace7 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_s8s32_mmla_8x12/a510.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_s8s32_mmla_8x12/a510.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -33,33 +33,34 @@ void a64_interleaved_s8s32_mmla_8x12_a510(
int32_t *Cpanel, int ablocks, int bblocks, int K) {
struct KernelArgs {
+ size_t bblocks = {};
size_t K = {};
const int8_t *Bpanel = {};
- size_t bblocks = {};
} ka;
+ ka.bblocks = bblocks;
ka.K = (K/8) - 1;
ka.Bpanel = Bpanel;
- ka.bblocks = bblocks;
__asm__ __volatile__(
"1:" // Height loop
- "ldr x23, [%x[args_ptr], %[offsetof_bblocks]]\n"
- "ldr x22, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x22, [%x[args_ptr], %[offsetof_bblocks]]\n"
"mov x21, %x[Apanel]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_Bpanel]]\n"
"2:" // Width loop
- "ldp q4, q5, [x22], #0x20\n"
"mov %x[Apanel], x21\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
"ld1 { v0.16b }, [%x[Apanel]], #0x10\n"
+ "ld1 { v4.16b }, [x20], #0x10\n"
"ld1 { v1.16b }, [%x[Apanel]], #0x10\n"
+ "cmp x19, #0x2\n"
"movi v8.4s, #0x0\n"
- "ld1 { v2.16b }, [%x[Apanel]], #0x10\n"
- "ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "cmp x20, #0x2\n"
"movi v9.4s, #0x0\n"
+ "ld1 { v5.16b }, [x20], #0x10\n"
"movi v10.4s, #0x0\n"
"movi v11.4s, #0x0\n"
+ "ld1 { v2.16b }, [%x[Apanel]], #0x10\n"
"movi v12.4s, #0x0\n"
"movi v13.4s, #0x0\n"
"movi v14.4s, #0x0\n"
@@ -84,63 +85,65 @@ void a64_interleaved_s8s32_mmla_8x12_a510(
"3:" // main loop head
"ld1 { v3.16b }, [%x[Apanel]], #0x10\n"
".inst 0x4e84a408 // smmla v8.4s, v0.16b, v4.16b\n"
- ".inst 0x4e85a40b // smmla v11.4s, v0.16b, v5.16b\n"
- "ldp q6, q7, [x22], #0x20\n"
".inst 0x4e84a42e // smmla v14.4s, v1.16b, v4.16b\n"
+ "ldp q6, q7, [x20], #0x20\n"
+ ".inst 0x4e85a40b // smmla v11.4s, v0.16b, v5.16b\n"
".inst 0x4e85a431 // smmla v17.4s, v1.16b, v5.16b\n"
".inst 0x4e84a454 // smmla v20.4s, v2.16b, v4.16b\n"
- "sub x20, x20, #0x2\n"
+ "sub x19, x19, #0x2\n"
".inst 0x4e85a457 // smmla v23.4s, v2.16b, v5.16b\n"
".inst 0x4e84a47a // smmla v26.4s, v3.16b, v4.16b\n"
- "cmp x20, #0x2\n"
+ "cmp x19, #0x2\n"
".inst 0x4e85a47d // smmla v29.4s, v3.16b, v5.16b\n"
- "ldp q4, q5, [x22], #0x20\n"
+ "ldp q4, q5, [x20], #0x20\n"
".inst 0x4e86a409 // smmla v9.4s, v0.16b, v6.16b\n"
".inst 0x4e87a40c // smmla v12.4s, v0.16b, v7.16b\n"
- ".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
- ".inst 0x4e87a432 // smmla v18.4s, v1.16b, v7.16b\n"
- ".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- ".inst 0x4e87a458 // smmla v24.4s, v2.16b, v7.16b\n"
- ".inst 0x4e86a47b // smmla v27.4s, v3.16b, v6.16b\n"
- ".inst 0x4e87a47e // smmla v30.4s, v3.16b, v7.16b\n"
- "ldp q6, q7, [x22], #0x20\n"
".inst 0x4e84a40a // smmla v10.4s, v0.16b, v4.16b\n"
".inst 0x4e85a40d // smmla v13.4s, v0.16b, v5.16b\n"
"ld1 { v0.16b }, [%x[Apanel]], #0x10\n"
+ ".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
+ ".inst 0x4e86a47b // smmla v27.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e87a432 // smmla v18.4s, v1.16b, v7.16b\n"
+ "ld1 { v6.16b }, [x20], #0x10\n"
".inst 0x4e84a430 // smmla v16.4s, v1.16b, v4.16b\n"
".inst 0x4e85a433 // smmla v19.4s, v1.16b, v5.16b\n"
"ld1 { v1.16b }, [%x[Apanel]], #0x10\n"
+ ".inst 0x4e87a458 // smmla v24.4s, v2.16b, v7.16b\n"
+ ".inst 0x4e87a47e // smmla v30.4s, v3.16b, v7.16b\n"
+ "ld1 { v7.16b }, [x20], #0x10\n"
".inst 0x4e84a456 // smmla v22.4s, v2.16b, v4.16b\n"
".inst 0x4e85a459 // smmla v25.4s, v2.16b, v5.16b\n"
"ld1 { v2.16b }, [%x[Apanel]], #0x10\n"
".inst 0x4e84a47c // smmla v28.4s, v3.16b, v4.16b\n"
".inst 0x4e85a47f // smmla v31.4s, v3.16b, v5.16b\n"
"ld1 { v3.16b }, [%x[Apanel]], #0x10\n"
+ "ldp q4, q5, [x20], #0x20\n"
".inst 0x4e86a408 // smmla v8.4s, v0.16b, v6.16b\n"
- "ldp q4, q5, [x22], #0x20\n"
- ".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
".inst 0x4e87a431 // smmla v17.4s, v1.16b, v7.16b\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
".inst 0x4e87a457 // smmla v23.4s, v2.16b, v7.16b\n"
".inst 0x4e86a47a // smmla v26.4s, v3.16b, v6.16b\n"
".inst 0x4e87a47d // smmla v29.4s, v3.16b, v7.16b\n"
- "ldp q6, q7, [x22], #0x20\n"
+ "ldp q6, q7, [x20], #0x20\n"
".inst 0x4e84a409 // smmla v9.4s, v0.16b, v4.16b\n"
".inst 0x4e85a40c // smmla v12.4s, v0.16b, v5.16b\n"
- ".inst 0x4e84a42f // smmla v15.4s, v1.16b, v4.16b\n"
- ".inst 0x4e85a432 // smmla v18.4s, v1.16b, v5.16b\n"
- ".inst 0x4e84a455 // smmla v21.4s, v2.16b, v4.16b\n"
- ".inst 0x4e85a458 // smmla v24.4s, v2.16b, v5.16b\n"
- ".inst 0x4e84a47b // smmla v27.4s, v3.16b, v4.16b\n"
- ".inst 0x4e85a47e // smmla v30.4s, v3.16b, v5.16b\n"
- "ldp q4, q5, [x22], #0x20\n"
".inst 0x4e86a40a // smmla v10.4s, v0.16b, v6.16b\n"
".inst 0x4e87a40d // smmla v13.4s, v0.16b, v7.16b\n"
"ld1 { v0.16b }, [%x[Apanel]], #0x10\n"
+ ".inst 0x4e84a42f // smmla v15.4s, v1.16b, v4.16b\n"
+ ".inst 0x4e84a455 // smmla v21.4s, v2.16b, v4.16b\n"
+ ".inst 0x4e84a47b // smmla v27.4s, v3.16b, v4.16b\n"
+ ".inst 0x4e85a432 // smmla v18.4s, v1.16b, v5.16b\n"
+ "ld1 { v4.16b }, [x20], #0x10\n"
".inst 0x4e86a430 // smmla v16.4s, v1.16b, v6.16b\n"
".inst 0x4e87a433 // smmla v19.4s, v1.16b, v7.16b\n"
"ld1 { v1.16b }, [%x[Apanel]], #0x10\n"
+ ".inst 0x4e85a458 // smmla v24.4s, v2.16b, v5.16b\n"
+ ".inst 0x4e85a47e // smmla v30.4s, v3.16b, v5.16b\n"
+ "ld1 { v5.16b }, [x20], #0x10\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
".inst 0x4e87a459 // smmla v25.4s, v2.16b, v7.16b\n"
"ld1 { v2.16b }, [%x[Apanel]], #0x10\n"
@@ -150,65 +153,66 @@ void a64_interleaved_s8s32_mmla_8x12_a510(
"4:" // main loop skip
"ld1 { v3.16b }, [%x[Apanel]], #0x10\n"
".inst 0x4e84a408 // smmla v8.4s, v0.16b, v4.16b\n"
- ".inst 0x4e85a40b // smmla v11.4s, v0.16b, v5.16b\n"
- "ldp q6, q7, [x22], #0x20\n"
".inst 0x4e84a42e // smmla v14.4s, v1.16b, v4.16b\n"
+ "ldp q6, q7, [x20], #0x20\n"
+ ".inst 0x4e85a40b // smmla v11.4s, v0.16b, v5.16b\n"
".inst 0x4e85a431 // smmla v17.4s, v1.16b, v5.16b\n"
".inst 0x4e84a454 // smmla v20.4s, v2.16b, v4.16b\n"
".inst 0x4e85a457 // smmla v23.4s, v2.16b, v5.16b\n"
".inst 0x4e84a47a // smmla v26.4s, v3.16b, v4.16b\n"
".inst 0x4e85a47d // smmla v29.4s, v3.16b, v5.16b\n"
- "ldp q4, q5, [x22], #0x20\n"
+ "ldp q4, q5, [x20], #0x20\n"
".inst 0x4e86a409 // smmla v9.4s, v0.16b, v6.16b\n"
- ".inst 0x4e87a40c // smmla v12.4s, v0.16b, v7.16b\n"
".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
- ".inst 0x4e87a432 // smmla v18.4s, v1.16b, v7.16b\n"
".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- ".inst 0x4e87a458 // smmla v24.4s, v2.16b, v7.16b\n"
".inst 0x4e86a47b // smmla v27.4s, v3.16b, v6.16b\n"
- ".inst 0x4e87a47e // smmla v30.4s, v3.16b, v7.16b\n"
+ ".inst 0x4e87a40c // smmla v12.4s, v0.16b, v7.16b\n"
".inst 0x4e84a40a // smmla v10.4s, v0.16b, v4.16b\n"
".inst 0x4e85a40d // smmla v13.4s, v0.16b, v5.16b\n"
+ ".inst 0x4e87a432 // smmla v18.4s, v1.16b, v7.16b\n"
".inst 0x4e84a430 // smmla v16.4s, v1.16b, v4.16b\n"
".inst 0x4e85a433 // smmla v19.4s, v1.16b, v5.16b\n"
+ ".inst 0x4e87a458 // smmla v24.4s, v2.16b, v7.16b\n"
+ ".inst 0x4e87a47e // smmla v30.4s, v3.16b, v7.16b\n"
".inst 0x4e84a456 // smmla v22.4s, v2.16b, v4.16b\n"
".inst 0x4e85a459 // smmla v25.4s, v2.16b, v5.16b\n"
".inst 0x4e84a47c // smmla v28.4s, v3.16b, v4.16b\n"
".inst 0x4e85a47f // smmla v31.4s, v3.16b, v5.16b\n"
- "cbz x20, 5f\n"
- "ldp q6, q7, [x22], #0x20\n"
+ "cbz x19, 5f\n"
"ld1 { v0.16b }, [%x[Apanel]], #0x10\n"
- "ld1 { v1.16b }, [%x[Apanel]], #0x10\n"
+ "ld1 { v6.16b }, [x20], #0x10\n"
".inst 0x4e86a408 // smmla v8.4s, v0.16b, v6.16b\n"
+ "ld1 { v1.16b }, [%x[Apanel]], #0x10\n"
+ "ld1 { v7.16b }, [x20], #0x10\n"
+ ".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
"ld1 { v2.16b }, [%x[Apanel]], #0x10\n"
"ld1 { v3.16b }, [%x[Apanel]], #0x10\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "ldp q4, q5, [x22], #0x20\n"
- ".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
+ "ldp q4, q5, [x20], #0x20\n"
".inst 0x4e87a431 // smmla v17.4s, v1.16b, v7.16b\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
".inst 0x4e87a457 // smmla v23.4s, v2.16b, v7.16b\n"
".inst 0x4e86a47a // smmla v26.4s, v3.16b, v6.16b\n"
".inst 0x4e87a47d // smmla v29.4s, v3.16b, v7.16b\n"
- "ldp q6, q7, [x22], #0x20\n"
+ "ldp q6, q7, [x20], #0x20\n"
".inst 0x4e84a409 // smmla v9.4s, v0.16b, v4.16b\n"
- ".inst 0x4e85a40c // smmla v12.4s, v0.16b, v5.16b\n"
".inst 0x4e84a42f // smmla v15.4s, v1.16b, v4.16b\n"
- ".inst 0x4e85a432 // smmla v18.4s, v1.16b, v5.16b\n"
".inst 0x4e84a455 // smmla v21.4s, v2.16b, v4.16b\n"
- ".inst 0x4e85a458 // smmla v24.4s, v2.16b, v5.16b\n"
".inst 0x4e84a47b // smmla v27.4s, v3.16b, v4.16b\n"
- ".inst 0x4e85a47e // smmla v30.4s, v3.16b, v5.16b\n"
+ ".inst 0x4e85a40c // smmla v12.4s, v0.16b, v5.16b\n"
".inst 0x4e86a40a // smmla v10.4s, v0.16b, v6.16b\n"
".inst 0x4e87a40d // smmla v13.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e85a432 // smmla v18.4s, v1.16b, v5.16b\n"
".inst 0x4e86a430 // smmla v16.4s, v1.16b, v6.16b\n"
".inst 0x4e87a433 // smmla v19.4s, v1.16b, v7.16b\n"
+ ".inst 0x4e85a458 // smmla v24.4s, v2.16b, v5.16b\n"
+ ".inst 0x4e85a47e // smmla v30.4s, v3.16b, v5.16b\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
".inst 0x4e87a459 // smmla v25.4s, v2.16b, v7.16b\n"
".inst 0x4e86a47c // smmla v28.4s, v3.16b, v6.16b\n"
".inst 0x4e87a47f // smmla v31.4s, v3.16b, v7.16b\n"
"5:" // multiply loop done
- "subs x23, x23, #0x1\n"
+ "subs x22, x22, #0x1\n"
"uzp1 v4.2d, v8.2d, v11.2d\n"
"uzp2 v8.2d, v8.2d, v11.2d\n"
"uzp1 v11.2d, v9.2d, v12.2d\n"
@@ -263,7 +267,7 @@ void a64_interleaved_s8s32_mmla_8x12_a510(
"bne 1b\n"
: [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
: [args_ptr] "r" (&ka), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_bblocks] "I" (offsetof(KernelArgs, bblocks))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_s8s32_mmla_8x12/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_s8s32_mmla_8x12/generic.cpp
index fc20c2fc9d..0c2722a1c2 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_s8s32_mmla_8x12/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_s8s32_mmla_8x12/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -33,46 +33,46 @@ void a64_interleaved_s8s32_mmla_8x12(
int32_t *Cpanel, int ablocks, int bblocks, int K) {
struct KernelArgs {
+ size_t bblocks = {};
size_t K = {};
const int8_t *Bpanel = {};
- size_t bblocks = {};
} ka;
+ ka.bblocks = bblocks;
ka.K = (K/8) - 1;
ka.Bpanel = Bpanel;
- ka.bblocks = bblocks;
__asm__ __volatile__(
"1:" // Height loop
- "ldr x23, [%x[args_ptr], %[offsetof_bblocks]]\n"
- "ldr x22, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x22, [%x[args_ptr], %[offsetof_bblocks]]\n"
"mov x21, %x[Apanel]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_Bpanel]]\n"
"2:" // Width loop
- "ldr q4, [x22, #0x0]\n"
- "ldr q5, [x22, #0x10]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
"mov %x[Apanel], x21\n"
- "ldr q0, [%x[Apanel], #0x0]\n"
- "ldr q1, [%x[Apanel], #0x10]\n"
+ "cmp x19, #0x4\n"
"movi v8.4s, #0x0\n"
- "ldr q2, [%x[Apanel], #0x20]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "cmp x20, #0x2\n"
"movi v9.4s, #0x0\n"
+ "ldr q4, [x20, #0x0]\n"
"movi v10.4s, #0x0\n"
- "add x22, x22, #0x20\n"
"movi v11.4s, #0x0\n"
+ "ldr q0, [%x[Apanel], #0x0]\n"
"movi v12.4s, #0x0\n"
- "add %x[Apanel], %x[Apanel], #0x30\n"
"movi v13.4s, #0x0\n"
+ "ldr q1, [%x[Apanel], #0x10]\n"
"movi v14.4s, #0x0\n"
"movi v15.4s, #0x0\n"
+ "ldr q5, [x20, #0x10]\n"
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
+ "ldr q2, [%x[Apanel], #0x20]\n"
"movi v18.4s, #0x0\n"
"movi v19.4s, #0x0\n"
+ "add x20, x20, #0x20\n"
"movi v20.4s, #0x0\n"
"movi v21.4s, #0x0\n"
+ "add %x[Apanel], %x[Apanel], #0x30\n"
"movi v22.4s, #0x0\n"
"movi v23.4s, #0x0\n"
"movi v24.4s, #0x0\n"
@@ -86,149 +86,220 @@ void a64_interleaved_s8s32_mmla_8x12(
"blt 4f\n"
"3:" // main loop head
"ldr q3, [%x[Apanel], #0x0]\n"
- "ldr q6, [x22, #0x0]\n"
".inst 0x4e84a408 // smmla v8.4s, v0.16b, v4.16b\n"
- "ldr q7, [x22, #0x10]\n"
- ".inst 0x4e85a40b // smmla v11.4s, v0.16b, v5.16b\n"
".inst 0x4e84a42e // smmla v14.4s, v1.16b, v4.16b\n"
+ ".inst 0x4e85a40b // smmla v11.4s, v0.16b, v5.16b\n"
".inst 0x4e85a431 // smmla v17.4s, v1.16b, v5.16b\n"
+ "ldr q6, [x20, #0x0]\n"
".inst 0x4e84a454 // smmla v20.4s, v2.16b, v4.16b\n"
- "sub x20, x20, #0x2\n"
".inst 0x4e85a457 // smmla v23.4s, v2.16b, v5.16b\n"
+ "ldr q7, [x20, #0x10]\n"
".inst 0x4e84a47a // smmla v26.4s, v3.16b, v4.16b\n"
- "ldr q4, [x22, #0x20]\n"
".inst 0x4e85a47d // smmla v29.4s, v3.16b, v5.16b\n"
- "ldr q5, [x22, #0x30]\n"
+ "ldr q4, [x20, #0x20]\n"
+ "ldr q5, [x20, #0x30]\n"
".inst 0x4e86a409 // smmla v9.4s, v0.16b, v6.16b\n"
- ".inst 0x4e87a40c // smmla v12.4s, v0.16b, v7.16b\n"
".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
- "cmp x20, #0x2\n"
- ".inst 0x4e87a432 // smmla v18.4s, v1.16b, v7.16b\n"
".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- ".inst 0x4e87a458 // smmla v24.4s, v2.16b, v7.16b\n"
".inst 0x4e86a47b // smmla v27.4s, v3.16b, v6.16b\n"
- "ldr q6, [x22, #0x40]\n"
- ".inst 0x4e87a47e // smmla v30.4s, v3.16b, v7.16b\n"
- "ldr q7, [x22, #0x50]\n"
+ "ldr q6, [x20, #0x40]\n"
+ ".inst 0x4e87a40c // smmla v12.4s, v0.16b, v7.16b\n"
".inst 0x4e84a40a // smmla v10.4s, v0.16b, v4.16b\n"
+ "sub x19, x19, #0x4\n"
".inst 0x4e85a40d // smmla v13.4s, v0.16b, v5.16b\n"
+ ".inst 0x4e87a432 // smmla v18.4s, v1.16b, v7.16b\n"
"ldr q0, [%x[Apanel], #0x10]\n"
".inst 0x4e84a430 // smmla v16.4s, v1.16b, v4.16b\n"
".inst 0x4e85a433 // smmla v19.4s, v1.16b, v5.16b\n"
"ldr q1, [%x[Apanel], #0x20]\n"
+ ".inst 0x4e87a458 // smmla v24.4s, v2.16b, v7.16b\n"
+ ".inst 0x4e87a47e // smmla v30.4s, v3.16b, v7.16b\n"
+ "ldr q7, [x20, #0x50]\n"
".inst 0x4e84a456 // smmla v22.4s, v2.16b, v4.16b\n"
".inst 0x4e85a459 // smmla v25.4s, v2.16b, v5.16b\n"
"ldr q2, [%x[Apanel], #0x30]\n"
".inst 0x4e84a47c // smmla v28.4s, v3.16b, v4.16b\n"
- "ldr q4, [x22, #0x60]\n"
".inst 0x4e85a47f // smmla v31.4s, v3.16b, v5.16b\n"
"ldr q3, [%x[Apanel], #0x40]\n"
- "ldr q5, [x22, #0x70]\n"
".inst 0x4e86a408 // smmla v8.4s, v0.16b, v6.16b\n"
- ".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
+ "ldr q4, [x20, #0x60]\n"
+ ".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
".inst 0x4e87a431 // smmla v17.4s, v1.16b, v7.16b\n"
+ "ldr q5, [x20, #0x70]\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
".inst 0x4e87a457 // smmla v23.4s, v2.16b, v7.16b\n"
+ "cmp x19, #0x4\n"
".inst 0x4e86a47a // smmla v26.4s, v3.16b, v6.16b\n"
- "ldr q6, [x22, #0x80]\n"
".inst 0x4e87a47d // smmla v29.4s, v3.16b, v7.16b\n"
- "ldr q7, [x22, #0x90]\n"
+ "ldr q6, [x20, #0x80]\n"
+ "ldr q7, [x20, #0x90]\n"
".inst 0x4e84a409 // smmla v9.4s, v0.16b, v4.16b\n"
- ".inst 0x4e85a40c // smmla v12.4s, v0.16b, v5.16b\n"
".inst 0x4e84a42f // smmla v15.4s, v1.16b, v4.16b\n"
- ".inst 0x4e85a432 // smmla v18.4s, v1.16b, v5.16b\n"
".inst 0x4e84a455 // smmla v21.4s, v2.16b, v4.16b\n"
- ".inst 0x4e85a458 // smmla v24.4s, v2.16b, v5.16b\n"
".inst 0x4e84a47b // smmla v27.4s, v3.16b, v4.16b\n"
- "ldr q4, [x22, #0xa0]\n"
- ".inst 0x4e85a47e // smmla v30.4s, v3.16b, v5.16b\n"
- "ldr q5, [x22, #0xb0]\n"
+ "ldr q4, [x20, #0xa0]\n"
+ ".inst 0x4e85a40c // smmla v12.4s, v0.16b, v5.16b\n"
".inst 0x4e86a40a // smmla v10.4s, v0.16b, v6.16b\n"
".inst 0x4e87a40d // smmla v13.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e85a432 // smmla v18.4s, v1.16b, v5.16b\n"
"ldr q0, [%x[Apanel], #0x50]\n"
".inst 0x4e86a430 // smmla v16.4s, v1.16b, v6.16b\n"
".inst 0x4e87a433 // smmla v19.4s, v1.16b, v7.16b\n"
"ldr q1, [%x[Apanel], #0x60]\n"
+ ".inst 0x4e85a458 // smmla v24.4s, v2.16b, v5.16b\n"
+ ".inst 0x4e85a47e // smmla v30.4s, v3.16b, v5.16b\n"
+ "ldr q5, [x20, #0xb0]\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
".inst 0x4e87a459 // smmla v25.4s, v2.16b, v7.16b\n"
"ldr q2, [%x[Apanel], #0x70]\n"
".inst 0x4e86a47c // smmla v28.4s, v3.16b, v6.16b\n"
".inst 0x4e87a47f // smmla v31.4s, v3.16b, v7.16b\n"
- "add %x[Apanel], %x[Apanel], #0x80\n"
- "add x22, x22, #0xc0\n"
+ "ldr q3, [%x[Apanel], #0x80]\n"
+ ".inst 0x4e84a408 // smmla v8.4s, v0.16b, v4.16b\n"
+ ".inst 0x4e84a42e // smmla v14.4s, v1.16b, v4.16b\n"
+ "ldr q6, [x20, #0xc0]\n"
+ ".inst 0x4e85a40b // smmla v11.4s, v0.16b, v5.16b\n"
+ ".inst 0x4e85a431 // smmla v17.4s, v1.16b, v5.16b\n"
+ "ldr q7, [x20, #0xd0]\n"
+ ".inst 0x4e84a454 // smmla v20.4s, v2.16b, v4.16b\n"
+ ".inst 0x4e85a457 // smmla v23.4s, v2.16b, v5.16b\n"
+ ".inst 0x4e84a47a // smmla v26.4s, v3.16b, v4.16b\n"
+ ".inst 0x4e85a47d // smmla v29.4s, v3.16b, v5.16b\n"
+ "ldr q4, [x20, #0xe0]\n"
+ "ldr q5, [x20, #0xf0]\n"
+ ".inst 0x4e86a409 // smmla v9.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
+ ".inst 0x4e86a47b // smmla v27.4s, v3.16b, v6.16b\n"
+ "ldr q6, [x20, #0x100]\n"
+ ".inst 0x4e87a40c // smmla v12.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e84a40a // smmla v10.4s, v0.16b, v4.16b\n"
+ ".inst 0x4e85a40d // smmla v13.4s, v0.16b, v5.16b\n"
+ ".inst 0x4e87a432 // smmla v18.4s, v1.16b, v7.16b\n"
+ "ldr q0, [%x[Apanel], #0x90]\n"
+ ".inst 0x4e84a430 // smmla v16.4s, v1.16b, v4.16b\n"
+ ".inst 0x4e85a433 // smmla v19.4s, v1.16b, v5.16b\n"
+ "ldr q1, [%x[Apanel], #0xa0]\n"
+ ".inst 0x4e87a458 // smmla v24.4s, v2.16b, v7.16b\n"
+ ".inst 0x4e87a47e // smmla v30.4s, v3.16b, v7.16b\n"
+ "ldr q7, [x20, #0x110]\n"
+ ".inst 0x4e84a456 // smmla v22.4s, v2.16b, v4.16b\n"
+ ".inst 0x4e85a459 // smmla v25.4s, v2.16b, v5.16b\n"
+ "ldr q2, [%x[Apanel], #0xb0]\n"
+ ".inst 0x4e84a47c // smmla v28.4s, v3.16b, v4.16b\n"
+ ".inst 0x4e85a47f // smmla v31.4s, v3.16b, v5.16b\n"
+ "ldr q3, [%x[Apanel], #0xc0]\n"
+ ".inst 0x4e86a408 // smmla v8.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
+ "ldr q4, [x20, #0x120]\n"
+ ".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e87a431 // smmla v17.4s, v1.16b, v7.16b\n"
+ "ldr q5, [x20, #0x130]\n"
+ ".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
+ ".inst 0x4e87a457 // smmla v23.4s, v2.16b, v7.16b\n"
+ ".inst 0x4e86a47a // smmla v26.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e87a47d // smmla v29.4s, v3.16b, v7.16b\n"
+ "ldr q6, [x20, #0x140]\n"
+ "ldr q7, [x20, #0x150]\n"
+ ".inst 0x4e84a409 // smmla v9.4s, v0.16b, v4.16b\n"
+ ".inst 0x4e84a42f // smmla v15.4s, v1.16b, v4.16b\n"
+ ".inst 0x4e84a455 // smmla v21.4s, v2.16b, v4.16b\n"
+ ".inst 0x4e84a47b // smmla v27.4s, v3.16b, v4.16b\n"
+ "ldr q4, [x20, #0x160]\n"
+ ".inst 0x4e85a40c // smmla v12.4s, v0.16b, v5.16b\n"
+ ".inst 0x4e86a40a // smmla v10.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e87a40d // smmla v13.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e85a432 // smmla v18.4s, v1.16b, v5.16b\n"
+ "ldr q0, [%x[Apanel], #0xd0]\n"
+ ".inst 0x4e86a430 // smmla v16.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e87a433 // smmla v19.4s, v1.16b, v7.16b\n"
+ "ldr q1, [%x[Apanel], #0xe0]\n"
+ ".inst 0x4e85a458 // smmla v24.4s, v2.16b, v5.16b\n"
+ ".inst 0x4e85a47e // smmla v30.4s, v3.16b, v5.16b\n"
+ "ldr q5, [x20, #0x170]\n"
+ ".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
+ ".inst 0x4e87a459 // smmla v25.4s, v2.16b, v7.16b\n"
+ "ldr q2, [%x[Apanel], #0xf0]\n"
+ ".inst 0x4e86a47c // smmla v28.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e87a47f // smmla v31.4s, v3.16b, v7.16b\n"
+ "add %x[Apanel], %x[Apanel], #0x100\n"
+ "add x20, x20, #0x180\n"
"bge 3b\n"
"4:" // main loop skip
"ldr q3, [%x[Apanel], #0x0]\n"
- "ldr q6, [x22, #0x0]\n"
".inst 0x4e84a408 // smmla v8.4s, v0.16b, v4.16b\n"
- "ldr q7, [x22, #0x10]\n"
- ".inst 0x4e85a40b // smmla v11.4s, v0.16b, v5.16b\n"
".inst 0x4e84a42e // smmla v14.4s, v1.16b, v4.16b\n"
+ ".inst 0x4e85a40b // smmla v11.4s, v0.16b, v5.16b\n"
".inst 0x4e85a431 // smmla v17.4s, v1.16b, v5.16b\n"
+ "ldr q6, [x20, #0x0]\n"
".inst 0x4e84a454 // smmla v20.4s, v2.16b, v4.16b\n"
- "add %x[Apanel], %x[Apanel], #0x10\n"
".inst 0x4e85a457 // smmla v23.4s, v2.16b, v5.16b\n"
+ "ldr q7, [x20, #0x10]\n"
".inst 0x4e84a47a // smmla v26.4s, v3.16b, v4.16b\n"
- "ldr q4, [x22, #0x20]\n"
".inst 0x4e85a47d // smmla v29.4s, v3.16b, v5.16b\n"
- "ldr q5, [x22, #0x30]\n"
+ "ldr q4, [x20, #0x20]\n"
+ "ldr q5, [x20, #0x30]\n"
".inst 0x4e86a409 // smmla v9.4s, v0.16b, v6.16b\n"
- ".inst 0x4e87a40c // smmla v12.4s, v0.16b, v7.16b\n"
".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
- "add x22, x22, #0x40\n"
- ".inst 0x4e87a432 // smmla v18.4s, v1.16b, v7.16b\n"
".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- ".inst 0x4e87a458 // smmla v24.4s, v2.16b, v7.16b\n"
".inst 0x4e86a47b // smmla v27.4s, v3.16b, v6.16b\n"
- ".inst 0x4e87a47e // smmla v30.4s, v3.16b, v7.16b\n"
+ "add %x[Apanel], %x[Apanel], #0x10\n"
+ ".inst 0x4e87a40c // smmla v12.4s, v0.16b, v7.16b\n"
".inst 0x4e84a40a // smmla v10.4s, v0.16b, v4.16b\n"
+ "add x20, x20, #0x40\n"
".inst 0x4e85a40d // smmla v13.4s, v0.16b, v5.16b\n"
+ ".inst 0x4e87a432 // smmla v18.4s, v1.16b, v7.16b\n"
".inst 0x4e84a430 // smmla v16.4s, v1.16b, v4.16b\n"
".inst 0x4e85a433 // smmla v19.4s, v1.16b, v5.16b\n"
+ ".inst 0x4e87a458 // smmla v24.4s, v2.16b, v7.16b\n"
+ ".inst 0x4e87a47e // smmla v30.4s, v3.16b, v7.16b\n"
".inst 0x4e84a456 // smmla v22.4s, v2.16b, v4.16b\n"
".inst 0x4e85a459 // smmla v25.4s, v2.16b, v5.16b\n"
".inst 0x4e84a47c // smmla v28.4s, v3.16b, v4.16b\n"
".inst 0x4e85a47f // smmla v31.4s, v3.16b, v5.16b\n"
- "cbz x20, 5f\n"
- "ldr q6, [x22, #0x0]\n"
+ "cbz x19, 6f\n"
+ "5:" // odd loop
+ "ldr q6, [x20, #0x0]\n"
"ldr q0, [%x[Apanel], #0x0]\n"
".inst 0x4e86a408 // smmla v8.4s, v0.16b, v6.16b\n"
"ldr q1, [%x[Apanel], #0x10]\n"
- "ldr q7, [x22, #0x10]\n"
- ".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
+ "ldr q7, [x20, #0x10]\n"
+ ".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
"ldr q2, [%x[Apanel], #0x20]\n"
"ldr q3, [%x[Apanel], #0x30]\n"
- ".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
- "ldr q4, [x22, #0x20]\n"
- "ldr q5, [x22, #0x30]\n"
+ ".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
".inst 0x4e87a431 // smmla v17.4s, v1.16b, v7.16b\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
+ "ldr q4, [x20, #0x20]\n"
".inst 0x4e87a457 // smmla v23.4s, v2.16b, v7.16b\n"
- "add %x[Apanel], %x[Apanel], #0x40\n"
".inst 0x4e86a47a // smmla v26.4s, v3.16b, v6.16b\n"
- "ldr q6, [x22, #0x40]\n"
+ "ldr q5, [x20, #0x30]\n"
".inst 0x4e87a47d // smmla v29.4s, v3.16b, v7.16b\n"
- "ldr q7, [x22, #0x50]\n"
+ "ldr q6, [x20, #0x40]\n"
+ "ldr q7, [x20, #0x50]\n"
+ "subs x19, x19, #0x1\n"
".inst 0x4e84a409 // smmla v9.4s, v0.16b, v4.16b\n"
- ".inst 0x4e85a40c // smmla v12.4s, v0.16b, v5.16b\n"
".inst 0x4e84a42f // smmla v15.4s, v1.16b, v4.16b\n"
- ".inst 0x4e85a432 // smmla v18.4s, v1.16b, v5.16b\n"
- "add x22, x22, #0x60\n"
".inst 0x4e84a455 // smmla v21.4s, v2.16b, v4.16b\n"
- ".inst 0x4e85a458 // smmla v24.4s, v2.16b, v5.16b\n"
".inst 0x4e84a47b // smmla v27.4s, v3.16b, v4.16b\n"
- ".inst 0x4e85a47e // smmla v30.4s, v3.16b, v5.16b\n"
+ "add x20, x20, #0x60\n"
+ ".inst 0x4e85a40c // smmla v12.4s, v0.16b, v5.16b\n"
".inst 0x4e86a40a // smmla v10.4s, v0.16b, v6.16b\n"
+ "add %x[Apanel], %x[Apanel], #0x40\n"
".inst 0x4e87a40d // smmla v13.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e85a432 // smmla v18.4s, v1.16b, v5.16b\n"
".inst 0x4e86a430 // smmla v16.4s, v1.16b, v6.16b\n"
".inst 0x4e87a433 // smmla v19.4s, v1.16b, v7.16b\n"
+ ".inst 0x4e85a458 // smmla v24.4s, v2.16b, v5.16b\n"
+ ".inst 0x4e85a47e // smmla v30.4s, v3.16b, v5.16b\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
".inst 0x4e87a459 // smmla v25.4s, v2.16b, v7.16b\n"
".inst 0x4e86a47c // smmla v28.4s, v3.16b, v6.16b\n"
".inst 0x4e87a47f // smmla v31.4s, v3.16b, v7.16b\n"
- "5:" // multiply loop done
- "subs x23, x23, #0x1\n"
+ "bne 5b\n"
+ "6:" // multiply loop done
+ "subs x22, x22, #0x1\n"
"uzp1 v4.2d, v8.2d, v11.2d\n"
"uzp2 v8.2d, v8.2d, v11.2d\n"
"uzp1 v11.2d, v9.2d, v12.2d\n"
@@ -283,7 +354,7 @@ void a64_interleaved_s8s32_mmla_8x12(
"bne 1b\n"
: [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
: [args_ptr] "r" (&ka), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_bblocks] "I" (offsetof(KernelArgs, bblocks))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8u32_mmla_8x12/a510.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8u32_mmla_8x12/a510.cpp
index 83301d80bb..3fe1a9bd04 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8u32_mmla_8x12/a510.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8u32_mmla_8x12/a510.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -33,33 +33,34 @@ void a64_interleaved_u8u32_mmla_8x12_a510(
uint32_t *Cpanel, int ablocks, int bblocks, int K) {
struct KernelArgs {
+ size_t bblocks = {};
size_t K = {};
const uint8_t *Bpanel = {};
- size_t bblocks = {};
} ka;
+ ka.bblocks = bblocks;
ka.K = (K/8) - 1;
ka.Bpanel = Bpanel;
- ka.bblocks = bblocks;
__asm__ __volatile__(
"1:" // Height loop
- "ldr x23, [%x[args_ptr], %[offsetof_bblocks]]\n"
- "ldr x22, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x22, [%x[args_ptr], %[offsetof_bblocks]]\n"
"mov x21, %x[Apanel]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_Bpanel]]\n"
"2:" // Width loop
- "ldp q4, q5, [x22], #0x20\n"
"mov %x[Apanel], x21\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
"ld1 { v0.16b }, [%x[Apanel]], #0x10\n"
+ "ld1 { v4.16b }, [x20], #0x10\n"
"ld1 { v1.16b }, [%x[Apanel]], #0x10\n"
+ "cmp x19, #0x2\n"
"movi v8.4s, #0x0\n"
- "ld1 { v2.16b }, [%x[Apanel]], #0x10\n"
- "ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "cmp x20, #0x2\n"
"movi v9.4s, #0x0\n"
+ "ld1 { v5.16b }, [x20], #0x10\n"
"movi v10.4s, #0x0\n"
"movi v11.4s, #0x0\n"
+ "ld1 { v2.16b }, [%x[Apanel]], #0x10\n"
"movi v12.4s, #0x0\n"
"movi v13.4s, #0x0\n"
"movi v14.4s, #0x0\n"
@@ -84,63 +85,65 @@ void a64_interleaved_u8u32_mmla_8x12_a510(
"3:" // main loop head
"ld1 { v3.16b }, [%x[Apanel]], #0x10\n"
".inst 0x6e84a408 // ummla v8.4s, v0.16b, v4.16b\n"
- ".inst 0x6e85a40b // ummla v11.4s, v0.16b, v5.16b\n"
- "ldp q6, q7, [x22], #0x20\n"
".inst 0x6e84a42e // ummla v14.4s, v1.16b, v4.16b\n"
+ "ldp q6, q7, [x20], #0x20\n"
+ ".inst 0x6e85a40b // ummla v11.4s, v0.16b, v5.16b\n"
".inst 0x6e85a431 // ummla v17.4s, v1.16b, v5.16b\n"
".inst 0x6e84a454 // ummla v20.4s, v2.16b, v4.16b\n"
- "sub x20, x20, #0x2\n"
+ "sub x19, x19, #0x2\n"
".inst 0x6e85a457 // ummla v23.4s, v2.16b, v5.16b\n"
".inst 0x6e84a47a // ummla v26.4s, v3.16b, v4.16b\n"
- "cmp x20, #0x2\n"
+ "cmp x19, #0x2\n"
".inst 0x6e85a47d // ummla v29.4s, v3.16b, v5.16b\n"
- "ldp q4, q5, [x22], #0x20\n"
+ "ldp q4, q5, [x20], #0x20\n"
".inst 0x6e86a409 // ummla v9.4s, v0.16b, v6.16b\n"
".inst 0x6e87a40c // ummla v12.4s, v0.16b, v7.16b\n"
- ".inst 0x6e86a42f // ummla v15.4s, v1.16b, v6.16b\n"
- ".inst 0x6e87a432 // ummla v18.4s, v1.16b, v7.16b\n"
- ".inst 0x6e86a455 // ummla v21.4s, v2.16b, v6.16b\n"
- ".inst 0x6e87a458 // ummla v24.4s, v2.16b, v7.16b\n"
- ".inst 0x6e86a47b // ummla v27.4s, v3.16b, v6.16b\n"
- ".inst 0x6e87a47e // ummla v30.4s, v3.16b, v7.16b\n"
- "ldp q6, q7, [x22], #0x20\n"
".inst 0x6e84a40a // ummla v10.4s, v0.16b, v4.16b\n"
".inst 0x6e85a40d // ummla v13.4s, v0.16b, v5.16b\n"
"ld1 { v0.16b }, [%x[Apanel]], #0x10\n"
+ ".inst 0x6e86a42f // ummla v15.4s, v1.16b, v6.16b\n"
+ ".inst 0x6e86a455 // ummla v21.4s, v2.16b, v6.16b\n"
+ ".inst 0x6e86a47b // ummla v27.4s, v3.16b, v6.16b\n"
+ ".inst 0x6e87a432 // ummla v18.4s, v1.16b, v7.16b\n"
+ "ld1 { v6.16b }, [x20], #0x10\n"
".inst 0x6e84a430 // ummla v16.4s, v1.16b, v4.16b\n"
".inst 0x6e85a433 // ummla v19.4s, v1.16b, v5.16b\n"
"ld1 { v1.16b }, [%x[Apanel]], #0x10\n"
+ ".inst 0x6e87a458 // ummla v24.4s, v2.16b, v7.16b\n"
+ ".inst 0x6e87a47e // ummla v30.4s, v3.16b, v7.16b\n"
+ "ld1 { v7.16b }, [x20], #0x10\n"
".inst 0x6e84a456 // ummla v22.4s, v2.16b, v4.16b\n"
".inst 0x6e85a459 // ummla v25.4s, v2.16b, v5.16b\n"
"ld1 { v2.16b }, [%x[Apanel]], #0x10\n"
".inst 0x6e84a47c // ummla v28.4s, v3.16b, v4.16b\n"
".inst 0x6e85a47f // ummla v31.4s, v3.16b, v5.16b\n"
"ld1 { v3.16b }, [%x[Apanel]], #0x10\n"
+ "ldp q4, q5, [x20], #0x20\n"
".inst 0x6e86a408 // ummla v8.4s, v0.16b, v6.16b\n"
- "ldp q4, q5, [x22], #0x20\n"
- ".inst 0x6e87a40b // ummla v11.4s, v0.16b, v7.16b\n"
".inst 0x6e86a42e // ummla v14.4s, v1.16b, v6.16b\n"
+ ".inst 0x6e87a40b // ummla v11.4s, v0.16b, v7.16b\n"
".inst 0x6e87a431 // ummla v17.4s, v1.16b, v7.16b\n"
".inst 0x6e86a454 // ummla v20.4s, v2.16b, v6.16b\n"
".inst 0x6e87a457 // ummla v23.4s, v2.16b, v7.16b\n"
".inst 0x6e86a47a // ummla v26.4s, v3.16b, v6.16b\n"
".inst 0x6e87a47d // ummla v29.4s, v3.16b, v7.16b\n"
- "ldp q6, q7, [x22], #0x20\n"
+ "ldp q6, q7, [x20], #0x20\n"
".inst 0x6e84a409 // ummla v9.4s, v0.16b, v4.16b\n"
".inst 0x6e85a40c // ummla v12.4s, v0.16b, v5.16b\n"
- ".inst 0x6e84a42f // ummla v15.4s, v1.16b, v4.16b\n"
- ".inst 0x6e85a432 // ummla v18.4s, v1.16b, v5.16b\n"
- ".inst 0x6e84a455 // ummla v21.4s, v2.16b, v4.16b\n"
- ".inst 0x6e85a458 // ummla v24.4s, v2.16b, v5.16b\n"
- ".inst 0x6e84a47b // ummla v27.4s, v3.16b, v4.16b\n"
- ".inst 0x6e85a47e // ummla v30.4s, v3.16b, v5.16b\n"
- "ldp q4, q5, [x22], #0x20\n"
".inst 0x6e86a40a // ummla v10.4s, v0.16b, v6.16b\n"
".inst 0x6e87a40d // ummla v13.4s, v0.16b, v7.16b\n"
"ld1 { v0.16b }, [%x[Apanel]], #0x10\n"
+ ".inst 0x6e84a42f // ummla v15.4s, v1.16b, v4.16b\n"
+ ".inst 0x6e84a455 // ummla v21.4s, v2.16b, v4.16b\n"
+ ".inst 0x6e84a47b // ummla v27.4s, v3.16b, v4.16b\n"
+ ".inst 0x6e85a432 // ummla v18.4s, v1.16b, v5.16b\n"
+ "ld1 { v4.16b }, [x20], #0x10\n"
".inst 0x6e86a430 // ummla v16.4s, v1.16b, v6.16b\n"
".inst 0x6e87a433 // ummla v19.4s, v1.16b, v7.16b\n"
"ld1 { v1.16b }, [%x[Apanel]], #0x10\n"
+ ".inst 0x6e85a458 // ummla v24.4s, v2.16b, v5.16b\n"
+ ".inst 0x6e85a47e // ummla v30.4s, v3.16b, v5.16b\n"
+ "ld1 { v5.16b }, [x20], #0x10\n"
".inst 0x6e86a456 // ummla v22.4s, v2.16b, v6.16b\n"
".inst 0x6e87a459 // ummla v25.4s, v2.16b, v7.16b\n"
"ld1 { v2.16b }, [%x[Apanel]], #0x10\n"
@@ -150,65 +153,66 @@ void a64_interleaved_u8u32_mmla_8x12_a510(
"4:" // main loop skip
"ld1 { v3.16b }, [%x[Apanel]], #0x10\n"
".inst 0x6e84a408 // ummla v8.4s, v0.16b, v4.16b\n"
- ".inst 0x6e85a40b // ummla v11.4s, v0.16b, v5.16b\n"
- "ldp q6, q7, [x22], #0x20\n"
".inst 0x6e84a42e // ummla v14.4s, v1.16b, v4.16b\n"
+ "ldp q6, q7, [x20], #0x20\n"
+ ".inst 0x6e85a40b // ummla v11.4s, v0.16b, v5.16b\n"
".inst 0x6e85a431 // ummla v17.4s, v1.16b, v5.16b\n"
".inst 0x6e84a454 // ummla v20.4s, v2.16b, v4.16b\n"
".inst 0x6e85a457 // ummla v23.4s, v2.16b, v5.16b\n"
".inst 0x6e84a47a // ummla v26.4s, v3.16b, v4.16b\n"
".inst 0x6e85a47d // ummla v29.4s, v3.16b, v5.16b\n"
- "ldp q4, q5, [x22], #0x20\n"
+ "ldp q4, q5, [x20], #0x20\n"
".inst 0x6e86a409 // ummla v9.4s, v0.16b, v6.16b\n"
- ".inst 0x6e87a40c // ummla v12.4s, v0.16b, v7.16b\n"
".inst 0x6e86a42f // ummla v15.4s, v1.16b, v6.16b\n"
- ".inst 0x6e87a432 // ummla v18.4s, v1.16b, v7.16b\n"
".inst 0x6e86a455 // ummla v21.4s, v2.16b, v6.16b\n"
- ".inst 0x6e87a458 // ummla v24.4s, v2.16b, v7.16b\n"
".inst 0x6e86a47b // ummla v27.4s, v3.16b, v6.16b\n"
- ".inst 0x6e87a47e // ummla v30.4s, v3.16b, v7.16b\n"
+ ".inst 0x6e87a40c // ummla v12.4s, v0.16b, v7.16b\n"
".inst 0x6e84a40a // ummla v10.4s, v0.16b, v4.16b\n"
".inst 0x6e85a40d // ummla v13.4s, v0.16b, v5.16b\n"
+ ".inst 0x6e87a432 // ummla v18.4s, v1.16b, v7.16b\n"
".inst 0x6e84a430 // ummla v16.4s, v1.16b, v4.16b\n"
".inst 0x6e85a433 // ummla v19.4s, v1.16b, v5.16b\n"
+ ".inst 0x6e87a458 // ummla v24.4s, v2.16b, v7.16b\n"
+ ".inst 0x6e87a47e // ummla v30.4s, v3.16b, v7.16b\n"
".inst 0x6e84a456 // ummla v22.4s, v2.16b, v4.16b\n"
".inst 0x6e85a459 // ummla v25.4s, v2.16b, v5.16b\n"
".inst 0x6e84a47c // ummla v28.4s, v3.16b, v4.16b\n"
".inst 0x6e85a47f // ummla v31.4s, v3.16b, v5.16b\n"
- "cbz x20, 5f\n"
- "ldp q6, q7, [x22], #0x20\n"
+ "cbz x19, 5f\n"
"ld1 { v0.16b }, [%x[Apanel]], #0x10\n"
- "ld1 { v1.16b }, [%x[Apanel]], #0x10\n"
+ "ld1 { v6.16b }, [x20], #0x10\n"
".inst 0x6e86a408 // ummla v8.4s, v0.16b, v6.16b\n"
+ "ld1 { v1.16b }, [%x[Apanel]], #0x10\n"
+ "ld1 { v7.16b }, [x20], #0x10\n"
+ ".inst 0x6e86a42e // ummla v14.4s, v1.16b, v6.16b\n"
"ld1 { v2.16b }, [%x[Apanel]], #0x10\n"
"ld1 { v3.16b }, [%x[Apanel]], #0x10\n"
".inst 0x6e87a40b // ummla v11.4s, v0.16b, v7.16b\n"
- "ldp q4, q5, [x22], #0x20\n"
- ".inst 0x6e86a42e // ummla v14.4s, v1.16b, v6.16b\n"
+ "ldp q4, q5, [x20], #0x20\n"
".inst 0x6e87a431 // ummla v17.4s, v1.16b, v7.16b\n"
".inst 0x6e86a454 // ummla v20.4s, v2.16b, v6.16b\n"
".inst 0x6e87a457 // ummla v23.4s, v2.16b, v7.16b\n"
".inst 0x6e86a47a // ummla v26.4s, v3.16b, v6.16b\n"
".inst 0x6e87a47d // ummla v29.4s, v3.16b, v7.16b\n"
- "ldp q6, q7, [x22], #0x20\n"
+ "ldp q6, q7, [x20], #0x20\n"
".inst 0x6e84a409 // ummla v9.4s, v0.16b, v4.16b\n"
- ".inst 0x6e85a40c // ummla v12.4s, v0.16b, v5.16b\n"
".inst 0x6e84a42f // ummla v15.4s, v1.16b, v4.16b\n"
- ".inst 0x6e85a432 // ummla v18.4s, v1.16b, v5.16b\n"
".inst 0x6e84a455 // ummla v21.4s, v2.16b, v4.16b\n"
- ".inst 0x6e85a458 // ummla v24.4s, v2.16b, v5.16b\n"
".inst 0x6e84a47b // ummla v27.4s, v3.16b, v4.16b\n"
- ".inst 0x6e85a47e // ummla v30.4s, v3.16b, v5.16b\n"
+ ".inst 0x6e85a40c // ummla v12.4s, v0.16b, v5.16b\n"
".inst 0x6e86a40a // ummla v10.4s, v0.16b, v6.16b\n"
".inst 0x6e87a40d // ummla v13.4s, v0.16b, v7.16b\n"
+ ".inst 0x6e85a432 // ummla v18.4s, v1.16b, v5.16b\n"
".inst 0x6e86a430 // ummla v16.4s, v1.16b, v6.16b\n"
".inst 0x6e87a433 // ummla v19.4s, v1.16b, v7.16b\n"
+ ".inst 0x6e85a458 // ummla v24.4s, v2.16b, v5.16b\n"
+ ".inst 0x6e85a47e // ummla v30.4s, v3.16b, v5.16b\n"
".inst 0x6e86a456 // ummla v22.4s, v2.16b, v6.16b\n"
".inst 0x6e87a459 // ummla v25.4s, v2.16b, v7.16b\n"
".inst 0x6e86a47c // ummla v28.4s, v3.16b, v6.16b\n"
".inst 0x6e87a47f // ummla v31.4s, v3.16b, v7.16b\n"
"5:" // multiply loop done
- "subs x23, x23, #0x1\n"
+ "subs x22, x22, #0x1\n"
"uzp1 v4.2d, v8.2d, v11.2d\n"
"uzp2 v8.2d, v8.2d, v11.2d\n"
"uzp1 v11.2d, v9.2d, v12.2d\n"
@@ -263,7 +267,7 @@ void a64_interleaved_u8u32_mmla_8x12_a510(
"bne 1b\n"
: [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
: [args_ptr] "r" (&ka), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_bblocks] "I" (offsetof(KernelArgs, bblocks))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8u32_mmla_8x12/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8u32_mmla_8x12/generic.cpp
index c5342197c1..e67d17e49a 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8u32_mmla_8x12/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8u32_mmla_8x12/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020, 2023 Arm Limited.
+ * Copyright (c) 2019-2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -33,46 +33,46 @@ void a64_interleaved_u8u32_mmla_8x12(
uint32_t *Cpanel, int ablocks, int bblocks, int K) {
struct KernelArgs {
+ size_t bblocks = {};
size_t K = {};
const uint8_t *Bpanel = {};
- size_t bblocks = {};
} ka;
+ ka.bblocks = bblocks;
ka.K = (K/8) - 1;
ka.Bpanel = Bpanel;
- ka.bblocks = bblocks;
__asm__ __volatile__(
"1:" // Height loop
- "ldr x23, [%x[args_ptr], %[offsetof_bblocks]]\n"
- "ldr x22, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x22, [%x[args_ptr], %[offsetof_bblocks]]\n"
"mov x21, %x[Apanel]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_Bpanel]]\n"
"2:" // Width loop
- "ldr q4, [x22, #0x0]\n"
- "ldr q5, [x22, #0x10]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
"mov %x[Apanel], x21\n"
- "ldr q0, [%x[Apanel], #0x0]\n"
- "ldr q1, [%x[Apanel], #0x10]\n"
+ "cmp x19, #0x2\n"
"movi v8.4s, #0x0\n"
- "ldr q2, [%x[Apanel], #0x20]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "cmp x20, #0x2\n"
"movi v9.4s, #0x0\n"
+ "ldr q4, [x20, #0x0]\n"
"movi v10.4s, #0x0\n"
- "add x22, x22, #0x20\n"
"movi v11.4s, #0x0\n"
+ "ldr q0, [%x[Apanel], #0x0]\n"
"movi v12.4s, #0x0\n"
- "add %x[Apanel], %x[Apanel], #0x30\n"
"movi v13.4s, #0x0\n"
+ "ldr q1, [%x[Apanel], #0x10]\n"
"movi v14.4s, #0x0\n"
"movi v15.4s, #0x0\n"
+ "ldr q5, [x20, #0x10]\n"
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
+ "ldr q2, [%x[Apanel], #0x20]\n"
"movi v18.4s, #0x0\n"
"movi v19.4s, #0x0\n"
+ "add x20, x20, #0x20\n"
"movi v20.4s, #0x0\n"
"movi v21.4s, #0x0\n"
+ "add %x[Apanel], %x[Apanel], #0x30\n"
"movi v22.4s, #0x0\n"
"movi v23.4s, #0x0\n"
"movi v24.4s, #0x0\n"
@@ -86,149 +86,149 @@ void a64_interleaved_u8u32_mmla_8x12(
"blt 4f\n"
"3:" // main loop head
"ldr q3, [%x[Apanel], #0x0]\n"
- "ldr q6, [x22, #0x0]\n"
".inst 0x6e84a408 // ummla v8.4s, v0.16b, v4.16b\n"
- "ldr q7, [x22, #0x10]\n"
- ".inst 0x6e85a40b // ummla v11.4s, v0.16b, v5.16b\n"
".inst 0x6e84a42e // ummla v14.4s, v1.16b, v4.16b\n"
+ ".inst 0x6e85a40b // ummla v11.4s, v0.16b, v5.16b\n"
".inst 0x6e85a431 // ummla v17.4s, v1.16b, v5.16b\n"
+ "ldr q6, [x20, #0x0]\n"
".inst 0x6e84a454 // ummla v20.4s, v2.16b, v4.16b\n"
- "sub x20, x20, #0x2\n"
".inst 0x6e85a457 // ummla v23.4s, v2.16b, v5.16b\n"
+ "ldr q7, [x20, #0x10]\n"
".inst 0x6e84a47a // ummla v26.4s, v3.16b, v4.16b\n"
- "ldr q4, [x22, #0x20]\n"
".inst 0x6e85a47d // ummla v29.4s, v3.16b, v5.16b\n"
- "ldr q5, [x22, #0x30]\n"
+ "ldr q4, [x20, #0x20]\n"
+ "ldr q5, [x20, #0x30]\n"
".inst 0x6e86a409 // ummla v9.4s, v0.16b, v6.16b\n"
- ".inst 0x6e87a40c // ummla v12.4s, v0.16b, v7.16b\n"
".inst 0x6e86a42f // ummla v15.4s, v1.16b, v6.16b\n"
- "cmp x20, #0x2\n"
- ".inst 0x6e87a432 // ummla v18.4s, v1.16b, v7.16b\n"
".inst 0x6e86a455 // ummla v21.4s, v2.16b, v6.16b\n"
- ".inst 0x6e87a458 // ummla v24.4s, v2.16b, v7.16b\n"
".inst 0x6e86a47b // ummla v27.4s, v3.16b, v6.16b\n"
- "ldr q6, [x22, #0x40]\n"
- ".inst 0x6e87a47e // ummla v30.4s, v3.16b, v7.16b\n"
- "ldr q7, [x22, #0x50]\n"
+ "ldr q6, [x20, #0x40]\n"
+ ".inst 0x6e87a40c // ummla v12.4s, v0.16b, v7.16b\n"
".inst 0x6e84a40a // ummla v10.4s, v0.16b, v4.16b\n"
+ "sub x19, x19, #0x2\n"
".inst 0x6e85a40d // ummla v13.4s, v0.16b, v5.16b\n"
+ ".inst 0x6e87a432 // ummla v18.4s, v1.16b, v7.16b\n"
"ldr q0, [%x[Apanel], #0x10]\n"
".inst 0x6e84a430 // ummla v16.4s, v1.16b, v4.16b\n"
".inst 0x6e85a433 // ummla v19.4s, v1.16b, v5.16b\n"
"ldr q1, [%x[Apanel], #0x20]\n"
+ ".inst 0x6e87a458 // ummla v24.4s, v2.16b, v7.16b\n"
+ ".inst 0x6e87a47e // ummla v30.4s, v3.16b, v7.16b\n"
+ "ldr q7, [x20, #0x50]\n"
".inst 0x6e84a456 // ummla v22.4s, v2.16b, v4.16b\n"
".inst 0x6e85a459 // ummla v25.4s, v2.16b, v5.16b\n"
"ldr q2, [%x[Apanel], #0x30]\n"
".inst 0x6e84a47c // ummla v28.4s, v3.16b, v4.16b\n"
- "ldr q4, [x22, #0x60]\n"
".inst 0x6e85a47f // ummla v31.4s, v3.16b, v5.16b\n"
"ldr q3, [%x[Apanel], #0x40]\n"
- "ldr q5, [x22, #0x70]\n"
".inst 0x6e86a408 // ummla v8.4s, v0.16b, v6.16b\n"
- ".inst 0x6e87a40b // ummla v11.4s, v0.16b, v7.16b\n"
".inst 0x6e86a42e // ummla v14.4s, v1.16b, v6.16b\n"
+ "ldr q4, [x20, #0x60]\n"
+ ".inst 0x6e87a40b // ummla v11.4s, v0.16b, v7.16b\n"
".inst 0x6e87a431 // ummla v17.4s, v1.16b, v7.16b\n"
+ "ldr q5, [x20, #0x70]\n"
".inst 0x6e86a454 // ummla v20.4s, v2.16b, v6.16b\n"
".inst 0x6e87a457 // ummla v23.4s, v2.16b, v7.16b\n"
+ "cmp x19, #0x2\n"
".inst 0x6e86a47a // ummla v26.4s, v3.16b, v6.16b\n"
- "ldr q6, [x22, #0x80]\n"
".inst 0x6e87a47d // ummla v29.4s, v3.16b, v7.16b\n"
- "ldr q7, [x22, #0x90]\n"
+ "ldr q6, [x20, #0x80]\n"
+ "ldr q7, [x20, #0x90]\n"
".inst 0x6e84a409 // ummla v9.4s, v0.16b, v4.16b\n"
- ".inst 0x6e85a40c // ummla v12.4s, v0.16b, v5.16b\n"
".inst 0x6e84a42f // ummla v15.4s, v1.16b, v4.16b\n"
- ".inst 0x6e85a432 // ummla v18.4s, v1.16b, v5.16b\n"
".inst 0x6e84a455 // ummla v21.4s, v2.16b, v4.16b\n"
- ".inst 0x6e85a458 // ummla v24.4s, v2.16b, v5.16b\n"
".inst 0x6e84a47b // ummla v27.4s, v3.16b, v4.16b\n"
- "ldr q4, [x22, #0xa0]\n"
- ".inst 0x6e85a47e // ummla v30.4s, v3.16b, v5.16b\n"
- "ldr q5, [x22, #0xb0]\n"
+ "ldr q4, [x20, #0xa0]\n"
+ ".inst 0x6e85a40c // ummla v12.4s, v0.16b, v5.16b\n"
".inst 0x6e86a40a // ummla v10.4s, v0.16b, v6.16b\n"
".inst 0x6e87a40d // ummla v13.4s, v0.16b, v7.16b\n"
+ ".inst 0x6e85a432 // ummla v18.4s, v1.16b, v5.16b\n"
"ldr q0, [%x[Apanel], #0x50]\n"
".inst 0x6e86a430 // ummla v16.4s, v1.16b, v6.16b\n"
".inst 0x6e87a433 // ummla v19.4s, v1.16b, v7.16b\n"
"ldr q1, [%x[Apanel], #0x60]\n"
+ ".inst 0x6e85a458 // ummla v24.4s, v2.16b, v5.16b\n"
+ ".inst 0x6e85a47e // ummla v30.4s, v3.16b, v5.16b\n"
+ "ldr q5, [x20, #0xb0]\n"
".inst 0x6e86a456 // ummla v22.4s, v2.16b, v6.16b\n"
".inst 0x6e87a459 // ummla v25.4s, v2.16b, v7.16b\n"
"ldr q2, [%x[Apanel], #0x70]\n"
".inst 0x6e86a47c // ummla v28.4s, v3.16b, v6.16b\n"
".inst 0x6e87a47f // ummla v31.4s, v3.16b, v7.16b\n"
"add %x[Apanel], %x[Apanel], #0x80\n"
- "add x22, x22, #0xc0\n"
+ "add x20, x20, #0xc0\n"
"bge 3b\n"
"4:" // main loop skip
"ldr q3, [%x[Apanel], #0x0]\n"
- "ldr q6, [x22, #0x0]\n"
".inst 0x6e84a408 // ummla v8.4s, v0.16b, v4.16b\n"
- "ldr q7, [x22, #0x10]\n"
- ".inst 0x6e85a40b // ummla v11.4s, v0.16b, v5.16b\n"
".inst 0x6e84a42e // ummla v14.4s, v1.16b, v4.16b\n"
+ ".inst 0x6e85a40b // ummla v11.4s, v0.16b, v5.16b\n"
".inst 0x6e85a431 // ummla v17.4s, v1.16b, v5.16b\n"
+ "ldr q6, [x20, #0x0]\n"
".inst 0x6e84a454 // ummla v20.4s, v2.16b, v4.16b\n"
- "add %x[Apanel], %x[Apanel], #0x10\n"
".inst 0x6e85a457 // ummla v23.4s, v2.16b, v5.16b\n"
+ "ldr q7, [x20, #0x10]\n"
".inst 0x6e84a47a // ummla v26.4s, v3.16b, v4.16b\n"
- "ldr q4, [x22, #0x20]\n"
".inst 0x6e85a47d // ummla v29.4s, v3.16b, v5.16b\n"
- "ldr q5, [x22, #0x30]\n"
+ "ldr q4, [x20, #0x20]\n"
+ "ldr q5, [x20, #0x30]\n"
".inst 0x6e86a409 // ummla v9.4s, v0.16b, v6.16b\n"
- ".inst 0x6e87a40c // ummla v12.4s, v0.16b, v7.16b\n"
".inst 0x6e86a42f // ummla v15.4s, v1.16b, v6.16b\n"
- "add x22, x22, #0x40\n"
- ".inst 0x6e87a432 // ummla v18.4s, v1.16b, v7.16b\n"
".inst 0x6e86a455 // ummla v21.4s, v2.16b, v6.16b\n"
- ".inst 0x6e87a458 // ummla v24.4s, v2.16b, v7.16b\n"
".inst 0x6e86a47b // ummla v27.4s, v3.16b, v6.16b\n"
- ".inst 0x6e87a47e // ummla v30.4s, v3.16b, v7.16b\n"
+ "add %x[Apanel], %x[Apanel], #0x10\n"
+ ".inst 0x6e87a40c // ummla v12.4s, v0.16b, v7.16b\n"
".inst 0x6e84a40a // ummla v10.4s, v0.16b, v4.16b\n"
+ "add x20, x20, #0x40\n"
".inst 0x6e85a40d // ummla v13.4s, v0.16b, v5.16b\n"
+ ".inst 0x6e87a432 // ummla v18.4s, v1.16b, v7.16b\n"
".inst 0x6e84a430 // ummla v16.4s, v1.16b, v4.16b\n"
".inst 0x6e85a433 // ummla v19.4s, v1.16b, v5.16b\n"
+ ".inst 0x6e87a458 // ummla v24.4s, v2.16b, v7.16b\n"
+ ".inst 0x6e87a47e // ummla v30.4s, v3.16b, v7.16b\n"
".inst 0x6e84a456 // ummla v22.4s, v2.16b, v4.16b\n"
".inst 0x6e85a459 // ummla v25.4s, v2.16b, v5.16b\n"
".inst 0x6e84a47c // ummla v28.4s, v3.16b, v4.16b\n"
".inst 0x6e85a47f // ummla v31.4s, v3.16b, v5.16b\n"
- "cbz x20, 5f\n"
- "ldr q6, [x22, #0x0]\n"
+ "cbz x19, 5f\n"
+ "ldr q6, [x20, #0x0]\n"
"ldr q0, [%x[Apanel], #0x0]\n"
".inst 0x6e86a408 // ummla v8.4s, v0.16b, v6.16b\n"
"ldr q1, [%x[Apanel], #0x10]\n"
- "ldr q7, [x22, #0x10]\n"
- ".inst 0x6e87a40b // ummla v11.4s, v0.16b, v7.16b\n"
+ "ldr q7, [x20, #0x10]\n"
+ ".inst 0x6e86a42e // ummla v14.4s, v1.16b, v6.16b\n"
"ldr q2, [%x[Apanel], #0x20]\n"
"ldr q3, [%x[Apanel], #0x30]\n"
- ".inst 0x6e86a42e // ummla v14.4s, v1.16b, v6.16b\n"
- "ldr q4, [x22, #0x20]\n"
- "ldr q5, [x22, #0x30]\n"
+ ".inst 0x6e87a40b // ummla v11.4s, v0.16b, v7.16b\n"
".inst 0x6e87a431 // ummla v17.4s, v1.16b, v7.16b\n"
".inst 0x6e86a454 // ummla v20.4s, v2.16b, v6.16b\n"
+ "ldr q4, [x20, #0x20]\n"
".inst 0x6e87a457 // ummla v23.4s, v2.16b, v7.16b\n"
- "add %x[Apanel], %x[Apanel], #0x40\n"
".inst 0x6e86a47a // ummla v26.4s, v3.16b, v6.16b\n"
- "ldr q6, [x22, #0x40]\n"
+ "ldr q5, [x20, #0x30]\n"
".inst 0x6e87a47d // ummla v29.4s, v3.16b, v7.16b\n"
- "ldr q7, [x22, #0x50]\n"
+ "ldr q6, [x20, #0x40]\n"
+ "ldr q7, [x20, #0x50]\n"
".inst 0x6e84a409 // ummla v9.4s, v0.16b, v4.16b\n"
- ".inst 0x6e85a40c // ummla v12.4s, v0.16b, v5.16b\n"
".inst 0x6e84a42f // ummla v15.4s, v1.16b, v4.16b\n"
- ".inst 0x6e85a432 // ummla v18.4s, v1.16b, v5.16b\n"
- "add x22, x22, #0x60\n"
+ "add x20, x20, #0x60\n"
".inst 0x6e84a455 // ummla v21.4s, v2.16b, v4.16b\n"
- ".inst 0x6e85a458 // ummla v24.4s, v2.16b, v5.16b\n"
".inst 0x6e84a47b // ummla v27.4s, v3.16b, v4.16b\n"
- ".inst 0x6e85a47e // ummla v30.4s, v3.16b, v5.16b\n"
+ "add %x[Apanel], %x[Apanel], #0x40\n"
+ ".inst 0x6e85a40c // ummla v12.4s, v0.16b, v5.16b\n"
".inst 0x6e86a40a // ummla v10.4s, v0.16b, v6.16b\n"
".inst 0x6e87a40d // ummla v13.4s, v0.16b, v7.16b\n"
+ ".inst 0x6e85a432 // ummla v18.4s, v1.16b, v5.16b\n"
".inst 0x6e86a430 // ummla v16.4s, v1.16b, v6.16b\n"
".inst 0x6e87a433 // ummla v19.4s, v1.16b, v7.16b\n"
+ ".inst 0x6e85a458 // ummla v24.4s, v2.16b, v5.16b\n"
+ ".inst 0x6e85a47e // ummla v30.4s, v3.16b, v5.16b\n"
".inst 0x6e86a456 // ummla v22.4s, v2.16b, v6.16b\n"
".inst 0x6e87a459 // ummla v25.4s, v2.16b, v7.16b\n"
".inst 0x6e86a47c // ummla v28.4s, v3.16b, v6.16b\n"
".inst 0x6e87a47f // ummla v31.4s, v3.16b, v7.16b\n"
"5:" // multiply loop done
- "subs x23, x23, #0x1\n"
+ "subs x22, x22, #0x1\n"
"uzp1 v4.2d, v8.2d, v11.2d\n"
"uzp2 v8.2d, v8.2d, v11.2d\n"
"uzp1 v11.2d, v9.2d, v12.2d\n"
@@ -283,7 +283,7 @@ void a64_interleaved_u8u32_mmla_8x12(
"bne 1b\n"
: [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
: [args_ptr] "r" (&ka), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_bblocks] "I" (offsetof(KernelArgs, bblocks))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_bf16fp32_dot_16VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_bf16fp32_dot_16VL/generic.cpp
index 520eeedfec..26861fb931 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_bf16fp32_dot_16VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_bf16fp32_dot_16VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __ARM_FEATURE_SVE
#ifdef ARM_COMPUTE_ENABLE_SME2
@@ -64,487 +64,487 @@ void sme2_gemv_bf16fp32_dot_16VL (
__asm__ __volatile__(
"ptrue p1.b\n"
".inst 0xd503477f // SMSTART ZA\n"
- "cntw x28, ALL, MUL #4\n"
- "add x27, %x[N], x28\n"
- "sub x27, x27, #0x1\n"
- "udiv x27, x27, x28\n"
- "add x22, x27, #0x3\n"
- "and x22, x22, #0xfffffffffffffffc\n"
- "mul x22, x22, x28\n"
- "mul x22, x22, %x[K]\n"
+ "cntw x27, ALL, MUL #4\n"
+ "add x26, %x[N], x27\n"
+ "sub x26, x26, #0x1\n"
+ "udiv x26, x26, x27\n"
+ "add x21, x26, #0x3\n"
+ "and x21, x21, #0xfffffffffffffffc\n"
+ "mul x21, x21, x27\n"
+ "mul x21, x21, %x[K]\n"
"mov x9, #0x0\n"
- "mov x26, %x[B_ptr]\n"
- "mov x25, %x[output_ptr]\n"
+ "mov x25, %x[B_ptr]\n"
+ "mov x24, %x[output_ptr]\n"
"ptrue p1.b\n"
".inst 0x25207811 // ptrue pn9.b\n"
- "lsl x22, x22, #0x1\n"
- "mov x21, #0x1\n"
+ "lsl x21, x21, #0x1\n"
+ "mov x20, #0x1\n"
"1:" // RHS size check loop
- "cmp x22, #0x200000\n"
+ "cmp x21, #0x200000\n"
"blt 2f\n"
- "tbnz x22, #0, 3f\n"
- "lsr x22, x22, #0x1\n"
- "lsl x21, x21, #0x1\n"
+ "tbnz x21, #0, 3f\n"
+ "lsr x21, x21, #0x1\n"
+ "lsl x20, x20, #0x1\n"
"b 1b\n"
"2:" // RHS do prefetch
- "lsl x20, x22, #0x26\n"
- "sub x21, x21, #0x1\n"
- "lsl x21, x21, #0x16\n"
- "orr x22, x22, x20\n"
- "orr x22, x22, x21\n"
- ".inst 0xf8b64b5a // rprfm pldonce, x22, [x26]\n"
+ "lsl x19, x21, #0x26\n"
+ "sub x20, x20, #0x1\n"
+ "lsl x20, x20, #0x16\n"
+ "orr x21, x21, x19\n"
+ "orr x21, x21, x20\n"
+ ".inst 0xf8b54b3a // rprfm pldonce, x21, [x25]\n"
"3:" // RHS prefetch exit
- "mov x24, %x[bias]\n"
+ "mov x23, %x[bias]\n"
"4:" // Column loop
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"bge 28f\n"
- "cmp x27, #0x2\n"
+ "cmp x26, #0x2\n"
"bgt 20f\n"
"beq 12f\n"
- "mov x23, %x[A_ptr]\n"
- "lsl x22, %x[K], #0x1\n"
- "mov x20, %x[N]\n"
- "mov x21, %x[K]\n"
- ".inst 0xf8b64af8 // rprfm pldmany, x22, [x23]\n"
- ".inst 0x25b467f0 // whilelt p8.s, XZR, x20, VLx4\n"
- "cbz x24, 5f\n"
- ".inst 0xa040c700 // ld1w { z0.s-z3.s }, pn9.b/Z, [x24]\n"
+ "mov x22, %x[A_ptr]\n"
+ "lsl x21, %x[K], #0x1\n"
+ "mov x19, %x[N]\n"
+ "mov x20, %x[K]\n"
+ ".inst 0xf8b54ad8 // rprfm pldmany, x21, [x22]\n"
+ ".inst 0x25b367f0 // whilelt p8.s, XZR, x19, VLx4\n"
+ "cbz x23, 5f\n"
+ ".inst 0xa040c6e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x23]\n"
".inst 0xc0042c00 // mova za.d[x9, #0], { z0.d-z3.d }\n"
"b 6f\n"
"5:" // Width 1: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"6:" // Width 1: setup done
- "cmp x21, #0x8\n"
+ "cmp x20, #0x8\n"
"ble 8f\n"
"7:" // Width 1: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x21\n"
- "ld1rqh { z10.h }, p0/Z, [x23]\n"
- "sub x21, x21, #0x8\n"
- ".inst 0xa040a741 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26]\n"
+ "whilelt p0.h, XZR, x20\n"
+ "ld1rqh { z10.h }, p0/Z, [x22]\n"
+ "sub x20, x20, #0x8\n"
+ ".inst 0xa040a721 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x25]\n"
".inst 0xc15ab018 // bfdot za.s[x9, 0], { z0.h-z3.h }, z10.h[0]\n"
- "addvl x26, x26, #16\n"
- "cmp x21, #0x8\n"
- ".inst 0xa040a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ "cmp x20, #0x8\n"
+ ".inst 0xa040a739 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25]\n"
".inst 0xc15ab718 // bfdot za.s[x9, 0], { z24.h-z27.h }, z10.h[1]\n"
- "addvl x26, x26, #16\n"
- "add x23, x23, #0x10\n"
- ".inst 0xa040a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0xa040a72d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x25]\n"
".inst 0xc15ab998 // bfdot za.s[x9, 0], { z12.h-z15.h }, z10.h[2]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa040a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
".inst 0xc15abe18 // bfdot za.s[x9, 0], { z16.h-z19.h }, z10.h[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"bgt 7b\n"
"8:" // Width 1: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x21\n"
- "ld1rqh { z10.h }, p0/Z, [x23]\n"
- "subs x21, x21, #0x2\n"
- ".inst 0xa040a741 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26]\n"
- "add x23, x23, #0x10\n"
+ "whilelt p0.h, XZR, x20\n"
+ "ld1rqh { z10.h }, p0/Z, [x22]\n"
+ "subs x20, x20, #0x2\n"
+ ".inst 0xa040a721 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x25]\n"
+ "add x22, x22, #0x10\n"
".inst 0xc15ab018 // bfdot za.s[x9, 0], { z0.h-z3.h }, z10.h[0]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 9f\n"
- ".inst 0xa040a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26]\n"
- "subs x21, x21, #0x2\n"
+ ".inst 0xa040a739 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25]\n"
+ "subs x20, x20, #0x2\n"
".inst 0xc15ab718 // bfdot za.s[x9, 0], { z24.h-z27.h }, z10.h[1]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 9f\n"
- ".inst 0xa040a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26]\n"
- "subs x21, x21, #0x2\n"
+ ".inst 0xa040a72d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x25]\n"
+ "subs x20, x20, #0x2\n"
".inst 0xc15ab998 // bfdot za.s[x9, 0], { z12.h-z15.h }, z10.h[2]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 9f\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
".inst 0xc15abe18 // bfdot za.s[x9, 0], { z16.h-z19.h }, z10.h[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"9:" // Width 1: Multiply loop: multiply skip
"tbz %x[flags], #1, 10f\n"
- "add x21, %x[args_ptr], %[offset_min]\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
+ "add x20, %x[args_ptr], %[offset_min]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
- "ld1rw { z0.s }, p1/Z, [x21]\n"
- "ld1rw { z6.s }, p1/Z, [x20]\n"
+ "ld1rw { z0.s }, p1/Z, [x20]\n"
+ "ld1rw { z6.s }, p1/Z, [x19]\n"
".inst 0xc1a6c808 // fclamp { z8.s-z11.s }, z0.s, z6.s\n"
- ".inst 0xa060c328 // st1w { z8.s-z11.s }, p8, [x25]\n"
- "addvl x25, x25, #4\n"
+ ".inst 0xa060c308 // st1w { z8.s-z11.s }, p8, [x24]\n"
+ "addvl x24, x24, #4\n"
"b 11f\n"
"10:" // Width 1: No activation
".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
- ".inst 0xa060c328 // st1w { z8.s-z11.s }, p8, [x25]\n"
- "addvl x25, x25, #4\n"
+ ".inst 0xa060c308 // st1w { z8.s-z11.s }, p8, [x24]\n"
+ "addvl x24, x24, #4\n"
"11:" // Width 1: Output done
"b 36f\n"
"12:" // Width 2
- "mov x23, %x[A_ptr]\n"
- "lsl x22, %x[K], #0x1\n"
- "sub x20, %x[N], x28\n"
- "mov x21, %x[K]\n"
- ".inst 0xf8b64af8 // rprfm pldmany, x22, [x23]\n"
- ".inst 0x25b467f0 // whilelt p8.s, XZR, x20, VLx4\n"
- "cbz x24, 13f\n"
- ".inst 0xa040c700 // ld1w { z0.s-z3.s }, pn9.b/Z, [x24]\n"
+ "mov x22, %x[A_ptr]\n"
+ "lsl x21, %x[K], #0x1\n"
+ "sub x19, %x[N], x27\n"
+ "mov x20, %x[K]\n"
+ ".inst 0xf8b54ad8 // rprfm pldmany, x21, [x22]\n"
+ ".inst 0x25b367f0 // whilelt p8.s, XZR, x19, VLx4\n"
+ "cbz x23, 13f\n"
+ ".inst 0xa040c6e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x23]\n"
".inst 0xc0042c00 // mova za.d[x9, #0], { z0.d-z3.d }\n"
- ".inst 0xa041c708 // ld1w { z8.s-z11.s }, pn9.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xa041c6e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
".inst 0xc0042d01 // mova za.d[x9, #1], { z8.d-z11.d }\n"
"b 14f\n"
"13:" // Width 2: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"14:" // Width 2: setup done
- "cmp x21, #0x8\n"
+ "cmp x20, #0x8\n"
"ble 16f\n"
"15:" // Width 2: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x21\n"
- "ld1rqh { z10.h }, p0/Z, [x23]\n"
- "sub x21, x21, #0x8\n"
- ".inst 0xa040a741 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26]\n"
+ "whilelt p0.h, XZR, x20\n"
+ "ld1rqh { z10.h }, p0/Z, [x22]\n"
+ "sub x20, x20, #0x8\n"
+ ".inst 0xa040a721 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x25]\n"
".inst 0xc15ab018 // bfdot za.s[x9, 0], { z0.h-z3.h }, z10.h[0]\n"
- "cmp x21, #0x8\n"
- "add x23, x23, #0x10\n"
- ".inst 0xa041a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ "cmp x20, #0x8\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0xa041a725 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15ab099 // bfdot za.s[x9, 1], { z4.h-z7.h }, z10.h[0]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa040a739 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25]\n"
".inst 0xc15ab718 // bfdot za.s[x9, 0], { z24.h-z27.h }, z10.h[1]\n"
- ".inst 0xa041a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15ab619 // bfdot za.s[x9, 1], { z16.h-z19.h }, z10.h[1]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa040a72d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x25]\n"
".inst 0xc15ab998 // bfdot za.s[x9, 0], { z12.h-z15.h }, z10.h[2]\n"
- ".inst 0xa041a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15aba19 // bfdot za.s[x9, 1], { z16.h-z19.h }, z10.h[2]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa040a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
".inst 0xc15abe18 // bfdot za.s[x9, 0], { z16.h-z19.h }, z10.h[3]\n"
- ".inst 0xa041a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041a739 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15abf19 // bfdot za.s[x9, 1], { z24.h-z27.h }, z10.h[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"bgt 15b\n"
"16:" // Width 2: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x21\n"
- "ld1rqh { z10.h }, p0/Z, [x23]\n"
- "subs x21, x21, #0x2\n"
- ".inst 0xa040a741 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26]\n"
- "add x23, x23, #0x10\n"
+ "whilelt p0.h, XZR, x20\n"
+ "ld1rqh { z10.h }, p0/Z, [x22]\n"
+ "subs x20, x20, #0x2\n"
+ ".inst 0xa040a721 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x25]\n"
+ "add x22, x22, #0x10\n"
".inst 0xc15ab018 // bfdot za.s[x9, 0], { z0.h-z3.h }, z10.h[0]\n"
- ".inst 0xa041a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041a725 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15ab099 // bfdot za.s[x9, 1], { z4.h-z7.h }, z10.h[0]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 17f\n"
- ".inst 0xa040a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26]\n"
- "subs x21, x21, #0x2\n"
+ ".inst 0xa040a739 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25]\n"
+ "subs x20, x20, #0x2\n"
".inst 0xc15ab718 // bfdot za.s[x9, 0], { z24.h-z27.h }, z10.h[1]\n"
- ".inst 0xa041a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15ab619 // bfdot za.s[x9, 1], { z16.h-z19.h }, z10.h[1]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 17f\n"
- ".inst 0xa040a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26]\n"
- "subs x21, x21, #0x2\n"
+ ".inst 0xa040a72d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x25]\n"
+ "subs x20, x20, #0x2\n"
".inst 0xc15ab998 // bfdot za.s[x9, 0], { z12.h-z15.h }, z10.h[2]\n"
- ".inst 0xa041a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15aba19 // bfdot za.s[x9, 1], { z16.h-z19.h }, z10.h[2]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 17f\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
".inst 0xc15abe18 // bfdot za.s[x9, 0], { z16.h-z19.h }, z10.h[3]\n"
- ".inst 0xa041a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041a739 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15abf19 // bfdot za.s[x9, 1], { z24.h-z27.h }, z10.h[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"17:" // Width 2: Multiply loop: multiply skip
"tbz %x[flags], #1, 18f\n"
- "add x21, %x[args_ptr], %[offset_min]\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
+ "add x20, %x[args_ptr], %[offset_min]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
- "ld1rw { z0.s }, p1/Z, [x21]\n"
+ "ld1rw { z0.s }, p1/Z, [x20]\n"
".inst 0xc0062c34 // mova { z20.d-z23.d }, za.d[x9, #1]\n"
- "ld1rw { z6.s }, p1/Z, [x20]\n"
+ "ld1rw { z6.s }, p1/Z, [x19]\n"
".inst 0xc1a6c808 // fclamp { z8.s-z11.s }, z0.s, z6.s\n"
- ".inst 0xa060c728 // st1w { z8.s-z11.s }, pn9.b, [x25]\n"
+ ".inst 0xa060c708 // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
".inst 0xc1a6c814 // fclamp { z20.s-z23.s }, z0.s, z6.s\n"
- ".inst 0xa061c334 // st1w { z20.s-z23.s }, p8, [x25, #0x4, MUL VL]\n"
- "addvl x25, x25, #8\n"
+ ".inst 0xa061c314 // st1w { z20.s-z23.s }, p8, [x24, #0x4, MUL VL]\n"
+ "addvl x24, x24, #8\n"
"b 19f\n"
"18:" // Width 2: No activation
".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
- ".inst 0xa060c728 // st1w { z8.s-z11.s }, pn9.b, [x25]\n"
+ ".inst 0xa060c708 // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
".inst 0xc0062c34 // mova { z20.d-z23.d }, za.d[x9, #1]\n"
- ".inst 0xa061c334 // st1w { z20.s-z23.s }, p8, [x25, #0x4, MUL VL]\n"
- "addvl x25, x25, #8\n"
+ ".inst 0xa061c314 // st1w { z20.s-z23.s }, p8, [x24, #0x4, MUL VL]\n"
+ "addvl x24, x24, #8\n"
"19:" // Width 2: Output done
"b 36f\n"
"20:" // Width 3
- "mov x20, #0x2\n"
- "mov x23, %x[A_ptr]\n"
- "lsl x22, %x[K], #0x1\n"
- "msub x20, x28, x20, %x[N]\n"
- "mov x21, %x[K]\n"
- ".inst 0xf8b64af8 // rprfm pldmany, x22, [x23]\n"
- ".inst 0x25b467f0 // whilelt p8.s, XZR, x20, VLx4\n"
- "cbz x24, 21f\n"
- ".inst 0xa040c700 // ld1w { z0.s-z3.s }, pn9.b/Z, [x24]\n"
+ "mov x19, #0x2\n"
+ "mov x22, %x[A_ptr]\n"
+ "lsl x21, %x[K], #0x1\n"
+ "msub x19, x27, x19, %x[N]\n"
+ "mov x20, %x[K]\n"
+ ".inst 0xf8b54ad8 // rprfm pldmany, x21, [x22]\n"
+ ".inst 0x25b367f0 // whilelt p8.s, XZR, x19, VLx4\n"
+ "cbz x23, 21f\n"
+ ".inst 0xa040c6e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x23]\n"
".inst 0xc0042c00 // mova za.d[x9, #0], { z0.d-z3.d }\n"
- ".inst 0xa041c708 // ld1w { z8.s-z11.s }, pn9.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xa041c6e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
".inst 0xc0042d01 // mova za.d[x9, #1], { z8.d-z11.d }\n"
- ".inst 0xa042c704 // ld1w { z4.s-z7.s }, pn9.b/Z, [x24, #0x8, MUL VL]\n"
+ ".inst 0xa042c6e4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
".inst 0xc0042c82 // mova za.d[x9, #2], { z4.d-z7.d }\n"
"b 22f\n"
"21:" // Width 3: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"22:" // Width 3: setup done
- "cmp x21, #0x8\n"
+ "cmp x20, #0x8\n"
"ble 24f\n"
"23:" // Width 3: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x21\n"
- "ld1rqh { z10.h }, p0/Z, [x23]\n"
- "sub x21, x21, #0x8\n"
- ".inst 0xa040a741 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26]\n"
+ "whilelt p0.h, XZR, x20\n"
+ "ld1rqh { z10.h }, p0/Z, [x22]\n"
+ "sub x20, x20, #0x8\n"
+ ".inst 0xa040a721 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x25]\n"
".inst 0xc15ab018 // bfdot za.s[x9, 0], { z0.h-z3.h }, z10.h[0]\n"
- "cmp x21, #0x8\n"
- "add x23, x23, #0x10\n"
- ".inst 0xa041a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ "cmp x20, #0x8\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0xa041a725 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15ab099 // bfdot za.s[x9, 1], { z4.h-z7.h }, z10.h[0]\n"
- ".inst 0xa042a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042a735 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc15ab29a // bfdot za.s[x9, 2], { z20.h-z23.h }, z10.h[0]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa040a739 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25]\n"
".inst 0xc15ab718 // bfdot za.s[x9, 0], { z24.h-z27.h }, z10.h[1]\n"
- ".inst 0xa041a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15ab619 // bfdot za.s[x9, 1], { z16.h-z19.h }, z10.h[1]\n"
- ".inst 0xa042a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042a739 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc15ab71a // bfdot za.s[x9, 2], { z24.h-z27.h }, z10.h[1]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa040a72d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x25]\n"
".inst 0xc15ab998 // bfdot za.s[x9, 0], { z12.h-z15.h }, z10.h[2]\n"
- ".inst 0xa041a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15aba19 // bfdot za.s[x9, 1], { z16.h-z19.h }, z10.h[2]\n"
- ".inst 0xa042a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042a73d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc15abb9a // bfdot za.s[x9, 2], { z28.h-z31.h }, z10.h[2]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa040a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
".inst 0xc15abe18 // bfdot za.s[x9, 0], { z16.h-z19.h }, z10.h[3]\n"
- ".inst 0xa041a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041a739 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15abf19 // bfdot za.s[x9, 1], { z24.h-z27.h }, z10.h[3]\n"
- ".inst 0xa042a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc15abe1a // bfdot za.s[x9, 2], { z16.h-z19.h }, z10.h[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"bgt 23b\n"
"24:" // Width 3: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x21\n"
- "ld1rqh { z10.h }, p0/Z, [x23]\n"
- "subs x21, x21, #0x2\n"
- ".inst 0xa040a741 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26]\n"
- "add x23, x23, #0x10\n"
+ "whilelt p0.h, XZR, x20\n"
+ "ld1rqh { z10.h }, p0/Z, [x22]\n"
+ "subs x20, x20, #0x2\n"
+ ".inst 0xa040a721 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x25]\n"
+ "add x22, x22, #0x10\n"
".inst 0xc15ab018 // bfdot za.s[x9, 0], { z0.h-z3.h }, z10.h[0]\n"
- ".inst 0xa041a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041a725 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15ab099 // bfdot za.s[x9, 1], { z4.h-z7.h }, z10.h[0]\n"
- ".inst 0xa042a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042a735 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc15ab29a // bfdot za.s[x9, 2], { z20.h-z23.h }, z10.h[0]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 25f\n"
- ".inst 0xa040a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26]\n"
- "subs x21, x21, #0x2\n"
+ ".inst 0xa040a739 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25]\n"
+ "subs x20, x20, #0x2\n"
".inst 0xc15ab718 // bfdot za.s[x9, 0], { z24.h-z27.h }, z10.h[1]\n"
- ".inst 0xa041a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15ab619 // bfdot za.s[x9, 1], { z16.h-z19.h }, z10.h[1]\n"
- ".inst 0xa042a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042a739 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc15ab71a // bfdot za.s[x9, 2], { z24.h-z27.h }, z10.h[1]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 25f\n"
- ".inst 0xa040a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26]\n"
- "subs x21, x21, #0x2\n"
+ ".inst 0xa040a72d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x25]\n"
+ "subs x20, x20, #0x2\n"
".inst 0xc15ab998 // bfdot za.s[x9, 0], { z12.h-z15.h }, z10.h[2]\n"
- ".inst 0xa041a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15aba19 // bfdot za.s[x9, 1], { z16.h-z19.h }, z10.h[2]\n"
- ".inst 0xa042a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042a73d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc15abb9a // bfdot za.s[x9, 2], { z28.h-z31.h }, z10.h[2]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 25f\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
".inst 0xc15abe18 // bfdot za.s[x9, 0], { z16.h-z19.h }, z10.h[3]\n"
- ".inst 0xa041a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041a739 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15abf19 // bfdot za.s[x9, 1], { z24.h-z27.h }, z10.h[3]\n"
- ".inst 0xa042a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc15abe1a // bfdot za.s[x9, 2], { z16.h-z19.h }, z10.h[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"25:" // Width 3: Multiply loop: multiply skip
"tbz %x[flags], #1, 26f\n"
- "add x21, %x[args_ptr], %[offset_min]\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
+ "add x20, %x[args_ptr], %[offset_min]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
- "ld1rw { z0.s }, p1/Z, [x21]\n"
+ "ld1rw { z0.s }, p1/Z, [x20]\n"
".inst 0xc0062c34 // mova { z20.d-z23.d }, za.d[x9, #1]\n"
- "ld1rw { z6.s }, p1/Z, [x20]\n"
+ "ld1rw { z6.s }, p1/Z, [x19]\n"
".inst 0xc1a6c808 // fclamp { z8.s-z11.s }, z0.s, z6.s\n"
".inst 0xc0062c50 // mova { z16.d-z19.d }, za.d[x9, #2]\n"
- ".inst 0xa060c728 // st1w { z8.s-z11.s }, pn9.b, [x25]\n"
+ ".inst 0xa060c708 // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
".inst 0xc1a6c814 // fclamp { z20.s-z23.s }, z0.s, z6.s\n"
- ".inst 0xa061c734 // st1w { z20.s-z23.s }, pn9.b, [x25, #0x4, MUL VL]\n"
+ ".inst 0xa061c714 // st1w { z20.s-z23.s }, pn9.b, [x24, #0x4, MUL VL]\n"
".inst 0xc1a6c810 // fclamp { z16.s-z19.s }, z0.s, z6.s\n"
- ".inst 0xa062c330 // st1w { z16.s-z19.s }, p8, [x25, #0x8, MUL VL]\n"
- "addvl x25, x25, #12\n"
+ ".inst 0xa062c310 // st1w { z16.s-z19.s }, p8, [x24, #0x8, MUL VL]\n"
+ "addvl x24, x24, #12\n"
"b 27f\n"
"26:" // Width 3: No activation
".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
- ".inst 0xa060c728 // st1w { z8.s-z11.s }, pn9.b, [x25]\n"
+ ".inst 0xa060c708 // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
".inst 0xc0062c34 // mova { z20.d-z23.d }, za.d[x9, #1]\n"
- ".inst 0xa061c734 // st1w { z20.s-z23.s }, pn9.b, [x25, #0x4, MUL VL]\n"
+ ".inst 0xa061c714 // st1w { z20.s-z23.s }, pn9.b, [x24, #0x4, MUL VL]\n"
".inst 0xc0062c50 // mova { z16.d-z19.d }, za.d[x9, #2]\n"
- ".inst 0xa062c330 // st1w { z16.s-z19.s }, p8, [x25, #0x8, MUL VL]\n"
- "addvl x25, x25, #12\n"
+ ".inst 0xa062c310 // st1w { z16.s-z19.s }, p8, [x24, #0x8, MUL VL]\n"
+ "addvl x24, x24, #12\n"
"27:" // Width 3: Output done
"b 36f\n"
"28:" // Width 4
- "mov x20, #0x3\n"
- "mov x23, %x[A_ptr]\n"
- "lsl x22, %x[K], #0x1\n"
- "msub x20, x28, x20, %x[N]\n"
- "mov x21, %x[K]\n"
- ".inst 0xf8b64af8 // rprfm pldmany, x22, [x23]\n"
- ".inst 0x25b467f0 // whilelt p8.s, XZR, x20, VLx4\n"
- "cbz x24, 29f\n"
- ".inst 0xa040c700 // ld1w { z0.s-z3.s }, pn9.b/Z, [x24]\n"
+ "mov x19, #0x3\n"
+ "mov x22, %x[A_ptr]\n"
+ "lsl x21, %x[K], #0x1\n"
+ "msub x19, x27, x19, %x[N]\n"
+ "mov x20, %x[K]\n"
+ ".inst 0xf8b54ad8 // rprfm pldmany, x21, [x22]\n"
+ ".inst 0x25b367f0 // whilelt p8.s, XZR, x19, VLx4\n"
+ "cbz x23, 29f\n"
+ ".inst 0xa040c6e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x23]\n"
".inst 0xc0042c00 // mova za.d[x9, #0], { z0.d-z3.d }\n"
- ".inst 0xa041c708 // ld1w { z8.s-z11.s }, pn9.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xa041c6e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
".inst 0xc0042d01 // mova za.d[x9, #1], { z8.d-z11.d }\n"
- ".inst 0xa042c704 // ld1w { z4.s-z7.s }, pn9.b/Z, [x24, #0x8, MUL VL]\n"
+ ".inst 0xa042c6e4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
".inst 0xc0042c82 // mova za.d[x9, #2], { z4.d-z7.d }\n"
- ".inst 0xa043c710 // ld1w { z16.s-z19.s }, pn9.b/Z, [x24, #0xc, MUL VL]\n"
+ ".inst 0xa043c6f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
".inst 0xc0042e03 // mova za.d[x9, #3], { z16.d-z19.d }\n"
- "addvl x24, x24, #16\n"
+ "addvl x23, x23, #16\n"
"b 30f\n"
"29:" // Width 4: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"30:" // Width 4: setup done
- "cmp x21, #0x8\n"
+ "cmp x20, #0x8\n"
"ble 32f\n"
"31:" // Width 4: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x21\n"
- "ld1rqh { z10.h }, p0/Z, [x23]\n"
- "sub x21, x21, #0x8\n"
- ".inst 0xa040a741 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26]\n"
+ "whilelt p0.h, XZR, x20\n"
+ "ld1rqh { z10.h }, p0/Z, [x22]\n"
+ "sub x20, x20, #0x8\n"
+ ".inst 0xa040a721 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x25]\n"
".inst 0xc15ab018 // bfdot za.s[x9, 0], { z0.h-z3.h }, z10.h[0]\n"
- "cmp x21, #0x8\n"
- "add x23, x23, #0x10\n"
- ".inst 0xa041a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ "cmp x20, #0x8\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0xa041a725 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15ab099 // bfdot za.s[x9, 1], { z4.h-z7.h }, z10.h[0]\n"
- ".inst 0xa042a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042a735 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc15ab29a // bfdot za.s[x9, 2], { z20.h-z23.h }, z10.h[0]\n"
- ".inst 0xa043a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc15ab21b // bfdot za.s[x9, 3], { z16.h-z19.h }, z10.h[0]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa040a739 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25]\n"
".inst 0xc15ab718 // bfdot za.s[x9, 0], { z24.h-z27.h }, z10.h[1]\n"
- ".inst 0xa041a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15ab619 // bfdot za.s[x9, 1], { z16.h-z19.h }, z10.h[1]\n"
- ".inst 0xa042a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042a739 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc15ab71a // bfdot za.s[x9, 2], { z24.h-z27.h }, z10.h[1]\n"
- ".inst 0xa043a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc15ab61b // bfdot za.s[x9, 3], { z16.h-z19.h }, z10.h[1]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa040a72d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x25]\n"
".inst 0xc15ab998 // bfdot za.s[x9, 0], { z12.h-z15.h }, z10.h[2]\n"
- ".inst 0xa041a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15aba19 // bfdot za.s[x9, 1], { z16.h-z19.h }, z10.h[2]\n"
- ".inst 0xa042a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042a73d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc15abb9a // bfdot za.s[x9, 2], { z28.h-z31.h }, z10.h[2]\n"
- ".inst 0xa043a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043a735 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc15aba9b // bfdot za.s[x9, 3], { z20.h-z23.h }, z10.h[2]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa040a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
".inst 0xc15abe18 // bfdot za.s[x9, 0], { z16.h-z19.h }, z10.h[3]\n"
- ".inst 0xa041a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041a739 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15abf19 // bfdot za.s[x9, 1], { z24.h-z27.h }, z10.h[3]\n"
- ".inst 0xa042a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc15abe1a // bfdot za.s[x9, 2], { z16.h-z19.h }, z10.h[3]\n"
- ".inst 0xa043a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc15abe1b // bfdot za.s[x9, 3], { z16.h-z19.h }, z10.h[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"bgt 31b\n"
"32:" // Width 4: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x21\n"
- "ld1rqh { z10.h }, p0/Z, [x23]\n"
- "subs x21, x21, #0x2\n"
- ".inst 0xa040a741 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26]\n"
- "add x23, x23, #0x10\n"
+ "whilelt p0.h, XZR, x20\n"
+ "ld1rqh { z10.h }, p0/Z, [x22]\n"
+ "subs x20, x20, #0x2\n"
+ ".inst 0xa040a721 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x25]\n"
+ "add x22, x22, #0x10\n"
".inst 0xc15ab018 // bfdot za.s[x9, 0], { z0.h-z3.h }, z10.h[0]\n"
- ".inst 0xa041a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041a725 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15ab099 // bfdot za.s[x9, 1], { z4.h-z7.h }, z10.h[0]\n"
- ".inst 0xa042a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042a735 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc15ab29a // bfdot za.s[x9, 2], { z20.h-z23.h }, z10.h[0]\n"
- ".inst 0xa043a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc15ab21b // bfdot za.s[x9, 3], { z16.h-z19.h }, z10.h[0]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 33f\n"
- ".inst 0xa040a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26]\n"
- "subs x21, x21, #0x2\n"
+ ".inst 0xa040a739 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25]\n"
+ "subs x20, x20, #0x2\n"
".inst 0xc15ab718 // bfdot za.s[x9, 0], { z24.h-z27.h }, z10.h[1]\n"
- ".inst 0xa041a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15ab619 // bfdot za.s[x9, 1], { z16.h-z19.h }, z10.h[1]\n"
- ".inst 0xa042a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042a739 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc15ab71a // bfdot za.s[x9, 2], { z24.h-z27.h }, z10.h[1]\n"
- ".inst 0xa043a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc15ab61b // bfdot za.s[x9, 3], { z16.h-z19.h }, z10.h[1]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 33f\n"
- ".inst 0xa040a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26]\n"
- "subs x21, x21, #0x2\n"
+ ".inst 0xa040a72d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x25]\n"
+ "subs x20, x20, #0x2\n"
".inst 0xc15ab998 // bfdot za.s[x9, 0], { z12.h-z15.h }, z10.h[2]\n"
- ".inst 0xa041a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15aba19 // bfdot za.s[x9, 1], { z16.h-z19.h }, z10.h[2]\n"
- ".inst 0xa042a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042a73d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc15abb9a // bfdot za.s[x9, 2], { z28.h-z31.h }, z10.h[2]\n"
- ".inst 0xa043a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043a735 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc15aba9b // bfdot za.s[x9, 3], { z20.h-z23.h }, z10.h[2]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 33f\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
".inst 0xc15abe18 // bfdot za.s[x9, 0], { z16.h-z19.h }, z10.h[3]\n"
- ".inst 0xa041a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041a739 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15abf19 // bfdot za.s[x9, 1], { z24.h-z27.h }, z10.h[3]\n"
- ".inst 0xa042a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc15abe1a // bfdot za.s[x9, 2], { z16.h-z19.h }, z10.h[3]\n"
- ".inst 0xa043a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc15abe1b // bfdot za.s[x9, 3], { z16.h-z19.h }, z10.h[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"33:" // Width 4: Multiply loop: multiply skip
"tbz %x[flags], #1, 34f\n"
- "add x21, %x[args_ptr], %[offset_min]\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
+ "add x20, %x[args_ptr], %[offset_min]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
- "ld1rw { z0.s }, p1/Z, [x21]\n"
+ "ld1rw { z0.s }, p1/Z, [x20]\n"
".inst 0xc0062c34 // mova { z20.d-z23.d }, za.d[x9, #1]\n"
- "ld1rw { z6.s }, p1/Z, [x20]\n"
+ "ld1rw { z6.s }, p1/Z, [x19]\n"
".inst 0xc1a6c808 // fclamp { z8.s-z11.s }, z0.s, z6.s\n"
".inst 0xc0062c50 // mova { z16.d-z19.d }, za.d[x9, #2]\n"
- ".inst 0xa060c728 // st1w { z8.s-z11.s }, pn9.b, [x25]\n"
+ ".inst 0xa060c708 // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
".inst 0xc1a6c814 // fclamp { z20.s-z23.s }, z0.s, z6.s\n"
".inst 0xc0062c78 // mova { z24.d-z27.d }, za.d[x9, #3]\n"
- ".inst 0xa061c734 // st1w { z20.s-z23.s }, pn9.b, [x25, #0x4, MUL VL]\n"
+ ".inst 0xa061c714 // st1w { z20.s-z23.s }, pn9.b, [x24, #0x4, MUL VL]\n"
".inst 0xc1a6c810 // fclamp { z16.s-z19.s }, z0.s, z6.s\n"
- ".inst 0xa062c730 // st1w { z16.s-z19.s }, pn9.b, [x25, #0x8, MUL VL]\n"
+ ".inst 0xa062c710 // st1w { z16.s-z19.s }, pn9.b, [x24, #0x8, MUL VL]\n"
".inst 0xc1a6c818 // fclamp { z24.s-z27.s }, z0.s, z6.s\n"
- ".inst 0xa063c338 // st1w { z24.s-z27.s }, p8, [x25, #0xc, MUL VL]\n"
- "addvl x25, x25, #16\n"
+ ".inst 0xa063c318 // st1w { z24.s-z27.s }, p8, [x24, #0xc, MUL VL]\n"
+ "addvl x24, x24, #16\n"
"b 35f\n"
"34:" // Width 4: No activation
".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
- ".inst 0xa060c728 // st1w { z8.s-z11.s }, pn9.b, [x25]\n"
+ ".inst 0xa060c708 // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
".inst 0xc0062c34 // mova { z20.d-z23.d }, za.d[x9, #1]\n"
- ".inst 0xa061c734 // st1w { z20.s-z23.s }, pn9.b, [x25, #0x4, MUL VL]\n"
+ ".inst 0xa061c714 // st1w { z20.s-z23.s }, pn9.b, [x24, #0x4, MUL VL]\n"
".inst 0xc0062c50 // mova { z16.d-z19.d }, za.d[x9, #2]\n"
- ".inst 0xa062c730 // st1w { z16.s-z19.s }, pn9.b, [x25, #0x8, MUL VL]\n"
+ ".inst 0xa062c710 // st1w { z16.s-z19.s }, pn9.b, [x24, #0x8, MUL VL]\n"
".inst 0xc0062c78 // mova { z24.d-z27.d }, za.d[x9, #3]\n"
- ".inst 0xa063c338 // st1w { z24.s-z27.s }, p8, [x25, #0xc, MUL VL]\n"
- "addvl x25, x25, #16\n"
+ ".inst 0xa063c318 // st1w { z24.s-z27.s }, p8, [x24, #0xc, MUL VL]\n"
+ "addvl x24, x24, #16\n"
"35:" // Width 4: Output done
- "subs x27, x27, #0x4\n"
- "sub %x[N], %x[N], x28, LSL #2\n"
+ "subs x26, x26, #0x4\n"
+ "sub %x[N], %x[N], x27, LSL #2\n"
"bgt 4b\n"
"36:" // Exit
".inst 0xd503467f // SMSTOP\n"
"ptrue p1.b\n"
: [N] "+&r" (N)
: [A_ptr] "r" (A_ptr), [B_ptr] "r" (B_ptr), [K] "r" (K), [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [output_ptr] "r" (output_ptr)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
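
The hunks above are a purely mechanical register renumbering: every temporary shifts down by one (x20 becomes x19, x26 becomes x25, and so on, restoring x19 to the clobber list), so each hard-coded ".inst" word must have its 5-bit base-register field, bits 5..9 for these SVE/SME loads and stores, repatched to match. That is why, for example, 0xa040a741 (base x26) becomes 0xa040a721 (base x25), a difference of exactly 1 << 5. A minimal sketch of that bit manipulation, assuming nothing beyond the encodings visible in the diff (set_base_reg is a hypothetical illustrative helper, not ComputeLibrary code):

#include <cstdint>
#include <cstdio>

// Hypothetical helper: replace the 5-bit Rn base-register field (bits 5..9)
// of a raw load/store encoding such as the ".inst" words in the hunks above.
static uint32_t set_base_reg(uint32_t inst, unsigned xn)
{
    return (inst & ~(0x1Fu << 5)) | ((xn & 0x1Fu) << 5);
}

int main()
{
    uint32_t pre  = 0xa040a741;             // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26]
    uint32_t post = set_base_reg(pre, 25);  // rebase onto x25, as in the revert
    std::printf("0x%08x\n", post);          // prints 0xa040a721, matching the hunks
    return 0;
}

Spot-checking other pre/post pairs in the diff (0xa041a745 vs 0xa041a725, 0xa060c728 vs 0xa060c708) shows the same single-field change.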
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32_mla_16VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32_mla_16VL/generic.cpp
index 9224868e6a..4c0ae2c6bd 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32_mla_16VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32_mla_16VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __ARM_FEATURE_SVE
#ifdef ARM_COMPUTE_ENABLE_SME2
@@ -63,487 +63,487 @@ void sme2_gemv_fp32_mla_16VL (
__asm__ __volatile__(
"ptrue p1.b\n"
".inst 0xd503477f // SMSTART ZA\n"
- "cntw x28, ALL, MUL #4\n"
- "add x27, %x[N], x28\n"
- "sub x27, x27, #0x1\n"
- "udiv x27, x27, x28\n"
- "add x22, x27, #0x3\n"
- "and x22, x22, #0xfffffffffffffffc\n"
- "mul x22, x22, x28\n"
- "mul x22, x22, %x[K]\n"
+ "cntw x27, ALL, MUL #4\n"
+ "add x26, %x[N], x27\n"
+ "sub x26, x26, #0x1\n"
+ "udiv x26, x26, x27\n"
+ "add x21, x26, #0x3\n"
+ "and x21, x21, #0xfffffffffffffffc\n"
+ "mul x21, x21, x27\n"
+ "mul x21, x21, %x[K]\n"
"mov x9, #0x0\n"
- "mov x26, %x[B_ptr]\n"
- "mov x25, %x[output_ptr]\n"
+ "mov x25, %x[B_ptr]\n"
+ "mov x24, %x[output_ptr]\n"
"ptrue p1.b\n"
".inst 0x25207811 // ptrue pn9.b\n"
- "lsl x22, x22, #0x2\n"
- "mov x21, #0x1\n"
+ "lsl x21, x21, #0x2\n"
+ "mov x20, #0x1\n"
"1:" // RHS size check loop
- "cmp x22, #0x200000\n"
+ "cmp x21, #0x200000\n"
"blt 2f\n"
- "tbnz x22, #0, 3f\n"
- "lsr x22, x22, #0x1\n"
- "lsl x21, x21, #0x1\n"
+ "tbnz x21, #0, 3f\n"
+ "lsr x21, x21, #0x1\n"
+ "lsl x20, x20, #0x1\n"
"b 1b\n"
"2:" // RHS do prefetch
- "lsl x20, x22, #0x26\n"
- "sub x21, x21, #0x1\n"
- "lsl x21, x21, #0x16\n"
- "orr x22, x22, x20\n"
- "orr x22, x22, x21\n"
- ".inst 0xf8b64b5a // rprfm pldonce, x22, [x26]\n"
+ "lsl x19, x21, #0x26\n"
+ "sub x20, x20, #0x1\n"
+ "lsl x20, x20, #0x16\n"
+ "orr x21, x21, x19\n"
+ "orr x21, x21, x20\n"
+ ".inst 0xf8b54b3a // rprfm pldonce, x21, [x25]\n"
"3:" // RHS prefetch exit
- "mov x24, %x[bias]\n"
+ "mov x23, %x[bias]\n"
"4:" // Column loop
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"bge 28f\n"
- "cmp x27, #0x2\n"
+ "cmp x26, #0x2\n"
"bgt 20f\n"
"beq 12f\n"
- "mov x23, %x[A_ptr]\n"
- "lsl x22, %x[K], #0x2\n"
- "mov x20, %x[N]\n"
- "mov x21, %x[K]\n"
- ".inst 0xf8b64af8 // rprfm pldmany, x22, [x23]\n"
- ".inst 0x25b467f0 // whilelt p8.s, XZR, x20, VLx4\n"
- "cbz x24, 5f\n"
- ".inst 0xa040c700 // ld1w { z0.s-z3.s }, pn9.b/Z, [x24]\n"
+ "mov x22, %x[A_ptr]\n"
+ "lsl x21, %x[K], #0x2\n"
+ "mov x19, %x[N]\n"
+ "mov x20, %x[K]\n"
+ ".inst 0xf8b54ad8 // rprfm pldmany, x21, [x22]\n"
+ ".inst 0x25b367f0 // whilelt p8.s, XZR, x19, VLx4\n"
+ "cbz x23, 5f\n"
+ ".inst 0xa040c6e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x23]\n"
".inst 0xc0042c00 // mova za.d[x9, #0], { z0.d-z3.d }\n"
"b 6f\n"
"5:" // Width 1: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"6:" // Width 1: setup done
- "cmp x21, #0x4\n"
+ "cmp x20, #0x4\n"
"ble 8f\n"
"7:" // Width 1: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x21\n"
- "ld1rqw { z10.s }, p0/Z, [x23]\n"
- "sub x21, x21, #0x4\n"
- ".inst 0xa040c741 // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x26]\n"
+ "whilelt p0.s, XZR, x20\n"
+ "ld1rqw { z10.s }, p0/Z, [x22]\n"
+ "sub x20, x20, #0x4\n"
+ ".inst 0xa040c721 // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x25]\n"
".inst 0xc15aa000 // fmla za.s[x9, 0], { z0.s-z3.s }, z10.s[0]\n"
- "addvl x26, x26, #16\n"
- "cmp x21, #0x4\n"
- ".inst 0xa040c759 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ "cmp x20, #0x4\n"
+ ".inst 0xa040c739 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25]\n"
".inst 0xc15aa700 // fmla za.s[x9, 0], { z24.s-z27.s }, z10.s[1]\n"
- "addvl x26, x26, #16\n"
- "add x23, x23, #0x10\n"
- ".inst 0xa040c74d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0xa040c72d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x25]\n"
".inst 0xc15aa980 // fmla za.s[x9, 0], { z12.s-z15.s }, z10.s[2]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa040c731 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25]\n"
".inst 0xc15aae00 // fmla za.s[x9, 0], { z16.s-z19.s }, z10.s[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"bgt 7b\n"
"8:" // Width 1: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x21\n"
- "ld1rqw { z10.s }, p0/Z, [x23]\n"
- "subs x21, x21, #0x1\n"
- ".inst 0xa040c741 // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x26]\n"
- "add x23, x23, #0x10\n"
+ "whilelt p0.s, XZR, x20\n"
+ "ld1rqw { z10.s }, p0/Z, [x22]\n"
+ "subs x20, x20, #0x1\n"
+ ".inst 0xa040c721 // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x25]\n"
+ "add x22, x22, #0x10\n"
".inst 0xc15aa000 // fmla za.s[x9, 0], { z0.s-z3.s }, z10.s[0]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 9f\n"
- ".inst 0xa040c759 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x26]\n"
- "subs x21, x21, #0x1\n"
+ ".inst 0xa040c739 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25]\n"
+ "subs x20, x20, #0x1\n"
".inst 0xc15aa700 // fmla za.s[x9, 0], { z24.s-z27.s }, z10.s[1]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 9f\n"
- ".inst 0xa040c74d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x26]\n"
- "subs x21, x21, #0x1\n"
+ ".inst 0xa040c72d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x25]\n"
+ "subs x20, x20, #0x1\n"
".inst 0xc15aa980 // fmla za.s[x9, 0], { z12.s-z15.s }, z10.s[2]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 9f\n"
- ".inst 0xa040c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040c731 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25]\n"
".inst 0xc15aae00 // fmla za.s[x9, 0], { z16.s-z19.s }, z10.s[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"9:" // Width 1: Multiply loop: multiply skip
"tbz %x[flags], #1, 10f\n"
- "add x21, %x[args_ptr], %[offset_min]\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
+ "add x20, %x[args_ptr], %[offset_min]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
- "ld1rw { z0.s }, p1/Z, [x21]\n"
- "ld1rw { z6.s }, p1/Z, [x20]\n"
+ "ld1rw { z0.s }, p1/Z, [x20]\n"
+ "ld1rw { z6.s }, p1/Z, [x19]\n"
".inst 0xc1a6c808 // fclamp { z8.s-z11.s }, z0.s, z6.s\n"
- ".inst 0xa060c328 // st1w { z8.s-z11.s }, p8, [x25]\n"
- "addvl x25, x25, #4\n"
+ ".inst 0xa060c308 // st1w { z8.s-z11.s }, p8, [x24]\n"
+ "addvl x24, x24, #4\n"
"b 11f\n"
"10:" // Width 1: No activation
".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
- ".inst 0xa060c328 // st1w { z8.s-z11.s }, p8, [x25]\n"
- "addvl x25, x25, #4\n"
+ ".inst 0xa060c308 // st1w { z8.s-z11.s }, p8, [x24]\n"
+ "addvl x24, x24, #4\n"
"11:" // Width 1: Output done
"b 36f\n"
"12:" // Width 2
- "mov x23, %x[A_ptr]\n"
- "lsl x22, %x[K], #0x2\n"
- "sub x20, %x[N], x28\n"
- "mov x21, %x[K]\n"
- ".inst 0xf8b64af8 // rprfm pldmany, x22, [x23]\n"
- ".inst 0x25b467f0 // whilelt p8.s, XZR, x20, VLx4\n"
- "cbz x24, 13f\n"
- ".inst 0xa040c700 // ld1w { z0.s-z3.s }, pn9.b/Z, [x24]\n"
+ "mov x22, %x[A_ptr]\n"
+ "lsl x21, %x[K], #0x2\n"
+ "sub x19, %x[N], x27\n"
+ "mov x20, %x[K]\n"
+ ".inst 0xf8b54ad8 // rprfm pldmany, x21, [x22]\n"
+ ".inst 0x25b367f0 // whilelt p8.s, XZR, x19, VLx4\n"
+ "cbz x23, 13f\n"
+ ".inst 0xa040c6e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x23]\n"
".inst 0xc0042c00 // mova za.d[x9, #0], { z0.d-z3.d }\n"
- ".inst 0xa041c708 // ld1w { z8.s-z11.s }, pn9.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xa041c6e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
".inst 0xc0042d01 // mova za.d[x9, #1], { z8.d-z11.d }\n"
"b 14f\n"
"13:" // Width 2: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"14:" // Width 2: setup done
- "cmp x21, #0x4\n"
+ "cmp x20, #0x4\n"
"ble 16f\n"
"15:" // Width 2: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x21\n"
- "ld1rqw { z10.s }, p0/Z, [x23]\n"
- "sub x21, x21, #0x4\n"
- ".inst 0xa040c741 // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x26]\n"
+ "whilelt p0.s, XZR, x20\n"
+ "ld1rqw { z10.s }, p0/Z, [x22]\n"
+ "sub x20, x20, #0x4\n"
+ ".inst 0xa040c721 // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x25]\n"
".inst 0xc15aa000 // fmla za.s[x9, 0], { z0.s-z3.s }, z10.s[0]\n"
- "cmp x21, #0x4\n"
- "add x23, x23, #0x10\n"
- ".inst 0xa041c745 // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ "cmp x20, #0x4\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0xa041c725 // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15aa081 // fmla za.s[x9, 1], { z4.s-z7.s }, z10.s[0]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040c759 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa040c739 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25]\n"
".inst 0xc15aa700 // fmla za.s[x9, 0], { z24.s-z27.s }, z10.s[1]\n"
- ".inst 0xa041c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041c731 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15aa601 // fmla za.s[x9, 1], { z16.s-z19.s }, z10.s[1]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040c74d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa040c72d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x25]\n"
".inst 0xc15aa980 // fmla za.s[x9, 0], { z12.s-z15.s }, z10.s[2]\n"
- ".inst 0xa041c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041c731 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15aaa01 // fmla za.s[x9, 1], { z16.s-z19.s }, z10.s[2]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa040c731 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25]\n"
".inst 0xc15aae00 // fmla za.s[x9, 0], { z16.s-z19.s }, z10.s[3]\n"
- ".inst 0xa041c759 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041c739 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15aaf01 // fmla za.s[x9, 1], { z24.s-z27.s }, z10.s[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"bgt 15b\n"
"16:" // Width 2: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x21\n"
- "ld1rqw { z10.s }, p0/Z, [x23]\n"
- "subs x21, x21, #0x1\n"
- ".inst 0xa040c741 // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x26]\n"
- "add x23, x23, #0x10\n"
+ "whilelt p0.s, XZR, x20\n"
+ "ld1rqw { z10.s }, p0/Z, [x22]\n"
+ "subs x20, x20, #0x1\n"
+ ".inst 0xa040c721 // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x25]\n"
+ "add x22, x22, #0x10\n"
".inst 0xc15aa000 // fmla za.s[x9, 0], { z0.s-z3.s }, z10.s[0]\n"
- ".inst 0xa041c745 // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041c725 // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15aa081 // fmla za.s[x9, 1], { z4.s-z7.s }, z10.s[0]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 17f\n"
- ".inst 0xa040c759 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x26]\n"
- "subs x21, x21, #0x1\n"
+ ".inst 0xa040c739 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25]\n"
+ "subs x20, x20, #0x1\n"
".inst 0xc15aa700 // fmla za.s[x9, 0], { z24.s-z27.s }, z10.s[1]\n"
- ".inst 0xa041c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041c731 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15aa601 // fmla za.s[x9, 1], { z16.s-z19.s }, z10.s[1]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 17f\n"
- ".inst 0xa040c74d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x26]\n"
- "subs x21, x21, #0x1\n"
+ ".inst 0xa040c72d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x25]\n"
+ "subs x20, x20, #0x1\n"
".inst 0xc15aa980 // fmla za.s[x9, 0], { z12.s-z15.s }, z10.s[2]\n"
- ".inst 0xa041c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041c731 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15aaa01 // fmla za.s[x9, 1], { z16.s-z19.s }, z10.s[2]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 17f\n"
- ".inst 0xa040c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040c731 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25]\n"
".inst 0xc15aae00 // fmla za.s[x9, 0], { z16.s-z19.s }, z10.s[3]\n"
- ".inst 0xa041c759 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041c739 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15aaf01 // fmla za.s[x9, 1], { z24.s-z27.s }, z10.s[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"17:" // Width 2: Multiply loop: multiply skip
"tbz %x[flags], #1, 18f\n"
- "add x21, %x[args_ptr], %[offset_min]\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
+ "add x20, %x[args_ptr], %[offset_min]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
- "ld1rw { z0.s }, p1/Z, [x21]\n"
+ "ld1rw { z0.s }, p1/Z, [x20]\n"
".inst 0xc0062c34 // mova { z20.d-z23.d }, za.d[x9, #1]\n"
- "ld1rw { z6.s }, p1/Z, [x20]\n"
+ "ld1rw { z6.s }, p1/Z, [x19]\n"
".inst 0xc1a6c808 // fclamp { z8.s-z11.s }, z0.s, z6.s\n"
- ".inst 0xa060c728 // st1w { z8.s-z11.s }, pn9.b, [x25]\n"
+ ".inst 0xa060c708 // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
".inst 0xc1a6c814 // fclamp { z20.s-z23.s }, z0.s, z6.s\n"
- ".inst 0xa061c334 // st1w { z20.s-z23.s }, p8, [x25, #0x4, MUL VL]\n"
- "addvl x25, x25, #8\n"
+ ".inst 0xa061c314 // st1w { z20.s-z23.s }, p8, [x24, #0x4, MUL VL]\n"
+ "addvl x24, x24, #8\n"
"b 19f\n"
"18:" // Width 2: No activation
".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
- ".inst 0xa060c728 // st1w { z8.s-z11.s }, pn9.b, [x25]\n"
+ ".inst 0xa060c708 // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
".inst 0xc0062c34 // mova { z20.d-z23.d }, za.d[x9, #1]\n"
- ".inst 0xa061c334 // st1w { z20.s-z23.s }, p8, [x25, #0x4, MUL VL]\n"
- "addvl x25, x25, #8\n"
+ ".inst 0xa061c314 // st1w { z20.s-z23.s }, p8, [x24, #0x4, MUL VL]\n"
+ "addvl x24, x24, #8\n"
"19:" // Width 2: Output done
"b 36f\n"
"20:" // Width 3
- "mov x20, #0x2\n"
- "mov x23, %x[A_ptr]\n"
- "lsl x22, %x[K], #0x2\n"
- "msub x20, x28, x20, %x[N]\n"
- "mov x21, %x[K]\n"
- ".inst 0xf8b64af8 // rprfm pldmany, x22, [x23]\n"
- ".inst 0x25b467f0 // whilelt p8.s, XZR, x20, VLx4\n"
- "cbz x24, 21f\n"
- ".inst 0xa040c700 // ld1w { z0.s-z3.s }, pn9.b/Z, [x24]\n"
+ "mov x19, #0x2\n"
+ "mov x22, %x[A_ptr]\n"
+ "lsl x21, %x[K], #0x2\n"
+ "msub x19, x27, x19, %x[N]\n"
+ "mov x20, %x[K]\n"
+ ".inst 0xf8b54ad8 // rprfm pldmany, x21, [x22]\n"
+ ".inst 0x25b367f0 // whilelt p8.s, XZR, x19, VLx4\n"
+ "cbz x23, 21f\n"
+ ".inst 0xa040c6e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x23]\n"
".inst 0xc0042c00 // mova za.d[x9, #0], { z0.d-z3.d }\n"
- ".inst 0xa041c708 // ld1w { z8.s-z11.s }, pn9.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xa041c6e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
".inst 0xc0042d01 // mova za.d[x9, #1], { z8.d-z11.d }\n"
- ".inst 0xa042c704 // ld1w { z4.s-z7.s }, pn9.b/Z, [x24, #0x8, MUL VL]\n"
+ ".inst 0xa042c6e4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
".inst 0xc0042c82 // mova za.d[x9, #2], { z4.d-z7.d }\n"
"b 22f\n"
"21:" // Width 3: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"22:" // Width 3: setup done
- "cmp x21, #0x4\n"
+ "cmp x20, #0x4\n"
"ble 24f\n"
"23:" // Width 3: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x21\n"
- "ld1rqw { z10.s }, p0/Z, [x23]\n"
- "sub x21, x21, #0x4\n"
- ".inst 0xa040c741 // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x26]\n"
+ "whilelt p0.s, XZR, x20\n"
+ "ld1rqw { z10.s }, p0/Z, [x22]\n"
+ "sub x20, x20, #0x4\n"
+ ".inst 0xa040c721 // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x25]\n"
".inst 0xc15aa000 // fmla za.s[x9, 0], { z0.s-z3.s }, z10.s[0]\n"
- "cmp x21, #0x4\n"
- "add x23, x23, #0x10\n"
- ".inst 0xa041c745 // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ "cmp x20, #0x4\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0xa041c725 // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15aa081 // fmla za.s[x9, 1], { z4.s-z7.s }, z10.s[0]\n"
- ".inst 0xa042c755 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042c735 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc15aa282 // fmla za.s[x9, 2], { z20.s-z23.s }, z10.s[0]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040c759 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa040c739 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25]\n"
".inst 0xc15aa700 // fmla za.s[x9, 0], { z24.s-z27.s }, z10.s[1]\n"
- ".inst 0xa041c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041c731 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15aa601 // fmla za.s[x9, 1], { z16.s-z19.s }, z10.s[1]\n"
- ".inst 0xa042c759 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042c739 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc15aa702 // fmla za.s[x9, 2], { z24.s-z27.s }, z10.s[1]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040c74d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa040c72d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x25]\n"
".inst 0xc15aa980 // fmla za.s[x9, 0], { z12.s-z15.s }, z10.s[2]\n"
- ".inst 0xa041c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041c731 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15aaa01 // fmla za.s[x9, 1], { z16.s-z19.s }, z10.s[2]\n"
- ".inst 0xa042c75d // ldnt1w { z28.s-z31.s }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042c73d // ldnt1w { z28.s-z31.s }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc15aab82 // fmla za.s[x9, 2], { z28.s-z31.s }, z10.s[2]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa040c731 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25]\n"
".inst 0xc15aae00 // fmla za.s[x9, 0], { z16.s-z19.s }, z10.s[3]\n"
- ".inst 0xa041c759 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041c739 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15aaf01 // fmla za.s[x9, 1], { z24.s-z27.s }, z10.s[3]\n"
- ".inst 0xa042c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042c731 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc15aae02 // fmla za.s[x9, 2], { z16.s-z19.s }, z10.s[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"bgt 23b\n"
"24:" // Width 3: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x21\n"
- "ld1rqw { z10.s }, p0/Z, [x23]\n"
- "subs x21, x21, #0x1\n"
- ".inst 0xa040c741 // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x26]\n"
- "add x23, x23, #0x10\n"
+ "whilelt p0.s, XZR, x20\n"
+ "ld1rqw { z10.s }, p0/Z, [x22]\n"
+ "subs x20, x20, #0x1\n"
+ ".inst 0xa040c721 // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x25]\n"
+ "add x22, x22, #0x10\n"
".inst 0xc15aa000 // fmla za.s[x9, 0], { z0.s-z3.s }, z10.s[0]\n"
- ".inst 0xa041c745 // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041c725 // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15aa081 // fmla za.s[x9, 1], { z4.s-z7.s }, z10.s[0]\n"
- ".inst 0xa042c755 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042c735 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc15aa282 // fmla za.s[x9, 2], { z20.s-z23.s }, z10.s[0]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 25f\n"
- ".inst 0xa040c759 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x26]\n"
- "subs x21, x21, #0x1\n"
+ ".inst 0xa040c739 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25]\n"
+ "subs x20, x20, #0x1\n"
".inst 0xc15aa700 // fmla za.s[x9, 0], { z24.s-z27.s }, z10.s[1]\n"
- ".inst 0xa041c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041c731 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15aa601 // fmla za.s[x9, 1], { z16.s-z19.s }, z10.s[1]\n"
- ".inst 0xa042c759 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042c739 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc15aa702 // fmla za.s[x9, 2], { z24.s-z27.s }, z10.s[1]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 25f\n"
- ".inst 0xa040c74d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x26]\n"
- "subs x21, x21, #0x1\n"
+ ".inst 0xa040c72d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x25]\n"
+ "subs x20, x20, #0x1\n"
".inst 0xc15aa980 // fmla za.s[x9, 0], { z12.s-z15.s }, z10.s[2]\n"
- ".inst 0xa041c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041c731 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15aaa01 // fmla za.s[x9, 1], { z16.s-z19.s }, z10.s[2]\n"
- ".inst 0xa042c75d // ldnt1w { z28.s-z31.s }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042c73d // ldnt1w { z28.s-z31.s }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc15aab82 // fmla za.s[x9, 2], { z28.s-z31.s }, z10.s[2]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 25f\n"
- ".inst 0xa040c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040c731 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25]\n"
".inst 0xc15aae00 // fmla za.s[x9, 0], { z16.s-z19.s }, z10.s[3]\n"
- ".inst 0xa041c759 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041c739 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15aaf01 // fmla za.s[x9, 1], { z24.s-z27.s }, z10.s[3]\n"
- ".inst 0xa042c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042c731 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc15aae02 // fmla za.s[x9, 2], { z16.s-z19.s }, z10.s[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"25:" // Width 3: Multiply loop: multiply skip
"tbz %x[flags], #1, 26f\n"
- "add x21, %x[args_ptr], %[offset_min]\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
+ "add x20, %x[args_ptr], %[offset_min]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
- "ld1rw { z0.s }, p1/Z, [x21]\n"
+ "ld1rw { z0.s }, p1/Z, [x20]\n"
".inst 0xc0062c34 // mova { z20.d-z23.d }, za.d[x9, #1]\n"
- "ld1rw { z6.s }, p1/Z, [x20]\n"
+ "ld1rw { z6.s }, p1/Z, [x19]\n"
".inst 0xc1a6c808 // fclamp { z8.s-z11.s }, z0.s, z6.s\n"
".inst 0xc0062c50 // mova { z16.d-z19.d }, za.d[x9, #2]\n"
- ".inst 0xa060c728 // st1w { z8.s-z11.s }, pn9.b, [x25]\n"
+ ".inst 0xa060c708 // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
".inst 0xc1a6c814 // fclamp { z20.s-z23.s }, z0.s, z6.s\n"
- ".inst 0xa061c734 // st1w { z20.s-z23.s }, pn9.b, [x25, #0x4, MUL VL]\n"
+ ".inst 0xa061c714 // st1w { z20.s-z23.s }, pn9.b, [x24, #0x4, MUL VL]\n"
".inst 0xc1a6c810 // fclamp { z16.s-z19.s }, z0.s, z6.s\n"
- ".inst 0xa062c330 // st1w { z16.s-z19.s }, p8, [x25, #0x8, MUL VL]\n"
- "addvl x25, x25, #12\n"
+ ".inst 0xa062c310 // st1w { z16.s-z19.s }, p8, [x24, #0x8, MUL VL]\n"
+ "addvl x24, x24, #12\n"
"b 27f\n"
"26:" // Width 3: No activation
".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
- ".inst 0xa060c728 // st1w { z8.s-z11.s }, pn9.b, [x25]\n"
+ ".inst 0xa060c708 // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
".inst 0xc0062c34 // mova { z20.d-z23.d }, za.d[x9, #1]\n"
- ".inst 0xa061c734 // st1w { z20.s-z23.s }, pn9.b, [x25, #0x4, MUL VL]\n"
+ ".inst 0xa061c714 // st1w { z20.s-z23.s }, pn9.b, [x24, #0x4, MUL VL]\n"
".inst 0xc0062c50 // mova { z16.d-z19.d }, za.d[x9, #2]\n"
- ".inst 0xa062c330 // st1w { z16.s-z19.s }, p8, [x25, #0x8, MUL VL]\n"
- "addvl x25, x25, #12\n"
+ ".inst 0xa062c310 // st1w { z16.s-z19.s }, p8, [x24, #0x8, MUL VL]\n"
+ "addvl x24, x24, #12\n"
"27:" // Width 3: Output done
"b 36f\n"
"28:" // Width 4
- "mov x20, #0x3\n"
- "mov x23, %x[A_ptr]\n"
- "lsl x22, %x[K], #0x2\n"
- "msub x20, x28, x20, %x[N]\n"
- "mov x21, %x[K]\n"
- ".inst 0xf8b64af8 // rprfm pldmany, x22, [x23]\n"
- ".inst 0x25b467f0 // whilelt p8.s, XZR, x20, VLx4\n"
- "cbz x24, 29f\n"
- ".inst 0xa040c700 // ld1w { z0.s-z3.s }, pn9.b/Z, [x24]\n"
+ "mov x19, #0x3\n"
+ "mov x22, %x[A_ptr]\n"
+ "lsl x21, %x[K], #0x2\n"
+ "msub x19, x27, x19, %x[N]\n"
+ "mov x20, %x[K]\n"
+ ".inst 0xf8b54ad8 // rprfm pldmany, x21, [x22]\n"
+ ".inst 0x25b367f0 // whilelt p8.s, XZR, x19, VLx4\n"
+ "cbz x23, 29f\n"
+ ".inst 0xa040c6e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x23]\n"
".inst 0xc0042c00 // mova za.d[x9, #0], { z0.d-z3.d }\n"
- ".inst 0xa041c708 // ld1w { z8.s-z11.s }, pn9.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xa041c6e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
".inst 0xc0042d01 // mova za.d[x9, #1], { z8.d-z11.d }\n"
- ".inst 0xa042c704 // ld1w { z4.s-z7.s }, pn9.b/Z, [x24, #0x8, MUL VL]\n"
+ ".inst 0xa042c6e4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
".inst 0xc0042c82 // mova za.d[x9, #2], { z4.d-z7.d }\n"
- ".inst 0xa043c710 // ld1w { z16.s-z19.s }, pn9.b/Z, [x24, #0xc, MUL VL]\n"
+ ".inst 0xa043c6f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
".inst 0xc0042e03 // mova za.d[x9, #3], { z16.d-z19.d }\n"
- "addvl x24, x24, #16\n"
+ "addvl x23, x23, #16\n"
"b 30f\n"
"29:" // Width 4: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"30:" // Width 4: setup done
- "cmp x21, #0x4\n"
+ "cmp x20, #0x4\n"
"ble 32f\n"
"31:" // Width 4: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x21\n"
- "ld1rqw { z10.s }, p0/Z, [x23]\n"
- "sub x21, x21, #0x4\n"
- ".inst 0xa040c741 // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x26]\n"
+ "whilelt p0.s, XZR, x20\n"
+ "ld1rqw { z10.s }, p0/Z, [x22]\n"
+ "sub x20, x20, #0x4\n"
+ ".inst 0xa040c721 // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x25]\n"
".inst 0xc15aa000 // fmla za.s[x9, 0], { z0.s-z3.s }, z10.s[0]\n"
- "cmp x21, #0x4\n"
- "add x23, x23, #0x10\n"
- ".inst 0xa041c745 // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ "cmp x20, #0x4\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0xa041c725 // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15aa081 // fmla za.s[x9, 1], { z4.s-z7.s }, z10.s[0]\n"
- ".inst 0xa042c755 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042c735 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc15aa282 // fmla za.s[x9, 2], { z20.s-z23.s }, z10.s[0]\n"
- ".inst 0xa043c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043c731 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc15aa203 // fmla za.s[x9, 3], { z16.s-z19.s }, z10.s[0]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040c759 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa040c739 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25]\n"
".inst 0xc15aa700 // fmla za.s[x9, 0], { z24.s-z27.s }, z10.s[1]\n"
- ".inst 0xa041c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041c731 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15aa601 // fmla za.s[x9, 1], { z16.s-z19.s }, z10.s[1]\n"
- ".inst 0xa042c759 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042c739 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc15aa702 // fmla za.s[x9, 2], { z24.s-z27.s }, z10.s[1]\n"
- ".inst 0xa043c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043c731 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc15aa603 // fmla za.s[x9, 3], { z16.s-z19.s }, z10.s[1]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040c74d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa040c72d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x25]\n"
".inst 0xc15aa980 // fmla za.s[x9, 0], { z12.s-z15.s }, z10.s[2]\n"
- ".inst 0xa041c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041c731 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15aaa01 // fmla za.s[x9, 1], { z16.s-z19.s }, z10.s[2]\n"
- ".inst 0xa042c75d // ldnt1w { z28.s-z31.s }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042c73d // ldnt1w { z28.s-z31.s }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc15aab82 // fmla za.s[x9, 2], { z28.s-z31.s }, z10.s[2]\n"
- ".inst 0xa043c755 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043c735 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc15aaa83 // fmla za.s[x9, 3], { z20.s-z23.s }, z10.s[2]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa040c731 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25]\n"
".inst 0xc15aae00 // fmla za.s[x9, 0], { z16.s-z19.s }, z10.s[3]\n"
- ".inst 0xa041c759 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041c739 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15aaf01 // fmla za.s[x9, 1], { z24.s-z27.s }, z10.s[3]\n"
- ".inst 0xa042c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042c731 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc15aae02 // fmla za.s[x9, 2], { z16.s-z19.s }, z10.s[3]\n"
- ".inst 0xa043c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043c731 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc15aae03 // fmla za.s[x9, 3], { z16.s-z19.s }, z10.s[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"bgt 31b\n"
"32:" // Width 4: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x21\n"
- "ld1rqw { z10.s }, p0/Z, [x23]\n"
- "subs x21, x21, #0x1\n"
- ".inst 0xa040c741 // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x26]\n"
- "add x23, x23, #0x10\n"
+ "whilelt p0.s, XZR, x20\n"
+ "ld1rqw { z10.s }, p0/Z, [x22]\n"
+ "subs x20, x20, #0x1\n"
+ ".inst 0xa040c721 // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x25]\n"
+ "add x22, x22, #0x10\n"
".inst 0xc15aa000 // fmla za.s[x9, 0], { z0.s-z3.s }, z10.s[0]\n"
- ".inst 0xa041c745 // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041c725 // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15aa081 // fmla za.s[x9, 1], { z4.s-z7.s }, z10.s[0]\n"
- ".inst 0xa042c755 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042c735 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc15aa282 // fmla za.s[x9, 2], { z20.s-z23.s }, z10.s[0]\n"
- ".inst 0xa043c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043c731 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc15aa203 // fmla za.s[x9, 3], { z16.s-z19.s }, z10.s[0]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 33f\n"
- ".inst 0xa040c759 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x26]\n"
- "subs x21, x21, #0x1\n"
+ ".inst 0xa040c739 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25]\n"
+ "subs x20, x20, #0x1\n"
".inst 0xc15aa700 // fmla za.s[x9, 0], { z24.s-z27.s }, z10.s[1]\n"
- ".inst 0xa041c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041c731 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15aa601 // fmla za.s[x9, 1], { z16.s-z19.s }, z10.s[1]\n"
- ".inst 0xa042c759 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042c739 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc15aa702 // fmla za.s[x9, 2], { z24.s-z27.s }, z10.s[1]\n"
- ".inst 0xa043c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043c731 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc15aa603 // fmla za.s[x9, 3], { z16.s-z19.s }, z10.s[1]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 33f\n"
- ".inst 0xa040c74d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x26]\n"
- "subs x21, x21, #0x1\n"
+ ".inst 0xa040c72d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x25]\n"
+ "subs x20, x20, #0x1\n"
".inst 0xc15aa980 // fmla za.s[x9, 0], { z12.s-z15.s }, z10.s[2]\n"
- ".inst 0xa041c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041c731 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15aaa01 // fmla za.s[x9, 1], { z16.s-z19.s }, z10.s[2]\n"
- ".inst 0xa042c75d // ldnt1w { z28.s-z31.s }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042c73d // ldnt1w { z28.s-z31.s }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc15aab82 // fmla za.s[x9, 2], { z28.s-z31.s }, z10.s[2]\n"
- ".inst 0xa043c755 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043c735 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc15aaa83 // fmla za.s[x9, 3], { z20.s-z23.s }, z10.s[2]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 33f\n"
- ".inst 0xa040c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040c731 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25]\n"
".inst 0xc15aae00 // fmla za.s[x9, 0], { z16.s-z19.s }, z10.s[3]\n"
- ".inst 0xa041c759 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041c739 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc15aaf01 // fmla za.s[x9, 1], { z24.s-z27.s }, z10.s[3]\n"
- ".inst 0xa042c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042c731 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc15aae02 // fmla za.s[x9, 2], { z16.s-z19.s }, z10.s[3]\n"
- ".inst 0xa043c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043c731 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc15aae03 // fmla za.s[x9, 3], { z16.s-z19.s }, z10.s[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"33:" // Width 4: Multiply loop: multiply skip
"tbz %x[flags], #1, 34f\n"
- "add x21, %x[args_ptr], %[offset_min]\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
+ "add x20, %x[args_ptr], %[offset_min]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
- "ld1rw { z0.s }, p1/Z, [x21]\n"
+ "ld1rw { z0.s }, p1/Z, [x20]\n"
".inst 0xc0062c34 // mova { z20.d-z23.d }, za.d[x9, #1]\n"
- "ld1rw { z6.s }, p1/Z, [x20]\n"
+ "ld1rw { z6.s }, p1/Z, [x19]\n"
".inst 0xc1a6c808 // fclamp { z8.s-z11.s }, z0.s, z6.s\n"
".inst 0xc0062c50 // mova { z16.d-z19.d }, za.d[x9, #2]\n"
- ".inst 0xa060c728 // st1w { z8.s-z11.s }, pn9.b, [x25]\n"
+ ".inst 0xa060c708 // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
".inst 0xc1a6c814 // fclamp { z20.s-z23.s }, z0.s, z6.s\n"
".inst 0xc0062c78 // mova { z24.d-z27.d }, za.d[x9, #3]\n"
- ".inst 0xa061c734 // st1w { z20.s-z23.s }, pn9.b, [x25, #0x4, MUL VL]\n"
+ ".inst 0xa061c714 // st1w { z20.s-z23.s }, pn9.b, [x24, #0x4, MUL VL]\n"
".inst 0xc1a6c810 // fclamp { z16.s-z19.s }, z0.s, z6.s\n"
- ".inst 0xa062c730 // st1w { z16.s-z19.s }, pn9.b, [x25, #0x8, MUL VL]\n"
+ ".inst 0xa062c710 // st1w { z16.s-z19.s }, pn9.b, [x24, #0x8, MUL VL]\n"
".inst 0xc1a6c818 // fclamp { z24.s-z27.s }, z0.s, z6.s\n"
- ".inst 0xa063c338 // st1w { z24.s-z27.s }, p8, [x25, #0xc, MUL VL]\n"
- "addvl x25, x25, #16\n"
+ ".inst 0xa063c318 // st1w { z24.s-z27.s }, p8, [x24, #0xc, MUL VL]\n"
+ "addvl x24, x24, #16\n"
"b 35f\n"
"34:" // Width 4: No activation
".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
- ".inst 0xa060c728 // st1w { z8.s-z11.s }, pn9.b, [x25]\n"
+ ".inst 0xa060c708 // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
".inst 0xc0062c34 // mova { z20.d-z23.d }, za.d[x9, #1]\n"
- ".inst 0xa061c734 // st1w { z20.s-z23.s }, pn9.b, [x25, #0x4, MUL VL]\n"
+ ".inst 0xa061c714 // st1w { z20.s-z23.s }, pn9.b, [x24, #0x4, MUL VL]\n"
".inst 0xc0062c50 // mova { z16.d-z19.d }, za.d[x9, #2]\n"
- ".inst 0xa062c730 // st1w { z16.s-z19.s }, pn9.b, [x25, #0x8, MUL VL]\n"
+ ".inst 0xa062c710 // st1w { z16.s-z19.s }, pn9.b, [x24, #0x8, MUL VL]\n"
".inst 0xc0062c78 // mova { z24.d-z27.d }, za.d[x9, #3]\n"
- ".inst 0xa063c338 // st1w { z24.s-z27.s }, p8, [x25, #0xc, MUL VL]\n"
- "addvl x25, x25, #16\n"
+ ".inst 0xa063c318 // st1w { z24.s-z27.s }, p8, [x24, #0xc, MUL VL]\n"
+ "addvl x24, x24, #16\n"
"35:" // Width 4: Output done
- "subs x27, x27, #0x4\n"
- "sub %x[N], %x[N], x28, LSL #2\n"
+ "subs x26, x26, #0x4\n"
+ "sub %x[N], %x[N], x27, LSL #2\n"
"bgt 4b\n"
"36:" // Exit
".inst 0xd503467f // SMSTOP\n"
"ptrue p1.b\n"
: [N] "+&r" (N)
: [A_ptr] "r" (A_ptr), [B_ptr] "r" (B_ptr), [K] "r" (K), [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [output_ptr] "r" (output_ptr)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
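
The rprfm prefetch words in the kernel above change in two places at once, since they encode both a stride register (bits 16..20) and a base register (bits 5..9): 0xf8b64af8 for "rprfm pldmany, x22, [x23]" becomes 0xf8b54ad8 for "rprfm pldmany, x21, [x22]". A self-contained check of that claim, again using a hypothetical helper rather than anything from ComputeLibrary:

#include <cassert>
#include <cstdint>

// Hypothetical helper: replace a 5-bit register field starting at bit 'lsb'.
static uint32_t set_field(uint32_t inst, unsigned lsb, unsigned xreg)
{
    return (inst & ~(0x1Fu << lsb)) | ((xreg & 0x1Fu) << lsb);
}

int main()
{
    uint32_t inst = 0xf8b64af8;      // rprfm pldmany, x22, [x23]
    inst = set_field(inst, 16, 21);  // stride register: x22 -> x21
    inst = set_field(inst, 5, 22);   // base register:   x23 -> x22
    assert(inst == 0xf8b54ad8);      // matches the post-revert encoding
    return 0;
}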
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32bf16fp32_dot_16VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32bf16fp32_dot_16VL/generic.cpp
index 0a394b6413..8b8bcb6bc7 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32bf16fp32_dot_16VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32bf16fp32_dot_16VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __ARM_FEATURE_SVE
#ifdef ARM_COMPUTE_ENABLE_SME2
@@ -64,544 +64,544 @@ void sme2_gemv_fp32bf16fp32_dot_16VL (
__asm__ __volatile__(
"ptrue p2.b\n"
".inst 0xd503477f // SMSTART ZA\n"
- "cntw x10, ALL, MUL #4\n"
- "add x28, %x[N], x10\n"
- "sub x28, x28, #0x1\n"
- "udiv x28, x28, x10\n"
- "add x22, x28, #0x3\n"
- "and x22, x22, #0xfffffffffffffffc\n"
- "mul x22, x22, x10\n"
- "mul x22, x22, %x[K]\n"
+ "cntw x28, ALL, MUL #4\n"
+ "add x27, %x[N], x28\n"
+ "sub x27, x27, #0x1\n"
+ "udiv x27, x27, x28\n"
+ "add x21, x27, #0x3\n"
+ "and x21, x21, #0xfffffffffffffffc\n"
+ "mul x21, x21, x28\n"
+ "mul x21, x21, %x[K]\n"
"mov x9, #0x0\n"
- "mov x27, #0x4\n"
- "mov x26, %x[B_ptr]\n"
- "mov x25, %x[output_ptr]\n"
+ "mov x26, #0x4\n"
+ "mov x25, %x[B_ptr]\n"
+ "mov x24, %x[output_ptr]\n"
"ptrue p2.b\n"
".inst 0x25207811 // ptrue pn9.b\n"
- "lsl x22, x22, #0x1\n"
- "mov x21, #0x1\n"
+ "lsl x21, x21, #0x1\n"
+ "mov x20, #0x1\n"
"1:" // RHS size check loop
- "cmp x22, #0x200000\n"
+ "cmp x21, #0x200000\n"
"blt 2f\n"
- "tbnz x22, #0, 3f\n"
- "lsr x22, x22, #0x1\n"
- "lsl x21, x21, #0x1\n"
+ "tbnz x21, #0, 3f\n"
+ "lsr x21, x21, #0x1\n"
+ "lsl x20, x20, #0x1\n"
"b 1b\n"
"2:" // RHS do prefetch
- "lsl x20, x22, #0x26\n"
- "sub x21, x21, #0x1\n"
- "lsl x21, x21, #0x16\n"
- "orr x22, x22, x20\n"
- "orr x22, x22, x21\n"
- ".inst 0xf8b64b5a // rprfm pldonce, x22, [x26]\n"
+ "lsl x19, x21, #0x26\n"
+ "sub x20, x20, #0x1\n"
+ "lsl x20, x20, #0x16\n"
+ "orr x21, x21, x19\n"
+ "orr x21, x21, x20\n"
+ ".inst 0xf8b54b3a // rprfm pldonce, x21, [x25]\n"
"3:" // RHS prefetch exit
- "mov x24, %x[bias]\n"
+ "mov x23, %x[bias]\n"
"4:" // Column loop
- "cmp x28, #0x4\n"
+ "cmp x27, #0x4\n"
"bge 28f\n"
- "cmp x28, #0x2\n"
+ "cmp x27, #0x2\n"
"bgt 20f\n"
"beq 12f\n"
- "mov x23, %x[A_ptr]\n"
- "lsl x22, %x[K], #0x2\n"
- "mov x20, %x[N]\n"
- "mov x21, %x[K]\n"
- ".inst 0xf8b64af8 // rprfm pldmany, x22, [x23]\n"
- ".inst 0x25b467f0 // whilelt p8.s, XZR, x20, VLx4\n"
- "cbz x24, 5f\n"
- ".inst 0xa040c700 // ld1w { z0.s-z3.s }, pn9.b/Z, [x24]\n"
+ "mov x22, %x[A_ptr]\n"
+ "lsl x21, %x[K], #0x2\n"
+ "mov x19, %x[N]\n"
+ "mov x20, %x[K]\n"
+ ".inst 0xf8b54ad8 // rprfm pldmany, x21, [x22]\n"
+ ".inst 0x25b367f0 // whilelt p8.s, XZR, x19, VLx4\n"
+ "cbz x23, 5f\n"
+ ".inst 0xa040c6e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x23]\n"
".inst 0xc0042c00 // mova za.d[x9, #0], { z0.d-z3.d }\n"
"b 6f\n"
"5:" // Width 1: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"6:" // Width 1: setup done
- "cmp x21, #0x8\n"
+ "cmp x20, #0x8\n"
"ble 8f\n"
"7:" // Width 1: Multiply loop: Main loop head
- "whilelt p1.s, XZR, x21\n"
- "whilelt p0.s, x27, x21\n"
- "ld1rqw { z0.s }, p1/Z, [x23]\n"
+ "whilelt p1.s, XZR, x20\n"
+ "whilelt p0.s, x26, x20\n"
+ "ld1rqw { z0.s }, p1/Z, [x22]\n"
".inst 0x658aa800 // bfcvt z0.h, p2/M, z0.s\n"
- "ld1rqw { z11.s }, p0/Z, [x23, #16]\n"
+ "ld1rqw { z11.s }, p0/Z, [x22, #16]\n"
".inst 0x658aa96b // bfcvt z11.h, p2/M, z11.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "sub x21, x21, #0x8\n"
+ "sub x20, x20, #0x8\n"
"uzp1 z11.h, z11.h, z11.h\n"
"trn1 z0.d, z0.d, z11.d\n"
- ".inst 0xa040a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa040a725 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25]\n"
+ "addvl x25, x25, #16\n"
".inst 0xc150b098 // bfdot za.s[x9, 0], { z4.h-z7.h }, z0.h[0]\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
- "addvl x26, x26, #16\n"
- "cmp x21, #0x8\n"
+ ".inst 0xa040a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+ "addvl x25, x25, #16\n"
+ "cmp x20, #0x8\n"
".inst 0xc150b618 // bfdot za.s[x9, 0], { z16.h-z19.h }, z0.h[1]\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
- "addvl x26, x26, #16\n"
- "add x23, x23, #0x20\n"
+ ".inst 0xa040a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+ "addvl x25, x25, #16\n"
+ "add x22, x22, #0x20\n"
".inst 0xc150ba18 // bfdot za.s[x9, 0], { z16.h-z19.h }, z0.h[2]\n"
- ".inst 0xa040a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa040a73d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25]\n"
+ "addvl x25, x25, #16\n"
".inst 0xc150bf98 // bfdot za.s[x9, 0], { z28.h-z31.h }, z0.h[3]\n"
"bgt 7b\n"
"8:" // Width 1: Multiply loop: Single iteration only
- "whilelt p1.s, XZR, x21\n"
- "whilelt p0.s, x27, x21\n"
- "ld1rqw { z0.s }, p1/Z, [x23]\n"
+ "whilelt p1.s, XZR, x20\n"
+ "whilelt p0.s, x26, x20\n"
+ "ld1rqw { z0.s }, p1/Z, [x22]\n"
".inst 0x658aa800 // bfcvt z0.h, p2/M, z0.s\n"
- "ld1rqw { z11.s }, p0/Z, [x23, #16]\n"
+ "ld1rqw { z11.s }, p0/Z, [x22, #16]\n"
".inst 0x658aa96b // bfcvt z11.h, p2/M, z11.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "subs x21, x21, #0x2\n"
+ "subs x20, x20, #0x2\n"
"uzp1 z11.h, z11.h, z11.h\n"
"trn1 z0.d, z0.d, z11.d\n"
- ".inst 0xa040a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26]\n"
- "add x23, x23, #0x20\n"
+ ".inst 0xa040a725 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25]\n"
+ "add x22, x22, #0x20\n"
".inst 0xc150b098 // bfdot za.s[x9, 0], { z4.h-z7.h }, z0.h[0]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 9f\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
- "subs x21, x21, #0x2\n"
+ ".inst 0xa040a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+ "subs x20, x20, #0x2\n"
".inst 0xc150b618 // bfdot za.s[x9, 0], { z16.h-z19.h }, z0.h[1]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 9f\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
- "subs x21, x21, #0x2\n"
+ ".inst 0xa040a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+ "subs x20, x20, #0x2\n"
".inst 0xc150ba18 // bfdot za.s[x9, 0], { z16.h-z19.h }, z0.h[2]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 9f\n"
- ".inst 0xa040a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040a73d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25]\n"
".inst 0xc150bf98 // bfdot za.s[x9, 0], { z28.h-z31.h }, z0.h[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"9:" // Width 1: Multiply loop: multiply skip
"tbz %x[flags], #1, 10f\n"
- "add x21, %x[args_ptr], %[offset_min]\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
+ "add x20, %x[args_ptr], %[offset_min]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
- "ld1rw { z29.s }, p2/Z, [x21]\n"
- "ld1rw { z18.s }, p2/Z, [x20]\n"
+ "ld1rw { z29.s }, p2/Z, [x20]\n"
+ "ld1rw { z18.s }, p2/Z, [x19]\n"
".inst 0xc1b2cba8 // fclamp { z8.s-z11.s }, z29.s, z18.s\n"
- ".inst 0xa060c328 // st1w { z8.s-z11.s }, p8, [x25]\n"
- "addvl x25, x25, #4\n"
+ ".inst 0xa060c308 // st1w { z8.s-z11.s }, p8, [x24]\n"
+ "addvl x24, x24, #4\n"
"b 11f\n"
"10:" // Width 1: No activation
".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
- ".inst 0xa060c328 // st1w { z8.s-z11.s }, p8, [x25]\n"
- "addvl x25, x25, #4\n"
+ ".inst 0xa060c308 // st1w { z8.s-z11.s }, p8, [x24]\n"
+ "addvl x24, x24, #4\n"
"11:" // Width 1: Output done
"b 36f\n"
"12:" // Width 2
- "mov x23, %x[A_ptr]\n"
- "lsl x22, %x[K], #0x2\n"
- "sub x20, %x[N], x10\n"
- "mov x21, %x[K]\n"
- ".inst 0xf8b64af8 // rprfm pldmany, x22, [x23]\n"
- ".inst 0x25b467f0 // whilelt p8.s, XZR, x20, VLx4\n"
- "cbz x24, 13f\n"
- ".inst 0xa040c700 // ld1w { z0.s-z3.s }, pn9.b/Z, [x24]\n"
+ "mov x22, %x[A_ptr]\n"
+ "lsl x21, %x[K], #0x2\n"
+ "sub x19, %x[N], x28\n"
+ "mov x20, %x[K]\n"
+ ".inst 0xf8b54ad8 // rprfm pldmany, x21, [x22]\n"
+ ".inst 0x25b367f0 // whilelt p8.s, XZR, x19, VLx4\n"
+ "cbz x23, 13f\n"
+ ".inst 0xa040c6e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x23]\n"
".inst 0xc0042c00 // mova za.d[x9, #0], { z0.d-z3.d }\n"
- ".inst 0xa041c710 // ld1w { z16.s-z19.s }, pn9.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xa041c6f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
".inst 0xc0042e01 // mova za.d[x9, #1], { z16.d-z19.d }\n"
"b 14f\n"
"13:" // Width 2: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"14:" // Width 2: setup done
- "cmp x21, #0x8\n"
+ "cmp x20, #0x8\n"
"ble 16f\n"
"15:" // Width 2: Multiply loop: Main loop head
- "whilelt p1.s, XZR, x21\n"
- "whilelt p0.s, x27, x21\n"
- "ld1rqw { z0.s }, p1/Z, [x23]\n"
+ "whilelt p1.s, XZR, x20\n"
+ "whilelt p0.s, x26, x20\n"
+ "ld1rqw { z0.s }, p1/Z, [x22]\n"
".inst 0x658aa800 // bfcvt z0.h, p2/M, z0.s\n"
- "ld1rqw { z11.s }, p0/Z, [x23, #16]\n"
+ "ld1rqw { z11.s }, p0/Z, [x22, #16]\n"
".inst 0x658aa96b // bfcvt z11.h, p2/M, z11.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "sub x21, x21, #0x8\n"
+ "sub x20, x20, #0x8\n"
"uzp1 z11.h, z11.h, z11.h\n"
"trn1 z0.d, z0.d, z11.d\n"
- ".inst 0xa040a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26]\n"
- "cmp x21, #0x8\n"
- ".inst 0xa041a749 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa040a725 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25]\n"
+ "cmp x20, #0x8\n"
+ ".inst 0xa041a729 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc150b098 // bfdot za.s[x9, 0], { z4.h-z7.h }, z0.h[0]\n"
- "addvl x26, x26, #16\n"
- "add x23, x23, #0x20\n"
+ "addvl x25, x25, #16\n"
+ "add x22, x22, #0x20\n"
".inst 0xc150b119 // bfdot za.s[x9, 1], { z8.h-z11.h }, z0.h[0]\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
- ".inst 0xa041a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa040a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+ ".inst 0xa041a725 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc150b618 // bfdot za.s[x9, 0], { z16.h-z19.h }, z0.h[1]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
".inst 0xc150b499 // bfdot za.s[x9, 1], { z4.h-z7.h }, z0.h[1]\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
- ".inst 0xa041a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa040a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+ ".inst 0xa041a735 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc150ba18 // bfdot za.s[x9, 0], { z16.h-z19.h }, z0.h[2]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
".inst 0xc150ba99 // bfdot za.s[x9, 1], { z20.h-z23.h }, z0.h[2]\n"
- ".inst 0xa040a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26]\n"
- ".inst 0xa041a749 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa040a73d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25]\n"
+ ".inst 0xa041a729 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc150bf98 // bfdot za.s[x9, 0], { z28.h-z31.h }, z0.h[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
".inst 0xc150bd19 // bfdot za.s[x9, 1], { z8.h-z11.h }, z0.h[3]\n"
"bgt 15b\n"
"16:" // Width 2: Multiply loop: Single iteration only
- "whilelt p1.s, XZR, x21\n"
- "whilelt p0.s, x27, x21\n"
- "ld1rqw { z0.s }, p1/Z, [x23]\n"
+ "whilelt p1.s, XZR, x20\n"
+ "whilelt p0.s, x26, x20\n"
+ "ld1rqw { z0.s }, p1/Z, [x22]\n"
".inst 0x658aa800 // bfcvt z0.h, p2/M, z0.s\n"
- "ld1rqw { z11.s }, p0/Z, [x23, #16]\n"
+ "ld1rqw { z11.s }, p0/Z, [x22, #16]\n"
".inst 0x658aa96b // bfcvt z11.h, p2/M, z11.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "subs x21, x21, #0x2\n"
+ "subs x20, x20, #0x2\n"
"uzp1 z11.h, z11.h, z11.h\n"
"trn1 z0.d, z0.d, z11.d\n"
- ".inst 0xa040a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26]\n"
- "add x23, x23, #0x20\n"
- ".inst 0xa041a749 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa040a725 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25]\n"
+ "add x22, x22, #0x20\n"
+ ".inst 0xa041a729 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc150b098 // bfdot za.s[x9, 0], { z4.h-z7.h }, z0.h[0]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
".inst 0xc150b119 // bfdot za.s[x9, 1], { z8.h-z11.h }, z0.h[0]\n"
"ble 17f\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
- "subs x21, x21, #0x2\n"
+ ".inst 0xa040a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+ "subs x20, x20, #0x2\n"
".inst 0xc150b618 // bfdot za.s[x9, 0], { z16.h-z19.h }, z0.h[1]\n"
- ".inst 0xa041a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041a725 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc150b499 // bfdot za.s[x9, 1], { z4.h-z7.h }, z0.h[1]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 17f\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
- "subs x21, x21, #0x2\n"
+ ".inst 0xa040a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+ "subs x20, x20, #0x2\n"
".inst 0xc150ba18 // bfdot za.s[x9, 0], { z16.h-z19.h }, z0.h[2]\n"
- ".inst 0xa041a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041a735 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc150ba99 // bfdot za.s[x9, 1], { z20.h-z23.h }, z0.h[2]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 17f\n"
- ".inst 0xa040a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040a73d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25]\n"
".inst 0xc150bf98 // bfdot za.s[x9, 0], { z28.h-z31.h }, z0.h[3]\n"
- ".inst 0xa041a749 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041a729 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc150bd19 // bfdot za.s[x9, 1], { z8.h-z11.h }, z0.h[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"17:" // Width 2: Multiply loop: multiply skip
"tbz %x[flags], #1, 18f\n"
- "add x21, %x[args_ptr], %[offset_min]\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
+ "add x20, %x[args_ptr], %[offset_min]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
- "ld1rw { z29.s }, p2/Z, [x21]\n"
+ "ld1rw { z29.s }, p2/Z, [x20]\n"
".inst 0xc0062c2c // mova { z12.d-z15.d }, za.d[x9, #1]\n"
- "ld1rw { z18.s }, p2/Z, [x20]\n"
+ "ld1rw { z18.s }, p2/Z, [x19]\n"
".inst 0xc1b2cba8 // fclamp { z8.s-z11.s }, z29.s, z18.s\n"
- ".inst 0xa060c728 // st1w { z8.s-z11.s }, pn9.b, [x25]\n"
+ ".inst 0xa060c708 // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
".inst 0xc1b2cbac // fclamp { z12.s-z15.s }, z29.s, z18.s\n"
- ".inst 0xa061c32c // st1w { z12.s-z15.s }, p8, [x25, #0x4, MUL VL]\n"
- "addvl x25, x25, #8\n"
+ ".inst 0xa061c30c // st1w { z12.s-z15.s }, p8, [x24, #0x4, MUL VL]\n"
+ "addvl x24, x24, #8\n"
"b 19f\n"
"18:" // Width 2: No activation
".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
- ".inst 0xa060c728 // st1w { z8.s-z11.s }, pn9.b, [x25]\n"
+ ".inst 0xa060c708 // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
".inst 0xc0062c2c // mova { z12.d-z15.d }, za.d[x9, #1]\n"
- ".inst 0xa061c32c // st1w { z12.s-z15.s }, p8, [x25, #0x4, MUL VL]\n"
- "addvl x25, x25, #8\n"
+ ".inst 0xa061c30c // st1w { z12.s-z15.s }, p8, [x24, #0x4, MUL VL]\n"
+ "addvl x24, x24, #8\n"
"19:" // Width 2: Output done
"b 36f\n"
"20:" // Width 3
- "mov x20, #0x2\n"
- "mov x23, %x[A_ptr]\n"
- "lsl x22, %x[K], #0x2\n"
- "msub x20, x10, x20, %x[N]\n"
- "mov x21, %x[K]\n"
- ".inst 0xf8b64af8 // rprfm pldmany, x22, [x23]\n"
- ".inst 0x25b467f0 // whilelt p8.s, XZR, x20, VLx4\n"
- "cbz x24, 21f\n"
- ".inst 0xa040c700 // ld1w { z0.s-z3.s }, pn9.b/Z, [x24]\n"
+ "mov x19, #0x2\n"
+ "mov x22, %x[A_ptr]\n"
+ "lsl x21, %x[K], #0x2\n"
+ "msub x19, x28, x19, %x[N]\n"
+ "mov x20, %x[K]\n"
+ ".inst 0xf8b54ad8 // rprfm pldmany, x21, [x22]\n"
+ ".inst 0x25b367f0 // whilelt p8.s, XZR, x19, VLx4\n"
+ "cbz x23, 21f\n"
+ ".inst 0xa040c6e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x23]\n"
".inst 0xc0042c00 // mova za.d[x9, #0], { z0.d-z3.d }\n"
- ".inst 0xa041c710 // ld1w { z16.s-z19.s }, pn9.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xa041c6f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
".inst 0xc0042e01 // mova za.d[x9, #1], { z16.d-z19.d }\n"
- ".inst 0xa042c71c // ld1w { z28.s-z31.s }, pn9.b/Z, [x24, #0x8, MUL VL]\n"
+ ".inst 0xa042c6fc // ld1w { z28.s-z31.s }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
".inst 0xc0042f82 // mova za.d[x9, #2], { z28.d-z31.d }\n"
"b 22f\n"
"21:" // Width 3: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"22:" // Width 3: setup done
- "cmp x21, #0x8\n"
+ "cmp x20, #0x8\n"
"ble 24f\n"
"23:" // Width 3: Multiply loop: Main loop head
- "whilelt p1.s, XZR, x21\n"
- "whilelt p0.s, x27, x21\n"
- "ld1rqw { z0.s }, p1/Z, [x23]\n"
+ "whilelt p1.s, XZR, x20\n"
+ "whilelt p0.s, x26, x20\n"
+ "ld1rqw { z0.s }, p1/Z, [x22]\n"
".inst 0x658aa800 // bfcvt z0.h, p2/M, z0.s\n"
- "ld1rqw { z11.s }, p0/Z, [x23, #16]\n"
+ "ld1rqw { z11.s }, p0/Z, [x22, #16]\n"
".inst 0x658aa96b // bfcvt z11.h, p2/M, z11.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "sub x21, x21, #0x8\n"
+ "sub x20, x20, #0x8\n"
"uzp1 z11.h, z11.h, z11.h\n"
"trn1 z0.d, z0.d, z11.d\n"
- ".inst 0xa040a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26]\n"
- "cmp x21, #0x8\n"
- ".inst 0xa041a749 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa040a725 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25]\n"
+ "cmp x20, #0x8\n"
+ ".inst 0xa041a729 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc150b098 // bfdot za.s[x9, 0], { z4.h-z7.h }, z0.h[0]\n"
- "add x23, x23, #0x20\n"
- ".inst 0xa042a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ "add x22, x22, #0x20\n"
+ ".inst 0xa042a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc150b119 // bfdot za.s[x9, 1], { z8.h-z11.h }, z0.h[0]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
".inst 0xc150b21a // bfdot za.s[x9, 2], { z16.h-z19.h }, z0.h[0]\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
- ".inst 0xa041a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa040a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+ ".inst 0xa041a725 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc150b618 // bfdot za.s[x9, 0], { z16.h-z19.h }, z0.h[1]\n"
- ".inst 0xa042a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042a72d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc150b499 // bfdot za.s[x9, 1], { z4.h-z7.h }, z0.h[1]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
".inst 0xc150b59a // bfdot za.s[x9, 2], { z12.h-z15.h }, z0.h[1]\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
- ".inst 0xa041a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa040a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+ ".inst 0xa041a735 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc150ba18 // bfdot za.s[x9, 0], { z16.h-z19.h }, z0.h[2]\n"
- ".inst 0xa042a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc150ba99 // bfdot za.s[x9, 1], { z20.h-z23.h }, z0.h[2]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
".inst 0xc150ba1a // bfdot za.s[x9, 2], { z16.h-z19.h }, z0.h[2]\n"
- ".inst 0xa040a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26]\n"
- ".inst 0xa041a749 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa040a73d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25]\n"
+ ".inst 0xa041a729 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc150bf98 // bfdot za.s[x9, 0], { z28.h-z31.h }, z0.h[3]\n"
- ".inst 0xa042a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc150bd19 // bfdot za.s[x9, 1], { z8.h-z11.h }, z0.h[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
".inst 0xc150be1a // bfdot za.s[x9, 2], { z16.h-z19.h }, z0.h[3]\n"
"bgt 23b\n"
"24:" // Width 3: Multiply loop: Single iteration only
- "whilelt p1.s, XZR, x21\n"
- "whilelt p0.s, x27, x21\n"
- "ld1rqw { z0.s }, p1/Z, [x23]\n"
+ "whilelt p1.s, XZR, x20\n"
+ "whilelt p0.s, x26, x20\n"
+ "ld1rqw { z0.s }, p1/Z, [x22]\n"
".inst 0x658aa800 // bfcvt z0.h, p2/M, z0.s\n"
- "ld1rqw { z11.s }, p0/Z, [x23, #16]\n"
+ "ld1rqw { z11.s }, p0/Z, [x22, #16]\n"
".inst 0x658aa96b // bfcvt z11.h, p2/M, z11.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "subs x21, x21, #0x2\n"
+ "subs x20, x20, #0x2\n"
"uzp1 z11.h, z11.h, z11.h\n"
"trn1 z0.d, z0.d, z11.d\n"
- ".inst 0xa040a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26]\n"
- "add x23, x23, #0x20\n"
- ".inst 0xa041a749 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa040a725 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25]\n"
+ "add x22, x22, #0x20\n"
+ ".inst 0xa041a729 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc150b098 // bfdot za.s[x9, 0], { z4.h-z7.h }, z0.h[0]\n"
- ".inst 0xa042a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc150b119 // bfdot za.s[x9, 1], { z8.h-z11.h }, z0.h[0]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
".inst 0xc150b21a // bfdot za.s[x9, 2], { z16.h-z19.h }, z0.h[0]\n"
"ble 25f\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
- "subs x21, x21, #0x2\n"
+ ".inst 0xa040a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+ "subs x20, x20, #0x2\n"
".inst 0xc150b618 // bfdot za.s[x9, 0], { z16.h-z19.h }, z0.h[1]\n"
- ".inst 0xa041a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041a725 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc150b499 // bfdot za.s[x9, 1], { z4.h-z7.h }, z0.h[1]\n"
- ".inst 0xa042a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042a72d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc150b59a // bfdot za.s[x9, 2], { z12.h-z15.h }, z0.h[1]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 25f\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
- "subs x21, x21, #0x2\n"
+ ".inst 0xa040a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+ "subs x20, x20, #0x2\n"
".inst 0xc150ba18 // bfdot za.s[x9, 0], { z16.h-z19.h }, z0.h[2]\n"
- ".inst 0xa041a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041a735 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc150ba99 // bfdot za.s[x9, 1], { z20.h-z23.h }, z0.h[2]\n"
- ".inst 0xa042a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc150ba1a // bfdot za.s[x9, 2], { z16.h-z19.h }, z0.h[2]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 25f\n"
- ".inst 0xa040a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040a73d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25]\n"
".inst 0xc150bf98 // bfdot za.s[x9, 0], { z28.h-z31.h }, z0.h[3]\n"
- ".inst 0xa041a749 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041a729 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc150bd19 // bfdot za.s[x9, 1], { z8.h-z11.h }, z0.h[3]\n"
- ".inst 0xa042a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc150be1a // bfdot za.s[x9, 2], { z16.h-z19.h }, z0.h[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"25:" // Width 3: Multiply loop: multiply skip
"tbz %x[flags], #1, 26f\n"
- "add x21, %x[args_ptr], %[offset_min]\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
+ "add x20, %x[args_ptr], %[offset_min]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
- "ld1rw { z29.s }, p2/Z, [x21]\n"
+ "ld1rw { z29.s }, p2/Z, [x20]\n"
".inst 0xc0062c2c // mova { z12.d-z15.d }, za.d[x9, #1]\n"
- "ld1rw { z18.s }, p2/Z, [x20]\n"
+ "ld1rw { z18.s }, p2/Z, [x19]\n"
".inst 0xc1b2cba8 // fclamp { z8.s-z11.s }, z29.s, z18.s\n"
".inst 0xc0062c44 // mova { z4.d-z7.d }, za.d[x9, #2]\n"
- ".inst 0xa060c728 // st1w { z8.s-z11.s }, pn9.b, [x25]\n"
+ ".inst 0xa060c708 // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
".inst 0xc1b2cbac // fclamp { z12.s-z15.s }, z29.s, z18.s\n"
- ".inst 0xa061c72c // st1w { z12.s-z15.s }, pn9.b, [x25, #0x4, MUL VL]\n"
+ ".inst 0xa061c70c // st1w { z12.s-z15.s }, pn9.b, [x24, #0x4, MUL VL]\n"
".inst 0xc1b2cba4 // fclamp { z4.s-z7.s }, z29.s, z18.s\n"
- ".inst 0xa062c324 // st1w { z4.s-z7.s }, p8, [x25, #0x8, MUL VL]\n"
- "addvl x25, x25, #12\n"
+ ".inst 0xa062c304 // st1w { z4.s-z7.s }, p8, [x24, #0x8, MUL VL]\n"
+ "addvl x24, x24, #12\n"
"b 27f\n"
"26:" // Width 3: No activation
".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
- ".inst 0xa060c728 // st1w { z8.s-z11.s }, pn9.b, [x25]\n"
+ ".inst 0xa060c708 // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
".inst 0xc0062c2c // mova { z12.d-z15.d }, za.d[x9, #1]\n"
- ".inst 0xa061c72c // st1w { z12.s-z15.s }, pn9.b, [x25, #0x4, MUL VL]\n"
+ ".inst 0xa061c70c // st1w { z12.s-z15.s }, pn9.b, [x24, #0x4, MUL VL]\n"
".inst 0xc0062c44 // mova { z4.d-z7.d }, za.d[x9, #2]\n"
- ".inst 0xa062c324 // st1w { z4.s-z7.s }, p8, [x25, #0x8, MUL VL]\n"
- "addvl x25, x25, #12\n"
+ ".inst 0xa062c304 // st1w { z4.s-z7.s }, p8, [x24, #0x8, MUL VL]\n"
+ "addvl x24, x24, #12\n"
"27:" // Width 3: Output done
"b 36f\n"
"28:" // Width 4
- "mov x20, #0x3\n"
- "mov x23, %x[A_ptr]\n"
- "lsl x22, %x[K], #0x2\n"
- "msub x20, x10, x20, %x[N]\n"
- "mov x21, %x[K]\n"
- ".inst 0xf8b64af8 // rprfm pldmany, x22, [x23]\n"
- ".inst 0x25b467f0 // whilelt p8.s, XZR, x20, VLx4\n"
- "cbz x24, 29f\n"
- ".inst 0xa040c700 // ld1w { z0.s-z3.s }, pn9.b/Z, [x24]\n"
+ "mov x19, #0x3\n"
+ "mov x22, %x[A_ptr]\n"
+ "lsl x21, %x[K], #0x2\n"
+ "msub x19, x28, x19, %x[N]\n"
+ "mov x20, %x[K]\n"
+ ".inst 0xf8b54ad8 // rprfm pldmany, x21, [x22]\n"
+ ".inst 0x25b367f0 // whilelt p8.s, XZR, x19, VLx4\n"
+ "cbz x23, 29f\n"
+ ".inst 0xa040c6e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x23]\n"
".inst 0xc0042c00 // mova za.d[x9, #0], { z0.d-z3.d }\n"
- ".inst 0xa041c710 // ld1w { z16.s-z19.s }, pn9.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xa041c6f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
".inst 0xc0042e01 // mova za.d[x9, #1], { z16.d-z19.d }\n"
- ".inst 0xa042c71c // ld1w { z28.s-z31.s }, pn9.b/Z, [x24, #0x8, MUL VL]\n"
+ ".inst 0xa042c6fc // ld1w { z28.s-z31.s }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
".inst 0xc0042f82 // mova za.d[x9, #2], { z28.d-z31.d }\n"
- ".inst 0xa043c710 // ld1w { z16.s-z19.s }, pn9.b/Z, [x24, #0xc, MUL VL]\n"
+ ".inst 0xa043c6f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
".inst 0xc0042e03 // mova za.d[x9, #3], { z16.d-z19.d }\n"
- "addvl x24, x24, #16\n"
+ "addvl x23, x23, #16\n"
"b 30f\n"
"29:" // Width 4: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"30:" // Width 4: setup done
- "cmp x21, #0x8\n"
+ "cmp x20, #0x8\n"
"ble 32f\n"
"31:" // Width 4: Multiply loop: Main loop head
- "whilelt p1.s, XZR, x21\n"
- "whilelt p0.s, x27, x21\n"
- "ld1rqw { z0.s }, p1/Z, [x23]\n"
+ "whilelt p1.s, XZR, x20\n"
+ "whilelt p0.s, x26, x20\n"
+ "ld1rqw { z0.s }, p1/Z, [x22]\n"
".inst 0x658aa800 // bfcvt z0.h, p2/M, z0.s\n"
- "ld1rqw { z11.s }, p0/Z, [x23, #16]\n"
+ "ld1rqw { z11.s }, p0/Z, [x22, #16]\n"
".inst 0x658aa96b // bfcvt z11.h, p2/M, z11.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "sub x21, x21, #0x8\n"
+ "sub x20, x20, #0x8\n"
"uzp1 z11.h, z11.h, z11.h\n"
"trn1 z0.d, z0.d, z11.d\n"
- ".inst 0xa040a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26]\n"
- "cmp x21, #0x8\n"
- ".inst 0xa041a749 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa040a725 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25]\n"
+ "cmp x20, #0x8\n"
+ ".inst 0xa041a729 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc150b098 // bfdot za.s[x9, 0], { z4.h-z7.h }, z0.h[0]\n"
- "add x23, x23, #0x20\n"
- ".inst 0xa042a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ "add x22, x22, #0x20\n"
+ ".inst 0xa042a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc150b119 // bfdot za.s[x9, 1], { z8.h-z11.h }, z0.h[0]\n"
- ".inst 0xa043a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043a73d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc150b21a // bfdot za.s[x9, 2], { z16.h-z19.h }, z0.h[0]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
".inst 0xc150b39b // bfdot za.s[x9, 3], { z28.h-z31.h }, z0.h[0]\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
- ".inst 0xa041a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa040a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+ ".inst 0xa041a725 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc150b618 // bfdot za.s[x9, 0], { z16.h-z19.h }, z0.h[1]\n"
- ".inst 0xa042a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042a72d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc150b499 // bfdot za.s[x9, 1], { z4.h-z7.h }, z0.h[1]\n"
- ".inst 0xa043a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043a73d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc150b59a // bfdot za.s[x9, 2], { z12.h-z15.h }, z0.h[1]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
".inst 0xc150b79b // bfdot za.s[x9, 3], { z28.h-z31.h }, z0.h[1]\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
- ".inst 0xa041a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa040a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+ ".inst 0xa041a735 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc150ba18 // bfdot za.s[x9, 0], { z16.h-z19.h }, z0.h[2]\n"
- ".inst 0xa042a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc150ba99 // bfdot za.s[x9, 1], { z20.h-z23.h }, z0.h[2]\n"
- ".inst 0xa043a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043a72d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc150ba1a // bfdot za.s[x9, 2], { z16.h-z19.h }, z0.h[2]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
".inst 0xc150b99b // bfdot za.s[x9, 3], { z12.h-z15.h }, z0.h[2]\n"
- ".inst 0xa040a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26]\n"
- ".inst 0xa041a749 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa040a73d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25]\n"
+ ".inst 0xa041a729 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc150bf98 // bfdot za.s[x9, 0], { z28.h-z31.h }, z0.h[3]\n"
- ".inst 0xa042a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc150bd19 // bfdot za.s[x9, 1], { z8.h-z11.h }, z0.h[3]\n"
- ".inst 0xa043a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043a73d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc150be1a // bfdot za.s[x9, 2], { z16.h-z19.h }, z0.h[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
".inst 0xc150bf9b // bfdot za.s[x9, 3], { z28.h-z31.h }, z0.h[3]\n"
"bgt 31b\n"
"32:" // Width 4: Multiply loop: Single iteration only
- "whilelt p1.s, XZR, x21\n"
- "whilelt p0.s, x27, x21\n"
- "ld1rqw { z0.s }, p1/Z, [x23]\n"
+ "whilelt p1.s, XZR, x20\n"
+ "whilelt p0.s, x26, x20\n"
+ "ld1rqw { z0.s }, p1/Z, [x22]\n"
".inst 0x658aa800 // bfcvt z0.h, p2/M, z0.s\n"
- "ld1rqw { z11.s }, p0/Z, [x23, #16]\n"
+ "ld1rqw { z11.s }, p0/Z, [x22, #16]\n"
".inst 0x658aa96b // bfcvt z11.h, p2/M, z11.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "subs x21, x21, #0x2\n"
+ "subs x20, x20, #0x2\n"
"uzp1 z11.h, z11.h, z11.h\n"
"trn1 z0.d, z0.d, z11.d\n"
- ".inst 0xa040a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26]\n"
- "add x23, x23, #0x20\n"
- ".inst 0xa041a749 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa040a725 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25]\n"
+ "add x22, x22, #0x20\n"
+ ".inst 0xa041a729 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc150b098 // bfdot za.s[x9, 0], { z4.h-z7.h }, z0.h[0]\n"
- ".inst 0xa042a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc150b119 // bfdot za.s[x9, 1], { z8.h-z11.h }, z0.h[0]\n"
- ".inst 0xa043a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043a73d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc150b21a // bfdot za.s[x9, 2], { z16.h-z19.h }, z0.h[0]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
".inst 0xc150b39b // bfdot za.s[x9, 3], { z28.h-z31.h }, z0.h[0]\n"
"ble 33f\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
- "subs x21, x21, #0x2\n"
+ ".inst 0xa040a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+ "subs x20, x20, #0x2\n"
".inst 0xc150b618 // bfdot za.s[x9, 0], { z16.h-z19.h }, z0.h[1]\n"
- ".inst 0xa041a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041a725 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc150b499 // bfdot za.s[x9, 1], { z4.h-z7.h }, z0.h[1]\n"
- ".inst 0xa042a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042a72d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc150b59a // bfdot za.s[x9, 2], { z12.h-z15.h }, z0.h[1]\n"
- ".inst 0xa043a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043a73d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc150b79b // bfdot za.s[x9, 3], { z28.h-z31.h }, z0.h[1]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 33f\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
- "subs x21, x21, #0x2\n"
+ ".inst 0xa040a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25]\n"
+ "subs x20, x20, #0x2\n"
".inst 0xc150ba18 // bfdot za.s[x9, 0], { z16.h-z19.h }, z0.h[2]\n"
- ".inst 0xa041a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041a735 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc150ba99 // bfdot za.s[x9, 1], { z20.h-z23.h }, z0.h[2]\n"
- ".inst 0xa042a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc150ba1a // bfdot za.s[x9, 2], { z16.h-z19.h }, z0.h[2]\n"
- ".inst 0xa043a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043a72d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc150b99b // bfdot za.s[x9, 3], { z12.h-z15.h }, z0.h[2]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 33f\n"
- ".inst 0xa040a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040a73d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25]\n"
".inst 0xc150bf98 // bfdot za.s[x9, 0], { z28.h-z31.h }, z0.h[3]\n"
- ".inst 0xa041a749 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa041a729 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc150bd19 // bfdot za.s[x9, 1], { z8.h-z11.h }, z0.h[3]\n"
- ".inst 0xa042a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042a731 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc150be1a // bfdot za.s[x9, 2], { z16.h-z19.h }, z0.h[3]\n"
- ".inst 0xa043a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043a73d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc150bf9b // bfdot za.s[x9, 3], { z28.h-z31.h }, z0.h[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"33:" // Width 4: Multiply loop: multiply skip
"tbz %x[flags], #1, 34f\n"
- "add x21, %x[args_ptr], %[offset_min]\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
+ "add x20, %x[args_ptr], %[offset_min]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
- "ld1rw { z29.s }, p2/Z, [x21]\n"
+ "ld1rw { z29.s }, p2/Z, [x20]\n"
".inst 0xc0062c2c // mova { z12.d-z15.d }, za.d[x9, #1]\n"
- "ld1rw { z18.s }, p2/Z, [x20]\n"
+ "ld1rw { z18.s }, p2/Z, [x19]\n"
".inst 0xc1b2cba8 // fclamp { z8.s-z11.s }, z29.s, z18.s\n"
".inst 0xc0062c44 // mova { z4.d-z7.d }, za.d[x9, #2]\n"
- ".inst 0xa060c728 // st1w { z8.s-z11.s }, pn9.b, [x25]\n"
+ ".inst 0xa060c708 // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
".inst 0xc1b2cbac // fclamp { z12.s-z15.s }, z29.s, z18.s\n"
".inst 0xc0062c60 // mova { z0.d-z3.d }, za.d[x9, #3]\n"
- ".inst 0xa061c72c // st1w { z12.s-z15.s }, pn9.b, [x25, #0x4, MUL VL]\n"
+ ".inst 0xa061c70c // st1w { z12.s-z15.s }, pn9.b, [x24, #0x4, MUL VL]\n"
".inst 0xc1b2cba4 // fclamp { z4.s-z7.s }, z29.s, z18.s\n"
- ".inst 0xa062c724 // st1w { z4.s-z7.s }, pn9.b, [x25, #0x8, MUL VL]\n"
+ ".inst 0xa062c704 // st1w { z4.s-z7.s }, pn9.b, [x24, #0x8, MUL VL]\n"
".inst 0xc1b2cba0 // fclamp { z0.s-z3.s }, z29.s, z18.s\n"
- ".inst 0xa063c320 // st1w { z0.s-z3.s }, p8, [x25, #0xc, MUL VL]\n"
- "addvl x25, x25, #16\n"
+ ".inst 0xa063c300 // st1w { z0.s-z3.s }, p8, [x24, #0xc, MUL VL]\n"
+ "addvl x24, x24, #16\n"
"b 35f\n"
"34:" // Width 4: No activation
".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
- ".inst 0xa060c728 // st1w { z8.s-z11.s }, pn9.b, [x25]\n"
+ ".inst 0xa060c708 // st1w { z8.s-z11.s }, pn9.b, [x24]\n"
".inst 0xc0062c2c // mova { z12.d-z15.d }, za.d[x9, #1]\n"
- ".inst 0xa061c72c // st1w { z12.s-z15.s }, pn9.b, [x25, #0x4, MUL VL]\n"
+ ".inst 0xa061c70c // st1w { z12.s-z15.s }, pn9.b, [x24, #0x4, MUL VL]\n"
".inst 0xc0062c44 // mova { z4.d-z7.d }, za.d[x9, #2]\n"
- ".inst 0xa062c724 // st1w { z4.s-z7.s }, pn9.b, [x25, #0x8, MUL VL]\n"
+ ".inst 0xa062c704 // st1w { z4.s-z7.s }, pn9.b, [x24, #0x8, MUL VL]\n"
".inst 0xc0062c60 // mova { z0.d-z3.d }, za.d[x9, #3]\n"
- ".inst 0xa063c320 // st1w { z0.s-z3.s }, p8, [x25, #0xc, MUL VL]\n"
- "addvl x25, x25, #16\n"
+ ".inst 0xa063c300 // st1w { z0.s-z3.s }, p8, [x24, #0xc, MUL VL]\n"
+ "addvl x24, x24, #16\n"
"35:" // Width 4: Output done
- "subs x28, x28, #0x4\n"
- "sub %x[N], %x[N], x10, LSL #2\n"
+ "subs x27, x27, #0x4\n"
+ "sub %x[N], %x[N], x28, LSL #2\n"
"bgt 4b\n"
"36:" // Exit
".inst 0xd503467f // SMSTOP\n"
"ptrue p2.b\n"
: [N] "+&r" (N)
: [A_ptr] "r" (A_ptr), [B_ptr] "r" (B_ptr), [K] "r" (K), [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [output_ptr] "r" (output_ptr)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
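
The same renaming applies to the column-dispatch skeleton shared by these GEMV kernels: the block count held in x27 (x28 before the revert) is ceil(N / (4 * VL)), the "Width 4" path is the only one that loops back ("subs ..., #0x4; bgt 4b"), and the "Width 1/2/3" labels are one-shot tails that branch straight to the exit ("b 36f"). A rough scalar sketch of that control flow, with hypothetical helper names standing in for the labelled asm blocks:

#include <cstdio>

// Hypothetical stand-in for the kernel's "Width 1..4" paths; the real
// code inlines them as the labelled assembly sections above.
static void run_width(int w) { std::printf("emit %d x 4VL output columns\n", w); }

// Minimal sketch of the column loop: n_units = ceil(N / (4 * VL)).
// Only the full 4-wide path iterates; narrower widths finish the job.
void column_loop(int n_units) {
    while (n_units > 0) {
        if (n_units >= 4) {
            run_width(4);        // "Width 4", then subtract 4 and loop
            n_units -= 4;
        } else {
            run_width(n_units);  // "Width 1/2/3" one-shot tails
            break;               // mirrors the branch to the exit label
        }
    }
}

This dispatch shape is why each file in the revert shows four near-identical register-renaming hunks: one per width variant, plus the shared setup and exit code.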
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_s8qa_dot_16VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_s8qa_dot_16VL/generic.cpp
index 26dc0b9dd2..348c709119 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_s8qa_dot_16VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_s8qa_dot_16VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __ARM_FEATURE_SVE
#ifdef ARM_COMPUTE_ENABLE_SME2
@@ -54,257 +54,257 @@ void sme2_gemv_s8qa_dot_16VL (
__asm__ __volatile__(
"ptrue p2.b\n"
".inst 0xd503477f // SMSTART ZA\n"
- "cntw x28, ALL, MUL #4\n"
- "add x27, %x[N], x28\n"
- "sub x27, x27, #0x1\n"
- "udiv x27, x27, x28\n"
- "add x22, x27, #0x3\n"
- "and x22, x22, #0xfffffffffffffffc\n"
- "mul x22, x22, x28\n"
+ "cntw x27, ALL, MUL #4\n"
+ "add x26, %x[N], x27\n"
+ "sub x26, x26, #0x1\n"
+ "udiv x26, x26, x27\n"
+ "add x21, x26, #0x3\n"
+ "and x21, x21, #0xfffffffffffffffc\n"
+ "mul x21, x21, x27\n"
"mov x9, #0x0\n"
- "mov x26, %x[B_ptr]\n"
- "mov x25, %x[output_ptr]\n"
+ "mov x25, %x[B_ptr]\n"
+ "mov x24, %x[output_ptr]\n"
"ptrue p2.b\n"
".inst 0x25207810 // ptrue pn8.b\n"
- "mul x22, x22, %x[K]\n"
- "mov x21, #0x1\n"
+ "mul x21, x21, %x[K]\n"
+ "mov x20, #0x1\n"
"1:" // RHS size check loop
- "cmp x22, #0x200000\n"
+ "cmp x21, #0x200000\n"
"blt 2f\n"
- "tbnz x22, #0, 3f\n"
- "lsr x22, x22, #0x1\n"
- "lsl x21, x21, #0x1\n"
+ "tbnz x21, #0, 3f\n"
+ "lsr x21, x21, #0x1\n"
+ "lsl x20, x20, #0x1\n"
"b 1b\n"
"2:" // RHS do prefetch
- "lsl x20, x22, #0x26\n"
- "sub x21, x21, #0x1\n"
- "lsl x21, x21, #0x16\n"
- "orr x22, x22, x20\n"
- "orr x22, x22, x21\n"
- ".inst 0xf8b64b5a // rprfm pldonce, x22, [x26]\n"
+ "lsl x19, x21, #0x26\n"
+ "sub x20, x20, #0x1\n"
+ "lsl x20, x20, #0x16\n"
+ "orr x21, x21, x19\n"
+ "orr x21, x21, x20\n"
+ ".inst 0xf8b54b3a // rprfm pldonce, x21, [x25]\n"
"3:" // RHS prefetch exit
- "mov x24, %x[col_bias]\n"
+ "mov x23, %x[col_bias]\n"
"mov z26.s, #0x0\n"
"mov z24.b, #0x1\n"
"bic %x[flags], %x[flags], #0x80000000\n"
"4:" // Column loop
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"bge 34f\n"
- "cmp x27, #0x2\n"
+ "cmp x26, #0x2\n"
"bgt 24f\n"
"beq 14f\n"
- "mov x23, %x[A_ptr]\n"
- "mov x22, %x[K]\n"
- "mov x20, %x[N]\n"
+ "mov x22, %x[A_ptr]\n"
"mov x21, %x[K]\n"
- ".inst 0xf8b64af8 // rprfm pldmany, x22, [x23]\n"
- "whilelt p1.b, XZR, x20\n"
- "cbz x24, 5f\n"
- ".inst 0xa040c304 // ld1w { z4.s-z7.s }, pn8.b/Z, [x24]\n"
+ "mov x19, %x[N]\n"
+ "mov x20, %x[K]\n"
+ ".inst 0xf8b54ad8 // rprfm pldmany, x21, [x22]\n"
+ "whilelt p1.b, XZR, x19\n"
+ "cbz x23, 5f\n"
+ ".inst 0xa040c2e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x23]\n"
".inst 0xc0042c80 // mova za.d[x9, #0], { z4.d-z7.d }\n"
"b 6f\n"
"5:" // Width 1: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"6:" // Width 1: setup done
- "cmp x21, #0x10\n"
+ "cmp x20, #0x10\n"
"ble 9f\n"
"7:" // Width 1: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x21\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "add x23, x23, #0x10\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
+ "whilelt p0.b, XZR, x20\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0xa0408331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25]\n"
".inst 0xc153b220 // sdot za.s[x9, 0], { z16.b-z19.b }, z3.b[0]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
".inst 0xc153b6a0 // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[1]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
".inst 0xc153baa0 // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[2]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
".inst 0xc153bea0 // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"tbnz %x[flags], #31, 8f\n"
"sdot z26.s, z3.b, z24.b\n"
"8:" // Width 1: Multiply loop: unique 1: skip row sum
- "sub x21, x21, #0x10\n"
- "cmp x21, #0x10\n"
+ "sub x20, x20, #0x10\n"
+ "cmp x20, #0x10\n"
"bgt 7b\n"
"9:" // Width 1: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x21\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "subs x21, x21, #0x4\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
- "add x23, x23, #0x10\n"
+ "whilelt p0.b, XZR, x20\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
+ "subs x20, x20, #0x4\n"
+ ".inst 0xa0408331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25]\n"
+ "add x22, x22, #0x10\n"
".inst 0xc153b220 // sdot za.s[x9, 0], { z16.b-z19.b }, z3.b[0]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 10f\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
- "subs x21, x21, #0x4\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+ "subs x20, x20, #0x4\n"
".inst 0xc153b6a0 // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[1]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 10f\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
- "subs x21, x21, #0x4\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+ "subs x20, x20, #0x4\n"
".inst 0xc153baa0 // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[2]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 10f\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
".inst 0xc153bea0 // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"10:" // Width 1: Multiply loop: multiply skip
"tbnz %x[flags], #31, 11f\n"
"sdot z26.s, z3.b, z24.b\n"
"11:" // Width 1: Multiply loop: unique 2: skip row sum
"tbnz %x[flags], #31, 12f\n"
- "add x21, %x[qp], %[b_offset]\n"
- "mov x20, #0x4\n"
- "ld1rw { z10.s }, p2/Z, [x21]\n"
+ "add x20, %x[qp], %[b_offset]\n"
+ "mov x19, #0x4\n"
+ "ld1rw { z10.s }, p2/Z, [x20]\n"
"neg z10.s, p2/M, z10.s\n"
- "whilelt p0.s, XZR, x20\n"
+ "whilelt p0.s, XZR, x19\n"
"saddv d26, p0, z26.s\n"
"mov z26.s, z26.s[0]\n"
"mul z26.s, p2/M, z26.s, z10.s\n"
"orr %x[flags], %x[flags], #0x80000000\n"
"12:" // Width 1: skip row sum fixup
".inst 0xc0904b40 // addha za0.s, p2/M, p2/M, z26.s\n"
- "add x20, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z5.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[per_layer_right_shift]\n"
+ "add x19, %x[qp], %[per_layer_mul]\n"
+ "ld1rw { z5.s }, p2/Z, [x19]\n"
+ "add x19, %x[qp], %[per_layer_right_shift]\n"
".inst 0xc0904b41 // addha za1.s, p2/M, p2/M, z26.s\n"
- "ld1rw { z4.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[c_offset]\n"
- "add x21, %x[qp], %[minval]\n"
+ "ld1rw { z4.s }, p2/Z, [x19]\n"
+ "add x19, %x[qp], %[c_offset]\n"
+ "add x20, %x[qp], %[minval]\n"
".inst 0xc0904b42 // addha za2.s, p2/M, p2/M, z26.s\n"
- "ld1rw { z6.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[maxval]\n"
+ "ld1rw { z6.s }, p2/Z, [x19]\n"
+ "add x19, %x[qp], %[maxval]\n"
".inst 0xc0904b43 // addha za3.s, p2/M, p2/M, z26.s\n"
- "ld1rw { z21.s }, p2/Z, [x21]\n"
+ "ld1rw { z21.s }, p2/Z, [x20]\n"
".inst 0xc0062c1c // mova { z28.d-z31.d }, za.d[x9, #0]\n"
".inst 0xc1a5ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z5.s\n"
".inst 0xc1a4aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
- "ld1rw { z16.s }, p2/Z, [x20]\n"
+ "ld1rw { z16.s }, p2/Z, [x19]\n"
".inst 0xc1a6ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z6.s\n"
".inst 0xc1b0cebc // sclamp { z28.s-z31.s }, z21.s, z16.s\n"
"uzp1 z28.h, z28.h, z29.h\n"
"uzp1 z29.h, z30.h, z31.h\n"
"uzp1 z28.b, z28.b, z29.b\n"
- "st1b { z28.b }, p1, [x25]\n"
- "addvl x25, x25, #1\n"
+ "st1b { z28.b }, p1, [x24]\n"
+ "addvl x24, x24, #1\n"
"13:" // Width 1: Output done
"b 44f\n"
"14:" // Width 2
- "mov x23, %x[A_ptr]\n"
- "mov x22, %x[K]\n"
- "sub x20, %x[N], x28\n"
+ "mov x22, %x[A_ptr]\n"
"mov x21, %x[K]\n"
- ".inst 0xf8b64af8 // rprfm pldmany, x22, [x23]\n"
- "whilelt p1.b, XZR, x20\n"
- "cbz x24, 15f\n"
- ".inst 0xa040c304 // ld1w { z4.s-z7.s }, pn8.b/Z, [x24]\n"
+ "sub x19, %x[N], x27\n"
+ "mov x20, %x[K]\n"
+ ".inst 0xf8b54ad8 // rprfm pldmany, x21, [x22]\n"
+ "whilelt p1.b, XZR, x19\n"
+ "cbz x23, 15f\n"
+ ".inst 0xa040c2e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x23]\n"
".inst 0xc0042c80 // mova za.d[x9, #0], { z4.d-z7.d }\n"
- ".inst 0xa041c314 // ld1w { z20.s-z23.s }, pn8.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xa041c2f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x23, #0x4, MUL VL]\n"
".inst 0xc0042e81 // mova za.d[x9, #1], { z20.d-z23.d }\n"
"b 16f\n"
"15:" // Width 2: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"16:" // Width 2: setup done
- "cmp x21, #0x10\n"
+ "cmp x20, #0x10\n"
"ble 19f\n"
"17:" // Width 2: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x21\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "add x23, x23, #0x10\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
+ "whilelt p0.b, XZR, x20\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0xa0408331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25]\n"
".inst 0xc153b220 // sdot za.s[x9, 0], { z16.b-z19.b }, z3.b[0]\n"
- ".inst 0xa0418345 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418325 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b0a1 // sdot za.s[x9, 1], { z4.b-z7.b }, z3.b[0]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
".inst 0xc153b6a0 // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[1]\n"
- ".inst 0xa0418349 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418329 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b521 // sdot za.s[x9, 1], { z8.b-z11.b }, z3.b[1]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
".inst 0xc153baa0 // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[2]\n"
- ".inst 0xa0418345 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418325 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b8a1 // sdot za.s[x9, 1], { z4.b-z7.b }, z3.b[2]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
".inst 0xc153bea0 // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[3]\n"
- ".inst 0xa0418355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153bea1 // sdot za.s[x9, 1], { z20.b-z23.b }, z3.b[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"tbnz %x[flags], #31, 18f\n"
"sdot z26.s, z3.b, z24.b\n"
"18:" // Width 2: Multiply loop: unique 3: skip row sum
- "sub x21, x21, #0x10\n"
- "cmp x21, #0x10\n"
+ "sub x20, x20, #0x10\n"
+ "cmp x20, #0x10\n"
"bgt 17b\n"
"19:" // Width 2: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x21\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "subs x21, x21, #0x4\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
- "add x23, x23, #0x10\n"
+ "whilelt p0.b, XZR, x20\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
+ "subs x20, x20, #0x4\n"
+ ".inst 0xa0408331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25]\n"
+ "add x22, x22, #0x10\n"
".inst 0xc153b220 // sdot za.s[x9, 0], { z16.b-z19.b }, z3.b[0]\n"
- ".inst 0xa0418345 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418325 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b0a1 // sdot za.s[x9, 1], { z4.b-z7.b }, z3.b[0]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 20f\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
- "subs x21, x21, #0x4\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+ "subs x20, x20, #0x4\n"
".inst 0xc153b6a0 // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[1]\n"
- ".inst 0xa0418349 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418329 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b521 // sdot za.s[x9, 1], { z8.b-z11.b }, z3.b[1]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 20f\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
- "subs x21, x21, #0x4\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+ "subs x20, x20, #0x4\n"
".inst 0xc153baa0 // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[2]\n"
- ".inst 0xa0418345 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418325 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b8a1 // sdot za.s[x9, 1], { z4.b-z7.b }, z3.b[2]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 20f\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
".inst 0xc153bea0 // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[3]\n"
- ".inst 0xa0418355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153bea1 // sdot za.s[x9, 1], { z20.b-z23.b }, z3.b[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"20:" // Width 2: Multiply loop: multiply skip
"tbnz %x[flags], #31, 21f\n"
"sdot z26.s, z3.b, z24.b\n"
"21:" // Width 2: Multiply loop: unique 4: skip row sum
"tbnz %x[flags], #31, 22f\n"
- "add x21, %x[qp], %[b_offset]\n"
- "mov x20, #0x4\n"
- "ld1rw { z10.s }, p2/Z, [x21]\n"
+ "add x20, %x[qp], %[b_offset]\n"
+ "mov x19, #0x4\n"
+ "ld1rw { z10.s }, p2/Z, [x20]\n"
"neg z10.s, p2/M, z10.s\n"
- "whilelt p0.s, XZR, x20\n"
+ "whilelt p0.s, XZR, x19\n"
"saddv d26, p0, z26.s\n"
"mov z26.s, z26.s[0]\n"
"mul z26.s, p2/M, z26.s, z10.s\n"
"orr %x[flags], %x[flags], #0x80000000\n"
"22:" // Width 2: skip row sum fixup
".inst 0xc0904b40 // addha za0.s, p2/M, p2/M, z26.s\n"
- "add x20, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z5.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[per_layer_right_shift]\n"
+ "add x19, %x[qp], %[per_layer_mul]\n"
+ "ld1rw { z5.s }, p2/Z, [x19]\n"
+ "add x19, %x[qp], %[per_layer_right_shift]\n"
".inst 0xc0904b41 // addha za1.s, p2/M, p2/M, z26.s\n"
- "ld1rw { z4.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[c_offset]\n"
- "add x21, %x[qp], %[minval]\n"
+ "ld1rw { z4.s }, p2/Z, [x19]\n"
+ "add x19, %x[qp], %[c_offset]\n"
+ "add x20, %x[qp], %[minval]\n"
".inst 0xc0904b42 // addha za2.s, p2/M, p2/M, z26.s\n"
- "ld1rw { z6.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[maxval]\n"
+ "ld1rw { z6.s }, p2/Z, [x19]\n"
+ "add x19, %x[qp], %[maxval]\n"
".inst 0xc0904b43 // addha za3.s, p2/M, p2/M, z26.s\n"
- "ld1rw { z21.s }, p2/Z, [x21]\n"
+ "ld1rw { z21.s }, p2/Z, [x20]\n"
".inst 0xc0062c1c // mova { z28.d-z31.d }, za.d[x9, #0]\n"
".inst 0xc1a5ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z5.s\n"
".inst 0xc0062c2c // mova { z12.d-z15.d }, za.d[x9, #1]\n"
".inst 0xc1a5ac0c // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z5.s\n"
".inst 0xc1a4aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
- "ld1rw { z16.s }, p2/Z, [x20]\n"
+ "ld1rw { z16.s }, p2/Z, [x19]\n"
".inst 0xc1a4aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z4.s\n"
".inst 0xc1a6ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z6.s\n"
".inst 0xc1a6ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z6.s\n"
@@ -315,137 +315,137 @@ void sme2_gemv_s8qa_dot_16VL (
"uzp1 z12.h, z12.h, z13.h\n"
"uzp1 z13.h, z14.h, z15.h\n"
"uzp1 z28.b, z28.b, z29.b\n"
- "st1b { z28.b }, p2, [x25]\n"
+ "st1b { z28.b }, p2, [x24]\n"
"uzp1 z12.b, z12.b, z13.b\n"
- "st1b { z12.b }, p1, [x25, #1, MUL VL]\n"
- "addvl x25, x25, #2\n"
+ "st1b { z12.b }, p1, [x24, #1, MUL VL]\n"
+ "addvl x24, x24, #2\n"
"23:" // Width 2: Output done
"b 44f\n"
"24:" // Width 3
- "mov x20, #0x2\n"
- "mov x23, %x[A_ptr]\n"
- "mov x22, %x[K]\n"
- "msub x20, x28, x20, %x[N]\n"
+ "mov x19, #0x2\n"
+ "mov x22, %x[A_ptr]\n"
"mov x21, %x[K]\n"
- ".inst 0xf8b64af8 // rprfm pldmany, x22, [x23]\n"
- "whilelt p1.b, XZR, x20\n"
- "cbz x24, 25f\n"
- ".inst 0xa040c304 // ld1w { z4.s-z7.s }, pn8.b/Z, [x24]\n"
+ "msub x19, x27, x19, %x[N]\n"
+ "mov x20, %x[K]\n"
+ ".inst 0xf8b54ad8 // rprfm pldmany, x21, [x22]\n"
+ "whilelt p1.b, XZR, x19\n"
+ "cbz x23, 25f\n"
+ ".inst 0xa040c2e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x23]\n"
".inst 0xc0042c80 // mova za.d[x9, #0], { z4.d-z7.d }\n"
- ".inst 0xa041c314 // ld1w { z20.s-z23.s }, pn8.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xa041c2f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x23, #0x4, MUL VL]\n"
".inst 0xc0042e81 // mova za.d[x9, #1], { z20.d-z23.d }\n"
- ".inst 0xa042c314 // ld1w { z20.s-z23.s }, pn8.b/Z, [x24, #0x8, MUL VL]\n"
+ ".inst 0xa042c2f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x23, #0x8, MUL VL]\n"
".inst 0xc0042e82 // mova za.d[x9, #2], { z20.d-z23.d }\n"
"b 26f\n"
"25:" // Width 3: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"26:" // Width 3: setup done
- "cmp x21, #0x10\n"
+ "cmp x20, #0x10\n"
"ble 29f\n"
"27:" // Width 3: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x21\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "add x23, x23, #0x10\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
+ "whilelt p0.b, XZR, x20\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0xa0408331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25]\n"
".inst 0xc153b220 // sdot za.s[x9, 0], { z16.b-z19.b }, z3.b[0]\n"
- ".inst 0xa0418345 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418325 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b0a1 // sdot za.s[x9, 1], { z4.b-z7.b }, z3.b[0]\n"
- ".inst 0xa042834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042832d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc153b1a2 // sdot za.s[x9, 2], { z12.b-z15.b }, z3.b[0]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
".inst 0xc153b6a0 // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[1]\n"
- ".inst 0xa0418349 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418329 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b521 // sdot za.s[x9, 1], { z8.b-z11.b }, z3.b[1]\n"
- ".inst 0xa0428351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa0428331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc153b622 // sdot za.s[x9, 2], { z16.b-z19.b }, z3.b[1]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
".inst 0xc153baa0 // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[2]\n"
- ".inst 0xa0418345 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418325 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b8a1 // sdot za.s[x9, 1], { z4.b-z7.b }, z3.b[2]\n"
- ".inst 0xa042834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042832d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc153b9a2 // sdot za.s[x9, 2], { z12.b-z15.b }, z3.b[2]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
".inst 0xc153bea0 // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[3]\n"
- ".inst 0xa0418355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153bea1 // sdot za.s[x9, 1], { z20.b-z23.b }, z3.b[3]\n"
- ".inst 0xa0428351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa0428331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc153be22 // sdot za.s[x9, 2], { z16.b-z19.b }, z3.b[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"tbnz %x[flags], #31, 28f\n"
"sdot z26.s, z3.b, z24.b\n"
"28:" // Width 3: Multiply loop: unique 5: skip row sum
- "sub x21, x21, #0x10\n"
- "cmp x21, #0x10\n"
+ "sub x20, x20, #0x10\n"
+ "cmp x20, #0x10\n"
"bgt 27b\n"
"29:" // Width 3: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x21\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "subs x21, x21, #0x4\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
- "add x23, x23, #0x10\n"
+ "whilelt p0.b, XZR, x20\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
+ "subs x20, x20, #0x4\n"
+ ".inst 0xa0408331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25]\n"
+ "add x22, x22, #0x10\n"
".inst 0xc153b220 // sdot za.s[x9, 0], { z16.b-z19.b }, z3.b[0]\n"
- ".inst 0xa0418345 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418325 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b0a1 // sdot za.s[x9, 1], { z4.b-z7.b }, z3.b[0]\n"
- ".inst 0xa042834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042832d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc153b1a2 // sdot za.s[x9, 2], { z12.b-z15.b }, z3.b[0]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 30f\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
- "subs x21, x21, #0x4\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+ "subs x20, x20, #0x4\n"
".inst 0xc153b6a0 // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[1]\n"
- ".inst 0xa0418349 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418329 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b521 // sdot za.s[x9, 1], { z8.b-z11.b }, z3.b[1]\n"
- ".inst 0xa0428351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa0428331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc153b622 // sdot za.s[x9, 2], { z16.b-z19.b }, z3.b[1]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 30f\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
- "subs x21, x21, #0x4\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+ "subs x20, x20, #0x4\n"
".inst 0xc153baa0 // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[2]\n"
- ".inst 0xa0418345 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418325 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b8a1 // sdot za.s[x9, 1], { z4.b-z7.b }, z3.b[2]\n"
- ".inst 0xa042834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042832d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc153b9a2 // sdot za.s[x9, 2], { z12.b-z15.b }, z3.b[2]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 30f\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
".inst 0xc153bea0 // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[3]\n"
- ".inst 0xa0418355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153bea1 // sdot za.s[x9, 1], { z20.b-z23.b }, z3.b[3]\n"
- ".inst 0xa0428351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa0428331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc153be22 // sdot za.s[x9, 2], { z16.b-z19.b }, z3.b[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"30:" // Width 3: Multiply loop: multiply skip
"tbnz %x[flags], #31, 31f\n"
"sdot z26.s, z3.b, z24.b\n"
"31:" // Width 3: Multiply loop: unique 6: skip row sum
"tbnz %x[flags], #31, 32f\n"
- "add x21, %x[qp], %[b_offset]\n"
- "mov x20, #0x4\n"
- "ld1rw { z10.s }, p2/Z, [x21]\n"
+ "add x20, %x[qp], %[b_offset]\n"
+ "mov x19, #0x4\n"
+ "ld1rw { z10.s }, p2/Z, [x20]\n"
"neg z10.s, p2/M, z10.s\n"
- "whilelt p0.s, XZR, x20\n"
+ "whilelt p0.s, XZR, x19\n"
"saddv d26, p0, z26.s\n"
"mov z26.s, z26.s[0]\n"
"mul z26.s, p2/M, z26.s, z10.s\n"
"orr %x[flags], %x[flags], #0x80000000\n"
"32:" // Width 3: skip row sum fixup
".inst 0xc0904b40 // addha za0.s, p2/M, p2/M, z26.s\n"
- "add x20, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z5.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[per_layer_right_shift]\n"
+ "add x19, %x[qp], %[per_layer_mul]\n"
+ "ld1rw { z5.s }, p2/Z, [x19]\n"
+ "add x19, %x[qp], %[per_layer_right_shift]\n"
".inst 0xc0904b41 // addha za1.s, p2/M, p2/M, z26.s\n"
- "ld1rw { z4.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[c_offset]\n"
- "add x21, %x[qp], %[minval]\n"
+ "ld1rw { z4.s }, p2/Z, [x19]\n"
+ "add x19, %x[qp], %[c_offset]\n"
+ "add x20, %x[qp], %[minval]\n"
".inst 0xc0904b42 // addha za2.s, p2/M, p2/M, z26.s\n"
- "ld1rw { z6.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[maxval]\n"
+ "ld1rw { z6.s }, p2/Z, [x19]\n"
+ "add x19, %x[qp], %[maxval]\n"
".inst 0xc0904b43 // addha za3.s, p2/M, p2/M, z26.s\n"
- "ld1rw { z21.s }, p2/Z, [x21]\n"
+ "ld1rw { z21.s }, p2/Z, [x20]\n"
".inst 0xc0062c1c // mova { z28.d-z31.d }, za.d[x9, #0]\n"
".inst 0xc1a5ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z5.s\n"
".inst 0xc0062c2c // mova { z12.d-z15.d }, za.d[x9, #1]\n"
@@ -453,7 +453,7 @@ void sme2_gemv_s8qa_dot_16VL (
".inst 0xc0062c40 // mova { z0.d-z3.d }, za.d[x9, #2]\n"
".inst 0xc1a5ac00 // sqdmulh { z0.s-z3.s }, { z0.s-z3.s }, z5.s\n"
".inst 0xc1a4aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
- "ld1rw { z16.s }, p2/Z, [x20]\n"
+ "ld1rw { z16.s }, p2/Z, [x19]\n"
".inst 0xc1a4aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z4.s\n"
".inst 0xc1a4aa20 // srshl { z0.s-z3.s }, { z0.s-z3.s }, z4.s\n"
".inst 0xc1a6ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z6.s\n"
@@ -469,158 +469,158 @@ void sme2_gemv_s8qa_dot_16VL (
"uzp1 z0.h, z0.h, z1.h\n"
"uzp1 z1.h, z2.h, z3.h\n"
"uzp1 z28.b, z28.b, z29.b\n"
- "st1b { z28.b }, p2, [x25]\n"
+ "st1b { z28.b }, p2, [x24]\n"
"uzp1 z12.b, z12.b, z13.b\n"
- "st1b { z12.b }, p2, [x25, #1, MUL VL]\n"
+ "st1b { z12.b }, p2, [x24, #1, MUL VL]\n"
"uzp1 z0.b, z0.b, z1.b\n"
- "st1b { z0.b }, p1, [x25, #2, MUL VL]\n"
- "addvl x25, x25, #3\n"
+ "st1b { z0.b }, p1, [x24, #2, MUL VL]\n"
+ "addvl x24, x24, #3\n"
"33:" // Width 3: Output done
"b 44f\n"
"34:" // Width 4
- "mov x20, #0x3\n"
- "mov x23, %x[A_ptr]\n"
- "mov x22, %x[K]\n"
- "msub x20, x28, x20, %x[N]\n"
+ "mov x19, #0x3\n"
+ "mov x22, %x[A_ptr]\n"
"mov x21, %x[K]\n"
- ".inst 0xf8b64af8 // rprfm pldmany, x22, [x23]\n"
- "whilelt p1.b, XZR, x20\n"
- "cbz x24, 35f\n"
- ".inst 0xa040c304 // ld1w { z4.s-z7.s }, pn8.b/Z, [x24]\n"
+ "msub x19, x27, x19, %x[N]\n"
+ "mov x20, %x[K]\n"
+ ".inst 0xf8b54ad8 // rprfm pldmany, x21, [x22]\n"
+ "whilelt p1.b, XZR, x19\n"
+ "cbz x23, 35f\n"
+ ".inst 0xa040c2e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x23]\n"
".inst 0xc0042c80 // mova za.d[x9, #0], { z4.d-z7.d }\n"
- ".inst 0xa041c314 // ld1w { z20.s-z23.s }, pn8.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xa041c2f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x23, #0x4, MUL VL]\n"
".inst 0xc0042e81 // mova za.d[x9, #1], { z20.d-z23.d }\n"
- ".inst 0xa042c314 // ld1w { z20.s-z23.s }, pn8.b/Z, [x24, #0x8, MUL VL]\n"
+ ".inst 0xa042c2f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x23, #0x8, MUL VL]\n"
".inst 0xc0042e82 // mova za.d[x9, #2], { z20.d-z23.d }\n"
- ".inst 0xa043c310 // ld1w { z16.s-z19.s }, pn8.b/Z, [x24, #0xc, MUL VL]\n"
+ ".inst 0xa043c2f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x23, #0xc, MUL VL]\n"
".inst 0xc0042e03 // mova za.d[x9, #3], { z16.d-z19.d }\n"
- "addvl x24, x24, #16\n"
+ "addvl x23, x23, #16\n"
"b 36f\n"
"35:" // Width 4: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"36:" // Width 4: setup done
- "cmp x21, #0x10\n"
+ "cmp x20, #0x10\n"
"ble 39f\n"
"37:" // Width 4: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x21\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "add x23, x23, #0x10\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
+ "whilelt p0.b, XZR, x20\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0xa0408331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25]\n"
".inst 0xc153b220 // sdot za.s[x9, 0], { z16.b-z19.b }, z3.b[0]\n"
- ".inst 0xa0418345 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418325 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b0a1 // sdot za.s[x9, 1], { z4.b-z7.b }, z3.b[0]\n"
- ".inst 0xa042834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042832d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc153b1a2 // sdot za.s[x9, 2], { z12.b-z15.b }, z3.b[0]\n"
- ".inst 0xa043834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043832d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc153b1a3 // sdot za.s[x9, 3], { z12.b-z15.b }, z3.b[0]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
".inst 0xc153b6a0 // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[1]\n"
- ".inst 0xa0418349 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418329 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b521 // sdot za.s[x9, 1], { z8.b-z11.b }, z3.b[1]\n"
- ".inst 0xa0428351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa0428331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc153b622 // sdot za.s[x9, 2], { z16.b-z19.b }, z3.b[1]\n"
- ".inst 0xa043834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043832d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc153b5a3 // sdot za.s[x9, 3], { z12.b-z15.b }, z3.b[1]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
".inst 0xc153baa0 // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[2]\n"
- ".inst 0xa0418345 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418325 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b8a1 // sdot za.s[x9, 1], { z4.b-z7.b }, z3.b[2]\n"
- ".inst 0xa042834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042832d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc153b9a2 // sdot za.s[x9, 2], { z12.b-z15.b }, z3.b[2]\n"
- ".inst 0xa043835d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043833d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc153bba3 // sdot za.s[x9, 3], { z28.b-z31.b }, z3.b[2]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
".inst 0xc153bea0 // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[3]\n"
- ".inst 0xa0418355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153bea1 // sdot za.s[x9, 1], { z20.b-z23.b }, z3.b[3]\n"
- ".inst 0xa0428351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa0428331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc153be22 // sdot za.s[x9, 2], { z16.b-z19.b }, z3.b[3]\n"
- ".inst 0xa0438351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa0438331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc153be23 // sdot za.s[x9, 3], { z16.b-z19.b }, z3.b[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"tbnz %x[flags], #31, 38f\n"
"sdot z26.s, z3.b, z24.b\n"
"38:" // Width 4: Multiply loop: unique 7: skip row sum
- "sub x21, x21, #0x10\n"
- "cmp x21, #0x10\n"
+ "sub x20, x20, #0x10\n"
+ "cmp x20, #0x10\n"
"bgt 37b\n"
"39:" // Width 4: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x21\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "subs x21, x21, #0x4\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
- "add x23, x23, #0x10\n"
+ "whilelt p0.b, XZR, x20\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
+ "subs x20, x20, #0x4\n"
+ ".inst 0xa0408331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25]\n"
+ "add x22, x22, #0x10\n"
".inst 0xc153b220 // sdot za.s[x9, 0], { z16.b-z19.b }, z3.b[0]\n"
- ".inst 0xa0418345 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418325 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b0a1 // sdot za.s[x9, 1], { z4.b-z7.b }, z3.b[0]\n"
- ".inst 0xa042834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042832d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc153b1a2 // sdot za.s[x9, 2], { z12.b-z15.b }, z3.b[0]\n"
- ".inst 0xa043834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043832d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc153b1a3 // sdot za.s[x9, 3], { z12.b-z15.b }, z3.b[0]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 40f\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
- "subs x21, x21, #0x4\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+ "subs x20, x20, #0x4\n"
".inst 0xc153b6a0 // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[1]\n"
- ".inst 0xa0418349 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418329 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b521 // sdot za.s[x9, 1], { z8.b-z11.b }, z3.b[1]\n"
- ".inst 0xa0428351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa0428331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc153b622 // sdot za.s[x9, 2], { z16.b-z19.b }, z3.b[1]\n"
- ".inst 0xa043834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043832d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc153b5a3 // sdot za.s[x9, 3], { z12.b-z15.b }, z3.b[1]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 40f\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
- "subs x21, x21, #0x4\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+ "subs x20, x20, #0x4\n"
".inst 0xc153baa0 // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[2]\n"
- ".inst 0xa0418345 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418325 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b8a1 // sdot za.s[x9, 1], { z4.b-z7.b }, z3.b[2]\n"
- ".inst 0xa042834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042832d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc153b9a2 // sdot za.s[x9, 2], { z12.b-z15.b }, z3.b[2]\n"
- ".inst 0xa043835d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043833d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc153bba3 // sdot za.s[x9, 3], { z28.b-z31.b }, z3.b[2]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 40f\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
".inst 0xc153bea0 // sdot za.s[x9, 0], { z20.b-z23.b }, z3.b[3]\n"
- ".inst 0xa0418355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153bea1 // sdot za.s[x9, 1], { z20.b-z23.b }, z3.b[3]\n"
- ".inst 0xa0428351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa0428331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc153be22 // sdot za.s[x9, 2], { z16.b-z19.b }, z3.b[3]\n"
- ".inst 0xa0438351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa0438331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc153be23 // sdot za.s[x9, 3], { z16.b-z19.b }, z3.b[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"40:" // Width 4: Multiply loop: multiply skip
"tbnz %x[flags], #31, 41f\n"
"sdot z26.s, z3.b, z24.b\n"
"41:" // Width 4: Multiply loop: unique 8: skip row sum
"tbnz %x[flags], #31, 42f\n"
- "add x21, %x[qp], %[b_offset]\n"
- "mov x20, #0x4\n"
- "ld1rw { z10.s }, p2/Z, [x21]\n"
+ "add x20, %x[qp], %[b_offset]\n"
+ "mov x19, #0x4\n"
+ "ld1rw { z10.s }, p2/Z, [x20]\n"
"neg z10.s, p2/M, z10.s\n"
- "whilelt p0.s, XZR, x20\n"
+ "whilelt p0.s, XZR, x19\n"
"saddv d26, p0, z26.s\n"
"mov z26.s, z26.s[0]\n"
"mul z26.s, p2/M, z26.s, z10.s\n"
"orr %x[flags], %x[flags], #0x80000000\n"
"42:" // Width 4: skip row sum fixup
".inst 0xc0904b40 // addha za0.s, p2/M, p2/M, z26.s\n"
- "add x20, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z5.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[per_layer_right_shift]\n"
+ "add x19, %x[qp], %[per_layer_mul]\n"
+ "ld1rw { z5.s }, p2/Z, [x19]\n"
+ "add x19, %x[qp], %[per_layer_right_shift]\n"
".inst 0xc0904b41 // addha za1.s, p2/M, p2/M, z26.s\n"
- "ld1rw { z4.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[c_offset]\n"
- "add x21, %x[qp], %[minval]\n"
+ "ld1rw { z4.s }, p2/Z, [x19]\n"
+ "add x19, %x[qp], %[c_offset]\n"
+ "add x20, %x[qp], %[minval]\n"
".inst 0xc0904b42 // addha za2.s, p2/M, p2/M, z26.s\n"
- "ld1rw { z6.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[maxval]\n"
+ "ld1rw { z6.s }, p2/Z, [x19]\n"
+ "add x19, %x[qp], %[maxval]\n"
".inst 0xc0904b43 // addha za3.s, p2/M, p2/M, z26.s\n"
- "ld1rw { z21.s }, p2/Z, [x21]\n"
+ "ld1rw { z21.s }, p2/Z, [x20]\n"
".inst 0xc0062c1c // mova { z28.d-z31.d }, za.d[x9, #0]\n"
".inst 0xc1a5ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z5.s\n"
".inst 0xc0062c2c // mova { z12.d-z15.d }, za.d[x9, #1]\n"
@@ -630,7 +630,7 @@ void sme2_gemv_s8qa_dot_16VL (
".inst 0xc0062c68 // mova { z8.d-z11.d }, za.d[x9, #3]\n"
".inst 0xc1a5ac08 // sqdmulh { z8.s-z11.s }, { z8.s-z11.s }, z5.s\n"
".inst 0xc1a4aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
- "ld1rw { z16.s }, p2/Z, [x20]\n"
+ "ld1rw { z16.s }, p2/Z, [x19]\n"
".inst 0xc1a4aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z4.s\n"
".inst 0xc1a4aa20 // srshl { z0.s-z3.s }, { z0.s-z3.s }, z4.s\n"
".inst 0xc1a4aa28 // srshl { z8.s-z11.s }, { z8.s-z11.s }, z4.s\n"
@@ -651,24 +651,24 @@ void sme2_gemv_s8qa_dot_16VL (
"uzp1 z8.h, z8.h, z9.h\n"
"uzp1 z9.h, z10.h, z11.h\n"
"uzp1 z28.b, z28.b, z29.b\n"
- "st1b { z28.b }, p2, [x25]\n"
+ "st1b { z28.b }, p2, [x24]\n"
"uzp1 z12.b, z12.b, z13.b\n"
- "st1b { z12.b }, p2, [x25, #1, MUL VL]\n"
+ "st1b { z12.b }, p2, [x24, #1, MUL VL]\n"
"uzp1 z0.b, z0.b, z1.b\n"
"uzp1 z8.b, z8.b, z9.b\n"
- "st1b { z0.b }, p2, [x25, #2, MUL VL]\n"
- "st1b { z8.b }, p1, [x25, #3, MUL VL]\n"
- "addvl x25, x25, #4\n"
+ "st1b { z0.b }, p2, [x24, #2, MUL VL]\n"
+ "st1b { z8.b }, p1, [x24, #3, MUL VL]\n"
+ "addvl x24, x24, #4\n"
"43:" // Width 4: Output done
- "subs x27, x27, #0x4\n"
- "sub %x[N], %x[N], x28, LSL #2\n"
+ "subs x26, x26, #0x4\n"
+ "sub %x[N], %x[N], x27, LSL #2\n"
"bgt 4b\n"
"44:" // Exit
".inst 0xd503467f // SMSTOP\n"
"ptrue p2.b\n"
: [N] "+&r" (N), [flags] "+&r" (flags)
: [A_ptr] "r" (A_ptr), [B_ptr] "r" (B_ptr), [K] "r" (K), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [output_ptr] "r" (output_ptr), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_u8qa_dot_16VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_u8qa_dot_16VL/generic.cpp
index dfdc4ea289..9822f637fb 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_u8qa_dot_16VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_u8qa_dot_16VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __ARM_FEATURE_SVE
#ifdef ARM_COMPUTE_ENABLE_SME2
@@ -54,257 +54,257 @@ void sme2_gemv_u8qa_dot_16VL (
__asm__ __volatile__(
"ptrue p2.b\n"
".inst 0xd503477f // SMSTART ZA\n"
- "cntw x28, ALL, MUL #4\n"
- "add x27, %x[N], x28\n"
- "sub x27, x27, #0x1\n"
- "udiv x27, x27, x28\n"
- "add x22, x27, #0x3\n"
- "and x22, x22, #0xfffffffffffffffc\n"
- "mul x22, x22, x28\n"
+ "cntw x27, ALL, MUL #4\n"
+ "add x26, %x[N], x27\n"
+ "sub x26, x26, #0x1\n"
+ "udiv x26, x26, x27\n"
+ "add x21, x26, #0x3\n"
+ "and x21, x21, #0xfffffffffffffffc\n"
+ "mul x21, x21, x27\n"
"mov x9, #0x0\n"
- "mov x26, %x[B_ptr]\n"
- "mov x25, %x[output_ptr]\n"
+ "mov x25, %x[B_ptr]\n"
+ "mov x24, %x[output_ptr]\n"
"ptrue p2.b\n"
".inst 0x25207810 // ptrue pn8.b\n"
- "mul x22, x22, %x[K]\n"
- "mov x21, #0x1\n"
+ "mul x21, x21, %x[K]\n"
+ "mov x20, #0x1\n"
"1:" // RHS size check loop
- "cmp x22, #0x200000\n"
+ "cmp x21, #0x200000\n"
"blt 2f\n"
- "tbnz x22, #0, 3f\n"
- "lsr x22, x22, #0x1\n"
- "lsl x21, x21, #0x1\n"
+ "tbnz x21, #0, 3f\n"
+ "lsr x21, x21, #0x1\n"
+ "lsl x20, x20, #0x1\n"
"b 1b\n"
"2:" // RHS do prefetch
- "lsl x20, x22, #0x26\n"
- "sub x21, x21, #0x1\n"
- "lsl x21, x21, #0x16\n"
- "orr x22, x22, x20\n"
- "orr x22, x22, x21\n"
- ".inst 0xf8b64b5a // rprfm pldonce, x22, [x26]\n"
+ "lsl x19, x21, #0x26\n"
+ "sub x20, x20, #0x1\n"
+ "lsl x20, x20, #0x16\n"
+ "orr x21, x21, x19\n"
+ "orr x21, x21, x20\n"
+ ".inst 0xf8b54b3a // rprfm pldonce, x21, [x25]\n"
"3:" // RHS prefetch exit
- "mov x24, %x[col_bias]\n"
+ "mov x23, %x[col_bias]\n"
"mov z26.s, #0x0\n"
"mov z24.b, #0x1\n"
"bic %x[flags], %x[flags], #0x80000000\n"
"4:" // Column loop
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"bge 34f\n"
- "cmp x27, #0x2\n"
+ "cmp x26, #0x2\n"
"bgt 24f\n"
"beq 14f\n"
- "mov x23, %x[A_ptr]\n"
- "mov x22, %x[K]\n"
- "mov x20, %x[N]\n"
+ "mov x22, %x[A_ptr]\n"
"mov x21, %x[K]\n"
- ".inst 0xf8b64af8 // rprfm pldmany, x22, [x23]\n"
- "whilelt p1.b, XZR, x20\n"
- "cbz x24, 5f\n"
- ".inst 0xa040c304 // ld1w { z4.s-z7.s }, pn8.b/Z, [x24]\n"
+ "mov x19, %x[N]\n"
+ "mov x20, %x[K]\n"
+ ".inst 0xf8b54ad8 // rprfm pldmany, x21, [x22]\n"
+ "whilelt p1.b, XZR, x19\n"
+ "cbz x23, 5f\n"
+ ".inst 0xa040c2e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x23]\n"
".inst 0xc0042c80 // mova za.d[x9, #0], { z4.d-z7.d }\n"
"b 6f\n"
"5:" // Width 1: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"6:" // Width 1: setup done
- "cmp x21, #0x10\n"
+ "cmp x20, #0x10\n"
"ble 9f\n"
"7:" // Width 1: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x21\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "add x23, x23, #0x10\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
+ "whilelt p0.b, XZR, x20\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0xa0408331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25]\n"
".inst 0xc153b230 // udot za.s[x9, 0], { z16.b-z19.b }, z3.b[0]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
".inst 0xc153b6b0 // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[1]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
".inst 0xc153bab0 // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[2]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
".inst 0xc153beb0 // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"tbnz %x[flags], #31, 8f\n"
"udot z26.s, z3.b, z24.b\n"
"8:" // Width 1: Multiply loop: unique 1: skip row sum
- "sub x21, x21, #0x10\n"
- "cmp x21, #0x10\n"
+ "sub x20, x20, #0x10\n"
+ "cmp x20, #0x10\n"
"bgt 7b\n"
"9:" // Width 1: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x21\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "subs x21, x21, #0x4\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
- "add x23, x23, #0x10\n"
+ "whilelt p0.b, XZR, x20\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
+ "subs x20, x20, #0x4\n"
+ ".inst 0xa0408331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25]\n"
+ "add x22, x22, #0x10\n"
".inst 0xc153b230 // udot za.s[x9, 0], { z16.b-z19.b }, z3.b[0]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 10f\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
- "subs x21, x21, #0x4\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+ "subs x20, x20, #0x4\n"
".inst 0xc153b6b0 // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[1]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 10f\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
- "subs x21, x21, #0x4\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+ "subs x20, x20, #0x4\n"
".inst 0xc153bab0 // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[2]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 10f\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
".inst 0xc153beb0 // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"10:" // Width 1: Multiply loop: multiply skip
"tbnz %x[flags], #31, 11f\n"
"udot z26.s, z3.b, z24.b\n"
"11:" // Width 1: Multiply loop: unique 2: skip row sum
"tbnz %x[flags], #31, 12f\n"
- "add x21, %x[qp], %[b_offset]\n"
- "mov x20, #0x4\n"
- "ld1rw { z10.s }, p2/Z, [x21]\n"
+ "add x20, %x[qp], %[b_offset]\n"
+ "mov x19, #0x4\n"
+ "ld1rw { z10.s }, p2/Z, [x20]\n"
"neg z10.s, p2/M, z10.s\n"
- "whilelt p0.s, XZR, x20\n"
+ "whilelt p0.s, XZR, x19\n"
"uaddv d26, p0, z26.s\n"
"mov z26.s, z26.s[0]\n"
"mul z26.s, p2/M, z26.s, z10.s\n"
"orr %x[flags], %x[flags], #0x80000000\n"
"12:" // Width 1: skip row sum fixup
".inst 0xc0904b40 // addha za0.s, p2/M, p2/M, z26.s\n"
- "add x20, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z5.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[per_layer_right_shift]\n"
+ "add x19, %x[qp], %[per_layer_mul]\n"
+ "ld1rw { z5.s }, p2/Z, [x19]\n"
+ "add x19, %x[qp], %[per_layer_right_shift]\n"
".inst 0xc0904b41 // addha za1.s, p2/M, p2/M, z26.s\n"
- "ld1rw { z4.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[c_offset]\n"
- "add x21, %x[qp], %[minval]\n"
+ "ld1rw { z4.s }, p2/Z, [x19]\n"
+ "add x19, %x[qp], %[c_offset]\n"
+ "add x20, %x[qp], %[minval]\n"
".inst 0xc0904b42 // addha za2.s, p2/M, p2/M, z26.s\n"
- "ld1rw { z6.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[maxval]\n"
+ "ld1rw { z6.s }, p2/Z, [x19]\n"
+ "add x19, %x[qp], %[maxval]\n"
".inst 0xc0904b43 // addha za3.s, p2/M, p2/M, z26.s\n"
- "ld1rw { z21.s }, p2/Z, [x21]\n"
+ "ld1rw { z21.s }, p2/Z, [x20]\n"
".inst 0xc0062c1c // mova { z28.d-z31.d }, za.d[x9, #0]\n"
".inst 0xc1a5ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z5.s\n"
".inst 0xc1a4aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
- "ld1rw { z16.s }, p2/Z, [x20]\n"
+ "ld1rw { z16.s }, p2/Z, [x19]\n"
".inst 0xc1a6ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z6.s\n"
".inst 0xc1b0cebc // sclamp { z28.s-z31.s }, z21.s, z16.s\n"
"uzp1 z28.h, z28.h, z29.h\n"
"uzp1 z29.h, z30.h, z31.h\n"
"uzp1 z28.b, z28.b, z29.b\n"
- "st1b { z28.b }, p1, [x25]\n"
- "addvl x25, x25, #1\n"
+ "st1b { z28.b }, p1, [x24]\n"
+ "addvl x24, x24, #1\n"
"13:" // Width 1: Output done
"b 44f\n"
"14:" // Width 2
- "mov x23, %x[A_ptr]\n"
- "mov x22, %x[K]\n"
- "sub x20, %x[N], x28\n"
+ "mov x22, %x[A_ptr]\n"
"mov x21, %x[K]\n"
- ".inst 0xf8b64af8 // rprfm pldmany, x22, [x23]\n"
- "whilelt p1.b, XZR, x20\n"
- "cbz x24, 15f\n"
- ".inst 0xa040c304 // ld1w { z4.s-z7.s }, pn8.b/Z, [x24]\n"
+ "sub x19, %x[N], x27\n"
+ "mov x20, %x[K]\n"
+ ".inst 0xf8b54ad8 // rprfm pldmany, x21, [x22]\n"
+ "whilelt p1.b, XZR, x19\n"
+ "cbz x23, 15f\n"
+ ".inst 0xa040c2e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x23]\n"
".inst 0xc0042c80 // mova za.d[x9, #0], { z4.d-z7.d }\n"
- ".inst 0xa041c314 // ld1w { z20.s-z23.s }, pn8.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xa041c2f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x23, #0x4, MUL VL]\n"
".inst 0xc0042e81 // mova za.d[x9, #1], { z20.d-z23.d }\n"
"b 16f\n"
"15:" // Width 2: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"16:" // Width 2: setup done
- "cmp x21, #0x10\n"
+ "cmp x20, #0x10\n"
"ble 19f\n"
"17:" // Width 2: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x21\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "add x23, x23, #0x10\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
+ "whilelt p0.b, XZR, x20\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0xa0408331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25]\n"
".inst 0xc153b230 // udot za.s[x9, 0], { z16.b-z19.b }, z3.b[0]\n"
- ".inst 0xa0418345 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418325 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b0b1 // udot za.s[x9, 1], { z4.b-z7.b }, z3.b[0]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
".inst 0xc153b6b0 // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[1]\n"
- ".inst 0xa0418349 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418329 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b531 // udot za.s[x9, 1], { z8.b-z11.b }, z3.b[1]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
".inst 0xc153bab0 // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[2]\n"
- ".inst 0xa0418345 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418325 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b8b1 // udot za.s[x9, 1], { z4.b-z7.b }, z3.b[2]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
".inst 0xc153beb0 // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[3]\n"
- ".inst 0xa0418355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153beb1 // udot za.s[x9, 1], { z20.b-z23.b }, z3.b[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"tbnz %x[flags], #31, 18f\n"
"udot z26.s, z3.b, z24.b\n"
"18:" // Width 2: Multiply loop: unique 3: skip row sum
- "sub x21, x21, #0x10\n"
- "cmp x21, #0x10\n"
+ "sub x20, x20, #0x10\n"
+ "cmp x20, #0x10\n"
"bgt 17b\n"
"19:" // Width 2: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x21\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "subs x21, x21, #0x4\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
- "add x23, x23, #0x10\n"
+ "whilelt p0.b, XZR, x20\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
+ "subs x20, x20, #0x4\n"
+ ".inst 0xa0408331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25]\n"
+ "add x22, x22, #0x10\n"
".inst 0xc153b230 // udot za.s[x9, 0], { z16.b-z19.b }, z3.b[0]\n"
- ".inst 0xa0418345 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418325 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b0b1 // udot za.s[x9, 1], { z4.b-z7.b }, z3.b[0]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 20f\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
- "subs x21, x21, #0x4\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+ "subs x20, x20, #0x4\n"
".inst 0xc153b6b0 // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[1]\n"
- ".inst 0xa0418349 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418329 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b531 // udot za.s[x9, 1], { z8.b-z11.b }, z3.b[1]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 20f\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
- "subs x21, x21, #0x4\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+ "subs x20, x20, #0x4\n"
".inst 0xc153bab0 // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[2]\n"
- ".inst 0xa0418345 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418325 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b8b1 // udot za.s[x9, 1], { z4.b-z7.b }, z3.b[2]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 20f\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
".inst 0xc153beb0 // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[3]\n"
- ".inst 0xa0418355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153beb1 // udot za.s[x9, 1], { z20.b-z23.b }, z3.b[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"20:" // Width 2: Multiply loop: multiply skip
"tbnz %x[flags], #31, 21f\n"
"udot z26.s, z3.b, z24.b\n"
"21:" // Width 2: Multiply loop: unique 4: skip row sum
"tbnz %x[flags], #31, 22f\n"
- "add x21, %x[qp], %[b_offset]\n"
- "mov x20, #0x4\n"
- "ld1rw { z10.s }, p2/Z, [x21]\n"
+ "add x20, %x[qp], %[b_offset]\n"
+ "mov x19, #0x4\n"
+ "ld1rw { z10.s }, p2/Z, [x20]\n"
"neg z10.s, p2/M, z10.s\n"
- "whilelt p0.s, XZR, x20\n"
+ "whilelt p0.s, XZR, x19\n"
"uaddv d26, p0, z26.s\n"
"mov z26.s, z26.s[0]\n"
"mul z26.s, p2/M, z26.s, z10.s\n"
"orr %x[flags], %x[flags], #0x80000000\n"
"22:" // Width 2: skip row sum fixup
".inst 0xc0904b40 // addha za0.s, p2/M, p2/M, z26.s\n"
- "add x20, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z5.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[per_layer_right_shift]\n"
+ "add x19, %x[qp], %[per_layer_mul]\n"
+ "ld1rw { z5.s }, p2/Z, [x19]\n"
+ "add x19, %x[qp], %[per_layer_right_shift]\n"
".inst 0xc0904b41 // addha za1.s, p2/M, p2/M, z26.s\n"
- "ld1rw { z4.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[c_offset]\n"
- "add x21, %x[qp], %[minval]\n"
+ "ld1rw { z4.s }, p2/Z, [x19]\n"
+ "add x19, %x[qp], %[c_offset]\n"
+ "add x20, %x[qp], %[minval]\n"
".inst 0xc0904b42 // addha za2.s, p2/M, p2/M, z26.s\n"
- "ld1rw { z6.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[maxval]\n"
+ "ld1rw { z6.s }, p2/Z, [x19]\n"
+ "add x19, %x[qp], %[maxval]\n"
".inst 0xc0904b43 // addha za3.s, p2/M, p2/M, z26.s\n"
- "ld1rw { z21.s }, p2/Z, [x21]\n"
+ "ld1rw { z21.s }, p2/Z, [x20]\n"
".inst 0xc0062c1c // mova { z28.d-z31.d }, za.d[x9, #0]\n"
".inst 0xc1a5ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z5.s\n"
".inst 0xc0062c2c // mova { z12.d-z15.d }, za.d[x9, #1]\n"
".inst 0xc1a5ac0c // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z5.s\n"
".inst 0xc1a4aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
- "ld1rw { z16.s }, p2/Z, [x20]\n"
+ "ld1rw { z16.s }, p2/Z, [x19]\n"
".inst 0xc1a4aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z4.s\n"
".inst 0xc1a6ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z6.s\n"
".inst 0xc1a6ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z6.s\n"
@@ -315,137 +315,137 @@ void sme2_gemv_u8qa_dot_16VL (
"uzp1 z12.h, z12.h, z13.h\n"
"uzp1 z13.h, z14.h, z15.h\n"
"uzp1 z28.b, z28.b, z29.b\n"
- "st1b { z28.b }, p2, [x25]\n"
+ "st1b { z28.b }, p2, [x24]\n"
"uzp1 z12.b, z12.b, z13.b\n"
- "st1b { z12.b }, p1, [x25, #1, MUL VL]\n"
- "addvl x25, x25, #2\n"
+ "st1b { z12.b }, p1, [x24, #1, MUL VL]\n"
+ "addvl x24, x24, #2\n"
"23:" // Width 2: Output done
"b 44f\n"
"24:" // Width 3
- "mov x20, #0x2\n"
- "mov x23, %x[A_ptr]\n"
- "mov x22, %x[K]\n"
- "msub x20, x28, x20, %x[N]\n"
+ "mov x19, #0x2\n"
+ "mov x22, %x[A_ptr]\n"
"mov x21, %x[K]\n"
- ".inst 0xf8b64af8 // rprfm pldmany, x22, [x23]\n"
- "whilelt p1.b, XZR, x20\n"
- "cbz x24, 25f\n"
- ".inst 0xa040c304 // ld1w { z4.s-z7.s }, pn8.b/Z, [x24]\n"
+ "msub x19, x27, x19, %x[N]\n"
+ "mov x20, %x[K]\n"
+ ".inst 0xf8b54ad8 // rprfm pldmany, x21, [x22]\n"
+ "whilelt p1.b, XZR, x19\n"
+ "cbz x23, 25f\n"
+ ".inst 0xa040c2e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x23]\n"
".inst 0xc0042c80 // mova za.d[x9, #0], { z4.d-z7.d }\n"
- ".inst 0xa041c314 // ld1w { z20.s-z23.s }, pn8.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xa041c2f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x23, #0x4, MUL VL]\n"
".inst 0xc0042e81 // mova za.d[x9, #1], { z20.d-z23.d }\n"
- ".inst 0xa042c314 // ld1w { z20.s-z23.s }, pn8.b/Z, [x24, #0x8, MUL VL]\n"
+ ".inst 0xa042c2f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x23, #0x8, MUL VL]\n"
".inst 0xc0042e82 // mova za.d[x9, #2], { z20.d-z23.d }\n"
"b 26f\n"
"25:" // Width 3: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"26:" // Width 3: setup done
- "cmp x21, #0x10\n"
+ "cmp x20, #0x10\n"
"ble 29f\n"
"27:" // Width 3: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x21\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "add x23, x23, #0x10\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
+ "whilelt p0.b, XZR, x20\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0xa0408331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25]\n"
".inst 0xc153b230 // udot za.s[x9, 0], { z16.b-z19.b }, z3.b[0]\n"
- ".inst 0xa0418345 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418325 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b0b1 // udot za.s[x9, 1], { z4.b-z7.b }, z3.b[0]\n"
- ".inst 0xa042834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042832d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc153b1b2 // udot za.s[x9, 2], { z12.b-z15.b }, z3.b[0]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
".inst 0xc153b6b0 // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[1]\n"
- ".inst 0xa0418349 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418329 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b531 // udot za.s[x9, 1], { z8.b-z11.b }, z3.b[1]\n"
- ".inst 0xa0428351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa0428331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc153b632 // udot za.s[x9, 2], { z16.b-z19.b }, z3.b[1]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
".inst 0xc153bab0 // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[2]\n"
- ".inst 0xa0418345 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418325 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b8b1 // udot za.s[x9, 1], { z4.b-z7.b }, z3.b[2]\n"
- ".inst 0xa042834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042832d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc153b9b2 // udot za.s[x9, 2], { z12.b-z15.b }, z3.b[2]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
".inst 0xc153beb0 // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[3]\n"
- ".inst 0xa0418355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153beb1 // udot za.s[x9, 1], { z20.b-z23.b }, z3.b[3]\n"
- ".inst 0xa0428351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa0428331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc153be32 // udot za.s[x9, 2], { z16.b-z19.b }, z3.b[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"tbnz %x[flags], #31, 28f\n"
"udot z26.s, z3.b, z24.b\n"
"28:" // Width 3: Multiply loop: unique 5: skip row sum
- "sub x21, x21, #0x10\n"
- "cmp x21, #0x10\n"
+ "sub x20, x20, #0x10\n"
+ "cmp x20, #0x10\n"
"bgt 27b\n"
"29:" // Width 3: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x21\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "subs x21, x21, #0x4\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
- "add x23, x23, #0x10\n"
+ "whilelt p0.b, XZR, x20\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
+ "subs x20, x20, #0x4\n"
+ ".inst 0xa0408331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25]\n"
+ "add x22, x22, #0x10\n"
".inst 0xc153b230 // udot za.s[x9, 0], { z16.b-z19.b }, z3.b[0]\n"
- ".inst 0xa0418345 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418325 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b0b1 // udot za.s[x9, 1], { z4.b-z7.b }, z3.b[0]\n"
- ".inst 0xa042834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042832d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc153b1b2 // udot za.s[x9, 2], { z12.b-z15.b }, z3.b[0]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 30f\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
- "subs x21, x21, #0x4\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+ "subs x20, x20, #0x4\n"
".inst 0xc153b6b0 // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[1]\n"
- ".inst 0xa0418349 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418329 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b531 // udot za.s[x9, 1], { z8.b-z11.b }, z3.b[1]\n"
- ".inst 0xa0428351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa0428331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc153b632 // udot za.s[x9, 2], { z16.b-z19.b }, z3.b[1]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 30f\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
- "subs x21, x21, #0x4\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+ "subs x20, x20, #0x4\n"
".inst 0xc153bab0 // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[2]\n"
- ".inst 0xa0418345 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418325 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b8b1 // udot za.s[x9, 1], { z4.b-z7.b }, z3.b[2]\n"
- ".inst 0xa042834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042832d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc153b9b2 // udot za.s[x9, 2], { z12.b-z15.b }, z3.b[2]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 30f\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
".inst 0xc153beb0 // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[3]\n"
- ".inst 0xa0418355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153beb1 // udot za.s[x9, 1], { z20.b-z23.b }, z3.b[3]\n"
- ".inst 0xa0428351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa0428331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc153be32 // udot za.s[x9, 2], { z16.b-z19.b }, z3.b[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"30:" // Width 3: Multiply loop: multiply skip
"tbnz %x[flags], #31, 31f\n"
"udot z26.s, z3.b, z24.b\n"
"31:" // Width 3: Multiply loop: unique 6: skip row sum
"tbnz %x[flags], #31, 32f\n"
- "add x21, %x[qp], %[b_offset]\n"
- "mov x20, #0x4\n"
- "ld1rw { z10.s }, p2/Z, [x21]\n"
+ "add x20, %x[qp], %[b_offset]\n"
+ "mov x19, #0x4\n"
+ "ld1rw { z10.s }, p2/Z, [x20]\n"
"neg z10.s, p2/M, z10.s\n"
- "whilelt p0.s, XZR, x20\n"
+ "whilelt p0.s, XZR, x19\n"
"uaddv d26, p0, z26.s\n"
"mov z26.s, z26.s[0]\n"
"mul z26.s, p2/M, z26.s, z10.s\n"
"orr %x[flags], %x[flags], #0x80000000\n"
"32:" // Width 3: skip row sum fixup
".inst 0xc0904b40 // addha za0.s, p2/M, p2/M, z26.s\n"
- "add x20, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z5.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[per_layer_right_shift]\n"
+ "add x19, %x[qp], %[per_layer_mul]\n"
+ "ld1rw { z5.s }, p2/Z, [x19]\n"
+ "add x19, %x[qp], %[per_layer_right_shift]\n"
".inst 0xc0904b41 // addha za1.s, p2/M, p2/M, z26.s\n"
- "ld1rw { z4.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[c_offset]\n"
- "add x21, %x[qp], %[minval]\n"
+ "ld1rw { z4.s }, p2/Z, [x19]\n"
+ "add x19, %x[qp], %[c_offset]\n"
+ "add x20, %x[qp], %[minval]\n"
".inst 0xc0904b42 // addha za2.s, p2/M, p2/M, z26.s\n"
- "ld1rw { z6.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[maxval]\n"
+ "ld1rw { z6.s }, p2/Z, [x19]\n"
+ "add x19, %x[qp], %[maxval]\n"
".inst 0xc0904b43 // addha za3.s, p2/M, p2/M, z26.s\n"
- "ld1rw { z21.s }, p2/Z, [x21]\n"
+ "ld1rw { z21.s }, p2/Z, [x20]\n"
".inst 0xc0062c1c // mova { z28.d-z31.d }, za.d[x9, #0]\n"
".inst 0xc1a5ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z5.s\n"
".inst 0xc0062c2c // mova { z12.d-z15.d }, za.d[x9, #1]\n"
@@ -453,7 +453,7 @@ void sme2_gemv_u8qa_dot_16VL (
".inst 0xc0062c40 // mova { z0.d-z3.d }, za.d[x9, #2]\n"
".inst 0xc1a5ac00 // sqdmulh { z0.s-z3.s }, { z0.s-z3.s }, z5.s\n"
".inst 0xc1a4aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
- "ld1rw { z16.s }, p2/Z, [x20]\n"
+ "ld1rw { z16.s }, p2/Z, [x19]\n"
".inst 0xc1a4aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z4.s\n"
".inst 0xc1a4aa20 // srshl { z0.s-z3.s }, { z0.s-z3.s }, z4.s\n"
".inst 0xc1a6ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z6.s\n"
@@ -469,158 +469,158 @@ void sme2_gemv_u8qa_dot_16VL (
"uzp1 z0.h, z0.h, z1.h\n"
"uzp1 z1.h, z2.h, z3.h\n"
"uzp1 z28.b, z28.b, z29.b\n"
- "st1b { z28.b }, p2, [x25]\n"
+ "st1b { z28.b }, p2, [x24]\n"
"uzp1 z12.b, z12.b, z13.b\n"
- "st1b { z12.b }, p2, [x25, #1, MUL VL]\n"
+ "st1b { z12.b }, p2, [x24, #1, MUL VL]\n"
"uzp1 z0.b, z0.b, z1.b\n"
- "st1b { z0.b }, p1, [x25, #2, MUL VL]\n"
- "addvl x25, x25, #3\n"
+ "st1b { z0.b }, p1, [x24, #2, MUL VL]\n"
+ "addvl x24, x24, #3\n"
"33:" // Width 3: Output done
"b 44f\n"
"34:" // Width 4
- "mov x20, #0x3\n"
- "mov x23, %x[A_ptr]\n"
- "mov x22, %x[K]\n"
- "msub x20, x28, x20, %x[N]\n"
+ "mov x19, #0x3\n"
+ "mov x22, %x[A_ptr]\n"
"mov x21, %x[K]\n"
- ".inst 0xf8b64af8 // rprfm pldmany, x22, [x23]\n"
- "whilelt p1.b, XZR, x20\n"
- "cbz x24, 35f\n"
- ".inst 0xa040c304 // ld1w { z4.s-z7.s }, pn8.b/Z, [x24]\n"
+ "msub x19, x27, x19, %x[N]\n"
+ "mov x20, %x[K]\n"
+ ".inst 0xf8b54ad8 // rprfm pldmany, x21, [x22]\n"
+ "whilelt p1.b, XZR, x19\n"
+ "cbz x23, 35f\n"
+ ".inst 0xa040c2e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x23]\n"
".inst 0xc0042c80 // mova za.d[x9, #0], { z4.d-z7.d }\n"
- ".inst 0xa041c314 // ld1w { z20.s-z23.s }, pn8.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xa041c2f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x23, #0x4, MUL VL]\n"
".inst 0xc0042e81 // mova za.d[x9, #1], { z20.d-z23.d }\n"
- ".inst 0xa042c314 // ld1w { z20.s-z23.s }, pn8.b/Z, [x24, #0x8, MUL VL]\n"
+ ".inst 0xa042c2f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x23, #0x8, MUL VL]\n"
".inst 0xc0042e82 // mova za.d[x9, #2], { z20.d-z23.d }\n"
- ".inst 0xa043c310 // ld1w { z16.s-z19.s }, pn8.b/Z, [x24, #0xc, MUL VL]\n"
+ ".inst 0xa043c2f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x23, #0xc, MUL VL]\n"
".inst 0xc0042e03 // mova za.d[x9, #3], { z16.d-z19.d }\n"
- "addvl x24, x24, #16\n"
+ "addvl x23, x23, #16\n"
"b 36f\n"
"35:" // Width 4: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"36:" // Width 4: setup done
- "cmp x21, #0x10\n"
+ "cmp x20, #0x10\n"
"ble 39f\n"
"37:" // Width 4: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x21\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "add x23, x23, #0x10\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
+ "whilelt p0.b, XZR, x20\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0xa0408331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25]\n"
".inst 0xc153b230 // udot za.s[x9, 0], { z16.b-z19.b }, z3.b[0]\n"
- ".inst 0xa0418345 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418325 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b0b1 // udot za.s[x9, 1], { z4.b-z7.b }, z3.b[0]\n"
- ".inst 0xa042834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042832d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc153b1b2 // udot za.s[x9, 2], { z12.b-z15.b }, z3.b[0]\n"
- ".inst 0xa043834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043832d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc153b1b3 // udot za.s[x9, 3], { z12.b-z15.b }, z3.b[0]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
".inst 0xc153b6b0 // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[1]\n"
- ".inst 0xa0418349 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418329 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b531 // udot za.s[x9, 1], { z8.b-z11.b }, z3.b[1]\n"
- ".inst 0xa0428351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa0428331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc153b632 // udot za.s[x9, 2], { z16.b-z19.b }, z3.b[1]\n"
- ".inst 0xa043834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043832d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc153b5b3 // udot za.s[x9, 3], { z12.b-z15.b }, z3.b[1]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
".inst 0xc153bab0 // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[2]\n"
- ".inst 0xa0418345 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418325 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b8b1 // udot za.s[x9, 1], { z4.b-z7.b }, z3.b[2]\n"
- ".inst 0xa042834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042832d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc153b9b2 // udot za.s[x9, 2], { z12.b-z15.b }, z3.b[2]\n"
- ".inst 0xa043835d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043833d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc153bbb3 // udot za.s[x9, 3], { z28.b-z31.b }, z3.b[2]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ "addvl x25, x25, #16\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
".inst 0xc153beb0 // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[3]\n"
- ".inst 0xa0418355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153beb1 // udot za.s[x9, 1], { z20.b-z23.b }, z3.b[3]\n"
- ".inst 0xa0428351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa0428331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc153be32 // udot za.s[x9, 2], { z16.b-z19.b }, z3.b[3]\n"
- ".inst 0xa0438351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa0438331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc153be33 // udot za.s[x9, 3], { z16.b-z19.b }, z3.b[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"tbnz %x[flags], #31, 38f\n"
"udot z26.s, z3.b, z24.b\n"
"38:" // Width 4: Multiply loop: unique 7: skip row sum
- "sub x21, x21, #0x10\n"
- "cmp x21, #0x10\n"
+ "sub x20, x20, #0x10\n"
+ "cmp x20, #0x10\n"
"bgt 37b\n"
"39:" // Width 4: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x21\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "subs x21, x21, #0x4\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
- "add x23, x23, #0x10\n"
+ "whilelt p0.b, XZR, x20\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
+ "subs x20, x20, #0x4\n"
+ ".inst 0xa0408331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25]\n"
+ "add x22, x22, #0x10\n"
".inst 0xc153b230 // udot za.s[x9, 0], { z16.b-z19.b }, z3.b[0]\n"
- ".inst 0xa0418345 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418325 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b0b1 // udot za.s[x9, 1], { z4.b-z7.b }, z3.b[0]\n"
- ".inst 0xa042834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042832d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc153b1b2 // udot za.s[x9, 2], { z12.b-z15.b }, z3.b[0]\n"
- ".inst 0xa043834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043832d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc153b1b3 // udot za.s[x9, 3], { z12.b-z15.b }, z3.b[0]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 40f\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
- "subs x21, x21, #0x4\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+ "subs x20, x20, #0x4\n"
".inst 0xc153b6b0 // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[1]\n"
- ".inst 0xa0418349 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418329 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b531 // udot za.s[x9, 1], { z8.b-z11.b }, z3.b[1]\n"
- ".inst 0xa0428351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa0428331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc153b632 // udot za.s[x9, 2], { z16.b-z19.b }, z3.b[1]\n"
- ".inst 0xa043834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043832d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc153b5b3 // udot za.s[x9, 3], { z12.b-z15.b }, z3.b[1]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 40f\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
- "subs x21, x21, #0x4\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
+ "subs x20, x20, #0x4\n"
".inst 0xc153bab0 // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[2]\n"
- ".inst 0xa0418345 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418325 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153b8b1 // udot za.s[x9, 1], { z4.b-z7.b }, z3.b[2]\n"
- ".inst 0xa042834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa042832d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc153b9b2 // udot za.s[x9, 2], { z12.b-z15.b }, z3.b[2]\n"
- ".inst 0xa043835d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa043833d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc153bbb3 // udot za.s[x9, 3], { z28.b-z31.b }, z3.b[2]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"ble 40f\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ ".inst 0xa0408335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25]\n"
".inst 0xc153beb0 // udot za.s[x9, 0], { z20.b-z23.b }, z3.b[3]\n"
- ".inst 0xa0418355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0418335 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x25, #0x4, MUL VL]\n"
".inst 0xc153beb1 // udot za.s[x9, 1], { z20.b-z23.b }, z3.b[3]\n"
- ".inst 0xa0428351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa0428331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0x8, MUL VL]\n"
".inst 0xc153be32 // udot za.s[x9, 2], { z16.b-z19.b }, z3.b[3]\n"
- ".inst 0xa0438351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xa0438331 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x25, #0xc, MUL VL]\n"
".inst 0xc153be33 // udot za.s[x9, 3], { z16.b-z19.b }, z3.b[3]\n"
- "addvl x26, x26, #16\n"
+ "addvl x25, x25, #16\n"
"40:" // Width 4: Multiply loop: multiply skip
"tbnz %x[flags], #31, 41f\n"
"udot z26.s, z3.b, z24.b\n"
"41:" // Width 4: Multiply loop: unique 8: skip row sum
"tbnz %x[flags], #31, 42f\n"
- "add x21, %x[qp], %[b_offset]\n"
- "mov x20, #0x4\n"
- "ld1rw { z10.s }, p2/Z, [x21]\n"
+ "add x20, %x[qp], %[b_offset]\n"
+ "mov x19, #0x4\n"
+ "ld1rw { z10.s }, p2/Z, [x20]\n"
"neg z10.s, p2/M, z10.s\n"
- "whilelt p0.s, XZR, x20\n"
+ "whilelt p0.s, XZR, x19\n"
"uaddv d26, p0, z26.s\n"
"mov z26.s, z26.s[0]\n"
"mul z26.s, p2/M, z26.s, z10.s\n"
"orr %x[flags], %x[flags], #0x80000000\n"
"42:" // Width 4: skip row sum fixup
".inst 0xc0904b40 // addha za0.s, p2/M, p2/M, z26.s\n"
- "add x20, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z5.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[per_layer_right_shift]\n"
+ "add x19, %x[qp], %[per_layer_mul]\n"
+ "ld1rw { z5.s }, p2/Z, [x19]\n"
+ "add x19, %x[qp], %[per_layer_right_shift]\n"
".inst 0xc0904b41 // addha za1.s, p2/M, p2/M, z26.s\n"
- "ld1rw { z4.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[c_offset]\n"
- "add x21, %x[qp], %[minval]\n"
+ "ld1rw { z4.s }, p2/Z, [x19]\n"
+ "add x19, %x[qp], %[c_offset]\n"
+ "add x20, %x[qp], %[minval]\n"
".inst 0xc0904b42 // addha za2.s, p2/M, p2/M, z26.s\n"
- "ld1rw { z6.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[maxval]\n"
+ "ld1rw { z6.s }, p2/Z, [x19]\n"
+ "add x19, %x[qp], %[maxval]\n"
".inst 0xc0904b43 // addha za3.s, p2/M, p2/M, z26.s\n"
- "ld1rw { z21.s }, p2/Z, [x21]\n"
+ "ld1rw { z21.s }, p2/Z, [x20]\n"
".inst 0xc0062c1c // mova { z28.d-z31.d }, za.d[x9, #0]\n"
".inst 0xc1a5ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z5.s\n"
".inst 0xc0062c2c // mova { z12.d-z15.d }, za.d[x9, #1]\n"
@@ -630,7 +630,7 @@ void sme2_gemv_u8qa_dot_16VL (
".inst 0xc0062c68 // mova { z8.d-z11.d }, za.d[x9, #3]\n"
".inst 0xc1a5ac08 // sqdmulh { z8.s-z11.s }, { z8.s-z11.s }, z5.s\n"
".inst 0xc1a4aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
- "ld1rw { z16.s }, p2/Z, [x20]\n"
+ "ld1rw { z16.s }, p2/Z, [x19]\n"
".inst 0xc1a4aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z4.s\n"
".inst 0xc1a4aa20 // srshl { z0.s-z3.s }, { z0.s-z3.s }, z4.s\n"
".inst 0xc1a4aa28 // srshl { z8.s-z11.s }, { z8.s-z11.s }, z4.s\n"
@@ -651,24 +651,24 @@ void sme2_gemv_u8qa_dot_16VL (
"uzp1 z8.h, z8.h, z9.h\n"
"uzp1 z9.h, z10.h, z11.h\n"
"uzp1 z28.b, z28.b, z29.b\n"
- "st1b { z28.b }, p2, [x25]\n"
+ "st1b { z28.b }, p2, [x24]\n"
"uzp1 z12.b, z12.b, z13.b\n"
- "st1b { z12.b }, p2, [x25, #1, MUL VL]\n"
+ "st1b { z12.b }, p2, [x24, #1, MUL VL]\n"
"uzp1 z0.b, z0.b, z1.b\n"
"uzp1 z8.b, z8.b, z9.b\n"
- "st1b { z0.b }, p2, [x25, #2, MUL VL]\n"
- "st1b { z8.b }, p1, [x25, #3, MUL VL]\n"
- "addvl x25, x25, #4\n"
+ "st1b { z0.b }, p2, [x24, #2, MUL VL]\n"
+ "st1b { z8.b }, p1, [x24, #3, MUL VL]\n"
+ "addvl x24, x24, #4\n"
"43:" // Width 4: Output done
- "subs x27, x27, #0x4\n"
- "sub %x[N], %x[N], x28, LSL #2\n"
+ "subs x26, x26, #0x4\n"
+ "sub %x[N], %x[N], x27, LSL #2\n"
"bgt 4b\n"
"44:" // Exit
".inst 0xd503467f // SMSTOP\n"
"ptrue p2.b\n"
: [N] "+&r" (N), [flags] "+&r" (flags)
: [A_ptr] "r" (A_ptr), [B_ptr] "r" (B_ptr), [K] "r" (K), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [output_ptr] "r" (output_ptr), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL/generic.cpp
index c6eb858ade..bb8cad3357 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __ARM_FEATURE_SVE
#ifdef ARM_COMPUTE_ENABLE_SME2
@@ -103,108 +103,108 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL(const bfloat16 *const A, con
KernelArgs args(A, B, C, ldc, M, N, K, bias, act, accumulate, accumulator_buffer);
__asm__ __volatile__(
- "ldr x15, [%x[args], %[offsetof_flags]]\n"
+ "ldr x14, [%x[args], %[offsetof_flags]]\n"
".inst 0xd503477f // SMSTART ZA\n"
"ptrue p0.b\n"
".inst 0x25207811 // ptrue pn9.b\n"
- "ldr x14, [%x[args], %[offsetof_accumulator_buffer]]\n"
"ldr x13, [%x[args], %[offsetof_accumulator_buffer]]\n"
- "tbz x15, #0, 2f\n"
+ "ldr x11, [%x[args], %[offsetof_accumulator_buffer]]\n"
+ "tbz x14, #0, 2f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"1:" // Initial accumulator load from buffer: Loop
- ".inst 0xa040c5cc // ld1w { z12.s-z15.s }, pn9.b/Z, [x14]\n"
+ ".inst 0xa040c5ac // ld1w { z12.s-z15.s }, pn9.b/Z, [x13]\n"
".inst 0xc0840580 // mova za0h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xa041c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
+ ".inst 0xa041c5b0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x13, #0x4, MUL VL]\n"
".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa042c5c4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa042c5a4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x13, #0x8, MUL VL]\n"
".inst 0xc0840482 // mova za2h.s[x12], { z4.s-z7.s }\n"
- ".inst 0xa043c5c4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
+ ".inst 0xa043c5a4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x13, #0xc, MUL VL]\n"
".inst 0xc0840483 // mova za3h.s[x12], { z4.s-z7.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- "addvl x14, x14, #16\n"
+ "cmp x12, x19\n"
+ "addvl x13, x13, #16\n"
"blt 1b\n"
"2:" // Initial accumulator load from buffer: End
- "ldr w11, [%x[args], %[offsetof_M]]\n"
- "mov x10, #0x0\n"
+ "ldr w10, [%x[args], %[offsetof_M]]\n"
"mov x9, #0x0\n"
- "ldr w28, [%x[args], %[offsetof_N]]\n"
- "ldr x27, [%x[args], %[offsetof_A]]\n"
+ "mov x28, #0x0\n"
+ "ldr w27, [%x[args], %[offsetof_N]]\n"
+ "ldr x26, [%x[args], %[offsetof_A]]\n"
"3:" // M and N loop
- "mov x26, x27\n"
- ".inst 0x25bc6530 // whilelt pn8.s, x9, x28, VLx4\n"
- "tbnz x15, #0, 4f\n"
- "ldr x20, [%x[args], %[offsetof_bias]]\n"
+ "mov x25, x26\n"
+ ".inst 0x25bb6790 // whilelt pn8.s, x28, x27, VLx4\n"
+ "tbnz x14, #0, 4f\n"
+ "ldr x19, [%x[args], %[offsetof_bias]]\n"
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
- "cbz x20, 5f\n"
+ "cbz x19, 5f\n"
"fmov z21.s, #1.0\n"
- ".inst 0xa009c29d // ldnt1w { z28.s-z31.s }, p8/Z, [x20, x9, LSL #2]\n"
+ ".inst 0xa01cc27d // ldnt1w { z28.s-z31.s }, p8/Z, [x19, x28, LSL #2]\n"
".inst 0x809c02a0 // fmopa za0.s, p0/M, p0/M, z21.s, z28.s\n"
".inst 0x809d02a1 // fmopa za1.s, p0/M, p0/M, z21.s, z29.s\n"
".inst 0x809e02a2 // fmopa za2.s, p0/M, p0/M, z21.s, z30.s\n"
".inst 0x809f02a3 // fmopa za3.s, p0/M, p0/M, z21.s, z31.s\n"
"4:" // Prepare accumulators: Test for last block
+ "mov x19, x28\n"
"mov x20, x9\n"
- "mov x21, x10\n"
- "incw x20, ALL, MUL #4\n"
- "incw x21\n"
- "cmp x20, x28\n"
- "csel x21, x10, x21, LT\n"
- "mov x20, x15\n"
- "bfm x15, XZR, #0x0, #0x0 // bfc x15, #0x0, #0x1\n"
- "cmp x21, x11\n"
- "csel x15, x20, x15, LT\n"
+ "incw x19, ALL, MUL #4\n"
+ "incw x20\n"
+ "cmp x19, x27\n"
+ "csel x20, x9, x20, LT\n"
+ "mov x19, x14\n"
+ "bfm x14, XZR, #0x0, #0x0 // bfc x14, #0x0, #0x1\n"
+ "cmp x20, x10\n"
+ "csel x14, x19, x14, LT\n"
"5:" // Prepare accumulators: End
- "ldr x20, [%x[args], %[offsetof_K]]\n"
- "add x20, x20, #0x1\n"
- "lsr x20, x20, #0x1\n"
- "ldr x23, [%x[args], %[offsetof_B]]\n"
- "lsr x22, x20, #0x2\n"
- "and x21, x20, #0x3\n"
- "ldr x20, [%x[args], %[offsetof_kstride_bytes]]\n"
- "madd x23, x9, x20, x23\n" // bptr = B + n * kstride_bytes
- "cbz x22, 8f\n"
- "subs x22, x22, #0x1\n"
- "ld1h { z0.h }, p0/Z, [x26]\n"
- ".inst 0xa140a6fb // ldnt1h { z19.h, z23.h, z27.h, z31.h }, pn9.b/Z, [x23]\n"
- "ld1h { z13.h }, p0/Z, [x26, #1, MUL VL]\n"
- ".inst 0xa141a6ea // ldnt1h { z2.h, z6.h, z10.h, z14.h }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
- "ld1h { z12.h }, p0/Z, [x26, #2, MUL VL]\n"
- ".inst 0xa142a6eb // ldnt1h { z3.h, z7.h, z11.h, z15.h }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
- "ld1h { z26.h }, p0/Z, [x26, #3, MUL VL]\n"
- "addvl x26, x26, #4\n"
- ".inst 0xa143a6f8 // ldnt1h { z16.h, z20.h, z24.h, z28.h }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
- "addvl x23, x23, #16\n"
+ "ldr x19, [%x[args], %[offsetof_K]]\n"
+ "add x19, x19, #0x1\n"
+ "lsr x19, x19, #0x1\n"
+ "ldr x22, [%x[args], %[offsetof_B]]\n"
+ "lsr x21, x19, #0x2\n"
+ "and x20, x19, #0x3\n"
+ "ldr x19, [%x[args], %[offsetof_kstride_bytes]]\n"
+ "madd x22, x28, x19, x22\n" // bptr = B + n * kstride_bytes
+ "cbz x21, 8f\n"
+ "subs x21, x21, #0x1\n"
+ "ld1h { z0.h }, p0/Z, [x25]\n"
+ ".inst 0xa140a6db // ldnt1h { z19.h, z23.h, z27.h, z31.h }, pn9.b/Z, [x22]\n"
+ "ld1h { z13.h }, p0/Z, [x25, #1, MUL VL]\n"
+ ".inst 0xa141a6ca // ldnt1h { z2.h, z6.h, z10.h, z14.h }, pn9.b/Z, [x22, #0x4, MUL VL]\n"
+ "ld1h { z12.h }, p0/Z, [x25, #2, MUL VL]\n"
+ ".inst 0xa142a6cb // ldnt1h { z3.h, z7.h, z11.h, z15.h }, pn9.b/Z, [x22, #0x8, MUL VL]\n"
+ "ld1h { z26.h }, p0/Z, [x25, #3, MUL VL]\n"
+ "addvl x25, x25, #4\n"
+ ".inst 0xa143a6d8 // ldnt1h { z16.h, z20.h, z24.h, z28.h }, pn9.b/Z, [x22, #0xc, MUL VL]\n"
+ "addvl x22, x22, #16\n"
"ble 7f\n"
"6:" // K loop
".inst 0x81930000 // bfmopa za0.s, p0/M, p0/M, z0.h, z19.h\n"
- "subs x22, x22, #0x1\n"
+ "subs x21, x21, #0x1\n"
".inst 0x81970001 // bfmopa za1.s, p0/M, p0/M, z0.h, z23.h\n"
".inst 0x819b0002 // bfmopa za2.s, p0/M, p0/M, z0.h, z27.h\n"
".inst 0x819f0003 // bfmopa za3.s, p0/M, p0/M, z0.h, z31.h\n"
- "ld1h { z0.h }, p0/Z, [x26]\n"
+ "ld1h { z0.h }, p0/Z, [x25]\n"
".inst 0x818201a0 // bfmopa za0.s, p0/M, p0/M, z13.h, z2.h\n"
- ".inst 0xa140a6fb // ldnt1h { z19.h, z23.h, z27.h, z31.h }, pn9.b/Z, [x23]\n"
+ ".inst 0xa140a6db // ldnt1h { z19.h, z23.h, z27.h, z31.h }, pn9.b/Z, [x22]\n"
".inst 0x818601a1 // bfmopa za1.s, p0/M, p0/M, z13.h, z6.h\n"
".inst 0x818a01a2 // bfmopa za2.s, p0/M, p0/M, z13.h, z10.h\n"
".inst 0x818e01a3 // bfmopa za3.s, p0/M, p0/M, z13.h, z14.h\n"
- "ld1h { z13.h }, p0/Z, [x26, #1, MUL VL]\n"
+ "ld1h { z13.h }, p0/Z, [x25, #1, MUL VL]\n"
".inst 0x81830180 // bfmopa za0.s, p0/M, p0/M, z12.h, z3.h\n"
- ".inst 0xa141a6ea // ldnt1h { z2.h, z6.h, z10.h, z14.h }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa141a6ca // ldnt1h { z2.h, z6.h, z10.h, z14.h }, pn9.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0x81870181 // bfmopa za1.s, p0/M, p0/M, z12.h, z7.h\n"
".inst 0x818b0182 // bfmopa za2.s, p0/M, p0/M, z12.h, z11.h\n"
".inst 0x818f0183 // bfmopa za3.s, p0/M, p0/M, z12.h, z15.h\n"
- "ld1h { z12.h }, p0/Z, [x26, #2, MUL VL]\n"
- ".inst 0xa142a6eb // ldnt1h { z3.h, z7.h, z11.h, z15.h }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
+ "ld1h { z12.h }, p0/Z, [x25, #2, MUL VL]\n"
+ ".inst 0xa142a6cb // ldnt1h { z3.h, z7.h, z11.h, z15.h }, pn9.b/Z, [x22, #0x8, MUL VL]\n"
".inst 0x81900340 // bfmopa za0.s, p0/M, p0/M, z26.h, z16.h\n"
".inst 0x81940341 // bfmopa za1.s, p0/M, p0/M, z26.h, z20.h\n"
".inst 0x81980342 // bfmopa za2.s, p0/M, p0/M, z26.h, z24.h\n"
".inst 0x819c0343 // bfmopa za3.s, p0/M, p0/M, z26.h, z28.h\n"
- "ld1h { z26.h }, p0/Z, [x26, #3, MUL VL]\n"
- "addvl x26, x26, #4\n"
- ".inst 0xa143a6f8 // ldnt1h { z16.h, z20.h, z24.h, z28.h }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
- "addvl x23, x23, #16\n"
+ "ld1h { z26.h }, p0/Z, [x25, #3, MUL VL]\n"
+ "addvl x25, x25, #4\n"
+ ".inst 0xa143a6d8 // ldnt1h { z16.h, z20.h, z24.h, z28.h }, pn9.b/Z, [x22, #0xc, MUL VL]\n"
+ "addvl x22, x22, #16\n"
"bgt 6b\n"
"7:" // K loop tail
".inst 0x81930000 // bfmopa za0.s, p0/M, p0/M, z0.h, z19.h\n"
@@ -224,123 +224,123 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL(const bfloat16 *const A, con
".inst 0x81980342 // bfmopa za2.s, p0/M, p0/M, z26.h, z24.h\n"
".inst 0x819c0343 // bfmopa za3.s, p0/M, p0/M, z26.h, z28.h\n"
"8:" // K oddments
- "cbz x21, 10f\n"
+ "cbz x20, 10f\n"
"9:" // K oddments: Loop
- "ld1h { z0.h }, p0/Z, [x26]\n"
- "subs x21, x21, #0x1\n"
- "addvl x26, x26, #1\n"
- ".inst 0xa140a6f3 // ld1h { z19.h, z23.h, z27.h, z31.h }, pn9.b/Z, [x23]\n"
- "addvl x23, x23, #4\n"
+ "ld1h { z0.h }, p0/Z, [x25]\n"
+ "subs x20, x20, #0x1\n"
+ "addvl x25, x25, #1\n"
+ ".inst 0xa140a6d3 // ld1h { z19.h, z23.h, z27.h, z31.h }, pn9.b/Z, [x22]\n"
+ "addvl x22, x22, #4\n"
".inst 0x81930000 // bfmopa za0.s, p0/M, p0/M, z0.h, z19.h\n"
".inst 0x81970001 // bfmopa za1.s, p0/M, p0/M, z0.h, z23.h\n"
".inst 0x819b0002 // bfmopa za2.s, p0/M, p0/M, z0.h, z27.h\n"
".inst 0x819f0003 // bfmopa za3.s, p0/M, p0/M, z0.h, z31.h\n"
"bgt 9b\n"
"10:" // K oddments: End
- "tbz x15, #1, 14f\n"
- "tbz x15, #0, 12f\n"
+ "tbz x14, #1, 14f\n"
+ "tbz x14, #0, 12f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"11:" // Store to partial result buffer: Store and refill: Loop
- ".inst 0xa040c5cc // ld1w { z12.s-z15.s }, pn9.b/Z, [x14]\n"
+ ".inst 0xa040c5ac // ld1w { z12.s-z15.s }, pn9.b/Z, [x13]\n"
".inst 0xc0860418 // mova { z24.s-z27.s }, za0h.s[x12]\n"
".inst 0xc0840580 // mova za0h.s[x12], { z12.s-z15.s }\n"
".inst 0xc0860434 // mova { z20.s-z23.s }, za1h.s[x12]\n"
- ".inst 0xa041c5dc // ld1w { z28.s-z31.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
+ ".inst 0xa041c5bc // ld1w { z28.s-z31.s }, pn9.b/Z, [x13, #0x4, MUL VL]\n"
".inst 0xc0840781 // mova za1h.s[x12], { z28.s-z31.s }\n"
".inst 0xc086045c // mova { z28.s-z31.s }, za2h.s[x12]\n"
".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
- ".inst 0xa042c5cc // ld1w { z12.s-z15.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa042c5ac // ld1w { z12.s-z15.s }, pn9.b/Z, [x13, #0x8, MUL VL]\n"
".inst 0xc0840582 // mova za2h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xa043c5cc // ld1w { z12.s-z15.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
+ ".inst 0xa043c5ac // ld1w { z12.s-z15.s }, pn9.b/Z, [x13, #0xc, MUL VL]\n"
".inst 0xc0840583 // mova za3h.s[x12], { z12.s-z15.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- ".inst 0xa060c5b8 // st1w { z24.s-z27.s }, pn9.b, [x13]\n"
- "addvl x14, x14, #16\n"
- ".inst 0xa061c5b4 // st1w { z20.s-z23.s }, pn9.b, [x13, #0x4, MUL VL]\n"
- ".inst 0xa062c5bc // st1w { z28.s-z31.s }, pn9.b, [x13, #0x8, MUL VL]\n"
- ".inst 0xa063c5b0 // st1w { z16.s-z19.s }, pn9.b, [x13, #0xc, MUL VL]\n"
+ "cmp x12, x19\n"
+ ".inst 0xa060c578 // st1w { z24.s-z27.s }, pn9.b, [x11]\n"
"addvl x13, x13, #16\n"
+ ".inst 0xa061c574 // st1w { z20.s-z23.s }, pn9.b, [x11, #0x4, MUL VL]\n"
+ ".inst 0xa062c57c // st1w { z28.s-z31.s }, pn9.b, [x11, #0x8, MUL VL]\n"
+ ".inst 0xa063c570 // st1w { z16.s-z19.s }, pn9.b, [x11, #0xc, MUL VL]\n"
+ "addvl x11, x11, #16\n"
"blt 11b\n"
"b 24f\n"
"12:" // Store to partial result buffer: Store only
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"13:" // Store to partial result buffer: Store only: Loop
".inst 0xc086040c // mova { z12.s-z15.s }, za0h.s[x12]\n"
".inst 0xc086043c // mova { z28.s-z31.s }, za1h.s[x12]\n"
- ".inst 0xa060c5ac // st1w { z12.s-z15.s }, pn9.b, [x13]\n"
+ ".inst 0xa060c56c // st1w { z12.s-z15.s }, pn9.b, [x11]\n"
".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
".inst 0xc0860464 // mova { z4.s-z7.s }, za3h.s[x12]\n"
- ".inst 0xa061c5bc // st1w { z28.s-z31.s }, pn9.b, [x13, #0x4, MUL VL]\n"
+ ".inst 0xa061c57c // st1w { z28.s-z31.s }, pn9.b, [x11, #0x4, MUL VL]\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- ".inst 0xa062c5b0 // st1w { z16.s-z19.s }, pn9.b, [x13, #0x8, MUL VL]\n"
- ".inst 0xa063c5a4 // st1w { z4.s-z7.s }, pn9.b, [x13, #0xc, MUL VL]\n"
- "addvl x13, x13, #16\n"
+ "cmp x12, x19\n"
+ ".inst 0xa062c570 // st1w { z16.s-z19.s }, pn9.b, [x11, #0x8, MUL VL]\n"
+ ".inst 0xa063c564 // st1w { z4.s-z7.s }, pn9.b, [x11, #0xc, MUL VL]\n"
+ "addvl x11, x11, #16\n"
"blt 13b\n"
"b 24f\n"
"14:" // Store to output array
- "ldr x25, [%x[args], %[offsetof_C]]\n"
- "add x25, x25, x9, LSL #2\n" // C += n
- "sub x24, x11, x10\n"
- "ldr x23, [%x[args], %[offsetof_ldcb]]\n"
- "madd x25, x10, x23, x25\n" // C += m * ldc
- "tbz x15, #2, 18f\n"
- "cntw x20\n"
- "cmp x24, x20\n"
- "csel x22, x24, x20, LT\n"
- "lsr x21, x22, #0x2\n"
+ "ldr x24, [%x[args], %[offsetof_C]]\n"
+ "add x24, x24, x28, LSL #2\n" // C += n
+ "sub x23, x10, x9\n"
+ "ldr x22, [%x[args], %[offsetof_ldcb]]\n"
+ "madd x24, x9, x22, x24\n" // C += m * ldc
+ "tbz x14, #2, 18f\n"
+ "cntw x19\n"
+ "cmp x23, x19\n"
+ "csel x21, x23, x19, LT\n"
+ "lsr x20, x21, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 16f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 16f\n"
"15:" // Store to output array: Skip activation: Accumulator row 0 loop
".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
- ".inst 0xa160c320 // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x25]\n"
- "add x25, x25, x23\n"
- ".inst 0xa160c321 // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x25]\n"
- "add x25, x25, x23\n"
+ ".inst 0xa160c300 // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x24]\n"
+ "add x24, x24, x22\n"
+ ".inst 0xa160c301 // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x24]\n"
+ "add x24, x24, x22\n"
"add x12, x12, #0x4\n"
- ".inst 0xa160c322 // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x25]\n"
- "add x25, x25, x23\n"
- "cmp x12, x21, LSL #2\n"
- ".inst 0xa160c323 // st1w { z3.s, z7.s, z11.s, z15.s }, p8, [x25]\n"
- "add x25, x25, x23\n"
+ ".inst 0xa160c302 // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x24]\n"
+ "add x24, x24, x22\n"
+ "cmp x12, x20, LSL #2\n"
+ ".inst 0xa160c303 // st1w { z3.s, z7.s, z11.s, z15.s }, p8, [x24]\n"
+ "add x24, x24, x22\n"
"blt 15b\n"
"16:" // Store to output array: Skip activation: Accumulator row 0 oddments
- "cbz x20, 17f\n"
- "subs x20, x20, #0x1\n"
+ "cbz x19, 17f\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
- ".inst 0xa160c320 // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x25]\n"
- "add x25, x25, x23\n"
+ ".inst 0xa160c300 // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x24]\n"
+ "add x24, x24, x22\n"
"beq 17f\n"
- "subs x20, x20, #0x1\n"
- ".inst 0xa160c321 // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x25]\n"
- "add x25, x25, x23\n"
+ "subs x19, x19, #0x1\n"
+ ".inst 0xa160c301 // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x24]\n"
+ "add x24, x24, x22\n"
"beq 17f\n"
- ".inst 0xa160c322 // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x25]\n"
- "add x25, x25, x23\n"
+ ".inst 0xa160c302 // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x24]\n"
+ "add x24, x24, x22\n"
"17:" // Store to output array: Skip activation: Accumulator row 0 oddments: End
- "subs x24, x24, x22\n"
+ "subs x23, x23, x21\n"
"beq 18f\n"
"b 22f\n"
"18:" // Store to output array: Skip activation: End
- "cntw x20\n"
- "cmp x24, x20\n"
+ "cntw x19\n"
+ "cmp x23, x19\n"
"ld1rw { z23.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_min]]\n"
- "csel x20, x24, x20, LT\n"
- "lsr x21, x20, #0x2\n"
+ "csel x19, x23, x19, LT\n"
+ "lsr x20, x19, #0x2\n"
"ld1rw { z16.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
"mov x12, #0x0\n"
- "and x20, x20, #0x3\n"
- "cbz x21, 20f\n"
+ "and x19, x19, #0x3\n"
+ "cbz x20, 20f\n"
"19:" // Store to output array: Accumulator row 0 loop
".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
@@ -350,19 +350,19 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL(const bfloat16 *const A, con
".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
".inst 0xc1b0cae8 // fclamp { z8.s-z11.s }, z23.s, z16.s\n"
".inst 0xc1b0caec // fclamp { z12.s-z15.s }, z23.s, z16.s\n"
- ".inst 0xa160c320 // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x25]\n"
- "add x25, x25, x23\n"
+ ".inst 0xa160c300 // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x24]\n"
+ "add x24, x24, x22\n"
"add x12, x12, #0x4\n"
- ".inst 0xa160c321 // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x25]\n"
- "add x25, x25, x23\n"
- "cmp x12, x21, LSL #2\n"
- ".inst 0xa160c322 // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x25]\n"
- "add x25, x25, x23\n"
- ".inst 0xa160c323 // st1w { z3.s, z7.s, z11.s, z15.s }, p8, [x25]\n"
- "add x25, x25, x23\n"
+ ".inst 0xa160c301 // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x24]\n"
+ "add x24, x24, x22\n"
+ "cmp x12, x20, LSL #2\n"
+ ".inst 0xa160c302 // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x24]\n"
+ "add x24, x24, x22\n"
+ ".inst 0xa160c303 // st1w { z3.s, z7.s, z11.s, z15.s }, p8, [x24]\n"
+ "add x24, x24, x22\n"
"blt 19b\n"
"20:" // Store to output array: Accumulator row 0 oddments
- "cbz x20, 21f\n"
+ "cbz x19, 21f\n"
".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
".inst 0xc1b0cae0 // fclamp { z0.s-z3.s }, z23.s, z16.s\n"
@@ -371,46 +371,46 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL(const bfloat16 *const A, con
".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
".inst 0xc1b0cae8 // fclamp { z8.s-z11.s }, z23.s, z16.s\n"
".inst 0xc1b0caec // fclamp { z12.s-z15.s }, z23.s, z16.s\n"
- "subs x20, x20, #0x1\n"
- ".inst 0xa160c320 // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x25]\n"
- "add x25, x25, x23\n"
+ "subs x19, x19, #0x1\n"
+ ".inst 0xa160c300 // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x24]\n"
+ "add x24, x24, x22\n"
"beq 21f\n"
- "subs x20, x20, #0x1\n"
- ".inst 0xa160c321 // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x25]\n"
- "add x25, x25, x23\n"
+ "subs x19, x19, #0x1\n"
+ ".inst 0xa160c301 // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x24]\n"
+ "add x24, x24, x22\n"
"beq 21f\n"
- ".inst 0xa160c322 // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x25]\n"
+ ".inst 0xa160c302 // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x24]\n"
"21:" // Store to output array: Accumulator row 0 oddments: End
"22:" // Store to output array: End
- "tbz x15, #0, 24f\n"
+ "tbz x14, #0, 24f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"23:" // Store to output array: Refill accumulators: Loop
- ".inst 0xa040c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14]\n"
+ ".inst 0xa040c5b0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x13]\n"
".inst 0xc0840600 // mova za0h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa041c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
+ ".inst 0xa041c5b0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x13, #0x4, MUL VL]\n"
".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa042c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa042c5b0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x13, #0x8, MUL VL]\n"
".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa043c5c8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
+ ".inst 0xa043c5a8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x13, #0xc, MUL VL]\n"
".inst 0xc0840503 // mova za3h.s[x12], { z8.s-z11.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- "addvl x14, x14, #16\n"
+ "cmp x12, x19\n"
+ "addvl x13, x13, #16\n"
"blt 23b\n"
"24:" // End block
- "incw x9, ALL, MUL #4\n"
- "cmp x9, x28\n"
+ "incw x28, ALL, MUL #4\n"
+ "cmp x28, x27\n"
"blt 3b\n"
- "incw x10\n"
- "cmp x10, x11\n"
- "mov x9, #0x0\n"
- "mov x27, x26\n"
+ "incw x9\n"
+ "cmp x9, x10\n"
+ "mov x28, #0x0\n"
+ "mov x26, x25\n"
"blt 3b\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_KernelArgs_max] "I" (offsetof(KernelArgs, max)), [offsetof_KernelArgs_min] "I" (offsetof(KernelArgs, min)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL/generic.cpp
index b63f2110ff..a4a40ad5ff 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __ARM_FEATURE_SVE
#ifdef ARM_COMPUTE_ENABLE_SME2
@@ -103,108 +103,108 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL(const bfloat16 *const A, con
KernelArgs args(A, B, C, ldc, M, N, K, bias, act, accumulate, accumulator_buffer);
__asm__ __volatile__(
- "ldr x16, [%x[args], %[offsetof_flags]]\n"
+ "ldr x15, [%x[args], %[offsetof_flags]]\n"
".inst 0xd503477f // SMSTART ZA\n"
"ptrue p0.b\n"
".inst 0x25207811 // ptrue pn9.b\n"
- "ldr x15, [%x[args], %[offsetof_accumulator_buffer]]\n"
"ldr x14, [%x[args], %[offsetof_accumulator_buffer]]\n"
- "tbz x16, #0, 2f\n"
+ "ldr x13, [%x[args], %[offsetof_accumulator_buffer]]\n"
+ "tbz x15, #0, 2f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"1:" // Initial accumulator load from buffer: Loop
- ".inst 0xa040c5e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xa040c5c8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x14]\n"
".inst 0xc0840500 // mova za0h.s[x12], { z8.s-z11.s }\n"
- ".inst 0xa041c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa041c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa042c5e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa042c5c0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
".inst 0xc0840402 // mova za2h.s[x12], { z0.s-z3.s }\n"
- ".inst 0xa043c5fc // ld1w { z28.s-z31.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xa043c5dc // ld1w { z28.s-z31.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
".inst 0xc0840783 // mova za3h.s[x12], { z28.s-z31.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- "addvl x15, x15, #16\n"
+ "cmp x12, x19\n"
+ "addvl x14, x14, #16\n"
"blt 1b\n"
"2:" // Initial accumulator load from buffer: End
- "ldr w13, [%x[args], %[offsetof_M]]\n"
- "mov x11, #0x0\n"
+ "ldr w11, [%x[args], %[offsetof_M]]\n"
"mov x10, #0x0\n"
- "ldr w9, [%x[args], %[offsetof_N]]\n"
- "ldr x28, [%x[args], %[offsetof_A]]\n"
+ "mov x9, #0x0\n"
+ "ldr w28, [%x[args], %[offsetof_N]]\n"
+ "ldr x27, [%x[args], %[offsetof_A]]\n"
"3:" // M and N loop
- "mov x27, x28\n"
- ".inst 0x25a94550 // whilelt pn8.s, x10, x9, VLx2\n"
- "tbnz x16, #0, 4f\n"
- "ldr x20, [%x[args], %[offsetof_bias]]\n"
+ "mov x26, x27\n"
+ ".inst 0x25bc4530 // whilelt pn8.s, x9, x28, VLx2\n"
+ "tbnz x15, #0, 4f\n"
+ "ldr x19, [%x[args], %[offsetof_bias]]\n"
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
- "cbz x20, 5f\n"
+ "cbz x19, 5f\n"
"fmov z21.s, #1.0\n"
- ".inst 0xa00a428f // ldnt1w { z14.s-z15.s }, p8/Z, [x20, x10, LSL #2]\n"
+ ".inst 0xa009426f // ldnt1w { z14.s-z15.s }, p8/Z, [x19, x9, LSL #2]\n"
".inst 0x808e02a0 // fmopa za0.s, p0/M, p0/M, z21.s, z14.s\n"
".inst 0x808f02a1 // fmopa za1.s, p0/M, p0/M, z21.s, z15.s\n"
".inst 0x808e02a2 // fmopa za2.s, p0/M, p0/M, z21.s, z14.s\n"
".inst 0x808f02a3 // fmopa za3.s, p0/M, p0/M, z21.s, z15.s\n"
"4:" // Prepare accumulators: Test for last block
+ "mov x19, x9\n"
"mov x20, x10\n"
- "mov x21, x11\n"
+ "incw x19, ALL, MUL #2\n"
"incw x20, ALL, MUL #2\n"
- "incw x21, ALL, MUL #2\n"
- "cmp x20, x9\n"
- "csel x21, x11, x21, LT\n"
- "mov x20, x16\n"
- "bfm x16, XZR, #0x0, #0x0 // bfc x16, #0x0, #0x1\n"
- "cmp x21, x13\n"
- "csel x16, x20, x16, LT\n"
+ "cmp x19, x28\n"
+ "csel x20, x10, x20, LT\n"
+ "mov x19, x15\n"
+ "bfm x15, XZR, #0x0, #0x0 // bfc x15, #0x0, #0x1\n"
+ "cmp x20, x11\n"
+ "csel x15, x19, x15, LT\n"
"5:" // Prepare accumulators: End
- "ldr x20, [%x[args], %[offsetof_K]]\n"
- "add x20, x20, #0x1\n"
- "lsr x20, x20, #0x1\n"
- "ldr x23, [%x[args], %[offsetof_B]]\n"
- "lsr x22, x20, #0x2\n"
- "and x21, x20, #0x3\n"
- "ldr x20, [%x[args], %[offsetof_kstride_bytes]]\n"
- "madd x23, x10, x20, x23\n" // bptr = B + n * kstride_bytes
- "cbz x22, 8f\n"
- "subs x22, x22, #0x1\n"
- ".inst 0xa1402767 // ld1h { z7.h, z15.h }, pn9.b/Z, [x27]\n"
- ".inst 0xa14026ff // ldnt1h { z23.h, z31.h }, pn9.b/Z, [x23]\n"
- ".inst 0xa0412768 // ld1h { z8.h-z9.h }, pn9.b/Z, [x27, #0x2, MUL VL]\n"
- ".inst 0xa04126e3 // ldnt1h { z2.h-z3.h }, pn9.b/Z, [x23, #0x2, MUL VL]\n"
- ".inst 0xa1422772 // ld1h { z18.h, z26.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
- ".inst 0xa04226f1 // ldnt1h { z16.h-z17.h }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xa1432776 // ld1h { z22.h, z30.h }, pn9.b/Z, [x27, #0x6, MUL VL]\n"
- "addvl x27, x27, #8\n"
- ".inst 0xa14326ec // ldnt1h { z4.h, z12.h }, pn9.b/Z, [x23, #0x6, MUL VL]\n"
- "addvl x23, x23, #8\n"
+ "ldr x19, [%x[args], %[offsetof_K]]\n"
+ "add x19, x19, #0x1\n"
+ "lsr x19, x19, #0x1\n"
+ "ldr x22, [%x[args], %[offsetof_B]]\n"
+ "lsr x21, x19, #0x2\n"
+ "and x20, x19, #0x3\n"
+ "ldr x19, [%x[args], %[offsetof_kstride_bytes]]\n"
+ "madd x22, x9, x19, x22\n" // bptr = B + n * kstride_bytes
+ "cbz x21, 8f\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xa1402747 // ld1h { z7.h, z15.h }, pn9.b/Z, [x26]\n"
+ ".inst 0xa14026df // ldnt1h { z23.h, z31.h }, pn9.b/Z, [x22]\n"
+ ".inst 0xa0412748 // ld1h { z8.h-z9.h }, pn9.b/Z, [x26, #0x2, MUL VL]\n"
+ ".inst 0xa04126c3 // ldnt1h { z2.h-z3.h }, pn9.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xa1422752 // ld1h { z18.h, z26.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa04226d1 // ldnt1h { z16.h-z17.h }, pn9.b/Z, [x22, #0x4, MUL VL]\n"
+ ".inst 0xa1432756 // ld1h { z22.h, z30.h }, pn9.b/Z, [x26, #0x6, MUL VL]\n"
+ "addvl x26, x26, #8\n"
+ ".inst 0xa14326cc // ldnt1h { z4.h, z12.h }, pn9.b/Z, [x22, #0x6, MUL VL]\n"
+ "addvl x22, x22, #8\n"
"ble 7f\n"
"6:" // K loop
".inst 0x819700e0 // bfmopa za0.s, p0/M, p0/M, z7.h, z23.h\n"
- "subs x22, x22, #0x1\n"
+ "subs x21, x21, #0x1\n"
".inst 0x819f00e1 // bfmopa za1.s, p0/M, p0/M, z7.h, z31.h\n"
".inst 0x819701e2 // bfmopa za2.s, p0/M, p0/M, z15.h, z23.h\n"
".inst 0x819f01e3 // bfmopa za3.s, p0/M, p0/M, z15.h, z31.h\n"
- ".inst 0xa1402767 // ld1h { z7.h, z15.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xa1402747 // ld1h { z7.h, z15.h }, pn9.b/Z, [x26]\n"
".inst 0x81820100 // bfmopa za0.s, p0/M, p0/M, z8.h, z2.h\n"
- ".inst 0xa14026ff // ldnt1h { z23.h, z31.h }, pn9.b/Z, [x23]\n"
+ ".inst 0xa14026df // ldnt1h { z23.h, z31.h }, pn9.b/Z, [x22]\n"
".inst 0x81830101 // bfmopa za1.s, p0/M, p0/M, z8.h, z3.h\n"
".inst 0x81820122 // bfmopa za2.s, p0/M, p0/M, z9.h, z2.h\n"
".inst 0x81830123 // bfmopa za3.s, p0/M, p0/M, z9.h, z3.h\n"
- ".inst 0xa0412768 // ld1h { z8.h-z9.h }, pn9.b/Z, [x27, #0x2, MUL VL]\n"
+ ".inst 0xa0412748 // ld1h { z8.h-z9.h }, pn9.b/Z, [x26, #0x2, MUL VL]\n"
".inst 0x81900240 // bfmopa za0.s, p0/M, p0/M, z18.h, z16.h\n"
- ".inst 0xa04126e3 // ldnt1h { z2.h-z3.h }, pn9.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xa04126c3 // ldnt1h { z2.h-z3.h }, pn9.b/Z, [x22, #0x2, MUL VL]\n"
".inst 0x81910241 // bfmopa za1.s, p0/M, p0/M, z18.h, z17.h\n"
".inst 0x81900342 // bfmopa za2.s, p0/M, p0/M, z26.h, z16.h\n"
".inst 0x81910343 // bfmopa za3.s, p0/M, p0/M, z26.h, z17.h\n"
- ".inst 0xa1422772 // ld1h { z18.h, z26.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
- ".inst 0xa04226f1 // ldnt1h { z16.h-z17.h }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa1422752 // ld1h { z18.h, z26.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa04226d1 // ldnt1h { z16.h-z17.h }, pn9.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0x818402c0 // bfmopa za0.s, p0/M, p0/M, z22.h, z4.h\n"
".inst 0x818c02c1 // bfmopa za1.s, p0/M, p0/M, z22.h, z12.h\n"
".inst 0x818403c2 // bfmopa za2.s, p0/M, p0/M, z30.h, z4.h\n"
".inst 0x818c03c3 // bfmopa za3.s, p0/M, p0/M, z30.h, z12.h\n"
- ".inst 0xa1432776 // ld1h { z22.h, z30.h }, pn9.b/Z, [x27, #0x6, MUL VL]\n"
- "addvl x27, x27, #8\n"
- ".inst 0xa14326ec // ldnt1h { z4.h, z12.h }, pn9.b/Z, [x23, #0x6, MUL VL]\n"
- "addvl x23, x23, #8\n"
+ ".inst 0xa1432756 // ld1h { z22.h, z30.h }, pn9.b/Z, [x26, #0x6, MUL VL]\n"
+ "addvl x26, x26, #8\n"
+ ".inst 0xa14326cc // ldnt1h { z4.h, z12.h }, pn9.b/Z, [x22, #0x6, MUL VL]\n"
+ "addvl x22, x22, #8\n"
"bgt 6b\n"
"7:" // K loop tail
".inst 0x819700e0 // bfmopa za0.s, p0/M, p0/M, z7.h, z23.h\n"
@@ -224,259 +224,259 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL(const bfloat16 *const A, con
".inst 0x818403c2 // bfmopa za2.s, p0/M, p0/M, z30.h, z4.h\n"
".inst 0x818c03c3 // bfmopa za3.s, p0/M, p0/M, z30.h, z12.h\n"
"8:" // K oddments
- "cbz x21, 10f\n"
+ "cbz x20, 10f\n"
"9:" // K oddments: Loop
- ".inst 0xa1402767 // ld1h { z7.h, z15.h }, pn9.b/Z, [x27]\n"
- "subs x21, x21, #0x1\n"
- "addvl x27, x27, #2\n"
- ".inst 0xa14026f7 // ld1h { z23.h, z31.h }, pn9.b/Z, [x23]\n"
- "addvl x23, x23, #2\n"
+ ".inst 0xa1402747 // ld1h { z7.h, z15.h }, pn9.b/Z, [x26]\n"
+ "subs x20, x20, #0x1\n"
+ "addvl x26, x26, #2\n"
+ ".inst 0xa14026d7 // ld1h { z23.h, z31.h }, pn9.b/Z, [x22]\n"
+ "addvl x22, x22, #2\n"
".inst 0x819700e0 // bfmopa za0.s, p0/M, p0/M, z7.h, z23.h\n"
".inst 0x819f00e1 // bfmopa za1.s, p0/M, p0/M, z7.h, z31.h\n"
".inst 0x819701e2 // bfmopa za2.s, p0/M, p0/M, z15.h, z23.h\n"
".inst 0x819f01e3 // bfmopa za3.s, p0/M, p0/M, z15.h, z31.h\n"
"bgt 9b\n"
"10:" // K oddments: End
- "tbz x16, #1, 14f\n"
- "tbz x16, #0, 12f\n"
+ "tbz x15, #1, 14f\n"
+ "tbz x15, #0, 12f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"11:" // Store to partial result buffer: Store and refill: Loop
- ".inst 0xa040c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xa040c5cc // ld1w { z12.s-z15.s }, pn9.b/Z, [x14]\n"
".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
".inst 0xc0840580 // mova za0h.s[x12], { z12.s-z15.s }\n"
".inst 0xc086042c // mova { z12.s-z15.s }, za1h.s[x12]\n"
- ".inst 0xa041c5e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa041c5c0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
".inst 0xc0840401 // mova za1h.s[x12], { z0.s-z3.s }\n"
".inst 0xc0860454 // mova { z20.s-z23.s }, za2h.s[x12]\n"
".inst 0xc0860468 // mova { z8.s-z11.s }, za3h.s[x12]\n"
- ".inst 0xa042c5f8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa042c5d8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
".inst 0xc0840702 // mova za2h.s[x12], { z24.s-z27.s }\n"
- ".inst 0xa043c5f8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xa043c5d8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
".inst 0xc0840703 // mova za3h.s[x12], { z24.s-z27.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- ".inst 0xa060c5d0 // st1w { z16.s-z19.s }, pn9.b, [x14]\n"
- "addvl x15, x15, #16\n"
- ".inst 0xa061c5cc // st1w { z12.s-z15.s }, pn9.b, [x14, #0x4, MUL VL]\n"
- ".inst 0xa062c5d4 // st1w { z20.s-z23.s }, pn9.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c5c8 // st1w { z8.s-z11.s }, pn9.b, [x14, #0xc, MUL VL]\n"
+ "cmp x12, x19\n"
+ ".inst 0xa060c5b0 // st1w { z16.s-z19.s }, pn9.b, [x13]\n"
"addvl x14, x14, #16\n"
+ ".inst 0xa061c5ac // st1w { z12.s-z15.s }, pn9.b, [x13, #0x4, MUL VL]\n"
+ ".inst 0xa062c5b4 // st1w { z20.s-z23.s }, pn9.b, [x13, #0x8, MUL VL]\n"
+ ".inst 0xa063c5a8 // st1w { z8.s-z11.s }, pn9.b, [x13, #0xc, MUL VL]\n"
+ "addvl x13, x13, #16\n"
"blt 11b\n"
"b 30f\n"
"12:" // Store to partial result buffer: Store only
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"13:" // Store to partial result buffer: Store only: Loop
".inst 0xc086040c // mova { z12.s-z15.s }, za0h.s[x12]\n"
".inst 0xc0860430 // mova { z16.s-z19.s }, za1h.s[x12]\n"
- ".inst 0xa060c5cc // st1w { z12.s-z15.s }, pn9.b, [x14]\n"
+ ".inst 0xa060c5ac // st1w { z12.s-z15.s }, pn9.b, [x13]\n"
".inst 0xc0860444 // mova { z4.s-z7.s }, za2h.s[x12]\n"
".inst 0xc0860460 // mova { z0.s-z3.s }, za3h.s[x12]\n"
- ".inst 0xa061c5d0 // st1w { z16.s-z19.s }, pn9.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xa061c5b0 // st1w { z16.s-z19.s }, pn9.b, [x13, #0x4, MUL VL]\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- ".inst 0xa062c5c4 // st1w { z4.s-z7.s }, pn9.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c5c0 // st1w { z0.s-z3.s }, pn9.b, [x14, #0xc, MUL VL]\n"
- "addvl x14, x14, #16\n"
+ "cmp x12, x19\n"
+ ".inst 0xa062c5a4 // st1w { z4.s-z7.s }, pn9.b, [x13, #0x8, MUL VL]\n"
+ ".inst 0xa063c5a0 // st1w { z0.s-z3.s }, pn9.b, [x13, #0xc, MUL VL]\n"
+ "addvl x13, x13, #16\n"
"blt 13b\n"
"b 30f\n"
"14:" // Store to output array
- "ldr x26, [%x[args], %[offsetof_C]]\n"
- "add x26, x26, x10, LSL #2\n" // C += n
- "sub x25, x13, x11\n"
- "ldr x24, [%x[args], %[offsetof_ldcb]]\n"
- "madd x26, x11, x24, x26\n" // C += m * ldc
- "tbz x16, #2, 21f\n"
- "cntw x23\n"
- "cmp x25, x23\n"
- "csel x22, x25, x23, LT\n"
- "lsr x21, x22, #0x2\n"
+ "ldr x25, [%x[args], %[offsetof_C]]\n"
+ "add x25, x25, x9, LSL #2\n" // C += n
+ "sub x24, x11, x10\n"
+ "ldr x23, [%x[args], %[offsetof_ldcb]]\n"
+ "madd x25, x10, x23, x25\n" // C += m * ldc
+ "tbz x15, #2, 21f\n"
+ "cntw x22\n"
+ "cmp x24, x22\n"
+ "csel x21, x24, x22, LT\n"
+ "lsr x20, x21, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 16f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 16f\n"
"15:" // Store to output array: Skip activation: Accumulator row 0 loop
".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
".inst 0xc086042c // mova { z12.s-z15.s }, za1h.s[x12]\n"
- ".inst 0xa1604344 // st1w { z4.s, z12.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
- ".inst 0xa1604345 // st1w { z5.s, z13.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xa1604324 // st1w { z4.s, z12.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
+ ".inst 0xa1604325 // st1w { z5.s, z13.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"add x12, x12, #0x4\n"
- ".inst 0xa1604346 // st1w { z6.s, z14.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
- ".inst 0xa1604347 // st1w { z7.s, z15.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xa1604326 // st1w { z6.s, z14.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
+ "cmp x12, x20, LSL #2\n"
+ ".inst 0xa1604327 // st1w { z7.s, z15.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"blt 15b\n"
"16:" // Store to output array: Skip activation: Accumulator row 0 oddments
- "cbz x20, 17f\n"
- "subs x20, x20, #0x1\n"
+ "cbz x19, 17f\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
".inst 0xc086042c // mova { z12.s-z15.s }, za1h.s[x12]\n"
- ".inst 0xa1604344 // st1w { z4.s, z12.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xa1604324 // st1w { z4.s, z12.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"beq 17f\n"
- "subs x20, x20, #0x1\n"
- ".inst 0xa1604345 // st1w { z5.s, z13.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ ".inst 0xa1604325 // st1w { z5.s, z13.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"beq 17f\n"
- ".inst 0xa1604346 // st1w { z6.s, z14.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xa1604326 // st1w { z6.s, z14.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"17:" // Store to output array: Skip activation: Accumulator row 0 oddments: End
- "subs x25, x25, x22\n"
+ "subs x24, x24, x21\n"
"beq 21f\n"
- "cmp x25, x23\n"
- "csel x22, x25, x23, LT\n"
- "lsr x21, x22, #0x2\n"
+ "cmp x24, x22\n"
+ "csel x21, x24, x22, LT\n"
+ "lsr x20, x21, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 19f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 19f\n"
"18:" // Store to output array: Skip activation: Accumulator row 1 loop
".inst 0xc0860444 // mova { z4.s-z7.s }, za2h.s[x12]\n"
".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
- ".inst 0xa1604344 // st1w { z4.s, z12.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
- ".inst 0xa1604345 // st1w { z5.s, z13.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xa1604324 // st1w { z4.s, z12.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
+ ".inst 0xa1604325 // st1w { z5.s, z13.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"add x12, x12, #0x4\n"
- ".inst 0xa1604346 // st1w { z6.s, z14.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
- ".inst 0xa1604347 // st1w { z7.s, z15.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xa1604326 // st1w { z6.s, z14.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
+ "cmp x12, x20, LSL #2\n"
+ ".inst 0xa1604327 // st1w { z7.s, z15.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"blt 18b\n"
"19:" // Store to output array: Skip activation: Accumulator row 1 oddments
- "cbz x20, 20f\n"
- "subs x20, x20, #0x1\n"
+ "cbz x19, 20f\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc0860454 // mova { z20.s-z23.s }, za2h.s[x12]\n"
".inst 0xc086047c // mova { z28.s-z31.s }, za3h.s[x12]\n"
- ".inst 0xa1604354 // st1w { z20.s, z28.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xa1604334 // st1w { z20.s, z28.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"beq 20f\n"
- "subs x20, x20, #0x1\n"
- ".inst 0xa1604355 // st1w { z21.s, z29.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ ".inst 0xa1604335 // st1w { z21.s, z29.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"beq 20f\n"
- ".inst 0xa1604356 // st1w { z22.s, z30.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xa1604336 // st1w { z22.s, z30.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"20:" // Store to output array: Skip activation: Accumulator row 1 oddments: End
- "subs x25, x25, x22\n"
+ "subs x24, x24, x21\n"
"beq 21f\n"
"b 28f\n"
"21:" // Store to output array: Skip activation: End
- "cntw x23\n"
- "cmp x25, x23\n"
+ "cntw x22\n"
+ "cmp x24, x22\n"
"ld1rw { z21.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_min]]\n"
- "csel x22, x25, x23, LT\n"
- "lsr x21, x22, #0x2\n"
+ "csel x21, x24, x22, LT\n"
+ "lsr x20, x21, #0x2\n"
"ld1rw { z20.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 23f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 23f\n"
"22:" // Store to output array: Accumulator row 0 loop
".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
".inst 0xc086042c // mova { z12.s-z15.s }, za1h.s[x12]\n"
".inst 0xc1b4caa4 // fclamp { z4.s-z7.s }, z21.s, z20.s\n"
".inst 0xc1b4caac // fclamp { z12.s-z15.s }, z21.s, z20.s\n"
- ".inst 0xa1604344 // st1w { z4.s, z12.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xa1604324 // st1w { z4.s, z12.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"add x12, x12, #0x4\n"
- ".inst 0xa1604345 // st1w { z5.s, z13.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
- ".inst 0xa1604346 // st1w { z6.s, z14.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
- ".inst 0xa1604347 // st1w { z7.s, z15.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xa1604325 // st1w { z5.s, z13.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
+ "cmp x12, x20, LSL #2\n"
+ ".inst 0xa1604326 // st1w { z6.s, z14.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
+ ".inst 0xa1604327 // st1w { z7.s, z15.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"blt 22b\n"
"23:" // Store to output array: Accumulator row 0 oddments
- "cbz x20, 24f\n"
+ "cbz x19, 24f\n"
".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
".inst 0xc0860428 // mova { z8.s-z11.s }, za1h.s[x12]\n"
".inst 0xc1b4caa0 // fclamp { z0.s-z3.s }, z21.s, z20.s\n"
".inst 0xc1b4caa8 // fclamp { z8.s-z11.s }, z21.s, z20.s\n"
- "subs x20, x20, #0x1\n"
- ".inst 0xa1604340 // st1w { z0.s, z8.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ ".inst 0xa1604320 // st1w { z0.s, z8.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"beq 24f\n"
- "subs x20, x20, #0x1\n"
- ".inst 0xa1604341 // st1w { z1.s, z9.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ ".inst 0xa1604321 // st1w { z1.s, z9.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"beq 24f\n"
- ".inst 0xa1604342 // st1w { z2.s, z10.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xa1604322 // st1w { z2.s, z10.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"24:" // Store to output array: Accumulator row 0 oddments: End
- "subs x25, x25, x22\n"
+ "subs x24, x24, x21\n"
"beq 28f\n"
- "cmp x25, x23\n"
- "csel x20, x25, x23, LT\n"
- "lsr x21, x20, #0x2\n"
+ "cmp x24, x22\n"
+ "csel x19, x24, x22, LT\n"
+ "lsr x20, x19, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x20, #0x3\n"
- "cbz x21, 26f\n"
+ "and x19, x19, #0x3\n"
+ "cbz x20, 26f\n"
"25:" // Store to output array: Accumulator row 1 loop
".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
".inst 0xc0860478 // mova { z24.s-z27.s }, za3h.s[x12]\n"
".inst 0xc1b4cab0 // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
".inst 0xc1b4cab8 // fclamp { z24.s-z27.s }, z21.s, z20.s\n"
- ".inst 0xa1604350 // st1w { z16.s, z24.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xa1604330 // st1w { z16.s, z24.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"add x12, x12, #0x4\n"
- ".inst 0xa1604351 // st1w { z17.s, z25.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
- ".inst 0xa1604352 // st1w { z18.s, z26.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
- ".inst 0xa1604353 // st1w { z19.s, z27.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xa1604331 // st1w { z17.s, z25.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
+ "cmp x12, x20, LSL #2\n"
+ ".inst 0xa1604332 // st1w { z18.s, z26.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
+ ".inst 0xa1604333 // st1w { z19.s, z27.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"blt 25b\n"
"26:" // Store to output array: Accumulator row 1 oddments
- "cbz x20, 27f\n"
+ "cbz x19, 27f\n"
".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
".inst 0xc0860478 // mova { z24.s-z27.s }, za3h.s[x12]\n"
".inst 0xc1b4cab0 // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
".inst 0xc1b4cab8 // fclamp { z24.s-z27.s }, z21.s, z20.s\n"
- "subs x20, x20, #0x1\n"
- ".inst 0xa1604350 // st1w { z16.s, z24.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ ".inst 0xa1604330 // st1w { z16.s, z24.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"beq 27f\n"
- "subs x20, x20, #0x1\n"
- ".inst 0xa1604351 // st1w { z17.s, z25.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ ".inst 0xa1604331 // st1w { z17.s, z25.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"beq 27f\n"
- ".inst 0xa1604352 // st1w { z18.s, z26.s }, p8, [x26]\n"
+ ".inst 0xa1604332 // st1w { z18.s, z26.s }, p8, [x25]\n"
"27:" // Store to output array: Accumulator row 1 oddments: End
"28:" // Store to output array: End
- "tbz x16, #0, 30f\n"
+ "tbz x15, #0, 30f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"29:" // Store to output array: Refill accumulators: Loop
- ".inst 0xa040c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xa040c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14]\n"
".inst 0xc0840600 // mova za0h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa041c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa041c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa042c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa042c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa043c5e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xa043c5c8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
".inst 0xc0840503 // mova za3h.s[x12], { z8.s-z11.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- "addvl x15, x15, #16\n"
+ "cmp x12, x19\n"
+ "addvl x14, x14, #16\n"
"blt 29b\n"
"30:" // End block
- "incw x10, ALL, MUL #2\n"
- "cmp x10, x9\n"
+ "incw x9, ALL, MUL #2\n"
+ "cmp x9, x28\n"
"blt 3b\n"
- "incw x11, ALL, MUL #2\n"
- "cmp x11, x13\n"
- "mov x10, #0x0\n"
- "mov x28, x27\n"
+ "incw x10, ALL, MUL #2\n"
+ "cmp x10, x11\n"
+ "mov x9, #0x0\n"
+ "mov x27, x26\n"
"blt 3b\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_KernelArgs_max] "I" (offsetof(KernelArgs, max)), [offsetof_KernelArgs_min] "I" (offsetof(KernelArgs, min)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
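(The revert is mechanical within each hunk: every general-purpose register reference moves down by one — x20 becomes x19, x26 becomes x25, and so on — and the hand-encoded ".inst" words change only in their base-register field. A minimal sketch of that relationship, as a hypothetical helper that is not part of ComputeLibrary: for A64 load/store encodings the Rn field occupies bits [9:5], which is why renumbering the base register shifts each word by a multiple of 0x20.

    #include <cstdint>

    // Hypothetical sketch: the pre-encoded ".inst" words in these hunks
    // differ only in the base-register field. Rn sits in bits [9:5] of the
    // A64 load/store encodings, so x26 -> x25 shifts the word by 0x20.
    constexpr uint32_t with_base_reg(uint32_t inst, unsigned rn) {
        return (inst & ~(0x1Fu << 5)) | ((rn & 0x1Fu) << 5);
    }

    static_assert(with_base_reg(0xa1604350u, 25) == 0xa1604330u,
                  "matches the st1w pair [x26] -> [x25] in the hunk above");
    static_assert(with_base_reg(0xa040c5f0u, 14) == 0xa040c5d0u,
                  "matches the ld1w pair [x15] -> [x14] in the hunk above");

Both assertions can be checked directly against the st1w and ld1w pairs visible in the hunk above.)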
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL/generic.cpp
index a51b3db4b0..798a3cb470 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __ARM_FEATURE_SVE
#ifdef ARM_COMPUTE_ENABLE_SME2
@@ -103,108 +103,108 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL(const bfloat16 *const A, con
KernelArgs args(A, B, C, ldc, M, N, K, bias, act, accumulate, accumulator_buffer);
__asm__ __volatile__(
- "ldr x16, [%x[args], %[offsetof_flags]]\n"
+ "ldr x15, [%x[args], %[offsetof_flags]]\n"
".inst 0xd503477f // SMSTART ZA\n"
"ptrue p1.b\n"
".inst 0x25207810 // ptrue pn8.b\n"
- "ldr x15, [%x[args], %[offsetof_accumulator_buffer]]\n"
"ldr x14, [%x[args], %[offsetof_accumulator_buffer]]\n"
- "tbz x16, #0, 2f\n"
+ "ldr x13, [%x[args], %[offsetof_accumulator_buffer]]\n"
+ "tbz x15, #0, 2f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"1:" // Initial accumulator load from buffer: Loop
- ".inst 0xa040c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xa040c1c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x14]\n"
".inst 0xc0840480 // mova za0h.s[x12], { z4.s-z7.s }\n"
- ".inst 0xa041c1f8 // ld1w { z24.s-z27.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa041c1d8 // ld1w { z24.s-z27.s }, pn8.b/Z, [x14, #0x4, MUL VL]\n"
".inst 0xc0840701 // mova za1h.s[x12], { z24.s-z27.s }\n"
- ".inst 0xa042c1e0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa042c1c0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x14, #0x8, MUL VL]\n"
".inst 0xc0840402 // mova za2h.s[x12], { z0.s-z3.s }\n"
- ".inst 0xa043c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xa043c1c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x14, #0xc, MUL VL]\n"
".inst 0xc0840483 // mova za3h.s[x12], { z4.s-z7.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- "addvl x15, x15, #16\n"
+ "cmp x12, x19\n"
+ "addvl x14, x14, #16\n"
"blt 1b\n"
"2:" // Initial accumulator load from buffer: End
- "ldr w13, [%x[args], %[offsetof_M]]\n"
- "mov x11, #0x0\n"
+ "ldr w11, [%x[args], %[offsetof_M]]\n"
"mov x10, #0x0\n"
- "ldr w9, [%x[args], %[offsetof_N]]\n"
- "ldr x28, [%x[args], %[offsetof_A]]\n"
+ "mov x9, #0x0\n"
+ "ldr w28, [%x[args], %[offsetof_N]]\n"
+ "ldr x27, [%x[args], %[offsetof_A]]\n"
"3:" // M and N loop
- "mov x27, x28\n"
- "whilelt p0.s, x10, x9\n"
- "tbnz x16, #0, 4f\n"
- "ldr x20, [%x[args], %[offsetof_bias]]\n"
+ "mov x26, x27\n"
+ "whilelt p0.s, x9, x28\n"
+ "tbnz x15, #0, 4f\n"
+ "ldr x19, [%x[args], %[offsetof_bias]]\n"
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
- "cbz x20, 5f\n"
+ "cbz x19, 5f\n"
"fmov z8.s, #1.0\n"
- "ldnt1w { z27.s }, p0/Z, [x20, x10, LSL #2]\n"
+ "ldnt1w { z27.s }, p0/Z, [x19, x9, LSL #2]\n"
".inst 0x809b2500 // fmopa za0.s, p1/M, p1/M, z8.s, z27.s\n"
".inst 0x809b2501 // fmopa za1.s, p1/M, p1/M, z8.s, z27.s\n"
".inst 0x809b2502 // fmopa za2.s, p1/M, p1/M, z8.s, z27.s\n"
".inst 0x809b2503 // fmopa za3.s, p1/M, p1/M, z8.s, z27.s\n"
"4:" // Prepare accumulators: Test for last block
+ "mov x19, x9\n"
"mov x20, x10\n"
- "mov x21, x11\n"
- "incw x20\n"
- "incw x21, ALL, MUL #4\n"
- "cmp x20, x9\n"
- "csel x21, x11, x21, LT\n"
- "mov x20, x16\n"
- "bfm x16, XZR, #0x0, #0x0 // bfc x16, #0x0, #0x1\n"
- "cmp x21, x13\n"
- "csel x16, x20, x16, LT\n"
+ "incw x19\n"
+ "incw x20, ALL, MUL #4\n"
+ "cmp x19, x28\n"
+ "csel x20, x10, x20, LT\n"
+ "mov x19, x15\n"
+ "bfm x15, XZR, #0x0, #0x0 // bfc x15, #0x0, #0x1\n"
+ "cmp x20, x11\n"
+ "csel x15, x19, x15, LT\n"
"5:" // Prepare accumulators: End
- "ldr x20, [%x[args], %[offsetof_K]]\n"
- "add x20, x20, #0x1\n"
- "lsr x20, x20, #0x1\n"
- "ldr x23, [%x[args], %[offsetof_B]]\n"
- "lsr x22, x20, #0x2\n"
- "and x21, x20, #0x3\n"
- "ldr x20, [%x[args], %[offsetof_kstride_bytes]]\n"
- "madd x23, x10, x20, x23\n" // bptr = B + n * kstride_bytes
- "cbz x22, 8f\n"
- "subs x22, x22, #0x1\n"
- ".inst 0xa040a364 // ld1h { z4.h-z7.h }, pn8.b/Z, [x27]\n"
- "ldnt1h { z29.h }, p1/Z, [x23]\n"
- ".inst 0xa041a36c // ld1h { z12.h-z15.h }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
- "ldnt1h { z23.h }, p1/Z, [x23, #1, MUL VL]\n"
- ".inst 0xa042a360 // ld1h { z0.h-z3.h }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
- "ldnt1h { z21.h }, p1/Z, [x23, #2, MUL VL]\n"
- ".inst 0xa143a372 // ld1h { z18.h, z22.h, z26.h, z30.h }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
- "addvl x27, x27, #16\n"
- "ldnt1h { z27.h }, p1/Z, [x23, #3, MUL VL]\n"
- "addvl x23, x23, #4\n"
+ "ldr x19, [%x[args], %[offsetof_K]]\n"
+ "add x19, x19, #0x1\n"
+ "lsr x19, x19, #0x1\n"
+ "ldr x22, [%x[args], %[offsetof_B]]\n"
+ "lsr x21, x19, #0x2\n"
+ "and x20, x19, #0x3\n"
+ "ldr x19, [%x[args], %[offsetof_kstride_bytes]]\n"
+ "madd x22, x9, x19, x22\n" // bptr = B + n * kstride_bytes
+ "cbz x21, 8f\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xa040a344 // ld1h { z4.h-z7.h }, pn8.b/Z, [x26]\n"
+ "ldnt1h { z29.h }, p1/Z, [x22]\n"
+ ".inst 0xa041a34c // ld1h { z12.h-z15.h }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ "ldnt1h { z23.h }, p1/Z, [x22, #1, MUL VL]\n"
+ ".inst 0xa042a340 // ld1h { z0.h-z3.h }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ "ldnt1h { z21.h }, p1/Z, [x22, #2, MUL VL]\n"
+ ".inst 0xa143a352 // ld1h { z18.h, z22.h, z26.h, z30.h }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+ "addvl x26, x26, #16\n"
+ "ldnt1h { z27.h }, p1/Z, [x22, #3, MUL VL]\n"
+ "addvl x22, x22, #4\n"
"ble 7f\n"
"6:" // K loop
".inst 0x819d2480 // bfmopa za0.s, p1/M, p1/M, z4.h, z29.h\n"
- "subs x22, x22, #0x1\n"
+ "subs x21, x21, #0x1\n"
".inst 0x819d24a1 // bfmopa za1.s, p1/M, p1/M, z5.h, z29.h\n"
".inst 0x819d24c2 // bfmopa za2.s, p1/M, p1/M, z6.h, z29.h\n"
".inst 0x819d24e3 // bfmopa za3.s, p1/M, p1/M, z7.h, z29.h\n"
- ".inst 0xa040a364 // ld1h { z4.h-z7.h }, pn8.b/Z, [x27]\n"
+ ".inst 0xa040a344 // ld1h { z4.h-z7.h }, pn8.b/Z, [x26]\n"
".inst 0x81972580 // bfmopa za0.s, p1/M, p1/M, z12.h, z23.h\n"
- "ldnt1h { z29.h }, p1/Z, [x23]\n"
+ "ldnt1h { z29.h }, p1/Z, [x22]\n"
".inst 0x819725a1 // bfmopa za1.s, p1/M, p1/M, z13.h, z23.h\n"
".inst 0x819725c2 // bfmopa za2.s, p1/M, p1/M, z14.h, z23.h\n"
".inst 0x819725e3 // bfmopa za3.s, p1/M, p1/M, z15.h, z23.h\n"
- ".inst 0xa041a36c // ld1h { z12.h-z15.h }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa041a34c // ld1h { z12.h-z15.h }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
".inst 0x81952400 // bfmopa za0.s, p1/M, p1/M, z0.h, z21.h\n"
- "ldnt1h { z23.h }, p1/Z, [x23, #1, MUL VL]\n"
+ "ldnt1h { z23.h }, p1/Z, [x22, #1, MUL VL]\n"
".inst 0x81952421 // bfmopa za1.s, p1/M, p1/M, z1.h, z21.h\n"
".inst 0x81952442 // bfmopa za2.s, p1/M, p1/M, z2.h, z21.h\n"
".inst 0x81952463 // bfmopa za3.s, p1/M, p1/M, z3.h, z21.h\n"
- ".inst 0xa042a360 // ld1h { z0.h-z3.h }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
- "ldnt1h { z21.h }, p1/Z, [x23, #2, MUL VL]\n"
+ ".inst 0xa042a340 // ld1h { z0.h-z3.h }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ "ldnt1h { z21.h }, p1/Z, [x22, #2, MUL VL]\n"
".inst 0x819b2640 // bfmopa za0.s, p1/M, p1/M, z18.h, z27.h\n"
".inst 0x819b26c1 // bfmopa za1.s, p1/M, p1/M, z22.h, z27.h\n"
".inst 0x819b2742 // bfmopa za2.s, p1/M, p1/M, z26.h, z27.h\n"
".inst 0x819b27c3 // bfmopa za3.s, p1/M, p1/M, z30.h, z27.h\n"
- ".inst 0xa143a372 // ld1h { z18.h, z22.h, z26.h, z30.h }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
- "addvl x27, x27, #16\n"
- "ldnt1h { z27.h }, p1/Z, [x23, #3, MUL VL]\n"
- "addvl x23, x23, #4\n"
+ ".inst 0xa143a352 // ld1h { z18.h, z22.h, z26.h, z30.h }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+ "addvl x26, x26, #16\n"
+ "ldnt1h { z27.h }, p1/Z, [x22, #3, MUL VL]\n"
+ "addvl x22, x22, #4\n"
"bgt 6b\n"
"7:" // K loop tail
".inst 0x819d2480 // bfmopa za0.s, p1/M, p1/M, z4.h, z29.h\n"
@@ -224,391 +224,391 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL(const bfloat16 *const A, con
".inst 0x819b2742 // bfmopa za2.s, p1/M, p1/M, z26.h, z27.h\n"
".inst 0x819b27c3 // bfmopa za3.s, p1/M, p1/M, z30.h, z27.h\n"
"8:" // K oddments
- "cbz x21, 10f\n"
+ "cbz x20, 10f\n"
"9:" // K oddments: Loop
- ".inst 0xa040a364 // ld1h { z4.h-z7.h }, pn8.b/Z, [x27]\n"
- "subs x21, x21, #0x1\n"
- "addvl x27, x27, #4\n"
- "ld1h { z29.h }, p1/Z, [x23]\n"
- "addvl x23, x23, #1\n"
+ ".inst 0xa040a344 // ld1h { z4.h-z7.h }, pn8.b/Z, [x26]\n"
+ "subs x20, x20, #0x1\n"
+ "addvl x26, x26, #4\n"
+ "ld1h { z29.h }, p1/Z, [x22]\n"
+ "addvl x22, x22, #1\n"
".inst 0x819d2480 // bfmopa za0.s, p1/M, p1/M, z4.h, z29.h\n"
".inst 0x819d24a1 // bfmopa za1.s, p1/M, p1/M, z5.h, z29.h\n"
".inst 0x819d24c2 // bfmopa za2.s, p1/M, p1/M, z6.h, z29.h\n"
".inst 0x819d24e3 // bfmopa za3.s, p1/M, p1/M, z7.h, z29.h\n"
"bgt 9b\n"
"10:" // K oddments: End
- "tbz x16, #1, 14f\n"
- "tbz x16, #0, 12f\n"
+ "tbz x15, #1, 14f\n"
+ "tbz x15, #0, 12f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"11:" // Store to partial result buffer: Store and refill: Loop
- ".inst 0xa040c1e8 // ld1w { z8.s-z11.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xa040c1c8 // ld1w { z8.s-z11.s }, pn8.b/Z, [x14]\n"
".inst 0xc0860418 // mova { z24.s-z27.s }, za0h.s[x12]\n"
".inst 0xc0840500 // mova za0h.s[x12], { z8.s-z11.s }\n"
".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
- ".inst 0xa041c1ec // ld1w { z12.s-z15.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa041c1cc // ld1w { z12.s-z15.s }, pn8.b/Z, [x14, #0x4, MUL VL]\n"
".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
".inst 0xc086044c // mova { z12.s-z15.s }, za2h.s[x12]\n"
".inst 0xc0860460 // mova { z0.s-z3.s }, za3h.s[x12]\n"
- ".inst 0xa042c1e8 // ld1w { z8.s-z11.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa042c1c8 // ld1w { z8.s-z11.s }, pn8.b/Z, [x14, #0x8, MUL VL]\n"
".inst 0xc0840502 // mova za2h.s[x12], { z8.s-z11.s }\n"
- ".inst 0xa043c1fc // ld1w { z28.s-z31.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xa043c1dc // ld1w { z28.s-z31.s }, pn8.b/Z, [x14, #0xc, MUL VL]\n"
".inst 0xc0840783 // mova za3h.s[x12], { z28.s-z31.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- ".inst 0xa060c1d8 // st1w { z24.s-z27.s }, pn8.b, [x14]\n"
- "addvl x15, x15, #16\n"
- ".inst 0xa061c1c4 // st1w { z4.s-z7.s }, pn8.b, [x14, #0x4, MUL VL]\n"
- ".inst 0xa062c1cc // st1w { z12.s-z15.s }, pn8.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c1c0 // st1w { z0.s-z3.s }, pn8.b, [x14, #0xc, MUL VL]\n"
+ "cmp x12, x19\n"
+ ".inst 0xa060c1b8 // st1w { z24.s-z27.s }, pn8.b, [x13]\n"
"addvl x14, x14, #16\n"
+ ".inst 0xa061c1a4 // st1w { z4.s-z7.s }, pn8.b, [x13, #0x4, MUL VL]\n"
+ ".inst 0xa062c1ac // st1w { z12.s-z15.s }, pn8.b, [x13, #0x8, MUL VL]\n"
+ ".inst 0xa063c1a0 // st1w { z0.s-z3.s }, pn8.b, [x13, #0xc, MUL VL]\n"
+ "addvl x13, x13, #16\n"
"blt 11b\n"
"b 42f\n"
"12:" // Store to partial result buffer: Store only
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"13:" // Store to partial result buffer: Store only: Loop
".inst 0xc086040c // mova { z12.s-z15.s }, za0h.s[x12]\n"
".inst 0xc0860438 // mova { z24.s-z27.s }, za1h.s[x12]\n"
- ".inst 0xa060c1cc // st1w { z12.s-z15.s }, pn8.b, [x14]\n"
+ ".inst 0xa060c1ac // st1w { z12.s-z15.s }, pn8.b, [x13]\n"
".inst 0xc0860440 // mova { z0.s-z3.s }, za2h.s[x12]\n"
".inst 0xc0860468 // mova { z8.s-z11.s }, za3h.s[x12]\n"
- ".inst 0xa061c1d8 // st1w { z24.s-z27.s }, pn8.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xa061c1b8 // st1w { z24.s-z27.s }, pn8.b, [x13, #0x4, MUL VL]\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- ".inst 0xa062c1c0 // st1w { z0.s-z3.s }, pn8.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c1c8 // st1w { z8.s-z11.s }, pn8.b, [x14, #0xc, MUL VL]\n"
- "addvl x14, x14, #16\n"
+ "cmp x12, x19\n"
+ ".inst 0xa062c1a0 // st1w { z0.s-z3.s }, pn8.b, [x13, #0x8, MUL VL]\n"
+ ".inst 0xa063c1a8 // st1w { z8.s-z11.s }, pn8.b, [x13, #0xc, MUL VL]\n"
+ "addvl x13, x13, #16\n"
"blt 13b\n"
"b 42f\n"
"14:" // Store to output array
- "ldr x26, [%x[args], %[offsetof_C]]\n"
- "add x26, x26, x10, LSL #2\n" // C += n
- "sub x25, x13, x11\n"
- "ldr x24, [%x[args], %[offsetof_ldcb]]\n"
- "madd x26, x11, x24, x26\n" // C += m * ldc
- "tbz x16, #2, 27f\n"
- "cntw x23\n"
- "cmp x25, x23\n"
- "csel x22, x25, x23, LT\n"
- "lsr x21, x22, #0x2\n"
+ "ldr x25, [%x[args], %[offsetof_C]]\n"
+ "add x25, x25, x9, LSL #2\n" // C += n
+ "sub x24, x11, x10\n"
+ "ldr x23, [%x[args], %[offsetof_ldcb]]\n"
+ "madd x25, x10, x23, x25\n" // C += m * ldc
+ "tbz x15, #2, 27f\n"
+ "cntw x22\n"
+ "cmp x24, x22\n"
+ "csel x21, x24, x22, LT\n"
+ "lsr x20, x21, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 16f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 16f\n"
"15:" // Store to output array: Skip activation: Accumulator row 0 loop
".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
- "st1w { z16.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1w { z17.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z16.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1w { z17.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"add x12, x12, #0x4\n"
- "st1w { z18.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
- "st1w { z19.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z18.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "cmp x12, x20, LSL #2\n"
+ "st1w { z19.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"blt 15b\n"
"16:" // Store to output array: Skip activation: Accumulator row 0 oddments
- "cbz x20, 17f\n"
- "subs x20, x20, #0x1\n"
+ "cbz x19, 17f\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
- "st1w { z4.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z4.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 17f\n"
- "subs x20, x20, #0x1\n"
- "st1w { z5.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ "st1w { z5.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 17f\n"
- "st1w { z6.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z6.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"17:" // Store to output array: Skip activation: Accumulator row 0 oddments: End
- "subs x25, x25, x22\n"
+ "subs x24, x24, x21\n"
"beq 27f\n"
- "cmp x25, x23\n"
- "csel x22, x25, x23, LT\n"
- "lsr x21, x22, #0x2\n"
+ "cmp x24, x22\n"
+ "csel x21, x24, x22, LT\n"
+ "lsr x20, x21, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 19f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 19f\n"
"18:" // Store to output array: Skip activation: Accumulator row 1 loop
".inst 0xc0860430 // mova { z16.s-z19.s }, za1h.s[x12]\n"
- "st1w { z16.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1w { z17.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z16.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1w { z17.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"add x12, x12, #0x4\n"
- "st1w { z18.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
- "st1w { z19.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z18.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "cmp x12, x20, LSL #2\n"
+ "st1w { z19.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"blt 18b\n"
"19:" // Store to output array: Skip activation: Accumulator row 1 oddments
- "cbz x20, 20f\n"
- "subs x20, x20, #0x1\n"
+ "cbz x19, 20f\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
- "st1w { z4.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z4.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 20f\n"
- "subs x20, x20, #0x1\n"
- "st1w { z5.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ "st1w { z5.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 20f\n"
- "st1w { z6.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z6.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"20:" // Store to output array: Skip activation: Accumulator row 1 oddments: End
- "subs x25, x25, x22\n"
+ "subs x24, x24, x21\n"
"beq 27f\n"
- "cmp x25, x23\n"
- "csel x22, x25, x23, LT\n"
- "lsr x21, x22, #0x2\n"
+ "cmp x24, x22\n"
+ "csel x21, x24, x22, LT\n"
+ "lsr x20, x21, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 22f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 22f\n"
"21:" // Store to output array: Skip activation: Accumulator row 2 loop
".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
- "st1w { z16.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1w { z17.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z16.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1w { z17.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"add x12, x12, #0x4\n"
- "st1w { z18.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
- "st1w { z19.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z18.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "cmp x12, x20, LSL #2\n"
+ "st1w { z19.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"blt 21b\n"
"22:" // Store to output array: Skip activation: Accumulator row 2 oddments
- "cbz x20, 23f\n"
- "subs x20, x20, #0x1\n"
+ "cbz x19, 23f\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc0860454 // mova { z20.s-z23.s }, za2h.s[x12]\n"
- "st1w { z20.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z20.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 23f\n"
- "subs x20, x20, #0x1\n"
- "st1w { z21.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ "st1w { z21.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 23f\n"
- "st1w { z22.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z22.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"23:" // Store to output array: Skip activation: Accumulator row 2 oddments: End
- "subs x25, x25, x22\n"
+ "subs x24, x24, x21\n"
"beq 27f\n"
- "cmp x25, x23\n"
- "csel x22, x25, x23, LT\n"
- "lsr x21, x22, #0x2\n"
+ "cmp x24, x22\n"
+ "csel x21, x24, x22, LT\n"
+ "lsr x20, x21, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 25f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 25f\n"
"24:" // Store to output array: Skip activation: Accumulator row 3 loop
".inst 0xc0860464 // mova { z4.s-z7.s }, za3h.s[x12]\n"
- "st1w { z4.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1w { z5.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z4.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1w { z5.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"add x12, x12, #0x4\n"
- "st1w { z6.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
- "st1w { z7.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z6.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "cmp x12, x20, LSL #2\n"
+ "st1w { z7.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"blt 24b\n"
"25:" // Store to output array: Skip activation: Accumulator row 3 oddments
- "cbz x20, 26f\n"
- "subs x20, x20, #0x1\n"
+ "cbz x19, 26f\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
- "st1w { z12.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z12.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 26f\n"
- "subs x20, x20, #0x1\n"
- "st1w { z13.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ "st1w { z13.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 26f\n"
- "st1w { z14.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z14.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"26:" // Store to output array: Skip activation: Accumulator row 3 oddments: End
- "subs x25, x25, x22\n"
+ "subs x24, x24, x21\n"
"beq 27f\n"
"b 40f\n"
"27:" // Store to output array: Skip activation: End
- "cntw x23\n"
- "cmp x25, x23\n"
+ "cntw x22\n"
+ "cmp x24, x22\n"
"ld1rw { z25.s }, p1/Z, [%x[args], %[offsetof_KernelArgs_min]]\n"
- "csel x22, x25, x23, LT\n"
- "lsr x21, x22, #0x2\n"
+ "csel x21, x24, x22, LT\n"
+ "lsr x20, x21, #0x2\n"
"ld1rw { z24.s }, p1/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 29f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 29f\n"
"28:" // Store to output array: Accumulator row 0 loop
".inst 0xc0860414 // mova { z20.s-z23.s }, za0h.s[x12]\n"
".inst 0xc1b8cb34 // fclamp { z20.s-z23.s }, z25.s, z24.s\n"
- "st1w { z20.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1w { z21.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z20.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1w { z21.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"add x12, x12, #0x4\n"
- "st1w { z22.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
- "st1w { z23.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z22.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "cmp x12, x20, LSL #2\n"
+ "st1w { z23.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"blt 28b\n"
"29:" // Store to output array: Accumulator row 0 oddments
- "cbz x20, 30f\n"
+ "cbz x19, 30f\n"
".inst 0xc0860408 // mova { z8.s-z11.s }, za0h.s[x12]\n"
- "subs x20, x20, #0x1\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc1b8cb28 // fclamp { z8.s-z11.s }, z25.s, z24.s\n"
- "st1w { z8.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z8.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 30f\n"
- "subs x20, x20, #0x1\n"
- "st1w { z9.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ "st1w { z9.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 30f\n"
- "st1w { z10.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z10.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"30:" // Store to output array: Accumulator row 0 oddments: End
- "subs x25, x25, x22\n"
+ "subs x24, x24, x21\n"
"beq 40f\n"
- "cmp x25, x23\n"
- "csel x22, x25, x23, LT\n"
- "lsr x21, x22, #0x2\n"
+ "cmp x24, x22\n"
+ "csel x21, x24, x22, LT\n"
+ "lsr x20, x21, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 32f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 32f\n"
"31:" // Store to output array: Accumulator row 1 loop
".inst 0xc0860430 // mova { z16.s-z19.s }, za1h.s[x12]\n"
".inst 0xc1b8cb30 // fclamp { z16.s-z19.s }, z25.s, z24.s\n"
- "st1w { z16.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1w { z17.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z16.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1w { z17.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"add x12, x12, #0x4\n"
- "st1w { z18.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
- "st1w { z19.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z18.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "cmp x12, x20, LSL #2\n"
+ "st1w { z19.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"blt 31b\n"
"32:" // Store to output array: Accumulator row 1 oddments
- "cbz x20, 33f\n"
+ "cbz x19, 33f\n"
".inst 0xc0860430 // mova { z16.s-z19.s }, za1h.s[x12]\n"
- "subs x20, x20, #0x1\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc1b8cb30 // fclamp { z16.s-z19.s }, z25.s, z24.s\n"
- "st1w { z16.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z16.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 33f\n"
- "subs x20, x20, #0x1\n"
- "st1w { z17.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ "st1w { z17.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 33f\n"
- "st1w { z18.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z18.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"33:" // Store to output array: Accumulator row 1 oddments: End
- "subs x25, x25, x22\n"
+ "subs x24, x24, x21\n"
"beq 40f\n"
- "cmp x25, x23\n"
- "csel x22, x25, x23, LT\n"
- "lsr x21, x22, #0x2\n"
+ "cmp x24, x22\n"
+ "csel x21, x24, x22, LT\n"
+ "lsr x20, x21, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 35f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 35f\n"
"34:" // Store to output array: Accumulator row 2 loop
".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
".inst 0xc1b8cb30 // fclamp { z16.s-z19.s }, z25.s, z24.s\n"
- "st1w { z16.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1w { z17.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z16.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1w { z17.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"add x12, x12, #0x4\n"
- "st1w { z18.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
- "st1w { z19.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z18.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "cmp x12, x20, LSL #2\n"
+ "st1w { z19.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"blt 34b\n"
"35:" // Store to output array: Accumulator row 2 oddments
- "cbz x20, 36f\n"
+ "cbz x19, 36f\n"
".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
- "subs x20, x20, #0x1\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc1b8cb30 // fclamp { z16.s-z19.s }, z25.s, z24.s\n"
- "st1w { z16.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z16.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 36f\n"
- "subs x20, x20, #0x1\n"
- "st1w { z17.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ "st1w { z17.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 36f\n"
- "st1w { z18.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z18.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"36:" // Store to output array: Accumulator row 2 oddments: End
- "subs x25, x25, x22\n"
+ "subs x24, x24, x21\n"
"beq 40f\n"
- "cmp x25, x23\n"
- "csel x20, x25, x23, LT\n"
- "lsr x21, x20, #0x2\n"
+ "cmp x24, x22\n"
+ "csel x19, x24, x22, LT\n"
+ "lsr x20, x19, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x20, #0x3\n"
- "cbz x21, 38f\n"
+ "and x19, x19, #0x3\n"
+ "cbz x20, 38f\n"
"37:" // Store to output array: Accumulator row 3 loop
".inst 0xc0860474 // mova { z20.s-z23.s }, za3h.s[x12]\n"
".inst 0xc1b8cb34 // fclamp { z20.s-z23.s }, z25.s, z24.s\n"
- "st1w { z20.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1w { z21.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z20.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1w { z21.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"add x12, x12, #0x4\n"
- "st1w { z22.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
- "st1w { z23.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z22.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "cmp x12, x20, LSL #2\n"
+ "st1w { z23.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"blt 37b\n"
"38:" // Store to output array: Accumulator row 3 oddments
- "cbz x20, 39f\n"
+ "cbz x19, 39f\n"
".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
- "subs x20, x20, #0x1\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc1b8cb30 // fclamp { z16.s-z19.s }, z25.s, z24.s\n"
- "st1w { z16.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z16.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 39f\n"
- "subs x20, x20, #0x1\n"
- "st1w { z17.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ "st1w { z17.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 39f\n"
- "st1w { z18.s }, p0, [x26]\n"
+ "st1w { z18.s }, p0, [x25]\n"
"39:" // Store to output array: Accumulator row 3 oddments: End
"40:" // Store to output array: End
- "tbz x16, #0, 42f\n"
+ "tbz x15, #0, 42f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"41:" // Store to output array: Refill accumulators: Loop
- ".inst 0xa040c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xa040c1d0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x14]\n"
".inst 0xc0840600 // mova za0h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa041c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa041c1d0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x14, #0x4, MUL VL]\n"
".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa042c1f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa042c1d4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x14, #0x8, MUL VL]\n"
".inst 0xc0840682 // mova za2h.s[x12], { z20.s-z23.s }\n"
- ".inst 0xa043c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xa043c1c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x14, #0xc, MUL VL]\n"
".inst 0xc0840483 // mova za3h.s[x12], { z4.s-z7.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- "addvl x15, x15, #16\n"
+ "cmp x12, x19\n"
+ "addvl x14, x14, #16\n"
"blt 41b\n"
"42:" // End block
- "incw x10\n"
- "cmp x10, x9\n"
+ "incw x9\n"
+ "cmp x9, x28\n"
"blt 3b\n"
- "incw x11, ALL, MUL #4\n"
- "cmp x11, x13\n"
- "mov x10, #0x0\n"
- "mov x28, x27\n"
+ "incw x10, ALL, MUL #4\n"
+ "cmp x10, x11\n"
+ "mov x9, #0x0\n"
+ "mov x27, x26\n"
"blt 3b\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_KernelArgs_max] "I" (offsetof(KernelArgs, max)), [offsetof_KernelArgs_min] "I" (offsetof(KernelArgs, min)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
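(The only change outside the assembly body is the clobber list: x19 rejoins the set of registers the block reports as modified. Since x19–x28 are callee-saved under AAPCS64, listing x19 obliges the compiler to spill and restore it around the kernel. A reduced example of the same contract — illustrative only, not library code:

    #include <cstdint>

    // Illustrative only: naming x19 in the clobber list tells the compiler
    // this asm block may overwrite it; because x19 is callee-saved under
    // AAPCS64, the compiler saves/restores it rather than assuming it is
    // preserved across the block.
    inline uint64_t zero_via_x19() {
        uint64_t out;
        __asm__ __volatile__(
            "mov x19, #0\n"
            "mov %[out], x19\n"
            : [out] "=r" (out)
            :
            : "x19");
        return out;
    }

The forward change being reverted removed x19 from these lists precisely so toolchains that reserve x19 could still build the kernels; the revert restores the original allocation.)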
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_1VLx4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_1VLx4VL/generic.cpp
index dd99387c5e..4f6d9a3d98 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_1VLx4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_1VLx4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __ARM_FEATURE_SVE
#ifdef ARM_COMPUTE_ENABLE_SME2
@@ -103,106 +103,106 @@ void sme2_interleaved_nomerge_fp32_mopa_1VLx4VL(const float *const A, const floa
KernelArgs args(A, B, C, ldc, M, N, K, bias, act, accumulate, accumulator_buffer);
__asm__ __volatile__(
- "ldr x15, [%x[args], %[offsetof_flags]]\n"
+ "ldr x14, [%x[args], %[offsetof_flags]]\n"
".inst 0xd503477f // SMSTART ZA\n"
"ptrue p0.b\n"
".inst 0x25207811 // ptrue pn9.b\n"
- "ldr x14, [%x[args], %[offsetof_accumulator_buffer]]\n"
"ldr x13, [%x[args], %[offsetof_accumulator_buffer]]\n"
- "tbz x15, #0, 2f\n"
+ "ldr x11, [%x[args], %[offsetof_accumulator_buffer]]\n"
+ "tbz x14, #0, 2f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"1:" // Initial accumulator load from buffer: Loop
- ".inst 0xa040c5cc // ld1w { z12.s-z15.s }, pn9.b/Z, [x14]\n"
+ ".inst 0xa040c5ac // ld1w { z12.s-z15.s }, pn9.b/Z, [x13]\n"
".inst 0xc0840580 // mova za0h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xa041c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
+ ".inst 0xa041c5b0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x13, #0x4, MUL VL]\n"
".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa042c5c4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa042c5a4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x13, #0x8, MUL VL]\n"
".inst 0xc0840482 // mova za2h.s[x12], { z4.s-z7.s }\n"
- ".inst 0xa043c5c4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
+ ".inst 0xa043c5a4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x13, #0xc, MUL VL]\n"
".inst 0xc0840483 // mova za3h.s[x12], { z4.s-z7.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- "addvl x14, x14, #16\n"
+ "cmp x12, x19\n"
+ "addvl x13, x13, #16\n"
"blt 1b\n"
"2:" // Initial accumulator load from buffer: End
- "ldr w11, [%x[args], %[offsetof_M]]\n"
- "mov x10, #0x0\n"
+ "ldr w10, [%x[args], %[offsetof_M]]\n"
"mov x9, #0x0\n"
- "ldr w28, [%x[args], %[offsetof_N]]\n"
- "ldr x27, [%x[args], %[offsetof_A]]\n"
+ "mov x28, #0x0\n"
+ "ldr w27, [%x[args], %[offsetof_N]]\n"
+ "ldr x26, [%x[args], %[offsetof_A]]\n"
"3:" // M and N loop
- "mov x26, x27\n"
- ".inst 0x25bc6530 // whilelt pn8.s, x9, x28, VLx4\n"
- "tbnz x15, #0, 4f\n"
- "ldr x20, [%x[args], %[offsetof_bias]]\n"
+ "mov x25, x26\n"
+ ".inst 0x25bb6790 // whilelt pn8.s, x28, x27, VLx4\n"
+ "tbnz x14, #0, 4f\n"
+ "ldr x19, [%x[args], %[offsetof_bias]]\n"
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
- "cbz x20, 5f\n"
+ "cbz x19, 5f\n"
"fmov z21.s, #1.0\n"
- ".inst 0xa009c29d // ldnt1w { z28.s-z31.s }, p8/Z, [x20, x9, LSL #2]\n"
+ ".inst 0xa01cc27d // ldnt1w { z28.s-z31.s }, p8/Z, [x19, x28, LSL #2]\n"
".inst 0x809c02a0 // fmopa za0.s, p0/M, p0/M, z21.s, z28.s\n"
".inst 0x809d02a1 // fmopa za1.s, p0/M, p0/M, z21.s, z29.s\n"
".inst 0x809e02a2 // fmopa za2.s, p0/M, p0/M, z21.s, z30.s\n"
".inst 0x809f02a3 // fmopa za3.s, p0/M, p0/M, z21.s, z31.s\n"
"4:" // Prepare accumulators: Test for last block
+ "mov x19, x28\n"
"mov x20, x9\n"
- "mov x21, x10\n"
- "incw x20, ALL, MUL #4\n"
- "incw x21\n"
- "cmp x20, x28\n"
- "csel x21, x10, x21, LT\n"
- "mov x20, x15\n"
- "bfm x15, XZR, #0x0, #0x0 // bfc x15, #0x0, #0x1\n"
- "cmp x21, x11\n"
- "csel x15, x20, x15, LT\n"
+ "incw x19, ALL, MUL #4\n"
+ "incw x20\n"
+ "cmp x19, x27\n"
+ "csel x20, x9, x20, LT\n"
+ "mov x19, x14\n"
+ "bfm x14, XZR, #0x0, #0x0 // bfc x14, #0x0, #0x1\n"
+ "cmp x20, x10\n"
+ "csel x14, x19, x14, LT\n"
"5:" // Prepare accumulators: End
- "ldr x20, [%x[args], %[offsetof_K]]\n"
- "lsr x23, x20, #0x2\n"
- "and x22, x20, #0x3\n"
- "ldr x21, [%x[args], %[offsetof_B]]\n"
- "ldr x20, [%x[args], %[offsetof_kstride_bytes]]\n"
- "madd x21, x9, x20, x21\n" // bptr = B + n * kstride_bytes
- "cbz x23, 8f\n"
- "subs x23, x23, #0x1\n"
- "ld1w { z0.s }, p0/Z, [x26]\n"
- ".inst 0xa140c6bb // ldnt1w { z19.s, z23.s, z27.s, z31.s }, pn9.b/Z, [x21]\n"
- "ld1w { z13.s }, p0/Z, [x26, #1, MUL VL]\n"
- ".inst 0xa141c6aa // ldnt1w { z2.s, z6.s, z10.s, z14.s }, pn9.b/Z, [x21, #0x4, MUL VL]\n"
- "ld1w { z12.s }, p0/Z, [x26, #2, MUL VL]\n"
- ".inst 0xa142c6ab // ldnt1w { z3.s, z7.s, z11.s, z15.s }, pn9.b/Z, [x21, #0x8, MUL VL]\n"
- "ld1w { z26.s }, p0/Z, [x26, #3, MUL VL]\n"
- "addvl x26, x26, #4\n"
- ".inst 0xa143c6b8 // ldnt1w { z16.s, z20.s, z24.s, z28.s }, pn9.b/Z, [x21, #0xc, MUL VL]\n"
- "addvl x21, x21, #16\n"
+ "ldr x19, [%x[args], %[offsetof_K]]\n"
+ "lsr x22, x19, #0x2\n"
+ "and x21, x19, #0x3\n"
+ "ldr x20, [%x[args], %[offsetof_B]]\n"
+ "ldr x19, [%x[args], %[offsetof_kstride_bytes]]\n"
+ "madd x20, x28, x19, x20\n" // bptr = B + n * kstride_bytes
+ "cbz x22, 8f\n"
+ "subs x22, x22, #0x1\n"
+ "ld1w { z0.s }, p0/Z, [x25]\n"
+ ".inst 0xa140c69b // ldnt1w { z19.s, z23.s, z27.s, z31.s }, pn9.b/Z, [x20]\n"
+ "ld1w { z13.s }, p0/Z, [x25, #1, MUL VL]\n"
+ ".inst 0xa141c68a // ldnt1w { z2.s, z6.s, z10.s, z14.s }, pn9.b/Z, [x20, #0x4, MUL VL]\n"
+ "ld1w { z12.s }, p0/Z, [x25, #2, MUL VL]\n"
+ ".inst 0xa142c68b // ldnt1w { z3.s, z7.s, z11.s, z15.s }, pn9.b/Z, [x20, #0x8, MUL VL]\n"
+ "ld1w { z26.s }, p0/Z, [x25, #3, MUL VL]\n"
+ "addvl x25, x25, #4\n"
+ ".inst 0xa143c698 // ldnt1w { z16.s, z20.s, z24.s, z28.s }, pn9.b/Z, [x20, #0xc, MUL VL]\n"
+ "addvl x20, x20, #16\n"
"ble 7f\n"
"6:" // K loop
".inst 0x80930000 // fmopa za0.s, p0/M, p0/M, z0.s, z19.s\n"
- "subs x23, x23, #0x1\n"
+ "subs x22, x22, #0x1\n"
".inst 0x80970001 // fmopa za1.s, p0/M, p0/M, z0.s, z23.s\n"
".inst 0x809b0002 // fmopa za2.s, p0/M, p0/M, z0.s, z27.s\n"
".inst 0x809f0003 // fmopa za3.s, p0/M, p0/M, z0.s, z31.s\n"
- "ld1w { z0.s }, p0/Z, [x26]\n"
+ "ld1w { z0.s }, p0/Z, [x25]\n"
".inst 0x808201a0 // fmopa za0.s, p0/M, p0/M, z13.s, z2.s\n"
- ".inst 0xa140c6bb // ldnt1w { z19.s, z23.s, z27.s, z31.s }, pn9.b/Z, [x21]\n"
+ ".inst 0xa140c69b // ldnt1w { z19.s, z23.s, z27.s, z31.s }, pn9.b/Z, [x20]\n"
".inst 0x808601a1 // fmopa za1.s, p0/M, p0/M, z13.s, z6.s\n"
".inst 0x808a01a2 // fmopa za2.s, p0/M, p0/M, z13.s, z10.s\n"
".inst 0x808e01a3 // fmopa za3.s, p0/M, p0/M, z13.s, z14.s\n"
- "ld1w { z13.s }, p0/Z, [x26, #1, MUL VL]\n"
+ "ld1w { z13.s }, p0/Z, [x25, #1, MUL VL]\n"
".inst 0x80830180 // fmopa za0.s, p0/M, p0/M, z12.s, z3.s\n"
- ".inst 0xa141c6aa // ldnt1w { z2.s, z6.s, z10.s, z14.s }, pn9.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xa141c68a // ldnt1w { z2.s, z6.s, z10.s, z14.s }, pn9.b/Z, [x20, #0x4, MUL VL]\n"
".inst 0x80870181 // fmopa za1.s, p0/M, p0/M, z12.s, z7.s\n"
".inst 0x808b0182 // fmopa za2.s, p0/M, p0/M, z12.s, z11.s\n"
".inst 0x808f0183 // fmopa za3.s, p0/M, p0/M, z12.s, z15.s\n"
- "ld1w { z12.s }, p0/Z, [x26, #2, MUL VL]\n"
- ".inst 0xa142c6ab // ldnt1w { z3.s, z7.s, z11.s, z15.s }, pn9.b/Z, [x21, #0x8, MUL VL]\n"
+ "ld1w { z12.s }, p0/Z, [x25, #2, MUL VL]\n"
+ ".inst 0xa142c68b // ldnt1w { z3.s, z7.s, z11.s, z15.s }, pn9.b/Z, [x20, #0x8, MUL VL]\n"
".inst 0x80900340 // fmopa za0.s, p0/M, p0/M, z26.s, z16.s\n"
".inst 0x80940341 // fmopa za1.s, p0/M, p0/M, z26.s, z20.s\n"
".inst 0x80980342 // fmopa za2.s, p0/M, p0/M, z26.s, z24.s\n"
".inst 0x809c0343 // fmopa za3.s, p0/M, p0/M, z26.s, z28.s\n"
- "ld1w { z26.s }, p0/Z, [x26, #3, MUL VL]\n"
- "addvl x26, x26, #4\n"
- ".inst 0xa143c6b8 // ldnt1w { z16.s, z20.s, z24.s, z28.s }, pn9.b/Z, [x21, #0xc, MUL VL]\n"
- "addvl x21, x21, #16\n"
+ "ld1w { z26.s }, p0/Z, [x25, #3, MUL VL]\n"
+ "addvl x25, x25, #4\n"
+ ".inst 0xa143c698 // ldnt1w { z16.s, z20.s, z24.s, z28.s }, pn9.b/Z, [x20, #0xc, MUL VL]\n"
+ "addvl x20, x20, #16\n"
"bgt 6b\n"
"7:" // K loop tail
".inst 0x80930000 // fmopa za0.s, p0/M, p0/M, z0.s, z19.s\n"
@@ -222,123 +222,123 @@ void sme2_interleaved_nomerge_fp32_mopa_1VLx4VL(const float *const A, const floa
".inst 0x80980342 // fmopa za2.s, p0/M, p0/M, z26.s, z24.s\n"
".inst 0x809c0343 // fmopa za3.s, p0/M, p0/M, z26.s, z28.s\n"
"8:" // K oddments
- "cbz x22, 10f\n"
+ "cbz x21, 10f\n"
"9:" // K oddments: Loop
- "ld1w { z0.s }, p0/Z, [x26]\n"
- "subs x22, x22, #0x1\n"
- "addvl x26, x26, #1\n"
- ".inst 0xa140c6b3 // ld1w { z19.s, z23.s, z27.s, z31.s }, pn9.b/Z, [x21]\n"
- "addvl x21, x21, #4\n"
+ "ld1w { z0.s }, p0/Z, [x25]\n"
+ "subs x21, x21, #0x1\n"
+ "addvl x25, x25, #1\n"
+ ".inst 0xa140c693 // ld1w { z19.s, z23.s, z27.s, z31.s }, pn9.b/Z, [x20]\n"
+ "addvl x20, x20, #4\n"
".inst 0x80930000 // fmopa za0.s, p0/M, p0/M, z0.s, z19.s\n"
".inst 0x80970001 // fmopa za1.s, p0/M, p0/M, z0.s, z23.s\n"
".inst 0x809b0002 // fmopa za2.s, p0/M, p0/M, z0.s, z27.s\n"
".inst 0x809f0003 // fmopa za3.s, p0/M, p0/M, z0.s, z31.s\n"
"bgt 9b\n"
"10:" // K oddments: End
- "tbz x15, #1, 14f\n"
- "tbz x15, #0, 12f\n"
+ "tbz x14, #1, 14f\n"
+ "tbz x14, #0, 12f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"11:" // Store to partial result buffer: Store and refill: Loop
- ".inst 0xa040c5cc // ld1w { z12.s-z15.s }, pn9.b/Z, [x14]\n"
+ ".inst 0xa040c5ac // ld1w { z12.s-z15.s }, pn9.b/Z, [x13]\n"
".inst 0xc0860418 // mova { z24.s-z27.s }, za0h.s[x12]\n"
".inst 0xc0840580 // mova za0h.s[x12], { z12.s-z15.s }\n"
".inst 0xc0860434 // mova { z20.s-z23.s }, za1h.s[x12]\n"
- ".inst 0xa041c5dc // ld1w { z28.s-z31.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
+ ".inst 0xa041c5bc // ld1w { z28.s-z31.s }, pn9.b/Z, [x13, #0x4, MUL VL]\n"
".inst 0xc0840781 // mova za1h.s[x12], { z28.s-z31.s }\n"
".inst 0xc086045c // mova { z28.s-z31.s }, za2h.s[x12]\n"
".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
- ".inst 0xa042c5cc // ld1w { z12.s-z15.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa042c5ac // ld1w { z12.s-z15.s }, pn9.b/Z, [x13, #0x8, MUL VL]\n"
".inst 0xc0840582 // mova za2h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xa043c5cc // ld1w { z12.s-z15.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
+ ".inst 0xa043c5ac // ld1w { z12.s-z15.s }, pn9.b/Z, [x13, #0xc, MUL VL]\n"
".inst 0xc0840583 // mova za3h.s[x12], { z12.s-z15.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- ".inst 0xa060c5b8 // st1w { z24.s-z27.s }, pn9.b, [x13]\n"
- "addvl x14, x14, #16\n"
- ".inst 0xa061c5b4 // st1w { z20.s-z23.s }, pn9.b, [x13, #0x4, MUL VL]\n"
- ".inst 0xa062c5bc // st1w { z28.s-z31.s }, pn9.b, [x13, #0x8, MUL VL]\n"
- ".inst 0xa063c5b0 // st1w { z16.s-z19.s }, pn9.b, [x13, #0xc, MUL VL]\n"
+ "cmp x12, x19\n"
+ ".inst 0xa060c578 // st1w { z24.s-z27.s }, pn9.b, [x11]\n"
"addvl x13, x13, #16\n"
+ ".inst 0xa061c574 // st1w { z20.s-z23.s }, pn9.b, [x11, #0x4, MUL VL]\n"
+ ".inst 0xa062c57c // st1w { z28.s-z31.s }, pn9.b, [x11, #0x8, MUL VL]\n"
+ ".inst 0xa063c570 // st1w { z16.s-z19.s }, pn9.b, [x11, #0xc, MUL VL]\n"
+ "addvl x11, x11, #16\n"
"blt 11b\n"
"b 24f\n"
"12:" // Store to partial result buffer: Store only
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"13:" // Store to partial result buffer: Store only: Loop
".inst 0xc086040c // mova { z12.s-z15.s }, za0h.s[x12]\n"
".inst 0xc086043c // mova { z28.s-z31.s }, za1h.s[x12]\n"
- ".inst 0xa060c5ac // st1w { z12.s-z15.s }, pn9.b, [x13]\n"
+ ".inst 0xa060c56c // st1w { z12.s-z15.s }, pn9.b, [x11]\n"
".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
".inst 0xc0860464 // mova { z4.s-z7.s }, za3h.s[x12]\n"
- ".inst 0xa061c5bc // st1w { z28.s-z31.s }, pn9.b, [x13, #0x4, MUL VL]\n"
+ ".inst 0xa061c57c // st1w { z28.s-z31.s }, pn9.b, [x11, #0x4, MUL VL]\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- ".inst 0xa062c5b0 // st1w { z16.s-z19.s }, pn9.b, [x13, #0x8, MUL VL]\n"
- ".inst 0xa063c5a4 // st1w { z4.s-z7.s }, pn9.b, [x13, #0xc, MUL VL]\n"
- "addvl x13, x13, #16\n"
+ "cmp x12, x19\n"
+ ".inst 0xa062c570 // st1w { z16.s-z19.s }, pn9.b, [x11, #0x8, MUL VL]\n"
+ ".inst 0xa063c564 // st1w { z4.s-z7.s }, pn9.b, [x11, #0xc, MUL VL]\n"
+ "addvl x11, x11, #16\n"
"blt 13b\n"
"b 24f\n"
"14:" // Store to output array
- "ldr x25, [%x[args], %[offsetof_C]]\n"
- "add x25, x25, x9, LSL #2\n" // C += n
- "sub x24, x11, x10\n"
- "ldr x23, [%x[args], %[offsetof_ldcb]]\n"
- "madd x25, x10, x23, x25\n" // C += m * ldc
- "tbz x15, #2, 18f\n"
- "cntw x20\n"
- "cmp x24, x20\n"
- "csel x22, x24, x20, LT\n"
- "lsr x21, x22, #0x2\n"
+ "ldr x24, [%x[args], %[offsetof_C]]\n"
+ "add x24, x24, x28, LSL #2\n" // C += n
+ "sub x23, x10, x9\n"
+ "ldr x22, [%x[args], %[offsetof_ldcb]]\n"
+ "madd x24, x9, x22, x24\n" // C += m * ldc
+ "tbz x14, #2, 18f\n"
+ "cntw x19\n"
+ "cmp x23, x19\n"
+ "csel x21, x23, x19, LT\n"
+ "lsr x20, x21, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 16f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 16f\n"
"15:" // Store to output array: Skip activation: Accumulator row 0 loop
".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
- ".inst 0xa160c320 // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x25]\n"
- "add x25, x25, x23\n"
- ".inst 0xa160c321 // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x25]\n"
- "add x25, x25, x23\n"
+ ".inst 0xa160c300 // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x24]\n"
+ "add x24, x24, x22\n"
+ ".inst 0xa160c301 // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x24]\n"
+ "add x24, x24, x22\n"
"add x12, x12, #0x4\n"
- ".inst 0xa160c322 // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x25]\n"
- "add x25, x25, x23\n"
- "cmp x12, x21, LSL #2\n"
- ".inst 0xa160c323 // st1w { z3.s, z7.s, z11.s, z15.s }, p8, [x25]\n"
- "add x25, x25, x23\n"
+ ".inst 0xa160c302 // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x24]\n"
+ "add x24, x24, x22\n"
+ "cmp x12, x20, LSL #2\n"
+ ".inst 0xa160c303 // st1w { z3.s, z7.s, z11.s, z15.s }, p8, [x24]\n"
+ "add x24, x24, x22\n"
"blt 15b\n"
"16:" // Store to output array: Skip activation: Accumulator row 0 oddments
- "cbz x20, 17f\n"
- "subs x20, x20, #0x1\n"
+ "cbz x19, 17f\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
- ".inst 0xa160c320 // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x25]\n"
- "add x25, x25, x23\n"
+ ".inst 0xa160c300 // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x24]\n"
+ "add x24, x24, x22\n"
"beq 17f\n"
- "subs x20, x20, #0x1\n"
- ".inst 0xa160c321 // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x25]\n"
- "add x25, x25, x23\n"
+ "subs x19, x19, #0x1\n"
+ ".inst 0xa160c301 // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x24]\n"
+ "add x24, x24, x22\n"
"beq 17f\n"
- ".inst 0xa160c322 // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x25]\n"
- "add x25, x25, x23\n"
+ ".inst 0xa160c302 // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x24]\n"
+ "add x24, x24, x22\n"
"17:" // Store to output array: Skip activation: Accumulator row 0 oddments: End
- "subs x24, x24, x22\n"
+ "subs x23, x23, x21\n"
"beq 18f\n"
"b 22f\n"
"18:" // Store to output array: Skip activation: End
- "cntw x20\n"
- "cmp x24, x20\n"
+ "cntw x19\n"
+ "cmp x23, x19\n"
"ld1rw { z23.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_min]]\n"
- "csel x20, x24, x20, LT\n"
- "lsr x21, x20, #0x2\n"
+ "csel x19, x23, x19, LT\n"
+ "lsr x20, x19, #0x2\n"
"ld1rw { z16.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
"mov x12, #0x0\n"
- "and x20, x20, #0x3\n"
- "cbz x21, 20f\n"
+ "and x19, x19, #0x3\n"
+ "cbz x20, 20f\n"
"19:" // Store to output array: Accumulator row 0 loop
".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
@@ -348,19 +348,19 @@ void sme2_interleaved_nomerge_fp32_mopa_1VLx4VL(const float *const A, const floa
".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
".inst 0xc1b0cae8 // fclamp { z8.s-z11.s }, z23.s, z16.s\n"
".inst 0xc1b0caec // fclamp { z12.s-z15.s }, z23.s, z16.s\n"
- ".inst 0xa160c320 // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x25]\n"
- "add x25, x25, x23\n"
+ ".inst 0xa160c300 // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x24]\n"
+ "add x24, x24, x22\n"
"add x12, x12, #0x4\n"
- ".inst 0xa160c321 // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x25]\n"
- "add x25, x25, x23\n"
- "cmp x12, x21, LSL #2\n"
- ".inst 0xa160c322 // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x25]\n"
- "add x25, x25, x23\n"
- ".inst 0xa160c323 // st1w { z3.s, z7.s, z11.s, z15.s }, p8, [x25]\n"
- "add x25, x25, x23\n"
+ ".inst 0xa160c301 // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x24]\n"
+ "add x24, x24, x22\n"
+ "cmp x12, x20, LSL #2\n"
+ ".inst 0xa160c302 // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x24]\n"
+ "add x24, x24, x22\n"
+ ".inst 0xa160c303 // st1w { z3.s, z7.s, z11.s, z15.s }, p8, [x24]\n"
+ "add x24, x24, x22\n"
"blt 19b\n"
"20:" // Store to output array: Accumulator row 0 oddments
- "cbz x20, 21f\n"
+ "cbz x19, 21f\n"
".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
".inst 0xc1b0cae0 // fclamp { z0.s-z3.s }, z23.s, z16.s\n"
@@ -369,46 +369,46 @@ void sme2_interleaved_nomerge_fp32_mopa_1VLx4VL(const float *const A, const floa
".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
".inst 0xc1b0cae8 // fclamp { z8.s-z11.s }, z23.s, z16.s\n"
".inst 0xc1b0caec // fclamp { z12.s-z15.s }, z23.s, z16.s\n"
- "subs x20, x20, #0x1\n"
- ".inst 0xa160c320 // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x25]\n"
- "add x25, x25, x23\n"
+ "subs x19, x19, #0x1\n"
+ ".inst 0xa160c300 // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x24]\n"
+ "add x24, x24, x22\n"
"beq 21f\n"
- "subs x20, x20, #0x1\n"
- ".inst 0xa160c321 // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x25]\n"
- "add x25, x25, x23\n"
+ "subs x19, x19, #0x1\n"
+ ".inst 0xa160c301 // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x24]\n"
+ "add x24, x24, x22\n"
"beq 21f\n"
- ".inst 0xa160c322 // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x25]\n"
+ ".inst 0xa160c302 // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x24]\n"
"21:" // Store to output array: Accumulator row 0 oddments: End
"22:" // Store to output array: End
- "tbz x15, #0, 24f\n"
+ "tbz x14, #0, 24f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"23:" // Store to output array: Refill accumulators: Loop
- ".inst 0xa040c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14]\n"
+ ".inst 0xa040c5b0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x13]\n"
".inst 0xc0840600 // mova za0h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa041c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
+ ".inst 0xa041c5b0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x13, #0x4, MUL VL]\n"
".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa042c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa042c5b0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x13, #0x8, MUL VL]\n"
".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa043c5c8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
+ ".inst 0xa043c5a8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x13, #0xc, MUL VL]\n"
".inst 0xc0840503 // mova za3h.s[x12], { z8.s-z11.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- "addvl x14, x14, #16\n"
+ "cmp x12, x19\n"
+ "addvl x13, x13, #16\n"
"blt 23b\n"
"24:" // End block
- "incw x9, ALL, MUL #4\n"
- "cmp x9, x28\n"
+ "incw x28, ALL, MUL #4\n"
+ "cmp x28, x27\n"
"blt 3b\n"
- "incw x10\n"
- "cmp x10, x11\n"
- "mov x9, #0x0\n"
- "mov x27, x26\n"
+ "incw x9\n"
+ "cmp x9, x10\n"
+ "mov x28, #0x0\n"
+ "mov x26, x25\n"
"blt 3b\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_KernelArgs_max] "I" (offsetof(KernelArgs, max)), [offsetof_KernelArgs_min] "I" (offsetof(KernelArgs, min)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
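(Each kernel brackets its ZA usage with the same save/refill shape seen above: x12 walks ZA row slices in steps of four up to cntw (the number of 32-bit lanes per vector), while the buffer pointer advances sixteen vector-lengths per pass. Roughly, in ACLE terms — a sketch assuming svcntw() from <arm_sve.h>, not the generated code itself:

    #include <arm_sve.h>
    #include <cstdint>

    // Sketch of the refill loop's iteration structure only: four ZA row
    // groups are reloaded per pass, sixteen vectors' worth of buffer per
    // pass, until cntw rows have been covered. The ld1w/mova pairs from
    // the assembly are elided.
    inline void refill_accumulators(const float *&buf) {
        const uint64_t cntw = svcntw();   // 32-bit lanes per vector ("cntw x19")
        for (uint64_t row = 0; row < cntw; row += 4) {
            // ... ld1w four vector groups from buf, mova into za0h..za3h[row] ...
            buf += 16 * cntw;             // "addvl x13, x13, #16" in floats
        }
    }

The same structure appears in the initial-load loop at label 1, the store-and-refill loop at label 11, and the refill loop at label 23 of each kernel.)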
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_2VLx2VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_2VLx2VL/generic.cpp
index 87d7827c5b..344215bfa5 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_2VLx2VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_2VLx2VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __ARM_FEATURE_SVE
#ifdef ARM_COMPUTE_ENABLE_SME2
@@ -103,106 +103,106 @@ void sme2_interleaved_nomerge_fp32_mopa_2VLx2VL(const float *const A, const floa
KernelArgs args(A, B, C, ldc, M, N, K, bias, act, accumulate, accumulator_buffer);
__asm__ __volatile__(
- "ldr x16, [%x[args], %[offsetof_flags]]\n"
+ "ldr x15, [%x[args], %[offsetof_flags]]\n"
".inst 0xd503477f // SMSTART ZA\n"
"ptrue p0.b\n"
".inst 0x25207811 // ptrue pn9.b\n"
- "ldr x15, [%x[args], %[offsetof_accumulator_buffer]]\n"
"ldr x14, [%x[args], %[offsetof_accumulator_buffer]]\n"
- "tbz x16, #0, 2f\n"
+ "ldr x13, [%x[args], %[offsetof_accumulator_buffer]]\n"
+ "tbz x15, #0, 2f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"1:" // Initial accumulator load from buffer: Loop
- ".inst 0xa040c5e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xa040c5c8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x14]\n"
".inst 0xc0840500 // mova za0h.s[x12], { z8.s-z11.s }\n"
- ".inst 0xa041c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa041c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa042c5e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa042c5c0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
".inst 0xc0840402 // mova za2h.s[x12], { z0.s-z3.s }\n"
- ".inst 0xa043c5fc // ld1w { z28.s-z31.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xa043c5dc // ld1w { z28.s-z31.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
".inst 0xc0840783 // mova za3h.s[x12], { z28.s-z31.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- "addvl x15, x15, #16\n"
+ "cmp x12, x19\n"
+ "addvl x14, x14, #16\n"
"blt 1b\n"
"2:" // Initial accumulator load from buffer: End
- "ldr w13, [%x[args], %[offsetof_M]]\n"
- "mov x11, #0x0\n"
+ "ldr w11, [%x[args], %[offsetof_M]]\n"
"mov x10, #0x0\n"
- "ldr w9, [%x[args], %[offsetof_N]]\n"
- "ldr x28, [%x[args], %[offsetof_A]]\n"
+ "mov x9, #0x0\n"
+ "ldr w28, [%x[args], %[offsetof_N]]\n"
+ "ldr x27, [%x[args], %[offsetof_A]]\n"
"3:" // M and N loop
- "mov x27, x28\n"
- ".inst 0x25a94550 // whilelt pn8.s, x10, x9, VLx2\n"
- "tbnz x16, #0, 4f\n"
- "ldr x20, [%x[args], %[offsetof_bias]]\n"
+ "mov x26, x27\n"
+ ".inst 0x25bc4530 // whilelt pn8.s, x9, x28, VLx2\n"
+ "tbnz x15, #0, 4f\n"
+ "ldr x19, [%x[args], %[offsetof_bias]]\n"
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
- "cbz x20, 5f\n"
+ "cbz x19, 5f\n"
"fmov z21.s, #1.0\n"
- ".inst 0xa00a428f // ldnt1w { z14.s-z15.s }, p8/Z, [x20, x10, LSL #2]\n"
+ ".inst 0xa009426f // ldnt1w { z14.s-z15.s }, p8/Z, [x19, x9, LSL #2]\n"
".inst 0x808e02a0 // fmopa za0.s, p0/M, p0/M, z21.s, z14.s\n"
".inst 0x808f02a1 // fmopa za1.s, p0/M, p0/M, z21.s, z15.s\n"
".inst 0x808e02a2 // fmopa za2.s, p0/M, p0/M, z21.s, z14.s\n"
".inst 0x808f02a3 // fmopa za3.s, p0/M, p0/M, z21.s, z15.s\n"
"4:" // Prepare accumulators: Test for last block
+ "mov x19, x9\n"
"mov x20, x10\n"
- "mov x21, x11\n"
+ "incw x19, ALL, MUL #2\n"
"incw x20, ALL, MUL #2\n"
- "incw x21, ALL, MUL #2\n"
- "cmp x20, x9\n"
- "csel x21, x11, x21, LT\n"
- "mov x20, x16\n"
- "bfm x16, XZR, #0x0, #0x0 // bfc x16, #0x0, #0x1\n"
- "cmp x21, x13\n"
- "csel x16, x20, x16, LT\n"
+ "cmp x19, x28\n"
+ "csel x20, x10, x20, LT\n"
+ "mov x19, x15\n"
+ "bfm x15, XZR, #0x0, #0x0 // bfc x15, #0x0, #0x1\n"
+ "cmp x20, x11\n"
+ "csel x15, x19, x15, LT\n"
"5:" // Prepare accumulators: End
- "ldr x20, [%x[args], %[offsetof_K]]\n"
- "lsr x23, x20, #0x2\n"
- "and x22, x20, #0x3\n"
- "ldr x21, [%x[args], %[offsetof_B]]\n"
- "ldr x20, [%x[args], %[offsetof_kstride_bytes]]\n"
- "madd x21, x10, x20, x21\n" // bptr = B + n * kstride_bytes
- "cbz x23, 8f\n"
- "subs x23, x23, #0x1\n"
- ".inst 0xa1404767 // ld1w { z7.s, z15.s }, pn9.b/Z, [x27]\n"
- ".inst 0xa14046bf // ldnt1w { z23.s, z31.s }, pn9.b/Z, [x21]\n"
- ".inst 0xa0414768 // ld1w { z8.s-z9.s }, pn9.b/Z, [x27, #0x2, MUL VL]\n"
- ".inst 0xa04146a3 // ldnt1w { z2.s-z3.s }, pn9.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xa1424772 // ld1w { z18.s, z26.s }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
- ".inst 0xa04246b1 // ldnt1w { z16.s-z17.s }, pn9.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xa1434776 // ld1w { z22.s, z30.s }, pn9.b/Z, [x27, #0x6, MUL VL]\n"
- "addvl x27, x27, #8\n"
- ".inst 0xa14346ac // ldnt1w { z4.s, z12.s }, pn9.b/Z, [x21, #0x6, MUL VL]\n"
- "addvl x21, x21, #8\n"
+ "ldr x19, [%x[args], %[offsetof_K]]\n"
+ "lsr x22, x19, #0x2\n"
+ "and x21, x19, #0x3\n"
+ "ldr x20, [%x[args], %[offsetof_B]]\n"
+ "ldr x19, [%x[args], %[offsetof_kstride_bytes]]\n"
+ "madd x20, x9, x19, x20\n" // bptr = B + n * kstride_bytes
+ "cbz x22, 8f\n"
+ "subs x22, x22, #0x1\n"
+ ".inst 0xa1404747 // ld1w { z7.s, z15.s }, pn9.b/Z, [x26]\n"
+ ".inst 0xa140469f // ldnt1w { z23.s, z31.s }, pn9.b/Z, [x20]\n"
+ ".inst 0xa0414748 // ld1w { z8.s-z9.s }, pn9.b/Z, [x26, #0x2, MUL VL]\n"
+ ".inst 0xa0414683 // ldnt1w { z2.s-z3.s }, pn9.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa1424752 // ld1w { z18.s, z26.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0424691 // ldnt1w { z16.s-z17.s }, pn9.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xa1434756 // ld1w { z22.s, z30.s }, pn9.b/Z, [x26, #0x6, MUL VL]\n"
+ "addvl x26, x26, #8\n"
+ ".inst 0xa143468c // ldnt1w { z4.s, z12.s }, pn9.b/Z, [x20, #0x6, MUL VL]\n"
+ "addvl x20, x20, #8\n"
"ble 7f\n"
"6:" // K loop
".inst 0x809700e0 // fmopa za0.s, p0/M, p0/M, z7.s, z23.s\n"
- "subs x23, x23, #0x1\n"
+ "subs x22, x22, #0x1\n"
".inst 0x809f00e1 // fmopa za1.s, p0/M, p0/M, z7.s, z31.s\n"
".inst 0x809701e2 // fmopa za2.s, p0/M, p0/M, z15.s, z23.s\n"
".inst 0x809f01e3 // fmopa za3.s, p0/M, p0/M, z15.s, z31.s\n"
- ".inst 0xa1404767 // ld1w { z7.s, z15.s }, pn9.b/Z, [x27]\n"
+ ".inst 0xa1404747 // ld1w { z7.s, z15.s }, pn9.b/Z, [x26]\n"
".inst 0x80820100 // fmopa za0.s, p0/M, p0/M, z8.s, z2.s\n"
- ".inst 0xa14046bf // ldnt1w { z23.s, z31.s }, pn9.b/Z, [x21]\n"
+ ".inst 0xa140469f // ldnt1w { z23.s, z31.s }, pn9.b/Z, [x20]\n"
".inst 0x80830101 // fmopa za1.s, p0/M, p0/M, z8.s, z3.s\n"
".inst 0x80820122 // fmopa za2.s, p0/M, p0/M, z9.s, z2.s\n"
".inst 0x80830123 // fmopa za3.s, p0/M, p0/M, z9.s, z3.s\n"
- ".inst 0xa0414768 // ld1w { z8.s-z9.s }, pn9.b/Z, [x27, #0x2, MUL VL]\n"
+ ".inst 0xa0414748 // ld1w { z8.s-z9.s }, pn9.b/Z, [x26, #0x2, MUL VL]\n"
".inst 0x80900240 // fmopa za0.s, p0/M, p0/M, z18.s, z16.s\n"
- ".inst 0xa04146a3 // ldnt1w { z2.s-z3.s }, pn9.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xa0414683 // ldnt1w { z2.s-z3.s }, pn9.b/Z, [x20, #0x2, MUL VL]\n"
".inst 0x80910241 // fmopa za1.s, p0/M, p0/M, z18.s, z17.s\n"
".inst 0x80900342 // fmopa za2.s, p0/M, p0/M, z26.s, z16.s\n"
".inst 0x80910343 // fmopa za3.s, p0/M, p0/M, z26.s, z17.s\n"
- ".inst 0xa1424772 // ld1w { z18.s, z26.s }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
- ".inst 0xa04246b1 // ldnt1w { z16.s-z17.s }, pn9.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xa1424752 // ld1w { z18.s, z26.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa0424691 // ldnt1w { z16.s-z17.s }, pn9.b/Z, [x20, #0x4, MUL VL]\n"
".inst 0x808402c0 // fmopa za0.s, p0/M, p0/M, z22.s, z4.s\n"
".inst 0x808c02c1 // fmopa za1.s, p0/M, p0/M, z22.s, z12.s\n"
".inst 0x808403c2 // fmopa za2.s, p0/M, p0/M, z30.s, z4.s\n"
".inst 0x808c03c3 // fmopa za3.s, p0/M, p0/M, z30.s, z12.s\n"
- ".inst 0xa1434776 // ld1w { z22.s, z30.s }, pn9.b/Z, [x27, #0x6, MUL VL]\n"
- "addvl x27, x27, #8\n"
- ".inst 0xa14346ac // ldnt1w { z4.s, z12.s }, pn9.b/Z, [x21, #0x6, MUL VL]\n"
- "addvl x21, x21, #8\n"
+ ".inst 0xa1434756 // ld1w { z22.s, z30.s }, pn9.b/Z, [x26, #0x6, MUL VL]\n"
+ "addvl x26, x26, #8\n"
+ ".inst 0xa143468c // ldnt1w { z4.s, z12.s }, pn9.b/Z, [x20, #0x6, MUL VL]\n"
+ "addvl x20, x20, #8\n"
"bgt 6b\n"
"7:" // K loop tail
".inst 0x809700e0 // fmopa za0.s, p0/M, p0/M, z7.s, z23.s\n"
@@ -222,259 +222,259 @@ void sme2_interleaved_nomerge_fp32_mopa_2VLx2VL(const float *const A, const floa
".inst 0x808403c2 // fmopa za2.s, p0/M, p0/M, z30.s, z4.s\n"
".inst 0x808c03c3 // fmopa za3.s, p0/M, p0/M, z30.s, z12.s\n"
"8:" // K oddments
- "cbz x22, 10f\n"
+ "cbz x21, 10f\n"
"9:" // K oddments: Loop
- ".inst 0xa1404767 // ld1w { z7.s, z15.s }, pn9.b/Z, [x27]\n"
- "subs x22, x22, #0x1\n"
- "addvl x27, x27, #2\n"
- ".inst 0xa14046b7 // ld1w { z23.s, z31.s }, pn9.b/Z, [x21]\n"
- "addvl x21, x21, #2\n"
+ ".inst 0xa1404747 // ld1w { z7.s, z15.s }, pn9.b/Z, [x26]\n"
+ "subs x21, x21, #0x1\n"
+ "addvl x26, x26, #2\n"
+ ".inst 0xa1404697 // ld1w { z23.s, z31.s }, pn9.b/Z, [x20]\n"
+ "addvl x20, x20, #2\n"
".inst 0x809700e0 // fmopa za0.s, p0/M, p0/M, z7.s, z23.s\n"
".inst 0x809f00e1 // fmopa za1.s, p0/M, p0/M, z7.s, z31.s\n"
".inst 0x809701e2 // fmopa za2.s, p0/M, p0/M, z15.s, z23.s\n"
".inst 0x809f01e3 // fmopa za3.s, p0/M, p0/M, z15.s, z31.s\n"
"bgt 9b\n"
"10:" // K oddments: End
- "tbz x16, #1, 14f\n"
- "tbz x16, #0, 12f\n"
+ "tbz x15, #1, 14f\n"
+ "tbz x15, #0, 12f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"11:" // Store to partial result buffer: Store and refill: Loop
- ".inst 0xa040c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xa040c5cc // ld1w { z12.s-z15.s }, pn9.b/Z, [x14]\n"
".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
".inst 0xc0840580 // mova za0h.s[x12], { z12.s-z15.s }\n"
".inst 0xc086042c // mova { z12.s-z15.s }, za1h.s[x12]\n"
- ".inst 0xa041c5e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa041c5c0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
".inst 0xc0840401 // mova za1h.s[x12], { z0.s-z3.s }\n"
".inst 0xc0860454 // mova { z20.s-z23.s }, za2h.s[x12]\n"
".inst 0xc0860468 // mova { z8.s-z11.s }, za3h.s[x12]\n"
- ".inst 0xa042c5f8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa042c5d8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
".inst 0xc0840702 // mova za2h.s[x12], { z24.s-z27.s }\n"
- ".inst 0xa043c5f8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xa043c5d8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
".inst 0xc0840703 // mova za3h.s[x12], { z24.s-z27.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- ".inst 0xa060c5d0 // st1w { z16.s-z19.s }, pn9.b, [x14]\n"
- "addvl x15, x15, #16\n"
- ".inst 0xa061c5cc // st1w { z12.s-z15.s }, pn9.b, [x14, #0x4, MUL VL]\n"
- ".inst 0xa062c5d4 // st1w { z20.s-z23.s }, pn9.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c5c8 // st1w { z8.s-z11.s }, pn9.b, [x14, #0xc, MUL VL]\n"
+ "cmp x12, x19\n"
+ ".inst 0xa060c5b0 // st1w { z16.s-z19.s }, pn9.b, [x13]\n"
"addvl x14, x14, #16\n"
+ ".inst 0xa061c5ac // st1w { z12.s-z15.s }, pn9.b, [x13, #0x4, MUL VL]\n"
+ ".inst 0xa062c5b4 // st1w { z20.s-z23.s }, pn9.b, [x13, #0x8, MUL VL]\n"
+ ".inst 0xa063c5a8 // st1w { z8.s-z11.s }, pn9.b, [x13, #0xc, MUL VL]\n"
+ "addvl x13, x13, #16\n"
"blt 11b\n"
"b 30f\n"
"12:" // Store to partial result buffer: Store only
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"13:" // Store to partial result buffer: Store only: Loop
".inst 0xc086040c // mova { z12.s-z15.s }, za0h.s[x12]\n"
".inst 0xc0860430 // mova { z16.s-z19.s }, za1h.s[x12]\n"
- ".inst 0xa060c5cc // st1w { z12.s-z15.s }, pn9.b, [x14]\n"
+ ".inst 0xa060c5ac // st1w { z12.s-z15.s }, pn9.b, [x13]\n"
".inst 0xc0860444 // mova { z4.s-z7.s }, za2h.s[x12]\n"
".inst 0xc0860460 // mova { z0.s-z3.s }, za3h.s[x12]\n"
- ".inst 0xa061c5d0 // st1w { z16.s-z19.s }, pn9.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xa061c5b0 // st1w { z16.s-z19.s }, pn9.b, [x13, #0x4, MUL VL]\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- ".inst 0xa062c5c4 // st1w { z4.s-z7.s }, pn9.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c5c0 // st1w { z0.s-z3.s }, pn9.b, [x14, #0xc, MUL VL]\n"
- "addvl x14, x14, #16\n"
+ "cmp x12, x19\n"
+ ".inst 0xa062c5a4 // st1w { z4.s-z7.s }, pn9.b, [x13, #0x8, MUL VL]\n"
+ ".inst 0xa063c5a0 // st1w { z0.s-z3.s }, pn9.b, [x13, #0xc, MUL VL]\n"
+ "addvl x13, x13, #16\n"
"blt 13b\n"
"b 30f\n"
"14:" // Store to output array
- "ldr x26, [%x[args], %[offsetof_C]]\n"
- "add x26, x26, x10, LSL #2\n" // C += n
- "sub x25, x13, x11\n"
- "ldr x24, [%x[args], %[offsetof_ldcb]]\n"
- "madd x26, x11, x24, x26\n" // C += m * ldc
- "tbz x16, #2, 21f\n"
- "cntw x23\n"
- "cmp x25, x23\n"
- "csel x22, x25, x23, LT\n"
- "lsr x21, x22, #0x2\n"
+ "ldr x25, [%x[args], %[offsetof_C]]\n"
+ "add x25, x25, x9, LSL #2\n" // C += n
+ "sub x24, x11, x10\n"
+ "ldr x23, [%x[args], %[offsetof_ldcb]]\n"
+ "madd x25, x10, x23, x25\n" // C += m * ldc
+ "tbz x15, #2, 21f\n"
+ "cntw x22\n"
+ "cmp x24, x22\n"
+ "csel x21, x24, x22, LT\n"
+ "lsr x20, x21, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 16f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 16f\n"
"15:" // Store to output array: Skip activation: Accumulator row 0 loop
".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
".inst 0xc086042c // mova { z12.s-z15.s }, za1h.s[x12]\n"
- ".inst 0xa1604344 // st1w { z4.s, z12.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
- ".inst 0xa1604345 // st1w { z5.s, z13.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xa1604324 // st1w { z4.s, z12.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
+ ".inst 0xa1604325 // st1w { z5.s, z13.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"add x12, x12, #0x4\n"
- ".inst 0xa1604346 // st1w { z6.s, z14.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
- ".inst 0xa1604347 // st1w { z7.s, z15.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xa1604326 // st1w { z6.s, z14.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
+ "cmp x12, x20, LSL #2\n"
+ ".inst 0xa1604327 // st1w { z7.s, z15.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"blt 15b\n"
"16:" // Store to output array: Skip activation: Accumulator row 0 oddments
- "cbz x20, 17f\n"
- "subs x20, x20, #0x1\n"
+ "cbz x19, 17f\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
".inst 0xc086042c // mova { z12.s-z15.s }, za1h.s[x12]\n"
- ".inst 0xa1604344 // st1w { z4.s, z12.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xa1604324 // st1w { z4.s, z12.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"beq 17f\n"
- "subs x20, x20, #0x1\n"
- ".inst 0xa1604345 // st1w { z5.s, z13.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ ".inst 0xa1604325 // st1w { z5.s, z13.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"beq 17f\n"
- ".inst 0xa1604346 // st1w { z6.s, z14.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xa1604326 // st1w { z6.s, z14.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"17:" // Store to output array: Skip activation: Accumulator row 0 oddments: End
- "subs x25, x25, x22\n"
+ "subs x24, x24, x21\n"
"beq 21f\n"
- "cmp x25, x23\n"
- "csel x22, x25, x23, LT\n"
- "lsr x21, x22, #0x2\n"
+ "cmp x24, x22\n"
+ "csel x21, x24, x22, LT\n"
+ "lsr x20, x21, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 19f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 19f\n"
"18:" // Store to output array: Skip activation: Accumulator row 1 loop
".inst 0xc0860444 // mova { z4.s-z7.s }, za2h.s[x12]\n"
".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
- ".inst 0xa1604344 // st1w { z4.s, z12.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
- ".inst 0xa1604345 // st1w { z5.s, z13.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xa1604324 // st1w { z4.s, z12.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
+ ".inst 0xa1604325 // st1w { z5.s, z13.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"add x12, x12, #0x4\n"
- ".inst 0xa1604346 // st1w { z6.s, z14.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
- ".inst 0xa1604347 // st1w { z7.s, z15.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xa1604326 // st1w { z6.s, z14.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
+ "cmp x12, x20, LSL #2\n"
+ ".inst 0xa1604327 // st1w { z7.s, z15.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"blt 18b\n"
"19:" // Store to output array: Skip activation: Accumulator row 1 oddments
- "cbz x20, 20f\n"
- "subs x20, x20, #0x1\n"
+ "cbz x19, 20f\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc0860454 // mova { z20.s-z23.s }, za2h.s[x12]\n"
".inst 0xc086047c // mova { z28.s-z31.s }, za3h.s[x12]\n"
- ".inst 0xa1604354 // st1w { z20.s, z28.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xa1604334 // st1w { z20.s, z28.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"beq 20f\n"
- "subs x20, x20, #0x1\n"
- ".inst 0xa1604355 // st1w { z21.s, z29.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ ".inst 0xa1604335 // st1w { z21.s, z29.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"beq 20f\n"
- ".inst 0xa1604356 // st1w { z22.s, z30.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xa1604336 // st1w { z22.s, z30.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"20:" // Store to output array: Skip activation: Accumulator row 1 oddments: End
- "subs x25, x25, x22\n"
+ "subs x24, x24, x21\n"
"beq 21f\n"
"b 28f\n"
"21:" // Store to output array: Skip activation: End
- "cntw x23\n"
- "cmp x25, x23\n"
+ "cntw x22\n"
+ "cmp x24, x22\n"
"ld1rw { z21.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_min]]\n"
- "csel x22, x25, x23, LT\n"
- "lsr x21, x22, #0x2\n"
+ "csel x21, x24, x22, LT\n"
+ "lsr x20, x21, #0x2\n"
"ld1rw { z20.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 23f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 23f\n"
"22:" // Store to output array: Accumulator row 0 loop
".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
".inst 0xc086042c // mova { z12.s-z15.s }, za1h.s[x12]\n"
".inst 0xc1b4caa4 // fclamp { z4.s-z7.s }, z21.s, z20.s\n"
".inst 0xc1b4caac // fclamp { z12.s-z15.s }, z21.s, z20.s\n"
- ".inst 0xa1604344 // st1w { z4.s, z12.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xa1604324 // st1w { z4.s, z12.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"add x12, x12, #0x4\n"
- ".inst 0xa1604345 // st1w { z5.s, z13.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
- ".inst 0xa1604346 // st1w { z6.s, z14.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
- ".inst 0xa1604347 // st1w { z7.s, z15.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xa1604325 // st1w { z5.s, z13.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
+ "cmp x12, x20, LSL #2\n"
+ ".inst 0xa1604326 // st1w { z6.s, z14.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
+ ".inst 0xa1604327 // st1w { z7.s, z15.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"blt 22b\n"
"23:" // Store to output array: Accumulator row 0 oddments
- "cbz x20, 24f\n"
+ "cbz x19, 24f\n"
".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
".inst 0xc0860428 // mova { z8.s-z11.s }, za1h.s[x12]\n"
".inst 0xc1b4caa0 // fclamp { z0.s-z3.s }, z21.s, z20.s\n"
".inst 0xc1b4caa8 // fclamp { z8.s-z11.s }, z21.s, z20.s\n"
- "subs x20, x20, #0x1\n"
- ".inst 0xa1604340 // st1w { z0.s, z8.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ ".inst 0xa1604320 // st1w { z0.s, z8.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"beq 24f\n"
- "subs x20, x20, #0x1\n"
- ".inst 0xa1604341 // st1w { z1.s, z9.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ ".inst 0xa1604321 // st1w { z1.s, z9.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"beq 24f\n"
- ".inst 0xa1604342 // st1w { z2.s, z10.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xa1604322 // st1w { z2.s, z10.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"24:" // Store to output array: Accumulator row 0 oddments: End
- "subs x25, x25, x22\n"
+ "subs x24, x24, x21\n"
"beq 28f\n"
- "cmp x25, x23\n"
- "csel x20, x25, x23, LT\n"
- "lsr x21, x20, #0x2\n"
+ "cmp x24, x22\n"
+ "csel x19, x24, x22, LT\n"
+ "lsr x20, x19, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x20, #0x3\n"
- "cbz x21, 26f\n"
+ "and x19, x19, #0x3\n"
+ "cbz x20, 26f\n"
"25:" // Store to output array: Accumulator row 1 loop
".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
".inst 0xc0860478 // mova { z24.s-z27.s }, za3h.s[x12]\n"
".inst 0xc1b4cab0 // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
".inst 0xc1b4cab8 // fclamp { z24.s-z27.s }, z21.s, z20.s\n"
- ".inst 0xa1604350 // st1w { z16.s, z24.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xa1604330 // st1w { z16.s, z24.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"add x12, x12, #0x4\n"
- ".inst 0xa1604351 // st1w { z17.s, z25.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
- ".inst 0xa1604352 // st1w { z18.s, z26.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
- ".inst 0xa1604353 // st1w { z19.s, z27.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xa1604331 // st1w { z17.s, z25.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
+ "cmp x12, x20, LSL #2\n"
+ ".inst 0xa1604332 // st1w { z18.s, z26.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
+ ".inst 0xa1604333 // st1w { z19.s, z27.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"blt 25b\n"
"26:" // Store to output array: Accumulator row 1 oddments
- "cbz x20, 27f\n"
+ "cbz x19, 27f\n"
".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
".inst 0xc0860478 // mova { z24.s-z27.s }, za3h.s[x12]\n"
".inst 0xc1b4cab0 // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
".inst 0xc1b4cab8 // fclamp { z24.s-z27.s }, z21.s, z20.s\n"
- "subs x20, x20, #0x1\n"
- ".inst 0xa1604350 // st1w { z16.s, z24.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ ".inst 0xa1604330 // st1w { z16.s, z24.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"beq 27f\n"
- "subs x20, x20, #0x1\n"
- ".inst 0xa1604351 // st1w { z17.s, z25.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ ".inst 0xa1604331 // st1w { z17.s, z25.s }, p8, [x25]\n"
+ "add x25, x25, x23\n"
"beq 27f\n"
- ".inst 0xa1604352 // st1w { z18.s, z26.s }, p8, [x26]\n"
+ ".inst 0xa1604332 // st1w { z18.s, z26.s }, p8, [x25]\n"
"27:" // Store to output array: Accumulator row 1 oddments: End
"28:" // Store to output array: End
- "tbz x16, #0, 30f\n"
+ "tbz x15, #0, 30f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"29:" // Store to output array: Refill accumulators: Loop
- ".inst 0xa040c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xa040c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14]\n"
".inst 0xc0840600 // mova za0h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa041c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa041c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa042c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa042c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa043c5e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xa043c5c8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
".inst 0xc0840503 // mova za3h.s[x12], { z8.s-z11.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- "addvl x15, x15, #16\n"
+ "cmp x12, x19\n"
+ "addvl x14, x14, #16\n"
"blt 29b\n"
"30:" // End block
- "incw x10, ALL, MUL #2\n"
- "cmp x10, x9\n"
+ "incw x9, ALL, MUL #2\n"
+ "cmp x9, x28\n"
"blt 3b\n"
- "incw x11, ALL, MUL #2\n"
- "cmp x11, x13\n"
- "mov x10, #0x0\n"
- "mov x28, x27\n"
+ "incw x10, ALL, MUL #2\n"
+ "cmp x10, x11\n"
+ "mov x9, #0x0\n"
+ "mov x27, x26\n"
"blt 3b\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_KernelArgs_max] "I" (offsetof(KernelArgs, max)), [offsetof_KernelArgs_min] "I" (offsetof(KernelArgs, min)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
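
For orientation amid the mechanical register renaming above: sme2_interleaved_nomerge_fp32_mopa_2VLx2VL computes a bias-plus-clamp GEMM block, and the rename does not change that contract. The following is a minimal scalar sketch of the computation, assuming non-interleaved row-major A and B and a row stride ldc counted in elements (the kernel's ldcb is in bytes); the real kernel works on interleaved panels, tiles M and N by the vector length, accumulates with FMOPA outer products into the ZA array, and has flag-controlled accumulate, skip-activation and accumulator_buffer spill/refill paths, none of which is modelled here.

#include <algorithm>
#include <cstddef>

// Scalar reference only: C = A * B (+ optional bias), clamped to
// [min_v, max_v]. Mirrors the assembly's bias step (an FMOPA against a
// vector of ones, "fmov z21.s, #1.0"), its K loop of FMOPA accumulates,
// and the final fclamp against the KernelArgs min/max. All names here are
// illustrative, not the library's API.
void fp32_mopa_reference(const float *A, const float *B, float *C,
                         std::size_t M, std::size_t N, std::size_t K,
                         std::size_t ldc, const float *bias,
                         float min_v, float max_v)
{
    for (std::size_t m = 0; m < M; ++m)
    {
        for (std::size_t n = 0; n < N; ++n)
        {
            float acc = (bias != nullptr) ? bias[n] : 0.0f;
            for (std::size_t k = 0; k < K; ++k)
            {
                acc += A[m * K + k] * B[k * N + n];
            }
            C[m * ldc + n] = std::min(std::max(acc, min_v), max_v);
        }
    }
}
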
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_4VLx1VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_4VLx1VL/generic.cpp
index 291a7ced5a..5252e8140b 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_4VLx1VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_4VLx1VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __ARM_FEATURE_SVE
#ifdef ARM_COMPUTE_ENABLE_SME2
@@ -103,106 +103,106 @@ void sme2_interleaved_nomerge_fp32_mopa_4VLx1VL(const float *const A, const floa
KernelArgs args(A, B, C, ldc, M, N, K, bias, act, accumulate, accumulator_buffer);
__asm__ __volatile__(
- "ldr x16, [%x[args], %[offsetof_flags]]\n"
+ "ldr x15, [%x[args], %[offsetof_flags]]\n"
".inst 0xd503477f // SMSTART ZA\n"
"ptrue p1.b\n"
".inst 0x25207810 // ptrue pn8.b\n"
- "ldr x15, [%x[args], %[offsetof_accumulator_buffer]]\n"
"ldr x14, [%x[args], %[offsetof_accumulator_buffer]]\n"
- "tbz x16, #0, 2f\n"
+ "ldr x13, [%x[args], %[offsetof_accumulator_buffer]]\n"
+ "tbz x15, #0, 2f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"1:" // Initial accumulator load from buffer: Loop
- ".inst 0xa040c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xa040c1c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x14]\n"
".inst 0xc0840480 // mova za0h.s[x12], { z4.s-z7.s }\n"
- ".inst 0xa041c1f8 // ld1w { z24.s-z27.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa041c1d8 // ld1w { z24.s-z27.s }, pn8.b/Z, [x14, #0x4, MUL VL]\n"
".inst 0xc0840701 // mova za1h.s[x12], { z24.s-z27.s }\n"
- ".inst 0xa042c1e0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa042c1c0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x14, #0x8, MUL VL]\n"
".inst 0xc0840402 // mova za2h.s[x12], { z0.s-z3.s }\n"
- ".inst 0xa043c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xa043c1c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x14, #0xc, MUL VL]\n"
".inst 0xc0840483 // mova za3h.s[x12], { z4.s-z7.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- "addvl x15, x15, #16\n"
+ "cmp x12, x19\n"
+ "addvl x14, x14, #16\n"
"blt 1b\n"
"2:" // Initial accumulator load from buffer: End
- "ldr w13, [%x[args], %[offsetof_M]]\n"
- "mov x11, #0x0\n"
+ "ldr w11, [%x[args], %[offsetof_M]]\n"
"mov x10, #0x0\n"
- "ldr w9, [%x[args], %[offsetof_N]]\n"
- "ldr x28, [%x[args], %[offsetof_A]]\n"
+ "mov x9, #0x0\n"
+ "ldr w28, [%x[args], %[offsetof_N]]\n"
+ "ldr x27, [%x[args], %[offsetof_A]]\n"
"3:" // M and N loop
- "mov x27, x28\n"
- "whilelt p0.s, x10, x9\n"
- "tbnz x16, #0, 4f\n"
- "ldr x20, [%x[args], %[offsetof_bias]]\n"
+ "mov x26, x27\n"
+ "whilelt p0.s, x9, x28\n"
+ "tbnz x15, #0, 4f\n"
+ "ldr x19, [%x[args], %[offsetof_bias]]\n"
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
- "cbz x20, 5f\n"
+ "cbz x19, 5f\n"
"fmov z8.s, #1.0\n"
- "ldnt1w { z27.s }, p0/Z, [x20, x10, LSL #2]\n"
+ "ldnt1w { z27.s }, p0/Z, [x19, x9, LSL #2]\n"
".inst 0x809b2500 // fmopa za0.s, p1/M, p1/M, z8.s, z27.s\n"
".inst 0x809b2501 // fmopa za1.s, p1/M, p1/M, z8.s, z27.s\n"
".inst 0x809b2502 // fmopa za2.s, p1/M, p1/M, z8.s, z27.s\n"
".inst 0x809b2503 // fmopa za3.s, p1/M, p1/M, z8.s, z27.s\n"
"4:" // Prepare accumulators: Test for last block
+ "mov x19, x9\n"
"mov x20, x10\n"
- "mov x21, x11\n"
- "incw x20\n"
- "incw x21, ALL, MUL #4\n"
- "cmp x20, x9\n"
- "csel x21, x11, x21, LT\n"
- "mov x20, x16\n"
- "bfm x16, XZR, #0x0, #0x0 // bfc x16, #0x0, #0x1\n"
- "cmp x21, x13\n"
- "csel x16, x20, x16, LT\n"
+ "incw x19\n"
+ "incw x20, ALL, MUL #4\n"
+ "cmp x19, x28\n"
+ "csel x20, x10, x20, LT\n"
+ "mov x19, x15\n"
+ "bfm x15, XZR, #0x0, #0x0 // bfc x15, #0x0, #0x1\n"
+ "cmp x20, x11\n"
+ "csel x15, x19, x15, LT\n"
"5:" // Prepare accumulators: End
- "ldr x20, [%x[args], %[offsetof_K]]\n"
- "lsr x23, x20, #0x2\n"
- "and x22, x20, #0x3\n"
- "ldr x21, [%x[args], %[offsetof_B]]\n"
- "ldr x20, [%x[args], %[offsetof_kstride_bytes]]\n"
- "madd x21, x10, x20, x21\n" // bptr = B + n * kstride_bytes
- "cbz x23, 8f\n"
- "subs x23, x23, #0x1\n"
- ".inst 0xa040c364 // ld1w { z4.s-z7.s }, pn8.b/Z, [x27]\n"
- "ldnt1w { z29.s }, p1/Z, [x21]\n"
- ".inst 0xa041c36c // ld1w { z12.s-z15.s }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
- "ldnt1w { z23.s }, p1/Z, [x21, #1, MUL VL]\n"
- ".inst 0xa042c360 // ld1w { z0.s-z3.s }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
- "ldnt1w { z21.s }, p1/Z, [x21, #2, MUL VL]\n"
- ".inst 0xa143c372 // ld1w { z18.s, z22.s, z26.s, z30.s }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
- "addvl x27, x27, #16\n"
- "ldnt1w { z27.s }, p1/Z, [x21, #3, MUL VL]\n"
- "addvl x21, x21, #4\n"
+ "ldr x19, [%x[args], %[offsetof_K]]\n"
+ "lsr x22, x19, #0x2\n"
+ "and x21, x19, #0x3\n"
+ "ldr x20, [%x[args], %[offsetof_B]]\n"
+ "ldr x19, [%x[args], %[offsetof_kstride_bytes]]\n"
+ "madd x20, x9, x19, x20\n" // bptr = B + n * kstride_bytes
+ "cbz x22, 8f\n"
+ "subs x22, x22, #0x1\n"
+ ".inst 0xa040c344 // ld1w { z4.s-z7.s }, pn8.b/Z, [x26]\n"
+ "ldnt1w { z29.s }, p1/Z, [x20]\n"
+ ".inst 0xa041c34c // ld1w { z12.s-z15.s }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ "ldnt1w { z23.s }, p1/Z, [x20, #1, MUL VL]\n"
+ ".inst 0xa042c340 // ld1w { z0.s-z3.s }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ "ldnt1w { z21.s }, p1/Z, [x20, #2, MUL VL]\n"
+ ".inst 0xa143c352 // ld1w { z18.s, z22.s, z26.s, z30.s }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+ "addvl x26, x26, #16\n"
+ "ldnt1w { z27.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "addvl x20, x20, #4\n"
"ble 7f\n"
"6:" // K loop
".inst 0x809d2480 // fmopa za0.s, p1/M, p1/M, z4.s, z29.s\n"
- "subs x23, x23, #0x1\n"
+ "subs x22, x22, #0x1\n"
".inst 0x809d24a1 // fmopa za1.s, p1/M, p1/M, z5.s, z29.s\n"
".inst 0x809d24c2 // fmopa za2.s, p1/M, p1/M, z6.s, z29.s\n"
".inst 0x809d24e3 // fmopa za3.s, p1/M, p1/M, z7.s, z29.s\n"
- ".inst 0xa040c364 // ld1w { z4.s-z7.s }, pn8.b/Z, [x27]\n"
+ ".inst 0xa040c344 // ld1w { z4.s-z7.s }, pn8.b/Z, [x26]\n"
".inst 0x80972580 // fmopa za0.s, p1/M, p1/M, z12.s, z23.s\n"
- "ldnt1w { z29.s }, p1/Z, [x21]\n"
+ "ldnt1w { z29.s }, p1/Z, [x20]\n"
".inst 0x809725a1 // fmopa za1.s, p1/M, p1/M, z13.s, z23.s\n"
".inst 0x809725c2 // fmopa za2.s, p1/M, p1/M, z14.s, z23.s\n"
".inst 0x809725e3 // fmopa za3.s, p1/M, p1/M, z15.s, z23.s\n"
- ".inst 0xa041c36c // ld1w { z12.s-z15.s }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa041c34c // ld1w { z12.s-z15.s }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
".inst 0x80952400 // fmopa za0.s, p1/M, p1/M, z0.s, z21.s\n"
- "ldnt1w { z23.s }, p1/Z, [x21, #1, MUL VL]\n"
+ "ldnt1w { z23.s }, p1/Z, [x20, #1, MUL VL]\n"
".inst 0x80952421 // fmopa za1.s, p1/M, p1/M, z1.s, z21.s\n"
".inst 0x80952442 // fmopa za2.s, p1/M, p1/M, z2.s, z21.s\n"
".inst 0x80952463 // fmopa za3.s, p1/M, p1/M, z3.s, z21.s\n"
- ".inst 0xa042c360 // ld1w { z0.s-z3.s }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
- "ldnt1w { z21.s }, p1/Z, [x21, #2, MUL VL]\n"
+ ".inst 0xa042c340 // ld1w { z0.s-z3.s }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ "ldnt1w { z21.s }, p1/Z, [x20, #2, MUL VL]\n"
".inst 0x809b2640 // fmopa za0.s, p1/M, p1/M, z18.s, z27.s\n"
".inst 0x809b26c1 // fmopa za1.s, p1/M, p1/M, z22.s, z27.s\n"
".inst 0x809b2742 // fmopa za2.s, p1/M, p1/M, z26.s, z27.s\n"
".inst 0x809b27c3 // fmopa za3.s, p1/M, p1/M, z30.s, z27.s\n"
- ".inst 0xa143c372 // ld1w { z18.s, z22.s, z26.s, z30.s }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
- "addvl x27, x27, #16\n"
- "ldnt1w { z27.s }, p1/Z, [x21, #3, MUL VL]\n"
- "addvl x21, x21, #4\n"
+ ".inst 0xa143c352 // ld1w { z18.s, z22.s, z26.s, z30.s }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+ "addvl x26, x26, #16\n"
+ "ldnt1w { z27.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "addvl x20, x20, #4\n"
"bgt 6b\n"
"7:" // K loop tail
".inst 0x809d2480 // fmopa za0.s, p1/M, p1/M, z4.s, z29.s\n"
@@ -222,391 +222,391 @@ void sme2_interleaved_nomerge_fp32_mopa_4VLx1VL(const float *const A, const floa
".inst 0x809b2742 // fmopa za2.s, p1/M, p1/M, z26.s, z27.s\n"
".inst 0x809b27c3 // fmopa za3.s, p1/M, p1/M, z30.s, z27.s\n"
"8:" // K oddments
- "cbz x22, 10f\n"
+ "cbz x21, 10f\n"
"9:" // K oddments: Loop
- ".inst 0xa040c364 // ld1w { z4.s-z7.s }, pn8.b/Z, [x27]\n"
- "subs x22, x22, #0x1\n"
- "addvl x27, x27, #4\n"
- "ld1w { z29.s }, p1/Z, [x21]\n"
- "addvl x21, x21, #1\n"
+ ".inst 0xa040c344 // ld1w { z4.s-z7.s }, pn8.b/Z, [x26]\n"
+ "subs x21, x21, #0x1\n"
+ "addvl x26, x26, #4\n"
+ "ld1w { z29.s }, p1/Z, [x20]\n"
+ "addvl x20, x20, #1\n"
".inst 0x809d2480 // fmopa za0.s, p1/M, p1/M, z4.s, z29.s\n"
".inst 0x809d24a1 // fmopa za1.s, p1/M, p1/M, z5.s, z29.s\n"
".inst 0x809d24c2 // fmopa za2.s, p1/M, p1/M, z6.s, z29.s\n"
".inst 0x809d24e3 // fmopa za3.s, p1/M, p1/M, z7.s, z29.s\n"
"bgt 9b\n"
"10:" // K oddments: End
- "tbz x16, #1, 14f\n"
- "tbz x16, #0, 12f\n"
+ "tbz x15, #1, 14f\n"
+ "tbz x15, #0, 12f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"11:" // Store to partial result buffer: Store and refill: Loop
- ".inst 0xa040c1e8 // ld1w { z8.s-z11.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xa040c1c8 // ld1w { z8.s-z11.s }, pn8.b/Z, [x14]\n"
".inst 0xc0860418 // mova { z24.s-z27.s }, za0h.s[x12]\n"
".inst 0xc0840500 // mova za0h.s[x12], { z8.s-z11.s }\n"
".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
- ".inst 0xa041c1ec // ld1w { z12.s-z15.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa041c1cc // ld1w { z12.s-z15.s }, pn8.b/Z, [x14, #0x4, MUL VL]\n"
".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
".inst 0xc086044c // mova { z12.s-z15.s }, za2h.s[x12]\n"
".inst 0xc0860460 // mova { z0.s-z3.s }, za3h.s[x12]\n"
- ".inst 0xa042c1e8 // ld1w { z8.s-z11.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa042c1c8 // ld1w { z8.s-z11.s }, pn8.b/Z, [x14, #0x8, MUL VL]\n"
".inst 0xc0840502 // mova za2h.s[x12], { z8.s-z11.s }\n"
- ".inst 0xa043c1fc // ld1w { z28.s-z31.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xa043c1dc // ld1w { z28.s-z31.s }, pn8.b/Z, [x14, #0xc, MUL VL]\n"
".inst 0xc0840783 // mova za3h.s[x12], { z28.s-z31.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- ".inst 0xa060c1d8 // st1w { z24.s-z27.s }, pn8.b, [x14]\n"
- "addvl x15, x15, #16\n"
- ".inst 0xa061c1c4 // st1w { z4.s-z7.s }, pn8.b, [x14, #0x4, MUL VL]\n"
- ".inst 0xa062c1cc // st1w { z12.s-z15.s }, pn8.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c1c0 // st1w { z0.s-z3.s }, pn8.b, [x14, #0xc, MUL VL]\n"
+ "cmp x12, x19\n"
+ ".inst 0xa060c1b8 // st1w { z24.s-z27.s }, pn8.b, [x13]\n"
"addvl x14, x14, #16\n"
+ ".inst 0xa061c1a4 // st1w { z4.s-z7.s }, pn8.b, [x13, #0x4, MUL VL]\n"
+ ".inst 0xa062c1ac // st1w { z12.s-z15.s }, pn8.b, [x13, #0x8, MUL VL]\n"
+ ".inst 0xa063c1a0 // st1w { z0.s-z3.s }, pn8.b, [x13, #0xc, MUL VL]\n"
+ "addvl x13, x13, #16\n"
"blt 11b\n"
"b 42f\n"
"12:" // Store to partial result buffer: Store only
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"13:" // Store to partial result buffer: Store only: Loop
".inst 0xc086040c // mova { z12.s-z15.s }, za0h.s[x12]\n"
".inst 0xc0860438 // mova { z24.s-z27.s }, za1h.s[x12]\n"
- ".inst 0xa060c1cc // st1w { z12.s-z15.s }, pn8.b, [x14]\n"
+ ".inst 0xa060c1ac // st1w { z12.s-z15.s }, pn8.b, [x13]\n"
".inst 0xc0860440 // mova { z0.s-z3.s }, za2h.s[x12]\n"
".inst 0xc0860468 // mova { z8.s-z11.s }, za3h.s[x12]\n"
- ".inst 0xa061c1d8 // st1w { z24.s-z27.s }, pn8.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xa061c1b8 // st1w { z24.s-z27.s }, pn8.b, [x13, #0x4, MUL VL]\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- ".inst 0xa062c1c0 // st1w { z0.s-z3.s }, pn8.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c1c8 // st1w { z8.s-z11.s }, pn8.b, [x14, #0xc, MUL VL]\n"
- "addvl x14, x14, #16\n"
+ "cmp x12, x19\n"
+ ".inst 0xa062c1a0 // st1w { z0.s-z3.s }, pn8.b, [x13, #0x8, MUL VL]\n"
+ ".inst 0xa063c1a8 // st1w { z8.s-z11.s }, pn8.b, [x13, #0xc, MUL VL]\n"
+ "addvl x13, x13, #16\n"
"blt 13b\n"
"b 42f\n"
"14:" // Store to output array
- "ldr x26, [%x[args], %[offsetof_C]]\n"
- "add x26, x26, x10, LSL #2\n" // C += n
- "sub x25, x13, x11\n"
- "ldr x24, [%x[args], %[offsetof_ldcb]]\n"
- "madd x26, x11, x24, x26\n" // C += m * ldc
- "tbz x16, #2, 27f\n"
- "cntw x23\n"
- "cmp x25, x23\n"
- "csel x22, x25, x23, LT\n"
- "lsr x21, x22, #0x2\n"
+ "ldr x25, [%x[args], %[offsetof_C]]\n"
+ "add x25, x25, x9, LSL #2\n" // C += n
+ "sub x24, x11, x10\n"
+ "ldr x23, [%x[args], %[offsetof_ldcb]]\n"
+ "madd x25, x10, x23, x25\n" // C += m * ldc
+ "tbz x15, #2, 27f\n"
+ "cntw x22\n"
+ "cmp x24, x22\n"
+ "csel x21, x24, x22, LT\n"
+ "lsr x20, x21, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 16f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 16f\n"
"15:" // Store to output array: Skip activation: Accumulator row 0 loop
".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
- "st1w { z16.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1w { z17.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z16.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1w { z17.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"add x12, x12, #0x4\n"
- "st1w { z18.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
- "st1w { z19.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z18.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "cmp x12, x20, LSL #2\n"
+ "st1w { z19.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"blt 15b\n"
"16:" // Store to output array: Skip activation: Accumulator row 0 oddments
- "cbz x20, 17f\n"
- "subs x20, x20, #0x1\n"
+ "cbz x19, 17f\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
- "st1w { z4.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z4.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 17f\n"
- "subs x20, x20, #0x1\n"
- "st1w { z5.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ "st1w { z5.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 17f\n"
- "st1w { z6.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z6.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"17:" // Store to output array: Skip activation: Accumulator row 0 oddments: End
- "subs x25, x25, x22\n"
+ "subs x24, x24, x21\n"
"beq 27f\n"
- "cmp x25, x23\n"
- "csel x22, x25, x23, LT\n"
- "lsr x21, x22, #0x2\n"
+ "cmp x24, x22\n"
+ "csel x21, x24, x22, LT\n"
+ "lsr x20, x21, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 19f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 19f\n"
"18:" // Store to output array: Skip activation: Accumulator row 1 loop
".inst 0xc0860430 // mova { z16.s-z19.s }, za1h.s[x12]\n"
- "st1w { z16.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1w { z17.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z16.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1w { z17.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"add x12, x12, #0x4\n"
- "st1w { z18.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
- "st1w { z19.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z18.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "cmp x12, x20, LSL #2\n"
+ "st1w { z19.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"blt 18b\n"
"19:" // Store to output array: Skip activation: Accumulator row 1 oddments
- "cbz x20, 20f\n"
- "subs x20, x20, #0x1\n"
+ "cbz x19, 20f\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
- "st1w { z4.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z4.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 20f\n"
- "subs x20, x20, #0x1\n"
- "st1w { z5.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ "st1w { z5.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 20f\n"
- "st1w { z6.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z6.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"20:" // Store to output array: Skip activation: Accumulator row 1 oddments: End
- "subs x25, x25, x22\n"
+ "subs x24, x24, x21\n"
"beq 27f\n"
- "cmp x25, x23\n"
- "csel x22, x25, x23, LT\n"
- "lsr x21, x22, #0x2\n"
+ "cmp x24, x22\n"
+ "csel x21, x24, x22, LT\n"
+ "lsr x20, x21, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 22f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 22f\n"
"21:" // Store to output array: Skip activation: Accumulator row 2 loop
".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
- "st1w { z16.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1w { z17.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z16.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1w { z17.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"add x12, x12, #0x4\n"
- "st1w { z18.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
- "st1w { z19.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z18.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "cmp x12, x20, LSL #2\n"
+ "st1w { z19.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"blt 21b\n"
"22:" // Store to output array: Skip activation: Accumulator row 2 oddments
- "cbz x20, 23f\n"
- "subs x20, x20, #0x1\n"
+ "cbz x19, 23f\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc0860454 // mova { z20.s-z23.s }, za2h.s[x12]\n"
- "st1w { z20.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z20.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 23f\n"
- "subs x20, x20, #0x1\n"
- "st1w { z21.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ "st1w { z21.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 23f\n"
- "st1w { z22.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z22.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"23:" // Store to output array: Skip activation: Accumulator row 2 oddments: End
- "subs x25, x25, x22\n"
+ "subs x24, x24, x21\n"
"beq 27f\n"
- "cmp x25, x23\n"
- "csel x22, x25, x23, LT\n"
- "lsr x21, x22, #0x2\n"
+ "cmp x24, x22\n"
+ "csel x21, x24, x22, LT\n"
+ "lsr x20, x21, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 25f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 25f\n"
"24:" // Store to output array: Skip activation: Accumulator row 3 loop
".inst 0xc0860464 // mova { z4.s-z7.s }, za3h.s[x12]\n"
- "st1w { z4.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1w { z5.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z4.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1w { z5.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"add x12, x12, #0x4\n"
- "st1w { z6.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
- "st1w { z7.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z6.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "cmp x12, x20, LSL #2\n"
+ "st1w { z7.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"blt 24b\n"
"25:" // Store to output array: Skip activation: Accumulator row 3 oddments
- "cbz x20, 26f\n"
- "subs x20, x20, #0x1\n"
+ "cbz x19, 26f\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
- "st1w { z12.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z12.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 26f\n"
- "subs x20, x20, #0x1\n"
- "st1w { z13.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ "st1w { z13.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 26f\n"
- "st1w { z14.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z14.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"26:" // Store to output array: Skip activation: Accumulator row 3 oddments: End
- "subs x25, x25, x22\n"
+ "subs x24, x24, x21\n"
"beq 27f\n"
"b 40f\n"
"27:" // Store to output array: Skip activation: End
- "cntw x23\n"
- "cmp x25, x23\n"
+ "cntw x22\n"
+ "cmp x24, x22\n"
"ld1rw { z25.s }, p1/Z, [%x[args], %[offsetof_KernelArgs_min]]\n"
- "csel x22, x25, x23, LT\n"
- "lsr x21, x22, #0x2\n"
+ "csel x21, x24, x22, LT\n"
+ "lsr x20, x21, #0x2\n"
"ld1rw { z24.s }, p1/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 29f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 29f\n"
"28:" // Store to output array: Accumulator row 0 loop
".inst 0xc0860414 // mova { z20.s-z23.s }, za0h.s[x12]\n"
".inst 0xc1b8cb34 // fclamp { z20.s-z23.s }, z25.s, z24.s\n"
- "st1w { z20.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1w { z21.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z20.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1w { z21.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"add x12, x12, #0x4\n"
- "st1w { z22.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
- "st1w { z23.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z22.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "cmp x12, x20, LSL #2\n"
+ "st1w { z23.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"blt 28b\n"
"29:" // Store to output array: Accumulator row 0 oddments
- "cbz x20, 30f\n"
+ "cbz x19, 30f\n"
".inst 0xc0860408 // mova { z8.s-z11.s }, za0h.s[x12]\n"
- "subs x20, x20, #0x1\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc1b8cb28 // fclamp { z8.s-z11.s }, z25.s, z24.s\n"
- "st1w { z8.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z8.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 30f\n"
- "subs x20, x20, #0x1\n"
- "st1w { z9.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ "st1w { z9.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 30f\n"
- "st1w { z10.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z10.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"30:" // Store to output array: Accumulator row 0 oddments: End
- "subs x25, x25, x22\n"
+ "subs x24, x24, x21\n"
"beq 40f\n"
- "cmp x25, x23\n"
- "csel x22, x25, x23, LT\n"
- "lsr x21, x22, #0x2\n"
+ "cmp x24, x22\n"
+ "csel x21, x24, x22, LT\n"
+ "lsr x20, x21, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 32f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 32f\n"
"31:" // Store to output array: Accumulator row 1 loop
".inst 0xc0860430 // mova { z16.s-z19.s }, za1h.s[x12]\n"
".inst 0xc1b8cb30 // fclamp { z16.s-z19.s }, z25.s, z24.s\n"
- "st1w { z16.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1w { z17.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z16.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1w { z17.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"add x12, x12, #0x4\n"
- "st1w { z18.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
- "st1w { z19.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z18.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "cmp x12, x20, LSL #2\n"
+ "st1w { z19.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"blt 31b\n"
"32:" // Store to output array: Accumulator row 1 oddments
- "cbz x20, 33f\n"
+ "cbz x19, 33f\n"
".inst 0xc0860430 // mova { z16.s-z19.s }, za1h.s[x12]\n"
- "subs x20, x20, #0x1\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc1b8cb30 // fclamp { z16.s-z19.s }, z25.s, z24.s\n"
- "st1w { z16.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z16.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 33f\n"
- "subs x20, x20, #0x1\n"
- "st1w { z17.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ "st1w { z17.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 33f\n"
- "st1w { z18.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z18.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"33:" // Store to output array: Accumulator row 1 oddments: End
- "subs x25, x25, x22\n"
+ "subs x24, x24, x21\n"
"beq 40f\n"
- "cmp x25, x23\n"
- "csel x22, x25, x23, LT\n"
- "lsr x21, x22, #0x2\n"
+ "cmp x24, x22\n"
+ "csel x21, x24, x22, LT\n"
+ "lsr x20, x21, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 35f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 35f\n"
"34:" // Store to output array: Accumulator row 2 loop
".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
".inst 0xc1b8cb30 // fclamp { z16.s-z19.s }, z25.s, z24.s\n"
- "st1w { z16.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1w { z17.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z16.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1w { z17.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"add x12, x12, #0x4\n"
- "st1w { z18.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
- "st1w { z19.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z18.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "cmp x12, x20, LSL #2\n"
+ "st1w { z19.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"blt 34b\n"
"35:" // Store to output array: Accumulator row 2 oddments
- "cbz x20, 36f\n"
+ "cbz x19, 36f\n"
".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
- "subs x20, x20, #0x1\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc1b8cb30 // fclamp { z16.s-z19.s }, z25.s, z24.s\n"
- "st1w { z16.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z16.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 36f\n"
- "subs x20, x20, #0x1\n"
- "st1w { z17.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ "st1w { z17.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 36f\n"
- "st1w { z18.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z18.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"36:" // Store to output array: Accumulator row 2 oddments: End
- "subs x25, x25, x22\n"
+ "subs x24, x24, x21\n"
"beq 40f\n"
- "cmp x25, x23\n"
- "csel x20, x25, x23, LT\n"
- "lsr x21, x20, #0x2\n"
+ "cmp x24, x22\n"
+ "csel x19, x24, x22, LT\n"
+ "lsr x20, x19, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x20, #0x3\n"
- "cbz x21, 38f\n"
+ "and x19, x19, #0x3\n"
+ "cbz x20, 38f\n"
"37:" // Store to output array: Accumulator row 3 loop
".inst 0xc0860474 // mova { z20.s-z23.s }, za3h.s[x12]\n"
".inst 0xc1b8cb34 // fclamp { z20.s-z23.s }, z25.s, z24.s\n"
- "st1w { z20.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1w { z21.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z20.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1w { z21.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"add x12, x12, #0x4\n"
- "st1w { z22.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
- "st1w { z23.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z22.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "cmp x12, x20, LSL #2\n"
+ "st1w { z23.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"blt 37b\n"
"38:" // Store to output array: Accumulator row 3 oddments
- "cbz x20, 39f\n"
+ "cbz x19, 39f\n"
".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
- "subs x20, x20, #0x1\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc1b8cb30 // fclamp { z16.s-z19.s }, z25.s, z24.s\n"
- "st1w { z16.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1w { z16.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 39f\n"
- "subs x20, x20, #0x1\n"
- "st1w { z17.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ "st1w { z17.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 39f\n"
- "st1w { z18.s }, p0, [x26]\n"
+ "st1w { z18.s }, p0, [x25]\n"
"39:" // Store to output array: Accumulator row 3 oddments: End
"40:" // Store to output array: End
- "tbz x16, #0, 42f\n"
+ "tbz x15, #0, 42f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"41:" // Store to output array: Refill accumulators: Loop
- ".inst 0xa040c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xa040c1d0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x14]\n"
".inst 0xc0840600 // mova za0h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa041c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa041c1d0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x14, #0x4, MUL VL]\n"
".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa042c1f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa042c1d4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x14, #0x8, MUL VL]\n"
".inst 0xc0840682 // mova za2h.s[x12], { z20.s-z23.s }\n"
- ".inst 0xa043c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xa043c1c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x14, #0xc, MUL VL]\n"
".inst 0xc0840483 // mova za3h.s[x12], { z4.s-z7.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- "addvl x15, x15, #16\n"
+ "cmp x12, x19\n"
+ "addvl x14, x14, #16\n"
"blt 41b\n"
"42:" // End block
- "incw x10\n"
- "cmp x10, x9\n"
+ "incw x9\n"
+ "cmp x9, x28\n"
"blt 3b\n"
- "incw x11, ALL, MUL #4\n"
- "cmp x11, x13\n"
- "mov x10, #0x0\n"
- "mov x28, x27\n"
+ "incw x10, ALL, MUL #4\n"
+ "cmp x10, x11\n"
+ "mov x9, #0x0\n"
+ "mov x27, x26\n"
"blt 3b\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_KernelArgs_max] "I" (offsetof(KernelArgs, max)), [offsetof_KernelArgs_min] "I" (offsetof(KernelArgs, min)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
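
The pattern of this revert is uniform across the kernels: every scratch general-purpose register shifts down by one (x16 to x15, x20 to x19, and so on) and "x19" rejoins the clobber list at the bottom of each asm block, telling the compiler the kernel is once again free to overwrite it. A toy extended-asm fragment, unrelated to these kernels and AArch64-only, showing why the clobber list has to name exactly the registers the body scribbles on:

#include <cstdint>

// Illustration only: doubles 'x' via a named scratch register. If the body
// uses x19, "x19" must appear in the clobber list, exactly as the kernels in
// this patch re-add it after reinstating x19 as a scratch register; omit the
// clobber and the compiler may keep live data in x19 across the asm block.
std::uint64_t double_it(std::uint64_t x)
{
    std::uint64_t out;
    __asm__ __volatile__(
        "mov x19, %x[in]\n"
        "add x19, x19, x19\n"
        "mov %x[out], x19\n"
        : [out] "=r" (out)
        : [in] "r" (x)
        : "x19");
    return out;
}
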
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_1VLx4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_1VLx4VL/generic.cpp
index 929af04032..62170c4945 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_1VLx4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_1VLx4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __ARM_FEATURE_SVE
#ifdef ARM_COMPUTE_ENABLE_SME2
@@ -90,107 +90,107 @@ void sme2_interleaved_nomerge_s8q_mopa_1VLx4VL(const int8_t *const A, const int8
KernelArgs args(A, B, C, ldc, M, N, K, bias, rq, n_0, accumulate, accumulator_buffer);
__asm__ __volatile__(
- "ldr x14, [%x[args], %[offsetof_flags]]\n"
+ "ldr x13, [%x[args], %[offsetof_flags]]\n"
".inst 0xd503477f // SMSTART ZA\n"
"ptrue p1.b\n"
".inst 0x25207811 // ptrue pn9.b\n"
- "ldr x13, [%x[args], %[offsetof_accumulator_buffer]]\n"
"ldr x11, [%x[args], %[offsetof_accumulator_buffer]]\n"
- "tbz x14, #0, 2f\n"
+ "ldr x10, [%x[args], %[offsetof_accumulator_buffer]]\n"
+ "tbz x13, #0, 2f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"1:" // Initial accumulator load from buffer: Loop
- ".inst 0xa040c5ac // ld1w { z12.s-z15.s }, pn9.b/Z, [x13]\n"
+ ".inst 0xa040c56c // ld1w { z12.s-z15.s }, pn9.b/Z, [x11]\n"
".inst 0xc0840580 // mova za0h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xa041c5bc // ld1w { z28.s-z31.s }, pn9.b/Z, [x13, #0x4, MUL VL]\n"
+ ".inst 0xa041c57c // ld1w { z28.s-z31.s }, pn9.b/Z, [x11, #0x4, MUL VL]\n"
".inst 0xc0840781 // mova za1h.s[x12], { z28.s-z31.s }\n"
- ".inst 0xa042c5bc // ld1w { z28.s-z31.s }, pn9.b/Z, [x13, #0x8, MUL VL]\n"
+ ".inst 0xa042c57c // ld1w { z28.s-z31.s }, pn9.b/Z, [x11, #0x8, MUL VL]\n"
".inst 0xc0840782 // mova za2h.s[x12], { z28.s-z31.s }\n"
- ".inst 0xa043c5a4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x13, #0xc, MUL VL]\n"
+ ".inst 0xa043c564 // ld1w { z4.s-z7.s }, pn9.b/Z, [x11, #0xc, MUL VL]\n"
".inst 0xc0840483 // mova za3h.s[x12], { z4.s-z7.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- "addvl x13, x13, #16\n"
+ "cmp x12, x19\n"
+ "addvl x11, x11, #16\n"
"blt 1b\n"
"2:" // Initial accumulator load from buffer: End
- "ldr w10, [%x[args], %[offsetof_M]]\n"
- "mov x9, #0x0\n"
+ "ldr w9, [%x[args], %[offsetof_M]]\n"
"mov x28, #0x0\n"
- "ldr w27, [%x[args], %[offsetof_N]]\n"
- "ldr x26, [%x[args], %[offsetof_A]]\n"
+ "mov x27, #0x0\n"
+ "ldr w26, [%x[args], %[offsetof_N]]\n"
+ "ldr x25, [%x[args], %[offsetof_A]]\n"
"3:" // M and N loop
- "mov x25, x26\n"
- ".inst 0x25bb6790 // whilelt pn8.s, x28, x27, VLx4\n"
- "tbnz x14, #0, 4f\n"
- "ldr x20, [%x[args], %[offsetof_bias]]\n"
+ "mov x24, x25\n"
+ ".inst 0x25ba6770 // whilelt pn8.s, x27, x26, VLx4\n"
+ "tbnz x13, #0, 4f\n"
+ "ldr x19, [%x[args], %[offsetof_bias]]\n"
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
- "cbz x20, 5f\n"
- ".inst 0xa01cc299 // ldnt1w { z24.s-z27.s }, p8/Z, [x20, x28, LSL #2]\n"
+ "cbz x19, 5f\n"
+ ".inst 0xa01bc279 // ldnt1w { z24.s-z27.s }, p8/Z, [x19, x27, LSL #2]\n"
".inst 0xc0902700 // addha za0.s, p1/M, p1/M, z24.s\n"
".inst 0xc0902721 // addha za1.s, p1/M, p1/M, z25.s\n"
".inst 0xc0902742 // addha za2.s, p1/M, p1/M, z26.s\n"
".inst 0xc0902763 // addha za3.s, p1/M, p1/M, z27.s\n"
"4:" // Prepare accumulators: Test for last block
+ "mov x19, x27\n"
"mov x20, x28\n"
- "mov x21, x9\n"
- "incw x20, ALL, MUL #4\n"
- "incw x21\n"
- "cmp x20, x27\n"
- "csel x21, x9, x21, LT\n"
- "mov x20, x14\n"
- "bfm x14, XZR, #0x0, #0x0 // bfc x14, #0x0, #0x1\n"
- "cmp x21, x10\n"
- "csel x14, x20, x14, LT\n"
+ "incw x19, ALL, MUL #4\n"
+ "incw x20\n"
+ "cmp x19, x26\n"
+ "csel x20, x28, x20, LT\n"
+ "mov x19, x13\n"
+ "bfm x13, XZR, #0x0, #0x0 // bfc x13, #0x0, #0x1\n"
+ "cmp x20, x9\n"
+ "csel x13, x19, x13, LT\n"
"5:" // Prepare accumulators: End
- "ldr x20, [%x[args], %[offsetof_K]]\n"
- "add x20, x20, #0x3\n"
- "lsr x20, x20, #0x2\n"
- "ldr x23, [%x[args], %[offsetof_B]]\n"
- "lsr x22, x20, #0x2\n"
- "and x21, x20, #0x3\n"
- "ldr x20, [%x[args], %[offsetof_kstride_bytes]]\n"
- "madd x23, x28, x20, x23\n" // bptr = B + n * kstride_bytes
- "cbz x22, 8f\n"
- "subs x22, x22, #0x1\n"
- "ld1b { z10.b }, p1/Z, [x25]\n"
- ".inst 0xa04086fd // ldnt1b { z28.b-z31.b }, pn9.b/Z, [x23]\n"
- "ld1b { z16.b }, p1/Z, [x25, #1, MUL VL]\n"
- ".inst 0xa04186ed // ldnt1b { z12.b-z15.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
- "ld1b { z21.b }, p1/Z, [x25, #2, MUL VL]\n"
- ".inst 0xa04286f9 // ldnt1b { z24.b-z27.b }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
- "ld1b { z19.b }, p1/Z, [x25, #3, MUL VL]\n"
- "addvl x25, x25, #4\n"
- ".inst 0xa04386e1 // ldnt1b { z0.b-z3.b }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
- "addvl x23, x23, #16\n"
+ "ldr x19, [%x[args], %[offsetof_K]]\n"
+ "add x19, x19, #0x3\n"
+ "lsr x19, x19, #0x2\n"
+ "ldr x22, [%x[args], %[offsetof_B]]\n"
+ "lsr x21, x19, #0x2\n"
+ "and x20, x19, #0x3\n"
+ "ldr x19, [%x[args], %[offsetof_kstride_bytes]]\n"
+ "madd x22, x27, x19, x22\n" // bptr = B + n * kstride_bytes
+ "cbz x21, 8f\n"
+ "subs x21, x21, #0x1\n"
+ "ld1b { z10.b }, p1/Z, [x24]\n"
+ ".inst 0xa04086dd // ldnt1b { z28.b-z31.b }, pn9.b/Z, [x22]\n"
+ "ld1b { z16.b }, p1/Z, [x24, #1, MUL VL]\n"
+ ".inst 0xa04186cd // ldnt1b { z12.b-z15.b }, pn9.b/Z, [x22, #0x4, MUL VL]\n"
+ "ld1b { z21.b }, p1/Z, [x24, #2, MUL VL]\n"
+ ".inst 0xa04286d9 // ldnt1b { z24.b-z27.b }, pn9.b/Z, [x22, #0x8, MUL VL]\n"
+ "ld1b { z19.b }, p1/Z, [x24, #3, MUL VL]\n"
+ "addvl x24, x24, #4\n"
+ ".inst 0xa04386c1 // ldnt1b { z0.b-z3.b }, pn9.b/Z, [x22, #0xc, MUL VL]\n"
+ "addvl x22, x22, #16\n"
"ble 7f\n"
"6:" // K loop
".inst 0xa09c2540 // smopa za0.s, p1/M, p1/M, z10.b, z28.b\n"
- "subs x22, x22, #0x1\n"
+ "subs x21, x21, #0x1\n"
".inst 0xa09d2541 // smopa za1.s, p1/M, p1/M, z10.b, z29.b\n"
".inst 0xa09e2542 // smopa za2.s, p1/M, p1/M, z10.b, z30.b\n"
".inst 0xa09f2543 // smopa za3.s, p1/M, p1/M, z10.b, z31.b\n"
- "ld1b { z10.b }, p1/Z, [x25]\n"
+ "ld1b { z10.b }, p1/Z, [x24]\n"
".inst 0xa08c2600 // smopa za0.s, p1/M, p1/M, z16.b, z12.b\n"
- ".inst 0xa04086fd // ldnt1b { z28.b-z31.b }, pn9.b/Z, [x23]\n"
+ ".inst 0xa04086dd // ldnt1b { z28.b-z31.b }, pn9.b/Z, [x22]\n"
".inst 0xa08d2601 // smopa za1.s, p1/M, p1/M, z16.b, z13.b\n"
".inst 0xa08e2602 // smopa za2.s, p1/M, p1/M, z16.b, z14.b\n"
".inst 0xa08f2603 // smopa za3.s, p1/M, p1/M, z16.b, z15.b\n"
- "ld1b { z16.b }, p1/Z, [x25, #1, MUL VL]\n"
+ "ld1b { z16.b }, p1/Z, [x24, #1, MUL VL]\n"
".inst 0xa09826a0 // smopa za0.s, p1/M, p1/M, z21.b, z24.b\n"
- ".inst 0xa04186ed // ldnt1b { z12.b-z15.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa04186cd // ldnt1b { z12.b-z15.b }, pn9.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0xa09926a1 // smopa za1.s, p1/M, p1/M, z21.b, z25.b\n"
".inst 0xa09a26a2 // smopa za2.s, p1/M, p1/M, z21.b, z26.b\n"
".inst 0xa09b26a3 // smopa za3.s, p1/M, p1/M, z21.b, z27.b\n"
- "ld1b { z21.b }, p1/Z, [x25, #2, MUL VL]\n"
- ".inst 0xa04286f9 // ldnt1b { z24.b-z27.b }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
+ "ld1b { z21.b }, p1/Z, [x24, #2, MUL VL]\n"
+ ".inst 0xa04286d9 // ldnt1b { z24.b-z27.b }, pn9.b/Z, [x22, #0x8, MUL VL]\n"
".inst 0xa0802660 // smopa za0.s, p1/M, p1/M, z19.b, z0.b\n"
".inst 0xa0812661 // smopa za1.s, p1/M, p1/M, z19.b, z1.b\n"
".inst 0xa0822662 // smopa za2.s, p1/M, p1/M, z19.b, z2.b\n"
".inst 0xa0832663 // smopa za3.s, p1/M, p1/M, z19.b, z3.b\n"
- "ld1b { z19.b }, p1/Z, [x25, #3, MUL VL]\n"
- "addvl x25, x25, #4\n"
- ".inst 0xa04386e1 // ldnt1b { z0.b-z3.b }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
- "addvl x23, x23, #16\n"
+ "ld1b { z19.b }, p1/Z, [x24, #3, MUL VL]\n"
+ "addvl x24, x24, #4\n"
+ ".inst 0xa04386c1 // ldnt1b { z0.b-z3.b }, pn9.b/Z, [x22, #0xc, MUL VL]\n"
+ "addvl x22, x22, #16\n"
"bgt 6b\n"
"7:" // K loop tail
".inst 0xa09c2540 // smopa za0.s, p1/M, p1/M, z10.b, z28.b\n"
@@ -210,76 +210,76 @@ void sme2_interleaved_nomerge_s8q_mopa_1VLx4VL(const int8_t *const A, const int8
".inst 0xa0822662 // smopa za2.s, p1/M, p1/M, z19.b, z2.b\n"
".inst 0xa0832663 // smopa za3.s, p1/M, p1/M, z19.b, z3.b\n"
"8:" // K oddments
- "cbz x21, 10f\n"
+ "cbz x20, 10f\n"
"9:" // K oddments: Loop
- "ld1b { z10.b }, p1/Z, [x25]\n"
- "subs x21, x21, #0x1\n"
- "addvl x25, x25, #1\n"
- ".inst 0xa04086fc // ld1b { z28.b-z31.b }, pn9.b/Z, [x23]\n"
- "addvl x23, x23, #4\n"
+ "ld1b { z10.b }, p1/Z, [x24]\n"
+ "subs x20, x20, #0x1\n"
+ "addvl x24, x24, #1\n"
+ ".inst 0xa04086dc // ld1b { z28.b-z31.b }, pn9.b/Z, [x22]\n"
+ "addvl x22, x22, #4\n"
".inst 0xa09c2540 // smopa za0.s, p1/M, p1/M, z10.b, z28.b\n"
".inst 0xa09d2541 // smopa za1.s, p1/M, p1/M, z10.b, z29.b\n"
".inst 0xa09e2542 // smopa za2.s, p1/M, p1/M, z10.b, z30.b\n"
".inst 0xa09f2543 // smopa za3.s, p1/M, p1/M, z10.b, z31.b\n"
"bgt 9b\n"
"10:" // K oddments: End
- "ld1w { z14.s }, p1/Z, [x25]\n"
- "addvl x25, x25, #1\n"
+ "ld1w { z14.s }, p1/Z, [x24]\n"
+ "addvl x24, x24, #1\n"
".inst 0xc09125c0 // addva za0.s, p1/M, p1/M, z14.s\n"
".inst 0xc09125c1 // addva za1.s, p1/M, p1/M, z14.s\n"
".inst 0xc09125c2 // addva za2.s, p1/M, p1/M, z14.s\n"
".inst 0xc09125c3 // addva za3.s, p1/M, p1/M, z14.s\n"
- "tbz x14, #1, 14f\n"
- "tbz x14, #0, 12f\n"
+ "tbz x13, #1, 14f\n"
+ "tbz x13, #0, 12f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"11:" // Store to partial result buffer: Store and refill: Loop
- ".inst 0xa040c5b8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x13]\n"
+ ".inst 0xa040c578 // ld1w { z24.s-z27.s }, pn9.b/Z, [x11]\n"
".inst 0xc086041c // mova { z28.s-z31.s }, za0h.s[x12]\n"
".inst 0xc0840700 // mova za0h.s[x12], { z24.s-z27.s }\n"
".inst 0xc0860428 // mova { z8.s-z11.s }, za1h.s[x12]\n"
- ".inst 0xa041c5b0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x13, #0x4, MUL VL]\n"
+ ".inst 0xa041c570 // ld1w { z16.s-z19.s }, pn9.b/Z, [x11, #0x4, MUL VL]\n"
".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
".inst 0xc0860458 // mova { z24.s-z27.s }, za2h.s[x12]\n"
".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
- ".inst 0xa042c5a4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x13, #0x8, MUL VL]\n"
+ ".inst 0xa042c564 // ld1w { z4.s-z7.s }, pn9.b/Z, [x11, #0x8, MUL VL]\n"
".inst 0xc0840482 // mova za2h.s[x12], { z4.s-z7.s }\n"
- ".inst 0xa043c5a4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x13, #0xc, MUL VL]\n"
+ ".inst 0xa043c564 // ld1w { z4.s-z7.s }, pn9.b/Z, [x11, #0xc, MUL VL]\n"
".inst 0xc0840483 // mova za3h.s[x12], { z4.s-z7.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- ".inst 0xa060c57c // st1w { z28.s-z31.s }, pn9.b, [x11]\n"
- "addvl x13, x13, #16\n"
- ".inst 0xa061c568 // st1w { z8.s-z11.s }, pn9.b, [x11, #0x4, MUL VL]\n"
- ".inst 0xa062c578 // st1w { z24.s-z27.s }, pn9.b, [x11, #0x8, MUL VL]\n"
- ".inst 0xa063c56c // st1w { z12.s-z15.s }, pn9.b, [x11, #0xc, MUL VL]\n"
+ "cmp x12, x19\n"
+ ".inst 0xa060c55c // st1w { z28.s-z31.s }, pn9.b, [x10]\n"
"addvl x11, x11, #16\n"
+ ".inst 0xa061c548 // st1w { z8.s-z11.s }, pn9.b, [x10, #0x4, MUL VL]\n"
+ ".inst 0xa062c558 // st1w { z24.s-z27.s }, pn9.b, [x10, #0x8, MUL VL]\n"
+ ".inst 0xa063c54c // st1w { z12.s-z15.s }, pn9.b, [x10, #0xc, MUL VL]\n"
+ "addvl x10, x10, #16\n"
"blt 11b\n"
"b 21f\n"
"12:" // Store to partial result buffer: Store only
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"13:" // Store to partial result buffer: Store only: Loop
".inst 0xc086041c // mova { z28.s-z31.s }, za0h.s[x12]\n"
".inst 0xc0860420 // mova { z0.s-z3.s }, za1h.s[x12]\n"
- ".inst 0xa060c57c // st1w { z28.s-z31.s }, pn9.b, [x11]\n"
+ ".inst 0xa060c55c // st1w { z28.s-z31.s }, pn9.b, [x10]\n"
".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
- ".inst 0xa061c560 // st1w { z0.s-z3.s }, pn9.b, [x11, #0x4, MUL VL]\n"
+ ".inst 0xa061c540 // st1w { z0.s-z3.s }, pn9.b, [x10, #0x4, MUL VL]\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- ".inst 0xa062c568 // st1w { z8.s-z11.s }, pn9.b, [x11, #0x8, MUL VL]\n"
- ".inst 0xa063c570 // st1w { z16.s-z19.s }, pn9.b, [x11, #0xc, MUL VL]\n"
- "addvl x11, x11, #16\n"
+ "cmp x12, x19\n"
+ ".inst 0xa062c548 // st1w { z8.s-z11.s }, pn9.b, [x10, #0x8, MUL VL]\n"
+ ".inst 0xa063c550 // st1w { z16.s-z19.s }, pn9.b, [x10, #0xc, MUL VL]\n"
+ "addvl x10, x10, #16\n"
"blt 13b\n"
"b 21f\n"
"14:" // Store to output array
- "ldr x24, [%x[args], %[offsetof_C]]\n"
- "add x24, x24, x28\n" // C += n
- "sub x23, x10, x9\n"
+ "ldr x23, [%x[args], %[offsetof_C]]\n"
+ "add x23, x23, x27\n" // C += n
+ "sub x22, x9, x28\n"
"ld1rw { z12.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
- "ldr x22, [%x[args], %[offsetof_ldcb]]\n"
- "madd x24, x9, x22, x24\n" // C += m * ldc
+ "ldr x21, [%x[args], %[offsetof_ldcb]]\n"
+ "madd x23, x28, x21, x23\n" // C += m * ldc
"ld1rw { z13.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
"ld1rw { z14.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
"ld1rw { z15.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
@@ -290,24 +290,24 @@ void sme2_interleaved_nomerge_s8q_mopa_1VLx4VL(const int8_t *const A, const int8
"ld1rw { z1.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_c_offset]]\n"
"ld1rw { z21.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_minval]]\n"
"ld1rw { z20.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_maxval]]\n"
- "tbz x14, #2, 15f\n"
- "ldr w21, [%x[args], %[offsetof_n_0]]\n"
- "add x21, x21, x28\n"
- "ldr x20, [%x[rq], %[offsetof_Requantize32_per_channel_muls]]\n"
- "add x20, x20, x21, LSL #2\n"
- ".inst 0xa040c28c // ld1w { z12.s-z15.s }, p8/Z, [x20]\n"
- "ldr x20, [%x[rq], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
- "add x20, x20, x21, LSL #2\n"
- ".inst 0xa040c284 // ld1w { z4.s-z7.s }, p8/Z, [x20]\n"
+ "tbz x13, #2, 15f\n"
+ "ldr w20, [%x[args], %[offsetof_n_0]]\n"
+ "add x20, x20, x27\n"
+ "ldr x19, [%x[rq], %[offsetof_Requantize32_per_channel_muls]]\n"
+ "add x19, x19, x20, LSL #2\n"
+ ".inst 0xa040c26c // ld1w { z12.s-z15.s }, p8/Z, [x19]\n"
+ "ldr x19, [%x[rq], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+ "add x19, x19, x20, LSL #2\n"
+ ".inst 0xa040c264 // ld1w { z4.s-z7.s }, p8/Z, [x19]\n"
"15:" // Store to output array: Load per-channel parameters: End
- "cntw x20\n"
- "whilelt p0.b, x28, x27\n"
- "cmp x23, x20\n"
- "csel x20, x23, x20, LT\n"
- "lsr x21, x20, #0x1\n"
+ "cntw x19\n"
+ "whilelt p0.b, x27, x26\n"
+ "cmp x22, x19\n"
+ "csel x19, x22, x19, LT\n"
+ "lsr x20, x19, #0x1\n"
"mov x12, #0x0\n"
- "and x20, x20, #0x1\n"
- "cbz x21, 17f\n"
+ "and x19, x19, #0x1\n"
+ "cbz x20, 17f\n"
"16:" // Store to output array: Accumulator row 0 loop
".inst 0xc086001a // mova { z26.s-z27.s }, za0h.s[x12, 0:1]\n"
".inst 0xc086005c // mova { z28.s-z29.s }, za1h.s[x12, 0:1]\n"
@@ -317,7 +317,7 @@ void sme2_interleaved_nomerge_s8q_mopa_1VLx4VL(const int8_t *const A, const int8
".inst 0xc1ada41c // sqdmulh { z28.s-z29.s }, { z28.s-z29.s }, z13.s\n"
".inst 0xc1aea416 // sqdmulh { z22.s-z23.s }, { z22.s-z23.s }, z14.s\n"
"add x12, x12, #0x2\n"
- "cmp x12, x21, LSL #1\n"
+ "cmp x12, x20, LSL #1\n"
".inst 0xc1afa410 // sqdmulh { z16.s-z17.s }, { z16.s-z17.s }, z15.s\n"
".inst 0xc1a4a23a // srshl { z26.s-z27.s }, { z26.s-z27.s }, z4.s\n"
".inst 0xc1a5a23c // srshl { z28.s-z29.s }, { z28.s-z29.s }, z5.s\n"
@@ -336,14 +336,14 @@ void sme2_interleaved_nomerge_s8q_mopa_1VLx4VL(const int8_t *const A, const int8
"uzp1 z18.b, z27.b, z29.b\n"
"uzp1 z17.b, z23.b, z17.b\n"
"uzp1 z16.b, z19.b, z16.b\n"
- "st1b { z16.b }, p0, [x24]\n"
- "add x24, x24, x22\n"
+ "st1b { z16.b }, p0, [x23]\n"
+ "add x23, x23, x21\n"
"uzp1 z16.b, z18.b, z17.b\n"
- "st1b { z16.b }, p0, [x24]\n"
- "add x24, x24, x22\n"
+ "st1b { z16.b }, p0, [x23]\n"
+ "add x23, x23, x21\n"
"blt 16b\n"
"17:" // Store to output array: Accumulator row 0 oddments
- "cbz x20, 18f\n"
+ "cbz x19, 18f\n"
".inst 0xc0860002 // mova { z2.s-z3.s }, za0h.s[x12, 0:1]\n"
".inst 0xc0860058 // mova { z24.s-z25.s }, za1h.s[x12, 0:1]\n"
".inst 0xc1aca402 // sqdmulh { z2.s-z3.s }, { z2.s-z3.s }, z12.s\n"
@@ -367,38 +367,38 @@ void sme2_interleaved_nomerge_s8q_mopa_1VLx4VL(const int8_t *const A, const int8
".inst 0xc1b4c6aa // sclamp { z10.s-z11.s }, z21.s, z20.s\n"
"uzp1 z16.b, z16.b, z10.b\n"
"uzp1 z16.b, z23.b, z16.b\n"
- "st1b { z16.b }, p0, [x24]\n"
+ "st1b { z16.b }, p0, [x23]\n"
"18:" // Store to output array: Accumulator row 0 oddments: End
"19:" // Store to output array: End
- "tbz x14, #0, 21f\n"
+ "tbz x13, #0, 21f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"20:" // Store to output array: Refill accumulators: Loop
- ".inst 0xa040c5b0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x13]\n"
+ ".inst 0xa040c570 // ld1w { z16.s-z19.s }, pn9.b/Z, [x11]\n"
".inst 0xc0840600 // mova za0h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa041c5ac // ld1w { z12.s-z15.s }, pn9.b/Z, [x13, #0x4, MUL VL]\n"
+ ".inst 0xa041c56c // ld1w { z12.s-z15.s }, pn9.b/Z, [x11, #0x4, MUL VL]\n"
".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xa042c5b0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x13, #0x8, MUL VL]\n"
+ ".inst 0xa042c570 // ld1w { z16.s-z19.s }, pn9.b/Z, [x11, #0x8, MUL VL]\n"
".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa043c5ac // ld1w { z12.s-z15.s }, pn9.b/Z, [x13, #0xc, MUL VL]\n"
+ ".inst 0xa043c56c // ld1w { z12.s-z15.s }, pn9.b/Z, [x11, #0xc, MUL VL]\n"
".inst 0xc0840583 // mova za3h.s[x12], { z12.s-z15.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- "addvl x13, x13, #16\n"
+ "cmp x12, x19\n"
+ "addvl x11, x11, #16\n"
"blt 20b\n"
"21:" // End block
- "incw x28, ALL, MUL #4\n"
- "cmp x28, x27\n"
+ "incw x27, ALL, MUL #4\n"
+ "cmp x27, x26\n"
"blt 3b\n"
- "incw x9\n"
- "cmp x9, x10\n"
- "mov x28, #0x0\n"
- "mov x26, x25\n"
+ "incw x28\n"
+ "cmp x28, x9\n"
+ "mov x27, #0x0\n"
+ "mov x25, x24\n"
"blt 3b\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_Requantize32_c_offset] "I" (offsetof(Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb)), [offsetof_n_0] "I" (offsetof(KernelArgs, n_0)), [rq] "r" (&rq)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
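
A note on the pattern repeated in every hunk above: the revert shifts each general-purpose register index down by one (x20 becomes x19, x21 becomes x20, and so on) so that x19 is back in the kernel's allocation, and the clobber list at the end of the asm statement gains "x19" to match. A minimal sketch of why the clobber entry matters, assuming only standard GCC/Clang extended-asm semantics and nothing specific to these kernels (the function name and operands are illustrative, not from the source):

#include <cstdint>

uint64_t scratch_x19(uint64_t v) {
    uint64_t out;
    __asm__ __volatile__(
        "mov x19, %x[in]\n"   // scratch x19 inside the asm body, as the restored kernels do
        "add x19, x19, #0x1\n"
        "mov %x[out], x19\n"
        : [out] "=r" (out)
        : [in] "r" (v)
        : "x19"               // declared clobber: the compiler saves/restores the
    );                        // callee-saved register around the statement
    return out;
}

Without the "x19" clobber the compiler would be free to keep a live value in that callee-saved register across the asm statement, which is why the clobber lists in these kernels track the register renumbering exactly.
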
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_2VLx2VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_2VLx2VL/generic.cpp
index 0b642818e2..e565699af5 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_2VLx2VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_2VLx2VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __ARM_FEATURE_SVE
#ifdef ARM_COMPUTE_ENABLE_SME2
@@ -90,107 +90,107 @@ void sme2_interleaved_nomerge_s8q_mopa_2VLx2VL(const int8_t *const A, const int8
KernelArgs args(A, B, C, ldc, M, N, K, bias, rq, n_0, accumulate, accumulator_buffer);
__asm__ __volatile__(
- "ldr x16, [%x[args], %[offsetof_flags]]\n"
+ "ldr x15, [%x[args], %[offsetof_flags]]\n"
".inst 0xd503477f // SMSTART ZA\n"
"ptrue p1.b\n"
".inst 0x25207811 // ptrue pn9.b\n"
- "ldr x15, [%x[args], %[offsetof_accumulator_buffer]]\n"
"ldr x14, [%x[args], %[offsetof_accumulator_buffer]]\n"
- "tbz x16, #0, 2f\n"
+ "ldr x13, [%x[args], %[offsetof_accumulator_buffer]]\n"
+ "tbz x15, #0, 2f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"1:" // Initial accumulator load from buffer: Loop
- ".inst 0xa040c5e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xa040c5c0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x14]\n"
".inst 0xc0840400 // mova za0h.s[x12], { z0.s-z3.s }\n"
- ".inst 0xa041c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa041c5cc // ld1w { z12.s-z15.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xa042c5e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa042c5c0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
".inst 0xc0840402 // mova za2h.s[x12], { z0.s-z3.s }\n"
- ".inst 0xa043c5fc // ld1w { z28.s-z31.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xa043c5dc // ld1w { z28.s-z31.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
".inst 0xc0840783 // mova za3h.s[x12], { z28.s-z31.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- "addvl x15, x15, #16\n"
+ "cmp x12, x19\n"
+ "addvl x14, x14, #16\n"
"blt 1b\n"
"2:" // Initial accumulator load from buffer: End
- "ldr w13, [%x[args], %[offsetof_M]]\n"
- "mov x11, #0x0\n"
+ "ldr w11, [%x[args], %[offsetof_M]]\n"
"mov x10, #0x0\n"
- "ldr w9, [%x[args], %[offsetof_N]]\n"
- "ldr x28, [%x[args], %[offsetof_A]]\n"
+ "mov x9, #0x0\n"
+ "ldr w28, [%x[args], %[offsetof_N]]\n"
+ "ldr x27, [%x[args], %[offsetof_A]]\n"
"3:" // M and N loop
- "mov x27, x28\n"
- ".inst 0x25a94550 // whilelt pn8.s, x10, x9, VLx2\n"
- "tbnz x16, #0, 4f\n"
- "ldr x20, [%x[args], %[offsetof_bias]]\n"
+ "mov x26, x27\n"
+ ".inst 0x25bc4530 // whilelt pn8.s, x9, x28, VLx2\n"
+ "tbnz x15, #0, 4f\n"
+ "ldr x19, [%x[args], %[offsetof_bias]]\n"
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
- "cbz x20, 5f\n"
- ".inst 0xa00a4295 // ldnt1w { z20.s-z21.s }, p8/Z, [x20, x10, LSL #2]\n"
+ "cbz x19, 5f\n"
+ ".inst 0xa0094275 // ldnt1w { z20.s-z21.s }, p8/Z, [x19, x9, LSL #2]\n"
".inst 0xc0902680 // addha za0.s, p1/M, p1/M, z20.s\n"
".inst 0xc09026a1 // addha za1.s, p1/M, p1/M, z21.s\n"
".inst 0xc0902682 // addha za2.s, p1/M, p1/M, z20.s\n"
".inst 0xc09026a3 // addha za3.s, p1/M, p1/M, z21.s\n"
"4:" // Prepare accumulators: Test for last block
+ "mov x19, x9\n"
"mov x20, x10\n"
- "mov x21, x11\n"
+ "incw x19, ALL, MUL #2\n"
"incw x20, ALL, MUL #2\n"
- "incw x21, ALL, MUL #2\n"
- "cmp x20, x9\n"
- "csel x21, x11, x21, LT\n"
- "mov x20, x16\n"
- "bfm x16, XZR, #0x0, #0x0 // bfc x16, #0x0, #0x1\n"
- "cmp x21, x13\n"
- "csel x16, x20, x16, LT\n"
+ "cmp x19, x28\n"
+ "csel x20, x10, x20, LT\n"
+ "mov x19, x15\n"
+ "bfm x15, XZR, #0x0, #0x0 // bfc x15, #0x0, #0x1\n"
+ "cmp x20, x11\n"
+ "csel x15, x19, x15, LT\n"
"5:" // Prepare accumulators: End
- "ldr x20, [%x[args], %[offsetof_K]]\n"
- "add x20, x20, #0x3\n"
- "lsr x20, x20, #0x2\n"
- "ldr x23, [%x[args], %[offsetof_B]]\n"
- "lsr x22, x20, #0x2\n"
- "and x21, x20, #0x3\n"
- "ldr x20, [%x[args], %[offsetof_kstride_bytes]]\n"
- "madd x23, x10, x20, x23\n" // bptr = B + n * kstride_bytes
- "cbz x22, 8f\n"
- "subs x22, x22, #0x1\n"
- ".inst 0xa040077e // ld1b { z30.b-z31.b }, pn9.b/Z, [x27]\n"
- ".inst 0xa04006f1 // ldnt1b { z16.b-z17.b }, pn9.b/Z, [x23]\n"
- ".inst 0xa041076e // ld1b { z14.b-z15.b }, pn9.b/Z, [x27, #0x2, MUL VL]\n"
- ".inst 0xa04106e9 // ldnt1b { z8.b-z9.b }, pn9.b/Z, [x23, #0x2, MUL VL]\n"
- ".inst 0xa0420760 // ld1b { z0.b-z1.b }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
- ".inst 0xa14206fc // ldnt1b { z20.b, z28.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xa0430764 // ld1b { z4.b-z5.b }, pn9.b/Z, [x27, #0x6, MUL VL]\n"
- "addvl x27, x27, #8\n"
- ".inst 0xa14306ea // ldnt1b { z2.b, z10.b }, pn9.b/Z, [x23, #0x6, MUL VL]\n"
- "addvl x23, x23, #8\n"
+ "ldr x19, [%x[args], %[offsetof_K]]\n"
+ "add x19, x19, #0x3\n"
+ "lsr x19, x19, #0x2\n"
+ "ldr x22, [%x[args], %[offsetof_B]]\n"
+ "lsr x21, x19, #0x2\n"
+ "and x20, x19, #0x3\n"
+ "ldr x19, [%x[args], %[offsetof_kstride_bytes]]\n"
+ "madd x22, x9, x19, x22\n" // bptr = B + n * kstride_bytes
+ "cbz x21, 8f\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xa040075e // ld1b { z30.b-z31.b }, pn9.b/Z, [x26]\n"
+ ".inst 0xa04006d1 // ldnt1b { z16.b-z17.b }, pn9.b/Z, [x22]\n"
+ ".inst 0xa041074e // ld1b { z14.b-z15.b }, pn9.b/Z, [x26, #0x2, MUL VL]\n"
+ ".inst 0xa04106c9 // ldnt1b { z8.b-z9.b }, pn9.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xa0420740 // ld1b { z0.b-z1.b }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa14206dc // ldnt1b { z20.b, z28.b }, pn9.b/Z, [x22, #0x4, MUL VL]\n"
+ ".inst 0xa0430744 // ld1b { z4.b-z5.b }, pn9.b/Z, [x26, #0x6, MUL VL]\n"
+ "addvl x26, x26, #8\n"
+ ".inst 0xa14306ca // ldnt1b { z2.b, z10.b }, pn9.b/Z, [x22, #0x6, MUL VL]\n"
+ "addvl x22, x22, #8\n"
"ble 7f\n"
"6:" // K loop
".inst 0xa09027c0 // smopa za0.s, p1/M, p1/M, z30.b, z16.b\n"
- "subs x22, x22, #0x1\n"
+ "subs x21, x21, #0x1\n"
".inst 0xa09127c1 // smopa za1.s, p1/M, p1/M, z30.b, z17.b\n"
".inst 0xa09027e2 // smopa za2.s, p1/M, p1/M, z31.b, z16.b\n"
".inst 0xa09127e3 // smopa za3.s, p1/M, p1/M, z31.b, z17.b\n"
- ".inst 0xa040077e // ld1b { z30.b-z31.b }, pn9.b/Z, [x27]\n"
+ ".inst 0xa040075e // ld1b { z30.b-z31.b }, pn9.b/Z, [x26]\n"
".inst 0xa08825c0 // smopa za0.s, p1/M, p1/M, z14.b, z8.b\n"
- ".inst 0xa04006f1 // ldnt1b { z16.b-z17.b }, pn9.b/Z, [x23]\n"
+ ".inst 0xa04006d1 // ldnt1b { z16.b-z17.b }, pn9.b/Z, [x22]\n"
".inst 0xa08925c1 // smopa za1.s, p1/M, p1/M, z14.b, z9.b\n"
".inst 0xa08825e2 // smopa za2.s, p1/M, p1/M, z15.b, z8.b\n"
".inst 0xa08925e3 // smopa za3.s, p1/M, p1/M, z15.b, z9.b\n"
- ".inst 0xa041076e // ld1b { z14.b-z15.b }, pn9.b/Z, [x27, #0x2, MUL VL]\n"
+ ".inst 0xa041074e // ld1b { z14.b-z15.b }, pn9.b/Z, [x26, #0x2, MUL VL]\n"
".inst 0xa0942400 // smopa za0.s, p1/M, p1/M, z0.b, z20.b\n"
- ".inst 0xa04106e9 // ldnt1b { z8.b-z9.b }, pn9.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xa04106c9 // ldnt1b { z8.b-z9.b }, pn9.b/Z, [x22, #0x2, MUL VL]\n"
".inst 0xa09c2401 // smopa za1.s, p1/M, p1/M, z0.b, z28.b\n"
".inst 0xa0942422 // smopa za2.s, p1/M, p1/M, z1.b, z20.b\n"
".inst 0xa09c2423 // smopa za3.s, p1/M, p1/M, z1.b, z28.b\n"
- ".inst 0xa0420760 // ld1b { z0.b-z1.b }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
- ".inst 0xa14206fc // ldnt1b { z20.b, z28.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa0420740 // ld1b { z0.b-z1.b }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa14206dc // ldnt1b { z20.b, z28.b }, pn9.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0xa0822480 // smopa za0.s, p1/M, p1/M, z4.b, z2.b\n"
".inst 0xa08a2481 // smopa za1.s, p1/M, p1/M, z4.b, z10.b\n"
".inst 0xa08224a2 // smopa za2.s, p1/M, p1/M, z5.b, z2.b\n"
".inst 0xa08a24a3 // smopa za3.s, p1/M, p1/M, z5.b, z10.b\n"
- ".inst 0xa0430764 // ld1b { z4.b-z5.b }, pn9.b/Z, [x27, #0x6, MUL VL]\n"
- "addvl x27, x27, #8\n"
- ".inst 0xa14306ea // ldnt1b { z2.b, z10.b }, pn9.b/Z, [x23, #0x6, MUL VL]\n"
- "addvl x23, x23, #8\n"
+ ".inst 0xa0430744 // ld1b { z4.b-z5.b }, pn9.b/Z, [x26, #0x6, MUL VL]\n"
+ "addvl x26, x26, #8\n"
+ ".inst 0xa14306ca // ldnt1b { z2.b, z10.b }, pn9.b/Z, [x22, #0x6, MUL VL]\n"
+ "addvl x22, x22, #8\n"
"bgt 6b\n"
"7:" // K loop tail
".inst 0xa09027c0 // smopa za0.s, p1/M, p1/M, z30.b, z16.b\n"
@@ -210,107 +210,107 @@ void sme2_interleaved_nomerge_s8q_mopa_2VLx2VL(const int8_t *const A, const int8
".inst 0xa08224a2 // smopa za2.s, p1/M, p1/M, z5.b, z2.b\n"
".inst 0xa08a24a3 // smopa za3.s, p1/M, p1/M, z5.b, z10.b\n"
"8:" // K oddments
- "cbz x21, 10f\n"
+ "cbz x20, 10f\n"
"9:" // K oddments: Loop
- ".inst 0xa040077e // ld1b { z30.b-z31.b }, pn9.b/Z, [x27]\n"
- "subs x21, x21, #0x1\n"
- "addvl x27, x27, #2\n"
- ".inst 0xa04006f0 // ld1b { z16.b-z17.b }, pn9.b/Z, [x23]\n"
- "addvl x23, x23, #2\n"
+ ".inst 0xa040075e // ld1b { z30.b-z31.b }, pn9.b/Z, [x26]\n"
+ "subs x20, x20, #0x1\n"
+ "addvl x26, x26, #2\n"
+ ".inst 0xa04006d0 // ld1b { z16.b-z17.b }, pn9.b/Z, [x22]\n"
+ "addvl x22, x22, #2\n"
".inst 0xa09027c0 // smopa za0.s, p1/M, p1/M, z30.b, z16.b\n"
".inst 0xa09127c1 // smopa za1.s, p1/M, p1/M, z30.b, z17.b\n"
".inst 0xa09027e2 // smopa za2.s, p1/M, p1/M, z31.b, z16.b\n"
".inst 0xa09127e3 // smopa za3.s, p1/M, p1/M, z31.b, z17.b\n"
"bgt 9b\n"
"10:" // K oddments: End
- ".inst 0xa040476e // ld1w { z14.s-z15.s }, pn9.b/Z, [x27]\n"
- "addvl x27, x27, #2\n"
+ ".inst 0xa040474e // ld1w { z14.s-z15.s }, pn9.b/Z, [x26]\n"
+ "addvl x26, x26, #2\n"
".inst 0xc09125c0 // addva za0.s, p1/M, p1/M, z14.s\n"
".inst 0xc09125c1 // addva za1.s, p1/M, p1/M, z14.s\n"
".inst 0xc09125e2 // addva za2.s, p1/M, p1/M, z15.s\n"
".inst 0xc09125e3 // addva za3.s, p1/M, p1/M, z15.s\n"
- "tbz x16, #1, 14f\n"
- "tbz x16, #0, 12f\n"
+ "tbz x15, #1, 14f\n"
+ "tbz x15, #0, 12f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"11:" // Store to partial result buffer: Store and refill: Loop
- ".inst 0xa040c5fc // ld1w { z28.s-z31.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xa040c5dc // ld1w { z28.s-z31.s }, pn9.b/Z, [x14]\n"
".inst 0xc0860408 // mova { z8.s-z11.s }, za0h.s[x12]\n"
".inst 0xc0840780 // mova za0h.s[x12], { z28.s-z31.s }\n"
".inst 0xc0860434 // mova { z20.s-z23.s }, za1h.s[x12]\n"
- ".inst 0xa041c5f8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa041c5d8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
".inst 0xc0840701 // mova za1h.s[x12], { z24.s-z27.s }\n"
".inst 0xc086045c // mova { z28.s-z31.s }, za2h.s[x12]\n"
".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
- ".inst 0xa042c5f8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa042c5d8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
".inst 0xc0840702 // mova za2h.s[x12], { z24.s-z27.s }\n"
- ".inst 0xa043c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xa043c5cc // ld1w { z12.s-z15.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
".inst 0xc0840583 // mova za3h.s[x12], { z12.s-z15.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- ".inst 0xa060c5c8 // st1w { z8.s-z11.s }, pn9.b, [x14]\n"
- "addvl x15, x15, #16\n"
- ".inst 0xa061c5d4 // st1w { z20.s-z23.s }, pn9.b, [x14, #0x4, MUL VL]\n"
- ".inst 0xa062c5dc // st1w { z28.s-z31.s }, pn9.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c5d0 // st1w { z16.s-z19.s }, pn9.b, [x14, #0xc, MUL VL]\n"
+ "cmp x12, x19\n"
+ ".inst 0xa060c5a8 // st1w { z8.s-z11.s }, pn9.b, [x13]\n"
"addvl x14, x14, #16\n"
+ ".inst 0xa061c5b4 // st1w { z20.s-z23.s }, pn9.b, [x13, #0x4, MUL VL]\n"
+ ".inst 0xa062c5bc // st1w { z28.s-z31.s }, pn9.b, [x13, #0x8, MUL VL]\n"
+ ".inst 0xa063c5b0 // st1w { z16.s-z19.s }, pn9.b, [x13, #0xc, MUL VL]\n"
+ "addvl x13, x13, #16\n"
"blt 11b\n"
"b 24f\n"
"12:" // Store to partial result buffer: Store only
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"13:" // Store to partial result buffer: Store only: Loop
".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
- ".inst 0xa060c5d0 // st1w { z16.s-z19.s }, pn9.b, [x14]\n"
+ ".inst 0xa060c5b0 // st1w { z16.s-z19.s }, pn9.b, [x13]\n"
".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
- ".inst 0xa061c5c4 // st1w { z4.s-z7.s }, pn9.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xa061c5a4 // st1w { z4.s-z7.s }, pn9.b, [x13, #0x4, MUL VL]\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- ".inst 0xa062c5c8 // st1w { z8.s-z11.s }, pn9.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c5cc // st1w { z12.s-z15.s }, pn9.b, [x14, #0xc, MUL VL]\n"
- "addvl x14, x14, #16\n"
+ "cmp x12, x19\n"
+ ".inst 0xa062c5a8 // st1w { z8.s-z11.s }, pn9.b, [x13, #0x8, MUL VL]\n"
+ ".inst 0xa063c5ac // st1w { z12.s-z15.s }, pn9.b, [x13, #0xc, MUL VL]\n"
+ "addvl x13, x13, #16\n"
"blt 13b\n"
"b 24f\n"
"14:" // Store to output array
- "ldr x26, [%x[args], %[offsetof_C]]\n"
- "add x26, x26, x10\n" // C += n
- "sub x25, x13, x11\n"
+ "ldr x25, [%x[args], %[offsetof_C]]\n"
+ "add x25, x25, x9\n" // C += n
+ "sub x24, x11, x10\n"
"ld1rw { z2.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
- "ldr x24, [%x[args], %[offsetof_ldcb]]\n"
- "madd x26, x11, x24, x26\n" // C += m * ldc
+ "ldr x23, [%x[args], %[offsetof_ldcb]]\n"
+ "madd x25, x10, x23, x25\n" // C += m * ldc
"ld1rw { z3.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
"ld1rw { z0.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
"ld1rw { z1.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
"ld1rw { z11.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_c_offset]]\n"
"ld1rw { z25.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_minval]]\n"
"ld1rw { z24.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_maxval]]\n"
- "tbz x16, #2, 15f\n"
- "ldr w21, [%x[args], %[offsetof_n_0]]\n"
- "add x21, x21, x10\n"
- "ldr x20, [%x[rq], %[offsetof_Requantize32_per_channel_muls]]\n"
- "add x20, x20, x21, LSL #2\n"
- ".inst 0xa0404282 // ld1w { z2.s-z3.s }, p8/Z, [x20]\n"
- "ldr x20, [%x[rq], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
- "add x20, x20, x21, LSL #2\n"
- ".inst 0xa0404280 // ld1w { z0.s-z1.s }, p8/Z, [x20]\n"
+ "tbz x15, #2, 15f\n"
+ "ldr w20, [%x[args], %[offsetof_n_0]]\n"
+ "add x20, x20, x9\n"
+ "ldr x19, [%x[rq], %[offsetof_Requantize32_per_channel_muls]]\n"
+ "add x19, x19, x20, LSL #2\n"
+ ".inst 0xa0404262 // ld1w { z2.s-z3.s }, p8/Z, [x19]\n"
+ "ldr x19, [%x[rq], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+ "add x19, x19, x20, LSL #2\n"
+ ".inst 0xa0404260 // ld1w { z0.s-z1.s }, p8/Z, [x19]\n"
"15:" // Store to output array: Load per-channel parameters: End
- "cntw x23\n"
- "whilelt p0.h, x10, x9\n"
- "cmp x25, x23\n"
- "csel x22, x25, x23, LT\n"
- "lsr x21, x22, #0x2\n"
+ "cntw x22\n"
+ "whilelt p0.h, x9, x28\n"
+ "cmp x24, x22\n"
+ "csel x21, x24, x22, LT\n"
+ "lsr x20, x21, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 17f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 17f\n"
"16:" // Store to output array: Accumulator row 0 loop
".inst 0xc086040c // mova { z12.s-z15.s }, za0h.s[x12]\n"
".inst 0xc086043c // mova { z28.s-z31.s }, za1h.s[x12]\n"
".inst 0xc1a2ac0c // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z2.s\n"
".inst 0xc1a3ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z3.s\n"
"add x12, x12, #0x4\n"
- "cmp x12, x21, LSL #2\n"
+ "cmp x12, x20, LSL #2\n"
".inst 0xc1a0aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z0.s\n"
".inst 0xc1a1aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z1.s\n"
".inst 0xc1abab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z11.s\n"
@@ -318,25 +318,25 @@ void sme2_interleaved_nomerge_s8q_mopa_2VLx2VL(const int8_t *const A, const int8
".inst 0xc1b8cf2c // sclamp { z12.s-z15.s }, z25.s, z24.s\n"
".inst 0xc1b8cf3c // sclamp { z28.s-z31.s }, z25.s, z24.s\n"
"uzp1 z16.h, z12.h, z28.h\n"
- "st1b { z16.h }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z16.h }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"uzp1 z16.h, z13.h, z29.h\n"
"uzp1 z17.h, z14.h, z30.h\n"
- "st1b { z16.h }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z16.h }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"uzp1 z16.h, z15.h, z31.h\n"
- "st1b { z17.h }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1b { z16.h }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z17.h }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1b { z16.h }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"blt 16b\n"
"17:" // Store to output array: Accumulator row 0 oddments
- "cbz x20, 18f\n"
+ "cbz x19, 18f\n"
".inst 0xc086041c // mova { z28.s-z31.s }, za0h.s[x12]\n"
".inst 0xc086042c // mova { z12.s-z15.s }, za1h.s[x12]\n"
".inst 0xc1a2ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z2.s\n"
".inst 0xc1a3ac0c // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z3.s\n"
- "subs x20, x20, #0x1\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc1a0aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z0.s\n"
".inst 0xc1a1aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z1.s\n"
".inst 0xc1abab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z11.s\n"
@@ -344,34 +344,34 @@ void sme2_interleaved_nomerge_s8q_mopa_2VLx2VL(const int8_t *const A, const int8
".inst 0xc1b8cf3c // sclamp { z28.s-z31.s }, z25.s, z24.s\n"
".inst 0xc1b8cf2c // sclamp { z12.s-z15.s }, z25.s, z24.s\n"
"uzp1 z16.h, z28.h, z12.h\n"
- "st1b { z16.h }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z16.h }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 18f\n"
- "subs x20, x20, #0x1\n"
+ "subs x19, x19, #0x1\n"
"uzp1 z16.h, z29.h, z13.h\n"
- "st1b { z16.h }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z16.h }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 18f\n"
"uzp1 z16.h, z30.h, z14.h\n"
- "st1b { z16.h }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z16.h }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"18:" // Store to output array: Accumulator row 0 oddments: End
- "subs x25, x25, x22\n"
+ "subs x24, x24, x21\n"
"beq 22f\n"
- "whilelt p0.h, x10, x9\n"
- "cmp x25, x23\n"
- "csel x20, x25, x23, LT\n"
- "lsr x21, x20, #0x2\n"
+ "whilelt p0.h, x9, x28\n"
+ "cmp x24, x22\n"
+ "csel x19, x24, x22, LT\n"
+ "lsr x20, x19, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x20, #0x3\n"
- "cbz x21, 20f\n"
+ "and x19, x19, #0x3\n"
+ "cbz x20, 20f\n"
"19:" // Store to output array: Accumulator row 1 loop
".inst 0xc0860444 // mova { z4.s-z7.s }, za2h.s[x12]\n"
".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
".inst 0xc1a2ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z2.s\n"
".inst 0xc1a3ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z3.s\n"
"add x12, x12, #0x4\n"
- "cmp x12, x21, LSL #2\n"
+ "cmp x12, x20, LSL #2\n"
".inst 0xc1a0aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z0.s\n"
".inst 0xc1a1aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z1.s\n"
".inst 0xc1abab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
@@ -379,25 +379,25 @@ void sme2_interleaved_nomerge_s8q_mopa_2VLx2VL(const int8_t *const A, const int8
".inst 0xc1b8cf24 // sclamp { z4.s-z7.s }, z25.s, z24.s\n"
".inst 0xc1b8cf30 // sclamp { z16.s-z19.s }, z25.s, z24.s\n"
"uzp1 z16.h, z4.h, z16.h\n"
- "st1b { z16.h }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z16.h }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"uzp1 z16.h, z5.h, z17.h\n"
"uzp1 z17.h, z6.h, z18.h\n"
- "st1b { z16.h }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z16.h }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"uzp1 z16.h, z7.h, z19.h\n"
- "st1b { z17.h }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1b { z16.h }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z17.h }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1b { z16.h }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"blt 19b\n"
"20:" // Store to output array: Accumulator row 1 oddments
- "cbz x20, 21f\n"
+ "cbz x19, 21f\n"
".inst 0xc0860454 // mova { z20.s-z23.s }, za2h.s[x12]\n"
".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
".inst 0xc1a2ac14 // sqdmulh { z20.s-z23.s }, { z20.s-z23.s }, z2.s\n"
".inst 0xc1a3ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z3.s\n"
- "subs x20, x20, #0x1\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc1a0aa34 // srshl { z20.s-z23.s }, { z20.s-z23.s }, z0.s\n"
".inst 0xc1a1aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z1.s\n"
".inst 0xc1abab14 // add { z20.s-z23.s }, { z20.s-z23.s }, z11.s\n"
@@ -405,47 +405,47 @@ void sme2_interleaved_nomerge_s8q_mopa_2VLx2VL(const int8_t *const A, const int8
".inst 0xc1b8cf34 // sclamp { z20.s-z23.s }, z25.s, z24.s\n"
".inst 0xc1b8cf30 // sclamp { z16.s-z19.s }, z25.s, z24.s\n"
"uzp1 z16.h, z20.h, z16.h\n"
- "st1b { z16.h }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z16.h }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 21f\n"
- "subs x20, x20, #0x1\n"
+ "subs x19, x19, #0x1\n"
"uzp1 z16.h, z21.h, z17.h\n"
- "st1b { z16.h }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z16.h }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 21f\n"
"uzp1 z16.h, z22.h, z18.h\n"
- "st1b { z16.h }, p0, [x26]\n"
+ "st1b { z16.h }, p0, [x25]\n"
"21:" // Store to output array: Accumulator row 1 oddments: End
"22:" // Store to output array: End
- "tbz x16, #0, 24f\n"
+ "tbz x15, #0, 24f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"23:" // Store to output array: Refill accumulators: Loop
- ".inst 0xa040c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xa040c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14]\n"
".inst 0xc0840600 // mova za0h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa041c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa041c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa042c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa042c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa043c5e4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xa043c5c4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
".inst 0xc0840483 // mova za3h.s[x12], { z4.s-z7.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- "addvl x15, x15, #16\n"
+ "cmp x12, x19\n"
+ "addvl x14, x14, #16\n"
"blt 23b\n"
"24:" // End block
- "incw x10, ALL, MUL #2\n"
- "cmp x10, x9\n"
+ "incw x9, ALL, MUL #2\n"
+ "cmp x9, x28\n"
"blt 3b\n"
- "incw x11, ALL, MUL #2\n"
- "cmp x11, x13\n"
- "mov x10, #0x0\n"
- "mov x28, x27\n"
+ "incw x10, ALL, MUL #2\n"
+ "cmp x10, x11\n"
+ "mov x9, #0x0\n"
+ "mov x27, x26\n"
"blt 3b\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_Requantize32_c_offset] "I" (offsetof(Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb)), [offsetof_n_0] "I" (offsetof(KernelArgs, n_0)), [rq] "r" (&rq)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
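
The K-loop bookkeeping at label 5 of each kernel is unchanged by the revert; only the register names differ. A sketch of the arithmetic in the restored lines (ldr x19 ... offsetof_K; add/lsr/lsr/and), with illustrative C names that are not identifiers from the source:

#include <cstdint>

// Illustrative only: mirrors the "Prepare accumulators: End" index math.
void split_k(uint64_t K, uint64_t &k_main, uint64_t &k_odd) {
    uint64_t k_blocks = (K + 3) >> 2;  // add x19, #0x3 ; lsr x19, #0x2: round K up to whole 4-byte smopa steps
    k_main = k_blocks >> 2;            // lsr x21, x19, #0x2: trip count of the 4x-unrolled K loop
    k_odd  = k_blocks & 3;             // and x20, x19, #0x3: iterations of the "K oddments" tail
}

Each smopa consumes four bytes of K per step, so K is rounded up to whole steps and then split into a four-way-unrolled main loop plus a remainder loop, matching labels 6 through 9 in the listings.
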
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_4VLx1VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_4VLx1VL/generic.cpp
index 0d0e3da224..a738a10418 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_4VLx1VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_4VLx1VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __ARM_FEATURE_SVE
#ifdef ARM_COMPUTE_ENABLE_SME2
@@ -90,107 +90,107 @@ void sme2_interleaved_nomerge_s8q_mopa_4VLx1VL(const int8_t *const A, const int8
KernelArgs args(A, B, C, ldc, M, N, K, bias, rq, n_0, accumulate, accumulator_buffer);
__asm__ __volatile__(
- "ldr x16, [%x[args], %[offsetof_flags]]\n"
+ "ldr x15, [%x[args], %[offsetof_flags]]\n"
".inst 0xd503477f // SMSTART ZA\n"
"ptrue p1.b\n"
".inst 0x25207810 // ptrue pn8.b\n"
- "ldr x15, [%x[args], %[offsetof_accumulator_buffer]]\n"
"ldr x14, [%x[args], %[offsetof_accumulator_buffer]]\n"
- "tbz x16, #0, 2f\n"
+ "ldr x13, [%x[args], %[offsetof_accumulator_buffer]]\n"
+ "tbz x15, #0, 2f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"1:" // Initial accumulator load from buffer: Loop
- ".inst 0xa040c1fc // ld1w { z28.s-z31.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xa040c1dc // ld1w { z28.s-z31.s }, pn8.b/Z, [x14]\n"
".inst 0xc0840780 // mova za0h.s[x12], { z28.s-z31.s }\n"
- ".inst 0xa041c1ec // ld1w { z12.s-z15.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa041c1cc // ld1w { z12.s-z15.s }, pn8.b/Z, [x14, #0x4, MUL VL]\n"
".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xa042c1f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa042c1d4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x14, #0x8, MUL VL]\n"
".inst 0xc0840682 // mova za2h.s[x12], { z20.s-z23.s }\n"
- ".inst 0xa043c1f8 // ld1w { z24.s-z27.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xa043c1d8 // ld1w { z24.s-z27.s }, pn8.b/Z, [x14, #0xc, MUL VL]\n"
".inst 0xc0840703 // mova za3h.s[x12], { z24.s-z27.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- "addvl x15, x15, #16\n"
+ "cmp x12, x19\n"
+ "addvl x14, x14, #16\n"
"blt 1b\n"
"2:" // Initial accumulator load from buffer: End
- "ldr w13, [%x[args], %[offsetof_M]]\n"
- "mov x11, #0x0\n"
+ "ldr w11, [%x[args], %[offsetof_M]]\n"
"mov x10, #0x0\n"
- "ldr w9, [%x[args], %[offsetof_N]]\n"
- "ldr x28, [%x[args], %[offsetof_A]]\n"
+ "mov x9, #0x0\n"
+ "ldr w28, [%x[args], %[offsetof_N]]\n"
+ "ldr x27, [%x[args], %[offsetof_A]]\n"
"3:" // M and N loop
- "mov x27, x28\n"
- "whilelt p0.s, x10, x9\n"
- "tbnz x16, #0, 4f\n"
- "ldr x20, [%x[args], %[offsetof_bias]]\n"
+ "mov x26, x27\n"
+ "whilelt p0.s, x9, x28\n"
+ "tbnz x15, #0, 4f\n"
+ "ldr x19, [%x[args], %[offsetof_bias]]\n"
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
- "cbz x20, 5f\n"
- "ldnt1w { z15.s }, p0/Z, [x20, x10, LSL #2]\n"
+ "cbz x19, 5f\n"
+ "ldnt1w { z15.s }, p0/Z, [x19, x9, LSL #2]\n"
".inst 0xc09025e0 // addha za0.s, p1/M, p1/M, z15.s\n"
".inst 0xc09025e1 // addha za1.s, p1/M, p1/M, z15.s\n"
".inst 0xc09025e2 // addha za2.s, p1/M, p1/M, z15.s\n"
".inst 0xc09025e3 // addha za3.s, p1/M, p1/M, z15.s\n"
"4:" // Prepare accumulators: Test for last block
+ "mov x19, x9\n"
"mov x20, x10\n"
- "mov x21, x11\n"
- "incw x20\n"
- "incw x21, ALL, MUL #4\n"
- "cmp x20, x9\n"
- "csel x21, x11, x21, LT\n"
- "mov x20, x16\n"
- "bfm x16, XZR, #0x0, #0x0 // bfc x16, #0x0, #0x1\n"
- "cmp x21, x13\n"
- "csel x16, x20, x16, LT\n"
+ "incw x19\n"
+ "incw x20, ALL, MUL #4\n"
+ "cmp x19, x28\n"
+ "csel x20, x10, x20, LT\n"
+ "mov x19, x15\n"
+ "bfm x15, XZR, #0x0, #0x0 // bfc x15, #0x0, #0x1\n"
+ "cmp x20, x11\n"
+ "csel x15, x19, x15, LT\n"
"5:" // Prepare accumulators: End
- "ldr x20, [%x[args], %[offsetof_K]]\n"
- "add x20, x20, #0x3\n"
- "lsr x20, x20, #0x2\n"
- "ldr x23, [%x[args], %[offsetof_B]]\n"
- "lsr x22, x20, #0x2\n"
- "and x21, x20, #0x3\n"
- "ldr x20, [%x[args], %[offsetof_kstride_bytes]]\n"
- "madd x23, x10, x20, x23\n" // bptr = B + n * kstride_bytes
- "cbz x22, 8f\n"
- "subs x22, x22, #0x1\n"
- ".inst 0xa1408372 // ld1b { z18.b, z22.b, z26.b, z30.b }, pn8.b/Z, [x27]\n"
- "ldnt1b { z0.b }, p1/Z, [x23]\n"
- ".inst 0xa1418373 // ld1b { z19.b, z23.b, z27.b, z31.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
- "ldnt1b { z9.b }, p1/Z, [x23, #1, MUL VL]\n"
- ".inst 0xa1428370 // ld1b { z16.b, z20.b, z24.b, z28.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
- "ldnt1b { z21.b }, p1/Z, [x23, #2, MUL VL]\n"
- ".inst 0xa1438362 // ld1b { z2.b, z6.b, z10.b, z14.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
- "addvl x27, x27, #16\n"
- "ldnt1b { z12.b }, p1/Z, [x23, #3, MUL VL]\n"
- "addvl x23, x23, #4\n"
+ "ldr x19, [%x[args], %[offsetof_K]]\n"
+ "add x19, x19, #0x3\n"
+ "lsr x19, x19, #0x2\n"
+ "ldr x22, [%x[args], %[offsetof_B]]\n"
+ "lsr x21, x19, #0x2\n"
+ "and x20, x19, #0x3\n"
+ "ldr x19, [%x[args], %[offsetof_kstride_bytes]]\n"
+ "madd x22, x9, x19, x22\n" // bptr = B + n * kstride_bytes
+ "cbz x21, 8f\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xa1408352 // ld1b { z18.b, z22.b, z26.b, z30.b }, pn8.b/Z, [x26]\n"
+ "ldnt1b { z0.b }, p1/Z, [x22]\n"
+ ".inst 0xa1418353 // ld1b { z19.b, z23.b, z27.b, z31.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ "ldnt1b { z9.b }, p1/Z, [x22, #1, MUL VL]\n"
+ ".inst 0xa1428350 // ld1b { z16.b, z20.b, z24.b, z28.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ "ldnt1b { z21.b }, p1/Z, [x22, #2, MUL VL]\n"
+ ".inst 0xa1438342 // ld1b { z2.b, z6.b, z10.b, z14.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+ "addvl x26, x26, #16\n"
+ "ldnt1b { z12.b }, p1/Z, [x22, #3, MUL VL]\n"
+ "addvl x22, x22, #4\n"
"ble 7f\n"
"6:" // K loop
".inst 0xa0802640 // smopa za0.s, p1/M, p1/M, z18.b, z0.b\n"
- "subs x22, x22, #0x1\n"
+ "subs x21, x21, #0x1\n"
".inst 0xa08026c1 // smopa za1.s, p1/M, p1/M, z22.b, z0.b\n"
".inst 0xa0802742 // smopa za2.s, p1/M, p1/M, z26.b, z0.b\n"
".inst 0xa08027c3 // smopa za3.s, p1/M, p1/M, z30.b, z0.b\n"
- ".inst 0xa1408372 // ld1b { z18.b, z22.b, z26.b, z30.b }, pn8.b/Z, [x27]\n"
+ ".inst 0xa1408352 // ld1b { z18.b, z22.b, z26.b, z30.b }, pn8.b/Z, [x26]\n"
".inst 0xa0892660 // smopa za0.s, p1/M, p1/M, z19.b, z9.b\n"
- "ldnt1b { z0.b }, p1/Z, [x23]\n"
+ "ldnt1b { z0.b }, p1/Z, [x22]\n"
".inst 0xa08926e1 // smopa za1.s, p1/M, p1/M, z23.b, z9.b\n"
".inst 0xa0892762 // smopa za2.s, p1/M, p1/M, z27.b, z9.b\n"
".inst 0xa08927e3 // smopa za3.s, p1/M, p1/M, z31.b, z9.b\n"
- ".inst 0xa1418373 // ld1b { z19.b, z23.b, z27.b, z31.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa1418353 // ld1b { z19.b, z23.b, z27.b, z31.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
".inst 0xa0952600 // smopa za0.s, p1/M, p1/M, z16.b, z21.b\n"
- "ldnt1b { z9.b }, p1/Z, [x23, #1, MUL VL]\n"
+ "ldnt1b { z9.b }, p1/Z, [x22, #1, MUL VL]\n"
".inst 0xa0952681 // smopa za1.s, p1/M, p1/M, z20.b, z21.b\n"
".inst 0xa0952702 // smopa za2.s, p1/M, p1/M, z24.b, z21.b\n"
".inst 0xa0952783 // smopa za3.s, p1/M, p1/M, z28.b, z21.b\n"
- ".inst 0xa1428370 // ld1b { z16.b, z20.b, z24.b, z28.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
- "ldnt1b { z21.b }, p1/Z, [x23, #2, MUL VL]\n"
+ ".inst 0xa1428350 // ld1b { z16.b, z20.b, z24.b, z28.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ "ldnt1b { z21.b }, p1/Z, [x22, #2, MUL VL]\n"
".inst 0xa08c2440 // smopa za0.s, p1/M, p1/M, z2.b, z12.b\n"
".inst 0xa08c24c1 // smopa za1.s, p1/M, p1/M, z6.b, z12.b\n"
".inst 0xa08c2542 // smopa za2.s, p1/M, p1/M, z10.b, z12.b\n"
".inst 0xa08c25c3 // smopa za3.s, p1/M, p1/M, z14.b, z12.b\n"
- ".inst 0xa1438362 // ld1b { z2.b, z6.b, z10.b, z14.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
- "addvl x27, x27, #16\n"
- "ldnt1b { z12.b }, p1/Z, [x23, #3, MUL VL]\n"
- "addvl x23, x23, #4\n"
+ ".inst 0xa1438342 // ld1b { z2.b, z6.b, z10.b, z14.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+ "addvl x26, x26, #16\n"
+ "ldnt1b { z12.b }, p1/Z, [x22, #3, MUL VL]\n"
+ "addvl x22, x22, #4\n"
"bgt 6b\n"
"7:" // K loop tail
".inst 0xa0802640 // smopa za0.s, p1/M, p1/M, z18.b, z0.b\n"
@@ -210,294 +210,294 @@ void sme2_interleaved_nomerge_s8q_mopa_4VLx1VL(const int8_t *const A, const int8
".inst 0xa08c2542 // smopa za2.s, p1/M, p1/M, z10.b, z12.b\n"
".inst 0xa08c25c3 // smopa za3.s, p1/M, p1/M, z14.b, z12.b\n"
"8:" // K oddments
- "cbz x21, 10f\n"
+ "cbz x20, 10f\n"
"9:" // K oddments: Loop
- ".inst 0xa1408372 // ld1b { z18.b, z22.b, z26.b, z30.b }, pn8.b/Z, [x27]\n"
- "subs x21, x21, #0x1\n"
- "addvl x27, x27, #4\n"
- "ld1b { z0.b }, p1/Z, [x23]\n"
- "addvl x23, x23, #1\n"
+ ".inst 0xa1408352 // ld1b { z18.b, z22.b, z26.b, z30.b }, pn8.b/Z, [x26]\n"
+ "subs x20, x20, #0x1\n"
+ "addvl x26, x26, #4\n"
+ "ld1b { z0.b }, p1/Z, [x22]\n"
+ "addvl x22, x22, #1\n"
".inst 0xa0802640 // smopa za0.s, p1/M, p1/M, z18.b, z0.b\n"
".inst 0xa08026c1 // smopa za1.s, p1/M, p1/M, z22.b, z0.b\n"
".inst 0xa0802742 // smopa za2.s, p1/M, p1/M, z26.b, z0.b\n"
".inst 0xa08027c3 // smopa za3.s, p1/M, p1/M, z30.b, z0.b\n"
"bgt 9b\n"
"10:" // K oddments: End
- ".inst 0xa040c360 // ld1w { z0.s-z3.s }, pn8.b/Z, [x27]\n"
- "addvl x27, x27, #4\n"
+ ".inst 0xa040c340 // ld1w { z0.s-z3.s }, pn8.b/Z, [x26]\n"
+ "addvl x26, x26, #4\n"
".inst 0xc0912400 // addva za0.s, p1/M, p1/M, z0.s\n"
".inst 0xc0912421 // addva za1.s, p1/M, p1/M, z1.s\n"
".inst 0xc0912442 // addva za2.s, p1/M, p1/M, z2.s\n"
".inst 0xc0912463 // addva za3.s, p1/M, p1/M, z3.s\n"
- "tbz x16, #1, 14f\n"
- "tbz x16, #0, 12f\n"
+ "tbz x15, #1, 14f\n"
+ "tbz x15, #0, 12f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"11:" // Store to partial result buffer: Store and refill: Loop
- ".inst 0xa040c1f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xa040c1d4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x14]\n"
".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
".inst 0xc0840680 // mova za0h.s[x12], { z20.s-z23.s }\n"
".inst 0xc0860428 // mova { z8.s-z11.s }, za1h.s[x12]\n"
- ".inst 0xa041c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa041c1c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x14, #0x4, MUL VL]\n"
".inst 0xc0840481 // mova za1h.s[x12], { z4.s-z7.s }\n"
".inst 0xc086044c // mova { z12.s-z15.s }, za2h.s[x12]\n"
".inst 0xc086047c // mova { z28.s-z31.s }, za3h.s[x12]\n"
- ".inst 0xa042c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa042c1c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x14, #0x8, MUL VL]\n"
".inst 0xc0840482 // mova za2h.s[x12], { z4.s-z7.s }\n"
- ".inst 0xa043c1f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xa043c1d4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x14, #0xc, MUL VL]\n"
".inst 0xc0840683 // mova za3h.s[x12], { z20.s-z23.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- ".inst 0xa060c1d0 // st1w { z16.s-z19.s }, pn8.b, [x14]\n"
- "addvl x15, x15, #16\n"
- ".inst 0xa061c1c8 // st1w { z8.s-z11.s }, pn8.b, [x14, #0x4, MUL VL]\n"
- ".inst 0xa062c1cc // st1w { z12.s-z15.s }, pn8.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c1dc // st1w { z28.s-z31.s }, pn8.b, [x14, #0xc, MUL VL]\n"
+ "cmp x12, x19\n"
+ ".inst 0xa060c1b0 // st1w { z16.s-z19.s }, pn8.b, [x13]\n"
"addvl x14, x14, #16\n"
+ ".inst 0xa061c1a8 // st1w { z8.s-z11.s }, pn8.b, [x13, #0x4, MUL VL]\n"
+ ".inst 0xa062c1ac // st1w { z12.s-z15.s }, pn8.b, [x13, #0x8, MUL VL]\n"
+ ".inst 0xa063c1bc // st1w { z28.s-z31.s }, pn8.b, [x13, #0xc, MUL VL]\n"
+ "addvl x13, x13, #16\n"
"blt 11b\n"
"b 30f\n"
"12:" // Store to partial result buffer: Store only
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"13:" // Store to partial result buffer: Store only: Loop
".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
".inst 0xc086042c // mova { z12.s-z15.s }, za1h.s[x12]\n"
- ".inst 0xa060c1d0 // st1w { z16.s-z19.s }, pn8.b, [x14]\n"
+ ".inst 0xa060c1b0 // st1w { z16.s-z19.s }, pn8.b, [x13]\n"
".inst 0xc0860454 // mova { z20.s-z23.s }, za2h.s[x12]\n"
".inst 0xc0860478 // mova { z24.s-z27.s }, za3h.s[x12]\n"
- ".inst 0xa061c1cc // st1w { z12.s-z15.s }, pn8.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xa061c1ac // st1w { z12.s-z15.s }, pn8.b, [x13, #0x4, MUL VL]\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- ".inst 0xa062c1d4 // st1w { z20.s-z23.s }, pn8.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c1d8 // st1w { z24.s-z27.s }, pn8.b, [x14, #0xc, MUL VL]\n"
- "addvl x14, x14, #16\n"
+ "cmp x12, x19\n"
+ ".inst 0xa062c1b4 // st1w { z20.s-z23.s }, pn8.b, [x13, #0x8, MUL VL]\n"
+ ".inst 0xa063c1b8 // st1w { z24.s-z27.s }, pn8.b, [x13, #0xc, MUL VL]\n"
+ "addvl x13, x13, #16\n"
"blt 13b\n"
"b 30f\n"
"14:" // Store to output array
- "ldr x26, [%x[args], %[offsetof_C]]\n"
- "add x26, x26, x10\n" // C += n
- "sub x25, x13, x11\n"
+ "ldr x25, [%x[args], %[offsetof_C]]\n"
+ "add x25, x25, x9\n" // C += n
+ "sub x24, x11, x10\n"
"ld1rw { z8.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
- "ldr x24, [%x[args], %[offsetof_ldcb]]\n"
- "madd x26, x11, x24, x26\n" // C += m * ldc
+ "ldr x23, [%x[args], %[offsetof_ldcb]]\n"
+ "madd x25, x10, x23, x25\n" // C += m * ldc
"ld1rw { z7.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
"ld1rw { z6.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_c_offset]]\n"
"ld1rw { z5.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_minval]]\n"
"ld1rw { z4.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_maxval]]\n"
- "tbz x16, #2, 15f\n"
- "ldr w21, [%x[args], %[offsetof_n_0]]\n"
- "add x21, x21, x10\n"
- "ldr x20, [%x[rq], %[offsetof_Requantize32_per_channel_muls]]\n"
- "add x20, x20, x21, LSL #2\n"
- "ld1w { z8.s }, p0/Z, [x20]\n"
- "ldr x20, [%x[rq], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
- "add x20, x20, x21, LSL #2\n"
- "ld1w { z7.s }, p0/Z, [x20]\n"
+ "tbz x15, #2, 15f\n"
+ "ldr w20, [%x[args], %[offsetof_n_0]]\n"
+ "add x20, x20, x9\n"
+ "ldr x19, [%x[rq], %[offsetof_Requantize32_per_channel_muls]]\n"
+ "add x19, x19, x20, LSL #2\n"
+ "ld1w { z8.s }, p0/Z, [x19]\n"
+ "ldr x19, [%x[rq], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+ "add x19, x19, x20, LSL #2\n"
+ "ld1w { z7.s }, p0/Z, [x19]\n"
"15:" // Store to output array: Load per-channel parameters: End
- "cntw x23\n"
- "whilelt p0.s, x10, x9\n"
- "cmp x25, x23\n"
- "csel x22, x25, x23, LT\n"
- "lsr x21, x22, #0x2\n"
+ "cntw x22\n"
+ "whilelt p0.s, x9, x28\n"
+ "cmp x24, x22\n"
+ "csel x21, x24, x22, LT\n"
+ "lsr x20, x21, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 17f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 17f\n"
"16:" // Store to output array: Accumulator row 0 loop
".inst 0xc086040c // mova { z12.s-z15.s }, za0h.s[x12]\n"
".inst 0xc1a8ac0c // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z8.s\n"
"add x12, x12, #0x4\n"
".inst 0xc1a7aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z7.s\n"
- "cmp x12, x21, LSL #2\n"
+ "cmp x12, x20, LSL #2\n"
".inst 0xc1a6ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z6.s\n"
".inst 0xc1a4ccac // sclamp { z12.s-z15.s }, z5.s, z4.s\n"
- "st1b { z12.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1b { z13.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1b { z14.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1b { z15.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z12.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1b { z13.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1b { z14.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1b { z15.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"blt 16b\n"
"17:" // Store to output array: Accumulator row 0 oddments
- "cbz x20, 18f\n"
+ "cbz x19, 18f\n"
".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
".inst 0xc1a8ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z8.s\n"
- "subs x20, x20, #0x1\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc1a7aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z7.s\n"
".inst 0xc1a6ab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z6.s\n"
".inst 0xc1a4ccb0 // sclamp { z16.s-z19.s }, z5.s, z4.s\n"
- "st1b { z16.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z16.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 18f\n"
- "subs x20, x20, #0x1\n"
- "st1b { z17.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ "st1b { z17.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 18f\n"
- "st1b { z18.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z18.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"18:" // Store to output array: Accumulator row 0 oddments: End
- "subs x25, x25, x22\n"
+ "subs x24, x24, x21\n"
"beq 28f\n"
- "whilelt p0.s, x10, x9\n"
- "cmp x25, x23\n"
- "csel x22, x25, x23, LT\n"
- "lsr x21, x22, #0x2\n"
+ "whilelt p0.s, x9, x28\n"
+ "cmp x24, x22\n"
+ "csel x21, x24, x22, LT\n"
+ "lsr x20, x21, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 20f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 20f\n"
"19:" // Store to output array: Accumulator row 1 loop
".inst 0xc0860430 // mova { z16.s-z19.s }, za1h.s[x12]\n"
".inst 0xc1a8ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z8.s\n"
"add x12, x12, #0x4\n"
".inst 0xc1a7aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z7.s\n"
- "cmp x12, x21, LSL #2\n"
+ "cmp x12, x20, LSL #2\n"
".inst 0xc1a6ab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z6.s\n"
".inst 0xc1a4ccb0 // sclamp { z16.s-z19.s }, z5.s, z4.s\n"
- "st1b { z16.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1b { z17.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1b { z18.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1b { z19.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z16.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1b { z17.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1b { z18.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1b { z19.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"blt 19b\n"
"20:" // Store to output array: Accumulator row 1 oddments
- "cbz x20, 21f\n"
+ "cbz x19, 21f\n"
".inst 0xc086043c // mova { z28.s-z31.s }, za1h.s[x12]\n"
".inst 0xc1a8ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
- "subs x20, x20, #0x1\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc1a7aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
".inst 0xc1a6ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z6.s\n"
".inst 0xc1a4ccbc // sclamp { z28.s-z31.s }, z5.s, z4.s\n"
- "st1b { z28.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z28.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 21f\n"
- "subs x20, x20, #0x1\n"
- "st1b { z29.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ "st1b { z29.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 21f\n"
- "st1b { z30.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z30.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"21:" // Store to output array: Accumulator row 1 oddments: End
- "subs x25, x25, x22\n"
+ "subs x24, x24, x21\n"
"beq 28f\n"
- "whilelt p0.s, x10, x9\n"
- "cmp x25, x23\n"
- "csel x22, x25, x23, LT\n"
- "lsr x21, x22, #0x2\n"
+ "whilelt p0.s, x9, x28\n"
+ "cmp x24, x22\n"
+ "csel x21, x24, x22, LT\n"
+ "lsr x20, x21, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 23f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 23f\n"
"22:" // Store to output array: Accumulator row 2 loop
".inst 0xc0860458 // mova { z24.s-z27.s }, za2h.s[x12]\n"
".inst 0xc1a8ac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z8.s\n"
"add x12, x12, #0x4\n"
".inst 0xc1a7aa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z7.s\n"
- "cmp x12, x21, LSL #2\n"
+ "cmp x12, x20, LSL #2\n"
".inst 0xc1a6ab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z6.s\n"
".inst 0xc1a4ccb8 // sclamp { z24.s-z27.s }, z5.s, z4.s\n"
- "st1b { z24.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1b { z25.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1b { z26.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1b { z27.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z24.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1b { z25.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1b { z26.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1b { z27.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"blt 22b\n"
"23:" // Store to output array: Accumulator row 2 oddments
- "cbz x20, 24f\n"
+ "cbz x19, 24f\n"
".inst 0xc086044c // mova { z12.s-z15.s }, za2h.s[x12]\n"
".inst 0xc1a8ac0c // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z8.s\n"
- "subs x20, x20, #0x1\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc1a7aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z7.s\n"
".inst 0xc1a6ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z6.s\n"
".inst 0xc1a4ccac // sclamp { z12.s-z15.s }, z5.s, z4.s\n"
- "st1b { z12.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z12.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 24f\n"
- "subs x20, x20, #0x1\n"
- "st1b { z13.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ "st1b { z13.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 24f\n"
- "st1b { z14.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z14.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"24:" // Store to output array: Accumulator row 2 oddments: End
- "subs x25, x25, x22\n"
+ "subs x24, x24, x21\n"
"beq 28f\n"
- "whilelt p0.s, x10, x9\n"
- "cmp x25, x23\n"
- "csel x20, x25, x23, LT\n"
- "lsr x21, x20, #0x2\n"
+ "whilelt p0.s, x9, x28\n"
+ "cmp x24, x22\n"
+ "csel x19, x24, x22, LT\n"
+ "lsr x20, x19, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x20, #0x3\n"
- "cbz x21, 26f\n"
+ "and x19, x19, #0x3\n"
+ "cbz x20, 26f\n"
"25:" // Store to output array: Accumulator row 3 loop
".inst 0xc0860474 // mova { z20.s-z23.s }, za3h.s[x12]\n"
".inst 0xc1a8ac14 // sqdmulh { z20.s-z23.s }, { z20.s-z23.s }, z8.s\n"
"add x12, x12, #0x4\n"
".inst 0xc1a7aa34 // srshl { z20.s-z23.s }, { z20.s-z23.s }, z7.s\n"
- "cmp x12, x21, LSL #2\n"
+ "cmp x12, x20, LSL #2\n"
".inst 0xc1a6ab14 // add { z20.s-z23.s }, { z20.s-z23.s }, z6.s\n"
".inst 0xc1a4ccb4 // sclamp { z20.s-z23.s }, z5.s, z4.s\n"
- "st1b { z20.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1b { z21.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1b { z22.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1b { z23.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z20.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1b { z21.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1b { z22.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1b { z23.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"blt 25b\n"
"26:" // Store to output array: Accumulator row 3 oddments
- "cbz x20, 27f\n"
+ "cbz x19, 27f\n"
".inst 0xc0860460 // mova { z0.s-z3.s }, za3h.s[x12]\n"
".inst 0xc1a8ac00 // sqdmulh { z0.s-z3.s }, { z0.s-z3.s }, z8.s\n"
- "subs x20, x20, #0x1\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc1a7aa20 // srshl { z0.s-z3.s }, { z0.s-z3.s }, z7.s\n"
".inst 0xc1a6ab00 // add { z0.s-z3.s }, { z0.s-z3.s }, z6.s\n"
".inst 0xc1a4cca0 // sclamp { z0.s-z3.s }, z5.s, z4.s\n"
- "st1b { z0.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z0.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 27f\n"
- "subs x20, x20, #0x1\n"
- "st1b { z1.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ "st1b { z1.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 27f\n"
- "st1b { z2.s }, p0, [x26]\n"
+ "st1b { z2.s }, p0, [x25]\n"
"27:" // Store to output array: Accumulator row 3 oddments: End
"28:" // Store to output array: End
- "tbz x16, #0, 30f\n"
+ "tbz x15, #0, 30f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"29:" // Store to output array: Refill accumulators: Loop
- ".inst 0xa040c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xa040c1c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x14]\n"
".inst 0xc0840480 // mova za0h.s[x12], { z4.s-z7.s }\n"
- ".inst 0xa041c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa041c1d0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x14, #0x4, MUL VL]\n"
".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa042c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa042c1d0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x14, #0x8, MUL VL]\n"
".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa043c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xa043c1c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x14, #0xc, MUL VL]\n"
".inst 0xc0840483 // mova za3h.s[x12], { z4.s-z7.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- "addvl x15, x15, #16\n"
+ "cmp x12, x19\n"
+ "addvl x14, x14, #16\n"
"blt 29b\n"
"30:" // End block
- "incw x10\n"
- "cmp x10, x9\n"
+ "incw x9\n"
+ "cmp x9, x28\n"
"blt 3b\n"
- "incw x11, ALL, MUL #4\n"
- "cmp x11, x13\n"
- "mov x10, #0x0\n"
- "mov x28, x27\n"
+ "incw x10, ALL, MUL #4\n"
+ "cmp x10, x11\n"
+ "mov x9, #0x0\n"
+ "mov x27, x26\n"
"blt 3b\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_Requantize32_c_offset] "I" (offsetof(Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb)), [offsetof_n_0] "I" (offsetof(KernelArgs, n_0)), [rq] "r" (&rq)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL/generic.cpp
index 12e714a471..7ddd7c2e09 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __ARM_FEATURE_SVE
#ifdef ARM_COMPUTE_ENABLE_SME2
@@ -84,107 +84,107 @@ void sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL(const int8_t *const A, const in
KernelArgs args(A, B, C, ldc, M, N, K, bias, accumulate, accumulator_buffer);
__asm__ __volatile__(
- "ldr x13, [%x[args], %[offsetof_flags]]\n"
+ "ldr x11, [%x[args], %[offsetof_flags]]\n"
".inst 0xd503477f // SMSTART ZA\n"
"ptrue p0.b\n"
".inst 0x25207811 // ptrue pn9.b\n"
- "ldr x11, [%x[args], %[offsetof_accumulator_buffer]]\n"
"ldr x10, [%x[args], %[offsetof_accumulator_buffer]]\n"
- "tbz x13, #0, 2f\n"
+ "ldr x9, [%x[args], %[offsetof_accumulator_buffer]]\n"
+ "tbz x11, #0, 2f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"1:" // Initial accumulator load from buffer: Loop
- ".inst 0xa040c564 // ld1w { z4.s-z7.s }, pn9.b/Z, [x11]\n"
+ ".inst 0xa040c544 // ld1w { z4.s-z7.s }, pn9.b/Z, [x10]\n"
".inst 0xc0840480 // mova za0h.s[x12], { z4.s-z7.s }\n"
- ".inst 0xa041c57c // ld1w { z28.s-z31.s }, pn9.b/Z, [x11, #0x4, MUL VL]\n"
+ ".inst 0xa041c55c // ld1w { z28.s-z31.s }, pn9.b/Z, [x10, #0x4, MUL VL]\n"
".inst 0xc0840781 // mova za1h.s[x12], { z28.s-z31.s }\n"
- ".inst 0xa042c570 // ld1w { z16.s-z19.s }, pn9.b/Z, [x11, #0x8, MUL VL]\n"
+ ".inst 0xa042c550 // ld1w { z16.s-z19.s }, pn9.b/Z, [x10, #0x8, MUL VL]\n"
".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa043c560 // ld1w { z0.s-z3.s }, pn9.b/Z, [x11, #0xc, MUL VL]\n"
+ ".inst 0xa043c540 // ld1w { z0.s-z3.s }, pn9.b/Z, [x10, #0xc, MUL VL]\n"
".inst 0xc0840403 // mova za3h.s[x12], { z0.s-z3.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- "addvl x11, x11, #16\n"
+ "cmp x12, x19\n"
+ "addvl x10, x10, #16\n"
"blt 1b\n"
"2:" // Initial accumulator load from buffer: End
- "ldr w9, [%x[args], %[offsetof_M]]\n"
- "mov x28, #0x0\n"
+ "ldr w28, [%x[args], %[offsetof_M]]\n"
"mov x27, #0x0\n"
- "ldr w26, [%x[args], %[offsetof_N]]\n"
- "ldr x25, [%x[args], %[offsetof_A]]\n"
+ "mov x26, #0x0\n"
+ "ldr w25, [%x[args], %[offsetof_N]]\n"
+ "ldr x24, [%x[args], %[offsetof_A]]\n"
"3:" // M and N loop
- "mov x24, x25\n"
- ".inst 0x25ba6770 // whilelt pn8.s, x27, x26, VLx4\n"
- "tbnz x13, #0, 4f\n"
- "ldr x20, [%x[args], %[offsetof_bias]]\n"
+ "mov x23, x24\n"
+ ".inst 0x25b96750 // whilelt pn8.s, x26, x25, VLx4\n"
+ "tbnz x11, #0, 4f\n"
+ "ldr x19, [%x[args], %[offsetof_bias]]\n"
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
- "cbz x20, 5f\n"
- ".inst 0xa11bc28a // ldnt1w { z2.s, z6.s, z10.s, z14.s }, p8/Z, [x20, x27, LSL #2]\n"
+ "cbz x19, 5f\n"
+ ".inst 0xa11ac26a // ldnt1w { z2.s, z6.s, z10.s, z14.s }, p8/Z, [x19, x26, LSL #2]\n"
".inst 0xc0900040 // addha za0.s, p0/M, p0/M, z2.s\n"
".inst 0xc09000c1 // addha za1.s, p0/M, p0/M, z6.s\n"
".inst 0xc0900142 // addha za2.s, p0/M, p0/M, z10.s\n"
".inst 0xc09001c3 // addha za3.s, p0/M, p0/M, z14.s\n"
"4:" // Prepare accumulators: Test for last block
+ "mov x19, x26\n"
"mov x20, x27\n"
- "mov x21, x28\n"
- "incw x20, ALL, MUL #4\n"
- "incw x21\n"
- "cmp x20, x26\n"
- "csel x21, x28, x21, LT\n"
- "mov x20, x13\n"
- "bfm x13, XZR, #0x0, #0x0 // bfc x13, #0x0, #0x1\n"
- "cmp x21, x9\n"
- "csel x13, x20, x13, LT\n"
+ "incw x19, ALL, MUL #4\n"
+ "incw x20\n"
+ "cmp x19, x25\n"
+ "csel x20, x27, x20, LT\n"
+ "mov x19, x11\n"
+ "bfm x11, XZR, #0x0, #0x0 // bfc x11, #0x0, #0x1\n"
+ "cmp x20, x28\n"
+ "csel x11, x19, x11, LT\n"
"5:" // Prepare accumulators: End
- "ldr x20, [%x[args], %[offsetof_K]]\n"
- "add x20, x20, #0x3\n"
- "lsr x20, x20, #0x2\n"
- "ldr x23, [%x[args], %[offsetof_B]]\n"
- "lsr x22, x20, #0x2\n"
- "and x21, x20, #0x3\n"
- "ldr x20, [%x[args], %[offsetof_kstride_bytes]]\n"
- "madd x23, x27, x20, x23\n" // bptr = B + n * kstride_bytes
- "cbz x22, 8f\n"
- "subs x22, x22, #0x1\n"
- "ld1b { z20.b }, p0/Z, [x24]\n"
- ".inst 0xa14086e9 // ldnt1b { z1.b, z5.b, z9.b, z13.b }, pn9.b/Z, [x23]\n"
- "ld1b { z10.b }, p0/Z, [x24, #1, MUL VL]\n"
- ".inst 0xa14186fa // ldnt1b { z18.b, z22.b, z26.b, z30.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
- "ld1b { z16.b }, p0/Z, [x24, #2, MUL VL]\n"
- ".inst 0xa14286eb // ldnt1b { z3.b, z7.b, z11.b, z15.b }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
- "ld1b { z25.b }, p0/Z, [x24, #3, MUL VL]\n"
- "addvl x24, x24, #4\n"
- ".inst 0xa14386e8 // ldnt1b { z0.b, z4.b, z8.b, z12.b }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
- "addvl x23, x23, #16\n"
+ "ldr x19, [%x[args], %[offsetof_K]]\n"
+ "add x19, x19, #0x3\n"
+ "lsr x19, x19, #0x2\n"
+ "ldr x22, [%x[args], %[offsetof_B]]\n"
+ "lsr x21, x19, #0x2\n"
+ "and x20, x19, #0x3\n"
+ "ldr x19, [%x[args], %[offsetof_kstride_bytes]]\n"
+ "madd x22, x26, x19, x22\n" // bptr = B + n * kstride_bytes
+ "cbz x21, 8f\n"
+ "subs x21, x21, #0x1\n"
+ "ld1b { z20.b }, p0/Z, [x23]\n"
+ ".inst 0xa14086c9 // ldnt1b { z1.b, z5.b, z9.b, z13.b }, pn9.b/Z, [x22]\n"
+ "ld1b { z10.b }, p0/Z, [x23, #1, MUL VL]\n"
+ ".inst 0xa14186da // ldnt1b { z18.b, z22.b, z26.b, z30.b }, pn9.b/Z, [x22, #0x4, MUL VL]\n"
+ "ld1b { z16.b }, p0/Z, [x23, #2, MUL VL]\n"
+ ".inst 0xa14286cb // ldnt1b { z3.b, z7.b, z11.b, z15.b }, pn9.b/Z, [x22, #0x8, MUL VL]\n"
+ "ld1b { z25.b }, p0/Z, [x23, #3, MUL VL]\n"
+ "addvl x23, x23, #4\n"
+ ".inst 0xa14386c8 // ldnt1b { z0.b, z4.b, z8.b, z12.b }, pn9.b/Z, [x22, #0xc, MUL VL]\n"
+ "addvl x22, x22, #16\n"
"ble 7f\n"
"6:" // K loop
".inst 0xa0810280 // smopa za0.s, p0/M, p0/M, z20.b, z1.b\n"
- "subs x22, x22, #0x1\n"
+ "subs x21, x21, #0x1\n"
".inst 0xa0850281 // smopa za1.s, p0/M, p0/M, z20.b, z5.b\n"
".inst 0xa0890282 // smopa za2.s, p0/M, p0/M, z20.b, z9.b\n"
".inst 0xa08d0283 // smopa za3.s, p0/M, p0/M, z20.b, z13.b\n"
- "ld1b { z20.b }, p0/Z, [x24]\n"
+ "ld1b { z20.b }, p0/Z, [x23]\n"
".inst 0xa0920140 // smopa za0.s, p0/M, p0/M, z10.b, z18.b\n"
- ".inst 0xa14086e9 // ldnt1b { z1.b, z5.b, z9.b, z13.b }, pn9.b/Z, [x23]\n"
+ ".inst 0xa14086c9 // ldnt1b { z1.b, z5.b, z9.b, z13.b }, pn9.b/Z, [x22]\n"
".inst 0xa0960141 // smopa za1.s, p0/M, p0/M, z10.b, z22.b\n"
".inst 0xa09a0142 // smopa za2.s, p0/M, p0/M, z10.b, z26.b\n"
".inst 0xa09e0143 // smopa za3.s, p0/M, p0/M, z10.b, z30.b\n"
- "ld1b { z10.b }, p0/Z, [x24, #1, MUL VL]\n"
+ "ld1b { z10.b }, p0/Z, [x23, #1, MUL VL]\n"
".inst 0xa0830200 // smopa za0.s, p0/M, p0/M, z16.b, z3.b\n"
- ".inst 0xa14186fa // ldnt1b { z18.b, z22.b, z26.b, z30.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa14186da // ldnt1b { z18.b, z22.b, z26.b, z30.b }, pn9.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0xa0870201 // smopa za1.s, p0/M, p0/M, z16.b, z7.b\n"
".inst 0xa08b0202 // smopa za2.s, p0/M, p0/M, z16.b, z11.b\n"
".inst 0xa08f0203 // smopa za3.s, p0/M, p0/M, z16.b, z15.b\n"
- "ld1b { z16.b }, p0/Z, [x24, #2, MUL VL]\n"
- ".inst 0xa14286eb // ldnt1b { z3.b, z7.b, z11.b, z15.b }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
+ "ld1b { z16.b }, p0/Z, [x23, #2, MUL VL]\n"
+ ".inst 0xa14286cb // ldnt1b { z3.b, z7.b, z11.b, z15.b }, pn9.b/Z, [x22, #0x8, MUL VL]\n"
".inst 0xa0800320 // smopa za0.s, p0/M, p0/M, z25.b, z0.b\n"
".inst 0xa0840321 // smopa za1.s, p0/M, p0/M, z25.b, z4.b\n"
".inst 0xa0880322 // smopa za2.s, p0/M, p0/M, z25.b, z8.b\n"
".inst 0xa08c0323 // smopa za3.s, p0/M, p0/M, z25.b, z12.b\n"
- "ld1b { z25.b }, p0/Z, [x24, #3, MUL VL]\n"
- "addvl x24, x24, #4\n"
- ".inst 0xa14386e8 // ldnt1b { z0.b, z4.b, z8.b, z12.b }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
- "addvl x23, x23, #16\n"
+ "ld1b { z25.b }, p0/Z, [x23, #3, MUL VL]\n"
+ "addvl x23, x23, #4\n"
+ ".inst 0xa14386c8 // ldnt1b { z0.b, z4.b, z8.b, z12.b }, pn9.b/Z, [x22, #0xc, MUL VL]\n"
+ "addvl x22, x22, #16\n"
"bgt 6b\n"
"7:" // K loop tail
".inst 0xa0810280 // smopa za0.s, p0/M, p0/M, z20.b, z1.b\n"
@@ -204,138 +204,138 @@ void sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL(const int8_t *const A, const in
".inst 0xa0880322 // smopa za2.s, p0/M, p0/M, z25.b, z8.b\n"
".inst 0xa08c0323 // smopa za3.s, p0/M, p0/M, z25.b, z12.b\n"
"8:" // K oddments
- "cbz x21, 10f\n"
+ "cbz x20, 10f\n"
"9:" // K oddments: Loop
- "ld1b { z20.b }, p0/Z, [x24]\n"
- "subs x21, x21, #0x1\n"
- "addvl x24, x24, #1\n"
- ".inst 0xa14086e1 // ld1b { z1.b, z5.b, z9.b, z13.b }, pn9.b/Z, [x23]\n"
- "addvl x23, x23, #4\n"
+ "ld1b { z20.b }, p0/Z, [x23]\n"
+ "subs x20, x20, #0x1\n"
+ "addvl x23, x23, #1\n"
+ ".inst 0xa14086c1 // ld1b { z1.b, z5.b, z9.b, z13.b }, pn9.b/Z, [x22]\n"
+ "addvl x22, x22, #4\n"
".inst 0xa0810280 // smopa za0.s, p0/M, p0/M, z20.b, z1.b\n"
".inst 0xa0850281 // smopa za1.s, p0/M, p0/M, z20.b, z5.b\n"
".inst 0xa0890282 // smopa za2.s, p0/M, p0/M, z20.b, z9.b\n"
".inst 0xa08d0283 // smopa za3.s, p0/M, p0/M, z20.b, z13.b\n"
"bgt 9b\n"
"10:" // K oddments: End
- "tbz x13, #1, 14f\n"
- "tbz x13, #0, 12f\n"
+ "tbz x11, #1, 14f\n"
+ "tbz x11, #0, 12f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"11:" // Store to partial result buffer: Store and refill: Loop
- ".inst 0xa040c57c // ld1w { z28.s-z31.s }, pn9.b/Z, [x11]\n"
+ ".inst 0xa040c55c // ld1w { z28.s-z31.s }, pn9.b/Z, [x10]\n"
".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
".inst 0xc0840780 // mova za0h.s[x12], { z28.s-z31.s }\n"
".inst 0xc0860434 // mova { z20.s-z23.s }, za1h.s[x12]\n"
- ".inst 0xa041c560 // ld1w { z0.s-z3.s }, pn9.b/Z, [x11, #0x4, MUL VL]\n"
+ ".inst 0xa041c540 // ld1w { z0.s-z3.s }, pn9.b/Z, [x10, #0x4, MUL VL]\n"
".inst 0xc0840401 // mova za1h.s[x12], { z0.s-z3.s }\n"
".inst 0xc0860458 // mova { z24.s-z27.s }, za2h.s[x12]\n"
".inst 0xc086047c // mova { z28.s-z31.s }, za3h.s[x12]\n"
- ".inst 0xa042c570 // ld1w { z16.s-z19.s }, pn9.b/Z, [x11, #0x8, MUL VL]\n"
+ ".inst 0xa042c550 // ld1w { z16.s-z19.s }, pn9.b/Z, [x10, #0x8, MUL VL]\n"
".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa043c56c // ld1w { z12.s-z15.s }, pn9.b/Z, [x11, #0xc, MUL VL]\n"
+ ".inst 0xa043c54c // ld1w { z12.s-z15.s }, pn9.b/Z, [x10, #0xc, MUL VL]\n"
".inst 0xc0840583 // mova za3h.s[x12], { z12.s-z15.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- ".inst 0xa060c544 // st1w { z4.s-z7.s }, pn9.b, [x10]\n"
- "addvl x11, x11, #16\n"
- ".inst 0xa061c554 // st1w { z20.s-z23.s }, pn9.b, [x10, #0x4, MUL VL]\n"
- ".inst 0xa062c558 // st1w { z24.s-z27.s }, pn9.b, [x10, #0x8, MUL VL]\n"
- ".inst 0xa063c55c // st1w { z28.s-z31.s }, pn9.b, [x10, #0xc, MUL VL]\n"
+ "cmp x12, x19\n"
+ ".inst 0xa060c524 // st1w { z4.s-z7.s }, pn9.b, [x9]\n"
"addvl x10, x10, #16\n"
+ ".inst 0xa061c534 // st1w { z20.s-z23.s }, pn9.b, [x9, #0x4, MUL VL]\n"
+ ".inst 0xa062c538 // st1w { z24.s-z27.s }, pn9.b, [x9, #0x8, MUL VL]\n"
+ ".inst 0xa063c53c // st1w { z28.s-z31.s }, pn9.b, [x9, #0xc, MUL VL]\n"
+ "addvl x9, x9, #16\n"
"blt 11b\n"
"b 20f\n"
"12:" // Store to partial result buffer: Store only
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"13:" // Store to partial result buffer: Store only: Loop
".inst 0xc0860414 // mova { z20.s-z23.s }, za0h.s[x12]\n"
".inst 0xc0860420 // mova { z0.s-z3.s }, za1h.s[x12]\n"
- ".inst 0xa060c554 // st1w { z20.s-z23.s }, pn9.b, [x10]\n"
+ ".inst 0xa060c534 // st1w { z20.s-z23.s }, pn9.b, [x9]\n"
".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
- ".inst 0xa061c540 // st1w { z0.s-z3.s }, pn9.b, [x10, #0x4, MUL VL]\n"
+ ".inst 0xa061c520 // st1w { z0.s-z3.s }, pn9.b, [x9, #0x4, MUL VL]\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- ".inst 0xa062c548 // st1w { z8.s-z11.s }, pn9.b, [x10, #0x8, MUL VL]\n"
- ".inst 0xa063c54c // st1w { z12.s-z15.s }, pn9.b, [x10, #0xc, MUL VL]\n"
- "addvl x10, x10, #16\n"
+ "cmp x12, x19\n"
+ ".inst 0xa062c528 // st1w { z8.s-z11.s }, pn9.b, [x9, #0x8, MUL VL]\n"
+ ".inst 0xa063c52c // st1w { z12.s-z15.s }, pn9.b, [x9, #0xc, MUL VL]\n"
+ "addvl x9, x9, #16\n"
"blt 13b\n"
"b 20f\n"
"14:" // Store to output array
- "ldr x23, [%x[args], %[offsetof_C]]\n"
- "sub x21, x9, x28\n"
- "cntw x20\n"
- "ldr x22, [%x[args], %[offsetof_ldcb]]\n"
- "cmp x21, x20\n"
- "csel x20, x21, x20, LT\n"
- "add x23, x23, x27, LSL #2\n" // C += n
- "lsr x21, x20, #0x2\n"
- "madd x23, x28, x22, x23\n" // C += m * ldc
+ "ldr x22, [%x[args], %[offsetof_C]]\n"
+ "sub x20, x28, x27\n"
+ "cntw x19\n"
+ "ldr x21, [%x[args], %[offsetof_ldcb]]\n"
+ "cmp x20, x19\n"
+ "csel x19, x20, x19, LT\n"
+ "add x22, x22, x26, LSL #2\n" // C += n
+ "lsr x20, x19, #0x2\n"
+ "madd x22, x27, x21, x22\n" // C += m * ldc
"mov x12, #0x0\n"
- "and x20, x20, #0x3\n"
- "cbz x21, 16f\n"
+ "and x19, x19, #0x3\n"
+ "cbz x20, 16f\n"
"15:" // Store to output array: Accumulator row 0 loop
".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
- ".inst 0xa160c2e0 // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x23]\n"
- "add x23, x23, x22\n"
- ".inst 0xa160c2e1 // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x23]\n"
- "add x23, x23, x22\n"
+ ".inst 0xa160c2c0 // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x22]\n"
+ "add x22, x22, x21\n"
+ ".inst 0xa160c2c1 // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x22]\n"
+ "add x22, x22, x21\n"
"add x12, x12, #0x4\n"
- ".inst 0xa160c2e2 // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x23]\n"
- "add x23, x23, x22\n"
- "cmp x12, x21, LSL #2\n"
- ".inst 0xa160c2e3 // st1w { z3.s, z7.s, z11.s, z15.s }, p8, [x23]\n"
- "add x23, x23, x22\n"
+ ".inst 0xa160c2c2 // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x22]\n"
+ "add x22, x22, x21\n"
+ "cmp x12, x20, LSL #2\n"
+ ".inst 0xa160c2c3 // st1w { z3.s, z7.s, z11.s, z15.s }, p8, [x22]\n"
+ "add x22, x22, x21\n"
"blt 15b\n"
"16:" // Store to output array: Accumulator row 0 oddments
- "cbz x20, 17f\n"
- "subs x20, x20, #0x1\n"
+ "cbz x19, 17f\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
- ".inst 0xa160c2e0 // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x23]\n"
- "add x23, x23, x22\n"
+ ".inst 0xa160c2c0 // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x22]\n"
+ "add x22, x22, x21\n"
"beq 17f\n"
- "subs x20, x20, #0x1\n"
- ".inst 0xa160c2e1 // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x23]\n"
- "add x23, x23, x22\n"
+ "subs x19, x19, #0x1\n"
+ ".inst 0xa160c2c1 // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x22]\n"
+ "add x22, x22, x21\n"
"beq 17f\n"
- ".inst 0xa160c2e2 // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x23]\n"
+ ".inst 0xa160c2c2 // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x22]\n"
"17:" // Store to output array: Accumulator row 0 oddments: End
"18:" // Store to output array: End
- "tbz x13, #0, 20f\n"
+ "tbz x11, #0, 20f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"19:" // Store to output array: Refill accumulators: Loop
- ".inst 0xa040c56c // ld1w { z12.s-z15.s }, pn9.b/Z, [x11]\n"
+ ".inst 0xa040c54c // ld1w { z12.s-z15.s }, pn9.b/Z, [x10]\n"
".inst 0xc0840580 // mova za0h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xa041c570 // ld1w { z16.s-z19.s }, pn9.b/Z, [x11, #0x4, MUL VL]\n"
+ ".inst 0xa041c550 // ld1w { z16.s-z19.s }, pn9.b/Z, [x10, #0x4, MUL VL]\n"
".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa042c570 // ld1w { z16.s-z19.s }, pn9.b/Z, [x11, #0x8, MUL VL]\n"
+ ".inst 0xa042c550 // ld1w { z16.s-z19.s }, pn9.b/Z, [x10, #0x8, MUL VL]\n"
".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa043c564 // ld1w { z4.s-z7.s }, pn9.b/Z, [x11, #0xc, MUL VL]\n"
+ ".inst 0xa043c544 // ld1w { z4.s-z7.s }, pn9.b/Z, [x10, #0xc, MUL VL]\n"
".inst 0xc0840483 // mova za3h.s[x12], { z4.s-z7.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- "addvl x11, x11, #16\n"
+ "cmp x12, x19\n"
+ "addvl x10, x10, #16\n"
"blt 19b\n"
"20:" // End block
- "incw x27, ALL, MUL #4\n"
- "cmp x27, x26\n"
+ "incw x26, ALL, MUL #4\n"
+ "cmp x26, x25\n"
"blt 3b\n"
- "incw x28\n"
- "cmp x28, x9\n"
- "mov x27, #0x0\n"
- "mov x25, x24\n"
+ "incw x27\n"
+ "cmp x27, x28\n"
+ "mov x26, #0x0\n"
+ "mov x24, x23\n"
"blt 3b\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL/generic.cpp
index d7a7528211..9ae18f0e6b 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __ARM_FEATURE_SVE
#ifdef ARM_COMPUTE_ENABLE_SME2
@@ -84,107 +84,107 @@ void sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL(const int8_t *const A, const in
KernelArgs args(A, B, C, ldc, M, N, K, bias, accumulate, accumulator_buffer);
__asm__ __volatile__(
- "ldr x16, [%x[args], %[offsetof_flags]]\n"
+ "ldr x15, [%x[args], %[offsetof_flags]]\n"
".inst 0xd503477f // SMSTART ZA\n"
"ptrue p0.b\n"
".inst 0x25207811 // ptrue pn9.b\n"
- "ldr x15, [%x[args], %[offsetof_accumulator_buffer]]\n"
"ldr x14, [%x[args], %[offsetof_accumulator_buffer]]\n"
- "tbz x16, #0, 2f\n"
+ "ldr x13, [%x[args], %[offsetof_accumulator_buffer]]\n"
+ "tbz x15, #0, 2f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"1:" // Initial accumulator load from buffer: Loop
- ".inst 0xa040c5e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xa040c5c8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x14]\n"
".inst 0xc0840500 // mova za0h.s[x12], { z8.s-z11.s }\n"
- ".inst 0xa041c5f8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa041c5d8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
".inst 0xc0840701 // mova za1h.s[x12], { z24.s-z27.s }\n"
- ".inst 0xa042c5fc // ld1w { z28.s-z31.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa042c5dc // ld1w { z28.s-z31.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
".inst 0xc0840782 // mova za2h.s[x12], { z28.s-z31.s }\n"
- ".inst 0xa043c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xa043c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
".inst 0xc0840603 // mova za3h.s[x12], { z16.s-z19.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- "addvl x15, x15, #16\n"
+ "cmp x12, x19\n"
+ "addvl x14, x14, #16\n"
"blt 1b\n"
"2:" // Initial accumulator load from buffer: End
- "ldr w13, [%x[args], %[offsetof_M]]\n"
- "mov x11, #0x0\n"
+ "ldr w11, [%x[args], %[offsetof_M]]\n"
"mov x10, #0x0\n"
- "ldr w9, [%x[args], %[offsetof_N]]\n"
- "ldr x28, [%x[args], %[offsetof_A]]\n"
+ "mov x9, #0x0\n"
+ "ldr w28, [%x[args], %[offsetof_N]]\n"
+ "ldr x27, [%x[args], %[offsetof_A]]\n"
"3:" // M and N loop
- "mov x27, x28\n"
- ".inst 0x25a94550 // whilelt pn8.s, x10, x9, VLx2\n"
- "tbnz x16, #0, 4f\n"
- "ldr x20, [%x[args], %[offsetof_bias]]\n"
+ "mov x26, x27\n"
+ ".inst 0x25bc4530 // whilelt pn8.s, x9, x28, VLx2\n"
+ "tbnz x15, #0, 4f\n"
+ "ldr x19, [%x[args], %[offsetof_bias]]\n"
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
- "cbz x20, 5f\n"
- ".inst 0xa10a429c // ldnt1w { z20.s, z28.s }, p8/Z, [x20, x10, LSL #2]\n"
+ "cbz x19, 5f\n"
+ ".inst 0xa109427c // ldnt1w { z20.s, z28.s }, p8/Z, [x19, x9, LSL #2]\n"
".inst 0xc0900280 // addha za0.s, p0/M, p0/M, z20.s\n"
".inst 0xc0900381 // addha za1.s, p0/M, p0/M, z28.s\n"
".inst 0xc0900282 // addha za2.s, p0/M, p0/M, z20.s\n"
".inst 0xc0900383 // addha za3.s, p0/M, p0/M, z28.s\n"
"4:" // Prepare accumulators: Test for last block
+ "mov x19, x9\n"
"mov x20, x10\n"
- "mov x21, x11\n"
+ "incw x19, ALL, MUL #2\n"
"incw x20, ALL, MUL #2\n"
- "incw x21, ALL, MUL #2\n"
- "cmp x20, x9\n"
- "csel x21, x11, x21, LT\n"
- "mov x20, x16\n"
- "bfm x16, XZR, #0x0, #0x0 // bfc x16, #0x0, #0x1\n"
- "cmp x21, x13\n"
- "csel x16, x20, x16, LT\n"
+ "cmp x19, x28\n"
+ "csel x20, x10, x20, LT\n"
+ "mov x19, x15\n"
+ "bfm x15, XZR, #0x0, #0x0 // bfc x15, #0x0, #0x1\n"
+ "cmp x20, x11\n"
+ "csel x15, x19, x15, LT\n"
"5:" // Prepare accumulators: End
- "ldr x20, [%x[args], %[offsetof_K]]\n"
- "add x20, x20, #0x3\n"
- "lsr x20, x20, #0x2\n"
- "ldr x23, [%x[args], %[offsetof_B]]\n"
- "lsr x22, x20, #0x2\n"
- "and x21, x20, #0x3\n"
- "ldr x20, [%x[args], %[offsetof_kstride_bytes]]\n"
- "madd x23, x10, x20, x23\n" // bptr = B + n * kstride_bytes
- "cbz x22, 8f\n"
- "subs x22, x22, #0x1\n"
- ".inst 0xa1400776 // ld1b { z22.b, z30.b }, pn9.b/Z, [x27]\n"
- ".inst 0xa14006f9 // ldnt1b { z17.b, z25.b }, pn9.b/Z, [x23]\n"
- ".inst 0xa1410770 // ld1b { z16.b, z24.b }, pn9.b/Z, [x27, #0x2, MUL VL]\n"
- ".inst 0xa14106eb // ldnt1b { z3.b, z11.b }, pn9.b/Z, [x23, #0x2, MUL VL]\n"
- ".inst 0xa0420768 // ld1b { z8.b-z9.b }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
- ".inst 0xa04206f3 // ldnt1b { z18.b-z19.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xa0430764 // ld1b { z4.b-z5.b }, pn9.b/Z, [x27, #0x6, MUL VL]\n"
- "addvl x27, x27, #8\n"
- ".inst 0xa14306fd // ldnt1b { z21.b, z29.b }, pn9.b/Z, [x23, #0x6, MUL VL]\n"
- "addvl x23, x23, #8\n"
+ "ldr x19, [%x[args], %[offsetof_K]]\n"
+ "add x19, x19, #0x3\n"
+ "lsr x19, x19, #0x2\n"
+ "ldr x22, [%x[args], %[offsetof_B]]\n"
+ "lsr x21, x19, #0x2\n"
+ "and x20, x19, #0x3\n"
+ "ldr x19, [%x[args], %[offsetof_kstride_bytes]]\n"
+ "madd x22, x9, x19, x22\n" // bptr = B + n * kstride_bytes
+ "cbz x21, 8f\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xa1400756 // ld1b { z22.b, z30.b }, pn9.b/Z, [x26]\n"
+ ".inst 0xa14006d9 // ldnt1b { z17.b, z25.b }, pn9.b/Z, [x22]\n"
+ ".inst 0xa1410750 // ld1b { z16.b, z24.b }, pn9.b/Z, [x26, #0x2, MUL VL]\n"
+ ".inst 0xa14106cb // ldnt1b { z3.b, z11.b }, pn9.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xa0420748 // ld1b { z8.b-z9.b }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa04206d3 // ldnt1b { z18.b-z19.b }, pn9.b/Z, [x22, #0x4, MUL VL]\n"
+ ".inst 0xa0430744 // ld1b { z4.b-z5.b }, pn9.b/Z, [x26, #0x6, MUL VL]\n"
+ "addvl x26, x26, #8\n"
+ ".inst 0xa14306dd // ldnt1b { z21.b, z29.b }, pn9.b/Z, [x22, #0x6, MUL VL]\n"
+ "addvl x22, x22, #8\n"
"ble 7f\n"
"6:" // K loop
".inst 0xa09102c0 // smopa za0.s, p0/M, p0/M, z22.b, z17.b\n"
- "subs x22, x22, #0x1\n"
+ "subs x21, x21, #0x1\n"
".inst 0xa09902c1 // smopa za1.s, p0/M, p0/M, z22.b, z25.b\n"
".inst 0xa09103c2 // smopa za2.s, p0/M, p0/M, z30.b, z17.b\n"
".inst 0xa09903c3 // smopa za3.s, p0/M, p0/M, z30.b, z25.b\n"
- ".inst 0xa1400776 // ld1b { z22.b, z30.b }, pn9.b/Z, [x27]\n"
+ ".inst 0xa1400756 // ld1b { z22.b, z30.b }, pn9.b/Z, [x26]\n"
".inst 0xa0830200 // smopa za0.s, p0/M, p0/M, z16.b, z3.b\n"
- ".inst 0xa14006f9 // ldnt1b { z17.b, z25.b }, pn9.b/Z, [x23]\n"
+ ".inst 0xa14006d9 // ldnt1b { z17.b, z25.b }, pn9.b/Z, [x22]\n"
".inst 0xa08b0201 // smopa za1.s, p0/M, p0/M, z16.b, z11.b\n"
".inst 0xa0830302 // smopa za2.s, p0/M, p0/M, z24.b, z3.b\n"
".inst 0xa08b0303 // smopa za3.s, p0/M, p0/M, z24.b, z11.b\n"
- ".inst 0xa1410770 // ld1b { z16.b, z24.b }, pn9.b/Z, [x27, #0x2, MUL VL]\n"
+ ".inst 0xa1410750 // ld1b { z16.b, z24.b }, pn9.b/Z, [x26, #0x2, MUL VL]\n"
".inst 0xa0920100 // smopa za0.s, p0/M, p0/M, z8.b, z18.b\n"
- ".inst 0xa14106eb // ldnt1b { z3.b, z11.b }, pn9.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xa14106cb // ldnt1b { z3.b, z11.b }, pn9.b/Z, [x22, #0x2, MUL VL]\n"
".inst 0xa0930101 // smopa za1.s, p0/M, p0/M, z8.b, z19.b\n"
".inst 0xa0920122 // smopa za2.s, p0/M, p0/M, z9.b, z18.b\n"
".inst 0xa0930123 // smopa za3.s, p0/M, p0/M, z9.b, z19.b\n"
- ".inst 0xa0420768 // ld1b { z8.b-z9.b }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
- ".inst 0xa04206f3 // ldnt1b { z18.b-z19.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa0420748 // ld1b { z8.b-z9.b }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa04206d3 // ldnt1b { z18.b-z19.b }, pn9.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0xa0950080 // smopa za0.s, p0/M, p0/M, z4.b, z21.b\n"
".inst 0xa09d0081 // smopa za1.s, p0/M, p0/M, z4.b, z29.b\n"
".inst 0xa09500a2 // smopa za2.s, p0/M, p0/M, z5.b, z21.b\n"
".inst 0xa09d00a3 // smopa za3.s, p0/M, p0/M, z5.b, z29.b\n"
- ".inst 0xa0430764 // ld1b { z4.b-z5.b }, pn9.b/Z, [x27, #0x6, MUL VL]\n"
- "addvl x27, x27, #8\n"
- ".inst 0xa14306fd // ldnt1b { z21.b, z29.b }, pn9.b/Z, [x23, #0x6, MUL VL]\n"
- "addvl x23, x23, #8\n"
+ ".inst 0xa0430744 // ld1b { z4.b-z5.b }, pn9.b/Z, [x26, #0x6, MUL VL]\n"
+ "addvl x26, x26, #8\n"
+ ".inst 0xa14306dd // ldnt1b { z21.b, z29.b }, pn9.b/Z, [x22, #0x6, MUL VL]\n"
+ "addvl x22, x22, #8\n"
"bgt 6b\n"
"7:" // K loop tail
".inst 0xa09102c0 // smopa za0.s, p0/M, p0/M, z22.b, z17.b\n"
@@ -204,171 +204,171 @@ void sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL(const int8_t *const A, const in
".inst 0xa09500a2 // smopa za2.s, p0/M, p0/M, z5.b, z21.b\n"
".inst 0xa09d00a3 // smopa za3.s, p0/M, p0/M, z5.b, z29.b\n"
"8:" // K oddments
- "cbz x21, 10f\n"
+ "cbz x20, 10f\n"
"9:" // K oddments: Loop
- ".inst 0xa1400776 // ld1b { z22.b, z30.b }, pn9.b/Z, [x27]\n"
- "subs x21, x21, #0x1\n"
- "addvl x27, x27, #2\n"
- ".inst 0xa14006f1 // ld1b { z17.b, z25.b }, pn9.b/Z, [x23]\n"
- "addvl x23, x23, #2\n"
+ ".inst 0xa1400756 // ld1b { z22.b, z30.b }, pn9.b/Z, [x26]\n"
+ "subs x20, x20, #0x1\n"
+ "addvl x26, x26, #2\n"
+ ".inst 0xa14006d1 // ld1b { z17.b, z25.b }, pn9.b/Z, [x22]\n"
+ "addvl x22, x22, #2\n"
".inst 0xa09102c0 // smopa za0.s, p0/M, p0/M, z22.b, z17.b\n"
".inst 0xa09902c1 // smopa za1.s, p0/M, p0/M, z22.b, z25.b\n"
".inst 0xa09103c2 // smopa za2.s, p0/M, p0/M, z30.b, z17.b\n"
".inst 0xa09903c3 // smopa za3.s, p0/M, p0/M, z30.b, z25.b\n"
"bgt 9b\n"
"10:" // K oddments: End
- "tbz x16, #1, 14f\n"
- "tbz x16, #0, 12f\n"
+ "tbz x15, #1, 14f\n"
+ "tbz x15, #0, 12f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"11:" // Store to partial result buffer: Store and refill: Loop
- ".inst 0xa040c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xa040c5cc // ld1w { z12.s-z15.s }, pn9.b/Z, [x14]\n"
".inst 0xc0860418 // mova { z24.s-z27.s }, za0h.s[x12]\n"
".inst 0xc0840580 // mova za0h.s[x12], { z12.s-z15.s }\n"
".inst 0xc0860420 // mova { z0.s-z3.s }, za1h.s[x12]\n"
- ".inst 0xa041c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa041c5cc // ld1w { z12.s-z15.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
".inst 0xc086045c // mova { z28.s-z31.s }, za2h.s[x12]\n"
".inst 0xc0860468 // mova { z8.s-z11.s }, za3h.s[x12]\n"
- ".inst 0xa042c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa042c5cc // ld1w { z12.s-z15.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
".inst 0xc0840582 // mova za2h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xa043c5e4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xa043c5c4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
".inst 0xc0840483 // mova za3h.s[x12], { z4.s-z7.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- ".inst 0xa060c5d8 // st1w { z24.s-z27.s }, pn9.b, [x14]\n"
- "addvl x15, x15, #16\n"
- ".inst 0xa061c5c0 // st1w { z0.s-z3.s }, pn9.b, [x14, #0x4, MUL VL]\n"
- ".inst 0xa062c5dc // st1w { z28.s-z31.s }, pn9.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c5c8 // st1w { z8.s-z11.s }, pn9.b, [x14, #0xc, MUL VL]\n"
+ "cmp x12, x19\n"
+ ".inst 0xa060c5b8 // st1w { z24.s-z27.s }, pn9.b, [x13]\n"
"addvl x14, x14, #16\n"
+ ".inst 0xa061c5a0 // st1w { z0.s-z3.s }, pn9.b, [x13, #0x4, MUL VL]\n"
+ ".inst 0xa062c5bc // st1w { z28.s-z31.s }, pn9.b, [x13, #0x8, MUL VL]\n"
+ ".inst 0xa063c5a8 // st1w { z8.s-z11.s }, pn9.b, [x13, #0xc, MUL VL]\n"
+ "addvl x13, x13, #16\n"
"blt 11b\n"
"b 23f\n"
"12:" // Store to partial result buffer: Store only
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"13:" // Store to partial result buffer: Store only: Loop
".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
".inst 0xc086042c // mova { z12.s-z15.s }, za1h.s[x12]\n"
- ".inst 0xa060c5c0 // st1w { z0.s-z3.s }, pn9.b, [x14]\n"
+ ".inst 0xa060c5a0 // st1w { z0.s-z3.s }, pn9.b, [x13]\n"
".inst 0xc086045c // mova { z28.s-z31.s }, za2h.s[x12]\n"
".inst 0xc0860464 // mova { z4.s-z7.s }, za3h.s[x12]\n"
- ".inst 0xa061c5cc // st1w { z12.s-z15.s }, pn9.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xa061c5ac // st1w { z12.s-z15.s }, pn9.b, [x13, #0x4, MUL VL]\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- ".inst 0xa062c5dc // st1w { z28.s-z31.s }, pn9.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c5c4 // st1w { z4.s-z7.s }, pn9.b, [x14, #0xc, MUL VL]\n"
- "addvl x14, x14, #16\n"
+ "cmp x12, x19\n"
+ ".inst 0xa062c5bc // st1w { z28.s-z31.s }, pn9.b, [x13, #0x8, MUL VL]\n"
+ ".inst 0xa063c5a4 // st1w { z4.s-z7.s }, pn9.b, [x13, #0xc, MUL VL]\n"
+ "addvl x13, x13, #16\n"
"blt 13b\n"
"b 23f\n"
"14:" // Store to output array
- "ldr x26, [%x[args], %[offsetof_C]]\n"
- "sub x25, x13, x11\n"
- "cntw x24\n"
- "ldr x23, [%x[args], %[offsetof_ldcb]]\n"
- "cmp x25, x24\n"
- "csel x22, x25, x24, LT\n"
- "add x26, x26, x10, LSL #2\n" // C += n
- "lsr x21, x22, #0x2\n"
- "madd x26, x11, x23, x26\n" // C += m * ldc
+ "ldr x25, [%x[args], %[offsetof_C]]\n"
+ "sub x24, x11, x10\n"
+ "cntw x23\n"
+ "ldr x22, [%x[args], %[offsetof_ldcb]]\n"
+ "cmp x24, x23\n"
+ "csel x21, x24, x23, LT\n"
+ "add x25, x25, x9, LSL #2\n" // C += n
+ "lsr x20, x21, #0x2\n"
+ "madd x25, x10, x22, x25\n" // C += m * ldc
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 16f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 16f\n"
"15:" // Store to output array: Accumulator row 0 loop
".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
".inst 0xc0860438 // mova { z24.s-z27.s }, za1h.s[x12]\n"
- ".inst 0xa1604350 // st1w { z16.s, z24.s }, p8, [x26]\n"
- "add x26, x26, x23\n"
- ".inst 0xa1604351 // st1w { z17.s, z25.s }, p8, [x26]\n"
- "add x26, x26, x23\n"
+ ".inst 0xa1604330 // st1w { z16.s, z24.s }, p8, [x25]\n"
+ "add x25, x25, x22\n"
+ ".inst 0xa1604331 // st1w { z17.s, z25.s }, p8, [x25]\n"
+ "add x25, x25, x22\n"
"add x12, x12, #0x4\n"
- ".inst 0xa1604352 // st1w { z18.s, z26.s }, p8, [x26]\n"
- "add x26, x26, x23\n"
- "cmp x12, x21, LSL #2\n"
- ".inst 0xa1604353 // st1w { z19.s, z27.s }, p8, [x26]\n"
- "add x26, x26, x23\n"
+ ".inst 0xa1604332 // st1w { z18.s, z26.s }, p8, [x25]\n"
+ "add x25, x25, x22\n"
+ "cmp x12, x20, LSL #2\n"
+ ".inst 0xa1604333 // st1w { z19.s, z27.s }, p8, [x25]\n"
+ "add x25, x25, x22\n"
"blt 15b\n"
"16:" // Store to output array: Accumulator row 0 oddments
- "cbz x20, 17f\n"
- "subs x20, x20, #0x1\n"
+ "cbz x19, 17f\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
".inst 0xc0860428 // mova { z8.s-z11.s }, za1h.s[x12]\n"
- ".inst 0xa1604340 // st1w { z0.s, z8.s }, p8, [x26]\n"
- "add x26, x26, x23\n"
+ ".inst 0xa1604320 // st1w { z0.s, z8.s }, p8, [x25]\n"
+ "add x25, x25, x22\n"
"beq 17f\n"
- "subs x20, x20, #0x1\n"
- ".inst 0xa1604341 // st1w { z1.s, z9.s }, p8, [x26]\n"
- "add x26, x26, x23\n"
+ "subs x19, x19, #0x1\n"
+ ".inst 0xa1604321 // st1w { z1.s, z9.s }, p8, [x25]\n"
+ "add x25, x25, x22\n"
"beq 17f\n"
- ".inst 0xa1604342 // st1w { z2.s, z10.s }, p8, [x26]\n"
- "add x26, x26, x23\n"
+ ".inst 0xa1604322 // st1w { z2.s, z10.s }, p8, [x25]\n"
+ "add x25, x25, x22\n"
"17:" // Store to output array: Accumulator row 0 oddments: End
- "subs x25, x25, x22\n"
+ "subs x24, x24, x21\n"
"beq 21f\n"
- "cmp x25, x24\n"
- "csel x20, x25, x24, LT\n"
- "lsr x21, x20, #0x2\n"
+ "cmp x24, x23\n"
+ "csel x19, x24, x23, LT\n"
+ "lsr x20, x19, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x20, #0x3\n"
- "cbz x21, 19f\n"
+ "and x19, x19, #0x3\n"
+ "cbz x20, 19f\n"
"18:" // Store to output array: Accumulator row 1 loop
".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
".inst 0xc0860478 // mova { z24.s-z27.s }, za3h.s[x12]\n"
- ".inst 0xa1604350 // st1w { z16.s, z24.s }, p8, [x26]\n"
- "add x26, x26, x23\n"
- ".inst 0xa1604351 // st1w { z17.s, z25.s }, p8, [x26]\n"
- "add x26, x26, x23\n"
+ ".inst 0xa1604330 // st1w { z16.s, z24.s }, p8, [x25]\n"
+ "add x25, x25, x22\n"
+ ".inst 0xa1604331 // st1w { z17.s, z25.s }, p8, [x25]\n"
+ "add x25, x25, x22\n"
"add x12, x12, #0x4\n"
- ".inst 0xa1604352 // st1w { z18.s, z26.s }, p8, [x26]\n"
- "add x26, x26, x23\n"
- "cmp x12, x21, LSL #2\n"
- ".inst 0xa1604353 // st1w { z19.s, z27.s }, p8, [x26]\n"
- "add x26, x26, x23\n"
+ ".inst 0xa1604332 // st1w { z18.s, z26.s }, p8, [x25]\n"
+ "add x25, x25, x22\n"
+ "cmp x12, x20, LSL #2\n"
+ ".inst 0xa1604333 // st1w { z19.s, z27.s }, p8, [x25]\n"
+ "add x25, x25, x22\n"
"blt 18b\n"
"19:" // Store to output array: Accumulator row 1 oddments
- "cbz x20, 20f\n"
- "subs x20, x20, #0x1\n"
+ "cbz x19, 20f\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc0860444 // mova { z4.s-z7.s }, za2h.s[x12]\n"
".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
- ".inst 0xa1604344 // st1w { z4.s, z12.s }, p8, [x26]\n"
- "add x26, x26, x23\n"
+ ".inst 0xa1604324 // st1w { z4.s, z12.s }, p8, [x25]\n"
+ "add x25, x25, x22\n"
"beq 20f\n"
- "subs x20, x20, #0x1\n"
- ".inst 0xa1604345 // st1w { z5.s, z13.s }, p8, [x26]\n"
- "add x26, x26, x23\n"
+ "subs x19, x19, #0x1\n"
+ ".inst 0xa1604325 // st1w { z5.s, z13.s }, p8, [x25]\n"
+ "add x25, x25, x22\n"
"beq 20f\n"
- ".inst 0xa1604346 // st1w { z6.s, z14.s }, p8, [x26]\n"
+ ".inst 0xa1604326 // st1w { z6.s, z14.s }, p8, [x25]\n"
"20:" // Store to output array: Accumulator row 1 oddments: End
"21:" // Store to output array: End
- "tbz x16, #0, 23f\n"
+ "tbz x15, #0, 23f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"22:" // Store to output array: Refill accumulators: Loop
- ".inst 0xa040c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xa040c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14]\n"
".inst 0xc0840600 // mova za0h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa041c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa041c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa042c5e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa042c5c0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
".inst 0xc0840402 // mova za2h.s[x12], { z0.s-z3.s }\n"
- ".inst 0xa043c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xa043c5cc // ld1w { z12.s-z15.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
".inst 0xc0840583 // mova za3h.s[x12], { z12.s-z15.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- "addvl x15, x15, #16\n"
+ "cmp x12, x19\n"
+ "addvl x14, x14, #16\n"
"blt 22b\n"
"23:" // End block
- "incw x10, ALL, MUL #2\n"
- "cmp x10, x9\n"
+ "incw x9, ALL, MUL #2\n"
+ "cmp x9, x28\n"
"blt 3b\n"
- "incw x11, ALL, MUL #2\n"
- "cmp x11, x13\n"
- "mov x10, #0x0\n"
- "mov x28, x27\n"
+ "incw x10, ALL, MUL #2\n"
+ "cmp x10, x11\n"
+ "mov x9, #0x0\n"
+ "mov x27, x26\n"
"blt 3b\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL/generic.cpp
index d863b6c72a..3623f5b6c0 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __ARM_FEATURE_SVE
#ifdef ARM_COMPUTE_ENABLE_SME2
@@ -84,107 +84,107 @@ void sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL(const int8_t *const A, const in
KernelArgs args(A, B, C, ldc, M, N, K, bias, accumulate, accumulator_buffer);
__asm__ __volatile__(
- "ldr x16, [%x[args], %[offsetof_flags]]\n"
+ "ldr x15, [%x[args], %[offsetof_flags]]\n"
".inst 0xd503477f // SMSTART ZA\n"
"ptrue p1.b\n"
".inst 0x25207810 // ptrue pn8.b\n"
- "ldr x15, [%x[args], %[offsetof_accumulator_buffer]]\n"
"ldr x14, [%x[args], %[offsetof_accumulator_buffer]]\n"
- "tbz x16, #0, 2f\n"
+ "ldr x13, [%x[args], %[offsetof_accumulator_buffer]]\n"
+ "tbz x15, #0, 2f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"1:" // Initial accumulator load from buffer: Loop
- ".inst 0xa040c1fc // ld1w { z28.s-z31.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xa040c1dc // ld1w { z28.s-z31.s }, pn8.b/Z, [x14]\n"
".inst 0xc0840780 // mova za0h.s[x12], { z28.s-z31.s }\n"
- ".inst 0xa041c1f8 // ld1w { z24.s-z27.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa041c1d8 // ld1w { z24.s-z27.s }, pn8.b/Z, [x14, #0x4, MUL VL]\n"
".inst 0xc0840701 // mova za1h.s[x12], { z24.s-z27.s }\n"
- ".inst 0xa042c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa042c1c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x14, #0x8, MUL VL]\n"
".inst 0xc0840482 // mova za2h.s[x12], { z4.s-z7.s }\n"
- ".inst 0xa043c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xa043c1c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x14, #0xc, MUL VL]\n"
".inst 0xc0840483 // mova za3h.s[x12], { z4.s-z7.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- "addvl x15, x15, #16\n"
+ "cmp x12, x19\n"
+ "addvl x14, x14, #16\n"
"blt 1b\n"
"2:" // Initial accumulator load from buffer: End
- "ldr w13, [%x[args], %[offsetof_M]]\n"
- "mov x11, #0x0\n"
+ "ldr w11, [%x[args], %[offsetof_M]]\n"
"mov x10, #0x0\n"
- "ldr w9, [%x[args], %[offsetof_N]]\n"
- "ldr x28, [%x[args], %[offsetof_A]]\n"
+ "mov x9, #0x0\n"
+ "ldr w28, [%x[args], %[offsetof_N]]\n"
+ "ldr x27, [%x[args], %[offsetof_A]]\n"
"3:" // M and N loop
- "mov x27, x28\n"
- "whilelt p0.s, x10, x9\n"
- "tbnz x16, #0, 4f\n"
- "ldr x20, [%x[args], %[offsetof_bias]]\n"
+ "mov x26, x27\n"
+ "whilelt p0.s, x9, x28\n"
+ "tbnz x15, #0, 4f\n"
+ "ldr x19, [%x[args], %[offsetof_bias]]\n"
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
- "cbz x20, 5f\n"
- "ldnt1w { z15.s }, p0/Z, [x20, x10, LSL #2]\n"
+ "cbz x19, 5f\n"
+ "ldnt1w { z15.s }, p0/Z, [x19, x9, LSL #2]\n"
".inst 0xc09025e0 // addha za0.s, p1/M, p1/M, z15.s\n"
".inst 0xc09025e1 // addha za1.s, p1/M, p1/M, z15.s\n"
".inst 0xc09025e2 // addha za2.s, p1/M, p1/M, z15.s\n"
".inst 0xc09025e3 // addha za3.s, p1/M, p1/M, z15.s\n"
"4:" // Prepare accumulators: Test for last block
+ "mov x19, x9\n"
"mov x20, x10\n"
- "mov x21, x11\n"
- "incw x20\n"
- "incw x21, ALL, MUL #4\n"
- "cmp x20, x9\n"
- "csel x21, x11, x21, LT\n"
- "mov x20, x16\n"
- "bfm x16, XZR, #0x0, #0x0 // bfc x16, #0x0, #0x1\n"
- "cmp x21, x13\n"
- "csel x16, x20, x16, LT\n"
+ "incw x19\n"
+ "incw x20, ALL, MUL #4\n"
+ "cmp x19, x28\n"
+ "csel x20, x10, x20, LT\n"
+ "mov x19, x15\n"
+ "bfm x15, XZR, #0x0, #0x0 // bfc x15, #0x0, #0x1\n"
+ "cmp x20, x11\n"
+ "csel x15, x19, x15, LT\n"
"5:" // Prepare accumulators: End
- "ldr x20, [%x[args], %[offsetof_K]]\n"
- "add x20, x20, #0x3\n"
- "lsr x20, x20, #0x2\n"
- "ldr x23, [%x[args], %[offsetof_B]]\n"
- "lsr x22, x20, #0x2\n"
- "and x21, x20, #0x3\n"
- "ldr x20, [%x[args], %[offsetof_kstride_bytes]]\n"
- "madd x23, x10, x20, x23\n" // bptr = B + n * kstride_bytes
- "cbz x22, 8f\n"
- "subs x22, x22, #0x1\n"
- ".inst 0xa0408370 // ld1b { z16.b-z19.b }, pn8.b/Z, [x27]\n"
- "ldnt1b { z7.b }, p1/Z, [x23]\n"
- ".inst 0xa041837c // ld1b { z28.b-z31.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
- "ldnt1b { z13.b }, p1/Z, [x23, #1, MUL VL]\n"
- ".inst 0xa0428360 // ld1b { z0.b-z3.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
- "ldnt1b { z12.b }, p1/Z, [x23, #2, MUL VL]\n"
- ".inst 0xa0438378 // ld1b { z24.b-z27.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
- "addvl x27, x27, #16\n"
- "ldnt1b { z23.b }, p1/Z, [x23, #3, MUL VL]\n"
- "addvl x23, x23, #4\n"
+ "ldr x19, [%x[args], %[offsetof_K]]\n"
+ "add x19, x19, #0x3\n"
+ "lsr x19, x19, #0x2\n"
+ "ldr x22, [%x[args], %[offsetof_B]]\n"
+ "lsr x21, x19, #0x2\n"
+ "and x20, x19, #0x3\n"
+ "ldr x19, [%x[args], %[offsetof_kstride_bytes]]\n"
+ "madd x22, x9, x19, x22\n" // bptr = B + n * kstride_bytes
+ "cbz x21, 8f\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xa0408350 // ld1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
+ "ldnt1b { z7.b }, p1/Z, [x22]\n"
+ ".inst 0xa041835c // ld1b { z28.b-z31.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ "ldnt1b { z13.b }, p1/Z, [x22, #1, MUL VL]\n"
+ ".inst 0xa0428340 // ld1b { z0.b-z3.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ "ldnt1b { z12.b }, p1/Z, [x22, #2, MUL VL]\n"
+ ".inst 0xa0438358 // ld1b { z24.b-z27.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+ "addvl x26, x26, #16\n"
+ "ldnt1b { z23.b }, p1/Z, [x22, #3, MUL VL]\n"
+ "addvl x22, x22, #4\n"
"ble 7f\n"
"6:" // K loop
".inst 0xa0872600 // smopa za0.s, p1/M, p1/M, z16.b, z7.b\n"
- "subs x22, x22, #0x1\n"
+ "subs x21, x21, #0x1\n"
".inst 0xa0872621 // smopa za1.s, p1/M, p1/M, z17.b, z7.b\n"
".inst 0xa0872642 // smopa za2.s, p1/M, p1/M, z18.b, z7.b\n"
".inst 0xa0872663 // smopa za3.s, p1/M, p1/M, z19.b, z7.b\n"
- ".inst 0xa0408370 // ld1b { z16.b-z19.b }, pn8.b/Z, [x27]\n"
+ ".inst 0xa0408350 // ld1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
".inst 0xa08d2780 // smopa za0.s, p1/M, p1/M, z28.b, z13.b\n"
- "ldnt1b { z7.b }, p1/Z, [x23]\n"
+ "ldnt1b { z7.b }, p1/Z, [x22]\n"
".inst 0xa08d27a1 // smopa za1.s, p1/M, p1/M, z29.b, z13.b\n"
".inst 0xa08d27c2 // smopa za2.s, p1/M, p1/M, z30.b, z13.b\n"
".inst 0xa08d27e3 // smopa za3.s, p1/M, p1/M, z31.b, z13.b\n"
- ".inst 0xa041837c // ld1b { z28.b-z31.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa041835c // ld1b { z28.b-z31.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
".inst 0xa08c2400 // smopa za0.s, p1/M, p1/M, z0.b, z12.b\n"
- "ldnt1b { z13.b }, p1/Z, [x23, #1, MUL VL]\n"
+ "ldnt1b { z13.b }, p1/Z, [x22, #1, MUL VL]\n"
".inst 0xa08c2421 // smopa za1.s, p1/M, p1/M, z1.b, z12.b\n"
".inst 0xa08c2442 // smopa za2.s, p1/M, p1/M, z2.b, z12.b\n"
".inst 0xa08c2463 // smopa za3.s, p1/M, p1/M, z3.b, z12.b\n"
- ".inst 0xa0428360 // ld1b { z0.b-z3.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
- "ldnt1b { z12.b }, p1/Z, [x23, #2, MUL VL]\n"
+ ".inst 0xa0428340 // ld1b { z0.b-z3.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ "ldnt1b { z12.b }, p1/Z, [x22, #2, MUL VL]\n"
".inst 0xa0972700 // smopa za0.s, p1/M, p1/M, z24.b, z23.b\n"
".inst 0xa0972721 // smopa za1.s, p1/M, p1/M, z25.b, z23.b\n"
".inst 0xa0972742 // smopa za2.s, p1/M, p1/M, z26.b, z23.b\n"
".inst 0xa0972763 // smopa za3.s, p1/M, p1/M, z27.b, z23.b\n"
- ".inst 0xa0438378 // ld1b { z24.b-z27.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
- "addvl x27, x27, #16\n"
- "ldnt1b { z23.b }, p1/Z, [x23, #3, MUL VL]\n"
- "addvl x23, x23, #4\n"
+ ".inst 0xa0438358 // ld1b { z24.b-z27.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+ "addvl x26, x26, #16\n"
+ "ldnt1b { z23.b }, p1/Z, [x22, #3, MUL VL]\n"
+ "addvl x22, x22, #4\n"
"bgt 6b\n"
"7:" // K loop tail
".inst 0xa0872600 // smopa za0.s, p1/M, p1/M, z16.b, z7.b\n"
@@ -204,237 +204,237 @@ void sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL(const int8_t *const A, const in
".inst 0xa0972742 // smopa za2.s, p1/M, p1/M, z26.b, z23.b\n"
".inst 0xa0972763 // smopa za3.s, p1/M, p1/M, z27.b, z23.b\n"
"8:" // K oddments
- "cbz x21, 10f\n"
+ "cbz x20, 10f\n"
"9:" // K oddments: Loop
- ".inst 0xa0408370 // ld1b { z16.b-z19.b }, pn8.b/Z, [x27]\n"
- "subs x21, x21, #0x1\n"
- "addvl x27, x27, #4\n"
- "ld1b { z7.b }, p1/Z, [x23]\n"
- "addvl x23, x23, #1\n"
+ ".inst 0xa0408350 // ld1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
+ "subs x20, x20, #0x1\n"
+ "addvl x26, x26, #4\n"
+ "ld1b { z7.b }, p1/Z, [x22]\n"
+ "addvl x22, x22, #1\n"
".inst 0xa0872600 // smopa za0.s, p1/M, p1/M, z16.b, z7.b\n"
".inst 0xa0872621 // smopa za1.s, p1/M, p1/M, z17.b, z7.b\n"
".inst 0xa0872642 // smopa za2.s, p1/M, p1/M, z18.b, z7.b\n"
".inst 0xa0872663 // smopa za3.s, p1/M, p1/M, z19.b, z7.b\n"
"bgt 9b\n"
"10:" // K oddments: End
- "tbz x16, #1, 14f\n"
- "tbz x16, #0, 12f\n"
+ "tbz x15, #1, 14f\n"
+ "tbz x15, #0, 12f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"11:" // Store to partial result buffer: Store and refill: Loop
- ".inst 0xa040c1f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xa040c1d4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x14]\n"
".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
".inst 0xc0840680 // mova za0h.s[x12], { z20.s-z23.s }\n"
".inst 0xc0860428 // mova { z8.s-z11.s }, za1h.s[x12]\n"
- ".inst 0xa041c1f8 // ld1w { z24.s-z27.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa041c1d8 // ld1w { z24.s-z27.s }, pn8.b/Z, [x14, #0x4, MUL VL]\n"
".inst 0xc0840701 // mova za1h.s[x12], { z24.s-z27.s }\n"
".inst 0xc086045c // mova { z28.s-z31.s }, za2h.s[x12]\n"
".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
- ".inst 0xa042c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa042c1c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x14, #0x8, MUL VL]\n"
".inst 0xc0840482 // mova za2h.s[x12], { z4.s-z7.s }\n"
- ".inst 0xa043c1f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xa043c1d4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x14, #0xc, MUL VL]\n"
".inst 0xc0840683 // mova za3h.s[x12], { z20.s-z23.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- ".inst 0xa060c1c0 // st1w { z0.s-z3.s }, pn8.b, [x14]\n"
- "addvl x15, x15, #16\n"
- ".inst 0xa061c1c8 // st1w { z8.s-z11.s }, pn8.b, [x14, #0x4, MUL VL]\n"
- ".inst 0xa062c1dc // st1w { z28.s-z31.s }, pn8.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c1d0 // st1w { z16.s-z19.s }, pn8.b, [x14, #0xc, MUL VL]\n"
+ "cmp x12, x19\n"
+ ".inst 0xa060c1a0 // st1w { z0.s-z3.s }, pn8.b, [x13]\n"
"addvl x14, x14, #16\n"
+ ".inst 0xa061c1a8 // st1w { z8.s-z11.s }, pn8.b, [x13, #0x4, MUL VL]\n"
+ ".inst 0xa062c1bc // st1w { z28.s-z31.s }, pn8.b, [x13, #0x8, MUL VL]\n"
+ ".inst 0xa063c1b0 // st1w { z16.s-z19.s }, pn8.b, [x13, #0xc, MUL VL]\n"
+ "addvl x13, x13, #16\n"
"blt 11b\n"
"b 29f\n"
"12:" // Store to partial result buffer: Store only
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"13:" // Store to partial result buffer: Store only: Loop
".inst 0xc0860408 // mova { z8.s-z11.s }, za0h.s[x12]\n"
".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
- ".inst 0xa060c1c8 // st1w { z8.s-z11.s }, pn8.b, [x14]\n"
+ ".inst 0xa060c1a8 // st1w { z8.s-z11.s }, pn8.b, [x13]\n"
".inst 0xc086044c // mova { z12.s-z15.s }, za2h.s[x12]\n"
".inst 0xc0860460 // mova { z0.s-z3.s }, za3h.s[x12]\n"
- ".inst 0xa061c1c4 // st1w { z4.s-z7.s }, pn8.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xa061c1a4 // st1w { z4.s-z7.s }, pn8.b, [x13, #0x4, MUL VL]\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- ".inst 0xa062c1cc // st1w { z12.s-z15.s }, pn8.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c1c0 // st1w { z0.s-z3.s }, pn8.b, [x14, #0xc, MUL VL]\n"
- "addvl x14, x14, #16\n"
+ "cmp x12, x19\n"
+ ".inst 0xa062c1ac // st1w { z12.s-z15.s }, pn8.b, [x13, #0x8, MUL VL]\n"
+ ".inst 0xa063c1a0 // st1w { z0.s-z3.s }, pn8.b, [x13, #0xc, MUL VL]\n"
+ "addvl x13, x13, #16\n"
"blt 13b\n"
"b 29f\n"
"14:" // Store to output array
- "ldr x26, [%x[args], %[offsetof_C]]\n"
- "sub x25, x13, x11\n"
- "cntw x24\n"
- "ldr x23, [%x[args], %[offsetof_ldcb]]\n"
- "cmp x25, x24\n"
- "csel x22, x25, x24, LT\n"
- "add x26, x26, x10, LSL #2\n" // C += n
- "lsr x21, x22, #0x2\n"
- "madd x26, x11, x23, x26\n" // C += m * ldc
+ "ldr x25, [%x[args], %[offsetof_C]]\n"
+ "sub x24, x11, x10\n"
+ "cntw x23\n"
+ "ldr x22, [%x[args], %[offsetof_ldcb]]\n"
+ "cmp x24, x23\n"
+ "csel x21, x24, x23, LT\n"
+ "add x25, x25, x9, LSL #2\n" // C += n
+ "lsr x20, x21, #0x2\n"
+ "madd x25, x10, x22, x25\n" // C += m * ldc
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 16f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 16f\n"
"15:" // Store to output array: Accumulator row 0 loop
".inst 0xc086041c // mova { z28.s-z31.s }, za0h.s[x12]\n"
- "st1w { z28.s }, p0, [x26]\n"
- "add x26, x26, x23\n"
- "st1w { z29.s }, p0, [x26]\n"
- "add x26, x26, x23\n"
+ "st1w { z28.s }, p0, [x25]\n"
+ "add x25, x25, x22\n"
+ "st1w { z29.s }, p0, [x25]\n"
+ "add x25, x25, x22\n"
"add x12, x12, #0x4\n"
- "st1w { z30.s }, p0, [x26]\n"
- "add x26, x26, x23\n"
- "cmp x12, x21, LSL #2\n"
- "st1w { z31.s }, p0, [x26]\n"
- "add x26, x26, x23\n"
+ "st1w { z30.s }, p0, [x25]\n"
+ "add x25, x25, x22\n"
+ "cmp x12, x20, LSL #2\n"
+ "st1w { z31.s }, p0, [x25]\n"
+ "add x25, x25, x22\n"
"blt 15b\n"
"16:" // Store to output array: Accumulator row 0 oddments
- "cbz x20, 17f\n"
- "subs x20, x20, #0x1\n"
+ "cbz x19, 17f\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc0860408 // mova { z8.s-z11.s }, za0h.s[x12]\n"
- "st1w { z8.s }, p0, [x26]\n"
- "add x26, x26, x23\n"
+ "st1w { z8.s }, p0, [x25]\n"
+ "add x25, x25, x22\n"
"beq 17f\n"
- "subs x20, x20, #0x1\n"
- "st1w { z9.s }, p0, [x26]\n"
- "add x26, x26, x23\n"
+ "subs x19, x19, #0x1\n"
+ "st1w { z9.s }, p0, [x25]\n"
+ "add x25, x25, x22\n"
"beq 17f\n"
- "st1w { z10.s }, p0, [x26]\n"
- "add x26, x26, x23\n"
+ "st1w { z10.s }, p0, [x25]\n"
+ "add x25, x25, x22\n"
"17:" // Store to output array: Accumulator row 0 oddments: End
- "subs x25, x25, x22\n"
+ "subs x24, x24, x21\n"
"beq 27f\n"
- "cmp x25, x24\n"
- "csel x22, x25, x24, LT\n"
- "lsr x21, x22, #0x2\n"
+ "cmp x24, x23\n"
+ "csel x21, x24, x23, LT\n"
+ "lsr x20, x21, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 19f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 19f\n"
"18:" // Store to output array: Accumulator row 1 loop
".inst 0xc0860420 // mova { z0.s-z3.s }, za1h.s[x12]\n"
- "st1w { z0.s }, p0, [x26]\n"
- "add x26, x26, x23\n"
- "st1w { z1.s }, p0, [x26]\n"
- "add x26, x26, x23\n"
+ "st1w { z0.s }, p0, [x25]\n"
+ "add x25, x25, x22\n"
+ "st1w { z1.s }, p0, [x25]\n"
+ "add x25, x25, x22\n"
"add x12, x12, #0x4\n"
- "st1w { z2.s }, p0, [x26]\n"
- "add x26, x26, x23\n"
- "cmp x12, x21, LSL #2\n"
- "st1w { z3.s }, p0, [x26]\n"
- "add x26, x26, x23\n"
+ "st1w { z2.s }, p0, [x25]\n"
+ "add x25, x25, x22\n"
+ "cmp x12, x20, LSL #2\n"
+ "st1w { z3.s }, p0, [x25]\n"
+ "add x25, x25, x22\n"
"blt 18b\n"
"19:" // Store to output array: Accumulator row 1 oddments
- "cbz x20, 20f\n"
- "subs x20, x20, #0x1\n"
+ "cbz x19, 20f\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc0860430 // mova { z16.s-z19.s }, za1h.s[x12]\n"
- "st1w { z16.s }, p0, [x26]\n"
- "add x26, x26, x23\n"
+ "st1w { z16.s }, p0, [x25]\n"
+ "add x25, x25, x22\n"
"beq 20f\n"
- "subs x20, x20, #0x1\n"
- "st1w { z17.s }, p0, [x26]\n"
- "add x26, x26, x23\n"
+ "subs x19, x19, #0x1\n"
+ "st1w { z17.s }, p0, [x25]\n"
+ "add x25, x25, x22\n"
"beq 20f\n"
- "st1w { z18.s }, p0, [x26]\n"
- "add x26, x26, x23\n"
+ "st1w { z18.s }, p0, [x25]\n"
+ "add x25, x25, x22\n"
"20:" // Store to output array: Accumulator row 1 oddments: End
- "subs x25, x25, x22\n"
+ "subs x24, x24, x21\n"
"beq 27f\n"
- "cmp x25, x24\n"
- "csel x22, x25, x24, LT\n"
- "lsr x21, x22, #0x2\n"
+ "cmp x24, x23\n"
+ "csel x21, x24, x23, LT\n"
+ "lsr x20, x21, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 22f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 22f\n"
"21:" // Store to output array: Accumulator row 2 loop
".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
- "st1w { z16.s }, p0, [x26]\n"
- "add x26, x26, x23\n"
- "st1w { z17.s }, p0, [x26]\n"
- "add x26, x26, x23\n"
+ "st1w { z16.s }, p0, [x25]\n"
+ "add x25, x25, x22\n"
+ "st1w { z17.s }, p0, [x25]\n"
+ "add x25, x25, x22\n"
"add x12, x12, #0x4\n"
- "st1w { z18.s }, p0, [x26]\n"
- "add x26, x26, x23\n"
- "cmp x12, x21, LSL #2\n"
- "st1w { z19.s }, p0, [x26]\n"
- "add x26, x26, x23\n"
+ "st1w { z18.s }, p0, [x25]\n"
+ "add x25, x25, x22\n"
+ "cmp x12, x20, LSL #2\n"
+ "st1w { z19.s }, p0, [x25]\n"
+ "add x25, x25, x22\n"
"blt 21b\n"
"22:" // Store to output array: Accumulator row 2 oddments
- "cbz x20, 23f\n"
- "subs x20, x20, #0x1\n"
+ "cbz x19, 23f\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc0860440 // mova { z0.s-z3.s }, za2h.s[x12]\n"
- "st1w { z0.s }, p0, [x26]\n"
- "add x26, x26, x23\n"
+ "st1w { z0.s }, p0, [x25]\n"
+ "add x25, x25, x22\n"
"beq 23f\n"
- "subs x20, x20, #0x1\n"
- "st1w { z1.s }, p0, [x26]\n"
- "add x26, x26, x23\n"
+ "subs x19, x19, #0x1\n"
+ "st1w { z1.s }, p0, [x25]\n"
+ "add x25, x25, x22\n"
"beq 23f\n"
- "st1w { z2.s }, p0, [x26]\n"
- "add x26, x26, x23\n"
+ "st1w { z2.s }, p0, [x25]\n"
+ "add x25, x25, x22\n"
"23:" // Store to output array: Accumulator row 2 oddments: End
- "subs x25, x25, x22\n"
+ "subs x24, x24, x21\n"
"beq 27f\n"
- "cmp x25, x24\n"
- "csel x20, x25, x24, LT\n"
- "lsr x21, x20, #0x2\n"
+ "cmp x24, x23\n"
+ "csel x19, x24, x23, LT\n"
+ "lsr x20, x19, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x20, #0x3\n"
- "cbz x21, 25f\n"
+ "and x19, x19, #0x3\n"
+ "cbz x20, 25f\n"
"24:" // Store to output array: Accumulator row 3 loop
".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
- "st1w { z12.s }, p0, [x26]\n"
- "add x26, x26, x23\n"
- "st1w { z13.s }, p0, [x26]\n"
- "add x26, x26, x23\n"
+ "st1w { z12.s }, p0, [x25]\n"
+ "add x25, x25, x22\n"
+ "st1w { z13.s }, p0, [x25]\n"
+ "add x25, x25, x22\n"
"add x12, x12, #0x4\n"
- "st1w { z14.s }, p0, [x26]\n"
- "add x26, x26, x23\n"
- "cmp x12, x21, LSL #2\n"
- "st1w { z15.s }, p0, [x26]\n"
- "add x26, x26, x23\n"
+ "st1w { z14.s }, p0, [x25]\n"
+ "add x25, x25, x22\n"
+ "cmp x12, x20, LSL #2\n"
+ "st1w { z15.s }, p0, [x25]\n"
+ "add x25, x25, x22\n"
"blt 24b\n"
"25:" // Store to output array: Accumulator row 3 oddments
- "cbz x20, 26f\n"
- "subs x20, x20, #0x1\n"
+ "cbz x19, 26f\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
- "st1w { z16.s }, p0, [x26]\n"
- "add x26, x26, x23\n"
+ "st1w { z16.s }, p0, [x25]\n"
+ "add x25, x25, x22\n"
"beq 26f\n"
- "subs x20, x20, #0x1\n"
- "st1w { z17.s }, p0, [x26]\n"
- "add x26, x26, x23\n"
+ "subs x19, x19, #0x1\n"
+ "st1w { z17.s }, p0, [x25]\n"
+ "add x25, x25, x22\n"
"beq 26f\n"
- "st1w { z18.s }, p0, [x26]\n"
+ "st1w { z18.s }, p0, [x25]\n"
"26:" // Store to output array: Accumulator row 3 oddments: End
"27:" // Store to output array: End
- "tbz x16, #0, 29f\n"
+ "tbz x15, #0, 29f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"28:" // Store to output array: Refill accumulators: Loop
- ".inst 0xa040c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xa040c1d0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x14]\n"
".inst 0xc0840600 // mova za0h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa041c1ec // ld1w { z12.s-z15.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa041c1cc // ld1w { z12.s-z15.s }, pn8.b/Z, [x14, #0x4, MUL VL]\n"
".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xa042c1f8 // ld1w { z24.s-z27.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa042c1d8 // ld1w { z24.s-z27.s }, pn8.b/Z, [x14, #0x8, MUL VL]\n"
".inst 0xc0840702 // mova za2h.s[x12], { z24.s-z27.s }\n"
- ".inst 0xa043c1e8 // ld1w { z8.s-z11.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xa043c1c8 // ld1w { z8.s-z11.s }, pn8.b/Z, [x14, #0xc, MUL VL]\n"
".inst 0xc0840503 // mova za3h.s[x12], { z8.s-z11.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- "addvl x15, x15, #16\n"
+ "cmp x12, x19\n"
+ "addvl x14, x14, #16\n"
"blt 28b\n"
"29:" // End block
- "incw x10\n"
- "cmp x10, x9\n"
+ "incw x9\n"
+ "cmp x9, x28\n"
"blt 3b\n"
- "incw x11, ALL, MUL #4\n"
- "cmp x11, x13\n"
- "mov x10, #0x0\n"
- "mov x28, x27\n"
+ "incw x10, ALL, MUL #4\n"
+ "cmp x10, x11\n"
+ "mov x9, #0x0\n"
+ "mov x27, x26\n"
"blt 3b\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
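
The hunks above follow the single mechanical pattern of this revert: every general-purpose scratch register is shifted down by one so that x19 re-enters the pool (x20 becomes x19, x21 becomes x20, and so on), the encoded ".inst" words are regenerated to match, and "x19" is restored to the inline-asm clobber list. The sketch below is a minimal illustration of the clobber-list side of that pattern; the kernel name and asm body are hypothetical placeholders, not code from this patch.

    #include <cstddef>

    // Illustrative only: shows why the asm body and the clobber list must
    // change together. Before the revert the generated body used x20 as its
    // counter and omitted "x19" from the clobbers; after it, the body uses
    // x19, so the clobber list must name x19 for the compiler to preserve
    // the callee-saved value around the block.
    static inline void example_kernel(long n)
    {
        __asm__ __volatile__(
            "mov x19, %x[n]\n"      // hypothetical loop counter held in x19
            "1:\n"
            "subs x19, x19, #1\n"   // sets flags, hence the "cc" clobber
            "bgt 1b\n"
            :
            : [n] "r" (n)
            : "cc", "x19"           // x19 must be listed once the body uses it
        );
    }

Omitting a used register from the clobber list is silent miscompilation rather than a build error, which is why the revert touches the clobber string of every kernel whose register numbering changed.
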
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_1VLx4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_1VLx4VL/generic.cpp
index d868ed2b67..100f15c7e0 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_1VLx4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_1VLx4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __ARM_FEATURE_SVE
#ifdef ARM_COMPUTE_ENABLE_SME2
@@ -90,107 +90,107 @@ void sme2_interleaved_nomerge_u8q_mopa_1VLx4VL(const uint8_t *const A, const uin
KernelArgs args(A, B, C, ldc, M, N, K, bias, rq, n_0, accumulate, accumulator_buffer);
__asm__ __volatile__(
- "ldr x14, [%x[args], %[offsetof_flags]]\n"
+ "ldr x13, [%x[args], %[offsetof_flags]]\n"
".inst 0xd503477f // SMSTART ZA\n"
"ptrue p1.b\n"
".inst 0x25207811 // ptrue pn9.b\n"
- "ldr x13, [%x[args], %[offsetof_accumulator_buffer]]\n"
"ldr x11, [%x[args], %[offsetof_accumulator_buffer]]\n"
- "tbz x14, #0, 2f\n"
+ "ldr x10, [%x[args], %[offsetof_accumulator_buffer]]\n"
+ "tbz x13, #0, 2f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"1:" // Initial accumulator load from buffer: Loop
- ".inst 0xa040c5ac // ld1w { z12.s-z15.s }, pn9.b/Z, [x13]\n"
+ ".inst 0xa040c56c // ld1w { z12.s-z15.s }, pn9.b/Z, [x11]\n"
".inst 0xc0840580 // mova za0h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xa041c5bc // ld1w { z28.s-z31.s }, pn9.b/Z, [x13, #0x4, MUL VL]\n"
+ ".inst 0xa041c57c // ld1w { z28.s-z31.s }, pn9.b/Z, [x11, #0x4, MUL VL]\n"
".inst 0xc0840781 // mova za1h.s[x12], { z28.s-z31.s }\n"
- ".inst 0xa042c5bc // ld1w { z28.s-z31.s }, pn9.b/Z, [x13, #0x8, MUL VL]\n"
+ ".inst 0xa042c57c // ld1w { z28.s-z31.s }, pn9.b/Z, [x11, #0x8, MUL VL]\n"
".inst 0xc0840782 // mova za2h.s[x12], { z28.s-z31.s }\n"
- ".inst 0xa043c5a4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x13, #0xc, MUL VL]\n"
+ ".inst 0xa043c564 // ld1w { z4.s-z7.s }, pn9.b/Z, [x11, #0xc, MUL VL]\n"
".inst 0xc0840483 // mova za3h.s[x12], { z4.s-z7.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- "addvl x13, x13, #16\n"
+ "cmp x12, x19\n"
+ "addvl x11, x11, #16\n"
"blt 1b\n"
"2:" // Initial accumulator load from buffer: End
- "ldr w10, [%x[args], %[offsetof_M]]\n"
- "mov x9, #0x0\n"
+ "ldr w9, [%x[args], %[offsetof_M]]\n"
"mov x28, #0x0\n"
- "ldr w27, [%x[args], %[offsetof_N]]\n"
- "ldr x26, [%x[args], %[offsetof_A]]\n"
+ "mov x27, #0x0\n"
+ "ldr w26, [%x[args], %[offsetof_N]]\n"
+ "ldr x25, [%x[args], %[offsetof_A]]\n"
"3:" // M and N loop
- "mov x25, x26\n"
- ".inst 0x25bb6790 // whilelt pn8.s, x28, x27, VLx4\n"
- "tbnz x14, #0, 4f\n"
- "ldr x20, [%x[args], %[offsetof_bias]]\n"
+ "mov x24, x25\n"
+ ".inst 0x25ba6770 // whilelt pn8.s, x27, x26, VLx4\n"
+ "tbnz x13, #0, 4f\n"
+ "ldr x19, [%x[args], %[offsetof_bias]]\n"
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
- "cbz x20, 5f\n"
- ".inst 0xa01cc299 // ldnt1w { z24.s-z27.s }, p8/Z, [x20, x28, LSL #2]\n"
+ "cbz x19, 5f\n"
+ ".inst 0xa01bc279 // ldnt1w { z24.s-z27.s }, p8/Z, [x19, x27, LSL #2]\n"
".inst 0xc0902700 // addha za0.s, p1/M, p1/M, z24.s\n"
".inst 0xc0902721 // addha za1.s, p1/M, p1/M, z25.s\n"
".inst 0xc0902742 // addha za2.s, p1/M, p1/M, z26.s\n"
".inst 0xc0902763 // addha za3.s, p1/M, p1/M, z27.s\n"
"4:" // Prepare accumulators: Test for last block
+ "mov x19, x27\n"
"mov x20, x28\n"
- "mov x21, x9\n"
- "incw x20, ALL, MUL #4\n"
- "incw x21\n"
- "cmp x20, x27\n"
- "csel x21, x9, x21, LT\n"
- "mov x20, x14\n"
- "bfm x14, XZR, #0x0, #0x0 // bfc x14, #0x0, #0x1\n"
- "cmp x21, x10\n"
- "csel x14, x20, x14, LT\n"
+ "incw x19, ALL, MUL #4\n"
+ "incw x20\n"
+ "cmp x19, x26\n"
+ "csel x20, x28, x20, LT\n"
+ "mov x19, x13\n"
+ "bfm x13, XZR, #0x0, #0x0 // bfc x13, #0x0, #0x1\n"
+ "cmp x20, x9\n"
+ "csel x13, x19, x13, LT\n"
"5:" // Prepare accumulators: End
- "ldr x20, [%x[args], %[offsetof_K]]\n"
- "add x20, x20, #0x3\n"
- "lsr x20, x20, #0x2\n"
- "ldr x23, [%x[args], %[offsetof_B]]\n"
- "lsr x22, x20, #0x2\n"
- "and x21, x20, #0x3\n"
- "ldr x20, [%x[args], %[offsetof_kstride_bytes]]\n"
- "madd x23, x28, x20, x23\n" // bptr = B + n * kstride_bytes
- "cbz x22, 8f\n"
- "subs x22, x22, #0x1\n"
- "ld1b { z10.b }, p1/Z, [x25]\n"
- ".inst 0xa04086fd // ldnt1b { z28.b-z31.b }, pn9.b/Z, [x23]\n"
- "ld1b { z16.b }, p1/Z, [x25, #1, MUL VL]\n"
- ".inst 0xa04186ed // ldnt1b { z12.b-z15.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
- "ld1b { z21.b }, p1/Z, [x25, #2, MUL VL]\n"
- ".inst 0xa04286f9 // ldnt1b { z24.b-z27.b }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
- "ld1b { z19.b }, p1/Z, [x25, #3, MUL VL]\n"
- "addvl x25, x25, #4\n"
- ".inst 0xa04386e1 // ldnt1b { z0.b-z3.b }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
- "addvl x23, x23, #16\n"
+ "ldr x19, [%x[args], %[offsetof_K]]\n"
+ "add x19, x19, #0x3\n"
+ "lsr x19, x19, #0x2\n"
+ "ldr x22, [%x[args], %[offsetof_B]]\n"
+ "lsr x21, x19, #0x2\n"
+ "and x20, x19, #0x3\n"
+ "ldr x19, [%x[args], %[offsetof_kstride_bytes]]\n"
+ "madd x22, x27, x19, x22\n" // bptr = B + n * kstride_bytes
+ "cbz x21, 8f\n"
+ "subs x21, x21, #0x1\n"
+ "ld1b { z10.b }, p1/Z, [x24]\n"
+ ".inst 0xa04086dd // ldnt1b { z28.b-z31.b }, pn9.b/Z, [x22]\n"
+ "ld1b { z16.b }, p1/Z, [x24, #1, MUL VL]\n"
+ ".inst 0xa04186cd // ldnt1b { z12.b-z15.b }, pn9.b/Z, [x22, #0x4, MUL VL]\n"
+ "ld1b { z21.b }, p1/Z, [x24, #2, MUL VL]\n"
+ ".inst 0xa04286d9 // ldnt1b { z24.b-z27.b }, pn9.b/Z, [x22, #0x8, MUL VL]\n"
+ "ld1b { z19.b }, p1/Z, [x24, #3, MUL VL]\n"
+ "addvl x24, x24, #4\n"
+ ".inst 0xa04386c1 // ldnt1b { z0.b-z3.b }, pn9.b/Z, [x22, #0xc, MUL VL]\n"
+ "addvl x22, x22, #16\n"
"ble 7f\n"
"6:" // K loop
".inst 0xa1bc2540 // umopa za0.s, p1/M, p1/M, z10.b, z28.b\n"
- "subs x22, x22, #0x1\n"
+ "subs x21, x21, #0x1\n"
".inst 0xa1bd2541 // umopa za1.s, p1/M, p1/M, z10.b, z29.b\n"
".inst 0xa1be2542 // umopa za2.s, p1/M, p1/M, z10.b, z30.b\n"
".inst 0xa1bf2543 // umopa za3.s, p1/M, p1/M, z10.b, z31.b\n"
- "ld1b { z10.b }, p1/Z, [x25]\n"
+ "ld1b { z10.b }, p1/Z, [x24]\n"
".inst 0xa1ac2600 // umopa za0.s, p1/M, p1/M, z16.b, z12.b\n"
- ".inst 0xa04086fd // ldnt1b { z28.b-z31.b }, pn9.b/Z, [x23]\n"
+ ".inst 0xa04086dd // ldnt1b { z28.b-z31.b }, pn9.b/Z, [x22]\n"
".inst 0xa1ad2601 // umopa za1.s, p1/M, p1/M, z16.b, z13.b\n"
".inst 0xa1ae2602 // umopa za2.s, p1/M, p1/M, z16.b, z14.b\n"
".inst 0xa1af2603 // umopa za3.s, p1/M, p1/M, z16.b, z15.b\n"
- "ld1b { z16.b }, p1/Z, [x25, #1, MUL VL]\n"
+ "ld1b { z16.b }, p1/Z, [x24, #1, MUL VL]\n"
".inst 0xa1b826a0 // umopa za0.s, p1/M, p1/M, z21.b, z24.b\n"
- ".inst 0xa04186ed // ldnt1b { z12.b-z15.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa04186cd // ldnt1b { z12.b-z15.b }, pn9.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0xa1b926a1 // umopa za1.s, p1/M, p1/M, z21.b, z25.b\n"
".inst 0xa1ba26a2 // umopa za2.s, p1/M, p1/M, z21.b, z26.b\n"
".inst 0xa1bb26a3 // umopa za3.s, p1/M, p1/M, z21.b, z27.b\n"
- "ld1b { z21.b }, p1/Z, [x25, #2, MUL VL]\n"
- ".inst 0xa04286f9 // ldnt1b { z24.b-z27.b }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
+ "ld1b { z21.b }, p1/Z, [x24, #2, MUL VL]\n"
+ ".inst 0xa04286d9 // ldnt1b { z24.b-z27.b }, pn9.b/Z, [x22, #0x8, MUL VL]\n"
".inst 0xa1a02660 // umopa za0.s, p1/M, p1/M, z19.b, z0.b\n"
".inst 0xa1a12661 // umopa za1.s, p1/M, p1/M, z19.b, z1.b\n"
".inst 0xa1a22662 // umopa za2.s, p1/M, p1/M, z19.b, z2.b\n"
".inst 0xa1a32663 // umopa za3.s, p1/M, p1/M, z19.b, z3.b\n"
- "ld1b { z19.b }, p1/Z, [x25, #3, MUL VL]\n"
- "addvl x25, x25, #4\n"
- ".inst 0xa04386e1 // ldnt1b { z0.b-z3.b }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
- "addvl x23, x23, #16\n"
+ "ld1b { z19.b }, p1/Z, [x24, #3, MUL VL]\n"
+ "addvl x24, x24, #4\n"
+ ".inst 0xa04386c1 // ldnt1b { z0.b-z3.b }, pn9.b/Z, [x22, #0xc, MUL VL]\n"
+ "addvl x22, x22, #16\n"
"bgt 6b\n"
"7:" // K loop tail
".inst 0xa1bc2540 // umopa za0.s, p1/M, p1/M, z10.b, z28.b\n"
@@ -210,76 +210,76 @@ void sme2_interleaved_nomerge_u8q_mopa_1VLx4VL(const uint8_t *const A, const uin
".inst 0xa1a22662 // umopa za2.s, p1/M, p1/M, z19.b, z2.b\n"
".inst 0xa1a32663 // umopa za3.s, p1/M, p1/M, z19.b, z3.b\n"
"8:" // K oddments
- "cbz x21, 10f\n"
+ "cbz x20, 10f\n"
"9:" // K oddments: Loop
- "ld1b { z10.b }, p1/Z, [x25]\n"
- "subs x21, x21, #0x1\n"
- "addvl x25, x25, #1\n"
- ".inst 0xa04086fc // ld1b { z28.b-z31.b }, pn9.b/Z, [x23]\n"
- "addvl x23, x23, #4\n"
+ "ld1b { z10.b }, p1/Z, [x24]\n"
+ "subs x20, x20, #0x1\n"
+ "addvl x24, x24, #1\n"
+ ".inst 0xa04086dc // ld1b { z28.b-z31.b }, pn9.b/Z, [x22]\n"
+ "addvl x22, x22, #4\n"
".inst 0xa1bc2540 // umopa za0.s, p1/M, p1/M, z10.b, z28.b\n"
".inst 0xa1bd2541 // umopa za1.s, p1/M, p1/M, z10.b, z29.b\n"
".inst 0xa1be2542 // umopa za2.s, p1/M, p1/M, z10.b, z30.b\n"
".inst 0xa1bf2543 // umopa za3.s, p1/M, p1/M, z10.b, z31.b\n"
"bgt 9b\n"
"10:" // K oddments: End
- "ld1w { z14.s }, p1/Z, [x25]\n"
- "addvl x25, x25, #1\n"
+ "ld1w { z14.s }, p1/Z, [x24]\n"
+ "addvl x24, x24, #1\n"
".inst 0xc09125c0 // addva za0.s, p1/M, p1/M, z14.s\n"
".inst 0xc09125c1 // addva za1.s, p1/M, p1/M, z14.s\n"
".inst 0xc09125c2 // addva za2.s, p1/M, p1/M, z14.s\n"
".inst 0xc09125c3 // addva za3.s, p1/M, p1/M, z14.s\n"
- "tbz x14, #1, 14f\n"
- "tbz x14, #0, 12f\n"
+ "tbz x13, #1, 14f\n"
+ "tbz x13, #0, 12f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"11:" // Store to partial result buffer: Store and refill: Loop
- ".inst 0xa040c5b8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x13]\n"
+ ".inst 0xa040c578 // ld1w { z24.s-z27.s }, pn9.b/Z, [x11]\n"
".inst 0xc086041c // mova { z28.s-z31.s }, za0h.s[x12]\n"
".inst 0xc0840700 // mova za0h.s[x12], { z24.s-z27.s }\n"
".inst 0xc0860428 // mova { z8.s-z11.s }, za1h.s[x12]\n"
- ".inst 0xa041c5b0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x13, #0x4, MUL VL]\n"
+ ".inst 0xa041c570 // ld1w { z16.s-z19.s }, pn9.b/Z, [x11, #0x4, MUL VL]\n"
".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
".inst 0xc0860458 // mova { z24.s-z27.s }, za2h.s[x12]\n"
".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
- ".inst 0xa042c5a4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x13, #0x8, MUL VL]\n"
+ ".inst 0xa042c564 // ld1w { z4.s-z7.s }, pn9.b/Z, [x11, #0x8, MUL VL]\n"
".inst 0xc0840482 // mova za2h.s[x12], { z4.s-z7.s }\n"
- ".inst 0xa043c5a4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x13, #0xc, MUL VL]\n"
+ ".inst 0xa043c564 // ld1w { z4.s-z7.s }, pn9.b/Z, [x11, #0xc, MUL VL]\n"
".inst 0xc0840483 // mova za3h.s[x12], { z4.s-z7.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- ".inst 0xa060c57c // st1w { z28.s-z31.s }, pn9.b, [x11]\n"
- "addvl x13, x13, #16\n"
- ".inst 0xa061c568 // st1w { z8.s-z11.s }, pn9.b, [x11, #0x4, MUL VL]\n"
- ".inst 0xa062c578 // st1w { z24.s-z27.s }, pn9.b, [x11, #0x8, MUL VL]\n"
- ".inst 0xa063c56c // st1w { z12.s-z15.s }, pn9.b, [x11, #0xc, MUL VL]\n"
+ "cmp x12, x19\n"
+ ".inst 0xa060c55c // st1w { z28.s-z31.s }, pn9.b, [x10]\n"
"addvl x11, x11, #16\n"
+ ".inst 0xa061c548 // st1w { z8.s-z11.s }, pn9.b, [x10, #0x4, MUL VL]\n"
+ ".inst 0xa062c558 // st1w { z24.s-z27.s }, pn9.b, [x10, #0x8, MUL VL]\n"
+ ".inst 0xa063c54c // st1w { z12.s-z15.s }, pn9.b, [x10, #0xc, MUL VL]\n"
+ "addvl x10, x10, #16\n"
"blt 11b\n"
"b 21f\n"
"12:" // Store to partial result buffer: Store only
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"13:" // Store to partial result buffer: Store only: Loop
".inst 0xc086041c // mova { z28.s-z31.s }, za0h.s[x12]\n"
".inst 0xc0860420 // mova { z0.s-z3.s }, za1h.s[x12]\n"
- ".inst 0xa060c57c // st1w { z28.s-z31.s }, pn9.b, [x11]\n"
+ ".inst 0xa060c55c // st1w { z28.s-z31.s }, pn9.b, [x10]\n"
".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
- ".inst 0xa061c560 // st1w { z0.s-z3.s }, pn9.b, [x11, #0x4, MUL VL]\n"
+ ".inst 0xa061c540 // st1w { z0.s-z3.s }, pn9.b, [x10, #0x4, MUL VL]\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- ".inst 0xa062c568 // st1w { z8.s-z11.s }, pn9.b, [x11, #0x8, MUL VL]\n"
- ".inst 0xa063c570 // st1w { z16.s-z19.s }, pn9.b, [x11, #0xc, MUL VL]\n"
- "addvl x11, x11, #16\n"
+ "cmp x12, x19\n"
+ ".inst 0xa062c548 // st1w { z8.s-z11.s }, pn9.b, [x10, #0x8, MUL VL]\n"
+ ".inst 0xa063c550 // st1w { z16.s-z19.s }, pn9.b, [x10, #0xc, MUL VL]\n"
+ "addvl x10, x10, #16\n"
"blt 13b\n"
"b 21f\n"
"14:" // Store to output array
- "ldr x24, [%x[args], %[offsetof_C]]\n"
- "add x24, x24, x28\n" // C += n
- "sub x23, x10, x9\n"
+ "ldr x23, [%x[args], %[offsetof_C]]\n"
+ "add x23, x23, x27\n" // C += n
+ "sub x22, x9, x28\n"
"ld1rw { z12.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
- "ldr x22, [%x[args], %[offsetof_ldcb]]\n"
- "madd x24, x9, x22, x24\n" // C += m * ldc
+ "ldr x21, [%x[args], %[offsetof_ldcb]]\n"
+ "madd x23, x28, x21, x23\n" // C += m * ldc
"ld1rw { z13.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
"ld1rw { z14.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
"ld1rw { z15.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
@@ -290,24 +290,24 @@ void sme2_interleaved_nomerge_u8q_mopa_1VLx4VL(const uint8_t *const A, const uin
"ld1rw { z1.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_c_offset]]\n"
"ld1rw { z21.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_minval]]\n"
"ld1rw { z20.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_maxval]]\n"
- "tbz x14, #2, 15f\n"
- "ldr w21, [%x[args], %[offsetof_n_0]]\n"
- "add x21, x21, x28\n"
- "ldr x20, [%x[rq], %[offsetof_Requantize32_per_channel_muls]]\n"
- "add x20, x20, x21, LSL #2\n"
- ".inst 0xa040c28c // ld1w { z12.s-z15.s }, p8/Z, [x20]\n"
- "ldr x20, [%x[rq], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
- "add x20, x20, x21, LSL #2\n"
- ".inst 0xa040c284 // ld1w { z4.s-z7.s }, p8/Z, [x20]\n"
+ "tbz x13, #2, 15f\n"
+ "ldr w20, [%x[args], %[offsetof_n_0]]\n"
+ "add x20, x20, x27\n"
+ "ldr x19, [%x[rq], %[offsetof_Requantize32_per_channel_muls]]\n"
+ "add x19, x19, x20, LSL #2\n"
+ ".inst 0xa040c26c // ld1w { z12.s-z15.s }, p8/Z, [x19]\n"
+ "ldr x19, [%x[rq], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+ "add x19, x19, x20, LSL #2\n"
+ ".inst 0xa040c264 // ld1w { z4.s-z7.s }, p8/Z, [x19]\n"
"15:" // Store to output array: Load per-channel parameters: End
- "cntw x20\n"
- "whilelt p0.b, x28, x27\n"
- "cmp x23, x20\n"
- "csel x20, x23, x20, LT\n"
- "lsr x21, x20, #0x1\n"
+ "cntw x19\n"
+ "whilelt p0.b, x27, x26\n"
+ "cmp x22, x19\n"
+ "csel x19, x22, x19, LT\n"
+ "lsr x20, x19, #0x1\n"
"mov x12, #0x0\n"
- "and x20, x20, #0x1\n"
- "cbz x21, 17f\n"
+ "and x19, x19, #0x1\n"
+ "cbz x20, 17f\n"
"16:" // Store to output array: Accumulator row 0 loop
".inst 0xc086001a // mova { z26.s-z27.s }, za0h.s[x12, 0:1]\n"
".inst 0xc086005c // mova { z28.s-z29.s }, za1h.s[x12, 0:1]\n"
@@ -317,7 +317,7 @@ void sme2_interleaved_nomerge_u8q_mopa_1VLx4VL(const uint8_t *const A, const uin
".inst 0xc1ada41c // sqdmulh { z28.s-z29.s }, { z28.s-z29.s }, z13.s\n"
".inst 0xc1aea416 // sqdmulh { z22.s-z23.s }, { z22.s-z23.s }, z14.s\n"
"add x12, x12, #0x2\n"
- "cmp x12, x21, LSL #1\n"
+ "cmp x12, x20, LSL #1\n"
".inst 0xc1afa410 // sqdmulh { z16.s-z17.s }, { z16.s-z17.s }, z15.s\n"
".inst 0xc1a4a23a // srshl { z26.s-z27.s }, { z26.s-z27.s }, z4.s\n"
".inst 0xc1a5a23c // srshl { z28.s-z29.s }, { z28.s-z29.s }, z5.s\n"
@@ -336,14 +336,14 @@ void sme2_interleaved_nomerge_u8q_mopa_1VLx4VL(const uint8_t *const A, const uin
"uzp1 z18.b, z27.b, z29.b\n"
"uzp1 z17.b, z23.b, z17.b\n"
"uzp1 z16.b, z19.b, z16.b\n"
- "st1b { z16.b }, p0, [x24]\n"
- "add x24, x24, x22\n"
+ "st1b { z16.b }, p0, [x23]\n"
+ "add x23, x23, x21\n"
"uzp1 z16.b, z18.b, z17.b\n"
- "st1b { z16.b }, p0, [x24]\n"
- "add x24, x24, x22\n"
+ "st1b { z16.b }, p0, [x23]\n"
+ "add x23, x23, x21\n"
"blt 16b\n"
"17:" // Store to output array: Accumulator row 0 oddments
- "cbz x20, 18f\n"
+ "cbz x19, 18f\n"
".inst 0xc0860002 // mova { z2.s-z3.s }, za0h.s[x12, 0:1]\n"
".inst 0xc0860058 // mova { z24.s-z25.s }, za1h.s[x12, 0:1]\n"
".inst 0xc1aca402 // sqdmulh { z2.s-z3.s }, { z2.s-z3.s }, z12.s\n"
@@ -367,38 +367,38 @@ void sme2_interleaved_nomerge_u8q_mopa_1VLx4VL(const uint8_t *const A, const uin
".inst 0xc1b4c6aa // sclamp { z10.s-z11.s }, z21.s, z20.s\n"
"uzp1 z16.b, z16.b, z10.b\n"
"uzp1 z16.b, z23.b, z16.b\n"
- "st1b { z16.b }, p0, [x24]\n"
+ "st1b { z16.b }, p0, [x23]\n"
"18:" // Store to output array: Accumulator row 0 oddments: End
"19:" // Store to output array: End
- "tbz x14, #0, 21f\n"
+ "tbz x13, #0, 21f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"20:" // Store to output array: Refill accumulators: Loop
- ".inst 0xa040c5b0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x13]\n"
+ ".inst 0xa040c570 // ld1w { z16.s-z19.s }, pn9.b/Z, [x11]\n"
".inst 0xc0840600 // mova za0h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa041c5ac // ld1w { z12.s-z15.s }, pn9.b/Z, [x13, #0x4, MUL VL]\n"
+ ".inst 0xa041c56c // ld1w { z12.s-z15.s }, pn9.b/Z, [x11, #0x4, MUL VL]\n"
".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xa042c5b0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x13, #0x8, MUL VL]\n"
+ ".inst 0xa042c570 // ld1w { z16.s-z19.s }, pn9.b/Z, [x11, #0x8, MUL VL]\n"
".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa043c5ac // ld1w { z12.s-z15.s }, pn9.b/Z, [x13, #0xc, MUL VL]\n"
+ ".inst 0xa043c56c // ld1w { z12.s-z15.s }, pn9.b/Z, [x11, #0xc, MUL VL]\n"
".inst 0xc0840583 // mova za3h.s[x12], { z12.s-z15.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- "addvl x13, x13, #16\n"
+ "cmp x12, x19\n"
+ "addvl x11, x11, #16\n"
"blt 20b\n"
"21:" // End block
- "incw x28, ALL, MUL #4\n"
- "cmp x28, x27\n"
+ "incw x27, ALL, MUL #4\n"
+ "cmp x27, x26\n"
"blt 3b\n"
- "incw x9\n"
- "cmp x9, x10\n"
- "mov x28, #0x0\n"
- "mov x26, x25\n"
+ "incw x28\n"
+ "cmp x28, x9\n"
+ "mov x27, #0x0\n"
+ "mov x25, x24\n"
"blt 3b\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_Requantize32_c_offset] "I" (offsetof(Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb)), [offsetof_n_0] "I" (offsetof(KernelArgs, n_0)), [rq] "r" (&rq)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
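
The hex ".inst" literals move in lockstep with the register renames, because each fixed-width word embeds its operand registers directly. For the loads and stores above, the base register Rn occupies bits [9:5], so renaming the base from x15 to x14 lowers the encoded word by 0x20, e.g. 0xa040c1f0 ([x15]) becomes 0xa040c1d0 ([x14]) in the first file's hunks. The helper below is an illustrative sketch of that relationship, assuming only the standard AArch64 Rn field placement; it is not part of the patch.

    #include <cstdint>

    // Rewrite the base-register field (bits [9:5]) of an encoded AArch64
    // load/store word, leaving every other field untouched.
    constexpr uint32_t with_base_reg(uint32_t inst, unsigned xn)
    {
        return (inst & ~(0x1Fu << 5)) | ((xn & 0x1Fu) << 5);
    }

    // Checked against encodings taken verbatim from the hunks above:
    // renaming x15 -> x14 changes only the Rn field of the ld1w word.
    static_assert(with_base_reg(0xa040c1f0u, 14) == 0xa040c1d0u,
                  "x15 -> x14 rewrites only the Rn field");

This is why the revert could not simply rename registers in the comments: the raw instruction words are the code that executes, and each one had to be regenerated for the restored register assignment.
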
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_2VLx2VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_2VLx2VL/generic.cpp
index cb0e9521e3..6c42012482 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_2VLx2VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_2VLx2VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __ARM_FEATURE_SVE
#ifdef ARM_COMPUTE_ENABLE_SME2
@@ -90,107 +90,107 @@ void sme2_interleaved_nomerge_u8q_mopa_2VLx2VL(const uint8_t *const A, const uin
KernelArgs args(A, B, C, ldc, M, N, K, bias, rq, n_0, accumulate, accumulator_buffer);
__asm__ __volatile__(
- "ldr x16, [%x[args], %[offsetof_flags]]\n"
+ "ldr x15, [%x[args], %[offsetof_flags]]\n"
".inst 0xd503477f // SMSTART ZA\n"
"ptrue p1.b\n"
".inst 0x25207811 // ptrue pn9.b\n"
- "ldr x15, [%x[args], %[offsetof_accumulator_buffer]]\n"
"ldr x14, [%x[args], %[offsetof_accumulator_buffer]]\n"
- "tbz x16, #0, 2f\n"
+ "ldr x13, [%x[args], %[offsetof_accumulator_buffer]]\n"
+ "tbz x15, #0, 2f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"1:" // Initial accumulator load from buffer: Loop
- ".inst 0xa040c5e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xa040c5c0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x14]\n"
".inst 0xc0840400 // mova za0h.s[x12], { z0.s-z3.s }\n"
- ".inst 0xa041c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa041c5cc // ld1w { z12.s-z15.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xa042c5e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa042c5c0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
".inst 0xc0840402 // mova za2h.s[x12], { z0.s-z3.s }\n"
- ".inst 0xa043c5fc // ld1w { z28.s-z31.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xa043c5dc // ld1w { z28.s-z31.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
".inst 0xc0840783 // mova za3h.s[x12], { z28.s-z31.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- "addvl x15, x15, #16\n"
+ "cmp x12, x19\n"
+ "addvl x14, x14, #16\n"
"blt 1b\n"
"2:" // Initial accumulator load from buffer: End
- "ldr w13, [%x[args], %[offsetof_M]]\n"
- "mov x11, #0x0\n"
+ "ldr w11, [%x[args], %[offsetof_M]]\n"
"mov x10, #0x0\n"
- "ldr w9, [%x[args], %[offsetof_N]]\n"
- "ldr x28, [%x[args], %[offsetof_A]]\n"
+ "mov x9, #0x0\n"
+ "ldr w28, [%x[args], %[offsetof_N]]\n"
+ "ldr x27, [%x[args], %[offsetof_A]]\n"
"3:" // M and N loop
- "mov x27, x28\n"
- ".inst 0x25a94550 // whilelt pn8.s, x10, x9, VLx2\n"
- "tbnz x16, #0, 4f\n"
- "ldr x20, [%x[args], %[offsetof_bias]]\n"
+ "mov x26, x27\n"
+ ".inst 0x25bc4530 // whilelt pn8.s, x9, x28, VLx2\n"
+ "tbnz x15, #0, 4f\n"
+ "ldr x19, [%x[args], %[offsetof_bias]]\n"
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
- "cbz x20, 5f\n"
- ".inst 0xa00a4295 // ldnt1w { z20.s-z21.s }, p8/Z, [x20, x10, LSL #2]\n"
+ "cbz x19, 5f\n"
+ ".inst 0xa0094275 // ldnt1w { z20.s-z21.s }, p8/Z, [x19, x9, LSL #2]\n"
".inst 0xc0902680 // addha za0.s, p1/M, p1/M, z20.s\n"
".inst 0xc09026a1 // addha za1.s, p1/M, p1/M, z21.s\n"
".inst 0xc0902682 // addha za2.s, p1/M, p1/M, z20.s\n"
".inst 0xc09026a3 // addha za3.s, p1/M, p1/M, z21.s\n"
"4:" // Prepare accumulators: Test for last block
+ "mov x19, x9\n"
"mov x20, x10\n"
- "mov x21, x11\n"
+ "incw x19, ALL, MUL #2\n"
"incw x20, ALL, MUL #2\n"
- "incw x21, ALL, MUL #2\n"
- "cmp x20, x9\n"
- "csel x21, x11, x21, LT\n"
- "mov x20, x16\n"
- "bfm x16, XZR, #0x0, #0x0 // bfc x16, #0x0, #0x1\n"
- "cmp x21, x13\n"
- "csel x16, x20, x16, LT\n"
+ "cmp x19, x28\n"
+ "csel x20, x10, x20, LT\n"
+ "mov x19, x15\n"
+ "bfm x15, XZR, #0x0, #0x0 // bfc x15, #0x0, #0x1\n"
+ "cmp x20, x11\n"
+ "csel x15, x19, x15, LT\n"
"5:" // Prepare accumulators: End
- "ldr x20, [%x[args], %[offsetof_K]]\n"
- "add x20, x20, #0x3\n"
- "lsr x20, x20, #0x2\n"
- "ldr x23, [%x[args], %[offsetof_B]]\n"
- "lsr x22, x20, #0x2\n"
- "and x21, x20, #0x3\n"
- "ldr x20, [%x[args], %[offsetof_kstride_bytes]]\n"
- "madd x23, x10, x20, x23\n" // bptr = B + n * kstride_bytes
- "cbz x22, 8f\n"
- "subs x22, x22, #0x1\n"
- ".inst 0xa040077e // ld1b { z30.b-z31.b }, pn9.b/Z, [x27]\n"
- ".inst 0xa04006f1 // ldnt1b { z16.b-z17.b }, pn9.b/Z, [x23]\n"
- ".inst 0xa041076e // ld1b { z14.b-z15.b }, pn9.b/Z, [x27, #0x2, MUL VL]\n"
- ".inst 0xa04106e9 // ldnt1b { z8.b-z9.b }, pn9.b/Z, [x23, #0x2, MUL VL]\n"
- ".inst 0xa0420760 // ld1b { z0.b-z1.b }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
- ".inst 0xa14206fc // ldnt1b { z20.b, z28.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xa0430764 // ld1b { z4.b-z5.b }, pn9.b/Z, [x27, #0x6, MUL VL]\n"
- "addvl x27, x27, #8\n"
- ".inst 0xa14306ea // ldnt1b { z2.b, z10.b }, pn9.b/Z, [x23, #0x6, MUL VL]\n"
- "addvl x23, x23, #8\n"
+ "ldr x19, [%x[args], %[offsetof_K]]\n"
+ "add x19, x19, #0x3\n"
+ "lsr x19, x19, #0x2\n"
+ "ldr x22, [%x[args], %[offsetof_B]]\n"
+ "lsr x21, x19, #0x2\n"
+ "and x20, x19, #0x3\n"
+ "ldr x19, [%x[args], %[offsetof_kstride_bytes]]\n"
+ "madd x22, x9, x19, x22\n" // bptr = B + n * kstride_bytes
+ "cbz x21, 8f\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xa040075e // ld1b { z30.b-z31.b }, pn9.b/Z, [x26]\n"
+ ".inst 0xa04006d1 // ldnt1b { z16.b-z17.b }, pn9.b/Z, [x22]\n"
+ ".inst 0xa041074e // ld1b { z14.b-z15.b }, pn9.b/Z, [x26, #0x2, MUL VL]\n"
+ ".inst 0xa04106c9 // ldnt1b { z8.b-z9.b }, pn9.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xa0420740 // ld1b { z0.b-z1.b }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa14206dc // ldnt1b { z20.b, z28.b }, pn9.b/Z, [x22, #0x4, MUL VL]\n"
+ ".inst 0xa0430744 // ld1b { z4.b-z5.b }, pn9.b/Z, [x26, #0x6, MUL VL]\n"
+ "addvl x26, x26, #8\n"
+ ".inst 0xa14306ca // ldnt1b { z2.b, z10.b }, pn9.b/Z, [x22, #0x6, MUL VL]\n"
+ "addvl x22, x22, #8\n"
"ble 7f\n"
"6:" // K loop
".inst 0xa1b027c0 // umopa za0.s, p1/M, p1/M, z30.b, z16.b\n"
- "subs x22, x22, #0x1\n"
+ "subs x21, x21, #0x1\n"
".inst 0xa1b127c1 // umopa za1.s, p1/M, p1/M, z30.b, z17.b\n"
".inst 0xa1b027e2 // umopa za2.s, p1/M, p1/M, z31.b, z16.b\n"
".inst 0xa1b127e3 // umopa za3.s, p1/M, p1/M, z31.b, z17.b\n"
- ".inst 0xa040077e // ld1b { z30.b-z31.b }, pn9.b/Z, [x27]\n"
+ ".inst 0xa040075e // ld1b { z30.b-z31.b }, pn9.b/Z, [x26]\n"
".inst 0xa1a825c0 // umopa za0.s, p1/M, p1/M, z14.b, z8.b\n"
- ".inst 0xa04006f1 // ldnt1b { z16.b-z17.b }, pn9.b/Z, [x23]\n"
+ ".inst 0xa04006d1 // ldnt1b { z16.b-z17.b }, pn9.b/Z, [x22]\n"
".inst 0xa1a925c1 // umopa za1.s, p1/M, p1/M, z14.b, z9.b\n"
".inst 0xa1a825e2 // umopa za2.s, p1/M, p1/M, z15.b, z8.b\n"
".inst 0xa1a925e3 // umopa za3.s, p1/M, p1/M, z15.b, z9.b\n"
- ".inst 0xa041076e // ld1b { z14.b-z15.b }, pn9.b/Z, [x27, #0x2, MUL VL]\n"
+ ".inst 0xa041074e // ld1b { z14.b-z15.b }, pn9.b/Z, [x26, #0x2, MUL VL]\n"
".inst 0xa1b42400 // umopa za0.s, p1/M, p1/M, z0.b, z20.b\n"
- ".inst 0xa04106e9 // ldnt1b { z8.b-z9.b }, pn9.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xa04106c9 // ldnt1b { z8.b-z9.b }, pn9.b/Z, [x22, #0x2, MUL VL]\n"
".inst 0xa1bc2401 // umopa za1.s, p1/M, p1/M, z0.b, z28.b\n"
".inst 0xa1b42422 // umopa za2.s, p1/M, p1/M, z1.b, z20.b\n"
".inst 0xa1bc2423 // umopa za3.s, p1/M, p1/M, z1.b, z28.b\n"
- ".inst 0xa0420760 // ld1b { z0.b-z1.b }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
- ".inst 0xa14206fc // ldnt1b { z20.b, z28.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa0420740 // ld1b { z0.b-z1.b }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa14206dc // ldnt1b { z20.b, z28.b }, pn9.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0xa1a22480 // umopa za0.s, p1/M, p1/M, z4.b, z2.b\n"
".inst 0xa1aa2481 // umopa za1.s, p1/M, p1/M, z4.b, z10.b\n"
".inst 0xa1a224a2 // umopa za2.s, p1/M, p1/M, z5.b, z2.b\n"
".inst 0xa1aa24a3 // umopa za3.s, p1/M, p1/M, z5.b, z10.b\n"
- ".inst 0xa0430764 // ld1b { z4.b-z5.b }, pn9.b/Z, [x27, #0x6, MUL VL]\n"
- "addvl x27, x27, #8\n"
- ".inst 0xa14306ea // ldnt1b { z2.b, z10.b }, pn9.b/Z, [x23, #0x6, MUL VL]\n"
- "addvl x23, x23, #8\n"
+ ".inst 0xa0430744 // ld1b { z4.b-z5.b }, pn9.b/Z, [x26, #0x6, MUL VL]\n"
+ "addvl x26, x26, #8\n"
+ ".inst 0xa14306ca // ldnt1b { z2.b, z10.b }, pn9.b/Z, [x22, #0x6, MUL VL]\n"
+ "addvl x22, x22, #8\n"
"bgt 6b\n"
"7:" // K loop tail
".inst 0xa1b027c0 // umopa za0.s, p1/M, p1/M, z30.b, z16.b\n"
@@ -210,107 +210,107 @@ void sme2_interleaved_nomerge_u8q_mopa_2VLx2VL(const uint8_t *const A, const uin
".inst 0xa1a224a2 // umopa za2.s, p1/M, p1/M, z5.b, z2.b\n"
".inst 0xa1aa24a3 // umopa za3.s, p1/M, p1/M, z5.b, z10.b\n"
"8:" // K oddments
- "cbz x21, 10f\n"
+ "cbz x20, 10f\n"
"9:" // K oddments: Loop
- ".inst 0xa040077e // ld1b { z30.b-z31.b }, pn9.b/Z, [x27]\n"
- "subs x21, x21, #0x1\n"
- "addvl x27, x27, #2\n"
- ".inst 0xa04006f0 // ld1b { z16.b-z17.b }, pn9.b/Z, [x23]\n"
- "addvl x23, x23, #2\n"
+ ".inst 0xa040075e // ld1b { z30.b-z31.b }, pn9.b/Z, [x26]\n"
+ "subs x20, x20, #0x1\n"
+ "addvl x26, x26, #2\n"
+ ".inst 0xa04006d0 // ld1b { z16.b-z17.b }, pn9.b/Z, [x22]\n"
+ "addvl x22, x22, #2\n"
".inst 0xa1b027c0 // umopa za0.s, p1/M, p1/M, z30.b, z16.b\n"
".inst 0xa1b127c1 // umopa za1.s, p1/M, p1/M, z30.b, z17.b\n"
".inst 0xa1b027e2 // umopa za2.s, p1/M, p1/M, z31.b, z16.b\n"
".inst 0xa1b127e3 // umopa za3.s, p1/M, p1/M, z31.b, z17.b\n"
"bgt 9b\n"
"10:" // K oddments: End
- ".inst 0xa040476e // ld1w { z14.s-z15.s }, pn9.b/Z, [x27]\n"
- "addvl x27, x27, #2\n"
+ ".inst 0xa040474e // ld1w { z14.s-z15.s }, pn9.b/Z, [x26]\n"
+ "addvl x26, x26, #2\n"
".inst 0xc09125c0 // addva za0.s, p1/M, p1/M, z14.s\n"
".inst 0xc09125c1 // addva za1.s, p1/M, p1/M, z14.s\n"
".inst 0xc09125e2 // addva za2.s, p1/M, p1/M, z15.s\n"
".inst 0xc09125e3 // addva za3.s, p1/M, p1/M, z15.s\n"
- "tbz x16, #1, 14f\n"
- "tbz x16, #0, 12f\n"
+ "tbz x15, #1, 14f\n"
+ "tbz x15, #0, 12f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"11:" // Store to partial result buffer: Store and refill: Loop
- ".inst 0xa040c5fc // ld1w { z28.s-z31.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xa040c5dc // ld1w { z28.s-z31.s }, pn9.b/Z, [x14]\n"
".inst 0xc0860408 // mova { z8.s-z11.s }, za0h.s[x12]\n"
".inst 0xc0840780 // mova za0h.s[x12], { z28.s-z31.s }\n"
".inst 0xc0860434 // mova { z20.s-z23.s }, za1h.s[x12]\n"
- ".inst 0xa041c5f8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa041c5d8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
".inst 0xc0840701 // mova za1h.s[x12], { z24.s-z27.s }\n"
".inst 0xc086045c // mova { z28.s-z31.s }, za2h.s[x12]\n"
".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
- ".inst 0xa042c5f8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa042c5d8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
".inst 0xc0840702 // mova za2h.s[x12], { z24.s-z27.s }\n"
- ".inst 0xa043c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xa043c5cc // ld1w { z12.s-z15.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
".inst 0xc0840583 // mova za3h.s[x12], { z12.s-z15.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- ".inst 0xa060c5c8 // st1w { z8.s-z11.s }, pn9.b, [x14]\n"
- "addvl x15, x15, #16\n"
- ".inst 0xa061c5d4 // st1w { z20.s-z23.s }, pn9.b, [x14, #0x4, MUL VL]\n"
- ".inst 0xa062c5dc // st1w { z28.s-z31.s }, pn9.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c5d0 // st1w { z16.s-z19.s }, pn9.b, [x14, #0xc, MUL VL]\n"
+ "cmp x12, x19\n"
+ ".inst 0xa060c5a8 // st1w { z8.s-z11.s }, pn9.b, [x13]\n"
"addvl x14, x14, #16\n"
+ ".inst 0xa061c5b4 // st1w { z20.s-z23.s }, pn9.b, [x13, #0x4, MUL VL]\n"
+ ".inst 0xa062c5bc // st1w { z28.s-z31.s }, pn9.b, [x13, #0x8, MUL VL]\n"
+ ".inst 0xa063c5b0 // st1w { z16.s-z19.s }, pn9.b, [x13, #0xc, MUL VL]\n"
+ "addvl x13, x13, #16\n"
"blt 11b\n"
"b 24f\n"
"12:" // Store to partial result buffer: Store only
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"13:" // Store to partial result buffer: Store only: Loop
".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
- ".inst 0xa060c5d0 // st1w { z16.s-z19.s }, pn9.b, [x14]\n"
+ ".inst 0xa060c5b0 // st1w { z16.s-z19.s }, pn9.b, [x13]\n"
".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
- ".inst 0xa061c5c4 // st1w { z4.s-z7.s }, pn9.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xa061c5a4 // st1w { z4.s-z7.s }, pn9.b, [x13, #0x4, MUL VL]\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- ".inst 0xa062c5c8 // st1w { z8.s-z11.s }, pn9.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c5cc // st1w { z12.s-z15.s }, pn9.b, [x14, #0xc, MUL VL]\n"
- "addvl x14, x14, #16\n"
+ "cmp x12, x19\n"
+ ".inst 0xa062c5a8 // st1w { z8.s-z11.s }, pn9.b, [x13, #0x8, MUL VL]\n"
+ ".inst 0xa063c5ac // st1w { z12.s-z15.s }, pn9.b, [x13, #0xc, MUL VL]\n"
+ "addvl x13, x13, #16\n"
"blt 13b\n"
"b 24f\n"
"14:" // Store to output array
- "ldr x26, [%x[args], %[offsetof_C]]\n"
- "add x26, x26, x10\n" // C += n
- "sub x25, x13, x11\n"
+ "ldr x25, [%x[args], %[offsetof_C]]\n"
+ "add x25, x25, x9\n" // C += n
+ "sub x24, x11, x10\n"
"ld1rw { z2.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
- "ldr x24, [%x[args], %[offsetof_ldcb]]\n"
- "madd x26, x11, x24, x26\n" // C += m * ldc
+ "ldr x23, [%x[args], %[offsetof_ldcb]]\n"
+ "madd x25, x10, x23, x25\n" // C += m * ldc
"ld1rw { z3.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
"ld1rw { z0.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
"ld1rw { z1.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
"ld1rw { z11.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_c_offset]]\n"
"ld1rw { z25.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_minval]]\n"
"ld1rw { z24.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_maxval]]\n"
- "tbz x16, #2, 15f\n"
- "ldr w21, [%x[args], %[offsetof_n_0]]\n"
- "add x21, x21, x10\n"
- "ldr x20, [%x[rq], %[offsetof_Requantize32_per_channel_muls]]\n"
- "add x20, x20, x21, LSL #2\n"
- ".inst 0xa0404282 // ld1w { z2.s-z3.s }, p8/Z, [x20]\n"
- "ldr x20, [%x[rq], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
- "add x20, x20, x21, LSL #2\n"
- ".inst 0xa0404280 // ld1w { z0.s-z1.s }, p8/Z, [x20]\n"
+ "tbz x15, #2, 15f\n"
+ "ldr w20, [%x[args], %[offsetof_n_0]]\n"
+ "add x20, x20, x9\n"
+ "ldr x19, [%x[rq], %[offsetof_Requantize32_per_channel_muls]]\n"
+ "add x19, x19, x20, LSL #2\n"
+ ".inst 0xa0404262 // ld1w { z2.s-z3.s }, p8/Z, [x19]\n"
+ "ldr x19, [%x[rq], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+ "add x19, x19, x20, LSL #2\n"
+ ".inst 0xa0404260 // ld1w { z0.s-z1.s }, p8/Z, [x19]\n"
"15:" // Store to output array: Load per-channel parameters: End
- "cntw x23\n"
- "whilelt p0.h, x10, x9\n"
- "cmp x25, x23\n"
- "csel x22, x25, x23, LT\n"
- "lsr x21, x22, #0x2\n"
+ "cntw x22\n"
+ "whilelt p0.h, x9, x28\n"
+ "cmp x24, x22\n"
+ "csel x21, x24, x22, LT\n"
+ "lsr x20, x21, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 17f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 17f\n"
"16:" // Store to output array: Accumulator row 0 loop
".inst 0xc086040c // mova { z12.s-z15.s }, za0h.s[x12]\n"
".inst 0xc086043c // mova { z28.s-z31.s }, za1h.s[x12]\n"
".inst 0xc1a2ac0c // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z2.s\n"
".inst 0xc1a3ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z3.s\n"
"add x12, x12, #0x4\n"
- "cmp x12, x21, LSL #2\n"
+ "cmp x12, x20, LSL #2\n"
".inst 0xc1a0aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z0.s\n"
".inst 0xc1a1aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z1.s\n"
".inst 0xc1abab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z11.s\n"
@@ -318,25 +318,25 @@ void sme2_interleaved_nomerge_u8q_mopa_2VLx2VL(const uint8_t *const A, const uin
".inst 0xc1b8cf2c // sclamp { z12.s-z15.s }, z25.s, z24.s\n"
".inst 0xc1b8cf3c // sclamp { z28.s-z31.s }, z25.s, z24.s\n"
"uzp1 z16.h, z12.h, z28.h\n"
- "st1b { z16.h }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z16.h }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"uzp1 z16.h, z13.h, z29.h\n"
"uzp1 z17.h, z14.h, z30.h\n"
- "st1b { z16.h }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z16.h }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"uzp1 z16.h, z15.h, z31.h\n"
- "st1b { z17.h }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1b { z16.h }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z17.h }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1b { z16.h }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"blt 16b\n"
"17:" // Store to output array: Accumulator row 0 oddments
- "cbz x20, 18f\n"
+ "cbz x19, 18f\n"
".inst 0xc086041c // mova { z28.s-z31.s }, za0h.s[x12]\n"
".inst 0xc086042c // mova { z12.s-z15.s }, za1h.s[x12]\n"
".inst 0xc1a2ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z2.s\n"
".inst 0xc1a3ac0c // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z3.s\n"
- "subs x20, x20, #0x1\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc1a0aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z0.s\n"
".inst 0xc1a1aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z1.s\n"
".inst 0xc1abab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z11.s\n"
@@ -344,34 +344,34 @@ void sme2_interleaved_nomerge_u8q_mopa_2VLx2VL(const uint8_t *const A, const uin
".inst 0xc1b8cf3c // sclamp { z28.s-z31.s }, z25.s, z24.s\n"
".inst 0xc1b8cf2c // sclamp { z12.s-z15.s }, z25.s, z24.s\n"
"uzp1 z16.h, z28.h, z12.h\n"
- "st1b { z16.h }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z16.h }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 18f\n"
- "subs x20, x20, #0x1\n"
+ "subs x19, x19, #0x1\n"
"uzp1 z16.h, z29.h, z13.h\n"
- "st1b { z16.h }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z16.h }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 18f\n"
"uzp1 z16.h, z30.h, z14.h\n"
- "st1b { z16.h }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z16.h }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"18:" // Store to output array: Accumulator row 0 oddments: End
- "subs x25, x25, x22\n"
+ "subs x24, x24, x21\n"
"beq 22f\n"
- "whilelt p0.h, x10, x9\n"
- "cmp x25, x23\n"
- "csel x20, x25, x23, LT\n"
- "lsr x21, x20, #0x2\n"
+ "whilelt p0.h, x9, x28\n"
+ "cmp x24, x22\n"
+ "csel x19, x24, x22, LT\n"
+ "lsr x20, x19, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x20, #0x3\n"
- "cbz x21, 20f\n"
+ "and x19, x19, #0x3\n"
+ "cbz x20, 20f\n"
"19:" // Store to output array: Accumulator row 1 loop
".inst 0xc0860444 // mova { z4.s-z7.s }, za2h.s[x12]\n"
".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
".inst 0xc1a2ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z2.s\n"
".inst 0xc1a3ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z3.s\n"
"add x12, x12, #0x4\n"
- "cmp x12, x21, LSL #2\n"
+ "cmp x12, x20, LSL #2\n"
".inst 0xc1a0aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z0.s\n"
".inst 0xc1a1aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z1.s\n"
".inst 0xc1abab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z11.s\n"
@@ -379,25 +379,25 @@ void sme2_interleaved_nomerge_u8q_mopa_2VLx2VL(const uint8_t *const A, const uin
".inst 0xc1b8cf24 // sclamp { z4.s-z7.s }, z25.s, z24.s\n"
".inst 0xc1b8cf30 // sclamp { z16.s-z19.s }, z25.s, z24.s\n"
"uzp1 z16.h, z4.h, z16.h\n"
- "st1b { z16.h }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z16.h }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"uzp1 z16.h, z5.h, z17.h\n"
"uzp1 z17.h, z6.h, z18.h\n"
- "st1b { z16.h }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z16.h }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"uzp1 z16.h, z7.h, z19.h\n"
- "st1b { z17.h }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1b { z16.h }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z17.h }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1b { z16.h }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"blt 19b\n"
"20:" // Store to output array: Accumulator row 1 oddments
- "cbz x20, 21f\n"
+ "cbz x19, 21f\n"
".inst 0xc0860454 // mova { z20.s-z23.s }, za2h.s[x12]\n"
".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
".inst 0xc1a2ac14 // sqdmulh { z20.s-z23.s }, { z20.s-z23.s }, z2.s\n"
".inst 0xc1a3ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z3.s\n"
- "subs x20, x20, #0x1\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc1a0aa34 // srshl { z20.s-z23.s }, { z20.s-z23.s }, z0.s\n"
".inst 0xc1a1aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z1.s\n"
".inst 0xc1abab14 // add { z20.s-z23.s }, { z20.s-z23.s }, z11.s\n"
@@ -405,47 +405,47 @@ void sme2_interleaved_nomerge_u8q_mopa_2VLx2VL(const uint8_t *const A, const uin
".inst 0xc1b8cf34 // sclamp { z20.s-z23.s }, z25.s, z24.s\n"
".inst 0xc1b8cf30 // sclamp { z16.s-z19.s }, z25.s, z24.s\n"
"uzp1 z16.h, z20.h, z16.h\n"
- "st1b { z16.h }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z16.h }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 21f\n"
- "subs x20, x20, #0x1\n"
+ "subs x19, x19, #0x1\n"
"uzp1 z16.h, z21.h, z17.h\n"
- "st1b { z16.h }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z16.h }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 21f\n"
"uzp1 z16.h, z22.h, z18.h\n"
- "st1b { z16.h }, p0, [x26]\n"
+ "st1b { z16.h }, p0, [x25]\n"
"21:" // Store to output array: Accumulator row 1 oddments: End
"22:" // Store to output array: End
- "tbz x16, #0, 24f\n"
+ "tbz x15, #0, 24f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"23:" // Store to output array: Refill accumulators: Loop
- ".inst 0xa040c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xa040c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14]\n"
".inst 0xc0840600 // mova za0h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa041c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa041c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa042c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa042c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa043c5e4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xa043c5c4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
".inst 0xc0840483 // mova za3h.s[x12], { z4.s-z7.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- "addvl x15, x15, #16\n"
+ "cmp x12, x19\n"
+ "addvl x14, x14, #16\n"
"blt 23b\n"
"24:" // End block
- "incw x10, ALL, MUL #2\n"
- "cmp x10, x9\n"
+ "incw x9, ALL, MUL #2\n"
+ "cmp x9, x28\n"
"blt 3b\n"
- "incw x11, ALL, MUL #2\n"
- "cmp x11, x13\n"
- "mov x10, #0x0\n"
- "mov x28, x27\n"
+ "incw x10, ALL, MUL #2\n"
+ "cmp x10, x11\n"
+ "mov x9, #0x0\n"
+ "mov x27, x26\n"
"blt 3b\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_Requantize32_c_offset] "I" (offsetof(Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb)), [offsetof_n_0] "I" (offsetof(KernelArgs, n_0)), [rq] "r" (&rq)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_4VLx1VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_4VLx1VL/generic.cpp
index 8f8886b876..40d2fff8c2 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_4VLx1VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_4VLx1VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __ARM_FEATURE_SVE
#ifdef ARM_COMPUTE_ENABLE_SME2
@@ -90,107 +90,107 @@ void sme2_interleaved_nomerge_u8q_mopa_4VLx1VL(const uint8_t *const A, const uin
KernelArgs args(A, B, C, ldc, M, N, K, bias, rq, n_0, accumulate, accumulator_buffer);
__asm__ __volatile__(
- "ldr x16, [%x[args], %[offsetof_flags]]\n"
+ "ldr x15, [%x[args], %[offsetof_flags]]\n"
".inst 0xd503477f // SMSTART ZA\n"
"ptrue p1.b\n"
".inst 0x25207810 // ptrue pn8.b\n"
- "ldr x15, [%x[args], %[offsetof_accumulator_buffer]]\n"
"ldr x14, [%x[args], %[offsetof_accumulator_buffer]]\n"
- "tbz x16, #0, 2f\n"
+ "ldr x13, [%x[args], %[offsetof_accumulator_buffer]]\n"
+ "tbz x15, #0, 2f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"1:" // Initial accumulator load from buffer: Loop
- ".inst 0xa040c1fc // ld1w { z28.s-z31.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xa040c1dc // ld1w { z28.s-z31.s }, pn8.b/Z, [x14]\n"
".inst 0xc0840780 // mova za0h.s[x12], { z28.s-z31.s }\n"
- ".inst 0xa041c1ec // ld1w { z12.s-z15.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa041c1cc // ld1w { z12.s-z15.s }, pn8.b/Z, [x14, #0x4, MUL VL]\n"
".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xa042c1f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa042c1d4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x14, #0x8, MUL VL]\n"
".inst 0xc0840682 // mova za2h.s[x12], { z20.s-z23.s }\n"
- ".inst 0xa043c1f8 // ld1w { z24.s-z27.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xa043c1d8 // ld1w { z24.s-z27.s }, pn8.b/Z, [x14, #0xc, MUL VL]\n"
".inst 0xc0840703 // mova za3h.s[x12], { z24.s-z27.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- "addvl x15, x15, #16\n"
+ "cmp x12, x19\n"
+ "addvl x14, x14, #16\n"
"blt 1b\n"
"2:" // Initial accumulator load from buffer: End
- "ldr w13, [%x[args], %[offsetof_M]]\n"
- "mov x11, #0x0\n"
+ "ldr w11, [%x[args], %[offsetof_M]]\n"
"mov x10, #0x0\n"
- "ldr w9, [%x[args], %[offsetof_N]]\n"
- "ldr x28, [%x[args], %[offsetof_A]]\n"
+ "mov x9, #0x0\n"
+ "ldr w28, [%x[args], %[offsetof_N]]\n"
+ "ldr x27, [%x[args], %[offsetof_A]]\n"
"3:" // M and N loop
- "mov x27, x28\n"
- "whilelt p0.s, x10, x9\n"
- "tbnz x16, #0, 4f\n"
- "ldr x20, [%x[args], %[offsetof_bias]]\n"
+ "mov x26, x27\n"
+ "whilelt p0.s, x9, x28\n"
+ "tbnz x15, #0, 4f\n"
+ "ldr x19, [%x[args], %[offsetof_bias]]\n"
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
- "cbz x20, 5f\n"
- "ldnt1w { z15.s }, p0/Z, [x20, x10, LSL #2]\n"
+ "cbz x19, 5f\n"
+ "ldnt1w { z15.s }, p0/Z, [x19, x9, LSL #2]\n"
".inst 0xc09025e0 // addha za0.s, p1/M, p1/M, z15.s\n"
".inst 0xc09025e1 // addha za1.s, p1/M, p1/M, z15.s\n"
".inst 0xc09025e2 // addha za2.s, p1/M, p1/M, z15.s\n"
".inst 0xc09025e3 // addha za3.s, p1/M, p1/M, z15.s\n"
"4:" // Prepare accumulators: Test for last block
+ "mov x19, x9\n"
"mov x20, x10\n"
- "mov x21, x11\n"
- "incw x20\n"
- "incw x21, ALL, MUL #4\n"
- "cmp x20, x9\n"
- "csel x21, x11, x21, LT\n"
- "mov x20, x16\n"
- "bfm x16, XZR, #0x0, #0x0 // bfc x16, #0x0, #0x1\n"
- "cmp x21, x13\n"
- "csel x16, x20, x16, LT\n"
+ "incw x19\n"
+ "incw x20, ALL, MUL #4\n"
+ "cmp x19, x28\n"
+ "csel x20, x10, x20, LT\n"
+ "mov x19, x15\n"
+ "bfm x15, XZR, #0x0, #0x0 // bfc x15, #0x0, #0x1\n"
+ "cmp x20, x11\n"
+ "csel x15, x19, x15, LT\n"
"5:" // Prepare accumulators: End
- "ldr x20, [%x[args], %[offsetof_K]]\n"
- "add x20, x20, #0x3\n"
- "lsr x20, x20, #0x2\n"
- "ldr x23, [%x[args], %[offsetof_B]]\n"
- "lsr x22, x20, #0x2\n"
- "and x21, x20, #0x3\n"
- "ldr x20, [%x[args], %[offsetof_kstride_bytes]]\n"
- "madd x23, x10, x20, x23\n" // bptr = B + n * kstride_bytes
- "cbz x22, 8f\n"
- "subs x22, x22, #0x1\n"
- ".inst 0xa1408372 // ld1b { z18.b, z22.b, z26.b, z30.b }, pn8.b/Z, [x27]\n"
- "ldnt1b { z0.b }, p1/Z, [x23]\n"
- ".inst 0xa1418373 // ld1b { z19.b, z23.b, z27.b, z31.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
- "ldnt1b { z9.b }, p1/Z, [x23, #1, MUL VL]\n"
- ".inst 0xa1428370 // ld1b { z16.b, z20.b, z24.b, z28.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
- "ldnt1b { z21.b }, p1/Z, [x23, #2, MUL VL]\n"
- ".inst 0xa1438362 // ld1b { z2.b, z6.b, z10.b, z14.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
- "addvl x27, x27, #16\n"
- "ldnt1b { z12.b }, p1/Z, [x23, #3, MUL VL]\n"
- "addvl x23, x23, #4\n"
+ "ldr x19, [%x[args], %[offsetof_K]]\n"
+ "add x19, x19, #0x3\n"
+ "lsr x19, x19, #0x2\n"
+ "ldr x22, [%x[args], %[offsetof_B]]\n"
+ "lsr x21, x19, #0x2\n"
+ "and x20, x19, #0x3\n"
+ "ldr x19, [%x[args], %[offsetof_kstride_bytes]]\n"
+ "madd x22, x9, x19, x22\n" // bptr = B + n * kstride_bytes
+ "cbz x21, 8f\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xa1408352 // ld1b { z18.b, z22.b, z26.b, z30.b }, pn8.b/Z, [x26]\n"
+ "ldnt1b { z0.b }, p1/Z, [x22]\n"
+ ".inst 0xa1418353 // ld1b { z19.b, z23.b, z27.b, z31.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ "ldnt1b { z9.b }, p1/Z, [x22, #1, MUL VL]\n"
+ ".inst 0xa1428350 // ld1b { z16.b, z20.b, z24.b, z28.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ "ldnt1b { z21.b }, p1/Z, [x22, #2, MUL VL]\n"
+ ".inst 0xa1438342 // ld1b { z2.b, z6.b, z10.b, z14.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+ "addvl x26, x26, #16\n"
+ "ldnt1b { z12.b }, p1/Z, [x22, #3, MUL VL]\n"
+ "addvl x22, x22, #4\n"
"ble 7f\n"
"6:" // K loop
".inst 0xa1a02640 // umopa za0.s, p1/M, p1/M, z18.b, z0.b\n"
- "subs x22, x22, #0x1\n"
+ "subs x21, x21, #0x1\n"
".inst 0xa1a026c1 // umopa za1.s, p1/M, p1/M, z22.b, z0.b\n"
".inst 0xa1a02742 // umopa za2.s, p1/M, p1/M, z26.b, z0.b\n"
".inst 0xa1a027c3 // umopa za3.s, p1/M, p1/M, z30.b, z0.b\n"
- ".inst 0xa1408372 // ld1b { z18.b, z22.b, z26.b, z30.b }, pn8.b/Z, [x27]\n"
+ ".inst 0xa1408352 // ld1b { z18.b, z22.b, z26.b, z30.b }, pn8.b/Z, [x26]\n"
".inst 0xa1a92660 // umopa za0.s, p1/M, p1/M, z19.b, z9.b\n"
- "ldnt1b { z0.b }, p1/Z, [x23]\n"
+ "ldnt1b { z0.b }, p1/Z, [x22]\n"
".inst 0xa1a926e1 // umopa za1.s, p1/M, p1/M, z23.b, z9.b\n"
".inst 0xa1a92762 // umopa za2.s, p1/M, p1/M, z27.b, z9.b\n"
".inst 0xa1a927e3 // umopa za3.s, p1/M, p1/M, z31.b, z9.b\n"
- ".inst 0xa1418373 // ld1b { z19.b, z23.b, z27.b, z31.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa1418353 // ld1b { z19.b, z23.b, z27.b, z31.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
".inst 0xa1b52600 // umopa za0.s, p1/M, p1/M, z16.b, z21.b\n"
- "ldnt1b { z9.b }, p1/Z, [x23, #1, MUL VL]\n"
+ "ldnt1b { z9.b }, p1/Z, [x22, #1, MUL VL]\n"
".inst 0xa1b52681 // umopa za1.s, p1/M, p1/M, z20.b, z21.b\n"
".inst 0xa1b52702 // umopa za2.s, p1/M, p1/M, z24.b, z21.b\n"
".inst 0xa1b52783 // umopa za3.s, p1/M, p1/M, z28.b, z21.b\n"
- ".inst 0xa1428370 // ld1b { z16.b, z20.b, z24.b, z28.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
- "ldnt1b { z21.b }, p1/Z, [x23, #2, MUL VL]\n"
+ ".inst 0xa1428350 // ld1b { z16.b, z20.b, z24.b, z28.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
+ "ldnt1b { z21.b }, p1/Z, [x22, #2, MUL VL]\n"
".inst 0xa1ac2440 // umopa za0.s, p1/M, p1/M, z2.b, z12.b\n"
".inst 0xa1ac24c1 // umopa za1.s, p1/M, p1/M, z6.b, z12.b\n"
".inst 0xa1ac2542 // umopa za2.s, p1/M, p1/M, z10.b, z12.b\n"
".inst 0xa1ac25c3 // umopa za3.s, p1/M, p1/M, z14.b, z12.b\n"
- ".inst 0xa1438362 // ld1b { z2.b, z6.b, z10.b, z14.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
- "addvl x27, x27, #16\n"
- "ldnt1b { z12.b }, p1/Z, [x23, #3, MUL VL]\n"
- "addvl x23, x23, #4\n"
+ ".inst 0xa1438342 // ld1b { z2.b, z6.b, z10.b, z14.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
+ "addvl x26, x26, #16\n"
+ "ldnt1b { z12.b }, p1/Z, [x22, #3, MUL VL]\n"
+ "addvl x22, x22, #4\n"
"bgt 6b\n"
"7:" // K loop tail
".inst 0xa1a02640 // umopa za0.s, p1/M, p1/M, z18.b, z0.b\n"
@@ -210,294 +210,294 @@ void sme2_interleaved_nomerge_u8q_mopa_4VLx1VL(const uint8_t *const A, const uin
".inst 0xa1ac2542 // umopa za2.s, p1/M, p1/M, z10.b, z12.b\n"
".inst 0xa1ac25c3 // umopa za3.s, p1/M, p1/M, z14.b, z12.b\n"
"8:" // K oddments
- "cbz x21, 10f\n"
+ "cbz x20, 10f\n"
"9:" // K oddments: Loop
- ".inst 0xa1408372 // ld1b { z18.b, z22.b, z26.b, z30.b }, pn8.b/Z, [x27]\n"
- "subs x21, x21, #0x1\n"
- "addvl x27, x27, #4\n"
- "ld1b { z0.b }, p1/Z, [x23]\n"
- "addvl x23, x23, #1\n"
+ ".inst 0xa1408352 // ld1b { z18.b, z22.b, z26.b, z30.b }, pn8.b/Z, [x26]\n"
+ "subs x20, x20, #0x1\n"
+ "addvl x26, x26, #4\n"
+ "ld1b { z0.b }, p1/Z, [x22]\n"
+ "addvl x22, x22, #1\n"
".inst 0xa1a02640 // umopa za0.s, p1/M, p1/M, z18.b, z0.b\n"
".inst 0xa1a026c1 // umopa za1.s, p1/M, p1/M, z22.b, z0.b\n"
".inst 0xa1a02742 // umopa za2.s, p1/M, p1/M, z26.b, z0.b\n"
".inst 0xa1a027c3 // umopa za3.s, p1/M, p1/M, z30.b, z0.b\n"
"bgt 9b\n"
"10:" // K oddments: End
- ".inst 0xa040c360 // ld1w { z0.s-z3.s }, pn8.b/Z, [x27]\n"
- "addvl x27, x27, #4\n"
+ ".inst 0xa040c340 // ld1w { z0.s-z3.s }, pn8.b/Z, [x26]\n"
+ "addvl x26, x26, #4\n"
".inst 0xc0912400 // addva za0.s, p1/M, p1/M, z0.s\n"
".inst 0xc0912421 // addva za1.s, p1/M, p1/M, z1.s\n"
".inst 0xc0912442 // addva za2.s, p1/M, p1/M, z2.s\n"
".inst 0xc0912463 // addva za3.s, p1/M, p1/M, z3.s\n"
- "tbz x16, #1, 14f\n"
- "tbz x16, #0, 12f\n"
+ "tbz x15, #1, 14f\n"
+ "tbz x15, #0, 12f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"11:" // Store to partial result buffer: Store and refill: Loop
- ".inst 0xa040c1f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xa040c1d4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x14]\n"
".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
".inst 0xc0840680 // mova za0h.s[x12], { z20.s-z23.s }\n"
".inst 0xc0860428 // mova { z8.s-z11.s }, za1h.s[x12]\n"
- ".inst 0xa041c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa041c1c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x14, #0x4, MUL VL]\n"
".inst 0xc0840481 // mova za1h.s[x12], { z4.s-z7.s }\n"
".inst 0xc086044c // mova { z12.s-z15.s }, za2h.s[x12]\n"
".inst 0xc086047c // mova { z28.s-z31.s }, za3h.s[x12]\n"
- ".inst 0xa042c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa042c1c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x14, #0x8, MUL VL]\n"
".inst 0xc0840482 // mova za2h.s[x12], { z4.s-z7.s }\n"
- ".inst 0xa043c1f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xa043c1d4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x14, #0xc, MUL VL]\n"
".inst 0xc0840683 // mova za3h.s[x12], { z20.s-z23.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- ".inst 0xa060c1d0 // st1w { z16.s-z19.s }, pn8.b, [x14]\n"
- "addvl x15, x15, #16\n"
- ".inst 0xa061c1c8 // st1w { z8.s-z11.s }, pn8.b, [x14, #0x4, MUL VL]\n"
- ".inst 0xa062c1cc // st1w { z12.s-z15.s }, pn8.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c1dc // st1w { z28.s-z31.s }, pn8.b, [x14, #0xc, MUL VL]\n"
+ "cmp x12, x19\n"
+ ".inst 0xa060c1b0 // st1w { z16.s-z19.s }, pn8.b, [x13]\n"
"addvl x14, x14, #16\n"
+ ".inst 0xa061c1a8 // st1w { z8.s-z11.s }, pn8.b, [x13, #0x4, MUL VL]\n"
+ ".inst 0xa062c1ac // st1w { z12.s-z15.s }, pn8.b, [x13, #0x8, MUL VL]\n"
+ ".inst 0xa063c1bc // st1w { z28.s-z31.s }, pn8.b, [x13, #0xc, MUL VL]\n"
+ "addvl x13, x13, #16\n"
"blt 11b\n"
"b 30f\n"
"12:" // Store to partial result buffer: Store only
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"13:" // Store to partial result buffer: Store only: Loop
".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
".inst 0xc086042c // mova { z12.s-z15.s }, za1h.s[x12]\n"
- ".inst 0xa060c1d0 // st1w { z16.s-z19.s }, pn8.b, [x14]\n"
+ ".inst 0xa060c1b0 // st1w { z16.s-z19.s }, pn8.b, [x13]\n"
".inst 0xc0860454 // mova { z20.s-z23.s }, za2h.s[x12]\n"
".inst 0xc0860478 // mova { z24.s-z27.s }, za3h.s[x12]\n"
- ".inst 0xa061c1cc // st1w { z12.s-z15.s }, pn8.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xa061c1ac // st1w { z12.s-z15.s }, pn8.b, [x13, #0x4, MUL VL]\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- ".inst 0xa062c1d4 // st1w { z20.s-z23.s }, pn8.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c1d8 // st1w { z24.s-z27.s }, pn8.b, [x14, #0xc, MUL VL]\n"
- "addvl x14, x14, #16\n"
+ "cmp x12, x19\n"
+ ".inst 0xa062c1b4 // st1w { z20.s-z23.s }, pn8.b, [x13, #0x8, MUL VL]\n"
+ ".inst 0xa063c1b8 // st1w { z24.s-z27.s }, pn8.b, [x13, #0xc, MUL VL]\n"
+ "addvl x13, x13, #16\n"
"blt 13b\n"
"b 30f\n"
"14:" // Store to output array
- "ldr x26, [%x[args], %[offsetof_C]]\n"
- "add x26, x26, x10\n" // C += n
- "sub x25, x13, x11\n"
+ "ldr x25, [%x[args], %[offsetof_C]]\n"
+ "add x25, x25, x9\n" // C += n
+ "sub x24, x11, x10\n"
"ld1rw { z8.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
- "ldr x24, [%x[args], %[offsetof_ldcb]]\n"
- "madd x26, x11, x24, x26\n" // C += m * ldc
+ "ldr x23, [%x[args], %[offsetof_ldcb]]\n"
+ "madd x25, x10, x23, x25\n" // C += m * ldc
"ld1rw { z7.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
"ld1rw { z6.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_c_offset]]\n"
"ld1rw { z5.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_minval]]\n"
"ld1rw { z4.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_maxval]]\n"
- "tbz x16, #2, 15f\n"
- "ldr w21, [%x[args], %[offsetof_n_0]]\n"
- "add x21, x21, x10\n"
- "ldr x20, [%x[rq], %[offsetof_Requantize32_per_channel_muls]]\n"
- "add x20, x20, x21, LSL #2\n"
- "ld1w { z8.s }, p0/Z, [x20]\n"
- "ldr x20, [%x[rq], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
- "add x20, x20, x21, LSL #2\n"
- "ld1w { z7.s }, p0/Z, [x20]\n"
+ "tbz x15, #2, 15f\n"
+ "ldr w20, [%x[args], %[offsetof_n_0]]\n"
+ "add x20, x20, x9\n"
+ "ldr x19, [%x[rq], %[offsetof_Requantize32_per_channel_muls]]\n"
+ "add x19, x19, x20, LSL #2\n"
+ "ld1w { z8.s }, p0/Z, [x19]\n"
+ "ldr x19, [%x[rq], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
+ "add x19, x19, x20, LSL #2\n"
+ "ld1w { z7.s }, p0/Z, [x19]\n"
"15:" // Store to output array: Load per-channel parameters: End
- "cntw x23\n"
- "whilelt p0.s, x10, x9\n"
- "cmp x25, x23\n"
- "csel x22, x25, x23, LT\n"
- "lsr x21, x22, #0x2\n"
+ "cntw x22\n"
+ "whilelt p0.s, x9, x28\n"
+ "cmp x24, x22\n"
+ "csel x21, x24, x22, LT\n"
+ "lsr x20, x21, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 17f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 17f\n"
"16:" // Store to output array: Accumulator row 0 loop
".inst 0xc086040c // mova { z12.s-z15.s }, za0h.s[x12]\n"
".inst 0xc1a8ac0c // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z8.s\n"
"add x12, x12, #0x4\n"
".inst 0xc1a7aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z7.s\n"
- "cmp x12, x21, LSL #2\n"
+ "cmp x12, x20, LSL #2\n"
".inst 0xc1a6ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z6.s\n"
".inst 0xc1a4ccac // sclamp { z12.s-z15.s }, z5.s, z4.s\n"
- "st1b { z12.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1b { z13.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1b { z14.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1b { z15.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z12.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1b { z13.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1b { z14.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1b { z15.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"blt 16b\n"
"17:" // Store to output array: Accumulator row 0 oddments
- "cbz x20, 18f\n"
+ "cbz x19, 18f\n"
".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
".inst 0xc1a8ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z8.s\n"
- "subs x20, x20, #0x1\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc1a7aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z7.s\n"
".inst 0xc1a6ab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z6.s\n"
".inst 0xc1a4ccb0 // sclamp { z16.s-z19.s }, z5.s, z4.s\n"
- "st1b { z16.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z16.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 18f\n"
- "subs x20, x20, #0x1\n"
- "st1b { z17.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ "st1b { z17.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 18f\n"
- "st1b { z18.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z18.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"18:" // Store to output array: Accumulator row 0 oddments: End
- "subs x25, x25, x22\n"
+ "subs x24, x24, x21\n"
"beq 28f\n"
- "whilelt p0.s, x10, x9\n"
- "cmp x25, x23\n"
- "csel x22, x25, x23, LT\n"
- "lsr x21, x22, #0x2\n"
+ "whilelt p0.s, x9, x28\n"
+ "cmp x24, x22\n"
+ "csel x21, x24, x22, LT\n"
+ "lsr x20, x21, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 20f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 20f\n"
"19:" // Store to output array: Accumulator row 1 loop
".inst 0xc0860430 // mova { z16.s-z19.s }, za1h.s[x12]\n"
".inst 0xc1a8ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z8.s\n"
"add x12, x12, #0x4\n"
".inst 0xc1a7aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z7.s\n"
- "cmp x12, x21, LSL #2\n"
+ "cmp x12, x20, LSL #2\n"
".inst 0xc1a6ab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z6.s\n"
".inst 0xc1a4ccb0 // sclamp { z16.s-z19.s }, z5.s, z4.s\n"
- "st1b { z16.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1b { z17.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1b { z18.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1b { z19.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z16.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1b { z17.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1b { z18.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1b { z19.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"blt 19b\n"
"20:" // Store to output array: Accumulator row 1 oddments
- "cbz x20, 21f\n"
+ "cbz x19, 21f\n"
".inst 0xc086043c // mova { z28.s-z31.s }, za1h.s[x12]\n"
".inst 0xc1a8ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
- "subs x20, x20, #0x1\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc1a7aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
".inst 0xc1a6ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z6.s\n"
".inst 0xc1a4ccbc // sclamp { z28.s-z31.s }, z5.s, z4.s\n"
- "st1b { z28.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z28.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 21f\n"
- "subs x20, x20, #0x1\n"
- "st1b { z29.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ "st1b { z29.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 21f\n"
- "st1b { z30.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z30.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"21:" // Store to output array: Accumulator row 1 oddments: End
- "subs x25, x25, x22\n"
+ "subs x24, x24, x21\n"
"beq 28f\n"
- "whilelt p0.s, x10, x9\n"
- "cmp x25, x23\n"
- "csel x22, x25, x23, LT\n"
- "lsr x21, x22, #0x2\n"
+ "whilelt p0.s, x9, x28\n"
+ "cmp x24, x22\n"
+ "csel x21, x24, x22, LT\n"
+ "lsr x20, x21, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x22, #0x3\n"
- "cbz x21, 23f\n"
+ "and x19, x21, #0x3\n"
+ "cbz x20, 23f\n"
"22:" // Store to output array: Accumulator row 2 loop
".inst 0xc0860458 // mova { z24.s-z27.s }, za2h.s[x12]\n"
".inst 0xc1a8ac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z8.s\n"
"add x12, x12, #0x4\n"
".inst 0xc1a7aa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z7.s\n"
- "cmp x12, x21, LSL #2\n"
+ "cmp x12, x20, LSL #2\n"
".inst 0xc1a6ab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z6.s\n"
".inst 0xc1a4ccb8 // sclamp { z24.s-z27.s }, z5.s, z4.s\n"
- "st1b { z24.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1b { z25.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1b { z26.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1b { z27.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z24.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1b { z25.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1b { z26.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1b { z27.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"blt 22b\n"
"23:" // Store to output array: Accumulator row 2 oddments
- "cbz x20, 24f\n"
+ "cbz x19, 24f\n"
".inst 0xc086044c // mova { z12.s-z15.s }, za2h.s[x12]\n"
".inst 0xc1a8ac0c // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z8.s\n"
- "subs x20, x20, #0x1\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc1a7aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z7.s\n"
".inst 0xc1a6ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z6.s\n"
".inst 0xc1a4ccac // sclamp { z12.s-z15.s }, z5.s, z4.s\n"
- "st1b { z12.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z12.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 24f\n"
- "subs x20, x20, #0x1\n"
- "st1b { z13.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ "st1b { z13.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 24f\n"
- "st1b { z14.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z14.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"24:" // Store to output array: Accumulator row 2 oddments: End
- "subs x25, x25, x22\n"
+ "subs x24, x24, x21\n"
"beq 28f\n"
- "whilelt p0.s, x10, x9\n"
- "cmp x25, x23\n"
- "csel x20, x25, x23, LT\n"
- "lsr x21, x20, #0x2\n"
+ "whilelt p0.s, x9, x28\n"
+ "cmp x24, x22\n"
+ "csel x19, x24, x22, LT\n"
+ "lsr x20, x19, #0x2\n"
"mov x12, #0x0\n"
- "and x20, x20, #0x3\n"
- "cbz x21, 26f\n"
+ "and x19, x19, #0x3\n"
+ "cbz x20, 26f\n"
"25:" // Store to output array: Accumulator row 3 loop
".inst 0xc0860474 // mova { z20.s-z23.s }, za3h.s[x12]\n"
".inst 0xc1a8ac14 // sqdmulh { z20.s-z23.s }, { z20.s-z23.s }, z8.s\n"
"add x12, x12, #0x4\n"
".inst 0xc1a7aa34 // srshl { z20.s-z23.s }, { z20.s-z23.s }, z7.s\n"
- "cmp x12, x21, LSL #2\n"
+ "cmp x12, x20, LSL #2\n"
".inst 0xc1a6ab14 // add { z20.s-z23.s }, { z20.s-z23.s }, z6.s\n"
".inst 0xc1a4ccb4 // sclamp { z20.s-z23.s }, z5.s, z4.s\n"
- "st1b { z20.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1b { z21.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1b { z22.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1b { z23.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z20.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1b { z21.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1b { z22.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1b { z23.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"blt 25b\n"
"26:" // Store to output array: Accumulator row 3 oddments
- "cbz x20, 27f\n"
+ "cbz x19, 27f\n"
".inst 0xc0860460 // mova { z0.s-z3.s }, za3h.s[x12]\n"
".inst 0xc1a8ac00 // sqdmulh { z0.s-z3.s }, { z0.s-z3.s }, z8.s\n"
- "subs x20, x20, #0x1\n"
+ "subs x19, x19, #0x1\n"
".inst 0xc1a7aa20 // srshl { z0.s-z3.s }, { z0.s-z3.s }, z7.s\n"
".inst 0xc1a6ab00 // add { z0.s-z3.s }, { z0.s-z3.s }, z6.s\n"
".inst 0xc1a4cca0 // sclamp { z0.s-z3.s }, z5.s, z4.s\n"
- "st1b { z0.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "st1b { z0.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 27f\n"
- "subs x20, x20, #0x1\n"
- "st1b { z1.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ "subs x19, x19, #0x1\n"
+ "st1b { z1.s }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"beq 27f\n"
- "st1b { z2.s }, p0, [x26]\n"
+ "st1b { z2.s }, p0, [x25]\n"
"27:" // Store to output array: Accumulator row 3 oddments: End
"28:" // Store to output array: End
- "tbz x16, #0, 30f\n"
+ "tbz x15, #0, 30f\n"
"mov x12, #0x0\n"
- "cntw x20\n"
+ "cntw x19\n"
"29:" // Store to output array: Refill accumulators: Loop
- ".inst 0xa040c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xa040c1c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x14]\n"
".inst 0xc0840480 // mova za0h.s[x12], { z4.s-z7.s }\n"
- ".inst 0xa041c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa041c1d0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x14, #0x4, MUL VL]\n"
".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa042c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa042c1d0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x14, #0x8, MUL VL]\n"
".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa043c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xa043c1c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x14, #0xc, MUL VL]\n"
".inst 0xc0840483 // mova za3h.s[x12], { z4.s-z7.s }\n"
"add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- "addvl x15, x15, #16\n"
+ "cmp x12, x19\n"
+ "addvl x14, x14, #16\n"
"blt 29b\n"
"30:" // End block
- "incw x10\n"
- "cmp x10, x9\n"
+ "incw x9\n"
+ "cmp x9, x28\n"
"blt 3b\n"
- "incw x11, ALL, MUL #4\n"
- "cmp x11, x13\n"
- "mov x10, #0x0\n"
- "mov x28, x27\n"
+ "incw x10, ALL, MUL #4\n"
+ "cmp x10, x11\n"
+ "mov x9, #0x0\n"
+ "mov x27, x26\n"
"blt 3b\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_Requantize32_c_offset] "I" (offsetof(Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb)), [offsetof_n_0] "I" (offsetof(KernelArgs, n_0)), [rq] "r" (&rq)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_bf16fp32_mmla_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_bf16fp32_mmla_6x4VL/generic.cpp
index 13f2e488dd..c0b6b30762 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_bf16fp32_mmla_6x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_bf16fp32_mmla_6x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -106,63 +106,63 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"cmp %x[M], #0x2\n"
"bgt 29f\n"
"beq 15f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "cntw x21, ALL, MUL #3\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x20, x9, x20, LSL #1\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 3f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 3f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 3f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"3:" // Height 1: B setup done
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x14\n"
- "incw x20\n"
- "whilelt p3.s, x20, x14\n"
- "incw x20\n"
- "whilelt p2.s, x20, x14\n"
- "incw x20\n"
- "whilelt p1.s, x20, x14\n"
- "cbz x15, 4f\n"
- "ld1w { z8.s }, p5/Z, [x15]\n"
- "ld1w { z9.s }, p5/Z, [x15, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x14, 4f\n"
+ "ld1w { z8.s }, p5/Z, [x14]\n"
+ "ld1w { z9.s }, p5/Z, [x14, #1, MUL VL]\n"
"zip2 z12.d, z8.d, z8.d\n"
"zip1 z8.d, z8.d, z8.d\n"
- "ld1w { z10.s }, p5/Z, [x15, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x15, #3, MUL VL]\n"
+ "ld1w { z10.s }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x14, #3, MUL VL]\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x15, x15, #4\n"
+ "addvl x14, x14, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
"b 6f\n"
"4:" // Height 1: no bias
"tbz %x[flags], #0, 5f\n"
- "ld1w { z9.s }, p4/Z, [x13]\n"
- "ld1w { z10.s }, p3/Z, [x13, #1, MUL VL]\n"
+ "ld1w { z9.s }, p4/Z, [x12]\n"
+ "ld1w { z10.s }, p3/Z, [x12, #1, MUL VL]\n"
"zip1 z8.d, z9.d, z12.d\n"
"zip2 z12.d, z9.d, z12.d\n"
- "ld1w { z11.s }, p2/Z, [x13, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x13, #3, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x12, #3, MUL VL]\n"
"zip1 z9.d, z10.d, z13.d\n"
"zip2 z13.d, z10.d, z13.d\n"
"zip1 z10.d, z11.d, z14.d\n"
@@ -180,130 +180,130 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"mov z14.b, #0x0\n"
"mov z15.b, #0x0\n"
"6:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"7:" // Height 1: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 8f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 9f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 9f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
"b 9f\n"
"8:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"9:" // Height 1: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"ble 11f\n"
"10:" // Height 1: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z1.h }, p0/Z, [x26]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1h { z7.h }, p5/Z, [x12]\n"
- "ld1h { z6.h }, p5/Z, [x12, #1, MUL VL]\n"
- ".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
- ".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x11]\n"
"ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
- ".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
- ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
+ ".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
+ ".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
"ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
- ".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
+ ".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
+ ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x9]\n"
"ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ ".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
+ ".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
"trn2 z1.d, z1.d, z2.d\n"
".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
- "ld1h { z7.h }, p5/Z, [x12, #2, MUL VL]\n"
- "ld1h { z6.h }, p5/Z, [x12, #3, MUL VL]\n"
- ".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
- ".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x11, #2, MUL VL]\n"
"ld1h { z6.h }, p5/Z, [x11, #3, MUL VL]\n"
- ".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
- ".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
+ ".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
+ ".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
- ".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
- ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
+ ".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
+ ".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x9, #2, MUL VL]\n"
"ld1h { z6.h }, p5/Z, [x9, #3, MUL VL]\n"
- "sub x27, x27, #0x8\n"
- "cmp x27, #0x8\n"
+ ".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
+ ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x28, #2, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x28, #3, MUL VL]\n"
+ "sub x26, x26, #0x8\n"
+ "cmp x26, #0x8\n"
".inst 0x6467e42b // bfmmla z11.s, z1.h, z7.h\n"
".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
- "add x26, x26, #0x10\n"
- "addvl x12, x12, #4\n"
+ "add x25, x25, #0x10\n"
"addvl x11, x11, #4\n"
"addvl x10, x10, #4\n"
"addvl x9, x9, #4\n"
+ "addvl x28, x28, #4\n"
"bgt 10b\n"
"11:" // Height 1: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z1.h }, p0/Z, [x26]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1h { z7.h }, p5/Z, [x12]\n"
- "ld1h { z6.h }, p5/Z, [x12, #1, MUL VL]\n"
- ".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
- ".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x11]\n"
"ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
- ".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
- ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
+ ".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
+ ".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
"ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
- ".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
+ ".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
+ ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x9]\n"
"ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ ".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
+ ".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"trn2 z1.d, z1.d, z2.d\n"
".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
- "addvl x12, x12, #2\n"
"addvl x11, x11, #2\n"
"addvl x10, x10, #2\n"
"addvl x9, x9, #2\n"
+ "addvl x28, x28, #2\n"
"ble 12f\n"
- "ld1h { z7.h }, p5/Z, [x12]\n"
- "ld1h { z6.h }, p5/Z, [x12, #1, MUL VL]\n"
- ".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
- ".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x11]\n"
"ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
- ".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
- ".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
+ ".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
+ ".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
"ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
- ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
+ ".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
+ ".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x9]\n"
"ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ ".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
+ ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
".inst 0x6467e42b // bfmmla z11.s, z1.h, z7.h\n"
".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
- "addvl x12, x12, #2\n"
"addvl x11, x11, #2\n"
"addvl x10, x10, #2\n"
"addvl x9, x9, #2\n"
+ "addvl x28, x28, #2\n"
"12:" // Height 1: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 7b\n"
"uzp1 z8.d, z8.d, z12.d\n"
"uzp1 z9.d, z9.d, z13.d\n"
"uzp1 z10.d, z10.d, z14.d\n"
"uzp1 z11.d, z11.d, z15.d\n"
"tbz %x[flags], #1, 13f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
"fmin z8.s, p5/M, z8.s, z1.s\n"
"fmin z9.s, p5/M, z9.s, z1.s\n"
"fmin z10.s, p5/M, z10.s, z1.s\n"
@@ -313,82 +313,82 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"fmax z10.s, p5/M, z10.s, z0.s\n"
"fmax z11.s, p5/M, z11.s, z0.s\n"
"13:" // Height 1: No activation
- "st1w { z8.s }, p4, [x13]\n"
- "st1w { z9.s }, p3, [x13, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x13, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x13, #3, MUL VL]\n"
- "addvl x13, x13, #4\n"
+ "st1w { z8.s }, p4, [x12]\n"
+ "st1w { z9.s }, p3, [x12, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x12, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
"14:" // Height 1: Writeback done
- "decw x14, ALL, MUL #4\n"
- "cmp x14, XZR\n"
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
"bgt 2b\n"
"b 86f\n"
"15:" // Height 2
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"16:" // Height 2: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "cntw x21, ALL, MUL #3\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x20, x9, x20, LSL #1\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 17f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 17f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 17f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"17:" // Height 2: B setup done
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x14\n"
- "incw x20\n"
- "whilelt p3.s, x20, x14\n"
- "incw x20\n"
- "whilelt p2.s, x20, x14\n"
- "incw x20\n"
- "whilelt p1.s, x20, x14\n"
- "cbz x15, 18f\n"
- "ld1w { z8.s }, p5/Z, [x15]\n"
- "ld1w { z9.s }, p5/Z, [x15, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x14, 18f\n"
+ "ld1w { z8.s }, p5/Z, [x14]\n"
+ "ld1w { z9.s }, p5/Z, [x14, #1, MUL VL]\n"
"zip2 z12.d, z8.d, z8.d\n"
"zip1 z8.d, z8.d, z8.d\n"
- "ld1w { z10.s }, p5/Z, [x15, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x15, #3, MUL VL]\n"
+ "ld1w { z10.s }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x14, #3, MUL VL]\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x15, x15, #4\n"
+ "addvl x14, x14, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
"b 20f\n"
"18:" // Height 2: no bias
"tbz %x[flags], #0, 19f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "ld1w { z9.s }, p4/Z, [x13]\n"
- "ld1w { z10.s }, p3/Z, [x13, #1, MUL VL]\n"
- "ld1w { z11.s }, p2/Z, [x13, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x13, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x25]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "ld1w { z9.s }, p4/Z, [x12]\n"
+ "ld1w { z10.s }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
"zip1 z8.d, z9.d, z12.d\n"
"zip2 z12.d, z9.d, z12.d\n"
- "ld1w { z13.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x25, #2, MUL VL]\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
"zip1 z9.d, z10.d, z13.d\n"
"zip2 z13.d, z10.d, z13.d\n"
- "ld1w { z15.s }, p1/Z, [x25, #3, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
"zip1 z10.d, z11.d, z14.d\n"
"zip2 z14.d, z11.d, z14.d\n"
"zip1 z11.d, z16.d, z15.d\n"
@@ -404,131 +404,131 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"mov z14.b, #0x0\n"
"mov z15.b, #0x0\n"
"20:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"21:" // Height 2: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 22f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 23f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 23f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
"b 23f\n"
"22:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
"23:" // Height 2: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"ble 25f\n"
"24:" // Height 2: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z1.h }, p0/Z, [x26]\n"
- "ld1rqh { z2.h }, p0/Z, [x25]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "ld1rqh { z2.h }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1h { z7.h }, p5/Z, [x12]\n"
- "ld1h { z6.h }, p5/Z, [x12, #1, MUL VL]\n"
- ".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
- ".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x11]\n"
"ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
- ".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
- ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
+ ".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
+ ".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
"ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
- ".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
+ ".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
+ ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x9]\n"
"ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ ".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
+ ".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
"trn2 z1.d, z1.d, z2.d\n"
".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
- "ld1h { z7.h }, p5/Z, [x12, #2, MUL VL]\n"
- "ld1h { z6.h }, p5/Z, [x12, #3, MUL VL]\n"
- ".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
- ".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x11, #2, MUL VL]\n"
"ld1h { z6.h }, p5/Z, [x11, #3, MUL VL]\n"
- ".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
- ".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
+ ".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
+ ".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
- ".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
- ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
+ ".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
+ ".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x9, #2, MUL VL]\n"
"ld1h { z6.h }, p5/Z, [x9, #3, MUL VL]\n"
- "sub x27, x27, #0x8\n"
- "cmp x27, #0x8\n"
+ ".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
+ ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x28, #2, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x28, #3, MUL VL]\n"
+ "sub x26, x26, #0x8\n"
+ "cmp x26, #0x8\n"
".inst 0x6467e42b // bfmmla z11.s, z1.h, z7.h\n"
".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
- "addvl x12, x12, #4\n"
+ "add x24, x24, #0x10\n"
"addvl x11, x11, #4\n"
"addvl x10, x10, #4\n"
"addvl x9, x9, #4\n"
+ "addvl x28, x28, #4\n"
"bgt 24b\n"
"25:" // Height 2: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z1.h }, p0/Z, [x26]\n"
- "ld1rqh { z2.h }, p0/Z, [x25]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "ld1rqh { z2.h }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1h { z7.h }, p5/Z, [x12]\n"
- "ld1h { z6.h }, p5/Z, [x12, #1, MUL VL]\n"
- ".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
- ".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x11]\n"
"ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
- ".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
- ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
+ ".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
+ ".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
"ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
- ".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
+ ".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
+ ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x9]\n"
"ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ ".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
+ ".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"trn2 z1.d, z1.d, z2.d\n"
".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
- "addvl x12, x12, #2\n"
"addvl x11, x11, #2\n"
"addvl x10, x10, #2\n"
"addvl x9, x9, #2\n"
+ "addvl x28, x28, #2\n"
"ble 26f\n"
- "ld1h { z7.h }, p5/Z, [x12]\n"
- "ld1h { z6.h }, p5/Z, [x12, #1, MUL VL]\n"
- ".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
- ".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x11]\n"
"ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
- ".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
- ".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
+ ".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
+ ".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
"ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
- ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
+ ".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
+ ".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x9]\n"
"ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ ".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
+ ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
".inst 0x6467e42b // bfmmla z11.s, z1.h, z7.h\n"
".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
- "addvl x12, x12, #2\n"
"addvl x11, x11, #2\n"
"addvl x10, x10, #2\n"
"addvl x9, x9, #2\n"
+ "addvl x28, x28, #2\n"
"26:" // Height 2: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 21b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 z7.d, z8.d, z12.d\n"
"uzp2 z8.d, z8.d, z12.d\n"
- "add x25, x13, x20, LSL #2\n"
+ "add x24, x12, x19, LSL #2\n"
"uzp1 z12.d, z9.d, z13.d\n"
"uzp2 z9.d, z9.d, z13.d\n"
"uzp1 z13.d, z10.d, z14.d\n"
@@ -536,10 +536,10 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
"tbz %x[flags], #1, 27f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
"fmin z7.s, p5/M, z7.s, z1.s\n"
"fmin z12.s, p5/M, z12.s, z1.s\n"
"fmin z13.s, p5/M, z13.s, z1.s\n"
@@ -557,67 +557,67 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"fmax z10.s, p5/M, z10.s, z0.s\n"
"fmax z11.s, p5/M, z11.s, z0.s\n"
"27:" // Height 2: No activation
- "st1w { z7.s }, p4, [x13]\n"
- "st1w { z12.s }, p3, [x13, #1, MUL VL]\n"
- "st1w { z13.s }, p2, [x13, #2, MUL VL]\n"
- "st1w { z14.s }, p1, [x13, #3, MUL VL]\n"
- "addvl x13, x13, #4\n"
- "st1w { z8.s }, p4, [x25]\n"
- "st1w { z9.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x25, #3, MUL VL]\n"
+ "st1w { z7.s }, p4, [x12]\n"
+ "st1w { z12.s }, p3, [x12, #1, MUL VL]\n"
+ "st1w { z13.s }, p2, [x12, #2, MUL VL]\n"
+ "st1w { z14.s }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1w { z8.s }, p4, [x24]\n"
+ "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
"28:" // Height 2: Writeback done
- "decw x14, ALL, MUL #4\n"
- "cmp x14, XZR\n"
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
"bgt 16b\n"
"b 86f\n"
"29:" // Height 3
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"30:" // Height 3: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "cntw x21, ALL, MUL #3\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x20, x9, x20, LSL #1\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 31f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 31f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 31f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"31:" // Height 3: B setup done
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x14\n"
- "incw x20\n"
- "whilelt p3.s, x20, x14\n"
- "incw x20\n"
- "whilelt p2.s, x20, x14\n"
- "incw x20\n"
- "whilelt p1.s, x20, x14\n"
- "cbz x15, 32f\n"
- "ld1w { z8.s }, p5/Z, [x15]\n"
- "ld1w { z9.s }, p5/Z, [x15, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x14, 32f\n"
+ "ld1w { z8.s }, p5/Z, [x14]\n"
+ "ld1w { z9.s }, p5/Z, [x14, #1, MUL VL]\n"
"zip2 z12.d, z8.d, z8.d\n"
"zip1 z8.d, z8.d, z8.d\n"
- "ld1w { z10.s }, p5/Z, [x15, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x15, #3, MUL VL]\n"
+ "ld1w { z10.s }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x14, #3, MUL VL]\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x15, x15, #4\n"
+ "addvl x14, x14, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
"mov z16.d, z8.d\n"
@@ -631,29 +631,29 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"b 34f\n"
"32:" // Height 3: no bias
"tbz %x[flags], #0, 33f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z9.s }, p4/Z, [x13]\n"
- "ld1w { z10.s }, p3/Z, [x13, #1, MUL VL]\n"
- "ld1w { z11.s }, p2/Z, [x13, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x13, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x25]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z9.s }, p4/Z, [x12]\n"
+ "ld1w { z10.s }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
"zip1 z8.d, z9.d, z12.d\n"
"zip2 z12.d, z9.d, z12.d\n"
- "ld1w { z13.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x25, #2, MUL VL]\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
"zip1 z9.d, z10.d, z13.d\n"
"zip2 z13.d, z10.d, z13.d\n"
- "ld1w { z15.s }, p1/Z, [x25, #3, MUL VL]\n"
- "ld1w { z17.s }, p4/Z, [x24]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z17.s }, p4/Z, [x23]\n"
"zip1 z10.d, z11.d, z14.d\n"
"zip2 z14.d, z11.d, z14.d\n"
- "ld1w { z18.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z19.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z18.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z19.s }, p2/Z, [x23, #2, MUL VL]\n"
"zip1 z11.d, z16.d, z15.d\n"
"zip2 z15.d, z16.d, z15.d\n"
- "ld1w { z24.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z24.s }, p1/Z, [x23, #3, MUL VL]\n"
"zip1 z16.d, z17.d, z20.d\n"
"zip2 z20.d, z17.d, z20.d\n"
"zip1 z17.d, z18.d, z21.d\n"
@@ -681,176 +681,176 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"mov z22.b, #0x0\n"
"mov z23.b, #0x0\n"
"34:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"35:" // Height 3: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 36f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 37f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 37f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
"b 37f\n"
"36:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
"37:" // Height 3: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"ble 39f\n"
"38:" // Height 3: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z1.h }, p0/Z, [x26]\n"
- "ld1rqh { z2.h }, p0/Z, [x25]\n"
- "ld1rqh { z3.h }, p0/Z, [x24]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "ld1rqh { z2.h }, p0/Z, [x24]\n"
+ "ld1rqh { z3.h }, p0/Z, [x23]\n"
"trn1 z0.d, z1.d, z2.d\n"
"trn2 z1.d, z1.d, z2.d\n"
- "ld1h { z7.h }, p5/Z, [x12]\n"
+ "ld1h { z7.h }, p5/Z, [x11]\n"
"trn1 z2.d, z3.d, z4.d\n"
- "ld1h { z6.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
".inst 0x6467e450 // bfmmla z16.s, z2.h, z7.h\n"
".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
- "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
".inst 0x6467e451 // bfmmla z17.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10]\n"
+ "ld1h { z7.h }, p5/Z, [x9]\n"
"trn2 z3.d, z3.d, z4.d\n"
".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
- "sub x27, x27, #0x8\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "sub x26, x26, #0x8\n"
".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
".inst 0x6467e452 // bfmmla z18.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "cmp x27, #0x8\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "cmp x26, #0x8\n"
".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
".inst 0x6466e456 // bfmmla z22.s, z2.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
- "add x26, x26, #0x10\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
+ "add x25, x25, #0x10\n"
".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
".inst 0x6467e453 // bfmmla z19.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x12, #2, MUL VL]\n"
- "add x25, x25, #0x10\n"
+ "ld1h { z7.h }, p5/Z, [x11, #2, MUL VL]\n"
+ "add x24, x24, #0x10\n"
".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
".inst 0x6466e457 // bfmmla z23.s, z2.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x12, #3, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "ld1h { z6.h }, p5/Z, [x11, #3, MUL VL]\n"
+ "add x23, x23, #0x10\n"
".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
".inst 0x6467e470 // bfmmla z16.s, z3.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x11, #2, MUL VL]\n"
- "addvl x12, x12, #4\n"
+ "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "addvl x11, x11, #4\n"
".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
".inst 0x6466e474 // bfmmla z20.s, z3.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x11, #3, MUL VL]\n"
- "addvl x11, x11, #4\n"
+ "ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
".inst 0x6467e471 // bfmmla z17.s, z3.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
".inst 0x6466e475 // bfmmla z21.s, z3.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1h { z6.h }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
".inst 0x6467e472 // bfmmla z18.s, z3.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x9, #2, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #2, MUL VL]\n"
".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
".inst 0x6466e476 // bfmmla z22.s, z3.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1h { z6.h }, p5/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
".inst 0x6467e42b // bfmmla z11.s, z1.h, z7.h\n"
".inst 0x6467e473 // bfmmla z19.s, z3.h, z7.h\n"
".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
".inst 0x6466e477 // bfmmla z23.s, z3.h, z6.h\n"
"bgt 38b\n"
"39:" // Height 3: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z1.h }, p0/Z, [x26]\n"
- "ld1rqh { z2.h }, p0/Z, [x25]\n"
- "ld1rqh { z3.h }, p0/Z, [x24]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "ld1rqh { z2.h }, p0/Z, [x24]\n"
+ "ld1rqh { z3.h }, p0/Z, [x23]\n"
"trn1 z0.d, z1.d, z2.d\n"
"trn2 z1.d, z1.d, z2.d\n"
- "ld1h { z7.h }, p5/Z, [x12]\n"
+ "ld1h { z7.h }, p5/Z, [x11]\n"
"trn1 z2.d, z3.d, z4.d\n"
- "ld1h { z6.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
".inst 0x6467e450 // bfmmla z16.s, z2.h, z7.h\n"
".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
- "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
".inst 0x6467e451 // bfmmla z17.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10]\n"
- "subs x27, x27, #0x4\n"
+ "ld1h { z7.h }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x4\n"
".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
"trn2 z3.d, z3.d, z4.d\n"
".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
".inst 0x6467e452 // bfmmla z18.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x12, x12, #2\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x11, x11, #2\n"
".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
".inst 0x6466e456 // bfmmla z22.s, z2.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
- "addvl x11, x11, #2\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
+ "addvl x10, x10, #2\n"
".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
".inst 0x6467e453 // bfmmla z19.s, z2.h, z7.h\n"
- "addvl x10, x10, #2\n"
"addvl x9, x9, #2\n"
+ "addvl x28, x28, #2\n"
".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
".inst 0x6466e457 // bfmmla z23.s, z2.h, z6.h\n"
"ble 40f\n"
- "ld1h { z7.h }, p5/Z, [x12]\n"
- "ld1h { z6.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
".inst 0x6467e470 // bfmmla z16.s, z3.h, z7.h\n"
".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
".inst 0x6466e474 // bfmmla z20.s, z3.h, z6.h\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
- "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
".inst 0x6467e471 // bfmmla z17.s, z3.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10]\n"
- "addvl x12, x12, #2\n"
+ "ld1h { z7.h }, p5/Z, [x9]\n"
+ "addvl x11, x11, #2\n"
".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
".inst 0x6466e475 // bfmmla z21.s, z3.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
- "addvl x11, x11, #2\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "addvl x10, x10, #2\n"
".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
".inst 0x6467e472 // bfmmla z18.s, z3.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x10, x10, #2\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x9, x9, #2\n"
".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
".inst 0x6466e476 // bfmmla z22.s, z3.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
- "addvl x9, x9, #2\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
+ "addvl x28, x28, #2\n"
".inst 0x6467e42b // bfmmla z11.s, z1.h, z7.h\n"
".inst 0x6467e473 // bfmmla z19.s, z3.h, z7.h\n"
".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
".inst 0x6466e477 // bfmmla z23.s, z3.h, z6.h\n"
"40:" // Height 3: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 35b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
"uzp1 z7.d, z8.d, z12.d\n"
"uzp2 z8.d, z8.d, z12.d\n"
"uzp1 z12.d, z9.d, z13.d\n"
"uzp2 z9.d, z9.d, z13.d\n"
- "add x24, x25, x20, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"uzp1 z13.d, z10.d, z14.d\n"
"uzp2 z10.d, z10.d, z14.d\n"
"uzp1 z14.d, z11.d, z15.d\n"
@@ -860,10 +860,10 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"uzp1 z18.d, z18.d, z22.d\n"
"uzp1 z19.d, z19.d, z23.d\n"
"tbz %x[flags], #1, 41f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
"fmin z7.s, p5/M, z7.s, z1.s\n"
"fmin z12.s, p5/M, z12.s, z1.s\n"
"fmin z13.s, p5/M, z13.s, z1.s\n"
@@ -889,71 +889,71 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"fmax z18.s, p5/M, z18.s, z0.s\n"
"fmax z19.s, p5/M, z19.s, z0.s\n"
"41:" // Height 3: No activation
- "st1w { z7.s }, p4, [x13]\n"
- "st1w { z12.s }, p3, [x13, #1, MUL VL]\n"
- "st1w { z13.s }, p2, [x13, #2, MUL VL]\n"
- "st1w { z14.s }, p1, [x13, #3, MUL VL]\n"
- "addvl x13, x13, #4\n"
- "st1w { z8.s }, p4, [x25]\n"
- "st1w { z9.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x24]\n"
- "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z7.s }, p4, [x12]\n"
+ "st1w { z12.s }, p3, [x12, #1, MUL VL]\n"
+ "st1w { z13.s }, p2, [x12, #2, MUL VL]\n"
+ "st1w { z14.s }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1w { z8.s }, p4, [x24]\n"
+ "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x23]\n"
+ "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
"42:" // Height 3: Writeback done
- "decw x14, ALL, MUL #4\n"
- "cmp x14, XZR\n"
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
"bgt 30b\n"
"b 86f\n"
"43:" // Height 4
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"44:" // Height 4: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "cntw x21, ALL, MUL #3\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x20, x9, x20, LSL #1\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 45f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 45f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 45f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"45:" // Height 4: B setup done
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x14\n"
- "incw x20\n"
- "whilelt p3.s, x20, x14\n"
- "incw x20\n"
- "whilelt p2.s, x20, x14\n"
- "incw x20\n"
- "whilelt p1.s, x20, x14\n"
- "cbz x15, 46f\n"
- "ld1w { z8.s }, p5/Z, [x15]\n"
- "ld1w { z9.s }, p5/Z, [x15, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x14, 46f\n"
+ "ld1w { z8.s }, p5/Z, [x14]\n"
+ "ld1w { z9.s }, p5/Z, [x14, #1, MUL VL]\n"
"zip2 z12.d, z8.d, z8.d\n"
"zip1 z8.d, z8.d, z8.d\n"
- "ld1w { z10.s }, p5/Z, [x15, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x15, #3, MUL VL]\n"
+ "ld1w { z10.s }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x14, #3, MUL VL]\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x15, x15, #4\n"
+ "addvl x14, x14, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
"mov z16.d, z8.d\n"
@@ -967,38 +967,38 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"b 48f\n"
"46:" // Height 4: no bias
"tbz %x[flags], #0, 47f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z9.s }, p4/Z, [x13]\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z10.s }, p3/Z, [x13, #1, MUL VL]\n"
- "ld1w { z11.s }, p2/Z, [x13, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x13, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x25]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z9.s }, p4/Z, [x12]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z10.s }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
"zip1 z8.d, z9.d, z12.d\n"
"zip2 z12.d, z9.d, z12.d\n"
- "ld1w { z13.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x25, #2, MUL VL]\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
"zip1 z9.d, z10.d, z13.d\n"
"zip2 z13.d, z10.d, z13.d\n"
- "ld1w { z15.s }, p1/Z, [x25, #3, MUL VL]\n"
- "ld1w { z17.s }, p4/Z, [x24]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z17.s }, p4/Z, [x23]\n"
"zip1 z10.d, z11.d, z14.d\n"
"zip2 z14.d, z11.d, z14.d\n"
- "ld1w { z18.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z19.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z18.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z19.s }, p2/Z, [x23, #2, MUL VL]\n"
"zip1 z11.d, z16.d, z15.d\n"
"zip2 z15.d, z16.d, z15.d\n"
- "ld1w { z24.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x23]\n"
+ "ld1w { z24.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z20.s }, p4/Z, [x22]\n"
"zip1 z16.d, z17.d, z20.d\n"
"zip2 z20.d, z17.d, z20.d\n"
- "ld1w { z21.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z22.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
"zip1 z17.d, z18.d, z21.d\n"
"zip2 z21.d, z18.d, z21.d\n"
- "ld1w { z23.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
"zip1 z18.d, z19.d, z22.d\n"
"zip2 z22.d, z19.d, z22.d\n"
"zip1 z19.d, z24.d, z23.d\n"
@@ -1022,182 +1022,182 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"mov z22.b, #0x0\n"
"mov z23.b, #0x0\n"
"48:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"49:" // Height 4: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 50f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 51f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 51f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
"b 51f\n"
"50:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
"51:" // Height 4: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"ble 53f\n"
"52:" // Height 4: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z1.h }, p0/Z, [x26]\n"
- "ld1rqh { z2.h }, p0/Z, [x25]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "ld1rqh { z2.h }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1rqh { z3.h }, p0/Z, [x24]\n"
- "ld1rqh { z4.h }, p0/Z, [x23]\n"
+ "ld1rqh { z3.h }, p0/Z, [x23]\n"
+ "ld1rqh { z4.h }, p0/Z, [x22]\n"
"trn2 z1.d, z1.d, z2.d\n"
"trn1 z2.d, z3.d, z4.d\n"
- "ld1h { z7.h }, p5/Z, [x12]\n"
- "ld1h { z6.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
".inst 0x6467e450 // bfmmla z16.s, z2.h, z7.h\n"
".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
- "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
".inst 0x6467e451 // bfmmla z17.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10]\n"
+ "ld1h { z7.h }, p5/Z, [x9]\n"
"trn2 z3.d, z3.d, z4.d\n"
".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
- "sub x27, x27, #0x8\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "sub x26, x26, #0x8\n"
".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
".inst 0x6467e452 // bfmmla z18.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "cmp x27, #0x8\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "cmp x26, #0x8\n"
".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
".inst 0x6466e456 // bfmmla z22.s, z2.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
- "add x26, x26, #0x10\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
+ "add x25, x25, #0x10\n"
".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
".inst 0x6467e453 // bfmmla z19.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x12, #2, MUL VL]\n"
- "add x25, x25, #0x10\n"
+ "ld1h { z7.h }, p5/Z, [x11, #2, MUL VL]\n"
+ "add x24, x24, #0x10\n"
".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
".inst 0x6466e457 // bfmmla z23.s, z2.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x12, #3, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "ld1h { z6.h }, p5/Z, [x11, #3, MUL VL]\n"
+ "add x23, x23, #0x10\n"
".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
".inst 0x6467e470 // bfmmla z16.s, z3.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x11, #2, MUL VL]\n"
- "add x23, x23, #0x10\n"
+ "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "add x22, x22, #0x10\n"
".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
".inst 0x6466e474 // bfmmla z20.s, z3.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x11, #3, MUL VL]\n"
- "addvl x12, x12, #4\n"
+ "ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x11, x11, #4\n"
".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
".inst 0x6467e471 // bfmmla z17.s, z3.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
- "addvl x11, x11, #4\n"
+ "ld1h { z7.h }, p5/Z, [x9, #2, MUL VL]\n"
+ "addvl x10, x10, #4\n"
".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
".inst 0x6466e475 // bfmmla z21.s, z3.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1h { z6.h }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
".inst 0x6467e472 // bfmmla z18.s, z3.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x9, #2, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #2, MUL VL]\n"
".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
".inst 0x6466e476 // bfmmla z22.s, z3.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1h { z6.h }, p5/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
".inst 0x6467e42b // bfmmla z11.s, z1.h, z7.h\n"
".inst 0x6467e473 // bfmmla z19.s, z3.h, z7.h\n"
".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
".inst 0x6466e477 // bfmmla z23.s, z3.h, z6.h\n"
"bgt 52b\n"
"53:" // Height 4: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z1.h }, p0/Z, [x26]\n"
- "ld1rqh { z2.h }, p0/Z, [x25]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "ld1rqh { z2.h }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1rqh { z3.h }, p0/Z, [x24]\n"
- "ld1rqh { z4.h }, p0/Z, [x23]\n"
+ "ld1rqh { z3.h }, p0/Z, [x23]\n"
+ "ld1rqh { z4.h }, p0/Z, [x22]\n"
"trn2 z1.d, z1.d, z2.d\n"
"trn1 z2.d, z3.d, z4.d\n"
- "ld1h { z7.h }, p5/Z, [x12]\n"
- "ld1h { z6.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
".inst 0x6467e450 // bfmmla z16.s, z2.h, z7.h\n"
".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
- "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
".inst 0x6467e451 // bfmmla z17.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10]\n"
- "subs x27, x27, #0x4\n"
+ "ld1h { z7.h }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x4\n"
".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
"trn2 z3.d, z3.d, z4.d\n"
".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
".inst 0x6467e452 // bfmmla z18.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x12, x12, #2\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x11, x11, #2\n"
".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
".inst 0x6466e456 // bfmmla z22.s, z2.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
- "addvl x11, x11, #2\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
+ "addvl x10, x10, #2\n"
".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
".inst 0x6467e453 // bfmmla z19.s, z2.h, z7.h\n"
- "addvl x10, x10, #2\n"
"addvl x9, x9, #2\n"
+ "addvl x28, x28, #2\n"
".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
".inst 0x6466e457 // bfmmla z23.s, z2.h, z6.h\n"
"ble 54f\n"
- "ld1h { z7.h }, p5/Z, [x12]\n"
- "ld1h { z6.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
".inst 0x6467e470 // bfmmla z16.s, z3.h, z7.h\n"
".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
".inst 0x6466e474 // bfmmla z20.s, z3.h, z6.h\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
- "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
".inst 0x6467e471 // bfmmla z17.s, z3.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10]\n"
- "addvl x12, x12, #2\n"
+ "ld1h { z7.h }, p5/Z, [x9]\n"
+ "addvl x11, x11, #2\n"
".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
".inst 0x6466e475 // bfmmla z21.s, z3.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
- "addvl x11, x11, #2\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "addvl x10, x10, #2\n"
".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
".inst 0x6467e472 // bfmmla z18.s, z3.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x10, x10, #2\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x9, x9, #2\n"
".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
".inst 0x6466e476 // bfmmla z22.s, z3.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
- "addvl x9, x9, #2\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
+ "addvl x28, x28, #2\n"
".inst 0x6467e42b // bfmmla z11.s, z1.h, z7.h\n"
".inst 0x6467e473 // bfmmla z19.s, z3.h, z7.h\n"
".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
".inst 0x6466e477 // bfmmla z23.s, z3.h, z6.h\n"
"54:" // Height 4: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 49b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"uzp1 z7.d, z8.d, z12.d\n"
"uzp2 z8.d, z8.d, z12.d\n"
"uzp1 z12.d, z9.d, z13.d\n"
- "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"uzp2 z9.d, z9.d, z13.d\n"
"uzp1 z13.d, z10.d, z14.d\n"
"uzp2 z10.d, z10.d, z14.d\n"
@@ -1212,10 +1212,10 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"uzp1 z22.d, z19.d, z23.d\n"
"uzp2 z19.d, z19.d, z23.d\n"
"tbz %x[flags], #1, 55f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
"fmin z7.s, p5/M, z7.s, z1.s\n"
"fmin z12.s, p5/M, z12.s, z1.s\n"
"fmin z13.s, p5/M, z13.s, z1.s\n"
@@ -1249,75 +1249,75 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"fmax z18.s, p5/M, z18.s, z0.s\n"
"fmax z19.s, p5/M, z19.s, z0.s\n"
"55:" // Height 4: No activation
- "st1w { z7.s }, p4, [x13]\n"
- "st1w { z12.s }, p3, [x13, #1, MUL VL]\n"
- "st1w { z13.s }, p2, [x13, #2, MUL VL]\n"
- "st1w { z14.s }, p1, [x13, #3, MUL VL]\n"
- "addvl x13, x13, #4\n"
- "st1w { z8.s }, p4, [x25]\n"
- "st1w { z9.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z15.s }, p4, [x24]\n"
- "st1w { z20.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z21.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z22.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x23]\n"
- "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z7.s }, p4, [x12]\n"
+ "st1w { z12.s }, p3, [x12, #1, MUL VL]\n"
+ "st1w { z13.s }, p2, [x12, #2, MUL VL]\n"
+ "st1w { z14.s }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1w { z8.s }, p4, [x24]\n"
+ "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z15.s }, p4, [x23]\n"
+ "st1w { z20.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z21.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z22.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x22]\n"
+ "st1w { z17.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x22, #3, MUL VL]\n"
"56:" // Height 4: Writeback done
- "decw x14, ALL, MUL #4\n"
- "cmp x14, XZR\n"
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
"bgt 44b\n"
"b 86f\n"
"57:" // Height 5
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"58:" // Height 5: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "cntw x21, ALL, MUL #3\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x20, x9, x20, LSL #1\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 59f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 59f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 59f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"59:" // Height 5: B setup done
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x14\n"
- "incw x20\n"
- "whilelt p3.s, x20, x14\n"
- "incw x20\n"
- "whilelt p2.s, x20, x14\n"
- "incw x20\n"
- "whilelt p1.s, x20, x14\n"
- "cbz x15, 60f\n"
- "ld1w { z8.s }, p5/Z, [x15]\n"
- "ld1w { z9.s }, p5/Z, [x15, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x14, 60f\n"
+ "ld1w { z8.s }, p5/Z, [x14]\n"
+ "ld1w { z9.s }, p5/Z, [x14, #1, MUL VL]\n"
"zip2 z12.d, z8.d, z8.d\n"
"zip1 z8.d, z8.d, z8.d\n"
- "ld1w { z10.s }, p5/Z, [x15, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x15, #3, MUL VL]\n"
+ "ld1w { z10.s }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x14, #3, MUL VL]\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x15, x15, #4\n"
+ "addvl x14, x14, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
"mov z16.d, z8.d\n"
@@ -1339,47 +1339,47 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"b 62f\n"
"60:" // Height 5: no bias
"tbz %x[flags], #0, 61f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z9.s }, p4/Z, [x13]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "ld1w { z10.s }, p3/Z, [x13, #1, MUL VL]\n"
- "ld1w { z11.s }, p2/Z, [x13, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x13, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x25]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z9.s }, p4/Z, [x12]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z10.s }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
"zip1 z8.d, z9.d, z12.d\n"
"zip2 z12.d, z9.d, z12.d\n"
- "ld1w { z13.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x25, #2, MUL VL]\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
"zip1 z9.d, z10.d, z13.d\n"
"zip2 z13.d, z10.d, z13.d\n"
- "ld1w { z15.s }, p1/Z, [x25, #3, MUL VL]\n"
- "ld1w { z17.s }, p4/Z, [x24]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z17.s }, p4/Z, [x23]\n"
"zip1 z10.d, z11.d, z14.d\n"
"zip2 z14.d, z11.d, z14.d\n"
- "ld1w { z18.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z19.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z18.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z19.s }, p2/Z, [x23, #2, MUL VL]\n"
"zip1 z11.d, z16.d, z15.d\n"
"zip2 z15.d, z16.d, z15.d\n"
- "ld1w { z24.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x23]\n"
+ "ld1w { z24.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z20.s }, p4/Z, [x22]\n"
"zip1 z16.d, z17.d, z20.d\n"
"zip2 z20.d, z17.d, z20.d\n"
- "ld1w { z21.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z22.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
"zip1 z17.d, z18.d, z21.d\n"
"zip2 z21.d, z18.d, z21.d\n"
- "ld1w { z23.s }, p1/Z, [x23, #3, MUL VL]\n"
- "ld1w { z25.s }, p4/Z, [x22]\n"
+ "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z25.s }, p4/Z, [x21]\n"
"zip1 z18.d, z19.d, z22.d\n"
"zip2 z22.d, z19.d, z22.d\n"
- "ld1w { z26.s }, p3/Z, [x22, #1, MUL VL]\n"
- "ld1w { z27.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z26.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z27.s }, p2/Z, [x21, #2, MUL VL]\n"
"zip1 z19.d, z24.d, z23.d\n"
"zip2 z23.d, z24.d, z23.d\n"
- "ld1w { z6.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z6.s }, p1/Z, [x21, #3, MUL VL]\n"
"zip1 z24.d, z25.d, z28.d\n"
"zip2 z28.d, z25.d, z28.d\n"
"zip1 z25.d, z26.d, z29.d\n"
@@ -1415,119 +1415,119 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"mov z30.b, #0x0\n"
"mov z31.b, #0x0\n"
"62:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"63:" // Height 5: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 64f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 65f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
- "add x22, x22, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 65f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
"b 65f\n"
"64:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
"65:" // Height 5: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"ble 67f\n"
"66:" // Height 5: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z1.h }, p0/Z, [x26]\n"
- "ld1rqh { z2.h }, p0/Z, [x25]\n"
- "ld1rqh { z3.h }, p0/Z, [x24]\n"
- "ld1rqh { z4.h }, p0/Z, [x23]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "ld1rqh { z2.h }, p0/Z, [x24]\n"
+ "ld1rqh { z3.h }, p0/Z, [x23]\n"
+ "ld1rqh { z4.h }, p0/Z, [x22]\n"
"trn1 z0.d, z1.d, z2.d\n"
"trn2 z1.d, z1.d, z2.d\n"
- "ld1rqh { z5.h }, p0/Z, [x22]\n"
+ "ld1rqh { z5.h }, p0/Z, [x21]\n"
"trn1 z2.d, z3.d, z4.d\n"
"trn2 z3.d, z3.d, z4.d\n"
- "ld1h { z7.h }, p5/Z, [x12]\n"
+ "ld1h { z7.h }, p5/Z, [x11]\n"
"trn1 z4.d, z5.d, z6.d\n"
"trn2 z5.d, z5.d, z6.d\n"
- "ld1h { z6.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
".inst 0x6467e450 // bfmmla z16.s, z2.h, z7.h\n"
".inst 0x6467e498 // bfmmla z24.s, z4.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
- "sub x27, x27, #0x8\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "sub x26, x26, #0x8\n"
".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
- "cmp x27, #0x8\n"
- "add x26, x26, #0x10\n"
+ "cmp x26, #0x8\n"
+ "add x25, x25, #0x10\n"
".inst 0x6466e49c // bfmmla z28.s, z4.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
- "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
".inst 0x6467e451 // bfmmla z17.s, z2.h, z7.h\n"
".inst 0x6467e499 // bfmmla z25.s, z4.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10]\n"
- "add x24, x24, #0x10\n"
+ "ld1h { z7.h }, p5/Z, [x9]\n"
+ "add x23, x23, #0x10\n"
".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
- "add x23, x23, #0x10\n"
"add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
".inst 0x6466e49d // bfmmla z29.s, z4.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
".inst 0x6467e452 // bfmmla z18.s, z2.h, z7.h\n"
".inst 0x6467e49a // bfmmla z26.s, z4.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
".inst 0x6466e456 // bfmmla z22.s, z2.h, z6.h\n"
".inst 0x6466e49e // bfmmla z30.s, z4.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
".inst 0x6467e453 // bfmmla z19.s, z2.h, z7.h\n"
".inst 0x6467e49b // bfmmla z27.s, z4.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x11, #2, MUL VL]\n"
".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
".inst 0x6466e457 // bfmmla z23.s, z2.h, z6.h\n"
".inst 0x6466e49f // bfmmla z31.s, z4.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #3, MUL VL]\n"
".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
- "addvl x12, x12, #4\n"
+ "addvl x11, x11, #4\n"
".inst 0x6467e470 // bfmmla z16.s, z3.h, z7.h\n"
".inst 0x6467e4b8 // bfmmla z24.s, z5.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x11, #2, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
".inst 0x6466e474 // bfmmla z20.s, z3.h, z6.h\n"
".inst 0x6466e4bc // bfmmla z28.s, z5.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x11, #3, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
- "addvl x11, x11, #4\n"
+ "addvl x10, x10, #4\n"
".inst 0x6467e471 // bfmmla z17.s, z3.h, z7.h\n"
".inst 0x6467e4b9 // bfmmla z25.s, z5.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
".inst 0x6466e475 // bfmmla z21.s, z3.h, z6.h\n"
".inst 0x6466e4bd // bfmmla z29.s, z5.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
- "addvl x10, x10, #4\n"
+ "addvl x9, x9, #4\n"
".inst 0x6467e472 // bfmmla z18.s, z3.h, z7.h\n"
".inst 0x6467e4ba // bfmmla z26.s, z5.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x9, #2, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #2, MUL VL]\n"
".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
".inst 0x6466e476 // bfmmla z22.s, z3.h, z6.h\n"
".inst 0x6466e4be // bfmmla z30.s, z5.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x9, #3, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x28, #3, MUL VL]\n"
".inst 0x6467e42b // bfmmla z11.s, z1.h, z7.h\n"
- "addvl x9, x9, #4\n"
+ "addvl x28, x28, #4\n"
".inst 0x6467e473 // bfmmla z19.s, z3.h, z7.h\n"
".inst 0x6467e4bb // bfmmla z27.s, z5.h, z7.h\n"
".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
@@ -1535,86 +1535,86 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
".inst 0x6466e4bf // bfmmla z31.s, z5.h, z6.h\n"
"bgt 66b\n"
"67:" // Height 5: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z1.h }, p0/Z, [x26]\n"
- "ld1rqh { z2.h }, p0/Z, [x25]\n"
- "ld1rqh { z3.h }, p0/Z, [x24]\n"
- "ld1rqh { z4.h }, p0/Z, [x23]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "ld1rqh { z2.h }, p0/Z, [x24]\n"
+ "ld1rqh { z3.h }, p0/Z, [x23]\n"
+ "ld1rqh { z4.h }, p0/Z, [x22]\n"
"trn1 z0.d, z1.d, z2.d\n"
"trn2 z1.d, z1.d, z2.d\n"
- "ld1rqh { z5.h }, p0/Z, [x22]\n"
+ "ld1rqh { z5.h }, p0/Z, [x21]\n"
"trn1 z2.d, z3.d, z4.d\n"
"trn2 z3.d, z3.d, z4.d\n"
- "ld1h { z7.h }, p5/Z, [x12]\n"
+ "ld1h { z7.h }, p5/Z, [x11]\n"
"trn1 z4.d, z5.d, z6.d\n"
"trn2 z5.d, z5.d, z6.d\n"
- "ld1h { z6.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
".inst 0x6467e450 // bfmmla z16.s, z2.h, z7.h\n"
".inst 0x6467e498 // bfmmla z24.s, z4.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
- "subs x27, x27, #0x4\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "subs x26, x26, #0x4\n"
".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
- "addvl x12, x12, #2\n"
+ "addvl x11, x11, #2\n"
".inst 0x6466e49c // bfmmla z28.s, z4.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
- "addvl x11, x11, #2\n"
+ "addvl x10, x10, #2\n"
".inst 0x6467e451 // bfmmla z17.s, z2.h, z7.h\n"
".inst 0x6467e499 // bfmmla z25.s, z4.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10]\n"
+ "ld1h { z7.h }, p5/Z, [x9]\n"
".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
".inst 0x6466e49d // bfmmla z29.s, z4.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
- "addvl x10, x10, #2\n"
+ "addvl x9, x9, #2\n"
".inst 0x6467e452 // bfmmla z18.s, z2.h, z7.h\n"
".inst 0x6467e49a // bfmmla z26.s, z4.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
".inst 0x6466e456 // bfmmla z22.s, z2.h, z6.h\n"
".inst 0x6466e49e // bfmmla z30.s, z4.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
- "addvl x9, x9, #2\n"
+ "addvl x28, x28, #2\n"
".inst 0x6467e453 // bfmmla z19.s, z2.h, z7.h\n"
".inst 0x6467e49b // bfmmla z27.s, z4.h, z7.h\n"
".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
".inst 0x6466e457 // bfmmla z23.s, z2.h, z6.h\n"
".inst 0x6466e49f // bfmmla z31.s, z4.h, z6.h\n"
"ble 68f\n"
- "ld1h { z7.h }, p5/Z, [x12]\n"
- "ld1h { z6.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
".inst 0x6467e470 // bfmmla z16.s, z3.h, z7.h\n"
".inst 0x6467e4b8 // bfmmla z24.s, z5.h, z7.h\n"
".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
- "addvl x12, x12, #2\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "addvl x11, x11, #2\n"
".inst 0x6466e474 // bfmmla z20.s, z3.h, z6.h\n"
".inst 0x6466e4bc // bfmmla z28.s, z5.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
- "addvl x11, x11, #2\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "addvl x10, x10, #2\n"
".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
".inst 0x6467e471 // bfmmla z17.s, z3.h, z7.h\n"
".inst 0x6467e4b9 // bfmmla z25.s, z5.h, z7.h\n"
".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
- "ld1h { z7.h }, p5/Z, [x10]\n"
+ "ld1h { z7.h }, p5/Z, [x9]\n"
".inst 0x6466e475 // bfmmla z21.s, z3.h, z6.h\n"
".inst 0x6466e4bd // bfmmla z29.s, z5.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
- "addvl x10, x10, #2\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "addvl x9, x9, #2\n"
".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
".inst 0x6467e472 // bfmmla z18.s, z3.h, z7.h\n"
".inst 0x6467e4ba // bfmmla z26.s, z5.h, z7.h\n"
".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
".inst 0x6466e476 // bfmmla z22.s, z3.h, z6.h\n"
".inst 0x6466e4be // bfmmla z30.s, z5.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
- "addvl x9, x9, #2\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
+ "addvl x28, x28, #2\n"
".inst 0x6467e42b // bfmmla z11.s, z1.h, z7.h\n"
".inst 0x6467e473 // bfmmla z19.s, z3.h, z7.h\n"
".inst 0x6467e4bb // bfmmla z27.s, z5.h, z7.h\n"
@@ -1622,18 +1622,18 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
".inst 0x6466e477 // bfmmla z23.s, z3.h, z6.h\n"
".inst 0x6466e4bf // bfmmla z31.s, z5.h, z6.h\n"
"68:" // Height 5: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 63b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"uzp1 z7.d, z8.d, z12.d\n"
- "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"uzp2 z8.d, z8.d, z12.d\n"
"uzp1 z12.d, z9.d, z13.d\n"
- "add x22, x23, x20, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"uzp2 z9.d, z9.d, z13.d\n"
"uzp1 z13.d, z10.d, z14.d\n"
"uzp2 z10.d, z10.d, z14.d\n"
@@ -1652,10 +1652,10 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"uzp1 z26.d, z26.d, z30.d\n"
"uzp1 z27.d, z27.d, z31.d\n"
"tbz %x[flags], #1, 69f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
"fmin z7.s, p5/M, z7.s, z1.s\n"
"fmin z12.s, p5/M, z12.s, z1.s\n"
"fmin z13.s, p5/M, z13.s, z1.s\n"
@@ -1697,82 +1697,82 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"fmax z26.s, p5/M, z26.s, z0.s\n"
"fmax z27.s, p5/M, z27.s, z0.s\n"
"69:" // Height 5: No activation
- "st1w { z7.s }, p4, [x13]\n"
- "st1w { z12.s }, p3, [x13, #1, MUL VL]\n"
- "st1w { z13.s }, p2, [x13, #2, MUL VL]\n"
- "st1w { z14.s }, p1, [x13, #3, MUL VL]\n"
- "addvl x13, x13, #4\n"
- "st1w { z8.s }, p4, [x25]\n"
- "st1w { z9.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z15.s }, p4, [x24]\n"
- "st1w { z20.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z21.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z22.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x23]\n"
- "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z24.s }, p4, [x22]\n"
- "st1w { z25.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z26.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z27.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z7.s }, p4, [x12]\n"
+ "st1w { z12.s }, p3, [x12, #1, MUL VL]\n"
+ "st1w { z13.s }, p2, [x12, #2, MUL VL]\n"
+ "st1w { z14.s }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1w { z8.s }, p4, [x24]\n"
+ "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z15.s }, p4, [x23]\n"
+ "st1w { z20.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z21.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z22.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x22]\n"
+ "st1w { z17.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x21]\n"
+ "st1w { z25.s }, p3, [x21, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x21, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x21, #3, MUL VL]\n"
"70:" // Height 5: Writeback done
- "decw x14, ALL, MUL #4\n"
- "cmp x14, XZR\n"
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
"bgt 58b\n"
"b 86f\n"
"71:" // Height 6
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x21, #0x18\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x15, %x[bias]\n"
- "mov x13, %x[output_ptr]\n"
- "madd %x[output_ptr], x20, x21, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x20, #0x18\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x14, %x[bias]\n"
+ "mov x12, %x[output_ptr]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
"72:" // Height 6: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "cntw x21, ALL, MUL #3\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x20, x9, x20, LSL #1\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 73f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 73f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 73f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"73:" // Height 6: B setup done
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x14\n"
- "incw x20\n"
- "whilelt p3.s, x20, x14\n"
- "incw x20\n"
- "whilelt p2.s, x20, x14\n"
- "incw x20\n"
- "whilelt p1.s, x20, x14\n"
- "cbz x15, 74f\n"
- "ld1w { z8.s }, p5/Z, [x15]\n"
- "ld1w { z9.s }, p5/Z, [x15, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x14, 74f\n"
+ "ld1w { z8.s }, p5/Z, [x14]\n"
+ "ld1w { z9.s }, p5/Z, [x14, #1, MUL VL]\n"
"zip2 z12.d, z8.d, z8.d\n"
"zip1 z8.d, z8.d, z8.d\n"
- "ld1w { z10.s }, p5/Z, [x15, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x15, #3, MUL VL]\n"
+ "ld1w { z10.s }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x14, #3, MUL VL]\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x15, x15, #4\n"
+ "addvl x14, x14, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
"mov z16.d, z8.d\n"
@@ -1794,55 +1794,55 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"b 76f\n"
"74:" // Height 6: no bias
"tbz %x[flags], #0, 75f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z9.s }, p4/Z, [x13]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "ld1w { z10.s }, p3/Z, [x13, #1, MUL VL]\n"
- "ld1w { z11.s }, p2/Z, [x13, #2, MUL VL]\n"
- "add x21, x22, x20, LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x13, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x25]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z9.s }, p4/Z, [x12]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z10.s }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "add x20, x21, x19, LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
"zip1 z8.d, z9.d, z12.d\n"
- "ld1w { z13.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x25, #2, MUL VL]\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
"zip2 z12.d, z9.d, z12.d\n"
"zip1 z9.d, z10.d, z13.d\n"
- "ld1w { z15.s }, p1/Z, [x25, #3, MUL VL]\n"
- "ld1w { z17.s }, p4/Z, [x24]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z17.s }, p4/Z, [x23]\n"
"zip2 z13.d, z10.d, z13.d\n"
"zip1 z10.d, z11.d, z14.d\n"
- "ld1w { z18.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z19.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z18.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z19.s }, p2/Z, [x23, #2, MUL VL]\n"
"zip2 z14.d, z11.d, z14.d\n"
"zip1 z11.d, z16.d, z15.d\n"
- "ld1w { z24.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x23]\n"
+ "ld1w { z24.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z20.s }, p4/Z, [x22]\n"
"zip2 z15.d, z16.d, z15.d\n"
"zip1 z16.d, z17.d, z20.d\n"
- "ld1w { z21.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z22.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
"zip2 z20.d, z17.d, z20.d\n"
"zip1 z17.d, z18.d, z21.d\n"
- "ld1w { z23.s }, p1/Z, [x23, #3, MUL VL]\n"
- "ld1w { z25.s }, p4/Z, [x22]\n"
+ "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z25.s }, p4/Z, [x21]\n"
"zip2 z21.d, z18.d, z21.d\n"
"zip1 z18.d, z19.d, z22.d\n"
- "ld1w { z26.s }, p3/Z, [x22, #1, MUL VL]\n"
- "ld1w { z27.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z26.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z27.s }, p2/Z, [x21, #2, MUL VL]\n"
"zip2 z22.d, z19.d, z22.d\n"
"zip1 z19.d, z24.d, z23.d\n"
- "ld1w { z6.s }, p1/Z, [x22, #3, MUL VL]\n"
- "ld1w { z28.s }, p4/Z, [x21]\n"
+ "ld1w { z6.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "ld1w { z28.s }, p4/Z, [x20]\n"
"zip2 z23.d, z24.d, z23.d\n"
"zip1 z24.d, z25.d, z28.d\n"
- "ld1w { z29.s }, p3/Z, [x21, #1, MUL VL]\n"
- "ld1w { z30.s }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z29.s }, p3/Z, [x20, #1, MUL VL]\n"
+ "ld1w { z30.s }, p2/Z, [x20, #2, MUL VL]\n"
"zip2 z28.d, z25.d, z28.d\n"
"zip1 z25.d, z26.d, z29.d\n"
- "ld1w { z31.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "ld1w { z31.s }, p1/Z, [x20, #3, MUL VL]\n"
"zip2 z29.d, z26.d, z29.d\n"
"zip1 z26.d, z27.d, z30.d\n"
"zip2 z30.d, z27.d, z30.d\n"
@@ -1875,124 +1875,124 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"mov z30.b, #0x0\n"
"mov z31.b, #0x0\n"
"76:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"77:" // Height 6: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 78f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 79f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
- "add x22, x22, x20, LSL #1\n"
- "add x21, x21, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 79f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
+ "add x20, x20, x19, LSL #1\n"
"b 79f\n"
"78:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
"79:" // Height 6: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"ble 81f\n"
"80:" // Height 6: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z1.h }, p0/Z, [x26]\n"
- "ld1rqh { z2.h }, p0/Z, [x25]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "ld1rqh { z2.h }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1rqh { z3.h }, p0/Z, [x24]\n"
- "ld1rqh { z4.h }, p0/Z, [x23]\n"
+ "ld1rqh { z3.h }, p0/Z, [x23]\n"
+ "ld1rqh { z4.h }, p0/Z, [x22]\n"
"trn2 z1.d, z1.d, z2.d\n"
"trn1 z2.d, z3.d, z4.d\n"
- "ld1rqh { z5.h }, p0/Z, [x22]\n"
- "ld1rqh { z6.h }, p0/Z, [x21]\n"
+ "ld1rqh { z5.h }, p0/Z, [x21]\n"
+ "ld1rqh { z6.h }, p0/Z, [x20]\n"
"trn2 z3.d, z3.d, z4.d\n"
"trn1 z4.d, z5.d, z6.d\n"
"trn2 z5.d, z5.d, z6.d\n"
- "ld1h { z7.h }, p5/Z, [x12]\n"
- "ld1h { z6.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
".inst 0x6467e450 // bfmmla z16.s, z2.h, z7.h\n"
".inst 0x6467e498 // bfmmla z24.s, z4.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
- "sub x27, x27, #0x8\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "sub x26, x26, #0x8\n"
".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
- "cmp x27, #0x8\n"
- "add x26, x26, #0x10\n"
+ "cmp x26, #0x8\n"
+ "add x25, x25, #0x10\n"
".inst 0x6466e49c // bfmmla z28.s, z4.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
- "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
".inst 0x6467e451 // bfmmla z17.s, z2.h, z7.h\n"
".inst 0x6467e499 // bfmmla z25.s, z4.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10]\n"
- "add x24, x24, #0x10\n"
+ "ld1h { z7.h }, p5/Z, [x9]\n"
+ "add x23, x23, #0x10\n"
".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
- "add x23, x23, #0x10\n"
"add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
".inst 0x6466e49d // bfmmla z29.s, z4.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
- "add x21, x21, #0x10\n"
+ "add x20, x20, #0x10\n"
".inst 0x6467e452 // bfmmla z18.s, z2.h, z7.h\n"
".inst 0x6467e49a // bfmmla z26.s, z4.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
".inst 0x6466e456 // bfmmla z22.s, z2.h, z6.h\n"
".inst 0x6466e49e // bfmmla z30.s, z4.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
".inst 0x6467e453 // bfmmla z19.s, z2.h, z7.h\n"
".inst 0x6467e49b // bfmmla z27.s, z4.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x11, #2, MUL VL]\n"
".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
".inst 0x6466e457 // bfmmla z23.s, z2.h, z6.h\n"
".inst 0x6466e49f // bfmmla z31.s, z4.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #3, MUL VL]\n"
".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
- "addvl x12, x12, #4\n"
+ "addvl x11, x11, #4\n"
".inst 0x6467e470 // bfmmla z16.s, z3.h, z7.h\n"
".inst 0x6467e4b8 // bfmmla z24.s, z5.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x11, #2, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
".inst 0x6466e474 // bfmmla z20.s, z3.h, z6.h\n"
".inst 0x6466e4bc // bfmmla z28.s, z5.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x11, #3, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
- "addvl x11, x11, #4\n"
+ "addvl x10, x10, #4\n"
".inst 0x6467e471 // bfmmla z17.s, z3.h, z7.h\n"
".inst 0x6467e4b9 // bfmmla z25.s, z5.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
".inst 0x6466e475 // bfmmla z21.s, z3.h, z6.h\n"
".inst 0x6466e4bd // bfmmla z29.s, z5.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
- "addvl x10, x10, #4\n"
+ "addvl x9, x9, #4\n"
".inst 0x6467e472 // bfmmla z18.s, z3.h, z7.h\n"
".inst 0x6467e4ba // bfmmla z26.s, z5.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x9, #2, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #2, MUL VL]\n"
".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
".inst 0x6466e476 // bfmmla z22.s, z3.h, z6.h\n"
".inst 0x6466e4be // bfmmla z30.s, z5.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x9, #3, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x28, #3, MUL VL]\n"
".inst 0x6467e42b // bfmmla z11.s, z1.h, z7.h\n"
- "addvl x9, x9, #4\n"
+ "addvl x28, x28, #4\n"
".inst 0x6467e473 // bfmmla z19.s, z3.h, z7.h\n"
".inst 0x6467e4bb // bfmmla z27.s, z5.h, z7.h\n"
".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
@@ -2000,87 +2000,87 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
".inst 0x6466e4bf // bfmmla z31.s, z5.h, z6.h\n"
"bgt 80b\n"
"81:" // Height 6: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z1.h }, p0/Z, [x26]\n"
- "ld1rqh { z2.h }, p0/Z, [x25]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "ld1rqh { z2.h }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1rqh { z3.h }, p0/Z, [x24]\n"
- "ld1rqh { z4.h }, p0/Z, [x23]\n"
+ "ld1rqh { z3.h }, p0/Z, [x23]\n"
+ "ld1rqh { z4.h }, p0/Z, [x22]\n"
"trn2 z1.d, z1.d, z2.d\n"
"trn1 z2.d, z3.d, z4.d\n"
- "ld1rqh { z5.h }, p0/Z, [x22]\n"
- "ld1rqh { z6.h }, p0/Z, [x21]\n"
+ "ld1rqh { z5.h }, p0/Z, [x21]\n"
+ "ld1rqh { z6.h }, p0/Z, [x20]\n"
"trn2 z3.d, z3.d, z4.d\n"
"trn1 z4.d, z5.d, z6.d\n"
"trn2 z5.d, z5.d, z6.d\n"
- "ld1h { z7.h }, p5/Z, [x12]\n"
- "ld1h { z6.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
".inst 0x6467e450 // bfmmla z16.s, z2.h, z7.h\n"
".inst 0x6467e498 // bfmmla z24.s, z4.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
- "subs x27, x27, #0x4\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "subs x26, x26, #0x4\n"
".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
- "addvl x12, x12, #2\n"
+ "addvl x11, x11, #2\n"
".inst 0x6466e49c // bfmmla z28.s, z4.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
- "addvl x11, x11, #2\n"
+ "addvl x10, x10, #2\n"
".inst 0x6467e451 // bfmmla z17.s, z2.h, z7.h\n"
".inst 0x6467e499 // bfmmla z25.s, z4.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10]\n"
+ "ld1h { z7.h }, p5/Z, [x9]\n"
".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
".inst 0x6466e49d // bfmmla z29.s, z4.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
- "addvl x10, x10, #2\n"
+ "addvl x9, x9, #2\n"
".inst 0x6467e452 // bfmmla z18.s, z2.h, z7.h\n"
".inst 0x6467e49a // bfmmla z26.s, z4.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
".inst 0x6466e456 // bfmmla z22.s, z2.h, z6.h\n"
".inst 0x6466e49e // bfmmla z30.s, z4.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
- "addvl x9, x9, #2\n"
+ "addvl x28, x28, #2\n"
".inst 0x6467e453 // bfmmla z19.s, z2.h, z7.h\n"
".inst 0x6467e49b // bfmmla z27.s, z4.h, z7.h\n"
".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
".inst 0x6466e457 // bfmmla z23.s, z2.h, z6.h\n"
".inst 0x6466e49f // bfmmla z31.s, z4.h, z6.h\n"
"ble 82f\n"
- "ld1h { z7.h }, p5/Z, [x12]\n"
- "ld1h { z6.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
".inst 0x6467e470 // bfmmla z16.s, z3.h, z7.h\n"
".inst 0x6467e4b8 // bfmmla z24.s, z5.h, z7.h\n"
".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
- "addvl x12, x12, #2\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "addvl x11, x11, #2\n"
".inst 0x6466e474 // bfmmla z20.s, z3.h, z6.h\n"
".inst 0x6466e4bc // bfmmla z28.s, z5.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
- "addvl x11, x11, #2\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "addvl x10, x10, #2\n"
".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
".inst 0x6467e471 // bfmmla z17.s, z3.h, z7.h\n"
".inst 0x6467e4b9 // bfmmla z25.s, z5.h, z7.h\n"
".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
- "ld1h { z7.h }, p5/Z, [x10]\n"
+ "ld1h { z7.h }, p5/Z, [x9]\n"
".inst 0x6466e475 // bfmmla z21.s, z3.h, z6.h\n"
".inst 0x6466e4bd // bfmmla z29.s, z5.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
- "addvl x10, x10, #2\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "addvl x9, x9, #2\n"
".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
".inst 0x6467e472 // bfmmla z18.s, z3.h, z7.h\n"
".inst 0x6467e4ba // bfmmla z26.s, z5.h, z7.h\n"
".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
".inst 0x6466e476 // bfmmla z22.s, z3.h, z6.h\n"
".inst 0x6466e4be // bfmmla z30.s, z5.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
- "addvl x9, x9, #2\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
+ "addvl x28, x28, #2\n"
".inst 0x6467e42b // bfmmla z11.s, z1.h, z7.h\n"
".inst 0x6467e473 // bfmmla z19.s, z3.h, z7.h\n"
".inst 0x6467e4bb // bfmmla z27.s, z5.h, z7.h\n"
@@ -2088,21 +2088,21 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
".inst 0x6466e477 // bfmmla z23.s, z3.h, z6.h\n"
".inst 0x6466e4bf // bfmmla z31.s, z5.h, z6.h\n"
"82:" // Height 6: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 77b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"uzp1 z7.d, z8.d, z12.d\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"uzp2 z8.d, z8.d, z12.d\n"
"uzp1 z12.d, z9.d, z13.d\n"
"uzp2 z9.d, z9.d, z13.d\n"
"uzp1 z13.d, z10.d, z14.d\n"
- "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"uzp2 z10.d, z10.d, z14.d\n"
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
@@ -2123,10 +2123,10 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"uzp1 z30.d, z27.d, z31.d\n"
"uzp2 z27.d, z27.d, z31.d\n"
"tbz %x[flags], #1, 83f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
"fmin z7.s, p5/M, z7.s, z1.s\n"
"fmin z12.s, p5/M, z12.s, z1.s\n"
"fmin z13.s, p5/M, z13.s, z1.s\n"
@@ -2176,50 +2176,50 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"fmax z26.s, p5/M, z26.s, z0.s\n"
"fmax z27.s, p5/M, z27.s, z0.s\n"
"83:" // Height 6: No activation
- "st1w { z7.s }, p4, [x13]\n"
- "st1w { z12.s }, p3, [x13, #1, MUL VL]\n"
- "st1w { z13.s }, p2, [x13, #2, MUL VL]\n"
- "st1w { z14.s }, p1, [x13, #3, MUL VL]\n"
- "addvl x13, x13, #4\n"
- "st1w { z8.s }, p4, [x25]\n"
- "st1w { z9.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z15.s }, p4, [x24]\n"
- "st1w { z20.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z21.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z22.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x23]\n"
- "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z23.s }, p4, [x22]\n"
- "st1w { z28.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z29.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z30.s }, p1, [x22, #3, MUL VL]\n"
- "st1w { z24.s }, p4, [x21]\n"
- "st1w { z25.s }, p3, [x21, #1, MUL VL]\n"
- "st1w { z26.s }, p2, [x21, #2, MUL VL]\n"
- "st1w { z27.s }, p1, [x21, #3, MUL VL]\n"
+ "st1w { z7.s }, p4, [x12]\n"
+ "st1w { z12.s }, p3, [x12, #1, MUL VL]\n"
+ "st1w { z13.s }, p2, [x12, #2, MUL VL]\n"
+ "st1w { z14.s }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1w { z8.s }, p4, [x24]\n"
+ "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z15.s }, p4, [x23]\n"
+ "st1w { z20.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z21.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z22.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x22]\n"
+ "st1w { z17.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z23.s }, p4, [x21]\n"
+ "st1w { z28.s }, p3, [x21, #1, MUL VL]\n"
+ "st1w { z29.s }, p2, [x21, #2, MUL VL]\n"
+ "st1w { z30.s }, p1, [x21, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x20]\n"
+ "st1w { z25.s }, p3, [x20, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x20, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x20, #3, MUL VL]\n"
"84:" // Height 6: Writeback done
- "decw x14, ALL, MUL #4\n"
- "cmp x14, XZR\n"
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
"bgt 72b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 86f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 85f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"85:" // Update direct input
- "mov x20, #0xc\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0xc\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"86:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
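(Note: the hunks above follow one mechanical pattern: the revert shifts every general-purpose register the kernel uses down by one — x20 becomes x19, x13 becomes x12, and so on — bringing x19 back into service, and accordingly re-adds "x19" to the asm clobber list. Below is a minimal sketch, not taken from this patch, of why that clobber entry matters; the function name add_via_x19 is purely illustrative. x19 is callee-saved under AAPCS64, so an inline-asm block that scratches it must declare the clobber so the compiler saves and restores the caller's value.

#include <cstdint>

// Hypothetical example, not part of this patch: stage a value through x19
// inside inline asm and declare the register clobbered, mirroring the
// clobber-list change made throughout the kernels above.
int64_t add_via_x19(int64_t a, int64_t b) {
#if defined(__aarch64__)
    int64_t out;
    __asm__ volatile(
        "mov x19, %x[a]\n"          // scratch the callee-saved register
        "add %x[out], x19, %x[b]\n" // consume it before the block ends
        : [out] "=r"(out)
        : [a] "r"(a), [b] "r"(b)
        : "x19");                   // tell the compiler x19 was written
    return out;
#else
    return a + b;                   // portable fallback for non-AArch64 hosts
#endif
}

Omitting "x19" from the clobber list would let the compiler keep a live value in that register across the asm block and have it silently corrupted — exactly the class of bug the exhaustive clobber lists in these generated kernels guard against.)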
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/a64fx.cpp
index 5f093bf08a..181022bf51 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/a64fx.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/a64fx.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -105,53 +105,53 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"cmp %x[M], #0x2\n"
"bgt 27f\n"
"beq 14f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "cnth x21, ALL, MUL #3\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x20, x9, x20, LSL #1\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cnth x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 3f\n"
- "dech x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 3f\n"
- "dech x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 3f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"3:" // Height 1: B setup done
- "mov x20, #0x0\n"
- "whilelt p3.h, x20, x14\n"
- "inch x20\n"
- "whilelt p2.h, x20, x14\n"
- "inch x20\n"
- "whilelt p1.h, x20, x14\n"
- "inch x20\n"
- "whilelt p0.h, x20, x14\n"
- "cbz x15, 4f\n"
- "ld1h { z8.h }, p4/Z, [x15]\n"
- "ld1h { z9.h }, p4/Z, [x15, #1, MUL VL]\n"
- "ld1h { z10.h }, p4/Z, [x15, #2, MUL VL]\n"
- "ld1h { z11.h }, p4/Z, [x15, #3, MUL VL]\n"
- "addvl x15, x15, #4\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p0.h, x19, x13\n"
+ "cbz x14, 4f\n"
+ "ld1h { z8.h }, p4/Z, [x14]\n"
+ "ld1h { z9.h }, p4/Z, [x14, #1, MUL VL]\n"
+ "ld1h { z10.h }, p4/Z, [x14, #2, MUL VL]\n"
+ "ld1h { z11.h }, p4/Z, [x14, #3, MUL VL]\n"
+ "addvl x14, x14, #4\n"
"b 6f\n"
"4:" // Height 1: no bias
"tbz %x[flags], #0, 5f\n"
- "ld1h { z8.h }, p3/Z, [x13]\n"
- "ld1h { z9.h }, p2/Z, [x13, #1, MUL VL]\n"
- "ld1h { z10.h }, p1/Z, [x13, #2, MUL VL]\n"
- "ld1h { z11.h }, p0/Z, [x13, #3, MUL VL]\n"
+ "ld1h { z8.h }, p3/Z, [x12]\n"
+ "ld1h { z9.h }, p2/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z10.h }, p1/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z11.h }, p0/Z, [x12, #3, MUL VL]\n"
"b 6f\n"
"5:" // Height 1: no accumulate
"mov z8.b, #0x0\n"
@@ -159,64 +159,64 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"mov z10.b, #0x0\n"
"mov z11.b, #0x0\n"
"6:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"7:" // Height 1: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 8f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 9f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 9f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
"b 9f\n"
"8:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"9:" // Height 1: input setup done
- "subs x27, x27, #0x1\n"
- "ld1rh { z0.h }, p4/Z, [x26]\n"
- "ld1h { z6.h }, p4/Z, [x12]\n"
- "ld1h { z7.h }, p4/Z, [x11]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1h { z6.h }, p4/Z, [x11]\n"
+ "ld1h { z7.h }, p4/Z, [x10]\n"
"ble 11f\n"
"10:" // Height 1: Multiply loop: Main loop
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
- "ld1h { z7.h }, p4/Z, [x9]\n"
- "addvl x12, x12, #1\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
+ "ld1h { z7.h }, p4/Z, [x28]\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z10.h, p4/M, z6.h, z0.h\n"
"fmla z11.h, p4/M, z7.h, z0.h\n"
- "add x26, x26, #0x2\n"
- "subs x27, x27, #0x1\n"
- "ld1rh { z0.h }, p4/Z, [x26]\n"
- "ld1h { z6.h }, p4/Z, [x12]\n"
- "addvl x10, x10, #1\n"
+ "add x25, x25, #0x2\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1h { z6.h }, p4/Z, [x11]\n"
"addvl x9, x9, #1\n"
- "ld1h { z7.h }, p4/Z, [x11]\n"
+ "addvl x28, x28, #1\n"
+ "ld1h { z7.h }, p4/Z, [x10]\n"
"bgt 10b\n"
"11:" // Height 1: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
- "ld1h { z7.h }, p4/Z, [x9]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
+ "ld1h { z7.h }, p4/Z, [x28]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"fmla z10.h, p4/M, z6.h, z0.h\n"
"fmla z11.h, p4/M, z7.h, z0.h\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"bne 7b\n"
"tbz %x[flags], #1, 12f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p4/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p4/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z1.h }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z0.h }, p4/Z, [x19]\n"
"fmin z8.h, p4/M, z8.h, z1.h\n"
"fmin z9.h, p4/M, z9.h, z1.h\n"
"fmin z10.h, p4/M, z10.h, z1.h\n"
@@ -226,74 +226,74 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"fmax z10.h, p4/M, z10.h, z0.h\n"
"fmax z11.h, p4/M, z11.h, z0.h\n"
"12:" // Height 1: No activation
- "st1h { z8.h }, p3, [x13]\n"
- "st1h { z9.h }, p2, [x13, #1, MUL VL]\n"
- "st1h { z10.h }, p1, [x13, #2, MUL VL]\n"
- "st1h { z11.h }, p0, [x13, #3, MUL VL]\n"
- "addvl x13, x13, #4\n"
+ "st1h { z8.h }, p3, [x12]\n"
+ "st1h { z9.h }, p2, [x12, #1, MUL VL]\n"
+ "st1h { z10.h }, p1, [x12, #2, MUL VL]\n"
+ "st1h { z11.h }, p0, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
"13:" // Height 1: Writeback done
- "dech x14, ALL, MUL #4\n"
- "cmp x14, XZR\n"
+ "dech x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
"bgt 2b\n"
"b 80f\n"
"14:" // Height 2
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"15:" // Height 2: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "cnth x21, ALL, MUL #3\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x20, x9, x20, LSL #1\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cnth x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 16f\n"
- "dech x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 16f\n"
- "dech x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 16f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"16:" // Height 2: B setup done
- "mov x20, #0x0\n"
- "whilelt p3.h, x20, x14\n"
- "inch x20\n"
- "whilelt p2.h, x20, x14\n"
- "inch x20\n"
- "whilelt p1.h, x20, x14\n"
- "inch x20\n"
- "whilelt p0.h, x20, x14\n"
- "cbz x15, 17f\n"
- "ld1h { z8.h }, p4/Z, [x15]\n"
- "ld1h { z9.h }, p4/Z, [x15, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p0.h, x19, x13\n"
+ "cbz x14, 17f\n"
+ "ld1h { z8.h }, p4/Z, [x14]\n"
+ "ld1h { z9.h }, p4/Z, [x14, #1, MUL VL]\n"
"mov z12.d, z8.d\n"
"mov z13.d, z9.d\n"
- "ld1h { z10.h }, p4/Z, [x15, #2, MUL VL]\n"
- "ld1h { z11.h }, p4/Z, [x15, #3, MUL VL]\n"
+ "ld1h { z10.h }, p4/Z, [x14, #2, MUL VL]\n"
+ "ld1h { z11.h }, p4/Z, [x14, #3, MUL VL]\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
- "addvl x15, x15, #4\n"
+ "addvl x14, x14, #4\n"
"b 19f\n"
"17:" // Height 2: no bias
"tbz %x[flags], #0, 18f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "ld1h { z8.h }, p3/Z, [x13]\n"
- "ld1h { z9.h }, p2/Z, [x13, #1, MUL VL]\n"
- "ld1h { z10.h }, p1/Z, [x13, #2, MUL VL]\n"
- "ld1h { z11.h }, p0/Z, [x13, #3, MUL VL]\n"
- "ld1h { z12.h }, p3/Z, [x25]\n"
- "ld1h { z13.h }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1h { z14.h }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1h { z15.h }, p0/Z, [x25, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "ld1h { z8.h }, p3/Z, [x12]\n"
+ "ld1h { z9.h }, p2/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z10.h }, p1/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z11.h }, p0/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z12.h }, p3/Z, [x24]\n"
+ "ld1h { z13.h }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z14.h }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z15.h }, p0/Z, [x24, #3, MUL VL]\n"
"b 19f\n"
"18:" // Height 2: no accumulate
"mov z8.b, #0x0\n"
@@ -305,80 +305,80 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"mov z14.b, #0x0\n"
"mov z15.b, #0x0\n"
"19:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"20:" // Height 2: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 21f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 22f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 22f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
"b 22f\n"
"21:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
"22:" // Height 2: input setup done
- "subs x27, x27, #0x1\n"
- "ld1rh { z0.h }, p4/Z, [x26]\n"
- "ld1rh { z1.h }, p4/Z, [x25]\n"
- "ld1h { z6.h }, p4/Z, [x12]\n"
- "ld1h { z7.h }, p4/Z, [x11]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1rh { z1.h }, p4/Z, [x24]\n"
+ "ld1h { z6.h }, p4/Z, [x11]\n"
+ "ld1h { z7.h }, p4/Z, [x10]\n"
"ble 24f\n"
"23:" // Height 2: Multiply loop: Main loop
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z12.h, p4/M, z6.h, z1.h\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
- "addvl x12, x12, #1\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
+ "addvl x11, x11, #1\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
- "ld1h { z7.h }, p4/Z, [x9]\n"
- "addvl x11, x11, #1\n"
- "add x26, x26, #0x2\n"
- "subs x27, x27, #0x1\n"
+ "ld1h { z7.h }, p4/Z, [x28]\n"
+ "addvl x10, x10, #1\n"
+ "add x25, x25, #0x2\n"
+ "subs x26, x26, #0x1\n"
"fmla z10.h, p4/M, z6.h, z0.h\n"
"fmla z14.h, p4/M, z6.h, z1.h\n"
- "add x25, x25, #0x2\n"
+ "add x24, x24, #0x2\n"
"fmla z11.h, p4/M, z7.h, z0.h\n"
"fmla z15.h, p4/M, z7.h, z1.h\n"
- "addvl x10, x10, #1\n"
- "ld1rh { z0.h }, p4/Z, [x26]\n"
- "ld1rh { z1.h }, p4/Z, [x25]\n"
"addvl x9, x9, #1\n"
- "ld1h { z6.h }, p4/Z, [x12]\n"
- "ld1h { z7.h }, p4/Z, [x11]\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1rh { z1.h }, p4/Z, [x24]\n"
+ "addvl x28, x28, #1\n"
+ "ld1h { z6.h }, p4/Z, [x11]\n"
+ "ld1h { z7.h }, p4/Z, [x10]\n"
"bgt 23b\n"
"24:" // Height 2: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z12.h, p4/M, z6.h, z1.h\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
- "ld1h { z7.h }, p4/Z, [x9]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ld1h { z7.h }, p4/Z, [x28]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"fmla z10.h, p4/M, z6.h, z0.h\n"
"fmla z14.h, p4/M, z6.h, z1.h\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z11.h, p4/M, z7.h, z0.h\n"
"fmla z15.h, p4/M, z7.h, z1.h\n"
- "addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"bne 20b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
"tbz %x[flags], #1, 25f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p4/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p4/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z1.h }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z0.h }, p4/Z, [x19]\n"
"fmin z8.h, p4/M, z8.h, z1.h\n"
"fmin z9.h, p4/M, z9.h, z1.h\n"
"fmin z10.h, p4/M, z10.h, z1.h\n"
@@ -396,87 +396,87 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"fmax z14.h, p4/M, z14.h, z0.h\n"
"fmax z15.h, p4/M, z15.h, z0.h\n"
"25:" // Height 2: No activation
- "st1h { z8.h }, p3, [x13]\n"
- "st1h { z9.h }, p2, [x13, #1, MUL VL]\n"
- "st1h { z10.h }, p1, [x13, #2, MUL VL]\n"
- "st1h { z11.h }, p0, [x13, #3, MUL VL]\n"
- "addvl x13, x13, #4\n"
- "st1h { z12.h }, p3, [x25]\n"
- "st1h { z13.h }, p2, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p1, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p0, [x25, #3, MUL VL]\n"
+ "st1h { z8.h }, p3, [x12]\n"
+ "st1h { z9.h }, p2, [x12, #1, MUL VL]\n"
+ "st1h { z10.h }, p1, [x12, #2, MUL VL]\n"
+ "st1h { z11.h }, p0, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1h { z12.h }, p3, [x24]\n"
+ "st1h { z13.h }, p2, [x24, #1, MUL VL]\n"
+ "st1h { z14.h }, p1, [x24, #2, MUL VL]\n"
+ "st1h { z15.h }, p0, [x24, #3, MUL VL]\n"
"26:" // Height 2: Writeback done
- "dech x14, ALL, MUL #4\n"
- "cmp x14, XZR\n"
+ "dech x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
"bgt 15b\n"
"b 80f\n"
"27:" // Height 3
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"28:" // Height 3: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "cnth x21, ALL, MUL #3\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x20, x9, x20, LSL #1\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cnth x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 29f\n"
- "dech x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 29f\n"
- "dech x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 29f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"29:" // Height 3: B setup done
- "mov x20, #0x0\n"
- "whilelt p3.h, x20, x14\n"
- "inch x20\n"
- "whilelt p2.h, x20, x14\n"
- "inch x20\n"
- "whilelt p1.h, x20, x14\n"
- "inch x20\n"
- "whilelt p0.h, x20, x14\n"
- "cbz x15, 30f\n"
- "ld1h { z8.h }, p4/Z, [x15]\n"
- "ld1h { z9.h }, p4/Z, [x15, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p0.h, x19, x13\n"
+ "cbz x14, 30f\n"
+ "ld1h { z8.h }, p4/Z, [x14]\n"
+ "ld1h { z9.h }, p4/Z, [x14, #1, MUL VL]\n"
"mov z12.d, z8.d\n"
"mov z13.d, z9.d\n"
- "ld1h { z10.h }, p4/Z, [x15, #2, MUL VL]\n"
- "ld1h { z11.h }, p4/Z, [x15, #3, MUL VL]\n"
+ "ld1h { z10.h }, p4/Z, [x14, #2, MUL VL]\n"
+ "ld1h { z11.h }, p4/Z, [x14, #3, MUL VL]\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x15, x15, #4\n"
+ "addvl x14, x14, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"b 32f\n"
"30:" // Height 3: no bias
"tbz %x[flags], #0, 31f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "ld1h { z8.h }, p3/Z, [x13]\n"
- "ld1h { z9.h }, p2/Z, [x13, #1, MUL VL]\n"
- "ld1h { z10.h }, p1/Z, [x13, #2, MUL VL]\n"
- "ld1h { z11.h }, p0/Z, [x13, #3, MUL VL]\n"
- "ld1h { z12.h }, p3/Z, [x25]\n"
- "ld1h { z13.h }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1h { z14.h }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1h { z15.h }, p0/Z, [x25, #3, MUL VL]\n"
- "ld1h { z16.h }, p3/Z, [x24]\n"
- "ld1h { z17.h }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1h { z18.h }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1h { z19.h }, p0/Z, [x24, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "ld1h { z8.h }, p3/Z, [x12]\n"
+ "ld1h { z9.h }, p2/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z10.h }, p1/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z11.h }, p0/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z12.h }, p3/Z, [x24]\n"
+ "ld1h { z13.h }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z14.h }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z15.h }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z16.h }, p3/Z, [x23]\n"
+ "ld1h { z17.h }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z18.h }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z19.h }, p0/Z, [x23, #3, MUL VL]\n"
"b 32f\n"
"31:" // Height 3: no accumulate
"mov z8.b, #0x0\n"
@@ -492,95 +492,95 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"mov z18.b, #0x0\n"
"mov z19.b, #0x0\n"
"32:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"33:" // Height 3: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 34f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 35f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 35f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
"b 35f\n"
"34:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
"35:" // Height 3: input setup done
- "subs x27, x27, #0x1\n"
- "ld1rh { z0.h }, p4/Z, [x26]\n"
- "ld1rh { z1.h }, p4/Z, [x25]\n"
- "ld1rh { z2.h }, p4/Z, [x24]\n"
- "ld1h { z6.h }, p4/Z, [x12]\n"
- "ld1h { z7.h }, p4/Z, [x11]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1rh { z1.h }, p4/Z, [x24]\n"
+ "ld1rh { z2.h }, p4/Z, [x23]\n"
+ "ld1h { z6.h }, p4/Z, [x11]\n"
+ "ld1h { z7.h }, p4/Z, [x10]\n"
"ble 37f\n"
"36:" // Height 3: Multiply loop: Main loop
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z12.h, p4/M, z6.h, z1.h\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z16.h, p4/M, z6.h, z2.h\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
- "add x26, x26, #0x2\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
+ "add x25, x25, #0x2\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
- "ld1h { z7.h }, p4/Z, [x9]\n"
- "subs x27, x27, #0x1\n"
- "add x25, x25, #0x2\n"
+ "ld1h { z7.h }, p4/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
"add x24, x24, #0x2\n"
+ "add x23, x23, #0x2\n"
"fmla z10.h, p4/M, z6.h, z0.h\n"
"fmla z14.h, p4/M, z6.h, z1.h\n"
"fmla z18.h, p4/M, z6.h, z2.h\n"
"fmla z11.h, p4/M, z7.h, z0.h\n"
- "addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"fmla z15.h, p4/M, z7.h, z1.h\n"
"fmla z19.h, p4/M, z7.h, z2.h\n"
- "ld1rh { z0.h }, p4/Z, [x26]\n"
- "ld1rh { z1.h }, p4/Z, [x25]\n"
- "ld1rh { z2.h }, p4/Z, [x24]\n"
- "ld1h { z6.h }, p4/Z, [x12]\n"
- "ld1h { z7.h }, p4/Z, [x11]\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1rh { z1.h }, p4/Z, [x24]\n"
+ "ld1rh { z2.h }, p4/Z, [x23]\n"
+ "ld1h { z6.h }, p4/Z, [x11]\n"
+ "ld1h { z7.h }, p4/Z, [x10]\n"
"bgt 36b\n"
"37:" // Height 3: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z12.h, p4/M, z6.h, z1.h\n"
- "add x28, x28, #0x1\n"
+ "add x27, x27, #0x1\n"
"fmla z16.h, p4/M, z6.h, z2.h\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
- "cmp x28, x20\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
+ "cmp x27, x19\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
- "ld1h { z7.h }, p4/Z, [x9]\n"
- "addvl x12, x12, #1\n"
+ "ld1h { z7.h }, p4/Z, [x28]\n"
+ "addvl x11, x11, #1\n"
"fmla z10.h, p4/M, z6.h, z0.h\n"
"fmla z14.h, p4/M, z6.h, z1.h\n"
- "addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z18.h, p4/M, z6.h, z2.h\n"
"fmla z11.h, p4/M, z7.h, z0.h\n"
- "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"fmla z15.h, p4/M, z7.h, z1.h\n"
"fmla z19.h, p4/M, z7.h, z2.h\n"
"bne 33b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
"tbz %x[flags], #1, 38f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p4/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p4/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z1.h }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z0.h }, p4/Z, [x19]\n"
"fmin z8.h, p4/M, z8.h, z1.h\n"
"fmin z9.h, p4/M, z9.h, z1.h\n"
"fmin z10.h, p4/M, z10.h, z1.h\n"
@@ -606,71 +606,71 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"fmax z18.h, p4/M, z18.h, z0.h\n"
"fmax z19.h, p4/M, z19.h, z0.h\n"
"38:" // Height 3: No activation
- "st1h { z8.h }, p3, [x13]\n"
- "st1h { z9.h }, p2, [x13, #1, MUL VL]\n"
- "st1h { z10.h }, p1, [x13, #2, MUL VL]\n"
- "st1h { z11.h }, p0, [x13, #3, MUL VL]\n"
- "addvl x13, x13, #4\n"
- "st1h { z12.h }, p3, [x25]\n"
- "st1h { z13.h }, p2, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p1, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p0, [x25, #3, MUL VL]\n"
- "st1h { z16.h }, p3, [x24]\n"
- "st1h { z17.h }, p2, [x24, #1, MUL VL]\n"
- "st1h { z18.h }, p1, [x24, #2, MUL VL]\n"
- "st1h { z19.h }, p0, [x24, #3, MUL VL]\n"
+ "st1h { z8.h }, p3, [x12]\n"
+ "st1h { z9.h }, p2, [x12, #1, MUL VL]\n"
+ "st1h { z10.h }, p1, [x12, #2, MUL VL]\n"
+ "st1h { z11.h }, p0, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1h { z12.h }, p3, [x24]\n"
+ "st1h { z13.h }, p2, [x24, #1, MUL VL]\n"
+ "st1h { z14.h }, p1, [x24, #2, MUL VL]\n"
+ "st1h { z15.h }, p0, [x24, #3, MUL VL]\n"
+ "st1h { z16.h }, p3, [x23]\n"
+ "st1h { z17.h }, p2, [x23, #1, MUL VL]\n"
+ "st1h { z18.h }, p1, [x23, #2, MUL VL]\n"
+ "st1h { z19.h }, p0, [x23, #3, MUL VL]\n"
"39:" // Height 3: Writeback done
- "dech x14, ALL, MUL #4\n"
- "cmp x14, XZR\n"
+ "dech x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
"bgt 28b\n"
"b 80f\n"
"40:" // Height 4
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"41:" // Height 4: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "cnth x21, ALL, MUL #3\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x20, x9, x20, LSL #1\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cnth x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 42f\n"
- "dech x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 42f\n"
- "dech x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 42f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"42:" // Height 4: B setup done
- "mov x20, #0x0\n"
- "whilelt p3.h, x20, x14\n"
- "inch x20\n"
- "whilelt p2.h, x20, x14\n"
- "inch x20\n"
- "whilelt p1.h, x20, x14\n"
- "inch x20\n"
- "whilelt p0.h, x20, x14\n"
- "cbz x15, 43f\n"
- "ld1h { z8.h }, p4/Z, [x15]\n"
- "ld1h { z9.h }, p4/Z, [x15, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p0.h, x19, x13\n"
+ "cbz x14, 43f\n"
+ "ld1h { z8.h }, p4/Z, [x14]\n"
+ "ld1h { z9.h }, p4/Z, [x14, #1, MUL VL]\n"
"mov z12.d, z8.d\n"
"mov z13.d, z9.d\n"
- "ld1h { z10.h }, p4/Z, [x15, #2, MUL VL]\n"
- "ld1h { z11.h }, p4/Z, [x15, #3, MUL VL]\n"
+ "ld1h { z10.h }, p4/Z, [x14, #2, MUL VL]\n"
+ "ld1h { z11.h }, p4/Z, [x14, #3, MUL VL]\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x15, x15, #4\n"
+ "addvl x14, x14, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"mov z20.d, z8.d\n"
@@ -680,26 +680,26 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"b 45f\n"
"43:" // Height 4: no bias
"tbz %x[flags], #0, 44f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "ld1h { z8.h }, p3/Z, [x13]\n"
- "add x23, x24, x20, LSL #1\n"
- "ld1h { z9.h }, p2/Z, [x13, #1, MUL VL]\n"
- "ld1h { z10.h }, p1/Z, [x13, #2, MUL VL]\n"
- "ld1h { z11.h }, p0/Z, [x13, #3, MUL VL]\n"
- "ld1h { z12.h }, p3/Z, [x25]\n"
- "ld1h { z13.h }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1h { z14.h }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1h { z15.h }, p0/Z, [x25, #3, MUL VL]\n"
- "ld1h { z16.h }, p3/Z, [x24]\n"
- "ld1h { z17.h }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1h { z18.h }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1h { z19.h }, p0/Z, [x24, #3, MUL VL]\n"
- "ld1h { z20.h }, p3/Z, [x23]\n"
- "ld1h { z21.h }, p2/Z, [x23, #1, MUL VL]\n"
- "ld1h { z22.h }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1h { z23.h }, p0/Z, [x23, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "ld1h { z8.h }, p3/Z, [x12]\n"
+ "add x22, x23, x19, LSL #1\n"
+ "ld1h { z9.h }, p2/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z10.h }, p1/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z11.h }, p0/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z12.h }, p3/Z, [x24]\n"
+ "ld1h { z13.h }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z14.h }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z15.h }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z16.h }, p3/Z, [x23]\n"
+ "ld1h { z17.h }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z18.h }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z19.h }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1h { z20.h }, p3/Z, [x22]\n"
+ "ld1h { z21.h }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z22.h }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z23.h }, p0/Z, [x22, #3, MUL VL]\n"
"b 45f\n"
"44:" // Height 4: no accumulate
"mov z8.b, #0x0\n"
@@ -719,94 +719,94 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"mov z22.b, #0x0\n"
"mov z23.b, #0x0\n"
"45:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"46:" // Height 4: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 47f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 48f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 48f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
"b 48f\n"
"47:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
"48:" // Height 4: input setup done
- "subs x27, x27, #0x1\n"
- "ld1rh { z0.h }, p4/Z, [x26]\n"
- "ld1rh { z1.h }, p4/Z, [x25]\n"
- "ld1rh { z2.h }, p4/Z, [x24]\n"
- "ld1rh { z3.h }, p4/Z, [x23]\n"
- "ld1h { z6.h }, p4/Z, [x12]\n"
- "ld1h { z7.h }, p4/Z, [x11]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1rh { z1.h }, p4/Z, [x24]\n"
+ "ld1rh { z2.h }, p4/Z, [x23]\n"
+ "ld1rh { z3.h }, p4/Z, [x22]\n"
+ "ld1h { z6.h }, p4/Z, [x11]\n"
+ "ld1h { z7.h }, p4/Z, [x10]\n"
"ble 50f\n"
"49:" // Height 4: Multiply loop: Main loop
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z12.h, p4/M, z6.h, z1.h\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z16.h, p4/M, z6.h, z2.h\n"
"fmla z20.h, p4/M, z6.h, z3.h\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
- "add x26, x26, #0x2\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
+ "add x25, x25, #0x2\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
- "subs x27, x27, #0x1\n"
- "add x25, x25, #0x2\n"
+ "subs x26, x26, #0x1\n"
+ "add x24, x24, #0x2\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
"fmla z21.h, p4/M, z7.h, z3.h\n"
- "ld1h { z7.h }, p4/Z, [x9]\n"
- "add x24, x24, #0x2\n"
+ "ld1h { z7.h }, p4/Z, [x28]\n"
"add x23, x23, #0x2\n"
+ "add x22, x22, #0x2\n"
"fmla z10.h, p4/M, z6.h, z0.h\n"
"fmla z14.h, p4/M, z6.h, z1.h\n"
- "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z18.h, p4/M, z6.h, z2.h\n"
"fmla z22.h, p4/M, z6.h, z3.h\n"
- "addvl x9, x9, #1\n"
- "ld1h { z6.h }, p4/Z, [x12]\n"
+ "addvl x28, x28, #1\n"
+ "ld1h { z6.h }, p4/Z, [x11]\n"
"fmla z11.h, p4/M, z7.h, z0.h\n"
"fmla z15.h, p4/M, z7.h, z1.h\n"
- "ld1rh { z0.h }, p4/Z, [x26]\n"
- "ld1rh { z1.h }, p4/Z, [x25]\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1rh { z1.h }, p4/Z, [x24]\n"
"fmla z19.h, p4/M, z7.h, z2.h\n"
"fmla z23.h, p4/M, z7.h, z3.h\n"
- "ld1rh { z2.h }, p4/Z, [x24]\n"
- "ld1rh { z3.h }, p4/Z, [x23]\n"
- "ld1h { z7.h }, p4/Z, [x11]\n"
+ "ld1rh { z2.h }, p4/Z, [x23]\n"
+ "ld1rh { z3.h }, p4/Z, [x22]\n"
+ "ld1h { z7.h }, p4/Z, [x10]\n"
"bgt 49b\n"
"50:" // Height 4: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z12.h, p4/M, z6.h, z1.h\n"
- "add x28, x28, #0x1\n"
+ "add x27, x27, #0x1\n"
"fmla z16.h, p4/M, z6.h, z2.h\n"
"fmla z20.h, p4/M, z6.h, z3.h\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
- "cmp x28, x20\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
+ "cmp x27, x19\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
"fmla z21.h, p4/M, z7.h, z3.h\n"
- "ld1h { z7.h }, p4/Z, [x9]\n"
- "addvl x10, x10, #1\n"
+ "ld1h { z7.h }, p4/Z, [x28]\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, p4/M, z6.h, z0.h\n"
"fmla z14.h, p4/M, z6.h, z1.h\n"
- "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"fmla z18.h, p4/M, z6.h, z2.h\n"
"fmla z22.h, p4/M, z6.h, z3.h\n"
"fmla z11.h, p4/M, z7.h, z0.h\n"
@@ -814,15 +814,15 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"fmla z19.h, p4/M, z7.h, z2.h\n"
"fmla z23.h, p4/M, z7.h, z3.h\n"
"bne 46b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
"tbz %x[flags], #1, 51f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p4/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p4/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z1.h }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z0.h }, p4/Z, [x19]\n"
"fmin z8.h, p4/M, z8.h, z1.h\n"
"fmin z9.h, p4/M, z9.h, z1.h\n"
"fmin z10.h, p4/M, z10.h, z1.h\n"
@@ -856,75 +856,75 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"fmax z22.h, p4/M, z22.h, z0.h\n"
"fmax z23.h, p4/M, z23.h, z0.h\n"
"51:" // Height 4: No activation
- "st1h { z8.h }, p3, [x13]\n"
- "st1h { z9.h }, p2, [x13, #1, MUL VL]\n"
- "st1h { z10.h }, p1, [x13, #2, MUL VL]\n"
- "st1h { z11.h }, p0, [x13, #3, MUL VL]\n"
- "addvl x13, x13, #4\n"
- "st1h { z12.h }, p3, [x25]\n"
- "st1h { z13.h }, p2, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p1, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p0, [x25, #3, MUL VL]\n"
- "st1h { z16.h }, p3, [x24]\n"
- "st1h { z17.h }, p2, [x24, #1, MUL VL]\n"
- "st1h { z18.h }, p1, [x24, #2, MUL VL]\n"
- "st1h { z19.h }, p0, [x24, #3, MUL VL]\n"
- "st1h { z20.h }, p3, [x23]\n"
- "st1h { z21.h }, p2, [x23, #1, MUL VL]\n"
- "st1h { z22.h }, p1, [x23, #2, MUL VL]\n"
- "st1h { z23.h }, p0, [x23, #3, MUL VL]\n"
+ "st1h { z8.h }, p3, [x12]\n"
+ "st1h { z9.h }, p2, [x12, #1, MUL VL]\n"
+ "st1h { z10.h }, p1, [x12, #2, MUL VL]\n"
+ "st1h { z11.h }, p0, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1h { z12.h }, p3, [x24]\n"
+ "st1h { z13.h }, p2, [x24, #1, MUL VL]\n"
+ "st1h { z14.h }, p1, [x24, #2, MUL VL]\n"
+ "st1h { z15.h }, p0, [x24, #3, MUL VL]\n"
+ "st1h { z16.h }, p3, [x23]\n"
+ "st1h { z17.h }, p2, [x23, #1, MUL VL]\n"
+ "st1h { z18.h }, p1, [x23, #2, MUL VL]\n"
+ "st1h { z19.h }, p0, [x23, #3, MUL VL]\n"
+ "st1h { z20.h }, p3, [x22]\n"
+ "st1h { z21.h }, p2, [x22, #1, MUL VL]\n"
+ "st1h { z22.h }, p1, [x22, #2, MUL VL]\n"
+ "st1h { z23.h }, p0, [x22, #3, MUL VL]\n"
"52:" // Height 4: Writeback done
- "dech x14, ALL, MUL #4\n"
- "cmp x14, XZR\n"
+ "dech x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
"bgt 41b\n"
"b 80f\n"
"53:" // Height 5
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"54:" // Height 5: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "cnth x21, ALL, MUL #3\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x20, x9, x20, LSL #1\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cnth x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 55f\n"
- "dech x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 55f\n"
- "dech x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 55f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"55:" // Height 5: B setup done
- "mov x20, #0x0\n"
- "whilelt p3.h, x20, x14\n"
- "inch x20\n"
- "whilelt p2.h, x20, x14\n"
- "inch x20\n"
- "whilelt p1.h, x20, x14\n"
- "inch x20\n"
- "whilelt p0.h, x20, x14\n"
- "cbz x15, 56f\n"
- "ld1h { z8.h }, p4/Z, [x15]\n"
- "ld1h { z9.h }, p4/Z, [x15, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p0.h, x19, x13\n"
+ "cbz x14, 56f\n"
+ "ld1h { z8.h }, p4/Z, [x14]\n"
+ "ld1h { z9.h }, p4/Z, [x14, #1, MUL VL]\n"
"mov z12.d, z8.d\n"
"mov z13.d, z9.d\n"
- "ld1h { z10.h }, p4/Z, [x15, #2, MUL VL]\n"
- "ld1h { z11.h }, p4/Z, [x15, #3, MUL VL]\n"
+ "ld1h { z10.h }, p4/Z, [x14, #2, MUL VL]\n"
+ "ld1h { z11.h }, p4/Z, [x14, #3, MUL VL]\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x15, x15, #4\n"
+ "addvl x14, x14, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"mov z20.d, z8.d\n"
@@ -938,31 +938,31 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"b 58f\n"
"56:" // Height 5: no bias
"tbz %x[flags], #0, 57f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "ld1h { z8.h }, p3/Z, [x13]\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
- "ld1h { z9.h }, p2/Z, [x13, #1, MUL VL]\n"
- "ld1h { z10.h }, p1/Z, [x13, #2, MUL VL]\n"
- "ld1h { z11.h }, p0/Z, [x13, #3, MUL VL]\n"
- "ld1h { z12.h }, p3/Z, [x25]\n"
- "ld1h { z13.h }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1h { z14.h }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1h { z15.h }, p0/Z, [x25, #3, MUL VL]\n"
- "ld1h { z16.h }, p3/Z, [x24]\n"
- "ld1h { z17.h }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1h { z18.h }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1h { z19.h }, p0/Z, [x24, #3, MUL VL]\n"
- "ld1h { z20.h }, p3/Z, [x23]\n"
- "ld1h { z21.h }, p2/Z, [x23, #1, MUL VL]\n"
- "ld1h { z22.h }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1h { z23.h }, p0/Z, [x23, #3, MUL VL]\n"
- "ld1h { z24.h }, p3/Z, [x22]\n"
- "ld1h { z25.h }, p2/Z, [x22, #1, MUL VL]\n"
- "ld1h { z26.h }, p1/Z, [x22, #2, MUL VL]\n"
- "ld1h { z27.h }, p0/Z, [x22, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "ld1h { z8.h }, p3/Z, [x12]\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "ld1h { z9.h }, p2/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z10.h }, p1/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z11.h }, p0/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z12.h }, p3/Z, [x24]\n"
+ "ld1h { z13.h }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z14.h }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z15.h }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z16.h }, p3/Z, [x23]\n"
+ "ld1h { z17.h }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z18.h }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z19.h }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1h { z20.h }, p3/Z, [x22]\n"
+ "ld1h { z21.h }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z22.h }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z23.h }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1h { z24.h }, p3/Z, [x21]\n"
+ "ld1h { z25.h }, p2/Z, [x21, #1, MUL VL]\n"
+ "ld1h { z26.h }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1h { z27.h }, p0/Z, [x21, #3, MUL VL]\n"
"b 58f\n"
"57:" // Height 5: no accumulate
"mov z8.b, #0x0\n"
@@ -986,104 +986,104 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"mov z26.b, #0x0\n"
"mov z27.b, #0x0\n"
"58:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"59:" // Height 5: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 60f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 61f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
- "add x22, x22, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 61f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
"b 61f\n"
"60:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
"61:" // Height 5: input setup done
- "subs x27, x27, #0x1\n"
- "ld1rh { z0.h }, p4/Z, [x26]\n"
- "ld1rh { z1.h }, p4/Z, [x25]\n"
- "ld1rh { z2.h }, p4/Z, [x24]\n"
- "ld1rh { z3.h }, p4/Z, [x23]\n"
- "ld1rh { z4.h }, p4/Z, [x22]\n"
- "ld1h { z6.h }, p4/Z, [x12]\n"
- "ld1h { z7.h }, p4/Z, [x11]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1rh { z1.h }, p4/Z, [x24]\n"
+ "ld1rh { z2.h }, p4/Z, [x23]\n"
+ "ld1rh { z3.h }, p4/Z, [x22]\n"
+ "ld1rh { z4.h }, p4/Z, [x21]\n"
+ "ld1h { z6.h }, p4/Z, [x11]\n"
+ "ld1h { z7.h }, p4/Z, [x10]\n"
"ble 63f\n"
"62:" // Height 5: Multiply loop: Main loop
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z12.h, p4/M, z6.h, z1.h\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z16.h, p4/M, z6.h, z2.h\n"
"fmla z20.h, p4/M, z6.h, z3.h\n"
- "add x26, x26, #0x2\n"
- "subs x27, x27, #0x1\n"
+ "add x25, x25, #0x2\n"
+ "subs x26, x26, #0x1\n"
"fmla z24.h, p4/M, z6.h, z4.h\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
- "add x25, x25, #0x2\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
+ "add x24, x24, #0x2\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
- "add x24, x24, #0x2\n"
"add x23, x23, #0x2\n"
+ "add x22, x22, #0x2\n"
"fmla z21.h, p4/M, z7.h, z3.h\n"
"fmla z25.h, p4/M, z7.h, z4.h\n"
- "ld1h { z7.h }, p4/Z, [x9]\n"
- "add x22, x22, #0x2\n"
+ "ld1h { z7.h }, p4/Z, [x28]\n"
+ "add x21, x21, #0x2\n"
"fmla z10.h, p4/M, z6.h, z0.h\n"
"fmla z14.h, p4/M, z6.h, z1.h\n"
- "addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"fmla z18.h, p4/M, z6.h, z2.h\n"
"fmla z22.h, p4/M, z6.h, z3.h\n"
"fmla z26.h, p4/M, z6.h, z4.h\n"
"fmla z11.h, p4/M, z7.h, z0.h\n"
- "ld1rh { z0.h }, p4/Z, [x26]\n"
- "ld1h { z6.h }, p4/Z, [x12]\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1h { z6.h }, p4/Z, [x11]\n"
"fmla z15.h, p4/M, z7.h, z1.h\n"
"fmla z19.h, p4/M, z7.h, z2.h\n"
- "ld1rh { z1.h }, p4/Z, [x25]\n"
- "ld1rh { z2.h }, p4/Z, [x24]\n"
+ "ld1rh { z1.h }, p4/Z, [x24]\n"
+ "ld1rh { z2.h }, p4/Z, [x23]\n"
"fmla z23.h, p4/M, z7.h, z3.h\n"
"fmla z27.h, p4/M, z7.h, z4.h\n"
- "ld1rh { z3.h }, p4/Z, [x23]\n"
- "ld1rh { z4.h }, p4/Z, [x22]\n"
- "ld1h { z7.h }, p4/Z, [x11]\n"
+ "ld1rh { z3.h }, p4/Z, [x22]\n"
+ "ld1rh { z4.h }, p4/Z, [x21]\n"
+ "ld1h { z7.h }, p4/Z, [x10]\n"
"bgt 62b\n"
"63:" // Height 5: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z12.h, p4/M, z6.h, z1.h\n"
- "add x28, x28, #0x1\n"
+ "add x27, x27, #0x1\n"
"fmla z16.h, p4/M, z6.h, z2.h\n"
"fmla z20.h, p4/M, z6.h, z3.h\n"
- "cmp x28, x20\n"
- "addvl x12, x12, #1\n"
+ "cmp x27, x19\n"
+ "addvl x11, x11, #1\n"
"fmla z24.h, p4/M, z6.h, z4.h\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
- "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z21.h, p4/M, z7.h, z3.h\n"
"fmla z25.h, p4/M, z7.h, z4.h\n"
- "ld1h { z7.h }, p4/Z, [x9]\n"
- "addvl x9, x9, #1\n"
+ "ld1h { z7.h }, p4/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
"fmla z10.h, p4/M, z6.h, z0.h\n"
"fmla z14.h, p4/M, z6.h, z1.h\n"
"fmla z18.h, p4/M, z6.h, z2.h\n"
@@ -1095,16 +1095,16 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"fmla z23.h, p4/M, z7.h, z3.h\n"
"fmla z27.h, p4/M, z7.h, z4.h\n"
"bne 59b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
"tbz %x[flags], #1, 64f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p4/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p4/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z1.h }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z0.h }, p4/Z, [x19]\n"
"fmin z8.h, p4/M, z8.h, z1.h\n"
"fmin z9.h, p4/M, z9.h, z1.h\n"
"fmin z10.h, p4/M, z10.h, z1.h\n"
@@ -1146,82 +1146,82 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"fmax z26.h, p4/M, z26.h, z0.h\n"
"fmax z27.h, p4/M, z27.h, z0.h\n"
"64:" // Height 5: No activation
- "st1h { z8.h }, p3, [x13]\n"
- "st1h { z9.h }, p2, [x13, #1, MUL VL]\n"
- "st1h { z10.h }, p1, [x13, #2, MUL VL]\n"
- "st1h { z11.h }, p0, [x13, #3, MUL VL]\n"
- "addvl x13, x13, #4\n"
- "st1h { z12.h }, p3, [x25]\n"
- "st1h { z13.h }, p2, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p1, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p0, [x25, #3, MUL VL]\n"
- "st1h { z16.h }, p3, [x24]\n"
- "st1h { z17.h }, p2, [x24, #1, MUL VL]\n"
- "st1h { z18.h }, p1, [x24, #2, MUL VL]\n"
- "st1h { z19.h }, p0, [x24, #3, MUL VL]\n"
- "st1h { z20.h }, p3, [x23]\n"
- "st1h { z21.h }, p2, [x23, #1, MUL VL]\n"
- "st1h { z22.h }, p1, [x23, #2, MUL VL]\n"
- "st1h { z23.h }, p0, [x23, #3, MUL VL]\n"
- "st1h { z24.h }, p3, [x22]\n"
- "st1h { z25.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z26.h }, p1, [x22, #2, MUL VL]\n"
- "st1h { z27.h }, p0, [x22, #3, MUL VL]\n"
+ "st1h { z8.h }, p3, [x12]\n"
+ "st1h { z9.h }, p2, [x12, #1, MUL VL]\n"
+ "st1h { z10.h }, p1, [x12, #2, MUL VL]\n"
+ "st1h { z11.h }, p0, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1h { z12.h }, p3, [x24]\n"
+ "st1h { z13.h }, p2, [x24, #1, MUL VL]\n"
+ "st1h { z14.h }, p1, [x24, #2, MUL VL]\n"
+ "st1h { z15.h }, p0, [x24, #3, MUL VL]\n"
+ "st1h { z16.h }, p3, [x23]\n"
+ "st1h { z17.h }, p2, [x23, #1, MUL VL]\n"
+ "st1h { z18.h }, p1, [x23, #2, MUL VL]\n"
+ "st1h { z19.h }, p0, [x23, #3, MUL VL]\n"
+ "st1h { z20.h }, p3, [x22]\n"
+ "st1h { z21.h }, p2, [x22, #1, MUL VL]\n"
+ "st1h { z22.h }, p1, [x22, #2, MUL VL]\n"
+ "st1h { z23.h }, p0, [x22, #3, MUL VL]\n"
+ "st1h { z24.h }, p3, [x21]\n"
+ "st1h { z25.h }, p2, [x21, #1, MUL VL]\n"
+ "st1h { z26.h }, p1, [x21, #2, MUL VL]\n"
+ "st1h { z27.h }, p0, [x21, #3, MUL VL]\n"
"65:" // Height 5: Writeback done
- "dech x14, ALL, MUL #4\n"
- "cmp x14, XZR\n"
+ "dech x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
"bgt 54b\n"
"b 80f\n"
"66:" // Height 6
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x21, #0xc\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x15, %x[bias]\n"
- "mov x13, %x[output_ptr]\n"
- "madd %x[output_ptr], x20, x21, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x20, #0xc\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x14, %x[bias]\n"
+ "mov x12, %x[output_ptr]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
"67:" // Height 6: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "cnth x21, ALL, MUL #3\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x20, x9, x20, LSL #1\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cnth x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 68f\n"
- "dech x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 68f\n"
- "dech x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 68f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"68:" // Height 6: B setup done
- "mov x20, #0x0\n"
- "whilelt p3.h, x20, x14\n"
- "inch x20\n"
- "whilelt p2.h, x20, x14\n"
- "inch x20\n"
- "whilelt p1.h, x20, x14\n"
- "inch x20\n"
- "whilelt p0.h, x20, x14\n"
- "cbz x15, 69f\n"
- "ld1h { z8.h }, p4/Z, [x15]\n"
- "ld1h { z9.h }, p4/Z, [x15, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p0.h, x19, x13\n"
+ "cbz x14, 69f\n"
+ "ld1h { z8.h }, p4/Z, [x14]\n"
+ "ld1h { z9.h }, p4/Z, [x14, #1, MUL VL]\n"
"mov z12.d, z8.d\n"
"mov z13.d, z9.d\n"
- "ld1h { z10.h }, p4/Z, [x15, #2, MUL VL]\n"
- "ld1h { z11.h }, p4/Z, [x15, #3, MUL VL]\n"
+ "ld1h { z10.h }, p4/Z, [x14, #2, MUL VL]\n"
+ "ld1h { z11.h }, p4/Z, [x14, #3, MUL VL]\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x15, x15, #4\n"
+ "addvl x14, x14, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"mov z20.d, z8.d\n"
@@ -1239,36 +1239,36 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"b 71f\n"
"69:" // Height 6: no bias
"tbz %x[flags], #0, 70f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "ld1h { z8.h }, p3/Z, [x13]\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
- "ld1h { z9.h }, p2/Z, [x13, #1, MUL VL]\n"
- "ld1h { z10.h }, p1/Z, [x13, #2, MUL VL]\n"
- "add x21, x22, x20, LSL #1\n"
- "ld1h { z11.h }, p0/Z, [x13, #3, MUL VL]\n"
- "ld1h { z12.h }, p3/Z, [x25]\n"
- "ld1h { z13.h }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1h { z14.h }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1h { z15.h }, p0/Z, [x25, #3, MUL VL]\n"
- "ld1h { z16.h }, p3/Z, [x24]\n"
- "ld1h { z17.h }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1h { z18.h }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1h { z19.h }, p0/Z, [x24, #3, MUL VL]\n"
- "ld1h { z20.h }, p3/Z, [x23]\n"
- "ld1h { z21.h }, p2/Z, [x23, #1, MUL VL]\n"
- "ld1h { z22.h }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1h { z23.h }, p0/Z, [x23, #3, MUL VL]\n"
- "ld1h { z24.h }, p3/Z, [x22]\n"
- "ld1h { z25.h }, p2/Z, [x22, #1, MUL VL]\n"
- "ld1h { z26.h }, p1/Z, [x22, #2, MUL VL]\n"
- "ld1h { z27.h }, p0/Z, [x22, #3, MUL VL]\n"
- "ld1h { z28.h }, p3/Z, [x21]\n"
- "ld1h { z29.h }, p2/Z, [x21, #1, MUL VL]\n"
- "ld1h { z30.h }, p1/Z, [x21, #2, MUL VL]\n"
- "ld1h { z31.h }, p0/Z, [x21, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "ld1h { z8.h }, p3/Z, [x12]\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "ld1h { z9.h }, p2/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z10.h }, p1/Z, [x12, #2, MUL VL]\n"
+ "add x20, x21, x19, LSL #1\n"
+ "ld1h { z11.h }, p0/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z12.h }, p3/Z, [x24]\n"
+ "ld1h { z13.h }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z14.h }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z15.h }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z16.h }, p3/Z, [x23]\n"
+ "ld1h { z17.h }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z18.h }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z19.h }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1h { z20.h }, p3/Z, [x22]\n"
+ "ld1h { z21.h }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z22.h }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z23.h }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1h { z24.h }, p3/Z, [x21]\n"
+ "ld1h { z25.h }, p2/Z, [x21, #1, MUL VL]\n"
+ "ld1h { z26.h }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1h { z27.h }, p0/Z, [x21, #3, MUL VL]\n"
+ "ld1h { z28.h }, p3/Z, [x20]\n"
+ "ld1h { z29.h }, p2/Z, [x20, #1, MUL VL]\n"
+ "ld1h { z30.h }, p1/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z31.h }, p0/Z, [x20, #3, MUL VL]\n"
"b 71f\n"
"70:" // Height 6: no accumulate
"mov z8.b, #0x0\n"
@@ -1296,116 +1296,116 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"mov z30.b, #0x0\n"
"mov z31.b, #0x0\n"
"71:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"72:" // Height 6: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 73f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 74f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
- "add x22, x22, x20, LSL #1\n"
- "add x21, x21, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 74f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
+ "add x20, x20, x19, LSL #1\n"
"b 74f\n"
"73:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
"74:" // Height 6: input setup done
- "subs x27, x27, #0x1\n"
- "ld1rh { z0.h }, p4/Z, [x26]\n"
- "ld1rh { z1.h }, p4/Z, [x25]\n"
- "ld1rh { z2.h }, p4/Z, [x24]\n"
- "ld1rh { z3.h }, p4/Z, [x23]\n"
- "ld1rh { z4.h }, p4/Z, [x22]\n"
- "ld1rh { z5.h }, p4/Z, [x21]\n"
- "ld1h { z6.h }, p4/Z, [x12]\n"
- "ld1h { z7.h }, p4/Z, [x11]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1rh { z1.h }, p4/Z, [x24]\n"
+ "ld1rh { z2.h }, p4/Z, [x23]\n"
+ "ld1rh { z3.h }, p4/Z, [x22]\n"
+ "ld1rh { z4.h }, p4/Z, [x21]\n"
+ "ld1rh { z5.h }, p4/Z, [x20]\n"
+ "ld1h { z6.h }, p4/Z, [x11]\n"
+ "ld1h { z7.h }, p4/Z, [x10]\n"
"ble 76f\n"
"75:" // Height 6: Multiply loop: Main loop
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z12.h, p4/M, z6.h, z1.h\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z16.h, p4/M, z6.h, z2.h\n"
"fmla z20.h, p4/M, z6.h, z3.h\n"
- "add x26, x26, #0x2\n"
- "subs x27, x27, #0x1\n"
+ "add x25, x25, #0x2\n"
+ "subs x26, x26, #0x1\n"
"fmla z24.h, p4/M, z6.h, z4.h\n"
"fmla z28.h, p4/M, z6.h, z5.h\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
- "add x25, x25, #0x2\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
+ "add x24, x24, #0x2\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
- "add x24, x24, #0x2\n"
"add x23, x23, #0x2\n"
+ "add x22, x22, #0x2\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
"fmla z21.h, p4/M, z7.h, z3.h\n"
- "add x22, x22, #0x2\n"
"add x21, x21, #0x2\n"
+ "add x20, x20, #0x2\n"
"fmla z25.h, p4/M, z7.h, z4.h\n"
"fmla z29.h, p4/M, z7.h, z5.h\n"
- "ld1h { z7.h }, p4/Z, [x9]\n"
- "addvl x10, x10, #1\n"
+ "ld1h { z7.h }, p4/Z, [x28]\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, p4/M, z6.h, z0.h\n"
"fmla z14.h, p4/M, z6.h, z1.h\n"
- "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"fmla z18.h, p4/M, z6.h, z2.h\n"
"fmla z22.h, p4/M, z6.h, z3.h\n"
"fmla z26.h, p4/M, z6.h, z4.h\n"
"fmla z30.h, p4/M, z6.h, z5.h\n"
- "ld1h { z6.h }, p4/Z, [x12]\n"
+ "ld1h { z6.h }, p4/Z, [x11]\n"
"fmla z11.h, p4/M, z7.h, z0.h\n"
"fmla z15.h, p4/M, z7.h, z1.h\n"
- "ld1rh { z0.h }, p4/Z, [x26]\n"
- "ld1rh { z1.h }, p4/Z, [x25]\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1rh { z1.h }, p4/Z, [x24]\n"
"fmla z19.h, p4/M, z7.h, z2.h\n"
"fmla z23.h, p4/M, z7.h, z3.h\n"
- "ld1rh { z2.h }, p4/Z, [x24]\n"
- "ld1rh { z3.h }, p4/Z, [x23]\n"
+ "ld1rh { z2.h }, p4/Z, [x23]\n"
+ "ld1rh { z3.h }, p4/Z, [x22]\n"
"fmla z27.h, p4/M, z7.h, z4.h\n"
"fmla z31.h, p4/M, z7.h, z5.h\n"
- "ld1rh { z4.h }, p4/Z, [x22]\n"
- "ld1rh { z5.h }, p4/Z, [x21]\n"
- "ld1h { z7.h }, p4/Z, [x11]\n"
+ "ld1rh { z4.h }, p4/Z, [x21]\n"
+ "ld1rh { z5.h }, p4/Z, [x20]\n"
+ "ld1h { z7.h }, p4/Z, [x10]\n"
"bgt 75b\n"
"76:" // Height 6: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z12.h, p4/M, z6.h, z1.h\n"
- "add x28, x28, #0x1\n"
+ "add x27, x27, #0x1\n"
"fmla z16.h, p4/M, z6.h, z2.h\n"
"fmla z20.h, p4/M, z6.h, z3.h\n"
- "cmp x28, x20\n"
- "addvl x12, x12, #1\n"
+ "cmp x27, x19\n"
+ "addvl x11, x11, #1\n"
"fmla z24.h, p4/M, z6.h, z4.h\n"
"fmla z28.h, p4/M, z6.h, z5.h\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
- "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
"fmla z21.h, p4/M, z7.h, z3.h\n"
"fmla z25.h, p4/M, z7.h, z4.h\n"
"fmla z29.h, p4/M, z7.h, z5.h\n"
- "ld1h { z7.h }, p4/Z, [x9]\n"
- "addvl x9, x9, #1\n"
+ "ld1h { z7.h }, p4/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
"fmla z10.h, p4/M, z6.h, z0.h\n"
"fmla z14.h, p4/M, z6.h, z1.h\n"
"fmla z18.h, p4/M, z6.h, z2.h\n"
@@ -1419,17 +1419,17 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"fmla z27.h, p4/M, z7.h, z4.h\n"
"fmla z31.h, p4/M, z7.h, z5.h\n"
"bne 72b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
"tbz %x[flags], #1, 77f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p4/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p4/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z1.h }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z0.h }, p4/Z, [x19]\n"
"fmin z8.h, p4/M, z8.h, z1.h\n"
"fmin z9.h, p4/M, z9.h, z1.h\n"
"fmin z10.h, p4/M, z10.h, z1.h\n"
@@ -1479,50 +1479,50 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"fmax z30.h, p4/M, z30.h, z0.h\n"
"fmax z31.h, p4/M, z31.h, z0.h\n"
"77:" // Height 6: No activation
- "st1h { z8.h }, p3, [x13]\n"
- "st1h { z9.h }, p2, [x13, #1, MUL VL]\n"
- "st1h { z10.h }, p1, [x13, #2, MUL VL]\n"
- "st1h { z11.h }, p0, [x13, #3, MUL VL]\n"
- "addvl x13, x13, #4\n"
- "st1h { z12.h }, p3, [x25]\n"
- "st1h { z13.h }, p2, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p1, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p0, [x25, #3, MUL VL]\n"
- "st1h { z16.h }, p3, [x24]\n"
- "st1h { z17.h }, p2, [x24, #1, MUL VL]\n"
- "st1h { z18.h }, p1, [x24, #2, MUL VL]\n"
- "st1h { z19.h }, p0, [x24, #3, MUL VL]\n"
- "st1h { z20.h }, p3, [x23]\n"
- "st1h { z21.h }, p2, [x23, #1, MUL VL]\n"
- "st1h { z22.h }, p1, [x23, #2, MUL VL]\n"
- "st1h { z23.h }, p0, [x23, #3, MUL VL]\n"
- "st1h { z24.h }, p3, [x22]\n"
- "st1h { z25.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z26.h }, p1, [x22, #2, MUL VL]\n"
- "st1h { z27.h }, p0, [x22, #3, MUL VL]\n"
- "st1h { z28.h }, p3, [x21]\n"
- "st1h { z29.h }, p2, [x21, #1, MUL VL]\n"
- "st1h { z30.h }, p1, [x21, #2, MUL VL]\n"
- "st1h { z31.h }, p0, [x21, #3, MUL VL]\n"
+ "st1h { z8.h }, p3, [x12]\n"
+ "st1h { z9.h }, p2, [x12, #1, MUL VL]\n"
+ "st1h { z10.h }, p1, [x12, #2, MUL VL]\n"
+ "st1h { z11.h }, p0, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1h { z12.h }, p3, [x24]\n"
+ "st1h { z13.h }, p2, [x24, #1, MUL VL]\n"
+ "st1h { z14.h }, p1, [x24, #2, MUL VL]\n"
+ "st1h { z15.h }, p0, [x24, #3, MUL VL]\n"
+ "st1h { z16.h }, p3, [x23]\n"
+ "st1h { z17.h }, p2, [x23, #1, MUL VL]\n"
+ "st1h { z18.h }, p1, [x23, #2, MUL VL]\n"
+ "st1h { z19.h }, p0, [x23, #3, MUL VL]\n"
+ "st1h { z20.h }, p3, [x22]\n"
+ "st1h { z21.h }, p2, [x22, #1, MUL VL]\n"
+ "st1h { z22.h }, p1, [x22, #2, MUL VL]\n"
+ "st1h { z23.h }, p0, [x22, #3, MUL VL]\n"
+ "st1h { z24.h }, p3, [x21]\n"
+ "st1h { z25.h }, p2, [x21, #1, MUL VL]\n"
+ "st1h { z26.h }, p1, [x21, #2, MUL VL]\n"
+ "st1h { z27.h }, p0, [x21, #3, MUL VL]\n"
+ "st1h { z28.h }, p3, [x20]\n"
+ "st1h { z29.h }, p2, [x20, #1, MUL VL]\n"
+ "st1h { z30.h }, p1, [x20, #2, MUL VL]\n"
+ "st1h { z31.h }, p0, [x20, #3, MUL VL]\n"
"78:" // Height 6: Writeback done
- "dech x14, ALL, MUL #4\n"
- "cmp x14, XZR\n"
+ "dech x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
"bgt 67b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 80f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 79f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"79:" // Update direct input
- "mov x20, #0xc\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0xc\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"80:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
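
Every hunk in the file above follows the same mechanical pattern: the kernel's scratch registers are renamed down by one (x20 becomes x19, x13 becomes x12, and so on), and the clobber list at the end of the asm block gains "x19" while dropping "x15". The snippet below is a minimal sketch, not ComputeLibrary code, of why that clobber entry matters: any general-purpose register addressed by name inside a GCC/Clang extended-asm block must be declared clobbered so the compiler will not keep a live value there or allocate an operand to it. The function name, operand names, and register choice here are illustrative assumptions; it assumes an AArch64 target with a GCC-compatible toolchain.

    #include <cstdint>

    // Illustrative only: uses x19 as a hard-coded scratch register,
    // exactly as the kernels above do.
    int64_t add_one(int64_t v) {
        int64_t out;
        __asm__ volatile(
            "mov x19, %x[in]\n"    // copy the input into x19
            "add x19, x19, #1\n"   // do some work in x19
            "mov %x[out], x19\n"   // move the result back out
            : [out] "=r" (out)
            : [in] "r" (v)
            : "x19", "cc"          // x19 must be declared clobbered
        );
        return out;
    }

Because the AArch64 procedure call standard makes x19 callee-saved, declaring the clobber also obliges the compiler to save and restore it around the block, which is why its presence in (or absence from) the clobber list must track its use in the assembly body, as in the diff above.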
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/generic.cpp
index 0b543b667f..0f995812d8 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -105,53 +105,53 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"cmp %x[M], #0x2\n"
"bgt 29f\n"
"beq 15f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "cnth x21, ALL, MUL #3\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x20, x9, x20, LSL #1\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cnth x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 3f\n"
- "dech x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 3f\n"
- "dech x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 3f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"3:" // Height 1: B setup done
- "mov x20, #0x0\n"
- "whilelt p4.h, x20, x14\n"
- "inch x20\n"
- "whilelt p3.h, x20, x14\n"
- "inch x20\n"
- "whilelt p2.h, x20, x14\n"
- "inch x20\n"
- "whilelt p1.h, x20, x14\n"
- "cbz x15, 4f\n"
- "ld1h { z8.h }, p5/Z, [x15]\n"
- "ld1h { z9.h }, p5/Z, [x15, #1, MUL VL]\n"
- "ld1h { z10.h }, p5/Z, [x15, #2, MUL VL]\n"
- "ld1h { z11.h }, p5/Z, [x15, #3, MUL VL]\n"
- "addvl x15, x15, #4\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p3.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x13\n"
+ "cbz x14, 4f\n"
+ "ld1h { z8.h }, p5/Z, [x14]\n"
+ "ld1h { z9.h }, p5/Z, [x14, #1, MUL VL]\n"
+ "ld1h { z10.h }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1h { z11.h }, p5/Z, [x14, #3, MUL VL]\n"
+ "addvl x14, x14, #4\n"
"b 6f\n"
"4:" // Height 1: no bias
"tbz %x[flags], #0, 5f\n"
- "ld1h { z8.h }, p4/Z, [x13]\n"
- "ld1h { z9.h }, p3/Z, [x13, #1, MUL VL]\n"
- "ld1h { z10.h }, p2/Z, [x13, #2, MUL VL]\n"
- "ld1h { z11.h }, p1/Z, [x13, #3, MUL VL]\n"
+ "ld1h { z8.h }, p4/Z, [x12]\n"
+ "ld1h { z9.h }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z10.h }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z11.h }, p1/Z, [x12, #3, MUL VL]\n"
"b 6f\n"
"5:" // Height 1: no accumulate
"mov z8.b, #0x0\n"
@@ -159,222 +159,222 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"mov z10.b, #0x0\n"
"mov z11.b, #0x0\n"
"6:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"7:" // Height 1: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 8f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 9f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 9f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
"b 9f\n"
"8:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"9:" // Height 1: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"ble 11f\n"
"10:" // Height 1: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
"fmla z8.h, z6.h, z0.h[0]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z9.h, z7.h, z0.h[0]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
"fmla z10.h, z6.h, z0.h[0]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
"fmla z11.h, z7.h, z0.h[0]\n"
- "ld1h { z6.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[1]\n"
- "ld1h { z7.h }, p5/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z9.h, z7.h, z0.h[1]\n"
- "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[1]\n"
- "ld1h { z7.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #1, MUL VL]\n"
"fmla z11.h, z7.h, z0.h[1]\n"
- "ld1h { z6.h }, p5/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #2, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[2]\n"
- "ld1h { z7.h }, p5/Z, [x11, #2, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.h, z7.h, z0.h[2]\n"
- "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #2, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[2]\n"
- "ld1h { z7.h }, p5/Z, [x9, #2, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #2, MUL VL]\n"
"fmla z11.h, z7.h, z0.h[2]\n"
- "ld1h { z6.h }, p5/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #3, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[3]\n"
- "ld1h { z7.h }, p5/Z, [x11, #3, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
"fmla z9.h, z7.h, z0.h[3]\n"
- "ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #3, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[3]\n"
- "ld1h { z7.h }, p5/Z, [x9, #3, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #3, MUL VL]\n"
"fmla z11.h, z7.h, z0.h[3]\n"
- "ld1h { z6.h }, p5/Z, [x12, #4, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #4, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[4]\n"
- "ld1h { z7.h }, p5/Z, [x11, #4, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #4, MUL VL]\n"
"fmla z9.h, z7.h, z0.h[4]\n"
- "ld1h { z6.h }, p5/Z, [x10, #4, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #4, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[4]\n"
- "ld1h { z7.h }, p5/Z, [x9, #4, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #4, MUL VL]\n"
"fmla z11.h, z7.h, z0.h[4]\n"
- "ld1h { z6.h }, p5/Z, [x12, #5, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #5, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[5]\n"
- "ld1h { z7.h }, p5/Z, [x11, #5, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #5, MUL VL]\n"
"fmla z9.h, z7.h, z0.h[5]\n"
- "ld1h { z6.h }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #5, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[5]\n"
- "ld1h { z7.h }, p5/Z, [x9, #5, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #5, MUL VL]\n"
"fmla z11.h, z7.h, z0.h[5]\n"
- "ld1h { z6.h }, p5/Z, [x12, #6, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #6, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[6]\n"
- "ld1h { z7.h }, p5/Z, [x11, #6, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #6, MUL VL]\n"
"fmla z9.h, z7.h, z0.h[6]\n"
- "ld1h { z6.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #6, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[6]\n"
- "ld1h { z7.h }, p5/Z, [x9, #6, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #6, MUL VL]\n"
"fmla z11.h, z7.h, z0.h[6]\n"
- "ld1h { z6.h }, p5/Z, [x12, #7, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #7, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[7]\n"
- "ld1h { z7.h }, p5/Z, [x11, #7, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #7, MUL VL]\n"
"fmla z9.h, z7.h, z0.h[7]\n"
- "ld1h { z6.h }, p5/Z, [x10, #7, MUL VL]\n"
- "sub x27, x27, #0x8\n"
- "ld1h { z7.h }, p5/Z, [x9, #7, MUL VL]\n"
- "cmp x27, #0x8\n"
+ "ld1h { z6.h }, p5/Z, [x9, #7, MUL VL]\n"
+ "sub x26, x26, #0x8\n"
+ "ld1h { z7.h }, p5/Z, [x28, #7, MUL VL]\n"
+ "cmp x26, #0x8\n"
"fmla z10.h, z6.h, z0.h[7]\n"
"fmla z11.h, z7.h, z0.h[7]\n"
- "add x26, x26, #0x10\n"
- "addvl x12, x12, #8\n"
+ "add x25, x25, #0x10\n"
"addvl x11, x11, #8\n"
"addvl x10, x10, #8\n"
"addvl x9, x9, #8\n"
+ "addvl x28, x28, #8\n"
"bgt 10b\n"
"11:" // Height 1: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
"fmla z8.h, z6.h, z0.h[0]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z9.h, z7.h, z0.h[0]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
"fmla z10.h, z6.h, z0.h[0]\n"
"fmla z11.h, z7.h, z0.h[0]\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"ble 12f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[1]\n"
"fmla z9.h, z7.h, z0.h[1]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "subs x27, x27, #0x1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
"fmla z10.h, z6.h, z0.h[1]\n"
"fmla z11.h, z7.h, z0.h[1]\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"ble 12f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[2]\n"
"fmla z9.h, z7.h, z0.h[2]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "subs x27, x27, #0x1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
"fmla z10.h, z6.h, z0.h[2]\n"
"fmla z11.h, z7.h, z0.h[2]\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"ble 12f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[3]\n"
"fmla z9.h, z7.h, z0.h[3]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "subs x27, x27, #0x1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
"fmla z10.h, z6.h, z0.h[3]\n"
"fmla z11.h, z7.h, z0.h[3]\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"ble 12f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[4]\n"
"fmla z9.h, z7.h, z0.h[4]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "subs x27, x27, #0x1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
"fmla z10.h, z6.h, z0.h[4]\n"
"fmla z11.h, z7.h, z0.h[4]\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"ble 12f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[5]\n"
"fmla z9.h, z7.h, z0.h[5]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "subs x27, x27, #0x1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
"fmla z10.h, z6.h, z0.h[5]\n"
"fmla z11.h, z7.h, z0.h[5]\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"ble 12f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[6]\n"
"fmla z9.h, z7.h, z0.h[6]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "subs x27, x27, #0x1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
"fmla z10.h, z6.h, z0.h[6]\n"
"fmla z11.h, z7.h, z0.h[6]\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"ble 12f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[7]\n"
"fmla z9.h, z7.h, z0.h[7]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
"fmla z10.h, z6.h, z0.h[7]\n"
"fmla z11.h, z7.h, z0.h[7]\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"12:" // Height 1: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 7b\n"
"tbz %x[flags], #1, 13f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p5/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z1.h }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z0.h }, p5/Z, [x19]\n"
"fmin z8.h, p5/M, z8.h, z1.h\n"
"fmin z9.h, p5/M, z9.h, z1.h\n"
"fmin z10.h, p5/M, z10.h, z1.h\n"
@@ -384,74 +384,74 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmax z10.h, p5/M, z10.h, z0.h\n"
"fmax z11.h, p5/M, z11.h, z0.h\n"
"13:" // Height 1: No activation
- "st1h { z8.h }, p4, [x13]\n"
- "st1h { z9.h }, p3, [x13, #1, MUL VL]\n"
- "st1h { z10.h }, p2, [x13, #2, MUL VL]\n"
- "st1h { z11.h }, p1, [x13, #3, MUL VL]\n"
- "addvl x13, x13, #4\n"
+ "st1h { z8.h }, p4, [x12]\n"
+ "st1h { z9.h }, p3, [x12, #1, MUL VL]\n"
+ "st1h { z10.h }, p2, [x12, #2, MUL VL]\n"
+ "st1h { z11.h }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
"14:" // Height 1: Writeback done
- "dech x14, ALL, MUL #4\n"
- "cmp x14, XZR\n"
+ "dech x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
"bgt 2b\n"
"b 86f\n"
"15:" // Height 2
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"16:" // Height 2: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "cnth x21, ALL, MUL #3\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x20, x9, x20, LSL #1\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cnth x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 17f\n"
- "dech x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 17f\n"
- "dech x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 17f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"17:" // Height 2: B setup done
- "mov x20, #0x0\n"
- "whilelt p4.h, x20, x14\n"
- "inch x20\n"
- "whilelt p3.h, x20, x14\n"
- "inch x20\n"
- "whilelt p2.h, x20, x14\n"
- "inch x20\n"
- "whilelt p1.h, x20, x14\n"
- "cbz x15, 18f\n"
- "ld1h { z8.h }, p5/Z, [x15]\n"
- "ld1h { z9.h }, p5/Z, [x15, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p3.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x13\n"
+ "cbz x14, 18f\n"
+ "ld1h { z8.h }, p5/Z, [x14]\n"
+ "ld1h { z9.h }, p5/Z, [x14, #1, MUL VL]\n"
"mov z12.d, z8.d\n"
"mov z13.d, z9.d\n"
- "ld1h { z10.h }, p5/Z, [x15, #2, MUL VL]\n"
- "ld1h { z11.h }, p5/Z, [x15, #3, MUL VL]\n"
+ "ld1h { z10.h }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1h { z11.h }, p5/Z, [x14, #3, MUL VL]\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
- "addvl x15, x15, #4\n"
+ "addvl x14, x14, #4\n"
"b 20f\n"
"18:" // Height 2: no bias
"tbz %x[flags], #0, 19f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "ld1h { z8.h }, p4/Z, [x13]\n"
- "ld1h { z9.h }, p3/Z, [x13, #1, MUL VL]\n"
- "ld1h { z10.h }, p2/Z, [x13, #2, MUL VL]\n"
- "ld1h { z11.h }, p1/Z, [x13, #3, MUL VL]\n"
- "ld1h { z12.h }, p4/Z, [x25]\n"
- "ld1h { z13.h }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1h { z14.h }, p2/Z, [x25, #2, MUL VL]\n"
- "ld1h { z15.h }, p1/Z, [x25, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "ld1h { z8.h }, p4/Z, [x12]\n"
+ "ld1h { z9.h }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z10.h }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z11.h }, p1/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z12.h }, p4/Z, [x24]\n"
+ "ld1h { z13.h }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z14.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z15.h }, p1/Z, [x24, #3, MUL VL]\n"
"b 20f\n"
"19:" // Height 2: no accumulate
"mov z8.b, #0x0\n"
@@ -463,294 +463,294 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"mov z14.b, #0x0\n"
"mov z15.b, #0x0\n"
"20:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"21:" // Height 2: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 22f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 23f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 23f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
"b 23f\n"
"22:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
"23:" // Height 2: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"ble 25f\n"
"24:" // Height 2: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
- "sub x27, x27, #0x8\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "sub x26, x26, #0x8\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[0]\n"
"fmla z12.h, z6.h, z1.h[0]\n"
"fmla z9.h, z7.h, z0.h[0]\n"
"fmla z13.h, z7.h, z1.h[0]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
"fmla z10.h, z6.h, z0.h[0]\n"
"fmla z14.h, z6.h, z1.h[0]\n"
- "ld1h { z6.h }, p5/Z, [x12, #1, MUL VL]\n"
- "cmp x27, #0x8\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ "cmp x26, #0x8\n"
"fmla z11.h, z7.h, z0.h[0]\n"
"fmla z15.h, z7.h, z1.h[0]\n"
- "ld1h { z7.h }, p5/Z, [x11, #1, MUL VL]\n"
- "add x26, x26, #0x10\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "add x25, x25, #0x10\n"
"fmla z8.h, z6.h, z0.h[1]\n"
"fmla z12.h, z6.h, z1.h[1]\n"
- "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
- "add x25, x25, #0x10\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "add x24, x24, #0x10\n"
"fmla z9.h, z7.h, z0.h[1]\n"
"fmla z13.h, z7.h, z1.h[1]\n"
- "ld1h { z7.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #1, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[1]\n"
"fmla z14.h, z6.h, z1.h[1]\n"
- "ld1h { z6.h }, p5/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #2, MUL VL]\n"
"fmla z11.h, z7.h, z0.h[1]\n"
"fmla z15.h, z7.h, z1.h[1]\n"
- "ld1h { z7.h }, p5/Z, [x11, #2, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[2]\n"
"fmla z12.h, z6.h, z1.h[2]\n"
- "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #2, MUL VL]\n"
"fmla z9.h, z7.h, z0.h[2]\n"
"fmla z13.h, z7.h, z1.h[2]\n"
- "ld1h { z7.h }, p5/Z, [x9, #2, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #2, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[2]\n"
"fmla z14.h, z6.h, z1.h[2]\n"
- "ld1h { z6.h }, p5/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #3, MUL VL]\n"
"fmla z11.h, z7.h, z0.h[2]\n"
"fmla z15.h, z7.h, z1.h[2]\n"
- "ld1h { z7.h }, p5/Z, [x11, #3, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[3]\n"
"fmla z12.h, z6.h, z1.h[3]\n"
- "ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #3, MUL VL]\n"
"fmla z9.h, z7.h, z0.h[3]\n"
"fmla z13.h, z7.h, z1.h[3]\n"
- "ld1h { z7.h }, p5/Z, [x9, #3, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #3, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[3]\n"
"fmla z14.h, z6.h, z1.h[3]\n"
- "ld1h { z6.h }, p5/Z, [x12, #4, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #4, MUL VL]\n"
"fmla z11.h, z7.h, z0.h[3]\n"
"fmla z15.h, z7.h, z1.h[3]\n"
- "ld1h { z7.h }, p5/Z, [x11, #4, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #4, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[4]\n"
"fmla z12.h, z6.h, z1.h[4]\n"
- "ld1h { z6.h }, p5/Z, [x10, #4, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #4, MUL VL]\n"
"fmla z9.h, z7.h, z0.h[4]\n"
"fmla z13.h, z7.h, z1.h[4]\n"
- "ld1h { z7.h }, p5/Z, [x9, #4, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #4, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[4]\n"
"fmla z14.h, z6.h, z1.h[4]\n"
- "ld1h { z6.h }, p5/Z, [x12, #5, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #5, MUL VL]\n"
"fmla z11.h, z7.h, z0.h[4]\n"
"fmla z15.h, z7.h, z1.h[4]\n"
- "ld1h { z7.h }, p5/Z, [x11, #5, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #5, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[5]\n"
"fmla z12.h, z6.h, z1.h[5]\n"
- "ld1h { z6.h }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #5, MUL VL]\n"
"fmla z9.h, z7.h, z0.h[5]\n"
"fmla z13.h, z7.h, z1.h[5]\n"
- "ld1h { z7.h }, p5/Z, [x9, #5, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #5, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[5]\n"
"fmla z14.h, z6.h, z1.h[5]\n"
- "ld1h { z6.h }, p5/Z, [x12, #6, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #6, MUL VL]\n"
"fmla z11.h, z7.h, z0.h[5]\n"
"fmla z15.h, z7.h, z1.h[5]\n"
- "ld1h { z7.h }, p5/Z, [x11, #6, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #6, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[6]\n"
"fmla z12.h, z6.h, z1.h[6]\n"
- "ld1h { z6.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #6, MUL VL]\n"
"fmla z9.h, z7.h, z0.h[6]\n"
"fmla z13.h, z7.h, z1.h[6]\n"
- "ld1h { z7.h }, p5/Z, [x9, #6, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #6, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[6]\n"
"fmla z14.h, z6.h, z1.h[6]\n"
- "ld1h { z6.h }, p5/Z, [x12, #7, MUL VL]\n"
- "addvl x12, x12, #8\n"
+ "ld1h { z6.h }, p5/Z, [x11, #7, MUL VL]\n"
+ "addvl x11, x11, #8\n"
"fmla z11.h, z7.h, z0.h[6]\n"
"fmla z15.h, z7.h, z1.h[6]\n"
- "ld1h { z7.h }, p5/Z, [x11, #7, MUL VL]\n"
- "addvl x11, x11, #8\n"
+ "ld1h { z7.h }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
"fmla z8.h, z6.h, z0.h[7]\n"
"fmla z12.h, z6.h, z1.h[7]\n"
- "ld1h { z6.h }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #8\n"
+ "ld1h { z6.h }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
"fmla z9.h, z7.h, z0.h[7]\n"
"fmla z13.h, z7.h, z1.h[7]\n"
- "ld1h { z7.h }, p5/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #8\n"
+ "ld1h { z7.h }, p5/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
"fmla z10.h, z6.h, z0.h[7]\n"
"fmla z14.h, z6.h, z1.h[7]\n"
"fmla z11.h, z7.h, z0.h[7]\n"
"fmla z15.h, z7.h, z1.h[7]\n"
"bgt 24b\n"
"25:" // Height 2: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[0]\n"
"fmla z12.h, z6.h, z1.h[0]\n"
"fmla z9.h, z7.h, z0.h[0]\n"
"fmla z13.h, z7.h, z1.h[0]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
"fmla z10.h, z6.h, z0.h[0]\n"
"fmla z14.h, z6.h, z1.h[0]\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z11.h, z7.h, z0.h[0]\n"
"fmla z15.h, z7.h, z1.h[0]\n"
- "addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"ble 26f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[1]\n"
"fmla z12.h, z6.h, z1.h[1]\n"
"fmla z9.h, z7.h, z0.h[1]\n"
"fmla z13.h, z7.h, z1.h[1]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "subs x27, x27, #0x1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
"fmla z10.h, z6.h, z0.h[1]\n"
"fmla z14.h, z6.h, z1.h[1]\n"
- "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z11.h, z7.h, z0.h[1]\n"
"fmla z15.h, z7.h, z1.h[1]\n"
- "addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"ble 26f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[2]\n"
"fmla z12.h, z6.h, z1.h[2]\n"
"fmla z9.h, z7.h, z0.h[2]\n"
"fmla z13.h, z7.h, z1.h[2]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "subs x27, x27, #0x1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
"fmla z10.h, z6.h, z0.h[2]\n"
"fmla z14.h, z6.h, z1.h[2]\n"
- "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z11.h, z7.h, z0.h[2]\n"
"fmla z15.h, z7.h, z1.h[2]\n"
- "addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"ble 26f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[3]\n"
"fmla z12.h, z6.h, z1.h[3]\n"
"fmla z9.h, z7.h, z0.h[3]\n"
"fmla z13.h, z7.h, z1.h[3]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "subs x27, x27, #0x1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
"fmla z10.h, z6.h, z0.h[3]\n"
"fmla z14.h, z6.h, z1.h[3]\n"
- "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z11.h, z7.h, z0.h[3]\n"
"fmla z15.h, z7.h, z1.h[3]\n"
- "addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"ble 26f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[4]\n"
"fmla z12.h, z6.h, z1.h[4]\n"
"fmla z9.h, z7.h, z0.h[4]\n"
"fmla z13.h, z7.h, z1.h[4]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "subs x27, x27, #0x1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
"fmla z10.h, z6.h, z0.h[4]\n"
"fmla z14.h, z6.h, z1.h[4]\n"
- "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z11.h, z7.h, z0.h[4]\n"
"fmla z15.h, z7.h, z1.h[4]\n"
- "addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"ble 26f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[5]\n"
"fmla z12.h, z6.h, z1.h[5]\n"
"fmla z9.h, z7.h, z0.h[5]\n"
"fmla z13.h, z7.h, z1.h[5]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "subs x27, x27, #0x1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
"fmla z10.h, z6.h, z0.h[5]\n"
"fmla z14.h, z6.h, z1.h[5]\n"
- "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z11.h, z7.h, z0.h[5]\n"
"fmla z15.h, z7.h, z1.h[5]\n"
- "addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"ble 26f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[6]\n"
"fmla z12.h, z6.h, z1.h[6]\n"
"fmla z9.h, z7.h, z0.h[6]\n"
"fmla z13.h, z7.h, z1.h[6]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "subs x27, x27, #0x1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
"fmla z10.h, z6.h, z0.h[6]\n"
"fmla z14.h, z6.h, z1.h[6]\n"
- "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z11.h, z7.h, z0.h[6]\n"
"fmla z15.h, z7.h, z1.h[6]\n"
- "addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"ble 26f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[7]\n"
"fmla z12.h, z6.h, z1.h[7]\n"
"fmla z9.h, z7.h, z0.h[7]\n"
"fmla z13.h, z7.h, z1.h[7]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
"fmla z10.h, z6.h, z0.h[7]\n"
"fmla z14.h, z6.h, z1.h[7]\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z11.h, z7.h, z0.h[7]\n"
"fmla z15.h, z7.h, z1.h[7]\n"
- "addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"26:" // Height 2: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 21b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
"tbz %x[flags], #1, 27f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p5/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z1.h }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z0.h }, p5/Z, [x19]\n"
"fmin z8.h, p5/M, z8.h, z1.h\n"
"fmin z9.h, p5/M, z9.h, z1.h\n"
"fmin z10.h, p5/M, z10.h, z1.h\n"
@@ -768,87 +768,87 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmax z14.h, p5/M, z14.h, z0.h\n"
"fmax z15.h, p5/M, z15.h, z0.h\n"
"27:" // Height 2: No activation
- "st1h { z8.h }, p4, [x13]\n"
- "st1h { z9.h }, p3, [x13, #1, MUL VL]\n"
- "st1h { z10.h }, p2, [x13, #2, MUL VL]\n"
- "st1h { z11.h }, p1, [x13, #3, MUL VL]\n"
- "addvl x13, x13, #4\n"
- "st1h { z12.h }, p4, [x25]\n"
- "st1h { z13.h }, p3, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p2, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p1, [x25, #3, MUL VL]\n"
+ "st1h { z8.h }, p4, [x12]\n"
+ "st1h { z9.h }, p3, [x12, #1, MUL VL]\n"
+ "st1h { z10.h }, p2, [x12, #2, MUL VL]\n"
+ "st1h { z11.h }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1h { z12.h }, p4, [x24]\n"
+ "st1h { z13.h }, p3, [x24, #1, MUL VL]\n"
+ "st1h { z14.h }, p2, [x24, #2, MUL VL]\n"
+ "st1h { z15.h }, p1, [x24, #3, MUL VL]\n"
"28:" // Height 2: Writeback done
- "dech x14, ALL, MUL #4\n"
- "cmp x14, XZR\n"
+ "dech x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
"bgt 16b\n"
"b 86f\n"
"29:" // Height 3
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"30:" // Height 3: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "cnth x21, ALL, MUL #3\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x20, x9, x20, LSL #1\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cnth x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 31f\n"
- "dech x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 31f\n"
- "dech x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 31f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"31:" // Height 3: B setup done
- "mov x20, #0x0\n"
- "whilelt p4.h, x20, x14\n"
- "inch x20\n"
- "whilelt p3.h, x20, x14\n"
- "inch x20\n"
- "whilelt p2.h, x20, x14\n"
- "inch x20\n"
- "whilelt p1.h, x20, x14\n"
- "cbz x15, 32f\n"
- "ld1h { z8.h }, p5/Z, [x15]\n"
- "ld1h { z9.h }, p5/Z, [x15, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p3.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x13\n"
+ "cbz x14, 32f\n"
+ "ld1h { z8.h }, p5/Z, [x14]\n"
+ "ld1h { z9.h }, p5/Z, [x14, #1, MUL VL]\n"
"mov z12.d, z8.d\n"
"mov z13.d, z9.d\n"
- "ld1h { z10.h }, p5/Z, [x15, #2, MUL VL]\n"
- "ld1h { z11.h }, p5/Z, [x15, #3, MUL VL]\n"
+ "ld1h { z10.h }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1h { z11.h }, p5/Z, [x14, #3, MUL VL]\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x15, x15, #4\n"
+ "addvl x14, x14, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"b 34f\n"
"32:" // Height 3: no bias
"tbz %x[flags], #0, 33f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "ld1h { z8.h }, p4/Z, [x13]\n"
- "ld1h { z9.h }, p3/Z, [x13, #1, MUL VL]\n"
- "ld1h { z10.h }, p2/Z, [x13, #2, MUL VL]\n"
- "ld1h { z11.h }, p1/Z, [x13, #3, MUL VL]\n"
- "ld1h { z12.h }, p4/Z, [x25]\n"
- "ld1h { z13.h }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1h { z14.h }, p2/Z, [x25, #2, MUL VL]\n"
- "ld1h { z15.h }, p1/Z, [x25, #3, MUL VL]\n"
- "ld1h { z16.h }, p4/Z, [x24]\n"
- "ld1h { z17.h }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1h { z18.h }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1h { z19.h }, p1/Z, [x24, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "ld1h { z8.h }, p4/Z, [x12]\n"
+ "ld1h { z9.h }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z10.h }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z11.h }, p1/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z12.h }, p4/Z, [x24]\n"
+ "ld1h { z13.h }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z14.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z15.h }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z16.h }, p4/Z, [x23]\n"
+ "ld1h { z17.h }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z18.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z19.h }, p1/Z, [x23, #3, MUL VL]\n"
"b 34f\n"
"33:" // Height 3: no accumulate
"mov z8.b, #0x0\n"
@@ -864,166 +864,166 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"mov z18.b, #0x0\n"
"mov z19.b, #0x0\n"
"34:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"35:" // Height 3: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 36f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 37f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 37f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
"b 37f\n"
"36:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
"37:" // Height 3: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"ble 39f\n"
"38:" // Height 3: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
- "sub x27, x27, #0x8\n"
- "ld1rqh { z2.h }, p0/Z, [x24]\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "sub x26, x26, #0x8\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
"fmla z8.h, z6.h, z0.h[0]\n"
"fmla z12.h, z6.h, z1.h[0]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z16.h, z6.h, z2.h[0]\n"
"fmla z9.h, z7.h, z0.h[0]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
"fmla z13.h, z7.h, z1.h[0]\n"
"fmla z17.h, z7.h, z2.h[0]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "cmp x27, #0x8\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "cmp x26, #0x8\n"
"fmla z10.h, z6.h, z0.h[0]\n"
"fmla z14.h, z6.h, z1.h[0]\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"fmla z18.h, z6.h, z2.h[0]\n"
"fmla z11.h, z7.h, z0.h[0]\n"
- "ld1h { z6.h }, p5/Z, [x12, #1, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ "add x23, x23, #0x10\n"
"fmla z15.h, z7.h, z1.h[0]\n"
"fmla z19.h, z7.h, z2.h[0]\n"
- "ld1h { z7.h }, p5/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[1]\n"
"fmla z12.h, z6.h, z1.h[1]\n"
"fmla z16.h, z6.h, z2.h[1]\n"
"fmla z9.h, z7.h, z0.h[1]\n"
- "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
"fmla z13.h, z7.h, z1.h[1]\n"
"fmla z17.h, z7.h, z2.h[1]\n"
- "ld1h { z7.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #1, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[1]\n"
"fmla z14.h, z6.h, z1.h[1]\n"
"fmla z18.h, z6.h, z2.h[1]\n"
"fmla z11.h, z7.h, z0.h[1]\n"
- "ld1h { z6.h }, p5/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #2, MUL VL]\n"
"fmla z15.h, z7.h, z1.h[1]\n"
"fmla z19.h, z7.h, z2.h[1]\n"
- "ld1h { z7.h }, p5/Z, [x11, #2, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[2]\n"
"fmla z12.h, z6.h, z1.h[2]\n"
"fmla z16.h, z6.h, z2.h[2]\n"
"fmla z9.h, z7.h, z0.h[2]\n"
- "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #2, MUL VL]\n"
"fmla z13.h, z7.h, z1.h[2]\n"
"fmla z17.h, z7.h, z2.h[2]\n"
- "ld1h { z7.h }, p5/Z, [x9, #2, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #2, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[2]\n"
"fmla z14.h, z6.h, z1.h[2]\n"
"fmla z18.h, z6.h, z2.h[2]\n"
"fmla z11.h, z7.h, z0.h[2]\n"
- "ld1h { z6.h }, p5/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #3, MUL VL]\n"
"fmla z15.h, z7.h, z1.h[2]\n"
"fmla z19.h, z7.h, z2.h[2]\n"
- "ld1h { z7.h }, p5/Z, [x11, #3, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[3]\n"
"fmla z12.h, z6.h, z1.h[3]\n"
"fmla z16.h, z6.h, z2.h[3]\n"
"fmla z9.h, z7.h, z0.h[3]\n"
- "ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #3, MUL VL]\n"
"fmla z13.h, z7.h, z1.h[3]\n"
"fmla z17.h, z7.h, z2.h[3]\n"
- "ld1h { z7.h }, p5/Z, [x9, #3, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #3, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[3]\n"
"fmla z14.h, z6.h, z1.h[3]\n"
"fmla z18.h, z6.h, z2.h[3]\n"
"fmla z11.h, z7.h, z0.h[3]\n"
- "ld1h { z6.h }, p5/Z, [x12, #4, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #4, MUL VL]\n"
"fmla z15.h, z7.h, z1.h[3]\n"
"fmla z19.h, z7.h, z2.h[3]\n"
- "ld1h { z7.h }, p5/Z, [x11, #4, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #4, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[4]\n"
"fmla z12.h, z6.h, z1.h[4]\n"
"fmla z16.h, z6.h, z2.h[4]\n"
"fmla z9.h, z7.h, z0.h[4]\n"
- "ld1h { z6.h }, p5/Z, [x10, #4, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #4, MUL VL]\n"
"fmla z13.h, z7.h, z1.h[4]\n"
"fmla z17.h, z7.h, z2.h[4]\n"
- "ld1h { z7.h }, p5/Z, [x9, #4, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #4, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[4]\n"
"fmla z14.h, z6.h, z1.h[4]\n"
"fmla z18.h, z6.h, z2.h[4]\n"
"fmla z11.h, z7.h, z0.h[4]\n"
- "ld1h { z6.h }, p5/Z, [x12, #5, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #5, MUL VL]\n"
"fmla z15.h, z7.h, z1.h[4]\n"
"fmla z19.h, z7.h, z2.h[4]\n"
- "ld1h { z7.h }, p5/Z, [x11, #5, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #5, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[5]\n"
"fmla z12.h, z6.h, z1.h[5]\n"
"fmla z16.h, z6.h, z2.h[5]\n"
"fmla z9.h, z7.h, z0.h[5]\n"
- "ld1h { z6.h }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #5, MUL VL]\n"
"fmla z13.h, z7.h, z1.h[5]\n"
"fmla z17.h, z7.h, z2.h[5]\n"
- "ld1h { z7.h }, p5/Z, [x9, #5, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #5, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[5]\n"
"fmla z14.h, z6.h, z1.h[5]\n"
"fmla z18.h, z6.h, z2.h[5]\n"
"fmla z11.h, z7.h, z0.h[5]\n"
- "ld1h { z6.h }, p5/Z, [x12, #6, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #6, MUL VL]\n"
"fmla z15.h, z7.h, z1.h[5]\n"
"fmla z19.h, z7.h, z2.h[5]\n"
- "ld1h { z7.h }, p5/Z, [x11, #6, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #6, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[6]\n"
"fmla z12.h, z6.h, z1.h[6]\n"
"fmla z16.h, z6.h, z2.h[6]\n"
"fmla z9.h, z7.h, z0.h[6]\n"
- "ld1h { z6.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #6, MUL VL]\n"
"fmla z13.h, z7.h, z1.h[6]\n"
"fmla z17.h, z7.h, z2.h[6]\n"
- "ld1h { z7.h }, p5/Z, [x9, #6, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #6, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[6]\n"
"fmla z14.h, z6.h, z1.h[6]\n"
"fmla z18.h, z6.h, z2.h[6]\n"
"fmla z11.h, z7.h, z0.h[6]\n"
- "ld1h { z6.h }, p5/Z, [x12, #7, MUL VL]\n"
- "addvl x12, x12, #8\n"
+ "ld1h { z6.h }, p5/Z, [x11, #7, MUL VL]\n"
+ "addvl x11, x11, #8\n"
"fmla z15.h, z7.h, z1.h[6]\n"
"fmla z19.h, z7.h, z2.h[6]\n"
- "ld1h { z7.h }, p5/Z, [x11, #7, MUL VL]\n"
- "addvl x11, x11, #8\n"
+ "ld1h { z7.h }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
"fmla z8.h, z6.h, z0.h[7]\n"
"fmla z12.h, z6.h, z1.h[7]\n"
"fmla z16.h, z6.h, z2.h[7]\n"
"fmla z9.h, z7.h, z0.h[7]\n"
- "ld1h { z6.h }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #8\n"
+ "ld1h { z6.h }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
"fmla z13.h, z7.h, z1.h[7]\n"
"fmla z17.h, z7.h, z2.h[7]\n"
- "ld1h { z7.h }, p5/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #8\n"
+ "ld1h { z7.h }, p5/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
"fmla z10.h, z6.h, z0.h[7]\n"
"fmla z14.h, z6.h, z1.h[7]\n"
"fmla z18.h, z6.h, z2.h[7]\n"
@@ -1032,197 +1032,197 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z19.h, z7.h, z2.h[7]\n"
"bgt 38b\n"
"39:" // Height 3: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
- "ld1rqh { z2.h }, p0/Z, [x24]\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
"fmla z8.h, z6.h, z0.h[0]\n"
"fmla z12.h, z6.h, z1.h[0]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z16.h, z6.h, z2.h[0]\n"
"fmla z9.h, z7.h, z0.h[0]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
"fmla z13.h, z7.h, z1.h[0]\n"
"fmla z17.h, z7.h, z2.h[0]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x12, x12, #1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x11, x11, #1\n"
"fmla z10.h, z6.h, z0.h[0]\n"
"fmla z14.h, z6.h, z1.h[0]\n"
- "addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z18.h, z6.h, z2.h[0]\n"
"fmla z11.h, z7.h, z0.h[0]\n"
- "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"fmla z15.h, z7.h, z1.h[0]\n"
"fmla z19.h, z7.h, z2.h[0]\n"
"ble 40f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[1]\n"
"fmla z12.h, z6.h, z1.h[1]\n"
"fmla z16.h, z6.h, z2.h[1]\n"
"fmla z9.h, z7.h, z0.h[1]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
"fmla z13.h, z7.h, z1.h[1]\n"
"fmla z17.h, z7.h, z2.h[1]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x12, x12, #1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x11, x11, #1\n"
"fmla z10.h, z6.h, z0.h[1]\n"
"fmla z14.h, z6.h, z1.h[1]\n"
- "addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z18.h, z6.h, z2.h[1]\n"
"fmla z11.h, z7.h, z0.h[1]\n"
- "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"fmla z15.h, z7.h, z1.h[1]\n"
"fmla z19.h, z7.h, z2.h[1]\n"
"ble 40f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[2]\n"
"fmla z12.h, z6.h, z1.h[2]\n"
"fmla z16.h, z6.h, z2.h[2]\n"
"fmla z9.h, z7.h, z0.h[2]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
"fmla z13.h, z7.h, z1.h[2]\n"
"fmla z17.h, z7.h, z2.h[2]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x12, x12, #1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x11, x11, #1\n"
"fmla z10.h, z6.h, z0.h[2]\n"
"fmla z14.h, z6.h, z1.h[2]\n"
- "addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z18.h, z6.h, z2.h[2]\n"
"fmla z11.h, z7.h, z0.h[2]\n"
- "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"fmla z15.h, z7.h, z1.h[2]\n"
"fmla z19.h, z7.h, z2.h[2]\n"
"ble 40f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[3]\n"
"fmla z12.h, z6.h, z1.h[3]\n"
"fmla z16.h, z6.h, z2.h[3]\n"
"fmla z9.h, z7.h, z0.h[3]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
"fmla z13.h, z7.h, z1.h[3]\n"
"fmla z17.h, z7.h, z2.h[3]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x12, x12, #1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x11, x11, #1\n"
"fmla z10.h, z6.h, z0.h[3]\n"
"fmla z14.h, z6.h, z1.h[3]\n"
- "addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z18.h, z6.h, z2.h[3]\n"
"fmla z11.h, z7.h, z0.h[3]\n"
- "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"fmla z15.h, z7.h, z1.h[3]\n"
"fmla z19.h, z7.h, z2.h[3]\n"
"ble 40f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[4]\n"
"fmla z12.h, z6.h, z1.h[4]\n"
"fmla z16.h, z6.h, z2.h[4]\n"
"fmla z9.h, z7.h, z0.h[4]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
"fmla z13.h, z7.h, z1.h[4]\n"
"fmla z17.h, z7.h, z2.h[4]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x12, x12, #1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x11, x11, #1\n"
"fmla z10.h, z6.h, z0.h[4]\n"
"fmla z14.h, z6.h, z1.h[4]\n"
- "addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z18.h, z6.h, z2.h[4]\n"
"fmla z11.h, z7.h, z0.h[4]\n"
- "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"fmla z15.h, z7.h, z1.h[4]\n"
"fmla z19.h, z7.h, z2.h[4]\n"
"ble 40f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[5]\n"
"fmla z12.h, z6.h, z1.h[5]\n"
"fmla z16.h, z6.h, z2.h[5]\n"
"fmla z9.h, z7.h, z0.h[5]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
"fmla z13.h, z7.h, z1.h[5]\n"
"fmla z17.h, z7.h, z2.h[5]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x12, x12, #1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x11, x11, #1\n"
"fmla z10.h, z6.h, z0.h[5]\n"
"fmla z14.h, z6.h, z1.h[5]\n"
- "addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z18.h, z6.h, z2.h[5]\n"
"fmla z11.h, z7.h, z0.h[5]\n"
- "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"fmla z15.h, z7.h, z1.h[5]\n"
"fmla z19.h, z7.h, z2.h[5]\n"
"ble 40f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[6]\n"
"fmla z12.h, z6.h, z1.h[6]\n"
"fmla z16.h, z6.h, z2.h[6]\n"
"fmla z9.h, z7.h, z0.h[6]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
"fmla z13.h, z7.h, z1.h[6]\n"
"fmla z17.h, z7.h, z2.h[6]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x12, x12, #1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x11, x11, #1\n"
"fmla z10.h, z6.h, z0.h[6]\n"
"fmla z14.h, z6.h, z1.h[6]\n"
- "addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z18.h, z6.h, z2.h[6]\n"
"fmla z11.h, z7.h, z0.h[6]\n"
- "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"fmla z15.h, z7.h, z1.h[6]\n"
"fmla z19.h, z7.h, z2.h[6]\n"
"ble 40f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[7]\n"
"fmla z12.h, z6.h, z1.h[7]\n"
"fmla z16.h, z6.h, z2.h[7]\n"
"fmla z9.h, z7.h, z0.h[7]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "addvl x12, x12, #1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x11, x11, #1\n"
"fmla z13.h, z7.h, z1.h[7]\n"
"fmla z17.h, z7.h, z2.h[7]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x11, x11, #1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x10, x10, #1\n"
"fmla z10.h, z6.h, z0.h[7]\n"
"fmla z14.h, z6.h, z1.h[7]\n"
- "addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"fmla z18.h, z6.h, z2.h[7]\n"
"fmla z11.h, z7.h, z0.h[7]\n"
"fmla z15.h, z7.h, z1.h[7]\n"
"fmla z19.h, z7.h, z2.h[7]\n"
"40:" // Height 3: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 35b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
"tbz %x[flags], #1, 41f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p5/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z1.h }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z0.h }, p5/Z, [x19]\n"
"fmin z8.h, p5/M, z8.h, z1.h\n"
"fmin z9.h, p5/M, z9.h, z1.h\n"
"fmin z10.h, p5/M, z10.h, z1.h\n"
@@ -1248,71 +1248,71 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmax z18.h, p5/M, z18.h, z0.h\n"
"fmax z19.h, p5/M, z19.h, z0.h\n"
"41:" // Height 3: No activation
- "st1h { z8.h }, p4, [x13]\n"
- "st1h { z9.h }, p3, [x13, #1, MUL VL]\n"
- "st1h { z10.h }, p2, [x13, #2, MUL VL]\n"
- "st1h { z11.h }, p1, [x13, #3, MUL VL]\n"
- "addvl x13, x13, #4\n"
- "st1h { z12.h }, p4, [x25]\n"
- "st1h { z13.h }, p3, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p2, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p1, [x25, #3, MUL VL]\n"
- "st1h { z16.h }, p4, [x24]\n"
- "st1h { z17.h }, p3, [x24, #1, MUL VL]\n"
- "st1h { z18.h }, p2, [x24, #2, MUL VL]\n"
- "st1h { z19.h }, p1, [x24, #3, MUL VL]\n"
+ "st1h { z8.h }, p4, [x12]\n"
+ "st1h { z9.h }, p3, [x12, #1, MUL VL]\n"
+ "st1h { z10.h }, p2, [x12, #2, MUL VL]\n"
+ "st1h { z11.h }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1h { z12.h }, p4, [x24]\n"
+ "st1h { z13.h }, p3, [x24, #1, MUL VL]\n"
+ "st1h { z14.h }, p2, [x24, #2, MUL VL]\n"
+ "st1h { z15.h }, p1, [x24, #3, MUL VL]\n"
+ "st1h { z16.h }, p4, [x23]\n"
+ "st1h { z17.h }, p3, [x23, #1, MUL VL]\n"
+ "st1h { z18.h }, p2, [x23, #2, MUL VL]\n"
+ "st1h { z19.h }, p1, [x23, #3, MUL VL]\n"
"42:" // Height 3: Writeback done
- "dech x14, ALL, MUL #4\n"
- "cmp x14, XZR\n"
+ "dech x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
"bgt 30b\n"
"b 86f\n"
"43:" // Height 4
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"44:" // Height 4: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "cnth x21, ALL, MUL #3\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x20, x9, x20, LSL #1\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cnth x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 45f\n"
- "dech x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 45f\n"
- "dech x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 45f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"45:" // Height 4: B setup done
- "mov x20, #0x0\n"
- "whilelt p4.h, x20, x14\n"
- "inch x20\n"
- "whilelt p3.h, x20, x14\n"
- "inch x20\n"
- "whilelt p2.h, x20, x14\n"
- "inch x20\n"
- "whilelt p1.h, x20, x14\n"
- "cbz x15, 46f\n"
- "ld1h { z8.h }, p5/Z, [x15]\n"
- "ld1h { z9.h }, p5/Z, [x15, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p3.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x13\n"
+ "cbz x14, 46f\n"
+ "ld1h { z8.h }, p5/Z, [x14]\n"
+ "ld1h { z9.h }, p5/Z, [x14, #1, MUL VL]\n"
"mov z12.d, z8.d\n"
"mov z13.d, z9.d\n"
- "ld1h { z10.h }, p5/Z, [x15, #2, MUL VL]\n"
- "ld1h { z11.h }, p5/Z, [x15, #3, MUL VL]\n"
+ "ld1h { z10.h }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1h { z11.h }, p5/Z, [x14, #3, MUL VL]\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x15, x15, #4\n"
+ "addvl x14, x14, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"mov z20.d, z8.d\n"
@@ -1322,26 +1322,26 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"b 48f\n"
"46:" // Height 4: no bias
"tbz %x[flags], #0, 47f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "ld1h { z8.h }, p4/Z, [x13]\n"
- "add x23, x24, x20, LSL #1\n"
- "ld1h { z9.h }, p3/Z, [x13, #1, MUL VL]\n"
- "ld1h { z10.h }, p2/Z, [x13, #2, MUL VL]\n"
- "ld1h { z11.h }, p1/Z, [x13, #3, MUL VL]\n"
- "ld1h { z12.h }, p4/Z, [x25]\n"
- "ld1h { z13.h }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1h { z14.h }, p2/Z, [x25, #2, MUL VL]\n"
- "ld1h { z15.h }, p1/Z, [x25, #3, MUL VL]\n"
- "ld1h { z16.h }, p4/Z, [x24]\n"
- "ld1h { z17.h }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1h { z18.h }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1h { z19.h }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1h { z20.h }, p4/Z, [x23]\n"
- "ld1h { z21.h }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1h { z22.h }, p2/Z, [x23, #2, MUL VL]\n"
- "ld1h { z23.h }, p1/Z, [x23, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "ld1h { z8.h }, p4/Z, [x12]\n"
+ "add x22, x23, x19, LSL #1\n"
+ "ld1h { z9.h }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z10.h }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z11.h }, p1/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z12.h }, p4/Z, [x24]\n"
+ "ld1h { z13.h }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z14.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z15.h }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z16.h }, p4/Z, [x23]\n"
+ "ld1h { z17.h }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z18.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z19.h }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1h { z20.h }, p4/Z, [x22]\n"
+ "ld1h { z21.h }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z22.h }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z23.h }, p1/Z, [x22, #3, MUL VL]\n"
"b 48f\n"
"47:" // Height 4: no accumulate
"mov z8.b, #0x0\n"
@@ -1361,201 +1361,201 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"mov z22.b, #0x0\n"
"mov z23.b, #0x0\n"
"48:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"49:" // Height 4: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 50f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 51f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 51f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
"b 51f\n"
"50:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
"51:" // Height 4: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"ble 53f\n"
"52:" // Height 4: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
- "sub x27, x27, #0x8\n"
- "ld1rqh { z2.h }, p0/Z, [x24]\n"
- "ld1rqh { z3.h }, p0/Z, [x23]\n"
- "cmp x27, #0x8\n"
- "add x26, x26, #0x10\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "sub x26, x26, #0x8\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
+ "ld1rqh { z3.h }, p0/Z, [x22]\n"
+ "cmp x26, #0x8\n"
+ "add x25, x25, #0x10\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[0]\n"
"fmla z12.h, z6.h, z1.h[0]\n"
"fmla z16.h, z6.h, z2.h[0]\n"
"fmla z20.h, z6.h, z3.h[0]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "add x25, x25, #0x10\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "add x24, x24, #0x10\n"
"fmla z9.h, z7.h, z0.h[0]\n"
"fmla z13.h, z7.h, z1.h[0]\n"
- "add x24, x24, #0x10\n"
"add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
"fmla z17.h, z7.h, z2.h[0]\n"
"fmla z21.h, z7.h, z3.h[0]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
"fmla z10.h, z6.h, z0.h[0]\n"
"fmla z14.h, z6.h, z1.h[0]\n"
"fmla z18.h, z6.h, z2.h[0]\n"
"fmla z22.h, z6.h, z3.h[0]\n"
- "ld1h { z6.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
"fmla z11.h, z7.h, z0.h[0]\n"
"fmla z15.h, z7.h, z1.h[0]\n"
"fmla z19.h, z7.h, z2.h[0]\n"
"fmla z23.h, z7.h, z3.h[0]\n"
- "ld1h { z7.h }, p5/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[1]\n"
"fmla z12.h, z6.h, z1.h[1]\n"
"fmla z16.h, z6.h, z2.h[1]\n"
"fmla z20.h, z6.h, z3.h[1]\n"
- "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
"fmla z9.h, z7.h, z0.h[1]\n"
"fmla z13.h, z7.h, z1.h[1]\n"
"fmla z17.h, z7.h, z2.h[1]\n"
"fmla z21.h, z7.h, z3.h[1]\n"
- "ld1h { z7.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #1, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[1]\n"
"fmla z14.h, z6.h, z1.h[1]\n"
"fmla z18.h, z6.h, z2.h[1]\n"
"fmla z22.h, z6.h, z3.h[1]\n"
- "ld1h { z6.h }, p5/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #2, MUL VL]\n"
"fmla z11.h, z7.h, z0.h[1]\n"
"fmla z15.h, z7.h, z1.h[1]\n"
"fmla z19.h, z7.h, z2.h[1]\n"
"fmla z23.h, z7.h, z3.h[1]\n"
- "ld1h { z7.h }, p5/Z, [x11, #2, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[2]\n"
"fmla z12.h, z6.h, z1.h[2]\n"
"fmla z16.h, z6.h, z2.h[2]\n"
"fmla z20.h, z6.h, z3.h[2]\n"
- "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #2, MUL VL]\n"
"fmla z9.h, z7.h, z0.h[2]\n"
"fmla z13.h, z7.h, z1.h[2]\n"
"fmla z17.h, z7.h, z2.h[2]\n"
"fmla z21.h, z7.h, z3.h[2]\n"
- "ld1h { z7.h }, p5/Z, [x9, #2, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #2, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[2]\n"
"fmla z14.h, z6.h, z1.h[2]\n"
"fmla z18.h, z6.h, z2.h[2]\n"
"fmla z22.h, z6.h, z3.h[2]\n"
- "ld1h { z6.h }, p5/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #3, MUL VL]\n"
"fmla z11.h, z7.h, z0.h[2]\n"
"fmla z15.h, z7.h, z1.h[2]\n"
"fmla z19.h, z7.h, z2.h[2]\n"
"fmla z23.h, z7.h, z3.h[2]\n"
- "ld1h { z7.h }, p5/Z, [x11, #3, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[3]\n"
"fmla z12.h, z6.h, z1.h[3]\n"
"fmla z16.h, z6.h, z2.h[3]\n"
"fmla z20.h, z6.h, z3.h[3]\n"
- "ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #3, MUL VL]\n"
"fmla z9.h, z7.h, z0.h[3]\n"
"fmla z13.h, z7.h, z1.h[3]\n"
"fmla z17.h, z7.h, z2.h[3]\n"
"fmla z21.h, z7.h, z3.h[3]\n"
- "ld1h { z7.h }, p5/Z, [x9, #3, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #3, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[3]\n"
"fmla z14.h, z6.h, z1.h[3]\n"
"fmla z18.h, z6.h, z2.h[3]\n"
"fmla z22.h, z6.h, z3.h[3]\n"
- "ld1h { z6.h }, p5/Z, [x12, #4, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #4, MUL VL]\n"
"fmla z11.h, z7.h, z0.h[3]\n"
"fmla z15.h, z7.h, z1.h[3]\n"
"fmla z19.h, z7.h, z2.h[3]\n"
"fmla z23.h, z7.h, z3.h[3]\n"
- "ld1h { z7.h }, p5/Z, [x11, #4, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #4, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[4]\n"
"fmla z12.h, z6.h, z1.h[4]\n"
"fmla z16.h, z6.h, z2.h[4]\n"
"fmla z20.h, z6.h, z3.h[4]\n"
- "ld1h { z6.h }, p5/Z, [x10, #4, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #4, MUL VL]\n"
"fmla z9.h, z7.h, z0.h[4]\n"
"fmla z13.h, z7.h, z1.h[4]\n"
"fmla z17.h, z7.h, z2.h[4]\n"
"fmla z21.h, z7.h, z3.h[4]\n"
- "ld1h { z7.h }, p5/Z, [x9, #4, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #4, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[4]\n"
"fmla z14.h, z6.h, z1.h[4]\n"
"fmla z18.h, z6.h, z2.h[4]\n"
"fmla z22.h, z6.h, z3.h[4]\n"
- "ld1h { z6.h }, p5/Z, [x12, #5, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #5, MUL VL]\n"
"fmla z11.h, z7.h, z0.h[4]\n"
"fmla z15.h, z7.h, z1.h[4]\n"
"fmla z19.h, z7.h, z2.h[4]\n"
"fmla z23.h, z7.h, z3.h[4]\n"
- "ld1h { z7.h }, p5/Z, [x11, #5, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #5, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[5]\n"
"fmla z12.h, z6.h, z1.h[5]\n"
"fmla z16.h, z6.h, z2.h[5]\n"
"fmla z20.h, z6.h, z3.h[5]\n"
- "ld1h { z6.h }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #5, MUL VL]\n"
"fmla z9.h, z7.h, z0.h[5]\n"
"fmla z13.h, z7.h, z1.h[5]\n"
"fmla z17.h, z7.h, z2.h[5]\n"
"fmla z21.h, z7.h, z3.h[5]\n"
- "ld1h { z7.h }, p5/Z, [x9, #5, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #5, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[5]\n"
"fmla z14.h, z6.h, z1.h[5]\n"
"fmla z18.h, z6.h, z2.h[5]\n"
"fmla z22.h, z6.h, z3.h[5]\n"
- "ld1h { z6.h }, p5/Z, [x12, #6, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #6, MUL VL]\n"
"fmla z11.h, z7.h, z0.h[5]\n"
"fmla z15.h, z7.h, z1.h[5]\n"
"fmla z19.h, z7.h, z2.h[5]\n"
"fmla z23.h, z7.h, z3.h[5]\n"
- "ld1h { z7.h }, p5/Z, [x11, #6, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #6, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[6]\n"
"fmla z12.h, z6.h, z1.h[6]\n"
"fmla z16.h, z6.h, z2.h[6]\n"
"fmla z20.h, z6.h, z3.h[6]\n"
- "ld1h { z6.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #6, MUL VL]\n"
"fmla z9.h, z7.h, z0.h[6]\n"
"fmla z13.h, z7.h, z1.h[6]\n"
"fmla z17.h, z7.h, z2.h[6]\n"
"fmla z21.h, z7.h, z3.h[6]\n"
- "ld1h { z7.h }, p5/Z, [x9, #6, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #6, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[6]\n"
"fmla z14.h, z6.h, z1.h[6]\n"
"fmla z18.h, z6.h, z2.h[6]\n"
"fmla z22.h, z6.h, z3.h[6]\n"
- "ld1h { z6.h }, p5/Z, [x12, #7, MUL VL]\n"
- "addvl x12, x12, #8\n"
+ "ld1h { z6.h }, p5/Z, [x11, #7, MUL VL]\n"
+ "addvl x11, x11, #8\n"
"fmla z11.h, z7.h, z0.h[6]\n"
"fmla z15.h, z7.h, z1.h[6]\n"
"fmla z19.h, z7.h, z2.h[6]\n"
"fmla z23.h, z7.h, z3.h[6]\n"
- "ld1h { z7.h }, p5/Z, [x11, #7, MUL VL]\n"
- "addvl x11, x11, #8\n"
+ "ld1h { z7.h }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
"fmla z8.h, z6.h, z0.h[7]\n"
"fmla z12.h, z6.h, z1.h[7]\n"
"fmla z16.h, z6.h, z2.h[7]\n"
"fmla z20.h, z6.h, z3.h[7]\n"
- "ld1h { z6.h }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #8\n"
+ "ld1h { z6.h }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
"fmla z9.h, z7.h, z0.h[7]\n"
"fmla z13.h, z7.h, z1.h[7]\n"
"fmla z17.h, z7.h, z2.h[7]\n"
"fmla z21.h, z7.h, z3.h[7]\n"
- "ld1h { z7.h }, p5/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #8\n"
+ "ld1h { z7.h }, p5/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
"fmla z10.h, z6.h, z0.h[7]\n"
"fmla z14.h, z6.h, z1.h[7]\n"
"fmla z18.h, z6.h, z2.h[7]\n"
@@ -1566,28 +1566,28 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z23.h, z7.h, z3.h[7]\n"
"bgt 52b\n"
"53:" // Height 4: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
- "ld1rqh { z2.h }, p0/Z, [x24]\n"
- "ld1rqh { z3.h }, p0/Z, [x23]\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
+ "ld1rqh { z3.h }, p0/Z, [x22]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[0]\n"
"fmla z12.h, z6.h, z1.h[0]\n"
"fmla z16.h, z6.h, z2.h[0]\n"
"fmla z20.h, z6.h, z3.h[0]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "addvl x12, x12, #1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x11, x11, #1\n"
"fmla z9.h, z7.h, z0.h[0]\n"
"fmla z13.h, z7.h, z1.h[0]\n"
- "addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z17.h, z7.h, z2.h[0]\n"
"fmla z21.h, z7.h, z3.h[0]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x9, x9, #1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
"fmla z10.h, z6.h, z0.h[0]\n"
"fmla z14.h, z6.h, z1.h[0]\n"
"fmla z18.h, z6.h, z2.h[0]\n"
@@ -1597,25 +1597,25 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z19.h, z7.h, z2.h[0]\n"
"fmla z23.h, z7.h, z3.h[0]\n"
"ble 54f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[1]\n"
"fmla z12.h, z6.h, z1.h[1]\n"
"fmla z16.h, z6.h, z2.h[1]\n"
"fmla z20.h, z6.h, z3.h[1]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
"fmla z9.h, z7.h, z0.h[1]\n"
"fmla z13.h, z7.h, z1.h[1]\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z17.h, z7.h, z2.h[1]\n"
"fmla z21.h, z7.h, z3.h[1]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x10, x10, #1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, z6.h, z0.h[1]\n"
"fmla z14.h, z6.h, z1.h[1]\n"
- "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"fmla z18.h, z6.h, z2.h[1]\n"
"fmla z22.h, z6.h, z3.h[1]\n"
"fmla z11.h, z7.h, z0.h[1]\n"
@@ -1623,25 +1623,25 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z19.h, z7.h, z2.h[1]\n"
"fmla z23.h, z7.h, z3.h[1]\n"
"ble 54f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[2]\n"
"fmla z12.h, z6.h, z1.h[2]\n"
"fmla z16.h, z6.h, z2.h[2]\n"
"fmla z20.h, z6.h, z3.h[2]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
"fmla z9.h, z7.h, z0.h[2]\n"
"fmla z13.h, z7.h, z1.h[2]\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z17.h, z7.h, z2.h[2]\n"
"fmla z21.h, z7.h, z3.h[2]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x10, x10, #1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, z6.h, z0.h[2]\n"
"fmla z14.h, z6.h, z1.h[2]\n"
- "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"fmla z18.h, z6.h, z2.h[2]\n"
"fmla z22.h, z6.h, z3.h[2]\n"
"fmla z11.h, z7.h, z0.h[2]\n"
@@ -1649,25 +1649,25 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z19.h, z7.h, z2.h[2]\n"
"fmla z23.h, z7.h, z3.h[2]\n"
"ble 54f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[3]\n"
"fmla z12.h, z6.h, z1.h[3]\n"
"fmla z16.h, z6.h, z2.h[3]\n"
"fmla z20.h, z6.h, z3.h[3]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
"fmla z9.h, z7.h, z0.h[3]\n"
"fmla z13.h, z7.h, z1.h[3]\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z17.h, z7.h, z2.h[3]\n"
"fmla z21.h, z7.h, z3.h[3]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x10, x10, #1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, z6.h, z0.h[3]\n"
"fmla z14.h, z6.h, z1.h[3]\n"
- "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"fmla z18.h, z6.h, z2.h[3]\n"
"fmla z22.h, z6.h, z3.h[3]\n"
"fmla z11.h, z7.h, z0.h[3]\n"
@@ -1675,25 +1675,25 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z19.h, z7.h, z2.h[3]\n"
"fmla z23.h, z7.h, z3.h[3]\n"
"ble 54f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[4]\n"
"fmla z12.h, z6.h, z1.h[4]\n"
"fmla z16.h, z6.h, z2.h[4]\n"
"fmla z20.h, z6.h, z3.h[4]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
"fmla z9.h, z7.h, z0.h[4]\n"
"fmla z13.h, z7.h, z1.h[4]\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z17.h, z7.h, z2.h[4]\n"
"fmla z21.h, z7.h, z3.h[4]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x10, x10, #1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, z6.h, z0.h[4]\n"
"fmla z14.h, z6.h, z1.h[4]\n"
- "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"fmla z18.h, z6.h, z2.h[4]\n"
"fmla z22.h, z6.h, z3.h[4]\n"
"fmla z11.h, z7.h, z0.h[4]\n"
@@ -1701,25 +1701,25 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z19.h, z7.h, z2.h[4]\n"
"fmla z23.h, z7.h, z3.h[4]\n"
"ble 54f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[5]\n"
"fmla z12.h, z6.h, z1.h[5]\n"
"fmla z16.h, z6.h, z2.h[5]\n"
"fmla z20.h, z6.h, z3.h[5]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
"fmla z9.h, z7.h, z0.h[5]\n"
"fmla z13.h, z7.h, z1.h[5]\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z17.h, z7.h, z2.h[5]\n"
"fmla z21.h, z7.h, z3.h[5]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x10, x10, #1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, z6.h, z0.h[5]\n"
"fmla z14.h, z6.h, z1.h[5]\n"
- "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"fmla z18.h, z6.h, z2.h[5]\n"
"fmla z22.h, z6.h, z3.h[5]\n"
"fmla z11.h, z7.h, z0.h[5]\n"
@@ -1727,25 +1727,25 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z19.h, z7.h, z2.h[5]\n"
"fmla z23.h, z7.h, z3.h[5]\n"
"ble 54f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[6]\n"
"fmla z12.h, z6.h, z1.h[6]\n"
"fmla z16.h, z6.h, z2.h[6]\n"
"fmla z20.h, z6.h, z3.h[6]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
"fmla z9.h, z7.h, z0.h[6]\n"
"fmla z13.h, z7.h, z1.h[6]\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z17.h, z7.h, z2.h[6]\n"
"fmla z21.h, z7.h, z3.h[6]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x10, x10, #1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, z6.h, z0.h[6]\n"
"fmla z14.h, z6.h, z1.h[6]\n"
- "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"fmla z18.h, z6.h, z2.h[6]\n"
"fmla z22.h, z6.h, z3.h[6]\n"
"fmla z11.h, z7.h, z0.h[6]\n"
@@ -1753,22 +1753,22 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z19.h, z7.h, z2.h[6]\n"
"fmla z23.h, z7.h, z3.h[6]\n"
"ble 54f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[7]\n"
"fmla z12.h, z6.h, z1.h[7]\n"
"fmla z16.h, z6.h, z2.h[7]\n"
"fmla z20.h, z6.h, z3.h[7]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "addvl x12, x12, #1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x11, x11, #1\n"
"fmla z9.h, z7.h, z0.h[7]\n"
"fmla z13.h, z7.h, z1.h[7]\n"
- "addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z17.h, z7.h, z2.h[7]\n"
"fmla z21.h, z7.h, z3.h[7]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x9, x9, #1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
"fmla z10.h, z6.h, z0.h[7]\n"
"fmla z14.h, z6.h, z1.h[7]\n"
"fmla z18.h, z6.h, z2.h[7]\n"
@@ -1778,19 +1778,19 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z19.h, z7.h, z2.h[7]\n"
"fmla z23.h, z7.h, z3.h[7]\n"
"54:" // Height 4: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 49b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
"tbz %x[flags], #1, 55f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p5/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z1.h }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z0.h }, p5/Z, [x19]\n"
"fmin z8.h, p5/M, z8.h, z1.h\n"
"fmin z9.h, p5/M, z9.h, z1.h\n"
"fmin z10.h, p5/M, z10.h, z1.h\n"
@@ -1824,75 +1824,75 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmax z22.h, p5/M, z22.h, z0.h\n"
"fmax z23.h, p5/M, z23.h, z0.h\n"
"55:" // Height 4: No activation
- "st1h { z8.h }, p4, [x13]\n"
- "st1h { z9.h }, p3, [x13, #1, MUL VL]\n"
- "st1h { z10.h }, p2, [x13, #2, MUL VL]\n"
- "st1h { z11.h }, p1, [x13, #3, MUL VL]\n"
- "addvl x13, x13, #4\n"
- "st1h { z12.h }, p4, [x25]\n"
- "st1h { z13.h }, p3, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p2, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p1, [x25, #3, MUL VL]\n"
- "st1h { z16.h }, p4, [x24]\n"
- "st1h { z17.h }, p3, [x24, #1, MUL VL]\n"
- "st1h { z18.h }, p2, [x24, #2, MUL VL]\n"
- "st1h { z19.h }, p1, [x24, #3, MUL VL]\n"
- "st1h { z20.h }, p4, [x23]\n"
- "st1h { z21.h }, p3, [x23, #1, MUL VL]\n"
- "st1h { z22.h }, p2, [x23, #2, MUL VL]\n"
- "st1h { z23.h }, p1, [x23, #3, MUL VL]\n"
+ "st1h { z8.h }, p4, [x12]\n"
+ "st1h { z9.h }, p3, [x12, #1, MUL VL]\n"
+ "st1h { z10.h }, p2, [x12, #2, MUL VL]\n"
+ "st1h { z11.h }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1h { z12.h }, p4, [x24]\n"
+ "st1h { z13.h }, p3, [x24, #1, MUL VL]\n"
+ "st1h { z14.h }, p2, [x24, #2, MUL VL]\n"
+ "st1h { z15.h }, p1, [x24, #3, MUL VL]\n"
+ "st1h { z16.h }, p4, [x23]\n"
+ "st1h { z17.h }, p3, [x23, #1, MUL VL]\n"
+ "st1h { z18.h }, p2, [x23, #2, MUL VL]\n"
+ "st1h { z19.h }, p1, [x23, #3, MUL VL]\n"
+ "st1h { z20.h }, p4, [x22]\n"
+ "st1h { z21.h }, p3, [x22, #1, MUL VL]\n"
+ "st1h { z22.h }, p2, [x22, #2, MUL VL]\n"
+ "st1h { z23.h }, p1, [x22, #3, MUL VL]\n"
"56:" // Height 4: Writeback done
- "dech x14, ALL, MUL #4\n"
- "cmp x14, XZR\n"
+ "dech x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
"bgt 44b\n"
"b 86f\n"
"57:" // Height 5
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"58:" // Height 5: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "cnth x21, ALL, MUL #3\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x20, x9, x20, LSL #1\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cnth x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 59f\n"
- "dech x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 59f\n"
- "dech x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 59f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"59:" // Height 5: B setup done
- "mov x20, #0x0\n"
- "whilelt p4.h, x20, x14\n"
- "inch x20\n"
- "whilelt p3.h, x20, x14\n"
- "inch x20\n"
- "whilelt p2.h, x20, x14\n"
- "inch x20\n"
- "whilelt p1.h, x20, x14\n"
- "cbz x15, 60f\n"
- "ld1h { z8.h }, p5/Z, [x15]\n"
- "ld1h { z9.h }, p5/Z, [x15, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p3.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x13\n"
+ "cbz x14, 60f\n"
+ "ld1h { z8.h }, p5/Z, [x14]\n"
+ "ld1h { z9.h }, p5/Z, [x14, #1, MUL VL]\n"
"mov z12.d, z8.d\n"
"mov z13.d, z9.d\n"
- "ld1h { z10.h }, p5/Z, [x15, #2, MUL VL]\n"
- "ld1h { z11.h }, p5/Z, [x15, #3, MUL VL]\n"
+ "ld1h { z10.h }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1h { z11.h }, p5/Z, [x14, #3, MUL VL]\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x15, x15, #4\n"
+ "addvl x14, x14, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"mov z20.d, z8.d\n"
@@ -1906,31 +1906,31 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"b 62f\n"
"60:" // Height 5: no bias
"tbz %x[flags], #0, 61f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "ld1h { z8.h }, p4/Z, [x13]\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
- "ld1h { z9.h }, p3/Z, [x13, #1, MUL VL]\n"
- "ld1h { z10.h }, p2/Z, [x13, #2, MUL VL]\n"
- "ld1h { z11.h }, p1/Z, [x13, #3, MUL VL]\n"
- "ld1h { z12.h }, p4/Z, [x25]\n"
- "ld1h { z13.h }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1h { z14.h }, p2/Z, [x25, #2, MUL VL]\n"
- "ld1h { z15.h }, p1/Z, [x25, #3, MUL VL]\n"
- "ld1h { z16.h }, p4/Z, [x24]\n"
- "ld1h { z17.h }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1h { z18.h }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1h { z19.h }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1h { z20.h }, p4/Z, [x23]\n"
- "ld1h { z21.h }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1h { z22.h }, p2/Z, [x23, #2, MUL VL]\n"
- "ld1h { z23.h }, p1/Z, [x23, #3, MUL VL]\n"
- "ld1h { z24.h }, p4/Z, [x22]\n"
- "ld1h { z25.h }, p3/Z, [x22, #1, MUL VL]\n"
- "ld1h { z26.h }, p2/Z, [x22, #2, MUL VL]\n"
- "ld1h { z27.h }, p1/Z, [x22, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "ld1h { z8.h }, p4/Z, [x12]\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "ld1h { z9.h }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z10.h }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z11.h }, p1/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z12.h }, p4/Z, [x24]\n"
+ "ld1h { z13.h }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z14.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z15.h }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z16.h }, p4/Z, [x23]\n"
+ "ld1h { z17.h }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z18.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z19.h }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1h { z20.h }, p4/Z, [x22]\n"
+ "ld1h { z21.h }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z22.h }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z23.h }, p1/Z, [x22, #3, MUL VL]\n"
+ "ld1h { z24.h }, p4/Z, [x21]\n"
+ "ld1h { z25.h }, p3/Z, [x21, #1, MUL VL]\n"
+ "ld1h { z26.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1h { z27.h }, p1/Z, [x21, #3, MUL VL]\n"
"b 62f\n"
"61:" // Height 5: no accumulate
"mov z8.b, #0x0\n"
@@ -1954,236 +1954,236 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"mov z26.b, #0x0\n"
"mov z27.b, #0x0\n"
"62:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"63:" // Height 5: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 64f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 65f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
- "add x22, x22, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 65f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
"b 65f\n"
"64:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
"65:" // Height 5: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"ble 67f\n"
"66:" // Height 5: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
- "sub x27, x27, #0x8\n"
- "ld1rqh { z2.h }, p0/Z, [x24]\n"
- "ld1rqh { z3.h }, p0/Z, [x23]\n"
- "cmp x27, #0x8\n"
- "add x26, x26, #0x10\n"
- "ld1rqh { z4.h }, p0/Z, [x22]\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "sub x26, x26, #0x8\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
+ "ld1rqh { z3.h }, p0/Z, [x22]\n"
+ "cmp x26, #0x8\n"
+ "add x25, x25, #0x10\n"
+ "ld1rqh { z4.h }, p0/Z, [x21]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
"fmla z8.h, z6.h, z0.h[0]\n"
"fmla z12.h, z6.h, z1.h[0]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z16.h, z6.h, z2.h[0]\n"
"fmla z20.h, z6.h, z3.h[0]\n"
- "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"fmla z24.h, z6.h, z4.h[0]\n"
"fmla z9.h, z7.h, z0.h[0]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "add x24, x24, #0x10\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "add x23, x23, #0x10\n"
"fmla z13.h, z7.h, z1.h[0]\n"
"fmla z17.h, z7.h, z2.h[0]\n"
- "add x23, x23, #0x10\n"
"add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
"fmla z21.h, z7.h, z3.h[0]\n"
"fmla z25.h, z7.h, z4.h[0]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
"fmla z10.h, z6.h, z0.h[0]\n"
"fmla z14.h, z6.h, z1.h[0]\n"
"fmla z18.h, z6.h, z2.h[0]\n"
"fmla z22.h, z6.h, z3.h[0]\n"
"fmla z26.h, z6.h, z4.h[0]\n"
"fmla z11.h, z7.h, z0.h[0]\n"
- "ld1h { z6.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
"fmla z15.h, z7.h, z1.h[0]\n"
"fmla z19.h, z7.h, z2.h[0]\n"
"fmla z23.h, z7.h, z3.h[0]\n"
"fmla z27.h, z7.h, z4.h[0]\n"
- "ld1h { z7.h }, p5/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[1]\n"
"fmla z12.h, z6.h, z1.h[1]\n"
"fmla z16.h, z6.h, z2.h[1]\n"
"fmla z20.h, z6.h, z3.h[1]\n"
"fmla z24.h, z6.h, z4.h[1]\n"
"fmla z9.h, z7.h, z0.h[1]\n"
- "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
"fmla z13.h, z7.h, z1.h[1]\n"
"fmla z17.h, z7.h, z2.h[1]\n"
"fmla z21.h, z7.h, z3.h[1]\n"
"fmla z25.h, z7.h, z4.h[1]\n"
- "ld1h { z7.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #1, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[1]\n"
"fmla z14.h, z6.h, z1.h[1]\n"
"fmla z18.h, z6.h, z2.h[1]\n"
"fmla z22.h, z6.h, z3.h[1]\n"
"fmla z26.h, z6.h, z4.h[1]\n"
"fmla z11.h, z7.h, z0.h[1]\n"
- "ld1h { z6.h }, p5/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #2, MUL VL]\n"
"fmla z15.h, z7.h, z1.h[1]\n"
"fmla z19.h, z7.h, z2.h[1]\n"
"fmla z23.h, z7.h, z3.h[1]\n"
"fmla z27.h, z7.h, z4.h[1]\n"
- "ld1h { z7.h }, p5/Z, [x11, #2, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[2]\n"
"fmla z12.h, z6.h, z1.h[2]\n"
"fmla z16.h, z6.h, z2.h[2]\n"
"fmla z20.h, z6.h, z3.h[2]\n"
"fmla z24.h, z6.h, z4.h[2]\n"
"fmla z9.h, z7.h, z0.h[2]\n"
- "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #2, MUL VL]\n"
"fmla z13.h, z7.h, z1.h[2]\n"
"fmla z17.h, z7.h, z2.h[2]\n"
"fmla z21.h, z7.h, z3.h[2]\n"
"fmla z25.h, z7.h, z4.h[2]\n"
- "ld1h { z7.h }, p5/Z, [x9, #2, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #2, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[2]\n"
"fmla z14.h, z6.h, z1.h[2]\n"
"fmla z18.h, z6.h, z2.h[2]\n"
"fmla z22.h, z6.h, z3.h[2]\n"
"fmla z26.h, z6.h, z4.h[2]\n"
"fmla z11.h, z7.h, z0.h[2]\n"
- "ld1h { z6.h }, p5/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #3, MUL VL]\n"
"fmla z15.h, z7.h, z1.h[2]\n"
"fmla z19.h, z7.h, z2.h[2]\n"
"fmla z23.h, z7.h, z3.h[2]\n"
"fmla z27.h, z7.h, z4.h[2]\n"
- "ld1h { z7.h }, p5/Z, [x11, #3, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[3]\n"
"fmla z12.h, z6.h, z1.h[3]\n"
"fmla z16.h, z6.h, z2.h[3]\n"
"fmla z20.h, z6.h, z3.h[3]\n"
"fmla z24.h, z6.h, z4.h[3]\n"
"fmla z9.h, z7.h, z0.h[3]\n"
- "ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #3, MUL VL]\n"
"fmla z13.h, z7.h, z1.h[3]\n"
"fmla z17.h, z7.h, z2.h[3]\n"
"fmla z21.h, z7.h, z3.h[3]\n"
"fmla z25.h, z7.h, z4.h[3]\n"
- "ld1h { z7.h }, p5/Z, [x9, #3, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #3, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[3]\n"
"fmla z14.h, z6.h, z1.h[3]\n"
"fmla z18.h, z6.h, z2.h[3]\n"
"fmla z22.h, z6.h, z3.h[3]\n"
"fmla z26.h, z6.h, z4.h[3]\n"
"fmla z11.h, z7.h, z0.h[3]\n"
- "ld1h { z6.h }, p5/Z, [x12, #4, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #4, MUL VL]\n"
"fmla z15.h, z7.h, z1.h[3]\n"
"fmla z19.h, z7.h, z2.h[3]\n"
"fmla z23.h, z7.h, z3.h[3]\n"
"fmla z27.h, z7.h, z4.h[3]\n"
- "ld1h { z7.h }, p5/Z, [x11, #4, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #4, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[4]\n"
"fmla z12.h, z6.h, z1.h[4]\n"
"fmla z16.h, z6.h, z2.h[4]\n"
"fmla z20.h, z6.h, z3.h[4]\n"
"fmla z24.h, z6.h, z4.h[4]\n"
"fmla z9.h, z7.h, z0.h[4]\n"
- "ld1h { z6.h }, p5/Z, [x10, #4, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #4, MUL VL]\n"
"fmla z13.h, z7.h, z1.h[4]\n"
"fmla z17.h, z7.h, z2.h[4]\n"
"fmla z21.h, z7.h, z3.h[4]\n"
"fmla z25.h, z7.h, z4.h[4]\n"
- "ld1h { z7.h }, p5/Z, [x9, #4, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #4, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[4]\n"
"fmla z14.h, z6.h, z1.h[4]\n"
"fmla z18.h, z6.h, z2.h[4]\n"
"fmla z22.h, z6.h, z3.h[4]\n"
"fmla z26.h, z6.h, z4.h[4]\n"
"fmla z11.h, z7.h, z0.h[4]\n"
- "ld1h { z6.h }, p5/Z, [x12, #5, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #5, MUL VL]\n"
"fmla z15.h, z7.h, z1.h[4]\n"
"fmla z19.h, z7.h, z2.h[4]\n"
"fmla z23.h, z7.h, z3.h[4]\n"
"fmla z27.h, z7.h, z4.h[4]\n"
- "ld1h { z7.h }, p5/Z, [x11, #5, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #5, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[5]\n"
"fmla z12.h, z6.h, z1.h[5]\n"
"fmla z16.h, z6.h, z2.h[5]\n"
"fmla z20.h, z6.h, z3.h[5]\n"
"fmla z24.h, z6.h, z4.h[5]\n"
"fmla z9.h, z7.h, z0.h[5]\n"
- "ld1h { z6.h }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #5, MUL VL]\n"
"fmla z13.h, z7.h, z1.h[5]\n"
"fmla z17.h, z7.h, z2.h[5]\n"
"fmla z21.h, z7.h, z3.h[5]\n"
"fmla z25.h, z7.h, z4.h[5]\n"
- "ld1h { z7.h }, p5/Z, [x9, #5, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #5, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[5]\n"
"fmla z14.h, z6.h, z1.h[5]\n"
"fmla z18.h, z6.h, z2.h[5]\n"
"fmla z22.h, z6.h, z3.h[5]\n"
"fmla z26.h, z6.h, z4.h[5]\n"
"fmla z11.h, z7.h, z0.h[5]\n"
- "ld1h { z6.h }, p5/Z, [x12, #6, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #6, MUL VL]\n"
"fmla z15.h, z7.h, z1.h[5]\n"
"fmla z19.h, z7.h, z2.h[5]\n"
"fmla z23.h, z7.h, z3.h[5]\n"
"fmla z27.h, z7.h, z4.h[5]\n"
- "ld1h { z7.h }, p5/Z, [x11, #6, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #6, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[6]\n"
"fmla z12.h, z6.h, z1.h[6]\n"
"fmla z16.h, z6.h, z2.h[6]\n"
"fmla z20.h, z6.h, z3.h[6]\n"
"fmla z24.h, z6.h, z4.h[6]\n"
"fmla z9.h, z7.h, z0.h[6]\n"
- "ld1h { z6.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #6, MUL VL]\n"
"fmla z13.h, z7.h, z1.h[6]\n"
"fmla z17.h, z7.h, z2.h[6]\n"
"fmla z21.h, z7.h, z3.h[6]\n"
"fmla z25.h, z7.h, z4.h[6]\n"
- "ld1h { z7.h }, p5/Z, [x9, #6, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #6, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[6]\n"
"fmla z14.h, z6.h, z1.h[6]\n"
"fmla z18.h, z6.h, z2.h[6]\n"
"fmla z22.h, z6.h, z3.h[6]\n"
"fmla z26.h, z6.h, z4.h[6]\n"
"fmla z11.h, z7.h, z0.h[6]\n"
- "ld1h { z6.h }, p5/Z, [x12, #7, MUL VL]\n"
- "addvl x12, x12, #8\n"
+ "ld1h { z6.h }, p5/Z, [x11, #7, MUL VL]\n"
+ "addvl x11, x11, #8\n"
"fmla z15.h, z7.h, z1.h[6]\n"
"fmla z19.h, z7.h, z2.h[6]\n"
"fmla z23.h, z7.h, z3.h[6]\n"
"fmla z27.h, z7.h, z4.h[6]\n"
- "ld1h { z7.h }, p5/Z, [x11, #7, MUL VL]\n"
- "addvl x11, x11, #8\n"
+ "ld1h { z7.h }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
"fmla z8.h, z6.h, z0.h[7]\n"
"fmla z12.h, z6.h, z1.h[7]\n"
"fmla z16.h, z6.h, z2.h[7]\n"
"fmla z20.h, z6.h, z3.h[7]\n"
"fmla z24.h, z6.h, z4.h[7]\n"
"fmla z9.h, z7.h, z0.h[7]\n"
- "ld1h { z6.h }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #8\n"
+ "ld1h { z6.h }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
"fmla z13.h, z7.h, z1.h[7]\n"
"fmla z17.h, z7.h, z2.h[7]\n"
"fmla z21.h, z7.h, z3.h[7]\n"
"fmla z25.h, z7.h, z4.h[7]\n"
- "ld1h { z7.h }, p5/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #8\n"
+ "ld1h { z7.h }, p5/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
"fmla z10.h, z6.h, z0.h[7]\n"
"fmla z14.h, z6.h, z1.h[7]\n"
"fmla z18.h, z6.h, z2.h[7]\n"
@@ -2196,31 +2196,31 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z27.h, z7.h, z4.h[7]\n"
"bgt 66b\n"
"67:" // Height 5: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
- "ld1rqh { z2.h }, p0/Z, [x24]\n"
- "ld1rqh { z3.h }, p0/Z, [x23]\n"
- "ld1rqh { z4.h }, p0/Z, [x22]\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
+ "ld1rqh { z3.h }, p0/Z, [x22]\n"
+ "ld1rqh { z4.h }, p0/Z, [x21]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
"fmla z8.h, z6.h, z0.h[0]\n"
"fmla z12.h, z6.h, z1.h[0]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z16.h, z6.h, z2.h[0]\n"
"fmla z20.h, z6.h, z3.h[0]\n"
- "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z24.h, z6.h, z4.h[0]\n"
"fmla z9.h, z7.h, z0.h[0]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
"fmla z13.h, z7.h, z1.h[0]\n"
"fmla z17.h, z7.h, z2.h[0]\n"
- "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z21.h, z7.h, z3.h[0]\n"
"fmla z25.h, z7.h, z4.h[0]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x9, x9, #1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
"fmla z10.h, z6.h, z0.h[0]\n"
"fmla z14.h, z6.h, z1.h[0]\n"
"fmla z18.h, z6.h, z2.h[0]\n"
@@ -2232,25 +2232,25 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z23.h, z7.h, z3.h[0]\n"
"fmla z27.h, z7.h, z4.h[0]\n"
"ble 68f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[1]\n"
"fmla z12.h, z6.h, z1.h[1]\n"
"fmla z16.h, z6.h, z2.h[1]\n"
"fmla z20.h, z6.h, z3.h[1]\n"
- "subs x27, x27, #0x1\n"
- "addvl x12, x12, #1\n"
+ "subs x26, x26, #0x1\n"
+ "addvl x11, x11, #1\n"
"fmla z24.h, z6.h, z4.h[1]\n"
"fmla z9.h, z7.h, z0.h[1]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
"fmla z13.h, z7.h, z1.h[1]\n"
"fmla z17.h, z7.h, z2.h[1]\n"
- "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z21.h, z7.h, z3.h[1]\n"
"fmla z25.h, z7.h, z4.h[1]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x9, x9, #1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
"fmla z10.h, z6.h, z0.h[1]\n"
"fmla z14.h, z6.h, z1.h[1]\n"
"fmla z18.h, z6.h, z2.h[1]\n"
@@ -2262,25 +2262,25 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z23.h, z7.h, z3.h[1]\n"
"fmla z27.h, z7.h, z4.h[1]\n"
"ble 68f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[2]\n"
"fmla z12.h, z6.h, z1.h[2]\n"
"fmla z16.h, z6.h, z2.h[2]\n"
"fmla z20.h, z6.h, z3.h[2]\n"
- "subs x27, x27, #0x1\n"
- "addvl x12, x12, #1\n"
+ "subs x26, x26, #0x1\n"
+ "addvl x11, x11, #1\n"
"fmla z24.h, z6.h, z4.h[2]\n"
"fmla z9.h, z7.h, z0.h[2]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
"fmla z13.h, z7.h, z1.h[2]\n"
"fmla z17.h, z7.h, z2.h[2]\n"
- "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z21.h, z7.h, z3.h[2]\n"
"fmla z25.h, z7.h, z4.h[2]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x9, x9, #1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
"fmla z10.h, z6.h, z0.h[2]\n"
"fmla z14.h, z6.h, z1.h[2]\n"
"fmla z18.h, z6.h, z2.h[2]\n"
@@ -2292,25 +2292,25 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z23.h, z7.h, z3.h[2]\n"
"fmla z27.h, z7.h, z4.h[2]\n"
"ble 68f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[3]\n"
"fmla z12.h, z6.h, z1.h[3]\n"
"fmla z16.h, z6.h, z2.h[3]\n"
"fmla z20.h, z6.h, z3.h[3]\n"
- "subs x27, x27, #0x1\n"
- "addvl x12, x12, #1\n"
+ "subs x26, x26, #0x1\n"
+ "addvl x11, x11, #1\n"
"fmla z24.h, z6.h, z4.h[3]\n"
"fmla z9.h, z7.h, z0.h[3]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
"fmla z13.h, z7.h, z1.h[3]\n"
"fmla z17.h, z7.h, z2.h[3]\n"
- "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z21.h, z7.h, z3.h[3]\n"
"fmla z25.h, z7.h, z4.h[3]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x9, x9, #1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
"fmla z10.h, z6.h, z0.h[3]\n"
"fmla z14.h, z6.h, z1.h[3]\n"
"fmla z18.h, z6.h, z2.h[3]\n"
@@ -2322,25 +2322,25 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z23.h, z7.h, z3.h[3]\n"
"fmla z27.h, z7.h, z4.h[3]\n"
"ble 68f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[4]\n"
"fmla z12.h, z6.h, z1.h[4]\n"
"fmla z16.h, z6.h, z2.h[4]\n"
"fmla z20.h, z6.h, z3.h[4]\n"
- "subs x27, x27, #0x1\n"
- "addvl x12, x12, #1\n"
+ "subs x26, x26, #0x1\n"
+ "addvl x11, x11, #1\n"
"fmla z24.h, z6.h, z4.h[4]\n"
"fmla z9.h, z7.h, z0.h[4]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
"fmla z13.h, z7.h, z1.h[4]\n"
"fmla z17.h, z7.h, z2.h[4]\n"
- "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z21.h, z7.h, z3.h[4]\n"
"fmla z25.h, z7.h, z4.h[4]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x9, x9, #1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
"fmla z10.h, z6.h, z0.h[4]\n"
"fmla z14.h, z6.h, z1.h[4]\n"
"fmla z18.h, z6.h, z2.h[4]\n"
@@ -2352,25 +2352,25 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z23.h, z7.h, z3.h[4]\n"
"fmla z27.h, z7.h, z4.h[4]\n"
"ble 68f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[5]\n"
"fmla z12.h, z6.h, z1.h[5]\n"
"fmla z16.h, z6.h, z2.h[5]\n"
"fmla z20.h, z6.h, z3.h[5]\n"
- "subs x27, x27, #0x1\n"
- "addvl x12, x12, #1\n"
+ "subs x26, x26, #0x1\n"
+ "addvl x11, x11, #1\n"
"fmla z24.h, z6.h, z4.h[5]\n"
"fmla z9.h, z7.h, z0.h[5]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
"fmla z13.h, z7.h, z1.h[5]\n"
"fmla z17.h, z7.h, z2.h[5]\n"
- "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z21.h, z7.h, z3.h[5]\n"
"fmla z25.h, z7.h, z4.h[5]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x9, x9, #1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
"fmla z10.h, z6.h, z0.h[5]\n"
"fmla z14.h, z6.h, z1.h[5]\n"
"fmla z18.h, z6.h, z2.h[5]\n"
@@ -2382,25 +2382,25 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z23.h, z7.h, z3.h[5]\n"
"fmla z27.h, z7.h, z4.h[5]\n"
"ble 68f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[6]\n"
"fmla z12.h, z6.h, z1.h[6]\n"
"fmla z16.h, z6.h, z2.h[6]\n"
"fmla z20.h, z6.h, z3.h[6]\n"
- "subs x27, x27, #0x1\n"
- "addvl x12, x12, #1\n"
+ "subs x26, x26, #0x1\n"
+ "addvl x11, x11, #1\n"
"fmla z24.h, z6.h, z4.h[6]\n"
"fmla z9.h, z7.h, z0.h[6]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
"fmla z13.h, z7.h, z1.h[6]\n"
"fmla z17.h, z7.h, z2.h[6]\n"
- "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z21.h, z7.h, z3.h[6]\n"
"fmla z25.h, z7.h, z4.h[6]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x9, x9, #1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
"fmla z10.h, z6.h, z0.h[6]\n"
"fmla z14.h, z6.h, z1.h[6]\n"
"fmla z18.h, z6.h, z2.h[6]\n"
@@ -2412,24 +2412,24 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z23.h, z7.h, z3.h[6]\n"
"fmla z27.h, z7.h, z4.h[6]\n"
"ble 68f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[7]\n"
"fmla z12.h, z6.h, z1.h[7]\n"
"fmla z16.h, z6.h, z2.h[7]\n"
"fmla z20.h, z6.h, z3.h[7]\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z24.h, z6.h, z4.h[7]\n"
"fmla z9.h, z7.h, z0.h[7]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "addvl x10, x10, #1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x9, x9, #1\n"
"fmla z13.h, z7.h, z1.h[7]\n"
"fmla z17.h, z7.h, z2.h[7]\n"
"fmla z21.h, z7.h, z3.h[7]\n"
"fmla z25.h, z7.h, z4.h[7]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x9, x9, #1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
"fmla z10.h, z6.h, z0.h[7]\n"
"fmla z14.h, z6.h, z1.h[7]\n"
"fmla z18.h, z6.h, z2.h[7]\n"
@@ -2441,20 +2441,20 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z23.h, z7.h, z3.h[7]\n"
"fmla z27.h, z7.h, z4.h[7]\n"
"68:" // Height 5: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 63b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
"tbz %x[flags], #1, 69f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p5/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z1.h }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z0.h }, p5/Z, [x19]\n"
"fmin z8.h, p5/M, z8.h, z1.h\n"
"fmin z9.h, p5/M, z9.h, z1.h\n"
"fmin z10.h, p5/M, z10.h, z1.h\n"
@@ -2496,82 +2496,82 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmax z26.h, p5/M, z26.h, z0.h\n"
"fmax z27.h, p5/M, z27.h, z0.h\n"
"69:" // Height 5: No activation
- "st1h { z8.h }, p4, [x13]\n"
- "st1h { z9.h }, p3, [x13, #1, MUL VL]\n"
- "st1h { z10.h }, p2, [x13, #2, MUL VL]\n"
- "st1h { z11.h }, p1, [x13, #3, MUL VL]\n"
- "addvl x13, x13, #4\n"
- "st1h { z12.h }, p4, [x25]\n"
- "st1h { z13.h }, p3, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p2, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p1, [x25, #3, MUL VL]\n"
- "st1h { z16.h }, p4, [x24]\n"
- "st1h { z17.h }, p3, [x24, #1, MUL VL]\n"
- "st1h { z18.h }, p2, [x24, #2, MUL VL]\n"
- "st1h { z19.h }, p1, [x24, #3, MUL VL]\n"
- "st1h { z20.h }, p4, [x23]\n"
- "st1h { z21.h }, p3, [x23, #1, MUL VL]\n"
- "st1h { z22.h }, p2, [x23, #2, MUL VL]\n"
- "st1h { z23.h }, p1, [x23, #3, MUL VL]\n"
- "st1h { z24.h }, p4, [x22]\n"
- "st1h { z25.h }, p3, [x22, #1, MUL VL]\n"
- "st1h { z26.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z27.h }, p1, [x22, #3, MUL VL]\n"
+ "st1h { z8.h }, p4, [x12]\n"
+ "st1h { z9.h }, p3, [x12, #1, MUL VL]\n"
+ "st1h { z10.h }, p2, [x12, #2, MUL VL]\n"
+ "st1h { z11.h }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1h { z12.h }, p4, [x24]\n"
+ "st1h { z13.h }, p3, [x24, #1, MUL VL]\n"
+ "st1h { z14.h }, p2, [x24, #2, MUL VL]\n"
+ "st1h { z15.h }, p1, [x24, #3, MUL VL]\n"
+ "st1h { z16.h }, p4, [x23]\n"
+ "st1h { z17.h }, p3, [x23, #1, MUL VL]\n"
+ "st1h { z18.h }, p2, [x23, #2, MUL VL]\n"
+ "st1h { z19.h }, p1, [x23, #3, MUL VL]\n"
+ "st1h { z20.h }, p4, [x22]\n"
+ "st1h { z21.h }, p3, [x22, #1, MUL VL]\n"
+ "st1h { z22.h }, p2, [x22, #2, MUL VL]\n"
+ "st1h { z23.h }, p1, [x22, #3, MUL VL]\n"
+ "st1h { z24.h }, p4, [x21]\n"
+ "st1h { z25.h }, p3, [x21, #1, MUL VL]\n"
+ "st1h { z26.h }, p2, [x21, #2, MUL VL]\n"
+ "st1h { z27.h }, p1, [x21, #3, MUL VL]\n"
"70:" // Height 5: Writeback done
- "dech x14, ALL, MUL #4\n"
- "cmp x14, XZR\n"
+ "dech x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
"bgt 58b\n"
"b 86f\n"
"71:" // Height 6
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x21, #0xc\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x15, %x[bias]\n"
- "mov x13, %x[output_ptr]\n"
- "madd %x[output_ptr], x20, x21, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x20, #0xc\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x14, %x[bias]\n"
+ "mov x12, %x[output_ptr]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
"72:" // Height 6: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "cnth x21, ALL, MUL #3\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "add x20, x9, x20, LSL #1\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cnth x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 73f\n"
- "dech x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 73f\n"
- "dech x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 73f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"73:" // Height 6: B setup done
- "mov x20, #0x0\n"
- "whilelt p4.h, x20, x14\n"
- "inch x20\n"
- "whilelt p3.h, x20, x14\n"
- "inch x20\n"
- "whilelt p2.h, x20, x14\n"
- "inch x20\n"
- "whilelt p1.h, x20, x14\n"
- "cbz x15, 74f\n"
- "ld1h { z8.h }, p5/Z, [x15]\n"
- "ld1h { z9.h }, p5/Z, [x15, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p3.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x13\n"
+ "cbz x14, 74f\n"
+ "ld1h { z8.h }, p5/Z, [x14]\n"
+ "ld1h { z9.h }, p5/Z, [x14, #1, MUL VL]\n"
"mov z12.d, z8.d\n"
"mov z13.d, z9.d\n"
- "ld1h { z10.h }, p5/Z, [x15, #2, MUL VL]\n"
- "ld1h { z11.h }, p5/Z, [x15, #3, MUL VL]\n"
+ "ld1h { z10.h }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1h { z11.h }, p5/Z, [x14, #3, MUL VL]\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x15, x15, #4\n"
+ "addvl x14, x14, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"mov z20.d, z8.d\n"
@@ -2589,36 +2589,36 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"b 76f\n"
"74:" // Height 6: no bias
"tbz %x[flags], #0, 75f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "ld1h { z8.h }, p4/Z, [x13]\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
- "ld1h { z9.h }, p3/Z, [x13, #1, MUL VL]\n"
- "ld1h { z10.h }, p2/Z, [x13, #2, MUL VL]\n"
- "add x21, x22, x20, LSL #1\n"
- "ld1h { z11.h }, p1/Z, [x13, #3, MUL VL]\n"
- "ld1h { z12.h }, p4/Z, [x25]\n"
- "ld1h { z13.h }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1h { z14.h }, p2/Z, [x25, #2, MUL VL]\n"
- "ld1h { z15.h }, p1/Z, [x25, #3, MUL VL]\n"
- "ld1h { z16.h }, p4/Z, [x24]\n"
- "ld1h { z17.h }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1h { z18.h }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1h { z19.h }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1h { z20.h }, p4/Z, [x23]\n"
- "ld1h { z21.h }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1h { z22.h }, p2/Z, [x23, #2, MUL VL]\n"
- "ld1h { z23.h }, p1/Z, [x23, #3, MUL VL]\n"
- "ld1h { z24.h }, p4/Z, [x22]\n"
- "ld1h { z25.h }, p3/Z, [x22, #1, MUL VL]\n"
- "ld1h { z26.h }, p2/Z, [x22, #2, MUL VL]\n"
- "ld1h { z27.h }, p1/Z, [x22, #3, MUL VL]\n"
- "ld1h { z28.h }, p4/Z, [x21]\n"
- "ld1h { z29.h }, p3/Z, [x21, #1, MUL VL]\n"
- "ld1h { z30.h }, p2/Z, [x21, #2, MUL VL]\n"
- "ld1h { z31.h }, p1/Z, [x21, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "ld1h { z8.h }, p4/Z, [x12]\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "ld1h { z9.h }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z10.h }, p2/Z, [x12, #2, MUL VL]\n"
+ "add x20, x21, x19, LSL #1\n"
+ "ld1h { z11.h }, p1/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z12.h }, p4/Z, [x24]\n"
+ "ld1h { z13.h }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z14.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z15.h }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z16.h }, p4/Z, [x23]\n"
+ "ld1h { z17.h }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z18.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z19.h }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1h { z20.h }, p4/Z, [x22]\n"
+ "ld1h { z21.h }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z22.h }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z23.h }, p1/Z, [x22, #3, MUL VL]\n"
+ "ld1h { z24.h }, p4/Z, [x21]\n"
+ "ld1h { z25.h }, p3/Z, [x21, #1, MUL VL]\n"
+ "ld1h { z26.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1h { z27.h }, p1/Z, [x21, #3, MUL VL]\n"
+ "ld1h { z28.h }, p4/Z, [x20]\n"
+ "ld1h { z29.h }, p3/Z, [x20, #1, MUL VL]\n"
+ "ld1h { z30.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z31.h }, p1/Z, [x20, #3, MUL VL]\n"
"b 76f\n"
"75:" // Height 6: no accumulate
"mov z8.b, #0x0\n"
@@ -2646,271 +2646,271 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"mov z30.b, #0x0\n"
"mov z31.b, #0x0\n"
"76:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"77:" // Height 6: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 78f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 79f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
- "add x22, x22, x20, LSL #1\n"
- "add x21, x21, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 79f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
+ "add x20, x20, x19, LSL #1\n"
"b 79f\n"
"78:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
"79:" // Height 6: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"ble 81f\n"
"80:" // Height 6: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
- "sub x27, x27, #0x8\n"
- "ld1rqh { z2.h }, p0/Z, [x24]\n"
- "ld1rqh { z3.h }, p0/Z, [x23]\n"
- "cmp x27, #0x8\n"
- "add x26, x26, #0x10\n"
- "ld1rqh { z4.h }, p0/Z, [x22]\n"
- "ld1rqh { z5.h }, p0/Z, [x21]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "sub x26, x26, #0x8\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
+ "ld1rqh { z3.h }, p0/Z, [x22]\n"
+ "cmp x26, #0x8\n"
"add x25, x25, #0x10\n"
+ "ld1rqh { z4.h }, p0/Z, [x21]\n"
+ "ld1rqh { z5.h }, p0/Z, [x20]\n"
"add x24, x24, #0x10\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "add x23, x23, #0x10\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[0]\n"
"fmla z12.h, z6.h, z1.h[0]\n"
"fmla z16.h, z6.h, z2.h[0]\n"
"fmla z20.h, z6.h, z3.h[0]\n"
- "add x23, x23, #0x10\n"
"add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
"fmla z24.h, z6.h, z4.h[0]\n"
"fmla z28.h, z6.h, z5.h[0]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "add x21, x21, #0x10\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "add x20, x20, #0x10\n"
"fmla z9.h, z7.h, z0.h[0]\n"
"fmla z13.h, z7.h, z1.h[0]\n"
"fmla z17.h, z7.h, z2.h[0]\n"
"fmla z21.h, z7.h, z3.h[0]\n"
"fmla z25.h, z7.h, z4.h[0]\n"
"fmla z29.h, z7.h, z5.h[0]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
"fmla z10.h, z6.h, z0.h[0]\n"
"fmla z14.h, z6.h, z1.h[0]\n"
"fmla z18.h, z6.h, z2.h[0]\n"
"fmla z22.h, z6.h, z3.h[0]\n"
"fmla z26.h, z6.h, z4.h[0]\n"
"fmla z30.h, z6.h, z5.h[0]\n"
- "ld1h { z6.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
"fmla z11.h, z7.h, z0.h[0]\n"
"fmla z15.h, z7.h, z1.h[0]\n"
"fmla z19.h, z7.h, z2.h[0]\n"
"fmla z23.h, z7.h, z3.h[0]\n"
"fmla z27.h, z7.h, z4.h[0]\n"
"fmla z31.h, z7.h, z5.h[0]\n"
- "ld1h { z7.h }, p5/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[1]\n"
"fmla z12.h, z6.h, z1.h[1]\n"
"fmla z16.h, z6.h, z2.h[1]\n"
"fmla z20.h, z6.h, z3.h[1]\n"
"fmla z24.h, z6.h, z4.h[1]\n"
"fmla z28.h, z6.h, z5.h[1]\n"
- "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
"fmla z9.h, z7.h, z0.h[1]\n"
"fmla z13.h, z7.h, z1.h[1]\n"
"fmla z17.h, z7.h, z2.h[1]\n"
"fmla z21.h, z7.h, z3.h[1]\n"
"fmla z25.h, z7.h, z4.h[1]\n"
"fmla z29.h, z7.h, z5.h[1]\n"
- "ld1h { z7.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #1, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[1]\n"
"fmla z14.h, z6.h, z1.h[1]\n"
"fmla z18.h, z6.h, z2.h[1]\n"
"fmla z22.h, z6.h, z3.h[1]\n"
"fmla z26.h, z6.h, z4.h[1]\n"
"fmla z30.h, z6.h, z5.h[1]\n"
- "ld1h { z6.h }, p5/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #2, MUL VL]\n"
"fmla z11.h, z7.h, z0.h[1]\n"
"fmla z15.h, z7.h, z1.h[1]\n"
"fmla z19.h, z7.h, z2.h[1]\n"
"fmla z23.h, z7.h, z3.h[1]\n"
"fmla z27.h, z7.h, z4.h[1]\n"
"fmla z31.h, z7.h, z5.h[1]\n"
- "ld1h { z7.h }, p5/Z, [x11, #2, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[2]\n"
"fmla z12.h, z6.h, z1.h[2]\n"
"fmla z16.h, z6.h, z2.h[2]\n"
"fmla z20.h, z6.h, z3.h[2]\n"
"fmla z24.h, z6.h, z4.h[2]\n"
"fmla z28.h, z6.h, z5.h[2]\n"
- "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #2, MUL VL]\n"
"fmla z9.h, z7.h, z0.h[2]\n"
"fmla z13.h, z7.h, z1.h[2]\n"
"fmla z17.h, z7.h, z2.h[2]\n"
"fmla z21.h, z7.h, z3.h[2]\n"
"fmla z25.h, z7.h, z4.h[2]\n"
"fmla z29.h, z7.h, z5.h[2]\n"
- "ld1h { z7.h }, p5/Z, [x9, #2, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #2, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[2]\n"
"fmla z14.h, z6.h, z1.h[2]\n"
"fmla z18.h, z6.h, z2.h[2]\n"
"fmla z22.h, z6.h, z3.h[2]\n"
"fmla z26.h, z6.h, z4.h[2]\n"
"fmla z30.h, z6.h, z5.h[2]\n"
- "ld1h { z6.h }, p5/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #3, MUL VL]\n"
"fmla z11.h, z7.h, z0.h[2]\n"
"fmla z15.h, z7.h, z1.h[2]\n"
"fmla z19.h, z7.h, z2.h[2]\n"
"fmla z23.h, z7.h, z3.h[2]\n"
"fmla z27.h, z7.h, z4.h[2]\n"
"fmla z31.h, z7.h, z5.h[2]\n"
- "ld1h { z7.h }, p5/Z, [x11, #3, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[3]\n"
"fmla z12.h, z6.h, z1.h[3]\n"
"fmla z16.h, z6.h, z2.h[3]\n"
"fmla z20.h, z6.h, z3.h[3]\n"
"fmla z24.h, z6.h, z4.h[3]\n"
"fmla z28.h, z6.h, z5.h[3]\n"
- "ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #3, MUL VL]\n"
"fmla z9.h, z7.h, z0.h[3]\n"
"fmla z13.h, z7.h, z1.h[3]\n"
"fmla z17.h, z7.h, z2.h[3]\n"
"fmla z21.h, z7.h, z3.h[3]\n"
"fmla z25.h, z7.h, z4.h[3]\n"
"fmla z29.h, z7.h, z5.h[3]\n"
- "ld1h { z7.h }, p5/Z, [x9, #3, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #3, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[3]\n"
"fmla z14.h, z6.h, z1.h[3]\n"
"fmla z18.h, z6.h, z2.h[3]\n"
"fmla z22.h, z6.h, z3.h[3]\n"
"fmla z26.h, z6.h, z4.h[3]\n"
"fmla z30.h, z6.h, z5.h[3]\n"
- "ld1h { z6.h }, p5/Z, [x12, #4, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #4, MUL VL]\n"
"fmla z11.h, z7.h, z0.h[3]\n"
"fmla z15.h, z7.h, z1.h[3]\n"
"fmla z19.h, z7.h, z2.h[3]\n"
"fmla z23.h, z7.h, z3.h[3]\n"
"fmla z27.h, z7.h, z4.h[3]\n"
"fmla z31.h, z7.h, z5.h[3]\n"
- "ld1h { z7.h }, p5/Z, [x11, #4, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #4, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[4]\n"
"fmla z12.h, z6.h, z1.h[4]\n"
"fmla z16.h, z6.h, z2.h[4]\n"
"fmla z20.h, z6.h, z3.h[4]\n"
"fmla z24.h, z6.h, z4.h[4]\n"
"fmla z28.h, z6.h, z5.h[4]\n"
- "ld1h { z6.h }, p5/Z, [x10, #4, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #4, MUL VL]\n"
"fmla z9.h, z7.h, z0.h[4]\n"
"fmla z13.h, z7.h, z1.h[4]\n"
"fmla z17.h, z7.h, z2.h[4]\n"
"fmla z21.h, z7.h, z3.h[4]\n"
"fmla z25.h, z7.h, z4.h[4]\n"
"fmla z29.h, z7.h, z5.h[4]\n"
- "ld1h { z7.h }, p5/Z, [x9, #4, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #4, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[4]\n"
"fmla z14.h, z6.h, z1.h[4]\n"
"fmla z18.h, z6.h, z2.h[4]\n"
"fmla z22.h, z6.h, z3.h[4]\n"
"fmla z26.h, z6.h, z4.h[4]\n"
"fmla z30.h, z6.h, z5.h[4]\n"
- "ld1h { z6.h }, p5/Z, [x12, #5, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #5, MUL VL]\n"
"fmla z11.h, z7.h, z0.h[4]\n"
"fmla z15.h, z7.h, z1.h[4]\n"
"fmla z19.h, z7.h, z2.h[4]\n"
"fmla z23.h, z7.h, z3.h[4]\n"
"fmla z27.h, z7.h, z4.h[4]\n"
"fmla z31.h, z7.h, z5.h[4]\n"
- "ld1h { z7.h }, p5/Z, [x11, #5, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #5, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[5]\n"
"fmla z12.h, z6.h, z1.h[5]\n"
"fmla z16.h, z6.h, z2.h[5]\n"
"fmla z20.h, z6.h, z3.h[5]\n"
"fmla z24.h, z6.h, z4.h[5]\n"
"fmla z28.h, z6.h, z5.h[5]\n"
- "ld1h { z6.h }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #5, MUL VL]\n"
"fmla z9.h, z7.h, z0.h[5]\n"
"fmla z13.h, z7.h, z1.h[5]\n"
"fmla z17.h, z7.h, z2.h[5]\n"
"fmla z21.h, z7.h, z3.h[5]\n"
"fmla z25.h, z7.h, z4.h[5]\n"
"fmla z29.h, z7.h, z5.h[5]\n"
- "ld1h { z7.h }, p5/Z, [x9, #5, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #5, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[5]\n"
"fmla z14.h, z6.h, z1.h[5]\n"
"fmla z18.h, z6.h, z2.h[5]\n"
"fmla z22.h, z6.h, z3.h[5]\n"
"fmla z26.h, z6.h, z4.h[5]\n"
"fmla z30.h, z6.h, z5.h[5]\n"
- "ld1h { z6.h }, p5/Z, [x12, #6, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #6, MUL VL]\n"
"fmla z11.h, z7.h, z0.h[5]\n"
"fmla z15.h, z7.h, z1.h[5]\n"
"fmla z19.h, z7.h, z2.h[5]\n"
"fmla z23.h, z7.h, z3.h[5]\n"
"fmla z27.h, z7.h, z4.h[5]\n"
"fmla z31.h, z7.h, z5.h[5]\n"
- "ld1h { z7.h }, p5/Z, [x11, #6, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #6, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[6]\n"
"fmla z12.h, z6.h, z1.h[6]\n"
"fmla z16.h, z6.h, z2.h[6]\n"
"fmla z20.h, z6.h, z3.h[6]\n"
"fmla z24.h, z6.h, z4.h[6]\n"
"fmla z28.h, z6.h, z5.h[6]\n"
- "ld1h { z6.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #6, MUL VL]\n"
"fmla z9.h, z7.h, z0.h[6]\n"
"fmla z13.h, z7.h, z1.h[6]\n"
"fmla z17.h, z7.h, z2.h[6]\n"
"fmla z21.h, z7.h, z3.h[6]\n"
"fmla z25.h, z7.h, z4.h[6]\n"
"fmla z29.h, z7.h, z5.h[6]\n"
- "ld1h { z7.h }, p5/Z, [x9, #6, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #6, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[6]\n"
"fmla z14.h, z6.h, z1.h[6]\n"
"fmla z18.h, z6.h, z2.h[6]\n"
"fmla z22.h, z6.h, z3.h[6]\n"
"fmla z26.h, z6.h, z4.h[6]\n"
"fmla z30.h, z6.h, z5.h[6]\n"
- "ld1h { z6.h }, p5/Z, [x12, #7, MUL VL]\n"
- "addvl x12, x12, #8\n"
+ "ld1h { z6.h }, p5/Z, [x11, #7, MUL VL]\n"
+ "addvl x11, x11, #8\n"
"fmla z11.h, z7.h, z0.h[6]\n"
"fmla z15.h, z7.h, z1.h[6]\n"
"fmla z19.h, z7.h, z2.h[6]\n"
"fmla z23.h, z7.h, z3.h[6]\n"
"fmla z27.h, z7.h, z4.h[6]\n"
"fmla z31.h, z7.h, z5.h[6]\n"
- "ld1h { z7.h }, p5/Z, [x11, #7, MUL VL]\n"
- "addvl x11, x11, #8\n"
+ "ld1h { z7.h }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
"fmla z8.h, z6.h, z0.h[7]\n"
"fmla z12.h, z6.h, z1.h[7]\n"
"fmla z16.h, z6.h, z2.h[7]\n"
"fmla z20.h, z6.h, z3.h[7]\n"
"fmla z24.h, z6.h, z4.h[7]\n"
"fmla z28.h, z6.h, z5.h[7]\n"
- "ld1h { z6.h }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #8\n"
+ "ld1h { z6.h }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
"fmla z9.h, z7.h, z0.h[7]\n"
"fmla z13.h, z7.h, z1.h[7]\n"
"fmla z17.h, z7.h, z2.h[7]\n"
"fmla z21.h, z7.h, z3.h[7]\n"
"fmla z25.h, z7.h, z4.h[7]\n"
"fmla z29.h, z7.h, z5.h[7]\n"
- "ld1h { z7.h }, p5/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #8\n"
+ "ld1h { z7.h }, p5/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
"fmla z10.h, z6.h, z0.h[7]\n"
"fmla z14.h, z6.h, z1.h[7]\n"
"fmla z18.h, z6.h, z2.h[7]\n"
@@ -2925,34 +2925,34 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z31.h, z7.h, z5.h[7]\n"
"bgt 80b\n"
"81:" // Height 6: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
- "ld1rqh { z2.h }, p0/Z, [x24]\n"
- "ld1rqh { z3.h }, p0/Z, [x23]\n"
- "ld1rqh { z4.h }, p0/Z, [x22]\n"
- "ld1rqh { z5.h }, p0/Z, [x21]\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
+ "ld1rqh { z3.h }, p0/Z, [x22]\n"
+ "ld1rqh { z4.h }, p0/Z, [x21]\n"
+ "ld1rqh { z5.h }, p0/Z, [x20]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[0]\n"
"fmla z12.h, z6.h, z1.h[0]\n"
"fmla z16.h, z6.h, z2.h[0]\n"
"fmla z20.h, z6.h, z3.h[0]\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z24.h, z6.h, z4.h[0]\n"
"fmla z28.h, z6.h, z5.h[0]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "addvl x10, x10, #1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x9, x9, #1\n"
"fmla z9.h, z7.h, z0.h[0]\n"
"fmla z13.h, z7.h, z1.h[0]\n"
"fmla z17.h, z7.h, z2.h[0]\n"
"fmla z21.h, z7.h, z3.h[0]\n"
"fmla z25.h, z7.h, z4.h[0]\n"
"fmla z29.h, z7.h, z5.h[0]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x9, x9, #1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
"fmla z10.h, z6.h, z0.h[0]\n"
"fmla z14.h, z6.h, z1.h[0]\n"
"fmla z18.h, z6.h, z2.h[0]\n"
@@ -2966,27 +2966,27 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z27.h, z7.h, z4.h[0]\n"
"fmla z31.h, z7.h, z5.h[0]\n"
"ble 82f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[1]\n"
"fmla z12.h, z6.h, z1.h[1]\n"
"fmla z16.h, z6.h, z2.h[1]\n"
"fmla z20.h, z6.h, z3.h[1]\n"
- "subs x27, x27, #0x1\n"
- "addvl x12, x12, #1\n"
+ "subs x26, x26, #0x1\n"
+ "addvl x11, x11, #1\n"
"fmla z24.h, z6.h, z4.h[1]\n"
"fmla z28.h, z6.h, z5.h[1]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
"fmla z9.h, z7.h, z0.h[1]\n"
"fmla z13.h, z7.h, z1.h[1]\n"
- "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z17.h, z7.h, z2.h[1]\n"
"fmla z21.h, z7.h, z3.h[1]\n"
"fmla z25.h, z7.h, z4.h[1]\n"
"fmla z29.h, z7.h, z5.h[1]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x9, x9, #1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
"fmla z10.h, z6.h, z0.h[1]\n"
"fmla z14.h, z6.h, z1.h[1]\n"
"fmla z18.h, z6.h, z2.h[1]\n"
@@ -3000,27 +3000,27 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z27.h, z7.h, z4.h[1]\n"
"fmla z31.h, z7.h, z5.h[1]\n"
"ble 82f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[2]\n"
"fmla z12.h, z6.h, z1.h[2]\n"
"fmla z16.h, z6.h, z2.h[2]\n"
"fmla z20.h, z6.h, z3.h[2]\n"
- "subs x27, x27, #0x1\n"
- "addvl x12, x12, #1\n"
+ "subs x26, x26, #0x1\n"
+ "addvl x11, x11, #1\n"
"fmla z24.h, z6.h, z4.h[2]\n"
"fmla z28.h, z6.h, z5.h[2]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
"fmla z9.h, z7.h, z0.h[2]\n"
"fmla z13.h, z7.h, z1.h[2]\n"
- "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z17.h, z7.h, z2.h[2]\n"
"fmla z21.h, z7.h, z3.h[2]\n"
"fmla z25.h, z7.h, z4.h[2]\n"
"fmla z29.h, z7.h, z5.h[2]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x9, x9, #1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
"fmla z10.h, z6.h, z0.h[2]\n"
"fmla z14.h, z6.h, z1.h[2]\n"
"fmla z18.h, z6.h, z2.h[2]\n"
@@ -3034,27 +3034,27 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z27.h, z7.h, z4.h[2]\n"
"fmla z31.h, z7.h, z5.h[2]\n"
"ble 82f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[3]\n"
"fmla z12.h, z6.h, z1.h[3]\n"
"fmla z16.h, z6.h, z2.h[3]\n"
"fmla z20.h, z6.h, z3.h[3]\n"
- "subs x27, x27, #0x1\n"
- "addvl x12, x12, #1\n"
+ "subs x26, x26, #0x1\n"
+ "addvl x11, x11, #1\n"
"fmla z24.h, z6.h, z4.h[3]\n"
"fmla z28.h, z6.h, z5.h[3]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
"fmla z9.h, z7.h, z0.h[3]\n"
"fmla z13.h, z7.h, z1.h[3]\n"
- "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z17.h, z7.h, z2.h[3]\n"
"fmla z21.h, z7.h, z3.h[3]\n"
"fmla z25.h, z7.h, z4.h[3]\n"
"fmla z29.h, z7.h, z5.h[3]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x9, x9, #1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
"fmla z10.h, z6.h, z0.h[3]\n"
"fmla z14.h, z6.h, z1.h[3]\n"
"fmla z18.h, z6.h, z2.h[3]\n"
@@ -3068,27 +3068,27 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z27.h, z7.h, z4.h[3]\n"
"fmla z31.h, z7.h, z5.h[3]\n"
"ble 82f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[4]\n"
"fmla z12.h, z6.h, z1.h[4]\n"
"fmla z16.h, z6.h, z2.h[4]\n"
"fmla z20.h, z6.h, z3.h[4]\n"
- "subs x27, x27, #0x1\n"
- "addvl x12, x12, #1\n"
+ "subs x26, x26, #0x1\n"
+ "addvl x11, x11, #1\n"
"fmla z24.h, z6.h, z4.h[4]\n"
"fmla z28.h, z6.h, z5.h[4]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
"fmla z9.h, z7.h, z0.h[4]\n"
"fmla z13.h, z7.h, z1.h[4]\n"
- "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z17.h, z7.h, z2.h[4]\n"
"fmla z21.h, z7.h, z3.h[4]\n"
"fmla z25.h, z7.h, z4.h[4]\n"
"fmla z29.h, z7.h, z5.h[4]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x9, x9, #1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
"fmla z10.h, z6.h, z0.h[4]\n"
"fmla z14.h, z6.h, z1.h[4]\n"
"fmla z18.h, z6.h, z2.h[4]\n"
@@ -3102,27 +3102,27 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z27.h, z7.h, z4.h[4]\n"
"fmla z31.h, z7.h, z5.h[4]\n"
"ble 82f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[5]\n"
"fmla z12.h, z6.h, z1.h[5]\n"
"fmla z16.h, z6.h, z2.h[5]\n"
"fmla z20.h, z6.h, z3.h[5]\n"
- "subs x27, x27, #0x1\n"
- "addvl x12, x12, #1\n"
+ "subs x26, x26, #0x1\n"
+ "addvl x11, x11, #1\n"
"fmla z24.h, z6.h, z4.h[5]\n"
"fmla z28.h, z6.h, z5.h[5]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
"fmla z9.h, z7.h, z0.h[5]\n"
"fmla z13.h, z7.h, z1.h[5]\n"
- "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z17.h, z7.h, z2.h[5]\n"
"fmla z21.h, z7.h, z3.h[5]\n"
"fmla z25.h, z7.h, z4.h[5]\n"
"fmla z29.h, z7.h, z5.h[5]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x9, x9, #1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
"fmla z10.h, z6.h, z0.h[5]\n"
"fmla z14.h, z6.h, z1.h[5]\n"
"fmla z18.h, z6.h, z2.h[5]\n"
@@ -3136,27 +3136,27 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z27.h, z7.h, z4.h[5]\n"
"fmla z31.h, z7.h, z5.h[5]\n"
"ble 82f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[6]\n"
"fmla z12.h, z6.h, z1.h[6]\n"
"fmla z16.h, z6.h, z2.h[6]\n"
"fmla z20.h, z6.h, z3.h[6]\n"
- "subs x27, x27, #0x1\n"
- "addvl x12, x12, #1\n"
+ "subs x26, x26, #0x1\n"
+ "addvl x11, x11, #1\n"
"fmla z24.h, z6.h, z4.h[6]\n"
"fmla z28.h, z6.h, z5.h[6]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
"fmla z9.h, z7.h, z0.h[6]\n"
"fmla z13.h, z7.h, z1.h[6]\n"
- "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z17.h, z7.h, z2.h[6]\n"
"fmla z21.h, z7.h, z3.h[6]\n"
"fmla z25.h, z7.h, z4.h[6]\n"
"fmla z29.h, z7.h, z5.h[6]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x9, x9, #1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
"fmla z10.h, z6.h, z0.h[6]\n"
"fmla z14.h, z6.h, z1.h[6]\n"
"fmla z18.h, z6.h, z2.h[6]\n"
@@ -3170,26 +3170,26 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z27.h, z7.h, z4.h[6]\n"
"fmla z31.h, z7.h, z5.h[6]\n"
"ble 82f\n"
- "ld1h { z6.h }, p5/Z, [x12]\n"
- "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
"fmla z8.h, z6.h, z0.h[7]\n"
"fmla z12.h, z6.h, z1.h[7]\n"
"fmla z16.h, z6.h, z2.h[7]\n"
"fmla z20.h, z6.h, z3.h[7]\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z24.h, z6.h, z4.h[7]\n"
"fmla z28.h, z6.h, z5.h[7]\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "addvl x10, x10, #1\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x9, x9, #1\n"
"fmla z9.h, z7.h, z0.h[7]\n"
"fmla z13.h, z7.h, z1.h[7]\n"
"fmla z17.h, z7.h, z2.h[7]\n"
"fmla z21.h, z7.h, z3.h[7]\n"
"fmla z25.h, z7.h, z4.h[7]\n"
"fmla z29.h, z7.h, z5.h[7]\n"
- "ld1h { z7.h }, p5/Z, [x9]\n"
- "addvl x9, x9, #1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
"fmla z10.h, z6.h, z0.h[7]\n"
"fmla z14.h, z6.h, z1.h[7]\n"
"fmla z18.h, z6.h, z2.h[7]\n"
@@ -3203,21 +3203,21 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z27.h, z7.h, z4.h[7]\n"
"fmla z31.h, z7.h, z5.h[7]\n"
"82:" // Height 6: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 77b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
"tbz %x[flags], #1, 83f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p5/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z1.h }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z0.h }, p5/Z, [x19]\n"
"fmin z8.h, p5/M, z8.h, z1.h\n"
"fmin z9.h, p5/M, z9.h, z1.h\n"
"fmin z10.h, p5/M, z10.h, z1.h\n"
@@ -3267,50 +3267,50 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmax z30.h, p5/M, z30.h, z0.h\n"
"fmax z31.h, p5/M, z31.h, z0.h\n"
"83:" // Height 6: No activation
- "st1h { z8.h }, p4, [x13]\n"
- "st1h { z9.h }, p3, [x13, #1, MUL VL]\n"
- "st1h { z10.h }, p2, [x13, #2, MUL VL]\n"
- "st1h { z11.h }, p1, [x13, #3, MUL VL]\n"
- "addvl x13, x13, #4\n"
- "st1h { z12.h }, p4, [x25]\n"
- "st1h { z13.h }, p3, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p2, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p1, [x25, #3, MUL VL]\n"
- "st1h { z16.h }, p4, [x24]\n"
- "st1h { z17.h }, p3, [x24, #1, MUL VL]\n"
- "st1h { z18.h }, p2, [x24, #2, MUL VL]\n"
- "st1h { z19.h }, p1, [x24, #3, MUL VL]\n"
- "st1h { z20.h }, p4, [x23]\n"
- "st1h { z21.h }, p3, [x23, #1, MUL VL]\n"
- "st1h { z22.h }, p2, [x23, #2, MUL VL]\n"
- "st1h { z23.h }, p1, [x23, #3, MUL VL]\n"
- "st1h { z24.h }, p4, [x22]\n"
- "st1h { z25.h }, p3, [x22, #1, MUL VL]\n"
- "st1h { z26.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z27.h }, p1, [x22, #3, MUL VL]\n"
- "st1h { z28.h }, p4, [x21]\n"
- "st1h { z29.h }, p3, [x21, #1, MUL VL]\n"
- "st1h { z30.h }, p2, [x21, #2, MUL VL]\n"
- "st1h { z31.h }, p1, [x21, #3, MUL VL]\n"
+ "st1h { z8.h }, p4, [x12]\n"
+ "st1h { z9.h }, p3, [x12, #1, MUL VL]\n"
+ "st1h { z10.h }, p2, [x12, #2, MUL VL]\n"
+ "st1h { z11.h }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1h { z12.h }, p4, [x24]\n"
+ "st1h { z13.h }, p3, [x24, #1, MUL VL]\n"
+ "st1h { z14.h }, p2, [x24, #2, MUL VL]\n"
+ "st1h { z15.h }, p1, [x24, #3, MUL VL]\n"
+ "st1h { z16.h }, p4, [x23]\n"
+ "st1h { z17.h }, p3, [x23, #1, MUL VL]\n"
+ "st1h { z18.h }, p2, [x23, #2, MUL VL]\n"
+ "st1h { z19.h }, p1, [x23, #3, MUL VL]\n"
+ "st1h { z20.h }, p4, [x22]\n"
+ "st1h { z21.h }, p3, [x22, #1, MUL VL]\n"
+ "st1h { z22.h }, p2, [x22, #2, MUL VL]\n"
+ "st1h { z23.h }, p1, [x22, #3, MUL VL]\n"
+ "st1h { z24.h }, p4, [x21]\n"
+ "st1h { z25.h }, p3, [x21, #1, MUL VL]\n"
+ "st1h { z26.h }, p2, [x21, #2, MUL VL]\n"
+ "st1h { z27.h }, p1, [x21, #3, MUL VL]\n"
+ "st1h { z28.h }, p4, [x20]\n"
+ "st1h { z29.h }, p3, [x20, #1, MUL VL]\n"
+ "st1h { z30.h }, p2, [x20, #2, MUL VL]\n"
+ "st1h { z31.h }, p1, [x20, #3, MUL VL]\n"
"84:" // Height 6: Writeback done
- "dech x14, ALL, MUL #4\n"
- "cmp x14, XZR\n"
+ "dech x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
"bgt 72b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 86f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 85f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"85:" // Update direct input
- "mov x20, #0xc\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0xc\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"86:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL/a64fx.cpp
index 32fcac3a45..7dd4e234d5 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL/a64fx.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL/a64fx.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -105,53 +105,53 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"cmp %x[M], #0x2\n"
"bgt 27f\n"
"beq 14f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #2\n"
- "cntw x21, ALL, MUL #3\n"
- "add x10, x11, x20, LSL #2\n"
- "add x9, x10, x20, LSL #2\n"
- "add x20, x9, x20, LSL #2\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 3f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 3f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 3f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"3:" // Height 1: B setup done
- "mov x20, #0x0\n"
- "whilelt p3.s, x20, x14\n"
- "incw x20\n"
- "whilelt p2.s, x20, x14\n"
- "incw x20\n"
- "whilelt p1.s, x20, x14\n"
- "incw x20\n"
- "whilelt p0.s, x20, x14\n"
- "cbz x15, 4f\n"
- "ld1w { z8.s }, p4/Z, [x15]\n"
- "ld1w { z9.s }, p4/Z, [x15, #1, MUL VL]\n"
- "ld1w { z10.s }, p4/Z, [x15, #2, MUL VL]\n"
- "ld1w { z11.s }, p4/Z, [x15, #3, MUL VL]\n"
- "addvl x15, x15, #4\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p0.s, x19, x13\n"
+ "cbz x14, 4f\n"
+ "ld1w { z8.s }, p4/Z, [x14]\n"
+ "ld1w { z9.s }, p4/Z, [x14, #1, MUL VL]\n"
+ "ld1w { z10.s }, p4/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p4/Z, [x14, #3, MUL VL]\n"
+ "addvl x14, x14, #4\n"
"b 6f\n"
"4:" // Height 1: no bias
"tbz %x[flags], #0, 5f\n"
- "ld1w { z8.s }, p3/Z, [x13]\n"
- "ld1w { z9.s }, p2/Z, [x13, #1, MUL VL]\n"
- "ld1w { z10.s }, p1/Z, [x13, #2, MUL VL]\n"
- "ld1w { z11.s }, p0/Z, [x13, #3, MUL VL]\n"
+ "ld1w { z8.s }, p3/Z, [x12]\n"
+ "ld1w { z9.s }, p2/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z10.s }, p1/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z11.s }, p0/Z, [x12, #3, MUL VL]\n"
"b 6f\n"
"5:" // Height 1: no accumulate
"mov z8.b, #0x0\n"
@@ -159,64 +159,64 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"mov z10.b, #0x0\n"
"mov z11.b, #0x0\n"
"6:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"7:" // Height 1: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 8f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 9f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 9f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
"b 9f\n"
"8:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"9:" // Height 1: input setup done
- "subs x27, x27, #0x1\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1w { z6.s }, p4/Z, [x12]\n"
- "ld1w { z7.s }, p4/Z, [x11]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1w { z6.s }, p4/Z, [x11]\n"
+ "ld1w { z7.s }, p4/Z, [x10]\n"
"ble 11f\n"
"10:" // Height 1: Multiply loop: Main loop
"fmla z8.s, p4/M, z6.s, z0.s\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
- "ld1w { z6.s }, p4/Z, [x10]\n"
- "ld1w { z7.s }, p4/Z, [x9]\n"
- "addvl x12, x12, #1\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
+ "ld1w { z7.s }, p4/Z, [x28]\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z10.s, p4/M, z6.s, z0.s\n"
"fmla z11.s, p4/M, z7.s, z0.s\n"
- "add x26, x26, #0x4\n"
- "subs x27, x27, #0x1\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1w { z6.s }, p4/Z, [x12]\n"
- "addvl x10, x10, #1\n"
+ "add x25, x25, #0x4\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1w { z6.s }, p4/Z, [x11]\n"
"addvl x9, x9, #1\n"
- "ld1w { z7.s }, p4/Z, [x11]\n"
+ "addvl x28, x28, #1\n"
+ "ld1w { z7.s }, p4/Z, [x10]\n"
"bgt 10b\n"
"11:" // Height 1: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.s, p4/M, z6.s, z0.s\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
- "ld1w { z6.s }, p4/Z, [x10]\n"
- "ld1w { z7.s }, p4/Z, [x9]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
+ "ld1w { z7.s }, p4/Z, [x28]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"fmla z10.s, p4/M, z6.s, z0.s\n"
"fmla z11.s, p4/M, z7.s, z0.s\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"bne 7b\n"
"tbz %x[flags], #1, 12f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p4/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p4/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p4/Z, [x19]\n"
"fmin z8.s, p4/M, z8.s, z1.s\n"
"fmin z9.s, p4/M, z9.s, z1.s\n"
"fmin z10.s, p4/M, z10.s, z1.s\n"
@@ -226,74 +226,74 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"fmax z10.s, p4/M, z10.s, z0.s\n"
"fmax z11.s, p4/M, z11.s, z0.s\n"
"12:" // Height 1: No activation
- "st1w { z8.s }, p3, [x13]\n"
- "st1w { z9.s }, p2, [x13, #1, MUL VL]\n"
- "st1w { z10.s }, p1, [x13, #2, MUL VL]\n"
- "st1w { z11.s }, p0, [x13, #3, MUL VL]\n"
- "addvl x13, x13, #4\n"
+ "st1w { z8.s }, p3, [x12]\n"
+ "st1w { z9.s }, p2, [x12, #1, MUL VL]\n"
+ "st1w { z10.s }, p1, [x12, #2, MUL VL]\n"
+ "st1w { z11.s }, p0, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
"13:" // Height 1: Writeback done
- "decw x14, ALL, MUL #4\n"
- "cmp x14, XZR\n"
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
"bgt 2b\n"
"b 80f\n"
"14:" // Height 2
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"15:" // Height 2: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #2\n"
- "cntw x21, ALL, MUL #3\n"
- "add x10, x11, x20, LSL #2\n"
- "add x9, x10, x20, LSL #2\n"
- "add x20, x9, x20, LSL #2\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 16f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 16f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 16f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"16:" // Height 2: B setup done
- "mov x20, #0x0\n"
- "whilelt p3.s, x20, x14\n"
- "incw x20\n"
- "whilelt p2.s, x20, x14\n"
- "incw x20\n"
- "whilelt p1.s, x20, x14\n"
- "incw x20\n"
- "whilelt p0.s, x20, x14\n"
- "cbz x15, 17f\n"
- "ld1w { z8.s }, p4/Z, [x15]\n"
- "ld1w { z9.s }, p4/Z, [x15, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p0.s, x19, x13\n"
+ "cbz x14, 17f\n"
+ "ld1w { z8.s }, p4/Z, [x14]\n"
+ "ld1w { z9.s }, p4/Z, [x14, #1, MUL VL]\n"
"mov z12.d, z8.d\n"
"mov z13.d, z9.d\n"
- "ld1w { z10.s }, p4/Z, [x15, #2, MUL VL]\n"
- "ld1w { z11.s }, p4/Z, [x15, #3, MUL VL]\n"
+ "ld1w { z10.s }, p4/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p4/Z, [x14, #3, MUL VL]\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
- "addvl x15, x15, #4\n"
+ "addvl x14, x14, #4\n"
"b 19f\n"
"17:" // Height 2: no bias
"tbz %x[flags], #0, 18f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "ld1w { z8.s }, p3/Z, [x13]\n"
- "ld1w { z9.s }, p2/Z, [x13, #1, MUL VL]\n"
- "ld1w { z10.s }, p1/Z, [x13, #2, MUL VL]\n"
- "ld1w { z11.s }, p0/Z, [x13, #3, MUL VL]\n"
- "ld1w { z12.s }, p3/Z, [x25]\n"
- "ld1w { z13.s }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1w { z15.s }, p0/Z, [x25, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "ld1w { z8.s }, p3/Z, [x12]\n"
+ "ld1w { z9.s }, p2/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z10.s }, p1/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z11.s }, p0/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x24]\n"
+ "ld1w { z13.s }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p0/Z, [x24, #3, MUL VL]\n"
"b 19f\n"
"18:" // Height 2: no accumulate
"mov z8.b, #0x0\n"
@@ -305,80 +305,80 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"mov z14.b, #0x0\n"
"mov z15.b, #0x0\n"
"19:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"20:" // Height 2: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 21f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 22f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 22f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
"b 22f\n"
"21:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
"22:" // Height 2: input setup done
- "subs x27, x27, #0x1\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
- "ld1w { z6.s }, p4/Z, [x12]\n"
- "ld1w { z7.s }, p4/Z, [x11]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1w { z6.s }, p4/Z, [x11]\n"
+ "ld1w { z7.s }, p4/Z, [x10]\n"
"ble 24f\n"
"23:" // Height 2: Multiply loop: Main loop
"fmla z8.s, p4/M, z6.s, z0.s\n"
"fmla z12.s, p4/M, z6.s, z1.s\n"
- "ld1w { z6.s }, p4/Z, [x10]\n"
- "addvl x12, x12, #1\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
+ "addvl x11, x11, #1\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
- "ld1w { z7.s }, p4/Z, [x9]\n"
- "addvl x11, x11, #1\n"
- "add x26, x26, #0x4\n"
- "subs x27, x27, #0x1\n"
+ "ld1w { z7.s }, p4/Z, [x28]\n"
+ "addvl x10, x10, #1\n"
+ "add x25, x25, #0x4\n"
+ "subs x26, x26, #0x1\n"
"fmla z10.s, p4/M, z6.s, z0.s\n"
"fmla z14.s, p4/M, z6.s, z1.s\n"
- "add x25, x25, #0x4\n"
+ "add x24, x24, #0x4\n"
"fmla z11.s, p4/M, z7.s, z0.s\n"
"fmla z15.s, p4/M, z7.s, z1.s\n"
- "addvl x10, x10, #1\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
"addvl x9, x9, #1\n"
- "ld1w { z6.s }, p4/Z, [x12]\n"
- "ld1w { z7.s }, p4/Z, [x11]\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "addvl x28, x28, #1\n"
+ "ld1w { z6.s }, p4/Z, [x11]\n"
+ "ld1w { z7.s }, p4/Z, [x10]\n"
"bgt 23b\n"
"24:" // Height 2: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.s, p4/M, z6.s, z0.s\n"
"fmla z12.s, p4/M, z6.s, z1.s\n"
- "ld1w { z6.s }, p4/Z, [x10]\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
- "ld1w { z7.s }, p4/Z, [x9]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ld1w { z7.s }, p4/Z, [x28]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"fmla z10.s, p4/M, z6.s, z0.s\n"
"fmla z14.s, p4/M, z6.s, z1.s\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z11.s, p4/M, z7.s, z0.s\n"
"fmla z15.s, p4/M, z7.s, z1.s\n"
- "addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"bne 20b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
"tbz %x[flags], #1, 25f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p4/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p4/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p4/Z, [x19]\n"
"fmin z8.s, p4/M, z8.s, z1.s\n"
"fmin z9.s, p4/M, z9.s, z1.s\n"
"fmin z10.s, p4/M, z10.s, z1.s\n"
@@ -396,87 +396,87 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"fmax z14.s, p4/M, z14.s, z0.s\n"
"fmax z15.s, p4/M, z15.s, z0.s\n"
"25:" // Height 2: No activation
- "st1w { z8.s }, p3, [x13]\n"
- "st1w { z9.s }, p2, [x13, #1, MUL VL]\n"
- "st1w { z10.s }, p1, [x13, #2, MUL VL]\n"
- "st1w { z11.s }, p0, [x13, #3, MUL VL]\n"
- "addvl x13, x13, #4\n"
- "st1w { z12.s }, p3, [x25]\n"
- "st1w { z13.s }, p2, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p1, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p0, [x25, #3, MUL VL]\n"
+ "st1w { z8.s }, p3, [x12]\n"
+ "st1w { z9.s }, p2, [x12, #1, MUL VL]\n"
+ "st1w { z10.s }, p1, [x12, #2, MUL VL]\n"
+ "st1w { z11.s }, p0, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1w { z12.s }, p3, [x24]\n"
+ "st1w { z13.s }, p2, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x24, #3, MUL VL]\n"
"26:" // Height 2: Writeback done
- "decw x14, ALL, MUL #4\n"
- "cmp x14, XZR\n"
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
"bgt 15b\n"
"b 80f\n"
"27:" // Height 3
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"28:" // Height 3: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #2\n"
- "cntw x21, ALL, MUL #3\n"
- "add x10, x11, x20, LSL #2\n"
- "add x9, x10, x20, LSL #2\n"
- "add x20, x9, x20, LSL #2\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 29f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 29f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 29f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"29:" // Height 3: B setup done
- "mov x20, #0x0\n"
- "whilelt p3.s, x20, x14\n"
- "incw x20\n"
- "whilelt p2.s, x20, x14\n"
- "incw x20\n"
- "whilelt p1.s, x20, x14\n"
- "incw x20\n"
- "whilelt p0.s, x20, x14\n"
- "cbz x15, 30f\n"
- "ld1w { z8.s }, p4/Z, [x15]\n"
- "ld1w { z9.s }, p4/Z, [x15, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p0.s, x19, x13\n"
+ "cbz x14, 30f\n"
+ "ld1w { z8.s }, p4/Z, [x14]\n"
+ "ld1w { z9.s }, p4/Z, [x14, #1, MUL VL]\n"
"mov z12.d, z8.d\n"
"mov z13.d, z9.d\n"
- "ld1w { z10.s }, p4/Z, [x15, #2, MUL VL]\n"
- "ld1w { z11.s }, p4/Z, [x15, #3, MUL VL]\n"
+ "ld1w { z10.s }, p4/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p4/Z, [x14, #3, MUL VL]\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x15, x15, #4\n"
+ "addvl x14, x14, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"b 32f\n"
"30:" // Height 3: no bias
"tbz %x[flags], #0, 31f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z8.s }, p3/Z, [x13]\n"
- "ld1w { z9.s }, p2/Z, [x13, #1, MUL VL]\n"
- "ld1w { z10.s }, p1/Z, [x13, #2, MUL VL]\n"
- "ld1w { z11.s }, p0/Z, [x13, #3, MUL VL]\n"
- "ld1w { z12.s }, p3/Z, [x25]\n"
- "ld1w { z13.s }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1w { z15.s }, p0/Z, [x25, #3, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x24]\n"
- "ld1w { z17.s }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1w { z18.s }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1w { z19.s }, p0/Z, [x24, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z8.s }, p3/Z, [x12]\n"
+ "ld1w { z9.s }, p2/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z10.s }, p1/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z11.s }, p0/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x24]\n"
+ "ld1w { z13.s }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z16.s }, p3/Z, [x23]\n"
+ "ld1w { z17.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z18.s }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z19.s }, p0/Z, [x23, #3, MUL VL]\n"
"b 32f\n"
"31:" // Height 3: no accumulate
"mov z8.b, #0x0\n"
@@ -492,95 +492,95 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"mov z18.b, #0x0\n"
"mov z19.b, #0x0\n"
"32:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"33:" // Height 3: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 34f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 35f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 35f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
"b 35f\n"
"34:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"35:" // Height 3: input setup done
- "subs x27, x27, #0x1\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
- "ld1rw { z2.s }, p4/Z, [x24]\n"
- "ld1w { z6.s }, p4/Z, [x12]\n"
- "ld1w { z7.s }, p4/Z, [x11]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1w { z6.s }, p4/Z, [x11]\n"
+ "ld1w { z7.s }, p4/Z, [x10]\n"
"ble 37f\n"
"36:" // Height 3: Multiply loop: Main loop
"fmla z8.s, p4/M, z6.s, z0.s\n"
"fmla z12.s, p4/M, z6.s, z1.s\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z16.s, p4/M, z6.s, z2.s\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
- "ld1w { z6.s }, p4/Z, [x10]\n"
- "add x26, x26, #0x4\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
+ "add x25, x25, #0x4\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
- "ld1w { z7.s }, p4/Z, [x9]\n"
- "subs x27, x27, #0x1\n"
- "add x25, x25, #0x4\n"
+ "ld1w { z7.s }, p4/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
"add x24, x24, #0x4\n"
+ "add x23, x23, #0x4\n"
"fmla z10.s, p4/M, z6.s, z0.s\n"
"fmla z14.s, p4/M, z6.s, z1.s\n"
"fmla z18.s, p4/M, z6.s, z2.s\n"
"fmla z11.s, p4/M, z7.s, z0.s\n"
- "addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"fmla z15.s, p4/M, z7.s, z1.s\n"
"fmla z19.s, p4/M, z7.s, z2.s\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
- "ld1rw { z2.s }, p4/Z, [x24]\n"
- "ld1w { z6.s }, p4/Z, [x12]\n"
- "ld1w { z7.s }, p4/Z, [x11]\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1w { z6.s }, p4/Z, [x11]\n"
+ "ld1w { z7.s }, p4/Z, [x10]\n"
"bgt 36b\n"
"37:" // Height 3: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.s, p4/M, z6.s, z0.s\n"
"fmla z12.s, p4/M, z6.s, z1.s\n"
- "add x28, x28, #0x1\n"
+ "add x27, x27, #0x1\n"
"fmla z16.s, p4/M, z6.s, z2.s\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
- "ld1w { z6.s }, p4/Z, [x10]\n"
- "cmp x28, x20\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
+ "cmp x27, x19\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
- "ld1w { z7.s }, p4/Z, [x9]\n"
- "addvl x12, x12, #1\n"
+ "ld1w { z7.s }, p4/Z, [x28]\n"
+ "addvl x11, x11, #1\n"
"fmla z10.s, p4/M, z6.s, z0.s\n"
"fmla z14.s, p4/M, z6.s, z1.s\n"
- "addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z18.s, p4/M, z6.s, z2.s\n"
"fmla z11.s, p4/M, z7.s, z0.s\n"
- "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"fmla z15.s, p4/M, z7.s, z1.s\n"
"fmla z19.s, p4/M, z7.s, z2.s\n"
"bne 33b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"tbz %x[flags], #1, 38f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p4/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p4/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p4/Z, [x19]\n"
"fmin z8.s, p4/M, z8.s, z1.s\n"
"fmin z9.s, p4/M, z9.s, z1.s\n"
"fmin z10.s, p4/M, z10.s, z1.s\n"
@@ -606,71 +606,71 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"fmax z18.s, p4/M, z18.s, z0.s\n"
"fmax z19.s, p4/M, z19.s, z0.s\n"
"38:" // Height 3: No activation
- "st1w { z8.s }, p3, [x13]\n"
- "st1w { z9.s }, p2, [x13, #1, MUL VL]\n"
- "st1w { z10.s }, p1, [x13, #2, MUL VL]\n"
- "st1w { z11.s }, p0, [x13, #3, MUL VL]\n"
- "addvl x13, x13, #4\n"
- "st1w { z12.s }, p3, [x25]\n"
- "st1w { z13.s }, p2, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p1, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p0, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p3, [x24]\n"
- "st1w { z17.s }, p2, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p1, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p0, [x24, #3, MUL VL]\n"
+ "st1w { z8.s }, p3, [x12]\n"
+ "st1w { z9.s }, p2, [x12, #1, MUL VL]\n"
+ "st1w { z10.s }, p1, [x12, #2, MUL VL]\n"
+ "st1w { z11.s }, p0, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1w { z12.s }, p3, [x24]\n"
+ "st1w { z13.s }, p2, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x24, #3, MUL VL]\n"
+ "st1w { z16.s }, p3, [x23]\n"
+ "st1w { z17.s }, p2, [x23, #1, MUL VL]\n"
+ "st1w { z18.s }, p1, [x23, #2, MUL VL]\n"
+ "st1w { z19.s }, p0, [x23, #3, MUL VL]\n"
"39:" // Height 3: Writeback done
- "decw x14, ALL, MUL #4\n"
- "cmp x14, XZR\n"
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
"bgt 28b\n"
"b 80f\n"
"40:" // Height 4
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"41:" // Height 4: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #2\n"
- "cntw x21, ALL, MUL #3\n"
- "add x10, x11, x20, LSL #2\n"
- "add x9, x10, x20, LSL #2\n"
- "add x20, x9, x20, LSL #2\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 42f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 42f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 42f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"42:" // Height 4: B setup done
- "mov x20, #0x0\n"
- "whilelt p3.s, x20, x14\n"
- "incw x20\n"
- "whilelt p2.s, x20, x14\n"
- "incw x20\n"
- "whilelt p1.s, x20, x14\n"
- "incw x20\n"
- "whilelt p0.s, x20, x14\n"
- "cbz x15, 43f\n"
- "ld1w { z8.s }, p4/Z, [x15]\n"
- "ld1w { z9.s }, p4/Z, [x15, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p0.s, x19, x13\n"
+ "cbz x14, 43f\n"
+ "ld1w { z8.s }, p4/Z, [x14]\n"
+ "ld1w { z9.s }, p4/Z, [x14, #1, MUL VL]\n"
"mov z12.d, z8.d\n"
"mov z13.d, z9.d\n"
- "ld1w { z10.s }, p4/Z, [x15, #2, MUL VL]\n"
- "ld1w { z11.s }, p4/Z, [x15, #3, MUL VL]\n"
+ "ld1w { z10.s }, p4/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p4/Z, [x14, #3, MUL VL]\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x15, x15, #4\n"
+ "addvl x14, x14, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"mov z20.d, z8.d\n"
@@ -680,26 +680,26 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"b 45f\n"
"43:" // Height 4: no bias
"tbz %x[flags], #0, 44f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z8.s }, p3/Z, [x13]\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z9.s }, p2/Z, [x13, #1, MUL VL]\n"
- "ld1w { z10.s }, p1/Z, [x13, #2, MUL VL]\n"
- "ld1w { z11.s }, p0/Z, [x13, #3, MUL VL]\n"
- "ld1w { z12.s }, p3/Z, [x25]\n"
- "ld1w { z13.s }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1w { z15.s }, p0/Z, [x25, #3, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x24]\n"
- "ld1w { z17.s }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1w { z18.s }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1w { z19.s }, p0/Z, [x24, #3, MUL VL]\n"
- "ld1w { z20.s }, p3/Z, [x23]\n"
- "ld1w { z21.s }, p2/Z, [x23, #1, MUL VL]\n"
- "ld1w { z22.s }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1w { z23.s }, p0/Z, [x23, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z8.s }, p3/Z, [x12]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z9.s }, p2/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z10.s }, p1/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z11.s }, p0/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x24]\n"
+ "ld1w { z13.s }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z16.s }, p3/Z, [x23]\n"
+ "ld1w { z17.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z18.s }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z19.s }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z20.s }, p3/Z, [x22]\n"
+ "ld1w { z21.s }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z22.s }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z23.s }, p0/Z, [x22, #3, MUL VL]\n"
"b 45f\n"
"44:" // Height 4: no accumulate
"mov z8.b, #0x0\n"
@@ -719,94 +719,94 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"mov z22.b, #0x0\n"
"mov z23.b, #0x0\n"
"45:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"46:" // Height 4: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 47f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 48f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 48f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
"b 48f\n"
"47:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"48:" // Height 4: input setup done
- "subs x27, x27, #0x1\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
- "ld1rw { z2.s }, p4/Z, [x24]\n"
- "ld1rw { z3.s }, p4/Z, [x23]\n"
- "ld1w { z6.s }, p4/Z, [x12]\n"
- "ld1w { z7.s }, p4/Z, [x11]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1rw { z3.s }, p4/Z, [x22]\n"
+ "ld1w { z6.s }, p4/Z, [x11]\n"
+ "ld1w { z7.s }, p4/Z, [x10]\n"
"ble 50f\n"
"49:" // Height 4: Multiply loop: Main loop
"fmla z8.s, p4/M, z6.s, z0.s\n"
"fmla z12.s, p4/M, z6.s, z1.s\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z16.s, p4/M, z6.s, z2.s\n"
"fmla z20.s, p4/M, z6.s, z3.s\n"
- "ld1w { z6.s }, p4/Z, [x10]\n"
- "add x26, x26, #0x4\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
+ "add x25, x25, #0x4\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
- "subs x27, x27, #0x1\n"
- "add x25, x25, #0x4\n"
+ "subs x26, x26, #0x1\n"
+ "add x24, x24, #0x4\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
"fmla z21.s, p4/M, z7.s, z3.s\n"
- "ld1w { z7.s }, p4/Z, [x9]\n"
- "add x24, x24, #0x4\n"
+ "ld1w { z7.s }, p4/Z, [x28]\n"
"add x23, x23, #0x4\n"
+ "add x22, x22, #0x4\n"
"fmla z10.s, p4/M, z6.s, z0.s\n"
"fmla z14.s, p4/M, z6.s, z1.s\n"
- "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z18.s, p4/M, z6.s, z2.s\n"
"fmla z22.s, p4/M, z6.s, z3.s\n"
- "addvl x9, x9, #1\n"
- "ld1w { z6.s }, p4/Z, [x12]\n"
+ "addvl x28, x28, #1\n"
+ "ld1w { z6.s }, p4/Z, [x11]\n"
"fmla z11.s, p4/M, z7.s, z0.s\n"
"fmla z15.s, p4/M, z7.s, z1.s\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
"fmla z19.s, p4/M, z7.s, z2.s\n"
"fmla z23.s, p4/M, z7.s, z3.s\n"
- "ld1rw { z2.s }, p4/Z, [x24]\n"
- "ld1rw { z3.s }, p4/Z, [x23]\n"
- "ld1w { z7.s }, p4/Z, [x11]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1rw { z3.s }, p4/Z, [x22]\n"
+ "ld1w { z7.s }, p4/Z, [x10]\n"
"bgt 49b\n"
"50:" // Height 4: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.s, p4/M, z6.s, z0.s\n"
"fmla z12.s, p4/M, z6.s, z1.s\n"
- "add x28, x28, #0x1\n"
+ "add x27, x27, #0x1\n"
"fmla z16.s, p4/M, z6.s, z2.s\n"
"fmla z20.s, p4/M, z6.s, z3.s\n"
- "ld1w { z6.s }, p4/Z, [x10]\n"
- "cmp x28, x20\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
+ "cmp x27, x19\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
"fmla z21.s, p4/M, z7.s, z3.s\n"
- "ld1w { z7.s }, p4/Z, [x9]\n"
- "addvl x10, x10, #1\n"
+ "ld1w { z7.s }, p4/Z, [x28]\n"
+ "addvl x9, x9, #1\n"
"fmla z10.s, p4/M, z6.s, z0.s\n"
"fmla z14.s, p4/M, z6.s, z1.s\n"
- "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"fmla z18.s, p4/M, z6.s, z2.s\n"
"fmla z22.s, p4/M, z6.s, z3.s\n"
"fmla z11.s, p4/M, z7.s, z0.s\n"
@@ -814,15 +814,15 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"fmla z19.s, p4/M, z7.s, z2.s\n"
"fmla z23.s, p4/M, z7.s, z3.s\n"
"bne 46b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"tbz %x[flags], #1, 51f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p4/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p4/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p4/Z, [x19]\n"
"fmin z8.s, p4/M, z8.s, z1.s\n"
"fmin z9.s, p4/M, z9.s, z1.s\n"
"fmin z10.s, p4/M, z10.s, z1.s\n"
@@ -856,75 +856,75 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"fmax z22.s, p4/M, z22.s, z0.s\n"
"fmax z23.s, p4/M, z23.s, z0.s\n"
"51:" // Height 4: No activation
- "st1w { z8.s }, p3, [x13]\n"
- "st1w { z9.s }, p2, [x13, #1, MUL VL]\n"
- "st1w { z10.s }, p1, [x13, #2, MUL VL]\n"
- "st1w { z11.s }, p0, [x13, #3, MUL VL]\n"
- "addvl x13, x13, #4\n"
- "st1w { z12.s }, p3, [x25]\n"
- "st1w { z13.s }, p2, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p1, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p0, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p3, [x24]\n"
- "st1w { z17.s }, p2, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p1, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p0, [x24, #3, MUL VL]\n"
- "st1w { z20.s }, p3, [x23]\n"
- "st1w { z21.s }, p2, [x23, #1, MUL VL]\n"
- "st1w { z22.s }, p1, [x23, #2, MUL VL]\n"
- "st1w { z23.s }, p0, [x23, #3, MUL VL]\n"
+ "st1w { z8.s }, p3, [x12]\n"
+ "st1w { z9.s }, p2, [x12, #1, MUL VL]\n"
+ "st1w { z10.s }, p1, [x12, #2, MUL VL]\n"
+ "st1w { z11.s }, p0, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1w { z12.s }, p3, [x24]\n"
+ "st1w { z13.s }, p2, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x24, #3, MUL VL]\n"
+ "st1w { z16.s }, p3, [x23]\n"
+ "st1w { z17.s }, p2, [x23, #1, MUL VL]\n"
+ "st1w { z18.s }, p1, [x23, #2, MUL VL]\n"
+ "st1w { z19.s }, p0, [x23, #3, MUL VL]\n"
+ "st1w { z20.s }, p3, [x22]\n"
+ "st1w { z21.s }, p2, [x22, #1, MUL VL]\n"
+ "st1w { z22.s }, p1, [x22, #2, MUL VL]\n"
+ "st1w { z23.s }, p0, [x22, #3, MUL VL]\n"
"52:" // Height 4: Writeback done
- "decw x14, ALL, MUL #4\n"
- "cmp x14, XZR\n"
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
"bgt 41b\n"
"b 80f\n"
"53:" // Height 5
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"54:" // Height 5: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #2\n"
- "cntw x21, ALL, MUL #3\n"
- "add x10, x11, x20, LSL #2\n"
- "add x9, x10, x20, LSL #2\n"
- "add x20, x9, x20, LSL #2\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 55f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 55f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 55f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"55:" // Height 5: B setup done
- "mov x20, #0x0\n"
- "whilelt p3.s, x20, x14\n"
- "incw x20\n"
- "whilelt p2.s, x20, x14\n"
- "incw x20\n"
- "whilelt p1.s, x20, x14\n"
- "incw x20\n"
- "whilelt p0.s, x20, x14\n"
- "cbz x15, 56f\n"
- "ld1w { z8.s }, p4/Z, [x15]\n"
- "ld1w { z9.s }, p4/Z, [x15, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p0.s, x19, x13\n"
+ "cbz x14, 56f\n"
+ "ld1w { z8.s }, p4/Z, [x14]\n"
+ "ld1w { z9.s }, p4/Z, [x14, #1, MUL VL]\n"
"mov z12.d, z8.d\n"
"mov z13.d, z9.d\n"
- "ld1w { z10.s }, p4/Z, [x15, #2, MUL VL]\n"
- "ld1w { z11.s }, p4/Z, [x15, #3, MUL VL]\n"
+ "ld1w { z10.s }, p4/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p4/Z, [x14, #3, MUL VL]\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x15, x15, #4\n"
+ "addvl x14, x14, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"mov z20.d, z8.d\n"
@@ -938,31 +938,31 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"b 58f\n"
"56:" // Height 5: no bias
"tbz %x[flags], #0, 57f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z8.s }, p3/Z, [x13]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "ld1w { z9.s }, p2/Z, [x13, #1, MUL VL]\n"
- "ld1w { z10.s }, p1/Z, [x13, #2, MUL VL]\n"
- "ld1w { z11.s }, p0/Z, [x13, #3, MUL VL]\n"
- "ld1w { z12.s }, p3/Z, [x25]\n"
- "ld1w { z13.s }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1w { z15.s }, p0/Z, [x25, #3, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x24]\n"
- "ld1w { z17.s }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1w { z18.s }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1w { z19.s }, p0/Z, [x24, #3, MUL VL]\n"
- "ld1w { z20.s }, p3/Z, [x23]\n"
- "ld1w { z21.s }, p2/Z, [x23, #1, MUL VL]\n"
- "ld1w { z22.s }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1w { z23.s }, p0/Z, [x23, #3, MUL VL]\n"
- "ld1w { z24.s }, p3/Z, [x22]\n"
- "ld1w { z25.s }, p2/Z, [x22, #1, MUL VL]\n"
- "ld1w { z26.s }, p1/Z, [x22, #2, MUL VL]\n"
- "ld1w { z27.s }, p0/Z, [x22, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z8.s }, p3/Z, [x12]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z9.s }, p2/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z10.s }, p1/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z11.s }, p0/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x24]\n"
+ "ld1w { z13.s }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z16.s }, p3/Z, [x23]\n"
+ "ld1w { z17.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z18.s }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z19.s }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z20.s }, p3/Z, [x22]\n"
+ "ld1w { z21.s }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z22.s }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z23.s }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z24.s }, p3/Z, [x21]\n"
+ "ld1w { z25.s }, p2/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z26.s }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z27.s }, p0/Z, [x21, #3, MUL VL]\n"
"b 58f\n"
"57:" // Height 5: no accumulate
"mov z8.b, #0x0\n"
@@ -986,104 +986,104 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"mov z26.b, #0x0\n"
"mov z27.b, #0x0\n"
"58:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"59:" // Height 5: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 60f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 61f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x22, x22, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 61f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
"b 61f\n"
"60:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"61:" // Height 5: input setup done
- "subs x27, x27, #0x1\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
- "ld1rw { z2.s }, p4/Z, [x24]\n"
- "ld1rw { z3.s }, p4/Z, [x23]\n"
- "ld1rw { z4.s }, p4/Z, [x22]\n"
- "ld1w { z6.s }, p4/Z, [x12]\n"
- "ld1w { z7.s }, p4/Z, [x11]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1rw { z3.s }, p4/Z, [x22]\n"
+ "ld1rw { z4.s }, p4/Z, [x21]\n"
+ "ld1w { z6.s }, p4/Z, [x11]\n"
+ "ld1w { z7.s }, p4/Z, [x10]\n"
"ble 63f\n"
"62:" // Height 5: Multiply loop: Main loop
"fmla z8.s, p4/M, z6.s, z0.s\n"
"fmla z12.s, p4/M, z6.s, z1.s\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z16.s, p4/M, z6.s, z2.s\n"
"fmla z20.s, p4/M, z6.s, z3.s\n"
- "add x26, x26, #0x4\n"
- "subs x27, x27, #0x1\n"
+ "add x25, x25, #0x4\n"
+ "subs x26, x26, #0x1\n"
"fmla z24.s, p4/M, z6.s, z4.s\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
- "ld1w { z6.s }, p4/Z, [x10]\n"
- "add x25, x25, #0x4\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
+ "add x24, x24, #0x4\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
- "add x24, x24, #0x4\n"
"add x23, x23, #0x4\n"
+ "add x22, x22, #0x4\n"
"fmla z21.s, p4/M, z7.s, z3.s\n"
"fmla z25.s, p4/M, z7.s, z4.s\n"
- "ld1w { z7.s }, p4/Z, [x9]\n"
- "add x22, x22, #0x4\n"
+ "ld1w { z7.s }, p4/Z, [x28]\n"
+ "add x21, x21, #0x4\n"
"fmla z10.s, p4/M, z6.s, z0.s\n"
"fmla z14.s, p4/M, z6.s, z1.s\n"
- "addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"fmla z18.s, p4/M, z6.s, z2.s\n"
"fmla z22.s, p4/M, z6.s, z3.s\n"
"fmla z26.s, p4/M, z6.s, z4.s\n"
"fmla z11.s, p4/M, z7.s, z0.s\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1w { z6.s }, p4/Z, [x12]\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1w { z6.s }, p4/Z, [x11]\n"
"fmla z15.s, p4/M, z7.s, z1.s\n"
"fmla z19.s, p4/M, z7.s, z2.s\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
- "ld1rw { z2.s }, p4/Z, [x24]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
"fmla z23.s, p4/M, z7.s, z3.s\n"
"fmla z27.s, p4/M, z7.s, z4.s\n"
- "ld1rw { z3.s }, p4/Z, [x23]\n"
- "ld1rw { z4.s }, p4/Z, [x22]\n"
- "ld1w { z7.s }, p4/Z, [x11]\n"
+ "ld1rw { z3.s }, p4/Z, [x22]\n"
+ "ld1rw { z4.s }, p4/Z, [x21]\n"
+ "ld1w { z7.s }, p4/Z, [x10]\n"
"bgt 62b\n"
"63:" // Height 5: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.s, p4/M, z6.s, z0.s\n"
"fmla z12.s, p4/M, z6.s, z1.s\n"
- "add x28, x28, #0x1\n"
+ "add x27, x27, #0x1\n"
"fmla z16.s, p4/M, z6.s, z2.s\n"
"fmla z20.s, p4/M, z6.s, z3.s\n"
- "cmp x28, x20\n"
- "addvl x12, x12, #1\n"
+ "cmp x27, x19\n"
+ "addvl x11, x11, #1\n"
"fmla z24.s, p4/M, z6.s, z4.s\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
- "ld1w { z6.s }, p4/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
- "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z21.s, p4/M, z7.s, z3.s\n"
"fmla z25.s, p4/M, z7.s, z4.s\n"
- "ld1w { z7.s }, p4/Z, [x9]\n"
- "addvl x9, x9, #1\n"
+ "ld1w { z7.s }, p4/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
"fmla z10.s, p4/M, z6.s, z0.s\n"
"fmla z14.s, p4/M, z6.s, z1.s\n"
"fmla z18.s, p4/M, z6.s, z2.s\n"
@@ -1095,16 +1095,16 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"fmla z23.s, p4/M, z7.s, z3.s\n"
"fmla z27.s, p4/M, z7.s, z4.s\n"
"bne 59b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"tbz %x[flags], #1, 64f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p4/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p4/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p4/Z, [x19]\n"
"fmin z8.s, p4/M, z8.s, z1.s\n"
"fmin z9.s, p4/M, z9.s, z1.s\n"
"fmin z10.s, p4/M, z10.s, z1.s\n"
@@ -1146,82 +1146,82 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"fmax z26.s, p4/M, z26.s, z0.s\n"
"fmax z27.s, p4/M, z27.s, z0.s\n"
"64:" // Height 5: No activation
- "st1w { z8.s }, p3, [x13]\n"
- "st1w { z9.s }, p2, [x13, #1, MUL VL]\n"
- "st1w { z10.s }, p1, [x13, #2, MUL VL]\n"
- "st1w { z11.s }, p0, [x13, #3, MUL VL]\n"
- "addvl x13, x13, #4\n"
- "st1w { z12.s }, p3, [x25]\n"
- "st1w { z13.s }, p2, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p1, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p0, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p3, [x24]\n"
- "st1w { z17.s }, p2, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p1, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p0, [x24, #3, MUL VL]\n"
- "st1w { z20.s }, p3, [x23]\n"
- "st1w { z21.s }, p2, [x23, #1, MUL VL]\n"
- "st1w { z22.s }, p1, [x23, #2, MUL VL]\n"
- "st1w { z23.s }, p0, [x23, #3, MUL VL]\n"
- "st1w { z24.s }, p3, [x22]\n"
- "st1w { z25.s }, p2, [x22, #1, MUL VL]\n"
- "st1w { z26.s }, p1, [x22, #2, MUL VL]\n"
- "st1w { z27.s }, p0, [x22, #3, MUL VL]\n"
+ "st1w { z8.s }, p3, [x12]\n"
+ "st1w { z9.s }, p2, [x12, #1, MUL VL]\n"
+ "st1w { z10.s }, p1, [x12, #2, MUL VL]\n"
+ "st1w { z11.s }, p0, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1w { z12.s }, p3, [x24]\n"
+ "st1w { z13.s }, p2, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x24, #3, MUL VL]\n"
+ "st1w { z16.s }, p3, [x23]\n"
+ "st1w { z17.s }, p2, [x23, #1, MUL VL]\n"
+ "st1w { z18.s }, p1, [x23, #2, MUL VL]\n"
+ "st1w { z19.s }, p0, [x23, #3, MUL VL]\n"
+ "st1w { z20.s }, p3, [x22]\n"
+ "st1w { z21.s }, p2, [x22, #1, MUL VL]\n"
+ "st1w { z22.s }, p1, [x22, #2, MUL VL]\n"
+ "st1w { z23.s }, p0, [x22, #3, MUL VL]\n"
+ "st1w { z24.s }, p3, [x21]\n"
+ "st1w { z25.s }, p2, [x21, #1, MUL VL]\n"
+ "st1w { z26.s }, p1, [x21, #2, MUL VL]\n"
+ "st1w { z27.s }, p0, [x21, #3, MUL VL]\n"
"65:" // Height 5: Writeback done
- "decw x14, ALL, MUL #4\n"
- "cmp x14, XZR\n"
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
"bgt 54b\n"
"b 80f\n"
"66:" // Height 6
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x21, #0x18\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x15, %x[bias]\n"
- "mov x13, %x[output_ptr]\n"
- "madd %x[output_ptr], x20, x21, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x20, #0x18\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x14, %x[bias]\n"
+ "mov x12, %x[output_ptr]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
"67:" // Height 6: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #2\n"
- "cntw x21, ALL, MUL #3\n"
- "add x10, x11, x20, LSL #2\n"
- "add x9, x10, x20, LSL #2\n"
- "add x20, x9, x20, LSL #2\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 68f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 68f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 68f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"68:" // Height 6: B setup done
- "mov x20, #0x0\n"
- "whilelt p3.s, x20, x14\n"
- "incw x20\n"
- "whilelt p2.s, x20, x14\n"
- "incw x20\n"
- "whilelt p1.s, x20, x14\n"
- "incw x20\n"
- "whilelt p0.s, x20, x14\n"
- "cbz x15, 69f\n"
- "ld1w { z8.s }, p4/Z, [x15]\n"
- "ld1w { z9.s }, p4/Z, [x15, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p0.s, x19, x13\n"
+ "cbz x14, 69f\n"
+ "ld1w { z8.s }, p4/Z, [x14]\n"
+ "ld1w { z9.s }, p4/Z, [x14, #1, MUL VL]\n"
"mov z12.d, z8.d\n"
"mov z13.d, z9.d\n"
- "ld1w { z10.s }, p4/Z, [x15, #2, MUL VL]\n"
- "ld1w { z11.s }, p4/Z, [x15, #3, MUL VL]\n"
+ "ld1w { z10.s }, p4/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p4/Z, [x14, #3, MUL VL]\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x15, x15, #4\n"
+ "addvl x14, x14, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"mov z20.d, z8.d\n"
@@ -1239,36 +1239,36 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"b 71f\n"
"69:" // Height 6: no bias
"tbz %x[flags], #0, 70f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z8.s }, p3/Z, [x13]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "ld1w { z9.s }, p2/Z, [x13, #1, MUL VL]\n"
- "ld1w { z10.s }, p1/Z, [x13, #2, MUL VL]\n"
- "add x21, x22, x20, LSL #2\n"
- "ld1w { z11.s }, p0/Z, [x13, #3, MUL VL]\n"
- "ld1w { z12.s }, p3/Z, [x25]\n"
- "ld1w { z13.s }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1w { z15.s }, p0/Z, [x25, #3, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x24]\n"
- "ld1w { z17.s }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1w { z18.s }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1w { z19.s }, p0/Z, [x24, #3, MUL VL]\n"
- "ld1w { z20.s }, p3/Z, [x23]\n"
- "ld1w { z21.s }, p2/Z, [x23, #1, MUL VL]\n"
- "ld1w { z22.s }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1w { z23.s }, p0/Z, [x23, #3, MUL VL]\n"
- "ld1w { z24.s }, p3/Z, [x22]\n"
- "ld1w { z25.s }, p2/Z, [x22, #1, MUL VL]\n"
- "ld1w { z26.s }, p1/Z, [x22, #2, MUL VL]\n"
- "ld1w { z27.s }, p0/Z, [x22, #3, MUL VL]\n"
- "ld1w { z28.s }, p3/Z, [x21]\n"
- "ld1w { z29.s }, p2/Z, [x21, #1, MUL VL]\n"
- "ld1w { z30.s }, p1/Z, [x21, #2, MUL VL]\n"
- "ld1w { z31.s }, p0/Z, [x21, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z8.s }, p3/Z, [x12]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z9.s }, p2/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z10.s }, p1/Z, [x12, #2, MUL VL]\n"
+ "add x20, x21, x19, LSL #2\n"
+ "ld1w { z11.s }, p0/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x24]\n"
+ "ld1w { z13.s }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z16.s }, p3/Z, [x23]\n"
+ "ld1w { z17.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z18.s }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z19.s }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z20.s }, p3/Z, [x22]\n"
+ "ld1w { z21.s }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z22.s }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z23.s }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z24.s }, p3/Z, [x21]\n"
+ "ld1w { z25.s }, p2/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z26.s }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z27.s }, p0/Z, [x21, #3, MUL VL]\n"
+ "ld1w { z28.s }, p3/Z, [x20]\n"
+ "ld1w { z29.s }, p2/Z, [x20, #1, MUL VL]\n"
+ "ld1w { z30.s }, p1/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z31.s }, p0/Z, [x20, #3, MUL VL]\n"
"b 71f\n"
"70:" // Height 6: no accumulate
"mov z8.b, #0x0\n"
@@ -1296,116 +1296,116 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"mov z30.b, #0x0\n"
"mov z31.b, #0x0\n"
"71:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"72:" // Height 6: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 73f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 74f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x22, x22, x20, LSL #2\n"
- "add x21, x21, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 74f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "add x20, x20, x19, LSL #2\n"
"b 74f\n"
"73:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"74:" // Height 6: input setup done
- "subs x27, x27, #0x1\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
- "ld1rw { z2.s }, p4/Z, [x24]\n"
- "ld1rw { z3.s }, p4/Z, [x23]\n"
- "ld1rw { z4.s }, p4/Z, [x22]\n"
- "ld1rw { z5.s }, p4/Z, [x21]\n"
- "ld1w { z6.s }, p4/Z, [x12]\n"
- "ld1w { z7.s }, p4/Z, [x11]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1rw { z3.s }, p4/Z, [x22]\n"
+ "ld1rw { z4.s }, p4/Z, [x21]\n"
+ "ld1rw { z5.s }, p4/Z, [x20]\n"
+ "ld1w { z6.s }, p4/Z, [x11]\n"
+ "ld1w { z7.s }, p4/Z, [x10]\n"
"ble 76f\n"
"75:" // Height 6: Multiply loop: Main loop
"fmla z8.s, p4/M, z6.s, z0.s\n"
"fmla z12.s, p4/M, z6.s, z1.s\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z16.s, p4/M, z6.s, z2.s\n"
"fmla z20.s, p4/M, z6.s, z3.s\n"
- "add x26, x26, #0x4\n"
- "subs x27, x27, #0x1\n"
+ "add x25, x25, #0x4\n"
+ "subs x26, x26, #0x1\n"
"fmla z24.s, p4/M, z6.s, z4.s\n"
"fmla z28.s, p4/M, z6.s, z5.s\n"
- "ld1w { z6.s }, p4/Z, [x10]\n"
- "add x25, x25, #0x4\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
+ "add x24, x24, #0x4\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
- "add x24, x24, #0x4\n"
"add x23, x23, #0x4\n"
+ "add x22, x22, #0x4\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
"fmla z21.s, p4/M, z7.s, z3.s\n"
- "add x22, x22, #0x4\n"
"add x21, x21, #0x4\n"
+ "add x20, x20, #0x4\n"
"fmla z25.s, p4/M, z7.s, z4.s\n"
"fmla z29.s, p4/M, z7.s, z5.s\n"
- "ld1w { z7.s }, p4/Z, [x9]\n"
- "addvl x10, x10, #1\n"
+ "ld1w { z7.s }, p4/Z, [x28]\n"
+ "addvl x9, x9, #1\n"
"fmla z10.s, p4/M, z6.s, z0.s\n"
"fmla z14.s, p4/M, z6.s, z1.s\n"
- "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"fmla z18.s, p4/M, z6.s, z2.s\n"
"fmla z22.s, p4/M, z6.s, z3.s\n"
"fmla z26.s, p4/M, z6.s, z4.s\n"
"fmla z30.s, p4/M, z6.s, z5.s\n"
- "ld1w { z6.s }, p4/Z, [x12]\n"
+ "ld1w { z6.s }, p4/Z, [x11]\n"
"fmla z11.s, p4/M, z7.s, z0.s\n"
"fmla z15.s, p4/M, z7.s, z1.s\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
"fmla z19.s, p4/M, z7.s, z2.s\n"
"fmla z23.s, p4/M, z7.s, z3.s\n"
- "ld1rw { z2.s }, p4/Z, [x24]\n"
- "ld1rw { z3.s }, p4/Z, [x23]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1rw { z3.s }, p4/Z, [x22]\n"
"fmla z27.s, p4/M, z7.s, z4.s\n"
"fmla z31.s, p4/M, z7.s, z5.s\n"
- "ld1rw { z4.s }, p4/Z, [x22]\n"
- "ld1rw { z5.s }, p4/Z, [x21]\n"
- "ld1w { z7.s }, p4/Z, [x11]\n"
+ "ld1rw { z4.s }, p4/Z, [x21]\n"
+ "ld1rw { z5.s }, p4/Z, [x20]\n"
+ "ld1w { z7.s }, p4/Z, [x10]\n"
"bgt 75b\n"
"76:" // Height 6: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.s, p4/M, z6.s, z0.s\n"
"fmla z12.s, p4/M, z6.s, z1.s\n"
- "add x28, x28, #0x1\n"
+ "add x27, x27, #0x1\n"
"fmla z16.s, p4/M, z6.s, z2.s\n"
"fmla z20.s, p4/M, z6.s, z3.s\n"
- "cmp x28, x20\n"
- "addvl x12, x12, #1\n"
+ "cmp x27, x19\n"
+ "addvl x11, x11, #1\n"
"fmla z24.s, p4/M, z6.s, z4.s\n"
"fmla z28.s, p4/M, z6.s, z5.s\n"
- "ld1w { z6.s }, p4/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
- "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
"fmla z21.s, p4/M, z7.s, z3.s\n"
"fmla z25.s, p4/M, z7.s, z4.s\n"
"fmla z29.s, p4/M, z7.s, z5.s\n"
- "ld1w { z7.s }, p4/Z, [x9]\n"
- "addvl x9, x9, #1\n"
+ "ld1w { z7.s }, p4/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
"fmla z10.s, p4/M, z6.s, z0.s\n"
"fmla z14.s, p4/M, z6.s, z1.s\n"
"fmla z18.s, p4/M, z6.s, z2.s\n"
@@ -1419,17 +1419,17 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"fmla z27.s, p4/M, z7.s, z4.s\n"
"fmla z31.s, p4/M, z7.s, z5.s\n"
"bne 72b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"tbz %x[flags], #1, 77f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p4/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p4/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p4/Z, [x19]\n"
"fmin z8.s, p4/M, z8.s, z1.s\n"
"fmin z9.s, p4/M, z9.s, z1.s\n"
"fmin z10.s, p4/M, z10.s, z1.s\n"
@@ -1479,50 +1479,50 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"fmax z30.s, p4/M, z30.s, z0.s\n"
"fmax z31.s, p4/M, z31.s, z0.s\n"
"77:" // Height 6: No activation
- "st1w { z8.s }, p3, [x13]\n"
- "st1w { z9.s }, p2, [x13, #1, MUL VL]\n"
- "st1w { z10.s }, p1, [x13, #2, MUL VL]\n"
- "st1w { z11.s }, p0, [x13, #3, MUL VL]\n"
- "addvl x13, x13, #4\n"
- "st1w { z12.s }, p3, [x25]\n"
- "st1w { z13.s }, p2, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p1, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p0, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p3, [x24]\n"
- "st1w { z17.s }, p2, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p1, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p0, [x24, #3, MUL VL]\n"
- "st1w { z20.s }, p3, [x23]\n"
- "st1w { z21.s }, p2, [x23, #1, MUL VL]\n"
- "st1w { z22.s }, p1, [x23, #2, MUL VL]\n"
- "st1w { z23.s }, p0, [x23, #3, MUL VL]\n"
- "st1w { z24.s }, p3, [x22]\n"
- "st1w { z25.s }, p2, [x22, #1, MUL VL]\n"
- "st1w { z26.s }, p1, [x22, #2, MUL VL]\n"
- "st1w { z27.s }, p0, [x22, #3, MUL VL]\n"
- "st1w { z28.s }, p3, [x21]\n"
- "st1w { z29.s }, p2, [x21, #1, MUL VL]\n"
- "st1w { z30.s }, p1, [x21, #2, MUL VL]\n"
- "st1w { z31.s }, p0, [x21, #3, MUL VL]\n"
+ "st1w { z8.s }, p3, [x12]\n"
+ "st1w { z9.s }, p2, [x12, #1, MUL VL]\n"
+ "st1w { z10.s }, p1, [x12, #2, MUL VL]\n"
+ "st1w { z11.s }, p0, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1w { z12.s }, p3, [x24]\n"
+ "st1w { z13.s }, p2, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x24, #3, MUL VL]\n"
+ "st1w { z16.s }, p3, [x23]\n"
+ "st1w { z17.s }, p2, [x23, #1, MUL VL]\n"
+ "st1w { z18.s }, p1, [x23, #2, MUL VL]\n"
+ "st1w { z19.s }, p0, [x23, #3, MUL VL]\n"
+ "st1w { z20.s }, p3, [x22]\n"
+ "st1w { z21.s }, p2, [x22, #1, MUL VL]\n"
+ "st1w { z22.s }, p1, [x22, #2, MUL VL]\n"
+ "st1w { z23.s }, p0, [x22, #3, MUL VL]\n"
+ "st1w { z24.s }, p3, [x21]\n"
+ "st1w { z25.s }, p2, [x21, #1, MUL VL]\n"
+ "st1w { z26.s }, p1, [x21, #2, MUL VL]\n"
+ "st1w { z27.s }, p0, [x21, #3, MUL VL]\n"
+ "st1w { z28.s }, p3, [x20]\n"
+ "st1w { z29.s }, p2, [x20, #1, MUL VL]\n"
+ "st1w { z30.s }, p1, [x20, #2, MUL VL]\n"
+ "st1w { z31.s }, p0, [x20, #3, MUL VL]\n"
"78:" // Height 6: Writeback done
- "decw x14, ALL, MUL #4\n"
- "cmp x14, XZR\n"
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
"bgt 67b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 80f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 79f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"79:" // Update direct input
- "mov x20, #0x18\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x18\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"80:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL/generic.cpp
index eb057e7734..3c7e562c89 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -105,53 +105,53 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"cmp %x[M], #0x2\n"
"bgt 29f\n"
"beq 15f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #2\n"
- "cntw x21, ALL, MUL #3\n"
- "add x10, x11, x20, LSL #2\n"
- "add x9, x10, x20, LSL #2\n"
- "add x20, x9, x20, LSL #2\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 3f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 3f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 3f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"3:" // Height 1: B setup done
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x14\n"
- "incw x20\n"
- "whilelt p3.s, x20, x14\n"
- "incw x20\n"
- "whilelt p2.s, x20, x14\n"
- "incw x20\n"
- "whilelt p1.s, x20, x14\n"
- "cbz x15, 4f\n"
- "ld1w { z8.s }, p5/Z, [x15]\n"
- "ld1w { z9.s }, p5/Z, [x15, #1, MUL VL]\n"
- "ld1w { z10.s }, p5/Z, [x15, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x15, #3, MUL VL]\n"
- "addvl x15, x15, #4\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x14, 4f\n"
+ "ld1w { z8.s }, p5/Z, [x14]\n"
+ "ld1w { z9.s }, p5/Z, [x14, #1, MUL VL]\n"
+ "ld1w { z10.s }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x14, #3, MUL VL]\n"
+ "addvl x14, x14, #4\n"
"b 6f\n"
"4:" // Height 1: no bias
"tbz %x[flags], #0, 5f\n"
- "ld1w { z8.s }, p4/Z, [x13]\n"
- "ld1w { z9.s }, p3/Z, [x13, #1, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x13, #2, MUL VL]\n"
- "ld1w { z11.s }, p1/Z, [x13, #3, MUL VL]\n"
+ "ld1w { z8.s }, p4/Z, [x12]\n"
+ "ld1w { z9.s }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z11.s }, p1/Z, [x12, #3, MUL VL]\n"
"b 6f\n"
"5:" // Height 1: no accumulate
"mov z8.b, #0x0\n"
@@ -159,134 +159,134 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"mov z10.b, #0x0\n"
"mov z11.b, #0x0\n"
"6:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"7:" // Height 1: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 8f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 9f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 9f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
"b 9f\n"
"8:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"9:" // Height 1: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"ble 11f\n"
"10:" // Height 1: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1w { z6.s }, p5/Z, [x12]\n"
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
- "ld1w { z7.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
"fmla z9.s, z7.s, z0.s[0]\n"
- "ld1w { z6.s }, p5/Z, [x10]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
"fmla z10.s, z6.s, z0.s[0]\n"
- "ld1w { z7.s }, p5/Z, [x9]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
"fmla z11.s, z7.s, z0.s[0]\n"
- "ld1w { z6.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z6.s }, p5/Z, [x11, #1, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[1]\n"
- "ld1w { z7.s }, p5/Z, [x11, #1, MUL VL]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z9.s, z7.s, z0.s[1]\n"
- "ld1w { z6.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1w { z6.s }, p5/Z, [x9, #1, MUL VL]\n"
"fmla z10.s, z6.s, z0.s[1]\n"
- "ld1w { z7.s }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z7.s }, p5/Z, [x28, #1, MUL VL]\n"
"fmla z11.s, z7.s, z0.s[1]\n"
- "ld1w { z6.s }, p5/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z6.s }, p5/Z, [x11, #2, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[2]\n"
- "ld1w { z7.s }, p5/Z, [x11, #2, MUL VL]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.s, z7.s, z0.s[2]\n"
- "ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1w { z6.s }, p5/Z, [x9, #2, MUL VL]\n"
"fmla z10.s, z6.s, z0.s[2]\n"
- "ld1w { z7.s }, p5/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z7.s }, p5/Z, [x28, #2, MUL VL]\n"
"fmla z11.s, z7.s, z0.s[2]\n"
- "ld1w { z6.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z6.s }, p5/Z, [x11, #3, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[3]\n"
- "ld1w { z7.s }, p5/Z, [x11, #3, MUL VL]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
"fmla z9.s, z7.s, z0.s[3]\n"
- "ld1w { z6.s }, p5/Z, [x10, #3, MUL VL]\n"
- "sub x27, x27, #0x4\n"
- "ld1w { z7.s }, p5/Z, [x9, #3, MUL VL]\n"
- "cmp x27, #0x4\n"
+ "ld1w { z6.s }, p5/Z, [x9, #3, MUL VL]\n"
+ "sub x26, x26, #0x4\n"
+ "ld1w { z7.s }, p5/Z, [x28, #3, MUL VL]\n"
+ "cmp x26, #0x4\n"
"fmla z10.s, z6.s, z0.s[3]\n"
"fmla z11.s, z7.s, z0.s[3]\n"
- "add x26, x26, #0x10\n"
- "addvl x12, x12, #4\n"
+ "add x25, x25, #0x10\n"
"addvl x11, x11, #4\n"
"addvl x10, x10, #4\n"
"addvl x9, x9, #4\n"
+ "addvl x28, x28, #4\n"
"bgt 10b\n"
"11:" // Height 1: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1w { z6.s }, p5/Z, [x12]\n"
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
- "ld1w { z7.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
"fmla z9.s, z7.s, z0.s[0]\n"
- "ld1w { z6.s }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
- "ld1w { z7.s }, p5/Z, [x9]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
"fmla z10.s, z6.s, z0.s[0]\n"
"fmla z11.s, z7.s, z0.s[0]\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"ble 12f\n"
- "ld1w { z6.s }, p5/Z, [x12]\n"
- "ld1w { z7.s }, p5/Z, [x11]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
"fmla z8.s, z6.s, z0.s[1]\n"
"fmla z9.s, z7.s, z0.s[1]\n"
- "ld1w { z6.s }, p5/Z, [x10]\n"
- "ld1w { z7.s }, p5/Z, [x9]\n"
- "subs x27, x27, #0x1\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
"fmla z10.s, z6.s, z0.s[1]\n"
"fmla z11.s, z7.s, z0.s[1]\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"ble 12f\n"
- "ld1w { z6.s }, p5/Z, [x12]\n"
- "ld1w { z7.s }, p5/Z, [x11]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
"fmla z8.s, z6.s, z0.s[2]\n"
"fmla z9.s, z7.s, z0.s[2]\n"
- "ld1w { z6.s }, p5/Z, [x10]\n"
- "ld1w { z7.s }, p5/Z, [x9]\n"
- "subs x27, x27, #0x1\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
"fmla z10.s, z6.s, z0.s[2]\n"
"fmla z11.s, z7.s, z0.s[2]\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"ble 12f\n"
- "ld1w { z6.s }, p5/Z, [x12]\n"
- "ld1w { z7.s }, p5/Z, [x11]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
"fmla z8.s, z6.s, z0.s[3]\n"
"fmla z9.s, z7.s, z0.s[3]\n"
- "ld1w { z6.s }, p5/Z, [x10]\n"
- "ld1w { z7.s }, p5/Z, [x9]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
"fmla z10.s, z6.s, z0.s[3]\n"
"fmla z11.s, z7.s, z0.s[3]\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"12:" // Height 1: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 7b\n"
"tbz %x[flags], #1, 13f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
"fmin z8.s, p5/M, z8.s, z1.s\n"
"fmin z9.s, p5/M, z9.s, z1.s\n"
"fmin z10.s, p5/M, z10.s, z1.s\n"
@@ -296,74 +296,74 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"fmax z10.s, p5/M, z10.s, z0.s\n"
"fmax z11.s, p5/M, z11.s, z0.s\n"
"13:" // Height 1: No activation
- "st1w { z8.s }, p4, [x13]\n"
- "st1w { z9.s }, p3, [x13, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x13, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x13, #3, MUL VL]\n"
- "addvl x13, x13, #4\n"
+ "st1w { z8.s }, p4, [x12]\n"
+ "st1w { z9.s }, p3, [x12, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x12, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
"14:" // Height 1: Writeback done
- "decw x14, ALL, MUL #4\n"
- "cmp x14, XZR\n"
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
"bgt 2b\n"
"b 86f\n"
"15:" // Height 2
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"16:" // Height 2: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #2\n"
- "cntw x21, ALL, MUL #3\n"
- "add x10, x11, x20, LSL #2\n"
- "add x9, x10, x20, LSL #2\n"
- "add x20, x9, x20, LSL #2\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 17f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 17f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 17f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"17:" // Height 2: B setup done
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x14\n"
- "incw x20\n"
- "whilelt p3.s, x20, x14\n"
- "incw x20\n"
- "whilelt p2.s, x20, x14\n"
- "incw x20\n"
- "whilelt p1.s, x20, x14\n"
- "cbz x15, 18f\n"
- "ld1w { z8.s }, p5/Z, [x15]\n"
- "ld1w { z9.s }, p5/Z, [x15, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x14, 18f\n"
+ "ld1w { z8.s }, p5/Z, [x14]\n"
+ "ld1w { z9.s }, p5/Z, [x14, #1, MUL VL]\n"
"mov z12.d, z8.d\n"
"mov z13.d, z9.d\n"
- "ld1w { z10.s }, p5/Z, [x15, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x15, #3, MUL VL]\n"
+ "ld1w { z10.s }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x14, #3, MUL VL]\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
- "addvl x15, x15, #4\n"
+ "addvl x14, x14, #4\n"
"b 20f\n"
"18:" // Height 2: no bias
"tbz %x[flags], #0, 19f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "ld1w { z8.s }, p4/Z, [x13]\n"
- "ld1w { z9.s }, p3/Z, [x13, #1, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x13, #2, MUL VL]\n"
- "ld1w { z11.s }, p1/Z, [x13, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x25]\n"
- "ld1w { z13.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x25, #2, MUL VL]\n"
- "ld1w { z15.s }, p1/Z, [x25, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "ld1w { z8.s }, p4/Z, [x12]\n"
+ "ld1w { z9.s }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z11.s }, p1/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
"b 20f\n"
"19:" // Height 2: no accumulate
"mov z8.b, #0x0\n"
@@ -375,174 +375,174 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"mov z14.b, #0x0\n"
"mov z15.b, #0x0\n"
"20:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"21:" // Height 2: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 22f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 23f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 23f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
"b 23f\n"
"22:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
"23:" // Height 2: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"ble 25f\n"
"24:" // Height 2: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1rqw { z1.s }, p0/Z, [x25]\n"
- "sub x27, x27, #0x4\n"
- "ld1w { z6.s }, p5/Z, [x12]\n"
- "ld1w { z7.s }, p5/Z, [x11]\n"
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
+ "sub x26, x26, #0x4\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
"fmla z12.s, z6.s, z1.s[0]\n"
"fmla z9.s, z7.s, z0.s[0]\n"
"fmla z13.s, z7.s, z1.s[0]\n"
- "ld1w { z6.s }, p5/Z, [x10]\n"
- "ld1w { z7.s }, p5/Z, [x9]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
"fmla z10.s, z6.s, z0.s[0]\n"
"fmla z14.s, z6.s, z1.s[0]\n"
- "ld1w { z6.s }, p5/Z, [x12, #1, MUL VL]\n"
- "cmp x27, #0x4\n"
+ "ld1w { z6.s }, p5/Z, [x11, #1, MUL VL]\n"
+ "cmp x26, #0x4\n"
"fmla z11.s, z7.s, z0.s[0]\n"
"fmla z15.s, z7.s, z1.s[0]\n"
- "ld1w { z7.s }, p5/Z, [x11, #1, MUL VL]\n"
- "add x26, x26, #0x10\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "add x25, x25, #0x10\n"
"fmla z8.s, z6.s, z0.s[1]\n"
"fmla z12.s, z6.s, z1.s[1]\n"
- "ld1w { z6.s }, p5/Z, [x10, #1, MUL VL]\n"
- "add x25, x25, #0x10\n"
+ "ld1w { z6.s }, p5/Z, [x9, #1, MUL VL]\n"
+ "add x24, x24, #0x10\n"
"fmla z9.s, z7.s, z0.s[1]\n"
"fmla z13.s, z7.s, z1.s[1]\n"
- "ld1w { z7.s }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z7.s }, p5/Z, [x28, #1, MUL VL]\n"
"fmla z10.s, z6.s, z0.s[1]\n"
"fmla z14.s, z6.s, z1.s[1]\n"
- "ld1w { z6.s }, p5/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z6.s }, p5/Z, [x11, #2, MUL VL]\n"
"fmla z11.s, z7.s, z0.s[1]\n"
"fmla z15.s, z7.s, z1.s[1]\n"
- "ld1w { z7.s }, p5/Z, [x11, #2, MUL VL]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[2]\n"
"fmla z12.s, z6.s, z1.s[2]\n"
- "ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1w { z6.s }, p5/Z, [x9, #2, MUL VL]\n"
"fmla z9.s, z7.s, z0.s[2]\n"
"fmla z13.s, z7.s, z1.s[2]\n"
- "ld1w { z7.s }, p5/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z7.s }, p5/Z, [x28, #2, MUL VL]\n"
"fmla z10.s, z6.s, z0.s[2]\n"
"fmla z14.s, z6.s, z1.s[2]\n"
- "ld1w { z6.s }, p5/Z, [x12, #3, MUL VL]\n"
- "addvl x12, x12, #4\n"
+ "ld1w { z6.s }, p5/Z, [x11, #3, MUL VL]\n"
+ "addvl x11, x11, #4\n"
"fmla z11.s, z7.s, z0.s[2]\n"
"fmla z15.s, z7.s, z1.s[2]\n"
- "ld1w { z7.s }, p5/Z, [x11, #3, MUL VL]\n"
- "addvl x11, x11, #4\n"
+ "ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"fmla z8.s, z6.s, z0.s[3]\n"
"fmla z12.s, z6.s, z1.s[3]\n"
- "ld1w { z6.s }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1w { z6.s }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"fmla z9.s, z7.s, z0.s[3]\n"
"fmla z13.s, z7.s, z1.s[3]\n"
- "ld1w { z7.s }, p5/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1w { z7.s }, p5/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"fmla z10.s, z6.s, z0.s[3]\n"
"fmla z14.s, z6.s, z1.s[3]\n"
"fmla z11.s, z7.s, z0.s[3]\n"
"fmla z15.s, z7.s, z1.s[3]\n"
"bgt 24b\n"
"25:" // Height 2: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1rqw { z1.s }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
- "ld1w { z6.s }, p5/Z, [x12]\n"
- "ld1w { z7.s }, p5/Z, [x11]\n"
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
"fmla z12.s, z6.s, z1.s[0]\n"
"fmla z9.s, z7.s, z0.s[0]\n"
"fmla z13.s, z7.s, z1.s[0]\n"
- "ld1w { z6.s }, p5/Z, [x10]\n"
- "ld1w { z7.s }, p5/Z, [x9]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
"fmla z10.s, z6.s, z0.s[0]\n"
"fmla z14.s, z6.s, z1.s[0]\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z11.s, z7.s, z0.s[0]\n"
"fmla z15.s, z7.s, z1.s[0]\n"
- "addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"ble 26f\n"
- "ld1w { z6.s }, p5/Z, [x12]\n"
- "ld1w { z7.s }, p5/Z, [x11]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
"fmla z8.s, z6.s, z0.s[1]\n"
"fmla z12.s, z6.s, z1.s[1]\n"
"fmla z9.s, z7.s, z0.s[1]\n"
"fmla z13.s, z7.s, z1.s[1]\n"
- "ld1w { z6.s }, p5/Z, [x10]\n"
- "ld1w { z7.s }, p5/Z, [x9]\n"
- "subs x27, x27, #0x1\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
"fmla z10.s, z6.s, z0.s[1]\n"
"fmla z14.s, z6.s, z1.s[1]\n"
- "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z11.s, z7.s, z0.s[1]\n"
"fmla z15.s, z7.s, z1.s[1]\n"
- "addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"ble 26f\n"
- "ld1w { z6.s }, p5/Z, [x12]\n"
- "ld1w { z7.s }, p5/Z, [x11]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
"fmla z8.s, z6.s, z0.s[2]\n"
"fmla z12.s, z6.s, z1.s[2]\n"
"fmla z9.s, z7.s, z0.s[2]\n"
"fmla z13.s, z7.s, z1.s[2]\n"
- "ld1w { z6.s }, p5/Z, [x10]\n"
- "ld1w { z7.s }, p5/Z, [x9]\n"
- "subs x27, x27, #0x1\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
"fmla z10.s, z6.s, z0.s[2]\n"
"fmla z14.s, z6.s, z1.s[2]\n"
- "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z11.s, z7.s, z0.s[2]\n"
"fmla z15.s, z7.s, z1.s[2]\n"
- "addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"ble 26f\n"
- "ld1w { z6.s }, p5/Z, [x12]\n"
- "ld1w { z7.s }, p5/Z, [x11]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
"fmla z8.s, z6.s, z0.s[3]\n"
"fmla z12.s, z6.s, z1.s[3]\n"
"fmla z9.s, z7.s, z0.s[3]\n"
"fmla z13.s, z7.s, z1.s[3]\n"
- "ld1w { z6.s }, p5/Z, [x10]\n"
- "ld1w { z7.s }, p5/Z, [x9]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
"fmla z10.s, z6.s, z0.s[3]\n"
"fmla z14.s, z6.s, z1.s[3]\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z11.s, z7.s, z0.s[3]\n"
"fmla z15.s, z7.s, z1.s[3]\n"
- "addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"26:" // Height 2: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 21b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
"tbz %x[flags], #1, 27f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
"fmin z8.s, p5/M, z8.s, z1.s\n"
"fmin z9.s, p5/M, z9.s, z1.s\n"
"fmin z10.s, p5/M, z10.s, z1.s\n"
@@ -560,87 +560,87 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"fmax z14.s, p5/M, z14.s, z0.s\n"
"fmax z15.s, p5/M, z15.s, z0.s\n"
"27:" // Height 2: No activation
- "st1w { z8.s }, p4, [x13]\n"
- "st1w { z9.s }, p3, [x13, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x13, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x13, #3, MUL VL]\n"
- "addvl x13, x13, #4\n"
- "st1w { z12.s }, p4, [x25]\n"
- "st1w { z13.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x25, #3, MUL VL]\n"
+ "st1w { z8.s }, p4, [x12]\n"
+ "st1w { z9.s }, p3, [x12, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x12, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1w { z12.s }, p4, [x24]\n"
+ "st1w { z13.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x24, #3, MUL VL]\n"
"28:" // Height 2: Writeback done
- "decw x14, ALL, MUL #4\n"
- "cmp x14, XZR\n"
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
"bgt 16b\n"
"b 86f\n"
"29:" // Height 3
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"30:" // Height 3: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #2\n"
- "cntw x21, ALL, MUL #3\n"
- "add x10, x11, x20, LSL #2\n"
- "add x9, x10, x20, LSL #2\n"
- "add x20, x9, x20, LSL #2\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 31f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 31f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 31f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"31:" // Height 3: B setup done
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x14\n"
- "incw x20\n"
- "whilelt p3.s, x20, x14\n"
- "incw x20\n"
- "whilelt p2.s, x20, x14\n"
- "incw x20\n"
- "whilelt p1.s, x20, x14\n"
- "cbz x15, 32f\n"
- "ld1w { z8.s }, p5/Z, [x15]\n"
- "ld1w { z9.s }, p5/Z, [x15, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x14, 32f\n"
+ "ld1w { z8.s }, p5/Z, [x14]\n"
+ "ld1w { z9.s }, p5/Z, [x14, #1, MUL VL]\n"
"mov z12.d, z8.d\n"
"mov z13.d, z9.d\n"
- "ld1w { z10.s }, p5/Z, [x15, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x15, #3, MUL VL]\n"
+ "ld1w { z10.s }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x14, #3, MUL VL]\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x15, x15, #4\n"
+ "addvl x14, x14, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"b 34f\n"
"32:" // Height 3: no bias
"tbz %x[flags], #0, 33f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z8.s }, p4/Z, [x13]\n"
- "ld1w { z9.s }, p3/Z, [x13, #1, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x13, #2, MUL VL]\n"
- "ld1w { z11.s }, p1/Z, [x13, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x25]\n"
- "ld1w { z13.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x25, #2, MUL VL]\n"
- "ld1w { z15.s }, p1/Z, [x25, #3, MUL VL]\n"
- "ld1w { z16.s }, p4/Z, [x24]\n"
- "ld1w { z17.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1w { z19.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z8.s }, p4/Z, [x12]\n"
+ "ld1w { z9.s }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z11.s }, p1/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x23]\n"
+ "ld1w { z17.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z19.s }, p1/Z, [x23, #3, MUL VL]\n"
"b 34f\n"
"33:" // Height 3: no accumulate
"mov z8.b, #0x0\n"
@@ -656,102 +656,102 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"mov z18.b, #0x0\n"
"mov z19.b, #0x0\n"
"34:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"35:" // Height 3: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 36f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 37f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 37f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
"b 37f\n"
"36:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"37:" // Height 3: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"ble 39f\n"
"38:" // Height 3: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1rqw { z1.s }, p0/Z, [x25]\n"
- "sub x27, x27, #0x4\n"
- "ld1rqw { z2.s }, p0/Z, [x24]\n"
- "ld1w { z6.s }, p5/Z, [x12]\n"
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
+ "sub x26, x26, #0x4\n"
+ "ld1rqw { z2.s }, p0/Z, [x23]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
"fmla z12.s, z6.s, z1.s[0]\n"
- "ld1w { z7.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
"fmla z16.s, z6.s, z2.s[0]\n"
"fmla z9.s, z7.s, z0.s[0]\n"
- "ld1w { z6.s }, p5/Z, [x10]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
"fmla z13.s, z7.s, z1.s[0]\n"
"fmla z17.s, z7.s, z2.s[0]\n"
- "ld1w { z7.s }, p5/Z, [x9]\n"
- "cmp x27, #0x4\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "cmp x26, #0x4\n"
"fmla z10.s, z6.s, z0.s[0]\n"
"fmla z14.s, z6.s, z1.s[0]\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"fmla z18.s, z6.s, z2.s[0]\n"
"fmla z11.s, z7.s, z0.s[0]\n"
- "ld1w { z6.s }, p5/Z, [x12, #1, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "ld1w { z6.s }, p5/Z, [x11, #1, MUL VL]\n"
+ "add x23, x23, #0x10\n"
"fmla z15.s, z7.s, z1.s[0]\n"
"fmla z19.s, z7.s, z2.s[0]\n"
- "ld1w { z7.s }, p5/Z, [x11, #1, MUL VL]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[1]\n"
"fmla z12.s, z6.s, z1.s[1]\n"
"fmla z16.s, z6.s, z2.s[1]\n"
"fmla z9.s, z7.s, z0.s[1]\n"
- "ld1w { z6.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1w { z6.s }, p5/Z, [x9, #1, MUL VL]\n"
"fmla z13.s, z7.s, z1.s[1]\n"
"fmla z17.s, z7.s, z2.s[1]\n"
- "ld1w { z7.s }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z7.s }, p5/Z, [x28, #1, MUL VL]\n"
"fmla z10.s, z6.s, z0.s[1]\n"
"fmla z14.s, z6.s, z1.s[1]\n"
"fmla z18.s, z6.s, z2.s[1]\n"
"fmla z11.s, z7.s, z0.s[1]\n"
- "ld1w { z6.s }, p5/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z6.s }, p5/Z, [x11, #2, MUL VL]\n"
"fmla z15.s, z7.s, z1.s[1]\n"
"fmla z19.s, z7.s, z2.s[1]\n"
- "ld1w { z7.s }, p5/Z, [x11, #2, MUL VL]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[2]\n"
"fmla z12.s, z6.s, z1.s[2]\n"
"fmla z16.s, z6.s, z2.s[2]\n"
"fmla z9.s, z7.s, z0.s[2]\n"
- "ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1w { z6.s }, p5/Z, [x9, #2, MUL VL]\n"
"fmla z13.s, z7.s, z1.s[2]\n"
"fmla z17.s, z7.s, z2.s[2]\n"
- "ld1w { z7.s }, p5/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z7.s }, p5/Z, [x28, #2, MUL VL]\n"
"fmla z10.s, z6.s, z0.s[2]\n"
"fmla z14.s, z6.s, z1.s[2]\n"
"fmla z18.s, z6.s, z2.s[2]\n"
"fmla z11.s, z7.s, z0.s[2]\n"
- "ld1w { z6.s }, p5/Z, [x12, #3, MUL VL]\n"
- "addvl x12, x12, #4\n"
+ "ld1w { z6.s }, p5/Z, [x11, #3, MUL VL]\n"
+ "addvl x11, x11, #4\n"
"fmla z15.s, z7.s, z1.s[2]\n"
"fmla z19.s, z7.s, z2.s[2]\n"
- "ld1w { z7.s }, p5/Z, [x11, #3, MUL VL]\n"
- "addvl x11, x11, #4\n"
+ "ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"fmla z8.s, z6.s, z0.s[3]\n"
"fmla z12.s, z6.s, z1.s[3]\n"
"fmla z16.s, z6.s, z2.s[3]\n"
"fmla z9.s, z7.s, z0.s[3]\n"
- "ld1w { z6.s }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1w { z6.s }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"fmla z13.s, z7.s, z1.s[3]\n"
"fmla z17.s, z7.s, z2.s[3]\n"
- "ld1w { z7.s }, p5/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1w { z7.s }, p5/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"fmla z10.s, z6.s, z0.s[3]\n"
"fmla z14.s, z6.s, z1.s[3]\n"
"fmla z18.s, z6.s, z2.s[3]\n"
@@ -760,109 +760,109 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"fmla z19.s, z7.s, z2.s[3]\n"
"bgt 38b\n"
"39:" // Height 3: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1rqw { z1.s }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
- "ld1rqw { z2.s }, p0/Z, [x24]\n"
- "ld1w { z6.s }, p5/Z, [x12]\n"
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rqw { z2.s }, p0/Z, [x23]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
"fmla z12.s, z6.s, z1.s[0]\n"
- "ld1w { z7.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
"fmla z16.s, z6.s, z2.s[0]\n"
"fmla z9.s, z7.s, z0.s[0]\n"
- "ld1w { z6.s }, p5/Z, [x10]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
"fmla z13.s, z7.s, z1.s[0]\n"
"fmla z17.s, z7.s, z2.s[0]\n"
- "ld1w { z7.s }, p5/Z, [x9]\n"
- "addvl x12, x12, #1\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "addvl x11, x11, #1\n"
"fmla z10.s, z6.s, z0.s[0]\n"
"fmla z14.s, z6.s, z1.s[0]\n"
- "addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z18.s, z6.s, z2.s[0]\n"
"fmla z11.s, z7.s, z0.s[0]\n"
- "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"fmla z15.s, z7.s, z1.s[0]\n"
"fmla z19.s, z7.s, z2.s[0]\n"
"ble 40f\n"
- "ld1w { z6.s }, p5/Z, [x12]\n"
- "ld1w { z7.s }, p5/Z, [x11]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
"fmla z8.s, z6.s, z0.s[1]\n"
"fmla z12.s, z6.s, z1.s[1]\n"
"fmla z16.s, z6.s, z2.s[1]\n"
"fmla z9.s, z7.s, z0.s[1]\n"
- "ld1w { z6.s }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
"fmla z13.s, z7.s, z1.s[1]\n"
"fmla z17.s, z7.s, z2.s[1]\n"
- "ld1w { z7.s }, p5/Z, [x9]\n"
- "addvl x12, x12, #1\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "addvl x11, x11, #1\n"
"fmla z10.s, z6.s, z0.s[1]\n"
"fmla z14.s, z6.s, z1.s[1]\n"
- "addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z18.s, z6.s, z2.s[1]\n"
"fmla z11.s, z7.s, z0.s[1]\n"
- "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"fmla z15.s, z7.s, z1.s[1]\n"
"fmla z19.s, z7.s, z2.s[1]\n"
"ble 40f\n"
- "ld1w { z6.s }, p5/Z, [x12]\n"
- "ld1w { z7.s }, p5/Z, [x11]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
"fmla z8.s, z6.s, z0.s[2]\n"
"fmla z12.s, z6.s, z1.s[2]\n"
"fmla z16.s, z6.s, z2.s[2]\n"
"fmla z9.s, z7.s, z0.s[2]\n"
- "ld1w { z6.s }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
"fmla z13.s, z7.s, z1.s[2]\n"
"fmla z17.s, z7.s, z2.s[2]\n"
- "ld1w { z7.s }, p5/Z, [x9]\n"
- "addvl x12, x12, #1\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "addvl x11, x11, #1\n"
"fmla z10.s, z6.s, z0.s[2]\n"
"fmla z14.s, z6.s, z1.s[2]\n"
- "addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z18.s, z6.s, z2.s[2]\n"
"fmla z11.s, z7.s, z0.s[2]\n"
- "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"fmla z15.s, z7.s, z1.s[2]\n"
"fmla z19.s, z7.s, z2.s[2]\n"
"ble 40f\n"
- "ld1w { z6.s }, p5/Z, [x12]\n"
- "ld1w { z7.s }, p5/Z, [x11]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
"fmla z8.s, z6.s, z0.s[3]\n"
"fmla z12.s, z6.s, z1.s[3]\n"
"fmla z16.s, z6.s, z2.s[3]\n"
"fmla z9.s, z7.s, z0.s[3]\n"
- "ld1w { z6.s }, p5/Z, [x10]\n"
- "addvl x12, x12, #1\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "addvl x11, x11, #1\n"
"fmla z13.s, z7.s, z1.s[3]\n"
"fmla z17.s, z7.s, z2.s[3]\n"
- "ld1w { z7.s }, p5/Z, [x9]\n"
- "addvl x11, x11, #1\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "addvl x10, x10, #1\n"
"fmla z10.s, z6.s, z0.s[3]\n"
"fmla z14.s, z6.s, z1.s[3]\n"
- "addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"fmla z18.s, z6.s, z2.s[3]\n"
"fmla z11.s, z7.s, z0.s[3]\n"
"fmla z15.s, z7.s, z1.s[3]\n"
"fmla z19.s, z7.s, z2.s[3]\n"
"40:" // Height 3: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 35b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"tbz %x[flags], #1, 41f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
"fmin z8.s, p5/M, z8.s, z1.s\n"
"fmin z9.s, p5/M, z9.s, z1.s\n"
"fmin z10.s, p5/M, z10.s, z1.s\n"
@@ -888,71 +888,71 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"fmax z18.s, p5/M, z18.s, z0.s\n"
"fmax z19.s, p5/M, z19.s, z0.s\n"
"41:" // Height 3: No activation
- "st1w { z8.s }, p4, [x13]\n"
- "st1w { z9.s }, p3, [x13, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x13, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x13, #3, MUL VL]\n"
- "addvl x13, x13, #4\n"
- "st1w { z12.s }, p4, [x25]\n"
- "st1w { z13.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x24]\n"
- "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z8.s }, p4, [x12]\n"
+ "st1w { z9.s }, p3, [x12, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x12, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1w { z12.s }, p4, [x24]\n"
+ "st1w { z13.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x23]\n"
+ "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
"42:" // Height 3: Writeback done
- "decw x14, ALL, MUL #4\n"
- "cmp x14, XZR\n"
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
"bgt 30b\n"
"b 86f\n"
"43:" // Height 4
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"44:" // Height 4: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #2\n"
- "cntw x21, ALL, MUL #3\n"
- "add x10, x11, x20, LSL #2\n"
- "add x9, x10, x20, LSL #2\n"
- "add x20, x9, x20, LSL #2\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 45f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 45f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 45f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"45:" // Height 4: B setup done
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x14\n"
- "incw x20\n"
- "whilelt p3.s, x20, x14\n"
- "incw x20\n"
- "whilelt p2.s, x20, x14\n"
- "incw x20\n"
- "whilelt p1.s, x20, x14\n"
- "cbz x15, 46f\n"
- "ld1w { z8.s }, p5/Z, [x15]\n"
- "ld1w { z9.s }, p5/Z, [x15, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x14, 46f\n"
+ "ld1w { z8.s }, p5/Z, [x14]\n"
+ "ld1w { z9.s }, p5/Z, [x14, #1, MUL VL]\n"
"mov z12.d, z8.d\n"
"mov z13.d, z9.d\n"
- "ld1w { z10.s }, p5/Z, [x15, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x15, #3, MUL VL]\n"
+ "ld1w { z10.s }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x14, #3, MUL VL]\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x15, x15, #4\n"
+ "addvl x14, x14, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"mov z20.d, z8.d\n"
@@ -962,26 +962,26 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"b 48f\n"
"46:" // Height 4: no bias
"tbz %x[flags], #0, 47f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z8.s }, p4/Z, [x13]\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z9.s }, p3/Z, [x13, #1, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x13, #2, MUL VL]\n"
- "ld1w { z11.s }, p1/Z, [x13, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x25]\n"
- "ld1w { z13.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x25, #2, MUL VL]\n"
- "ld1w { z15.s }, p1/Z, [x25, #3, MUL VL]\n"
- "ld1w { z16.s }, p4/Z, [x24]\n"
- "ld1w { z17.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1w { z19.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x23]\n"
- "ld1w { z21.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z22.s }, p2/Z, [x23, #2, MUL VL]\n"
- "ld1w { z23.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z8.s }, p4/Z, [x12]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z9.s }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z11.s }, p1/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x23]\n"
+ "ld1w { z17.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z19.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z20.s }, p4/Z, [x22]\n"
+ "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
"b 48f\n"
"47:" // Height 4: no accumulate
"mov z8.b, #0x0\n"
@@ -1001,121 +1001,121 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"mov z22.b, #0x0\n"
"mov z23.b, #0x0\n"
"48:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"49:" // Height 4: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 50f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 51f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 51f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
"b 51f\n"
"50:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"51:" // Height 4: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"ble 53f\n"
"52:" // Height 4: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1rqw { z1.s }, p0/Z, [x25]\n"
- "sub x27, x27, #0x4\n"
- "ld1rqw { z2.s }, p0/Z, [x24]\n"
- "ld1rqw { z3.s }, p0/Z, [x23]\n"
- "cmp x27, #0x4\n"
- "add x26, x26, #0x10\n"
- "ld1w { z6.s }, p5/Z, [x12]\n"
- "ld1w { z7.s }, p5/Z, [x11]\n"
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
+ "sub x26, x26, #0x4\n"
+ "ld1rqw { z2.s }, p0/Z, [x23]\n"
+ "ld1rqw { z3.s }, p0/Z, [x22]\n"
+ "cmp x26, #0x4\n"
+ "add x25, x25, #0x10\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
"fmla z12.s, z6.s, z1.s[0]\n"
"fmla z16.s, z6.s, z2.s[0]\n"
"fmla z20.s, z6.s, z3.s[0]\n"
- "ld1w { z6.s }, p5/Z, [x10]\n"
- "add x25, x25, #0x10\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "add x24, x24, #0x10\n"
"fmla z9.s, z7.s, z0.s[0]\n"
"fmla z13.s, z7.s, z1.s[0]\n"
- "add x24, x24, #0x10\n"
"add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
"fmla z17.s, z7.s, z2.s[0]\n"
"fmla z21.s, z7.s, z3.s[0]\n"
- "ld1w { z7.s }, p5/Z, [x9]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
"fmla z10.s, z6.s, z0.s[0]\n"
"fmla z14.s, z6.s, z1.s[0]\n"
"fmla z18.s, z6.s, z2.s[0]\n"
"fmla z22.s, z6.s, z3.s[0]\n"
- "ld1w { z6.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z6.s }, p5/Z, [x11, #1, MUL VL]\n"
"fmla z11.s, z7.s, z0.s[0]\n"
"fmla z15.s, z7.s, z1.s[0]\n"
"fmla z19.s, z7.s, z2.s[0]\n"
"fmla z23.s, z7.s, z3.s[0]\n"
- "ld1w { z7.s }, p5/Z, [x11, #1, MUL VL]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[1]\n"
"fmla z12.s, z6.s, z1.s[1]\n"
"fmla z16.s, z6.s, z2.s[1]\n"
"fmla z20.s, z6.s, z3.s[1]\n"
- "ld1w { z6.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1w { z6.s }, p5/Z, [x9, #1, MUL VL]\n"
"fmla z9.s, z7.s, z0.s[1]\n"
"fmla z13.s, z7.s, z1.s[1]\n"
"fmla z17.s, z7.s, z2.s[1]\n"
"fmla z21.s, z7.s, z3.s[1]\n"
- "ld1w { z7.s }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z7.s }, p5/Z, [x28, #1, MUL VL]\n"
"fmla z10.s, z6.s, z0.s[1]\n"
"fmla z14.s, z6.s, z1.s[1]\n"
"fmla z18.s, z6.s, z2.s[1]\n"
"fmla z22.s, z6.s, z3.s[1]\n"
- "ld1w { z6.s }, p5/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z6.s }, p5/Z, [x11, #2, MUL VL]\n"
"fmla z11.s, z7.s, z0.s[1]\n"
"fmla z15.s, z7.s, z1.s[1]\n"
"fmla z19.s, z7.s, z2.s[1]\n"
"fmla z23.s, z7.s, z3.s[1]\n"
- "ld1w { z7.s }, p5/Z, [x11, #2, MUL VL]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[2]\n"
"fmla z12.s, z6.s, z1.s[2]\n"
"fmla z16.s, z6.s, z2.s[2]\n"
"fmla z20.s, z6.s, z3.s[2]\n"
- "ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1w { z6.s }, p5/Z, [x9, #2, MUL VL]\n"
"fmla z9.s, z7.s, z0.s[2]\n"
"fmla z13.s, z7.s, z1.s[2]\n"
"fmla z17.s, z7.s, z2.s[2]\n"
"fmla z21.s, z7.s, z3.s[2]\n"
- "ld1w { z7.s }, p5/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z7.s }, p5/Z, [x28, #2, MUL VL]\n"
"fmla z10.s, z6.s, z0.s[2]\n"
"fmla z14.s, z6.s, z1.s[2]\n"
"fmla z18.s, z6.s, z2.s[2]\n"
"fmla z22.s, z6.s, z3.s[2]\n"
- "ld1w { z6.s }, p5/Z, [x12, #3, MUL VL]\n"
- "addvl x12, x12, #4\n"
+ "ld1w { z6.s }, p5/Z, [x11, #3, MUL VL]\n"
+ "addvl x11, x11, #4\n"
"fmla z11.s, z7.s, z0.s[2]\n"
"fmla z15.s, z7.s, z1.s[2]\n"
"fmla z19.s, z7.s, z2.s[2]\n"
"fmla z23.s, z7.s, z3.s[2]\n"
- "ld1w { z7.s }, p5/Z, [x11, #3, MUL VL]\n"
- "addvl x11, x11, #4\n"
+ "ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"fmla z8.s, z6.s, z0.s[3]\n"
"fmla z12.s, z6.s, z1.s[3]\n"
"fmla z16.s, z6.s, z2.s[3]\n"
"fmla z20.s, z6.s, z3.s[3]\n"
- "ld1w { z6.s }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1w { z6.s }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"fmla z9.s, z7.s, z0.s[3]\n"
"fmla z13.s, z7.s, z1.s[3]\n"
"fmla z17.s, z7.s, z2.s[3]\n"
"fmla z21.s, z7.s, z3.s[3]\n"
- "ld1w { z7.s }, p5/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1w { z7.s }, p5/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"fmla z10.s, z6.s, z0.s[3]\n"
"fmla z14.s, z6.s, z1.s[3]\n"
"fmla z18.s, z6.s, z2.s[3]\n"
@@ -1126,28 +1126,28 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"fmla z23.s, z7.s, z3.s[3]\n"
"bgt 52b\n"
"53:" // Height 4: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1rqw { z1.s }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
- "ld1rqw { z2.s }, p0/Z, [x24]\n"
- "ld1rqw { z3.s }, p0/Z, [x23]\n"
- "ld1w { z6.s }, p5/Z, [x12]\n"
- "ld1w { z7.s }, p5/Z, [x11]\n"
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rqw { z2.s }, p0/Z, [x23]\n"
+ "ld1rqw { z3.s }, p0/Z, [x22]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
"fmla z12.s, z6.s, z1.s[0]\n"
"fmla z16.s, z6.s, z2.s[0]\n"
"fmla z20.s, z6.s, z3.s[0]\n"
- "ld1w { z6.s }, p5/Z, [x10]\n"
- "addvl x12, x12, #1\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "addvl x11, x11, #1\n"
"fmla z9.s, z7.s, z0.s[0]\n"
"fmla z13.s, z7.s, z1.s[0]\n"
- "addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z17.s, z7.s, z2.s[0]\n"
"fmla z21.s, z7.s, z3.s[0]\n"
- "ld1w { z7.s }, p5/Z, [x9]\n"
- "addvl x9, x9, #1\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
"fmla z10.s, z6.s, z0.s[0]\n"
"fmla z14.s, z6.s, z1.s[0]\n"
"fmla z18.s, z6.s, z2.s[0]\n"
@@ -1157,25 +1157,25 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"fmla z19.s, z7.s, z2.s[0]\n"
"fmla z23.s, z7.s, z3.s[0]\n"
"ble 54f\n"
- "ld1w { z6.s }, p5/Z, [x12]\n"
- "ld1w { z7.s }, p5/Z, [x11]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
"fmla z8.s, z6.s, z0.s[1]\n"
"fmla z12.s, z6.s, z1.s[1]\n"
"fmla z16.s, z6.s, z2.s[1]\n"
"fmla z20.s, z6.s, z3.s[1]\n"
- "ld1w { z6.s }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
"fmla z9.s, z7.s, z0.s[1]\n"
"fmla z13.s, z7.s, z1.s[1]\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z17.s, z7.s, z2.s[1]\n"
"fmla z21.s, z7.s, z3.s[1]\n"
- "ld1w { z7.s }, p5/Z, [x9]\n"
- "addvl x10, x10, #1\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "addvl x9, x9, #1\n"
"fmla z10.s, z6.s, z0.s[1]\n"
"fmla z14.s, z6.s, z1.s[1]\n"
- "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"fmla z18.s, z6.s, z2.s[1]\n"
"fmla z22.s, z6.s, z3.s[1]\n"
"fmla z11.s, z7.s, z0.s[1]\n"
@@ -1183,25 +1183,25 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"fmla z19.s, z7.s, z2.s[1]\n"
"fmla z23.s, z7.s, z3.s[1]\n"
"ble 54f\n"
- "ld1w { z6.s }, p5/Z, [x12]\n"
- "ld1w { z7.s }, p5/Z, [x11]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
"fmla z8.s, z6.s, z0.s[2]\n"
"fmla z12.s, z6.s, z1.s[2]\n"
"fmla z16.s, z6.s, z2.s[2]\n"
"fmla z20.s, z6.s, z3.s[2]\n"
- "ld1w { z6.s }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
"fmla z9.s, z7.s, z0.s[2]\n"
"fmla z13.s, z7.s, z1.s[2]\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z17.s, z7.s, z2.s[2]\n"
"fmla z21.s, z7.s, z3.s[2]\n"
- "ld1w { z7.s }, p5/Z, [x9]\n"
- "addvl x10, x10, #1\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "addvl x9, x9, #1\n"
"fmla z10.s, z6.s, z0.s[2]\n"
"fmla z14.s, z6.s, z1.s[2]\n"
- "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
"fmla z18.s, z6.s, z2.s[2]\n"
"fmla z22.s, z6.s, z3.s[2]\n"
"fmla z11.s, z7.s, z0.s[2]\n"
@@ -1209,22 +1209,22 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"fmla z19.s, z7.s, z2.s[2]\n"
"fmla z23.s, z7.s, z3.s[2]\n"
"ble 54f\n"
- "ld1w { z6.s }, p5/Z, [x12]\n"
- "ld1w { z7.s }, p5/Z, [x11]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
"fmla z8.s, z6.s, z0.s[3]\n"
"fmla z12.s, z6.s, z1.s[3]\n"
"fmla z16.s, z6.s, z2.s[3]\n"
"fmla z20.s, z6.s, z3.s[3]\n"
- "ld1w { z6.s }, p5/Z, [x10]\n"
- "addvl x12, x12, #1\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "addvl x11, x11, #1\n"
"fmla z9.s, z7.s, z0.s[3]\n"
"fmla z13.s, z7.s, z1.s[3]\n"
- "addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z17.s, z7.s, z2.s[3]\n"
"fmla z21.s, z7.s, z3.s[3]\n"
- "ld1w { z7.s }, p5/Z, [x9]\n"
- "addvl x9, x9, #1\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
"fmla z10.s, z6.s, z0.s[3]\n"
"fmla z14.s, z6.s, z1.s[3]\n"
"fmla z18.s, z6.s, z2.s[3]\n"
@@ -1234,19 +1234,19 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"fmla z19.s, z7.s, z2.s[3]\n"
"fmla z23.s, z7.s, z3.s[3]\n"
"54:" // Height 4: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 49b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"tbz %x[flags], #1, 55f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
"fmin z8.s, p5/M, z8.s, z1.s\n"
"fmin z9.s, p5/M, z9.s, z1.s\n"
"fmin z10.s, p5/M, z10.s, z1.s\n"
@@ -1280,75 +1280,75 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"fmax z22.s, p5/M, z22.s, z0.s\n"
"fmax z23.s, p5/M, z23.s, z0.s\n"
"55:" // Height 4: No activation
- "st1w { z8.s }, p4, [x13]\n"
- "st1w { z9.s }, p3, [x13, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x13, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x13, #3, MUL VL]\n"
- "addvl x13, x13, #4\n"
- "st1w { z12.s }, p4, [x25]\n"
- "st1w { z13.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x24]\n"
- "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z20.s }, p4, [x23]\n"
- "st1w { z21.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z22.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z23.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z8.s }, p4, [x12]\n"
+ "st1w { z9.s }, p3, [x12, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x12, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1w { z12.s }, p4, [x24]\n"
+ "st1w { z13.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x23]\n"
+ "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z20.s }, p4, [x22]\n"
+ "st1w { z21.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z22.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z23.s }, p1, [x22, #3, MUL VL]\n"
"56:" // Height 4: Writeback done
- "decw x14, ALL, MUL #4\n"
- "cmp x14, XZR\n"
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
"bgt 44b\n"
"b 86f\n"
"57:" // Height 5
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"58:" // Height 5: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #2\n"
- "cntw x21, ALL, MUL #3\n"
- "add x10, x11, x20, LSL #2\n"
- "add x9, x10, x20, LSL #2\n"
- "add x20, x9, x20, LSL #2\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 59f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 59f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 59f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"59:" // Height 5: B setup done
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x14\n"
- "incw x20\n"
- "whilelt p3.s, x20, x14\n"
- "incw x20\n"
- "whilelt p2.s, x20, x14\n"
- "incw x20\n"
- "whilelt p1.s, x20, x14\n"
- "cbz x15, 60f\n"
- "ld1w { z8.s }, p5/Z, [x15]\n"
- "ld1w { z9.s }, p5/Z, [x15, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x14, 60f\n"
+ "ld1w { z8.s }, p5/Z, [x14]\n"
+ "ld1w { z9.s }, p5/Z, [x14, #1, MUL VL]\n"
"mov z12.d, z8.d\n"
"mov z13.d, z9.d\n"
- "ld1w { z10.s }, p5/Z, [x15, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x15, #3, MUL VL]\n"
+ "ld1w { z10.s }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x14, #3, MUL VL]\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x15, x15, #4\n"
+ "addvl x14, x14, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"mov z20.d, z8.d\n"
@@ -1362,31 +1362,31 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"b 62f\n"
"60:" // Height 5: no bias
"tbz %x[flags], #0, 61f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z8.s }, p4/Z, [x13]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "ld1w { z9.s }, p3/Z, [x13, #1, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x13, #2, MUL VL]\n"
- "ld1w { z11.s }, p1/Z, [x13, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x25]\n"
- "ld1w { z13.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x25, #2, MUL VL]\n"
- "ld1w { z15.s }, p1/Z, [x25, #3, MUL VL]\n"
- "ld1w { z16.s }, p4/Z, [x24]\n"
- "ld1w { z17.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1w { z19.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x23]\n"
- "ld1w { z21.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z22.s }, p2/Z, [x23, #2, MUL VL]\n"
- "ld1w { z23.s }, p1/Z, [x23, #3, MUL VL]\n"
- "ld1w { z24.s }, p4/Z, [x22]\n"
- "ld1w { z25.s }, p3/Z, [x22, #1, MUL VL]\n"
- "ld1w { z26.s }, p2/Z, [x22, #2, MUL VL]\n"
- "ld1w { z27.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z8.s }, p4/Z, [x12]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z9.s }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z11.s }, p1/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x23]\n"
+ "ld1w { z17.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z19.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z20.s }, p4/Z, [x22]\n"
+ "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z24.s }, p4/Z, [x21]\n"
+ "ld1w { z25.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z26.s }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z27.s }, p1/Z, [x21, #3, MUL VL]\n"
"b 62f\n"
"61:" // Height 5: no accumulate
"mov z8.b, #0x0\n"
@@ -1410,140 +1410,140 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"mov z26.b, #0x0\n"
"mov z27.b, #0x0\n"
"62:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"63:" // Height 5: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 64f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 65f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x22, x22, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 65f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
"b 65f\n"
"64:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"65:" // Height 5: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"ble 67f\n"
"66:" // Height 5: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1rqw { z1.s }, p0/Z, [x25]\n"
- "sub x27, x27, #0x4\n"
- "ld1rqw { z2.s }, p0/Z, [x24]\n"
- "ld1rqw { z3.s }, p0/Z, [x23]\n"
- "cmp x27, #0x4\n"
- "add x26, x26, #0x10\n"
- "ld1rqw { z4.s }, p0/Z, [x22]\n"
- "ld1w { z6.s }, p5/Z, [x12]\n"
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
+ "sub x26, x26, #0x4\n"
+ "ld1rqw { z2.s }, p0/Z, [x23]\n"
+ "ld1rqw { z3.s }, p0/Z, [x22]\n"
+ "cmp x26, #0x4\n"
+ "add x25, x25, #0x10\n"
+ "ld1rqw { z4.s }, p0/Z, [x21]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
"fmla z12.s, z6.s, z1.s[0]\n"
- "ld1w { z7.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
"fmla z16.s, z6.s, z2.s[0]\n"
"fmla z20.s, z6.s, z3.s[0]\n"
- "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"fmla z24.s, z6.s, z4.s[0]\n"
"fmla z9.s, z7.s, z0.s[0]\n"
- "ld1w { z6.s }, p5/Z, [x10]\n"
- "add x24, x24, #0x10\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "add x23, x23, #0x10\n"
"fmla z13.s, z7.s, z1.s[0]\n"
"fmla z17.s, z7.s, z2.s[0]\n"
- "add x23, x23, #0x10\n"
"add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
"fmla z21.s, z7.s, z3.s[0]\n"
"fmla z25.s, z7.s, z4.s[0]\n"
- "ld1w { z7.s }, p5/Z, [x9]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
"fmla z10.s, z6.s, z0.s[0]\n"
"fmla z14.s, z6.s, z1.s[0]\n"
"fmla z18.s, z6.s, z2.s[0]\n"
"fmla z22.s, z6.s, z3.s[0]\n"
"fmla z26.s, z6.s, z4.s[0]\n"
"fmla z11.s, z7.s, z0.s[0]\n"
- "ld1w { z6.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z6.s }, p5/Z, [x11, #1, MUL VL]\n"
"fmla z15.s, z7.s, z1.s[0]\n"
"fmla z19.s, z7.s, z2.s[0]\n"
"fmla z23.s, z7.s, z3.s[0]\n"
"fmla z27.s, z7.s, z4.s[0]\n"
- "ld1w { z7.s }, p5/Z, [x11, #1, MUL VL]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[1]\n"
"fmla z12.s, z6.s, z1.s[1]\n"
"fmla z16.s, z6.s, z2.s[1]\n"
"fmla z20.s, z6.s, z3.s[1]\n"
"fmla z24.s, z6.s, z4.s[1]\n"
"fmla z9.s, z7.s, z0.s[1]\n"
- "ld1w { z6.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1w { z6.s }, p5/Z, [x9, #1, MUL VL]\n"
"fmla z13.s, z7.s, z1.s[1]\n"
"fmla z17.s, z7.s, z2.s[1]\n"
"fmla z21.s, z7.s, z3.s[1]\n"
"fmla z25.s, z7.s, z4.s[1]\n"
- "ld1w { z7.s }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z7.s }, p5/Z, [x28, #1, MUL VL]\n"
"fmla z10.s, z6.s, z0.s[1]\n"
"fmla z14.s, z6.s, z1.s[1]\n"
"fmla z18.s, z6.s, z2.s[1]\n"
"fmla z22.s, z6.s, z3.s[1]\n"
"fmla z26.s, z6.s, z4.s[1]\n"
"fmla z11.s, z7.s, z0.s[1]\n"
- "ld1w { z6.s }, p5/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z6.s }, p5/Z, [x11, #2, MUL VL]\n"
"fmla z15.s, z7.s, z1.s[1]\n"
"fmla z19.s, z7.s, z2.s[1]\n"
"fmla z23.s, z7.s, z3.s[1]\n"
"fmla z27.s, z7.s, z4.s[1]\n"
- "ld1w { z7.s }, p5/Z, [x11, #2, MUL VL]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[2]\n"
"fmla z12.s, z6.s, z1.s[2]\n"
"fmla z16.s, z6.s, z2.s[2]\n"
"fmla z20.s, z6.s, z3.s[2]\n"
"fmla z24.s, z6.s, z4.s[2]\n"
"fmla z9.s, z7.s, z0.s[2]\n"
- "ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1w { z6.s }, p5/Z, [x9, #2, MUL VL]\n"
"fmla z13.s, z7.s, z1.s[2]\n"
"fmla z17.s, z7.s, z2.s[2]\n"
"fmla z21.s, z7.s, z3.s[2]\n"
"fmla z25.s, z7.s, z4.s[2]\n"
- "ld1w { z7.s }, p5/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z7.s }, p5/Z, [x28, #2, MUL VL]\n"
"fmla z10.s, z6.s, z0.s[2]\n"
"fmla z14.s, z6.s, z1.s[2]\n"
"fmla z18.s, z6.s, z2.s[2]\n"
"fmla z22.s, z6.s, z3.s[2]\n"
"fmla z26.s, z6.s, z4.s[2]\n"
"fmla z11.s, z7.s, z0.s[2]\n"
- "ld1w { z6.s }, p5/Z, [x12, #3, MUL VL]\n"
- "addvl x12, x12, #4\n"
+ "ld1w { z6.s }, p5/Z, [x11, #3, MUL VL]\n"
+ "addvl x11, x11, #4\n"
"fmla z15.s, z7.s, z1.s[2]\n"
"fmla z19.s, z7.s, z2.s[2]\n"
"fmla z23.s, z7.s, z3.s[2]\n"
"fmla z27.s, z7.s, z4.s[2]\n"
- "ld1w { z7.s }, p5/Z, [x11, #3, MUL VL]\n"
- "addvl x11, x11, #4\n"
+ "ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"fmla z8.s, z6.s, z0.s[3]\n"
"fmla z12.s, z6.s, z1.s[3]\n"
"fmla z16.s, z6.s, z2.s[3]\n"
"fmla z20.s, z6.s, z3.s[3]\n"
"fmla z24.s, z6.s, z4.s[3]\n"
"fmla z9.s, z7.s, z0.s[3]\n"
- "ld1w { z6.s }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1w { z6.s }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"fmla z13.s, z7.s, z1.s[3]\n"
"fmla z17.s, z7.s, z2.s[3]\n"
"fmla z21.s, z7.s, z3.s[3]\n"
"fmla z25.s, z7.s, z4.s[3]\n"
- "ld1w { z7.s }, p5/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1w { z7.s }, p5/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"fmla z10.s, z6.s, z0.s[3]\n"
"fmla z14.s, z6.s, z1.s[3]\n"
"fmla z18.s, z6.s, z2.s[3]\n"
@@ -1556,31 +1556,31 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"fmla z27.s, z7.s, z4.s[3]\n"
"bgt 66b\n"
"67:" // Height 5: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1rqw { z1.s }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
- "ld1rqw { z2.s }, p0/Z, [x24]\n"
- "ld1rqw { z3.s }, p0/Z, [x23]\n"
- "ld1rqw { z4.s }, p0/Z, [x22]\n"
- "ld1w { z6.s }, p5/Z, [x12]\n"
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rqw { z2.s }, p0/Z, [x23]\n"
+ "ld1rqw { z3.s }, p0/Z, [x22]\n"
+ "ld1rqw { z4.s }, p0/Z, [x21]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
"fmla z12.s, z6.s, z1.s[0]\n"
- "ld1w { z7.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
"fmla z16.s, z6.s, z2.s[0]\n"
"fmla z20.s, z6.s, z3.s[0]\n"
- "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z24.s, z6.s, z4.s[0]\n"
"fmla z9.s, z7.s, z0.s[0]\n"
- "ld1w { z6.s }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
"fmla z13.s, z7.s, z1.s[0]\n"
"fmla z17.s, z7.s, z2.s[0]\n"
- "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z21.s, z7.s, z3.s[0]\n"
"fmla z25.s, z7.s, z4.s[0]\n"
- "ld1w { z7.s }, p5/Z, [x9]\n"
- "addvl x9, x9, #1\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
"fmla z10.s, z6.s, z0.s[0]\n"
"fmla z14.s, z6.s, z1.s[0]\n"
"fmla z18.s, z6.s, z2.s[0]\n"
@@ -1592,25 +1592,25 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"fmla z23.s, z7.s, z3.s[0]\n"
"fmla z27.s, z7.s, z4.s[0]\n"
"ble 68f\n"
- "ld1w { z6.s }, p5/Z, [x12]\n"
- "ld1w { z7.s }, p5/Z, [x11]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
"fmla z8.s, z6.s, z0.s[1]\n"
"fmla z12.s, z6.s, z1.s[1]\n"
"fmla z16.s, z6.s, z2.s[1]\n"
"fmla z20.s, z6.s, z3.s[1]\n"
- "subs x27, x27, #0x1\n"
- "addvl x12, x12, #1\n"
+ "subs x26, x26, #0x1\n"
+ "addvl x11, x11, #1\n"
"fmla z24.s, z6.s, z4.s[1]\n"
"fmla z9.s, z7.s, z0.s[1]\n"
- "ld1w { z6.s }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
"fmla z13.s, z7.s, z1.s[1]\n"
"fmla z17.s, z7.s, z2.s[1]\n"
- "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z21.s, z7.s, z3.s[1]\n"
"fmla z25.s, z7.s, z4.s[1]\n"
- "ld1w { z7.s }, p5/Z, [x9]\n"
- "addvl x9, x9, #1\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
"fmla z10.s, z6.s, z0.s[1]\n"
"fmla z14.s, z6.s, z1.s[1]\n"
"fmla z18.s, z6.s, z2.s[1]\n"
@@ -1622,25 +1622,25 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"fmla z23.s, z7.s, z3.s[1]\n"
"fmla z27.s, z7.s, z4.s[1]\n"
"ble 68f\n"
- "ld1w { z6.s }, p5/Z, [x12]\n"
- "ld1w { z7.s }, p5/Z, [x11]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
"fmla z8.s, z6.s, z0.s[2]\n"
"fmla z12.s, z6.s, z1.s[2]\n"
"fmla z16.s, z6.s, z2.s[2]\n"
"fmla z20.s, z6.s, z3.s[2]\n"
- "subs x27, x27, #0x1\n"
- "addvl x12, x12, #1\n"
+ "subs x26, x26, #0x1\n"
+ "addvl x11, x11, #1\n"
"fmla z24.s, z6.s, z4.s[2]\n"
"fmla z9.s, z7.s, z0.s[2]\n"
- "ld1w { z6.s }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
"fmla z13.s, z7.s, z1.s[2]\n"
"fmla z17.s, z7.s, z2.s[2]\n"
- "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z21.s, z7.s, z3.s[2]\n"
"fmla z25.s, z7.s, z4.s[2]\n"
- "ld1w { z7.s }, p5/Z, [x9]\n"
- "addvl x9, x9, #1\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
"fmla z10.s, z6.s, z0.s[2]\n"
"fmla z14.s, z6.s, z1.s[2]\n"
"fmla z18.s, z6.s, z2.s[2]\n"
@@ -1652,24 +1652,24 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"fmla z23.s, z7.s, z3.s[2]\n"
"fmla z27.s, z7.s, z4.s[2]\n"
"ble 68f\n"
- "ld1w { z6.s }, p5/Z, [x12]\n"
- "ld1w { z7.s }, p5/Z, [x11]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
"fmla z8.s, z6.s, z0.s[3]\n"
"fmla z12.s, z6.s, z1.s[3]\n"
"fmla z16.s, z6.s, z2.s[3]\n"
"fmla z20.s, z6.s, z3.s[3]\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z24.s, z6.s, z4.s[3]\n"
"fmla z9.s, z7.s, z0.s[3]\n"
- "ld1w { z6.s }, p5/Z, [x10]\n"
- "addvl x10, x10, #1\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "addvl x9, x9, #1\n"
"fmla z13.s, z7.s, z1.s[3]\n"
"fmla z17.s, z7.s, z2.s[3]\n"
"fmla z21.s, z7.s, z3.s[3]\n"
"fmla z25.s, z7.s, z4.s[3]\n"
- "ld1w { z7.s }, p5/Z, [x9]\n"
- "addvl x9, x9, #1\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
"fmla z10.s, z6.s, z0.s[3]\n"
"fmla z14.s, z6.s, z1.s[3]\n"
"fmla z18.s, z6.s, z2.s[3]\n"
@@ -1681,20 +1681,20 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"fmla z23.s, z7.s, z3.s[3]\n"
"fmla z27.s, z7.s, z4.s[3]\n"
"68:" // Height 5: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 63b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"tbz %x[flags], #1, 69f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
"fmin z8.s, p5/M, z8.s, z1.s\n"
"fmin z9.s, p5/M, z9.s, z1.s\n"
"fmin z10.s, p5/M, z10.s, z1.s\n"
@@ -1736,82 +1736,82 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"fmax z26.s, p5/M, z26.s, z0.s\n"
"fmax z27.s, p5/M, z27.s, z0.s\n"
"69:" // Height 5: No activation
- "st1w { z8.s }, p4, [x13]\n"
- "st1w { z9.s }, p3, [x13, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x13, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x13, #3, MUL VL]\n"
- "addvl x13, x13, #4\n"
- "st1w { z12.s }, p4, [x25]\n"
- "st1w { z13.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x24]\n"
- "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z20.s }, p4, [x23]\n"
- "st1w { z21.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z22.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z23.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z24.s }, p4, [x22]\n"
- "st1w { z25.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z26.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z27.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z8.s }, p4, [x12]\n"
+ "st1w { z9.s }, p3, [x12, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x12, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1w { z12.s }, p4, [x24]\n"
+ "st1w { z13.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x23]\n"
+ "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z20.s }, p4, [x22]\n"
+ "st1w { z21.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z22.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z23.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x21]\n"
+ "st1w { z25.s }, p3, [x21, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x21, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x21, #3, MUL VL]\n"
"70:" // Height 5: Writeback done
- "decw x14, ALL, MUL #4\n"
- "cmp x14, XZR\n"
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
"bgt 58b\n"
"b 86f\n"
"71:" // Height 6
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x21, #0x18\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x15, %x[bias]\n"
- "mov x13, %x[output_ptr]\n"
- "madd %x[output_ptr], x20, x21, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x20, #0x18\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x14, %x[bias]\n"
+ "mov x12, %x[output_ptr]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
"72:" // Height 6: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #2\n"
- "cntw x21, ALL, MUL #3\n"
- "add x10, x11, x20, LSL #2\n"
- "add x9, x10, x20, LSL #2\n"
- "add x20, x9, x20, LSL #2\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 73f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 73f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 73f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"73:" // Height 6: B setup done
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x14\n"
- "incw x20\n"
- "whilelt p3.s, x20, x14\n"
- "incw x20\n"
- "whilelt p2.s, x20, x14\n"
- "incw x20\n"
- "whilelt p1.s, x20, x14\n"
- "cbz x15, 74f\n"
- "ld1w { z8.s }, p5/Z, [x15]\n"
- "ld1w { z9.s }, p5/Z, [x15, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x14, 74f\n"
+ "ld1w { z8.s }, p5/Z, [x14]\n"
+ "ld1w { z9.s }, p5/Z, [x14, #1, MUL VL]\n"
"mov z12.d, z8.d\n"
"mov z13.d, z9.d\n"
- "ld1w { z10.s }, p5/Z, [x15, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x15, #3, MUL VL]\n"
+ "ld1w { z10.s }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x14, #3, MUL VL]\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x15, x15, #4\n"
+ "addvl x14, x14, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"mov z20.d, z8.d\n"
@@ -1829,36 +1829,36 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"b 76f\n"
"74:" // Height 6: no bias
"tbz %x[flags], #0, 75f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z8.s }, p4/Z, [x13]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "ld1w { z9.s }, p3/Z, [x13, #1, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x13, #2, MUL VL]\n"
- "add x21, x22, x20, LSL #2\n"
- "ld1w { z11.s }, p1/Z, [x13, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x25]\n"
- "ld1w { z13.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x25, #2, MUL VL]\n"
- "ld1w { z15.s }, p1/Z, [x25, #3, MUL VL]\n"
- "ld1w { z16.s }, p4/Z, [x24]\n"
- "ld1w { z17.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1w { z19.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x23]\n"
- "ld1w { z21.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z22.s }, p2/Z, [x23, #2, MUL VL]\n"
- "ld1w { z23.s }, p1/Z, [x23, #3, MUL VL]\n"
- "ld1w { z24.s }, p4/Z, [x22]\n"
- "ld1w { z25.s }, p3/Z, [x22, #1, MUL VL]\n"
- "ld1w { z26.s }, p2/Z, [x22, #2, MUL VL]\n"
- "ld1w { z27.s }, p1/Z, [x22, #3, MUL VL]\n"
- "ld1w { z28.s }, p4/Z, [x21]\n"
- "ld1w { z29.s }, p3/Z, [x21, #1, MUL VL]\n"
- "ld1w { z30.s }, p2/Z, [x21, #2, MUL VL]\n"
- "ld1w { z31.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z8.s }, p4/Z, [x12]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z9.s }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "add x20, x21, x19, LSL #2\n"
+ "ld1w { z11.s }, p1/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x23]\n"
+ "ld1w { z17.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z19.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z20.s }, p4/Z, [x22]\n"
+ "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z24.s }, p4/Z, [x21]\n"
+ "ld1w { z25.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z26.s }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z27.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "ld1w { z28.s }, p4/Z, [x20]\n"
+ "ld1w { z29.s }, p3/Z, [x20, #1, MUL VL]\n"
+ "ld1w { z30.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z31.s }, p1/Z, [x20, #3, MUL VL]\n"
"b 76f\n"
"75:" // Height 6: no accumulate
"mov z8.b, #0x0\n"
@@ -1886,159 +1886,159 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"mov z30.b, #0x0\n"
"mov z31.b, #0x0\n"
"76:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"77:" // Height 6: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 78f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 79f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x22, x22, x20, LSL #2\n"
- "add x21, x21, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 79f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "add x20, x20, x19, LSL #2\n"
"b 79f\n"
"78:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"79:" // Height 6: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"ble 81f\n"
"80:" // Height 6: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1rqw { z1.s }, p0/Z, [x25]\n"
- "sub x27, x27, #0x4\n"
- "ld1rqw { z2.s }, p0/Z, [x24]\n"
- "ld1rqw { z3.s }, p0/Z, [x23]\n"
- "cmp x27, #0x4\n"
- "add x26, x26, #0x10\n"
- "ld1rqw { z4.s }, p0/Z, [x22]\n"
- "ld1rqw { z5.s }, p0/Z, [x21]\n"
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
+ "sub x26, x26, #0x4\n"
+ "ld1rqw { z2.s }, p0/Z, [x23]\n"
+ "ld1rqw { z3.s }, p0/Z, [x22]\n"
+ "cmp x26, #0x4\n"
"add x25, x25, #0x10\n"
+ "ld1rqw { z4.s }, p0/Z, [x21]\n"
+ "ld1rqw { z5.s }, p0/Z, [x20]\n"
"add x24, x24, #0x10\n"
- "ld1w { z6.s }, p5/Z, [x12]\n"
- "ld1w { z7.s }, p5/Z, [x11]\n"
+ "add x23, x23, #0x10\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
"fmla z12.s, z6.s, z1.s[0]\n"
"fmla z16.s, z6.s, z2.s[0]\n"
"fmla z20.s, z6.s, z3.s[0]\n"
- "add x23, x23, #0x10\n"
"add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
"fmla z24.s, z6.s, z4.s[0]\n"
"fmla z28.s, z6.s, z5.s[0]\n"
- "ld1w { z6.s }, p5/Z, [x10]\n"
- "add x21, x21, #0x10\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "add x20, x20, #0x10\n"
"fmla z9.s, z7.s, z0.s[0]\n"
"fmla z13.s, z7.s, z1.s[0]\n"
"fmla z17.s, z7.s, z2.s[0]\n"
"fmla z21.s, z7.s, z3.s[0]\n"
"fmla z25.s, z7.s, z4.s[0]\n"
"fmla z29.s, z7.s, z5.s[0]\n"
- "ld1w { z7.s }, p5/Z, [x9]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
"fmla z10.s, z6.s, z0.s[0]\n"
"fmla z14.s, z6.s, z1.s[0]\n"
"fmla z18.s, z6.s, z2.s[0]\n"
"fmla z22.s, z6.s, z3.s[0]\n"
"fmla z26.s, z6.s, z4.s[0]\n"
"fmla z30.s, z6.s, z5.s[0]\n"
- "ld1w { z6.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z6.s }, p5/Z, [x11, #1, MUL VL]\n"
"fmla z11.s, z7.s, z0.s[0]\n"
"fmla z15.s, z7.s, z1.s[0]\n"
"fmla z19.s, z7.s, z2.s[0]\n"
"fmla z23.s, z7.s, z3.s[0]\n"
"fmla z27.s, z7.s, z4.s[0]\n"
"fmla z31.s, z7.s, z5.s[0]\n"
- "ld1w { z7.s }, p5/Z, [x11, #1, MUL VL]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[1]\n"
"fmla z12.s, z6.s, z1.s[1]\n"
"fmla z16.s, z6.s, z2.s[1]\n"
"fmla z20.s, z6.s, z3.s[1]\n"
"fmla z24.s, z6.s, z4.s[1]\n"
"fmla z28.s, z6.s, z5.s[1]\n"
- "ld1w { z6.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1w { z6.s }, p5/Z, [x9, #1, MUL VL]\n"
"fmla z9.s, z7.s, z0.s[1]\n"
"fmla z13.s, z7.s, z1.s[1]\n"
"fmla z17.s, z7.s, z2.s[1]\n"
"fmla z21.s, z7.s, z3.s[1]\n"
"fmla z25.s, z7.s, z4.s[1]\n"
"fmla z29.s, z7.s, z5.s[1]\n"
- "ld1w { z7.s }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z7.s }, p5/Z, [x28, #1, MUL VL]\n"
"fmla z10.s, z6.s, z0.s[1]\n"
"fmla z14.s, z6.s, z1.s[1]\n"
"fmla z18.s, z6.s, z2.s[1]\n"
"fmla z22.s, z6.s, z3.s[1]\n"
"fmla z26.s, z6.s, z4.s[1]\n"
"fmla z30.s, z6.s, z5.s[1]\n"
- "ld1w { z6.s }, p5/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z6.s }, p5/Z, [x11, #2, MUL VL]\n"
"fmla z11.s, z7.s, z0.s[1]\n"
"fmla z15.s, z7.s, z1.s[1]\n"
"fmla z19.s, z7.s, z2.s[1]\n"
"fmla z23.s, z7.s, z3.s[1]\n"
"fmla z27.s, z7.s, z4.s[1]\n"
"fmla z31.s, z7.s, z5.s[1]\n"
- "ld1w { z7.s }, p5/Z, [x11, #2, MUL VL]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[2]\n"
"fmla z12.s, z6.s, z1.s[2]\n"
"fmla z16.s, z6.s, z2.s[2]\n"
"fmla z20.s, z6.s, z3.s[2]\n"
"fmla z24.s, z6.s, z4.s[2]\n"
"fmla z28.s, z6.s, z5.s[2]\n"
- "ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1w { z6.s }, p5/Z, [x9, #2, MUL VL]\n"
"fmla z9.s, z7.s, z0.s[2]\n"
"fmla z13.s, z7.s, z1.s[2]\n"
"fmla z17.s, z7.s, z2.s[2]\n"
"fmla z21.s, z7.s, z3.s[2]\n"
"fmla z25.s, z7.s, z4.s[2]\n"
"fmla z29.s, z7.s, z5.s[2]\n"
- "ld1w { z7.s }, p5/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z7.s }, p5/Z, [x28, #2, MUL VL]\n"
"fmla z10.s, z6.s, z0.s[2]\n"
"fmla z14.s, z6.s, z1.s[2]\n"
"fmla z18.s, z6.s, z2.s[2]\n"
"fmla z22.s, z6.s, z3.s[2]\n"
"fmla z26.s, z6.s, z4.s[2]\n"
"fmla z30.s, z6.s, z5.s[2]\n"
- "ld1w { z6.s }, p5/Z, [x12, #3, MUL VL]\n"
- "addvl x12, x12, #4\n"
+ "ld1w { z6.s }, p5/Z, [x11, #3, MUL VL]\n"
+ "addvl x11, x11, #4\n"
"fmla z11.s, z7.s, z0.s[2]\n"
"fmla z15.s, z7.s, z1.s[2]\n"
"fmla z19.s, z7.s, z2.s[2]\n"
"fmla z23.s, z7.s, z3.s[2]\n"
"fmla z27.s, z7.s, z4.s[2]\n"
"fmla z31.s, z7.s, z5.s[2]\n"
- "ld1w { z7.s }, p5/Z, [x11, #3, MUL VL]\n"
- "addvl x11, x11, #4\n"
+ "ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"fmla z8.s, z6.s, z0.s[3]\n"
"fmla z12.s, z6.s, z1.s[3]\n"
"fmla z16.s, z6.s, z2.s[3]\n"
"fmla z20.s, z6.s, z3.s[3]\n"
"fmla z24.s, z6.s, z4.s[3]\n"
"fmla z28.s, z6.s, z5.s[3]\n"
- "ld1w { z6.s }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1w { z6.s }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"fmla z9.s, z7.s, z0.s[3]\n"
"fmla z13.s, z7.s, z1.s[3]\n"
"fmla z17.s, z7.s, z2.s[3]\n"
"fmla z21.s, z7.s, z3.s[3]\n"
"fmla z25.s, z7.s, z4.s[3]\n"
"fmla z29.s, z7.s, z5.s[3]\n"
- "ld1w { z7.s }, p5/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1w { z7.s }, p5/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"fmla z10.s, z6.s, z0.s[3]\n"
"fmla z14.s, z6.s, z1.s[3]\n"
"fmla z18.s, z6.s, z2.s[3]\n"
@@ -2053,34 +2053,34 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"fmla z31.s, z7.s, z5.s[3]\n"
"bgt 80b\n"
"81:" // Height 6: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1rqw { z1.s }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
- "ld1rqw { z2.s }, p0/Z, [x24]\n"
- "ld1rqw { z3.s }, p0/Z, [x23]\n"
- "ld1rqw { z4.s }, p0/Z, [x22]\n"
- "ld1rqw { z5.s }, p0/Z, [x21]\n"
- "ld1w { z6.s }, p5/Z, [x12]\n"
- "ld1w { z7.s }, p5/Z, [x11]\n"
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rqw { z2.s }, p0/Z, [x23]\n"
+ "ld1rqw { z3.s }, p0/Z, [x22]\n"
+ "ld1rqw { z4.s }, p0/Z, [x21]\n"
+ "ld1rqw { z5.s }, p0/Z, [x20]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
"fmla z12.s, z6.s, z1.s[0]\n"
"fmla z16.s, z6.s, z2.s[0]\n"
"fmla z20.s, z6.s, z3.s[0]\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z24.s, z6.s, z4.s[0]\n"
"fmla z28.s, z6.s, z5.s[0]\n"
- "ld1w { z6.s }, p5/Z, [x10]\n"
- "addvl x10, x10, #1\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "addvl x9, x9, #1\n"
"fmla z9.s, z7.s, z0.s[0]\n"
"fmla z13.s, z7.s, z1.s[0]\n"
"fmla z17.s, z7.s, z2.s[0]\n"
"fmla z21.s, z7.s, z3.s[0]\n"
"fmla z25.s, z7.s, z4.s[0]\n"
"fmla z29.s, z7.s, z5.s[0]\n"
- "ld1w { z7.s }, p5/Z, [x9]\n"
- "addvl x9, x9, #1\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
"fmla z10.s, z6.s, z0.s[0]\n"
"fmla z14.s, z6.s, z1.s[0]\n"
"fmla z18.s, z6.s, z2.s[0]\n"
@@ -2094,27 +2094,27 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"fmla z27.s, z7.s, z4.s[0]\n"
"fmla z31.s, z7.s, z5.s[0]\n"
"ble 82f\n"
- "ld1w { z6.s }, p5/Z, [x12]\n"
- "ld1w { z7.s }, p5/Z, [x11]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
"fmla z8.s, z6.s, z0.s[1]\n"
"fmla z12.s, z6.s, z1.s[1]\n"
"fmla z16.s, z6.s, z2.s[1]\n"
"fmla z20.s, z6.s, z3.s[1]\n"
- "subs x27, x27, #0x1\n"
- "addvl x12, x12, #1\n"
+ "subs x26, x26, #0x1\n"
+ "addvl x11, x11, #1\n"
"fmla z24.s, z6.s, z4.s[1]\n"
"fmla z28.s, z6.s, z5.s[1]\n"
- "ld1w { z6.s }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
"fmla z9.s, z7.s, z0.s[1]\n"
"fmla z13.s, z7.s, z1.s[1]\n"
- "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z17.s, z7.s, z2.s[1]\n"
"fmla z21.s, z7.s, z3.s[1]\n"
"fmla z25.s, z7.s, z4.s[1]\n"
"fmla z29.s, z7.s, z5.s[1]\n"
- "ld1w { z7.s }, p5/Z, [x9]\n"
- "addvl x9, x9, #1\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
"fmla z10.s, z6.s, z0.s[1]\n"
"fmla z14.s, z6.s, z1.s[1]\n"
"fmla z18.s, z6.s, z2.s[1]\n"
@@ -2128,27 +2128,27 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"fmla z27.s, z7.s, z4.s[1]\n"
"fmla z31.s, z7.s, z5.s[1]\n"
"ble 82f\n"
- "ld1w { z6.s }, p5/Z, [x12]\n"
- "ld1w { z7.s }, p5/Z, [x11]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
"fmla z8.s, z6.s, z0.s[2]\n"
"fmla z12.s, z6.s, z1.s[2]\n"
"fmla z16.s, z6.s, z2.s[2]\n"
"fmla z20.s, z6.s, z3.s[2]\n"
- "subs x27, x27, #0x1\n"
- "addvl x12, x12, #1\n"
+ "subs x26, x26, #0x1\n"
+ "addvl x11, x11, #1\n"
"fmla z24.s, z6.s, z4.s[2]\n"
"fmla z28.s, z6.s, z5.s[2]\n"
- "ld1w { z6.s }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
"fmla z9.s, z7.s, z0.s[2]\n"
"fmla z13.s, z7.s, z1.s[2]\n"
- "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z17.s, z7.s, z2.s[2]\n"
"fmla z21.s, z7.s, z3.s[2]\n"
"fmla z25.s, z7.s, z4.s[2]\n"
"fmla z29.s, z7.s, z5.s[2]\n"
- "ld1w { z7.s }, p5/Z, [x9]\n"
- "addvl x9, x9, #1\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
"fmla z10.s, z6.s, z0.s[2]\n"
"fmla z14.s, z6.s, z1.s[2]\n"
"fmla z18.s, z6.s, z2.s[2]\n"
@@ -2162,26 +2162,26 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"fmla z27.s, z7.s, z4.s[2]\n"
"fmla z31.s, z7.s, z5.s[2]\n"
"ble 82f\n"
- "ld1w { z6.s }, p5/Z, [x12]\n"
- "ld1w { z7.s }, p5/Z, [x11]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
"fmla z8.s, z6.s, z0.s[3]\n"
"fmla z12.s, z6.s, z1.s[3]\n"
"fmla z16.s, z6.s, z2.s[3]\n"
"fmla z20.s, z6.s, z3.s[3]\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z24.s, z6.s, z4.s[3]\n"
"fmla z28.s, z6.s, z5.s[3]\n"
- "ld1w { z6.s }, p5/Z, [x10]\n"
- "addvl x10, x10, #1\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "addvl x9, x9, #1\n"
"fmla z9.s, z7.s, z0.s[3]\n"
"fmla z13.s, z7.s, z1.s[3]\n"
"fmla z17.s, z7.s, z2.s[3]\n"
"fmla z21.s, z7.s, z3.s[3]\n"
"fmla z25.s, z7.s, z4.s[3]\n"
"fmla z29.s, z7.s, z5.s[3]\n"
- "ld1w { z7.s }, p5/Z, [x9]\n"
- "addvl x9, x9, #1\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
"fmla z10.s, z6.s, z0.s[3]\n"
"fmla z14.s, z6.s, z1.s[3]\n"
"fmla z18.s, z6.s, z2.s[3]\n"
@@ -2195,21 +2195,21 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"fmla z27.s, z7.s, z4.s[3]\n"
"fmla z31.s, z7.s, z5.s[3]\n"
"82:" // Height 6: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 77b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"tbz %x[flags], #1, 83f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
"fmin z8.s, p5/M, z8.s, z1.s\n"
"fmin z9.s, p5/M, z9.s, z1.s\n"
"fmin z10.s, p5/M, z10.s, z1.s\n"
@@ -2259,50 +2259,50 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"fmax z30.s, p5/M, z30.s, z0.s\n"
"fmax z31.s, p5/M, z31.s, z0.s\n"
"83:" // Height 6: No activation
- "st1w { z8.s }, p4, [x13]\n"
- "st1w { z9.s }, p3, [x13, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x13, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x13, #3, MUL VL]\n"
- "addvl x13, x13, #4\n"
- "st1w { z12.s }, p4, [x25]\n"
- "st1w { z13.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x24]\n"
- "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z20.s }, p4, [x23]\n"
- "st1w { z21.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z22.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z23.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z24.s }, p4, [x22]\n"
- "st1w { z25.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z26.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z27.s }, p1, [x22, #3, MUL VL]\n"
- "st1w { z28.s }, p4, [x21]\n"
- "st1w { z29.s }, p3, [x21, #1, MUL VL]\n"
- "st1w { z30.s }, p2, [x21, #2, MUL VL]\n"
- "st1w { z31.s }, p1, [x21, #3, MUL VL]\n"
+ "st1w { z8.s }, p4, [x12]\n"
+ "st1w { z9.s }, p3, [x12, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x12, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1w { z12.s }, p4, [x24]\n"
+ "st1w { z13.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x23]\n"
+ "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z20.s }, p4, [x22]\n"
+ "st1w { z21.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z22.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z23.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x21]\n"
+ "st1w { z25.s }, p3, [x21, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x21, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x21, #3, MUL VL]\n"
+ "st1w { z28.s }, p4, [x20]\n"
+ "st1w { z29.s }, p3, [x20, #1, MUL VL]\n"
+ "st1w { z30.s }, p2, [x20, #2, MUL VL]\n"
+ "st1w { z31.s }, p1, [x20, #3, MUL VL]\n"
"84:" // Height 6: Writeback done
- "decw x14, ALL, MUL #4\n"
- "cmp x14, XZR\n"
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
"bgt 72b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 86f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 85f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"85:" // Update direct input
- "mov x20, #0x18\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x18\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"86:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32bf16fp32_mmla_4x6VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32bf16fp32_mmla_4x6VL/generic.cpp
index 36fc9d75ca..8e3676a007 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32bf16fp32_mmla_4x6VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32bf16fp32_mmla_4x6VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -103,70 +103,70 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"cmp %x[M], #0x2\n"
"bgt 29f\n"
"beq 15f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "cntw x21, ALL, MUL #5\n"
- "add x28, x9, x20, LSL #1\n"
- "add x27, x28, x20, LSL #1\n"
- "add x20, x27, x20, LSL #1\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "cntw x20, ALL, MUL #5\n"
+ "add x27, x28, x19, LSL #1\n"
+ "add x26, x27, x19, LSL #1\n"
+ "add x19, x26, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 3f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x27, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x26, x11\n"
"bgt 3f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x28, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x27, x11\n"
"bgt 3f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 3f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 3f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"3:" // Height 1: B setup done
- "mov x20, #0x0\n"
- "whilelt p6.s, x20, x14\n"
- "incw x20\n"
- "whilelt p5.s, x20, x14\n"
- "incw x20\n"
- "whilelt p4.s, x20, x14\n"
- "incw x20\n"
- "whilelt p3.s, x20, x14\n"
- "incw x20\n"
- "whilelt p2.s, x20, x14\n"
- "incw x20\n"
- "whilelt p1.s, x20, x14\n"
- "cbz x15, 4f\n"
- "ld1w { z8.s }, p7/Z, [x15]\n"
- "ld1w { z9.s }, p7/Z, [x15, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p6.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p5.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p4.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x14, 4f\n"
+ "ld1w { z8.s }, p7/Z, [x14]\n"
+ "ld1w { z9.s }, p7/Z, [x14, #1, MUL VL]\n"
"zip2 z14.d, z8.d, z8.d\n"
"zip1 z8.d, z8.d, z8.d\n"
- "ld1w { z10.s }, p7/Z, [x15, #2, MUL VL]\n"
- "ld1w { z11.s }, p7/Z, [x15, #3, MUL VL]\n"
+ "ld1w { z10.s }, p7/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p7/Z, [x14, #3, MUL VL]\n"
"zip2 z15.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
- "ld1w { z12.s }, p7/Z, [x15, #4, MUL VL]\n"
- "ld1w { z13.s }, p7/Z, [x15, #5, MUL VL]\n"
+ "ld1w { z12.s }, p7/Z, [x14, #4, MUL VL]\n"
+ "ld1w { z13.s }, p7/Z, [x14, #5, MUL VL]\n"
"zip2 z16.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
"zip2 z17.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
- "addvl x15, x15, #6\n"
+ "addvl x14, x14, #6\n"
"zip2 z18.d, z12.d, z12.d\n"
"zip1 z12.d, z12.d, z12.d\n"
"zip2 z19.d, z13.d, z13.d\n"
@@ -174,16 +174,16 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"b 6f\n"
"4:" // Height 1: no bias
"tbz %x[flags], #0, 5f\n"
- "ld1w { z9.s }, p6/Z, [x13]\n"
- "ld1w { z10.s }, p5/Z, [x13, #1, MUL VL]\n"
+ "ld1w { z9.s }, p6/Z, [x12]\n"
+ "ld1w { z10.s }, p5/Z, [x12, #1, MUL VL]\n"
"zip1 z8.d, z9.d, z14.d\n"
"zip2 z14.d, z9.d, z14.d\n"
- "ld1w { z11.s }, p4/Z, [x13, #2, MUL VL]\n"
- "ld1w { z12.s }, p3/Z, [x13, #3, MUL VL]\n"
+ "ld1w { z11.s }, p4/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x12, #3, MUL VL]\n"
"zip1 z9.d, z10.d, z15.d\n"
"zip2 z15.d, z10.d, z15.d\n"
- "ld1w { z13.s }, p2/Z, [x13, #4, MUL VL]\n"
- "ld1w { z20.s }, p1/Z, [x13, #5, MUL VL]\n"
+ "ld1w { z13.s }, p2/Z, [x12, #4, MUL VL]\n"
+ "ld1w { z20.s }, p1/Z, [x12, #5, MUL VL]\n"
"zip1 z10.d, z11.d, z16.d\n"
"zip2 z16.d, z11.d, z16.d\n"
"zip1 z11.d, z12.d, z17.d\n"
@@ -207,102 +207,102 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"mov z18.b, #0x0\n"
"mov z19.b, #0x0\n"
"6:" // Height 1: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"7:" // Height 1: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w24, [x19, x25, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 8f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "cbnz x26, 9f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "cbnz x25, 9f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19, LSL #2\n"
"b 9f\n"
"8:" // Height 1: setup direct input
- "mov x24, %x[input_ptr]\n"
+ "mov x23, %x[input_ptr]\n"
"9:" // Height 1: input setup done
- "cmp x25, #0x4\n"
+ "cmp x24, #0x4\n"
"ble 11f\n"
"10:" // Height 1: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x25\n"
- "ld1rqw { z0.s }, p0/Z, [x24]\n"
+ "whilelt p0.s, XZR, x24\n"
+ "ld1rqw { z0.s }, p0/Z, [x23]\n"
".inst 0x658abc00 // bfcvt z0.h, p7/M, z0.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "ld1h { z4.h }, p7/Z, [x12]\n"
- "ld1h { z5.h }, p7/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z4.h }, p7/Z, [x11]\n"
+ "ld1h { z5.h }, p7/Z, [x11, #1, MUL VL]\n"
".inst 0x6464e408 // bfmmla z8.s, z0.h, z4.h\n"
".inst 0x6465e40e // bfmmla z14.s, z0.h, z5.h\n"
- "ld1h { z6.h }, p7/Z, [x11]\n"
- "ld1h { z7.h }, p7/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x10]\n"
+ "ld1h { z7.h }, p7/Z, [x10, #1, MUL VL]\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
- "ld1h { z4.h }, p7/Z, [x10]\n"
- "ld1h { z5.h }, p7/Z, [x10, #1, MUL VL]\n"
+ "ld1h { z4.h }, p7/Z, [x9]\n"
+ "ld1h { z5.h }, p7/Z, [x9, #1, MUL VL]\n"
".inst 0x6464e40a // bfmmla z10.s, z0.h, z4.h\n"
".inst 0x6465e410 // bfmmla z16.s, z0.h, z5.h\n"
- "ld1h { z6.h }, p7/Z, [x9]\n"
- "ld1h { z7.h }, p7/Z, [x9, #1, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x28]\n"
+ "ld1h { z7.h }, p7/Z, [x28, #1, MUL VL]\n"
".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
".inst 0x6467e411 // bfmmla z17.s, z0.h, z7.h\n"
- "ld1h { z4.h }, p7/Z, [x28]\n"
- "ld1h { z5.h }, p7/Z, [x28, #1, MUL VL]\n"
- "sub x25, x25, #0x4\n"
- "cmp x25, #0x4\n"
- "ld1h { z6.h }, p7/Z, [x27]\n"
- "ld1h { z7.h }, p7/Z, [x27, #1, MUL VL]\n"
+ "ld1h { z4.h }, p7/Z, [x27]\n"
+ "ld1h { z5.h }, p7/Z, [x27, #1, MUL VL]\n"
+ "sub x24, x24, #0x4\n"
+ "cmp x24, #0x4\n"
+ "ld1h { z6.h }, p7/Z, [x26]\n"
+ "ld1h { z7.h }, p7/Z, [x26, #1, MUL VL]\n"
".inst 0x6464e40c // bfmmla z12.s, z0.h, z4.h\n"
".inst 0x6465e412 // bfmmla z18.s, z0.h, z5.h\n"
".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
".inst 0x6467e413 // bfmmla z19.s, z0.h, z7.h\n"
- "add x24, x24, #0x10\n"
- "addvl x12, x12, #2\n"
+ "add x23, x23, #0x10\n"
"addvl x11, x11, #2\n"
"addvl x10, x10, #2\n"
"addvl x9, x9, #2\n"
"addvl x28, x28, #2\n"
"addvl x27, x27, #2\n"
+ "addvl x26, x26, #2\n"
"bgt 10b\n"
"11:" // Height 1: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x25\n"
- "ld1rqw { z0.s }, p0/Z, [x24]\n"
+ "whilelt p0.s, XZR, x24\n"
+ "ld1rqw { z0.s }, p0/Z, [x23]\n"
".inst 0x658abc00 // bfcvt z0.h, p7/M, z0.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "ld1h { z4.h }, p7/Z, [x12]\n"
- "ld1h { z5.h }, p7/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z4.h }, p7/Z, [x11]\n"
+ "ld1h { z5.h }, p7/Z, [x11, #1, MUL VL]\n"
".inst 0x6464e408 // bfmmla z8.s, z0.h, z4.h\n"
".inst 0x6465e40e // bfmmla z14.s, z0.h, z5.h\n"
- "ld1h { z6.h }, p7/Z, [x11]\n"
- "ld1h { z7.h }, p7/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x10]\n"
+ "ld1h { z7.h }, p7/Z, [x10, #1, MUL VL]\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
- "ld1h { z4.h }, p7/Z, [x10]\n"
- "ld1h { z5.h }, p7/Z, [x10, #1, MUL VL]\n"
+ "ld1h { z4.h }, p7/Z, [x9]\n"
+ "ld1h { z5.h }, p7/Z, [x9, #1, MUL VL]\n"
".inst 0x6464e40a // bfmmla z10.s, z0.h, z4.h\n"
".inst 0x6465e410 // bfmmla z16.s, z0.h, z5.h\n"
- "ld1h { z6.h }, p7/Z, [x9]\n"
- "ld1h { z7.h }, p7/Z, [x9, #1, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x28]\n"
+ "ld1h { z7.h }, p7/Z, [x28, #1, MUL VL]\n"
".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
".inst 0x6467e411 // bfmmla z17.s, z0.h, z7.h\n"
- "ld1h { z4.h }, p7/Z, [x28]\n"
- "ld1h { z5.h }, p7/Z, [x28, #1, MUL VL]\n"
+ "ld1h { z4.h }, p7/Z, [x27]\n"
+ "ld1h { z5.h }, p7/Z, [x27, #1, MUL VL]\n"
".inst 0x6464e40c // bfmmla z12.s, z0.h, z4.h\n"
".inst 0x6465e412 // bfmmla z18.s, z0.h, z5.h\n"
- "ld1h { z6.h }, p7/Z, [x27]\n"
- "ld1h { z7.h }, p7/Z, [x27, #1, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x26]\n"
+ "ld1h { z7.h }, p7/Z, [x26, #1, MUL VL]\n"
".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
".inst 0x6467e413 // bfmmla z19.s, z0.h, z7.h\n"
- "addvl x12, x12, #2\n"
"addvl x11, x11, #2\n"
"addvl x10, x10, #2\n"
"addvl x9, x9, #2\n"
"addvl x28, x28, #2\n"
"addvl x27, x27, #2\n"
+ "addvl x26, x26, #2\n"
"12:" // Height 1: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 7b\n"
"uzp1 z8.d, z8.d, z14.d\n"
"uzp1 z9.d, z9.d, z15.d\n"
@@ -311,10 +311,10 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"uzp1 z12.d, z12.d, z18.d\n"
"uzp1 z13.d, z13.d, z19.d\n"
"tbz %x[flags], #1, 13f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p7/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p7/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p7/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p7/Z, [x19]\n"
"fmin z8.s, p7/M, z8.s, z1.s\n"
"fmin z9.s, p7/M, z9.s, z1.s\n"
"fmin z10.s, p7/M, z10.s, z1.s\n"
@@ -328,83 +328,83 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"fmax z12.s, p7/M, z12.s, z0.s\n"
"fmax z13.s, p7/M, z13.s, z0.s\n"
"13:" // Height 1: No activation
- "st1w { z8.s }, p6, [x13]\n"
- "st1w { z9.s }, p5, [x13, #1, MUL VL]\n"
- "st1w { z10.s }, p4, [x13, #2, MUL VL]\n"
- "st1w { z11.s }, p3, [x13, #3, MUL VL]\n"
- "st1w { z12.s }, p2, [x13, #4, MUL VL]\n"
- "st1w { z13.s }, p1, [x13, #5, MUL VL]\n"
- "addvl x13, x13, #6\n"
+ "st1w { z8.s }, p6, [x12]\n"
+ "st1w { z9.s }, p5, [x12, #1, MUL VL]\n"
+ "st1w { z10.s }, p4, [x12, #2, MUL VL]\n"
+ "st1w { z11.s }, p3, [x12, #3, MUL VL]\n"
+ "st1w { z12.s }, p2, [x12, #4, MUL VL]\n"
+ "st1w { z13.s }, p1, [x12, #5, MUL VL]\n"
+ "addvl x12, x12, #6\n"
"14:" // Height 1: Writeback done
- "decw x14, ALL, MUL #6\n"
- "cmp x14, XZR\n"
+ "decw x13, ALL, MUL #6\n"
+ "cmp x13, XZR\n"
"bgt 2b\n"
"b 58f\n"
"15:" // Height 2
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"16:" // Height 2: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "cntw x21, ALL, MUL #5\n"
- "add x28, x9, x20, LSL #1\n"
- "add x27, x28, x20, LSL #1\n"
- "add x20, x27, x20, LSL #1\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "cntw x20, ALL, MUL #5\n"
+ "add x27, x28, x19, LSL #1\n"
+ "add x26, x27, x19, LSL #1\n"
+ "add x19, x26, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 17f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x27, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x26, x11\n"
"bgt 17f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x28, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x27, x11\n"
"bgt 17f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 17f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 17f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"17:" // Height 2: B setup done
- "mov x20, #0x0\n"
- "whilelt p6.s, x20, x14\n"
- "incw x20\n"
- "whilelt p5.s, x20, x14\n"
- "incw x20\n"
- "whilelt p4.s, x20, x14\n"
- "incw x20\n"
- "whilelt p3.s, x20, x14\n"
- "incw x20\n"
- "whilelt p2.s, x20, x14\n"
- "incw x20\n"
- "whilelt p1.s, x20, x14\n"
- "cbz x15, 18f\n"
- "ld1w { z8.s }, p7/Z, [x15]\n"
- "ld1w { z9.s }, p7/Z, [x15, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p6.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p5.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p4.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x14, 18f\n"
+ "ld1w { z8.s }, p7/Z, [x14]\n"
+ "ld1w { z9.s }, p7/Z, [x14, #1, MUL VL]\n"
"zip2 z14.d, z8.d, z8.d\n"
"zip1 z8.d, z8.d, z8.d\n"
- "ld1w { z10.s }, p7/Z, [x15, #2, MUL VL]\n"
- "ld1w { z11.s }, p7/Z, [x15, #3, MUL VL]\n"
+ "ld1w { z10.s }, p7/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p7/Z, [x14, #3, MUL VL]\n"
"zip2 z15.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
- "ld1w { z12.s }, p7/Z, [x15, #4, MUL VL]\n"
- "ld1w { z13.s }, p7/Z, [x15, #5, MUL VL]\n"
+ "ld1w { z12.s }, p7/Z, [x14, #4, MUL VL]\n"
+ "ld1w { z13.s }, p7/Z, [x14, #5, MUL VL]\n"
"zip2 z16.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
"zip2 z17.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
- "addvl x15, x15, #6\n"
+ "addvl x14, x14, #6\n"
"zip2 z18.d, z12.d, z12.d\n"
"zip1 z12.d, z12.d, z12.d\n"
"zip2 z19.d, z13.d, z13.d\n"
@@ -412,26 +412,26 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"b 20f\n"
"18:" // Height 2: no bias
"tbz %x[flags], #0, 19f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x13, x20, LSL #2\n"
- "ld1w { z9.s }, p6/Z, [x13]\n"
- "ld1w { z10.s }, p5/Z, [x13, #1, MUL VL]\n"
- "ld1w { z11.s }, p4/Z, [x13, #2, MUL VL]\n"
- "ld1w { z12.s }, p3/Z, [x13, #3, MUL VL]\n"
- "ld1w { z13.s }, p2/Z, [x13, #4, MUL VL]\n"
- "ld1w { z20.s }, p1/Z, [x13, #5, MUL VL]\n"
- "ld1w { z14.s }, p6/Z, [x23]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x22, x12, x19, LSL #2\n"
+ "ld1w { z9.s }, p6/Z, [x12]\n"
+ "ld1w { z10.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z11.s }, p4/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z13.s }, p2/Z, [x12, #4, MUL VL]\n"
+ "ld1w { z20.s }, p1/Z, [x12, #5, MUL VL]\n"
+ "ld1w { z14.s }, p6/Z, [x22]\n"
"zip1 z8.d, z9.d, z14.d\n"
"zip2 z14.d, z9.d, z14.d\n"
- "ld1w { z15.s }, p5/Z, [x23, #1, MUL VL]\n"
- "ld1w { z16.s }, p4/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p5/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x22, #2, MUL VL]\n"
"zip1 z9.d, z10.d, z15.d\n"
"zip2 z15.d, z10.d, z15.d\n"
- "ld1w { z17.s }, p3/Z, [x23, #3, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x23, #4, MUL VL]\n"
+ "ld1w { z17.s }, p3/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x22, #4, MUL VL]\n"
"zip1 z10.d, z11.d, z16.d\n"
"zip2 z16.d, z11.d, z16.d\n"
- "ld1w { z19.s }, p1/Z, [x23, #5, MUL VL]\n"
+ "ld1w { z19.s }, p1/Z, [x22, #5, MUL VL]\n"
"zip1 z11.d, z12.d, z17.d\n"
"zip2 z17.d, z12.d, z17.d\n"
"zip1 z12.d, z13.d, z18.d\n"
@@ -453,119 +453,119 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"mov z18.b, #0x0\n"
"mov z19.b, #0x0\n"
"20:" // Height 2: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"21:" // Height 2: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w24, [x19, x25, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 22f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "cbnz x26, 23f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "cbnz x25, 23f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
"b 23f\n"
"22:" // Height 2: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20, LSL #2\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19, LSL #2\n"
"23:" // Height 2: input setup done
- "cmp x25, #0x4\n"
+ "cmp x24, #0x4\n"
"ble 25f\n"
"24:" // Height 2: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x25\n"
- "ld1rqw { z0.s }, p0/Z, [x24]\n"
- "ld1rqw { z1.s }, p0/Z, [x23]\n"
+ "whilelt p0.s, XZR, x24\n"
+ "ld1rqw { z0.s }, p0/Z, [x23]\n"
+ "ld1rqw { z1.s }, p0/Z, [x22]\n"
".inst 0x658abc00 // bfcvt z0.h, p7/M, z0.s\n"
".inst 0x658abc21 // bfcvt z1.h, p7/M, z1.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "ld1h { z4.h }, p7/Z, [x12]\n"
- "ld1h { z5.h }, p7/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z4.h }, p7/Z, [x11]\n"
+ "ld1h { z5.h }, p7/Z, [x11, #1, MUL VL]\n"
"uzp1 z1.h, z1.h, z1.h\n"
"trn1 z0.d, z0.d, z1.d\n"
- "ld1h { z6.h }, p7/Z, [x11]\n"
- "ld1h { z7.h }, p7/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x10]\n"
+ "ld1h { z7.h }, p7/Z, [x10, #1, MUL VL]\n"
".inst 0x6464e408 // bfmmla z8.s, z0.h, z4.h\n"
".inst 0x6465e40e // bfmmla z14.s, z0.h, z5.h\n"
- "ld1h { z4.h }, p7/Z, [x10]\n"
- "ld1h { z5.h }, p7/Z, [x10, #1, MUL VL]\n"
+ "ld1h { z4.h }, p7/Z, [x9]\n"
+ "ld1h { z5.h }, p7/Z, [x9, #1, MUL VL]\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p7/Z, [x9]\n"
- "ld1h { z7.h }, p7/Z, [x9, #1, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x28]\n"
+ "ld1h { z7.h }, p7/Z, [x28, #1, MUL VL]\n"
".inst 0x6464e40a // bfmmla z10.s, z0.h, z4.h\n"
".inst 0x6465e410 // bfmmla z16.s, z0.h, z5.h\n"
- "ld1h { z4.h }, p7/Z, [x28]\n"
- "ld1h { z5.h }, p7/Z, [x28, #1, MUL VL]\n"
+ "ld1h { z4.h }, p7/Z, [x27]\n"
+ "ld1h { z5.h }, p7/Z, [x27, #1, MUL VL]\n"
".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
".inst 0x6467e411 // bfmmla z17.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p7/Z, [x27]\n"
- "ld1h { z7.h }, p7/Z, [x27, #1, MUL VL]\n"
- "sub x25, x25, #0x4\n"
- "cmp x25, #0x4\n"
+ "ld1h { z6.h }, p7/Z, [x26]\n"
+ "ld1h { z7.h }, p7/Z, [x26, #1, MUL VL]\n"
+ "sub x24, x24, #0x4\n"
+ "cmp x24, #0x4\n"
".inst 0x6464e40c // bfmmla z12.s, z0.h, z4.h\n"
".inst 0x6465e412 // bfmmla z18.s, z0.h, z5.h\n"
".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
".inst 0x6467e413 // bfmmla z19.s, z0.h, z7.h\n"
- "add x24, x24, #0x10\n"
"add x23, x23, #0x10\n"
- "addvl x12, x12, #2\n"
+ "add x22, x22, #0x10\n"
"addvl x11, x11, #2\n"
"addvl x10, x10, #2\n"
"addvl x9, x9, #2\n"
"addvl x28, x28, #2\n"
"addvl x27, x27, #2\n"
+ "addvl x26, x26, #2\n"
"bgt 24b\n"
"25:" // Height 2: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x25\n"
- "ld1rqw { z0.s }, p0/Z, [x24]\n"
- "ld1rqw { z1.s }, p0/Z, [x23]\n"
+ "whilelt p0.s, XZR, x24\n"
+ "ld1rqw { z0.s }, p0/Z, [x23]\n"
+ "ld1rqw { z1.s }, p0/Z, [x22]\n"
".inst 0x658abc00 // bfcvt z0.h, p7/M, z0.s\n"
".inst 0x658abc21 // bfcvt z1.h, p7/M, z1.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "ld1h { z4.h }, p7/Z, [x12]\n"
- "ld1h { z5.h }, p7/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z4.h }, p7/Z, [x11]\n"
+ "ld1h { z5.h }, p7/Z, [x11, #1, MUL VL]\n"
"uzp1 z1.h, z1.h, z1.h\n"
"trn1 z0.d, z0.d, z1.d\n"
- "ld1h { z6.h }, p7/Z, [x11]\n"
- "ld1h { z7.h }, p7/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x10]\n"
+ "ld1h { z7.h }, p7/Z, [x10, #1, MUL VL]\n"
".inst 0x6464e408 // bfmmla z8.s, z0.h, z4.h\n"
".inst 0x6465e40e // bfmmla z14.s, z0.h, z5.h\n"
- "ld1h { z4.h }, p7/Z, [x10]\n"
- "ld1h { z5.h }, p7/Z, [x10, #1, MUL VL]\n"
+ "ld1h { z4.h }, p7/Z, [x9]\n"
+ "ld1h { z5.h }, p7/Z, [x9, #1, MUL VL]\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p7/Z, [x9]\n"
- "ld1h { z7.h }, p7/Z, [x9, #1, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x28]\n"
+ "ld1h { z7.h }, p7/Z, [x28, #1, MUL VL]\n"
".inst 0x6464e40a // bfmmla z10.s, z0.h, z4.h\n"
".inst 0x6465e410 // bfmmla z16.s, z0.h, z5.h\n"
- "ld1h { z4.h }, p7/Z, [x28]\n"
- "ld1h { z5.h }, p7/Z, [x28, #1, MUL VL]\n"
+ "ld1h { z4.h }, p7/Z, [x27]\n"
+ "ld1h { z5.h }, p7/Z, [x27, #1, MUL VL]\n"
".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
".inst 0x6467e411 // bfmmla z17.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p7/Z, [x27]\n"
- "ld1h { z7.h }, p7/Z, [x27, #1, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x26]\n"
+ "ld1h { z7.h }, p7/Z, [x26, #1, MUL VL]\n"
".inst 0x6464e40c // bfmmla z12.s, z0.h, z4.h\n"
".inst 0x6465e412 // bfmmla z18.s, z0.h, z5.h\n"
- "addvl x12, x12, #2\n"
"addvl x11, x11, #2\n"
+ "addvl x10, x10, #2\n"
".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
".inst 0x6467e413 // bfmmla z19.s, z0.h, z7.h\n"
- "addvl x10, x10, #2\n"
"addvl x9, x9, #2\n"
"addvl x28, x28, #2\n"
"addvl x27, x27, #2\n"
+ "addvl x26, x26, #2\n"
"26:" // Height 2: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 21b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 z4.d, z8.d, z14.d\n"
"uzp2 z8.d, z8.d, z14.d\n"
- "add x23, x13, x20, LSL #2\n"
+ "add x22, x12, x19, LSL #2\n"
"uzp1 z14.d, z9.d, z15.d\n"
"uzp2 z9.d, z9.d, z15.d\n"
"uzp1 z15.d, z10.d, z16.d\n"
@@ -577,10 +577,10 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"uzp1 z18.d, z13.d, z19.d\n"
"uzp2 z13.d, z13.d, z19.d\n"
"tbz %x[flags], #1, 27f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p7/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p7/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p7/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p7/Z, [x19]\n"
"fmin z4.s, p7/M, z4.s, z1.s\n"
"fmin z14.s, p7/M, z14.s, z1.s\n"
"fmin z15.s, p7/M, z15.s, z1.s\n"
@@ -606,89 +606,89 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"fmax z12.s, p7/M, z12.s, z0.s\n"
"fmax z13.s, p7/M, z13.s, z0.s\n"
"27:" // Height 2: No activation
- "st1w { z4.s }, p6, [x13]\n"
- "st1w { z14.s }, p5, [x13, #1, MUL VL]\n"
- "st1w { z15.s }, p4, [x13, #2, MUL VL]\n"
- "st1w { z16.s }, p3, [x13, #3, MUL VL]\n"
- "st1w { z17.s }, p2, [x13, #4, MUL VL]\n"
- "st1w { z18.s }, p1, [x13, #5, MUL VL]\n"
- "addvl x13, x13, #6\n"
- "st1w { z8.s }, p6, [x23]\n"
- "st1w { z9.s }, p5, [x23, #1, MUL VL]\n"
- "st1w { z10.s }, p4, [x23, #2, MUL VL]\n"
- "st1w { z11.s }, p3, [x23, #3, MUL VL]\n"
- "st1w { z12.s }, p2, [x23, #4, MUL VL]\n"
- "st1w { z13.s }, p1, [x23, #5, MUL VL]\n"
+ "st1w { z4.s }, p6, [x12]\n"
+ "st1w { z14.s }, p5, [x12, #1, MUL VL]\n"
+ "st1w { z15.s }, p4, [x12, #2, MUL VL]\n"
+ "st1w { z16.s }, p3, [x12, #3, MUL VL]\n"
+ "st1w { z17.s }, p2, [x12, #4, MUL VL]\n"
+ "st1w { z18.s }, p1, [x12, #5, MUL VL]\n"
+ "addvl x12, x12, #6\n"
+ "st1w { z8.s }, p6, [x22]\n"
+ "st1w { z9.s }, p5, [x22, #1, MUL VL]\n"
+ "st1w { z10.s }, p4, [x22, #2, MUL VL]\n"
+ "st1w { z11.s }, p3, [x22, #3, MUL VL]\n"
+ "st1w { z12.s }, p2, [x22, #4, MUL VL]\n"
+ "st1w { z13.s }, p1, [x22, #5, MUL VL]\n"
"28:" // Height 2: Writeback done
- "decw x14, ALL, MUL #6\n"
- "cmp x14, XZR\n"
+ "decw x13, ALL, MUL #6\n"
+ "cmp x13, XZR\n"
"bgt 16b\n"
"b 58f\n"
"29:" // Height 3
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
"30:" // Height 3: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "cntw x21, ALL, MUL #5\n"
- "add x28, x9, x20, LSL #1\n"
- "add x27, x28, x20, LSL #1\n"
- "add x20, x27, x20, LSL #1\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "cntw x20, ALL, MUL #5\n"
+ "add x27, x28, x19, LSL #1\n"
+ "add x26, x27, x19, LSL #1\n"
+ "add x19, x26, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 31f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x27, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x26, x11\n"
"bgt 31f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x28, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x27, x11\n"
"bgt 31f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 31f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 31f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"31:" // Height 3: B setup done
- "mov x20, #0x0\n"
- "whilelt p6.s, x20, x14\n"
- "incw x20\n"
- "whilelt p5.s, x20, x14\n"
- "incw x20\n"
- "whilelt p4.s, x20, x14\n"
- "incw x20\n"
- "whilelt p3.s, x20, x14\n"
- "incw x20\n"
- "whilelt p2.s, x20, x14\n"
- "incw x20\n"
- "whilelt p1.s, x20, x14\n"
- "cbz x15, 32f\n"
- "ld1w { z8.s }, p7/Z, [x15]\n"
- "ld1w { z9.s }, p7/Z, [x15, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p6.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p5.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p4.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x14, 32f\n"
+ "ld1w { z8.s }, p7/Z, [x14]\n"
+ "ld1w { z9.s }, p7/Z, [x14, #1, MUL VL]\n"
"zip2 z14.d, z8.d, z8.d\n"
"zip1 z8.d, z8.d, z8.d\n"
- "ld1w { z10.s }, p7/Z, [x15, #2, MUL VL]\n"
- "ld1w { z11.s }, p7/Z, [x15, #3, MUL VL]\n"
+ "ld1w { z10.s }, p7/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p7/Z, [x14, #3, MUL VL]\n"
"zip2 z15.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
- "ld1w { z12.s }, p7/Z, [x15, #4, MUL VL]\n"
- "ld1w { z13.s }, p7/Z, [x15, #5, MUL VL]\n"
+ "ld1w { z12.s }, p7/Z, [x14, #4, MUL VL]\n"
+ "ld1w { z13.s }, p7/Z, [x14, #5, MUL VL]\n"
"zip2 z16.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
"zip2 z17.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
- "addvl x15, x15, #6\n"
+ "addvl x14, x14, #6\n"
"zip2 z18.d, z12.d, z12.d\n"
"zip1 z12.d, z12.d, z12.d\n"
"zip2 z19.d, z13.d, z13.d\n"
@@ -708,39 +708,39 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"b 34f\n"
"32:" // Height 3: no bias
"tbz %x[flags], #0, 33f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x13, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "ld1w { z9.s }, p6/Z, [x13]\n"
- "ld1w { z10.s }, p5/Z, [x13, #1, MUL VL]\n"
- "ld1w { z11.s }, p4/Z, [x13, #2, MUL VL]\n"
- "ld1w { z12.s }, p3/Z, [x13, #3, MUL VL]\n"
- "ld1w { z13.s }, p2/Z, [x13, #4, MUL VL]\n"
- "ld1w { z20.s }, p1/Z, [x13, #5, MUL VL]\n"
- "ld1w { z14.s }, p6/Z, [x23]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x22, x12, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z9.s }, p6/Z, [x12]\n"
+ "ld1w { z10.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z11.s }, p4/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z13.s }, p2/Z, [x12, #4, MUL VL]\n"
+ "ld1w { z20.s }, p1/Z, [x12, #5, MUL VL]\n"
+ "ld1w { z14.s }, p6/Z, [x22]\n"
"zip1 z8.d, z9.d, z14.d\n"
"zip2 z14.d, z9.d, z14.d\n"
- "ld1w { z15.s }, p5/Z, [x23, #1, MUL VL]\n"
- "ld1w { z16.s }, p4/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p5/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x22, #2, MUL VL]\n"
"zip1 z9.d, z10.d, z15.d\n"
"zip2 z15.d, z10.d, z15.d\n"
- "ld1w { z17.s }, p3/Z, [x23, #3, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x23, #4, MUL VL]\n"
+ "ld1w { z17.s }, p3/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x22, #4, MUL VL]\n"
"zip1 z10.d, z11.d, z16.d\n"
"zip2 z16.d, z11.d, z16.d\n"
- "ld1w { z19.s }, p1/Z, [x23, #5, MUL VL]\n"
- "ld1w { z21.s }, p6/Z, [x22]\n"
+ "ld1w { z19.s }, p1/Z, [x22, #5, MUL VL]\n"
+ "ld1w { z21.s }, p6/Z, [x21]\n"
"zip1 z11.d, z12.d, z17.d\n"
"zip2 z17.d, z12.d, z17.d\n"
- "ld1w { z22.s }, p5/Z, [x22, #1, MUL VL]\n"
- "ld1w { z23.s }, p4/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z22.s }, p5/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z23.s }, p4/Z, [x21, #2, MUL VL]\n"
"zip1 z12.d, z13.d, z18.d\n"
"zip2 z18.d, z13.d, z18.d\n"
- "ld1w { z24.s }, p3/Z, [x22, #3, MUL VL]\n"
- "ld1w { z25.s }, p2/Z, [x22, #4, MUL VL]\n"
+ "ld1w { z24.s }, p3/Z, [x21, #3, MUL VL]\n"
+ "ld1w { z25.s }, p2/Z, [x21, #4, MUL VL]\n"
"zip1 z13.d, z20.d, z19.d\n"
"zip2 z19.d, z20.d, z19.d\n"
- "ld1w { z4.s }, p1/Z, [x22, #5, MUL VL]\n"
+ "ld1w { z4.s }, p1/Z, [x21, #5, MUL VL]\n"
"zip1 z20.d, z21.d, z26.d\n"
"zip2 z26.d, z21.d, z26.d\n"
"zip1 z21.d, z22.d, z27.d\n"
@@ -780,137 +780,137 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"mov z30.b, #0x0\n"
"mov z31.b, #0x0\n"
"34:" // Height 3: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"35:" // Height 3: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w24, [x19, x25, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 36f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "ldr x22, [x21, #0x10]\n"
- "cbnz x26, 37f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x22, x22, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "cbnz x25, 37f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
"b 37f\n"
"36:" // Height 3: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"37:" // Height 3: input setup done
- "cmp x25, #0x4\n"
+ "cmp x24, #0x4\n"
"ble 39f\n"
"38:" // Height 3: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x25\n"
- "ld1rqw { z0.s }, p0/Z, [x24]\n"
- "ld1rqw { z1.s }, p0/Z, [x23]\n"
+ "whilelt p0.s, XZR, x24\n"
+ "ld1rqw { z0.s }, p0/Z, [x23]\n"
+ "ld1rqw { z1.s }, p0/Z, [x22]\n"
".inst 0x658abc00 // bfcvt z0.h, p7/M, z0.s\n"
- "ld1rqw { z2.s }, p0/Z, [x22]\n"
+ "ld1rqw { z2.s }, p0/Z, [x21]\n"
".inst 0x658abc21 // bfcvt z1.h, p7/M, z1.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "ld1h { z4.h }, p7/Z, [x12]\n"
+ "ld1h { z4.h }, p7/Z, [x11]\n"
"uzp1 z1.h, z1.h, z1.h\n"
".inst 0x658abc42 // bfcvt z2.h, p7/M, z2.s\n"
- "ld1h { z5.h }, p7/Z, [x12, #1, MUL VL]\n"
- "ld1h { z6.h }, p7/Z, [x11]\n"
+ "ld1h { z5.h }, p7/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x10]\n"
"trn1 z0.d, z0.d, z1.d\n"
"uzp1 z2.h, z2.h, z2.h\n"
- "ld1h { z7.h }, p7/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z7.h }, p7/Z, [x10, #1, MUL VL]\n"
".inst 0x6464e408 // bfmmla z8.s, z0.h, z4.h\n"
".inst 0x6464e454 // bfmmla z20.s, z2.h, z4.h\n"
".inst 0x6465e40e // bfmmla z14.s, z0.h, z5.h\n"
- "ld1h { z4.h }, p7/Z, [x10]\n"
- "sub x25, x25, #0x4\n"
+ "ld1h { z4.h }, p7/Z, [x9]\n"
+ "sub x24, x24, #0x4\n"
".inst 0x6465e45a // bfmmla z26.s, z2.h, z5.h\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
- "ld1h { z5.h }, p7/Z, [x10, #1, MUL VL]\n"
- "cmp x25, #0x4\n"
+ "ld1h { z5.h }, p7/Z, [x9, #1, MUL VL]\n"
+ "cmp x24, #0x4\n"
".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p7/Z, [x9]\n"
- "add x24, x24, #0x10\n"
+ "ld1h { z6.h }, p7/Z, [x28]\n"
+ "add x23, x23, #0x10\n"
".inst 0x6467e45b // bfmmla z27.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p7/Z, [x9, #1, MUL VL]\n"
+ "ld1h { z7.h }, p7/Z, [x28, #1, MUL VL]\n"
".inst 0x6464e40a // bfmmla z10.s, z0.h, z4.h\n"
- "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
".inst 0x6464e456 // bfmmla z22.s, z2.h, z4.h\n"
".inst 0x6465e410 // bfmmla z16.s, z0.h, z5.h\n"
- "ld1h { z4.h }, p7/Z, [x28]\n"
- "add x22, x22, #0x10\n"
+ "ld1h { z4.h }, p7/Z, [x27]\n"
+ "add x21, x21, #0x10\n"
".inst 0x6465e45c // bfmmla z28.s, z2.h, z5.h\n"
".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
- "ld1h { z5.h }, p7/Z, [x28, #1, MUL VL]\n"
- "addvl x12, x12, #2\n"
+ "ld1h { z5.h }, p7/Z, [x27, #1, MUL VL]\n"
+ "addvl x11, x11, #2\n"
".inst 0x6466e457 // bfmmla z23.s, z2.h, z6.h\n"
".inst 0x6467e411 // bfmmla z17.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p7/Z, [x27]\n"
- "addvl x11, x11, #2\n"
+ "ld1h { z6.h }, p7/Z, [x26]\n"
+ "addvl x10, x10, #2\n"
".inst 0x6467e45d // bfmmla z29.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p7/Z, [x27, #1, MUL VL]\n"
+ "ld1h { z7.h }, p7/Z, [x26, #1, MUL VL]\n"
".inst 0x6464e40c // bfmmla z12.s, z0.h, z4.h\n"
- "addvl x10, x10, #2\n"
+ "addvl x9, x9, #2\n"
".inst 0x6464e458 // bfmmla z24.s, z2.h, z4.h\n"
".inst 0x6465e412 // bfmmla z18.s, z0.h, z5.h\n"
- "addvl x9, x9, #2\n"
"addvl x28, x28, #2\n"
+ "addvl x27, x27, #2\n"
".inst 0x6465e45e // bfmmla z30.s, z2.h, z5.h\n"
".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
- "addvl x27, x27, #2\n"
+ "addvl x26, x26, #2\n"
".inst 0x6466e459 // bfmmla z25.s, z2.h, z6.h\n"
".inst 0x6467e413 // bfmmla z19.s, z0.h, z7.h\n"
".inst 0x6467e45f // bfmmla z31.s, z2.h, z7.h\n"
"bgt 38b\n"
"39:" // Height 3: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x25\n"
- "ld1rqw { z0.s }, p0/Z, [x24]\n"
- "ld1rqw { z1.s }, p0/Z, [x23]\n"
+ "whilelt p0.s, XZR, x24\n"
+ "ld1rqw { z0.s }, p0/Z, [x23]\n"
+ "ld1rqw { z1.s }, p0/Z, [x22]\n"
".inst 0x658abc00 // bfcvt z0.h, p7/M, z0.s\n"
- "ld1rqw { z2.s }, p0/Z, [x22]\n"
+ "ld1rqw { z2.s }, p0/Z, [x21]\n"
".inst 0x658abc21 // bfcvt z1.h, p7/M, z1.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "ld1h { z4.h }, p7/Z, [x12]\n"
+ "ld1h { z4.h }, p7/Z, [x11]\n"
"uzp1 z1.h, z1.h, z1.h\n"
".inst 0x658abc42 // bfcvt z2.h, p7/M, z2.s\n"
- "ld1h { z5.h }, p7/Z, [x12, #1, MUL VL]\n"
- "ld1h { z6.h }, p7/Z, [x11]\n"
+ "ld1h { z5.h }, p7/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x10]\n"
"trn1 z0.d, z0.d, z1.d\n"
"uzp1 z2.h, z2.h, z2.h\n"
- "ld1h { z7.h }, p7/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z7.h }, p7/Z, [x10, #1, MUL VL]\n"
".inst 0x6464e408 // bfmmla z8.s, z0.h, z4.h\n"
".inst 0x6464e454 // bfmmla z20.s, z2.h, z4.h\n"
".inst 0x6465e40e // bfmmla z14.s, z0.h, z5.h\n"
- "ld1h { z4.h }, p7/Z, [x10]\n"
- "addvl x12, x12, #2\n"
+ "ld1h { z4.h }, p7/Z, [x9]\n"
+ "addvl x11, x11, #2\n"
".inst 0x6465e45a // bfmmla z26.s, z2.h, z5.h\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
- "ld1h { z5.h }, p7/Z, [x10, #1, MUL VL]\n"
- "addvl x11, x11, #2\n"
+ "ld1h { z5.h }, p7/Z, [x9, #1, MUL VL]\n"
+ "addvl x10, x10, #2\n"
".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p7/Z, [x9]\n"
- "addvl x10, x10, #2\n"
+ "ld1h { z6.h }, p7/Z, [x28]\n"
+ "addvl x9, x9, #2\n"
".inst 0x6467e45b // bfmmla z27.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p7/Z, [x9, #1, MUL VL]\n"
+ "ld1h { z7.h }, p7/Z, [x28, #1, MUL VL]\n"
".inst 0x6464e40a // bfmmla z10.s, z0.h, z4.h\n"
- "addvl x9, x9, #2\n"
+ "addvl x28, x28, #2\n"
".inst 0x6464e456 // bfmmla z22.s, z2.h, z4.h\n"
".inst 0x6465e410 // bfmmla z16.s, z0.h, z5.h\n"
- "ld1h { z4.h }, p7/Z, [x28]\n"
+ "ld1h { z4.h }, p7/Z, [x27]\n"
".inst 0x6465e45c // bfmmla z28.s, z2.h, z5.h\n"
".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
- "ld1h { z5.h }, p7/Z, [x28, #1, MUL VL]\n"
- "addvl x28, x28, #2\n"
+ "ld1h { z5.h }, p7/Z, [x27, #1, MUL VL]\n"
+ "addvl x27, x27, #2\n"
".inst 0x6466e457 // bfmmla z23.s, z2.h, z6.h\n"
".inst 0x6467e411 // bfmmla z17.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p7/Z, [x27]\n"
+ "ld1h { z6.h }, p7/Z, [x26]\n"
".inst 0x6467e45d // bfmmla z29.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p7/Z, [x27, #1, MUL VL]\n"
+ "ld1h { z7.h }, p7/Z, [x26, #1, MUL VL]\n"
".inst 0x6464e40c // bfmmla z12.s, z0.h, z4.h\n"
- "addvl x27, x27, #2\n"
+ "addvl x26, x26, #2\n"
".inst 0x6464e458 // bfmmla z24.s, z2.h, z4.h\n"
".inst 0x6465e412 // bfmmla z18.s, z0.h, z5.h\n"
".inst 0x6465e45e // bfmmla z30.s, z2.h, z5.h\n"
@@ -919,17 +919,17 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
".inst 0x6467e413 // bfmmla z19.s, z0.h, z7.h\n"
".inst 0x6467e45f // bfmmla z31.s, z2.h, z7.h\n"
"40:" // Height 3: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 35b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x13, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x22, x12, x19, LSL #2\n"
"uzp1 z4.d, z8.d, z14.d\n"
"uzp2 z8.d, z8.d, z14.d\n"
"uzp1 z14.d, z9.d, z15.d\n"
"uzp2 z9.d, z9.d, z15.d\n"
- "add x22, x23, x20, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"uzp1 z15.d, z10.d, z16.d\n"
"uzp2 z10.d, z10.d, z16.d\n"
"uzp1 z16.d, z11.d, z17.d\n"
@@ -945,10 +945,10 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"uzp1 z24.d, z24.d, z30.d\n"
"uzp1 z25.d, z25.d, z31.d\n"
"tbz %x[flags], #1, 41f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p7/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p7/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p7/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p7/Z, [x19]\n"
"fmin z4.s, p7/M, z4.s, z1.s\n"
"fmin z14.s, p7/M, z14.s, z1.s\n"
"fmin z15.s, p7/M, z15.s, z1.s\n"
@@ -986,98 +986,98 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"fmax z24.s, p7/M, z24.s, z0.s\n"
"fmax z25.s, p7/M, z25.s, z0.s\n"
"41:" // Height 3: No activation
- "st1w { z4.s }, p6, [x13]\n"
- "st1w { z14.s }, p5, [x13, #1, MUL VL]\n"
- "st1w { z15.s }, p4, [x13, #2, MUL VL]\n"
- "st1w { z16.s }, p3, [x13, #3, MUL VL]\n"
- "st1w { z17.s }, p2, [x13, #4, MUL VL]\n"
- "st1w { z18.s }, p1, [x13, #5, MUL VL]\n"
- "addvl x13, x13, #6\n"
- "st1w { z8.s }, p6, [x23]\n"
- "st1w { z9.s }, p5, [x23, #1, MUL VL]\n"
- "st1w { z10.s }, p4, [x23, #2, MUL VL]\n"
- "st1w { z11.s }, p3, [x23, #3, MUL VL]\n"
- "st1w { z12.s }, p2, [x23, #4, MUL VL]\n"
- "st1w { z13.s }, p1, [x23, #5, MUL VL]\n"
- "st1w { z20.s }, p6, [x22]\n"
- "st1w { z21.s }, p5, [x22, #1, MUL VL]\n"
- "st1w { z22.s }, p4, [x22, #2, MUL VL]\n"
- "st1w { z23.s }, p3, [x22, #3, MUL VL]\n"
- "st1w { z24.s }, p2, [x22, #4, MUL VL]\n"
- "st1w { z25.s }, p1, [x22, #5, MUL VL]\n"
+ "st1w { z4.s }, p6, [x12]\n"
+ "st1w { z14.s }, p5, [x12, #1, MUL VL]\n"
+ "st1w { z15.s }, p4, [x12, #2, MUL VL]\n"
+ "st1w { z16.s }, p3, [x12, #3, MUL VL]\n"
+ "st1w { z17.s }, p2, [x12, #4, MUL VL]\n"
+ "st1w { z18.s }, p1, [x12, #5, MUL VL]\n"
+ "addvl x12, x12, #6\n"
+ "st1w { z8.s }, p6, [x22]\n"
+ "st1w { z9.s }, p5, [x22, #1, MUL VL]\n"
+ "st1w { z10.s }, p4, [x22, #2, MUL VL]\n"
+ "st1w { z11.s }, p3, [x22, #3, MUL VL]\n"
+ "st1w { z12.s }, p2, [x22, #4, MUL VL]\n"
+ "st1w { z13.s }, p1, [x22, #5, MUL VL]\n"
+ "st1w { z20.s }, p6, [x21]\n"
+ "st1w { z21.s }, p5, [x21, #1, MUL VL]\n"
+ "st1w { z22.s }, p4, [x21, #2, MUL VL]\n"
+ "st1w { z23.s }, p3, [x21, #3, MUL VL]\n"
+ "st1w { z24.s }, p2, [x21, #4, MUL VL]\n"
+ "st1w { z25.s }, p1, [x21, #5, MUL VL]\n"
"42:" // Height 3: Writeback done
- "decw x14, ALL, MUL #6\n"
- "cmp x14, XZR\n"
+ "decw x13, ALL, MUL #6\n"
+ "cmp x13, XZR\n"
"bgt 30b\n"
"b 58f\n"
"43:" // Height 4
- "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x21, #0x10\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x15, %x[bias]\n"
- "mov x13, %x[output_ptr]\n"
- "madd %x[output_ptr], x20, x21, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x20, #0x10\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x14, %x[bias]\n"
+ "mov x12, %x[output_ptr]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
"44:" // Height 4: Column loop
- "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
- "add x10, x11, x20, LSL #1\n"
- "add x9, x10, x20, LSL #1\n"
- "cntw x21, ALL, MUL #5\n"
- "add x28, x9, x20, LSL #1\n"
- "add x27, x28, x20, LSL #1\n"
- "add x20, x27, x20, LSL #1\n"
- "cmp x14, x21\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "cntw x20, ALL, MUL #5\n"
+ "add x27, x28, x19, LSL #1\n"
+ "add x26, x27, x19, LSL #1\n"
+ "add x19, x26, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 45f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x27, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x26, x11\n"
"bgt 45f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x28, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x27, x11\n"
"bgt 45f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x9, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
"bgt 45f\n"
- "decw x21\n"
- "cmp x14, x21\n"
- "mov x10, x12\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
"bgt 45f\n"
- "mov x11, x12\n"
+ "mov x10, x11\n"
"45:" // Height 4: B setup done
- "mov x20, #0x0\n"
- "whilelt p6.s, x20, x14\n"
- "incw x20\n"
- "whilelt p5.s, x20, x14\n"
- "incw x20\n"
- "whilelt p4.s, x20, x14\n"
- "incw x20\n"
- "whilelt p3.s, x20, x14\n"
- "incw x20\n"
- "whilelt p2.s, x20, x14\n"
- "incw x20\n"
- "whilelt p1.s, x20, x14\n"
- "cbz x15, 46f\n"
- "ld1w { z8.s }, p7/Z, [x15]\n"
- "ld1w { z9.s }, p7/Z, [x15, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p6.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p5.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p4.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x14, 46f\n"
+ "ld1w { z8.s }, p7/Z, [x14]\n"
+ "ld1w { z9.s }, p7/Z, [x14, #1, MUL VL]\n"
"zip2 z14.d, z8.d, z8.d\n"
"zip1 z8.d, z8.d, z8.d\n"
- "ld1w { z10.s }, p7/Z, [x15, #2, MUL VL]\n"
- "ld1w { z11.s }, p7/Z, [x15, #3, MUL VL]\n"
+ "ld1w { z10.s }, p7/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p7/Z, [x14, #3, MUL VL]\n"
"zip2 z15.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
- "ld1w { z12.s }, p7/Z, [x15, #4, MUL VL]\n"
- "ld1w { z13.s }, p7/Z, [x15, #5, MUL VL]\n"
+ "ld1w { z12.s }, p7/Z, [x14, #4, MUL VL]\n"
+ "ld1w { z13.s }, p7/Z, [x14, #5, MUL VL]\n"
"zip2 z16.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
"zip2 z17.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
- "addvl x15, x15, #6\n"
+ "addvl x14, x14, #6\n"
"zip2 z18.d, z12.d, z12.d\n"
"zip1 z12.d, z12.d, z12.d\n"
"zip2 z19.d, z13.d, z13.d\n"
@@ -1097,52 +1097,52 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"b 48f\n"
"46:" // Height 4: no bias
"tbz %x[flags], #0, 47f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x13, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "ld1w { z9.s }, p6/Z, [x13]\n"
- "add x21, x22, x20, LSL #2\n"
- "ld1w { z10.s }, p5/Z, [x13, #1, MUL VL]\n"
- "ld1w { z11.s }, p4/Z, [x13, #2, MUL VL]\n"
- "ld1w { z12.s }, p3/Z, [x13, #3, MUL VL]\n"
- "ld1w { z13.s }, p2/Z, [x13, #4, MUL VL]\n"
- "ld1w { z20.s }, p1/Z, [x13, #5, MUL VL]\n"
- "ld1w { z14.s }, p6/Z, [x23]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x22, x12, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z9.s }, p6/Z, [x12]\n"
+ "add x20, x21, x19, LSL #2\n"
+ "ld1w { z10.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z11.s }, p4/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z13.s }, p2/Z, [x12, #4, MUL VL]\n"
+ "ld1w { z20.s }, p1/Z, [x12, #5, MUL VL]\n"
+ "ld1w { z14.s }, p6/Z, [x22]\n"
"zip1 z8.d, z9.d, z14.d\n"
"zip2 z14.d, z9.d, z14.d\n"
- "ld1w { z15.s }, p5/Z, [x23, #1, MUL VL]\n"
- "ld1w { z16.s }, p4/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p5/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x22, #2, MUL VL]\n"
"zip1 z9.d, z10.d, z15.d\n"
"zip2 z15.d, z10.d, z15.d\n"
- "ld1w { z17.s }, p3/Z, [x23, #3, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x23, #4, MUL VL]\n"
+ "ld1w { z17.s }, p3/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x22, #4, MUL VL]\n"
"zip1 z10.d, z11.d, z16.d\n"
"zip2 z16.d, z11.d, z16.d\n"
- "ld1w { z19.s }, p1/Z, [x23, #5, MUL VL]\n"
- "ld1w { z21.s }, p6/Z, [x22]\n"
+ "ld1w { z19.s }, p1/Z, [x22, #5, MUL VL]\n"
+ "ld1w { z21.s }, p6/Z, [x21]\n"
"zip1 z11.d, z12.d, z17.d\n"
"zip2 z17.d, z12.d, z17.d\n"
- "ld1w { z22.s }, p5/Z, [x22, #1, MUL VL]\n"
- "ld1w { z23.s }, p4/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z22.s }, p5/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z23.s }, p4/Z, [x21, #2, MUL VL]\n"
"zip1 z12.d, z13.d, z18.d\n"
"zip2 z18.d, z13.d, z18.d\n"
- "ld1w { z24.s }, p3/Z, [x22, #3, MUL VL]\n"
- "ld1w { z25.s }, p2/Z, [x22, #4, MUL VL]\n"
+ "ld1w { z24.s }, p3/Z, [x21, #3, MUL VL]\n"
+ "ld1w { z25.s }, p2/Z, [x21, #4, MUL VL]\n"
"zip1 z13.d, z20.d, z19.d\n"
"zip2 z19.d, z20.d, z19.d\n"
- "ld1w { z4.s }, p1/Z, [x22, #5, MUL VL]\n"
- "ld1w { z26.s }, p6/Z, [x21]\n"
+ "ld1w { z4.s }, p1/Z, [x21, #5, MUL VL]\n"
+ "ld1w { z26.s }, p6/Z, [x20]\n"
"zip1 z20.d, z21.d, z26.d\n"
"zip2 z26.d, z21.d, z26.d\n"
- "ld1w { z27.s }, p5/Z, [x21, #1, MUL VL]\n"
- "ld1w { z28.s }, p4/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z27.s }, p5/Z, [x20, #1, MUL VL]\n"
+ "ld1w { z28.s }, p4/Z, [x20, #2, MUL VL]\n"
"zip1 z21.d, z22.d, z27.d\n"
"zip2 z27.d, z22.d, z27.d\n"
- "ld1w { z29.s }, p3/Z, [x21, #3, MUL VL]\n"
- "ld1w { z30.s }, p2/Z, [x21, #4, MUL VL]\n"
+ "ld1w { z29.s }, p3/Z, [x20, #3, MUL VL]\n"
+ "ld1w { z30.s }, p2/Z, [x20, #4, MUL VL]\n"
"zip1 z22.d, z23.d, z28.d\n"
"zip2 z28.d, z23.d, z28.d\n"
- "ld1w { z31.s }, p1/Z, [x21, #5, MUL VL]\n"
+ "ld1w { z31.s }, p1/Z, [x20, #5, MUL VL]\n"
"zip1 z23.d, z24.d, z29.d\n"
"zip2 z29.d, z24.d, z29.d\n"
"zip1 z24.d, z25.d, z30.d\n"
@@ -1176,149 +1176,149 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"mov z30.b, #0x0\n"
"mov z31.b, #0x0\n"
"48:" // Height 4: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"49:" // Height 4: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w24, [x19, x25, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 50f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "ldr x22, [x21, #0x10]\n"
- "ldr x21, [x21, #0x18]\n"
- "cbnz x26, 51f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x22, x22, x20, LSL #2\n"
- "add x21, x21, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "ldr x20, [x20, #0x18]\n"
+ "cbnz x25, 51f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "add x20, x20, x19, LSL #2\n"
"b 51f\n"
"50:" // Height 4: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"51:" // Height 4: input setup done
- "cmp x25, #0x4\n"
+ "cmp x24, #0x4\n"
"ble 53f\n"
"52:" // Height 4: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x25\n"
- "ld1rqw { z0.s }, p0/Z, [x24]\n"
- "ld1rqw { z1.s }, p0/Z, [x23]\n"
+ "whilelt p0.s, XZR, x24\n"
+ "ld1rqw { z0.s }, p0/Z, [x23]\n"
+ "ld1rqw { z1.s }, p0/Z, [x22]\n"
".inst 0x658abc00 // bfcvt z0.h, p7/M, z0.s\n"
- "ld1rqw { z2.s }, p0/Z, [x22]\n"
- "ld1rqw { z3.s }, p0/Z, [x21]\n"
+ "ld1rqw { z2.s }, p0/Z, [x21]\n"
+ "ld1rqw { z3.s }, p0/Z, [x20]\n"
".inst 0x658abc21 // bfcvt z1.h, p7/M, z1.s\n"
".inst 0x658abc42 // bfcvt z2.h, p7/M, z2.s\n"
".inst 0x658abc63 // bfcvt z3.h, p7/M, z3.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "ld1h { z4.h }, p7/Z, [x12]\n"
- "ld1h { z5.h }, p7/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z4.h }, p7/Z, [x11]\n"
+ "ld1h { z5.h }, p7/Z, [x11, #1, MUL VL]\n"
"uzp1 z1.h, z1.h, z1.h\n"
"uzp1 z2.h, z2.h, z2.h\n"
- "ld1h { z6.h }, p7/Z, [x11]\n"
- "ld1h { z7.h }, p7/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x10]\n"
+ "ld1h { z7.h }, p7/Z, [x10, #1, MUL VL]\n"
"uzp1 z3.h, z3.h, z3.h\n"
"trn1 z0.d, z0.d, z1.d\n"
".inst 0x6464e408 // bfmmla z8.s, z0.h, z4.h\n"
- "sub x25, x25, #0x4\n"
+ "sub x24, x24, #0x4\n"
"trn1 z2.d, z2.d, z3.d\n"
".inst 0x6464e454 // bfmmla z20.s, z2.h, z4.h\n"
".inst 0x6465e40e // bfmmla z14.s, z0.h, z5.h\n"
- "ld1h { z4.h }, p7/Z, [x10]\n"
+ "ld1h { z4.h }, p7/Z, [x9]\n"
".inst 0x6465e45a // bfmmla z26.s, z2.h, z5.h\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
- "ld1h { z5.h }, p7/Z, [x10, #1, MUL VL]\n"
- "cmp x25, #0x4\n"
+ "ld1h { z5.h }, p7/Z, [x9, #1, MUL VL]\n"
+ "cmp x24, #0x4\n"
".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p7/Z, [x9]\n"
- "add x24, x24, #0x10\n"
+ "ld1h { z6.h }, p7/Z, [x28]\n"
+ "add x23, x23, #0x10\n"
".inst 0x6467e45b // bfmmla z27.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p7/Z, [x9, #1, MUL VL]\n"
+ "ld1h { z7.h }, p7/Z, [x28, #1, MUL VL]\n"
".inst 0x6464e40a // bfmmla z10.s, z0.h, z4.h\n"
- "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
".inst 0x6464e456 // bfmmla z22.s, z2.h, z4.h\n"
".inst 0x6465e410 // bfmmla z16.s, z0.h, z5.h\n"
- "ld1h { z4.h }, p7/Z, [x28]\n"
- "add x22, x22, #0x10\n"
+ "ld1h { z4.h }, p7/Z, [x27]\n"
+ "add x21, x21, #0x10\n"
".inst 0x6465e45c // bfmmla z28.s, z2.h, z5.h\n"
".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
- "ld1h { z5.h }, p7/Z, [x28, #1, MUL VL]\n"
- "add x21, x21, #0x10\n"
+ "ld1h { z5.h }, p7/Z, [x27, #1, MUL VL]\n"
+ "add x20, x20, #0x10\n"
".inst 0x6466e457 // bfmmla z23.s, z2.h, z6.h\n"
".inst 0x6467e411 // bfmmla z17.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p7/Z, [x27]\n"
- "addvl x12, x12, #2\n"
+ "ld1h { z6.h }, p7/Z, [x26]\n"
+ "addvl x11, x11, #2\n"
".inst 0x6467e45d // bfmmla z29.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p7/Z, [x27, #1, MUL VL]\n"
+ "ld1h { z7.h }, p7/Z, [x26, #1, MUL VL]\n"
".inst 0x6464e40c // bfmmla z12.s, z0.h, z4.h\n"
- "addvl x11, x11, #2\n"
+ "addvl x10, x10, #2\n"
".inst 0x6464e458 // bfmmla z24.s, z2.h, z4.h\n"
".inst 0x6465e412 // bfmmla z18.s, z0.h, z5.h\n"
- "addvl x10, x10, #2\n"
"addvl x9, x9, #2\n"
+ "addvl x28, x28, #2\n"
".inst 0x6465e45e // bfmmla z30.s, z2.h, z5.h\n"
".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
- "addvl x28, x28, #2\n"
"addvl x27, x27, #2\n"
+ "addvl x26, x26, #2\n"
".inst 0x6466e459 // bfmmla z25.s, z2.h, z6.h\n"
".inst 0x6467e413 // bfmmla z19.s, z0.h, z7.h\n"
".inst 0x6467e45f // bfmmla z31.s, z2.h, z7.h\n"
"bgt 52b\n"
"53:" // Height 4: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x25\n"
- "ld1rqw { z0.s }, p0/Z, [x24]\n"
- "ld1rqw { z1.s }, p0/Z, [x23]\n"
+ "whilelt p0.s, XZR, x24\n"
+ "ld1rqw { z0.s }, p0/Z, [x23]\n"
+ "ld1rqw { z1.s }, p0/Z, [x22]\n"
".inst 0x658abc00 // bfcvt z0.h, p7/M, z0.s\n"
- "ld1rqw { z2.s }, p0/Z, [x22]\n"
- "ld1rqw { z3.s }, p0/Z, [x21]\n"
+ "ld1rqw { z2.s }, p0/Z, [x21]\n"
+ "ld1rqw { z3.s }, p0/Z, [x20]\n"
".inst 0x658abc21 // bfcvt z1.h, p7/M, z1.s\n"
".inst 0x658abc42 // bfcvt z2.h, p7/M, z2.s\n"
".inst 0x658abc63 // bfcvt z3.h, p7/M, z3.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "ld1h { z4.h }, p7/Z, [x12]\n"
- "ld1h { z5.h }, p7/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z4.h }, p7/Z, [x11]\n"
+ "ld1h { z5.h }, p7/Z, [x11, #1, MUL VL]\n"
"uzp1 z1.h, z1.h, z1.h\n"
"uzp1 z2.h, z2.h, z2.h\n"
- "ld1h { z6.h }, p7/Z, [x11]\n"
- "ld1h { z7.h }, p7/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x10]\n"
+ "ld1h { z7.h }, p7/Z, [x10, #1, MUL VL]\n"
"uzp1 z3.h, z3.h, z3.h\n"
"trn1 z0.d, z0.d, z1.d\n"
".inst 0x6464e408 // bfmmla z8.s, z0.h, z4.h\n"
- "addvl x12, x12, #2\n"
+ "addvl x11, x11, #2\n"
"trn1 z2.d, z2.d, z3.d\n"
".inst 0x6464e454 // bfmmla z20.s, z2.h, z4.h\n"
".inst 0x6465e40e // bfmmla z14.s, z0.h, z5.h\n"
- "ld1h { z4.h }, p7/Z, [x10]\n"
+ "ld1h { z4.h }, p7/Z, [x9]\n"
".inst 0x6465e45a // bfmmla z26.s, z2.h, z5.h\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
- "ld1h { z5.h }, p7/Z, [x10, #1, MUL VL]\n"
- "addvl x11, x11, #2\n"
+ "ld1h { z5.h }, p7/Z, [x9, #1, MUL VL]\n"
+ "addvl x10, x10, #2\n"
".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p7/Z, [x9]\n"
- "addvl x10, x10, #2\n"
+ "ld1h { z6.h }, p7/Z, [x28]\n"
+ "addvl x9, x9, #2\n"
".inst 0x6467e45b // bfmmla z27.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p7/Z, [x9, #1, MUL VL]\n"
+ "ld1h { z7.h }, p7/Z, [x28, #1, MUL VL]\n"
".inst 0x6464e40a // bfmmla z10.s, z0.h, z4.h\n"
- "addvl x9, x9, #2\n"
+ "addvl x28, x28, #2\n"
".inst 0x6464e456 // bfmmla z22.s, z2.h, z4.h\n"
".inst 0x6465e410 // bfmmla z16.s, z0.h, z5.h\n"
- "ld1h { z4.h }, p7/Z, [x28]\n"
+ "ld1h { z4.h }, p7/Z, [x27]\n"
".inst 0x6465e45c // bfmmla z28.s, z2.h, z5.h\n"
".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
- "ld1h { z5.h }, p7/Z, [x28, #1, MUL VL]\n"
- "addvl x28, x28, #2\n"
+ "ld1h { z5.h }, p7/Z, [x27, #1, MUL VL]\n"
+ "addvl x27, x27, #2\n"
".inst 0x6466e457 // bfmmla z23.s, z2.h, z6.h\n"
".inst 0x6467e411 // bfmmla z17.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p7/Z, [x27]\n"
+ "ld1h { z6.h }, p7/Z, [x26]\n"
".inst 0x6467e45d // bfmmla z29.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p7/Z, [x27, #1, MUL VL]\n"
+ "ld1h { z7.h }, p7/Z, [x26, #1, MUL VL]\n"
".inst 0x6464e40c // bfmmla z12.s, z0.h, z4.h\n"
- "addvl x27, x27, #2\n"
+ "addvl x26, x26, #2\n"
".inst 0x6464e458 // bfmmla z24.s, z2.h, z4.h\n"
".inst 0x6465e412 // bfmmla z18.s, z0.h, z5.h\n"
".inst 0x6465e45e // bfmmla z30.s, z2.h, z5.h\n"
@@ -1327,17 +1327,17 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
".inst 0x6467e413 // bfmmla z19.s, z0.h, z7.h\n"
".inst 0x6467e45f // bfmmla z31.s, z2.h, z7.h\n"
"54:" // Height 4: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 49b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x13, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x22, x12, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"uzp1 z4.d, z8.d, z14.d\n"
"uzp2 z8.d, z8.d, z14.d\n"
"uzp1 z14.d, z9.d, z15.d\n"
- "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"uzp2 z9.d, z9.d, z15.d\n"
"uzp1 z15.d, z10.d, z16.d\n"
"uzp2 z10.d, z10.d, z16.d\n"
@@ -1360,10 +1360,10 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"uzp1 z30.d, z25.d, z31.d\n"
"uzp2 z25.d, z25.d, z31.d\n"
"tbz %x[flags], #1, 55f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p7/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p7/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p7/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p7/Z, [x19]\n"
"fmin z4.s, p7/M, z4.s, z1.s\n"
"fmin z14.s, p7/M, z14.s, z1.s\n"
"fmin z15.s, p7/M, z15.s, z1.s\n"
@@ -1413,50 +1413,50 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"fmax z24.s, p7/M, z24.s, z0.s\n"
"fmax z25.s, p7/M, z25.s, z0.s\n"
"55:" // Height 4: No activation
- "st1w { z4.s }, p6, [x13]\n"
- "st1w { z14.s }, p5, [x13, #1, MUL VL]\n"
- "st1w { z15.s }, p4, [x13, #2, MUL VL]\n"
- "st1w { z16.s }, p3, [x13, #3, MUL VL]\n"
- "st1w { z17.s }, p2, [x13, #4, MUL VL]\n"
- "st1w { z18.s }, p1, [x13, #5, MUL VL]\n"
- "addvl x13, x13, #6\n"
- "st1w { z8.s }, p6, [x23]\n"
- "st1w { z9.s }, p5, [x23, #1, MUL VL]\n"
- "st1w { z10.s }, p4, [x23, #2, MUL VL]\n"
- "st1w { z11.s }, p3, [x23, #3, MUL VL]\n"
- "st1w { z12.s }, p2, [x23, #4, MUL VL]\n"
- "st1w { z13.s }, p1, [x23, #5, MUL VL]\n"
- "st1w { z19.s }, p6, [x22]\n"
- "st1w { z26.s }, p5, [x22, #1, MUL VL]\n"
- "st1w { z27.s }, p4, [x22, #2, MUL VL]\n"
- "st1w { z28.s }, p3, [x22, #3, MUL VL]\n"
- "st1w { z29.s }, p2, [x22, #4, MUL VL]\n"
- "st1w { z30.s }, p1, [x22, #5, MUL VL]\n"
- "st1w { z20.s }, p6, [x21]\n"
- "st1w { z21.s }, p5, [x21, #1, MUL VL]\n"
- "st1w { z22.s }, p4, [x21, #2, MUL VL]\n"
- "st1w { z23.s }, p3, [x21, #3, MUL VL]\n"
- "st1w { z24.s }, p2, [x21, #4, MUL VL]\n"
- "st1w { z25.s }, p1, [x21, #5, MUL VL]\n"
+ "st1w { z4.s }, p6, [x12]\n"
+ "st1w { z14.s }, p5, [x12, #1, MUL VL]\n"
+ "st1w { z15.s }, p4, [x12, #2, MUL VL]\n"
+ "st1w { z16.s }, p3, [x12, #3, MUL VL]\n"
+ "st1w { z17.s }, p2, [x12, #4, MUL VL]\n"
+ "st1w { z18.s }, p1, [x12, #5, MUL VL]\n"
+ "addvl x12, x12, #6\n"
+ "st1w { z8.s }, p6, [x22]\n"
+ "st1w { z9.s }, p5, [x22, #1, MUL VL]\n"
+ "st1w { z10.s }, p4, [x22, #2, MUL VL]\n"
+ "st1w { z11.s }, p3, [x22, #3, MUL VL]\n"
+ "st1w { z12.s }, p2, [x22, #4, MUL VL]\n"
+ "st1w { z13.s }, p1, [x22, #5, MUL VL]\n"
+ "st1w { z19.s }, p6, [x21]\n"
+ "st1w { z26.s }, p5, [x21, #1, MUL VL]\n"
+ "st1w { z27.s }, p4, [x21, #2, MUL VL]\n"
+ "st1w { z28.s }, p3, [x21, #3, MUL VL]\n"
+ "st1w { z29.s }, p2, [x21, #4, MUL VL]\n"
+ "st1w { z30.s }, p1, [x21, #5, MUL VL]\n"
+ "st1w { z20.s }, p6, [x20]\n"
+ "st1w { z21.s }, p5, [x20, #1, MUL VL]\n"
+ "st1w { z22.s }, p4, [x20, #2, MUL VL]\n"
+ "st1w { z23.s }, p3, [x20, #3, MUL VL]\n"
+ "st1w { z24.s }, p2, [x20, #4, MUL VL]\n"
+ "st1w { z25.s }, p1, [x20, #5, MUL VL]\n"
"56:" // Height 4: Writeback done
- "decw x14, ALL, MUL #6\n"
- "cmp x14, XZR\n"
+ "decw x13, ALL, MUL #6\n"
+ "cmp x13, XZR\n"
"bgt 44b\n"
"subs %x[M], %x[M], #0x4\n"
"beq 58f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 57f\n"
- "add x21, x21, #0x4\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x4\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"57:" // Update direct input
- "mov x20, #0x10\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x10\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"58:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
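
The register shift visible in every hunk above is the main substance of this revert: each scratch register moves down by one (x20 becomes x19, x21 becomes x20, and so on) so that x19 rejoins the allocation, and x19 replaces x15 in the clobber list. Since x19 is callee-saved under AAPCS64, declaring it clobbered is what obliges the compiler to save and restore it around the asm block. A minimal sketch of that contract, using a hypothetical helper rather than anything from this patch:

#include <cstdint>

int64_t add_via_x19(int64_t a, int64_t b) {
    int64_t result;
    __asm__ __volatile__(
        "mov x19, %x[a]\n"           // use x19 as scratch, like the kernels above
        "add %x[res], x19, %x[b]\n"
        : [res] "=r" (result)
        : [a] "r" (a), [b] "r" (b)
        : "x19"                      // callee-saved, so it must be declared clobbered
    );
    return result;
}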
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL/generic.cpp
index 7649336c36..1f1e08d3dd 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -53,33 +53,33 @@ void sve_ffinterleaved_bf16fp32_mmla_8x3VL(
__asm__ __volatile__(
"ptrue p0.b\n"
"1:" // Height loop
- "ldr x26, [%x[args_ptr], %[offsetof_Bpanel]]\n"
- "ldr x25, [%x[args_ptr], %[offsetof_N]]\n"
- "str x26, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x24, %x[Apanel]\n"
+ "ldr x25, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x24, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x25, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x23, %x[Apanel]\n"
"2:" // Width loop
- "ldr x26, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "cntw x23, ALL, MUL #2\n"
- "add x22, x26, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
- "add x20, x21, x20, LSL #1\n"
- "cmp x25, x23\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov %x[Apanel], x24\n"
+ "ldr x25, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cntw x22, ALL, MUL #2\n"
+ "add x21, x25, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
+ "add x19, x20, x19, LSL #1\n"
+ "cmp x24, x22\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov %x[Apanel], x23\n"
"bgt 3f\n"
- "decw x23\n"
- "cmp x25, x23\n"
- "mov x21, x26\n"
+ "decw x22\n"
+ "cmp x24, x22\n"
+ "mov x20, x25\n"
"bgt 3f\n"
- "mov x22, x26\n"
+ "mov x21, x25\n"
"3:" // B setup done
- "ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "cmp x20, #0x2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
+ "cmp x19, #0x2\n"
"mov z8.b, #0x0\n"
"mov z9.b, #0x0\n"
"mov z10.b, #0x0\n"
- "ld1h { z4.h }, p0/Z, [x26]\n"
+ "ld1h { z4.h }, p0/Z, [x25]\n"
"mov z11.b, #0x0\n"
"mov z12.b, #0x0\n"
"ld1rqh { z0.h }, p0/Z, [%x[Apanel]]\n"
@@ -88,13 +88,13 @@ void sve_ffinterleaved_bf16fp32_mmla_8x3VL(
"ld1rqh { z1.h }, p0/Z, [%x[Apanel], #16]\n"
"mov z15.b, #0x0\n"
"mov z16.b, #0x0\n"
- "ld1h { z5.h }, p0/Z, [x26, #1, MUL VL]\n"
+ "ld1h { z5.h }, p0/Z, [x25, #1, MUL VL]\n"
"mov z17.b, #0x0\n"
"mov z18.b, #0x0\n"
"ld1rqh { z2.h }, p0/Z, [%x[Apanel], #32]\n"
"mov z19.b, #0x0\n"
"mov z20.b, #0x0\n"
- "addvl x26, x26, #2\n"
+ "addvl x25, x25, #2\n"
"mov z21.b, #0x0\n"
"mov z22.b, #0x0\n"
"add %x[Apanel], %x[Apanel], #0x30\n"
@@ -114,25 +114,25 @@ void sve_ffinterleaved_bf16fp32_mmla_8x3VL(
".inst 0x6465e40b // bfmmla z11.s, z0.h, z5.h\n"
".inst 0x6464e42e // bfmmla z14.s, z1.h, z4.h\n"
".inst 0x6465e431 // bfmmla z17.s, z1.h, z5.h\n"
- "ld1h { z6.h }, p0/Z, [x22]\n"
+ "ld1h { z6.h }, p0/Z, [x21]\n"
".inst 0x6464e454 // bfmmla z20.s, z2.h, z4.h\n"
".inst 0x6465e457 // bfmmla z23.s, z2.h, z5.h\n"
- "ld1h { z7.h }, p0/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z7.h }, p0/Z, [x21, #1, MUL VL]\n"
".inst 0x6464e47a // bfmmla z26.s, z3.h, z4.h\n"
".inst 0x6465e47d // bfmmla z29.s, z3.h, z5.h\n"
- "ld1h { z4.h }, p0/Z, [x21]\n"
- "ld1h { z5.h }, p0/Z, [x21, #1, MUL VL]\n"
+ "ld1h { z4.h }, p0/Z, [x20]\n"
+ "ld1h { z5.h }, p0/Z, [x20, #1, MUL VL]\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
".inst 0x6467e40c // bfmmla z12.s, z0.h, z7.h\n"
".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
".inst 0x6467e432 // bfmmla z18.s, z1.h, z7.h\n"
- "sub x20, x20, #0x2\n"
+ "sub x19, x19, #0x2\n"
".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
".inst 0x6467e458 // bfmmla z24.s, z2.h, z7.h\n"
- "cmp x20, #0x2\n"
+ "cmp x19, #0x2\n"
".inst 0x6466e47b // bfmmla z27.s, z3.h, z6.h\n"
".inst 0x6467e47e // bfmmla z30.s, z3.h, z7.h\n"
- "ld1h { z6.h }, p0/Z, [x26]\n"
+ "ld1h { z6.h }, p0/Z, [x25]\n"
".inst 0x6464e40a // bfmmla z10.s, z0.h, z4.h\n"
".inst 0x6465e40d // bfmmla z13.s, z0.h, z5.h\n"
"ld1rqh { z0.h }, p0/Z, [%x[Apanel], #16]\n"
@@ -141,7 +141,7 @@ void sve_ffinterleaved_bf16fp32_mmla_8x3VL(
"ld1rqh { z1.h }, p0/Z, [%x[Apanel], #32]\n"
".inst 0x6464e456 // bfmmla z22.s, z2.h, z4.h\n"
".inst 0x6465e459 // bfmmla z25.s, z2.h, z5.h\n"
- "ld1h { z7.h }, p0/Z, [x26, #1, MUL VL]\n"
+ "ld1h { z7.h }, p0/Z, [x25, #1, MUL VL]\n"
".inst 0x6464e47c // bfmmla z28.s, z3.h, z4.h\n"
".inst 0x6465e47f // bfmmla z31.s, z3.h, z5.h\n"
"ld1rqh { z2.h }, p0/Z, [%x[Apanel], #48]\n"
@@ -150,25 +150,25 @@ void sve_ffinterleaved_bf16fp32_mmla_8x3VL(
".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
".inst 0x6467e431 // bfmmla z17.s, z1.h, z7.h\n"
- "ld1h { z4.h }, p0/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z4.h }, p0/Z, [x21, #2, MUL VL]\n"
".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
".inst 0x6467e457 // bfmmla z23.s, z2.h, z7.h\n"
- "ld1h { z5.h }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1h { z5.h }, p0/Z, [x21, #3, MUL VL]\n"
".inst 0x6466e47a // bfmmla z26.s, z3.h, z6.h\n"
".inst 0x6467e47d // bfmmla z29.s, z3.h, z7.h\n"
- "ld1h { z6.h }, p0/Z, [x21, #2, MUL VL]\n"
- "ld1h { z7.h }, p0/Z, [x21, #3, MUL VL]\n"
+ "ld1h { z6.h }, p0/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z7.h }, p0/Z, [x20, #3, MUL VL]\n"
".inst 0x6464e409 // bfmmla z9.s, z0.h, z4.h\n"
".inst 0x6465e40c // bfmmla z12.s, z0.h, z5.h\n"
".inst 0x6464e42f // bfmmla z15.s, z1.h, z4.h\n"
".inst 0x6465e432 // bfmmla z18.s, z1.h, z5.h\n"
- "addvl x22, x22, #4\n"
+ "addvl x21, x21, #4\n"
".inst 0x6464e455 // bfmmla z21.s, z2.h, z4.h\n"
".inst 0x6465e458 // bfmmla z24.s, z2.h, z5.h\n"
- "addvl x21, x21, #4\n"
+ "addvl x20, x20, #4\n"
".inst 0x6464e47b // bfmmla z27.s, z3.h, z4.h\n"
".inst 0x6465e47e // bfmmla z30.s, z3.h, z5.h\n"
- "ld1h { z4.h }, p0/Z, [x26, #2, MUL VL]\n"
+ "ld1h { z4.h }, p0/Z, [x25, #2, MUL VL]\n"
".inst 0x6466e40a // bfmmla z10.s, z0.h, z6.h\n"
".inst 0x6467e40d // bfmmla z13.s, z0.h, z7.h\n"
"ld1rqh { z0.h }, p0/Z, [%x[Apanel], #80]\n"
@@ -177,12 +177,12 @@ void sve_ffinterleaved_bf16fp32_mmla_8x3VL(
"ld1rqh { z1.h }, p0/Z, [%x[Apanel], #96]\n"
".inst 0x6466e456 // bfmmla z22.s, z2.h, z6.h\n"
".inst 0x6467e459 // bfmmla z25.s, z2.h, z7.h\n"
- "ld1h { z5.h }, p0/Z, [x26, #3, MUL VL]\n"
+ "ld1h { z5.h }, p0/Z, [x25, #3, MUL VL]\n"
".inst 0x6466e47c // bfmmla z28.s, z3.h, z6.h\n"
".inst 0x6467e47f // bfmmla z31.s, z3.h, z7.h\n"
"ld1rqh { z2.h }, p0/Z, [%x[Apanel], #112]\n"
"add %x[Apanel], %x[Apanel], #0x80\n"
- "addvl x26, x26, #4\n"
+ "addvl x25, x25, #4\n"
"bge 4b\n"
"5:" // main loop skip
"ld1rqh { z3.h }, p0/Z, [%x[Apanel]]\n"
@@ -190,14 +190,14 @@ void sve_ffinterleaved_bf16fp32_mmla_8x3VL(
".inst 0x6465e40b // bfmmla z11.s, z0.h, z5.h\n"
".inst 0x6464e42e // bfmmla z14.s, z1.h, z4.h\n"
".inst 0x6465e431 // bfmmla z17.s, z1.h, z5.h\n"
- "ld1h { z6.h }, p0/Z, [x22]\n"
+ "ld1h { z6.h }, p0/Z, [x21]\n"
".inst 0x6464e454 // bfmmla z20.s, z2.h, z4.h\n"
".inst 0x6465e457 // bfmmla z23.s, z2.h, z5.h\n"
- "ld1h { z7.h }, p0/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z7.h }, p0/Z, [x21, #1, MUL VL]\n"
".inst 0x6464e47a // bfmmla z26.s, z3.h, z4.h\n"
".inst 0x6465e47d // bfmmla z29.s, z3.h, z5.h\n"
- "ld1h { z4.h }, p0/Z, [x21]\n"
- "ld1h { z5.h }, p0/Z, [x21, #1, MUL VL]\n"
+ "ld1h { z4.h }, p0/Z, [x20]\n"
+ "ld1h { z5.h }, p0/Z, [x20, #1, MUL VL]\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
".inst 0x6467e40c // bfmmla z12.s, z0.h, z7.h\n"
".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
@@ -205,10 +205,10 @@ void sve_ffinterleaved_bf16fp32_mmla_8x3VL(
"add %x[Apanel], %x[Apanel], #0x10\n"
".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
".inst 0x6467e458 // bfmmla z24.s, z2.h, z7.h\n"
- "addvl x22, x22, #2\n"
+ "addvl x21, x21, #2\n"
".inst 0x6466e47b // bfmmla z27.s, z3.h, z6.h\n"
".inst 0x6467e47e // bfmmla z30.s, z3.h, z7.h\n"
- "addvl x21, x21, #2\n"
+ "addvl x20, x20, #2\n"
".inst 0x6464e40a // bfmmla z10.s, z0.h, z4.h\n"
".inst 0x6465e40d // bfmmla z13.s, z0.h, z5.h\n"
".inst 0x6464e430 // bfmmla z16.s, z1.h, z4.h\n"
@@ -217,25 +217,25 @@ void sve_ffinterleaved_bf16fp32_mmla_8x3VL(
".inst 0x6465e459 // bfmmla z25.s, z2.h, z5.h\n"
".inst 0x6464e47c // bfmmla z28.s, z3.h, z4.h\n"
".inst 0x6465e47f // bfmmla z31.s, z3.h, z5.h\n"
- "cbz x20, 6f\n"
- "ld1h { z6.h }, p0/Z, [x26]\n"
+ "cbz x19, 6f\n"
+ "ld1h { z6.h }, p0/Z, [x25]\n"
"ld1rqh { z0.h }, p0/Z, [%x[Apanel]]\n"
".inst 0x6466e408 // bfmmla z8.s, z0.h, z6.h\n"
"ld1rqh { z1.h }, p0/Z, [%x[Apanel], #16]\n"
- "ld1h { z7.h }, p0/Z, [x26, #1, MUL VL]\n"
+ "ld1h { z7.h }, p0/Z, [x25, #1, MUL VL]\n"
".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
"ld1rqh { z2.h }, p0/Z, [%x[Apanel], #32]\n"
"ld1rqh { z3.h }, p0/Z, [%x[Apanel], #48]\n"
".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
".inst 0x6467e431 // bfmmla z17.s, z1.h, z7.h\n"
".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
- "ld1h { z4.h }, p0/Z, [x22]\n"
+ "ld1h { z4.h }, p0/Z, [x21]\n"
".inst 0x6467e457 // bfmmla z23.s, z2.h, z7.h\n"
".inst 0x6466e47a // bfmmla z26.s, z3.h, z6.h\n"
- "ld1h { z5.h }, p0/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z5.h }, p0/Z, [x21, #1, MUL VL]\n"
".inst 0x6467e47d // bfmmla z29.s, z3.h, z7.h\n"
- "ld1h { z6.h }, p0/Z, [x21]\n"
- "ld1h { z7.h }, p0/Z, [x21, #1, MUL VL]\n"
+ "ld1h { z6.h }, p0/Z, [x20]\n"
+ "ld1h { z7.h }, p0/Z, [x20, #1, MUL VL]\n"
".inst 0x6464e409 // bfmmla z9.s, z0.h, z4.h\n"
".inst 0x6465e40c // bfmmla z12.s, z0.h, z5.h\n"
"add %x[Apanel], %x[Apanel], #0x40\n"
@@ -254,7 +254,7 @@ void sve_ffinterleaved_bf16fp32_mmla_8x3VL(
".inst 0x6466e47c // bfmmla z28.s, z3.h, z6.h\n"
".inst 0x6467e47f // bfmmla z31.s, z3.h, z7.h\n"
"6:" // multiply loop done
- "decw x25, ALL, MUL #3\n"
+ "decw x24, ALL, MUL #3\n"
"uzp1 z4.d, z8.d, z11.d\n"
"uzp2 z8.d, z8.d, z11.d\n"
"uzp1 z11.d, z9.d, z12.d\n"
@@ -268,7 +268,7 @@ void sve_ffinterleaved_bf16fp32_mmla_8x3VL(
"uzp2 z14.d, z14.d, z17.d\n"
"st1w { z8.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
"uzp1 z17.d, z15.d, z18.d\n"
- "cmp x25, XZR\n"
+ "cmp x24, XZR\n"
"st1w { z9.s }, p0, [%x[Cpanel], #4, MUL VL]\n"
"uzp2 z15.d, z15.d, z18.d\n"
"uzp1 z18.d, z16.d, z19.d\n"
@@ -311,7 +311,7 @@ void sve_ffinterleaved_bf16fp32_mmla_8x3VL(
"bne 1b\n"
: [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
: [args_ptr] "r" (&ka), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr))
- : "cc", "memory", "p0", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
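
The uzp1/uzp2 sequences near the end of the kernel above exist because BFMMLA accumulates 2x2 output tiles per 128-bit segment rather than plain rows; unzipping accumulator pairs on .d elements recovers row-major data before the stores. A reduced sketch of that de-interleave step using ACLE intrinsics (illustrative only, not code from this file):

#include <arm_sve.h>

// Two MMLA accumulators hold interleaved 2x2 tiles; uzp1/uzp2 on 64-bit
// elements split them back into the two output rows, as in the asm above.
void split_rows(svfloat32_t acc0, svfloat32_t acc1,
                svfloat32_t *row0, svfloat32_t *row1) {
    svfloat64_t a = svreinterpret_f64_f32(acc0);
    svfloat64_t b = svreinterpret_f64_f32(acc1);
    *row0 = svreinterpret_f32_f64(svuzp1_f64(a, b));
    *row1 = svreinterpret_f32_f64(svuzp2_f64(a, b));
}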
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL/a64fx.cpp
index 1d502f5354..cd4da2c124 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL/a64fx.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL/a64fx.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -52,39 +52,39 @@ void sve_ffinterleaved_fp16_mla_8x3VL_a64fx(
__asm__ __volatile__(
"ptrue p0.b\n"
"1:" // Height loop
- "ldr x26, [%x[args_ptr], %[offsetof_Bpanel]]\n"
- "ldr x25, [%x[args_ptr], %[offsetof_N]]\n"
- "str x26, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x24, %x[Apanel]\n"
+ "ldr x25, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x24, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x25, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x23, %x[Apanel]\n"
"2:" // Width loop
- "ldr x26, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "cnth x23, ALL, MUL #2\n"
- "add x22, x26, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
- "add x20, x21, x20, LSL #1\n"
- "cmp x25, x23\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov %x[Apanel], x24\n"
+ "ldr x25, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cnth x22, ALL, MUL #2\n"
+ "add x21, x25, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
+ "add x19, x20, x19, LSL #1\n"
+ "cmp x24, x22\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov %x[Apanel], x23\n"
"bgt 3f\n"
- "dech x23\n"
- "cmp x25, x23\n"
- "mov x21, x26\n"
+ "dech x22\n"
+ "cmp x24, x22\n"
+ "mov x20, x25\n"
"bgt 3f\n"
- "mov x22, x26\n"
+ "mov x21, x25\n"
"3:" // B setup done
- "ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "cmp x20, #0x2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
+ "cmp x19, #0x2\n"
"mov z8.b, #0x0\n"
"mov z9.b, #0x0\n"
"mov z10.b, #0x0\n"
- "ld1h { z0.h }, p0/Z, [x26]\n"
+ "ld1h { z0.h }, p0/Z, [x25]\n"
"mov z11.b, #0x0\n"
"mov z12.b, #0x0\n"
- "ld1h { z1.h }, p0/Z, [x22]\n"
+ "ld1h { z1.h }, p0/Z, [x21]\n"
"mov z13.b, #0x0\n"
"mov z14.b, #0x0\n"
- "ld1h { z2.h }, p0/Z, [x21]\n"
+ "ld1h { z2.h }, p0/Z, [x20]\n"
"mov z15.b, #0x0\n"
"mov z16.b, #0x0\n"
"ld1rh { z3.h }, p0/Z, [%x[Apanel]]\n"
@@ -110,7 +110,7 @@ void sve_ffinterleaved_fp16_mla_8x3VL_a64fx(
"4:" // main loop head
"fmla z8.h, p0/M, z0.h, z3.h\n"
"fmla z9.h, p0/M, z1.h, z3.h\n"
- "sub x20, x20, #0x2\n"
+ "sub x19, x19, #0x2\n"
"fmla z10.h, p0/M, z2.h, z3.h\n"
"ld1rh { z3.h }, p0/Z, [%x[Apanel], #8]\n"
"fmla z11.h, p0/M, z0.h, z4.h\n"
@@ -119,7 +119,7 @@ void sve_ffinterleaved_fp16_mla_8x3VL_a64fx(
"ld1rh { z4.h }, p0/Z, [%x[Apanel], #10]\n"
"fmla z14.h, p0/M, z0.h, z5.h\n"
"fmla z15.h, p0/M, z1.h, z5.h\n"
- "cmp x20, #0x2\n"
+ "cmp x19, #0x2\n"
"fmla z16.h, p0/M, z2.h, z5.h\n"
"ld1rh { z5.h }, p0/Z, [%x[Apanel], #12]\n"
"fmla z17.h, p0/M, z0.h, z6.h\n"
@@ -139,11 +139,11 @@ void sve_ffinterleaved_fp16_mla_8x3VL_a64fx(
"fmla z28.h, p0/M, z2.h, z5.h\n"
"ld1rh { z5.h }, p0/Z, [%x[Apanel], #20]\n"
"fmla z29.h, p0/M, z0.h, z6.h\n"
- "ld1h { z0.h }, p0/Z, [x26, #1, MUL VL]\n"
+ "ld1h { z0.h }, p0/Z, [x25, #1, MUL VL]\n"
"fmla z30.h, p0/M, z1.h, z6.h\n"
"fmla z31.h, p0/M, z2.h, z6.h\n"
- "ld1h { z1.h }, p0/Z, [x22, #1, MUL VL]\n"
- "ld1h { z2.h }, p0/Z, [x21, #1, MUL VL]\n"
+ "ld1h { z1.h }, p0/Z, [x21, #1, MUL VL]\n"
+ "ld1h { z2.h }, p0/Z, [x20, #1, MUL VL]\n"
"fmla z8.h, p0/M, z0.h, z3.h\n"
"ld1rh { z6.h }, p0/Z, [%x[Apanel], #22]\n"
"fmla z9.h, p0/M, z1.h, z3.h\n"
@@ -155,15 +155,15 @@ void sve_ffinterleaved_fp16_mla_8x3VL_a64fx(
"ld1rh { z4.h }, p0/Z, [%x[Apanel], #26]\n"
"fmla z14.h, p0/M, z0.h, z5.h\n"
"fmla z15.h, p0/M, z1.h, z5.h\n"
- "addvl x26, x26, #2\n"
+ "addvl x25, x25, #2\n"
"fmla z16.h, p0/M, z2.h, z5.h\n"
"ld1rh { z5.h }, p0/Z, [%x[Apanel], #28]\n"
"fmla z17.h, p0/M, z0.h, z6.h\n"
"fmla z18.h, p0/M, z1.h, z6.h\n"
"fmla z19.h, p0/M, z2.h, z6.h\n"
"ld1rh { z6.h }, p0/Z, [%x[Apanel], #30]\n"
- "addvl x22, x22, #2\n"
"addvl x21, x21, #2\n"
+ "addvl x20, x20, #2\n"
"add %x[Apanel], %x[Apanel], #0x20\n"
"fmla z20.h, p0/M, z0.h, z3.h\n"
"fmla z21.h, p0/M, z1.h, z3.h\n"
@@ -178,17 +178,17 @@ void sve_ffinterleaved_fp16_mla_8x3VL_a64fx(
"fmla z28.h, p0/M, z2.h, z5.h\n"
"ld1rh { z5.h }, p0/Z, [%x[Apanel], #4]\n"
"fmla z29.h, p0/M, z0.h, z6.h\n"
- "ld1h { z0.h }, p0/Z, [x26]\n"
+ "ld1h { z0.h }, p0/Z, [x25]\n"
"fmla z30.h, p0/M, z1.h, z6.h\n"
"fmla z31.h, p0/M, z2.h, z6.h\n"
- "ld1h { z1.h }, p0/Z, [x22]\n"
- "ld1h { z2.h }, p0/Z, [x21]\n"
+ "ld1h { z1.h }, p0/Z, [x21]\n"
+ "ld1h { z2.h }, p0/Z, [x20]\n"
"ld1rh { z6.h }, p0/Z, [%x[Apanel], #6]\n"
"bge 4b\n"
"5:" // main loop skip
"fmla z8.h, p0/M, z0.h, z3.h\n"
"fmla z9.h, p0/M, z1.h, z3.h\n"
- "addvl x26, x26, #1\n"
+ "addvl x25, x25, #1\n"
"fmla z10.h, p0/M, z2.h, z3.h\n"
"ld1rh { z3.h }, p0/Z, [%x[Apanel], #8]\n"
"fmla z11.h, p0/M, z0.h, z4.h\n"
@@ -197,7 +197,7 @@ void sve_ffinterleaved_fp16_mla_8x3VL_a64fx(
"ld1rh { z4.h }, p0/Z, [%x[Apanel], #10]\n"
"fmla z14.h, p0/M, z0.h, z5.h\n"
"fmla z15.h, p0/M, z1.h, z5.h\n"
- "addvl x22, x22, #1\n"
+ "addvl x21, x21, #1\n"
"fmla z16.h, p0/M, z2.h, z5.h\n"
"ld1rh { z5.h }, p0/Z, [%x[Apanel], #12]\n"
"fmla z17.h, p0/M, z0.h, z6.h\n"
@@ -206,7 +206,7 @@ void sve_ffinterleaved_fp16_mla_8x3VL_a64fx(
"ld1rh { z6.h }, p0/Z, [%x[Apanel], #14]\n"
"fmla z20.h, p0/M, z0.h, z3.h\n"
"fmla z21.h, p0/M, z1.h, z3.h\n"
- "addvl x21, x21, #1\n"
+ "addvl x20, x20, #1\n"
"fmla z22.h, p0/M, z2.h, z3.h\n"
"fmla z23.h, p0/M, z0.h, z4.h\n"
"add %x[Apanel], %x[Apanel], #0x10\n"
@@ -218,10 +218,10 @@ void sve_ffinterleaved_fp16_mla_8x3VL_a64fx(
"fmla z29.h, p0/M, z0.h, z6.h\n"
"fmla z30.h, p0/M, z1.h, z6.h\n"
"fmla z31.h, p0/M, z2.h, z6.h\n"
- "cbz x20, 6f\n"
- "ld1h { z0.h }, p0/Z, [x26]\n"
- "ld1h { z1.h }, p0/Z, [x22]\n"
- "ld1h { z2.h }, p0/Z, [x21]\n"
+ "cbz x19, 6f\n"
+ "ld1h { z0.h }, p0/Z, [x25]\n"
+ "ld1h { z1.h }, p0/Z, [x21]\n"
+ "ld1h { z2.h }, p0/Z, [x20]\n"
"ld1rh { z3.h }, p0/Z, [%x[Apanel]]\n"
"fmla z8.h, p0/M, z0.h, z3.h\n"
"ld1rh { z4.h }, p0/Z, [%x[Apanel], #2]\n"
@@ -256,9 +256,9 @@ void sve_ffinterleaved_fp16_mla_8x3VL_a64fx(
"fmla z30.h, p0/M, z1.h, z6.h\n"
"fmla z31.h, p0/M, z2.h, z6.h\n"
"6:" // multiply loop done
- "dech x25, ALL, MUL #3\n"
+ "dech x24, ALL, MUL #3\n"
"st1h { z8.h }, p0, [%x[Cpanel]]\n"
- "cmp x25, XZR\n"
+ "cmp x24, XZR\n"
"st1h { z9.h }, p0, [%x[Cpanel], #1, MUL VL]\n"
"st1h { z10.h }, p0, [%x[Cpanel], #2, MUL VL]\n"
"st1h { z11.h }, p0, [%x[Cpanel], #3, MUL VL]\n"
@@ -289,7 +289,7 @@ void sve_ffinterleaved_fp16_mla_8x3VL_a64fx(
"bne 1b\n"
: [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
: [args_ptr] "r" (&ka), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr))
- : "cc", "memory", "p0", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
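
Every kernel in this family reads its parameters the same way: member offsets of the kernel-arguments struct are passed into the assembly as immediate "I" operands and fetched with a single ldr, as in the ldr x19, [%x[args_ptr], %[offsetof_K]] lines above. A reduced sketch of the mechanism; this KernelArgs layout is a simplified stand-in, not the real one:

#include <cstddef>
#include <cstdint>

struct KernelArgs {        // hypothetical, simplified layout
    const void *B_ptr;
    size_t      N;
    size_t      K;
};

size_t load_K(const KernelArgs *ka) {
    size_t k;
    __asm__ __volatile__(
        "ldr %x[k], [%x[args_ptr], %[offsetof_K]]\n"  // constant-offset load
        : [k] "=r" (k)
        : [args_ptr] "r" (ka), [offsetof_K] "I" (offsetof(KernelArgs, K))
        : "memory"
    );
    return k;
}

Because the offsets are compile-time immediates, each parameter costs one load with no address arithmetic inside these tight loops.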
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL/generic.cpp
index de219aa2bf..431c2a88f5 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -52,42 +52,42 @@ void sve_ffinterleaved_fp16_mla_8x3VL(
__asm__ __volatile__(
"ptrue p0.b\n"
"1:" // Height loop
- "ldr x26, [%x[args_ptr], %[offsetof_Bpanel]]\n"
- "ldr x25, [%x[args_ptr], %[offsetof_N]]\n"
- "str x26, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x24, %x[Apanel]\n"
+ "ldr x25, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x24, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x25, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x23, %x[Apanel]\n"
"2:" // Width loop
- "ldr x26, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "cnth x23, ALL, MUL #2\n"
- "add x22, x26, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
- "add x20, x21, x20, LSL #1\n"
- "cmp x25, x23\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov %x[Apanel], x24\n"
+ "ldr x25, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cnth x22, ALL, MUL #2\n"
+ "add x21, x25, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
+ "add x19, x20, x19, LSL #1\n"
+ "cmp x24, x22\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov %x[Apanel], x23\n"
"bgt 3f\n"
- "dech x23\n"
- "cmp x25, x23\n"
- "mov x21, x26\n"
+ "dech x22\n"
+ "cmp x24, x22\n"
+ "mov x20, x25\n"
"bgt 3f\n"
- "mov x22, x26\n"
+ "mov x21, x25\n"
"3:" // B setup done
- "ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "cmp x20, #0x2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
+ "cmp x19, #0x2\n"
"mov z8.b, #0x0\n"
"mov z9.b, #0x0\n"
"mov z10.b, #0x0\n"
"ld1rqh { z0.h }, p0/Z, [%x[Apanel]]\n"
"mov z11.b, #0x0\n"
"mov z12.b, #0x0\n"
- "ld1h { z2.h }, p0/Z, [x26]\n"
+ "ld1h { z2.h }, p0/Z, [x25]\n"
"mov z13.b, #0x0\n"
"mov z14.b, #0x0\n"
- "ld1h { z3.h }, p0/Z, [x22]\n"
+ "ld1h { z3.h }, p0/Z, [x21]\n"
"mov z15.b, #0x0\n"
"mov z16.b, #0x0\n"
- "ld1h { z4.h }, p0/Z, [x21]\n"
+ "ld1h { z4.h }, p0/Z, [x20]\n"
"mov z17.b, #0x0\n"
"mov z18.b, #0x0\n"
"mov z19.b, #0x0\n"
@@ -110,43 +110,43 @@ void sve_ffinterleaved_fp16_mla_8x3VL(
"ld1rqh { z1.h }, p0/Z, [%x[Apanel], #16]\n"
"fmla z14.h, z2.h, z0.h[2]\n"
"fmla z17.h, z2.h, z0.h[3]\n"
- "ld1h { z5.h }, p0/Z, [x26, #1, MUL VL]\n"
+ "ld1h { z5.h }, p0/Z, [x25, #1, MUL VL]\n"
"fmla z20.h, z2.h, z0.h[4]\n"
"fmla z23.h, z2.h, z0.h[5]\n"
- "ld1h { z6.h }, p0/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z6.h }, p0/Z, [x21, #1, MUL VL]\n"
"fmla z26.h, z2.h, z0.h[6]\n"
"fmla z29.h, z2.h, z0.h[7]\n"
- "ld1h { z7.h }, p0/Z, [x21, #1, MUL VL]\n"
+ "ld1h { z7.h }, p0/Z, [x20, #1, MUL VL]\n"
"fmla z9.h, z3.h, z0.h[0]\n"
"fmla z12.h, z3.h, z0.h[1]\n"
- "addvl x26, x26, #2\n"
+ "addvl x25, x25, #2\n"
"fmla z15.h, z3.h, z0.h[2]\n"
"fmla z18.h, z3.h, z0.h[3]\n"
- "addvl x22, x22, #2\n"
+ "addvl x21, x21, #2\n"
"fmla z21.h, z3.h, z0.h[4]\n"
"fmla z24.h, z3.h, z0.h[5]\n"
- "addvl x21, x21, #2\n"
+ "addvl x20, x20, #2\n"
"fmla z27.h, z3.h, z0.h[6]\n"
"fmla z30.h, z3.h, z0.h[7]\n"
- "sub x20, x20, #0x2\n"
+ "sub x19, x19, #0x2\n"
"fmla z10.h, z4.h, z0.h[0]\n"
"fmla z13.h, z4.h, z0.h[1]\n"
- "cmp x20, #0x2\n"
+ "cmp x19, #0x2\n"
"fmla z16.h, z4.h, z0.h[2]\n"
"fmla z19.h, z4.h, z0.h[3]\n"
"add %x[Apanel], %x[Apanel], #0x20\n"
"fmla z22.h, z4.h, z0.h[4]\n"
"fmla z25.h, z4.h, z0.h[5]\n"
- "ld1h { z2.h }, p0/Z, [x26]\n"
+ "ld1h { z2.h }, p0/Z, [x25]\n"
"fmla z28.h, z4.h, z0.h[6]\n"
"fmla z31.h, z4.h, z0.h[7]\n"
"ld1rqh { z0.h }, p0/Z, [%x[Apanel]]\n"
"fmla z8.h, z5.h, z1.h[0]\n"
"fmla z11.h, z5.h, z1.h[1]\n"
- "ld1h { z3.h }, p0/Z, [x22]\n"
+ "ld1h { z3.h }, p0/Z, [x21]\n"
"fmla z14.h, z5.h, z1.h[2]\n"
"fmla z17.h, z5.h, z1.h[3]\n"
- "ld1h { z4.h }, p0/Z, [x21]\n"
+ "ld1h { z4.h }, p0/Z, [x20]\n"
"fmla z20.h, z5.h, z1.h[4]\n"
"fmla z23.h, z5.h, z1.h[5]\n"
"fmla z26.h, z5.h, z1.h[6]\n"
@@ -174,13 +174,13 @@ void sve_ffinterleaved_fp16_mla_8x3VL(
"add %x[Apanel], %x[Apanel], #0x10\n"
"fmla z14.h, z2.h, z0.h[2]\n"
"fmla z17.h, z2.h, z0.h[3]\n"
- "addvl x26, x26, #1\n"
+ "addvl x25, x25, #1\n"
"fmla z20.h, z2.h, z0.h[4]\n"
"fmla z23.h, z2.h, z0.h[5]\n"
- "addvl x22, x22, #1\n"
+ "addvl x21, x21, #1\n"
"fmla z26.h, z2.h, z0.h[6]\n"
"fmla z29.h, z2.h, z0.h[7]\n"
- "addvl x21, x21, #1\n"
+ "addvl x20, x20, #1\n"
"fmla z9.h, z3.h, z0.h[0]\n"
"fmla z12.h, z3.h, z0.h[1]\n"
"fmla z15.h, z3.h, z0.h[2]\n"
@@ -197,12 +197,12 @@ void sve_ffinterleaved_fp16_mla_8x3VL(
"fmla z25.h, z4.h, z0.h[5]\n"
"fmla z28.h, z4.h, z0.h[6]\n"
"fmla z31.h, z4.h, z0.h[7]\n"
- "cbz x20, 6f\n"
+ "cbz x19, 6f\n"
"ld1rqh { z0.h }, p0/Z, [%x[Apanel]]\n"
- "ld1h { z5.h }, p0/Z, [x26]\n"
+ "ld1h { z5.h }, p0/Z, [x25]\n"
"fmla z8.h, z5.h, z0.h[0]\n"
- "ld1h { z6.h }, p0/Z, [x22]\n"
- "ld1h { z7.h }, p0/Z, [x21]\n"
+ "ld1h { z6.h }, p0/Z, [x21]\n"
+ "ld1h { z7.h }, p0/Z, [x20]\n"
"fmla z11.h, z5.h, z0.h[1]\n"
"fmla z14.h, z5.h, z0.h[2]\n"
"fmla z17.h, z5.h, z0.h[3]\n"
@@ -228,9 +228,9 @@ void sve_ffinterleaved_fp16_mla_8x3VL(
"fmla z28.h, z7.h, z0.h[6]\n"
"fmla z31.h, z7.h, z0.h[7]\n"
"6:" // multiply loop done
- "dech x25, ALL, MUL #3\n"
+ "dech x24, ALL, MUL #3\n"
"st1h { z8.h }, p0, [%x[Cpanel]]\n"
- "cmp x25, XZR\n"
+ "cmp x24, XZR\n"
"st1h { z9.h }, p0, [%x[Cpanel], #1, MUL VL]\n"
"st1h { z10.h }, p0, [%x[Cpanel], #2, MUL VL]\n"
"st1h { z11.h }, p0, [%x[Cpanel], #3, MUL VL]\n"
@@ -261,7 +261,7 @@ void sve_ffinterleaved_fp16_mla_8x3VL(
"bne 1b\n"
: [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
: [args_ptr] "r" (&ka), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr))
- : "cc", "memory", "p0", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL/a64fx.cpp
index 8c8b6b0675..aecf7f94c9 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL/a64fx.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL/a64fx.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -52,39 +52,39 @@ void sve_ffinterleaved_fp32_mla_8x3VL_a64fx(
__asm__ __volatile__(
"ptrue p0.b\n"
"1:" // Height loop
- "ldr x26, [%x[args_ptr], %[offsetof_Bpanel]]\n"
- "ldr x25, [%x[args_ptr], %[offsetof_N]]\n"
- "str x26, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x24, %x[Apanel]\n"
+ "ldr x25, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x24, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x25, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x23, %x[Apanel]\n"
"2:" // Width loop
- "ldr x26, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "cntw x23, ALL, MUL #2\n"
- "add x22, x26, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
- "cmp x25, x23\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov %x[Apanel], x24\n"
+ "ldr x25, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cntw x22, ALL, MUL #2\n"
+ "add x21, x25, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
+ "add x19, x20, x19, LSL #2\n"
+ "cmp x24, x22\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov %x[Apanel], x23\n"
"bgt 3f\n"
- "decw x23\n"
- "cmp x25, x23\n"
- "mov x21, x26\n"
+ "decw x22\n"
+ "cmp x24, x22\n"
+ "mov x20, x25\n"
"bgt 3f\n"
- "mov x22, x26\n"
+ "mov x21, x25\n"
"3:" // B setup done
- "ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "cmp x20, #0x2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
+ "cmp x19, #0x2\n"
"mov z8.b, #0x0\n"
"mov z9.b, #0x0\n"
"mov z10.b, #0x0\n"
- "ld1w { z0.s }, p0/Z, [x26]\n"
+ "ld1w { z0.s }, p0/Z, [x25]\n"
"mov z11.b, #0x0\n"
"mov z12.b, #0x0\n"
- "ld1w { z1.s }, p0/Z, [x22]\n"
+ "ld1w { z1.s }, p0/Z, [x21]\n"
"mov z13.b, #0x0\n"
"mov z14.b, #0x0\n"
- "ld1w { z2.s }, p0/Z, [x21]\n"
+ "ld1w { z2.s }, p0/Z, [x20]\n"
"mov z15.b, #0x0\n"
"mov z16.b, #0x0\n"
"ld1rw { z3.s }, p0/Z, [%x[Apanel]]\n"
@@ -110,7 +110,7 @@ void sve_ffinterleaved_fp32_mla_8x3VL_a64fx(
"4:" // main loop head
"fmla z8.s, p0/M, z0.s, z3.s\n"
"fmla z9.s, p0/M, z1.s, z3.s\n"
- "sub x20, x20, #0x2\n"
+ "sub x19, x19, #0x2\n"
"fmla z10.s, p0/M, z2.s, z3.s\n"
"ld1rw { z3.s }, p0/Z, [%x[Apanel], #16]\n"
"fmla z11.s, p0/M, z0.s, z4.s\n"
@@ -119,7 +119,7 @@ void sve_ffinterleaved_fp32_mla_8x3VL_a64fx(
"ld1rw { z4.s }, p0/Z, [%x[Apanel], #20]\n"
"fmla z14.s, p0/M, z0.s, z5.s\n"
"fmla z15.s, p0/M, z1.s, z5.s\n"
- "cmp x20, #0x2\n"
+ "cmp x19, #0x2\n"
"fmla z16.s, p0/M, z2.s, z5.s\n"
"ld1rw { z5.s }, p0/Z, [%x[Apanel], #24]\n"
"fmla z17.s, p0/M, z0.s, z6.s\n"
@@ -139,11 +139,11 @@ void sve_ffinterleaved_fp32_mla_8x3VL_a64fx(
"fmla z28.s, p0/M, z2.s, z5.s\n"
"ld1rw { z5.s }, p0/Z, [%x[Apanel], #40]\n"
"fmla z29.s, p0/M, z0.s, z6.s\n"
- "ld1w { z0.s }, p0/Z, [x26, #1, MUL VL]\n"
+ "ld1w { z0.s }, p0/Z, [x25, #1, MUL VL]\n"
"fmla z30.s, p0/M, z1.s, z6.s\n"
"fmla z31.s, p0/M, z2.s, z6.s\n"
- "ld1w { z1.s }, p0/Z, [x22, #1, MUL VL]\n"
- "ld1w { z2.s }, p0/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z1.s }, p0/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z2.s }, p0/Z, [x20, #1, MUL VL]\n"
"fmla z8.s, p0/M, z0.s, z3.s\n"
"ld1rw { z6.s }, p0/Z, [%x[Apanel], #44]\n"
"fmla z9.s, p0/M, z1.s, z3.s\n"
@@ -155,15 +155,15 @@ void sve_ffinterleaved_fp32_mla_8x3VL_a64fx(
"ld1rw { z4.s }, p0/Z, [%x[Apanel], #52]\n"
"fmla z14.s, p0/M, z0.s, z5.s\n"
"fmla z15.s, p0/M, z1.s, z5.s\n"
- "addvl x26, x26, #2\n"
+ "addvl x25, x25, #2\n"
"fmla z16.s, p0/M, z2.s, z5.s\n"
"ld1rw { z5.s }, p0/Z, [%x[Apanel], #56]\n"
"fmla z17.s, p0/M, z0.s, z6.s\n"
"fmla z18.s, p0/M, z1.s, z6.s\n"
"fmla z19.s, p0/M, z2.s, z6.s\n"
"ld1rw { z6.s }, p0/Z, [%x[Apanel], #60]\n"
- "addvl x22, x22, #2\n"
"addvl x21, x21, #2\n"
+ "addvl x20, x20, #2\n"
"add %x[Apanel], %x[Apanel], #0x40\n"
"fmla z20.s, p0/M, z0.s, z3.s\n"
"fmla z21.s, p0/M, z1.s, z3.s\n"
@@ -178,17 +178,17 @@ void sve_ffinterleaved_fp32_mla_8x3VL_a64fx(
"fmla z28.s, p0/M, z2.s, z5.s\n"
"ld1rw { z5.s }, p0/Z, [%x[Apanel], #8]\n"
"fmla z29.s, p0/M, z0.s, z6.s\n"
- "ld1w { z0.s }, p0/Z, [x26]\n"
+ "ld1w { z0.s }, p0/Z, [x25]\n"
"fmla z30.s, p0/M, z1.s, z6.s\n"
"fmla z31.s, p0/M, z2.s, z6.s\n"
- "ld1w { z1.s }, p0/Z, [x22]\n"
- "ld1w { z2.s }, p0/Z, [x21]\n"
+ "ld1w { z1.s }, p0/Z, [x21]\n"
+ "ld1w { z2.s }, p0/Z, [x20]\n"
"ld1rw { z6.s }, p0/Z, [%x[Apanel], #12]\n"
"bge 4b\n"
"5:" // main loop skip
"fmla z8.s, p0/M, z0.s, z3.s\n"
"fmla z9.s, p0/M, z1.s, z3.s\n"
- "addvl x26, x26, #1\n"
+ "addvl x25, x25, #1\n"
"fmla z10.s, p0/M, z2.s, z3.s\n"
"ld1rw { z3.s }, p0/Z, [%x[Apanel], #16]\n"
"fmla z11.s, p0/M, z0.s, z4.s\n"
@@ -197,7 +197,7 @@ void sve_ffinterleaved_fp32_mla_8x3VL_a64fx(
"ld1rw { z4.s }, p0/Z, [%x[Apanel], #20]\n"
"fmla z14.s, p0/M, z0.s, z5.s\n"
"fmla z15.s, p0/M, z1.s, z5.s\n"
- "addvl x22, x22, #1\n"
+ "addvl x21, x21, #1\n"
"fmla z16.s, p0/M, z2.s, z5.s\n"
"ld1rw { z5.s }, p0/Z, [%x[Apanel], #24]\n"
"fmla z17.s, p0/M, z0.s, z6.s\n"
@@ -206,7 +206,7 @@ void sve_ffinterleaved_fp32_mla_8x3VL_a64fx(
"ld1rw { z6.s }, p0/Z, [%x[Apanel], #28]\n"
"fmla z20.s, p0/M, z0.s, z3.s\n"
"fmla z21.s, p0/M, z1.s, z3.s\n"
- "addvl x21, x21, #1\n"
+ "addvl x20, x20, #1\n"
"fmla z22.s, p0/M, z2.s, z3.s\n"
"fmla z23.s, p0/M, z0.s, z4.s\n"
"add %x[Apanel], %x[Apanel], #0x20\n"
@@ -218,10 +218,10 @@ void sve_ffinterleaved_fp32_mla_8x3VL_a64fx(
"fmla z29.s, p0/M, z0.s, z6.s\n"
"fmla z30.s, p0/M, z1.s, z6.s\n"
"fmla z31.s, p0/M, z2.s, z6.s\n"
- "cbz x20, 6f\n"
- "ld1w { z0.s }, p0/Z, [x26]\n"
- "ld1w { z1.s }, p0/Z, [x22]\n"
- "ld1w { z2.s }, p0/Z, [x21]\n"
+ "cbz x19, 6f\n"
+ "ld1w { z0.s }, p0/Z, [x25]\n"
+ "ld1w { z1.s }, p0/Z, [x21]\n"
+ "ld1w { z2.s }, p0/Z, [x20]\n"
"ld1rw { z3.s }, p0/Z, [%x[Apanel]]\n"
"fmla z8.s, p0/M, z0.s, z3.s\n"
"ld1rw { z4.s }, p0/Z, [%x[Apanel], #4]\n"
@@ -256,9 +256,9 @@ void sve_ffinterleaved_fp32_mla_8x3VL_a64fx(
"fmla z30.s, p0/M, z1.s, z6.s\n"
"fmla z31.s, p0/M, z2.s, z6.s\n"
"6:" // multiply loop done
- "decw x25, ALL, MUL #3\n"
+ "decw x24, ALL, MUL #3\n"
"st1w { z8.s }, p0, [%x[Cpanel]]\n"
- "cmp x25, XZR\n"
+ "cmp x24, XZR\n"
"st1w { z9.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
"st1w { z10.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
"st1w { z11.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
@@ -289,7 +289,7 @@ void sve_ffinterleaved_fp32_mla_8x3VL_a64fx(
"bne 1b\n"
: [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
: [args_ptr] "r" (&ka), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr))
- : "cc", "memory", "p0", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL/generic.cpp
index 4a0b31daff..1e9a3f119e 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -52,29 +52,29 @@ void sve_ffinterleaved_fp32_mla_8x3VL(
__asm__ __volatile__(
"ptrue p0.b\n"
"1:" // Height loop
- "ldr x26, [%x[args_ptr], %[offsetof_Bpanel]]\n"
- "ldr x25, [%x[args_ptr], %[offsetof_N]]\n"
- "str x26, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x24, %x[Apanel]\n"
+ "ldr x25, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x24, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x25, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x23, %x[Apanel]\n"
"2:" // Width loop
- "ldr x26, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "cntw x23, ALL, MUL #2\n"
- "add x22, x26, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
- "cmp x25, x23\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov %x[Apanel], x24\n"
+ "ldr x25, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cntw x22, ALL, MUL #2\n"
+ "add x21, x25, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
+ "add x19, x20, x19, LSL #2\n"
+ "cmp x24, x22\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov %x[Apanel], x23\n"
"bgt 3f\n"
- "decw x23\n"
- "cmp x25, x23\n"
- "mov x21, x26\n"
+ "decw x22\n"
+ "cmp x24, x22\n"
+ "mov x20, x25\n"
"bgt 3f\n"
- "mov x22, x26\n"
+ "mov x21, x25\n"
"3:" // B setup done
- "ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "cmp x20, #0x2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
+ "cmp x19, #0x2\n"
"mov z8.b, #0x0\n"
"mov z9.b, #0x0\n"
"mov z10.b, #0x0\n"
@@ -84,13 +84,13 @@ void sve_ffinterleaved_fp32_mla_8x3VL(
"ld1rqw { z1.s }, p0/Z, [%x[Apanel], #16]\n"
"mov z13.b, #0x0\n"
"mov z14.b, #0x0\n"
- "ld1w { z4.s }, p0/Z, [x26]\n"
+ "ld1w { z4.s }, p0/Z, [x25]\n"
"mov z15.b, #0x0\n"
"mov z16.b, #0x0\n"
- "ld1w { z5.s }, p0/Z, [x22]\n"
+ "ld1w { z5.s }, p0/Z, [x21]\n"
"mov z17.b, #0x0\n"
"mov z18.b, #0x0\n"
- "ld1w { z6.s }, p0/Z, [x21]\n"
+ "ld1w { z6.s }, p0/Z, [x20]\n"
"mov z19.b, #0x0\n"
"mov z20.b, #0x0\n"
"mov z21.b, #0x0\n"
@@ -114,25 +114,25 @@ void sve_ffinterleaved_fp32_mla_8x3VL(
"ld1rqw { z3.s }, p0/Z, [%x[Apanel], #48]\n"
"fmla z20.s, z4.s, z1.s[0]\n"
"fmla z23.s, z4.s, z1.s[1]\n"
- "sub x20, x20, #0x2\n"
+ "sub x19, x19, #0x2\n"
"fmla z26.s, z4.s, z1.s[2]\n"
"fmla z29.s, z4.s, z1.s[3]\n"
- "ld1w { z4.s }, p0/Z, [x26, #1, MUL VL]\n"
+ "ld1w { z4.s }, p0/Z, [x25, #1, MUL VL]\n"
"fmla z9.s, z5.s, z0.s[0]\n"
"fmla z12.s, z5.s, z0.s[1]\n"
- "addvl x26, x26, #2\n"
+ "addvl x25, x25, #2\n"
"fmla z15.s, z5.s, z0.s[2]\n"
"fmla z18.s, z5.s, z0.s[3]\n"
- "cmp x20, #0x2\n"
+ "cmp x19, #0x2\n"
"fmla z21.s, z5.s, z1.s[0]\n"
"fmla z24.s, z5.s, z1.s[1]\n"
"add %x[Apanel], %x[Apanel], #0x40\n"
"fmla z27.s, z5.s, z1.s[2]\n"
"fmla z30.s, z5.s, z1.s[3]\n"
- "ld1w { z5.s }, p0/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z5.s }, p0/Z, [x21, #1, MUL VL]\n"
"fmla z10.s, z6.s, z0.s[0]\n"
"fmla z13.s, z6.s, z0.s[1]\n"
- "addvl x22, x22, #2\n"
+ "addvl x21, x21, #2\n"
"fmla z16.s, z6.s, z0.s[2]\n"
"fmla z19.s, z6.s, z0.s[3]\n"
"ld1rqw { z0.s }, p0/Z, [%x[Apanel]]\n"
@@ -140,8 +140,8 @@ void sve_ffinterleaved_fp32_mla_8x3VL(
"fmla z25.s, z6.s, z1.s[1]\n"
"fmla z28.s, z6.s, z1.s[2]\n"
"fmla z31.s, z6.s, z1.s[3]\n"
- "ld1w { z6.s }, p0/Z, [x21, #1, MUL VL]\n"
- "addvl x21, x21, #2\n"
+ "ld1w { z6.s }, p0/Z, [x20, #1, MUL VL]\n"
+ "addvl x20, x20, #2\n"
"fmla z8.s, z4.s, z2.s[0]\n"
"fmla z11.s, z4.s, z2.s[1]\n"
"fmla z14.s, z4.s, z2.s[2]\n"
@@ -151,7 +151,7 @@ void sve_ffinterleaved_fp32_mla_8x3VL(
"fmla z23.s, z4.s, z3.s[1]\n"
"fmla z26.s, z4.s, z3.s[2]\n"
"fmla z29.s, z4.s, z3.s[3]\n"
- "ld1w { z4.s }, p0/Z, [x26]\n"
+ "ld1w { z4.s }, p0/Z, [x25]\n"
"fmla z9.s, z5.s, z2.s[0]\n"
"fmla z12.s, z5.s, z2.s[1]\n"
"fmla z15.s, z5.s, z2.s[2]\n"
@@ -160,7 +160,7 @@ void sve_ffinterleaved_fp32_mla_8x3VL(
"fmla z24.s, z5.s, z3.s[1]\n"
"fmla z27.s, z5.s, z3.s[2]\n"
"fmla z30.s, z5.s, z3.s[3]\n"
- "ld1w { z5.s }, p0/Z, [x22]\n"
+ "ld1w { z5.s }, p0/Z, [x21]\n"
"fmla z10.s, z6.s, z2.s[0]\n"
"fmla z13.s, z6.s, z2.s[1]\n"
"fmla z16.s, z6.s, z2.s[2]\n"
@@ -169,7 +169,7 @@ void sve_ffinterleaved_fp32_mla_8x3VL(
"fmla z25.s, z6.s, z3.s[1]\n"
"fmla z28.s, z6.s, z3.s[2]\n"
"fmla z31.s, z6.s, z3.s[3]\n"
- "ld1w { z6.s }, p0/Z, [x21]\n"
+ "ld1w { z6.s }, p0/Z, [x20]\n"
"bge 4b\n"
"5:" // main loop skip
"fmla z8.s, z4.s, z0.s[0]\n"
@@ -177,13 +177,13 @@ void sve_ffinterleaved_fp32_mla_8x3VL(
"add %x[Apanel], %x[Apanel], #0x20\n"
"fmla z14.s, z4.s, z0.s[2]\n"
"fmla z17.s, z4.s, z0.s[3]\n"
- "addvl x26, x26, #1\n"
+ "addvl x25, x25, #1\n"
"fmla z20.s, z4.s, z1.s[0]\n"
"fmla z23.s, z4.s, z1.s[1]\n"
- "addvl x22, x22, #1\n"
+ "addvl x21, x21, #1\n"
"fmla z26.s, z4.s, z1.s[2]\n"
"fmla z29.s, z4.s, z1.s[3]\n"
- "addvl x21, x21, #1\n"
+ "addvl x20, x20, #1\n"
"fmla z9.s, z5.s, z0.s[0]\n"
"fmla z12.s, z5.s, z0.s[1]\n"
"fmla z15.s, z5.s, z0.s[2]\n"
@@ -200,14 +200,14 @@ void sve_ffinterleaved_fp32_mla_8x3VL(
"fmla z25.s, z6.s, z1.s[1]\n"
"fmla z28.s, z6.s, z1.s[2]\n"
"fmla z31.s, z6.s, z1.s[3]\n"
- "cbz x20, 6f\n"
+ "cbz x19, 6f\n"
"ld1rqw { z0.s }, p0/Z, [%x[Apanel]]\n"
"ld1rqw { z1.s }, p0/Z, [%x[Apanel], #16]\n"
"add %x[Apanel], %x[Apanel], #0x20\n"
- "ld1w { z7.s }, p0/Z, [x26]\n"
- "ld1w { z4.s }, p0/Z, [x22]\n"
+ "ld1w { z7.s }, p0/Z, [x25]\n"
+ "ld1w { z4.s }, p0/Z, [x21]\n"
"fmla z8.s, z7.s, z0.s[0]\n"
- "ld1w { z5.s }, p0/Z, [x21]\n"
+ "ld1w { z5.s }, p0/Z, [x20]\n"
"fmla z11.s, z7.s, z0.s[1]\n"
"fmla z14.s, z7.s, z0.s[2]\n"
"fmla z17.s, z7.s, z0.s[3]\n"
@@ -232,9 +232,9 @@ void sve_ffinterleaved_fp32_mla_8x3VL(
"fmla z28.s, z5.s, z1.s[2]\n"
"fmla z31.s, z5.s, z1.s[3]\n"
"6:" // multiply loop done
- "decw x25, ALL, MUL #3\n"
+ "decw x24, ALL, MUL #3\n"
"st1w { z8.s }, p0, [%x[Cpanel]]\n"
- "cmp x25, XZR\n"
+ "cmp x24, XZR\n"
"st1w { z9.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
"st1w { z10.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
"st1w { z11.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
@@ -265,7 +265,7 @@ void sve_ffinterleaved_fp32_mla_8x3VL(
"bne 1b\n"
: [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
: [args_ptr] "r" (&ka), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr))
- : "cc", "memory", "p0", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
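
Note also that all pointer stepping above is vector-length agnostic: [xN, #k, MUL VL] addresses k vector registers past the base, and addvl advances a pointer by whole vector lengths, so the same binary serves any SVE width. The C++ counterpart is the ACLE count intrinsics; a small sketch (assumes an SVE-enabled target and arm_sve.h):

#include <arm_sve.h>

// Advance an fp32 pointer by two vector registers' worth of elements,
// mirroring "addvl x25, x25, #2" applied to a float buffer above.
float *step_two_vectors(float *p) {
    return p + 2 * svcntw();   // svcntw(): 32-bit lanes per SVE vector
}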
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_6x4VL/generic.cpp
index f0b00e6251..b794c21807 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_6x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_6x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -103,32 +103,32 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"cmp %x[M], #0x2\n"
"bgt 27f\n"
"beq 14f\n"
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x9, %x[bias]\n"
+ "mov x28, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "cbz x12, 3f\n"
- "ld1w { z8.s }, p5/Z, [x12]\n"
- "ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
- "ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
- "addvl x12, x12, #4\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x11\n"
+ "cbz x9, 3f\n"
+ "ld1w { z8.s }, p5/Z, [x9]\n"
+ "ld1w { z9.s }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z10.s }, p5/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"b 5f\n"
"3:" // Height 1: no bias
"tbz %x[flags], #0, 4f\n"
- "ld1w { z8.s }, p4/Z, [x9]\n"
- "ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "ld1w { z8.s }, p4/Z, [x28]\n"
+ "ld1w { z9.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z11.s }, p1/Z, [x28, #3, MUL VL]\n"
"b 5f\n"
"4:" // Height 1: no accumulate
"mov z8.b, #0x0\n"
@@ -136,175 +136,175 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"mov z10.b, #0x0\n"
"mov z11.b, #0x0\n"
"5:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"6:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 7f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 8f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 8f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
"b 8f\n"
"7:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"8:" // Height 1: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"ble 10f\n"
"9:" // Height 1: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- ".inst 0x646040c8 // bfdot z8.s, z6.h, z0.h[0]\n"
+ "whilelt p0.h, XZR, x26\n"
"ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x646040e9 // bfdot z9.s, z7.h, z0.h[0]\n"
+ "sub x26, x26, #0x8\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
+ ".inst 0x646040c8 // bfdot z8.s, z6.h, z0.h[0]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- ".inst 0x646040ca // bfdot z10.s, z6.h, z0.h[0]\n"
+ "cmp x26, #0x8\n"
+ ".inst 0x646040e9 // bfdot z9.s, z7.h, z0.h[0]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
- ".inst 0x646040eb // bfdot z11.s, z7.h, z0.h[0]\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0x646040ca // bfdot z10.s, z6.h, z0.h[0]\n"
"ld1h { z6.h }, p5/Z, [x10, #4, MUL VL]\n"
- ".inst 0x646840c8 // bfdot z8.s, z6.h, z0.h[1]\n"
+ ".inst 0x646040eb // bfdot z11.s, z7.h, z0.h[0]\n"
"ld1h { z7.h }, p5/Z, [x10, #5, MUL VL]\n"
- ".inst 0x646840e9 // bfdot z9.s, z7.h, z0.h[1]\n"
+ ".inst 0x646840c8 // bfdot z8.s, z6.h, z0.h[1]\n"
"ld1h { z6.h }, p5/Z, [x10, #6, MUL VL]\n"
- ".inst 0x646840ca // bfdot z10.s, z6.h, z0.h[1]\n"
+ ".inst 0x646840e9 // bfdot z9.s, z7.h, z0.h[1]\n"
"ld1h { z7.h }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #16\n"
- ".inst 0x646840eb // bfdot z11.s, z7.h, z0.h[1]\n"
+ ".inst 0x646840ca // bfdot z10.s, z6.h, z0.h[1]\n"
"ld1h { z6.h }, p5/Z, [x10, #-8, MUL VL]\n"
+ ".inst 0x646840eb // bfdot z11.s, z7.h, z0.h[1]\n"
"ld1h { z7.h }, p5/Z, [x10, #-7, MUL VL]\n"
".inst 0x647040c8 // bfdot z8.s, z6.h, z0.h[2]\n"
- ".inst 0x647040e9 // bfdot z9.s, z7.h, z0.h[2]\n"
"ld1h { z6.h }, p5/Z, [x10, #-6, MUL VL]\n"
+ ".inst 0x647040e9 // bfdot z9.s, z7.h, z0.h[2]\n"
"ld1h { z7.h }, p5/Z, [x10, #-5, MUL VL]\n"
".inst 0x647040ca // bfdot z10.s, z6.h, z0.h[2]\n"
- ".inst 0x647040eb // bfdot z11.s, z7.h, z0.h[2]\n"
"ld1h { z6.h }, p5/Z, [x10, #-4, MUL VL]\n"
+ ".inst 0x647040eb // bfdot z11.s, z7.h, z0.h[2]\n"
"ld1h { z7.h }, p5/Z, [x10, #-3, MUL VL]\n"
".inst 0x647840c8 // bfdot z8.s, z6.h, z0.h[3]\n"
- ".inst 0x647840e9 // bfdot z9.s, z7.h, z0.h[3]\n"
"ld1h { z6.h }, p5/Z, [x10, #-2, MUL VL]\n"
+ ".inst 0x647840e9 // bfdot z9.s, z7.h, z0.h[3]\n"
"ld1h { z7.h }, p5/Z, [x10, #-1, MUL VL]\n"
- "sub x27, x27, #0x8\n"
- "cmp x27, #0x8\n"
".inst 0x647840ca // bfdot z10.s, z6.h, z0.h[3]\n"
".inst 0x647840eb // bfdot z11.s, z7.h, z0.h[3]\n"
- "add x26, x26, #0x10\n"
"bgt 9b\n"
"10:" // Height 1: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- ".inst 0x646040c8 // bfdot z8.s, z6.h, z0.h[0]\n"
+ "whilelt p0.h, XZR, x26\n"
"ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x646040e9 // bfdot z9.s, z7.h, z0.h[0]\n"
+ "subs x26, x26, #0x2\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
+ ".inst 0x646040c8 // bfdot z8.s, z6.h, z0.h[0]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x2\n"
+ ".inst 0x646040e9 // bfdot z9.s, z7.h, z0.h[0]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
".inst 0x646040ca // bfdot z10.s, z6.h, z0.h[0]\n"
".inst 0x646040eb // bfdot z11.s, z7.h, z0.h[0]\n"
- "addvl x10, x10, #4\n"
"ble 11f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x646840c8 // bfdot z8.s, z6.h, z0.h[1]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x2\n"
".inst 0x646840e9 // bfdot z9.s, z7.h, z0.h[1]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x2\n"
".inst 0x646840ca // bfdot z10.s, z6.h, z0.h[1]\n"
- ".inst 0x646840eb // bfdot z11.s, z7.h, z0.h[1]\n"
"addvl x10, x10, #4\n"
+ ".inst 0x646840eb // bfdot z11.s, z7.h, z0.h[1]\n"
"ble 11f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x647040c8 // bfdot z8.s, z6.h, z0.h[2]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x2\n"
".inst 0x647040e9 // bfdot z9.s, z7.h, z0.h[2]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x2\n"
".inst 0x647040ca // bfdot z10.s, z6.h, z0.h[2]\n"
- ".inst 0x647040eb // bfdot z11.s, z7.h, z0.h[2]\n"
"addvl x10, x10, #4\n"
+ ".inst 0x647040eb // bfdot z11.s, z7.h, z0.h[2]\n"
"ble 11f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x647840c8 // bfdot z8.s, z6.h, z0.h[3]\n"
- ".inst 0x647840e9 // bfdot z9.s, z7.h, z0.h[3]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x647840e9 // bfdot z9.s, z7.h, z0.h[3]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
".inst 0x647840ca // bfdot z10.s, z6.h, z0.h[3]\n"
".inst 0x647840eb // bfdot z11.s, z7.h, z0.h[3]\n"
- "addvl x10, x10, #4\n"
"11:" // Height 1: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 6b\n"
"tbz %x[flags], #1, 12f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
- "fmin z8.s, p5/M, z8.s, z1.s\n"
- "fmin z9.s, p5/M, z9.s, z1.s\n"
- "fmin z10.s, p5/M, z10.s, z1.s\n"
- "fmin z11.s, p5/M, z11.s, z1.s\n"
- "fmax z8.s, p5/M, z8.s, z0.s\n"
- "fmax z9.s, p5/M, z9.s, z0.s\n"
- "fmax z10.s, p5/M, z10.s, z0.s\n"
- "fmax z11.s, p5/M, z11.s, z0.s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
+ "fmin z8.s, p5/M, z8.s, z0.s\n"
+ "fmin z9.s, p5/M, z9.s, z0.s\n"
+ "fmin z10.s, p5/M, z10.s, z0.s\n"
+ "fmin z11.s, p5/M, z11.s, z0.s\n"
+ "fmax z8.s, p5/M, z8.s, z1.s\n"
+ "fmax z9.s, p5/M, z9.s, z1.s\n"
+ "fmax z10.s, p5/M, z10.s, z1.s\n"
+ "fmax z11.s, p5/M, z11.s, z1.s\n"
"12:" // Height 1: No activation
- "st1w { z8.s }, p4, [x9]\n"
- "st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "st1w { z8.s }, p4, [x28]\n"
+ "st1w { z9.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"13:" // Height 1: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 2b\n"
"b 80f\n"
"14:" // Height 2
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"15:" // Height 2: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "cbz x12, 16f\n"
- "ld1w { z8.s }, p5/Z, [x12]\n"
- "ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x11\n"
+ "cbz x9, 16f\n"
+ "ld1w { z8.s }, p5/Z, [x9]\n"
"mov z12.d, z8.d\n"
+ "ld1w { z9.s }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z10.s }, p5/Z, [x9, #2, MUL VL]\n"
"mov z13.d, z9.d\n"
- "ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
- "addvl x12, x12, #4\n"
"b 18f\n"
"16:" // Height 2: no bias
"tbz %x[flags], #0, 17f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "ld1w { z8.s }, p4/Z, [x9]\n"
- "ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x25]\n"
- "ld1w { z13.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x25, #2, MUL VL]\n"
- "ld1w { z15.s }, p1/Z, [x25, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z8.s }, p4/Z, [x28]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "ld1w { z9.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z11.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
"b 18f\n"
"17:" // Height 2: no accumulate
"mov z8.b, #0x0\n"
@@ -316,52 +316,52 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"mov z14.b, #0x0\n"
"mov z15.b, #0x0\n"
"18:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"19:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 20f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 21f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 21f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
"b 21f\n"
"20:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
"21:" // Height 2: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"ble 23f\n"
"22:" // Height 2: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
- "sub x27, x27, #0x8\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
+ "whilelt p0.h, XZR, x26\n"
"ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x26, x26, #0x8\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
".inst 0x646040c8 // bfdot z8.s, z6.h, z0.h[0]\n"
- ".inst 0x646140cc // bfdot z12.s, z6.h, z1.h[0]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "cmp x26, #0x8\n"
".inst 0x646040e9 // bfdot z9.s, z7.h, z0.h[0]\n"
- ".inst 0x646140ed // bfdot z13.s, z7.h, z1.h[0]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x646140cc // bfdot z12.s, z6.h, z1.h[0]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x646140ed // bfdot z13.s, z7.h, z1.h[0]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x646040ca // bfdot z10.s, z6.h, z0.h[0]\n"
".inst 0x646140ce // bfdot z14.s, z6.h, z1.h[0]\n"
"ld1h { z6.h }, p5/Z, [x10, #4, MUL VL]\n"
- "cmp x27, #0x8\n"
".inst 0x646040eb // bfdot z11.s, z7.h, z0.h[0]\n"
".inst 0x646140ef // bfdot z15.s, z7.h, z1.h[0]\n"
"ld1h { z7.h }, p5/Z, [x10, #5, MUL VL]\n"
- "add x26, x26, #0x10\n"
".inst 0x646840c8 // bfdot z8.s, z6.h, z0.h[1]\n"
".inst 0x646940cc // bfdot z12.s, z6.h, z1.h[1]\n"
"ld1h { z6.h }, p5/Z, [x10, #6, MUL VL]\n"
- "add x25, x25, #0x10\n"
".inst 0x646840e9 // bfdot z9.s, z7.h, z0.h[1]\n"
".inst 0x646940ed // bfdot z13.s, z7.h, z1.h[1]\n"
"ld1h { z7.h }, p5/Z, [x10, #7, MUL VL]\n"
@@ -396,156 +396,156 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x647940ef // bfdot z15.s, z7.h, z1.h[3]\n"
"bgt 22b\n"
"23:" // Height 2: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
- "subs x27, x27, #0x2\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
+ "whilelt p0.h, XZR, x26\n"
"ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x2\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
".inst 0x646040c8 // bfdot z8.s, z6.h, z0.h[0]\n"
- ".inst 0x646140cc // bfdot z12.s, z6.h, z1.h[0]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
".inst 0x646040e9 // bfdot z9.s, z7.h, z0.h[0]\n"
- ".inst 0x646140ed // bfdot z13.s, z7.h, z1.h[0]\n"
+ ".inst 0x646140cc // bfdot z12.s, z6.h, z1.h[0]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x646140ed // bfdot z13.s, z7.h, z1.h[0]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
".inst 0x646040ca // bfdot z10.s, z6.h, z0.h[0]\n"
".inst 0x646140ce // bfdot z14.s, z6.h, z1.h[0]\n"
- "addvl x10, x10, #4\n"
".inst 0x646040eb // bfdot z11.s, z7.h, z0.h[0]\n"
".inst 0x646140ef // bfdot z15.s, z7.h, z1.h[0]\n"
"ble 24f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x646840c8 // bfdot z8.s, z6.h, z0.h[1]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x2\n"
".inst 0x646940cc // bfdot z12.s, z6.h, z1.h[1]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x646840e9 // bfdot z9.s, z7.h, z0.h[1]\n"
".inst 0x646940ed // bfdot z13.s, z7.h, z1.h[1]\n"
- "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x2\n"
+ "addvl x10, x10, #4\n"
".inst 0x646840ca // bfdot z10.s, z6.h, z0.h[1]\n"
".inst 0x646940ce // bfdot z14.s, z6.h, z1.h[1]\n"
- "addvl x10, x10, #4\n"
".inst 0x646840eb // bfdot z11.s, z7.h, z0.h[1]\n"
".inst 0x646940ef // bfdot z15.s, z7.h, z1.h[1]\n"
"ble 24f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x647040c8 // bfdot z8.s, z6.h, z0.h[2]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x2\n"
".inst 0x647140cc // bfdot z12.s, z6.h, z1.h[2]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x647040e9 // bfdot z9.s, z7.h, z0.h[2]\n"
".inst 0x647140ed // bfdot z13.s, z7.h, z1.h[2]\n"
- "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x2\n"
+ "addvl x10, x10, #4\n"
".inst 0x647040ca // bfdot z10.s, z6.h, z0.h[2]\n"
".inst 0x647140ce // bfdot z14.s, z6.h, z1.h[2]\n"
- "addvl x10, x10, #4\n"
".inst 0x647040eb // bfdot z11.s, z7.h, z0.h[2]\n"
".inst 0x647140ef // bfdot z15.s, z7.h, z1.h[2]\n"
"ble 24f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x647840c8 // bfdot z8.s, z6.h, z0.h[3]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x647940cc // bfdot z12.s, z6.h, z1.h[3]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x647840e9 // bfdot z9.s, z7.h, z0.h[3]\n"
".inst 0x647940ed // bfdot z13.s, z7.h, z1.h[3]\n"
- "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
".inst 0x647840ca // bfdot z10.s, z6.h, z0.h[3]\n"
".inst 0x647940ce // bfdot z14.s, z6.h, z1.h[3]\n"
- "addvl x10, x10, #4\n"
".inst 0x647840eb // bfdot z11.s, z7.h, z0.h[3]\n"
".inst 0x647940ef // bfdot z15.s, z7.h, z1.h[3]\n"
"24:" // Height 2: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 19b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #2\n"
"tbz %x[flags], #1, 25f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
- "fmin z8.s, p5/M, z8.s, z1.s\n"
- "fmin z9.s, p5/M, z9.s, z1.s\n"
- "fmin z10.s, p5/M, z10.s, z1.s\n"
- "fmin z11.s, p5/M, z11.s, z1.s\n"
- "fmin z12.s, p5/M, z12.s, z1.s\n"
- "fmin z13.s, p5/M, z13.s, z1.s\n"
- "fmin z14.s, p5/M, z14.s, z1.s\n"
- "fmin z15.s, p5/M, z15.s, z1.s\n"
- "fmax z8.s, p5/M, z8.s, z0.s\n"
- "fmax z9.s, p5/M, z9.s, z0.s\n"
- "fmax z10.s, p5/M, z10.s, z0.s\n"
- "fmax z11.s, p5/M, z11.s, z0.s\n"
- "fmax z12.s, p5/M, z12.s, z0.s\n"
- "fmax z13.s, p5/M, z13.s, z0.s\n"
- "fmax z14.s, p5/M, z14.s, z0.s\n"
- "fmax z15.s, p5/M, z15.s, z0.s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
+ "fmin z8.s, p5/M, z8.s, z0.s\n"
+ "fmin z9.s, p5/M, z9.s, z0.s\n"
+ "fmin z10.s, p5/M, z10.s, z0.s\n"
+ "fmin z11.s, p5/M, z11.s, z0.s\n"
+ "fmin z12.s, p5/M, z12.s, z0.s\n"
+ "fmax z8.s, p5/M, z8.s, z1.s\n"
+ "fmax z9.s, p5/M, z9.s, z1.s\n"
+ "fmax z10.s, p5/M, z10.s, z1.s\n"
+ "fmax z11.s, p5/M, z11.s, z1.s\n"
+ "fmax z12.s, p5/M, z12.s, z1.s\n"
+ "fmin z13.s, p5/M, z13.s, z0.s\n"
+ "fmin z14.s, p5/M, z14.s, z0.s\n"
+ "fmin z15.s, p5/M, z15.s, z0.s\n"
+ "fmax z13.s, p5/M, z13.s, z1.s\n"
+ "fmax z14.s, p5/M, z14.s, z1.s\n"
+ "fmax z15.s, p5/M, z15.s, z1.s\n"
"25:" // Height 2: No activation
- "st1w { z8.s }, p4, [x9]\n"
- "st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p4, [x25]\n"
- "st1w { z13.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x25, #3, MUL VL]\n"
+ "st1w { z8.s }, p4, [x28]\n"
+ "st1w { z9.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z12.s }, p4, [x24]\n"
+ "st1w { z13.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x24, #3, MUL VL]\n"
"26:" // Height 2: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 15b\n"
"b 80f\n"
"27:" // Height 3
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"28:" // Height 3: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "cbz x12, 29f\n"
- "ld1w { z8.s }, p5/Z, [x12]\n"
- "ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x11\n"
+ "cbz x9, 29f\n"
+ "ld1w { z8.s }, p5/Z, [x9]\n"
"mov z12.d, z8.d\n"
+ "ld1w { z9.s }, p5/Z, [x9, #1, MUL VL]\n"
+ "mov z16.d, z8.d\n"
+ "ld1w { z10.s }, p5/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x9, #3, MUL VL]\n"
"mov z13.d, z9.d\n"
- "ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
+ "mov z17.d, z9.d\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
- "mov z16.d, z8.d\n"
- "mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"b 31f\n"
"29:" // Height 3: no bias
"tbz %x[flags], #0, 30f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z8.s }, p4/Z, [x9]\n"
- "ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x25]\n"
- "ld1w { z13.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x25, #2, MUL VL]\n"
- "ld1w { z15.s }, p1/Z, [x25, #3, MUL VL]\n"
- "ld1w { z16.s }, p4/Z, [x24]\n"
- "ld1w { z17.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1w { z19.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z8.s }, p4/Z, [x28]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "ld1w { z9.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z11.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x23]\n"
+ "ld1w { z17.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z19.s }, p1/Z, [x23, #3, MUL VL]\n"
"b 31f\n"
"30:" // Height 3: no accumulate
"mov z8.b, #0x0\n"
@@ -561,63 +561,63 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"mov z18.b, #0x0\n"
"mov z19.b, #0x0\n"
"31:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"32:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 33f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 34f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 34f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
"b 34f\n"
"33:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
"34:" // Height 3: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"ble 36f\n"
"35:" // Height 3: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
- "sub x27, x27, #0x8\n"
- "ld1rqh { z2.h }, p0/Z, [x24]\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x26, x26, #0x8\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
".inst 0x646040c8 // bfdot z8.s, z6.h, z0.h[0]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "cmp x26, #0x8\n"
+ ".inst 0x646040e9 // bfdot z9.s, z7.h, z0.h[0]\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
+ "add x25, x25, #0x10\n"
".inst 0x646140cc // bfdot z12.s, z6.h, z1.h[0]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x646140ed // bfdot z13.s, z7.h, z1.h[0]\n"
+ "add x23, x23, #0x10\n"
".inst 0x646240d0 // bfdot z16.s, z6.h, z2.h[0]\n"
- ".inst 0x646040e9 // bfdot z9.s, z7.h, z0.h[0]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- ".inst 0x646140ed // bfdot z13.s, z7.h, z1.h[0]\n"
".inst 0x646240f1 // bfdot z17.s, z7.h, z2.h[0]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
- "cmp x27, #0x8\n"
".inst 0x646040ca // bfdot z10.s, z6.h, z0.h[0]\n"
".inst 0x646140ce // bfdot z14.s, z6.h, z1.h[0]\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
".inst 0x646240d2 // bfdot z18.s, z6.h, z2.h[0]\n"
- ".inst 0x646040eb // bfdot z11.s, z7.h, z0.h[0]\n"
"ld1h { z6.h }, p5/Z, [x10, #4, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ ".inst 0x646040eb // bfdot z11.s, z7.h, z0.h[0]\n"
".inst 0x646140ef // bfdot z15.s, z7.h, z1.h[0]\n"
".inst 0x646240f3 // bfdot z19.s, z7.h, z2.h[0]\n"
"ld1h { z7.h }, p5/Z, [x10, #5, MUL VL]\n"
".inst 0x646840c8 // bfdot z8.s, z6.h, z0.h[1]\n"
".inst 0x646940cc // bfdot z12.s, z6.h, z1.h[1]\n"
".inst 0x646a40d0 // bfdot z16.s, z6.h, z2.h[1]\n"
- ".inst 0x646840e9 // bfdot z9.s, z7.h, z0.h[1]\n"
"ld1h { z6.h }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x646840e9 // bfdot z9.s, z7.h, z0.h[1]\n"
".inst 0x646940ed // bfdot z13.s, z7.h, z1.h[1]\n"
".inst 0x646a40f1 // bfdot z17.s, z7.h, z2.h[1]\n"
"ld1h { z7.h }, p5/Z, [x10, #7, MUL VL]\n"
@@ -625,32 +625,32 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x646840ca // bfdot z10.s, z6.h, z0.h[1]\n"
".inst 0x646940ce // bfdot z14.s, z6.h, z1.h[1]\n"
".inst 0x646a40d2 // bfdot z18.s, z6.h, z2.h[1]\n"
- ".inst 0x646840eb // bfdot z11.s, z7.h, z0.h[1]\n"
"ld1h { z6.h }, p5/Z, [x10, #-8, MUL VL]\n"
+ ".inst 0x646840eb // bfdot z11.s, z7.h, z0.h[1]\n"
".inst 0x646940ef // bfdot z15.s, z7.h, z1.h[1]\n"
".inst 0x646a40f3 // bfdot z19.s, z7.h, z2.h[1]\n"
"ld1h { z7.h }, p5/Z, [x10, #-7, MUL VL]\n"
".inst 0x647040c8 // bfdot z8.s, z6.h, z0.h[2]\n"
".inst 0x647140cc // bfdot z12.s, z6.h, z1.h[2]\n"
".inst 0x647240d0 // bfdot z16.s, z6.h, z2.h[2]\n"
- ".inst 0x647040e9 // bfdot z9.s, z7.h, z0.h[2]\n"
"ld1h { z6.h }, p5/Z, [x10, #-6, MUL VL]\n"
+ ".inst 0x647040e9 // bfdot z9.s, z7.h, z0.h[2]\n"
".inst 0x647140ed // bfdot z13.s, z7.h, z1.h[2]\n"
".inst 0x647240f1 // bfdot z17.s, z7.h, z2.h[2]\n"
"ld1h { z7.h }, p5/Z, [x10, #-5, MUL VL]\n"
".inst 0x647040ca // bfdot z10.s, z6.h, z0.h[2]\n"
".inst 0x647140ce // bfdot z14.s, z6.h, z1.h[2]\n"
".inst 0x647240d2 // bfdot z18.s, z6.h, z2.h[2]\n"
- ".inst 0x647040eb // bfdot z11.s, z7.h, z0.h[2]\n"
"ld1h { z6.h }, p5/Z, [x10, #-4, MUL VL]\n"
+ ".inst 0x647040eb // bfdot z11.s, z7.h, z0.h[2]\n"
".inst 0x647140ef // bfdot z15.s, z7.h, z1.h[2]\n"
".inst 0x647240f3 // bfdot z19.s, z7.h, z2.h[2]\n"
"ld1h { z7.h }, p5/Z, [x10, #-3, MUL VL]\n"
".inst 0x647840c8 // bfdot z8.s, z6.h, z0.h[3]\n"
".inst 0x647940cc // bfdot z12.s, z6.h, z1.h[3]\n"
".inst 0x647a40d0 // bfdot z16.s, z6.h, z2.h[3]\n"
- ".inst 0x647840e9 // bfdot z9.s, z7.h, z0.h[3]\n"
"ld1h { z6.h }, p5/Z, [x10, #-2, MUL VL]\n"
+ ".inst 0x647840e9 // bfdot z9.s, z7.h, z0.h[3]\n"
".inst 0x647940ed // bfdot z13.s, z7.h, z1.h[3]\n"
".inst 0x647a40f1 // bfdot z17.s, z7.h, z2.h[3]\n"
"ld1h { z7.h }, p5/Z, [x10, #-1, MUL VL]\n"
@@ -662,19 +662,19 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x647a40f3 // bfdot z19.s, z7.h, z2.h[3]\n"
"bgt 35b\n"
"36:" // Height 3: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
- "subs x27, x27, #0x2\n"
- "ld1rqh { z2.h }, p0/Z, [x24]\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x2\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
".inst 0x646040c8 // bfdot z8.s, z6.h, z0.h[0]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ ".inst 0x646040e9 // bfdot z9.s, z7.h, z0.h[0]\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
".inst 0x646140cc // bfdot z12.s, z6.h, z1.h[0]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x646140ed // bfdot z13.s, z7.h, z1.h[0]\n"
".inst 0x646240d0 // bfdot z16.s, z6.h, z2.h[0]\n"
- ".inst 0x646040e9 // bfdot z9.s, z7.h, z0.h[0]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- ".inst 0x646140ed // bfdot z13.s, z7.h, z1.h[0]\n"
".inst 0x646240f1 // bfdot z17.s, z7.h, z2.h[0]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
@@ -686,13 +686,13 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x646240f3 // bfdot z19.s, z7.h, z2.h[0]\n"
"ble 37f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x646840c8 // bfdot z8.s, z6.h, z0.h[1]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x2\n"
".inst 0x646940cc // bfdot z12.s, z6.h, z1.h[1]\n"
".inst 0x646a40d0 // bfdot z16.s, z6.h, z2.h[1]\n"
- ".inst 0x646840e9 // bfdot z9.s, z7.h, z0.h[1]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x2\n"
+ ".inst 0x646840e9 // bfdot z9.s, z7.h, z0.h[1]\n"
".inst 0x646940ed // bfdot z13.s, z7.h, z1.h[1]\n"
".inst 0x646a40f1 // bfdot z17.s, z7.h, z2.h[1]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
@@ -705,13 +705,13 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x646a40f3 // bfdot z19.s, z7.h, z2.h[1]\n"
"ble 37f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x647040c8 // bfdot z8.s, z6.h, z0.h[2]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x2\n"
".inst 0x647140cc // bfdot z12.s, z6.h, z1.h[2]\n"
".inst 0x647240d0 // bfdot z16.s, z6.h, z2.h[2]\n"
- ".inst 0x647040e9 // bfdot z9.s, z7.h, z0.h[2]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x2\n"
+ ".inst 0x647040e9 // bfdot z9.s, z7.h, z0.h[2]\n"
".inst 0x647140ed // bfdot z13.s, z7.h, z1.h[2]\n"
".inst 0x647240f1 // bfdot z17.s, z7.h, z2.h[2]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
@@ -724,12 +724,12 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x647240f3 // bfdot z19.s, z7.h, z2.h[2]\n"
"ble 37f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x647840c8 // bfdot z8.s, z6.h, z0.h[3]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x647940cc // bfdot z12.s, z6.h, z1.h[3]\n"
".inst 0x647a40d0 // bfdot z16.s, z6.h, z2.h[3]\n"
- ".inst 0x647840e9 // bfdot z9.s, z7.h, z0.h[3]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x647840e9 // bfdot z9.s, z7.h, z0.h[3]\n"
".inst 0x647940ed // bfdot z13.s, z7.h, z1.h[3]\n"
".inst 0x647a40f1 // bfdot z17.s, z7.h, z2.h[3]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
@@ -741,116 +741,116 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x647940ef // bfdot z15.s, z7.h, z1.h[3]\n"
".inst 0x647a40f3 // bfdot z19.s, z7.h, z2.h[3]\n"
"37:" // Height 3: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 32b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"tbz %x[flags], #1, 38f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
- "fmin z8.s, p5/M, z8.s, z1.s\n"
- "fmin z9.s, p5/M, z9.s, z1.s\n"
- "fmin z10.s, p5/M, z10.s, z1.s\n"
- "fmin z11.s, p5/M, z11.s, z1.s\n"
- "fmin z12.s, p5/M, z12.s, z1.s\n"
- "fmin z13.s, p5/M, z13.s, z1.s\n"
- "fmin z14.s, p5/M, z14.s, z1.s\n"
- "fmin z15.s, p5/M, z15.s, z1.s\n"
- "fmin z16.s, p5/M, z16.s, z1.s\n"
- "fmin z17.s, p5/M, z17.s, z1.s\n"
- "fmin z18.s, p5/M, z18.s, z1.s\n"
- "fmin z19.s, p5/M, z19.s, z1.s\n"
- "fmax z8.s, p5/M, z8.s, z0.s\n"
- "fmax z9.s, p5/M, z9.s, z0.s\n"
- "fmax z10.s, p5/M, z10.s, z0.s\n"
- "fmax z11.s, p5/M, z11.s, z0.s\n"
- "fmax z12.s, p5/M, z12.s, z0.s\n"
- "fmax z13.s, p5/M, z13.s, z0.s\n"
- "fmax z14.s, p5/M, z14.s, z0.s\n"
- "fmax z15.s, p5/M, z15.s, z0.s\n"
- "fmax z16.s, p5/M, z16.s, z0.s\n"
- "fmax z17.s, p5/M, z17.s, z0.s\n"
- "fmax z18.s, p5/M, z18.s, z0.s\n"
- "fmax z19.s, p5/M, z19.s, z0.s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
+ "fmin z8.s, p5/M, z8.s, z0.s\n"
+ "fmin z9.s, p5/M, z9.s, z0.s\n"
+ "fmin z10.s, p5/M, z10.s, z0.s\n"
+ "fmin z11.s, p5/M, z11.s, z0.s\n"
+ "fmin z12.s, p5/M, z12.s, z0.s\n"
+ "fmax z8.s, p5/M, z8.s, z1.s\n"
+ "fmax z9.s, p5/M, z9.s, z1.s\n"
+ "fmax z10.s, p5/M, z10.s, z1.s\n"
+ "fmax z11.s, p5/M, z11.s, z1.s\n"
+ "fmax z12.s, p5/M, z12.s, z1.s\n"
+ "fmin z13.s, p5/M, z13.s, z0.s\n"
+ "fmin z14.s, p5/M, z14.s, z0.s\n"
+ "fmin z15.s, p5/M, z15.s, z0.s\n"
+ "fmin z16.s, p5/M, z16.s, z0.s\n"
+ "fmax z13.s, p5/M, z13.s, z1.s\n"
+ "fmax z14.s, p5/M, z14.s, z1.s\n"
+ "fmax z15.s, p5/M, z15.s, z1.s\n"
+ "fmax z16.s, p5/M, z16.s, z1.s\n"
+ "fmin z17.s, p5/M, z17.s, z0.s\n"
+ "fmin z18.s, p5/M, z18.s, z0.s\n"
+ "fmin z19.s, p5/M, z19.s, z0.s\n"
+ "fmax z17.s, p5/M, z17.s, z1.s\n"
+ "fmax z18.s, p5/M, z18.s, z1.s\n"
+ "fmax z19.s, p5/M, z19.s, z1.s\n"
"38:" // Height 3: No activation
- "st1w { z8.s }, p4, [x9]\n"
- "st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p4, [x25]\n"
- "st1w { z13.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x24]\n"
- "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z8.s }, p4, [x28]\n"
+ "st1w { z9.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z12.s }, p4, [x24]\n"
+ "st1w { z13.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x23]\n"
+ "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
"39:" // Height 3: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 28b\n"
"b 80f\n"
"40:" // Height 4
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"41:" // Height 4: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "cbz x12, 42f\n"
- "ld1w { z8.s }, p5/Z, [x12]\n"
- "ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x11\n"
+ "cbz x9, 42f\n"
+ "ld1w { z8.s }, p5/Z, [x9]\n"
"mov z12.d, z8.d\n"
+ "ld1w { z9.s }, p5/Z, [x9, #1, MUL VL]\n"
+ "mov z16.d, z8.d\n"
+ "ld1w { z10.s }, p5/Z, [x9, #2, MUL VL]\n"
+ "mov z20.d, z8.d\n"
+ "ld1w { z11.s }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"mov z13.d, z9.d\n"
- "ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "mov z17.d, z9.d\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
- "mov z16.d, z8.d\n"
- "mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
"b 44f\n"
"42:" // Height 4: no bias
"tbz %x[flags], #0, 43f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z8.s }, p4/Z, [x9]\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x25]\n"
- "ld1w { z13.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x25, #2, MUL VL]\n"
- "ld1w { z15.s }, p1/Z, [x25, #3, MUL VL]\n"
- "ld1w { z16.s }, p4/Z, [x24]\n"
- "ld1w { z17.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1w { z19.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x23]\n"
- "ld1w { z21.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z22.s }, p2/Z, [x23, #2, MUL VL]\n"
- "ld1w { z23.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z8.s }, p4/Z, [x28]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "ld1w { z9.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z11.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x23]\n"
+ "ld1w { z17.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z19.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z20.s }, p4/Z, [x22]\n"
+ "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
"b 44f\n"
"43:" // Height 4: no accumulate
"mov z8.b, #0x0\n"
@@ -870,55 +870,55 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"mov z22.b, #0x0\n"
"mov z23.b, #0x0\n"
"44:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"45:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 46f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 47f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 47f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
"b 47f\n"
"46:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
"47:" // Height 4: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"ble 49f\n"
"48:" // Height 4: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
- "sub x27, x27, #0x8\n"
- "ld1rqh { z2.h }, p0/Z, [x24]\n"
- "ld1rqh { z3.h }, p0/Z, [x23]\n"
- "cmp x27, #0x8\n"
- "add x26, x26, #0x10\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
+ "whilelt p0.h, XZR, x26\n"
"ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x26, x26, #0x8\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
".inst 0x646040c8 // bfdot z8.s, z6.h, z0.h[0]\n"
- ".inst 0x646140cc // bfdot z12.s, z6.h, z1.h[0]\n"
- ".inst 0x646240d0 // bfdot z16.s, z6.h, z2.h[0]\n"
- ".inst 0x646340d4 // bfdot z20.s, z6.h, z3.h[0]\n"
- "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "add x25, x25, #0x10\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "cmp x26, #0x8\n"
".inst 0x646040e9 // bfdot z9.s, z7.h, z0.h[0]\n"
- ".inst 0x646140ed // bfdot z13.s, z7.h, z1.h[0]\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0x646140cc // bfdot z12.s, z6.h, z1.h[0]\n"
+ "ld1rqh { z3.h }, p0/Z, [x22]\n"
"add x24, x24, #0x10\n"
+ ".inst 0x646240d0 // bfdot z16.s, z6.h, z2.h[0]\n"
"add x23, x23, #0x10\n"
+ ".inst 0x646140ed // bfdot z13.s, z7.h, z1.h[0]\n"
+ "add x22, x22, #0x10\n"
".inst 0x646240f1 // bfdot z17.s, z7.h, z2.h[0]\n"
+ ".inst 0x646340d4 // bfdot z20.s, z6.h, z3.h[0]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x646340f5 // bfdot z21.s, z7.h, z3.h[0]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x646040ca // bfdot z10.s, z6.h, z0.h[0]\n"
@@ -992,21 +992,21 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x647b40f7 // bfdot z23.s, z7.h, z3.h[3]\n"
"bgt 48b\n"
"49:" // Height 4: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
- "subs x27, x27, #0x2\n"
- "ld1rqh { z2.h }, p0/Z, [x24]\n"
- "ld1rqh { z3.h }, p0/Z, [x23]\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
+ "whilelt p0.h, XZR, x26\n"
"ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x2\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
".inst 0x646040c8 // bfdot z8.s, z6.h, z0.h[0]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ ".inst 0x646040e9 // bfdot z9.s, z7.h, z0.h[0]\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
+ "ld1rqh { z3.h }, p0/Z, [x22]\n"
".inst 0x646140cc // bfdot z12.s, z6.h, z1.h[0]\n"
+ ".inst 0x646140ed // bfdot z13.s, z7.h, z1.h[0]\n"
".inst 0x646240d0 // bfdot z16.s, z6.h, z2.h[0]\n"
".inst 0x646340d4 // bfdot z20.s, z6.h, z3.h[0]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- ".inst 0x646040e9 // bfdot z9.s, z7.h, z0.h[0]\n"
- ".inst 0x646140ed // bfdot z13.s, z7.h, z1.h[0]\n"
".inst 0x646240f1 // bfdot z17.s, z7.h, z2.h[0]\n"
".inst 0x646340f5 // bfdot z21.s, z7.h, z3.h[0]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
@@ -1021,13 +1021,13 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x646340f7 // bfdot z23.s, z7.h, z3.h[0]\n"
"ble 50f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x646840c8 // bfdot z8.s, z6.h, z0.h[1]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x2\n"
".inst 0x646940cc // bfdot z12.s, z6.h, z1.h[1]\n"
".inst 0x646a40d0 // bfdot z16.s, z6.h, z2.h[1]\n"
".inst 0x646b40d4 // bfdot z20.s, z6.h, z3.h[1]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x2\n"
".inst 0x646840e9 // bfdot z9.s, z7.h, z0.h[1]\n"
".inst 0x646940ed // bfdot z13.s, z7.h, z1.h[1]\n"
".inst 0x646a40f1 // bfdot z17.s, z7.h, z2.h[1]\n"
@@ -1044,13 +1044,13 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x646b40f7 // bfdot z23.s, z7.h, z3.h[1]\n"
"ble 50f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x647040c8 // bfdot z8.s, z6.h, z0.h[2]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x2\n"
".inst 0x647140cc // bfdot z12.s, z6.h, z1.h[2]\n"
".inst 0x647240d0 // bfdot z16.s, z6.h, z2.h[2]\n"
".inst 0x647340d4 // bfdot z20.s, z6.h, z3.h[2]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x2\n"
".inst 0x647040e9 // bfdot z9.s, z7.h, z0.h[2]\n"
".inst 0x647140ed // bfdot z13.s, z7.h, z1.h[2]\n"
".inst 0x647240f1 // bfdot z17.s, z7.h, z2.h[2]\n"
@@ -1067,8 +1067,8 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x647340f7 // bfdot z23.s, z7.h, z3.h[2]\n"
"ble 50f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x647840c8 // bfdot z8.s, z6.h, z0.h[3]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x647940cc // bfdot z12.s, z6.h, z1.h[3]\n"
".inst 0x647a40d0 // bfdot z16.s, z6.h, z2.h[3]\n"
".inst 0x647b40d4 // bfdot z20.s, z6.h, z3.h[3]\n"
@@ -1088,103 +1088,103 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x647a40f3 // bfdot z19.s, z7.h, z2.h[3]\n"
".inst 0x647b40f7 // bfdot z23.s, z7.h, z3.h[3]\n"
"50:" // Height 4: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 45b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"tbz %x[flags], #1, 51f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
- "fmin z8.s, p5/M, z8.s, z1.s\n"
- "fmin z9.s, p5/M, z9.s, z1.s\n"
- "fmin z10.s, p5/M, z10.s, z1.s\n"
- "fmin z11.s, p5/M, z11.s, z1.s\n"
- "fmin z12.s, p5/M, z12.s, z1.s\n"
- "fmin z13.s, p5/M, z13.s, z1.s\n"
- "fmin z14.s, p5/M, z14.s, z1.s\n"
- "fmin z15.s, p5/M, z15.s, z1.s\n"
- "fmin z16.s, p5/M, z16.s, z1.s\n"
- "fmin z17.s, p5/M, z17.s, z1.s\n"
- "fmin z18.s, p5/M, z18.s, z1.s\n"
- "fmin z19.s, p5/M, z19.s, z1.s\n"
- "fmin z20.s, p5/M, z20.s, z1.s\n"
- "fmin z21.s, p5/M, z21.s, z1.s\n"
- "fmin z22.s, p5/M, z22.s, z1.s\n"
- "fmin z23.s, p5/M, z23.s, z1.s\n"
- "fmax z8.s, p5/M, z8.s, z0.s\n"
- "fmax z9.s, p5/M, z9.s, z0.s\n"
- "fmax z10.s, p5/M, z10.s, z0.s\n"
- "fmax z11.s, p5/M, z11.s, z0.s\n"
- "fmax z12.s, p5/M, z12.s, z0.s\n"
- "fmax z13.s, p5/M, z13.s, z0.s\n"
- "fmax z14.s, p5/M, z14.s, z0.s\n"
- "fmax z15.s, p5/M, z15.s, z0.s\n"
- "fmax z16.s, p5/M, z16.s, z0.s\n"
- "fmax z17.s, p5/M, z17.s, z0.s\n"
- "fmax z18.s, p5/M, z18.s, z0.s\n"
- "fmax z19.s, p5/M, z19.s, z0.s\n"
- "fmax z20.s, p5/M, z20.s, z0.s\n"
- "fmax z21.s, p5/M, z21.s, z0.s\n"
- "fmax z22.s, p5/M, z22.s, z0.s\n"
- "fmax z23.s, p5/M, z23.s, z0.s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
+ "fmin z8.s, p5/M, z8.s, z0.s\n"
+ "fmin z9.s, p5/M, z9.s, z0.s\n"
+ "fmin z10.s, p5/M, z10.s, z0.s\n"
+ "fmin z11.s, p5/M, z11.s, z0.s\n"
+ "fmin z12.s, p5/M, z12.s, z0.s\n"
+ "fmax z8.s, p5/M, z8.s, z1.s\n"
+ "fmax z9.s, p5/M, z9.s, z1.s\n"
+ "fmax z10.s, p5/M, z10.s, z1.s\n"
+ "fmax z11.s, p5/M, z11.s, z1.s\n"
+ "fmax z12.s, p5/M, z12.s, z1.s\n"
+ "fmin z13.s, p5/M, z13.s, z0.s\n"
+ "fmin z14.s, p5/M, z14.s, z0.s\n"
+ "fmin z15.s, p5/M, z15.s, z0.s\n"
+ "fmin z16.s, p5/M, z16.s, z0.s\n"
+ "fmax z13.s, p5/M, z13.s, z1.s\n"
+ "fmax z14.s, p5/M, z14.s, z1.s\n"
+ "fmax z15.s, p5/M, z15.s, z1.s\n"
+ "fmax z16.s, p5/M, z16.s, z1.s\n"
+ "fmin z17.s, p5/M, z17.s, z0.s\n"
+ "fmin z18.s, p5/M, z18.s, z0.s\n"
+ "fmin z19.s, p5/M, z19.s, z0.s\n"
+ "fmin z20.s, p5/M, z20.s, z0.s\n"
+ "fmax z17.s, p5/M, z17.s, z1.s\n"
+ "fmax z18.s, p5/M, z18.s, z1.s\n"
+ "fmax z19.s, p5/M, z19.s, z1.s\n"
+ "fmax z20.s, p5/M, z20.s, z1.s\n"
+ "fmin z21.s, p5/M, z21.s, z0.s\n"
+ "fmin z22.s, p5/M, z22.s, z0.s\n"
+ "fmin z23.s, p5/M, z23.s, z0.s\n"
+ "fmax z21.s, p5/M, z21.s, z1.s\n"
+ "fmax z22.s, p5/M, z22.s, z1.s\n"
+ "fmax z23.s, p5/M, z23.s, z1.s\n"
"51:" // Height 4: No activation
- "st1w { z8.s }, p4, [x9]\n"
- "st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p4, [x25]\n"
- "st1w { z13.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x24]\n"
- "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z20.s }, p4, [x23]\n"
- "st1w { z21.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z22.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z23.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z8.s }, p4, [x28]\n"
+ "st1w { z9.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z12.s }, p4, [x24]\n"
+ "st1w { z13.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x23]\n"
+ "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z20.s }, p4, [x22]\n"
+ "st1w { z21.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z22.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z23.s }, p1, [x22, #3, MUL VL]\n"
"52:" // Height 4: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 41b\n"
"b 80f\n"
"53:" // Height 5
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"54:" // Height 5: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "cbz x12, 55f\n"
- "ld1w { z8.s }, p5/Z, [x12]\n"
- "ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x11\n"
+ "cbz x9, 55f\n"
+ "ld1w { z8.s }, p5/Z, [x9]\n"
"mov z12.d, z8.d\n"
+ "ld1w { z9.s }, p5/Z, [x9, #1, MUL VL]\n"
+ "mov z16.d, z8.d\n"
+ "ld1w { z10.s }, p5/Z, [x9, #2, MUL VL]\n"
+ "mov z20.d, z8.d\n"
+ "ld1w { z11.s }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"mov z13.d, z9.d\n"
- "ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "mov z17.d, z9.d\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
- "mov z16.d, z8.d\n"
- "mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
@@ -1195,31 +1195,31 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"b 57f\n"
"55:" // Height 5: no bias
"tbz %x[flags], #0, 56f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z8.s }, p4/Z, [x9]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x25]\n"
- "ld1w { z13.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x25, #2, MUL VL]\n"
- "ld1w { z15.s }, p1/Z, [x25, #3, MUL VL]\n"
- "ld1w { z16.s }, p4/Z, [x24]\n"
- "ld1w { z17.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1w { z19.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x23]\n"
- "ld1w { z21.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z22.s }, p2/Z, [x23, #2, MUL VL]\n"
- "ld1w { z23.s }, p1/Z, [x23, #3, MUL VL]\n"
- "ld1w { z24.s }, p4/Z, [x22]\n"
- "ld1w { z25.s }, p3/Z, [x22, #1, MUL VL]\n"
- "ld1w { z26.s }, p2/Z, [x22, #2, MUL VL]\n"
- "ld1w { z27.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z8.s }, p4/Z, [x28]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "ld1w { z9.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z11.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x23]\n"
+ "ld1w { z17.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z19.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z20.s }, p4/Z, [x22]\n"
+ "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z24.s }, p4/Z, [x21]\n"
+ "ld1w { z25.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z26.s }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z27.s }, p1/Z, [x21, #3, MUL VL]\n"
"b 57f\n"
"56:" // Height 5: no accumulate
"mov z8.b, #0x0\n"
@@ -1243,61 +1243,61 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"mov z26.b, #0x0\n"
"mov z27.b, #0x0\n"
"57:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"58:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 59f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 60f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
- "add x22, x22, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 60f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
"b 60f\n"
"59:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
"60:" // Height 5: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"ble 62f\n"
"61:" // Height 5: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
- "sub x27, x27, #0x8\n"
- "ld1rqh { z2.h }, p0/Z, [x24]\n"
- "ld1rqh { z3.h }, p0/Z, [x23]\n"
- "cmp x27, #0x8\n"
- "add x26, x26, #0x10\n"
- "ld1rqh { z4.h }, p0/Z, [x22]\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x26, x26, #0x8\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
".inst 0x646040c8 // bfdot z8.s, z6.h, z0.h[0]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "cmp x26, #0x8\n"
+ ".inst 0x646040e9 // bfdot z9.s, z7.h, z0.h[0]\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
+ "add x25, x25, #0x10\n"
".inst 0x646140cc // bfdot z12.s, z6.h, z1.h[0]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1rqh { z3.h }, p0/Z, [x22]\n"
+ "add x24, x24, #0x10\n"
".inst 0x646240d0 // bfdot z16.s, z6.h, z2.h[0]\n"
+ "ld1rqh { z4.h }, p0/Z, [x21]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x646140ed // bfdot z13.s, z7.h, z1.h[0]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x646240f1 // bfdot z17.s, z7.h, z2.h[0]\n"
+ "add x21, x21, #0x10\n"
".inst 0x646340d4 // bfdot z20.s, z6.h, z3.h[0]\n"
- "add x25, x25, #0x10\n"
".inst 0x646440d8 // bfdot z24.s, z6.h, z4.h[0]\n"
- ".inst 0x646040e9 // bfdot z9.s, z7.h, z0.h[0]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "add x24, x24, #0x10\n"
- ".inst 0x646140ed // bfdot z13.s, z7.h, z1.h[0]\n"
- ".inst 0x646240f1 // bfdot z17.s, z7.h, z2.h[0]\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
".inst 0x646340f5 // bfdot z21.s, z7.h, z3.h[0]\n"
".inst 0x646440f9 // bfdot z25.s, z7.h, z4.h[0]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
@@ -1306,8 +1306,8 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x646240d2 // bfdot z18.s, z6.h, z2.h[0]\n"
".inst 0x646340d6 // bfdot z22.s, z6.h, z3.h[0]\n"
".inst 0x646440da // bfdot z26.s, z6.h, z4.h[0]\n"
- ".inst 0x646040eb // bfdot z11.s, z7.h, z0.h[0]\n"
"ld1h { z6.h }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x646040eb // bfdot z11.s, z7.h, z0.h[0]\n"
".inst 0x646140ef // bfdot z15.s, z7.h, z1.h[0]\n"
".inst 0x646240f3 // bfdot z19.s, z7.h, z2.h[0]\n"
".inst 0x646340f7 // bfdot z23.s, z7.h, z3.h[0]\n"
@@ -1318,8 +1318,8 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x646a40d0 // bfdot z16.s, z6.h, z2.h[1]\n"
".inst 0x646b40d4 // bfdot z20.s, z6.h, z3.h[1]\n"
".inst 0x646c40d8 // bfdot z24.s, z6.h, z4.h[1]\n"
- ".inst 0x646840e9 // bfdot z9.s, z7.h, z0.h[1]\n"
"ld1h { z6.h }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x646840e9 // bfdot z9.s, z7.h, z0.h[1]\n"
".inst 0x646940ed // bfdot z13.s, z7.h, z1.h[1]\n"
".inst 0x646a40f1 // bfdot z17.s, z7.h, z2.h[1]\n"
".inst 0x646b40f5 // bfdot z21.s, z7.h, z3.h[1]\n"
@@ -1331,8 +1331,8 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x646a40d2 // bfdot z18.s, z6.h, z2.h[1]\n"
".inst 0x646b40d6 // bfdot z22.s, z6.h, z3.h[1]\n"
".inst 0x646c40da // bfdot z26.s, z6.h, z4.h[1]\n"
- ".inst 0x646840eb // bfdot z11.s, z7.h, z0.h[1]\n"
"ld1h { z6.h }, p5/Z, [x10, #-8, MUL VL]\n"
+ ".inst 0x646840eb // bfdot z11.s, z7.h, z0.h[1]\n"
".inst 0x646940ef // bfdot z15.s, z7.h, z1.h[1]\n"
".inst 0x646a40f3 // bfdot z19.s, z7.h, z2.h[1]\n"
".inst 0x646b40f7 // bfdot z23.s, z7.h, z3.h[1]\n"
@@ -1343,8 +1343,8 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x647240d0 // bfdot z16.s, z6.h, z2.h[2]\n"
".inst 0x647340d4 // bfdot z20.s, z6.h, z3.h[2]\n"
".inst 0x647440d8 // bfdot z24.s, z6.h, z4.h[2]\n"
- ".inst 0x647040e9 // bfdot z9.s, z7.h, z0.h[2]\n"
"ld1h { z6.h }, p5/Z, [x10, #-6, MUL VL]\n"
+ ".inst 0x647040e9 // bfdot z9.s, z7.h, z0.h[2]\n"
".inst 0x647140ed // bfdot z13.s, z7.h, z1.h[2]\n"
".inst 0x647240f1 // bfdot z17.s, z7.h, z2.h[2]\n"
".inst 0x647340f5 // bfdot z21.s, z7.h, z3.h[2]\n"
@@ -1355,8 +1355,8 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x647240d2 // bfdot z18.s, z6.h, z2.h[2]\n"
".inst 0x647340d6 // bfdot z22.s, z6.h, z3.h[2]\n"
".inst 0x647440da // bfdot z26.s, z6.h, z4.h[2]\n"
- ".inst 0x647040eb // bfdot z11.s, z7.h, z0.h[2]\n"
"ld1h { z6.h }, p5/Z, [x10, #-4, MUL VL]\n"
+ ".inst 0x647040eb // bfdot z11.s, z7.h, z0.h[2]\n"
".inst 0x647140ef // bfdot z15.s, z7.h, z1.h[2]\n"
".inst 0x647240f3 // bfdot z19.s, z7.h, z2.h[2]\n"
".inst 0x647340f7 // bfdot z23.s, z7.h, z3.h[2]\n"
@@ -1367,8 +1367,8 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x647a40d0 // bfdot z16.s, z6.h, z2.h[3]\n"
".inst 0x647b40d4 // bfdot z20.s, z6.h, z3.h[3]\n"
".inst 0x647c40d8 // bfdot z24.s, z6.h, z4.h[3]\n"
- ".inst 0x647840e9 // bfdot z9.s, z7.h, z0.h[3]\n"
"ld1h { z6.h }, p5/Z, [x10, #-2, MUL VL]\n"
+ ".inst 0x647840e9 // bfdot z9.s, z7.h, z0.h[3]\n"
".inst 0x647940ed // bfdot z13.s, z7.h, z1.h[3]\n"
".inst 0x647a40f1 // bfdot z17.s, z7.h, z2.h[3]\n"
".inst 0x647b40f5 // bfdot z21.s, z7.h, z3.h[3]\n"
@@ -1386,23 +1386,23 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x647c40fb // bfdot z27.s, z7.h, z4.h[3]\n"
"bgt 61b\n"
"62:" // Height 5: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
- "subs x27, x27, #0x2\n"
- "ld1rqh { z2.h }, p0/Z, [x24]\n"
- "ld1rqh { z3.h }, p0/Z, [x23]\n"
- "ld1rqh { z4.h }, p0/Z, [x22]\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x2\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
".inst 0x646040c8 // bfdot z8.s, z6.h, z0.h[0]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ ".inst 0x646040e9 // bfdot z9.s, z7.h, z0.h[0]\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
+ "ld1rqh { z3.h }, p0/Z, [x22]\n"
".inst 0x646140cc // bfdot z12.s, z6.h, z1.h[0]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1rqh { z4.h }, p0/Z, [x21]\n"
+ ".inst 0x646140ed // bfdot z13.s, z7.h, z1.h[0]\n"
".inst 0x646240d0 // bfdot z16.s, z6.h, z2.h[0]\n"
".inst 0x646340d4 // bfdot z20.s, z6.h, z3.h[0]\n"
".inst 0x646440d8 // bfdot z24.s, z6.h, z4.h[0]\n"
- ".inst 0x646040e9 // bfdot z9.s, z7.h, z0.h[0]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- ".inst 0x646140ed // bfdot z13.s, z7.h, z1.h[0]\n"
".inst 0x646240f1 // bfdot z17.s, z7.h, z2.h[0]\n"
".inst 0x646340f5 // bfdot z21.s, z7.h, z3.h[0]\n"
".inst 0x646440f9 // bfdot z25.s, z7.h, z4.h[0]\n"
@@ -1420,15 +1420,15 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x646440fb // bfdot z27.s, z7.h, z4.h[0]\n"
"ble 63f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x646840c8 // bfdot z8.s, z6.h, z0.h[1]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x2\n"
".inst 0x646940cc // bfdot z12.s, z6.h, z1.h[1]\n"
".inst 0x646a40d0 // bfdot z16.s, z6.h, z2.h[1]\n"
".inst 0x646b40d4 // bfdot z20.s, z6.h, z3.h[1]\n"
- "subs x27, x27, #0x2\n"
".inst 0x646c40d8 // bfdot z24.s, z6.h, z4.h[1]\n"
- ".inst 0x646840e9 // bfdot z9.s, z7.h, z0.h[1]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x646840e9 // bfdot z9.s, z7.h, z0.h[1]\n"
".inst 0x646940ed // bfdot z13.s, z7.h, z1.h[1]\n"
".inst 0x646a40f1 // bfdot z17.s, z7.h, z2.h[1]\n"
".inst 0x646b40f5 // bfdot z21.s, z7.h, z3.h[1]\n"
@@ -1447,15 +1447,15 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x646c40fb // bfdot z27.s, z7.h, z4.h[1]\n"
"ble 63f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x647040c8 // bfdot z8.s, z6.h, z0.h[2]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x2\n"
".inst 0x647140cc // bfdot z12.s, z6.h, z1.h[2]\n"
".inst 0x647240d0 // bfdot z16.s, z6.h, z2.h[2]\n"
".inst 0x647340d4 // bfdot z20.s, z6.h, z3.h[2]\n"
- "subs x27, x27, #0x2\n"
".inst 0x647440d8 // bfdot z24.s, z6.h, z4.h[2]\n"
- ".inst 0x647040e9 // bfdot z9.s, z7.h, z0.h[2]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x647040e9 // bfdot z9.s, z7.h, z0.h[2]\n"
".inst 0x647140ed // bfdot z13.s, z7.h, z1.h[2]\n"
".inst 0x647240f1 // bfdot z17.s, z7.h, z2.h[2]\n"
".inst 0x647340f5 // bfdot z21.s, z7.h, z3.h[2]\n"
@@ -1474,14 +1474,14 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x647440fb // bfdot z27.s, z7.h, z4.h[2]\n"
"ble 63f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x647840c8 // bfdot z8.s, z6.h, z0.h[3]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x647940cc // bfdot z12.s, z6.h, z1.h[3]\n"
".inst 0x647a40d0 // bfdot z16.s, z6.h, z2.h[3]\n"
".inst 0x647b40d4 // bfdot z20.s, z6.h, z3.h[3]\n"
".inst 0x647c40d8 // bfdot z24.s, z6.h, z4.h[3]\n"
- ".inst 0x647840e9 // bfdot z9.s, z7.h, z0.h[3]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x647840e9 // bfdot z9.s, z7.h, z0.h[3]\n"
".inst 0x647940ed // bfdot z13.s, z7.h, z1.h[3]\n"
".inst 0x647a40f1 // bfdot z17.s, z7.h, z2.h[3]\n"
".inst 0x647b40f5 // bfdot z21.s, z7.h, z3.h[3]\n"
@@ -1499,119 +1499,119 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x647b40f7 // bfdot z23.s, z7.h, z3.h[3]\n"
".inst 0x647c40fb // bfdot z27.s, z7.h, z4.h[3]\n"
"63:" // Height 5: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 58b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"tbz %x[flags], #1, 64f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
- "fmin z8.s, p5/M, z8.s, z1.s\n"
- "fmin z9.s, p5/M, z9.s, z1.s\n"
- "fmin z10.s, p5/M, z10.s, z1.s\n"
- "fmin z11.s, p5/M, z11.s, z1.s\n"
- "fmin z12.s, p5/M, z12.s, z1.s\n"
- "fmin z13.s, p5/M, z13.s, z1.s\n"
- "fmin z14.s, p5/M, z14.s, z1.s\n"
- "fmin z15.s, p5/M, z15.s, z1.s\n"
- "fmin z16.s, p5/M, z16.s, z1.s\n"
- "fmin z17.s, p5/M, z17.s, z1.s\n"
- "fmin z18.s, p5/M, z18.s, z1.s\n"
- "fmin z19.s, p5/M, z19.s, z1.s\n"
- "fmin z20.s, p5/M, z20.s, z1.s\n"
- "fmin z21.s, p5/M, z21.s, z1.s\n"
- "fmin z22.s, p5/M, z22.s, z1.s\n"
- "fmin z23.s, p5/M, z23.s, z1.s\n"
- "fmin z24.s, p5/M, z24.s, z1.s\n"
- "fmin z25.s, p5/M, z25.s, z1.s\n"
- "fmin z26.s, p5/M, z26.s, z1.s\n"
- "fmin z27.s, p5/M, z27.s, z1.s\n"
- "fmax z8.s, p5/M, z8.s, z0.s\n"
- "fmax z9.s, p5/M, z9.s, z0.s\n"
- "fmax z10.s, p5/M, z10.s, z0.s\n"
- "fmax z11.s, p5/M, z11.s, z0.s\n"
- "fmax z12.s, p5/M, z12.s, z0.s\n"
- "fmax z13.s, p5/M, z13.s, z0.s\n"
- "fmax z14.s, p5/M, z14.s, z0.s\n"
- "fmax z15.s, p5/M, z15.s, z0.s\n"
- "fmax z16.s, p5/M, z16.s, z0.s\n"
- "fmax z17.s, p5/M, z17.s, z0.s\n"
- "fmax z18.s, p5/M, z18.s, z0.s\n"
- "fmax z19.s, p5/M, z19.s, z0.s\n"
- "fmax z20.s, p5/M, z20.s, z0.s\n"
- "fmax z21.s, p5/M, z21.s, z0.s\n"
- "fmax z22.s, p5/M, z22.s, z0.s\n"
- "fmax z23.s, p5/M, z23.s, z0.s\n"
- "fmax z24.s, p5/M, z24.s, z0.s\n"
- "fmax z25.s, p5/M, z25.s, z0.s\n"
- "fmax z26.s, p5/M, z26.s, z0.s\n"
- "fmax z27.s, p5/M, z27.s, z0.s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
+ "fmin z8.s, p5/M, z8.s, z0.s\n"
+ "fmin z9.s, p5/M, z9.s, z0.s\n"
+ "fmin z10.s, p5/M, z10.s, z0.s\n"
+ "fmin z11.s, p5/M, z11.s, z0.s\n"
+ "fmin z12.s, p5/M, z12.s, z0.s\n"
+ "fmax z8.s, p5/M, z8.s, z1.s\n"
+ "fmax z9.s, p5/M, z9.s, z1.s\n"
+ "fmax z10.s, p5/M, z10.s, z1.s\n"
+ "fmax z11.s, p5/M, z11.s, z1.s\n"
+ "fmax z12.s, p5/M, z12.s, z1.s\n"
+ "fmin z13.s, p5/M, z13.s, z0.s\n"
+ "fmin z14.s, p5/M, z14.s, z0.s\n"
+ "fmin z15.s, p5/M, z15.s, z0.s\n"
+ "fmin z16.s, p5/M, z16.s, z0.s\n"
+ "fmax z13.s, p5/M, z13.s, z1.s\n"
+ "fmax z14.s, p5/M, z14.s, z1.s\n"
+ "fmax z15.s, p5/M, z15.s, z1.s\n"
+ "fmax z16.s, p5/M, z16.s, z1.s\n"
+ "fmin z17.s, p5/M, z17.s, z0.s\n"
+ "fmin z18.s, p5/M, z18.s, z0.s\n"
+ "fmin z19.s, p5/M, z19.s, z0.s\n"
+ "fmin z20.s, p5/M, z20.s, z0.s\n"
+ "fmax z17.s, p5/M, z17.s, z1.s\n"
+ "fmax z18.s, p5/M, z18.s, z1.s\n"
+ "fmax z19.s, p5/M, z19.s, z1.s\n"
+ "fmax z20.s, p5/M, z20.s, z1.s\n"
+ "fmin z21.s, p5/M, z21.s, z0.s\n"
+ "fmin z22.s, p5/M, z22.s, z0.s\n"
+ "fmin z23.s, p5/M, z23.s, z0.s\n"
+ "fmin z24.s, p5/M, z24.s, z0.s\n"
+ "fmax z21.s, p5/M, z21.s, z1.s\n"
+ "fmax z22.s, p5/M, z22.s, z1.s\n"
+ "fmax z23.s, p5/M, z23.s, z1.s\n"
+ "fmax z24.s, p5/M, z24.s, z1.s\n"
+ "fmin z25.s, p5/M, z25.s, z0.s\n"
+ "fmin z26.s, p5/M, z26.s, z0.s\n"
+ "fmin z27.s, p5/M, z27.s, z0.s\n"
+ "fmax z25.s, p5/M, z25.s, z1.s\n"
+ "fmax z26.s, p5/M, z26.s, z1.s\n"
+ "fmax z27.s, p5/M, z27.s, z1.s\n"
"64:" // Height 5: No activation
- "st1w { z8.s }, p4, [x9]\n"
- "st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p4, [x25]\n"
- "st1w { z13.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x24]\n"
- "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z20.s }, p4, [x23]\n"
- "st1w { z21.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z22.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z23.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z24.s }, p4, [x22]\n"
- "st1w { z25.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z26.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z27.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z8.s }, p4, [x28]\n"
+ "st1w { z9.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z12.s }, p4, [x24]\n"
+ "st1w { z13.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x23]\n"
+ "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z20.s }, p4, [x22]\n"
+ "st1w { z21.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z22.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z23.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x21]\n"
+ "st1w { z25.s }, p3, [x21, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x21, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x21, #3, MUL VL]\n"
"65:" // Height 5: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 54b\n"
"b 80f\n"
"66:" // Height 6
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x18\n"
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x19, #0x18\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"67:" // Height 6: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "cbz x12, 68f\n"
- "ld1w { z8.s }, p5/Z, [x12]\n"
- "ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x11\n"
+ "cbz x9, 68f\n"
+ "ld1w { z8.s }, p5/Z, [x9]\n"
"mov z12.d, z8.d\n"
+ "ld1w { z9.s }, p5/Z, [x9, #1, MUL VL]\n"
+ "mov z16.d, z8.d\n"
+ "ld1w { z10.s }, p5/Z, [x9, #2, MUL VL]\n"
+ "mov z20.d, z8.d\n"
+ "ld1w { z11.s }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"mov z13.d, z9.d\n"
- "ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "mov z17.d, z9.d\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
- "mov z16.d, z8.d\n"
- "mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
@@ -1626,36 +1626,36 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"b 70f\n"
"68:" // Height 6: no bias
"tbz %x[flags], #0, 69f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z8.s }, p4/Z, [x9]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
- "add x21, x22, x20, LSL #2\n"
- "ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x25]\n"
- "ld1w { z13.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x25, #2, MUL VL]\n"
- "ld1w { z15.s }, p1/Z, [x25, #3, MUL VL]\n"
- "ld1w { z16.s }, p4/Z, [x24]\n"
- "ld1w { z17.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1w { z19.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x23]\n"
- "ld1w { z21.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z22.s }, p2/Z, [x23, #2, MUL VL]\n"
- "ld1w { z23.s }, p1/Z, [x23, #3, MUL VL]\n"
- "ld1w { z24.s }, p4/Z, [x22]\n"
- "ld1w { z25.s }, p3/Z, [x22, #1, MUL VL]\n"
- "ld1w { z26.s }, p2/Z, [x22, #2, MUL VL]\n"
- "ld1w { z27.s }, p1/Z, [x22, #3, MUL VL]\n"
- "ld1w { z28.s }, p4/Z, [x21]\n"
- "ld1w { z29.s }, p3/Z, [x21, #1, MUL VL]\n"
- "ld1w { z30.s }, p2/Z, [x21, #2, MUL VL]\n"
- "ld1w { z31.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z8.s }, p4/Z, [x28]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "ld1w { z9.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z11.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "add x20, x21, x19, LSL #2\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x23]\n"
+ "ld1w { z17.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z19.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z20.s }, p4/Z, [x22]\n"
+ "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z24.s }, p4/Z, [x21]\n"
+ "ld1w { z25.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z26.s }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z27.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "ld1w { z28.s }, p4/Z, [x20]\n"
+ "ld1w { z29.s }, p3/Z, [x20, #1, MUL VL]\n"
+ "ld1w { z30.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z31.s }, p1/Z, [x20, #3, MUL VL]\n"
"b 70f\n"
"69:" // Height 6: no accumulate
"mov z8.b, #0x0\n"
@@ -1683,67 +1683,67 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"mov z30.b, #0x0\n"
"mov z31.b, #0x0\n"
"70:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"71:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 72f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 73f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
- "add x22, x22, x20, LSL #1\n"
- "add x21, x21, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 73f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
+ "add x20, x20, x19, LSL #1\n"
"b 73f\n"
"72:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
"73:" // Height 6: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"ble 75f\n"
"74:" // Height 6: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
- "sub x27, x27, #0x8\n"
- "ld1rqh { z2.h }, p0/Z, [x24]\n"
- "ld1rqh { z3.h }, p0/Z, [x23]\n"
- "cmp x27, #0x8\n"
- "add x26, x26, #0x10\n"
- "ld1rqh { z4.h }, p0/Z, [x22]\n"
- "ld1rqh { z5.h }, p0/Z, [x21]\n"
- "add x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
+ "whilelt p0.h, XZR, x26\n"
"ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x26, x26, #0x8\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
".inst 0x646040c8 // bfdot z8.s, z6.h, z0.h[0]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "cmp x26, #0x8\n"
+ ".inst 0x646040e9 // bfdot z9.s, z7.h, z0.h[0]\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
+ "add x25, x25, #0x10\n"
".inst 0x646140cc // bfdot z12.s, z6.h, z1.h[0]\n"
+ "ld1rqh { z3.h }, p0/Z, [x22]\n"
+ "add x24, x24, #0x10\n"
".inst 0x646240d0 // bfdot z16.s, z6.h, z2.h[0]\n"
- ".inst 0x646340d4 // bfdot z20.s, z6.h, z3.h[0]\n"
+ "ld1rqh { z4.h }, p0/Z, [x21]\n"
"add x23, x23, #0x10\n"
+ ".inst 0x646140ed // bfdot z13.s, z7.h, z1.h[0]\n"
+ "ld1rqh { z5.h }, p0/Z, [x20]\n"
"add x22, x22, #0x10\n"
+ ".inst 0x646340d4 // bfdot z20.s, z6.h, z3.h[0]\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x646240f1 // bfdot z17.s, z7.h, z2.h[0]\n"
+ "add x20, x20, #0x10\n"
".inst 0x646440d8 // bfdot z24.s, z6.h, z4.h[0]\n"
".inst 0x646540dc // bfdot z28.s, z6.h, z5.h[0]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "add x21, x21, #0x10\n"
- ".inst 0x646040e9 // bfdot z9.s, z7.h, z0.h[0]\n"
- ".inst 0x646140ed // bfdot z13.s, z7.h, z1.h[0]\n"
- ".inst 0x646240f1 // bfdot z17.s, z7.h, z2.h[0]\n"
".inst 0x646340f5 // bfdot z21.s, z7.h, z3.h[0]\n"
".inst 0x646440f9 // bfdot z25.s, z7.h, z4.h[0]\n"
".inst 0x646540fd // bfdot z29.s, z7.h, z5.h[0]\n"
@@ -1847,25 +1847,25 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x647d40ff // bfdot z31.s, z7.h, z5.h[3]\n"
"bgt 74b\n"
"75:" // Height 6: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
- "subs x27, x27, #0x2\n"
- "ld1rqh { z2.h }, p0/Z, [x24]\n"
- "ld1rqh { z3.h }, p0/Z, [x23]\n"
- "ld1rqh { z4.h }, p0/Z, [x22]\n"
- "ld1rqh { z5.h }, p0/Z, [x21]\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
+ "whilelt p0.h, XZR, x26\n"
"ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x2\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
".inst 0x646040c8 // bfdot z8.s, z6.h, z0.h[0]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ ".inst 0x646040e9 // bfdot z9.s, z7.h, z0.h[0]\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
+ "ld1rqh { z3.h }, p0/Z, [x22]\n"
".inst 0x646140cc // bfdot z12.s, z6.h, z1.h[0]\n"
+ "ld1rqh { z4.h }, p0/Z, [x21]\n"
+ ".inst 0x646140ed // bfdot z13.s, z7.h, z1.h[0]\n"
+ "ld1rqh { z5.h }, p0/Z, [x20]\n"
".inst 0x646240d0 // bfdot z16.s, z6.h, z2.h[0]\n"
".inst 0x646340d4 // bfdot z20.s, z6.h, z3.h[0]\n"
".inst 0x646440d8 // bfdot z24.s, z6.h, z4.h[0]\n"
".inst 0x646540dc // bfdot z28.s, z6.h, z5.h[0]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- ".inst 0x646040e9 // bfdot z9.s, z7.h, z0.h[0]\n"
- ".inst 0x646140ed // bfdot z13.s, z7.h, z1.h[0]\n"
".inst 0x646240f1 // bfdot z17.s, z7.h, z2.h[0]\n"
".inst 0x646340f5 // bfdot z21.s, z7.h, z3.h[0]\n"
".inst 0x646440f9 // bfdot z25.s, z7.h, z4.h[0]\n"
@@ -1886,12 +1886,12 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x646540ff // bfdot z31.s, z7.h, z5.h[0]\n"
"ble 76f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x646840c8 // bfdot z8.s, z6.h, z0.h[1]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x2\n"
".inst 0x646940cc // bfdot z12.s, z6.h, z1.h[1]\n"
".inst 0x646a40d0 // bfdot z16.s, z6.h, z2.h[1]\n"
".inst 0x646b40d4 // bfdot z20.s, z6.h, z3.h[1]\n"
- "subs x27, x27, #0x2\n"
".inst 0x646c40d8 // bfdot z24.s, z6.h, z4.h[1]\n"
".inst 0x646d40dc // bfdot z28.s, z6.h, z5.h[1]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
@@ -1917,12 +1917,12 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x646d40ff // bfdot z31.s, z7.h, z5.h[1]\n"
"ble 76f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x647040c8 // bfdot z8.s, z6.h, z0.h[2]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x2\n"
".inst 0x647140cc // bfdot z12.s, z6.h, z1.h[2]\n"
".inst 0x647240d0 // bfdot z16.s, z6.h, z2.h[2]\n"
".inst 0x647340d4 // bfdot z20.s, z6.h, z3.h[2]\n"
- "subs x27, x27, #0x2\n"
".inst 0x647440d8 // bfdot z24.s, z6.h, z4.h[2]\n"
".inst 0x647540dc // bfdot z28.s, z6.h, z5.h[2]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
@@ -1948,8 +1948,8 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x647540ff // bfdot z31.s, z7.h, z5.h[2]\n"
"ble 76f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x647840c8 // bfdot z8.s, z6.h, z0.h[3]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x647940cc // bfdot z12.s, z6.h, z1.h[3]\n"
".inst 0x647a40d0 // bfdot z16.s, z6.h, z2.h[3]\n"
".inst 0x647b40d4 // bfdot z20.s, z6.h, z3.h[3]\n"
@@ -1977,115 +1977,115 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x647c40fb // bfdot z27.s, z7.h, z4.h[3]\n"
".inst 0x647d40ff // bfdot z31.s, z7.h, z5.h[3]\n"
"76:" // Height 6: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 71b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"tbz %x[flags], #1, 77f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
- "fmin z8.s, p5/M, z8.s, z1.s\n"
- "fmin z9.s, p5/M, z9.s, z1.s\n"
- "fmin z10.s, p5/M, z10.s, z1.s\n"
- "fmin z11.s, p5/M, z11.s, z1.s\n"
- "fmin z12.s, p5/M, z12.s, z1.s\n"
- "fmin z13.s, p5/M, z13.s, z1.s\n"
- "fmin z14.s, p5/M, z14.s, z1.s\n"
- "fmin z15.s, p5/M, z15.s, z1.s\n"
- "fmin z16.s, p5/M, z16.s, z1.s\n"
- "fmin z17.s, p5/M, z17.s, z1.s\n"
- "fmin z18.s, p5/M, z18.s, z1.s\n"
- "fmin z19.s, p5/M, z19.s, z1.s\n"
- "fmin z20.s, p5/M, z20.s, z1.s\n"
- "fmin z21.s, p5/M, z21.s, z1.s\n"
- "fmin z22.s, p5/M, z22.s, z1.s\n"
- "fmin z23.s, p5/M, z23.s, z1.s\n"
- "fmin z24.s, p5/M, z24.s, z1.s\n"
- "fmin z25.s, p5/M, z25.s, z1.s\n"
- "fmin z26.s, p5/M, z26.s, z1.s\n"
- "fmin z27.s, p5/M, z27.s, z1.s\n"
- "fmin z28.s, p5/M, z28.s, z1.s\n"
- "fmin z29.s, p5/M, z29.s, z1.s\n"
- "fmin z30.s, p5/M, z30.s, z1.s\n"
- "fmin z31.s, p5/M, z31.s, z1.s\n"
- "fmax z8.s, p5/M, z8.s, z0.s\n"
- "fmax z9.s, p5/M, z9.s, z0.s\n"
- "fmax z10.s, p5/M, z10.s, z0.s\n"
- "fmax z11.s, p5/M, z11.s, z0.s\n"
- "fmax z12.s, p5/M, z12.s, z0.s\n"
- "fmax z13.s, p5/M, z13.s, z0.s\n"
- "fmax z14.s, p5/M, z14.s, z0.s\n"
- "fmax z15.s, p5/M, z15.s, z0.s\n"
- "fmax z16.s, p5/M, z16.s, z0.s\n"
- "fmax z17.s, p5/M, z17.s, z0.s\n"
- "fmax z18.s, p5/M, z18.s, z0.s\n"
- "fmax z19.s, p5/M, z19.s, z0.s\n"
- "fmax z20.s, p5/M, z20.s, z0.s\n"
- "fmax z21.s, p5/M, z21.s, z0.s\n"
- "fmax z22.s, p5/M, z22.s, z0.s\n"
- "fmax z23.s, p5/M, z23.s, z0.s\n"
- "fmax z24.s, p5/M, z24.s, z0.s\n"
- "fmax z25.s, p5/M, z25.s, z0.s\n"
- "fmax z26.s, p5/M, z26.s, z0.s\n"
- "fmax z27.s, p5/M, z27.s, z0.s\n"
- "fmax z28.s, p5/M, z28.s, z0.s\n"
- "fmax z29.s, p5/M, z29.s, z0.s\n"
- "fmax z30.s, p5/M, z30.s, z0.s\n"
- "fmax z31.s, p5/M, z31.s, z0.s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
+ "fmin z8.s, p5/M, z8.s, z0.s\n"
+ "fmin z9.s, p5/M, z9.s, z0.s\n"
+ "fmin z10.s, p5/M, z10.s, z0.s\n"
+ "fmin z11.s, p5/M, z11.s, z0.s\n"
+ "fmin z12.s, p5/M, z12.s, z0.s\n"
+ "fmax z8.s, p5/M, z8.s, z1.s\n"
+ "fmax z9.s, p5/M, z9.s, z1.s\n"
+ "fmax z10.s, p5/M, z10.s, z1.s\n"
+ "fmax z11.s, p5/M, z11.s, z1.s\n"
+ "fmax z12.s, p5/M, z12.s, z1.s\n"
+ "fmin z13.s, p5/M, z13.s, z0.s\n"
+ "fmin z14.s, p5/M, z14.s, z0.s\n"
+ "fmin z15.s, p5/M, z15.s, z0.s\n"
+ "fmin z16.s, p5/M, z16.s, z0.s\n"
+ "fmax z13.s, p5/M, z13.s, z1.s\n"
+ "fmax z14.s, p5/M, z14.s, z1.s\n"
+ "fmax z15.s, p5/M, z15.s, z1.s\n"
+ "fmax z16.s, p5/M, z16.s, z1.s\n"
+ "fmin z17.s, p5/M, z17.s, z0.s\n"
+ "fmin z18.s, p5/M, z18.s, z0.s\n"
+ "fmin z19.s, p5/M, z19.s, z0.s\n"
+ "fmin z20.s, p5/M, z20.s, z0.s\n"
+ "fmax z17.s, p5/M, z17.s, z1.s\n"
+ "fmax z18.s, p5/M, z18.s, z1.s\n"
+ "fmax z19.s, p5/M, z19.s, z1.s\n"
+ "fmax z20.s, p5/M, z20.s, z1.s\n"
+ "fmin z21.s, p5/M, z21.s, z0.s\n"
+ "fmin z22.s, p5/M, z22.s, z0.s\n"
+ "fmin z23.s, p5/M, z23.s, z0.s\n"
+ "fmin z24.s, p5/M, z24.s, z0.s\n"
+ "fmax z21.s, p5/M, z21.s, z1.s\n"
+ "fmax z22.s, p5/M, z22.s, z1.s\n"
+ "fmax z23.s, p5/M, z23.s, z1.s\n"
+ "fmax z24.s, p5/M, z24.s, z1.s\n"
+ "fmin z25.s, p5/M, z25.s, z0.s\n"
+ "fmin z26.s, p5/M, z26.s, z0.s\n"
+ "fmin z27.s, p5/M, z27.s, z0.s\n"
+ "fmin z28.s, p5/M, z28.s, z0.s\n"
+ "fmax z25.s, p5/M, z25.s, z1.s\n"
+ "fmax z26.s, p5/M, z26.s, z1.s\n"
+ "fmax z27.s, p5/M, z27.s, z1.s\n"
+ "fmax z28.s, p5/M, z28.s, z1.s\n"
+ "fmin z29.s, p5/M, z29.s, z0.s\n"
+ "fmin z30.s, p5/M, z30.s, z0.s\n"
+ "fmin z31.s, p5/M, z31.s, z0.s\n"
+ "fmax z29.s, p5/M, z29.s, z1.s\n"
+ "fmax z30.s, p5/M, z30.s, z1.s\n"
+ "fmax z31.s, p5/M, z31.s, z1.s\n"
"77:" // Height 6: No activation
- "st1w { z8.s }, p4, [x9]\n"
- "st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p4, [x25]\n"
- "st1w { z13.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x24]\n"
- "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z20.s }, p4, [x23]\n"
- "st1w { z21.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z22.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z23.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z24.s }, p4, [x22]\n"
- "st1w { z25.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z26.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z27.s }, p1, [x22, #3, MUL VL]\n"
- "st1w { z28.s }, p4, [x21]\n"
- "st1w { z29.s }, p3, [x21, #1, MUL VL]\n"
- "st1w { z30.s }, p2, [x21, #2, MUL VL]\n"
- "st1w { z31.s }, p1, [x21, #3, MUL VL]\n"
+ "st1w { z8.s }, p4, [x28]\n"
+ "st1w { z9.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z12.s }, p4, [x24]\n"
+ "st1w { z13.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x23]\n"
+ "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z20.s }, p4, [x22]\n"
+ "st1w { z21.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z22.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z23.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x21]\n"
+ "st1w { z25.s }, p3, [x21, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x21, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x21, #3, MUL VL]\n"
+ "st1w { z28.s }, p4, [x20]\n"
+ "st1w { z29.s }, p3, [x20, #1, MUL VL]\n"
+ "st1w { z30.s }, p2, [x20, #2, MUL VL]\n"
+ "st1w { z31.s }, p1, [x20, #3, MUL VL]\n"
"78:" // Height 6: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 67b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 80f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 79f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"79:" // Update direct input
- "mov x20, #0xc\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0xc\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"80:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_6x4VL/generic.cpp
index 9bb67f18d2..e69293e3f1 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_6x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_6x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -103,42 +103,42 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"cmp %x[M], #0x2\n"
"bgt 27f\n"
"beq 14f\n"
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x9, %x[bias]\n"
+ "mov x28, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "cbz x12, 3f\n"
- "ld1w { z8.s }, p5/Z, [x12]\n"
- "ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x11\n"
+ "cbz x9, 3f\n"
+ "ld1w { z8.s }, p5/Z, [x9]\n"
"zip2 z12.d, z8.d, z8.d\n"
+ "ld1w { z9.s }, p5/Z, [x9, #1, MUL VL]\n"
"zip1 z8.d, z8.d, z8.d\n"
- "ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z10.s }, p5/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x9, #3, MUL VL]\n"
"zip2 z13.d, z9.d, z9.d\n"
+ "addvl x9, x9, #4\n"
"zip1 z9.d, z9.d, z9.d\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x12, x12, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
"b 5f\n"
"3:" // Height 1: no bias
"tbz %x[flags], #0, 4f\n"
- "ld1w { z9.s }, p4/Z, [x9]\n"
- "ld1w { z10.s }, p3/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z9.s }, p4/Z, [x28]\n"
"zip1 z8.d, z9.d, z12.d\n"
+ "ld1w { z10.s }, p3/Z, [x28, #1, MUL VL]\n"
"zip2 z12.d, z9.d, z12.d\n"
- "ld1w { z11.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x28, #3, MUL VL]\n"
"zip1 z9.d, z10.d, z13.d\n"
"zip2 z13.d, z10.d, z13.d\n"
"zip1 z10.d, z11.d, z14.d\n"
@@ -156,185 +156,185 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"mov z14.b, #0x0\n"
"mov z15.b, #0x0\n"
"5:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"6:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 7f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 8f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 8f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
"b 8f\n"
"7:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"8:" // Height 1: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"ble 10f\n"
"9:" // Height 1: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z1.h }, p0/Z, [x26]\n"
- "trn1 z0.d, z1.d, z2.d\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
+ "whilelt p0.h, XZR, x26\n"
"ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
+ "sub x26, x26, #0x8\n"
+ "trn2 z1.d, z1.d, z2.d\n"
+ "cmp x26, #0x8\n"
+ "add x25, x25, #0x10\n"
".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
- ".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
- ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #5, MUL VL]\n"
".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
- ".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #16\n"
- "trn2 z1.d, z1.d, z2.d\n"
".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
- ".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10, #-8, MUL VL]\n"
+ ".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #-7, MUL VL]\n"
".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
- ".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10, #-6, MUL VL]\n"
+ ".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #-5, MUL VL]\n"
".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
- ".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10, #-4, MUL VL]\n"
+ ".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #-3, MUL VL]\n"
".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
- ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10, #-2, MUL VL]\n"
+ ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #-1, MUL VL]\n"
- "sub x27, x27, #0x8\n"
- "cmp x27, #0x8\n"
".inst 0x6467e42b // bfmmla z11.s, z1.h, z7.h\n"
".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
- "add x26, x26, #0x10\n"
"bgt 9b\n"
"10:" // Height 1: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z1.h }, p0/Z, [x26]\n"
- "trn1 z0.d, z1.d, z2.d\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
+ "whilelt p0.h, XZR, x26\n"
"ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
+ "subs x26, x26, #0x4\n"
+ "trn2 z1.d, z1.d, z2.d\n"
".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
- ".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
- ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #5, MUL VL]\n"
".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
- ".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #7, MUL VL]\n"
- "subs x27, x27, #0x4\n"
- "trn2 z1.d, z1.d, z2.d\n"
+ "addvl x10, x10, #8\n"
".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
- "addvl x10, x10, #8\n"
"ble 11f\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
- "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
- ".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
"ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
- ".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #5, MUL VL]\n"
".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
- ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
".inst 0x6467e42b // bfmmla z11.s, z1.h, z7.h\n"
".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
- "addvl x10, x10, #8\n"
"11:" // Height 1: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 6b\n"
"uzp1 z8.d, z8.d, z12.d\n"
"uzp1 z9.d, z9.d, z13.d\n"
"uzp1 z10.d, z10.d, z14.d\n"
"uzp1 z11.d, z11.d, z15.d\n"
"tbz %x[flags], #1, 12f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
- "fmin z8.s, p5/M, z8.s, z1.s\n"
- "fmin z9.s, p5/M, z9.s, z1.s\n"
- "fmin z10.s, p5/M, z10.s, z1.s\n"
- "fmin z11.s, p5/M, z11.s, z1.s\n"
- "fmax z8.s, p5/M, z8.s, z0.s\n"
- "fmax z9.s, p5/M, z9.s, z0.s\n"
- "fmax z10.s, p5/M, z10.s, z0.s\n"
- "fmax z11.s, p5/M, z11.s, z0.s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
+ "fmin z8.s, p5/M, z8.s, z0.s\n"
+ "fmin z9.s, p5/M, z9.s, z0.s\n"
+ "fmin z10.s, p5/M, z10.s, z0.s\n"
+ "fmin z11.s, p5/M, z11.s, z0.s\n"
+ "fmax z8.s, p5/M, z8.s, z1.s\n"
+ "fmax z9.s, p5/M, z9.s, z1.s\n"
+ "fmax z10.s, p5/M, z10.s, z1.s\n"
+ "fmax z11.s, p5/M, z11.s, z1.s\n"
"12:" // Height 1: No activation
- "st1w { z8.s }, p4, [x9]\n"
- "st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "st1w { z8.s }, p4, [x28]\n"
+ "st1w { z9.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"13:" // Height 1: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 2b\n"
"b 80f\n"
"14:" // Height 2
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"15:" // Height 2: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "cbz x12, 16f\n"
- "ld1w { z8.s }, p5/Z, [x12]\n"
- "ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x11\n"
+ "cbz x9, 16f\n"
+ "ld1w { z8.s }, p5/Z, [x9]\n"
"zip2 z12.d, z8.d, z8.d\n"
+ "ld1w { z9.s }, p5/Z, [x9, #1, MUL VL]\n"
"zip1 z8.d, z8.d, z8.d\n"
- "ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z10.s }, p5/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x9, #3, MUL VL]\n"
"zip2 z13.d, z9.d, z9.d\n"
+ "addvl x9, x9, #4\n"
"zip1 z9.d, z9.d, z9.d\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x12, x12, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
"b 18f\n"
"16:" // Height 2: no bias
"tbz %x[flags], #0, 17f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "ld1w { z9.s }, p4/Z, [x9]\n"
- "ld1w { z10.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z11.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x25]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z9.s }, p4/Z, [x28]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "ld1w { z10.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
"zip1 z8.d, z9.d, z12.d\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
"zip2 z12.d, z9.d, z12.d\n"
- "ld1w { z13.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x25, #2, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
"zip1 z9.d, z10.d, z13.d\n"
"zip2 z13.d, z10.d, z13.d\n"
- "ld1w { z15.s }, p1/Z, [x25, #3, MUL VL]\n"
"zip1 z10.d, z11.d, z14.d\n"
"zip2 z14.d, z11.d, z14.d\n"
"zip1 z11.d, z16.d, z15.d\n"
@@ -350,122 +350,122 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"mov z14.b, #0x0\n"
"mov z15.b, #0x0\n"
"18:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"19:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 20f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 21f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 21f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
"b 21f\n"
"20:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
"21:" // Height 2: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"ble 23f\n"
"22:" // Height 2: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z1.h }, p0/Z, [x26]\n"
- "ld1rqh { z2.h }, p0/Z, [x25]\n"
- "trn1 z0.d, z1.d, z2.d\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
+ "whilelt p0.h, XZR, x26\n"
"ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x26, x26, #0x8\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "cmp x26, #0x8\n"
+ "ld1rqh { z2.h }, p0/Z, [x24]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
+ "add x25, x25, #0x10\n"
+ "trn2 z1.d, z1.d, z2.d\n"
+ "add x24, x24, #0x10\n"
".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
- ".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
- ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #5, MUL VL]\n"
".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
- ".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #16\n"
- "trn2 z1.d, z1.d, z2.d\n"
".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
- ".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10, #-8, MUL VL]\n"
+ ".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #-7, MUL VL]\n"
".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
- ".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10, #-6, MUL VL]\n"
+ ".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #-5, MUL VL]\n"
".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
- ".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10, #-4, MUL VL]\n"
+ ".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #-3, MUL VL]\n"
".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
- ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10, #-2, MUL VL]\n"
+ ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #-1, MUL VL]\n"
- "sub x27, x27, #0x8\n"
- "cmp x27, #0x8\n"
".inst 0x6467e42b // bfmmla z11.s, z1.h, z7.h\n"
".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
"bgt 22b\n"
"23:" // Height 2: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z1.h }, p0/Z, [x26]\n"
- "ld1rqh { z2.h }, p0/Z, [x25]\n"
- "trn1 z0.d, z1.d, z2.d\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
+ "whilelt p0.h, XZR, x26\n"
"ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "ld1rqh { z2.h }, p0/Z, [x24]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
+ "trn2 z1.d, z1.d, z2.d\n"
".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
- ".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
- ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #5, MUL VL]\n"
".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
- ".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #7, MUL VL]\n"
- "subs x27, x27, #0x4\n"
- "trn2 z1.d, z1.d, z2.d\n"
+ "addvl x10, x10, #8\n"
".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
- "addvl x10, x10, #8\n"
"ble 24f\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
- "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
- ".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
"ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
- ".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #5, MUL VL]\n"
".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
- ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
".inst 0x6467e42b // bfmmla z11.s, z1.h, z7.h\n"
".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
- "addvl x10, x10, #8\n"
"24:" // Height 2: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 19b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 z7.d, z8.d, z12.d\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 z8.d, z8.d, z12.d\n"
- "add x25, x9, x20, LSL #2\n"
+ "add x24, x28, x19, LSL #2\n"
"uzp1 z12.d, z9.d, z13.d\n"
"uzp2 z9.d, z9.d, z13.d\n"
"uzp1 z13.d, z10.d, z14.d\n"
@@ -473,70 +473,70 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
"tbz %x[flags], #1, 25f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
- "fmin z7.s, p5/M, z7.s, z1.s\n"
- "fmin z12.s, p5/M, z12.s, z1.s\n"
- "fmin z13.s, p5/M, z13.s, z1.s\n"
- "fmin z14.s, p5/M, z14.s, z1.s\n"
- "fmin z8.s, p5/M, z8.s, z1.s\n"
- "fmin z9.s, p5/M, z9.s, z1.s\n"
- "fmin z10.s, p5/M, z10.s, z1.s\n"
- "fmin z11.s, p5/M, z11.s, z1.s\n"
- "fmax z7.s, p5/M, z7.s, z0.s\n"
- "fmax z12.s, p5/M, z12.s, z0.s\n"
- "fmax z13.s, p5/M, z13.s, z0.s\n"
- "fmax z14.s, p5/M, z14.s, z0.s\n"
- "fmax z8.s, p5/M, z8.s, z0.s\n"
- "fmax z9.s, p5/M, z9.s, z0.s\n"
- "fmax z10.s, p5/M, z10.s, z0.s\n"
- "fmax z11.s, p5/M, z11.s, z0.s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
+ "fmin z7.s, p5/M, z7.s, z0.s\n"
+ "fmin z12.s, p5/M, z12.s, z0.s\n"
+ "fmin z13.s, p5/M, z13.s, z0.s\n"
+ "fmin z14.s, p5/M, z14.s, z0.s\n"
+ "fmin z8.s, p5/M, z8.s, z0.s\n"
+ "fmax z7.s, p5/M, z7.s, z1.s\n"
+ "fmax z12.s, p5/M, z12.s, z1.s\n"
+ "fmax z13.s, p5/M, z13.s, z1.s\n"
+ "fmax z14.s, p5/M, z14.s, z1.s\n"
+ "fmax z8.s, p5/M, z8.s, z1.s\n"
+ "fmin z9.s, p5/M, z9.s, z0.s\n"
+ "fmin z10.s, p5/M, z10.s, z0.s\n"
+ "fmin z11.s, p5/M, z11.s, z0.s\n"
+ "fmax z9.s, p5/M, z9.s, z1.s\n"
+ "fmax z10.s, p5/M, z10.s, z1.s\n"
+ "fmax z11.s, p5/M, z11.s, z1.s\n"
"25:" // Height 2: No activation
- "st1w { z7.s }, p4, [x9]\n"
- "st1w { z12.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z8.s }, p4, [x25]\n"
- "st1w { z9.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x25, #3, MUL VL]\n"
+ "st1w { z7.s }, p4, [x28]\n"
+ "st1w { z12.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z13.s }, p2, [x28, #2, MUL VL]\n"
+ "st1w { z14.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z8.s }, p4, [x24]\n"
+ "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
"26:" // Height 2: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 15b\n"
"b 80f\n"
"27:" // Height 3
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"28:" // Height 3: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "cbz x12, 29f\n"
- "ld1w { z8.s }, p5/Z, [x12]\n"
- "ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x11\n"
+ "cbz x9, 29f\n"
+ "ld1w { z8.s }, p5/Z, [x9]\n"
"zip2 z12.d, z8.d, z8.d\n"
+ "ld1w { z9.s }, p5/Z, [x9, #1, MUL VL]\n"
"zip1 z8.d, z8.d, z8.d\n"
- "ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z10.s }, p5/Z, [x9, #2, MUL VL]\n"
+ "mov z16.d, z8.d\n"
+ "ld1w { z11.s }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x12, x12, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
- "mov z16.d, z8.d\n"
"mov z20.d, z12.d\n"
"mov z17.d, z9.d\n"
"mov z21.d, z13.d\n"
@@ -547,29 +547,29 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"b 31f\n"
"29:" // Height 3: no bias
"tbz %x[flags], #0, 30f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z9.s }, p4/Z, [x9]\n"
- "ld1w { z10.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z11.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x25]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z9.s }, p4/Z, [x28]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "ld1w { z10.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
"zip1 z8.d, z9.d, z12.d\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
"zip2 z12.d, z9.d, z12.d\n"
- "ld1w { z13.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x25, #2, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
"zip1 z9.d, z10.d, z13.d\n"
+ "ld1w { z17.s }, p4/Z, [x23]\n"
"zip2 z13.d, z10.d, z13.d\n"
- "ld1w { z15.s }, p1/Z, [x25, #3, MUL VL]\n"
- "ld1w { z17.s }, p4/Z, [x24]\n"
+ "ld1w { z18.s }, p3/Z, [x23, #1, MUL VL]\n"
"zip1 z10.d, z11.d, z14.d\n"
+ "ld1w { z19.s }, p2/Z, [x23, #2, MUL VL]\n"
"zip2 z14.d, z11.d, z14.d\n"
- "ld1w { z18.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z19.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z24.s }, p1/Z, [x23, #3, MUL VL]\n"
"zip1 z11.d, z16.d, z15.d\n"
"zip2 z15.d, z16.d, z15.d\n"
- "ld1w { z24.s }, p1/Z, [x24, #3, MUL VL]\n"
"zip1 z16.d, z17.d, z20.d\n"
"zip2 z20.d, z17.d, z20.d\n"
"zip1 z17.d, z18.d, z21.d\n"
@@ -597,58 +597,61 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"mov z22.b, #0x0\n"
"mov z23.b, #0x0\n"
"31:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"32:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 33f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 34f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 34f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
"b 34f\n"
"33:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
"34:" // Height 3: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"ble 36f\n"
"35:" // Height 3: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z1.h }, p0/Z, [x26]\n"
- "ld1rqh { z2.h }, p0/Z, [x25]\n"
- "ld1rqh { z3.h }, p0/Z, [x24]\n"
- "trn1 z0.d, z1.d, z2.d\n"
- "trn2 z1.d, z1.d, z2.d\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
- "trn1 z2.d, z3.d, z4.d\n"
+ "whilelt p0.h, XZR, x26\n"
"ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "sub x26, x26, #0x8\n"
+ "ld1rqh { z2.h }, p0/Z, [x24]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
+ "ld1rqh { z3.h }, p0/Z, [x23]\n"
+ "cmp x26, #0x8\n"
+ "trn2 z1.d, z1.d, z2.d\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
- ".inst 0x6467e450 // bfmmla z16.s, z2.h, z7.h\n"
".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
- ".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
+ "add x23, x23, #0x10\n"
+ "trn1 z2.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
+ ".inst 0x6467e450 // bfmmla z16.s, z2.h, z7.h\n"
"ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
".inst 0x6467e451 // bfmmla z17.s, z2.h, z7.h\n"
"ld1h { z7.h }, p5/Z, [x10, #4, MUL VL]\n"
- "trn2 z3.d, z3.d, z4.d\n"
".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #5, MUL VL]\n"
- "sub x27, x27, #0x8\n"
".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
".inst 0x6467e452 // bfmmla z18.s, z2.h, z7.h\n"
"ld1h { z7.h }, p5/Z, [x10, #6, MUL VL]\n"
- "cmp x27, #0x8\n"
".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
".inst 0x6466e456 // bfmmla z22.s, z2.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #7, MUL VL]\n"
@@ -656,15 +659,12 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
".inst 0x6467e453 // bfmmla z19.s, z2.h, z7.h\n"
"ld1h { z7.h }, p5/Z, [x10, #-8, MUL VL]\n"
- "add x26, x26, #0x10\n"
".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
".inst 0x6466e457 // bfmmla z23.s, z2.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #-7, MUL VL]\n"
- "add x25, x25, #0x10\n"
".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
".inst 0x6467e470 // bfmmla z16.s, z3.h, z7.h\n"
"ld1h { z7.h }, p5/Z, [x10, #-6, MUL VL]\n"
- "add x24, x24, #0x10\n"
".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
".inst 0x6466e474 // bfmmla z20.s, z3.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #-5, MUL VL]\n"
@@ -686,29 +686,29 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
".inst 0x6466e477 // bfmmla z23.s, z3.h, z6.h\n"
"bgt 35b\n"
"36:" // Height 3: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z1.h }, p0/Z, [x26]\n"
- "ld1rqh { z2.h }, p0/Z, [x25]\n"
- "ld1rqh { z3.h }, p0/Z, [x24]\n"
- "trn1 z0.d, z1.d, z2.d\n"
- "trn2 z1.d, z1.d, z2.d\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
- "trn1 z2.d, z3.d, z4.d\n"
+ "whilelt p0.h, XZR, x26\n"
"ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rqh { z2.h }, p0/Z, [x24]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
+ "ld1rqh { z3.h }, p0/Z, [x23]\n"
+ "trn2 z1.d, z1.d, z2.d\n"
".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
- ".inst 0x6467e450 // bfmmla z16.s, z2.h, z7.h\n"
".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
- ".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
+ "trn1 z2.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
+ ".inst 0x6467e450 // bfmmla z16.s, z2.h, z7.h\n"
"ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
".inst 0x6467e451 // bfmmla z17.s, z2.h, z7.h\n"
"ld1h { z7.h }, p5/Z, [x10, #4, MUL VL]\n"
- "subs x27, x27, #0x4\n"
".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #5, MUL VL]\n"
- "trn2 z3.d, z3.d, z4.d\n"
".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
".inst 0x6467e452 // bfmmla z18.s, z2.h, z7.h\n"
"ld1h { z7.h }, p5/Z, [x10, #6, MUL VL]\n"
@@ -722,12 +722,12 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
".inst 0x6466e457 // bfmmla z23.s, z2.h, z6.h\n"
"ble 37f\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
- "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x6467e470 // bfmmla z16.s, z3.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
".inst 0x6466e474 // bfmmla z20.s, z3.h, z6.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
".inst 0x6467e471 // bfmmla z17.s, z3.h, z7.h\n"
@@ -747,17 +747,17 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
".inst 0x6466e477 // bfmmla z23.s, z3.h, z6.h\n"
"37:" // Height 3: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 32b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
"uzp1 z7.d, z8.d, z12.d\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 z8.d, z8.d, z12.d\n"
+ "add x24, x28, x19, LSL #2\n"
"uzp1 z12.d, z9.d, z13.d\n"
"uzp2 z9.d, z9.d, z13.d\n"
- "add x24, x25, x20, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"uzp1 z13.d, z10.d, z14.d\n"
"uzp2 z10.d, z10.d, z14.d\n"
"uzp1 z14.d, z11.d, z15.d\n"
@@ -767,82 +767,82 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"uzp1 z18.d, z18.d, z22.d\n"
"uzp1 z19.d, z19.d, z23.d\n"
"tbz %x[flags], #1, 38f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
- "fmin z7.s, p5/M, z7.s, z1.s\n"
- "fmin z12.s, p5/M, z12.s, z1.s\n"
- "fmin z13.s, p5/M, z13.s, z1.s\n"
- "fmin z14.s, p5/M, z14.s, z1.s\n"
- "fmin z8.s, p5/M, z8.s, z1.s\n"
- "fmin z9.s, p5/M, z9.s, z1.s\n"
- "fmin z10.s, p5/M, z10.s, z1.s\n"
- "fmin z11.s, p5/M, z11.s, z1.s\n"
- "fmin z16.s, p5/M, z16.s, z1.s\n"
- "fmin z17.s, p5/M, z17.s, z1.s\n"
- "fmin z18.s, p5/M, z18.s, z1.s\n"
- "fmin z19.s, p5/M, z19.s, z1.s\n"
- "fmax z7.s, p5/M, z7.s, z0.s\n"
- "fmax z12.s, p5/M, z12.s, z0.s\n"
- "fmax z13.s, p5/M, z13.s, z0.s\n"
- "fmax z14.s, p5/M, z14.s, z0.s\n"
- "fmax z8.s, p5/M, z8.s, z0.s\n"
- "fmax z9.s, p5/M, z9.s, z0.s\n"
- "fmax z10.s, p5/M, z10.s, z0.s\n"
- "fmax z11.s, p5/M, z11.s, z0.s\n"
- "fmax z16.s, p5/M, z16.s, z0.s\n"
- "fmax z17.s, p5/M, z17.s, z0.s\n"
- "fmax z18.s, p5/M, z18.s, z0.s\n"
- "fmax z19.s, p5/M, z19.s, z0.s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
+ "fmin z7.s, p5/M, z7.s, z0.s\n"
+ "fmin z12.s, p5/M, z12.s, z0.s\n"
+ "fmin z13.s, p5/M, z13.s, z0.s\n"
+ "fmin z14.s, p5/M, z14.s, z0.s\n"
+ "fmin z8.s, p5/M, z8.s, z0.s\n"
+ "fmax z7.s, p5/M, z7.s, z1.s\n"
+ "fmax z12.s, p5/M, z12.s, z1.s\n"
+ "fmax z13.s, p5/M, z13.s, z1.s\n"
+ "fmax z14.s, p5/M, z14.s, z1.s\n"
+ "fmax z8.s, p5/M, z8.s, z1.s\n"
+ "fmin z9.s, p5/M, z9.s, z0.s\n"
+ "fmin z10.s, p5/M, z10.s, z0.s\n"
+ "fmin z11.s, p5/M, z11.s, z0.s\n"
+ "fmin z16.s, p5/M, z16.s, z0.s\n"
+ "fmax z9.s, p5/M, z9.s, z1.s\n"
+ "fmax z10.s, p5/M, z10.s, z1.s\n"
+ "fmax z11.s, p5/M, z11.s, z1.s\n"
+ "fmax z16.s, p5/M, z16.s, z1.s\n"
+ "fmin z17.s, p5/M, z17.s, z0.s\n"
+ "fmin z18.s, p5/M, z18.s, z0.s\n"
+ "fmin z19.s, p5/M, z19.s, z0.s\n"
+ "fmax z17.s, p5/M, z17.s, z1.s\n"
+ "fmax z18.s, p5/M, z18.s, z1.s\n"
+ "fmax z19.s, p5/M, z19.s, z1.s\n"
"38:" // Height 3: No activation
- "st1w { z7.s }, p4, [x9]\n"
- "st1w { z12.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z8.s }, p4, [x25]\n"
- "st1w { z9.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x24]\n"
- "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z7.s }, p4, [x28]\n"
+ "st1w { z12.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z13.s }, p2, [x28, #2, MUL VL]\n"
+ "st1w { z14.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z8.s }, p4, [x24]\n"
+ "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x23]\n"
+ "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
"39:" // Height 3: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 28b\n"
"b 80f\n"
"40:" // Height 4
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"41:" // Height 4: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "cbz x12, 42f\n"
- "ld1w { z8.s }, p5/Z, [x12]\n"
- "ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x11\n"
+ "cbz x9, 42f\n"
+ "ld1w { z8.s }, p5/Z, [x9]\n"
"zip2 z12.d, z8.d, z8.d\n"
+ "ld1w { z9.s }, p5/Z, [x9, #1, MUL VL]\n"
"zip1 z8.d, z8.d, z8.d\n"
- "ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z10.s }, p5/Z, [x9, #2, MUL VL]\n"
+ "mov z16.d, z8.d\n"
+ "ld1w { z11.s }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x12, x12, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
- "mov z16.d, z8.d\n"
"mov z20.d, z12.d\n"
"mov z17.d, z9.d\n"
"mov z21.d, z13.d\n"
@@ -853,38 +853,38 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"b 44f\n"
"42:" // Height 4: no bias
"tbz %x[flags], #0, 43f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z9.s }, p4/Z, [x9]\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z10.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z11.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x25]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z9.s }, p4/Z, [x28]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "ld1w { z10.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
"zip1 z8.d, z9.d, z12.d\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
"zip2 z12.d, z9.d, z12.d\n"
- "ld1w { z13.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x25, #2, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
"zip1 z9.d, z10.d, z13.d\n"
+ "ld1w { z17.s }, p4/Z, [x23]\n"
"zip2 z13.d, z10.d, z13.d\n"
- "ld1w { z15.s }, p1/Z, [x25, #3, MUL VL]\n"
- "ld1w { z17.s }, p4/Z, [x24]\n"
+ "ld1w { z18.s }, p3/Z, [x23, #1, MUL VL]\n"
"zip1 z10.d, z11.d, z14.d\n"
+ "ld1w { z19.s }, p2/Z, [x23, #2, MUL VL]\n"
"zip2 z14.d, z11.d, z14.d\n"
- "ld1w { z18.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z19.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z24.s }, p1/Z, [x23, #3, MUL VL]\n"
"zip1 z11.d, z16.d, z15.d\n"
+ "ld1w { z20.s }, p4/Z, [x22]\n"
"zip2 z15.d, z16.d, z15.d\n"
- "ld1w { z24.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x23]\n"
+ "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
"zip1 z16.d, z17.d, z20.d\n"
+ "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
"zip2 z20.d, z17.d, z20.d\n"
- "ld1w { z21.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z22.s }, p2/Z, [x23, #2, MUL VL]\n"
"zip1 z17.d, z18.d, z21.d\n"
"zip2 z21.d, z18.d, z21.d\n"
- "ld1w { z23.s }, p1/Z, [x23, #3, MUL VL]\n"
"zip1 z18.d, z19.d, z22.d\n"
"zip2 z22.d, z19.d, z22.d\n"
"zip1 z19.d, z24.d, z23.d\n"
@@ -908,62 +908,66 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"mov z22.b, #0x0\n"
"mov z23.b, #0x0\n"
"44:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"45:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 46f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 47f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 47f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
"b 47f\n"
"46:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
"47:" // Height 4: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"ble 49f\n"
"48:" // Height 4: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z1.h }, p0/Z, [x26]\n"
- "ld1rqh { z2.h }, p0/Z, [x25]\n"
- "trn1 z0.d, z1.d, z2.d\n"
- "ld1rqh { z3.h }, p0/Z, [x24]\n"
- "ld1rqh { z4.h }, p0/Z, [x23]\n"
- "trn2 z1.d, z1.d, z2.d\n"
- "trn1 z2.d, z3.d, z4.d\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
+ "whilelt p0.h, XZR, x26\n"
"ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x26, x26, #0x8\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "cmp x26, #0x8\n"
+ "ld1rqh { z2.h }, p0/Z, [x24]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
+ "ld1rqh { z3.h }, p0/Z, [x23]\n"
+ "add x25, x25, #0x10\n"
+ "trn2 z1.d, z1.d, z2.d\n"
+ "ld1rqh { z4.h }, p0/Z, [x22]\n"
+ "add x24, x24, #0x10\n"
".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
- ".inst 0x6467e450 // bfmmla z16.s, z2.h, z7.h\n"
+ "add x23, x23, #0x10\n"
".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
- ".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
+ "add x22, x22, #0x10\n"
+ "trn1 z2.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
+ ".inst 0x6467e450 // bfmmla z16.s, z2.h, z7.h\n"
"ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
".inst 0x6467e451 // bfmmla z17.s, z2.h, z7.h\n"
"ld1h { z7.h }, p5/Z, [x10, #4, MUL VL]\n"
- "trn2 z3.d, z3.d, z4.d\n"
".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #5, MUL VL]\n"
- "sub x27, x27, #0x8\n"
".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
".inst 0x6467e452 // bfmmla z18.s, z2.h, z7.h\n"
"ld1h { z7.h }, p5/Z, [x10, #6, MUL VL]\n"
- "cmp x27, #0x8\n"
".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
".inst 0x6466e456 // bfmmla z22.s, z2.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #7, MUL VL]\n"
@@ -971,19 +975,15 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
".inst 0x6467e453 // bfmmla z19.s, z2.h, z7.h\n"
"ld1h { z7.h }, p5/Z, [x10, #-8, MUL VL]\n"
- "add x26, x26, #0x10\n"
".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
".inst 0x6466e457 // bfmmla z23.s, z2.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #-7, MUL VL]\n"
- "add x25, x25, #0x10\n"
".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
".inst 0x6467e470 // bfmmla z16.s, z3.h, z7.h\n"
"ld1h { z7.h }, p5/Z, [x10, #-6, MUL VL]\n"
- "add x24, x24, #0x10\n"
".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
".inst 0x6466e474 // bfmmla z20.s, z3.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #-5, MUL VL]\n"
- "add x23, x23, #0x10\n"
".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
".inst 0x6467e471 // bfmmla z17.s, z3.h, z7.h\n"
"ld1h { z7.h }, p5/Z, [x10, #-4, MUL VL]\n"
@@ -1002,30 +1002,30 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
".inst 0x6466e477 // bfmmla z23.s, z3.h, z6.h\n"
"bgt 48b\n"
"49:" // Height 4: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z1.h }, p0/Z, [x26]\n"
- "ld1rqh { z2.h }, p0/Z, [x25]\n"
- "trn1 z0.d, z1.d, z2.d\n"
- "ld1rqh { z3.h }, p0/Z, [x24]\n"
- "ld1rqh { z4.h }, p0/Z, [x23]\n"
- "trn2 z1.d, z1.d, z2.d\n"
- "trn1 z2.d, z3.d, z4.d\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
+ "whilelt p0.h, XZR, x26\n"
"ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "ld1rqh { z2.h }, p0/Z, [x24]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
+ "ld1rqh { z3.h }, p0/Z, [x23]\n"
+ "trn2 z1.d, z1.d, z2.d\n"
+ "ld1rqh { z4.h }, p0/Z, [x22]\n"
".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
- ".inst 0x6467e450 // bfmmla z16.s, z2.h, z7.h\n"
".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
- ".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
+ "trn1 z2.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
+ ".inst 0x6467e450 // bfmmla z16.s, z2.h, z7.h\n"
"ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
".inst 0x6467e451 // bfmmla z17.s, z2.h, z7.h\n"
"ld1h { z7.h }, p5/Z, [x10, #4, MUL VL]\n"
- "subs x27, x27, #0x4\n"
".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #5, MUL VL]\n"
- "trn2 z3.d, z3.d, z4.d\n"
".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
".inst 0x6467e452 // bfmmla z18.s, z2.h, z7.h\n"
"ld1h { z7.h }, p5/Z, [x10, #6, MUL VL]\n"
@@ -1039,12 +1039,12 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
".inst 0x6466e457 // bfmmla z23.s, z2.h, z6.h\n"
"ble 50f\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
- "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x6467e470 // bfmmla z16.s, z3.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
".inst 0x6466e474 // bfmmla z20.s, z3.h, z6.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
".inst 0x6467e471 // bfmmla z17.s, z3.h, z7.h\n"
@@ -1064,19 +1064,19 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
".inst 0x6466e477 // bfmmla z23.s, z3.h, z6.h\n"
"50:" // Height 4: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 45b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"uzp1 z7.d, z8.d, z12.d\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 z8.d, z8.d, z12.d\n"
+ "add x24, x28, x19, LSL #2\n"
"uzp1 z12.d, z9.d, z13.d\n"
- "add x23, x24, x20, LSL #2\n"
"uzp2 z9.d, z9.d, z13.d\n"
+ "add x23, x24, x19, LSL #2\n"
"uzp1 z13.d, z10.d, z14.d\n"
+ "add x22, x23, x19, LSL #2\n"
"uzp2 z10.d, z10.d, z14.d\n"
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
@@ -1089,94 +1089,94 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"uzp1 z22.d, z19.d, z23.d\n"
"uzp2 z19.d, z19.d, z23.d\n"
"tbz %x[flags], #1, 51f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
- "fmin z7.s, p5/M, z7.s, z1.s\n"
- "fmin z12.s, p5/M, z12.s, z1.s\n"
- "fmin z13.s, p5/M, z13.s, z1.s\n"
- "fmin z14.s, p5/M, z14.s, z1.s\n"
- "fmin z8.s, p5/M, z8.s, z1.s\n"
- "fmin z9.s, p5/M, z9.s, z1.s\n"
- "fmin z10.s, p5/M, z10.s, z1.s\n"
- "fmin z11.s, p5/M, z11.s, z1.s\n"
- "fmin z15.s, p5/M, z15.s, z1.s\n"
- "fmin z20.s, p5/M, z20.s, z1.s\n"
- "fmin z21.s, p5/M, z21.s, z1.s\n"
- "fmin z22.s, p5/M, z22.s, z1.s\n"
- "fmin z16.s, p5/M, z16.s, z1.s\n"
- "fmin z17.s, p5/M, z17.s, z1.s\n"
- "fmin z18.s, p5/M, z18.s, z1.s\n"
- "fmin z19.s, p5/M, z19.s, z1.s\n"
- "fmax z7.s, p5/M, z7.s, z0.s\n"
- "fmax z12.s, p5/M, z12.s, z0.s\n"
- "fmax z13.s, p5/M, z13.s, z0.s\n"
- "fmax z14.s, p5/M, z14.s, z0.s\n"
- "fmax z8.s, p5/M, z8.s, z0.s\n"
- "fmax z9.s, p5/M, z9.s, z0.s\n"
- "fmax z10.s, p5/M, z10.s, z0.s\n"
- "fmax z11.s, p5/M, z11.s, z0.s\n"
- "fmax z15.s, p5/M, z15.s, z0.s\n"
- "fmax z20.s, p5/M, z20.s, z0.s\n"
- "fmax z21.s, p5/M, z21.s, z0.s\n"
- "fmax z22.s, p5/M, z22.s, z0.s\n"
- "fmax z16.s, p5/M, z16.s, z0.s\n"
- "fmax z17.s, p5/M, z17.s, z0.s\n"
- "fmax z18.s, p5/M, z18.s, z0.s\n"
- "fmax z19.s, p5/M, z19.s, z0.s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
+ "fmin z7.s, p5/M, z7.s, z0.s\n"
+ "fmin z12.s, p5/M, z12.s, z0.s\n"
+ "fmin z13.s, p5/M, z13.s, z0.s\n"
+ "fmin z14.s, p5/M, z14.s, z0.s\n"
+ "fmin z8.s, p5/M, z8.s, z0.s\n"
+ "fmax z7.s, p5/M, z7.s, z1.s\n"
+ "fmax z12.s, p5/M, z12.s, z1.s\n"
+ "fmax z13.s, p5/M, z13.s, z1.s\n"
+ "fmax z14.s, p5/M, z14.s, z1.s\n"
+ "fmax z8.s, p5/M, z8.s, z1.s\n"
+ "fmin z9.s, p5/M, z9.s, z0.s\n"
+ "fmin z10.s, p5/M, z10.s, z0.s\n"
+ "fmin z11.s, p5/M, z11.s, z0.s\n"
+ "fmin z15.s, p5/M, z15.s, z0.s\n"
+ "fmax z9.s, p5/M, z9.s, z1.s\n"
+ "fmax z10.s, p5/M, z10.s, z1.s\n"
+ "fmax z11.s, p5/M, z11.s, z1.s\n"
+ "fmax z15.s, p5/M, z15.s, z1.s\n"
+ "fmin z20.s, p5/M, z20.s, z0.s\n"
+ "fmin z21.s, p5/M, z21.s, z0.s\n"
+ "fmin z22.s, p5/M, z22.s, z0.s\n"
+ "fmin z16.s, p5/M, z16.s, z0.s\n"
+ "fmax z20.s, p5/M, z20.s, z1.s\n"
+ "fmax z21.s, p5/M, z21.s, z1.s\n"
+ "fmax z22.s, p5/M, z22.s, z1.s\n"
+ "fmax z16.s, p5/M, z16.s, z1.s\n"
+ "fmin z17.s, p5/M, z17.s, z0.s\n"
+ "fmin z18.s, p5/M, z18.s, z0.s\n"
+ "fmin z19.s, p5/M, z19.s, z0.s\n"
+ "fmax z17.s, p5/M, z17.s, z1.s\n"
+ "fmax z18.s, p5/M, z18.s, z1.s\n"
+ "fmax z19.s, p5/M, z19.s, z1.s\n"
"51:" // Height 4: No activation
- "st1w { z7.s }, p4, [x9]\n"
- "st1w { z12.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z8.s }, p4, [x25]\n"
- "st1w { z9.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z15.s }, p4, [x24]\n"
- "st1w { z20.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z21.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z22.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x23]\n"
- "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z7.s }, p4, [x28]\n"
+ "st1w { z12.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z13.s }, p2, [x28, #2, MUL VL]\n"
+ "st1w { z14.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z8.s }, p4, [x24]\n"
+ "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z15.s }, p4, [x23]\n"
+ "st1w { z20.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z21.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z22.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x22]\n"
+ "st1w { z17.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x22, #3, MUL VL]\n"
"52:" // Height 4: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 41b\n"
"b 80f\n"
"53:" // Height 5
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"54:" // Height 5: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "cbz x12, 55f\n"
- "ld1w { z8.s }, p5/Z, [x12]\n"
- "ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x11\n"
+ "cbz x9, 55f\n"
+ "ld1w { z8.s }, p5/Z, [x9]\n"
"zip2 z12.d, z8.d, z8.d\n"
+ "ld1w { z9.s }, p5/Z, [x9, #1, MUL VL]\n"
"zip1 z8.d, z8.d, z8.d\n"
- "ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z10.s }, p5/Z, [x9, #2, MUL VL]\n"
+ "mov z16.d, z8.d\n"
+ "ld1w { z11.s }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x12, x12, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
- "mov z16.d, z8.d\n"
"mov z20.d, z12.d\n"
"mov z17.d, z9.d\n"
"mov z21.d, z13.d\n"
@@ -1195,47 +1195,47 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"b 57f\n"
"55:" // Height 5: no bias
"tbz %x[flags], #0, 56f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z9.s }, p4/Z, [x9]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "ld1w { z10.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z11.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x25]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z9.s }, p4/Z, [x28]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "ld1w { z10.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
"zip1 z8.d, z9.d, z12.d\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "add x21, x22, x19, LSL #2\n"
"zip2 z12.d, z9.d, z12.d\n"
- "ld1w { z13.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x25, #2, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
"zip1 z9.d, z10.d, z13.d\n"
+ "ld1w { z17.s }, p4/Z, [x23]\n"
"zip2 z13.d, z10.d, z13.d\n"
- "ld1w { z15.s }, p1/Z, [x25, #3, MUL VL]\n"
- "ld1w { z17.s }, p4/Z, [x24]\n"
+ "ld1w { z18.s }, p3/Z, [x23, #1, MUL VL]\n"
"zip1 z10.d, z11.d, z14.d\n"
+ "ld1w { z19.s }, p2/Z, [x23, #2, MUL VL]\n"
"zip2 z14.d, z11.d, z14.d\n"
- "ld1w { z18.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z19.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z24.s }, p1/Z, [x23, #3, MUL VL]\n"
"zip1 z11.d, z16.d, z15.d\n"
+ "ld1w { z20.s }, p4/Z, [x22]\n"
"zip2 z15.d, z16.d, z15.d\n"
- "ld1w { z24.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x23]\n"
+ "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
"zip1 z16.d, z17.d, z20.d\n"
+ "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
"zip2 z20.d, z17.d, z20.d\n"
- "ld1w { z21.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z22.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z25.s }, p4/Z, [x21]\n"
"zip1 z17.d, z18.d, z21.d\n"
+ "ld1w { z26.s }, p3/Z, [x21, #1, MUL VL]\n"
"zip2 z21.d, z18.d, z21.d\n"
- "ld1w { z23.s }, p1/Z, [x23, #3, MUL VL]\n"
- "ld1w { z25.s }, p4/Z, [x22]\n"
+ "ld1w { z27.s }, p2/Z, [x21, #2, MUL VL]\n"
"zip1 z18.d, z19.d, z22.d\n"
+ "ld1w { z6.s }, p1/Z, [x21, #3, MUL VL]\n"
"zip2 z22.d, z19.d, z22.d\n"
- "ld1w { z26.s }, p3/Z, [x22, #1, MUL VL]\n"
- "ld1w { z27.s }, p2/Z, [x22, #2, MUL VL]\n"
"zip1 z19.d, z24.d, z23.d\n"
"zip2 z23.d, z24.d, z23.d\n"
- "ld1w { z6.s }, p1/Z, [x22, #3, MUL VL]\n"
"zip1 z24.d, z25.d, z28.d\n"
"zip2 z28.d, z25.d, z28.d\n"
"zip1 z25.d, z26.d, z29.d\n"
@@ -1271,72 +1271,72 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"mov z30.b, #0x0\n"
"mov z31.b, #0x0\n"
"57:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"58:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 59f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 60f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
- "add x22, x22, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 60f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
"b 60f\n"
"59:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
"60:" // Height 5: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"ble 62f\n"
"61:" // Height 5: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z1.h }, p0/Z, [x26]\n"
- "ld1rqh { z2.h }, p0/Z, [x25]\n"
- "ld1rqh { z3.h }, p0/Z, [x24]\n"
- "ld1rqh { z4.h }, p0/Z, [x23]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "ld1rqh { z2.h }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
+ "ld1rqh { z3.h }, p0/Z, [x23]\n"
+ "sub x26, x26, #0x8\n"
"trn2 z1.d, z1.d, z2.d\n"
- "ld1rqh { z5.h }, p0/Z, [x22]\n"
+ "ld1rqh { z4.h }, p0/Z, [x22]\n"
+ "cmp x26, #0x8\n"
+ ".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
+ "ld1rqh { z5.h }, p0/Z, [x21]\n"
+ "add x25, x25, #0x10\n"
"trn1 z2.d, z3.d, z4.d\n"
+ "add x24, x24, #0x10\n"
"trn2 z3.d, z3.d, z4.d\n"
- "ld1h { z7.h }, p5/Z, [x10]\n"
+ "add x23, x23, #0x10\n"
"trn1 z4.d, z5.d, z6.d\n"
+ "add x22, x22, #0x10\n"
"trn2 z5.d, z5.d, z6.d\n"
"ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
+ "add x21, x21, #0x10\n"
".inst 0x6467e450 // bfmmla z16.s, z2.h, z7.h\n"
".inst 0x6467e498 // bfmmla z24.s, z4.h, z7.h\n"
"ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
- "sub x27, x27, #0x8\n"
".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
- "cmp x27, #0x8\n"
- "add x26, x26, #0x10\n"
".inst 0x6466e49c // bfmmla z28.s, z4.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
- "add x25, x25, #0x10\n"
".inst 0x6467e451 // bfmmla z17.s, z2.h, z7.h\n"
".inst 0x6467e499 // bfmmla z25.s, z4.h, z7.h\n"
"ld1h { z7.h }, p5/Z, [x10, #4, MUL VL]\n"
- "add x24, x24, #0x10\n"
".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
".inst 0x6466e49d // bfmmla z29.s, z4.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #5, MUL VL]\n"
".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
@@ -1388,25 +1388,25 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
".inst 0x6466e4bf // bfmmla z31.s, z5.h, z6.h\n"
"bgt 61b\n"
"62:" // Height 5: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z1.h }, p0/Z, [x26]\n"
- "ld1rqh { z2.h }, p0/Z, [x25]\n"
- "ld1rqh { z3.h }, p0/Z, [x24]\n"
- "ld1rqh { z4.h }, p0/Z, [x23]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "ld1rqh { z2.h }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
+ "ld1rqh { z3.h }, p0/Z, [x23]\n"
+ "subs x26, x26, #0x4\n"
"trn2 z1.d, z1.d, z2.d\n"
- "ld1rqh { z5.h }, p0/Z, [x22]\n"
+ "ld1rqh { z4.h }, p0/Z, [x22]\n"
+ "ld1rqh { z5.h }, p0/Z, [x21]\n"
+ ".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
"trn1 z2.d, z3.d, z4.d\n"
"trn2 z3.d, z3.d, z4.d\n"
- "ld1h { z7.h }, p5/Z, [x10]\n"
"trn1 z4.d, z5.d, z6.d\n"
"trn2 z5.d, z5.d, z6.d\n"
"ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
".inst 0x6467e450 // bfmmla z16.s, z2.h, z7.h\n"
".inst 0x6467e498 // bfmmla z24.s, z4.h, z7.h\n"
"ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x4\n"
".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
".inst 0x6466e49c // bfmmla z28.s, z4.h, z6.h\n"
@@ -1427,8 +1427,8 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
".inst 0x6466e456 // bfmmla z22.s, z2.h, z6.h\n"
".inst 0x6466e49e // bfmmla z30.s, z4.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #7, MUL VL]\n"
- ".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
"addvl x10, x10, #8\n"
+ ".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
".inst 0x6467e453 // bfmmla z19.s, z2.h, z7.h\n"
".inst 0x6467e49b // bfmmla z27.s, z4.h, z7.h\n"
".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
@@ -1436,28 +1436,28 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
".inst 0x6466e49f // bfmmla z31.s, z4.h, z6.h\n"
"ble 63f\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
- "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x6467e470 // bfmmla z16.s, z3.h, z7.h\n"
".inst 0x6467e4b8 // bfmmla z24.s, z5.h, z7.h\n"
- ".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
".inst 0x6466e474 // bfmmla z20.s, z3.h, z6.h\n"
".inst 0x6466e4bc // bfmmla z28.s, z5.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
".inst 0x6467e471 // bfmmla z17.s, z3.h, z7.h\n"
".inst 0x6467e4b9 // bfmmla z25.s, z5.h, z7.h\n"
- ".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
".inst 0x6466e475 // bfmmla z21.s, z3.h, z6.h\n"
".inst 0x6466e4bd // bfmmla z29.s, z5.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #5, MUL VL]\n"
".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
".inst 0x6467e472 // bfmmla z18.s, z3.h, z7.h\n"
".inst 0x6467e4ba // bfmmla z26.s, z5.h, z7.h\n"
- ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
".inst 0x6466e476 // bfmmla z22.s, z3.h, z6.h\n"
".inst 0x6466e4be // bfmmla z30.s, z5.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #7, MUL VL]\n"
@@ -1469,21 +1469,21 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
".inst 0x6466e477 // bfmmla z23.s, z3.h, z6.h\n"
".inst 0x6466e4bf // bfmmla z31.s, z5.h, z6.h\n"
"63:" // Height 5: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 58b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"uzp1 z7.d, z8.d, z12.d\n"
- "add x23, x24, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 z8.d, z8.d, z12.d\n"
+ "add x24, x28, x19, LSL #2\n"
"uzp1 z12.d, z9.d, z13.d\n"
- "add x22, x23, x20, LSL #2\n"
"uzp2 z9.d, z9.d, z13.d\n"
+ "add x23, x24, x19, LSL #2\n"
"uzp1 z13.d, z10.d, z14.d\n"
+ "add x22, x23, x19, LSL #2\n"
"uzp2 z10.d, z10.d, z14.d\n"
+ "add x21, x22, x19, LSL #2\n"
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
"uzp1 z15.d, z16.d, z20.d\n"
@@ -1499,109 +1499,109 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"uzp1 z26.d, z26.d, z30.d\n"
"uzp1 z27.d, z27.d, z31.d\n"
"tbz %x[flags], #1, 64f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
- "fmin z7.s, p5/M, z7.s, z1.s\n"
- "fmin z12.s, p5/M, z12.s, z1.s\n"
- "fmin z13.s, p5/M, z13.s, z1.s\n"
- "fmin z14.s, p5/M, z14.s, z1.s\n"
- "fmin z8.s, p5/M, z8.s, z1.s\n"
- "fmin z9.s, p5/M, z9.s, z1.s\n"
- "fmin z10.s, p5/M, z10.s, z1.s\n"
- "fmin z11.s, p5/M, z11.s, z1.s\n"
- "fmin z15.s, p5/M, z15.s, z1.s\n"
- "fmin z20.s, p5/M, z20.s, z1.s\n"
- "fmin z21.s, p5/M, z21.s, z1.s\n"
- "fmin z22.s, p5/M, z22.s, z1.s\n"
- "fmin z16.s, p5/M, z16.s, z1.s\n"
- "fmin z17.s, p5/M, z17.s, z1.s\n"
- "fmin z18.s, p5/M, z18.s, z1.s\n"
- "fmin z19.s, p5/M, z19.s, z1.s\n"
- "fmin z24.s, p5/M, z24.s, z1.s\n"
- "fmin z25.s, p5/M, z25.s, z1.s\n"
- "fmin z26.s, p5/M, z26.s, z1.s\n"
- "fmin z27.s, p5/M, z27.s, z1.s\n"
- "fmax z7.s, p5/M, z7.s, z0.s\n"
- "fmax z12.s, p5/M, z12.s, z0.s\n"
- "fmax z13.s, p5/M, z13.s, z0.s\n"
- "fmax z14.s, p5/M, z14.s, z0.s\n"
- "fmax z8.s, p5/M, z8.s, z0.s\n"
- "fmax z9.s, p5/M, z9.s, z0.s\n"
- "fmax z10.s, p5/M, z10.s, z0.s\n"
- "fmax z11.s, p5/M, z11.s, z0.s\n"
- "fmax z15.s, p5/M, z15.s, z0.s\n"
- "fmax z20.s, p5/M, z20.s, z0.s\n"
- "fmax z21.s, p5/M, z21.s, z0.s\n"
- "fmax z22.s, p5/M, z22.s, z0.s\n"
- "fmax z16.s, p5/M, z16.s, z0.s\n"
- "fmax z17.s, p5/M, z17.s, z0.s\n"
- "fmax z18.s, p5/M, z18.s, z0.s\n"
- "fmax z19.s, p5/M, z19.s, z0.s\n"
- "fmax z24.s, p5/M, z24.s, z0.s\n"
- "fmax z25.s, p5/M, z25.s, z0.s\n"
- "fmax z26.s, p5/M, z26.s, z0.s\n"
- "fmax z27.s, p5/M, z27.s, z0.s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
+ "fmin z7.s, p5/M, z7.s, z0.s\n"
+ "fmin z12.s, p5/M, z12.s, z0.s\n"
+ "fmin z13.s, p5/M, z13.s, z0.s\n"
+ "fmin z14.s, p5/M, z14.s, z0.s\n"
+ "fmin z8.s, p5/M, z8.s, z0.s\n"
+ "fmax z7.s, p5/M, z7.s, z1.s\n"
+ "fmax z12.s, p5/M, z12.s, z1.s\n"
+ "fmax z13.s, p5/M, z13.s, z1.s\n"
+ "fmax z14.s, p5/M, z14.s, z1.s\n"
+ "fmax z8.s, p5/M, z8.s, z1.s\n"
+ "fmin z9.s, p5/M, z9.s, z0.s\n"
+ "fmin z10.s, p5/M, z10.s, z0.s\n"
+ "fmin z11.s, p5/M, z11.s, z0.s\n"
+ "fmin z15.s, p5/M, z15.s, z0.s\n"
+ "fmax z9.s, p5/M, z9.s, z1.s\n"
+ "fmax z10.s, p5/M, z10.s, z1.s\n"
+ "fmax z11.s, p5/M, z11.s, z1.s\n"
+ "fmax z15.s, p5/M, z15.s, z1.s\n"
+ "fmin z20.s, p5/M, z20.s, z0.s\n"
+ "fmin z21.s, p5/M, z21.s, z0.s\n"
+ "fmin z22.s, p5/M, z22.s, z0.s\n"
+ "fmin z16.s, p5/M, z16.s, z0.s\n"
+ "fmax z20.s, p5/M, z20.s, z1.s\n"
+ "fmax z21.s, p5/M, z21.s, z1.s\n"
+ "fmax z22.s, p5/M, z22.s, z1.s\n"
+ "fmax z16.s, p5/M, z16.s, z1.s\n"
+ "fmin z17.s, p5/M, z17.s, z0.s\n"
+ "fmin z18.s, p5/M, z18.s, z0.s\n"
+ "fmin z19.s, p5/M, z19.s, z0.s\n"
+ "fmin z24.s, p5/M, z24.s, z0.s\n"
+ "fmax z17.s, p5/M, z17.s, z1.s\n"
+ "fmax z18.s, p5/M, z18.s, z1.s\n"
+ "fmax z19.s, p5/M, z19.s, z1.s\n"
+ "fmax z24.s, p5/M, z24.s, z1.s\n"
+ "fmin z25.s, p5/M, z25.s, z0.s\n"
+ "fmin z26.s, p5/M, z26.s, z0.s\n"
+ "fmin z27.s, p5/M, z27.s, z0.s\n"
+ "fmax z25.s, p5/M, z25.s, z1.s\n"
+ "fmax z26.s, p5/M, z26.s, z1.s\n"
+ "fmax z27.s, p5/M, z27.s, z1.s\n"
"64:" // Height 5: No activation
- "st1w { z7.s }, p4, [x9]\n"
- "st1w { z12.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z8.s }, p4, [x25]\n"
- "st1w { z9.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z15.s }, p4, [x24]\n"
- "st1w { z20.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z21.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z22.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x23]\n"
- "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z24.s }, p4, [x22]\n"
- "st1w { z25.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z26.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z27.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z7.s }, p4, [x28]\n"
+ "st1w { z12.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z13.s }, p2, [x28, #2, MUL VL]\n"
+ "st1w { z14.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z8.s }, p4, [x24]\n"
+ "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z15.s }, p4, [x23]\n"
+ "st1w { z20.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z21.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z22.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x22]\n"
+ "st1w { z17.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x21]\n"
+ "st1w { z25.s }, p3, [x21, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x21, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x21, #3, MUL VL]\n"
"65:" // Height 5: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 54b\n"
"b 80f\n"
"66:" // Height 6
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x18\n"
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x19, #0x18\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"67:" // Height 6: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "cbz x12, 68f\n"
- "ld1w { z8.s }, p5/Z, [x12]\n"
- "ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x11\n"
+ "cbz x9, 68f\n"
+ "ld1w { z8.s }, p5/Z, [x9]\n"
"zip2 z12.d, z8.d, z8.d\n"
+ "ld1w { z9.s }, p5/Z, [x9, #1, MUL VL]\n"
"zip1 z8.d, z8.d, z8.d\n"
- "ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z10.s }, p5/Z, [x9, #2, MUL VL]\n"
+ "mov z16.d, z8.d\n"
+ "ld1w { z11.s }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x12, x12, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
- "mov z16.d, z8.d\n"
"mov z20.d, z12.d\n"
"mov z17.d, z9.d\n"
"mov z21.d, z13.d\n"
@@ -1620,55 +1620,55 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"b 70f\n"
"68:" // Height 6: no bias
"tbz %x[flags], #0, 69f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z9.s }, p4/Z, [x9]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "ld1w { z10.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z11.s }, p2/Z, [x9, #2, MUL VL]\n"
- "add x21, x22, x20, LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x25]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z9.s }, p4/Z, [x28]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "ld1w { z10.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
"zip1 z8.d, z9.d, z12.d\n"
- "ld1w { z13.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x25, #2, MUL VL]\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "add x21, x22, x19, LSL #2\n"
"zip2 z12.d, z9.d, z12.d\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "add x20, x21, x19, LSL #2\n"
"zip1 z9.d, z10.d, z13.d\n"
- "ld1w { z15.s }, p1/Z, [x25, #3, MUL VL]\n"
- "ld1w { z17.s }, p4/Z, [x24]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
"zip2 z13.d, z10.d, z13.d\n"
+ "ld1w { z17.s }, p4/Z, [x23]\n"
"zip1 z10.d, z11.d, z14.d\n"
- "ld1w { z18.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z19.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z18.s }, p3/Z, [x23, #1, MUL VL]\n"
"zip2 z14.d, z11.d, z14.d\n"
+ "ld1w { z19.s }, p2/Z, [x23, #2, MUL VL]\n"
"zip1 z11.d, z16.d, z15.d\n"
- "ld1w { z24.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x23]\n"
+ "ld1w { z24.s }, p1/Z, [x23, #3, MUL VL]\n"
"zip2 z15.d, z16.d, z15.d\n"
+ "ld1w { z20.s }, p4/Z, [x22]\n"
+ "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
"zip1 z16.d, z17.d, z20.d\n"
- "ld1w { z21.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z22.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
"zip2 z20.d, z17.d, z20.d\n"
+ "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
"zip1 z17.d, z18.d, z21.d\n"
- "ld1w { z23.s }, p1/Z, [x23, #3, MUL VL]\n"
- "ld1w { z25.s }, p4/Z, [x22]\n"
+ "ld1w { z25.s }, p4/Z, [x21]\n"
"zip2 z21.d, z18.d, z21.d\n"
+ "ld1w { z26.s }, p3/Z, [x21, #1, MUL VL]\n"
"zip1 z18.d, z19.d, z22.d\n"
- "ld1w { z26.s }, p3/Z, [x22, #1, MUL VL]\n"
- "ld1w { z27.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z27.s }, p2/Z, [x21, #2, MUL VL]\n"
"zip2 z22.d, z19.d, z22.d\n"
+ "ld1w { z6.s }, p1/Z, [x21, #3, MUL VL]\n"
"zip1 z19.d, z24.d, z23.d\n"
- "ld1w { z6.s }, p1/Z, [x22, #3, MUL VL]\n"
- "ld1w { z28.s }, p4/Z, [x21]\n"
+ "ld1w { z28.s }, p4/Z, [x20]\n"
"zip2 z23.d, z24.d, z23.d\n"
+ "ld1w { z29.s }, p3/Z, [x20, #1, MUL VL]\n"
+ "ld1w { z30.s }, p2/Z, [x20, #2, MUL VL]\n"
"zip1 z24.d, z25.d, z28.d\n"
- "ld1w { z29.s }, p3/Z, [x21, #1, MUL VL]\n"
- "ld1w { z30.s }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z31.s }, p1/Z, [x20, #3, MUL VL]\n"
"zip2 z28.d, z25.d, z28.d\n"
"zip1 z25.d, z26.d, z29.d\n"
- "ld1w { z31.s }, p1/Z, [x21, #3, MUL VL]\n"
"zip2 z29.d, z26.d, z29.d\n"
"zip1 z26.d, z27.d, z30.d\n"
"zip2 z30.d, z27.d, z30.d\n"
@@ -1701,80 +1701,80 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"mov z30.b, #0x0\n"
"mov z31.b, #0x0\n"
"70:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"71:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 72f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 73f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
- "add x22, x22, x20, LSL #1\n"
- "add x21, x21, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 73f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
+ "add x20, x20, x19, LSL #1\n"
"b 73f\n"
"72:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
"73:" // Height 6: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"ble 75f\n"
"74:" // Height 6: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z1.h }, p0/Z, [x26]\n"
- "ld1rqh { z2.h }, p0/Z, [x25]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "sub x26, x26, #0x8\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "ld1rqh { z2.h }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1rqh { z3.h }, p0/Z, [x24]\n"
- "ld1rqh { z4.h }, p0/Z, [x23]\n"
+ "ld1rqh { z3.h }, p0/Z, [x23]\n"
+ "cmp x26, #0x8\n"
"trn2 z1.d, z1.d, z2.d\n"
+ "ld1rqh { z4.h }, p0/Z, [x22]\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
+ "ld1rqh { z5.h }, p0/Z, [x21]\n"
+ "add x24, x24, #0x10\n"
"trn1 z2.d, z3.d, z4.d\n"
- "ld1rqh { z5.h }, p0/Z, [x22]\n"
- "ld1rqh { z6.h }, p0/Z, [x21]\n"
+ "ld1rqh { z6.h }, p0/Z, [x20]\n"
+ "add x23, x23, #0x10\n"
"trn2 z3.d, z3.d, z4.d\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x6467e450 // bfmmla z16.s, z2.h, z7.h\n"
+ "add x20, x20, #0x10\n"
"trn1 z4.d, z5.d, z6.d\n"
"trn2 z5.d, z5.d, z6.d\n"
- "ld1h { z7.h }, p5/Z, [x10]\n"
"ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
- ".inst 0x6467e450 // bfmmla z16.s, z2.h, z7.h\n"
".inst 0x6467e498 // bfmmla z24.s, z4.h, z7.h\n"
"ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
- "sub x27, x27, #0x8\n"
".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
- "cmp x27, #0x8\n"
- "add x26, x26, #0x10\n"
".inst 0x6466e49c // bfmmla z28.s, z4.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
- "add x25, x25, #0x10\n"
".inst 0x6467e451 // bfmmla z17.s, z2.h, z7.h\n"
".inst 0x6467e499 // bfmmla z25.s, z4.h, z7.h\n"
"ld1h { z7.h }, p5/Z, [x10, #4, MUL VL]\n"
- "add x24, x24, #0x10\n"
".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
".inst 0x6466e49d // bfmmla z29.s, z4.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #5, MUL VL]\n"
".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
- "add x21, x21, #0x10\n"
".inst 0x6467e452 // bfmmla z18.s, z2.h, z7.h\n"
".inst 0x6467e49a // bfmmla z26.s, z4.h, z7.h\n"
"ld1h { z7.h }, p5/Z, [x10, #6, MUL VL]\n"
@@ -1823,26 +1823,26 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
".inst 0x6466e4bf // bfmmla z31.s, z5.h, z6.h\n"
"bgt 74b\n"
"75:" // Height 6: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z1.h }, p0/Z, [x26]\n"
- "ld1rqh { z2.h }, p0/Z, [x25]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "ld1rqh { z2.h }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1rqh { z3.h }, p0/Z, [x24]\n"
- "ld1rqh { z4.h }, p0/Z, [x23]\n"
+ "ld1rqh { z3.h }, p0/Z, [x23]\n"
"trn2 z1.d, z1.d, z2.d\n"
+ "ld1rqh { z4.h }, p0/Z, [x22]\n"
+ "ld1rqh { z5.h }, p0/Z, [x21]\n"
+ ".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
+ "ld1rqh { z6.h }, p0/Z, [x20]\n"
"trn1 z2.d, z3.d, z4.d\n"
- "ld1rqh { z5.h }, p0/Z, [x22]\n"
- "ld1rqh { z6.h }, p0/Z, [x21]\n"
"trn2 z3.d, z3.d, z4.d\n"
"trn1 z4.d, z5.d, z6.d\n"
"trn2 z5.d, z5.d, z6.d\n"
- "ld1h { z7.h }, p5/Z, [x10]\n"
"ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
".inst 0x6467e450 // bfmmla z16.s, z2.h, z7.h\n"
".inst 0x6467e498 // bfmmla z24.s, z4.h, z7.h\n"
"ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x4\n"
".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
".inst 0x6466e49c // bfmmla z28.s, z4.h, z6.h\n"
@@ -1863,8 +1863,8 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
".inst 0x6466e456 // bfmmla z22.s, z2.h, z6.h\n"
".inst 0x6466e49e // bfmmla z30.s, z4.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #7, MUL VL]\n"
- ".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
"addvl x10, x10, #8\n"
+ ".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
".inst 0x6467e453 // bfmmla z19.s, z2.h, z7.h\n"
".inst 0x6467e49b // bfmmla z27.s, z4.h, z7.h\n"
".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
@@ -1872,28 +1872,28 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
".inst 0x6466e49f // bfmmla z31.s, z4.h, z6.h\n"
"ble 76f\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
- "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x6467e470 // bfmmla z16.s, z3.h, z7.h\n"
".inst 0x6467e4b8 // bfmmla z24.s, z5.h, z7.h\n"
- ".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
".inst 0x6466e474 // bfmmla z20.s, z3.h, z6.h\n"
".inst 0x6466e4bc // bfmmla z28.s, z5.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
".inst 0x6467e471 // bfmmla z17.s, z3.h, z7.h\n"
".inst 0x6467e4b9 // bfmmla z25.s, z5.h, z7.h\n"
- ".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
".inst 0x6466e475 // bfmmla z21.s, z3.h, z6.h\n"
".inst 0x6466e4bd // bfmmla z29.s, z5.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #5, MUL VL]\n"
".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
".inst 0x6467e472 // bfmmla z18.s, z3.h, z7.h\n"
".inst 0x6467e4ba // bfmmla z26.s, z5.h, z7.h\n"
- ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
"ld1h { z7.h }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
".inst 0x6466e476 // bfmmla z22.s, z3.h, z6.h\n"
".inst 0x6466e4be // bfmmla z30.s, z5.h, z6.h\n"
"ld1h { z6.h }, p5/Z, [x10, #7, MUL VL]\n"
@@ -1905,23 +1905,23 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
".inst 0x6466e477 // bfmmla z23.s, z3.h, z6.h\n"
".inst 0x6466e4bf // bfmmla z31.s, z5.h, z6.h\n"
"76:" // Height 6: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 71b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"uzp1 z7.d, z8.d, z12.d\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 z8.d, z8.d, z12.d\n"
+ "add x24, x28, x19, LSL #2\n"
"uzp1 z12.d, z9.d, z13.d\n"
"uzp2 z9.d, z9.d, z13.d\n"
+ "add x23, x24, x19, LSL #2\n"
"uzp1 z13.d, z10.d, z14.d\n"
- "add x21, x22, x20, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"uzp2 z10.d, z10.d, z14.d\n"
+ "add x21, x22, x19, LSL #2\n"
"uzp1 z14.d, z11.d, z15.d\n"
+ "add x20, x21, x19, LSL #2\n"
"uzp2 z11.d, z11.d, z15.d\n"
"uzp1 z15.d, z16.d, z20.d\n"
"uzp2 z16.d, z16.d, z20.d\n"
@@ -1940,103 +1940,104 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"uzp1 z30.d, z27.d, z31.d\n"
"uzp2 z27.d, z27.d, z31.d\n"
"tbz %x[flags], #1, 77f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
- "fmin z7.s, p5/M, z7.s, z1.s\n"
- "fmin z12.s, p5/M, z12.s, z1.s\n"
- "fmin z13.s, p5/M, z13.s, z1.s\n"
- "fmin z14.s, p5/M, z14.s, z1.s\n"
- "fmin z8.s, p5/M, z8.s, z1.s\n"
- "fmin z9.s, p5/M, z9.s, z1.s\n"
- "fmin z10.s, p5/M, z10.s, z1.s\n"
- "fmin z11.s, p5/M, z11.s, z1.s\n"
- "fmin z15.s, p5/M, z15.s, z1.s\n"
- "fmin z20.s, p5/M, z20.s, z1.s\n"
- "fmin z21.s, p5/M, z21.s, z1.s\n"
- "fmin z22.s, p5/M, z22.s, z1.s\n"
- "fmin z16.s, p5/M, z16.s, z1.s\n"
- "fmin z17.s, p5/M, z17.s, z1.s\n"
- "fmin z18.s, p5/M, z18.s, z1.s\n"
- "fmin z19.s, p5/M, z19.s, z1.s\n"
- "fmin z23.s, p5/M, z23.s, z1.s\n"
- "fmin z28.s, p5/M, z28.s, z1.s\n"
- "fmin z29.s, p5/M, z29.s, z1.s\n"
- "fmin z30.s, p5/M, z30.s, z1.s\n"
- "fmin z24.s, p5/M, z24.s, z1.s\n"
- "fmin z25.s, p5/M, z25.s, z1.s\n"
- "fmin z26.s, p5/M, z26.s, z1.s\n"
- "fmin z27.s, p5/M, z27.s, z1.s\n"
- "fmax z7.s, p5/M, z7.s, z0.s\n"
- "fmax z12.s, p5/M, z12.s, z0.s\n"
- "fmax z13.s, p5/M, z13.s, z0.s\n"
- "fmax z14.s, p5/M, z14.s, z0.s\n"
- "fmax z8.s, p5/M, z8.s, z0.s\n"
- "fmax z9.s, p5/M, z9.s, z0.s\n"
- "fmax z10.s, p5/M, z10.s, z0.s\n"
- "fmax z11.s, p5/M, z11.s, z0.s\n"
- "fmax z15.s, p5/M, z15.s, z0.s\n"
- "fmax z20.s, p5/M, z20.s, z0.s\n"
- "fmax z21.s, p5/M, z21.s, z0.s\n"
- "fmax z22.s, p5/M, z22.s, z0.s\n"
- "fmax z16.s, p5/M, z16.s, z0.s\n"
- "fmax z17.s, p5/M, z17.s, z0.s\n"
- "fmax z18.s, p5/M, z18.s, z0.s\n"
- "fmax z19.s, p5/M, z19.s, z0.s\n"
- "fmax z23.s, p5/M, z23.s, z0.s\n"
- "fmax z28.s, p5/M, z28.s, z0.s\n"
- "fmax z29.s, p5/M, z29.s, z0.s\n"
- "fmax z30.s, p5/M, z30.s, z0.s\n"
- "fmax z24.s, p5/M, z24.s, z0.s\n"
- "fmax z25.s, p5/M, z25.s, z0.s\n"
- "fmax z26.s, p5/M, z26.s, z0.s\n"
- "fmax z27.s, p5/M, z27.s, z0.s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
+ "fmin z7.s, p5/M, z7.s, z0.s\n"
+ "fmin z12.s, p5/M, z12.s, z0.s\n"
+ "fmin z13.s, p5/M, z13.s, z0.s\n"
+ "fmin z14.s, p5/M, z14.s, z0.s\n"
+ "fmin z8.s, p5/M, z8.s, z0.s\n"
+ "fmax z7.s, p5/M, z7.s, z1.s\n"
+ "fmax z12.s, p5/M, z12.s, z1.s\n"
+ "fmax z13.s, p5/M, z13.s, z1.s\n"
+ "fmax z14.s, p5/M, z14.s, z1.s\n"
+ "fmax z8.s, p5/M, z8.s, z1.s\n"
+ "fmin z9.s, p5/M, z9.s, z0.s\n"
+ "fmin z10.s, p5/M, z10.s, z0.s\n"
+ "fmin z11.s, p5/M, z11.s, z0.s\n"
+ "fmin z15.s, p5/M, z15.s, z0.s\n"
+ "fmax z9.s, p5/M, z9.s, z1.s\n"
+ "fmax z10.s, p5/M, z10.s, z1.s\n"
+ "fmax z11.s, p5/M, z11.s, z1.s\n"
+ "fmax z15.s, p5/M, z15.s, z1.s\n"
+ "fmin z20.s, p5/M, z20.s, z0.s\n"
+ "fmin z21.s, p5/M, z21.s, z0.s\n"
+ "fmin z22.s, p5/M, z22.s, z0.s\n"
+ "fmin z16.s, p5/M, z16.s, z0.s\n"
+ "fmax z20.s, p5/M, z20.s, z1.s\n"
+ "fmax z21.s, p5/M, z21.s, z1.s\n"
+ "fmax z22.s, p5/M, z22.s, z1.s\n"
+ "fmax z16.s, p5/M, z16.s, z1.s\n"
+ "fmin z17.s, p5/M, z17.s, z0.s\n"
+ "fmin z18.s, p5/M, z18.s, z0.s\n"
+ "fmin z19.s, p5/M, z19.s, z0.s\n"
+ "fmin z23.s, p5/M, z23.s, z0.s\n"
+ "fmax z17.s, p5/M, z17.s, z1.s\n"
+ "fmax z18.s, p5/M, z18.s, z1.s\n"
+ "fmax z19.s, p5/M, z19.s, z1.s\n"
+ "fmax z23.s, p5/M, z23.s, z1.s\n"
+ "fmin z28.s, p5/M, z28.s, z0.s\n"
+ "fmin z29.s, p5/M, z29.s, z0.s\n"
+ "fmin z30.s, p5/M, z30.s, z0.s\n"
+ "fmin z24.s, p5/M, z24.s, z0.s\n"
+ "fmax z28.s, p5/M, z28.s, z1.s\n"
+ "fmax z29.s, p5/M, z29.s, z1.s\n"
+ "fmax z30.s, p5/M, z30.s, z1.s\n"
+ "fmax z24.s, p5/M, z24.s, z1.s\n"
+ "fmin z25.s, p5/M, z25.s, z0.s\n"
+ "fmin z26.s, p5/M, z26.s, z0.s\n"
+ "fmin z27.s, p5/M, z27.s, z0.s\n"
+ "fmax z25.s, p5/M, z25.s, z1.s\n"
+ "fmax z26.s, p5/M, z26.s, z1.s\n"
+ "fmax z27.s, p5/M, z27.s, z1.s\n"
"77:" // Height 6: No activation
- "st1w { z7.s }, p4, [x9]\n"
- "st1w { z12.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z8.s }, p4, [x25]\n"
- "st1w { z9.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z15.s }, p4, [x24]\n"
- "st1w { z20.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z21.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z22.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x23]\n"
- "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z23.s }, p4, [x22]\n"
- "st1w { z28.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z29.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z30.s }, p1, [x22, #3, MUL VL]\n"
- "st1w { z24.s }, p4, [x21]\n"
- "st1w { z25.s }, p3, [x21, #1, MUL VL]\n"
- "st1w { z26.s }, p2, [x21, #2, MUL VL]\n"
- "st1w { z27.s }, p1, [x21, #3, MUL VL]\n"
+ "st1w { z7.s }, p4, [x28]\n"
+ "st1w { z12.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z13.s }, p2, [x28, #2, MUL VL]\n"
+ "st1w { z14.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z8.s }, p4, [x24]\n"
+ "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z15.s }, p4, [x23]\n"
+ "st1w { z20.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z21.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z22.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x22]\n"
+ "st1w { z17.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z23.s }, p4, [x21]\n"
+ "st1w { z28.s }, p3, [x21, #1, MUL VL]\n"
+ "st1w { z29.s }, p2, [x21, #2, MUL VL]\n"
+ "st1w { z30.s }, p1, [x21, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x20]\n"
+ "st1w { z25.s }, p3, [x20, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x20, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x20, #3, MUL VL]\n"
"78:" // Height 6: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 67b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 80f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 79f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"79:" // Update direct input
- "mov x20, #0xc\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0xc\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"80:" // Exit
+
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
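(The clobber-list change in the asm statement above swaps "x12" out and restores "x19". Under the AAPCS64 calling convention, x19 through x28 are callee-saved, so any asm block that scratches x19 must name it in the clobber list; the compiler then saves and restores it around the block. A minimal, hypothetical sketch of that mechanism — illustrative only, not code from this patch; the function name and operands are invented:

    #include <cstdint>

    // Hypothetical helper: stashes 'a' in x19. Because x19 is callee-saved,
    // it must be declared clobbered, or the caller's preserved value would
    // be silently corrupted.
    int64_t add_via_x19(int64_t a, int64_t b)
    {
        int64_t result;
        __asm__ volatile(
            "mov x19, %x[a]\n"             // scratch a callee-saved register
            "add %x[res], x19, %x[b]\n"    // result = a + b via x19
            : [res] "=r" (result)
            : [a] "r" (a), [b] "r" (b)
            : "x19"                        // compiler saves/restores x19
        );
        return result;
    }

The same rule is why the kernel's clobber list must track exactly which x-registers its generated assembly touches.)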
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL/a64fx.cpp
index a70e66cbe4..11f5ed2c0a 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL/a64fx.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL/a64fx.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -102,32 +102,32 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"cmp %x[M], #0x2\n"
"bgt 25f\n"
"beq 13f\n"
- "mov x12, %x[bias]\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x11, %x[bias]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "mov x20, #0x0\n"
- "whilelt p3.h, x20, x11\n"
- "inch x20\n"
- "whilelt p2.h, x20, x11\n"
- "inch x20\n"
- "whilelt p1.h, x20, x11\n"
- "inch x20\n"
- "whilelt p0.h, x20, x11\n"
- "cbz x12, 3f\n"
- "ld1h { z8.h }, p4/Z, [x12]\n"
- "ld1h { z9.h }, p4/Z, [x12, #1, MUL VL]\n"
- "ld1h { z10.h }, p4/Z, [x12, #2, MUL VL]\n"
- "ld1h { z11.h }, p4/Z, [x12, #3, MUL VL]\n"
- "addvl x12, x12, #4\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.h, x19, x10\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x10\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x10\n"
+ "inch x19\n"
+ "whilelt p0.h, x19, x10\n"
+ "cbz x11, 3f\n"
+ "ld1h { z8.h }, p4/Z, [x11]\n"
+ "ld1h { z9.h }, p4/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z10.h }, p4/Z, [x11, #2, MUL VL]\n"
+ "ld1h { z11.h }, p4/Z, [x11, #3, MUL VL]\n"
+ "addvl x11, x11, #4\n"
"b 5f\n"
"3:" // Height 1: no bias
"tbz %x[flags], #0, 4f\n"
- "ld1h { z8.h }, p3/Z, [x9]\n"
- "ld1h { z9.h }, p2/Z, [x9, #1, MUL VL]\n"
- "ld1h { z10.h }, p1/Z, [x9, #2, MUL VL]\n"
- "ld1h { z11.h }, p0/Z, [x9, #3, MUL VL]\n"
+ "ld1h { z8.h }, p3/Z, [x28]\n"
+ "ld1h { z9.h }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1h { z10.h }, p1/Z, [x28, #2, MUL VL]\n"
+ "ld1h { z11.h }, p0/Z, [x28, #3, MUL VL]\n"
"b 5f\n"
"4:" // Height 1: no accumulate
"mov z8.b, #0x0\n"
@@ -135,58 +135,58 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"mov z10.b, #0x0\n"
"mov z11.b, #0x0\n"
"5:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"6:" // Height 1: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 7f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 8f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 8f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
"b 8f\n"
"7:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"8:" // Height 1: input setup done
- "subs x27, x27, #0x1\n"
- "ld1rh { z0.h }, p4/Z, [x26]\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
- "ld1h { z7.h }, p4/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
+ "ld1h { z7.h }, p4/Z, [x9, #1, MUL VL]\n"
"ble 10f\n"
"9:" // Height 1: Multiply loop: Main loop
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
- "ld1h { z6.h }, p4/Z, [x10, #2, MUL VL]\n"
- "ld1h { z7.h }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
- "add x26, x26, #0x2\n"
+ "ld1h { z6.h }, p4/Z, [x9, #2, MUL VL]\n"
+ "ld1h { z7.h }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
+ "add x25, x25, #0x2\n"
"fmla z10.h, p4/M, z6.h, z0.h\n"
"fmla z11.h, p4/M, z7.h, z0.h\n"
- "subs x27, x27, #0x1\n"
- "ld1rh { z0.h }, p4/Z, [x26]\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
- "ld1h { z7.h }, p4/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
+ "ld1h { z7.h }, p4/Z, [x9, #1, MUL VL]\n"
"bgt 9b\n"
"10:" // Height 1: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
- "ld1h { z6.h }, p4/Z, [x10, #2, MUL VL]\n"
- "ld1h { z7.h }, p4/Z, [x10, #3, MUL VL]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ld1h { z6.h }, p4/Z, [x9, #2, MUL VL]\n"
+ "ld1h { z7.h }, p4/Z, [x9, #3, MUL VL]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"fmla z10.h, p4/M, z6.h, z0.h\n"
"fmla z11.h, p4/M, z7.h, z0.h\n"
- "addvl x10, x10, #4\n"
+ "addvl x9, x9, #4\n"
"bne 6b\n"
"tbz %x[flags], #1, 11f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p4/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p4/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z1.h }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z0.h }, p4/Z, [x19]\n"
"fmin z8.h, p4/M, z8.h, z1.h\n"
"fmin z9.h, p4/M, z9.h, z1.h\n"
"fmin z10.h, p4/M, z10.h, z1.h\n"
@@ -196,53 +196,53 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"fmax z10.h, p4/M, z10.h, z0.h\n"
"fmax z11.h, p4/M, z11.h, z0.h\n"
"11:" // Height 1: No activation
- "st1h { z8.h }, p3, [x9]\n"
- "st1h { z9.h }, p2, [x9, #1, MUL VL]\n"
- "st1h { z10.h }, p1, [x9, #2, MUL VL]\n"
- "st1h { z11.h }, p0, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "st1h { z8.h }, p3, [x28]\n"
+ "st1h { z9.h }, p2, [x28, #1, MUL VL]\n"
+ "st1h { z10.h }, p1, [x28, #2, MUL VL]\n"
+ "st1h { z11.h }, p0, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"12:" // Height 1: Writeback done
- "dech x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "dech x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 2b\n"
"b 74f\n"
"13:" // Height 2
- "mov x12, %x[bias]\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x11, %x[bias]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"14:" // Height 2: Column loop
- "mov x20, #0x0\n"
- "whilelt p3.h, x20, x11\n"
- "inch x20\n"
- "whilelt p2.h, x20, x11\n"
- "inch x20\n"
- "whilelt p1.h, x20, x11\n"
- "inch x20\n"
- "whilelt p0.h, x20, x11\n"
- "cbz x12, 15f\n"
- "ld1h { z8.h }, p4/Z, [x12]\n"
- "ld1h { z9.h }, p4/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.h, x19, x10\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x10\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x10\n"
+ "inch x19\n"
+ "whilelt p0.h, x19, x10\n"
+ "cbz x11, 15f\n"
+ "ld1h { z8.h }, p4/Z, [x11]\n"
+ "ld1h { z9.h }, p4/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z10.h }, p4/Z, [x11, #2, MUL VL]\n"
"mov z12.d, z8.d\n"
"mov z13.d, z9.d\n"
- "ld1h { z10.h }, p4/Z, [x12, #2, MUL VL]\n"
- "ld1h { z11.h }, p4/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z11.h }, p4/Z, [x11, #3, MUL VL]\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
- "addvl x12, x12, #4\n"
+ "addvl x11, x11, #4\n"
"b 17f\n"
"15:" // Height 2: no bias
"tbz %x[flags], #0, 16f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "ld1h { z8.h }, p3/Z, [x9]\n"
- "ld1h { z9.h }, p2/Z, [x9, #1, MUL VL]\n"
- "ld1h { z10.h }, p1/Z, [x9, #2, MUL VL]\n"
- "ld1h { z11.h }, p0/Z, [x9, #3, MUL VL]\n"
- "ld1h { z12.h }, p3/Z, [x25]\n"
- "ld1h { z13.h }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1h { z14.h }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1h { z15.h }, p0/Z, [x25, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #1\n"
+ "ld1h { z8.h }, p3/Z, [x28]\n"
+ "ld1h { z9.h }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1h { z10.h }, p1/Z, [x28, #2, MUL VL]\n"
+ "ld1h { z11.h }, p0/Z, [x28, #3, MUL VL]\n"
+ "ld1h { z12.h }, p3/Z, [x24]\n"
+ "ld1h { z13.h }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z14.h }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z15.h }, p0/Z, [x24, #3, MUL VL]\n"
"b 17f\n"
"16:" // Height 2: no accumulate
"mov z8.b, #0x0\n"
@@ -254,74 +254,74 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"mov z14.b, #0x0\n"
"mov z15.b, #0x0\n"
"17:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"18:" // Height 2: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 19f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 20f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 20f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
"b 20f\n"
"19:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
"20:" // Height 2: input setup done
- "subs x27, x27, #0x1\n"
- "ld1rh { z0.h }, p4/Z, [x26]\n"
- "ld1rh { z1.h }, p4/Z, [x25]\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
- "ld1h { z7.h }, p4/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1rh { z1.h }, p4/Z, [x24]\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
+ "ld1h { z7.h }, p4/Z, [x9, #1, MUL VL]\n"
"ble 22f\n"
"21:" // Height 2: Multiply loop: Main loop
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z12.h, p4/M, z6.h, z1.h\n"
- "ld1h { z6.h }, p4/Z, [x10, #2, MUL VL]\n"
- "add x26, x26, #0x2\n"
+ "ld1h { z6.h }, p4/Z, [x9, #2, MUL VL]\n"
+ "add x25, x25, #0x2\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
- "ld1h { z7.h }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
- "subs x27, x27, #0x1\n"
- "add x25, x25, #0x2\n"
+ "ld1h { z7.h }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
+ "subs x26, x26, #0x1\n"
+ "add x24, x24, #0x2\n"
"fmla z10.h, p4/M, z6.h, z0.h\n"
"fmla z14.h, p4/M, z6.h, z1.h\n"
"fmla z11.h, p4/M, z7.h, z0.h\n"
"fmla z15.h, p4/M, z7.h, z1.h\n"
- "ld1rh { z0.h }, p4/Z, [x26]\n"
- "ld1rh { z1.h }, p4/Z, [x25]\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
- "ld1h { z7.h }, p4/Z, [x10, #1, MUL VL]\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1rh { z1.h }, p4/Z, [x24]\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
+ "ld1h { z7.h }, p4/Z, [x9, #1, MUL VL]\n"
"bgt 21b\n"
"22:" // Height 2: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z12.h, p4/M, z6.h, z1.h\n"
- "ld1h { z6.h }, p4/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z6.h }, p4/Z, [x9, #2, MUL VL]\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
- "ld1h { z7.h }, p4/Z, [x10, #3, MUL VL]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ld1h { z7.h }, p4/Z, [x9, #3, MUL VL]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"fmla z10.h, p4/M, z6.h, z0.h\n"
"fmla z14.h, p4/M, z6.h, z1.h\n"
- "addvl x10, x10, #4\n"
+ "addvl x9, x9, #4\n"
"fmla z11.h, p4/M, z7.h, z0.h\n"
"fmla z15.h, p4/M, z7.h, z1.h\n"
"bne 18b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #1\n"
"tbz %x[flags], #1, 23f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p4/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p4/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z1.h }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z0.h }, p4/Z, [x19]\n"
"fmin z8.h, p4/M, z8.h, z1.h\n"
"fmin z9.h, p4/M, z9.h, z1.h\n"
"fmin z10.h, p4/M, z10.h, z1.h\n"
@@ -339,66 +339,66 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"fmax z14.h, p4/M, z14.h, z0.h\n"
"fmax z15.h, p4/M, z15.h, z0.h\n"
"23:" // Height 2: No activation
- "st1h { z8.h }, p3, [x9]\n"
- "st1h { z9.h }, p2, [x9, #1, MUL VL]\n"
- "st1h { z10.h }, p1, [x9, #2, MUL VL]\n"
- "st1h { z11.h }, p0, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1h { z12.h }, p3, [x25]\n"
- "st1h { z13.h }, p2, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p1, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p0, [x25, #3, MUL VL]\n"
+ "st1h { z8.h }, p3, [x28]\n"
+ "st1h { z9.h }, p2, [x28, #1, MUL VL]\n"
+ "st1h { z10.h }, p1, [x28, #2, MUL VL]\n"
+ "st1h { z11.h }, p0, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1h { z12.h }, p3, [x24]\n"
+ "st1h { z13.h }, p2, [x24, #1, MUL VL]\n"
+ "st1h { z14.h }, p1, [x24, #2, MUL VL]\n"
+ "st1h { z15.h }, p0, [x24, #3, MUL VL]\n"
"24:" // Height 2: Writeback done
- "dech x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "dech x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 14b\n"
"b 74f\n"
"25:" // Height 3
- "mov x12, %x[bias]\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x11, %x[bias]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"26:" // Height 3: Column loop
- "mov x20, #0x0\n"
- "whilelt p3.h, x20, x11\n"
- "inch x20\n"
- "whilelt p2.h, x20, x11\n"
- "inch x20\n"
- "whilelt p1.h, x20, x11\n"
- "inch x20\n"
- "whilelt p0.h, x20, x11\n"
- "cbz x12, 27f\n"
- "ld1h { z8.h }, p4/Z, [x12]\n"
- "ld1h { z9.h }, p4/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.h, x19, x10\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x10\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x10\n"
+ "inch x19\n"
+ "whilelt p0.h, x19, x10\n"
+ "cbz x11, 27f\n"
+ "ld1h { z8.h }, p4/Z, [x11]\n"
+ "ld1h { z9.h }, p4/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z10.h }, p4/Z, [x11, #2, MUL VL]\n"
"mov z12.d, z8.d\n"
"mov z13.d, z9.d\n"
- "ld1h { z10.h }, p4/Z, [x12, #2, MUL VL]\n"
- "ld1h { z11.h }, p4/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z11.h }, p4/Z, [x11, #3, MUL VL]\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
+ "addvl x11, x11, #4\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"b 29f\n"
"27:" // Height 3: no bias
"tbz %x[flags], #0, 28f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "ld1h { z8.h }, p3/Z, [x9]\n"
- "ld1h { z9.h }, p2/Z, [x9, #1, MUL VL]\n"
- "ld1h { z10.h }, p1/Z, [x9, #2, MUL VL]\n"
- "ld1h { z11.h }, p0/Z, [x9, #3, MUL VL]\n"
- "ld1h { z12.h }, p3/Z, [x25]\n"
- "ld1h { z13.h }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1h { z14.h }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1h { z15.h }, p0/Z, [x25, #3, MUL VL]\n"
- "ld1h { z16.h }, p3/Z, [x24]\n"
- "ld1h { z17.h }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1h { z18.h }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1h { z19.h }, p0/Z, [x24, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "ld1h { z8.h }, p3/Z, [x28]\n"
+ "ld1h { z9.h }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1h { z10.h }, p1/Z, [x28, #2, MUL VL]\n"
+ "ld1h { z11.h }, p0/Z, [x28, #3, MUL VL]\n"
+ "ld1h { z12.h }, p3/Z, [x24]\n"
+ "ld1h { z13.h }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z14.h }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z15.h }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z16.h }, p3/Z, [x23]\n"
+ "ld1h { z17.h }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z18.h }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z19.h }, p0/Z, [x23, #3, MUL VL]\n"
"b 29f\n"
"28:" // Height 3: no accumulate
"mov z8.b, #0x0\n"
@@ -414,74 +414,74 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"mov z18.b, #0x0\n"
"mov z19.b, #0x0\n"
"29:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"30:" // Height 3: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 31f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 32f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 32f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
"b 32f\n"
"31:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
"32:" // Height 3: input setup done
- "subs x27, x27, #0x1\n"
- "ld1rh { z0.h }, p4/Z, [x26]\n"
- "ld1rh { z1.h }, p4/Z, [x25]\n"
- "ld1rh { z2.h }, p4/Z, [x24]\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
- "ld1h { z7.h }, p4/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1rh { z1.h }, p4/Z, [x24]\n"
+ "ld1rh { z2.h }, p4/Z, [x23]\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
+ "ld1h { z7.h }, p4/Z, [x9, #1, MUL VL]\n"
"ble 34f\n"
"33:" // Height 3: Multiply loop: Main loop
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z12.h, p4/M, z6.h, z1.h\n"
- "add x26, x26, #0x2\n"
- "subs x27, x27, #0x1\n"
+ "add x25, x25, #0x2\n"
+ "subs x26, x26, #0x1\n"
"fmla z16.h, p4/M, z6.h, z2.h\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
- "ld1h { z6.h }, p4/Z, [x10, #2, MUL VL]\n"
- "add x25, x25, #0x2\n"
+ "ld1h { z6.h }, p4/Z, [x9, #2, MUL VL]\n"
+ "add x24, x24, #0x2\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
- "ld1h { z7.h }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
- "add x24, x24, #0x2\n"
+ "ld1h { z7.h }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
+ "add x23, x23, #0x2\n"
"fmla z10.h, p4/M, z6.h, z0.h\n"
"fmla z14.h, p4/M, z6.h, z1.h\n"
"fmla z18.h, p4/M, z6.h, z2.h\n"
"fmla z11.h, p4/M, z7.h, z0.h\n"
- "ld1rh { z0.h }, p4/Z, [x26]\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
"fmla z15.h, p4/M, z7.h, z1.h\n"
"fmla z19.h, p4/M, z7.h, z2.h\n"
- "ld1rh { z1.h }, p4/Z, [x25]\n"
- "ld1rh { z2.h }, p4/Z, [x24]\n"
- "ld1h { z7.h }, p4/Z, [x10, #1, MUL VL]\n"
+ "ld1rh { z1.h }, p4/Z, [x24]\n"
+ "ld1rh { z2.h }, p4/Z, [x23]\n"
+ "ld1h { z7.h }, p4/Z, [x9, #1, MUL VL]\n"
"bgt 33b\n"
"34:" // Height 3: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z12.h, p4/M, z6.h, z1.h\n"
- "add x28, x28, #0x1\n"
+ "add x27, x27, #0x1\n"
"fmla z16.h, p4/M, z6.h, z2.h\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
- "ld1h { z6.h }, p4/Z, [x10, #2, MUL VL]\n"
- "cmp x28, x20\n"
+ "ld1h { z6.h }, p4/Z, [x9, #2, MUL VL]\n"
+ "cmp x27, x19\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
- "ld1h { z7.h }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1h { z7.h }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"fmla z10.h, p4/M, z6.h, z0.h\n"
"fmla z14.h, p4/M, z6.h, z1.h\n"
"fmla z18.h, p4/M, z6.h, z2.h\n"
@@ -489,14 +489,14 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"fmla z15.h, p4/M, z7.h, z1.h\n"
"fmla z19.h, p4/M, z7.h, z2.h\n"
"bne 30b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
"tbz %x[flags], #1, 35f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p4/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p4/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z1.h }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z0.h }, p4/Z, [x19]\n"
"fmin z8.h, p4/M, z8.h, z1.h\n"
"fmin z9.h, p4/M, z9.h, z1.h\n"
"fmin z10.h, p4/M, z10.h, z1.h\n"
@@ -522,50 +522,50 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"fmax z18.h, p4/M, z18.h, z0.h\n"
"fmax z19.h, p4/M, z19.h, z0.h\n"
"35:" // Height 3: No activation
- "st1h { z8.h }, p3, [x9]\n"
- "st1h { z9.h }, p2, [x9, #1, MUL VL]\n"
- "st1h { z10.h }, p1, [x9, #2, MUL VL]\n"
- "st1h { z11.h }, p0, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1h { z12.h }, p3, [x25]\n"
- "st1h { z13.h }, p2, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p1, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p0, [x25, #3, MUL VL]\n"
- "st1h { z16.h }, p3, [x24]\n"
- "st1h { z17.h }, p2, [x24, #1, MUL VL]\n"
- "st1h { z18.h }, p1, [x24, #2, MUL VL]\n"
- "st1h { z19.h }, p0, [x24, #3, MUL VL]\n"
+ "st1h { z8.h }, p3, [x28]\n"
+ "st1h { z9.h }, p2, [x28, #1, MUL VL]\n"
+ "st1h { z10.h }, p1, [x28, #2, MUL VL]\n"
+ "st1h { z11.h }, p0, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1h { z12.h }, p3, [x24]\n"
+ "st1h { z13.h }, p2, [x24, #1, MUL VL]\n"
+ "st1h { z14.h }, p1, [x24, #2, MUL VL]\n"
+ "st1h { z15.h }, p0, [x24, #3, MUL VL]\n"
+ "st1h { z16.h }, p3, [x23]\n"
+ "st1h { z17.h }, p2, [x23, #1, MUL VL]\n"
+ "st1h { z18.h }, p1, [x23, #2, MUL VL]\n"
+ "st1h { z19.h }, p0, [x23, #3, MUL VL]\n"
"36:" // Height 3: Writeback done
- "dech x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "dech x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 26b\n"
"b 74f\n"
"37:" // Height 4
- "mov x12, %x[bias]\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x11, %x[bias]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"38:" // Height 4: Column loop
- "mov x20, #0x0\n"
- "whilelt p3.h, x20, x11\n"
- "inch x20\n"
- "whilelt p2.h, x20, x11\n"
- "inch x20\n"
- "whilelt p1.h, x20, x11\n"
- "inch x20\n"
- "whilelt p0.h, x20, x11\n"
- "cbz x12, 39f\n"
- "ld1h { z8.h }, p4/Z, [x12]\n"
- "ld1h { z9.h }, p4/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.h, x19, x10\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x10\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x10\n"
+ "inch x19\n"
+ "whilelt p0.h, x19, x10\n"
+ "cbz x11, 39f\n"
+ "ld1h { z8.h }, p4/Z, [x11]\n"
+ "ld1h { z9.h }, p4/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z10.h }, p4/Z, [x11, #2, MUL VL]\n"
"mov z12.d, z8.d\n"
"mov z13.d, z9.d\n"
- "ld1h { z10.h }, p4/Z, [x12, #2, MUL VL]\n"
- "ld1h { z11.h }, p4/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z11.h }, p4/Z, [x11, #3, MUL VL]\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
+ "addvl x11, x11, #4\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"mov z20.d, z8.d\n"
@@ -575,26 +575,26 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"b 41f\n"
"39:" // Height 4: no bias
"tbz %x[flags], #0, 40f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "ld1h { z8.h }, p3/Z, [x9]\n"
- "add x23, x24, x20, LSL #1\n"
- "ld1h { z9.h }, p2/Z, [x9, #1, MUL VL]\n"
- "ld1h { z10.h }, p1/Z, [x9, #2, MUL VL]\n"
- "ld1h { z11.h }, p0/Z, [x9, #3, MUL VL]\n"
- "ld1h { z12.h }, p3/Z, [x25]\n"
- "ld1h { z13.h }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1h { z14.h }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1h { z15.h }, p0/Z, [x25, #3, MUL VL]\n"
- "ld1h { z16.h }, p3/Z, [x24]\n"
- "ld1h { z17.h }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1h { z18.h }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1h { z19.h }, p0/Z, [x24, #3, MUL VL]\n"
- "ld1h { z20.h }, p3/Z, [x23]\n"
- "ld1h { z21.h }, p2/Z, [x23, #1, MUL VL]\n"
- "ld1h { z22.h }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1h { z23.h }, p0/Z, [x23, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "ld1h { z8.h }, p3/Z, [x28]\n"
+ "ld1h { z9.h }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1h { z10.h }, p1/Z, [x28, #2, MUL VL]\n"
+ "ld1h { z11.h }, p0/Z, [x28, #3, MUL VL]\n"
+ "ld1h { z12.h }, p3/Z, [x24]\n"
+ "ld1h { z13.h }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z14.h }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z15.h }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z16.h }, p3/Z, [x23]\n"
+ "ld1h { z17.h }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z18.h }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z19.h }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1h { z20.h }, p3/Z, [x22]\n"
+ "ld1h { z21.h }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z22.h }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z23.h }, p0/Z, [x22, #3, MUL VL]\n"
"b 41f\n"
"40:" // Height 4: no accumulate
"mov z8.b, #0x0\n"
@@ -614,86 +614,86 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"mov z22.b, #0x0\n"
"mov z23.b, #0x0\n"
"41:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"42:" // Height 4: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 43f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 44f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 44f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
"b 44f\n"
"43:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
"44:" // Height 4: input setup done
- "subs x27, x27, #0x1\n"
- "ld1rh { z0.h }, p4/Z, [x26]\n"
- "ld1rh { z1.h }, p4/Z, [x25]\n"
- "ld1rh { z2.h }, p4/Z, [x24]\n"
- "ld1rh { z3.h }, p4/Z, [x23]\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
- "ld1h { z7.h }, p4/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1rh { z1.h }, p4/Z, [x24]\n"
+ "ld1rh { z2.h }, p4/Z, [x23]\n"
+ "ld1rh { z3.h }, p4/Z, [x22]\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
+ "ld1h { z7.h }, p4/Z, [x9, #1, MUL VL]\n"
"ble 46f\n"
"45:" // Height 4: Multiply loop: Main loop
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z12.h, p4/M, z6.h, z1.h\n"
- "add x26, x26, #0x2\n"
- "subs x27, x27, #0x1\n"
+ "add x25, x25, #0x2\n"
+ "subs x26, x26, #0x1\n"
"fmla z16.h, p4/M, z6.h, z2.h\n"
"fmla z20.h, p4/M, z6.h, z3.h\n"
- "ld1h { z6.h }, p4/Z, [x10, #2, MUL VL]\n"
- "add x25, x25, #0x2\n"
+ "ld1h { z6.h }, p4/Z, [x9, #2, MUL VL]\n"
+ "add x24, x24, #0x2\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
- "add x24, x24, #0x2\n"
"add x23, x23, #0x2\n"
+ "add x22, x22, #0x2\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
"fmla z21.h, p4/M, z7.h, z3.h\n"
- "ld1h { z7.h }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1h { z7.h }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"fmla z10.h, p4/M, z6.h, z0.h\n"
"fmla z14.h, p4/M, z6.h, z1.h\n"
"fmla z18.h, p4/M, z6.h, z2.h\n"
"fmla z22.h, p4/M, z6.h, z3.h\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
"fmla z11.h, p4/M, z7.h, z0.h\n"
"fmla z15.h, p4/M, z7.h, z1.h\n"
- "ld1rh { z0.h }, p4/Z, [x26]\n"
- "ld1rh { z1.h }, p4/Z, [x25]\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1rh { z1.h }, p4/Z, [x24]\n"
"fmla z19.h, p4/M, z7.h, z2.h\n"
"fmla z23.h, p4/M, z7.h, z3.h\n"
- "ld1rh { z2.h }, p4/Z, [x24]\n"
- "ld1rh { z3.h }, p4/Z, [x23]\n"
- "ld1h { z7.h }, p4/Z, [x10, #1, MUL VL]\n"
+ "ld1rh { z2.h }, p4/Z, [x23]\n"
+ "ld1rh { z3.h }, p4/Z, [x22]\n"
+ "ld1h { z7.h }, p4/Z, [x9, #1, MUL VL]\n"
"bgt 45b\n"
"46:" // Height 4: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z12.h, p4/M, z6.h, z1.h\n"
- "add x28, x28, #0x1\n"
+ "add x27, x27, #0x1\n"
"fmla z16.h, p4/M, z6.h, z2.h\n"
"fmla z20.h, p4/M, z6.h, z3.h\n"
- "ld1h { z6.h }, p4/Z, [x10, #2, MUL VL]\n"
- "cmp x28, x20\n"
+ "ld1h { z6.h }, p4/Z, [x9, #2, MUL VL]\n"
+ "cmp x27, x19\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
"fmla z21.h, p4/M, z7.h, z3.h\n"
- "ld1h { z7.h }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1h { z7.h }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"fmla z10.h, p4/M, z6.h, z0.h\n"
"fmla z14.h, p4/M, z6.h, z1.h\n"
"fmla z18.h, p4/M, z6.h, z2.h\n"
@@ -703,15 +703,15 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"fmla z19.h, p4/M, z7.h, z2.h\n"
"fmla z23.h, p4/M, z7.h, z3.h\n"
"bne 42b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
"tbz %x[flags], #1, 47f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p4/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p4/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z1.h }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z0.h }, p4/Z, [x19]\n"
"fmin z8.h, p4/M, z8.h, z1.h\n"
"fmin z9.h, p4/M, z9.h, z1.h\n"
"fmin z10.h, p4/M, z10.h, z1.h\n"
@@ -745,54 +745,54 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"fmax z22.h, p4/M, z22.h, z0.h\n"
"fmax z23.h, p4/M, z23.h, z0.h\n"
"47:" // Height 4: No activation
- "st1h { z8.h }, p3, [x9]\n"
- "st1h { z9.h }, p2, [x9, #1, MUL VL]\n"
- "st1h { z10.h }, p1, [x9, #2, MUL VL]\n"
- "st1h { z11.h }, p0, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1h { z12.h }, p3, [x25]\n"
- "st1h { z13.h }, p2, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p1, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p0, [x25, #3, MUL VL]\n"
- "st1h { z16.h }, p3, [x24]\n"
- "st1h { z17.h }, p2, [x24, #1, MUL VL]\n"
- "st1h { z18.h }, p1, [x24, #2, MUL VL]\n"
- "st1h { z19.h }, p0, [x24, #3, MUL VL]\n"
- "st1h { z20.h }, p3, [x23]\n"
- "st1h { z21.h }, p2, [x23, #1, MUL VL]\n"
- "st1h { z22.h }, p1, [x23, #2, MUL VL]\n"
- "st1h { z23.h }, p0, [x23, #3, MUL VL]\n"
+ "st1h { z8.h }, p3, [x28]\n"
+ "st1h { z9.h }, p2, [x28, #1, MUL VL]\n"
+ "st1h { z10.h }, p1, [x28, #2, MUL VL]\n"
+ "st1h { z11.h }, p0, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1h { z12.h }, p3, [x24]\n"
+ "st1h { z13.h }, p2, [x24, #1, MUL VL]\n"
+ "st1h { z14.h }, p1, [x24, #2, MUL VL]\n"
+ "st1h { z15.h }, p0, [x24, #3, MUL VL]\n"
+ "st1h { z16.h }, p3, [x23]\n"
+ "st1h { z17.h }, p2, [x23, #1, MUL VL]\n"
+ "st1h { z18.h }, p1, [x23, #2, MUL VL]\n"
+ "st1h { z19.h }, p0, [x23, #3, MUL VL]\n"
+ "st1h { z20.h }, p3, [x22]\n"
+ "st1h { z21.h }, p2, [x22, #1, MUL VL]\n"
+ "st1h { z22.h }, p1, [x22, #2, MUL VL]\n"
+ "st1h { z23.h }, p0, [x22, #3, MUL VL]\n"
"48:" // Height 4: Writeback done
- "dech x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "dech x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 38b\n"
"b 74f\n"
"49:" // Height 5
- "mov x12, %x[bias]\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x11, %x[bias]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"50:" // Height 5: Column loop
- "mov x20, #0x0\n"
- "whilelt p3.h, x20, x11\n"
- "inch x20\n"
- "whilelt p2.h, x20, x11\n"
- "inch x20\n"
- "whilelt p1.h, x20, x11\n"
- "inch x20\n"
- "whilelt p0.h, x20, x11\n"
- "cbz x12, 51f\n"
- "ld1h { z8.h }, p4/Z, [x12]\n"
- "ld1h { z9.h }, p4/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.h, x19, x10\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x10\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x10\n"
+ "inch x19\n"
+ "whilelt p0.h, x19, x10\n"
+ "cbz x11, 51f\n"
+ "ld1h { z8.h }, p4/Z, [x11]\n"
+ "ld1h { z9.h }, p4/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z10.h }, p4/Z, [x11, #2, MUL VL]\n"
"mov z12.d, z8.d\n"
"mov z13.d, z9.d\n"
- "ld1h { z10.h }, p4/Z, [x12, #2, MUL VL]\n"
- "ld1h { z11.h }, p4/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z11.h }, p4/Z, [x11, #3, MUL VL]\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
+ "addvl x11, x11, #4\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"mov z20.d, z8.d\n"
@@ -806,31 +806,31 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"b 53f\n"
"51:" // Height 5: no bias
"tbz %x[flags], #0, 52f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "ld1h { z8.h }, p3/Z, [x9]\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
- "ld1h { z9.h }, p2/Z, [x9, #1, MUL VL]\n"
- "ld1h { z10.h }, p1/Z, [x9, #2, MUL VL]\n"
- "ld1h { z11.h }, p0/Z, [x9, #3, MUL VL]\n"
- "ld1h { z12.h }, p3/Z, [x25]\n"
- "ld1h { z13.h }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1h { z14.h }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1h { z15.h }, p0/Z, [x25, #3, MUL VL]\n"
- "ld1h { z16.h }, p3/Z, [x24]\n"
- "ld1h { z17.h }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1h { z18.h }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1h { z19.h }, p0/Z, [x24, #3, MUL VL]\n"
- "ld1h { z20.h }, p3/Z, [x23]\n"
- "ld1h { z21.h }, p2/Z, [x23, #1, MUL VL]\n"
- "ld1h { z22.h }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1h { z23.h }, p0/Z, [x23, #3, MUL VL]\n"
- "ld1h { z24.h }, p3/Z, [x22]\n"
- "ld1h { z25.h }, p2/Z, [x22, #1, MUL VL]\n"
- "ld1h { z26.h }, p1/Z, [x22, #2, MUL VL]\n"
- "ld1h { z27.h }, p0/Z, [x22, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "ld1h { z8.h }, p3/Z, [x28]\n"
+ "ld1h { z9.h }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1h { z10.h }, p1/Z, [x28, #2, MUL VL]\n"
+ "ld1h { z11.h }, p0/Z, [x28, #3, MUL VL]\n"
+ "ld1h { z12.h }, p3/Z, [x24]\n"
+ "ld1h { z13.h }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z14.h }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z15.h }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z16.h }, p3/Z, [x23]\n"
+ "ld1h { z17.h }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z18.h }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z19.h }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1h { z20.h }, p3/Z, [x22]\n"
+ "ld1h { z21.h }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z22.h }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z23.h }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1h { z24.h }, p3/Z, [x21]\n"
+ "ld1h { z25.h }, p2/Z, [x21, #1, MUL VL]\n"
+ "ld1h { z26.h }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1h { z27.h }, p0/Z, [x21, #3, MUL VL]\n"
"b 53f\n"
"52:" // Height 5: no accumulate
"mov z8.b, #0x0\n"
@@ -854,98 +854,98 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"mov z26.b, #0x0\n"
"mov z27.b, #0x0\n"
"53:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"54:" // Height 5: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 55f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 56f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
- "add x22, x22, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 56f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
"b 56f\n"
"55:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
"56:" // Height 5: input setup done
- "subs x27, x27, #0x1\n"
- "ld1rh { z0.h }, p4/Z, [x26]\n"
- "ld1rh { z1.h }, p4/Z, [x25]\n"
- "ld1rh { z2.h }, p4/Z, [x24]\n"
- "ld1rh { z3.h }, p4/Z, [x23]\n"
- "ld1rh { z4.h }, p4/Z, [x22]\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
- "ld1h { z7.h }, p4/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1rh { z1.h }, p4/Z, [x24]\n"
+ "ld1rh { z2.h }, p4/Z, [x23]\n"
+ "ld1rh { z3.h }, p4/Z, [x22]\n"
+ "ld1rh { z4.h }, p4/Z, [x21]\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
+ "ld1h { z7.h }, p4/Z, [x9, #1, MUL VL]\n"
"ble 58f\n"
"57:" // Height 5: Multiply loop: Main loop
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z12.h, p4/M, z6.h, z1.h\n"
- "add x26, x26, #0x2\n"
- "subs x27, x27, #0x1\n"
+ "add x25, x25, #0x2\n"
+ "subs x26, x26, #0x1\n"
"fmla z16.h, p4/M, z6.h, z2.h\n"
"fmla z20.h, p4/M, z6.h, z3.h\n"
- "add x25, x25, #0x2\n"
"add x24, x24, #0x2\n"
+ "add x23, x23, #0x2\n"
"fmla z24.h, p4/M, z6.h, z4.h\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
- "ld1h { z6.h }, p4/Z, [x10, #2, MUL VL]\n"
- "add x23, x23, #0x2\n"
+ "ld1h { z6.h }, p4/Z, [x9, #2, MUL VL]\n"
+ "add x22, x22, #0x2\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
- "add x22, x22, #0x2\n"
+ "add x21, x21, #0x2\n"
"fmla z21.h, p4/M, z7.h, z3.h\n"
"fmla z25.h, p4/M, z7.h, z4.h\n"
- "ld1h { z7.h }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1h { z7.h }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"fmla z10.h, p4/M, z6.h, z0.h\n"
"fmla z14.h, p4/M, z6.h, z1.h\n"
"fmla z18.h, p4/M, z6.h, z2.h\n"
"fmla z22.h, p4/M, z6.h, z3.h\n"
"fmla z26.h, p4/M, z6.h, z4.h\n"
"fmla z11.h, p4/M, z7.h, z0.h\n"
- "ld1rh { z0.h }, p4/Z, [x26]\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
"fmla z15.h, p4/M, z7.h, z1.h\n"
"fmla z19.h, p4/M, z7.h, z2.h\n"
- "ld1rh { z1.h }, p4/Z, [x25]\n"
- "ld1rh { z2.h }, p4/Z, [x24]\n"
+ "ld1rh { z1.h }, p4/Z, [x24]\n"
+ "ld1rh { z2.h }, p4/Z, [x23]\n"
"fmla z23.h, p4/M, z7.h, z3.h\n"
"fmla z27.h, p4/M, z7.h, z4.h\n"
- "ld1rh { z3.h }, p4/Z, [x23]\n"
- "ld1rh { z4.h }, p4/Z, [x22]\n"
- "ld1h { z7.h }, p4/Z, [x10, #1, MUL VL]\n"
+ "ld1rh { z3.h }, p4/Z, [x22]\n"
+ "ld1rh { z4.h }, p4/Z, [x21]\n"
+ "ld1h { z7.h }, p4/Z, [x9, #1, MUL VL]\n"
"bgt 57b\n"
"58:" // Height 5: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z12.h, p4/M, z6.h, z1.h\n"
- "add x28, x28, #0x1\n"
+ "add x27, x27, #0x1\n"
"fmla z16.h, p4/M, z6.h, z2.h\n"
"fmla z20.h, p4/M, z6.h, z3.h\n"
- "cmp x28, x20\n"
+ "cmp x27, x19\n"
"fmla z24.h, p4/M, z6.h, z4.h\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
- "ld1h { z6.h }, p4/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z6.h }, p4/Z, [x9, #2, MUL VL]\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
"fmla z21.h, p4/M, z7.h, z3.h\n"
"fmla z25.h, p4/M, z7.h, z4.h\n"
- "ld1h { z7.h }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1h { z7.h }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"fmla z10.h, p4/M, z6.h, z0.h\n"
"fmla z14.h, p4/M, z6.h, z1.h\n"
"fmla z18.h, p4/M, z6.h, z2.h\n"
@@ -957,16 +957,16 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"fmla z23.h, p4/M, z7.h, z3.h\n"
"fmla z27.h, p4/M, z7.h, z4.h\n"
"bne 54b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
"tbz %x[flags], #1, 59f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p4/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p4/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z1.h }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z0.h }, p4/Z, [x19]\n"
"fmin z8.h, p4/M, z8.h, z1.h\n"
"fmin z9.h, p4/M, z9.h, z1.h\n"
"fmin z10.h, p4/M, z10.h, z1.h\n"
@@ -1008,61 +1008,61 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"fmax z26.h, p4/M, z26.h, z0.h\n"
"fmax z27.h, p4/M, z27.h, z0.h\n"
"59:" // Height 5: No activation
- "st1h { z8.h }, p3, [x9]\n"
- "st1h { z9.h }, p2, [x9, #1, MUL VL]\n"
- "st1h { z10.h }, p1, [x9, #2, MUL VL]\n"
- "st1h { z11.h }, p0, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1h { z12.h }, p3, [x25]\n"
- "st1h { z13.h }, p2, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p1, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p0, [x25, #3, MUL VL]\n"
- "st1h { z16.h }, p3, [x24]\n"
- "st1h { z17.h }, p2, [x24, #1, MUL VL]\n"
- "st1h { z18.h }, p1, [x24, #2, MUL VL]\n"
- "st1h { z19.h }, p0, [x24, #3, MUL VL]\n"
- "st1h { z20.h }, p3, [x23]\n"
- "st1h { z21.h }, p2, [x23, #1, MUL VL]\n"
- "st1h { z22.h }, p1, [x23, #2, MUL VL]\n"
- "st1h { z23.h }, p0, [x23, #3, MUL VL]\n"
- "st1h { z24.h }, p3, [x22]\n"
- "st1h { z25.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z26.h }, p1, [x22, #2, MUL VL]\n"
- "st1h { z27.h }, p0, [x22, #3, MUL VL]\n"
+ "st1h { z8.h }, p3, [x28]\n"
+ "st1h { z9.h }, p2, [x28, #1, MUL VL]\n"
+ "st1h { z10.h }, p1, [x28, #2, MUL VL]\n"
+ "st1h { z11.h }, p0, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1h { z12.h }, p3, [x24]\n"
+ "st1h { z13.h }, p2, [x24, #1, MUL VL]\n"
+ "st1h { z14.h }, p1, [x24, #2, MUL VL]\n"
+ "st1h { z15.h }, p0, [x24, #3, MUL VL]\n"
+ "st1h { z16.h }, p3, [x23]\n"
+ "st1h { z17.h }, p2, [x23, #1, MUL VL]\n"
+ "st1h { z18.h }, p1, [x23, #2, MUL VL]\n"
+ "st1h { z19.h }, p0, [x23, #3, MUL VL]\n"
+ "st1h { z20.h }, p3, [x22]\n"
+ "st1h { z21.h }, p2, [x22, #1, MUL VL]\n"
+ "st1h { z22.h }, p1, [x22, #2, MUL VL]\n"
+ "st1h { z23.h }, p0, [x22, #3, MUL VL]\n"
+ "st1h { z24.h }, p3, [x21]\n"
+ "st1h { z25.h }, p2, [x21, #1, MUL VL]\n"
+ "st1h { z26.h }, p1, [x21, #2, MUL VL]\n"
+ "st1h { z27.h }, p0, [x21, #3, MUL VL]\n"
"60:" // Height 5: Writeback done
- "dech x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "dech x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 50b\n"
"b 74f\n"
"61:" // Height 6
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0xc\n"
- "mov x12, %x[bias]\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x19, #0xc\n"
+ "mov x11, %x[bias]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"62:" // Height 6: Column loop
- "mov x20, #0x0\n"
- "whilelt p3.h, x20, x11\n"
- "inch x20\n"
- "whilelt p2.h, x20, x11\n"
- "inch x20\n"
- "whilelt p1.h, x20, x11\n"
- "inch x20\n"
- "whilelt p0.h, x20, x11\n"
- "cbz x12, 63f\n"
- "ld1h { z8.h }, p4/Z, [x12]\n"
- "ld1h { z9.h }, p4/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.h, x19, x10\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x10\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x10\n"
+ "inch x19\n"
+ "whilelt p0.h, x19, x10\n"
+ "cbz x11, 63f\n"
+ "ld1h { z8.h }, p4/Z, [x11]\n"
+ "ld1h { z9.h }, p4/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z10.h }, p4/Z, [x11, #2, MUL VL]\n"
"mov z12.d, z8.d\n"
"mov z13.d, z9.d\n"
- "ld1h { z10.h }, p4/Z, [x12, #2, MUL VL]\n"
- "ld1h { z11.h }, p4/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z11.h }, p4/Z, [x11, #3, MUL VL]\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
+ "addvl x11, x11, #4\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"mov z20.d, z8.d\n"
@@ -1080,36 +1080,36 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"b 65f\n"
"63:" // Height 6: no bias
"tbz %x[flags], #0, 64f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "ld1h { z8.h }, p3/Z, [x9]\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
- "ld1h { z9.h }, p2/Z, [x9, #1, MUL VL]\n"
- "ld1h { z10.h }, p1/Z, [x9, #2, MUL VL]\n"
- "add x21, x22, x20, LSL #1\n"
- "ld1h { z11.h }, p0/Z, [x9, #3, MUL VL]\n"
- "ld1h { z12.h }, p3/Z, [x25]\n"
- "ld1h { z13.h }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1h { z14.h }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1h { z15.h }, p0/Z, [x25, #3, MUL VL]\n"
- "ld1h { z16.h }, p3/Z, [x24]\n"
- "ld1h { z17.h }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1h { z18.h }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1h { z19.h }, p0/Z, [x24, #3, MUL VL]\n"
- "ld1h { z20.h }, p3/Z, [x23]\n"
- "ld1h { z21.h }, p2/Z, [x23, #1, MUL VL]\n"
- "ld1h { z22.h }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1h { z23.h }, p0/Z, [x23, #3, MUL VL]\n"
- "ld1h { z24.h }, p3/Z, [x22]\n"
- "ld1h { z25.h }, p2/Z, [x22, #1, MUL VL]\n"
- "ld1h { z26.h }, p1/Z, [x22, #2, MUL VL]\n"
- "ld1h { z27.h }, p0/Z, [x22, #3, MUL VL]\n"
- "ld1h { z28.h }, p3/Z, [x21]\n"
- "ld1h { z29.h }, p2/Z, [x21, #1, MUL VL]\n"
- "ld1h { z30.h }, p1/Z, [x21, #2, MUL VL]\n"
- "ld1h { z31.h }, p0/Z, [x21, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "ld1h { z8.h }, p3/Z, [x28]\n"
+ "ld1h { z9.h }, p2/Z, [x28, #1, MUL VL]\n"
+ "add x20, x21, x19, LSL #1\n"
+ "ld1h { z10.h }, p1/Z, [x28, #2, MUL VL]\n"
+ "ld1h { z11.h }, p0/Z, [x28, #3, MUL VL]\n"
+ "ld1h { z12.h }, p3/Z, [x24]\n"
+ "ld1h { z13.h }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z14.h }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z15.h }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z16.h }, p3/Z, [x23]\n"
+ "ld1h { z17.h }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z18.h }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z19.h }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1h { z20.h }, p3/Z, [x22]\n"
+ "ld1h { z21.h }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z22.h }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z23.h }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1h { z24.h }, p3/Z, [x21]\n"
+ "ld1h { z25.h }, p2/Z, [x21, #1, MUL VL]\n"
+ "ld1h { z26.h }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1h { z27.h }, p0/Z, [x21, #3, MUL VL]\n"
+ "ld1h { z28.h }, p3/Z, [x20]\n"
+ "ld1h { z29.h }, p2/Z, [x20, #1, MUL VL]\n"
+ "ld1h { z30.h }, p1/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z31.h }, p0/Z, [x20, #3, MUL VL]\n"
"b 65f\n"
"64:" // Height 6: no accumulate
"mov z8.b, #0x0\n"
@@ -1137,110 +1137,110 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"mov z30.b, #0x0\n"
"mov z31.b, #0x0\n"
"65:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"66:" // Height 6: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 67f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 68f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
- "add x22, x22, x20, LSL #1\n"
- "add x21, x21, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 68f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
+ "add x20, x20, x19, LSL #1\n"
"b 68f\n"
"67:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
"68:" // Height 6: input setup done
- "subs x27, x27, #0x1\n"
- "ld1rh { z0.h }, p4/Z, [x26]\n"
- "ld1rh { z1.h }, p4/Z, [x25]\n"
- "ld1rh { z2.h }, p4/Z, [x24]\n"
- "ld1rh { z3.h }, p4/Z, [x23]\n"
- "ld1rh { z4.h }, p4/Z, [x22]\n"
- "ld1rh { z5.h }, p4/Z, [x21]\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
- "ld1h { z7.h }, p4/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1rh { z1.h }, p4/Z, [x24]\n"
+ "ld1rh { z2.h }, p4/Z, [x23]\n"
+ "ld1rh { z3.h }, p4/Z, [x22]\n"
+ "ld1rh { z4.h }, p4/Z, [x21]\n"
+ "ld1rh { z5.h }, p4/Z, [x20]\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
+ "ld1h { z7.h }, p4/Z, [x9, #1, MUL VL]\n"
"ble 70f\n"
"69:" // Height 6: Multiply loop: Main loop
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z12.h, p4/M, z6.h, z1.h\n"
- "add x26, x26, #0x2\n"
- "subs x27, x27, #0x1\n"
+ "add x25, x25, #0x2\n"
+ "subs x26, x26, #0x1\n"
"fmla z16.h, p4/M, z6.h, z2.h\n"
"fmla z20.h, p4/M, z6.h, z3.h\n"
- "add x25, x25, #0x2\n"
"add x24, x24, #0x2\n"
+ "add x23, x23, #0x2\n"
"fmla z24.h, p4/M, z6.h, z4.h\n"
"fmla z28.h, p4/M, z6.h, z5.h\n"
- "ld1h { z6.h }, p4/Z, [x10, #2, MUL VL]\n"
- "add x23, x23, #0x2\n"
+ "ld1h { z6.h }, p4/Z, [x9, #2, MUL VL]\n"
+ "add x22, x22, #0x2\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
- "add x22, x22, #0x2\n"
"add x21, x21, #0x2\n"
+ "add x20, x20, #0x2\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
"fmla z21.h, p4/M, z7.h, z3.h\n"
"fmla z25.h, p4/M, z7.h, z4.h\n"
"fmla z29.h, p4/M, z7.h, z5.h\n"
- "ld1h { z7.h }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1h { z7.h }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"fmla z10.h, p4/M, z6.h, z0.h\n"
"fmla z14.h, p4/M, z6.h, z1.h\n"
"fmla z18.h, p4/M, z6.h, z2.h\n"
"fmla z22.h, p4/M, z6.h, z3.h\n"
"fmla z26.h, p4/M, z6.h, z4.h\n"
"fmla z30.h, p4/M, z6.h, z5.h\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
"fmla z11.h, p4/M, z7.h, z0.h\n"
"fmla z15.h, p4/M, z7.h, z1.h\n"
- "ld1rh { z0.h }, p4/Z, [x26]\n"
- "ld1rh { z1.h }, p4/Z, [x25]\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1rh { z1.h }, p4/Z, [x24]\n"
"fmla z19.h, p4/M, z7.h, z2.h\n"
"fmla z23.h, p4/M, z7.h, z3.h\n"
- "ld1rh { z2.h }, p4/Z, [x24]\n"
- "ld1rh { z3.h }, p4/Z, [x23]\n"
+ "ld1rh { z2.h }, p4/Z, [x23]\n"
+ "ld1rh { z3.h }, p4/Z, [x22]\n"
"fmla z27.h, p4/M, z7.h, z4.h\n"
"fmla z31.h, p4/M, z7.h, z5.h\n"
- "ld1rh { z4.h }, p4/Z, [x22]\n"
- "ld1rh { z5.h }, p4/Z, [x21]\n"
- "ld1h { z7.h }, p4/Z, [x10, #1, MUL VL]\n"
+ "ld1rh { z4.h }, p4/Z, [x21]\n"
+ "ld1rh { z5.h }, p4/Z, [x20]\n"
+ "ld1h { z7.h }, p4/Z, [x9, #1, MUL VL]\n"
"bgt 69b\n"
"70:" // Height 6: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z12.h, p4/M, z6.h, z1.h\n"
- "add x28, x28, #0x1\n"
+ "add x27, x27, #0x1\n"
"fmla z16.h, p4/M, z6.h, z2.h\n"
"fmla z20.h, p4/M, z6.h, z3.h\n"
- "cmp x28, x20\n"
+ "cmp x27, x19\n"
"fmla z24.h, p4/M, z6.h, z4.h\n"
"fmla z28.h, p4/M, z6.h, z5.h\n"
- "ld1h { z6.h }, p4/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z6.h }, p4/Z, [x9, #2, MUL VL]\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
"fmla z21.h, p4/M, z7.h, z3.h\n"
"fmla z25.h, p4/M, z7.h, z4.h\n"
"fmla z29.h, p4/M, z7.h, z5.h\n"
- "ld1h { z7.h }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1h { z7.h }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"fmla z10.h, p4/M, z6.h, z0.h\n"
"fmla z14.h, p4/M, z6.h, z1.h\n"
"fmla z18.h, p4/M, z6.h, z2.h\n"
@@ -1254,17 +1254,17 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"fmla z27.h, p4/M, z7.h, z4.h\n"
"fmla z31.h, p4/M, z7.h, z5.h\n"
"bne 66b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
"tbz %x[flags], #1, 71f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p4/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p4/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z1.h }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z0.h }, p4/Z, [x19]\n"
"fmin z8.h, p4/M, z8.h, z1.h\n"
"fmin z9.h, p4/M, z9.h, z1.h\n"
"fmin z10.h, p4/M, z10.h, z1.h\n"
@@ -1314,51 +1314,51 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"fmax z30.h, p4/M, z30.h, z0.h\n"
"fmax z31.h, p4/M, z31.h, z0.h\n"
"71:" // Height 6: No activation
- "st1h { z8.h }, p3, [x9]\n"
- "st1h { z9.h }, p2, [x9, #1, MUL VL]\n"
- "st1h { z10.h }, p1, [x9, #2, MUL VL]\n"
- "st1h { z11.h }, p0, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1h { z12.h }, p3, [x25]\n"
- "st1h { z13.h }, p2, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p1, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p0, [x25, #3, MUL VL]\n"
- "st1h { z16.h }, p3, [x24]\n"
- "st1h { z17.h }, p2, [x24, #1, MUL VL]\n"
- "st1h { z18.h }, p1, [x24, #2, MUL VL]\n"
- "st1h { z19.h }, p0, [x24, #3, MUL VL]\n"
- "st1h { z20.h }, p3, [x23]\n"
- "st1h { z21.h }, p2, [x23, #1, MUL VL]\n"
- "st1h { z22.h }, p1, [x23, #2, MUL VL]\n"
- "st1h { z23.h }, p0, [x23, #3, MUL VL]\n"
- "st1h { z24.h }, p3, [x22]\n"
- "st1h { z25.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z26.h }, p1, [x22, #2, MUL VL]\n"
- "st1h { z27.h }, p0, [x22, #3, MUL VL]\n"
- "st1h { z28.h }, p3, [x21]\n"
- "st1h { z29.h }, p2, [x21, #1, MUL VL]\n"
- "st1h { z30.h }, p1, [x21, #2, MUL VL]\n"
- "st1h { z31.h }, p0, [x21, #3, MUL VL]\n"
+ "st1h { z8.h }, p3, [x28]\n"
+ "st1h { z9.h }, p2, [x28, #1, MUL VL]\n"
+ "st1h { z10.h }, p1, [x28, #2, MUL VL]\n"
+ "st1h { z11.h }, p0, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1h { z12.h }, p3, [x24]\n"
+ "st1h { z13.h }, p2, [x24, #1, MUL VL]\n"
+ "st1h { z14.h }, p1, [x24, #2, MUL VL]\n"
+ "st1h { z15.h }, p0, [x24, #3, MUL VL]\n"
+ "st1h { z16.h }, p3, [x23]\n"
+ "st1h { z17.h }, p2, [x23, #1, MUL VL]\n"
+ "st1h { z18.h }, p1, [x23, #2, MUL VL]\n"
+ "st1h { z19.h }, p0, [x23, #3, MUL VL]\n"
+ "st1h { z20.h }, p3, [x22]\n"
+ "st1h { z21.h }, p2, [x22, #1, MUL VL]\n"
+ "st1h { z22.h }, p1, [x22, #2, MUL VL]\n"
+ "st1h { z23.h }, p0, [x22, #3, MUL VL]\n"
+ "st1h { z24.h }, p3, [x21]\n"
+ "st1h { z25.h }, p2, [x21, #1, MUL VL]\n"
+ "st1h { z26.h }, p1, [x21, #2, MUL VL]\n"
+ "st1h { z27.h }, p0, [x21, #3, MUL VL]\n"
+ "st1h { z28.h }, p3, [x20]\n"
+ "st1h { z29.h }, p2, [x20, #1, MUL VL]\n"
+ "st1h { z30.h }, p1, [x20, #2, MUL VL]\n"
+ "st1h { z31.h }, p0, [x20, #3, MUL VL]\n"
"72:" // Height 6: Writeback done
- "dech x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "dech x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 62b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 74f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 73f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"73:" // Update direct input
- "mov x20, #0xc\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0xc\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"74:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
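The hunks above restore x19 throughout this kernel: each general-purpose register shifts back down by one (x20..x28 resume the roles held by x19..x27), and "x19" rejoins the asm clobber list, so the compiler saves and restores that callee-saved register around the block. A minimal sketch of the clobber pattern being reinstated, assuming a generic AArch64 GCC/Clang target; add_n and its loop are hypothetical and not taken from this patch:

#include <cstdint>

// Illustrative only: an inline-asm block that, like the kernels above, uses
// x19 as scratch and therefore declares it as a clobber. Listing "x19" makes
// the compiler spill and reload the callee-saved register around the asm.
static uint64_t add_n(uint64_t base, uint64_t n)
{
    uint64_t out = base;
    __asm__ volatile(
        "mov x19, %x[n]\n"             // hypothetical loop counter held in x19
        "cbz x19, 2f\n"
        "1:\n"
        "add %x[out], %x[out], #1\n"
        "subs x19, x19, #1\n"
        "bne 1b\n"
        "2:\n"
        : [out] "+&r" (out)
        : [n] "r" (n)
        : "cc", "x19"                  // x19 clobbered, as in the revert above
    );
    return out;
}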
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL/generic.cpp
index 6f0b3e0008..09d5d8d96d 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -102,32 +102,32 @@ void sve_hybrid_fp16_mla_6x4VL (
"cmp %x[M], #0x2\n"
"bgt 27f\n"
"beq 14f\n"
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x9, %x[bias]\n"
+ "mov x28, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.h, x20, x11\n"
- "inch x20\n"
- "whilelt p3.h, x20, x11\n"
- "inch x20\n"
- "whilelt p2.h, x20, x11\n"
- "inch x20\n"
- "whilelt p1.h, x20, x11\n"
- "cbz x12, 3f\n"
- "ld1h { z8.h }, p5/Z, [x12]\n"
- "ld1h { z9.h }, p5/Z, [x12, #1, MUL VL]\n"
- "ld1h { z10.h }, p5/Z, [x12, #2, MUL VL]\n"
- "ld1h { z11.h }, p5/Z, [x12, #3, MUL VL]\n"
- "addvl x12, x12, #4\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.h, x19, x11\n"
+ "inch x19\n"
+ "whilelt p3.h, x19, x11\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x11\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x11\n"
+ "cbz x9, 3f\n"
+ "ld1h { z8.h }, p5/Z, [x9]\n"
+ "ld1h { z9.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1h { z10.h }, p5/Z, [x9, #2, MUL VL]\n"
+ "ld1h { z11.h }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"b 5f\n"
"3:" // Height 1: no bias
"tbz %x[flags], #0, 4f\n"
- "ld1h { z8.h }, p4/Z, [x9]\n"
- "ld1h { z9.h }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1h { z10.h }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1h { z11.h }, p1/Z, [x9, #3, MUL VL]\n"
+ "ld1h { z8.h }, p4/Z, [x28]\n"
+ "ld1h { z9.h }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1h { z10.h }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1h { z11.h }, p1/Z, [x28, #3, MUL VL]\n"
"b 5f\n"
"4:" // Height 1: no accumulate
"mov z8.b, #0x0\n"
@@ -135,252 +135,252 @@ void sve_hybrid_fp16_mla_6x4VL (
"mov z10.b, #0x0\n"
"mov z11.b, #0x0\n"
"5:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"6:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 7f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 8f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 8f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
"b 8f\n"
"7:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"8:" // Height 1: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"ble 10f\n"
"9:" // Height 1: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "fmla z8.h, z6.h, z0.h[0]\n"
+ "whilelt p0.h, XZR, x26\n"
"ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
- "fmla z9.h, z7.h, z0.h[0]\n"
+ "sub x26, x26, #0x8\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
+ "fmla z8.h, z6.h, z0.h[0]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "fmla z10.h, z6.h, z0.h[0]\n"
+ "cmp x26, #0x8\n"
+ "fmla z9.h, z7.h, z0.h[0]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
- "fmla z11.h, z7.h, z0.h[0]\n"
+ "add x25, x25, #0x10\n"
+ "fmla z10.h, z6.h, z0.h[0]\n"
"ld1h { z6.h }, p5/Z, [x10, #4, MUL VL]\n"
- "fmla z8.h, z6.h, z0.h[1]\n"
+ "fmla z11.h, z7.h, z0.h[0]\n"
"ld1h { z7.h }, p5/Z, [x10, #5, MUL VL]\n"
- "fmla z9.h, z7.h, z0.h[1]\n"
+ "fmla z8.h, z6.h, z0.h[1]\n"
"ld1h { z6.h }, p5/Z, [x10, #6, MUL VL]\n"
- "fmla z10.h, z6.h, z0.h[1]\n"
+ "fmla z9.h, z7.h, z0.h[1]\n"
"ld1h { z7.h }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #16\n"
- "fmla z11.h, z7.h, z0.h[1]\n"
+ "fmla z10.h, z6.h, z0.h[1]\n"
"ld1h { z6.h }, p5/Z, [x10, #-8, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[1]\n"
"ld1h { z7.h }, p5/Z, [x10, #-7, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[2]\n"
- "fmla z9.h, z7.h, z0.h[2]\n"
"ld1h { z6.h }, p5/Z, [x10, #-6, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[2]\n"
"ld1h { z7.h }, p5/Z, [x10, #-5, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[2]\n"
- "fmla z11.h, z7.h, z0.h[2]\n"
"ld1h { z6.h }, p5/Z, [x10, #-4, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[2]\n"
"ld1h { z7.h }, p5/Z, [x10, #-3, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[3]\n"
- "fmla z9.h, z7.h, z0.h[3]\n"
"ld1h { z6.h }, p5/Z, [x10, #-2, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[3]\n"
"ld1h { z7.h }, p5/Z, [x10, #-1, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[3]\n"
- "fmla z11.h, z7.h, z0.h[3]\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
+ "fmla z11.h, z7.h, z0.h[3]\n"
"ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[4]\n"
- "fmla z9.h, z7.h, z0.h[4]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[4]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[4]\n"
- "fmla z11.h, z7.h, z0.h[4]\n"
"ld1h { z6.h }, p5/Z, [x10, #4, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[4]\n"
"ld1h { z7.h }, p5/Z, [x10, #5, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[5]\n"
- "fmla z9.h, z7.h, z0.h[5]\n"
"ld1h { z6.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[5]\n"
"ld1h { z7.h }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #16\n"
"fmla z10.h, z6.h, z0.h[5]\n"
- "fmla z11.h, z7.h, z0.h[5]\n"
"ld1h { z6.h }, p5/Z, [x10, #-8, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[5]\n"
"ld1h { z7.h }, p5/Z, [x10, #-7, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[6]\n"
- "fmla z9.h, z7.h, z0.h[6]\n"
"ld1h { z6.h }, p5/Z, [x10, #-6, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[6]\n"
"ld1h { z7.h }, p5/Z, [x10, #-5, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[6]\n"
- "fmla z11.h, z7.h, z0.h[6]\n"
"ld1h { z6.h }, p5/Z, [x10, #-4, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[6]\n"
"ld1h { z7.h }, p5/Z, [x10, #-3, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[7]\n"
- "fmla z9.h, z7.h, z0.h[7]\n"
"ld1h { z6.h }, p5/Z, [x10, #-2, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[7]\n"
"ld1h { z7.h }, p5/Z, [x10, #-1, MUL VL]\n"
- "sub x27, x27, #0x8\n"
- "cmp x27, #0x8\n"
"fmla z10.h, z6.h, z0.h[7]\n"
"fmla z11.h, z7.h, z0.h[7]\n"
- "add x26, x26, #0x10\n"
"bgt 9b\n"
"10:" // Height 1: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "fmla z8.h, z6.h, z0.h[0]\n"
+ "whilelt p0.h, XZR, x26\n"
"ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
- "fmla z9.h, z7.h, z0.h[0]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
+ "fmla z8.h, z6.h, z0.h[0]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "fmla z9.h, z7.h, z0.h[0]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"fmla z10.h, z6.h, z0.h[0]\n"
"fmla z11.h, z7.h, z0.h[0]\n"
- "addvl x10, x10, #4\n"
"ble 11f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[1]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z9.h, z7.h, z0.h[1]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x1\n"
"fmla z10.h, z6.h, z0.h[1]\n"
- "fmla z11.h, z7.h, z0.h[1]\n"
"addvl x10, x10, #4\n"
+ "fmla z11.h, z7.h, z0.h[1]\n"
"ble 11f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[2]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z9.h, z7.h, z0.h[2]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x1\n"
"fmla z10.h, z6.h, z0.h[2]\n"
- "fmla z11.h, z7.h, z0.h[2]\n"
"addvl x10, x10, #4\n"
+ "fmla z11.h, z7.h, z0.h[2]\n"
"ble 11f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[3]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z9.h, z7.h, z0.h[3]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x1\n"
"fmla z10.h, z6.h, z0.h[3]\n"
- "fmla z11.h, z7.h, z0.h[3]\n"
"addvl x10, x10, #4\n"
+ "fmla z11.h, z7.h, z0.h[3]\n"
"ble 11f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[4]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z9.h, z7.h, z0.h[4]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x1\n"
"fmla z10.h, z6.h, z0.h[4]\n"
- "fmla z11.h, z7.h, z0.h[4]\n"
"addvl x10, x10, #4\n"
+ "fmla z11.h, z7.h, z0.h[4]\n"
"ble 11f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[5]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z9.h, z7.h, z0.h[5]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x1\n"
"fmla z10.h, z6.h, z0.h[5]\n"
- "fmla z11.h, z7.h, z0.h[5]\n"
"addvl x10, x10, #4\n"
+ "fmla z11.h, z7.h, z0.h[5]\n"
"ble 11f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[6]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z9.h, z7.h, z0.h[6]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x1\n"
"fmla z10.h, z6.h, z0.h[6]\n"
- "fmla z11.h, z7.h, z0.h[6]\n"
"addvl x10, x10, #4\n"
+ "fmla z11.h, z7.h, z0.h[6]\n"
"ble 11f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[7]\n"
- "fmla z9.h, z7.h, z0.h[7]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[7]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"fmla z10.h, z6.h, z0.h[7]\n"
"fmla z11.h, z7.h, z0.h[7]\n"
- "addvl x10, x10, #4\n"
"11:" // Height 1: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 6b\n"
"tbz %x[flags], #1, 12f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p5/Z, [x20]\n"
- "fmin z8.h, p5/M, z8.h, z1.h\n"
- "fmin z9.h, p5/M, z9.h, z1.h\n"
- "fmin z10.h, p5/M, z10.h, z1.h\n"
- "fmin z11.h, p5/M, z11.h, z1.h\n"
- "fmax z8.h, p5/M, z8.h, z0.h\n"
- "fmax z9.h, p5/M, z9.h, z0.h\n"
- "fmax z10.h, p5/M, z10.h, z0.h\n"
- "fmax z11.h, p5/M, z11.h, z0.h\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z1.h }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z0.h }, p5/Z, [x19]\n"
+ "fmin z8.h, p5/M, z8.h, z0.h\n"
+ "fmin z9.h, p5/M, z9.h, z0.h\n"
+ "fmin z10.h, p5/M, z10.h, z0.h\n"
+ "fmin z11.h, p5/M, z11.h, z0.h\n"
+ "fmax z8.h, p5/M, z8.h, z1.h\n"
+ "fmax z9.h, p5/M, z9.h, z1.h\n"
+ "fmax z10.h, p5/M, z10.h, z1.h\n"
+ "fmax z11.h, p5/M, z11.h, z1.h\n"
"12:" // Height 1: No activation
- "st1h { z8.h }, p4, [x9]\n"
- "st1h { z9.h }, p3, [x9, #1, MUL VL]\n"
- "st1h { z10.h }, p2, [x9, #2, MUL VL]\n"
- "st1h { z11.h }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "st1h { z8.h }, p4, [x28]\n"
+ "st1h { z9.h }, p3, [x28, #1, MUL VL]\n"
+ "st1h { z10.h }, p2, [x28, #2, MUL VL]\n"
+ "st1h { z11.h }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"13:" // Height 1: Writeback done
"dech x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 2b\n"
"b 80f\n"
"14:" // Height 2
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"15:" // Height 2: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.h, x20, x11\n"
- "inch x20\n"
- "whilelt p3.h, x20, x11\n"
- "inch x20\n"
- "whilelt p2.h, x20, x11\n"
- "inch x20\n"
- "whilelt p1.h, x20, x11\n"
- "cbz x12, 16f\n"
- "ld1h { z8.h }, p5/Z, [x12]\n"
- "ld1h { z9.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.h, x19, x11\n"
+ "inch x19\n"
+ "whilelt p3.h, x19, x11\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x11\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x11\n"
+ "cbz x9, 16f\n"
+ "ld1h { z8.h }, p5/Z, [x9]\n"
"mov z12.d, z8.d\n"
+ "ld1h { z9.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1h { z10.h }, p5/Z, [x9, #2, MUL VL]\n"
"mov z13.d, z9.d\n"
- "ld1h { z10.h }, p5/Z, [x12, #2, MUL VL]\n"
- "ld1h { z11.h }, p5/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z11.h }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
- "addvl x12, x12, #4\n"
"b 18f\n"
"16:" // Height 2: no bias
"tbz %x[flags], #0, 17f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "ld1h { z8.h }, p4/Z, [x9]\n"
- "ld1h { z9.h }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1h { z10.h }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1h { z11.h }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1h { z12.h }, p4/Z, [x25]\n"
- "ld1h { z13.h }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1h { z14.h }, p2/Z, [x25, #2, MUL VL]\n"
- "ld1h { z15.h }, p1/Z, [x25, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1h { z8.h }, p4/Z, [x28]\n"
+ "add x24, x28, x19, LSL #1\n"
+ "ld1h { z9.h }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1h { z10.h }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1h { z11.h }, p1/Z, [x28, #3, MUL VL]\n"
+ "ld1h { z12.h }, p4/Z, [x24]\n"
+ "ld1h { z13.h }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z14.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z15.h }, p1/Z, [x24, #3, MUL VL]\n"
"b 18f\n"
"17:" // Height 2: no accumulate
"mov z8.b, #0x0\n"
@@ -392,52 +392,52 @@ void sve_hybrid_fp16_mla_6x4VL (
"mov z14.b, #0x0\n"
"mov z15.b, #0x0\n"
"18:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"19:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 20f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 21f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 21f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
"b 21f\n"
"20:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
"21:" // Height 2: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"ble 23f\n"
"22:" // Height 2: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
- "sub x27, x27, #0x8\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
+ "whilelt p0.h, XZR, x26\n"
"ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x26, x26, #0x8\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
"fmla z8.h, z6.h, z0.h[0]\n"
- "fmla z12.h, z6.h, z1.h[0]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "cmp x26, #0x8\n"
"fmla z9.h, z7.h, z0.h[0]\n"
- "fmla z13.h, z7.h, z1.h[0]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "fmla z12.h, z6.h, z1.h[0]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z13.h, z7.h, z1.h[0]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[0]\n"
"fmla z14.h, z6.h, z1.h[0]\n"
"ld1h { z6.h }, p5/Z, [x10, #4, MUL VL]\n"
- "cmp x27, #0x8\n"
"fmla z11.h, z7.h, z0.h[0]\n"
"fmla z15.h, z7.h, z1.h[0]\n"
"ld1h { z7.h }, p5/Z, [x10, #5, MUL VL]\n"
- "add x26, x26, #0x10\n"
"fmla z8.h, z6.h, z0.h[1]\n"
"fmla z12.h, z6.h, z1.h[1]\n"
"ld1h { z6.h }, p5/Z, [x10, #6, MUL VL]\n"
- "add x25, x25, #0x10\n"
"fmla z9.h, z7.h, z0.h[1]\n"
"fmla z13.h, z7.h, z1.h[1]\n"
"ld1h { z7.h }, p5/Z, [x10, #7, MUL VL]\n"
@@ -521,216 +521,216 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z15.h, z7.h, z1.h[7]\n"
"bgt 22b\n"
"23:" // Height 2: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
+ "whilelt p0.h, XZR, x26\n"
"ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
"fmla z8.h, z6.h, z0.h[0]\n"
- "fmla z12.h, z6.h, z1.h[0]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
"fmla z9.h, z7.h, z0.h[0]\n"
- "fmla z13.h, z7.h, z1.h[0]\n"
+ "fmla z12.h, z6.h, z1.h[0]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z13.h, z7.h, z1.h[0]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"fmla z10.h, z6.h, z0.h[0]\n"
"fmla z14.h, z6.h, z1.h[0]\n"
- "addvl x10, x10, #4\n"
"fmla z11.h, z7.h, z0.h[0]\n"
"fmla z15.h, z7.h, z1.h[0]\n"
"ble 24f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[1]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.h, z6.h, z1.h[1]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.h, z7.h, z0.h[1]\n"
"fmla z13.h, z7.h, z1.h[1]\n"
- "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #4\n"
"fmla z10.h, z6.h, z0.h[1]\n"
"fmla z14.h, z6.h, z1.h[1]\n"
- "addvl x10, x10, #4\n"
"fmla z11.h, z7.h, z0.h[1]\n"
"fmla z15.h, z7.h, z1.h[1]\n"
"ble 24f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[2]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.h, z6.h, z1.h[2]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.h, z7.h, z0.h[2]\n"
"fmla z13.h, z7.h, z1.h[2]\n"
- "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #4\n"
"fmla z10.h, z6.h, z0.h[2]\n"
"fmla z14.h, z6.h, z1.h[2]\n"
- "addvl x10, x10, #4\n"
"fmla z11.h, z7.h, z0.h[2]\n"
"fmla z15.h, z7.h, z1.h[2]\n"
"ble 24f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[3]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.h, z6.h, z1.h[3]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.h, z7.h, z0.h[3]\n"
"fmla z13.h, z7.h, z1.h[3]\n"
- "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #4\n"
"fmla z10.h, z6.h, z0.h[3]\n"
"fmla z14.h, z6.h, z1.h[3]\n"
- "addvl x10, x10, #4\n"
"fmla z11.h, z7.h, z0.h[3]\n"
"fmla z15.h, z7.h, z1.h[3]\n"
"ble 24f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[4]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.h, z6.h, z1.h[4]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.h, z7.h, z0.h[4]\n"
"fmla z13.h, z7.h, z1.h[4]\n"
- "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #4\n"
"fmla z10.h, z6.h, z0.h[4]\n"
"fmla z14.h, z6.h, z1.h[4]\n"
- "addvl x10, x10, #4\n"
"fmla z11.h, z7.h, z0.h[4]\n"
"fmla z15.h, z7.h, z1.h[4]\n"
"ble 24f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[5]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.h, z6.h, z1.h[5]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.h, z7.h, z0.h[5]\n"
"fmla z13.h, z7.h, z1.h[5]\n"
- "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #4\n"
"fmla z10.h, z6.h, z0.h[5]\n"
"fmla z14.h, z6.h, z1.h[5]\n"
- "addvl x10, x10, #4\n"
"fmla z11.h, z7.h, z0.h[5]\n"
"fmla z15.h, z7.h, z1.h[5]\n"
"ble 24f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[6]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.h, z6.h, z1.h[6]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.h, z7.h, z0.h[6]\n"
"fmla z13.h, z7.h, z1.h[6]\n"
- "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #4\n"
"fmla z10.h, z6.h, z0.h[6]\n"
"fmla z14.h, z6.h, z1.h[6]\n"
- "addvl x10, x10, #4\n"
"fmla z11.h, z7.h, z0.h[6]\n"
"fmla z15.h, z7.h, z1.h[6]\n"
"ble 24f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[7]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z12.h, z6.h, z1.h[7]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.h, z7.h, z0.h[7]\n"
"fmla z13.h, z7.h, z1.h[7]\n"
- "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"fmla z10.h, z6.h, z0.h[7]\n"
"fmla z14.h, z6.h, z1.h[7]\n"
- "addvl x10, x10, #4\n"
"fmla z11.h, z7.h, z0.h[7]\n"
"fmla z15.h, z7.h, z1.h[7]\n"
"24:" // Height 2: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 19b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #1\n"
"tbz %x[flags], #1, 25f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p5/Z, [x20]\n"
- "fmin z8.h, p5/M, z8.h, z1.h\n"
- "fmin z9.h, p5/M, z9.h, z1.h\n"
- "fmin z10.h, p5/M, z10.h, z1.h\n"
- "fmin z11.h, p5/M, z11.h, z1.h\n"
- "fmin z12.h, p5/M, z12.h, z1.h\n"
- "fmin z13.h, p5/M, z13.h, z1.h\n"
- "fmin z14.h, p5/M, z14.h, z1.h\n"
- "fmin z15.h, p5/M, z15.h, z1.h\n"
- "fmax z8.h, p5/M, z8.h, z0.h\n"
- "fmax z9.h, p5/M, z9.h, z0.h\n"
- "fmax z10.h, p5/M, z10.h, z0.h\n"
- "fmax z11.h, p5/M, z11.h, z0.h\n"
- "fmax z12.h, p5/M, z12.h, z0.h\n"
- "fmax z13.h, p5/M, z13.h, z0.h\n"
- "fmax z14.h, p5/M, z14.h, z0.h\n"
- "fmax z15.h, p5/M, z15.h, z0.h\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z1.h }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z0.h }, p5/Z, [x19]\n"
+ "fmin z8.h, p5/M, z8.h, z0.h\n"
+ "fmin z9.h, p5/M, z9.h, z0.h\n"
+ "fmin z10.h, p5/M, z10.h, z0.h\n"
+ "fmin z11.h, p5/M, z11.h, z0.h\n"
+ "fmin z12.h, p5/M, z12.h, z0.h\n"
+ "fmax z8.h, p5/M, z8.h, z1.h\n"
+ "fmax z9.h, p5/M, z9.h, z1.h\n"
+ "fmax z10.h, p5/M, z10.h, z1.h\n"
+ "fmax z11.h, p5/M, z11.h, z1.h\n"
+ "fmax z12.h, p5/M, z12.h, z1.h\n"
+ "fmin z13.h, p5/M, z13.h, z0.h\n"
+ "fmin z14.h, p5/M, z14.h, z0.h\n"
+ "fmin z15.h, p5/M, z15.h, z0.h\n"
+ "fmax z13.h, p5/M, z13.h, z1.h\n"
+ "fmax z14.h, p5/M, z14.h, z1.h\n"
+ "fmax z15.h, p5/M, z15.h, z1.h\n"
"25:" // Height 2: No activation
- "st1h { z8.h }, p4, [x9]\n"
- "st1h { z9.h }, p3, [x9, #1, MUL VL]\n"
- "st1h { z10.h }, p2, [x9, #2, MUL VL]\n"
- "st1h { z11.h }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1h { z12.h }, p4, [x25]\n"
- "st1h { z13.h }, p3, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p2, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p1, [x25, #3, MUL VL]\n"
+ "st1h { z8.h }, p4, [x28]\n"
+ "st1h { z9.h }, p3, [x28, #1, MUL VL]\n"
+ "st1h { z10.h }, p2, [x28, #2, MUL VL]\n"
+ "st1h { z11.h }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1h { z12.h }, p4, [x24]\n"
+ "st1h { z13.h }, p3, [x24, #1, MUL VL]\n"
+ "st1h { z14.h }, p2, [x24, #2, MUL VL]\n"
+ "st1h { z15.h }, p1, [x24, #3, MUL VL]\n"
"26:" // Height 2: Writeback done
"dech x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 15b\n"
"b 80f\n"
"27:" // Height 3
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"28:" // Height 3: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.h, x20, x11\n"
- "inch x20\n"
- "whilelt p3.h, x20, x11\n"
- "inch x20\n"
- "whilelt p2.h, x20, x11\n"
- "inch x20\n"
- "whilelt p1.h, x20, x11\n"
- "cbz x12, 29f\n"
- "ld1h { z8.h }, p5/Z, [x12]\n"
- "ld1h { z9.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.h, x19, x11\n"
+ "inch x19\n"
+ "whilelt p3.h, x19, x11\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x11\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x11\n"
+ "cbz x9, 29f\n"
+ "ld1h { z8.h }, p5/Z, [x9]\n"
"mov z12.d, z8.d\n"
+ "ld1h { z9.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "mov z16.d, z8.d\n"
+ "ld1h { z10.h }, p5/Z, [x9, #2, MUL VL]\n"
+ "ld1h { z11.h }, p5/Z, [x9, #3, MUL VL]\n"
"mov z13.d, z9.d\n"
- "ld1h { z10.h }, p5/Z, [x12, #2, MUL VL]\n"
- "ld1h { z11.h }, p5/Z, [x12, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
+ "mov z17.d, z9.d\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
- "mov z16.d, z8.d\n"
- "mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"b 31f\n"
"29:" // Height 3: no bias
"tbz %x[flags], #0, 30f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "ld1h { z8.h }, p4/Z, [x9]\n"
- "ld1h { z9.h }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1h { z10.h }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1h { z11.h }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1h { z12.h }, p4/Z, [x25]\n"
- "ld1h { z13.h }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1h { z14.h }, p2/Z, [x25, #2, MUL VL]\n"
- "ld1h { z15.h }, p1/Z, [x25, #3, MUL VL]\n"
- "ld1h { z16.h }, p4/Z, [x24]\n"
- "ld1h { z17.h }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1h { z18.h }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1h { z19.h }, p1/Z, [x24, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1h { z8.h }, p4/Z, [x28]\n"
+ "add x24, x28, x19, LSL #1\n"
+ "ld1h { z9.h }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1h { z10.h }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x23, x24, x19, LSL #1\n"
+ "ld1h { z11.h }, p1/Z, [x28, #3, MUL VL]\n"
+ "ld1h { z12.h }, p4/Z, [x24]\n"
+ "ld1h { z13.h }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z14.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z15.h }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z16.h }, p4/Z, [x23]\n"
+ "ld1h { z17.h }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z18.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z19.h }, p1/Z, [x23, #3, MUL VL]\n"
"b 31f\n"
"30:" // Height 3: no accumulate
"mov z8.b, #0x0\n"
@@ -746,63 +746,63 @@ void sve_hybrid_fp16_mla_6x4VL (
"mov z18.b, #0x0\n"
"mov z19.b, #0x0\n"
"31:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"32:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 33f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 34f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 34f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
"b 34f\n"
"33:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
"34:" // Height 3: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"ble 36f\n"
"35:" // Height 3: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
- "sub x27, x27, #0x8\n"
- "ld1rqh { z2.h }, p0/Z, [x24]\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x26, x26, #0x8\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
"fmla z8.h, z6.h, z0.h[0]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "cmp x26, #0x8\n"
+ "fmla z9.h, z7.h, z0.h[0]\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
+ "add x25, x25, #0x10\n"
"fmla z12.h, z6.h, z1.h[0]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "add x24, x24, #0x10\n"
+ "fmla z13.h, z7.h, z1.h[0]\n"
+ "add x23, x23, #0x10\n"
"fmla z16.h, z6.h, z2.h[0]\n"
- "fmla z9.h, z7.h, z0.h[0]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "fmla z13.h, z7.h, z1.h[0]\n"
"fmla z17.h, z7.h, z2.h[0]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
- "cmp x27, #0x8\n"
"fmla z10.h, z6.h, z0.h[0]\n"
"fmla z14.h, z6.h, z1.h[0]\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
"fmla z18.h, z6.h, z2.h[0]\n"
- "fmla z11.h, z7.h, z0.h[0]\n"
"ld1h { z6.h }, p5/Z, [x10, #4, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "fmla z11.h, z7.h, z0.h[0]\n"
"fmla z15.h, z7.h, z1.h[0]\n"
"fmla z19.h, z7.h, z2.h[0]\n"
"ld1h { z7.h }, p5/Z, [x10, #5, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[1]\n"
"fmla z12.h, z6.h, z1.h[1]\n"
"fmla z16.h, z6.h, z2.h[1]\n"
- "fmla z9.h, z7.h, z0.h[1]\n"
"ld1h { z6.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[1]\n"
"fmla z13.h, z7.h, z1.h[1]\n"
"fmla z17.h, z7.h, z2.h[1]\n"
"ld1h { z7.h }, p5/Z, [x10, #7, MUL VL]\n"
@@ -810,64 +810,64 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z10.h, z6.h, z0.h[1]\n"
"fmla z14.h, z6.h, z1.h[1]\n"
"fmla z18.h, z6.h, z2.h[1]\n"
- "fmla z11.h, z7.h, z0.h[1]\n"
"ld1h { z6.h }, p5/Z, [x10, #-8, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[1]\n"
"fmla z15.h, z7.h, z1.h[1]\n"
"fmla z19.h, z7.h, z2.h[1]\n"
"ld1h { z7.h }, p5/Z, [x10, #-7, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[2]\n"
"fmla z12.h, z6.h, z1.h[2]\n"
"fmla z16.h, z6.h, z2.h[2]\n"
- "fmla z9.h, z7.h, z0.h[2]\n"
"ld1h { z6.h }, p5/Z, [x10, #-6, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[2]\n"
"fmla z13.h, z7.h, z1.h[2]\n"
"fmla z17.h, z7.h, z2.h[2]\n"
"ld1h { z7.h }, p5/Z, [x10, #-5, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[2]\n"
"fmla z14.h, z6.h, z1.h[2]\n"
"fmla z18.h, z6.h, z2.h[2]\n"
- "fmla z11.h, z7.h, z0.h[2]\n"
"ld1h { z6.h }, p5/Z, [x10, #-4, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[2]\n"
"fmla z15.h, z7.h, z1.h[2]\n"
"fmla z19.h, z7.h, z2.h[2]\n"
"ld1h { z7.h }, p5/Z, [x10, #-3, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[3]\n"
"fmla z12.h, z6.h, z1.h[3]\n"
"fmla z16.h, z6.h, z2.h[3]\n"
- "fmla z9.h, z7.h, z0.h[3]\n"
"ld1h { z6.h }, p5/Z, [x10, #-2, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[3]\n"
"fmla z13.h, z7.h, z1.h[3]\n"
"fmla z17.h, z7.h, z2.h[3]\n"
"ld1h { z7.h }, p5/Z, [x10, #-1, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[3]\n"
"fmla z14.h, z6.h, z1.h[3]\n"
"fmla z18.h, z6.h, z2.h[3]\n"
- "fmla z11.h, z7.h, z0.h[3]\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
+ "fmla z11.h, z7.h, z0.h[3]\n"
"fmla z15.h, z7.h, z1.h[3]\n"
"fmla z19.h, z7.h, z2.h[3]\n"
"ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[4]\n"
"fmla z12.h, z6.h, z1.h[4]\n"
"fmla z16.h, z6.h, z2.h[4]\n"
- "fmla z9.h, z7.h, z0.h[4]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[4]\n"
"fmla z13.h, z7.h, z1.h[4]\n"
"fmla z17.h, z7.h, z2.h[4]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[4]\n"
"fmla z14.h, z6.h, z1.h[4]\n"
"fmla z18.h, z6.h, z2.h[4]\n"
- "fmla z11.h, z7.h, z0.h[4]\n"
"ld1h { z6.h }, p5/Z, [x10, #4, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[4]\n"
"fmla z15.h, z7.h, z1.h[4]\n"
"fmla z19.h, z7.h, z2.h[4]\n"
"ld1h { z7.h }, p5/Z, [x10, #5, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[5]\n"
"fmla z12.h, z6.h, z1.h[5]\n"
"fmla z16.h, z6.h, z2.h[5]\n"
- "fmla z9.h, z7.h, z0.h[5]\n"
"ld1h { z6.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[5]\n"
"fmla z13.h, z7.h, z1.h[5]\n"
"fmla z17.h, z7.h, z2.h[5]\n"
"ld1h { z7.h }, p5/Z, [x10, #7, MUL VL]\n"
@@ -875,32 +875,32 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z10.h, z6.h, z0.h[5]\n"
"fmla z14.h, z6.h, z1.h[5]\n"
"fmla z18.h, z6.h, z2.h[5]\n"
- "fmla z11.h, z7.h, z0.h[5]\n"
"ld1h { z6.h }, p5/Z, [x10, #-8, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[5]\n"
"fmla z15.h, z7.h, z1.h[5]\n"
"fmla z19.h, z7.h, z2.h[5]\n"
"ld1h { z7.h }, p5/Z, [x10, #-7, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[6]\n"
"fmla z12.h, z6.h, z1.h[6]\n"
"fmla z16.h, z6.h, z2.h[6]\n"
- "fmla z9.h, z7.h, z0.h[6]\n"
"ld1h { z6.h }, p5/Z, [x10, #-6, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[6]\n"
"fmla z13.h, z7.h, z1.h[6]\n"
"fmla z17.h, z7.h, z2.h[6]\n"
"ld1h { z7.h }, p5/Z, [x10, #-5, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[6]\n"
"fmla z14.h, z6.h, z1.h[6]\n"
"fmla z18.h, z6.h, z2.h[6]\n"
- "fmla z11.h, z7.h, z0.h[6]\n"
"ld1h { z6.h }, p5/Z, [x10, #-4, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[6]\n"
"fmla z15.h, z7.h, z1.h[6]\n"
"fmla z19.h, z7.h, z2.h[6]\n"
"ld1h { z7.h }, p5/Z, [x10, #-3, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[7]\n"
"fmla z12.h, z6.h, z1.h[7]\n"
"fmla z16.h, z6.h, z2.h[7]\n"
- "fmla z9.h, z7.h, z0.h[7]\n"
"ld1h { z6.h }, p5/Z, [x10, #-2, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[7]\n"
"fmla z13.h, z7.h, z1.h[7]\n"
"fmla z17.h, z7.h, z2.h[7]\n"
"ld1h { z7.h }, p5/Z, [x10, #-1, MUL VL]\n"
@@ -912,19 +912,19 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z19.h, z7.h, z2.h[7]\n"
"bgt 35b\n"
"36:" // Height 3: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
- "ld1rqh { z2.h }, p0/Z, [x24]\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
"fmla z8.h, z6.h, z0.h[0]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "fmla z9.h, z7.h, z0.h[0]\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
"fmla z12.h, z6.h, z1.h[0]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "fmla z13.h, z7.h, z1.h[0]\n"
"fmla z16.h, z6.h, z2.h[0]\n"
- "fmla z9.h, z7.h, z0.h[0]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "fmla z13.h, z7.h, z1.h[0]\n"
"fmla z17.h, z7.h, z2.h[0]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
@@ -936,13 +936,13 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z19.h, z7.h, z2.h[0]\n"
"ble 37f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[1]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.h, z6.h, z1.h[1]\n"
"fmla z16.h, z6.h, z2.h[1]\n"
- "fmla z9.h, z7.h, z0.h[1]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "fmla z9.h, z7.h, z0.h[1]\n"
"fmla z13.h, z7.h, z1.h[1]\n"
"fmla z17.h, z7.h, z2.h[1]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
@@ -955,13 +955,13 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z19.h, z7.h, z2.h[1]\n"
"ble 37f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[2]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.h, z6.h, z1.h[2]\n"
"fmla z16.h, z6.h, z2.h[2]\n"
- "fmla z9.h, z7.h, z0.h[2]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "fmla z9.h, z7.h, z0.h[2]\n"
"fmla z13.h, z7.h, z1.h[2]\n"
"fmla z17.h, z7.h, z2.h[2]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
@@ -974,13 +974,13 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z19.h, z7.h, z2.h[2]\n"
"ble 37f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[3]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.h, z6.h, z1.h[3]\n"
"fmla z16.h, z6.h, z2.h[3]\n"
- "fmla z9.h, z7.h, z0.h[3]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "fmla z9.h, z7.h, z0.h[3]\n"
"fmla z13.h, z7.h, z1.h[3]\n"
"fmla z17.h, z7.h, z2.h[3]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
@@ -993,13 +993,13 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z19.h, z7.h, z2.h[3]\n"
"ble 37f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[4]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.h, z6.h, z1.h[4]\n"
"fmla z16.h, z6.h, z2.h[4]\n"
- "fmla z9.h, z7.h, z0.h[4]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "fmla z9.h, z7.h, z0.h[4]\n"
"fmla z13.h, z7.h, z1.h[4]\n"
"fmla z17.h, z7.h, z2.h[4]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
@@ -1012,13 +1012,13 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z19.h, z7.h, z2.h[4]\n"
"ble 37f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[5]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.h, z6.h, z1.h[5]\n"
"fmla z16.h, z6.h, z2.h[5]\n"
- "fmla z9.h, z7.h, z0.h[5]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "fmla z9.h, z7.h, z0.h[5]\n"
"fmla z13.h, z7.h, z1.h[5]\n"
"fmla z17.h, z7.h, z2.h[5]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
@@ -1031,13 +1031,13 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z19.h, z7.h, z2.h[5]\n"
"ble 37f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[6]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.h, z6.h, z1.h[6]\n"
"fmla z16.h, z6.h, z2.h[6]\n"
- "fmla z9.h, z7.h, z0.h[6]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "fmla z9.h, z7.h, z0.h[6]\n"
"fmla z13.h, z7.h, z1.h[6]\n"
"fmla z17.h, z7.h, z2.h[6]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
@@ -1050,12 +1050,12 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z19.h, z7.h, z2.h[6]\n"
"ble 37f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[7]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z12.h, z6.h, z1.h[7]\n"
"fmla z16.h, z6.h, z2.h[7]\n"
- "fmla z9.h, z7.h, z0.h[7]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[7]\n"
"fmla z13.h, z7.h, z1.h[7]\n"
"fmla z17.h, z7.h, z2.h[7]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
@@ -1067,116 +1067,116 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z15.h, z7.h, z1.h[7]\n"
"fmla z19.h, z7.h, z2.h[7]\n"
"37:" // Height 3: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 32b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
"tbz %x[flags], #1, 38f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p5/Z, [x20]\n"
- "fmin z8.h, p5/M, z8.h, z1.h\n"
- "fmin z9.h, p5/M, z9.h, z1.h\n"
- "fmin z10.h, p5/M, z10.h, z1.h\n"
- "fmin z11.h, p5/M, z11.h, z1.h\n"
- "fmin z12.h, p5/M, z12.h, z1.h\n"
- "fmin z13.h, p5/M, z13.h, z1.h\n"
- "fmin z14.h, p5/M, z14.h, z1.h\n"
- "fmin z15.h, p5/M, z15.h, z1.h\n"
- "fmin z16.h, p5/M, z16.h, z1.h\n"
- "fmin z17.h, p5/M, z17.h, z1.h\n"
- "fmin z18.h, p5/M, z18.h, z1.h\n"
- "fmin z19.h, p5/M, z19.h, z1.h\n"
- "fmax z8.h, p5/M, z8.h, z0.h\n"
- "fmax z9.h, p5/M, z9.h, z0.h\n"
- "fmax z10.h, p5/M, z10.h, z0.h\n"
- "fmax z11.h, p5/M, z11.h, z0.h\n"
- "fmax z12.h, p5/M, z12.h, z0.h\n"
- "fmax z13.h, p5/M, z13.h, z0.h\n"
- "fmax z14.h, p5/M, z14.h, z0.h\n"
- "fmax z15.h, p5/M, z15.h, z0.h\n"
- "fmax z16.h, p5/M, z16.h, z0.h\n"
- "fmax z17.h, p5/M, z17.h, z0.h\n"
- "fmax z18.h, p5/M, z18.h, z0.h\n"
- "fmax z19.h, p5/M, z19.h, z0.h\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z1.h }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z0.h }, p5/Z, [x19]\n"
+ "fmin z8.h, p5/M, z8.h, z0.h\n"
+ "fmin z9.h, p5/M, z9.h, z0.h\n"
+ "fmin z10.h, p5/M, z10.h, z0.h\n"
+ "fmin z11.h, p5/M, z11.h, z0.h\n"
+ "fmin z12.h, p5/M, z12.h, z0.h\n"
+ "fmax z8.h, p5/M, z8.h, z1.h\n"
+ "fmax z9.h, p5/M, z9.h, z1.h\n"
+ "fmax z10.h, p5/M, z10.h, z1.h\n"
+ "fmax z11.h, p5/M, z11.h, z1.h\n"
+ "fmax z12.h, p5/M, z12.h, z1.h\n"
+ "fmin z13.h, p5/M, z13.h, z0.h\n"
+ "fmin z14.h, p5/M, z14.h, z0.h\n"
+ "fmin z15.h, p5/M, z15.h, z0.h\n"
+ "fmin z16.h, p5/M, z16.h, z0.h\n"
+ "fmax z13.h, p5/M, z13.h, z1.h\n"
+ "fmax z14.h, p5/M, z14.h, z1.h\n"
+ "fmax z15.h, p5/M, z15.h, z1.h\n"
+ "fmax z16.h, p5/M, z16.h, z1.h\n"
+ "fmin z17.h, p5/M, z17.h, z0.h\n"
+ "fmin z18.h, p5/M, z18.h, z0.h\n"
+ "fmin z19.h, p5/M, z19.h, z0.h\n"
+ "fmax z17.h, p5/M, z17.h, z1.h\n"
+ "fmax z18.h, p5/M, z18.h, z1.h\n"
+ "fmax z19.h, p5/M, z19.h, z1.h\n"
"38:" // Height 3: No activation
- "st1h { z8.h }, p4, [x9]\n"
- "st1h { z9.h }, p3, [x9, #1, MUL VL]\n"
- "st1h { z10.h }, p2, [x9, #2, MUL VL]\n"
- "st1h { z11.h }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1h { z12.h }, p4, [x25]\n"
- "st1h { z13.h }, p3, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p2, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p1, [x25, #3, MUL VL]\n"
- "st1h { z16.h }, p4, [x24]\n"
- "st1h { z17.h }, p3, [x24, #1, MUL VL]\n"
- "st1h { z18.h }, p2, [x24, #2, MUL VL]\n"
- "st1h { z19.h }, p1, [x24, #3, MUL VL]\n"
+ "st1h { z8.h }, p4, [x28]\n"
+ "st1h { z9.h }, p3, [x28, #1, MUL VL]\n"
+ "st1h { z10.h }, p2, [x28, #2, MUL VL]\n"
+ "st1h { z11.h }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1h { z12.h }, p4, [x24]\n"
+ "st1h { z13.h }, p3, [x24, #1, MUL VL]\n"
+ "st1h { z14.h }, p2, [x24, #2, MUL VL]\n"
+ "st1h { z15.h }, p1, [x24, #3, MUL VL]\n"
+ "st1h { z16.h }, p4, [x23]\n"
+ "st1h { z17.h }, p3, [x23, #1, MUL VL]\n"
+ "st1h { z18.h }, p2, [x23, #2, MUL VL]\n"
+ "st1h { z19.h }, p1, [x23, #3, MUL VL]\n"
"39:" // Height 3: Writeback done
"dech x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 28b\n"
"b 80f\n"
"40:" // Height 4
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"41:" // Height 4: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.h, x20, x11\n"
- "inch x20\n"
- "whilelt p3.h, x20, x11\n"
- "inch x20\n"
- "whilelt p2.h, x20, x11\n"
- "inch x20\n"
- "whilelt p1.h, x20, x11\n"
- "cbz x12, 42f\n"
- "ld1h { z8.h }, p5/Z, [x12]\n"
- "ld1h { z9.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.h, x19, x11\n"
+ "inch x19\n"
+ "whilelt p3.h, x19, x11\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x11\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x11\n"
+ "cbz x9, 42f\n"
+ "ld1h { z8.h }, p5/Z, [x9]\n"
"mov z12.d, z8.d\n"
+ "ld1h { z9.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "mov z16.d, z8.d\n"
+ "ld1h { z10.h }, p5/Z, [x9, #2, MUL VL]\n"
+ "mov z20.d, z8.d\n"
+ "ld1h { z11.h }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"mov z13.d, z9.d\n"
- "ld1h { z10.h }, p5/Z, [x12, #2, MUL VL]\n"
- "ld1h { z11.h }, p5/Z, [x12, #3, MUL VL]\n"
+ "mov z17.d, z9.d\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
- "mov z16.d, z8.d\n"
- "mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
"b 44f\n"
"42:" // Height 4: no bias
"tbz %x[flags], #0, 43f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "ld1h { z8.h }, p4/Z, [x9]\n"
- "add x23, x24, x20, LSL #1\n"
- "ld1h { z9.h }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1h { z10.h }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1h { z11.h }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1h { z12.h }, p4/Z, [x25]\n"
- "ld1h { z13.h }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1h { z14.h }, p2/Z, [x25, #2, MUL VL]\n"
- "ld1h { z15.h }, p1/Z, [x25, #3, MUL VL]\n"
- "ld1h { z16.h }, p4/Z, [x24]\n"
- "ld1h { z17.h }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1h { z18.h }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1h { z19.h }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1h { z20.h }, p4/Z, [x23]\n"
- "ld1h { z21.h }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1h { z22.h }, p2/Z, [x23, #2, MUL VL]\n"
- "ld1h { z23.h }, p1/Z, [x23, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1h { z8.h }, p4/Z, [x28]\n"
+ "add x24, x28, x19, LSL #1\n"
+ "ld1h { z9.h }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1h { z10.h }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x23, x24, x19, LSL #1\n"
+ "ld1h { z11.h }, p1/Z, [x28, #3, MUL VL]\n"
+ "add x22, x23, x19, LSL #1\n"
+ "ld1h { z12.h }, p4/Z, [x24]\n"
+ "ld1h { z13.h }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z14.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z15.h }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z16.h }, p4/Z, [x23]\n"
+ "ld1h { z17.h }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z18.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z19.h }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1h { z20.h }, p4/Z, [x22]\n"
+ "ld1h { z21.h }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z22.h }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z23.h }, p1/Z, [x22, #3, MUL VL]\n"
"b 44f\n"
"43:" // Height 4: no accumulate
"mov z8.b, #0x0\n"
@@ -1196,55 +1196,55 @@ void sve_hybrid_fp16_mla_6x4VL (
"mov z22.b, #0x0\n"
"mov z23.b, #0x0\n"
"44:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"45:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 46f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 47f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 47f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
"b 47f\n"
"46:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
"47:" // Height 4: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"ble 49f\n"
"48:" // Height 4: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
- "sub x27, x27, #0x8\n"
- "ld1rqh { z2.h }, p0/Z, [x24]\n"
- "ld1rqh { z3.h }, p0/Z, [x23]\n"
- "cmp x27, #0x8\n"
- "add x26, x26, #0x10\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
+ "whilelt p0.h, XZR, x26\n"
"ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x26, x26, #0x8\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
"fmla z8.h, z6.h, z0.h[0]\n"
- "fmla z12.h, z6.h, z1.h[0]\n"
- "fmla z16.h, z6.h, z2.h[0]\n"
- "fmla z20.h, z6.h, z3.h[0]\n"
- "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "add x25, x25, #0x10\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "cmp x26, #0x8\n"
"fmla z9.h, z7.h, z0.h[0]\n"
- "fmla z13.h, z7.h, z1.h[0]\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
+ "add x25, x25, #0x10\n"
+ "fmla z12.h, z6.h, z1.h[0]\n"
+ "ld1rqh { z3.h }, p0/Z, [x22]\n"
"add x24, x24, #0x10\n"
+ "fmla z16.h, z6.h, z2.h[0]\n"
"add x23, x23, #0x10\n"
+ "fmla z13.h, z7.h, z1.h[0]\n"
+ "add x22, x22, #0x10\n"
"fmla z17.h, z7.h, z2.h[0]\n"
+ "fmla z20.h, z6.h, z3.h[0]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z21.h, z7.h, z3.h[0]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
"fmla z10.h, z6.h, z0.h[0]\n"
@@ -1399,21 +1399,21 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z23.h, z7.h, z3.h[7]\n"
"bgt 48b\n"
"49:" // Height 4: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
- "ld1rqh { z2.h }, p0/Z, [x24]\n"
- "ld1rqh { z3.h }, p0/Z, [x23]\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
+ "whilelt p0.h, XZR, x26\n"
"ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
"fmla z8.h, z6.h, z0.h[0]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "fmla z9.h, z7.h, z0.h[0]\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
+ "ld1rqh { z3.h }, p0/Z, [x22]\n"
"fmla z12.h, z6.h, z1.h[0]\n"
+ "fmla z13.h, z7.h, z1.h[0]\n"
"fmla z16.h, z6.h, z2.h[0]\n"
"fmla z20.h, z6.h, z3.h[0]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "fmla z9.h, z7.h, z0.h[0]\n"
- "fmla z13.h, z7.h, z1.h[0]\n"
"fmla z17.h, z7.h, z2.h[0]\n"
"fmla z21.h, z7.h, z3.h[0]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
@@ -1428,13 +1428,13 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z23.h, z7.h, z3.h[0]\n"
"ble 50f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[1]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.h, z6.h, z1.h[1]\n"
"fmla z16.h, z6.h, z2.h[1]\n"
"fmla z20.h, z6.h, z3.h[1]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
"fmla z9.h, z7.h, z0.h[1]\n"
"fmla z13.h, z7.h, z1.h[1]\n"
"fmla z17.h, z7.h, z2.h[1]\n"
@@ -1451,13 +1451,13 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z23.h, z7.h, z3.h[1]\n"
"ble 50f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[2]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.h, z6.h, z1.h[2]\n"
"fmla z16.h, z6.h, z2.h[2]\n"
"fmla z20.h, z6.h, z3.h[2]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
"fmla z9.h, z7.h, z0.h[2]\n"
"fmla z13.h, z7.h, z1.h[2]\n"
"fmla z17.h, z7.h, z2.h[2]\n"
@@ -1474,13 +1474,13 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z23.h, z7.h, z3.h[2]\n"
"ble 50f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[3]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.h, z6.h, z1.h[3]\n"
"fmla z16.h, z6.h, z2.h[3]\n"
"fmla z20.h, z6.h, z3.h[3]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
"fmla z9.h, z7.h, z0.h[3]\n"
"fmla z13.h, z7.h, z1.h[3]\n"
"fmla z17.h, z7.h, z2.h[3]\n"
@@ -1497,13 +1497,13 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z23.h, z7.h, z3.h[3]\n"
"ble 50f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[4]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.h, z6.h, z1.h[4]\n"
"fmla z16.h, z6.h, z2.h[4]\n"
"fmla z20.h, z6.h, z3.h[4]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
"fmla z9.h, z7.h, z0.h[4]\n"
"fmla z13.h, z7.h, z1.h[4]\n"
"fmla z17.h, z7.h, z2.h[4]\n"
@@ -1520,13 +1520,13 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z23.h, z7.h, z3.h[4]\n"
"ble 50f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[5]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.h, z6.h, z1.h[5]\n"
"fmla z16.h, z6.h, z2.h[5]\n"
"fmla z20.h, z6.h, z3.h[5]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
"fmla z9.h, z7.h, z0.h[5]\n"
"fmla z13.h, z7.h, z1.h[5]\n"
"fmla z17.h, z7.h, z2.h[5]\n"
@@ -1543,13 +1543,13 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z23.h, z7.h, z3.h[5]\n"
"ble 50f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[6]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.h, z6.h, z1.h[6]\n"
"fmla z16.h, z6.h, z2.h[6]\n"
"fmla z20.h, z6.h, z3.h[6]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
"fmla z9.h, z7.h, z0.h[6]\n"
"fmla z13.h, z7.h, z1.h[6]\n"
"fmla z17.h, z7.h, z2.h[6]\n"
@@ -1566,8 +1566,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z23.h, z7.h, z3.h[6]\n"
"ble 50f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[7]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z12.h, z6.h, z1.h[7]\n"
"fmla z16.h, z6.h, z2.h[7]\n"
"fmla z20.h, z6.h, z3.h[7]\n"
@@ -1587,103 +1587,103 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z19.h, z7.h, z2.h[7]\n"
"fmla z23.h, z7.h, z3.h[7]\n"
"50:" // Height 4: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 45b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
"tbz %x[flags], #1, 51f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p5/Z, [x20]\n"
- "fmin z8.h, p5/M, z8.h, z1.h\n"
- "fmin z9.h, p5/M, z9.h, z1.h\n"
- "fmin z10.h, p5/M, z10.h, z1.h\n"
- "fmin z11.h, p5/M, z11.h, z1.h\n"
- "fmin z12.h, p5/M, z12.h, z1.h\n"
- "fmin z13.h, p5/M, z13.h, z1.h\n"
- "fmin z14.h, p5/M, z14.h, z1.h\n"
- "fmin z15.h, p5/M, z15.h, z1.h\n"
- "fmin z16.h, p5/M, z16.h, z1.h\n"
- "fmin z17.h, p5/M, z17.h, z1.h\n"
- "fmin z18.h, p5/M, z18.h, z1.h\n"
- "fmin z19.h, p5/M, z19.h, z1.h\n"
- "fmin z20.h, p5/M, z20.h, z1.h\n"
- "fmin z21.h, p5/M, z21.h, z1.h\n"
- "fmin z22.h, p5/M, z22.h, z1.h\n"
- "fmin z23.h, p5/M, z23.h, z1.h\n"
- "fmax z8.h, p5/M, z8.h, z0.h\n"
- "fmax z9.h, p5/M, z9.h, z0.h\n"
- "fmax z10.h, p5/M, z10.h, z0.h\n"
- "fmax z11.h, p5/M, z11.h, z0.h\n"
- "fmax z12.h, p5/M, z12.h, z0.h\n"
- "fmax z13.h, p5/M, z13.h, z0.h\n"
- "fmax z14.h, p5/M, z14.h, z0.h\n"
- "fmax z15.h, p5/M, z15.h, z0.h\n"
- "fmax z16.h, p5/M, z16.h, z0.h\n"
- "fmax z17.h, p5/M, z17.h, z0.h\n"
- "fmax z18.h, p5/M, z18.h, z0.h\n"
- "fmax z19.h, p5/M, z19.h, z0.h\n"
- "fmax z20.h, p5/M, z20.h, z0.h\n"
- "fmax z21.h, p5/M, z21.h, z0.h\n"
- "fmax z22.h, p5/M, z22.h, z0.h\n"
- "fmax z23.h, p5/M, z23.h, z0.h\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z1.h }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z0.h }, p5/Z, [x19]\n"
+ "fmin z8.h, p5/M, z8.h, z0.h\n"
+ "fmin z9.h, p5/M, z9.h, z0.h\n"
+ "fmin z10.h, p5/M, z10.h, z0.h\n"
+ "fmin z11.h, p5/M, z11.h, z0.h\n"
+ "fmin z12.h, p5/M, z12.h, z0.h\n"
+ "fmax z8.h, p5/M, z8.h, z1.h\n"
+ "fmax z9.h, p5/M, z9.h, z1.h\n"
+ "fmax z10.h, p5/M, z10.h, z1.h\n"
+ "fmax z11.h, p5/M, z11.h, z1.h\n"
+ "fmax z12.h, p5/M, z12.h, z1.h\n"
+ "fmin z13.h, p5/M, z13.h, z0.h\n"
+ "fmin z14.h, p5/M, z14.h, z0.h\n"
+ "fmin z15.h, p5/M, z15.h, z0.h\n"
+ "fmin z16.h, p5/M, z16.h, z0.h\n"
+ "fmax z13.h, p5/M, z13.h, z1.h\n"
+ "fmax z14.h, p5/M, z14.h, z1.h\n"
+ "fmax z15.h, p5/M, z15.h, z1.h\n"
+ "fmax z16.h, p5/M, z16.h, z1.h\n"
+ "fmin z17.h, p5/M, z17.h, z0.h\n"
+ "fmin z18.h, p5/M, z18.h, z0.h\n"
+ "fmin z19.h, p5/M, z19.h, z0.h\n"
+ "fmin z20.h, p5/M, z20.h, z0.h\n"
+ "fmax z17.h, p5/M, z17.h, z1.h\n"
+ "fmax z18.h, p5/M, z18.h, z1.h\n"
+ "fmax z19.h, p5/M, z19.h, z1.h\n"
+ "fmax z20.h, p5/M, z20.h, z1.h\n"
+ "fmin z21.h, p5/M, z21.h, z0.h\n"
+ "fmin z22.h, p5/M, z22.h, z0.h\n"
+ "fmin z23.h, p5/M, z23.h, z0.h\n"
+ "fmax z21.h, p5/M, z21.h, z1.h\n"
+ "fmax z22.h, p5/M, z22.h, z1.h\n"
+ "fmax z23.h, p5/M, z23.h, z1.h\n"
"51:" // Height 4: No activation
- "st1h { z8.h }, p4, [x9]\n"
- "st1h { z9.h }, p3, [x9, #1, MUL VL]\n"
- "st1h { z10.h }, p2, [x9, #2, MUL VL]\n"
- "st1h { z11.h }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1h { z12.h }, p4, [x25]\n"
- "st1h { z13.h }, p3, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p2, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p1, [x25, #3, MUL VL]\n"
- "st1h { z16.h }, p4, [x24]\n"
- "st1h { z17.h }, p3, [x24, #1, MUL VL]\n"
- "st1h { z18.h }, p2, [x24, #2, MUL VL]\n"
- "st1h { z19.h }, p1, [x24, #3, MUL VL]\n"
- "st1h { z20.h }, p4, [x23]\n"
- "st1h { z21.h }, p3, [x23, #1, MUL VL]\n"
- "st1h { z22.h }, p2, [x23, #2, MUL VL]\n"
- "st1h { z23.h }, p1, [x23, #3, MUL VL]\n"
+ "st1h { z8.h }, p4, [x28]\n"
+ "st1h { z9.h }, p3, [x28, #1, MUL VL]\n"
+ "st1h { z10.h }, p2, [x28, #2, MUL VL]\n"
+ "st1h { z11.h }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1h { z12.h }, p4, [x24]\n"
+ "st1h { z13.h }, p3, [x24, #1, MUL VL]\n"
+ "st1h { z14.h }, p2, [x24, #2, MUL VL]\n"
+ "st1h { z15.h }, p1, [x24, #3, MUL VL]\n"
+ "st1h { z16.h }, p4, [x23]\n"
+ "st1h { z17.h }, p3, [x23, #1, MUL VL]\n"
+ "st1h { z18.h }, p2, [x23, #2, MUL VL]\n"
+ "st1h { z19.h }, p1, [x23, #3, MUL VL]\n"
+ "st1h { z20.h }, p4, [x22]\n"
+ "st1h { z21.h }, p3, [x22, #1, MUL VL]\n"
+ "st1h { z22.h }, p2, [x22, #2, MUL VL]\n"
+ "st1h { z23.h }, p1, [x22, #3, MUL VL]\n"
"52:" // Height 4: Writeback done
"dech x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 41b\n"
"b 80f\n"
"53:" // Height 5
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"54:" // Height 5: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.h, x20, x11\n"
- "inch x20\n"
- "whilelt p3.h, x20, x11\n"
- "inch x20\n"
- "whilelt p2.h, x20, x11\n"
- "inch x20\n"
- "whilelt p1.h, x20, x11\n"
- "cbz x12, 55f\n"
- "ld1h { z8.h }, p5/Z, [x12]\n"
- "ld1h { z9.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.h, x19, x11\n"
+ "inch x19\n"
+ "whilelt p3.h, x19, x11\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x11\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x11\n"
+ "cbz x9, 55f\n"
+ "ld1h { z8.h }, p5/Z, [x9]\n"
"mov z12.d, z8.d\n"
+ "ld1h { z9.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "mov z16.d, z8.d\n"
+ "ld1h { z10.h }, p5/Z, [x9, #2, MUL VL]\n"
+ "mov z20.d, z8.d\n"
+ "ld1h { z11.h }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"mov z13.d, z9.d\n"
- "ld1h { z10.h }, p5/Z, [x12, #2, MUL VL]\n"
- "ld1h { z11.h }, p5/Z, [x12, #3, MUL VL]\n"
+ "mov z17.d, z9.d\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
- "mov z16.d, z8.d\n"
- "mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
@@ -1694,31 +1694,31 @@ void sve_hybrid_fp16_mla_6x4VL (
"b 57f\n"
"55:" // Height 5: no bias
"tbz %x[flags], #0, 56f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "ld1h { z8.h }, p4/Z, [x9]\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
- "ld1h { z9.h }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1h { z10.h }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1h { z11.h }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1h { z12.h }, p4/Z, [x25]\n"
- "ld1h { z13.h }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1h { z14.h }, p2/Z, [x25, #2, MUL VL]\n"
- "ld1h { z15.h }, p1/Z, [x25, #3, MUL VL]\n"
- "ld1h { z16.h }, p4/Z, [x24]\n"
- "ld1h { z17.h }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1h { z18.h }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1h { z19.h }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1h { z20.h }, p4/Z, [x23]\n"
- "ld1h { z21.h }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1h { z22.h }, p2/Z, [x23, #2, MUL VL]\n"
- "ld1h { z23.h }, p1/Z, [x23, #3, MUL VL]\n"
- "ld1h { z24.h }, p4/Z, [x22]\n"
- "ld1h { z25.h }, p3/Z, [x22, #1, MUL VL]\n"
- "ld1h { z26.h }, p2/Z, [x22, #2, MUL VL]\n"
- "ld1h { z27.h }, p1/Z, [x22, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1h { z8.h }, p4/Z, [x28]\n"
+ "add x24, x28, x19, LSL #1\n"
+ "ld1h { z9.h }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1h { z10.h }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x23, x24, x19, LSL #1\n"
+ "ld1h { z11.h }, p1/Z, [x28, #3, MUL VL]\n"
+ "add x22, x23, x19, LSL #1\n"
+ "ld1h { z12.h }, p4/Z, [x24]\n"
+ "add x21, x22, x19, LSL #1\n"
+ "ld1h { z13.h }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z14.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z15.h }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z16.h }, p4/Z, [x23]\n"
+ "ld1h { z17.h }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z18.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z19.h }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1h { z20.h }, p4/Z, [x22]\n"
+ "ld1h { z21.h }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z22.h }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z23.h }, p1/Z, [x22, #3, MUL VL]\n"
+ "ld1h { z24.h }, p4/Z, [x21]\n"
+ "ld1h { z25.h }, p3/Z, [x21, #1, MUL VL]\n"
+ "ld1h { z26.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1h { z27.h }, p1/Z, [x21, #3, MUL VL]\n"
"b 57f\n"
"56:" // Height 5: no accumulate
"mov z8.b, #0x0\n"
@@ -1742,61 +1742,61 @@ void sve_hybrid_fp16_mla_6x4VL (
"mov z26.b, #0x0\n"
"mov z27.b, #0x0\n"
"57:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"58:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 59f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 60f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
- "add x22, x22, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 60f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
"b 60f\n"
"59:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
"60:" // Height 5: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"ble 62f\n"
"61:" // Height 5: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
- "sub x27, x27, #0x8\n"
- "ld1rqh { z2.h }, p0/Z, [x24]\n"
- "ld1rqh { z3.h }, p0/Z, [x23]\n"
- "cmp x27, #0x8\n"
- "add x26, x26, #0x10\n"
- "ld1rqh { z4.h }, p0/Z, [x22]\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x26, x26, #0x8\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
"fmla z8.h, z6.h, z0.h[0]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "cmp x26, #0x8\n"
+ "fmla z9.h, z7.h, z0.h[0]\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
+ "add x25, x25, #0x10\n"
"fmla z12.h, z6.h, z1.h[0]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1rqh { z3.h }, p0/Z, [x22]\n"
+ "add x24, x24, #0x10\n"
"fmla z16.h, z6.h, z2.h[0]\n"
+ "ld1rqh { z4.h }, p0/Z, [x21]\n"
+ "add x23, x23, #0x10\n"
+ "fmla z13.h, z7.h, z1.h[0]\n"
+ "add x22, x22, #0x10\n"
+ "fmla z17.h, z7.h, z2.h[0]\n"
+ "add x21, x21, #0x10\n"
"fmla z20.h, z6.h, z3.h[0]\n"
- "add x25, x25, #0x10\n"
"fmla z24.h, z6.h, z4.h[0]\n"
- "fmla z9.h, z7.h, z0.h[0]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "add x24, x24, #0x10\n"
- "fmla z13.h, z7.h, z1.h[0]\n"
- "fmla z17.h, z7.h, z2.h[0]\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
"fmla z21.h, z7.h, z3.h[0]\n"
"fmla z25.h, z7.h, z4.h[0]\n"
"ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
@@ -1805,8 +1805,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z18.h, z6.h, z2.h[0]\n"
"fmla z22.h, z6.h, z3.h[0]\n"
"fmla z26.h, z6.h, z4.h[0]\n"
- "fmla z11.h, z7.h, z0.h[0]\n"
"ld1h { z6.h }, p5/Z, [x10, #4, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[0]\n"
"fmla z15.h, z7.h, z1.h[0]\n"
"fmla z19.h, z7.h, z2.h[0]\n"
"fmla z23.h, z7.h, z3.h[0]\n"
@@ -1817,8 +1817,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z16.h, z6.h, z2.h[1]\n"
"fmla z20.h, z6.h, z3.h[1]\n"
"fmla z24.h, z6.h, z4.h[1]\n"
- "fmla z9.h, z7.h, z0.h[1]\n"
"ld1h { z6.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[1]\n"
"fmla z13.h, z7.h, z1.h[1]\n"
"fmla z17.h, z7.h, z2.h[1]\n"
"fmla z21.h, z7.h, z3.h[1]\n"
@@ -1830,8 +1830,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z18.h, z6.h, z2.h[1]\n"
"fmla z22.h, z6.h, z3.h[1]\n"
"fmla z26.h, z6.h, z4.h[1]\n"
- "fmla z11.h, z7.h, z0.h[1]\n"
"ld1h { z6.h }, p5/Z, [x10, #-8, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[1]\n"
"fmla z15.h, z7.h, z1.h[1]\n"
"fmla z19.h, z7.h, z2.h[1]\n"
"fmla z23.h, z7.h, z3.h[1]\n"
@@ -1842,8 +1842,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z16.h, z6.h, z2.h[2]\n"
"fmla z20.h, z6.h, z3.h[2]\n"
"fmla z24.h, z6.h, z4.h[2]\n"
- "fmla z9.h, z7.h, z0.h[2]\n"
"ld1h { z6.h }, p5/Z, [x10, #-6, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[2]\n"
"fmla z13.h, z7.h, z1.h[2]\n"
"fmla z17.h, z7.h, z2.h[2]\n"
"fmla z21.h, z7.h, z3.h[2]\n"
@@ -1854,8 +1854,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z18.h, z6.h, z2.h[2]\n"
"fmla z22.h, z6.h, z3.h[2]\n"
"fmla z26.h, z6.h, z4.h[2]\n"
- "fmla z11.h, z7.h, z0.h[2]\n"
"ld1h { z6.h }, p5/Z, [x10, #-4, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[2]\n"
"fmla z15.h, z7.h, z1.h[2]\n"
"fmla z19.h, z7.h, z2.h[2]\n"
"fmla z23.h, z7.h, z3.h[2]\n"
@@ -1866,8 +1866,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z16.h, z6.h, z2.h[3]\n"
"fmla z20.h, z6.h, z3.h[3]\n"
"fmla z24.h, z6.h, z4.h[3]\n"
- "fmla z9.h, z7.h, z0.h[3]\n"
"ld1h { z6.h }, p5/Z, [x10, #-2, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[3]\n"
"fmla z13.h, z7.h, z1.h[3]\n"
"fmla z17.h, z7.h, z2.h[3]\n"
"fmla z21.h, z7.h, z3.h[3]\n"
@@ -1878,8 +1878,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z18.h, z6.h, z2.h[3]\n"
"fmla z22.h, z6.h, z3.h[3]\n"
"fmla z26.h, z6.h, z4.h[3]\n"
- "fmla z11.h, z7.h, z0.h[3]\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
+ "fmla z11.h, z7.h, z0.h[3]\n"
"fmla z15.h, z7.h, z1.h[3]\n"
"fmla z19.h, z7.h, z2.h[3]\n"
"fmla z23.h, z7.h, z3.h[3]\n"
@@ -1890,8 +1890,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z16.h, z6.h, z2.h[4]\n"
"fmla z20.h, z6.h, z3.h[4]\n"
"fmla z24.h, z6.h, z4.h[4]\n"
- "fmla z9.h, z7.h, z0.h[4]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[4]\n"
"fmla z13.h, z7.h, z1.h[4]\n"
"fmla z17.h, z7.h, z2.h[4]\n"
"fmla z21.h, z7.h, z3.h[4]\n"
@@ -1902,8 +1902,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z18.h, z6.h, z2.h[4]\n"
"fmla z22.h, z6.h, z3.h[4]\n"
"fmla z26.h, z6.h, z4.h[4]\n"
- "fmla z11.h, z7.h, z0.h[4]\n"
"ld1h { z6.h }, p5/Z, [x10, #4, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[4]\n"
"fmla z15.h, z7.h, z1.h[4]\n"
"fmla z19.h, z7.h, z2.h[4]\n"
"fmla z23.h, z7.h, z3.h[4]\n"
@@ -1914,8 +1914,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z16.h, z6.h, z2.h[5]\n"
"fmla z20.h, z6.h, z3.h[5]\n"
"fmla z24.h, z6.h, z4.h[5]\n"
- "fmla z9.h, z7.h, z0.h[5]\n"
"ld1h { z6.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[5]\n"
"fmla z13.h, z7.h, z1.h[5]\n"
"fmla z17.h, z7.h, z2.h[5]\n"
"fmla z21.h, z7.h, z3.h[5]\n"
@@ -1927,8 +1927,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z18.h, z6.h, z2.h[5]\n"
"fmla z22.h, z6.h, z3.h[5]\n"
"fmla z26.h, z6.h, z4.h[5]\n"
- "fmla z11.h, z7.h, z0.h[5]\n"
"ld1h { z6.h }, p5/Z, [x10, #-8, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[5]\n"
"fmla z15.h, z7.h, z1.h[5]\n"
"fmla z19.h, z7.h, z2.h[5]\n"
"fmla z23.h, z7.h, z3.h[5]\n"
@@ -1939,8 +1939,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z16.h, z6.h, z2.h[6]\n"
"fmla z20.h, z6.h, z3.h[6]\n"
"fmla z24.h, z6.h, z4.h[6]\n"
- "fmla z9.h, z7.h, z0.h[6]\n"
"ld1h { z6.h }, p5/Z, [x10, #-6, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[6]\n"
"fmla z13.h, z7.h, z1.h[6]\n"
"fmla z17.h, z7.h, z2.h[6]\n"
"fmla z21.h, z7.h, z3.h[6]\n"
@@ -1951,8 +1951,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z18.h, z6.h, z2.h[6]\n"
"fmla z22.h, z6.h, z3.h[6]\n"
"fmla z26.h, z6.h, z4.h[6]\n"
- "fmla z11.h, z7.h, z0.h[6]\n"
"ld1h { z6.h }, p5/Z, [x10, #-4, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[6]\n"
"fmla z15.h, z7.h, z1.h[6]\n"
"fmla z19.h, z7.h, z2.h[6]\n"
"fmla z23.h, z7.h, z3.h[6]\n"
@@ -1963,8 +1963,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z16.h, z6.h, z2.h[7]\n"
"fmla z20.h, z6.h, z3.h[7]\n"
"fmla z24.h, z6.h, z4.h[7]\n"
- "fmla z9.h, z7.h, z0.h[7]\n"
"ld1h { z6.h }, p5/Z, [x10, #-2, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[7]\n"
"fmla z13.h, z7.h, z1.h[7]\n"
"fmla z17.h, z7.h, z2.h[7]\n"
"fmla z21.h, z7.h, z3.h[7]\n"
@@ -1982,23 +1982,23 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z27.h, z7.h, z4.h[7]\n"
"bgt 61b\n"
"62:" // Height 5: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
- "ld1rqh { z2.h }, p0/Z, [x24]\n"
- "ld1rqh { z3.h }, p0/Z, [x23]\n"
- "ld1rqh { z4.h }, p0/Z, [x22]\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
+ "whilelt p0.h, XZR, x26\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
"fmla z8.h, z6.h, z0.h[0]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "fmla z9.h, z7.h, z0.h[0]\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
+ "ld1rqh { z3.h }, p0/Z, [x22]\n"
"fmla z12.h, z6.h, z1.h[0]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1rqh { z4.h }, p0/Z, [x21]\n"
+ "fmla z13.h, z7.h, z1.h[0]\n"
"fmla z16.h, z6.h, z2.h[0]\n"
"fmla z20.h, z6.h, z3.h[0]\n"
"fmla z24.h, z6.h, z4.h[0]\n"
- "fmla z9.h, z7.h, z0.h[0]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "fmla z13.h, z7.h, z1.h[0]\n"
"fmla z17.h, z7.h, z2.h[0]\n"
"fmla z21.h, z7.h, z3.h[0]\n"
"fmla z25.h, z7.h, z4.h[0]\n"
@@ -2016,15 +2016,15 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z27.h, z7.h, z4.h[0]\n"
"ble 63f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[1]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.h, z6.h, z1.h[1]\n"
"fmla z16.h, z6.h, z2.h[1]\n"
"fmla z20.h, z6.h, z3.h[1]\n"
- "subs x27, x27, #0x1\n"
"fmla z24.h, z6.h, z4.h[1]\n"
- "fmla z9.h, z7.h, z0.h[1]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[1]\n"
"fmla z13.h, z7.h, z1.h[1]\n"
"fmla z17.h, z7.h, z2.h[1]\n"
"fmla z21.h, z7.h, z3.h[1]\n"
@@ -2043,15 +2043,15 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z27.h, z7.h, z4.h[1]\n"
"ble 63f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[2]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.h, z6.h, z1.h[2]\n"
"fmla z16.h, z6.h, z2.h[2]\n"
"fmla z20.h, z6.h, z3.h[2]\n"
- "subs x27, x27, #0x1\n"
"fmla z24.h, z6.h, z4.h[2]\n"
- "fmla z9.h, z7.h, z0.h[2]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[2]\n"
"fmla z13.h, z7.h, z1.h[2]\n"
"fmla z17.h, z7.h, z2.h[2]\n"
"fmla z21.h, z7.h, z3.h[2]\n"
@@ -2070,15 +2070,15 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z27.h, z7.h, z4.h[2]\n"
"ble 63f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[3]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.h, z6.h, z1.h[3]\n"
"fmla z16.h, z6.h, z2.h[3]\n"
"fmla z20.h, z6.h, z3.h[3]\n"
- "subs x27, x27, #0x1\n"
"fmla z24.h, z6.h, z4.h[3]\n"
- "fmla z9.h, z7.h, z0.h[3]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[3]\n"
"fmla z13.h, z7.h, z1.h[3]\n"
"fmla z17.h, z7.h, z2.h[3]\n"
"fmla z21.h, z7.h, z3.h[3]\n"
@@ -2097,15 +2097,15 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z27.h, z7.h, z4.h[3]\n"
"ble 63f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[4]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.h, z6.h, z1.h[4]\n"
"fmla z16.h, z6.h, z2.h[4]\n"
"fmla z20.h, z6.h, z3.h[4]\n"
- "subs x27, x27, #0x1\n"
"fmla z24.h, z6.h, z4.h[4]\n"
- "fmla z9.h, z7.h, z0.h[4]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[4]\n"
"fmla z13.h, z7.h, z1.h[4]\n"
"fmla z17.h, z7.h, z2.h[4]\n"
"fmla z21.h, z7.h, z3.h[4]\n"
@@ -2124,15 +2124,15 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z27.h, z7.h, z4.h[4]\n"
"ble 63f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[5]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.h, z6.h, z1.h[5]\n"
"fmla z16.h, z6.h, z2.h[5]\n"
"fmla z20.h, z6.h, z3.h[5]\n"
- "subs x27, x27, #0x1\n"
"fmla z24.h, z6.h, z4.h[5]\n"
- "fmla z9.h, z7.h, z0.h[5]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[5]\n"
"fmla z13.h, z7.h, z1.h[5]\n"
"fmla z17.h, z7.h, z2.h[5]\n"
"fmla z21.h, z7.h, z3.h[5]\n"
@@ -2151,15 +2151,15 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z27.h, z7.h, z4.h[5]\n"
"ble 63f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[6]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.h, z6.h, z1.h[6]\n"
"fmla z16.h, z6.h, z2.h[6]\n"
"fmla z20.h, z6.h, z3.h[6]\n"
- "subs x27, x27, #0x1\n"
"fmla z24.h, z6.h, z4.h[6]\n"
- "fmla z9.h, z7.h, z0.h[6]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[6]\n"
"fmla z13.h, z7.h, z1.h[6]\n"
"fmla z17.h, z7.h, z2.h[6]\n"
"fmla z21.h, z7.h, z3.h[6]\n"
@@ -2178,14 +2178,14 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z27.h, z7.h, z4.h[6]\n"
"ble 63f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[7]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z12.h, z6.h, z1.h[7]\n"
"fmla z16.h, z6.h, z2.h[7]\n"
"fmla z20.h, z6.h, z3.h[7]\n"
"fmla z24.h, z6.h, z4.h[7]\n"
- "fmla z9.h, z7.h, z0.h[7]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[7]\n"
"fmla z13.h, z7.h, z1.h[7]\n"
"fmla z17.h, z7.h, z2.h[7]\n"
"fmla z21.h, z7.h, z3.h[7]\n"
@@ -2203,119 +2203,119 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z23.h, z7.h, z3.h[7]\n"
"fmla z27.h, z7.h, z4.h[7]\n"
"63:" // Height 5: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 58b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
"tbz %x[flags], #1, 64f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p5/Z, [x20]\n"
- "fmin z8.h, p5/M, z8.h, z1.h\n"
- "fmin z9.h, p5/M, z9.h, z1.h\n"
- "fmin z10.h, p5/M, z10.h, z1.h\n"
- "fmin z11.h, p5/M, z11.h, z1.h\n"
- "fmin z12.h, p5/M, z12.h, z1.h\n"
- "fmin z13.h, p5/M, z13.h, z1.h\n"
- "fmin z14.h, p5/M, z14.h, z1.h\n"
- "fmin z15.h, p5/M, z15.h, z1.h\n"
- "fmin z16.h, p5/M, z16.h, z1.h\n"
- "fmin z17.h, p5/M, z17.h, z1.h\n"
- "fmin z18.h, p5/M, z18.h, z1.h\n"
- "fmin z19.h, p5/M, z19.h, z1.h\n"
- "fmin z20.h, p5/M, z20.h, z1.h\n"
- "fmin z21.h, p5/M, z21.h, z1.h\n"
- "fmin z22.h, p5/M, z22.h, z1.h\n"
- "fmin z23.h, p5/M, z23.h, z1.h\n"
- "fmin z24.h, p5/M, z24.h, z1.h\n"
- "fmin z25.h, p5/M, z25.h, z1.h\n"
- "fmin z26.h, p5/M, z26.h, z1.h\n"
- "fmin z27.h, p5/M, z27.h, z1.h\n"
- "fmax z8.h, p5/M, z8.h, z0.h\n"
- "fmax z9.h, p5/M, z9.h, z0.h\n"
- "fmax z10.h, p5/M, z10.h, z0.h\n"
- "fmax z11.h, p5/M, z11.h, z0.h\n"
- "fmax z12.h, p5/M, z12.h, z0.h\n"
- "fmax z13.h, p5/M, z13.h, z0.h\n"
- "fmax z14.h, p5/M, z14.h, z0.h\n"
- "fmax z15.h, p5/M, z15.h, z0.h\n"
- "fmax z16.h, p5/M, z16.h, z0.h\n"
- "fmax z17.h, p5/M, z17.h, z0.h\n"
- "fmax z18.h, p5/M, z18.h, z0.h\n"
- "fmax z19.h, p5/M, z19.h, z0.h\n"
- "fmax z20.h, p5/M, z20.h, z0.h\n"
- "fmax z21.h, p5/M, z21.h, z0.h\n"
- "fmax z22.h, p5/M, z22.h, z0.h\n"
- "fmax z23.h, p5/M, z23.h, z0.h\n"
- "fmax z24.h, p5/M, z24.h, z0.h\n"
- "fmax z25.h, p5/M, z25.h, z0.h\n"
- "fmax z26.h, p5/M, z26.h, z0.h\n"
- "fmax z27.h, p5/M, z27.h, z0.h\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z1.h }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z0.h }, p5/Z, [x19]\n"
+ "fmin z8.h, p5/M, z8.h, z0.h\n"
+ "fmin z9.h, p5/M, z9.h, z0.h\n"
+ "fmin z10.h, p5/M, z10.h, z0.h\n"
+ "fmin z11.h, p5/M, z11.h, z0.h\n"
+ "fmin z12.h, p5/M, z12.h, z0.h\n"
+ "fmax z8.h, p5/M, z8.h, z1.h\n"
+ "fmax z9.h, p5/M, z9.h, z1.h\n"
+ "fmax z10.h, p5/M, z10.h, z1.h\n"
+ "fmax z11.h, p5/M, z11.h, z1.h\n"
+ "fmax z12.h, p5/M, z12.h, z1.h\n"
+ "fmin z13.h, p5/M, z13.h, z0.h\n"
+ "fmin z14.h, p5/M, z14.h, z0.h\n"
+ "fmin z15.h, p5/M, z15.h, z0.h\n"
+ "fmin z16.h, p5/M, z16.h, z0.h\n"
+ "fmax z13.h, p5/M, z13.h, z1.h\n"
+ "fmax z14.h, p5/M, z14.h, z1.h\n"
+ "fmax z15.h, p5/M, z15.h, z1.h\n"
+ "fmax z16.h, p5/M, z16.h, z1.h\n"
+ "fmin z17.h, p5/M, z17.h, z0.h\n"
+ "fmin z18.h, p5/M, z18.h, z0.h\n"
+ "fmin z19.h, p5/M, z19.h, z0.h\n"
+ "fmin z20.h, p5/M, z20.h, z0.h\n"
+ "fmax z17.h, p5/M, z17.h, z1.h\n"
+ "fmax z18.h, p5/M, z18.h, z1.h\n"
+ "fmax z19.h, p5/M, z19.h, z1.h\n"
+ "fmax z20.h, p5/M, z20.h, z1.h\n"
+ "fmin z21.h, p5/M, z21.h, z0.h\n"
+ "fmin z22.h, p5/M, z22.h, z0.h\n"
+ "fmin z23.h, p5/M, z23.h, z0.h\n"
+ "fmin z24.h, p5/M, z24.h, z0.h\n"
+ "fmax z21.h, p5/M, z21.h, z1.h\n"
+ "fmax z22.h, p5/M, z22.h, z1.h\n"
+ "fmax z23.h, p5/M, z23.h, z1.h\n"
+ "fmax z24.h, p5/M, z24.h, z1.h\n"
+ "fmin z25.h, p5/M, z25.h, z0.h\n"
+ "fmin z26.h, p5/M, z26.h, z0.h\n"
+ "fmin z27.h, p5/M, z27.h, z0.h\n"
+ "fmax z25.h, p5/M, z25.h, z1.h\n"
+ "fmax z26.h, p5/M, z26.h, z1.h\n"
+ "fmax z27.h, p5/M, z27.h, z1.h\n"
"64:" // Height 5: No activation
- "st1h { z8.h }, p4, [x9]\n"
- "st1h { z9.h }, p3, [x9, #1, MUL VL]\n"
- "st1h { z10.h }, p2, [x9, #2, MUL VL]\n"
- "st1h { z11.h }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1h { z12.h }, p4, [x25]\n"
- "st1h { z13.h }, p3, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p2, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p1, [x25, #3, MUL VL]\n"
- "st1h { z16.h }, p4, [x24]\n"
- "st1h { z17.h }, p3, [x24, #1, MUL VL]\n"
- "st1h { z18.h }, p2, [x24, #2, MUL VL]\n"
- "st1h { z19.h }, p1, [x24, #3, MUL VL]\n"
- "st1h { z20.h }, p4, [x23]\n"
- "st1h { z21.h }, p3, [x23, #1, MUL VL]\n"
- "st1h { z22.h }, p2, [x23, #2, MUL VL]\n"
- "st1h { z23.h }, p1, [x23, #3, MUL VL]\n"
- "st1h { z24.h }, p4, [x22]\n"
- "st1h { z25.h }, p3, [x22, #1, MUL VL]\n"
- "st1h { z26.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z27.h }, p1, [x22, #3, MUL VL]\n"
+ "st1h { z8.h }, p4, [x28]\n"
+ "st1h { z9.h }, p3, [x28, #1, MUL VL]\n"
+ "st1h { z10.h }, p2, [x28, #2, MUL VL]\n"
+ "st1h { z11.h }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1h { z12.h }, p4, [x24]\n"
+ "st1h { z13.h }, p3, [x24, #1, MUL VL]\n"
+ "st1h { z14.h }, p2, [x24, #2, MUL VL]\n"
+ "st1h { z15.h }, p1, [x24, #3, MUL VL]\n"
+ "st1h { z16.h }, p4, [x23]\n"
+ "st1h { z17.h }, p3, [x23, #1, MUL VL]\n"
+ "st1h { z18.h }, p2, [x23, #2, MUL VL]\n"
+ "st1h { z19.h }, p1, [x23, #3, MUL VL]\n"
+ "st1h { z20.h }, p4, [x22]\n"
+ "st1h { z21.h }, p3, [x22, #1, MUL VL]\n"
+ "st1h { z22.h }, p2, [x22, #2, MUL VL]\n"
+ "st1h { z23.h }, p1, [x22, #3, MUL VL]\n"
+ "st1h { z24.h }, p4, [x21]\n"
+ "st1h { z25.h }, p3, [x21, #1, MUL VL]\n"
+ "st1h { z26.h }, p2, [x21, #2, MUL VL]\n"
+ "st1h { z27.h }, p1, [x21, #3, MUL VL]\n"
"65:" // Height 5: Writeback done
"dech x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 54b\n"
"b 80f\n"
"66:" // Height 6
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0xc\n"
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x19, #0xc\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"67:" // Height 6: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.h, x20, x11\n"
- "inch x20\n"
- "whilelt p3.h, x20, x11\n"
- "inch x20\n"
- "whilelt p2.h, x20, x11\n"
- "inch x20\n"
- "whilelt p1.h, x20, x11\n"
- "cbz x12, 68f\n"
- "ld1h { z8.h }, p5/Z, [x12]\n"
- "ld1h { z9.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.h, x19, x11\n"
+ "inch x19\n"
+ "whilelt p3.h, x19, x11\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x11\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x11\n"
+ "cbz x9, 68f\n"
+ "ld1h { z8.h }, p5/Z, [x9]\n"
"mov z12.d, z8.d\n"
+ "ld1h { z9.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "mov z16.d, z8.d\n"
+ "ld1h { z10.h }, p5/Z, [x9, #2, MUL VL]\n"
+ "mov z20.d, z8.d\n"
+ "ld1h { z11.h }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"mov z13.d, z9.d\n"
- "ld1h { z10.h }, p5/Z, [x12, #2, MUL VL]\n"
- "ld1h { z11.h }, p5/Z, [x12, #3, MUL VL]\n"
+ "mov z17.d, z9.d\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
- "mov z16.d, z8.d\n"
- "mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
@@ -2330,36 +2330,36 @@ void sve_hybrid_fp16_mla_6x4VL (
"b 70f\n"
"68:" // Height 6: no bias
"tbz %x[flags], #0, 69f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "ld1h { z8.h }, p4/Z, [x9]\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
- "ld1h { z9.h }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1h { z10.h }, p2/Z, [x9, #2, MUL VL]\n"
- "add x21, x22, x20, LSL #1\n"
- "ld1h { z11.h }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1h { z12.h }, p4/Z, [x25]\n"
- "ld1h { z13.h }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1h { z14.h }, p2/Z, [x25, #2, MUL VL]\n"
- "ld1h { z15.h }, p1/Z, [x25, #3, MUL VL]\n"
- "ld1h { z16.h }, p4/Z, [x24]\n"
- "ld1h { z17.h }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1h { z18.h }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1h { z19.h }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1h { z20.h }, p4/Z, [x23]\n"
- "ld1h { z21.h }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1h { z22.h }, p2/Z, [x23, #2, MUL VL]\n"
- "ld1h { z23.h }, p1/Z, [x23, #3, MUL VL]\n"
- "ld1h { z24.h }, p4/Z, [x22]\n"
- "ld1h { z25.h }, p3/Z, [x22, #1, MUL VL]\n"
- "ld1h { z26.h }, p2/Z, [x22, #2, MUL VL]\n"
- "ld1h { z27.h }, p1/Z, [x22, #3, MUL VL]\n"
- "ld1h { z28.h }, p4/Z, [x21]\n"
- "ld1h { z29.h }, p3/Z, [x21, #1, MUL VL]\n"
- "ld1h { z30.h }, p2/Z, [x21, #2, MUL VL]\n"
- "ld1h { z31.h }, p1/Z, [x21, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1h { z8.h }, p4/Z, [x28]\n"
+ "add x24, x28, x19, LSL #1\n"
+ "ld1h { z9.h }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1h { z10.h }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x23, x24, x19, LSL #1\n"
+ "ld1h { z11.h }, p1/Z, [x28, #3, MUL VL]\n"
+ "add x22, x23, x19, LSL #1\n"
+ "ld1h { z12.h }, p4/Z, [x24]\n"
+ "add x21, x22, x19, LSL #1\n"
+ "ld1h { z13.h }, p3/Z, [x24, #1, MUL VL]\n"
+ "add x20, x21, x19, LSL #1\n"
+ "ld1h { z14.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z15.h }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z16.h }, p4/Z, [x23]\n"
+ "ld1h { z17.h }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z18.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z19.h }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1h { z20.h }, p4/Z, [x22]\n"
+ "ld1h { z21.h }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z22.h }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z23.h }, p1/Z, [x22, #3, MUL VL]\n"
+ "ld1h { z24.h }, p4/Z, [x21]\n"
+ "ld1h { z25.h }, p3/Z, [x21, #1, MUL VL]\n"
+ "ld1h { z26.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1h { z27.h }, p1/Z, [x21, #3, MUL VL]\n"
+ "ld1h { z28.h }, p4/Z, [x20]\n"
+ "ld1h { z29.h }, p3/Z, [x20, #1, MUL VL]\n"
+ "ld1h { z30.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z31.h }, p1/Z, [x20, #3, MUL VL]\n"
"b 70f\n"
"69:" // Height 6: no accumulate
"mov z8.b, #0x0\n"
@@ -2387,67 +2387,67 @@ void sve_hybrid_fp16_mla_6x4VL (
"mov z30.b, #0x0\n"
"mov z31.b, #0x0\n"
"70:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"71:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 72f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 73f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #1\n"
- "add x25, x25, x20, LSL #1\n"
- "add x24, x24, x20, LSL #1\n"
- "add x23, x23, x20, LSL #1\n"
- "add x22, x22, x20, LSL #1\n"
- "add x21, x21, x20, LSL #1\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 73f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
+ "add x20, x20, x19, LSL #1\n"
"b 73f\n"
"72:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
"73:" // Height 6: input setup done
- "cmp x27, #0x8\n"
+ "cmp x26, #0x8\n"
"ble 75f\n"
"74:" // Height 6: Multiply loop: Main loop head
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
- "sub x27, x27, #0x8\n"
- "ld1rqh { z2.h }, p0/Z, [x24]\n"
- "ld1rqh { z3.h }, p0/Z, [x23]\n"
- "cmp x27, #0x8\n"
- "add x26, x26, #0x10\n"
- "ld1rqh { z4.h }, p0/Z, [x22]\n"
- "ld1rqh { z5.h }, p0/Z, [x21]\n"
- "add x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
+ "whilelt p0.h, XZR, x26\n"
"ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x26, x26, #0x8\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
"fmla z8.h, z6.h, z0.h[0]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "cmp x26, #0x8\n"
+ "fmla z9.h, z7.h, z0.h[0]\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
+ "add x25, x25, #0x10\n"
"fmla z12.h, z6.h, z1.h[0]\n"
+ "ld1rqh { z3.h }, p0/Z, [x22]\n"
+ "add x24, x24, #0x10\n"
"fmla z16.h, z6.h, z2.h[0]\n"
- "fmla z20.h, z6.h, z3.h[0]\n"
+ "ld1rqh { z4.h }, p0/Z, [x21]\n"
"add x23, x23, #0x10\n"
+ "fmla z13.h, z7.h, z1.h[0]\n"
+ "ld1rqh { z5.h }, p0/Z, [x20]\n"
"add x22, x22, #0x10\n"
+ "fmla z20.h, z6.h, z3.h[0]\n"
+ "add x21, x21, #0x10\n"
+ "fmla z17.h, z7.h, z2.h[0]\n"
+ "add x20, x20, #0x10\n"
"fmla z24.h, z6.h, z4.h[0]\n"
"fmla z28.h, z6.h, z5.h[0]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "add x21, x21, #0x10\n"
- "fmla z9.h, z7.h, z0.h[0]\n"
- "fmla z13.h, z7.h, z1.h[0]\n"
- "fmla z17.h, z7.h, z2.h[0]\n"
"fmla z21.h, z7.h, z3.h[0]\n"
"fmla z25.h, z7.h, z4.h[0]\n"
"fmla z29.h, z7.h, z5.h[0]\n"
@@ -2664,25 +2664,25 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z31.h, z7.h, z5.h[7]\n"
"bgt 74b\n"
"75:" // Height 6: Multiply loop: Single iteration only
- "whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
- "ld1rqh { z2.h }, p0/Z, [x24]\n"
- "ld1rqh { z3.h }, p0/Z, [x23]\n"
- "ld1rqh { z4.h }, p0/Z, [x22]\n"
- "ld1rqh { z5.h }, p0/Z, [x21]\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
+ "whilelt p0.h, XZR, x26\n"
"ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
"fmla z8.h, z6.h, z0.h[0]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "fmla z9.h, z7.h, z0.h[0]\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
+ "ld1rqh { z3.h }, p0/Z, [x22]\n"
"fmla z12.h, z6.h, z1.h[0]\n"
+ "ld1rqh { z4.h }, p0/Z, [x21]\n"
+ "fmla z13.h, z7.h, z1.h[0]\n"
+ "ld1rqh { z5.h }, p0/Z, [x20]\n"
"fmla z16.h, z6.h, z2.h[0]\n"
"fmla z20.h, z6.h, z3.h[0]\n"
"fmla z24.h, z6.h, z4.h[0]\n"
"fmla z28.h, z6.h, z5.h[0]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "fmla z9.h, z7.h, z0.h[0]\n"
- "fmla z13.h, z7.h, z1.h[0]\n"
"fmla z17.h, z7.h, z2.h[0]\n"
"fmla z21.h, z7.h, z3.h[0]\n"
"fmla z25.h, z7.h, z4.h[0]\n"
@@ -2703,12 +2703,12 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z31.h, z7.h, z5.h[0]\n"
"ble 76f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[1]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.h, z6.h, z1.h[1]\n"
"fmla z16.h, z6.h, z2.h[1]\n"
"fmla z20.h, z6.h, z3.h[1]\n"
- "subs x27, x27, #0x1\n"
"fmla z24.h, z6.h, z4.h[1]\n"
"fmla z28.h, z6.h, z5.h[1]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
@@ -2734,12 +2734,12 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z31.h, z7.h, z5.h[1]\n"
"ble 76f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[2]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.h, z6.h, z1.h[2]\n"
"fmla z16.h, z6.h, z2.h[2]\n"
"fmla z20.h, z6.h, z3.h[2]\n"
- "subs x27, x27, #0x1\n"
"fmla z24.h, z6.h, z4.h[2]\n"
"fmla z28.h, z6.h, z5.h[2]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
@@ -2765,12 +2765,12 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z31.h, z7.h, z5.h[2]\n"
"ble 76f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[3]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.h, z6.h, z1.h[3]\n"
"fmla z16.h, z6.h, z2.h[3]\n"
"fmla z20.h, z6.h, z3.h[3]\n"
- "subs x27, x27, #0x1\n"
"fmla z24.h, z6.h, z4.h[3]\n"
"fmla z28.h, z6.h, z5.h[3]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
@@ -2796,12 +2796,12 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z31.h, z7.h, z5.h[3]\n"
"ble 76f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[4]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.h, z6.h, z1.h[4]\n"
"fmla z16.h, z6.h, z2.h[4]\n"
"fmla z20.h, z6.h, z3.h[4]\n"
- "subs x27, x27, #0x1\n"
"fmla z24.h, z6.h, z4.h[4]\n"
"fmla z28.h, z6.h, z5.h[4]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
@@ -2827,12 +2827,12 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z31.h, z7.h, z5.h[4]\n"
"ble 76f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[5]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.h, z6.h, z1.h[5]\n"
"fmla z16.h, z6.h, z2.h[5]\n"
"fmla z20.h, z6.h, z3.h[5]\n"
- "subs x27, x27, #0x1\n"
"fmla z24.h, z6.h, z4.h[5]\n"
"fmla z28.h, z6.h, z5.h[5]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
@@ -2858,12 +2858,12 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z31.h, z7.h, z5.h[5]\n"
"ble 76f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[6]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.h, z6.h, z1.h[6]\n"
"fmla z16.h, z6.h, z2.h[6]\n"
"fmla z20.h, z6.h, z3.h[6]\n"
- "subs x27, x27, #0x1\n"
"fmla z24.h, z6.h, z4.h[6]\n"
"fmla z28.h, z6.h, z5.h[6]\n"
"ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
@@ -2889,8 +2889,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z31.h, z7.h, z5.h[6]\n"
"ble 76f\n"
"ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z6.h, z0.h[7]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z12.h, z6.h, z1.h[7]\n"
"fmla z16.h, z6.h, z2.h[7]\n"
"fmla z20.h, z6.h, z3.h[7]\n"
@@ -2918,115 +2918,115 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z27.h, z7.h, z4.h[7]\n"
"fmla z31.h, z7.h, z5.h[7]\n"
"76:" // Height 6: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 71b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
"tbz %x[flags], #1, 77f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p5/Z, [x20]\n"
- "fmin z8.h, p5/M, z8.h, z1.h\n"
- "fmin z9.h, p5/M, z9.h, z1.h\n"
- "fmin z10.h, p5/M, z10.h, z1.h\n"
- "fmin z11.h, p5/M, z11.h, z1.h\n"
- "fmin z12.h, p5/M, z12.h, z1.h\n"
- "fmin z13.h, p5/M, z13.h, z1.h\n"
- "fmin z14.h, p5/M, z14.h, z1.h\n"
- "fmin z15.h, p5/M, z15.h, z1.h\n"
- "fmin z16.h, p5/M, z16.h, z1.h\n"
- "fmin z17.h, p5/M, z17.h, z1.h\n"
- "fmin z18.h, p5/M, z18.h, z1.h\n"
- "fmin z19.h, p5/M, z19.h, z1.h\n"
- "fmin z20.h, p5/M, z20.h, z1.h\n"
- "fmin z21.h, p5/M, z21.h, z1.h\n"
- "fmin z22.h, p5/M, z22.h, z1.h\n"
- "fmin z23.h, p5/M, z23.h, z1.h\n"
- "fmin z24.h, p5/M, z24.h, z1.h\n"
- "fmin z25.h, p5/M, z25.h, z1.h\n"
- "fmin z26.h, p5/M, z26.h, z1.h\n"
- "fmin z27.h, p5/M, z27.h, z1.h\n"
- "fmin z28.h, p5/M, z28.h, z1.h\n"
- "fmin z29.h, p5/M, z29.h, z1.h\n"
- "fmin z30.h, p5/M, z30.h, z1.h\n"
- "fmin z31.h, p5/M, z31.h, z1.h\n"
- "fmax z8.h, p5/M, z8.h, z0.h\n"
- "fmax z9.h, p5/M, z9.h, z0.h\n"
- "fmax z10.h, p5/M, z10.h, z0.h\n"
- "fmax z11.h, p5/M, z11.h, z0.h\n"
- "fmax z12.h, p5/M, z12.h, z0.h\n"
- "fmax z13.h, p5/M, z13.h, z0.h\n"
- "fmax z14.h, p5/M, z14.h, z0.h\n"
- "fmax z15.h, p5/M, z15.h, z0.h\n"
- "fmax z16.h, p5/M, z16.h, z0.h\n"
- "fmax z17.h, p5/M, z17.h, z0.h\n"
- "fmax z18.h, p5/M, z18.h, z0.h\n"
- "fmax z19.h, p5/M, z19.h, z0.h\n"
- "fmax z20.h, p5/M, z20.h, z0.h\n"
- "fmax z21.h, p5/M, z21.h, z0.h\n"
- "fmax z22.h, p5/M, z22.h, z0.h\n"
- "fmax z23.h, p5/M, z23.h, z0.h\n"
- "fmax z24.h, p5/M, z24.h, z0.h\n"
- "fmax z25.h, p5/M, z25.h, z0.h\n"
- "fmax z26.h, p5/M, z26.h, z0.h\n"
- "fmax z27.h, p5/M, z27.h, z0.h\n"
- "fmax z28.h, p5/M, z28.h, z0.h\n"
- "fmax z29.h, p5/M, z29.h, z0.h\n"
- "fmax z30.h, p5/M, z30.h, z0.h\n"
- "fmax z31.h, p5/M, z31.h, z0.h\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z1.h }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z0.h }, p5/Z, [x19]\n"
+ "fmin z8.h, p5/M, z8.h, z0.h\n"
+ "fmin z9.h, p5/M, z9.h, z0.h\n"
+ "fmin z10.h, p5/M, z10.h, z0.h\n"
+ "fmin z11.h, p5/M, z11.h, z0.h\n"
+ "fmin z12.h, p5/M, z12.h, z0.h\n"
+ "fmax z8.h, p5/M, z8.h, z1.h\n"
+ "fmax z9.h, p5/M, z9.h, z1.h\n"
+ "fmax z10.h, p5/M, z10.h, z1.h\n"
+ "fmax z11.h, p5/M, z11.h, z1.h\n"
+ "fmax z12.h, p5/M, z12.h, z1.h\n"
+ "fmin z13.h, p5/M, z13.h, z0.h\n"
+ "fmin z14.h, p5/M, z14.h, z0.h\n"
+ "fmin z15.h, p5/M, z15.h, z0.h\n"
+ "fmin z16.h, p5/M, z16.h, z0.h\n"
+ "fmax z13.h, p5/M, z13.h, z1.h\n"
+ "fmax z14.h, p5/M, z14.h, z1.h\n"
+ "fmax z15.h, p5/M, z15.h, z1.h\n"
+ "fmax z16.h, p5/M, z16.h, z1.h\n"
+ "fmin z17.h, p5/M, z17.h, z0.h\n"
+ "fmin z18.h, p5/M, z18.h, z0.h\n"
+ "fmin z19.h, p5/M, z19.h, z0.h\n"
+ "fmin z20.h, p5/M, z20.h, z0.h\n"
+ "fmax z17.h, p5/M, z17.h, z1.h\n"
+ "fmax z18.h, p5/M, z18.h, z1.h\n"
+ "fmax z19.h, p5/M, z19.h, z1.h\n"
+ "fmax z20.h, p5/M, z20.h, z1.h\n"
+ "fmin z21.h, p5/M, z21.h, z0.h\n"
+ "fmin z22.h, p5/M, z22.h, z0.h\n"
+ "fmin z23.h, p5/M, z23.h, z0.h\n"
+ "fmin z24.h, p5/M, z24.h, z0.h\n"
+ "fmax z21.h, p5/M, z21.h, z1.h\n"
+ "fmax z22.h, p5/M, z22.h, z1.h\n"
+ "fmax z23.h, p5/M, z23.h, z1.h\n"
+ "fmax z24.h, p5/M, z24.h, z1.h\n"
+ "fmin z25.h, p5/M, z25.h, z0.h\n"
+ "fmin z26.h, p5/M, z26.h, z0.h\n"
+ "fmin z27.h, p5/M, z27.h, z0.h\n"
+ "fmin z28.h, p5/M, z28.h, z0.h\n"
+ "fmax z25.h, p5/M, z25.h, z1.h\n"
+ "fmax z26.h, p5/M, z26.h, z1.h\n"
+ "fmax z27.h, p5/M, z27.h, z1.h\n"
+ "fmax z28.h, p5/M, z28.h, z1.h\n"
+ "fmin z29.h, p5/M, z29.h, z0.h\n"
+ "fmin z30.h, p5/M, z30.h, z0.h\n"
+ "fmin z31.h, p5/M, z31.h, z0.h\n"
+ "fmax z29.h, p5/M, z29.h, z1.h\n"
+ "fmax z30.h, p5/M, z30.h, z1.h\n"
+ "fmax z31.h, p5/M, z31.h, z1.h\n"
"77:" // Height 6: No activation
- "st1h { z8.h }, p4, [x9]\n"
- "st1h { z9.h }, p3, [x9, #1, MUL VL]\n"
- "st1h { z10.h }, p2, [x9, #2, MUL VL]\n"
- "st1h { z11.h }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1h { z12.h }, p4, [x25]\n"
- "st1h { z13.h }, p3, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p2, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p1, [x25, #3, MUL VL]\n"
- "st1h { z16.h }, p4, [x24]\n"
- "st1h { z17.h }, p3, [x24, #1, MUL VL]\n"
- "st1h { z18.h }, p2, [x24, #2, MUL VL]\n"
- "st1h { z19.h }, p1, [x24, #3, MUL VL]\n"
- "st1h { z20.h }, p4, [x23]\n"
- "st1h { z21.h }, p3, [x23, #1, MUL VL]\n"
- "st1h { z22.h }, p2, [x23, #2, MUL VL]\n"
- "st1h { z23.h }, p1, [x23, #3, MUL VL]\n"
- "st1h { z24.h }, p4, [x22]\n"
- "st1h { z25.h }, p3, [x22, #1, MUL VL]\n"
- "st1h { z26.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z27.h }, p1, [x22, #3, MUL VL]\n"
- "st1h { z28.h }, p4, [x21]\n"
- "st1h { z29.h }, p3, [x21, #1, MUL VL]\n"
- "st1h { z30.h }, p2, [x21, #2, MUL VL]\n"
- "st1h { z31.h }, p1, [x21, #3, MUL VL]\n"
+ "st1h { z8.h }, p4, [x28]\n"
+ "st1h { z9.h }, p3, [x28, #1, MUL VL]\n"
+ "st1h { z10.h }, p2, [x28, #2, MUL VL]\n"
+ "st1h { z11.h }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1h { z12.h }, p4, [x24]\n"
+ "st1h { z13.h }, p3, [x24, #1, MUL VL]\n"
+ "st1h { z14.h }, p2, [x24, #2, MUL VL]\n"
+ "st1h { z15.h }, p1, [x24, #3, MUL VL]\n"
+ "st1h { z16.h }, p4, [x23]\n"
+ "st1h { z17.h }, p3, [x23, #1, MUL VL]\n"
+ "st1h { z18.h }, p2, [x23, #2, MUL VL]\n"
+ "st1h { z19.h }, p1, [x23, #3, MUL VL]\n"
+ "st1h { z20.h }, p4, [x22]\n"
+ "st1h { z21.h }, p3, [x22, #1, MUL VL]\n"
+ "st1h { z22.h }, p2, [x22, #2, MUL VL]\n"
+ "st1h { z23.h }, p1, [x22, #3, MUL VL]\n"
+ "st1h { z24.h }, p4, [x21]\n"
+ "st1h { z25.h }, p3, [x21, #1, MUL VL]\n"
+ "st1h { z26.h }, p2, [x21, #2, MUL VL]\n"
+ "st1h { z27.h }, p1, [x21, #3, MUL VL]\n"
+ "st1h { z28.h }, p4, [x20]\n"
+ "st1h { z29.h }, p3, [x20, #1, MUL VL]\n"
+ "st1h { z30.h }, p2, [x20, #2, MUL VL]\n"
+ "st1h { z31.h }, p1, [x20, #3, MUL VL]\n"
"78:" // Height 6: Writeback done
"dech x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 67b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 80f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 79f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"79:" // Update direct input
- "mov x20, #0xc\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0xc\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"80:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/a64fx.cpp
index 9ae51af59b..30b6a54277 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/a64fx.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/a64fx.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -102,32 +102,32 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"cmp %x[M], #0x2\n"
"bgt 25f\n"
"beq 13f\n"
- "mov x12, %x[bias]\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x11, %x[bias]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "mov x20, #0x0\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "incw x20\n"
- "whilelt p0.s, x20, x11\n"
- "cbz x12, 3f\n"
- "ld1w { z8.s }, p4/Z, [x12]\n"
- "ld1w { z9.s }, p4/Z, [x12, #1, MUL VL]\n"
- "ld1w { z10.s }, p4/Z, [x12, #2, MUL VL]\n"
- "ld1w { z11.s }, p4/Z, [x12, #3, MUL VL]\n"
- "addvl x12, x12, #4\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p0.s, x19, x10\n"
+ "cbz x11, 3f\n"
+ "ld1w { z8.s }, p4/Z, [x11]\n"
+ "ld1w { z9.s }, p4/Z, [x11, #1, MUL VL]\n"
+ "ld1w { z10.s }, p4/Z, [x11, #2, MUL VL]\n"
+ "ld1w { z11.s }, p4/Z, [x11, #3, MUL VL]\n"
+ "addvl x11, x11, #4\n"
"b 5f\n"
"3:" // Height 1: no bias
"tbz %x[flags], #0, 4f\n"
- "ld1w { z8.s }, p3/Z, [x9]\n"
- "ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
+ "ld1w { z8.s }, p3/Z, [x28]\n"
+ "ld1w { z9.s }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p1/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z11.s }, p0/Z, [x28, #3, MUL VL]\n"
"b 5f\n"
"4:" // Height 1: no accumulate
"mov z8.b, #0x0\n"
@@ -135,58 +135,58 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"mov z10.b, #0x0\n"
"mov z11.b, #0x0\n"
"5:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"6:" // Height 1: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 7f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 8f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 8f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
"b 8f\n"
"7:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"8:" // Height 1: input setup done
- "subs x27, x27, #0x1\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1w { z6.s }, p4/Z, [x10]\n"
- "ld1w { z7.s }, p4/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
+ "ld1w { z7.s }, p4/Z, [x9, #1, MUL VL]\n"
"ble 10f\n"
"9:" // Height 1: Multiply loop: Main loop
"fmla z8.s, p4/M, z6.s, z0.s\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
- "ld1w { z6.s }, p4/Z, [x10, #2, MUL VL]\n"
- "ld1w { z7.s }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
- "add x26, x26, #0x4\n"
+ "ld1w { z6.s }, p4/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z7.s }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
+ "add x25, x25, #0x4\n"
"fmla z10.s, p4/M, z6.s, z0.s\n"
"fmla z11.s, p4/M, z7.s, z0.s\n"
- "subs x27, x27, #0x1\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1w { z6.s }, p4/Z, [x10]\n"
- "ld1w { z7.s }, p4/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
+ "ld1w { z7.s }, p4/Z, [x9, #1, MUL VL]\n"
"bgt 9b\n"
"10:" // Height 1: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.s, p4/M, z6.s, z0.s\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
- "ld1w { z6.s }, p4/Z, [x10, #2, MUL VL]\n"
- "ld1w { z7.s }, p4/Z, [x10, #3, MUL VL]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ld1w { z6.s }, p4/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z7.s }, p4/Z, [x9, #3, MUL VL]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"fmla z10.s, p4/M, z6.s, z0.s\n"
"fmla z11.s, p4/M, z7.s, z0.s\n"
- "addvl x10, x10, #4\n"
+ "addvl x9, x9, #4\n"
"bne 6b\n"
"tbz %x[flags], #1, 11f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p4/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p4/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p4/Z, [x19]\n"
"fmin z8.s, p4/M, z8.s, z1.s\n"
"fmin z9.s, p4/M, z9.s, z1.s\n"
"fmin z10.s, p4/M, z10.s, z1.s\n"
@@ -196,53 +196,53 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"fmax z10.s, p4/M, z10.s, z0.s\n"
"fmax z11.s, p4/M, z11.s, z0.s\n"
"11:" // Height 1: No activation
- "st1w { z8.s }, p3, [x9]\n"
- "st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p1, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p0, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "st1w { z8.s }, p3, [x28]\n"
+ "st1w { z9.s }, p2, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p1, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p0, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"12:" // Height 1: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 2b\n"
"b 74f\n"
"13:" // Height 2
- "mov x12, %x[bias]\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x11, %x[bias]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"14:" // Height 2: Column loop
- "mov x20, #0x0\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "incw x20\n"
- "whilelt p0.s, x20, x11\n"
- "cbz x12, 15f\n"
- "ld1w { z8.s }, p4/Z, [x12]\n"
- "ld1w { z9.s }, p4/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p0.s, x19, x10\n"
+ "cbz x11, 15f\n"
+ "ld1w { z8.s }, p4/Z, [x11]\n"
+ "ld1w { z9.s }, p4/Z, [x11, #1, MUL VL]\n"
+ "ld1w { z10.s }, p4/Z, [x11, #2, MUL VL]\n"
"mov z12.d, z8.d\n"
"mov z13.d, z9.d\n"
- "ld1w { z10.s }, p4/Z, [x12, #2, MUL VL]\n"
- "ld1w { z11.s }, p4/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z11.s }, p4/Z, [x11, #3, MUL VL]\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
- "addvl x12, x12, #4\n"
+ "addvl x11, x11, #4\n"
"b 17f\n"
"15:" // Height 2: no bias
"tbz %x[flags], #0, 16f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "ld1w { z8.s }, p3/Z, [x9]\n"
- "ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p3/Z, [x25]\n"
- "ld1w { z13.s }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1w { z15.s }, p0/Z, [x25, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "ld1w { z8.s }, p3/Z, [x28]\n"
+ "ld1w { z9.s }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p1/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z11.s }, p0/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x24]\n"
+ "ld1w { z13.s }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p0/Z, [x24, #3, MUL VL]\n"
"b 17f\n"
"16:" // Height 2: no accumulate
"mov z8.b, #0x0\n"
@@ -254,74 +254,74 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"mov z14.b, #0x0\n"
"mov z15.b, #0x0\n"
"17:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"18:" // Height 2: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 19f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 20f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 20f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
"b 20f\n"
"19:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
"20:" // Height 2: input setup done
- "subs x27, x27, #0x1\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
- "ld1w { z6.s }, p4/Z, [x10]\n"
- "ld1w { z7.s }, p4/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
+ "ld1w { z7.s }, p4/Z, [x9, #1, MUL VL]\n"
"ble 22f\n"
"21:" // Height 2: Multiply loop: Main loop
"fmla z8.s, p4/M, z6.s, z0.s\n"
"fmla z12.s, p4/M, z6.s, z1.s\n"
- "ld1w { z6.s }, p4/Z, [x10, #2, MUL VL]\n"
- "add x26, x26, #0x4\n"
+ "ld1w { z6.s }, p4/Z, [x9, #2, MUL VL]\n"
+ "add x25, x25, #0x4\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
- "ld1w { z7.s }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
- "subs x27, x27, #0x1\n"
- "add x25, x25, #0x4\n"
+ "ld1w { z7.s }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
+ "subs x26, x26, #0x1\n"
+ "add x24, x24, #0x4\n"
"fmla z10.s, p4/M, z6.s, z0.s\n"
"fmla z14.s, p4/M, z6.s, z1.s\n"
"fmla z11.s, p4/M, z7.s, z0.s\n"
"fmla z15.s, p4/M, z7.s, z1.s\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
- "ld1w { z6.s }, p4/Z, [x10]\n"
- "ld1w { z7.s }, p4/Z, [x10, #1, MUL VL]\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
+ "ld1w { z7.s }, p4/Z, [x9, #1, MUL VL]\n"
"bgt 21b\n"
"22:" // Height 2: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.s, p4/M, z6.s, z0.s\n"
"fmla z12.s, p4/M, z6.s, z1.s\n"
- "ld1w { z6.s }, p4/Z, [x10, #2, MUL VL]\n"
+ "ld1w { z6.s }, p4/Z, [x9, #2, MUL VL]\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
- "ld1w { z7.s }, p4/Z, [x10, #3, MUL VL]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ld1w { z7.s }, p4/Z, [x9, #3, MUL VL]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"fmla z10.s, p4/M, z6.s, z0.s\n"
"fmla z14.s, p4/M, z6.s, z1.s\n"
- "addvl x10, x10, #4\n"
+ "addvl x9, x9, #4\n"
"fmla z11.s, p4/M, z7.s, z0.s\n"
"fmla z15.s, p4/M, z7.s, z1.s\n"
"bne 18b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #2\n"
"tbz %x[flags], #1, 23f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p4/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p4/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p4/Z, [x19]\n"
"fmin z8.s, p4/M, z8.s, z1.s\n"
"fmin z9.s, p4/M, z9.s, z1.s\n"
"fmin z10.s, p4/M, z10.s, z1.s\n"
@@ -339,66 +339,66 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"fmax z14.s, p4/M, z14.s, z0.s\n"
"fmax z15.s, p4/M, z15.s, z0.s\n"
"23:" // Height 2: No activation
- "st1w { z8.s }, p3, [x9]\n"
- "st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p1, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p0, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p3, [x25]\n"
- "st1w { z13.s }, p2, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p1, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p0, [x25, #3, MUL VL]\n"
+ "st1w { z8.s }, p3, [x28]\n"
+ "st1w { z9.s }, p2, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p1, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p0, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z12.s }, p3, [x24]\n"
+ "st1w { z13.s }, p2, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x24, #3, MUL VL]\n"
"24:" // Height 2: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 14b\n"
"b 74f\n"
"25:" // Height 3
- "mov x12, %x[bias]\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x11, %x[bias]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"26:" // Height 3: Column loop
- "mov x20, #0x0\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "incw x20\n"
- "whilelt p0.s, x20, x11\n"
- "cbz x12, 27f\n"
- "ld1w { z8.s }, p4/Z, [x12]\n"
- "ld1w { z9.s }, p4/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p0.s, x19, x10\n"
+ "cbz x11, 27f\n"
+ "ld1w { z8.s }, p4/Z, [x11]\n"
+ "ld1w { z9.s }, p4/Z, [x11, #1, MUL VL]\n"
+ "ld1w { z10.s }, p4/Z, [x11, #2, MUL VL]\n"
"mov z12.d, z8.d\n"
"mov z13.d, z9.d\n"
- "ld1w { z10.s }, p4/Z, [x12, #2, MUL VL]\n"
- "ld1w { z11.s }, p4/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z11.s }, p4/Z, [x11, #3, MUL VL]\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
+ "addvl x11, x11, #4\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"b 29f\n"
"27:" // Height 3: no bias
"tbz %x[flags], #0, 28f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z8.s }, p3/Z, [x9]\n"
- "ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p3/Z, [x25]\n"
- "ld1w { z13.s }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1w { z15.s }, p0/Z, [x25, #3, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x24]\n"
- "ld1w { z17.s }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1w { z18.s }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1w { z19.s }, p0/Z, [x24, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z8.s }, p3/Z, [x28]\n"
+ "ld1w { z9.s }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p1/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z11.s }, p0/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x24]\n"
+ "ld1w { z13.s }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z16.s }, p3/Z, [x23]\n"
+ "ld1w { z17.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z18.s }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z19.s }, p0/Z, [x23, #3, MUL VL]\n"
"b 29f\n"
"28:" // Height 3: no accumulate
"mov z8.b, #0x0\n"
@@ -414,74 +414,74 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"mov z18.b, #0x0\n"
"mov z19.b, #0x0\n"
"29:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"30:" // Height 3: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 31f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 32f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 32f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
"b 32f\n"
"31:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"32:" // Height 3: input setup done
- "subs x27, x27, #0x1\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
- "ld1rw { z2.s }, p4/Z, [x24]\n"
- "ld1w { z6.s }, p4/Z, [x10]\n"
- "ld1w { z7.s }, p4/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
+ "ld1w { z7.s }, p4/Z, [x9, #1, MUL VL]\n"
"ble 34f\n"
"33:" // Height 3: Multiply loop: Main loop
"fmla z8.s, p4/M, z6.s, z0.s\n"
"fmla z12.s, p4/M, z6.s, z1.s\n"
- "add x26, x26, #0x4\n"
- "subs x27, x27, #0x1\n"
+ "add x25, x25, #0x4\n"
+ "subs x26, x26, #0x1\n"
"fmla z16.s, p4/M, z6.s, z2.s\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
- "ld1w { z6.s }, p4/Z, [x10, #2, MUL VL]\n"
- "add x25, x25, #0x4\n"
+ "ld1w { z6.s }, p4/Z, [x9, #2, MUL VL]\n"
+ "add x24, x24, #0x4\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
- "ld1w { z7.s }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
- "add x24, x24, #0x4\n"
+ "ld1w { z7.s }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
+ "add x23, x23, #0x4\n"
"fmla z10.s, p4/M, z6.s, z0.s\n"
"fmla z14.s, p4/M, z6.s, z1.s\n"
"fmla z18.s, p4/M, z6.s, z2.s\n"
"fmla z11.s, p4/M, z7.s, z0.s\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1w { z6.s }, p4/Z, [x10]\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
"fmla z15.s, p4/M, z7.s, z1.s\n"
"fmla z19.s, p4/M, z7.s, z2.s\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
- "ld1rw { z2.s }, p4/Z, [x24]\n"
- "ld1w { z7.s }, p4/Z, [x10, #1, MUL VL]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1w { z7.s }, p4/Z, [x9, #1, MUL VL]\n"
"bgt 33b\n"
"34:" // Height 3: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.s, p4/M, z6.s, z0.s\n"
"fmla z12.s, p4/M, z6.s, z1.s\n"
- "add x28, x28, #0x1\n"
+ "add x27, x27, #0x1\n"
"fmla z16.s, p4/M, z6.s, z2.s\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
- "ld1w { z6.s }, p4/Z, [x10, #2, MUL VL]\n"
- "cmp x28, x20\n"
+ "ld1w { z6.s }, p4/Z, [x9, #2, MUL VL]\n"
+ "cmp x27, x19\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
- "ld1w { z7.s }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1w { z7.s }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"fmla z10.s, p4/M, z6.s, z0.s\n"
"fmla z14.s, p4/M, z6.s, z1.s\n"
"fmla z18.s, p4/M, z6.s, z2.s\n"
@@ -489,14 +489,14 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"fmla z15.s, p4/M, z7.s, z1.s\n"
"fmla z19.s, p4/M, z7.s, z2.s\n"
"bne 30b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"tbz %x[flags], #1, 35f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p4/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p4/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p4/Z, [x19]\n"
"fmin z8.s, p4/M, z8.s, z1.s\n"
"fmin z9.s, p4/M, z9.s, z1.s\n"
"fmin z10.s, p4/M, z10.s, z1.s\n"
@@ -522,50 +522,50 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"fmax z18.s, p4/M, z18.s, z0.s\n"
"fmax z19.s, p4/M, z19.s, z0.s\n"
"35:" // Height 3: No activation
- "st1w { z8.s }, p3, [x9]\n"
- "st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p1, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p0, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p3, [x25]\n"
- "st1w { z13.s }, p2, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p1, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p0, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p3, [x24]\n"
- "st1w { z17.s }, p2, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p1, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p0, [x24, #3, MUL VL]\n"
+ "st1w { z8.s }, p3, [x28]\n"
+ "st1w { z9.s }, p2, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p1, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p0, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z12.s }, p3, [x24]\n"
+ "st1w { z13.s }, p2, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x24, #3, MUL VL]\n"
+ "st1w { z16.s }, p3, [x23]\n"
+ "st1w { z17.s }, p2, [x23, #1, MUL VL]\n"
+ "st1w { z18.s }, p1, [x23, #2, MUL VL]\n"
+ "st1w { z19.s }, p0, [x23, #3, MUL VL]\n"
"36:" // Height 3: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 26b\n"
"b 74f\n"
"37:" // Height 4
- "mov x12, %x[bias]\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x11, %x[bias]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"38:" // Height 4: Column loop
- "mov x20, #0x0\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "incw x20\n"
- "whilelt p0.s, x20, x11\n"
- "cbz x12, 39f\n"
- "ld1w { z8.s }, p4/Z, [x12]\n"
- "ld1w { z9.s }, p4/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p0.s, x19, x10\n"
+ "cbz x11, 39f\n"
+ "ld1w { z8.s }, p4/Z, [x11]\n"
+ "ld1w { z9.s }, p4/Z, [x11, #1, MUL VL]\n"
+ "ld1w { z10.s }, p4/Z, [x11, #2, MUL VL]\n"
"mov z12.d, z8.d\n"
"mov z13.d, z9.d\n"
- "ld1w { z10.s }, p4/Z, [x12, #2, MUL VL]\n"
- "ld1w { z11.s }, p4/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z11.s }, p4/Z, [x11, #3, MUL VL]\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
+ "addvl x11, x11, #4\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"mov z20.d, z8.d\n"
@@ -575,26 +575,26 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"b 41f\n"
"39:" // Height 4: no bias
"tbz %x[flags], #0, 40f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z8.s }, p3/Z, [x9]\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p3/Z, [x25]\n"
- "ld1w { z13.s }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1w { z15.s }, p0/Z, [x25, #3, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x24]\n"
- "ld1w { z17.s }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1w { z18.s }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1w { z19.s }, p0/Z, [x24, #3, MUL VL]\n"
- "ld1w { z20.s }, p3/Z, [x23]\n"
- "ld1w { z21.s }, p2/Z, [x23, #1, MUL VL]\n"
- "ld1w { z22.s }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1w { z23.s }, p0/Z, [x23, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z8.s }, p3/Z, [x28]\n"
+ "ld1w { z9.s }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p1/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z11.s }, p0/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x24]\n"
+ "ld1w { z13.s }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z16.s }, p3/Z, [x23]\n"
+ "ld1w { z17.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z18.s }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z19.s }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z20.s }, p3/Z, [x22]\n"
+ "ld1w { z21.s }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z22.s }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z23.s }, p0/Z, [x22, #3, MUL VL]\n"
"b 41f\n"
"40:" // Height 4: no accumulate
"mov z8.b, #0x0\n"
@@ -614,86 +614,86 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"mov z22.b, #0x0\n"
"mov z23.b, #0x0\n"
"41:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"42:" // Height 4: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 43f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 44f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 44f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
"b 44f\n"
"43:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"44:" // Height 4: input setup done
- "subs x27, x27, #0x1\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
- "ld1rw { z2.s }, p4/Z, [x24]\n"
- "ld1rw { z3.s }, p4/Z, [x23]\n"
- "ld1w { z6.s }, p4/Z, [x10]\n"
- "ld1w { z7.s }, p4/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1rw { z3.s }, p4/Z, [x22]\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
+ "ld1w { z7.s }, p4/Z, [x9, #1, MUL VL]\n"
"ble 46f\n"
"45:" // Height 4: Multiply loop: Main loop
"fmla z8.s, p4/M, z6.s, z0.s\n"
"fmla z12.s, p4/M, z6.s, z1.s\n"
- "add x26, x26, #0x4\n"
- "subs x27, x27, #0x1\n"
+ "add x25, x25, #0x4\n"
+ "subs x26, x26, #0x1\n"
"fmla z16.s, p4/M, z6.s, z2.s\n"
"fmla z20.s, p4/M, z6.s, z3.s\n"
- "ld1w { z6.s }, p4/Z, [x10, #2, MUL VL]\n"
- "add x25, x25, #0x4\n"
+ "ld1w { z6.s }, p4/Z, [x9, #2, MUL VL]\n"
+ "add x24, x24, #0x4\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
- "add x24, x24, #0x4\n"
"add x23, x23, #0x4\n"
+ "add x22, x22, #0x4\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
"fmla z21.s, p4/M, z7.s, z3.s\n"
- "ld1w { z7.s }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1w { z7.s }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"fmla z10.s, p4/M, z6.s, z0.s\n"
"fmla z14.s, p4/M, z6.s, z1.s\n"
"fmla z18.s, p4/M, z6.s, z2.s\n"
"fmla z22.s, p4/M, z6.s, z3.s\n"
- "ld1w { z6.s }, p4/Z, [x10]\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
"fmla z11.s, p4/M, z7.s, z0.s\n"
"fmla z15.s, p4/M, z7.s, z1.s\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
"fmla z19.s, p4/M, z7.s, z2.s\n"
"fmla z23.s, p4/M, z7.s, z3.s\n"
- "ld1rw { z2.s }, p4/Z, [x24]\n"
- "ld1rw { z3.s }, p4/Z, [x23]\n"
- "ld1w { z7.s }, p4/Z, [x10, #1, MUL VL]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1rw { z3.s }, p4/Z, [x22]\n"
+ "ld1w { z7.s }, p4/Z, [x9, #1, MUL VL]\n"
"bgt 45b\n"
"46:" // Height 4: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.s, p4/M, z6.s, z0.s\n"
"fmla z12.s, p4/M, z6.s, z1.s\n"
- "add x28, x28, #0x1\n"
+ "add x27, x27, #0x1\n"
"fmla z16.s, p4/M, z6.s, z2.s\n"
"fmla z20.s, p4/M, z6.s, z3.s\n"
- "ld1w { z6.s }, p4/Z, [x10, #2, MUL VL]\n"
- "cmp x28, x20\n"
+ "ld1w { z6.s }, p4/Z, [x9, #2, MUL VL]\n"
+ "cmp x27, x19\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
"fmla z21.s, p4/M, z7.s, z3.s\n"
- "ld1w { z7.s }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1w { z7.s }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"fmla z10.s, p4/M, z6.s, z0.s\n"
"fmla z14.s, p4/M, z6.s, z1.s\n"
"fmla z18.s, p4/M, z6.s, z2.s\n"
@@ -703,15 +703,15 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"fmla z19.s, p4/M, z7.s, z2.s\n"
"fmla z23.s, p4/M, z7.s, z3.s\n"
"bne 42b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"tbz %x[flags], #1, 47f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p4/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p4/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p4/Z, [x19]\n"
"fmin z8.s, p4/M, z8.s, z1.s\n"
"fmin z9.s, p4/M, z9.s, z1.s\n"
"fmin z10.s, p4/M, z10.s, z1.s\n"
@@ -745,54 +745,54 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"fmax z22.s, p4/M, z22.s, z0.s\n"
"fmax z23.s, p4/M, z23.s, z0.s\n"
"47:" // Height 4: No activation
- "st1w { z8.s }, p3, [x9]\n"
- "st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p1, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p0, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p3, [x25]\n"
- "st1w { z13.s }, p2, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p1, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p0, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p3, [x24]\n"
- "st1w { z17.s }, p2, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p1, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p0, [x24, #3, MUL VL]\n"
- "st1w { z20.s }, p3, [x23]\n"
- "st1w { z21.s }, p2, [x23, #1, MUL VL]\n"
- "st1w { z22.s }, p1, [x23, #2, MUL VL]\n"
- "st1w { z23.s }, p0, [x23, #3, MUL VL]\n"
+ "st1w { z8.s }, p3, [x28]\n"
+ "st1w { z9.s }, p2, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p1, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p0, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z12.s }, p3, [x24]\n"
+ "st1w { z13.s }, p2, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x24, #3, MUL VL]\n"
+ "st1w { z16.s }, p3, [x23]\n"
+ "st1w { z17.s }, p2, [x23, #1, MUL VL]\n"
+ "st1w { z18.s }, p1, [x23, #2, MUL VL]\n"
+ "st1w { z19.s }, p0, [x23, #3, MUL VL]\n"
+ "st1w { z20.s }, p3, [x22]\n"
+ "st1w { z21.s }, p2, [x22, #1, MUL VL]\n"
+ "st1w { z22.s }, p1, [x22, #2, MUL VL]\n"
+ "st1w { z23.s }, p0, [x22, #3, MUL VL]\n"
"48:" // Height 4: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 38b\n"
"b 74f\n"
"49:" // Height 5
- "mov x12, %x[bias]\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x11, %x[bias]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"50:" // Height 5: Column loop
- "mov x20, #0x0\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "incw x20\n"
- "whilelt p0.s, x20, x11\n"
- "cbz x12, 51f\n"
- "ld1w { z8.s }, p4/Z, [x12]\n"
- "ld1w { z9.s }, p4/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p0.s, x19, x10\n"
+ "cbz x11, 51f\n"
+ "ld1w { z8.s }, p4/Z, [x11]\n"
+ "ld1w { z9.s }, p4/Z, [x11, #1, MUL VL]\n"
+ "ld1w { z10.s }, p4/Z, [x11, #2, MUL VL]\n"
"mov z12.d, z8.d\n"
"mov z13.d, z9.d\n"
- "ld1w { z10.s }, p4/Z, [x12, #2, MUL VL]\n"
- "ld1w { z11.s }, p4/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z11.s }, p4/Z, [x11, #3, MUL VL]\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
+ "addvl x11, x11, #4\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"mov z20.d, z8.d\n"
@@ -806,31 +806,31 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"b 53f\n"
"51:" // Height 5: no bias
"tbz %x[flags], #0, 52f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z8.s }, p3/Z, [x9]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p3/Z, [x25]\n"
- "ld1w { z13.s }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1w { z15.s }, p0/Z, [x25, #3, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x24]\n"
- "ld1w { z17.s }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1w { z18.s }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1w { z19.s }, p0/Z, [x24, #3, MUL VL]\n"
- "ld1w { z20.s }, p3/Z, [x23]\n"
- "ld1w { z21.s }, p2/Z, [x23, #1, MUL VL]\n"
- "ld1w { z22.s }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1w { z23.s }, p0/Z, [x23, #3, MUL VL]\n"
- "ld1w { z24.s }, p3/Z, [x22]\n"
- "ld1w { z25.s }, p2/Z, [x22, #1, MUL VL]\n"
- "ld1w { z26.s }, p1/Z, [x22, #2, MUL VL]\n"
- "ld1w { z27.s }, p0/Z, [x22, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z8.s }, p3/Z, [x28]\n"
+ "ld1w { z9.s }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p1/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z11.s }, p0/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x24]\n"
+ "ld1w { z13.s }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z16.s }, p3/Z, [x23]\n"
+ "ld1w { z17.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z18.s }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z19.s }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z20.s }, p3/Z, [x22]\n"
+ "ld1w { z21.s }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z22.s }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z23.s }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z24.s }, p3/Z, [x21]\n"
+ "ld1w { z25.s }, p2/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z26.s }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z27.s }, p0/Z, [x21, #3, MUL VL]\n"
"b 53f\n"
"52:" // Height 5: no accumulate
"mov z8.b, #0x0\n"
@@ -854,98 +854,98 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"mov z26.b, #0x0\n"
"mov z27.b, #0x0\n"
"53:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"54:" // Height 5: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 55f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 56f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x22, x22, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 56f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
"b 56f\n"
"55:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"56:" // Height 5: input setup done
- "subs x27, x27, #0x1\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
- "ld1rw { z2.s }, p4/Z, [x24]\n"
- "ld1rw { z3.s }, p4/Z, [x23]\n"
- "ld1rw { z4.s }, p4/Z, [x22]\n"
- "ld1w { z6.s }, p4/Z, [x10]\n"
- "ld1w { z7.s }, p4/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1rw { z3.s }, p4/Z, [x22]\n"
+ "ld1rw { z4.s }, p4/Z, [x21]\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
+ "ld1w { z7.s }, p4/Z, [x9, #1, MUL VL]\n"
"ble 58f\n"
"57:" // Height 5: Multiply loop: Main loop
"fmla z8.s, p4/M, z6.s, z0.s\n"
"fmla z12.s, p4/M, z6.s, z1.s\n"
- "add x26, x26, #0x4\n"
- "subs x27, x27, #0x1\n"
+ "add x25, x25, #0x4\n"
+ "subs x26, x26, #0x1\n"
"fmla z16.s, p4/M, z6.s, z2.s\n"
"fmla z20.s, p4/M, z6.s, z3.s\n"
- "add x25, x25, #0x4\n"
"add x24, x24, #0x4\n"
+ "add x23, x23, #0x4\n"
"fmla z24.s, p4/M, z6.s, z4.s\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
- "ld1w { z6.s }, p4/Z, [x10, #2, MUL VL]\n"
- "add x23, x23, #0x4\n"
+ "ld1w { z6.s }, p4/Z, [x9, #2, MUL VL]\n"
+ "add x22, x22, #0x4\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
- "add x22, x22, #0x4\n"
+ "add x21, x21, #0x4\n"
"fmla z21.s, p4/M, z7.s, z3.s\n"
"fmla z25.s, p4/M, z7.s, z4.s\n"
- "ld1w { z7.s }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1w { z7.s }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"fmla z10.s, p4/M, z6.s, z0.s\n"
"fmla z14.s, p4/M, z6.s, z1.s\n"
"fmla z18.s, p4/M, z6.s, z2.s\n"
"fmla z22.s, p4/M, z6.s, z3.s\n"
"fmla z26.s, p4/M, z6.s, z4.s\n"
"fmla z11.s, p4/M, z7.s, z0.s\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1w { z6.s }, p4/Z, [x10]\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
"fmla z15.s, p4/M, z7.s, z1.s\n"
"fmla z19.s, p4/M, z7.s, z2.s\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
- "ld1rw { z2.s }, p4/Z, [x24]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
"fmla z23.s, p4/M, z7.s, z3.s\n"
"fmla z27.s, p4/M, z7.s, z4.s\n"
- "ld1rw { z3.s }, p4/Z, [x23]\n"
- "ld1rw { z4.s }, p4/Z, [x22]\n"
- "ld1w { z7.s }, p4/Z, [x10, #1, MUL VL]\n"
+ "ld1rw { z3.s }, p4/Z, [x22]\n"
+ "ld1rw { z4.s }, p4/Z, [x21]\n"
+ "ld1w { z7.s }, p4/Z, [x9, #1, MUL VL]\n"
"bgt 57b\n"
"58:" // Height 5: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.s, p4/M, z6.s, z0.s\n"
"fmla z12.s, p4/M, z6.s, z1.s\n"
- "add x28, x28, #0x1\n"
+ "add x27, x27, #0x1\n"
"fmla z16.s, p4/M, z6.s, z2.s\n"
"fmla z20.s, p4/M, z6.s, z3.s\n"
- "cmp x28, x20\n"
+ "cmp x27, x19\n"
"fmla z24.s, p4/M, z6.s, z4.s\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
- "ld1w { z6.s }, p4/Z, [x10, #2, MUL VL]\n"
+ "ld1w { z6.s }, p4/Z, [x9, #2, MUL VL]\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
"fmla z21.s, p4/M, z7.s, z3.s\n"
"fmla z25.s, p4/M, z7.s, z4.s\n"
- "ld1w { z7.s }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1w { z7.s }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"fmla z10.s, p4/M, z6.s, z0.s\n"
"fmla z14.s, p4/M, z6.s, z1.s\n"
"fmla z18.s, p4/M, z6.s, z2.s\n"
@@ -957,16 +957,16 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"fmla z23.s, p4/M, z7.s, z3.s\n"
"fmla z27.s, p4/M, z7.s, z4.s\n"
"bne 54b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"tbz %x[flags], #1, 59f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p4/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p4/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p4/Z, [x19]\n"
"fmin z8.s, p4/M, z8.s, z1.s\n"
"fmin z9.s, p4/M, z9.s, z1.s\n"
"fmin z10.s, p4/M, z10.s, z1.s\n"
@@ -1008,61 +1008,61 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"fmax z26.s, p4/M, z26.s, z0.s\n"
"fmax z27.s, p4/M, z27.s, z0.s\n"
"59:" // Height 5: No activation
- "st1w { z8.s }, p3, [x9]\n"
- "st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p1, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p0, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p3, [x25]\n"
- "st1w { z13.s }, p2, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p1, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p0, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p3, [x24]\n"
- "st1w { z17.s }, p2, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p1, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p0, [x24, #3, MUL VL]\n"
- "st1w { z20.s }, p3, [x23]\n"
- "st1w { z21.s }, p2, [x23, #1, MUL VL]\n"
- "st1w { z22.s }, p1, [x23, #2, MUL VL]\n"
- "st1w { z23.s }, p0, [x23, #3, MUL VL]\n"
- "st1w { z24.s }, p3, [x22]\n"
- "st1w { z25.s }, p2, [x22, #1, MUL VL]\n"
- "st1w { z26.s }, p1, [x22, #2, MUL VL]\n"
- "st1w { z27.s }, p0, [x22, #3, MUL VL]\n"
+ "st1w { z8.s }, p3, [x28]\n"
+ "st1w { z9.s }, p2, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p1, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p0, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z12.s }, p3, [x24]\n"
+ "st1w { z13.s }, p2, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x24, #3, MUL VL]\n"
+ "st1w { z16.s }, p3, [x23]\n"
+ "st1w { z17.s }, p2, [x23, #1, MUL VL]\n"
+ "st1w { z18.s }, p1, [x23, #2, MUL VL]\n"
+ "st1w { z19.s }, p0, [x23, #3, MUL VL]\n"
+ "st1w { z20.s }, p3, [x22]\n"
+ "st1w { z21.s }, p2, [x22, #1, MUL VL]\n"
+ "st1w { z22.s }, p1, [x22, #2, MUL VL]\n"
+ "st1w { z23.s }, p0, [x22, #3, MUL VL]\n"
+ "st1w { z24.s }, p3, [x21]\n"
+ "st1w { z25.s }, p2, [x21, #1, MUL VL]\n"
+ "st1w { z26.s }, p1, [x21, #2, MUL VL]\n"
+ "st1w { z27.s }, p0, [x21, #3, MUL VL]\n"
"60:" // Height 5: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 50b\n"
"b 74f\n"
"61:" // Height 6
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x18\n"
- "mov x12, %x[bias]\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x19, #0x18\n"
+ "mov x11, %x[bias]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"62:" // Height 6: Column loop
- "mov x20, #0x0\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "incw x20\n"
- "whilelt p0.s, x20, x11\n"
- "cbz x12, 63f\n"
- "ld1w { z8.s }, p4/Z, [x12]\n"
- "ld1w { z9.s }, p4/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p0.s, x19, x10\n"
+ "cbz x11, 63f\n"
+ "ld1w { z8.s }, p4/Z, [x11]\n"
+ "ld1w { z9.s }, p4/Z, [x11, #1, MUL VL]\n"
+ "ld1w { z10.s }, p4/Z, [x11, #2, MUL VL]\n"
"mov z12.d, z8.d\n"
"mov z13.d, z9.d\n"
- "ld1w { z10.s }, p4/Z, [x12, #2, MUL VL]\n"
- "ld1w { z11.s }, p4/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z11.s }, p4/Z, [x11, #3, MUL VL]\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
+ "addvl x11, x11, #4\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"mov z20.d, z8.d\n"
@@ -1080,36 +1080,36 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"b 65f\n"
"63:" // Height 6: no bias
"tbz %x[flags], #0, 64f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z8.s }, p3/Z, [x9]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
- "add x21, x22, x20, LSL #2\n"
- "ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p3/Z, [x25]\n"
- "ld1w { z13.s }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1w { z15.s }, p0/Z, [x25, #3, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x24]\n"
- "ld1w { z17.s }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1w { z18.s }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1w { z19.s }, p0/Z, [x24, #3, MUL VL]\n"
- "ld1w { z20.s }, p3/Z, [x23]\n"
- "ld1w { z21.s }, p2/Z, [x23, #1, MUL VL]\n"
- "ld1w { z22.s }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1w { z23.s }, p0/Z, [x23, #3, MUL VL]\n"
- "ld1w { z24.s }, p3/Z, [x22]\n"
- "ld1w { z25.s }, p2/Z, [x22, #1, MUL VL]\n"
- "ld1w { z26.s }, p1/Z, [x22, #2, MUL VL]\n"
- "ld1w { z27.s }, p0/Z, [x22, #3, MUL VL]\n"
- "ld1w { z28.s }, p3/Z, [x21]\n"
- "ld1w { z29.s }, p2/Z, [x21, #1, MUL VL]\n"
- "ld1w { z30.s }, p1/Z, [x21, #2, MUL VL]\n"
- "ld1w { z31.s }, p0/Z, [x21, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z8.s }, p3/Z, [x28]\n"
+ "ld1w { z9.s }, p2/Z, [x28, #1, MUL VL]\n"
+ "add x20, x21, x19, LSL #2\n"
+ "ld1w { z10.s }, p1/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z11.s }, p0/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x24]\n"
+ "ld1w { z13.s }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z16.s }, p3/Z, [x23]\n"
+ "ld1w { z17.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z18.s }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z19.s }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z20.s }, p3/Z, [x22]\n"
+ "ld1w { z21.s }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z22.s }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z23.s }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z24.s }, p3/Z, [x21]\n"
+ "ld1w { z25.s }, p2/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z26.s }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z27.s }, p0/Z, [x21, #3, MUL VL]\n"
+ "ld1w { z28.s }, p3/Z, [x20]\n"
+ "ld1w { z29.s }, p2/Z, [x20, #1, MUL VL]\n"
+ "ld1w { z30.s }, p1/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z31.s }, p0/Z, [x20, #3, MUL VL]\n"
"b 65f\n"
"64:" // Height 6: no accumulate
"mov z8.b, #0x0\n"
@@ -1137,110 +1137,110 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"mov z30.b, #0x0\n"
"mov z31.b, #0x0\n"
"65:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"66:" // Height 6: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 67f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 68f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x22, x22, x20, LSL #2\n"
- "add x21, x21, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 68f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "add x20, x20, x19, LSL #2\n"
"b 68f\n"
"67:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"68:" // Height 6: input setup done
- "subs x27, x27, #0x1\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
- "ld1rw { z2.s }, p4/Z, [x24]\n"
- "ld1rw { z3.s }, p4/Z, [x23]\n"
- "ld1rw { z4.s }, p4/Z, [x22]\n"
- "ld1rw { z5.s }, p4/Z, [x21]\n"
- "ld1w { z6.s }, p4/Z, [x10]\n"
- "ld1w { z7.s }, p4/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1rw { z3.s }, p4/Z, [x22]\n"
+ "ld1rw { z4.s }, p4/Z, [x21]\n"
+ "ld1rw { z5.s }, p4/Z, [x20]\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
+ "ld1w { z7.s }, p4/Z, [x9, #1, MUL VL]\n"
"ble 70f\n"
"69:" // Height 6: Multiply loop: Main loop
"fmla z8.s, p4/M, z6.s, z0.s\n"
"fmla z12.s, p4/M, z6.s, z1.s\n"
- "add x26, x26, #0x4\n"
- "subs x27, x27, #0x1\n"
+ "add x25, x25, #0x4\n"
+ "subs x26, x26, #0x1\n"
"fmla z16.s, p4/M, z6.s, z2.s\n"
"fmla z20.s, p4/M, z6.s, z3.s\n"
- "add x25, x25, #0x4\n"
"add x24, x24, #0x4\n"
+ "add x23, x23, #0x4\n"
"fmla z24.s, p4/M, z6.s, z4.s\n"
"fmla z28.s, p4/M, z6.s, z5.s\n"
- "ld1w { z6.s }, p4/Z, [x10, #2, MUL VL]\n"
- "add x23, x23, #0x4\n"
+ "ld1w { z6.s }, p4/Z, [x9, #2, MUL VL]\n"
+ "add x22, x22, #0x4\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
- "add x22, x22, #0x4\n"
"add x21, x21, #0x4\n"
+ "add x20, x20, #0x4\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
"fmla z21.s, p4/M, z7.s, z3.s\n"
"fmla z25.s, p4/M, z7.s, z4.s\n"
"fmla z29.s, p4/M, z7.s, z5.s\n"
- "ld1w { z7.s }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1w { z7.s }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"fmla z10.s, p4/M, z6.s, z0.s\n"
"fmla z14.s, p4/M, z6.s, z1.s\n"
"fmla z18.s, p4/M, z6.s, z2.s\n"
"fmla z22.s, p4/M, z6.s, z3.s\n"
"fmla z26.s, p4/M, z6.s, z4.s\n"
"fmla z30.s, p4/M, z6.s, z5.s\n"
- "ld1w { z6.s }, p4/Z, [x10]\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
"fmla z11.s, p4/M, z7.s, z0.s\n"
"fmla z15.s, p4/M, z7.s, z1.s\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
"fmla z19.s, p4/M, z7.s, z2.s\n"
"fmla z23.s, p4/M, z7.s, z3.s\n"
- "ld1rw { z2.s }, p4/Z, [x24]\n"
- "ld1rw { z3.s }, p4/Z, [x23]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1rw { z3.s }, p4/Z, [x22]\n"
"fmla z27.s, p4/M, z7.s, z4.s\n"
"fmla z31.s, p4/M, z7.s, z5.s\n"
- "ld1rw { z4.s }, p4/Z, [x22]\n"
- "ld1rw { z5.s }, p4/Z, [x21]\n"
- "ld1w { z7.s }, p4/Z, [x10, #1, MUL VL]\n"
+ "ld1rw { z4.s }, p4/Z, [x21]\n"
+ "ld1rw { z5.s }, p4/Z, [x20]\n"
+ "ld1w { z7.s }, p4/Z, [x9, #1, MUL VL]\n"
"bgt 69b\n"
"70:" // Height 6: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.s, p4/M, z6.s, z0.s\n"
"fmla z12.s, p4/M, z6.s, z1.s\n"
- "add x28, x28, #0x1\n"
+ "add x27, x27, #0x1\n"
"fmla z16.s, p4/M, z6.s, z2.s\n"
"fmla z20.s, p4/M, z6.s, z3.s\n"
- "cmp x28, x20\n"
+ "cmp x27, x19\n"
"fmla z24.s, p4/M, z6.s, z4.s\n"
"fmla z28.s, p4/M, z6.s, z5.s\n"
- "ld1w { z6.s }, p4/Z, [x10, #2, MUL VL]\n"
+ "ld1w { z6.s }, p4/Z, [x9, #2, MUL VL]\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
"fmla z21.s, p4/M, z7.s, z3.s\n"
"fmla z25.s, p4/M, z7.s, z4.s\n"
"fmla z29.s, p4/M, z7.s, z5.s\n"
- "ld1w { z7.s }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1w { z7.s }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"fmla z10.s, p4/M, z6.s, z0.s\n"
"fmla z14.s, p4/M, z6.s, z1.s\n"
"fmla z18.s, p4/M, z6.s, z2.s\n"
@@ -1254,17 +1254,17 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"fmla z27.s, p4/M, z7.s, z4.s\n"
"fmla z31.s, p4/M, z7.s, z5.s\n"
"bne 66b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"tbz %x[flags], #1, 71f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p4/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p4/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p4/Z, [x19]\n"
"fmin z8.s, p4/M, z8.s, z1.s\n"
"fmin z9.s, p4/M, z9.s, z1.s\n"
"fmin z10.s, p4/M, z10.s, z1.s\n"
@@ -1314,51 +1314,51 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"fmax z30.s, p4/M, z30.s, z0.s\n"
"fmax z31.s, p4/M, z31.s, z0.s\n"
"71:" // Height 6: No activation
- "st1w { z8.s }, p3, [x9]\n"
- "st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p1, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p0, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p3, [x25]\n"
- "st1w { z13.s }, p2, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p1, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p0, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p3, [x24]\n"
- "st1w { z17.s }, p2, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p1, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p0, [x24, #3, MUL VL]\n"
- "st1w { z20.s }, p3, [x23]\n"
- "st1w { z21.s }, p2, [x23, #1, MUL VL]\n"
- "st1w { z22.s }, p1, [x23, #2, MUL VL]\n"
- "st1w { z23.s }, p0, [x23, #3, MUL VL]\n"
- "st1w { z24.s }, p3, [x22]\n"
- "st1w { z25.s }, p2, [x22, #1, MUL VL]\n"
- "st1w { z26.s }, p1, [x22, #2, MUL VL]\n"
- "st1w { z27.s }, p0, [x22, #3, MUL VL]\n"
- "st1w { z28.s }, p3, [x21]\n"
- "st1w { z29.s }, p2, [x21, #1, MUL VL]\n"
- "st1w { z30.s }, p1, [x21, #2, MUL VL]\n"
- "st1w { z31.s }, p0, [x21, #3, MUL VL]\n"
+ "st1w { z8.s }, p3, [x28]\n"
+ "st1w { z9.s }, p2, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p1, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p0, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z12.s }, p3, [x24]\n"
+ "st1w { z13.s }, p2, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x24, #3, MUL VL]\n"
+ "st1w { z16.s }, p3, [x23]\n"
+ "st1w { z17.s }, p2, [x23, #1, MUL VL]\n"
+ "st1w { z18.s }, p1, [x23, #2, MUL VL]\n"
+ "st1w { z19.s }, p0, [x23, #3, MUL VL]\n"
+ "st1w { z20.s }, p3, [x22]\n"
+ "st1w { z21.s }, p2, [x22, #1, MUL VL]\n"
+ "st1w { z22.s }, p1, [x22, #2, MUL VL]\n"
+ "st1w { z23.s }, p0, [x22, #3, MUL VL]\n"
+ "st1w { z24.s }, p3, [x21]\n"
+ "st1w { z25.s }, p2, [x21, #1, MUL VL]\n"
+ "st1w { z26.s }, p1, [x21, #2, MUL VL]\n"
+ "st1w { z27.s }, p0, [x21, #3, MUL VL]\n"
+ "st1w { z28.s }, p3, [x20]\n"
+ "st1w { z29.s }, p2, [x20, #1, MUL VL]\n"
+ "st1w { z30.s }, p1, [x20, #2, MUL VL]\n"
+ "st1w { z31.s }, p0, [x20, #3, MUL VL]\n"
"72:" // Height 6: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 62b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 74f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 73f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"73:" // Update direct input
- "mov x20, #0x18\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x18\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"74:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/generic.cpp
index 71c6afba42..3baf7b9715 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -102,32 +102,32 @@ void sve_hybrid_fp32_mla_6x4VL (
"cmp %x[M], #0x2\n"
"bgt 27f\n"
"beq 14f\n"
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x9, %x[bias]\n"
+ "mov x28, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "cbz x12, 3f\n"
- "ld1w { z8.s }, p5/Z, [x12]\n"
- "ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
- "ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
- "addvl x12, x12, #4\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x11\n"
+ "cbz x9, 3f\n"
+ "ld1w { z8.s }, p5/Z, [x9]\n"
+ "ld1w { z9.s }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z10.s }, p5/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"b 5f\n"
"3:" // Height 1: no bias
"tbz %x[flags], #0, 4f\n"
- "ld1w { z8.s }, p4/Z, [x9]\n"
- "ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "ld1w { z8.s }, p4/Z, [x28]\n"
+ "ld1w { z9.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z11.s }, p1/Z, [x28, #3, MUL VL]\n"
"b 5f\n"
"4:" // Height 1: no accumulate
"mov z8.b, #0x0\n"
@@ -135,175 +135,175 @@ void sve_hybrid_fp32_mla_6x4VL (
"mov z10.b, #0x0\n"
"mov z11.b, #0x0\n"
"5:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"6:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 7f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 8f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 8f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
"b 8f\n"
"7:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"8:" // Height 1: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"ble 10f\n"
"9:" // Height 1: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
"ld1w { z6.s }, p5/Z, [x10]\n"
- "fmla z8.s, z6.s, z0.s[0]\n"
+ "whilelt p0.s, XZR, x26\n"
"ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
- "fmla z9.s, z7.s, z0.s[0]\n"
+ "sub x26, x26, #0x4\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "fmla z8.s, z6.s, z0.s[0]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
- "fmla z10.s, z6.s, z0.s[0]\n"
+ "cmp x26, #0x4\n"
+ "fmla z9.s, z7.s, z0.s[0]\n"
"ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
- "fmla z11.s, z7.s, z0.s[0]\n"
+ "add x25, x25, #0x10\n"
+ "fmla z10.s, z6.s, z0.s[0]\n"
"ld1w { z6.s }, p5/Z, [x10, #4, MUL VL]\n"
- "fmla z8.s, z6.s, z0.s[1]\n"
+ "fmla z11.s, z7.s, z0.s[0]\n"
"ld1w { z7.s }, p5/Z, [x10, #5, MUL VL]\n"
- "fmla z9.s, z7.s, z0.s[1]\n"
+ "fmla z8.s, z6.s, z0.s[1]\n"
"ld1w { z6.s }, p5/Z, [x10, #6, MUL VL]\n"
- "fmla z10.s, z6.s, z0.s[1]\n"
+ "fmla z9.s, z7.s, z0.s[1]\n"
"ld1w { z7.s }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #16\n"
- "fmla z11.s, z7.s, z0.s[1]\n"
+ "fmla z10.s, z6.s, z0.s[1]\n"
"ld1w { z6.s }, p5/Z, [x10, #-8, MUL VL]\n"
+ "fmla z11.s, z7.s, z0.s[1]\n"
"ld1w { z7.s }, p5/Z, [x10, #-7, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[2]\n"
- "fmla z9.s, z7.s, z0.s[2]\n"
"ld1w { z6.s }, p5/Z, [x10, #-6, MUL VL]\n"
+ "fmla z9.s, z7.s, z0.s[2]\n"
"ld1w { z7.s }, p5/Z, [x10, #-5, MUL VL]\n"
"fmla z10.s, z6.s, z0.s[2]\n"
- "fmla z11.s, z7.s, z0.s[2]\n"
"ld1w { z6.s }, p5/Z, [x10, #-4, MUL VL]\n"
+ "fmla z11.s, z7.s, z0.s[2]\n"
"ld1w { z7.s }, p5/Z, [x10, #-3, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[3]\n"
- "fmla z9.s, z7.s, z0.s[3]\n"
"ld1w { z6.s }, p5/Z, [x10, #-2, MUL VL]\n"
+ "fmla z9.s, z7.s, z0.s[3]\n"
"ld1w { z7.s }, p5/Z, [x10, #-1, MUL VL]\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
"fmla z10.s, z6.s, z0.s[3]\n"
"fmla z11.s, z7.s, z0.s[3]\n"
- "add x26, x26, #0x10\n"
"bgt 9b\n"
"10:" // Height 1: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
"ld1w { z6.s }, p5/Z, [x10]\n"
- "fmla z8.s, z6.s, z0.s[0]\n"
+ "whilelt p0.s, XZR, x26\n"
"ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
- "fmla z9.s, z7.s, z0.s[0]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "fmla z8.s, z6.s, z0.s[0]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "fmla z9.s, z7.s, z0.s[0]\n"
"ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"fmla z10.s, z6.s, z0.s[0]\n"
"fmla z11.s, z7.s, z0.s[0]\n"
- "addvl x10, x10, #4\n"
"ble 11f\n"
"ld1w { z6.s }, p5/Z, [x10]\n"
- "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[1]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z9.s, z7.s, z0.s[1]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
"ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x1\n"
"fmla z10.s, z6.s, z0.s[1]\n"
- "fmla z11.s, z7.s, z0.s[1]\n"
"addvl x10, x10, #4\n"
+ "fmla z11.s, z7.s, z0.s[1]\n"
"ble 11f\n"
"ld1w { z6.s }, p5/Z, [x10]\n"
- "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[2]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z9.s, z7.s, z0.s[2]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
"ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x1\n"
"fmla z10.s, z6.s, z0.s[2]\n"
- "fmla z11.s, z7.s, z0.s[2]\n"
"addvl x10, x10, #4\n"
+ "fmla z11.s, z7.s, z0.s[2]\n"
"ble 11f\n"
"ld1w { z6.s }, p5/Z, [x10]\n"
- "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[3]\n"
- "fmla z9.s, z7.s, z0.s[3]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.s, z7.s, z0.s[3]\n"
"ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"fmla z10.s, z6.s, z0.s[3]\n"
"fmla z11.s, z7.s, z0.s[3]\n"
- "addvl x10, x10, #4\n"
"11:" // Height 1: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 6b\n"
"tbz %x[flags], #1, 12f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
- "fmin z8.s, p5/M, z8.s, z1.s\n"
- "fmin z9.s, p5/M, z9.s, z1.s\n"
- "fmin z10.s, p5/M, z10.s, z1.s\n"
- "fmin z11.s, p5/M, z11.s, z1.s\n"
- "fmax z8.s, p5/M, z8.s, z0.s\n"
- "fmax z9.s, p5/M, z9.s, z0.s\n"
- "fmax z10.s, p5/M, z10.s, z0.s\n"
- "fmax z11.s, p5/M, z11.s, z0.s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
+ "fmin z8.s, p5/M, z8.s, z0.s\n"
+ "fmin z9.s, p5/M, z9.s, z0.s\n"
+ "fmin z10.s, p5/M, z10.s, z0.s\n"
+ "fmin z11.s, p5/M, z11.s, z0.s\n"
+ "fmax z8.s, p5/M, z8.s, z1.s\n"
+ "fmax z9.s, p5/M, z9.s, z1.s\n"
+ "fmax z10.s, p5/M, z10.s, z1.s\n"
+ "fmax z11.s, p5/M, z11.s, z1.s\n"
"12:" // Height 1: No activation
- "st1w { z8.s }, p4, [x9]\n"
- "st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "st1w { z8.s }, p4, [x28]\n"
+ "st1w { z9.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"13:" // Height 1: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 2b\n"
"b 80f\n"
"14:" // Height 2
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"15:" // Height 2: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "cbz x12, 16f\n"
- "ld1w { z8.s }, p5/Z, [x12]\n"
- "ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x11\n"
+ "cbz x9, 16f\n"
+ "ld1w { z8.s }, p5/Z, [x9]\n"
"mov z12.d, z8.d\n"
+ "ld1w { z9.s }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z10.s }, p5/Z, [x9, #2, MUL VL]\n"
"mov z13.d, z9.d\n"
- "ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
- "addvl x12, x12, #4\n"
"b 18f\n"
"16:" // Height 2: no bias
"tbz %x[flags], #0, 17f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "ld1w { z8.s }, p4/Z, [x9]\n"
- "ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x25]\n"
- "ld1w { z13.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x25, #2, MUL VL]\n"
- "ld1w { z15.s }, p1/Z, [x25, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z8.s }, p4/Z, [x28]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "ld1w { z9.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z11.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
"b 18f\n"
"17:" // Height 2: no accumulate
"mov z8.b, #0x0\n"
@@ -315,52 +315,52 @@ void sve_hybrid_fp32_mla_6x4VL (
"mov z14.b, #0x0\n"
"mov z15.b, #0x0\n"
"18:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"19:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 20f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 21f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 21f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
"b 21f\n"
"20:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
"21:" // Height 2: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"ble 23f\n"
"22:" // Height 2: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1rqw { z1.s }, p0/Z, [x25]\n"
- "sub x27, x27, #0x4\n"
"ld1w { z6.s }, p5/Z, [x10]\n"
+ "whilelt p0.s, XZR, x26\n"
"ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x26, x26, #0x4\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
- "fmla z12.s, z6.s, z1.s[0]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
+ "cmp x26, #0x4\n"
"fmla z9.s, z7.s, z0.s[0]\n"
- "fmla z13.s, z7.s, z1.s[0]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "fmla z12.s, z6.s, z1.s[0]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z13.s, z7.s, z1.s[0]\n"
"ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
"fmla z10.s, z6.s, z0.s[0]\n"
"fmla z14.s, z6.s, z1.s[0]\n"
"ld1w { z6.s }, p5/Z, [x10, #4, MUL VL]\n"
- "cmp x27, #0x4\n"
"fmla z11.s, z7.s, z0.s[0]\n"
"fmla z15.s, z7.s, z1.s[0]\n"
"ld1w { z7.s }, p5/Z, [x10, #5, MUL VL]\n"
- "add x26, x26, #0x10\n"
"fmla z8.s, z6.s, z0.s[1]\n"
"fmla z12.s, z6.s, z1.s[1]\n"
"ld1w { z6.s }, p5/Z, [x10, #6, MUL VL]\n"
- "add x25, x25, #0x10\n"
"fmla z9.s, z7.s, z0.s[1]\n"
"fmla z13.s, z7.s, z1.s[1]\n"
"ld1w { z7.s }, p5/Z, [x10, #7, MUL VL]\n"
@@ -395,156 +395,156 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z15.s, z7.s, z1.s[3]\n"
"bgt 22b\n"
"23:" // Height 2: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1rqw { z1.s }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
"ld1w { z6.s }, p5/Z, [x10]\n"
+ "whilelt p0.s, XZR, x26\n"
"ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
- "fmla z12.s, z6.s, z1.s[0]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
"fmla z9.s, z7.s, z0.s[0]\n"
- "fmla z13.s, z7.s, z1.s[0]\n"
+ "fmla z12.s, z6.s, z1.s[0]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z13.s, z7.s, z1.s[0]\n"
"ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"fmla z10.s, z6.s, z0.s[0]\n"
"fmla z14.s, z6.s, z1.s[0]\n"
- "addvl x10, x10, #4\n"
"fmla z11.s, z7.s, z0.s[0]\n"
"fmla z15.s, z7.s, z1.s[0]\n"
"ble 24f\n"
"ld1w { z6.s }, p5/Z, [x10]\n"
- "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[1]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.s, z6.s, z1.s[1]\n"
+ "ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.s, z7.s, z0.s[1]\n"
"fmla z13.s, z7.s, z1.s[1]\n"
- "ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
"ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #4\n"
"fmla z10.s, z6.s, z0.s[1]\n"
"fmla z14.s, z6.s, z1.s[1]\n"
- "addvl x10, x10, #4\n"
"fmla z11.s, z7.s, z0.s[1]\n"
"fmla z15.s, z7.s, z1.s[1]\n"
"ble 24f\n"
"ld1w { z6.s }, p5/Z, [x10]\n"
- "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[2]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.s, z6.s, z1.s[2]\n"
+ "ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.s, z7.s, z0.s[2]\n"
"fmla z13.s, z7.s, z1.s[2]\n"
- "ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
"ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #4\n"
"fmla z10.s, z6.s, z0.s[2]\n"
"fmla z14.s, z6.s, z1.s[2]\n"
- "addvl x10, x10, #4\n"
"fmla z11.s, z7.s, z0.s[2]\n"
"fmla z15.s, z7.s, z1.s[2]\n"
"ble 24f\n"
"ld1w { z6.s }, p5/Z, [x10]\n"
- "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[3]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z12.s, z6.s, z1.s[3]\n"
+ "ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.s, z7.s, z0.s[3]\n"
"fmla z13.s, z7.s, z1.s[3]\n"
- "ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
"ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"fmla z10.s, z6.s, z0.s[3]\n"
"fmla z14.s, z6.s, z1.s[3]\n"
- "addvl x10, x10, #4\n"
"fmla z11.s, z7.s, z0.s[3]\n"
"fmla z15.s, z7.s, z1.s[3]\n"
"24:" // Height 2: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 19b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #2\n"
"tbz %x[flags], #1, 25f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
- "fmin z8.s, p5/M, z8.s, z1.s\n"
- "fmin z9.s, p5/M, z9.s, z1.s\n"
- "fmin z10.s, p5/M, z10.s, z1.s\n"
- "fmin z11.s, p5/M, z11.s, z1.s\n"
- "fmin z12.s, p5/M, z12.s, z1.s\n"
- "fmin z13.s, p5/M, z13.s, z1.s\n"
- "fmin z14.s, p5/M, z14.s, z1.s\n"
- "fmin z15.s, p5/M, z15.s, z1.s\n"
- "fmax z8.s, p5/M, z8.s, z0.s\n"
- "fmax z9.s, p5/M, z9.s, z0.s\n"
- "fmax z10.s, p5/M, z10.s, z0.s\n"
- "fmax z11.s, p5/M, z11.s, z0.s\n"
- "fmax z12.s, p5/M, z12.s, z0.s\n"
- "fmax z13.s, p5/M, z13.s, z0.s\n"
- "fmax z14.s, p5/M, z14.s, z0.s\n"
- "fmax z15.s, p5/M, z15.s, z0.s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
+ "fmin z8.s, p5/M, z8.s, z0.s\n"
+ "fmin z9.s, p5/M, z9.s, z0.s\n"
+ "fmin z10.s, p5/M, z10.s, z0.s\n"
+ "fmin z11.s, p5/M, z11.s, z0.s\n"
+ "fmin z12.s, p5/M, z12.s, z0.s\n"
+ "fmax z8.s, p5/M, z8.s, z1.s\n"
+ "fmax z9.s, p5/M, z9.s, z1.s\n"
+ "fmax z10.s, p5/M, z10.s, z1.s\n"
+ "fmax z11.s, p5/M, z11.s, z1.s\n"
+ "fmax z12.s, p5/M, z12.s, z1.s\n"
+ "fmin z13.s, p5/M, z13.s, z0.s\n"
+ "fmin z14.s, p5/M, z14.s, z0.s\n"
+ "fmin z15.s, p5/M, z15.s, z0.s\n"
+ "fmax z13.s, p5/M, z13.s, z1.s\n"
+ "fmax z14.s, p5/M, z14.s, z1.s\n"
+ "fmax z15.s, p5/M, z15.s, z1.s\n"
"25:" // Height 2: No activation
- "st1w { z8.s }, p4, [x9]\n"
- "st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p4, [x25]\n"
- "st1w { z13.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x25, #3, MUL VL]\n"
+ "st1w { z8.s }, p4, [x28]\n"
+ "st1w { z9.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z12.s }, p4, [x24]\n"
+ "st1w { z13.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x24, #3, MUL VL]\n"
"26:" // Height 2: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 15b\n"
"b 80f\n"
"27:" // Height 3
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"28:" // Height 3: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "cbz x12, 29f\n"
- "ld1w { z8.s }, p5/Z, [x12]\n"
- "ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x11\n"
+ "cbz x9, 29f\n"
+ "ld1w { z8.s }, p5/Z, [x9]\n"
"mov z12.d, z8.d\n"
+ "ld1w { z9.s }, p5/Z, [x9, #1, MUL VL]\n"
+ "mov z16.d, z8.d\n"
+ "ld1w { z10.s }, p5/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x9, #3, MUL VL]\n"
"mov z13.d, z9.d\n"
- "ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
+ "mov z17.d, z9.d\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
- "mov z16.d, z8.d\n"
- "mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"b 31f\n"
"29:" // Height 3: no bias
"tbz %x[flags], #0, 30f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z8.s }, p4/Z, [x9]\n"
- "ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x25]\n"
- "ld1w { z13.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x25, #2, MUL VL]\n"
- "ld1w { z15.s }, p1/Z, [x25, #3, MUL VL]\n"
- "ld1w { z16.s }, p4/Z, [x24]\n"
- "ld1w { z17.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1w { z19.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z8.s }, p4/Z, [x28]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "ld1w { z9.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z11.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x23]\n"
+ "ld1w { z17.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z19.s }, p1/Z, [x23, #3, MUL VL]\n"
"b 31f\n"
"30:" // Height 3: no accumulate
"mov z8.b, #0x0\n"
@@ -560,63 +560,63 @@ void sve_hybrid_fp32_mla_6x4VL (
"mov z18.b, #0x0\n"
"mov z19.b, #0x0\n"
"31:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"32:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 33f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 34f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 34f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
"b 34f\n"
"33:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"34:" // Height 3: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"ble 36f\n"
"35:" // Height 3: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1rqw { z1.s }, p0/Z, [x25]\n"
- "sub x27, x27, #0x4\n"
- "ld1rqw { z2.s }, p0/Z, [x24]\n"
"ld1w { z6.s }, p5/Z, [x10]\n"
+ "whilelt p0.s, XZR, x26\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x26, x26, #0x4\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
+ "cmp x26, #0x4\n"
+ "fmla z9.s, z7.s, z0.s[0]\n"
+ "ld1rqw { z2.s }, p0/Z, [x23]\n"
+ "add x25, x25, #0x10\n"
"fmla z12.s, z6.s, z1.s[0]\n"
- "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "add x24, x24, #0x10\n"
+ "fmla z13.s, z7.s, z1.s[0]\n"
+ "add x23, x23, #0x10\n"
"fmla z16.s, z6.s, z2.s[0]\n"
- "fmla z9.s, z7.s, z0.s[0]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
- "fmla z13.s, z7.s, z1.s[0]\n"
"fmla z17.s, z7.s, z2.s[0]\n"
"ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
- "cmp x27, #0x4\n"
"fmla z10.s, z6.s, z0.s[0]\n"
"fmla z14.s, z6.s, z1.s[0]\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
"fmla z18.s, z6.s, z2.s[0]\n"
- "fmla z11.s, z7.s, z0.s[0]\n"
"ld1w { z6.s }, p5/Z, [x10, #4, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "fmla z11.s, z7.s, z0.s[0]\n"
"fmla z15.s, z7.s, z1.s[0]\n"
"fmla z19.s, z7.s, z2.s[0]\n"
"ld1w { z7.s }, p5/Z, [x10, #5, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[1]\n"
"fmla z12.s, z6.s, z1.s[1]\n"
"fmla z16.s, z6.s, z2.s[1]\n"
- "fmla z9.s, z7.s, z0.s[1]\n"
"ld1w { z6.s }, p5/Z, [x10, #6, MUL VL]\n"
+ "fmla z9.s, z7.s, z0.s[1]\n"
"fmla z13.s, z7.s, z1.s[1]\n"
"fmla z17.s, z7.s, z2.s[1]\n"
"ld1w { z7.s }, p5/Z, [x10, #7, MUL VL]\n"
@@ -624,32 +624,32 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z10.s, z6.s, z0.s[1]\n"
"fmla z14.s, z6.s, z1.s[1]\n"
"fmla z18.s, z6.s, z2.s[1]\n"
- "fmla z11.s, z7.s, z0.s[1]\n"
"ld1w { z6.s }, p5/Z, [x10, #-8, MUL VL]\n"
+ "fmla z11.s, z7.s, z0.s[1]\n"
"fmla z15.s, z7.s, z1.s[1]\n"
"fmla z19.s, z7.s, z2.s[1]\n"
"ld1w { z7.s }, p5/Z, [x10, #-7, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[2]\n"
"fmla z12.s, z6.s, z1.s[2]\n"
"fmla z16.s, z6.s, z2.s[2]\n"
- "fmla z9.s, z7.s, z0.s[2]\n"
"ld1w { z6.s }, p5/Z, [x10, #-6, MUL VL]\n"
+ "fmla z9.s, z7.s, z0.s[2]\n"
"fmla z13.s, z7.s, z1.s[2]\n"
"fmla z17.s, z7.s, z2.s[2]\n"
"ld1w { z7.s }, p5/Z, [x10, #-5, MUL VL]\n"
"fmla z10.s, z6.s, z0.s[2]\n"
"fmla z14.s, z6.s, z1.s[2]\n"
"fmla z18.s, z6.s, z2.s[2]\n"
- "fmla z11.s, z7.s, z0.s[2]\n"
"ld1w { z6.s }, p5/Z, [x10, #-4, MUL VL]\n"
+ "fmla z11.s, z7.s, z0.s[2]\n"
"fmla z15.s, z7.s, z1.s[2]\n"
"fmla z19.s, z7.s, z2.s[2]\n"
"ld1w { z7.s }, p5/Z, [x10, #-3, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[3]\n"
"fmla z12.s, z6.s, z1.s[3]\n"
"fmla z16.s, z6.s, z2.s[3]\n"
- "fmla z9.s, z7.s, z0.s[3]\n"
"ld1w { z6.s }, p5/Z, [x10, #-2, MUL VL]\n"
+ "fmla z9.s, z7.s, z0.s[3]\n"
"fmla z13.s, z7.s, z1.s[3]\n"
"fmla z17.s, z7.s, z2.s[3]\n"
"ld1w { z7.s }, p5/Z, [x10, #-1, MUL VL]\n"
@@ -661,19 +661,19 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z19.s, z7.s, z2.s[3]\n"
"bgt 35b\n"
"36:" // Height 3: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1rqw { z1.s }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
- "ld1rqw { z2.s }, p0/Z, [x24]\n"
"ld1w { z6.s }, p5/Z, [x10]\n"
+ "whilelt p0.s, XZR, x26\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
+ "fmla z9.s, z7.s, z0.s[0]\n"
+ "ld1rqw { z2.s }, p0/Z, [x23]\n"
"fmla z12.s, z6.s, z1.s[0]\n"
- "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "fmla z13.s, z7.s, z1.s[0]\n"
"fmla z16.s, z6.s, z2.s[0]\n"
- "fmla z9.s, z7.s, z0.s[0]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
- "fmla z13.s, z7.s, z1.s[0]\n"
"fmla z17.s, z7.s, z2.s[0]\n"
"ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
@@ -685,13 +685,13 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z19.s, z7.s, z2.s[0]\n"
"ble 37f\n"
"ld1w { z6.s }, p5/Z, [x10]\n"
- "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[1]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.s, z6.s, z1.s[1]\n"
"fmla z16.s, z6.s, z2.s[1]\n"
- "fmla z9.s, z7.s, z0.s[1]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "fmla z9.s, z7.s, z0.s[1]\n"
"fmla z13.s, z7.s, z1.s[1]\n"
"fmla z17.s, z7.s, z2.s[1]\n"
"ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
@@ -704,13 +704,13 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z19.s, z7.s, z2.s[1]\n"
"ble 37f\n"
"ld1w { z6.s }, p5/Z, [x10]\n"
- "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[2]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.s, z6.s, z1.s[2]\n"
"fmla z16.s, z6.s, z2.s[2]\n"
- "fmla z9.s, z7.s, z0.s[2]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "fmla z9.s, z7.s, z0.s[2]\n"
"fmla z13.s, z7.s, z1.s[2]\n"
"fmla z17.s, z7.s, z2.s[2]\n"
"ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
@@ -723,12 +723,12 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z19.s, z7.s, z2.s[2]\n"
"ble 37f\n"
"ld1w { z6.s }, p5/Z, [x10]\n"
- "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[3]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z12.s, z6.s, z1.s[3]\n"
"fmla z16.s, z6.s, z2.s[3]\n"
- "fmla z9.s, z7.s, z0.s[3]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.s, z7.s, z0.s[3]\n"
"fmla z13.s, z7.s, z1.s[3]\n"
"fmla z17.s, z7.s, z2.s[3]\n"
"ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
@@ -740,116 +740,116 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z15.s, z7.s, z1.s[3]\n"
"fmla z19.s, z7.s, z2.s[3]\n"
"37:" // Height 3: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 32b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"tbz %x[flags], #1, 38f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
- "fmin z8.s, p5/M, z8.s, z1.s\n"
- "fmin z9.s, p5/M, z9.s, z1.s\n"
- "fmin z10.s, p5/M, z10.s, z1.s\n"
- "fmin z11.s, p5/M, z11.s, z1.s\n"
- "fmin z12.s, p5/M, z12.s, z1.s\n"
- "fmin z13.s, p5/M, z13.s, z1.s\n"
- "fmin z14.s, p5/M, z14.s, z1.s\n"
- "fmin z15.s, p5/M, z15.s, z1.s\n"
- "fmin z16.s, p5/M, z16.s, z1.s\n"
- "fmin z17.s, p5/M, z17.s, z1.s\n"
- "fmin z18.s, p5/M, z18.s, z1.s\n"
- "fmin z19.s, p5/M, z19.s, z1.s\n"
- "fmax z8.s, p5/M, z8.s, z0.s\n"
- "fmax z9.s, p5/M, z9.s, z0.s\n"
- "fmax z10.s, p5/M, z10.s, z0.s\n"
- "fmax z11.s, p5/M, z11.s, z0.s\n"
- "fmax z12.s, p5/M, z12.s, z0.s\n"
- "fmax z13.s, p5/M, z13.s, z0.s\n"
- "fmax z14.s, p5/M, z14.s, z0.s\n"
- "fmax z15.s, p5/M, z15.s, z0.s\n"
- "fmax z16.s, p5/M, z16.s, z0.s\n"
- "fmax z17.s, p5/M, z17.s, z0.s\n"
- "fmax z18.s, p5/M, z18.s, z0.s\n"
- "fmax z19.s, p5/M, z19.s, z0.s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
+ "fmin z8.s, p5/M, z8.s, z0.s\n"
+ "fmin z9.s, p5/M, z9.s, z0.s\n"
+ "fmin z10.s, p5/M, z10.s, z0.s\n"
+ "fmin z11.s, p5/M, z11.s, z0.s\n"
+ "fmin z12.s, p5/M, z12.s, z0.s\n"
+ "fmax z8.s, p5/M, z8.s, z1.s\n"
+ "fmax z9.s, p5/M, z9.s, z1.s\n"
+ "fmax z10.s, p5/M, z10.s, z1.s\n"
+ "fmax z11.s, p5/M, z11.s, z1.s\n"
+ "fmax z12.s, p5/M, z12.s, z1.s\n"
+ "fmin z13.s, p5/M, z13.s, z0.s\n"
+ "fmin z14.s, p5/M, z14.s, z0.s\n"
+ "fmin z15.s, p5/M, z15.s, z0.s\n"
+ "fmin z16.s, p5/M, z16.s, z0.s\n"
+ "fmax z13.s, p5/M, z13.s, z1.s\n"
+ "fmax z14.s, p5/M, z14.s, z1.s\n"
+ "fmax z15.s, p5/M, z15.s, z1.s\n"
+ "fmax z16.s, p5/M, z16.s, z1.s\n"
+ "fmin z17.s, p5/M, z17.s, z0.s\n"
+ "fmin z18.s, p5/M, z18.s, z0.s\n"
+ "fmin z19.s, p5/M, z19.s, z0.s\n"
+ "fmax z17.s, p5/M, z17.s, z1.s\n"
+ "fmax z18.s, p5/M, z18.s, z1.s\n"
+ "fmax z19.s, p5/M, z19.s, z1.s\n"
"38:" // Height 3: No activation
- "st1w { z8.s }, p4, [x9]\n"
- "st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p4, [x25]\n"
- "st1w { z13.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x24]\n"
- "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z8.s }, p4, [x28]\n"
+ "st1w { z9.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z12.s }, p4, [x24]\n"
+ "st1w { z13.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x23]\n"
+ "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
"39:" // Height 3: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 28b\n"
"b 80f\n"
"40:" // Height 4
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"41:" // Height 4: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "cbz x12, 42f\n"
- "ld1w { z8.s }, p5/Z, [x12]\n"
- "ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x11\n"
+ "cbz x9, 42f\n"
+ "ld1w { z8.s }, p5/Z, [x9]\n"
"mov z12.d, z8.d\n"
+ "ld1w { z9.s }, p5/Z, [x9, #1, MUL VL]\n"
+ "mov z16.d, z8.d\n"
+ "ld1w { z10.s }, p5/Z, [x9, #2, MUL VL]\n"
+ "mov z20.d, z8.d\n"
+ "ld1w { z11.s }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"mov z13.d, z9.d\n"
- "ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "mov z17.d, z9.d\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
- "mov z16.d, z8.d\n"
- "mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
"b 44f\n"
"42:" // Height 4: no bias
"tbz %x[flags], #0, 43f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z8.s }, p4/Z, [x9]\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x25]\n"
- "ld1w { z13.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x25, #2, MUL VL]\n"
- "ld1w { z15.s }, p1/Z, [x25, #3, MUL VL]\n"
- "ld1w { z16.s }, p4/Z, [x24]\n"
- "ld1w { z17.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1w { z19.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x23]\n"
- "ld1w { z21.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z22.s }, p2/Z, [x23, #2, MUL VL]\n"
- "ld1w { z23.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z8.s }, p4/Z, [x28]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "ld1w { z9.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z11.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x23]\n"
+ "ld1w { z17.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z19.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z20.s }, p4/Z, [x22]\n"
+ "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
"b 44f\n"
"43:" // Height 4: no accumulate
"mov z8.b, #0x0\n"
@@ -869,55 +869,55 @@ void sve_hybrid_fp32_mla_6x4VL (
"mov z22.b, #0x0\n"
"mov z23.b, #0x0\n"
"44:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"45:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 46f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 47f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 47f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
"b 47f\n"
"46:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"47:" // Height 4: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"ble 49f\n"
"48:" // Height 4: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1rqw { z1.s }, p0/Z, [x25]\n"
- "sub x27, x27, #0x4\n"
- "ld1rqw { z2.s }, p0/Z, [x24]\n"
- "ld1rqw { z3.s }, p0/Z, [x23]\n"
- "cmp x27, #0x4\n"
- "add x26, x26, #0x10\n"
"ld1w { z6.s }, p5/Z, [x10]\n"
+ "whilelt p0.s, XZR, x26\n"
"ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x26, x26, #0x4\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
- "fmla z12.s, z6.s, z1.s[0]\n"
- "fmla z16.s, z6.s, z2.s[0]\n"
- "fmla z20.s, z6.s, z3.s[0]\n"
- "ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
- "add x25, x25, #0x10\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
+ "cmp x26, #0x4\n"
"fmla z9.s, z7.s, z0.s[0]\n"
- "fmla z13.s, z7.s, z1.s[0]\n"
+ "ld1rqw { z2.s }, p0/Z, [x23]\n"
+ "add x25, x25, #0x10\n"
+ "fmla z12.s, z6.s, z1.s[0]\n"
+ "ld1rqw { z3.s }, p0/Z, [x22]\n"
"add x24, x24, #0x10\n"
+ "fmla z16.s, z6.s, z2.s[0]\n"
"add x23, x23, #0x10\n"
+ "fmla z13.s, z7.s, z1.s[0]\n"
+ "add x22, x22, #0x10\n"
"fmla z17.s, z7.s, z2.s[0]\n"
+ "fmla z20.s, z6.s, z3.s[0]\n"
+ "ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z21.s, z7.s, z3.s[0]\n"
"ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
"fmla z10.s, z6.s, z0.s[0]\n"
@@ -991,21 +991,21 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z23.s, z7.s, z3.s[3]\n"
"bgt 48b\n"
"49:" // Height 4: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1rqw { z1.s }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
- "ld1rqw { z2.s }, p0/Z, [x24]\n"
- "ld1rqw { z3.s }, p0/Z, [x23]\n"
"ld1w { z6.s }, p5/Z, [x10]\n"
+ "whilelt p0.s, XZR, x26\n"
"ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
+ "fmla z9.s, z7.s, z0.s[0]\n"
+ "ld1rqw { z2.s }, p0/Z, [x23]\n"
+ "ld1rqw { z3.s }, p0/Z, [x22]\n"
"fmla z12.s, z6.s, z1.s[0]\n"
+ "fmla z13.s, z7.s, z1.s[0]\n"
"fmla z16.s, z6.s, z2.s[0]\n"
"fmla z20.s, z6.s, z3.s[0]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
- "fmla z9.s, z7.s, z0.s[0]\n"
- "fmla z13.s, z7.s, z1.s[0]\n"
"fmla z17.s, z7.s, z2.s[0]\n"
"fmla z21.s, z7.s, z3.s[0]\n"
"ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
@@ -1020,13 +1020,13 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z23.s, z7.s, z3.s[0]\n"
"ble 50f\n"
"ld1w { z6.s }, p5/Z, [x10]\n"
- "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[1]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.s, z6.s, z1.s[1]\n"
"fmla z16.s, z6.s, z2.s[1]\n"
"fmla z20.s, z6.s, z3.s[1]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
"fmla z9.s, z7.s, z0.s[1]\n"
"fmla z13.s, z7.s, z1.s[1]\n"
"fmla z17.s, z7.s, z2.s[1]\n"
@@ -1043,13 +1043,13 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z23.s, z7.s, z3.s[1]\n"
"ble 50f\n"
"ld1w { z6.s }, p5/Z, [x10]\n"
- "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[2]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.s, z6.s, z1.s[2]\n"
"fmla z16.s, z6.s, z2.s[2]\n"
"fmla z20.s, z6.s, z3.s[2]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
"fmla z9.s, z7.s, z0.s[2]\n"
"fmla z13.s, z7.s, z1.s[2]\n"
"fmla z17.s, z7.s, z2.s[2]\n"
@@ -1066,8 +1066,8 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z23.s, z7.s, z3.s[2]\n"
"ble 50f\n"
"ld1w { z6.s }, p5/Z, [x10]\n"
- "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[3]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z12.s, z6.s, z1.s[3]\n"
"fmla z16.s, z6.s, z2.s[3]\n"
"fmla z20.s, z6.s, z3.s[3]\n"
@@ -1087,103 +1087,103 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z19.s, z7.s, z2.s[3]\n"
"fmla z23.s, z7.s, z3.s[3]\n"
"50:" // Height 4: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 45b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"tbz %x[flags], #1, 51f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
- "fmin z8.s, p5/M, z8.s, z1.s\n"
- "fmin z9.s, p5/M, z9.s, z1.s\n"
- "fmin z10.s, p5/M, z10.s, z1.s\n"
- "fmin z11.s, p5/M, z11.s, z1.s\n"
- "fmin z12.s, p5/M, z12.s, z1.s\n"
- "fmin z13.s, p5/M, z13.s, z1.s\n"
- "fmin z14.s, p5/M, z14.s, z1.s\n"
- "fmin z15.s, p5/M, z15.s, z1.s\n"
- "fmin z16.s, p5/M, z16.s, z1.s\n"
- "fmin z17.s, p5/M, z17.s, z1.s\n"
- "fmin z18.s, p5/M, z18.s, z1.s\n"
- "fmin z19.s, p5/M, z19.s, z1.s\n"
- "fmin z20.s, p5/M, z20.s, z1.s\n"
- "fmin z21.s, p5/M, z21.s, z1.s\n"
- "fmin z22.s, p5/M, z22.s, z1.s\n"
- "fmin z23.s, p5/M, z23.s, z1.s\n"
- "fmax z8.s, p5/M, z8.s, z0.s\n"
- "fmax z9.s, p5/M, z9.s, z0.s\n"
- "fmax z10.s, p5/M, z10.s, z0.s\n"
- "fmax z11.s, p5/M, z11.s, z0.s\n"
- "fmax z12.s, p5/M, z12.s, z0.s\n"
- "fmax z13.s, p5/M, z13.s, z0.s\n"
- "fmax z14.s, p5/M, z14.s, z0.s\n"
- "fmax z15.s, p5/M, z15.s, z0.s\n"
- "fmax z16.s, p5/M, z16.s, z0.s\n"
- "fmax z17.s, p5/M, z17.s, z0.s\n"
- "fmax z18.s, p5/M, z18.s, z0.s\n"
- "fmax z19.s, p5/M, z19.s, z0.s\n"
- "fmax z20.s, p5/M, z20.s, z0.s\n"
- "fmax z21.s, p5/M, z21.s, z0.s\n"
- "fmax z22.s, p5/M, z22.s, z0.s\n"
- "fmax z23.s, p5/M, z23.s, z0.s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
+ "fmin z8.s, p5/M, z8.s, z0.s\n"
+ "fmin z9.s, p5/M, z9.s, z0.s\n"
+ "fmin z10.s, p5/M, z10.s, z0.s\n"
+ "fmin z11.s, p5/M, z11.s, z0.s\n"
+ "fmin z12.s, p5/M, z12.s, z0.s\n"
+ "fmax z8.s, p5/M, z8.s, z1.s\n"
+ "fmax z9.s, p5/M, z9.s, z1.s\n"
+ "fmax z10.s, p5/M, z10.s, z1.s\n"
+ "fmax z11.s, p5/M, z11.s, z1.s\n"
+ "fmax z12.s, p5/M, z12.s, z1.s\n"
+ "fmin z13.s, p5/M, z13.s, z0.s\n"
+ "fmin z14.s, p5/M, z14.s, z0.s\n"
+ "fmin z15.s, p5/M, z15.s, z0.s\n"
+ "fmin z16.s, p5/M, z16.s, z0.s\n"
+ "fmax z13.s, p5/M, z13.s, z1.s\n"
+ "fmax z14.s, p5/M, z14.s, z1.s\n"
+ "fmax z15.s, p5/M, z15.s, z1.s\n"
+ "fmax z16.s, p5/M, z16.s, z1.s\n"
+ "fmin z17.s, p5/M, z17.s, z0.s\n"
+ "fmin z18.s, p5/M, z18.s, z0.s\n"
+ "fmin z19.s, p5/M, z19.s, z0.s\n"
+ "fmin z20.s, p5/M, z20.s, z0.s\n"
+ "fmax z17.s, p5/M, z17.s, z1.s\n"
+ "fmax z18.s, p5/M, z18.s, z1.s\n"
+ "fmax z19.s, p5/M, z19.s, z1.s\n"
+ "fmax z20.s, p5/M, z20.s, z1.s\n"
+ "fmin z21.s, p5/M, z21.s, z0.s\n"
+ "fmin z22.s, p5/M, z22.s, z0.s\n"
+ "fmin z23.s, p5/M, z23.s, z0.s\n"
+ "fmax z21.s, p5/M, z21.s, z1.s\n"
+ "fmax z22.s, p5/M, z22.s, z1.s\n"
+ "fmax z23.s, p5/M, z23.s, z1.s\n"
"51:" // Height 4: No activation
- "st1w { z8.s }, p4, [x9]\n"
- "st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p4, [x25]\n"
- "st1w { z13.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x24]\n"
- "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z20.s }, p4, [x23]\n"
- "st1w { z21.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z22.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z23.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z8.s }, p4, [x28]\n"
+ "st1w { z9.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z12.s }, p4, [x24]\n"
+ "st1w { z13.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x23]\n"
+ "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z20.s }, p4, [x22]\n"
+ "st1w { z21.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z22.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z23.s }, p1, [x22, #3, MUL VL]\n"
"52:" // Height 4: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 41b\n"
"b 80f\n"
"53:" // Height 5
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
"54:" // Height 5: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "cbz x12, 55f\n"
- "ld1w { z8.s }, p5/Z, [x12]\n"
- "ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x11\n"
+ "cbz x9, 55f\n"
+ "ld1w { z8.s }, p5/Z, [x9]\n"
"mov z12.d, z8.d\n"
+ "ld1w { z9.s }, p5/Z, [x9, #1, MUL VL]\n"
+ "mov z16.d, z8.d\n"
+ "ld1w { z10.s }, p5/Z, [x9, #2, MUL VL]\n"
+ "mov z20.d, z8.d\n"
+ "ld1w { z11.s }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"mov z13.d, z9.d\n"
- "ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "mov z17.d, z9.d\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
- "mov z16.d, z8.d\n"
- "mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
@@ -1194,31 +1194,31 @@ void sve_hybrid_fp32_mla_6x4VL (
"b 57f\n"
"55:" // Height 5: no bias
"tbz %x[flags], #0, 56f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z8.s }, p4/Z, [x9]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x25]\n"
- "ld1w { z13.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x25, #2, MUL VL]\n"
- "ld1w { z15.s }, p1/Z, [x25, #3, MUL VL]\n"
- "ld1w { z16.s }, p4/Z, [x24]\n"
- "ld1w { z17.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1w { z19.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x23]\n"
- "ld1w { z21.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z22.s }, p2/Z, [x23, #2, MUL VL]\n"
- "ld1w { z23.s }, p1/Z, [x23, #3, MUL VL]\n"
- "ld1w { z24.s }, p4/Z, [x22]\n"
- "ld1w { z25.s }, p3/Z, [x22, #1, MUL VL]\n"
- "ld1w { z26.s }, p2/Z, [x22, #2, MUL VL]\n"
- "ld1w { z27.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z8.s }, p4/Z, [x28]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "ld1w { z9.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z11.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x23]\n"
+ "ld1w { z17.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z19.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z20.s }, p4/Z, [x22]\n"
+ "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z24.s }, p4/Z, [x21]\n"
+ "ld1w { z25.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z26.s }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z27.s }, p1/Z, [x21, #3, MUL VL]\n"
"b 57f\n"
"56:" // Height 5: no accumulate
"mov z8.b, #0x0\n"
@@ -1242,61 +1242,61 @@ void sve_hybrid_fp32_mla_6x4VL (
"mov z26.b, #0x0\n"
"mov z27.b, #0x0\n"
"57:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"58:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 59f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 60f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x22, x22, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 60f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
"b 60f\n"
"59:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"60:" // Height 5: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"ble 62f\n"
"61:" // Height 5: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1rqw { z1.s }, p0/Z, [x25]\n"
- "sub x27, x27, #0x4\n"
- "ld1rqw { z2.s }, p0/Z, [x24]\n"
- "ld1rqw { z3.s }, p0/Z, [x23]\n"
- "cmp x27, #0x4\n"
- "add x26, x26, #0x10\n"
- "ld1rqw { z4.s }, p0/Z, [x22]\n"
"ld1w { z6.s }, p5/Z, [x10]\n"
+ "whilelt p0.s, XZR, x26\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x26, x26, #0x4\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
+ "cmp x26, #0x4\n"
+ "fmla z9.s, z7.s, z0.s[0]\n"
+ "ld1rqw { z2.s }, p0/Z, [x23]\n"
+ "add x25, x25, #0x10\n"
"fmla z12.s, z6.s, z1.s[0]\n"
- "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1rqw { z3.s }, p0/Z, [x22]\n"
+ "add x24, x24, #0x10\n"
"fmla z16.s, z6.s, z2.s[0]\n"
+ "ld1rqw { z4.s }, p0/Z, [x21]\n"
+ "add x23, x23, #0x10\n"
+ "fmla z13.s, z7.s, z1.s[0]\n"
+ "add x22, x22, #0x10\n"
+ "fmla z17.s, z7.s, z2.s[0]\n"
+ "add x21, x21, #0x10\n"
"fmla z20.s, z6.s, z3.s[0]\n"
- "add x25, x25, #0x10\n"
"fmla z24.s, z6.s, z4.s[0]\n"
- "fmla z9.s, z7.s, z0.s[0]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
- "add x24, x24, #0x10\n"
- "fmla z13.s, z7.s, z1.s[0]\n"
- "fmla z17.s, z7.s, z2.s[0]\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
"fmla z21.s, z7.s, z3.s[0]\n"
"fmla z25.s, z7.s, z4.s[0]\n"
"ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
@@ -1305,8 +1305,8 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z18.s, z6.s, z2.s[0]\n"
"fmla z22.s, z6.s, z3.s[0]\n"
"fmla z26.s, z6.s, z4.s[0]\n"
- "fmla z11.s, z7.s, z0.s[0]\n"
"ld1w { z6.s }, p5/Z, [x10, #4, MUL VL]\n"
+ "fmla z11.s, z7.s, z0.s[0]\n"
"fmla z15.s, z7.s, z1.s[0]\n"
"fmla z19.s, z7.s, z2.s[0]\n"
"fmla z23.s, z7.s, z3.s[0]\n"
@@ -1317,8 +1317,8 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z16.s, z6.s, z2.s[1]\n"
"fmla z20.s, z6.s, z3.s[1]\n"
"fmla z24.s, z6.s, z4.s[1]\n"
- "fmla z9.s, z7.s, z0.s[1]\n"
"ld1w { z6.s }, p5/Z, [x10, #6, MUL VL]\n"
+ "fmla z9.s, z7.s, z0.s[1]\n"
"fmla z13.s, z7.s, z1.s[1]\n"
"fmla z17.s, z7.s, z2.s[1]\n"
"fmla z21.s, z7.s, z3.s[1]\n"
@@ -1330,8 +1330,8 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z18.s, z6.s, z2.s[1]\n"
"fmla z22.s, z6.s, z3.s[1]\n"
"fmla z26.s, z6.s, z4.s[1]\n"
- "fmla z11.s, z7.s, z0.s[1]\n"
"ld1w { z6.s }, p5/Z, [x10, #-8, MUL VL]\n"
+ "fmla z11.s, z7.s, z0.s[1]\n"
"fmla z15.s, z7.s, z1.s[1]\n"
"fmla z19.s, z7.s, z2.s[1]\n"
"fmla z23.s, z7.s, z3.s[1]\n"
@@ -1342,8 +1342,8 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z16.s, z6.s, z2.s[2]\n"
"fmla z20.s, z6.s, z3.s[2]\n"
"fmla z24.s, z6.s, z4.s[2]\n"
- "fmla z9.s, z7.s, z0.s[2]\n"
"ld1w { z6.s }, p5/Z, [x10, #-6, MUL VL]\n"
+ "fmla z9.s, z7.s, z0.s[2]\n"
"fmla z13.s, z7.s, z1.s[2]\n"
"fmla z17.s, z7.s, z2.s[2]\n"
"fmla z21.s, z7.s, z3.s[2]\n"
@@ -1354,8 +1354,8 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z18.s, z6.s, z2.s[2]\n"
"fmla z22.s, z6.s, z3.s[2]\n"
"fmla z26.s, z6.s, z4.s[2]\n"
- "fmla z11.s, z7.s, z0.s[2]\n"
"ld1w { z6.s }, p5/Z, [x10, #-4, MUL VL]\n"
+ "fmla z11.s, z7.s, z0.s[2]\n"
"fmla z15.s, z7.s, z1.s[2]\n"
"fmla z19.s, z7.s, z2.s[2]\n"
"fmla z23.s, z7.s, z3.s[2]\n"
@@ -1366,8 +1366,8 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z16.s, z6.s, z2.s[3]\n"
"fmla z20.s, z6.s, z3.s[3]\n"
"fmla z24.s, z6.s, z4.s[3]\n"
- "fmla z9.s, z7.s, z0.s[3]\n"
"ld1w { z6.s }, p5/Z, [x10, #-2, MUL VL]\n"
+ "fmla z9.s, z7.s, z0.s[3]\n"
"fmla z13.s, z7.s, z1.s[3]\n"
"fmla z17.s, z7.s, z2.s[3]\n"
"fmla z21.s, z7.s, z3.s[3]\n"
@@ -1385,23 +1385,23 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z27.s, z7.s, z4.s[3]\n"
"bgt 61b\n"
"62:" // Height 5: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1rqw { z1.s }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
- "ld1rqw { z2.s }, p0/Z, [x24]\n"
- "ld1rqw { z3.s }, p0/Z, [x23]\n"
- "ld1rqw { z4.s }, p0/Z, [x22]\n"
"ld1w { z6.s }, p5/Z, [x10]\n"
+ "whilelt p0.s, XZR, x26\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
+ "fmla z9.s, z7.s, z0.s[0]\n"
+ "ld1rqw { z2.s }, p0/Z, [x23]\n"
+ "ld1rqw { z3.s }, p0/Z, [x22]\n"
"fmla z12.s, z6.s, z1.s[0]\n"
- "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1rqw { z4.s }, p0/Z, [x21]\n"
+ "fmla z13.s, z7.s, z1.s[0]\n"
"fmla z16.s, z6.s, z2.s[0]\n"
"fmla z20.s, z6.s, z3.s[0]\n"
"fmla z24.s, z6.s, z4.s[0]\n"
- "fmla z9.s, z7.s, z0.s[0]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
- "fmla z13.s, z7.s, z1.s[0]\n"
"fmla z17.s, z7.s, z2.s[0]\n"
"fmla z21.s, z7.s, z3.s[0]\n"
"fmla z25.s, z7.s, z4.s[0]\n"
@@ -1419,15 +1419,15 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z27.s, z7.s, z4.s[0]\n"
"ble 63f\n"
"ld1w { z6.s }, p5/Z, [x10]\n"
- "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[1]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.s, z6.s, z1.s[1]\n"
"fmla z16.s, z6.s, z2.s[1]\n"
"fmla z20.s, z6.s, z3.s[1]\n"
- "subs x27, x27, #0x1\n"
"fmla z24.s, z6.s, z4.s[1]\n"
- "fmla z9.s, z7.s, z0.s[1]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.s, z7.s, z0.s[1]\n"
"fmla z13.s, z7.s, z1.s[1]\n"
"fmla z17.s, z7.s, z2.s[1]\n"
"fmla z21.s, z7.s, z3.s[1]\n"
@@ -1446,15 +1446,15 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z27.s, z7.s, z4.s[1]\n"
"ble 63f\n"
"ld1w { z6.s }, p5/Z, [x10]\n"
- "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[2]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.s, z6.s, z1.s[2]\n"
"fmla z16.s, z6.s, z2.s[2]\n"
"fmla z20.s, z6.s, z3.s[2]\n"
- "subs x27, x27, #0x1\n"
"fmla z24.s, z6.s, z4.s[2]\n"
- "fmla z9.s, z7.s, z0.s[2]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.s, z7.s, z0.s[2]\n"
"fmla z13.s, z7.s, z1.s[2]\n"
"fmla z17.s, z7.s, z2.s[2]\n"
"fmla z21.s, z7.s, z3.s[2]\n"
@@ -1473,14 +1473,14 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z27.s, z7.s, z4.s[2]\n"
"ble 63f\n"
"ld1w { z6.s }, p5/Z, [x10]\n"
- "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[3]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z12.s, z6.s, z1.s[3]\n"
"fmla z16.s, z6.s, z2.s[3]\n"
"fmla z20.s, z6.s, z3.s[3]\n"
"fmla z24.s, z6.s, z4.s[3]\n"
- "fmla z9.s, z7.s, z0.s[3]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.s, z7.s, z0.s[3]\n"
"fmla z13.s, z7.s, z1.s[3]\n"
"fmla z17.s, z7.s, z2.s[3]\n"
"fmla z21.s, z7.s, z3.s[3]\n"
@@ -1498,119 +1498,119 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z23.s, z7.s, z3.s[3]\n"
"fmla z27.s, z7.s, z4.s[3]\n"
"63:" // Height 5: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 58b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"tbz %x[flags], #1, 64f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
- "fmin z8.s, p5/M, z8.s, z1.s\n"
- "fmin z9.s, p5/M, z9.s, z1.s\n"
- "fmin z10.s, p5/M, z10.s, z1.s\n"
- "fmin z11.s, p5/M, z11.s, z1.s\n"
- "fmin z12.s, p5/M, z12.s, z1.s\n"
- "fmin z13.s, p5/M, z13.s, z1.s\n"
- "fmin z14.s, p5/M, z14.s, z1.s\n"
- "fmin z15.s, p5/M, z15.s, z1.s\n"
- "fmin z16.s, p5/M, z16.s, z1.s\n"
- "fmin z17.s, p5/M, z17.s, z1.s\n"
- "fmin z18.s, p5/M, z18.s, z1.s\n"
- "fmin z19.s, p5/M, z19.s, z1.s\n"
- "fmin z20.s, p5/M, z20.s, z1.s\n"
- "fmin z21.s, p5/M, z21.s, z1.s\n"
- "fmin z22.s, p5/M, z22.s, z1.s\n"
- "fmin z23.s, p5/M, z23.s, z1.s\n"
- "fmin z24.s, p5/M, z24.s, z1.s\n"
- "fmin z25.s, p5/M, z25.s, z1.s\n"
- "fmin z26.s, p5/M, z26.s, z1.s\n"
- "fmin z27.s, p5/M, z27.s, z1.s\n"
- "fmax z8.s, p5/M, z8.s, z0.s\n"
- "fmax z9.s, p5/M, z9.s, z0.s\n"
- "fmax z10.s, p5/M, z10.s, z0.s\n"
- "fmax z11.s, p5/M, z11.s, z0.s\n"
- "fmax z12.s, p5/M, z12.s, z0.s\n"
- "fmax z13.s, p5/M, z13.s, z0.s\n"
- "fmax z14.s, p5/M, z14.s, z0.s\n"
- "fmax z15.s, p5/M, z15.s, z0.s\n"
- "fmax z16.s, p5/M, z16.s, z0.s\n"
- "fmax z17.s, p5/M, z17.s, z0.s\n"
- "fmax z18.s, p5/M, z18.s, z0.s\n"
- "fmax z19.s, p5/M, z19.s, z0.s\n"
- "fmax z20.s, p5/M, z20.s, z0.s\n"
- "fmax z21.s, p5/M, z21.s, z0.s\n"
- "fmax z22.s, p5/M, z22.s, z0.s\n"
- "fmax z23.s, p5/M, z23.s, z0.s\n"
- "fmax z24.s, p5/M, z24.s, z0.s\n"
- "fmax z25.s, p5/M, z25.s, z0.s\n"
- "fmax z26.s, p5/M, z26.s, z0.s\n"
- "fmax z27.s, p5/M, z27.s, z0.s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
+ "fmin z8.s, p5/M, z8.s, z0.s\n"
+ "fmin z9.s, p5/M, z9.s, z0.s\n"
+ "fmin z10.s, p5/M, z10.s, z0.s\n"
+ "fmin z11.s, p5/M, z11.s, z0.s\n"
+ "fmin z12.s, p5/M, z12.s, z0.s\n"
+ "fmax z8.s, p5/M, z8.s, z1.s\n"
+ "fmax z9.s, p5/M, z9.s, z1.s\n"
+ "fmax z10.s, p5/M, z10.s, z1.s\n"
+ "fmax z11.s, p5/M, z11.s, z1.s\n"
+ "fmax z12.s, p5/M, z12.s, z1.s\n"
+ "fmin z13.s, p5/M, z13.s, z0.s\n"
+ "fmin z14.s, p5/M, z14.s, z0.s\n"
+ "fmin z15.s, p5/M, z15.s, z0.s\n"
+ "fmin z16.s, p5/M, z16.s, z0.s\n"
+ "fmax z13.s, p5/M, z13.s, z1.s\n"
+ "fmax z14.s, p5/M, z14.s, z1.s\n"
+ "fmax z15.s, p5/M, z15.s, z1.s\n"
+ "fmax z16.s, p5/M, z16.s, z1.s\n"
+ "fmin z17.s, p5/M, z17.s, z0.s\n"
+ "fmin z18.s, p5/M, z18.s, z0.s\n"
+ "fmin z19.s, p5/M, z19.s, z0.s\n"
+ "fmin z20.s, p5/M, z20.s, z0.s\n"
+ "fmax z17.s, p5/M, z17.s, z1.s\n"
+ "fmax z18.s, p5/M, z18.s, z1.s\n"
+ "fmax z19.s, p5/M, z19.s, z1.s\n"
+ "fmax z20.s, p5/M, z20.s, z1.s\n"
+ "fmin z21.s, p5/M, z21.s, z0.s\n"
+ "fmin z22.s, p5/M, z22.s, z0.s\n"
+ "fmin z23.s, p5/M, z23.s, z0.s\n"
+ "fmin z24.s, p5/M, z24.s, z0.s\n"
+ "fmax z21.s, p5/M, z21.s, z1.s\n"
+ "fmax z22.s, p5/M, z22.s, z1.s\n"
+ "fmax z23.s, p5/M, z23.s, z1.s\n"
+ "fmax z24.s, p5/M, z24.s, z1.s\n"
+ "fmin z25.s, p5/M, z25.s, z0.s\n"
+ "fmin z26.s, p5/M, z26.s, z0.s\n"
+ "fmin z27.s, p5/M, z27.s, z0.s\n"
+ "fmax z25.s, p5/M, z25.s, z1.s\n"
+ "fmax z26.s, p5/M, z26.s, z1.s\n"
+ "fmax z27.s, p5/M, z27.s, z1.s\n"
"64:" // Height 5: No activation
- "st1w { z8.s }, p4, [x9]\n"
- "st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p4, [x25]\n"
- "st1w { z13.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x24]\n"
- "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z20.s }, p4, [x23]\n"
- "st1w { z21.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z22.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z23.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z24.s }, p4, [x22]\n"
- "st1w { z25.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z26.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z27.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z8.s }, p4, [x28]\n"
+ "st1w { z9.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z12.s }, p4, [x24]\n"
+ "st1w { z13.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x23]\n"
+ "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z20.s }, p4, [x22]\n"
+ "st1w { z21.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z22.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z23.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x21]\n"
+ "st1w { z25.s }, p3, [x21, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x21, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x21, #3, MUL VL]\n"
"65:" // Height 5: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 54b\n"
"b 80f\n"
"66:" // Height 6
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x18\n"
- "mov x12, %x[bias]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x9, %x[bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x19, #0x18\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"67:" // Height 6: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "cbz x12, 68f\n"
- "ld1w { z8.s }, p5/Z, [x12]\n"
- "ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x11\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x11\n"
+ "cbz x9, 68f\n"
+ "ld1w { z8.s }, p5/Z, [x9]\n"
"mov z12.d, z8.d\n"
+ "ld1w { z9.s }, p5/Z, [x9, #1, MUL VL]\n"
+ "mov z16.d, z8.d\n"
+ "ld1w { z10.s }, p5/Z, [x9, #2, MUL VL]\n"
+ "mov z20.d, z8.d\n"
+ "ld1w { z11.s }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"mov z13.d, z9.d\n"
- "ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "mov z17.d, z9.d\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
- "mov z16.d, z8.d\n"
- "mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
@@ -1625,36 +1625,36 @@ void sve_hybrid_fp32_mla_6x4VL (
"b 70f\n"
"68:" // Height 6: no bias
"tbz %x[flags], #0, 69f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z8.s }, p4/Z, [x9]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
- "add x21, x22, x20, LSL #2\n"
- "ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x25]\n"
- "ld1w { z13.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x25, #2, MUL VL]\n"
- "ld1w { z15.s }, p1/Z, [x25, #3, MUL VL]\n"
- "ld1w { z16.s }, p4/Z, [x24]\n"
- "ld1w { z17.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1w { z19.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x23]\n"
- "ld1w { z21.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z22.s }, p2/Z, [x23, #2, MUL VL]\n"
- "ld1w { z23.s }, p1/Z, [x23, #3, MUL VL]\n"
- "ld1w { z24.s }, p4/Z, [x22]\n"
- "ld1w { z25.s }, p3/Z, [x22, #1, MUL VL]\n"
- "ld1w { z26.s }, p2/Z, [x22, #2, MUL VL]\n"
- "ld1w { z27.s }, p1/Z, [x22, #3, MUL VL]\n"
- "ld1w { z28.s }, p4/Z, [x21]\n"
- "ld1w { z29.s }, p3/Z, [x21, #1, MUL VL]\n"
- "ld1w { z30.s }, p2/Z, [x21, #2, MUL VL]\n"
- "ld1w { z31.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z8.s }, p4/Z, [x28]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "ld1w { z9.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z11.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "add x20, x21, x19, LSL #2\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x23]\n"
+ "ld1w { z17.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z19.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z20.s }, p4/Z, [x22]\n"
+ "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z24.s }, p4/Z, [x21]\n"
+ "ld1w { z25.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z26.s }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z27.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "ld1w { z28.s }, p4/Z, [x20]\n"
+ "ld1w { z29.s }, p3/Z, [x20, #1, MUL VL]\n"
+ "ld1w { z30.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z31.s }, p1/Z, [x20, #3, MUL VL]\n"
"b 70f\n"
"69:" // Height 6: no accumulate
"mov z8.b, #0x0\n"
@@ -1682,67 +1682,67 @@ void sve_hybrid_fp32_mla_6x4VL (
"mov z30.b, #0x0\n"
"mov z31.b, #0x0\n"
"70:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"71:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 72f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 73f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x22, x22, x20, LSL #2\n"
- "add x21, x21, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 73f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "add x20, x20, x19, LSL #2\n"
"b 73f\n"
"72:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"73:" // Height 6: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"ble 75f\n"
"74:" // Height 6: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1rqw { z1.s }, p0/Z, [x25]\n"
- "sub x27, x27, #0x4\n"
- "ld1rqw { z2.s }, p0/Z, [x24]\n"
- "ld1rqw { z3.s }, p0/Z, [x23]\n"
- "cmp x27, #0x4\n"
- "add x26, x26, #0x10\n"
- "ld1rqw { z4.s }, p0/Z, [x22]\n"
- "ld1rqw { z5.s }, p0/Z, [x21]\n"
- "add x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
"ld1w { z6.s }, p5/Z, [x10]\n"
+ "whilelt p0.s, XZR, x26\n"
"ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x26, x26, #0x4\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
+ "cmp x26, #0x4\n"
+ "fmla z9.s, z7.s, z0.s[0]\n"
+ "ld1rqw { z2.s }, p0/Z, [x23]\n"
+ "add x25, x25, #0x10\n"
"fmla z12.s, z6.s, z1.s[0]\n"
+ "ld1rqw { z3.s }, p0/Z, [x22]\n"
+ "add x24, x24, #0x10\n"
"fmla z16.s, z6.s, z2.s[0]\n"
- "fmla z20.s, z6.s, z3.s[0]\n"
+ "ld1rqw { z4.s }, p0/Z, [x21]\n"
"add x23, x23, #0x10\n"
+ "fmla z13.s, z7.s, z1.s[0]\n"
+ "ld1rqw { z5.s }, p0/Z, [x20]\n"
"add x22, x22, #0x10\n"
+ "fmla z20.s, z6.s, z3.s[0]\n"
+ "add x21, x21, #0x10\n"
+ "fmla z17.s, z7.s, z2.s[0]\n"
+ "add x20, x20, #0x10\n"
"fmla z24.s, z6.s, z4.s[0]\n"
"fmla z28.s, z6.s, z5.s[0]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
- "add x21, x21, #0x10\n"
- "fmla z9.s, z7.s, z0.s[0]\n"
- "fmla z13.s, z7.s, z1.s[0]\n"
- "fmla z17.s, z7.s, z2.s[0]\n"
"fmla z21.s, z7.s, z3.s[0]\n"
"fmla z25.s, z7.s, z4.s[0]\n"
"fmla z29.s, z7.s, z5.s[0]\n"
@@ -1846,25 +1846,25 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z31.s, z7.s, z5.s[3]\n"
"bgt 74b\n"
"75:" // Height 6: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1rqw { z1.s }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
- "ld1rqw { z2.s }, p0/Z, [x24]\n"
- "ld1rqw { z3.s }, p0/Z, [x23]\n"
- "ld1rqw { z4.s }, p0/Z, [x22]\n"
- "ld1rqw { z5.s }, p0/Z, [x21]\n"
"ld1w { z6.s }, p5/Z, [x10]\n"
+ "whilelt p0.s, XZR, x26\n"
"ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
+ "fmla z9.s, z7.s, z0.s[0]\n"
+ "ld1rqw { z2.s }, p0/Z, [x23]\n"
+ "ld1rqw { z3.s }, p0/Z, [x22]\n"
"fmla z12.s, z6.s, z1.s[0]\n"
+ "ld1rqw { z4.s }, p0/Z, [x21]\n"
+ "fmla z13.s, z7.s, z1.s[0]\n"
+ "ld1rqw { z5.s }, p0/Z, [x20]\n"
"fmla z16.s, z6.s, z2.s[0]\n"
"fmla z20.s, z6.s, z3.s[0]\n"
"fmla z24.s, z6.s, z4.s[0]\n"
"fmla z28.s, z6.s, z5.s[0]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
- "fmla z9.s, z7.s, z0.s[0]\n"
- "fmla z13.s, z7.s, z1.s[0]\n"
"fmla z17.s, z7.s, z2.s[0]\n"
"fmla z21.s, z7.s, z3.s[0]\n"
"fmla z25.s, z7.s, z4.s[0]\n"
@@ -1885,12 +1885,12 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z31.s, z7.s, z5.s[0]\n"
"ble 76f\n"
"ld1w { z6.s }, p5/Z, [x10]\n"
- "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[1]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.s, z6.s, z1.s[1]\n"
"fmla z16.s, z6.s, z2.s[1]\n"
"fmla z20.s, z6.s, z3.s[1]\n"
- "subs x27, x27, #0x1\n"
"fmla z24.s, z6.s, z4.s[1]\n"
"fmla z28.s, z6.s, z5.s[1]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
@@ -1916,12 +1916,12 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z31.s, z7.s, z5.s[1]\n"
"ble 76f\n"
"ld1w { z6.s }, p5/Z, [x10]\n"
- "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[2]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x1\n"
"fmla z12.s, z6.s, z1.s[2]\n"
"fmla z16.s, z6.s, z2.s[2]\n"
"fmla z20.s, z6.s, z3.s[2]\n"
- "subs x27, x27, #0x1\n"
"fmla z24.s, z6.s, z4.s[2]\n"
"fmla z28.s, z6.s, z5.s[2]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
@@ -1947,8 +1947,8 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z31.s, z7.s, z5.s[2]\n"
"ble 76f\n"
"ld1w { z6.s }, p5/Z, [x10]\n"
- "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[3]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z12.s, z6.s, z1.s[3]\n"
"fmla z16.s, z6.s, z2.s[3]\n"
"fmla z20.s, z6.s, z3.s[3]\n"
@@ -1976,115 +1976,115 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z27.s, z7.s, z4.s[3]\n"
"fmla z31.s, z7.s, z5.s[3]\n"
"76:" // Height 6: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 71b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"tbz %x[flags], #1, 77f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
- "fmin z8.s, p5/M, z8.s, z1.s\n"
- "fmin z9.s, p5/M, z9.s, z1.s\n"
- "fmin z10.s, p5/M, z10.s, z1.s\n"
- "fmin z11.s, p5/M, z11.s, z1.s\n"
- "fmin z12.s, p5/M, z12.s, z1.s\n"
- "fmin z13.s, p5/M, z13.s, z1.s\n"
- "fmin z14.s, p5/M, z14.s, z1.s\n"
- "fmin z15.s, p5/M, z15.s, z1.s\n"
- "fmin z16.s, p5/M, z16.s, z1.s\n"
- "fmin z17.s, p5/M, z17.s, z1.s\n"
- "fmin z18.s, p5/M, z18.s, z1.s\n"
- "fmin z19.s, p5/M, z19.s, z1.s\n"
- "fmin z20.s, p5/M, z20.s, z1.s\n"
- "fmin z21.s, p5/M, z21.s, z1.s\n"
- "fmin z22.s, p5/M, z22.s, z1.s\n"
- "fmin z23.s, p5/M, z23.s, z1.s\n"
- "fmin z24.s, p5/M, z24.s, z1.s\n"
- "fmin z25.s, p5/M, z25.s, z1.s\n"
- "fmin z26.s, p5/M, z26.s, z1.s\n"
- "fmin z27.s, p5/M, z27.s, z1.s\n"
- "fmin z28.s, p5/M, z28.s, z1.s\n"
- "fmin z29.s, p5/M, z29.s, z1.s\n"
- "fmin z30.s, p5/M, z30.s, z1.s\n"
- "fmin z31.s, p5/M, z31.s, z1.s\n"
- "fmax z8.s, p5/M, z8.s, z0.s\n"
- "fmax z9.s, p5/M, z9.s, z0.s\n"
- "fmax z10.s, p5/M, z10.s, z0.s\n"
- "fmax z11.s, p5/M, z11.s, z0.s\n"
- "fmax z12.s, p5/M, z12.s, z0.s\n"
- "fmax z13.s, p5/M, z13.s, z0.s\n"
- "fmax z14.s, p5/M, z14.s, z0.s\n"
- "fmax z15.s, p5/M, z15.s, z0.s\n"
- "fmax z16.s, p5/M, z16.s, z0.s\n"
- "fmax z17.s, p5/M, z17.s, z0.s\n"
- "fmax z18.s, p5/M, z18.s, z0.s\n"
- "fmax z19.s, p5/M, z19.s, z0.s\n"
- "fmax z20.s, p5/M, z20.s, z0.s\n"
- "fmax z21.s, p5/M, z21.s, z0.s\n"
- "fmax z22.s, p5/M, z22.s, z0.s\n"
- "fmax z23.s, p5/M, z23.s, z0.s\n"
- "fmax z24.s, p5/M, z24.s, z0.s\n"
- "fmax z25.s, p5/M, z25.s, z0.s\n"
- "fmax z26.s, p5/M, z26.s, z0.s\n"
- "fmax z27.s, p5/M, z27.s, z0.s\n"
- "fmax z28.s, p5/M, z28.s, z0.s\n"
- "fmax z29.s, p5/M, z29.s, z0.s\n"
- "fmax z30.s, p5/M, z30.s, z0.s\n"
- "fmax z31.s, p5/M, z31.s, z0.s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
+ "fmin z8.s, p5/M, z8.s, z0.s\n"
+ "fmin z9.s, p5/M, z9.s, z0.s\n"
+ "fmin z10.s, p5/M, z10.s, z0.s\n"
+ "fmin z11.s, p5/M, z11.s, z0.s\n"
+ "fmin z12.s, p5/M, z12.s, z0.s\n"
+ "fmax z8.s, p5/M, z8.s, z1.s\n"
+ "fmax z9.s, p5/M, z9.s, z1.s\n"
+ "fmax z10.s, p5/M, z10.s, z1.s\n"
+ "fmax z11.s, p5/M, z11.s, z1.s\n"
+ "fmax z12.s, p5/M, z12.s, z1.s\n"
+ "fmin z13.s, p5/M, z13.s, z0.s\n"
+ "fmin z14.s, p5/M, z14.s, z0.s\n"
+ "fmin z15.s, p5/M, z15.s, z0.s\n"
+ "fmin z16.s, p5/M, z16.s, z0.s\n"
+ "fmax z13.s, p5/M, z13.s, z1.s\n"
+ "fmax z14.s, p5/M, z14.s, z1.s\n"
+ "fmax z15.s, p5/M, z15.s, z1.s\n"
+ "fmax z16.s, p5/M, z16.s, z1.s\n"
+ "fmin z17.s, p5/M, z17.s, z0.s\n"
+ "fmin z18.s, p5/M, z18.s, z0.s\n"
+ "fmin z19.s, p5/M, z19.s, z0.s\n"
+ "fmin z20.s, p5/M, z20.s, z0.s\n"
+ "fmax z17.s, p5/M, z17.s, z1.s\n"
+ "fmax z18.s, p5/M, z18.s, z1.s\n"
+ "fmax z19.s, p5/M, z19.s, z1.s\n"
+ "fmax z20.s, p5/M, z20.s, z1.s\n"
+ "fmin z21.s, p5/M, z21.s, z0.s\n"
+ "fmin z22.s, p5/M, z22.s, z0.s\n"
+ "fmin z23.s, p5/M, z23.s, z0.s\n"
+ "fmin z24.s, p5/M, z24.s, z0.s\n"
+ "fmax z21.s, p5/M, z21.s, z1.s\n"
+ "fmax z22.s, p5/M, z22.s, z1.s\n"
+ "fmax z23.s, p5/M, z23.s, z1.s\n"
+ "fmax z24.s, p5/M, z24.s, z1.s\n"
+ "fmin z25.s, p5/M, z25.s, z0.s\n"
+ "fmin z26.s, p5/M, z26.s, z0.s\n"
+ "fmin z27.s, p5/M, z27.s, z0.s\n"
+ "fmin z28.s, p5/M, z28.s, z0.s\n"
+ "fmax z25.s, p5/M, z25.s, z1.s\n"
+ "fmax z26.s, p5/M, z26.s, z1.s\n"
+ "fmax z27.s, p5/M, z27.s, z1.s\n"
+ "fmax z28.s, p5/M, z28.s, z1.s\n"
+ "fmin z29.s, p5/M, z29.s, z0.s\n"
+ "fmin z30.s, p5/M, z30.s, z0.s\n"
+ "fmin z31.s, p5/M, z31.s, z0.s\n"
+ "fmax z29.s, p5/M, z29.s, z1.s\n"
+ "fmax z30.s, p5/M, z30.s, z1.s\n"
+ "fmax z31.s, p5/M, z31.s, z1.s\n"
"77:" // Height 6: No activation
- "st1w { z8.s }, p4, [x9]\n"
- "st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p4, [x25]\n"
- "st1w { z13.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x24]\n"
- "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z20.s }, p4, [x23]\n"
- "st1w { z21.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z22.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z23.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z24.s }, p4, [x22]\n"
- "st1w { z25.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z26.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z27.s }, p1, [x22, #3, MUL VL]\n"
- "st1w { z28.s }, p4, [x21]\n"
- "st1w { z29.s }, p3, [x21, #1, MUL VL]\n"
- "st1w { z30.s }, p2, [x21, #2, MUL VL]\n"
- "st1w { z31.s }, p1, [x21, #3, MUL VL]\n"
+ "st1w { z8.s }, p4, [x28]\n"
+ "st1w { z9.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z12.s }, p4, [x24]\n"
+ "st1w { z13.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x23]\n"
+ "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z20.s }, p4, [x22]\n"
+ "st1w { z21.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z22.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z23.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x21]\n"
+ "st1w { z25.s }, p3, [x21, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x21, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x21, #3, MUL VL]\n"
+ "st1w { z28.s }, p4, [x20]\n"
+ "st1w { z29.s }, p3, [x20, #1, MUL VL]\n"
+ "st1w { z30.s }, p2, [x20, #2, MUL VL]\n"
+ "st1w { z31.s }, p1, [x20, #3, MUL VL]\n"
"78:" // Height 6: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 67b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 80f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 79f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"79:" // Update direct input
- "mov x20, #0x18\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x18\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"80:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL/a64fx.cpp
index 2ccd050f18..0a37f8abfc 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL/a64fx.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL/a64fx.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -105,248 +105,248 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"cmp %x[M], #0x2\n"
"bgt 25f\n"
"beq 13f\n"
- "mov x14, %x[bias]\n"
- "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x13, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x10, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "mov x20, #0x0\n"
- "whilelt p0.s, x20, x13\n"
- "cbz x14, 3f\n"
- "ld1w { z24.s }, p1/Z, [x14]\n"
- "addvl x14, x14, #1\n"
+ "mov x19, #0x0\n"
+ "whilelt p0.s, x19, x12\n"
+ "cbz x13, 3f\n"
+ "ld1w { z24.s }, p1/Z, [x13]\n"
+ "addvl x13, x13, #1\n"
"b 5f\n"
"3:" // Height 1: no bias
"tbz %x[flags], #0, 4f\n"
- "ld1w { z24.s }, p0/Z, [x11]\n"
+ "ld1w { z24.s }, p0/Z, [x10]\n"
"b 5f\n"
"4:" // Height 1: no accumulate
"mov z24.b, #0x0\n"
"5:" // Height 1: setup done
- "mov x10, #0x0\n"
+ "mov x9, #0x0\n"
"6:" // Height 1: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w28, [x19, x9, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 7f\n"
- "ldr x21, [%x[input_ptr], x10, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x28, [x21, #0x0]\n"
- "cbnz x10, 8f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x28, x28, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x27, [x20, #0x0]\n"
+ "cbnz x9, 8f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x27, x27, x19, LSL #2\n"
"b 8f\n"
"7:" // Height 1: setup direct input
- "mov x28, %x[input_ptr]\n"
+ "mov x27, %x[input_ptr]\n"
"8:" // Height 1: input setup done
- "subs x9, x9, #0x1\n"
- "ld1rw { z0.s }, p1/Z, [x28]\n"
+ "subs x28, x28, #0x1\n"
+ "ld1rw { z0.s }, p1/Z, [x27]\n"
"ble 10f\n"
"9:" // Height 1: Multiply loop: Main loop
- "ld1w { z8.s }, p1/Z, [x12]\n"
- "add x28, x28, #0x4\n"
- "subs x9, x9, #0x1\n"
+ "ld1w { z8.s }, p1/Z, [x11]\n"
+ "add x27, x27, #0x4\n"
+ "subs x28, x28, #0x1\n"
"fmla z24.s, p1/M, z8.s, z0.s\n"
- "addvl x12, x12, #1\n"
- "ld1rw { z0.s }, p1/Z, [x28]\n"
+ "addvl x11, x11, #1\n"
+ "ld1rw { z0.s }, p1/Z, [x27]\n"
"bgt 9b\n"
"10:" // Height 1: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "ld1w { z9.s }, p1/Z, [x12]\n"
- "add x10, x10, #0x1\n"
- "cmp x10, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ld1w { z9.s }, p1/Z, [x11]\n"
+ "add x9, x9, #0x1\n"
+ "cmp x9, x19\n"
"fmla z24.s, p1/M, z9.s, z0.s\n"
- "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"bne 6b\n"
"tbz %x[flags], #1, 11f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p1/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z16.s }, p1/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z17.s }, p1/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z16.s }, p1/Z, [x19]\n"
"fmin z24.s, p1/M, z24.s, z17.s\n"
"fmax z24.s, p1/M, z24.s, z16.s\n"
"11:" // Height 1: No activation
- "st1w { z24.s }, p0, [x11]\n"
- "addvl x11, x11, #1\n"
+ "st1w { z24.s }, p0, [x10]\n"
+ "addvl x10, x10, #1\n"
"12:" // Height 1: Writeback done
- "decw x13\n"
- "cmp x13, XZR\n"
+ "decw x12\n"
+ "cmp x12, XZR\n"
"bgt 2b\n"
"b 98f\n"
"13:" // Height 2
- "mov x14, %x[bias]\n"
- "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x13, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x10, %x[output_ptr]\n"
"14:" // Height 2: Column loop
- "mov x20, #0x0\n"
- "whilelt p0.s, x20, x13\n"
- "cbz x14, 15f\n"
- "ld1w { z24.s }, p1/Z, [x14]\n"
+ "mov x19, #0x0\n"
+ "whilelt p0.s, x19, x12\n"
+ "cbz x13, 15f\n"
+ "ld1w { z24.s }, p1/Z, [x13]\n"
"mov z25.d, z24.d\n"
- "addvl x14, x14, #1\n"
+ "addvl x13, x13, #1\n"
"b 17f\n"
"15:" // Height 2: no bias
"tbz %x[flags], #0, 16f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "ld1w { z24.s }, p0/Z, [x11]\n"
- "ld1w { z25.s }, p0/Z, [x27]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x26, x10, x19, LSL #2\n"
+ "ld1w { z24.s }, p0/Z, [x10]\n"
+ "ld1w { z25.s }, p0/Z, [x26]\n"
"b 17f\n"
"16:" // Height 2: no accumulate
"mov z24.b, #0x0\n"
"mov z25.b, #0x0\n"
"17:" // Height 2: setup done
- "mov x10, #0x0\n"
+ "mov x9, #0x0\n"
"18:" // Height 2: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w28, [x19, x9, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 19f\n"
- "ldr x21, [%x[input_ptr], x10, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x28, [x21, #0x0]\n"
- "ldr x27, [x21, #0x8]\n"
- "cbnz x10, 20f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x28, x28, x20, LSL #2\n"
- "add x27, x27, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x27, [x20, #0x0]\n"
+ "ldr x26, [x20, #0x8]\n"
+ "cbnz x9, 20f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x27, x27, x19, LSL #2\n"
+ "add x26, x26, x19, LSL #2\n"
"b 20f\n"
"19:" // Height 2: setup direct input
- "mov x28, %x[input_ptr]\n"
- "add x27, x28, x20, LSL #2\n"
+ "mov x27, %x[input_ptr]\n"
+ "add x26, x27, x19, LSL #2\n"
"20:" // Height 2: input setup done
- "subs x9, x9, #0x1\n"
- "ld1rw { z0.s }, p1/Z, [x28]\n"
- "ld1rw { z1.s }, p1/Z, [x27]\n"
+ "subs x28, x28, #0x1\n"
+ "ld1rw { z0.s }, p1/Z, [x27]\n"
+ "ld1rw { z1.s }, p1/Z, [x26]\n"
"ble 22f\n"
"21:" // Height 2: Multiply loop: Main loop
- "ld1w { z8.s }, p1/Z, [x12]\n"
- "add x28, x28, #0x4\n"
- "subs x9, x9, #0x1\n"
- "fmla z24.s, p1/M, z8.s, z0.s\n"
+ "ld1w { z8.s }, p1/Z, [x11]\n"
"add x27, x27, #0x4\n"
+ "subs x28, x28, #0x1\n"
+ "fmla z24.s, p1/M, z8.s, z0.s\n"
+ "add x26, x26, #0x4\n"
"fmla z25.s, p1/M, z8.s, z1.s\n"
- "addvl x12, x12, #1\n"
- "ld1rw { z0.s }, p1/Z, [x28]\n"
- "ld1rw { z1.s }, p1/Z, [x27]\n"
+ "addvl x11, x11, #1\n"
+ "ld1rw { z0.s }, p1/Z, [x27]\n"
+ "ld1rw { z1.s }, p1/Z, [x26]\n"
"bgt 21b\n"
"22:" // Height 2: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "ld1w { z9.s }, p1/Z, [x12]\n"
- "add x10, x10, #0x1\n"
- "cmp x10, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ld1w { z9.s }, p1/Z, [x11]\n"
+ "add x9, x9, #0x1\n"
+ "cmp x9, x19\n"
"fmla z24.s, p1/M, z9.s, z0.s\n"
"fmla z25.s, p1/M, z9.s, z1.s\n"
- "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"bne 18b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x26, x10, x19, LSL #2\n"
"tbz %x[flags], #1, 23f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p1/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z16.s }, p1/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z17.s }, p1/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z16.s }, p1/Z, [x19]\n"
"fmin z24.s, p1/M, z24.s, z17.s\n"
"fmin z25.s, p1/M, z25.s, z17.s\n"
"fmax z24.s, p1/M, z24.s, z16.s\n"
"fmax z25.s, p1/M, z25.s, z16.s\n"
"23:" // Height 2: No activation
- "st1w { z24.s }, p0, [x11]\n"
- "addvl x11, x11, #1\n"
- "st1w { z25.s }, p0, [x27]\n"
+ "st1w { z24.s }, p0, [x10]\n"
+ "addvl x10, x10, #1\n"
+ "st1w { z25.s }, p0, [x26]\n"
"24:" // Height 2: Writeback done
- "decw x13\n"
- "cmp x13, XZR\n"
+ "decw x12\n"
+ "cmp x12, XZR\n"
"bgt 14b\n"
"b 98f\n"
"25:" // Height 3
- "mov x14, %x[bias]\n"
- "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x13, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x10, %x[output_ptr]\n"
"26:" // Height 3: Column loop
- "mov x20, #0x0\n"
- "whilelt p0.s, x20, x13\n"
- "cbz x14, 27f\n"
- "ld1w { z24.s }, p1/Z, [x14]\n"
+ "mov x19, #0x0\n"
+ "whilelt p0.s, x19, x12\n"
+ "cbz x13, 27f\n"
+ "ld1w { z24.s }, p1/Z, [x13]\n"
"mov z25.d, z24.d\n"
"mov z26.d, z24.d\n"
- "addvl x14, x14, #1\n"
+ "addvl x13, x13, #1\n"
"b 29f\n"
"27:" // Height 3: no bias
"tbz %x[flags], #0, 28f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "ld1w { z24.s }, p0/Z, [x11]\n"
- "ld1w { z25.s }, p0/Z, [x27]\n"
- "ld1w { z26.s }, p0/Z, [x26]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x26, x10, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "ld1w { z24.s }, p0/Z, [x10]\n"
+ "ld1w { z25.s }, p0/Z, [x26]\n"
+ "ld1w { z26.s }, p0/Z, [x25]\n"
"b 29f\n"
"28:" // Height 3: no accumulate
"mov z24.b, #0x0\n"
"mov z25.b, #0x0\n"
"mov z26.b, #0x0\n"
"29:" // Height 3: setup done
- "mov x10, #0x0\n"
+ "mov x9, #0x0\n"
"30:" // Height 3: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w28, [x19, x9, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 31f\n"
- "ldr x21, [%x[input_ptr], x10, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x28, [x21, #0x0]\n"
- "ldr x27, [x21, #0x8]\n"
- "ldr x26, [x21, #0x10]\n"
- "cbnz x10, 32f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x28, x28, x20, LSL #2\n"
- "add x27, x27, x20, LSL #2\n"
- "add x26, x26, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x27, [x20, #0x0]\n"
+ "ldr x26, [x20, #0x8]\n"
+ "ldr x25, [x20, #0x10]\n"
+ "cbnz x9, 32f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x27, x27, x19, LSL #2\n"
+ "add x26, x26, x19, LSL #2\n"
+ "add x25, x25, x19, LSL #2\n"
"b 32f\n"
"31:" // Height 3: setup direct input
- "mov x28, %x[input_ptr]\n"
- "add x27, x28, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
+ "mov x27, %x[input_ptr]\n"
+ "add x26, x27, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
"32:" // Height 3: input setup done
- "subs x9, x9, #0x1\n"
- "ld1rw { z0.s }, p1/Z, [x28]\n"
- "ld1rw { z1.s }, p1/Z, [x27]\n"
- "ld1rw { z2.s }, p1/Z, [x26]\n"
+ "subs x28, x28, #0x1\n"
+ "ld1rw { z0.s }, p1/Z, [x27]\n"
+ "ld1rw { z1.s }, p1/Z, [x26]\n"
+ "ld1rw { z2.s }, p1/Z, [x25]\n"
"ble 34f\n"
"33:" // Height 3: Multiply loop: Main loop
- "ld1w { z8.s }, p1/Z, [x12]\n"
- "add x28, x28, #0x4\n"
- "subs x9, x9, #0x1\n"
- "fmla z24.s, p1/M, z8.s, z0.s\n"
+ "ld1w { z8.s }, p1/Z, [x11]\n"
"add x27, x27, #0x4\n"
+ "subs x28, x28, #0x1\n"
+ "fmla z24.s, p1/M, z8.s, z0.s\n"
"add x26, x26, #0x4\n"
+ "add x25, x25, #0x4\n"
"fmla z25.s, p1/M, z8.s, z1.s\n"
"fmla z26.s, p1/M, z8.s, z2.s\n"
- "addvl x12, x12, #1\n"
- "ld1rw { z0.s }, p1/Z, [x28]\n"
- "ld1rw { z1.s }, p1/Z, [x27]\n"
- "ld1rw { z2.s }, p1/Z, [x26]\n"
+ "addvl x11, x11, #1\n"
+ "ld1rw { z0.s }, p1/Z, [x27]\n"
+ "ld1rw { z1.s }, p1/Z, [x26]\n"
+ "ld1rw { z2.s }, p1/Z, [x25]\n"
"bgt 33b\n"
"34:" // Height 3: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "ld1w { z9.s }, p1/Z, [x12]\n"
- "add x10, x10, #0x1\n"
- "cmp x10, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ld1w { z9.s }, p1/Z, [x11]\n"
+ "add x9, x9, #0x1\n"
+ "cmp x9, x19\n"
"fmla z24.s, p1/M, z9.s, z0.s\n"
"fmla z25.s, p1/M, z9.s, z1.s\n"
- "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z26.s, p1/M, z9.s, z2.s\n"
"bne 30b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x26, x10, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
"tbz %x[flags], #1, 35f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p1/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z16.s }, p1/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z17.s }, p1/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z16.s }, p1/Z, [x19]\n"
"fmin z24.s, p1/M, z24.s, z17.s\n"
"fmin z25.s, p1/M, z25.s, z17.s\n"
"fmin z26.s, p1/M, z26.s, z17.s\n"
@@ -354,40 +354,40 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"fmax z25.s, p1/M, z25.s, z16.s\n"
"fmax z26.s, p1/M, z26.s, z16.s\n"
"35:" // Height 3: No activation
- "st1w { z24.s }, p0, [x11]\n"
- "addvl x11, x11, #1\n"
- "st1w { z25.s }, p0, [x27]\n"
- "st1w { z26.s }, p0, [x26]\n"
+ "st1w { z24.s }, p0, [x10]\n"
+ "addvl x10, x10, #1\n"
+ "st1w { z25.s }, p0, [x26]\n"
+ "st1w { z26.s }, p0, [x25]\n"
"36:" // Height 3: Writeback done
- "decw x13\n"
- "cmp x13, XZR\n"
+ "decw x12\n"
+ "cmp x12, XZR\n"
"bgt 26b\n"
"b 98f\n"
"37:" // Height 4
- "mov x14, %x[bias]\n"
- "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x13, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x10, %x[output_ptr]\n"
"38:" // Height 4: Column loop
- "mov x20, #0x0\n"
- "whilelt p0.s, x20, x13\n"
- "cbz x14, 39f\n"
- "ld1w { z24.s }, p1/Z, [x14]\n"
+ "mov x19, #0x0\n"
+ "whilelt p0.s, x19, x12\n"
+ "cbz x13, 39f\n"
+ "ld1w { z24.s }, p1/Z, [x13]\n"
"mov z25.d, z24.d\n"
"mov z26.d, z24.d\n"
- "addvl x14, x14, #1\n"
+ "addvl x13, x13, #1\n"
"mov z27.d, z24.d\n"
"b 41f\n"
"39:" // Height 4: no bias
"tbz %x[flags], #0, 40f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "ld1w { z24.s }, p0/Z, [x11]\n"
- "add x25, x26, x20, LSL #2\n"
- "ld1w { z25.s }, p0/Z, [x27]\n"
- "ld1w { z26.s }, p0/Z, [x26]\n"
- "ld1w { z27.s }, p0/Z, [x25]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x26, x10, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "ld1w { z24.s }, p0/Z, [x10]\n"
+ "ld1w { z25.s }, p0/Z, [x26]\n"
+ "ld1w { z26.s }, p0/Z, [x25]\n"
+ "ld1w { z27.s }, p0/Z, [x24]\n"
"b 41f\n"
"40:" // Height 4: no accumulate
"mov z24.b, #0x0\n"
@@ -395,74 +395,74 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"mov z26.b, #0x0\n"
"mov z27.b, #0x0\n"
"41:" // Height 4: setup done
- "mov x10, #0x0\n"
+ "mov x9, #0x0\n"
"42:" // Height 4: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w28, [x19, x9, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 43f\n"
- "ldr x21, [%x[input_ptr], x10, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x28, [x21, #0x0]\n"
- "ldr x27, [x21, #0x8]\n"
- "ldr x26, [x21, #0x10]\n"
- "ldr x25, [x21, #0x18]\n"
- "cbnz x10, 44f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x28, x28, x20, LSL #2\n"
- "add x27, x27, x20, LSL #2\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x27, [x20, #0x0]\n"
+ "ldr x26, [x20, #0x8]\n"
+ "ldr x25, [x20, #0x10]\n"
+ "ldr x24, [x20, #0x18]\n"
+ "cbnz x9, 44f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x27, x27, x19, LSL #2\n"
+ "add x26, x26, x19, LSL #2\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
"b 44f\n"
"43:" // Height 4: setup direct input
- "mov x28, %x[input_ptr]\n"
- "add x27, x28, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "add x25, x26, x20, LSL #2\n"
+ "mov x27, %x[input_ptr]\n"
+ "add x26, x27, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
"44:" // Height 4: input setup done
- "subs x9, x9, #0x1\n"
- "ld1rw { z0.s }, p1/Z, [x28]\n"
- "ld1rw { z1.s }, p1/Z, [x27]\n"
- "ld1rw { z2.s }, p1/Z, [x26]\n"
- "ld1rw { z3.s }, p1/Z, [x25]\n"
+ "subs x28, x28, #0x1\n"
+ "ld1rw { z0.s }, p1/Z, [x27]\n"
+ "ld1rw { z1.s }, p1/Z, [x26]\n"
+ "ld1rw { z2.s }, p1/Z, [x25]\n"
+ "ld1rw { z3.s }, p1/Z, [x24]\n"
"ble 46f\n"
"45:" // Height 4: Multiply loop: Main loop
- "ld1w { z8.s }, p1/Z, [x12]\n"
- "add x28, x28, #0x4\n"
- "subs x9, x9, #0x1\n"
- "fmla z24.s, p1/M, z8.s, z0.s\n"
+ "ld1w { z8.s }, p1/Z, [x11]\n"
"add x27, x27, #0x4\n"
+ "subs x28, x28, #0x1\n"
+ "fmla z24.s, p1/M, z8.s, z0.s\n"
"add x26, x26, #0x4\n"
+ "add x25, x25, #0x4\n"
"fmla z25.s, p1/M, z8.s, z1.s\n"
"fmla z26.s, p1/M, z8.s, z2.s\n"
- "add x25, x25, #0x4\n"
+ "add x24, x24, #0x4\n"
"fmla z27.s, p1/M, z8.s, z3.s\n"
- "addvl x12, x12, #1\n"
- "ld1rw { z0.s }, p1/Z, [x28]\n"
- "ld1rw { z1.s }, p1/Z, [x27]\n"
- "ld1rw { z2.s }, p1/Z, [x26]\n"
- "ld1rw { z3.s }, p1/Z, [x25]\n"
+ "addvl x11, x11, #1\n"
+ "ld1rw { z0.s }, p1/Z, [x27]\n"
+ "ld1rw { z1.s }, p1/Z, [x26]\n"
+ "ld1rw { z2.s }, p1/Z, [x25]\n"
+ "ld1rw { z3.s }, p1/Z, [x24]\n"
"bgt 45b\n"
"46:" // Height 4: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "ld1w { z9.s }, p1/Z, [x12]\n"
- "add x10, x10, #0x1\n"
- "cmp x10, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ld1w { z9.s }, p1/Z, [x11]\n"
+ "add x9, x9, #0x1\n"
+ "cmp x9, x19\n"
"fmla z24.s, p1/M, z9.s, z0.s\n"
"fmla z25.s, p1/M, z9.s, z1.s\n"
- "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z26.s, p1/M, z9.s, z2.s\n"
"fmla z27.s, p1/M, z9.s, z3.s\n"
"bne 42b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "add x25, x26, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x26, x10, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
"tbz %x[flags], #1, 47f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p1/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z16.s }, p1/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z17.s }, p1/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z16.s }, p1/Z, [x19]\n"
"fmin z24.s, p1/M, z24.s, z17.s\n"
"fmin z25.s, p1/M, z25.s, z17.s\n"
"fmin z26.s, p1/M, z26.s, z17.s\n"
@@ -472,44 +472,44 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"fmax z26.s, p1/M, z26.s, z16.s\n"
"fmax z27.s, p1/M, z27.s, z16.s\n"
"47:" // Height 4: No activation
- "st1w { z24.s }, p0, [x11]\n"
- "addvl x11, x11, #1\n"
- "st1w { z25.s }, p0, [x27]\n"
- "st1w { z26.s }, p0, [x26]\n"
- "st1w { z27.s }, p0, [x25]\n"
+ "st1w { z24.s }, p0, [x10]\n"
+ "addvl x10, x10, #1\n"
+ "st1w { z25.s }, p0, [x26]\n"
+ "st1w { z26.s }, p0, [x25]\n"
+ "st1w { z27.s }, p0, [x24]\n"
"48:" // Height 4: Writeback done
- "decw x13\n"
- "cmp x13, XZR\n"
+ "decw x12\n"
+ "cmp x12, XZR\n"
"bgt 38b\n"
"b 98f\n"
"49:" // Height 5
- "mov x14, %x[bias]\n"
- "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x13, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x10, %x[output_ptr]\n"
"50:" // Height 5: Column loop
- "mov x20, #0x0\n"
- "whilelt p0.s, x20, x13\n"
- "cbz x14, 51f\n"
- "ld1w { z24.s }, p1/Z, [x14]\n"
+ "mov x19, #0x0\n"
+ "whilelt p0.s, x19, x12\n"
+ "cbz x13, 51f\n"
+ "ld1w { z24.s }, p1/Z, [x13]\n"
"mov z25.d, z24.d\n"
"mov z26.d, z24.d\n"
- "addvl x14, x14, #1\n"
+ "addvl x13, x13, #1\n"
"mov z27.d, z24.d\n"
"mov z28.d, z24.d\n"
"b 53f\n"
"51:" // Height 5: no bias
"tbz %x[flags], #0, 52f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "ld1w { z24.s }, p0/Z, [x11]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z25.s }, p0/Z, [x27]\n"
- "ld1w { z26.s }, p0/Z, [x26]\n"
- "ld1w { z27.s }, p0/Z, [x25]\n"
- "ld1w { z28.s }, p0/Z, [x24]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x26, x10, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z24.s }, p0/Z, [x10]\n"
+ "ld1w { z25.s }, p0/Z, [x26]\n"
+ "ld1w { z26.s }, p0/Z, [x25]\n"
+ "ld1w { z27.s }, p0/Z, [x24]\n"
+ "ld1w { z28.s }, p0/Z, [x23]\n"
"b 53f\n"
"52:" // Height 5: no accumulate
"mov z24.b, #0x0\n"
@@ -518,83 +518,83 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"mov z27.b, #0x0\n"
"mov z28.b, #0x0\n"
"53:" // Height 5: setup done
- "mov x10, #0x0\n"
+ "mov x9, #0x0\n"
"54:" // Height 5: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w28, [x19, x9, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 55f\n"
- "ldr x21, [%x[input_ptr], x10, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x28, [x21, #0x0]\n"
- "ldr x27, [x21, #0x8]\n"
- "ldr x26, [x21, #0x10]\n"
- "ldr x25, [x21, #0x18]\n"
- "ldr x24, [x21, #0x20]\n"
- "cbnz x10, 56f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x28, x28, x20, LSL #2\n"
- "add x27, x27, x20, LSL #2\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x27, [x20, #0x0]\n"
+ "ldr x26, [x20, #0x8]\n"
+ "ldr x25, [x20, #0x10]\n"
+ "ldr x24, [x20, #0x18]\n"
+ "ldr x23, [x20, #0x20]\n"
+ "cbnz x9, 56f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x27, x27, x19, LSL #2\n"
+ "add x26, x26, x19, LSL #2\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
"b 56f\n"
"55:" // Height 5: setup direct input
- "mov x28, %x[input_ptr]\n"
- "add x27, x28, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "mov x27, %x[input_ptr]\n"
+ "add x26, x27, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"56:" // Height 5: input setup done
- "subs x9, x9, #0x1\n"
- "ld1rw { z0.s }, p1/Z, [x28]\n"
- "ld1rw { z1.s }, p1/Z, [x27]\n"
- "ld1rw { z2.s }, p1/Z, [x26]\n"
- "ld1rw { z3.s }, p1/Z, [x25]\n"
- "ld1rw { z4.s }, p1/Z, [x24]\n"
+ "subs x28, x28, #0x1\n"
+ "ld1rw { z0.s }, p1/Z, [x27]\n"
+ "ld1rw { z1.s }, p1/Z, [x26]\n"
+ "ld1rw { z2.s }, p1/Z, [x25]\n"
+ "ld1rw { z3.s }, p1/Z, [x24]\n"
+ "ld1rw { z4.s }, p1/Z, [x23]\n"
"ble 58f\n"
"57:" // Height 5: Multiply loop: Main loop
- "ld1w { z8.s }, p1/Z, [x12]\n"
- "add x28, x28, #0x4\n"
- "subs x9, x9, #0x1\n"
- "fmla z24.s, p1/M, z8.s, z0.s\n"
+ "ld1w { z8.s }, p1/Z, [x11]\n"
"add x27, x27, #0x4\n"
+ "subs x28, x28, #0x1\n"
+ "fmla z24.s, p1/M, z8.s, z0.s\n"
"add x26, x26, #0x4\n"
+ "add x25, x25, #0x4\n"
"fmla z25.s, p1/M, z8.s, z1.s\n"
"fmla z26.s, p1/M, z8.s, z2.s\n"
- "add x25, x25, #0x4\n"
"add x24, x24, #0x4\n"
+ "add x23, x23, #0x4\n"
"fmla z27.s, p1/M, z8.s, z3.s\n"
- "ld1rw { z0.s }, p1/Z, [x28]\n"
- "addvl x12, x12, #1\n"
+ "ld1rw { z0.s }, p1/Z, [x27]\n"
+ "addvl x11, x11, #1\n"
"fmla z28.s, p1/M, z8.s, z4.s\n"
- "ld1rw { z1.s }, p1/Z, [x27]\n"
- "ld1rw { z2.s }, p1/Z, [x26]\n"
- "ld1rw { z3.s }, p1/Z, [x25]\n"
- "ld1rw { z4.s }, p1/Z, [x24]\n"
+ "ld1rw { z1.s }, p1/Z, [x26]\n"
+ "ld1rw { z2.s }, p1/Z, [x25]\n"
+ "ld1rw { z3.s }, p1/Z, [x24]\n"
+ "ld1rw { z4.s }, p1/Z, [x23]\n"
"bgt 57b\n"
"58:" // Height 5: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "ld1w { z9.s }, p1/Z, [x12]\n"
- "add x10, x10, #0x1\n"
- "cmp x10, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ld1w { z9.s }, p1/Z, [x11]\n"
+ "add x9, x9, #0x1\n"
+ "cmp x9, x19\n"
"fmla z24.s, p1/M, z9.s, z0.s\n"
"fmla z25.s, p1/M, z9.s, z1.s\n"
- "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z26.s, p1/M, z9.s, z2.s\n"
"fmla z27.s, p1/M, z9.s, z3.s\n"
"fmla z28.s, p1/M, z9.s, z4.s\n"
"bne 54b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x26, x10, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"tbz %x[flags], #1, 59f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p1/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z16.s }, p1/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z17.s }, p1/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z16.s }, p1/Z, [x19]\n"
"fmin z24.s, p1/M, z24.s, z17.s\n"
"fmin z25.s, p1/M, z25.s, z17.s\n"
"fmin z26.s, p1/M, z26.s, z17.s\n"
@@ -606,48 +606,48 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"fmax z27.s, p1/M, z27.s, z16.s\n"
"fmax z28.s, p1/M, z28.s, z16.s\n"
"59:" // Height 5: No activation
- "st1w { z24.s }, p0, [x11]\n"
- "addvl x11, x11, #1\n"
- "st1w { z25.s }, p0, [x27]\n"
- "st1w { z26.s }, p0, [x26]\n"
- "st1w { z27.s }, p0, [x25]\n"
- "st1w { z28.s }, p0, [x24]\n"
+ "st1w { z24.s }, p0, [x10]\n"
+ "addvl x10, x10, #1\n"
+ "st1w { z25.s }, p0, [x26]\n"
+ "st1w { z26.s }, p0, [x25]\n"
+ "st1w { z27.s }, p0, [x24]\n"
+ "st1w { z28.s }, p0, [x23]\n"
"60:" // Height 5: Writeback done
- "decw x13\n"
- "cmp x13, XZR\n"
+ "decw x12\n"
+ "cmp x12, XZR\n"
"bgt 50b\n"
"b 98f\n"
"61:" // Height 6
- "mov x14, %x[bias]\n"
- "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x13, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x10, %x[output_ptr]\n"
"62:" // Height 6: Column loop
- "mov x20, #0x0\n"
- "whilelt p0.s, x20, x13\n"
- "cbz x14, 63f\n"
- "ld1w { z24.s }, p1/Z, [x14]\n"
+ "mov x19, #0x0\n"
+ "whilelt p0.s, x19, x12\n"
+ "cbz x13, 63f\n"
+ "ld1w { z24.s }, p1/Z, [x13]\n"
"mov z25.d, z24.d\n"
"mov z26.d, z24.d\n"
- "addvl x14, x14, #1\n"
+ "addvl x13, x13, #1\n"
"mov z27.d, z24.d\n"
"mov z28.d, z24.d\n"
"mov z29.d, z24.d\n"
"b 65f\n"
"63:" // Height 6: no bias
"tbz %x[flags], #0, 64f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "ld1w { z24.s }, p0/Z, [x11]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z25.s }, p0/Z, [x27]\n"
- "ld1w { z26.s }, p0/Z, [x26]\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z27.s }, p0/Z, [x25]\n"
- "ld1w { z28.s }, p0/Z, [x24]\n"
- "ld1w { z29.s }, p0/Z, [x23]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x26, x10, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z24.s }, p0/Z, [x10]\n"
+ "ld1w { z25.s }, p0/Z, [x26]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z26.s }, p0/Z, [x25]\n"
+ "ld1w { z27.s }, p0/Z, [x24]\n"
+ "ld1w { z28.s }, p0/Z, [x23]\n"
+ "ld1w { z29.s }, p0/Z, [x22]\n"
"b 65f\n"
"64:" // Height 6: no accumulate
"mov z24.b, #0x0\n"
@@ -657,92 +657,92 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"mov z28.b, #0x0\n"
"mov z29.b, #0x0\n"
"65:" // Height 6: setup done
- "mov x10, #0x0\n"
+ "mov x9, #0x0\n"
"66:" // Height 6: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w28, [x19, x9, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 67f\n"
- "ldr x21, [%x[input_ptr], x10, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x28, [x21, #0x0]\n"
- "ldr x27, [x21, #0x8]\n"
- "ldr x26, [x21, #0x10]\n"
- "ldr x25, [x21, #0x18]\n"
- "ldr x24, [x21, #0x20]\n"
- "ldr x23, [x21, #0x28]\n"
- "cbnz x10, 68f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x28, x28, x20, LSL #2\n"
- "add x27, x27, x20, LSL #2\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x27, [x20, #0x0]\n"
+ "ldr x26, [x20, #0x8]\n"
+ "ldr x25, [x20, #0x10]\n"
+ "ldr x24, [x20, #0x18]\n"
+ "ldr x23, [x20, #0x20]\n"
+ "ldr x22, [x20, #0x28]\n"
+ "cbnz x9, 68f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x27, x27, x19, LSL #2\n"
+ "add x26, x26, x19, LSL #2\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
"b 68f\n"
"67:" // Height 6: setup direct input
- "mov x28, %x[input_ptr]\n"
- "add x27, x28, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "mov x27, %x[input_ptr]\n"
+ "add x26, x27, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"68:" // Height 6: input setup done
- "subs x9, x9, #0x1\n"
- "ld1rw { z0.s }, p1/Z, [x28]\n"
- "ld1rw { z1.s }, p1/Z, [x27]\n"
- "ld1rw { z2.s }, p1/Z, [x26]\n"
- "ld1rw { z3.s }, p1/Z, [x25]\n"
- "ld1rw { z4.s }, p1/Z, [x24]\n"
- "ld1rw { z5.s }, p1/Z, [x23]\n"
+ "subs x28, x28, #0x1\n"
+ "ld1rw { z0.s }, p1/Z, [x27]\n"
+ "ld1rw { z1.s }, p1/Z, [x26]\n"
+ "ld1rw { z2.s }, p1/Z, [x25]\n"
+ "ld1rw { z3.s }, p1/Z, [x24]\n"
+ "ld1rw { z4.s }, p1/Z, [x23]\n"
+ "ld1rw { z5.s }, p1/Z, [x22]\n"
"ble 70f\n"
"69:" // Height 6: Multiply loop: Main loop
- "ld1w { z8.s }, p1/Z, [x12]\n"
- "add x28, x28, #0x4\n"
- "subs x9, x9, #0x1\n"
- "fmla z24.s, p1/M, z8.s, z0.s\n"
+ "ld1w { z8.s }, p1/Z, [x11]\n"
"add x27, x27, #0x4\n"
+ "subs x28, x28, #0x1\n"
+ "fmla z24.s, p1/M, z8.s, z0.s\n"
"add x26, x26, #0x4\n"
+ "add x25, x25, #0x4\n"
"fmla z25.s, p1/M, z8.s, z1.s\n"
"fmla z26.s, p1/M, z8.s, z2.s\n"
- "add x25, x25, #0x4\n"
"add x24, x24, #0x4\n"
+ "add x23, x23, #0x4\n"
"fmla z27.s, p1/M, z8.s, z3.s\n"
"fmla z28.s, p1/M, z8.s, z4.s\n"
- "add x23, x23, #0x4\n"
- "addvl x12, x12, #1\n"
+ "add x22, x22, #0x4\n"
+ "addvl x11, x11, #1\n"
"fmla z29.s, p1/M, z8.s, z5.s\n"
- "ld1rw { z0.s }, p1/Z, [x28]\n"
- "ld1rw { z1.s }, p1/Z, [x27]\n"
- "ld1rw { z2.s }, p1/Z, [x26]\n"
- "ld1rw { z3.s }, p1/Z, [x25]\n"
- "ld1rw { z4.s }, p1/Z, [x24]\n"
- "ld1rw { z5.s }, p1/Z, [x23]\n"
+ "ld1rw { z0.s }, p1/Z, [x27]\n"
+ "ld1rw { z1.s }, p1/Z, [x26]\n"
+ "ld1rw { z2.s }, p1/Z, [x25]\n"
+ "ld1rw { z3.s }, p1/Z, [x24]\n"
+ "ld1rw { z4.s }, p1/Z, [x23]\n"
+ "ld1rw { z5.s }, p1/Z, [x22]\n"
"bgt 69b\n"
"70:" // Height 6: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "ld1w { z9.s }, p1/Z, [x12]\n"
- "add x10, x10, #0x1\n"
- "cmp x10, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ld1w { z9.s }, p1/Z, [x11]\n"
+ "add x9, x9, #0x1\n"
+ "cmp x9, x19\n"
"fmla z24.s, p1/M, z9.s, z0.s\n"
"fmla z25.s, p1/M, z9.s, z1.s\n"
- "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z26.s, p1/M, z9.s, z2.s\n"
"fmla z27.s, p1/M, z9.s, z3.s\n"
"fmla z28.s, p1/M, z9.s, z4.s\n"
"fmla z29.s, p1/M, z9.s, z5.s\n"
"bne 66b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x26, x10, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"tbz %x[flags], #1, 71f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p1/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z16.s }, p1/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z17.s }, p1/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z16.s }, p1/Z, [x19]\n"
"fmin z24.s, p1/M, z24.s, z17.s\n"
"fmin z25.s, p1/M, z25.s, z17.s\n"
"fmin z26.s, p1/M, z26.s, z17.s\n"
@@ -756,31 +756,31 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"fmax z28.s, p1/M, z28.s, z16.s\n"
"fmax z29.s, p1/M, z29.s, z16.s\n"
"71:" // Height 6: No activation
- "st1w { z24.s }, p0, [x11]\n"
- "addvl x11, x11, #1\n"
- "st1w { z25.s }, p0, [x27]\n"
- "st1w { z26.s }, p0, [x26]\n"
- "st1w { z27.s }, p0, [x25]\n"
- "st1w { z28.s }, p0, [x24]\n"
- "st1w { z29.s }, p0, [x23]\n"
+ "st1w { z24.s }, p0, [x10]\n"
+ "addvl x10, x10, #1\n"
+ "st1w { z25.s }, p0, [x26]\n"
+ "st1w { z26.s }, p0, [x25]\n"
+ "st1w { z27.s }, p0, [x24]\n"
+ "st1w { z28.s }, p0, [x23]\n"
+ "st1w { z29.s }, p0, [x22]\n"
"72:" // Height 6: Writeback done
- "decw x13\n"
- "cmp x13, XZR\n"
+ "decw x12\n"
+ "cmp x12, XZR\n"
"bgt 62b\n"
"b 98f\n"
"73:" // Height 7
- "mov x14, %x[bias]\n"
- "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x13, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x10, %x[output_ptr]\n"
"74:" // Height 7: Column loop
- "mov x20, #0x0\n"
- "whilelt p0.s, x20, x13\n"
- "cbz x14, 75f\n"
- "ld1w { z24.s }, p1/Z, [x14]\n"
+ "mov x19, #0x0\n"
+ "whilelt p0.s, x19, x12\n"
+ "cbz x13, 75f\n"
+ "ld1w { z24.s }, p1/Z, [x13]\n"
"mov z25.d, z24.d\n"
"mov z26.d, z24.d\n"
- "addvl x14, x14, #1\n"
+ "addvl x13, x13, #1\n"
"mov z27.d, z24.d\n"
"mov z28.d, z24.d\n"
"mov z29.d, z24.d\n"
@@ -788,20 +788,20 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"b 77f\n"
"75:" // Height 7: no bias
"tbz %x[flags], #0, 76f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "ld1w { z24.s }, p0/Z, [x11]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z25.s }, p0/Z, [x27]\n"
- "ld1w { z26.s }, p0/Z, [x26]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "ld1w { z27.s }, p0/Z, [x25]\n"
- "ld1w { z28.s }, p0/Z, [x24]\n"
- "ld1w { z29.s }, p0/Z, [x23]\n"
- "ld1w { z30.s }, p0/Z, [x22]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x26, x10, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z24.s }, p0/Z, [x10]\n"
+ "ld1w { z25.s }, p0/Z, [x26]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z26.s }, p0/Z, [x25]\n"
+ "ld1w { z27.s }, p0/Z, [x24]\n"
+ "ld1w { z28.s }, p0/Z, [x23]\n"
+ "ld1w { z29.s }, p0/Z, [x22]\n"
+ "ld1w { z30.s }, p0/Z, [x21]\n"
"b 77f\n"
"76:" // Height 7: no accumulate
"mov z24.b, #0x0\n"
@@ -812,101 +812,101 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"mov z29.b, #0x0\n"
"mov z30.b, #0x0\n"
"77:" // Height 7: setup done
- "mov x10, #0x0\n"
+ "mov x9, #0x0\n"
"78:" // Height 7: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w28, [x19, x9, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 79f\n"
- "ldr x21, [%x[input_ptr], x10, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x28, [x21, #0x0]\n"
- "ldr x27, [x21, #0x8]\n"
- "ldr x26, [x21, #0x10]\n"
- "ldr x25, [x21, #0x18]\n"
- "ldr x24, [x21, #0x20]\n"
- "ldr x23, [x21, #0x28]\n"
- "ldr x22, [x21, #0x30]\n"
- "cbnz x10, 80f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x28, x28, x20, LSL #2\n"
- "add x27, x27, x20, LSL #2\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x22, x22, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x27, [x20, #0x0]\n"
+ "ldr x26, [x20, #0x8]\n"
+ "ldr x25, [x20, #0x10]\n"
+ "ldr x24, [x20, #0x18]\n"
+ "ldr x23, [x20, #0x20]\n"
+ "ldr x22, [x20, #0x28]\n"
+ "ldr x21, [x20, #0x30]\n"
+ "cbnz x9, 80f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x27, x27, x19, LSL #2\n"
+ "add x26, x26, x19, LSL #2\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
"b 80f\n"
"79:" // Height 7: setup direct input
- "mov x28, %x[input_ptr]\n"
- "add x27, x28, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "mov x27, %x[input_ptr]\n"
+ "add x26, x27, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"80:" // Height 7: input setup done
- "subs x9, x9, #0x1\n"
- "ld1rw { z0.s }, p1/Z, [x28]\n"
- "ld1rw { z1.s }, p1/Z, [x27]\n"
- "ld1rw { z2.s }, p1/Z, [x26]\n"
- "ld1rw { z3.s }, p1/Z, [x25]\n"
- "ld1rw { z4.s }, p1/Z, [x24]\n"
- "ld1rw { z5.s }, p1/Z, [x23]\n"
- "ld1rw { z6.s }, p1/Z, [x22]\n"
+ "subs x28, x28, #0x1\n"
+ "ld1rw { z0.s }, p1/Z, [x27]\n"
+ "ld1rw { z1.s }, p1/Z, [x26]\n"
+ "ld1rw { z2.s }, p1/Z, [x25]\n"
+ "ld1rw { z3.s }, p1/Z, [x24]\n"
+ "ld1rw { z4.s }, p1/Z, [x23]\n"
+ "ld1rw { z5.s }, p1/Z, [x22]\n"
+ "ld1rw { z6.s }, p1/Z, [x21]\n"
"ble 82f\n"
"81:" // Height 7: Multiply loop: Main loop
- "ld1w { z8.s }, p1/Z, [x12]\n"
- "add x28, x28, #0x4\n"
- "subs x9, x9, #0x1\n"
- "fmla z24.s, p1/M, z8.s, z0.s\n"
+ "ld1w { z8.s }, p1/Z, [x11]\n"
"add x27, x27, #0x4\n"
+ "subs x28, x28, #0x1\n"
+ "fmla z24.s, p1/M, z8.s, z0.s\n"
"add x26, x26, #0x4\n"
+ "add x25, x25, #0x4\n"
"fmla z25.s, p1/M, z8.s, z1.s\n"
"fmla z26.s, p1/M, z8.s, z2.s\n"
- "add x25, x25, #0x4\n"
"add x24, x24, #0x4\n"
- "fmla z27.s, p1/M, z8.s, z3.s\n"
- "ld1rw { z0.s }, p1/Z, [x28]\n"
"add x23, x23, #0x4\n"
+ "fmla z27.s, p1/M, z8.s, z3.s\n"
+ "ld1rw { z0.s }, p1/Z, [x27]\n"
"add x22, x22, #0x4\n"
+ "add x21, x21, #0x4\n"
"fmla z28.s, p1/M, z8.s, z4.s\n"
"fmla z29.s, p1/M, z8.s, z5.s\n"
- "addvl x12, x12, #1\n"
- "ld1rw { z1.s }, p1/Z, [x27]\n"
+ "addvl x11, x11, #1\n"
+ "ld1rw { z1.s }, p1/Z, [x26]\n"
"fmla z30.s, p1/M, z8.s, z6.s\n"
- "ld1rw { z2.s }, p1/Z, [x26]\n"
- "ld1rw { z3.s }, p1/Z, [x25]\n"
- "ld1rw { z4.s }, p1/Z, [x24]\n"
- "ld1rw { z5.s }, p1/Z, [x23]\n"
- "ld1rw { z6.s }, p1/Z, [x22]\n"
+ "ld1rw { z2.s }, p1/Z, [x25]\n"
+ "ld1rw { z3.s }, p1/Z, [x24]\n"
+ "ld1rw { z4.s }, p1/Z, [x23]\n"
+ "ld1rw { z5.s }, p1/Z, [x22]\n"
+ "ld1rw { z6.s }, p1/Z, [x21]\n"
"bgt 81b\n"
"82:" // Height 7: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "ld1w { z9.s }, p1/Z, [x12]\n"
- "add x10, x10, #0x1\n"
- "cmp x10, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ld1w { z9.s }, p1/Z, [x11]\n"
+ "add x9, x9, #0x1\n"
+ "cmp x9, x19\n"
"fmla z24.s, p1/M, z9.s, z0.s\n"
"fmla z25.s, p1/M, z9.s, z1.s\n"
- "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z26.s, p1/M, z9.s, z2.s\n"
"fmla z27.s, p1/M, z9.s, z3.s\n"
"fmla z28.s, p1/M, z9.s, z4.s\n"
"fmla z29.s, p1/M, z9.s, z5.s\n"
"fmla z30.s, p1/M, z9.s, z6.s\n"
"bne 78b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x26, x10, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"tbz %x[flags], #1, 83f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p1/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z16.s }, p1/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z17.s }, p1/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z16.s }, p1/Z, [x19]\n"
"fmin z24.s, p1/M, z24.s, z17.s\n"
"fmin z25.s, p1/M, z25.s, z17.s\n"
"fmin z26.s, p1/M, z26.s, z17.s\n"
@@ -922,35 +922,35 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"fmax z29.s, p1/M, z29.s, z16.s\n"
"fmax z30.s, p1/M, z30.s, z16.s\n"
"83:" // Height 7: No activation
- "st1w { z24.s }, p0, [x11]\n"
- "addvl x11, x11, #1\n"
- "st1w { z25.s }, p0, [x27]\n"
- "st1w { z26.s }, p0, [x26]\n"
- "st1w { z27.s }, p0, [x25]\n"
- "st1w { z28.s }, p0, [x24]\n"
- "st1w { z29.s }, p0, [x23]\n"
- "st1w { z30.s }, p0, [x22]\n"
+ "st1w { z24.s }, p0, [x10]\n"
+ "addvl x10, x10, #1\n"
+ "st1w { z25.s }, p0, [x26]\n"
+ "st1w { z26.s }, p0, [x25]\n"
+ "st1w { z27.s }, p0, [x24]\n"
+ "st1w { z28.s }, p0, [x23]\n"
+ "st1w { z29.s }, p0, [x22]\n"
+ "st1w { z30.s }, p0, [x21]\n"
"84:" // Height 7: Writeback done
- "decw x13\n"
- "cmp x13, XZR\n"
+ "decw x12\n"
+ "cmp x12, XZR\n"
"bgt 74b\n"
"b 98f\n"
"85:" // Height 8
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x20\n"
- "mov x14, %x[bias]\n"
- "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x19, #0x20\n"
+ "mov x13, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x10, %x[output_ptr]\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"86:" // Height 8: Column loop
- "mov x20, #0x0\n"
- "whilelt p0.s, x20, x13\n"
- "cbz x14, 87f\n"
- "ld1w { z24.s }, p1/Z, [x14]\n"
+ "mov x19, #0x0\n"
+ "whilelt p0.s, x19, x12\n"
+ "cbz x13, 87f\n"
+ "ld1w { z24.s }, p1/Z, [x13]\n"
"mov z25.d, z24.d\n"
"mov z26.d, z24.d\n"
- "addvl x14, x14, #1\n"
+ "addvl x13, x13, #1\n"
"mov z27.d, z24.d\n"
"mov z28.d, z24.d\n"
"mov z29.d, z24.d\n"
@@ -959,22 +959,22 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"b 89f\n"
"87:" // Height 8: no bias
"tbz %x[flags], #0, 88f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "ld1w { z24.s }, p0/Z, [x11]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z25.s }, p0/Z, [x27]\n"
- "ld1w { z26.s }, p0/Z, [x26]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "ld1w { z27.s }, p0/Z, [x25]\n"
- "ld1w { z28.s }, p0/Z, [x24]\n"
- "add x21, x22, x20, LSL #2\n"
- "ld1w { z29.s }, p0/Z, [x23]\n"
- "ld1w { z30.s }, p0/Z, [x22]\n"
- "ld1w { z31.s }, p0/Z, [x21]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x26, x10, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z24.s }, p0/Z, [x10]\n"
+ "ld1w { z25.s }, p0/Z, [x26]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z26.s }, p0/Z, [x25]\n"
+ "ld1w { z27.s }, p0/Z, [x24]\n"
+ "add x20, x21, x19, LSL #2\n"
+ "ld1w { z28.s }, p0/Z, [x23]\n"
+ "ld1w { z29.s }, p0/Z, [x22]\n"
+ "ld1w { z30.s }, p0/Z, [x21]\n"
+ "ld1w { z31.s }, p0/Z, [x20]\n"
"b 89f\n"
"88:" // Height 8: no accumulate
"mov z24.b, #0x0\n"
@@ -986,90 +986,90 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"mov z30.b, #0x0\n"
"mov z31.b, #0x0\n"
"89:" // Height 8: setup done
- "mov x10, #0x0\n"
+ "mov x9, #0x0\n"
"90:" // Height 8: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w28, [x19, x9, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 91f\n"
- "ldr x21, [%x[input_ptr], x10, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x28, [x21, #0x0]\n"
- "ldr x27, [x21, #0x8]\n"
- "ldr x26, [x21, #0x10]\n"
- "ldr x25, [x21, #0x18]\n"
- "ldr x24, [x21, #0x20]\n"
- "ldr x23, [x21, #0x28]\n"
- "ldr x22, [x21, #0x30]\n"
- "ldr x21, [x21, #0x38]\n"
- "cbnz x10, 92f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x28, x28, x20, LSL #2\n"
- "add x27, x27, x20, LSL #2\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x22, x22, x20, LSL #2\n"
- "add x21, x21, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x27, [x20, #0x0]\n"
+ "ldr x26, [x20, #0x8]\n"
+ "ldr x25, [x20, #0x10]\n"
+ "ldr x24, [x20, #0x18]\n"
+ "ldr x23, [x20, #0x20]\n"
+ "ldr x22, [x20, #0x28]\n"
+ "ldr x21, [x20, #0x30]\n"
+ "ldr x20, [x20, #0x38]\n"
+ "cbnz x9, 92f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x27, x27, x19, LSL #2\n"
+ "add x26, x26, x19, LSL #2\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "add x20, x20, x19, LSL #2\n"
"b 92f\n"
"91:" // Height 8: setup direct input
- "mov x28, %x[input_ptr]\n"
- "add x27, x28, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "mov x27, %x[input_ptr]\n"
+ "add x26, x27, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"92:" // Height 8: input setup done
- "subs x9, x9, #0x1\n"
- "ld1rw { z0.s }, p1/Z, [x28]\n"
- "ld1rw { z1.s }, p1/Z, [x27]\n"
- "ld1rw { z2.s }, p1/Z, [x26]\n"
- "ld1rw { z3.s }, p1/Z, [x25]\n"
- "ld1rw { z4.s }, p1/Z, [x24]\n"
- "ld1rw { z5.s }, p1/Z, [x23]\n"
- "ld1rw { z6.s }, p1/Z, [x22]\n"
- "ld1rw { z7.s }, p1/Z, [x21]\n"
+ "subs x28, x28, #0x1\n"
+ "ld1rw { z0.s }, p1/Z, [x27]\n"
+ "ld1rw { z1.s }, p1/Z, [x26]\n"
+ "ld1rw { z2.s }, p1/Z, [x25]\n"
+ "ld1rw { z3.s }, p1/Z, [x24]\n"
+ "ld1rw { z4.s }, p1/Z, [x23]\n"
+ "ld1rw { z5.s }, p1/Z, [x22]\n"
+ "ld1rw { z6.s }, p1/Z, [x21]\n"
+ "ld1rw { z7.s }, p1/Z, [x20]\n"
"ble 94f\n"
"93:" // Height 8: Multiply loop: Main loop
- "ld1w { z8.s }, p1/Z, [x12]\n"
- "add x28, x28, #0x4\n"
- "subs x9, x9, #0x1\n"
- "fmla z24.s, p1/M, z8.s, z0.s\n"
+ "ld1w { z8.s }, p1/Z, [x11]\n"
"add x27, x27, #0x4\n"
+ "subs x28, x28, #0x1\n"
+ "fmla z24.s, p1/M, z8.s, z0.s\n"
"add x26, x26, #0x4\n"
+ "add x25, x25, #0x4\n"
"fmla z25.s, p1/M, z8.s, z1.s\n"
"fmla z26.s, p1/M, z8.s, z2.s\n"
- "add x25, x25, #0x4\n"
"add x24, x24, #0x4\n"
+ "add x23, x23, #0x4\n"
"fmla z27.s, p1/M, z8.s, z3.s\n"
"fmla z28.s, p1/M, z8.s, z4.s\n"
- "add x23, x23, #0x4\n"
"add x22, x22, #0x4\n"
- "fmla z29.s, p1/M, z8.s, z5.s\n"
- "ld1rw { z0.s }, p1/Z, [x28]\n"
"add x21, x21, #0x4\n"
- "addvl x12, x12, #1\n"
- "ld1rw { z1.s }, p1/Z, [x27]\n"
+ "fmla z29.s, p1/M, z8.s, z5.s\n"
+ "ld1rw { z0.s }, p1/Z, [x27]\n"
+ "add x20, x20, #0x4\n"
+ "addvl x11, x11, #1\n"
+ "ld1rw { z1.s }, p1/Z, [x26]\n"
"fmla z30.s, p1/M, z8.s, z6.s\n"
"fmla z31.s, p1/M, z8.s, z7.s\n"
- "ld1rw { z2.s }, p1/Z, [x26]\n"
- "ld1rw { z3.s }, p1/Z, [x25]\n"
- "ld1rw { z4.s }, p1/Z, [x24]\n"
- "ld1rw { z5.s }, p1/Z, [x23]\n"
- "ld1rw { z6.s }, p1/Z, [x22]\n"
- "ld1rw { z7.s }, p1/Z, [x21]\n"
+ "ld1rw { z2.s }, p1/Z, [x25]\n"
+ "ld1rw { z3.s }, p1/Z, [x24]\n"
+ "ld1rw { z4.s }, p1/Z, [x23]\n"
+ "ld1rw { z5.s }, p1/Z, [x22]\n"
+ "ld1rw { z6.s }, p1/Z, [x21]\n"
+ "ld1rw { z7.s }, p1/Z, [x20]\n"
"bgt 93b\n"
"94:" // Height 8: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "ld1w { z9.s }, p1/Z, [x12]\n"
- "add x10, x10, #0x1\n"
- "cmp x10, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ld1w { z9.s }, p1/Z, [x11]\n"
+ "add x9, x9, #0x1\n"
+ "cmp x9, x19\n"
"fmla z24.s, p1/M, z9.s, z0.s\n"
"fmla z25.s, p1/M, z9.s, z1.s\n"
- "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z26.s, p1/M, z9.s, z2.s\n"
"fmla z27.s, p1/M, z9.s, z3.s\n"
"fmla z28.s, p1/M, z9.s, z4.s\n"
@@ -1077,19 +1077,19 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"fmla z30.s, p1/M, z9.s, z6.s\n"
"fmla z31.s, p1/M, z9.s, z7.s\n"
"bne 90b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x26, x10, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"tbz %x[flags], #1, 95f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p1/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z16.s }, p1/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z17.s }, p1/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z16.s }, p1/Z, [x19]\n"
"fmin z24.s, p1/M, z24.s, z17.s\n"
"fmin z25.s, p1/M, z25.s, z17.s\n"
"fmin z26.s, p1/M, z26.s, z17.s\n"
@@ -1107,35 +1107,35 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"fmax z30.s, p1/M, z30.s, z16.s\n"
"fmax z31.s, p1/M, z31.s, z16.s\n"
"95:" // Height 8: No activation
- "st1w { z24.s }, p0, [x11]\n"
- "addvl x11, x11, #1\n"
- "st1w { z25.s }, p0, [x27]\n"
- "st1w { z26.s }, p0, [x26]\n"
- "st1w { z27.s }, p0, [x25]\n"
- "st1w { z28.s }, p0, [x24]\n"
- "st1w { z29.s }, p0, [x23]\n"
- "st1w { z30.s }, p0, [x22]\n"
- "st1w { z31.s }, p0, [x21]\n"
+ "st1w { z24.s }, p0, [x10]\n"
+ "addvl x10, x10, #1\n"
+ "st1w { z25.s }, p0, [x26]\n"
+ "st1w { z26.s }, p0, [x25]\n"
+ "st1w { z27.s }, p0, [x24]\n"
+ "st1w { z28.s }, p0, [x23]\n"
+ "st1w { z29.s }, p0, [x22]\n"
+ "st1w { z30.s }, p0, [x21]\n"
+ "st1w { z31.s }, p0, [x20]\n"
"96:" // Height 8: Writeback done
- "decw x13\n"
- "cmp x13, XZR\n"
+ "decw x12\n"
+ "cmp x12, XZR\n"
"bgt 86b\n"
"subs %x[M], %x[M], #0x8\n"
"beq 98f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 97f\n"
- "add x21, x21, #0x8\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x8\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"97:" // Update direct input
- "mov x20, #0x20\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x20\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"98:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "p0", "p1", "x9", "x10", "x11", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z16", "z17", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "x9", "x10", "x11", "x12", "x13", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z16", "z17", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
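
// Note (illustrative sketch, not part of the patch): every hunk in this revert
// is a mechanical register renumbering. Each scratch register shifts down one
// slot (x20 -> x19, x21 -> x20, ...) so that x19 is back in use, and the
// bias/N/B_ptr/output bookkeeping moves from x14/x13/x12/x11 to
// x13/x12/x11/x10; the address arithmetic itself is unchanged. In C++ terms
// (hypothetical helper name, assumed for illustration only), each
// "add xN, xM, xOFF, LSL #2" step in the writeback setup advances one
// per-row output pointer:
//
//   #include <cstddef>
//
//   // The LSL #2 in the assembly is the *4 byte scaling for fp32 elements;
//   // C++ pointer arithmetic applies that scaling implicitly.
//   static inline float *next_output_row(float *prev, std::ptrdiff_t output_offset)
//   {
//       return prev + output_offset;
//   }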
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL/generic.cpp
index 9679d49506..5b4b6b9b2e 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -105,382 +105,382 @@ void sve_hybrid_fp32_mla_8x1VL (
"cmp %x[M], #0x2\n"
"bgt 27f\n"
"beq 14f\n"
- "mov x14, %x[bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x11, %x[bias]\n"
+ "mov x10, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.s, x20, x13\n"
- "cbz x14, 3f\n"
- "ld1w { z24.s }, p2/Z, [x14]\n"
- "addvl x14, x14, #1\n"
+ "mov x19, #0x0\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x11, 3f\n"
+ "ld1w { z24.s }, p2/Z, [x11]\n"
+ "addvl x11, x11, #1\n"
"b 5f\n"
"3:" // Height 1: no bias
"tbz %x[flags], #0, 4f\n"
- "ld1w { z24.s }, p1/Z, [x11]\n"
+ "ld1w { z24.s }, p1/Z, [x10]\n"
"b 5f\n"
"4:" // Height 1: no accumulate
"mov z24.b, #0x0\n"
"5:" // Height 1: setup done
- "mov x10, #0x0\n"
+ "mov x9, #0x0\n"
"6:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w28, [x20, x9, LSL #0x2]\n"
"tbz %x[flags], #3, 7f\n"
- "ldr x21, [%x[input_ptr], x10, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x28, [x21, #0x0]\n"
- "cbnz x10, 8f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x28, x28, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x27, [x20, #0x0]\n"
+ "cbnz x9, 8f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x27, x27, x19, LSL #2\n"
"b 8f\n"
"7:" // Height 1: setup direct input
- "mov x28, %x[input_ptr]\n"
+ "mov x27, %x[input_ptr]\n"
"8:" // Height 1: input setup done
- "cmp x9, #0x4\n"
+ "cmp x28, #0x4\n"
"ble 10f\n"
"9:" // Height 1: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x9\n"
- "ld1rqw { z0.s }, p0/Z, [x28]\n"
"ld1w { z8.s }, p2/Z, [x12]\n"
- "fmla z24.s, z8.s, z0.s[0]\n"
+ "whilelt p0.s, XZR, x28\n"
"ld1w { z9.s }, p2/Z, [x12, #1, MUL VL]\n"
- "fmla z24.s, z9.s, z0.s[1]\n"
+ "sub x28, x28, #0x4\n"
+ "ld1rqw { z0.s }, p0/Z, [x27]\n"
+ "fmla z24.s, z8.s, z0.s[0]\n"
"ld1w { z10.s }, p2/Z, [x12, #2, MUL VL]\n"
- "fmla z24.s, z10.s, z0.s[2]\n"
+ "cmp x28, #0x4\n"
+ "fmla z24.s, z9.s, z0.s[1]\n"
"ld1w { z11.s }, p2/Z, [x12, #3, MUL VL]\n"
- "sub x9, x9, #0x4\n"
- "cmp x9, #0x4\n"
- "fmla z24.s, z11.s, z0.s[3]\n"
- "add x28, x28, #0x10\n"
+ "add x27, x27, #0x10\n"
+ "fmla z24.s, z10.s, z0.s[2]\n"
"addvl x12, x12, #4\n"
+ "fmla z24.s, z11.s, z0.s[3]\n"
"bgt 9b\n"
"10:" // Height 1: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x9\n"
- "ld1rqw { z0.s }, p0/Z, [x28]\n"
"ld1w { z8.s }, p2/Z, [x12]\n"
- "subs x9, x9, #0x1\n"
+ "whilelt p0.s, XZR, x28\n"
+ "subs x28, x28, #0x1\n"
+ "ld1rqw { z0.s }, p0/Z, [x27]\n"
"fmla z24.s, z8.s, z0.s[0]\n"
"addvl x12, x12, #1\n"
"ble 11f\n"
"ld1w { z9.s }, p2/Z, [x12]\n"
- "subs x9, x9, #0x1\n"
"fmla z24.s, z9.s, z0.s[1]\n"
+ "subs x28, x28, #0x1\n"
"addvl x12, x12, #1\n"
"ble 11f\n"
"ld1w { z10.s }, p2/Z, [x12]\n"
- "subs x9, x9, #0x1\n"
"fmla z24.s, z10.s, z0.s[2]\n"
+ "subs x28, x28, #0x1\n"
"addvl x12, x12, #1\n"
"ble 11f\n"
"ld1w { z11.s }, p2/Z, [x12]\n"
"fmla z24.s, z11.s, z0.s[3]\n"
"addvl x12, x12, #1\n"
"11:" // Height 1: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x10, x10, #0x1\n"
- "cmp x10, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x9, x9, #0x1\n"
+ "cmp x9, x19\n"
"bne 6b\n"
"tbz %x[flags], #1, 12f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p2/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z16.s }, p2/Z, [x20]\n"
- "fmin z24.s, p2/M, z24.s, z17.s\n"
- "fmax z24.s, p2/M, z24.s, z16.s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p2/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z16.s }, p2/Z, [x19]\n"
+ "fmin z24.s, p2/M, z24.s, z16.s\n"
+ "fmax z24.s, p2/M, z24.s, z17.s\n"
"12:" // Height 1: No activation
- "st1w { z24.s }, p1, [x11]\n"
- "addvl x11, x11, #1\n"
+ "st1w { z24.s }, p1, [x10]\n"
+ "addvl x10, x10, #1\n"
"13:" // Height 1: Writeback done
"decw x13\n"
"cmp x13, XZR\n"
"bgt 2b\n"
"b 106f\n"
"14:" // Height 2
- "mov x14, %x[bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x11, %x[bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x10, %x[output_ptr]\n"
"15:" // Height 2: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.s, x20, x13\n"
- "cbz x14, 16f\n"
- "ld1w { z24.s }, p2/Z, [x14]\n"
+ "mov x19, #0x0\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x11, 16f\n"
+ "ld1w { z24.s }, p2/Z, [x11]\n"
"mov z25.d, z24.d\n"
- "addvl x14, x14, #1\n"
+ "addvl x11, x11, #1\n"
"b 18f\n"
"16:" // Height 2: no bias
"tbz %x[flags], #0, 17f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "ld1w { z24.s }, p1/Z, [x11]\n"
- "ld1w { z25.s }, p1/Z, [x27]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z24.s }, p1/Z, [x10]\n"
+ "add x26, x10, x19, LSL #2\n"
+ "ld1w { z25.s }, p1/Z, [x26]\n"
"b 18f\n"
"17:" // Height 2: no accumulate
"mov z24.b, #0x0\n"
"mov z25.b, #0x0\n"
"18:" // Height 2: setup done
- "mov x10, #0x0\n"
+ "mov x9, #0x0\n"
"19:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w28, [x20, x9, LSL #0x2]\n"
"tbz %x[flags], #3, 20f\n"
- "ldr x21, [%x[input_ptr], x10, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x28, [x21, #0x0]\n"
- "ldr x27, [x21, #0x8]\n"
- "cbnz x10, 21f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x28, x28, x20, LSL #2\n"
- "add x27, x27, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x27, [x20, #0x0]\n"
+ "ldr x26, [x20, #0x8]\n"
+ "cbnz x9, 21f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x27, x27, x19, LSL #2\n"
+ "add x26, x26, x19, LSL #2\n"
"b 21f\n"
"20:" // Height 2: setup direct input
- "mov x28, %x[input_ptr]\n"
- "add x27, x28, x20, LSL #2\n"
+ "mov x27, %x[input_ptr]\n"
+ "add x26, x27, x19, LSL #2\n"
"21:" // Height 2: input setup done
- "cmp x9, #0x4\n"
+ "cmp x28, #0x4\n"
"ble 23f\n"
"22:" // Height 2: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x9\n"
- "ld1rqw { z0.s }, p0/Z, [x28]\n"
- "ld1rqw { z1.s }, p0/Z, [x27]\n"
- "sub x9, x9, #0x4\n"
"ld1w { z8.s }, p2/Z, [x12]\n"
+ "whilelt p0.s, XZR, x28\n"
+ "ld1w { z9.s }, p2/Z, [x12, #1, MUL VL]\n"
+ "sub x28, x28, #0x4\n"
+ "ld1rqw { z0.s }, p0/Z, [x27]\n"
"fmla z24.s, z8.s, z0.s[0]\n"
+ "ld1rqw { z1.s }, p0/Z, [x26]\n"
+ "cmp x28, #0x4\n"
"fmla z25.s, z8.s, z1.s[0]\n"
- "ld1w { z9.s }, p2/Z, [x12, #1, MUL VL]\n"
- "fmla z24.s, z9.s, z0.s[1]\n"
- "fmla z25.s, z9.s, z1.s[1]\n"
"ld1w { z10.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "add x27, x27, #0x10\n"
+ "fmla z24.s, z9.s, z0.s[1]\n"
"ld1w { z11.s }, p2/Z, [x12, #3, MUL VL]\n"
+ "add x26, x26, #0x10\n"
+ "fmla z25.s, z9.s, z1.s[1]\n"
+ "addvl x12, x12, #4\n"
"fmla z24.s, z10.s, z0.s[2]\n"
"fmla z25.s, z10.s, z1.s[2]\n"
- "cmp x9, #0x4\n"
- "add x28, x28, #0x10\n"
"fmla z24.s, z11.s, z0.s[3]\n"
"fmla z25.s, z11.s, z1.s[3]\n"
- "add x27, x27, #0x10\n"
- "addvl x12, x12, #4\n"
"bgt 22b\n"
"23:" // Height 2: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x9\n"
- "ld1rqw { z0.s }, p0/Z, [x28]\n"
- "ld1rqw { z1.s }, p0/Z, [x27]\n"
- "subs x9, x9, #0x1\n"
"ld1w { z8.s }, p2/Z, [x12]\n"
+ "whilelt p0.s, XZR, x28\n"
+ "subs x28, x28, #0x1\n"
+ "ld1rqw { z0.s }, p0/Z, [x27]\n"
"fmla z24.s, z8.s, z0.s[0]\n"
- "fmla z25.s, z8.s, z1.s[0]\n"
+ "ld1rqw { z1.s }, p0/Z, [x26]\n"
"addvl x12, x12, #1\n"
+ "fmla z25.s, z8.s, z1.s[0]\n"
"ble 24f\n"
"ld1w { z9.s }, p2/Z, [x12]\n"
- "subs x9, x9, #0x1\n"
"fmla z24.s, z9.s, z0.s[1]\n"
- "fmla z25.s, z9.s, z1.s[1]\n"
+ "subs x28, x28, #0x1\n"
"addvl x12, x12, #1\n"
+ "fmla z25.s, z9.s, z1.s[1]\n"
"ble 24f\n"
"ld1w { z10.s }, p2/Z, [x12]\n"
- "subs x9, x9, #0x1\n"
"fmla z24.s, z10.s, z0.s[2]\n"
+ "subs x28, x28, #0x1\n"
"fmla z25.s, z10.s, z1.s[2]\n"
"addvl x12, x12, #1\n"
"ble 24f\n"
"ld1w { z11.s }, p2/Z, [x12]\n"
"fmla z24.s, z11.s, z0.s[3]\n"
- "fmla z25.s, z11.s, z1.s[3]\n"
"addvl x12, x12, #1\n"
+ "fmla z25.s, z11.s, z1.s[3]\n"
"24:" // Height 2: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x10, x10, #0x1\n"
- "cmp x10, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x9, x9, #0x1\n"
+ "cmp x9, x19\n"
"bne 19b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x26, x10, x19, LSL #2\n"
"tbz %x[flags], #1, 25f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p2/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z16.s }, p2/Z, [x20]\n"
- "fmin z24.s, p2/M, z24.s, z17.s\n"
- "fmin z25.s, p2/M, z25.s, z17.s\n"
- "fmax z24.s, p2/M, z24.s, z16.s\n"
- "fmax z25.s, p2/M, z25.s, z16.s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p2/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z16.s }, p2/Z, [x19]\n"
+ "fmin z24.s, p2/M, z24.s, z16.s\n"
+ "fmin z25.s, p2/M, z25.s, z16.s\n"
+ "fmax z24.s, p2/M, z24.s, z17.s\n"
+ "fmax z25.s, p2/M, z25.s, z17.s\n"
"25:" // Height 2: No activation
- "st1w { z24.s }, p1, [x11]\n"
- "addvl x11, x11, #1\n"
- "st1w { z25.s }, p1, [x27]\n"
+ "st1w { z24.s }, p1, [x10]\n"
+ "addvl x10, x10, #1\n"
+ "st1w { z25.s }, p1, [x26]\n"
"26:" // Height 2: Writeback done
"decw x13\n"
"cmp x13, XZR\n"
"bgt 15b\n"
"b 106f\n"
"27:" // Height 3
- "mov x14, %x[bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x11, %x[bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x10, %x[output_ptr]\n"
"28:" // Height 3: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.s, x20, x13\n"
- "cbz x14, 29f\n"
- "ld1w { z24.s }, p2/Z, [x14]\n"
+ "mov x19, #0x0\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x11, 29f\n"
+ "ld1w { z24.s }, p2/Z, [x11]\n"
"mov z25.d, z24.d\n"
+ "addvl x11, x11, #1\n"
"mov z26.d, z24.d\n"
- "addvl x14, x14, #1\n"
"b 31f\n"
"29:" // Height 3: no bias
"tbz %x[flags], #0, 30f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "ld1w { z24.s }, p1/Z, [x11]\n"
- "ld1w { z25.s }, p1/Z, [x27]\n"
- "ld1w { z26.s }, p1/Z, [x26]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z24.s }, p1/Z, [x10]\n"
+ "add x26, x10, x19, LSL #2\n"
+ "ld1w { z25.s }, p1/Z, [x26]\n"
+ "add x25, x26, x19, LSL #2\n"
+ "ld1w { z26.s }, p1/Z, [x25]\n"
"b 31f\n"
"30:" // Height 3: no accumulate
"mov z24.b, #0x0\n"
"mov z25.b, #0x0\n"
"mov z26.b, #0x0\n"
"31:" // Height 3: setup done
- "mov x10, #0x0\n"
+ "mov x9, #0x0\n"
"32:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w28, [x20, x9, LSL #0x2]\n"
"tbz %x[flags], #3, 33f\n"
- "ldr x21, [%x[input_ptr], x10, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x28, [x21, #0x0]\n"
- "ldr x27, [x21, #0x8]\n"
- "ldr x26, [x21, #0x10]\n"
- "cbnz x10, 34f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x28, x28, x20, LSL #2\n"
- "add x27, x27, x20, LSL #2\n"
- "add x26, x26, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x27, [x20, #0x0]\n"
+ "ldr x26, [x20, #0x8]\n"
+ "ldr x25, [x20, #0x10]\n"
+ "cbnz x9, 34f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x27, x27, x19, LSL #2\n"
+ "add x26, x26, x19, LSL #2\n"
+ "add x25, x25, x19, LSL #2\n"
"b 34f\n"
"33:" // Height 3: setup direct input
- "mov x28, %x[input_ptr]\n"
- "add x27, x28, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
+ "mov x27, %x[input_ptr]\n"
+ "add x26, x27, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
"34:" // Height 3: input setup done
- "cmp x9, #0x4\n"
+ "cmp x28, #0x4\n"
"ble 36f\n"
"35:" // Height 3: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x9\n"
- "ld1rqw { z0.s }, p0/Z, [x28]\n"
- "ld1rqw { z1.s }, p0/Z, [x27]\n"
- "sub x9, x9, #0x4\n"
- "ld1rqw { z2.s }, p0/Z, [x26]\n"
"ld1w { z8.s }, p2/Z, [x12]\n"
+ "whilelt p0.s, XZR, x28\n"
+ "ld1w { z9.s }, p2/Z, [x12, #1, MUL VL]\n"
+ "sub x28, x28, #0x4\n"
+ "ld1rqw { z0.s }, p0/Z, [x27]\n"
"fmla z24.s, z8.s, z0.s[0]\n"
+ "ld1rqw { z1.s }, p0/Z, [x26]\n"
+ "cmp x28, #0x4\n"
"fmla z25.s, z8.s, z1.s[0]\n"
- "fmla z26.s, z8.s, z2.s[0]\n"
- "ld1w { z9.s }, p2/Z, [x12, #1, MUL VL]\n"
+ "ld1rqw { z2.s }, p0/Z, [x25]\n"
+ "add x27, x27, #0x10\n"
"fmla z24.s, z9.s, z0.s[1]\n"
"ld1w { z10.s }, p2/Z, [x12, #2, MUL VL]\n"
- "fmla z25.s, z9.s, z1.s[1]\n"
- "fmla z26.s, z9.s, z2.s[1]\n"
+ "add x26, x26, #0x10\n"
+ "fmla z26.s, z8.s, z2.s[0]\n"
"ld1w { z11.s }, p2/Z, [x12, #3, MUL VL]\n"
- "cmp x9, #0x4\n"
+ "add x25, x25, #0x10\n"
+ "fmla z25.s, z9.s, z1.s[1]\n"
+ "addvl x12, x12, #4\n"
"fmla z24.s, z10.s, z0.s[2]\n"
+ "fmla z26.s, z9.s, z2.s[1]\n"
"fmla z25.s, z10.s, z1.s[2]\n"
- "add x28, x28, #0x10\n"
- "add x27, x27, #0x10\n"
- "fmla z26.s, z10.s, z2.s[2]\n"
"fmla z24.s, z11.s, z0.s[3]\n"
- "add x26, x26, #0x10\n"
- "addvl x12, x12, #4\n"
+ "fmla z26.s, z10.s, z2.s[2]\n"
"fmla z25.s, z11.s, z1.s[3]\n"
"fmla z26.s, z11.s, z2.s[3]\n"
"bgt 35b\n"
"36:" // Height 3: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x9\n"
- "ld1rqw { z0.s }, p0/Z, [x28]\n"
- "ld1rqw { z1.s }, p0/Z, [x27]\n"
- "subs x9, x9, #0x1\n"
- "ld1rqw { z2.s }, p0/Z, [x26]\n"
"ld1w { z8.s }, p2/Z, [x12]\n"
+ "whilelt p0.s, XZR, x28\n"
+ "subs x28, x28, #0x1\n"
+ "ld1rqw { z0.s }, p0/Z, [x27]\n"
"fmla z24.s, z8.s, z0.s[0]\n"
+ "ld1rqw { z1.s }, p0/Z, [x26]\n"
+ "addvl x12, x12, #1\n"
"fmla z25.s, z8.s, z1.s[0]\n"
+ "ld1rqw { z2.s }, p0/Z, [x25]\n"
"fmla z26.s, z8.s, z2.s[0]\n"
- "addvl x12, x12, #1\n"
"ble 37f\n"
"ld1w { z9.s }, p2/Z, [x12]\n"
- "subs x9, x9, #0x1\n"
"fmla z24.s, z9.s, z0.s[1]\n"
+ "subs x28, x28, #0x1\n"
"fmla z25.s, z9.s, z1.s[1]\n"
- "fmla z26.s, z9.s, z2.s[1]\n"
"addvl x12, x12, #1\n"
+ "fmla z26.s, z9.s, z2.s[1]\n"
"ble 37f\n"
"ld1w { z10.s }, p2/Z, [x12]\n"
- "subs x9, x9, #0x1\n"
"fmla z24.s, z10.s, z0.s[2]\n"
+ "subs x28, x28, #0x1\n"
"fmla z25.s, z10.s, z1.s[2]\n"
- "fmla z26.s, z10.s, z2.s[2]\n"
"addvl x12, x12, #1\n"
+ "fmla z26.s, z10.s, z2.s[2]\n"
"ble 37f\n"
"ld1w { z11.s }, p2/Z, [x12]\n"
"fmla z24.s, z11.s, z0.s[3]\n"
- "fmla z25.s, z11.s, z1.s[3]\n"
"addvl x12, x12, #1\n"
+ "fmla z25.s, z11.s, z1.s[3]\n"
"fmla z26.s, z11.s, z2.s[3]\n"
"37:" // Height 3: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x10, x10, #0x1\n"
- "cmp x10, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x9, x9, #0x1\n"
+ "cmp x9, x19\n"
"bne 32b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x26, x10, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
"tbz %x[flags], #1, 38f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p2/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z16.s }, p2/Z, [x20]\n"
- "fmin z24.s, p2/M, z24.s, z17.s\n"
- "fmin z25.s, p2/M, z25.s, z17.s\n"
- "fmin z26.s, p2/M, z26.s, z17.s\n"
- "fmax z24.s, p2/M, z24.s, z16.s\n"
- "fmax z25.s, p2/M, z25.s, z16.s\n"
- "fmax z26.s, p2/M, z26.s, z16.s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p2/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z16.s }, p2/Z, [x19]\n"
+ "fmin z24.s, p2/M, z24.s, z16.s\n"
+ "fmin z25.s, p2/M, z25.s, z16.s\n"
+ "fmin z26.s, p2/M, z26.s, z16.s\n"
+ "fmax z24.s, p2/M, z24.s, z17.s\n"
+ "fmax z25.s, p2/M, z25.s, z17.s\n"
+ "fmax z26.s, p2/M, z26.s, z17.s\n"
"38:" // Height 3: No activation
- "st1w { z24.s }, p1, [x11]\n"
- "addvl x11, x11, #1\n"
- "st1w { z25.s }, p1, [x27]\n"
- "st1w { z26.s }, p1, [x26]\n"
+ "st1w { z24.s }, p1, [x10]\n"
+ "addvl x10, x10, #1\n"
+ "st1w { z25.s }, p1, [x26]\n"
+ "st1w { z26.s }, p1, [x25]\n"
"39:" // Height 3: Writeback done
"decw x13\n"
"cmp x13, XZR\n"
"bgt 28b\n"
"b 106f\n"
"40:" // Height 4
- "mov x14, %x[bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x11, %x[bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x10, %x[output_ptr]\n"
"41:" // Height 4: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.s, x20, x13\n"
- "cbz x14, 42f\n"
- "ld1w { z24.s }, p2/Z, [x14]\n"
+ "mov x19, #0x0\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x11, 42f\n"
+ "ld1w { z24.s }, p2/Z, [x11]\n"
"mov z25.d, z24.d\n"
+ "addvl x11, x11, #1\n"
"mov z26.d, z24.d\n"
- "addvl x14, x14, #1\n"
"mov z27.d, z24.d\n"
"b 44f\n"
"42:" // Height 4: no bias
"tbz %x[flags], #0, 43f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "ld1w { z24.s }, p1/Z, [x11]\n"
- "add x25, x26, x20, LSL #2\n"
- "ld1w { z25.s }, p1/Z, [x27]\n"
- "ld1w { z26.s }, p1/Z, [x26]\n"
- "ld1w { z27.s }, p1/Z, [x25]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z24.s }, p1/Z, [x10]\n"
+ "add x26, x10, x19, LSL #2\n"
+ "ld1w { z25.s }, p1/Z, [x26]\n"
+ "add x25, x26, x19, LSL #2\n"
+ "ld1w { z26.s }, p1/Z, [x25]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "ld1w { z27.s }, p1/Z, [x24]\n"
"b 44f\n"
"43:" // Height 4: no accumulate
"mov z24.b, #0x0\n"
@@ -488,164 +488,164 @@ void sve_hybrid_fp32_mla_8x1VL (
"mov z26.b, #0x0\n"
"mov z27.b, #0x0\n"
"44:" // Height 4: setup done
- "mov x10, #0x0\n"
+ "mov x9, #0x0\n"
"45:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w28, [x20, x9, LSL #0x2]\n"
"tbz %x[flags], #3, 46f\n"
- "ldr x21, [%x[input_ptr], x10, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x28, [x21, #0x0]\n"
- "ldr x27, [x21, #0x8]\n"
- "ldr x26, [x21, #0x10]\n"
- "ldr x25, [x21, #0x18]\n"
- "cbnz x10, 47f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x28, x28, x20, LSL #2\n"
- "add x27, x27, x20, LSL #2\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x27, [x20, #0x0]\n"
+ "ldr x26, [x20, #0x8]\n"
+ "ldr x25, [x20, #0x10]\n"
+ "ldr x24, [x20, #0x18]\n"
+ "cbnz x9, 47f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x27, x27, x19, LSL #2\n"
+ "add x26, x26, x19, LSL #2\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
"b 47f\n"
"46:" // Height 4: setup direct input
- "mov x28, %x[input_ptr]\n"
- "add x27, x28, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "add x25, x26, x20, LSL #2\n"
+ "mov x27, %x[input_ptr]\n"
+ "add x26, x27, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
"47:" // Height 4: input setup done
- "cmp x9, #0x4\n"
+ "cmp x28, #0x4\n"
"ble 49f\n"
"48:" // Height 4: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x9\n"
- "ld1rqw { z0.s }, p0/Z, [x28]\n"
- "ld1rqw { z1.s }, p0/Z, [x27]\n"
- "sub x9, x9, #0x4\n"
- "ld1rqw { z2.s }, p0/Z, [x26]\n"
- "ld1rqw { z3.s }, p0/Z, [x25]\n"
- "cmp x9, #0x4\n"
- "add x28, x28, #0x10\n"
"ld1w { z8.s }, p2/Z, [x12]\n"
+ "whilelt p0.s, XZR, x28\n"
+ "ld1w { z9.s }, p2/Z, [x12, #1, MUL VL]\n"
+ "sub x28, x28, #0x4\n"
+ "ld1rqw { z0.s }, p0/Z, [x27]\n"
"fmla z24.s, z8.s, z0.s[0]\n"
+ "ld1rqw { z1.s }, p0/Z, [x26]\n"
+ "cmp x28, #0x4\n"
"fmla z25.s, z8.s, z1.s[0]\n"
- "ld1w { z9.s }, p2/Z, [x12, #1, MUL VL]\n"
+ "ld1rqw { z2.s }, p0/Z, [x25]\n"
+ "add x27, x27, #0x10\n"
+ "fmla z24.s, z9.s, z0.s[1]\n"
+ "ld1rqw { z3.s }, p0/Z, [x24]\n"
+ "add x26, x26, #0x10\n"
"fmla z26.s, z8.s, z2.s[0]\n"
- "fmla z27.s, z8.s, z3.s[0]\n"
"ld1w { z10.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "add x25, x25, #0x10\n"
+ "fmla z27.s, z8.s, z3.s[0]\n"
"ld1w { z11.s }, p2/Z, [x12, #3, MUL VL]\n"
- "fmla z24.s, z9.s, z0.s[1]\n"
+ "add x24, x24, #0x10\n"
"fmla z25.s, z9.s, z1.s[1]\n"
- "add x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
- "fmla z26.s, z9.s, z2.s[1]\n"
- "fmla z27.s, z9.s, z3.s[1]\n"
- "add x25, x25, #0x10\n"
"addvl x12, x12, #4\n"
"fmla z24.s, z10.s, z0.s[2]\n"
+ "fmla z26.s, z9.s, z2.s[1]\n"
+ "fmla z27.s, z9.s, z3.s[1]\n"
"fmla z25.s, z10.s, z1.s[2]\n"
+ "fmla z24.s, z11.s, z0.s[3]\n"
"fmla z26.s, z10.s, z2.s[2]\n"
"fmla z27.s, z10.s, z3.s[2]\n"
- "fmla z24.s, z11.s, z0.s[3]\n"
"fmla z25.s, z11.s, z1.s[3]\n"
"fmla z26.s, z11.s, z2.s[3]\n"
"fmla z27.s, z11.s, z3.s[3]\n"
"bgt 48b\n"
"49:" // Height 4: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x9\n"
- "ld1rqw { z0.s }, p0/Z, [x28]\n"
- "ld1rqw { z1.s }, p0/Z, [x27]\n"
- "subs x9, x9, #0x1\n"
- "ld1rqw { z2.s }, p0/Z, [x26]\n"
- "ld1rqw { z3.s }, p0/Z, [x25]\n"
"ld1w { z8.s }, p2/Z, [x12]\n"
+ "whilelt p0.s, XZR, x28\n"
+ "subs x28, x28, #0x1\n"
+ "ld1rqw { z0.s }, p0/Z, [x27]\n"
"fmla z24.s, z8.s, z0.s[0]\n"
- "fmla z25.s, z8.s, z1.s[0]\n"
+ "ld1rqw { z1.s }, p0/Z, [x26]\n"
"addvl x12, x12, #1\n"
+ "fmla z25.s, z8.s, z1.s[0]\n"
+ "ld1rqw { z2.s }, p0/Z, [x25]\n"
+ "ld1rqw { z3.s }, p0/Z, [x24]\n"
"fmla z26.s, z8.s, z2.s[0]\n"
"fmla z27.s, z8.s, z3.s[0]\n"
"ble 50f\n"
"ld1w { z9.s }, p2/Z, [x12]\n"
- "subs x9, x9, #0x1\n"
"fmla z24.s, z9.s, z0.s[1]\n"
+ "subs x28, x28, #0x1\n"
"fmla z25.s, z9.s, z1.s[1]\n"
+ "addvl x12, x12, #1\n"
"fmla z26.s, z9.s, z2.s[1]\n"
"fmla z27.s, z9.s, z3.s[1]\n"
- "addvl x12, x12, #1\n"
"ble 50f\n"
"ld1w { z10.s }, p2/Z, [x12]\n"
- "subs x9, x9, #0x1\n"
"fmla z24.s, z10.s, z0.s[2]\n"
+ "subs x28, x28, #0x1\n"
"fmla z25.s, z10.s, z1.s[2]\n"
+ "addvl x12, x12, #1\n"
"fmla z26.s, z10.s, z2.s[2]\n"
"fmla z27.s, z10.s, z3.s[2]\n"
- "addvl x12, x12, #1\n"
"ble 50f\n"
"ld1w { z11.s }, p2/Z, [x12]\n"
"fmla z24.s, z11.s, z0.s[3]\n"
- "fmla z25.s, z11.s, z1.s[3]\n"
"addvl x12, x12, #1\n"
+ "fmla z25.s, z11.s, z1.s[3]\n"
"fmla z26.s, z11.s, z2.s[3]\n"
"fmla z27.s, z11.s, z3.s[3]\n"
"50:" // Height 4: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x10, x10, #0x1\n"
- "cmp x10, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x9, x9, #0x1\n"
+ "cmp x9, x19\n"
"bne 45b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "add x25, x26, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x26, x10, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
"tbz %x[flags], #1, 51f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p2/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z16.s }, p2/Z, [x20]\n"
- "fmin z24.s, p2/M, z24.s, z17.s\n"
- "fmin z25.s, p2/M, z25.s, z17.s\n"
- "fmin z26.s, p2/M, z26.s, z17.s\n"
- "fmin z27.s, p2/M, z27.s, z17.s\n"
- "fmax z24.s, p2/M, z24.s, z16.s\n"
- "fmax z25.s, p2/M, z25.s, z16.s\n"
- "fmax z26.s, p2/M, z26.s, z16.s\n"
- "fmax z27.s, p2/M, z27.s, z16.s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p2/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z16.s }, p2/Z, [x19]\n"
+ "fmin z24.s, p2/M, z24.s, z16.s\n"
+ "fmin z25.s, p2/M, z25.s, z16.s\n"
+ "fmin z26.s, p2/M, z26.s, z16.s\n"
+ "fmin z27.s, p2/M, z27.s, z16.s\n"
+ "fmax z24.s, p2/M, z24.s, z17.s\n"
+ "fmax z25.s, p2/M, z25.s, z17.s\n"
+ "fmax z26.s, p2/M, z26.s, z17.s\n"
+ "fmax z27.s, p2/M, z27.s, z17.s\n"
"51:" // Height 4: No activation
- "st1w { z24.s }, p1, [x11]\n"
- "addvl x11, x11, #1\n"
- "st1w { z25.s }, p1, [x27]\n"
- "st1w { z26.s }, p1, [x26]\n"
- "st1w { z27.s }, p1, [x25]\n"
+ "st1w { z24.s }, p1, [x10]\n"
+ "addvl x10, x10, #1\n"
+ "st1w { z25.s }, p1, [x26]\n"
+ "st1w { z26.s }, p1, [x25]\n"
+ "st1w { z27.s }, p1, [x24]\n"
"52:" // Height 4: Writeback done
"decw x13\n"
"cmp x13, XZR\n"
"bgt 41b\n"
"b 106f\n"
"53:" // Height 5
- "mov x14, %x[bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x11, %x[bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x10, %x[output_ptr]\n"
"54:" // Height 5: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.s, x20, x13\n"
- "cbz x14, 55f\n"
- "ld1w { z24.s }, p2/Z, [x14]\n"
+ "mov x19, #0x0\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x11, 55f\n"
+ "ld1w { z24.s }, p2/Z, [x11]\n"
"mov z25.d, z24.d\n"
+ "addvl x11, x11, #1\n"
"mov z26.d, z24.d\n"
- "addvl x14, x14, #1\n"
"mov z27.d, z24.d\n"
"mov z28.d, z24.d\n"
"b 57f\n"
"55:" // Height 5: no bias
"tbz %x[flags], #0, 56f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "ld1w { z24.s }, p1/Z, [x11]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z25.s }, p1/Z, [x27]\n"
- "ld1w { z26.s }, p1/Z, [x26]\n"
- "ld1w { z27.s }, p1/Z, [x25]\n"
- "ld1w { z28.s }, p1/Z, [x24]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z24.s }, p1/Z, [x10]\n"
+ "add x26, x10, x19, LSL #2\n"
+ "ld1w { z25.s }, p1/Z, [x26]\n"
+ "add x25, x26, x19, LSL #2\n"
+ "ld1w { z26.s }, p1/Z, [x25]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "ld1w { z27.s }, p1/Z, [x24]\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z28.s }, p1/Z, [x23]\n"
"b 57f\n"
"56:" // Height 5: no accumulate
"mov z24.b, #0x0\n"
@@ -654,185 +654,185 @@ void sve_hybrid_fp32_mla_8x1VL (
"mov z27.b, #0x0\n"
"mov z28.b, #0x0\n"
"57:" // Height 5: setup done
- "mov x10, #0x0\n"
+ "mov x9, #0x0\n"
"58:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w28, [x20, x9, LSL #0x2]\n"
"tbz %x[flags], #3, 59f\n"
- "ldr x21, [%x[input_ptr], x10, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x28, [x21, #0x0]\n"
- "ldr x27, [x21, #0x8]\n"
- "ldr x26, [x21, #0x10]\n"
- "ldr x25, [x21, #0x18]\n"
- "ldr x24, [x21, #0x20]\n"
- "cbnz x10, 60f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x28, x28, x20, LSL #2\n"
- "add x27, x27, x20, LSL #2\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x27, [x20, #0x0]\n"
+ "ldr x26, [x20, #0x8]\n"
+ "ldr x25, [x20, #0x10]\n"
+ "ldr x24, [x20, #0x18]\n"
+ "ldr x23, [x20, #0x20]\n"
+ "cbnz x9, 60f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x27, x27, x19, LSL #2\n"
+ "add x26, x26, x19, LSL #2\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
"b 60f\n"
"59:" // Height 5: setup direct input
- "mov x28, %x[input_ptr]\n"
- "add x27, x28, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "mov x27, %x[input_ptr]\n"
+ "add x26, x27, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"60:" // Height 5: input setup done
- "cmp x9, #0x4\n"
+ "cmp x28, #0x4\n"
"ble 62f\n"
"61:" // Height 5: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x9\n"
- "ld1rqw { z0.s }, p0/Z, [x28]\n"
- "ld1rqw { z1.s }, p0/Z, [x27]\n"
- "sub x9, x9, #0x4\n"
- "ld1rqw { z2.s }, p0/Z, [x26]\n"
- "ld1rqw { z3.s }, p0/Z, [x25]\n"
- "cmp x9, #0x4\n"
- "add x28, x28, #0x10\n"
- "ld1rqw { z4.s }, p0/Z, [x24]\n"
"ld1w { z8.s }, p2/Z, [x12]\n"
+ "whilelt p0.s, XZR, x28\n"
+ "ld1w { z9.s }, p2/Z, [x12, #1, MUL VL]\n"
+ "sub x28, x28, #0x4\n"
+ "ld1rqw { z0.s }, p0/Z, [x27]\n"
"fmla z24.s, z8.s, z0.s[0]\n"
+ "ld1rqw { z1.s }, p0/Z, [x26]\n"
+ "cmp x28, #0x4\n"
"fmla z25.s, z8.s, z1.s[0]\n"
- "fmla z26.s, z8.s, z2.s[0]\n"
- "fmla z27.s, z8.s, z3.s[0]\n"
- "ld1w { z9.s }, p2/Z, [x12, #1, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x12, #2, MUL VL]\n"
- "fmla z28.s, z8.s, z4.s[0]\n"
- "fmla z24.s, z9.s, z0.s[1]\n"
- "ld1w { z11.s }, p2/Z, [x12, #3, MUL VL]\n"
+ "ld1rqw { z2.s }, p0/Z, [x25]\n"
"add x27, x27, #0x10\n"
- "fmla z25.s, z9.s, z1.s[1]\n"
- "fmla z26.s, z9.s, z2.s[1]\n"
+ "fmla z24.s, z9.s, z0.s[1]\n"
+ "ld1rqw { z3.s }, p0/Z, [x24]\n"
"add x26, x26, #0x10\n"
+ "fmla z26.s, z8.s, z2.s[0]\n"
+ "ld1rqw { z4.s }, p0/Z, [x23]\n"
"add x25, x25, #0x10\n"
- "fmla z27.s, z9.s, z3.s[1]\n"
- "fmla z28.s, z9.s, z4.s[1]\n"
+ "fmla z27.s, z8.s, z3.s[0]\n"
+ "ld1w { z10.s }, p2/Z, [x12, #2, MUL VL]\n"
"add x24, x24, #0x10\n"
+ "fmla z25.s, z9.s, z1.s[1]\n"
+ "ld1w { z11.s }, p2/Z, [x12, #3, MUL VL]\n"
+ "add x23, x23, #0x10\n"
+ "fmla z28.s, z8.s, z4.s[0]\n"
"addvl x12, x12, #4\n"
+ "fmla z26.s, z9.s, z2.s[1]\n"
"fmla z24.s, z10.s, z0.s[2]\n"
+ "fmla z27.s, z9.s, z3.s[1]\n"
"fmla z25.s, z10.s, z1.s[2]\n"
+ "fmla z28.s, z9.s, z4.s[1]\n"
"fmla z26.s, z10.s, z2.s[2]\n"
"fmla z27.s, z10.s, z3.s[2]\n"
- "fmla z28.s, z10.s, z4.s[2]\n"
"fmla z24.s, z11.s, z0.s[3]\n"
+ "fmla z28.s, z10.s, z4.s[2]\n"
"fmla z25.s, z11.s, z1.s[3]\n"
"fmla z26.s, z11.s, z2.s[3]\n"
"fmla z27.s, z11.s, z3.s[3]\n"
"fmla z28.s, z11.s, z4.s[3]\n"
"bgt 61b\n"
"62:" // Height 5: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x9\n"
- "ld1rqw { z0.s }, p0/Z, [x28]\n"
- "ld1rqw { z1.s }, p0/Z, [x27]\n"
- "subs x9, x9, #0x1\n"
- "ld1rqw { z2.s }, p0/Z, [x26]\n"
- "ld1rqw { z3.s }, p0/Z, [x25]\n"
- "ld1rqw { z4.s }, p0/Z, [x24]\n"
"ld1w { z8.s }, p2/Z, [x12]\n"
+ "whilelt p0.s, XZR, x28\n"
+ "subs x28, x28, #0x1\n"
+ "ld1rqw { z0.s }, p0/Z, [x27]\n"
"fmla z24.s, z8.s, z0.s[0]\n"
+ "ld1rqw { z1.s }, p0/Z, [x26]\n"
+ "addvl x12, x12, #1\n"
"fmla z25.s, z8.s, z1.s[0]\n"
+ "ld1rqw { z2.s }, p0/Z, [x25]\n"
+ "ld1rqw { z3.s }, p0/Z, [x24]\n"
"fmla z26.s, z8.s, z2.s[0]\n"
+ "ld1rqw { z4.s }, p0/Z, [x23]\n"
"fmla z27.s, z8.s, z3.s[0]\n"
- "addvl x12, x12, #1\n"
"fmla z28.s, z8.s, z4.s[0]\n"
"ble 63f\n"
"ld1w { z9.s }, p2/Z, [x12]\n"
- "subs x9, x9, #0x1\n"
"fmla z24.s, z9.s, z0.s[1]\n"
+ "subs x28, x28, #0x1\n"
"fmla z25.s, z9.s, z1.s[1]\n"
+ "addvl x12, x12, #1\n"
"fmla z26.s, z9.s, z2.s[1]\n"
"fmla z27.s, z9.s, z3.s[1]\n"
- "addvl x12, x12, #1\n"
"fmla z28.s, z9.s, z4.s[1]\n"
"ble 63f\n"
"ld1w { z10.s }, p2/Z, [x12]\n"
- "subs x9, x9, #0x1\n"
"fmla z24.s, z10.s, z0.s[2]\n"
+ "subs x28, x28, #0x1\n"
"fmla z25.s, z10.s, z1.s[2]\n"
+ "addvl x12, x12, #1\n"
"fmla z26.s, z10.s, z2.s[2]\n"
"fmla z27.s, z10.s, z3.s[2]\n"
- "addvl x12, x12, #1\n"
"fmla z28.s, z10.s, z4.s[2]\n"
"ble 63f\n"
"ld1w { z11.s }, p2/Z, [x12]\n"
"fmla z24.s, z11.s, z0.s[3]\n"
- "fmla z25.s, z11.s, z1.s[3]\n"
"addvl x12, x12, #1\n"
+ "fmla z25.s, z11.s, z1.s[3]\n"
"fmla z26.s, z11.s, z2.s[3]\n"
"fmla z27.s, z11.s, z3.s[3]\n"
"fmla z28.s, z11.s, z4.s[3]\n"
"63:" // Height 5: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x10, x10, #0x1\n"
- "cmp x10, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x9, x9, #0x1\n"
+ "cmp x9, x19\n"
"bne 58b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x26, x10, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"tbz %x[flags], #1, 64f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p2/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z16.s }, p2/Z, [x20]\n"
- "fmin z24.s, p2/M, z24.s, z17.s\n"
- "fmin z25.s, p2/M, z25.s, z17.s\n"
- "fmin z26.s, p2/M, z26.s, z17.s\n"
- "fmin z27.s, p2/M, z27.s, z17.s\n"
- "fmin z28.s, p2/M, z28.s, z17.s\n"
- "fmax z24.s, p2/M, z24.s, z16.s\n"
- "fmax z25.s, p2/M, z25.s, z16.s\n"
- "fmax z26.s, p2/M, z26.s, z16.s\n"
- "fmax z27.s, p2/M, z27.s, z16.s\n"
- "fmax z28.s, p2/M, z28.s, z16.s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p2/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z16.s }, p2/Z, [x19]\n"
+ "fmin z24.s, p2/M, z24.s, z16.s\n"
+ "fmin z25.s, p2/M, z25.s, z16.s\n"
+ "fmin z26.s, p2/M, z26.s, z16.s\n"
+ "fmin z27.s, p2/M, z27.s, z16.s\n"
+ "fmin z28.s, p2/M, z28.s, z16.s\n"
+ "fmax z24.s, p2/M, z24.s, z17.s\n"
+ "fmax z25.s, p2/M, z25.s, z17.s\n"
+ "fmax z26.s, p2/M, z26.s, z17.s\n"
+ "fmax z27.s, p2/M, z27.s, z17.s\n"
+ "fmax z28.s, p2/M, z28.s, z17.s\n"
"64:" // Height 5: No activation
- "st1w { z24.s }, p1, [x11]\n"
- "addvl x11, x11, #1\n"
- "st1w { z25.s }, p1, [x27]\n"
- "st1w { z26.s }, p1, [x26]\n"
- "st1w { z27.s }, p1, [x25]\n"
- "st1w { z28.s }, p1, [x24]\n"
+ "st1w { z24.s }, p1, [x10]\n"
+ "addvl x10, x10, #1\n"
+ "st1w { z25.s }, p1, [x26]\n"
+ "st1w { z26.s }, p1, [x25]\n"
+ "st1w { z27.s }, p1, [x24]\n"
+ "st1w { z28.s }, p1, [x23]\n"
"65:" // Height 5: Writeback done
"decw x13\n"
"cmp x13, XZR\n"
"bgt 54b\n"
"b 106f\n"
"66:" // Height 6
- "mov x14, %x[bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x11, %x[bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x10, %x[output_ptr]\n"
"67:" // Height 6: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.s, x20, x13\n"
- "cbz x14, 68f\n"
- "ld1w { z24.s }, p2/Z, [x14]\n"
+ "mov x19, #0x0\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x11, 68f\n"
+ "ld1w { z24.s }, p2/Z, [x11]\n"
"mov z25.d, z24.d\n"
+ "addvl x11, x11, #1\n"
"mov z26.d, z24.d\n"
- "addvl x14, x14, #1\n"
"mov z27.d, z24.d\n"
"mov z28.d, z24.d\n"
"mov z29.d, z24.d\n"
"b 70f\n"
"68:" // Height 6: no bias
"tbz %x[flags], #0, 69f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "ld1w { z24.s }, p1/Z, [x11]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z25.s }, p1/Z, [x27]\n"
- "ld1w { z26.s }, p1/Z, [x26]\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z27.s }, p1/Z, [x25]\n"
- "ld1w { z28.s }, p1/Z, [x24]\n"
- "ld1w { z29.s }, p1/Z, [x23]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z24.s }, p1/Z, [x10]\n"
+ "add x26, x10, x19, LSL #2\n"
+ "ld1w { z25.s }, p1/Z, [x26]\n"
+ "add x25, x26, x19, LSL #2\n"
+ "ld1w { z26.s }, p1/Z, [x25]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "ld1w { z27.s }, p1/Z, [x24]\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z28.s }, p1/Z, [x23]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z29.s }, p1/Z, [x22]\n"
"b 70f\n"
"69:" // Height 6: no accumulate
"mov z24.b, #0x0\n"
@@ -842,73 +842,73 @@ void sve_hybrid_fp32_mla_8x1VL (
"mov z28.b, #0x0\n"
"mov z29.b, #0x0\n"
"70:" // Height 6: setup done
- "mov x10, #0x0\n"
+ "mov x9, #0x0\n"
"71:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w28, [x20, x9, LSL #0x2]\n"
"tbz %x[flags], #3, 72f\n"
- "ldr x21, [%x[input_ptr], x10, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x28, [x21, #0x0]\n"
- "ldr x27, [x21, #0x8]\n"
- "ldr x26, [x21, #0x10]\n"
- "ldr x25, [x21, #0x18]\n"
- "ldr x24, [x21, #0x20]\n"
- "ldr x23, [x21, #0x28]\n"
- "cbnz x10, 73f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x28, x28, x20, LSL #2\n"
- "add x27, x27, x20, LSL #2\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x27, [x20, #0x0]\n"
+ "ldr x26, [x20, #0x8]\n"
+ "ldr x25, [x20, #0x10]\n"
+ "ldr x24, [x20, #0x18]\n"
+ "ldr x23, [x20, #0x20]\n"
+ "ldr x22, [x20, #0x28]\n"
+ "cbnz x9, 73f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x27, x27, x19, LSL #2\n"
+ "add x26, x26, x19, LSL #2\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
"b 73f\n"
"72:" // Height 6: setup direct input
- "mov x28, %x[input_ptr]\n"
- "add x27, x28, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "mov x27, %x[input_ptr]\n"
+ "add x26, x27, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"73:" // Height 6: input setup done
- "cmp x9, #0x4\n"
+ "cmp x28, #0x4\n"
"ble 75f\n"
"74:" // Height 6: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x9\n"
- "ld1rqw { z0.s }, p0/Z, [x28]\n"
- "ld1rqw { z1.s }, p0/Z, [x27]\n"
- "sub x9, x9, #0x4\n"
- "ld1rqw { z2.s }, p0/Z, [x26]\n"
- "ld1rqw { z3.s }, p0/Z, [x25]\n"
- "cmp x9, #0x4\n"
- "add x28, x28, #0x10\n"
- "ld1rqw { z4.s }, p0/Z, [x24]\n"
- "ld1rqw { z5.s }, p0/Z, [x23]\n"
- "add x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
"ld1w { z8.s }, p2/Z, [x12]\n"
+ "whilelt p0.s, XZR, x28\n"
+ "ld1w { z9.s }, p2/Z, [x12, #1, MUL VL]\n"
+ "sub x28, x28, #0x4\n"
+ "ld1rqw { z0.s }, p0/Z, [x27]\n"
"fmla z24.s, z8.s, z0.s[0]\n"
+ "ld1rqw { z1.s }, p0/Z, [x26]\n"
+ "cmp x28, #0x4\n"
"fmla z25.s, z8.s, z1.s[0]\n"
- "ld1w { z9.s }, p2/Z, [x12, #1, MUL VL]\n"
+ "ld1rqw { z2.s }, p0/Z, [x25]\n"
+ "add x27, x27, #0x10\n"
+ "fmla z24.s, z9.s, z0.s[1]\n"
+ "ld1rqw { z3.s }, p0/Z, [x24]\n"
+ "add x26, x26, #0x10\n"
"fmla z26.s, z8.s, z2.s[0]\n"
- "fmla z27.s, z8.s, z3.s[0]\n"
- "ld1w { z10.s }, p2/Z, [x12, #2, MUL VL]\n"
- "ld1w { z11.s }, p2/Z, [x12, #3, MUL VL]\n"
- "fmla z28.s, z8.s, z4.s[0]\n"
- "fmla z29.s, z8.s, z5.s[0]\n"
+ "ld1rqw { z4.s }, p0/Z, [x23]\n"
"add x25, x25, #0x10\n"
+ "fmla z27.s, z8.s, z3.s[0]\n"
+ "ld1rqw { z5.s }, p0/Z, [x22]\n"
"add x24, x24, #0x10\n"
- "fmla z24.s, z9.s, z0.s[1]\n"
"fmla z25.s, z9.s, z1.s[1]\n"
+ "ld1w { z10.s }, p2/Z, [x12, #2, MUL VL]\n"
"add x23, x23, #0x10\n"
+ "fmla z28.s, z8.s, z4.s[0]\n"
+ "ld1w { z11.s }, p2/Z, [x12, #3, MUL VL]\n"
+ "add x22, x22, #0x10\n"
+ "fmla z29.s, z8.s, z5.s[0]\n"
"addvl x12, x12, #4\n"
"fmla z26.s, z9.s, z2.s[1]\n"
"fmla z27.s, z9.s, z3.s[1]\n"
+ "fmla z24.s, z10.s, z0.s[2]\n"
"fmla z28.s, z9.s, z4.s[1]\n"
"fmla z29.s, z9.s, z5.s[1]\n"
- "fmla z24.s, z10.s, z0.s[2]\n"
"fmla z25.s, z10.s, z1.s[2]\n"
"fmla z26.s, z10.s, z2.s[2]\n"
"fmla z27.s, z10.s, z3.s[2]\n"
@@ -922,105 +922,105 @@ void sve_hybrid_fp32_mla_8x1VL (
"fmla z29.s, z11.s, z5.s[3]\n"
"bgt 74b\n"
"75:" // Height 6: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x9\n"
- "ld1rqw { z0.s }, p0/Z, [x28]\n"
- "ld1rqw { z1.s }, p0/Z, [x27]\n"
- "subs x9, x9, #0x1\n"
- "ld1rqw { z2.s }, p0/Z, [x26]\n"
- "ld1rqw { z3.s }, p0/Z, [x25]\n"
- "ld1rqw { z4.s }, p0/Z, [x24]\n"
- "ld1rqw { z5.s }, p0/Z, [x23]\n"
"ld1w { z8.s }, p2/Z, [x12]\n"
+ "whilelt p0.s, XZR, x28\n"
+ "subs x28, x28, #0x1\n"
+ "ld1rqw { z0.s }, p0/Z, [x27]\n"
"fmla z24.s, z8.s, z0.s[0]\n"
- "fmla z25.s, z8.s, z1.s[0]\n"
+ "ld1rqw { z1.s }, p0/Z, [x26]\n"
"addvl x12, x12, #1\n"
+ "fmla z25.s, z8.s, z1.s[0]\n"
+ "ld1rqw { z2.s }, p0/Z, [x25]\n"
+ "ld1rqw { z3.s }, p0/Z, [x24]\n"
"fmla z26.s, z8.s, z2.s[0]\n"
+ "ld1rqw { z4.s }, p0/Z, [x23]\n"
"fmla z27.s, z8.s, z3.s[0]\n"
+ "ld1rqw { z5.s }, p0/Z, [x22]\n"
"fmla z28.s, z8.s, z4.s[0]\n"
"fmla z29.s, z8.s, z5.s[0]\n"
"ble 76f\n"
"ld1w { z9.s }, p2/Z, [x12]\n"
- "subs x9, x9, #0x1\n"
"fmla z24.s, z9.s, z0.s[1]\n"
+ "subs x28, x28, #0x1\n"
"fmla z25.s, z9.s, z1.s[1]\n"
+ "addvl x12, x12, #1\n"
"fmla z26.s, z9.s, z2.s[1]\n"
"fmla z27.s, z9.s, z3.s[1]\n"
- "addvl x12, x12, #1\n"
"fmla z28.s, z9.s, z4.s[1]\n"
"fmla z29.s, z9.s, z5.s[1]\n"
"ble 76f\n"
"ld1w { z10.s }, p2/Z, [x12]\n"
- "subs x9, x9, #0x1\n"
"fmla z24.s, z10.s, z0.s[2]\n"
+ "subs x28, x28, #0x1\n"
"fmla z25.s, z10.s, z1.s[2]\n"
+ "addvl x12, x12, #1\n"
"fmla z26.s, z10.s, z2.s[2]\n"
"fmla z27.s, z10.s, z3.s[2]\n"
- "addvl x12, x12, #1\n"
"fmla z28.s, z10.s, z4.s[2]\n"
"fmla z29.s, z10.s, z5.s[2]\n"
"ble 76f\n"
"ld1w { z11.s }, p2/Z, [x12]\n"
"fmla z24.s, z11.s, z0.s[3]\n"
- "fmla z25.s, z11.s, z1.s[3]\n"
"addvl x12, x12, #1\n"
+ "fmla z25.s, z11.s, z1.s[3]\n"
"fmla z26.s, z11.s, z2.s[3]\n"
"fmla z27.s, z11.s, z3.s[3]\n"
"fmla z28.s, z11.s, z4.s[3]\n"
"fmla z29.s, z11.s, z5.s[3]\n"
"76:" // Height 6: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x10, x10, #0x1\n"
- "cmp x10, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x9, x9, #0x1\n"
+ "cmp x9, x19\n"
"bne 71b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x26, x10, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"tbz %x[flags], #1, 77f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p2/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z16.s }, p2/Z, [x20]\n"
- "fmin z24.s, p2/M, z24.s, z17.s\n"
- "fmin z25.s, p2/M, z25.s, z17.s\n"
- "fmin z26.s, p2/M, z26.s, z17.s\n"
- "fmin z27.s, p2/M, z27.s, z17.s\n"
- "fmin z28.s, p2/M, z28.s, z17.s\n"
- "fmin z29.s, p2/M, z29.s, z17.s\n"
- "fmax z24.s, p2/M, z24.s, z16.s\n"
- "fmax z25.s, p2/M, z25.s, z16.s\n"
- "fmax z26.s, p2/M, z26.s, z16.s\n"
- "fmax z27.s, p2/M, z27.s, z16.s\n"
- "fmax z28.s, p2/M, z28.s, z16.s\n"
- "fmax z29.s, p2/M, z29.s, z16.s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p2/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z16.s }, p2/Z, [x19]\n"
+ "fmin z24.s, p2/M, z24.s, z16.s\n"
+ "fmin z25.s, p2/M, z25.s, z16.s\n"
+ "fmin z26.s, p2/M, z26.s, z16.s\n"
+ "fmin z27.s, p2/M, z27.s, z16.s\n"
+ "fmin z28.s, p2/M, z28.s, z16.s\n"
+ "fmax z24.s, p2/M, z24.s, z17.s\n"
+ "fmax z25.s, p2/M, z25.s, z17.s\n"
+ "fmax z26.s, p2/M, z26.s, z17.s\n"
+ "fmax z27.s, p2/M, z27.s, z17.s\n"
+ "fmax z28.s, p2/M, z28.s, z17.s\n"
+ "fmin z29.s, p2/M, z29.s, z16.s\n"
+ "fmax z29.s, p2/M, z29.s, z17.s\n"
"77:" // Height 6: No activation
- "st1w { z24.s }, p1, [x11]\n"
- "addvl x11, x11, #1\n"
- "st1w { z25.s }, p1, [x27]\n"
- "st1w { z26.s }, p1, [x26]\n"
- "st1w { z27.s }, p1, [x25]\n"
- "st1w { z28.s }, p1, [x24]\n"
- "st1w { z29.s }, p1, [x23]\n"
+ "st1w { z24.s }, p1, [x10]\n"
+ "addvl x10, x10, #1\n"
+ "st1w { z25.s }, p1, [x26]\n"
+ "st1w { z26.s }, p1, [x25]\n"
+ "st1w { z27.s }, p1, [x24]\n"
+ "st1w { z28.s }, p1, [x23]\n"
+ "st1w { z29.s }, p1, [x22]\n"
"78:" // Height 6: Writeback done
"decw x13\n"
"cmp x13, XZR\n"
"bgt 67b\n"
"b 106f\n"
"79:" // Height 7
- "mov x14, %x[bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x11, %x[bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x10, %x[output_ptr]\n"
"80:" // Height 7: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.s, x20, x13\n"
- "cbz x14, 81f\n"
- "ld1w { z24.s }, p2/Z, [x14]\n"
+ "mov x19, #0x0\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x11, 81f\n"
+ "ld1w { z24.s }, p2/Z, [x11]\n"
"mov z25.d, z24.d\n"
+ "addvl x11, x11, #1\n"
"mov z26.d, z24.d\n"
- "addvl x14, x14, #1\n"
"mov z27.d, z24.d\n"
"mov z28.d, z24.d\n"
"mov z29.d, z24.d\n"
@@ -1028,20 +1028,20 @@ void sve_hybrid_fp32_mla_8x1VL (
"b 83f\n"
"81:" // Height 7: no bias
"tbz %x[flags], #0, 82f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "ld1w { z24.s }, p1/Z, [x11]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z25.s }, p1/Z, [x27]\n"
- "ld1w { z26.s }, p1/Z, [x26]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "ld1w { z27.s }, p1/Z, [x25]\n"
- "ld1w { z28.s }, p1/Z, [x24]\n"
- "ld1w { z29.s }, p1/Z, [x23]\n"
- "ld1w { z30.s }, p1/Z, [x22]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z24.s }, p1/Z, [x10]\n"
+ "add x26, x10, x19, LSL #2\n"
+ "ld1w { z25.s }, p1/Z, [x26]\n"
+ "add x25, x26, x19, LSL #2\n"
+ "ld1w { z26.s }, p1/Z, [x25]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "ld1w { z27.s }, p1/Z, [x24]\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z28.s }, p1/Z, [x23]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z29.s }, p1/Z, [x22]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z30.s }, p1/Z, [x21]\n"
"b 83f\n"
"82:" // Height 7: no accumulate
"mov z24.b, #0x0\n"
@@ -1052,75 +1052,75 @@ void sve_hybrid_fp32_mla_8x1VL (
"mov z29.b, #0x0\n"
"mov z30.b, #0x0\n"
"83:" // Height 7: setup done
- "mov x10, #0x0\n"
+ "mov x9, #0x0\n"
"84:" // Height 7: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w28, [x20, x9, LSL #0x2]\n"
"tbz %x[flags], #3, 85f\n"
- "ldr x21, [%x[input_ptr], x10, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x28, [x21, #0x0]\n"
- "ldr x27, [x21, #0x8]\n"
- "ldr x26, [x21, #0x10]\n"
- "ldr x25, [x21, #0x18]\n"
- "ldr x24, [x21, #0x20]\n"
- "ldr x23, [x21, #0x28]\n"
- "ldr x22, [x21, #0x30]\n"
- "cbnz x10, 86f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x28, x28, x20, LSL #2\n"
- "add x27, x27, x20, LSL #2\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x22, x22, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x27, [x20, #0x0]\n"
+ "ldr x26, [x20, #0x8]\n"
+ "ldr x25, [x20, #0x10]\n"
+ "ldr x24, [x20, #0x18]\n"
+ "ldr x23, [x20, #0x20]\n"
+ "ldr x22, [x20, #0x28]\n"
+ "ldr x21, [x20, #0x30]\n"
+ "cbnz x9, 86f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x27, x27, x19, LSL #2\n"
+ "add x26, x26, x19, LSL #2\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
"b 86f\n"
"85:" // Height 7: setup direct input
- "mov x28, %x[input_ptr]\n"
- "add x27, x28, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "mov x27, %x[input_ptr]\n"
+ "add x26, x27, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"86:" // Height 7: input setup done
- "cmp x9, #0x4\n"
+ "cmp x28, #0x4\n"
"ble 88f\n"
"87:" // Height 7: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x9\n"
- "ld1rqw { z0.s }, p0/Z, [x28]\n"
- "ld1rqw { z1.s }, p0/Z, [x27]\n"
- "sub x9, x9, #0x4\n"
- "ld1rqw { z2.s }, p0/Z, [x26]\n"
- "ld1rqw { z3.s }, p0/Z, [x25]\n"
- "cmp x9, #0x4\n"
- "add x28, x28, #0x10\n"
- "ld1rqw { z4.s }, p0/Z, [x24]\n"
- "ld1rqw { z5.s }, p0/Z, [x23]\n"
- "add x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
- "ld1rqw { z6.s }, p0/Z, [x22]\n"
"ld1w { z8.s }, p2/Z, [x12]\n"
+ "whilelt p0.s, XZR, x28\n"
+ "ld1w { z9.s }, p2/Z, [x12, #1, MUL VL]\n"
+ "sub x28, x28, #0x4\n"
+ "ld1rqw { z0.s }, p0/Z, [x27]\n"
"fmla z24.s, z8.s, z0.s[0]\n"
+ "ld1rqw { z1.s }, p0/Z, [x26]\n"
+ "cmp x28, #0x4\n"
"fmla z25.s, z8.s, z1.s[0]\n"
+ "ld1rqw { z2.s }, p0/Z, [x25]\n"
+ "add x27, x27, #0x10\n"
+ "fmla z24.s, z9.s, z0.s[1]\n"
+ "ld1rqw { z3.s }, p0/Z, [x24]\n"
+ "add x26, x26, #0x10\n"
"fmla z26.s, z8.s, z2.s[0]\n"
+ "ld1rqw { z4.s }, p0/Z, [x23]\n"
+ "add x25, x25, #0x10\n"
"fmla z27.s, z8.s, z3.s[0]\n"
- "ld1w { z9.s }, p2/Z, [x12, #1, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1rqw { z5.s }, p0/Z, [x22]\n"
+ "add x24, x24, #0x10\n"
+ "fmla z25.s, z9.s, z1.s[1]\n"
+ "ld1rqw { z6.s }, p0/Z, [x21]\n"
+ "add x23, x23, #0x10\n"
"fmla z28.s, z8.s, z4.s[0]\n"
+ "ld1w { z10.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "add x22, x22, #0x10\n"
"fmla z29.s, z8.s, z5.s[0]\n"
"ld1w { z11.s }, p2/Z, [x12, #3, MUL VL]\n"
- "add x25, x25, #0x10\n"
+ "add x21, x21, #0x10\n"
"fmla z30.s, z8.s, z6.s[0]\n"
- "fmla z24.s, z9.s, z0.s[1]\n"
- "add x24, x24, #0x10\n"
- "add x23, x23, #0x10\n"
- "fmla z25.s, z9.s, z1.s[1]\n"
- "fmla z26.s, z9.s, z2.s[1]\n"
- "add x22, x22, #0x10\n"
"addvl x12, x12, #4\n"
+ "fmla z26.s, z9.s, z2.s[1]\n"
"fmla z27.s, z9.s, z3.s[1]\n"
"fmla z28.s, z9.s, z4.s[1]\n"
"fmla z29.s, z9.s, z5.s[1]\n"
@@ -1141,117 +1141,117 @@ void sve_hybrid_fp32_mla_8x1VL (
"fmla z30.s, z11.s, z6.s[3]\n"
"bgt 87b\n"
"88:" // Height 7: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x9\n"
- "ld1rqw { z0.s }, p0/Z, [x28]\n"
- "ld1rqw { z1.s }, p0/Z, [x27]\n"
- "subs x9, x9, #0x1\n"
- "ld1rqw { z2.s }, p0/Z, [x26]\n"
- "ld1rqw { z3.s }, p0/Z, [x25]\n"
- "ld1rqw { z4.s }, p0/Z, [x24]\n"
- "ld1rqw { z5.s }, p0/Z, [x23]\n"
- "ld1rqw { z6.s }, p0/Z, [x22]\n"
"ld1w { z8.s }, p2/Z, [x12]\n"
+ "whilelt p0.s, XZR, x28\n"
+ "subs x28, x28, #0x1\n"
+ "ld1rqw { z0.s }, p0/Z, [x27]\n"
"fmla z24.s, z8.s, z0.s[0]\n"
+ "ld1rqw { z1.s }, p0/Z, [x26]\n"
+ "addvl x12, x12, #1\n"
"fmla z25.s, z8.s, z1.s[0]\n"
+ "ld1rqw { z2.s }, p0/Z, [x25]\n"
+ "ld1rqw { z3.s }, p0/Z, [x24]\n"
"fmla z26.s, z8.s, z2.s[0]\n"
+ "ld1rqw { z4.s }, p0/Z, [x23]\n"
"fmla z27.s, z8.s, z3.s[0]\n"
- "addvl x12, x12, #1\n"
+ "ld1rqw { z5.s }, p0/Z, [x22]\n"
+ "ld1rqw { z6.s }, p0/Z, [x21]\n"
"fmla z28.s, z8.s, z4.s[0]\n"
"fmla z29.s, z8.s, z5.s[0]\n"
"fmla z30.s, z8.s, z6.s[0]\n"
"ble 89f\n"
"ld1w { z9.s }, p2/Z, [x12]\n"
- "subs x9, x9, #0x1\n"
"fmla z24.s, z9.s, z0.s[1]\n"
+ "subs x28, x28, #0x1\n"
"fmla z25.s, z9.s, z1.s[1]\n"
+ "addvl x12, x12, #1\n"
"fmla z26.s, z9.s, z2.s[1]\n"
"fmla z27.s, z9.s, z3.s[1]\n"
- "addvl x12, x12, #1\n"
"fmla z28.s, z9.s, z4.s[1]\n"
"fmla z29.s, z9.s, z5.s[1]\n"
"fmla z30.s, z9.s, z6.s[1]\n"
"ble 89f\n"
"ld1w { z10.s }, p2/Z, [x12]\n"
- "subs x9, x9, #0x1\n"
"fmla z24.s, z10.s, z0.s[2]\n"
+ "subs x28, x28, #0x1\n"
"fmla z25.s, z10.s, z1.s[2]\n"
+ "addvl x12, x12, #1\n"
"fmla z26.s, z10.s, z2.s[2]\n"
"fmla z27.s, z10.s, z3.s[2]\n"
- "addvl x12, x12, #1\n"
"fmla z28.s, z10.s, z4.s[2]\n"
"fmla z29.s, z10.s, z5.s[2]\n"
"fmla z30.s, z10.s, z6.s[2]\n"
"ble 89f\n"
"ld1w { z11.s }, p2/Z, [x12]\n"
"fmla z24.s, z11.s, z0.s[3]\n"
- "fmla z25.s, z11.s, z1.s[3]\n"
"addvl x12, x12, #1\n"
+ "fmla z25.s, z11.s, z1.s[3]\n"
"fmla z26.s, z11.s, z2.s[3]\n"
"fmla z27.s, z11.s, z3.s[3]\n"
"fmla z28.s, z11.s, z4.s[3]\n"
"fmla z29.s, z11.s, z5.s[3]\n"
"fmla z30.s, z11.s, z6.s[3]\n"
"89:" // Height 7: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x10, x10, #0x1\n"
- "cmp x10, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x9, x9, #0x1\n"
+ "cmp x9, x19\n"
"bne 84b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x26, x10, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"tbz %x[flags], #1, 90f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p2/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z16.s }, p2/Z, [x20]\n"
- "fmin z24.s, p2/M, z24.s, z17.s\n"
- "fmin z25.s, p2/M, z25.s, z17.s\n"
- "fmin z26.s, p2/M, z26.s, z17.s\n"
- "fmin z27.s, p2/M, z27.s, z17.s\n"
- "fmin z28.s, p2/M, z28.s, z17.s\n"
- "fmin z29.s, p2/M, z29.s, z17.s\n"
- "fmin z30.s, p2/M, z30.s, z17.s\n"
- "fmax z24.s, p2/M, z24.s, z16.s\n"
- "fmax z25.s, p2/M, z25.s, z16.s\n"
- "fmax z26.s, p2/M, z26.s, z16.s\n"
- "fmax z27.s, p2/M, z27.s, z16.s\n"
- "fmax z28.s, p2/M, z28.s, z16.s\n"
- "fmax z29.s, p2/M, z29.s, z16.s\n"
- "fmax z30.s, p2/M, z30.s, z16.s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p2/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z16.s }, p2/Z, [x19]\n"
+ "fmin z24.s, p2/M, z24.s, z16.s\n"
+ "fmin z25.s, p2/M, z25.s, z16.s\n"
+ "fmin z26.s, p2/M, z26.s, z16.s\n"
+ "fmin z27.s, p2/M, z27.s, z16.s\n"
+ "fmin z28.s, p2/M, z28.s, z16.s\n"
+ "fmax z24.s, p2/M, z24.s, z17.s\n"
+ "fmax z25.s, p2/M, z25.s, z17.s\n"
+ "fmax z26.s, p2/M, z26.s, z17.s\n"
+ "fmax z27.s, p2/M, z27.s, z17.s\n"
+ "fmax z28.s, p2/M, z28.s, z17.s\n"
+ "fmin z29.s, p2/M, z29.s, z16.s\n"
+ "fmin z30.s, p2/M, z30.s, z16.s\n"
+ "fmax z29.s, p2/M, z29.s, z17.s\n"
+ "fmax z30.s, p2/M, z30.s, z17.s\n"
"90:" // Height 7: No activation
- "st1w { z24.s }, p1, [x11]\n"
- "addvl x11, x11, #1\n"
- "st1w { z25.s }, p1, [x27]\n"
- "st1w { z26.s }, p1, [x26]\n"
- "st1w { z27.s }, p1, [x25]\n"
- "st1w { z28.s }, p1, [x24]\n"
- "st1w { z29.s }, p1, [x23]\n"
- "st1w { z30.s }, p1, [x22]\n"
+ "st1w { z24.s }, p1, [x10]\n"
+ "addvl x10, x10, #1\n"
+ "st1w { z25.s }, p1, [x26]\n"
+ "st1w { z26.s }, p1, [x25]\n"
+ "st1w { z27.s }, p1, [x24]\n"
+ "st1w { z28.s }, p1, [x23]\n"
+ "st1w { z29.s }, p1, [x22]\n"
+ "st1w { z30.s }, p1, [x21]\n"
"91:" // Height 7: Writeback done
"decw x13\n"
"cmp x13, XZR\n"
"bgt 80b\n"
"b 106f\n"
"92:" // Height 8
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x20\n"
- "mov x14, %x[bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x11, %x[bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "mov x10, %x[output_ptr]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x19, #0x20\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"93:" // Height 8: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.s, x20, x13\n"
- "cbz x14, 94f\n"
- "ld1w { z24.s }, p2/Z, [x14]\n"
+ "mov x19, #0x0\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x11, 94f\n"
+ "ld1w { z24.s }, p2/Z, [x11]\n"
"mov z25.d, z24.d\n"
+ "addvl x11, x11, #1\n"
"mov z26.d, z24.d\n"
- "addvl x14, x14, #1\n"
"mov z27.d, z24.d\n"
"mov z28.d, z24.d\n"
"mov z29.d, z24.d\n"
@@ -1260,22 +1260,22 @@ void sve_hybrid_fp32_mla_8x1VL (
"b 96f\n"
"94:" // Height 8: no bias
"tbz %x[flags], #0, 95f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "ld1w { z24.s }, p1/Z, [x11]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z25.s }, p1/Z, [x27]\n"
- "ld1w { z26.s }, p1/Z, [x26]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "ld1w { z27.s }, p1/Z, [x25]\n"
- "ld1w { z28.s }, p1/Z, [x24]\n"
- "add x21, x22, x20, LSL #2\n"
- "ld1w { z29.s }, p1/Z, [x23]\n"
- "ld1w { z30.s }, p1/Z, [x22]\n"
- "ld1w { z31.s }, p1/Z, [x21]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z24.s }, p1/Z, [x10]\n"
+ "add x26, x10, x19, LSL #2\n"
+ "ld1w { z25.s }, p1/Z, [x26]\n"
+ "add x25, x26, x19, LSL #2\n"
+ "ld1w { z26.s }, p1/Z, [x25]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "ld1w { z27.s }, p1/Z, [x24]\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z28.s }, p1/Z, [x23]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z29.s }, p1/Z, [x22]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z30.s }, p1/Z, [x21]\n"
+ "add x20, x21, x19, LSL #2\n"
+ "ld1w { z31.s }, p1/Z, [x20]\n"
"b 96f\n"
"95:" // Height 8: no accumulate
"mov z24.b, #0x0\n"
@@ -1287,80 +1287,80 @@ void sve_hybrid_fp32_mla_8x1VL (
"mov z30.b, #0x0\n"
"mov z31.b, #0x0\n"
"96:" // Height 8: setup done
- "mov x10, #0x0\n"
+ "mov x9, #0x0\n"
"97:" // Height 8: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w28, [x20, x9, LSL #0x2]\n"
"tbz %x[flags], #3, 98f\n"
- "ldr x21, [%x[input_ptr], x10, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x28, [x21, #0x0]\n"
- "ldr x27, [x21, #0x8]\n"
- "ldr x26, [x21, #0x10]\n"
- "ldr x25, [x21, #0x18]\n"
- "ldr x24, [x21, #0x20]\n"
- "ldr x23, [x21, #0x28]\n"
- "ldr x22, [x21, #0x30]\n"
- "ldr x21, [x21, #0x38]\n"
- "cbnz x10, 99f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x28, x28, x20, LSL #2\n"
- "add x27, x27, x20, LSL #2\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x22, x22, x20, LSL #2\n"
- "add x21, x21, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x27, [x20, #0x0]\n"
+ "ldr x26, [x20, #0x8]\n"
+ "ldr x25, [x20, #0x10]\n"
+ "ldr x24, [x20, #0x18]\n"
+ "ldr x23, [x20, #0x20]\n"
+ "ldr x22, [x20, #0x28]\n"
+ "ldr x21, [x20, #0x30]\n"
+ "ldr x20, [x20, #0x38]\n"
+ "cbnz x9, 99f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x27, x27, x19, LSL #2\n"
+ "add x26, x26, x19, LSL #2\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "add x20, x20, x19, LSL #2\n"
"b 99f\n"
"98:" // Height 8: setup direct input
- "mov x28, %x[input_ptr]\n"
- "add x27, x28, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "mov x27, %x[input_ptr]\n"
+ "add x26, x27, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"99:" // Height 8: input setup done
- "cmp x9, #0x4\n"
+ "cmp x28, #0x4\n"
"ble 101f\n"
"100:" // Height 8: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x9\n"
- "ld1rqw { z0.s }, p0/Z, [x28]\n"
- "ld1rqw { z1.s }, p0/Z, [x27]\n"
- "sub x9, x9, #0x4\n"
- "ld1rqw { z2.s }, p0/Z, [x26]\n"
- "ld1rqw { z3.s }, p0/Z, [x25]\n"
- "cmp x9, #0x4\n"
- "add x28, x28, #0x10\n"
- "ld1rqw { z4.s }, p0/Z, [x24]\n"
- "ld1rqw { z5.s }, p0/Z, [x23]\n"
- "add x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
- "ld1rqw { z6.s }, p0/Z, [x22]\n"
- "ld1rqw { z7.s }, p0/Z, [x21]\n"
- "add x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
"ld1w { z8.s }, p2/Z, [x12]\n"
+ "whilelt p0.s, XZR, x28\n"
+ "ld1w { z9.s }, p2/Z, [x12, #1, MUL VL]\n"
+ "sub x28, x28, #0x4\n"
+ "ld1rqw { z0.s }, p0/Z, [x27]\n"
"fmla z24.s, z8.s, z0.s[0]\n"
+ "ld1rqw { z1.s }, p0/Z, [x26]\n"
+ "cmp x28, #0x4\n"
"fmla z25.s, z8.s, z1.s[0]\n"
- "ld1w { z9.s }, p2/Z, [x12, #1, MUL VL]\n"
+ "ld1rqw { z2.s }, p0/Z, [x25]\n"
+ "add x27, x27, #0x10\n"
+ "fmla z24.s, z9.s, z0.s[1]\n"
+ "ld1rqw { z3.s }, p0/Z, [x24]\n"
+ "add x26, x26, #0x10\n"
"fmla z26.s, z8.s, z2.s[0]\n"
+ "ld1rqw { z4.s }, p0/Z, [x23]\n"
+ "add x25, x25, #0x10\n"
"fmla z27.s, z8.s, z3.s[0]\n"
- "ld1w { z10.s }, p2/Z, [x12, #2, MUL VL]\n"
- "ld1w { z11.s }, p2/Z, [x12, #3, MUL VL]\n"
- "fmla z28.s, z8.s, z4.s[0]\n"
- "fmla z29.s, z8.s, z5.s[0]\n"
+ "ld1rqw { z5.s }, p0/Z, [x22]\n"
+ "add x24, x24, #0x10\n"
+ "fmla z25.s, z9.s, z1.s[1]\n"
+ "ld1rqw { z6.s }, p0/Z, [x21]\n"
"add x23, x23, #0x10\n"
+ "fmla z28.s, z8.s, z4.s[0]\n"
+ "ld1rqw { z7.s }, p0/Z, [x20]\n"
"add x22, x22, #0x10\n"
+ "fmla z29.s, z8.s, z5.s[0]\n"
+ "ld1w { z10.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "add x21, x21, #0x10\n"
"fmla z30.s, z8.s, z6.s[0]\n"
+ "ld1w { z11.s }, p2/Z, [x12, #3, MUL VL]\n"
+ "add x20, x20, #0x10\n"
"fmla z31.s, z8.s, z7.s[0]\n"
- "add x21, x21, #0x10\n"
"addvl x12, x12, #4\n"
- "fmla z24.s, z9.s, z0.s[1]\n"
- "fmla z25.s, z9.s, z1.s[1]\n"
"fmla z26.s, z9.s, z2.s[1]\n"
"fmla z27.s, z9.s, z3.s[1]\n"
"fmla z28.s, z9.s, z4.s[1]\n"
@@ -1385,46 +1385,46 @@ void sve_hybrid_fp32_mla_8x1VL (
"fmla z31.s, z11.s, z7.s[3]\n"
"bgt 100b\n"
"101:" // Height 8: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x9\n"
- "ld1rqw { z0.s }, p0/Z, [x28]\n"
- "ld1rqw { z1.s }, p0/Z, [x27]\n"
- "subs x9, x9, #0x1\n"
- "ld1rqw { z2.s }, p0/Z, [x26]\n"
- "ld1rqw { z3.s }, p0/Z, [x25]\n"
- "ld1rqw { z4.s }, p0/Z, [x24]\n"
- "ld1rqw { z5.s }, p0/Z, [x23]\n"
- "ld1rqw { z6.s }, p0/Z, [x22]\n"
- "ld1rqw { z7.s }, p0/Z, [x21]\n"
"ld1w { z8.s }, p2/Z, [x12]\n"
+ "whilelt p0.s, XZR, x28\n"
+ "subs x28, x28, #0x1\n"
+ "ld1rqw { z0.s }, p0/Z, [x27]\n"
"fmla z24.s, z8.s, z0.s[0]\n"
- "fmla z25.s, z8.s, z1.s[0]\n"
+ "ld1rqw { z1.s }, p0/Z, [x26]\n"
"addvl x12, x12, #1\n"
+ "fmla z25.s, z8.s, z1.s[0]\n"
+ "ld1rqw { z2.s }, p0/Z, [x25]\n"
+ "ld1rqw { z3.s }, p0/Z, [x24]\n"
"fmla z26.s, z8.s, z2.s[0]\n"
+ "ld1rqw { z4.s }, p0/Z, [x23]\n"
"fmla z27.s, z8.s, z3.s[0]\n"
+ "ld1rqw { z5.s }, p0/Z, [x22]\n"
+ "ld1rqw { z6.s }, p0/Z, [x21]\n"
"fmla z28.s, z8.s, z4.s[0]\n"
+ "ld1rqw { z7.s }, p0/Z, [x20]\n"
"fmla z29.s, z8.s, z5.s[0]\n"
"fmla z30.s, z8.s, z6.s[0]\n"
"fmla z31.s, z8.s, z7.s[0]\n"
"ble 102f\n"
"ld1w { z9.s }, p2/Z, [x12]\n"
- "subs x9, x9, #0x1\n"
"fmla z24.s, z9.s, z0.s[1]\n"
+ "subs x28, x28, #0x1\n"
"fmla z25.s, z9.s, z1.s[1]\n"
+ "addvl x12, x12, #1\n"
"fmla z26.s, z9.s, z2.s[1]\n"
"fmla z27.s, z9.s, z3.s[1]\n"
- "addvl x12, x12, #1\n"
"fmla z28.s, z9.s, z4.s[1]\n"
"fmla z29.s, z9.s, z5.s[1]\n"
"fmla z30.s, z9.s, z6.s[1]\n"
"fmla z31.s, z9.s, z7.s[1]\n"
"ble 102f\n"
"ld1w { z10.s }, p2/Z, [x12]\n"
- "subs x9, x9, #0x1\n"
"fmla z24.s, z10.s, z0.s[2]\n"
+ "subs x28, x28, #0x1\n"
"fmla z25.s, z10.s, z1.s[2]\n"
+ "addvl x12, x12, #1\n"
"fmla z26.s, z10.s, z2.s[2]\n"
"fmla z27.s, z10.s, z3.s[2]\n"
- "addvl x12, x12, #1\n"
"fmla z28.s, z10.s, z4.s[2]\n"
"fmla z29.s, z10.s, z5.s[2]\n"
"fmla z30.s, z10.s, z6.s[2]\n"
@@ -1432,8 +1432,8 @@ void sve_hybrid_fp32_mla_8x1VL (
"ble 102f\n"
"ld1w { z11.s }, p2/Z, [x12]\n"
"fmla z24.s, z11.s, z0.s[3]\n"
- "fmla z25.s, z11.s, z1.s[3]\n"
"addvl x12, x12, #1\n"
+ "fmla z25.s, z11.s, z1.s[3]\n"
"fmla z26.s, z11.s, z2.s[3]\n"
"fmla z27.s, z11.s, z3.s[3]\n"
"fmla z28.s, z11.s, z4.s[3]\n"
@@ -1441,69 +1441,69 @@ void sve_hybrid_fp32_mla_8x1VL (
"fmla z30.s, z11.s, z6.s[3]\n"
"fmla z31.s, z11.s, z7.s[3]\n"
"102:" // Height 8: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x10, x10, #0x1\n"
- "cmp x10, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x9, x9, #0x1\n"
+ "cmp x9, x19\n"
"bne 97b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x26, x10, x19, LSL #2\n"
+ "add x25, x26, x19, LSL #2\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"tbz %x[flags], #1, 103f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p2/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z16.s }, p2/Z, [x20]\n"
- "fmin z24.s, p2/M, z24.s, z17.s\n"
- "fmin z25.s, p2/M, z25.s, z17.s\n"
- "fmin z26.s, p2/M, z26.s, z17.s\n"
- "fmin z27.s, p2/M, z27.s, z17.s\n"
- "fmin z28.s, p2/M, z28.s, z17.s\n"
- "fmin z29.s, p2/M, z29.s, z17.s\n"
- "fmin z30.s, p2/M, z30.s, z17.s\n"
- "fmin z31.s, p2/M, z31.s, z17.s\n"
- "fmax z24.s, p2/M, z24.s, z16.s\n"
- "fmax z25.s, p2/M, z25.s, z16.s\n"
- "fmax z26.s, p2/M, z26.s, z16.s\n"
- "fmax z27.s, p2/M, z27.s, z16.s\n"
- "fmax z28.s, p2/M, z28.s, z16.s\n"
- "fmax z29.s, p2/M, z29.s, z16.s\n"
- "fmax z30.s, p2/M, z30.s, z16.s\n"
- "fmax z31.s, p2/M, z31.s, z16.s\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p2/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z16.s }, p2/Z, [x19]\n"
+ "fmin z24.s, p2/M, z24.s, z16.s\n"
+ "fmin z25.s, p2/M, z25.s, z16.s\n"
+ "fmin z26.s, p2/M, z26.s, z16.s\n"
+ "fmin z27.s, p2/M, z27.s, z16.s\n"
+ "fmin z28.s, p2/M, z28.s, z16.s\n"
+ "fmax z24.s, p2/M, z24.s, z17.s\n"
+ "fmax z25.s, p2/M, z25.s, z17.s\n"
+ "fmax z26.s, p2/M, z26.s, z17.s\n"
+ "fmax z27.s, p2/M, z27.s, z17.s\n"
+ "fmax z28.s, p2/M, z28.s, z17.s\n"
+ "fmin z29.s, p2/M, z29.s, z16.s\n"
+ "fmin z30.s, p2/M, z30.s, z16.s\n"
+ "fmin z31.s, p2/M, z31.s, z16.s\n"
+ "fmax z29.s, p2/M, z29.s, z17.s\n"
+ "fmax z30.s, p2/M, z30.s, z17.s\n"
+ "fmax z31.s, p2/M, z31.s, z17.s\n"
"103:" // Height 8: No activation
- "st1w { z24.s }, p1, [x11]\n"
- "addvl x11, x11, #1\n"
- "st1w { z25.s }, p1, [x27]\n"
- "st1w { z26.s }, p1, [x26]\n"
- "st1w { z27.s }, p1, [x25]\n"
- "st1w { z28.s }, p1, [x24]\n"
- "st1w { z29.s }, p1, [x23]\n"
- "st1w { z30.s }, p1, [x22]\n"
- "st1w { z31.s }, p1, [x21]\n"
+ "st1w { z24.s }, p1, [x10]\n"
+ "addvl x10, x10, #1\n"
+ "st1w { z25.s }, p1, [x26]\n"
+ "st1w { z26.s }, p1, [x25]\n"
+ "st1w { z27.s }, p1, [x24]\n"
+ "st1w { z28.s }, p1, [x23]\n"
+ "st1w { z29.s }, p1, [x22]\n"
+ "st1w { z30.s }, p1, [x21]\n"
+ "st1w { z31.s }, p1, [x20]\n"
"104:" // Height 8: Writeback done
"decw x13\n"
"cmp x13, XZR\n"
"bgt 93b\n"
"subs %x[M], %x[M], #0x8\n"
"beq 106f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 105f\n"
- "add x21, x21, #0x8\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x8\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"105:" // Update direct input
- "mov x20, #0x20\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x20\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"106:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z16", "z17", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z16", "z17", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32bf16fp32_mmla_4x6VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32bf16fp32_mmla_4x6VL/generic.cpp
index 8d05c1ffb3..43b0f54805 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32bf16fp32_mmla_4x6VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32bf16fp32_mmla_4x6VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -100,39 +100,39 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"cmp %x[M], #0x2\n"
"bgt 27f\n"
"beq 14f\n"
- "mov x10, %x[bias]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "mov x9, %x[bias]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x26, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "mov x20, #0x0\n"
- "whilelt p6.s, x20, x9\n"
- "incw x20\n"
- "whilelt p5.s, x20, x9\n"
- "incw x20\n"
- "whilelt p4.s, x20, x9\n"
- "incw x20\n"
- "whilelt p3.s, x20, x9\n"
- "incw x20\n"
- "whilelt p2.s, x20, x9\n"
- "incw x20\n"
- "whilelt p1.s, x20, x9\n"
- "cbz x10, 3f\n"
- "ld1w { z8.s }, p7/Z, [x10]\n"
- "ld1w { z9.s }, p7/Z, [x10, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p6.s, x19, x28\n"
+ "incw x19\n"
+ "whilelt p5.s, x19, x28\n"
+ "incw x19\n"
+ "whilelt p4.s, x19, x28\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x28\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x28\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x28\n"
+ "cbz x9, 3f\n"
+ "ld1w { z8.s }, p7/Z, [x9]\n"
+ "ld1w { z9.s }, p7/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z10.s }, p7/Z, [x9, #2, MUL VL]\n"
"zip2 z14.d, z8.d, z8.d\n"
"zip1 z8.d, z8.d, z8.d\n"
- "ld1w { z10.s }, p7/Z, [x10, #2, MUL VL]\n"
- "ld1w { z11.s }, p7/Z, [x10, #3, MUL VL]\n"
+ "ld1w { z11.s }, p7/Z, [x9, #3, MUL VL]\n"
+ "ld1w { z12.s }, p7/Z, [x9, #4, MUL VL]\n"
"zip2 z15.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
- "ld1w { z12.s }, p7/Z, [x10, #4, MUL VL]\n"
- "ld1w { z13.s }, p7/Z, [x10, #5, MUL VL]\n"
+ "ld1w { z13.s }, p7/Z, [x9, #5, MUL VL]\n"
"zip2 z16.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
+ "addvl x9, x9, #6\n"
"zip2 z17.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
- "addvl x10, x10, #6\n"
"zip2 z18.d, z12.d, z12.d\n"
"zip1 z12.d, z12.d, z12.d\n"
"zip2 z19.d, z13.d, z13.d\n"
@@ -140,16 +140,16 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"b 5f\n"
"3:" // Height 1: no bias
"tbz %x[flags], #0, 4f\n"
- "ld1w { z9.s }, p6/Z, [x27]\n"
- "ld1w { z10.s }, p5/Z, [x27, #1, MUL VL]\n"
+ "ld1w { z9.s }, p6/Z, [x26]\n"
+ "ld1w { z10.s }, p5/Z, [x26, #1, MUL VL]\n"
"zip1 z8.d, z9.d, z14.d\n"
+ "ld1w { z11.s }, p4/Z, [x26, #2, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x26, #3, MUL VL]\n"
"zip2 z14.d, z9.d, z14.d\n"
- "ld1w { z11.s }, p4/Z, [x27, #2, MUL VL]\n"
- "ld1w { z12.s }, p3/Z, [x27, #3, MUL VL]\n"
"zip1 z9.d, z10.d, z15.d\n"
+ "ld1w { z13.s }, p2/Z, [x26, #4, MUL VL]\n"
+ "ld1w { z20.s }, p1/Z, [x26, #5, MUL VL]\n"
"zip2 z15.d, z10.d, z15.d\n"
- "ld1w { z13.s }, p2/Z, [x27, #4, MUL VL]\n"
- "ld1w { z20.s }, p1/Z, [x27, #5, MUL VL]\n"
"zip1 z10.d, z11.d, z16.d\n"
"zip2 z16.d, z11.d, z16.d\n"
"zip1 z11.d, z12.d, z17.d\n"
@@ -173,94 +173,94 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"mov z18.b, #0x0\n"
"mov z19.b, #0x0\n"
"5:" // Height 1: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"6:" // Height 1: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w24, [x19, x25, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 7f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "cbnz x26, 8f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "cbnz x25, 8f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19, LSL #2\n"
"b 8f\n"
"7:" // Height 1: setup direct input
- "mov x24, %x[input_ptr]\n"
+ "mov x23, %x[input_ptr]\n"
"8:" // Height 1: input setup done
- "cmp x25, #0x4\n"
+ "cmp x24, #0x4\n"
"ble 10f\n"
"9:" // Height 1: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x25\n"
- "ld1rqw { z0.s }, p0/Z, [x24]\n"
+ "whilelt p0.s, XZR, x24\n"
+ "ld1rqw { z0.s }, p0/Z, [x23]\n"
".inst 0x658abc00 // bfcvt z0.h, p7/M, z0.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "ld1h { z4.h }, p7/Z, [x28]\n"
- "ld1h { z5.h }, p7/Z, [x28, #1, MUL VL]\n"
+ "ld1h { z4.h }, p7/Z, [x27]\n"
+ "ld1h { z5.h }, p7/Z, [x27, #1, MUL VL]\n"
".inst 0x6464e408 // bfmmla z8.s, z0.h, z4.h\n"
".inst 0x6465e40e // bfmmla z14.s, z0.h, z5.h\n"
- "ld1h { z6.h }, p7/Z, [x28, #2, MUL VL]\n"
- "ld1h { z7.h }, p7/Z, [x28, #3, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x27, #2, MUL VL]\n"
+ "ld1h { z7.h }, p7/Z, [x27, #3, MUL VL]\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
- "ld1h { z4.h }, p7/Z, [x28, #4, MUL VL]\n"
- "ld1h { z5.h }, p7/Z, [x28, #5, MUL VL]\n"
+ "ld1h { z4.h }, p7/Z, [x27, #4, MUL VL]\n"
+ "ld1h { z5.h }, p7/Z, [x27, #5, MUL VL]\n"
".inst 0x6464e40a // bfmmla z10.s, z0.h, z4.h\n"
".inst 0x6465e410 // bfmmla z16.s, z0.h, z5.h\n"
- "ld1h { z6.h }, p7/Z, [x28, #6, MUL VL]\n"
- "ld1h { z7.h }, p7/Z, [x28, #7, MUL VL]\n"
- "addvl x28, x28, #16\n"
+ "ld1h { z6.h }, p7/Z, [x27, #6, MUL VL]\n"
+ "ld1h { z7.h }, p7/Z, [x27, #7, MUL VL]\n"
+ "addvl x27, x27, #16\n"
".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
".inst 0x6467e411 // bfmmla z17.s, z0.h, z7.h\n"
- "ld1h { z4.h }, p7/Z, [x28, #-8, MUL VL]\n"
- "ld1h { z5.h }, p7/Z, [x28, #-7, MUL VL]\n"
- "sub x25, x25, #0x4\n"
- "ld1h { z6.h }, p7/Z, [x28, #-6, MUL VL]\n"
- "ld1h { z7.h }, p7/Z, [x28, #-5, MUL VL]\n"
- "cmp x25, #0x4\n"
+ "ld1h { z4.h }, p7/Z, [x27, #-8, MUL VL]\n"
+ "ld1h { z5.h }, p7/Z, [x27, #-7, MUL VL]\n"
+ "sub x24, x24, #0x4\n"
+ "ld1h { z6.h }, p7/Z, [x27, #-6, MUL VL]\n"
+ "ld1h { z7.h }, p7/Z, [x27, #-5, MUL VL]\n"
+ "cmp x24, #0x4\n"
".inst 0x6464e40c // bfmmla z12.s, z0.h, z4.h\n"
".inst 0x6465e412 // bfmmla z18.s, z0.h, z5.h\n"
".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
- "add x24, x24, #0x10\n"
- "addvl x28, x28, #-4\n"
+ "add x23, x23, #0x10\n"
+ "addvl x27, x27, #-4\n"
".inst 0x6467e413 // bfmmla z19.s, z0.h, z7.h\n"
"bgt 9b\n"
"10:" // Height 1: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x25\n"
- "ld1rqw { z0.s }, p0/Z, [x24]\n"
+ "whilelt p0.s, XZR, x24\n"
+ "ld1rqw { z0.s }, p0/Z, [x23]\n"
".inst 0x658abc00 // bfcvt z0.h, p7/M, z0.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "ld1h { z4.h }, p7/Z, [x28]\n"
- "ld1h { z5.h }, p7/Z, [x28, #1, MUL VL]\n"
+ "ld1h { z4.h }, p7/Z, [x27]\n"
+ "ld1h { z5.h }, p7/Z, [x27, #1, MUL VL]\n"
".inst 0x6464e408 // bfmmla z8.s, z0.h, z4.h\n"
".inst 0x6465e40e // bfmmla z14.s, z0.h, z5.h\n"
- "ld1h { z6.h }, p7/Z, [x28, #2, MUL VL]\n"
- "ld1h { z7.h }, p7/Z, [x28, #3, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x27, #2, MUL VL]\n"
+ "ld1h { z7.h }, p7/Z, [x27, #3, MUL VL]\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
- "ld1h { z4.h }, p7/Z, [x28, #4, MUL VL]\n"
- "ld1h { z5.h }, p7/Z, [x28, #5, MUL VL]\n"
+ "ld1h { z4.h }, p7/Z, [x27, #4, MUL VL]\n"
+ "ld1h { z5.h }, p7/Z, [x27, #5, MUL VL]\n"
".inst 0x6464e40a // bfmmla z10.s, z0.h, z4.h\n"
".inst 0x6465e410 // bfmmla z16.s, z0.h, z5.h\n"
- "ld1h { z6.h }, p7/Z, [x28, #6, MUL VL]\n"
- "ld1h { z7.h }, p7/Z, [x28, #7, MUL VL]\n"
- "addvl x28, x28, #16\n"
+ "ld1h { z6.h }, p7/Z, [x27, #6, MUL VL]\n"
+ "ld1h { z7.h }, p7/Z, [x27, #7, MUL VL]\n"
+ "addvl x27, x27, #16\n"
".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
".inst 0x6467e411 // bfmmla z17.s, z0.h, z7.h\n"
- "ld1h { z4.h }, p7/Z, [x28, #-8, MUL VL]\n"
- "ld1h { z5.h }, p7/Z, [x28, #-7, MUL VL]\n"
+ "ld1h { z4.h }, p7/Z, [x27, #-8, MUL VL]\n"
+ "ld1h { z5.h }, p7/Z, [x27, #-7, MUL VL]\n"
".inst 0x6464e40c // bfmmla z12.s, z0.h, z4.h\n"
- "ld1h { z6.h }, p7/Z, [x28, #-6, MUL VL]\n"
- "ld1h { z7.h }, p7/Z, [x28, #-5, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x27, #-6, MUL VL]\n"
+ "ld1h { z7.h }, p7/Z, [x27, #-5, MUL VL]\n"
".inst 0x6465e412 // bfmmla z18.s, z0.h, z5.h\n"
".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
".inst 0x6467e413 // bfmmla z19.s, z0.h, z7.h\n"
- "addvl x28, x28, #-4\n"
+ "addvl x27, x27, #-4\n"
"11:" // Height 1: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 6b\n"
"uzp1 z8.d, z8.d, z14.d\n"
"uzp1 z9.d, z9.d, z15.d\n"
@@ -269,10 +269,10 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"uzp1 z12.d, z12.d, z18.d\n"
"uzp1 z13.d, z13.d, z19.d\n"
"tbz %x[flags], #1, 12f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p7/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p7/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p7/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p7/Z, [x19]\n"
"fmin z8.s, p7/M, z8.s, z1.s\n"
"fmin z9.s, p7/M, z9.s, z1.s\n"
"fmin z10.s, p7/M, z10.s, z1.s\n"
@@ -286,52 +286,52 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"fmax z12.s, p7/M, z12.s, z0.s\n"
"fmax z13.s, p7/M, z13.s, z0.s\n"
"12:" // Height 1: No activation
- "st1w { z8.s }, p6, [x27]\n"
- "st1w { z9.s }, p5, [x27, #1, MUL VL]\n"
- "st1w { z10.s }, p4, [x27, #2, MUL VL]\n"
- "st1w { z11.s }, p3, [x27, #3, MUL VL]\n"
- "st1w { z12.s }, p2, [x27, #4, MUL VL]\n"
- "st1w { z13.s }, p1, [x27, #5, MUL VL]\n"
- "addvl x27, x27, #6\n"
+ "st1w { z8.s }, p6, [x26]\n"
+ "st1w { z9.s }, p5, [x26, #1, MUL VL]\n"
+ "st1w { z10.s }, p4, [x26, #2, MUL VL]\n"
+ "st1w { z11.s }, p3, [x26, #3, MUL VL]\n"
+ "st1w { z12.s }, p2, [x26, #4, MUL VL]\n"
+ "st1w { z13.s }, p1, [x26, #5, MUL VL]\n"
+ "addvl x26, x26, #6\n"
"13:" // Height 1: Writeback done
- "decw x9, ALL, MUL #6\n"
- "cmp x9, XZR\n"
+ "decw x28, ALL, MUL #6\n"
+ "cmp x28, XZR\n"
"bgt 2b\n"
"b 54f\n"
"14:" // Height 2
- "mov x10, %x[bias]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "mov x9, %x[bias]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x26, %x[output_ptr]\n"
"15:" // Height 2: Column loop
- "mov x20, #0x0\n"
- "whilelt p6.s, x20, x9\n"
- "incw x20\n"
- "whilelt p5.s, x20, x9\n"
- "incw x20\n"
- "whilelt p4.s, x20, x9\n"
- "incw x20\n"
- "whilelt p3.s, x20, x9\n"
- "incw x20\n"
- "whilelt p2.s, x20, x9\n"
- "incw x20\n"
- "whilelt p1.s, x20, x9\n"
- "cbz x10, 16f\n"
- "ld1w { z8.s }, p7/Z, [x10]\n"
- "ld1w { z9.s }, p7/Z, [x10, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p6.s, x19, x28\n"
+ "incw x19\n"
+ "whilelt p5.s, x19, x28\n"
+ "incw x19\n"
+ "whilelt p4.s, x19, x28\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x28\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x28\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x28\n"
+ "cbz x9, 16f\n"
+ "ld1w { z8.s }, p7/Z, [x9]\n"
+ "ld1w { z9.s }, p7/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z10.s }, p7/Z, [x9, #2, MUL VL]\n"
"zip2 z14.d, z8.d, z8.d\n"
"zip1 z8.d, z8.d, z8.d\n"
- "ld1w { z10.s }, p7/Z, [x10, #2, MUL VL]\n"
- "ld1w { z11.s }, p7/Z, [x10, #3, MUL VL]\n"
+ "ld1w { z11.s }, p7/Z, [x9, #3, MUL VL]\n"
+ "ld1w { z12.s }, p7/Z, [x9, #4, MUL VL]\n"
"zip2 z15.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
- "ld1w { z12.s }, p7/Z, [x10, #4, MUL VL]\n"
- "ld1w { z13.s }, p7/Z, [x10, #5, MUL VL]\n"
+ "ld1w { z13.s }, p7/Z, [x9, #5, MUL VL]\n"
"zip2 z16.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
+ "addvl x9, x9, #6\n"
"zip2 z17.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
- "addvl x10, x10, #6\n"
"zip2 z18.d, z12.d, z12.d\n"
"zip1 z12.d, z12.d, z12.d\n"
"zip2 z19.d, z13.d, z13.d\n"
@@ -339,26 +339,26 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"b 18f\n"
"16:" // Height 2: no bias
"tbz %x[flags], #0, 17f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20, LSL #2\n"
- "ld1w { z9.s }, p6/Z, [x27]\n"
- "ld1w { z10.s }, p5/Z, [x27, #1, MUL VL]\n"
- "ld1w { z11.s }, p4/Z, [x27, #2, MUL VL]\n"
- "ld1w { z12.s }, p3/Z, [x27, #3, MUL VL]\n"
- "ld1w { z13.s }, p2/Z, [x27, #4, MUL VL]\n"
- "ld1w { z20.s }, p1/Z, [x27, #5, MUL VL]\n"
- "ld1w { z14.s }, p6/Z, [x23]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x22, x26, x19, LSL #2\n"
+ "ld1w { z9.s }, p6/Z, [x26]\n"
+ "ld1w { z10.s }, p5/Z, [x26, #1, MUL VL]\n"
+ "ld1w { z11.s }, p4/Z, [x26, #2, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x26, #3, MUL VL]\n"
+ "ld1w { z13.s }, p2/Z, [x26, #4, MUL VL]\n"
+ "ld1w { z20.s }, p1/Z, [x26, #5, MUL VL]\n"
+ "ld1w { z14.s }, p6/Z, [x22]\n"
"zip1 z8.d, z9.d, z14.d\n"
"zip2 z14.d, z9.d, z14.d\n"
- "ld1w { z15.s }, p5/Z, [x23, #1, MUL VL]\n"
- "ld1w { z16.s }, p4/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p5/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x22, #2, MUL VL]\n"
"zip1 z9.d, z10.d, z15.d\n"
"zip2 z15.d, z10.d, z15.d\n"
- "ld1w { z17.s }, p3/Z, [x23, #3, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x23, #4, MUL VL]\n"
+ "ld1w { z17.s }, p3/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x22, #4, MUL VL]\n"
"zip1 z10.d, z11.d, z16.d\n"
"zip2 z16.d, z11.d, z16.d\n"
- "ld1w { z19.s }, p1/Z, [x23, #5, MUL VL]\n"
+ "ld1w { z19.s }, p1/Z, [x22, #5, MUL VL]\n"
"zip1 z11.d, z12.d, z17.d\n"
"zip2 z17.d, z12.d, z17.d\n"
"zip1 z12.d, z13.d, z18.d\n"
@@ -380,111 +380,111 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"mov z18.b, #0x0\n"
"mov z19.b, #0x0\n"
"18:" // Height 2: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"19:" // Height 2: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w24, [x19, x25, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 20f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "cbnz x26, 21f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "cbnz x25, 21f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
"b 21f\n"
"20:" // Height 2: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20, LSL #2\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19, LSL #2\n"
"21:" // Height 2: input setup done
- "cmp x25, #0x4\n"
+ "cmp x24, #0x4\n"
"ble 23f\n"
"22:" // Height 2: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x25\n"
- "ld1rqw { z0.s }, p0/Z, [x24]\n"
- "ld1rqw { z1.s }, p0/Z, [x23]\n"
+ "whilelt p0.s, XZR, x24\n"
+ "ld1rqw { z0.s }, p0/Z, [x23]\n"
+ "ld1rqw { z1.s }, p0/Z, [x22]\n"
".inst 0x658abc00 // bfcvt z0.h, p7/M, z0.s\n"
".inst 0x658abc21 // bfcvt z1.h, p7/M, z1.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "ld1h { z4.h }, p7/Z, [x28]\n"
- "ld1h { z5.h }, p7/Z, [x28, #1, MUL VL]\n"
+ "ld1h { z4.h }, p7/Z, [x27]\n"
+ "ld1h { z5.h }, p7/Z, [x27, #1, MUL VL]\n"
"uzp1 z1.h, z1.h, z1.h\n"
"trn1 z0.d, z0.d, z1.d\n"
- "ld1h { z6.h }, p7/Z, [x28, #2, MUL VL]\n"
- "ld1h { z7.h }, p7/Z, [x28, #3, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x27, #2, MUL VL]\n"
+ "ld1h { z7.h }, p7/Z, [x27, #3, MUL VL]\n"
".inst 0x6464e408 // bfmmla z8.s, z0.h, z4.h\n"
".inst 0x6465e40e // bfmmla z14.s, z0.h, z5.h\n"
- "ld1h { z4.h }, p7/Z, [x28, #4, MUL VL]\n"
- "ld1h { z5.h }, p7/Z, [x28, #5, MUL VL]\n"
+ "ld1h { z4.h }, p7/Z, [x27, #4, MUL VL]\n"
+ "ld1h { z5.h }, p7/Z, [x27, #5, MUL VL]\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p7/Z, [x28, #6, MUL VL]\n"
- "ld1h { z7.h }, p7/Z, [x28, #7, MUL VL]\n"
- "addvl x28, x28, #16\n"
+ "ld1h { z6.h }, p7/Z, [x27, #6, MUL VL]\n"
+ "ld1h { z7.h }, p7/Z, [x27, #7, MUL VL]\n"
+ "addvl x27, x27, #16\n"
".inst 0x6464e40a // bfmmla z10.s, z0.h, z4.h\n"
".inst 0x6465e410 // bfmmla z16.s, z0.h, z5.h\n"
- "ld1h { z4.h }, p7/Z, [x28, #-8, MUL VL]\n"
+ "ld1h { z4.h }, p7/Z, [x27, #-8, MUL VL]\n"
".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
".inst 0x6467e411 // bfmmla z17.s, z0.h, z7.h\n"
- "ld1h { z5.h }, p7/Z, [x28, #-7, MUL VL]\n"
- "ld1h { z6.h }, p7/Z, [x28, #-6, MUL VL]\n"
- "ld1h { z7.h }, p7/Z, [x28, #-5, MUL VL]\n"
- "sub x25, x25, #0x4\n"
- "cmp x25, #0x4\n"
+ "ld1h { z5.h }, p7/Z, [x27, #-7, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x27, #-6, MUL VL]\n"
+ "ld1h { z7.h }, p7/Z, [x27, #-5, MUL VL]\n"
+ "sub x24, x24, #0x4\n"
+ "cmp x24, #0x4\n"
".inst 0x6464e40c // bfmmla z12.s, z0.h, z4.h\n"
".inst 0x6465e412 // bfmmla z18.s, z0.h, z5.h\n"
".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
- "add x24, x24, #0x10\n"
"add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
".inst 0x6467e413 // bfmmla z19.s, z0.h, z7.h\n"
- "addvl x28, x28, #-4\n"
+ "addvl x27, x27, #-4\n"
"bgt 22b\n"
"23:" // Height 2: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x25\n"
- "ld1rqw { z0.s }, p0/Z, [x24]\n"
- "ld1rqw { z1.s }, p0/Z, [x23]\n"
+ "whilelt p0.s, XZR, x24\n"
+ "ld1rqw { z0.s }, p0/Z, [x23]\n"
+ "ld1rqw { z1.s }, p0/Z, [x22]\n"
".inst 0x658abc00 // bfcvt z0.h, p7/M, z0.s\n"
".inst 0x658abc21 // bfcvt z1.h, p7/M, z1.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "ld1h { z4.h }, p7/Z, [x28]\n"
- "ld1h { z5.h }, p7/Z, [x28, #1, MUL VL]\n"
+ "ld1h { z4.h }, p7/Z, [x27]\n"
+ "ld1h { z5.h }, p7/Z, [x27, #1, MUL VL]\n"
"uzp1 z1.h, z1.h, z1.h\n"
"trn1 z0.d, z0.d, z1.d\n"
- "ld1h { z6.h }, p7/Z, [x28, #2, MUL VL]\n"
- "ld1h { z7.h }, p7/Z, [x28, #3, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x27, #2, MUL VL]\n"
+ "ld1h { z7.h }, p7/Z, [x27, #3, MUL VL]\n"
".inst 0x6464e408 // bfmmla z8.s, z0.h, z4.h\n"
".inst 0x6465e40e // bfmmla z14.s, z0.h, z5.h\n"
- "ld1h { z4.h }, p7/Z, [x28, #4, MUL VL]\n"
- "ld1h { z5.h }, p7/Z, [x28, #5, MUL VL]\n"
+ "ld1h { z4.h }, p7/Z, [x27, #4, MUL VL]\n"
+ "ld1h { z5.h }, p7/Z, [x27, #5, MUL VL]\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p7/Z, [x28, #6, MUL VL]\n"
- "ld1h { z7.h }, p7/Z, [x28, #7, MUL VL]\n"
- "addvl x28, x28, #16\n"
+ "ld1h { z6.h }, p7/Z, [x27, #6, MUL VL]\n"
+ "ld1h { z7.h }, p7/Z, [x27, #7, MUL VL]\n"
+ "addvl x27, x27, #16\n"
".inst 0x6464e40a // bfmmla z10.s, z0.h, z4.h\n"
".inst 0x6465e410 // bfmmla z16.s, z0.h, z5.h\n"
- "ld1h { z4.h }, p7/Z, [x28, #-8, MUL VL]\n"
+ "ld1h { z4.h }, p7/Z, [x27, #-8, MUL VL]\n"
".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
".inst 0x6467e411 // bfmmla z17.s, z0.h, z7.h\n"
- "ld1h { z5.h }, p7/Z, [x28, #-7, MUL VL]\n"
- "ld1h { z6.h }, p7/Z, [x28, #-6, MUL VL]\n"
- "ld1h { z7.h }, p7/Z, [x28, #-5, MUL VL]\n"
+ "ld1h { z5.h }, p7/Z, [x27, #-7, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x27, #-6, MUL VL]\n"
+ "ld1h { z7.h }, p7/Z, [x27, #-5, MUL VL]\n"
".inst 0x6464e40c // bfmmla z12.s, z0.h, z4.h\n"
".inst 0x6465e412 // bfmmla z18.s, z0.h, z5.h\n"
- "addvl x28, x28, #-4\n"
+ "addvl x27, x27, #-4\n"
".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
".inst 0x6467e413 // bfmmla z19.s, z0.h, z7.h\n"
"24:" // Height 2: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 19b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 z4.d, z8.d, z14.d\n"
"uzp2 z8.d, z8.d, z14.d\n"
- "add x23, x27, x20, LSL #2\n"
+ "add x22, x26, x19, LSL #2\n"
"uzp1 z14.d, z9.d, z15.d\n"
"uzp2 z9.d, z9.d, z15.d\n"
"uzp1 z15.d, z10.d, z16.d\n"
@@ -496,10 +496,10 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"uzp1 z18.d, z13.d, z19.d\n"
"uzp2 z13.d, z13.d, z19.d\n"
"tbz %x[flags], #1, 25f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p7/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p7/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p7/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p7/Z, [x19]\n"
"fmin z4.s, p7/M, z4.s, z1.s\n"
"fmin z14.s, p7/M, z14.s, z1.s\n"
"fmin z15.s, p7/M, z15.s, z1.s\n"
@@ -525,58 +525,58 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"fmax z12.s, p7/M, z12.s, z0.s\n"
"fmax z13.s, p7/M, z13.s, z0.s\n"
"25:" // Height 2: No activation
- "st1w { z4.s }, p6, [x27]\n"
- "st1w { z14.s }, p5, [x27, #1, MUL VL]\n"
- "st1w { z15.s }, p4, [x27, #2, MUL VL]\n"
- "st1w { z16.s }, p3, [x27, #3, MUL VL]\n"
- "st1w { z17.s }, p2, [x27, #4, MUL VL]\n"
- "st1w { z18.s }, p1, [x27, #5, MUL VL]\n"
- "addvl x27, x27, #6\n"
- "st1w { z8.s }, p6, [x23]\n"
- "st1w { z9.s }, p5, [x23, #1, MUL VL]\n"
- "st1w { z10.s }, p4, [x23, #2, MUL VL]\n"
- "st1w { z11.s }, p3, [x23, #3, MUL VL]\n"
- "st1w { z12.s }, p2, [x23, #4, MUL VL]\n"
- "st1w { z13.s }, p1, [x23, #5, MUL VL]\n"
+ "st1w { z4.s }, p6, [x26]\n"
+ "st1w { z14.s }, p5, [x26, #1, MUL VL]\n"
+ "st1w { z15.s }, p4, [x26, #2, MUL VL]\n"
+ "st1w { z16.s }, p3, [x26, #3, MUL VL]\n"
+ "st1w { z17.s }, p2, [x26, #4, MUL VL]\n"
+ "st1w { z18.s }, p1, [x26, #5, MUL VL]\n"
+ "addvl x26, x26, #6\n"
+ "st1w { z8.s }, p6, [x22]\n"
+ "st1w { z9.s }, p5, [x22, #1, MUL VL]\n"
+ "st1w { z10.s }, p4, [x22, #2, MUL VL]\n"
+ "st1w { z11.s }, p3, [x22, #3, MUL VL]\n"
+ "st1w { z12.s }, p2, [x22, #4, MUL VL]\n"
+ "st1w { z13.s }, p1, [x22, #5, MUL VL]\n"
"26:" // Height 2: Writeback done
- "decw x9, ALL, MUL #6\n"
- "cmp x9, XZR\n"
+ "decw x28, ALL, MUL #6\n"
+ "cmp x28, XZR\n"
"bgt 15b\n"
"b 54f\n"
"27:" // Height 3
- "mov x10, %x[bias]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "mov x9, %x[bias]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x26, %x[output_ptr]\n"
"28:" // Height 3: Column loop
- "mov x20, #0x0\n"
- "whilelt p6.s, x20, x9\n"
- "incw x20\n"
- "whilelt p5.s, x20, x9\n"
- "incw x20\n"
- "whilelt p4.s, x20, x9\n"
- "incw x20\n"
- "whilelt p3.s, x20, x9\n"
- "incw x20\n"
- "whilelt p2.s, x20, x9\n"
- "incw x20\n"
- "whilelt p1.s, x20, x9\n"
- "cbz x10, 29f\n"
- "ld1w { z8.s }, p7/Z, [x10]\n"
- "ld1w { z9.s }, p7/Z, [x10, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p6.s, x19, x28\n"
+ "incw x19\n"
+ "whilelt p5.s, x19, x28\n"
+ "incw x19\n"
+ "whilelt p4.s, x19, x28\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x28\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x28\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x28\n"
+ "cbz x9, 29f\n"
+ "ld1w { z8.s }, p7/Z, [x9]\n"
+ "ld1w { z9.s }, p7/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z10.s }, p7/Z, [x9, #2, MUL VL]\n"
"zip2 z14.d, z8.d, z8.d\n"
"zip1 z8.d, z8.d, z8.d\n"
- "ld1w { z10.s }, p7/Z, [x10, #2, MUL VL]\n"
- "ld1w { z11.s }, p7/Z, [x10, #3, MUL VL]\n"
+ "ld1w { z11.s }, p7/Z, [x9, #3, MUL VL]\n"
+ "ld1w { z12.s }, p7/Z, [x9, #4, MUL VL]\n"
"zip2 z15.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
- "ld1w { z12.s }, p7/Z, [x10, #4, MUL VL]\n"
- "ld1w { z13.s }, p7/Z, [x10, #5, MUL VL]\n"
+ "ld1w { z13.s }, p7/Z, [x9, #5, MUL VL]\n"
"zip2 z16.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
+ "addvl x9, x9, #6\n"
"zip2 z17.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
- "addvl x10, x10, #6\n"
"zip2 z18.d, z12.d, z12.d\n"
"zip1 z12.d, z12.d, z12.d\n"
"zip2 z19.d, z13.d, z13.d\n"
@@ -596,39 +596,39 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"b 31f\n"
"29:" // Height 3: no bias
"tbz %x[flags], #0, 30f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "ld1w { z9.s }, p6/Z, [x27]\n"
- "ld1w { z10.s }, p5/Z, [x27, #1, MUL VL]\n"
- "ld1w { z11.s }, p4/Z, [x27, #2, MUL VL]\n"
- "ld1w { z12.s }, p3/Z, [x27, #3, MUL VL]\n"
- "ld1w { z13.s }, p2/Z, [x27, #4, MUL VL]\n"
- "ld1w { z20.s }, p1/Z, [x27, #5, MUL VL]\n"
- "ld1w { z14.s }, p6/Z, [x23]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x22, x26, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z9.s }, p6/Z, [x26]\n"
+ "ld1w { z10.s }, p5/Z, [x26, #1, MUL VL]\n"
+ "ld1w { z11.s }, p4/Z, [x26, #2, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x26, #3, MUL VL]\n"
+ "ld1w { z13.s }, p2/Z, [x26, #4, MUL VL]\n"
+ "ld1w { z20.s }, p1/Z, [x26, #5, MUL VL]\n"
+ "ld1w { z14.s }, p6/Z, [x22]\n"
+ "ld1w { z15.s }, p5/Z, [x22, #1, MUL VL]\n"
"zip1 z8.d, z9.d, z14.d\n"
"zip2 z14.d, z9.d, z14.d\n"
- "ld1w { z15.s }, p5/Z, [x23, #1, MUL VL]\n"
- "ld1w { z16.s }, p4/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z17.s }, p3/Z, [x22, #3, MUL VL]\n"
"zip1 z9.d, z10.d, z15.d\n"
"zip2 z15.d, z10.d, z15.d\n"
- "ld1w { z17.s }, p3/Z, [x23, #3, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x23, #4, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x22, #4, MUL VL]\n"
+ "ld1w { z19.s }, p1/Z, [x22, #5, MUL VL]\n"
"zip1 z10.d, z11.d, z16.d\n"
"zip2 z16.d, z11.d, z16.d\n"
- "ld1w { z19.s }, p1/Z, [x23, #5, MUL VL]\n"
- "ld1w { z21.s }, p6/Z, [x22]\n"
+ "ld1w { z21.s }, p6/Z, [x21]\n"
+ "ld1w { z22.s }, p5/Z, [x21, #1, MUL VL]\n"
"zip1 z11.d, z12.d, z17.d\n"
"zip2 z17.d, z12.d, z17.d\n"
- "ld1w { z22.s }, p5/Z, [x22, #1, MUL VL]\n"
- "ld1w { z23.s }, p4/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z23.s }, p4/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z24.s }, p3/Z, [x21, #3, MUL VL]\n"
"zip1 z12.d, z13.d, z18.d\n"
"zip2 z18.d, z13.d, z18.d\n"
- "ld1w { z24.s }, p3/Z, [x22, #3, MUL VL]\n"
- "ld1w { z25.s }, p2/Z, [x22, #4, MUL VL]\n"
+ "ld1w { z25.s }, p2/Z, [x21, #4, MUL VL]\n"
+ "ld1w { z4.s }, p1/Z, [x21, #5, MUL VL]\n"
"zip1 z13.d, z20.d, z19.d\n"
"zip2 z19.d, z20.d, z19.d\n"
- "ld1w { z4.s }, p1/Z, [x22, #5, MUL VL]\n"
"zip1 z20.d, z21.d, z26.d\n"
"zip2 z26.d, z21.d, z26.d\n"
"zip1 z21.d, z22.d, z27.d\n"
@@ -668,78 +668,78 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"mov z30.b, #0x0\n"
"mov z31.b, #0x0\n"
"31:" // Height 3: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"32:" // Height 3: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w24, [x19, x25, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 33f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "ldr x22, [x21, #0x10]\n"
- "cbnz x26, 34f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x22, x22, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "cbnz x25, 34f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
"b 34f\n"
"33:" // Height 3: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"34:" // Height 3: input setup done
- "cmp x25, #0x4\n"
+ "cmp x24, #0x4\n"
"ble 36f\n"
"35:" // Height 3: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x25\n"
- "ld1rqw { z0.s }, p0/Z, [x24]\n"
- "ld1rqw { z1.s }, p0/Z, [x23]\n"
+ "whilelt p0.s, XZR, x24\n"
+ "ld1rqw { z0.s }, p0/Z, [x23]\n"
+ "ld1rqw { z1.s }, p0/Z, [x22]\n"
".inst 0x658abc00 // bfcvt z0.h, p7/M, z0.s\n"
- "ld1rqw { z2.s }, p0/Z, [x22]\n"
+ "ld1rqw { z2.s }, p0/Z, [x21]\n"
".inst 0x658abc21 // bfcvt z1.h, p7/M, z1.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "ld1h { z4.h }, p7/Z, [x28]\n"
+ "ld1h { z4.h }, p7/Z, [x27]\n"
"uzp1 z1.h, z1.h, z1.h\n"
".inst 0x658abc42 // bfcvt z2.h, p7/M, z2.s\n"
- "ld1h { z5.h }, p7/Z, [x28, #1, MUL VL]\n"
- "ld1h { z6.h }, p7/Z, [x28, #2, MUL VL]\n"
+ "ld1h { z5.h }, p7/Z, [x27, #1, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x27, #2, MUL VL]\n"
"trn1 z0.d, z0.d, z1.d\n"
"uzp1 z2.h, z2.h, z2.h\n"
- "ld1h { z7.h }, p7/Z, [x28, #3, MUL VL]\n"
+ "ld1h { z7.h }, p7/Z, [x27, #3, MUL VL]\n"
".inst 0x6464e408 // bfmmla z8.s, z0.h, z4.h\n"
".inst 0x6464e454 // bfmmla z20.s, z2.h, z4.h\n"
".inst 0x6465e40e // bfmmla z14.s, z0.h, z5.h\n"
- "ld1h { z4.h }, p7/Z, [x28, #4, MUL VL]\n"
- "sub x25, x25, #0x4\n"
+ "ld1h { z4.h }, p7/Z, [x27, #4, MUL VL]\n"
+ "sub x24, x24, #0x4\n"
".inst 0x6465e45a // bfmmla z26.s, z2.h, z5.h\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
- "ld1h { z5.h }, p7/Z, [x28, #5, MUL VL]\n"
- "cmp x25, #0x4\n"
+ "ld1h { z5.h }, p7/Z, [x27, #5, MUL VL]\n"
+ "cmp x24, #0x4\n"
".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p7/Z, [x28, #6, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "ld1h { z6.h }, p7/Z, [x27, #6, MUL VL]\n"
+ "add x23, x23, #0x10\n"
".inst 0x6467e45b // bfmmla z27.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p7/Z, [x28, #7, MUL VL]\n"
- "addvl x28, x28, #16\n"
+ "ld1h { z7.h }, p7/Z, [x27, #7, MUL VL]\n"
+ "addvl x27, x27, #16\n"
".inst 0x6464e40a // bfmmla z10.s, z0.h, z4.h\n"
".inst 0x6464e456 // bfmmla z22.s, z2.h, z4.h\n"
".inst 0x6465e410 // bfmmla z16.s, z0.h, z5.h\n"
- "ld1h { z4.h }, p7/Z, [x28, #-8, MUL VL]\n"
- "add x23, x23, #0x10\n"
+ "ld1h { z4.h }, p7/Z, [x27, #-8, MUL VL]\n"
+ "add x22, x22, #0x10\n"
".inst 0x6465e45c // bfmmla z28.s, z2.h, z5.h\n"
".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
- "ld1h { z5.h }, p7/Z, [x28, #-7, MUL VL]\n"
- "add x22, x22, #0x10\n"
+ "ld1h { z5.h }, p7/Z, [x27, #-7, MUL VL]\n"
+ "add x21, x21, #0x10\n"
".inst 0x6466e457 // bfmmla z23.s, z2.h, z6.h\n"
".inst 0x6467e411 // bfmmla z17.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p7/Z, [x28, #-6, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x27, #-6, MUL VL]\n"
".inst 0x6467e45d // bfmmla z29.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p7/Z, [x28, #-5, MUL VL]\n"
+ "ld1h { z7.h }, p7/Z, [x27, #-5, MUL VL]\n"
".inst 0x6464e40c // bfmmla z12.s, z0.h, z4.h\n"
- "addvl x28, x28, #-4\n"
+ "addvl x27, x27, #-4\n"
".inst 0x6464e458 // bfmmla z24.s, z2.h, z4.h\n"
".inst 0x6465e412 // bfmmla z18.s, z0.h, z5.h\n"
".inst 0x6465e45e // bfmmla z30.s, z2.h, z5.h\n"
@@ -749,48 +749,48 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
".inst 0x6467e45f // bfmmla z31.s, z2.h, z7.h\n"
"bgt 35b\n"
"36:" // Height 3: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x25\n"
- "ld1rqw { z0.s }, p0/Z, [x24]\n"
- "ld1rqw { z1.s }, p0/Z, [x23]\n"
+ "whilelt p0.s, XZR, x24\n"
+ "ld1rqw { z0.s }, p0/Z, [x23]\n"
+ "ld1rqw { z1.s }, p0/Z, [x22]\n"
".inst 0x658abc00 // bfcvt z0.h, p7/M, z0.s\n"
- "ld1rqw { z2.s }, p0/Z, [x22]\n"
+ "ld1rqw { z2.s }, p0/Z, [x21]\n"
".inst 0x658abc21 // bfcvt z1.h, p7/M, z1.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "ld1h { z4.h }, p7/Z, [x28]\n"
+ "ld1h { z4.h }, p7/Z, [x27]\n"
"uzp1 z1.h, z1.h, z1.h\n"
".inst 0x658abc42 // bfcvt z2.h, p7/M, z2.s\n"
- "ld1h { z5.h }, p7/Z, [x28, #1, MUL VL]\n"
- "ld1h { z6.h }, p7/Z, [x28, #2, MUL VL]\n"
+ "ld1h { z5.h }, p7/Z, [x27, #1, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x27, #2, MUL VL]\n"
"trn1 z0.d, z0.d, z1.d\n"
"uzp1 z2.h, z2.h, z2.h\n"
- "ld1h { z7.h }, p7/Z, [x28, #3, MUL VL]\n"
+ "ld1h { z7.h }, p7/Z, [x27, #3, MUL VL]\n"
".inst 0x6464e408 // bfmmla z8.s, z0.h, z4.h\n"
".inst 0x6464e454 // bfmmla z20.s, z2.h, z4.h\n"
".inst 0x6465e40e // bfmmla z14.s, z0.h, z5.h\n"
- "ld1h { z4.h }, p7/Z, [x28, #4, MUL VL]\n"
+ "ld1h { z4.h }, p7/Z, [x27, #4, MUL VL]\n"
".inst 0x6465e45a // bfmmla z26.s, z2.h, z5.h\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
- "ld1h { z5.h }, p7/Z, [x28, #5, MUL VL]\n"
+ "ld1h { z5.h }, p7/Z, [x27, #5, MUL VL]\n"
".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p7/Z, [x28, #6, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x27, #6, MUL VL]\n"
".inst 0x6467e45b // bfmmla z27.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p7/Z, [x28, #7, MUL VL]\n"
- "addvl x28, x28, #16\n"
+ "ld1h { z7.h }, p7/Z, [x27, #7, MUL VL]\n"
+ "addvl x27, x27, #16\n"
".inst 0x6464e40a // bfmmla z10.s, z0.h, z4.h\n"
".inst 0x6464e456 // bfmmla z22.s, z2.h, z4.h\n"
".inst 0x6465e410 // bfmmla z16.s, z0.h, z5.h\n"
- "ld1h { z4.h }, p7/Z, [x28, #-8, MUL VL]\n"
+ "ld1h { z4.h }, p7/Z, [x27, #-8, MUL VL]\n"
".inst 0x6465e45c // bfmmla z28.s, z2.h, z5.h\n"
".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
- "ld1h { z5.h }, p7/Z, [x28, #-7, MUL VL]\n"
+ "ld1h { z5.h }, p7/Z, [x27, #-7, MUL VL]\n"
".inst 0x6466e457 // bfmmla z23.s, z2.h, z6.h\n"
".inst 0x6467e411 // bfmmla z17.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p7/Z, [x28, #-6, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x27, #-6, MUL VL]\n"
".inst 0x6467e45d // bfmmla z29.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p7/Z, [x28, #-5, MUL VL]\n"
+ "ld1h { z7.h }, p7/Z, [x27, #-5, MUL VL]\n"
".inst 0x6464e40c // bfmmla z12.s, z0.h, z4.h\n"
- "addvl x28, x28, #-4\n"
+ "addvl x27, x27, #-4\n"
".inst 0x6464e458 // bfmmla z24.s, z2.h, z4.h\n"
".inst 0x6465e412 // bfmmla z18.s, z0.h, z5.h\n"
".inst 0x6465e45e // bfmmla z30.s, z2.h, z5.h\n"
@@ -799,17 +799,17 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
".inst 0x6467e413 // bfmmla z19.s, z0.h, z7.h\n"
".inst 0x6467e45f // bfmmla z31.s, z2.h, z7.h\n"
"37:" // Height 3: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 32b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x22, x26, x19, LSL #2\n"
"uzp1 z4.d, z8.d, z14.d\n"
"uzp2 z8.d, z8.d, z14.d\n"
"uzp1 z14.d, z9.d, z15.d\n"
"uzp2 z9.d, z9.d, z15.d\n"
- "add x22, x23, x20, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"uzp1 z15.d, z10.d, z16.d\n"
"uzp2 z10.d, z10.d, z16.d\n"
"uzp1 z16.d, z11.d, z17.d\n"
@@ -825,10 +825,10 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"uzp1 z24.d, z24.d, z30.d\n"
"uzp1 z25.d, z25.d, z31.d\n"
"tbz %x[flags], #1, 38f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p7/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p7/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p7/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p7/Z, [x19]\n"
"fmin z4.s, p7/M, z4.s, z1.s\n"
"fmin z14.s, p7/M, z14.s, z1.s\n"
"fmin z15.s, p7/M, z15.s, z1.s\n"
@@ -866,67 +866,67 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"fmax z24.s, p7/M, z24.s, z0.s\n"
"fmax z25.s, p7/M, z25.s, z0.s\n"
"38:" // Height 3: No activation
- "st1w { z4.s }, p6, [x27]\n"
- "st1w { z14.s }, p5, [x27, #1, MUL VL]\n"
- "st1w { z15.s }, p4, [x27, #2, MUL VL]\n"
- "st1w { z16.s }, p3, [x27, #3, MUL VL]\n"
- "st1w { z17.s }, p2, [x27, #4, MUL VL]\n"
- "st1w { z18.s }, p1, [x27, #5, MUL VL]\n"
- "addvl x27, x27, #6\n"
- "st1w { z8.s }, p6, [x23]\n"
- "st1w { z9.s }, p5, [x23, #1, MUL VL]\n"
- "st1w { z10.s }, p4, [x23, #2, MUL VL]\n"
- "st1w { z11.s }, p3, [x23, #3, MUL VL]\n"
- "st1w { z12.s }, p2, [x23, #4, MUL VL]\n"
- "st1w { z13.s }, p1, [x23, #5, MUL VL]\n"
- "st1w { z20.s }, p6, [x22]\n"
- "st1w { z21.s }, p5, [x22, #1, MUL VL]\n"
- "st1w { z22.s }, p4, [x22, #2, MUL VL]\n"
- "st1w { z23.s }, p3, [x22, #3, MUL VL]\n"
- "st1w { z24.s }, p2, [x22, #4, MUL VL]\n"
- "st1w { z25.s }, p1, [x22, #5, MUL VL]\n"
+ "st1w { z4.s }, p6, [x26]\n"
+ "st1w { z14.s }, p5, [x26, #1, MUL VL]\n"
+ "st1w { z15.s }, p4, [x26, #2, MUL VL]\n"
+ "st1w { z16.s }, p3, [x26, #3, MUL VL]\n"
+ "st1w { z17.s }, p2, [x26, #4, MUL VL]\n"
+ "st1w { z18.s }, p1, [x26, #5, MUL VL]\n"
+ "addvl x26, x26, #6\n"
+ "st1w { z8.s }, p6, [x22]\n"
+ "st1w { z9.s }, p5, [x22, #1, MUL VL]\n"
+ "st1w { z10.s }, p4, [x22, #2, MUL VL]\n"
+ "st1w { z11.s }, p3, [x22, #3, MUL VL]\n"
+ "st1w { z12.s }, p2, [x22, #4, MUL VL]\n"
+ "st1w { z13.s }, p1, [x22, #5, MUL VL]\n"
+ "st1w { z20.s }, p6, [x21]\n"
+ "st1w { z21.s }, p5, [x21, #1, MUL VL]\n"
+ "st1w { z22.s }, p4, [x21, #2, MUL VL]\n"
+ "st1w { z23.s }, p3, [x21, #3, MUL VL]\n"
+ "st1w { z24.s }, p2, [x21, #4, MUL VL]\n"
+ "st1w { z25.s }, p1, [x21, #5, MUL VL]\n"
"39:" // Height 3: Writeback done
- "decw x9, ALL, MUL #6\n"
- "cmp x9, XZR\n"
+ "decw x28, ALL, MUL #6\n"
+ "cmp x28, XZR\n"
"bgt 28b\n"
"b 54f\n"
"40:" // Height 4
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x10\n"
- "mov x10, %x[bias]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x19, #0x10\n"
+ "mov x9, %x[bias]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x26, %x[output_ptr]\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"41:" // Height 4: Column loop
- "mov x20, #0x0\n"
- "whilelt p6.s, x20, x9\n"
- "incw x20\n"
- "whilelt p5.s, x20, x9\n"
- "incw x20\n"
- "whilelt p4.s, x20, x9\n"
- "incw x20\n"
- "whilelt p3.s, x20, x9\n"
- "incw x20\n"
- "whilelt p2.s, x20, x9\n"
- "incw x20\n"
- "whilelt p1.s, x20, x9\n"
- "cbz x10, 42f\n"
- "ld1w { z8.s }, p7/Z, [x10]\n"
- "ld1w { z9.s }, p7/Z, [x10, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p6.s, x19, x28\n"
+ "incw x19\n"
+ "whilelt p5.s, x19, x28\n"
+ "incw x19\n"
+ "whilelt p4.s, x19, x28\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x28\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x28\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x28\n"
+ "cbz x9, 42f\n"
+ "ld1w { z8.s }, p7/Z, [x9]\n"
+ "ld1w { z9.s }, p7/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z10.s }, p7/Z, [x9, #2, MUL VL]\n"
"zip2 z14.d, z8.d, z8.d\n"
"zip1 z8.d, z8.d, z8.d\n"
- "ld1w { z10.s }, p7/Z, [x10, #2, MUL VL]\n"
- "ld1w { z11.s }, p7/Z, [x10, #3, MUL VL]\n"
+ "ld1w { z11.s }, p7/Z, [x9, #3, MUL VL]\n"
+ "ld1w { z12.s }, p7/Z, [x9, #4, MUL VL]\n"
"zip2 z15.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
- "ld1w { z12.s }, p7/Z, [x10, #4, MUL VL]\n"
- "ld1w { z13.s }, p7/Z, [x10, #5, MUL VL]\n"
+ "ld1w { z13.s }, p7/Z, [x9, #5, MUL VL]\n"
"zip2 z16.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
+ "addvl x9, x9, #6\n"
"zip2 z17.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
- "addvl x10, x10, #6\n"
"zip2 z18.d, z12.d, z12.d\n"
"zip1 z12.d, z12.d, z12.d\n"
"zip2 z19.d, z13.d, z13.d\n"
@@ -946,52 +946,52 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"b 44f\n"
"42:" // Height 4: no bias
"tbz %x[flags], #0, 43f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "ld1w { z9.s }, p6/Z, [x27]\n"
- "add x21, x22, x20, LSL #2\n"
- "ld1w { z10.s }, p5/Z, [x27, #1, MUL VL]\n"
- "ld1w { z11.s }, p4/Z, [x27, #2, MUL VL]\n"
- "ld1w { z12.s }, p3/Z, [x27, #3, MUL VL]\n"
- "ld1w { z13.s }, p2/Z, [x27, #4, MUL VL]\n"
- "ld1w { z20.s }, p1/Z, [x27, #5, MUL VL]\n"
- "ld1w { z14.s }, p6/Z, [x23]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x22, x26, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
+ "ld1w { z9.s }, p6/Z, [x26]\n"
+ "ld1w { z10.s }, p5/Z, [x26, #1, MUL VL]\n"
+ "ld1w { z11.s }, p4/Z, [x26, #2, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x26, #3, MUL VL]\n"
+ "ld1w { z13.s }, p2/Z, [x26, #4, MUL VL]\n"
+ "ld1w { z20.s }, p1/Z, [x26, #5, MUL VL]\n"
+ "ld1w { z14.s }, p6/Z, [x22]\n"
+ "ld1w { z15.s }, p5/Z, [x22, #1, MUL VL]\n"
"zip1 z8.d, z9.d, z14.d\n"
"zip2 z14.d, z9.d, z14.d\n"
- "ld1w { z15.s }, p5/Z, [x23, #1, MUL VL]\n"
- "ld1w { z16.s }, p4/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z17.s }, p3/Z, [x22, #3, MUL VL]\n"
"zip1 z9.d, z10.d, z15.d\n"
"zip2 z15.d, z10.d, z15.d\n"
- "ld1w { z17.s }, p3/Z, [x23, #3, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x23, #4, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x22, #4, MUL VL]\n"
+ "ld1w { z19.s }, p1/Z, [x22, #5, MUL VL]\n"
"zip1 z10.d, z11.d, z16.d\n"
"zip2 z16.d, z11.d, z16.d\n"
- "ld1w { z19.s }, p1/Z, [x23, #5, MUL VL]\n"
- "ld1w { z21.s }, p6/Z, [x22]\n"
+ "ld1w { z21.s }, p6/Z, [x21]\n"
+ "ld1w { z22.s }, p5/Z, [x21, #1, MUL VL]\n"
"zip1 z11.d, z12.d, z17.d\n"
"zip2 z17.d, z12.d, z17.d\n"
- "ld1w { z22.s }, p5/Z, [x22, #1, MUL VL]\n"
- "ld1w { z23.s }, p4/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z23.s }, p4/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z24.s }, p3/Z, [x21, #3, MUL VL]\n"
"zip1 z12.d, z13.d, z18.d\n"
"zip2 z18.d, z13.d, z18.d\n"
- "ld1w { z24.s }, p3/Z, [x22, #3, MUL VL]\n"
- "ld1w { z25.s }, p2/Z, [x22, #4, MUL VL]\n"
+ "ld1w { z25.s }, p2/Z, [x21, #4, MUL VL]\n"
+ "ld1w { z4.s }, p1/Z, [x21, #5, MUL VL]\n"
"zip1 z13.d, z20.d, z19.d\n"
"zip2 z19.d, z20.d, z19.d\n"
- "ld1w { z4.s }, p1/Z, [x22, #5, MUL VL]\n"
- "ld1w { z26.s }, p6/Z, [x21]\n"
+ "ld1w { z26.s }, p6/Z, [x20]\n"
+ "ld1w { z27.s }, p5/Z, [x20, #1, MUL VL]\n"
"zip1 z20.d, z21.d, z26.d\n"
"zip2 z26.d, z21.d, z26.d\n"
- "ld1w { z27.s }, p5/Z, [x21, #1, MUL VL]\n"
- "ld1w { z28.s }, p4/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z28.s }, p4/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z29.s }, p3/Z, [x20, #3, MUL VL]\n"
"zip1 z21.d, z22.d, z27.d\n"
"zip2 z27.d, z22.d, z27.d\n"
- "ld1w { z29.s }, p3/Z, [x21, #3, MUL VL]\n"
- "ld1w { z30.s }, p2/Z, [x21, #4, MUL VL]\n"
+ "ld1w { z30.s }, p2/Z, [x20, #4, MUL VL]\n"
+ "ld1w { z31.s }, p1/Z, [x20, #5, MUL VL]\n"
"zip1 z22.d, z23.d, z28.d\n"
"zip2 z28.d, z23.d, z28.d\n"
- "ld1w { z31.s }, p1/Z, [x21, #5, MUL VL]\n"
"zip1 z23.d, z24.d, z29.d\n"
"zip2 z29.d, z24.d, z29.d\n"
"zip1 z24.d, z25.d, z30.d\n"
@@ -1025,86 +1025,86 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"mov z30.b, #0x0\n"
"mov z31.b, #0x0\n"
"44:" // Height 4: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"45:" // Height 4: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w24, [x19, x25, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 46f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "ldr x22, [x21, #0x10]\n"
- "ldr x21, [x21, #0x18]\n"
- "cbnz x26, 47f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x22, x22, x20, LSL #2\n"
- "add x21, x21, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "ldr x20, [x20, #0x18]\n"
+ "cbnz x25, 47f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "add x20, x20, x19, LSL #2\n"
"b 47f\n"
"46:" // Height 4: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"47:" // Height 4: input setup done
- "cmp x25, #0x4\n"
+ "cmp x24, #0x4\n"
"ble 49f\n"
"48:" // Height 4: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x25\n"
- "ld1rqw { z0.s }, p0/Z, [x24]\n"
- "ld1rqw { z1.s }, p0/Z, [x23]\n"
+ "whilelt p0.s, XZR, x24\n"
+ "ld1rqw { z0.s }, p0/Z, [x23]\n"
+ "ld1rqw { z1.s }, p0/Z, [x22]\n"
".inst 0x658abc00 // bfcvt z0.h, p7/M, z0.s\n"
- "ld1rqw { z2.s }, p0/Z, [x22]\n"
- "ld1rqw { z3.s }, p0/Z, [x21]\n"
+ "ld1rqw { z2.s }, p0/Z, [x21]\n"
+ "ld1rqw { z3.s }, p0/Z, [x20]\n"
".inst 0x658abc21 // bfcvt z1.h, p7/M, z1.s\n"
".inst 0x658abc42 // bfcvt z2.h, p7/M, z2.s\n"
".inst 0x658abc63 // bfcvt z3.h, p7/M, z3.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "ld1h { z4.h }, p7/Z, [x28]\n"
- "ld1h { z5.h }, p7/Z, [x28, #1, MUL VL]\n"
+ "ld1h { z4.h }, p7/Z, [x27]\n"
+ "ld1h { z5.h }, p7/Z, [x27, #1, MUL VL]\n"
"uzp1 z1.h, z1.h, z1.h\n"
"uzp1 z2.h, z2.h, z2.h\n"
- "ld1h { z6.h }, p7/Z, [x28, #2, MUL VL]\n"
- "ld1h { z7.h }, p7/Z, [x28, #3, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x27, #2, MUL VL]\n"
+ "ld1h { z7.h }, p7/Z, [x27, #3, MUL VL]\n"
"uzp1 z3.h, z3.h, z3.h\n"
"trn1 z0.d, z0.d, z1.d\n"
".inst 0x6464e408 // bfmmla z8.s, z0.h, z4.h\n"
- "sub x25, x25, #0x4\n"
+ "sub x24, x24, #0x4\n"
"trn1 z2.d, z2.d, z3.d\n"
".inst 0x6464e454 // bfmmla z20.s, z2.h, z4.h\n"
".inst 0x6465e40e // bfmmla z14.s, z0.h, z5.h\n"
- "ld1h { z4.h }, p7/Z, [x28, #4, MUL VL]\n"
+ "ld1h { z4.h }, p7/Z, [x27, #4, MUL VL]\n"
".inst 0x6465e45a // bfmmla z26.s, z2.h, z5.h\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
- "ld1h { z5.h }, p7/Z, [x28, #5, MUL VL]\n"
- "cmp x25, #0x4\n"
+ "ld1h { z5.h }, p7/Z, [x27, #5, MUL VL]\n"
+ "cmp x24, #0x4\n"
".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p7/Z, [x28, #6, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "ld1h { z6.h }, p7/Z, [x27, #6, MUL VL]\n"
+ "add x23, x23, #0x10\n"
".inst 0x6467e45b // bfmmla z27.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p7/Z, [x28, #7, MUL VL]\n"
- "addvl x28, x28, #16\n"
+ "ld1h { z7.h }, p7/Z, [x27, #7, MUL VL]\n"
+ "addvl x27, x27, #16\n"
".inst 0x6464e40a // bfmmla z10.s, z0.h, z4.h\n"
".inst 0x6464e456 // bfmmla z22.s, z2.h, z4.h\n"
".inst 0x6465e410 // bfmmla z16.s, z0.h, z5.h\n"
- "ld1h { z4.h }, p7/Z, [x28, #-8, MUL VL]\n"
- "add x23, x23, #0x10\n"
+ "ld1h { z4.h }, p7/Z, [x27, #-8, MUL VL]\n"
+ "add x22, x22, #0x10\n"
".inst 0x6465e45c // bfmmla z28.s, z2.h, z5.h\n"
".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
- "ld1h { z5.h }, p7/Z, [x28, #-7, MUL VL]\n"
- "add x22, x22, #0x10\n"
+ "ld1h { z5.h }, p7/Z, [x27, #-7, MUL VL]\n"
+ "add x21, x21, #0x10\n"
".inst 0x6466e457 // bfmmla z23.s, z2.h, z6.h\n"
".inst 0x6467e411 // bfmmla z17.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p7/Z, [x28, #-6, MUL VL]\n"
- "add x21, x21, #0x10\n"
+ "ld1h { z6.h }, p7/Z, [x27, #-6, MUL VL]\n"
+ "add x20, x20, #0x10\n"
".inst 0x6467e45d // bfmmla z29.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p7/Z, [x28, #-5, MUL VL]\n"
+ "ld1h { z7.h }, p7/Z, [x27, #-5, MUL VL]\n"
".inst 0x6464e40c // bfmmla z12.s, z0.h, z4.h\n"
- "addvl x28, x28, #-4\n"
+ "addvl x27, x27, #-4\n"
".inst 0x6464e458 // bfmmla z24.s, z2.h, z4.h\n"
".inst 0x6465e412 // bfmmla z18.s, z0.h, z5.h\n"
".inst 0x6465e45e // bfmmla z30.s, z2.h, z5.h\n"
@@ -1114,52 +1114,52 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
".inst 0x6467e45f // bfmmla z31.s, z2.h, z7.h\n"
"bgt 48b\n"
"49:" // Height 4: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x25\n"
- "ld1rqw { z0.s }, p0/Z, [x24]\n"
- "ld1rqw { z1.s }, p0/Z, [x23]\n"
+ "whilelt p0.s, XZR, x24\n"
+ "ld1rqw { z0.s }, p0/Z, [x23]\n"
+ "ld1rqw { z1.s }, p0/Z, [x22]\n"
".inst 0x658abc00 // bfcvt z0.h, p7/M, z0.s\n"
- "ld1rqw { z2.s }, p0/Z, [x22]\n"
- "ld1rqw { z3.s }, p0/Z, [x21]\n"
+ "ld1rqw { z2.s }, p0/Z, [x21]\n"
+ "ld1rqw { z3.s }, p0/Z, [x20]\n"
".inst 0x658abc21 // bfcvt z1.h, p7/M, z1.s\n"
".inst 0x658abc42 // bfcvt z2.h, p7/M, z2.s\n"
".inst 0x658abc63 // bfcvt z3.h, p7/M, z3.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "ld1h { z4.h }, p7/Z, [x28]\n"
- "ld1h { z5.h }, p7/Z, [x28, #1, MUL VL]\n"
+ "ld1h { z4.h }, p7/Z, [x27]\n"
+ "ld1h { z5.h }, p7/Z, [x27, #1, MUL VL]\n"
"uzp1 z1.h, z1.h, z1.h\n"
"uzp1 z2.h, z2.h, z2.h\n"
- "ld1h { z6.h }, p7/Z, [x28, #2, MUL VL]\n"
- "ld1h { z7.h }, p7/Z, [x28, #3, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x27, #2, MUL VL]\n"
+ "ld1h { z7.h }, p7/Z, [x27, #3, MUL VL]\n"
"uzp1 z3.h, z3.h, z3.h\n"
"trn1 z0.d, z0.d, z1.d\n"
".inst 0x6464e408 // bfmmla z8.s, z0.h, z4.h\n"
"trn1 z2.d, z2.d, z3.d\n"
".inst 0x6464e454 // bfmmla z20.s, z2.h, z4.h\n"
".inst 0x6465e40e // bfmmla z14.s, z0.h, z5.h\n"
- "ld1h { z4.h }, p7/Z, [x28, #4, MUL VL]\n"
+ "ld1h { z4.h }, p7/Z, [x27, #4, MUL VL]\n"
".inst 0x6465e45a // bfmmla z26.s, z2.h, z5.h\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
- "ld1h { z5.h }, p7/Z, [x28, #5, MUL VL]\n"
+ "ld1h { z5.h }, p7/Z, [x27, #5, MUL VL]\n"
".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p7/Z, [x28, #6, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x27, #6, MUL VL]\n"
".inst 0x6467e45b // bfmmla z27.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p7/Z, [x28, #7, MUL VL]\n"
- "addvl x28, x28, #16\n"
+ "ld1h { z7.h }, p7/Z, [x27, #7, MUL VL]\n"
+ "addvl x27, x27, #16\n"
".inst 0x6464e40a // bfmmla z10.s, z0.h, z4.h\n"
".inst 0x6464e456 // bfmmla z22.s, z2.h, z4.h\n"
".inst 0x6465e410 // bfmmla z16.s, z0.h, z5.h\n"
- "ld1h { z4.h }, p7/Z, [x28, #-8, MUL VL]\n"
+ "ld1h { z4.h }, p7/Z, [x27, #-8, MUL VL]\n"
".inst 0x6465e45c // bfmmla z28.s, z2.h, z5.h\n"
".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
- "ld1h { z5.h }, p7/Z, [x28, #-7, MUL VL]\n"
+ "ld1h { z5.h }, p7/Z, [x27, #-7, MUL VL]\n"
".inst 0x6466e457 // bfmmla z23.s, z2.h, z6.h\n"
".inst 0x6467e411 // bfmmla z17.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p7/Z, [x28, #-6, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x27, #-6, MUL VL]\n"
".inst 0x6467e45d // bfmmla z29.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p7/Z, [x28, #-5, MUL VL]\n"
+ "ld1h { z7.h }, p7/Z, [x27, #-5, MUL VL]\n"
".inst 0x6464e40c // bfmmla z12.s, z0.h, z4.h\n"
- "addvl x28, x28, #-4\n"
+ "addvl x27, x27, #-4\n"
".inst 0x6464e458 // bfmmla z24.s, z2.h, z4.h\n"
".inst 0x6465e412 // bfmmla z18.s, z0.h, z5.h\n"
".inst 0x6465e45e // bfmmla z30.s, z2.h, z5.h\n"
@@ -1168,17 +1168,17 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
".inst 0x6467e413 // bfmmla z19.s, z0.h, z7.h\n"
".inst 0x6467e45f // bfmmla z31.s, z2.h, z7.h\n"
"50:" // Height 4: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 45b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x22, x26, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"uzp1 z4.d, z8.d, z14.d\n"
"uzp2 z8.d, z8.d, z14.d\n"
"uzp1 z14.d, z9.d, z15.d\n"
- "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"uzp2 z9.d, z9.d, z15.d\n"
"uzp1 z15.d, z10.d, z16.d\n"
"uzp2 z10.d, z10.d, z16.d\n"
@@ -1201,10 +1201,10 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"uzp1 z30.d, z25.d, z31.d\n"
"uzp2 z25.d, z25.d, z31.d\n"
"tbz %x[flags], #1, 51f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p7/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p7/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p7/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p7/Z, [x19]\n"
"fmin z4.s, p7/M, z4.s, z1.s\n"
"fmin z14.s, p7/M, z14.s, z1.s\n"
"fmin z15.s, p7/M, z15.s, z1.s\n"
@@ -1254,51 +1254,51 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"fmax z24.s, p7/M, z24.s, z0.s\n"
"fmax z25.s, p7/M, z25.s, z0.s\n"
"51:" // Height 4: No activation
- "st1w { z4.s }, p6, [x27]\n"
- "st1w { z14.s }, p5, [x27, #1, MUL VL]\n"
- "st1w { z15.s }, p4, [x27, #2, MUL VL]\n"
- "st1w { z16.s }, p3, [x27, #3, MUL VL]\n"
- "st1w { z17.s }, p2, [x27, #4, MUL VL]\n"
- "st1w { z18.s }, p1, [x27, #5, MUL VL]\n"
- "addvl x27, x27, #6\n"
- "st1w { z8.s }, p6, [x23]\n"
- "st1w { z9.s }, p5, [x23, #1, MUL VL]\n"
- "st1w { z10.s }, p4, [x23, #2, MUL VL]\n"
- "st1w { z11.s }, p3, [x23, #3, MUL VL]\n"
- "st1w { z12.s }, p2, [x23, #4, MUL VL]\n"
- "st1w { z13.s }, p1, [x23, #5, MUL VL]\n"
- "st1w { z19.s }, p6, [x22]\n"
- "st1w { z26.s }, p5, [x22, #1, MUL VL]\n"
- "st1w { z27.s }, p4, [x22, #2, MUL VL]\n"
- "st1w { z28.s }, p3, [x22, #3, MUL VL]\n"
- "st1w { z29.s }, p2, [x22, #4, MUL VL]\n"
- "st1w { z30.s }, p1, [x22, #5, MUL VL]\n"
- "st1w { z20.s }, p6, [x21]\n"
- "st1w { z21.s }, p5, [x21, #1, MUL VL]\n"
- "st1w { z22.s }, p4, [x21, #2, MUL VL]\n"
- "st1w { z23.s }, p3, [x21, #3, MUL VL]\n"
- "st1w { z24.s }, p2, [x21, #4, MUL VL]\n"
- "st1w { z25.s }, p1, [x21, #5, MUL VL]\n"
+ "st1w { z4.s }, p6, [x26]\n"
+ "st1w { z14.s }, p5, [x26, #1, MUL VL]\n"
+ "st1w { z15.s }, p4, [x26, #2, MUL VL]\n"
+ "st1w { z16.s }, p3, [x26, #3, MUL VL]\n"
+ "st1w { z17.s }, p2, [x26, #4, MUL VL]\n"
+ "st1w { z18.s }, p1, [x26, #5, MUL VL]\n"
+ "addvl x26, x26, #6\n"
+ "st1w { z8.s }, p6, [x22]\n"
+ "st1w { z9.s }, p5, [x22, #1, MUL VL]\n"
+ "st1w { z10.s }, p4, [x22, #2, MUL VL]\n"
+ "st1w { z11.s }, p3, [x22, #3, MUL VL]\n"
+ "st1w { z12.s }, p2, [x22, #4, MUL VL]\n"
+ "st1w { z13.s }, p1, [x22, #5, MUL VL]\n"
+ "st1w { z19.s }, p6, [x21]\n"
+ "st1w { z26.s }, p5, [x21, #1, MUL VL]\n"
+ "st1w { z27.s }, p4, [x21, #2, MUL VL]\n"
+ "st1w { z28.s }, p3, [x21, #3, MUL VL]\n"
+ "st1w { z29.s }, p2, [x21, #4, MUL VL]\n"
+ "st1w { z30.s }, p1, [x21, #5, MUL VL]\n"
+ "st1w { z20.s }, p6, [x20]\n"
+ "st1w { z21.s }, p5, [x20, #1, MUL VL]\n"
+ "st1w { z22.s }, p4, [x20, #2, MUL VL]\n"
+ "st1w { z23.s }, p3, [x20, #3, MUL VL]\n"
+ "st1w { z24.s }, p2, [x20, #4, MUL VL]\n"
+ "st1w { z25.s }, p1, [x20, #5, MUL VL]\n"
"52:" // Height 4: Writeback done
- "decw x9, ALL, MUL #6\n"
- "cmp x9, XZR\n"
+ "decw x28, ALL, MUL #6\n"
+ "cmp x28, XZR\n"
"bgt 41b\n"
"subs %x[M], %x[M], #0x4\n"
"beq 54f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 53f\n"
- "add x21, x21, #0x4\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x4\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"53:" // Update direct input
- "mov x20, #0x10\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x10\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"54:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32bf16fp32_mmla_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32bf16fp32_mmla_6x4VL/generic.cpp
index 23d7ff9c3b..236eebad66 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32bf16fp32_mmla_6x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32bf16fp32_mmla_6x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -103,42 +103,42 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"cmp %x[M], #0x2\n"
"bgt 27f\n"
"beq 14f\n"
- "mov x12, %x[bias]\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x11, %x[bias]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "cbz x12, 3f\n"
- "ld1w { z8.s }, p5/Z, [x12]\n"
- "ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
+ "cbz x11, 3f\n"
+ "ld1w { z8.s }, p5/Z, [x11]\n"
+ "ld1w { z9.s }, p5/Z, [x11, #1, MUL VL]\n"
+ "ld1w { z10.s }, p5/Z, [x11, #2, MUL VL]\n"
"zip2 z12.d, z8.d, z8.d\n"
"zip1 z8.d, z8.d, z8.d\n"
- "ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x11, #3, MUL VL]\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
+ "addvl x11, x11, #4\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x12, x12, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
"b 5f\n"
"3:" // Height 1: no bias
"tbz %x[flags], #0, 4f\n"
- "ld1w { z9.s }, p4/Z, [x9]\n"
- "ld1w { z10.s }, p3/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z9.s }, p4/Z, [x28]\n"
+ "ld1w { z10.s }, p3/Z, [x28, #1, MUL VL]\n"
"zip1 z8.d, z9.d, z12.d\n"
+ "ld1w { z11.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x28, #3, MUL VL]\n"
"zip2 z12.d, z9.d, z12.d\n"
- "ld1w { z11.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"zip1 z9.d, z10.d, z13.d\n"
"zip2 z13.d, z10.d, z13.d\n"
"zip1 z10.d, z11.d, z14.d\n"
@@ -156,86 +156,86 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"mov z14.b, #0x0\n"
"mov z15.b, #0x0\n"
"5:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"6:" // Height 1: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 7f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 8f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 8f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
"b 8f\n"
"7:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"8:" // Height 1: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"ble 10f\n"
"9:" // Height 1: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
".inst 0x658ab400 // bfcvt z0.h, p5/M, z0.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #1, MUL VL]\n"
".inst 0x6466e408 // bfmmla z8.s, z0.h, z6.h\n"
".inst 0x6467e40c // bfmmla z12.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #2, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
".inst 0x6467e40d // bfmmla z13.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #4, MUL VL]\n"
- "ld1h { z7.h }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #4, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x6466e40a // bfmmla z10.s, z0.h, z6.h\n"
".inst 0x6467e40e // bfmmla z14.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #6, MUL VL]\n"
- "ld1h { z7.h }, p5/Z, [x10, #7, MUL VL]\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
+ "ld1h { z6.h }, p5/Z, [x9, #6, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #7, MUL VL]\n"
+ "sub x26, x26, #0x4\n"
+ "cmp x26, #0x4\n"
".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
- "add x26, x26, #0x10\n"
- "addvl x10, x10, #8\n"
+ "add x25, x25, #0x10\n"
+ "addvl x9, x9, #8\n"
"bgt 9b\n"
"10:" // Height 1: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
".inst 0x658ab400 // bfcvt z0.h, p5/M, z0.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #1, MUL VL]\n"
".inst 0x6466e408 // bfmmla z8.s, z0.h, z6.h\n"
".inst 0x6467e40c // bfmmla z12.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #2, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
".inst 0x6467e40d // bfmmla z13.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #4, MUL VL]\n"
- "ld1h { z7.h }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #4, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x6466e40a // bfmmla z10.s, z0.h, z6.h\n"
".inst 0x6467e40e // bfmmla z14.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #6, MUL VL]\n"
- "ld1h { z7.h }, p5/Z, [x10, #7, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #6, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #7, MUL VL]\n"
".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
- "addvl x10, x10, #8\n"
+ "addvl x9, x9, #8\n"
"11:" // Height 1: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 6b\n"
"uzp1 z8.d, z8.d, z12.d\n"
"uzp1 z9.d, z9.d, z13.d\n"
"uzp1 z10.d, z10.d, z14.d\n"
"uzp1 z11.d, z11.d, z15.d\n"
"tbz %x[flags], #1, 12f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
"fmin z8.s, p5/M, z8.s, z1.s\n"
"fmin z9.s, p5/M, z9.s, z1.s\n"
"fmin z10.s, p5/M, z10.s, z1.s\n"
@@ -245,61 +245,61 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"fmax z10.s, p5/M, z10.s, z0.s\n"
"fmax z11.s, p5/M, z11.s, z0.s\n"
"12:" // Height 1: No activation
- "st1w { z8.s }, p4, [x9]\n"
- "st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "st1w { z8.s }, p4, [x28]\n"
+ "st1w { z9.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"13:" // Height 1: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 2b\n"
"b 80f\n"
"14:" // Height 2
- "mov x12, %x[bias]\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x11, %x[bias]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"15:" // Height 2: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "cbz x12, 16f\n"
- "ld1w { z8.s }, p5/Z, [x12]\n"
- "ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
+ "cbz x11, 16f\n"
+ "ld1w { z8.s }, p5/Z, [x11]\n"
+ "ld1w { z9.s }, p5/Z, [x11, #1, MUL VL]\n"
+ "ld1w { z10.s }, p5/Z, [x11, #2, MUL VL]\n"
"zip2 z12.d, z8.d, z8.d\n"
"zip1 z8.d, z8.d, z8.d\n"
- "ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x11, #3, MUL VL]\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
+ "addvl x11, x11, #4\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x12, x12, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
"b 18f\n"
"16:" // Height 2: no bias
"tbz %x[flags], #0, 17f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "ld1w { z9.s }, p4/Z, [x9]\n"
- "ld1w { z10.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z11.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x25]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "ld1w { z9.s }, p4/Z, [x28]\n"
+ "ld1w { z10.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
"zip1 z8.d, z9.d, z12.d\n"
"zip2 z12.d, z9.d, z12.d\n"
- "ld1w { z13.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x25, #2, MUL VL]\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
"zip1 z9.d, z10.d, z13.d\n"
"zip2 z13.d, z10.d, z13.d\n"
- "ld1w { z15.s }, p1/Z, [x25, #3, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
"zip1 z10.d, z11.d, z14.d\n"
"zip2 z14.d, z11.d, z14.d\n"
"zip1 z11.d, z16.d, z15.d\n"
@@ -315,93 +315,93 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"mov z14.b, #0x0\n"
"mov z15.b, #0x0\n"
"18:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"19:" // Height 2: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 20f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 21f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 21f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
"b 21f\n"
"20:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
"21:" // Height 2: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"ble 23f\n"
"22:" // Height 2: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1rqw { z1.s }, p0/Z, [x25]\n"
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
".inst 0x658ab400 // bfcvt z0.h, p5/M, z0.s\n"
".inst 0x658ab421 // bfcvt z1.h, p5/M, z1.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #1, MUL VL]\n"
"uzp1 z1.h, z1.h, z1.h\n"
"trn1 z0.d, z0.d, z1.d\n"
".inst 0x6466e408 // bfmmla z8.s, z0.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x6467e40c // bfmmla z12.s, z0.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #4, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x6467e40d // bfmmla z13.s, z0.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x6466e40a // bfmmla z10.s, z0.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x6467e40e // bfmmla z14.s, z0.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #7, MUL VL]\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
+ "ld1h { z7.h }, p5/Z, [x9, #7, MUL VL]\n"
+ "sub x26, x26, #0x4\n"
+ "cmp x26, #0x4\n"
".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
- "addvl x10, x10, #8\n"
+ "add x24, x24, #0x10\n"
+ "addvl x9, x9, #8\n"
"bgt 22b\n"
"23:" // Height 2: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1rqw { z1.s }, p0/Z, [x25]\n"
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
".inst 0x658ab400 // bfcvt z0.h, p5/M, z0.s\n"
".inst 0x658ab421 // bfcvt z1.h, p5/M, z1.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #1, MUL VL]\n"
"uzp1 z1.h, z1.h, z1.h\n"
"trn1 z0.d, z0.d, z1.d\n"
".inst 0x6466e408 // bfmmla z8.s, z0.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x6467e40c // bfmmla z12.s, z0.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #4, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x6467e40d // bfmmla z13.s, z0.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x6466e40a // bfmmla z10.s, z0.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x6467e40e // bfmmla z14.s, z0.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #7, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #7, MUL VL]\n"
".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
- "addvl x10, x10, #8\n"
+ "addvl x9, x9, #8\n"
".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
"24:" // Height 2: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 19b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 z6.d, z8.d, z12.d\n"
"uzp2 z8.d, z8.d, z12.d\n"
- "add x25, x9, x20, LSL #2\n"
+ "add x24, x28, x19, LSL #2\n"
"uzp1 z12.d, z9.d, z13.d\n"
"uzp2 z9.d, z9.d, z13.d\n"
"uzp1 z13.d, z10.d, z14.d\n"
@@ -409,10 +409,10 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
"tbz %x[flags], #1, 25f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
"fmin z6.s, p5/M, z6.s, z1.s\n"
"fmin z12.s, p5/M, z12.s, z1.s\n"
"fmin z13.s, p5/M, z13.s, z1.s\n"
@@ -430,46 +430,46 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"fmax z10.s, p5/M, z10.s, z0.s\n"
"fmax z11.s, p5/M, z11.s, z0.s\n"
"25:" // Height 2: No activation
- "st1w { z6.s }, p4, [x9]\n"
- "st1w { z12.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z8.s }, p4, [x25]\n"
- "st1w { z9.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x25, #3, MUL VL]\n"
+ "st1w { z6.s }, p4, [x28]\n"
+ "st1w { z12.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z13.s }, p2, [x28, #2, MUL VL]\n"
+ "st1w { z14.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z8.s }, p4, [x24]\n"
+ "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
"26:" // Height 2: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 15b\n"
"b 80f\n"
"27:" // Height 3
- "mov x12, %x[bias]\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x11, %x[bias]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"28:" // Height 3: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "cbz x12, 29f\n"
- "ld1w { z8.s }, p5/Z, [x12]\n"
- "ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
+ "cbz x11, 29f\n"
+ "ld1w { z8.s }, p5/Z, [x11]\n"
+ "ld1w { z9.s }, p5/Z, [x11, #1, MUL VL]\n"
+ "ld1w { z10.s }, p5/Z, [x11, #2, MUL VL]\n"
"zip2 z12.d, z8.d, z8.d\n"
"zip1 z8.d, z8.d, z8.d\n"
- "ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x11, #3, MUL VL]\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
+ "addvl x11, x11, #4\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x12, x12, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
"mov z16.d, z8.d\n"
@@ -483,29 +483,29 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"b 31f\n"
"29:" // Height 3: no bias
"tbz %x[flags], #0, 30f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z9.s }, p4/Z, [x9]\n"
- "ld1w { z10.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z11.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x25]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z9.s }, p4/Z, [x28]\n"
+ "ld1w { z10.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
"zip1 z8.d, z9.d, z12.d\n"
"zip2 z12.d, z9.d, z12.d\n"
- "ld1w { z13.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x25, #2, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
"zip1 z9.d, z10.d, z13.d\n"
"zip2 z13.d, z10.d, z13.d\n"
- "ld1w { z15.s }, p1/Z, [x25, #3, MUL VL]\n"
- "ld1w { z17.s }, p4/Z, [x24]\n"
+ "ld1w { z17.s }, p4/Z, [x23]\n"
+ "ld1w { z18.s }, p3/Z, [x23, #1, MUL VL]\n"
"zip1 z10.d, z11.d, z14.d\n"
"zip2 z14.d, z11.d, z14.d\n"
- "ld1w { z18.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z19.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z19.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z24.s }, p1/Z, [x23, #3, MUL VL]\n"
"zip1 z11.d, z16.d, z15.d\n"
"zip2 z15.d, z16.d, z15.d\n"
- "ld1w { z24.s }, p1/Z, [x24, #3, MUL VL]\n"
"zip1 z16.d, z17.d, z20.d\n"
"zip2 z20.d, z17.d, z20.d\n"
"zip1 z17.d, z18.d, z21.d\n"
@@ -533,122 +533,122 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"mov z22.b, #0x0\n"
"mov z23.b, #0x0\n"
"31:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"32:" // Height 3: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 33f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 34f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 34f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
"b 34f\n"
"33:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"34:" // Height 3: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"ble 36f\n"
"35:" // Height 3: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1rqw { z1.s }, p0/Z, [x25]\n"
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
".inst 0x658ab400 // bfcvt z0.h, p5/M, z0.s\n"
- "ld1rqw { z2.s }, p0/Z, [x24]\n"
+ "ld1rqw { z2.s }, p0/Z, [x23]\n"
".inst 0x658ab421 // bfcvt z1.h, p5/M, z1.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
"uzp1 z1.h, z1.h, z1.h\n"
".inst 0x658ab442 // bfcvt z2.h, p5/M, z2.s\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
- "sub x27, x27, #0x4\n"
+ "ld1h { z7.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "sub x26, x26, #0x4\n"
"trn1 z0.d, z0.d, z1.d\n"
"uzp1 z2.h, z2.h, z2.h\n"
".inst 0x6466e408 // bfmmla z8.s, z0.h, z6.h\n"
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
".inst 0x6466e450 // bfmmla z16.s, z2.h, z6.h\n"
".inst 0x6467e40c // bfmmla z12.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "add x26, x26, #0x10\n"
+ "ld1h { z6.h }, p5/Z, [x9, #2, MUL VL]\n"
+ "add x25, x25, #0x10\n"
".inst 0x6467e454 // bfmmla z20.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
- "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
".inst 0x6466e451 // bfmmla z17.s, z2.h, z6.h\n"
".inst 0x6467e40d // bfmmla z13.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #4, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "ld1h { z6.h }, p5/Z, [x9, #4, MUL VL]\n"
+ "add x23, x23, #0x10\n"
".inst 0x6467e455 // bfmmla z21.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x6466e40a // bfmmla z10.s, z0.h, z6.h\n"
".inst 0x6466e452 // bfmmla z18.s, z2.h, z6.h\n"
".inst 0x6467e40e // bfmmla z14.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x6467e456 // bfmmla z22.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #7, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #7, MUL VL]\n"
".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
- "addvl x10, x10, #8\n"
+ "addvl x9, x9, #8\n"
".inst 0x6466e453 // bfmmla z19.s, z2.h, z6.h\n"
".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
".inst 0x6467e457 // bfmmla z23.s, z2.h, z7.h\n"
"bgt 35b\n"
"36:" // Height 3: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1rqw { z1.s }, p0/Z, [x25]\n"
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
".inst 0x658ab400 // bfcvt z0.h, p5/M, z0.s\n"
- "ld1rqw { z2.s }, p0/Z, [x24]\n"
+ "ld1rqw { z2.s }, p0/Z, [x23]\n"
".inst 0x658ab421 // bfcvt z1.h, p5/M, z1.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
"uzp1 z1.h, z1.h, z1.h\n"
".inst 0x658ab442 // bfcvt z2.h, p5/M, z2.s\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #1, MUL VL]\n"
"trn1 z0.d, z0.d, z1.d\n"
"uzp1 z2.h, z2.h, z2.h\n"
".inst 0x6466e408 // bfmmla z8.s, z0.h, z6.h\n"
".inst 0x6466e450 // bfmmla z16.s, z2.h, z6.h\n"
".inst 0x6467e40c // bfmmla z12.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x6467e454 // bfmmla z20.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
".inst 0x6466e451 // bfmmla z17.s, z2.h, z6.h\n"
".inst 0x6467e40d // bfmmla z13.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #4, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x6467e455 // bfmmla z21.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x6466e40a // bfmmla z10.s, z0.h, z6.h\n"
".inst 0x6466e452 // bfmmla z18.s, z2.h, z6.h\n"
".inst 0x6467e40e // bfmmla z14.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x6467e456 // bfmmla z22.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #7, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #7, MUL VL]\n"
".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
- "addvl x10, x10, #8\n"
+ "addvl x9, x9, #8\n"
".inst 0x6466e453 // bfmmla z19.s, z2.h, z6.h\n"
".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
".inst 0x6467e457 // bfmmla z23.s, z2.h, z7.h\n"
"37:" // Height 3: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 32b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #2\n"
"uzp1 z6.d, z8.d, z12.d\n"
"uzp2 z8.d, z8.d, z12.d\n"
"uzp1 z12.d, z9.d, z13.d\n"
"uzp2 z9.d, z9.d, z13.d\n"
- "add x24, x25, x20, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"uzp1 z13.d, z10.d, z14.d\n"
"uzp2 z10.d, z10.d, z14.d\n"
"uzp1 z14.d, z11.d, z15.d\n"
@@ -658,10 +658,10 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"uzp1 z18.d, z18.d, z22.d\n"
"uzp1 z19.d, z19.d, z23.d\n"
"tbz %x[flags], #1, 38f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
"fmin z6.s, p5/M, z6.s, z1.s\n"
"fmin z12.s, p5/M, z12.s, z1.s\n"
"fmin z13.s, p5/M, z13.s, z1.s\n"
@@ -687,50 +687,50 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"fmax z18.s, p5/M, z18.s, z0.s\n"
"fmax z19.s, p5/M, z19.s, z0.s\n"
"38:" // Height 3: No activation
- "st1w { z6.s }, p4, [x9]\n"
- "st1w { z12.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z8.s }, p4, [x25]\n"
- "st1w { z9.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x24]\n"
- "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z6.s }, p4, [x28]\n"
+ "st1w { z12.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z13.s }, p2, [x28, #2, MUL VL]\n"
+ "st1w { z14.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z8.s }, p4, [x24]\n"
+ "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x23]\n"
+ "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
"39:" // Height 3: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 28b\n"
"b 80f\n"
"40:" // Height 4
- "mov x12, %x[bias]\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x11, %x[bias]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"41:" // Height 4: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "cbz x12, 42f\n"
- "ld1w { z8.s }, p5/Z, [x12]\n"
- "ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
+ "cbz x11, 42f\n"
+ "ld1w { z8.s }, p5/Z, [x11]\n"
+ "ld1w { z9.s }, p5/Z, [x11, #1, MUL VL]\n"
+ "ld1w { z10.s }, p5/Z, [x11, #2, MUL VL]\n"
"zip2 z12.d, z8.d, z8.d\n"
"zip1 z8.d, z8.d, z8.d\n"
- "ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x11, #3, MUL VL]\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
+ "addvl x11, x11, #4\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x12, x12, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
"mov z16.d, z8.d\n"
@@ -744,38 +744,38 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"b 44f\n"
"42:" // Height 4: no bias
"tbz %x[flags], #0, 43f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z9.s }, p4/Z, [x9]\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z10.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z11.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x25]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z9.s }, p4/Z, [x28]\n"
+ "ld1w { z10.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
"zip1 z8.d, z9.d, z12.d\n"
"zip2 z12.d, z9.d, z12.d\n"
- "ld1w { z13.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x25, #2, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
"zip1 z9.d, z10.d, z13.d\n"
"zip2 z13.d, z10.d, z13.d\n"
- "ld1w { z15.s }, p1/Z, [x25, #3, MUL VL]\n"
- "ld1w { z17.s }, p4/Z, [x24]\n"
+ "ld1w { z17.s }, p4/Z, [x23]\n"
+ "ld1w { z18.s }, p3/Z, [x23, #1, MUL VL]\n"
"zip1 z10.d, z11.d, z14.d\n"
"zip2 z14.d, z11.d, z14.d\n"
- "ld1w { z18.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z19.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z19.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z24.s }, p1/Z, [x23, #3, MUL VL]\n"
"zip1 z11.d, z16.d, z15.d\n"
"zip2 z15.d, z16.d, z15.d\n"
- "ld1w { z24.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x23]\n"
+ "ld1w { z20.s }, p4/Z, [x22]\n"
+ "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
"zip1 z16.d, z17.d, z20.d\n"
"zip2 z20.d, z17.d, z20.d\n"
- "ld1w { z21.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z22.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
"zip1 z17.d, z18.d, z21.d\n"
"zip2 z21.d, z18.d, z21.d\n"
- "ld1w { z23.s }, p1/Z, [x23, #3, MUL VL]\n"
"zip1 z18.d, z19.d, z22.d\n"
"zip2 z22.d, z19.d, z22.d\n"
"zip1 z19.d, z24.d, z23.d\n"
@@ -799,94 +799,94 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"mov z22.b, #0x0\n"
"mov z23.b, #0x0\n"
"44:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"45:" // Height 4: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 46f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 47f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 47f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
"b 47f\n"
"46:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"47:" // Height 4: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"ble 49f\n"
"48:" // Height 4: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1rqw { z1.s }, p0/Z, [x25]\n"
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
".inst 0x658ab400 // bfcvt z0.h, p5/M, z0.s\n"
- "ld1rqw { z2.s }, p0/Z, [x24]\n"
- "ld1rqw { z3.s }, p0/Z, [x23]\n"
+ "ld1rqw { z2.s }, p0/Z, [x23]\n"
+ "ld1rqw { z3.s }, p0/Z, [x22]\n"
".inst 0x658ab421 // bfcvt z1.h, p5/M, z1.s\n"
".inst 0x658ab442 // bfcvt z2.h, p5/M, z2.s\n"
".inst 0x658ab463 // bfcvt z3.h, p5/M, z3.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #1, MUL VL]\n"
"uzp1 z1.h, z1.h, z1.h\n"
"uzp1 z2.h, z2.h, z2.h\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
+ "sub x26, x26, #0x4\n"
+ "cmp x26, #0x4\n"
"uzp1 z3.h, z3.h, z3.h\n"
"trn1 z0.d, z0.d, z1.d\n"
".inst 0x6466e408 // bfmmla z8.s, z0.h, z6.h\n"
- "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
"trn1 z2.d, z2.d, z3.d\n"
".inst 0x6466e450 // bfmmla z16.s, z2.h, z6.h\n"
".inst 0x6467e40c // bfmmla z12.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x6467e454 // bfmmla z20.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
- "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
".inst 0x6466e451 // bfmmla z17.s, z2.h, z6.h\n"
".inst 0x6467e40d // bfmmla z13.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #4, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "ld1h { z6.h }, p5/Z, [x9, #4, MUL VL]\n"
+ "add x23, x23, #0x10\n"
".inst 0x6467e455 // bfmmla z21.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x6466e40a // bfmmla z10.s, z0.h, z6.h\n"
- "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
".inst 0x6466e452 // bfmmla z18.s, z2.h, z6.h\n"
".inst 0x6467e40e // bfmmla z14.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x6467e456 // bfmmla z22.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #7, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #7, MUL VL]\n"
".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
- "addvl x10, x10, #8\n"
+ "addvl x9, x9, #8\n"
".inst 0x6466e453 // bfmmla z19.s, z2.h, z6.h\n"
".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
".inst 0x6467e457 // bfmmla z23.s, z2.h, z7.h\n"
"bgt 48b\n"
"49:" // Height 4: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1rqw { z1.s }, p0/Z, [x25]\n"
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
".inst 0x658ab400 // bfcvt z0.h, p5/M, z0.s\n"
- "ld1rqw { z2.s }, p0/Z, [x24]\n"
- "ld1rqw { z3.s }, p0/Z, [x23]\n"
+ "ld1rqw { z2.s }, p0/Z, [x23]\n"
+ "ld1rqw { z3.s }, p0/Z, [x22]\n"
".inst 0x658ab421 // bfcvt z1.h, p5/M, z1.s\n"
".inst 0x658ab442 // bfcvt z2.h, p5/M, z2.s\n"
".inst 0x658ab463 // bfcvt z3.h, p5/M, z3.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #1, MUL VL]\n"
"uzp1 z1.h, z1.h, z1.h\n"
"uzp1 z2.h, z2.h, z2.h\n"
"uzp1 z3.h, z3.h, z3.h\n"
@@ -895,38 +895,38 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"trn1 z2.d, z2.d, z3.d\n"
".inst 0x6466e450 // bfmmla z16.s, z2.h, z6.h\n"
".inst 0x6467e40c // bfmmla z12.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x6467e454 // bfmmla z20.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
".inst 0x6466e451 // bfmmla z17.s, z2.h, z6.h\n"
".inst 0x6467e40d // bfmmla z13.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #4, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x6467e455 // bfmmla z21.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x6466e40a // bfmmla z10.s, z0.h, z6.h\n"
".inst 0x6466e452 // bfmmla z18.s, z2.h, z6.h\n"
".inst 0x6467e40e // bfmmla z14.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x6467e456 // bfmmla z22.s, z2.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #7, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #7, MUL VL]\n"
".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
- "addvl x10, x10, #8\n"
+ "addvl x9, x9, #8\n"
".inst 0x6466e453 // bfmmla z19.s, z2.h, z6.h\n"
".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
".inst 0x6467e457 // bfmmla z23.s, z2.h, z7.h\n"
"50:" // Height 4: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 45b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"uzp1 z6.d, z8.d, z12.d\n"
"uzp2 z8.d, z8.d, z12.d\n"
"uzp1 z12.d, z9.d, z13.d\n"
- "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"uzp2 z9.d, z9.d, z13.d\n"
"uzp1 z13.d, z10.d, z14.d\n"
"uzp2 z10.d, z10.d, z14.d\n"
@@ -941,10 +941,10 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"uzp1 z22.d, z19.d, z23.d\n"
"uzp2 z19.d, z19.d, z23.d\n"
"tbz %x[flags], #1, 51f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
"fmin z6.s, p5/M, z6.s, z1.s\n"
"fmin z12.s, p5/M, z12.s, z1.s\n"
"fmin z13.s, p5/M, z13.s, z1.s\n"
@@ -978,54 +978,54 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"fmax z18.s, p5/M, z18.s, z0.s\n"
"fmax z19.s, p5/M, z19.s, z0.s\n"
"51:" // Height 4: No activation
- "st1w { z6.s }, p4, [x9]\n"
- "st1w { z12.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z8.s }, p4, [x25]\n"
- "st1w { z9.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z15.s }, p4, [x24]\n"
- "st1w { z20.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z21.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z22.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x23]\n"
- "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z6.s }, p4, [x28]\n"
+ "st1w { z12.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z13.s }, p2, [x28, #2, MUL VL]\n"
+ "st1w { z14.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z8.s }, p4, [x24]\n"
+ "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z15.s }, p4, [x23]\n"
+ "st1w { z20.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z21.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z22.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x22]\n"
+ "st1w { z17.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x22, #3, MUL VL]\n"
"52:" // Height 4: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 41b\n"
"b 80f\n"
"53:" // Height 5
- "mov x12, %x[bias]\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "mov x11, %x[bias]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"54:" // Height 5: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "cbz x12, 55f\n"
- "ld1w { z8.s }, p5/Z, [x12]\n"
- "ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
+ "cbz x11, 55f\n"
+ "ld1w { z8.s }, p5/Z, [x11]\n"
+ "ld1w { z9.s }, p5/Z, [x11, #1, MUL VL]\n"
+ "ld1w { z10.s }, p5/Z, [x11, #2, MUL VL]\n"
"zip2 z12.d, z8.d, z8.d\n"
"zip1 z8.d, z8.d, z8.d\n"
- "ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x11, #3, MUL VL]\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
+ "addvl x11, x11, #4\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x12, x12, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
"mov z16.d, z8.d\n"
@@ -1047,47 +1047,47 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"b 57f\n"
"55:" // Height 5: no bias
"tbz %x[flags], #0, 56f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z9.s }, p4/Z, [x9]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "ld1w { z10.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z11.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x25]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z9.s }, p4/Z, [x28]\n"
+ "ld1w { z10.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
"zip1 z8.d, z9.d, z12.d\n"
"zip2 z12.d, z9.d, z12.d\n"
- "ld1w { z13.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x25, #2, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
"zip1 z9.d, z10.d, z13.d\n"
"zip2 z13.d, z10.d, z13.d\n"
- "ld1w { z15.s }, p1/Z, [x25, #3, MUL VL]\n"
- "ld1w { z17.s }, p4/Z, [x24]\n"
+ "ld1w { z17.s }, p4/Z, [x23]\n"
+ "ld1w { z18.s }, p3/Z, [x23, #1, MUL VL]\n"
"zip1 z10.d, z11.d, z14.d\n"
"zip2 z14.d, z11.d, z14.d\n"
- "ld1w { z18.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z19.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z19.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z24.s }, p1/Z, [x23, #3, MUL VL]\n"
"zip1 z11.d, z16.d, z15.d\n"
"zip2 z15.d, z16.d, z15.d\n"
- "ld1w { z24.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x23]\n"
+ "ld1w { z20.s }, p4/Z, [x22]\n"
+ "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
"zip1 z16.d, z17.d, z20.d\n"
"zip2 z20.d, z17.d, z20.d\n"
- "ld1w { z21.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z22.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
"zip1 z17.d, z18.d, z21.d\n"
"zip2 z21.d, z18.d, z21.d\n"
- "ld1w { z23.s }, p1/Z, [x23, #3, MUL VL]\n"
- "ld1w { z25.s }, p4/Z, [x22]\n"
+ "ld1w { z25.s }, p4/Z, [x21]\n"
+ "ld1w { z26.s }, p3/Z, [x21, #1, MUL VL]\n"
"zip1 z18.d, z19.d, z22.d\n"
"zip2 z22.d, z19.d, z22.d\n"
- "ld1w { z26.s }, p3/Z, [x22, #1, MUL VL]\n"
- "ld1w { z27.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z27.s }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z6.s }, p1/Z, [x21, #3, MUL VL]\n"
"zip1 z19.d, z24.d, z23.d\n"
"zip2 z23.d, z24.d, z23.d\n"
- "ld1w { z6.s }, p1/Z, [x22, #3, MUL VL]\n"
"zip1 z24.d, z25.d, z28.d\n"
"zip2 z28.d, z25.d, z28.d\n"
"zip1 z25.d, z26.d, z29.d\n"
@@ -1123,90 +1123,90 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"mov z30.b, #0x0\n"
"mov z31.b, #0x0\n"
"57:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"58:" // Height 5: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 59f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 60f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x22, x22, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 60f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
"b 60f\n"
"59:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"60:" // Height 5: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"ble 62f\n"
"61:" // Height 5: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1rqw { z1.s }, p0/Z, [x25]\n"
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
".inst 0x658ab400 // bfcvt z0.h, p5/M, z0.s\n"
- "ld1rqw { z2.s }, p0/Z, [x24]\n"
- "ld1rqw { z3.s }, p0/Z, [x23]\n"
+ "ld1rqw { z2.s }, p0/Z, [x23]\n"
+ "ld1rqw { z3.s }, p0/Z, [x22]\n"
".inst 0x658ab421 // bfcvt z1.h, p5/M, z1.s\n"
".inst 0x658ab442 // bfcvt z2.h, p5/M, z2.s\n"
- "ld1rqw { z4.s }, p0/Z, [x22]\n"
+ "ld1rqw { z4.s }, p0/Z, [x21]\n"
".inst 0x658ab463 // bfcvt z3.h, p5/M, z3.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
"uzp1 z1.h, z1.h, z1.h\n"
"uzp1 z2.h, z2.h, z2.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
- "sub x27, x27, #0x4\n"
+ "ld1h { z7.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "sub x26, x26, #0x4\n"
"uzp1 z3.h, z3.h, z3.h\n"
".inst 0x658ab484 // bfcvt z4.h, p5/M, z4.s\n"
- "cmp x27, #0x4\n"
- "add x26, x26, #0x10\n"
+ "cmp x26, #0x4\n"
+ "add x25, x25, #0x10\n"
"trn1 z0.d, z0.d, z1.d\n"
"trn1 z2.d, z2.d, z3.d\n"
".inst 0x6466e408 // bfmmla z8.s, z0.h, z6.h\n"
- "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"uzp1 z4.h, z4.h, z4.h\n"
".inst 0x6466e450 // bfmmla z16.s, z2.h, z6.h\n"
".inst 0x6466e498 // bfmmla z24.s, z4.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x6467e40c // bfmmla z12.s, z0.h, z7.h\n"
".inst 0x6467e454 // bfmmla z20.s, z2.h, z7.h\n"
- "add x24, x24, #0x10\n"
"add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
".inst 0x6467e49c // bfmmla z28.s, z4.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
- "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
".inst 0x6466e451 // bfmmla z17.s, z2.h, z6.h\n"
".inst 0x6466e499 // bfmmla z25.s, z4.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #4, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x6467e40d // bfmmla z13.s, z0.h, z7.h\n"
".inst 0x6467e455 // bfmmla z21.s, z2.h, z7.h\n"
".inst 0x6467e49d // bfmmla z29.s, z4.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x6466e40a // bfmmla z10.s, z0.h, z6.h\n"
".inst 0x6466e452 // bfmmla z18.s, z2.h, z6.h\n"
".inst 0x6466e49a // bfmmla z26.s, z4.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x6467e40e // bfmmla z14.s, z0.h, z7.h\n"
".inst 0x6467e456 // bfmmla z22.s, z2.h, z7.h\n"
".inst 0x6467e49e // bfmmla z30.s, z4.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #7, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #7, MUL VL]\n"
".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
- "addvl x10, x10, #8\n"
+ "addvl x9, x9, #8\n"
".inst 0x6466e453 // bfmmla z19.s, z2.h, z6.h\n"
".inst 0x6466e49b // bfmmla z27.s, z4.h, z6.h\n"
".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
@@ -1214,21 +1214,21 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
".inst 0x6467e49f // bfmmla z31.s, z4.h, z7.h\n"
"bgt 61b\n"
"62:" // Height 5: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1rqw { z1.s }, p0/Z, [x25]\n"
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
".inst 0x658ab400 // bfcvt z0.h, p5/M, z0.s\n"
- "ld1rqw { z2.s }, p0/Z, [x24]\n"
- "ld1rqw { z3.s }, p0/Z, [x23]\n"
+ "ld1rqw { z2.s }, p0/Z, [x23]\n"
+ "ld1rqw { z3.s }, p0/Z, [x22]\n"
".inst 0x658ab421 // bfcvt z1.h, p5/M, z1.s\n"
".inst 0x658ab442 // bfcvt z2.h, p5/M, z2.s\n"
- "ld1rqw { z4.s }, p0/Z, [x22]\n"
+ "ld1rqw { z4.s }, p0/Z, [x21]\n"
".inst 0x658ab463 // bfcvt z3.h, p5/M, z3.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
"uzp1 z1.h, z1.h, z1.h\n"
"uzp1 z2.h, z2.h, z2.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #1, MUL VL]\n"
"uzp1 z3.h, z3.h, z3.h\n"
".inst 0x658ab484 // bfcvt z4.h, p5/M, z4.s\n"
"trn1 z0.d, z0.d, z1.d\n"
@@ -1237,47 +1237,47 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"uzp1 z4.h, z4.h, z4.h\n"
".inst 0x6466e450 // bfmmla z16.s, z2.h, z6.h\n"
".inst 0x6466e498 // bfmmla z24.s, z4.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x6467e40c // bfmmla z12.s, z0.h, z7.h\n"
".inst 0x6467e454 // bfmmla z20.s, z2.h, z7.h\n"
".inst 0x6467e49c // bfmmla z28.s, z4.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
".inst 0x6466e451 // bfmmla z17.s, z2.h, z6.h\n"
".inst 0x6466e499 // bfmmla z25.s, z4.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #4, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x6467e40d // bfmmla z13.s, z0.h, z7.h\n"
".inst 0x6467e455 // bfmmla z21.s, z2.h, z7.h\n"
".inst 0x6467e49d // bfmmla z29.s, z4.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x6466e40a // bfmmla z10.s, z0.h, z6.h\n"
".inst 0x6466e452 // bfmmla z18.s, z2.h, z6.h\n"
".inst 0x6466e49a // bfmmla z26.s, z4.h, z6.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x6467e40e // bfmmla z14.s, z0.h, z7.h\n"
".inst 0x6467e456 // bfmmla z22.s, z2.h, z7.h\n"
".inst 0x6467e49e // bfmmla z30.s, z4.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #7, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #7, MUL VL]\n"
".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
- "addvl x10, x10, #8\n"
+ "addvl x9, x9, #8\n"
".inst 0x6466e453 // bfmmla z19.s, z2.h, z6.h\n"
".inst 0x6466e49b // bfmmla z27.s, z4.h, z6.h\n"
".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
".inst 0x6467e457 // bfmmla z23.s, z2.h, z7.h\n"
".inst 0x6467e49f // bfmmla z31.s, z4.h, z7.h\n"
"63:" // Height 5: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 58b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"uzp1 z6.d, z8.d, z12.d\n"
- "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
"uzp2 z8.d, z8.d, z12.d\n"
"uzp1 z12.d, z9.d, z13.d\n"
- "add x22, x23, x20, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"uzp2 z9.d, z9.d, z13.d\n"
"uzp1 z13.d, z10.d, z14.d\n"
"uzp2 z10.d, z10.d, z14.d\n"
@@ -1296,10 +1296,10 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"uzp1 z26.d, z26.d, z30.d\n"
"uzp1 z27.d, z27.d, z31.d\n"
"tbz %x[flags], #1, 64f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
"fmin z6.s, p5/M, z6.s, z1.s\n"
"fmin z12.s, p5/M, z12.s, z1.s\n"
"fmin z13.s, p5/M, z13.s, z1.s\n"
@@ -1341,61 +1341,61 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"fmax z26.s, p5/M, z26.s, z0.s\n"
"fmax z27.s, p5/M, z27.s, z0.s\n"
"64:" // Height 5: No activation
- "st1w { z6.s }, p4, [x9]\n"
- "st1w { z12.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z8.s }, p4, [x25]\n"
- "st1w { z9.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z15.s }, p4, [x24]\n"
- "st1w { z20.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z21.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z22.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x23]\n"
- "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z24.s }, p4, [x22]\n"
- "st1w { z25.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z26.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z27.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z6.s }, p4, [x28]\n"
+ "st1w { z12.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z13.s }, p2, [x28, #2, MUL VL]\n"
+ "st1w { z14.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z8.s }, p4, [x24]\n"
+ "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z15.s }, p4, [x23]\n"
+ "st1w { z20.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z21.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z22.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x22]\n"
+ "st1w { z17.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x21]\n"
+ "st1w { z25.s }, p3, [x21, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x21, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x21, #3, MUL VL]\n"
"65:" // Height 5: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 54b\n"
"b 80f\n"
"66:" // Height 6
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x18\n"
- "mov x12, %x[bias]\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x19, #0x18\n"
+ "mov x11, %x[bias]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"67:" // Height 6: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "cbz x12, 68f\n"
- "ld1w { z8.s }, p5/Z, [x12]\n"
- "ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
+ "cbz x11, 68f\n"
+ "ld1w { z8.s }, p5/Z, [x11]\n"
+ "ld1w { z9.s }, p5/Z, [x11, #1, MUL VL]\n"
+ "ld1w { z10.s }, p5/Z, [x11, #2, MUL VL]\n"
"zip2 z12.d, z8.d, z8.d\n"
"zip1 z8.d, z8.d, z8.d\n"
- "ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
- "ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x11, #3, MUL VL]\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
+ "addvl x11, x11, #4\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x12, x12, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
"mov z16.d, z8.d\n"
@@ -1417,55 +1417,55 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"b 70f\n"
"68:" // Height 6: no bias
"tbz %x[flags], #0, 69f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z9.s }, p4/Z, [x9]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "ld1w { z10.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z11.s }, p2/Z, [x9, #2, MUL VL]\n"
- "add x21, x22, x20, LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x25]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z9.s }, p4/Z, [x28]\n"
+ "ld1w { z10.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "add x20, x21, x19, LSL #2\n"
+ "ld1w { z11.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
"zip1 z8.d, z9.d, z12.d\n"
- "ld1w { z13.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x25, #2, MUL VL]\n"
"zip2 z12.d, z9.d, z12.d\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
"zip1 z9.d, z10.d, z13.d\n"
- "ld1w { z15.s }, p1/Z, [x25, #3, MUL VL]\n"
- "ld1w { z17.s }, p4/Z, [x24]\n"
"zip2 z13.d, z10.d, z13.d\n"
+ "ld1w { z17.s }, p4/Z, [x23]\n"
+ "ld1w { z18.s }, p3/Z, [x23, #1, MUL VL]\n"
"zip1 z10.d, z11.d, z14.d\n"
- "ld1w { z18.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z19.s }, p2/Z, [x24, #2, MUL VL]\n"
"zip2 z14.d, z11.d, z14.d\n"
+ "ld1w { z19.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z24.s }, p1/Z, [x23, #3, MUL VL]\n"
"zip1 z11.d, z16.d, z15.d\n"
- "ld1w { z24.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x23]\n"
"zip2 z15.d, z16.d, z15.d\n"
+ "ld1w { z20.s }, p4/Z, [x22]\n"
+ "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
"zip1 z16.d, z17.d, z20.d\n"
- "ld1w { z21.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z22.s }, p2/Z, [x23, #2, MUL VL]\n"
"zip2 z20.d, z17.d, z20.d\n"
+ "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
"zip1 z17.d, z18.d, z21.d\n"
- "ld1w { z23.s }, p1/Z, [x23, #3, MUL VL]\n"
- "ld1w { z25.s }, p4/Z, [x22]\n"
"zip2 z21.d, z18.d, z21.d\n"
+ "ld1w { z25.s }, p4/Z, [x21]\n"
+ "ld1w { z26.s }, p3/Z, [x21, #1, MUL VL]\n"
"zip1 z18.d, z19.d, z22.d\n"
- "ld1w { z26.s }, p3/Z, [x22, #1, MUL VL]\n"
- "ld1w { z27.s }, p2/Z, [x22, #2, MUL VL]\n"
"zip2 z22.d, z19.d, z22.d\n"
+ "ld1w { z27.s }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z6.s }, p1/Z, [x21, #3, MUL VL]\n"
"zip1 z19.d, z24.d, z23.d\n"
- "ld1w { z6.s }, p1/Z, [x22, #3, MUL VL]\n"
- "ld1w { z28.s }, p4/Z, [x21]\n"
"zip2 z23.d, z24.d, z23.d\n"
+ "ld1w { z28.s }, p4/Z, [x20]\n"
+ "ld1w { z29.s }, p3/Z, [x20, #1, MUL VL]\n"
"zip1 z24.d, z25.d, z28.d\n"
- "ld1w { z29.s }, p3/Z, [x21, #1, MUL VL]\n"
- "ld1w { z30.s }, p2/Z, [x21, #2, MUL VL]\n"
"zip2 z28.d, z25.d, z28.d\n"
+ "ld1w { z30.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z31.s }, p1/Z, [x20, #3, MUL VL]\n"
"zip1 z25.d, z26.d, z29.d\n"
- "ld1w { z31.s }, p1/Z, [x21, #3, MUL VL]\n"
"zip2 z29.d, z26.d, z29.d\n"
"zip1 z26.d, z27.d, z30.d\n"
"zip2 z30.d, z27.d, z30.d\n"
@@ -1498,97 +1498,97 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"mov z30.b, #0x0\n"
"mov z31.b, #0x0\n"
"70:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"71:" // Height 6: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 72f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 73f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20, LSL #2\n"
- "add x25, x25, x20, LSL #2\n"
- "add x24, x24, x20, LSL #2\n"
- "add x23, x23, x20, LSL #2\n"
- "add x22, x22, x20, LSL #2\n"
- "add x21, x21, x20, LSL #2\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 73f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "add x20, x20, x19, LSL #2\n"
"b 73f\n"
"72:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"73:" // Height 6: input setup done
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"ble 75f\n"
"74:" // Height 6: Multiply loop: Main loop head
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1rqw { z1.s }, p0/Z, [x25]\n"
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
".inst 0x658ab400 // bfcvt z0.h, p5/M, z0.s\n"
- "ld1rqw { z2.s }, p0/Z, [x24]\n"
- "ld1rqw { z3.s }, p0/Z, [x23]\n"
+ "ld1rqw { z2.s }, p0/Z, [x23]\n"
+ "ld1rqw { z3.s }, p0/Z, [x22]\n"
".inst 0x658ab421 // bfcvt z1.h, p5/M, z1.s\n"
".inst 0x658ab442 // bfcvt z2.h, p5/M, z2.s\n"
- "ld1rqw { z4.s }, p0/Z, [x22]\n"
- "ld1rqw { z5.s }, p0/Z, [x21]\n"
+ "ld1rqw { z4.s }, p0/Z, [x21]\n"
+ "ld1rqw { z5.s }, p0/Z, [x20]\n"
".inst 0x658ab463 // bfcvt z3.h, p5/M, z3.s\n"
".inst 0x658ab484 // bfcvt z4.h, p5/M, z4.s\n"
".inst 0x658ab4a5 // bfcvt z5.h, p5/M, z5.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #1, MUL VL]\n"
"uzp1 z1.h, z1.h, z1.h\n"
"uzp1 z2.h, z2.h, z2.h\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
+ "sub x26, x26, #0x4\n"
+ "cmp x26, #0x4\n"
"uzp1 z3.h, z3.h, z3.h\n"
"uzp1 z4.h, z4.h, z4.h\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"uzp1 z5.h, z5.h, z5.h\n"
"trn1 z0.d, z0.d, z1.d\n"
".inst 0x6466e408 // bfmmla z8.s, z0.h, z6.h\n"
- "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
"trn1 z2.d, z2.d, z3.d\n"
"trn1 z4.d, z4.d, z5.d\n"
".inst 0x6466e450 // bfmmla z16.s, z2.h, z6.h\n"
- "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
".inst 0x6466e498 // bfmmla z24.s, z4.h, z6.h\n"
".inst 0x6467e40c // bfmmla z12.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
- "add x22, x22, #0x10\n"
+ "ld1h { z6.h }, p5/Z, [x9, #2, MUL VL]\n"
+ "add x21, x21, #0x10\n"
".inst 0x6467e454 // bfmmla z20.s, z2.h, z7.h\n"
".inst 0x6467e49c // bfmmla z28.s, z4.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
- "add x21, x21, #0x10\n"
+ "ld1h { z7.h }, p5/Z, [x9, #3, MUL VL]\n"
+ "add x20, x20, #0x10\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
".inst 0x6466e451 // bfmmla z17.s, z2.h, z6.h\n"
".inst 0x6466e499 // bfmmla z25.s, z4.h, z6.h\n"
".inst 0x6467e40d // bfmmla z13.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #4, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x6467e455 // bfmmla z21.s, z2.h, z7.h\n"
".inst 0x6467e49d // bfmmla z29.s, z4.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x6466e40a // bfmmla z10.s, z0.h, z6.h\n"
".inst 0x6466e452 // bfmmla z18.s, z2.h, z6.h\n"
".inst 0x6466e49a // bfmmla z26.s, z4.h, z6.h\n"
".inst 0x6467e40e // bfmmla z14.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x6467e456 // bfmmla z22.s, z2.h, z7.h\n"
".inst 0x6467e49e // bfmmla z30.s, z4.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #8\n"
+ "ld1h { z7.h }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
".inst 0x6466e453 // bfmmla z19.s, z2.h, z6.h\n"
".inst 0x6466e49b // bfmmla z27.s, z4.h, z6.h\n"
@@ -1597,22 +1597,22 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
".inst 0x6467e49f // bfmmla z31.s, z4.h, z7.h\n"
"bgt 74b\n"
"75:" // Height 6: Multiply loop: Single iteration only
- "whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1rqw { z1.s }, p0/Z, [x25]\n"
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
".inst 0x658ab400 // bfcvt z0.h, p5/M, z0.s\n"
- "ld1rqw { z2.s }, p0/Z, [x24]\n"
- "ld1rqw { z3.s }, p0/Z, [x23]\n"
+ "ld1rqw { z2.s }, p0/Z, [x23]\n"
+ "ld1rqw { z3.s }, p0/Z, [x22]\n"
".inst 0x658ab421 // bfcvt z1.h, p5/M, z1.s\n"
".inst 0x658ab442 // bfcvt z2.h, p5/M, z2.s\n"
- "ld1rqw { z4.s }, p0/Z, [x22]\n"
- "ld1rqw { z5.s }, p0/Z, [x21]\n"
+ "ld1rqw { z4.s }, p0/Z, [x21]\n"
+ "ld1rqw { z5.s }, p0/Z, [x20]\n"
".inst 0x658ab463 // bfcvt z3.h, p5/M, z3.s\n"
".inst 0x658ab484 // bfcvt z4.h, p5/M, z4.s\n"
".inst 0x658ab4a5 // bfcvt z5.h, p5/M, z5.s\n"
"uzp1 z0.h, z0.h, z0.h\n"
- "ld1h { z6.h }, p5/Z, [x10]\n"
- "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #1, MUL VL]\n"
"uzp1 z1.h, z1.h, z1.h\n"
"uzp1 z2.h, z2.h, z2.h\n"
"uzp1 z3.h, z3.h, z3.h\n"
@@ -1625,27 +1625,27 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
".inst 0x6466e450 // bfmmla z16.s, z2.h, z6.h\n"
".inst 0x6466e498 // bfmmla z24.s, z4.h, z6.h\n"
".inst 0x6467e40c // bfmmla z12.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x6467e454 // bfmmla z20.s, z2.h, z7.h\n"
".inst 0x6467e49c // bfmmla z28.s, z4.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
".inst 0x6466e451 // bfmmla z17.s, z2.h, z6.h\n"
".inst 0x6466e499 // bfmmla z25.s, z4.h, z6.h\n"
".inst 0x6467e40d // bfmmla z13.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #4, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x6467e455 // bfmmla z21.s, z2.h, z7.h\n"
".inst 0x6467e49d // bfmmla z29.s, z4.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1h { z7.h }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x6466e40a // bfmmla z10.s, z0.h, z6.h\n"
".inst 0x6466e452 // bfmmla z18.s, z2.h, z6.h\n"
".inst 0x6466e49a // bfmmla z26.s, z4.h, z6.h\n"
".inst 0x6467e40e // bfmmla z14.s, z0.h, z7.h\n"
- "ld1h { z6.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x6467e456 // bfmmla z22.s, z2.h, z7.h\n"
".inst 0x6467e49e // bfmmla z30.s, z4.h, z7.h\n"
- "ld1h { z7.h }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #8\n"
+ "ld1h { z7.h }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
".inst 0x6466e453 // bfmmla z19.s, z2.h, z6.h\n"
".inst 0x6466e49b // bfmmla z27.s, z4.h, z6.h\n"
@@ -1653,21 +1653,21 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
".inst 0x6467e457 // bfmmla z23.s, z2.h, z7.h\n"
".inst 0x6467e49f // bfmmla z31.s, z4.h, z7.h\n"
"76:" // Height 6: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 71b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x28, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
"uzp1 z6.d, z8.d, z12.d\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
"uzp2 z8.d, z8.d, z12.d\n"
"uzp1 z12.d, z9.d, z13.d\n"
"uzp2 z9.d, z9.d, z13.d\n"
"uzp1 z13.d, z10.d, z14.d\n"
- "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
"uzp2 z10.d, z10.d, z14.d\n"
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
@@ -1688,10 +1688,10 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"uzp1 z30.d, z27.d, z31.d\n"
"uzp2 z27.d, z27.d, z31.d\n"
"tbz %x[flags], #1, 77f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
- "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
"fmin z6.s, p5/M, z6.s, z1.s\n"
"fmin z12.s, p5/M, z12.s, z1.s\n"
"fmin z13.s, p5/M, z13.s, z1.s\n"
@@ -1741,51 +1741,51 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"fmax z26.s, p5/M, z26.s, z0.s\n"
"fmax z27.s, p5/M, z27.s, z0.s\n"
"77:" // Height 6: No activation
- "st1w { z6.s }, p4, [x9]\n"
- "st1w { z12.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z8.s }, p4, [x25]\n"
- "st1w { z9.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z15.s }, p4, [x24]\n"
- "st1w { z20.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z21.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z22.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x23]\n"
- "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z23.s }, p4, [x22]\n"
- "st1w { z28.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z29.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z30.s }, p1, [x22, #3, MUL VL]\n"
- "st1w { z24.s }, p4, [x21]\n"
- "st1w { z25.s }, p3, [x21, #1, MUL VL]\n"
- "st1w { z26.s }, p2, [x21, #2, MUL VL]\n"
- "st1w { z27.s }, p1, [x21, #3, MUL VL]\n"
+ "st1w { z6.s }, p4, [x28]\n"
+ "st1w { z12.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z13.s }, p2, [x28, #2, MUL VL]\n"
+ "st1w { z14.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z8.s }, p4, [x24]\n"
+ "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z15.s }, p4, [x23]\n"
+ "st1w { z20.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z21.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z22.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x22]\n"
+ "st1w { z17.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z23.s }, p4, [x21]\n"
+ "st1w { z28.s }, p3, [x21, #1, MUL VL]\n"
+ "st1w { z29.s }, p2, [x21, #2, MUL VL]\n"
+ "st1w { z30.s }, p1, [x21, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x20]\n"
+ "st1w { z25.s }, p3, [x20, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x20, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x20, #3, MUL VL]\n"
"78:" // Height 6: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 67b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 80f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 79f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"79:" // Update direct input
- "mov x20, #0x18\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x18\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"80:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
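
Before the next file, one mechanism visible in the hunk just above is worth a concrete illustration: the only functional change in this revert is the register renumbering, and the asm clobber list at the end of the function is what makes it legal. The restored kernel names "x19" (and no longer "x12") among its clobbers, telling the compiler that the hand-written code overwrites a callee-saved register. Below is a minimal, AArch64-only sketch of the same mechanism in GCC/Clang extended asm; the function and operand names are invented for illustration and are not ComputeLibrary code.

#if defined(__aarch64__)
#include <cstdint>

// Illustrative sketch only -- not ComputeLibrary code. Shows why a kernel
// that uses x19 as scratch must list it as a clobber: x19 is callee-saved
// under AAPCS64, so naming it forces the compiler to save and restore it
// around the asm block.
int64_t add_via_x19(int64_t a, int64_t b)
{
    int64_t result;
    __asm__ volatile(
        "mov x19, %[a]\n"      // use x19 as a hand-allocated scratch register
        "add x19, x19, %[b]\n" // accumulate into x19
        "mov %[res], x19\n"    // move the sum to the compiler-chosen output
        : [res] "=r"(result)
        : [a] "r"(a), [b] "r"(b)
        : "x19"                // declare the clobber; without it the compiler
                               // could keep live data in x19 across the block
    );
    return result;
}
#endif // __aarch64__

The x19-free variant being reverted used x20-x28 plus the caller-saved x9-x12 instead; reinstating x19 shifts the whole allocation down by one register, which is why every hunk in this file is the same mechanical rename.
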
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_dot_4x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_dot_4x4VL/generic.cpp
index 562b2759aa..3031f5abf5 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_dot_4x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_dot_4x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -85,165 +85,165 @@ void sve_hybrid_s8qa_dot_4x4VL (
"cmp %x[M], #0x2\n"
"bgt 29f\n"
"beq 15f\n"
- "mov x10, %x[col_bias]\n"
"mov z11.s, #0x0\n"
- "mov z15.b, #0x1\n"
- "bic %x[flags], %x[flags], #0x80000000\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov z15.b, #0x1\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "mov x27, %x[col_bias]\n"
+ "bic %x[flags], %x[flags], #0x80000000\n"
+ "mov x26, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x9\n"
"mov z16.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z17.s, #0x0\n"
+ "whilelt p1.b, x19, x9\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"3:" // Height 1: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"4:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 5f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "cbnz x26, 6f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "cbnz x25, 6f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19\n"
"b 6f\n"
"5:" // Height 1: setup direct input
- "mov x24, %x[input_ptr]\n"
+ "mov x23, %x[input_ptr]\n"
"6:" // Height 1: input setup done
- "cmp x25, #0x10\n"
+ "cmp x24, #0x10\n"
"ble 9f\n"
"7:" // Height 1: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x25\n"
- "ld1rqb { z0.b }, p0/Z, [x24]\n"
"ld1b { z4.b }, p2/Z, [x28]\n"
- "sdot z16.s, z4.b, z0.b[0]\n"
+ "whilelt p0.b, XZR, x24\n"
"ld1b { z5.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1rqb { z0.b }, p0/Z, [x23]\n"
+ "sdot z16.s, z4.b, z0.b[0]\n"
"ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x23, x23, #0x10\n"
"sdot z17.s, z5.b, z0.b[0]\n"
- "sdot z18.s, z6.b, z0.b[0]\n"
"ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
- "sdot z19.s, z7.b, z0.b[0]\n"
"ld1b { z8.b }, p2/Z, [x28, #4, MUL VL]\n"
- "sdot z16.s, z8.b, z0.b[1]\n"
+ "sdot z18.s, z6.b, z0.b[0]\n"
"ld1b { z9.b }, p2/Z, [x28, #5, MUL VL]\n"
"ld1b { z10.b }, p2/Z, [x28, #6, MUL VL]\n"
- "sdot z17.s, z9.b, z0.b[1]\n"
- "sdot z18.s, z10.b, z0.b[1]\n"
+ "sdot z19.s, z7.b, z0.b[0]\n"
+ "sdot z16.s, z8.b, z0.b[1]\n"
"ld1b { z4.b }, p2/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
- "sdot z19.s, z4.b, z0.b[1]\n"
+ "sdot z17.s, z9.b, z0.b[1]\n"
"ld1b { z5.b }, p2/Z, [x28, #-8, MUL VL]\n"
"ld1b { z6.b }, p2/Z, [x28, #-7, MUL VL]\n"
+ "sdot z18.s, z10.b, z0.b[1]\n"
"ld1b { z7.b }, p2/Z, [x28, #-6, MUL VL]\n"
- "sdot z16.s, z5.b, z0.b[2]\n"
- "sdot z17.s, z6.b, z0.b[2]\n"
+ "sdot z19.s, z4.b, z0.b[1]\n"
"ld1b { z8.b }, p2/Z, [x28, #-5, MUL VL]\n"
- "sdot z18.s, z7.b, z0.b[2]\n"
- "sdot z19.s, z8.b, z0.b[2]\n"
+ "sdot z16.s, z5.b, z0.b[2]\n"
"ld1b { z9.b }, p2/Z, [x28, #-4, MUL VL]\n"
+ "sdot z17.s, z6.b, z0.b[2]\n"
"ld1b { z10.b }, p2/Z, [x28, #-3, MUL VL]\n"
+ "sdot z18.s, z7.b, z0.b[2]\n"
"ld1b { z4.b }, p2/Z, [x28, #-2, MUL VL]\n"
+ "sdot z19.s, z8.b, z0.b[2]\n"
+ "ld1b { z5.b }, p2/Z, [x28, #-1, MUL VL]\n"
"sdot z16.s, z9.b, z0.b[3]\n"
"sdot z17.s, z10.b, z0.b[3]\n"
- "ld1b { z5.b }, p2/Z, [x28, #-1, MUL VL]\n"
"sdot z18.s, z4.b, z0.b[3]\n"
"sdot z19.s, z5.b, z0.b[3]\n"
- "add x24, x24, #0x10\n"
"tbnz %x[flags], #31, 8f\n"
"sdot z11.s, z0.b, z15.b\n"
"8:" // Height 1: Multiply loop: unique 1: skip row sum
- "sub x25, x25, #0x10\n"
- "cmp x25, #0x10\n"
+ "sub x24, x24, #0x10\n"
+ "cmp x24, #0x10\n"
"bgt 7b\n"
"9:" // Height 1: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x25\n"
- "ld1rqb { z0.b }, p0/Z, [x24]\n"
"ld1b { z4.b }, p2/Z, [x28]\n"
- "subs x25, x25, #0x4\n"
+ "whilelt p0.b, XZR, x24\n"
"ld1b { z5.b }, p2/Z, [x28, #1, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "subs x24, x24, #0x4\n"
+ "ld1rqb { z0.b }, p0/Z, [x23]\n"
"sdot z16.s, z4.b, z0.b[0]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
"sdot z17.s, z5.b, z0.b[0]\n"
"ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"sdot z18.s, z6.b, z0.b[0]\n"
"sdot z19.s, z7.b, z0.b[0]\n"
- "addvl x28, x28, #4\n"
"ble 10f\n"
"ld1b { z8.b }, p2/Z, [x28]\n"
- "ld1b { z9.b }, p2/Z, [x28, #1, MUL VL]\n"
- "subs x25, x25, #0x4\n"
"sdot z16.s, z8.b, z0.b[1]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x24, x24, #0x4\n"
+ "sdot z17.s, z9.b, z0.b[1]\n"
"ld1b { z10.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z4.b }, p2/Z, [x28, #3, MUL VL]\n"
- "sdot z17.s, z9.b, z0.b[1]\n"
"sdot z18.s, z10.b, z0.b[1]\n"
- "sdot z19.s, z4.b, z0.b[1]\n"
"addvl x28, x28, #4\n"
+ "sdot z19.s, z4.b, z0.b[1]\n"
"ble 10f\n"
"ld1b { z5.b }, p2/Z, [x28]\n"
- "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
- "subs x25, x25, #0x4\n"
"sdot z16.s, z5.b, z0.b[2]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x24, x24, #0x4\n"
+ "sdot z17.s, z6.b, z0.b[2]\n"
"ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
- "sdot z17.s, z6.b, z0.b[2]\n"
"sdot z18.s, z7.b, z0.b[2]\n"
- "sdot z19.s, z8.b, z0.b[2]\n"
"addvl x28, x28, #4\n"
+ "sdot z19.s, z8.b, z0.b[2]\n"
"ble 10f\n"
"ld1b { z9.b }, p2/Z, [x28]\n"
- "ld1b { z10.b }, p2/Z, [x28, #1, MUL VL]\n"
"sdot z16.s, z9.b, z0.b[3]\n"
- "sdot z17.s, z10.b, z0.b[3]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #1, MUL VL]\n"
"ld1b { z4.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "sdot z17.s, z10.b, z0.b[3]\n"
"ld1b { z5.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"sdot z18.s, z4.b, z0.b[3]\n"
"sdot z19.s, z5.b, z0.b[3]\n"
- "addvl x28, x28, #4\n"
"10:" // Height 1: Multiply loop: multiply skip
"tbnz %x[flags], #31, 11f\n"
"sdot z11.s, z0.b, z15.b\n"
"11:" // Height 1: Multiply loop: unique 2: skip row sum
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 4b\n"
"tbnz %x[flags], #31, 12f\n"
- "mov x20, #0x4\n"
- "whilelt p0.s, XZR, x20\n"
- "add x20, %x[qp], %[b_offset]\n"
- "ld1rw { z1.s }, p2/Z, [x20]\n"
+ "add x19, %x[qp], %[b_offset]\n"
+ "ld1rw { z1.s }, p2/Z, [x19]\n"
+ "neg z1.s, p2/M, z1.s\n"
+ "mov x19, #0x4\n"
+ "whilelt p0.s, XZR, x19\n"
"saddv d11, p0, z11.s\n"
"mov z11.s, z11.s[0]\n"
- "neg z1.s, p2/M, z1.s\n"
"mul z11.s, p2/M, z11.s, z1.s\n"
"12:" // Height 1: skip row sum fixup
"add z16.s, z16.s, z11.s\n"
+ "ld1w { z0.s }, p2/Z, [x27]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"add z17.s, z17.s, z11.s\n"
- "ld1w { z0.s }, p2/Z, [x10]\n"
- "ld1w { z1.s }, p2/Z, [x10, #1, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x27, #1, MUL VL]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
"add z18.s, z18.s, z11.s\n"
+ "ld1w { z2.s }, p2/Z, [x27, #2, MUL VL]\n"
+ "add x19, %x[qp], %[per_layer_mul]\n"
"add z19.s, z19.s, z11.s\n"
- "ld1w { z2.s }, p2/Z, [x10, #2, MUL VL]\n"
- "ld1w { z3.s }, p2/Z, [x10, #3, MUL VL]\n"
- "add x20, %x[qp], %[per_layer_mul]\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
+ "ld1w { z3.s }, p2/Z, [x27, #3, MUL VL]\n"
+ "addvl x27, x27, #4\n"
"add z16.s, z16.s, z0.s\n"
+ "ld1rw { z0.s }, p2/Z, [x23]\n"
"add z17.s, z17.s, z1.s\n"
+ "ld1rw { z4.s }, p2/Z, [x19]\n"
"add z18.s, z18.s, z2.s\n"
"add z19.s, z19.s, z3.s\n"
- "ld1rw { z4.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[per_layer_right_shift]\n"
- "ld1rw { z0.s }, p2/Z, [x20]\n"
".inst 0x04a47610 // sqrdmulh z16.s, z16.s, z4.s\n"
".inst 0x04a47631 // sqrdmulh z17.s, z17.s, z4.s\n"
- "addvl x10, x10, #4\n"
".inst 0x04a47652 // sqrdmulh z18.s, z18.s, z4.s\n"
".inst 0x04a47673 // sqrdmulh z19.s, z19.s, z4.s\n"
"tbz %x[flags], #5, 13f\n"
@@ -254,26 +254,26 @@ void sve_hybrid_s8qa_dot_4x4VL (
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
"sqadd z16.s, z16.s, z4.s\n"
"sqadd z17.s, z17.s, z5.s\n"
"sqadd z18.s, z18.s, z6.s\n"
+ "asr z7.s, z7.s, #0x1f\n"
"sqadd z19.s, z19.s, z7.s\n"
"13:" // Height 1: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x20]\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z16.s, z16.s, z4.s\n"
+ "add x19, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x19]\n"
".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
+ "add x19, %x[qp], %[minval]\n"
".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
+ "ld1rw { z5.s }, p2/Z, [x19]\n"
+ "add x19, %x[qp], %[maxval]\n"
+ ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
+ "ld1rw { z6.s }, p2/Z, [x19]\n"
+ "add z16.s, z16.s, z4.s\n"
"add z17.s, z17.s, z4.s\n"
"add z18.s, z18.s, z4.s\n"
- ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
- "add x20, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x20]\n"
"add z19.s, z19.s, z4.s\n"
- "add x20, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x20]\n"
"smin z16.s, p2/M, z16.s, z6.s\n"
"smin z17.s, p2/M, z17.s, z6.s\n"
"smin z18.s, p2/M, z18.s, z6.s\n"
@@ -281,31 +281,31 @@ void sve_hybrid_s8qa_dot_4x4VL (
"smax z16.s, p2/M, z16.s, z5.s\n"
"smax z17.s, p2/M, z17.s, z5.s\n"
"smax z18.s, p2/M, z18.s, z5.s\n"
- "uzp1 z16.h, z16.h, z17.h\n"
"smax z19.s, p2/M, z19.s, z5.s\n"
+ "uzp1 z16.h, z16.h, z17.h\n"
"uzp1 z17.h, z18.h, z19.h\n"
"uzp1 z16.b, z16.b, z17.b\n"
- "st1b { z16.b }, p1, [x27]\n"
- "addvl x27, x27, #1\n"
+ "st1b { z16.b }, p1, [x26]\n"
+ "addvl x26, x26, #1\n"
"14:" // Height 1: Writeback done
"decw x9, ALL, MUL #4\n"
"cmp x9, XZR\n"
"bgt 2b\n"
"b 58f\n"
"15:" // Height 2
- "mov x10, %x[col_bias]\n"
"mov z11.s, #0x0\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x27, %x[col_bias]\n"
"mov z12.s, #0x0\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"bic %x[flags], %x[flags], #0x80000000\n"
"mov z15.b, #0x1\n"
- "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "mov x26, %x[output_ptr]\n"
"16:" // Height 2: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x9\n"
"mov z16.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z17.s, #0x0\n"
+ "whilelt p1.b, x19, x9\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"mov z20.s, #0x0\n"
@@ -313,68 +313,68 @@ void sve_hybrid_s8qa_dot_4x4VL (
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"17:" // Height 2: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"18:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 19f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "cbnz x26, 20f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "cbnz x25, 20f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
"b 20f\n"
"19:" // Height 2: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19\n"
"20:" // Height 2: input setup done
- "cmp x25, #0x10\n"
+ "cmp x24, #0x10\n"
"ble 23f\n"
"21:" // Height 2: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x25\n"
- "ld1rqb { z0.b }, p0/Z, [x24]\n"
- "ld1rqb { z1.b }, p0/Z, [x23]\n"
- "add x24, x24, #0x10\n"
"ld1b { z4.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x24\n"
"ld1b { z5.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1rqb { z0.b }, p0/Z, [x23]\n"
"sdot z16.s, z4.b, z0.b[0]\n"
- "sdot z20.s, z4.b, z1.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x22]\n"
+ "add x23, x23, #0x10\n"
+ "sdot z17.s, z5.b, z0.b[0]\n"
"ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x22, x22, #0x10\n"
+ "sdot z20.s, z4.b, z1.b[0]\n"
"ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
- "sdot z17.s, z5.b, z0.b[0]\n"
"sdot z21.s, z5.b, z1.b[0]\n"
- "sdot z18.s, z6.b, z0.b[0]\n"
- "sdot z22.s, z6.b, z1.b[0]\n"
"ld1b { z8.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "sdot z18.s, z6.b, z0.b[0]\n"
"ld1b { z9.b }, p2/Z, [x28, #5, MUL VL]\n"
- "sdot z19.s, z7.b, z0.b[0]\n"
- "sdot z23.s, z7.b, z1.b[0]\n"
+ "sdot z22.s, z6.b, z1.b[0]\n"
"ld1b { z10.b }, p2/Z, [x28, #6, MUL VL]\n"
+ "sdot z19.s, z7.b, z0.b[0]\n"
"ld1b { z4.b }, p2/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
+ "sdot z23.s, z7.b, z1.b[0]\n"
+ "ld1b { z5.b }, p2/Z, [x28, #-8, MUL VL]\n"
"sdot z16.s, z8.b, z0.b[1]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-7, MUL VL]\n"
"sdot z20.s, z8.b, z1.b[1]\n"
- "ld1b { z5.b }, p2/Z, [x28, #-8, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-6, MUL VL]\n"
"sdot z17.s, z9.b, z0.b[1]\n"
+ "ld1b { z8.b }, p2/Z, [x28, #-5, MUL VL]\n"
"sdot z21.s, z9.b, z1.b[1]\n"
- "ld1b { z6.b }, p2/Z, [x28, #-7, MUL VL]\n"
- "ld1b { z7.b }, p2/Z, [x28, #-6, MUL VL]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #-4, MUL VL]\n"
"sdot z18.s, z10.b, z0.b[1]\n"
"sdot z22.s, z10.b, z1.b[1]\n"
- "ld1b { z8.b }, p2/Z, [x28, #-5, MUL VL]\n"
- "ld1b { z9.b }, p2/Z, [x28, #-4, MUL VL]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #-3, MUL VL]\n"
"sdot z19.s, z4.b, z0.b[1]\n"
"sdot z23.s, z4.b, z1.b[1]\n"
- "ld1b { z10.b }, p2/Z, [x28, #-3, MUL VL]\n"
"ld1b { z4.b }, p2/Z, [x28, #-2, MUL VL]\n"
"sdot z16.s, z5.b, z0.b[2]\n"
"sdot z20.s, z5.b, z1.b[2]\n"
"ld1b { z5.b }, p2/Z, [x28, #-1, MUL VL]\n"
- "add x23, x23, #0x10\n"
"sdot z17.s, z6.b, z0.b[2]\n"
"sdot z21.s, z6.b, z1.b[2]\n"
"sdot z18.s, z7.b, z0.b[2]\n"
@@ -393,69 +393,69 @@ void sve_hybrid_s8qa_dot_4x4VL (
"sdot z11.s, z0.b, z15.b\n"
"sdot z12.s, z1.b, z15.b\n"
"22:" // Height 2: Multiply loop: unique 3: skip row sum
- "sub x25, x25, #0x10\n"
- "cmp x25, #0x10\n"
+ "sub x24, x24, #0x10\n"
+ "cmp x24, #0x10\n"
"bgt 21b\n"
"23:" // Height 2: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x25\n"
- "ld1rqb { z0.b }, p0/Z, [x24]\n"
- "ld1rqb { z1.b }, p0/Z, [x23]\n"
- "subs x25, x25, #0x4\n"
"ld1b { z4.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x24\n"
"ld1b { z5.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x24, x24, #0x4\n"
+ "ld1rqb { z0.b }, p0/Z, [x23]\n"
"sdot z16.s, z4.b, z0.b[0]\n"
- "sdot z20.s, z4.b, z1.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x22]\n"
+ "sdot z17.s, z5.b, z0.b[0]\n"
"ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
- "sdot z17.s, z5.b, z0.b[0]\n"
+ "sdot z20.s, z4.b, z1.b[0]\n"
+ "addvl x28, x28, #4\n"
"sdot z21.s, z5.b, z1.b[0]\n"
"sdot z18.s, z6.b, z0.b[0]\n"
"sdot z22.s, z6.b, z1.b[0]\n"
- "addvl x28, x28, #4\n"
"sdot z19.s, z7.b, z0.b[0]\n"
"sdot z23.s, z7.b, z1.b[0]\n"
"ble 24f\n"
"ld1b { z8.b }, p2/Z, [x28]\n"
- "ld1b { z9.b }, p2/Z, [x28, #1, MUL VL]\n"
- "subs x25, x25, #0x4\n"
"sdot z16.s, z8.b, z0.b[1]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x24, x24, #0x4\n"
+ "sdot z20.s, z8.b, z1.b[1]\n"
"ld1b { z10.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z4.b }, p2/Z, [x28, #3, MUL VL]\n"
- "sdot z20.s, z8.b, z1.b[1]\n"
"sdot z17.s, z9.b, z0.b[1]\n"
+ "addvl x28, x28, #4\n"
"sdot z21.s, z9.b, z1.b[1]\n"
"sdot z18.s, z10.b, z0.b[1]\n"
- "addvl x28, x28, #4\n"
"sdot z22.s, z10.b, z1.b[1]\n"
"sdot z19.s, z4.b, z0.b[1]\n"
"sdot z23.s, z4.b, z1.b[1]\n"
"ble 24f\n"
"ld1b { z5.b }, p2/Z, [x28]\n"
- "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
- "subs x25, x25, #0x4\n"
"sdot z16.s, z5.b, z0.b[2]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x24, x24, #0x4\n"
+ "sdot z20.s, z5.b, z1.b[2]\n"
"ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
- "sdot z20.s, z5.b, z1.b[2]\n"
"sdot z17.s, z6.b, z0.b[2]\n"
+ "addvl x28, x28, #4\n"
"sdot z21.s, z6.b, z1.b[2]\n"
"sdot z18.s, z7.b, z0.b[2]\n"
- "addvl x28, x28, #4\n"
"sdot z22.s, z7.b, z1.b[2]\n"
"sdot z19.s, z8.b, z0.b[2]\n"
"sdot z23.s, z8.b, z1.b[2]\n"
"ble 24f\n"
"ld1b { z9.b }, p2/Z, [x28]\n"
- "ld1b { z10.b }, p2/Z, [x28, #1, MUL VL]\n"
"sdot z16.s, z9.b, z0.b[3]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #1, MUL VL]\n"
"sdot z20.s, z9.b, z1.b[3]\n"
"ld1b { z4.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z5.b }, p2/Z, [x28, #3, MUL VL]\n"
"sdot z17.s, z10.b, z0.b[3]\n"
+ "addvl x28, x28, #4\n"
"sdot z21.s, z10.b, z1.b[3]\n"
"sdot z18.s, z4.b, z0.b[3]\n"
"sdot z22.s, z4.b, z1.b[3]\n"
- "addvl x28, x28, #4\n"
"sdot z19.s, z5.b, z0.b[3]\n"
"sdot z23.s, z5.b, z1.b[3]\n"
"24:" // Height 2: Multiply loop: multiply skip
@@ -463,49 +463,49 @@ void sve_hybrid_s8qa_dot_4x4VL (
"sdot z11.s, z0.b, z15.b\n"
"sdot z12.s, z1.b, z15.b\n"
"25:" // Height 2: Multiply loop: unique 4: skip row sum
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 18b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x22, x26, x19\n"
"tbnz %x[flags], #31, 26f\n"
- "mov x20, #0x4\n"
- "whilelt p0.s, XZR, x20\n"
- "add x20, %x[qp], %[b_offset]\n"
- "ld1rw { z2.s }, p2/Z, [x20]\n"
+ "add x19, %x[qp], %[b_offset]\n"
+ "ld1rw { z2.s }, p2/Z, [x19]\n"
+ "neg z2.s, p2/M, z2.s\n"
+ "mov x19, #0x4\n"
+ "whilelt p0.s, XZR, x19\n"
"saddv d11, p0, z11.s\n"
- "mov z11.s, z11.s[0]\n"
"saddv d12, p0, z12.s\n"
- "neg z2.s, p2/M, z2.s\n"
+ "mov z11.s, z11.s[0]\n"
"mov z12.s, z12.s[0]\n"
"mul z11.s, p2/M, z11.s, z2.s\n"
"mul z12.s, p2/M, z12.s, z2.s\n"
"26:" // Height 2: skip row sum fixup
"add z16.s, z16.s, z11.s\n"
+ "ld1w { z0.s }, p2/Z, [x27]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"add z17.s, z17.s, z11.s\n"
- "ld1w { z0.s }, p2/Z, [x10]\n"
- "ld1w { z1.s }, p2/Z, [x10, #1, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x27, #1, MUL VL]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
"add z18.s, z18.s, z11.s\n"
+ "ld1w { z2.s }, p2/Z, [x27, #2, MUL VL]\n"
+ "add x19, %x[qp], %[per_layer_mul]\n"
"add z19.s, z19.s, z11.s\n"
- "ld1w { z2.s }, p2/Z, [x10, #2, MUL VL]\n"
- "ld1w { z3.s }, p2/Z, [x10, #3, MUL VL]\n"
+ "ld1w { z3.s }, p2/Z, [x27, #3, MUL VL]\n"
+ "addvl x27, x27, #4\n"
"add z20.s, z20.s, z12.s\n"
+ "ld1rw { z4.s }, p2/Z, [x19]\n"
"add z21.s, z21.s, z12.s\n"
- "add x20, %x[qp], %[per_layer_mul]\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
"add z22.s, z22.s, z12.s\n"
"add z23.s, z23.s, z12.s\n"
- "ld1rw { z4.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[per_layer_right_shift]\n"
"add z16.s, z16.s, z0.s\n"
"add z17.s, z17.s, z1.s\n"
- "addvl x10, x10, #4\n"
"add z18.s, z18.s, z2.s\n"
"add z19.s, z19.s, z3.s\n"
"add z20.s, z20.s, z0.s\n"
+ "ld1rw { z0.s }, p2/Z, [x23]\n"
"add z21.s, z21.s, z1.s\n"
- "ld1rw { z0.s }, p2/Z, [x20]\n"
"add z22.s, z22.s, z2.s\n"
"add z23.s, z23.s, z3.s\n"
".inst 0x04a47610 // sqrdmulh z16.s, z16.s, z4.s\n"
@@ -518,97 +518,97 @@ void sve_hybrid_s8qa_dot_4x4VL (
".inst 0x04a476f7 // sqrdmulh z23.s, z23.s, z4.s\n"
"tbz %x[flags], #5, 27f\n"
"and z4.d, z16.d, z0.d\n"
- "asr z4.s, z4.s, #0x1f\n"
- "sqadd z16.s, z16.s, z4.s\n"
"and z5.d, z17.d, z0.d\n"
"and z6.d, z18.d, z0.d\n"
+ "asr z4.s, z4.s, #0x1f\n"
+ "asr z5.s, z5.s, #0x1f\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "sqadd z16.s, z16.s, z4.s\n"
+ "sqadd z17.s, z17.s, z5.s\n"
+ "sqadd z18.s, z18.s, z6.s\n"
"and z7.d, z19.d, z0.d\n"
"and z8.d, z20.d, z0.d\n"
"and z9.d, z21.d, z0.d\n"
- "and z10.d, z22.d, z0.d\n"
- "and z4.d, z23.d, z0.d\n"
- "asr z5.s, z5.s, #0x1f\n"
- "asr z6.s, z6.s, #0x1f\n"
"asr z7.s, z7.s, #0x1f\n"
"asr z8.s, z8.s, #0x1f\n"
"asr z9.s, z9.s, #0x1f\n"
- "asr z10.s, z10.s, #0x1f\n"
- "asr z4.s, z4.s, #0x1f\n"
- "sqadd z17.s, z17.s, z5.s\n"
- "sqadd z18.s, z18.s, z6.s\n"
"sqadd z19.s, z19.s, z7.s\n"
"sqadd z20.s, z20.s, z8.s\n"
"sqadd z21.s, z21.s, z9.s\n"
+ "and z10.d, z22.d, z0.d\n"
+ "and z4.d, z23.d, z0.d\n"
+ "asr z10.s, z10.s, #0x1f\n"
+ "asr z4.s, z4.s, #0x1f\n"
"sqadd z22.s, z22.s, z10.s\n"
"sqadd z23.s, z23.s, z4.s\n"
"27:" // Height 2: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x20]\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z16.s, z16.s, z4.s\n"
+ "add x19, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x19]\n"
".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
+ "add x19, %x[qp], %[minval]\n"
".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
- "add z17.s, z17.s, z4.s\n"
- "add z18.s, z18.s, z4.s\n"
+ "ld1rw { z5.s }, p2/Z, [x19]\n"
+ "add x19, %x[qp], %[maxval]\n"
".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
+ "ld1rw { z6.s }, p2/Z, [x19]\n"
".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
+ "add z16.s, z16.s, z4.s\n"
+ "add z17.s, z17.s, z4.s\n"
+ "add z18.s, z18.s, z4.s\n"
"add z19.s, z19.s, z4.s\n"
"add z20.s, z20.s, z4.s\n"
- ".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
- ".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
- "add z21.s, z21.s, z4.s\n"
- "add z22.s, z22.s, z4.s\n"
- ".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
- "add x20, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x20]\n"
- "add z23.s, z23.s, z4.s\n"
- "add x20, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x20]\n"
"smin z16.s, p2/M, z16.s, z6.s\n"
"smin z17.s, p2/M, z17.s, z6.s\n"
"smin z18.s, p2/M, z18.s, z6.s\n"
"smin z19.s, p2/M, z19.s, z6.s\n"
- "smin z20.s, p2/M, z20.s, z6.s\n"
- "smin z21.s, p2/M, z21.s, z6.s\n"
- "smin z22.s, p2/M, z22.s, z6.s\n"
- "smin z23.s, p2/M, z23.s, z6.s\n"
"smax z16.s, p2/M, z16.s, z5.s\n"
"smax z17.s, p2/M, z17.s, z5.s\n"
"smax z18.s, p2/M, z18.s, z5.s\n"
- "uzp1 z16.h, z16.h, z17.h\n"
"smax z19.s, p2/M, z19.s, z5.s\n"
- "smax z20.s, p2/M, z20.s, z5.s\n"
+ "smin z20.s, p2/M, z20.s, z6.s\n"
+ "uzp1 z16.h, z16.h, z17.h\n"
+ ".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
"uzp1 z17.h, z18.h, z19.h\n"
+ "smax z20.s, p2/M, z20.s, z5.s\n"
"uzp1 z16.b, z16.b, z17.b\n"
+ "st1b { z16.b }, p1, [x26]\n"
+ "add z21.s, z21.s, z4.s\n"
+ "addvl x26, x26, #1\n"
+ ".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
+ ".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
+ "smin z21.s, p2/M, z21.s, z6.s\n"
+ "add z22.s, z22.s, z4.s\n"
+ "add z23.s, z23.s, z4.s\n"
"smax z21.s, p2/M, z21.s, z5.s\n"
- "smax z22.s, p2/M, z22.s, z5.s\n"
+ "smin z22.s, p2/M, z22.s, z6.s\n"
+ "smin z23.s, p2/M, z23.s, z6.s\n"
"uzp1 z20.h, z20.h, z21.h\n"
- "st1b { z16.b }, p1, [x27]\n"
+ "smax z22.s, p2/M, z22.s, z5.s\n"
"smax z23.s, p2/M, z23.s, z5.s\n"
"uzp1 z21.h, z22.h, z23.h\n"
"uzp1 z20.b, z20.b, z21.b\n"
- "st1b { z20.b }, p1, [x23]\n"
- "addvl x27, x27, #1\n"
+ "st1b { z20.b }, p1, [x22]\n"
"28:" // Height 2: Writeback done
"decw x9, ALL, MUL #4\n"
"cmp x9, XZR\n"
"bgt 16b\n"
"b 58f\n"
"29:" // Height 3
- "mov x10, %x[col_bias]\n"
"mov z11.s, #0x0\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x27, %x[col_bias]\n"
"mov z12.s, #0x0\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"bic %x[flags], %x[flags], #0x80000000\n"
"mov z13.s, #0x0\n"
+ "mov x26, %x[output_ptr]\n"
"mov z15.b, #0x1\n"
- "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
"30:" // Height 3: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x9\n"
"mov z16.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z17.s, #0x0\n"
+ "whilelt p1.b, x19, x9\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"mov z20.s, #0x0\n"
@@ -620,83 +620,83 @@ void sve_hybrid_s8qa_dot_4x4VL (
"mov z26.s, #0x0\n"
"mov z27.s, #0x0\n"
"31:" // Height 3: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"32:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 33f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "ldr x22, [x21, #0x10]\n"
- "cbnz x26, 34f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "cbnz x25, 34f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
"b 34f\n"
"33:" // Height 3: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
"34:" // Height 3: input setup done
- "cmp x25, #0x10\n"
+ "cmp x24, #0x10\n"
"ble 37f\n"
"35:" // Height 3: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x25\n"
- "ld1rqb { z0.b }, p0/Z, [x24]\n"
- "ld1rqb { z1.b }, p0/Z, [x23]\n"
- "add x24, x24, #0x10\n"
- "ld1rqb { z2.b }, p0/Z, [x22]\n"
"ld1b { z4.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x24\n"
+ "ld1b { z5.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1rqb { z0.b }, p0/Z, [x23]\n"
"sdot z16.s, z4.b, z0.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x22]\n"
+ "add x23, x23, #0x10\n"
+ "sdot z17.s, z5.b, z0.b[0]\n"
+ "ld1rqb { z2.b }, p0/Z, [x21]\n"
+ "add x22, x22, #0x10\n"
"sdot z20.s, z4.b, z1.b[0]\n"
- "ld1b { z5.b }, p2/Z, [x28, #1, MUL VL]\n"
"ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x21, x21, #0x10\n"
"sdot z24.s, z4.b, z2.b[0]\n"
- "sdot z17.s, z5.b, z0.b[0]\n"
"ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
"sdot z21.s, z5.b, z1.b[0]\n"
- "sdot z25.s, z5.b, z2.b[0]\n"
"ld1b { z8.b }, p2/Z, [x28, #4, MUL VL]\n"
- "sdot z18.s, z6.b, z0.b[0]\n"
- "sdot z22.s, z6.b, z1.b[0]\n"
+ "sdot z25.s, z5.b, z2.b[0]\n"
"ld1b { z9.b }, p2/Z, [x28, #5, MUL VL]\n"
+ "sdot z18.s, z6.b, z0.b[0]\n"
"ld1b { z10.b }, p2/Z, [x28, #6, MUL VL]\n"
- "sdot z26.s, z6.b, z2.b[0]\n"
- "sdot z19.s, z7.b, z0.b[0]\n"
+ "sdot z22.s, z6.b, z1.b[0]\n"
"ld1b { z4.b }, p2/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
- "sdot z23.s, z7.b, z1.b[0]\n"
- "sdot z27.s, z7.b, z2.b[0]\n"
+ "sdot z26.s, z6.b, z2.b[0]\n"
"ld1b { z5.b }, p2/Z, [x28, #-8, MUL VL]\n"
+ "sdot z19.s, z7.b, z0.b[0]\n"
"ld1b { z6.b }, p2/Z, [x28, #-7, MUL VL]\n"
+ "sdot z23.s, z7.b, z1.b[0]\n"
+ "sdot z27.s, z7.b, z2.b[0]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-6, MUL VL]\n"
"sdot z16.s, z8.b, z0.b[1]\n"
"sdot z20.s, z8.b, z1.b[1]\n"
- "ld1b { z7.b }, p2/Z, [x28, #-6, MUL VL]\n"
- "add x23, x23, #0x10\n"
"sdot z24.s, z8.b, z2.b[1]\n"
- "sdot z17.s, z9.b, z0.b[1]\n"
"ld1b { z8.b }, p2/Z, [x28, #-5, MUL VL]\n"
- "add x22, x22, #0x10\n"
+ "sdot z17.s, z9.b, z0.b[1]\n"
"sdot z21.s, z9.b, z1.b[1]\n"
"sdot z25.s, z9.b, z2.b[1]\n"
"ld1b { z9.b }, p2/Z, [x28, #-4, MUL VL]\n"
"sdot z18.s, z10.b, z0.b[1]\n"
"sdot z22.s, z10.b, z1.b[1]\n"
"sdot z26.s, z10.b, z2.b[1]\n"
- "sdot z19.s, z4.b, z0.b[1]\n"
"ld1b { z10.b }, p2/Z, [x28, #-3, MUL VL]\n"
+ "sdot z19.s, z4.b, z0.b[1]\n"
"sdot z23.s, z4.b, z1.b[1]\n"
"sdot z27.s, z4.b, z2.b[1]\n"
"ld1b { z4.b }, p2/Z, [x28, #-2, MUL VL]\n"
"sdot z16.s, z5.b, z0.b[2]\n"
"sdot z20.s, z5.b, z1.b[2]\n"
"sdot z24.s, z5.b, z2.b[2]\n"
- "sdot z17.s, z6.b, z0.b[2]\n"
"ld1b { z5.b }, p2/Z, [x28, #-1, MUL VL]\n"
+ "sdot z17.s, z6.b, z0.b[2]\n"
"sdot z21.s, z6.b, z1.b[2]\n"
"sdot z25.s, z6.b, z2.b[2]\n"
"sdot z18.s, z7.b, z0.b[2]\n"
@@ -722,26 +722,26 @@ void sve_hybrid_s8qa_dot_4x4VL (
"sdot z12.s, z1.b, z15.b\n"
"sdot z13.s, z2.b, z15.b\n"
"36:" // Height 3: Multiply loop: unique 5: skip row sum
- "sub x25, x25, #0x10\n"
- "cmp x25, #0x10\n"
+ "sub x24, x24, #0x10\n"
+ "cmp x24, #0x10\n"
"bgt 35b\n"
"37:" // Height 3: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x25\n"
- "ld1rqb { z0.b }, p0/Z, [x24]\n"
- "ld1rqb { z1.b }, p0/Z, [x23]\n"
- "subs x25, x25, #0x4\n"
- "ld1rqb { z2.b }, p0/Z, [x22]\n"
"ld1b { z4.b }, p2/Z, [x28]\n"
- "sdot z16.s, z4.b, z0.b[0]\n"
- "sdot z20.s, z4.b, z1.b[0]\n"
+ "whilelt p0.b, XZR, x24\n"
"ld1b { z5.b }, p2/Z, [x28, #1, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
- "sdot z24.s, z4.b, z2.b[0]\n"
+ "subs x24, x24, #0x4\n"
+ "ld1rqb { z0.b }, p0/Z, [x23]\n"
+ "sdot z16.s, z4.b, z0.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x22]\n"
"sdot z17.s, z5.b, z0.b[0]\n"
+ "ld1rqb { z2.b }, p0/Z, [x21]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "sdot z20.s, z4.b, z1.b[0]\n"
"ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "sdot z24.s, z4.b, z2.b[0]\n"
"sdot z21.s, z5.b, z1.b[0]\n"
"sdot z25.s, z5.b, z2.b[0]\n"
- "addvl x28, x28, #4\n"
"sdot z18.s, z6.b, z0.b[0]\n"
"sdot z22.s, z6.b, z1.b[0]\n"
"sdot z26.s, z6.b, z2.b[0]\n"
@@ -750,16 +750,16 @@ void sve_hybrid_s8qa_dot_4x4VL (
"sdot z27.s, z7.b, z2.b[0]\n"
"ble 38f\n"
"ld1b { z8.b }, p2/Z, [x28]\n"
- "ld1b { z9.b }, p2/Z, [x28, #1, MUL VL]\n"
- "subs x25, x25, #0x4\n"
"sdot z16.s, z8.b, z0.b[1]\n"
- "ld1b { z10.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z4.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x24, x24, #0x4\n"
"sdot z20.s, z8.b, z1.b[1]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #2, MUL VL]\n"
"sdot z24.s, z8.b, z2.b[1]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"sdot z17.s, z9.b, z0.b[1]\n"
"sdot z21.s, z9.b, z1.b[1]\n"
- "addvl x28, x28, #4\n"
"sdot z25.s, z9.b, z2.b[1]\n"
"sdot z18.s, z10.b, z0.b[1]\n"
"sdot z22.s, z10.b, z1.b[1]\n"
@@ -769,16 +769,16 @@ void sve_hybrid_s8qa_dot_4x4VL (
"sdot z27.s, z4.b, z2.b[1]\n"
"ble 38f\n"
"ld1b { z5.b }, p2/Z, [x28]\n"
- "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
- "subs x25, x25, #0x4\n"
"sdot z16.s, z5.b, z0.b[2]\n"
- "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x24, x24, #0x4\n"
"sdot z20.s, z5.b, z1.b[2]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
"sdot z24.s, z5.b, z2.b[2]\n"
+ "ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"sdot z17.s, z6.b, z0.b[2]\n"
"sdot z21.s, z6.b, z1.b[2]\n"
- "addvl x28, x28, #4\n"
"sdot z25.s, z6.b, z2.b[2]\n"
"sdot z18.s, z7.b, z0.b[2]\n"
"sdot z22.s, z7.b, z1.b[2]\n"
@@ -788,16 +788,16 @@ void sve_hybrid_s8qa_dot_4x4VL (
"sdot z27.s, z8.b, z2.b[2]\n"
"ble 38f\n"
"ld1b { z9.b }, p2/Z, [x28]\n"
- "ld1b { z10.b }, p2/Z, [x28, #1, MUL VL]\n"
"sdot z16.s, z9.b, z0.b[3]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #1, MUL VL]\n"
"sdot z20.s, z9.b, z1.b[3]\n"
"ld1b { z4.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z5.b }, p2/Z, [x28, #3, MUL VL]\n"
"sdot z24.s, z9.b, z2.b[3]\n"
+ "ld1b { z5.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"sdot z17.s, z10.b, z0.b[3]\n"
"sdot z21.s, z10.b, z1.b[3]\n"
"sdot z25.s, z10.b, z2.b[3]\n"
- "addvl x28, x28, #4\n"
"sdot z18.s, z4.b, z0.b[3]\n"
"sdot z22.s, z4.b, z1.b[3]\n"
"sdot z26.s, z4.b, z2.b[3]\n"
@@ -810,48 +810,48 @@ void sve_hybrid_s8qa_dot_4x4VL (
"sdot z12.s, z1.b, z15.b\n"
"sdot z13.s, z2.b, z15.b\n"
"39:" // Height 3: Multiply loop: unique 6: skip row sum
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 32b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20\n"
- "add x22, x23, x20\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x22, x26, x19\n"
+ "add x21, x22, x19\n"
"tbnz %x[flags], #31, 40f\n"
- "mov x20, #0x4\n"
- "whilelt p0.s, XZR, x20\n"
- "add x20, %x[qp], %[b_offset]\n"
- "ld1rw { z3.s }, p2/Z, [x20]\n"
+ "add x19, %x[qp], %[b_offset]\n"
+ "ld1rw { z3.s }, p2/Z, [x19]\n"
+ "neg z3.s, p2/M, z3.s\n"
+ "mov x19, #0x4\n"
+ "whilelt p0.s, XZR, x19\n"
"saddv d11, p0, z11.s\n"
- "mov z11.s, z11.s[0]\n"
"saddv d12, p0, z12.s\n"
"saddv d13, p0, z13.s\n"
+ "mov z11.s, z11.s[0]\n"
"mov z12.s, z12.s[0]\n"
"mov z13.s, z13.s[0]\n"
- "neg z3.s, p2/M, z3.s\n"
"mul z11.s, p2/M, z11.s, z3.s\n"
"mul z12.s, p2/M, z12.s, z3.s\n"
"mul z13.s, p2/M, z13.s, z3.s\n"
"40:" // Height 3: skip row sum fixup
"add z16.s, z16.s, z11.s\n"
+ "ld1w { z0.s }, p2/Z, [x27]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"add z17.s, z17.s, z11.s\n"
- "ld1w { z0.s }, p2/Z, [x10]\n"
- "ld1w { z1.s }, p2/Z, [x10, #1, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x27, #1, MUL VL]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
"add z18.s, z18.s, z11.s\n"
+ "ld1w { z2.s }, p2/Z, [x27, #2, MUL VL]\n"
+ "add x19, %x[qp], %[per_layer_mul]\n"
"add z19.s, z19.s, z11.s\n"
- "ld1w { z2.s }, p2/Z, [x10, #2, MUL VL]\n"
- "ld1w { z3.s }, p2/Z, [x10, #3, MUL VL]\n"
+ "ld1w { z3.s }, p2/Z, [x27, #3, MUL VL]\n"
+ "addvl x27, x27, #4\n"
"add z20.s, z20.s, z12.s\n"
+ "ld1rw { z4.s }, p2/Z, [x19]\n"
"add z21.s, z21.s, z12.s\n"
- "add x20, %x[qp], %[per_layer_mul]\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
"add z22.s, z22.s, z12.s\n"
"add z23.s, z23.s, z12.s\n"
- "ld1rw { z4.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[per_layer_right_shift]\n"
"add z24.s, z24.s, z13.s\n"
"add z25.s, z25.s, z13.s\n"
- "addvl x10, x10, #4\n"
"add z26.s, z26.s, z13.s\n"
"add z27.s, z27.s, z13.s\n"
"add z16.s, z16.s, z0.s\n"
@@ -863,8 +863,8 @@ void sve_hybrid_s8qa_dot_4x4VL (
"add z22.s, z22.s, z2.s\n"
"add z23.s, z23.s, z3.s\n"
"add z24.s, z24.s, z0.s\n"
+ "ld1rw { z0.s }, p2/Z, [x23]\n"
"add z25.s, z25.s, z1.s\n"
- "ld1rw { z0.s }, p2/Z, [x20]\n"
"add z26.s, z26.s, z2.s\n"
"add z27.s, z27.s, z3.s\n"
".inst 0x04a47610 // sqrdmulh z16.s, z16.s, z4.s\n"
@@ -883,131 +883,131 @@ void sve_hybrid_s8qa_dot_4x4VL (
"and z4.d, z16.d, z0.d\n"
"and z5.d, z17.d, z0.d\n"
"and z6.d, z18.d, z0.d\n"
- "and z7.d, z19.d, z0.d\n"
- "and z8.d, z20.d, z0.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
- "asr z8.s, z8.s, #0x1f\n"
"sqadd z16.s, z16.s, z4.s\n"
"sqadd z17.s, z17.s, z5.s\n"
"sqadd z18.s, z18.s, z6.s\n"
+ "and z7.d, z19.d, z0.d\n"
+ "and z8.d, z20.d, z0.d\n"
+ "and z9.d, z21.d, z0.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ "asr z8.s, z8.s, #0x1f\n"
+ "asr z9.s, z9.s, #0x1f\n"
"sqadd z19.s, z19.s, z7.s\n"
"sqadd z20.s, z20.s, z8.s\n"
- "and z9.d, z21.d, z0.d\n"
+ "sqadd z21.s, z21.s, z9.s\n"
"and z10.d, z22.d, z0.d\n"
"and z4.d, z23.d, z0.d\n"
"and z5.d, z24.d, z0.d\n"
- "and z6.d, z25.d, z0.d\n"
- "and z7.d, z26.d, z0.d\n"
- "and z8.d, z27.d, z0.d\n"
- "asr z9.s, z9.s, #0x1f\n"
"asr z10.s, z10.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
- "asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
- "asr z8.s, z8.s, #0x1f\n"
- "sqadd z21.s, z21.s, z9.s\n"
"sqadd z22.s, z22.s, z10.s\n"
"sqadd z23.s, z23.s, z4.s\n"
"sqadd z24.s, z24.s, z5.s\n"
+ "and z6.d, z25.d, z0.d\n"
+ "and z7.d, z26.d, z0.d\n"
+ "and z8.d, z27.d, z0.d\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ "asr z8.s, z8.s, #0x1f\n"
"sqadd z25.s, z25.s, z6.s\n"
"sqadd z26.s, z26.s, z7.s\n"
"sqadd z27.s, z27.s, z8.s\n"
"41:" // Height 3: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x20]\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z16.s, z16.s, z4.s\n"
+ "add x19, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x19]\n"
".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
+ "add x19, %x[qp], %[minval]\n"
".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
- "add z17.s, z17.s, z4.s\n"
- "add z18.s, z18.s, z4.s\n"
+ "ld1rw { z5.s }, p2/Z, [x19]\n"
+ "add x19, %x[qp], %[maxval]\n"
".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
+ "ld1rw { z6.s }, p2/Z, [x19]\n"
".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
+ "add z16.s, z16.s, z4.s\n"
+ "add z17.s, z17.s, z4.s\n"
+ "add z18.s, z18.s, z4.s\n"
"add z19.s, z19.s, z4.s\n"
"add z20.s, z20.s, z4.s\n"
- ".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
- ".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
- "add z21.s, z21.s, z4.s\n"
- "add z22.s, z22.s, z4.s\n"
- ".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
- ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
- "add z23.s, z23.s, z4.s\n"
- "add z24.s, z24.s, z4.s\n"
- ".inst 0x44828819 // srshl z25.s, p2/M, z25.s, z0.s\n"
- ".inst 0x4482881a // srshl z26.s, p2/M, z26.s, z0.s\n"
- "add z25.s, z25.s, z4.s\n"
- "add z26.s, z26.s, z4.s\n"
- ".inst 0x4482881b // srshl z27.s, p2/M, z27.s, z0.s\n"
- "add x20, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x20]\n"
- "add z27.s, z27.s, z4.s\n"
- "add x20, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x20]\n"
"smin z16.s, p2/M, z16.s, z6.s\n"
"smin z17.s, p2/M, z17.s, z6.s\n"
"smin z18.s, p2/M, z18.s, z6.s\n"
"smin z19.s, p2/M, z19.s, z6.s\n"
- "smin z20.s, p2/M, z20.s, z6.s\n"
- "smin z21.s, p2/M, z21.s, z6.s\n"
- "smin z22.s, p2/M, z22.s, z6.s\n"
- "smin z23.s, p2/M, z23.s, z6.s\n"
- "smin z24.s, p2/M, z24.s, z6.s\n"
- "smin z25.s, p2/M, z25.s, z6.s\n"
- "smin z26.s, p2/M, z26.s, z6.s\n"
- "smin z27.s, p2/M, z27.s, z6.s\n"
"smax z16.s, p2/M, z16.s, z5.s\n"
"smax z17.s, p2/M, z17.s, z5.s\n"
"smax z18.s, p2/M, z18.s, z5.s\n"
- "uzp1 z16.h, z16.h, z17.h\n"
"smax z19.s, p2/M, z19.s, z5.s\n"
- "smax z20.s, p2/M, z20.s, z5.s\n"
+ "smin z20.s, p2/M, z20.s, z6.s\n"
+ "uzp1 z16.h, z16.h, z17.h\n"
+ ".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
"uzp1 z17.h, z18.h, z19.h\n"
+ "smax z20.s, p2/M, z20.s, z5.s\n"
"uzp1 z16.b, z16.b, z17.b\n"
+ "st1b { z16.b }, p1, [x26]\n"
+ "add z21.s, z21.s, z4.s\n"
+ "addvl x26, x26, #1\n"
+ ".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
+ ".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
+ ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
+ "smin z21.s, p2/M, z21.s, z6.s\n"
+ ".inst 0x44828819 // srshl z25.s, p2/M, z25.s, z0.s\n"
+ "add z22.s, z22.s, z4.s\n"
+ "add z23.s, z23.s, z4.s\n"
+ "add z24.s, z24.s, z4.s\n"
+ "add z25.s, z25.s, z4.s\n"
"smax z21.s, p2/M, z21.s, z5.s\n"
- "smax z22.s, p2/M, z22.s, z5.s\n"
+ "smin z22.s, p2/M, z22.s, z6.s\n"
+ "smin z23.s, p2/M, z23.s, z6.s\n"
+ "smin z24.s, p2/M, z24.s, z6.s\n"
"uzp1 z20.h, z20.h, z21.h\n"
- "st1b { z16.b }, p1, [x27]\n"
+ "smax z22.s, p2/M, z22.s, z5.s\n"
"smax z23.s, p2/M, z23.s, z5.s\n"
"smax z24.s, p2/M, z24.s, z5.s\n"
+ "smin z25.s, p2/M, z25.s, z6.s\n"
+ ".inst 0x4482881a // srshl z26.s, p2/M, z26.s, z0.s\n"
"uzp1 z21.h, z22.h, z23.h\n"
+ ".inst 0x4482881b // srshl z27.s, p2/M, z27.s, z0.s\n"
"uzp1 z20.b, z20.b, z21.b\n"
+ "st1b { z20.b }, p1, [x22]\n"
+ "add z26.s, z26.s, z4.s\n"
"smax z25.s, p2/M, z25.s, z5.s\n"
- "smax z26.s, p2/M, z26.s, z5.s\n"
+ "add z27.s, z27.s, z4.s\n"
+ "smin z26.s, p2/M, z26.s, z6.s\n"
"uzp1 z24.h, z24.h, z25.h\n"
- "st1b { z20.b }, p1, [x23]\n"
+ "smin z27.s, p2/M, z27.s, z6.s\n"
+ "smax z26.s, p2/M, z26.s, z5.s\n"
"smax z27.s, p2/M, z27.s, z5.s\n"
"uzp1 z25.h, z26.h, z27.h\n"
"uzp1 z24.b, z24.b, z25.b\n"
- "st1b { z24.b }, p1, [x22]\n"
- "addvl x27, x27, #1\n"
+ "st1b { z24.b }, p1, [x21]\n"
"42:" // Height 3: Writeback done
"decw x9, ALL, MUL #4\n"
"cmp x9, XZR\n"
"bgt 30b\n"
"b 58f\n"
"43:" // Height 4
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x4\n"
- "mov x10, %x[col_bias]\n"
"mov z11.s, #0x0\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x27, %x[col_bias]\n"
"mov z12.s, #0x0\n"
- "mov z13.s, #0x0\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"bic %x[flags], %x[flags], #0x80000000\n"
- "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov z13.s, #0x0\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x26, %x[output_ptr]\n"
"mov z14.s, #0x0\n"
+ "mov x19, #0x4\n"
"mov z15.b, #0x1\n"
- "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"44:" // Height 4: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x9\n"
"mov z16.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z17.s, #0x0\n"
+ "whilelt p1.b, x19, x9\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"mov z20.s, #0x0\n"
@@ -1023,66 +1023,66 @@ void sve_hybrid_s8qa_dot_4x4VL (
"mov z30.s, #0x0\n"
"mov z31.s, #0x0\n"
"45:" // Height 4: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"46:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 47f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "ldr x22, [x21, #0x10]\n"
- "ldr x21, [x21, #0x18]\n"
- "cbnz x26, 48f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
- "add x21, x21, x20\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "ldr x20, [x20, #0x18]\n"
+ "cbnz x25, 48f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
+ "add x20, x20, x19\n"
"b 48f\n"
"47:" // Height 4: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
+ "add x20, x21, x19\n"
"48:" // Height 4: input setup done
- "cmp x25, #0x10\n"
+ "cmp x24, #0x10\n"
"ble 51f\n"
"49:" // Height 4: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x25\n"
- "ld1rqb { z0.b }, p0/Z, [x24]\n"
- "ld1rqb { z1.b }, p0/Z, [x23]\n"
- "add x24, x24, #0x10\n"
- "ld1rqb { z2.b }, p0/Z, [x22]\n"
- "ld1rqb { z3.b }, p0/Z, [x21]\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
"ld1b { z4.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x24\n"
"ld1b { z5.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1rqb { z0.b }, p0/Z, [x23]\n"
"sdot z16.s, z4.b, z0.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x22]\n"
+ "add x23, x23, #0x10\n"
+ "sdot z17.s, z5.b, z0.b[0]\n"
+ "ld1rqb { z2.b }, p0/Z, [x21]\n"
+ "add x22, x22, #0x10\n"
"sdot z20.s, z4.b, z1.b[0]\n"
- "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1rqb { z3.b }, p0/Z, [x20]\n"
+ "add x21, x21, #0x10\n"
"sdot z24.s, z4.b, z2.b[0]\n"
- "sdot z28.s, z4.b, z3.b[0]\n"
- "sdot z17.s, z5.b, z0.b[0]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x20, x20, #0x10\n"
"sdot z21.s, z5.b, z1.b[0]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "sdot z25.s, z5.b, z2.b[0]\n"
"ld1b { z8.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "sdot z28.s, z4.b, z3.b[0]\n"
"ld1b { z9.b }, p2/Z, [x28, #5, MUL VL]\n"
- "sdot z25.s, z5.b, z2.b[0]\n"
"sdot z29.s, z5.b, z3.b[0]\n"
"ld1b { z10.b }, p2/Z, [x28, #6, MUL VL]\n"
- "ld1b { z4.b }, p2/Z, [x28, #7, MUL VL]\n"
"sdot z18.s, z6.b, z0.b[0]\n"
- "sdot z22.s, z6.b, z1.b[0]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
+ "sdot z22.s, z6.b, z1.b[0]\n"
"ld1b { z5.b }, p2/Z, [x28, #-8, MUL VL]\n"
"sdot z26.s, z6.b, z2.b[0]\n"
"sdot z30.s, z6.b, z3.b[0]\n"
"ld1b { z6.b }, p2/Z, [x28, #-7, MUL VL]\n"
- "add x21, x21, #0x10\n"
"sdot z19.s, z7.b, z0.b[0]\n"
"sdot z23.s, z7.b, z1.b[0]\n"
"sdot z27.s, z7.b, z2.b[0]\n"
@@ -1147,27 +1147,27 @@ void sve_hybrid_s8qa_dot_4x4VL (
"sdot z13.s, z2.b, z15.b\n"
"sdot z14.s, z3.b, z15.b\n"
"50:" // Height 4: Multiply loop: unique 7: skip row sum
- "sub x25, x25, #0x10\n"
- "cmp x25, #0x10\n"
+ "sub x24, x24, #0x10\n"
+ "cmp x24, #0x10\n"
"bgt 49b\n"
"51:" // Height 4: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x25\n"
- "ld1rqb { z0.b }, p0/Z, [x24]\n"
- "ld1rqb { z1.b }, p0/Z, [x23]\n"
- "subs x25, x25, #0x4\n"
- "ld1rqb { z2.b }, p0/Z, [x22]\n"
- "ld1rqb { z3.b }, p0/Z, [x21]\n"
"ld1b { z4.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x24\n"
"ld1b { z5.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x24, x24, #0x4\n"
+ "ld1rqb { z0.b }, p0/Z, [x23]\n"
"sdot z16.s, z4.b, z0.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x22]\n"
+ "sdot z17.s, z5.b, z0.b[0]\n"
+ "ld1rqb { z2.b }, p0/Z, [x21]\n"
+ "ld1rqb { z3.b }, p0/Z, [x20]\n"
"sdot z20.s, z4.b, z1.b[0]\n"
"ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "sdot z21.s, z5.b, z1.b[0]\n"
"ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"sdot z24.s, z4.b, z2.b[0]\n"
"sdot z28.s, z4.b, z3.b[0]\n"
- "sdot z17.s, z5.b, z0.b[0]\n"
- "sdot z21.s, z5.b, z1.b[0]\n"
- "addvl x28, x28, #4\n"
"sdot z25.s, z5.b, z2.b[0]\n"
"sdot z29.s, z5.b, z3.b[0]\n"
"sdot z18.s, z6.b, z0.b[0]\n"
@@ -1180,16 +1180,16 @@ void sve_hybrid_s8qa_dot_4x4VL (
"sdot z31.s, z7.b, z3.b[0]\n"
"ble 52f\n"
"ld1b { z8.b }, p2/Z, [x28]\n"
- "ld1b { z9.b }, p2/Z, [x28, #1, MUL VL]\n"
- "subs x25, x25, #0x4\n"
"sdot z16.s, z8.b, z0.b[1]\n"
- "ld1b { z10.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z4.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x24, x24, #0x4\n"
"sdot z20.s, z8.b, z1.b[1]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #2, MUL VL]\n"
"sdot z24.s, z8.b, z2.b[1]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"sdot z28.s, z8.b, z3.b[1]\n"
"sdot z17.s, z9.b, z0.b[1]\n"
- "addvl x28, x28, #4\n"
"sdot z21.s, z9.b, z1.b[1]\n"
"sdot z25.s, z9.b, z2.b[1]\n"
"sdot z29.s, z9.b, z3.b[1]\n"
@@ -1203,16 +1203,16 @@ void sve_hybrid_s8qa_dot_4x4VL (
"sdot z31.s, z4.b, z3.b[1]\n"
"ble 52f\n"
"ld1b { z5.b }, p2/Z, [x28]\n"
- "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
- "subs x25, x25, #0x4\n"
"sdot z16.s, z5.b, z0.b[2]\n"
- "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x24, x24, #0x4\n"
"sdot z20.s, z5.b, z1.b[2]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
"sdot z24.s, z5.b, z2.b[2]\n"
+ "ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"sdot z28.s, z5.b, z3.b[2]\n"
"sdot z17.s, z6.b, z0.b[2]\n"
- "addvl x28, x28, #4\n"
"sdot z21.s, z6.b, z1.b[2]\n"
"sdot z25.s, z6.b, z2.b[2]\n"
"sdot z29.s, z6.b, z3.b[2]\n"
@@ -1226,16 +1226,16 @@ void sve_hybrid_s8qa_dot_4x4VL (
"sdot z31.s, z8.b, z3.b[2]\n"
"ble 52f\n"
"ld1b { z9.b }, p2/Z, [x28]\n"
- "ld1b { z10.b }, p2/Z, [x28, #1, MUL VL]\n"
"sdot z16.s, z9.b, z0.b[3]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #1, MUL VL]\n"
"sdot z20.s, z9.b, z1.b[3]\n"
"ld1b { z4.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z5.b }, p2/Z, [x28, #3, MUL VL]\n"
"sdot z24.s, z9.b, z2.b[3]\n"
+ "ld1b { z5.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"sdot z28.s, z9.b, z3.b[3]\n"
"sdot z17.s, z10.b, z0.b[3]\n"
"sdot z21.s, z10.b, z1.b[3]\n"
- "addvl x28, x28, #4\n"
"sdot z25.s, z10.b, z2.b[3]\n"
"sdot z29.s, z10.b, z3.b[3]\n"
"sdot z18.s, z4.b, z0.b[3]\n"
@@ -1253,27 +1253,27 @@ void sve_hybrid_s8qa_dot_4x4VL (
"sdot z13.s, z2.b, z15.b\n"
"sdot z14.s, z3.b, z15.b\n"
"53:" // Height 4: Multiply loop: unique 8: skip row sum
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 46b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x22, x26, x19\n"
+ "add x21, x22, x19\n"
+ "add x20, x21, x19\n"
"tbnz %x[flags], #31, 54f\n"
- "mov x20, #0x4\n"
- "whilelt p0.s, XZR, x20\n"
- "add x20, %x[qp], %[b_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x20]\n"
+ "add x19, %x[qp], %[b_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x19]\n"
+ "neg z4.s, p2/M, z4.s\n"
+ "mov x19, #0x4\n"
+ "whilelt p0.s, XZR, x19\n"
"saddv d11, p0, z11.s\n"
- "mov z11.s, z11.s[0]\n"
"saddv d12, p0, z12.s\n"
"saddv d13, p0, z13.s\n"
+ "saddv d14, p0, z14.s\n"
+ "mov z11.s, z11.s[0]\n"
"mov z12.s, z12.s[0]\n"
"mov z13.s, z13.s[0]\n"
- "saddv d14, p0, z14.s\n"
- "neg z4.s, p2/M, z4.s\n"
"mov z14.s, z14.s[0]\n"
"mul z11.s, p2/M, z11.s, z4.s\n"
"mul z12.s, p2/M, z12.s, z4.s\n"
@@ -1281,24 +1281,24 @@ void sve_hybrid_s8qa_dot_4x4VL (
"mul z14.s, p2/M, z14.s, z4.s\n"
"54:" // Height 4: skip row sum fixup
"add z16.s, z16.s, z11.s\n"
+ "ld1w { z0.s }, p2/Z, [x27]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"add z17.s, z17.s, z11.s\n"
- "ld1w { z0.s }, p2/Z, [x10]\n"
- "ld1w { z1.s }, p2/Z, [x10, #1, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x27, #1, MUL VL]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
"add z18.s, z18.s, z11.s\n"
+ "ld1w { z2.s }, p2/Z, [x27, #2, MUL VL]\n"
+ "add x19, %x[qp], %[per_layer_mul]\n"
"add z19.s, z19.s, z11.s\n"
- "ld1w { z2.s }, p2/Z, [x10, #2, MUL VL]\n"
- "ld1w { z3.s }, p2/Z, [x10, #3, MUL VL]\n"
+ "ld1w { z3.s }, p2/Z, [x27, #3, MUL VL]\n"
+ "addvl x27, x27, #4\n"
"add z20.s, z20.s, z12.s\n"
+ "ld1rw { z4.s }, p2/Z, [x19]\n"
"add z21.s, z21.s, z12.s\n"
- "add x20, %x[qp], %[per_layer_mul]\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
"add z22.s, z22.s, z12.s\n"
"add z23.s, z23.s, z12.s\n"
- "ld1rw { z4.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[per_layer_right_shift]\n"
"add z24.s, z24.s, z13.s\n"
"add z25.s, z25.s, z13.s\n"
- "addvl x10, x10, #4\n"
"add z26.s, z26.s, z13.s\n"
"add z27.s, z27.s, z13.s\n"
"add z28.s, z28.s, z14.s\n"
@@ -1318,8 +1318,8 @@ void sve_hybrid_s8qa_dot_4x4VL (
"add z26.s, z26.s, z2.s\n"
"add z27.s, z27.s, z3.s\n"
"add z28.s, z28.s, z0.s\n"
+ "ld1rw { z0.s }, p2/Z, [x23]\n"
"add z29.s, z29.s, z1.s\n"
- "ld1rw { z0.s }, p2/Z, [x20]\n"
"add z30.s, z30.s, z2.s\n"
"add z31.s, z31.s, z3.s\n"
".inst 0x04a47610 // sqrdmulh z16.s, z16.s, z4.s\n"
@@ -1341,160 +1341,160 @@ void sve_hybrid_s8qa_dot_4x4VL (
"tbz %x[flags], #5, 55f\n"
"and z4.d, z16.d, z0.d\n"
"and z5.d, z17.d, z0.d\n"
+ "and z6.d, z18.d, z0.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
+ "asr z6.s, z6.s, #0x1f\n"
"sqadd z16.s, z16.s, z4.s\n"
"sqadd z17.s, z17.s, z5.s\n"
- "and z6.d, z18.d, z0.d\n"
+ "sqadd z18.s, z18.s, z6.s\n"
"and z7.d, z19.d, z0.d\n"
"and z8.d, z20.d, z0.d\n"
"and z9.d, z21.d, z0.d\n"
- "and z10.d, z22.d, z0.d\n"
- "and z4.d, z23.d, z0.d\n"
- "and z5.d, z24.d, z0.d\n"
- "asr z6.s, z6.s, #0x1f\n"
"asr z7.s, z7.s, #0x1f\n"
"asr z8.s, z8.s, #0x1f\n"
"asr z9.s, z9.s, #0x1f\n"
- "asr z10.s, z10.s, #0x1f\n"
- "asr z4.s, z4.s, #0x1f\n"
- "asr z5.s, z5.s, #0x1f\n"
- "sqadd z18.s, z18.s, z6.s\n"
"sqadd z19.s, z19.s, z7.s\n"
"sqadd z20.s, z20.s, z8.s\n"
"sqadd z21.s, z21.s, z9.s\n"
+ "and z10.d, z22.d, z0.d\n"
+ "and z4.d, z23.d, z0.d\n"
+ "and z5.d, z24.d, z0.d\n"
+ "asr z10.s, z10.s, #0x1f\n"
+ "asr z4.s, z4.s, #0x1f\n"
+ "asr z5.s, z5.s, #0x1f\n"
"sqadd z22.s, z22.s, z10.s\n"
"sqadd z23.s, z23.s, z4.s\n"
"sqadd z24.s, z24.s, z5.s\n"
"and z6.d, z25.d, z0.d\n"
"and z7.d, z26.d, z0.d\n"
"and z8.d, z27.d, z0.d\n"
- "and z9.d, z28.d, z0.d\n"
- "and z10.d, z29.d, z0.d\n"
- "and z4.d, z30.d, z0.d\n"
- "and z5.d, z31.d, z0.d\n"
"asr z6.s, z6.s, #0x1f\n"
"asr z7.s, z7.s, #0x1f\n"
"asr z8.s, z8.s, #0x1f\n"
- "asr z9.s, z9.s, #0x1f\n"
- "asr z10.s, z10.s, #0x1f\n"
- "asr z4.s, z4.s, #0x1f\n"
- "asr z5.s, z5.s, #0x1f\n"
"sqadd z25.s, z25.s, z6.s\n"
"sqadd z26.s, z26.s, z7.s\n"
"sqadd z27.s, z27.s, z8.s\n"
+ "and z9.d, z28.d, z0.d\n"
+ "and z10.d, z29.d, z0.d\n"
+ "and z4.d, z30.d, z0.d\n"
+ "asr z9.s, z9.s, #0x1f\n"
+ "asr z10.s, z10.s, #0x1f\n"
+ "asr z4.s, z4.s, #0x1f\n"
"sqadd z28.s, z28.s, z9.s\n"
"sqadd z29.s, z29.s, z10.s\n"
"sqadd z30.s, z30.s, z4.s\n"
+ "and z5.d, z31.d, z0.d\n"
+ "asr z5.s, z5.s, #0x1f\n"
"sqadd z31.s, z31.s, z5.s\n"
"55:" // Height 4: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x20]\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z16.s, z16.s, z4.s\n"
+ "add x19, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x19]\n"
".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
+ "add x19, %x[qp], %[minval]\n"
".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
- "add z17.s, z17.s, z4.s\n"
- "add z18.s, z18.s, z4.s\n"
+ "ld1rw { z5.s }, p2/Z, [x19]\n"
+ "add x19, %x[qp], %[maxval]\n"
".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
+ "ld1rw { z6.s }, p2/Z, [x19]\n"
".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
+ "add z16.s, z16.s, z4.s\n"
+ "add z17.s, z17.s, z4.s\n"
+ "add z18.s, z18.s, z4.s\n"
"add z19.s, z19.s, z4.s\n"
"add z20.s, z20.s, z4.s\n"
- ".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
- ".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
- "add z21.s, z21.s, z4.s\n"
- "add z22.s, z22.s, z4.s\n"
- ".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
- ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
- "add z23.s, z23.s, z4.s\n"
- "add z24.s, z24.s, z4.s\n"
- ".inst 0x44828819 // srshl z25.s, p2/M, z25.s, z0.s\n"
- ".inst 0x4482881a // srshl z26.s, p2/M, z26.s, z0.s\n"
- "add z25.s, z25.s, z4.s\n"
- "add z26.s, z26.s, z4.s\n"
- ".inst 0x4482881b // srshl z27.s, p2/M, z27.s, z0.s\n"
- ".inst 0x4482881c // srshl z28.s, p2/M, z28.s, z0.s\n"
- "add z27.s, z27.s, z4.s\n"
- "add z28.s, z28.s, z4.s\n"
- ".inst 0x4482881d // srshl z29.s, p2/M, z29.s, z0.s\n"
- ".inst 0x4482881e // srshl z30.s, p2/M, z30.s, z0.s\n"
- "add z29.s, z29.s, z4.s\n"
- "add z30.s, z30.s, z4.s\n"
- ".inst 0x4482881f // srshl z31.s, p2/M, z31.s, z0.s\n"
- "add x20, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x20]\n"
- "add z31.s, z31.s, z4.s\n"
- "add x20, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x20]\n"
"smin z16.s, p2/M, z16.s, z6.s\n"
"smin z17.s, p2/M, z17.s, z6.s\n"
"smin z18.s, p2/M, z18.s, z6.s\n"
"smin z19.s, p2/M, z19.s, z6.s\n"
- "smin z20.s, p2/M, z20.s, z6.s\n"
- "smin z21.s, p2/M, z21.s, z6.s\n"
- "smin z22.s, p2/M, z22.s, z6.s\n"
- "smin z23.s, p2/M, z23.s, z6.s\n"
- "smin z24.s, p2/M, z24.s, z6.s\n"
- "smin z25.s, p2/M, z25.s, z6.s\n"
- "smin z26.s, p2/M, z26.s, z6.s\n"
- "smin z27.s, p2/M, z27.s, z6.s\n"
- "smin z28.s, p2/M, z28.s, z6.s\n"
- "smin z29.s, p2/M, z29.s, z6.s\n"
- "smin z30.s, p2/M, z30.s, z6.s\n"
- "smin z31.s, p2/M, z31.s, z6.s\n"
"smax z16.s, p2/M, z16.s, z5.s\n"
"smax z17.s, p2/M, z17.s, z5.s\n"
"smax z18.s, p2/M, z18.s, z5.s\n"
- "uzp1 z16.h, z16.h, z17.h\n"
"smax z19.s, p2/M, z19.s, z5.s\n"
- "smax z20.s, p2/M, z20.s, z5.s\n"
+ "smin z20.s, p2/M, z20.s, z6.s\n"
+ "uzp1 z16.h, z16.h, z17.h\n"
+ ".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
"uzp1 z17.h, z18.h, z19.h\n"
+ "smax z20.s, p2/M, z20.s, z5.s\n"
"uzp1 z16.b, z16.b, z17.b\n"
+ "st1b { z16.b }, p1, [x26]\n"
+ "add z21.s, z21.s, z4.s\n"
+ "addvl x26, x26, #1\n"
+ ".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
+ ".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
+ ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
+ "smin z21.s, p2/M, z21.s, z6.s\n"
+ ".inst 0x44828819 // srshl z25.s, p2/M, z25.s, z0.s\n"
+ "add z22.s, z22.s, z4.s\n"
+ "add z23.s, z23.s, z4.s\n"
+ "add z24.s, z24.s, z4.s\n"
+ "add z25.s, z25.s, z4.s\n"
"smax z21.s, p2/M, z21.s, z5.s\n"
- "smax z22.s, p2/M, z22.s, z5.s\n"
+ "smin z22.s, p2/M, z22.s, z6.s\n"
+ "smin z23.s, p2/M, z23.s, z6.s\n"
+ "smin z24.s, p2/M, z24.s, z6.s\n"
"uzp1 z20.h, z20.h, z21.h\n"
- "st1b { z16.b }, p1, [x27]\n"
+ "smax z22.s, p2/M, z22.s, z5.s\n"
"smax z23.s, p2/M, z23.s, z5.s\n"
"smax z24.s, p2/M, z24.s, z5.s\n"
+ "smin z25.s, p2/M, z25.s, z6.s\n"
+ ".inst 0x4482881a // srshl z26.s, p2/M, z26.s, z0.s\n"
"uzp1 z21.h, z22.h, z23.h\n"
+ ".inst 0x4482881b // srshl z27.s, p2/M, z27.s, z0.s\n"
"uzp1 z20.b, z20.b, z21.b\n"
+ "st1b { z20.b }, p1, [x22]\n"
+ "add z26.s, z26.s, z4.s\n"
"smax z25.s, p2/M, z25.s, z5.s\n"
- "smax z26.s, p2/M, z26.s, z5.s\n"
+ "add z27.s, z27.s, z4.s\n"
+ ".inst 0x4482881c // srshl z28.s, p2/M, z28.s, z0.s\n"
+ "smin z26.s, p2/M, z26.s, z6.s\n"
"uzp1 z24.h, z24.h, z25.h\n"
- "st1b { z20.b }, p1, [x23]\n"
+ "smin z27.s, p2/M, z27.s, z6.s\n"
+ "add z28.s, z28.s, z4.s\n"
+ "smax z26.s, p2/M, z26.s, z5.s\n"
+ ".inst 0x4482881d // srshl z29.s, p2/M, z29.s, z0.s\n"
"smax z27.s, p2/M, z27.s, z5.s\n"
- "smax z28.s, p2/M, z28.s, z5.s\n"
+ "smin z28.s, p2/M, z28.s, z6.s\n"
+ ".inst 0x4482881e // srshl z30.s, p2/M, z30.s, z0.s\n"
+ "add z29.s, z29.s, z4.s\n"
"uzp1 z25.h, z26.h, z27.h\n"
+ "smax z28.s, p2/M, z28.s, z5.s\n"
+ "add z30.s, z30.s, z4.s\n"
"uzp1 z24.b, z24.b, z25.b\n"
+ "st1b { z24.b }, p1, [x21]\n"
+ "smin z29.s, p2/M, z29.s, z6.s\n"
+ "smin z30.s, p2/M, z30.s, z6.s\n"
+ ".inst 0x4482881f // srshl z31.s, p2/M, z31.s, z0.s\n"
"smax z29.s, p2/M, z29.s, z5.s\n"
"smax z30.s, p2/M, z30.s, z5.s\n"
+ "add z31.s, z31.s, z4.s\n"
"uzp1 z28.h, z28.h, z29.h\n"
- "st1b { z24.b }, p1, [x22]\n"
+ "smin z31.s, p2/M, z31.s, z6.s\n"
"smax z31.s, p2/M, z31.s, z5.s\n"
"uzp1 z29.h, z30.h, z31.h\n"
"uzp1 z28.b, z28.b, z29.b\n"
- "st1b { z28.b }, p1, [x21]\n"
- "addvl x27, x27, #1\n"
+ "st1b { z28.b }, p1, [x20]\n"
"56:" // Height 4: Writeback done
"decw x9, ALL, MUL #4\n"
"cmp x9, XZR\n"
"bgt 44b\n"
"subs %x[M], %x[M], #0x4\n"
"beq 58f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 57f\n"
- "add x21, x21, #0x4\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x4\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"57:" // Update direct input
- "mov x20, #0x4\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x4\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"58:" // Exit
: [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
- : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_mmla_4x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_mmla_4x4VL/generic.cpp
index 626a06b26b..04f80982e8 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_mmla_4x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_mmla_4x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -85,18 +85,18 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"cmp %x[M], #0x2\n"
"bgt 29f\n"
"beq 15f\n"
- "mov x10, %x[col_bias]\n"
"mov z11.s, #0x0\n"
- "mov z15.b, #0x1\n"
- "bic %x[flags], %x[flags], #0x80000000\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov z15.b, #0x1\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "mov x27, %x[col_bias]\n"
+ "bic %x[flags], %x[flags], #0x80000000\n"
+ "mov x26, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x9\n"
"mov z16.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z17.s, #0x0\n"
+ "whilelt p1.b, x19, x9\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"mov z20.s, #0x0\n"
@@ -104,119 +104,119 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"3:" // Height 1: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"4:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 5f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "cbnz x26, 6f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "cbnz x25, 6f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19\n"
"b 6f\n"
"5:" // Height 1: setup direct input
- "mov x24, %x[input_ptr]\n"
+ "mov x23, %x[input_ptr]\n"
"6:" // Height 1: input setup done
- "cmp x25, #0x10\n"
+ "cmp x24, #0x10\n"
"ble 9f\n"
"7:" // Height 1: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x25\n"
- "ld1rqb { z1.b }, p0/Z, [x24]\n"
- "trn1 z0.d, z1.d, z2.d\n"
"ld1b { z5.b }, p2/Z, [x28]\n"
- ".inst 0x45059810 // smmla z16.s, z0.b, z5.b\n"
+ "whilelt p0.b, XZR, x24\n"
"ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
- "trn2 z1.d, z1.d, z2.d\n"
+ "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
"ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x23, x23, #0x10\n"
+ "trn2 z1.d, z1.d, z2.d\n"
"ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
- ".inst 0x45069814 // smmla z20.s, z0.b, z6.b\n"
- ".inst 0x45079811 // smmla z17.s, z0.b, z7.b\n"
"ld1b { z9.b }, p2/Z, [x28, #4, MUL VL]\n"
+ ".inst 0x45059810 // smmla z16.s, z0.b, z5.b\n"
+ ".inst 0x45069814 // smmla z20.s, z0.b, z6.b\n"
"ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n"
- ".inst 0x45089815 // smmla z21.s, z0.b, z8.b\n"
- ".inst 0x45099812 // smmla z18.s, z0.b, z9.b\n"
+ ".inst 0x45079811 // smmla z17.s, z0.b, z7.b\n"
"ld1b { z4.b }, p2/Z, [x28, #6, MUL VL]\n"
+ ".inst 0x45089815 // smmla z21.s, z0.b, z8.b\n"
"ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
- ".inst 0x450a9816 // smmla z22.s, z0.b, z10.b\n"
+ ".inst 0x45099812 // smmla z18.s, z0.b, z9.b\n"
"ld1b { z6.b }, p2/Z, [x28, #-8, MUL VL]\n"
- ".inst 0x45049813 // smmla z19.s, z0.b, z4.b\n"
- ".inst 0x45059817 // smmla z23.s, z0.b, z5.b\n"
+ ".inst 0x450a9816 // smmla z22.s, z0.b, z10.b\n"
"ld1b { z7.b }, p2/Z, [x28, #-7, MUL VL]\n"
- ".inst 0x45069830 // smmla z16.s, z1.b, z6.b\n"
+ ".inst 0x45049813 // smmla z19.s, z0.b, z4.b\n"
"ld1b { z8.b }, p2/Z, [x28, #-6, MUL VL]\n"
+ ".inst 0x45059817 // smmla z23.s, z0.b, z5.b\n"
"ld1b { z9.b }, p2/Z, [x28, #-5, MUL VL]\n"
- ".inst 0x45079834 // smmla z20.s, z1.b, z7.b\n"
+ ".inst 0x45069830 // smmla z16.s, z1.b, z6.b\n"
"ld1b { z10.b }, p2/Z, [x28, #-4, MUL VL]\n"
+ ".inst 0x45079834 // smmla z20.s, z1.b, z7.b\n"
"ld1b { z4.b }, p2/Z, [x28, #-3, MUL VL]\n"
".inst 0x45089831 // smmla z17.s, z1.b, z8.b\n"
- ".inst 0x45099835 // smmla z21.s, z1.b, z9.b\n"
"ld1b { z5.b }, p2/Z, [x28, #-2, MUL VL]\n"
+ ".inst 0x45099835 // smmla z21.s, z1.b, z9.b\n"
"ld1b { z6.b }, p2/Z, [x28, #-1, MUL VL]\n"
".inst 0x450a9832 // smmla z18.s, z1.b, z10.b\n"
".inst 0x45049836 // smmla z22.s, z1.b, z4.b\n"
".inst 0x45059833 // smmla z19.s, z1.b, z5.b\n"
".inst 0x45069837 // smmla z23.s, z1.b, z6.b\n"
- "add x24, x24, #0x10\n"
"tbnz %x[flags], #31, 8f\n"
"sdot z11.s, z0.b, z15.b\n"
"sdot z11.s, z1.b, z15.b\n"
"8:" // Height 1: Multiply loop: unique 1: skip row sum
- "sub x25, x25, #0x10\n"
- "cmp x25, #0x10\n"
+ "sub x24, x24, #0x10\n"
+ "cmp x24, #0x10\n"
"bgt 7b\n"
"9:" // Height 1: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x25\n"
- "ld1rqb { z1.b }, p0/Z, [x24]\n"
- "trn1 z0.d, z1.d, z2.d\n"
"ld1b { z5.b }, p2/Z, [x28]\n"
- ".inst 0x45059810 // smmla z16.s, z0.b, z5.b\n"
+ "whilelt p0.b, XZR, x24\n"
"ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
- "subs x25, x25, #0x8\n"
+ "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
"ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "subs x24, x24, #0x8\n"
"trn2 z1.d, z1.d, z2.d\n"
- ".inst 0x45069814 // smmla z20.s, z0.b, z6.b\n"
+ "ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
"ld1b { z9.b }, p2/Z, [x28, #4, MUL VL]\n"
+ ".inst 0x45059810 // smmla z16.s, z0.b, z5.b\n"
+ ".inst 0x45069814 // smmla z20.s, z0.b, z6.b\n"
"ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x45079811 // smmla z17.s, z0.b, z7.b\n"
- ".inst 0x45089815 // smmla z21.s, z0.b, z8.b\n"
"ld1b { z4.b }, p2/Z, [x28, #6, MUL VL]\n"
+ ".inst 0x45089815 // smmla z21.s, z0.b, z8.b\n"
"ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
".inst 0x45099812 // smmla z18.s, z0.b, z9.b\n"
".inst 0x450a9816 // smmla z22.s, z0.b, z10.b\n"
".inst 0x45049813 // smmla z19.s, z0.b, z4.b\n"
".inst 0x45059817 // smmla z23.s, z0.b, z5.b\n"
- "addvl x28, x28, #8\n"
"ble 10f\n"
"ld1b { z6.b }, p2/Z, [x28]\n"
".inst 0x45069830 // smmla z16.s, z1.b, z6.b\n"
"ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
- ".inst 0x45079834 // smmla z20.s, z1.b, z7.b\n"
"ld1b { z8.b }, p2/Z, [x28, #2, MUL VL]\n"
+ ".inst 0x45079834 // smmla z20.s, z1.b, z7.b\n"
"ld1b { z9.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45089831 // smmla z17.s, z1.b, z8.b\n"
- ".inst 0x45099835 // smmla z21.s, z1.b, z9.b\n"
"ld1b { z10.b }, p2/Z, [x28, #4, MUL VL]\n"
"ld1b { z4.b }, p2/Z, [x28, #5, MUL VL]\n"
- ".inst 0x450a9832 // smmla z18.s, z1.b, z10.b\n"
- ".inst 0x45049836 // smmla z22.s, z1.b, z4.b\n"
+ ".inst 0x45099835 // smmla z21.s, z1.b, z9.b\n"
"ld1b { z5.b }, p2/Z, [x28, #6, MUL VL]\n"
"ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ ".inst 0x450a9832 // smmla z18.s, z1.b, z10.b\n"
+ "addvl x28, x28, #8\n"
+ ".inst 0x45049836 // smmla z22.s, z1.b, z4.b\n"
".inst 0x45059833 // smmla z19.s, z1.b, z5.b\n"
".inst 0x45069837 // smmla z23.s, z1.b, z6.b\n"
- "addvl x28, x28, #8\n"
"10:" // Height 1: Multiply loop: multiply skip
"tbnz %x[flags], #31, 11f\n"
"sdot z11.s, z0.b, z15.b\n"
"sdot z11.s, z1.b, z15.b\n"
"11:" // Height 1: Multiply loop: unique 2: skip row sum
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 4b\n"
"uzp1 z16.d, z16.d, z20.d\n"
"uzp1 z17.d, z17.d, z21.d\n"
@@ -224,33 +224,33 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"uzp1 z19.d, z19.d, z23.d\n"
"mov z23.d, z16.d\n"
"tbnz %x[flags], #31, 12f\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1rw { z1.s }, p2/Z, [x23]\n"
".inst 0x4491a96b // addp z11.s, p2/m, z11.s, z11.s\n"
- "neg z1.s, p2/M, z1.s\n"
+ "add x22, %x[qp], %[b_offset]\n"
+ "ld1rw { z1.s }, p2/Z, [x22]\n"
"mov z11.s, z11.s[0]\n"
+ "neg z1.s, p2/M, z1.s\n"
"mul z11.s, p2/M, z11.s, z1.s\n"
"12:" // Height 1: skip row sum fixup
"add z23.s, z23.s, z11.s\n"
+ "ld1w { z0.s }, p2/Z, [x27]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"add z17.s, z17.s, z11.s\n"
- "ld1w { z0.s }, p2/Z, [x10]\n"
- "ld1w { z1.s }, p2/Z, [x10, #1, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x27, #1, MUL VL]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
"add z18.s, z18.s, z11.s\n"
+ "ld1w { z2.s }, p2/Z, [x27, #2, MUL VL]\n"
+ "add x22, %x[qp], %[per_layer_mul]\n"
"add z19.s, z19.s, z11.s\n"
- "ld1w { z2.s }, p2/Z, [x10, #2, MUL VL]\n"
- "ld1w { z3.s }, p2/Z, [x10, #3, MUL VL]\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
+ "ld1w { z3.s }, p2/Z, [x27, #3, MUL VL]\n"
+ "addvl x27, x27, #4\n"
"add z23.s, z23.s, z0.s\n"
+ "ld1rw { z0.s }, p2/Z, [x23]\n"
"add z17.s, z17.s, z1.s\n"
+ "ld1rw { z4.s }, p2/Z, [x22]\n"
"add z18.s, z18.s, z2.s\n"
"add z19.s, z19.s, z3.s\n"
- "ld1rw { z4.s }, p2/Z, [x23]\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
- "ld1rw { z0.s }, p2/Z, [x23]\n"
".inst 0x04a476f7 // sqrdmulh z23.s, z23.s, z4.s\n"
".inst 0x04a47631 // sqrdmulh z17.s, z17.s, z4.s\n"
- "addvl x10, x10, #4\n"
".inst 0x04a47652 // sqrdmulh z18.s, z18.s, z4.s\n"
".inst 0x04a47673 // sqrdmulh z19.s, z19.s, z4.s\n"
"tbz %x[flags], #5, 13f\n"
@@ -261,26 +261,26 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
"sqadd z23.s, z23.s, z4.s\n"
"sqadd z17.s, z17.s, z5.s\n"
"sqadd z18.s, z18.s, z6.s\n"
+ "asr z7.s, z7.s, #0x1f\n"
"sqadd z19.s, z19.s, z7.s\n"
"13:" // Height 1: no shift correction
- "add x23, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x23]\n"
".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
- "add z23.s, z23.s, z4.s\n"
+ "add x22, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x22]\n"
".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
+ "add x22, %x[qp], %[minval]\n"
".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
+ "ld1rw { z5.s }, p2/Z, [x22]\n"
+ "add x22, %x[qp], %[maxval]\n"
+ ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
+ "ld1rw { z6.s }, p2/Z, [x22]\n"
+ "add z23.s, z23.s, z4.s\n"
"add z17.s, z17.s, z4.s\n"
"add z18.s, z18.s, z4.s\n"
- ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x23]\n"
"add z19.s, z19.s, z4.s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x23]\n"
"smin z23.s, p2/M, z23.s, z6.s\n"
"smin z17.s, p2/M, z17.s, z6.s\n"
"smin z18.s, p2/M, z18.s, z6.s\n"
@@ -288,31 +288,31 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"smax z23.s, p2/M, z23.s, z5.s\n"
"smax z17.s, p2/M, z17.s, z5.s\n"
"smax z18.s, p2/M, z18.s, z5.s\n"
- "uzp1 z23.h, z23.h, z17.h\n"
"smax z19.s, p2/M, z19.s, z5.s\n"
+ "uzp1 z23.h, z23.h, z17.h\n"
"uzp1 z17.h, z18.h, z19.h\n"
"uzp1 z23.b, z23.b, z17.b\n"
- "st1b { z23.b }, p1, [x27]\n"
- "addvl x27, x27, #1\n"
+ "st1b { z23.b }, p1, [x26]\n"
+ "addvl x26, x26, #1\n"
"14:" // Height 1: Writeback done
"decw x9, ALL, MUL #4\n"
"cmp x9, XZR\n"
"bgt 2b\n"
"b 58f\n"
"15:" // Height 2
- "mov x10, %x[col_bias]\n"
"mov z11.s, #0x0\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x27, %x[col_bias]\n"
"mov z12.s, #0x0\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"bic %x[flags], %x[flags], #0x80000000\n"
"mov z15.b, #0x1\n"
- "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "mov x26, %x[output_ptr]\n"
"16:" // Height 2: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x9\n"
"mov z16.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z17.s, #0x0\n"
+ "whilelt p1.b, x19, x9\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"mov z20.s, #0x0\n"
@@ -320,130 +320,130 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"17:" // Height 2: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"18:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 19f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "cbnz x26, 20f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "cbnz x25, 20f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
"b 20f\n"
"19:" // Height 2: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19\n"
"20:" // Height 2: input setup done
- "cmp x25, #0x10\n"
+ "cmp x24, #0x10\n"
"ble 23f\n"
"21:" // Height 2: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x25\n"
- "ld1rqb { z1.b }, p0/Z, [x24]\n"
- "ld1rqb { z2.b }, p0/Z, [x23]\n"
- "trn1 z0.d, z1.d, z2.d\n"
"ld1b { z5.b }, p2/Z, [x28]\n"
- ".inst 0x45059810 // smmla z16.s, z0.b, z5.b\n"
+ "whilelt p0.b, XZR, x24\n"
"ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
- "trn2 z1.d, z1.d, z2.d\n"
+ "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ "add x23, x23, #0x10\n"
+ "ld1rqb { z2.b }, p0/Z, [x22]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
"ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x22, x22, #0x10\n"
+ "trn2 z1.d, z1.d, z2.d\n"
"ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
- ".inst 0x45069814 // smmla z20.s, z0.b, z6.b\n"
- ".inst 0x45079811 // smmla z17.s, z0.b, z7.b\n"
"ld1b { z9.b }, p2/Z, [x28, #4, MUL VL]\n"
+ ".inst 0x45059810 // smmla z16.s, z0.b, z5.b\n"
+ ".inst 0x45069814 // smmla z20.s, z0.b, z6.b\n"
"ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n"
- ".inst 0x45089815 // smmla z21.s, z0.b, z8.b\n"
- ".inst 0x45099812 // smmla z18.s, z0.b, z9.b\n"
+ ".inst 0x45079811 // smmla z17.s, z0.b, z7.b\n"
"ld1b { z4.b }, p2/Z, [x28, #6, MUL VL]\n"
+ ".inst 0x45089815 // smmla z21.s, z0.b, z8.b\n"
"ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
- ".inst 0x450a9816 // smmla z22.s, z0.b, z10.b\n"
+ ".inst 0x45099812 // smmla z18.s, z0.b, z9.b\n"
"ld1b { z6.b }, p2/Z, [x28, #-8, MUL VL]\n"
- ".inst 0x45049813 // smmla z19.s, z0.b, z4.b\n"
- ".inst 0x45059817 // smmla z23.s, z0.b, z5.b\n"
+ ".inst 0x450a9816 // smmla z22.s, z0.b, z10.b\n"
"ld1b { z7.b }, p2/Z, [x28, #-7, MUL VL]\n"
- ".inst 0x45069830 // smmla z16.s, z1.b, z6.b\n"
+ ".inst 0x45049813 // smmla z19.s, z0.b, z4.b\n"
"ld1b { z8.b }, p2/Z, [x28, #-6, MUL VL]\n"
+ ".inst 0x45059817 // smmla z23.s, z0.b, z5.b\n"
"ld1b { z9.b }, p2/Z, [x28, #-5, MUL VL]\n"
- ".inst 0x45079834 // smmla z20.s, z1.b, z7.b\n"
+ ".inst 0x45069830 // smmla z16.s, z1.b, z6.b\n"
"ld1b { z10.b }, p2/Z, [x28, #-4, MUL VL]\n"
+ ".inst 0x45079834 // smmla z20.s, z1.b, z7.b\n"
"ld1b { z4.b }, p2/Z, [x28, #-3, MUL VL]\n"
".inst 0x45089831 // smmla z17.s, z1.b, z8.b\n"
- ".inst 0x45099835 // smmla z21.s, z1.b, z9.b\n"
"ld1b { z5.b }, p2/Z, [x28, #-2, MUL VL]\n"
+ ".inst 0x45099835 // smmla z21.s, z1.b, z9.b\n"
"ld1b { z6.b }, p2/Z, [x28, #-1, MUL VL]\n"
".inst 0x450a9832 // smmla z18.s, z1.b, z10.b\n"
".inst 0x45049836 // smmla z22.s, z1.b, z4.b\n"
".inst 0x45059833 // smmla z19.s, z1.b, z5.b\n"
".inst 0x45069837 // smmla z23.s, z1.b, z6.b\n"
- "add x24, x24, #0x10\n"
- "add x23, x23, #0x10\n"
"tbnz %x[flags], #31, 22f\n"
"sdot z11.s, z0.b, z15.b\n"
"sdot z11.s, z1.b, z15.b\n"
"22:" // Height 2: Multiply loop: unique 3: skip row sum
- "sub x25, x25, #0x10\n"
- "cmp x25, #0x10\n"
+ "sub x24, x24, #0x10\n"
+ "cmp x24, #0x10\n"
"bgt 21b\n"
"23:" // Height 2: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x25\n"
- "ld1rqb { z1.b }, p0/Z, [x24]\n"
- "ld1rqb { z2.b }, p0/Z, [x23]\n"
- "trn1 z0.d, z1.d, z2.d\n"
"ld1b { z5.b }, p2/Z, [x28]\n"
- ".inst 0x45059810 // smmla z16.s, z0.b, z5.b\n"
+ "whilelt p0.b, XZR, x24\n"
"ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
- "subs x25, x25, #0x8\n"
+ "subs x24, x24, #0x8\n"
+ "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ "ld1rqb { z2.b }, p0/Z, [x22]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
"ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
"trn2 z1.d, z1.d, z2.d\n"
- ".inst 0x45069814 // smmla z20.s, z0.b, z6.b\n"
+ "ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
"ld1b { z9.b }, p2/Z, [x28, #4, MUL VL]\n"
+ ".inst 0x45059810 // smmla z16.s, z0.b, z5.b\n"
+ ".inst 0x45069814 // smmla z20.s, z0.b, z6.b\n"
"ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x45079811 // smmla z17.s, z0.b, z7.b\n"
- ".inst 0x45089815 // smmla z21.s, z0.b, z8.b\n"
"ld1b { z4.b }, p2/Z, [x28, #6, MUL VL]\n"
+ ".inst 0x45089815 // smmla z21.s, z0.b, z8.b\n"
"ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
".inst 0x45099812 // smmla z18.s, z0.b, z9.b\n"
".inst 0x450a9816 // smmla z22.s, z0.b, z10.b\n"
".inst 0x45049813 // smmla z19.s, z0.b, z4.b\n"
".inst 0x45059817 // smmla z23.s, z0.b, z5.b\n"
- "addvl x28, x28, #8\n"
"ble 24f\n"
"ld1b { z6.b }, p2/Z, [x28]\n"
".inst 0x45069830 // smmla z16.s, z1.b, z6.b\n"
"ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
- ".inst 0x45079834 // smmla z20.s, z1.b, z7.b\n"
"ld1b { z8.b }, p2/Z, [x28, #2, MUL VL]\n"
+ ".inst 0x45079834 // smmla z20.s, z1.b, z7.b\n"
"ld1b { z9.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45089831 // smmla z17.s, z1.b, z8.b\n"
- ".inst 0x45099835 // smmla z21.s, z1.b, z9.b\n"
"ld1b { z10.b }, p2/Z, [x28, #4, MUL VL]\n"
"ld1b { z4.b }, p2/Z, [x28, #5, MUL VL]\n"
- ".inst 0x450a9832 // smmla z18.s, z1.b, z10.b\n"
- ".inst 0x45049836 // smmla z22.s, z1.b, z4.b\n"
+ ".inst 0x45099835 // smmla z21.s, z1.b, z9.b\n"
"ld1b { z5.b }, p2/Z, [x28, #6, MUL VL]\n"
"ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ ".inst 0x450a9832 // smmla z18.s, z1.b, z10.b\n"
+ "addvl x28, x28, #8\n"
+ ".inst 0x45049836 // smmla z22.s, z1.b, z4.b\n"
".inst 0x45059833 // smmla z19.s, z1.b, z5.b\n"
".inst 0x45069837 // smmla z23.s, z1.b, z6.b\n"
- "addvl x28, x28, #8\n"
"24:" // Height 2: Multiply loop: multiply skip
"tbnz %x[flags], #31, 25f\n"
"sdot z11.s, z0.b, z15.b\n"
"sdot z11.s, z1.b, z15.b\n"
"25:" // Height 2: Multiply loop: unique 4: skip row sum
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 18b\n"
"uzp1 z7.d, z16.d, z20.d\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 z16.d, z16.d, z20.d\n"
- "add x22, x27, x20\n"
+ "add x21, x26, x19\n"
"uzp1 z20.d, z17.d, z21.d\n"
"uzp2 z17.d, z17.d, z21.d\n"
"uzp1 z21.d, z18.d, z22.d\n"
@@ -452,39 +452,39 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"uzp2 z19.d, z19.d, z23.d\n"
"mov z23.d, z7.d\n"
"tbnz %x[flags], #31, 26f\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1rw { z2.s }, p2/Z, [x23]\n"
".inst 0x4491a96b // addp z11.s, p2/m, z11.s, z11.s\n"
- "neg z2.s, p2/M, z2.s\n"
+ "add x22, %x[qp], %[b_offset]\n"
+ "ld1rw { z2.s }, p2/Z, [x22]\n"
"mov z12.s, z11.s[3]\n"
"mov z11.s, z11.s[0]\n"
+ "neg z2.s, p2/M, z2.s\n"
"mul z11.s, p2/M, z11.s, z2.s\n"
"mul z12.s, p2/M, z12.s, z2.s\n"
"26:" // Height 2: skip row sum fixup
"add z23.s, z23.s, z11.s\n"
+ "ld1w { z0.s }, p2/Z, [x27]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"add z20.s, z20.s, z11.s\n"
- "ld1w { z0.s }, p2/Z, [x10]\n"
- "ld1w { z1.s }, p2/Z, [x10, #1, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x27, #1, MUL VL]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
"add z21.s, z21.s, z11.s\n"
+ "ld1w { z2.s }, p2/Z, [x27, #2, MUL VL]\n"
+ "add x22, %x[qp], %[per_layer_mul]\n"
"add z22.s, z22.s, z11.s\n"
- "ld1w { z2.s }, p2/Z, [x10, #2, MUL VL]\n"
- "ld1w { z3.s }, p2/Z, [x10, #3, MUL VL]\n"
+ "ld1w { z3.s }, p2/Z, [x27, #3, MUL VL]\n"
+ "addvl x27, x27, #4\n"
"add z16.s, z16.s, z12.s\n"
+ "ld1rw { z4.s }, p2/Z, [x22]\n"
"add z17.s, z17.s, z12.s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
"add z18.s, z18.s, z12.s\n"
"add z19.s, z19.s, z12.s\n"
- "ld1rw { z4.s }, p2/Z, [x23]\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
"add z23.s, z23.s, z0.s\n"
"add z20.s, z20.s, z1.s\n"
- "addvl x10, x10, #4\n"
"add z21.s, z21.s, z2.s\n"
"add z22.s, z22.s, z3.s\n"
"add z16.s, z16.s, z0.s\n"
- "add z17.s, z17.s, z1.s\n"
"ld1rw { z0.s }, p2/Z, [x23]\n"
+ "add z17.s, z17.s, z1.s\n"
"add z18.s, z18.s, z2.s\n"
"add z19.s, z19.s, z3.s\n"
".inst 0x04a476f7 // sqrdmulh z23.s, z23.s, z4.s\n"
@@ -497,97 +497,97 @@ void sve_hybrid_s8qa_mmla_4x4VL (
".inst 0x04a47673 // sqrdmulh z19.s, z19.s, z4.s\n"
"tbz %x[flags], #5, 27f\n"
"and z4.d, z23.d, z0.d\n"
- "asr z4.s, z4.s, #0x1f\n"
- "sqadd z23.s, z23.s, z4.s\n"
"and z5.d, z20.d, z0.d\n"
"and z6.d, z21.d, z0.d\n"
+ "asr z4.s, z4.s, #0x1f\n"
+ "asr z5.s, z5.s, #0x1f\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "sqadd z23.s, z23.s, z4.s\n"
+ "sqadd z20.s, z20.s, z5.s\n"
+ "sqadd z21.s, z21.s, z6.s\n"
"and z7.d, z22.d, z0.d\n"
"and z8.d, z16.d, z0.d\n"
"and z9.d, z17.d, z0.d\n"
- "and z10.d, z18.d, z0.d\n"
- "and z4.d, z19.d, z0.d\n"
- "asr z5.s, z5.s, #0x1f\n"
- "asr z6.s, z6.s, #0x1f\n"
"asr z7.s, z7.s, #0x1f\n"
"asr z8.s, z8.s, #0x1f\n"
"asr z9.s, z9.s, #0x1f\n"
- "asr z10.s, z10.s, #0x1f\n"
- "asr z4.s, z4.s, #0x1f\n"
- "sqadd z20.s, z20.s, z5.s\n"
- "sqadd z21.s, z21.s, z6.s\n"
"sqadd z22.s, z22.s, z7.s\n"
"sqadd z16.s, z16.s, z8.s\n"
"sqadd z17.s, z17.s, z9.s\n"
+ "and z10.d, z18.d, z0.d\n"
+ "and z4.d, z19.d, z0.d\n"
+ "asr z10.s, z10.s, #0x1f\n"
+ "asr z4.s, z4.s, #0x1f\n"
"sqadd z18.s, z18.s, z10.s\n"
"sqadd z19.s, z19.s, z4.s\n"
"27:" // Height 2: no shift correction
- "add x23, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x23]\n"
".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
- "add z23.s, z23.s, z4.s\n"
+ "add x22, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x22]\n"
".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
+ "add x22, %x[qp], %[minval]\n"
".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
- "add z20.s, z20.s, z4.s\n"
- "add z21.s, z21.s, z4.s\n"
+ "ld1rw { z5.s }, p2/Z, [x22]\n"
+ "add x22, %x[qp], %[maxval]\n"
".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
+ "ld1rw { z6.s }, p2/Z, [x22]\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
+ "add z23.s, z23.s, z4.s\n"
+ "add z20.s, z20.s, z4.s\n"
+ "add z21.s, z21.s, z4.s\n"
"add z22.s, z22.s, z4.s\n"
"add z16.s, z16.s, z4.s\n"
- ".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
- ".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
- "add z17.s, z17.s, z4.s\n"
- "add z18.s, z18.s, z4.s\n"
- ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x23]\n"
- "add z19.s, z19.s, z4.s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x23]\n"
"smin z23.s, p2/M, z23.s, z6.s\n"
"smin z20.s, p2/M, z20.s, z6.s\n"
"smin z21.s, p2/M, z21.s, z6.s\n"
"smin z22.s, p2/M, z22.s, z6.s\n"
- "smin z16.s, p2/M, z16.s, z6.s\n"
- "smin z17.s, p2/M, z17.s, z6.s\n"
- "smin z18.s, p2/M, z18.s, z6.s\n"
- "smin z19.s, p2/M, z19.s, z6.s\n"
"smax z23.s, p2/M, z23.s, z5.s\n"
"smax z20.s, p2/M, z20.s, z5.s\n"
"smax z21.s, p2/M, z21.s, z5.s\n"
- "uzp1 z23.h, z23.h, z20.h\n"
"smax z22.s, p2/M, z22.s, z5.s\n"
- "smax z16.s, p2/M, z16.s, z5.s\n"
+ "smin z16.s, p2/M, z16.s, z6.s\n"
+ "uzp1 z23.h, z23.h, z20.h\n"
+ ".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
"uzp1 z20.h, z21.h, z22.h\n"
+ "smax z16.s, p2/M, z16.s, z5.s\n"
"uzp1 z23.b, z23.b, z20.b\n"
+ "st1b { z23.b }, p1, [x26]\n"
+ "add z17.s, z17.s, z4.s\n"
+ "addvl x26, x26, #1\n"
+ ".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
+ ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
+ "smin z17.s, p2/M, z17.s, z6.s\n"
+ "add z18.s, z18.s, z4.s\n"
+ "add z19.s, z19.s, z4.s\n"
"smax z17.s, p2/M, z17.s, z5.s\n"
- "smax z18.s, p2/M, z18.s, z5.s\n"
+ "smin z18.s, p2/M, z18.s, z6.s\n"
+ "smin z19.s, p2/M, z19.s, z6.s\n"
"uzp1 z16.h, z16.h, z17.h\n"
- "st1b { z23.b }, p1, [x27]\n"
+ "smax z18.s, p2/M, z18.s, z5.s\n"
"smax z19.s, p2/M, z19.s, z5.s\n"
"uzp1 z17.h, z18.h, z19.h\n"
"uzp1 z16.b, z16.b, z17.b\n"
- "st1b { z16.b }, p1, [x22]\n"
- "addvl x27, x27, #1\n"
+ "st1b { z16.b }, p1, [x21]\n"
"28:" // Height 2: Writeback done
"decw x9, ALL, MUL #4\n"
"cmp x9, XZR\n"
"bgt 16b\n"
"b 58f\n"
"29:" // Height 3
- "mov x10, %x[col_bias]\n"
"mov z11.s, #0x0\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x27, %x[col_bias]\n"
"mov z12.s, #0x0\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"bic %x[flags], %x[flags], #0x80000000\n"
"mov z13.s, #0x0\n"
+ "mov x26, %x[output_ptr]\n"
"mov z15.b, #0x1\n"
- "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
"30:" // Height 3: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x9\n"
"mov z16.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z17.s, #0x0\n"
+ "whilelt p1.b, x19, x9\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"mov z20.s, #0x0\n"
@@ -603,74 +603,74 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"mov z30.s, #0x0\n"
"mov z31.s, #0x0\n"
"31:" // Height 3: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"32:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 33f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "ldr x22, [x21, #0x10]\n"
- "cbnz x26, 34f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "cbnz x25, 34f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
"b 34f\n"
"33:" // Height 3: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
"34:" // Height 3: input setup done
- "cmp x25, #0x10\n"
+ "cmp x24, #0x10\n"
"ble 37f\n"
"35:" // Height 3: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x25\n"
- "ld1rqb { z1.b }, p0/Z, [x24]\n"
- "ld1rqb { z2.b }, p0/Z, [x23]\n"
- "ld1rqb { z3.b }, p0/Z, [x22]\n"
- "trn1 z0.d, z1.d, z2.d\n"
- "trn2 z1.d, z1.d, z2.d\n"
"ld1b { z5.b }, p2/Z, [x28]\n"
- "trn1 z2.d, z3.d, z4.d\n"
- "trn2 z3.d, z3.d, z4.d\n"
- ".inst 0x45059810 // smmla z16.s, z0.b, z5.b\n"
+ "whilelt p0.b, XZR, x24\n"
"ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
- ".inst 0x45059858 // smmla z24.s, z2.b, z5.b\n"
+ "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ "add x23, x23, #0x10\n"
+ "ld1rqb { z2.b }, p0/Z, [x22]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
+ "ld1rqb { z3.b }, p0/Z, [x21]\n"
+ "add x22, x22, #0x10\n"
+ "trn2 z1.d, z1.d, z2.d\n"
"ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x45059810 // smmla z16.s, z0.b, z5.b\n"
"ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45069814 // smmla z20.s, z0.b, z6.b\n"
"ld1b { z9.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "trn1 z2.d, z3.d, z4.d\n"
"ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n"
- ".inst 0x4506985c // smmla z28.s, z2.b, z6.b\n"
- ".inst 0x45079811 // smmla z17.s, z0.b, z7.b\n"
+ "trn2 z3.d, z3.d, z4.d\n"
"ld1b { z4.b }, p2/Z, [x28, #6, MUL VL]\n"
+ ".inst 0x45059858 // smmla z24.s, z2.b, z5.b\n"
"ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
- ".inst 0x45079859 // smmla z25.s, z2.b, z7.b\n"
+ ".inst 0x4506985c // smmla z28.s, z2.b, z6.b\n"
"ld1b { z6.b }, p2/Z, [x28, #-8, MUL VL]\n"
+ ".inst 0x45079811 // smmla z17.s, z0.b, z7.b\n"
+ ".inst 0x45079859 // smmla z25.s, z2.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-7, MUL VL]\n"
".inst 0x45089815 // smmla z21.s, z0.b, z8.b\n"
".inst 0x4508985d // smmla z29.s, z2.b, z8.b\n"
- "ld1b { z7.b }, p2/Z, [x28, #-7, MUL VL]\n"
+ "ld1b { z8.b }, p2/Z, [x28, #-6, MUL VL]\n"
".inst 0x45099812 // smmla z18.s, z0.b, z9.b\n"
".inst 0x4509985a // smmla z26.s, z2.b, z9.b\n"
- "ld1b { z8.b }, p2/Z, [x28, #-6, MUL VL]\n"
"ld1b { z9.b }, p2/Z, [x28, #-5, MUL VL]\n"
".inst 0x450a9816 // smmla z22.s, z0.b, z10.b\n"
".inst 0x450a985e // smmla z30.s, z2.b, z10.b\n"
"ld1b { z10.b }, p2/Z, [x28, #-4, MUL VL]\n"
- "add x24, x24, #0x10\n"
".inst 0x45049813 // smmla z19.s, z0.b, z4.b\n"
".inst 0x4504985b // smmla z27.s, z2.b, z4.b\n"
"ld1b { z4.b }, p2/Z, [x28, #-3, MUL VL]\n"
- "add x23, x23, #0x10\n"
".inst 0x45059817 // smmla z23.s, z0.b, z5.b\n"
".inst 0x4505985f // smmla z31.s, z2.b, z5.b\n"
"ld1b { z5.b }, p2/Z, [x28, #-2, MUL VL]\n"
- "add x22, x22, #0x10\n"
".inst 0x45069830 // smmla z16.s, z1.b, z6.b\n"
".inst 0x45069878 // smmla z24.s, z3.b, z6.b\n"
"ld1b { z6.b }, p2/Z, [x28, #-1, MUL VL]\n"
@@ -694,36 +694,36 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"sdot z11.s, z1.b, z15.b\n"
"sdot z13.s, z3.b, z15.b\n"
"36:" // Height 3: Multiply loop: unique 5: skip row sum
- "sub x25, x25, #0x10\n"
- "cmp x25, #0x10\n"
+ "sub x24, x24, #0x10\n"
+ "cmp x24, #0x10\n"
"bgt 35b\n"
"37:" // Height 3: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x25\n"
- "ld1rqb { z1.b }, p0/Z, [x24]\n"
- "ld1rqb { z2.b }, p0/Z, [x23]\n"
- "ld1rqb { z3.b }, p0/Z, [x22]\n"
- "trn1 z0.d, z1.d, z2.d\n"
- "trn2 z1.d, z1.d, z2.d\n"
"ld1b { z5.b }, p2/Z, [x28]\n"
- "trn1 z2.d, z3.d, z4.d\n"
- "trn2 z3.d, z3.d, z4.d\n"
- ".inst 0x45059810 // smmla z16.s, z0.b, z5.b\n"
+ "whilelt p0.b, XZR, x24\n"
"ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
- ".inst 0x45059858 // smmla z24.s, z2.b, z5.b\n"
+ "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ "subs x24, x24, #0x8\n"
+ "ld1rqb { z2.b }, p0/Z, [x22]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
+ "ld1rqb { z3.b }, p0/Z, [x21]\n"
+ "trn2 z1.d, z1.d, z2.d\n"
"ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
- "subs x25, x25, #0x8\n"
+ ".inst 0x45059810 // smmla z16.s, z0.b, z5.b\n"
+ ".inst 0x45069814 // smmla z20.s, z0.b, z6.b\n"
"ld1b { z9.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "trn1 z2.d, z3.d, z4.d\n"
"ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n"
- ".inst 0x45069814 // smmla z20.s, z0.b, z6.b\n"
- ".inst 0x4506985c // smmla z28.s, z2.b, z6.b\n"
+ "trn2 z3.d, z3.d, z4.d\n"
"ld1b { z4.b }, p2/Z, [x28, #6, MUL VL]\n"
+ ".inst 0x45059858 // smmla z24.s, z2.b, z5.b\n"
"ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
+ ".inst 0x4506985c // smmla z28.s, z2.b, z6.b\n"
".inst 0x45079811 // smmla z17.s, z0.b, z7.b\n"
".inst 0x45079859 // smmla z25.s, z2.b, z7.b\n"
".inst 0x45089815 // smmla z21.s, z0.b, z8.b\n"
".inst 0x4508985d // smmla z29.s, z2.b, z8.b\n"
- "addvl x28, x28, #8\n"
".inst 0x45099812 // smmla z18.s, z0.b, z9.b\n"
".inst 0x4509985a // smmla z26.s, z2.b, z9.b\n"
".inst 0x450a9816 // smmla z22.s, z0.b, z10.b\n"
@@ -735,23 +735,23 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"ble 38f\n"
"ld1b { z6.b }, p2/Z, [x28]\n"
".inst 0x45069830 // smmla z16.s, z1.b, z6.b\n"
- ".inst 0x45069878 // smmla z24.s, z3.b, z6.b\n"
"ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
+ ".inst 0x45069878 // smmla z24.s, z3.b, z6.b\n"
"ld1b { z8.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z9.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079834 // smmla z20.s, z1.b, z7.b\n"
- ".inst 0x4507987c // smmla z28.s, z3.b, z7.b\n"
"ld1b { z10.b }, p2/Z, [x28, #4, MUL VL]\n"
+ ".inst 0x4507987c // smmla z28.s, z3.b, z7.b\n"
"ld1b { z4.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x45089831 // smmla z17.s, z1.b, z8.b\n"
- ".inst 0x45089879 // smmla z25.s, z3.b, z8.b\n"
"ld1b { z5.b }, p2/Z, [x28, #6, MUL VL]\n"
+ ".inst 0x45089879 // smmla z25.s, z3.b, z8.b\n"
"ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
".inst 0x45099835 // smmla z21.s, z1.b, z9.b\n"
".inst 0x4509987d // smmla z29.s, z3.b, z9.b\n"
".inst 0x450a9832 // smmla z18.s, z1.b, z10.b\n"
".inst 0x450a987a // smmla z26.s, z3.b, z10.b\n"
- "addvl x28, x28, #8\n"
".inst 0x45049836 // smmla z22.s, z1.b, z4.b\n"
".inst 0x4504987e // smmla z30.s, z3.b, z4.b\n"
".inst 0x45059833 // smmla z19.s, z1.b, z5.b\n"
@@ -765,17 +765,17 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"sdot z11.s, z1.b, z15.b\n"
"sdot z13.s, z3.b, z15.b\n"
"39:" // Height 3: Multiply loop: unique 6: skip row sum
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 32b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 z7.d, z16.d, z20.d\n"
- "add x22, x27, x20\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 z16.d, z16.d, z20.d\n"
+ "add x21, x26, x19\n"
"uzp1 z20.d, z17.d, z21.d\n"
"uzp2 z17.d, z17.d, z21.d\n"
- "add x21, x22, x20\n"
+ "add x20, x21, x19\n"
"uzp1 z21.d, z18.d, z22.d\n"
"uzp2 z18.d, z18.d, z22.d\n"
"uzp1 z22.d, z19.d, z23.d\n"
@@ -786,37 +786,37 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"uzp1 z27.d, z27.d, z31.d\n"
"mov z31.d, z7.d\n"
"tbnz %x[flags], #31, 40f\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1rw { z3.s }, p2/Z, [x23]\n"
".inst 0x4491a96b // addp z11.s, p2/m, z11.s, z11.s\n"
+ "add x22, %x[qp], %[b_offset]\n"
+ "ld1rw { z3.s }, p2/Z, [x22]\n"
".inst 0x4491a9ad // addp z13.s, p2/m, z13.s, z13.s\n"
- "neg z3.s, p2/M, z3.s\n"
"mov z12.s, z11.s[3]\n"
"mov z11.s, z11.s[0]\n"
- "mul z11.s, p2/M, z11.s, z3.s\n"
+ "neg z3.s, p2/M, z3.s\n"
"mov z13.s, z13.s[0]\n"
+ "mul z11.s, p2/M, z11.s, z3.s\n"
"mul z12.s, p2/M, z12.s, z3.s\n"
"mul z13.s, p2/M, z13.s, z3.s\n"
"40:" // Height 3: skip row sum fixup
"add z31.s, z31.s, z11.s\n"
+ "ld1w { z0.s }, p2/Z, [x27]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"add z20.s, z20.s, z11.s\n"
- "ld1w { z0.s }, p2/Z, [x10]\n"
- "ld1w { z1.s }, p2/Z, [x10, #1, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x27, #1, MUL VL]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
"add z21.s, z21.s, z11.s\n"
+ "ld1w { z2.s }, p2/Z, [x27, #2, MUL VL]\n"
+ "add x22, %x[qp], %[per_layer_mul]\n"
"add z22.s, z22.s, z11.s\n"
- "ld1w { z2.s }, p2/Z, [x10, #2, MUL VL]\n"
- "ld1w { z3.s }, p2/Z, [x10, #3, MUL VL]\n"
+ "ld1w { z3.s }, p2/Z, [x27, #3, MUL VL]\n"
+ "addvl x27, x27, #4\n"
"add z16.s, z16.s, z12.s\n"
+ "ld1rw { z4.s }, p2/Z, [x22]\n"
"add z17.s, z17.s, z12.s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
"add z18.s, z18.s, z12.s\n"
"add z19.s, z19.s, z12.s\n"
- "ld1rw { z4.s }, p2/Z, [x23]\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
"add z24.s, z24.s, z13.s\n"
"add z25.s, z25.s, z13.s\n"
- "addvl x10, x10, #4\n"
"add z26.s, z26.s, z13.s\n"
"add z27.s, z27.s, z13.s\n"
"add z31.s, z31.s, z0.s\n"
@@ -828,8 +828,8 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"add z18.s, z18.s, z2.s\n"
"add z19.s, z19.s, z3.s\n"
"add z24.s, z24.s, z0.s\n"
- "add z25.s, z25.s, z1.s\n"
"ld1rw { z0.s }, p2/Z, [x23]\n"
+ "add z25.s, z25.s, z1.s\n"
"add z26.s, z26.s, z2.s\n"
"add z27.s, z27.s, z3.s\n"
".inst 0x04a477ff // sqrdmulh z31.s, z31.s, z4.s\n"
@@ -848,131 +848,131 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"and z4.d, z31.d, z0.d\n"
"and z5.d, z20.d, z0.d\n"
"and z6.d, z21.d, z0.d\n"
- "and z7.d, z22.d, z0.d\n"
- "and z8.d, z16.d, z0.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
- "asr z8.s, z8.s, #0x1f\n"
"sqadd z31.s, z31.s, z4.s\n"
"sqadd z20.s, z20.s, z5.s\n"
"sqadd z21.s, z21.s, z6.s\n"
+ "and z7.d, z22.d, z0.d\n"
+ "and z8.d, z16.d, z0.d\n"
+ "and z9.d, z17.d, z0.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ "asr z8.s, z8.s, #0x1f\n"
+ "asr z9.s, z9.s, #0x1f\n"
"sqadd z22.s, z22.s, z7.s\n"
"sqadd z16.s, z16.s, z8.s\n"
- "and z9.d, z17.d, z0.d\n"
+ "sqadd z17.s, z17.s, z9.s\n"
"and z10.d, z18.d, z0.d\n"
"and z4.d, z19.d, z0.d\n"
"and z5.d, z24.d, z0.d\n"
- "and z6.d, z25.d, z0.d\n"
- "and z7.d, z26.d, z0.d\n"
- "and z8.d, z27.d, z0.d\n"
- "asr z9.s, z9.s, #0x1f\n"
"asr z10.s, z10.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
- "asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
- "asr z8.s, z8.s, #0x1f\n"
- "sqadd z17.s, z17.s, z9.s\n"
"sqadd z18.s, z18.s, z10.s\n"
"sqadd z19.s, z19.s, z4.s\n"
"sqadd z24.s, z24.s, z5.s\n"
+ "and z6.d, z25.d, z0.d\n"
+ "and z7.d, z26.d, z0.d\n"
+ "and z8.d, z27.d, z0.d\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ "asr z8.s, z8.s, #0x1f\n"
"sqadd z25.s, z25.s, z6.s\n"
"sqadd z26.s, z26.s, z7.s\n"
"sqadd z27.s, z27.s, z8.s\n"
"41:" // Height 3: no shift correction
- "add x23, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x23]\n"
".inst 0x4482881f // srshl z31.s, p2/M, z31.s, z0.s\n"
- "add z31.s, z31.s, z4.s\n"
+ "add x22, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x22]\n"
".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
+ "add x22, %x[qp], %[minval]\n"
".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
- "add z20.s, z20.s, z4.s\n"
- "add z21.s, z21.s, z4.s\n"
+ "ld1rw { z5.s }, p2/Z, [x22]\n"
+ "add x22, %x[qp], %[maxval]\n"
".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
+ "ld1rw { z6.s }, p2/Z, [x22]\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
+ "add z31.s, z31.s, z4.s\n"
+ "add z20.s, z20.s, z4.s\n"
+ "add z21.s, z21.s, z4.s\n"
"add z22.s, z22.s, z4.s\n"
"add z16.s, z16.s, z4.s\n"
- ".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
- ".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
- "add z17.s, z17.s, z4.s\n"
- "add z18.s, z18.s, z4.s\n"
- ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
- ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
- "add z19.s, z19.s, z4.s\n"
- "add z24.s, z24.s, z4.s\n"
- ".inst 0x44828819 // srshl z25.s, p2/M, z25.s, z0.s\n"
- ".inst 0x4482881a // srshl z26.s, p2/M, z26.s, z0.s\n"
- "add z25.s, z25.s, z4.s\n"
- "add z26.s, z26.s, z4.s\n"
- ".inst 0x4482881b // srshl z27.s, p2/M, z27.s, z0.s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x23]\n"
- "add z27.s, z27.s, z4.s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x23]\n"
"smin z31.s, p2/M, z31.s, z6.s\n"
"smin z20.s, p2/M, z20.s, z6.s\n"
"smin z21.s, p2/M, z21.s, z6.s\n"
"smin z22.s, p2/M, z22.s, z6.s\n"
- "smin z16.s, p2/M, z16.s, z6.s\n"
- "smin z17.s, p2/M, z17.s, z6.s\n"
- "smin z18.s, p2/M, z18.s, z6.s\n"
- "smin z19.s, p2/M, z19.s, z6.s\n"
- "smin z24.s, p2/M, z24.s, z6.s\n"
- "smin z25.s, p2/M, z25.s, z6.s\n"
- "smin z26.s, p2/M, z26.s, z6.s\n"
- "smin z27.s, p2/M, z27.s, z6.s\n"
"smax z31.s, p2/M, z31.s, z5.s\n"
"smax z20.s, p2/M, z20.s, z5.s\n"
"smax z21.s, p2/M, z21.s, z5.s\n"
- "uzp1 z31.h, z31.h, z20.h\n"
"smax z22.s, p2/M, z22.s, z5.s\n"
- "smax z16.s, p2/M, z16.s, z5.s\n"
+ "smin z16.s, p2/M, z16.s, z6.s\n"
+ "uzp1 z31.h, z31.h, z20.h\n"
+ ".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
"uzp1 z20.h, z21.h, z22.h\n"
+ "smax z16.s, p2/M, z16.s, z5.s\n"
"uzp1 z31.b, z31.b, z20.b\n"
+ "st1b { z31.b }, p1, [x26]\n"
+ "add z17.s, z17.s, z4.s\n"
+ "addvl x26, x26, #1\n"
+ ".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
+ ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
+ ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
+ "smin z17.s, p2/M, z17.s, z6.s\n"
+ ".inst 0x44828819 // srshl z25.s, p2/M, z25.s, z0.s\n"
+ "add z18.s, z18.s, z4.s\n"
+ "add z19.s, z19.s, z4.s\n"
+ "add z24.s, z24.s, z4.s\n"
+ "add z25.s, z25.s, z4.s\n"
"smax z17.s, p2/M, z17.s, z5.s\n"
- "smax z18.s, p2/M, z18.s, z5.s\n"
+ "smin z18.s, p2/M, z18.s, z6.s\n"
+ "smin z19.s, p2/M, z19.s, z6.s\n"
+ "smin z24.s, p2/M, z24.s, z6.s\n"
"uzp1 z16.h, z16.h, z17.h\n"
- "st1b { z31.b }, p1, [x27]\n"
+ "smax z18.s, p2/M, z18.s, z5.s\n"
"smax z19.s, p2/M, z19.s, z5.s\n"
"smax z24.s, p2/M, z24.s, z5.s\n"
+ "smin z25.s, p2/M, z25.s, z6.s\n"
+ ".inst 0x4482881a // srshl z26.s, p2/M, z26.s, z0.s\n"
"uzp1 z17.h, z18.h, z19.h\n"
+ ".inst 0x4482881b // srshl z27.s, p2/M, z27.s, z0.s\n"
"uzp1 z16.b, z16.b, z17.b\n"
+ "st1b { z16.b }, p1, [x21]\n"
+ "add z26.s, z26.s, z4.s\n"
"smax z25.s, p2/M, z25.s, z5.s\n"
- "smax z26.s, p2/M, z26.s, z5.s\n"
+ "add z27.s, z27.s, z4.s\n"
+ "smin z26.s, p2/M, z26.s, z6.s\n"
"uzp1 z24.h, z24.h, z25.h\n"
- "st1b { z16.b }, p1, [x22]\n"
+ "smin z27.s, p2/M, z27.s, z6.s\n"
+ "smax z26.s, p2/M, z26.s, z5.s\n"
"smax z27.s, p2/M, z27.s, z5.s\n"
"uzp1 z25.h, z26.h, z27.h\n"
"uzp1 z24.b, z24.b, z25.b\n"
- "st1b { z24.b }, p1, [x21]\n"
- "addvl x27, x27, #1\n"
+ "st1b { z24.b }, p1, [x20]\n"
"42:" // Height 3: Writeback done
"decw x9, ALL, MUL #4\n"
"cmp x9, XZR\n"
"bgt 30b\n"
"b 58f\n"
"43:" // Height 4
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x4\n"
- "mov x10, %x[col_bias]\n"
"mov z11.s, #0x0\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x27, %x[col_bias]\n"
"mov z12.s, #0x0\n"
- "mov z13.s, #0x0\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"bic %x[flags], %x[flags], #0x80000000\n"
- "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov z13.s, #0x0\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x26, %x[output_ptr]\n"
"mov z14.s, #0x0\n"
+ "mov x19, #0x4\n"
"mov z15.b, #0x1\n"
- "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"44:" // Height 4: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x9\n"
"mov z16.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z17.s, #0x0\n"
+ "whilelt p1.b, x19, x9\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"mov z20.s, #0x0\n"
@@ -988,85 +988,85 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"mov z30.s, #0x0\n"
"mov z31.s, #0x0\n"
"45:" // Height 4: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"46:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 47f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "ldr x22, [x21, #0x10]\n"
- "ldr x21, [x21, #0x18]\n"
- "cbnz x26, 48f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
- "add x21, x21, x20\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "ldr x20, [x20, #0x18]\n"
+ "cbnz x25, 48f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
+ "add x20, x20, x19\n"
"b 48f\n"
"47:" // Height 4: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
+ "add x20, x21, x19\n"
"48:" // Height 4: input setup done
- "cmp x25, #0x10\n"
+ "cmp x24, #0x10\n"
"ble 51f\n"
"49:" // Height 4: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x25\n"
- "ld1rqb { z1.b }, p0/Z, [x24]\n"
- "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "ld1b { z5.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x24\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ "add x23, x23, #0x10\n"
+ "ld1rqb { z2.b }, p0/Z, [x22]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1rqb { z3.b }, p0/Z, [x22]\n"
- "ld1rqb { z4.b }, p0/Z, [x21]\n"
+ "ld1rqb { z3.b }, p0/Z, [x21]\n"
+ "add x22, x22, #0x10\n"
"trn2 z1.d, z1.d, z2.d\n"
- "trn1 z2.d, z3.d, z4.d\n"
- "ld1b { z5.b }, p2/Z, [x28]\n"
- "trn2 z3.d, z3.d, z4.d\n"
+ "ld1rqb { z4.b }, p0/Z, [x20]\n"
+ "add x21, x21, #0x10\n"
".inst 0x45059810 // smmla z16.s, z0.b, z5.b\n"
- ".inst 0x45059858 // smmla z24.s, z2.b, z5.b\n"
- "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
"ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x20, x20, #0x10\n"
".inst 0x45069814 // smmla z20.s, z0.b, z6.b\n"
- ".inst 0x4506985c // smmla z28.s, z2.b, z6.b\n"
"ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
"ld1b { z9.b }, p2/Z, [x28, #4, MUL VL]\n"
- ".inst 0x45079811 // smmla z17.s, z0.b, z7.b\n"
- ".inst 0x45079859 // smmla z25.s, z2.b, z7.b\n"
+ "trn1 z2.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
"ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n"
+ ".inst 0x45079811 // smmla z17.s, z0.b, z7.b\n"
"ld1b { z4.b }, p2/Z, [x28, #6, MUL VL]\n"
- ".inst 0x45089815 // smmla z21.s, z0.b, z8.b\n"
- ".inst 0x4508985d // smmla z29.s, z2.b, z8.b\n"
+ ".inst 0x45059858 // smmla z24.s, z2.b, z5.b\n"
"ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
+ ".inst 0x4506985c // smmla z28.s, z2.b, z6.b\n"
"ld1b { z6.b }, p2/Z, [x28, #-8, MUL VL]\n"
+ ".inst 0x45079859 // smmla z25.s, z2.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-7, MUL VL]\n"
+ ".inst 0x45089815 // smmla z21.s, z0.b, z8.b\n"
+ ".inst 0x4508985d // smmla z29.s, z2.b, z8.b\n"
+ "ld1b { z8.b }, p2/Z, [x28, #-6, MUL VL]\n"
".inst 0x45099812 // smmla z18.s, z0.b, z9.b\n"
".inst 0x4509985a // smmla z26.s, z2.b, z9.b\n"
+ "ld1b { z9.b }, p2/Z, [x28, #-5, MUL VL]\n"
".inst 0x450a9816 // smmla z22.s, z0.b, z10.b\n"
- "ld1b { z7.b }, p2/Z, [x28, #-7, MUL VL]\n"
- "ld1b { z8.b }, p2/Z, [x28, #-6, MUL VL]\n"
".inst 0x450a985e // smmla z30.s, z2.b, z10.b\n"
- ".inst 0x45049813 // smmla z19.s, z0.b, z4.b\n"
- "ld1b { z9.b }, p2/Z, [x28, #-5, MUL VL]\n"
"ld1b { z10.b }, p2/Z, [x28, #-4, MUL VL]\n"
+ ".inst 0x45049813 // smmla z19.s, z0.b, z4.b\n"
".inst 0x4504985b // smmla z27.s, z2.b, z4.b\n"
- ".inst 0x45059817 // smmla z23.s, z0.b, z5.b\n"
"ld1b { z4.b }, p2/Z, [x28, #-3, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ ".inst 0x45059817 // smmla z23.s, z0.b, z5.b\n"
".inst 0x4505985f // smmla z31.s, z2.b, z5.b\n"
- ".inst 0x45069830 // smmla z16.s, z1.b, z6.b\n"
"ld1b { z5.b }, p2/Z, [x28, #-2, MUL VL]\n"
- "add x23, x23, #0x10\n"
+ ".inst 0x45069830 // smmla z16.s, z1.b, z6.b\n"
".inst 0x45069878 // smmla z24.s, z3.b, z6.b\n"
"ld1b { z6.b }, p2/Z, [x28, #-1, MUL VL]\n"
".inst 0x45079834 // smmla z20.s, z1.b, z7.b\n"
- "add x22, x22, #0x10\n"
".inst 0x4507987c // smmla z28.s, z3.b, z7.b\n"
".inst 0x45089831 // smmla z17.s, z1.b, z8.b\n"
- "add x21, x21, #0x10\n"
".inst 0x45089879 // smmla z25.s, z3.b, z8.b\n"
".inst 0x45099835 // smmla z21.s, z1.b, z9.b\n"
".inst 0x4509987d // smmla z29.s, z3.b, z9.b\n"
@@ -1084,38 +1084,38 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"sdot z11.s, z1.b, z15.b\n"
"sdot z13.s, z3.b, z15.b\n"
"50:" // Height 4: Multiply loop: unique 7: skip row sum
- "sub x25, x25, #0x10\n"
- "cmp x25, #0x10\n"
+ "sub x24, x24, #0x10\n"
+ "cmp x24, #0x10\n"
"bgt 49b\n"
"51:" // Height 4: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x25\n"
- "ld1rqb { z1.b }, p0/Z, [x24]\n"
- "ld1rqb { z2.b }, p0/Z, [x23]\n"
- "trn1 z0.d, z1.d, z2.d\n"
- "ld1rqb { z3.b }, p0/Z, [x22]\n"
- "ld1rqb { z4.b }, p0/Z, [x21]\n"
- "trn2 z1.d, z1.d, z2.d\n"
- "trn1 z2.d, z3.d, z4.d\n"
"ld1b { z5.b }, p2/Z, [x28]\n"
- "trn2 z3.d, z3.d, z4.d\n"
- ".inst 0x45059810 // smmla z16.s, z0.b, z5.b\n"
- ".inst 0x45059858 // smmla z24.s, z2.b, z5.b\n"
+ "whilelt p0.b, XZR, x24\n"
"ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x24, x24, #0x8\n"
+ "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ "ld1rqb { z2.b }, p0/Z, [x22]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
+ "ld1rqb { z3.b }, p0/Z, [x21]\n"
+ "trn2 z1.d, z1.d, z2.d\n"
+ "ld1rqb { z4.b }, p0/Z, [x20]\n"
"ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
- "subs x25, x25, #0x8\n"
+ ".inst 0x45059810 // smmla z16.s, z0.b, z5.b\n"
".inst 0x45069814 // smmla z20.s, z0.b, z6.b\n"
"ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
"ld1b { z9.b }, p2/Z, [x28, #4, MUL VL]\n"
- ".inst 0x4506985c // smmla z28.s, z2.b, z6.b\n"
- ".inst 0x45079811 // smmla z17.s, z0.b, z7.b\n"
+ "trn1 z2.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
"ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n"
+ ".inst 0x45079811 // smmla z17.s, z0.b, z7.b\n"
"ld1b { z4.b }, p2/Z, [x28, #6, MUL VL]\n"
+ ".inst 0x45059858 // smmla z24.s, z2.b, z5.b\n"
+ "ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
+ ".inst 0x4506985c // smmla z28.s, z2.b, z6.b\n"
".inst 0x45079859 // smmla z25.s, z2.b, z7.b\n"
".inst 0x45089815 // smmla z21.s, z0.b, z8.b\n"
- "ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n"
".inst 0x4508985d // smmla z29.s, z2.b, z8.b\n"
".inst 0x45099812 // smmla z18.s, z0.b, z9.b\n"
- "addvl x28, x28, #8\n"
".inst 0x4509985a // smmla z26.s, z2.b, z9.b\n"
".inst 0x450a9816 // smmla z22.s, z0.b, z10.b\n"
".inst 0x450a985e // smmla z30.s, z2.b, z10.b\n"
@@ -1126,23 +1126,23 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"ble 52f\n"
"ld1b { z6.b }, p2/Z, [x28]\n"
".inst 0x45069830 // smmla z16.s, z1.b, z6.b\n"
- ".inst 0x45069878 // smmla z24.s, z3.b, z6.b\n"
"ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
+ ".inst 0x45069878 // smmla z24.s, z3.b, z6.b\n"
"ld1b { z8.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z9.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079834 // smmla z20.s, z1.b, z7.b\n"
- ".inst 0x4507987c // smmla z28.s, z3.b, z7.b\n"
"ld1b { z10.b }, p2/Z, [x28, #4, MUL VL]\n"
+ ".inst 0x4507987c // smmla z28.s, z3.b, z7.b\n"
"ld1b { z4.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x45089831 // smmla z17.s, z1.b, z8.b\n"
- ".inst 0x45089879 // smmla z25.s, z3.b, z8.b\n"
"ld1b { z5.b }, p2/Z, [x28, #6, MUL VL]\n"
+ ".inst 0x45089879 // smmla z25.s, z3.b, z8.b\n"
"ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
".inst 0x45099835 // smmla z21.s, z1.b, z9.b\n"
".inst 0x4509987d // smmla z29.s, z3.b, z9.b\n"
".inst 0x450a9832 // smmla z18.s, z1.b, z10.b\n"
".inst 0x450a987a // smmla z26.s, z3.b, z10.b\n"
- "addvl x28, x28, #8\n"
".inst 0x45049836 // smmla z22.s, z1.b, z4.b\n"
".inst 0x4504987e // smmla z30.s, z3.b, z4.b\n"
".inst 0x45059833 // smmla z19.s, z1.b, z5.b\n"
@@ -1156,19 +1156,19 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"sdot z11.s, z1.b, z15.b\n"
"sdot z13.s, z3.b, z15.b\n"
"53:" // Height 4: Multiply loop: unique 8: skip row sum
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 46b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 z7.d, z16.d, z20.d\n"
- "add x22, x27, x20\n"
- "add x21, x22, x20\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 z16.d, z16.d, z20.d\n"
+ "add x21, x26, x19\n"
"uzp1 z20.d, z17.d, z21.d\n"
- "add x20, x21, x20\n"
"uzp2 z17.d, z17.d, z21.d\n"
+ "add x20, x21, x19\n"
"uzp1 z21.d, z18.d, z22.d\n"
+ "add x19, x20, x19\n"
"uzp2 z18.d, z18.d, z22.d\n"
"uzp1 z22.d, z19.d, z23.d\n"
"uzp2 z19.d, z19.d, z23.d\n"
@@ -1182,39 +1182,39 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"uzp2 z27.d, z27.d, z31.d\n"
"mov z31.d, z7.d\n"
"tbnz %x[flags], #31, 54f\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x23]\n"
".inst 0x4491a96b // addp z11.s, p2/m, z11.s, z11.s\n"
+ "add x22, %x[qp], %[b_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x22]\n"
".inst 0x4491a9ad // addp z13.s, p2/m, z13.s, z13.s\n"
- "neg z4.s, p2/M, z4.s\n"
"mov z12.s, z11.s[3]\n"
"mov z11.s, z11.s[0]\n"
- "mul z11.s, p2/M, z11.s, z4.s\n"
+ "neg z4.s, p2/M, z4.s\n"
"mov z14.s, z13.s[3]\n"
"mov z13.s, z13.s[0]\n"
+ "mul z11.s, p2/M, z11.s, z4.s\n"
"mul z12.s, p2/M, z12.s, z4.s\n"
"mul z13.s, p2/M, z13.s, z4.s\n"
"mul z14.s, p2/M, z14.s, z4.s\n"
"54:" // Height 4: skip row sum fixup
"add z31.s, z31.s, z11.s\n"
+ "ld1w { z0.s }, p2/Z, [x27]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"add z20.s, z20.s, z11.s\n"
- "ld1w { z0.s }, p2/Z, [x10]\n"
- "ld1w { z1.s }, p2/Z, [x10, #1, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x27, #1, MUL VL]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
"add z21.s, z21.s, z11.s\n"
+ "ld1w { z2.s }, p2/Z, [x27, #2, MUL VL]\n"
+ "add x22, %x[qp], %[per_layer_mul]\n"
"add z22.s, z22.s, z11.s\n"
- "ld1w { z2.s }, p2/Z, [x10, #2, MUL VL]\n"
- "ld1w { z3.s }, p2/Z, [x10, #3, MUL VL]\n"
+ "ld1w { z3.s }, p2/Z, [x27, #3, MUL VL]\n"
+ "addvl x27, x27, #4\n"
"add z16.s, z16.s, z12.s\n"
+ "ld1rw { z4.s }, p2/Z, [x22]\n"
"add z17.s, z17.s, z12.s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
"add z18.s, z18.s, z12.s\n"
"add z19.s, z19.s, z12.s\n"
- "ld1rw { z4.s }, p2/Z, [x23]\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
"add z23.s, z23.s, z13.s\n"
"add z28.s, z28.s, z13.s\n"
- "addvl x10, x10, #4\n"
"add z29.s, z29.s, z13.s\n"
"add z30.s, z30.s, z13.s\n"
"add z24.s, z24.s, z14.s\n"
@@ -1234,8 +1234,8 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"add z29.s, z29.s, z2.s\n"
"add z30.s, z30.s, z3.s\n"
"add z24.s, z24.s, z0.s\n"
- "add z25.s, z25.s, z1.s\n"
"ld1rw { z0.s }, p2/Z, [x23]\n"
+ "add z25.s, z25.s, z1.s\n"
"add z26.s, z26.s, z2.s\n"
"add z27.s, z27.s, z3.s\n"
".inst 0x04a477ff // sqrdmulh z31.s, z31.s, z4.s\n"
@@ -1257,160 +1257,160 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"tbz %x[flags], #5, 55f\n"
"and z4.d, z31.d, z0.d\n"
"and z5.d, z20.d, z0.d\n"
+ "and z6.d, z21.d, z0.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
+ "asr z6.s, z6.s, #0x1f\n"
"sqadd z31.s, z31.s, z4.s\n"
"sqadd z20.s, z20.s, z5.s\n"
- "and z6.d, z21.d, z0.d\n"
+ "sqadd z21.s, z21.s, z6.s\n"
"and z7.d, z22.d, z0.d\n"
"and z8.d, z16.d, z0.d\n"
"and z9.d, z17.d, z0.d\n"
- "and z10.d, z18.d, z0.d\n"
- "and z4.d, z19.d, z0.d\n"
- "and z5.d, z23.d, z0.d\n"
- "asr z6.s, z6.s, #0x1f\n"
"asr z7.s, z7.s, #0x1f\n"
"asr z8.s, z8.s, #0x1f\n"
"asr z9.s, z9.s, #0x1f\n"
- "asr z10.s, z10.s, #0x1f\n"
- "asr z4.s, z4.s, #0x1f\n"
- "asr z5.s, z5.s, #0x1f\n"
- "sqadd z21.s, z21.s, z6.s\n"
"sqadd z22.s, z22.s, z7.s\n"
"sqadd z16.s, z16.s, z8.s\n"
"sqadd z17.s, z17.s, z9.s\n"
+ "and z10.d, z18.d, z0.d\n"
+ "and z4.d, z19.d, z0.d\n"
+ "and z5.d, z23.d, z0.d\n"
+ "asr z10.s, z10.s, #0x1f\n"
+ "asr z4.s, z4.s, #0x1f\n"
+ "asr z5.s, z5.s, #0x1f\n"
"sqadd z18.s, z18.s, z10.s\n"
"sqadd z19.s, z19.s, z4.s\n"
"sqadd z23.s, z23.s, z5.s\n"
"and z6.d, z28.d, z0.d\n"
"and z7.d, z29.d, z0.d\n"
"and z8.d, z30.d, z0.d\n"
- "and z9.d, z24.d, z0.d\n"
- "and z10.d, z25.d, z0.d\n"
- "and z4.d, z26.d, z0.d\n"
- "and z5.d, z27.d, z0.d\n"
"asr z6.s, z6.s, #0x1f\n"
"asr z7.s, z7.s, #0x1f\n"
"asr z8.s, z8.s, #0x1f\n"
- "asr z9.s, z9.s, #0x1f\n"
- "asr z10.s, z10.s, #0x1f\n"
- "asr z4.s, z4.s, #0x1f\n"
- "asr z5.s, z5.s, #0x1f\n"
"sqadd z28.s, z28.s, z6.s\n"
"sqadd z29.s, z29.s, z7.s\n"
"sqadd z30.s, z30.s, z8.s\n"
+ "and z9.d, z24.d, z0.d\n"
+ "and z10.d, z25.d, z0.d\n"
+ "and z4.d, z26.d, z0.d\n"
+ "asr z9.s, z9.s, #0x1f\n"
+ "asr z10.s, z10.s, #0x1f\n"
+ "asr z4.s, z4.s, #0x1f\n"
"sqadd z24.s, z24.s, z9.s\n"
"sqadd z25.s, z25.s, z10.s\n"
"sqadd z26.s, z26.s, z4.s\n"
+ "and z5.d, z27.d, z0.d\n"
+ "asr z5.s, z5.s, #0x1f\n"
"sqadd z27.s, z27.s, z5.s\n"
"55:" // Height 4: no shift correction
- "add x23, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x23]\n"
".inst 0x4482881f // srshl z31.s, p2/M, z31.s, z0.s\n"
- "add z31.s, z31.s, z4.s\n"
+ "add x22, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x22]\n"
".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
+ "add x22, %x[qp], %[minval]\n"
".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
- "add z20.s, z20.s, z4.s\n"
- "add z21.s, z21.s, z4.s\n"
+ "ld1rw { z5.s }, p2/Z, [x22]\n"
+ "add x22, %x[qp], %[maxval]\n"
".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
+ "ld1rw { z6.s }, p2/Z, [x22]\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
+ "add z31.s, z31.s, z4.s\n"
+ "add z20.s, z20.s, z4.s\n"
+ "add z21.s, z21.s, z4.s\n"
"add z22.s, z22.s, z4.s\n"
"add z16.s, z16.s, z4.s\n"
- ".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
- ".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
- "add z17.s, z17.s, z4.s\n"
- "add z18.s, z18.s, z4.s\n"
- ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
- ".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
- "add z19.s, z19.s, z4.s\n"
- "add z23.s, z23.s, z4.s\n"
- ".inst 0x4482881c // srshl z28.s, p2/M, z28.s, z0.s\n"
- ".inst 0x4482881d // srshl z29.s, p2/M, z29.s, z0.s\n"
- "add z28.s, z28.s, z4.s\n"
- "add z29.s, z29.s, z4.s\n"
- ".inst 0x4482881e // srshl z30.s, p2/M, z30.s, z0.s\n"
- ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
- "add z30.s, z30.s, z4.s\n"
- "add z24.s, z24.s, z4.s\n"
- ".inst 0x44828819 // srshl z25.s, p2/M, z25.s, z0.s\n"
- ".inst 0x4482881a // srshl z26.s, p2/M, z26.s, z0.s\n"
- "add z25.s, z25.s, z4.s\n"
- "add z26.s, z26.s, z4.s\n"
- ".inst 0x4482881b // srshl z27.s, p2/M, z27.s, z0.s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x23]\n"
- "add z27.s, z27.s, z4.s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x23]\n"
"smin z31.s, p2/M, z31.s, z6.s\n"
"smin z20.s, p2/M, z20.s, z6.s\n"
"smin z21.s, p2/M, z21.s, z6.s\n"
"smin z22.s, p2/M, z22.s, z6.s\n"
- "smin z16.s, p2/M, z16.s, z6.s\n"
- "smin z17.s, p2/M, z17.s, z6.s\n"
- "smin z18.s, p2/M, z18.s, z6.s\n"
- "smin z19.s, p2/M, z19.s, z6.s\n"
- "smin z23.s, p2/M, z23.s, z6.s\n"
- "smin z28.s, p2/M, z28.s, z6.s\n"
- "smin z29.s, p2/M, z29.s, z6.s\n"
- "smin z30.s, p2/M, z30.s, z6.s\n"
- "smin z24.s, p2/M, z24.s, z6.s\n"
- "smin z25.s, p2/M, z25.s, z6.s\n"
- "smin z26.s, p2/M, z26.s, z6.s\n"
- "smin z27.s, p2/M, z27.s, z6.s\n"
"smax z31.s, p2/M, z31.s, z5.s\n"
"smax z20.s, p2/M, z20.s, z5.s\n"
"smax z21.s, p2/M, z21.s, z5.s\n"
- "uzp1 z31.h, z31.h, z20.h\n"
"smax z22.s, p2/M, z22.s, z5.s\n"
- "smax z16.s, p2/M, z16.s, z5.s\n"
+ "smin z16.s, p2/M, z16.s, z6.s\n"
+ "uzp1 z31.h, z31.h, z20.h\n"
+ ".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
"uzp1 z20.h, z21.h, z22.h\n"
+ "smax z16.s, p2/M, z16.s, z5.s\n"
"uzp1 z31.b, z31.b, z20.b\n"
+ "st1b { z31.b }, p1, [x26]\n"
+ "add z17.s, z17.s, z4.s\n"
+ "addvl x26, x26, #1\n"
+ ".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
+ ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
+ ".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
+ "smin z17.s, p2/M, z17.s, z6.s\n"
+ ".inst 0x4482881c // srshl z28.s, p2/M, z28.s, z0.s\n"
+ "add z18.s, z18.s, z4.s\n"
+ "add z19.s, z19.s, z4.s\n"
+ "add z23.s, z23.s, z4.s\n"
+ "add z28.s, z28.s, z4.s\n"
"smax z17.s, p2/M, z17.s, z5.s\n"
- "smax z18.s, p2/M, z18.s, z5.s\n"
+ "smin z18.s, p2/M, z18.s, z6.s\n"
+ "smin z19.s, p2/M, z19.s, z6.s\n"
+ "smin z23.s, p2/M, z23.s, z6.s\n"
"uzp1 z16.h, z16.h, z17.h\n"
- "st1b { z31.b }, p1, [x27]\n"
+ "smax z18.s, p2/M, z18.s, z5.s\n"
"smax z19.s, p2/M, z19.s, z5.s\n"
"smax z23.s, p2/M, z23.s, z5.s\n"
+ "smin z28.s, p2/M, z28.s, z6.s\n"
+ ".inst 0x4482881d // srshl z29.s, p2/M, z29.s, z0.s\n"
"uzp1 z17.h, z18.h, z19.h\n"
+ ".inst 0x4482881e // srshl z30.s, p2/M, z30.s, z0.s\n"
"uzp1 z16.b, z16.b, z17.b\n"
+ "st1b { z16.b }, p1, [x21]\n"
+ "add z29.s, z29.s, z4.s\n"
"smax z28.s, p2/M, z28.s, z5.s\n"
- "smax z29.s, p2/M, z29.s, z5.s\n"
+ "add z30.s, z30.s, z4.s\n"
+ ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
+ "smin z29.s, p2/M, z29.s, z6.s\n"
"uzp1 z23.h, z23.h, z28.h\n"
- "st1b { z16.b }, p1, [x22]\n"
+ "smin z30.s, p2/M, z30.s, z6.s\n"
+ "add z24.s, z24.s, z4.s\n"
+ "smax z29.s, p2/M, z29.s, z5.s\n"
+ ".inst 0x44828819 // srshl z25.s, p2/M, z25.s, z0.s\n"
"smax z30.s, p2/M, z30.s, z5.s\n"
- "smax z24.s, p2/M, z24.s, z5.s\n"
+ "smin z24.s, p2/M, z24.s, z6.s\n"
+ ".inst 0x4482881a // srshl z26.s, p2/M, z26.s, z0.s\n"
+ "add z25.s, z25.s, z4.s\n"
"uzp1 z28.h, z29.h, z30.h\n"
+ "smax z24.s, p2/M, z24.s, z5.s\n"
+ "add z26.s, z26.s, z4.s\n"
"uzp1 z23.b, z23.b, z28.b\n"
+ "st1b { z23.b }, p1, [x20]\n"
+ "smin z25.s, p2/M, z25.s, z6.s\n"
+ "smin z26.s, p2/M, z26.s, z6.s\n"
+ ".inst 0x4482881b // srshl z27.s, p2/M, z27.s, z0.s\n"
"smax z25.s, p2/M, z25.s, z5.s\n"
"smax z26.s, p2/M, z26.s, z5.s\n"
+ "add z27.s, z27.s, z4.s\n"
"uzp1 z24.h, z24.h, z25.h\n"
- "st1b { z23.b }, p1, [x21]\n"
+ "smin z27.s, p2/M, z27.s, z6.s\n"
"smax z27.s, p2/M, z27.s, z5.s\n"
"uzp1 z25.h, z26.h, z27.h\n"
"uzp1 z24.b, z24.b, z25.b\n"
- "st1b { z24.b }, p1, [x20]\n"
- "addvl x27, x27, #1\n"
+ "st1b { z24.b }, p1, [x19]\n"
"56:" // Height 4: Writeback done
"decw x9, ALL, MUL #4\n"
"cmp x9, XZR\n"
"bgt 44b\n"
"subs %x[M], %x[M], #0x4\n"
"beq 58f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 57f\n"
- "add x21, x21, #0x4\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x4\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"57:" // Update direct input
- "mov x20, #0x4\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x4\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"58:" // Exit
: [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
- : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
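(Aside, not part of the patch: every writeback hunk above reorders the same requantization tail — sqrdmulh by the per-layer or per-channel multiplier, srshl by the negated right shift, add of the c_offset, smin/smax clamp against maxval/minval, then uzp1 narrowing to bytes. The following is a minimal scalar sketch of that arithmetic, assuming ARM's round-half-up semantics for SQRDMULH and SRSHL and omitting the optional sign-correction (and/asr/sqadd) step; the helper names are illustrative only.)

#include <algorithm>
#include <cstdint>

// Scalar equivalent of SQRDMULH: saturating rounding doubling multiply
// returning the high half. (2*a*b + 2^31) >> 32 == (a*b + 2^30) >> 31.
static int32_t sqrdmulh32(int32_t a, int32_t b)
{
    if (a == INT32_MIN && b == INT32_MIN)
        return INT32_MAX; // the only input pair that saturates
    int64_t ab = static_cast<int64_t>(a) * b;
    return static_cast<int32_t>((ab + (int64_t{1} << 30)) >> 31);
}

// Scalar equivalent of SRSHL with a negative shift element: a rounding
// arithmetic right shift. The kernels keep the negated shift in z0-z3;
// here it is passed as a positive bit count.
static int32_t rounding_rshift(int32_t v, int right_shift)
{
    if (right_shift <= 0)
        return v;
    int64_t rounded = static_cast<int64_t>(v) + (int64_t{1} << (right_shift - 1));
    return static_cast<int32_t>(rounded >> right_shift);
}

// One int32 accumulator through the tail: multiply, shift, add c_offset,
// clamp to [minval, maxval], narrow. The uzp1 pairs in the asm then pack
// four such clamped results per stored vector of bytes.
static int8_t requantize(int32_t acc, int32_t mul, int right_shift,
                         int32_t c_offset, int32_t minval, int32_t maxval)
{
    int32_t v = sqrdmulh32(acc, mul);
    v = rounding_rshift(v, right_shift);
    v += c_offset;
    v = std::max(minval, std::min(v, maxval));
    return static_cast<int8_t>(v);
}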
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_dot_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_dot_6x4VL/generic.cpp
index 1e71806838..6b08d2834b 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_dot_6x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_dot_6x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -95,137 +95,137 @@ void sve_hybrid_s8qs_dot_6x4VL (
"cmp %x[M], #0x2\n"
"bgt 27f\n"
"beq 14f\n"
- "mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x11, %x[col_bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x9, %x[output_ptr]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"2:" // Height 1: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x10\n"
"mov z8.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z9.s, #0x0\n"
+ "whilelt p1.b, x19, x10\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"3:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"4:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 5f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 6f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 6f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
"b 6f\n"
"5:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"6:" // Height 1: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 8f\n"
"7:" // Height 1: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1b { z6.b }, p2/Z, [x9]\n"
+ "ld1b { z6.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"sdot z8.s, z6.b, z0.b[0]\n"
- "ld1b { z7.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "cmp x26, #0x10\n"
"sdot z9.s, z7.b, z0.b[0]\n"
- "ld1b { z6.b }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "add x25, x25, #0x10\n"
"sdot z10.s, z6.b, z0.b[0]\n"
- "ld1b { z7.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #4, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[0]\n"
- "ld1b { z6.b }, p2/Z, [x9, #4, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #5, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[1]\n"
- "ld1b { z7.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #6, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[1]\n"
- "ld1b { z6.b }, p2/Z, [x9, #6, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #16\n"
"sdot z10.s, z6.b, z0.b[1]\n"
- "ld1b { z7.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-8, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[1]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-8, MUL VL]\n"
- "ld1b { z7.b }, p2/Z, [x9, #-7, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-7, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[2]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-6, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[2]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-6, MUL VL]\n"
- "ld1b { z7.b }, p2/Z, [x9, #-5, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-5, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[2]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-4, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[2]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-4, MUL VL]\n"
- "ld1b { z7.b }, p2/Z, [x9, #-3, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-3, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[3]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[3]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-2, MUL VL]\n"
- "ld1b { z7.b }, p2/Z, [x9, #-1, MUL VL]\n"
- "sub x27, x27, #0x10\n"
- "cmp x27, #0x10\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-1, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[3]\n"
"sdot z11.s, z7.b, z0.b[3]\n"
- "add x26, x26, #0x10\n"
"bgt 7b\n"
"8:" // Height 1: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1b { z6.b }, p2/Z, [x9]\n"
+ "ld1b { z6.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"sdot z8.s, z6.b, z0.b[0]\n"
- "ld1b { z7.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[0]\n"
- "ld1b { z6.b }, p2/Z, [x9, #2, MUL VL]\n"
- "subs x27, x27, #0x4\n"
- "ld1b { z7.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"sdot z10.s, z6.b, z0.b[0]\n"
"sdot z11.s, z7.b, z0.b[0]\n"
- "addvl x9, x9, #4\n"
"ble 9f\n"
- "ld1b { z6.b }, p2/Z, [x9]\n"
- "ld1b { z7.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28]\n"
"sdot z8.s, z6.b, z0.b[1]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"sdot z9.s, z7.b, z0.b[1]\n"
- "ld1b { z6.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z7.b }, p2/Z, [x9, #3, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[1]\n"
+ "addvl x28, x28, #4\n"
"sdot z11.s, z7.b, z0.b[1]\n"
- "addvl x9, x9, #4\n"
"ble 9f\n"
- "ld1b { z6.b }, p2/Z, [x9]\n"
- "ld1b { z7.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28]\n"
"sdot z8.s, z6.b, z0.b[2]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"sdot z9.s, z7.b, z0.b[2]\n"
- "ld1b { z6.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z7.b }, p2/Z, [x9, #3, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[2]\n"
+ "addvl x28, x28, #4\n"
"sdot z11.s, z7.b, z0.b[2]\n"
- "addvl x9, x9, #4\n"
"ble 9f\n"
- "ld1b { z6.b }, p2/Z, [x9]\n"
- "ld1b { z7.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28]\n"
"sdot z8.s, z6.b, z0.b[3]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[3]\n"
- "ld1b { z6.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z7.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"sdot z10.s, z6.b, z0.b[3]\n"
"sdot z11.s, z7.b, z0.b[3]\n"
- "addvl x9, x9, #4\n"
"9:" // Height 1: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 4b\n"
- "ld1w { z0.s }, p2/Z, [x14]\n"
- "ld1w { z1.s }, p2/Z, [x14, #1, MUL VL]\n"
+ "ld1w { z0.s }, p2/Z, [x11]\n"
"add z8.s, z8.s, z0.s\n"
+ "ld1w { z1.s }, p2/Z, [x11, #1, MUL VL]\n"
+ "ld1w { z2.s }, p2/Z, [x11, #2, MUL VL]\n"
"add z9.s, z9.s, z1.s\n"
- "ld1w { z2.s }, p2/Z, [x14, #2, MUL VL]\n"
- "ld1w { z3.s }, p2/Z, [x14, #3, MUL VL]\n"
+ "ld1w { z3.s }, p2/Z, [x11, #3, MUL VL]\n"
+ "addvl x11, x11, #4\n"
"add z10.s, z10.s, z2.s\n"
"add z11.s, z11.s, z3.s\n"
- "addvl x14, x14, #4\n"
"tbz %x[flags], #4, 10f\n"
"ld1w { z0.s }, p2/Z, [x12]\n"
"ld1w { z4.s }, p2/Z, [x13]\n"
@@ -234,20 +234,20 @@ void sve_hybrid_s8qs_dot_6x4VL (
"ld1w { z2.s }, p2/Z, [x12, #2, MUL VL]\n"
"ld1w { z6.s }, p2/Z, [x13, #2, MUL VL]\n"
"ld1w { z3.s }, p2/Z, [x12, #3, MUL VL]\n"
- "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x12, x12, #4\n"
+ "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
"b 11f\n"
"10:" // Height 1: per layer parameters
- "add x26, %x[qp], %[per_layer_right_shift]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z0.s }, p2/Z, [x26]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
+ "add x24, %x[qp], %[per_layer_right_shift]\n"
+ "ld1rw { z0.s }, p2/Z, [x24]\n"
"mov z1.d, z0.d\n"
- "mov z5.d, z4.d\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
"mov z2.d, z0.d\n"
- "mov z6.d, z4.d\n"
"mov z3.d, z0.d\n"
+ "mov z5.d, z4.d\n"
+ "mov z6.d, z4.d\n"
"mov z7.d, z4.d\n"
"11:" // Height 1: parameters loaded
".inst 0x04a47508 // sqrdmulh z8.s, z8.s, z4.s\n"
@@ -262,26 +262,26 @@ void sve_hybrid_s8qs_dot_6x4VL (
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
"sqadd z8.s, z8.s, z4.s\n"
"sqadd z9.s, z9.s, z5.s\n"
"sqadd z10.s, z10.s, z6.s\n"
+ "asr z7.s, z7.s, #0x1f\n"
"sqadd z11.s, z11.s, z7.s\n"
"12:" // Height 1: no shift correction
- "add x25, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
".inst 0x44828808 // srshl z8.s, p2/M, z8.s, z0.s\n"
- "add z8.s, z8.s, z4.s\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
+ "add x24, %x[qp], %[minval]\n"
".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
+ "ld1rw { z5.s }, p2/Z, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
+ ".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
+ "ld1rw { z6.s }, p2/Z, [x24]\n"
+ "add z8.s, z8.s, z4.s\n"
"add z9.s, z9.s, z4.s\n"
"add z10.s, z10.s, z4.s\n"
- ".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x25]\n"
"add z11.s, z11.s, z4.s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x25]\n"
"smin z8.s, p2/M, z8.s, z6.s\n"
"smin z9.s, p2/M, z9.s, z6.s\n"
"smin z10.s, p2/M, z10.s, z6.s\n"
@@ -289,29 +289,29 @@ void sve_hybrid_s8qs_dot_6x4VL (
"smax z8.s, p2/M, z8.s, z5.s\n"
"smax z9.s, p2/M, z9.s, z5.s\n"
"smax z10.s, p2/M, z10.s, z5.s\n"
- "uzp1 z8.h, z8.h, z9.h\n"
"smax z11.s, p2/M, z11.s, z5.s\n"
+ "uzp1 z8.h, z8.h, z9.h\n"
"uzp1 z9.h, z10.h, z11.h\n"
"uzp1 z8.b, z8.b, z9.b\n"
- "st1b { z8.b }, p1, [x11]\n"
- "addvl x11, x11, #1\n"
+ "st1b { z8.b }, p1, [x9]\n"
+ "addvl x9, x9, #1\n"
"13:" // Height 1: Writeback done
"decw x10, ALL, MUL #4\n"
"cmp x10, XZR\n"
"bgt 2b\n"
"b 80f\n"
"14:" // Height 2
- "mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
+ "mov x11, %x[col_bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"15:" // Height 2: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x10\n"
"mov z8.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z9.s, #0x0\n"
+ "whilelt p1.b, x19, x10\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"mov z12.s, #0x0\n"
@@ -319,165 +319,165 @@ void sve_hybrid_s8qs_dot_6x4VL (
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
"16:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"17:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 18f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 19f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 19f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
"b 19f\n"
"18:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
"19:" // Height 2: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 21f\n"
"20:" // Height 2: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
- "sub x27, x27, #0x10\n"
- "ld1b { z6.b }, p2/Z, [x9]\n"
- "ld1b { z7.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"sdot z8.s, z6.b, z0.b[0]\n"
- "sdot z12.s, z6.b, z1.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "cmp x26, #0x10\n"
"sdot z9.s, z7.b, z0.b[0]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "sdot z12.s, z6.b, z1.b[0]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
"sdot z13.s, z7.b, z1.b[0]\n"
- "ld1b { z6.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z7.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[0]\n"
"sdot z14.s, z6.b, z1.b[0]\n"
- "ld1b { z6.b }, p2/Z, [x9, #4, MUL VL]\n"
- "cmp x27, #0x10\n"
+ "ld1b { z6.b }, p2/Z, [x28, #4, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[0]\n"
"sdot z15.s, z7.b, z1.b[0]\n"
- "ld1b { z7.b }, p2/Z, [x9, #5, MUL VL]\n"
- "add x26, x26, #0x10\n"
+ "ld1b { z7.b }, p2/Z, [x28, #5, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[1]\n"
"sdot z12.s, z6.b, z1.b[1]\n"
- "ld1b { z6.b }, p2/Z, [x9, #6, MUL VL]\n"
- "add x25, x25, #0x10\n"
+ "ld1b { z6.b }, p2/Z, [x28, #6, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[1]\n"
"sdot z13.s, z7.b, z1.b[1]\n"
- "ld1b { z7.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
+ "ld1b { z7.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #16\n"
"sdot z10.s, z6.b, z0.b[1]\n"
"sdot z14.s, z6.b, z1.b[1]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-8, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-8, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[1]\n"
"sdot z15.s, z7.b, z1.b[1]\n"
- "ld1b { z7.b }, p2/Z, [x9, #-7, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-7, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[2]\n"
"sdot z12.s, z6.b, z1.b[2]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-6, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-6, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[2]\n"
"sdot z13.s, z7.b, z1.b[2]\n"
- "ld1b { z7.b }, p2/Z, [x9, #-5, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-5, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[2]\n"
"sdot z14.s, z6.b, z1.b[2]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-4, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-4, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[2]\n"
"sdot z15.s, z7.b, z1.b[2]\n"
- "ld1b { z7.b }, p2/Z, [x9, #-3, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-3, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[3]\n"
"sdot z12.s, z6.b, z1.b[3]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-2, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[3]\n"
"sdot z13.s, z7.b, z1.b[3]\n"
- "ld1b { z7.b }, p2/Z, [x9, #-1, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-1, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[3]\n"
"sdot z14.s, z6.b, z1.b[3]\n"
"sdot z11.s, z7.b, z0.b[3]\n"
"sdot z15.s, z7.b, z1.b[3]\n"
"bgt 20b\n"
"21:" // Height 2: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
- "subs x27, x27, #0x4\n"
- "ld1b { z6.b }, p2/Z, [x9]\n"
- "ld1b { z7.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"sdot z8.s, z6.b, z0.b[0]\n"
- "sdot z12.s, z6.b, z1.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
"sdot z9.s, z7.b, z0.b[0]\n"
+ "sdot z12.s, z6.b, z1.b[0]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
"sdot z13.s, z7.b, z1.b[0]\n"
- "ld1b { z6.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z7.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"sdot z10.s, z6.b, z0.b[0]\n"
"sdot z14.s, z6.b, z1.b[0]\n"
- "addvl x9, x9, #4\n"
"sdot z11.s, z7.b, z0.b[0]\n"
"sdot z15.s, z7.b, z1.b[0]\n"
"ble 22f\n"
- "ld1b { z6.b }, p2/Z, [x9]\n"
- "ld1b { z7.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28]\n"
"sdot z8.s, z6.b, z0.b[1]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"sdot z12.s, z6.b, z1.b[1]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[1]\n"
"sdot z13.s, z7.b, z1.b[1]\n"
- "ld1b { z6.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z7.b }, p2/Z, [x9, #3, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"sdot z10.s, z6.b, z0.b[1]\n"
"sdot z14.s, z6.b, z1.b[1]\n"
- "addvl x9, x9, #4\n"
"sdot z11.s, z7.b, z0.b[1]\n"
"sdot z15.s, z7.b, z1.b[1]\n"
"ble 22f\n"
- "ld1b { z6.b }, p2/Z, [x9]\n"
- "ld1b { z7.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28]\n"
"sdot z8.s, z6.b, z0.b[2]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"sdot z12.s, z6.b, z1.b[2]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[2]\n"
"sdot z13.s, z7.b, z1.b[2]\n"
- "ld1b { z6.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z7.b }, p2/Z, [x9, #3, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"sdot z10.s, z6.b, z0.b[2]\n"
"sdot z14.s, z6.b, z1.b[2]\n"
- "addvl x9, x9, #4\n"
"sdot z11.s, z7.b, z0.b[2]\n"
"sdot z15.s, z7.b, z1.b[2]\n"
"ble 22f\n"
- "ld1b { z6.b }, p2/Z, [x9]\n"
- "ld1b { z7.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28]\n"
"sdot z8.s, z6.b, z0.b[3]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
"sdot z12.s, z6.b, z1.b[3]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[3]\n"
"sdot z13.s, z7.b, z1.b[3]\n"
- "ld1b { z6.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z7.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"sdot z10.s, z6.b, z0.b[3]\n"
"sdot z14.s, z6.b, z1.b[3]\n"
- "addvl x9, x9, #4\n"
"sdot z11.s, z7.b, z0.b[3]\n"
"sdot z15.s, z7.b, z1.b[3]\n"
"22:" // Height 2: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 17b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "ld1w { z0.s }, p2/Z, [x14]\n"
- "add x24, x11, x20\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z0.s }, p2/Z, [x11]\n"
"add z8.s, z8.s, z0.s\n"
- "ld1w { z1.s }, p2/Z, [x14, #1, MUL VL]\n"
- "ld1w { z2.s }, p2/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x11, #1, MUL VL]\n"
+ "add x23, x9, x19\n"
+ "add z12.s, z12.s, z0.s\n"
+ "ld1w { z2.s }, p2/Z, [x11, #2, MUL VL]\n"
+ "ld1w { z3.s }, p2/Z, [x11, #3, MUL VL]\n"
"add z9.s, z9.s, z1.s\n"
+ "addvl x11, x11, #4\n"
+ "add z13.s, z13.s, z1.s\n"
"add z10.s, z10.s, z2.s\n"
- "ld1w { z3.s }, p2/Z, [x14, #3, MUL VL]\n"
"add z11.s, z11.s, z3.s\n"
- "add z12.s, z12.s, z0.s\n"
- "addvl x14, x14, #4\n"
- "add z13.s, z13.s, z1.s\n"
"add z14.s, z14.s, z2.s\n"
"add z15.s, z15.s, z3.s\n"
"tbz %x[flags], #4, 23f\n"
@@ -488,20 +488,20 @@ void sve_hybrid_s8qs_dot_6x4VL (
"ld1w { z2.s }, p2/Z, [x12, #2, MUL VL]\n"
"ld1w { z6.s }, p2/Z, [x13, #2, MUL VL]\n"
"ld1w { z3.s }, p2/Z, [x12, #3, MUL VL]\n"
- "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x12, x12, #4\n"
+ "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
"b 24f\n"
"23:" // Height 2: per layer parameters
- "add x26, %x[qp], %[per_layer_right_shift]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z0.s }, p2/Z, [x26]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
+ "add x24, %x[qp], %[per_layer_right_shift]\n"
+ "ld1rw { z0.s }, p2/Z, [x24]\n"
"mov z1.d, z0.d\n"
- "mov z5.d, z4.d\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
"mov z2.d, z0.d\n"
- "mov z6.d, z4.d\n"
"mov z3.d, z0.d\n"
+ "mov z5.d, z4.d\n"
+ "mov z6.d, z4.d\n"
"mov z7.d, z4.d\n"
"24:" // Height 2: parameters loaded
".inst 0x04a47508 // sqrdmulh z8.s, z8.s, z4.s\n"
@@ -516,92 +516,92 @@ void sve_hybrid_s8qs_dot_6x4VL (
"and z4.d, z8.d, z0.d\n"
"and z5.d, z9.d, z1.d\n"
"and z6.d, z10.d, z2.d\n"
- "and z7.d, z11.d, z3.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
"sqadd z8.s, z8.s, z4.s\n"
"sqadd z9.s, z9.s, z5.s\n"
"sqadd z10.s, z10.s, z6.s\n"
- "sqadd z11.s, z11.s, z7.s\n"
+ "and z7.d, z11.d, z3.d\n"
"and z4.d, z12.d, z0.d\n"
"and z5.d, z13.d, z1.d\n"
- "and z6.d, z14.d, z2.d\n"
- "and z7.d, z15.d, z3.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
- "asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
+ "sqadd z11.s, z11.s, z7.s\n"
"sqadd z12.s, z12.s, z4.s\n"
"sqadd z13.s, z13.s, z5.s\n"
+ "and z6.d, z14.d, z2.d\n"
+ "and z7.d, z15.d, z3.d\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "asr z7.s, z7.s, #0x1f\n"
"sqadd z14.s, z14.s, z6.s\n"
"sqadd z15.s, z15.s, z7.s\n"
"25:" // Height 2: no shift correction
- "add x25, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
".inst 0x44828808 // srshl z8.s, p2/M, z8.s, z0.s\n"
- "add z8.s, z8.s, z4.s\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
+ "add x24, %x[qp], %[minval]\n"
".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
- "add z9.s, z9.s, z4.s\n"
- "add z10.s, z10.s, z4.s\n"
+ "ld1rw { z5.s }, p2/Z, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
+ "ld1rw { z6.s }, p2/Z, [x24]\n"
".inst 0x4482880c // srshl z12.s, p2/M, z12.s, z0.s\n"
+ "add z8.s, z8.s, z4.s\n"
+ "add z9.s, z9.s, z4.s\n"
+ "add z10.s, z10.s, z4.s\n"
"add z11.s, z11.s, z4.s\n"
"add z12.s, z12.s, z4.s\n"
- ".inst 0x4482882d // srshl z13.s, p2/M, z13.s, z1.s\n"
- ".inst 0x4482884e // srshl z14.s, p2/M, z14.s, z2.s\n"
- "add z13.s, z13.s, z4.s\n"
- "add z14.s, z14.s, z4.s\n"
- ".inst 0x4482886f // srshl z15.s, p2/M, z15.s, z3.s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x25]\n"
- "add z15.s, z15.s, z4.s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x25]\n"
"smin z8.s, p2/M, z8.s, z6.s\n"
"smin z9.s, p2/M, z9.s, z6.s\n"
"smin z10.s, p2/M, z10.s, z6.s\n"
"smin z11.s, p2/M, z11.s, z6.s\n"
- "smin z12.s, p2/M, z12.s, z6.s\n"
- "smin z13.s, p2/M, z13.s, z6.s\n"
- "smin z14.s, p2/M, z14.s, z6.s\n"
- "smin z15.s, p2/M, z15.s, z6.s\n"
"smax z8.s, p2/M, z8.s, z5.s\n"
"smax z9.s, p2/M, z9.s, z5.s\n"
"smax z10.s, p2/M, z10.s, z5.s\n"
- "uzp1 z8.h, z8.h, z9.h\n"
"smax z11.s, p2/M, z11.s, z5.s\n"
- "smax z12.s, p2/M, z12.s, z5.s\n"
+ "smin z12.s, p2/M, z12.s, z6.s\n"
+ "uzp1 z8.h, z8.h, z9.h\n"
+ ".inst 0x4482882d // srshl z13.s, p2/M, z13.s, z1.s\n"
"uzp1 z9.h, z10.h, z11.h\n"
+ "smax z12.s, p2/M, z12.s, z5.s\n"
"uzp1 z8.b, z8.b, z9.b\n"
+ "st1b { z8.b }, p1, [x9]\n"
+ "add z13.s, z13.s, z4.s\n"
+ "addvl x9, x9, #1\n"
+ ".inst 0x4482884e // srshl z14.s, p2/M, z14.s, z2.s\n"
+ ".inst 0x4482886f // srshl z15.s, p2/M, z15.s, z3.s\n"
+ "smin z13.s, p2/M, z13.s, z6.s\n"
+ "add z14.s, z14.s, z4.s\n"
+ "add z15.s, z15.s, z4.s\n"
"smax z13.s, p2/M, z13.s, z5.s\n"
- "smax z14.s, p2/M, z14.s, z5.s\n"
+ "smin z14.s, p2/M, z14.s, z6.s\n"
+ "smin z15.s, p2/M, z15.s, z6.s\n"
"uzp1 z12.h, z12.h, z13.h\n"
- "st1b { z8.b }, p1, [x11]\n"
+ "smax z14.s, p2/M, z14.s, z5.s\n"
"smax z15.s, p2/M, z15.s, z5.s\n"
"uzp1 z13.h, z14.h, z15.h\n"
"uzp1 z12.b, z12.b, z13.b\n"
- "st1b { z12.b }, p1, [x24]\n"
- "addvl x11, x11, #1\n"
+ "st1b { z12.b }, p1, [x23]\n"
"26:" // Height 2: Writeback done
"decw x10, ALL, MUL #4\n"
"cmp x10, XZR\n"
"bgt 15b\n"
"b 80f\n"
"27:" // Height 3
- "mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
+ "mov x11, %x[col_bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"28:" // Height 3: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x10\n"
"mov z8.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z9.s, #0x0\n"
+ "whilelt p1.b, x19, x10\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"mov z12.s, #0x0\n"
@@ -613,99 +613,99 @@ void sve_hybrid_s8qs_dot_6x4VL (
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"29:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"30:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 31f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 32f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 32f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
"b 32f\n"
"31:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
"32:" // Height 3: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 34f\n"
"33:" // Height 3: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
- "sub x27, x27, #0x10\n"
- "ld1rqb { z2.b }, p0/Z, [x24]\n"
- "ld1b { z6.b }, p2/Z, [x9]\n"
+ "ld1b { z6.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"sdot z8.s, z6.b, z0.b[0]\n"
- "sdot z12.s, z6.b, z1.b[0]\n"
- "ld1b { z7.b }, p2/Z, [x9, #1, MUL VL]\n"
- "sdot z16.s, z6.b, z2.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "cmp x26, #0x10\n"
"sdot z9.s, z7.b, z0.b[0]\n"
- "ld1b { z6.b }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "add x25, x25, #0x10\n"
+ "sdot z12.s, z6.b, z1.b[0]\n"
+ "add x24, x24, #0x10\n"
"sdot z13.s, z7.b, z1.b[0]\n"
+ "add x23, x23, #0x10\n"
+ "sdot z16.s, z6.b, z2.b[0]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
"sdot z17.s, z7.b, z2.b[0]\n"
- "ld1b { z7.b }, p2/Z, [x9, #3, MUL VL]\n"
- "cmp x27, #0x10\n"
+ "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[0]\n"
"sdot z14.s, z6.b, z1.b[0]\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
"sdot z18.s, z6.b, z2.b[0]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #4, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[0]\n"
- "ld1b { z6.b }, p2/Z, [x9, #4, MUL VL]\n"
- "add x24, x24, #0x10\n"
"sdot z15.s, z7.b, z1.b[0]\n"
"sdot z19.s, z7.b, z2.b[0]\n"
- "ld1b { z7.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #5, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[1]\n"
"sdot z12.s, z6.b, z1.b[1]\n"
"sdot z16.s, z6.b, z2.b[1]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #6, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[1]\n"
- "ld1b { z6.b }, p2/Z, [x9, #6, MUL VL]\n"
"sdot z13.s, z7.b, z1.b[1]\n"
"sdot z17.s, z7.b, z2.b[1]\n"
- "ld1b { z7.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
+ "ld1b { z7.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #16\n"
"sdot z10.s, z6.b, z0.b[1]\n"
"sdot z14.s, z6.b, z1.b[1]\n"
"sdot z18.s, z6.b, z2.b[1]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-8, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[1]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-8, MUL VL]\n"
"sdot z15.s, z7.b, z1.b[1]\n"
"sdot z19.s, z7.b, z2.b[1]\n"
- "ld1b { z7.b }, p2/Z, [x9, #-7, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-7, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[2]\n"
"sdot z12.s, z6.b, z1.b[2]\n"
"sdot z16.s, z6.b, z2.b[2]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-6, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[2]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-6, MUL VL]\n"
"sdot z13.s, z7.b, z1.b[2]\n"
"sdot z17.s, z7.b, z2.b[2]\n"
- "ld1b { z7.b }, p2/Z, [x9, #-5, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-5, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[2]\n"
"sdot z14.s, z6.b, z1.b[2]\n"
"sdot z18.s, z6.b, z2.b[2]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-4, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[2]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-4, MUL VL]\n"
"sdot z15.s, z7.b, z1.b[2]\n"
"sdot z19.s, z7.b, z2.b[2]\n"
- "ld1b { z7.b }, p2/Z, [x9, #-3, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-3, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[3]\n"
"sdot z12.s, z6.b, z1.b[3]\n"
"sdot z16.s, z6.b, z2.b[3]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[3]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-2, MUL VL]\n"
"sdot z13.s, z7.b, z1.b[3]\n"
"sdot z17.s, z7.b, z2.b[3]\n"
- "ld1b { z7.b }, p2/Z, [x9, #-1, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-1, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[3]\n"
"sdot z14.s, z6.b, z1.b[3]\n"
"sdot z18.s, z6.b, z2.b[3]\n"
@@ -714,22 +714,22 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sdot z19.s, z7.b, z2.b[3]\n"
"bgt 33b\n"
"34:" // Height 3: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
- "subs x27, x27, #0x4\n"
- "ld1rqb { z2.b }, p0/Z, [x24]\n"
- "ld1b { z6.b }, p2/Z, [x9]\n"
+ "ld1b { z6.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"sdot z8.s, z6.b, z0.b[0]\n"
- "sdot z12.s, z6.b, z1.b[0]\n"
- "ld1b { z7.b }, p2/Z, [x9, #1, MUL VL]\n"
- "sdot z16.s, z6.b, z2.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
"sdot z9.s, z7.b, z0.b[0]\n"
- "ld1b { z6.b }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "sdot z12.s, z6.b, z1.b[0]\n"
"sdot z13.s, z7.b, z1.b[0]\n"
+ "sdot z16.s, z6.b, z2.b[0]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
"sdot z17.s, z7.b, z2.b[0]\n"
- "ld1b { z7.b }, p2/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"sdot z10.s, z6.b, z0.b[0]\n"
"sdot z14.s, z6.b, z1.b[0]\n"
"sdot z18.s, z6.b, z2.b[0]\n"
@@ -737,18 +737,18 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sdot z15.s, z7.b, z1.b[0]\n"
"sdot z19.s, z7.b, z2.b[0]\n"
"ble 35f\n"
- "ld1b { z6.b }, p2/Z, [x9]\n"
- "ld1b { z7.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28]\n"
"sdot z8.s, z6.b, z0.b[1]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"sdot z12.s, z6.b, z1.b[1]\n"
"sdot z16.s, z6.b, z2.b[1]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[1]\n"
- "ld1b { z6.b }, p2/Z, [x9, #2, MUL VL]\n"
- "subs x27, x27, #0x4\n"
"sdot z13.s, z7.b, z1.b[1]\n"
"sdot z17.s, z7.b, z2.b[1]\n"
- "ld1b { z7.b }, p2/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"sdot z10.s, z6.b, z0.b[1]\n"
"sdot z14.s, z6.b, z1.b[1]\n"
"sdot z18.s, z6.b, z2.b[1]\n"
@@ -756,18 +756,18 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sdot z15.s, z7.b, z1.b[1]\n"
"sdot z19.s, z7.b, z2.b[1]\n"
"ble 35f\n"
- "ld1b { z6.b }, p2/Z, [x9]\n"
- "ld1b { z7.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28]\n"
"sdot z8.s, z6.b, z0.b[2]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"sdot z12.s, z6.b, z1.b[2]\n"
"sdot z16.s, z6.b, z2.b[2]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[2]\n"
- "ld1b { z6.b }, p2/Z, [x9, #2, MUL VL]\n"
- "subs x27, x27, #0x4\n"
"sdot z13.s, z7.b, z1.b[2]\n"
"sdot z17.s, z7.b, z2.b[2]\n"
- "ld1b { z7.b }, p2/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"sdot z10.s, z6.b, z0.b[2]\n"
"sdot z14.s, z6.b, z1.b[2]\n"
"sdot z18.s, z6.b, z2.b[2]\n"
@@ -775,17 +775,17 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sdot z15.s, z7.b, z1.b[2]\n"
"sdot z19.s, z7.b, z2.b[2]\n"
"ble 35f\n"
- "ld1b { z6.b }, p2/Z, [x9]\n"
- "ld1b { z7.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28]\n"
"sdot z8.s, z6.b, z0.b[3]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
"sdot z12.s, z6.b, z1.b[3]\n"
"sdot z16.s, z6.b, z2.b[3]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[3]\n"
- "ld1b { z6.b }, p2/Z, [x9, #2, MUL VL]\n"
"sdot z13.s, z7.b, z1.b[3]\n"
"sdot z17.s, z7.b, z2.b[3]\n"
- "ld1b { z7.b }, p2/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"sdot z10.s, z6.b, z0.b[3]\n"
"sdot z14.s, z6.b, z1.b[3]\n"
"sdot z18.s, z6.b, z2.b[3]\n"
@@ -793,27 +793,27 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sdot z15.s, z7.b, z1.b[3]\n"
"sdot z19.s, z7.b, z2.b[3]\n"
"35:" // Height 3: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 30b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "ld1w { z0.s }, p2/Z, [x14]\n"
- "add x24, x11, x20\n"
- "add x23, x24, x20\n"
- "ld1w { z1.s }, p2/Z, [x14, #1, MUL VL]\n"
- "ld1w { z2.s }, p2/Z, [x14, #2, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z0.s }, p2/Z, [x11]\n"
"add z8.s, z8.s, z0.s\n"
+ "ld1w { z1.s }, p2/Z, [x11, #1, MUL VL]\n"
+ "add x23, x9, x19\n"
+ "add z12.s, z12.s, z0.s\n"
+ "ld1w { z2.s }, p2/Z, [x11, #2, MUL VL]\n"
+ "add x22, x23, x19\n"
+ "add z16.s, z16.s, z0.s\n"
+ "ld1w { z3.s }, p2/Z, [x11, #3, MUL VL]\n"
+ "addvl x11, x11, #4\n"
"add z9.s, z9.s, z1.s\n"
- "ld1w { z3.s }, p2/Z, [x14, #3, MUL VL]\n"
+ "add z13.s, z13.s, z1.s\n"
"add z10.s, z10.s, z2.s\n"
"add z11.s, z11.s, z3.s\n"
- "addvl x14, x14, #4\n"
- "add z12.s, z12.s, z0.s\n"
- "add z13.s, z13.s, z1.s\n"
"add z14.s, z14.s, z2.s\n"
"add z15.s, z15.s, z3.s\n"
- "add z16.s, z16.s, z0.s\n"
"add z17.s, z17.s, z1.s\n"
"add z18.s, z18.s, z2.s\n"
"add z19.s, z19.s, z3.s\n"
@@ -825,20 +825,20 @@ void sve_hybrid_s8qs_dot_6x4VL (
"ld1w { z2.s }, p2/Z, [x12, #2, MUL VL]\n"
"ld1w { z6.s }, p2/Z, [x13, #2, MUL VL]\n"
"ld1w { z3.s }, p2/Z, [x12, #3, MUL VL]\n"
- "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x12, x12, #4\n"
+ "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
"b 37f\n"
"36:" // Height 3: per layer parameters
- "add x26, %x[qp], %[per_layer_right_shift]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z0.s }, p2/Z, [x26]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
+ "add x24, %x[qp], %[per_layer_right_shift]\n"
+ "ld1rw { z0.s }, p2/Z, [x24]\n"
"mov z1.d, z0.d\n"
- "mov z5.d, z4.d\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
"mov z2.d, z0.d\n"
- "mov z6.d, z4.d\n"
"mov z3.d, z0.d\n"
+ "mov z5.d, z4.d\n"
+ "mov z6.d, z4.d\n"
"mov z7.d, z4.d\n"
"37:" // Height 3: parameters loaded
".inst 0x04a47508 // sqrdmulh z8.s, z8.s, z4.s\n"
@@ -857,124 +857,124 @@ void sve_hybrid_s8qs_dot_6x4VL (
"and z4.d, z8.d, z0.d\n"
"and z5.d, z9.d, z1.d\n"
"and z6.d, z10.d, z2.d\n"
- "and z7.d, z11.d, z3.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
"sqadd z8.s, z8.s, z4.s\n"
"sqadd z9.s, z9.s, z5.s\n"
"sqadd z10.s, z10.s, z6.s\n"
- "sqadd z11.s, z11.s, z7.s\n"
+ "and z7.d, z11.d, z3.d\n"
"and z4.d, z12.d, z0.d\n"
"and z5.d, z13.d, z1.d\n"
- "and z6.d, z14.d, z2.d\n"
- "and z7.d, z15.d, z3.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
- "asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
+ "sqadd z11.s, z11.s, z7.s\n"
"sqadd z12.s, z12.s, z4.s\n"
"sqadd z13.s, z13.s, z5.s\n"
+ "and z6.d, z14.d, z2.d\n"
+ "and z7.d, z15.d, z3.d\n"
+ "and z4.d, z16.d, z0.d\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ "asr z4.s, z4.s, #0x1f\n"
"sqadd z14.s, z14.s, z6.s\n"
"sqadd z15.s, z15.s, z7.s\n"
- "and z4.d, z16.d, z0.d\n"
+ "sqadd z16.s, z16.s, z4.s\n"
"and z5.d, z17.d, z1.d\n"
"and z6.d, z18.d, z2.d\n"
"and z7.d, z19.d, z3.d\n"
- "asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
"asr z7.s, z7.s, #0x1f\n"
- "sqadd z16.s, z16.s, z4.s\n"
"sqadd z17.s, z17.s, z5.s\n"
"sqadd z18.s, z18.s, z6.s\n"
"sqadd z19.s, z19.s, z7.s\n"
"38:" // Height 3: no shift correction
- "add x25, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
".inst 0x44828808 // srshl z8.s, p2/M, z8.s, z0.s\n"
- "add z8.s, z8.s, z4.s\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
+ "add x24, %x[qp], %[minval]\n"
".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
- "add z9.s, z9.s, z4.s\n"
- "add z10.s, z10.s, z4.s\n"
+ "ld1rw { z5.s }, p2/Z, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
+ "ld1rw { z6.s }, p2/Z, [x24]\n"
".inst 0x4482880c // srshl z12.s, p2/M, z12.s, z0.s\n"
+ "add z8.s, z8.s, z4.s\n"
+ "add z9.s, z9.s, z4.s\n"
+ "add z10.s, z10.s, z4.s\n"
"add z11.s, z11.s, z4.s\n"
"add z12.s, z12.s, z4.s\n"
- ".inst 0x4482882d // srshl z13.s, p2/M, z13.s, z1.s\n"
- ".inst 0x4482884e // srshl z14.s, p2/M, z14.s, z2.s\n"
- "add z13.s, z13.s, z4.s\n"
- "add z14.s, z14.s, z4.s\n"
- ".inst 0x4482886f // srshl z15.s, p2/M, z15.s, z3.s\n"
- ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z15.s, z15.s, z4.s\n"
- "add z16.s, z16.s, z4.s\n"
- ".inst 0x44828831 // srshl z17.s, p2/M, z17.s, z1.s\n"
- ".inst 0x44828852 // srshl z18.s, p2/M, z18.s, z2.s\n"
- "add z17.s, z17.s, z4.s\n"
- "add z18.s, z18.s, z4.s\n"
- ".inst 0x44828873 // srshl z19.s, p2/M, z19.s, z3.s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x25]\n"
- "add z19.s, z19.s, z4.s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x25]\n"
"smin z8.s, p2/M, z8.s, z6.s\n"
"smin z9.s, p2/M, z9.s, z6.s\n"
"smin z10.s, p2/M, z10.s, z6.s\n"
"smin z11.s, p2/M, z11.s, z6.s\n"
- "smin z12.s, p2/M, z12.s, z6.s\n"
- "smin z13.s, p2/M, z13.s, z6.s\n"
- "smin z14.s, p2/M, z14.s, z6.s\n"
- "smin z15.s, p2/M, z15.s, z6.s\n"
- "smin z16.s, p2/M, z16.s, z6.s\n"
- "smin z17.s, p2/M, z17.s, z6.s\n"
- "smin z18.s, p2/M, z18.s, z6.s\n"
- "smin z19.s, p2/M, z19.s, z6.s\n"
"smax z8.s, p2/M, z8.s, z5.s\n"
"smax z9.s, p2/M, z9.s, z5.s\n"
"smax z10.s, p2/M, z10.s, z5.s\n"
- "uzp1 z8.h, z8.h, z9.h\n"
"smax z11.s, p2/M, z11.s, z5.s\n"
- "smax z12.s, p2/M, z12.s, z5.s\n"
+ "smin z12.s, p2/M, z12.s, z6.s\n"
+ "uzp1 z8.h, z8.h, z9.h\n"
+ ".inst 0x4482882d // srshl z13.s, p2/M, z13.s, z1.s\n"
"uzp1 z9.h, z10.h, z11.h\n"
+ "smax z12.s, p2/M, z12.s, z5.s\n"
"uzp1 z8.b, z8.b, z9.b\n"
+ "st1b { z8.b }, p1, [x9]\n"
+ "add z13.s, z13.s, z4.s\n"
+ "addvl x9, x9, #1\n"
+ ".inst 0x4482884e // srshl z14.s, p2/M, z14.s, z2.s\n"
+ ".inst 0x4482886f // srshl z15.s, p2/M, z15.s, z3.s\n"
+ ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
+ "smin z13.s, p2/M, z13.s, z6.s\n"
+ ".inst 0x44828831 // srshl z17.s, p2/M, z17.s, z1.s\n"
+ "add z14.s, z14.s, z4.s\n"
+ "add z15.s, z15.s, z4.s\n"
+ "add z16.s, z16.s, z4.s\n"
+ "add z17.s, z17.s, z4.s\n"
"smax z13.s, p2/M, z13.s, z5.s\n"
- "smax z14.s, p2/M, z14.s, z5.s\n"
+ "smin z14.s, p2/M, z14.s, z6.s\n"
+ "smin z15.s, p2/M, z15.s, z6.s\n"
+ "smin z16.s, p2/M, z16.s, z6.s\n"
"uzp1 z12.h, z12.h, z13.h\n"
- "st1b { z8.b }, p1, [x11]\n"
+ "smax z14.s, p2/M, z14.s, z5.s\n"
"smax z15.s, p2/M, z15.s, z5.s\n"
"smax z16.s, p2/M, z16.s, z5.s\n"
+ "smin z17.s, p2/M, z17.s, z6.s\n"
+ ".inst 0x44828852 // srshl z18.s, p2/M, z18.s, z2.s\n"
"uzp1 z13.h, z14.h, z15.h\n"
+ ".inst 0x44828873 // srshl z19.s, p2/M, z19.s, z3.s\n"
"uzp1 z12.b, z12.b, z13.b\n"
+ "st1b { z12.b }, p1, [x23]\n"
+ "add z18.s, z18.s, z4.s\n"
"smax z17.s, p2/M, z17.s, z5.s\n"
- "smax z18.s, p2/M, z18.s, z5.s\n"
+ "add z19.s, z19.s, z4.s\n"
+ "smin z18.s, p2/M, z18.s, z6.s\n"
"uzp1 z16.h, z16.h, z17.h\n"
- "st1b { z12.b }, p1, [x24]\n"
+ "smin z19.s, p2/M, z19.s, z6.s\n"
+ "smax z18.s, p2/M, z18.s, z5.s\n"
"smax z19.s, p2/M, z19.s, z5.s\n"
"uzp1 z17.h, z18.h, z19.h\n"
"uzp1 z16.b, z16.b, z17.b\n"
- "st1b { z16.b }, p1, [x23]\n"
- "addvl x11, x11, #1\n"
+ "st1b { z16.b }, p1, [x22]\n"
"39:" // Height 3: Writeback done
"decw x10, ALL, MUL #4\n"
"cmp x10, XZR\n"
"bgt 28b\n"
"b 80f\n"
"40:" // Height 4
- "mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
+ "mov x11, %x[col_bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"41:" // Height 4: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x10\n"
"mov z8.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z9.s, #0x0\n"
+ "whilelt p1.b, x19, x10\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"mov z12.s, #0x0\n"
@@ -990,118 +990,118 @@ void sve_hybrid_s8qs_dot_6x4VL (
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"42:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"43:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 44f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 45f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 45f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
"b 45f\n"
"44:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
"45:" // Height 4: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 47f\n"
"46:" // Height 4: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
- "sub x27, x27, #0x10\n"
- "ld1rqb { z2.b }, p0/Z, [x24]\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "cmp x27, #0x10\n"
- "add x26, x26, #0x10\n"
- "ld1b { z6.b }, p2/Z, [x9]\n"
- "ld1b { z7.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"sdot z8.s, z6.b, z0.b[0]\n"
- "sdot z12.s, z6.b, z1.b[0]\n"
- "sdot z16.s, z6.b, z2.b[0]\n"
- "sdot z20.s, z6.b, z3.b[0]\n"
- "ld1b { z6.b }, p2/Z, [x9, #2, MUL VL]\n"
- "add x25, x25, #0x10\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "cmp x26, #0x10\n"
"sdot z9.s, z7.b, z0.b[0]\n"
- "sdot z13.s, z7.b, z1.b[0]\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "add x25, x25, #0x10\n"
+ "sdot z12.s, z6.b, z1.b[0]\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
"add x24, x24, #0x10\n"
+ "sdot z16.s, z6.b, z2.b[0]\n"
"add x23, x23, #0x10\n"
+ "sdot z13.s, z7.b, z1.b[0]\n"
+ "add x22, x22, #0x10\n"
"sdot z17.s, z7.b, z2.b[0]\n"
+ "sdot z20.s, z6.b, z3.b[0]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
"sdot z21.s, z7.b, z3.b[0]\n"
- "ld1b { z7.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[0]\n"
"sdot z14.s, z6.b, z1.b[0]\n"
"sdot z18.s, z6.b, z2.b[0]\n"
"sdot z22.s, z6.b, z3.b[0]\n"
- "ld1b { z6.b }, p2/Z, [x9, #4, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #4, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[0]\n"
"sdot z15.s, z7.b, z1.b[0]\n"
"sdot z19.s, z7.b, z2.b[0]\n"
"sdot z23.s, z7.b, z3.b[0]\n"
- "ld1b { z7.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #5, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[1]\n"
"sdot z12.s, z6.b, z1.b[1]\n"
"sdot z16.s, z6.b, z2.b[1]\n"
"sdot z20.s, z6.b, z3.b[1]\n"
- "ld1b { z6.b }, p2/Z, [x9, #6, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #6, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[1]\n"
"sdot z13.s, z7.b, z1.b[1]\n"
"sdot z17.s, z7.b, z2.b[1]\n"
"sdot z21.s, z7.b, z3.b[1]\n"
- "ld1b { z7.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
+ "ld1b { z7.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #16\n"
"sdot z10.s, z6.b, z0.b[1]\n"
"sdot z14.s, z6.b, z1.b[1]\n"
"sdot z18.s, z6.b, z2.b[1]\n"
"sdot z22.s, z6.b, z3.b[1]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-8, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-8, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[1]\n"
"sdot z15.s, z7.b, z1.b[1]\n"
"sdot z19.s, z7.b, z2.b[1]\n"
"sdot z23.s, z7.b, z3.b[1]\n"
- "ld1b { z7.b }, p2/Z, [x9, #-7, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-7, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[2]\n"
"sdot z12.s, z6.b, z1.b[2]\n"
"sdot z16.s, z6.b, z2.b[2]\n"
"sdot z20.s, z6.b, z3.b[2]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-6, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-6, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[2]\n"
"sdot z13.s, z7.b, z1.b[2]\n"
"sdot z17.s, z7.b, z2.b[2]\n"
"sdot z21.s, z7.b, z3.b[2]\n"
- "ld1b { z7.b }, p2/Z, [x9, #-5, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-5, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[2]\n"
"sdot z14.s, z6.b, z1.b[2]\n"
"sdot z18.s, z6.b, z2.b[2]\n"
"sdot z22.s, z6.b, z3.b[2]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-4, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-4, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[2]\n"
"sdot z15.s, z7.b, z1.b[2]\n"
"sdot z19.s, z7.b, z2.b[2]\n"
"sdot z23.s, z7.b, z3.b[2]\n"
- "ld1b { z7.b }, p2/Z, [x9, #-3, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-3, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[3]\n"
"sdot z12.s, z6.b, z1.b[3]\n"
"sdot z16.s, z6.b, z2.b[3]\n"
"sdot z20.s, z6.b, z3.b[3]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-2, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[3]\n"
"sdot z13.s, z7.b, z1.b[3]\n"
"sdot z17.s, z7.b, z2.b[3]\n"
"sdot z21.s, z7.b, z3.b[3]\n"
- "ld1b { z7.b }, p2/Z, [x9, #-1, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-1, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[3]\n"
"sdot z14.s, z6.b, z1.b[3]\n"
"sdot z18.s, z6.b, z2.b[3]\n"
@@ -1112,25 +1112,25 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sdot z23.s, z7.b, z3.b[3]\n"
"bgt 46b\n"
"47:" // Height 4: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
- "subs x27, x27, #0x4\n"
- "ld1rqb { z2.b }, p0/Z, [x24]\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "ld1b { z6.b }, p2/Z, [x9]\n"
- "ld1b { z7.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"sdot z8.s, z6.b, z0.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "sdot z9.s, z7.b, z0.b[0]\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
"sdot z12.s, z6.b, z1.b[0]\n"
+ "sdot z13.s, z7.b, z1.b[0]\n"
"sdot z16.s, z6.b, z2.b[0]\n"
"sdot z20.s, z6.b, z3.b[0]\n"
- "ld1b { z6.b }, p2/Z, [x9, #2, MUL VL]\n"
- "sdot z9.s, z7.b, z0.b[0]\n"
- "sdot z13.s, z7.b, z1.b[0]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
"sdot z17.s, z7.b, z2.b[0]\n"
"sdot z21.s, z7.b, z3.b[0]\n"
- "ld1b { z7.b }, p2/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"sdot z10.s, z6.b, z0.b[0]\n"
"sdot z14.s, z6.b, z1.b[0]\n"
"sdot z18.s, z6.b, z2.b[0]\n"
@@ -1140,20 +1140,20 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sdot z19.s, z7.b, z2.b[0]\n"
"sdot z23.s, z7.b, z3.b[0]\n"
"ble 48f\n"
- "ld1b { z6.b }, p2/Z, [x9]\n"
- "ld1b { z7.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28]\n"
"sdot z8.s, z6.b, z0.b[1]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"sdot z12.s, z6.b, z1.b[1]\n"
"sdot z16.s, z6.b, z2.b[1]\n"
"sdot z20.s, z6.b, z3.b[1]\n"
- "ld1b { z6.b }, p2/Z, [x9, #2, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[1]\n"
"sdot z13.s, z7.b, z1.b[1]\n"
"sdot z17.s, z7.b, z2.b[1]\n"
"sdot z21.s, z7.b, z3.b[1]\n"
- "ld1b { z7.b }, p2/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"sdot z10.s, z6.b, z0.b[1]\n"
"sdot z14.s, z6.b, z1.b[1]\n"
"sdot z18.s, z6.b, z2.b[1]\n"
@@ -1163,20 +1163,20 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sdot z19.s, z7.b, z2.b[1]\n"
"sdot z23.s, z7.b, z3.b[1]\n"
"ble 48f\n"
- "ld1b { z6.b }, p2/Z, [x9]\n"
- "ld1b { z7.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28]\n"
"sdot z8.s, z6.b, z0.b[2]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"sdot z12.s, z6.b, z1.b[2]\n"
"sdot z16.s, z6.b, z2.b[2]\n"
"sdot z20.s, z6.b, z3.b[2]\n"
- "ld1b { z6.b }, p2/Z, [x9, #2, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[2]\n"
"sdot z13.s, z7.b, z1.b[2]\n"
"sdot z17.s, z7.b, z2.b[2]\n"
"sdot z21.s, z7.b, z3.b[2]\n"
- "ld1b { z7.b }, p2/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"sdot z10.s, z6.b, z0.b[2]\n"
"sdot z14.s, z6.b, z1.b[2]\n"
"sdot z18.s, z6.b, z2.b[2]\n"
@@ -1186,19 +1186,19 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sdot z19.s, z7.b, z2.b[2]\n"
"sdot z23.s, z7.b, z3.b[2]\n"
"ble 48f\n"
- "ld1b { z6.b }, p2/Z, [x9]\n"
- "ld1b { z7.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28]\n"
"sdot z8.s, z6.b, z0.b[3]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
"sdot z12.s, z6.b, z1.b[3]\n"
"sdot z16.s, z6.b, z2.b[3]\n"
"sdot z20.s, z6.b, z3.b[3]\n"
- "ld1b { z6.b }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[3]\n"
"sdot z13.s, z7.b, z1.b[3]\n"
"sdot z17.s, z7.b, z2.b[3]\n"
"sdot z21.s, z7.b, z3.b[3]\n"
- "ld1b { z7.b }, p2/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"sdot z10.s, z6.b, z0.b[3]\n"
"sdot z14.s, z6.b, z1.b[3]\n"
"sdot z18.s, z6.b, z2.b[3]\n"
@@ -1208,28 +1208,28 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sdot z19.s, z7.b, z2.b[3]\n"
"sdot z23.s, z7.b, z3.b[3]\n"
"48:" // Height 4: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 43b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "ld1w { z0.s }, p2/Z, [x14]\n"
- "add x24, x11, x20\n"
- "add x23, x24, x20\n"
- "ld1w { z1.s }, p2/Z, [x14, #1, MUL VL]\n"
- "ld1w { z2.s }, p2/Z, [x14, #2, MUL VL]\n"
- "add x22, x23, x20\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z0.s }, p2/Z, [x11]\n"
"add z8.s, z8.s, z0.s\n"
- "ld1w { z3.s }, p2/Z, [x14, #3, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x11, #1, MUL VL]\n"
+ "add x23, x9, x19\n"
+ "add z12.s, z12.s, z0.s\n"
+ "ld1w { z2.s }, p2/Z, [x11, #2, MUL VL]\n"
+ "add x22, x23, x19\n"
+ "add z16.s, z16.s, z0.s\n"
+ "ld1w { z3.s }, p2/Z, [x11, #3, MUL VL]\n"
+ "add x21, x22, x19\n"
"add z9.s, z9.s, z1.s\n"
+ "addvl x11, x11, #4\n"
+ "add z13.s, z13.s, z1.s\n"
"add z10.s, z10.s, z2.s\n"
- "addvl x14, x14, #4\n"
"add z11.s, z11.s, z3.s\n"
- "add z12.s, z12.s, z0.s\n"
- "add z13.s, z13.s, z1.s\n"
"add z14.s, z14.s, z2.s\n"
"add z15.s, z15.s, z3.s\n"
- "add z16.s, z16.s, z0.s\n"
"add z17.s, z17.s, z1.s\n"
"add z18.s, z18.s, z2.s\n"
"add z19.s, z19.s, z3.s\n"
@@ -1245,20 +1245,20 @@ void sve_hybrid_s8qs_dot_6x4VL (
"ld1w { z2.s }, p2/Z, [x12, #2, MUL VL]\n"
"ld1w { z6.s }, p2/Z, [x13, #2, MUL VL]\n"
"ld1w { z3.s }, p2/Z, [x12, #3, MUL VL]\n"
- "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x12, x12, #4\n"
+ "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
"b 50f\n"
"49:" // Height 4: per layer parameters
- "add x26, %x[qp], %[per_layer_right_shift]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z0.s }, p2/Z, [x26]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
+ "add x24, %x[qp], %[per_layer_right_shift]\n"
+ "ld1rw { z0.s }, p2/Z, [x24]\n"
"mov z1.d, z0.d\n"
- "mov z5.d, z4.d\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
"mov z2.d, z0.d\n"
- "mov z6.d, z4.d\n"
"mov z3.d, z0.d\n"
+ "mov z5.d, z4.d\n"
+ "mov z6.d, z4.d\n"
"mov z7.d, z4.d\n"
"50:" // Height 4: parameters loaded
".inst 0x04a47508 // sqrdmulh z8.s, z8.s, z4.s\n"
@@ -1281,156 +1281,156 @@ void sve_hybrid_s8qs_dot_6x4VL (
"and z4.d, z8.d, z0.d\n"
"and z5.d, z9.d, z1.d\n"
"and z6.d, z10.d, z2.d\n"
- "and z7.d, z11.d, z3.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
"sqadd z8.s, z8.s, z4.s\n"
"sqadd z9.s, z9.s, z5.s\n"
"sqadd z10.s, z10.s, z6.s\n"
- "sqadd z11.s, z11.s, z7.s\n"
+ "and z7.d, z11.d, z3.d\n"
"and z4.d, z12.d, z0.d\n"
"and z5.d, z13.d, z1.d\n"
- "and z6.d, z14.d, z2.d\n"
- "and z7.d, z15.d, z3.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
- "asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
+ "sqadd z11.s, z11.s, z7.s\n"
"sqadd z12.s, z12.s, z4.s\n"
"sqadd z13.s, z13.s, z5.s\n"
+ "and z6.d, z14.d, z2.d\n"
+ "and z7.d, z15.d, z3.d\n"
+ "and z4.d, z16.d, z0.d\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ "asr z4.s, z4.s, #0x1f\n"
"sqadd z14.s, z14.s, z6.s\n"
"sqadd z15.s, z15.s, z7.s\n"
- "and z4.d, z16.d, z0.d\n"
+ "sqadd z16.s, z16.s, z4.s\n"
"and z5.d, z17.d, z1.d\n"
"and z6.d, z18.d, z2.d\n"
"and z7.d, z19.d, z3.d\n"
- "asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
"asr z7.s, z7.s, #0x1f\n"
- "sqadd z16.s, z16.s, z4.s\n"
"sqadd z17.s, z17.s, z5.s\n"
"sqadd z18.s, z18.s, z6.s\n"
"sqadd z19.s, z19.s, z7.s\n"
"and z4.d, z20.d, z0.d\n"
"and z5.d, z21.d, z1.d\n"
"and z6.d, z22.d, z2.d\n"
- "and z7.d, z23.d, z3.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
"sqadd z20.s, z20.s, z4.s\n"
"sqadd z21.s, z21.s, z5.s\n"
"sqadd z22.s, z22.s, z6.s\n"
+ "and z7.d, z23.d, z3.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
"sqadd z23.s, z23.s, z7.s\n"
"51:" // Height 4: no shift correction
- "add x25, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
".inst 0x44828808 // srshl z8.s, p2/M, z8.s, z0.s\n"
- "add z8.s, z8.s, z4.s\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
+ "add x24, %x[qp], %[minval]\n"
".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
- "add z9.s, z9.s, z4.s\n"
- "add z10.s, z10.s, z4.s\n"
+ "ld1rw { z5.s }, p2/Z, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
+ "ld1rw { z6.s }, p2/Z, [x24]\n"
".inst 0x4482880c // srshl z12.s, p2/M, z12.s, z0.s\n"
+ "add z8.s, z8.s, z4.s\n"
+ "add z9.s, z9.s, z4.s\n"
+ "add z10.s, z10.s, z4.s\n"
"add z11.s, z11.s, z4.s\n"
"add z12.s, z12.s, z4.s\n"
- ".inst 0x4482882d // srshl z13.s, p2/M, z13.s, z1.s\n"
- ".inst 0x4482884e // srshl z14.s, p2/M, z14.s, z2.s\n"
- "add z13.s, z13.s, z4.s\n"
- "add z14.s, z14.s, z4.s\n"
- ".inst 0x4482886f // srshl z15.s, p2/M, z15.s, z3.s\n"
- ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z15.s, z15.s, z4.s\n"
- "add z16.s, z16.s, z4.s\n"
- ".inst 0x44828831 // srshl z17.s, p2/M, z17.s, z1.s\n"
- ".inst 0x44828852 // srshl z18.s, p2/M, z18.s, z2.s\n"
- "add z17.s, z17.s, z4.s\n"
- "add z18.s, z18.s, z4.s\n"
- ".inst 0x44828873 // srshl z19.s, p2/M, z19.s, z3.s\n"
- ".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
- "add z19.s, z19.s, z4.s\n"
- "add z20.s, z20.s, z4.s\n"
- ".inst 0x44828835 // srshl z21.s, p2/M, z21.s, z1.s\n"
- ".inst 0x44828856 // srshl z22.s, p2/M, z22.s, z2.s\n"
- "add z21.s, z21.s, z4.s\n"
- "add z22.s, z22.s, z4.s\n"
- ".inst 0x44828877 // srshl z23.s, p2/M, z23.s, z3.s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x25]\n"
- "add z23.s, z23.s, z4.s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x25]\n"
"smin z8.s, p2/M, z8.s, z6.s\n"
"smin z9.s, p2/M, z9.s, z6.s\n"
"smin z10.s, p2/M, z10.s, z6.s\n"
"smin z11.s, p2/M, z11.s, z6.s\n"
- "smin z12.s, p2/M, z12.s, z6.s\n"
- "smin z13.s, p2/M, z13.s, z6.s\n"
- "smin z14.s, p2/M, z14.s, z6.s\n"
- "smin z15.s, p2/M, z15.s, z6.s\n"
- "smin z16.s, p2/M, z16.s, z6.s\n"
- "smin z17.s, p2/M, z17.s, z6.s\n"
- "smin z18.s, p2/M, z18.s, z6.s\n"
- "smin z19.s, p2/M, z19.s, z6.s\n"
- "smin z20.s, p2/M, z20.s, z6.s\n"
- "smin z21.s, p2/M, z21.s, z6.s\n"
- "smin z22.s, p2/M, z22.s, z6.s\n"
- "smin z23.s, p2/M, z23.s, z6.s\n"
"smax z8.s, p2/M, z8.s, z5.s\n"
"smax z9.s, p2/M, z9.s, z5.s\n"
"smax z10.s, p2/M, z10.s, z5.s\n"
- "uzp1 z8.h, z8.h, z9.h\n"
"smax z11.s, p2/M, z11.s, z5.s\n"
- "smax z12.s, p2/M, z12.s, z5.s\n"
+ "smin z12.s, p2/M, z12.s, z6.s\n"
+ "uzp1 z8.h, z8.h, z9.h\n"
+ ".inst 0x4482882d // srshl z13.s, p2/M, z13.s, z1.s\n"
"uzp1 z9.h, z10.h, z11.h\n"
+ "smax z12.s, p2/M, z12.s, z5.s\n"
"uzp1 z8.b, z8.b, z9.b\n"
+ "st1b { z8.b }, p1, [x9]\n"
+ "add z13.s, z13.s, z4.s\n"
+ "addvl x9, x9, #1\n"
+ ".inst 0x4482884e // srshl z14.s, p2/M, z14.s, z2.s\n"
+ ".inst 0x4482886f // srshl z15.s, p2/M, z15.s, z3.s\n"
+ ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
+ "smin z13.s, p2/M, z13.s, z6.s\n"
+ ".inst 0x44828831 // srshl z17.s, p2/M, z17.s, z1.s\n"
+ "add z14.s, z14.s, z4.s\n"
+ "add z15.s, z15.s, z4.s\n"
+ "add z16.s, z16.s, z4.s\n"
+ "add z17.s, z17.s, z4.s\n"
"smax z13.s, p2/M, z13.s, z5.s\n"
- "smax z14.s, p2/M, z14.s, z5.s\n"
+ "smin z14.s, p2/M, z14.s, z6.s\n"
+ "smin z15.s, p2/M, z15.s, z6.s\n"
+ "smin z16.s, p2/M, z16.s, z6.s\n"
"uzp1 z12.h, z12.h, z13.h\n"
- "st1b { z8.b }, p1, [x11]\n"
+ "smax z14.s, p2/M, z14.s, z5.s\n"
"smax z15.s, p2/M, z15.s, z5.s\n"
"smax z16.s, p2/M, z16.s, z5.s\n"
+ "smin z17.s, p2/M, z17.s, z6.s\n"
+ ".inst 0x44828852 // srshl z18.s, p2/M, z18.s, z2.s\n"
"uzp1 z13.h, z14.h, z15.h\n"
+ ".inst 0x44828873 // srshl z19.s, p2/M, z19.s, z3.s\n"
"uzp1 z12.b, z12.b, z13.b\n"
+ "st1b { z12.b }, p1, [x23]\n"
+ "add z18.s, z18.s, z4.s\n"
"smax z17.s, p2/M, z17.s, z5.s\n"
- "smax z18.s, p2/M, z18.s, z5.s\n"
+ "add z19.s, z19.s, z4.s\n"
+ ".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
+ "smin z18.s, p2/M, z18.s, z6.s\n"
"uzp1 z16.h, z16.h, z17.h\n"
- "st1b { z12.b }, p1, [x24]\n"
+ "smin z19.s, p2/M, z19.s, z6.s\n"
+ "add z20.s, z20.s, z4.s\n"
+ "smax z18.s, p2/M, z18.s, z5.s\n"
+ ".inst 0x44828835 // srshl z21.s, p2/M, z21.s, z1.s\n"
"smax z19.s, p2/M, z19.s, z5.s\n"
- "smax z20.s, p2/M, z20.s, z5.s\n"
+ "smin z20.s, p2/M, z20.s, z6.s\n"
+ ".inst 0x44828856 // srshl z22.s, p2/M, z22.s, z2.s\n"
+ "add z21.s, z21.s, z4.s\n"
"uzp1 z17.h, z18.h, z19.h\n"
+ "smax z20.s, p2/M, z20.s, z5.s\n"
+ "add z22.s, z22.s, z4.s\n"
"uzp1 z16.b, z16.b, z17.b\n"
+ "st1b { z16.b }, p1, [x22]\n"
+ "smin z21.s, p2/M, z21.s, z6.s\n"
+ "smin z22.s, p2/M, z22.s, z6.s\n"
+ ".inst 0x44828877 // srshl z23.s, p2/M, z23.s, z3.s\n"
"smax z21.s, p2/M, z21.s, z5.s\n"
"smax z22.s, p2/M, z22.s, z5.s\n"
+ "add z23.s, z23.s, z4.s\n"
"uzp1 z20.h, z20.h, z21.h\n"
- "st1b { z16.b }, p1, [x23]\n"
+ "smin z23.s, p2/M, z23.s, z6.s\n"
"smax z23.s, p2/M, z23.s, z5.s\n"
"uzp1 z21.h, z22.h, z23.h\n"
"uzp1 z20.b, z20.b, z21.b\n"
- "st1b { z20.b }, p1, [x22]\n"
- "addvl x11, x11, #1\n"
+ "st1b { z20.b }, p1, [x21]\n"
"52:" // Height 4: Writeback done
"decw x10, ALL, MUL #4\n"
"cmp x10, XZR\n"
"bgt 41b\n"
"b 80f\n"
"53:" // Height 5
- "mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
+ "mov x11, %x[col_bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"54:" // Height 5: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x10\n"
"mov z8.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z9.s, #0x0\n"
+ "whilelt p1.b, x19, x10\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"mov z12.s, #0x0\n"
@@ -1450,137 +1450,137 @@ void sve_hybrid_s8qs_dot_6x4VL (
"mov z26.s, #0x0\n"
"mov z27.s, #0x0\n"
"55:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"56:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 57f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 58f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 58f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
"b 58f\n"
"57:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
"58:" // Height 5: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 60f\n"
"59:" // Height 5: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
- "sub x27, x27, #0x10\n"
- "ld1rqb { z2.b }, p0/Z, [x24]\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "cmp x27, #0x10\n"
- "add x26, x26, #0x10\n"
- "ld1rqb { z4.b }, p0/Z, [x22]\n"
- "ld1b { z6.b }, p2/Z, [x9]\n"
+ "ld1b { z6.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"sdot z8.s, z6.b, z0.b[0]\n"
- "sdot z12.s, z6.b, z1.b[0]\n"
- "ld1b { z7.b }, p2/Z, [x9, #1, MUL VL]\n"
- "sdot z16.s, z6.b, z2.b[0]\n"
- "sdot z20.s, z6.b, z3.b[0]\n"
- "add x25, x25, #0x10\n"
- "sdot z24.s, z6.b, z4.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "cmp x26, #0x10\n"
"sdot z9.s, z7.b, z0.b[0]\n"
- "ld1b { z6.b }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "add x25, x25, #0x10\n"
+ "sdot z12.s, z6.b, z1.b[0]\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
"add x24, x24, #0x10\n"
- "sdot z13.s, z7.b, z1.b[0]\n"
- "sdot z17.s, z7.b, z2.b[0]\n"
+ "sdot z16.s, z6.b, z2.b[0]\n"
+ "ld1rqb { z4.b }, p0/Z, [x21]\n"
"add x23, x23, #0x10\n"
+ "sdot z13.s, z7.b, z1.b[0]\n"
"add x22, x22, #0x10\n"
+ "sdot z17.s, z7.b, z2.b[0]\n"
+ "add x21, x21, #0x10\n"
+ "sdot z20.s, z6.b, z3.b[0]\n"
+ "sdot z24.s, z6.b, z4.b[0]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
"sdot z21.s, z7.b, z3.b[0]\n"
"sdot z25.s, z7.b, z4.b[0]\n"
- "ld1b { z7.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[0]\n"
"sdot z14.s, z6.b, z1.b[0]\n"
"sdot z18.s, z6.b, z2.b[0]\n"
"sdot z22.s, z6.b, z3.b[0]\n"
"sdot z26.s, z6.b, z4.b[0]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #4, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[0]\n"
- "ld1b { z6.b }, p2/Z, [x9, #4, MUL VL]\n"
"sdot z15.s, z7.b, z1.b[0]\n"
"sdot z19.s, z7.b, z2.b[0]\n"
"sdot z23.s, z7.b, z3.b[0]\n"
"sdot z27.s, z7.b, z4.b[0]\n"
- "ld1b { z7.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #5, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[1]\n"
"sdot z12.s, z6.b, z1.b[1]\n"
"sdot z16.s, z6.b, z2.b[1]\n"
"sdot z20.s, z6.b, z3.b[1]\n"
"sdot z24.s, z6.b, z4.b[1]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #6, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[1]\n"
- "ld1b { z6.b }, p2/Z, [x9, #6, MUL VL]\n"
"sdot z13.s, z7.b, z1.b[1]\n"
"sdot z17.s, z7.b, z2.b[1]\n"
"sdot z21.s, z7.b, z3.b[1]\n"
"sdot z25.s, z7.b, z4.b[1]\n"
- "ld1b { z7.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
+ "ld1b { z7.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #16\n"
"sdot z10.s, z6.b, z0.b[1]\n"
"sdot z14.s, z6.b, z1.b[1]\n"
"sdot z18.s, z6.b, z2.b[1]\n"
"sdot z22.s, z6.b, z3.b[1]\n"
"sdot z26.s, z6.b, z4.b[1]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-8, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[1]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-8, MUL VL]\n"
"sdot z15.s, z7.b, z1.b[1]\n"
"sdot z19.s, z7.b, z2.b[1]\n"
"sdot z23.s, z7.b, z3.b[1]\n"
"sdot z27.s, z7.b, z4.b[1]\n"
- "ld1b { z7.b }, p2/Z, [x9, #-7, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-7, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[2]\n"
"sdot z12.s, z6.b, z1.b[2]\n"
"sdot z16.s, z6.b, z2.b[2]\n"
"sdot z20.s, z6.b, z3.b[2]\n"
"sdot z24.s, z6.b, z4.b[2]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-6, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[2]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-6, MUL VL]\n"
"sdot z13.s, z7.b, z1.b[2]\n"
"sdot z17.s, z7.b, z2.b[2]\n"
"sdot z21.s, z7.b, z3.b[2]\n"
"sdot z25.s, z7.b, z4.b[2]\n"
- "ld1b { z7.b }, p2/Z, [x9, #-5, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-5, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[2]\n"
"sdot z14.s, z6.b, z1.b[2]\n"
"sdot z18.s, z6.b, z2.b[2]\n"
"sdot z22.s, z6.b, z3.b[2]\n"
"sdot z26.s, z6.b, z4.b[2]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-4, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[2]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-4, MUL VL]\n"
"sdot z15.s, z7.b, z1.b[2]\n"
"sdot z19.s, z7.b, z2.b[2]\n"
"sdot z23.s, z7.b, z3.b[2]\n"
"sdot z27.s, z7.b, z4.b[2]\n"
- "ld1b { z7.b }, p2/Z, [x9, #-3, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-3, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[3]\n"
"sdot z12.s, z6.b, z1.b[3]\n"
"sdot z16.s, z6.b, z2.b[3]\n"
"sdot z20.s, z6.b, z3.b[3]\n"
"sdot z24.s, z6.b, z4.b[3]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[3]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-2, MUL VL]\n"
"sdot z13.s, z7.b, z1.b[3]\n"
"sdot z17.s, z7.b, z2.b[3]\n"
"sdot z21.s, z7.b, z3.b[3]\n"
"sdot z25.s, z7.b, z4.b[3]\n"
- "ld1b { z7.b }, p2/Z, [x9, #-1, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-1, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[3]\n"
"sdot z14.s, z6.b, z1.b[3]\n"
"sdot z18.s, z6.b, z2.b[3]\n"
@@ -1593,28 +1593,28 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sdot z27.s, z7.b, z4.b[3]\n"
"bgt 59b\n"
"60:" // Height 5: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
- "subs x27, x27, #0x4\n"
- "ld1rqb { z2.b }, p0/Z, [x24]\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "ld1rqb { z4.b }, p0/Z, [x22]\n"
- "ld1b { z6.b }, p2/Z, [x9]\n"
+ "ld1b { z6.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"sdot z8.s, z6.b, z0.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "sdot z9.s, z7.b, z0.b[0]\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
"sdot z12.s, z6.b, z1.b[0]\n"
- "ld1b { z7.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1rqb { z4.b }, p0/Z, [x21]\n"
+ "sdot z13.s, z7.b, z1.b[0]\n"
"sdot z16.s, z6.b, z2.b[0]\n"
"sdot z20.s, z6.b, z3.b[0]\n"
"sdot z24.s, z6.b, z4.b[0]\n"
- "sdot z9.s, z7.b, z0.b[0]\n"
- "ld1b { z6.b }, p2/Z, [x9, #2, MUL VL]\n"
- "sdot z13.s, z7.b, z1.b[0]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
"sdot z17.s, z7.b, z2.b[0]\n"
"sdot z21.s, z7.b, z3.b[0]\n"
"sdot z25.s, z7.b, z4.b[0]\n"
- "ld1b { z7.b }, p2/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"sdot z10.s, z6.b, z0.b[0]\n"
"sdot z14.s, z6.b, z1.b[0]\n"
"sdot z18.s, z6.b, z2.b[0]\n"
@@ -1626,22 +1626,22 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sdot z23.s, z7.b, z3.b[0]\n"
"sdot z27.s, z7.b, z4.b[0]\n"
"ble 61f\n"
- "ld1b { z6.b }, p2/Z, [x9]\n"
- "ld1b { z7.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28]\n"
"sdot z8.s, z6.b, z0.b[1]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"sdot z12.s, z6.b, z1.b[1]\n"
"sdot z16.s, z6.b, z2.b[1]\n"
"sdot z20.s, z6.b, z3.b[1]\n"
- "subs x27, x27, #0x4\n"
"sdot z24.s, z6.b, z4.b[1]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[1]\n"
- "ld1b { z6.b }, p2/Z, [x9, #2, MUL VL]\n"
"sdot z13.s, z7.b, z1.b[1]\n"
"sdot z17.s, z7.b, z2.b[1]\n"
"sdot z21.s, z7.b, z3.b[1]\n"
"sdot z25.s, z7.b, z4.b[1]\n"
- "ld1b { z7.b }, p2/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"sdot z10.s, z6.b, z0.b[1]\n"
"sdot z14.s, z6.b, z1.b[1]\n"
"sdot z18.s, z6.b, z2.b[1]\n"
@@ -1653,22 +1653,22 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sdot z23.s, z7.b, z3.b[1]\n"
"sdot z27.s, z7.b, z4.b[1]\n"
"ble 61f\n"
- "ld1b { z6.b }, p2/Z, [x9]\n"
- "ld1b { z7.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28]\n"
"sdot z8.s, z6.b, z0.b[2]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"sdot z12.s, z6.b, z1.b[2]\n"
"sdot z16.s, z6.b, z2.b[2]\n"
"sdot z20.s, z6.b, z3.b[2]\n"
- "subs x27, x27, #0x4\n"
"sdot z24.s, z6.b, z4.b[2]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[2]\n"
- "ld1b { z6.b }, p2/Z, [x9, #2, MUL VL]\n"
"sdot z13.s, z7.b, z1.b[2]\n"
"sdot z17.s, z7.b, z2.b[2]\n"
"sdot z21.s, z7.b, z3.b[2]\n"
"sdot z25.s, z7.b, z4.b[2]\n"
- "ld1b { z7.b }, p2/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"sdot z10.s, z6.b, z0.b[2]\n"
"sdot z14.s, z6.b, z1.b[2]\n"
"sdot z18.s, z6.b, z2.b[2]\n"
@@ -1680,21 +1680,21 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sdot z23.s, z7.b, z3.b[2]\n"
"sdot z27.s, z7.b, z4.b[2]\n"
"ble 61f\n"
- "ld1b { z6.b }, p2/Z, [x9]\n"
- "ld1b { z7.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28]\n"
"sdot z8.s, z6.b, z0.b[3]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
"sdot z12.s, z6.b, z1.b[3]\n"
"sdot z16.s, z6.b, z2.b[3]\n"
"sdot z20.s, z6.b, z3.b[3]\n"
"sdot z24.s, z6.b, z4.b[3]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[3]\n"
- "ld1b { z6.b }, p2/Z, [x9, #2, MUL VL]\n"
"sdot z13.s, z7.b, z1.b[3]\n"
"sdot z17.s, z7.b, z2.b[3]\n"
"sdot z21.s, z7.b, z3.b[3]\n"
"sdot z25.s, z7.b, z4.b[3]\n"
- "ld1b { z7.b }, p2/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"sdot z10.s, z6.b, z0.b[3]\n"
"sdot z14.s, z6.b, z1.b[3]\n"
"sdot z18.s, z6.b, z2.b[3]\n"
@@ -1706,29 +1706,29 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sdot z23.s, z7.b, z3.b[3]\n"
"sdot z27.s, z7.b, z4.b[3]\n"
"61:" // Height 5: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 56b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x11, x20\n"
- "ld1w { z0.s }, p2/Z, [x14]\n"
- "add x23, x24, x20\n"
- "ld1w { z1.s }, p2/Z, [x14, #1, MUL VL]\n"
- "ld1w { z2.s }, p2/Z, [x14, #2, MUL VL]\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
- "ld1w { z3.s }, p2/Z, [x14, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z0.s }, p2/Z, [x11]\n"
"add z8.s, z8.s, z0.s\n"
+ "ld1w { z1.s }, p2/Z, [x11, #1, MUL VL]\n"
+ "add x23, x9, x19\n"
+ "add z12.s, z12.s, z0.s\n"
+ "ld1w { z2.s }, p2/Z, [x11, #2, MUL VL]\n"
+ "add x22, x23, x19\n"
+ "add z16.s, z16.s, z0.s\n"
+ "ld1w { z3.s }, p2/Z, [x11, #3, MUL VL]\n"
+ "add x21, x22, x19\n"
"add z9.s, z9.s, z1.s\n"
- "addvl x14, x14, #4\n"
+ "add x20, x21, x19\n"
+ "add z13.s, z13.s, z1.s\n"
+ "addvl x11, x11, #4\n"
"add z10.s, z10.s, z2.s\n"
"add z11.s, z11.s, z3.s\n"
- "add z12.s, z12.s, z0.s\n"
- "add z13.s, z13.s, z1.s\n"
"add z14.s, z14.s, z2.s\n"
"add z15.s, z15.s, z3.s\n"
- "add z16.s, z16.s, z0.s\n"
"add z17.s, z17.s, z1.s\n"
"add z18.s, z18.s, z2.s\n"
"add z19.s, z19.s, z3.s\n"
@@ -1748,20 +1748,20 @@ void sve_hybrid_s8qs_dot_6x4VL (
"ld1w { z2.s }, p2/Z, [x12, #2, MUL VL]\n"
"ld1w { z6.s }, p2/Z, [x13, #2, MUL VL]\n"
"ld1w { z3.s }, p2/Z, [x12, #3, MUL VL]\n"
- "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x12, x12, #4\n"
+ "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
"b 63f\n"
"62:" // Height 5: per layer parameters
- "add x26, %x[qp], %[per_layer_right_shift]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z0.s }, p2/Z, [x26]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
+ "add x24, %x[qp], %[per_layer_right_shift]\n"
+ "ld1rw { z0.s }, p2/Z, [x24]\n"
"mov z1.d, z0.d\n"
- "mov z5.d, z4.d\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
"mov z2.d, z0.d\n"
- "mov z6.d, z4.d\n"
"mov z3.d, z0.d\n"
+ "mov z5.d, z4.d\n"
+ "mov z6.d, z4.d\n"
"mov z7.d, z4.d\n"
"63:" // Height 5: parameters loaded
".inst 0x04a47508 // sqrdmulh z8.s, z8.s, z4.s\n"
@@ -1788,191 +1788,191 @@ void sve_hybrid_s8qs_dot_6x4VL (
"and z4.d, z8.d, z0.d\n"
"and z5.d, z9.d, z1.d\n"
"and z6.d, z10.d, z2.d\n"
- "and z7.d, z11.d, z3.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
"sqadd z8.s, z8.s, z4.s\n"
"sqadd z9.s, z9.s, z5.s\n"
"sqadd z10.s, z10.s, z6.s\n"
- "sqadd z11.s, z11.s, z7.s\n"
+ "and z7.d, z11.d, z3.d\n"
"and z4.d, z12.d, z0.d\n"
"and z5.d, z13.d, z1.d\n"
- "and z6.d, z14.d, z2.d\n"
- "and z7.d, z15.d, z3.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
- "asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
+ "sqadd z11.s, z11.s, z7.s\n"
"sqadd z12.s, z12.s, z4.s\n"
"sqadd z13.s, z13.s, z5.s\n"
+ "and z6.d, z14.d, z2.d\n"
+ "and z7.d, z15.d, z3.d\n"
+ "and z4.d, z16.d, z0.d\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ "asr z4.s, z4.s, #0x1f\n"
"sqadd z14.s, z14.s, z6.s\n"
"sqadd z15.s, z15.s, z7.s\n"
- "and z4.d, z16.d, z0.d\n"
+ "sqadd z16.s, z16.s, z4.s\n"
"and z5.d, z17.d, z1.d\n"
"and z6.d, z18.d, z2.d\n"
"and z7.d, z19.d, z3.d\n"
- "asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
"asr z7.s, z7.s, #0x1f\n"
- "sqadd z16.s, z16.s, z4.s\n"
"sqadd z17.s, z17.s, z5.s\n"
"sqadd z18.s, z18.s, z6.s\n"
"sqadd z19.s, z19.s, z7.s\n"
"and z4.d, z20.d, z0.d\n"
"and z5.d, z21.d, z1.d\n"
"and z6.d, z22.d, z2.d\n"
- "and z7.d, z23.d, z3.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
"sqadd z20.s, z20.s, z4.s\n"
"sqadd z21.s, z21.s, z5.s\n"
"sqadd z22.s, z22.s, z6.s\n"
- "sqadd z23.s, z23.s, z7.s\n"
+ "and z7.d, z23.d, z3.d\n"
"and z4.d, z24.d, z0.d\n"
"and z5.d, z25.d, z1.d\n"
- "and z6.d, z26.d, z2.d\n"
- "and z7.d, z27.d, z3.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
- "asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
+ "sqadd z23.s, z23.s, z7.s\n"
"sqadd z24.s, z24.s, z4.s\n"
"sqadd z25.s, z25.s, z5.s\n"
+ "and z6.d, z26.d, z2.d\n"
+ "and z7.d, z27.d, z3.d\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "asr z7.s, z7.s, #0x1f\n"
"sqadd z26.s, z26.s, z6.s\n"
"sqadd z27.s, z27.s, z7.s\n"
"64:" // Height 5: no shift correction
- "add x25, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
".inst 0x44828808 // srshl z8.s, p2/M, z8.s, z0.s\n"
- "add z8.s, z8.s, z4.s\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
+ "add x24, %x[qp], %[minval]\n"
".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
- "add z9.s, z9.s, z4.s\n"
- "add z10.s, z10.s, z4.s\n"
+ "ld1rw { z5.s }, p2/Z, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
+ "ld1rw { z6.s }, p2/Z, [x24]\n"
".inst 0x4482880c // srshl z12.s, p2/M, z12.s, z0.s\n"
+ "add z8.s, z8.s, z4.s\n"
+ "add z9.s, z9.s, z4.s\n"
+ "add z10.s, z10.s, z4.s\n"
"add z11.s, z11.s, z4.s\n"
"add z12.s, z12.s, z4.s\n"
- ".inst 0x4482882d // srshl z13.s, p2/M, z13.s, z1.s\n"
- ".inst 0x4482884e // srshl z14.s, p2/M, z14.s, z2.s\n"
- "add z13.s, z13.s, z4.s\n"
- "add z14.s, z14.s, z4.s\n"
- ".inst 0x4482886f // srshl z15.s, p2/M, z15.s, z3.s\n"
- ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z15.s, z15.s, z4.s\n"
- "add z16.s, z16.s, z4.s\n"
- ".inst 0x44828831 // srshl z17.s, p2/M, z17.s, z1.s\n"
- ".inst 0x44828852 // srshl z18.s, p2/M, z18.s, z2.s\n"
- "add z17.s, z17.s, z4.s\n"
- "add z18.s, z18.s, z4.s\n"
- ".inst 0x44828873 // srshl z19.s, p2/M, z19.s, z3.s\n"
- ".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
- "add z19.s, z19.s, z4.s\n"
- "add z20.s, z20.s, z4.s\n"
- ".inst 0x44828835 // srshl z21.s, p2/M, z21.s, z1.s\n"
- ".inst 0x44828856 // srshl z22.s, p2/M, z22.s, z2.s\n"
- "add z21.s, z21.s, z4.s\n"
- "add z22.s, z22.s, z4.s\n"
- ".inst 0x44828877 // srshl z23.s, p2/M, z23.s, z3.s\n"
- ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
- "add z23.s, z23.s, z4.s\n"
- "add z24.s, z24.s, z4.s\n"
- ".inst 0x44828839 // srshl z25.s, p2/M, z25.s, z1.s\n"
- ".inst 0x4482885a // srshl z26.s, p2/M, z26.s, z2.s\n"
- "add z25.s, z25.s, z4.s\n"
- "add z26.s, z26.s, z4.s\n"
- ".inst 0x4482887b // srshl z27.s, p2/M, z27.s, z3.s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x25]\n"
- "add z27.s, z27.s, z4.s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x25]\n"
"smin z8.s, p2/M, z8.s, z6.s\n"
"smin z9.s, p2/M, z9.s, z6.s\n"
"smin z10.s, p2/M, z10.s, z6.s\n"
"smin z11.s, p2/M, z11.s, z6.s\n"
- "smin z12.s, p2/M, z12.s, z6.s\n"
- "smin z13.s, p2/M, z13.s, z6.s\n"
- "smin z14.s, p2/M, z14.s, z6.s\n"
- "smin z15.s, p2/M, z15.s, z6.s\n"
- "smin z16.s, p2/M, z16.s, z6.s\n"
- "smin z17.s, p2/M, z17.s, z6.s\n"
- "smin z18.s, p2/M, z18.s, z6.s\n"
- "smin z19.s, p2/M, z19.s, z6.s\n"
- "smin z20.s, p2/M, z20.s, z6.s\n"
- "smin z21.s, p2/M, z21.s, z6.s\n"
- "smin z22.s, p2/M, z22.s, z6.s\n"
- "smin z23.s, p2/M, z23.s, z6.s\n"
- "smin z24.s, p2/M, z24.s, z6.s\n"
- "smin z25.s, p2/M, z25.s, z6.s\n"
- "smin z26.s, p2/M, z26.s, z6.s\n"
- "smin z27.s, p2/M, z27.s, z6.s\n"
"smax z8.s, p2/M, z8.s, z5.s\n"
"smax z9.s, p2/M, z9.s, z5.s\n"
"smax z10.s, p2/M, z10.s, z5.s\n"
- "uzp1 z8.h, z8.h, z9.h\n"
"smax z11.s, p2/M, z11.s, z5.s\n"
- "smax z12.s, p2/M, z12.s, z5.s\n"
+ "smin z12.s, p2/M, z12.s, z6.s\n"
+ "uzp1 z8.h, z8.h, z9.h\n"
+ ".inst 0x4482882d // srshl z13.s, p2/M, z13.s, z1.s\n"
"uzp1 z9.h, z10.h, z11.h\n"
+ "smax z12.s, p2/M, z12.s, z5.s\n"
"uzp1 z8.b, z8.b, z9.b\n"
+ "st1b { z8.b }, p1, [x9]\n"
+ "add z13.s, z13.s, z4.s\n"
+ "addvl x9, x9, #1\n"
+ ".inst 0x4482884e // srshl z14.s, p2/M, z14.s, z2.s\n"
+ ".inst 0x4482886f // srshl z15.s, p2/M, z15.s, z3.s\n"
+ ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
+ "smin z13.s, p2/M, z13.s, z6.s\n"
+ ".inst 0x44828831 // srshl z17.s, p2/M, z17.s, z1.s\n"
+ "add z14.s, z14.s, z4.s\n"
+ "add z15.s, z15.s, z4.s\n"
+ "add z16.s, z16.s, z4.s\n"
+ "add z17.s, z17.s, z4.s\n"
"smax z13.s, p2/M, z13.s, z5.s\n"
- "smax z14.s, p2/M, z14.s, z5.s\n"
+ "smin z14.s, p2/M, z14.s, z6.s\n"
+ "smin z15.s, p2/M, z15.s, z6.s\n"
+ "smin z16.s, p2/M, z16.s, z6.s\n"
"uzp1 z12.h, z12.h, z13.h\n"
- "st1b { z8.b }, p1, [x11]\n"
+ "smax z14.s, p2/M, z14.s, z5.s\n"
"smax z15.s, p2/M, z15.s, z5.s\n"
"smax z16.s, p2/M, z16.s, z5.s\n"
+ "smin z17.s, p2/M, z17.s, z6.s\n"
+ ".inst 0x44828852 // srshl z18.s, p2/M, z18.s, z2.s\n"
"uzp1 z13.h, z14.h, z15.h\n"
+ ".inst 0x44828873 // srshl z19.s, p2/M, z19.s, z3.s\n"
"uzp1 z12.b, z12.b, z13.b\n"
+ "st1b { z12.b }, p1, [x23]\n"
+ "add z18.s, z18.s, z4.s\n"
"smax z17.s, p2/M, z17.s, z5.s\n"
- "smax z18.s, p2/M, z18.s, z5.s\n"
+ "add z19.s, z19.s, z4.s\n"
+ ".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
+ "smin z18.s, p2/M, z18.s, z6.s\n"
"uzp1 z16.h, z16.h, z17.h\n"
- "st1b { z12.b }, p1, [x24]\n"
+ "smin z19.s, p2/M, z19.s, z6.s\n"
+ "add z20.s, z20.s, z4.s\n"
+ "smax z18.s, p2/M, z18.s, z5.s\n"
+ ".inst 0x44828835 // srshl z21.s, p2/M, z21.s, z1.s\n"
"smax z19.s, p2/M, z19.s, z5.s\n"
- "smax z20.s, p2/M, z20.s, z5.s\n"
+ "smin z20.s, p2/M, z20.s, z6.s\n"
+ ".inst 0x44828856 // srshl z22.s, p2/M, z22.s, z2.s\n"
+ "add z21.s, z21.s, z4.s\n"
"uzp1 z17.h, z18.h, z19.h\n"
+ "smax z20.s, p2/M, z20.s, z5.s\n"
+ "add z22.s, z22.s, z4.s\n"
"uzp1 z16.b, z16.b, z17.b\n"
+ "st1b { z16.b }, p1, [x22]\n"
+ "smin z21.s, p2/M, z21.s, z6.s\n"
+ "smin z22.s, p2/M, z22.s, z6.s\n"
+ ".inst 0x44828877 // srshl z23.s, p2/M, z23.s, z3.s\n"
+ ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
+ ".inst 0x44828839 // srshl z25.s, p2/M, z25.s, z1.s\n"
"smax z21.s, p2/M, z21.s, z5.s\n"
- "smax z22.s, p2/M, z22.s, z5.s\n"
+ "add z23.s, z23.s, z4.s\n"
+ "add z24.s, z24.s, z4.s\n"
+ "add z25.s, z25.s, z4.s\n"
"uzp1 z20.h, z20.h, z21.h\n"
- "st1b { z16.b }, p1, [x23]\n"
+ "smax z22.s, p2/M, z22.s, z5.s\n"
+ "smin z23.s, p2/M, z23.s, z6.s\n"
+ "smin z24.s, p2/M, z24.s, z6.s\n"
+ "smin z25.s, p2/M, z25.s, z6.s\n"
+ ".inst 0x4482885a // srshl z26.s, p2/M, z26.s, z2.s\n"
"smax z23.s, p2/M, z23.s, z5.s\n"
"smax z24.s, p2/M, z24.s, z5.s\n"
+ "smax z25.s, p2/M, z25.s, z5.s\n"
+ "add z26.s, z26.s, z4.s\n"
"uzp1 z21.h, z22.h, z23.h\n"
+ ".inst 0x4482887b // srshl z27.s, p2/M, z27.s, z3.s\n"
+ "uzp1 z24.h, z24.h, z25.h\n"
"uzp1 z20.b, z20.b, z21.b\n"
- "smax z25.s, p2/M, z25.s, z5.s\n"
+ "st1b { z20.b }, p1, [x21]\n"
+ "add z27.s, z27.s, z4.s\n"
+ "smin z26.s, p2/M, z26.s, z6.s\n"
+ "smin z27.s, p2/M, z27.s, z6.s\n"
"smax z26.s, p2/M, z26.s, z5.s\n"
- "uzp1 z24.h, z24.h, z25.h\n"
- "st1b { z20.b }, p1, [x22]\n"
"smax z27.s, p2/M, z27.s, z5.s\n"
"uzp1 z25.h, z26.h, z27.h\n"
"uzp1 z24.b, z24.b, z25.b\n"
- "st1b { z24.b }, p1, [x21]\n"
- "addvl x11, x11, #1\n"
+ "st1b { z24.b }, p1, [x20]\n"
"65:" // Height 5: Writeback done
"decw x10, ALL, MUL #4\n"
"cmp x10, XZR\n"
"bgt 54b\n"
"b 80f\n"
"66:" // Height 6
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x6\n"
- "mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
+ "mov x11, %x[col_bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
+ "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x11, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x20, #0x6\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
"67:" // Height 6: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x10\n"
"mov z8.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z9.s, #0x0\n"
+ "whilelt p1.b, x19, x10\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"mov z12.s, #0x0\n"
@@ -1996,156 +1996,156 @@ void sve_hybrid_s8qs_dot_6x4VL (
"mov z30.s, #0x0\n"
"mov z31.s, #0x0\n"
"68:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"69:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 70f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 71f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
- "add x21, x21, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 71f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
+ "add x20, x20, x19\n"
"b 71f\n"
"70:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
+ "add x20, x21, x19\n"
"71:" // Height 6: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 73f\n"
"72:" // Height 6: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
- "sub x27, x27, #0x10\n"
- "ld1rqb { z2.b }, p0/Z, [x24]\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "cmp x27, #0x10\n"
- "add x26, x26, #0x10\n"
- "ld1rqb { z4.b }, p0/Z, [x22]\n"
- "ld1rqb { z5.b }, p0/Z, [x21]\n"
- "add x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
- "ld1b { z6.b }, p2/Z, [x9]\n"
- "ld1b { z7.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"sdot z8.s, z6.b, z0.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "cmp x26, #0x10\n"
+ "sdot z9.s, z7.b, z0.b[0]\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "add x25, x25, #0x10\n"
"sdot z12.s, z6.b, z1.b[0]\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
+ "add x24, x24, #0x10\n"
"sdot z16.s, z6.b, z2.b[0]\n"
- "sdot z20.s, z6.b, z3.b[0]\n"
+ "ld1rqb { z4.b }, p0/Z, [x21]\n"
"add x23, x23, #0x10\n"
+ "sdot z13.s, z7.b, z1.b[0]\n"
+ "ld1rqb { z5.b }, p0/Z, [x20]\n"
"add x22, x22, #0x10\n"
- "sdot z24.s, z6.b, z4.b[0]\n"
- "sdot z28.s, z6.b, z5.b[0]\n"
- "ld1b { z6.b }, p2/Z, [x9, #2, MUL VL]\n"
+ "sdot z20.s, z6.b, z3.b[0]\n"
"add x21, x21, #0x10\n"
- "sdot z9.s, z7.b, z0.b[0]\n"
- "sdot z13.s, z7.b, z1.b[0]\n"
"sdot z17.s, z7.b, z2.b[0]\n"
+ "add x20, x20, #0x10\n"
+ "sdot z24.s, z6.b, z4.b[0]\n"
+ "sdot z28.s, z6.b, z5.b[0]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
"sdot z21.s, z7.b, z3.b[0]\n"
"sdot z25.s, z7.b, z4.b[0]\n"
"sdot z29.s, z7.b, z5.b[0]\n"
- "ld1b { z7.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[0]\n"
"sdot z14.s, z6.b, z1.b[0]\n"
"sdot z18.s, z6.b, z2.b[0]\n"
"sdot z22.s, z6.b, z3.b[0]\n"
"sdot z26.s, z6.b, z4.b[0]\n"
"sdot z30.s, z6.b, z5.b[0]\n"
- "ld1b { z6.b }, p2/Z, [x9, #4, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #4, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[0]\n"
"sdot z15.s, z7.b, z1.b[0]\n"
"sdot z19.s, z7.b, z2.b[0]\n"
"sdot z23.s, z7.b, z3.b[0]\n"
"sdot z27.s, z7.b, z4.b[0]\n"
"sdot z31.s, z7.b, z5.b[0]\n"
- "ld1b { z7.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #5, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[1]\n"
"sdot z12.s, z6.b, z1.b[1]\n"
"sdot z16.s, z6.b, z2.b[1]\n"
"sdot z20.s, z6.b, z3.b[1]\n"
"sdot z24.s, z6.b, z4.b[1]\n"
"sdot z28.s, z6.b, z5.b[1]\n"
- "ld1b { z6.b }, p2/Z, [x9, #6, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #6, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[1]\n"
"sdot z13.s, z7.b, z1.b[1]\n"
"sdot z17.s, z7.b, z2.b[1]\n"
"sdot z21.s, z7.b, z3.b[1]\n"
"sdot z25.s, z7.b, z4.b[1]\n"
"sdot z29.s, z7.b, z5.b[1]\n"
- "ld1b { z7.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
+ "ld1b { z7.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #16\n"
"sdot z10.s, z6.b, z0.b[1]\n"
"sdot z14.s, z6.b, z1.b[1]\n"
"sdot z18.s, z6.b, z2.b[1]\n"
"sdot z22.s, z6.b, z3.b[1]\n"
"sdot z26.s, z6.b, z4.b[1]\n"
"sdot z30.s, z6.b, z5.b[1]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-8, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-8, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[1]\n"
"sdot z15.s, z7.b, z1.b[1]\n"
"sdot z19.s, z7.b, z2.b[1]\n"
"sdot z23.s, z7.b, z3.b[1]\n"
"sdot z27.s, z7.b, z4.b[1]\n"
"sdot z31.s, z7.b, z5.b[1]\n"
- "ld1b { z7.b }, p2/Z, [x9, #-7, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-7, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[2]\n"
"sdot z12.s, z6.b, z1.b[2]\n"
"sdot z16.s, z6.b, z2.b[2]\n"
"sdot z20.s, z6.b, z3.b[2]\n"
"sdot z24.s, z6.b, z4.b[2]\n"
"sdot z28.s, z6.b, z5.b[2]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-6, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-6, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[2]\n"
"sdot z13.s, z7.b, z1.b[2]\n"
"sdot z17.s, z7.b, z2.b[2]\n"
"sdot z21.s, z7.b, z3.b[2]\n"
"sdot z25.s, z7.b, z4.b[2]\n"
"sdot z29.s, z7.b, z5.b[2]\n"
- "ld1b { z7.b }, p2/Z, [x9, #-5, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-5, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[2]\n"
"sdot z14.s, z6.b, z1.b[2]\n"
"sdot z18.s, z6.b, z2.b[2]\n"
"sdot z22.s, z6.b, z3.b[2]\n"
"sdot z26.s, z6.b, z4.b[2]\n"
"sdot z30.s, z6.b, z5.b[2]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-4, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-4, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[2]\n"
"sdot z15.s, z7.b, z1.b[2]\n"
"sdot z19.s, z7.b, z2.b[2]\n"
"sdot z23.s, z7.b, z3.b[2]\n"
"sdot z27.s, z7.b, z4.b[2]\n"
"sdot z31.s, z7.b, z5.b[2]\n"
- "ld1b { z7.b }, p2/Z, [x9, #-3, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-3, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[3]\n"
"sdot z12.s, z6.b, z1.b[3]\n"
"sdot z16.s, z6.b, z2.b[3]\n"
"sdot z20.s, z6.b, z3.b[3]\n"
"sdot z24.s, z6.b, z4.b[3]\n"
"sdot z28.s, z6.b, z5.b[3]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-2, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[3]\n"
"sdot z13.s, z7.b, z1.b[3]\n"
"sdot z17.s, z7.b, z2.b[3]\n"
"sdot z21.s, z7.b, z3.b[3]\n"
"sdot z25.s, z7.b, z4.b[3]\n"
"sdot z29.s, z7.b, z5.b[3]\n"
- "ld1b { z7.b }, p2/Z, [x9, #-1, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-1, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[3]\n"
"sdot z14.s, z6.b, z1.b[3]\n"
"sdot z18.s, z6.b, z2.b[3]\n"
@@ -2160,31 +2160,31 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sdot z31.s, z7.b, z5.b[3]\n"
"bgt 72b\n"
"73:" // Height 6: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
- "subs x27, x27, #0x4\n"
- "ld1rqb { z2.b }, p0/Z, [x24]\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "ld1rqb { z4.b }, p0/Z, [x22]\n"
- "ld1rqb { z5.b }, p0/Z, [x21]\n"
- "ld1b { z6.b }, p2/Z, [x9]\n"
- "ld1b { z7.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"sdot z8.s, z6.b, z0.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "sdot z9.s, z7.b, z0.b[0]\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
"sdot z12.s, z6.b, z1.b[0]\n"
+ "ld1rqb { z4.b }, p0/Z, [x21]\n"
+ "sdot z13.s, z7.b, z1.b[0]\n"
+ "ld1rqb { z5.b }, p0/Z, [x20]\n"
"sdot z16.s, z6.b, z2.b[0]\n"
"sdot z20.s, z6.b, z3.b[0]\n"
"sdot z24.s, z6.b, z4.b[0]\n"
"sdot z28.s, z6.b, z5.b[0]\n"
- "ld1b { z6.b }, p2/Z, [x9, #2, MUL VL]\n"
- "sdot z9.s, z7.b, z0.b[0]\n"
- "sdot z13.s, z7.b, z1.b[0]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
"sdot z17.s, z7.b, z2.b[0]\n"
"sdot z21.s, z7.b, z3.b[0]\n"
"sdot z25.s, z7.b, z4.b[0]\n"
"sdot z29.s, z7.b, z5.b[0]\n"
- "ld1b { z7.b }, p2/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"sdot z10.s, z6.b, z0.b[0]\n"
"sdot z14.s, z6.b, z1.b[0]\n"
"sdot z18.s, z6.b, z2.b[0]\n"
@@ -2198,24 +2198,24 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sdot z27.s, z7.b, z4.b[0]\n"
"sdot z31.s, z7.b, z5.b[0]\n"
"ble 74f\n"
- "ld1b { z6.b }, p2/Z, [x9]\n"
- "ld1b { z7.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28]\n"
"sdot z8.s, z6.b, z0.b[1]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"sdot z12.s, z6.b, z1.b[1]\n"
"sdot z16.s, z6.b, z2.b[1]\n"
"sdot z20.s, z6.b, z3.b[1]\n"
- "subs x27, x27, #0x4\n"
"sdot z24.s, z6.b, z4.b[1]\n"
"sdot z28.s, z6.b, z5.b[1]\n"
- "ld1b { z6.b }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[1]\n"
"sdot z13.s, z7.b, z1.b[1]\n"
"sdot z17.s, z7.b, z2.b[1]\n"
"sdot z21.s, z7.b, z3.b[1]\n"
"sdot z25.s, z7.b, z4.b[1]\n"
"sdot z29.s, z7.b, z5.b[1]\n"
- "ld1b { z7.b }, p2/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"sdot z10.s, z6.b, z0.b[1]\n"
"sdot z14.s, z6.b, z1.b[1]\n"
"sdot z18.s, z6.b, z2.b[1]\n"
@@ -2229,24 +2229,24 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sdot z27.s, z7.b, z4.b[1]\n"
"sdot z31.s, z7.b, z5.b[1]\n"
"ble 74f\n"
- "ld1b { z6.b }, p2/Z, [x9]\n"
- "ld1b { z7.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28]\n"
"sdot z8.s, z6.b, z0.b[2]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"sdot z12.s, z6.b, z1.b[2]\n"
"sdot z16.s, z6.b, z2.b[2]\n"
"sdot z20.s, z6.b, z3.b[2]\n"
- "subs x27, x27, #0x4\n"
"sdot z24.s, z6.b, z4.b[2]\n"
"sdot z28.s, z6.b, z5.b[2]\n"
- "ld1b { z6.b }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[2]\n"
"sdot z13.s, z7.b, z1.b[2]\n"
"sdot z17.s, z7.b, z2.b[2]\n"
"sdot z21.s, z7.b, z3.b[2]\n"
"sdot z25.s, z7.b, z4.b[2]\n"
"sdot z29.s, z7.b, z5.b[2]\n"
- "ld1b { z7.b }, p2/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"sdot z10.s, z6.b, z0.b[2]\n"
"sdot z14.s, z6.b, z1.b[2]\n"
"sdot z18.s, z6.b, z2.b[2]\n"
@@ -2260,23 +2260,23 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sdot z27.s, z7.b, z4.b[2]\n"
"sdot z31.s, z7.b, z5.b[2]\n"
"ble 74f\n"
- "ld1b { z6.b }, p2/Z, [x9]\n"
- "ld1b { z7.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28]\n"
"sdot z8.s, z6.b, z0.b[3]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
"sdot z12.s, z6.b, z1.b[3]\n"
"sdot z16.s, z6.b, z2.b[3]\n"
"sdot z20.s, z6.b, z3.b[3]\n"
"sdot z24.s, z6.b, z4.b[3]\n"
"sdot z28.s, z6.b, z5.b[3]\n"
- "ld1b { z6.b }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[3]\n"
"sdot z13.s, z7.b, z1.b[3]\n"
"sdot z17.s, z7.b, z2.b[3]\n"
"sdot z21.s, z7.b, z3.b[3]\n"
"sdot z25.s, z7.b, z4.b[3]\n"
"sdot z29.s, z7.b, z5.b[3]\n"
- "ld1b { z7.b }, p2/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"sdot z10.s, z6.b, z0.b[3]\n"
"sdot z14.s, z6.b, z1.b[3]\n"
"sdot z18.s, z6.b, z2.b[3]\n"
@@ -2290,30 +2290,30 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sdot z27.s, z7.b, z4.b[3]\n"
"sdot z31.s, z7.b, z5.b[3]\n"
"74:" // Height 6: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 69b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x11, x20\n"
- "add x23, x24, x20\n"
- "ld1w { z0.s }, p2/Z, [x14]\n"
- "ld1w { z1.s }, p2/Z, [x14, #1, MUL VL]\n"
- "ld1w { z2.s }, p2/Z, [x14, #2, MUL VL]\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
- "ld1w { z3.s }, p2/Z, [x14, #3, MUL VL]\n"
- "add x20, x21, x20\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z0.s }, p2/Z, [x11]\n"
"add z8.s, z8.s, z0.s\n"
+ "ld1w { z1.s }, p2/Z, [x11, #1, MUL VL]\n"
+ "add x23, x9, x19\n"
+ "add z12.s, z12.s, z0.s\n"
+ "ld1w { z2.s }, p2/Z, [x11, #2, MUL VL]\n"
+ "add x22, x23, x19\n"
+ "add z16.s, z16.s, z0.s\n"
+ "ld1w { z3.s }, p2/Z, [x11, #3, MUL VL]\n"
+ "add x21, x22, x19\n"
"add z9.s, z9.s, z1.s\n"
+ "add x20, x21, x19\n"
+ "add z13.s, z13.s, z1.s\n"
+ "add x19, x20, x19\n"
"add z10.s, z10.s, z2.s\n"
+ "addvl x11, x11, #4\n"
"add z11.s, z11.s, z3.s\n"
- "addvl x14, x14, #4\n"
- "add z12.s, z12.s, z0.s\n"
- "add z13.s, z13.s, z1.s\n"
"add z14.s, z14.s, z2.s\n"
"add z15.s, z15.s, z3.s\n"
- "add z16.s, z16.s, z0.s\n"
"add z17.s, z17.s, z1.s\n"
"add z18.s, z18.s, z2.s\n"
"add z19.s, z19.s, z3.s\n"
@@ -2337,20 +2337,20 @@ void sve_hybrid_s8qs_dot_6x4VL (
"ld1w { z2.s }, p2/Z, [x12, #2, MUL VL]\n"
"ld1w { z6.s }, p2/Z, [x13, #2, MUL VL]\n"
"ld1w { z3.s }, p2/Z, [x12, #3, MUL VL]\n"
- "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x12, x12, #4\n"
+ "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
"b 76f\n"
"75:" // Height 6: per layer parameters
- "add x26, %x[qp], %[per_layer_right_shift]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z0.s }, p2/Z, [x26]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
+ "add x24, %x[qp], %[per_layer_right_shift]\n"
+ "ld1rw { z0.s }, p2/Z, [x24]\n"
"mov z1.d, z0.d\n"
- "mov z5.d, z4.d\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
"mov z2.d, z0.d\n"
- "mov z6.d, z4.d\n"
"mov z3.d, z0.d\n"
+ "mov z5.d, z4.d\n"
+ "mov z6.d, z4.d\n"
"mov z7.d, z4.d\n"
"76:" // Height 6: parameters loaded
".inst 0x04a47508 // sqrdmulh z8.s, z8.s, z4.s\n"
@@ -2381,223 +2381,223 @@ void sve_hybrid_s8qs_dot_6x4VL (
"and z4.d, z8.d, z0.d\n"
"and z5.d, z9.d, z1.d\n"
"and z6.d, z10.d, z2.d\n"
- "and z7.d, z11.d, z3.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
"sqadd z8.s, z8.s, z4.s\n"
"sqadd z9.s, z9.s, z5.s\n"
"sqadd z10.s, z10.s, z6.s\n"
- "sqadd z11.s, z11.s, z7.s\n"
+ "and z7.d, z11.d, z3.d\n"
"and z4.d, z12.d, z0.d\n"
"and z5.d, z13.d, z1.d\n"
- "and z6.d, z14.d, z2.d\n"
- "and z7.d, z15.d, z3.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
- "asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
+ "sqadd z11.s, z11.s, z7.s\n"
"sqadd z12.s, z12.s, z4.s\n"
"sqadd z13.s, z13.s, z5.s\n"
+ "and z6.d, z14.d, z2.d\n"
+ "and z7.d, z15.d, z3.d\n"
+ "and z4.d, z16.d, z0.d\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ "asr z4.s, z4.s, #0x1f\n"
"sqadd z14.s, z14.s, z6.s\n"
"sqadd z15.s, z15.s, z7.s\n"
- "and z4.d, z16.d, z0.d\n"
+ "sqadd z16.s, z16.s, z4.s\n"
"and z5.d, z17.d, z1.d\n"
"and z6.d, z18.d, z2.d\n"
"and z7.d, z19.d, z3.d\n"
- "asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
"asr z7.s, z7.s, #0x1f\n"
- "sqadd z16.s, z16.s, z4.s\n"
"sqadd z17.s, z17.s, z5.s\n"
"sqadd z18.s, z18.s, z6.s\n"
"sqadd z19.s, z19.s, z7.s\n"
"and z4.d, z20.d, z0.d\n"
"and z5.d, z21.d, z1.d\n"
"and z6.d, z22.d, z2.d\n"
- "and z7.d, z23.d, z3.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
"sqadd z20.s, z20.s, z4.s\n"
"sqadd z21.s, z21.s, z5.s\n"
"sqadd z22.s, z22.s, z6.s\n"
- "sqadd z23.s, z23.s, z7.s\n"
+ "and z7.d, z23.d, z3.d\n"
"and z4.d, z24.d, z0.d\n"
"and z5.d, z25.d, z1.d\n"
- "and z6.d, z26.d, z2.d\n"
- "and z7.d, z27.d, z3.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
- "asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
+ "sqadd z23.s, z23.s, z7.s\n"
"sqadd z24.s, z24.s, z4.s\n"
"sqadd z25.s, z25.s, z5.s\n"
+ "and z6.d, z26.d, z2.d\n"
+ "and z7.d, z27.d, z3.d\n"
+ "and z4.d, z28.d, z0.d\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ "asr z4.s, z4.s, #0x1f\n"
"sqadd z26.s, z26.s, z6.s\n"
"sqadd z27.s, z27.s, z7.s\n"
- "and z4.d, z28.d, z0.d\n"
+ "sqadd z28.s, z28.s, z4.s\n"
"and z5.d, z29.d, z1.d\n"
"and z6.d, z30.d, z2.d\n"
"and z7.d, z31.d, z3.d\n"
- "asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
"asr z7.s, z7.s, #0x1f\n"
- "sqadd z28.s, z28.s, z4.s\n"
"sqadd z29.s, z29.s, z5.s\n"
"sqadd z30.s, z30.s, z6.s\n"
"sqadd z31.s, z31.s, z7.s\n"
"77:" // Height 6: no shift correction
- "add x25, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
".inst 0x44828808 // srshl z8.s, p2/M, z8.s, z0.s\n"
- "add z8.s, z8.s, z4.s\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
+ "add x24, %x[qp], %[minval]\n"
".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
- "add z9.s, z9.s, z4.s\n"
- "add z10.s, z10.s, z4.s\n"
+ "ld1rw { z5.s }, p2/Z, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
+ "ld1rw { z6.s }, p2/Z, [x24]\n"
".inst 0x4482880c // srshl z12.s, p2/M, z12.s, z0.s\n"
+ "add z8.s, z8.s, z4.s\n"
+ "add z9.s, z9.s, z4.s\n"
+ "add z10.s, z10.s, z4.s\n"
"add z11.s, z11.s, z4.s\n"
"add z12.s, z12.s, z4.s\n"
- ".inst 0x4482882d // srshl z13.s, p2/M, z13.s, z1.s\n"
- ".inst 0x4482884e // srshl z14.s, p2/M, z14.s, z2.s\n"
- "add z13.s, z13.s, z4.s\n"
- "add z14.s, z14.s, z4.s\n"
- ".inst 0x4482886f // srshl z15.s, p2/M, z15.s, z3.s\n"
- ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z15.s, z15.s, z4.s\n"
- "add z16.s, z16.s, z4.s\n"
- ".inst 0x44828831 // srshl z17.s, p2/M, z17.s, z1.s\n"
- ".inst 0x44828852 // srshl z18.s, p2/M, z18.s, z2.s\n"
- "add z17.s, z17.s, z4.s\n"
- "add z18.s, z18.s, z4.s\n"
- ".inst 0x44828873 // srshl z19.s, p2/M, z19.s, z3.s\n"
- ".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
- "add z19.s, z19.s, z4.s\n"
- "add z20.s, z20.s, z4.s\n"
- ".inst 0x44828835 // srshl z21.s, p2/M, z21.s, z1.s\n"
- ".inst 0x44828856 // srshl z22.s, p2/M, z22.s, z2.s\n"
- "add z21.s, z21.s, z4.s\n"
- "add z22.s, z22.s, z4.s\n"
- ".inst 0x44828877 // srshl z23.s, p2/M, z23.s, z3.s\n"
- ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
- "add z23.s, z23.s, z4.s\n"
- "add z24.s, z24.s, z4.s\n"
- ".inst 0x44828839 // srshl z25.s, p2/M, z25.s, z1.s\n"
- ".inst 0x4482885a // srshl z26.s, p2/M, z26.s, z2.s\n"
- "add z25.s, z25.s, z4.s\n"
- "add z26.s, z26.s, z4.s\n"
- ".inst 0x4482887b // srshl z27.s, p2/M, z27.s, z3.s\n"
- ".inst 0x4482881c // srshl z28.s, p2/M, z28.s, z0.s\n"
- "add z27.s, z27.s, z4.s\n"
- "add z28.s, z28.s, z4.s\n"
- ".inst 0x4482883d // srshl z29.s, p2/M, z29.s, z1.s\n"
- ".inst 0x4482885e // srshl z30.s, p2/M, z30.s, z2.s\n"
- "add z29.s, z29.s, z4.s\n"
- "add z30.s, z30.s, z4.s\n"
- ".inst 0x4482887f // srshl z31.s, p2/M, z31.s, z3.s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x25]\n"
- "add z31.s, z31.s, z4.s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x25]\n"
"smin z8.s, p2/M, z8.s, z6.s\n"
"smin z9.s, p2/M, z9.s, z6.s\n"
"smin z10.s, p2/M, z10.s, z6.s\n"
"smin z11.s, p2/M, z11.s, z6.s\n"
- "smin z12.s, p2/M, z12.s, z6.s\n"
- "smin z13.s, p2/M, z13.s, z6.s\n"
- "smin z14.s, p2/M, z14.s, z6.s\n"
- "smin z15.s, p2/M, z15.s, z6.s\n"
- "smin z16.s, p2/M, z16.s, z6.s\n"
- "smin z17.s, p2/M, z17.s, z6.s\n"
- "smin z18.s, p2/M, z18.s, z6.s\n"
- "smin z19.s, p2/M, z19.s, z6.s\n"
- "smin z20.s, p2/M, z20.s, z6.s\n"
- "smin z21.s, p2/M, z21.s, z6.s\n"
- "smin z22.s, p2/M, z22.s, z6.s\n"
- "smin z23.s, p2/M, z23.s, z6.s\n"
- "smin z24.s, p2/M, z24.s, z6.s\n"
- "smin z25.s, p2/M, z25.s, z6.s\n"
- "smin z26.s, p2/M, z26.s, z6.s\n"
- "smin z27.s, p2/M, z27.s, z6.s\n"
- "smin z28.s, p2/M, z28.s, z6.s\n"
- "smin z29.s, p2/M, z29.s, z6.s\n"
- "smin z30.s, p2/M, z30.s, z6.s\n"
- "smin z31.s, p2/M, z31.s, z6.s\n"
"smax z8.s, p2/M, z8.s, z5.s\n"
"smax z9.s, p2/M, z9.s, z5.s\n"
"smax z10.s, p2/M, z10.s, z5.s\n"
- "uzp1 z8.h, z8.h, z9.h\n"
"smax z11.s, p2/M, z11.s, z5.s\n"
- "smax z12.s, p2/M, z12.s, z5.s\n"
+ "smin z12.s, p2/M, z12.s, z6.s\n"
+ "uzp1 z8.h, z8.h, z9.h\n"
+ ".inst 0x4482882d // srshl z13.s, p2/M, z13.s, z1.s\n"
"uzp1 z9.h, z10.h, z11.h\n"
+ "smax z12.s, p2/M, z12.s, z5.s\n"
"uzp1 z8.b, z8.b, z9.b\n"
+ "st1b { z8.b }, p1, [x9]\n"
+ "add z13.s, z13.s, z4.s\n"
+ "addvl x9, x9, #1\n"
+ ".inst 0x4482884e // srshl z14.s, p2/M, z14.s, z2.s\n"
+ ".inst 0x4482886f // srshl z15.s, p2/M, z15.s, z3.s\n"
+ ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
+ "smin z13.s, p2/M, z13.s, z6.s\n"
+ ".inst 0x44828831 // srshl z17.s, p2/M, z17.s, z1.s\n"
+ "add z14.s, z14.s, z4.s\n"
+ "add z15.s, z15.s, z4.s\n"
+ "add z16.s, z16.s, z4.s\n"
+ "add z17.s, z17.s, z4.s\n"
"smax z13.s, p2/M, z13.s, z5.s\n"
- "smax z14.s, p2/M, z14.s, z5.s\n"
+ "smin z14.s, p2/M, z14.s, z6.s\n"
+ "smin z15.s, p2/M, z15.s, z6.s\n"
+ "smin z16.s, p2/M, z16.s, z6.s\n"
"uzp1 z12.h, z12.h, z13.h\n"
- "st1b { z8.b }, p1, [x11]\n"
+ "smax z14.s, p2/M, z14.s, z5.s\n"
"smax z15.s, p2/M, z15.s, z5.s\n"
"smax z16.s, p2/M, z16.s, z5.s\n"
+ "smin z17.s, p2/M, z17.s, z6.s\n"
+ ".inst 0x44828852 // srshl z18.s, p2/M, z18.s, z2.s\n"
"uzp1 z13.h, z14.h, z15.h\n"
+ ".inst 0x44828873 // srshl z19.s, p2/M, z19.s, z3.s\n"
"uzp1 z12.b, z12.b, z13.b\n"
+ "st1b { z12.b }, p1, [x23]\n"
+ "add z18.s, z18.s, z4.s\n"
"smax z17.s, p2/M, z17.s, z5.s\n"
- "smax z18.s, p2/M, z18.s, z5.s\n"
+ "add z19.s, z19.s, z4.s\n"
+ ".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
+ "smin z18.s, p2/M, z18.s, z6.s\n"
"uzp1 z16.h, z16.h, z17.h\n"
- "st1b { z12.b }, p1, [x24]\n"
+ "smin z19.s, p2/M, z19.s, z6.s\n"
+ "add z20.s, z20.s, z4.s\n"
+ "smax z18.s, p2/M, z18.s, z5.s\n"
+ ".inst 0x44828835 // srshl z21.s, p2/M, z21.s, z1.s\n"
"smax z19.s, p2/M, z19.s, z5.s\n"
- "smax z20.s, p2/M, z20.s, z5.s\n"
+ "smin z20.s, p2/M, z20.s, z6.s\n"
+ ".inst 0x44828856 // srshl z22.s, p2/M, z22.s, z2.s\n"
+ "add z21.s, z21.s, z4.s\n"
"uzp1 z17.h, z18.h, z19.h\n"
+ "smax z20.s, p2/M, z20.s, z5.s\n"
+ "add z22.s, z22.s, z4.s\n"
"uzp1 z16.b, z16.b, z17.b\n"
+ "st1b { z16.b }, p1, [x22]\n"
+ "smin z21.s, p2/M, z21.s, z6.s\n"
+ "smin z22.s, p2/M, z22.s, z6.s\n"
+ ".inst 0x44828877 // srshl z23.s, p2/M, z23.s, z3.s\n"
+ ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
+ ".inst 0x44828839 // srshl z25.s, p2/M, z25.s, z1.s\n"
"smax z21.s, p2/M, z21.s, z5.s\n"
- "smax z22.s, p2/M, z22.s, z5.s\n"
+ "add z23.s, z23.s, z4.s\n"
+ "add z24.s, z24.s, z4.s\n"
+ "add z25.s, z25.s, z4.s\n"
"uzp1 z20.h, z20.h, z21.h\n"
- "st1b { z16.b }, p1, [x23]\n"
+ "smax z22.s, p2/M, z22.s, z5.s\n"
+ "smin z23.s, p2/M, z23.s, z6.s\n"
+ "smin z24.s, p2/M, z24.s, z6.s\n"
+ "smin z25.s, p2/M, z25.s, z6.s\n"
+ ".inst 0x4482885a // srshl z26.s, p2/M, z26.s, z2.s\n"
"smax z23.s, p2/M, z23.s, z5.s\n"
"smax z24.s, p2/M, z24.s, z5.s\n"
+ "smax z25.s, p2/M, z25.s, z5.s\n"
+ "add z26.s, z26.s, z4.s\n"
"uzp1 z21.h, z22.h, z23.h\n"
+ ".inst 0x4482887b // srshl z27.s, p2/M, z27.s, z3.s\n"
+ "uzp1 z24.h, z24.h, z25.h\n"
"uzp1 z20.b, z20.b, z21.b\n"
- "smax z25.s, p2/M, z25.s, z5.s\n"
+ "st1b { z20.b }, p1, [x21]\n"
+ "add z27.s, z27.s, z4.s\n"
+ "smin z26.s, p2/M, z26.s, z6.s\n"
+ ".inst 0x4482881c // srshl z28.s, p2/M, z28.s, z0.s\n"
+ ".inst 0x4482883d // srshl z29.s, p2/M, z29.s, z1.s\n"
+ "smin z27.s, p2/M, z27.s, z6.s\n"
"smax z26.s, p2/M, z26.s, z5.s\n"
- "uzp1 z24.h, z24.h, z25.h\n"
- "st1b { z20.b }, p1, [x22]\n"
+ "add z28.s, z28.s, z4.s\n"
+ "add z29.s, z29.s, z4.s\n"
"smax z27.s, p2/M, z27.s, z5.s\n"
- "smax z28.s, p2/M, z28.s, z5.s\n"
+ "smin z28.s, p2/M, z28.s, z6.s\n"
+ "smin z29.s, p2/M, z29.s, z6.s\n"
+ ".inst 0x4482885e // srshl z30.s, p2/M, z30.s, z2.s\n"
"uzp1 z25.h, z26.h, z27.h\n"
+ "smax z28.s, p2/M, z28.s, z5.s\n"
"uzp1 z24.b, z24.b, z25.b\n"
+ "st1b { z24.b }, p1, [x20]\n"
+ "add z30.s, z30.s, z4.s\n"
"smax z29.s, p2/M, z29.s, z5.s\n"
- "smax z30.s, p2/M, z30.s, z5.s\n"
+ ".inst 0x4482887f // srshl z31.s, p2/M, z31.s, z3.s\n"
+ "smin z30.s, p2/M, z30.s, z6.s\n"
"uzp1 z28.h, z28.h, z29.h\n"
- "st1b { z24.b }, p1, [x21]\n"
+ "add z31.s, z31.s, z4.s\n"
+ "smax z30.s, p2/M, z30.s, z5.s\n"
+ "smin z31.s, p2/M, z31.s, z6.s\n"
"smax z31.s, p2/M, z31.s, z5.s\n"
"uzp1 z29.h, z30.h, z31.h\n"
"uzp1 z28.b, z28.b, z29.b\n"
- "st1b { z28.b }, p1, [x20]\n"
- "addvl x11, x11, #1\n"
+ "st1b { z28.b }, p1, [x19]\n"
"78:" // Height 6: Writeback done
"decw x10, ALL, MUL #4\n"
"cmp x10, XZR\n"
"bgt 67b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 80f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 79f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"79:" // Update direct input
- "mov x20, #0x6\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x6\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"80:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [flags] "r" (flags), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_multiplier_ptr] "I" (offsetof(KernelArgs, multiplier_ptr)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_shift_ptr] "I" (offsetof(KernelArgs, shift_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
- : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_mmla_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_mmla_6x4VL/generic.cpp
index 6041794bdb..6aba002706 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_mmla_6x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_mmla_6x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -95,17 +95,17 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"cmp %x[M], #0x2\n"
"bgt 27f\n"
"beq 14f\n"
- "mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x11, %x[col_bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x9, %x[output_ptr]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"2:" // Height 1: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x10\n"
"mov z8.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z9.s, #0x0\n"
+ "whilelt p1.b, x19, x10\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"mov z12.s, #0x0\n"
@@ -113,124 +113,124 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
"3:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"4:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 5f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 6f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 6f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
"b 6f\n"
"5:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"6:" // Height 1: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 8f\n"
"7:" // Height 1: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "sub x26, x26, #0x10\n"
+ "trn2 z1.d, z1.d, z2.d\n"
+ "cmp x26, #0x10\n"
+ "add x25, x25, #0x10\n"
".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
- "trn2 z1.d, z1.d, z2.d\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #16\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-8, MUL VL]\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-8, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-7, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-7, MUL VL]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-6, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-6, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-5, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-4, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-3, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-2, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-2, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-1, MUL VL]\n"
- "sub x27, x27, #0x10\n"
- "cmp x27, #0x10\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-1, MUL VL]\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x4506982f // smmla z15.s, z1.b, z6.b\n"
- "add x26, x26, #0x10\n"
"bgt 7b\n"
"8:" // Height 1: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x8\n"
+ "trn2 z1.d, z1.d, z2.d\n"
".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
- "subs x27, x27, #0x8\n"
- "trn2 z1.d, z1.d, z2.d\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
- "addvl x9, x9, #8\n"
"ble 9f\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x4506982f // smmla z15.s, z1.b, z6.b\n"
- "addvl x9, x9, #8\n"
"9:" // Height 1: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 4b\n"
"uzp1 z8.d, z8.d, z12.d\n"
+ "ld1w { z0.s }, p2/Z, [x11]\n"
"uzp1 z9.d, z9.d, z13.d\n"
- "ld1w { z0.s }, p2/Z, [x14]\n"
- "ld1w { z1.s }, p2/Z, [x14, #1, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x11, #1, MUL VL]\n"
"uzp1 z10.d, z10.d, z14.d\n"
+ "ld1w { z2.s }, p2/Z, [x11, #2, MUL VL]\n"
"uzp1 z11.d, z11.d, z15.d\n"
- "ld1w { z2.s }, p2/Z, [x14, #2, MUL VL]\n"
- "ld1w { z3.s }, p2/Z, [x14, #3, MUL VL]\n"
+ "ld1w { z3.s }, p2/Z, [x11, #3, MUL VL]\n"
+ "addvl x11, x11, #4\n"
"mov z15.d, z8.d\n"
- "add z15.s, z15.s, z0.s\n"
- "addvl x14, x14, #4\n"
"add z9.s, z9.s, z1.s\n"
+ "add z15.s, z15.s, z0.s\n"
"add z10.s, z10.s, z2.s\n"
"add z11.s, z11.s, z3.s\n"
"tbz %x[flags], #4, 10f\n"
@@ -241,20 +241,20 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"ld1w { z2.s }, p2/Z, [x12, #2, MUL VL]\n"
"ld1w { z6.s }, p2/Z, [x13, #2, MUL VL]\n"
"ld1w { z3.s }, p2/Z, [x12, #3, MUL VL]\n"
- "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x12, x12, #4\n"
+ "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
"b 11f\n"
"10:" // Height 1: per layer parameters
- "add x26, %x[qp], %[per_layer_right_shift]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z0.s }, p2/Z, [x26]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
+ "add x24, %x[qp], %[per_layer_right_shift]\n"
+ "ld1rw { z0.s }, p2/Z, [x24]\n"
"mov z1.d, z0.d\n"
- "mov z5.d, z4.d\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
"mov z2.d, z0.d\n"
- "mov z6.d, z4.d\n"
"mov z3.d, z0.d\n"
+ "mov z5.d, z4.d\n"
+ "mov z6.d, z4.d\n"
"mov z7.d, z4.d\n"
"11:" // Height 1: parameters loaded
".inst 0x04a475ef // sqrdmulh z15.s, z15.s, z4.s\n"
@@ -269,26 +269,26 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
"sqadd z15.s, z15.s, z4.s\n"
"sqadd z9.s, z9.s, z5.s\n"
"sqadd z10.s, z10.s, z6.s\n"
+ "asr z7.s, z7.s, #0x1f\n"
"sqadd z11.s, z11.s, z7.s\n"
"12:" // Height 1: no shift correction
- "add x25, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
".inst 0x4482880f // srshl z15.s, p2/M, z15.s, z0.s\n"
- "add z15.s, z15.s, z4.s\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
+ "add x24, %x[qp], %[minval]\n"
".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
+ "ld1rw { z5.s }, p2/Z, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
+ ".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
+ "ld1rw { z6.s }, p2/Z, [x24]\n"
+ "add z15.s, z15.s, z4.s\n"
"add z9.s, z9.s, z4.s\n"
"add z10.s, z10.s, z4.s\n"
- ".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x25]\n"
"add z11.s, z11.s, z4.s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x25]\n"
"smin z15.s, p2/M, z15.s, z6.s\n"
"smin z9.s, p2/M, z9.s, z6.s\n"
"smin z10.s, p2/M, z10.s, z6.s\n"
@@ -296,29 +296,29 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"smax z15.s, p2/M, z15.s, z5.s\n"
"smax z9.s, p2/M, z9.s, z5.s\n"
"smax z10.s, p2/M, z10.s, z5.s\n"
- "uzp1 z15.h, z15.h, z9.h\n"
"smax z11.s, p2/M, z11.s, z5.s\n"
+ "uzp1 z15.h, z15.h, z9.h\n"
"uzp1 z9.h, z10.h, z11.h\n"
"uzp1 z15.b, z15.b, z9.b\n"
- "st1b { z15.b }, p1, [x11]\n"
- "addvl x11, x11, #1\n"
+ "st1b { z15.b }, p1, [x9]\n"
+ "addvl x9, x9, #1\n"
"13:" // Height 1: Writeback done
"decw x10, ALL, MUL #4\n"
"cmp x10, XZR\n"
"bgt 2b\n"
"b 80f\n"
"14:" // Height 2
- "mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
+ "mov x11, %x[col_bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"15:" // Height 2: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x10\n"
"mov z8.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z9.s, #0x0\n"
+ "whilelt p1.b, x19, x10\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"mov z12.s, #0x0\n"
@@ -326,133 +326,133 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
"16:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"17:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 18f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 19f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 19f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
"b 19f\n"
"18:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
"19:" // Height 2: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 21f\n"
"20:" // Height 2: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "cmp x26, #0x10\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "add x25, x25, #0x10\n"
+ "trn2 z1.d, z1.d, z2.d\n"
+ "add x24, x24, #0x10\n"
".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
- "trn2 z1.d, z1.d, z2.d\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #16\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-8, MUL VL]\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-8, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-7, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-7, MUL VL]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-6, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-6, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-5, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-4, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-3, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-2, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-2, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-1, MUL VL]\n"
- "sub x27, x27, #0x10\n"
- "cmp x27, #0x10\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-1, MUL VL]\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x4506982f // smmla z15.s, z1.b, z6.b\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
"bgt 20b\n"
"21:" // Height 2: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x26, x26, #0x8\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "trn2 z1.d, z1.d, z2.d\n"
".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
- "subs x27, x27, #0x8\n"
- "trn2 z1.d, z1.d, z2.d\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
- "addvl x9, x9, #8\n"
"ble 22f\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x4506982f // smmla z15.s, z1.b, z6.b\n"
- "addvl x9, x9, #8\n"
"22:" // Height 2: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 17b\n"
"uzp1 z7.d, z8.d, z12.d\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 z8.d, z8.d, z12.d\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "ld1w { z0.s }, p2/Z, [x14]\n"
+ "ld1w { z0.s }, p2/Z, [x11]\n"
+ "add x23, x9, x19\n"
"uzp1 z12.d, z9.d, z13.d\n"
+ "ld1w { z1.s }, p2/Z, [x11, #1, MUL VL]\n"
"uzp2 z9.d, z9.d, z13.d\n"
- "ld1w { z1.s }, p2/Z, [x14, #1, MUL VL]\n"
- "ld1w { z2.s }, p2/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z2.s }, p2/Z, [x11, #2, MUL VL]\n"
"uzp1 z13.d, z10.d, z14.d\n"
+ "ld1w { z3.s }, p2/Z, [x11, #3, MUL VL]\n"
+ "addvl x11, x11, #4\n"
"uzp2 z10.d, z10.d, z14.d\n"
- "ld1w { z3.s }, p2/Z, [x14, #3, MUL VL]\n"
- "add x24, x11, x20\n"
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
- "addvl x14, x14, #4\n"
"mov z15.d, z7.d\n"
"add z15.s, z15.s, z0.s\n"
"add z12.s, z12.s, z1.s\n"
@@ -470,20 +470,20 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"ld1w { z2.s }, p2/Z, [x12, #2, MUL VL]\n"
"ld1w { z6.s }, p2/Z, [x13, #2, MUL VL]\n"
"ld1w { z3.s }, p2/Z, [x12, #3, MUL VL]\n"
- "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x12, x12, #4\n"
+ "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
"b 24f\n"
"23:" // Height 2: per layer parameters
- "add x26, %x[qp], %[per_layer_right_shift]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z0.s }, p2/Z, [x26]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
+ "add x24, %x[qp], %[per_layer_right_shift]\n"
+ "ld1rw { z0.s }, p2/Z, [x24]\n"
"mov z1.d, z0.d\n"
- "mov z5.d, z4.d\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
"mov z2.d, z0.d\n"
- "mov z6.d, z4.d\n"
"mov z3.d, z0.d\n"
+ "mov z5.d, z4.d\n"
+ "mov z6.d, z4.d\n"
"mov z7.d, z4.d\n"
"24:" // Height 2: parameters loaded
".inst 0x04a475ef // sqrdmulh z15.s, z15.s, z4.s\n"
@@ -498,92 +498,92 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"and z4.d, z15.d, z0.d\n"
"and z5.d, z12.d, z1.d\n"
"and z6.d, z13.d, z2.d\n"
- "and z7.d, z14.d, z3.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
"sqadd z15.s, z15.s, z4.s\n"
"sqadd z12.s, z12.s, z5.s\n"
"sqadd z13.s, z13.s, z6.s\n"
- "sqadd z14.s, z14.s, z7.s\n"
+ "and z7.d, z14.d, z3.d\n"
"and z4.d, z8.d, z0.d\n"
"and z5.d, z9.d, z1.d\n"
- "and z6.d, z10.d, z2.d\n"
- "and z7.d, z11.d, z3.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
- "asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
+ "sqadd z14.s, z14.s, z7.s\n"
"sqadd z8.s, z8.s, z4.s\n"
"sqadd z9.s, z9.s, z5.s\n"
+ "and z6.d, z10.d, z2.d\n"
+ "and z7.d, z11.d, z3.d\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "asr z7.s, z7.s, #0x1f\n"
"sqadd z10.s, z10.s, z6.s\n"
"sqadd z11.s, z11.s, z7.s\n"
"25:" // Height 2: no shift correction
- "add x25, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
".inst 0x4482880f // srshl z15.s, p2/M, z15.s, z0.s\n"
- "add z15.s, z15.s, z4.s\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
".inst 0x4482882c // srshl z12.s, p2/M, z12.s, z1.s\n"
+ "add x24, %x[qp], %[minval]\n"
".inst 0x4482884d // srshl z13.s, p2/M, z13.s, z2.s\n"
- "add z12.s, z12.s, z4.s\n"
- "add z13.s, z13.s, z4.s\n"
+ "ld1rw { z5.s }, p2/Z, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
".inst 0x4482886e // srshl z14.s, p2/M, z14.s, z3.s\n"
+ "ld1rw { z6.s }, p2/Z, [x24]\n"
".inst 0x44828808 // srshl z8.s, p2/M, z8.s, z0.s\n"
+ "add z15.s, z15.s, z4.s\n"
+ "add z12.s, z12.s, z4.s\n"
+ "add z13.s, z13.s, z4.s\n"
"add z14.s, z14.s, z4.s\n"
"add z8.s, z8.s, z4.s\n"
- ".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
- ".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
- "add z9.s, z9.s, z4.s\n"
- "add z10.s, z10.s, z4.s\n"
- ".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x25]\n"
- "add z11.s, z11.s, z4.s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x25]\n"
"smin z15.s, p2/M, z15.s, z6.s\n"
"smin z12.s, p2/M, z12.s, z6.s\n"
"smin z13.s, p2/M, z13.s, z6.s\n"
"smin z14.s, p2/M, z14.s, z6.s\n"
- "smin z8.s, p2/M, z8.s, z6.s\n"
- "smin z9.s, p2/M, z9.s, z6.s\n"
- "smin z10.s, p2/M, z10.s, z6.s\n"
- "smin z11.s, p2/M, z11.s, z6.s\n"
"smax z15.s, p2/M, z15.s, z5.s\n"
"smax z12.s, p2/M, z12.s, z5.s\n"
"smax z13.s, p2/M, z13.s, z5.s\n"
- "uzp1 z15.h, z15.h, z12.h\n"
"smax z14.s, p2/M, z14.s, z5.s\n"
- "smax z8.s, p2/M, z8.s, z5.s\n"
+ "smin z8.s, p2/M, z8.s, z6.s\n"
+ "uzp1 z15.h, z15.h, z12.h\n"
+ ".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
"uzp1 z12.h, z13.h, z14.h\n"
+ "smax z8.s, p2/M, z8.s, z5.s\n"
"uzp1 z15.b, z15.b, z12.b\n"
+ "st1b { z15.b }, p1, [x9]\n"
+ "add z9.s, z9.s, z4.s\n"
+ "addvl x9, x9, #1\n"
+ ".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
+ ".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
+ "smin z9.s, p2/M, z9.s, z6.s\n"
+ "add z10.s, z10.s, z4.s\n"
+ "add z11.s, z11.s, z4.s\n"
"smax z9.s, p2/M, z9.s, z5.s\n"
- "smax z10.s, p2/M, z10.s, z5.s\n"
+ "smin z10.s, p2/M, z10.s, z6.s\n"
+ "smin z11.s, p2/M, z11.s, z6.s\n"
"uzp1 z8.h, z8.h, z9.h\n"
- "st1b { z15.b }, p1, [x11]\n"
+ "smax z10.s, p2/M, z10.s, z5.s\n"
"smax z11.s, p2/M, z11.s, z5.s\n"
"uzp1 z9.h, z10.h, z11.h\n"
"uzp1 z8.b, z8.b, z9.b\n"
- "st1b { z8.b }, p1, [x24]\n"
- "addvl x11, x11, #1\n"
+ "st1b { z8.b }, p1, [x23]\n"
"26:" // Height 2: Writeback done
"decw x10, ALL, MUL #4\n"
"cmp x10, XZR\n"
"bgt 15b\n"
"b 80f\n"
"27:" // Height 3
- "mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
+ "mov x11, %x[col_bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"28:" // Height 3: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x10\n"
"mov z8.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z9.s, #0x0\n"
+ "whilelt p1.b, x19, x10\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"mov z12.s, #0x0\n"
@@ -599,176 +599,176 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"29:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"30:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 31f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 32f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 32f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
"b 32f\n"
"31:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
"32:" // Height 3: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 34f\n"
"33:" // Height 3: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
- "ld1rqb { z3.b }, p0/Z, [x24]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
+ "ld1rqb { z3.b }, p0/Z, [x23]\n"
+ "cmp x26, #0x10\n"
"trn2 z1.d, z1.d, z2.d\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "trn1 z2.d, z3.d, z4.d\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
- ".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
+ "add x23, x23, #0x10\n"
+ "trn1 z2.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
+ ".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x45069854 // smmla z20.s, z2.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
".inst 0x45079851 // smmla z17.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
- "trn2 z3.d, z3.d, z4.d\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
".inst 0x45069855 // smmla z21.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
- "sub x27, x27, #0x10\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
".inst 0x45079852 // smmla z18.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
- "cmp x27, #0x10\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
".inst 0x45069856 // smmla z22.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #16\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
".inst 0x45079853 // smmla z19.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-8, MUL VL]\n"
- "add x26, x26, #0x10\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-8, MUL VL]\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
".inst 0x45069857 // smmla z23.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #-7, MUL VL]\n"
- "add x25, x25, #0x10\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-7, MUL VL]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
".inst 0x45079870 // smmla z16.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-6, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-6, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
".inst 0x45069874 // smmla z20.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #-5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-5, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
".inst 0x45079871 // smmla z17.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-4, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
".inst 0x45069875 // smmla z21.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #-3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-3, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
".inst 0x45079872 // smmla z18.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-2, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-2, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
".inst 0x45069876 // smmla z22.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #-1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-1, MUL VL]\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x45079873 // smmla z19.s, z3.b, z7.b\n"
".inst 0x4506982f // smmla z15.s, z1.b, z6.b\n"
".inst 0x45069877 // smmla z23.s, z3.b, z6.b\n"
"bgt 33b\n"
"34:" // Height 3: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
- "ld1rqb { z3.b }, p0/Z, [x24]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "subs x26, x26, #0x8\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
+ "ld1rqb { z3.b }, p0/Z, [x23]\n"
"trn2 z1.d, z1.d, z2.d\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "trn1 z2.d, z3.d, z4.d\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
- ".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
+ "trn1 z2.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
+ ".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x45069854 // smmla z20.s, z2.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
".inst 0x45079851 // smmla z17.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
- "subs x27, x27, #0x8\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
".inst 0x45069855 // smmla z21.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
- "trn2 z3.d, z3.d, z4.d\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
".inst 0x45079852 // smmla z18.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
".inst 0x45069856 // smmla z22.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #8\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
".inst 0x45079853 // smmla z19.s, z2.b, z7.b\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
".inst 0x45069857 // smmla z23.s, z2.b, z6.b\n"
"ble 35f\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
".inst 0x45079870 // smmla z16.s, z3.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
".inst 0x45069874 // smmla z20.s, z3.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
".inst 0x45079871 // smmla z17.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
".inst 0x45069875 // smmla z21.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
".inst 0x45079872 // smmla z18.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
".inst 0x45069876 // smmla z22.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #8\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x45079873 // smmla z19.s, z3.b, z7.b\n"
".inst 0x4506982f // smmla z15.s, z1.b, z6.b\n"
".inst 0x45069877 // smmla z23.s, z3.b, z6.b\n"
"35:" // Height 3: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 30b\n"
"uzp1 z7.d, z8.d, z12.d\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 z8.d, z8.d, z12.d\n"
- "ld1w { z0.s }, p2/Z, [x14]\n"
+ "ld1w { z0.s }, p2/Z, [x11]\n"
+ "add x23, x9, x19\n"
"uzp1 z12.d, z9.d, z13.d\n"
+ "ld1w { z1.s }, p2/Z, [x11, #1, MUL VL]\n"
"uzp2 z9.d, z9.d, z13.d\n"
- "ld1w { z1.s }, p2/Z, [x14, #1, MUL VL]\n"
- "ld1w { z2.s }, p2/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z2.s }, p2/Z, [x11, #2, MUL VL]\n"
+ "add x22, x23, x19\n"
"uzp1 z13.d, z10.d, z14.d\n"
+ "ld1w { z3.s }, p2/Z, [x11, #3, MUL VL]\n"
+ "addvl x11, x11, #4\n"
"uzp2 z10.d, z10.d, z14.d\n"
- "ld1w { z3.s }, p2/Z, [x14, #3, MUL VL]\n"
- "add x24, x11, x20\n"
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
- "add x23, x24, x20\n"
- "addvl x14, x14, #4\n"
"uzp1 z16.d, z16.d, z20.d\n"
"uzp1 z17.d, z17.d, z21.d\n"
"uzp1 z18.d, z18.d, z22.d\n"
@@ -794,20 +794,20 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"ld1w { z2.s }, p2/Z, [x12, #2, MUL VL]\n"
"ld1w { z6.s }, p2/Z, [x13, #2, MUL VL]\n"
"ld1w { z3.s }, p2/Z, [x12, #3, MUL VL]\n"
- "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x12, x12, #4\n"
+ "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
"b 37f\n"
"36:" // Height 3: per layer parameters
- "add x26, %x[qp], %[per_layer_right_shift]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z0.s }, p2/Z, [x26]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
+ "add x24, %x[qp], %[per_layer_right_shift]\n"
+ "ld1rw { z0.s }, p2/Z, [x24]\n"
"mov z1.d, z0.d\n"
- "mov z5.d, z4.d\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
"mov z2.d, z0.d\n"
- "mov z6.d, z4.d\n"
"mov z3.d, z0.d\n"
+ "mov z5.d, z4.d\n"
+ "mov z6.d, z4.d\n"
"mov z7.d, z4.d\n"
"37:" // Height 3: parameters loaded
".inst 0x04a476f7 // sqrdmulh z23.s, z23.s, z4.s\n"
@@ -826,124 +826,124 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"and z4.d, z23.d, z0.d\n"
"and z5.d, z12.d, z1.d\n"
"and z6.d, z13.d, z2.d\n"
- "and z7.d, z14.d, z3.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
"sqadd z23.s, z23.s, z4.s\n"
"sqadd z12.s, z12.s, z5.s\n"
"sqadd z13.s, z13.s, z6.s\n"
- "sqadd z14.s, z14.s, z7.s\n"
+ "and z7.d, z14.d, z3.d\n"
"and z4.d, z8.d, z0.d\n"
"and z5.d, z9.d, z1.d\n"
- "and z6.d, z10.d, z2.d\n"
- "and z7.d, z11.d, z3.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
- "asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
+ "sqadd z14.s, z14.s, z7.s\n"
"sqadd z8.s, z8.s, z4.s\n"
"sqadd z9.s, z9.s, z5.s\n"
+ "and z6.d, z10.d, z2.d\n"
+ "and z7.d, z11.d, z3.d\n"
+ "and z4.d, z16.d, z0.d\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ "asr z4.s, z4.s, #0x1f\n"
"sqadd z10.s, z10.s, z6.s\n"
"sqadd z11.s, z11.s, z7.s\n"
- "and z4.d, z16.d, z0.d\n"
+ "sqadd z16.s, z16.s, z4.s\n"
"and z5.d, z17.d, z1.d\n"
"and z6.d, z18.d, z2.d\n"
"and z7.d, z19.d, z3.d\n"
- "asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
"asr z7.s, z7.s, #0x1f\n"
- "sqadd z16.s, z16.s, z4.s\n"
"sqadd z17.s, z17.s, z5.s\n"
"sqadd z18.s, z18.s, z6.s\n"
"sqadd z19.s, z19.s, z7.s\n"
"38:" // Height 3: no shift correction
- "add x25, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
- "add z23.s, z23.s, z4.s\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
".inst 0x4482882c // srshl z12.s, p2/M, z12.s, z1.s\n"
+ "add x24, %x[qp], %[minval]\n"
".inst 0x4482884d // srshl z13.s, p2/M, z13.s, z2.s\n"
- "add z12.s, z12.s, z4.s\n"
- "add z13.s, z13.s, z4.s\n"
+ "ld1rw { z5.s }, p2/Z, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
".inst 0x4482886e // srshl z14.s, p2/M, z14.s, z3.s\n"
+ "ld1rw { z6.s }, p2/Z, [x24]\n"
".inst 0x44828808 // srshl z8.s, p2/M, z8.s, z0.s\n"
+ "add z23.s, z23.s, z4.s\n"
+ "add z12.s, z12.s, z4.s\n"
+ "add z13.s, z13.s, z4.s\n"
"add z14.s, z14.s, z4.s\n"
"add z8.s, z8.s, z4.s\n"
- ".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
- ".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
- "add z9.s, z9.s, z4.s\n"
- "add z10.s, z10.s, z4.s\n"
- ".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
- ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z11.s, z11.s, z4.s\n"
- "add z16.s, z16.s, z4.s\n"
- ".inst 0x44828831 // srshl z17.s, p2/M, z17.s, z1.s\n"
- ".inst 0x44828852 // srshl z18.s, p2/M, z18.s, z2.s\n"
- "add z17.s, z17.s, z4.s\n"
- "add z18.s, z18.s, z4.s\n"
- ".inst 0x44828873 // srshl z19.s, p2/M, z19.s, z3.s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x25]\n"
- "add z19.s, z19.s, z4.s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x25]\n"
"smin z23.s, p2/M, z23.s, z6.s\n"
"smin z12.s, p2/M, z12.s, z6.s\n"
"smin z13.s, p2/M, z13.s, z6.s\n"
"smin z14.s, p2/M, z14.s, z6.s\n"
- "smin z8.s, p2/M, z8.s, z6.s\n"
- "smin z9.s, p2/M, z9.s, z6.s\n"
- "smin z10.s, p2/M, z10.s, z6.s\n"
- "smin z11.s, p2/M, z11.s, z6.s\n"
- "smin z16.s, p2/M, z16.s, z6.s\n"
- "smin z17.s, p2/M, z17.s, z6.s\n"
- "smin z18.s, p2/M, z18.s, z6.s\n"
- "smin z19.s, p2/M, z19.s, z6.s\n"
"smax z23.s, p2/M, z23.s, z5.s\n"
"smax z12.s, p2/M, z12.s, z5.s\n"
"smax z13.s, p2/M, z13.s, z5.s\n"
- "uzp1 z23.h, z23.h, z12.h\n"
"smax z14.s, p2/M, z14.s, z5.s\n"
- "smax z8.s, p2/M, z8.s, z5.s\n"
+ "smin z8.s, p2/M, z8.s, z6.s\n"
+ "uzp1 z23.h, z23.h, z12.h\n"
+ ".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
"uzp1 z12.h, z13.h, z14.h\n"
+ "smax z8.s, p2/M, z8.s, z5.s\n"
"uzp1 z23.b, z23.b, z12.b\n"
+ "st1b { z23.b }, p1, [x9]\n"
+ "add z9.s, z9.s, z4.s\n"
+ "addvl x9, x9, #1\n"
+ ".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
+ ".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
+ ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
+ "smin z9.s, p2/M, z9.s, z6.s\n"
+ ".inst 0x44828831 // srshl z17.s, p2/M, z17.s, z1.s\n"
+ "add z10.s, z10.s, z4.s\n"
+ "add z11.s, z11.s, z4.s\n"
+ "add z16.s, z16.s, z4.s\n"
+ "add z17.s, z17.s, z4.s\n"
"smax z9.s, p2/M, z9.s, z5.s\n"
- "smax z10.s, p2/M, z10.s, z5.s\n"
+ "smin z10.s, p2/M, z10.s, z6.s\n"
+ "smin z11.s, p2/M, z11.s, z6.s\n"
+ "smin z16.s, p2/M, z16.s, z6.s\n"
"uzp1 z8.h, z8.h, z9.h\n"
- "st1b { z23.b }, p1, [x11]\n"
+ "smax z10.s, p2/M, z10.s, z5.s\n"
"smax z11.s, p2/M, z11.s, z5.s\n"
"smax z16.s, p2/M, z16.s, z5.s\n"
+ "smin z17.s, p2/M, z17.s, z6.s\n"
+ ".inst 0x44828852 // srshl z18.s, p2/M, z18.s, z2.s\n"
"uzp1 z9.h, z10.h, z11.h\n"
+ ".inst 0x44828873 // srshl z19.s, p2/M, z19.s, z3.s\n"
"uzp1 z8.b, z8.b, z9.b\n"
+ "st1b { z8.b }, p1, [x23]\n"
+ "add z18.s, z18.s, z4.s\n"
"smax z17.s, p2/M, z17.s, z5.s\n"
- "smax z18.s, p2/M, z18.s, z5.s\n"
+ "add z19.s, z19.s, z4.s\n"
+ "smin z18.s, p2/M, z18.s, z6.s\n"
"uzp1 z16.h, z16.h, z17.h\n"
- "st1b { z8.b }, p1, [x24]\n"
+ "smin z19.s, p2/M, z19.s, z6.s\n"
+ "smax z18.s, p2/M, z18.s, z5.s\n"
"smax z19.s, p2/M, z19.s, z5.s\n"
"uzp1 z17.h, z18.h, z19.h\n"
"uzp1 z16.b, z16.b, z17.b\n"
- "st1b { z16.b }, p1, [x23]\n"
- "addvl x11, x11, #1\n"
+ "st1b { z16.b }, p1, [x22]\n"
"39:" // Height 3: Writeback done
"decw x10, ALL, MUL #4\n"
"cmp x10, XZR\n"
"bgt 28b\n"
"b 80f\n"
"40:" // Height 4
- "mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
+ "mov x11, %x[col_bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"41:" // Height 4: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x10\n"
"mov z8.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z9.s, #0x0\n"
+ "whilelt p1.b, x19, x10\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"mov z12.s, #0x0\n"
@@ -959,185 +959,185 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"42:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"43:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 44f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 45f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 45f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
"b 45f\n"
"44:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
"45:" // Height 4: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 47f\n"
"46:" // Height 4: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "cmp x26, #0x10\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1rqb { z3.b }, p0/Z, [x24]\n"
- "ld1rqb { z4.b }, p0/Z, [x23]\n"
+ "ld1rqb { z3.b }, p0/Z, [x23]\n"
+ "add x25, x25, #0x10\n"
"trn2 z1.d, z1.d, z2.d\n"
- "trn1 z2.d, z3.d, z4.d\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1rqb { z4.b }, p0/Z, [x22]\n"
+ "add x24, x24, #0x10\n"
".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
- ".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
+ "add x23, x23, #0x10\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
+ "add x22, x22, #0x10\n"
+ "trn1 z2.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
+ ".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x45069854 // smmla z20.s, z2.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
".inst 0x45079851 // smmla z17.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
- "trn2 z3.d, z3.d, z4.d\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
".inst 0x45069855 // smmla z21.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
- "sub x27, x27, #0x10\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
".inst 0x45079852 // smmla z18.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
- "cmp x27, #0x10\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
".inst 0x45069856 // smmla z22.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #16\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
".inst 0x45079853 // smmla z19.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-8, MUL VL]\n"
- "add x26, x26, #0x10\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-8, MUL VL]\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
".inst 0x45069857 // smmla z23.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #-7, MUL VL]\n"
- "add x25, x25, #0x10\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-7, MUL VL]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
".inst 0x45079870 // smmla z16.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-6, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-6, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
".inst 0x45069874 // smmla z20.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #-5, MUL VL]\n"
- "add x23, x23, #0x10\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-5, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
".inst 0x45079871 // smmla z17.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-4, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
".inst 0x45069875 // smmla z21.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #-3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-3, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
".inst 0x45079872 // smmla z18.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-2, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-2, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
".inst 0x45069876 // smmla z22.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #-1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-1, MUL VL]\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x45079873 // smmla z19.s, z3.b, z7.b\n"
".inst 0x4506982f // smmla z15.s, z1.b, z6.b\n"
".inst 0x45069877 // smmla z23.s, z3.b, z6.b\n"
"bgt 46b\n"
"47:" // Height 4: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x26, x26, #0x8\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1rqb { z3.b }, p0/Z, [x24]\n"
- "ld1rqb { z4.b }, p0/Z, [x23]\n"
+ "ld1rqb { z3.b }, p0/Z, [x23]\n"
"trn2 z1.d, z1.d, z2.d\n"
- "trn1 z2.d, z3.d, z4.d\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1rqb { z4.b }, p0/Z, [x22]\n"
".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
- ".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
+ "trn1 z2.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
+ ".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x45069854 // smmla z20.s, z2.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
".inst 0x45079851 // smmla z17.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
- "subs x27, x27, #0x8\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
".inst 0x45069855 // smmla z21.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
- "trn2 z3.d, z3.d, z4.d\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
".inst 0x45079852 // smmla z18.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
".inst 0x45069856 // smmla z22.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #8\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
".inst 0x45079853 // smmla z19.s, z2.b, z7.b\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
".inst 0x45069857 // smmla z23.s, z2.b, z6.b\n"
"ble 48f\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
".inst 0x45079870 // smmla z16.s, z3.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
".inst 0x45069874 // smmla z20.s, z3.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
".inst 0x45079871 // smmla z17.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
".inst 0x45069875 // smmla z21.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
".inst 0x45079872 // smmla z18.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
".inst 0x45069876 // smmla z22.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #8\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x45079873 // smmla z19.s, z3.b, z7.b\n"
".inst 0x4506982f // smmla z15.s, z1.b, z6.b\n"
".inst 0x45069877 // smmla z23.s, z3.b, z6.b\n"
"48:" // Height 4: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 43b\n"
"uzp1 z7.d, z8.d, z12.d\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 z8.d, z8.d, z12.d\n"
- "ld1w { z0.s }, p2/Z, [x14]\n"
+ "ld1w { z0.s }, p2/Z, [x11]\n"
+ "add x23, x9, x19\n"
"uzp1 z12.d, z9.d, z13.d\n"
+ "ld1w { z1.s }, p2/Z, [x11, #1, MUL VL]\n"
"uzp2 z9.d, z9.d, z13.d\n"
- "ld1w { z1.s }, p2/Z, [x14, #1, MUL VL]\n"
- "ld1w { z2.s }, p2/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z2.s }, p2/Z, [x11, #2, MUL VL]\n"
+ "add x22, x23, x19\n"
"uzp1 z13.d, z10.d, z14.d\n"
+ "ld1w { z3.s }, p2/Z, [x11, #3, MUL VL]\n"
+ "add x21, x22, x19\n"
"uzp2 z10.d, z10.d, z14.d\n"
- "ld1w { z3.s }, p2/Z, [x14, #3, MUL VL]\n"
- "add x24, x11, x20\n"
+ "addvl x11, x11, #4\n"
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
"uzp1 z15.d, z16.d, z20.d\n"
"uzp2 z16.d, z16.d, z20.d\n"
- "addvl x14, x14, #4\n"
"uzp1 z20.d, z17.d, z21.d\n"
"uzp2 z17.d, z17.d, z21.d\n"
"uzp1 z21.d, z18.d, z22.d\n"
@@ -1169,20 +1169,20 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"ld1w { z2.s }, p2/Z, [x12, #2, MUL VL]\n"
"ld1w { z6.s }, p2/Z, [x13, #2, MUL VL]\n"
"ld1w { z3.s }, p2/Z, [x12, #3, MUL VL]\n"
- "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x12, x12, #4\n"
+ "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
"b 50f\n"
"49:" // Height 4: per layer parameters
- "add x26, %x[qp], %[per_layer_right_shift]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z0.s }, p2/Z, [x26]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
+ "add x24, %x[qp], %[per_layer_right_shift]\n"
+ "ld1rw { z0.s }, p2/Z, [x24]\n"
"mov z1.d, z0.d\n"
- "mov z5.d, z4.d\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
"mov z2.d, z0.d\n"
- "mov z6.d, z4.d\n"
"mov z3.d, z0.d\n"
+ "mov z5.d, z4.d\n"
+ "mov z6.d, z4.d\n"
"mov z7.d, z4.d\n"
"50:" // Height 4: parameters loaded
".inst 0x04a476f7 // sqrdmulh z23.s, z23.s, z4.s\n"
@@ -1205,156 +1205,156 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"and z4.d, z23.d, z0.d\n"
"and z5.d, z12.d, z1.d\n"
"and z6.d, z13.d, z2.d\n"
- "and z7.d, z14.d, z3.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
"sqadd z23.s, z23.s, z4.s\n"
"sqadd z12.s, z12.s, z5.s\n"
"sqadd z13.s, z13.s, z6.s\n"
- "sqadd z14.s, z14.s, z7.s\n"
+ "and z7.d, z14.d, z3.d\n"
"and z4.d, z8.d, z0.d\n"
"and z5.d, z9.d, z1.d\n"
- "and z6.d, z10.d, z2.d\n"
- "and z7.d, z11.d, z3.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
- "asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
+ "sqadd z14.s, z14.s, z7.s\n"
"sqadd z8.s, z8.s, z4.s\n"
"sqadd z9.s, z9.s, z5.s\n"
+ "and z6.d, z10.d, z2.d\n"
+ "and z7.d, z11.d, z3.d\n"
+ "and z4.d, z15.d, z0.d\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ "asr z4.s, z4.s, #0x1f\n"
"sqadd z10.s, z10.s, z6.s\n"
"sqadd z11.s, z11.s, z7.s\n"
- "and z4.d, z15.d, z0.d\n"
+ "sqadd z15.s, z15.s, z4.s\n"
"and z5.d, z20.d, z1.d\n"
"and z6.d, z21.d, z2.d\n"
"and z7.d, z22.d, z3.d\n"
- "asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
"asr z7.s, z7.s, #0x1f\n"
- "sqadd z15.s, z15.s, z4.s\n"
"sqadd z20.s, z20.s, z5.s\n"
"sqadd z21.s, z21.s, z6.s\n"
"sqadd z22.s, z22.s, z7.s\n"
"and z4.d, z16.d, z0.d\n"
"and z5.d, z17.d, z1.d\n"
"and z6.d, z18.d, z2.d\n"
- "and z7.d, z19.d, z3.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
"sqadd z16.s, z16.s, z4.s\n"
"sqadd z17.s, z17.s, z5.s\n"
"sqadd z18.s, z18.s, z6.s\n"
+ "and z7.d, z19.d, z3.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
"sqadd z19.s, z19.s, z7.s\n"
"51:" // Height 4: no shift correction
- "add x25, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
- "add z23.s, z23.s, z4.s\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
".inst 0x4482882c // srshl z12.s, p2/M, z12.s, z1.s\n"
+ "add x24, %x[qp], %[minval]\n"
".inst 0x4482884d // srshl z13.s, p2/M, z13.s, z2.s\n"
- "add z12.s, z12.s, z4.s\n"
- "add z13.s, z13.s, z4.s\n"
+ "ld1rw { z5.s }, p2/Z, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
".inst 0x4482886e // srshl z14.s, p2/M, z14.s, z3.s\n"
+ "ld1rw { z6.s }, p2/Z, [x24]\n"
".inst 0x44828808 // srshl z8.s, p2/M, z8.s, z0.s\n"
+ "add z23.s, z23.s, z4.s\n"
+ "add z12.s, z12.s, z4.s\n"
+ "add z13.s, z13.s, z4.s\n"
"add z14.s, z14.s, z4.s\n"
"add z8.s, z8.s, z4.s\n"
- ".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
- ".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
- "add z9.s, z9.s, z4.s\n"
- "add z10.s, z10.s, z4.s\n"
- ".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
- ".inst 0x4482880f // srshl z15.s, p2/M, z15.s, z0.s\n"
- "add z11.s, z11.s, z4.s\n"
- "add z15.s, z15.s, z4.s\n"
- ".inst 0x44828834 // srshl z20.s, p2/M, z20.s, z1.s\n"
- ".inst 0x44828855 // srshl z21.s, p2/M, z21.s, z2.s\n"
- "add z20.s, z20.s, z4.s\n"
- "add z21.s, z21.s, z4.s\n"
- ".inst 0x44828876 // srshl z22.s, p2/M, z22.s, z3.s\n"
- ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z22.s, z22.s, z4.s\n"
- "add z16.s, z16.s, z4.s\n"
- ".inst 0x44828831 // srshl z17.s, p2/M, z17.s, z1.s\n"
- ".inst 0x44828852 // srshl z18.s, p2/M, z18.s, z2.s\n"
- "add z17.s, z17.s, z4.s\n"
- "add z18.s, z18.s, z4.s\n"
- ".inst 0x44828873 // srshl z19.s, p2/M, z19.s, z3.s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x25]\n"
- "add z19.s, z19.s, z4.s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x25]\n"
"smin z23.s, p2/M, z23.s, z6.s\n"
"smin z12.s, p2/M, z12.s, z6.s\n"
"smin z13.s, p2/M, z13.s, z6.s\n"
"smin z14.s, p2/M, z14.s, z6.s\n"
- "smin z8.s, p2/M, z8.s, z6.s\n"
- "smin z9.s, p2/M, z9.s, z6.s\n"
- "smin z10.s, p2/M, z10.s, z6.s\n"
- "smin z11.s, p2/M, z11.s, z6.s\n"
- "smin z15.s, p2/M, z15.s, z6.s\n"
- "smin z20.s, p2/M, z20.s, z6.s\n"
- "smin z21.s, p2/M, z21.s, z6.s\n"
- "smin z22.s, p2/M, z22.s, z6.s\n"
- "smin z16.s, p2/M, z16.s, z6.s\n"
- "smin z17.s, p2/M, z17.s, z6.s\n"
- "smin z18.s, p2/M, z18.s, z6.s\n"
- "smin z19.s, p2/M, z19.s, z6.s\n"
"smax z23.s, p2/M, z23.s, z5.s\n"
"smax z12.s, p2/M, z12.s, z5.s\n"
"smax z13.s, p2/M, z13.s, z5.s\n"
- "uzp1 z23.h, z23.h, z12.h\n"
"smax z14.s, p2/M, z14.s, z5.s\n"
- "smax z8.s, p2/M, z8.s, z5.s\n"
+ "smin z8.s, p2/M, z8.s, z6.s\n"
+ "uzp1 z23.h, z23.h, z12.h\n"
+ ".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
"uzp1 z12.h, z13.h, z14.h\n"
+ "smax z8.s, p2/M, z8.s, z5.s\n"
"uzp1 z23.b, z23.b, z12.b\n"
+ "st1b { z23.b }, p1, [x9]\n"
+ "add z9.s, z9.s, z4.s\n"
+ "addvl x9, x9, #1\n"
+ ".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
+ ".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
+ ".inst 0x4482880f // srshl z15.s, p2/M, z15.s, z0.s\n"
+ "smin z9.s, p2/M, z9.s, z6.s\n"
+ ".inst 0x44828834 // srshl z20.s, p2/M, z20.s, z1.s\n"
+ "add z10.s, z10.s, z4.s\n"
+ "add z11.s, z11.s, z4.s\n"
+ "add z15.s, z15.s, z4.s\n"
+ "add z20.s, z20.s, z4.s\n"
"smax z9.s, p2/M, z9.s, z5.s\n"
- "smax z10.s, p2/M, z10.s, z5.s\n"
+ "smin z10.s, p2/M, z10.s, z6.s\n"
+ "smin z11.s, p2/M, z11.s, z6.s\n"
+ "smin z15.s, p2/M, z15.s, z6.s\n"
"uzp1 z8.h, z8.h, z9.h\n"
- "st1b { z23.b }, p1, [x11]\n"
+ "smax z10.s, p2/M, z10.s, z5.s\n"
"smax z11.s, p2/M, z11.s, z5.s\n"
"smax z15.s, p2/M, z15.s, z5.s\n"
+ "smin z20.s, p2/M, z20.s, z6.s\n"
+ ".inst 0x44828855 // srshl z21.s, p2/M, z21.s, z2.s\n"
"uzp1 z9.h, z10.h, z11.h\n"
+ ".inst 0x44828876 // srshl z22.s, p2/M, z22.s, z3.s\n"
"uzp1 z8.b, z8.b, z9.b\n"
+ "st1b { z8.b }, p1, [x23]\n"
+ "add z21.s, z21.s, z4.s\n"
"smax z20.s, p2/M, z20.s, z5.s\n"
- "smax z21.s, p2/M, z21.s, z5.s\n"
+ "add z22.s, z22.s, z4.s\n"
+ ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
+ "smin z21.s, p2/M, z21.s, z6.s\n"
"uzp1 z15.h, z15.h, z20.h\n"
- "st1b { z8.b }, p1, [x24]\n"
+ "smin z22.s, p2/M, z22.s, z6.s\n"
+ "add z16.s, z16.s, z4.s\n"
+ "smax z21.s, p2/M, z21.s, z5.s\n"
+ ".inst 0x44828831 // srshl z17.s, p2/M, z17.s, z1.s\n"
"smax z22.s, p2/M, z22.s, z5.s\n"
- "smax z16.s, p2/M, z16.s, z5.s\n"
+ "smin z16.s, p2/M, z16.s, z6.s\n"
+ ".inst 0x44828852 // srshl z18.s, p2/M, z18.s, z2.s\n"
+ "add z17.s, z17.s, z4.s\n"
"uzp1 z20.h, z21.h, z22.h\n"
+ "smax z16.s, p2/M, z16.s, z5.s\n"
+ "add z18.s, z18.s, z4.s\n"
"uzp1 z15.b, z15.b, z20.b\n"
+ "st1b { z15.b }, p1, [x22]\n"
+ "smin z17.s, p2/M, z17.s, z6.s\n"
+ "smin z18.s, p2/M, z18.s, z6.s\n"
+ ".inst 0x44828873 // srshl z19.s, p2/M, z19.s, z3.s\n"
"smax z17.s, p2/M, z17.s, z5.s\n"
"smax z18.s, p2/M, z18.s, z5.s\n"
+ "add z19.s, z19.s, z4.s\n"
"uzp1 z16.h, z16.h, z17.h\n"
- "st1b { z15.b }, p1, [x23]\n"
+ "smin z19.s, p2/M, z19.s, z6.s\n"
"smax z19.s, p2/M, z19.s, z5.s\n"
"uzp1 z17.h, z18.h, z19.h\n"
"uzp1 z16.b, z16.b, z17.b\n"
- "st1b { z16.b }, p1, [x22]\n"
- "addvl x11, x11, #1\n"
+ "st1b { z16.b }, p1, [x21]\n"
"52:" // Height 4: Writeback done
"decw x10, ALL, MUL #4\n"
"cmp x10, XZR\n"
"bgt 41b\n"
"b 80f\n"
"53:" // Height 5
- "mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
+ "mov x11, %x[col_bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"54:" // Height 5: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x10\n"
"mov z8.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z9.s, #0x0\n"
+ "whilelt p1.b, x19, x10\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"mov z12.s, #0x0\n"
@@ -1378,115 +1378,115 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"mov z30.s, #0x0\n"
"mov z31.s, #0x0\n"
"55:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"56:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 57f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 58f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 58f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
"b 58f\n"
"57:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
"58:" // Height 5: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 60f\n"
"59:" // Height 5: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
- "ld1rqb { z3.b }, p0/Z, [x24]\n"
- "ld1rqb { z4.b }, p0/Z, [x23]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
+ "ld1rqb { z3.b }, p0/Z, [x23]\n"
+ "sub x26, x26, #0x10\n"
"trn2 z1.d, z1.d, z2.d\n"
- "ld1rqb { z5.b }, p0/Z, [x22]\n"
+ "ld1rqb { z4.b }, p0/Z, [x22]\n"
+ "cmp x26, #0x10\n"
+ ".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
+ "ld1rqb { z5.b }, p0/Z, [x21]\n"
+ "add x25, x25, #0x10\n"
"trn1 z2.d, z3.d, z4.d\n"
+ "add x24, x24, #0x10\n"
"trn2 z3.d, z3.d, z4.d\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
+ "add x23, x23, #0x10\n"
"trn1 z4.d, z5.d, z6.d\n"
+ "add x22, x22, #0x10\n"
"trn2 z5.d, z5.d, z6.d\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
- ".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "add x21, x21, #0x10\n"
".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
".inst 0x45079898 // smmla z24.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
- "sub x27, x27, #0x10\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
".inst 0x45069854 // smmla z20.s, z2.b, z6.b\n"
- "cmp x27, #0x10\n"
- "add x26, x26, #0x10\n"
".inst 0x4506989c // smmla z28.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
- "add x25, x25, #0x10\n"
".inst 0x45079851 // smmla z17.s, z2.b, z7.b\n"
".inst 0x45079899 // smmla z25.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
".inst 0x45069855 // smmla z21.s, z2.b, z6.b\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
".inst 0x4506989d // smmla z29.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
".inst 0x45079852 // smmla z18.s, z2.b, z7.b\n"
".inst 0x4507989a // smmla z26.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
".inst 0x45069856 // smmla z22.s, z2.b, z6.b\n"
".inst 0x4506989e // smmla z30.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #16\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
".inst 0x45079853 // smmla z19.s, z2.b, z7.b\n"
".inst 0x4507989b // smmla z27.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-8, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-8, MUL VL]\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
".inst 0x45069857 // smmla z23.s, z2.b, z6.b\n"
".inst 0x4506989f // smmla z31.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #-7, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-7, MUL VL]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
".inst 0x45079870 // smmla z16.s, z3.b, z7.b\n"
".inst 0x450798b8 // smmla z24.s, z5.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-6, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-6, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
".inst 0x45069874 // smmla z20.s, z3.b, z6.b\n"
".inst 0x450698bc // smmla z28.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #-5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-5, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
".inst 0x45079871 // smmla z17.s, z3.b, z7.b\n"
".inst 0x450798b9 // smmla z25.s, z5.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-4, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
".inst 0x45069875 // smmla z21.s, z3.b, z6.b\n"
".inst 0x450698bd // smmla z29.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #-3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-3, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
".inst 0x45079872 // smmla z18.s, z3.b, z7.b\n"
".inst 0x450798ba // smmla z26.s, z5.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-2, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-2, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
".inst 0x45069876 // smmla z22.s, z3.b, z6.b\n"
".inst 0x450698be // smmla z30.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #-1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-1, MUL VL]\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x45079873 // smmla z19.s, z3.b, z7.b\n"
".inst 0x450798bb // smmla z27.s, z5.b, z7.b\n"
@@ -1495,80 +1495,80 @@ void sve_hybrid_s8qs_mmla_6x4VL (
".inst 0x450698bf // smmla z31.s, z5.b, z6.b\n"
"bgt 59b\n"
"60:" // Height 5: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
- "ld1rqb { z3.b }, p0/Z, [x24]\n"
- "ld1rqb { z4.b }, p0/Z, [x23]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
+ "ld1rqb { z3.b }, p0/Z, [x23]\n"
+ "subs x26, x26, #0x8\n"
"trn2 z1.d, z1.d, z2.d\n"
- "ld1rqb { z5.b }, p0/Z, [x22]\n"
+ "ld1rqb { z4.b }, p0/Z, [x22]\n"
+ "ld1rqb { z5.b }, p0/Z, [x21]\n"
+ ".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
"trn1 z2.d, z3.d, z4.d\n"
"trn2 z3.d, z3.d, z4.d\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
"trn1 z4.d, z5.d, z6.d\n"
"trn2 z5.d, z5.d, z6.d\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
- ".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
".inst 0x45079898 // smmla z24.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
- "subs x27, x27, #0x8\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
".inst 0x45069854 // smmla z20.s, z2.b, z6.b\n"
".inst 0x4506989c // smmla z28.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
".inst 0x45079851 // smmla z17.s, z2.b, z7.b\n"
".inst 0x45079899 // smmla z25.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
".inst 0x45069855 // smmla z21.s, z2.b, z6.b\n"
".inst 0x4506989d // smmla z29.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
".inst 0x45079852 // smmla z18.s, z2.b, z7.b\n"
".inst 0x4507989a // smmla z26.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
".inst 0x45069856 // smmla z22.s, z2.b, z6.b\n"
".inst 0x4506989e // smmla z30.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
- "addvl x9, x9, #8\n"
".inst 0x45079853 // smmla z19.s, z2.b, z7.b\n"
".inst 0x4507989b // smmla z27.s, z4.b, z7.b\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
".inst 0x45069857 // smmla z23.s, z2.b, z6.b\n"
".inst 0x4506989f // smmla z31.s, z4.b, z6.b\n"
"ble 61f\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
".inst 0x45079870 // smmla z16.s, z3.b, z7.b\n"
".inst 0x450798b8 // smmla z24.s, z5.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
".inst 0x45069874 // smmla z20.s, z3.b, z6.b\n"
".inst 0x450698bc // smmla z28.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
".inst 0x45079871 // smmla z17.s, z3.b, z7.b\n"
".inst 0x450798b9 // smmla z25.s, z5.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
".inst 0x45069875 // smmla z21.s, z3.b, z6.b\n"
".inst 0x450698bd // smmla z29.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
".inst 0x45079872 // smmla z18.s, z3.b, z7.b\n"
".inst 0x450798ba // smmla z26.s, z5.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
".inst 0x45069876 // smmla z22.s, z3.b, z6.b\n"
".inst 0x450698be // smmla z30.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #8\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x45079873 // smmla z19.s, z3.b, z7.b\n"
".inst 0x450798bb // smmla z27.s, z5.b, z7.b\n"
@@ -1576,30 +1576,30 @@ void sve_hybrid_s8qs_mmla_6x4VL (
".inst 0x45069877 // smmla z23.s, z3.b, z6.b\n"
".inst 0x450698bf // smmla z31.s, z5.b, z6.b\n"
"61:" // Height 5: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 56b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 z7.d, z8.d, z12.d\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 z8.d, z8.d, z12.d\n"
- "add x24, x11, x20\n"
+ "ld1w { z0.s }, p2/Z, [x11]\n"
+ "add x23, x9, x19\n"
"uzp1 z12.d, z9.d, z13.d\n"
+ "ld1w { z1.s }, p2/Z, [x11, #1, MUL VL]\n"
"uzp2 z9.d, z9.d, z13.d\n"
- "ld1w { z0.s }, p2/Z, [x14]\n"
- "ld1w { z1.s }, p2/Z, [x14, #1, MUL VL]\n"
+ "ld1w { z2.s }, p2/Z, [x11, #2, MUL VL]\n"
+ "add x22, x23, x19\n"
"uzp1 z13.d, z10.d, z14.d\n"
+ "ld1w { z3.s }, p2/Z, [x11, #3, MUL VL]\n"
+ "add x21, x22, x19\n"
"uzp2 z10.d, z10.d, z14.d\n"
- "ld1w { z2.s }, p2/Z, [x14, #2, MUL VL]\n"
- "ld1w { z3.s }, p2/Z, [x14, #3, MUL VL]\n"
+ "add x20, x21, x19\n"
"uzp1 z14.d, z11.d, z15.d\n"
+ "addvl x11, x11, #4\n"
"uzp2 z11.d, z11.d, z15.d\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
"uzp1 z15.d, z16.d, z20.d\n"
"uzp2 z16.d, z16.d, z20.d\n"
- "add x21, x22, x20\n"
- "addvl x14, x14, #4\n"
"uzp1 z20.d, z17.d, z21.d\n"
"uzp2 z17.d, z17.d, z21.d\n"
"uzp1 z21.d, z18.d, z22.d\n"
@@ -1639,20 +1639,20 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"ld1w { z2.s }, p2/Z, [x12, #2, MUL VL]\n"
"ld1w { z6.s }, p2/Z, [x13, #2, MUL VL]\n"
"ld1w { z3.s }, p2/Z, [x12, #3, MUL VL]\n"
- "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x12, x12, #4\n"
+ "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
"b 63f\n"
"62:" // Height 5: per layer parameters
- "add x26, %x[qp], %[per_layer_right_shift]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z0.s }, p2/Z, [x26]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
+ "add x24, %x[qp], %[per_layer_right_shift]\n"
+ "ld1rw { z0.s }, p2/Z, [x24]\n"
"mov z1.d, z0.d\n"
- "mov z5.d, z4.d\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
"mov z2.d, z0.d\n"
- "mov z6.d, z4.d\n"
"mov z3.d, z0.d\n"
+ "mov z5.d, z4.d\n"
+ "mov z6.d, z4.d\n"
"mov z7.d, z4.d\n"
"63:" // Height 5: parameters loaded
".inst 0x04a477ff // sqrdmulh z31.s, z31.s, z4.s\n"
@@ -1679,191 +1679,191 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"and z4.d, z31.d, z0.d\n"
"and z5.d, z12.d, z1.d\n"
"and z6.d, z13.d, z2.d\n"
- "and z7.d, z14.d, z3.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
"sqadd z31.s, z31.s, z4.s\n"
"sqadd z12.s, z12.s, z5.s\n"
"sqadd z13.s, z13.s, z6.s\n"
- "sqadd z14.s, z14.s, z7.s\n"
+ "and z7.d, z14.d, z3.d\n"
"and z4.d, z8.d, z0.d\n"
"and z5.d, z9.d, z1.d\n"
- "and z6.d, z10.d, z2.d\n"
- "and z7.d, z11.d, z3.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
- "asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
+ "sqadd z14.s, z14.s, z7.s\n"
"sqadd z8.s, z8.s, z4.s\n"
"sqadd z9.s, z9.s, z5.s\n"
+ "and z6.d, z10.d, z2.d\n"
+ "and z7.d, z11.d, z3.d\n"
+ "and z4.d, z15.d, z0.d\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ "asr z4.s, z4.s, #0x1f\n"
"sqadd z10.s, z10.s, z6.s\n"
"sqadd z11.s, z11.s, z7.s\n"
- "and z4.d, z15.d, z0.d\n"
+ "sqadd z15.s, z15.s, z4.s\n"
"and z5.d, z20.d, z1.d\n"
"and z6.d, z21.d, z2.d\n"
"and z7.d, z22.d, z3.d\n"
- "asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
"asr z7.s, z7.s, #0x1f\n"
- "sqadd z15.s, z15.s, z4.s\n"
"sqadd z20.s, z20.s, z5.s\n"
"sqadd z21.s, z21.s, z6.s\n"
"sqadd z22.s, z22.s, z7.s\n"
"and z4.d, z16.d, z0.d\n"
"and z5.d, z17.d, z1.d\n"
"and z6.d, z18.d, z2.d\n"
- "and z7.d, z19.d, z3.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
"sqadd z16.s, z16.s, z4.s\n"
"sqadd z17.s, z17.s, z5.s\n"
"sqadd z18.s, z18.s, z6.s\n"
- "sqadd z19.s, z19.s, z7.s\n"
+ "and z7.d, z19.d, z3.d\n"
"and z4.d, z24.d, z0.d\n"
"and z5.d, z25.d, z1.d\n"
- "and z6.d, z26.d, z2.d\n"
- "and z7.d, z27.d, z3.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
- "asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
+ "sqadd z19.s, z19.s, z7.s\n"
"sqadd z24.s, z24.s, z4.s\n"
"sqadd z25.s, z25.s, z5.s\n"
+ "and z6.d, z26.d, z2.d\n"
+ "and z7.d, z27.d, z3.d\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "asr z7.s, z7.s, #0x1f\n"
"sqadd z26.s, z26.s, z6.s\n"
"sqadd z27.s, z27.s, z7.s\n"
"64:" // Height 5: no shift correction
- "add x25, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
".inst 0x4482881f // srshl z31.s, p2/M, z31.s, z0.s\n"
- "add z31.s, z31.s, z4.s\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
".inst 0x4482882c // srshl z12.s, p2/M, z12.s, z1.s\n"
+ "add x24, %x[qp], %[minval]\n"
".inst 0x4482884d // srshl z13.s, p2/M, z13.s, z2.s\n"
- "add z12.s, z12.s, z4.s\n"
- "add z13.s, z13.s, z4.s\n"
+ "ld1rw { z5.s }, p2/Z, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
".inst 0x4482886e // srshl z14.s, p2/M, z14.s, z3.s\n"
+ "ld1rw { z6.s }, p2/Z, [x24]\n"
".inst 0x44828808 // srshl z8.s, p2/M, z8.s, z0.s\n"
+ "add z31.s, z31.s, z4.s\n"
+ "add z12.s, z12.s, z4.s\n"
+ "add z13.s, z13.s, z4.s\n"
"add z14.s, z14.s, z4.s\n"
"add z8.s, z8.s, z4.s\n"
- ".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
- ".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
- "add z9.s, z9.s, z4.s\n"
- "add z10.s, z10.s, z4.s\n"
- ".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
- ".inst 0x4482880f // srshl z15.s, p2/M, z15.s, z0.s\n"
- "add z11.s, z11.s, z4.s\n"
- "add z15.s, z15.s, z4.s\n"
- ".inst 0x44828834 // srshl z20.s, p2/M, z20.s, z1.s\n"
- ".inst 0x44828855 // srshl z21.s, p2/M, z21.s, z2.s\n"
- "add z20.s, z20.s, z4.s\n"
- "add z21.s, z21.s, z4.s\n"
- ".inst 0x44828876 // srshl z22.s, p2/M, z22.s, z3.s\n"
- ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z22.s, z22.s, z4.s\n"
- "add z16.s, z16.s, z4.s\n"
- ".inst 0x44828831 // srshl z17.s, p2/M, z17.s, z1.s\n"
- ".inst 0x44828852 // srshl z18.s, p2/M, z18.s, z2.s\n"
- "add z17.s, z17.s, z4.s\n"
- "add z18.s, z18.s, z4.s\n"
- ".inst 0x44828873 // srshl z19.s, p2/M, z19.s, z3.s\n"
- ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
- "add z19.s, z19.s, z4.s\n"
- "add z24.s, z24.s, z4.s\n"
- ".inst 0x44828839 // srshl z25.s, p2/M, z25.s, z1.s\n"
- ".inst 0x4482885a // srshl z26.s, p2/M, z26.s, z2.s\n"
- "add z25.s, z25.s, z4.s\n"
- "add z26.s, z26.s, z4.s\n"
- ".inst 0x4482887b // srshl z27.s, p2/M, z27.s, z3.s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x25]\n"
- "add z27.s, z27.s, z4.s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x25]\n"
"smin z31.s, p2/M, z31.s, z6.s\n"
"smin z12.s, p2/M, z12.s, z6.s\n"
"smin z13.s, p2/M, z13.s, z6.s\n"
"smin z14.s, p2/M, z14.s, z6.s\n"
- "smin z8.s, p2/M, z8.s, z6.s\n"
- "smin z9.s, p2/M, z9.s, z6.s\n"
- "smin z10.s, p2/M, z10.s, z6.s\n"
- "smin z11.s, p2/M, z11.s, z6.s\n"
- "smin z15.s, p2/M, z15.s, z6.s\n"
- "smin z20.s, p2/M, z20.s, z6.s\n"
- "smin z21.s, p2/M, z21.s, z6.s\n"
- "smin z22.s, p2/M, z22.s, z6.s\n"
- "smin z16.s, p2/M, z16.s, z6.s\n"
- "smin z17.s, p2/M, z17.s, z6.s\n"
- "smin z18.s, p2/M, z18.s, z6.s\n"
- "smin z19.s, p2/M, z19.s, z6.s\n"
- "smin z24.s, p2/M, z24.s, z6.s\n"
- "smin z25.s, p2/M, z25.s, z6.s\n"
- "smin z26.s, p2/M, z26.s, z6.s\n"
- "smin z27.s, p2/M, z27.s, z6.s\n"
"smax z31.s, p2/M, z31.s, z5.s\n"
"smax z12.s, p2/M, z12.s, z5.s\n"
"smax z13.s, p2/M, z13.s, z5.s\n"
- "uzp1 z31.h, z31.h, z12.h\n"
"smax z14.s, p2/M, z14.s, z5.s\n"
- "smax z8.s, p2/M, z8.s, z5.s\n"
+ "smin z8.s, p2/M, z8.s, z6.s\n"
+ "uzp1 z31.h, z31.h, z12.h\n"
+ ".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
"uzp1 z12.h, z13.h, z14.h\n"
+ "smax z8.s, p2/M, z8.s, z5.s\n"
"uzp1 z31.b, z31.b, z12.b\n"
+ "st1b { z31.b }, p1, [x9]\n"
+ "add z9.s, z9.s, z4.s\n"
+ "addvl x9, x9, #1\n"
+ ".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
+ ".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
+ ".inst 0x4482880f // srshl z15.s, p2/M, z15.s, z0.s\n"
+ "smin z9.s, p2/M, z9.s, z6.s\n"
+ ".inst 0x44828834 // srshl z20.s, p2/M, z20.s, z1.s\n"
+ "add z10.s, z10.s, z4.s\n"
+ "add z11.s, z11.s, z4.s\n"
+ "add z15.s, z15.s, z4.s\n"
+ "add z20.s, z20.s, z4.s\n"
"smax z9.s, p2/M, z9.s, z5.s\n"
- "smax z10.s, p2/M, z10.s, z5.s\n"
+ "smin z10.s, p2/M, z10.s, z6.s\n"
+ "smin z11.s, p2/M, z11.s, z6.s\n"
+ "smin z15.s, p2/M, z15.s, z6.s\n"
"uzp1 z8.h, z8.h, z9.h\n"
- "st1b { z31.b }, p1, [x11]\n"
+ "smax z10.s, p2/M, z10.s, z5.s\n"
"smax z11.s, p2/M, z11.s, z5.s\n"
"smax z15.s, p2/M, z15.s, z5.s\n"
+ "smin z20.s, p2/M, z20.s, z6.s\n"
+ ".inst 0x44828855 // srshl z21.s, p2/M, z21.s, z2.s\n"
"uzp1 z9.h, z10.h, z11.h\n"
+ ".inst 0x44828876 // srshl z22.s, p2/M, z22.s, z3.s\n"
"uzp1 z8.b, z8.b, z9.b\n"
+ "st1b { z8.b }, p1, [x23]\n"
+ "add z21.s, z21.s, z4.s\n"
"smax z20.s, p2/M, z20.s, z5.s\n"
- "smax z21.s, p2/M, z21.s, z5.s\n"
+ "add z22.s, z22.s, z4.s\n"
+ ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
+ "smin z21.s, p2/M, z21.s, z6.s\n"
"uzp1 z15.h, z15.h, z20.h\n"
- "st1b { z8.b }, p1, [x24]\n"
+ "smin z22.s, p2/M, z22.s, z6.s\n"
+ "add z16.s, z16.s, z4.s\n"
+ "smax z21.s, p2/M, z21.s, z5.s\n"
+ ".inst 0x44828831 // srshl z17.s, p2/M, z17.s, z1.s\n"
"smax z22.s, p2/M, z22.s, z5.s\n"
- "smax z16.s, p2/M, z16.s, z5.s\n"
+ "smin z16.s, p2/M, z16.s, z6.s\n"
+ ".inst 0x44828852 // srshl z18.s, p2/M, z18.s, z2.s\n"
+ "add z17.s, z17.s, z4.s\n"
"uzp1 z20.h, z21.h, z22.h\n"
+ "smax z16.s, p2/M, z16.s, z5.s\n"
+ "add z18.s, z18.s, z4.s\n"
"uzp1 z15.b, z15.b, z20.b\n"
+ "st1b { z15.b }, p1, [x22]\n"
+ "smin z17.s, p2/M, z17.s, z6.s\n"
+ "smin z18.s, p2/M, z18.s, z6.s\n"
+ ".inst 0x44828873 // srshl z19.s, p2/M, z19.s, z3.s\n"
+ ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
+ ".inst 0x44828839 // srshl z25.s, p2/M, z25.s, z1.s\n"
"smax z17.s, p2/M, z17.s, z5.s\n"
- "smax z18.s, p2/M, z18.s, z5.s\n"
+ "add z19.s, z19.s, z4.s\n"
+ "add z24.s, z24.s, z4.s\n"
+ "add z25.s, z25.s, z4.s\n"
"uzp1 z16.h, z16.h, z17.h\n"
- "st1b { z15.b }, p1, [x23]\n"
+ "smax z18.s, p2/M, z18.s, z5.s\n"
+ "smin z19.s, p2/M, z19.s, z6.s\n"
+ "smin z24.s, p2/M, z24.s, z6.s\n"
+ "smin z25.s, p2/M, z25.s, z6.s\n"
+ ".inst 0x4482885a // srshl z26.s, p2/M, z26.s, z2.s\n"
"smax z19.s, p2/M, z19.s, z5.s\n"
"smax z24.s, p2/M, z24.s, z5.s\n"
+ "smax z25.s, p2/M, z25.s, z5.s\n"
+ "add z26.s, z26.s, z4.s\n"
"uzp1 z17.h, z18.h, z19.h\n"
+ ".inst 0x4482887b // srshl z27.s, p2/M, z27.s, z3.s\n"
+ "uzp1 z24.h, z24.h, z25.h\n"
"uzp1 z16.b, z16.b, z17.b\n"
- "smax z25.s, p2/M, z25.s, z5.s\n"
+ "st1b { z16.b }, p1, [x21]\n"
+ "add z27.s, z27.s, z4.s\n"
+ "smin z26.s, p2/M, z26.s, z6.s\n"
+ "smin z27.s, p2/M, z27.s, z6.s\n"
"smax z26.s, p2/M, z26.s, z5.s\n"
- "uzp1 z24.h, z24.h, z25.h\n"
- "st1b { z16.b }, p1, [x22]\n"
"smax z27.s, p2/M, z27.s, z5.s\n"
"uzp1 z25.h, z26.h, z27.h\n"
"uzp1 z24.b, z24.b, z25.b\n"
- "st1b { z24.b }, p1, [x21]\n"
- "addvl x11, x11, #1\n"
+ "st1b { z24.b }, p1, [x20]\n"
"65:" // Height 5: Writeback done
"decw x10, ALL, MUL #4\n"
"cmp x10, XZR\n"
"bgt 54b\n"
"b 80f\n"
"66:" // Height 6
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x6\n"
- "mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
+ "mov x11, %x[col_bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
+ "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x11, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x20, #0x6\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
"67:" // Height 6: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x10\n"
"mov z8.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z9.s, #0x0\n"
+ "whilelt p1.b, x19, x10\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"mov z12.s, #0x0\n"
@@ -1887,120 +1887,120 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"mov z30.s, #0x0\n"
"mov z31.s, #0x0\n"
"68:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"69:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 70f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 71f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
- "add x21, x21, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 71f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
+ "add x20, x20, x19\n"
"b 71f\n"
"70:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
+ "add x20, x21, x19\n"
"71:" // Height 6: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 73f\n"
"72:" // Height 6: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1rqb { z3.b }, p0/Z, [x24]\n"
- "ld1rqb { z4.b }, p0/Z, [x23]\n"
+ "ld1rqb { z3.b }, p0/Z, [x23]\n"
+ "cmp x26, #0x10\n"
"trn2 z1.d, z1.d, z2.d\n"
+ "ld1rqb { z4.b }, p0/Z, [x22]\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
+ "ld1rqb { z5.b }, p0/Z, [x21]\n"
+ "add x24, x24, #0x10\n"
"trn1 z2.d, z3.d, z4.d\n"
- "ld1rqb { z5.b }, p0/Z, [x22]\n"
- "ld1rqb { z6.b }, p0/Z, [x21]\n"
+ "ld1rqb { z6.b }, p0/Z, [x20]\n"
+ "add x23, x23, #0x10\n"
"trn2 z3.d, z3.d, z4.d\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
+ "add x20, x20, #0x10\n"
"trn1 z4.d, z5.d, z6.d\n"
"trn2 z5.d, z5.d, z6.d\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
- ".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
- ".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
".inst 0x45079898 // smmla z24.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
- "sub x27, x27, #0x10\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
".inst 0x45069854 // smmla z20.s, z2.b, z6.b\n"
- "cmp x27, #0x10\n"
- "add x26, x26, #0x10\n"
".inst 0x4506989c // smmla z28.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
- "add x25, x25, #0x10\n"
".inst 0x45079851 // smmla z17.s, z2.b, z7.b\n"
".inst 0x45079899 // smmla z25.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
".inst 0x45069855 // smmla z21.s, z2.b, z6.b\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
".inst 0x4506989d // smmla z29.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
- "add x21, x21, #0x10\n"
".inst 0x45079852 // smmla z18.s, z2.b, z7.b\n"
".inst 0x4507989a // smmla z26.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
".inst 0x45069856 // smmla z22.s, z2.b, z6.b\n"
".inst 0x4506989e // smmla z30.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #16\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
".inst 0x45079853 // smmla z19.s, z2.b, z7.b\n"
".inst 0x4507989b // smmla z27.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-8, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-8, MUL VL]\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
".inst 0x45069857 // smmla z23.s, z2.b, z6.b\n"
".inst 0x4506989f // smmla z31.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #-7, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-7, MUL VL]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
".inst 0x45079870 // smmla z16.s, z3.b, z7.b\n"
".inst 0x450798b8 // smmla z24.s, z5.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-6, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-6, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
".inst 0x45069874 // smmla z20.s, z3.b, z6.b\n"
".inst 0x450698bc // smmla z28.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #-5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-5, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
".inst 0x45079871 // smmla z17.s, z3.b, z7.b\n"
".inst 0x450798b9 // smmla z25.s, z5.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-4, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
".inst 0x45069875 // smmla z21.s, z3.b, z6.b\n"
".inst 0x450698bd // smmla z29.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #-3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-3, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
".inst 0x45079872 // smmla z18.s, z3.b, z7.b\n"
".inst 0x450798ba // smmla z26.s, z5.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-2, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-2, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
".inst 0x45069876 // smmla z22.s, z3.b, z6.b\n"
".inst 0x450698be // smmla z30.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #-1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-1, MUL VL]\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x45079873 // smmla z19.s, z3.b, z7.b\n"
".inst 0x450798bb // smmla z27.s, z5.b, z7.b\n"
@@ -2009,81 +2009,81 @@ void sve_hybrid_s8qs_mmla_6x4VL (
".inst 0x450698bf // smmla z31.s, z5.b, z6.b\n"
"bgt 72b\n"
"73:" // Height 6: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "subs x26, x26, #0x8\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1rqb { z3.b }, p0/Z, [x24]\n"
- "ld1rqb { z4.b }, p0/Z, [x23]\n"
+ "ld1rqb { z3.b }, p0/Z, [x23]\n"
"trn2 z1.d, z1.d, z2.d\n"
+ "ld1rqb { z4.b }, p0/Z, [x22]\n"
+ "ld1rqb { z5.b }, p0/Z, [x21]\n"
+ ".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
+ "ld1rqb { z6.b }, p0/Z, [x20]\n"
"trn1 z2.d, z3.d, z4.d\n"
- "ld1rqb { z5.b }, p0/Z, [x22]\n"
- "ld1rqb { z6.b }, p0/Z, [x21]\n"
"trn2 z3.d, z3.d, z4.d\n"
"trn1 z4.d, z5.d, z6.d\n"
"trn2 z5.d, z5.d, z6.d\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
- ".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
".inst 0x45079898 // smmla z24.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
- "subs x27, x27, #0x8\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
".inst 0x45069854 // smmla z20.s, z2.b, z6.b\n"
".inst 0x4506989c // smmla z28.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
".inst 0x45079851 // smmla z17.s, z2.b, z7.b\n"
".inst 0x45079899 // smmla z25.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
".inst 0x45069855 // smmla z21.s, z2.b, z6.b\n"
".inst 0x4506989d // smmla z29.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
".inst 0x45079852 // smmla z18.s, z2.b, z7.b\n"
".inst 0x4507989a // smmla z26.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
".inst 0x45069856 // smmla z22.s, z2.b, z6.b\n"
".inst 0x4506989e // smmla z30.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
- "addvl x9, x9, #8\n"
".inst 0x45079853 // smmla z19.s, z2.b, z7.b\n"
".inst 0x4507989b // smmla z27.s, z4.b, z7.b\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
".inst 0x45069857 // smmla z23.s, z2.b, z6.b\n"
".inst 0x4506989f // smmla z31.s, z4.b, z6.b\n"
"ble 74f\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
".inst 0x45079870 // smmla z16.s, z3.b, z7.b\n"
".inst 0x450798b8 // smmla z24.s, z5.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
".inst 0x45069874 // smmla z20.s, z3.b, z6.b\n"
".inst 0x450698bc // smmla z28.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
".inst 0x45079871 // smmla z17.s, z3.b, z7.b\n"
".inst 0x450798b9 // smmla z25.s, z5.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
".inst 0x45069875 // smmla z21.s, z3.b, z6.b\n"
".inst 0x450698bd // smmla z29.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
".inst 0x45079872 // smmla z18.s, z3.b, z7.b\n"
".inst 0x450798ba // smmla z26.s, z5.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
".inst 0x45069876 // smmla z22.s, z3.b, z6.b\n"
".inst 0x450698be // smmla z30.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #8\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x45079873 // smmla z19.s, z3.b, z7.b\n"
".inst 0x450798bb // smmla z27.s, z5.b, z7.b\n"
@@ -2091,33 +2091,33 @@ void sve_hybrid_s8qs_mmla_6x4VL (
".inst 0x45069877 // smmla z23.s, z3.b, z6.b\n"
".inst 0x450698bf // smmla z31.s, z5.b, z6.b\n"
"74:" // Height 6: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 69b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 z7.d, z8.d, z12.d\n"
- "add x24, x11, x20\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 z8.d, z8.d, z12.d\n"
+ "ld1w { z0.s }, p2/Z, [x11]\n"
+ "add x23, x9, x19\n"
"uzp1 z12.d, z9.d, z13.d\n"
+ "ld1w { z1.s }, p2/Z, [x11, #1, MUL VL]\n"
"uzp2 z9.d, z9.d, z13.d\n"
- "add x23, x24, x20\n"
- "ld1w { z0.s }, p2/Z, [x14]\n"
+ "ld1w { z2.s }, p2/Z, [x11, #2, MUL VL]\n"
+ "add x22, x23, x19\n"
"uzp1 z13.d, z10.d, z14.d\n"
+ "ld1w { z3.s }, p2/Z, [x11, #3, MUL VL]\n"
+ "add x21, x22, x19\n"
"uzp2 z10.d, z10.d, z14.d\n"
- "ld1w { z1.s }, p2/Z, [x14, #1, MUL VL]\n"
- "ld1w { z2.s }, p2/Z, [x14, #2, MUL VL]\n"
+ "add x20, x21, x19\n"
"uzp1 z14.d, z11.d, z15.d\n"
+ "add x19, x20, x19\n"
"uzp2 z11.d, z11.d, z15.d\n"
- "ld1w { z3.s }, p2/Z, [x14, #3, MUL VL]\n"
- "add x22, x23, x20\n"
+ "addvl x11, x11, #4\n"
"uzp1 z15.d, z16.d, z20.d\n"
"uzp2 z16.d, z16.d, z20.d\n"
- "add x21, x22, x20\n"
- "add x20, x21, x20\n"
"uzp1 z20.d, z17.d, z21.d\n"
"uzp2 z17.d, z17.d, z21.d\n"
- "addvl x14, x14, #4\n"
"uzp1 z21.d, z18.d, z22.d\n"
"uzp2 z18.d, z18.d, z22.d\n"
"uzp1 z22.d, z19.d, z23.d\n"
@@ -2163,20 +2163,20 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"ld1w { z2.s }, p2/Z, [x12, #2, MUL VL]\n"
"ld1w { z6.s }, p2/Z, [x13, #2, MUL VL]\n"
"ld1w { z3.s }, p2/Z, [x12, #3, MUL VL]\n"
- "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x12, x12, #4\n"
+ "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
"b 76f\n"
"75:" // Height 6: per layer parameters
- "add x26, %x[qp], %[per_layer_right_shift]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z0.s }, p2/Z, [x26]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
+ "add x24, %x[qp], %[per_layer_right_shift]\n"
+ "ld1rw { z0.s }, p2/Z, [x24]\n"
"mov z1.d, z0.d\n"
- "mov z5.d, z4.d\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
"mov z2.d, z0.d\n"
- "mov z6.d, z4.d\n"
"mov z3.d, z0.d\n"
+ "mov z5.d, z4.d\n"
+ "mov z6.d, z4.d\n"
"mov z7.d, z4.d\n"
"76:" // Height 6: parameters loaded
".inst 0x04a477ff // sqrdmulh z31.s, z31.s, z4.s\n"
@@ -2207,223 +2207,223 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"and z4.d, z31.d, z0.d\n"
"and z5.d, z12.d, z1.d\n"
"and z6.d, z13.d, z2.d\n"
- "and z7.d, z14.d, z3.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
"sqadd z31.s, z31.s, z4.s\n"
"sqadd z12.s, z12.s, z5.s\n"
"sqadd z13.s, z13.s, z6.s\n"
- "sqadd z14.s, z14.s, z7.s\n"
+ "and z7.d, z14.d, z3.d\n"
"and z4.d, z8.d, z0.d\n"
"and z5.d, z9.d, z1.d\n"
- "and z6.d, z10.d, z2.d\n"
- "and z7.d, z11.d, z3.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
- "asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
+ "sqadd z14.s, z14.s, z7.s\n"
"sqadd z8.s, z8.s, z4.s\n"
"sqadd z9.s, z9.s, z5.s\n"
+ "and z6.d, z10.d, z2.d\n"
+ "and z7.d, z11.d, z3.d\n"
+ "and z4.d, z15.d, z0.d\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ "asr z4.s, z4.s, #0x1f\n"
"sqadd z10.s, z10.s, z6.s\n"
"sqadd z11.s, z11.s, z7.s\n"
- "and z4.d, z15.d, z0.d\n"
+ "sqadd z15.s, z15.s, z4.s\n"
"and z5.d, z20.d, z1.d\n"
"and z6.d, z21.d, z2.d\n"
"and z7.d, z22.d, z3.d\n"
- "asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
"asr z7.s, z7.s, #0x1f\n"
- "sqadd z15.s, z15.s, z4.s\n"
"sqadd z20.s, z20.s, z5.s\n"
"sqadd z21.s, z21.s, z6.s\n"
"sqadd z22.s, z22.s, z7.s\n"
"and z4.d, z16.d, z0.d\n"
"and z5.d, z17.d, z1.d\n"
"and z6.d, z18.d, z2.d\n"
- "and z7.d, z19.d, z3.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
"sqadd z16.s, z16.s, z4.s\n"
"sqadd z17.s, z17.s, z5.s\n"
"sqadd z18.s, z18.s, z6.s\n"
- "sqadd z19.s, z19.s, z7.s\n"
+ "and z7.d, z19.d, z3.d\n"
"and z4.d, z23.d, z0.d\n"
"and z5.d, z28.d, z1.d\n"
- "and z6.d, z29.d, z2.d\n"
- "and z7.d, z30.d, z3.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
- "asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
+ "sqadd z19.s, z19.s, z7.s\n"
"sqadd z23.s, z23.s, z4.s\n"
"sqadd z28.s, z28.s, z5.s\n"
+ "and z6.d, z29.d, z2.d\n"
+ "and z7.d, z30.d, z3.d\n"
+ "and z4.d, z24.d, z0.d\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ "asr z4.s, z4.s, #0x1f\n"
"sqadd z29.s, z29.s, z6.s\n"
"sqadd z30.s, z30.s, z7.s\n"
- "and z4.d, z24.d, z0.d\n"
+ "sqadd z24.s, z24.s, z4.s\n"
"and z5.d, z25.d, z1.d\n"
"and z6.d, z26.d, z2.d\n"
"and z7.d, z27.d, z3.d\n"
- "asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
"asr z7.s, z7.s, #0x1f\n"
- "sqadd z24.s, z24.s, z4.s\n"
"sqadd z25.s, z25.s, z5.s\n"
"sqadd z26.s, z26.s, z6.s\n"
"sqadd z27.s, z27.s, z7.s\n"
"77:" // Height 6: no shift correction
- "add x25, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
".inst 0x4482881f // srshl z31.s, p2/M, z31.s, z0.s\n"
- "add z31.s, z31.s, z4.s\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
".inst 0x4482882c // srshl z12.s, p2/M, z12.s, z1.s\n"
+ "add x24, %x[qp], %[minval]\n"
".inst 0x4482884d // srshl z13.s, p2/M, z13.s, z2.s\n"
- "add z12.s, z12.s, z4.s\n"
- "add z13.s, z13.s, z4.s\n"
+ "ld1rw { z5.s }, p2/Z, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
".inst 0x4482886e // srshl z14.s, p2/M, z14.s, z3.s\n"
+ "ld1rw { z6.s }, p2/Z, [x24]\n"
".inst 0x44828808 // srshl z8.s, p2/M, z8.s, z0.s\n"
+ "add z31.s, z31.s, z4.s\n"
+ "add z12.s, z12.s, z4.s\n"
+ "add z13.s, z13.s, z4.s\n"
"add z14.s, z14.s, z4.s\n"
"add z8.s, z8.s, z4.s\n"
- ".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
- ".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
- "add z9.s, z9.s, z4.s\n"
- "add z10.s, z10.s, z4.s\n"
- ".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
- ".inst 0x4482880f // srshl z15.s, p2/M, z15.s, z0.s\n"
- "add z11.s, z11.s, z4.s\n"
- "add z15.s, z15.s, z4.s\n"
- ".inst 0x44828834 // srshl z20.s, p2/M, z20.s, z1.s\n"
- ".inst 0x44828855 // srshl z21.s, p2/M, z21.s, z2.s\n"
- "add z20.s, z20.s, z4.s\n"
- "add z21.s, z21.s, z4.s\n"
- ".inst 0x44828876 // srshl z22.s, p2/M, z22.s, z3.s\n"
- ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z22.s, z22.s, z4.s\n"
- "add z16.s, z16.s, z4.s\n"
- ".inst 0x44828831 // srshl z17.s, p2/M, z17.s, z1.s\n"
- ".inst 0x44828852 // srshl z18.s, p2/M, z18.s, z2.s\n"
- "add z17.s, z17.s, z4.s\n"
- "add z18.s, z18.s, z4.s\n"
- ".inst 0x44828873 // srshl z19.s, p2/M, z19.s, z3.s\n"
- ".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
- "add z19.s, z19.s, z4.s\n"
- "add z23.s, z23.s, z4.s\n"
- ".inst 0x4482883c // srshl z28.s, p2/M, z28.s, z1.s\n"
- ".inst 0x4482885d // srshl z29.s, p2/M, z29.s, z2.s\n"
- "add z28.s, z28.s, z4.s\n"
- "add z29.s, z29.s, z4.s\n"
- ".inst 0x4482887e // srshl z30.s, p2/M, z30.s, z3.s\n"
- ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
- "add z30.s, z30.s, z4.s\n"
- "add z24.s, z24.s, z4.s\n"
- ".inst 0x44828839 // srshl z25.s, p2/M, z25.s, z1.s\n"
- ".inst 0x4482885a // srshl z26.s, p2/M, z26.s, z2.s\n"
- "add z25.s, z25.s, z4.s\n"
- "add z26.s, z26.s, z4.s\n"
- ".inst 0x4482887b // srshl z27.s, p2/M, z27.s, z3.s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x25]\n"
- "add z27.s, z27.s, z4.s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x25]\n"
"smin z31.s, p2/M, z31.s, z6.s\n"
"smin z12.s, p2/M, z12.s, z6.s\n"
"smin z13.s, p2/M, z13.s, z6.s\n"
"smin z14.s, p2/M, z14.s, z6.s\n"
- "smin z8.s, p2/M, z8.s, z6.s\n"
- "smin z9.s, p2/M, z9.s, z6.s\n"
- "smin z10.s, p2/M, z10.s, z6.s\n"
- "smin z11.s, p2/M, z11.s, z6.s\n"
- "smin z15.s, p2/M, z15.s, z6.s\n"
- "smin z20.s, p2/M, z20.s, z6.s\n"
- "smin z21.s, p2/M, z21.s, z6.s\n"
- "smin z22.s, p2/M, z22.s, z6.s\n"
- "smin z16.s, p2/M, z16.s, z6.s\n"
- "smin z17.s, p2/M, z17.s, z6.s\n"
- "smin z18.s, p2/M, z18.s, z6.s\n"
- "smin z19.s, p2/M, z19.s, z6.s\n"
- "smin z23.s, p2/M, z23.s, z6.s\n"
- "smin z28.s, p2/M, z28.s, z6.s\n"
- "smin z29.s, p2/M, z29.s, z6.s\n"
- "smin z30.s, p2/M, z30.s, z6.s\n"
- "smin z24.s, p2/M, z24.s, z6.s\n"
- "smin z25.s, p2/M, z25.s, z6.s\n"
- "smin z26.s, p2/M, z26.s, z6.s\n"
- "smin z27.s, p2/M, z27.s, z6.s\n"
"smax z31.s, p2/M, z31.s, z5.s\n"
"smax z12.s, p2/M, z12.s, z5.s\n"
"smax z13.s, p2/M, z13.s, z5.s\n"
- "uzp1 z31.h, z31.h, z12.h\n"
"smax z14.s, p2/M, z14.s, z5.s\n"
- "smax z8.s, p2/M, z8.s, z5.s\n"
+ "smin z8.s, p2/M, z8.s, z6.s\n"
+ "uzp1 z31.h, z31.h, z12.h\n"
+ ".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
"uzp1 z12.h, z13.h, z14.h\n"
+ "smax z8.s, p2/M, z8.s, z5.s\n"
"uzp1 z31.b, z31.b, z12.b\n"
+ "st1b { z31.b }, p1, [x9]\n"
+ "add z9.s, z9.s, z4.s\n"
+ "addvl x9, x9, #1\n"
+ ".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
+ ".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
+ ".inst 0x4482880f // srshl z15.s, p2/M, z15.s, z0.s\n"
+ "smin z9.s, p2/M, z9.s, z6.s\n"
+ ".inst 0x44828834 // srshl z20.s, p2/M, z20.s, z1.s\n"
+ "add z10.s, z10.s, z4.s\n"
+ "add z11.s, z11.s, z4.s\n"
+ "add z15.s, z15.s, z4.s\n"
+ "add z20.s, z20.s, z4.s\n"
"smax z9.s, p2/M, z9.s, z5.s\n"
- "smax z10.s, p2/M, z10.s, z5.s\n"
+ "smin z10.s, p2/M, z10.s, z6.s\n"
+ "smin z11.s, p2/M, z11.s, z6.s\n"
+ "smin z15.s, p2/M, z15.s, z6.s\n"
"uzp1 z8.h, z8.h, z9.h\n"
- "st1b { z31.b }, p1, [x11]\n"
+ "smax z10.s, p2/M, z10.s, z5.s\n"
"smax z11.s, p2/M, z11.s, z5.s\n"
"smax z15.s, p2/M, z15.s, z5.s\n"
+ "smin z20.s, p2/M, z20.s, z6.s\n"
+ ".inst 0x44828855 // srshl z21.s, p2/M, z21.s, z2.s\n"
"uzp1 z9.h, z10.h, z11.h\n"
+ ".inst 0x44828876 // srshl z22.s, p2/M, z22.s, z3.s\n"
"uzp1 z8.b, z8.b, z9.b\n"
+ "st1b { z8.b }, p1, [x23]\n"
+ "add z21.s, z21.s, z4.s\n"
"smax z20.s, p2/M, z20.s, z5.s\n"
- "smax z21.s, p2/M, z21.s, z5.s\n"
+ "add z22.s, z22.s, z4.s\n"
+ ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
+ "smin z21.s, p2/M, z21.s, z6.s\n"
"uzp1 z15.h, z15.h, z20.h\n"
- "st1b { z8.b }, p1, [x24]\n"
+ "smin z22.s, p2/M, z22.s, z6.s\n"
+ "add z16.s, z16.s, z4.s\n"
+ "smax z21.s, p2/M, z21.s, z5.s\n"
+ ".inst 0x44828831 // srshl z17.s, p2/M, z17.s, z1.s\n"
"smax z22.s, p2/M, z22.s, z5.s\n"
- "smax z16.s, p2/M, z16.s, z5.s\n"
+ "smin z16.s, p2/M, z16.s, z6.s\n"
+ ".inst 0x44828852 // srshl z18.s, p2/M, z18.s, z2.s\n"
+ "add z17.s, z17.s, z4.s\n"
"uzp1 z20.h, z21.h, z22.h\n"
+ "smax z16.s, p2/M, z16.s, z5.s\n"
+ "add z18.s, z18.s, z4.s\n"
"uzp1 z15.b, z15.b, z20.b\n"
+ "st1b { z15.b }, p1, [x22]\n"
+ "smin z17.s, p2/M, z17.s, z6.s\n"
+ "smin z18.s, p2/M, z18.s, z6.s\n"
+ ".inst 0x44828873 // srshl z19.s, p2/M, z19.s, z3.s\n"
+ ".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
+ ".inst 0x4482883c // srshl z28.s, p2/M, z28.s, z1.s\n"
"smax z17.s, p2/M, z17.s, z5.s\n"
- "smax z18.s, p2/M, z18.s, z5.s\n"
+ "add z19.s, z19.s, z4.s\n"
+ "add z23.s, z23.s, z4.s\n"
+ "add z28.s, z28.s, z4.s\n"
"uzp1 z16.h, z16.h, z17.h\n"
- "st1b { z15.b }, p1, [x23]\n"
+ "smax z18.s, p2/M, z18.s, z5.s\n"
+ "smin z19.s, p2/M, z19.s, z6.s\n"
+ "smin z23.s, p2/M, z23.s, z6.s\n"
+ "smin z28.s, p2/M, z28.s, z6.s\n"
+ ".inst 0x4482885d // srshl z29.s, p2/M, z29.s, z2.s\n"
"smax z19.s, p2/M, z19.s, z5.s\n"
"smax z23.s, p2/M, z23.s, z5.s\n"
+ "smax z28.s, p2/M, z28.s, z5.s\n"
+ "add z29.s, z29.s, z4.s\n"
"uzp1 z17.h, z18.h, z19.h\n"
+ ".inst 0x4482887e // srshl z30.s, p2/M, z30.s, z3.s\n"
+ "uzp1 z23.h, z23.h, z28.h\n"
"uzp1 z16.b, z16.b, z17.b\n"
- "smax z28.s, p2/M, z28.s, z5.s\n"
+ "st1b { z16.b }, p1, [x21]\n"
+ "add z30.s, z30.s, z4.s\n"
+ "smin z29.s, p2/M, z29.s, z6.s\n"
+ ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
+ ".inst 0x44828839 // srshl z25.s, p2/M, z25.s, z1.s\n"
+ "smin z30.s, p2/M, z30.s, z6.s\n"
"smax z29.s, p2/M, z29.s, z5.s\n"
- "uzp1 z23.h, z23.h, z28.h\n"
- "st1b { z16.b }, p1, [x22]\n"
+ "add z24.s, z24.s, z4.s\n"
+ "add z25.s, z25.s, z4.s\n"
"smax z30.s, p2/M, z30.s, z5.s\n"
- "smax z24.s, p2/M, z24.s, z5.s\n"
+ "smin z24.s, p2/M, z24.s, z6.s\n"
+ "smin z25.s, p2/M, z25.s, z6.s\n"
+ ".inst 0x4482885a // srshl z26.s, p2/M, z26.s, z2.s\n"
"uzp1 z28.h, z29.h, z30.h\n"
+ "smax z24.s, p2/M, z24.s, z5.s\n"
"uzp1 z23.b, z23.b, z28.b\n"
+ "st1b { z23.b }, p1, [x20]\n"
+ "add z26.s, z26.s, z4.s\n"
"smax z25.s, p2/M, z25.s, z5.s\n"
- "smax z26.s, p2/M, z26.s, z5.s\n"
+ ".inst 0x4482887b // srshl z27.s, p2/M, z27.s, z3.s\n"
+ "smin z26.s, p2/M, z26.s, z6.s\n"
"uzp1 z24.h, z24.h, z25.h\n"
- "st1b { z23.b }, p1, [x21]\n"
+ "add z27.s, z27.s, z4.s\n"
+ "smax z26.s, p2/M, z26.s, z5.s\n"
+ "smin z27.s, p2/M, z27.s, z6.s\n"
"smax z27.s, p2/M, z27.s, z5.s\n"
"uzp1 z25.h, z26.h, z27.h\n"
"uzp1 z24.b, z24.b, z25.b\n"
- "st1b { z24.b }, p1, [x20]\n"
- "addvl x11, x11, #1\n"
+ "st1b { z24.b }, p1, [x19]\n"
"78:" // Height 6: Writeback done
"decw x10, ALL, MUL #4\n"
"cmp x10, XZR\n"
"bgt 67b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 80f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 79f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"79:" // Update direct input
- "mov x20, #0x6\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x6\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"80:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [flags] "r" (flags), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_multiplier_ptr] "I" (offsetof(KernelArgs, multiplier_ptr)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_shift_ptr] "I" (offsetof(KernelArgs, shift_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
- : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_6x4VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_6x4VL/a64fx.cpp
index 51e9aa1b40..e0fea96ef3 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_6x4VL/a64fx.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_6x4VL/a64fx.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -87,23 +87,23 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"cmp %x[M], #0x2\n"
"bgt 21f\n"
"beq 11f\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "mov x20, #0x0\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "incw x20\n"
- "whilelt p0.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p0.s, x19, x10\n"
"tbz %x[flags], #0, 3f\n"
- "ld1w { z8.s }, p3/Z, [x9]\n"
- "ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
+ "ld1w { z8.s }, p3/Z, [x28]\n"
+ "ld1w { z9.s }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p1/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z11.s }, p0/Z, [x28, #3, MUL VL]\n"
"b 4f\n"
"3:" // Height 1: no accumulate
"mov z8.s, #0x0\n"
@@ -111,87 +111,87 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"4:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"5:" // Height 1: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 6f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 7f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 7f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
"b 7f\n"
"6:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"7:" // Height 1: input setup done
- "subs x27, x27, #0x4\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1b { z6.b }, p4/Z, [x10]\n"
- "ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1b { z6.b }, p4/Z, [x9]\n"
+ "ld1b { z7.b }, p4/Z, [x9, #1, MUL VL]\n"
"ble 9f\n"
"8:" // Height 1: Multiply loop: Main loop
"sdot z8.s, z6.b, z0.b\n"
"sdot z9.s, z7.b, z0.b\n"
- "ld1b { z6.b }, p4/Z, [x10, #2, MUL VL]\n"
- "ld1b { z7.b }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
- "add x26, x26, #0x4\n"
+ "ld1b { z6.b }, p4/Z, [x9, #2, MUL VL]\n"
+ "ld1b { z7.b }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
+ "add x25, x25, #0x4\n"
"sdot z10.s, z6.b, z0.b\n"
"sdot z11.s, z7.b, z0.b\n"
- "subs x27, x27, #0x4\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1b { z6.b }, p4/Z, [x10]\n"
- "ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1b { z6.b }, p4/Z, [x9]\n"
+ "ld1b { z7.b }, p4/Z, [x9, #1, MUL VL]\n"
"bgt 8b\n"
"9:" // Height 1: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"sdot z8.s, z6.b, z0.b\n"
"sdot z9.s, z7.b, z0.b\n"
- "ld1b { z6.b }, p4/Z, [x10, #2, MUL VL]\n"
- "ld1b { z7.b }, p4/Z, [x10, #3, MUL VL]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ld1b { z6.b }, p4/Z, [x9, #2, MUL VL]\n"
+ "ld1b { z7.b }, p4/Z, [x9, #3, MUL VL]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"sdot z10.s, z6.b, z0.b\n"
"sdot z11.s, z7.b, z0.b\n"
- "addvl x10, x10, #4\n"
- "bne 5b\n"
- "st1w { z8.s }, p3, [x9]\n"
- "st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p1, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p0, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
+ "bne 5b\n"
+ "st1w { z8.s }, p3, [x28]\n"
+ "st1w { z9.s }, p2, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p1, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p0, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"10:" // Height 1: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 2b\n"
"b 62f\n"
"11:" // Height 2
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"12:" // Height 2: Column loop
- "mov x20, #0x0\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "incw x20\n"
- "whilelt p0.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p0.s, x19, x10\n"
"tbz %x[flags], #0, 13f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "ld1w { z8.s }, p3/Z, [x9]\n"
- "ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p3/Z, [x24]\n"
- "ld1w { z13.s }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1w { z14.s }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1w { z15.s }, p0/Z, [x24, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "ld1w { z8.s }, p3/Z, [x28]\n"
+ "ld1w { z9.s }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p1/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z11.s }, p0/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x23]\n"
+ "ld1w { z13.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z14.s }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p0/Z, [x23, #3, MUL VL]\n"
"b 14f\n"
"13:" // Height 2: no accumulate
"mov z8.s, #0x0\n"
@@ -203,112 +203,112 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
"14:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"15:" // Height 2: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 16f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 17f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 17f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
"b 17f\n"
"16:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
"17:" // Height 2: input setup done
- "subs x27, x27, #0x4\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
- "ld1b { z6.b }, p4/Z, [x10]\n"
- "ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1b { z6.b }, p4/Z, [x9]\n"
+ "ld1b { z7.b }, p4/Z, [x9, #1, MUL VL]\n"
"ble 19f\n"
"18:" // Height 2: Multiply loop: Main loop
"sdot z8.s, z6.b, z0.b\n"
"sdot z12.s, z6.b, z1.b\n"
- "ld1b { z6.b }, p4/Z, [x10, #2, MUL VL]\n"
- "add x26, x26, #0x4\n"
+ "ld1b { z6.b }, p4/Z, [x9, #2, MUL VL]\n"
+ "add x25, x25, #0x4\n"
"sdot z9.s, z7.b, z0.b\n"
"sdot z13.s, z7.b, z1.b\n"
- "ld1b { z7.b }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
- "subs x27, x27, #0x4\n"
- "add x25, x25, #0x4\n"
+ "ld1b { z7.b }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
+ "subs x26, x26, #0x4\n"
+ "add x24, x24, #0x4\n"
"sdot z10.s, z6.b, z0.b\n"
"sdot z14.s, z6.b, z1.b\n"
"sdot z11.s, z7.b, z0.b\n"
"sdot z15.s, z7.b, z1.b\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
- "ld1b { z6.b }, p4/Z, [x10]\n"
- "ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1b { z6.b }, p4/Z, [x9]\n"
+ "ld1b { z7.b }, p4/Z, [x9, #1, MUL VL]\n"
"bgt 18b\n"
"19:" // Height 2: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"sdot z8.s, z6.b, z0.b\n"
"sdot z12.s, z6.b, z1.b\n"
- "ld1b { z6.b }, p4/Z, [x10, #2, MUL VL]\n"
+ "ld1b { z6.b }, p4/Z, [x9, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b\n"
"sdot z13.s, z7.b, z1.b\n"
- "ld1b { z7.b }, p4/Z, [x10, #3, MUL VL]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ld1b { z7.b }, p4/Z, [x9, #3, MUL VL]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"sdot z10.s, z6.b, z0.b\n"
"sdot z14.s, z6.b, z1.b\n"
- "addvl x10, x10, #4\n"
+ "addvl x9, x9, #4\n"
"sdot z11.s, z7.b, z0.b\n"
"sdot z15.s, z7.b, z1.b\n"
"bne 15b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "st1w { z8.s }, p3, [x9]\n"
- "st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p1, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p0, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p3, [x24]\n"
- "st1w { z13.s }, p2, [x24, #1, MUL VL]\n"
- "st1w { z14.s }, p1, [x24, #2, MUL VL]\n"
- "st1w { z15.s }, p0, [x24, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "st1w { z8.s }, p3, [x28]\n"
+ "st1w { z9.s }, p2, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p1, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p0, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z12.s }, p3, [x23]\n"
+ "st1w { z13.s }, p2, [x23, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x23, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x23, #3, MUL VL]\n"
"20:" // Height 2: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 12b\n"
"b 62f\n"
"21:" // Height 3
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"22:" // Height 3: Column loop
- "mov x20, #0x0\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "incw x20\n"
- "whilelt p0.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p0.s, x19, x10\n"
"tbz %x[flags], #0, 23f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z8.s }, p3/Z, [x9]\n"
- "ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p3/Z, [x24]\n"
- "ld1w { z13.s }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1w { z14.s }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1w { z15.s }, p0/Z, [x24, #3, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x23]\n"
- "ld1w { z17.s }, p2/Z, [x23, #1, MUL VL]\n"
- "ld1w { z18.s }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1w { z19.s }, p0/Z, [x23, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z8.s }, p3/Z, [x28]\n"
+ "ld1w { z9.s }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p1/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z11.s }, p0/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x23]\n"
+ "ld1w { z13.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z14.s }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z16.s }, p3/Z, [x22]\n"
+ "ld1w { z17.s }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z18.s }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z19.s }, p0/Z, [x22, #3, MUL VL]\n"
"b 24f\n"
"23:" // Height 3: no accumulate
"mov z8.s, #0x0\n"
@@ -324,74 +324,74 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"24:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"25:" // Height 3: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 26f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 27f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 27f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
"b 27f\n"
"26:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
"27:" // Height 3: input setup done
- "subs x27, x27, #0x4\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
- "ld1rw { z2.s }, p4/Z, [x24]\n"
- "ld1b { z6.b }, p4/Z, [x10]\n"
- "ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1b { z6.b }, p4/Z, [x9]\n"
+ "ld1b { z7.b }, p4/Z, [x9, #1, MUL VL]\n"
"ble 29f\n"
"28:" // Height 3: Multiply loop: Main loop
"sdot z8.s, z6.b, z0.b\n"
"sdot z12.s, z6.b, z1.b\n"
- "add x26, x26, #0x4\n"
- "subs x27, x27, #0x4\n"
+ "add x25, x25, #0x4\n"
+ "subs x26, x26, #0x4\n"
"sdot z16.s, z6.b, z2.b\n"
"sdot z9.s, z7.b, z0.b\n"
- "ld1b { z6.b }, p4/Z, [x10, #2, MUL VL]\n"
- "add x25, x25, #0x4\n"
+ "ld1b { z6.b }, p4/Z, [x9, #2, MUL VL]\n"
+ "add x24, x24, #0x4\n"
"sdot z13.s, z7.b, z1.b\n"
"sdot z17.s, z7.b, z2.b\n"
- "ld1b { z7.b }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
- "add x24, x24, #0x4\n"
+ "ld1b { z7.b }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
+ "add x23, x23, #0x4\n"
"sdot z10.s, z6.b, z0.b\n"
"sdot z14.s, z6.b, z1.b\n"
"sdot z18.s, z6.b, z2.b\n"
"sdot z11.s, z7.b, z0.b\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1b { z6.b }, p4/Z, [x10]\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1b { z6.b }, p4/Z, [x9]\n"
"sdot z15.s, z7.b, z1.b\n"
"sdot z19.s, z7.b, z2.b\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
- "ld1rw { z2.s }, p4/Z, [x24]\n"
- "ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1b { z7.b }, p4/Z, [x9, #1, MUL VL]\n"
"bgt 28b\n"
"29:" // Height 3: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"sdot z8.s, z6.b, z0.b\n"
"sdot z12.s, z6.b, z1.b\n"
- "add x28, x28, #0x1\n"
+ "add x27, x27, #0x1\n"
"sdot z16.s, z6.b, z2.b\n"
"sdot z9.s, z7.b, z0.b\n"
- "ld1b { z6.b }, p4/Z, [x10, #2, MUL VL]\n"
- "cmp x28, x20\n"
+ "ld1b { z6.b }, p4/Z, [x9, #2, MUL VL]\n"
+ "cmp x27, x19\n"
"sdot z13.s, z7.b, z1.b\n"
"sdot z17.s, z7.b, z2.b\n"
- "ld1b { z7.b }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"sdot z10.s, z6.b, z0.b\n"
"sdot z14.s, z6.b, z1.b\n"
"sdot z18.s, z6.b, z2.b\n"
@@ -399,61 +399,61 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"sdot z15.s, z7.b, z1.b\n"
"sdot z19.s, z7.b, z2.b\n"
"bne 25b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "st1w { z8.s }, p3, [x9]\n"
- "st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p1, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p0, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p3, [x24]\n"
- "st1w { z13.s }, p2, [x24, #1, MUL VL]\n"
- "st1w { z14.s }, p1, [x24, #2, MUL VL]\n"
- "st1w { z15.s }, p0, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p3, [x23]\n"
- "st1w { z17.s }, p2, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p1, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p0, [x23, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "st1w { z8.s }, p3, [x28]\n"
+ "st1w { z9.s }, p2, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p1, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p0, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z12.s }, p3, [x23]\n"
+ "st1w { z13.s }, p2, [x23, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x23, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x23, #3, MUL VL]\n"
+ "st1w { z16.s }, p3, [x22]\n"
+ "st1w { z17.s }, p2, [x22, #1, MUL VL]\n"
+ "st1w { z18.s }, p1, [x22, #2, MUL VL]\n"
+ "st1w { z19.s }, p0, [x22, #3, MUL VL]\n"
"30:" // Height 3: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 22b\n"
"b 62f\n"
"31:" // Height 4
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"32:" // Height 4: Column loop
- "mov x20, #0x0\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "incw x20\n"
- "whilelt p0.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p0.s, x19, x10\n"
"tbz %x[flags], #0, 33f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z8.s }, p3/Z, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p3/Z, [x24]\n"
- "ld1w { z13.s }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1w { z14.s }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1w { z15.s }, p0/Z, [x24, #3, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x23]\n"
- "ld1w { z17.s }, p2/Z, [x23, #1, MUL VL]\n"
- "ld1w { z18.s }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1w { z19.s }, p0/Z, [x23, #3, MUL VL]\n"
- "ld1w { z20.s }, p3/Z, [x22]\n"
- "ld1w { z21.s }, p2/Z, [x22, #1, MUL VL]\n"
- "ld1w { z22.s }, p1/Z, [x22, #2, MUL VL]\n"
- "ld1w { z23.s }, p0/Z, [x22, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z8.s }, p3/Z, [x28]\n"
+ "ld1w { z9.s }, p2/Z, [x28, #1, MUL VL]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z10.s }, p1/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z11.s }, p0/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x23]\n"
+ "ld1w { z13.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z14.s }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z16.s }, p3/Z, [x22]\n"
+ "ld1w { z17.s }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z18.s }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z19.s }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z20.s }, p3/Z, [x21]\n"
+ "ld1w { z21.s }, p2/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z22.s }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z23.s }, p0/Z, [x21, #3, MUL VL]\n"
"b 34f\n"
"33:" // Height 4: no accumulate
"mov z8.s, #0x0\n"
@@ -473,86 +473,86 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"34:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"35:" // Height 4: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 36f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 37f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 37f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
"b 37f\n"
"36:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
"37:" // Height 4: input setup done
- "subs x27, x27, #0x4\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
- "ld1rw { z2.s }, p4/Z, [x24]\n"
- "ld1rw { z3.s }, p4/Z, [x23]\n"
- "ld1b { z6.b }, p4/Z, [x10]\n"
- "ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1rw { z3.s }, p4/Z, [x22]\n"
+ "ld1b { z6.b }, p4/Z, [x9]\n"
+ "ld1b { z7.b }, p4/Z, [x9, #1, MUL VL]\n"
"ble 39f\n"
"38:" // Height 4: Multiply loop: Main loop
"sdot z8.s, z6.b, z0.b\n"
"sdot z12.s, z6.b, z1.b\n"
- "add x26, x26, #0x4\n"
- "subs x27, x27, #0x4\n"
+ "add x25, x25, #0x4\n"
+ "subs x26, x26, #0x4\n"
"sdot z16.s, z6.b, z2.b\n"
"sdot z20.s, z6.b, z3.b\n"
- "ld1b { z6.b }, p4/Z, [x10, #2, MUL VL]\n"
- "add x25, x25, #0x4\n"
+ "ld1b { z6.b }, p4/Z, [x9, #2, MUL VL]\n"
+ "add x24, x24, #0x4\n"
"sdot z9.s, z7.b, z0.b\n"
"sdot z13.s, z7.b, z1.b\n"
- "add x24, x24, #0x4\n"
"add x23, x23, #0x4\n"
+ "add x22, x22, #0x4\n"
"sdot z17.s, z7.b, z2.b\n"
"sdot z21.s, z7.b, z3.b\n"
- "ld1b { z7.b }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"sdot z10.s, z6.b, z0.b\n"
"sdot z14.s, z6.b, z1.b\n"
"sdot z18.s, z6.b, z2.b\n"
"sdot z22.s, z6.b, z3.b\n"
- "ld1b { z6.b }, p4/Z, [x10]\n"
+ "ld1b { z6.b }, p4/Z, [x9]\n"
"sdot z11.s, z7.b, z0.b\n"
"sdot z15.s, z7.b, z1.b\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
"sdot z19.s, z7.b, z2.b\n"
"sdot z23.s, z7.b, z3.b\n"
- "ld1rw { z2.s }, p4/Z, [x24]\n"
- "ld1rw { z3.s }, p4/Z, [x23]\n"
- "ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1rw { z3.s }, p4/Z, [x22]\n"
+ "ld1b { z7.b }, p4/Z, [x9, #1, MUL VL]\n"
"bgt 38b\n"
"39:" // Height 4: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"sdot z8.s, z6.b, z0.b\n"
"sdot z12.s, z6.b, z1.b\n"
- "add x28, x28, #0x1\n"
+ "add x27, x27, #0x1\n"
"sdot z16.s, z6.b, z2.b\n"
"sdot z20.s, z6.b, z3.b\n"
- "ld1b { z6.b }, p4/Z, [x10, #2, MUL VL]\n"
- "cmp x28, x20\n"
+ "ld1b { z6.b }, p4/Z, [x9, #2, MUL VL]\n"
+ "cmp x27, x19\n"
"sdot z9.s, z7.b, z0.b\n"
"sdot z13.s, z7.b, z1.b\n"
"sdot z17.s, z7.b, z2.b\n"
"sdot z21.s, z7.b, z3.b\n"
- "ld1b { z7.b }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"sdot z10.s, z6.b, z0.b\n"
"sdot z14.s, z6.b, z1.b\n"
"sdot z18.s, z6.b, z2.b\n"
@@ -562,71 +562,71 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"sdot z19.s, z7.b, z2.b\n"
"sdot z23.s, z7.b, z3.b\n"
"bne 35b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "st1w { z8.s }, p3, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p1, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p0, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p3, [x24]\n"
- "st1w { z13.s }, p2, [x24, #1, MUL VL]\n"
- "st1w { z14.s }, p1, [x24, #2, MUL VL]\n"
- "st1w { z15.s }, p0, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p3, [x23]\n"
- "st1w { z17.s }, p2, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p1, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p0, [x23, #3, MUL VL]\n"
- "st1w { z20.s }, p3, [x22]\n"
- "st1w { z21.s }, p2, [x22, #1, MUL VL]\n"
- "st1w { z22.s }, p1, [x22, #2, MUL VL]\n"
- "st1w { z23.s }, p0, [x22, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "st1w { z8.s }, p3, [x28]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "st1w { z9.s }, p2, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p1, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p0, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z12.s }, p3, [x23]\n"
+ "st1w { z13.s }, p2, [x23, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x23, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x23, #3, MUL VL]\n"
+ "st1w { z16.s }, p3, [x22]\n"
+ "st1w { z17.s }, p2, [x22, #1, MUL VL]\n"
+ "st1w { z18.s }, p1, [x22, #2, MUL VL]\n"
+ "st1w { z19.s }, p0, [x22, #3, MUL VL]\n"
+ "st1w { z20.s }, p3, [x21]\n"
+ "st1w { z21.s }, p2, [x21, #1, MUL VL]\n"
+ "st1w { z22.s }, p1, [x21, #2, MUL VL]\n"
+ "st1w { z23.s }, p0, [x21, #3, MUL VL]\n"
"40:" // Height 4: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 32b\n"
"b 62f\n"
"41:" // Height 5
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"42:" // Height 5: Column loop
- "mov x20, #0x0\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "incw x20\n"
- "whilelt p0.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p0.s, x19, x10\n"
"tbz %x[flags], #0, 43f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z8.s }, p3/Z, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p3/Z, [x24]\n"
- "ld1w { z13.s }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1w { z14.s }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1w { z15.s }, p0/Z, [x24, #3, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x23]\n"
- "ld1w { z17.s }, p2/Z, [x23, #1, MUL VL]\n"
- "ld1w { z18.s }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1w { z19.s }, p0/Z, [x23, #3, MUL VL]\n"
- "ld1w { z20.s }, p3/Z, [x22]\n"
- "ld1w { z21.s }, p2/Z, [x22, #1, MUL VL]\n"
- "ld1w { z22.s }, p1/Z, [x22, #2, MUL VL]\n"
- "ld1w { z23.s }, p0/Z, [x22, #3, MUL VL]\n"
- "ld1w { z24.s }, p3/Z, [x21]\n"
- "ld1w { z25.s }, p2/Z, [x21, #1, MUL VL]\n"
- "ld1w { z26.s }, p1/Z, [x21, #2, MUL VL]\n"
- "ld1w { z27.s }, p0/Z, [x21, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z8.s }, p3/Z, [x28]\n"
+ "ld1w { z9.s }, p2/Z, [x28, #1, MUL VL]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
+ "ld1w { z10.s }, p1/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z11.s }, p0/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x23]\n"
+ "ld1w { z13.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z14.s }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z16.s }, p3/Z, [x22]\n"
+ "ld1w { z17.s }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z18.s }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z19.s }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z20.s }, p3/Z, [x21]\n"
+ "ld1w { z21.s }, p2/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z22.s }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z23.s }, p0/Z, [x21, #3, MUL VL]\n"
+ "ld1w { z24.s }, p3/Z, [x20]\n"
+ "ld1w { z25.s }, p2/Z, [x20, #1, MUL VL]\n"
+ "ld1w { z26.s }, p1/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z27.s }, p0/Z, [x20, #3, MUL VL]\n"
"b 44f\n"
"43:" // Height 5: no accumulate
"mov z8.s, #0x0\n"
@@ -650,98 +650,98 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"mov z26.s, #0x0\n"
"mov z27.s, #0x0\n"
"44:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"45:" // Height 5: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 46f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 47f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 47f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
"b 47f\n"
"46:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
"47:" // Height 5: input setup done
- "subs x27, x27, #0x4\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
- "ld1rw { z2.s }, p4/Z, [x24]\n"
- "ld1rw { z3.s }, p4/Z, [x23]\n"
- "ld1rw { z4.s }, p4/Z, [x22]\n"
- "ld1b { z6.b }, p4/Z, [x10]\n"
- "ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1rw { z3.s }, p4/Z, [x22]\n"
+ "ld1rw { z4.s }, p4/Z, [x21]\n"
+ "ld1b { z6.b }, p4/Z, [x9]\n"
+ "ld1b { z7.b }, p4/Z, [x9, #1, MUL VL]\n"
"ble 49f\n"
"48:" // Height 5: Multiply loop: Main loop
"sdot z8.s, z6.b, z0.b\n"
"sdot z12.s, z6.b, z1.b\n"
- "add x26, x26, #0x4\n"
- "subs x27, x27, #0x4\n"
+ "add x25, x25, #0x4\n"
+ "subs x26, x26, #0x4\n"
"sdot z16.s, z6.b, z2.b\n"
"sdot z20.s, z6.b, z3.b\n"
- "add x25, x25, #0x4\n"
"add x24, x24, #0x4\n"
+ "add x23, x23, #0x4\n"
"sdot z24.s, z6.b, z4.b\n"
"sdot z9.s, z7.b, z0.b\n"
- "ld1b { z6.b }, p4/Z, [x10, #2, MUL VL]\n"
- "add x23, x23, #0x4\n"
+ "ld1b { z6.b }, p4/Z, [x9, #2, MUL VL]\n"
+ "add x22, x22, #0x4\n"
"sdot z13.s, z7.b, z1.b\n"
"sdot z17.s, z7.b, z2.b\n"
- "add x22, x22, #0x4\n"
+ "add x21, x21, #0x4\n"
"sdot z21.s, z7.b, z3.b\n"
"sdot z25.s, z7.b, z4.b\n"
- "ld1b { z7.b }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"sdot z10.s, z6.b, z0.b\n"
"sdot z14.s, z6.b, z1.b\n"
"sdot z18.s, z6.b, z2.b\n"
"sdot z22.s, z6.b, z3.b\n"
"sdot z26.s, z6.b, z4.b\n"
"sdot z11.s, z7.b, z0.b\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1b { z6.b }, p4/Z, [x10]\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1b { z6.b }, p4/Z, [x9]\n"
"sdot z15.s, z7.b, z1.b\n"
"sdot z19.s, z7.b, z2.b\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
- "ld1rw { z2.s }, p4/Z, [x24]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
"sdot z23.s, z7.b, z3.b\n"
"sdot z27.s, z7.b, z4.b\n"
- "ld1rw { z3.s }, p4/Z, [x23]\n"
- "ld1rw { z4.s }, p4/Z, [x22]\n"
- "ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
+ "ld1rw { z3.s }, p4/Z, [x22]\n"
+ "ld1rw { z4.s }, p4/Z, [x21]\n"
+ "ld1b { z7.b }, p4/Z, [x9, #1, MUL VL]\n"
"bgt 48b\n"
"49:" // Height 5: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"sdot z8.s, z6.b, z0.b\n"
"sdot z12.s, z6.b, z1.b\n"
- "add x28, x28, #0x1\n"
+ "add x27, x27, #0x1\n"
"sdot z16.s, z6.b, z2.b\n"
"sdot z20.s, z6.b, z3.b\n"
- "cmp x28, x20\n"
+ "cmp x27, x19\n"
"sdot z24.s, z6.b, z4.b\n"
"sdot z9.s, z7.b, z0.b\n"
- "ld1b { z6.b }, p4/Z, [x10, #2, MUL VL]\n"
+ "ld1b { z6.b }, p4/Z, [x9, #2, MUL VL]\n"
"sdot z13.s, z7.b, z1.b\n"
"sdot z17.s, z7.b, z2.b\n"
"sdot z21.s, z7.b, z3.b\n"
"sdot z25.s, z7.b, z4.b\n"
- "ld1b { z7.b }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"sdot z10.s, z6.b, z0.b\n"
"sdot z14.s, z6.b, z1.b\n"
"sdot z18.s, z6.b, z2.b\n"
@@ -753,84 +753,84 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"sdot z23.s, z7.b, z3.b\n"
"sdot z27.s, z7.b, z4.b\n"
"bne 45b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "st1w { z8.s }, p3, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p1, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p0, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p3, [x24]\n"
- "st1w { z13.s }, p2, [x24, #1, MUL VL]\n"
- "st1w { z14.s }, p1, [x24, #2, MUL VL]\n"
- "st1w { z15.s }, p0, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p3, [x23]\n"
- "st1w { z17.s }, p2, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p1, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p0, [x23, #3, MUL VL]\n"
- "st1w { z20.s }, p3, [x22]\n"
- "st1w { z21.s }, p2, [x22, #1, MUL VL]\n"
- "st1w { z22.s }, p1, [x22, #2, MUL VL]\n"
- "st1w { z23.s }, p0, [x22, #3, MUL VL]\n"
- "st1w { z24.s }, p3, [x21]\n"
- "st1w { z25.s }, p2, [x21, #1, MUL VL]\n"
- "st1w { z26.s }, p1, [x21, #2, MUL VL]\n"
- "st1w { z27.s }, p0, [x21, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "st1w { z8.s }, p3, [x28]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
+ "st1w { z9.s }, p2, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p1, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p0, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z12.s }, p3, [x23]\n"
+ "st1w { z13.s }, p2, [x23, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x23, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x23, #3, MUL VL]\n"
+ "st1w { z16.s }, p3, [x22]\n"
+ "st1w { z17.s }, p2, [x22, #1, MUL VL]\n"
+ "st1w { z18.s }, p1, [x22, #2, MUL VL]\n"
+ "st1w { z19.s }, p0, [x22, #3, MUL VL]\n"
+ "st1w { z20.s }, p3, [x21]\n"
+ "st1w { z21.s }, p2, [x21, #1, MUL VL]\n"
+ "st1w { z22.s }, p1, [x21, #2, MUL VL]\n"
+ "st1w { z23.s }, p0, [x21, #3, MUL VL]\n"
+ "st1w { z24.s }, p3, [x20]\n"
+ "st1w { z25.s }, p2, [x20, #1, MUL VL]\n"
+ "st1w { z26.s }, p1, [x20, #2, MUL VL]\n"
+ "st1w { z27.s }, p0, [x20, #3, MUL VL]\n"
"50:" // Height 5: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 42b\n"
"b 62f\n"
"51:" // Height 6
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x18\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x9, %x[output_ptr]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x19, #0x18\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"52:" // Height 6: Column loop
- "mov x20, #0x0\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "incw x20\n"
- "whilelt p0.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p0.s, x19, x10\n"
"tbz %x[flags], #0, 53f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z8.s }, p3/Z, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
- "add x20, x21, x20, LSL #2\n"
- "ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p3/Z, [x24]\n"
- "ld1w { z13.s }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1w { z14.s }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1w { z15.s }, p0/Z, [x24, #3, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x23]\n"
- "ld1w { z17.s }, p2/Z, [x23, #1, MUL VL]\n"
- "ld1w { z18.s }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1w { z19.s }, p0/Z, [x23, #3, MUL VL]\n"
- "ld1w { z20.s }, p3/Z, [x22]\n"
- "ld1w { z21.s }, p2/Z, [x22, #1, MUL VL]\n"
- "ld1w { z22.s }, p1/Z, [x22, #2, MUL VL]\n"
- "ld1w { z23.s }, p0/Z, [x22, #3, MUL VL]\n"
- "ld1w { z24.s }, p3/Z, [x21]\n"
- "ld1w { z25.s }, p2/Z, [x21, #1, MUL VL]\n"
- "ld1w { z26.s }, p1/Z, [x21, #2, MUL VL]\n"
- "ld1w { z27.s }, p0/Z, [x21, #3, MUL VL]\n"
- "ld1w { z28.s }, p3/Z, [x20]\n"
- "ld1w { z29.s }, p2/Z, [x20, #1, MUL VL]\n"
- "ld1w { z30.s }, p1/Z, [x20, #2, MUL VL]\n"
- "ld1w { z31.s }, p0/Z, [x20, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z8.s }, p3/Z, [x28]\n"
+ "ld1w { z9.s }, p2/Z, [x28, #1, MUL VL]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
+ "ld1w { z10.s }, p1/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z11.s }, p0/Z, [x28, #3, MUL VL]\n"
+ "add x19, x20, x19, LSL #2\n"
+ "ld1w { z12.s }, p3/Z, [x23]\n"
+ "ld1w { z13.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z14.s }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z16.s }, p3/Z, [x22]\n"
+ "ld1w { z17.s }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z18.s }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z19.s }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z20.s }, p3/Z, [x21]\n"
+ "ld1w { z21.s }, p2/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z22.s }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z23.s }, p0/Z, [x21, #3, MUL VL]\n"
+ "ld1w { z24.s }, p3/Z, [x20]\n"
+ "ld1w { z25.s }, p2/Z, [x20, #1, MUL VL]\n"
+ "ld1w { z26.s }, p1/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z27.s }, p0/Z, [x20, #3, MUL VL]\n"
+ "ld1w { z28.s }, p3/Z, [x19]\n"
+ "ld1w { z29.s }, p2/Z, [x19, #1, MUL VL]\n"
+ "ld1w { z30.s }, p1/Z, [x19, #2, MUL VL]\n"
+ "ld1w { z31.s }, p0/Z, [x19, #3, MUL VL]\n"
"b 54f\n"
"53:" // Height 6: no accumulate
"mov z8.s, #0x0\n"
@@ -858,110 +858,110 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"mov z30.s, #0x0\n"
"mov z31.s, #0x0\n"
"54:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"55:" // Height 6: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 56f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 57f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
- "add x21, x21, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 57f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
+ "add x20, x20, x19\n"
"b 57f\n"
"56:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
+ "add x20, x21, x19\n"
"57:" // Height 6: input setup done
- "subs x27, x27, #0x4\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
- "ld1rw { z2.s }, p4/Z, [x24]\n"
- "ld1rw { z3.s }, p4/Z, [x23]\n"
- "ld1rw { z4.s }, p4/Z, [x22]\n"
- "ld1rw { z5.s }, p4/Z, [x21]\n"
- "ld1b { z6.b }, p4/Z, [x10]\n"
- "ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1rw { z3.s }, p4/Z, [x22]\n"
+ "ld1rw { z4.s }, p4/Z, [x21]\n"
+ "ld1rw { z5.s }, p4/Z, [x20]\n"
+ "ld1b { z6.b }, p4/Z, [x9]\n"
+ "ld1b { z7.b }, p4/Z, [x9, #1, MUL VL]\n"
"ble 59f\n"
"58:" // Height 6: Multiply loop: Main loop
"sdot z8.s, z6.b, z0.b\n"
"sdot z12.s, z6.b, z1.b\n"
- "add x26, x26, #0x4\n"
- "subs x27, x27, #0x4\n"
+ "add x25, x25, #0x4\n"
+ "subs x26, x26, #0x4\n"
"sdot z16.s, z6.b, z2.b\n"
"sdot z20.s, z6.b, z3.b\n"
- "add x25, x25, #0x4\n"
"add x24, x24, #0x4\n"
+ "add x23, x23, #0x4\n"
"sdot z24.s, z6.b, z4.b\n"
"sdot z28.s, z6.b, z5.b\n"
- "ld1b { z6.b }, p4/Z, [x10, #2, MUL VL]\n"
- "add x23, x23, #0x4\n"
+ "ld1b { z6.b }, p4/Z, [x9, #2, MUL VL]\n"
+ "add x22, x22, #0x4\n"
"sdot z9.s, z7.b, z0.b\n"
"sdot z13.s, z7.b, z1.b\n"
- "add x22, x22, #0x4\n"
"add x21, x21, #0x4\n"
+ "add x20, x20, #0x4\n"
"sdot z17.s, z7.b, z2.b\n"
"sdot z21.s, z7.b, z3.b\n"
"sdot z25.s, z7.b, z4.b\n"
"sdot z29.s, z7.b, z5.b\n"
- "ld1b { z7.b }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"sdot z10.s, z6.b, z0.b\n"
"sdot z14.s, z6.b, z1.b\n"
"sdot z18.s, z6.b, z2.b\n"
"sdot z22.s, z6.b, z3.b\n"
"sdot z26.s, z6.b, z4.b\n"
"sdot z30.s, z6.b, z5.b\n"
- "ld1b { z6.b }, p4/Z, [x10]\n"
+ "ld1b { z6.b }, p4/Z, [x9]\n"
"sdot z11.s, z7.b, z0.b\n"
"sdot z15.s, z7.b, z1.b\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
"sdot z19.s, z7.b, z2.b\n"
"sdot z23.s, z7.b, z3.b\n"
- "ld1rw { z2.s }, p4/Z, [x24]\n"
- "ld1rw { z3.s }, p4/Z, [x23]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1rw { z3.s }, p4/Z, [x22]\n"
"sdot z27.s, z7.b, z4.b\n"
"sdot z31.s, z7.b, z5.b\n"
- "ld1rw { z4.s }, p4/Z, [x22]\n"
- "ld1rw { z5.s }, p4/Z, [x21]\n"
- "ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
+ "ld1rw { z4.s }, p4/Z, [x21]\n"
+ "ld1rw { z5.s }, p4/Z, [x20]\n"
+ "ld1b { z7.b }, p4/Z, [x9, #1, MUL VL]\n"
"bgt 58b\n"
"59:" // Height 6: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"sdot z8.s, z6.b, z0.b\n"
"sdot z12.s, z6.b, z1.b\n"
- "add x28, x28, #0x1\n"
+ "add x27, x27, #0x1\n"
"sdot z16.s, z6.b, z2.b\n"
"sdot z20.s, z6.b, z3.b\n"
- "cmp x28, x20\n"
+ "cmp x27, x19\n"
"sdot z24.s, z6.b, z4.b\n"
"sdot z28.s, z6.b, z5.b\n"
- "ld1b { z6.b }, p4/Z, [x10, #2, MUL VL]\n"
+ "ld1b { z6.b }, p4/Z, [x9, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b\n"
"sdot z13.s, z7.b, z1.b\n"
"sdot z17.s, z7.b, z2.b\n"
"sdot z21.s, z7.b, z3.b\n"
"sdot z25.s, z7.b, z4.b\n"
"sdot z29.s, z7.b, z5.b\n"
- "ld1b { z7.b }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"sdot z10.s, z6.b, z0.b\n"
"sdot z14.s, z6.b, z1.b\n"
"sdot z18.s, z6.b, z2.b\n"
@@ -975,57 +975,57 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"sdot z27.s, z7.b, z4.b\n"
"sdot z31.s, z7.b, z5.b\n"
"bne 55b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "st1w { z8.s }, p3, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
- "add x20, x21, x20, LSL #2\n"
- "st1w { z10.s }, p1, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p0, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p3, [x24]\n"
- "st1w { z13.s }, p2, [x24, #1, MUL VL]\n"
- "st1w { z14.s }, p1, [x24, #2, MUL VL]\n"
- "st1w { z15.s }, p0, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p3, [x23]\n"
- "st1w { z17.s }, p2, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p1, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p0, [x23, #3, MUL VL]\n"
- "st1w { z20.s }, p3, [x22]\n"
- "st1w { z21.s }, p2, [x22, #1, MUL VL]\n"
- "st1w { z22.s }, p1, [x22, #2, MUL VL]\n"
- "st1w { z23.s }, p0, [x22, #3, MUL VL]\n"
- "st1w { z24.s }, p3, [x21]\n"
- "st1w { z25.s }, p2, [x21, #1, MUL VL]\n"
- "st1w { z26.s }, p1, [x21, #2, MUL VL]\n"
- "st1w { z27.s }, p0, [x21, #3, MUL VL]\n"
- "st1w { z28.s }, p3, [x20]\n"
- "st1w { z29.s }, p2, [x20, #1, MUL VL]\n"
- "st1w { z30.s }, p1, [x20, #2, MUL VL]\n"
- "st1w { z31.s }, p0, [x20, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "st1w { z8.s }, p3, [x28]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
+ "st1w { z9.s }, p2, [x28, #1, MUL VL]\n"
+ "add x19, x20, x19, LSL #2\n"
+ "st1w { z10.s }, p1, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p0, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z12.s }, p3, [x23]\n"
+ "st1w { z13.s }, p2, [x23, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x23, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x23, #3, MUL VL]\n"
+ "st1w { z16.s }, p3, [x22]\n"
+ "st1w { z17.s }, p2, [x22, #1, MUL VL]\n"
+ "st1w { z18.s }, p1, [x22, #2, MUL VL]\n"
+ "st1w { z19.s }, p0, [x22, #3, MUL VL]\n"
+ "st1w { z20.s }, p3, [x21]\n"
+ "st1w { z21.s }, p2, [x21, #1, MUL VL]\n"
+ "st1w { z22.s }, p1, [x21, #2, MUL VL]\n"
+ "st1w { z23.s }, p0, [x21, #3, MUL VL]\n"
+ "st1w { z24.s }, p3, [x20]\n"
+ "st1w { z25.s }, p2, [x20, #1, MUL VL]\n"
+ "st1w { z26.s }, p1, [x20, #2, MUL VL]\n"
+ "st1w { z27.s }, p0, [x20, #3, MUL VL]\n"
+ "st1w { z28.s }, p3, [x19]\n"
+ "st1w { z29.s }, p2, [x19, #1, MUL VL]\n"
+ "st1w { z30.s }, p1, [x19, #2, MUL VL]\n"
+ "st1w { z31.s }, p0, [x19, #3, MUL VL]\n"
"60:" // Height 6: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 52b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 62f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 61f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"61:" // Update direct input
- "mov x20, #0x6\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x6\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"62:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x10", "x11", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x10", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_6x4VL/generic.cpp
index b3d2e6b271..dc5b7a33f4 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_6x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_6x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -87,23 +87,23 @@ void sve_hybrid_s8s32_dot_6x4VL (
"cmp %x[M], #0x2\n"
"bgt 23f\n"
"beq 12f\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
"tbz %x[flags], #0, 3f\n"
- "ld1w { z8.s }, p4/Z, [x9]\n"
- "ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "ld1w { z8.s }, p4/Z, [x28]\n"
+ "ld1w { z9.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z11.s }, p1/Z, [x28, #3, MUL VL]\n"
"b 4f\n"
"3:" // Height 1: no accumulate
"mov z8.s, #0x0\n"
@@ -111,148 +111,148 @@ void sve_hybrid_s8s32_dot_6x4VL (
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"4:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"5:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 6f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 7f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 7f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
"b 7f\n"
"6:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"7:" // Height 1: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 9f\n"
"8:" // Height 1: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"sdot z8.s, z6.b, z0.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
+ "cmp x26, #0x10\n"
"sdot z9.s, z7.b, z0.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "add x25, x25, #0x10\n"
"sdot z10.s, z6.b, z0.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #4, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #4, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #5, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #6, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #16\n"
"sdot z10.s, z6.b, z0.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-8, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-8, MUL VL]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-7, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-7, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[2]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-6, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-6, MUL VL]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-5, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-5, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[2]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-4, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-4, MUL VL]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-3, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-3, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[3]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[3]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-2, MUL VL]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-1, MUL VL]\n"
- "sub x27, x27, #0x10\n"
- "cmp x27, #0x10\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-1, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[3]\n"
"sdot z11.s, z7.b, z0.b[3]\n"
- "add x26, x26, #0x10\n"
"bgt 8b\n"
"9:" // Height 1: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"sdot z8.s, z6.b, z0.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x4\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"sdot z10.s, z6.b, z0.b[0]\n"
"sdot z11.s, z7.b, z0.b[0]\n"
- "addvl x10, x10, #4\n"
"ble 10f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"sdot z8.s, z6.b, z0.b[1]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"sdot z9.s, z7.b, z0.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[1]\n"
+ "addvl x9, x9, #4\n"
"sdot z11.s, z7.b, z0.b[1]\n"
- "addvl x10, x10, #4\n"
"ble 10f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"sdot z8.s, z6.b, z0.b[2]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"sdot z9.s, z7.b, z0.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[2]\n"
+ "addvl x9, x9, #4\n"
"sdot z11.s, z7.b, z0.b[2]\n"
- "addvl x10, x10, #4\n"
"ble 10f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"sdot z8.s, z6.b, z0.b[3]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[3]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"sdot z10.s, z6.b, z0.b[3]\n"
"sdot z11.s, z7.b, z0.b[3]\n"
- "addvl x10, x10, #4\n"
"10:" // Height 1: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 5b\n"
- "st1w { z8.s }, p4, [x9]\n"
- "st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "st1w { z8.s }, p4, [x28]\n"
+ "st1w { z9.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"11:" // Height 1: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 2b\n"
"b 68f\n"
"12:" // Height 2
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"13:" // Height 2: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
"tbz %x[flags], #0, 14f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "ld1w { z8.s }, p4/Z, [x9]\n"
- "ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x24]\n"
- "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z8.s }, p4/Z, [x28]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "ld1w { z9.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z11.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x23]\n"
+ "ld1w { z13.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x23, #3, MUL VL]\n"
"b 15f\n"
"14:" // Height 2: no accumulate
"mov z8.s, #0x0\n"
@@ -264,197 +264,197 @@ void sve_hybrid_s8s32_dot_6x4VL (
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
"15:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"16:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 17f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 18f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 18f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
"b 18f\n"
"17:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
"18:" // Height 2: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 20f\n"
"19:" // Height 2: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
- "sub x27, x27, #0x10\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"sdot z8.s, z6.b, z0.b[0]\n"
- "sdot z12.s, z6.b, z1.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "cmp x26, #0x10\n"
"sdot z9.s, z7.b, z0.b[0]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "sdot z12.s, z6.b, z1.b[0]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"sdot z13.s, z7.b, z1.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[0]\n"
"sdot z14.s, z6.b, z1.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #4, MUL VL]\n"
- "cmp x27, #0x10\n"
+ "ld1b { z6.b }, p5/Z, [x9, #4, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[0]\n"
"sdot z15.s, z7.b, z1.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #5, MUL VL]\n"
- "add x26, x26, #0x10\n"
+ "ld1b { z7.b }, p5/Z, [x9, #5, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[1]\n"
"sdot z12.s, z6.b, z1.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #6, MUL VL]\n"
- "add x25, x25, #0x10\n"
+ "ld1b { z6.b }, p5/Z, [x9, #6, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[1]\n"
"sdot z13.s, z7.b, z1.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
+ "ld1b { z7.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #16\n"
"sdot z10.s, z6.b, z0.b[1]\n"
"sdot z14.s, z6.b, z1.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-8, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-8, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[1]\n"
"sdot z15.s, z7.b, z1.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-7, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-7, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[2]\n"
"sdot z12.s, z6.b, z1.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-6, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-6, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[2]\n"
"sdot z13.s, z7.b, z1.b[2]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-5, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-5, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[2]\n"
"sdot z14.s, z6.b, z1.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-4, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-4, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[2]\n"
"sdot z15.s, z7.b, z1.b[2]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-3, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-3, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[3]\n"
"sdot z12.s, z6.b, z1.b[3]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-2, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[3]\n"
"sdot z13.s, z7.b, z1.b[3]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-1, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-1, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[3]\n"
"sdot z14.s, z6.b, z1.b[3]\n"
"sdot z11.s, z7.b, z0.b[3]\n"
"sdot z15.s, z7.b, z1.b[3]\n"
"bgt 19b\n"
"20:" // Height 2: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
- "subs x27, x27, #0x4\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"sdot z8.s, z6.b, z0.b[0]\n"
- "sdot z12.s, z6.b, z1.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
"sdot z9.s, z7.b, z0.b[0]\n"
+ "sdot z12.s, z6.b, z1.b[0]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"sdot z13.s, z7.b, z1.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"sdot z10.s, z6.b, z0.b[0]\n"
"sdot z14.s, z6.b, z1.b[0]\n"
- "addvl x10, x10, #4\n"
"sdot z11.s, z7.b, z0.b[0]\n"
"sdot z15.s, z7.b, z1.b[0]\n"
"ble 21f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"sdot z8.s, z6.b, z0.b[1]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"sdot z12.s, z6.b, z1.b[1]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[1]\n"
"sdot z13.s, z7.b, z1.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"sdot z10.s, z6.b, z0.b[1]\n"
"sdot z14.s, z6.b, z1.b[1]\n"
- "addvl x10, x10, #4\n"
"sdot z11.s, z7.b, z0.b[1]\n"
"sdot z15.s, z7.b, z1.b[1]\n"
"ble 21f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"sdot z8.s, z6.b, z0.b[2]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"sdot z12.s, z6.b, z1.b[2]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[2]\n"
"sdot z13.s, z7.b, z1.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"sdot z10.s, z6.b, z0.b[2]\n"
"sdot z14.s, z6.b, z1.b[2]\n"
- "addvl x10, x10, #4\n"
"sdot z11.s, z7.b, z0.b[2]\n"
"sdot z15.s, z7.b, z1.b[2]\n"
"ble 21f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"sdot z8.s, z6.b, z0.b[3]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
"sdot z12.s, z6.b, z1.b[3]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[3]\n"
"sdot z13.s, z7.b, z1.b[3]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"sdot z10.s, z6.b, z0.b[3]\n"
"sdot z14.s, z6.b, z1.b[3]\n"
- "addvl x10, x10, #4\n"
"sdot z11.s, z7.b, z0.b[3]\n"
"sdot z15.s, z7.b, z1.b[3]\n"
"21:" // Height 2: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 16b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "st1w { z8.s }, p4, [x9]\n"
- "st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p4, [x24]\n"
- "st1w { z13.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x24, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "st1w { z8.s }, p4, [x28]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "st1w { z9.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z12.s }, p4, [x23]\n"
+ "st1w { z13.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x23, #3, MUL VL]\n"
"22:" // Height 2: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 13b\n"
"b 68f\n"
"23:" // Height 3
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"24:" // Height 3: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
"tbz %x[flags], #0, 25f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z8.s }, p4/Z, [x9]\n"
- "ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x24]\n"
- "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z16.s }, p4/Z, [x23]\n"
- "ld1w { z17.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x23, #2, MUL VL]\n"
- "ld1w { z19.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z8.s }, p4/Z, [x28]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "ld1w { z9.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z11.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x23]\n"
+ "ld1w { z13.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x22]\n"
+ "ld1w { z17.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z19.s }, p1/Z, [x22, #3, MUL VL]\n"
"b 26f\n"
"25:" // Height 3: no accumulate
"mov z8.s, #0x0\n"
@@ -470,99 +470,99 @@ void sve_hybrid_s8s32_dot_6x4VL (
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"26:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"27:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 28f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 29f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 29f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
"b 29f\n"
"28:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
"29:" // Height 3: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 31f\n"
"30:" // Height 3: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
- "sub x27, x27, #0x10\n"
- "ld1rqb { z2.b }, p0/Z, [x24]\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"sdot z8.s, z6.b, z0.b[0]\n"
- "sdot z12.s, z6.b, z1.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
- "sdot z16.s, z6.b, z2.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "cmp x26, #0x10\n"
"sdot z9.s, z7.b, z0.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "add x25, x25, #0x10\n"
+ "sdot z12.s, z6.b, z1.b[0]\n"
+ "add x24, x24, #0x10\n"
"sdot z13.s, z7.b, z1.b[0]\n"
+ "add x23, x23, #0x10\n"
+ "sdot z16.s, z6.b, z2.b[0]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"sdot z17.s, z7.b, z2.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "cmp x27, #0x10\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[0]\n"
"sdot z14.s, z6.b, z1.b[0]\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
"sdot z18.s, z6.b, z2.b[0]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #4, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #4, MUL VL]\n"
- "add x24, x24, #0x10\n"
"sdot z15.s, z7.b, z1.b[0]\n"
"sdot z19.s, z7.b, z2.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #5, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[1]\n"
"sdot z12.s, z6.b, z1.b[1]\n"
"sdot z16.s, z6.b, z2.b[1]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #6, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #6, MUL VL]\n"
"sdot z13.s, z7.b, z1.b[1]\n"
"sdot z17.s, z7.b, z2.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
+ "ld1b { z7.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #16\n"
"sdot z10.s, z6.b, z0.b[1]\n"
"sdot z14.s, z6.b, z1.b[1]\n"
"sdot z18.s, z6.b, z2.b[1]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-8, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-8, MUL VL]\n"
"sdot z15.s, z7.b, z1.b[1]\n"
"sdot z19.s, z7.b, z2.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-7, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-7, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[2]\n"
"sdot z12.s, z6.b, z1.b[2]\n"
"sdot z16.s, z6.b, z2.b[2]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-6, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-6, MUL VL]\n"
"sdot z13.s, z7.b, z1.b[2]\n"
"sdot z17.s, z7.b, z2.b[2]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-5, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-5, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[2]\n"
"sdot z14.s, z6.b, z1.b[2]\n"
"sdot z18.s, z6.b, z2.b[2]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-4, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-4, MUL VL]\n"
"sdot z15.s, z7.b, z1.b[2]\n"
"sdot z19.s, z7.b, z2.b[2]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-3, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-3, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[3]\n"
"sdot z12.s, z6.b, z1.b[3]\n"
"sdot z16.s, z6.b, z2.b[3]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[3]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-2, MUL VL]\n"
"sdot z13.s, z7.b, z1.b[3]\n"
"sdot z17.s, z7.b, z2.b[3]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-1, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-1, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[3]\n"
"sdot z14.s, z6.b, z1.b[3]\n"
"sdot z18.s, z6.b, z2.b[3]\n"
@@ -571,22 +571,22 @@ void sve_hybrid_s8s32_dot_6x4VL (
"sdot z19.s, z7.b, z2.b[3]\n"
"bgt 30b\n"
"31:" // Height 3: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
- "subs x27, x27, #0x4\n"
- "ld1rqb { z2.b }, p0/Z, [x24]\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"sdot z8.s, z6.b, z0.b[0]\n"
- "sdot z12.s, z6.b, z1.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
- "sdot z16.s, z6.b, z2.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
"sdot z9.s, z7.b, z0.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "sdot z12.s, z6.b, z1.b[0]\n"
"sdot z13.s, z7.b, z1.b[0]\n"
+ "sdot z16.s, z6.b, z2.b[0]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"sdot z17.s, z7.b, z2.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"sdot z10.s, z6.b, z0.b[0]\n"
"sdot z14.s, z6.b, z1.b[0]\n"
"sdot z18.s, z6.b, z2.b[0]\n"
@@ -594,18 +594,18 @@ void sve_hybrid_s8s32_dot_6x4VL (
"sdot z15.s, z7.b, z1.b[0]\n"
"sdot z19.s, z7.b, z2.b[0]\n"
"ble 32f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"sdot z8.s, z6.b, z0.b[1]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"sdot z12.s, z6.b, z1.b[1]\n"
"sdot z16.s, z6.b, z2.b[1]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x4\n"
"sdot z13.s, z7.b, z1.b[1]\n"
"sdot z17.s, z7.b, z2.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"sdot z10.s, z6.b, z0.b[1]\n"
"sdot z14.s, z6.b, z1.b[1]\n"
"sdot z18.s, z6.b, z2.b[1]\n"
@@ -613,18 +613,18 @@ void sve_hybrid_s8s32_dot_6x4VL (
"sdot z15.s, z7.b, z1.b[1]\n"
"sdot z19.s, z7.b, z2.b[1]\n"
"ble 32f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"sdot z8.s, z6.b, z0.b[2]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"sdot z12.s, z6.b, z1.b[2]\n"
"sdot z16.s, z6.b, z2.b[2]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x4\n"
"sdot z13.s, z7.b, z1.b[2]\n"
"sdot z17.s, z7.b, z2.b[2]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"sdot z10.s, z6.b, z0.b[2]\n"
"sdot z14.s, z6.b, z1.b[2]\n"
"sdot z18.s, z6.b, z2.b[2]\n"
@@ -632,17 +632,17 @@ void sve_hybrid_s8s32_dot_6x4VL (
"sdot z15.s, z7.b, z1.b[2]\n"
"sdot z19.s, z7.b, z2.b[2]\n"
"ble 32f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"sdot z8.s, z6.b, z0.b[3]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
"sdot z12.s, z6.b, z1.b[3]\n"
"sdot z16.s, z6.b, z2.b[3]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[3]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
"sdot z13.s, z7.b, z1.b[3]\n"
"sdot z17.s, z7.b, z2.b[3]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"sdot z10.s, z6.b, z0.b[3]\n"
"sdot z14.s, z6.b, z1.b[3]\n"
"sdot z18.s, z6.b, z2.b[3]\n"
@@ -650,65 +650,65 @@ void sve_hybrid_s8s32_dot_6x4VL (
"sdot z15.s, z7.b, z1.b[3]\n"
"sdot z19.s, z7.b, z2.b[3]\n"
"32:" // Height 3: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 27b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "st1w { z8.s }, p4, [x9]\n"
- "st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p4, [x24]\n"
- "st1w { z13.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x23]\n"
- "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "st1w { z8.s }, p4, [x28]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "st1w { z9.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x28, #2, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "st1w { z11.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z12.s }, p4, [x23]\n"
+ "st1w { z13.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x22]\n"
+ "st1w { z17.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x22, #3, MUL VL]\n"
"33:" // Height 3: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 24b\n"
"b 68f\n"
"34:" // Height 4
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"35:" // Height 4: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
"tbz %x[flags], #0, 36f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z8.s }, p4/Z, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x24]\n"
- "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z16.s }, p4/Z, [x23]\n"
- "ld1w { z17.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x23, #2, MUL VL]\n"
- "ld1w { z19.s }, p1/Z, [x23, #3, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x22]\n"
- "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
- "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
- "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z8.s }, p4/Z, [x28]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "ld1w { z9.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z11.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z12.s }, p4/Z, [x23]\n"
+ "ld1w { z13.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x22]\n"
+ "ld1w { z17.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z19.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z20.s }, p4/Z, [x21]\n"
+ "ld1w { z21.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z23.s }, p1/Z, [x21, #3, MUL VL]\n"
"b 37f\n"
"36:" // Height 4: no accumulate
"mov z8.s, #0x0\n"
@@ -728,118 +728,118 @@ void sve_hybrid_s8s32_dot_6x4VL (
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"37:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"38:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 39f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 40f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 40f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
"b 40f\n"
"39:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
"40:" // Height 4: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 42f\n"
"41:" // Height 4: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
- "sub x27, x27, #0x10\n"
- "ld1rqb { z2.b }, p0/Z, [x24]\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "cmp x27, #0x10\n"
- "add x26, x26, #0x10\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"sdot z8.s, z6.b, z0.b[0]\n"
- "sdot z12.s, z6.b, z1.b[0]\n"
- "sdot z16.s, z6.b, z2.b[0]\n"
- "sdot z20.s, z6.b, z3.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "add x25, x25, #0x10\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "cmp x26, #0x10\n"
"sdot z9.s, z7.b, z0.b[0]\n"
- "sdot z13.s, z7.b, z1.b[0]\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "add x25, x25, #0x10\n"
+ "sdot z12.s, z6.b, z1.b[0]\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
"add x24, x24, #0x10\n"
+ "sdot z16.s, z6.b, z2.b[0]\n"
"add x23, x23, #0x10\n"
+ "sdot z13.s, z7.b, z1.b[0]\n"
+ "add x22, x22, #0x10\n"
"sdot z17.s, z7.b, z2.b[0]\n"
+ "sdot z20.s, z6.b, z3.b[0]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"sdot z21.s, z7.b, z3.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[0]\n"
"sdot z14.s, z6.b, z1.b[0]\n"
"sdot z18.s, z6.b, z2.b[0]\n"
"sdot z22.s, z6.b, z3.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #4, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #4, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[0]\n"
"sdot z15.s, z7.b, z1.b[0]\n"
"sdot z19.s, z7.b, z2.b[0]\n"
"sdot z23.s, z7.b, z3.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #5, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[1]\n"
"sdot z12.s, z6.b, z1.b[1]\n"
"sdot z16.s, z6.b, z2.b[1]\n"
"sdot z20.s, z6.b, z3.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #6, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[1]\n"
"sdot z13.s, z7.b, z1.b[1]\n"
"sdot z17.s, z7.b, z2.b[1]\n"
"sdot z21.s, z7.b, z3.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
+ "ld1b { z7.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #16\n"
"sdot z10.s, z6.b, z0.b[1]\n"
"sdot z14.s, z6.b, z1.b[1]\n"
"sdot z18.s, z6.b, z2.b[1]\n"
"sdot z22.s, z6.b, z3.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-8, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-8, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[1]\n"
"sdot z15.s, z7.b, z1.b[1]\n"
"sdot z19.s, z7.b, z2.b[1]\n"
"sdot z23.s, z7.b, z3.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-7, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-7, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[2]\n"
"sdot z12.s, z6.b, z1.b[2]\n"
"sdot z16.s, z6.b, z2.b[2]\n"
"sdot z20.s, z6.b, z3.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-6, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-6, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[2]\n"
"sdot z13.s, z7.b, z1.b[2]\n"
"sdot z17.s, z7.b, z2.b[2]\n"
"sdot z21.s, z7.b, z3.b[2]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-5, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-5, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[2]\n"
"sdot z14.s, z6.b, z1.b[2]\n"
"sdot z18.s, z6.b, z2.b[2]\n"
"sdot z22.s, z6.b, z3.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-4, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-4, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[2]\n"
"sdot z15.s, z7.b, z1.b[2]\n"
"sdot z19.s, z7.b, z2.b[2]\n"
"sdot z23.s, z7.b, z3.b[2]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-3, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-3, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[3]\n"
"sdot z12.s, z6.b, z1.b[3]\n"
"sdot z16.s, z6.b, z2.b[3]\n"
"sdot z20.s, z6.b, z3.b[3]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-2, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[3]\n"
"sdot z13.s, z7.b, z1.b[3]\n"
"sdot z17.s, z7.b, z2.b[3]\n"
"sdot z21.s, z7.b, z3.b[3]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-1, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-1, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[3]\n"
"sdot z14.s, z6.b, z1.b[3]\n"
"sdot z18.s, z6.b, z2.b[3]\n"
@@ -850,25 +850,25 @@ void sve_hybrid_s8s32_dot_6x4VL (
"sdot z23.s, z7.b, z3.b[3]\n"
"bgt 41b\n"
"42:" // Height 4: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
- "subs x27, x27, #0x4\n"
- "ld1rqb { z2.b }, p0/Z, [x24]\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"sdot z8.s, z6.b, z0.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "sdot z9.s, z7.b, z0.b[0]\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
"sdot z12.s, z6.b, z1.b[0]\n"
+ "sdot z13.s, z7.b, z1.b[0]\n"
"sdot z16.s, z6.b, z2.b[0]\n"
"sdot z20.s, z6.b, z3.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "sdot z9.s, z7.b, z0.b[0]\n"
- "sdot z13.s, z7.b, z1.b[0]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"sdot z17.s, z7.b, z2.b[0]\n"
"sdot z21.s, z7.b, z3.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"sdot z10.s, z6.b, z0.b[0]\n"
"sdot z14.s, z6.b, z1.b[0]\n"
"sdot z18.s, z6.b, z2.b[0]\n"
@@ -878,20 +878,20 @@ void sve_hybrid_s8s32_dot_6x4VL (
"sdot z19.s, z7.b, z2.b[0]\n"
"sdot z23.s, z7.b, z3.b[0]\n"
"ble 43f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"sdot z8.s, z6.b, z0.b[1]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"sdot z12.s, z6.b, z1.b[1]\n"
"sdot z16.s, z6.b, z2.b[1]\n"
"sdot z20.s, z6.b, z3.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[1]\n"
"sdot z13.s, z7.b, z1.b[1]\n"
"sdot z17.s, z7.b, z2.b[1]\n"
"sdot z21.s, z7.b, z3.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"sdot z10.s, z6.b, z0.b[1]\n"
"sdot z14.s, z6.b, z1.b[1]\n"
"sdot z18.s, z6.b, z2.b[1]\n"
@@ -901,20 +901,20 @@ void sve_hybrid_s8s32_dot_6x4VL (
"sdot z19.s, z7.b, z2.b[1]\n"
"sdot z23.s, z7.b, z3.b[1]\n"
"ble 43f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"sdot z8.s, z6.b, z0.b[2]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"sdot z12.s, z6.b, z1.b[2]\n"
"sdot z16.s, z6.b, z2.b[2]\n"
"sdot z20.s, z6.b, z3.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[2]\n"
"sdot z13.s, z7.b, z1.b[2]\n"
"sdot z17.s, z7.b, z2.b[2]\n"
"sdot z21.s, z7.b, z3.b[2]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"sdot z10.s, z6.b, z0.b[2]\n"
"sdot z14.s, z6.b, z1.b[2]\n"
"sdot z18.s, z6.b, z2.b[2]\n"
@@ -924,19 +924,19 @@ void sve_hybrid_s8s32_dot_6x4VL (
"sdot z19.s, z7.b, z2.b[2]\n"
"sdot z23.s, z7.b, z3.b[2]\n"
"ble 43f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"sdot z8.s, z6.b, z0.b[3]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
"sdot z12.s, z6.b, z1.b[3]\n"
"sdot z16.s, z6.b, z2.b[3]\n"
"sdot z20.s, z6.b, z3.b[3]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[3]\n"
"sdot z13.s, z7.b, z1.b[3]\n"
"sdot z17.s, z7.b, z2.b[3]\n"
"sdot z21.s, z7.b, z3.b[3]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"sdot z10.s, z6.b, z0.b[3]\n"
"sdot z14.s, z6.b, z1.b[3]\n"
"sdot z18.s, z6.b, z2.b[3]\n"
@@ -946,75 +946,75 @@ void sve_hybrid_s8s32_dot_6x4VL (
"sdot z19.s, z7.b, z2.b[3]\n"
"sdot z23.s, z7.b, z3.b[3]\n"
"43:" // Height 4: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 38b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "st1w { z8.s }, p4, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p4, [x24]\n"
- "st1w { z13.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x23]\n"
- "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z20.s }, p4, [x22]\n"
- "st1w { z21.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z22.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z23.s }, p1, [x22, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "st1w { z8.s }, p4, [x28]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "st1w { z9.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x28, #2, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "st1w { z11.s }, p1, [x28, #3, MUL VL]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "st1w { z12.s }, p4, [x23]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z13.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x22]\n"
+ "st1w { z17.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z20.s }, p4, [x21]\n"
+ "st1w { z21.s }, p3, [x21, #1, MUL VL]\n"
+ "st1w { z22.s }, p2, [x21, #2, MUL VL]\n"
+ "st1w { z23.s }, p1, [x21, #3, MUL VL]\n"
"44:" // Height 4: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 35b\n"
"b 68f\n"
"45:" // Height 5
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"46:" // Height 5: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
"tbz %x[flags], #0, 47f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z8.s }, p4/Z, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x24]\n"
- "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z16.s }, p4/Z, [x23]\n"
- "ld1w { z17.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x23, #2, MUL VL]\n"
- "ld1w { z19.s }, p1/Z, [x23, #3, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x22]\n"
- "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
- "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
- "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
- "ld1w { z24.s }, p4/Z, [x21]\n"
- "ld1w { z25.s }, p3/Z, [x21, #1, MUL VL]\n"
- "ld1w { z26.s }, p2/Z, [x21, #2, MUL VL]\n"
- "ld1w { z27.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z8.s }, p4/Z, [x28]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "ld1w { z9.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z11.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z12.s }, p4/Z, [x23]\n"
+ "add x20, x21, x19, LSL #2\n"
+ "ld1w { z13.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x22]\n"
+ "ld1w { z17.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z19.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z20.s }, p4/Z, [x21]\n"
+ "ld1w { z21.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z23.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "ld1w { z24.s }, p4/Z, [x20]\n"
+ "ld1w { z25.s }, p3/Z, [x20, #1, MUL VL]\n"
+ "ld1w { z26.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z27.s }, p1/Z, [x20, #3, MUL VL]\n"
"b 48f\n"
"47:" // Height 5: no accumulate
"mov z8.s, #0x0\n"
@@ -1038,137 +1038,137 @@ void sve_hybrid_s8s32_dot_6x4VL (
"mov z26.s, #0x0\n"
"mov z27.s, #0x0\n"
"48:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"49:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 50f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 51f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 51f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
"b 51f\n"
"50:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
"51:" // Height 5: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 53f\n"
"52:" // Height 5: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
- "sub x27, x27, #0x10\n"
- "ld1rqb { z2.b }, p0/Z, [x24]\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "cmp x27, #0x10\n"
- "add x26, x26, #0x10\n"
- "ld1rqb { z4.b }, p0/Z, [x22]\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"sdot z8.s, z6.b, z0.b[0]\n"
- "sdot z12.s, z6.b, z1.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
- "sdot z16.s, z6.b, z2.b[0]\n"
- "sdot z20.s, z6.b, z3.b[0]\n"
- "add x25, x25, #0x10\n"
- "sdot z24.s, z6.b, z4.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "cmp x26, #0x10\n"
"sdot z9.s, z7.b, z0.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "add x25, x25, #0x10\n"
+ "sdot z12.s, z6.b, z1.b[0]\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
"add x24, x24, #0x10\n"
- "sdot z13.s, z7.b, z1.b[0]\n"
- "sdot z17.s, z7.b, z2.b[0]\n"
+ "sdot z16.s, z6.b, z2.b[0]\n"
+ "ld1rqb { z4.b }, p0/Z, [x21]\n"
"add x23, x23, #0x10\n"
+ "sdot z13.s, z7.b, z1.b[0]\n"
"add x22, x22, #0x10\n"
+ "sdot z17.s, z7.b, z2.b[0]\n"
+ "add x21, x21, #0x10\n"
+ "sdot z20.s, z6.b, z3.b[0]\n"
+ "sdot z24.s, z6.b, z4.b[0]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"sdot z21.s, z7.b, z3.b[0]\n"
"sdot z25.s, z7.b, z4.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[0]\n"
"sdot z14.s, z6.b, z1.b[0]\n"
"sdot z18.s, z6.b, z2.b[0]\n"
"sdot z22.s, z6.b, z3.b[0]\n"
"sdot z26.s, z6.b, z4.b[0]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #4, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #4, MUL VL]\n"
"sdot z15.s, z7.b, z1.b[0]\n"
"sdot z19.s, z7.b, z2.b[0]\n"
"sdot z23.s, z7.b, z3.b[0]\n"
"sdot z27.s, z7.b, z4.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #5, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[1]\n"
"sdot z12.s, z6.b, z1.b[1]\n"
"sdot z16.s, z6.b, z2.b[1]\n"
"sdot z20.s, z6.b, z3.b[1]\n"
"sdot z24.s, z6.b, z4.b[1]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #6, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #6, MUL VL]\n"
"sdot z13.s, z7.b, z1.b[1]\n"
"sdot z17.s, z7.b, z2.b[1]\n"
"sdot z21.s, z7.b, z3.b[1]\n"
"sdot z25.s, z7.b, z4.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
+ "ld1b { z7.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #16\n"
"sdot z10.s, z6.b, z0.b[1]\n"
"sdot z14.s, z6.b, z1.b[1]\n"
"sdot z18.s, z6.b, z2.b[1]\n"
"sdot z22.s, z6.b, z3.b[1]\n"
"sdot z26.s, z6.b, z4.b[1]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-8, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-8, MUL VL]\n"
"sdot z15.s, z7.b, z1.b[1]\n"
"sdot z19.s, z7.b, z2.b[1]\n"
"sdot z23.s, z7.b, z3.b[1]\n"
"sdot z27.s, z7.b, z4.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-7, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-7, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[2]\n"
"sdot z12.s, z6.b, z1.b[2]\n"
"sdot z16.s, z6.b, z2.b[2]\n"
"sdot z20.s, z6.b, z3.b[2]\n"
"sdot z24.s, z6.b, z4.b[2]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-6, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-6, MUL VL]\n"
"sdot z13.s, z7.b, z1.b[2]\n"
"sdot z17.s, z7.b, z2.b[2]\n"
"sdot z21.s, z7.b, z3.b[2]\n"
"sdot z25.s, z7.b, z4.b[2]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-5, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-5, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[2]\n"
"sdot z14.s, z6.b, z1.b[2]\n"
"sdot z18.s, z6.b, z2.b[2]\n"
"sdot z22.s, z6.b, z3.b[2]\n"
"sdot z26.s, z6.b, z4.b[2]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-4, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-4, MUL VL]\n"
"sdot z15.s, z7.b, z1.b[2]\n"
"sdot z19.s, z7.b, z2.b[2]\n"
"sdot z23.s, z7.b, z3.b[2]\n"
"sdot z27.s, z7.b, z4.b[2]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-3, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-3, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[3]\n"
"sdot z12.s, z6.b, z1.b[3]\n"
"sdot z16.s, z6.b, z2.b[3]\n"
"sdot z20.s, z6.b, z3.b[3]\n"
"sdot z24.s, z6.b, z4.b[3]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[3]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-2, MUL VL]\n"
"sdot z13.s, z7.b, z1.b[3]\n"
"sdot z17.s, z7.b, z2.b[3]\n"
"sdot z21.s, z7.b, z3.b[3]\n"
"sdot z25.s, z7.b, z4.b[3]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-1, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-1, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[3]\n"
"sdot z14.s, z6.b, z1.b[3]\n"
"sdot z18.s, z6.b, z2.b[3]\n"
@@ -1181,28 +1181,28 @@ void sve_hybrid_s8s32_dot_6x4VL (
"sdot z27.s, z7.b, z4.b[3]\n"
"bgt 52b\n"
"53:" // Height 5: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
- "subs x27, x27, #0x4\n"
- "ld1rqb { z2.b }, p0/Z, [x24]\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "ld1rqb { z4.b }, p0/Z, [x22]\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"sdot z8.s, z6.b, z0.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "sdot z9.s, z7.b, z0.b[0]\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
"sdot z12.s, z6.b, z1.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1rqb { z4.b }, p0/Z, [x21]\n"
+ "sdot z13.s, z7.b, z1.b[0]\n"
"sdot z16.s, z6.b, z2.b[0]\n"
"sdot z20.s, z6.b, z3.b[0]\n"
"sdot z24.s, z6.b, z4.b[0]\n"
- "sdot z9.s, z7.b, z0.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "sdot z13.s, z7.b, z1.b[0]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"sdot z17.s, z7.b, z2.b[0]\n"
"sdot z21.s, z7.b, z3.b[0]\n"
"sdot z25.s, z7.b, z4.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"sdot z10.s, z6.b, z0.b[0]\n"
"sdot z14.s, z6.b, z1.b[0]\n"
"sdot z18.s, z6.b, z2.b[0]\n"
@@ -1214,22 +1214,22 @@ void sve_hybrid_s8s32_dot_6x4VL (
"sdot z23.s, z7.b, z3.b[0]\n"
"sdot z27.s, z7.b, z4.b[0]\n"
"ble 54f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"sdot z8.s, z6.b, z0.b[1]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"sdot z12.s, z6.b, z1.b[1]\n"
"sdot z16.s, z6.b, z2.b[1]\n"
"sdot z20.s, z6.b, z3.b[1]\n"
- "subs x27, x27, #0x4\n"
"sdot z24.s, z6.b, z4.b[1]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
"sdot z13.s, z7.b, z1.b[1]\n"
"sdot z17.s, z7.b, z2.b[1]\n"
"sdot z21.s, z7.b, z3.b[1]\n"
"sdot z25.s, z7.b, z4.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"sdot z10.s, z6.b, z0.b[1]\n"
"sdot z14.s, z6.b, z1.b[1]\n"
"sdot z18.s, z6.b, z2.b[1]\n"
@@ -1241,22 +1241,22 @@ void sve_hybrid_s8s32_dot_6x4VL (
"sdot z23.s, z7.b, z3.b[1]\n"
"sdot z27.s, z7.b, z4.b[1]\n"
"ble 54f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"sdot z8.s, z6.b, z0.b[2]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"sdot z12.s, z6.b, z1.b[2]\n"
"sdot z16.s, z6.b, z2.b[2]\n"
"sdot z20.s, z6.b, z3.b[2]\n"
- "subs x27, x27, #0x4\n"
"sdot z24.s, z6.b, z4.b[2]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
"sdot z13.s, z7.b, z1.b[2]\n"
"sdot z17.s, z7.b, z2.b[2]\n"
"sdot z21.s, z7.b, z3.b[2]\n"
"sdot z25.s, z7.b, z4.b[2]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"sdot z10.s, z6.b, z0.b[2]\n"
"sdot z14.s, z6.b, z1.b[2]\n"
"sdot z18.s, z6.b, z2.b[2]\n"
@@ -1268,21 +1268,21 @@ void sve_hybrid_s8s32_dot_6x4VL (
"sdot z23.s, z7.b, z3.b[2]\n"
"sdot z27.s, z7.b, z4.b[2]\n"
"ble 54f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"sdot z8.s, z6.b, z0.b[3]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
"sdot z12.s, z6.b, z1.b[3]\n"
"sdot z16.s, z6.b, z2.b[3]\n"
"sdot z20.s, z6.b, z3.b[3]\n"
"sdot z24.s, z6.b, z4.b[3]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[3]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
"sdot z13.s, z7.b, z1.b[3]\n"
"sdot z17.s, z7.b, z2.b[3]\n"
"sdot z21.s, z7.b, z3.b[3]\n"
"sdot z25.s, z7.b, z4.b[3]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"sdot z10.s, z6.b, z0.b[3]\n"
"sdot z14.s, z6.b, z1.b[3]\n"
"sdot z18.s, z6.b, z2.b[3]\n"
@@ -1294,88 +1294,88 @@ void sve_hybrid_s8s32_dot_6x4VL (
"sdot z23.s, z7.b, z3.b[3]\n"
"sdot z27.s, z7.b, z4.b[3]\n"
"54:" // Height 5: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 49b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "st1w { z8.s }, p4, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p4, [x24]\n"
- "st1w { z13.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x23]\n"
- "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z20.s }, p4, [x22]\n"
- "st1w { z21.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z22.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z23.s }, p1, [x22, #3, MUL VL]\n"
- "st1w { z24.s }, p4, [x21]\n"
- "st1w { z25.s }, p3, [x21, #1, MUL VL]\n"
- "st1w { z26.s }, p2, [x21, #2, MUL VL]\n"
- "st1w { z27.s }, p1, [x21, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "st1w { z8.s }, p4, [x28]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "st1w { z9.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x28, #2, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "st1w { z11.s }, p1, [x28, #3, MUL VL]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "st1w { z12.s }, p4, [x23]\n"
+ "add x20, x21, x19, LSL #2\n"
+ "st1w { z13.s }, p3, [x23, #1, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z14.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x22]\n"
+ "st1w { z17.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z20.s }, p4, [x21]\n"
+ "st1w { z21.s }, p3, [x21, #1, MUL VL]\n"
+ "st1w { z22.s }, p2, [x21, #2, MUL VL]\n"
+ "st1w { z23.s }, p1, [x21, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x20]\n"
+ "st1w { z25.s }, p3, [x20, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x20, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x20, #3, MUL VL]\n"
"55:" // Height 5: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 46b\n"
"b 68f\n"
"56:" // Height 6
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"mov x20, #0x18\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x9, %x[output_ptr]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
"57:" // Height 6: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
"tbz %x[flags], #0, 58f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z8.s }, p4/Z, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
- "add x20, x21, x20, LSL #2\n"
- "ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x24]\n"
- "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z16.s }, p4/Z, [x23]\n"
- "ld1w { z17.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x23, #2, MUL VL]\n"
- "ld1w { z19.s }, p1/Z, [x23, #3, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x22]\n"
- "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
- "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
- "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
- "ld1w { z24.s }, p4/Z, [x21]\n"
- "ld1w { z25.s }, p3/Z, [x21, #1, MUL VL]\n"
- "ld1w { z26.s }, p2/Z, [x21, #2, MUL VL]\n"
- "ld1w { z27.s }, p1/Z, [x21, #3, MUL VL]\n"
- "ld1w { z28.s }, p4/Z, [x20]\n"
- "ld1w { z29.s }, p3/Z, [x20, #1, MUL VL]\n"
- "ld1w { z30.s }, p2/Z, [x20, #2, MUL VL]\n"
- "ld1w { z31.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z8.s }, p4/Z, [x28]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "ld1w { z9.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z11.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z12.s }, p4/Z, [x23]\n"
+ "add x20, x21, x19, LSL #2\n"
+ "ld1w { z13.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "add x19, x20, x19, LSL #2\n"
+ "ld1w { z14.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x22]\n"
+ "ld1w { z17.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z19.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z20.s }, p4/Z, [x21]\n"
+ "ld1w { z21.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z23.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "ld1w { z24.s }, p4/Z, [x20]\n"
+ "ld1w { z25.s }, p3/Z, [x20, #1, MUL VL]\n"
+ "ld1w { z26.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z27.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "ld1w { z28.s }, p4/Z, [x19]\n"
+ "ld1w { z29.s }, p3/Z, [x19, #1, MUL VL]\n"
+ "ld1w { z30.s }, p2/Z, [x19, #2, MUL VL]\n"
+ "ld1w { z31.s }, p1/Z, [x19, #3, MUL VL]\n"
"b 59f\n"
"58:" // Height 6: no accumulate
"mov z8.s, #0x0\n"
@@ -1403,156 +1403,156 @@ void sve_hybrid_s8s32_dot_6x4VL (
"mov z30.s, #0x0\n"
"mov z31.s, #0x0\n"
"59:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"60:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 61f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 62f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
- "add x21, x21, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 62f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
+ "add x20, x20, x19\n"
"b 62f\n"
"61:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
+ "add x20, x21, x19\n"
"62:" // Height 6: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 64f\n"
"63:" // Height 6: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
- "sub x27, x27, #0x10\n"
- "ld1rqb { z2.b }, p0/Z, [x24]\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "cmp x27, #0x10\n"
- "add x26, x26, #0x10\n"
- "ld1rqb { z4.b }, p0/Z, [x22]\n"
- "ld1rqb { z5.b }, p0/Z, [x21]\n"
- "add x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"sdot z8.s, z6.b, z0.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "cmp x26, #0x10\n"
+ "sdot z9.s, z7.b, z0.b[0]\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "add x25, x25, #0x10\n"
"sdot z12.s, z6.b, z1.b[0]\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
+ "add x24, x24, #0x10\n"
"sdot z16.s, z6.b, z2.b[0]\n"
- "sdot z20.s, z6.b, z3.b[0]\n"
+ "ld1rqb { z4.b }, p0/Z, [x21]\n"
"add x23, x23, #0x10\n"
+ "sdot z13.s, z7.b, z1.b[0]\n"
+ "ld1rqb { z5.b }, p0/Z, [x20]\n"
"add x22, x22, #0x10\n"
- "sdot z24.s, z6.b, z4.b[0]\n"
- "sdot z28.s, z6.b, z5.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "sdot z20.s, z6.b, z3.b[0]\n"
"add x21, x21, #0x10\n"
- "sdot z9.s, z7.b, z0.b[0]\n"
- "sdot z13.s, z7.b, z1.b[0]\n"
"sdot z17.s, z7.b, z2.b[0]\n"
+ "add x20, x20, #0x10\n"
+ "sdot z24.s, z6.b, z4.b[0]\n"
+ "sdot z28.s, z6.b, z5.b[0]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"sdot z21.s, z7.b, z3.b[0]\n"
"sdot z25.s, z7.b, z4.b[0]\n"
"sdot z29.s, z7.b, z5.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[0]\n"
"sdot z14.s, z6.b, z1.b[0]\n"
"sdot z18.s, z6.b, z2.b[0]\n"
"sdot z22.s, z6.b, z3.b[0]\n"
"sdot z26.s, z6.b, z4.b[0]\n"
"sdot z30.s, z6.b, z5.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #4, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #4, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[0]\n"
"sdot z15.s, z7.b, z1.b[0]\n"
"sdot z19.s, z7.b, z2.b[0]\n"
"sdot z23.s, z7.b, z3.b[0]\n"
"sdot z27.s, z7.b, z4.b[0]\n"
"sdot z31.s, z7.b, z5.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #5, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[1]\n"
"sdot z12.s, z6.b, z1.b[1]\n"
"sdot z16.s, z6.b, z2.b[1]\n"
"sdot z20.s, z6.b, z3.b[1]\n"
"sdot z24.s, z6.b, z4.b[1]\n"
"sdot z28.s, z6.b, z5.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #6, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[1]\n"
"sdot z13.s, z7.b, z1.b[1]\n"
"sdot z17.s, z7.b, z2.b[1]\n"
"sdot z21.s, z7.b, z3.b[1]\n"
"sdot z25.s, z7.b, z4.b[1]\n"
"sdot z29.s, z7.b, z5.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
+ "ld1b { z7.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #16\n"
"sdot z10.s, z6.b, z0.b[1]\n"
"sdot z14.s, z6.b, z1.b[1]\n"
"sdot z18.s, z6.b, z2.b[1]\n"
"sdot z22.s, z6.b, z3.b[1]\n"
"sdot z26.s, z6.b, z4.b[1]\n"
"sdot z30.s, z6.b, z5.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-8, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-8, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[1]\n"
"sdot z15.s, z7.b, z1.b[1]\n"
"sdot z19.s, z7.b, z2.b[1]\n"
"sdot z23.s, z7.b, z3.b[1]\n"
"sdot z27.s, z7.b, z4.b[1]\n"
"sdot z31.s, z7.b, z5.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-7, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-7, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[2]\n"
"sdot z12.s, z6.b, z1.b[2]\n"
"sdot z16.s, z6.b, z2.b[2]\n"
"sdot z20.s, z6.b, z3.b[2]\n"
"sdot z24.s, z6.b, z4.b[2]\n"
"sdot z28.s, z6.b, z5.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-6, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-6, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[2]\n"
"sdot z13.s, z7.b, z1.b[2]\n"
"sdot z17.s, z7.b, z2.b[2]\n"
"sdot z21.s, z7.b, z3.b[2]\n"
"sdot z25.s, z7.b, z4.b[2]\n"
"sdot z29.s, z7.b, z5.b[2]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-5, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-5, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[2]\n"
"sdot z14.s, z6.b, z1.b[2]\n"
"sdot z18.s, z6.b, z2.b[2]\n"
"sdot z22.s, z6.b, z3.b[2]\n"
"sdot z26.s, z6.b, z4.b[2]\n"
"sdot z30.s, z6.b, z5.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-4, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-4, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[2]\n"
"sdot z15.s, z7.b, z1.b[2]\n"
"sdot z19.s, z7.b, z2.b[2]\n"
"sdot z23.s, z7.b, z3.b[2]\n"
"sdot z27.s, z7.b, z4.b[2]\n"
"sdot z31.s, z7.b, z5.b[2]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-3, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-3, MUL VL]\n"
"sdot z8.s, z6.b, z0.b[3]\n"
"sdot z12.s, z6.b, z1.b[3]\n"
"sdot z16.s, z6.b, z2.b[3]\n"
"sdot z20.s, z6.b, z3.b[3]\n"
"sdot z24.s, z6.b, z4.b[3]\n"
"sdot z28.s, z6.b, z5.b[3]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-2, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[3]\n"
"sdot z13.s, z7.b, z1.b[3]\n"
"sdot z17.s, z7.b, z2.b[3]\n"
"sdot z21.s, z7.b, z3.b[3]\n"
"sdot z25.s, z7.b, z4.b[3]\n"
"sdot z29.s, z7.b, z5.b[3]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-1, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-1, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[3]\n"
"sdot z14.s, z6.b, z1.b[3]\n"
"sdot z18.s, z6.b, z2.b[3]\n"
@@ -1567,31 +1567,31 @@ void sve_hybrid_s8s32_dot_6x4VL (
"sdot z31.s, z7.b, z5.b[3]\n"
"bgt 63b\n"
"64:" // Height 6: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
- "subs x27, x27, #0x4\n"
- "ld1rqb { z2.b }, p0/Z, [x24]\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "ld1rqb { z4.b }, p0/Z, [x22]\n"
- "ld1rqb { z5.b }, p0/Z, [x21]\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"sdot z8.s, z6.b, z0.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "sdot z9.s, z7.b, z0.b[0]\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
"sdot z12.s, z6.b, z1.b[0]\n"
+ "ld1rqb { z4.b }, p0/Z, [x21]\n"
+ "sdot z13.s, z7.b, z1.b[0]\n"
+ "ld1rqb { z5.b }, p0/Z, [x20]\n"
"sdot z16.s, z6.b, z2.b[0]\n"
"sdot z20.s, z6.b, z3.b[0]\n"
"sdot z24.s, z6.b, z4.b[0]\n"
"sdot z28.s, z6.b, z5.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "sdot z9.s, z7.b, z0.b[0]\n"
- "sdot z13.s, z7.b, z1.b[0]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"sdot z17.s, z7.b, z2.b[0]\n"
"sdot z21.s, z7.b, z3.b[0]\n"
"sdot z25.s, z7.b, z4.b[0]\n"
"sdot z29.s, z7.b, z5.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"sdot z10.s, z6.b, z0.b[0]\n"
"sdot z14.s, z6.b, z1.b[0]\n"
"sdot z18.s, z6.b, z2.b[0]\n"
@@ -1605,24 +1605,24 @@ void sve_hybrid_s8s32_dot_6x4VL (
"sdot z27.s, z7.b, z4.b[0]\n"
"sdot z31.s, z7.b, z5.b[0]\n"
"ble 65f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"sdot z8.s, z6.b, z0.b[1]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"sdot z12.s, z6.b, z1.b[1]\n"
"sdot z16.s, z6.b, z2.b[1]\n"
"sdot z20.s, z6.b, z3.b[1]\n"
- "subs x27, x27, #0x4\n"
"sdot z24.s, z6.b, z4.b[1]\n"
"sdot z28.s, z6.b, z5.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[1]\n"
"sdot z13.s, z7.b, z1.b[1]\n"
"sdot z17.s, z7.b, z2.b[1]\n"
"sdot z21.s, z7.b, z3.b[1]\n"
"sdot z25.s, z7.b, z4.b[1]\n"
"sdot z29.s, z7.b, z5.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"sdot z10.s, z6.b, z0.b[1]\n"
"sdot z14.s, z6.b, z1.b[1]\n"
"sdot z18.s, z6.b, z2.b[1]\n"
@@ -1636,24 +1636,24 @@ void sve_hybrid_s8s32_dot_6x4VL (
"sdot z27.s, z7.b, z4.b[1]\n"
"sdot z31.s, z7.b, z5.b[1]\n"
"ble 65f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"sdot z8.s, z6.b, z0.b[2]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"sdot z12.s, z6.b, z1.b[2]\n"
"sdot z16.s, z6.b, z2.b[2]\n"
"sdot z20.s, z6.b, z3.b[2]\n"
- "subs x27, x27, #0x4\n"
"sdot z24.s, z6.b, z4.b[2]\n"
"sdot z28.s, z6.b, z5.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[2]\n"
"sdot z13.s, z7.b, z1.b[2]\n"
"sdot z17.s, z7.b, z2.b[2]\n"
"sdot z21.s, z7.b, z3.b[2]\n"
"sdot z25.s, z7.b, z4.b[2]\n"
"sdot z29.s, z7.b, z5.b[2]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"sdot z10.s, z6.b, z0.b[2]\n"
"sdot z14.s, z6.b, z1.b[2]\n"
"sdot z18.s, z6.b, z2.b[2]\n"
@@ -1667,23 +1667,23 @@ void sve_hybrid_s8s32_dot_6x4VL (
"sdot z27.s, z7.b, z4.b[2]\n"
"sdot z31.s, z7.b, z5.b[2]\n"
"ble 65f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"sdot z8.s, z6.b, z0.b[3]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
"sdot z12.s, z6.b, z1.b[3]\n"
"sdot z16.s, z6.b, z2.b[3]\n"
"sdot z20.s, z6.b, z3.b[3]\n"
"sdot z24.s, z6.b, z4.b[3]\n"
"sdot z28.s, z6.b, z5.b[3]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b[3]\n"
"sdot z13.s, z7.b, z1.b[3]\n"
"sdot z17.s, z7.b, z2.b[3]\n"
"sdot z21.s, z7.b, z3.b[3]\n"
"sdot z25.s, z7.b, z4.b[3]\n"
"sdot z29.s, z7.b, z5.b[3]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"sdot z10.s, z6.b, z0.b[3]\n"
"sdot z14.s, z6.b, z1.b[3]\n"
"sdot z18.s, z6.b, z2.b[3]\n"
@@ -1697,61 +1697,61 @@ void sve_hybrid_s8s32_dot_6x4VL (
"sdot z27.s, z7.b, z4.b[3]\n"
"sdot z31.s, z7.b, z5.b[3]\n"
"65:" // Height 6: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 60b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "st1w { z8.s }, p4, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
- "add x20, x21, x20, LSL #2\n"
- "st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p4, [x24]\n"
- "st1w { z13.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x23]\n"
- "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z20.s }, p4, [x22]\n"
- "st1w { z21.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z22.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z23.s }, p1, [x22, #3, MUL VL]\n"
- "st1w { z24.s }, p4, [x21]\n"
- "st1w { z25.s }, p3, [x21, #1, MUL VL]\n"
- "st1w { z26.s }, p2, [x21, #2, MUL VL]\n"
- "st1w { z27.s }, p1, [x21, #3, MUL VL]\n"
- "st1w { z28.s }, p4, [x20]\n"
- "st1w { z29.s }, p3, [x20, #1, MUL VL]\n"
- "st1w { z30.s }, p2, [x20, #2, MUL VL]\n"
- "st1w { z31.s }, p1, [x20, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "st1w { z8.s }, p4, [x28]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "st1w { z9.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x28, #2, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "st1w { z11.s }, p1, [x28, #3, MUL VL]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "st1w { z12.s }, p4, [x23]\n"
+ "add x20, x21, x19, LSL #2\n"
+ "st1w { z13.s }, p3, [x23, #1, MUL VL]\n"
+ "add x19, x20, x19, LSL #2\n"
+ "st1w { z14.s }, p2, [x23, #2, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z15.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x22]\n"
+ "st1w { z17.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z20.s }, p4, [x21]\n"
+ "st1w { z21.s }, p3, [x21, #1, MUL VL]\n"
+ "st1w { z22.s }, p2, [x21, #2, MUL VL]\n"
+ "st1w { z23.s }, p1, [x21, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x20]\n"
+ "st1w { z25.s }, p3, [x20, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x20, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x20, #3, MUL VL]\n"
+ "st1w { z28.s }, p4, [x19]\n"
+ "st1w { z29.s }, p3, [x19, #1, MUL VL]\n"
+ "st1w { z30.s }, p2, [x19, #2, MUL VL]\n"
+ "st1w { z31.s }, p1, [x19, #3, MUL VL]\n"
"66:" // Height 6: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 57b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 68f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 67f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"67:" // Update direct input
- "mov x20, #0x6\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x6\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"68:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_mmla_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_mmla_6x4VL/generic.cpp
index 350425647a..c3abb203ca 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_mmla_6x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_mmla_6x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -87,25 +87,25 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"cmp %x[M], #0x2\n"
"bgt 23f\n"
"beq 12f\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
"tbz %x[flags], #0, 3f\n"
- "ld1w { z9.s }, p4/Z, [x9]\n"
- "ld1w { z10.s }, p3/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z9.s }, p4/Z, [x28]\n"
"zip1 z8.d, z9.d, z12.d\n"
+ "ld1w { z10.s }, p3/Z, [x28, #1, MUL VL]\n"
"zip2 z12.d, z9.d, z12.d\n"
- "ld1w { z11.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x28, #3, MUL VL]\n"
"zip1 z9.d, z10.d, z13.d\n"
"zip2 z13.d, z10.d, z13.d\n"
"zip1 z10.d, z11.d, z14.d\n"
@@ -123,154 +123,154 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
"4:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"5:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 6f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 7f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 7f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
"b 7f\n"
"6:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"7:" // Height 1: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 9f\n"
"8:" // Height 1: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x26, x26, #0x10\n"
+ "trn2 z1.d, z1.d, z2.d\n"
+ "cmp x26, #0x10\n"
+ "add x25, x25, #0x10\n"
".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
- "trn2 z1.d, z1.d, z2.d\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #16\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-8, MUL VL]\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-8, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-7, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-7, MUL VL]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-6, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-6, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-5, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-4, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-3, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-2, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-2, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-1, MUL VL]\n"
- "sub x27, x27, #0x10\n"
- "cmp x27, #0x10\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-1, MUL VL]\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x4506982f // smmla z15.s, z1.b, z6.b\n"
- "add x26, x26, #0x10\n"
"bgt 8b\n"
"9:" // Height 1: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x8\n"
+ "trn2 z1.d, z1.d, z2.d\n"
".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
- "subs x27, x27, #0x8\n"
- "trn2 z1.d, z1.d, z2.d\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
- "addvl x10, x10, #8\n"
"ble 10f\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x4506982f // smmla z15.s, z1.b, z6.b\n"
- "addvl x10, x10, #8\n"
"10:" // Height 1: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 5b\n"
"uzp1 z8.d, z8.d, z12.d\n"
+ "st1w { z8.s }, p4, [x28]\n"
"uzp1 z9.d, z9.d, z13.d\n"
- "st1w { z8.s }, p4, [x9]\n"
"uzp1 z10.d, z10.d, z14.d\n"
+ "st1w { z9.s }, p3, [x28, #1, MUL VL]\n"
"uzp1 z11.d, z11.d, z15.d\n"
- "st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "st1w { z10.s }, p2, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"11:" // Height 1: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 2b\n"
"b 68f\n"
"12:" // Height 2
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"13:" // Height 2: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
"tbz %x[flags], #0, 14f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "ld1w { z9.s }, p4/Z, [x9]\n"
- "ld1w { z10.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z11.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x24]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z9.s }, p4/Z, [x28]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "ld1w { z10.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x23]\n"
"zip1 z8.d, z9.d, z12.d\n"
+ "ld1w { z13.s }, p3/Z, [x23, #1, MUL VL]\n"
"zip2 z12.d, z9.d, z12.d\n"
- "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x23, #3, MUL VL]\n"
"zip1 z9.d, z10.d, z13.d\n"
"zip2 z13.d, z10.d, z13.d\n"
- "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
"zip1 z10.d, z11.d, z14.d\n"
"zip2 z14.d, z11.d, z14.d\n"
"zip1 z11.d, z16.d, z15.d\n"
@@ -286,179 +286,179 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
"15:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"16:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 17f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 18f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 18f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
"b 18f\n"
"17:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
"18:" // Height 2: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 20f\n"
"19:" // Height 2: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "cmp x26, #0x10\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "add x25, x25, #0x10\n"
+ "trn2 z1.d, z1.d, z2.d\n"
+ "add x24, x24, #0x10\n"
".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
- "trn2 z1.d, z1.d, z2.d\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #16\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-8, MUL VL]\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-8, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-7, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-7, MUL VL]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-6, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-6, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-5, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-4, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-3, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-2, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-2, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-1, MUL VL]\n"
- "sub x27, x27, #0x10\n"
- "cmp x27, #0x10\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-1, MUL VL]\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x4506982f // smmla z15.s, z1.b, z6.b\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
"bgt 19b\n"
"20:" // Height 2: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x8\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "trn2 z1.d, z1.d, z2.d\n"
".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
- "subs x27, x27, #0x8\n"
- "trn2 z1.d, z1.d, z2.d\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
- "addvl x10, x10, #8\n"
"ble 21f\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x4506982f // smmla z15.s, z1.b, z6.b\n"
- "addvl x10, x10, #8\n"
"21:" // Height 2: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 16b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
"uzp1 z7.d, z8.d, z12.d\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 z8.d, z8.d, z12.d\n"
+ "st1w { z7.s }, p4, [x28]\n"
"uzp1 z12.d, z9.d, z13.d\n"
+ "add x23, x28, x19, LSL #2\n"
"uzp2 z9.d, z9.d, z13.d\n"
- "st1w { z7.s }, p4, [x9]\n"
+ "st1w { z12.s }, p3, [x28, #1, MUL VL]\n"
"uzp1 z13.d, z10.d, z14.d\n"
"uzp2 z10.d, z10.d, z14.d\n"
- "st1w { z12.s }, p3, [x9, #1, MUL VL]\n"
+ "st1w { z13.s }, p2, [x28, #2, MUL VL]\n"
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
- "st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z8.s }, p4, [x24]\n"
- "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z14.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z8.s }, p4, [x23]\n"
+ "st1w { z9.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x23, #3, MUL VL]\n"
"22:" // Height 2: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 13b\n"
"b 68f\n"
"23:" // Height 3
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"24:" // Height 3: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
"tbz %x[flags], #0, 25f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z9.s }, p4/Z, [x9]\n"
- "ld1w { z10.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z11.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x24]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z9.s }, p4/Z, [x28]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "ld1w { z10.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x23]\n"
"zip1 z8.d, z9.d, z12.d\n"
+ "ld1w { z13.s }, p3/Z, [x23, #1, MUL VL]\n"
"zip2 z12.d, z9.d, z12.d\n"
- "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x23, #3, MUL VL]\n"
"zip1 z9.d, z10.d, z13.d\n"
+ "ld1w { z17.s }, p4/Z, [x22]\n"
"zip2 z13.d, z10.d, z13.d\n"
- "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z17.s }, p4/Z, [x23]\n"
+ "ld1w { z18.s }, p3/Z, [x22, #1, MUL VL]\n"
"zip1 z10.d, z11.d, z14.d\n"
+ "ld1w { z19.s }, p2/Z, [x22, #2, MUL VL]\n"
"zip2 z14.d, z11.d, z14.d\n"
- "ld1w { z18.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z19.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z24.s }, p1/Z, [x22, #3, MUL VL]\n"
"zip1 z11.d, z16.d, z15.d\n"
"zip2 z15.d, z16.d, z15.d\n"
- "ld1w { z24.s }, p1/Z, [x23, #3, MUL VL]\n"
"zip1 z16.d, z17.d, z20.d\n"
"zip2 z20.d, z17.d, z20.d\n"
"zip1 z17.d, z18.d, z21.d\n"
@@ -486,239 +486,239 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"26:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"27:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 28f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 29f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 29f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
"b 29f\n"
"28:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
"29:" // Height 3: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 31f\n"
"30:" // Height 3: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
- "ld1rqb { z3.b }, p0/Z, [x24]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
+ "ld1rqb { z3.b }, p0/Z, [x23]\n"
+ "cmp x26, #0x10\n"
"trn2 z1.d, z1.d, z2.d\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "trn1 z2.d, z3.d, z4.d\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
- ".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
+ "add x23, x23, #0x10\n"
+ "trn1 z2.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
+ ".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x45069854 // smmla z20.s, z2.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
".inst 0x45079851 // smmla z17.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
- "trn2 z3.d, z3.d, z4.d\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
".inst 0x45069855 // smmla z21.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
- "sub x27, x27, #0x10\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
".inst 0x45079852 // smmla z18.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
- "cmp x27, #0x10\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
".inst 0x45069856 // smmla z22.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #16\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
".inst 0x45079853 // smmla z19.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-8, MUL VL]\n"
- "add x26, x26, #0x10\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-8, MUL VL]\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
".inst 0x45069857 // smmla z23.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #-7, MUL VL]\n"
- "add x25, x25, #0x10\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-7, MUL VL]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
".inst 0x45079870 // smmla z16.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-6, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-6, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
".inst 0x45069874 // smmla z20.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #-5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-5, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
".inst 0x45079871 // smmla z17.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-4, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
".inst 0x45069875 // smmla z21.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #-3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-3, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
".inst 0x45079872 // smmla z18.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-2, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-2, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
".inst 0x45069876 // smmla z22.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #-1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-1, MUL VL]\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x45079873 // smmla z19.s, z3.b, z7.b\n"
".inst 0x4506982f // smmla z15.s, z1.b, z6.b\n"
".inst 0x45069877 // smmla z23.s, z3.b, z6.b\n"
"bgt 30b\n"
"31:" // Height 3: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
- "ld1rqb { z3.b }, p0/Z, [x24]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "subs x26, x26, #0x8\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
+ "ld1rqb { z3.b }, p0/Z, [x23]\n"
"trn2 z1.d, z1.d, z2.d\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "trn1 z2.d, z3.d, z4.d\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
- ".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
+ "trn1 z2.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
+ ".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x45069854 // smmla z20.s, z2.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
".inst 0x45079851 // smmla z17.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
- "subs x27, x27, #0x8\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
".inst 0x45069855 // smmla z21.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
- "trn2 z3.d, z3.d, z4.d\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
".inst 0x45079852 // smmla z18.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
".inst 0x45069856 // smmla z22.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #8\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
".inst 0x45079853 // smmla z19.s, z2.b, z7.b\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
".inst 0x45069857 // smmla z23.s, z2.b, z6.b\n"
"ble 32f\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
".inst 0x45079870 // smmla z16.s, z3.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
".inst 0x45069874 // smmla z20.s, z3.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
".inst 0x45079871 // smmla z17.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
".inst 0x45069875 // smmla z21.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
".inst 0x45079872 // smmla z18.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
".inst 0x45069876 // smmla z22.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #8\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x45079873 // smmla z19.s, z3.b, z7.b\n"
".inst 0x4506982f // smmla z15.s, z1.b, z6.b\n"
".inst 0x45069877 // smmla z23.s, z3.b, z6.b\n"
"32:" // Height 3: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 27b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"uzp1 z7.d, z8.d, z12.d\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 z8.d, z8.d, z12.d\n"
+ "st1w { z7.s }, p4, [x28]\n"
"uzp1 z12.d, z9.d, z13.d\n"
- "st1w { z7.s }, p4, [x9]\n"
+ "add x23, x28, x19, LSL #2\n"
"uzp2 z9.d, z9.d, z13.d\n"
+ "st1w { z12.s }, p3, [x28, #1, MUL VL]\n"
"uzp1 z13.d, z10.d, z14.d\n"
- "st1w { z12.s }, p3, [x9, #1, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
"uzp2 z10.d, z10.d, z14.d\n"
+ "st1w { z13.s }, p2, [x28, #2, MUL VL]\n"
"uzp1 z14.d, z11.d, z15.d\n"
- "st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
"uzp2 z11.d, z11.d, z15.d\n"
+ "st1w { z14.s }, p1, [x28, #3, MUL VL]\n"
"uzp1 z16.d, z16.d, z20.d\n"
- "st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "addvl x28, x28, #4\n"
"uzp1 z17.d, z17.d, z21.d\n"
+ "st1w { z8.s }, p4, [x23]\n"
"uzp1 z18.d, z18.d, z22.d\n"
- "st1w { z8.s }, p4, [x24]\n"
+ "st1w { z9.s }, p3, [x23, #1, MUL VL]\n"
"uzp1 z19.d, z19.d, z23.d\n"
- "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x23]\n"
- "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z10.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x22]\n"
+ "st1w { z17.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x22, #3, MUL VL]\n"
"33:" // Height 3: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 24b\n"
"b 68f\n"
"34:" // Height 4
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"35:" // Height 4: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
"tbz %x[flags], #0, 36f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z9.s }, p4/Z, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "ld1w { z10.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z11.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x24]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z9.s }, p4/Z, [x28]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "ld1w { z10.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z12.s }, p4/Z, [x23]\n"
"zip1 z8.d, z9.d, z12.d\n"
+ "ld1w { z13.s }, p3/Z, [x23, #1, MUL VL]\n"
"zip2 z12.d, z9.d, z12.d\n"
- "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x23, #3, MUL VL]\n"
"zip1 z9.d, z10.d, z13.d\n"
+ "ld1w { z17.s }, p4/Z, [x22]\n"
"zip2 z13.d, z10.d, z13.d\n"
- "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z17.s }, p4/Z, [x23]\n"
+ "ld1w { z18.s }, p3/Z, [x22, #1, MUL VL]\n"
"zip1 z10.d, z11.d, z14.d\n"
+ "ld1w { z19.s }, p2/Z, [x22, #2, MUL VL]\n"
"zip2 z14.d, z11.d, z14.d\n"
- "ld1w { z18.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z19.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z24.s }, p1/Z, [x22, #3, MUL VL]\n"
"zip1 z11.d, z16.d, z15.d\n"
+ "ld1w { z20.s }, p4/Z, [x21]\n"
"zip2 z15.d, z16.d, z15.d\n"
- "ld1w { z24.s }, p1/Z, [x23, #3, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x22]\n"
+ "ld1w { z21.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x21, #2, MUL VL]\n"
"zip1 z16.d, z17.d, z20.d\n"
+ "ld1w { z23.s }, p1/Z, [x21, #3, MUL VL]\n"
"zip2 z20.d, z17.d, z20.d\n"
- "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
- "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
"zip1 z17.d, z18.d, z21.d\n"
"zip2 z21.d, z18.d, z21.d\n"
- "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
"zip1 z18.d, z19.d, z22.d\n"
"zip2 z22.d, z19.d, z22.d\n"
"zip1 z19.d, z24.d, z23.d\n"
@@ -742,263 +742,263 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"37:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"38:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 39f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 40f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 40f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
"b 40f\n"
"39:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
"40:" // Height 4: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 42f\n"
"41:" // Height 4: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "cmp x26, #0x10\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1rqb { z3.b }, p0/Z, [x24]\n"
- "ld1rqb { z4.b }, p0/Z, [x23]\n"
+ "ld1rqb { z3.b }, p0/Z, [x23]\n"
+ "add x25, x25, #0x10\n"
"trn2 z1.d, z1.d, z2.d\n"
- "trn1 z2.d, z3.d, z4.d\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1rqb { z4.b }, p0/Z, [x22]\n"
+ "add x24, x24, #0x10\n"
".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
- ".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
+ "add x23, x23, #0x10\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
+ "add x22, x22, #0x10\n"
+ "trn1 z2.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
+ ".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x45069854 // smmla z20.s, z2.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
".inst 0x45079851 // smmla z17.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
- "trn2 z3.d, z3.d, z4.d\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
".inst 0x45069855 // smmla z21.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
- "sub x27, x27, #0x10\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
".inst 0x45079852 // smmla z18.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
- "cmp x27, #0x10\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
".inst 0x45069856 // smmla z22.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #16\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
".inst 0x45079853 // smmla z19.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-8, MUL VL]\n"
- "add x26, x26, #0x10\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-8, MUL VL]\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
".inst 0x45069857 // smmla z23.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #-7, MUL VL]\n"
- "add x25, x25, #0x10\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-7, MUL VL]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
".inst 0x45079870 // smmla z16.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-6, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-6, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
".inst 0x45069874 // smmla z20.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #-5, MUL VL]\n"
- "add x23, x23, #0x10\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-5, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
".inst 0x45079871 // smmla z17.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-4, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
".inst 0x45069875 // smmla z21.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #-3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-3, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
".inst 0x45079872 // smmla z18.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-2, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-2, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
".inst 0x45069876 // smmla z22.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #-1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-1, MUL VL]\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x45079873 // smmla z19.s, z3.b, z7.b\n"
".inst 0x4506982f // smmla z15.s, z1.b, z6.b\n"
".inst 0x45069877 // smmla z23.s, z3.b, z6.b\n"
"bgt 41b\n"
"42:" // Height 4: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x8\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1rqb { z3.b }, p0/Z, [x24]\n"
- "ld1rqb { z4.b }, p0/Z, [x23]\n"
+ "ld1rqb { z3.b }, p0/Z, [x23]\n"
"trn2 z1.d, z1.d, z2.d\n"
- "trn1 z2.d, z3.d, z4.d\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1rqb { z4.b }, p0/Z, [x22]\n"
".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
- ".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
+ "trn1 z2.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
+ ".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x45069854 // smmla z20.s, z2.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
".inst 0x45079851 // smmla z17.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
- "subs x27, x27, #0x8\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
".inst 0x45069855 // smmla z21.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
- "trn2 z3.d, z3.d, z4.d\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
".inst 0x45079852 // smmla z18.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
".inst 0x45069856 // smmla z22.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #8\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
".inst 0x45079853 // smmla z19.s, z2.b, z7.b\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
".inst 0x45069857 // smmla z23.s, z2.b, z6.b\n"
"ble 43f\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
".inst 0x45079870 // smmla z16.s, z3.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
".inst 0x45069874 // smmla z20.s, z3.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
".inst 0x45079871 // smmla z17.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
".inst 0x45069875 // smmla z21.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
".inst 0x45079872 // smmla z18.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
".inst 0x45069876 // smmla z22.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #8\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x45079873 // smmla z19.s, z3.b, z7.b\n"
".inst 0x4506982f // smmla z15.s, z1.b, z6.b\n"
".inst 0x45069877 // smmla z23.s, z3.b, z6.b\n"
"43:" // Height 4: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 38b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"uzp1 z7.d, z8.d, z12.d\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 z8.d, z8.d, z12.d\n"
+ "st1w { z7.s }, p4, [x28]\n"
"uzp1 z12.d, z9.d, z13.d\n"
- "st1w { z7.s }, p4, [x9]\n"
+ "add x23, x28, x19, LSL #2\n"
"uzp2 z9.d, z9.d, z13.d\n"
+ "st1w { z12.s }, p3, [x28, #1, MUL VL]\n"
"uzp1 z13.d, z10.d, z14.d\n"
- "st1w { z12.s }, p3, [x9, #1, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
"uzp2 z10.d, z10.d, z14.d\n"
+ "st1w { z13.s }, p2, [x28, #2, MUL VL]\n"
"uzp1 z14.d, z11.d, z15.d\n"
- "st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
+ "add x21, x22, x19, LSL #2\n"
"uzp2 z11.d, z11.d, z15.d\n"
+ "st1w { z14.s }, p1, [x28, #3, MUL VL]\n"
"uzp1 z15.d, z16.d, z20.d\n"
- "st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "addvl x28, x28, #4\n"
"uzp2 z16.d, z16.d, z20.d\n"
+ "st1w { z8.s }, p4, [x23]\n"
"uzp1 z20.d, z17.d, z21.d\n"
- "st1w { z8.s }, p4, [x24]\n"
+ "st1w { z9.s }, p3, [x23, #1, MUL VL]\n"
"uzp2 z17.d, z17.d, z21.d\n"
+ "st1w { z10.s }, p2, [x23, #2, MUL VL]\n"
"uzp1 z21.d, z18.d, z22.d\n"
- "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z11.s }, p1, [x23, #3, MUL VL]\n"
"uzp2 z18.d, z18.d, z22.d\n"
+ "st1w { z15.s }, p4, [x22]\n"
"uzp1 z22.d, z19.d, z23.d\n"
- "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z20.s }, p3, [x22, #1, MUL VL]\n"
"uzp2 z19.d, z19.d, z23.d\n"
- "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z15.s }, p4, [x23]\n"
- "st1w { z20.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z21.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z22.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x22]\n"
- "st1w { z17.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z21.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z22.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x21]\n"
+ "st1w { z17.s }, p3, [x21, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x21, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x21, #3, MUL VL]\n"
"44:" // Height 4: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 35b\n"
"b 68f\n"
"45:" // Height 5
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"46:" // Height 5: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
"tbz %x[flags], #0, 47f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z9.s }, p4/Z, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "ld1w { z10.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z11.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x24]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z9.s }, p4/Z, [x28]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "ld1w { z10.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z12.s }, p4/Z, [x23]\n"
"zip1 z8.d, z9.d, z12.d\n"
+ "ld1w { z13.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "add x20, x21, x19, LSL #2\n"
"zip2 z12.d, z9.d, z12.d\n"
- "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x23, #3, MUL VL]\n"
"zip1 z9.d, z10.d, z13.d\n"
+ "ld1w { z17.s }, p4/Z, [x22]\n"
"zip2 z13.d, z10.d, z13.d\n"
- "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z17.s }, p4/Z, [x23]\n"
+ "ld1w { z18.s }, p3/Z, [x22, #1, MUL VL]\n"
"zip1 z10.d, z11.d, z14.d\n"
+ "ld1w { z19.s }, p2/Z, [x22, #2, MUL VL]\n"
"zip2 z14.d, z11.d, z14.d\n"
- "ld1w { z18.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z19.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z24.s }, p1/Z, [x22, #3, MUL VL]\n"
"zip1 z11.d, z16.d, z15.d\n"
+ "ld1w { z20.s }, p4/Z, [x21]\n"
"zip2 z15.d, z16.d, z15.d\n"
- "ld1w { z24.s }, p1/Z, [x23, #3, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x22]\n"
+ "ld1w { z21.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x21, #2, MUL VL]\n"
"zip1 z16.d, z17.d, z20.d\n"
+ "ld1w { z23.s }, p1/Z, [x21, #3, MUL VL]\n"
"zip2 z20.d, z17.d, z20.d\n"
- "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
- "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z25.s }, p4/Z, [x20]\n"
"zip1 z17.d, z18.d, z21.d\n"
+ "ld1w { z26.s }, p3/Z, [x20, #1, MUL VL]\n"
"zip2 z21.d, z18.d, z21.d\n"
- "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
- "ld1w { z25.s }, p4/Z, [x21]\n"
+ "ld1w { z27.s }, p2/Z, [x20, #2, MUL VL]\n"
"zip1 z18.d, z19.d, z22.d\n"
+ "ld1w { z6.s }, p1/Z, [x20, #3, MUL VL]\n"
"zip2 z22.d, z19.d, z22.d\n"
- "ld1w { z26.s }, p3/Z, [x21, #1, MUL VL]\n"
- "ld1w { z27.s }, p2/Z, [x21, #2, MUL VL]\n"
"zip1 z19.d, z24.d, z23.d\n"
"zip2 z23.d, z24.d, z23.d\n"
- "ld1w { z6.s }, p1/Z, [x21, #3, MUL VL]\n"
"zip1 z24.d, z25.d, z28.d\n"
"zip2 z28.d, z25.d, z28.d\n"
"zip1 z25.d, z26.d, z29.d\n"
@@ -1034,115 +1034,115 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"mov z30.s, #0x0\n"
"mov z31.s, #0x0\n"
"48:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"49:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 50f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 51f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 51f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
"b 51f\n"
"50:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
"51:" // Height 5: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 53f\n"
"52:" // Height 5: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
- "ld1rqb { z3.b }, p0/Z, [x24]\n"
- "ld1rqb { z4.b }, p0/Z, [x23]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
+ "ld1rqb { z3.b }, p0/Z, [x23]\n"
+ "sub x26, x26, #0x10\n"
"trn2 z1.d, z1.d, z2.d\n"
- "ld1rqb { z5.b }, p0/Z, [x22]\n"
+ "ld1rqb { z4.b }, p0/Z, [x22]\n"
+ "cmp x26, #0x10\n"
+ ".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
+ "ld1rqb { z5.b }, p0/Z, [x21]\n"
+ "add x25, x25, #0x10\n"
"trn1 z2.d, z3.d, z4.d\n"
+ "add x24, x24, #0x10\n"
"trn2 z3.d, z3.d, z4.d\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
+ "add x23, x23, #0x10\n"
"trn1 z4.d, z5.d, z6.d\n"
+ "add x22, x22, #0x10\n"
"trn2 z5.d, z5.d, z6.d\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "add x21, x21, #0x10\n"
".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
".inst 0x45079898 // smmla z24.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "sub x27, x27, #0x10\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
".inst 0x45069854 // smmla z20.s, z2.b, z6.b\n"
- "cmp x27, #0x10\n"
- "add x26, x26, #0x10\n"
".inst 0x4506989c // smmla z28.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
- "add x25, x25, #0x10\n"
".inst 0x45079851 // smmla z17.s, z2.b, z7.b\n"
".inst 0x45079899 // smmla z25.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
".inst 0x45069855 // smmla z21.s, z2.b, z6.b\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
".inst 0x4506989d // smmla z29.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
".inst 0x45079852 // smmla z18.s, z2.b, z7.b\n"
".inst 0x4507989a // smmla z26.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
".inst 0x45069856 // smmla z22.s, z2.b, z6.b\n"
".inst 0x4506989e // smmla z30.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #16\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
".inst 0x45079853 // smmla z19.s, z2.b, z7.b\n"
".inst 0x4507989b // smmla z27.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-8, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-8, MUL VL]\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
".inst 0x45069857 // smmla z23.s, z2.b, z6.b\n"
".inst 0x4506989f // smmla z31.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #-7, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-7, MUL VL]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
".inst 0x45079870 // smmla z16.s, z3.b, z7.b\n"
".inst 0x450798b8 // smmla z24.s, z5.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-6, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-6, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
".inst 0x45069874 // smmla z20.s, z3.b, z6.b\n"
".inst 0x450698bc // smmla z28.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #-5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-5, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
".inst 0x45079871 // smmla z17.s, z3.b, z7.b\n"
".inst 0x450798b9 // smmla z25.s, z5.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-4, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
".inst 0x45069875 // smmla z21.s, z3.b, z6.b\n"
".inst 0x450698bd // smmla z29.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #-3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-3, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
".inst 0x45079872 // smmla z18.s, z3.b, z7.b\n"
".inst 0x450798ba // smmla z26.s, z5.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-2, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-2, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
".inst 0x45069876 // smmla z22.s, z3.b, z6.b\n"
".inst 0x450698be // smmla z30.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #-1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-1, MUL VL]\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x45079873 // smmla z19.s, z3.b, z7.b\n"
".inst 0x450798bb // smmla z27.s, z5.b, z7.b\n"
@@ -1151,80 +1151,80 @@ void sve_hybrid_s8s32_mmla_6x4VL (
".inst 0x450698bf // smmla z31.s, z5.b, z6.b\n"
"bgt 52b\n"
"53:" // Height 5: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
- "ld1rqb { z3.b }, p0/Z, [x24]\n"
- "ld1rqb { z4.b }, p0/Z, [x23]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
+ "ld1rqb { z3.b }, p0/Z, [x23]\n"
+ "subs x26, x26, #0x8\n"
"trn2 z1.d, z1.d, z2.d\n"
- "ld1rqb { z5.b }, p0/Z, [x22]\n"
+ "ld1rqb { z4.b }, p0/Z, [x22]\n"
+ "ld1rqb { z5.b }, p0/Z, [x21]\n"
+ ".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
"trn1 z2.d, z3.d, z4.d\n"
"trn2 z3.d, z3.d, z4.d\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
"trn1 z4.d, z5.d, z6.d\n"
"trn2 z5.d, z5.d, z6.d\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
".inst 0x45079898 // smmla z24.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x8\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
".inst 0x45069854 // smmla z20.s, z2.b, z6.b\n"
".inst 0x4506989c // smmla z28.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
".inst 0x45079851 // smmla z17.s, z2.b, z7.b\n"
".inst 0x45079899 // smmla z25.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
".inst 0x45069855 // smmla z21.s, z2.b, z6.b\n"
".inst 0x4506989d // smmla z29.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
".inst 0x45079852 // smmla z18.s, z2.b, z7.b\n"
".inst 0x4507989a // smmla z26.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
".inst 0x45069856 // smmla z22.s, z2.b, z6.b\n"
".inst 0x4506989e // smmla z30.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
- "addvl x10, x10, #8\n"
".inst 0x45079853 // smmla z19.s, z2.b, z7.b\n"
".inst 0x4507989b // smmla z27.s, z4.b, z7.b\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
".inst 0x45069857 // smmla z23.s, z2.b, z6.b\n"
".inst 0x4506989f // smmla z31.s, z4.b, z6.b\n"
"ble 54f\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
".inst 0x45079870 // smmla z16.s, z3.b, z7.b\n"
".inst 0x450798b8 // smmla z24.s, z5.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x45069874 // smmla z20.s, z3.b, z6.b\n"
".inst 0x450698bc // smmla z28.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
".inst 0x45079871 // smmla z17.s, z3.b, z7.b\n"
".inst 0x450798b9 // smmla z25.s, z5.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
".inst 0x45069875 // smmla z21.s, z3.b, z6.b\n"
".inst 0x450698bd // smmla z29.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
".inst 0x45079872 // smmla z18.s, z3.b, z7.b\n"
".inst 0x450798ba // smmla z26.s, z5.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
".inst 0x45069876 // smmla z22.s, z3.b, z6.b\n"
".inst 0x450698be // smmla z30.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #8\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x45079873 // smmla z19.s, z3.b, z7.b\n"
".inst 0x450798bb // smmla z27.s, z5.b, z7.b\n"
@@ -1232,127 +1232,127 @@ void sve_hybrid_s8s32_mmla_6x4VL (
".inst 0x45069877 // smmla z23.s, z3.b, z6.b\n"
".inst 0x450698bf // smmla z31.s, z5.b, z6.b\n"
"54:" // Height 5: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 49b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"uzp1 z7.d, z8.d, z12.d\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 z8.d, z8.d, z12.d\n"
+ "st1w { z7.s }, p4, [x28]\n"
"uzp1 z12.d, z9.d, z13.d\n"
+ "add x23, x28, x19, LSL #2\n"
"uzp2 z9.d, z9.d, z13.d\n"
+ "st1w { z12.s }, p3, [x28, #1, MUL VL]\n"
"uzp1 z13.d, z10.d, z14.d\n"
- "st1w { z7.s }, p4, [x9]\n"
+ "add x22, x23, x19, LSL #2\n"
"uzp2 z10.d, z10.d, z14.d\n"
+ "st1w { z13.s }, p2, [x28, #2, MUL VL]\n"
"uzp1 z14.d, z11.d, z15.d\n"
- "st1w { z12.s }, p3, [x9, #1, MUL VL]\n"
+ "add x21, x22, x19, LSL #2\n"
"uzp2 z11.d, z11.d, z15.d\n"
+ "st1w { z14.s }, p1, [x28, #3, MUL VL]\n"
"uzp1 z15.d, z16.d, z20.d\n"
- "st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
+ "add x20, x21, x19, LSL #2\n"
"uzp2 z16.d, z16.d, z20.d\n"
+ "st1w { z8.s }, p4, [x23]\n"
+ "addvl x28, x28, #4\n"
"uzp1 z20.d, z17.d, z21.d\n"
- "st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "st1w { z9.s }, p3, [x23, #1, MUL VL]\n"
"uzp2 z17.d, z17.d, z21.d\n"
+ "st1w { z10.s }, p2, [x23, #2, MUL VL]\n"
"uzp1 z21.d, z18.d, z22.d\n"
- "st1w { z8.s }, p4, [x24]\n"
+ "st1w { z11.s }, p1, [x23, #3, MUL VL]\n"
"uzp2 z18.d, z18.d, z22.d\n"
+ "st1w { z15.s }, p4, [x22]\n"
"uzp1 z22.d, z19.d, z23.d\n"
- "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z20.s }, p3, [x22, #1, MUL VL]\n"
"uzp2 z19.d, z19.d, z23.d\n"
+ "st1w { z21.s }, p2, [x22, #2, MUL VL]\n"
"uzp1 z24.d, z24.d, z28.d\n"
- "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z22.s }, p1, [x22, #3, MUL VL]\n"
"uzp1 z25.d, z25.d, z29.d\n"
+ "st1w { z16.s }, p4, [x21]\n"
"uzp1 z26.d, z26.d, z30.d\n"
- "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z17.s }, p3, [x21, #1, MUL VL]\n"
"uzp1 z27.d, z27.d, z31.d\n"
- "st1w { z15.s }, p4, [x23]\n"
- "st1w { z20.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z21.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z22.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x22]\n"
- "st1w { z17.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x22, #3, MUL VL]\n"
- "st1w { z24.s }, p4, [x21]\n"
- "st1w { z25.s }, p3, [x21, #1, MUL VL]\n"
- "st1w { z26.s }, p2, [x21, #2, MUL VL]\n"
- "st1w { z27.s }, p1, [x21, #3, MUL VL]\n"
+ "st1w { z18.s }, p2, [x21, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x21, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x20]\n"
+ "st1w { z25.s }, p3, [x20, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x20, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x20, #3, MUL VL]\n"
"55:" // Height 5: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 46b\n"
"b 68f\n"
"56:" // Height 6
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"mov x20, #0x18\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x9, %x[output_ptr]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
"57:" // Height 6: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
"tbz %x[flags], #0, 58f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z9.s }, p4/Z, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "ld1w { z10.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z11.s }, p2/Z, [x9, #2, MUL VL]\n"
- "add x20, x21, x20, LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x24]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z9.s }, p4/Z, [x28]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "ld1w { z10.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z12.s }, p4/Z, [x23]\n"
"zip1 z8.d, z9.d, z12.d\n"
- "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z13.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "add x20, x21, x19, LSL #2\n"
"zip2 z12.d, z9.d, z12.d\n"
+ "ld1w { z14.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "add x19, x20, x19, LSL #2\n"
"zip1 z9.d, z10.d, z13.d\n"
- "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z17.s }, p4/Z, [x23]\n"
+ "ld1w { z15.s }, p1/Z, [x23, #3, MUL VL]\n"
"zip2 z13.d, z10.d, z13.d\n"
+ "ld1w { z17.s }, p4/Z, [x22]\n"
"zip1 z10.d, z11.d, z14.d\n"
- "ld1w { z18.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z19.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z18.s }, p3/Z, [x22, #1, MUL VL]\n"
"zip2 z14.d, z11.d, z14.d\n"
+ "ld1w { z19.s }, p2/Z, [x22, #2, MUL VL]\n"
"zip1 z11.d, z16.d, z15.d\n"
- "ld1w { z24.s }, p1/Z, [x23, #3, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x22]\n"
+ "ld1w { z24.s }, p1/Z, [x22, #3, MUL VL]\n"
"zip2 z15.d, z16.d, z15.d\n"
+ "ld1w { z20.s }, p4/Z, [x21]\n"
+ "ld1w { z21.s }, p3/Z, [x21, #1, MUL VL]\n"
"zip1 z16.d, z17.d, z20.d\n"
- "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
- "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x21, #2, MUL VL]\n"
"zip2 z20.d, z17.d, z20.d\n"
+ "ld1w { z23.s }, p1/Z, [x21, #3, MUL VL]\n"
"zip1 z17.d, z18.d, z21.d\n"
- "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
- "ld1w { z25.s }, p4/Z, [x21]\n"
+ "ld1w { z25.s }, p4/Z, [x20]\n"
"zip2 z21.d, z18.d, z21.d\n"
+ "ld1w { z26.s }, p3/Z, [x20, #1, MUL VL]\n"
"zip1 z18.d, z19.d, z22.d\n"
- "ld1w { z26.s }, p3/Z, [x21, #1, MUL VL]\n"
- "ld1w { z27.s }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z27.s }, p2/Z, [x20, #2, MUL VL]\n"
"zip2 z22.d, z19.d, z22.d\n"
+ "ld1w { z6.s }, p1/Z, [x20, #3, MUL VL]\n"
"zip1 z19.d, z24.d, z23.d\n"
- "ld1w { z6.s }, p1/Z, [x21, #3, MUL VL]\n"
- "ld1w { z28.s }, p4/Z, [x20]\n"
+ "ld1w { z28.s }, p4/Z, [x19]\n"
"zip2 z23.d, z24.d, z23.d\n"
+ "ld1w { z29.s }, p3/Z, [x19, #1, MUL VL]\n"
+ "ld1w { z30.s }, p2/Z, [x19, #2, MUL VL]\n"
"zip1 z24.d, z25.d, z28.d\n"
- "ld1w { z29.s }, p3/Z, [x20, #1, MUL VL]\n"
- "ld1w { z30.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z31.s }, p1/Z, [x19, #3, MUL VL]\n"
"zip2 z28.d, z25.d, z28.d\n"
"zip1 z25.d, z26.d, z29.d\n"
- "ld1w { z31.s }, p1/Z, [x20, #3, MUL VL]\n"
"zip2 z29.d, z26.d, z29.d\n"
"zip1 z26.d, z27.d, z30.d\n"
"zip2 z30.d, z27.d, z30.d\n"
@@ -1385,120 +1385,120 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"mov z30.s, #0x0\n"
"mov z31.s, #0x0\n"
"59:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"60:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 61f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 62f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
- "add x21, x21, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 62f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
+ "add x20, x20, x19\n"
"b 62f\n"
"61:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
+ "add x20, x21, x19\n"
"62:" // Height 6: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 64f\n"
"63:" // Height 6: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1rqb { z3.b }, p0/Z, [x24]\n"
- "ld1rqb { z4.b }, p0/Z, [x23]\n"
+ "ld1rqb { z3.b }, p0/Z, [x23]\n"
+ "cmp x26, #0x10\n"
"trn2 z1.d, z1.d, z2.d\n"
+ "ld1rqb { z4.b }, p0/Z, [x22]\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
+ "ld1rqb { z5.b }, p0/Z, [x21]\n"
+ "add x24, x24, #0x10\n"
"trn1 z2.d, z3.d, z4.d\n"
- "ld1rqb { z5.b }, p0/Z, [x22]\n"
- "ld1rqb { z6.b }, p0/Z, [x21]\n"
+ "ld1rqb { z6.b }, p0/Z, [x20]\n"
+ "add x23, x23, #0x10\n"
"trn2 z3.d, z3.d, z4.d\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
+ "add x20, x20, #0x10\n"
"trn1 z4.d, z5.d, z6.d\n"
"trn2 z5.d, z5.d, z6.d\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
- ".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
".inst 0x45079898 // smmla z24.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "sub x27, x27, #0x10\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
".inst 0x45069854 // smmla z20.s, z2.b, z6.b\n"
- "cmp x27, #0x10\n"
- "add x26, x26, #0x10\n"
".inst 0x4506989c // smmla z28.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
- "add x25, x25, #0x10\n"
".inst 0x45079851 // smmla z17.s, z2.b, z7.b\n"
".inst 0x45079899 // smmla z25.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
".inst 0x45069855 // smmla z21.s, z2.b, z6.b\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
".inst 0x4506989d // smmla z29.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
- "add x21, x21, #0x10\n"
".inst 0x45079852 // smmla z18.s, z2.b, z7.b\n"
".inst 0x4507989a // smmla z26.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
".inst 0x45069856 // smmla z22.s, z2.b, z6.b\n"
".inst 0x4506989e // smmla z30.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #16\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
".inst 0x45079853 // smmla z19.s, z2.b, z7.b\n"
".inst 0x4507989b // smmla z27.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-8, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-8, MUL VL]\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
".inst 0x45069857 // smmla z23.s, z2.b, z6.b\n"
".inst 0x4506989f // smmla z31.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #-7, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-7, MUL VL]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
".inst 0x45079870 // smmla z16.s, z3.b, z7.b\n"
".inst 0x450798b8 // smmla z24.s, z5.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-6, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-6, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
".inst 0x45069874 // smmla z20.s, z3.b, z6.b\n"
".inst 0x450698bc // smmla z28.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #-5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-5, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
".inst 0x45079871 // smmla z17.s, z3.b, z7.b\n"
".inst 0x450798b9 // smmla z25.s, z5.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-4, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
".inst 0x45069875 // smmla z21.s, z3.b, z6.b\n"
".inst 0x450698bd // smmla z29.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #-3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-3, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
".inst 0x45079872 // smmla z18.s, z3.b, z7.b\n"
".inst 0x450798ba // smmla z26.s, z5.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-2, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-2, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
".inst 0x45069876 // smmla z22.s, z3.b, z6.b\n"
".inst 0x450698be // smmla z30.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #-1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-1, MUL VL]\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x45079873 // smmla z19.s, z3.b, z7.b\n"
".inst 0x450798bb // smmla z27.s, z5.b, z7.b\n"
@@ -1507,81 +1507,81 @@ void sve_hybrid_s8s32_mmla_6x4VL (
".inst 0x450698bf // smmla z31.s, z5.b, z6.b\n"
"bgt 63b\n"
"64:" // Height 6: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "subs x26, x26, #0x8\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1rqb { z3.b }, p0/Z, [x24]\n"
- "ld1rqb { z4.b }, p0/Z, [x23]\n"
+ "ld1rqb { z3.b }, p0/Z, [x23]\n"
"trn2 z1.d, z1.d, z2.d\n"
+ "ld1rqb { z4.b }, p0/Z, [x22]\n"
+ "ld1rqb { z5.b }, p0/Z, [x21]\n"
+ ".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
+ "ld1rqb { z6.b }, p0/Z, [x20]\n"
"trn1 z2.d, z3.d, z4.d\n"
- "ld1rqb { z5.b }, p0/Z, [x22]\n"
- "ld1rqb { z6.b }, p0/Z, [x21]\n"
"trn2 z3.d, z3.d, z4.d\n"
"trn1 z4.d, z5.d, z6.d\n"
"trn2 z5.d, z5.d, z6.d\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
".inst 0x45079898 // smmla z24.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x8\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
".inst 0x45069854 // smmla z20.s, z2.b, z6.b\n"
".inst 0x4506989c // smmla z28.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
".inst 0x45079851 // smmla z17.s, z2.b, z7.b\n"
".inst 0x45079899 // smmla z25.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
".inst 0x45069855 // smmla z21.s, z2.b, z6.b\n"
".inst 0x4506989d // smmla z29.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
".inst 0x45079852 // smmla z18.s, z2.b, z7.b\n"
".inst 0x4507989a // smmla z26.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
".inst 0x45069856 // smmla z22.s, z2.b, z6.b\n"
".inst 0x4506989e // smmla z30.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
- "addvl x10, x10, #8\n"
".inst 0x45079853 // smmla z19.s, z2.b, z7.b\n"
".inst 0x4507989b // smmla z27.s, z4.b, z7.b\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
".inst 0x45069857 // smmla z23.s, z2.b, z6.b\n"
".inst 0x4506989f // smmla z31.s, z4.b, z6.b\n"
"ble 65f\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
".inst 0x45079870 // smmla z16.s, z3.b, z7.b\n"
".inst 0x450798b8 // smmla z24.s, z5.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x45069874 // smmla z20.s, z3.b, z6.b\n"
".inst 0x450698bc // smmla z28.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
".inst 0x45079871 // smmla z17.s, z3.b, z7.b\n"
".inst 0x450798b9 // smmla z25.s, z5.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
".inst 0x45069875 // smmla z21.s, z3.b, z6.b\n"
".inst 0x450698bd // smmla z29.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
".inst 0x45079872 // smmla z18.s, z3.b, z7.b\n"
".inst 0x450798ba // smmla z26.s, z5.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
".inst 0x45069876 // smmla z22.s, z3.b, z6.b\n"
".inst 0x450698be // smmla z30.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #8\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x45079873 // smmla z19.s, z3.b, z7.b\n"
".inst 0x450798bb // smmla z27.s, z5.b, z7.b\n"
@@ -1589,85 +1589,85 @@ void sve_hybrid_s8s32_mmla_6x4VL (
".inst 0x45069877 // smmla z23.s, z3.b, z6.b\n"
".inst 0x450698bf // smmla z31.s, z5.b, z6.b\n"
"65:" // Height 6: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 60b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"uzp1 z7.d, z8.d, z12.d\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 z8.d, z8.d, z12.d\n"
+ "st1w { z7.s }, p4, [x28]\n"
"uzp1 z12.d, z9.d, z13.d\n"
- "add x20, x21, x20, LSL #2\n"
+ "add x23, x28, x19, LSL #2\n"
"uzp2 z9.d, z9.d, z13.d\n"
+ "st1w { z12.s }, p3, [x28, #1, MUL VL]\n"
"uzp1 z13.d, z10.d, z14.d\n"
- "st1w { z7.s }, p4, [x9]\n"
+ "add x22, x23, x19, LSL #2\n"
"uzp2 z10.d, z10.d, z14.d\n"
+ "st1w { z13.s }, p2, [x28, #2, MUL VL]\n"
"uzp1 z14.d, z11.d, z15.d\n"
- "st1w { z12.s }, p3, [x9, #1, MUL VL]\n"
+ "add x21, x22, x19, LSL #2\n"
"uzp2 z11.d, z11.d, z15.d\n"
+ "st1w { z14.s }, p1, [x28, #3, MUL VL]\n"
"uzp1 z15.d, z16.d, z20.d\n"
- "st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
+ "add x20, x21, x19, LSL #2\n"
"uzp2 z16.d, z16.d, z20.d\n"
+ "st1w { z8.s }, p4, [x23]\n"
+ "add x19, x20, x19, LSL #2\n"
"uzp1 z20.d, z17.d, z21.d\n"
- "st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "st1w { z9.s }, p3, [x23, #1, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"uzp2 z17.d, z17.d, z21.d\n"
+ "st1w { z10.s }, p2, [x23, #2, MUL VL]\n"
"uzp1 z21.d, z18.d, z22.d\n"
- "st1w { z8.s }, p4, [x24]\n"
+ "st1w { z11.s }, p1, [x23, #3, MUL VL]\n"
"uzp2 z18.d, z18.d, z22.d\n"
+ "st1w { z15.s }, p4, [x22]\n"
"uzp1 z22.d, z19.d, z23.d\n"
- "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z20.s }, p3, [x22, #1, MUL VL]\n"
"uzp2 z19.d, z19.d, z23.d\n"
+ "st1w { z21.s }, p2, [x22, #2, MUL VL]\n"
"uzp1 z23.d, z24.d, z28.d\n"
- "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z22.s }, p1, [x22, #3, MUL VL]\n"
"uzp2 z24.d, z24.d, z28.d\n"
+ "st1w { z16.s }, p4, [x21]\n"
"uzp1 z28.d, z25.d, z29.d\n"
- "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z17.s }, p3, [x21, #1, MUL VL]\n"
"uzp2 z25.d, z25.d, z29.d\n"
+ "st1w { z18.s }, p2, [x21, #2, MUL VL]\n"
"uzp1 z29.d, z26.d, z30.d\n"
- "st1w { z15.s }, p4, [x23]\n"
+ "st1w { z19.s }, p1, [x21, #3, MUL VL]\n"
"uzp2 z26.d, z26.d, z30.d\n"
+ "st1w { z23.s }, p4, [x20]\n"
"uzp1 z30.d, z27.d, z31.d\n"
- "st1w { z20.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z28.s }, p3, [x20, #1, MUL VL]\n"
"uzp2 z27.d, z27.d, z31.d\n"
- "st1w { z21.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z22.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x22]\n"
- "st1w { z17.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x22, #3, MUL VL]\n"
- "st1w { z23.s }, p4, [x21]\n"
- "st1w { z28.s }, p3, [x21, #1, MUL VL]\n"
- "st1w { z29.s }, p2, [x21, #2, MUL VL]\n"
- "st1w { z30.s }, p1, [x21, #3, MUL VL]\n"
- "st1w { z24.s }, p4, [x20]\n"
- "st1w { z25.s }, p3, [x20, #1, MUL VL]\n"
- "st1w { z26.s }, p2, [x20, #2, MUL VL]\n"
- "st1w { z27.s }, p1, [x20, #3, MUL VL]\n"
+ "st1w { z29.s }, p2, [x20, #2, MUL VL]\n"
+ "st1w { z30.s }, p1, [x20, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x19]\n"
+ "st1w { z25.s }, p3, [x19, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x19, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x19, #3, MUL VL]\n"
"66:" // Height 6: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 57b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 68f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 67f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"67:" // Update direct input
- "mov x20, #0x6\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x6\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"68:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
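
The hunks above all follow one pattern: the revert shifts the scratch register names down by one (x20 becomes x19, x21 becomes x20, and so on) and restores "x19" to the clobber list of the inline-assembly statement. A minimal sketch of why that clobber entry matters, assuming a GCC/Clang AArch64 toolchain (the snippet is illustrative and not part of the patch): x19 is callee-saved under AAPCS64, so any extended-asm block that writes it must declare the clobber, otherwise the compiler assumes the value survives the statement.

#include <stdint.h>

// Illustrative only, not from the patch: an extended-asm block that uses
// x19 as scratch must list it as clobbered so the compiler saves and
// restores this callee-saved register around the statement.
static inline uint64_t add_one(uint64_t v)
{
    uint64_t out;
    __asm__ volatile(
        "mov x19, %[in]\n"
        "add x19, x19, #1\n"
        "mov %[out], x19\n"
        : [out] "=r" (out)
        : [in] "r" (v)
        : "cc", "x19"); // mirrors the "x19" entry restored in the kernel's clobber list above
    return out;
}

The reverted kernel does the same at scale: every x-register named inside its assembly strings also appears in the statement's clobber list.
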
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_dot_4x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_dot_4x4VL/generic.cpp
index 79bd563a4b..be6d5b901d 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_dot_4x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_dot_4x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -85,165 +85,165 @@ void sve_hybrid_u8qa_dot_4x4VL (
"cmp %x[M], #0x2\n"
"bgt 29f\n"
"beq 15f\n"
- "mov x10, %x[col_bias]\n"
"mov z11.s, #0x0\n"
- "mov z15.b, #0x1\n"
- "bic %x[flags], %x[flags], #0x80000000\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov z15.b, #0x1\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "mov x27, %x[col_bias]\n"
+ "bic %x[flags], %x[flags], #0x80000000\n"
+ "mov x26, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x9\n"
"mov z16.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z17.s, #0x0\n"
+ "whilelt p1.b, x19, x9\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"3:" // Height 1: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"4:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 5f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "cbnz x26, 6f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "cbnz x25, 6f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19\n"
"b 6f\n"
"5:" // Height 1: setup direct input
- "mov x24, %x[input_ptr]\n"
+ "mov x23, %x[input_ptr]\n"
"6:" // Height 1: input setup done
- "cmp x25, #0x10\n"
+ "cmp x24, #0x10\n"
"ble 9f\n"
"7:" // Height 1: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x25\n"
- "ld1rqb { z0.b }, p0/Z, [x24]\n"
"ld1b { z4.b }, p2/Z, [x28]\n"
- "udot z16.s, z4.b, z0.b[0]\n"
+ "whilelt p0.b, XZR, x24\n"
"ld1b { z5.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1rqb { z0.b }, p0/Z, [x23]\n"
+ "udot z16.s, z4.b, z0.b[0]\n"
"ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x23, x23, #0x10\n"
"udot z17.s, z5.b, z0.b[0]\n"
- "udot z18.s, z6.b, z0.b[0]\n"
"ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
- "udot z19.s, z7.b, z0.b[0]\n"
"ld1b { z8.b }, p2/Z, [x28, #4, MUL VL]\n"
- "udot z16.s, z8.b, z0.b[1]\n"
+ "udot z18.s, z6.b, z0.b[0]\n"
"ld1b { z9.b }, p2/Z, [x28, #5, MUL VL]\n"
"ld1b { z10.b }, p2/Z, [x28, #6, MUL VL]\n"
- "udot z17.s, z9.b, z0.b[1]\n"
- "udot z18.s, z10.b, z0.b[1]\n"
+ "udot z19.s, z7.b, z0.b[0]\n"
+ "udot z16.s, z8.b, z0.b[1]\n"
"ld1b { z4.b }, p2/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
- "udot z19.s, z4.b, z0.b[1]\n"
+ "udot z17.s, z9.b, z0.b[1]\n"
"ld1b { z5.b }, p2/Z, [x28, #-8, MUL VL]\n"
"ld1b { z6.b }, p2/Z, [x28, #-7, MUL VL]\n"
+ "udot z18.s, z10.b, z0.b[1]\n"
"ld1b { z7.b }, p2/Z, [x28, #-6, MUL VL]\n"
- "udot z16.s, z5.b, z0.b[2]\n"
- "udot z17.s, z6.b, z0.b[2]\n"
+ "udot z19.s, z4.b, z0.b[1]\n"
"ld1b { z8.b }, p2/Z, [x28, #-5, MUL VL]\n"
- "udot z18.s, z7.b, z0.b[2]\n"
- "udot z19.s, z8.b, z0.b[2]\n"
+ "udot z16.s, z5.b, z0.b[2]\n"
"ld1b { z9.b }, p2/Z, [x28, #-4, MUL VL]\n"
+ "udot z17.s, z6.b, z0.b[2]\n"
"ld1b { z10.b }, p2/Z, [x28, #-3, MUL VL]\n"
+ "udot z18.s, z7.b, z0.b[2]\n"
"ld1b { z4.b }, p2/Z, [x28, #-2, MUL VL]\n"
+ "udot z19.s, z8.b, z0.b[2]\n"
+ "ld1b { z5.b }, p2/Z, [x28, #-1, MUL VL]\n"
"udot z16.s, z9.b, z0.b[3]\n"
"udot z17.s, z10.b, z0.b[3]\n"
- "ld1b { z5.b }, p2/Z, [x28, #-1, MUL VL]\n"
"udot z18.s, z4.b, z0.b[3]\n"
"udot z19.s, z5.b, z0.b[3]\n"
- "add x24, x24, #0x10\n"
"tbnz %x[flags], #31, 8f\n"
"udot z11.s, z0.b, z15.b\n"
"8:" // Height 1: Multiply loop: unique 1: skip row sum
- "sub x25, x25, #0x10\n"
- "cmp x25, #0x10\n"
+ "sub x24, x24, #0x10\n"
+ "cmp x24, #0x10\n"
"bgt 7b\n"
"9:" // Height 1: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x25\n"
- "ld1rqb { z0.b }, p0/Z, [x24]\n"
"ld1b { z4.b }, p2/Z, [x28]\n"
- "subs x25, x25, #0x4\n"
+ "whilelt p0.b, XZR, x24\n"
"ld1b { z5.b }, p2/Z, [x28, #1, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "subs x24, x24, #0x4\n"
+ "ld1rqb { z0.b }, p0/Z, [x23]\n"
"udot z16.s, z4.b, z0.b[0]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
"udot z17.s, z5.b, z0.b[0]\n"
"ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"udot z18.s, z6.b, z0.b[0]\n"
"udot z19.s, z7.b, z0.b[0]\n"
- "addvl x28, x28, #4\n"
"ble 10f\n"
"ld1b { z8.b }, p2/Z, [x28]\n"
- "ld1b { z9.b }, p2/Z, [x28, #1, MUL VL]\n"
- "subs x25, x25, #0x4\n"
"udot z16.s, z8.b, z0.b[1]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x24, x24, #0x4\n"
+ "udot z17.s, z9.b, z0.b[1]\n"
"ld1b { z10.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z4.b }, p2/Z, [x28, #3, MUL VL]\n"
- "udot z17.s, z9.b, z0.b[1]\n"
"udot z18.s, z10.b, z0.b[1]\n"
- "udot z19.s, z4.b, z0.b[1]\n"
"addvl x28, x28, #4\n"
+ "udot z19.s, z4.b, z0.b[1]\n"
"ble 10f\n"
"ld1b { z5.b }, p2/Z, [x28]\n"
- "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
- "subs x25, x25, #0x4\n"
"udot z16.s, z5.b, z0.b[2]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x24, x24, #0x4\n"
+ "udot z17.s, z6.b, z0.b[2]\n"
"ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
- "udot z17.s, z6.b, z0.b[2]\n"
"udot z18.s, z7.b, z0.b[2]\n"
- "udot z19.s, z8.b, z0.b[2]\n"
"addvl x28, x28, #4\n"
+ "udot z19.s, z8.b, z0.b[2]\n"
"ble 10f\n"
"ld1b { z9.b }, p2/Z, [x28]\n"
- "ld1b { z10.b }, p2/Z, [x28, #1, MUL VL]\n"
"udot z16.s, z9.b, z0.b[3]\n"
- "udot z17.s, z10.b, z0.b[3]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #1, MUL VL]\n"
"ld1b { z4.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "udot z17.s, z10.b, z0.b[3]\n"
"ld1b { z5.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"udot z18.s, z4.b, z0.b[3]\n"
"udot z19.s, z5.b, z0.b[3]\n"
- "addvl x28, x28, #4\n"
"10:" // Height 1: Multiply loop: multiply skip
"tbnz %x[flags], #31, 11f\n"
"udot z11.s, z0.b, z15.b\n"
"11:" // Height 1: Multiply loop: unique 2: skip row sum
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 4b\n"
"tbnz %x[flags], #31, 12f\n"
- "mov x20, #0x4\n"
- "whilelt p0.s, XZR, x20\n"
- "add x20, %x[qp], %[b_offset]\n"
- "ld1rw { z1.s }, p2/Z, [x20]\n"
+ "add x19, %x[qp], %[b_offset]\n"
+ "ld1rw { z1.s }, p2/Z, [x19]\n"
+ "neg z1.s, p2/M, z1.s\n"
+ "mov x19, #0x4\n"
+ "whilelt p0.s, XZR, x19\n"
"uaddv d11, p0, z11.s\n"
"mov z11.s, z11.s[0]\n"
- "neg z1.s, p2/M, z1.s\n"
"mul z11.s, p2/M, z11.s, z1.s\n"
"12:" // Height 1: skip row sum fixup
"add z16.s, z16.s, z11.s\n"
+ "ld1w { z0.s }, p2/Z, [x27]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"add z17.s, z17.s, z11.s\n"
- "ld1w { z0.s }, p2/Z, [x10]\n"
- "ld1w { z1.s }, p2/Z, [x10, #1, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x27, #1, MUL VL]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
"add z18.s, z18.s, z11.s\n"
+ "ld1w { z2.s }, p2/Z, [x27, #2, MUL VL]\n"
+ "add x19, %x[qp], %[per_layer_mul]\n"
"add z19.s, z19.s, z11.s\n"
- "ld1w { z2.s }, p2/Z, [x10, #2, MUL VL]\n"
- "ld1w { z3.s }, p2/Z, [x10, #3, MUL VL]\n"
- "add x20, %x[qp], %[per_layer_mul]\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
+ "ld1w { z3.s }, p2/Z, [x27, #3, MUL VL]\n"
+ "addvl x27, x27, #4\n"
"add z16.s, z16.s, z0.s\n"
+ "ld1rw { z0.s }, p2/Z, [x23]\n"
"add z17.s, z17.s, z1.s\n"
+ "ld1rw { z4.s }, p2/Z, [x19]\n"
"add z18.s, z18.s, z2.s\n"
"add z19.s, z19.s, z3.s\n"
- "ld1rw { z4.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[per_layer_right_shift]\n"
- "ld1rw { z0.s }, p2/Z, [x20]\n"
".inst 0x04a47610 // sqrdmulh z16.s, z16.s, z4.s\n"
".inst 0x04a47631 // sqrdmulh z17.s, z17.s, z4.s\n"
- "addvl x10, x10, #4\n"
".inst 0x04a47652 // sqrdmulh z18.s, z18.s, z4.s\n"
".inst 0x04a47673 // sqrdmulh z19.s, z19.s, z4.s\n"
"tbz %x[flags], #5, 13f\n"
@@ -254,26 +254,26 @@ void sve_hybrid_u8qa_dot_4x4VL (
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
"sqadd z16.s, z16.s, z4.s\n"
"sqadd z17.s, z17.s, z5.s\n"
"sqadd z18.s, z18.s, z6.s\n"
+ "asr z7.s, z7.s, #0x1f\n"
"sqadd z19.s, z19.s, z7.s\n"
"13:" // Height 1: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x20]\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z16.s, z16.s, z4.s\n"
+ "add x19, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x19]\n"
".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
+ "add x19, %x[qp], %[minval]\n"
".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
+ "ld1rw { z5.s }, p2/Z, [x19]\n"
+ "add x19, %x[qp], %[maxval]\n"
+ ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
+ "ld1rw { z6.s }, p2/Z, [x19]\n"
+ "add z16.s, z16.s, z4.s\n"
"add z17.s, z17.s, z4.s\n"
"add z18.s, z18.s, z4.s\n"
- ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
- "add x20, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x20]\n"
"add z19.s, z19.s, z4.s\n"
- "add x20, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x20]\n"
"smin z16.s, p2/M, z16.s, z6.s\n"
"smin z17.s, p2/M, z17.s, z6.s\n"
"smin z18.s, p2/M, z18.s, z6.s\n"
@@ -281,31 +281,31 @@ void sve_hybrid_u8qa_dot_4x4VL (
"smax z16.s, p2/M, z16.s, z5.s\n"
"smax z17.s, p2/M, z17.s, z5.s\n"
"smax z18.s, p2/M, z18.s, z5.s\n"
- "uzp1 z16.h, z16.h, z17.h\n"
"smax z19.s, p2/M, z19.s, z5.s\n"
+ "uzp1 z16.h, z16.h, z17.h\n"
"uzp1 z17.h, z18.h, z19.h\n"
"uzp1 z16.b, z16.b, z17.b\n"
- "st1b { z16.b }, p1, [x27]\n"
- "addvl x27, x27, #1\n"
+ "st1b { z16.b }, p1, [x26]\n"
+ "addvl x26, x26, #1\n"
"14:" // Height 1: Writeback done
"decw x9, ALL, MUL #4\n"
"cmp x9, XZR\n"
"bgt 2b\n"
"b 58f\n"
"15:" // Height 2
- "mov x10, %x[col_bias]\n"
"mov z11.s, #0x0\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x27, %x[col_bias]\n"
"mov z12.s, #0x0\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"bic %x[flags], %x[flags], #0x80000000\n"
"mov z15.b, #0x1\n"
- "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "mov x26, %x[output_ptr]\n"
"16:" // Height 2: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x9\n"
"mov z16.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z17.s, #0x0\n"
+ "whilelt p1.b, x19, x9\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"mov z20.s, #0x0\n"
@@ -313,68 +313,68 @@ void sve_hybrid_u8qa_dot_4x4VL (
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"17:" // Height 2: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"18:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 19f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "cbnz x26, 20f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "cbnz x25, 20f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
"b 20f\n"
"19:" // Height 2: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19\n"
"20:" // Height 2: input setup done
- "cmp x25, #0x10\n"
+ "cmp x24, #0x10\n"
"ble 23f\n"
"21:" // Height 2: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x25\n"
- "ld1rqb { z0.b }, p0/Z, [x24]\n"
- "ld1rqb { z1.b }, p0/Z, [x23]\n"
- "add x24, x24, #0x10\n"
"ld1b { z4.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x24\n"
"ld1b { z5.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1rqb { z0.b }, p0/Z, [x23]\n"
"udot z16.s, z4.b, z0.b[0]\n"
- "udot z20.s, z4.b, z1.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x22]\n"
+ "add x23, x23, #0x10\n"
+ "udot z17.s, z5.b, z0.b[0]\n"
"ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x22, x22, #0x10\n"
+ "udot z20.s, z4.b, z1.b[0]\n"
"ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
- "udot z17.s, z5.b, z0.b[0]\n"
"udot z21.s, z5.b, z1.b[0]\n"
- "udot z18.s, z6.b, z0.b[0]\n"
- "udot z22.s, z6.b, z1.b[0]\n"
"ld1b { z8.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "udot z18.s, z6.b, z0.b[0]\n"
"ld1b { z9.b }, p2/Z, [x28, #5, MUL VL]\n"
- "udot z19.s, z7.b, z0.b[0]\n"
- "udot z23.s, z7.b, z1.b[0]\n"
+ "udot z22.s, z6.b, z1.b[0]\n"
"ld1b { z10.b }, p2/Z, [x28, #6, MUL VL]\n"
+ "udot z19.s, z7.b, z0.b[0]\n"
"ld1b { z4.b }, p2/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
+ "udot z23.s, z7.b, z1.b[0]\n"
+ "ld1b { z5.b }, p2/Z, [x28, #-8, MUL VL]\n"
"udot z16.s, z8.b, z0.b[1]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-7, MUL VL]\n"
"udot z20.s, z8.b, z1.b[1]\n"
- "ld1b { z5.b }, p2/Z, [x28, #-8, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-6, MUL VL]\n"
"udot z17.s, z9.b, z0.b[1]\n"
+ "ld1b { z8.b }, p2/Z, [x28, #-5, MUL VL]\n"
"udot z21.s, z9.b, z1.b[1]\n"
- "ld1b { z6.b }, p2/Z, [x28, #-7, MUL VL]\n"
- "ld1b { z7.b }, p2/Z, [x28, #-6, MUL VL]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #-4, MUL VL]\n"
"udot z18.s, z10.b, z0.b[1]\n"
"udot z22.s, z10.b, z1.b[1]\n"
- "ld1b { z8.b }, p2/Z, [x28, #-5, MUL VL]\n"
- "ld1b { z9.b }, p2/Z, [x28, #-4, MUL VL]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #-3, MUL VL]\n"
"udot z19.s, z4.b, z0.b[1]\n"
"udot z23.s, z4.b, z1.b[1]\n"
- "ld1b { z10.b }, p2/Z, [x28, #-3, MUL VL]\n"
"ld1b { z4.b }, p2/Z, [x28, #-2, MUL VL]\n"
"udot z16.s, z5.b, z0.b[2]\n"
"udot z20.s, z5.b, z1.b[2]\n"
"ld1b { z5.b }, p2/Z, [x28, #-1, MUL VL]\n"
- "add x23, x23, #0x10\n"
"udot z17.s, z6.b, z0.b[2]\n"
"udot z21.s, z6.b, z1.b[2]\n"
"udot z18.s, z7.b, z0.b[2]\n"
@@ -393,69 +393,69 @@ void sve_hybrid_u8qa_dot_4x4VL (
"udot z11.s, z0.b, z15.b\n"
"udot z12.s, z1.b, z15.b\n"
"22:" // Height 2: Multiply loop: unique 3: skip row sum
- "sub x25, x25, #0x10\n"
- "cmp x25, #0x10\n"
+ "sub x24, x24, #0x10\n"
+ "cmp x24, #0x10\n"
"bgt 21b\n"
"23:" // Height 2: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x25\n"
- "ld1rqb { z0.b }, p0/Z, [x24]\n"
- "ld1rqb { z1.b }, p0/Z, [x23]\n"
- "subs x25, x25, #0x4\n"
"ld1b { z4.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x24\n"
"ld1b { z5.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x24, x24, #0x4\n"
+ "ld1rqb { z0.b }, p0/Z, [x23]\n"
"udot z16.s, z4.b, z0.b[0]\n"
- "udot z20.s, z4.b, z1.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x22]\n"
+ "udot z17.s, z5.b, z0.b[0]\n"
"ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
- "udot z17.s, z5.b, z0.b[0]\n"
+ "udot z20.s, z4.b, z1.b[0]\n"
+ "addvl x28, x28, #4\n"
"udot z21.s, z5.b, z1.b[0]\n"
"udot z18.s, z6.b, z0.b[0]\n"
"udot z22.s, z6.b, z1.b[0]\n"
- "addvl x28, x28, #4\n"
"udot z19.s, z7.b, z0.b[0]\n"
"udot z23.s, z7.b, z1.b[0]\n"
"ble 24f\n"
"ld1b { z8.b }, p2/Z, [x28]\n"
- "ld1b { z9.b }, p2/Z, [x28, #1, MUL VL]\n"
- "subs x25, x25, #0x4\n"
"udot z16.s, z8.b, z0.b[1]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x24, x24, #0x4\n"
+ "udot z20.s, z8.b, z1.b[1]\n"
"ld1b { z10.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z4.b }, p2/Z, [x28, #3, MUL VL]\n"
- "udot z20.s, z8.b, z1.b[1]\n"
"udot z17.s, z9.b, z0.b[1]\n"
+ "addvl x28, x28, #4\n"
"udot z21.s, z9.b, z1.b[1]\n"
"udot z18.s, z10.b, z0.b[1]\n"
- "addvl x28, x28, #4\n"
"udot z22.s, z10.b, z1.b[1]\n"
"udot z19.s, z4.b, z0.b[1]\n"
"udot z23.s, z4.b, z1.b[1]\n"
"ble 24f\n"
"ld1b { z5.b }, p2/Z, [x28]\n"
- "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
- "subs x25, x25, #0x4\n"
"udot z16.s, z5.b, z0.b[2]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x24, x24, #0x4\n"
+ "udot z20.s, z5.b, z1.b[2]\n"
"ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
- "udot z20.s, z5.b, z1.b[2]\n"
"udot z17.s, z6.b, z0.b[2]\n"
+ "addvl x28, x28, #4\n"
"udot z21.s, z6.b, z1.b[2]\n"
"udot z18.s, z7.b, z0.b[2]\n"
- "addvl x28, x28, #4\n"
"udot z22.s, z7.b, z1.b[2]\n"
"udot z19.s, z8.b, z0.b[2]\n"
"udot z23.s, z8.b, z1.b[2]\n"
"ble 24f\n"
"ld1b { z9.b }, p2/Z, [x28]\n"
- "ld1b { z10.b }, p2/Z, [x28, #1, MUL VL]\n"
"udot z16.s, z9.b, z0.b[3]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #1, MUL VL]\n"
"udot z20.s, z9.b, z1.b[3]\n"
"ld1b { z4.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z5.b }, p2/Z, [x28, #3, MUL VL]\n"
"udot z17.s, z10.b, z0.b[3]\n"
+ "addvl x28, x28, #4\n"
"udot z21.s, z10.b, z1.b[3]\n"
"udot z18.s, z4.b, z0.b[3]\n"
"udot z22.s, z4.b, z1.b[3]\n"
- "addvl x28, x28, #4\n"
"udot z19.s, z5.b, z0.b[3]\n"
"udot z23.s, z5.b, z1.b[3]\n"
"24:" // Height 2: Multiply loop: multiply skip
@@ -463,49 +463,49 @@ void sve_hybrid_u8qa_dot_4x4VL (
"udot z11.s, z0.b, z15.b\n"
"udot z12.s, z1.b, z15.b\n"
"25:" // Height 2: Multiply loop: unique 4: skip row sum
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 18b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x22, x26, x19\n"
"tbnz %x[flags], #31, 26f\n"
- "mov x20, #0x4\n"
- "whilelt p0.s, XZR, x20\n"
- "add x20, %x[qp], %[b_offset]\n"
- "ld1rw { z2.s }, p2/Z, [x20]\n"
+ "add x19, %x[qp], %[b_offset]\n"
+ "ld1rw { z2.s }, p2/Z, [x19]\n"
+ "neg z2.s, p2/M, z2.s\n"
+ "mov x19, #0x4\n"
+ "whilelt p0.s, XZR, x19\n"
"uaddv d11, p0, z11.s\n"
- "mov z11.s, z11.s[0]\n"
"uaddv d12, p0, z12.s\n"
- "neg z2.s, p2/M, z2.s\n"
+ "mov z11.s, z11.s[0]\n"
"mov z12.s, z12.s[0]\n"
"mul z11.s, p2/M, z11.s, z2.s\n"
"mul z12.s, p2/M, z12.s, z2.s\n"
"26:" // Height 2: skip row sum fixup
"add z16.s, z16.s, z11.s\n"
+ "ld1w { z0.s }, p2/Z, [x27]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"add z17.s, z17.s, z11.s\n"
- "ld1w { z0.s }, p2/Z, [x10]\n"
- "ld1w { z1.s }, p2/Z, [x10, #1, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x27, #1, MUL VL]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
"add z18.s, z18.s, z11.s\n"
+ "ld1w { z2.s }, p2/Z, [x27, #2, MUL VL]\n"
+ "add x19, %x[qp], %[per_layer_mul]\n"
"add z19.s, z19.s, z11.s\n"
- "ld1w { z2.s }, p2/Z, [x10, #2, MUL VL]\n"
- "ld1w { z3.s }, p2/Z, [x10, #3, MUL VL]\n"
+ "ld1w { z3.s }, p2/Z, [x27, #3, MUL VL]\n"
+ "addvl x27, x27, #4\n"
"add z20.s, z20.s, z12.s\n"
+ "ld1rw { z4.s }, p2/Z, [x19]\n"
"add z21.s, z21.s, z12.s\n"
- "add x20, %x[qp], %[per_layer_mul]\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
"add z22.s, z22.s, z12.s\n"
"add z23.s, z23.s, z12.s\n"
- "ld1rw { z4.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[per_layer_right_shift]\n"
"add z16.s, z16.s, z0.s\n"
"add z17.s, z17.s, z1.s\n"
- "addvl x10, x10, #4\n"
"add z18.s, z18.s, z2.s\n"
"add z19.s, z19.s, z3.s\n"
"add z20.s, z20.s, z0.s\n"
+ "ld1rw { z0.s }, p2/Z, [x23]\n"
"add z21.s, z21.s, z1.s\n"
- "ld1rw { z0.s }, p2/Z, [x20]\n"
"add z22.s, z22.s, z2.s\n"
"add z23.s, z23.s, z3.s\n"
".inst 0x04a47610 // sqrdmulh z16.s, z16.s, z4.s\n"
@@ -518,97 +518,97 @@ void sve_hybrid_u8qa_dot_4x4VL (
".inst 0x04a476f7 // sqrdmulh z23.s, z23.s, z4.s\n"
"tbz %x[flags], #5, 27f\n"
"and z4.d, z16.d, z0.d\n"
- "asr z4.s, z4.s, #0x1f\n"
- "sqadd z16.s, z16.s, z4.s\n"
"and z5.d, z17.d, z0.d\n"
"and z6.d, z18.d, z0.d\n"
+ "asr z4.s, z4.s, #0x1f\n"
+ "asr z5.s, z5.s, #0x1f\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "sqadd z16.s, z16.s, z4.s\n"
+ "sqadd z17.s, z17.s, z5.s\n"
+ "sqadd z18.s, z18.s, z6.s\n"
"and z7.d, z19.d, z0.d\n"
"and z8.d, z20.d, z0.d\n"
"and z9.d, z21.d, z0.d\n"
- "and z10.d, z22.d, z0.d\n"
- "and z4.d, z23.d, z0.d\n"
- "asr z5.s, z5.s, #0x1f\n"
- "asr z6.s, z6.s, #0x1f\n"
"asr z7.s, z7.s, #0x1f\n"
"asr z8.s, z8.s, #0x1f\n"
"asr z9.s, z9.s, #0x1f\n"
- "asr z10.s, z10.s, #0x1f\n"
- "asr z4.s, z4.s, #0x1f\n"
- "sqadd z17.s, z17.s, z5.s\n"
- "sqadd z18.s, z18.s, z6.s\n"
"sqadd z19.s, z19.s, z7.s\n"
"sqadd z20.s, z20.s, z8.s\n"
"sqadd z21.s, z21.s, z9.s\n"
+ "and z10.d, z22.d, z0.d\n"
+ "and z4.d, z23.d, z0.d\n"
+ "asr z10.s, z10.s, #0x1f\n"
+ "asr z4.s, z4.s, #0x1f\n"
"sqadd z22.s, z22.s, z10.s\n"
"sqadd z23.s, z23.s, z4.s\n"
"27:" // Height 2: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x20]\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z16.s, z16.s, z4.s\n"
+ "add x19, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x19]\n"
".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
+ "add x19, %x[qp], %[minval]\n"
".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
- "add z17.s, z17.s, z4.s\n"
- "add z18.s, z18.s, z4.s\n"
+ "ld1rw { z5.s }, p2/Z, [x19]\n"
+ "add x19, %x[qp], %[maxval]\n"
".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
+ "ld1rw { z6.s }, p2/Z, [x19]\n"
".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
+ "add z16.s, z16.s, z4.s\n"
+ "add z17.s, z17.s, z4.s\n"
+ "add z18.s, z18.s, z4.s\n"
"add z19.s, z19.s, z4.s\n"
"add z20.s, z20.s, z4.s\n"
- ".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
- ".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
- "add z21.s, z21.s, z4.s\n"
- "add z22.s, z22.s, z4.s\n"
- ".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
- "add x20, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x20]\n"
- "add z23.s, z23.s, z4.s\n"
- "add x20, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x20]\n"
"smin z16.s, p2/M, z16.s, z6.s\n"
"smin z17.s, p2/M, z17.s, z6.s\n"
"smin z18.s, p2/M, z18.s, z6.s\n"
"smin z19.s, p2/M, z19.s, z6.s\n"
- "smin z20.s, p2/M, z20.s, z6.s\n"
- "smin z21.s, p2/M, z21.s, z6.s\n"
- "smin z22.s, p2/M, z22.s, z6.s\n"
- "smin z23.s, p2/M, z23.s, z6.s\n"
"smax z16.s, p2/M, z16.s, z5.s\n"
"smax z17.s, p2/M, z17.s, z5.s\n"
"smax z18.s, p2/M, z18.s, z5.s\n"
- "uzp1 z16.h, z16.h, z17.h\n"
"smax z19.s, p2/M, z19.s, z5.s\n"
- "smax z20.s, p2/M, z20.s, z5.s\n"
+ "smin z20.s, p2/M, z20.s, z6.s\n"
+ "uzp1 z16.h, z16.h, z17.h\n"
+ ".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
"uzp1 z17.h, z18.h, z19.h\n"
+ "smax z20.s, p2/M, z20.s, z5.s\n"
"uzp1 z16.b, z16.b, z17.b\n"
+ "st1b { z16.b }, p1, [x26]\n"
+ "add z21.s, z21.s, z4.s\n"
+ "addvl x26, x26, #1\n"
+ ".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
+ ".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
+ "smin z21.s, p2/M, z21.s, z6.s\n"
+ "add z22.s, z22.s, z4.s\n"
+ "add z23.s, z23.s, z4.s\n"
"smax z21.s, p2/M, z21.s, z5.s\n"
- "smax z22.s, p2/M, z22.s, z5.s\n"
+ "smin z22.s, p2/M, z22.s, z6.s\n"
+ "smin z23.s, p2/M, z23.s, z6.s\n"
"uzp1 z20.h, z20.h, z21.h\n"
- "st1b { z16.b }, p1, [x27]\n"
+ "smax z22.s, p2/M, z22.s, z5.s\n"
"smax z23.s, p2/M, z23.s, z5.s\n"
"uzp1 z21.h, z22.h, z23.h\n"
"uzp1 z20.b, z20.b, z21.b\n"
- "st1b { z20.b }, p1, [x23]\n"
- "addvl x27, x27, #1\n"
+ "st1b { z20.b }, p1, [x22]\n"
"28:" // Height 2: Writeback done
"decw x9, ALL, MUL #4\n"
"cmp x9, XZR\n"
"bgt 16b\n"
"b 58f\n"
"29:" // Height 3
- "mov x10, %x[col_bias]\n"
"mov z11.s, #0x0\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x27, %x[col_bias]\n"
"mov z12.s, #0x0\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"bic %x[flags], %x[flags], #0x80000000\n"
"mov z13.s, #0x0\n"
+ "mov x26, %x[output_ptr]\n"
"mov z15.b, #0x1\n"
- "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
"30:" // Height 3: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x9\n"
"mov z16.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z17.s, #0x0\n"
+ "whilelt p1.b, x19, x9\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"mov z20.s, #0x0\n"
@@ -620,83 +620,83 @@ void sve_hybrid_u8qa_dot_4x4VL (
"mov z26.s, #0x0\n"
"mov z27.s, #0x0\n"
"31:" // Height 3: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"32:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 33f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "ldr x22, [x21, #0x10]\n"
- "cbnz x26, 34f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "cbnz x25, 34f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
"b 34f\n"
"33:" // Height 3: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
"34:" // Height 3: input setup done
- "cmp x25, #0x10\n"
+ "cmp x24, #0x10\n"
"ble 37f\n"
"35:" // Height 3: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x25\n"
- "ld1rqb { z0.b }, p0/Z, [x24]\n"
- "ld1rqb { z1.b }, p0/Z, [x23]\n"
- "add x24, x24, #0x10\n"
- "ld1rqb { z2.b }, p0/Z, [x22]\n"
"ld1b { z4.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x24\n"
+ "ld1b { z5.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1rqb { z0.b }, p0/Z, [x23]\n"
"udot z16.s, z4.b, z0.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x22]\n"
+ "add x23, x23, #0x10\n"
+ "udot z17.s, z5.b, z0.b[0]\n"
+ "ld1rqb { z2.b }, p0/Z, [x21]\n"
+ "add x22, x22, #0x10\n"
"udot z20.s, z4.b, z1.b[0]\n"
- "ld1b { z5.b }, p2/Z, [x28, #1, MUL VL]\n"
"ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x21, x21, #0x10\n"
"udot z24.s, z4.b, z2.b[0]\n"
- "udot z17.s, z5.b, z0.b[0]\n"
"ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
"udot z21.s, z5.b, z1.b[0]\n"
- "udot z25.s, z5.b, z2.b[0]\n"
"ld1b { z8.b }, p2/Z, [x28, #4, MUL VL]\n"
- "udot z18.s, z6.b, z0.b[0]\n"
- "udot z22.s, z6.b, z1.b[0]\n"
+ "udot z25.s, z5.b, z2.b[0]\n"
"ld1b { z9.b }, p2/Z, [x28, #5, MUL VL]\n"
+ "udot z18.s, z6.b, z0.b[0]\n"
"ld1b { z10.b }, p2/Z, [x28, #6, MUL VL]\n"
- "udot z26.s, z6.b, z2.b[0]\n"
- "udot z19.s, z7.b, z0.b[0]\n"
+ "udot z22.s, z6.b, z1.b[0]\n"
"ld1b { z4.b }, p2/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
- "udot z23.s, z7.b, z1.b[0]\n"
- "udot z27.s, z7.b, z2.b[0]\n"
+ "udot z26.s, z6.b, z2.b[0]\n"
"ld1b { z5.b }, p2/Z, [x28, #-8, MUL VL]\n"
+ "udot z19.s, z7.b, z0.b[0]\n"
"ld1b { z6.b }, p2/Z, [x28, #-7, MUL VL]\n"
+ "udot z23.s, z7.b, z1.b[0]\n"
+ "udot z27.s, z7.b, z2.b[0]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-6, MUL VL]\n"
"udot z16.s, z8.b, z0.b[1]\n"
"udot z20.s, z8.b, z1.b[1]\n"
- "ld1b { z7.b }, p2/Z, [x28, #-6, MUL VL]\n"
- "add x23, x23, #0x10\n"
"udot z24.s, z8.b, z2.b[1]\n"
- "udot z17.s, z9.b, z0.b[1]\n"
"ld1b { z8.b }, p2/Z, [x28, #-5, MUL VL]\n"
- "add x22, x22, #0x10\n"
+ "udot z17.s, z9.b, z0.b[1]\n"
"udot z21.s, z9.b, z1.b[1]\n"
"udot z25.s, z9.b, z2.b[1]\n"
"ld1b { z9.b }, p2/Z, [x28, #-4, MUL VL]\n"
"udot z18.s, z10.b, z0.b[1]\n"
"udot z22.s, z10.b, z1.b[1]\n"
"udot z26.s, z10.b, z2.b[1]\n"
- "udot z19.s, z4.b, z0.b[1]\n"
"ld1b { z10.b }, p2/Z, [x28, #-3, MUL VL]\n"
+ "udot z19.s, z4.b, z0.b[1]\n"
"udot z23.s, z4.b, z1.b[1]\n"
"udot z27.s, z4.b, z2.b[1]\n"
"ld1b { z4.b }, p2/Z, [x28, #-2, MUL VL]\n"
"udot z16.s, z5.b, z0.b[2]\n"
"udot z20.s, z5.b, z1.b[2]\n"
"udot z24.s, z5.b, z2.b[2]\n"
- "udot z17.s, z6.b, z0.b[2]\n"
"ld1b { z5.b }, p2/Z, [x28, #-1, MUL VL]\n"
+ "udot z17.s, z6.b, z0.b[2]\n"
"udot z21.s, z6.b, z1.b[2]\n"
"udot z25.s, z6.b, z2.b[2]\n"
"udot z18.s, z7.b, z0.b[2]\n"
@@ -722,26 +722,26 @@ void sve_hybrid_u8qa_dot_4x4VL (
"udot z12.s, z1.b, z15.b\n"
"udot z13.s, z2.b, z15.b\n"
"36:" // Height 3: Multiply loop: unique 5: skip row sum
- "sub x25, x25, #0x10\n"
- "cmp x25, #0x10\n"
+ "sub x24, x24, #0x10\n"
+ "cmp x24, #0x10\n"
"bgt 35b\n"
"37:" // Height 3: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x25\n"
- "ld1rqb { z0.b }, p0/Z, [x24]\n"
- "ld1rqb { z1.b }, p0/Z, [x23]\n"
- "subs x25, x25, #0x4\n"
- "ld1rqb { z2.b }, p0/Z, [x22]\n"
"ld1b { z4.b }, p2/Z, [x28]\n"
- "udot z16.s, z4.b, z0.b[0]\n"
- "udot z20.s, z4.b, z1.b[0]\n"
+ "whilelt p0.b, XZR, x24\n"
"ld1b { z5.b }, p2/Z, [x28, #1, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
- "udot z24.s, z4.b, z2.b[0]\n"
+ "subs x24, x24, #0x4\n"
+ "ld1rqb { z0.b }, p0/Z, [x23]\n"
+ "udot z16.s, z4.b, z0.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x22]\n"
"udot z17.s, z5.b, z0.b[0]\n"
+ "ld1rqb { z2.b }, p0/Z, [x21]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "udot z20.s, z4.b, z1.b[0]\n"
"ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "udot z24.s, z4.b, z2.b[0]\n"
"udot z21.s, z5.b, z1.b[0]\n"
"udot z25.s, z5.b, z2.b[0]\n"
- "addvl x28, x28, #4\n"
"udot z18.s, z6.b, z0.b[0]\n"
"udot z22.s, z6.b, z1.b[0]\n"
"udot z26.s, z6.b, z2.b[0]\n"
@@ -750,16 +750,16 @@ void sve_hybrid_u8qa_dot_4x4VL (
"udot z27.s, z7.b, z2.b[0]\n"
"ble 38f\n"
"ld1b { z8.b }, p2/Z, [x28]\n"
- "ld1b { z9.b }, p2/Z, [x28, #1, MUL VL]\n"
- "subs x25, x25, #0x4\n"
"udot z16.s, z8.b, z0.b[1]\n"
- "ld1b { z10.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z4.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x24, x24, #0x4\n"
"udot z20.s, z8.b, z1.b[1]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #2, MUL VL]\n"
"udot z24.s, z8.b, z2.b[1]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"udot z17.s, z9.b, z0.b[1]\n"
"udot z21.s, z9.b, z1.b[1]\n"
- "addvl x28, x28, #4\n"
"udot z25.s, z9.b, z2.b[1]\n"
"udot z18.s, z10.b, z0.b[1]\n"
"udot z22.s, z10.b, z1.b[1]\n"
@@ -769,16 +769,16 @@ void sve_hybrid_u8qa_dot_4x4VL (
"udot z27.s, z4.b, z2.b[1]\n"
"ble 38f\n"
"ld1b { z5.b }, p2/Z, [x28]\n"
- "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
- "subs x25, x25, #0x4\n"
"udot z16.s, z5.b, z0.b[2]\n"
- "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x24, x24, #0x4\n"
"udot z20.s, z5.b, z1.b[2]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
"udot z24.s, z5.b, z2.b[2]\n"
+ "ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"udot z17.s, z6.b, z0.b[2]\n"
"udot z21.s, z6.b, z1.b[2]\n"
- "addvl x28, x28, #4\n"
"udot z25.s, z6.b, z2.b[2]\n"
"udot z18.s, z7.b, z0.b[2]\n"
"udot z22.s, z7.b, z1.b[2]\n"
@@ -788,16 +788,16 @@ void sve_hybrid_u8qa_dot_4x4VL (
"udot z27.s, z8.b, z2.b[2]\n"
"ble 38f\n"
"ld1b { z9.b }, p2/Z, [x28]\n"
- "ld1b { z10.b }, p2/Z, [x28, #1, MUL VL]\n"
"udot z16.s, z9.b, z0.b[3]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #1, MUL VL]\n"
"udot z20.s, z9.b, z1.b[3]\n"
"ld1b { z4.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z5.b }, p2/Z, [x28, #3, MUL VL]\n"
"udot z24.s, z9.b, z2.b[3]\n"
+ "ld1b { z5.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"udot z17.s, z10.b, z0.b[3]\n"
"udot z21.s, z10.b, z1.b[3]\n"
"udot z25.s, z10.b, z2.b[3]\n"
- "addvl x28, x28, #4\n"
"udot z18.s, z4.b, z0.b[3]\n"
"udot z22.s, z4.b, z1.b[3]\n"
"udot z26.s, z4.b, z2.b[3]\n"
@@ -810,48 +810,48 @@ void sve_hybrid_u8qa_dot_4x4VL (
"udot z12.s, z1.b, z15.b\n"
"udot z13.s, z2.b, z15.b\n"
"39:" // Height 3: Multiply loop: unique 6: skip row sum
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 32b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20\n"
- "add x22, x23, x20\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x22, x26, x19\n"
+ "add x21, x22, x19\n"
"tbnz %x[flags], #31, 40f\n"
- "mov x20, #0x4\n"
- "whilelt p0.s, XZR, x20\n"
- "add x20, %x[qp], %[b_offset]\n"
- "ld1rw { z3.s }, p2/Z, [x20]\n"
+ "add x19, %x[qp], %[b_offset]\n"
+ "ld1rw { z3.s }, p2/Z, [x19]\n"
+ "neg z3.s, p2/M, z3.s\n"
+ "mov x19, #0x4\n"
+ "whilelt p0.s, XZR, x19\n"
"uaddv d11, p0, z11.s\n"
- "mov z11.s, z11.s[0]\n"
"uaddv d12, p0, z12.s\n"
"uaddv d13, p0, z13.s\n"
+ "mov z11.s, z11.s[0]\n"
"mov z12.s, z12.s[0]\n"
"mov z13.s, z13.s[0]\n"
- "neg z3.s, p2/M, z3.s\n"
"mul z11.s, p2/M, z11.s, z3.s\n"
"mul z12.s, p2/M, z12.s, z3.s\n"
"mul z13.s, p2/M, z13.s, z3.s\n"
"40:" // Height 3: skip row sum fixup
"add z16.s, z16.s, z11.s\n"
+ "ld1w { z0.s }, p2/Z, [x27]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"add z17.s, z17.s, z11.s\n"
- "ld1w { z0.s }, p2/Z, [x10]\n"
- "ld1w { z1.s }, p2/Z, [x10, #1, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x27, #1, MUL VL]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
"add z18.s, z18.s, z11.s\n"
+ "ld1w { z2.s }, p2/Z, [x27, #2, MUL VL]\n"
+ "add x19, %x[qp], %[per_layer_mul]\n"
"add z19.s, z19.s, z11.s\n"
- "ld1w { z2.s }, p2/Z, [x10, #2, MUL VL]\n"
- "ld1w { z3.s }, p2/Z, [x10, #3, MUL VL]\n"
+ "ld1w { z3.s }, p2/Z, [x27, #3, MUL VL]\n"
+ "addvl x27, x27, #4\n"
"add z20.s, z20.s, z12.s\n"
+ "ld1rw { z4.s }, p2/Z, [x19]\n"
"add z21.s, z21.s, z12.s\n"
- "add x20, %x[qp], %[per_layer_mul]\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
"add z22.s, z22.s, z12.s\n"
"add z23.s, z23.s, z12.s\n"
- "ld1rw { z4.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[per_layer_right_shift]\n"
"add z24.s, z24.s, z13.s\n"
"add z25.s, z25.s, z13.s\n"
- "addvl x10, x10, #4\n"
"add z26.s, z26.s, z13.s\n"
"add z27.s, z27.s, z13.s\n"
"add z16.s, z16.s, z0.s\n"
@@ -863,8 +863,8 @@ void sve_hybrid_u8qa_dot_4x4VL (
"add z22.s, z22.s, z2.s\n"
"add z23.s, z23.s, z3.s\n"
"add z24.s, z24.s, z0.s\n"
+ "ld1rw { z0.s }, p2/Z, [x23]\n"
"add z25.s, z25.s, z1.s\n"
- "ld1rw { z0.s }, p2/Z, [x20]\n"
"add z26.s, z26.s, z2.s\n"
"add z27.s, z27.s, z3.s\n"
".inst 0x04a47610 // sqrdmulh z16.s, z16.s, z4.s\n"
@@ -883,131 +883,131 @@ void sve_hybrid_u8qa_dot_4x4VL (
"and z4.d, z16.d, z0.d\n"
"and z5.d, z17.d, z0.d\n"
"and z6.d, z18.d, z0.d\n"
- "and z7.d, z19.d, z0.d\n"
- "and z8.d, z20.d, z0.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
- "asr z8.s, z8.s, #0x1f\n"
"sqadd z16.s, z16.s, z4.s\n"
"sqadd z17.s, z17.s, z5.s\n"
"sqadd z18.s, z18.s, z6.s\n"
+ "and z7.d, z19.d, z0.d\n"
+ "and z8.d, z20.d, z0.d\n"
+ "and z9.d, z21.d, z0.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ "asr z8.s, z8.s, #0x1f\n"
+ "asr z9.s, z9.s, #0x1f\n"
"sqadd z19.s, z19.s, z7.s\n"
"sqadd z20.s, z20.s, z8.s\n"
- "and z9.d, z21.d, z0.d\n"
+ "sqadd z21.s, z21.s, z9.s\n"
"and z10.d, z22.d, z0.d\n"
"and z4.d, z23.d, z0.d\n"
"and z5.d, z24.d, z0.d\n"
- "and z6.d, z25.d, z0.d\n"
- "and z7.d, z26.d, z0.d\n"
- "and z8.d, z27.d, z0.d\n"
- "asr z9.s, z9.s, #0x1f\n"
"asr z10.s, z10.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
- "asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
- "asr z8.s, z8.s, #0x1f\n"
- "sqadd z21.s, z21.s, z9.s\n"
"sqadd z22.s, z22.s, z10.s\n"
"sqadd z23.s, z23.s, z4.s\n"
"sqadd z24.s, z24.s, z5.s\n"
+ "and z6.d, z25.d, z0.d\n"
+ "and z7.d, z26.d, z0.d\n"
+ "and z8.d, z27.d, z0.d\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ "asr z8.s, z8.s, #0x1f\n"
"sqadd z25.s, z25.s, z6.s\n"
"sqadd z26.s, z26.s, z7.s\n"
"sqadd z27.s, z27.s, z8.s\n"
"41:" // Height 3: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x20]\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z16.s, z16.s, z4.s\n"
+ "add x19, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x19]\n"
".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
+ "add x19, %x[qp], %[minval]\n"
".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
- "add z17.s, z17.s, z4.s\n"
- "add z18.s, z18.s, z4.s\n"
+ "ld1rw { z5.s }, p2/Z, [x19]\n"
+ "add x19, %x[qp], %[maxval]\n"
".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
+ "ld1rw { z6.s }, p2/Z, [x19]\n"
".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
+ "add z16.s, z16.s, z4.s\n"
+ "add z17.s, z17.s, z4.s\n"
+ "add z18.s, z18.s, z4.s\n"
"add z19.s, z19.s, z4.s\n"
"add z20.s, z20.s, z4.s\n"
- ".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
- ".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
- "add z21.s, z21.s, z4.s\n"
- "add z22.s, z22.s, z4.s\n"
- ".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
- ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
- "add z23.s, z23.s, z4.s\n"
- "add z24.s, z24.s, z4.s\n"
- ".inst 0x44828819 // srshl z25.s, p2/M, z25.s, z0.s\n"
- ".inst 0x4482881a // srshl z26.s, p2/M, z26.s, z0.s\n"
- "add z25.s, z25.s, z4.s\n"
- "add z26.s, z26.s, z4.s\n"
- ".inst 0x4482881b // srshl z27.s, p2/M, z27.s, z0.s\n"
- "add x20, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x20]\n"
- "add z27.s, z27.s, z4.s\n"
- "add x20, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x20]\n"
"smin z16.s, p2/M, z16.s, z6.s\n"
"smin z17.s, p2/M, z17.s, z6.s\n"
"smin z18.s, p2/M, z18.s, z6.s\n"
"smin z19.s, p2/M, z19.s, z6.s\n"
- "smin z20.s, p2/M, z20.s, z6.s\n"
- "smin z21.s, p2/M, z21.s, z6.s\n"
- "smin z22.s, p2/M, z22.s, z6.s\n"
- "smin z23.s, p2/M, z23.s, z6.s\n"
- "smin z24.s, p2/M, z24.s, z6.s\n"
- "smin z25.s, p2/M, z25.s, z6.s\n"
- "smin z26.s, p2/M, z26.s, z6.s\n"
- "smin z27.s, p2/M, z27.s, z6.s\n"
"smax z16.s, p2/M, z16.s, z5.s\n"
"smax z17.s, p2/M, z17.s, z5.s\n"
"smax z18.s, p2/M, z18.s, z5.s\n"
- "uzp1 z16.h, z16.h, z17.h\n"
"smax z19.s, p2/M, z19.s, z5.s\n"
- "smax z20.s, p2/M, z20.s, z5.s\n"
+ "smin z20.s, p2/M, z20.s, z6.s\n"
+ "uzp1 z16.h, z16.h, z17.h\n"
+ ".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
"uzp1 z17.h, z18.h, z19.h\n"
+ "smax z20.s, p2/M, z20.s, z5.s\n"
"uzp1 z16.b, z16.b, z17.b\n"
+ "st1b { z16.b }, p1, [x26]\n"
+ "add z21.s, z21.s, z4.s\n"
+ "addvl x26, x26, #1\n"
+ ".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
+ ".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
+ ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
+ "smin z21.s, p2/M, z21.s, z6.s\n"
+ ".inst 0x44828819 // srshl z25.s, p2/M, z25.s, z0.s\n"
+ "add z22.s, z22.s, z4.s\n"
+ "add z23.s, z23.s, z4.s\n"
+ "add z24.s, z24.s, z4.s\n"
+ "add z25.s, z25.s, z4.s\n"
"smax z21.s, p2/M, z21.s, z5.s\n"
- "smax z22.s, p2/M, z22.s, z5.s\n"
+ "smin z22.s, p2/M, z22.s, z6.s\n"
+ "smin z23.s, p2/M, z23.s, z6.s\n"
+ "smin z24.s, p2/M, z24.s, z6.s\n"
"uzp1 z20.h, z20.h, z21.h\n"
- "st1b { z16.b }, p1, [x27]\n"
+ "smax z22.s, p2/M, z22.s, z5.s\n"
"smax z23.s, p2/M, z23.s, z5.s\n"
"smax z24.s, p2/M, z24.s, z5.s\n"
+ "smin z25.s, p2/M, z25.s, z6.s\n"
+ ".inst 0x4482881a // srshl z26.s, p2/M, z26.s, z0.s\n"
"uzp1 z21.h, z22.h, z23.h\n"
+ ".inst 0x4482881b // srshl z27.s, p2/M, z27.s, z0.s\n"
"uzp1 z20.b, z20.b, z21.b\n"
+ "st1b { z20.b }, p1, [x22]\n"
+ "add z26.s, z26.s, z4.s\n"
"smax z25.s, p2/M, z25.s, z5.s\n"
- "smax z26.s, p2/M, z26.s, z5.s\n"
+ "add z27.s, z27.s, z4.s\n"
+ "smin z26.s, p2/M, z26.s, z6.s\n"
"uzp1 z24.h, z24.h, z25.h\n"
- "st1b { z20.b }, p1, [x23]\n"
+ "smin z27.s, p2/M, z27.s, z6.s\n"
+ "smax z26.s, p2/M, z26.s, z5.s\n"
"smax z27.s, p2/M, z27.s, z5.s\n"
"uzp1 z25.h, z26.h, z27.h\n"
"uzp1 z24.b, z24.b, z25.b\n"
- "st1b { z24.b }, p1, [x22]\n"
- "addvl x27, x27, #1\n"
+ "st1b { z24.b }, p1, [x21]\n"
"42:" // Height 3: Writeback done
"decw x9, ALL, MUL #4\n"
"cmp x9, XZR\n"
"bgt 30b\n"
"b 58f\n"
"43:" // Height 4
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x4\n"
- "mov x10, %x[col_bias]\n"
"mov z11.s, #0x0\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x27, %x[col_bias]\n"
"mov z12.s, #0x0\n"
- "mov z13.s, #0x0\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"bic %x[flags], %x[flags], #0x80000000\n"
- "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov z13.s, #0x0\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x26, %x[output_ptr]\n"
"mov z14.s, #0x0\n"
+ "mov x19, #0x4\n"
"mov z15.b, #0x1\n"
- "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"44:" // Height 4: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x9\n"
"mov z16.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z17.s, #0x0\n"
+ "whilelt p1.b, x19, x9\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"mov z20.s, #0x0\n"
@@ -1023,66 +1023,66 @@ void sve_hybrid_u8qa_dot_4x4VL (
"mov z30.s, #0x0\n"
"mov z31.s, #0x0\n"
"45:" // Height 4: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"46:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 47f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "ldr x22, [x21, #0x10]\n"
- "ldr x21, [x21, #0x18]\n"
- "cbnz x26, 48f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
- "add x21, x21, x20\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "ldr x20, [x20, #0x18]\n"
+ "cbnz x25, 48f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
+ "add x20, x20, x19\n"
"b 48f\n"
"47:" // Height 4: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
+ "add x20, x21, x19\n"
"48:" // Height 4: input setup done
- "cmp x25, #0x10\n"
+ "cmp x24, #0x10\n"
"ble 51f\n"
"49:" // Height 4: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x25\n"
- "ld1rqb { z0.b }, p0/Z, [x24]\n"
- "ld1rqb { z1.b }, p0/Z, [x23]\n"
- "add x24, x24, #0x10\n"
- "ld1rqb { z2.b }, p0/Z, [x22]\n"
- "ld1rqb { z3.b }, p0/Z, [x21]\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
"ld1b { z4.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x24\n"
"ld1b { z5.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1rqb { z0.b }, p0/Z, [x23]\n"
"udot z16.s, z4.b, z0.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x22]\n"
+ "add x23, x23, #0x10\n"
+ "udot z17.s, z5.b, z0.b[0]\n"
+ "ld1rqb { z2.b }, p0/Z, [x21]\n"
+ "add x22, x22, #0x10\n"
"udot z20.s, z4.b, z1.b[0]\n"
- "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1rqb { z3.b }, p0/Z, [x20]\n"
+ "add x21, x21, #0x10\n"
"udot z24.s, z4.b, z2.b[0]\n"
- "udot z28.s, z4.b, z3.b[0]\n"
- "udot z17.s, z5.b, z0.b[0]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x20, x20, #0x10\n"
"udot z21.s, z5.b, z1.b[0]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "udot z25.s, z5.b, z2.b[0]\n"
"ld1b { z8.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "udot z28.s, z4.b, z3.b[0]\n"
"ld1b { z9.b }, p2/Z, [x28, #5, MUL VL]\n"
- "udot z25.s, z5.b, z2.b[0]\n"
"udot z29.s, z5.b, z3.b[0]\n"
"ld1b { z10.b }, p2/Z, [x28, #6, MUL VL]\n"
- "ld1b { z4.b }, p2/Z, [x28, #7, MUL VL]\n"
"udot z18.s, z6.b, z0.b[0]\n"
- "udot z22.s, z6.b, z1.b[0]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
+ "udot z22.s, z6.b, z1.b[0]\n"
"ld1b { z5.b }, p2/Z, [x28, #-8, MUL VL]\n"
"udot z26.s, z6.b, z2.b[0]\n"
"udot z30.s, z6.b, z3.b[0]\n"
"ld1b { z6.b }, p2/Z, [x28, #-7, MUL VL]\n"
- "add x21, x21, #0x10\n"
"udot z19.s, z7.b, z0.b[0]\n"
"udot z23.s, z7.b, z1.b[0]\n"
"udot z27.s, z7.b, z2.b[0]\n"
@@ -1147,27 +1147,27 @@ void sve_hybrid_u8qa_dot_4x4VL (
"udot z13.s, z2.b, z15.b\n"
"udot z14.s, z3.b, z15.b\n"
"50:" // Height 4: Multiply loop: unique 7: skip row sum
- "sub x25, x25, #0x10\n"
- "cmp x25, #0x10\n"
+ "sub x24, x24, #0x10\n"
+ "cmp x24, #0x10\n"
"bgt 49b\n"
"51:" // Height 4: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x25\n"
- "ld1rqb { z0.b }, p0/Z, [x24]\n"
- "ld1rqb { z1.b }, p0/Z, [x23]\n"
- "subs x25, x25, #0x4\n"
- "ld1rqb { z2.b }, p0/Z, [x22]\n"
- "ld1rqb { z3.b }, p0/Z, [x21]\n"
"ld1b { z4.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x24\n"
"ld1b { z5.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x24, x24, #0x4\n"
+ "ld1rqb { z0.b }, p0/Z, [x23]\n"
"udot z16.s, z4.b, z0.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x22]\n"
+ "udot z17.s, z5.b, z0.b[0]\n"
+ "ld1rqb { z2.b }, p0/Z, [x21]\n"
+ "ld1rqb { z3.b }, p0/Z, [x20]\n"
"udot z20.s, z4.b, z1.b[0]\n"
"ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "udot z21.s, z5.b, z1.b[0]\n"
"ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"udot z24.s, z4.b, z2.b[0]\n"
"udot z28.s, z4.b, z3.b[0]\n"
- "udot z17.s, z5.b, z0.b[0]\n"
- "udot z21.s, z5.b, z1.b[0]\n"
- "addvl x28, x28, #4\n"
"udot z25.s, z5.b, z2.b[0]\n"
"udot z29.s, z5.b, z3.b[0]\n"
"udot z18.s, z6.b, z0.b[0]\n"
@@ -1180,16 +1180,16 @@ void sve_hybrid_u8qa_dot_4x4VL (
"udot z31.s, z7.b, z3.b[0]\n"
"ble 52f\n"
"ld1b { z8.b }, p2/Z, [x28]\n"
- "ld1b { z9.b }, p2/Z, [x28, #1, MUL VL]\n"
- "subs x25, x25, #0x4\n"
"udot z16.s, z8.b, z0.b[1]\n"
- "ld1b { z10.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z4.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x24, x24, #0x4\n"
"udot z20.s, z8.b, z1.b[1]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #2, MUL VL]\n"
"udot z24.s, z8.b, z2.b[1]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"udot z28.s, z8.b, z3.b[1]\n"
"udot z17.s, z9.b, z0.b[1]\n"
- "addvl x28, x28, #4\n"
"udot z21.s, z9.b, z1.b[1]\n"
"udot z25.s, z9.b, z2.b[1]\n"
"udot z29.s, z9.b, z3.b[1]\n"
@@ -1203,16 +1203,16 @@ void sve_hybrid_u8qa_dot_4x4VL (
"udot z31.s, z4.b, z3.b[1]\n"
"ble 52f\n"
"ld1b { z5.b }, p2/Z, [x28]\n"
- "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
- "subs x25, x25, #0x4\n"
"udot z16.s, z5.b, z0.b[2]\n"
- "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x24, x24, #0x4\n"
"udot z20.s, z5.b, z1.b[2]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
"udot z24.s, z5.b, z2.b[2]\n"
+ "ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"udot z28.s, z5.b, z3.b[2]\n"
"udot z17.s, z6.b, z0.b[2]\n"
- "addvl x28, x28, #4\n"
"udot z21.s, z6.b, z1.b[2]\n"
"udot z25.s, z6.b, z2.b[2]\n"
"udot z29.s, z6.b, z3.b[2]\n"
@@ -1226,16 +1226,16 @@ void sve_hybrid_u8qa_dot_4x4VL (
"udot z31.s, z8.b, z3.b[2]\n"
"ble 52f\n"
"ld1b { z9.b }, p2/Z, [x28]\n"
- "ld1b { z10.b }, p2/Z, [x28, #1, MUL VL]\n"
"udot z16.s, z9.b, z0.b[3]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #1, MUL VL]\n"
"udot z20.s, z9.b, z1.b[3]\n"
"ld1b { z4.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z5.b }, p2/Z, [x28, #3, MUL VL]\n"
"udot z24.s, z9.b, z2.b[3]\n"
+ "ld1b { z5.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"udot z28.s, z9.b, z3.b[3]\n"
"udot z17.s, z10.b, z0.b[3]\n"
"udot z21.s, z10.b, z1.b[3]\n"
- "addvl x28, x28, #4\n"
"udot z25.s, z10.b, z2.b[3]\n"
"udot z29.s, z10.b, z3.b[3]\n"
"udot z18.s, z4.b, z0.b[3]\n"
@@ -1253,27 +1253,27 @@ void sve_hybrid_u8qa_dot_4x4VL (
"udot z13.s, z2.b, z15.b\n"
"udot z14.s, z3.b, z15.b\n"
"53:" // Height 4: Multiply loop: unique 8: skip row sum
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 46b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x22, x26, x19\n"
+ "add x21, x22, x19\n"
+ "add x20, x21, x19\n"
"tbnz %x[flags], #31, 54f\n"
- "mov x20, #0x4\n"
- "whilelt p0.s, XZR, x20\n"
- "add x20, %x[qp], %[b_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x20]\n"
+ "add x19, %x[qp], %[b_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x19]\n"
+ "neg z4.s, p2/M, z4.s\n"
+ "mov x19, #0x4\n"
+ "whilelt p0.s, XZR, x19\n"
"uaddv d11, p0, z11.s\n"
- "mov z11.s, z11.s[0]\n"
"uaddv d12, p0, z12.s\n"
"uaddv d13, p0, z13.s\n"
+ "uaddv d14, p0, z14.s\n"
+ "mov z11.s, z11.s[0]\n"
"mov z12.s, z12.s[0]\n"
"mov z13.s, z13.s[0]\n"
- "uaddv d14, p0, z14.s\n"
- "neg z4.s, p2/M, z4.s\n"
"mov z14.s, z14.s[0]\n"
"mul z11.s, p2/M, z11.s, z4.s\n"
"mul z12.s, p2/M, z12.s, z4.s\n"
@@ -1281,24 +1281,24 @@ void sve_hybrid_u8qa_dot_4x4VL (
"mul z14.s, p2/M, z14.s, z4.s\n"
"54:" // Height 4: skip row sum fixup
"add z16.s, z16.s, z11.s\n"
+ "ld1w { z0.s }, p2/Z, [x27]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"add z17.s, z17.s, z11.s\n"
- "ld1w { z0.s }, p2/Z, [x10]\n"
- "ld1w { z1.s }, p2/Z, [x10, #1, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x27, #1, MUL VL]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
"add z18.s, z18.s, z11.s\n"
+ "ld1w { z2.s }, p2/Z, [x27, #2, MUL VL]\n"
+ "add x19, %x[qp], %[per_layer_mul]\n"
"add z19.s, z19.s, z11.s\n"
- "ld1w { z2.s }, p2/Z, [x10, #2, MUL VL]\n"
- "ld1w { z3.s }, p2/Z, [x10, #3, MUL VL]\n"
+ "ld1w { z3.s }, p2/Z, [x27, #3, MUL VL]\n"
+ "addvl x27, x27, #4\n"
"add z20.s, z20.s, z12.s\n"
+ "ld1rw { z4.s }, p2/Z, [x19]\n"
"add z21.s, z21.s, z12.s\n"
- "add x20, %x[qp], %[per_layer_mul]\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
"add z22.s, z22.s, z12.s\n"
"add z23.s, z23.s, z12.s\n"
- "ld1rw { z4.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[per_layer_right_shift]\n"
"add z24.s, z24.s, z13.s\n"
"add z25.s, z25.s, z13.s\n"
- "addvl x10, x10, #4\n"
"add z26.s, z26.s, z13.s\n"
"add z27.s, z27.s, z13.s\n"
"add z28.s, z28.s, z14.s\n"
@@ -1318,8 +1318,8 @@ void sve_hybrid_u8qa_dot_4x4VL (
"add z26.s, z26.s, z2.s\n"
"add z27.s, z27.s, z3.s\n"
"add z28.s, z28.s, z0.s\n"
+ "ld1rw { z0.s }, p2/Z, [x23]\n"
"add z29.s, z29.s, z1.s\n"
- "ld1rw { z0.s }, p2/Z, [x20]\n"
"add z30.s, z30.s, z2.s\n"
"add z31.s, z31.s, z3.s\n"
".inst 0x04a47610 // sqrdmulh z16.s, z16.s, z4.s\n"
@@ -1341,160 +1341,160 @@ void sve_hybrid_u8qa_dot_4x4VL (
"tbz %x[flags], #5, 55f\n"
"and z4.d, z16.d, z0.d\n"
"and z5.d, z17.d, z0.d\n"
+ "and z6.d, z18.d, z0.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
+ "asr z6.s, z6.s, #0x1f\n"
"sqadd z16.s, z16.s, z4.s\n"
"sqadd z17.s, z17.s, z5.s\n"
- "and z6.d, z18.d, z0.d\n"
+ "sqadd z18.s, z18.s, z6.s\n"
"and z7.d, z19.d, z0.d\n"
"and z8.d, z20.d, z0.d\n"
"and z9.d, z21.d, z0.d\n"
- "and z10.d, z22.d, z0.d\n"
- "and z4.d, z23.d, z0.d\n"
- "and z5.d, z24.d, z0.d\n"
- "asr z6.s, z6.s, #0x1f\n"
"asr z7.s, z7.s, #0x1f\n"
"asr z8.s, z8.s, #0x1f\n"
"asr z9.s, z9.s, #0x1f\n"
- "asr z10.s, z10.s, #0x1f\n"
- "asr z4.s, z4.s, #0x1f\n"
- "asr z5.s, z5.s, #0x1f\n"
- "sqadd z18.s, z18.s, z6.s\n"
"sqadd z19.s, z19.s, z7.s\n"
"sqadd z20.s, z20.s, z8.s\n"
"sqadd z21.s, z21.s, z9.s\n"
+ "and z10.d, z22.d, z0.d\n"
+ "and z4.d, z23.d, z0.d\n"
+ "and z5.d, z24.d, z0.d\n"
+ "asr z10.s, z10.s, #0x1f\n"
+ "asr z4.s, z4.s, #0x1f\n"
+ "asr z5.s, z5.s, #0x1f\n"
"sqadd z22.s, z22.s, z10.s\n"
"sqadd z23.s, z23.s, z4.s\n"
"sqadd z24.s, z24.s, z5.s\n"
"and z6.d, z25.d, z0.d\n"
"and z7.d, z26.d, z0.d\n"
"and z8.d, z27.d, z0.d\n"
- "and z9.d, z28.d, z0.d\n"
- "and z10.d, z29.d, z0.d\n"
- "and z4.d, z30.d, z0.d\n"
- "and z5.d, z31.d, z0.d\n"
"asr z6.s, z6.s, #0x1f\n"
"asr z7.s, z7.s, #0x1f\n"
"asr z8.s, z8.s, #0x1f\n"
- "asr z9.s, z9.s, #0x1f\n"
- "asr z10.s, z10.s, #0x1f\n"
- "asr z4.s, z4.s, #0x1f\n"
- "asr z5.s, z5.s, #0x1f\n"
"sqadd z25.s, z25.s, z6.s\n"
"sqadd z26.s, z26.s, z7.s\n"
"sqadd z27.s, z27.s, z8.s\n"
+ "and z9.d, z28.d, z0.d\n"
+ "and z10.d, z29.d, z0.d\n"
+ "and z4.d, z30.d, z0.d\n"
+ "asr z9.s, z9.s, #0x1f\n"
+ "asr z10.s, z10.s, #0x1f\n"
+ "asr z4.s, z4.s, #0x1f\n"
"sqadd z28.s, z28.s, z9.s\n"
"sqadd z29.s, z29.s, z10.s\n"
"sqadd z30.s, z30.s, z4.s\n"
+ "and z5.d, z31.d, z0.d\n"
+ "asr z5.s, z5.s, #0x1f\n"
"sqadd z31.s, z31.s, z5.s\n"
"55:" // Height 4: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x20]\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z16.s, z16.s, z4.s\n"
+ "add x19, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x19]\n"
".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
+ "add x19, %x[qp], %[minval]\n"
".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
- "add z17.s, z17.s, z4.s\n"
- "add z18.s, z18.s, z4.s\n"
+ "ld1rw { z5.s }, p2/Z, [x19]\n"
+ "add x19, %x[qp], %[maxval]\n"
".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
+ "ld1rw { z6.s }, p2/Z, [x19]\n"
".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
+ "add z16.s, z16.s, z4.s\n"
+ "add z17.s, z17.s, z4.s\n"
+ "add z18.s, z18.s, z4.s\n"
"add z19.s, z19.s, z4.s\n"
"add z20.s, z20.s, z4.s\n"
- ".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
- ".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
- "add z21.s, z21.s, z4.s\n"
- "add z22.s, z22.s, z4.s\n"
- ".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
- ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
- "add z23.s, z23.s, z4.s\n"
- "add z24.s, z24.s, z4.s\n"
- ".inst 0x44828819 // srshl z25.s, p2/M, z25.s, z0.s\n"
- ".inst 0x4482881a // srshl z26.s, p2/M, z26.s, z0.s\n"
- "add z25.s, z25.s, z4.s\n"
- "add z26.s, z26.s, z4.s\n"
- ".inst 0x4482881b // srshl z27.s, p2/M, z27.s, z0.s\n"
- ".inst 0x4482881c // srshl z28.s, p2/M, z28.s, z0.s\n"
- "add z27.s, z27.s, z4.s\n"
- "add z28.s, z28.s, z4.s\n"
- ".inst 0x4482881d // srshl z29.s, p2/M, z29.s, z0.s\n"
- ".inst 0x4482881e // srshl z30.s, p2/M, z30.s, z0.s\n"
- "add z29.s, z29.s, z4.s\n"
- "add z30.s, z30.s, z4.s\n"
- ".inst 0x4482881f // srshl z31.s, p2/M, z31.s, z0.s\n"
- "add x20, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x20]\n"
- "add z31.s, z31.s, z4.s\n"
- "add x20, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x20]\n"
"smin z16.s, p2/M, z16.s, z6.s\n"
"smin z17.s, p2/M, z17.s, z6.s\n"
"smin z18.s, p2/M, z18.s, z6.s\n"
"smin z19.s, p2/M, z19.s, z6.s\n"
- "smin z20.s, p2/M, z20.s, z6.s\n"
- "smin z21.s, p2/M, z21.s, z6.s\n"
- "smin z22.s, p2/M, z22.s, z6.s\n"
- "smin z23.s, p2/M, z23.s, z6.s\n"
- "smin z24.s, p2/M, z24.s, z6.s\n"
- "smin z25.s, p2/M, z25.s, z6.s\n"
- "smin z26.s, p2/M, z26.s, z6.s\n"
- "smin z27.s, p2/M, z27.s, z6.s\n"
- "smin z28.s, p2/M, z28.s, z6.s\n"
- "smin z29.s, p2/M, z29.s, z6.s\n"
- "smin z30.s, p2/M, z30.s, z6.s\n"
- "smin z31.s, p2/M, z31.s, z6.s\n"
"smax z16.s, p2/M, z16.s, z5.s\n"
"smax z17.s, p2/M, z17.s, z5.s\n"
"smax z18.s, p2/M, z18.s, z5.s\n"
- "uzp1 z16.h, z16.h, z17.h\n"
"smax z19.s, p2/M, z19.s, z5.s\n"
- "smax z20.s, p2/M, z20.s, z5.s\n"
+ "smin z20.s, p2/M, z20.s, z6.s\n"
+ "uzp1 z16.h, z16.h, z17.h\n"
+ ".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
"uzp1 z17.h, z18.h, z19.h\n"
+ "smax z20.s, p2/M, z20.s, z5.s\n"
"uzp1 z16.b, z16.b, z17.b\n"
+ "st1b { z16.b }, p1, [x26]\n"
+ "add z21.s, z21.s, z4.s\n"
+ "addvl x26, x26, #1\n"
+ ".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
+ ".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
+ ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
+ "smin z21.s, p2/M, z21.s, z6.s\n"
+ ".inst 0x44828819 // srshl z25.s, p2/M, z25.s, z0.s\n"
+ "add z22.s, z22.s, z4.s\n"
+ "add z23.s, z23.s, z4.s\n"
+ "add z24.s, z24.s, z4.s\n"
+ "add z25.s, z25.s, z4.s\n"
"smax z21.s, p2/M, z21.s, z5.s\n"
- "smax z22.s, p2/M, z22.s, z5.s\n"
+ "smin z22.s, p2/M, z22.s, z6.s\n"
+ "smin z23.s, p2/M, z23.s, z6.s\n"
+ "smin z24.s, p2/M, z24.s, z6.s\n"
"uzp1 z20.h, z20.h, z21.h\n"
- "st1b { z16.b }, p1, [x27]\n"
+ "smax z22.s, p2/M, z22.s, z5.s\n"
"smax z23.s, p2/M, z23.s, z5.s\n"
"smax z24.s, p2/M, z24.s, z5.s\n"
+ "smin z25.s, p2/M, z25.s, z6.s\n"
+ ".inst 0x4482881a // srshl z26.s, p2/M, z26.s, z0.s\n"
"uzp1 z21.h, z22.h, z23.h\n"
+ ".inst 0x4482881b // srshl z27.s, p2/M, z27.s, z0.s\n"
"uzp1 z20.b, z20.b, z21.b\n"
+ "st1b { z20.b }, p1, [x22]\n"
+ "add z26.s, z26.s, z4.s\n"
"smax z25.s, p2/M, z25.s, z5.s\n"
- "smax z26.s, p2/M, z26.s, z5.s\n"
+ "add z27.s, z27.s, z4.s\n"
+ ".inst 0x4482881c // srshl z28.s, p2/M, z28.s, z0.s\n"
+ "smin z26.s, p2/M, z26.s, z6.s\n"
"uzp1 z24.h, z24.h, z25.h\n"
- "st1b { z20.b }, p1, [x23]\n"
+ "smin z27.s, p2/M, z27.s, z6.s\n"
+ "add z28.s, z28.s, z4.s\n"
+ "smax z26.s, p2/M, z26.s, z5.s\n"
+ ".inst 0x4482881d // srshl z29.s, p2/M, z29.s, z0.s\n"
"smax z27.s, p2/M, z27.s, z5.s\n"
- "smax z28.s, p2/M, z28.s, z5.s\n"
+ "smin z28.s, p2/M, z28.s, z6.s\n"
+ ".inst 0x4482881e // srshl z30.s, p2/M, z30.s, z0.s\n"
+ "add z29.s, z29.s, z4.s\n"
"uzp1 z25.h, z26.h, z27.h\n"
+ "smax z28.s, p2/M, z28.s, z5.s\n"
+ "add z30.s, z30.s, z4.s\n"
"uzp1 z24.b, z24.b, z25.b\n"
+ "st1b { z24.b }, p1, [x21]\n"
+ "smin z29.s, p2/M, z29.s, z6.s\n"
+ "smin z30.s, p2/M, z30.s, z6.s\n"
+ ".inst 0x4482881f // srshl z31.s, p2/M, z31.s, z0.s\n"
"smax z29.s, p2/M, z29.s, z5.s\n"
"smax z30.s, p2/M, z30.s, z5.s\n"
+ "add z31.s, z31.s, z4.s\n"
"uzp1 z28.h, z28.h, z29.h\n"
- "st1b { z24.b }, p1, [x22]\n"
+ "smin z31.s, p2/M, z31.s, z6.s\n"
"smax z31.s, p2/M, z31.s, z5.s\n"
"uzp1 z29.h, z30.h, z31.h\n"
"uzp1 z28.b, z28.b, z29.b\n"
- "st1b { z28.b }, p1, [x21]\n"
- "addvl x27, x27, #1\n"
+ "st1b { z28.b }, p1, [x20]\n"
"56:" // Height 4: Writeback done
"decw x9, ALL, MUL #4\n"
"cmp x9, XZR\n"
"bgt 44b\n"
"subs %x[M], %x[M], #0x4\n"
"beq 58f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 57f\n"
- "add x21, x21, #0x4\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x4\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"57:" // Update direct input
- "mov x20, #0x4\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x4\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"58:" // Exit
: [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
- : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
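
A note on the clobber-list change just above: with the revert applied, the kernel scratches x19 again, so the asm clobber list trades "x10" for "x19". Under AAPCS64, x19 is callee-saved, so any inline-asm block that writes it must declare the clobber for the compiler to preserve the caller's value around the block. The following is a minimal standalone sketch of that pattern, not code from this patch; the function name and operands are invented for illustration:

    #include <cstdint>

    // Uses x19 as a scratch register inside inline asm, mirroring the
    // reverted kernels' register usage on a tiny scale.
    int64_t scratch_x19(int64_t a, int64_t b) {
        int64_t result;
        __asm__ volatile(
            "mov x19, %x[a]\n"           // stage the first operand in x19
            "add %x[res], x19, %x[b]\n"  // consume it before the block ends
            : [res] "=r" (result)
            : [a] "r" (a), [b] "r" (b)
            : "x19"                      // callee-saved: must be listed so the
                                         // compiler saves/restores it
        );
        return result;
    }

This builds with GCC or Clang targeting AArch64. On toolchains that reserve x19 (for example via -ffixed-x19) such a clobber may be rejected, which is plausibly why x19 was removed in the commit this change reverts.
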
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_mmla_4x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_mmla_4x4VL/generic.cpp
index f9d38c2925..0f3f5e35e1 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_mmla_4x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_mmla_4x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -85,18 +85,18 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"cmp %x[M], #0x2\n"
"bgt 29f\n"
"beq 15f\n"
- "mov x10, %x[col_bias]\n"
"mov z11.s, #0x0\n"
- "mov z15.b, #0x1\n"
- "bic %x[flags], %x[flags], #0x80000000\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov z15.b, #0x1\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "mov x27, %x[col_bias]\n"
+ "bic %x[flags], %x[flags], #0x80000000\n"
+ "mov x26, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x9\n"
"mov z16.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z17.s, #0x0\n"
+ "whilelt p1.b, x19, x9\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"mov z20.s, #0x0\n"
@@ -104,119 +104,119 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"3:" // Height 1: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"4:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 5f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "cbnz x26, 6f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "cbnz x25, 6f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19\n"
"b 6f\n"
"5:" // Height 1: setup direct input
- "mov x24, %x[input_ptr]\n"
+ "mov x23, %x[input_ptr]\n"
"6:" // Height 1: input setup done
- "cmp x25, #0x10\n"
+ "cmp x24, #0x10\n"
"ble 9f\n"
"7:" // Height 1: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x25\n"
- "ld1rqb { z1.b }, p0/Z, [x24]\n"
- "trn1 z0.d, z1.d, z2.d\n"
"ld1b { z5.b }, p2/Z, [x28]\n"
- ".inst 0x45c59810 // ummla z16.s, z0.b, z5.b\n"
+ "whilelt p0.b, XZR, x24\n"
"ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
- "trn2 z1.d, z1.d, z2.d\n"
+ "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
"ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x23, x23, #0x10\n"
+ "trn2 z1.d, z1.d, z2.d\n"
"ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
- ".inst 0x45c69814 // ummla z20.s, z0.b, z6.b\n"
- ".inst 0x45c79811 // ummla z17.s, z0.b, z7.b\n"
"ld1b { z9.b }, p2/Z, [x28, #4, MUL VL]\n"
+ ".inst 0x45c59810 // ummla z16.s, z0.b, z5.b\n"
+ ".inst 0x45c69814 // ummla z20.s, z0.b, z6.b\n"
"ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n"
- ".inst 0x45c89815 // ummla z21.s, z0.b, z8.b\n"
- ".inst 0x45c99812 // ummla z18.s, z0.b, z9.b\n"
+ ".inst 0x45c79811 // ummla z17.s, z0.b, z7.b\n"
"ld1b { z4.b }, p2/Z, [x28, #6, MUL VL]\n"
+ ".inst 0x45c89815 // ummla z21.s, z0.b, z8.b\n"
"ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
- ".inst 0x45ca9816 // ummla z22.s, z0.b, z10.b\n"
+ ".inst 0x45c99812 // ummla z18.s, z0.b, z9.b\n"
"ld1b { z6.b }, p2/Z, [x28, #-8, MUL VL]\n"
- ".inst 0x45c49813 // ummla z19.s, z0.b, z4.b\n"
- ".inst 0x45c59817 // ummla z23.s, z0.b, z5.b\n"
+ ".inst 0x45ca9816 // ummla z22.s, z0.b, z10.b\n"
"ld1b { z7.b }, p2/Z, [x28, #-7, MUL VL]\n"
- ".inst 0x45c69830 // ummla z16.s, z1.b, z6.b\n"
+ ".inst 0x45c49813 // ummla z19.s, z0.b, z4.b\n"
"ld1b { z8.b }, p2/Z, [x28, #-6, MUL VL]\n"
+ ".inst 0x45c59817 // ummla z23.s, z0.b, z5.b\n"
"ld1b { z9.b }, p2/Z, [x28, #-5, MUL VL]\n"
- ".inst 0x45c79834 // ummla z20.s, z1.b, z7.b\n"
+ ".inst 0x45c69830 // ummla z16.s, z1.b, z6.b\n"
"ld1b { z10.b }, p2/Z, [x28, #-4, MUL VL]\n"
+ ".inst 0x45c79834 // ummla z20.s, z1.b, z7.b\n"
"ld1b { z4.b }, p2/Z, [x28, #-3, MUL VL]\n"
".inst 0x45c89831 // ummla z17.s, z1.b, z8.b\n"
- ".inst 0x45c99835 // ummla z21.s, z1.b, z9.b\n"
"ld1b { z5.b }, p2/Z, [x28, #-2, MUL VL]\n"
+ ".inst 0x45c99835 // ummla z21.s, z1.b, z9.b\n"
"ld1b { z6.b }, p2/Z, [x28, #-1, MUL VL]\n"
".inst 0x45ca9832 // ummla z18.s, z1.b, z10.b\n"
".inst 0x45c49836 // ummla z22.s, z1.b, z4.b\n"
".inst 0x45c59833 // ummla z19.s, z1.b, z5.b\n"
".inst 0x45c69837 // ummla z23.s, z1.b, z6.b\n"
- "add x24, x24, #0x10\n"
"tbnz %x[flags], #31, 8f\n"
"udot z11.s, z0.b, z15.b\n"
"udot z11.s, z1.b, z15.b\n"
"8:" // Height 1: Multiply loop: unique 1: skip row sum
- "sub x25, x25, #0x10\n"
- "cmp x25, #0x10\n"
+ "sub x24, x24, #0x10\n"
+ "cmp x24, #0x10\n"
"bgt 7b\n"
"9:" // Height 1: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x25\n"
- "ld1rqb { z1.b }, p0/Z, [x24]\n"
- "trn1 z0.d, z1.d, z2.d\n"
"ld1b { z5.b }, p2/Z, [x28]\n"
- ".inst 0x45c59810 // ummla z16.s, z0.b, z5.b\n"
+ "whilelt p0.b, XZR, x24\n"
"ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
- "subs x25, x25, #0x8\n"
+ "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
"ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "subs x24, x24, #0x8\n"
"trn2 z1.d, z1.d, z2.d\n"
- ".inst 0x45c69814 // ummla z20.s, z0.b, z6.b\n"
+ "ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
"ld1b { z9.b }, p2/Z, [x28, #4, MUL VL]\n"
+ ".inst 0x45c59810 // ummla z16.s, z0.b, z5.b\n"
+ ".inst 0x45c69814 // ummla z20.s, z0.b, z6.b\n"
"ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x45c79811 // ummla z17.s, z0.b, z7.b\n"
- ".inst 0x45c89815 // ummla z21.s, z0.b, z8.b\n"
"ld1b { z4.b }, p2/Z, [x28, #6, MUL VL]\n"
+ ".inst 0x45c89815 // ummla z21.s, z0.b, z8.b\n"
"ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
".inst 0x45c99812 // ummla z18.s, z0.b, z9.b\n"
".inst 0x45ca9816 // ummla z22.s, z0.b, z10.b\n"
".inst 0x45c49813 // ummla z19.s, z0.b, z4.b\n"
".inst 0x45c59817 // ummla z23.s, z0.b, z5.b\n"
- "addvl x28, x28, #8\n"
"ble 10f\n"
"ld1b { z6.b }, p2/Z, [x28]\n"
".inst 0x45c69830 // ummla z16.s, z1.b, z6.b\n"
"ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
- ".inst 0x45c79834 // ummla z20.s, z1.b, z7.b\n"
"ld1b { z8.b }, p2/Z, [x28, #2, MUL VL]\n"
+ ".inst 0x45c79834 // ummla z20.s, z1.b, z7.b\n"
"ld1b { z9.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45c89831 // ummla z17.s, z1.b, z8.b\n"
- ".inst 0x45c99835 // ummla z21.s, z1.b, z9.b\n"
"ld1b { z10.b }, p2/Z, [x28, #4, MUL VL]\n"
"ld1b { z4.b }, p2/Z, [x28, #5, MUL VL]\n"
- ".inst 0x45ca9832 // ummla z18.s, z1.b, z10.b\n"
- ".inst 0x45c49836 // ummla z22.s, z1.b, z4.b\n"
+ ".inst 0x45c99835 // ummla z21.s, z1.b, z9.b\n"
"ld1b { z5.b }, p2/Z, [x28, #6, MUL VL]\n"
"ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ ".inst 0x45ca9832 // ummla z18.s, z1.b, z10.b\n"
+ "addvl x28, x28, #8\n"
+ ".inst 0x45c49836 // ummla z22.s, z1.b, z4.b\n"
".inst 0x45c59833 // ummla z19.s, z1.b, z5.b\n"
".inst 0x45c69837 // ummla z23.s, z1.b, z6.b\n"
- "addvl x28, x28, #8\n"
"10:" // Height 1: Multiply loop: multiply skip
"tbnz %x[flags], #31, 11f\n"
"udot z11.s, z0.b, z15.b\n"
"udot z11.s, z1.b, z15.b\n"
"11:" // Height 1: Multiply loop: unique 2: skip row sum
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 4b\n"
"uzp1 z16.d, z16.d, z20.d\n"
"uzp1 z17.d, z17.d, z21.d\n"
@@ -224,33 +224,33 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"uzp1 z19.d, z19.d, z23.d\n"
"mov z23.d, z16.d\n"
"tbnz %x[flags], #31, 12f\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1rw { z1.s }, p2/Z, [x23]\n"
".inst 0x4491a96b // addp z11.s, p2/m, z11.s, z11.s\n"
- "neg z1.s, p2/M, z1.s\n"
+ "add x22, %x[qp], %[b_offset]\n"
+ "ld1rw { z1.s }, p2/Z, [x22]\n"
"mov z11.s, z11.s[0]\n"
+ "neg z1.s, p2/M, z1.s\n"
"mul z11.s, p2/M, z11.s, z1.s\n"
"12:" // Height 1: skip row sum fixup
"add z23.s, z23.s, z11.s\n"
+ "ld1w { z0.s }, p2/Z, [x27]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"add z17.s, z17.s, z11.s\n"
- "ld1w { z0.s }, p2/Z, [x10]\n"
- "ld1w { z1.s }, p2/Z, [x10, #1, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x27, #1, MUL VL]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
"add z18.s, z18.s, z11.s\n"
+ "ld1w { z2.s }, p2/Z, [x27, #2, MUL VL]\n"
+ "add x22, %x[qp], %[per_layer_mul]\n"
"add z19.s, z19.s, z11.s\n"
- "ld1w { z2.s }, p2/Z, [x10, #2, MUL VL]\n"
- "ld1w { z3.s }, p2/Z, [x10, #3, MUL VL]\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
+ "ld1w { z3.s }, p2/Z, [x27, #3, MUL VL]\n"
+ "addvl x27, x27, #4\n"
"add z23.s, z23.s, z0.s\n"
+ "ld1rw { z0.s }, p2/Z, [x23]\n"
"add z17.s, z17.s, z1.s\n"
+ "ld1rw { z4.s }, p2/Z, [x22]\n"
"add z18.s, z18.s, z2.s\n"
"add z19.s, z19.s, z3.s\n"
- "ld1rw { z4.s }, p2/Z, [x23]\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
- "ld1rw { z0.s }, p2/Z, [x23]\n"
".inst 0x04a476f7 // sqrdmulh z23.s, z23.s, z4.s\n"
".inst 0x04a47631 // sqrdmulh z17.s, z17.s, z4.s\n"
- "addvl x10, x10, #4\n"
".inst 0x04a47652 // sqrdmulh z18.s, z18.s, z4.s\n"
".inst 0x04a47673 // sqrdmulh z19.s, z19.s, z4.s\n"
"tbz %x[flags], #5, 13f\n"
@@ -261,26 +261,26 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
"sqadd z23.s, z23.s, z4.s\n"
"sqadd z17.s, z17.s, z5.s\n"
"sqadd z18.s, z18.s, z6.s\n"
+ "asr z7.s, z7.s, #0x1f\n"
"sqadd z19.s, z19.s, z7.s\n"
"13:" // Height 1: no shift correction
- "add x23, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x23]\n"
".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
- "add z23.s, z23.s, z4.s\n"
+ "add x22, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x22]\n"
".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
+ "add x22, %x[qp], %[minval]\n"
".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
+ "ld1rw { z5.s }, p2/Z, [x22]\n"
+ "add x22, %x[qp], %[maxval]\n"
+ ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
+ "ld1rw { z6.s }, p2/Z, [x22]\n"
+ "add z23.s, z23.s, z4.s\n"
"add z17.s, z17.s, z4.s\n"
"add z18.s, z18.s, z4.s\n"
- ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x23]\n"
"add z19.s, z19.s, z4.s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x23]\n"
"smin z23.s, p2/M, z23.s, z6.s\n"
"smin z17.s, p2/M, z17.s, z6.s\n"
"smin z18.s, p2/M, z18.s, z6.s\n"
@@ -288,31 +288,31 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"smax z23.s, p2/M, z23.s, z5.s\n"
"smax z17.s, p2/M, z17.s, z5.s\n"
"smax z18.s, p2/M, z18.s, z5.s\n"
- "uzp1 z23.h, z23.h, z17.h\n"
"smax z19.s, p2/M, z19.s, z5.s\n"
+ "uzp1 z23.h, z23.h, z17.h\n"
"uzp1 z17.h, z18.h, z19.h\n"
"uzp1 z23.b, z23.b, z17.b\n"
- "st1b { z23.b }, p1, [x27]\n"
- "addvl x27, x27, #1\n"
+ "st1b { z23.b }, p1, [x26]\n"
+ "addvl x26, x26, #1\n"
"14:" // Height 1: Writeback done
"decw x9, ALL, MUL #4\n"
"cmp x9, XZR\n"
"bgt 2b\n"
"b 58f\n"
"15:" // Height 2
- "mov x10, %x[col_bias]\n"
"mov z11.s, #0x0\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x27, %x[col_bias]\n"
"mov z12.s, #0x0\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"bic %x[flags], %x[flags], #0x80000000\n"
"mov z15.b, #0x1\n"
- "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "mov x26, %x[output_ptr]\n"
"16:" // Height 2: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x9\n"
"mov z16.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z17.s, #0x0\n"
+ "whilelt p1.b, x19, x9\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"mov z20.s, #0x0\n"
@@ -320,130 +320,130 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"17:" // Height 2: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"18:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 19f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "cbnz x26, 20f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "cbnz x25, 20f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
"b 20f\n"
"19:" // Height 2: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19\n"
"20:" // Height 2: input setup done
- "cmp x25, #0x10\n"
+ "cmp x24, #0x10\n"
"ble 23f\n"
"21:" // Height 2: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x25\n"
- "ld1rqb { z1.b }, p0/Z, [x24]\n"
- "ld1rqb { z2.b }, p0/Z, [x23]\n"
- "trn1 z0.d, z1.d, z2.d\n"
"ld1b { z5.b }, p2/Z, [x28]\n"
- ".inst 0x45c59810 // ummla z16.s, z0.b, z5.b\n"
+ "whilelt p0.b, XZR, x24\n"
"ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
- "trn2 z1.d, z1.d, z2.d\n"
+ "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ "add x23, x23, #0x10\n"
+ "ld1rqb { z2.b }, p0/Z, [x22]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
"ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x22, x22, #0x10\n"
+ "trn2 z1.d, z1.d, z2.d\n"
"ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
- ".inst 0x45c69814 // ummla z20.s, z0.b, z6.b\n"
- ".inst 0x45c79811 // ummla z17.s, z0.b, z7.b\n"
"ld1b { z9.b }, p2/Z, [x28, #4, MUL VL]\n"
+ ".inst 0x45c59810 // ummla z16.s, z0.b, z5.b\n"
+ ".inst 0x45c69814 // ummla z20.s, z0.b, z6.b\n"
"ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n"
- ".inst 0x45c89815 // ummla z21.s, z0.b, z8.b\n"
- ".inst 0x45c99812 // ummla z18.s, z0.b, z9.b\n"
+ ".inst 0x45c79811 // ummla z17.s, z0.b, z7.b\n"
"ld1b { z4.b }, p2/Z, [x28, #6, MUL VL]\n"
+ ".inst 0x45c89815 // ummla z21.s, z0.b, z8.b\n"
"ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
- ".inst 0x45ca9816 // ummla z22.s, z0.b, z10.b\n"
+ ".inst 0x45c99812 // ummla z18.s, z0.b, z9.b\n"
"ld1b { z6.b }, p2/Z, [x28, #-8, MUL VL]\n"
- ".inst 0x45c49813 // ummla z19.s, z0.b, z4.b\n"
- ".inst 0x45c59817 // ummla z23.s, z0.b, z5.b\n"
+ ".inst 0x45ca9816 // ummla z22.s, z0.b, z10.b\n"
"ld1b { z7.b }, p2/Z, [x28, #-7, MUL VL]\n"
- ".inst 0x45c69830 // ummla z16.s, z1.b, z6.b\n"
+ ".inst 0x45c49813 // ummla z19.s, z0.b, z4.b\n"
"ld1b { z8.b }, p2/Z, [x28, #-6, MUL VL]\n"
+ ".inst 0x45c59817 // ummla z23.s, z0.b, z5.b\n"
"ld1b { z9.b }, p2/Z, [x28, #-5, MUL VL]\n"
- ".inst 0x45c79834 // ummla z20.s, z1.b, z7.b\n"
+ ".inst 0x45c69830 // ummla z16.s, z1.b, z6.b\n"
"ld1b { z10.b }, p2/Z, [x28, #-4, MUL VL]\n"
+ ".inst 0x45c79834 // ummla z20.s, z1.b, z7.b\n"
"ld1b { z4.b }, p2/Z, [x28, #-3, MUL VL]\n"
".inst 0x45c89831 // ummla z17.s, z1.b, z8.b\n"
- ".inst 0x45c99835 // ummla z21.s, z1.b, z9.b\n"
"ld1b { z5.b }, p2/Z, [x28, #-2, MUL VL]\n"
+ ".inst 0x45c99835 // ummla z21.s, z1.b, z9.b\n"
"ld1b { z6.b }, p2/Z, [x28, #-1, MUL VL]\n"
".inst 0x45ca9832 // ummla z18.s, z1.b, z10.b\n"
".inst 0x45c49836 // ummla z22.s, z1.b, z4.b\n"
".inst 0x45c59833 // ummla z19.s, z1.b, z5.b\n"
".inst 0x45c69837 // ummla z23.s, z1.b, z6.b\n"
- "add x24, x24, #0x10\n"
- "add x23, x23, #0x10\n"
"tbnz %x[flags], #31, 22f\n"
"udot z11.s, z0.b, z15.b\n"
"udot z11.s, z1.b, z15.b\n"
"22:" // Height 2: Multiply loop: unique 3: skip row sum
- "sub x25, x25, #0x10\n"
- "cmp x25, #0x10\n"
+ "sub x24, x24, #0x10\n"
+ "cmp x24, #0x10\n"
"bgt 21b\n"
"23:" // Height 2: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x25\n"
- "ld1rqb { z1.b }, p0/Z, [x24]\n"
- "ld1rqb { z2.b }, p0/Z, [x23]\n"
- "trn1 z0.d, z1.d, z2.d\n"
"ld1b { z5.b }, p2/Z, [x28]\n"
- ".inst 0x45c59810 // ummla z16.s, z0.b, z5.b\n"
+ "whilelt p0.b, XZR, x24\n"
"ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
- "subs x25, x25, #0x8\n"
+ "subs x24, x24, #0x8\n"
+ "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ "ld1rqb { z2.b }, p0/Z, [x22]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
"ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
"trn2 z1.d, z1.d, z2.d\n"
- ".inst 0x45c69814 // ummla z20.s, z0.b, z6.b\n"
+ "ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
"ld1b { z9.b }, p2/Z, [x28, #4, MUL VL]\n"
+ ".inst 0x45c59810 // ummla z16.s, z0.b, z5.b\n"
+ ".inst 0x45c69814 // ummla z20.s, z0.b, z6.b\n"
"ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x45c79811 // ummla z17.s, z0.b, z7.b\n"
- ".inst 0x45c89815 // ummla z21.s, z0.b, z8.b\n"
"ld1b { z4.b }, p2/Z, [x28, #6, MUL VL]\n"
+ ".inst 0x45c89815 // ummla z21.s, z0.b, z8.b\n"
"ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
".inst 0x45c99812 // ummla z18.s, z0.b, z9.b\n"
".inst 0x45ca9816 // ummla z22.s, z0.b, z10.b\n"
".inst 0x45c49813 // ummla z19.s, z0.b, z4.b\n"
".inst 0x45c59817 // ummla z23.s, z0.b, z5.b\n"
- "addvl x28, x28, #8\n"
"ble 24f\n"
"ld1b { z6.b }, p2/Z, [x28]\n"
".inst 0x45c69830 // ummla z16.s, z1.b, z6.b\n"
"ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
- ".inst 0x45c79834 // ummla z20.s, z1.b, z7.b\n"
"ld1b { z8.b }, p2/Z, [x28, #2, MUL VL]\n"
+ ".inst 0x45c79834 // ummla z20.s, z1.b, z7.b\n"
"ld1b { z9.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45c89831 // ummla z17.s, z1.b, z8.b\n"
- ".inst 0x45c99835 // ummla z21.s, z1.b, z9.b\n"
"ld1b { z10.b }, p2/Z, [x28, #4, MUL VL]\n"
"ld1b { z4.b }, p2/Z, [x28, #5, MUL VL]\n"
- ".inst 0x45ca9832 // ummla z18.s, z1.b, z10.b\n"
- ".inst 0x45c49836 // ummla z22.s, z1.b, z4.b\n"
+ ".inst 0x45c99835 // ummla z21.s, z1.b, z9.b\n"
"ld1b { z5.b }, p2/Z, [x28, #6, MUL VL]\n"
"ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ ".inst 0x45ca9832 // ummla z18.s, z1.b, z10.b\n"
+ "addvl x28, x28, #8\n"
+ ".inst 0x45c49836 // ummla z22.s, z1.b, z4.b\n"
".inst 0x45c59833 // ummla z19.s, z1.b, z5.b\n"
".inst 0x45c69837 // ummla z23.s, z1.b, z6.b\n"
- "addvl x28, x28, #8\n"
"24:" // Height 2: Multiply loop: multiply skip
"tbnz %x[flags], #31, 25f\n"
"udot z11.s, z0.b, z15.b\n"
"udot z11.s, z1.b, z15.b\n"
"25:" // Height 2: Multiply loop: unique 4: skip row sum
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 18b\n"
"uzp1 z7.d, z16.d, z20.d\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 z16.d, z16.d, z20.d\n"
- "add x22, x27, x20\n"
+ "add x21, x26, x19\n"
"uzp1 z20.d, z17.d, z21.d\n"
"uzp2 z17.d, z17.d, z21.d\n"
"uzp1 z21.d, z18.d, z22.d\n"
@@ -452,39 +452,39 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"uzp2 z19.d, z19.d, z23.d\n"
"mov z23.d, z7.d\n"
"tbnz %x[flags], #31, 26f\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1rw { z2.s }, p2/Z, [x23]\n"
".inst 0x4491a96b // addp z11.s, p2/m, z11.s, z11.s\n"
- "neg z2.s, p2/M, z2.s\n"
+ "add x22, %x[qp], %[b_offset]\n"
+ "ld1rw { z2.s }, p2/Z, [x22]\n"
"mov z12.s, z11.s[3]\n"
"mov z11.s, z11.s[0]\n"
+ "neg z2.s, p2/M, z2.s\n"
"mul z11.s, p2/M, z11.s, z2.s\n"
"mul z12.s, p2/M, z12.s, z2.s\n"
"26:" // Height 2: skip row sum fixup
"add z23.s, z23.s, z11.s\n"
+ "ld1w { z0.s }, p2/Z, [x27]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"add z20.s, z20.s, z11.s\n"
- "ld1w { z0.s }, p2/Z, [x10]\n"
- "ld1w { z1.s }, p2/Z, [x10, #1, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x27, #1, MUL VL]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
"add z21.s, z21.s, z11.s\n"
+ "ld1w { z2.s }, p2/Z, [x27, #2, MUL VL]\n"
+ "add x22, %x[qp], %[per_layer_mul]\n"
"add z22.s, z22.s, z11.s\n"
- "ld1w { z2.s }, p2/Z, [x10, #2, MUL VL]\n"
- "ld1w { z3.s }, p2/Z, [x10, #3, MUL VL]\n"
+ "ld1w { z3.s }, p2/Z, [x27, #3, MUL VL]\n"
+ "addvl x27, x27, #4\n"
"add z16.s, z16.s, z12.s\n"
+ "ld1rw { z4.s }, p2/Z, [x22]\n"
"add z17.s, z17.s, z12.s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
"add z18.s, z18.s, z12.s\n"
"add z19.s, z19.s, z12.s\n"
- "ld1rw { z4.s }, p2/Z, [x23]\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
"add z23.s, z23.s, z0.s\n"
"add z20.s, z20.s, z1.s\n"
- "addvl x10, x10, #4\n"
"add z21.s, z21.s, z2.s\n"
"add z22.s, z22.s, z3.s\n"
"add z16.s, z16.s, z0.s\n"
- "add z17.s, z17.s, z1.s\n"
"ld1rw { z0.s }, p2/Z, [x23]\n"
+ "add z17.s, z17.s, z1.s\n"
"add z18.s, z18.s, z2.s\n"
"add z19.s, z19.s, z3.s\n"
".inst 0x04a476f7 // sqrdmulh z23.s, z23.s, z4.s\n"
@@ -497,97 +497,97 @@ void sve_hybrid_u8qa_mmla_4x4VL (
".inst 0x04a47673 // sqrdmulh z19.s, z19.s, z4.s\n"
"tbz %x[flags], #5, 27f\n"
"and z4.d, z23.d, z0.d\n"
- "asr z4.s, z4.s, #0x1f\n"
- "sqadd z23.s, z23.s, z4.s\n"
"and z5.d, z20.d, z0.d\n"
"and z6.d, z21.d, z0.d\n"
+ "asr z4.s, z4.s, #0x1f\n"
+ "asr z5.s, z5.s, #0x1f\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "sqadd z23.s, z23.s, z4.s\n"
+ "sqadd z20.s, z20.s, z5.s\n"
+ "sqadd z21.s, z21.s, z6.s\n"
"and z7.d, z22.d, z0.d\n"
"and z8.d, z16.d, z0.d\n"
"and z9.d, z17.d, z0.d\n"
- "and z10.d, z18.d, z0.d\n"
- "and z4.d, z19.d, z0.d\n"
- "asr z5.s, z5.s, #0x1f\n"
- "asr z6.s, z6.s, #0x1f\n"
"asr z7.s, z7.s, #0x1f\n"
"asr z8.s, z8.s, #0x1f\n"
"asr z9.s, z9.s, #0x1f\n"
- "asr z10.s, z10.s, #0x1f\n"
- "asr z4.s, z4.s, #0x1f\n"
- "sqadd z20.s, z20.s, z5.s\n"
- "sqadd z21.s, z21.s, z6.s\n"
"sqadd z22.s, z22.s, z7.s\n"
"sqadd z16.s, z16.s, z8.s\n"
"sqadd z17.s, z17.s, z9.s\n"
+ "and z10.d, z18.d, z0.d\n"
+ "and z4.d, z19.d, z0.d\n"
+ "asr z10.s, z10.s, #0x1f\n"
+ "asr z4.s, z4.s, #0x1f\n"
"sqadd z18.s, z18.s, z10.s\n"
"sqadd z19.s, z19.s, z4.s\n"
"27:" // Height 2: no shift correction
- "add x23, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x23]\n"
".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
- "add z23.s, z23.s, z4.s\n"
+ "add x22, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x22]\n"
".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
+ "add x22, %x[qp], %[minval]\n"
".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
- "add z20.s, z20.s, z4.s\n"
- "add z21.s, z21.s, z4.s\n"
+ "ld1rw { z5.s }, p2/Z, [x22]\n"
+ "add x22, %x[qp], %[maxval]\n"
".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
+ "ld1rw { z6.s }, p2/Z, [x22]\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
+ "add z23.s, z23.s, z4.s\n"
+ "add z20.s, z20.s, z4.s\n"
+ "add z21.s, z21.s, z4.s\n"
"add z22.s, z22.s, z4.s\n"
"add z16.s, z16.s, z4.s\n"
- ".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
- ".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
- "add z17.s, z17.s, z4.s\n"
- "add z18.s, z18.s, z4.s\n"
- ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x23]\n"
- "add z19.s, z19.s, z4.s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x23]\n"
"smin z23.s, p2/M, z23.s, z6.s\n"
"smin z20.s, p2/M, z20.s, z6.s\n"
"smin z21.s, p2/M, z21.s, z6.s\n"
"smin z22.s, p2/M, z22.s, z6.s\n"
- "smin z16.s, p2/M, z16.s, z6.s\n"
- "smin z17.s, p2/M, z17.s, z6.s\n"
- "smin z18.s, p2/M, z18.s, z6.s\n"
- "smin z19.s, p2/M, z19.s, z6.s\n"
"smax z23.s, p2/M, z23.s, z5.s\n"
"smax z20.s, p2/M, z20.s, z5.s\n"
"smax z21.s, p2/M, z21.s, z5.s\n"
- "uzp1 z23.h, z23.h, z20.h\n"
"smax z22.s, p2/M, z22.s, z5.s\n"
- "smax z16.s, p2/M, z16.s, z5.s\n"
+ "smin z16.s, p2/M, z16.s, z6.s\n"
+ "uzp1 z23.h, z23.h, z20.h\n"
+ ".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
"uzp1 z20.h, z21.h, z22.h\n"
+ "smax z16.s, p2/M, z16.s, z5.s\n"
"uzp1 z23.b, z23.b, z20.b\n"
+ "st1b { z23.b }, p1, [x26]\n"
+ "add z17.s, z17.s, z4.s\n"
+ "addvl x26, x26, #1\n"
+ ".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
+ ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
+ "smin z17.s, p2/M, z17.s, z6.s\n"
+ "add z18.s, z18.s, z4.s\n"
+ "add z19.s, z19.s, z4.s\n"
"smax z17.s, p2/M, z17.s, z5.s\n"
- "smax z18.s, p2/M, z18.s, z5.s\n"
+ "smin z18.s, p2/M, z18.s, z6.s\n"
+ "smin z19.s, p2/M, z19.s, z6.s\n"
"uzp1 z16.h, z16.h, z17.h\n"
- "st1b { z23.b }, p1, [x27]\n"
+ "smax z18.s, p2/M, z18.s, z5.s\n"
"smax z19.s, p2/M, z19.s, z5.s\n"
"uzp1 z17.h, z18.h, z19.h\n"
"uzp1 z16.b, z16.b, z17.b\n"
- "st1b { z16.b }, p1, [x22]\n"
- "addvl x27, x27, #1\n"
+ "st1b { z16.b }, p1, [x21]\n"
"28:" // Height 2: Writeback done
"decw x9, ALL, MUL #4\n"
"cmp x9, XZR\n"
"bgt 16b\n"
"b 58f\n"
"29:" // Height 3
- "mov x10, %x[col_bias]\n"
"mov z11.s, #0x0\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x27, %x[col_bias]\n"
"mov z12.s, #0x0\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"bic %x[flags], %x[flags], #0x80000000\n"
"mov z13.s, #0x0\n"
+ "mov x26, %x[output_ptr]\n"
"mov z15.b, #0x1\n"
- "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
"30:" // Height 3: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x9\n"
"mov z16.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z17.s, #0x0\n"
+ "whilelt p1.b, x19, x9\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"mov z20.s, #0x0\n"
@@ -603,74 +603,74 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"mov z30.s, #0x0\n"
"mov z31.s, #0x0\n"
"31:" // Height 3: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"32:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 33f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "ldr x22, [x21, #0x10]\n"
- "cbnz x26, 34f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "cbnz x25, 34f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
"b 34f\n"
"33:" // Height 3: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
"34:" // Height 3: input setup done
- "cmp x25, #0x10\n"
+ "cmp x24, #0x10\n"
"ble 37f\n"
"35:" // Height 3: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x25\n"
- "ld1rqb { z1.b }, p0/Z, [x24]\n"
- "ld1rqb { z2.b }, p0/Z, [x23]\n"
- "ld1rqb { z3.b }, p0/Z, [x22]\n"
- "trn1 z0.d, z1.d, z2.d\n"
- "trn2 z1.d, z1.d, z2.d\n"
"ld1b { z5.b }, p2/Z, [x28]\n"
- "trn1 z2.d, z3.d, z4.d\n"
- "trn2 z3.d, z3.d, z4.d\n"
- ".inst 0x45c59810 // ummla z16.s, z0.b, z5.b\n"
+ "whilelt p0.b, XZR, x24\n"
"ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
- ".inst 0x45c59858 // ummla z24.s, z2.b, z5.b\n"
+ "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ "add x23, x23, #0x10\n"
+ "ld1rqb { z2.b }, p0/Z, [x22]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
+ "ld1rqb { z3.b }, p0/Z, [x21]\n"
+ "add x22, x22, #0x10\n"
+ "trn2 z1.d, z1.d, z2.d\n"
"ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x45c59810 // ummla z16.s, z0.b, z5.b\n"
"ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45c69814 // ummla z20.s, z0.b, z6.b\n"
"ld1b { z9.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "trn1 z2.d, z3.d, z4.d\n"
"ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n"
- ".inst 0x45c6985c // ummla z28.s, z2.b, z6.b\n"
- ".inst 0x45c79811 // ummla z17.s, z0.b, z7.b\n"
+ "trn2 z3.d, z3.d, z4.d\n"
"ld1b { z4.b }, p2/Z, [x28, #6, MUL VL]\n"
+ ".inst 0x45c59858 // ummla z24.s, z2.b, z5.b\n"
"ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
- ".inst 0x45c79859 // ummla z25.s, z2.b, z7.b\n"
+ ".inst 0x45c6985c // ummla z28.s, z2.b, z6.b\n"
"ld1b { z6.b }, p2/Z, [x28, #-8, MUL VL]\n"
+ ".inst 0x45c79811 // ummla z17.s, z0.b, z7.b\n"
+ ".inst 0x45c79859 // ummla z25.s, z2.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-7, MUL VL]\n"
".inst 0x45c89815 // ummla z21.s, z0.b, z8.b\n"
".inst 0x45c8985d // ummla z29.s, z2.b, z8.b\n"
- "ld1b { z7.b }, p2/Z, [x28, #-7, MUL VL]\n"
+ "ld1b { z8.b }, p2/Z, [x28, #-6, MUL VL]\n"
".inst 0x45c99812 // ummla z18.s, z0.b, z9.b\n"
".inst 0x45c9985a // ummla z26.s, z2.b, z9.b\n"
- "ld1b { z8.b }, p2/Z, [x28, #-6, MUL VL]\n"
"ld1b { z9.b }, p2/Z, [x28, #-5, MUL VL]\n"
".inst 0x45ca9816 // ummla z22.s, z0.b, z10.b\n"
".inst 0x45ca985e // ummla z30.s, z2.b, z10.b\n"
"ld1b { z10.b }, p2/Z, [x28, #-4, MUL VL]\n"
- "add x24, x24, #0x10\n"
".inst 0x45c49813 // ummla z19.s, z0.b, z4.b\n"
".inst 0x45c4985b // ummla z27.s, z2.b, z4.b\n"
"ld1b { z4.b }, p2/Z, [x28, #-3, MUL VL]\n"
- "add x23, x23, #0x10\n"
".inst 0x45c59817 // ummla z23.s, z0.b, z5.b\n"
".inst 0x45c5985f // ummla z31.s, z2.b, z5.b\n"
"ld1b { z5.b }, p2/Z, [x28, #-2, MUL VL]\n"
- "add x22, x22, #0x10\n"
".inst 0x45c69830 // ummla z16.s, z1.b, z6.b\n"
".inst 0x45c69878 // ummla z24.s, z3.b, z6.b\n"
"ld1b { z6.b }, p2/Z, [x28, #-1, MUL VL]\n"
@@ -694,36 +694,36 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"udot z11.s, z1.b, z15.b\n"
"udot z13.s, z3.b, z15.b\n"
"36:" // Height 3: Multiply loop: unique 5: skip row sum
- "sub x25, x25, #0x10\n"
- "cmp x25, #0x10\n"
+ "sub x24, x24, #0x10\n"
+ "cmp x24, #0x10\n"
"bgt 35b\n"
"37:" // Height 3: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x25\n"
- "ld1rqb { z1.b }, p0/Z, [x24]\n"
- "ld1rqb { z2.b }, p0/Z, [x23]\n"
- "ld1rqb { z3.b }, p0/Z, [x22]\n"
- "trn1 z0.d, z1.d, z2.d\n"
- "trn2 z1.d, z1.d, z2.d\n"
"ld1b { z5.b }, p2/Z, [x28]\n"
- "trn1 z2.d, z3.d, z4.d\n"
- "trn2 z3.d, z3.d, z4.d\n"
- ".inst 0x45c59810 // ummla z16.s, z0.b, z5.b\n"
+ "whilelt p0.b, XZR, x24\n"
"ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
- ".inst 0x45c59858 // ummla z24.s, z2.b, z5.b\n"
+ "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ "subs x24, x24, #0x8\n"
+ "ld1rqb { z2.b }, p0/Z, [x22]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
+ "ld1rqb { z3.b }, p0/Z, [x21]\n"
+ "trn2 z1.d, z1.d, z2.d\n"
"ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
- "subs x25, x25, #0x8\n"
+ ".inst 0x45c59810 // ummla z16.s, z0.b, z5.b\n"
+ ".inst 0x45c69814 // ummla z20.s, z0.b, z6.b\n"
"ld1b { z9.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "trn1 z2.d, z3.d, z4.d\n"
"ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n"
- ".inst 0x45c69814 // ummla z20.s, z0.b, z6.b\n"
- ".inst 0x45c6985c // ummla z28.s, z2.b, z6.b\n"
+ "trn2 z3.d, z3.d, z4.d\n"
"ld1b { z4.b }, p2/Z, [x28, #6, MUL VL]\n"
+ ".inst 0x45c59858 // ummla z24.s, z2.b, z5.b\n"
"ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
+ ".inst 0x45c6985c // ummla z28.s, z2.b, z6.b\n"
".inst 0x45c79811 // ummla z17.s, z0.b, z7.b\n"
".inst 0x45c79859 // ummla z25.s, z2.b, z7.b\n"
".inst 0x45c89815 // ummla z21.s, z0.b, z8.b\n"
".inst 0x45c8985d // ummla z29.s, z2.b, z8.b\n"
- "addvl x28, x28, #8\n"
".inst 0x45c99812 // ummla z18.s, z0.b, z9.b\n"
".inst 0x45c9985a // ummla z26.s, z2.b, z9.b\n"
".inst 0x45ca9816 // ummla z22.s, z0.b, z10.b\n"
@@ -735,23 +735,23 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"ble 38f\n"
"ld1b { z6.b }, p2/Z, [x28]\n"
".inst 0x45c69830 // ummla z16.s, z1.b, z6.b\n"
- ".inst 0x45c69878 // ummla z24.s, z3.b, z6.b\n"
"ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
+ ".inst 0x45c69878 // ummla z24.s, z3.b, z6.b\n"
"ld1b { z8.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z9.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45c79834 // ummla z20.s, z1.b, z7.b\n"
- ".inst 0x45c7987c // ummla z28.s, z3.b, z7.b\n"
"ld1b { z10.b }, p2/Z, [x28, #4, MUL VL]\n"
+ ".inst 0x45c7987c // ummla z28.s, z3.b, z7.b\n"
"ld1b { z4.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x45c89831 // ummla z17.s, z1.b, z8.b\n"
- ".inst 0x45c89879 // ummla z25.s, z3.b, z8.b\n"
"ld1b { z5.b }, p2/Z, [x28, #6, MUL VL]\n"
+ ".inst 0x45c89879 // ummla z25.s, z3.b, z8.b\n"
"ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
".inst 0x45c99835 // ummla z21.s, z1.b, z9.b\n"
".inst 0x45c9987d // ummla z29.s, z3.b, z9.b\n"
".inst 0x45ca9832 // ummla z18.s, z1.b, z10.b\n"
".inst 0x45ca987a // ummla z26.s, z3.b, z10.b\n"
- "addvl x28, x28, #8\n"
".inst 0x45c49836 // ummla z22.s, z1.b, z4.b\n"
".inst 0x45c4987e // ummla z30.s, z3.b, z4.b\n"
".inst 0x45c59833 // ummla z19.s, z1.b, z5.b\n"
@@ -765,17 +765,17 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"udot z11.s, z1.b, z15.b\n"
"udot z13.s, z3.b, z15.b\n"
"39:" // Height 3: Multiply loop: unique 6: skip row sum
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 32b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 z7.d, z16.d, z20.d\n"
- "add x22, x27, x20\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 z16.d, z16.d, z20.d\n"
+ "add x21, x26, x19\n"
"uzp1 z20.d, z17.d, z21.d\n"
"uzp2 z17.d, z17.d, z21.d\n"
- "add x21, x22, x20\n"
+ "add x20, x21, x19\n"
"uzp1 z21.d, z18.d, z22.d\n"
"uzp2 z18.d, z18.d, z22.d\n"
"uzp1 z22.d, z19.d, z23.d\n"
@@ -786,37 +786,37 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"uzp1 z27.d, z27.d, z31.d\n"
"mov z31.d, z7.d\n"
"tbnz %x[flags], #31, 40f\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1rw { z3.s }, p2/Z, [x23]\n"
".inst 0x4491a96b // addp z11.s, p2/m, z11.s, z11.s\n"
+ "add x22, %x[qp], %[b_offset]\n"
+ "ld1rw { z3.s }, p2/Z, [x22]\n"
".inst 0x4491a9ad // addp z13.s, p2/m, z13.s, z13.s\n"
- "neg z3.s, p2/M, z3.s\n"
"mov z12.s, z11.s[3]\n"
"mov z11.s, z11.s[0]\n"
- "mul z11.s, p2/M, z11.s, z3.s\n"
+ "neg z3.s, p2/M, z3.s\n"
"mov z13.s, z13.s[0]\n"
+ "mul z11.s, p2/M, z11.s, z3.s\n"
"mul z12.s, p2/M, z12.s, z3.s\n"
"mul z13.s, p2/M, z13.s, z3.s\n"
"40:" // Height 3: skip row sum fixup
"add z31.s, z31.s, z11.s\n"
+ "ld1w { z0.s }, p2/Z, [x27]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"add z20.s, z20.s, z11.s\n"
- "ld1w { z0.s }, p2/Z, [x10]\n"
- "ld1w { z1.s }, p2/Z, [x10, #1, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x27, #1, MUL VL]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
"add z21.s, z21.s, z11.s\n"
+ "ld1w { z2.s }, p2/Z, [x27, #2, MUL VL]\n"
+ "add x22, %x[qp], %[per_layer_mul]\n"
"add z22.s, z22.s, z11.s\n"
- "ld1w { z2.s }, p2/Z, [x10, #2, MUL VL]\n"
- "ld1w { z3.s }, p2/Z, [x10, #3, MUL VL]\n"
+ "ld1w { z3.s }, p2/Z, [x27, #3, MUL VL]\n"
+ "addvl x27, x27, #4\n"
"add z16.s, z16.s, z12.s\n"
+ "ld1rw { z4.s }, p2/Z, [x22]\n"
"add z17.s, z17.s, z12.s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
"add z18.s, z18.s, z12.s\n"
"add z19.s, z19.s, z12.s\n"
- "ld1rw { z4.s }, p2/Z, [x23]\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
"add z24.s, z24.s, z13.s\n"
"add z25.s, z25.s, z13.s\n"
- "addvl x10, x10, #4\n"
"add z26.s, z26.s, z13.s\n"
"add z27.s, z27.s, z13.s\n"
"add z31.s, z31.s, z0.s\n"
@@ -828,8 +828,8 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"add z18.s, z18.s, z2.s\n"
"add z19.s, z19.s, z3.s\n"
"add z24.s, z24.s, z0.s\n"
- "add z25.s, z25.s, z1.s\n"
"ld1rw { z0.s }, p2/Z, [x23]\n"
+ "add z25.s, z25.s, z1.s\n"
"add z26.s, z26.s, z2.s\n"
"add z27.s, z27.s, z3.s\n"
".inst 0x04a477ff // sqrdmulh z31.s, z31.s, z4.s\n"
@@ -848,131 +848,131 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"and z4.d, z31.d, z0.d\n"
"and z5.d, z20.d, z0.d\n"
"and z6.d, z21.d, z0.d\n"
- "and z7.d, z22.d, z0.d\n"
- "and z8.d, z16.d, z0.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
- "asr z8.s, z8.s, #0x1f\n"
"sqadd z31.s, z31.s, z4.s\n"
"sqadd z20.s, z20.s, z5.s\n"
"sqadd z21.s, z21.s, z6.s\n"
+ "and z7.d, z22.d, z0.d\n"
+ "and z8.d, z16.d, z0.d\n"
+ "and z9.d, z17.d, z0.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ "asr z8.s, z8.s, #0x1f\n"
+ "asr z9.s, z9.s, #0x1f\n"
"sqadd z22.s, z22.s, z7.s\n"
"sqadd z16.s, z16.s, z8.s\n"
- "and z9.d, z17.d, z0.d\n"
+ "sqadd z17.s, z17.s, z9.s\n"
"and z10.d, z18.d, z0.d\n"
"and z4.d, z19.d, z0.d\n"
"and z5.d, z24.d, z0.d\n"
- "and z6.d, z25.d, z0.d\n"
- "and z7.d, z26.d, z0.d\n"
- "and z8.d, z27.d, z0.d\n"
- "asr z9.s, z9.s, #0x1f\n"
"asr z10.s, z10.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
- "asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
- "asr z8.s, z8.s, #0x1f\n"
- "sqadd z17.s, z17.s, z9.s\n"
"sqadd z18.s, z18.s, z10.s\n"
"sqadd z19.s, z19.s, z4.s\n"
"sqadd z24.s, z24.s, z5.s\n"
+ "and z6.d, z25.d, z0.d\n"
+ "and z7.d, z26.d, z0.d\n"
+ "and z8.d, z27.d, z0.d\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ "asr z8.s, z8.s, #0x1f\n"
"sqadd z25.s, z25.s, z6.s\n"
"sqadd z26.s, z26.s, z7.s\n"
"sqadd z27.s, z27.s, z8.s\n"
"41:" // Height 3: no shift correction
- "add x23, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x23]\n"
".inst 0x4482881f // srshl z31.s, p2/M, z31.s, z0.s\n"
- "add z31.s, z31.s, z4.s\n"
+ "add x22, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x22]\n"
".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
+ "add x22, %x[qp], %[minval]\n"
".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
- "add z20.s, z20.s, z4.s\n"
- "add z21.s, z21.s, z4.s\n"
+ "ld1rw { z5.s }, p2/Z, [x22]\n"
+ "add x22, %x[qp], %[maxval]\n"
".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
+ "ld1rw { z6.s }, p2/Z, [x22]\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
+ "add z31.s, z31.s, z4.s\n"
+ "add z20.s, z20.s, z4.s\n"
+ "add z21.s, z21.s, z4.s\n"
"add z22.s, z22.s, z4.s\n"
"add z16.s, z16.s, z4.s\n"
- ".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
- ".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
- "add z17.s, z17.s, z4.s\n"
- "add z18.s, z18.s, z4.s\n"
- ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
- ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
- "add z19.s, z19.s, z4.s\n"
- "add z24.s, z24.s, z4.s\n"
- ".inst 0x44828819 // srshl z25.s, p2/M, z25.s, z0.s\n"
- ".inst 0x4482881a // srshl z26.s, p2/M, z26.s, z0.s\n"
- "add z25.s, z25.s, z4.s\n"
- "add z26.s, z26.s, z4.s\n"
- ".inst 0x4482881b // srshl z27.s, p2/M, z27.s, z0.s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x23]\n"
- "add z27.s, z27.s, z4.s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x23]\n"
"smin z31.s, p2/M, z31.s, z6.s\n"
"smin z20.s, p2/M, z20.s, z6.s\n"
"smin z21.s, p2/M, z21.s, z6.s\n"
"smin z22.s, p2/M, z22.s, z6.s\n"
- "smin z16.s, p2/M, z16.s, z6.s\n"
- "smin z17.s, p2/M, z17.s, z6.s\n"
- "smin z18.s, p2/M, z18.s, z6.s\n"
- "smin z19.s, p2/M, z19.s, z6.s\n"
- "smin z24.s, p2/M, z24.s, z6.s\n"
- "smin z25.s, p2/M, z25.s, z6.s\n"
- "smin z26.s, p2/M, z26.s, z6.s\n"
- "smin z27.s, p2/M, z27.s, z6.s\n"
"smax z31.s, p2/M, z31.s, z5.s\n"
"smax z20.s, p2/M, z20.s, z5.s\n"
"smax z21.s, p2/M, z21.s, z5.s\n"
- "uzp1 z31.h, z31.h, z20.h\n"
"smax z22.s, p2/M, z22.s, z5.s\n"
- "smax z16.s, p2/M, z16.s, z5.s\n"
+ "smin z16.s, p2/M, z16.s, z6.s\n"
+ "uzp1 z31.h, z31.h, z20.h\n"
+ ".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
"uzp1 z20.h, z21.h, z22.h\n"
+ "smax z16.s, p2/M, z16.s, z5.s\n"
"uzp1 z31.b, z31.b, z20.b\n"
+ "st1b { z31.b }, p1, [x26]\n"
+ "add z17.s, z17.s, z4.s\n"
+ "addvl x26, x26, #1\n"
+ ".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
+ ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
+ ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
+ "smin z17.s, p2/M, z17.s, z6.s\n"
+ ".inst 0x44828819 // srshl z25.s, p2/M, z25.s, z0.s\n"
+ "add z18.s, z18.s, z4.s\n"
+ "add z19.s, z19.s, z4.s\n"
+ "add z24.s, z24.s, z4.s\n"
+ "add z25.s, z25.s, z4.s\n"
"smax z17.s, p2/M, z17.s, z5.s\n"
- "smax z18.s, p2/M, z18.s, z5.s\n"
+ "smin z18.s, p2/M, z18.s, z6.s\n"
+ "smin z19.s, p2/M, z19.s, z6.s\n"
+ "smin z24.s, p2/M, z24.s, z6.s\n"
"uzp1 z16.h, z16.h, z17.h\n"
- "st1b { z31.b }, p1, [x27]\n"
+ "smax z18.s, p2/M, z18.s, z5.s\n"
"smax z19.s, p2/M, z19.s, z5.s\n"
"smax z24.s, p2/M, z24.s, z5.s\n"
+ "smin z25.s, p2/M, z25.s, z6.s\n"
+ ".inst 0x4482881a // srshl z26.s, p2/M, z26.s, z0.s\n"
"uzp1 z17.h, z18.h, z19.h\n"
+ ".inst 0x4482881b // srshl z27.s, p2/M, z27.s, z0.s\n"
"uzp1 z16.b, z16.b, z17.b\n"
+ "st1b { z16.b }, p1, [x21]\n"
+ "add z26.s, z26.s, z4.s\n"
"smax z25.s, p2/M, z25.s, z5.s\n"
- "smax z26.s, p2/M, z26.s, z5.s\n"
+ "add z27.s, z27.s, z4.s\n"
+ "smin z26.s, p2/M, z26.s, z6.s\n"
"uzp1 z24.h, z24.h, z25.h\n"
- "st1b { z16.b }, p1, [x22]\n"
+ "smin z27.s, p2/M, z27.s, z6.s\n"
+ "smax z26.s, p2/M, z26.s, z5.s\n"
"smax z27.s, p2/M, z27.s, z5.s\n"
"uzp1 z25.h, z26.h, z27.h\n"
"uzp1 z24.b, z24.b, z25.b\n"
- "st1b { z24.b }, p1, [x21]\n"
- "addvl x27, x27, #1\n"
+ "st1b { z24.b }, p1, [x20]\n"
"42:" // Height 3: Writeback done
"decw x9, ALL, MUL #4\n"
"cmp x9, XZR\n"
"bgt 30b\n"
"b 58f\n"
"43:" // Height 4
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x4\n"
- "mov x10, %x[col_bias]\n"
"mov z11.s, #0x0\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x27, %x[col_bias]\n"
"mov z12.s, #0x0\n"
- "mov z13.s, #0x0\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"bic %x[flags], %x[flags], #0x80000000\n"
- "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov z13.s, #0x0\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x26, %x[output_ptr]\n"
"mov z14.s, #0x0\n"
+ "mov x19, #0x4\n"
"mov z15.b, #0x1\n"
- "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"44:" // Height 4: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x9\n"
"mov z16.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z17.s, #0x0\n"
+ "whilelt p1.b, x19, x9\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"mov z20.s, #0x0\n"
@@ -988,85 +988,85 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"mov z30.s, #0x0\n"
"mov z31.s, #0x0\n"
"45:" // Height 4: setup done
- "mov x26, #0x0\n"
+ "mov x25, #0x0\n"
"46:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w24, [x20, x25, LSL #0x2]\n"
"tbz %x[flags], #3, 47f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "ldr x22, [x21, #0x10]\n"
- "ldr x21, [x21, #0x18]\n"
- "cbnz x26, 48f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
- "add x21, x21, x20\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "ldr x20, [x20, #0x18]\n"
+ "cbnz x25, 48f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
+ "add x20, x20, x19\n"
"b 48f\n"
"47:" // Height 4: setup direct input
- "mov x24, %x[input_ptr]\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
+ "add x20, x21, x19\n"
"48:" // Height 4: input setup done
- "cmp x25, #0x10\n"
+ "cmp x24, #0x10\n"
"ble 51f\n"
"49:" // Height 4: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x25\n"
- "ld1rqb { z1.b }, p0/Z, [x24]\n"
- "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "ld1b { z5.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x24\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ "add x23, x23, #0x10\n"
+ "ld1rqb { z2.b }, p0/Z, [x22]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1rqb { z3.b }, p0/Z, [x22]\n"
- "ld1rqb { z4.b }, p0/Z, [x21]\n"
+ "ld1rqb { z3.b }, p0/Z, [x21]\n"
+ "add x22, x22, #0x10\n"
"trn2 z1.d, z1.d, z2.d\n"
- "trn1 z2.d, z3.d, z4.d\n"
- "ld1b { z5.b }, p2/Z, [x28]\n"
- "trn2 z3.d, z3.d, z4.d\n"
+ "ld1rqb { z4.b }, p0/Z, [x20]\n"
+ "add x21, x21, #0x10\n"
".inst 0x45c59810 // ummla z16.s, z0.b, z5.b\n"
- ".inst 0x45c59858 // ummla z24.s, z2.b, z5.b\n"
- "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
"ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x20, x20, #0x10\n"
".inst 0x45c69814 // ummla z20.s, z0.b, z6.b\n"
- ".inst 0x45c6985c // ummla z28.s, z2.b, z6.b\n"
"ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
"ld1b { z9.b }, p2/Z, [x28, #4, MUL VL]\n"
- ".inst 0x45c79811 // ummla z17.s, z0.b, z7.b\n"
- ".inst 0x45c79859 // ummla z25.s, z2.b, z7.b\n"
+ "trn1 z2.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
"ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n"
+ ".inst 0x45c79811 // ummla z17.s, z0.b, z7.b\n"
"ld1b { z4.b }, p2/Z, [x28, #6, MUL VL]\n"
- ".inst 0x45c89815 // ummla z21.s, z0.b, z8.b\n"
- ".inst 0x45c8985d // ummla z29.s, z2.b, z8.b\n"
+ ".inst 0x45c59858 // ummla z24.s, z2.b, z5.b\n"
"ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
+ ".inst 0x45c6985c // ummla z28.s, z2.b, z6.b\n"
"ld1b { z6.b }, p2/Z, [x28, #-8, MUL VL]\n"
+ ".inst 0x45c79859 // ummla z25.s, z2.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-7, MUL VL]\n"
+ ".inst 0x45c89815 // ummla z21.s, z0.b, z8.b\n"
+ ".inst 0x45c8985d // ummla z29.s, z2.b, z8.b\n"
+ "ld1b { z8.b }, p2/Z, [x28, #-6, MUL VL]\n"
".inst 0x45c99812 // ummla z18.s, z0.b, z9.b\n"
".inst 0x45c9985a // ummla z26.s, z2.b, z9.b\n"
+ "ld1b { z9.b }, p2/Z, [x28, #-5, MUL VL]\n"
".inst 0x45ca9816 // ummla z22.s, z0.b, z10.b\n"
- "ld1b { z7.b }, p2/Z, [x28, #-7, MUL VL]\n"
- "ld1b { z8.b }, p2/Z, [x28, #-6, MUL VL]\n"
".inst 0x45ca985e // ummla z30.s, z2.b, z10.b\n"
- ".inst 0x45c49813 // ummla z19.s, z0.b, z4.b\n"
- "ld1b { z9.b }, p2/Z, [x28, #-5, MUL VL]\n"
"ld1b { z10.b }, p2/Z, [x28, #-4, MUL VL]\n"
+ ".inst 0x45c49813 // ummla z19.s, z0.b, z4.b\n"
".inst 0x45c4985b // ummla z27.s, z2.b, z4.b\n"
- ".inst 0x45c59817 // ummla z23.s, z0.b, z5.b\n"
"ld1b { z4.b }, p2/Z, [x28, #-3, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ ".inst 0x45c59817 // ummla z23.s, z0.b, z5.b\n"
".inst 0x45c5985f // ummla z31.s, z2.b, z5.b\n"
- ".inst 0x45c69830 // ummla z16.s, z1.b, z6.b\n"
"ld1b { z5.b }, p2/Z, [x28, #-2, MUL VL]\n"
- "add x23, x23, #0x10\n"
+ ".inst 0x45c69830 // ummla z16.s, z1.b, z6.b\n"
".inst 0x45c69878 // ummla z24.s, z3.b, z6.b\n"
"ld1b { z6.b }, p2/Z, [x28, #-1, MUL VL]\n"
".inst 0x45c79834 // ummla z20.s, z1.b, z7.b\n"
- "add x22, x22, #0x10\n"
".inst 0x45c7987c // ummla z28.s, z3.b, z7.b\n"
".inst 0x45c89831 // ummla z17.s, z1.b, z8.b\n"
- "add x21, x21, #0x10\n"
".inst 0x45c89879 // ummla z25.s, z3.b, z8.b\n"
".inst 0x45c99835 // ummla z21.s, z1.b, z9.b\n"
".inst 0x45c9987d // ummla z29.s, z3.b, z9.b\n"
@@ -1084,38 +1084,38 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"udot z11.s, z1.b, z15.b\n"
"udot z13.s, z3.b, z15.b\n"
"50:" // Height 4: Multiply loop: unique 7: skip row sum
- "sub x25, x25, #0x10\n"
- "cmp x25, #0x10\n"
+ "sub x24, x24, #0x10\n"
+ "cmp x24, #0x10\n"
"bgt 49b\n"
"51:" // Height 4: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x25\n"
- "ld1rqb { z1.b }, p0/Z, [x24]\n"
- "ld1rqb { z2.b }, p0/Z, [x23]\n"
- "trn1 z0.d, z1.d, z2.d\n"
- "ld1rqb { z3.b }, p0/Z, [x22]\n"
- "ld1rqb { z4.b }, p0/Z, [x21]\n"
- "trn2 z1.d, z1.d, z2.d\n"
- "trn1 z2.d, z3.d, z4.d\n"
"ld1b { z5.b }, p2/Z, [x28]\n"
- "trn2 z3.d, z3.d, z4.d\n"
- ".inst 0x45c59810 // ummla z16.s, z0.b, z5.b\n"
- ".inst 0x45c59858 // ummla z24.s, z2.b, z5.b\n"
+ "whilelt p0.b, XZR, x24\n"
"ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x24, x24, #0x8\n"
+ "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ "ld1rqb { z2.b }, p0/Z, [x22]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
+ "ld1rqb { z3.b }, p0/Z, [x21]\n"
+ "trn2 z1.d, z1.d, z2.d\n"
+ "ld1rqb { z4.b }, p0/Z, [x20]\n"
"ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
- "subs x25, x25, #0x8\n"
+ ".inst 0x45c59810 // ummla z16.s, z0.b, z5.b\n"
".inst 0x45c69814 // ummla z20.s, z0.b, z6.b\n"
"ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
"ld1b { z9.b }, p2/Z, [x28, #4, MUL VL]\n"
- ".inst 0x45c6985c // ummla z28.s, z2.b, z6.b\n"
- ".inst 0x45c79811 // ummla z17.s, z0.b, z7.b\n"
+ "trn1 z2.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
"ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n"
+ ".inst 0x45c79811 // ummla z17.s, z0.b, z7.b\n"
"ld1b { z4.b }, p2/Z, [x28, #6, MUL VL]\n"
+ ".inst 0x45c59858 // ummla z24.s, z2.b, z5.b\n"
+ "ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
+ ".inst 0x45c6985c // ummla z28.s, z2.b, z6.b\n"
".inst 0x45c79859 // ummla z25.s, z2.b, z7.b\n"
".inst 0x45c89815 // ummla z21.s, z0.b, z8.b\n"
- "ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n"
".inst 0x45c8985d // ummla z29.s, z2.b, z8.b\n"
".inst 0x45c99812 // ummla z18.s, z0.b, z9.b\n"
- "addvl x28, x28, #8\n"
".inst 0x45c9985a // ummla z26.s, z2.b, z9.b\n"
".inst 0x45ca9816 // ummla z22.s, z0.b, z10.b\n"
".inst 0x45ca985e // ummla z30.s, z2.b, z10.b\n"
@@ -1126,23 +1126,23 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"ble 52f\n"
"ld1b { z6.b }, p2/Z, [x28]\n"
".inst 0x45c69830 // ummla z16.s, z1.b, z6.b\n"
- ".inst 0x45c69878 // ummla z24.s, z3.b, z6.b\n"
"ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n"
+ ".inst 0x45c69878 // ummla z24.s, z3.b, z6.b\n"
"ld1b { z8.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z9.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45c79834 // ummla z20.s, z1.b, z7.b\n"
- ".inst 0x45c7987c // ummla z28.s, z3.b, z7.b\n"
"ld1b { z10.b }, p2/Z, [x28, #4, MUL VL]\n"
+ ".inst 0x45c7987c // ummla z28.s, z3.b, z7.b\n"
"ld1b { z4.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x45c89831 // ummla z17.s, z1.b, z8.b\n"
- ".inst 0x45c89879 // ummla z25.s, z3.b, z8.b\n"
"ld1b { z5.b }, p2/Z, [x28, #6, MUL VL]\n"
+ ".inst 0x45c89879 // ummla z25.s, z3.b, z8.b\n"
"ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
".inst 0x45c99835 // ummla z21.s, z1.b, z9.b\n"
".inst 0x45c9987d // ummla z29.s, z3.b, z9.b\n"
".inst 0x45ca9832 // ummla z18.s, z1.b, z10.b\n"
".inst 0x45ca987a // ummla z26.s, z3.b, z10.b\n"
- "addvl x28, x28, #8\n"
".inst 0x45c49836 // ummla z22.s, z1.b, z4.b\n"
".inst 0x45c4987e // ummla z30.s, z3.b, z4.b\n"
".inst 0x45c59833 // ummla z19.s, z1.b, z5.b\n"
@@ -1156,19 +1156,19 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"udot z11.s, z1.b, z15.b\n"
"udot z13.s, z3.b, z15.b\n"
"53:" // Height 4: Multiply loop: unique 8: skip row sum
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x26, x26, #0x1\n"
- "cmp x26, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
"bne 46b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 z7.d, z16.d, z20.d\n"
- "add x22, x27, x20\n"
- "add x21, x22, x20\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 z16.d, z16.d, z20.d\n"
+ "add x21, x26, x19\n"
"uzp1 z20.d, z17.d, z21.d\n"
- "add x20, x21, x20\n"
"uzp2 z17.d, z17.d, z21.d\n"
+ "add x20, x21, x19\n"
"uzp1 z21.d, z18.d, z22.d\n"
+ "add x19, x20, x19\n"
"uzp2 z18.d, z18.d, z22.d\n"
"uzp1 z22.d, z19.d, z23.d\n"
"uzp2 z19.d, z19.d, z23.d\n"
@@ -1182,39 +1182,39 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"uzp2 z27.d, z27.d, z31.d\n"
"mov z31.d, z7.d\n"
"tbnz %x[flags], #31, 54f\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x23]\n"
".inst 0x4491a96b // addp z11.s, p2/m, z11.s, z11.s\n"
+ "add x22, %x[qp], %[b_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x22]\n"
".inst 0x4491a9ad // addp z13.s, p2/m, z13.s, z13.s\n"
- "neg z4.s, p2/M, z4.s\n"
"mov z12.s, z11.s[3]\n"
"mov z11.s, z11.s[0]\n"
- "mul z11.s, p2/M, z11.s, z4.s\n"
+ "neg z4.s, p2/M, z4.s\n"
"mov z14.s, z13.s[3]\n"
"mov z13.s, z13.s[0]\n"
+ "mul z11.s, p2/M, z11.s, z4.s\n"
"mul z12.s, p2/M, z12.s, z4.s\n"
"mul z13.s, p2/M, z13.s, z4.s\n"
"mul z14.s, p2/M, z14.s, z4.s\n"
"54:" // Height 4: skip row sum fixup
"add z31.s, z31.s, z11.s\n"
+ "ld1w { z0.s }, p2/Z, [x27]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"add z20.s, z20.s, z11.s\n"
- "ld1w { z0.s }, p2/Z, [x10]\n"
- "ld1w { z1.s }, p2/Z, [x10, #1, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x27, #1, MUL VL]\n"
+ "add x23, %x[qp], %[per_layer_right_shift]\n"
"add z21.s, z21.s, z11.s\n"
+ "ld1w { z2.s }, p2/Z, [x27, #2, MUL VL]\n"
+ "add x22, %x[qp], %[per_layer_mul]\n"
"add z22.s, z22.s, z11.s\n"
- "ld1w { z2.s }, p2/Z, [x10, #2, MUL VL]\n"
- "ld1w { z3.s }, p2/Z, [x10, #3, MUL VL]\n"
+ "ld1w { z3.s }, p2/Z, [x27, #3, MUL VL]\n"
+ "addvl x27, x27, #4\n"
"add z16.s, z16.s, z12.s\n"
+ "ld1rw { z4.s }, p2/Z, [x22]\n"
"add z17.s, z17.s, z12.s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
"add z18.s, z18.s, z12.s\n"
"add z19.s, z19.s, z12.s\n"
- "ld1rw { z4.s }, p2/Z, [x23]\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
"add z23.s, z23.s, z13.s\n"
"add z28.s, z28.s, z13.s\n"
- "addvl x10, x10, #4\n"
"add z29.s, z29.s, z13.s\n"
"add z30.s, z30.s, z13.s\n"
"add z24.s, z24.s, z14.s\n"
@@ -1234,8 +1234,8 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"add z29.s, z29.s, z2.s\n"
"add z30.s, z30.s, z3.s\n"
"add z24.s, z24.s, z0.s\n"
- "add z25.s, z25.s, z1.s\n"
"ld1rw { z0.s }, p2/Z, [x23]\n"
+ "add z25.s, z25.s, z1.s\n"
"add z26.s, z26.s, z2.s\n"
"add z27.s, z27.s, z3.s\n"
".inst 0x04a477ff // sqrdmulh z31.s, z31.s, z4.s\n"
@@ -1257,160 +1257,160 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"tbz %x[flags], #5, 55f\n"
"and z4.d, z31.d, z0.d\n"
"and z5.d, z20.d, z0.d\n"
+ "and z6.d, z21.d, z0.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
+ "asr z6.s, z6.s, #0x1f\n"
"sqadd z31.s, z31.s, z4.s\n"
"sqadd z20.s, z20.s, z5.s\n"
- "and z6.d, z21.d, z0.d\n"
+ "sqadd z21.s, z21.s, z6.s\n"
"and z7.d, z22.d, z0.d\n"
"and z8.d, z16.d, z0.d\n"
"and z9.d, z17.d, z0.d\n"
- "and z10.d, z18.d, z0.d\n"
- "and z4.d, z19.d, z0.d\n"
- "and z5.d, z23.d, z0.d\n"
- "asr z6.s, z6.s, #0x1f\n"
"asr z7.s, z7.s, #0x1f\n"
"asr z8.s, z8.s, #0x1f\n"
"asr z9.s, z9.s, #0x1f\n"
- "asr z10.s, z10.s, #0x1f\n"
- "asr z4.s, z4.s, #0x1f\n"
- "asr z5.s, z5.s, #0x1f\n"
- "sqadd z21.s, z21.s, z6.s\n"
"sqadd z22.s, z22.s, z7.s\n"
"sqadd z16.s, z16.s, z8.s\n"
"sqadd z17.s, z17.s, z9.s\n"
+ "and z10.d, z18.d, z0.d\n"
+ "and z4.d, z19.d, z0.d\n"
+ "and z5.d, z23.d, z0.d\n"
+ "asr z10.s, z10.s, #0x1f\n"
+ "asr z4.s, z4.s, #0x1f\n"
+ "asr z5.s, z5.s, #0x1f\n"
"sqadd z18.s, z18.s, z10.s\n"
"sqadd z19.s, z19.s, z4.s\n"
"sqadd z23.s, z23.s, z5.s\n"
"and z6.d, z28.d, z0.d\n"
"and z7.d, z29.d, z0.d\n"
"and z8.d, z30.d, z0.d\n"
- "and z9.d, z24.d, z0.d\n"
- "and z10.d, z25.d, z0.d\n"
- "and z4.d, z26.d, z0.d\n"
- "and z5.d, z27.d, z0.d\n"
"asr z6.s, z6.s, #0x1f\n"
"asr z7.s, z7.s, #0x1f\n"
"asr z8.s, z8.s, #0x1f\n"
- "asr z9.s, z9.s, #0x1f\n"
- "asr z10.s, z10.s, #0x1f\n"
- "asr z4.s, z4.s, #0x1f\n"
- "asr z5.s, z5.s, #0x1f\n"
"sqadd z28.s, z28.s, z6.s\n"
"sqadd z29.s, z29.s, z7.s\n"
"sqadd z30.s, z30.s, z8.s\n"
+ "and z9.d, z24.d, z0.d\n"
+ "and z10.d, z25.d, z0.d\n"
+ "and z4.d, z26.d, z0.d\n"
+ "asr z9.s, z9.s, #0x1f\n"
+ "asr z10.s, z10.s, #0x1f\n"
+ "asr z4.s, z4.s, #0x1f\n"
"sqadd z24.s, z24.s, z9.s\n"
"sqadd z25.s, z25.s, z10.s\n"
"sqadd z26.s, z26.s, z4.s\n"
+ "and z5.d, z27.d, z0.d\n"
+ "asr z5.s, z5.s, #0x1f\n"
"sqadd z27.s, z27.s, z5.s\n"
"55:" // Height 4: no shift correction
- "add x23, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x23]\n"
".inst 0x4482881f // srshl z31.s, p2/M, z31.s, z0.s\n"
- "add z31.s, z31.s, z4.s\n"
+ "add x22, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x22]\n"
".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
+ "add x22, %x[qp], %[minval]\n"
".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
- "add z20.s, z20.s, z4.s\n"
- "add z21.s, z21.s, z4.s\n"
+ "ld1rw { z5.s }, p2/Z, [x22]\n"
+ "add x22, %x[qp], %[maxval]\n"
".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
+ "ld1rw { z6.s }, p2/Z, [x22]\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
+ "add z31.s, z31.s, z4.s\n"
+ "add z20.s, z20.s, z4.s\n"
+ "add z21.s, z21.s, z4.s\n"
"add z22.s, z22.s, z4.s\n"
"add z16.s, z16.s, z4.s\n"
- ".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
- ".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
- "add z17.s, z17.s, z4.s\n"
- "add z18.s, z18.s, z4.s\n"
- ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
- ".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
- "add z19.s, z19.s, z4.s\n"
- "add z23.s, z23.s, z4.s\n"
- ".inst 0x4482881c // srshl z28.s, p2/M, z28.s, z0.s\n"
- ".inst 0x4482881d // srshl z29.s, p2/M, z29.s, z0.s\n"
- "add z28.s, z28.s, z4.s\n"
- "add z29.s, z29.s, z4.s\n"
- ".inst 0x4482881e // srshl z30.s, p2/M, z30.s, z0.s\n"
- ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
- "add z30.s, z30.s, z4.s\n"
- "add z24.s, z24.s, z4.s\n"
- ".inst 0x44828819 // srshl z25.s, p2/M, z25.s, z0.s\n"
- ".inst 0x4482881a // srshl z26.s, p2/M, z26.s, z0.s\n"
- "add z25.s, z25.s, z4.s\n"
- "add z26.s, z26.s, z4.s\n"
- ".inst 0x4482881b // srshl z27.s, p2/M, z27.s, z0.s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x23]\n"
- "add z27.s, z27.s, z4.s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x23]\n"
"smin z31.s, p2/M, z31.s, z6.s\n"
"smin z20.s, p2/M, z20.s, z6.s\n"
"smin z21.s, p2/M, z21.s, z6.s\n"
"smin z22.s, p2/M, z22.s, z6.s\n"
- "smin z16.s, p2/M, z16.s, z6.s\n"
- "smin z17.s, p2/M, z17.s, z6.s\n"
- "smin z18.s, p2/M, z18.s, z6.s\n"
- "smin z19.s, p2/M, z19.s, z6.s\n"
- "smin z23.s, p2/M, z23.s, z6.s\n"
- "smin z28.s, p2/M, z28.s, z6.s\n"
- "smin z29.s, p2/M, z29.s, z6.s\n"
- "smin z30.s, p2/M, z30.s, z6.s\n"
- "smin z24.s, p2/M, z24.s, z6.s\n"
- "smin z25.s, p2/M, z25.s, z6.s\n"
- "smin z26.s, p2/M, z26.s, z6.s\n"
- "smin z27.s, p2/M, z27.s, z6.s\n"
"smax z31.s, p2/M, z31.s, z5.s\n"
"smax z20.s, p2/M, z20.s, z5.s\n"
"smax z21.s, p2/M, z21.s, z5.s\n"
- "uzp1 z31.h, z31.h, z20.h\n"
"smax z22.s, p2/M, z22.s, z5.s\n"
- "smax z16.s, p2/M, z16.s, z5.s\n"
+ "smin z16.s, p2/M, z16.s, z6.s\n"
+ "uzp1 z31.h, z31.h, z20.h\n"
+ ".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
"uzp1 z20.h, z21.h, z22.h\n"
+ "smax z16.s, p2/M, z16.s, z5.s\n"
"uzp1 z31.b, z31.b, z20.b\n"
+ "st1b { z31.b }, p1, [x26]\n"
+ "add z17.s, z17.s, z4.s\n"
+ "addvl x26, x26, #1\n"
+ ".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
+ ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
+ ".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
+ "smin z17.s, p2/M, z17.s, z6.s\n"
+ ".inst 0x4482881c // srshl z28.s, p2/M, z28.s, z0.s\n"
+ "add z18.s, z18.s, z4.s\n"
+ "add z19.s, z19.s, z4.s\n"
+ "add z23.s, z23.s, z4.s\n"
+ "add z28.s, z28.s, z4.s\n"
"smax z17.s, p2/M, z17.s, z5.s\n"
- "smax z18.s, p2/M, z18.s, z5.s\n"
+ "smin z18.s, p2/M, z18.s, z6.s\n"
+ "smin z19.s, p2/M, z19.s, z6.s\n"
+ "smin z23.s, p2/M, z23.s, z6.s\n"
"uzp1 z16.h, z16.h, z17.h\n"
- "st1b { z31.b }, p1, [x27]\n"
+ "smax z18.s, p2/M, z18.s, z5.s\n"
"smax z19.s, p2/M, z19.s, z5.s\n"
"smax z23.s, p2/M, z23.s, z5.s\n"
+ "smin z28.s, p2/M, z28.s, z6.s\n"
+ ".inst 0x4482881d // srshl z29.s, p2/M, z29.s, z0.s\n"
"uzp1 z17.h, z18.h, z19.h\n"
+ ".inst 0x4482881e // srshl z30.s, p2/M, z30.s, z0.s\n"
"uzp1 z16.b, z16.b, z17.b\n"
+ "st1b { z16.b }, p1, [x21]\n"
+ "add z29.s, z29.s, z4.s\n"
"smax z28.s, p2/M, z28.s, z5.s\n"
- "smax z29.s, p2/M, z29.s, z5.s\n"
+ "add z30.s, z30.s, z4.s\n"
+ ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
+ "smin z29.s, p2/M, z29.s, z6.s\n"
"uzp1 z23.h, z23.h, z28.h\n"
- "st1b { z16.b }, p1, [x22]\n"
+ "smin z30.s, p2/M, z30.s, z6.s\n"
+ "add z24.s, z24.s, z4.s\n"
+ "smax z29.s, p2/M, z29.s, z5.s\n"
+ ".inst 0x44828819 // srshl z25.s, p2/M, z25.s, z0.s\n"
"smax z30.s, p2/M, z30.s, z5.s\n"
- "smax z24.s, p2/M, z24.s, z5.s\n"
+ "smin z24.s, p2/M, z24.s, z6.s\n"
+ ".inst 0x4482881a // srshl z26.s, p2/M, z26.s, z0.s\n"
+ "add z25.s, z25.s, z4.s\n"
"uzp1 z28.h, z29.h, z30.h\n"
+ "smax z24.s, p2/M, z24.s, z5.s\n"
+ "add z26.s, z26.s, z4.s\n"
"uzp1 z23.b, z23.b, z28.b\n"
+ "st1b { z23.b }, p1, [x20]\n"
+ "smin z25.s, p2/M, z25.s, z6.s\n"
+ "smin z26.s, p2/M, z26.s, z6.s\n"
+ ".inst 0x4482881b // srshl z27.s, p2/M, z27.s, z0.s\n"
"smax z25.s, p2/M, z25.s, z5.s\n"
"smax z26.s, p2/M, z26.s, z5.s\n"
+ "add z27.s, z27.s, z4.s\n"
"uzp1 z24.h, z24.h, z25.h\n"
- "st1b { z23.b }, p1, [x21]\n"
+ "smin z27.s, p2/M, z27.s, z6.s\n"
"smax z27.s, p2/M, z27.s, z5.s\n"
"uzp1 z25.h, z26.h, z27.h\n"
"uzp1 z24.b, z24.b, z25.b\n"
- "st1b { z24.b }, p1, [x20]\n"
- "addvl x27, x27, #1\n"
+ "st1b { z24.b }, p1, [x19]\n"
"56:" // Height 4: Writeback done
"decw x9, ALL, MUL #4\n"
"cmp x9, XZR\n"
"bgt 44b\n"
"subs %x[M], %x[M], #0x4\n"
"beq 58f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 57f\n"
- "add x21, x21, #0x4\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x4\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"57:" // Update direct input
- "mov x20, #0x4\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x4\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"58:" // Exit
: [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
- : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
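
For readers tracing the quantized epilogue in the kernel above: the sequence from the "skip row sum fixup" labels down to the st1b stores is a standard fixed-point requantization. Each 32-bit UMMLA accumulator receives the row-sum correction (udot against the all-ones z15, negated and scaled by b_offset) plus the column bias, is scaled via sqrdmulh with per_layer_mul, rounding-right-shifted via srshl with per_layer_right_shift, offset by c_offset, clamped to [minval, maxval], and narrowed to bytes with uzp1. Below is a minimal scalar sketch of one output element, assuming gemmlowp-style rounding semantics; the names sqrdmulh_scalar, rounding_rshift, and requantize_u8 are illustrative only, not ComputeLibrary API.

#include <algorithm>
#include <cstdint>

// Scalar model of SQRDMULH: saturating, rounding, doubling high half.
static int32_t sqrdmulh_scalar(int32_t a, int32_t b)
{
    if (a == INT32_MIN && b == INT32_MIN) return INT32_MAX; // the only saturating case
    const int64_t p = static_cast<int64_t>(a) * b;
    return static_cast<int32_t>((2 * p + (1LL << 31)) >> 32);
}

// Scalar model of SRSHL by a negated amount: rounding right shift. 'shift' is
// the positive right-shift count (the asm passes its negation to srshl). The
// and/asr/sqadd "shift correction" block in the asm nudges negative inputs so
// the rounding matches round-half-away-from-zero; that refinement is omitted
// here for brevity.
static int32_t rounding_rshift(int32_t v, int shift)
{
    if (shift <= 0) return v;
    return static_cast<int32_t>((static_cast<int64_t>(v) + (1LL << (shift - 1))) >> shift);
}

// One output element of the epilogue. 'acc' is a raw UMMLA accumulator and
// 'row_sum_fixup' is the udot-against-ones row sum already multiplied by
// -b_offset, as computed under the "skip row sum fixup" branches above.
static uint8_t requantize_u8(int32_t acc, int32_t col_bias, int32_t row_sum_fixup,
                             int32_t per_layer_mul, int per_layer_right_shift,
                             int32_t c_offset, int32_t minval, int32_t maxval)
{
    int32_t v = acc + row_sum_fixup + col_bias;    // add z*.s, z11.s / z0.s..z3.s
    v = sqrdmulh_scalar(v, per_layer_mul);         // sqrdmulh
    v = rounding_rshift(v, per_layer_right_shift); // srshl
    v += c_offset;                                 // add z*.s, z4.s
    v = std::clamp(v, minval, maxval);             // smin / smax
    return static_cast<uint8_t>(v);                // uzp1 .s->.h->.b narrowing + st1b
}
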
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL/a64fx.cpp
index a7dbef329e..11f9165a3f 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL/a64fx.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL/a64fx.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -87,23 +87,23 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"cmp %x[M], #0x2\n"
"bgt 21f\n"
"beq 11f\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "mov x20, #0x0\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "incw x20\n"
- "whilelt p0.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p0.s, x19, x10\n"
"tbz %x[flags], #0, 3f\n"
- "ld1w { z8.s }, p3/Z, [x9]\n"
- "ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
+ "ld1w { z8.s }, p3/Z, [x28]\n"
+ "ld1w { z9.s }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p1/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z11.s }, p0/Z, [x28, #3, MUL VL]\n"
"b 4f\n"
"3:" // Height 1: no accumulate
"mov z8.s, #0x0\n"
@@ -111,87 +111,87 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"4:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"5:" // Height 1: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 6f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 7f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 7f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
"b 7f\n"
"6:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"7:" // Height 1: input setup done
- "subs x27, x27, #0x4\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1b { z6.b }, p4/Z, [x10]\n"
- "ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1b { z6.b }, p4/Z, [x9]\n"
+ "ld1b { z7.b }, p4/Z, [x9, #1, MUL VL]\n"
"ble 9f\n"
"8:" // Height 1: Multiply loop: Main loop
"udot z8.s, z6.b, z0.b\n"
"udot z9.s, z7.b, z0.b\n"
- "ld1b { z6.b }, p4/Z, [x10, #2, MUL VL]\n"
- "ld1b { z7.b }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
- "add x26, x26, #0x4\n"
+ "ld1b { z6.b }, p4/Z, [x9, #2, MUL VL]\n"
+ "ld1b { z7.b }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
+ "add x25, x25, #0x4\n"
"udot z10.s, z6.b, z0.b\n"
"udot z11.s, z7.b, z0.b\n"
- "subs x27, x27, #0x4\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1b { z6.b }, p4/Z, [x10]\n"
- "ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1b { z6.b }, p4/Z, [x9]\n"
+ "ld1b { z7.b }, p4/Z, [x9, #1, MUL VL]\n"
"bgt 8b\n"
"9:" // Height 1: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"udot z8.s, z6.b, z0.b\n"
"udot z9.s, z7.b, z0.b\n"
- "ld1b { z6.b }, p4/Z, [x10, #2, MUL VL]\n"
- "ld1b { z7.b }, p4/Z, [x10, #3, MUL VL]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ld1b { z6.b }, p4/Z, [x9, #2, MUL VL]\n"
+ "ld1b { z7.b }, p4/Z, [x9, #3, MUL VL]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"udot z10.s, z6.b, z0.b\n"
"udot z11.s, z7.b, z0.b\n"
- "addvl x10, x10, #4\n"
- "bne 5b\n"
- "st1w { z8.s }, p3, [x9]\n"
- "st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p1, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p0, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
+ "bne 5b\n"
+ "st1w { z8.s }, p3, [x28]\n"
+ "st1w { z9.s }, p2, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p1, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p0, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"10:" // Height 1: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 2b\n"
"b 62f\n"
"11:" // Height 2
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"12:" // Height 2: Column loop
- "mov x20, #0x0\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "incw x20\n"
- "whilelt p0.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p0.s, x19, x10\n"
"tbz %x[flags], #0, 13f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "ld1w { z8.s }, p3/Z, [x9]\n"
- "ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p3/Z, [x24]\n"
- "ld1w { z13.s }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1w { z14.s }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1w { z15.s }, p0/Z, [x24, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "ld1w { z8.s }, p3/Z, [x28]\n"
+ "ld1w { z9.s }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p1/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z11.s }, p0/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x23]\n"
+ "ld1w { z13.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z14.s }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p0/Z, [x23, #3, MUL VL]\n"
"b 14f\n"
"13:" // Height 2: no accumulate
"mov z8.s, #0x0\n"
@@ -203,112 +203,112 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
"14:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"15:" // Height 2: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 16f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 17f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 17f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
"b 17f\n"
"16:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
"17:" // Height 2: input setup done
- "subs x27, x27, #0x4\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
- "ld1b { z6.b }, p4/Z, [x10]\n"
- "ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1b { z6.b }, p4/Z, [x9]\n"
+ "ld1b { z7.b }, p4/Z, [x9, #1, MUL VL]\n"
"ble 19f\n"
"18:" // Height 2: Multiply loop: Main loop
"udot z8.s, z6.b, z0.b\n"
"udot z12.s, z6.b, z1.b\n"
- "ld1b { z6.b }, p4/Z, [x10, #2, MUL VL]\n"
- "add x26, x26, #0x4\n"
+ "ld1b { z6.b }, p4/Z, [x9, #2, MUL VL]\n"
+ "add x25, x25, #0x4\n"
"udot z9.s, z7.b, z0.b\n"
"udot z13.s, z7.b, z1.b\n"
- "ld1b { z7.b }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
- "subs x27, x27, #0x4\n"
- "add x25, x25, #0x4\n"
+ "ld1b { z7.b }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
+ "subs x26, x26, #0x4\n"
+ "add x24, x24, #0x4\n"
"udot z10.s, z6.b, z0.b\n"
"udot z14.s, z6.b, z1.b\n"
"udot z11.s, z7.b, z0.b\n"
"udot z15.s, z7.b, z1.b\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
- "ld1b { z6.b }, p4/Z, [x10]\n"
- "ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1b { z6.b }, p4/Z, [x9]\n"
+ "ld1b { z7.b }, p4/Z, [x9, #1, MUL VL]\n"
"bgt 18b\n"
"19:" // Height 2: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"udot z8.s, z6.b, z0.b\n"
"udot z12.s, z6.b, z1.b\n"
- "ld1b { z6.b }, p4/Z, [x10, #2, MUL VL]\n"
+ "ld1b { z6.b }, p4/Z, [x9, #2, MUL VL]\n"
"udot z9.s, z7.b, z0.b\n"
"udot z13.s, z7.b, z1.b\n"
- "ld1b { z7.b }, p4/Z, [x10, #3, MUL VL]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ld1b { z7.b }, p4/Z, [x9, #3, MUL VL]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"udot z10.s, z6.b, z0.b\n"
"udot z14.s, z6.b, z1.b\n"
- "addvl x10, x10, #4\n"
+ "addvl x9, x9, #4\n"
"udot z11.s, z7.b, z0.b\n"
"udot z15.s, z7.b, z1.b\n"
"bne 15b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "st1w { z8.s }, p3, [x9]\n"
- "st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p1, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p0, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p3, [x24]\n"
- "st1w { z13.s }, p2, [x24, #1, MUL VL]\n"
- "st1w { z14.s }, p1, [x24, #2, MUL VL]\n"
- "st1w { z15.s }, p0, [x24, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "st1w { z8.s }, p3, [x28]\n"
+ "st1w { z9.s }, p2, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p1, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p0, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z12.s }, p3, [x23]\n"
+ "st1w { z13.s }, p2, [x23, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x23, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x23, #3, MUL VL]\n"
"20:" // Height 2: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 12b\n"
"b 62f\n"
"21:" // Height 3
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"22:" // Height 3: Column loop
- "mov x20, #0x0\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "incw x20\n"
- "whilelt p0.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p0.s, x19, x10\n"
"tbz %x[flags], #0, 23f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z8.s }, p3/Z, [x9]\n"
- "ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p3/Z, [x24]\n"
- "ld1w { z13.s }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1w { z14.s }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1w { z15.s }, p0/Z, [x24, #3, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x23]\n"
- "ld1w { z17.s }, p2/Z, [x23, #1, MUL VL]\n"
- "ld1w { z18.s }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1w { z19.s }, p0/Z, [x23, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z8.s }, p3/Z, [x28]\n"
+ "ld1w { z9.s }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p1/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z11.s }, p0/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x23]\n"
+ "ld1w { z13.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z14.s }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z16.s }, p3/Z, [x22]\n"
+ "ld1w { z17.s }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z18.s }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z19.s }, p0/Z, [x22, #3, MUL VL]\n"
"b 24f\n"
"23:" // Height 3: no accumulate
"mov z8.s, #0x0\n"
@@ -324,74 +324,74 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"24:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"25:" // Height 3: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 26f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 27f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 27f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
"b 27f\n"
"26:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
"27:" // Height 3: input setup done
- "subs x27, x27, #0x4\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
- "ld1rw { z2.s }, p4/Z, [x24]\n"
- "ld1b { z6.b }, p4/Z, [x10]\n"
- "ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1b { z6.b }, p4/Z, [x9]\n"
+ "ld1b { z7.b }, p4/Z, [x9, #1, MUL VL]\n"
"ble 29f\n"
"28:" // Height 3: Multiply loop: Main loop
"udot z8.s, z6.b, z0.b\n"
"udot z12.s, z6.b, z1.b\n"
- "add x26, x26, #0x4\n"
- "subs x27, x27, #0x4\n"
+ "add x25, x25, #0x4\n"
+ "subs x26, x26, #0x4\n"
"udot z16.s, z6.b, z2.b\n"
"udot z9.s, z7.b, z0.b\n"
- "ld1b { z6.b }, p4/Z, [x10, #2, MUL VL]\n"
- "add x25, x25, #0x4\n"
+ "ld1b { z6.b }, p4/Z, [x9, #2, MUL VL]\n"
+ "add x24, x24, #0x4\n"
"udot z13.s, z7.b, z1.b\n"
"udot z17.s, z7.b, z2.b\n"
- "ld1b { z7.b }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
- "add x24, x24, #0x4\n"
+ "ld1b { z7.b }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
+ "add x23, x23, #0x4\n"
"udot z10.s, z6.b, z0.b\n"
"udot z14.s, z6.b, z1.b\n"
"udot z18.s, z6.b, z2.b\n"
"udot z11.s, z7.b, z0.b\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1b { z6.b }, p4/Z, [x10]\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1b { z6.b }, p4/Z, [x9]\n"
"udot z15.s, z7.b, z1.b\n"
"udot z19.s, z7.b, z2.b\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
- "ld1rw { z2.s }, p4/Z, [x24]\n"
- "ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1b { z7.b }, p4/Z, [x9, #1, MUL VL]\n"
"bgt 28b\n"
"29:" // Height 3: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"udot z8.s, z6.b, z0.b\n"
"udot z12.s, z6.b, z1.b\n"
- "add x28, x28, #0x1\n"
+ "add x27, x27, #0x1\n"
"udot z16.s, z6.b, z2.b\n"
"udot z9.s, z7.b, z0.b\n"
- "ld1b { z6.b }, p4/Z, [x10, #2, MUL VL]\n"
- "cmp x28, x20\n"
+ "ld1b { z6.b }, p4/Z, [x9, #2, MUL VL]\n"
+ "cmp x27, x19\n"
"udot z13.s, z7.b, z1.b\n"
"udot z17.s, z7.b, z2.b\n"
- "ld1b { z7.b }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"udot z10.s, z6.b, z0.b\n"
"udot z14.s, z6.b, z1.b\n"
"udot z18.s, z6.b, z2.b\n"
@@ -399,61 +399,61 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"udot z15.s, z7.b, z1.b\n"
"udot z19.s, z7.b, z2.b\n"
"bne 25b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "st1w { z8.s }, p3, [x9]\n"
- "st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p1, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p0, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p3, [x24]\n"
- "st1w { z13.s }, p2, [x24, #1, MUL VL]\n"
- "st1w { z14.s }, p1, [x24, #2, MUL VL]\n"
- "st1w { z15.s }, p0, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p3, [x23]\n"
- "st1w { z17.s }, p2, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p1, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p0, [x23, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "st1w { z8.s }, p3, [x28]\n"
+ "st1w { z9.s }, p2, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p1, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p0, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z12.s }, p3, [x23]\n"
+ "st1w { z13.s }, p2, [x23, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x23, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x23, #3, MUL VL]\n"
+ "st1w { z16.s }, p3, [x22]\n"
+ "st1w { z17.s }, p2, [x22, #1, MUL VL]\n"
+ "st1w { z18.s }, p1, [x22, #2, MUL VL]\n"
+ "st1w { z19.s }, p0, [x22, #3, MUL VL]\n"
"30:" // Height 3: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 22b\n"
"b 62f\n"
"31:" // Height 4
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"32:" // Height 4: Column loop
- "mov x20, #0x0\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "incw x20\n"
- "whilelt p0.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p0.s, x19, x10\n"
"tbz %x[flags], #0, 33f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z8.s }, p3/Z, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p3/Z, [x24]\n"
- "ld1w { z13.s }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1w { z14.s }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1w { z15.s }, p0/Z, [x24, #3, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x23]\n"
- "ld1w { z17.s }, p2/Z, [x23, #1, MUL VL]\n"
- "ld1w { z18.s }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1w { z19.s }, p0/Z, [x23, #3, MUL VL]\n"
- "ld1w { z20.s }, p3/Z, [x22]\n"
- "ld1w { z21.s }, p2/Z, [x22, #1, MUL VL]\n"
- "ld1w { z22.s }, p1/Z, [x22, #2, MUL VL]\n"
- "ld1w { z23.s }, p0/Z, [x22, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z8.s }, p3/Z, [x28]\n"
+ "ld1w { z9.s }, p2/Z, [x28, #1, MUL VL]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z10.s }, p1/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z11.s }, p0/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x23]\n"
+ "ld1w { z13.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z14.s }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z16.s }, p3/Z, [x22]\n"
+ "ld1w { z17.s }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z18.s }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z19.s }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z20.s }, p3/Z, [x21]\n"
+ "ld1w { z21.s }, p2/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z22.s }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z23.s }, p0/Z, [x21, #3, MUL VL]\n"
"b 34f\n"
"33:" // Height 4: no accumulate
"mov z8.s, #0x0\n"
@@ -473,86 +473,86 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"34:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"35:" // Height 4: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 36f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 37f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 37f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
"b 37f\n"
"36:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
"37:" // Height 4: input setup done
- "subs x27, x27, #0x4\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
- "ld1rw { z2.s }, p4/Z, [x24]\n"
- "ld1rw { z3.s }, p4/Z, [x23]\n"
- "ld1b { z6.b }, p4/Z, [x10]\n"
- "ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1rw { z3.s }, p4/Z, [x22]\n"
+ "ld1b { z6.b }, p4/Z, [x9]\n"
+ "ld1b { z7.b }, p4/Z, [x9, #1, MUL VL]\n"
"ble 39f\n"
"38:" // Height 4: Multiply loop: Main loop
"udot z8.s, z6.b, z0.b\n"
"udot z12.s, z6.b, z1.b\n"
- "add x26, x26, #0x4\n"
- "subs x27, x27, #0x4\n"
+ "add x25, x25, #0x4\n"
+ "subs x26, x26, #0x4\n"
"udot z16.s, z6.b, z2.b\n"
"udot z20.s, z6.b, z3.b\n"
- "ld1b { z6.b }, p4/Z, [x10, #2, MUL VL]\n"
- "add x25, x25, #0x4\n"
+ "ld1b { z6.b }, p4/Z, [x9, #2, MUL VL]\n"
+ "add x24, x24, #0x4\n"
"udot z9.s, z7.b, z0.b\n"
"udot z13.s, z7.b, z1.b\n"
- "add x24, x24, #0x4\n"
"add x23, x23, #0x4\n"
+ "add x22, x22, #0x4\n"
"udot z17.s, z7.b, z2.b\n"
"udot z21.s, z7.b, z3.b\n"
- "ld1b { z7.b }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"udot z10.s, z6.b, z0.b\n"
"udot z14.s, z6.b, z1.b\n"
"udot z18.s, z6.b, z2.b\n"
"udot z22.s, z6.b, z3.b\n"
- "ld1b { z6.b }, p4/Z, [x10]\n"
+ "ld1b { z6.b }, p4/Z, [x9]\n"
"udot z11.s, z7.b, z0.b\n"
"udot z15.s, z7.b, z1.b\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
"udot z19.s, z7.b, z2.b\n"
"udot z23.s, z7.b, z3.b\n"
- "ld1rw { z2.s }, p4/Z, [x24]\n"
- "ld1rw { z3.s }, p4/Z, [x23]\n"
- "ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1rw { z3.s }, p4/Z, [x22]\n"
+ "ld1b { z7.b }, p4/Z, [x9, #1, MUL VL]\n"
"bgt 38b\n"
"39:" // Height 4: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"udot z8.s, z6.b, z0.b\n"
"udot z12.s, z6.b, z1.b\n"
- "add x28, x28, #0x1\n"
+ "add x27, x27, #0x1\n"
"udot z16.s, z6.b, z2.b\n"
"udot z20.s, z6.b, z3.b\n"
- "ld1b { z6.b }, p4/Z, [x10, #2, MUL VL]\n"
- "cmp x28, x20\n"
+ "ld1b { z6.b }, p4/Z, [x9, #2, MUL VL]\n"
+ "cmp x27, x19\n"
"udot z9.s, z7.b, z0.b\n"
"udot z13.s, z7.b, z1.b\n"
"udot z17.s, z7.b, z2.b\n"
"udot z21.s, z7.b, z3.b\n"
- "ld1b { z7.b }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"udot z10.s, z6.b, z0.b\n"
"udot z14.s, z6.b, z1.b\n"
"udot z18.s, z6.b, z2.b\n"
@@ -562,71 +562,71 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"udot z19.s, z7.b, z2.b\n"
"udot z23.s, z7.b, z3.b\n"
"bne 35b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "st1w { z8.s }, p3, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p1, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p0, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p3, [x24]\n"
- "st1w { z13.s }, p2, [x24, #1, MUL VL]\n"
- "st1w { z14.s }, p1, [x24, #2, MUL VL]\n"
- "st1w { z15.s }, p0, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p3, [x23]\n"
- "st1w { z17.s }, p2, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p1, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p0, [x23, #3, MUL VL]\n"
- "st1w { z20.s }, p3, [x22]\n"
- "st1w { z21.s }, p2, [x22, #1, MUL VL]\n"
- "st1w { z22.s }, p1, [x22, #2, MUL VL]\n"
- "st1w { z23.s }, p0, [x22, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "st1w { z8.s }, p3, [x28]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "st1w { z9.s }, p2, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p1, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p0, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z12.s }, p3, [x23]\n"
+ "st1w { z13.s }, p2, [x23, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x23, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x23, #3, MUL VL]\n"
+ "st1w { z16.s }, p3, [x22]\n"
+ "st1w { z17.s }, p2, [x22, #1, MUL VL]\n"
+ "st1w { z18.s }, p1, [x22, #2, MUL VL]\n"
+ "st1w { z19.s }, p0, [x22, #3, MUL VL]\n"
+ "st1w { z20.s }, p3, [x21]\n"
+ "st1w { z21.s }, p2, [x21, #1, MUL VL]\n"
+ "st1w { z22.s }, p1, [x21, #2, MUL VL]\n"
+ "st1w { z23.s }, p0, [x21, #3, MUL VL]\n"
"40:" // Height 4: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 32b\n"
"b 62f\n"
"41:" // Height 5
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"42:" // Height 5: Column loop
- "mov x20, #0x0\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "incw x20\n"
- "whilelt p0.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p0.s, x19, x10\n"
"tbz %x[flags], #0, 43f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z8.s }, p3/Z, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p3/Z, [x24]\n"
- "ld1w { z13.s }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1w { z14.s }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1w { z15.s }, p0/Z, [x24, #3, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x23]\n"
- "ld1w { z17.s }, p2/Z, [x23, #1, MUL VL]\n"
- "ld1w { z18.s }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1w { z19.s }, p0/Z, [x23, #3, MUL VL]\n"
- "ld1w { z20.s }, p3/Z, [x22]\n"
- "ld1w { z21.s }, p2/Z, [x22, #1, MUL VL]\n"
- "ld1w { z22.s }, p1/Z, [x22, #2, MUL VL]\n"
- "ld1w { z23.s }, p0/Z, [x22, #3, MUL VL]\n"
- "ld1w { z24.s }, p3/Z, [x21]\n"
- "ld1w { z25.s }, p2/Z, [x21, #1, MUL VL]\n"
- "ld1w { z26.s }, p1/Z, [x21, #2, MUL VL]\n"
- "ld1w { z27.s }, p0/Z, [x21, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z8.s }, p3/Z, [x28]\n"
+ "ld1w { z9.s }, p2/Z, [x28, #1, MUL VL]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
+ "ld1w { z10.s }, p1/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z11.s }, p0/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x23]\n"
+ "ld1w { z13.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z14.s }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z16.s }, p3/Z, [x22]\n"
+ "ld1w { z17.s }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z18.s }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z19.s }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z20.s }, p3/Z, [x21]\n"
+ "ld1w { z21.s }, p2/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z22.s }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z23.s }, p0/Z, [x21, #3, MUL VL]\n"
+ "ld1w { z24.s }, p3/Z, [x20]\n"
+ "ld1w { z25.s }, p2/Z, [x20, #1, MUL VL]\n"
+ "ld1w { z26.s }, p1/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z27.s }, p0/Z, [x20, #3, MUL VL]\n"
"b 44f\n"
"43:" // Height 5: no accumulate
"mov z8.s, #0x0\n"
@@ -650,98 +650,98 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"mov z26.s, #0x0\n"
"mov z27.s, #0x0\n"
"44:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"45:" // Height 5: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 46f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 47f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 47f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
"b 47f\n"
"46:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
"47:" // Height 5: input setup done
- "subs x27, x27, #0x4\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
- "ld1rw { z2.s }, p4/Z, [x24]\n"
- "ld1rw { z3.s }, p4/Z, [x23]\n"
- "ld1rw { z4.s }, p4/Z, [x22]\n"
- "ld1b { z6.b }, p4/Z, [x10]\n"
- "ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1rw { z3.s }, p4/Z, [x22]\n"
+ "ld1rw { z4.s }, p4/Z, [x21]\n"
+ "ld1b { z6.b }, p4/Z, [x9]\n"
+ "ld1b { z7.b }, p4/Z, [x9, #1, MUL VL]\n"
"ble 49f\n"
"48:" // Height 5: Multiply loop: Main loop
"udot z8.s, z6.b, z0.b\n"
"udot z12.s, z6.b, z1.b\n"
- "add x26, x26, #0x4\n"
- "subs x27, x27, #0x4\n"
+ "add x25, x25, #0x4\n"
+ "subs x26, x26, #0x4\n"
"udot z16.s, z6.b, z2.b\n"
"udot z20.s, z6.b, z3.b\n"
- "add x25, x25, #0x4\n"
"add x24, x24, #0x4\n"
+ "add x23, x23, #0x4\n"
"udot z24.s, z6.b, z4.b\n"
"udot z9.s, z7.b, z0.b\n"
- "ld1b { z6.b }, p4/Z, [x10, #2, MUL VL]\n"
- "add x23, x23, #0x4\n"
+ "ld1b { z6.b }, p4/Z, [x9, #2, MUL VL]\n"
+ "add x22, x22, #0x4\n"
"udot z13.s, z7.b, z1.b\n"
"udot z17.s, z7.b, z2.b\n"
- "add x22, x22, #0x4\n"
+ "add x21, x21, #0x4\n"
"udot z21.s, z7.b, z3.b\n"
"udot z25.s, z7.b, z4.b\n"
- "ld1b { z7.b }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"udot z10.s, z6.b, z0.b\n"
"udot z14.s, z6.b, z1.b\n"
"udot z18.s, z6.b, z2.b\n"
"udot z22.s, z6.b, z3.b\n"
"udot z26.s, z6.b, z4.b\n"
"udot z11.s, z7.b, z0.b\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1b { z6.b }, p4/Z, [x10]\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1b { z6.b }, p4/Z, [x9]\n"
"udot z15.s, z7.b, z1.b\n"
"udot z19.s, z7.b, z2.b\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
- "ld1rw { z2.s }, p4/Z, [x24]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
"udot z23.s, z7.b, z3.b\n"
"udot z27.s, z7.b, z4.b\n"
- "ld1rw { z3.s }, p4/Z, [x23]\n"
- "ld1rw { z4.s }, p4/Z, [x22]\n"
- "ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
+ "ld1rw { z3.s }, p4/Z, [x22]\n"
+ "ld1rw { z4.s }, p4/Z, [x21]\n"
+ "ld1b { z7.b }, p4/Z, [x9, #1, MUL VL]\n"
"bgt 48b\n"
"49:" // Height 5: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"udot z8.s, z6.b, z0.b\n"
"udot z12.s, z6.b, z1.b\n"
- "add x28, x28, #0x1\n"
+ "add x27, x27, #0x1\n"
"udot z16.s, z6.b, z2.b\n"
"udot z20.s, z6.b, z3.b\n"
- "cmp x28, x20\n"
+ "cmp x27, x19\n"
"udot z24.s, z6.b, z4.b\n"
"udot z9.s, z7.b, z0.b\n"
- "ld1b { z6.b }, p4/Z, [x10, #2, MUL VL]\n"
+ "ld1b { z6.b }, p4/Z, [x9, #2, MUL VL]\n"
"udot z13.s, z7.b, z1.b\n"
"udot z17.s, z7.b, z2.b\n"
"udot z21.s, z7.b, z3.b\n"
"udot z25.s, z7.b, z4.b\n"
- "ld1b { z7.b }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"udot z10.s, z6.b, z0.b\n"
"udot z14.s, z6.b, z1.b\n"
"udot z18.s, z6.b, z2.b\n"
@@ -753,84 +753,84 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"udot z23.s, z7.b, z3.b\n"
"udot z27.s, z7.b, z4.b\n"
"bne 45b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "st1w { z8.s }, p3, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p1, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p0, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p3, [x24]\n"
- "st1w { z13.s }, p2, [x24, #1, MUL VL]\n"
- "st1w { z14.s }, p1, [x24, #2, MUL VL]\n"
- "st1w { z15.s }, p0, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p3, [x23]\n"
- "st1w { z17.s }, p2, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p1, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p0, [x23, #3, MUL VL]\n"
- "st1w { z20.s }, p3, [x22]\n"
- "st1w { z21.s }, p2, [x22, #1, MUL VL]\n"
- "st1w { z22.s }, p1, [x22, #2, MUL VL]\n"
- "st1w { z23.s }, p0, [x22, #3, MUL VL]\n"
- "st1w { z24.s }, p3, [x21]\n"
- "st1w { z25.s }, p2, [x21, #1, MUL VL]\n"
- "st1w { z26.s }, p1, [x21, #2, MUL VL]\n"
- "st1w { z27.s }, p0, [x21, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "st1w { z8.s }, p3, [x28]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
+ "st1w { z9.s }, p2, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p1, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p0, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z12.s }, p3, [x23]\n"
+ "st1w { z13.s }, p2, [x23, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x23, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x23, #3, MUL VL]\n"
+ "st1w { z16.s }, p3, [x22]\n"
+ "st1w { z17.s }, p2, [x22, #1, MUL VL]\n"
+ "st1w { z18.s }, p1, [x22, #2, MUL VL]\n"
+ "st1w { z19.s }, p0, [x22, #3, MUL VL]\n"
+ "st1w { z20.s }, p3, [x21]\n"
+ "st1w { z21.s }, p2, [x21, #1, MUL VL]\n"
+ "st1w { z22.s }, p1, [x21, #2, MUL VL]\n"
+ "st1w { z23.s }, p0, [x21, #3, MUL VL]\n"
+ "st1w { z24.s }, p3, [x20]\n"
+ "st1w { z25.s }, p2, [x20, #1, MUL VL]\n"
+ "st1w { z26.s }, p1, [x20, #2, MUL VL]\n"
+ "st1w { z27.s }, p0, [x20, #3, MUL VL]\n"
"50:" // Height 5: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 42b\n"
"b 62f\n"
"51:" // Height 6
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x18\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x9, %x[output_ptr]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x19, #0x18\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "madd %x[output_ptr], x20, x19, %x[output_ptr]\n"
"52:" // Height 6: Column loop
- "mov x20, #0x0\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
- "incw x20\n"
- "whilelt p0.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p0.s, x19, x10\n"
"tbz %x[flags], #0, 53f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z8.s }, p3/Z, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
- "add x20, x21, x20, LSL #2\n"
- "ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p3/Z, [x24]\n"
- "ld1w { z13.s }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1w { z14.s }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1w { z15.s }, p0/Z, [x24, #3, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x23]\n"
- "ld1w { z17.s }, p2/Z, [x23, #1, MUL VL]\n"
- "ld1w { z18.s }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1w { z19.s }, p0/Z, [x23, #3, MUL VL]\n"
- "ld1w { z20.s }, p3/Z, [x22]\n"
- "ld1w { z21.s }, p2/Z, [x22, #1, MUL VL]\n"
- "ld1w { z22.s }, p1/Z, [x22, #2, MUL VL]\n"
- "ld1w { z23.s }, p0/Z, [x22, #3, MUL VL]\n"
- "ld1w { z24.s }, p3/Z, [x21]\n"
- "ld1w { z25.s }, p2/Z, [x21, #1, MUL VL]\n"
- "ld1w { z26.s }, p1/Z, [x21, #2, MUL VL]\n"
- "ld1w { z27.s }, p0/Z, [x21, #3, MUL VL]\n"
- "ld1w { z28.s }, p3/Z, [x20]\n"
- "ld1w { z29.s }, p2/Z, [x20, #1, MUL VL]\n"
- "ld1w { z30.s }, p1/Z, [x20, #2, MUL VL]\n"
- "ld1w { z31.s }, p0/Z, [x20, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z8.s }, p3/Z, [x28]\n"
+ "ld1w { z9.s }, p2/Z, [x28, #1, MUL VL]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
+ "ld1w { z10.s }, p1/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z11.s }, p0/Z, [x28, #3, MUL VL]\n"
+ "add x19, x20, x19, LSL #2\n"
+ "ld1w { z12.s }, p3/Z, [x23]\n"
+ "ld1w { z13.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z14.s }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z16.s }, p3/Z, [x22]\n"
+ "ld1w { z17.s }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z18.s }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z19.s }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z20.s }, p3/Z, [x21]\n"
+ "ld1w { z21.s }, p2/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z22.s }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z23.s }, p0/Z, [x21, #3, MUL VL]\n"
+ "ld1w { z24.s }, p3/Z, [x20]\n"
+ "ld1w { z25.s }, p2/Z, [x20, #1, MUL VL]\n"
+ "ld1w { z26.s }, p1/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z27.s }, p0/Z, [x20, #3, MUL VL]\n"
+ "ld1w { z28.s }, p3/Z, [x19]\n"
+ "ld1w { z29.s }, p2/Z, [x19, #1, MUL VL]\n"
+ "ld1w { z30.s }, p1/Z, [x19, #2, MUL VL]\n"
+ "ld1w { z31.s }, p0/Z, [x19, #3, MUL VL]\n"
"b 54f\n"
"53:" // Height 6: no accumulate
"mov z8.s, #0x0\n"
@@ -858,110 +858,110 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"mov z30.s, #0x0\n"
"mov z31.s, #0x0\n"
"54:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"55:" // Height 6: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 56f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 57f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
- "add x21, x21, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 57f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
+ "add x20, x20, x19\n"
"b 57f\n"
"56:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
+ "add x20, x21, x19\n"
"57:" // Height 6: input setup done
- "subs x27, x27, #0x4\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
- "ld1rw { z2.s }, p4/Z, [x24]\n"
- "ld1rw { z3.s }, p4/Z, [x23]\n"
- "ld1rw { z4.s }, p4/Z, [x22]\n"
- "ld1rw { z5.s }, p4/Z, [x21]\n"
- "ld1b { z6.b }, p4/Z, [x10]\n"
- "ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1rw { z3.s }, p4/Z, [x22]\n"
+ "ld1rw { z4.s }, p4/Z, [x21]\n"
+ "ld1rw { z5.s }, p4/Z, [x20]\n"
+ "ld1b { z6.b }, p4/Z, [x9]\n"
+ "ld1b { z7.b }, p4/Z, [x9, #1, MUL VL]\n"
"ble 59f\n"
"58:" // Height 6: Multiply loop: Main loop
"udot z8.s, z6.b, z0.b\n"
"udot z12.s, z6.b, z1.b\n"
- "add x26, x26, #0x4\n"
- "subs x27, x27, #0x4\n"
+ "add x25, x25, #0x4\n"
+ "subs x26, x26, #0x4\n"
"udot z16.s, z6.b, z2.b\n"
"udot z20.s, z6.b, z3.b\n"
- "add x25, x25, #0x4\n"
"add x24, x24, #0x4\n"
+ "add x23, x23, #0x4\n"
"udot z24.s, z6.b, z4.b\n"
"udot z28.s, z6.b, z5.b\n"
- "ld1b { z6.b }, p4/Z, [x10, #2, MUL VL]\n"
- "add x23, x23, #0x4\n"
+ "ld1b { z6.b }, p4/Z, [x9, #2, MUL VL]\n"
+ "add x22, x22, #0x4\n"
"udot z9.s, z7.b, z0.b\n"
"udot z13.s, z7.b, z1.b\n"
- "add x22, x22, #0x4\n"
"add x21, x21, #0x4\n"
+ "add x20, x20, #0x4\n"
"udot z17.s, z7.b, z2.b\n"
"udot z21.s, z7.b, z3.b\n"
"udot z25.s, z7.b, z4.b\n"
"udot z29.s, z7.b, z5.b\n"
- "ld1b { z7.b }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"udot z10.s, z6.b, z0.b\n"
"udot z14.s, z6.b, z1.b\n"
"udot z18.s, z6.b, z2.b\n"
"udot z22.s, z6.b, z3.b\n"
"udot z26.s, z6.b, z4.b\n"
"udot z30.s, z6.b, z5.b\n"
- "ld1b { z6.b }, p4/Z, [x10]\n"
+ "ld1b { z6.b }, p4/Z, [x9]\n"
"udot z11.s, z7.b, z0.b\n"
"udot z15.s, z7.b, z1.b\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1rw { z1.s }, p4/Z, [x25]\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
"udot z19.s, z7.b, z2.b\n"
"udot z23.s, z7.b, z3.b\n"
- "ld1rw { z2.s }, p4/Z, [x24]\n"
- "ld1rw { z3.s }, p4/Z, [x23]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1rw { z3.s }, p4/Z, [x22]\n"
"udot z27.s, z7.b, z4.b\n"
"udot z31.s, z7.b, z5.b\n"
- "ld1rw { z4.s }, p4/Z, [x22]\n"
- "ld1rw { z5.s }, p4/Z, [x21]\n"
- "ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
+ "ld1rw { z4.s }, p4/Z, [x21]\n"
+ "ld1rw { z5.s }, p4/Z, [x20]\n"
+ "ld1b { z7.b }, p4/Z, [x9, #1, MUL VL]\n"
"bgt 58b\n"
"59:" // Height 6: Multiply loop: Main loop skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
"udot z8.s, z6.b, z0.b\n"
"udot z12.s, z6.b, z1.b\n"
- "add x28, x28, #0x1\n"
+ "add x27, x27, #0x1\n"
"udot z16.s, z6.b, z2.b\n"
"udot z20.s, z6.b, z3.b\n"
- "cmp x28, x20\n"
+ "cmp x27, x19\n"
"udot z24.s, z6.b, z4.b\n"
"udot z28.s, z6.b, z5.b\n"
- "ld1b { z6.b }, p4/Z, [x10, #2, MUL VL]\n"
+ "ld1b { z6.b }, p4/Z, [x9, #2, MUL VL]\n"
"udot z9.s, z7.b, z0.b\n"
"udot z13.s, z7.b, z1.b\n"
"udot z17.s, z7.b, z2.b\n"
"udot z21.s, z7.b, z3.b\n"
"udot z25.s, z7.b, z4.b\n"
"udot z29.s, z7.b, z5.b\n"
- "ld1b { z7.b }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p4/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"udot z10.s, z6.b, z0.b\n"
"udot z14.s, z6.b, z1.b\n"
"udot z18.s, z6.b, z2.b\n"
@@ -975,57 +975,57 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"udot z27.s, z7.b, z4.b\n"
"udot z31.s, z7.b, z5.b\n"
"bne 55b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "st1w { z8.s }, p3, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
- "add x20, x21, x20, LSL #2\n"
- "st1w { z10.s }, p1, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p0, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p3, [x24]\n"
- "st1w { z13.s }, p2, [x24, #1, MUL VL]\n"
- "st1w { z14.s }, p1, [x24, #2, MUL VL]\n"
- "st1w { z15.s }, p0, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p3, [x23]\n"
- "st1w { z17.s }, p2, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p1, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p0, [x23, #3, MUL VL]\n"
- "st1w { z20.s }, p3, [x22]\n"
- "st1w { z21.s }, p2, [x22, #1, MUL VL]\n"
- "st1w { z22.s }, p1, [x22, #2, MUL VL]\n"
- "st1w { z23.s }, p0, [x22, #3, MUL VL]\n"
- "st1w { z24.s }, p3, [x21]\n"
- "st1w { z25.s }, p2, [x21, #1, MUL VL]\n"
- "st1w { z26.s }, p1, [x21, #2, MUL VL]\n"
- "st1w { z27.s }, p0, [x21, #3, MUL VL]\n"
- "st1w { z28.s }, p3, [x20]\n"
- "st1w { z29.s }, p2, [x20, #1, MUL VL]\n"
- "st1w { z30.s }, p1, [x20, #2, MUL VL]\n"
- "st1w { z31.s }, p0, [x20, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "st1w { z8.s }, p3, [x28]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
+ "st1w { z9.s }, p2, [x28, #1, MUL VL]\n"
+ "add x19, x20, x19, LSL #2\n"
+ "st1w { z10.s }, p1, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p0, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z12.s }, p3, [x23]\n"
+ "st1w { z13.s }, p2, [x23, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x23, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x23, #3, MUL VL]\n"
+ "st1w { z16.s }, p3, [x22]\n"
+ "st1w { z17.s }, p2, [x22, #1, MUL VL]\n"
+ "st1w { z18.s }, p1, [x22, #2, MUL VL]\n"
+ "st1w { z19.s }, p0, [x22, #3, MUL VL]\n"
+ "st1w { z20.s }, p3, [x21]\n"
+ "st1w { z21.s }, p2, [x21, #1, MUL VL]\n"
+ "st1w { z22.s }, p1, [x21, #2, MUL VL]\n"
+ "st1w { z23.s }, p0, [x21, #3, MUL VL]\n"
+ "st1w { z24.s }, p3, [x20]\n"
+ "st1w { z25.s }, p2, [x20, #1, MUL VL]\n"
+ "st1w { z26.s }, p1, [x20, #2, MUL VL]\n"
+ "st1w { z27.s }, p0, [x20, #3, MUL VL]\n"
+ "st1w { z28.s }, p3, [x19]\n"
+ "st1w { z29.s }, p2, [x19, #1, MUL VL]\n"
+ "st1w { z30.s }, p1, [x19, #2, MUL VL]\n"
+ "st1w { z31.s }, p0, [x19, #3, MUL VL]\n"
"60:" // Height 6: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 52b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 62f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 61f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"61:" // Update direct input
- "mov x20, #0x6\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x6\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"62:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x10", "x11", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x10", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
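
[Editorial note, not part of the patch: the a64fx hunks above pair "whilelt" predicates over the remaining output columns with "udot", which accumulates each group of four u8 products into one u32 lane. A rough standalone sketch of that access pattern using SVE ACLE intrinsics follows; it assumes a B panel packed and padded to whole vectors as arm_gemm does, k a multiple of 4, and the name udot_row is hypothetical.]

#include <arm_sve.h>   // requires an SVE-enabled target, e.g. -march=armv8.2-a+sve
#include <cstdint>
#include <cstring>

// One row of A (length k) times a packed B panel, writing n u32 outputs.
void udot_row(const uint8_t *a, const uint8_t *b_panel,
              uint32_t *c, int64_t n, int64_t k) {
    for (int64_t col = 0; col < n; col += (int64_t)svcntw()) {
        svbool_t pg = svwhilelt_b32_s64(col, n);  // mirrors "whilelt pN.s, xN, xN"
        svuint32_t acc = svdup_n_u32(0);          // mirrors "mov zN.s, #0x0"
        for (int64_t kk = 0; kk < k; kk += 4) {
            uint32_t a4;
            std::memcpy(&a4, a + kk, sizeof(a4)); // 4 consecutive A bytes
            svuint8_t va = svreinterpret_u8_u32(svdup_n_u32(a4)); // "ld1rw { z0.s }"
            // Full-vector B load: safe only because the panel is padded.
            svuint8_t vb = svld1_u8(svptrue_b8(), b_panel);       // "ld1b { z6.b }"
            b_panel += svcntb();                  // mirrors "addvl xN, xN, #1"
            acc = svdot_u32(acc, vb, va);         // "udot zN.s, zN.b, zN.b"
        }
        svst1_u32(pg, c + col, acc);              // predicated "st1w"
    }
}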
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL/generic.cpp
index 30a108af7e..fc8bdb50a9 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -87,23 +87,23 @@ void sve_hybrid_u8u32_dot_6x4VL (
"cmp %x[M], #0x2\n"
"bgt 23f\n"
"beq 12f\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
"tbz %x[flags], #0, 3f\n"
- "ld1w { z8.s }, p4/Z, [x9]\n"
- "ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "ld1w { z8.s }, p4/Z, [x28]\n"
+ "ld1w { z9.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z11.s }, p1/Z, [x28, #3, MUL VL]\n"
"b 4f\n"
"3:" // Height 1: no accumulate
"mov z8.s, #0x0\n"
@@ -111,148 +111,148 @@ void sve_hybrid_u8u32_dot_6x4VL (
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"4:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"5:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 6f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 7f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 7f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
"b 7f\n"
"6:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"7:" // Height 1: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 9f\n"
"8:" // Height 1: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"udot z8.s, z6.b, z0.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
+ "cmp x26, #0x10\n"
"udot z9.s, z7.b, z0.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "add x25, x25, #0x10\n"
"udot z10.s, z6.b, z0.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #4, MUL VL]\n"
"udot z11.s, z7.b, z0.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #4, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #5, MUL VL]\n"
"udot z8.s, z6.b, z0.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #6, MUL VL]\n"
"udot z9.s, z7.b, z0.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #16\n"
"udot z10.s, z6.b, z0.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-8, MUL VL]\n"
"udot z11.s, z7.b, z0.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-8, MUL VL]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-7, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-7, MUL VL]\n"
"udot z8.s, z6.b, z0.b[2]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-6, MUL VL]\n"
"udot z9.s, z7.b, z0.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-6, MUL VL]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-5, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-5, MUL VL]\n"
"udot z10.s, z6.b, z0.b[2]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-4, MUL VL]\n"
"udot z11.s, z7.b, z0.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-4, MUL VL]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-3, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-3, MUL VL]\n"
"udot z8.s, z6.b, z0.b[3]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-2, MUL VL]\n"
"udot z9.s, z7.b, z0.b[3]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-2, MUL VL]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-1, MUL VL]\n"
- "sub x27, x27, #0x10\n"
- "cmp x27, #0x10\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-1, MUL VL]\n"
"udot z10.s, z6.b, z0.b[3]\n"
"udot z11.s, z7.b, z0.b[3]\n"
- "add x26, x26, #0x10\n"
"bgt 8b\n"
"9:" // Height 1: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"udot z8.s, z6.b, z0.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"udot z9.s, z7.b, z0.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x4\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"udot z10.s, z6.b, z0.b[0]\n"
"udot z11.s, z7.b, z0.b[0]\n"
- "addvl x10, x10, #4\n"
"ble 10f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"udot z8.s, z6.b, z0.b[1]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"udot z9.s, z7.b, z0.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
"udot z10.s, z6.b, z0.b[1]\n"
+ "addvl x9, x9, #4\n"
"udot z11.s, z7.b, z0.b[1]\n"
- "addvl x10, x10, #4\n"
"ble 10f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"udot z8.s, z6.b, z0.b[2]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"udot z9.s, z7.b, z0.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
"udot z10.s, z6.b, z0.b[2]\n"
+ "addvl x9, x9, #4\n"
"udot z11.s, z7.b, z0.b[2]\n"
- "addvl x10, x10, #4\n"
"ble 10f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"udot z8.s, z6.b, z0.b[3]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"udot z9.s, z7.b, z0.b[3]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"udot z10.s, z6.b, z0.b[3]\n"
"udot z11.s, z7.b, z0.b[3]\n"
- "addvl x10, x10, #4\n"
"10:" // Height 1: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 5b\n"
- "st1w { z8.s }, p4, [x9]\n"
- "st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "st1w { z8.s }, p4, [x28]\n"
+ "st1w { z9.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"11:" // Height 1: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 2b\n"
"b 68f\n"
"12:" // Height 2
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"13:" // Height 2: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
"tbz %x[flags], #0, 14f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "ld1w { z8.s }, p4/Z, [x9]\n"
- "ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x24]\n"
- "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z8.s }, p4/Z, [x28]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "ld1w { z9.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z11.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x23]\n"
+ "ld1w { z13.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x23, #3, MUL VL]\n"
"b 15f\n"
"14:" // Height 2: no accumulate
"mov z8.s, #0x0\n"
@@ -264,197 +264,197 @@ void sve_hybrid_u8u32_dot_6x4VL (
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
"15:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"16:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 17f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 18f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 18f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
"b 18f\n"
"17:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
"18:" // Height 2: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 20f\n"
"19:" // Height 2: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
- "sub x27, x27, #0x10\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"udot z8.s, z6.b, z0.b[0]\n"
- "udot z12.s, z6.b, z1.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "cmp x26, #0x10\n"
"udot z9.s, z7.b, z0.b[0]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "udot z12.s, z6.b, z1.b[0]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"udot z13.s, z7.b, z1.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
"udot z10.s, z6.b, z0.b[0]\n"
"udot z14.s, z6.b, z1.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #4, MUL VL]\n"
- "cmp x27, #0x10\n"
+ "ld1b { z6.b }, p5/Z, [x9, #4, MUL VL]\n"
"udot z11.s, z7.b, z0.b[0]\n"
"udot z15.s, z7.b, z1.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #5, MUL VL]\n"
- "add x26, x26, #0x10\n"
+ "ld1b { z7.b }, p5/Z, [x9, #5, MUL VL]\n"
"udot z8.s, z6.b, z0.b[1]\n"
"udot z12.s, z6.b, z1.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #6, MUL VL]\n"
- "add x25, x25, #0x10\n"
+ "ld1b { z6.b }, p5/Z, [x9, #6, MUL VL]\n"
"udot z9.s, z7.b, z0.b[1]\n"
"udot z13.s, z7.b, z1.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
+ "ld1b { z7.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #16\n"
"udot z10.s, z6.b, z0.b[1]\n"
"udot z14.s, z6.b, z1.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-8, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-8, MUL VL]\n"
"udot z11.s, z7.b, z0.b[1]\n"
"udot z15.s, z7.b, z1.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-7, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-7, MUL VL]\n"
"udot z8.s, z6.b, z0.b[2]\n"
"udot z12.s, z6.b, z1.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-6, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-6, MUL VL]\n"
"udot z9.s, z7.b, z0.b[2]\n"
"udot z13.s, z7.b, z1.b[2]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-5, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-5, MUL VL]\n"
"udot z10.s, z6.b, z0.b[2]\n"
"udot z14.s, z6.b, z1.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-4, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-4, MUL VL]\n"
"udot z11.s, z7.b, z0.b[2]\n"
"udot z15.s, z7.b, z1.b[2]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-3, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-3, MUL VL]\n"
"udot z8.s, z6.b, z0.b[3]\n"
"udot z12.s, z6.b, z1.b[3]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-2, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-2, MUL VL]\n"
"udot z9.s, z7.b, z0.b[3]\n"
"udot z13.s, z7.b, z1.b[3]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-1, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-1, MUL VL]\n"
"udot z10.s, z6.b, z0.b[3]\n"
"udot z14.s, z6.b, z1.b[3]\n"
"udot z11.s, z7.b, z0.b[3]\n"
"udot z15.s, z7.b, z1.b[3]\n"
"bgt 19b\n"
"20:" // Height 2: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
- "subs x27, x27, #0x4\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"udot z8.s, z6.b, z0.b[0]\n"
- "udot z12.s, z6.b, z1.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
"udot z9.s, z7.b, z0.b[0]\n"
+ "udot z12.s, z6.b, z1.b[0]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"udot z13.s, z7.b, z1.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"udot z10.s, z6.b, z0.b[0]\n"
"udot z14.s, z6.b, z1.b[0]\n"
- "addvl x10, x10, #4\n"
"udot z11.s, z7.b, z0.b[0]\n"
"udot z15.s, z7.b, z1.b[0]\n"
"ble 21f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"udot z8.s, z6.b, z0.b[1]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"udot z12.s, z6.b, z1.b[1]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"udot z9.s, z7.b, z0.b[1]\n"
"udot z13.s, z7.b, z1.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"udot z10.s, z6.b, z0.b[1]\n"
"udot z14.s, z6.b, z1.b[1]\n"
- "addvl x10, x10, #4\n"
"udot z11.s, z7.b, z0.b[1]\n"
"udot z15.s, z7.b, z1.b[1]\n"
"ble 21f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"udot z8.s, z6.b, z0.b[2]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"udot z12.s, z6.b, z1.b[2]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"udot z9.s, z7.b, z0.b[2]\n"
"udot z13.s, z7.b, z1.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"udot z10.s, z6.b, z0.b[2]\n"
"udot z14.s, z6.b, z1.b[2]\n"
- "addvl x10, x10, #4\n"
"udot z11.s, z7.b, z0.b[2]\n"
"udot z15.s, z7.b, z1.b[2]\n"
"ble 21f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"udot z8.s, z6.b, z0.b[3]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
"udot z12.s, z6.b, z1.b[3]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"udot z9.s, z7.b, z0.b[3]\n"
"udot z13.s, z7.b, z1.b[3]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"udot z10.s, z6.b, z0.b[3]\n"
"udot z14.s, z6.b, z1.b[3]\n"
- "addvl x10, x10, #4\n"
"udot z11.s, z7.b, z0.b[3]\n"
"udot z15.s, z7.b, z1.b[3]\n"
"21:" // Height 2: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 16b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "st1w { z8.s }, p4, [x9]\n"
- "st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p4, [x24]\n"
- "st1w { z13.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x24, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "st1w { z8.s }, p4, [x28]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "st1w { z9.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z12.s }, p4, [x23]\n"
+ "st1w { z13.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x23, #3, MUL VL]\n"
"22:" // Height 2: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 13b\n"
"b 68f\n"
"23:" // Height 3
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"24:" // Height 3: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
"tbz %x[flags], #0, 25f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z8.s }, p4/Z, [x9]\n"
- "ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x24]\n"
- "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z16.s }, p4/Z, [x23]\n"
- "ld1w { z17.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x23, #2, MUL VL]\n"
- "ld1w { z19.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z8.s }, p4/Z, [x28]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "ld1w { z9.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z11.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x23]\n"
+ "ld1w { z13.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x22]\n"
+ "ld1w { z17.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z19.s }, p1/Z, [x22, #3, MUL VL]\n"
"b 26f\n"
"25:" // Height 3: no accumulate
"mov z8.s, #0x0\n"
@@ -470,99 +470,99 @@ void sve_hybrid_u8u32_dot_6x4VL (
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"26:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"27:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 28f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 29f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 29f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
"b 29f\n"
"28:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
"29:" // Height 3: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 31f\n"
"30:" // Height 3: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
- "sub x27, x27, #0x10\n"
- "ld1rqb { z2.b }, p0/Z, [x24]\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"udot z8.s, z6.b, z0.b[0]\n"
- "udot z12.s, z6.b, z1.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
- "udot z16.s, z6.b, z2.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "cmp x26, #0x10\n"
"udot z9.s, z7.b, z0.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "add x25, x25, #0x10\n"
+ "udot z12.s, z6.b, z1.b[0]\n"
+ "add x24, x24, #0x10\n"
"udot z13.s, z7.b, z1.b[0]\n"
+ "add x23, x23, #0x10\n"
+ "udot z16.s, z6.b, z2.b[0]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"udot z17.s, z7.b, z2.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "cmp x27, #0x10\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
"udot z10.s, z6.b, z0.b[0]\n"
"udot z14.s, z6.b, z1.b[0]\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
"udot z18.s, z6.b, z2.b[0]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #4, MUL VL]\n"
"udot z11.s, z7.b, z0.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #4, MUL VL]\n"
- "add x24, x24, #0x10\n"
"udot z15.s, z7.b, z1.b[0]\n"
"udot z19.s, z7.b, z2.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #5, MUL VL]\n"
"udot z8.s, z6.b, z0.b[1]\n"
"udot z12.s, z6.b, z1.b[1]\n"
"udot z16.s, z6.b, z2.b[1]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #6, MUL VL]\n"
"udot z9.s, z7.b, z0.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #6, MUL VL]\n"
"udot z13.s, z7.b, z1.b[1]\n"
"udot z17.s, z7.b, z2.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
+ "ld1b { z7.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #16\n"
"udot z10.s, z6.b, z0.b[1]\n"
"udot z14.s, z6.b, z1.b[1]\n"
"udot z18.s, z6.b, z2.b[1]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-8, MUL VL]\n"
"udot z11.s, z7.b, z0.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-8, MUL VL]\n"
"udot z15.s, z7.b, z1.b[1]\n"
"udot z19.s, z7.b, z2.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-7, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-7, MUL VL]\n"
"udot z8.s, z6.b, z0.b[2]\n"
"udot z12.s, z6.b, z1.b[2]\n"
"udot z16.s, z6.b, z2.b[2]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-6, MUL VL]\n"
"udot z9.s, z7.b, z0.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-6, MUL VL]\n"
"udot z13.s, z7.b, z1.b[2]\n"
"udot z17.s, z7.b, z2.b[2]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-5, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-5, MUL VL]\n"
"udot z10.s, z6.b, z0.b[2]\n"
"udot z14.s, z6.b, z1.b[2]\n"
"udot z18.s, z6.b, z2.b[2]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-4, MUL VL]\n"
"udot z11.s, z7.b, z0.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-4, MUL VL]\n"
"udot z15.s, z7.b, z1.b[2]\n"
"udot z19.s, z7.b, z2.b[2]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-3, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-3, MUL VL]\n"
"udot z8.s, z6.b, z0.b[3]\n"
"udot z12.s, z6.b, z1.b[3]\n"
"udot z16.s, z6.b, z2.b[3]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-2, MUL VL]\n"
"udot z9.s, z7.b, z0.b[3]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-2, MUL VL]\n"
"udot z13.s, z7.b, z1.b[3]\n"
"udot z17.s, z7.b, z2.b[3]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-1, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-1, MUL VL]\n"
"udot z10.s, z6.b, z0.b[3]\n"
"udot z14.s, z6.b, z1.b[3]\n"
"udot z18.s, z6.b, z2.b[3]\n"
@@ -571,22 +571,22 @@ void sve_hybrid_u8u32_dot_6x4VL (
"udot z19.s, z7.b, z2.b[3]\n"
"bgt 30b\n"
"31:" // Height 3: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
- "subs x27, x27, #0x4\n"
- "ld1rqb { z2.b }, p0/Z, [x24]\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"udot z8.s, z6.b, z0.b[0]\n"
- "udot z12.s, z6.b, z1.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
- "udot z16.s, z6.b, z2.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
"udot z9.s, z7.b, z0.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "udot z12.s, z6.b, z1.b[0]\n"
"udot z13.s, z7.b, z1.b[0]\n"
+ "udot z16.s, z6.b, z2.b[0]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"udot z17.s, z7.b, z2.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"udot z10.s, z6.b, z0.b[0]\n"
"udot z14.s, z6.b, z1.b[0]\n"
"udot z18.s, z6.b, z2.b[0]\n"
@@ -594,18 +594,18 @@ void sve_hybrid_u8u32_dot_6x4VL (
"udot z15.s, z7.b, z1.b[0]\n"
"udot z19.s, z7.b, z2.b[0]\n"
"ble 32f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"udot z8.s, z6.b, z0.b[1]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"udot z12.s, z6.b, z1.b[1]\n"
"udot z16.s, z6.b, z2.b[1]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"udot z9.s, z7.b, z0.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x4\n"
"udot z13.s, z7.b, z1.b[1]\n"
"udot z17.s, z7.b, z2.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"udot z10.s, z6.b, z0.b[1]\n"
"udot z14.s, z6.b, z1.b[1]\n"
"udot z18.s, z6.b, z2.b[1]\n"
@@ -613,18 +613,18 @@ void sve_hybrid_u8u32_dot_6x4VL (
"udot z15.s, z7.b, z1.b[1]\n"
"udot z19.s, z7.b, z2.b[1]\n"
"ble 32f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"udot z8.s, z6.b, z0.b[2]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"udot z12.s, z6.b, z1.b[2]\n"
"udot z16.s, z6.b, z2.b[2]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"udot z9.s, z7.b, z0.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x4\n"
"udot z13.s, z7.b, z1.b[2]\n"
"udot z17.s, z7.b, z2.b[2]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"udot z10.s, z6.b, z0.b[2]\n"
"udot z14.s, z6.b, z1.b[2]\n"
"udot z18.s, z6.b, z2.b[2]\n"
@@ -632,17 +632,17 @@ void sve_hybrid_u8u32_dot_6x4VL (
"udot z15.s, z7.b, z1.b[2]\n"
"udot z19.s, z7.b, z2.b[2]\n"
"ble 32f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"udot z8.s, z6.b, z0.b[3]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
"udot z12.s, z6.b, z1.b[3]\n"
"udot z16.s, z6.b, z2.b[3]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"udot z9.s, z7.b, z0.b[3]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
"udot z13.s, z7.b, z1.b[3]\n"
"udot z17.s, z7.b, z2.b[3]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"udot z10.s, z6.b, z0.b[3]\n"
"udot z14.s, z6.b, z1.b[3]\n"
"udot z18.s, z6.b, z2.b[3]\n"
@@ -650,65 +650,65 @@ void sve_hybrid_u8u32_dot_6x4VL (
"udot z15.s, z7.b, z1.b[3]\n"
"udot z19.s, z7.b, z2.b[3]\n"
"32:" // Height 3: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 27b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "st1w { z8.s }, p4, [x9]\n"
- "st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p4, [x24]\n"
- "st1w { z13.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x23]\n"
- "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "st1w { z8.s }, p4, [x28]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "st1w { z9.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x28, #2, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "st1w { z11.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z12.s }, p4, [x23]\n"
+ "st1w { z13.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x22]\n"
+ "st1w { z17.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x22, #3, MUL VL]\n"
"33:" // Height 3: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 24b\n"
"b 68f\n"
"34:" // Height 4
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"35:" // Height 4: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
"tbz %x[flags], #0, 36f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z8.s }, p4/Z, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x24]\n"
- "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z16.s }, p4/Z, [x23]\n"
- "ld1w { z17.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x23, #2, MUL VL]\n"
- "ld1w { z19.s }, p1/Z, [x23, #3, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x22]\n"
- "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
- "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
- "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z8.s }, p4/Z, [x28]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "ld1w { z9.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z11.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z12.s }, p4/Z, [x23]\n"
+ "ld1w { z13.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x22]\n"
+ "ld1w { z17.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z19.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z20.s }, p4/Z, [x21]\n"
+ "ld1w { z21.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z23.s }, p1/Z, [x21, #3, MUL VL]\n"
"b 37f\n"
"36:" // Height 4: no accumulate
"mov z8.s, #0x0\n"
@@ -728,118 +728,118 @@ void sve_hybrid_u8u32_dot_6x4VL (
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"37:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"38:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 39f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 40f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 40f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
"b 40f\n"
"39:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
"40:" // Height 4: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 42f\n"
"41:" // Height 4: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
- "sub x27, x27, #0x10\n"
- "ld1rqb { z2.b }, p0/Z, [x24]\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "cmp x27, #0x10\n"
- "add x26, x26, #0x10\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"udot z8.s, z6.b, z0.b[0]\n"
- "udot z12.s, z6.b, z1.b[0]\n"
- "udot z16.s, z6.b, z2.b[0]\n"
- "udot z20.s, z6.b, z3.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "add x25, x25, #0x10\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "cmp x26, #0x10\n"
"udot z9.s, z7.b, z0.b[0]\n"
- "udot z13.s, z7.b, z1.b[0]\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "add x25, x25, #0x10\n"
+ "udot z12.s, z6.b, z1.b[0]\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
"add x24, x24, #0x10\n"
+ "udot z16.s, z6.b, z2.b[0]\n"
"add x23, x23, #0x10\n"
+ "udot z13.s, z7.b, z1.b[0]\n"
+ "add x22, x22, #0x10\n"
"udot z17.s, z7.b, z2.b[0]\n"
+ "udot z20.s, z6.b, z3.b[0]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"udot z21.s, z7.b, z3.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
"udot z10.s, z6.b, z0.b[0]\n"
"udot z14.s, z6.b, z1.b[0]\n"
"udot z18.s, z6.b, z2.b[0]\n"
"udot z22.s, z6.b, z3.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #4, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #4, MUL VL]\n"
"udot z11.s, z7.b, z0.b[0]\n"
"udot z15.s, z7.b, z1.b[0]\n"
"udot z19.s, z7.b, z2.b[0]\n"
"udot z23.s, z7.b, z3.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #5, MUL VL]\n"
"udot z8.s, z6.b, z0.b[1]\n"
"udot z12.s, z6.b, z1.b[1]\n"
"udot z16.s, z6.b, z2.b[1]\n"
"udot z20.s, z6.b, z3.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #6, MUL VL]\n"
"udot z9.s, z7.b, z0.b[1]\n"
"udot z13.s, z7.b, z1.b[1]\n"
"udot z17.s, z7.b, z2.b[1]\n"
"udot z21.s, z7.b, z3.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
+ "ld1b { z7.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #16\n"
"udot z10.s, z6.b, z0.b[1]\n"
"udot z14.s, z6.b, z1.b[1]\n"
"udot z18.s, z6.b, z2.b[1]\n"
"udot z22.s, z6.b, z3.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-8, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-8, MUL VL]\n"
"udot z11.s, z7.b, z0.b[1]\n"
"udot z15.s, z7.b, z1.b[1]\n"
"udot z19.s, z7.b, z2.b[1]\n"
"udot z23.s, z7.b, z3.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-7, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-7, MUL VL]\n"
"udot z8.s, z6.b, z0.b[2]\n"
"udot z12.s, z6.b, z1.b[2]\n"
"udot z16.s, z6.b, z2.b[2]\n"
"udot z20.s, z6.b, z3.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-6, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-6, MUL VL]\n"
"udot z9.s, z7.b, z0.b[2]\n"
"udot z13.s, z7.b, z1.b[2]\n"
"udot z17.s, z7.b, z2.b[2]\n"
"udot z21.s, z7.b, z3.b[2]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-5, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-5, MUL VL]\n"
"udot z10.s, z6.b, z0.b[2]\n"
"udot z14.s, z6.b, z1.b[2]\n"
"udot z18.s, z6.b, z2.b[2]\n"
"udot z22.s, z6.b, z3.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-4, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-4, MUL VL]\n"
"udot z11.s, z7.b, z0.b[2]\n"
"udot z15.s, z7.b, z1.b[2]\n"
"udot z19.s, z7.b, z2.b[2]\n"
"udot z23.s, z7.b, z3.b[2]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-3, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-3, MUL VL]\n"
"udot z8.s, z6.b, z0.b[3]\n"
"udot z12.s, z6.b, z1.b[3]\n"
"udot z16.s, z6.b, z2.b[3]\n"
"udot z20.s, z6.b, z3.b[3]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-2, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-2, MUL VL]\n"
"udot z9.s, z7.b, z0.b[3]\n"
"udot z13.s, z7.b, z1.b[3]\n"
"udot z17.s, z7.b, z2.b[3]\n"
"udot z21.s, z7.b, z3.b[3]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-1, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-1, MUL VL]\n"
"udot z10.s, z6.b, z0.b[3]\n"
"udot z14.s, z6.b, z1.b[3]\n"
"udot z18.s, z6.b, z2.b[3]\n"
@@ -850,25 +850,25 @@ void sve_hybrid_u8u32_dot_6x4VL (
"udot z23.s, z7.b, z3.b[3]\n"
"bgt 41b\n"
"42:" // Height 4: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
- "subs x27, x27, #0x4\n"
- "ld1rqb { z2.b }, p0/Z, [x24]\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"udot z8.s, z6.b, z0.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "udot z9.s, z7.b, z0.b[0]\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
"udot z12.s, z6.b, z1.b[0]\n"
+ "udot z13.s, z7.b, z1.b[0]\n"
"udot z16.s, z6.b, z2.b[0]\n"
"udot z20.s, z6.b, z3.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "udot z9.s, z7.b, z0.b[0]\n"
- "udot z13.s, z7.b, z1.b[0]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"udot z17.s, z7.b, z2.b[0]\n"
"udot z21.s, z7.b, z3.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"udot z10.s, z6.b, z0.b[0]\n"
"udot z14.s, z6.b, z1.b[0]\n"
"udot z18.s, z6.b, z2.b[0]\n"
@@ -878,20 +878,20 @@ void sve_hybrid_u8u32_dot_6x4VL (
"udot z19.s, z7.b, z2.b[0]\n"
"udot z23.s, z7.b, z3.b[0]\n"
"ble 43f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"udot z8.s, z6.b, z0.b[1]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"udot z12.s, z6.b, z1.b[1]\n"
"udot z16.s, z6.b, z2.b[1]\n"
"udot z20.s, z6.b, z3.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"udot z9.s, z7.b, z0.b[1]\n"
"udot z13.s, z7.b, z1.b[1]\n"
"udot z17.s, z7.b, z2.b[1]\n"
"udot z21.s, z7.b, z3.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"udot z10.s, z6.b, z0.b[1]\n"
"udot z14.s, z6.b, z1.b[1]\n"
"udot z18.s, z6.b, z2.b[1]\n"
@@ -901,20 +901,20 @@ void sve_hybrid_u8u32_dot_6x4VL (
"udot z19.s, z7.b, z2.b[1]\n"
"udot z23.s, z7.b, z3.b[1]\n"
"ble 43f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"udot z8.s, z6.b, z0.b[2]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"udot z12.s, z6.b, z1.b[2]\n"
"udot z16.s, z6.b, z2.b[2]\n"
"udot z20.s, z6.b, z3.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"udot z9.s, z7.b, z0.b[2]\n"
"udot z13.s, z7.b, z1.b[2]\n"
"udot z17.s, z7.b, z2.b[2]\n"
"udot z21.s, z7.b, z3.b[2]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"udot z10.s, z6.b, z0.b[2]\n"
"udot z14.s, z6.b, z1.b[2]\n"
"udot z18.s, z6.b, z2.b[2]\n"
@@ -924,19 +924,19 @@ void sve_hybrid_u8u32_dot_6x4VL (
"udot z19.s, z7.b, z2.b[2]\n"
"udot z23.s, z7.b, z3.b[2]\n"
"ble 43f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"udot z8.s, z6.b, z0.b[3]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
"udot z12.s, z6.b, z1.b[3]\n"
"udot z16.s, z6.b, z2.b[3]\n"
"udot z20.s, z6.b, z3.b[3]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"udot z9.s, z7.b, z0.b[3]\n"
"udot z13.s, z7.b, z1.b[3]\n"
"udot z17.s, z7.b, z2.b[3]\n"
"udot z21.s, z7.b, z3.b[3]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"udot z10.s, z6.b, z0.b[3]\n"
"udot z14.s, z6.b, z1.b[3]\n"
"udot z18.s, z6.b, z2.b[3]\n"
@@ -946,75 +946,75 @@ void sve_hybrid_u8u32_dot_6x4VL (
"udot z19.s, z7.b, z2.b[3]\n"
"udot z23.s, z7.b, z3.b[3]\n"
"43:" // Height 4: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 38b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "st1w { z8.s }, p4, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p4, [x24]\n"
- "st1w { z13.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x23]\n"
- "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z20.s }, p4, [x22]\n"
- "st1w { z21.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z22.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z23.s }, p1, [x22, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "st1w { z8.s }, p4, [x28]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "st1w { z9.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x28, #2, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "st1w { z11.s }, p1, [x28, #3, MUL VL]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "st1w { z12.s }, p4, [x23]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z13.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x22]\n"
+ "st1w { z17.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z20.s }, p4, [x21]\n"
+ "st1w { z21.s }, p3, [x21, #1, MUL VL]\n"
+ "st1w { z22.s }, p2, [x21, #2, MUL VL]\n"
+ "st1w { z23.s }, p1, [x21, #3, MUL VL]\n"
"44:" // Height 4: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 35b\n"
"b 68f\n"
"45:" // Height 5
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"46:" // Height 5: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
"tbz %x[flags], #0, 47f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z8.s }, p4/Z, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x24]\n"
- "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z16.s }, p4/Z, [x23]\n"
- "ld1w { z17.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x23, #2, MUL VL]\n"
- "ld1w { z19.s }, p1/Z, [x23, #3, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x22]\n"
- "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
- "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
- "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
- "ld1w { z24.s }, p4/Z, [x21]\n"
- "ld1w { z25.s }, p3/Z, [x21, #1, MUL VL]\n"
- "ld1w { z26.s }, p2/Z, [x21, #2, MUL VL]\n"
- "ld1w { z27.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z8.s }, p4/Z, [x28]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "ld1w { z9.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z11.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z12.s }, p4/Z, [x23]\n"
+ "add x20, x21, x19, LSL #2\n"
+ "ld1w { z13.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x22]\n"
+ "ld1w { z17.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z19.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z20.s }, p4/Z, [x21]\n"
+ "ld1w { z21.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z23.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "ld1w { z24.s }, p4/Z, [x20]\n"
+ "ld1w { z25.s }, p3/Z, [x20, #1, MUL VL]\n"
+ "ld1w { z26.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z27.s }, p1/Z, [x20, #3, MUL VL]\n"
"b 48f\n"
"47:" // Height 5: no accumulate
"mov z8.s, #0x0\n"
@@ -1038,137 +1038,137 @@ void sve_hybrid_u8u32_dot_6x4VL (
"mov z26.s, #0x0\n"
"mov z27.s, #0x0\n"
"48:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"49:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 50f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 51f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 51f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
"b 51f\n"
"50:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
"51:" // Height 5: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 53f\n"
"52:" // Height 5: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
- "sub x27, x27, #0x10\n"
- "ld1rqb { z2.b }, p0/Z, [x24]\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "cmp x27, #0x10\n"
- "add x26, x26, #0x10\n"
- "ld1rqb { z4.b }, p0/Z, [x22]\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"udot z8.s, z6.b, z0.b[0]\n"
- "udot z12.s, z6.b, z1.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
- "udot z16.s, z6.b, z2.b[0]\n"
- "udot z20.s, z6.b, z3.b[0]\n"
- "add x25, x25, #0x10\n"
- "udot z24.s, z6.b, z4.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "cmp x26, #0x10\n"
"udot z9.s, z7.b, z0.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "add x25, x25, #0x10\n"
+ "udot z12.s, z6.b, z1.b[0]\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
"add x24, x24, #0x10\n"
- "udot z13.s, z7.b, z1.b[0]\n"
- "udot z17.s, z7.b, z2.b[0]\n"
+ "udot z16.s, z6.b, z2.b[0]\n"
+ "ld1rqb { z4.b }, p0/Z, [x21]\n"
"add x23, x23, #0x10\n"
+ "udot z13.s, z7.b, z1.b[0]\n"
"add x22, x22, #0x10\n"
+ "udot z17.s, z7.b, z2.b[0]\n"
+ "add x21, x21, #0x10\n"
+ "udot z20.s, z6.b, z3.b[0]\n"
+ "udot z24.s, z6.b, z4.b[0]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"udot z21.s, z7.b, z3.b[0]\n"
"udot z25.s, z7.b, z4.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
"udot z10.s, z6.b, z0.b[0]\n"
"udot z14.s, z6.b, z1.b[0]\n"
"udot z18.s, z6.b, z2.b[0]\n"
"udot z22.s, z6.b, z3.b[0]\n"
"udot z26.s, z6.b, z4.b[0]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #4, MUL VL]\n"
"udot z11.s, z7.b, z0.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #4, MUL VL]\n"
"udot z15.s, z7.b, z1.b[0]\n"
"udot z19.s, z7.b, z2.b[0]\n"
"udot z23.s, z7.b, z3.b[0]\n"
"udot z27.s, z7.b, z4.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #5, MUL VL]\n"
"udot z8.s, z6.b, z0.b[1]\n"
"udot z12.s, z6.b, z1.b[1]\n"
"udot z16.s, z6.b, z2.b[1]\n"
"udot z20.s, z6.b, z3.b[1]\n"
"udot z24.s, z6.b, z4.b[1]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #6, MUL VL]\n"
"udot z9.s, z7.b, z0.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #6, MUL VL]\n"
"udot z13.s, z7.b, z1.b[1]\n"
"udot z17.s, z7.b, z2.b[1]\n"
"udot z21.s, z7.b, z3.b[1]\n"
"udot z25.s, z7.b, z4.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
+ "ld1b { z7.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #16\n"
"udot z10.s, z6.b, z0.b[1]\n"
"udot z14.s, z6.b, z1.b[1]\n"
"udot z18.s, z6.b, z2.b[1]\n"
"udot z22.s, z6.b, z3.b[1]\n"
"udot z26.s, z6.b, z4.b[1]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-8, MUL VL]\n"
"udot z11.s, z7.b, z0.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-8, MUL VL]\n"
"udot z15.s, z7.b, z1.b[1]\n"
"udot z19.s, z7.b, z2.b[1]\n"
"udot z23.s, z7.b, z3.b[1]\n"
"udot z27.s, z7.b, z4.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-7, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-7, MUL VL]\n"
"udot z8.s, z6.b, z0.b[2]\n"
"udot z12.s, z6.b, z1.b[2]\n"
"udot z16.s, z6.b, z2.b[2]\n"
"udot z20.s, z6.b, z3.b[2]\n"
"udot z24.s, z6.b, z4.b[2]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-6, MUL VL]\n"
"udot z9.s, z7.b, z0.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-6, MUL VL]\n"
"udot z13.s, z7.b, z1.b[2]\n"
"udot z17.s, z7.b, z2.b[2]\n"
"udot z21.s, z7.b, z3.b[2]\n"
"udot z25.s, z7.b, z4.b[2]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-5, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-5, MUL VL]\n"
"udot z10.s, z6.b, z0.b[2]\n"
"udot z14.s, z6.b, z1.b[2]\n"
"udot z18.s, z6.b, z2.b[2]\n"
"udot z22.s, z6.b, z3.b[2]\n"
"udot z26.s, z6.b, z4.b[2]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-4, MUL VL]\n"
"udot z11.s, z7.b, z0.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-4, MUL VL]\n"
"udot z15.s, z7.b, z1.b[2]\n"
"udot z19.s, z7.b, z2.b[2]\n"
"udot z23.s, z7.b, z3.b[2]\n"
"udot z27.s, z7.b, z4.b[2]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-3, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-3, MUL VL]\n"
"udot z8.s, z6.b, z0.b[3]\n"
"udot z12.s, z6.b, z1.b[3]\n"
"udot z16.s, z6.b, z2.b[3]\n"
"udot z20.s, z6.b, z3.b[3]\n"
"udot z24.s, z6.b, z4.b[3]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-2, MUL VL]\n"
"udot z9.s, z7.b, z0.b[3]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-2, MUL VL]\n"
"udot z13.s, z7.b, z1.b[3]\n"
"udot z17.s, z7.b, z2.b[3]\n"
"udot z21.s, z7.b, z3.b[3]\n"
"udot z25.s, z7.b, z4.b[3]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-1, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-1, MUL VL]\n"
"udot z10.s, z6.b, z0.b[3]\n"
"udot z14.s, z6.b, z1.b[3]\n"
"udot z18.s, z6.b, z2.b[3]\n"
@@ -1181,28 +1181,28 @@ void sve_hybrid_u8u32_dot_6x4VL (
"udot z27.s, z7.b, z4.b[3]\n"
"bgt 52b\n"
"53:" // Height 5: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
- "subs x27, x27, #0x4\n"
- "ld1rqb { z2.b }, p0/Z, [x24]\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "ld1rqb { z4.b }, p0/Z, [x22]\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"udot z8.s, z6.b, z0.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "udot z9.s, z7.b, z0.b[0]\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
"udot z12.s, z6.b, z1.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1rqb { z4.b }, p0/Z, [x21]\n"
+ "udot z13.s, z7.b, z1.b[0]\n"
"udot z16.s, z6.b, z2.b[0]\n"
"udot z20.s, z6.b, z3.b[0]\n"
"udot z24.s, z6.b, z4.b[0]\n"
- "udot z9.s, z7.b, z0.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "udot z13.s, z7.b, z1.b[0]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"udot z17.s, z7.b, z2.b[0]\n"
"udot z21.s, z7.b, z3.b[0]\n"
"udot z25.s, z7.b, z4.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"udot z10.s, z6.b, z0.b[0]\n"
"udot z14.s, z6.b, z1.b[0]\n"
"udot z18.s, z6.b, z2.b[0]\n"
@@ -1214,22 +1214,22 @@ void sve_hybrid_u8u32_dot_6x4VL (
"udot z23.s, z7.b, z3.b[0]\n"
"udot z27.s, z7.b, z4.b[0]\n"
"ble 54f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"udot z8.s, z6.b, z0.b[1]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"udot z12.s, z6.b, z1.b[1]\n"
"udot z16.s, z6.b, z2.b[1]\n"
"udot z20.s, z6.b, z3.b[1]\n"
- "subs x27, x27, #0x4\n"
"udot z24.s, z6.b, z4.b[1]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"udot z9.s, z7.b, z0.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
"udot z13.s, z7.b, z1.b[1]\n"
"udot z17.s, z7.b, z2.b[1]\n"
"udot z21.s, z7.b, z3.b[1]\n"
"udot z25.s, z7.b, z4.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"udot z10.s, z6.b, z0.b[1]\n"
"udot z14.s, z6.b, z1.b[1]\n"
"udot z18.s, z6.b, z2.b[1]\n"
@@ -1241,22 +1241,22 @@ void sve_hybrid_u8u32_dot_6x4VL (
"udot z23.s, z7.b, z3.b[1]\n"
"udot z27.s, z7.b, z4.b[1]\n"
"ble 54f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"udot z8.s, z6.b, z0.b[2]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"udot z12.s, z6.b, z1.b[2]\n"
"udot z16.s, z6.b, z2.b[2]\n"
"udot z20.s, z6.b, z3.b[2]\n"
- "subs x27, x27, #0x4\n"
"udot z24.s, z6.b, z4.b[2]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"udot z9.s, z7.b, z0.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
"udot z13.s, z7.b, z1.b[2]\n"
"udot z17.s, z7.b, z2.b[2]\n"
"udot z21.s, z7.b, z3.b[2]\n"
"udot z25.s, z7.b, z4.b[2]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"udot z10.s, z6.b, z0.b[2]\n"
"udot z14.s, z6.b, z1.b[2]\n"
"udot z18.s, z6.b, z2.b[2]\n"
@@ -1268,21 +1268,21 @@ void sve_hybrid_u8u32_dot_6x4VL (
"udot z23.s, z7.b, z3.b[2]\n"
"udot z27.s, z7.b, z4.b[2]\n"
"ble 54f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"udot z8.s, z6.b, z0.b[3]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
"udot z12.s, z6.b, z1.b[3]\n"
"udot z16.s, z6.b, z2.b[3]\n"
"udot z20.s, z6.b, z3.b[3]\n"
"udot z24.s, z6.b, z4.b[3]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"udot z9.s, z7.b, z0.b[3]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
"udot z13.s, z7.b, z1.b[3]\n"
"udot z17.s, z7.b, z2.b[3]\n"
"udot z21.s, z7.b, z3.b[3]\n"
"udot z25.s, z7.b, z4.b[3]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"udot z10.s, z6.b, z0.b[3]\n"
"udot z14.s, z6.b, z1.b[3]\n"
"udot z18.s, z6.b, z2.b[3]\n"
@@ -1294,88 +1294,88 @@ void sve_hybrid_u8u32_dot_6x4VL (
"udot z23.s, z7.b, z3.b[3]\n"
"udot z27.s, z7.b, z4.b[3]\n"
"54:" // Height 5: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 49b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "st1w { z8.s }, p4, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p4, [x24]\n"
- "st1w { z13.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x23]\n"
- "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z20.s }, p4, [x22]\n"
- "st1w { z21.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z22.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z23.s }, p1, [x22, #3, MUL VL]\n"
- "st1w { z24.s }, p4, [x21]\n"
- "st1w { z25.s }, p3, [x21, #1, MUL VL]\n"
- "st1w { z26.s }, p2, [x21, #2, MUL VL]\n"
- "st1w { z27.s }, p1, [x21, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "st1w { z8.s }, p4, [x28]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "st1w { z9.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x28, #2, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "st1w { z11.s }, p1, [x28, #3, MUL VL]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "st1w { z12.s }, p4, [x23]\n"
+ "add x20, x21, x19, LSL #2\n"
+ "st1w { z13.s }, p3, [x23, #1, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z14.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x22]\n"
+ "st1w { z17.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z20.s }, p4, [x21]\n"
+ "st1w { z21.s }, p3, [x21, #1, MUL VL]\n"
+ "st1w { z22.s }, p2, [x21, #2, MUL VL]\n"
+ "st1w { z23.s }, p1, [x21, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x20]\n"
+ "st1w { z25.s }, p3, [x20, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x20, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x20, #3, MUL VL]\n"
"55:" // Height 5: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 46b\n"
"b 68f\n"
"56:" // Height 6
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"mov x20, #0x18\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x9, %x[output_ptr]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
"57:" // Height 6: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
"tbz %x[flags], #0, 58f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z8.s }, p4/Z, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
- "add x20, x21, x20, LSL #2\n"
- "ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x24]\n"
- "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z16.s }, p4/Z, [x23]\n"
- "ld1w { z17.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x23, #2, MUL VL]\n"
- "ld1w { z19.s }, p1/Z, [x23, #3, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x22]\n"
- "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
- "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
- "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
- "ld1w { z24.s }, p4/Z, [x21]\n"
- "ld1w { z25.s }, p3/Z, [x21, #1, MUL VL]\n"
- "ld1w { z26.s }, p2/Z, [x21, #2, MUL VL]\n"
- "ld1w { z27.s }, p1/Z, [x21, #3, MUL VL]\n"
- "ld1w { z28.s }, p4/Z, [x20]\n"
- "ld1w { z29.s }, p3/Z, [x20, #1, MUL VL]\n"
- "ld1w { z30.s }, p2/Z, [x20, #2, MUL VL]\n"
- "ld1w { z31.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z8.s }, p4/Z, [x28]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "ld1w { z9.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z11.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z12.s }, p4/Z, [x23]\n"
+ "add x20, x21, x19, LSL #2\n"
+ "ld1w { z13.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "add x19, x20, x19, LSL #2\n"
+ "ld1w { z14.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x22]\n"
+ "ld1w { z17.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z19.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z20.s }, p4/Z, [x21]\n"
+ "ld1w { z21.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z23.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "ld1w { z24.s }, p4/Z, [x20]\n"
+ "ld1w { z25.s }, p3/Z, [x20, #1, MUL VL]\n"
+ "ld1w { z26.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z27.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "ld1w { z28.s }, p4/Z, [x19]\n"
+ "ld1w { z29.s }, p3/Z, [x19, #1, MUL VL]\n"
+ "ld1w { z30.s }, p2/Z, [x19, #2, MUL VL]\n"
+ "ld1w { z31.s }, p1/Z, [x19, #3, MUL VL]\n"
"b 59f\n"
"58:" // Height 6: no accumulate
"mov z8.s, #0x0\n"
@@ -1403,156 +1403,156 @@ void sve_hybrid_u8u32_dot_6x4VL (
"mov z30.s, #0x0\n"
"mov z31.s, #0x0\n"
"59:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"60:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 61f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 62f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
- "add x21, x21, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 62f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
+ "add x20, x20, x19\n"
"b 62f\n"
"61:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
+ "add x20, x21, x19\n"
"62:" // Height 6: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 64f\n"
"63:" // Height 6: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
- "sub x27, x27, #0x10\n"
- "ld1rqb { z2.b }, p0/Z, [x24]\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "cmp x27, #0x10\n"
- "add x26, x26, #0x10\n"
- "ld1rqb { z4.b }, p0/Z, [x22]\n"
- "ld1rqb { z5.b }, p0/Z, [x21]\n"
- "add x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"udot z8.s, z6.b, z0.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "cmp x26, #0x10\n"
+ "udot z9.s, z7.b, z0.b[0]\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "add x25, x25, #0x10\n"
"udot z12.s, z6.b, z1.b[0]\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
+ "add x24, x24, #0x10\n"
"udot z16.s, z6.b, z2.b[0]\n"
- "udot z20.s, z6.b, z3.b[0]\n"
+ "ld1rqb { z4.b }, p0/Z, [x21]\n"
"add x23, x23, #0x10\n"
+ "udot z13.s, z7.b, z1.b[0]\n"
+ "ld1rqb { z5.b }, p0/Z, [x20]\n"
"add x22, x22, #0x10\n"
- "udot z24.s, z6.b, z4.b[0]\n"
- "udot z28.s, z6.b, z5.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "udot z20.s, z6.b, z3.b[0]\n"
"add x21, x21, #0x10\n"
- "udot z9.s, z7.b, z0.b[0]\n"
- "udot z13.s, z7.b, z1.b[0]\n"
"udot z17.s, z7.b, z2.b[0]\n"
+ "add x20, x20, #0x10\n"
+ "udot z24.s, z6.b, z4.b[0]\n"
+ "udot z28.s, z6.b, z5.b[0]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"udot z21.s, z7.b, z3.b[0]\n"
"udot z25.s, z7.b, z4.b[0]\n"
"udot z29.s, z7.b, z5.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
"udot z10.s, z6.b, z0.b[0]\n"
"udot z14.s, z6.b, z1.b[0]\n"
"udot z18.s, z6.b, z2.b[0]\n"
"udot z22.s, z6.b, z3.b[0]\n"
"udot z26.s, z6.b, z4.b[0]\n"
"udot z30.s, z6.b, z5.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #4, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #4, MUL VL]\n"
"udot z11.s, z7.b, z0.b[0]\n"
"udot z15.s, z7.b, z1.b[0]\n"
"udot z19.s, z7.b, z2.b[0]\n"
"udot z23.s, z7.b, z3.b[0]\n"
"udot z27.s, z7.b, z4.b[0]\n"
"udot z31.s, z7.b, z5.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #5, MUL VL]\n"
"udot z8.s, z6.b, z0.b[1]\n"
"udot z12.s, z6.b, z1.b[1]\n"
"udot z16.s, z6.b, z2.b[1]\n"
"udot z20.s, z6.b, z3.b[1]\n"
"udot z24.s, z6.b, z4.b[1]\n"
"udot z28.s, z6.b, z5.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #6, MUL VL]\n"
"udot z9.s, z7.b, z0.b[1]\n"
"udot z13.s, z7.b, z1.b[1]\n"
"udot z17.s, z7.b, z2.b[1]\n"
"udot z21.s, z7.b, z3.b[1]\n"
"udot z25.s, z7.b, z4.b[1]\n"
"udot z29.s, z7.b, z5.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
+ "ld1b { z7.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #16\n"
"udot z10.s, z6.b, z0.b[1]\n"
"udot z14.s, z6.b, z1.b[1]\n"
"udot z18.s, z6.b, z2.b[1]\n"
"udot z22.s, z6.b, z3.b[1]\n"
"udot z26.s, z6.b, z4.b[1]\n"
"udot z30.s, z6.b, z5.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-8, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-8, MUL VL]\n"
"udot z11.s, z7.b, z0.b[1]\n"
"udot z15.s, z7.b, z1.b[1]\n"
"udot z19.s, z7.b, z2.b[1]\n"
"udot z23.s, z7.b, z3.b[1]\n"
"udot z27.s, z7.b, z4.b[1]\n"
"udot z31.s, z7.b, z5.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-7, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-7, MUL VL]\n"
"udot z8.s, z6.b, z0.b[2]\n"
"udot z12.s, z6.b, z1.b[2]\n"
"udot z16.s, z6.b, z2.b[2]\n"
"udot z20.s, z6.b, z3.b[2]\n"
"udot z24.s, z6.b, z4.b[2]\n"
"udot z28.s, z6.b, z5.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-6, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-6, MUL VL]\n"
"udot z9.s, z7.b, z0.b[2]\n"
"udot z13.s, z7.b, z1.b[2]\n"
"udot z17.s, z7.b, z2.b[2]\n"
"udot z21.s, z7.b, z3.b[2]\n"
"udot z25.s, z7.b, z4.b[2]\n"
"udot z29.s, z7.b, z5.b[2]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-5, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-5, MUL VL]\n"
"udot z10.s, z6.b, z0.b[2]\n"
"udot z14.s, z6.b, z1.b[2]\n"
"udot z18.s, z6.b, z2.b[2]\n"
"udot z22.s, z6.b, z3.b[2]\n"
"udot z26.s, z6.b, z4.b[2]\n"
"udot z30.s, z6.b, z5.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-4, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-4, MUL VL]\n"
"udot z11.s, z7.b, z0.b[2]\n"
"udot z15.s, z7.b, z1.b[2]\n"
"udot z19.s, z7.b, z2.b[2]\n"
"udot z23.s, z7.b, z3.b[2]\n"
"udot z27.s, z7.b, z4.b[2]\n"
"udot z31.s, z7.b, z5.b[2]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-3, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-3, MUL VL]\n"
"udot z8.s, z6.b, z0.b[3]\n"
"udot z12.s, z6.b, z1.b[3]\n"
"udot z16.s, z6.b, z2.b[3]\n"
"udot z20.s, z6.b, z3.b[3]\n"
"udot z24.s, z6.b, z4.b[3]\n"
"udot z28.s, z6.b, z5.b[3]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-2, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-2, MUL VL]\n"
"udot z9.s, z7.b, z0.b[3]\n"
"udot z13.s, z7.b, z1.b[3]\n"
"udot z17.s, z7.b, z2.b[3]\n"
"udot z21.s, z7.b, z3.b[3]\n"
"udot z25.s, z7.b, z4.b[3]\n"
"udot z29.s, z7.b, z5.b[3]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-1, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-1, MUL VL]\n"
"udot z10.s, z6.b, z0.b[3]\n"
"udot z14.s, z6.b, z1.b[3]\n"
"udot z18.s, z6.b, z2.b[3]\n"
@@ -1567,31 +1567,31 @@ void sve_hybrid_u8u32_dot_6x4VL (
"udot z31.s, z7.b, z5.b[3]\n"
"bgt 63b\n"
"64:" // Height 6: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
- "subs x27, x27, #0x4\n"
- "ld1rqb { z2.b }, p0/Z, [x24]\n"
- "ld1rqb { z3.b }, p0/Z, [x23]\n"
- "ld1rqb { z4.b }, p0/Z, [x22]\n"
- "ld1rqb { z5.b }, p0/Z, [x21]\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
"udot z8.s, z6.b, z0.b[0]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "udot z9.s, z7.b, z0.b[0]\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
"udot z12.s, z6.b, z1.b[0]\n"
+ "ld1rqb { z4.b }, p0/Z, [x21]\n"
+ "udot z13.s, z7.b, z1.b[0]\n"
+ "ld1rqb { z5.b }, p0/Z, [x20]\n"
"udot z16.s, z6.b, z2.b[0]\n"
"udot z20.s, z6.b, z3.b[0]\n"
"udot z24.s, z6.b, z4.b[0]\n"
"udot z28.s, z6.b, z5.b[0]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
- "udot z9.s, z7.b, z0.b[0]\n"
- "udot z13.s, z7.b, z1.b[0]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"udot z17.s, z7.b, z2.b[0]\n"
"udot z21.s, z7.b, z3.b[0]\n"
"udot z25.s, z7.b, z4.b[0]\n"
"udot z29.s, z7.b, z5.b[0]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"udot z10.s, z6.b, z0.b[0]\n"
"udot z14.s, z6.b, z1.b[0]\n"
"udot z18.s, z6.b, z2.b[0]\n"
@@ -1605,24 +1605,24 @@ void sve_hybrid_u8u32_dot_6x4VL (
"udot z27.s, z7.b, z4.b[0]\n"
"udot z31.s, z7.b, z5.b[0]\n"
"ble 65f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"udot z8.s, z6.b, z0.b[1]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"udot z12.s, z6.b, z1.b[1]\n"
"udot z16.s, z6.b, z2.b[1]\n"
"udot z20.s, z6.b, z3.b[1]\n"
- "subs x27, x27, #0x4\n"
"udot z24.s, z6.b, z4.b[1]\n"
"udot z28.s, z6.b, z5.b[1]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"udot z9.s, z7.b, z0.b[1]\n"
"udot z13.s, z7.b, z1.b[1]\n"
"udot z17.s, z7.b, z2.b[1]\n"
"udot z21.s, z7.b, z3.b[1]\n"
"udot z25.s, z7.b, z4.b[1]\n"
"udot z29.s, z7.b, z5.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"udot z10.s, z6.b, z0.b[1]\n"
"udot z14.s, z6.b, z1.b[1]\n"
"udot z18.s, z6.b, z2.b[1]\n"
@@ -1636,24 +1636,24 @@ void sve_hybrid_u8u32_dot_6x4VL (
"udot z27.s, z7.b, z4.b[1]\n"
"udot z31.s, z7.b, z5.b[1]\n"
"ble 65f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"udot z8.s, z6.b, z0.b[2]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
"udot z12.s, z6.b, z1.b[2]\n"
"udot z16.s, z6.b, z2.b[2]\n"
"udot z20.s, z6.b, z3.b[2]\n"
- "subs x27, x27, #0x4\n"
"udot z24.s, z6.b, z4.b[2]\n"
"udot z28.s, z6.b, z5.b[2]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"udot z9.s, z7.b, z0.b[2]\n"
"udot z13.s, z7.b, z1.b[2]\n"
"udot z17.s, z7.b, z2.b[2]\n"
"udot z21.s, z7.b, z3.b[2]\n"
"udot z25.s, z7.b, z4.b[2]\n"
"udot z29.s, z7.b, z5.b[2]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"udot z10.s, z6.b, z0.b[2]\n"
"udot z14.s, z6.b, z1.b[2]\n"
"udot z18.s, z6.b, z2.b[2]\n"
@@ -1667,23 +1667,23 @@ void sve_hybrid_u8u32_dot_6x4VL (
"udot z27.s, z7.b, z4.b[2]\n"
"udot z31.s, z7.b, z5.b[2]\n"
"ble 65f\n"
- "ld1b { z6.b }, p5/Z, [x10]\n"
- "ld1b { z7.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9]\n"
"udot z8.s, z6.b, z0.b[3]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #1, MUL VL]\n"
"udot z12.s, z6.b, z1.b[3]\n"
"udot z16.s, z6.b, z2.b[3]\n"
"udot z20.s, z6.b, z3.b[3]\n"
"udot z24.s, z6.b, z4.b[3]\n"
"udot z28.s, z6.b, z5.b[3]\n"
- "ld1b { z6.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #2, MUL VL]\n"
"udot z9.s, z7.b, z0.b[3]\n"
"udot z13.s, z7.b, z1.b[3]\n"
"udot z17.s, z7.b, z2.b[3]\n"
"udot z21.s, z7.b, z3.b[3]\n"
"udot z25.s, z7.b, z4.b[3]\n"
"udot z29.s, z7.b, z5.b[3]\n"
- "ld1b { z7.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "ld1b { z7.b }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"udot z10.s, z6.b, z0.b[3]\n"
"udot z14.s, z6.b, z1.b[3]\n"
"udot z18.s, z6.b, z2.b[3]\n"
@@ -1697,61 +1697,61 @@ void sve_hybrid_u8u32_dot_6x4VL (
"udot z27.s, z7.b, z4.b[3]\n"
"udot z31.s, z7.b, z5.b[3]\n"
"65:" // Height 6: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 60b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "st1w { z8.s }, p4, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
- "add x20, x21, x20, LSL #2\n"
- "st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z12.s }, p4, [x24]\n"
- "st1w { z13.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x23]\n"
- "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z20.s }, p4, [x22]\n"
- "st1w { z21.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z22.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z23.s }, p1, [x22, #3, MUL VL]\n"
- "st1w { z24.s }, p4, [x21]\n"
- "st1w { z25.s }, p3, [x21, #1, MUL VL]\n"
- "st1w { z26.s }, p2, [x21, #2, MUL VL]\n"
- "st1w { z27.s }, p1, [x21, #3, MUL VL]\n"
- "st1w { z28.s }, p4, [x20]\n"
- "st1w { z29.s }, p3, [x20, #1, MUL VL]\n"
- "st1w { z30.s }, p2, [x20, #2, MUL VL]\n"
- "st1w { z31.s }, p1, [x20, #3, MUL VL]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "st1w { z8.s }, p4, [x28]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "st1w { z9.s }, p3, [x28, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x28, #2, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "st1w { z11.s }, p1, [x28, #3, MUL VL]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "st1w { z12.s }, p4, [x23]\n"
+ "add x20, x21, x19, LSL #2\n"
+ "st1w { z13.s }, p3, [x23, #1, MUL VL]\n"
+ "add x19, x20, x19, LSL #2\n"
+ "st1w { z14.s }, p2, [x23, #2, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z15.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x22]\n"
+ "st1w { z17.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z20.s }, p4, [x21]\n"
+ "st1w { z21.s }, p3, [x21, #1, MUL VL]\n"
+ "st1w { z22.s }, p2, [x21, #2, MUL VL]\n"
+ "st1w { z23.s }, p1, [x21, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x20]\n"
+ "st1w { z25.s }, p3, [x20, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x20, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x20, #3, MUL VL]\n"
+ "st1w { z28.s }, p4, [x19]\n"
+ "st1w { z29.s }, p3, [x19, #1, MUL VL]\n"
+ "st1w { z30.s }, p2, [x19, #2, MUL VL]\n"
+ "st1w { z31.s }, p1, [x19, #3, MUL VL]\n"
"66:" // Height 6: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 57b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 68f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 67f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"67:" // Update direct input
- "mov x20, #0x6\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x6\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"68:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_mmla_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_mmla_6x4VL/generic.cpp
index 59f33289b4..e8bad69ccd 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_mmla_6x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_mmla_6x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -87,25 +87,25 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"cmp %x[M], #0x2\n"
"bgt 23f\n"
"beq 12f\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x28, %x[output_ptr]\n"
"2:" // Height 1: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
"tbz %x[flags], #0, 3f\n"
- "ld1w { z9.s }, p4/Z, [x9]\n"
- "ld1w { z10.s }, p3/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z9.s }, p4/Z, [x28]\n"
"zip1 z8.d, z9.d, z12.d\n"
+ "ld1w { z10.s }, p3/Z, [x28, #1, MUL VL]\n"
"zip2 z12.d, z9.d, z12.d\n"
- "ld1w { z11.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x28, #3, MUL VL]\n"
"zip1 z9.d, z10.d, z13.d\n"
"zip2 z13.d, z10.d, z13.d\n"
"zip1 z10.d, z11.d, z14.d\n"
@@ -123,154 +123,154 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
"4:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"5:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 6f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 7f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 7f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
"b 7f\n"
"6:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"7:" // Height 1: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 9f\n"
"8:" // Height 1: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x26, x26, #0x10\n"
+ "trn2 z1.d, z1.d, z2.d\n"
+ "cmp x26, #0x10\n"
+ "add x25, x25, #0x10\n"
".inst 0x45c79808 // ummla z8.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x45c6980c // ummla z12.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45c79809 // ummla z9.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x45c6980d // ummla z13.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x45c7980a // ummla z10.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x45c6980e // ummla z14.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
- "trn2 z1.d, z1.d, z2.d\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #16\n"
".inst 0x45c7980b // ummla z11.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-8, MUL VL]\n"
".inst 0x45c6980f // ummla z15.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-8, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-7, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-7, MUL VL]\n"
".inst 0x45c79828 // ummla z8.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-6, MUL VL]\n"
".inst 0x45c6982c // ummla z12.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-6, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-5, MUL VL]\n"
".inst 0x45c79829 // ummla z9.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-4, MUL VL]\n"
".inst 0x45c6982d // ummla z13.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-4, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-3, MUL VL]\n"
".inst 0x45c7982a // ummla z10.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-2, MUL VL]\n"
".inst 0x45c6982e // ummla z14.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-2, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-1, MUL VL]\n"
- "sub x27, x27, #0x10\n"
- "cmp x27, #0x10\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-1, MUL VL]\n"
".inst 0x45c7982b // ummla z11.s, z1.b, z7.b\n"
".inst 0x45c6982f // ummla z15.s, z1.b, z6.b\n"
- "add x26, x26, #0x10\n"
"bgt 8b\n"
"9:" // Height 1: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x26, x26, #0x8\n"
+ "trn2 z1.d, z1.d, z2.d\n"
".inst 0x45c79808 // ummla z8.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x45c6980c // ummla z12.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45c79809 // ummla z9.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x45c6980d // ummla z13.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x45c7980a // ummla z10.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x45c6980e // ummla z14.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
- "subs x27, x27, #0x8\n"
- "trn2 z1.d, z1.d, z2.d\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
".inst 0x45c7980b // ummla z11.s, z0.b, z7.b\n"
".inst 0x45c6980f // ummla z15.s, z0.b, z6.b\n"
- "addvl x10, x10, #8\n"
"ble 10f\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
".inst 0x45c79828 // ummla z8.s, z1.b, z7.b\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x45c6982c // ummla z12.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45c79829 // ummla z9.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x45c6982d // ummla z13.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x45c7982a // ummla z10.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x45c6982e // ummla z14.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
".inst 0x45c7982b // ummla z11.s, z1.b, z7.b\n"
".inst 0x45c6982f // ummla z15.s, z1.b, z6.b\n"
- "addvl x10, x10, #8\n"
"10:" // Height 1: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 5b\n"
"uzp1 z8.d, z8.d, z12.d\n"
+ "st1w { z8.s }, p4, [x28]\n"
"uzp1 z9.d, z9.d, z13.d\n"
- "st1w { z8.s }, p4, [x9]\n"
"uzp1 z10.d, z10.d, z14.d\n"
+ "st1w { z9.s }, p3, [x28, #1, MUL VL]\n"
"uzp1 z11.d, z11.d, z15.d\n"
- "st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "st1w { z10.s }, p2, [x28, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"11:" // Height 1: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 2b\n"
"b 68f\n"
"12:" // Height 2
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"13:" // Height 2: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
"tbz %x[flags], #0, 14f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "ld1w { z9.s }, p4/Z, [x9]\n"
- "ld1w { z10.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z11.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x24]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z9.s }, p4/Z, [x28]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "ld1w { z10.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x23]\n"
"zip1 z8.d, z9.d, z12.d\n"
+ "ld1w { z13.s }, p3/Z, [x23, #1, MUL VL]\n"
"zip2 z12.d, z9.d, z12.d\n"
- "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x23, #3, MUL VL]\n"
"zip1 z9.d, z10.d, z13.d\n"
"zip2 z13.d, z10.d, z13.d\n"
- "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
"zip1 z10.d, z11.d, z14.d\n"
"zip2 z14.d, z11.d, z14.d\n"
"zip1 z11.d, z16.d, z15.d\n"
@@ -286,179 +286,179 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
"15:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"16:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 17f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 18f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 18f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
"b 18f\n"
"17:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
"18:" // Height 2: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 20f\n"
"19:" // Height 2: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "cmp x26, #0x10\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "add x25, x25, #0x10\n"
+ "trn2 z1.d, z1.d, z2.d\n"
+ "add x24, x24, #0x10\n"
".inst 0x45c79808 // ummla z8.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x45c6980c // ummla z12.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45c79809 // ummla z9.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x45c6980d // ummla z13.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x45c7980a // ummla z10.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x45c6980e // ummla z14.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
- "trn2 z1.d, z1.d, z2.d\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #16\n"
".inst 0x45c7980b // ummla z11.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-8, MUL VL]\n"
".inst 0x45c6980f // ummla z15.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-8, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-7, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-7, MUL VL]\n"
".inst 0x45c79828 // ummla z8.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-6, MUL VL]\n"
".inst 0x45c6982c // ummla z12.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-6, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-5, MUL VL]\n"
".inst 0x45c79829 // ummla z9.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-4, MUL VL]\n"
".inst 0x45c6982d // ummla z13.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-4, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-3, MUL VL]\n"
".inst 0x45c7982a // ummla z10.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-2, MUL VL]\n"
".inst 0x45c6982e // ummla z14.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-2, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #-1, MUL VL]\n"
- "sub x27, x27, #0x10\n"
- "cmp x27, #0x10\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-1, MUL VL]\n"
".inst 0x45c7982b // ummla z11.s, z1.b, z7.b\n"
".inst 0x45c6982f // ummla z15.s, z1.b, z6.b\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
"bgt 19b\n"
"20:" // Height 2: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x8\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "trn2 z1.d, z1.d, z2.d\n"
".inst 0x45c79808 // ummla z8.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x45c6980c // ummla z12.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45c79809 // ummla z9.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x45c6980d // ummla z13.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x45c7980a // ummla z10.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x45c6980e // ummla z14.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
- "subs x27, x27, #0x8\n"
- "trn2 z1.d, z1.d, z2.d\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
".inst 0x45c7980b // ummla z11.s, z0.b, z7.b\n"
".inst 0x45c6980f // ummla z15.s, z0.b, z6.b\n"
- "addvl x10, x10, #8\n"
"ble 21f\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
".inst 0x45c79828 // ummla z8.s, z1.b, z7.b\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x45c6982c // ummla z12.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45c79829 // ummla z9.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x45c6982d // ummla z13.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x45c7982a // ummla z10.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x45c6982e // ummla z14.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
".inst 0x45c7982b // ummla z11.s, z1.b, z7.b\n"
".inst 0x45c6982f // ummla z15.s, z1.b, z6.b\n"
- "addvl x10, x10, #8\n"
"21:" // Height 2: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 16b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
"uzp1 z7.d, z8.d, z12.d\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 z8.d, z8.d, z12.d\n"
+ "st1w { z7.s }, p4, [x28]\n"
"uzp1 z12.d, z9.d, z13.d\n"
+ "add x23, x28, x19, LSL #2\n"
"uzp2 z9.d, z9.d, z13.d\n"
- "st1w { z7.s }, p4, [x9]\n"
+ "st1w { z12.s }, p3, [x28, #1, MUL VL]\n"
"uzp1 z13.d, z10.d, z14.d\n"
"uzp2 z10.d, z10.d, z14.d\n"
- "st1w { z12.s }, p3, [x9, #1, MUL VL]\n"
+ "st1w { z13.s }, p2, [x28, #2, MUL VL]\n"
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
- "st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "st1w { z8.s }, p4, [x24]\n"
- "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z14.s }, p1, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "st1w { z8.s }, p4, [x23]\n"
+ "st1w { z9.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x23, #3, MUL VL]\n"
"22:" // Height 2: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 13b\n"
"b 68f\n"
"23:" // Height 3
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"24:" // Height 3: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
"tbz %x[flags], #0, 25f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z9.s }, p4/Z, [x9]\n"
- "ld1w { z10.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z11.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x24]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z9.s }, p4/Z, [x28]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "ld1w { z10.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x23]\n"
"zip1 z8.d, z9.d, z12.d\n"
+ "ld1w { z13.s }, p3/Z, [x23, #1, MUL VL]\n"
"zip2 z12.d, z9.d, z12.d\n"
- "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x23, #3, MUL VL]\n"
"zip1 z9.d, z10.d, z13.d\n"
+ "ld1w { z17.s }, p4/Z, [x22]\n"
"zip2 z13.d, z10.d, z13.d\n"
- "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z17.s }, p4/Z, [x23]\n"
+ "ld1w { z18.s }, p3/Z, [x22, #1, MUL VL]\n"
"zip1 z10.d, z11.d, z14.d\n"
+ "ld1w { z19.s }, p2/Z, [x22, #2, MUL VL]\n"
"zip2 z14.d, z11.d, z14.d\n"
- "ld1w { z18.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z19.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z24.s }, p1/Z, [x22, #3, MUL VL]\n"
"zip1 z11.d, z16.d, z15.d\n"
"zip2 z15.d, z16.d, z15.d\n"
- "ld1w { z24.s }, p1/Z, [x23, #3, MUL VL]\n"
"zip1 z16.d, z17.d, z20.d\n"
"zip2 z20.d, z17.d, z20.d\n"
"zip1 z17.d, z18.d, z21.d\n"
@@ -486,239 +486,239 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"26:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"27:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 28f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 29f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 29f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
"b 29f\n"
"28:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
"29:" // Height 3: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 31f\n"
"30:" // Height 3: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
- "ld1rqb { z3.b }, p0/Z, [x24]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
+ "ld1rqb { z3.b }, p0/Z, [x23]\n"
+ "cmp x26, #0x10\n"
"trn2 z1.d, z1.d, z2.d\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "trn1 z2.d, z3.d, z4.d\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
".inst 0x45c79808 // ummla z8.s, z0.b, z7.b\n"
- ".inst 0x45c79850 // ummla z16.s, z2.b, z7.b\n"
".inst 0x45c6980c // ummla z12.s, z0.b, z6.b\n"
+ "add x23, x23, #0x10\n"
+ "trn1 z2.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
+ ".inst 0x45c79850 // ummla z16.s, z2.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x45c69854 // ummla z20.s, z2.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45c79809 // ummla z9.s, z0.b, z7.b\n"
".inst 0x45c79851 // ummla z17.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
- "trn2 z3.d, z3.d, z4.d\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x45c6980d // ummla z13.s, z0.b, z6.b\n"
".inst 0x45c69855 // ummla z21.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
- "sub x27, x27, #0x10\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x45c7980a // ummla z10.s, z0.b, z7.b\n"
".inst 0x45c79852 // ummla z18.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
- "cmp x27, #0x10\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x45c6980e // ummla z14.s, z0.b, z6.b\n"
".inst 0x45c69856 // ummla z22.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #16\n"
".inst 0x45c7980b // ummla z11.s, z0.b, z7.b\n"
".inst 0x45c79853 // ummla z19.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-8, MUL VL]\n"
- "add x26, x26, #0x10\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-8, MUL VL]\n"
".inst 0x45c6980f // ummla z15.s, z0.b, z6.b\n"
".inst 0x45c69857 // ummla z23.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #-7, MUL VL]\n"
- "add x25, x25, #0x10\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-7, MUL VL]\n"
".inst 0x45c79828 // ummla z8.s, z1.b, z7.b\n"
".inst 0x45c79870 // ummla z16.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-6, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-6, MUL VL]\n"
".inst 0x45c6982c // ummla z12.s, z1.b, z6.b\n"
".inst 0x45c69874 // ummla z20.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #-5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-5, MUL VL]\n"
".inst 0x45c79829 // ummla z9.s, z1.b, z7.b\n"
".inst 0x45c79871 // ummla z17.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-4, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-4, MUL VL]\n"
".inst 0x45c6982d // ummla z13.s, z1.b, z6.b\n"
".inst 0x45c69875 // ummla z21.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #-3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-3, MUL VL]\n"
".inst 0x45c7982a // ummla z10.s, z1.b, z7.b\n"
".inst 0x45c79872 // ummla z18.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-2, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-2, MUL VL]\n"
".inst 0x45c6982e // ummla z14.s, z1.b, z6.b\n"
".inst 0x45c69876 // ummla z22.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #-1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-1, MUL VL]\n"
".inst 0x45c7982b // ummla z11.s, z1.b, z7.b\n"
".inst 0x45c79873 // ummla z19.s, z3.b, z7.b\n"
".inst 0x45c6982f // ummla z15.s, z1.b, z6.b\n"
".inst 0x45c69877 // ummla z23.s, z3.b, z6.b\n"
"bgt 30b\n"
"31:" // Height 3: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
- "ld1rqb { z3.b }, p0/Z, [x24]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "subs x26, x26, #0x8\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
+ "ld1rqb { z3.b }, p0/Z, [x23]\n"
"trn2 z1.d, z1.d, z2.d\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "trn1 z2.d, z3.d, z4.d\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x45c79808 // ummla z8.s, z0.b, z7.b\n"
- ".inst 0x45c79850 // ummla z16.s, z2.b, z7.b\n"
".inst 0x45c6980c // ummla z12.s, z0.b, z6.b\n"
+ "trn1 z2.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
+ ".inst 0x45c79850 // ummla z16.s, z2.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x45c69854 // ummla z20.s, z2.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45c79809 // ummla z9.s, z0.b, z7.b\n"
".inst 0x45c79851 // ummla z17.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
- "subs x27, x27, #0x8\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x45c6980d // ummla z13.s, z0.b, z6.b\n"
".inst 0x45c69855 // ummla z21.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
- "trn2 z3.d, z3.d, z4.d\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x45c7980a // ummla z10.s, z0.b, z7.b\n"
".inst 0x45c79852 // ummla z18.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x45c6980e // ummla z14.s, z0.b, z6.b\n"
".inst 0x45c69856 // ummla z22.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #8\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
".inst 0x45c7980b // ummla z11.s, z0.b, z7.b\n"
".inst 0x45c79853 // ummla z19.s, z2.b, z7.b\n"
".inst 0x45c6980f // ummla z15.s, z0.b, z6.b\n"
".inst 0x45c69857 // ummla z23.s, z2.b, z6.b\n"
"ble 32f\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
".inst 0x45c79828 // ummla z8.s, z1.b, z7.b\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
".inst 0x45c79870 // ummla z16.s, z3.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x45c6982c // ummla z12.s, z1.b, z6.b\n"
".inst 0x45c69874 // ummla z20.s, z3.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45c79829 // ummla z9.s, z1.b, z7.b\n"
".inst 0x45c79871 // ummla z17.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x45c6982d // ummla z13.s, z1.b, z6.b\n"
".inst 0x45c69875 // ummla z21.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x45c7982a // ummla z10.s, z1.b, z7.b\n"
".inst 0x45c79872 // ummla z18.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x45c6982e // ummla z14.s, z1.b, z6.b\n"
".inst 0x45c69876 // ummla z22.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #8\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
".inst 0x45c7982b // ummla z11.s, z1.b, z7.b\n"
".inst 0x45c79873 // ummla z19.s, z3.b, z7.b\n"
".inst 0x45c6982f // ummla z15.s, z1.b, z6.b\n"
".inst 0x45c69877 // ummla z23.s, z3.b, z6.b\n"
"32:" // Height 3: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 27b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"uzp1 z7.d, z8.d, z12.d\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 z8.d, z8.d, z12.d\n"
+ "st1w { z7.s }, p4, [x28]\n"
"uzp1 z12.d, z9.d, z13.d\n"
- "st1w { z7.s }, p4, [x9]\n"
+ "add x23, x28, x19, LSL #2\n"
"uzp2 z9.d, z9.d, z13.d\n"
+ "st1w { z12.s }, p3, [x28, #1, MUL VL]\n"
"uzp1 z13.d, z10.d, z14.d\n"
- "st1w { z12.s }, p3, [x9, #1, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
"uzp2 z10.d, z10.d, z14.d\n"
+ "st1w { z13.s }, p2, [x28, #2, MUL VL]\n"
"uzp1 z14.d, z11.d, z15.d\n"
- "st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
"uzp2 z11.d, z11.d, z15.d\n"
+ "st1w { z14.s }, p1, [x28, #3, MUL VL]\n"
"uzp1 z16.d, z16.d, z20.d\n"
- "st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "addvl x28, x28, #4\n"
"uzp1 z17.d, z17.d, z21.d\n"
+ "st1w { z8.s }, p4, [x23]\n"
"uzp1 z18.d, z18.d, z22.d\n"
- "st1w { z8.s }, p4, [x24]\n"
+ "st1w { z9.s }, p3, [x23, #1, MUL VL]\n"
"uzp1 z19.d, z19.d, z23.d\n"
- "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x23]\n"
- "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z10.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x22]\n"
+ "st1w { z17.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x22, #3, MUL VL]\n"
"33:" // Height 3: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 24b\n"
"b 68f\n"
"34:" // Height 4
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"35:" // Height 4: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
"tbz %x[flags], #0, 36f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z9.s }, p4/Z, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "ld1w { z10.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z11.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x24]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z9.s }, p4/Z, [x28]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "ld1w { z10.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z12.s }, p4/Z, [x23]\n"
"zip1 z8.d, z9.d, z12.d\n"
+ "ld1w { z13.s }, p3/Z, [x23, #1, MUL VL]\n"
"zip2 z12.d, z9.d, z12.d\n"
- "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x23, #3, MUL VL]\n"
"zip1 z9.d, z10.d, z13.d\n"
+ "ld1w { z17.s }, p4/Z, [x22]\n"
"zip2 z13.d, z10.d, z13.d\n"
- "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z17.s }, p4/Z, [x23]\n"
+ "ld1w { z18.s }, p3/Z, [x22, #1, MUL VL]\n"
"zip1 z10.d, z11.d, z14.d\n"
+ "ld1w { z19.s }, p2/Z, [x22, #2, MUL VL]\n"
"zip2 z14.d, z11.d, z14.d\n"
- "ld1w { z18.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z19.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z24.s }, p1/Z, [x22, #3, MUL VL]\n"
"zip1 z11.d, z16.d, z15.d\n"
+ "ld1w { z20.s }, p4/Z, [x21]\n"
"zip2 z15.d, z16.d, z15.d\n"
- "ld1w { z24.s }, p1/Z, [x23, #3, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x22]\n"
+ "ld1w { z21.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x21, #2, MUL VL]\n"
"zip1 z16.d, z17.d, z20.d\n"
+ "ld1w { z23.s }, p1/Z, [x21, #3, MUL VL]\n"
"zip2 z20.d, z17.d, z20.d\n"
- "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
- "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
"zip1 z17.d, z18.d, z21.d\n"
"zip2 z21.d, z18.d, z21.d\n"
- "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
"zip1 z18.d, z19.d, z22.d\n"
"zip2 z22.d, z19.d, z22.d\n"
"zip1 z19.d, z24.d, z23.d\n"
@@ -742,263 +742,263 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"37:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"38:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 39f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 40f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 40f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
"b 40f\n"
"39:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
"40:" // Height 4: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 42f\n"
"41:" // Height 4: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "cmp x26, #0x10\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1rqb { z3.b }, p0/Z, [x24]\n"
- "ld1rqb { z4.b }, p0/Z, [x23]\n"
+ "ld1rqb { z3.b }, p0/Z, [x23]\n"
+ "add x25, x25, #0x10\n"
"trn2 z1.d, z1.d, z2.d\n"
- "trn1 z2.d, z3.d, z4.d\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1rqb { z4.b }, p0/Z, [x22]\n"
+ "add x24, x24, #0x10\n"
".inst 0x45c79808 // ummla z8.s, z0.b, z7.b\n"
- ".inst 0x45c79850 // ummla z16.s, z2.b, z7.b\n"
+ "add x23, x23, #0x10\n"
".inst 0x45c6980c // ummla z12.s, z0.b, z6.b\n"
+ "add x22, x22, #0x10\n"
+ "trn1 z2.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
+ ".inst 0x45c79850 // ummla z16.s, z2.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x45c69854 // ummla z20.s, z2.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45c79809 // ummla z9.s, z0.b, z7.b\n"
".inst 0x45c79851 // ummla z17.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
- "trn2 z3.d, z3.d, z4.d\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x45c6980d // ummla z13.s, z0.b, z6.b\n"
".inst 0x45c69855 // ummla z21.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
- "sub x27, x27, #0x10\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x45c7980a // ummla z10.s, z0.b, z7.b\n"
".inst 0x45c79852 // ummla z18.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
- "cmp x27, #0x10\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x45c6980e // ummla z14.s, z0.b, z6.b\n"
".inst 0x45c69856 // ummla z22.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #16\n"
".inst 0x45c7980b // ummla z11.s, z0.b, z7.b\n"
".inst 0x45c79853 // ummla z19.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-8, MUL VL]\n"
- "add x26, x26, #0x10\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-8, MUL VL]\n"
".inst 0x45c6980f // ummla z15.s, z0.b, z6.b\n"
".inst 0x45c69857 // ummla z23.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #-7, MUL VL]\n"
- "add x25, x25, #0x10\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-7, MUL VL]\n"
".inst 0x45c79828 // ummla z8.s, z1.b, z7.b\n"
".inst 0x45c79870 // ummla z16.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-6, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-6, MUL VL]\n"
".inst 0x45c6982c // ummla z12.s, z1.b, z6.b\n"
".inst 0x45c69874 // ummla z20.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #-5, MUL VL]\n"
- "add x23, x23, #0x10\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-5, MUL VL]\n"
".inst 0x45c79829 // ummla z9.s, z1.b, z7.b\n"
".inst 0x45c79871 // ummla z17.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-4, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-4, MUL VL]\n"
".inst 0x45c6982d // ummla z13.s, z1.b, z6.b\n"
".inst 0x45c69875 // ummla z21.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #-3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-3, MUL VL]\n"
".inst 0x45c7982a // ummla z10.s, z1.b, z7.b\n"
".inst 0x45c79872 // ummla z18.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-2, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-2, MUL VL]\n"
".inst 0x45c6982e // ummla z14.s, z1.b, z6.b\n"
".inst 0x45c69876 // ummla z22.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #-1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-1, MUL VL]\n"
".inst 0x45c7982b // ummla z11.s, z1.b, z7.b\n"
".inst 0x45c79873 // ummla z19.s, z3.b, z7.b\n"
".inst 0x45c6982f // ummla z15.s, z1.b, z6.b\n"
".inst 0x45c69877 // ummla z23.s, z3.b, z6.b\n"
"bgt 41b\n"
"42:" // Height 4: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x8\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1rqb { z3.b }, p0/Z, [x24]\n"
- "ld1rqb { z4.b }, p0/Z, [x23]\n"
+ "ld1rqb { z3.b }, p0/Z, [x23]\n"
"trn2 z1.d, z1.d, z2.d\n"
- "trn1 z2.d, z3.d, z4.d\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1rqb { z4.b }, p0/Z, [x22]\n"
".inst 0x45c79808 // ummla z8.s, z0.b, z7.b\n"
- ".inst 0x45c79850 // ummla z16.s, z2.b, z7.b\n"
".inst 0x45c6980c // ummla z12.s, z0.b, z6.b\n"
+ "trn1 z2.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
+ ".inst 0x45c79850 // ummla z16.s, z2.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x45c69854 // ummla z20.s, z2.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45c79809 // ummla z9.s, z0.b, z7.b\n"
".inst 0x45c79851 // ummla z17.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
- "subs x27, x27, #0x8\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x45c6980d // ummla z13.s, z0.b, z6.b\n"
".inst 0x45c69855 // ummla z21.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
- "trn2 z3.d, z3.d, z4.d\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x45c7980a // ummla z10.s, z0.b, z7.b\n"
".inst 0x45c79852 // ummla z18.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x45c6980e // ummla z14.s, z0.b, z6.b\n"
".inst 0x45c69856 // ummla z22.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #8\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
".inst 0x45c7980b // ummla z11.s, z0.b, z7.b\n"
".inst 0x45c79853 // ummla z19.s, z2.b, z7.b\n"
".inst 0x45c6980f // ummla z15.s, z0.b, z6.b\n"
".inst 0x45c69857 // ummla z23.s, z2.b, z6.b\n"
"ble 43f\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
".inst 0x45c79828 // ummla z8.s, z1.b, z7.b\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
".inst 0x45c79870 // ummla z16.s, z3.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x45c6982c // ummla z12.s, z1.b, z6.b\n"
".inst 0x45c69874 // ummla z20.s, z3.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45c79829 // ummla z9.s, z1.b, z7.b\n"
".inst 0x45c79871 // ummla z17.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x45c6982d // ummla z13.s, z1.b, z6.b\n"
".inst 0x45c69875 // ummla z21.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x45c7982a // ummla z10.s, z1.b, z7.b\n"
".inst 0x45c79872 // ummla z18.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x45c6982e // ummla z14.s, z1.b, z6.b\n"
".inst 0x45c69876 // ummla z22.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #8\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
".inst 0x45c7982b // ummla z11.s, z1.b, z7.b\n"
".inst 0x45c79873 // ummla z19.s, z3.b, z7.b\n"
".inst 0x45c6982f // ummla z15.s, z1.b, z6.b\n"
".inst 0x45c69877 // ummla z23.s, z3.b, z6.b\n"
"43:" // Height 4: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 38b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"uzp1 z7.d, z8.d, z12.d\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 z8.d, z8.d, z12.d\n"
+ "st1w { z7.s }, p4, [x28]\n"
"uzp1 z12.d, z9.d, z13.d\n"
- "st1w { z7.s }, p4, [x9]\n"
+ "add x23, x28, x19, LSL #2\n"
"uzp2 z9.d, z9.d, z13.d\n"
+ "st1w { z12.s }, p3, [x28, #1, MUL VL]\n"
"uzp1 z13.d, z10.d, z14.d\n"
- "st1w { z12.s }, p3, [x9, #1, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
"uzp2 z10.d, z10.d, z14.d\n"
+ "st1w { z13.s }, p2, [x28, #2, MUL VL]\n"
"uzp1 z14.d, z11.d, z15.d\n"
- "st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
+ "add x21, x22, x19, LSL #2\n"
"uzp2 z11.d, z11.d, z15.d\n"
+ "st1w { z14.s }, p1, [x28, #3, MUL VL]\n"
"uzp1 z15.d, z16.d, z20.d\n"
- "st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "addvl x28, x28, #4\n"
"uzp2 z16.d, z16.d, z20.d\n"
+ "st1w { z8.s }, p4, [x23]\n"
"uzp1 z20.d, z17.d, z21.d\n"
- "st1w { z8.s }, p4, [x24]\n"
+ "st1w { z9.s }, p3, [x23, #1, MUL VL]\n"
"uzp2 z17.d, z17.d, z21.d\n"
+ "st1w { z10.s }, p2, [x23, #2, MUL VL]\n"
"uzp1 z21.d, z18.d, z22.d\n"
- "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z11.s }, p1, [x23, #3, MUL VL]\n"
"uzp2 z18.d, z18.d, z22.d\n"
+ "st1w { z15.s }, p4, [x22]\n"
"uzp1 z22.d, z19.d, z23.d\n"
- "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z20.s }, p3, [x22, #1, MUL VL]\n"
"uzp2 z19.d, z19.d, z23.d\n"
- "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z15.s }, p4, [x23]\n"
- "st1w { z20.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z21.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z22.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x22]\n"
- "st1w { z17.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z21.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z22.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x21]\n"
+ "st1w { z17.s }, p3, [x21, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x21, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x21, #3, MUL VL]\n"
"44:" // Height 4: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 35b\n"
"b 68f\n"
"45:" // Height 5
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"46:" // Height 5: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
"tbz %x[flags], #0, 47f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z9.s }, p4/Z, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "ld1w { z10.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z11.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x24]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z9.s }, p4/Z, [x28]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "ld1w { z10.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z12.s }, p4/Z, [x23]\n"
"zip1 z8.d, z9.d, z12.d\n"
+ "ld1w { z13.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "add x20, x21, x19, LSL #2\n"
"zip2 z12.d, z9.d, z12.d\n"
- "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x23, #3, MUL VL]\n"
"zip1 z9.d, z10.d, z13.d\n"
+ "ld1w { z17.s }, p4/Z, [x22]\n"
"zip2 z13.d, z10.d, z13.d\n"
- "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z17.s }, p4/Z, [x23]\n"
+ "ld1w { z18.s }, p3/Z, [x22, #1, MUL VL]\n"
"zip1 z10.d, z11.d, z14.d\n"
+ "ld1w { z19.s }, p2/Z, [x22, #2, MUL VL]\n"
"zip2 z14.d, z11.d, z14.d\n"
- "ld1w { z18.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z19.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z24.s }, p1/Z, [x22, #3, MUL VL]\n"
"zip1 z11.d, z16.d, z15.d\n"
+ "ld1w { z20.s }, p4/Z, [x21]\n"
"zip2 z15.d, z16.d, z15.d\n"
- "ld1w { z24.s }, p1/Z, [x23, #3, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x22]\n"
+ "ld1w { z21.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x21, #2, MUL VL]\n"
"zip1 z16.d, z17.d, z20.d\n"
+ "ld1w { z23.s }, p1/Z, [x21, #3, MUL VL]\n"
"zip2 z20.d, z17.d, z20.d\n"
- "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
- "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z25.s }, p4/Z, [x20]\n"
"zip1 z17.d, z18.d, z21.d\n"
+ "ld1w { z26.s }, p3/Z, [x20, #1, MUL VL]\n"
"zip2 z21.d, z18.d, z21.d\n"
- "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
- "ld1w { z25.s }, p4/Z, [x21]\n"
+ "ld1w { z27.s }, p2/Z, [x20, #2, MUL VL]\n"
"zip1 z18.d, z19.d, z22.d\n"
+ "ld1w { z6.s }, p1/Z, [x20, #3, MUL VL]\n"
"zip2 z22.d, z19.d, z22.d\n"
- "ld1w { z26.s }, p3/Z, [x21, #1, MUL VL]\n"
- "ld1w { z27.s }, p2/Z, [x21, #2, MUL VL]\n"
"zip1 z19.d, z24.d, z23.d\n"
"zip2 z23.d, z24.d, z23.d\n"
- "ld1w { z6.s }, p1/Z, [x21, #3, MUL VL]\n"
"zip1 z24.d, z25.d, z28.d\n"
"zip2 z28.d, z25.d, z28.d\n"
"zip1 z25.d, z26.d, z29.d\n"
@@ -1034,115 +1034,115 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"mov z30.s, #0x0\n"
"mov z31.s, #0x0\n"
"48:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"49:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 50f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 51f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 51f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
"b 51f\n"
"50:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
"51:" // Height 5: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 53f\n"
"52:" // Height 5: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
- "ld1rqb { z3.b }, p0/Z, [x24]\n"
- "ld1rqb { z4.b }, p0/Z, [x23]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
+ "ld1rqb { z3.b }, p0/Z, [x23]\n"
+ "sub x26, x26, #0x10\n"
"trn2 z1.d, z1.d, z2.d\n"
- "ld1rqb { z5.b }, p0/Z, [x22]\n"
+ "ld1rqb { z4.b }, p0/Z, [x22]\n"
+ "cmp x26, #0x10\n"
+ ".inst 0x45c79808 // ummla z8.s, z0.b, z7.b\n"
+ "ld1rqb { z5.b }, p0/Z, [x21]\n"
+ "add x25, x25, #0x10\n"
"trn1 z2.d, z3.d, z4.d\n"
+ "add x24, x24, #0x10\n"
"trn2 z3.d, z3.d, z4.d\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
+ "add x23, x23, #0x10\n"
"trn1 z4.d, z5.d, z6.d\n"
+ "add x22, x22, #0x10\n"
"trn2 z5.d, z5.d, z6.d\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x45c79808 // ummla z8.s, z0.b, z7.b\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
+ "add x21, x21, #0x10\n"
".inst 0x45c79850 // ummla z16.s, z2.b, z7.b\n"
".inst 0x45c79898 // ummla z24.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "sub x27, x27, #0x10\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x45c6980c // ummla z12.s, z0.b, z6.b\n"
".inst 0x45c69854 // ummla z20.s, z2.b, z6.b\n"
- "cmp x27, #0x10\n"
- "add x26, x26, #0x10\n"
".inst 0x45c6989c // ummla z28.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45c79809 // ummla z9.s, z0.b, z7.b\n"
- "add x25, x25, #0x10\n"
".inst 0x45c79851 // ummla z17.s, z2.b, z7.b\n"
".inst 0x45c79899 // ummla z25.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x45c6980d // ummla z13.s, z0.b, z6.b\n"
".inst 0x45c69855 // ummla z21.s, z2.b, z6.b\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
".inst 0x45c6989d // ummla z29.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x45c7980a // ummla z10.s, z0.b, z7.b\n"
".inst 0x45c79852 // ummla z18.s, z2.b, z7.b\n"
".inst 0x45c7989a // ummla z26.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x45c6980e // ummla z14.s, z0.b, z6.b\n"
".inst 0x45c69856 // ummla z22.s, z2.b, z6.b\n"
".inst 0x45c6989e // ummla z30.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #16\n"
".inst 0x45c7980b // ummla z11.s, z0.b, z7.b\n"
".inst 0x45c79853 // ummla z19.s, z2.b, z7.b\n"
".inst 0x45c7989b // ummla z27.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-8, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-8, MUL VL]\n"
".inst 0x45c6980f // ummla z15.s, z0.b, z6.b\n"
".inst 0x45c69857 // ummla z23.s, z2.b, z6.b\n"
".inst 0x45c6989f // ummla z31.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #-7, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-7, MUL VL]\n"
".inst 0x45c79828 // ummla z8.s, z1.b, z7.b\n"
".inst 0x45c79870 // ummla z16.s, z3.b, z7.b\n"
".inst 0x45c798b8 // ummla z24.s, z5.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-6, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-6, MUL VL]\n"
".inst 0x45c6982c // ummla z12.s, z1.b, z6.b\n"
".inst 0x45c69874 // ummla z20.s, z3.b, z6.b\n"
".inst 0x45c698bc // ummla z28.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #-5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-5, MUL VL]\n"
".inst 0x45c79829 // ummla z9.s, z1.b, z7.b\n"
".inst 0x45c79871 // ummla z17.s, z3.b, z7.b\n"
".inst 0x45c798b9 // ummla z25.s, z5.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-4, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-4, MUL VL]\n"
".inst 0x45c6982d // ummla z13.s, z1.b, z6.b\n"
".inst 0x45c69875 // ummla z21.s, z3.b, z6.b\n"
".inst 0x45c698bd // ummla z29.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #-3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-3, MUL VL]\n"
".inst 0x45c7982a // ummla z10.s, z1.b, z7.b\n"
".inst 0x45c79872 // ummla z18.s, z3.b, z7.b\n"
".inst 0x45c798ba // ummla z26.s, z5.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-2, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-2, MUL VL]\n"
".inst 0x45c6982e // ummla z14.s, z1.b, z6.b\n"
".inst 0x45c69876 // ummla z22.s, z3.b, z6.b\n"
".inst 0x45c698be // ummla z30.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #-1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-1, MUL VL]\n"
".inst 0x45c7982b // ummla z11.s, z1.b, z7.b\n"
".inst 0x45c79873 // ummla z19.s, z3.b, z7.b\n"
".inst 0x45c798bb // ummla z27.s, z5.b, z7.b\n"
@@ -1151,80 +1151,80 @@ void sve_hybrid_u8u32_mmla_6x4VL (
".inst 0x45c698bf // ummla z31.s, z5.b, z6.b\n"
"bgt 52b\n"
"53:" // Height 5: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
- "ld1rqb { z3.b }, p0/Z, [x24]\n"
- "ld1rqb { z4.b }, p0/Z, [x23]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
+ "ld1rqb { z3.b }, p0/Z, [x23]\n"
+ "subs x26, x26, #0x8\n"
"trn2 z1.d, z1.d, z2.d\n"
- "ld1rqb { z5.b }, p0/Z, [x22]\n"
+ "ld1rqb { z4.b }, p0/Z, [x22]\n"
+ "ld1rqb { z5.b }, p0/Z, [x21]\n"
+ ".inst 0x45c79808 // ummla z8.s, z0.b, z7.b\n"
"trn1 z2.d, z3.d, z4.d\n"
"trn2 z3.d, z3.d, z4.d\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
"trn1 z4.d, z5.d, z6.d\n"
"trn2 z5.d, z5.d, z6.d\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x45c79808 // ummla z8.s, z0.b, z7.b\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
".inst 0x45c79850 // ummla z16.s, z2.b, z7.b\n"
".inst 0x45c79898 // ummla z24.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x8\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x45c6980c // ummla z12.s, z0.b, z6.b\n"
".inst 0x45c69854 // ummla z20.s, z2.b, z6.b\n"
".inst 0x45c6989c // ummla z28.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45c79809 // ummla z9.s, z0.b, z7.b\n"
".inst 0x45c79851 // ummla z17.s, z2.b, z7.b\n"
".inst 0x45c79899 // ummla z25.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x45c6980d // ummla z13.s, z0.b, z6.b\n"
".inst 0x45c69855 // ummla z21.s, z2.b, z6.b\n"
".inst 0x45c6989d // ummla z29.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x45c7980a // ummla z10.s, z0.b, z7.b\n"
".inst 0x45c79852 // ummla z18.s, z2.b, z7.b\n"
".inst 0x45c7989a // ummla z26.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x45c6980e // ummla z14.s, z0.b, z6.b\n"
".inst 0x45c69856 // ummla z22.s, z2.b, z6.b\n"
".inst 0x45c6989e // ummla z30.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
".inst 0x45c7980b // ummla z11.s, z0.b, z7.b\n"
- "addvl x10, x10, #8\n"
".inst 0x45c79853 // ummla z19.s, z2.b, z7.b\n"
".inst 0x45c7989b // ummla z27.s, z4.b, z7.b\n"
".inst 0x45c6980f // ummla z15.s, z0.b, z6.b\n"
".inst 0x45c69857 // ummla z23.s, z2.b, z6.b\n"
".inst 0x45c6989f // ummla z31.s, z4.b, z6.b\n"
"ble 54f\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
".inst 0x45c79828 // ummla z8.s, z1.b, z7.b\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
".inst 0x45c79870 // ummla z16.s, z3.b, z7.b\n"
".inst 0x45c798b8 // ummla z24.s, z5.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x45c6982c // ummla z12.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x45c69874 // ummla z20.s, z3.b, z6.b\n"
".inst 0x45c698bc // ummla z28.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45c79829 // ummla z9.s, z1.b, z7.b\n"
".inst 0x45c79871 // ummla z17.s, z3.b, z7.b\n"
".inst 0x45c798b9 // ummla z25.s, z5.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x45c6982d // ummla z13.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
".inst 0x45c69875 // ummla z21.s, z3.b, z6.b\n"
".inst 0x45c698bd // ummla z29.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x45c7982a // ummla z10.s, z1.b, z7.b\n"
".inst 0x45c79872 // ummla z18.s, z3.b, z7.b\n"
".inst 0x45c798ba // ummla z26.s, z5.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x45c6982e // ummla z14.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
".inst 0x45c69876 // ummla z22.s, z3.b, z6.b\n"
".inst 0x45c698be // ummla z30.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #8\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
".inst 0x45c7982b // ummla z11.s, z1.b, z7.b\n"
".inst 0x45c79873 // ummla z19.s, z3.b, z7.b\n"
".inst 0x45c798bb // ummla z27.s, z5.b, z7.b\n"
@@ -1232,127 +1232,127 @@ void sve_hybrid_u8u32_mmla_6x4VL (
".inst 0x45c69877 // ummla z23.s, z3.b, z6.b\n"
".inst 0x45c698bf // ummla z31.s, z5.b, z6.b\n"
"54:" // Height 5: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 49b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"uzp1 z7.d, z8.d, z12.d\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 z8.d, z8.d, z12.d\n"
+ "st1w { z7.s }, p4, [x28]\n"
"uzp1 z12.d, z9.d, z13.d\n"
+ "add x23, x28, x19, LSL #2\n"
"uzp2 z9.d, z9.d, z13.d\n"
+ "st1w { z12.s }, p3, [x28, #1, MUL VL]\n"
"uzp1 z13.d, z10.d, z14.d\n"
- "st1w { z7.s }, p4, [x9]\n"
+ "add x22, x23, x19, LSL #2\n"
"uzp2 z10.d, z10.d, z14.d\n"
+ "st1w { z13.s }, p2, [x28, #2, MUL VL]\n"
"uzp1 z14.d, z11.d, z15.d\n"
- "st1w { z12.s }, p3, [x9, #1, MUL VL]\n"
+ "add x21, x22, x19, LSL #2\n"
"uzp2 z11.d, z11.d, z15.d\n"
+ "st1w { z14.s }, p1, [x28, #3, MUL VL]\n"
"uzp1 z15.d, z16.d, z20.d\n"
- "st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
+ "add x20, x21, x19, LSL #2\n"
"uzp2 z16.d, z16.d, z20.d\n"
+ "st1w { z8.s }, p4, [x23]\n"
+ "addvl x28, x28, #4\n"
"uzp1 z20.d, z17.d, z21.d\n"
- "st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "st1w { z9.s }, p3, [x23, #1, MUL VL]\n"
"uzp2 z17.d, z17.d, z21.d\n"
+ "st1w { z10.s }, p2, [x23, #2, MUL VL]\n"
"uzp1 z21.d, z18.d, z22.d\n"
- "st1w { z8.s }, p4, [x24]\n"
+ "st1w { z11.s }, p1, [x23, #3, MUL VL]\n"
"uzp2 z18.d, z18.d, z22.d\n"
+ "st1w { z15.s }, p4, [x22]\n"
"uzp1 z22.d, z19.d, z23.d\n"
- "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z20.s }, p3, [x22, #1, MUL VL]\n"
"uzp2 z19.d, z19.d, z23.d\n"
+ "st1w { z21.s }, p2, [x22, #2, MUL VL]\n"
"uzp1 z24.d, z24.d, z28.d\n"
- "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z22.s }, p1, [x22, #3, MUL VL]\n"
"uzp1 z25.d, z25.d, z29.d\n"
+ "st1w { z16.s }, p4, [x21]\n"
"uzp1 z26.d, z26.d, z30.d\n"
- "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z17.s }, p3, [x21, #1, MUL VL]\n"
"uzp1 z27.d, z27.d, z31.d\n"
- "st1w { z15.s }, p4, [x23]\n"
- "st1w { z20.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z21.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z22.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x22]\n"
- "st1w { z17.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x22, #3, MUL VL]\n"
- "st1w { z24.s }, p4, [x21]\n"
- "st1w { z25.s }, p3, [x21, #1, MUL VL]\n"
- "st1w { z26.s }, p2, [x21, #2, MUL VL]\n"
- "st1w { z27.s }, p1, [x21, #3, MUL VL]\n"
+ "st1w { z18.s }, p2, [x21, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x21, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x20]\n"
+ "st1w { z25.s }, p3, [x20, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x20, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x20, #3, MUL VL]\n"
"55:" // Height 5: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 46b\n"
"b 68f\n"
"56:" // Height 6
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x28, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"mov x20, #0x18\n"
- "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x9, %x[output_ptr]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
"57:" // Height 6: Column loop
- "mov x20, #0x0\n"
- "whilelt p4.s, x20, x11\n"
- "incw x20\n"
- "whilelt p3.s, x20, x11\n"
- "incw x20\n"
- "whilelt p2.s, x20, x11\n"
- "incw x20\n"
- "whilelt p1.s, x20, x11\n"
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x10\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x10\n"
"tbz %x[flags], #0, 58f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "ld1w { z9.s }, p4/Z, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "ld1w { z10.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z11.s }, p2/Z, [x9, #2, MUL VL]\n"
- "add x20, x21, x20, LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p4/Z, [x24]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z9.s }, p4/Z, [x28]\n"
+ "add x23, x28, x19, LSL #2\n"
+ "ld1w { z10.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x28, #3, MUL VL]\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z12.s }, p4/Z, [x23]\n"
"zip1 z8.d, z9.d, z12.d\n"
- "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z13.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "add x20, x21, x19, LSL #2\n"
"zip2 z12.d, z9.d, z12.d\n"
+ "ld1w { z14.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "add x19, x20, x19, LSL #2\n"
"zip1 z9.d, z10.d, z13.d\n"
- "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
- "ld1w { z17.s }, p4/Z, [x23]\n"
+ "ld1w { z15.s }, p1/Z, [x23, #3, MUL VL]\n"
"zip2 z13.d, z10.d, z13.d\n"
+ "ld1w { z17.s }, p4/Z, [x22]\n"
"zip1 z10.d, z11.d, z14.d\n"
- "ld1w { z18.s }, p3/Z, [x23, #1, MUL VL]\n"
- "ld1w { z19.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z18.s }, p3/Z, [x22, #1, MUL VL]\n"
"zip2 z14.d, z11.d, z14.d\n"
+ "ld1w { z19.s }, p2/Z, [x22, #2, MUL VL]\n"
"zip1 z11.d, z16.d, z15.d\n"
- "ld1w { z24.s }, p1/Z, [x23, #3, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x22]\n"
+ "ld1w { z24.s }, p1/Z, [x22, #3, MUL VL]\n"
"zip2 z15.d, z16.d, z15.d\n"
+ "ld1w { z20.s }, p4/Z, [x21]\n"
+ "ld1w { z21.s }, p3/Z, [x21, #1, MUL VL]\n"
"zip1 z16.d, z17.d, z20.d\n"
- "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
- "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x21, #2, MUL VL]\n"
"zip2 z20.d, z17.d, z20.d\n"
+ "ld1w { z23.s }, p1/Z, [x21, #3, MUL VL]\n"
"zip1 z17.d, z18.d, z21.d\n"
- "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
- "ld1w { z25.s }, p4/Z, [x21]\n"
+ "ld1w { z25.s }, p4/Z, [x20]\n"
"zip2 z21.d, z18.d, z21.d\n"
+ "ld1w { z26.s }, p3/Z, [x20, #1, MUL VL]\n"
"zip1 z18.d, z19.d, z22.d\n"
- "ld1w { z26.s }, p3/Z, [x21, #1, MUL VL]\n"
- "ld1w { z27.s }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z27.s }, p2/Z, [x20, #2, MUL VL]\n"
"zip2 z22.d, z19.d, z22.d\n"
+ "ld1w { z6.s }, p1/Z, [x20, #3, MUL VL]\n"
"zip1 z19.d, z24.d, z23.d\n"
- "ld1w { z6.s }, p1/Z, [x21, #3, MUL VL]\n"
- "ld1w { z28.s }, p4/Z, [x20]\n"
+ "ld1w { z28.s }, p4/Z, [x19]\n"
"zip2 z23.d, z24.d, z23.d\n"
+ "ld1w { z29.s }, p3/Z, [x19, #1, MUL VL]\n"
+ "ld1w { z30.s }, p2/Z, [x19, #2, MUL VL]\n"
"zip1 z24.d, z25.d, z28.d\n"
- "ld1w { z29.s }, p3/Z, [x20, #1, MUL VL]\n"
- "ld1w { z30.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z31.s }, p1/Z, [x19, #3, MUL VL]\n"
"zip2 z28.d, z25.d, z28.d\n"
"zip1 z25.d, z26.d, z29.d\n"
- "ld1w { z31.s }, p1/Z, [x20, #3, MUL VL]\n"
"zip2 z29.d, z26.d, z29.d\n"
"zip1 z26.d, z27.d, z30.d\n"
"zip2 z30.d, z27.d, z30.d\n"
@@ -1385,120 +1385,120 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"mov z30.s, #0x0\n"
"mov z31.s, #0x0\n"
"59:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"60:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 61f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 62f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
- "add x21, x21, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 62f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
+ "add x20, x20, x19\n"
"b 62f\n"
"61:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
+ "add x20, x21, x19\n"
"62:" // Height 6: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 64f\n"
"63:" // Height 6: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1rqb { z3.b }, p0/Z, [x24]\n"
- "ld1rqb { z4.b }, p0/Z, [x23]\n"
+ "ld1rqb { z3.b }, p0/Z, [x23]\n"
+ "cmp x26, #0x10\n"
"trn2 z1.d, z1.d, z2.d\n"
+ "ld1rqb { z4.b }, p0/Z, [x22]\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0x45c79808 // ummla z8.s, z0.b, z7.b\n"
+ "ld1rqb { z5.b }, p0/Z, [x21]\n"
+ "add x24, x24, #0x10\n"
"trn1 z2.d, z3.d, z4.d\n"
- "ld1rqb { z5.b }, p0/Z, [x22]\n"
- "ld1rqb { z6.b }, p0/Z, [x21]\n"
+ "ld1rqb { z6.b }, p0/Z, [x20]\n"
+ "add x23, x23, #0x10\n"
"trn2 z3.d, z3.d, z4.d\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x45c79850 // ummla z16.s, z2.b, z7.b\n"
+ "add x20, x20, #0x10\n"
"trn1 z4.d, z5.d, z6.d\n"
"trn2 z5.d, z5.d, z6.d\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x45c79808 // ummla z8.s, z0.b, z7.b\n"
- ".inst 0x45c79850 // ummla z16.s, z2.b, z7.b\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
".inst 0x45c79898 // ummla z24.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "sub x27, x27, #0x10\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x45c6980c // ummla z12.s, z0.b, z6.b\n"
".inst 0x45c69854 // ummla z20.s, z2.b, z6.b\n"
- "cmp x27, #0x10\n"
- "add x26, x26, #0x10\n"
".inst 0x45c6989c // ummla z28.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45c79809 // ummla z9.s, z0.b, z7.b\n"
- "add x25, x25, #0x10\n"
".inst 0x45c79851 // ummla z17.s, z2.b, z7.b\n"
".inst 0x45c79899 // ummla z25.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x45c6980d // ummla z13.s, z0.b, z6.b\n"
".inst 0x45c69855 // ummla z21.s, z2.b, z6.b\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
".inst 0x45c6989d // ummla z29.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x45c7980a // ummla z10.s, z0.b, z7.b\n"
- "add x21, x21, #0x10\n"
".inst 0x45c79852 // ummla z18.s, z2.b, z7.b\n"
".inst 0x45c7989a // ummla z26.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x45c6980e // ummla z14.s, z0.b, z6.b\n"
".inst 0x45c69856 // ummla z22.s, z2.b, z6.b\n"
".inst 0x45c6989e // ummla z30.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #16\n"
".inst 0x45c7980b // ummla z11.s, z0.b, z7.b\n"
".inst 0x45c79853 // ummla z19.s, z2.b, z7.b\n"
".inst 0x45c7989b // ummla z27.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-8, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-8, MUL VL]\n"
".inst 0x45c6980f // ummla z15.s, z0.b, z6.b\n"
".inst 0x45c69857 // ummla z23.s, z2.b, z6.b\n"
".inst 0x45c6989f // ummla z31.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #-7, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-7, MUL VL]\n"
".inst 0x45c79828 // ummla z8.s, z1.b, z7.b\n"
".inst 0x45c79870 // ummla z16.s, z3.b, z7.b\n"
".inst 0x45c798b8 // ummla z24.s, z5.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-6, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-6, MUL VL]\n"
".inst 0x45c6982c // ummla z12.s, z1.b, z6.b\n"
".inst 0x45c69874 // ummla z20.s, z3.b, z6.b\n"
".inst 0x45c698bc // ummla z28.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #-5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-5, MUL VL]\n"
".inst 0x45c79829 // ummla z9.s, z1.b, z7.b\n"
".inst 0x45c79871 // ummla z17.s, z3.b, z7.b\n"
".inst 0x45c798b9 // ummla z25.s, z5.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-4, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-4, MUL VL]\n"
".inst 0x45c6982d // ummla z13.s, z1.b, z6.b\n"
".inst 0x45c69875 // ummla z21.s, z3.b, z6.b\n"
".inst 0x45c698bd // ummla z29.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #-3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-3, MUL VL]\n"
".inst 0x45c7982a // ummla z10.s, z1.b, z7.b\n"
".inst 0x45c79872 // ummla z18.s, z3.b, z7.b\n"
".inst 0x45c798ba // ummla z26.s, z5.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #-2, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #-2, MUL VL]\n"
".inst 0x45c6982e // ummla z14.s, z1.b, z6.b\n"
".inst 0x45c69876 // ummla z22.s, z3.b, z6.b\n"
".inst 0x45c698be // ummla z30.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #-1, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #-1, MUL VL]\n"
".inst 0x45c7982b // ummla z11.s, z1.b, z7.b\n"
".inst 0x45c79873 // ummla z19.s, z3.b, z7.b\n"
".inst 0x45c798bb // ummla z27.s, z5.b, z7.b\n"
@@ -1507,81 +1507,81 @@ void sve_hybrid_u8u32_mmla_6x4VL (
".inst 0x45c698bf // ummla z31.s, z5.b, z6.b\n"
"bgt 63b\n"
"64:" // Height 6: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "subs x26, x26, #0x8\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1rqb { z3.b }, p0/Z, [x24]\n"
- "ld1rqb { z4.b }, p0/Z, [x23]\n"
+ "ld1rqb { z3.b }, p0/Z, [x23]\n"
"trn2 z1.d, z1.d, z2.d\n"
+ "ld1rqb { z4.b }, p0/Z, [x22]\n"
+ "ld1rqb { z5.b }, p0/Z, [x21]\n"
+ ".inst 0x45c79808 // ummla z8.s, z0.b, z7.b\n"
+ "ld1rqb { z6.b }, p0/Z, [x20]\n"
"trn1 z2.d, z3.d, z4.d\n"
- "ld1rqb { z5.b }, p0/Z, [x22]\n"
- "ld1rqb { z6.b }, p0/Z, [x21]\n"
"trn2 z3.d, z3.d, z4.d\n"
"trn1 z4.d, z5.d, z6.d\n"
"trn2 z5.d, z5.d, z6.d\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x45c79808 // ummla z8.s, z0.b, z7.b\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
".inst 0x45c79850 // ummla z16.s, z2.b, z7.b\n"
".inst 0x45c79898 // ummla z24.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x8\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x45c6980c // ummla z12.s, z0.b, z6.b\n"
".inst 0x45c69854 // ummla z20.s, z2.b, z6.b\n"
".inst 0x45c6989c // ummla z28.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45c79809 // ummla z9.s, z0.b, z7.b\n"
".inst 0x45c79851 // ummla z17.s, z2.b, z7.b\n"
".inst 0x45c79899 // ummla z25.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x45c6980d // ummla z13.s, z0.b, z6.b\n"
".inst 0x45c69855 // ummla z21.s, z2.b, z6.b\n"
".inst 0x45c6989d // ummla z29.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x45c7980a // ummla z10.s, z0.b, z7.b\n"
".inst 0x45c79852 // ummla z18.s, z2.b, z7.b\n"
".inst 0x45c7989a // ummla z26.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x45c6980e // ummla z14.s, z0.b, z6.b\n"
".inst 0x45c69856 // ummla z22.s, z2.b, z6.b\n"
".inst 0x45c6989e // ummla z30.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
".inst 0x45c7980b // ummla z11.s, z0.b, z7.b\n"
- "addvl x10, x10, #8\n"
".inst 0x45c79853 // ummla z19.s, z2.b, z7.b\n"
".inst 0x45c7989b // ummla z27.s, z4.b, z7.b\n"
".inst 0x45c6980f // ummla z15.s, z0.b, z6.b\n"
".inst 0x45c69857 // ummla z23.s, z2.b, z6.b\n"
".inst 0x45c6989f // ummla z31.s, z4.b, z6.b\n"
"ble 65f\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z7.b }, p5/Z, [x9]\n"
".inst 0x45c79828 // ummla z8.s, z1.b, z7.b\n"
+ "ld1b { z6.b }, p5/Z, [x9, #1, MUL VL]\n"
".inst 0x45c79870 // ummla z16.s, z3.b, z7.b\n"
".inst 0x45c798b8 // ummla z24.s, z5.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x45c6982c // ummla z12.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x45c69874 // ummla z20.s, z3.b, z6.b\n"
".inst 0x45c698bc // ummla z28.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #3, MUL VL]\n"
".inst 0x45c79829 // ummla z9.s, z1.b, z7.b\n"
".inst 0x45c79871 // ummla z17.s, z3.b, z7.b\n"
".inst 0x45c798b9 // ummla z25.s, z5.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #4, MUL VL]\n"
".inst 0x45c6982d // ummla z13.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #4, MUL VL]\n"
".inst 0x45c69875 // ummla z21.s, z3.b, z6.b\n"
".inst 0x45c698bd // ummla z29.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "ld1b { z6.b }, p5/Z, [x9, #5, MUL VL]\n"
".inst 0x45c7982a // ummla z10.s, z1.b, z7.b\n"
".inst 0x45c79872 // ummla z18.s, z3.b, z7.b\n"
".inst 0x45c798ba // ummla z26.s, z5.b, z7.b\n"
+ "ld1b { z7.b }, p5/Z, [x9, #6, MUL VL]\n"
".inst 0x45c6982e // ummla z14.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p5/Z, [x10, #6, MUL VL]\n"
".inst 0x45c69876 // ummla z22.s, z3.b, z6.b\n"
".inst 0x45c698be // ummla z30.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #8\n"
+ "ld1b { z6.b }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
".inst 0x45c7982b // ummla z11.s, z1.b, z7.b\n"
".inst 0x45c79873 // ummla z19.s, z3.b, z7.b\n"
".inst 0x45c798bb // ummla z27.s, z5.b, z7.b\n"
@@ -1589,85 +1589,85 @@ void sve_hybrid_u8u32_mmla_6x4VL (
".inst 0x45c69877 // ummla z23.s, z3.b, z6.b\n"
".inst 0x45c698bf // ummla z31.s, z5.b, z6.b\n"
"65:" // Height 6: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 60b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"uzp1 z7.d, z8.d, z12.d\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 z8.d, z8.d, z12.d\n"
+ "st1w { z7.s }, p4, [x28]\n"
"uzp1 z12.d, z9.d, z13.d\n"
- "add x20, x21, x20, LSL #2\n"
+ "add x23, x28, x19, LSL #2\n"
"uzp2 z9.d, z9.d, z13.d\n"
+ "st1w { z12.s }, p3, [x28, #1, MUL VL]\n"
"uzp1 z13.d, z10.d, z14.d\n"
- "st1w { z7.s }, p4, [x9]\n"
+ "add x22, x23, x19, LSL #2\n"
"uzp2 z10.d, z10.d, z14.d\n"
+ "st1w { z13.s }, p2, [x28, #2, MUL VL]\n"
"uzp1 z14.d, z11.d, z15.d\n"
- "st1w { z12.s }, p3, [x9, #1, MUL VL]\n"
+ "add x21, x22, x19, LSL #2\n"
"uzp2 z11.d, z11.d, z15.d\n"
+ "st1w { z14.s }, p1, [x28, #3, MUL VL]\n"
"uzp1 z15.d, z16.d, z20.d\n"
- "st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
+ "add x20, x21, x19, LSL #2\n"
"uzp2 z16.d, z16.d, z20.d\n"
+ "st1w { z8.s }, p4, [x23]\n"
+ "add x19, x20, x19, LSL #2\n"
"uzp1 z20.d, z17.d, z21.d\n"
- "st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "st1w { z9.s }, p3, [x23, #1, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"uzp2 z17.d, z17.d, z21.d\n"
+ "st1w { z10.s }, p2, [x23, #2, MUL VL]\n"
"uzp1 z21.d, z18.d, z22.d\n"
- "st1w { z8.s }, p4, [x24]\n"
+ "st1w { z11.s }, p1, [x23, #3, MUL VL]\n"
"uzp2 z18.d, z18.d, z22.d\n"
+ "st1w { z15.s }, p4, [x22]\n"
"uzp1 z22.d, z19.d, z23.d\n"
- "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z20.s }, p3, [x22, #1, MUL VL]\n"
"uzp2 z19.d, z19.d, z23.d\n"
+ "st1w { z21.s }, p2, [x22, #2, MUL VL]\n"
"uzp1 z23.d, z24.d, z28.d\n"
- "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z22.s }, p1, [x22, #3, MUL VL]\n"
"uzp2 z24.d, z24.d, z28.d\n"
+ "st1w { z16.s }, p4, [x21]\n"
"uzp1 z28.d, z25.d, z29.d\n"
- "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z17.s }, p3, [x21, #1, MUL VL]\n"
"uzp2 z25.d, z25.d, z29.d\n"
+ "st1w { z18.s }, p2, [x21, #2, MUL VL]\n"
"uzp1 z29.d, z26.d, z30.d\n"
- "st1w { z15.s }, p4, [x23]\n"
+ "st1w { z19.s }, p1, [x21, #3, MUL VL]\n"
"uzp2 z26.d, z26.d, z30.d\n"
+ "st1w { z23.s }, p4, [x20]\n"
"uzp1 z30.d, z27.d, z31.d\n"
- "st1w { z20.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z28.s }, p3, [x20, #1, MUL VL]\n"
"uzp2 z27.d, z27.d, z31.d\n"
- "st1w { z21.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z22.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x22]\n"
- "st1w { z17.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x22, #3, MUL VL]\n"
- "st1w { z23.s }, p4, [x21]\n"
- "st1w { z28.s }, p3, [x21, #1, MUL VL]\n"
- "st1w { z29.s }, p2, [x21, #2, MUL VL]\n"
- "st1w { z30.s }, p1, [x21, #3, MUL VL]\n"
- "st1w { z24.s }, p4, [x20]\n"
- "st1w { z25.s }, p3, [x20, #1, MUL VL]\n"
- "st1w { z26.s }, p2, [x20, #2, MUL VL]\n"
- "st1w { z27.s }, p1, [x20, #3, MUL VL]\n"
+ "st1w { z29.s }, p2, [x20, #2, MUL VL]\n"
+ "st1w { z30.s }, p1, [x20, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x19]\n"
+ "st1w { z25.s }, p3, [x19, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x19, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x19, #3, MUL VL]\n"
"66:" // Height 6: Writeback done
- "decw x11, ALL, MUL #4\n"
- "cmp x11, XZR\n"
+ "decw x10, ALL, MUL #4\n"
+ "cmp x10, XZR\n"
"bgt 57b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 68f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 67f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"67:" // Update direct input
- "mov x20, #0x6\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x6\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"68:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
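
The hunk above restores the original register allocation for the u8-to-u32 hybrid MMLA kernel: the B-panel pointer moves back from x10 to x9, the input and output pointers each shift down by one register, and x19 rejoins the clobber list. The arithmetic is untouched; every ".inst 0x45c7..." / ".inst 0x45c6..." word encodes a UMMLA (unsigned 8-bit integer matrix multiply-accumulate). As a reading aid, here is a minimal scalar sketch of what one 128-bit segment of a UMMLA computes; the function name and array types are illustrative only, not part of the library, and the instruction's widening accumulation is modeled directly with uint32_t.

#include <cstdint>

// Sketch of one 128-bit segment of "ummla z_acc.s, z_a.b, z_b.b":
// acc is a 2x2 block of 32-bit sums, a and b are each two rows of
// eight unsigned bytes, and the segment computes acc += a * b^T.
void ummla_segment(uint32_t acc[2][2],
                   const uint8_t a[2][8],
                   const uint8_t b[2][8])
{
    for (int i = 0; i < 2; ++i) {          // row of a
        for (int j = 0; j < 2; ++j) {      // row of b (column of b^T)
            uint32_t sum = 0;
            for (int k = 0; k < 8; ++k) {
                sum += uint32_t(a[i][k]) * uint32_t(b[j][k]);
            }
            acc[i][j] += sum;              // widening accumulate into .s lanes
        }
    }
}

This 2x2-block shape is why the kernel pairs rows with trn1/trn2 before the multiplies and splits the accumulators back apart with uzp1/uzp2 at writeback: each UMMLA consumes two interleaved input rows and produces results for both at once.
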
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_bf16fp32_dot_8x3VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_bf16fp32_dot_8x3VL/generic.cpp
index 94452929c6..e604dcc4bc 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_bf16fp32_dot_8x3VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_bf16fp32_dot_8x3VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -33,44 +33,42 @@ void sve_interleaved_bf16fp32_dot_8x3VL(
float *Cpanel, int ablocks, int bblocks, int K) {
struct KernelArgs {
+ size_t bblocks = {};
size_t K = {};
const bfloat16 *Bpanel = {};
- size_t bblocks = {};
} ka;
+ ka.bblocks = bblocks;
ka.K = (K/2) - 1;
ka.Bpanel = Bpanel;
- ka.bblocks = bblocks;
__asm__ __volatile__(
"ptrue p0.b\n"
"1:" // Height loop
- "ldr x23, [%x[args_ptr], %[offsetof_bblocks]]\n"
- "ldr x22, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x22, [%x[args_ptr], %[offsetof_bblocks]]\n"
"mov x21, %x[Apanel]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_Bpanel]]\n"
"2:" // Width loop
- "ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "mov %x[Apanel], x21\n"
- "cmp x20, #0x2\n"
"mov z8.b, #0x0\n"
"mov z9.b, #0x0\n"
- "ld1rqh { z0.h }, p0/Z, [%x[Apanel]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
"mov z10.b, #0x0\n"
"mov z11.b, #0x0\n"
- "ld1rqh { z1.h }, p0/Z, [%x[Apanel], #16]\n"
+ "ld1h { z4.h }, p0/Z, [x20]\n"
"mov z12.b, #0x0\n"
"mov z13.b, #0x0\n"
- "ld1h { z4.h }, p0/Z, [x22]\n"
+ "mov %x[Apanel], x21\n"
"mov z14.b, #0x0\n"
"mov z15.b, #0x0\n"
- "ld1h { z5.h }, p0/Z, [x22, #1, MUL VL]\n"
+ "cmp x19, #0x2\n"
"mov z16.b, #0x0\n"
"mov z17.b, #0x0\n"
- "ld1h { z6.h }, p0/Z, [x22, #2, MUL VL]\n"
"mov z18.b, #0x0\n"
"mov z19.b, #0x0\n"
+ "ld1rqh { z0.h }, p0/Z, [%x[Apanel]]\n"
"mov z20.b, #0x0\n"
"mov z21.b, #0x0\n"
+ "ld1rqh { z1.h }, p0/Z, [%x[Apanel], #16]\n"
"mov z22.b, #0x0\n"
"mov z23.b, #0x0\n"
"mov z24.b, #0x0\n"
@@ -85,29 +83,31 @@ void sve_interleaved_bf16fp32_dot_8x3VL(
"3:" // main loop head
".inst 0x64604088 // bfdot z8.s, z4.h, z0.h[0]\n"
".inst 0x6468408b // bfdot z11.s, z4.h, z0.h[1]\n"
- "ld1rqh { z2.h }, p0/Z, [%x[Apanel], #32]\n"
+ "ld1h { z5.h }, p0/Z, [x20, #1, MUL VL]\n"
".inst 0x6470408e // bfdot z14.s, z4.h, z0.h[2]\n"
".inst 0x64784091 // bfdot z17.s, z4.h, z0.h[3]\n"
- "ld1rqh { z3.h }, p0/Z, [%x[Apanel], #48]\n"
+ "ld1h { z6.h }, p0/Z, [x20, #2, MUL VL]\n"
".inst 0x64614094 // bfdot z20.s, z4.h, z1.h[0]\n"
".inst 0x64694097 // bfdot z23.s, z4.h, z1.h[1]\n"
- "sub x20, x20, #0x2\n"
+ "ld1rqh { z2.h }, p0/Z, [%x[Apanel], #32]\n"
".inst 0x6471409a // bfdot z26.s, z4.h, z1.h[2]\n"
".inst 0x6479409d // bfdot z29.s, z4.h, z1.h[3]\n"
- "ld1h { z4.h }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1rqh { z3.h }, p0/Z, [%x[Apanel], #48]\n"
".inst 0x646040a9 // bfdot z9.s, z5.h, z0.h[0]\n"
".inst 0x646840ac // bfdot z12.s, z5.h, z0.h[1]\n"
- "cmp x20, #0x2\n"
+ "ld1h { z4.h }, p0/Z, [x20, #3, MUL VL]\n"
".inst 0x647040af // bfdot z15.s, z5.h, z0.h[2]\n"
".inst 0x647840b2 // bfdot z18.s, z5.h, z0.h[3]\n"
- "add %x[Apanel], %x[Apanel], #0x40\n"
+ "sub x19, x19, #0x2\n"
".inst 0x646140b5 // bfdot z21.s, z5.h, z1.h[0]\n"
".inst 0x646940b8 // bfdot z24.s, z5.h, z1.h[1]\n"
+ "cmp x19, #0x2\n"
".inst 0x647140bb // bfdot z27.s, z5.h, z1.h[2]\n"
".inst 0x647940be // bfdot z30.s, z5.h, z1.h[3]\n"
- "ld1h { z5.h }, p0/Z, [x22, #4, MUL VL]\n"
+ "ld1h { z5.h }, p0/Z, [x20, #4, MUL VL]\n"
".inst 0x646040ca // bfdot z10.s, z6.h, z0.h[0]\n"
".inst 0x646840cd // bfdot z13.s, z6.h, z0.h[1]\n"
+ "add %x[Apanel], %x[Apanel], #0x40\n"
".inst 0x647040d0 // bfdot z16.s, z6.h, z0.h[2]\n"
".inst 0x647840d3 // bfdot z19.s, z6.h, z0.h[3]\n"
"ld1rqh { z0.h }, p0/Z, [%x[Apanel]]\n"
@@ -115,27 +115,26 @@ void sve_interleaved_bf16fp32_dot_8x3VL(
".inst 0x646940d9 // bfdot z25.s, z6.h, z1.h[1]\n"
".inst 0x647140dc // bfdot z28.s, z6.h, z1.h[2]\n"
".inst 0x647940df // bfdot z31.s, z6.h, z1.h[3]\n"
- "ld1h { z6.h }, p0/Z, [x22, #5, MUL VL]\n"
- "addvl x22, x22, #6\n"
+ "ld1h { z6.h }, p0/Z, [x20, #5, MUL VL]\n"
".inst 0x64624088 // bfdot z8.s, z4.h, z2.h[0]\n"
".inst 0x646a408b // bfdot z11.s, z4.h, z2.h[1]\n"
"ld1rqh { z1.h }, p0/Z, [%x[Apanel], #16]\n"
".inst 0x6472408e // bfdot z14.s, z4.h, z2.h[2]\n"
".inst 0x647a4091 // bfdot z17.s, z4.h, z2.h[3]\n"
+ "addvl x20, x20, #6\n"
".inst 0x64634094 // bfdot z20.s, z4.h, z3.h[0]\n"
".inst 0x646b4097 // bfdot z23.s, z4.h, z3.h[1]\n"
".inst 0x6473409a // bfdot z26.s, z4.h, z3.h[2]\n"
".inst 0x647b409d // bfdot z29.s, z4.h, z3.h[3]\n"
- "ld1h { z4.h }, p0/Z, [x22]\n"
".inst 0x646240a9 // bfdot z9.s, z5.h, z2.h[0]\n"
".inst 0x646a40ac // bfdot z12.s, z5.h, z2.h[1]\n"
+ "ld1h { z4.h }, p0/Z, [x20]\n"
".inst 0x647240af // bfdot z15.s, z5.h, z2.h[2]\n"
".inst 0x647a40b2 // bfdot z18.s, z5.h, z2.h[3]\n"
".inst 0x646340b5 // bfdot z21.s, z5.h, z3.h[0]\n"
".inst 0x646b40b8 // bfdot z24.s, z5.h, z3.h[1]\n"
".inst 0x647340bb // bfdot z27.s, z5.h, z3.h[2]\n"
".inst 0x647b40be // bfdot z30.s, z5.h, z3.h[3]\n"
- "ld1h { z5.h }, p0/Z, [x22, #1, MUL VL]\n"
".inst 0x646240ca // bfdot z10.s, z6.h, z2.h[0]\n"
".inst 0x646a40cd // bfdot z13.s, z6.h, z2.h[1]\n"
".inst 0x647240d0 // bfdot z16.s, z6.h, z2.h[2]\n"
@@ -144,19 +143,20 @@ void sve_interleaved_bf16fp32_dot_8x3VL(
".inst 0x646b40d9 // bfdot z25.s, z6.h, z3.h[1]\n"
".inst 0x647340dc // bfdot z28.s, z6.h, z3.h[2]\n"
".inst 0x647b40df // bfdot z31.s, z6.h, z3.h[3]\n"
- "ld1h { z6.h }, p0/Z, [x22, #2, MUL VL]\n"
"bge 3b\n"
"4:" // main loop skip
".inst 0x64604088 // bfdot z8.s, z4.h, z0.h[0]\n"
".inst 0x6468408b // bfdot z11.s, z4.h, z0.h[1]\n"
- "add %x[Apanel], %x[Apanel], #0x20\n"
+ "ld1h { z5.h }, p0/Z, [x20, #1, MUL VL]\n"
".inst 0x6470408e // bfdot z14.s, z4.h, z0.h[2]\n"
".inst 0x64784091 // bfdot z17.s, z4.h, z0.h[3]\n"
- "addvl x22, x22, #3\n"
+ "ld1h { z6.h }, p0/Z, [x20, #2, MUL VL]\n"
".inst 0x64614094 // bfdot z20.s, z4.h, z1.h[0]\n"
".inst 0x64694097 // bfdot z23.s, z4.h, z1.h[1]\n"
+ "add %x[Apanel], %x[Apanel], #0x20\n"
".inst 0x6471409a // bfdot z26.s, z4.h, z1.h[2]\n"
".inst 0x6479409d // bfdot z29.s, z4.h, z1.h[3]\n"
+ "addvl x20, x20, #3\n"
".inst 0x646040a9 // bfdot z9.s, z5.h, z0.h[0]\n"
".inst 0x646840ac // bfdot z12.s, z5.h, z0.h[1]\n"
".inst 0x647040af // bfdot z15.s, z5.h, z0.h[2]\n"
@@ -173,19 +173,19 @@ void sve_interleaved_bf16fp32_dot_8x3VL(
".inst 0x646940d9 // bfdot z25.s, z6.h, z1.h[1]\n"
".inst 0x647140dc // bfdot z28.s, z6.h, z1.h[2]\n"
".inst 0x647940df // bfdot z31.s, z6.h, z1.h[3]\n"
- "cbz x20, 5f\n"
+ "cbz x19, 5f\n"
"ld1rqh { z0.h }, p0/Z, [%x[Apanel]]\n"
"ld1rqh { z1.h }, p0/Z, [%x[Apanel], #16]\n"
"add %x[Apanel], %x[Apanel], #0x20\n"
- "ld1h { z7.h }, p0/Z, [x22]\n"
- "ld1h { z4.h }, p0/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z7.h }, p0/Z, [x20]\n"
+ "ld1h { z4.h }, p0/Z, [x20, #1, MUL VL]\n"
+ "ld1h { z5.h }, p0/Z, [x20, #2, MUL VL]\n"
+ "addvl x20, x20, #3\n"
".inst 0x646040e8 // bfdot z8.s, z7.h, z0.h[0]\n"
- "ld1h { z5.h }, p0/Z, [x22, #2, MUL VL]\n"
".inst 0x646840eb // bfdot z11.s, z7.h, z0.h[1]\n"
".inst 0x647040ee // bfdot z14.s, z7.h, z0.h[2]\n"
".inst 0x647840f1 // bfdot z17.s, z7.h, z0.h[3]\n"
".inst 0x646140f4 // bfdot z20.s, z7.h, z1.h[0]\n"
- "addvl x22, x22, #3\n"
".inst 0x646940f7 // bfdot z23.s, z7.h, z1.h[1]\n"
".inst 0x647140fa // bfdot z26.s, z7.h, z1.h[2]\n"
".inst 0x647940fd // bfdot z29.s, z7.h, z1.h[3]\n"
@@ -207,7 +207,7 @@ void sve_interleaved_bf16fp32_dot_8x3VL(
".inst 0x647940bf // bfdot z31.s, z5.h, z1.h[3]\n"
"5:" // multiply loop done
"st1w { z8.s }, p0, [%x[Cpanel]]\n"
- "subs x23, x23, #0x1\n"
+ "subs x22, x22, #0x1\n"
"st1w { z9.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
"st1w { z10.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
"st1w { z11.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
@@ -238,7 +238,7 @@ void sve_interleaved_bf16fp32_dot_8x3VL(
"bne 1b\n"
: [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
: [args_ptr] "r" (&ka), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_bblocks] "I" (offsetof(KernelArgs, bblocks))
- : "cc", "memory", "p0", "x20", "x21", "x22", "x23", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "x19", "x20", "x21", "x22", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
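
The bf16 dot-product kernel above is the same exercise for BFDOT: the bblocks count and B-panel pointer drop from x23/x22 to x22/x20, K drops to x19, and the loads are rescheduled around the renumbered registers. Each BFDOT accumulates, into every 32-bit lane, the product sum of one adjacent pair of bfloat16 elements, with the indexed forms (z0.h[0]..z0.h[3]) broadcasting a pair from the A panel. A minimal scalar sketch of one lane follows; bf16-to-float widening is modeled by placing the 16-bit pattern in the high half of a binary32 value, and the real instruction's non-IEEE intermediate rounding is ignored, so treat this as an approximation rather than a bit-exact model.

#include <cstdint>
#include <cstring>

// Widen a bfloat16 bit pattern to float: bf16 is the top 16 bits of
// an IEEE binary32 value.
static float bf16_to_f32(uint16_t h)
{
    uint32_t bits = uint32_t(h) << 16;
    float f;
    std::memcpy(&f, &bits, sizeof f);
    return f;
}

// Sketch of one 32-bit lane of "bfdot z_acc.s, z_a.h, z_b.h":
// acc += a0*b0 + a1*b1 over one adjacent pair of bf16 elements.
float bfdot_lane(float acc, const uint16_t a[2], const uint16_t b[2])
{
    return acc + bf16_to_f32(a[0]) * bf16_to_f32(b[0])
               + bf16_to_f32(a[1]) * bf16_to_f32(b[1]);
}

Note also that moving bblocks back to the head of KernelArgs is correctness-neutral: the inline assembly reads every field through offsetof, so the struct order only changes which load the scheduler issues first.
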
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_bf16fp32_mmla_8x3VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_bf16fp32_mmla_8x3VL/generic.cpp
index fe5382db05..de4f0ad313 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_bf16fp32_mmla_8x3VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_bf16fp32_mmla_8x3VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -33,28 +33,28 @@ void sve_interleaved_bf16fp32_mmla_8x3VL(
float *Cpanel, int ablocks, int bblocks, int K) {
struct KernelArgs {
+ size_t bblocks = {};
size_t K = {};
const bfloat16 *Bpanel = {};
- size_t bblocks = {};
} ka;
+ ka.bblocks = bblocks;
ka.K = (K/4) - 1;
ka.Bpanel = Bpanel;
- ka.bblocks = bblocks;
__asm__ __volatile__(
"ptrue p0.b\n"
"1:" // Height loop
- "ldr x23, [%x[args_ptr], %[offsetof_bblocks]]\n"
- "ldr x22, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x22, [%x[args_ptr], %[offsetof_bblocks]]\n"
"mov x21, %x[Apanel]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_Bpanel]]\n"
"2:" // Width loop
- "ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
"mov %x[Apanel], x21\n"
- "cmp x20, #0x2\n"
+ "cmp x19, #0x2\n"
"mov z8.b, #0x0\n"
"mov z9.b, #0x0\n"
- "ld1h { z4.h }, p0/Z, [x22]\n"
+ "ld1h { z4.h }, p0/Z, [x20]\n"
"mov z10.b, #0x0\n"
"mov z11.b, #0x0\n"
"ld1rqh { z0.h }, p0/Z, [%x[Apanel]]\n"
@@ -63,13 +63,13 @@ void sve_interleaved_bf16fp32_mmla_8x3VL(
"ld1rqh { z1.h }, p0/Z, [%x[Apanel], #16]\n"
"mov z14.b, #0x0\n"
"mov z15.b, #0x0\n"
- "ld1h { z5.h }, p0/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z5.h }, p0/Z, [x20, #1, MUL VL]\n"
"mov z16.b, #0x0\n"
"mov z17.b, #0x0\n"
"ld1rqh { z2.h }, p0/Z, [%x[Apanel], #32]\n"
"mov z18.b, #0x0\n"
"mov z19.b, #0x0\n"
- "addvl x22, x22, #2\n"
+ "addvl x20, x20, #2\n"
"mov z20.b, #0x0\n"
"mov z21.b, #0x0\n"
"add %x[Apanel], %x[Apanel], #0x30\n"
@@ -87,143 +87,143 @@ void sve_interleaved_bf16fp32_mmla_8x3VL(
"3:" // main loop head
"ld1rqh { z3.h }, p0/Z, [%x[Apanel]]\n"
".inst 0x6464e408 // bfmmla z8.s, z0.h, z4.h\n"
- ".inst 0x6465e40b // bfmmla z11.s, z0.h, z5.h\n"
".inst 0x6464e42e // bfmmla z14.s, z1.h, z4.h\n"
+ ".inst 0x6465e40b // bfmmla z11.s, z0.h, z5.h\n"
".inst 0x6465e431 // bfmmla z17.s, z1.h, z5.h\n"
- "ld1h { z6.h }, p0/Z, [x22]\n"
+ "ld1h { z6.h }, p0/Z, [x20]\n"
".inst 0x6464e454 // bfmmla z20.s, z2.h, z4.h\n"
".inst 0x6465e457 // bfmmla z23.s, z2.h, z5.h\n"
- "ld1h { z7.h }, p0/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z7.h }, p0/Z, [x20, #1, MUL VL]\n"
".inst 0x6464e47a // bfmmla z26.s, z3.h, z4.h\n"
".inst 0x6465e47d // bfmmla z29.s, z3.h, z5.h\n"
- "ld1h { z4.h }, p0/Z, [x22, #2, MUL VL]\n"
- "ld1h { z5.h }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1h { z4.h }, p0/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z5.h }, p0/Z, [x20, #3, MUL VL]\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
- ".inst 0x6467e40c // bfmmla z12.s, z0.h, z7.h\n"
".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
- ".inst 0x6467e432 // bfmmla z18.s, z1.h, z7.h\n"
- "sub x20, x20, #0x2\n"
".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
- ".inst 0x6467e458 // bfmmla z24.s, z2.h, z7.h\n"
- "cmp x20, #0x2\n"
".inst 0x6466e47b // bfmmla z27.s, z3.h, z6.h\n"
- ".inst 0x6467e47e // bfmmla z30.s, z3.h, z7.h\n"
- "ld1h { z6.h }, p0/Z, [x22, #4, MUL VL]\n"
+ "ld1h { z6.h }, p0/Z, [x20, #4, MUL VL]\n"
+ ".inst 0x6467e40c // bfmmla z12.s, z0.h, z7.h\n"
".inst 0x6464e40a // bfmmla z10.s, z0.h, z4.h\n"
+ "sub x19, x19, #0x2\n"
".inst 0x6465e40d // bfmmla z13.s, z0.h, z5.h\n"
+ ".inst 0x6467e432 // bfmmla z18.s, z1.h, z7.h\n"
"ld1rqh { z0.h }, p0/Z, [%x[Apanel], #16]\n"
".inst 0x6464e430 // bfmmla z16.s, z1.h, z4.h\n"
".inst 0x6465e433 // bfmmla z19.s, z1.h, z5.h\n"
"ld1rqh { z1.h }, p0/Z, [%x[Apanel], #32]\n"
+ ".inst 0x6467e458 // bfmmla z24.s, z2.h, z7.h\n"
+ ".inst 0x6467e47e // bfmmla z30.s, z3.h, z7.h\n"
+ "ld1h { z7.h }, p0/Z, [x20, #5, MUL VL]\n"
".inst 0x6464e456 // bfmmla z22.s, z2.h, z4.h\n"
".inst 0x6465e459 // bfmmla z25.s, z2.h, z5.h\n"
- "ld1h { z7.h }, p0/Z, [x22, #5, MUL VL]\n"
+ "ld1rqh { z2.h }, p0/Z, [%x[Apanel], #48]\n"
".inst 0x6464e47c // bfmmla z28.s, z3.h, z4.h\n"
".inst 0x6465e47f // bfmmla z31.s, z3.h, z5.h\n"
- "ld1rqh { z2.h }, p0/Z, [%x[Apanel], #48]\n"
"ld1rqh { z3.h }, p0/Z, [%x[Apanel], #64]\n"
- "ld1h { z4.h }, p0/Z, [x22, #6, MUL VL]\n"
+ "ld1h { z4.h }, p0/Z, [x20, #6, MUL VL]\n"
+ "ld1h { z5.h }, p0/Z, [x20, #7, MUL VL]\n"
+ "addvl x20, x20, #16\n"
".inst 0x6466e408 // bfmmla z8.s, z0.h, z6.h\n"
- "ld1h { z5.h }, p0/Z, [x22, #7, MUL VL]\n"
- "addvl x22, x22, #16\n"
- ".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
+ "cmp x19, #0x2\n"
+ ".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
".inst 0x6467e431 // bfmmla z17.s, z1.h, z7.h\n"
".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
".inst 0x6467e457 // bfmmla z23.s, z2.h, z7.h\n"
".inst 0x6466e47a // bfmmla z26.s, z3.h, z6.h\n"
".inst 0x6467e47d // bfmmla z29.s, z3.h, z7.h\n"
- "ld1h { z6.h }, p0/Z, [x22, #-8, MUL VL]\n"
- "ld1h { z7.h }, p0/Z, [x22, #-7, MUL VL]\n"
+ "ld1h { z6.h }, p0/Z, [x20, #-8, MUL VL]\n"
+ "ld1h { z7.h }, p0/Z, [x20, #-7, MUL VL]\n"
".inst 0x6464e409 // bfmmla z9.s, z0.h, z4.h\n"
- ".inst 0x6465e40c // bfmmla z12.s, z0.h, z5.h\n"
".inst 0x6464e42f // bfmmla z15.s, z1.h, z4.h\n"
- ".inst 0x6465e432 // bfmmla z18.s, z1.h, z5.h\n"
".inst 0x6464e455 // bfmmla z21.s, z2.h, z4.h\n"
- ".inst 0x6465e458 // bfmmla z24.s, z2.h, z5.h\n"
".inst 0x6464e47b // bfmmla z27.s, z3.h, z4.h\n"
- ".inst 0x6465e47e // bfmmla z30.s, z3.h, z5.h\n"
- "ld1h { z4.h }, p0/Z, [x22, #-6, MUL VL]\n"
+ "ld1h { z4.h }, p0/Z, [x20, #-6, MUL VL]\n"
+ ".inst 0x6465e40c // bfmmla z12.s, z0.h, z5.h\n"
".inst 0x6466e40a // bfmmla z10.s, z0.h, z6.h\n"
".inst 0x6467e40d // bfmmla z13.s, z0.h, z7.h\n"
+ ".inst 0x6465e432 // bfmmla z18.s, z1.h, z5.h\n"
"ld1rqh { z0.h }, p0/Z, [%x[Apanel], #80]\n"
".inst 0x6466e430 // bfmmla z16.s, z1.h, z6.h\n"
".inst 0x6467e433 // bfmmla z19.s, z1.h, z7.h\n"
"ld1rqh { z1.h }, p0/Z, [%x[Apanel], #96]\n"
+ ".inst 0x6465e458 // bfmmla z24.s, z2.h, z5.h\n"
+ ".inst 0x6465e47e // bfmmla z30.s, z3.h, z5.h\n"
+ "ld1h { z5.h }, p0/Z, [x20, #-5, MUL VL]\n"
".inst 0x6466e456 // bfmmla z22.s, z2.h, z6.h\n"
".inst 0x6467e459 // bfmmla z25.s, z2.h, z7.h\n"
- "ld1h { z5.h }, p0/Z, [x22, #-5, MUL VL]\n"
+ "ld1rqh { z2.h }, p0/Z, [%x[Apanel], #112]\n"
".inst 0x6466e47c // bfmmla z28.s, z3.h, z6.h\n"
".inst 0x6467e47f // bfmmla z31.s, z3.h, z7.h\n"
- "ld1rqh { z2.h }, p0/Z, [%x[Apanel], #112]\n"
"add %x[Apanel], %x[Apanel], #0x80\n"
- "addvl x22, x22, #-4\n"
+ "addvl x20, x20, #-4\n"
"bge 3b\n"
"4:" // main loop skip
"ld1rqh { z3.h }, p0/Z, [%x[Apanel]]\n"
".inst 0x6464e408 // bfmmla z8.s, z0.h, z4.h\n"
- ".inst 0x6465e40b // bfmmla z11.s, z0.h, z5.h\n"
".inst 0x6464e42e // bfmmla z14.s, z1.h, z4.h\n"
+ ".inst 0x6465e40b // bfmmla z11.s, z0.h, z5.h\n"
".inst 0x6465e431 // bfmmla z17.s, z1.h, z5.h\n"
- "ld1h { z6.h }, p0/Z, [x22]\n"
+ "ld1h { z6.h }, p0/Z, [x20]\n"
".inst 0x6464e454 // bfmmla z20.s, z2.h, z4.h\n"
".inst 0x6465e457 // bfmmla z23.s, z2.h, z5.h\n"
- "ld1h { z7.h }, p0/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z7.h }, p0/Z, [x20, #1, MUL VL]\n"
".inst 0x6464e47a // bfmmla z26.s, z3.h, z4.h\n"
".inst 0x6465e47d // bfmmla z29.s, z3.h, z5.h\n"
- "ld1h { z4.h }, p0/Z, [x22, #2, MUL VL]\n"
- "ld1h { z5.h }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1h { z4.h }, p0/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z5.h }, p0/Z, [x20, #3, MUL VL]\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
- ".inst 0x6467e40c // bfmmla z12.s, z0.h, z7.h\n"
".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
- ".inst 0x6467e432 // bfmmla z18.s, z1.h, z7.h\n"
- "add %x[Apanel], %x[Apanel], #0x10\n"
".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
- ".inst 0x6467e458 // bfmmla z24.s, z2.h, z7.h\n"
- "addvl x22, x22, #4\n"
".inst 0x6466e47b // bfmmla z27.s, z3.h, z6.h\n"
- ".inst 0x6467e47e // bfmmla z30.s, z3.h, z7.h\n"
+ "add %x[Apanel], %x[Apanel], #0x10\n"
+ ".inst 0x6467e40c // bfmmla z12.s, z0.h, z7.h\n"
".inst 0x6464e40a // bfmmla z10.s, z0.h, z4.h\n"
+ "addvl x20, x20, #4\n"
".inst 0x6465e40d // bfmmla z13.s, z0.h, z5.h\n"
+ ".inst 0x6467e432 // bfmmla z18.s, z1.h, z7.h\n"
".inst 0x6464e430 // bfmmla z16.s, z1.h, z4.h\n"
".inst 0x6465e433 // bfmmla z19.s, z1.h, z5.h\n"
+ ".inst 0x6467e458 // bfmmla z24.s, z2.h, z7.h\n"
+ ".inst 0x6467e47e // bfmmla z30.s, z3.h, z7.h\n"
".inst 0x6464e456 // bfmmla z22.s, z2.h, z4.h\n"
".inst 0x6465e459 // bfmmla z25.s, z2.h, z5.h\n"
".inst 0x6464e47c // bfmmla z28.s, z3.h, z4.h\n"
".inst 0x6465e47f // bfmmla z31.s, z3.h, z5.h\n"
- "cbz x20, 5f\n"
- "ld1h { z6.h }, p0/Z, [x22]\n"
+ "cbz x19, 5f\n"
+ "ld1h { z6.h }, p0/Z, [x20]\n"
"ld1rqh { z0.h }, p0/Z, [%x[Apanel]]\n"
".inst 0x6466e408 // bfmmla z8.s, z0.h, z6.h\n"
"ld1rqh { z1.h }, p0/Z, [%x[Apanel], #16]\n"
- "ld1h { z7.h }, p0/Z, [x22, #1, MUL VL]\n"
- ".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
+ "ld1h { z7.h }, p0/Z, [x20, #1, MUL VL]\n"
+ ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
"ld1rqh { z2.h }, p0/Z, [%x[Apanel], #32]\n"
"ld1rqh { z3.h }, p0/Z, [%x[Apanel], #48]\n"
- ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
+ ".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
".inst 0x6467e431 // bfmmla z17.s, z1.h, z7.h\n"
".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
- "ld1h { z4.h }, p0/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z4.h }, p0/Z, [x20, #2, MUL VL]\n"
".inst 0x6467e457 // bfmmla z23.s, z2.h, z7.h\n"
".inst 0x6466e47a // bfmmla z26.s, z3.h, z6.h\n"
- "ld1h { z5.h }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1h { z5.h }, p0/Z, [x20, #3, MUL VL]\n"
".inst 0x6467e47d // bfmmla z29.s, z3.h, z7.h\n"
- "ld1h { z6.h }, p0/Z, [x22, #4, MUL VL]\n"
- "ld1h { z7.h }, p0/Z, [x22, #5, MUL VL]\n"
+ "ld1h { z6.h }, p0/Z, [x20, #4, MUL VL]\n"
+ "ld1h { z7.h }, p0/Z, [x20, #5, MUL VL]\n"
".inst 0x6464e409 // bfmmla z9.s, z0.h, z4.h\n"
- ".inst 0x6465e40c // bfmmla z12.s, z0.h, z5.h\n"
- "addvl x22, x22, #6\n"
".inst 0x6464e42f // bfmmla z15.s, z1.h, z4.h\n"
- ".inst 0x6465e432 // bfmmla z18.s, z1.h, z5.h\n"
"add %x[Apanel], %x[Apanel], #0x40\n"
".inst 0x6464e455 // bfmmla z21.s, z2.h, z4.h\n"
- ".inst 0x6465e458 // bfmmla z24.s, z2.h, z5.h\n"
".inst 0x6464e47b // bfmmla z27.s, z3.h, z4.h\n"
- ".inst 0x6465e47e // bfmmla z30.s, z3.h, z5.h\n"
+ "addvl x20, x20, #6\n"
+ ".inst 0x6465e40c // bfmmla z12.s, z0.h, z5.h\n"
".inst 0x6466e40a // bfmmla z10.s, z0.h, z6.h\n"
".inst 0x6467e40d // bfmmla z13.s, z0.h, z7.h\n"
+ ".inst 0x6465e432 // bfmmla z18.s, z1.h, z5.h\n"
".inst 0x6466e430 // bfmmla z16.s, z1.h, z6.h\n"
".inst 0x6467e433 // bfmmla z19.s, z1.h, z7.h\n"
+ ".inst 0x6465e458 // bfmmla z24.s, z2.h, z5.h\n"
+ ".inst 0x6465e47e // bfmmla z30.s, z3.h, z5.h\n"
".inst 0x6466e456 // bfmmla z22.s, z2.h, z6.h\n"
".inst 0x6467e459 // bfmmla z25.s, z2.h, z7.h\n"
".inst 0x6466e47c // bfmmla z28.s, z3.h, z6.h\n"
@@ -243,7 +243,7 @@ void sve_interleaved_bf16fp32_mmla_8x3VL(
"uzp2 z14.d, z14.d, z17.d\n"
"st1w { z9.s }, p0, [%x[Cpanel], #4, MUL VL]\n"
"uzp1 z17.d, z15.d, z18.d\n"
- "subs x23, x23, #0x1\n"
+ "subs x22, x22, #0x1\n"
"st1w { z10.s }, p0, [%x[Cpanel], #5, MUL VL]\n"
"uzp2 z15.d, z15.d, z18.d\n"
"uzp1 z18.d, z16.d, z19.d\n"
@@ -285,7 +285,7 @@ void sve_interleaved_bf16fp32_mmla_8x3VL(
"bne 1b\n"
: [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
: [args_ptr] "r" (&ka), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_bblocks] "I" (offsetof(KernelArgs, bblocks))
- : "cc", "memory", "p0", "x20", "x21", "x22", "x23", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "x19", "x20", "x21", "x22", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
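
The hunks above all marshal scalar arguments the same way: the C++ wrapper packs them into a local KernelArgs struct and the assembly reloads each field through offsetof()-based immediates, which is why reordering the struct members goes hand in hand with renumbering the loads. A minimal sketch of that pattern, with illustrative names and assuming an AArch64 toolchain (this is not the library's actual kernel):

#include <cstddef> // offsetof

void kernel_args_sketch(const float *Bpanel, int bblocks) {
  struct KernelArgs {
    size_t bblocks = {};
    const float *Bpanel = {};
  } ka;
  ka.bblocks = bblocks;
  ka.Bpanel = Bpanel;
  __asm__ __volatile__(
    // The "I" constraints below turn offsetof() into load immediates,
    // so the asm can re-read any field on every outer-loop iteration.
    "ldr x22, [%x[args_ptr], %[offsetof_bblocks]]\n"
    "ldr x20, [%x[args_ptr], %[offsetof_Bpanel]]\n"
    :
    : [args_ptr] "r" (&ka),
      [offsetof_bblocks] "I" (offsetof(KernelArgs, bblocks)),
      [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel))
    : "memory", "x20", "x22");
}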
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_8x3VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_8x3VL/a64fx.cpp
index 9287509889..602634706e 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_8x3VL/a64fx.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_8x3VL/a64fx.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -32,34 +32,34 @@ void sve_interleaved_fp16_mla_8x3VL_a64fx(
__fp16 *Cpanel, int ablocks, int bblocks, int K) {
struct KernelArgs {
+ size_t bblocks = {};
size_t K = {};
const __fp16 *Bpanel = {};
- size_t bblocks = {};
} ka;
+ ka.bblocks = bblocks;
ka.K = (K/1) - 1;
ka.Bpanel = Bpanel;
- ka.bblocks = bblocks;
__asm__ __volatile__(
"ptrue p0.b\n"
"1:" // Height loop
- "ldr x23, [%x[args_ptr], %[offsetof_bblocks]]\n"
- "ldr x22, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x22, [%x[args_ptr], %[offsetof_bblocks]]\n"
"mov x21, %x[Apanel]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_Bpanel]]\n"
"2:" // Width loop
- "ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
"mov %x[Apanel], x21\n"
- "cmp x20, #0x2\n"
+ "cmp x19, #0x2\n"
"mov z8.b, #0x0\n"
"mov z9.b, #0x0\n"
- "ld1h { z0.h }, p0/Z, [x22]\n"
+ "ld1h { z0.h }, p0/Z, [x20]\n"
"mov z10.b, #0x0\n"
"mov z11.b, #0x0\n"
- "ld1h { z1.h }, p0/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z1.h }, p0/Z, [x20, #1, MUL VL]\n"
"mov z12.b, #0x0\n"
"mov z13.b, #0x0\n"
- "ld1h { z2.h }, p0/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z2.h }, p0/Z, [x20, #2, MUL VL]\n"
"mov z14.b, #0x0\n"
"mov z15.b, #0x0\n"
"ld1rh { z3.h }, p0/Z, [%x[Apanel]]\n"
@@ -86,7 +86,7 @@ void sve_interleaved_fp16_mla_8x3VL_a64fx(
"3:" // main loop head
"fmla z8.h, p0/M, z0.h, z3.h\n"
"fmla z9.h, p0/M, z1.h, z3.h\n"
- "sub x20, x20, #0x2\n"
+ "sub x19, x19, #0x2\n"
"fmla z10.h, p0/M, z2.h, z3.h\n"
"ld1rh { z3.h }, p0/Z, [%x[Apanel], #8]\n"
"fmla z11.h, p0/M, z0.h, z4.h\n"
@@ -95,7 +95,7 @@ void sve_interleaved_fp16_mla_8x3VL_a64fx(
"ld1rh { z4.h }, p0/Z, [%x[Apanel], #10]\n"
"fmla z14.h, p0/M, z0.h, z5.h\n"
"fmla z15.h, p0/M, z1.h, z5.h\n"
- "cmp x20, #0x2\n"
+ "cmp x19, #0x2\n"
"fmla z16.h, p0/M, z2.h, z5.h\n"
"ld1rh { z5.h }, p0/Z, [%x[Apanel], #12]\n"
"fmla z17.h, p0/M, z0.h, z6.h\n"
@@ -115,11 +115,11 @@ void sve_interleaved_fp16_mla_8x3VL_a64fx(
"fmla z28.h, p0/M, z2.h, z5.h\n"
"ld1rh { z5.h }, p0/Z, [%x[Apanel], #20]\n"
"fmla z29.h, p0/M, z0.h, z6.h\n"
- "ld1h { z0.h }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1h { z0.h }, p0/Z, [x20, #3, MUL VL]\n"
"fmla z30.h, p0/M, z1.h, z6.h\n"
"fmla z31.h, p0/M, z2.h, z6.h\n"
- "ld1h { z1.h }, p0/Z, [x22, #4, MUL VL]\n"
- "ld1h { z2.h }, p0/Z, [x22, #5, MUL VL]\n"
+ "ld1h { z1.h }, p0/Z, [x20, #4, MUL VL]\n"
+ "ld1h { z2.h }, p0/Z, [x20, #5, MUL VL]\n"
"fmla z8.h, p0/M, z0.h, z3.h\n"
"ld1rh { z6.h }, p0/Z, [%x[Apanel], #22]\n"
"fmla z9.h, p0/M, z1.h, z3.h\n"
@@ -131,7 +131,7 @@ void sve_interleaved_fp16_mla_8x3VL_a64fx(
"ld1rh { z4.h }, p0/Z, [%x[Apanel], #26]\n"
"fmla z14.h, p0/M, z0.h, z5.h\n"
"fmla z15.h, p0/M, z1.h, z5.h\n"
- "addvl x22, x22, #6\n"
+ "addvl x20, x20, #6\n"
"fmla z16.h, p0/M, z2.h, z5.h\n"
"ld1rh { z5.h }, p0/Z, [%x[Apanel], #28]\n"
"fmla z17.h, p0/M, z0.h, z6.h\n"
@@ -151,18 +151,18 @@ void sve_interleaved_fp16_mla_8x3VL_a64fx(
"fmla z27.h, p0/M, z1.h, z5.h\n"
"fmla z28.h, p0/M, z2.h, z5.h\n"
"fmla z29.h, p0/M, z0.h, z6.h\n"
- "ld1h { z0.h }, p0/Z, [x22]\n"
+ "ld1h { z0.h }, p0/Z, [x20]\n"
"fmla z30.h, p0/M, z1.h, z6.h\n"
"fmla z31.h, p0/M, z2.h, z6.h\n"
- "ld1h { z1.h }, p0/Z, [x22, #1, MUL VL]\n"
- "ld1h { z2.h }, p0/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z1.h }, p0/Z, [x20, #1, MUL VL]\n"
+ "ld1h { z2.h }, p0/Z, [x20, #2, MUL VL]\n"
"ld1rh { z5.h }, p0/Z, [%x[Apanel], #4]\n"
"ld1rh { z6.h }, p0/Z, [%x[Apanel], #6]\n"
"bge 3b\n"
"4:" // main loop skip
"fmla z8.h, p0/M, z0.h, z3.h\n"
"fmla z9.h, p0/M, z1.h, z3.h\n"
- "addvl x22, x22, #3\n"
+ "addvl x20, x20, #3\n"
"fmla z10.h, p0/M, z2.h, z3.h\n"
"ld1rh { z3.h }, p0/Z, [%x[Apanel], #8]\n"
"fmla z11.h, p0/M, z0.h, z4.h\n"
@@ -190,10 +190,10 @@ void sve_interleaved_fp16_mla_8x3VL_a64fx(
"fmla z29.h, p0/M, z0.h, z6.h\n"
"fmla z30.h, p0/M, z1.h, z6.h\n"
"fmla z31.h, p0/M, z2.h, z6.h\n"
- "cbz x20, 5f\n"
- "ld1h { z0.h }, p0/Z, [x22]\n"
- "ld1h { z1.h }, p0/Z, [x22, #1, MUL VL]\n"
- "ld1h { z2.h }, p0/Z, [x22, #2, MUL VL]\n"
+ "cbz x19, 5f\n"
+ "ld1h { z0.h }, p0/Z, [x20]\n"
+ "ld1h { z1.h }, p0/Z, [x20, #1, MUL VL]\n"
+ "ld1h { z2.h }, p0/Z, [x20, #2, MUL VL]\n"
"ld1rh { z3.h }, p0/Z, [%x[Apanel]]\n"
"fmla z8.h, p0/M, z0.h, z3.h\n"
"ld1rh { z4.h }, p0/Z, [%x[Apanel], #2]\n"
@@ -202,24 +202,24 @@ void sve_interleaved_fp16_mla_8x3VL_a64fx(
"ld1rh { z6.h }, p0/Z, [%x[Apanel], #6]\n"
"fmla z10.h, p0/M, z2.h, z3.h\n"
"fmla z11.h, p0/M, z0.h, z4.h\n"
+ "ld1rh { z3.h }, p0/Z, [%x[Apanel], #8]\n"
"fmla z12.h, p0/M, z1.h, z4.h\n"
"fmla z13.h, p0/M, z2.h, z4.h\n"
- "ld1rh { z3.h }, p0/Z, [%x[Apanel], #8]\n"
+ "ld1rh { z4.h }, p0/Z, [%x[Apanel], #10]\n"
"fmla z14.h, p0/M, z0.h, z5.h\n"
"fmla z15.h, p0/M, z1.h, z5.h\n"
- "ld1rh { z4.h }, p0/Z, [%x[Apanel], #10]\n"
"fmla z16.h, p0/M, z2.h, z5.h\n"
"fmla z17.h, p0/M, z0.h, z6.h\n"
"ld1rh { z5.h }, p0/Z, [%x[Apanel], #12]\n"
"fmla z18.h, p0/M, z1.h, z6.h\n"
"fmla z19.h, p0/M, z2.h, z6.h\n"
"ld1rh { z6.h }, p0/Z, [%x[Apanel], #14]\n"
+ "addvl x20, x20, #3\n"
"fmla z20.h, p0/M, z0.h, z3.h\n"
"fmla z21.h, p0/M, z1.h, z3.h\n"
- "addvl x22, x22, #3\n"
+ "add %x[Apanel], %x[Apanel], #0x10\n"
"fmla z22.h, p0/M, z2.h, z3.h\n"
"fmla z23.h, p0/M, z0.h, z4.h\n"
- "add %x[Apanel], %x[Apanel], #0x10\n"
"fmla z24.h, p0/M, z1.h, z4.h\n"
"fmla z25.h, p0/M, z2.h, z4.h\n"
"fmla z26.h, p0/M, z0.h, z5.h\n"
@@ -230,7 +230,7 @@ void sve_interleaved_fp16_mla_8x3VL_a64fx(
"fmla z31.h, p0/M, z2.h, z6.h\n"
"5:" // multiply loop done
"st1h { z8.h }, p0, [%x[Cpanel]]\n"
- "subs x23, x23, #0x1\n"
+ "subs x22, x22, #0x1\n"
"st1h { z9.h }, p0, [%x[Cpanel], #1, MUL VL]\n"
"st1h { z10.h }, p0, [%x[Cpanel], #2, MUL VL]\n"
"st1h { z11.h }, p0, [%x[Cpanel], #3, MUL VL]\n"
@@ -261,7 +261,7 @@ void sve_interleaved_fp16_mla_8x3VL_a64fx(
"bne 1b\n"
: [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
: [args_ptr] "r" (&ka), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_bblocks] "I" (offsetof(KernelArgs, bblocks))
- : "cc", "memory", "p0", "x20", "x21", "x22", "x23", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "x19", "x20", "x21", "x22", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
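
The register renumbering in these hunks (x20..x23 back down to x19..x22) is mechanical, but each renamed register must also reappear in the clobber list at the end of its asm block, or the compiler is free to keep a live value there. A minimal, hypothetical illustration on AArch64 (not library code):

void clobber_sketch() {
  __asm__ __volatile__(
    "mov x19, #0\n" // scratch use of x19: it must be declared below
    :
    :
    : "x19", "cc");
}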
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_8x3VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_8x3VL/generic.cpp
index 1ac2ac075e..f8e4b89b95 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_8x3VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_8x3VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -32,41 +32,39 @@ void sve_interleaved_fp16_mla_8x3VL(
__fp16 *Cpanel, int ablocks, int bblocks, int K) {
struct KernelArgs {
+ size_t bblocks = {};
size_t K = {};
const __fp16 *Bpanel = {};
- size_t bblocks = {};
} ka;
+ ka.bblocks = bblocks;
ka.K = (K/1) - 1;
ka.Bpanel = Bpanel;
- ka.bblocks = bblocks;
__asm__ __volatile__(
"ptrue p0.b\n"
"1:" // Height loop
- "ldr x23, [%x[args_ptr], %[offsetof_bblocks]]\n"
- "ldr x22, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x22, [%x[args_ptr], %[offsetof_bblocks]]\n"
"mov x21, %x[Apanel]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_Bpanel]]\n"
"2:" // Width loop
- "ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "mov %x[Apanel], x21\n"
- "cmp x20, #0x2\n"
"mov z8.b, #0x0\n"
"mov z9.b, #0x0\n"
- "ld1rqh { z0.h }, p0/Z, [%x[Apanel]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
"mov z10.b, #0x0\n"
"mov z11.b, #0x0\n"
- "ld1h { z2.h }, p0/Z, [x22]\n"
+ "ld1h { z2.h }, p0/Z, [x20]\n"
"mov z12.b, #0x0\n"
"mov z13.b, #0x0\n"
- "ld1h { z3.h }, p0/Z, [x22, #1, MUL VL]\n"
+ "mov %x[Apanel], x21\n"
"mov z14.b, #0x0\n"
"mov z15.b, #0x0\n"
- "ld1h { z4.h }, p0/Z, [x22, #2, MUL VL]\n"
+ "cmp x19, #0x2\n"
"mov z16.b, #0x0\n"
"mov z17.b, #0x0\n"
"mov z18.b, #0x0\n"
"mov z19.b, #0x0\n"
+ "ld1rqh { z0.h }, p0/Z, [%x[Apanel]]\n"
"mov z20.b, #0x0\n"
"mov z21.b, #0x0\n"
"mov z22.b, #0x0\n"
@@ -83,34 +81,34 @@ void sve_interleaved_fp16_mla_8x3VL(
"3:" // main loop head
"fmla z8.h, z2.h, z0.h[0]\n"
"fmla z11.h, z2.h, z0.h[1]\n"
- "ld1rqh { z1.h }, p0/Z, [%x[Apanel], #16]\n"
+ "ld1h { z3.h }, p0/Z, [x20, #1, MUL VL]\n"
"fmla z14.h, z2.h, z0.h[2]\n"
"fmla z17.h, z2.h, z0.h[3]\n"
- "ld1h { z5.h }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1h { z4.h }, p0/Z, [x20, #2, MUL VL]\n"
"fmla z20.h, z2.h, z0.h[4]\n"
"fmla z23.h, z2.h, z0.h[5]\n"
- "ld1h { z6.h }, p0/Z, [x22, #4, MUL VL]\n"
+ "ld1rqh { z1.h }, p0/Z, [%x[Apanel], #16]\n"
"fmla z26.h, z2.h, z0.h[6]\n"
"fmla z29.h, z2.h, z0.h[7]\n"
- "ld1h { z7.h }, p0/Z, [x22, #5, MUL VL]\n"
+ "ld1h { z5.h }, p0/Z, [x20, #3, MUL VL]\n"
"fmla z9.h, z3.h, z0.h[0]\n"
"fmla z12.h, z3.h, z0.h[1]\n"
- "addvl x22, x22, #6\n"
+ "ld1h { z6.h }, p0/Z, [x20, #4, MUL VL]\n"
"fmla z15.h, z3.h, z0.h[2]\n"
"fmla z18.h, z3.h, z0.h[3]\n"
- "sub x20, x20, #0x2\n"
+ "ld1h { z7.h }, p0/Z, [x20, #5, MUL VL]\n"
"fmla z21.h, z3.h, z0.h[4]\n"
"fmla z24.h, z3.h, z0.h[5]\n"
- "cmp x20, #0x2\n"
+ "sub x19, x19, #0x2\n"
"fmla z27.h, z3.h, z0.h[6]\n"
"fmla z30.h, z3.h, z0.h[7]\n"
- "add %x[Apanel], %x[Apanel], #0x20\n"
+ "cmp x19, #0x2\n"
"fmla z10.h, z4.h, z0.h[0]\n"
"fmla z13.h, z4.h, z0.h[1]\n"
- "ld1h { z2.h }, p0/Z, [x22]\n"
+ "add %x[Apanel], %x[Apanel], #0x20\n"
"fmla z16.h, z4.h, z0.h[2]\n"
"fmla z19.h, z4.h, z0.h[3]\n"
- "ld1h { z3.h }, p0/Z, [x22, #1, MUL VL]\n"
+ "addvl x20, x20, #6\n"
"fmla z22.h, z4.h, z0.h[4]\n"
"fmla z25.h, z4.h, z0.h[5]\n"
"fmla z28.h, z4.h, z0.h[6]\n"
@@ -118,7 +116,7 @@ void sve_interleaved_fp16_mla_8x3VL(
"ld1rqh { z0.h }, p0/Z, [%x[Apanel]]\n"
"fmla z8.h, z5.h, z1.h[0]\n"
"fmla z11.h, z5.h, z1.h[1]\n"
- "ld1h { z4.h }, p0/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z2.h }, p0/Z, [x20]\n"
"fmla z14.h, z5.h, z1.h[2]\n"
"fmla z17.h, z5.h, z1.h[3]\n"
"fmla z20.h, z5.h, z1.h[4]\n"
@@ -145,14 +143,16 @@ void sve_interleaved_fp16_mla_8x3VL(
"4:" // main loop skip
"fmla z8.h, z2.h, z0.h[0]\n"
"fmla z11.h, z2.h, z0.h[1]\n"
- "add %x[Apanel], %x[Apanel], #0x10\n"
+ "ld1h { z3.h }, p0/Z, [x20, #1, MUL VL]\n"
"fmla z14.h, z2.h, z0.h[2]\n"
"fmla z17.h, z2.h, z0.h[3]\n"
- "addvl x22, x22, #3\n"
+ "ld1h { z4.h }, p0/Z, [x20, #2, MUL VL]\n"
"fmla z20.h, z2.h, z0.h[4]\n"
"fmla z23.h, z2.h, z0.h[5]\n"
+ "add %x[Apanel], %x[Apanel], #0x10\n"
"fmla z26.h, z2.h, z0.h[6]\n"
"fmla z29.h, z2.h, z0.h[7]\n"
+ "addvl x20, x20, #3\n"
"fmla z9.h, z3.h, z0.h[0]\n"
"fmla z12.h, z3.h, z0.h[1]\n"
"fmla z15.h, z3.h, z0.h[2]\n"
@@ -169,19 +169,19 @@ void sve_interleaved_fp16_mla_8x3VL(
"fmla z25.h, z4.h, z0.h[5]\n"
"fmla z28.h, z4.h, z0.h[6]\n"
"fmla z31.h, z4.h, z0.h[7]\n"
- "cbz x20, 5f\n"
+ "cbz x19, 5f\n"
"ld1rqh { z0.h }, p0/Z, [%x[Apanel]]\n"
- "ld1h { z5.h }, p0/Z, [x22]\n"
+ "add %x[Apanel], %x[Apanel], #0x10\n"
+ "ld1h { z5.h }, p0/Z, [x20]\n"
+ "ld1h { z6.h }, p0/Z, [x20, #1, MUL VL]\n"
+ "ld1h { z7.h }, p0/Z, [x20, #2, MUL VL]\n"
+ "addvl x20, x20, #3\n"
"fmla z8.h, z5.h, z0.h[0]\n"
- "ld1h { z6.h }, p0/Z, [x22, #1, MUL VL]\n"
- "ld1h { z7.h }, p0/Z, [x22, #2, MUL VL]\n"
"fmla z11.h, z5.h, z0.h[1]\n"
"fmla z14.h, z5.h, z0.h[2]\n"
"fmla z17.h, z5.h, z0.h[3]\n"
- "add %x[Apanel], %x[Apanel], #0x10\n"
"fmla z20.h, z5.h, z0.h[4]\n"
"fmla z23.h, z5.h, z0.h[5]\n"
- "addvl x22, x22, #3\n"
"fmla z26.h, z5.h, z0.h[6]\n"
"fmla z29.h, z5.h, z0.h[7]\n"
"fmla z9.h, z6.h, z0.h[0]\n"
@@ -202,7 +202,7 @@ void sve_interleaved_fp16_mla_8x3VL(
"fmla z31.h, z7.h, z0.h[7]\n"
"5:" // multiply loop done
"st1h { z8.h }, p0, [%x[Cpanel]]\n"
- "subs x23, x23, #0x1\n"
+ "subs x22, x22, #0x1\n"
"st1h { z9.h }, p0, [%x[Cpanel], #1, MUL VL]\n"
"st1h { z10.h }, p0, [%x[Cpanel], #2, MUL VL]\n"
"st1h { z11.h }, p0, [%x[Cpanel], #3, MUL VL]\n"
@@ -233,7 +233,7 @@ void sve_interleaved_fp16_mla_8x3VL(
"bne 1b\n"
: [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
: [args_ptr] "r" (&ka), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_bblocks] "I" (offsetof(KernelArgs, bblocks))
- : "cc", "memory", "p0", "x20", "x21", "x22", "x23", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "x19", "x20", "x21", "x22", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL/a64fx.cpp
index 3141a258a8..6defe0e223 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL/a64fx.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL/a64fx.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -32,34 +32,34 @@ void sve_interleaved_fp32_mla_8x3VL_a64fx(
float *Cpanel, int ablocks, int bblocks, int K) {
struct KernelArgs {
+ size_t bblocks = {};
size_t K = {};
const float *Bpanel = {};
- size_t bblocks = {};
} ka;
+ ka.bblocks = bblocks;
ka.K = (K/1) - 1;
ka.Bpanel = Bpanel;
- ka.bblocks = bblocks;
__asm__ __volatile__(
"ptrue p0.b\n"
"1:" // Height loop
- "ldr x23, [%x[args_ptr], %[offsetof_bblocks]]\n"
- "ldr x22, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x22, [%x[args_ptr], %[offsetof_bblocks]]\n"
"mov x21, %x[Apanel]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_Bpanel]]\n"
"2:" // Width loop
- "ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
"mov %x[Apanel], x21\n"
- "cmp x20, #0x2\n"
+ "cmp x19, #0x2\n"
"mov z8.b, #0x0\n"
"mov z9.b, #0x0\n"
- "ld1w { z0.s }, p0/Z, [x22]\n"
+ "ld1w { z0.s }, p0/Z, [x20]\n"
"mov z10.b, #0x0\n"
"mov z11.b, #0x0\n"
- "ld1w { z1.s }, p0/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z1.s }, p0/Z, [x20, #1, MUL VL]\n"
"mov z12.b, #0x0\n"
"mov z13.b, #0x0\n"
- "ld1w { z2.s }, p0/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z2.s }, p0/Z, [x20, #2, MUL VL]\n"
"mov z14.b, #0x0\n"
"mov z15.b, #0x0\n"
"ld1rw { z3.s }, p0/Z, [%x[Apanel]]\n"
@@ -86,7 +86,7 @@ void sve_interleaved_fp32_mla_8x3VL_a64fx(
"3:" // main loop head
"fmla z8.s, p0/M, z0.s, z3.s\n"
"fmla z9.s, p0/M, z1.s, z3.s\n"
- "sub x20, x20, #0x2\n"
+ "sub x19, x19, #0x2\n"
"fmla z10.s, p0/M, z2.s, z3.s\n"
"ld1rw { z3.s }, p0/Z, [%x[Apanel], #16]\n"
"fmla z11.s, p0/M, z0.s, z4.s\n"
@@ -95,7 +95,7 @@ void sve_interleaved_fp32_mla_8x3VL_a64fx(
"ld1rw { z4.s }, p0/Z, [%x[Apanel], #20]\n"
"fmla z14.s, p0/M, z0.s, z5.s\n"
"fmla z15.s, p0/M, z1.s, z5.s\n"
- "cmp x20, #0x2\n"
+ "cmp x19, #0x2\n"
"fmla z16.s, p0/M, z2.s, z5.s\n"
"ld1rw { z5.s }, p0/Z, [%x[Apanel], #24]\n"
"fmla z17.s, p0/M, z0.s, z6.s\n"
@@ -115,11 +115,11 @@ void sve_interleaved_fp32_mla_8x3VL_a64fx(
"fmla z28.s, p0/M, z2.s, z5.s\n"
"ld1rw { z5.s }, p0/Z, [%x[Apanel], #40]\n"
"fmla z29.s, p0/M, z0.s, z6.s\n"
- "ld1w { z0.s }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z0.s }, p0/Z, [x20, #3, MUL VL]\n"
"fmla z30.s, p0/M, z1.s, z6.s\n"
"fmla z31.s, p0/M, z2.s, z6.s\n"
- "ld1w { z1.s }, p0/Z, [x22, #4, MUL VL]\n"
- "ld1w { z2.s }, p0/Z, [x22, #5, MUL VL]\n"
+ "ld1w { z1.s }, p0/Z, [x20, #4, MUL VL]\n"
+ "ld1w { z2.s }, p0/Z, [x20, #5, MUL VL]\n"
"fmla z8.s, p0/M, z0.s, z3.s\n"
"ld1rw { z6.s }, p0/Z, [%x[Apanel], #44]\n"
"fmla z9.s, p0/M, z1.s, z3.s\n"
@@ -131,7 +131,7 @@ void sve_interleaved_fp32_mla_8x3VL_a64fx(
"ld1rw { z4.s }, p0/Z, [%x[Apanel], #52]\n"
"fmla z14.s, p0/M, z0.s, z5.s\n"
"fmla z15.s, p0/M, z1.s, z5.s\n"
- "addvl x22, x22, #6\n"
+ "addvl x20, x20, #6\n"
"fmla z16.s, p0/M, z2.s, z5.s\n"
"ld1rw { z5.s }, p0/Z, [%x[Apanel], #56]\n"
"fmla z17.s, p0/M, z0.s, z6.s\n"
@@ -151,18 +151,18 @@ void sve_interleaved_fp32_mla_8x3VL_a64fx(
"fmla z27.s, p0/M, z1.s, z5.s\n"
"fmla z28.s, p0/M, z2.s, z5.s\n"
"fmla z29.s, p0/M, z0.s, z6.s\n"
- "ld1w { z0.s }, p0/Z, [x22]\n"
+ "ld1w { z0.s }, p0/Z, [x20]\n"
"fmla z30.s, p0/M, z1.s, z6.s\n"
"fmla z31.s, p0/M, z2.s, z6.s\n"
- "ld1w { z1.s }, p0/Z, [x22, #1, MUL VL]\n"
- "ld1w { z2.s }, p0/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z1.s }, p0/Z, [x20, #1, MUL VL]\n"
+ "ld1w { z2.s }, p0/Z, [x20, #2, MUL VL]\n"
"ld1rw { z5.s }, p0/Z, [%x[Apanel], #8]\n"
"ld1rw { z6.s }, p0/Z, [%x[Apanel], #12]\n"
"bge 3b\n"
"4:" // main loop skip
"fmla z8.s, p0/M, z0.s, z3.s\n"
"fmla z9.s, p0/M, z1.s, z3.s\n"
- "addvl x22, x22, #3\n"
+ "addvl x20, x20, #3\n"
"fmla z10.s, p0/M, z2.s, z3.s\n"
"ld1rw { z3.s }, p0/Z, [%x[Apanel], #16]\n"
"fmla z11.s, p0/M, z0.s, z4.s\n"
@@ -190,10 +190,10 @@ void sve_interleaved_fp32_mla_8x3VL_a64fx(
"fmla z29.s, p0/M, z0.s, z6.s\n"
"fmla z30.s, p0/M, z1.s, z6.s\n"
"fmla z31.s, p0/M, z2.s, z6.s\n"
- "cbz x20, 5f\n"
- "ld1w { z0.s }, p0/Z, [x22]\n"
- "ld1w { z1.s }, p0/Z, [x22, #1, MUL VL]\n"
- "ld1w { z2.s }, p0/Z, [x22, #2, MUL VL]\n"
+ "cbz x19, 5f\n"
+ "ld1w { z0.s }, p0/Z, [x20]\n"
+ "ld1w { z1.s }, p0/Z, [x20, #1, MUL VL]\n"
+ "ld1w { z2.s }, p0/Z, [x20, #2, MUL VL]\n"
"ld1rw { z3.s }, p0/Z, [%x[Apanel]]\n"
"fmla z8.s, p0/M, z0.s, z3.s\n"
"ld1rw { z4.s }, p0/Z, [%x[Apanel], #4]\n"
@@ -202,24 +202,24 @@ void sve_interleaved_fp32_mla_8x3VL_a64fx(
"ld1rw { z6.s }, p0/Z, [%x[Apanel], #12]\n"
"fmla z10.s, p0/M, z2.s, z3.s\n"
"fmla z11.s, p0/M, z0.s, z4.s\n"
+ "ld1rw { z3.s }, p0/Z, [%x[Apanel], #16]\n"
"fmla z12.s, p0/M, z1.s, z4.s\n"
"fmla z13.s, p0/M, z2.s, z4.s\n"
- "ld1rw { z3.s }, p0/Z, [%x[Apanel], #16]\n"
+ "ld1rw { z4.s }, p0/Z, [%x[Apanel], #20]\n"
"fmla z14.s, p0/M, z0.s, z5.s\n"
"fmla z15.s, p0/M, z1.s, z5.s\n"
- "ld1rw { z4.s }, p0/Z, [%x[Apanel], #20]\n"
"fmla z16.s, p0/M, z2.s, z5.s\n"
"fmla z17.s, p0/M, z0.s, z6.s\n"
"ld1rw { z5.s }, p0/Z, [%x[Apanel], #24]\n"
"fmla z18.s, p0/M, z1.s, z6.s\n"
"fmla z19.s, p0/M, z2.s, z6.s\n"
"ld1rw { z6.s }, p0/Z, [%x[Apanel], #28]\n"
+ "addvl x20, x20, #3\n"
"fmla z20.s, p0/M, z0.s, z3.s\n"
"fmla z21.s, p0/M, z1.s, z3.s\n"
- "addvl x22, x22, #3\n"
+ "add %x[Apanel], %x[Apanel], #0x20\n"
"fmla z22.s, p0/M, z2.s, z3.s\n"
"fmla z23.s, p0/M, z0.s, z4.s\n"
- "add %x[Apanel], %x[Apanel], #0x20\n"
"fmla z24.s, p0/M, z1.s, z4.s\n"
"fmla z25.s, p0/M, z2.s, z4.s\n"
"fmla z26.s, p0/M, z0.s, z5.s\n"
@@ -230,7 +230,7 @@ void sve_interleaved_fp32_mla_8x3VL_a64fx(
"fmla z31.s, p0/M, z2.s, z6.s\n"
"5:" // multiply loop done
"st1w { z8.s }, p0, [%x[Cpanel]]\n"
- "subs x23, x23, #0x1\n"
+ "subs x22, x22, #0x1\n"
"st1w { z9.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
"st1w { z10.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
"st1w { z11.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
@@ -261,7 +261,7 @@ void sve_interleaved_fp32_mla_8x3VL_a64fx(
"bne 1b\n"
: [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
: [args_ptr] "r" (&ka), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_bblocks] "I" (offsetof(KernelArgs, bblocks))
- : "cc", "memory", "p0", "x20", "x21", "x22", "x23", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "x19", "x20", "x21", "x22", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
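
The a64fx variants broadcast one A-panel element per multiply (ld1rw) and use the predicated form of FMLA rather than the indexed form seen in generic.cpp. Roughly the same accumulator update expressed in ACLE SVE intrinsics, as a sketch assuming <arm_sve.h> and an SVE-enabled compiler:

#include <arm_sve.h>

// acc += b_col * broadcast(a_elem): svmla_f32_m is the merging,
// predicated FMLA, and svdup_n_f32 mirrors the ld1rw broadcast load.
svfloat32_t a64fx_style_update(svbool_t pg, svfloat32_t acc,
                               svfloat32_t b_col, float a_elem) {
  return svmla_f32_m(pg, acc, b_col, svdup_n_f32(a_elem));
}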
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL/generic.cpp
index 9d1c0c3728..e02db6ec48 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -32,44 +32,42 @@ void sve_interleaved_fp32_mla_8x3VL(
float *Cpanel, int ablocks, int bblocks, int K) {
struct KernelArgs {
+ size_t bblocks = {};
size_t K = {};
const float *Bpanel = {};
- size_t bblocks = {};
} ka;
+ ka.bblocks = bblocks;
ka.K = (K/1) - 1;
ka.Bpanel = Bpanel;
- ka.bblocks = bblocks;
__asm__ __volatile__(
"ptrue p0.b\n"
"1:" // Height loop
- "ldr x23, [%x[args_ptr], %[offsetof_bblocks]]\n"
- "ldr x22, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x22, [%x[args_ptr], %[offsetof_bblocks]]\n"
"mov x21, %x[Apanel]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_Bpanel]]\n"
"2:" // Width loop
- "ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "mov %x[Apanel], x21\n"
- "cmp x20, #0x2\n"
"mov z8.b, #0x0\n"
"mov z9.b, #0x0\n"
- "ld1rqw { z0.s }, p0/Z, [%x[Apanel]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
"mov z10.b, #0x0\n"
"mov z11.b, #0x0\n"
- "ld1rqw { z1.s }, p0/Z, [%x[Apanel], #16]\n"
+ "ld1w { z4.s }, p0/Z, [x20]\n"
"mov z12.b, #0x0\n"
"mov z13.b, #0x0\n"
- "ld1w { z4.s }, p0/Z, [x22]\n"
+ "mov %x[Apanel], x21\n"
"mov z14.b, #0x0\n"
"mov z15.b, #0x0\n"
- "ld1w { z5.s }, p0/Z, [x22, #1, MUL VL]\n"
+ "cmp x19, #0x2\n"
"mov z16.b, #0x0\n"
"mov z17.b, #0x0\n"
- "ld1w { z6.s }, p0/Z, [x22, #2, MUL VL]\n"
"mov z18.b, #0x0\n"
"mov z19.b, #0x0\n"
+ "ld1rqw { z0.s }, p0/Z, [%x[Apanel]]\n"
"mov z20.b, #0x0\n"
"mov z21.b, #0x0\n"
+ "ld1rqw { z1.s }, p0/Z, [%x[Apanel], #16]\n"
"mov z22.b, #0x0\n"
"mov z23.b, #0x0\n"
"mov z24.b, #0x0\n"
@@ -84,29 +82,31 @@ void sve_interleaved_fp32_mla_8x3VL(
"3:" // main loop head
"fmla z8.s, z4.s, z0.s[0]\n"
"fmla z11.s, z4.s, z0.s[1]\n"
- "ld1rqw { z2.s }, p0/Z, [%x[Apanel], #32]\n"
+ "ld1w { z5.s }, p0/Z, [x20, #1, MUL VL]\n"
"fmla z14.s, z4.s, z0.s[2]\n"
"fmla z17.s, z4.s, z0.s[3]\n"
- "ld1rqw { z3.s }, p0/Z, [%x[Apanel], #48]\n"
+ "ld1w { z6.s }, p0/Z, [x20, #2, MUL VL]\n"
"fmla z20.s, z4.s, z1.s[0]\n"
"fmla z23.s, z4.s, z1.s[1]\n"
- "sub x20, x20, #0x2\n"
+ "ld1rqw { z2.s }, p0/Z, [%x[Apanel], #32]\n"
"fmla z26.s, z4.s, z1.s[2]\n"
"fmla z29.s, z4.s, z1.s[3]\n"
- "ld1w { z4.s }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1rqw { z3.s }, p0/Z, [%x[Apanel], #48]\n"
"fmla z9.s, z5.s, z0.s[0]\n"
"fmla z12.s, z5.s, z0.s[1]\n"
- "cmp x20, #0x2\n"
+ "ld1w { z4.s }, p0/Z, [x20, #3, MUL VL]\n"
"fmla z15.s, z5.s, z0.s[2]\n"
"fmla z18.s, z5.s, z0.s[3]\n"
- "add %x[Apanel], %x[Apanel], #0x40\n"
+ "sub x19, x19, #0x2\n"
"fmla z21.s, z5.s, z1.s[0]\n"
"fmla z24.s, z5.s, z1.s[1]\n"
+ "cmp x19, #0x2\n"
"fmla z27.s, z5.s, z1.s[2]\n"
"fmla z30.s, z5.s, z1.s[3]\n"
- "ld1w { z5.s }, p0/Z, [x22, #4, MUL VL]\n"
+ "ld1w { z5.s }, p0/Z, [x20, #4, MUL VL]\n"
"fmla z10.s, z6.s, z0.s[0]\n"
"fmla z13.s, z6.s, z0.s[1]\n"
+ "add %x[Apanel], %x[Apanel], #0x40\n"
"fmla z16.s, z6.s, z0.s[2]\n"
"fmla z19.s, z6.s, z0.s[3]\n"
"ld1rqw { z0.s }, p0/Z, [%x[Apanel]]\n"
@@ -114,27 +114,26 @@ void sve_interleaved_fp32_mla_8x3VL(
"fmla z25.s, z6.s, z1.s[1]\n"
"fmla z28.s, z6.s, z1.s[2]\n"
"fmla z31.s, z6.s, z1.s[3]\n"
- "ld1w { z6.s }, p0/Z, [x22, #5, MUL VL]\n"
- "addvl x22, x22, #6\n"
+ "ld1w { z6.s }, p0/Z, [x20, #5, MUL VL]\n"
"fmla z8.s, z4.s, z2.s[0]\n"
"fmla z11.s, z4.s, z2.s[1]\n"
"ld1rqw { z1.s }, p0/Z, [%x[Apanel], #16]\n"
"fmla z14.s, z4.s, z2.s[2]\n"
"fmla z17.s, z4.s, z2.s[3]\n"
+ "addvl x20, x20, #6\n"
"fmla z20.s, z4.s, z3.s[0]\n"
"fmla z23.s, z4.s, z3.s[1]\n"
"fmla z26.s, z4.s, z3.s[2]\n"
"fmla z29.s, z4.s, z3.s[3]\n"
- "ld1w { z4.s }, p0/Z, [x22]\n"
"fmla z9.s, z5.s, z2.s[0]\n"
"fmla z12.s, z5.s, z2.s[1]\n"
+ "ld1w { z4.s }, p0/Z, [x20]\n"
"fmla z15.s, z5.s, z2.s[2]\n"
"fmla z18.s, z5.s, z2.s[3]\n"
"fmla z21.s, z5.s, z3.s[0]\n"
"fmla z24.s, z5.s, z3.s[1]\n"
"fmla z27.s, z5.s, z3.s[2]\n"
"fmla z30.s, z5.s, z3.s[3]\n"
- "ld1w { z5.s }, p0/Z, [x22, #1, MUL VL]\n"
"fmla z10.s, z6.s, z2.s[0]\n"
"fmla z13.s, z6.s, z2.s[1]\n"
"fmla z16.s, z6.s, z2.s[2]\n"
@@ -143,19 +142,20 @@ void sve_interleaved_fp32_mla_8x3VL(
"fmla z25.s, z6.s, z3.s[1]\n"
"fmla z28.s, z6.s, z3.s[2]\n"
"fmla z31.s, z6.s, z3.s[3]\n"
- "ld1w { z6.s }, p0/Z, [x22, #2, MUL VL]\n"
"bge 3b\n"
"4:" // main loop skip
"fmla z8.s, z4.s, z0.s[0]\n"
"fmla z11.s, z4.s, z0.s[1]\n"
- "add %x[Apanel], %x[Apanel], #0x20\n"
+ "ld1w { z5.s }, p0/Z, [x20, #1, MUL VL]\n"
"fmla z14.s, z4.s, z0.s[2]\n"
"fmla z17.s, z4.s, z0.s[3]\n"
- "addvl x22, x22, #3\n"
+ "ld1w { z6.s }, p0/Z, [x20, #2, MUL VL]\n"
"fmla z20.s, z4.s, z1.s[0]\n"
"fmla z23.s, z4.s, z1.s[1]\n"
+ "add %x[Apanel], %x[Apanel], #0x20\n"
"fmla z26.s, z4.s, z1.s[2]\n"
"fmla z29.s, z4.s, z1.s[3]\n"
+ "addvl x20, x20, #3\n"
"fmla z9.s, z5.s, z0.s[0]\n"
"fmla z12.s, z5.s, z0.s[1]\n"
"fmla z15.s, z5.s, z0.s[2]\n"
@@ -172,19 +172,19 @@ void sve_interleaved_fp32_mla_8x3VL(
"fmla z25.s, z6.s, z1.s[1]\n"
"fmla z28.s, z6.s, z1.s[2]\n"
"fmla z31.s, z6.s, z1.s[3]\n"
- "cbz x20, 5f\n"
+ "cbz x19, 5f\n"
"ld1rqw { z0.s }, p0/Z, [%x[Apanel]]\n"
"ld1rqw { z1.s }, p0/Z, [%x[Apanel], #16]\n"
"add %x[Apanel], %x[Apanel], #0x20\n"
- "ld1w { z7.s }, p0/Z, [x22]\n"
- "ld1w { z4.s }, p0/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z7.s }, p0/Z, [x20]\n"
+ "ld1w { z4.s }, p0/Z, [x20, #1, MUL VL]\n"
+ "ld1w { z5.s }, p0/Z, [x20, #2, MUL VL]\n"
+ "addvl x20, x20, #3\n"
"fmla z8.s, z7.s, z0.s[0]\n"
- "ld1w { z5.s }, p0/Z, [x22, #2, MUL VL]\n"
"fmla z11.s, z7.s, z0.s[1]\n"
"fmla z14.s, z7.s, z0.s[2]\n"
"fmla z17.s, z7.s, z0.s[3]\n"
"fmla z20.s, z7.s, z1.s[0]\n"
- "addvl x22, x22, #3\n"
"fmla z23.s, z7.s, z1.s[1]\n"
"fmla z26.s, z7.s, z1.s[2]\n"
"fmla z29.s, z7.s, z1.s[3]\n"
@@ -206,7 +206,7 @@ void sve_interleaved_fp32_mla_8x3VL(
"fmla z31.s, z5.s, z1.s[3]\n"
"5:" // multiply loop done
"st1w { z8.s }, p0, [%x[Cpanel]]\n"
- "subs x23, x23, #0x1\n"
+ "subs x22, x22, #0x1\n"
"st1w { z9.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
"st1w { z10.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
"st1w { z11.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
@@ -237,7 +237,7 @@ void sve_interleaved_fp32_mla_8x3VL(
"bne 1b\n"
: [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
: [args_ptr] "r" (&ka), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_bblocks] "I" (offsetof(KernelArgs, bblocks))
- : "cc", "memory", "p0", "x20", "x21", "x22", "x23", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "x19", "x20", "x21", "x22", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
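
The generic fp32 kernel instead loads a 128-bit quad of A (ld1rqw) and consumes it one lane at a time via indexed FMLA (fmla ..., z0.s[0]). One such update in intrinsics, again only as a sketch under the same assumptions:

#include <arm_sve.h>

svfloat32_t indexed_update(svbool_t pg, svfloat32_t acc,
                           svfloat32_t b_col, const float *a_quad) {
  svfloat32_t a = svld1rq_f32(pg, a_quad); // like "ld1rqw { z0.s }"
  return svmla_lane_f32(acc, b_col, a, 0); // like "fmla ..., z0.s[0]"
}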
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_8x3VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_8x3VL/a64fx.cpp
index a7ca48d87a..5ca4b73b8a 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_8x3VL/a64fx.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_8x3VL/a64fx.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -33,34 +33,34 @@ void sve_interleaved_s8s32_dot_8x3VL_a64fx(
int32_t *Cpanel, int ablocks, int bblocks, int K) {
struct KernelArgs {
+ size_t bblocks = {};
size_t K = {};
const int8_t *Bpanel = {};
- size_t bblocks = {};
} ka;
+ ka.bblocks = bblocks;
ka.K = (K/4) - 1;
ka.Bpanel = Bpanel;
- ka.bblocks = bblocks;
__asm__ __volatile__(
"ptrue p0.b\n"
"1:" // Height loop
- "ldr x23, [%x[args_ptr], %[offsetof_bblocks]]\n"
- "ldr x22, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x22, [%x[args_ptr], %[offsetof_bblocks]]\n"
"mov x21, %x[Apanel]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_Bpanel]]\n"
"2:" // Width loop
- "ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
"mov %x[Apanel], x21\n"
- "cmp x20, #0x2\n"
+ "cmp x19, #0x2\n"
"mov z8.s, #0x0\n"
"mov z9.s, #0x0\n"
- "ld1b { z0.b }, p0/Z, [x22]\n"
+ "ld1b { z0.b }, p0/Z, [x20]\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
- "ld1b { z1.b }, p0/Z, [x22, #1, MUL VL]\n"
+ "ld1b { z1.b }, p0/Z, [x20, #1, MUL VL]\n"
"mov z12.s, #0x0\n"
"mov z13.s, #0x0\n"
- "ld1b { z2.b }, p0/Z, [x22, #2, MUL VL]\n"
+ "ld1b { z2.b }, p0/Z, [x20, #2, MUL VL]\n"
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
"ld1rw { z3.s }, p0/Z, [%x[Apanel]]\n"
@@ -87,7 +87,7 @@ void sve_interleaved_s8s32_dot_8x3VL_a64fx(
"3:" // main loop head
"sdot z8.s, z0.b, z3.b\n"
"sdot z9.s, z1.b, z3.b\n"
- "sub x20, x20, #0x2\n"
+ "sub x19, x19, #0x2\n"
"sdot z10.s, z2.b, z3.b\n"
"ld1rw { z3.s }, p0/Z, [%x[Apanel], #16]\n"
"sdot z11.s, z0.b, z4.b\n"
@@ -96,7 +96,7 @@ void sve_interleaved_s8s32_dot_8x3VL_a64fx(
"ld1rw { z4.s }, p0/Z, [%x[Apanel], #20]\n"
"sdot z14.s, z0.b, z5.b\n"
"sdot z15.s, z1.b, z5.b\n"
- "cmp x20, #0x2\n"
+ "cmp x19, #0x2\n"
"sdot z16.s, z2.b, z5.b\n"
"ld1rw { z5.s }, p0/Z, [%x[Apanel], #24]\n"
"sdot z17.s, z0.b, z6.b\n"
@@ -116,11 +116,11 @@ void sve_interleaved_s8s32_dot_8x3VL_a64fx(
"sdot z28.s, z2.b, z5.b\n"
"ld1rw { z5.s }, p0/Z, [%x[Apanel], #40]\n"
"sdot z29.s, z0.b, z6.b\n"
- "ld1b { z0.b }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1b { z0.b }, p0/Z, [x20, #3, MUL VL]\n"
"sdot z30.s, z1.b, z6.b\n"
"sdot z31.s, z2.b, z6.b\n"
- "ld1b { z1.b }, p0/Z, [x22, #4, MUL VL]\n"
- "ld1b { z2.b }, p0/Z, [x22, #5, MUL VL]\n"
+ "ld1b { z1.b }, p0/Z, [x20, #4, MUL VL]\n"
+ "ld1b { z2.b }, p0/Z, [x20, #5, MUL VL]\n"
"sdot z8.s, z0.b, z3.b\n"
"ld1rw { z6.s }, p0/Z, [%x[Apanel], #44]\n"
"sdot z9.s, z1.b, z3.b\n"
@@ -132,7 +132,7 @@ void sve_interleaved_s8s32_dot_8x3VL_a64fx(
"ld1rw { z4.s }, p0/Z, [%x[Apanel], #52]\n"
"sdot z14.s, z0.b, z5.b\n"
"sdot z15.s, z1.b, z5.b\n"
- "addvl x22, x22, #6\n"
+ "addvl x20, x20, #6\n"
"sdot z16.s, z2.b, z5.b\n"
"ld1rw { z5.s }, p0/Z, [%x[Apanel], #56]\n"
"sdot z17.s, z0.b, z6.b\n"
@@ -152,18 +152,18 @@ void sve_interleaved_s8s32_dot_8x3VL_a64fx(
"sdot z27.s, z1.b, z5.b\n"
"sdot z28.s, z2.b, z5.b\n"
"sdot z29.s, z0.b, z6.b\n"
- "ld1b { z0.b }, p0/Z, [x22]\n"
+ "ld1b { z0.b }, p0/Z, [x20]\n"
"sdot z30.s, z1.b, z6.b\n"
"sdot z31.s, z2.b, z6.b\n"
- "ld1b { z1.b }, p0/Z, [x22, #1, MUL VL]\n"
- "ld1b { z2.b }, p0/Z, [x22, #2, MUL VL]\n"
+ "ld1b { z1.b }, p0/Z, [x20, #1, MUL VL]\n"
+ "ld1b { z2.b }, p0/Z, [x20, #2, MUL VL]\n"
"ld1rw { z5.s }, p0/Z, [%x[Apanel], #8]\n"
"ld1rw { z6.s }, p0/Z, [%x[Apanel], #12]\n"
"bge 3b\n"
"4:" // main loop skip
"sdot z8.s, z0.b, z3.b\n"
"sdot z9.s, z1.b, z3.b\n"
- "addvl x22, x22, #3\n"
+ "addvl x20, x20, #3\n"
"sdot z10.s, z2.b, z3.b\n"
"ld1rw { z3.s }, p0/Z, [%x[Apanel], #16]\n"
"sdot z11.s, z0.b, z4.b\n"
@@ -191,10 +191,10 @@ void sve_interleaved_s8s32_dot_8x3VL_a64fx(
"sdot z29.s, z0.b, z6.b\n"
"sdot z30.s, z1.b, z6.b\n"
"sdot z31.s, z2.b, z6.b\n"
- "cbz x20, 5f\n"
- "ld1b { z0.b }, p0/Z, [x22]\n"
- "ld1b { z1.b }, p0/Z, [x22, #1, MUL VL]\n"
- "ld1b { z2.b }, p0/Z, [x22, #2, MUL VL]\n"
+ "cbz x19, 5f\n"
+ "ld1b { z0.b }, p0/Z, [x20]\n"
+ "ld1b { z1.b }, p0/Z, [x20, #1, MUL VL]\n"
+ "ld1b { z2.b }, p0/Z, [x20, #2, MUL VL]\n"
"ld1rw { z3.s }, p0/Z, [%x[Apanel]]\n"
"sdot z8.s, z0.b, z3.b\n"
"ld1rw { z4.s }, p0/Z, [%x[Apanel], #4]\n"
@@ -203,24 +203,24 @@ void sve_interleaved_s8s32_dot_8x3VL_a64fx(
"ld1rw { z6.s }, p0/Z, [%x[Apanel], #12]\n"
"sdot z10.s, z2.b, z3.b\n"
"sdot z11.s, z0.b, z4.b\n"
+ "ld1rw { z3.s }, p0/Z, [%x[Apanel], #16]\n"
"sdot z12.s, z1.b, z4.b\n"
"sdot z13.s, z2.b, z4.b\n"
- "ld1rw { z3.s }, p0/Z, [%x[Apanel], #16]\n"
+ "ld1rw { z4.s }, p0/Z, [%x[Apanel], #20]\n"
"sdot z14.s, z0.b, z5.b\n"
"sdot z15.s, z1.b, z5.b\n"
- "ld1rw { z4.s }, p0/Z, [%x[Apanel], #20]\n"
"sdot z16.s, z2.b, z5.b\n"
"sdot z17.s, z0.b, z6.b\n"
"ld1rw { z5.s }, p0/Z, [%x[Apanel], #24]\n"
"sdot z18.s, z1.b, z6.b\n"
"sdot z19.s, z2.b, z6.b\n"
"ld1rw { z6.s }, p0/Z, [%x[Apanel], #28]\n"
+ "addvl x20, x20, #3\n"
"sdot z20.s, z0.b, z3.b\n"
"sdot z21.s, z1.b, z3.b\n"
- "addvl x22, x22, #3\n"
+ "add %x[Apanel], %x[Apanel], #0x20\n"
"sdot z22.s, z2.b, z3.b\n"
"sdot z23.s, z0.b, z4.b\n"
- "add %x[Apanel], %x[Apanel], #0x20\n"
"sdot z24.s, z1.b, z4.b\n"
"sdot z25.s, z2.b, z4.b\n"
"sdot z26.s, z0.b, z5.b\n"
@@ -231,7 +231,7 @@ void sve_interleaved_s8s32_dot_8x3VL_a64fx(
"sdot z31.s, z2.b, z6.b\n"
"5:" // multiply loop done
"st1w { z8.s }, p0, [%x[Cpanel]]\n"
- "subs x23, x23, #0x1\n"
+ "subs x22, x22, #0x1\n"
"st1w { z9.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
"st1w { z10.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
"st1w { z11.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
@@ -262,7 +262,7 @@ void sve_interleaved_s8s32_dot_8x3VL_a64fx(
"bne 1b\n"
: [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
: [args_ptr] "r" (&ka), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_bblocks] "I" (offsetof(KernelArgs, bblocks))
- : "cc", "memory", "p0", "x20", "x21", "x22", "x23", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "x19", "x20", "x21", "x22", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_8x3VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_8x3VL/generic.cpp
index e5f59d220b..5fb938b20f 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_8x3VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_8x3VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -33,44 +33,42 @@ void sve_interleaved_s8s32_dot_8x3VL(
int32_t *Cpanel, int ablocks, int bblocks, int K) {
struct KernelArgs {
+ size_t bblocks = {};
size_t K = {};
const int8_t *Bpanel = {};
- size_t bblocks = {};
} ka;
+ ka.bblocks = bblocks;
ka.K = (K/4) - 1;
ka.Bpanel = Bpanel;
- ka.bblocks = bblocks;
__asm__ __volatile__(
"ptrue p0.b\n"
"1:" // Height loop
- "ldr x23, [%x[args_ptr], %[offsetof_bblocks]]\n"
- "ldr x22, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x22, [%x[args_ptr], %[offsetof_bblocks]]\n"
"mov x21, %x[Apanel]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_Bpanel]]\n"
"2:" // Width loop
- "ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "mov %x[Apanel], x21\n"
- "cmp x20, #0x2\n"
"mov z8.s, #0x0\n"
"mov z9.s, #0x0\n"
- "ld1rqb { z0.b }, p0/Z, [%x[Apanel]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
- "ld1rqb { z1.b }, p0/Z, [%x[Apanel], #16]\n"
+ "ld1b { z4.b }, p0/Z, [x20]\n"
"mov z12.s, #0x0\n"
"mov z13.s, #0x0\n"
- "ld1b { z4.b }, p0/Z, [x22]\n"
+ "mov %x[Apanel], x21\n"
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
- "ld1b { z5.b }, p0/Z, [x22, #1, MUL VL]\n"
+ "cmp x19, #0x2\n"
"mov z16.s, #0x0\n"
"mov z17.s, #0x0\n"
- "ld1b { z6.b }, p0/Z, [x22, #2, MUL VL]\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
+ "ld1rqb { z0.b }, p0/Z, [%x[Apanel]]\n"
"mov z20.s, #0x0\n"
"mov z21.s, #0x0\n"
+ "ld1rqb { z1.b }, p0/Z, [%x[Apanel], #16]\n"
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"mov z24.s, #0x0\n"
@@ -85,29 +83,31 @@ void sve_interleaved_s8s32_dot_8x3VL(
"3:" // main loop head
"sdot z8.s, z4.b, z0.b[0]\n"
"sdot z11.s, z4.b, z0.b[1]\n"
- "ld1rqb { z2.b }, p0/Z, [%x[Apanel], #32]\n"
+ "ld1b { z5.b }, p0/Z, [x20, #1, MUL VL]\n"
"sdot z14.s, z4.b, z0.b[2]\n"
"sdot z17.s, z4.b, z0.b[3]\n"
- "ld1rqb { z3.b }, p0/Z, [%x[Apanel], #48]\n"
+ "ld1b { z6.b }, p0/Z, [x20, #2, MUL VL]\n"
"sdot z20.s, z4.b, z1.b[0]\n"
"sdot z23.s, z4.b, z1.b[1]\n"
- "sub x20, x20, #0x2\n"
+ "ld1rqb { z2.b }, p0/Z, [%x[Apanel], #32]\n"
"sdot z26.s, z4.b, z1.b[2]\n"
"sdot z29.s, z4.b, z1.b[3]\n"
- "ld1b { z4.b }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1rqb { z3.b }, p0/Z, [%x[Apanel], #48]\n"
"sdot z9.s, z5.b, z0.b[0]\n"
"sdot z12.s, z5.b, z0.b[1]\n"
- "cmp x20, #0x2\n"
+ "ld1b { z4.b }, p0/Z, [x20, #3, MUL VL]\n"
"sdot z15.s, z5.b, z0.b[2]\n"
"sdot z18.s, z5.b, z0.b[3]\n"
- "add %x[Apanel], %x[Apanel], #0x40\n"
+ "sub x19, x19, #0x2\n"
"sdot z21.s, z5.b, z1.b[0]\n"
"sdot z24.s, z5.b, z1.b[1]\n"
+ "cmp x19, #0x2\n"
"sdot z27.s, z5.b, z1.b[2]\n"
"sdot z30.s, z5.b, z1.b[3]\n"
- "ld1b { z5.b }, p0/Z, [x22, #4, MUL VL]\n"
+ "ld1b { z5.b }, p0/Z, [x20, #4, MUL VL]\n"
"sdot z10.s, z6.b, z0.b[0]\n"
"sdot z13.s, z6.b, z0.b[1]\n"
+ "add %x[Apanel], %x[Apanel], #0x40\n"
"sdot z16.s, z6.b, z0.b[2]\n"
"sdot z19.s, z6.b, z0.b[3]\n"
"ld1rqb { z0.b }, p0/Z, [%x[Apanel]]\n"
@@ -115,27 +115,26 @@ void sve_interleaved_s8s32_dot_8x3VL(
"sdot z25.s, z6.b, z1.b[1]\n"
"sdot z28.s, z6.b, z1.b[2]\n"
"sdot z31.s, z6.b, z1.b[3]\n"
- "ld1b { z6.b }, p0/Z, [x22, #5, MUL VL]\n"
- "addvl x22, x22, #6\n"
+ "ld1b { z6.b }, p0/Z, [x20, #5, MUL VL]\n"
"sdot z8.s, z4.b, z2.b[0]\n"
"sdot z11.s, z4.b, z2.b[1]\n"
"ld1rqb { z1.b }, p0/Z, [%x[Apanel], #16]\n"
"sdot z14.s, z4.b, z2.b[2]\n"
"sdot z17.s, z4.b, z2.b[3]\n"
+ "addvl x20, x20, #6\n"
"sdot z20.s, z4.b, z3.b[0]\n"
"sdot z23.s, z4.b, z3.b[1]\n"
"sdot z26.s, z4.b, z3.b[2]\n"
"sdot z29.s, z4.b, z3.b[3]\n"
- "ld1b { z4.b }, p0/Z, [x22]\n"
"sdot z9.s, z5.b, z2.b[0]\n"
"sdot z12.s, z5.b, z2.b[1]\n"
+ "ld1b { z4.b }, p0/Z, [x20]\n"
"sdot z15.s, z5.b, z2.b[2]\n"
"sdot z18.s, z5.b, z2.b[3]\n"
"sdot z21.s, z5.b, z3.b[0]\n"
"sdot z24.s, z5.b, z3.b[1]\n"
"sdot z27.s, z5.b, z3.b[2]\n"
"sdot z30.s, z5.b, z3.b[3]\n"
- "ld1b { z5.b }, p0/Z, [x22, #1, MUL VL]\n"
"sdot z10.s, z6.b, z2.b[0]\n"
"sdot z13.s, z6.b, z2.b[1]\n"
"sdot z16.s, z6.b, z2.b[2]\n"
@@ -144,19 +143,20 @@ void sve_interleaved_s8s32_dot_8x3VL(
"sdot z25.s, z6.b, z3.b[1]\n"
"sdot z28.s, z6.b, z3.b[2]\n"
"sdot z31.s, z6.b, z3.b[3]\n"
- "ld1b { z6.b }, p0/Z, [x22, #2, MUL VL]\n"
"bge 3b\n"
"4:" // main loop skip
"sdot z8.s, z4.b, z0.b[0]\n"
"sdot z11.s, z4.b, z0.b[1]\n"
- "add %x[Apanel], %x[Apanel], #0x20\n"
+ "ld1b { z5.b }, p0/Z, [x20, #1, MUL VL]\n"
"sdot z14.s, z4.b, z0.b[2]\n"
"sdot z17.s, z4.b, z0.b[3]\n"
- "addvl x22, x22, #3\n"
+ "ld1b { z6.b }, p0/Z, [x20, #2, MUL VL]\n"
"sdot z20.s, z4.b, z1.b[0]\n"
"sdot z23.s, z4.b, z1.b[1]\n"
+ "add %x[Apanel], %x[Apanel], #0x20\n"
"sdot z26.s, z4.b, z1.b[2]\n"
"sdot z29.s, z4.b, z1.b[3]\n"
+ "addvl x20, x20, #3\n"
"sdot z9.s, z5.b, z0.b[0]\n"
"sdot z12.s, z5.b, z0.b[1]\n"
"sdot z15.s, z5.b, z0.b[2]\n"
@@ -173,19 +173,19 @@ void sve_interleaved_s8s32_dot_8x3VL(
"sdot z25.s, z6.b, z1.b[1]\n"
"sdot z28.s, z6.b, z1.b[2]\n"
"sdot z31.s, z6.b, z1.b[3]\n"
- "cbz x20, 5f\n"
+ "cbz x19, 5f\n"
"ld1rqb { z0.b }, p0/Z, [%x[Apanel]]\n"
"ld1rqb { z1.b }, p0/Z, [%x[Apanel], #16]\n"
"add %x[Apanel], %x[Apanel], #0x20\n"
- "ld1b { z7.b }, p0/Z, [x22]\n"
- "ld1b { z4.b }, p0/Z, [x22, #1, MUL VL]\n"
+ "ld1b { z7.b }, p0/Z, [x20]\n"
+ "ld1b { z4.b }, p0/Z, [x20, #1, MUL VL]\n"
+ "ld1b { z5.b }, p0/Z, [x20, #2, MUL VL]\n"
+ "addvl x20, x20, #3\n"
"sdot z8.s, z7.b, z0.b[0]\n"
- "ld1b { z5.b }, p0/Z, [x22, #2, MUL VL]\n"
"sdot z11.s, z7.b, z0.b[1]\n"
"sdot z14.s, z7.b, z0.b[2]\n"
"sdot z17.s, z7.b, z0.b[3]\n"
"sdot z20.s, z7.b, z1.b[0]\n"
- "addvl x22, x22, #3\n"
"sdot z23.s, z7.b, z1.b[1]\n"
"sdot z26.s, z7.b, z1.b[2]\n"
"sdot z29.s, z7.b, z1.b[3]\n"
@@ -207,7 +207,7 @@ void sve_interleaved_s8s32_dot_8x3VL(
"sdot z31.s, z5.b, z1.b[3]\n"
"5:" // multiply loop done
"st1w { z8.s }, p0, [%x[Cpanel]]\n"
- "subs x23, x23, #0x1\n"
+ "subs x22, x22, #0x1\n"
"st1w { z9.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
"st1w { z10.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
"st1w { z11.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
@@ -238,7 +238,7 @@ void sve_interleaved_s8s32_dot_8x3VL(
"bne 1b\n"
: [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
: [args_ptr] "r" (&ka), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_bblocks] "I" (offsetof(KernelArgs, bblocks))
- : "cc", "memory", "p0", "x20", "x21", "x22", "x23", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "x19", "x20", "x21", "x22", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
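
The dot-product kernels above lean entirely on the indexed form of sdot, e.g. "sdot z8.s, z4.b, z0.b[0]": within each 128-bit segment the index selects one 32-bit group (four signed bytes) of the A operand, and every 32-bit accumulator lane in that segment gains the dot product of those four bytes with its own four B bytes. The following scalar model is a sketch for exposition only (sdot_indexed_ref and vl_bytes, the vector length in bytes, are hypothetical names, not library API):

#include <cstdint>
#include <cstddef>

// Scalar model of "sdot zd.s, zn.b, zm.b[index]" (exposition only).
void sdot_indexed_ref(int32_t *zd, const int8_t *zn, const int8_t *zm,
                      int index, size_t vl_bytes) {
    for (size_t seg = 0; seg < vl_bytes; seg += 16) {        // 128-bit segments
        const int8_t *quad = &zm[seg + 4 * index];           // zm.b[index] within this segment
        for (size_t lane = 0; lane < 4; ++lane) {            // four .s lanes per segment
            int32_t acc = zd[seg / 4 + lane];
            for (size_t k = 0; k < 4; ++k)
                acc += int32_t(zn[seg + 4 * lane + k]) * int32_t(quad[k]);
            zd[seg / 4 + lane] = acc;
        }
    }
}
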
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_mmla_8x3VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_mmla_8x3VL/generic.cpp
index 104d5f918e..b8f1864af3 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_mmla_8x3VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_mmla_8x3VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -33,28 +33,28 @@ void sve_interleaved_s8s32_mmla_8x3VL(
int32_t *Cpanel, int ablocks, int bblocks, int K) {
struct KernelArgs {
+ size_t bblocks = {};
size_t K = {};
const int8_t *Bpanel = {};
- size_t bblocks = {};
} ka;
+ ka.bblocks = bblocks;
ka.K = (K/8) - 1;
ka.Bpanel = Bpanel;
- ka.bblocks = bblocks;
__asm__ __volatile__(
"ptrue p0.b\n"
"1:" // Height loop
- "ldr x23, [%x[args_ptr], %[offsetof_bblocks]]\n"
- "ldr x22, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x22, [%x[args_ptr], %[offsetof_bblocks]]\n"
"mov x21, %x[Apanel]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_Bpanel]]\n"
"2:" // Width loop
- "ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
"mov %x[Apanel], x21\n"
- "cmp x20, #0x2\n"
+ "cmp x19, #0x2\n"
"mov z8.s, #0x0\n"
"mov z9.s, #0x0\n"
- "ld1b { z4.b }, p0/Z, [x22]\n"
+ "ld1b { z4.b }, p0/Z, [x20]\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"ld1rqb { z0.b }, p0/Z, [%x[Apanel]]\n"
@@ -63,13 +63,13 @@ void sve_interleaved_s8s32_mmla_8x3VL(
"ld1rqb { z1.b }, p0/Z, [%x[Apanel], #16]\n"
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
- "ld1b { z5.b }, p0/Z, [x22, #1, MUL VL]\n"
+ "ld1b { z5.b }, p0/Z, [x20, #1, MUL VL]\n"
"mov z16.s, #0x0\n"
"mov z17.s, #0x0\n"
"ld1rqb { z2.b }, p0/Z, [%x[Apanel], #32]\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
- "addvl x22, x22, #2\n"
+ "addvl x20, x20, #2\n"
"mov z20.s, #0x0\n"
"mov z21.s, #0x0\n"
"add %x[Apanel], %x[Apanel], #0x30\n"
@@ -87,143 +87,143 @@ void sve_interleaved_s8s32_mmla_8x3VL(
"3:" // main loop head
"ld1rqb { z3.b }, p0/Z, [%x[Apanel]]\n"
".inst 0x45049808 // smmla z8.s, z0.b, z4.b\n"
- ".inst 0x4505980b // smmla z11.s, z0.b, z5.b\n"
".inst 0x4504982e // smmla z14.s, z1.b, z4.b\n"
+ ".inst 0x4505980b // smmla z11.s, z0.b, z5.b\n"
".inst 0x45059831 // smmla z17.s, z1.b, z5.b\n"
- "ld1b { z6.b }, p0/Z, [x22]\n"
+ "ld1b { z6.b }, p0/Z, [x20]\n"
".inst 0x45049854 // smmla z20.s, z2.b, z4.b\n"
".inst 0x45059857 // smmla z23.s, z2.b, z5.b\n"
- "ld1b { z7.b }, p0/Z, [x22, #1, MUL VL]\n"
+ "ld1b { z7.b }, p0/Z, [x20, #1, MUL VL]\n"
".inst 0x4504987a // smmla z26.s, z3.b, z4.b\n"
".inst 0x4505987d // smmla z29.s, z3.b, z5.b\n"
- "ld1b { z4.b }, p0/Z, [x22, #2, MUL VL]\n"
- "ld1b { z5.b }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1b { z4.b }, p0/Z, [x20, #2, MUL VL]\n"
+ "ld1b { z5.b }, p0/Z, [x20, #3, MUL VL]\n"
".inst 0x45069809 // smmla z9.s, z0.b, z6.b\n"
- ".inst 0x4507980c // smmla z12.s, z0.b, z7.b\n"
".inst 0x4506982f // smmla z15.s, z1.b, z6.b\n"
- ".inst 0x45079832 // smmla z18.s, z1.b, z7.b\n"
- "sub x20, x20, #0x2\n"
".inst 0x45069855 // smmla z21.s, z2.b, z6.b\n"
- ".inst 0x45079858 // smmla z24.s, z2.b, z7.b\n"
- "cmp x20, #0x2\n"
".inst 0x4506987b // smmla z27.s, z3.b, z6.b\n"
- ".inst 0x4507987e // smmla z30.s, z3.b, z7.b\n"
- "ld1b { z6.b }, p0/Z, [x22, #4, MUL VL]\n"
+ "ld1b { z6.b }, p0/Z, [x20, #4, MUL VL]\n"
+ ".inst 0x4507980c // smmla z12.s, z0.b, z7.b\n"
".inst 0x4504980a // smmla z10.s, z0.b, z4.b\n"
+ "sub x19, x19, #0x2\n"
".inst 0x4505980d // smmla z13.s, z0.b, z5.b\n"
+ ".inst 0x45079832 // smmla z18.s, z1.b, z7.b\n"
"ld1rqb { z0.b }, p0/Z, [%x[Apanel], #16]\n"
".inst 0x45049830 // smmla z16.s, z1.b, z4.b\n"
".inst 0x45059833 // smmla z19.s, z1.b, z5.b\n"
"ld1rqb { z1.b }, p0/Z, [%x[Apanel], #32]\n"
+ ".inst 0x45079858 // smmla z24.s, z2.b, z7.b\n"
+ ".inst 0x4507987e // smmla z30.s, z3.b, z7.b\n"
+ "ld1b { z7.b }, p0/Z, [x20, #5, MUL VL]\n"
".inst 0x45049856 // smmla z22.s, z2.b, z4.b\n"
".inst 0x45059859 // smmla z25.s, z2.b, z5.b\n"
- "ld1b { z7.b }, p0/Z, [x22, #5, MUL VL]\n"
+ "ld1rqb { z2.b }, p0/Z, [%x[Apanel], #48]\n"
".inst 0x4504987c // smmla z28.s, z3.b, z4.b\n"
".inst 0x4505987f // smmla z31.s, z3.b, z5.b\n"
- "ld1rqb { z2.b }, p0/Z, [%x[Apanel], #48]\n"
"ld1rqb { z3.b }, p0/Z, [%x[Apanel], #64]\n"
- "ld1b { z4.b }, p0/Z, [x22, #6, MUL VL]\n"
+ "ld1b { z4.b }, p0/Z, [x20, #6, MUL VL]\n"
+ "ld1b { z5.b }, p0/Z, [x20, #7, MUL VL]\n"
+ "addvl x20, x20, #16\n"
".inst 0x45069808 // smmla z8.s, z0.b, z6.b\n"
- "ld1b { z5.b }, p0/Z, [x22, #7, MUL VL]\n"
- "addvl x22, x22, #16\n"
- ".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
+ "cmp x19, #0x2\n"
+ ".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
".inst 0x45079831 // smmla z17.s, z1.b, z7.b\n"
".inst 0x45069854 // smmla z20.s, z2.b, z6.b\n"
".inst 0x45079857 // smmla z23.s, z2.b, z7.b\n"
".inst 0x4506987a // smmla z26.s, z3.b, z6.b\n"
".inst 0x4507987d // smmla z29.s, z3.b, z7.b\n"
- "ld1b { z6.b }, p0/Z, [x22, #-8, MUL VL]\n"
- "ld1b { z7.b }, p0/Z, [x22, #-7, MUL VL]\n"
+ "ld1b { z6.b }, p0/Z, [x20, #-8, MUL VL]\n"
+ "ld1b { z7.b }, p0/Z, [x20, #-7, MUL VL]\n"
".inst 0x45049809 // smmla z9.s, z0.b, z4.b\n"
- ".inst 0x4505980c // smmla z12.s, z0.b, z5.b\n"
".inst 0x4504982f // smmla z15.s, z1.b, z4.b\n"
- ".inst 0x45059832 // smmla z18.s, z1.b, z5.b\n"
".inst 0x45049855 // smmla z21.s, z2.b, z4.b\n"
- ".inst 0x45059858 // smmla z24.s, z2.b, z5.b\n"
".inst 0x4504987b // smmla z27.s, z3.b, z4.b\n"
- ".inst 0x4505987e // smmla z30.s, z3.b, z5.b\n"
- "ld1b { z4.b }, p0/Z, [x22, #-6, MUL VL]\n"
+ "ld1b { z4.b }, p0/Z, [x20, #-6, MUL VL]\n"
+ ".inst 0x4505980c // smmla z12.s, z0.b, z5.b\n"
".inst 0x4506980a // smmla z10.s, z0.b, z6.b\n"
".inst 0x4507980d // smmla z13.s, z0.b, z7.b\n"
+ ".inst 0x45059832 // smmla z18.s, z1.b, z5.b\n"
"ld1rqb { z0.b }, p0/Z, [%x[Apanel], #80]\n"
".inst 0x45069830 // smmla z16.s, z1.b, z6.b\n"
".inst 0x45079833 // smmla z19.s, z1.b, z7.b\n"
"ld1rqb { z1.b }, p0/Z, [%x[Apanel], #96]\n"
+ ".inst 0x45059858 // smmla z24.s, z2.b, z5.b\n"
+ ".inst 0x4505987e // smmla z30.s, z3.b, z5.b\n"
+ "ld1b { z5.b }, p0/Z, [x20, #-5, MUL VL]\n"
".inst 0x45069856 // smmla z22.s, z2.b, z6.b\n"
".inst 0x45079859 // smmla z25.s, z2.b, z7.b\n"
- "ld1b { z5.b }, p0/Z, [x22, #-5, MUL VL]\n"
+ "ld1rqb { z2.b }, p0/Z, [%x[Apanel], #112]\n"
".inst 0x4506987c // smmla z28.s, z3.b, z6.b\n"
".inst 0x4507987f // smmla z31.s, z3.b, z7.b\n"
- "ld1rqb { z2.b }, p0/Z, [%x[Apanel], #112]\n"
"add %x[Apanel], %x[Apanel], #0x80\n"
- "addvl x22, x22, #-4\n"
+ "addvl x20, x20, #-4\n"
"bge 3b\n"
"4:" // main loop skip
"ld1rqb { z3.b }, p0/Z, [%x[Apanel]]\n"
".inst 0x45049808 // smmla z8.s, z0.b, z4.b\n"
- ".inst 0x4505980b // smmla z11.s, z0.b, z5.b\n"
".inst 0x4504982e // smmla z14.s, z1.b, z4.b\n"
+ ".inst 0x4505980b // smmla z11.s, z0.b, z5.b\n"
".inst 0x45059831 // smmla z17.s, z1.b, z5.b\n"
- "ld1b { z6.b }, p0/Z, [x22]\n"
+ "ld1b { z6.b }, p0/Z, [x20]\n"
".inst 0x45049854 // smmla z20.s, z2.b, z4.b\n"
".inst 0x45059857 // smmla z23.s, z2.b, z5.b\n"
- "ld1b { z7.b }, p0/Z, [x22, #1, MUL VL]\n"
+ "ld1b { z7.b }, p0/Z, [x20, #1, MUL VL]\n"
".inst 0x4504987a // smmla z26.s, z3.b, z4.b\n"
".inst 0x4505987d // smmla z29.s, z3.b, z5.b\n"
- "ld1b { z4.b }, p0/Z, [x22, #2, MUL VL]\n"
- "ld1b { z5.b }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1b { z4.b }, p0/Z, [x20, #2, MUL VL]\n"
+ "ld1b { z5.b }, p0/Z, [x20, #3, MUL VL]\n"
".inst 0x45069809 // smmla z9.s, z0.b, z6.b\n"
- ".inst 0x4507980c // smmla z12.s, z0.b, z7.b\n"
".inst 0x4506982f // smmla z15.s, z1.b, z6.b\n"
- ".inst 0x45079832 // smmla z18.s, z1.b, z7.b\n"
- "add %x[Apanel], %x[Apanel], #0x10\n"
".inst 0x45069855 // smmla z21.s, z2.b, z6.b\n"
- ".inst 0x45079858 // smmla z24.s, z2.b, z7.b\n"
- "addvl x22, x22, #4\n"
".inst 0x4506987b // smmla z27.s, z3.b, z6.b\n"
- ".inst 0x4507987e // smmla z30.s, z3.b, z7.b\n"
+ "add %x[Apanel], %x[Apanel], #0x10\n"
+ ".inst 0x4507980c // smmla z12.s, z0.b, z7.b\n"
".inst 0x4504980a // smmla z10.s, z0.b, z4.b\n"
+ "addvl x20, x20, #4\n"
".inst 0x4505980d // smmla z13.s, z0.b, z5.b\n"
+ ".inst 0x45079832 // smmla z18.s, z1.b, z7.b\n"
".inst 0x45049830 // smmla z16.s, z1.b, z4.b\n"
".inst 0x45059833 // smmla z19.s, z1.b, z5.b\n"
+ ".inst 0x45079858 // smmla z24.s, z2.b, z7.b\n"
+ ".inst 0x4507987e // smmla z30.s, z3.b, z7.b\n"
".inst 0x45049856 // smmla z22.s, z2.b, z4.b\n"
".inst 0x45059859 // smmla z25.s, z2.b, z5.b\n"
".inst 0x4504987c // smmla z28.s, z3.b, z4.b\n"
".inst 0x4505987f // smmla z31.s, z3.b, z5.b\n"
- "cbz x20, 5f\n"
- "ld1b { z6.b }, p0/Z, [x22]\n"
+ "cbz x19, 5f\n"
+ "ld1b { z6.b }, p0/Z, [x20]\n"
"ld1rqb { z0.b }, p0/Z, [%x[Apanel]]\n"
".inst 0x45069808 // smmla z8.s, z0.b, z6.b\n"
"ld1rqb { z1.b }, p0/Z, [%x[Apanel], #16]\n"
- "ld1b { z7.b }, p0/Z, [x22, #1, MUL VL]\n"
- ".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p0/Z, [x20, #1, MUL VL]\n"
+ ".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
"ld1rqb { z2.b }, p0/Z, [%x[Apanel], #32]\n"
"ld1rqb { z3.b }, p0/Z, [%x[Apanel], #48]\n"
- ".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
+ ".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
".inst 0x45079831 // smmla z17.s, z1.b, z7.b\n"
".inst 0x45069854 // smmla z20.s, z2.b, z6.b\n"
- "ld1b { z4.b }, p0/Z, [x22, #2, MUL VL]\n"
+ "ld1b { z4.b }, p0/Z, [x20, #2, MUL VL]\n"
".inst 0x45079857 // smmla z23.s, z2.b, z7.b\n"
".inst 0x4506987a // smmla z26.s, z3.b, z6.b\n"
- "ld1b { z5.b }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1b { z5.b }, p0/Z, [x20, #3, MUL VL]\n"
".inst 0x4507987d // smmla z29.s, z3.b, z7.b\n"
- "ld1b { z6.b }, p0/Z, [x22, #4, MUL VL]\n"
- "ld1b { z7.b }, p0/Z, [x22, #5, MUL VL]\n"
+ "ld1b { z6.b }, p0/Z, [x20, #4, MUL VL]\n"
+ "ld1b { z7.b }, p0/Z, [x20, #5, MUL VL]\n"
".inst 0x45049809 // smmla z9.s, z0.b, z4.b\n"
- ".inst 0x4505980c // smmla z12.s, z0.b, z5.b\n"
- "addvl x22, x22, #6\n"
".inst 0x4504982f // smmla z15.s, z1.b, z4.b\n"
- ".inst 0x45059832 // smmla z18.s, z1.b, z5.b\n"
"add %x[Apanel], %x[Apanel], #0x40\n"
".inst 0x45049855 // smmla z21.s, z2.b, z4.b\n"
- ".inst 0x45059858 // smmla z24.s, z2.b, z5.b\n"
".inst 0x4504987b // smmla z27.s, z3.b, z4.b\n"
- ".inst 0x4505987e // smmla z30.s, z3.b, z5.b\n"
+ "addvl x20, x20, #6\n"
+ ".inst 0x4505980c // smmla z12.s, z0.b, z5.b\n"
".inst 0x4506980a // smmla z10.s, z0.b, z6.b\n"
".inst 0x4507980d // smmla z13.s, z0.b, z7.b\n"
+ ".inst 0x45059832 // smmla z18.s, z1.b, z5.b\n"
".inst 0x45069830 // smmla z16.s, z1.b, z6.b\n"
".inst 0x45079833 // smmla z19.s, z1.b, z7.b\n"
+ ".inst 0x45059858 // smmla z24.s, z2.b, z5.b\n"
+ ".inst 0x4505987e // smmla z30.s, z3.b, z5.b\n"
".inst 0x45069856 // smmla z22.s, z2.b, z6.b\n"
".inst 0x45079859 // smmla z25.s, z2.b, z7.b\n"
".inst 0x4506987c // smmla z28.s, z3.b, z6.b\n"
@@ -243,7 +243,7 @@ void sve_interleaved_s8s32_mmla_8x3VL(
"uzp2 z14.d, z14.d, z17.d\n"
"st1w { z9.s }, p0, [%x[Cpanel], #4, MUL VL]\n"
"uzp1 z17.d, z15.d, z18.d\n"
- "subs x23, x23, #0x1\n"
+ "subs x22, x22, #0x1\n"
"st1w { z10.s }, p0, [%x[Cpanel], #5, MUL VL]\n"
"uzp2 z15.d, z15.d, z18.d\n"
"uzp1 z18.d, z16.d, z19.d\n"
@@ -285,7 +285,7 @@ void sve_interleaved_s8s32_mmla_8x3VL(
"bne 1b\n"
: [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
: [args_ptr] "r" (&ka), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_bblocks] "I" (offsetof(KernelArgs, bblocks))
- : "cc", "memory", "p0", "x20", "x21", "x22", "x23", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "x19", "x20", "x21", "x22", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_8x3VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_8x3VL/a64fx.cpp
index 2bfec8f350..1e2fb138fd 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_8x3VL/a64fx.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_8x3VL/a64fx.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -33,34 +33,34 @@ void sve_interleaved_u8u32_dot_8x3VL_a64fx(
uint32_t *Cpanel, int ablocks, int bblocks, int K) {
struct KernelArgs {
+ size_t bblocks = {};
size_t K = {};
const uint8_t *Bpanel = {};
- size_t bblocks = {};
} ka;
+ ka.bblocks = bblocks;
ka.K = (K/4) - 1;
ka.Bpanel = Bpanel;
- ka.bblocks = bblocks;
__asm__ __volatile__(
"ptrue p0.b\n"
"1:" // Height loop
- "ldr x23, [%x[args_ptr], %[offsetof_bblocks]]\n"
- "ldr x22, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x22, [%x[args_ptr], %[offsetof_bblocks]]\n"
"mov x21, %x[Apanel]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_Bpanel]]\n"
"2:" // Width loop
- "ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
"mov %x[Apanel], x21\n"
- "cmp x20, #0x2\n"
+ "cmp x19, #0x2\n"
"mov z8.s, #0x0\n"
"mov z9.s, #0x0\n"
- "ld1b { z0.b }, p0/Z, [x22]\n"
+ "ld1b { z0.b }, p0/Z, [x20]\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
- "ld1b { z1.b }, p0/Z, [x22, #1, MUL VL]\n"
+ "ld1b { z1.b }, p0/Z, [x20, #1, MUL VL]\n"
"mov z12.s, #0x0\n"
"mov z13.s, #0x0\n"
- "ld1b { z2.b }, p0/Z, [x22, #2, MUL VL]\n"
+ "ld1b { z2.b }, p0/Z, [x20, #2, MUL VL]\n"
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
"ld1rw { z3.s }, p0/Z, [%x[Apanel]]\n"
@@ -87,7 +87,7 @@ void sve_interleaved_u8u32_dot_8x3VL_a64fx(
"3:" // main loop head
"udot z8.s, z0.b, z3.b\n"
"udot z9.s, z1.b, z3.b\n"
- "sub x20, x20, #0x2\n"
+ "sub x19, x19, #0x2\n"
"udot z10.s, z2.b, z3.b\n"
"ld1rw { z3.s }, p0/Z, [%x[Apanel], #16]\n"
"udot z11.s, z0.b, z4.b\n"
@@ -96,7 +96,7 @@ void sve_interleaved_u8u32_dot_8x3VL_a64fx(
"ld1rw { z4.s }, p0/Z, [%x[Apanel], #20]\n"
"udot z14.s, z0.b, z5.b\n"
"udot z15.s, z1.b, z5.b\n"
- "cmp x20, #0x2\n"
+ "cmp x19, #0x2\n"
"udot z16.s, z2.b, z5.b\n"
"ld1rw { z5.s }, p0/Z, [%x[Apanel], #24]\n"
"udot z17.s, z0.b, z6.b\n"
@@ -116,11 +116,11 @@ void sve_interleaved_u8u32_dot_8x3VL_a64fx(
"udot z28.s, z2.b, z5.b\n"
"ld1rw { z5.s }, p0/Z, [%x[Apanel], #40]\n"
"udot z29.s, z0.b, z6.b\n"
- "ld1b { z0.b }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1b { z0.b }, p0/Z, [x20, #3, MUL VL]\n"
"udot z30.s, z1.b, z6.b\n"
"udot z31.s, z2.b, z6.b\n"
- "ld1b { z1.b }, p0/Z, [x22, #4, MUL VL]\n"
- "ld1b { z2.b }, p0/Z, [x22, #5, MUL VL]\n"
+ "ld1b { z1.b }, p0/Z, [x20, #4, MUL VL]\n"
+ "ld1b { z2.b }, p0/Z, [x20, #5, MUL VL]\n"
"udot z8.s, z0.b, z3.b\n"
"ld1rw { z6.s }, p0/Z, [%x[Apanel], #44]\n"
"udot z9.s, z1.b, z3.b\n"
@@ -132,7 +132,7 @@ void sve_interleaved_u8u32_dot_8x3VL_a64fx(
"ld1rw { z4.s }, p0/Z, [%x[Apanel], #52]\n"
"udot z14.s, z0.b, z5.b\n"
"udot z15.s, z1.b, z5.b\n"
- "addvl x22, x22, #6\n"
+ "addvl x20, x20, #6\n"
"udot z16.s, z2.b, z5.b\n"
"ld1rw { z5.s }, p0/Z, [%x[Apanel], #56]\n"
"udot z17.s, z0.b, z6.b\n"
@@ -152,18 +152,18 @@ void sve_interleaved_u8u32_dot_8x3VL_a64fx(
"udot z27.s, z1.b, z5.b\n"
"udot z28.s, z2.b, z5.b\n"
"udot z29.s, z0.b, z6.b\n"
- "ld1b { z0.b }, p0/Z, [x22]\n"
+ "ld1b { z0.b }, p0/Z, [x20]\n"
"udot z30.s, z1.b, z6.b\n"
"udot z31.s, z2.b, z6.b\n"
- "ld1b { z1.b }, p0/Z, [x22, #1, MUL VL]\n"
- "ld1b { z2.b }, p0/Z, [x22, #2, MUL VL]\n"
+ "ld1b { z1.b }, p0/Z, [x20, #1, MUL VL]\n"
+ "ld1b { z2.b }, p0/Z, [x20, #2, MUL VL]\n"
"ld1rw { z5.s }, p0/Z, [%x[Apanel], #8]\n"
"ld1rw { z6.s }, p0/Z, [%x[Apanel], #12]\n"
"bge 3b\n"
"4:" // main loop skip
"udot z8.s, z0.b, z3.b\n"
"udot z9.s, z1.b, z3.b\n"
- "addvl x22, x22, #3\n"
+ "addvl x20, x20, #3\n"
"udot z10.s, z2.b, z3.b\n"
"ld1rw { z3.s }, p0/Z, [%x[Apanel], #16]\n"
"udot z11.s, z0.b, z4.b\n"
@@ -191,10 +191,10 @@ void sve_interleaved_u8u32_dot_8x3VL_a64fx(
"udot z29.s, z0.b, z6.b\n"
"udot z30.s, z1.b, z6.b\n"
"udot z31.s, z2.b, z6.b\n"
- "cbz x20, 5f\n"
- "ld1b { z0.b }, p0/Z, [x22]\n"
- "ld1b { z1.b }, p0/Z, [x22, #1, MUL VL]\n"
- "ld1b { z2.b }, p0/Z, [x22, #2, MUL VL]\n"
+ "cbz x19, 5f\n"
+ "ld1b { z0.b }, p0/Z, [x20]\n"
+ "ld1b { z1.b }, p0/Z, [x20, #1, MUL VL]\n"
+ "ld1b { z2.b }, p0/Z, [x20, #2, MUL VL]\n"
"ld1rw { z3.s }, p0/Z, [%x[Apanel]]\n"
"udot z8.s, z0.b, z3.b\n"
"ld1rw { z4.s }, p0/Z, [%x[Apanel], #4]\n"
@@ -203,24 +203,24 @@ void sve_interleaved_u8u32_dot_8x3VL_a64fx(
"ld1rw { z6.s }, p0/Z, [%x[Apanel], #12]\n"
"udot z10.s, z2.b, z3.b\n"
"udot z11.s, z0.b, z4.b\n"
+ "ld1rw { z3.s }, p0/Z, [%x[Apanel], #16]\n"
"udot z12.s, z1.b, z4.b\n"
"udot z13.s, z2.b, z4.b\n"
- "ld1rw { z3.s }, p0/Z, [%x[Apanel], #16]\n"
+ "ld1rw { z4.s }, p0/Z, [%x[Apanel], #20]\n"
"udot z14.s, z0.b, z5.b\n"
"udot z15.s, z1.b, z5.b\n"
- "ld1rw { z4.s }, p0/Z, [%x[Apanel], #20]\n"
"udot z16.s, z2.b, z5.b\n"
"udot z17.s, z0.b, z6.b\n"
"ld1rw { z5.s }, p0/Z, [%x[Apanel], #24]\n"
"udot z18.s, z1.b, z6.b\n"
"udot z19.s, z2.b, z6.b\n"
"ld1rw { z6.s }, p0/Z, [%x[Apanel], #28]\n"
+ "addvl x20, x20, #3\n"
"udot z20.s, z0.b, z3.b\n"
"udot z21.s, z1.b, z3.b\n"
- "addvl x22, x22, #3\n"
+ "add %x[Apanel], %x[Apanel], #0x20\n"
"udot z22.s, z2.b, z3.b\n"
"udot z23.s, z0.b, z4.b\n"
- "add %x[Apanel], %x[Apanel], #0x20\n"
"udot z24.s, z1.b, z4.b\n"
"udot z25.s, z2.b, z4.b\n"
"udot z26.s, z0.b, z5.b\n"
@@ -231,7 +231,7 @@ void sve_interleaved_u8u32_dot_8x3VL_a64fx(
"udot z31.s, z2.b, z6.b\n"
"5:" // multiply loop done
"st1w { z8.s }, p0, [%x[Cpanel]]\n"
- "subs x23, x23, #0x1\n"
+ "subs x22, x22, #0x1\n"
"st1w { z9.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
"st1w { z10.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
"st1w { z11.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
@@ -262,7 +262,7 @@ void sve_interleaved_u8u32_dot_8x3VL_a64fx(
"bne 1b\n"
: [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
: [args_ptr] "r" (&ka), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_bblocks] "I" (offsetof(KernelArgs, bblocks))
- : "cc", "memory", "p0", "x20", "x21", "x22", "x23", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "x19", "x20", "x21", "x22", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
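
The a64fx variant above is structured differently from the generic dot kernel: the three B columns stay resident in z0..z2, each 32-bit group of A is broadcast to every lane with ld1rw, and the multiply is the plain, non-indexed udot. In scalar terms that instruction behaves as below (a reference model only; udot_ref and vl_bytes are hypothetical names, and zm here holds the same four bytes repeated across the vector by the broadcast load):

#include <cstdint>
#include <cstddef>

// Scalar model of "udot zd.s, zn.b, zm.b": every 32-bit lane accumulates
// the dot product of its own four unsigned bytes from each source.
void udot_ref(uint32_t *zd, const uint8_t *zn, const uint8_t *zm,
              size_t vl_bytes) {
    for (size_t lane = 0; lane < vl_bytes / 4; ++lane)
        for (size_t k = 0; k < 4; ++k)
            zd[lane] += uint32_t(zn[4 * lane + k]) * uint32_t(zm[4 * lane + k]);
}
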
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_8x3VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_8x3VL/generic.cpp
index 99fff4e83d..f1642d0b21 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_8x3VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_8x3VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -33,44 +33,42 @@ void sve_interleaved_u8u32_dot_8x3VL(
uint32_t *Cpanel, int ablocks, int bblocks, int K) {
struct KernelArgs {
+ size_t bblocks = {};
size_t K = {};
const uint8_t *Bpanel = {};
- size_t bblocks = {};
} ka;
+ ka.bblocks = bblocks;
ka.K = (K/4) - 1;
ka.Bpanel = Bpanel;
- ka.bblocks = bblocks;
__asm__ __volatile__(
"ptrue p0.b\n"
"1:" // Height loop
- "ldr x23, [%x[args_ptr], %[offsetof_bblocks]]\n"
- "ldr x22, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x22, [%x[args_ptr], %[offsetof_bblocks]]\n"
"mov x21, %x[Apanel]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_Bpanel]]\n"
"2:" // Width loop
- "ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "mov %x[Apanel], x21\n"
- "cmp x20, #0x2\n"
"mov z8.s, #0x0\n"
"mov z9.s, #0x0\n"
- "ld1rqb { z0.b }, p0/Z, [%x[Apanel]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
- "ld1rqb { z1.b }, p0/Z, [%x[Apanel], #16]\n"
+ "ld1b { z4.b }, p0/Z, [x20]\n"
"mov z12.s, #0x0\n"
"mov z13.s, #0x0\n"
- "ld1b { z4.b }, p0/Z, [x22]\n"
+ "mov %x[Apanel], x21\n"
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
- "ld1b { z5.b }, p0/Z, [x22, #1, MUL VL]\n"
+ "cmp x19, #0x2\n"
"mov z16.s, #0x0\n"
"mov z17.s, #0x0\n"
- "ld1b { z6.b }, p0/Z, [x22, #2, MUL VL]\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
+ "ld1rqb { z0.b }, p0/Z, [%x[Apanel]]\n"
"mov z20.s, #0x0\n"
"mov z21.s, #0x0\n"
+ "ld1rqb { z1.b }, p0/Z, [%x[Apanel], #16]\n"
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"mov z24.s, #0x0\n"
@@ -85,29 +83,31 @@ void sve_interleaved_u8u32_dot_8x3VL(
"3:" // main loop head
"udot z8.s, z4.b, z0.b[0]\n"
"udot z11.s, z4.b, z0.b[1]\n"
- "ld1rqb { z2.b }, p0/Z, [%x[Apanel], #32]\n"
+ "ld1b { z5.b }, p0/Z, [x20, #1, MUL VL]\n"
"udot z14.s, z4.b, z0.b[2]\n"
"udot z17.s, z4.b, z0.b[3]\n"
- "ld1rqb { z3.b }, p0/Z, [%x[Apanel], #48]\n"
+ "ld1b { z6.b }, p0/Z, [x20, #2, MUL VL]\n"
"udot z20.s, z4.b, z1.b[0]\n"
"udot z23.s, z4.b, z1.b[1]\n"
- "sub x20, x20, #0x2\n"
+ "ld1rqb { z2.b }, p0/Z, [%x[Apanel], #32]\n"
"udot z26.s, z4.b, z1.b[2]\n"
"udot z29.s, z4.b, z1.b[3]\n"
- "ld1b { z4.b }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1rqb { z3.b }, p0/Z, [%x[Apanel], #48]\n"
"udot z9.s, z5.b, z0.b[0]\n"
"udot z12.s, z5.b, z0.b[1]\n"
- "cmp x20, #0x2\n"
+ "ld1b { z4.b }, p0/Z, [x20, #3, MUL VL]\n"
"udot z15.s, z5.b, z0.b[2]\n"
"udot z18.s, z5.b, z0.b[3]\n"
- "add %x[Apanel], %x[Apanel], #0x40\n"
+ "sub x19, x19, #0x2\n"
"udot z21.s, z5.b, z1.b[0]\n"
"udot z24.s, z5.b, z1.b[1]\n"
+ "cmp x19, #0x2\n"
"udot z27.s, z5.b, z1.b[2]\n"
"udot z30.s, z5.b, z1.b[3]\n"
- "ld1b { z5.b }, p0/Z, [x22, #4, MUL VL]\n"
+ "ld1b { z5.b }, p0/Z, [x20, #4, MUL VL]\n"
"udot z10.s, z6.b, z0.b[0]\n"
"udot z13.s, z6.b, z0.b[1]\n"
+ "add %x[Apanel], %x[Apanel], #0x40\n"
"udot z16.s, z6.b, z0.b[2]\n"
"udot z19.s, z6.b, z0.b[3]\n"
"ld1rqb { z0.b }, p0/Z, [%x[Apanel]]\n"
@@ -115,27 +115,26 @@ void sve_interleaved_u8u32_dot_8x3VL(
"udot z25.s, z6.b, z1.b[1]\n"
"udot z28.s, z6.b, z1.b[2]\n"
"udot z31.s, z6.b, z1.b[3]\n"
- "ld1b { z6.b }, p0/Z, [x22, #5, MUL VL]\n"
- "addvl x22, x22, #6\n"
+ "ld1b { z6.b }, p0/Z, [x20, #5, MUL VL]\n"
"udot z8.s, z4.b, z2.b[0]\n"
"udot z11.s, z4.b, z2.b[1]\n"
"ld1rqb { z1.b }, p0/Z, [%x[Apanel], #16]\n"
"udot z14.s, z4.b, z2.b[2]\n"
"udot z17.s, z4.b, z2.b[3]\n"
+ "addvl x20, x20, #6\n"
"udot z20.s, z4.b, z3.b[0]\n"
"udot z23.s, z4.b, z3.b[1]\n"
"udot z26.s, z4.b, z3.b[2]\n"
"udot z29.s, z4.b, z3.b[3]\n"
- "ld1b { z4.b }, p0/Z, [x22]\n"
"udot z9.s, z5.b, z2.b[0]\n"
"udot z12.s, z5.b, z2.b[1]\n"
+ "ld1b { z4.b }, p0/Z, [x20]\n"
"udot z15.s, z5.b, z2.b[2]\n"
"udot z18.s, z5.b, z2.b[3]\n"
"udot z21.s, z5.b, z3.b[0]\n"
"udot z24.s, z5.b, z3.b[1]\n"
"udot z27.s, z5.b, z3.b[2]\n"
"udot z30.s, z5.b, z3.b[3]\n"
- "ld1b { z5.b }, p0/Z, [x22, #1, MUL VL]\n"
"udot z10.s, z6.b, z2.b[0]\n"
"udot z13.s, z6.b, z2.b[1]\n"
"udot z16.s, z6.b, z2.b[2]\n"
@@ -144,19 +143,20 @@ void sve_interleaved_u8u32_dot_8x3VL(
"udot z25.s, z6.b, z3.b[1]\n"
"udot z28.s, z6.b, z3.b[2]\n"
"udot z31.s, z6.b, z3.b[3]\n"
- "ld1b { z6.b }, p0/Z, [x22, #2, MUL VL]\n"
"bge 3b\n"
"4:" // main loop skip
"udot z8.s, z4.b, z0.b[0]\n"
"udot z11.s, z4.b, z0.b[1]\n"
- "add %x[Apanel], %x[Apanel], #0x20\n"
+ "ld1b { z5.b }, p0/Z, [x20, #1, MUL VL]\n"
"udot z14.s, z4.b, z0.b[2]\n"
"udot z17.s, z4.b, z0.b[3]\n"
- "addvl x22, x22, #3\n"
+ "ld1b { z6.b }, p0/Z, [x20, #2, MUL VL]\n"
"udot z20.s, z4.b, z1.b[0]\n"
"udot z23.s, z4.b, z1.b[1]\n"
+ "add %x[Apanel], %x[Apanel], #0x20\n"
"udot z26.s, z4.b, z1.b[2]\n"
"udot z29.s, z4.b, z1.b[3]\n"
+ "addvl x20, x20, #3\n"
"udot z9.s, z5.b, z0.b[0]\n"
"udot z12.s, z5.b, z0.b[1]\n"
"udot z15.s, z5.b, z0.b[2]\n"
@@ -173,19 +173,19 @@ void sve_interleaved_u8u32_dot_8x3VL(
"udot z25.s, z6.b, z1.b[1]\n"
"udot z28.s, z6.b, z1.b[2]\n"
"udot z31.s, z6.b, z1.b[3]\n"
- "cbz x20, 5f\n"
+ "cbz x19, 5f\n"
"ld1rqb { z0.b }, p0/Z, [%x[Apanel]]\n"
"ld1rqb { z1.b }, p0/Z, [%x[Apanel], #16]\n"
"add %x[Apanel], %x[Apanel], #0x20\n"
- "ld1b { z7.b }, p0/Z, [x22]\n"
- "ld1b { z4.b }, p0/Z, [x22, #1, MUL VL]\n"
+ "ld1b { z7.b }, p0/Z, [x20]\n"
+ "ld1b { z4.b }, p0/Z, [x20, #1, MUL VL]\n"
+ "ld1b { z5.b }, p0/Z, [x20, #2, MUL VL]\n"
+ "addvl x20, x20, #3\n"
"udot z8.s, z7.b, z0.b[0]\n"
- "ld1b { z5.b }, p0/Z, [x22, #2, MUL VL]\n"
"udot z11.s, z7.b, z0.b[1]\n"
"udot z14.s, z7.b, z0.b[2]\n"
"udot z17.s, z7.b, z0.b[3]\n"
"udot z20.s, z7.b, z1.b[0]\n"
- "addvl x22, x22, #3\n"
"udot z23.s, z7.b, z1.b[1]\n"
"udot z26.s, z7.b, z1.b[2]\n"
"udot z29.s, z7.b, z1.b[3]\n"
@@ -207,7 +207,7 @@ void sve_interleaved_u8u32_dot_8x3VL(
"udot z31.s, z5.b, z1.b[3]\n"
"5:" // multiply loop done
"st1w { z8.s }, p0, [%x[Cpanel]]\n"
- "subs x23, x23, #0x1\n"
+ "subs x22, x22, #0x1\n"
"st1w { z9.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
"st1w { z10.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
"st1w { z11.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
@@ -238,7 +238,7 @@ void sve_interleaved_u8u32_dot_8x3VL(
"bne 1b\n"
: [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
: [args_ptr] "r" (&ka), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_bblocks] "I" (offsetof(KernelArgs, bblocks))
- : "cc", "memory", "p0", "x20", "x21", "x22", "x23", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "x19", "x20", "x21", "x22", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_mmla_8x3VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_mmla_8x3VL/generic.cpp
index 0b70d034dd..c4fdfa6abc 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_mmla_8x3VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_mmla_8x3VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -33,28 +33,28 @@ void sve_interleaved_u8u32_mmla_8x3VL(
uint32_t *Cpanel, int ablocks, int bblocks, int K) {
struct KernelArgs {
+ size_t bblocks = {};
size_t K = {};
const uint8_t *Bpanel = {};
- size_t bblocks = {};
} ka;
+ ka.bblocks = bblocks;
ka.K = (K/8) - 1;
ka.Bpanel = Bpanel;
- ka.bblocks = bblocks;
__asm__ __volatile__(
"ptrue p0.b\n"
"1:" // Height loop
- "ldr x23, [%x[args_ptr], %[offsetof_bblocks]]\n"
- "ldr x22, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x22, [%x[args_ptr], %[offsetof_bblocks]]\n"
"mov x21, %x[Apanel]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_Bpanel]]\n"
"2:" // Width loop
- "ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
"mov %x[Apanel], x21\n"
- "cmp x20, #0x2\n"
+ "cmp x19, #0x2\n"
"mov z8.s, #0x0\n"
"mov z9.s, #0x0\n"
- "ld1b { z4.b }, p0/Z, [x22]\n"
+ "ld1b { z4.b }, p0/Z, [x20]\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"ld1rqb { z0.b }, p0/Z, [%x[Apanel]]\n"
@@ -63,13 +63,13 @@ void sve_interleaved_u8u32_mmla_8x3VL(
"ld1rqb { z1.b }, p0/Z, [%x[Apanel], #16]\n"
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
- "ld1b { z5.b }, p0/Z, [x22, #1, MUL VL]\n"
+ "ld1b { z5.b }, p0/Z, [x20, #1, MUL VL]\n"
"mov z16.s, #0x0\n"
"mov z17.s, #0x0\n"
"ld1rqb { z2.b }, p0/Z, [%x[Apanel], #32]\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
- "addvl x22, x22, #2\n"
+ "addvl x20, x20, #2\n"
"mov z20.s, #0x0\n"
"mov z21.s, #0x0\n"
"add %x[Apanel], %x[Apanel], #0x30\n"
@@ -87,143 +87,143 @@ void sve_interleaved_u8u32_mmla_8x3VL(
"3:" // main loop head
"ld1rqb { z3.b }, p0/Z, [%x[Apanel]]\n"
".inst 0x45c49808 // ummla z8.s, z0.b, z4.b\n"
- ".inst 0x45c5980b // ummla z11.s, z0.b, z5.b\n"
".inst 0x45c4982e // ummla z14.s, z1.b, z4.b\n"
+ ".inst 0x45c5980b // ummla z11.s, z0.b, z5.b\n"
".inst 0x45c59831 // ummla z17.s, z1.b, z5.b\n"
- "ld1b { z6.b }, p0/Z, [x22]\n"
+ "ld1b { z6.b }, p0/Z, [x20]\n"
".inst 0x45c49854 // ummla z20.s, z2.b, z4.b\n"
".inst 0x45c59857 // ummla z23.s, z2.b, z5.b\n"
- "ld1b { z7.b }, p0/Z, [x22, #1, MUL VL]\n"
+ "ld1b { z7.b }, p0/Z, [x20, #1, MUL VL]\n"
".inst 0x45c4987a // ummla z26.s, z3.b, z4.b\n"
".inst 0x45c5987d // ummla z29.s, z3.b, z5.b\n"
- "ld1b { z4.b }, p0/Z, [x22, #2, MUL VL]\n"
- "ld1b { z5.b }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1b { z4.b }, p0/Z, [x20, #2, MUL VL]\n"
+ "ld1b { z5.b }, p0/Z, [x20, #3, MUL VL]\n"
".inst 0x45c69809 // ummla z9.s, z0.b, z6.b\n"
- ".inst 0x45c7980c // ummla z12.s, z0.b, z7.b\n"
".inst 0x45c6982f // ummla z15.s, z1.b, z6.b\n"
- ".inst 0x45c79832 // ummla z18.s, z1.b, z7.b\n"
- "sub x20, x20, #0x2\n"
".inst 0x45c69855 // ummla z21.s, z2.b, z6.b\n"
- ".inst 0x45c79858 // ummla z24.s, z2.b, z7.b\n"
- "cmp x20, #0x2\n"
".inst 0x45c6987b // ummla z27.s, z3.b, z6.b\n"
- ".inst 0x45c7987e // ummla z30.s, z3.b, z7.b\n"
- "ld1b { z6.b }, p0/Z, [x22, #4, MUL VL]\n"
+ "ld1b { z6.b }, p0/Z, [x20, #4, MUL VL]\n"
+ ".inst 0x45c7980c // ummla z12.s, z0.b, z7.b\n"
".inst 0x45c4980a // ummla z10.s, z0.b, z4.b\n"
+ "sub x19, x19, #0x2\n"
".inst 0x45c5980d // ummla z13.s, z0.b, z5.b\n"
+ ".inst 0x45c79832 // ummla z18.s, z1.b, z7.b\n"
"ld1rqb { z0.b }, p0/Z, [%x[Apanel], #16]\n"
".inst 0x45c49830 // ummla z16.s, z1.b, z4.b\n"
".inst 0x45c59833 // ummla z19.s, z1.b, z5.b\n"
"ld1rqb { z1.b }, p0/Z, [%x[Apanel], #32]\n"
+ ".inst 0x45c79858 // ummla z24.s, z2.b, z7.b\n"
+ ".inst 0x45c7987e // ummla z30.s, z3.b, z7.b\n"
+ "ld1b { z7.b }, p0/Z, [x20, #5, MUL VL]\n"
".inst 0x45c49856 // ummla z22.s, z2.b, z4.b\n"
".inst 0x45c59859 // ummla z25.s, z2.b, z5.b\n"
- "ld1b { z7.b }, p0/Z, [x22, #5, MUL VL]\n"
+ "ld1rqb { z2.b }, p0/Z, [%x[Apanel], #48]\n"
".inst 0x45c4987c // ummla z28.s, z3.b, z4.b\n"
".inst 0x45c5987f // ummla z31.s, z3.b, z5.b\n"
- "ld1rqb { z2.b }, p0/Z, [%x[Apanel], #48]\n"
"ld1rqb { z3.b }, p0/Z, [%x[Apanel], #64]\n"
- "ld1b { z4.b }, p0/Z, [x22, #6, MUL VL]\n"
+ "ld1b { z4.b }, p0/Z, [x20, #6, MUL VL]\n"
+ "ld1b { z5.b }, p0/Z, [x20, #7, MUL VL]\n"
+ "addvl x20, x20, #16\n"
".inst 0x45c69808 // ummla z8.s, z0.b, z6.b\n"
- "ld1b { z5.b }, p0/Z, [x22, #7, MUL VL]\n"
- "addvl x22, x22, #16\n"
- ".inst 0x45c7980b // ummla z11.s, z0.b, z7.b\n"
".inst 0x45c6982e // ummla z14.s, z1.b, z6.b\n"
+ "cmp x19, #0x2\n"
+ ".inst 0x45c7980b // ummla z11.s, z0.b, z7.b\n"
".inst 0x45c79831 // ummla z17.s, z1.b, z7.b\n"
".inst 0x45c69854 // ummla z20.s, z2.b, z6.b\n"
".inst 0x45c79857 // ummla z23.s, z2.b, z7.b\n"
".inst 0x45c6987a // ummla z26.s, z3.b, z6.b\n"
".inst 0x45c7987d // ummla z29.s, z3.b, z7.b\n"
- "ld1b { z6.b }, p0/Z, [x22, #-8, MUL VL]\n"
- "ld1b { z7.b }, p0/Z, [x22, #-7, MUL VL]\n"
+ "ld1b { z6.b }, p0/Z, [x20, #-8, MUL VL]\n"
+ "ld1b { z7.b }, p0/Z, [x20, #-7, MUL VL]\n"
".inst 0x45c49809 // ummla z9.s, z0.b, z4.b\n"
- ".inst 0x45c5980c // ummla z12.s, z0.b, z5.b\n"
".inst 0x45c4982f // ummla z15.s, z1.b, z4.b\n"
- ".inst 0x45c59832 // ummla z18.s, z1.b, z5.b\n"
".inst 0x45c49855 // ummla z21.s, z2.b, z4.b\n"
- ".inst 0x45c59858 // ummla z24.s, z2.b, z5.b\n"
".inst 0x45c4987b // ummla z27.s, z3.b, z4.b\n"
- ".inst 0x45c5987e // ummla z30.s, z3.b, z5.b\n"
- "ld1b { z4.b }, p0/Z, [x22, #-6, MUL VL]\n"
+ "ld1b { z4.b }, p0/Z, [x20, #-6, MUL VL]\n"
+ ".inst 0x45c5980c // ummla z12.s, z0.b, z5.b\n"
".inst 0x45c6980a // ummla z10.s, z0.b, z6.b\n"
".inst 0x45c7980d // ummla z13.s, z0.b, z7.b\n"
+ ".inst 0x45c59832 // ummla z18.s, z1.b, z5.b\n"
"ld1rqb { z0.b }, p0/Z, [%x[Apanel], #80]\n"
".inst 0x45c69830 // ummla z16.s, z1.b, z6.b\n"
".inst 0x45c79833 // ummla z19.s, z1.b, z7.b\n"
"ld1rqb { z1.b }, p0/Z, [%x[Apanel], #96]\n"
+ ".inst 0x45c59858 // ummla z24.s, z2.b, z5.b\n"
+ ".inst 0x45c5987e // ummla z30.s, z3.b, z5.b\n"
+ "ld1b { z5.b }, p0/Z, [x20, #-5, MUL VL]\n"
".inst 0x45c69856 // ummla z22.s, z2.b, z6.b\n"
".inst 0x45c79859 // ummla z25.s, z2.b, z7.b\n"
- "ld1b { z5.b }, p0/Z, [x22, #-5, MUL VL]\n"
+ "ld1rqb { z2.b }, p0/Z, [%x[Apanel], #112]\n"
".inst 0x45c6987c // ummla z28.s, z3.b, z6.b\n"
".inst 0x45c7987f // ummla z31.s, z3.b, z7.b\n"
- "ld1rqb { z2.b }, p0/Z, [%x[Apanel], #112]\n"
"add %x[Apanel], %x[Apanel], #0x80\n"
- "addvl x22, x22, #-4\n"
+ "addvl x20, x20, #-4\n"
"bge 3b\n"
"4:" // main loop skip
"ld1rqb { z3.b }, p0/Z, [%x[Apanel]]\n"
".inst 0x45c49808 // ummla z8.s, z0.b, z4.b\n"
- ".inst 0x45c5980b // ummla z11.s, z0.b, z5.b\n"
".inst 0x45c4982e // ummla z14.s, z1.b, z4.b\n"
+ ".inst 0x45c5980b // ummla z11.s, z0.b, z5.b\n"
".inst 0x45c59831 // ummla z17.s, z1.b, z5.b\n"
- "ld1b { z6.b }, p0/Z, [x22]\n"
+ "ld1b { z6.b }, p0/Z, [x20]\n"
".inst 0x45c49854 // ummla z20.s, z2.b, z4.b\n"
".inst 0x45c59857 // ummla z23.s, z2.b, z5.b\n"
- "ld1b { z7.b }, p0/Z, [x22, #1, MUL VL]\n"
+ "ld1b { z7.b }, p0/Z, [x20, #1, MUL VL]\n"
".inst 0x45c4987a // ummla z26.s, z3.b, z4.b\n"
".inst 0x45c5987d // ummla z29.s, z3.b, z5.b\n"
- "ld1b { z4.b }, p0/Z, [x22, #2, MUL VL]\n"
- "ld1b { z5.b }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1b { z4.b }, p0/Z, [x20, #2, MUL VL]\n"
+ "ld1b { z5.b }, p0/Z, [x20, #3, MUL VL]\n"
".inst 0x45c69809 // ummla z9.s, z0.b, z6.b\n"
- ".inst 0x45c7980c // ummla z12.s, z0.b, z7.b\n"
".inst 0x45c6982f // ummla z15.s, z1.b, z6.b\n"
- ".inst 0x45c79832 // ummla z18.s, z1.b, z7.b\n"
- "add %x[Apanel], %x[Apanel], #0x10\n"
".inst 0x45c69855 // ummla z21.s, z2.b, z6.b\n"
- ".inst 0x45c79858 // ummla z24.s, z2.b, z7.b\n"
- "addvl x22, x22, #4\n"
".inst 0x45c6987b // ummla z27.s, z3.b, z6.b\n"
- ".inst 0x45c7987e // ummla z30.s, z3.b, z7.b\n"
+ "add %x[Apanel], %x[Apanel], #0x10\n"
+ ".inst 0x45c7980c // ummla z12.s, z0.b, z7.b\n"
".inst 0x45c4980a // ummla z10.s, z0.b, z4.b\n"
+ "addvl x20, x20, #4\n"
".inst 0x45c5980d // ummla z13.s, z0.b, z5.b\n"
+ ".inst 0x45c79832 // ummla z18.s, z1.b, z7.b\n"
".inst 0x45c49830 // ummla z16.s, z1.b, z4.b\n"
".inst 0x45c59833 // ummla z19.s, z1.b, z5.b\n"
+ ".inst 0x45c79858 // ummla z24.s, z2.b, z7.b\n"
+ ".inst 0x45c7987e // ummla z30.s, z3.b, z7.b\n"
".inst 0x45c49856 // ummla z22.s, z2.b, z4.b\n"
".inst 0x45c59859 // ummla z25.s, z2.b, z5.b\n"
".inst 0x45c4987c // ummla z28.s, z3.b, z4.b\n"
".inst 0x45c5987f // ummla z31.s, z3.b, z5.b\n"
- "cbz x20, 5f\n"
- "ld1b { z6.b }, p0/Z, [x22]\n"
+ "cbz x19, 5f\n"
+ "ld1b { z6.b }, p0/Z, [x20]\n"
"ld1rqb { z0.b }, p0/Z, [%x[Apanel]]\n"
".inst 0x45c69808 // ummla z8.s, z0.b, z6.b\n"
"ld1rqb { z1.b }, p0/Z, [%x[Apanel], #16]\n"
- "ld1b { z7.b }, p0/Z, [x22, #1, MUL VL]\n"
- ".inst 0x45c7980b // ummla z11.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p0/Z, [x20, #1, MUL VL]\n"
+ ".inst 0x45c6982e // ummla z14.s, z1.b, z6.b\n"
"ld1rqb { z2.b }, p0/Z, [%x[Apanel], #32]\n"
"ld1rqb { z3.b }, p0/Z, [%x[Apanel], #48]\n"
- ".inst 0x45c6982e // ummla z14.s, z1.b, z6.b\n"
+ ".inst 0x45c7980b // ummla z11.s, z0.b, z7.b\n"
".inst 0x45c79831 // ummla z17.s, z1.b, z7.b\n"
".inst 0x45c69854 // ummla z20.s, z2.b, z6.b\n"
- "ld1b { z4.b }, p0/Z, [x22, #2, MUL VL]\n"
+ "ld1b { z4.b }, p0/Z, [x20, #2, MUL VL]\n"
".inst 0x45c79857 // ummla z23.s, z2.b, z7.b\n"
".inst 0x45c6987a // ummla z26.s, z3.b, z6.b\n"
- "ld1b { z5.b }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1b { z5.b }, p0/Z, [x20, #3, MUL VL]\n"
".inst 0x45c7987d // ummla z29.s, z3.b, z7.b\n"
- "ld1b { z6.b }, p0/Z, [x22, #4, MUL VL]\n"
- "ld1b { z7.b }, p0/Z, [x22, #5, MUL VL]\n"
+ "ld1b { z6.b }, p0/Z, [x20, #4, MUL VL]\n"
+ "ld1b { z7.b }, p0/Z, [x20, #5, MUL VL]\n"
".inst 0x45c49809 // ummla z9.s, z0.b, z4.b\n"
- ".inst 0x45c5980c // ummla z12.s, z0.b, z5.b\n"
- "addvl x22, x22, #6\n"
".inst 0x45c4982f // ummla z15.s, z1.b, z4.b\n"
- ".inst 0x45c59832 // ummla z18.s, z1.b, z5.b\n"
"add %x[Apanel], %x[Apanel], #0x40\n"
".inst 0x45c49855 // ummla z21.s, z2.b, z4.b\n"
- ".inst 0x45c59858 // ummla z24.s, z2.b, z5.b\n"
".inst 0x45c4987b // ummla z27.s, z3.b, z4.b\n"
- ".inst 0x45c5987e // ummla z30.s, z3.b, z5.b\n"
+ "addvl x20, x20, #6\n"
+ ".inst 0x45c5980c // ummla z12.s, z0.b, z5.b\n"
".inst 0x45c6980a // ummla z10.s, z0.b, z6.b\n"
".inst 0x45c7980d // ummla z13.s, z0.b, z7.b\n"
+ ".inst 0x45c59832 // ummla z18.s, z1.b, z5.b\n"
".inst 0x45c69830 // ummla z16.s, z1.b, z6.b\n"
".inst 0x45c79833 // ummla z19.s, z1.b, z7.b\n"
+ ".inst 0x45c59858 // ummla z24.s, z2.b, z5.b\n"
+ ".inst 0x45c5987e // ummla z30.s, z3.b, z5.b\n"
".inst 0x45c69856 // ummla z22.s, z2.b, z6.b\n"
".inst 0x45c79859 // ummla z25.s, z2.b, z7.b\n"
".inst 0x45c6987c // ummla z28.s, z3.b, z6.b\n"
@@ -243,7 +243,7 @@ void sve_interleaved_u8u32_mmla_8x3VL(
"uzp2 z14.d, z14.d, z17.d\n"
"st1w { z9.s }, p0, [%x[Cpanel], #4, MUL VL]\n"
"uzp1 z17.d, z15.d, z18.d\n"
- "subs x23, x23, #0x1\n"
+ "subs x22, x22, #0x1\n"
"st1w { z10.s }, p0, [%x[Cpanel], #5, MUL VL]\n"
"uzp2 z15.d, z15.d, z18.d\n"
"uzp1 z18.d, z16.d, z19.d\n"
@@ -285,7 +285,7 @@ void sve_interleaved_u8u32_mmla_8x3VL(
"bne 1b\n"
: [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
: [args_ptr] "r" (&ka), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_bblocks] "I" (offsetof(KernelArgs, bblocks))
- : "cc", "memory", "p0", "x20", "x21", "x22", "x23", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "x19", "x20", "x21", "x22", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
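
The mmla kernels replace the four-byte dot product with a small matrix multiply per 128-bit segment: in "ummla zd.s, zn.b, zm.b", zn carries two rows of eight unsigned bytes, zm carries two columns of eight, and zd accumulates the resulting 2x2 uint32 tile row-major. That tiled accumulator layout is what the uzp1/uzp2 sequence in the store phase untangles before writing Cpanel. A scalar model for exposition (ummla_ref and vl_bytes are hypothetical names):

#include <cstdint>
#include <cstddef>

void ummla_ref(uint32_t *zd, const uint8_t *zn, const uint8_t *zm,
               size_t vl_bytes) {
    for (size_t seg = 0; seg < vl_bytes; seg += 16) {   // per 128-bit segment
        for (int r = 0; r < 2; ++r) {                   // rows of the 2x8 in zn
            for (int c = 0; c < 2; ++c) {               // columns of the 8x2 in zm
                uint32_t acc = zd[seg / 4 + 2 * r + c];
                for (int k = 0; k < 8; ++k)
                    acc += uint32_t(zn[seg + 8 * r + k]) * uint32_t(zm[seg + 8 * c + k]);
                zd[seg / 4 + 2 * r + c] = acc;          // 2x2 tile, row-major
            }
        }
    }
}
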
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_fp32_mla_8x1VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_fp32_mla_8x1VL.hpp
new file mode 100644
index 0000000000..ab225589e1
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_fp32_mla_8x1VL.hpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2019-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+
+
+namespace arm_gemm
+{
+
+// Actual kernel implementations
+void sve_smallK_hybrid_fp32_mla_8x1VL(const float *, int, const float *, float *, int, int, int, int, const float *, Activation, bool);
+
+class cls_sve_smallK_hybrid_fp32_mla_8x1VL
+{
+public:
+ typedef float operand_type;
+ typedef float result_type;
+
+ typedef void (*kern_type)(const float *, int, const float *, float *, int, int, int, int, const float *, Activation, bool);
+
+ /* Kernel blocking parameters */
+ static constexpr unsigned int out_height()
+ {
+ return 8;
+ }
+
+ static unsigned int out_width()
+ {
+ return get_vector_length<float>() * 1;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 1;
+ }
+
+ static constexpr bool supports_accumulate()
+ {
+ return false;
+ }
+
+ static constexpr bool supports_bias()
+ {
+ return true;
+ }
+
+ static constexpr bool supports_activation()
+ {
+ return true;
+ }
+
+ StdTransformsSVE<operand_type, result_type, 8, 1, 1> transforms = {};
+
+ // Default to the generic kernel
+ kern_type kernel=sve_smallK_hybrid_fp32_mla_8x1VL;
+
+ cls_sve_smallK_hybrid_fp32_mla_8x1VL(const CPUInfo *)
+ {
+
+ }
+};
+
+} // namespace arm_gemm
+
+#endif // ARM_COMPUTE_ENABLE_SVE
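
The header above registers an 8-row by one-VL-column hybrid kernel that folds in bias and activation but does not support accumulation; the generic.cpp that follows implements it as one hand-written asm body per K value (the switch(K) below), clamping every result to [minval, maxval] before the store. As a plain statement of what a single call computes, here is a naive sketch under the parameter names visible below (smallk_hybrid_ref is hypothetical, and the real kernel reads B as a packed panel, which this row-major loop ignores):

#include <algorithm>

void smallk_hybrid_ref(const float *A, int lda, const float *B,
                       float *C, int ldc, int M, int N, int K,
                       const float *bias, float minval, float maxval) {
    for (int m = 0; m < M; ++m) {
        for (int n = 0; n < N; ++n) {
            float acc = bias ? bias[n] : 0.0f;          // matches the nullbias path
            for (int k = 0; k < K; ++k)
                acc += A[m * lda + k] * B[k * N + n];   // assumes dense row-major B
            C[m * ldc + n] = std::min(std::max(acc, minval), maxval);
        }
    }
}
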
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_fp32_mla_8x1VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_fp32_mla_8x1VL/generic.cpp
new file mode 100644
index 0000000000..c021539099
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_fp32_mla_8x1VL/generic.cpp
@@ -0,0 +1,18807 @@
+/*
+ * Copyright (c) 2019-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include <algorithm>
+
+#include "arm_gemm.hpp"
+
+
+#include "../../asmlib.hpp"
+#include "../../utils.hpp"
+
+namespace arm_gemm {
+
+void sve_smallK_hybrid_fp32_mla_8x1VL(const float *A, int lda, const float *B, float *C, int ldc, int M, int N, int K, const float *bias, Activation act, bool) {
+ const long loops_count = iceildiv(N, (int)get_vector_length<float>()) - 1;
+ const long ldab = lda * sizeof(float);
+ const long ldcb = ldc * sizeof(float);
+ const long odd_depth = (K % 4) ? (K % 4) : 4;
+ const long last_width = N - (loops_count * get_vector_length<float>());
+ float nullbias[64];
+ if (!bias) {
+ memset(nullbias, 0, (1 * get_vector_length<float>() * sizeof(float)));
+ }
+ float minval = - static_cast<float>(std::numeric_limits<float>::infinity());
+ float maxval = static_cast<float>(std::numeric_limits<float>::infinity());
+ const float * const minptr = &minval;
+ const float * const maxptr = &maxval;
+
+ switch(act.type)
+ {
+ default:
+ case Activation::Type::None:
+ break;
+ case Activation::Type::BoundedReLU:
+ maxval = static_cast<float>(act.param1);
+ /* fall through */
+ case Activation::Type::ReLU:
+ minval = 0.0f;
+ break;
+ }
+
+ for (int y0=0; y0<M; y0+=8) {
+ long loops = loops_count;
+ long oob_rows = std::max(8 - (M-y0), 0);
+ long temp = 0;
+ const float *b_ptr0 = B;
+ const float *biasptr = bias ? bias : nullbias;
+ const uint64_t biasinc = bias ? get_vector_length<float>() * 1*sizeof(float) : 0;
+ const float *a_ptr0 = A + (y0 * lda);
+
+ float *c_ptr0 = C + (y0 * ldc);
+
+ switch(K) {
+ case 1:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.s\n"
+ "whilelt p6.s, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #1\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0]]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7]\n"
+ "cbz %[loops], 2f\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z25.d, z24.d\n"
+ "mov z26.d, z24.d\n"
+ "mov z27.d, z24.d\n"
+ "mov z28.d, z24.d\n"
+ "mov z29.d, z24.d\n"
+ "mov z30.d, z24.d\n"
+ "mov z31.d, z24.d\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #1\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z25.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z26.d, z24.d\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z27.d, z24.d\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.d, z24.d\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #1\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z25.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z26.d, z24.d\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z27.d, z24.d\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.d, z24.d\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "b 5f\n"
+ "2:\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "mov z25.d, z24.d\n"
+ "mov z26.d, z24.d\n"
+ "mov z27.d, z24.d\n"
+ "mov z28.d, z24.d\n"
+ "mov z29.d, z24.d\n"
+ "mov z30.d, z24.d\n"
+ "mov z31.d, z24.d\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "5:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [biasptr] "+r" (biasptr)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width), [biasinc] "r" (biasinc), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
+ case 2:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.s\n"
+ "whilelt p6.s, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0]]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #2\n"
+ "cbz %[loops], 2f\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z25.d, z24.d\n"
+ "mov z26.d, z24.d\n"
+ "mov z27.d, z24.d\n"
+ "mov z28.d, z24.d\n"
+ "mov z29.d, z24.d\n"
+ "mov z30.d, z24.d\n"
+ "mov z31.d, z24.d\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "addvl %[b_ptr0], %[b_ptr0], #2\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z25.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z26.d, z24.d\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z27.d, z24.d\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z28.d, z24.d\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "mov z30.d, z24.d\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #2\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z25.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z26.d, z24.d\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z27.d, z24.d\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.d, z24.d\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "b 5f\n"
+ "2:\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "mov z25.d, z24.d\n"
+ "mov z26.d, z24.d\n"
+ "mov z27.d, z24.d\n"
+ "mov z28.d, z24.d\n"
+ "mov z29.d, z24.d\n"
+ "mov z30.d, z24.d\n"
+ "mov z31.d, z24.d\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "5:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [biasptr] "+r" (biasptr)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width), [biasinc] "r" (biasinc), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
+ case 3:
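+ // Case 3: as case 2, with three B vectors (z16-z18) and fmlas on A
+ // elements [0]-[2].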
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.s\n"
+ "whilelt p6.s, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0]]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #3\n"
+ "cbz %[loops], 2f\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z25.d, z24.d\n"
+ "mov z26.d, z24.d\n"
+ "mov z27.d, z24.d\n"
+ "mov z28.d, z24.d\n"
+ "mov z29.d, z24.d\n"
+ "mov z30.d, z24.d\n"
+ "mov z31.d, z24.d\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "addvl %[b_ptr0], %[b_ptr0], #3\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z25.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z26.d, z24.d\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z27.d, z24.d\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z28.d, z24.d\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "mov z30.d, z24.d\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "addvl %[b_ptr0], %[b_ptr0], #3\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z25.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z26.d, z24.d\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z27.d, z24.d\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z28.d, z24.d\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "mov z30.d, z24.d\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "b 5f\n"
+ "2:\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "mov z25.d, z24.d\n"
+ "mov z26.d, z24.d\n"
+ "mov z27.d, z24.d\n"
+ "mov z28.d, z24.d\n"
+ "mov z29.d, z24.d\n"
+ "mov z30.d, z24.d\n"
+ "mov z31.d, z24.d\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "5:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [biasptr] "+r" (biasptr)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width), [biasinc] "r" (biasinc), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
+ case 4:
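+ // Case 4: four B vectors (z16-z19); elements [0]-[3] of each row's single
+ // A quadword (loaded under p6) are consumed.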
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.s\n"
+ "whilelt p6.s, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0]]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #4\n"
+ "cbz %[loops], 2f\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z25.d, z24.d\n"
+ "mov z26.d, z24.d\n"
+ "mov z27.d, z24.d\n"
+ "mov z28.d, z24.d\n"
+ "mov z29.d, z24.d\n"
+ "mov z30.d, z24.d\n"
+ "mov z31.d, z24.d\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "addvl %[b_ptr0], %[b_ptr0], #4\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z25.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z26.d, z24.d\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z27.d, z24.d\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.d, z24.d\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "addvl %[b_ptr0], %[b_ptr0], #4\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z25.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z26.d, z24.d\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z27.d, z24.d\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z28.d, z24.d\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "mov z30.d, z24.d\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "b 5f\n"
+ "2:\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "mov z25.d, z24.d\n"
+ "mov z26.d, z24.d\n"
+ "mov z27.d, z24.d\n"
+ "mov z28.d, z24.d\n"
+ "mov z29.d, z24.d\n"
+ "mov z30.d, z24.d\n"
+ "mov z31.d, z24.d\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "5:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [biasptr] "+r" (biasptr)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width), [biasinc] "r" (biasinc), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
+ case 5:
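+ // Case 5: five B vectors (z16-z20). An ld1rqw holds only four fp32 values,
+ // so each A row is loaded in two parts: the base quadword under p7 and a
+ // second quadword at +0x10 under p6 supplying the fifth element.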
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.s\n"
+ "whilelt p6.s, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #5\n"
+ "cbz %[loops], 2f\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z31.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z27.d, z24.d\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #5\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z27.d, z24.d\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #5\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "b 5f\n"
+ "2:\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "5:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [biasptr] "+r" (biasptr)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width), [biasinc] "r" (biasinc), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
+ case 6:
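+ // Case 6: six B vectors (z16-z21); the second A quadword supplies elements
+ // [0] and [1] for the z20/z21 fmlas.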
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.s\n"
+ "whilelt p6.s, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #6\n"
+ "cbz %[loops], 2f\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z31.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "mov z27.d, z24.d\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #6\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z27.d, z24.d\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #6\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "b 5f\n"
+ "2:\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "5:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [biasptr] "+r" (biasptr)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width), [biasinc] "r" (biasinc), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
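+ // Same eight-row kernel, specialised for blocks of seven K elements: seven B vectors (z16-z22) stay live and b_ptr0 advances seven VLs per block.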
+ case 7:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.s\n"
+ "whilelt p6.s, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #7\n"
+ "cbz %[loops], 2f\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z31.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #7\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "mov z27.d, z24.d\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #7\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "b 5f\n"
+ "2:\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "5:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [biasptr] "+r" (biasptr)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width), [biasinc] "r" (biasinc), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
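+ // Eight K elements per block: a full set of eight B vectors (z16-z23), with b_ptr0 advancing eight VLs per block.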
+ case 8:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.s\n"
+ "whilelt p6.s, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z31.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "b 5f\n"
+ "2:\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "5:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [biasptr] "+r" (biasptr)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width), [biasinc] "r" (biasinc), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
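+ // Nine K elements per block: eight B vectors are loaded up front and z16 is reloaded mid-block with the ninth, so b_ptr0 advances 8 + 1 VLs per block.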
+ case 9:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.s\n"
+ "whilelt p6.s, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z31.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #1\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x20]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #1\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x20]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #1\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x20]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "b 5f\n"
+ "2:\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #1\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x20]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "5:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [biasptr] "+r" (biasptr)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width), [biasinc] "r" (biasinc), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
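+ // case 10: same structure as the case above, with ten accumulation
+ // steps per row — eight B vectors per pass plus two reloaded
+ // mid-loop (addvl #8, then #2).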
+ case 10:
+ __asm __volatile (
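+ // Name the scratch x registers used for the seven additional row
+ // pointers; the .unreq directives at the end of the block release them.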
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.s\n"
+ "whilelt p6.s, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z31.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #2\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x20]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #2\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x20]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #2\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x20]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "b 5f\n"
+ "2:\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #2\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x20]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "5:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [biasptr] "+r" (biasptr)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width), [biasinc] "r" (biasinc), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
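+ // case 11: eleven accumulation steps per row — eight B vectors per
+ // pass plus three reloaded mid-loop (addvl #8, then #3).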
+ case 11:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.s\n"
+ "whilelt p6.s, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z31.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #3\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x20]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #3\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x20]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #3\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x20]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "b 5f\n"
+ "2:\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #3\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x20]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "5:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [biasptr] "+r" (biasptr)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width), [biasinc] "r" (biasinc), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
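
(For orientation between these generated cases: each `case` in this switch is an SVE GEMM fragment specialised for one K-remainder, and they all compute the same thing. A rough scalar model of one output lane, written as a minimal C++ sketch, may help when reading the fmla chains; the names `A`, `B`, `C`, `depth`, `bias`, `lo`, `hi` here are illustrative stand-ins, not the library's API.)

    #include <algorithm>

    // Rough one-lane model of a single generated case: eight rows of C,
    // one lane of one vector-wide column block. The asm keeps the eight
    // accumulators in z24..z31, seeded from the broadcast bias load, and
    // the fmla zN.s, zM.s, zK.s[i] chains correspond to the k loop below.
    static void gemm_block_ref(const float *A, const float *B, float *C,
                               int lda, int ldc, int depth,
                               float bias, float lo, float hi)
    {
        for (int r = 0; r < 8; ++r)          // rows 0..7 == z24..z31
        {
            float acc = bias;                // ld1w z24 + mov z25..z31
            for (int k = 0; k < depth; ++k)
            {
                acc += A[r * lda + k] * B[k];
            }
            // fmax/fmin against the broadcast [minptr]/[maxptr] values
            C[r * ldc] = std::min(std::max(acc, lo), hi);
        }
    }
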
+ case 12:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.s\n"
+ "whilelt p6.s, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z31.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #4\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x20]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #4\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x20]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #4\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x20]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "b 5f\n"
+ "2:\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #4\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x20]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "5:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [biasptr] "+r" (biasptr)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width), [biasinc] "r" (biasinc), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
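
(The cases differ only in their K tail. Case 12 above consumes three quadwords of A per row, the last one loaded under p6, and advances B by twelve vectors per pass, visible as addvl #8 followed by addvl #4; case 13 below adds a fourth A quadword at #0x30 and advances B by thirteen vectors, addvl #8 then #5, so the case index matches the number of B vectors consumed per pass. Three predicates drive the masking throughout. Assuming %[temp] is zero on entry, as these kernels arrange, the whilelt setup builds lane masks roughly equivalent to this sketch, with a hypothetical lane count and names:)

    // Hedged sketch of the three predicate registers; VL stands in for the
    // runtime SVE lane count.
    static void make_masks(int VL, int odd_depth, int last_width,
                           bool *p7, bool *p6, bool *p0)
    {
        for (int i = 0; i < VL; ++i)
        {
            p7[i] = true;             // ptrue p7.s: all lanes active
            p6[i] = i < odd_depth;    // whilelt p6.s: masks the partial A quadword
            p0[i] = i < last_width;   // whilelt p0.s: masks the partial bias load
        }                             // and the final st1w stores at label 5
    }
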
+ case 13:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.s\n"
+ "whilelt p6.s, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z31.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #5\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #5\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #5\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "b 5f\n"
+ "2:\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #5\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "5:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [biasptr] "+r" (biasptr)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width), [biasinc] "r" (biasinc), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
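
(Every case shares the same local-label skeleton, which is easier to follow as an outline than from the branches themselves; this is a paraphrase of the generated control flow, not new behaviour:)

    setup: derive a_ptr1..7 / c_ptr1..7 from lda/ldc; the oob_rows chain
           redirects out-of-bounds row pointers back to row 0
    1:  build predicates, load the first block of B
        cbz loops -> 2 when there are no full iterations
        first iteration is peeled; b.eq 3f when it is also the last
    4:  steady-state loop: clamp and store the previous tile, reload
        bias/A/B, accumulate the next tile; b.ne 4b
    3:  final full iteration (bias reloaded under p0 for the partial
        tile), then b 5f
    2:  loops == 0 path: a single accumulation over the K tail
    5:  epilogue: fmax/fmin clamp and p0-masked st1w of the last tile
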
+ case 14:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.s\n"
+ "whilelt p6.s, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z31.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #6\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #6\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #6\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "b 5f\n"
+ "2:\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #6\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "5:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [biasptr] "+r" (biasptr)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width), [biasinc] "r" (biasinc), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
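+ // case 15 matches the case 14 skeleton; each pass simply consumes one more
+ // B vector: the mid-pass advance is addvl #7 rather than #6, z22 is
+ // reloaded from [%[b_ptr0], #6, MUL VL], and a final z22 FMLA covers lane 2
+ // of the p6-guarded #0x30 quadword.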
+ case 15:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.s\n"
+ "whilelt p6.s, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z31.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #7\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #7\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #7\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "b 5f\n"
+ "2:\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #7\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "5:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [biasptr] "+r" (biasptr)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width), [biasinc] "r" (biasinc), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
+ case 16:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.s\n"
+ "whilelt p6.s, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z31.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "b 5f\n"
+ "2:\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "5:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [biasptr] "+r" (biasptr)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width), [biasinc] "r" (biasinc), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
+ case 17:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.s\n"
+ "whilelt p6.s, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z31.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #1\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x40]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x40]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x40]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x40]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x40]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x40]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x40]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x40]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #1\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x40]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x40]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x40]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x40]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x40]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x40]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x40]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x40]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #1\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x40]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x40]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x40]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x40]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x40]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x40]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x40]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x40]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "b 5f\n"
+ "2:\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #1\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x40]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x40]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x40]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x40]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x40]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x40]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x40]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x40]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "5:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [biasptr] "+r" (biasptr)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width), [biasinc] "r" (biasinc), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
+ case 18:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.s\n"
+ "whilelt p6.s, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z31.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #2\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x40]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x40]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x40]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x40]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x40]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x40]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x40]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x40]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #2\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x40]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x40]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x40]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x40]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x40]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x40]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x40]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x40]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #2\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x40]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x40]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x40]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x40]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x40]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x40]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x40]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x40]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "b 5f\n"
+ "2:\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #2\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x40]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x40]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x40]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x40]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x40]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x40]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x40]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x40]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "5:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [biasptr] "+r" (biasptr)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width), [biasinc] "r" (biasinc), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
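(Editorial aside, not part of the generated diff: each `case` above is a depth-specialised unrolling of the same 8-row SVE micro-kernel, so consecutive cases differ only in how many `fmla` rounds they issue and how far `b_ptr0` is advanced with `addvl`. The scalar C++ below is a minimal reference model of what one such block computes, under the assumption of row-major operands; names such as `sgemm_panel_ref`, `a`, `b`, `bias` and `kmax` are illustrative, not identifiers from this file.)

#include <algorithm>
#include <cstddef>

// Hedged scalar sketch of the 8-row micro-kernel: accumulators start
// from the broadcast bias (the ld1w plus the mov z25.d, z24.d chain),
// gather one multiply-accumulate per depth element (the fmla ladder),
// and are clamped to [minval, maxval] (the fmax/fmin pairs) before the
// st1w stores. All parameter names are illustrative.
static void sgemm_panel_ref(const float *a, std::size_t lda,  // A panel, one row per output row
                            const float *b, std::size_t ldb,  // packed B panel, one row per depth step
                            float *c, std::size_t ldc,        // C output panel
                            const float *bias,                // one vector's worth of bias values
                            std::size_t rows,                 // 8 in the cases above
                            std::size_t width,                // lanes in one SVE vector
                            std::size_t kmax,                 // depth handled by the case: 17, 18 or 19 here
                            float minval, float maxval)
{
    for (std::size_t r = 0; r < rows; r++)
    {
        for (std::size_t x = 0; x < width; x++)
        {
            float acc = bias[x];
            for (std::size_t k = 0; k < kmax; k++)
            {
                acc += a[r * lda + k] * b[k * ldb + x];
            }
            c[r * ldc + x] = std::min(std::max(acc, minval), maxval);
        }
    }
}

The assembly keeps all eight accumulators (z24-z31) live across the whole depth loop, which is why the a_ptr1..a_ptr7 / c_ptr1..c_ptr7 aliases exist and why the out-of-bounds fixup at label 1 redirects surplus row pointers back to row 0 instead of branching to a narrower kernel.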
+ case 19:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.s\n"
+ "whilelt p6.s, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z31.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #3\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x40]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x40]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x40]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x40]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x40]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x40]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x40]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x40]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #3\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x40]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x40]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x40]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x40]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x40]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x40]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x40]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x40]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #3\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x40]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x40]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x40]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x40]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x40]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x40]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x40]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x40]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "b 5f\n"
+ "2:\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #3\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x40]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x40]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x40]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x40]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x40]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x40]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x40]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x40]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "5:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [biasptr] "+r" (biasptr)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width), [biasinc] "r" (biasinc), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
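+ // case 20: eight-row specialisation consuming twenty fp32 A elements
+ // per row per output block (five ld1rqw quads at offsets 0x0-0x40,
+ // the last one under the p6 odd-depth predicate).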
+ case 20:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.s\n"
+ "whilelt p6.s, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z31.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #4\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x40]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x40]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x40]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x40]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x40]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x40]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x40]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x40]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #4\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x40]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x40]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x40]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x40]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x40]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x40]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x40]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x40]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #4\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x40]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x40]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x40]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x40]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x40]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x40]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x40]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x40]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "b 5f\n"
+ "2:\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #4\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x40]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x40]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x40]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x40]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x40]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x40]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x40]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x40]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "5:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [biasptr] "+r" (biasptr)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width), [biasinc] "r" (biasinc), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
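+ // case 21: depth remainder of 21 fp32 elements per A row -- five full
+ // 128-bit quadword loads (offsets 0x0..0x40) plus one trailing element,
+ // so the final A loads at #0x50 use the odd-depth predicate p6 and the
+ // accumulation tail ends with a single z20 multiply-accumulate on lane [0].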
+ case 21:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.s\n"
+ "whilelt p6.s, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z31.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #5\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x40]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x40]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x40]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x40]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x40]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x40]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x40]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x40]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x50]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x50]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x50]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x50]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x50]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x50]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x50]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x50]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #5\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x40]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x40]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x40]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x40]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x40]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x40]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x40]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x40]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x50]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x50]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x50]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x50]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x50]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x50]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x50]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x50]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #5\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x40]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x40]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x40]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x40]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x40]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x40]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x40]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x40]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x50]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x50]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x50]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x50]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x50]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x50]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x50]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x50]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "b 5f\n"
+ "2:\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #5\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x40]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x40]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x40]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x40]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x40]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x40]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x40]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x40]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x50]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x50]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x50]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x50]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x50]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x50]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x50]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x50]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "5:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [biasptr] "+r" (biasptr)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width), [biasinc] "r" (biasinc), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
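+ // case 22: same structure with a depth remainder of 22 -- five full
+ // quadwords plus two trailing elements, so the p6-predicated loads at
+ // #0x50 feed a z20 lane-[0] group and the tail ends after the z21
+ // multiply-accumulate on lane [1].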
+ case 22:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.s\n"
+ "whilelt p6.s, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z31.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #6\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x40]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x40]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x40]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x40]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x40]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x40]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x40]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x40]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x50]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x50]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x50]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x50]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x50]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x50]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x50]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x50]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #6\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x40]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x40]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x40]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x40]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x40]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x40]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x40]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x40]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x50]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x50]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x50]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x50]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x50]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x50]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x50]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x50]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #6\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x40]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x40]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x40]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x40]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x40]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x40]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x40]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x40]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x50]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x50]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x50]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x50]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x50]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x50]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x50]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x50]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "b 5f\n"
+ "2:\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #6\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x40]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x40]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x40]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x40]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x40]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x40]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x40]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x40]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x50]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x50]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x50]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x50]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x50]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x50]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x50]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x50]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "5:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [biasptr] "+r" (biasptr)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width), [biasinc] "r" (biasinc), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
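+ // Case 23: the same eight-row, one-vector-wide kernel, specialised for 23 FMLA steps per unrolled K block (two full passes of z16-z23 plus a seven-step z16-z22 tail).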
+ case 23:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.s\n"
+ "whilelt p6.s, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z31.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x40]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x40]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x40]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x40]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x40]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x40]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x40]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x40]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #7\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x50]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x50]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x50]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x50]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x50]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x50]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x50]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x50]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x40]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x40]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x40]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x40]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x40]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x40]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x40]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x40]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #7\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x50]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x50]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x50]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x50]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x50]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x50]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x50]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x50]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x40]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x40]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x40]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x40]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x40]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x40]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x40]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x40]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #7\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x50]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x50]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x50]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x50]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x50]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x50]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x50]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x50]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "b 5f\n"
+ "2:\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x40]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x40]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x40]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x40]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x40]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x40]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x40]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x40]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #7\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x50]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x50]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x50]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x50]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x50]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x50]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x50]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x50]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "5:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [biasptr] "+r" (biasptr)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width), [biasinc] "r" (biasinc), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
+ default:
+ case 24:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.s\n"
+ "whilelt p6.s, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z31.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x40]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x40]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x40]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x40]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x40]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x40]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x40]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x40]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x50]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x50]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x50]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x50]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x50]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x50]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x50]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x50]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p7/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x40]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x40]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x40]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x40]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x40]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x40]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x40]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x40]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x50]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x50]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x50]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x50]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x50]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x50]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x50]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x50]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "mov z25.d, z24.d\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x40]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x40]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x40]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x40]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x40]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x40]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x40]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x40]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x50]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x50]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x50]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x50]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x50]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x50]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x50]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x50]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "b 5f\n"
+ "2:\n"
+ "ld1w z24.s, p0/z, [%[biasptr]]\n"
+ "add %[biasptr], %[biasptr], %[biasinc]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1]\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2]\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3]\n"
+ "mov z27.d, z24.d\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4]\n"
+ "mov z28.d, z24.d\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5]\n"
+ "mov z29.d, z24.d\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6]\n"
+ "mov z30.d, z24.d\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7]\n"
+ "mov z31.d, z24.d\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x10]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x10]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x10]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x10]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x10]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x10]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x10]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x10]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x20]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x20]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x20]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x20]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x20]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x20]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x20]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x20]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "ld1w z16.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "ld1w z17.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "ld1w z18.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x30]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x30]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x30]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x30]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x30]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x30]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x30]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x30]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "ld1w z19.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "ld1w z20.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "ld1w z21.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "ld1w z22.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #0x40]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #0x40]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "ld1rqw z2.s, p7/z, [a_ptr2, #0x40]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "ld1rqw z3.s, p7/z, [a_ptr3, #0x40]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "ld1rqw z4.s, p7/z, [a_ptr4, #0x40]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr5, #0x40]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr6, #0x40]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "ld1w z23.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr7, #0x40]\n"
+ "fmla z25.s, z16.s, z1.s[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "fmla z26.s, z16.s, z2.s[0]\n"
+ "fmla z27.s, z16.s, z3.s[0]\n"
+ "fmla z28.s, z16.s, z4.s[0]\n"
+ "fmla z29.s, z16.s, z5.s[0]\n"
+ "fmla z30.s, z16.s, z6.s[0]\n"
+ "fmla z31.s, z16.s, z7.s[0]\n"
+ "fmla z24.s, z17.s, z0.s[1]\n"
+ "fmla z25.s, z17.s, z1.s[1]\n"
+ "fmla z26.s, z17.s, z2.s[1]\n"
+ "fmla z27.s, z17.s, z3.s[1]\n"
+ "fmla z28.s, z17.s, z4.s[1]\n"
+ "fmla z29.s, z17.s, z5.s[1]\n"
+ "fmla z30.s, z17.s, z6.s[1]\n"
+ "fmla z31.s, z17.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z0.s[2]\n"
+ "fmla z25.s, z18.s, z1.s[2]\n"
+ "fmla z26.s, z18.s, z2.s[2]\n"
+ "fmla z27.s, z18.s, z3.s[2]\n"
+ "fmla z28.s, z18.s, z4.s[2]\n"
+ "fmla z29.s, z18.s, z5.s[2]\n"
+ "fmla z30.s, z18.s, z6.s[2]\n"
+ "fmla z31.s, z18.s, z7.s[2]\n"
+ "fmla z24.s, z19.s, z0.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x50]\n"
+ "fmla z25.s, z19.s, z1.s[3]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x50]\n"
+ "fmla z26.s, z19.s, z2.s[3]\n"
+ "ld1rqw z2.s, p6/z, [a_ptr2, #0x50]\n"
+ "fmla z27.s, z19.s, z3.s[3]\n"
+ "ld1rqw z3.s, p6/z, [a_ptr3, #0x50]\n"
+ "fmla z28.s, z19.s, z4.s[3]\n"
+ "ld1rqw z4.s, p6/z, [a_ptr4, #0x50]\n"
+ "fmla z29.s, z19.s, z5.s[3]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr5, #0x50]\n"
+ "fmla z30.s, z19.s, z6.s[3]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr6, #0x50]\n"
+ "fmla z31.s, z19.s, z7.s[3]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr7, #0x50]\n"
+ "fmla z24.s, z20.s, z0.s[0]\n"
+ "fmla z25.s, z20.s, z1.s[0]\n"
+ "fmla z26.s, z20.s, z2.s[0]\n"
+ "fmla z27.s, z20.s, z3.s[0]\n"
+ "fmla z28.s, z20.s, z4.s[0]\n"
+ "fmla z29.s, z20.s, z5.s[0]\n"
+ "fmla z30.s, z20.s, z6.s[0]\n"
+ "fmla z31.s, z20.s, z7.s[0]\n"
+ "fmla z24.s, z21.s, z0.s[1]\n"
+ "fmla z25.s, z21.s, z1.s[1]\n"
+ "fmla z26.s, z21.s, z2.s[1]\n"
+ "fmla z27.s, z21.s, z3.s[1]\n"
+ "fmla z28.s, z21.s, z4.s[1]\n"
+ "fmla z29.s, z21.s, z5.s[1]\n"
+ "fmla z30.s, z21.s, z6.s[1]\n"
+ "fmla z31.s, z21.s, z7.s[1]\n"
+ "fmla z24.s, z22.s, z0.s[2]\n"
+ "fmla z25.s, z22.s, z1.s[2]\n"
+ "fmla z26.s, z22.s, z2.s[2]\n"
+ "fmla z27.s, z22.s, z3.s[2]\n"
+ "fmla z28.s, z22.s, z4.s[2]\n"
+ "fmla z29.s, z22.s, z5.s[2]\n"
+ "fmla z30.s, z22.s, z6.s[2]\n"
+ "fmla z31.s, z22.s, z7.s[2]\n"
+ "fmla z24.s, z23.s, z0.s[3]\n"
+ "fmla z25.s, z23.s, z1.s[3]\n"
+ "fmla z26.s, z23.s, z2.s[3]\n"
+ "fmla z27.s, z23.s, z3.s[3]\n"
+ "fmla z28.s, z23.s, z4.s[3]\n"
+ "fmla z29.s, z23.s, z5.s[3]\n"
+ "fmla z30.s, z23.s, z6.s[3]\n"
+ "fmla z31.s, z23.s, z7.s[3]\n"
+ "5:\n"
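+ // 5: epilogue: clamp each accumulator to the [min, max] activation range, then store one predicated vector per output row.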
+ "ld1rw z22.s, p7/z, [%[minptr]]\n"
+ "ld1rw z23.s, p7/z, [%[maxptr]]\n"
+ "fmax z24.s, p7/m, z24.s, z22.s\n"
+ "fmax z25.s, p7/m, z25.s, z22.s\n"
+ "fmax z26.s, p7/m, z26.s, z22.s\n"
+ "fmax z27.s, p7/m, z27.s, z22.s\n"
+ "fmin z24.s, p7/m, z24.s, z23.s\n"
+ "fmin z25.s, p7/m, z25.s, z23.s\n"
+ "fmin z26.s, p7/m, z26.s, z23.s\n"
+ "fmin z27.s, p7/m, z27.s, z23.s\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "fmax z28.s, p7/m, z28.s, z22.s\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "fmax z29.s, p7/m, z29.s, z22.s\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "fmax z30.s, p7/m, z30.s, z22.s\n"
+ "fmin z28.s, p7/m, z28.s, z23.s\n"
+ "fmax z31.s, p7/m, z31.s, z22.s\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "fmin z29.s, p7/m, z29.s, z23.s\n"
+ "fmin z30.s, p7/m, z30.s, z23.s\n"
+ "fmin z31.s, p7/m, z31.s, z23.s\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [biasptr] "+r" (biasptr)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width), [biasinc] "r" (biasinc), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
+ }
+ }
+}
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_s8s32_dot_8x1VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_s8s32_dot_8x1VL.hpp
new file mode 100644
index 0000000000..e735567e95
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_s8s32_dot_8x1VL.hpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2019-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include <cstdint>
+
+namespace arm_gemm
+{
+
+// Actual kernel implementations
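+// Arguments: A, lda, B, C, ldc, M, N, K, bias, activation, accumulate (the last three are unused here; see the supports_* traits below).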
+void sve_smallK_hybrid_s8s32_dot_8x1VL(const int8_t *, int, const int8_t *, int32_t *, int, int, int, int, const int32_t *, Activation, bool);
+
+class cls_sve_smallK_hybrid_s8s32_dot_8x1VL
+{
+public:
+ typedef int8_t operand_type;
+ typedef int32_t result_type;
+
+ typedef void (*kern_type)(const int8_t *, int, const int8_t *, int32_t *, int, int, int, int, const int32_t *, Activation, bool);
+
+ /* Kernel blocking parameters */
+ static constexpr unsigned int out_height()
+ {
+ return 8;
+ }
+
+ static unsigned int out_width()
+ {
+ return get_vector_length<int32_t>() * 1;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 4;
+ }
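+ // Together these mean each call produces an 8-row by one-vector-of-int32 tile of C, consuming K in groups of 4 int8 values (one SDOT step per group).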
+
+ static constexpr bool supports_accumulate()
+ {
+ return false;
+ }
+
+ static constexpr bool supports_bias()
+ {
+ return false;
+ }
+
+ static constexpr bool supports_activation()
+ {
+ return false;
+ }
+
+ StdTransformsSVE<operand_type, result_type, 8, 1, 4> transforms = {};
+
+ // Default to the generic kernel
+ kern_type kernel=sve_smallK_hybrid_s8s32_dot_8x1VL;
+
+ cls_sve_smallK_hybrid_s8s32_dot_8x1VL(const CPUInfo *)
+ {
+
+ }
+};
+
+} // namespace arm_gemm
+
+#endif // ARM_COMPUTE_ENABLE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_s8s32_dot_8x1VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_s8s32_dot_8x1VL/generic.cpp
new file mode 100644
index 0000000000..489b381624
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_s8s32_dot_8x1VL/generic.cpp
@@ -0,0 +1,8747 @@
+/*
+ * Copyright (c) 2019-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include <algorithm>
+
+#include "arm_gemm.hpp"
+
+#include <cstdint>
+#include "../../asmlib.hpp"
+#include "../../utils.hpp"
+
+namespace arm_gemm {
+
+void sve_smallK_hybrid_s8s32_dot_8x1VL(const int8_t *A, int lda, const int8_t *B, int32_t *C, int ldc, int M, int N, int K, const int32_t *, Activation, bool) {
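+ // Derived iteration quantities:
+ //   loops_count: full-vector column blocks minus one; the last (possibly
+ //                partial) vector is always handled by the predicated tail path.
+ //   odd_depth:   int8 elements in the final 16-byte quad of each A row (1..16),
+ //                used to build the p6 load predicate.
+ //   last_width:  columns covered by the final predicated (p0) store.
+ //   odds_count:  depth elements left over after grouping K into SDOT blocks of 4.
+ //   K:           from here on counts 4-byte dot-product groups, not elements.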
+ const long loops_count = iceildiv(N, (int)get_vector_length<int32_t>()) - 1;
+ const long ldab = lda * sizeof(int8_t);
+ const long ldcb = ldc * sizeof(int32_t);
+ const long odd_depth = (K % 16) ? (K % 16) : 16;
+ const long last_width = N - (loops_count * get_vector_length<int32_t>());
+ const long odds_count = K % 4;
+ K = (K + 3) / 4;
+
+ for (int y0=0; y0<M; y0+=8) {
+ long loops = loops_count;
+ long oob_rows = std::max(8 - (M-y0), 0);
+ long odds = odds_count;
+ long temp = 0;
+ const int8_t *b_ptr0 = B;
+ const int8_t *a_ptr0 = A + (y0 * lda);
+
+ int32_t *c_ptr0 = C + (y0 * ldc);
+
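+ // Dispatch on the number of dot-product groups: each case below is a fully unrolled inline-assembly body for that exact depth.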
+ switch(K) {
+ case 1:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
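+ // Row tail: when fewer than 8 rows remain, redirect the surplus a/c row pointers back to row 0, so those lanes recompute and re-store row 0's result harmlessly.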
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
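+ // Predicates: p7 covers all lanes; p6 limits the depth-tail byte loads to odd_depth elements; p0 limits the final store to last_width words.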
+ "ptrue p7.b\n"
+ "whilelt p6.b, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #1\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7]\n"
+ "cbz %[loops], 2f\n"
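+ // Numbered labels: 4 is the steady-state loop (store the previous result vector while computing the next), 3 the epilogue for the last full vector, 2 the no-loop case, 5 the shared predicated store.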
+ "mov z24.s, #0\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z25.s, #0\n"
+ "mov z26.s, #0\n"
+ "mov z27.s, #0\n"
+ "mov z28.s, #0\n"
+ "mov z29.s, #0\n"
+ "mov z30.s, #0\n"
+ "mov z31.s, #0\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "addvl %[b_ptr0], %[b_ptr0], #1\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "mov z26.s, #0\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z27.s, #0\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.s, #0\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.s, #0\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "mov z26.s, #0\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z27.s, #0\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.s, #0\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.s, #0\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "b 5f\n"
+ "2:\n"
+ "mov z24.s, #0\n"
+ "mov z25.s, #0\n"
+ "mov z26.s, #0\n"
+ "mov z27.s, #0\n"
+ "mov z28.s, #0\n"
+ "mov z29.s, #0\n"
+ "mov z30.s, #0\n"
+ "mov z31.s, #0\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "5:\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [odds] "+r" (odds)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
+ case 2:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.b\n"
+ "whilelt p6.b, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #2\n"
+ "cbz %[loops], 2f\n"
+ "mov z24.s, #0\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z25.s, #0\n"
+ "mov z26.s, #0\n"
+ "mov z27.s, #0\n"
+ "mov z28.s, #0\n"
+ "mov z29.s, #0\n"
+ "mov z30.s, #0\n"
+ "mov z31.s, #0\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "addvl %[b_ptr0], %[b_ptr0], #2\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "mov z26.s, #0\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z27.s, #0\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.s, #0\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.s, #0\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #2\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "mov z26.s, #0\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z27.s, #0\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.s, #0\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.s, #0\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "b 5f\n"
+ "2:\n"
+ "mov z24.s, #0\n"
+ "mov z25.s, #0\n"
+ "mov z26.s, #0\n"
+ "mov z27.s, #0\n"
+ "mov z28.s, #0\n"
+ "mov z29.s, #0\n"
+ "mov z30.s, #0\n"
+ "mov z31.s, #0\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "5:\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [odds] "+r" (odds)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
+ case 3:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.b\n"
+ "whilelt p6.b, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #3\n"
+ "cbz %[loops], 2f\n"
+ "mov z24.s, #0\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z25.s, #0\n"
+ "mov z26.s, #0\n"
+ "mov z27.s, #0\n"
+ "mov z28.s, #0\n"
+ "mov z29.s, #0\n"
+ "mov z30.s, #0\n"
+ "mov z31.s, #0\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "mov z26.s, #0\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z27.s, #0\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.s, #0\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.s, #0\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #3\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "mov z26.s, #0\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z27.s, #0\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.s, #0\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.s, #0\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #3\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "b 5f\n"
+ "2:\n"
+ "mov z24.s, #0\n"
+ "mov z25.s, #0\n"
+ "mov z26.s, #0\n"
+ "mov z27.s, #0\n"
+ "mov z28.s, #0\n"
+ "mov z29.s, #0\n"
+ "mov z30.s, #0\n"
+ "mov z31.s, #0\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "5:\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [odds] "+r" (odds)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
+ case 4:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.b\n"
+ "whilelt p6.b, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #4\n"
+ "cbz %[loops], 2f\n"
+ "mov z24.s, #0\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z25.s, #0\n"
+ "mov z26.s, #0\n"
+ "mov z27.s, #0\n"
+ "mov z28.s, #0\n"
+ "mov z29.s, #0\n"
+ "mov z30.s, #0\n"
+ "mov z31.s, #0\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "mov z26.s, #0\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z27.s, #0\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.s, #0\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.s, #0\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #4\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "mov z26.s, #0\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z27.s, #0\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.s, #0\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.s, #0\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #4\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "b 5f\n"
+ "2:\n"
+ "mov z24.s, #0\n"
+ "mov z25.s, #0\n"
+ "mov z26.s, #0\n"
+ "mov z27.s, #0\n"
+ "mov z28.s, #0\n"
+ "mov z29.s, #0\n"
+ "mov z30.s, #0\n"
+ "mov z31.s, #0\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "5:\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [odds] "+r" (odds)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
+ case 5:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.b\n"
+ "whilelt p6.b, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #5\n"
+ "cbz %[loops], 2f\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #5\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #5\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "b 5f\n"
+ "2:\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "5:\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [odds] "+r" (odds)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
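+ // Case 6: the B panel is six SVE vectors wide (z16-z21); each accumulator
+ // combines them with six 4-byte lanes of its A row via the sdot indices.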
+ case 6:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.b\n"
+ "whilelt p6.b, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #6\n"
+ "cbz %[loops], 2f\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #6\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #6\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "b 5f\n"
+ "2:\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "5:\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [odds] "+r" (odds)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
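+ // Case 7: as case 6, but with a seven-vector B panel (z16-z22), adding one
+ // more sdot step per accumulator.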
+ case 7:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.b\n"
+ "whilelt p6.b, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #7\n"
+ "cbz %[loops], 2f\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #7\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #7\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "b 5f\n"
+ "2:\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "5:\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [odds] "+r" (odds)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
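+ // Case 8: eight B vectors (z16-z23) cover a full 32-byte block of A per
+ // row, the second, p6-masked A load feeding the last four sdot steps.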
+ case 8:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.b\n"
+ "whilelt p6.b, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "b 5f\n"
+ "2:\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "5:\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [odds] "+r" (odds)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
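+ // Case 9: eight B vectors plus a ninth streamed through z16; A is read in
+ // three quadwords, with the last (#0x20) load masked by p6.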
+ case 9:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.b\n"
+ "whilelt p6.b, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #1\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x20]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x20]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x20]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x20]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x20]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x20]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x20]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x20]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #1\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x20]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x20]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x20]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x20]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x20]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x20]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x20]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x20]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #1\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x20]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x20]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x20]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x20]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x20]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x20]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x20]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x20]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "b 5f\n"
+ "2:\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #1\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x20]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x20]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x20]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x20]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x20]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x20]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x20]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x20]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "5:\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [odds] "+r" (odds)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
+ case 10:
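+ // Ten sdot steps per accumulator row: eight B vectors preloaded
+ // into z16-z23, with z16 and z17 reloaded mid-block (addvl #2).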
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.b\n"
+ "whilelt p6.b, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #2\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x20]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x20]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x20]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x20]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x20]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x20]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x20]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x20]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #2\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x20]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x20]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x20]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x20]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x20]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x20]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x20]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x20]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #2\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x20]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x20]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x20]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x20]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x20]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x20]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x20]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x20]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "b 5f\n"
+ "2:\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #2\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x20]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x20]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x20]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x20]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x20]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x20]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x20]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x20]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "5:\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [odds] "+r" (odds)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
+ case 11:
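+ // Eleven sdot steps per accumulator row: z16-z23 preloaded, with
+ // z16-z18 reloaded mid-block (addvl #3).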
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.b\n"
+ "whilelt p6.b, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #3\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x20]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x20]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x20]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x20]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x20]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x20]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x20]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x20]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #3\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x20]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x20]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x20]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x20]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x20]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x20]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x20]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x20]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #3\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x20]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x20]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x20]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x20]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x20]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x20]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x20]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x20]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "b 5f\n"
+ "2:\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #3\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x20]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x20]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x20]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x20]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x20]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x20]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x20]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x20]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "5:\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [odds] "+r" (odds)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
+ case 12:
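+ // Twelve sdot steps per accumulator row: z16-z23 preloaded, with
+ // z16-z19 reloaded mid-block (addvl #4).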
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.b\n"
+ "whilelt p6.b, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #4\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x20]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x20]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x20]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x20]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x20]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x20]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x20]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x20]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #4\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x20]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x20]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x20]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x20]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x20]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x20]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x20]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x20]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #4\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x20]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x20]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x20]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x20]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x20]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x20]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x20]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x20]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "b 5f\n"
+ "2:\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #4\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x20]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x20]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x20]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x20]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x20]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x20]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x20]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x20]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "5:\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [odds] "+r" (odds)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
+ case 13:
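+ // K-tail of 13 four-byte groups: same structure as case 12, but with a
+ // fourth 16-byte A load per row (p6-masked at offset 0x30) and 8+5 B
+ // vectors consumed per pass.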
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.b\n"
+ "whilelt p6.b, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #5\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x20]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x20]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x20]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x20]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x20]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x20]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x20]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x20]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x30]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x30]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x30]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x30]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x30]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x30]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x30]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x30]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #5\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x20]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x20]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x20]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x20]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x20]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x20]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x20]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x20]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x30]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x30]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x30]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x30]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x30]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x30]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x30]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x30]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #5\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x20]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x20]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x20]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x20]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x20]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x20]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x20]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x20]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x30]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x30]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x30]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x30]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x30]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x30]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x30]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x30]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "b 5f\n"
+ "2:\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #5\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x20]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x20]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x20]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x20]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x20]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x20]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x20]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x20]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x30]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x30]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x30]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x30]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x30]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x30]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x30]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x30]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "5:\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [odds] "+r" (odds)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
+ case 14:
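+ // K-tail of 14 four-byte groups: as above, consuming 8+6 B vectors per
+ // pass (z21 is reloaded and b_ptr0 advanced by addvl #6), i.e. two more
+ // dot steps than case 12.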
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.b\n"
+ "whilelt p6.b, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #6\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x20]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x20]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x20]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x20]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x20]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x20]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x20]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x20]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x30]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x30]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x30]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x30]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x30]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x30]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x30]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x30]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #6\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x20]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x20]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x20]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x20]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x20]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x20]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x20]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x20]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x30]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x30]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x30]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x30]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x30]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x30]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x30]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x30]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #6\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x20]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x20]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x20]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x20]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x20]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x20]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x20]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x20]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x30]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x30]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x30]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x30]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x30]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x30]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x30]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x30]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "b 5f\n"
+ "2:\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #6\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x20]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x20]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x20]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x20]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x20]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x20]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x20]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x20]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x30]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x30]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x30]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x30]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x30]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x30]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x30]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x30]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "5:\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [odds] "+r" (odds)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
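+ // [Editorial sketch, not part of this patch.] Each unrolled block above is the
+ // same sdot-by-indexed-lane pattern: one ld1rqb broadcasts a 16-byte quadword of
+ // A to every 128-bit vector segment, and four sdot instructions against four B
+ // vectors consume its lanes b[0]..b[3] (4 int8 values each). A minimal C++
+ // equivalent using SVE ACLE intrinsics, under assumed names (row_dot_k16, fixed
+ // depth 16) and not the library's implementation:
+ //
+ //     #include <arm_sve.h>
+ //     #include <cstdint>
+ //
+ //     // One output row of int32 accumulators from a 16-deep int8 A slice
+ //     // against four pre-interleaved B blocks, as in the asm above.
+ //     void row_dot_k16(const int8_t *a_row, const int8_t *b_blocks,
+ //                      int32_t *c_row, int width_left) {
+ //         svbool_t pg = svptrue_b8();
+ //         svbool_t pw = svwhilelt_b32(0, width_left); // tail predicate (cf. p0)
+ //         svint32_t acc = svdup_s32(0);
+ //         svint8_t a = svld1rq_s8(pg, a_row);         // cf. ld1rqb
+ //         for (int i = 0; i < 4; ++i) {               // 4 sdot steps cover K=16
+ //             svint8_t b = svld1_s8(pg, b_blocks);    // cf. ld1b
+ //             b_blocks += svcntb();
+ //             switch (i) {                            // lane index must be immediate
+ //                 case 0: acc = svdot_lane_s32(acc, b, a, 0); break;
+ //                 case 1: acc = svdot_lane_s32(acc, b, a, 1); break;
+ //                 case 2: acc = svdot_lane_s32(acc, b, a, 2); break;
+ //                 case 3: acc = svdot_lane_s32(acc, b, a, 3); break;
+ //             }
+ //         }
+ //         svst1_s32(pw, c_row, acc);                  // cf. st1w with p0
+ //     }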
+ case 15:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.b\n"
+ "whilelt p6.b, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x20]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x20]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x20]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x20]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x20]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x20]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x20]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x20]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #7\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x30]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x30]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x30]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x30]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x30]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x30]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x30]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x30]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x20]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x20]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x20]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x20]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x20]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x20]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x20]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x20]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #7\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x30]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x30]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x30]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x30]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x30]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x30]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x30]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x30]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x20]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x20]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x20]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x20]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x20]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x20]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x20]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x20]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #7\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x30]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x30]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x30]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x30]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x30]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x30]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x30]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x30]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "b 5f\n"
+ "2:\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x20]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x20]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x20]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x20]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x20]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x20]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x20]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x20]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #7\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x30]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x30]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x30]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x30]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x30]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x30]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x30]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x30]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "5:\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [odds] "+r" (odds)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
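+ // [Editorial sketch, not part of this patch.] The "oob_rows" prologue repeated
+ // in every case ("add a_ptr7, %[a_ptr0], #0x0" and so on) aliases out-of-range
+ // row pointers back to row 0, so the fixed 8-row kernel can run unconditionally:
+ // the aliased rows just recompute row 0 and store the identical result again.
+ // A minimal sketch of the same idea, with hypothetical names:
+ //
+ //     #include <cstdint>
+ //
+ //     // Clamp the last oob_rows A/C row pointers to row 0, mirroring the
+ //     // subs/add/b.eq chain in the asm prologue.
+ //     static void clamp_oob_rows(const int8_t *a[8], int32_t *c[8],
+ //                                int oob_rows) {
+ //         for (int r = 7; oob_rows > 0; --r, --oob_rows) {
+ //             a[r] = a[0];
+ //             c[r] = c[0];
+ //         }
+ //     }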
+ default:
+ case 16:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.b\n"
+ "whilelt p6.b, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x20]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x20]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x20]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x20]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x20]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x20]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x20]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x20]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x30]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x30]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x30]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x30]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x30]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x30]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x30]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x30]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x20]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x20]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x20]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x20]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x20]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x20]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x20]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x20]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x30]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x30]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x30]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x30]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x30]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x30]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x30]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x30]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x20]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x20]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x20]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x20]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x20]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x20]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x20]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x20]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x30]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x30]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x30]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x30]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x30]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x30]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x30]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x30]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "b 5f\n"
+ "2:\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x20]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x20]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x20]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x20]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x20]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x20]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x20]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "sdot z24.s, z16.b, z0.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x20]\n"
+ "sdot z25.s, z16.b, z1.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "sdot z26.s, z16.b, z2.b[0]\n"
+ "sdot z27.s, z16.b, z3.b[0]\n"
+ "sdot z28.s, z16.b, z4.b[0]\n"
+ "sdot z29.s, z16.b, z5.b[0]\n"
+ "sdot z30.s, z16.b, z6.b[0]\n"
+ "sdot z31.s, z16.b, z7.b[0]\n"
+ "sdot z24.s, z17.b, z0.b[1]\n"
+ "sdot z25.s, z17.b, z1.b[1]\n"
+ "sdot z26.s, z17.b, z2.b[1]\n"
+ "sdot z27.s, z17.b, z3.b[1]\n"
+ "sdot z28.s, z17.b, z4.b[1]\n"
+ "sdot z29.s, z17.b, z5.b[1]\n"
+ "sdot z30.s, z17.b, z6.b[1]\n"
+ "sdot z31.s, z17.b, z7.b[1]\n"
+ "sdot z24.s, z18.b, z0.b[2]\n"
+ "sdot z25.s, z18.b, z1.b[2]\n"
+ "sdot z26.s, z18.b, z2.b[2]\n"
+ "sdot z27.s, z18.b, z3.b[2]\n"
+ "sdot z28.s, z18.b, z4.b[2]\n"
+ "sdot z29.s, z18.b, z5.b[2]\n"
+ "sdot z30.s, z18.b, z6.b[2]\n"
+ "sdot z31.s, z18.b, z7.b[2]\n"
+ "sdot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x30]\n"
+ "sdot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x30]\n"
+ "sdot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x30]\n"
+ "sdot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x30]\n"
+ "sdot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x30]\n"
+ "sdot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x30]\n"
+ "sdot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x30]\n"
+ "sdot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x30]\n"
+ "sdot z24.s, z20.b, z0.b[0]\n"
+ "sdot z25.s, z20.b, z1.b[0]\n"
+ "sdot z26.s, z20.b, z2.b[0]\n"
+ "sdot z27.s, z20.b, z3.b[0]\n"
+ "sdot z28.s, z20.b, z4.b[0]\n"
+ "sdot z29.s, z20.b, z5.b[0]\n"
+ "sdot z30.s, z20.b, z6.b[0]\n"
+ "sdot z31.s, z20.b, z7.b[0]\n"
+ "sdot z24.s, z21.b, z0.b[1]\n"
+ "sdot z25.s, z21.b, z1.b[1]\n"
+ "sdot z26.s, z21.b, z2.b[1]\n"
+ "sdot z27.s, z21.b, z3.b[1]\n"
+ "sdot z28.s, z21.b, z4.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[1]\n"
+ "sdot z30.s, z21.b, z6.b[1]\n"
+ "sdot z31.s, z21.b, z7.b[1]\n"
+ "sdot z24.s, z22.b, z0.b[2]\n"
+ "sdot z25.s, z22.b, z1.b[2]\n"
+ "sdot z26.s, z22.b, z2.b[2]\n"
+ "sdot z27.s, z22.b, z3.b[2]\n"
+ "sdot z28.s, z22.b, z4.b[2]\n"
+ "sdot z29.s, z22.b, z5.b[2]\n"
+ "sdot z30.s, z22.b, z6.b[2]\n"
+ "sdot z31.s, z22.b, z7.b[2]\n"
+ "sdot z24.s, z23.b, z0.b[3]\n"
+ "sdot z25.s, z23.b, z1.b[3]\n"
+ "sdot z26.s, z23.b, z2.b[3]\n"
+ "sdot z27.s, z23.b, z3.b[3]\n"
+ "sdot z28.s, z23.b, z4.b[3]\n"
+ "sdot z29.s, z23.b, z5.b[3]\n"
+ "sdot z30.s, z23.b, z6.b[3]\n"
+ "sdot z31.s, z23.b, z7.b[3]\n"
+ "5:\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [odds] "+r" (odds)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
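+ // The operand lists above tie a_ptr0/b_ptr0/c_ptr0 and the counters to
+ // registers as read-write ("+r") operands, pass the strides and tail sizes
+ // as read-only inputs, and clobber x0-x13 (the .req pointer aliases), all
+ // thirty-two z registers, the flags and memory, so the compiler reloads
+ // anything it needs after the block.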
+ break;
+ }
+ }
+}
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_u8u32_dot_8x1VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_u8u32_dot_8x1VL.hpp
new file mode 100644
index 0000000000..25dd10019d
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_u8u32_dot_8x1VL.hpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2019-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include <cstdint>
+
+namespace arm_gemm
+{
+
+// Actual kernel implementations
+void sve_smallK_hybrid_u8u32_dot_8x1VL(const uint8_t *, int, const uint8_t *, uint32_t *, int, int, int, int, const uint32_t *, Activation, bool);
+
+class cls_sve_smallK_hybrid_u8u32_dot_8x1VL
+{
+public:
+ typedef uint8_t operand_type;
+ typedef uint32_t result_type;
+
+ typedef void (*kern_type)(const uint8_t *, int, const uint8_t *, uint32_t *, int, int, int, int, const uint32_t *, Activation, bool);
+
+ /* Kernel blocking parameters */
+ static constexpr unsigned int out_height()
+ {
+ return 8;
+ }
+
+ static unsigned int out_width()
+ {
+ return get_vector_length<uint32_t>() * 1;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 4;
+ }
+
+ static constexpr bool supports_accumulate()
+ {
+ return false;
+ }
+
+ static constexpr bool supports_bias()
+ {
+ return false;
+ }
+
+ static constexpr bool supports_activation()
+ {
+ return false;
+ }
+
+ StdTransformsSVE<operand_type, result_type, 8, 1, 4> transforms = {};
+
+ // Default to the generic kernel
+ kern_type kernel=sve_smallK_hybrid_u8u32_dot_8x1VL;
+
+ cls_sve_smallK_hybrid_u8u32_dot_8x1VL(const CPUInfo *)
+ {
+
+ }
+};
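+// Blocking summary: each kernel call produces an out_height() x out_width()
+// tile of C, i.e. 8 rows by one vector of uint32_t columns (8 columns on a
+// 256-bit SVE implementation, for example), and k_unroll() = 4 matches the
+// four bytes consumed per 32-bit lane by each udot in the generic kernel.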
+
+} // namespace arm_gemm
+
+#endif // ARM_COMPUTE_ENABLE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_u8u32_dot_8x1VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_u8u32_dot_8x1VL/generic.cpp
new file mode 100644
index 0000000000..8ab83e670e
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_u8u32_dot_8x1VL/generic.cpp
@@ -0,0 +1,8747 @@
+/*
+ * Copyright (c) 2019-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include <algorithm>
+
+#include "arm_gemm.hpp"
+
+#include <cstdint>
+#include "../../asmlib.hpp"
+#include "../../utils.hpp"
+
+namespace arm_gemm {
+
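+// Small-K hybrid GEMM: the eight A rows are read in place (lda-strided),
+// B is walked linearly one vector at a time (it is assumed to be packed in
+// the order the ld1b/addvl sequence expects), and C is written directly.
+// The unnamed bias pointer, Activation and accumulate-flag parameters are
+// accepted but ignored, matching the supports_* flags in the class header.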
+void sve_smallK_hybrid_u8u32_dot_8x1VL(const uint8_t *A, int lda, const uint8_t *B, uint32_t *C, int ldc, int M, int N, int K, const uint32_t *, Activation, bool) {
+ const long loops_count = iceildiv(N, (int)get_vector_length<uint32_t>()) - 1;
+ const long ldab = lda * sizeof(uint8_t);
+ const long ldcb = ldc * sizeof(uint32_t);
+ const long odd_depth = (K % 16) ? (K % 16) : 16;
+ const long last_width = N - (loops_count * get_vector_length<uint32_t>());
+ const long odds_count = K % 4;
+ K = (K + 3) / 4;
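+ // Setup arithmetic: udot consumes four bytes per 32-bit lane, so K is
+ // rounded up to whole groups of four (the remainder is kept in odds_count);
+ // loops_count is the number of full vector-width column blocks minus one,
+ // the final block of last_width columns being stored under predicate p0,
+ // and odd_depth sizes the p6 predicate for the depth tail. For example,
+ // with K = 10, N = 17 and an 8-lane (256-bit) vector: odds_count = 2,
+ // K becomes 3, loops_count = iceildiv(17, 8) - 1 = 2 and last_width = 1.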
+
+ for (int y0=0; y0<M; y0+=8) {
+ long loops = loops_count;
+ long oob_rows = std::max(8 - (M-y0), 0);
+ long odds = odds_count;
+ long temp = 0;
+ const uint8_t *b_ptr0 = B;
+ const uint8_t *a_ptr0 = A + (y0 * lda);
+
+ uint32_t *c_ptr0 = C + (y0 * ldc);
+
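+ // One fully unrolled asm body per group count. Each body aliases x0-x13 to
+ // a_ptr1..a_ptr7/c_ptr1..c_ptr7 via .req; when fewer than eight rows
+ // remain (oob_rows > 0) the excess row pointers are redirected back to
+ // row 0, so their loads and stores stay in bounds and merely recompute
+ // that row's result.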
+ switch(K) {
+ case 1:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.b\n"
+ "whilelt p6.b, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #1\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7]\n"
+ "cbz %[loops], 2f\n"
+ "mov z24.s, #0\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z25.s, #0\n"
+ "mov z26.s, #0\n"
+ "mov z27.s, #0\n"
+ "mov z28.s, #0\n"
+ "mov z29.s, #0\n"
+ "mov z30.s, #0\n"
+ "mov z31.s, #0\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "addvl %[b_ptr0], %[b_ptr0], #1\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "mov z26.s, #0\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z27.s, #0\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.s, #0\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.s, #0\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "mov z26.s, #0\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z27.s, #0\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.s, #0\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.s, #0\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "b 5f\n"
+ "2:\n"
+ "mov z24.s, #0\n"
+ "mov z25.s, #0\n"
+ "mov z26.s, #0\n"
+ "mov z27.s, #0\n"
+ "mov z28.s, #0\n"
+ "mov z29.s, #0\n"
+ "mov z30.s, #0\n"
+ "mov z31.s, #0\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "5:\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [odds] "+r" (odds)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
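+ // Every case shares this control-flow skeleton: label 1 follows the
+ // out-of-bounds row fixup and builds the predicates, label 4 is the
+ // steady-state loop that stores the previous column block while
+ // accumulating the next (the A operands are reused or reloaded from fixed
+ // row addresses while b_ptr0 advances through packed B), label 3 stores
+ // the last full block and accumulates the final, possibly narrower, one,
+ // label 2 is the short path when only a partial block exists, and label 5
+ // does the final p0-predicated store of z24-z31.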
+ case 2:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.b\n"
+ "whilelt p6.b, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #2\n"
+ "cbz %[loops], 2f\n"
+ "mov z24.s, #0\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z25.s, #0\n"
+ "mov z26.s, #0\n"
+ "mov z27.s, #0\n"
+ "mov z28.s, #0\n"
+ "mov z29.s, #0\n"
+ "mov z30.s, #0\n"
+ "mov z31.s, #0\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "addvl %[b_ptr0], %[b_ptr0], #2\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "mov z26.s, #0\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z27.s, #0\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.s, #0\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.s, #0\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #2\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "mov z26.s, #0\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z27.s, #0\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.s, #0\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.s, #0\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "b 5f\n"
+ "2:\n"
+ "mov z24.s, #0\n"
+ "mov z25.s, #0\n"
+ "mov z26.s, #0\n"
+ "mov z27.s, #0\n"
+ "mov z28.s, #0\n"
+ "mov z29.s, #0\n"
+ "mov z30.s, #0\n"
+ "mov z31.s, #0\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "5:\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [odds] "+r" (odds)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
+ case 3:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.b\n"
+ "whilelt p6.b, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #3\n"
+ "cbz %[loops], 2f\n"
+ "mov z24.s, #0\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z25.s, #0\n"
+ "mov z26.s, #0\n"
+ "mov z27.s, #0\n"
+ "mov z28.s, #0\n"
+ "mov z29.s, #0\n"
+ "mov z30.s, #0\n"
+ "mov z31.s, #0\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "mov z26.s, #0\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z27.s, #0\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.s, #0\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.s, #0\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #3\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "mov z26.s, #0\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z27.s, #0\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.s, #0\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.s, #0\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #3\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "b 5f\n"
+ "2:\n"
+ "mov z24.s, #0\n"
+ "mov z25.s, #0\n"
+ "mov z26.s, #0\n"
+ "mov z27.s, #0\n"
+ "mov z28.s, #0\n"
+ "mov z29.s, #0\n"
+ "mov z30.s, #0\n"
+ "mov z31.s, #0\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "5:\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [odds] "+r" (odds)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
+ case 4:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.b\n"
+ "whilelt p6.b, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #4\n"
+ "cbz %[loops], 2f\n"
+ "mov z24.s, #0\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z25.s, #0\n"
+ "mov z26.s, #0\n"
+ "mov z27.s, #0\n"
+ "mov z28.s, #0\n"
+ "mov z29.s, #0\n"
+ "mov z30.s, #0\n"
+ "mov z31.s, #0\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "mov z26.s, #0\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z27.s, #0\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.s, #0\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.s, #0\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #4\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "mov z26.s, #0\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "mov z27.s, #0\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.s, #0\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.s, #0\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #4\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "b 5f\n"
+ "2:\n"
+ "mov z24.s, #0\n"
+ "mov z25.s, #0\n"
+ "mov z26.s, #0\n"
+ "mov z27.s, #0\n"
+ "mov z28.s, #0\n"
+ "mov z29.s, #0\n"
+ "mov z30.s, #0\n"
+ "mov z31.s, #0\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "5:\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [odds] "+r" (odds)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
+ case 5:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.b\n"
+ "whilelt p6.b, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #5\n"
+ "cbz %[loops], 2f\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #5\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #5\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "b 5f\n"
+ "2:\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "5:\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [odds] "+r" (odds)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
+ case 6:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.b\n"
+ "whilelt p6.b, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #6\n"
+ "cbz %[loops], 2f\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #6\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #6\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "b 5f\n"
+ "2:\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "5:\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [odds] "+r" (odds)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
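Reviewer note: the "cbz %[oob_rows], 1f" ladder repeated at the top of every case clamps an eight-row block that overhangs the bottom of the matrix without branching around work: the highest-numbered a_ptrN/c_ptrN pairs are aliased back onto row 0 ("add c_ptrN, %[c_ptr0], #0x0"), so the kernel keeps issuing only in-bounds loads and stores and merely recomputes row 0 a few extra times. A hedged C++ sketch of the same idea, with hypothetical array names:

#include <cstdint>

// Alias the trailing oob_rows pointer pairs to row 0, mirroring the
// subs/add ladder in the assembly above (row 0 itself is never aliased).
static void clamp_oob_rows(const uint8_t *a_ptr[8], uint32_t *c_ptr[8],
                           unsigned oob_rows)
{
    for (unsigned n = 0; n < oob_rows && n < 7; n++)
    {
        a_ptr[7 - n] = a_ptr[0]; // duplicate reads of row 0
        c_ptr[7 - n] = c_ptr[0]; // duplicate writes land back on row 0
    }
}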
+ case 7:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.b\n"
+ "whilelt p6.b, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #7\n"
+ "cbz %[loops], 2f\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #7\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #7\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "b 5f\n"
+ "2:\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "5:\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [odds] "+r" (odds)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
+ case 8:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.b\n"
+ "whilelt p6.b, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "b 5f\n"
+ "2:\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "5:\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [odds] "+r" (odds)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
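+ // Case 9: nine udot steps per row. Eight B vectors stay resident in z16-z23
+ // and a ninth is streamed each block, so b_ptr0 advances nine vector lengths
+ // per output column block; the K remainder falls in the third A quad (p6 loads).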
+ case 9:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.b\n"
+ "whilelt p6.b, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #1\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x20]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x20]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x20]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x20]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x20]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x20]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x20]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x20]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #1\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x20]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x20]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x20]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x20]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x20]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x20]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x20]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x20]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #1\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x20]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x20]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x20]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x20]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x20]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x20]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x20]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x20]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "b 5f\n"
+ "2:\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #1\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x20]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x20]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x20]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x20]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x20]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x20]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x20]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x20]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "5:\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [odds] "+r" (odds)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
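+ // Case 10: as case 9 but with ten udot steps; two B vectors (z16, z17) are
+ // re-streamed per block, so b_ptr0 advances ten vector lengths per block.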
+ case 10:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.b\n"
+ "whilelt p6.b, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #2\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x20]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x20]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x20]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x20]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x20]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x20]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x20]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x20]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #2\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x20]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x20]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x20]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x20]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x20]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x20]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x20]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x20]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #2\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x20]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x20]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x20]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x20]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x20]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x20]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x20]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x20]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "b 5f\n"
+ "2:\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #2\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x20]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x20]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x20]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x20]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x20]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x20]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x20]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x20]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "5:\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [odds] "+r" (odds)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
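+ // Case 11: eleven udot steps; three B vectors (z16-z18) are re-streamed per
+ // block, so b_ptr0 advances eleven vector lengths per block.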
+ case 11:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.b\n"
+ "whilelt p6.b, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #3\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x20]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x20]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x20]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x20]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x20]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x20]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x20]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x20]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #3\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x20]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x20]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x20]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x20]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x20]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x20]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x20]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x20]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #3\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x20]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x20]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x20]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x20]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x20]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x20]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x20]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x20]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "b 5f\n"
+ "2:\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #3\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x20]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x20]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x20]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x20]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x20]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x20]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x20]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x20]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "5:\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [odds] "+r" (odds)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
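Every accumulate line in these case bodies is an indexed SVE UDOT: one instruction folds four consecutive u8 values from an A row into each 32-bit lane of an accumulator vector. A minimal scalar sketch of that semantic, assuming nothing beyond the architectural definition of UDOT (the helper name is illustrative, not part of the library):

    #include <cstdint>

    // Scalar model of "udot zD.s, zB.b, zA.b[idx]": each 32-bit
    // accumulator lane gains the dot product of four u8 bytes from
    // the B vector with the four u8 bytes broadcast from the indexed
    // group of the A vector.
    static inline uint32_t udot_u8x4(uint32_t acc,
                                     const uint8_t b[4],
                                     const uint8_t a[4])
    {
        for (int i = 0; i < 4; ++i) {
            acc += static_cast<uint32_t>(b[i]) * static_cast<uint32_t>(a[i]);
        }
        return acc;
    }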
+ case 12:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.b\n"
+ "whilelt p6.b, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #4\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x20]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x20]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x20]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x20]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x20]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x20]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x20]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x20]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #4\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x20]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x20]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x20]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x20]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x20]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x20]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x20]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x20]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #4\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x20]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x20]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x20]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x20]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x20]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x20]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x20]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x20]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "b 5f\n"
+ "2:\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #4\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x20]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x20]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x20]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x20]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x20]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x20]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x20]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x20]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "5:\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [odds] "+r" (odds)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
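The preamble of each case builds eight A/C row pointers spaced lda/ldc bytes apart, then walks the oob_rows countdown to alias any rows past the end of the matrix back onto row 0, so the body can always process eight rows unconditionally; the duplicate work lands on row 0 instead of out-of-bounds memory. A hedged scalar sketch of that fix-up (function and parameter names hypothetical):

    #include <cstddef>
    #include <cstdint>

    // Mirror of the "add a_ptrN, ..." / "cbz %[oob_rows], 1f" preamble.
    // Strides are in bytes; trailing out-of-bounds rows are clamped
    // onto row 0, whose result is simply recomputed and re-stored.
    void setup_row_pointers(const uint8_t *a0, uint8_t *c0,
                            size_t lda, size_t ldc, unsigned oob_rows,
                            const uint8_t *a_ptr[8], uint8_t *c_ptr[8])
    {
        for (int r = 0; r < 8; ++r) {
            a_ptr[r] = a0 + r * lda;
            c_ptr[r] = c0 + r * ldc;
        }
        for (int r = 7; r >= 1 && oob_rows > 0; --r, --oob_rows) {
            a_ptr[r] = a0;  // alias the out-of-bounds row onto row 0
            c_ptr[r] = c0;
        }
    }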
+ case 13:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.b\n"
+ "whilelt p6.b, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #5\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x20]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x20]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x20]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x20]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x20]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x20]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x20]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x20]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x30]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x30]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x30]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x30]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x30]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x30]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x30]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x30]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #5\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x20]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x20]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x20]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x20]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x20]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x20]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x20]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x20]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x30]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x30]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x30]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x30]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x30]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x30]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x30]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x30]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #5\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x20]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x20]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x20]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x20]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x20]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x20]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x20]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x20]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x30]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x30]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x30]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x30]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x30]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x30]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x30]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x30]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "b 5f\n"
+ "2:\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #5\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x20]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x20]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x20]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x20]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x20]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x20]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x20]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x20]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x30]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x30]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x30]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x30]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x30]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x30]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x30]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x30]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "5:\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [odds] "+r" (odds)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
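+ // case 14: 8-row block with 14 udot rounds per pass; each pass consumes
+ // 14 B-panel vectors (addvl #8, then #6 mid-loop), with the final A quad
+ // of each row loaded under the p6 depth-tail predicate.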
+ case 14:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.b\n"
+ "whilelt p6.b, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #6\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x20]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x20]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x20]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x20]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x20]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x20]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x20]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x20]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x30]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x30]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x30]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x30]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x30]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x30]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x30]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x30]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #6\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x20]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x20]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x20]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x20]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x20]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x20]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x20]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x20]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x30]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x30]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x30]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x30]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x30]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x30]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x30]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x30]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #6\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x20]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x20]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x20]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x20]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x20]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x20]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x20]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x20]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x30]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x30]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x30]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x30]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x30]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x30]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x30]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x30]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "b 5f\n"
+ "2:\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #6\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x20]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x20]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x20]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x20]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x20]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x20]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x20]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x20]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x30]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x30]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x30]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x30]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x30]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x30]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x30]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x30]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "5:\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [odds] "+r" (odds)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
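+ // case 15: as case 14, but with 15 udot rounds per pass (the B panel
+ // advances #8, then #7 vectors mid-loop).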
+ case 15:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.b\n"
+ "whilelt p6.b, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x20]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x20]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x20]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x20]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x20]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x20]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x20]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x20]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #7\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x30]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x30]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x30]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x30]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x30]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x30]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x30]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x30]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x20]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x20]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x20]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x20]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x20]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x20]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x20]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x20]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #7\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x30]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x30]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x30]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x30]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x30]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x30]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x30]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x30]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x20]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x20]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x20]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x20]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x20]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x20]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x20]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x20]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #7\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x30]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x30]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x30]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x30]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x30]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x30]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x30]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x30]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "b 5f\n"
+ "2:\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x20]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x20]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x20]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x20]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x20]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x20]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x20]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x20]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #7\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x30]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x30]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x30]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x30]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x30]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x30]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x30]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x30]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "5:\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [odds] "+r" (odds)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
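+ // default/case 16: the full 16-round pass, consuming sixteen B-panel
+ // vectors per iteration.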
+ default:
+ case 16:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "cbz %[oob_rows], 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr7, %[c_ptr0], #0x0\n"
+ "add a_ptr7, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr6, %[c_ptr0], #0x0\n"
+ "add a_ptr6, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr5, %[c_ptr0], #0x0\n"
+ "add a_ptr5, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr4, %[c_ptr0], #0x0\n"
+ "add a_ptr4, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr3, %[c_ptr0], #0x0\n"
+ "add a_ptr3, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr2, %[c_ptr0], #0x0\n"
+ "add a_ptr2, %[a_ptr0], #0x0\n"
+ "b.eq 1f\n"
+ "subs %[oob_rows], %[oob_rows], #0x1\n"
+ "add c_ptr1, %[c_ptr0], #0x0\n"
+ "add a_ptr1, %[a_ptr0], #0x0\n"
+ "1:\n"
+ "ptrue p7.b\n"
+ "whilelt p6.b, %[temp], %[odd_depth]\n"
+ "whilelt p0.s, %[temp], %[last_width]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x20]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x20]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x20]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x20]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x20]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x20]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x20]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x20]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x30]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x30]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x30]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x30]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x30]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x30]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x30]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x30]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "b.eq 3f\n"
+ "4:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x20]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x20]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x20]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x20]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x20]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x20]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x20]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x20]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x30]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x30]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x30]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x30]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x30]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x30]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x30]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x30]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "b.ne 4b\n"
+ "3:\n"
+ "st1w z24.s, p7, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "mov z24.s, #0\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "st1w z25.s, p7, [c_ptr1]\n"
+ "addvl c_ptr1, c_ptr1, #1\n"
+ "mov z25.s, #0\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "st1w z26.s, p7, [c_ptr2]\n"
+ "addvl c_ptr2, c_ptr2, #1\n"
+ "mov z26.s, #0\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "st1w z27.s, p7, [c_ptr3]\n"
+ "addvl c_ptr3, c_ptr3, #1\n"
+ "mov z27.s, #0\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "st1w z28.s, p7, [c_ptr4]\n"
+ "addvl c_ptr4, c_ptr4, #1\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "st1w z29.s, p7, [c_ptr5]\n"
+ "addvl c_ptr5, c_ptr5, #1\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "st1w z30.s, p7, [c_ptr6]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "st1w z31.s, p7, [c_ptr7]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "addvl c_ptr6, c_ptr6, #1\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "addvl c_ptr7, c_ptr7, #1\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x20]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x20]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x20]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x20]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x20]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x20]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x20]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x20]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x30]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x30]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x30]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x30]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x30]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x30]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x30]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x30]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "b 5f\n"
+ "2:\n"
+ "mov z24.s, #0\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
+ "mov z25.s, #0\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1]\n"
+ "mov z26.s, #0\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2]\n"
+ "mov z27.s, #0\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3]\n"
+ "mov z28.s, #0\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4]\n"
+ "mov z29.s, #0\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5]\n"
+ "mov z30.s, #0\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6]\n"
+ "mov z31.s, #0\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "ld1b z16.b, p7/z, [%[b_ptr0]]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "ld1b z17.b, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "ld1b z18.b, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x10]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x10]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x10]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x10]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x10]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x10]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x10]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x10]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "ld1b z19.b, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "ld1b z20.b, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "ld1b z21.b, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "ld1b z22.b, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "ld1rqb z0.b, p7/z, [%[a_ptr0], #0x20]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "ld1rqb z1.b, p7/z, [a_ptr1, #0x20]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "ld1rqb z2.b, p7/z, [a_ptr2, #0x20]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "ld1rqb z3.b, p7/z, [a_ptr3, #0x20]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "ld1rqb z4.b, p7/z, [a_ptr4, #0x20]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "ld1rqb z5.b, p7/z, [a_ptr5, #0x20]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "ld1rqb z6.b, p7/z, [a_ptr6, #0x20]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "ld1b z23.b, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "udot z24.s, z16.b, z0.b[0]\n"
+ "ld1rqb z7.b, p7/z, [a_ptr7, #0x20]\n"
+ "udot z25.s, z16.b, z1.b[0]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "udot z26.s, z16.b, z2.b[0]\n"
+ "udot z27.s, z16.b, z3.b[0]\n"
+ "udot z28.s, z16.b, z4.b[0]\n"
+ "udot z29.s, z16.b, z5.b[0]\n"
+ "udot z30.s, z16.b, z6.b[0]\n"
+ "udot z31.s, z16.b, z7.b[0]\n"
+ "udot z24.s, z17.b, z0.b[1]\n"
+ "udot z25.s, z17.b, z1.b[1]\n"
+ "udot z26.s, z17.b, z2.b[1]\n"
+ "udot z27.s, z17.b, z3.b[1]\n"
+ "udot z28.s, z17.b, z4.b[1]\n"
+ "udot z29.s, z17.b, z5.b[1]\n"
+ "udot z30.s, z17.b, z6.b[1]\n"
+ "udot z31.s, z17.b, z7.b[1]\n"
+ "udot z24.s, z18.b, z0.b[2]\n"
+ "udot z25.s, z18.b, z1.b[2]\n"
+ "udot z26.s, z18.b, z2.b[2]\n"
+ "udot z27.s, z18.b, z3.b[2]\n"
+ "udot z28.s, z18.b, z4.b[2]\n"
+ "udot z29.s, z18.b, z5.b[2]\n"
+ "udot z30.s, z18.b, z6.b[2]\n"
+ "udot z31.s, z18.b, z7.b[2]\n"
+ "udot z24.s, z19.b, z0.b[3]\n"
+ "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x30]\n"
+ "udot z25.s, z19.b, z1.b[3]\n"
+ "ld1rqb z1.b, p6/z, [a_ptr1, #0x30]\n"
+ "udot z26.s, z19.b, z2.b[3]\n"
+ "ld1rqb z2.b, p6/z, [a_ptr2, #0x30]\n"
+ "udot z27.s, z19.b, z3.b[3]\n"
+ "ld1rqb z3.b, p6/z, [a_ptr3, #0x30]\n"
+ "udot z28.s, z19.b, z4.b[3]\n"
+ "ld1rqb z4.b, p6/z, [a_ptr4, #0x30]\n"
+ "udot z29.s, z19.b, z5.b[3]\n"
+ "ld1rqb z5.b, p6/z, [a_ptr5, #0x30]\n"
+ "udot z30.s, z19.b, z6.b[3]\n"
+ "ld1rqb z6.b, p6/z, [a_ptr6, #0x30]\n"
+ "udot z31.s, z19.b, z7.b[3]\n"
+ "ld1rqb z7.b, p6/z, [a_ptr7, #0x30]\n"
+ "udot z24.s, z20.b, z0.b[0]\n"
+ "udot z25.s, z20.b, z1.b[0]\n"
+ "udot z26.s, z20.b, z2.b[0]\n"
+ "udot z27.s, z20.b, z3.b[0]\n"
+ "udot z28.s, z20.b, z4.b[0]\n"
+ "udot z29.s, z20.b, z5.b[0]\n"
+ "udot z30.s, z20.b, z6.b[0]\n"
+ "udot z31.s, z20.b, z7.b[0]\n"
+ "udot z24.s, z21.b, z0.b[1]\n"
+ "udot z25.s, z21.b, z1.b[1]\n"
+ "udot z26.s, z21.b, z2.b[1]\n"
+ "udot z27.s, z21.b, z3.b[1]\n"
+ "udot z28.s, z21.b, z4.b[1]\n"
+ "udot z29.s, z21.b, z5.b[1]\n"
+ "udot z30.s, z21.b, z6.b[1]\n"
+ "udot z31.s, z21.b, z7.b[1]\n"
+ "udot z24.s, z22.b, z0.b[2]\n"
+ "udot z25.s, z22.b, z1.b[2]\n"
+ "udot z26.s, z22.b, z2.b[2]\n"
+ "udot z27.s, z22.b, z3.b[2]\n"
+ "udot z28.s, z22.b, z4.b[2]\n"
+ "udot z29.s, z22.b, z5.b[2]\n"
+ "udot z30.s, z22.b, z6.b[2]\n"
+ "udot z31.s, z22.b, z7.b[2]\n"
+ "udot z24.s, z23.b, z0.b[3]\n"
+ "udot z25.s, z23.b, z1.b[3]\n"
+ "udot z26.s, z23.b, z2.b[3]\n"
+ "udot z27.s, z23.b, z3.b[3]\n"
+ "udot z28.s, z23.b, z4.b[3]\n"
+ "udot z29.s, z23.b, z5.b[3]\n"
+ "udot z30.s, z23.b, z6.b[3]\n"
+ "udot z31.s, z23.b, z7.b[3]\n"
+ "5:\n"
+ "st1w z24.s, p0, [%[c_ptr0]]\n"
+ "addvl %[c_ptr0], %[c_ptr0], #1\n"
+ "st1w z25.s, p0, [c_ptr1]\n"
+ "st1w z26.s, p0, [c_ptr2]\n"
+ "st1w z27.s, p0, [c_ptr3]\n"
+ "st1w z28.s, p0, [c_ptr4]\n"
+ "st1w z29.s, p0, [c_ptr5]\n"
+ "st1w z30.s, p0, [c_ptr6]\n"
+ "st1w z31.s, p0, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [oob_rows] "+r" (oob_rows), [temp] "+r" (temp), [odds] "+r" (odds)
+ : [lda] "r" (ldab), [ldc] "r" (ldcb), [odd_depth] "r" (odd_depth), [last_width] "r" (last_width)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ break;
+ }
+ }
+}
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
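The kernel above builds its inner loop from indexed SVE UDOT instructions such as "udot z24.s, z16.b, z0.b[0]": every 32-bit accumulator lane gains the dot product of four unsigned bytes of the packed B panel with four unsigned bytes of A. Because the A rows are fetched with ld1rqb (load and replicate quadword), the same 16 A bytes sit in every 128-bit segment, so index [i] selects bytes 4*i..4*i+3 of that quadword for every lane. A minimal scalar sketch of that arithmetic, under those assumptions and with hypothetical names (udot_model is not a library symbol):

#include <cstdint>
#include <cstddef>

// Models one "udot zacc.s, zb.b, za.b[idx]" step where za was filled by
// ld1rqb: acc holds `lanes` 32-bit sums, b_bytes holds 4*lanes packed B
// bytes, and a_quad holds the 16 replicated A bytes.
static void udot_model(uint32_t *acc, const uint8_t *b_bytes,
                       const uint8_t a_quad[16], int idx, size_t lanes)
{
    const uint8_t *a = a_quad + 4 * idx; // byte group picked by the index
    for (size_t lane = 0; lane < lanes; lane++)
    {
        uint32_t sum = 0;
        for (int k = 0; k < 4; k++)
        {
            sum += uint32_t(b_bytes[4 * lane + k]) * uint32_t(a[k]);
        }
        acc[lane] += sum; // UDOT accumulates into the destination
    }
}

Chaining this model over the four indices of each quadword, as the kernel does with z16..z23 against z0..z7, accumulates one full K-slice of the u8 GEMM into the z24..z31 row accumulators before the predicated st1w stores write them out.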
diff --git a/src/core/NEON/kernels/arm_gemm/rowsum_indirect_s8.cpp b/src/core/NEON/kernels/arm_gemm/rowsum_indirect_s8.cpp
index 7345793f93..5433676558 100644
--- a/src/core/NEON/kernels/arm_gemm/rowsum_indirect_s8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/rowsum_indirect_s8.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020, 2023 Arm Limited.
+ * Copyright (c) 2019-2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -63,8 +63,8 @@ void row_sums_indirect(
ka.string_lengths = string_lengths;
__asm__ __volatile__(
- "add x20, %x[qp], %[b_offset]\n"
- "ld1r { v2.4s }, [x20]\n"
+ "add x19, %x[qp], %[b_offset]\n"
+ "ld1r { v2.4s }, [x19]\n"
"neg v2.4s, v2.4s\n"
"1:" // Row loop
"cmp %x[M], #0x6\n"
@@ -76,97 +76,97 @@ void row_sums_indirect(
"bgt 35f\n"
"beq 18f\n"
"movi v1.8h, #0x0\n"
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"movi v0.4s, #0x0\n"
- "mov x10, #0x0\n"
- "ldr w21, [%x[args_ptr], %[offsetof_num_strings]]\n"
"mov x9, #0x0\n"
+ "mov x28, #0x0\n"
"2:" // Height 1: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w28, [x20, x9, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w27, [x19, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 3f\n"
- "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
- "add x20, x20, %x[input_offset], LSL #3\n"
- "ldr x27, [x20, #0x0]\n"
- "cbnz x9, 4f\n"
- "ldr w20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x27, x27, x20\n"
+ "ldr x19, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x19, x19, %x[input_offset], LSL #3\n"
+ "ldr x26, [x19, #0x0]\n"
+ "cbnz x28, 4f\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x26, x26, x19\n"
"b 4f\n"
"3:" // Height 1: setup direct input
- "mov x27, %x[input_ptr]\n"
+ "mov x26, %x[input_ptr]\n"
"4:" // Height 1: input setup done
- "cmp x28, #0x10\n"
+ "cmp x27, #0x10\n"
"blt 8f\n"
- "cmp x28, #0x20\n"
+ "cmp x27, #0x20\n"
"blt 7f\n"
"5:" // Height 1: Multiply loop: Main loop head
- "ldr q31, [x27, #0x0]\n"
- "cmp x10, #0x7e\n"
- "add x27, x27, #0x10\n"
+ "ldr q31, [x26, #0x0]\n"
+ "cmp x9, #0x7e\n"
+ "add x26, x26, #0x10\n"
"blt 6f\n"
"sadalp v0.4s, v1.8h\n"
"movi v1.8h, #0x0\n"
- "mov x10, #0x0\n"
+ "mov x9, #0x0\n"
"6:" // Height 1: Multiply loop: unique 1: no collapse
- "sub x28, x28, #0x10\n"
- "cmp x28, #0x20\n"
"sadalp v1.8h, v31.16b\n"
- "add x10, x10, #0x1\n"
+ "add x9, x9, #0x1\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x20\n"
"bge 5b\n"
"7:" // Height 1: Multiply loop: Single iteration only
- "ldr q31, [x27, #0x0]\n"
- "sub x28, x28, #0x10\n"
+ "sub x27, x27, #0x10\n"
+ "ldr q31, [x26, #0x0]\n"
+ "add x26, x26, #0x10\n"
"sadalp v1.8h, v31.16b\n"
- "add x27, x27, #0x10\n"
"8:" // Height 1: Multiply loop: Main loop skip
- "cbz x28, 17f\n"
- "tbz x28, #3, 12f\n"
- "ldr d31, [x27], #0x8\n"
- "tbz x28, #2, 10f\n"
- "ld1 { v31.s }[2], [x27], #0x4\n"
- "tbz x28, #1, 9f\n"
- "ld1 { v31.h }[6], [x27], #0x2\n"
- "tbz x28, #0, 16f\n"
- "ld1 { v31.b }[14], [x27]\n"
+ "cbz x27, 17f\n"
+ "tbz x27, #3, 12f\n"
+ "ldr d31, [x26], #0x8\n"
+ "tbz x27, #2, 10f\n"
+ "ld1 { v31.s }[2], [x26], #0x4\n"
+ "tbz x27, #1, 9f\n"
+ "ld1 { v31.h }[6], [x26], #0x2\n"
+ "tbz x27, #0, 16f\n"
+ "ld1 { v31.b }[14], [x26]\n"
"b 16f\n"
"9:" // Height 1: Multiply loop: Ragged operand read: partial_1_12
- "tbz x28, #0, 16f\n"
- "ld1 { v31.b }[12], [x27]\n"
+ "tbz x27, #0, 16f\n"
+ "ld1 { v31.b }[12], [x26]\n"
"b 16f\n"
"10:" // Height 1: Multiply loop: Ragged operand read: partial_2_8
- "tbz x28, #1, 11f\n"
- "ld1 { v31.h }[4], [x27], #0x2\n"
- "tbz x28, #0, 16f\n"
- "ld1 { v31.b }[10], [x27]\n"
+ "tbz x27, #1, 11f\n"
+ "ld1 { v31.h }[4], [x26], #0x2\n"
+ "tbz x27, #0, 16f\n"
+ "ld1 { v31.b }[10], [x26]\n"
"b 16f\n"
"11:" // Height 1: Multiply loop: Ragged operand read: partial_1_8
- "tbz x28, #0, 16f\n"
- "ld1 { v31.b }[8], [x27]\n"
+ "tbz x27, #0, 16f\n"
+ "ld1 { v31.b }[8], [x26]\n"
"b 16f\n"
"12:" // Height 1: Multiply loop: Ragged operand read: partial_4_0
- "tbz x28, #2, 14f\n"
- "ldr s31, [x27], #0x4\n"
- "tbz x28, #1, 13f\n"
- "ld1 { v31.h }[2], [x27], #0x2\n"
- "tbz x28, #0, 16f\n"
- "ld1 { v31.b }[6], [x27]\n"
+ "tbz x27, #2, 14f\n"
+ "ldr s31, [x26], #0x4\n"
+ "tbz x27, #1, 13f\n"
+ "ld1 { v31.h }[2], [x26], #0x2\n"
+ "tbz x27, #0, 16f\n"
+ "ld1 { v31.b }[6], [x26]\n"
"b 16f\n"
"13:" // Height 1: Multiply loop: Ragged operand read: partial_1_4
- "tbz x28, #0, 16f\n"
- "ld1 { v31.b }[4], [x27]\n"
+ "tbz x27, #0, 16f\n"
+ "ld1 { v31.b }[4], [x26]\n"
"b 16f\n"
"14:" // Height 1: Multiply loop: Ragged operand read: partial_2_0
- "tbz x28, #1, 15f\n"
- "ldr h31, [x27], #0x2\n"
- "tbz x28, #0, 16f\n"
- "ld1 { v31.b }[2], [x27]\n"
+ "tbz x27, #1, 15f\n"
+ "ldr h31, [x26], #0x2\n"
+ "tbz x27, #0, 16f\n"
+ "ld1 { v31.b }[2], [x26]\n"
"b 16f\n"
"15:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
- "ldr b31, [x27, #0x0]\n"
+ "ldr b31, [x26, #0x0]\n"
"16:" // Height 1: Multiply loop: Ragged operand read: Done
"sadalp v1.8h, v31.16b\n"
"17:" // Height 1: Multiply loop: No odd multiplies
- "add x9, x9, #0x1\n"
- "cmp x9, x21\n"
+ "add x28, x28, #0x1\n"
+ "cmp x28, x20\n"
"bne 2b\n"
"sadalp v0.4s, v1.8h\n"
"addp v0.4s, v0.4s, v0.4s\n"
@@ -176,126 +176,126 @@ void row_sums_indirect(
"b 104f\n"
"18:" // Height 2
"movi v1.8h, #0x0\n"
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "mov x9, #0x0\n"
"movi v0.4s, #0x0\n"
- "mov x10, #0x0\n"
- "ldr w21, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "mov x28, #0x0\n"
"movi v30.8h, #0x0\n"
"movi v29.4s, #0x0\n"
- "mov x9, #0x0\n"
"19:" // Height 2: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w28, [x20, x9, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w27, [x19, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 20f\n"
- "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
- "add x20, x20, %x[input_offset], LSL #3\n"
- "ldr x27, [x20, #0x0]\n"
- "ldr x26, [x20, #0x8]\n"
- "cbnz x9, 21f\n"
- "ldr w20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x27, x27, x20\n"
- "add x26, x26, x20\n"
+ "ldr x19, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x19, x19, %x[input_offset], LSL #3\n"
+ "ldr x26, [x19, #0x0]\n"
+ "ldr x25, [x19, #0x8]\n"
+ "cbnz x28, 21f\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x26, x26, x19\n"
+ "add x25, x25, x19\n"
"b 21f\n"
"20:" // Height 2: setup direct input
- "mov x27, %x[input_ptr]\n"
- "add x26, x27, %x[input_offset]\n"
+ "mov x26, %x[input_ptr]\n"
+ "add x25, x26, %x[input_offset]\n"
"21:" // Height 2: input setup done
- "cmp x28, #0x10\n"
+ "cmp x27, #0x10\n"
"blt 25f\n"
- "cmp x28, #0x20\n"
+ "cmp x27, #0x20\n"
"blt 24f\n"
"22:" // Height 2: Multiply loop: Main loop head
- "ldr q31, [x27, #0x0]\n"
- "ldr q28, [x26, #0x0]\n"
- "cmp x10, #0x7e\n"
- "add x27, x27, #0x10\n"
+ "ldr q31, [x26, #0x0]\n"
+ "ldr q28, [x25, #0x0]\n"
+ "cmp x9, #0x7e\n"
"add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
"blt 23f\n"
"sadalp v0.4s, v1.8h\n"
"movi v1.8h, #0x0\n"
- "mov x10, #0x0\n"
"sadalp v29.4s, v30.8h\n"
"movi v30.8h, #0x0\n"
+ "mov x9, #0x0\n"
"23:" // Height 2: Multiply loop: unique 2: no collapse
- "sub x28, x28, #0x10\n"
- "cmp x28, #0x20\n"
"sadalp v1.8h, v31.16b\n"
"sadalp v30.8h, v28.16b\n"
- "add x10, x10, #0x1\n"
+ "add x9, x9, #0x1\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x20\n"
"bge 22b\n"
"24:" // Height 2: Multiply loop: Single iteration only
- "ldr q31, [x27, #0x0]\n"
- "ldr q28, [x26, #0x0]\n"
- "sub x28, x28, #0x10\n"
+ "sub x27, x27, #0x10\n"
+ "ldr q31, [x26, #0x0]\n"
+ "ldr q28, [x25, #0x0]\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
"sadalp v1.8h, v31.16b\n"
"sadalp v30.8h, v28.16b\n"
- "add x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
"25:" // Height 2: Multiply loop: Main loop skip
- "cbz x28, 34f\n"
- "tbz x28, #3, 29f\n"
- "ldr d31, [x27], #0x8\n"
- "ldr d28, [x26], #0x8\n"
- "tbz x28, #2, 27f\n"
- "ld1 { v31.s }[2], [x27], #0x4\n"
- "ld1 { v28.s }[2], [x26], #0x4\n"
- "tbz x28, #1, 26f\n"
- "ld1 { v31.h }[6], [x27], #0x2\n"
- "ld1 { v28.h }[6], [x26], #0x2\n"
- "tbz x28, #0, 33f\n"
- "ld1 { v31.b }[14], [x27]\n"
- "ld1 { v28.b }[14], [x26]\n"
+ "cbz x27, 34f\n"
+ "tbz x27, #3, 29f\n"
+ "ldr d31, [x26], #0x8\n"
+ "ldr d28, [x25], #0x8\n"
+ "tbz x27, #2, 27f\n"
+ "ld1 { v31.s }[2], [x26], #0x4\n"
+ "ld1 { v28.s }[2], [x25], #0x4\n"
+ "tbz x27, #1, 26f\n"
+ "ld1 { v31.h }[6], [x26], #0x2\n"
+ "ld1 { v28.h }[6], [x25], #0x2\n"
+ "tbz x27, #0, 33f\n"
+ "ld1 { v31.b }[14], [x26]\n"
+ "ld1 { v28.b }[14], [x25]\n"
"b 33f\n"
"26:" // Height 2: Multiply loop: Ragged operand read: partial_1_12
- "tbz x28, #0, 33f\n"
- "ld1 { v31.b }[12], [x27]\n"
- "ld1 { v28.b }[12], [x26]\n"
+ "tbz x27, #0, 33f\n"
+ "ld1 { v31.b }[12], [x26]\n"
+ "ld1 { v28.b }[12], [x25]\n"
"b 33f\n"
"27:" // Height 2: Multiply loop: Ragged operand read: partial_2_8
- "tbz x28, #1, 28f\n"
- "ld1 { v31.h }[4], [x27], #0x2\n"
- "ld1 { v28.h }[4], [x26], #0x2\n"
- "tbz x28, #0, 33f\n"
- "ld1 { v31.b }[10], [x27]\n"
- "ld1 { v28.b }[10], [x26]\n"
+ "tbz x27, #1, 28f\n"
+ "ld1 { v31.h }[4], [x26], #0x2\n"
+ "ld1 { v28.h }[4], [x25], #0x2\n"
+ "tbz x27, #0, 33f\n"
+ "ld1 { v31.b }[10], [x26]\n"
+ "ld1 { v28.b }[10], [x25]\n"
"b 33f\n"
"28:" // Height 2: Multiply loop: Ragged operand read: partial_1_8
- "tbz x28, #0, 33f\n"
- "ld1 { v31.b }[8], [x27]\n"
- "ld1 { v28.b }[8], [x26]\n"
+ "tbz x27, #0, 33f\n"
+ "ld1 { v31.b }[8], [x26]\n"
+ "ld1 { v28.b }[8], [x25]\n"
"b 33f\n"
"29:" // Height 2: Multiply loop: Ragged operand read: partial_4_0
- "tbz x28, #2, 31f\n"
- "ldr s31, [x27], #0x4\n"
- "ldr s28, [x26], #0x4\n"
- "tbz x28, #1, 30f\n"
- "ld1 { v31.h }[2], [x27], #0x2\n"
- "ld1 { v28.h }[2], [x26], #0x2\n"
- "tbz x28, #0, 33f\n"
- "ld1 { v31.b }[6], [x27]\n"
- "ld1 { v28.b }[6], [x26]\n"
+ "tbz x27, #2, 31f\n"
+ "ldr s31, [x26], #0x4\n"
+ "ldr s28, [x25], #0x4\n"
+ "tbz x27, #1, 30f\n"
+ "ld1 { v31.h }[2], [x26], #0x2\n"
+ "ld1 { v28.h }[2], [x25], #0x2\n"
+ "tbz x27, #0, 33f\n"
+ "ld1 { v31.b }[6], [x26]\n"
+ "ld1 { v28.b }[6], [x25]\n"
"b 33f\n"
"30:" // Height 2: Multiply loop: Ragged operand read: partial_1_4
- "tbz x28, #0, 33f\n"
- "ld1 { v31.b }[4], [x27]\n"
- "ld1 { v28.b }[4], [x26]\n"
+ "tbz x27, #0, 33f\n"
+ "ld1 { v31.b }[4], [x26]\n"
+ "ld1 { v28.b }[4], [x25]\n"
"b 33f\n"
"31:" // Height 2: Multiply loop: Ragged operand read: partial_2_0
- "tbz x28, #1, 32f\n"
- "ldr h31, [x27], #0x2\n"
- "ldr h28, [x26], #0x2\n"
- "tbz x28, #0, 33f\n"
- "ld1 { v31.b }[2], [x27]\n"
- "ld1 { v28.b }[2], [x26]\n"
+ "tbz x27, #1, 32f\n"
+ "ldr h31, [x26], #0x2\n"
+ "ldr h28, [x25], #0x2\n"
+ "tbz x27, #0, 33f\n"
+ "ld1 { v31.b }[2], [x26]\n"
+ "ld1 { v28.b }[2], [x25]\n"
"b 33f\n"
"32:" // Height 2: Multiply loop: Ragged operand read: partial_1_0
- "ldr b31, [x27, #0x0]\n"
- "ldr b28, [x26, #0x0]\n"
+ "ldr b31, [x26, #0x0]\n"
+ "ldr b28, [x25, #0x0]\n"
"33:" // Height 2: Multiply loop: Ragged operand read: Done
"sadalp v1.8h, v31.16b\n"
"sadalp v30.8h, v28.16b\n"
"34:" // Height 2: Multiply loop: No odd multiplies
- "add x9, x9, #0x1\n"
- "cmp x9, x21\n"
+ "add x28, x28, #0x1\n"
+ "cmp x28, x20\n"
"bne 19b\n"
"sadalp v0.4s, v1.8h\n"
"sadalp v29.4s, v30.8h\n"
@@ -306,354 +306,354 @@ void row_sums_indirect(
"b 104f\n"
"35:" // Height 3
"movi v1.8h, #0x0\n"
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "mov x9, #0x0\n"
"movi v0.4s, #0x0\n"
- "mov x10, #0x0\n"
- "ldr w21, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "mov x28, #0x0\n"
"movi v30.8h, #0x0\n"
"movi v29.4s, #0x0\n"
- "mov x9, #0x0\n"
"movi v27.8h, #0x0\n"
"movi v26.4s, #0x0\n"
"36:" // Height 3: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w28, [x20, x9, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w27, [x19, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 37f\n"
- "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
- "add x20, x20, %x[input_offset], LSL #3\n"
- "ldr x27, [x20, #0x0]\n"
- "ldr x26, [x20, #0x8]\n"
- "ldr x25, [x20, #0x10]\n"
- "cbnz x9, 38f\n"
- "ldr w20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x27, x27, x20\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
+ "ldr x19, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x19, x19, %x[input_offset], LSL #3\n"
+ "ldr x26, [x19, #0x0]\n"
+ "ldr x25, [x19, #0x8]\n"
+ "ldr x24, [x19, #0x10]\n"
+ "cbnz x28, 38f\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x26, x26, x19\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
"b 38f\n"
"37:" // Height 3: setup direct input
- "mov x27, %x[input_ptr]\n"
- "add x26, x27, %x[input_offset]\n"
+ "mov x26, %x[input_ptr]\n"
"add x25, x26, %x[input_offset]\n"
+ "add x24, x25, %x[input_offset]\n"
"38:" // Height 3: input setup done
- "cmp x28, #0x10\n"
+ "cmp x27, #0x10\n"
"blt 42f\n"
- "cmp x28, #0x20\n"
+ "cmp x27, #0x20\n"
"blt 41f\n"
"39:" // Height 3: Multiply loop: Main loop head
- "ldr q31, [x27, #0x0]\n"
- "ldr q28, [x26, #0x0]\n"
- "cmp x10, #0x7e\n"
- "add x27, x27, #0x10\n"
- "ldr q25, [x25, #0x0]\n"
+ "ldr q31, [x26, #0x0]\n"
+ "ldr q28, [x25, #0x0]\n"
+ "ldr q25, [x24, #0x0]\n"
+ "cmp x9, #0x7e\n"
"add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"blt 40f\n"
"sadalp v0.4s, v1.8h\n"
"movi v1.8h, #0x0\n"
- "mov x10, #0x0\n"
"sadalp v29.4s, v30.8h\n"
"movi v30.8h, #0x0\n"
"sadalp v26.4s, v27.8h\n"
"movi v27.8h, #0x0\n"
+ "mov x9, #0x0\n"
"40:" // Height 3: Multiply loop: unique 3: no collapse
- "sub x28, x28, #0x10\n"
- "cmp x28, #0x20\n"
"sadalp v1.8h, v31.16b\n"
"sadalp v30.8h, v28.16b\n"
"sadalp v27.8h, v25.16b\n"
- "add x10, x10, #0x1\n"
+ "add x9, x9, #0x1\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x20\n"
"bge 39b\n"
"41:" // Height 3: Multiply loop: Single iteration only
- "ldr q31, [x27, #0x0]\n"
- "ldr q28, [x26, #0x0]\n"
- "sub x28, x28, #0x10\n"
+ "sub x27, x27, #0x10\n"
+ "ldr q31, [x26, #0x0]\n"
+ "ldr q28, [x25, #0x0]\n"
+ "ldr q25, [x24, #0x0]\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
"sadalp v1.8h, v31.16b\n"
- "ldr q25, [x25, #0x0]\n"
"sadalp v30.8h, v28.16b\n"
"sadalp v27.8h, v25.16b\n"
- "add x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"42:" // Height 3: Multiply loop: Main loop skip
- "cbz x28, 51f\n"
- "tbz x28, #3, 46f\n"
- "ldr d31, [x27], #0x8\n"
- "ldr d28, [x26], #0x8\n"
- "ldr d25, [x25], #0x8\n"
- "tbz x28, #2, 44f\n"
- "ld1 { v31.s }[2], [x27], #0x4\n"
- "ld1 { v28.s }[2], [x26], #0x4\n"
- "ld1 { v25.s }[2], [x25], #0x4\n"
- "tbz x28, #1, 43f\n"
- "ld1 { v31.h }[6], [x27], #0x2\n"
- "ld1 { v28.h }[6], [x26], #0x2\n"
- "ld1 { v25.h }[6], [x25], #0x2\n"
- "tbz x28, #0, 50f\n"
- "ld1 { v31.b }[14], [x27]\n"
- "ld1 { v28.b }[14], [x26]\n"
- "ld1 { v25.b }[14], [x25]\n"
+ "cbz x27, 51f\n"
+ "tbz x27, #3, 46f\n"
+ "ldr d31, [x26], #0x8\n"
+ "ldr d28, [x25], #0x8\n"
+ "ldr d25, [x24], #0x8\n"
+ "tbz x27, #2, 44f\n"
+ "ld1 { v31.s }[2], [x26], #0x4\n"
+ "ld1 { v28.s }[2], [x25], #0x4\n"
+ "ld1 { v25.s }[2], [x24], #0x4\n"
+ "tbz x27, #1, 43f\n"
+ "ld1 { v31.h }[6], [x26], #0x2\n"
+ "ld1 { v28.h }[6], [x25], #0x2\n"
+ "ld1 { v25.h }[6], [x24], #0x2\n"
+ "tbz x27, #0, 50f\n"
+ "ld1 { v31.b }[14], [x26]\n"
+ "ld1 { v28.b }[14], [x25]\n"
+ "ld1 { v25.b }[14], [x24]\n"
"b 50f\n"
"43:" // Height 3: Multiply loop: Ragged operand read: partial_1_12
- "tbz x28, #0, 50f\n"
- "ld1 { v31.b }[12], [x27]\n"
- "ld1 { v28.b }[12], [x26]\n"
- "ld1 { v25.b }[12], [x25]\n"
+ "tbz x27, #0, 50f\n"
+ "ld1 { v31.b }[12], [x26]\n"
+ "ld1 { v28.b }[12], [x25]\n"
+ "ld1 { v25.b }[12], [x24]\n"
"b 50f\n"
"44:" // Height 3: Multiply loop: Ragged operand read: partial_2_8
- "tbz x28, #1, 45f\n"
- "ld1 { v31.h }[4], [x27], #0x2\n"
- "ld1 { v28.h }[4], [x26], #0x2\n"
- "ld1 { v25.h }[4], [x25], #0x2\n"
- "tbz x28, #0, 50f\n"
- "ld1 { v31.b }[10], [x27]\n"
- "ld1 { v28.b }[10], [x26]\n"
- "ld1 { v25.b }[10], [x25]\n"
+ "tbz x27, #1, 45f\n"
+ "ld1 { v31.h }[4], [x26], #0x2\n"
+ "ld1 { v28.h }[4], [x25], #0x2\n"
+ "ld1 { v25.h }[4], [x24], #0x2\n"
+ "tbz x27, #0, 50f\n"
+ "ld1 { v31.b }[10], [x26]\n"
+ "ld1 { v28.b }[10], [x25]\n"
+ "ld1 { v25.b }[10], [x24]\n"
"b 50f\n"
"45:" // Height 3: Multiply loop: Ragged operand read: partial_1_8
- "tbz x28, #0, 50f\n"
- "ld1 { v31.b }[8], [x27]\n"
- "ld1 { v28.b }[8], [x26]\n"
- "ld1 { v25.b }[8], [x25]\n"
+ "tbz x27, #0, 50f\n"
+ "ld1 { v31.b }[8], [x26]\n"
+ "ld1 { v28.b }[8], [x25]\n"
+ "ld1 { v25.b }[8], [x24]\n"
"b 50f\n"
"46:" // Height 3: Multiply loop: Ragged operand read: partial_4_0
- "tbz x28, #2, 48f\n"
- "ldr s31, [x27], #0x4\n"
- "ldr s28, [x26], #0x4\n"
- "ldr s25, [x25], #0x4\n"
- "tbz x28, #1, 47f\n"
- "ld1 { v31.h }[2], [x27], #0x2\n"
- "ld1 { v28.h }[2], [x26], #0x2\n"
- "ld1 { v25.h }[2], [x25], #0x2\n"
- "tbz x28, #0, 50f\n"
- "ld1 { v31.b }[6], [x27]\n"
- "ld1 { v28.b }[6], [x26]\n"
- "ld1 { v25.b }[6], [x25]\n"
+ "tbz x27, #2, 48f\n"
+ "ldr s31, [x26], #0x4\n"
+ "ldr s28, [x25], #0x4\n"
+ "ldr s25, [x24], #0x4\n"
+ "tbz x27, #1, 47f\n"
+ "ld1 { v31.h }[2], [x26], #0x2\n"
+ "ld1 { v28.h }[2], [x25], #0x2\n"
+ "ld1 { v25.h }[2], [x24], #0x2\n"
+ "tbz x27, #0, 50f\n"
+ "ld1 { v31.b }[6], [x26]\n"
+ "ld1 { v28.b }[6], [x25]\n"
+ "ld1 { v25.b }[6], [x24]\n"
"b 50f\n"
"47:" // Height 3: Multiply loop: Ragged operand read: partial_1_4
- "tbz x28, #0, 50f\n"
- "ld1 { v31.b }[4], [x27]\n"
- "ld1 { v28.b }[4], [x26]\n"
- "ld1 { v25.b }[4], [x25]\n"
+ "tbz x27, #0, 50f\n"
+ "ld1 { v31.b }[4], [x26]\n"
+ "ld1 { v28.b }[4], [x25]\n"
+ "ld1 { v25.b }[4], [x24]\n"
"b 50f\n"
"48:" // Height 3: Multiply loop: Ragged operand read: partial_2_0
- "tbz x28, #1, 49f\n"
- "ldr h31, [x27], #0x2\n"
- "ldr h28, [x26], #0x2\n"
- "ldr h25, [x25], #0x2\n"
- "tbz x28, #0, 50f\n"
- "ld1 { v31.b }[2], [x27]\n"
- "ld1 { v28.b }[2], [x26]\n"
- "ld1 { v25.b }[2], [x25]\n"
+ "tbz x27, #1, 49f\n"
+ "ldr h31, [x26], #0x2\n"
+ "ldr h28, [x25], #0x2\n"
+ "ldr h25, [x24], #0x2\n"
+ "tbz x27, #0, 50f\n"
+ "ld1 { v31.b }[2], [x26]\n"
+ "ld1 { v28.b }[2], [x25]\n"
+ "ld1 { v25.b }[2], [x24]\n"
"b 50f\n"
"49:" // Height 3: Multiply loop: Ragged operand read: partial_1_0
- "ldr b31, [x27, #0x0]\n"
- "ldr b28, [x26, #0x0]\n"
- "ldr b25, [x25, #0x0]\n"
+ "ldr b31, [x26, #0x0]\n"
+ "ldr b28, [x25, #0x0]\n"
+ "ldr b25, [x24, #0x0]\n"
"50:" // Height 3: Multiply loop: Ragged operand read: Done
"sadalp v1.8h, v31.16b\n"
"sadalp v30.8h, v28.16b\n"
"sadalp v27.8h, v25.16b\n"
"51:" // Height 3: Multiply loop: No odd multiplies
- "add x9, x9, #0x1\n"
- "cmp x9, x21\n"
+ "add x28, x28, #0x1\n"
+ "cmp x28, x20\n"
"bne 36b\n"
"sadalp v0.4s, v1.8h\n"
"sadalp v29.4s, v30.8h\n"
- "sadalp v26.4s, v27.8h\n"
"addp v0.4s, v0.4s, v29.4s\n"
- "addp v26.4s, v26.4s, v26.4s\n"
+ "sadalp v26.4s, v27.8h\n"
"addp v0.4s, v0.4s, v0.4s\n"
"addp v26.4s, v26.4s, v26.4s\n"
"mul v0.4s, v0.4s, v2.4s\n"
"str d0, [%x[out_ptr]], #0x8\n"
+ "addp v26.4s, v26.4s, v26.4s\n"
"mul v26.4s, v26.4s, v2.4s\n"
"str s26, [%x[out_ptr]], #0x4\n"
"b 104f\n"
"52:" // Height 4
"movi v1.8h, #0x0\n"
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "mov x9, #0x0\n"
"movi v0.4s, #0x0\n"
- "mov x10, #0x0\n"
- "ldr w21, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "mov x28, #0x0\n"
"movi v30.8h, #0x0\n"
"movi v29.4s, #0x0\n"
- "mov x9, #0x0\n"
"movi v27.8h, #0x0\n"
"movi v26.4s, #0x0\n"
"movi v24.8h, #0x0\n"
"movi v23.4s, #0x0\n"
"53:" // Height 4: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w28, [x20, x9, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w27, [x19, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 54f\n"
- "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
- "add x20, x20, %x[input_offset], LSL #3\n"
- "ldr x27, [x20, #0x0]\n"
- "ldr x26, [x20, #0x8]\n"
- "ldr x25, [x20, #0x10]\n"
- "ldr x24, [x20, #0x18]\n"
- "cbnz x9, 55f\n"
- "ldr w20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x27, x27, x20\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
+ "ldr x19, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x19, x19, %x[input_offset], LSL #3\n"
+ "ldr x26, [x19, #0x0]\n"
+ "ldr x25, [x19, #0x8]\n"
+ "ldr x24, [x19, #0x10]\n"
+ "ldr x23, [x19, #0x18]\n"
+ "cbnz x28, 55f\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x26, x26, x19\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
"b 55f\n"
"54:" // Height 4: setup direct input
- "mov x27, %x[input_ptr]\n"
- "add x26, x27, %x[input_offset]\n"
+ "mov x26, %x[input_ptr]\n"
"add x25, x26, %x[input_offset]\n"
"add x24, x25, %x[input_offset]\n"
+ "add x23, x24, %x[input_offset]\n"
"55:" // Height 4: input setup done
- "cmp x28, #0x10\n"
+ "cmp x27, #0x10\n"
"blt 59f\n"
- "cmp x28, #0x20\n"
+ "cmp x27, #0x20\n"
"blt 58f\n"
"56:" // Height 4: Multiply loop: Main loop head
- "ldr q31, [x27, #0x0]\n"
- "ldr q28, [x26, #0x0]\n"
- "cmp x10, #0x7e\n"
- "add x27, x27, #0x10\n"
- "ldr q25, [x25, #0x0]\n"
- "ldr q22, [x24, #0x0]\n"
+ "ldr q31, [x26, #0x0]\n"
+ "ldr q28, [x25, #0x0]\n"
+ "ldr q25, [x24, #0x0]\n"
+ "ldr q22, [x23, #0x0]\n"
+ "cmp x9, #0x7e\n"
"add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
"add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
"blt 57f\n"
"sadalp v0.4s, v1.8h\n"
"movi v1.8h, #0x0\n"
- "mov x10, #0x0\n"
"sadalp v29.4s, v30.8h\n"
"movi v30.8h, #0x0\n"
"sadalp v26.4s, v27.8h\n"
"movi v27.8h, #0x0\n"
"sadalp v23.4s, v24.8h\n"
"movi v24.8h, #0x0\n"
+ "mov x9, #0x0\n"
"57:" // Height 4: Multiply loop: unique 4: no collapse
- "sub x28, x28, #0x10\n"
- "cmp x28, #0x20\n"
"sadalp v1.8h, v31.16b\n"
"sadalp v30.8h, v28.16b\n"
"sadalp v27.8h, v25.16b\n"
"sadalp v24.8h, v22.16b\n"
- "add x10, x10, #0x1\n"
+ "add x9, x9, #0x1\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x20\n"
"bge 56b\n"
"58:" // Height 4: Multiply loop: Single iteration only
- "ldr q31, [x27, #0x0]\n"
- "ldr q28, [x26, #0x0]\n"
- "sub x28, x28, #0x10\n"
+ "sub x27, x27, #0x10\n"
+ "ldr q31, [x26, #0x0]\n"
+ "ldr q28, [x25, #0x0]\n"
+ "ldr q25, [x24, #0x0]\n"
+ "ldr q22, [x23, #0x0]\n"
+ "add x26, x26, #0x10\n"
"sadalp v1.8h, v31.16b\n"
- "ldr q25, [x25, #0x0]\n"
- "ldr q22, [x24, #0x0]\n"
"sadalp v30.8h, v28.16b\n"
"sadalp v27.8h, v25.16b\n"
"sadalp v24.8h, v22.16b\n"
- "add x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
"add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
"59:" // Height 4: Multiply loop: Main loop skip
- "cbz x28, 68f\n"
- "tbz x28, #3, 63f\n"
- "ldr d31, [x27], #0x8\n"
- "ldr d28, [x26], #0x8\n"
- "ldr d25, [x25], #0x8\n"
- "ldr d22, [x24], #0x8\n"
- "tbz x28, #2, 61f\n"
- "ld1 { v31.s }[2], [x27], #0x4\n"
- "ld1 { v28.s }[2], [x26], #0x4\n"
- "ld1 { v25.s }[2], [x25], #0x4\n"
- "ld1 { v22.s }[2], [x24], #0x4\n"
- "tbz x28, #1, 60f\n"
- "ld1 { v31.h }[6], [x27], #0x2\n"
- "ld1 { v28.h }[6], [x26], #0x2\n"
- "ld1 { v25.h }[6], [x25], #0x2\n"
- "ld1 { v22.h }[6], [x24], #0x2\n"
- "tbz x28, #0, 67f\n"
- "ld1 { v31.b }[14], [x27]\n"
- "ld1 { v28.b }[14], [x26]\n"
- "ld1 { v25.b }[14], [x25]\n"
- "ld1 { v22.b }[14], [x24]\n"
+ "cbz x27, 68f\n"
+ "tbz x27, #3, 63f\n"
+ "ldr d31, [x26], #0x8\n"
+ "ldr d28, [x25], #0x8\n"
+ "ldr d25, [x24], #0x8\n"
+ "ldr d22, [x23], #0x8\n"
+ "tbz x27, #2, 61f\n"
+ "ld1 { v31.s }[2], [x26], #0x4\n"
+ "ld1 { v28.s }[2], [x25], #0x4\n"
+ "ld1 { v25.s }[2], [x24], #0x4\n"
+ "ld1 { v22.s }[2], [x23], #0x4\n"
+ "tbz x27, #1, 60f\n"
+ "ld1 { v31.h }[6], [x26], #0x2\n"
+ "ld1 { v28.h }[6], [x25], #0x2\n"
+ "ld1 { v25.h }[6], [x24], #0x2\n"
+ "ld1 { v22.h }[6], [x23], #0x2\n"
+ "tbz x27, #0, 67f\n"
+ "ld1 { v31.b }[14], [x26]\n"
+ "ld1 { v28.b }[14], [x25]\n"
+ "ld1 { v25.b }[14], [x24]\n"
+ "ld1 { v22.b }[14], [x23]\n"
"b 67f\n"
"60:" // Height 4: Multiply loop: Ragged operand read: partial_1_12
- "tbz x28, #0, 67f\n"
- "ld1 { v31.b }[12], [x27]\n"
- "ld1 { v28.b }[12], [x26]\n"
- "ld1 { v25.b }[12], [x25]\n"
- "ld1 { v22.b }[12], [x24]\n"
+ "tbz x27, #0, 67f\n"
+ "ld1 { v31.b }[12], [x26]\n"
+ "ld1 { v28.b }[12], [x25]\n"
+ "ld1 { v25.b }[12], [x24]\n"
+ "ld1 { v22.b }[12], [x23]\n"
"b 67f\n"
"61:" // Height 4: Multiply loop: Ragged operand read: partial_2_8
- "tbz x28, #1, 62f\n"
- "ld1 { v31.h }[4], [x27], #0x2\n"
- "ld1 { v28.h }[4], [x26], #0x2\n"
- "ld1 { v25.h }[4], [x25], #0x2\n"
- "ld1 { v22.h }[4], [x24], #0x2\n"
- "tbz x28, #0, 67f\n"
- "ld1 { v31.b }[10], [x27]\n"
- "ld1 { v28.b }[10], [x26]\n"
- "ld1 { v25.b }[10], [x25]\n"
- "ld1 { v22.b }[10], [x24]\n"
+ "tbz x27, #1, 62f\n"
+ "ld1 { v31.h }[4], [x26], #0x2\n"
+ "ld1 { v28.h }[4], [x25], #0x2\n"
+ "ld1 { v25.h }[4], [x24], #0x2\n"
+ "ld1 { v22.h }[4], [x23], #0x2\n"
+ "tbz x27, #0, 67f\n"
+ "ld1 { v31.b }[10], [x26]\n"
+ "ld1 { v28.b }[10], [x25]\n"
+ "ld1 { v25.b }[10], [x24]\n"
+ "ld1 { v22.b }[10], [x23]\n"
"b 67f\n"
"62:" // Height 4: Multiply loop: Ragged operand read: partial_1_8
- "tbz x28, #0, 67f\n"
- "ld1 { v31.b }[8], [x27]\n"
- "ld1 { v28.b }[8], [x26]\n"
- "ld1 { v25.b }[8], [x25]\n"
- "ld1 { v22.b }[8], [x24]\n"
+ "tbz x27, #0, 67f\n"
+ "ld1 { v31.b }[8], [x26]\n"
+ "ld1 { v28.b }[8], [x25]\n"
+ "ld1 { v25.b }[8], [x24]\n"
+ "ld1 { v22.b }[8], [x23]\n"
"b 67f\n"
"63:" // Height 4: Multiply loop: Ragged operand read: partial_4_0
- "tbz x28, #2, 65f\n"
- "ldr s31, [x27], #0x4\n"
- "ldr s28, [x26], #0x4\n"
- "ldr s25, [x25], #0x4\n"
- "ldr s22, [x24], #0x4\n"
- "tbz x28, #1, 64f\n"
- "ld1 { v31.h }[2], [x27], #0x2\n"
- "ld1 { v28.h }[2], [x26], #0x2\n"
- "ld1 { v25.h }[2], [x25], #0x2\n"
- "ld1 { v22.h }[2], [x24], #0x2\n"
- "tbz x28, #0, 67f\n"
- "ld1 { v31.b }[6], [x27]\n"
- "ld1 { v28.b }[6], [x26]\n"
- "ld1 { v25.b }[6], [x25]\n"
- "ld1 { v22.b }[6], [x24]\n"
+ "tbz x27, #2, 65f\n"
+ "ldr s31, [x26], #0x4\n"
+ "ldr s28, [x25], #0x4\n"
+ "ldr s25, [x24], #0x4\n"
+ "ldr s22, [x23], #0x4\n"
+ "tbz x27, #1, 64f\n"
+ "ld1 { v31.h }[2], [x26], #0x2\n"
+ "ld1 { v28.h }[2], [x25], #0x2\n"
+ "ld1 { v25.h }[2], [x24], #0x2\n"
+ "ld1 { v22.h }[2], [x23], #0x2\n"
+ "tbz x27, #0, 67f\n"
+ "ld1 { v31.b }[6], [x26]\n"
+ "ld1 { v28.b }[6], [x25]\n"
+ "ld1 { v25.b }[6], [x24]\n"
+ "ld1 { v22.b }[6], [x23]\n"
"b 67f\n"
"64:" // Height 4: Multiply loop: Ragged operand read: partial_1_4
- "tbz x28, #0, 67f\n"
- "ld1 { v31.b }[4], [x27]\n"
- "ld1 { v28.b }[4], [x26]\n"
- "ld1 { v25.b }[4], [x25]\n"
- "ld1 { v22.b }[4], [x24]\n"
+ "tbz x27, #0, 67f\n"
+ "ld1 { v31.b }[4], [x26]\n"
+ "ld1 { v28.b }[4], [x25]\n"
+ "ld1 { v25.b }[4], [x24]\n"
+ "ld1 { v22.b }[4], [x23]\n"
"b 67f\n"
"65:" // Height 4: Multiply loop: Ragged operand read: partial_2_0
- "tbz x28, #1, 66f\n"
- "ldr h31, [x27], #0x2\n"
- "ldr h28, [x26], #0x2\n"
- "ldr h25, [x25], #0x2\n"
- "ldr h22, [x24], #0x2\n"
- "tbz x28, #0, 67f\n"
- "ld1 { v31.b }[2], [x27]\n"
- "ld1 { v28.b }[2], [x26]\n"
- "ld1 { v25.b }[2], [x25]\n"
- "ld1 { v22.b }[2], [x24]\n"
+ "tbz x27, #1, 66f\n"
+ "ldr h31, [x26], #0x2\n"
+ "ldr h28, [x25], #0x2\n"
+ "ldr h25, [x24], #0x2\n"
+ "ldr h22, [x23], #0x2\n"
+ "tbz x27, #0, 67f\n"
+ "ld1 { v31.b }[2], [x26]\n"
+ "ld1 { v28.b }[2], [x25]\n"
+ "ld1 { v25.b }[2], [x24]\n"
+ "ld1 { v22.b }[2], [x23]\n"
"b 67f\n"
"66:" // Height 4: Multiply loop: Ragged operand read: partial_1_0
- "ldr b31, [x27, #0x0]\n"
- "ldr b28, [x26, #0x0]\n"
- "ldr b25, [x25, #0x0]\n"
- "ldr b22, [x24, #0x0]\n"
+ "ldr b31, [x26, #0x0]\n"
+ "ldr b28, [x25, #0x0]\n"
+ "ldr b25, [x24, #0x0]\n"
+ "ldr b22, [x23, #0x0]\n"
"67:" // Height 4: Multiply loop: Ragged operand read: Done
"sadalp v1.8h, v31.16b\n"
"sadalp v30.8h, v28.16b\n"
"sadalp v27.8h, v25.16b\n"
"sadalp v24.8h, v22.16b\n"
"68:" // Height 4: Multiply loop: No odd multiplies
- "add x9, x9, #0x1\n"
- "cmp x9, x21\n"
+ "add x28, x28, #0x1\n"
+ "cmp x28, x20\n"
"bne 53b\n"
"sadalp v0.4s, v1.8h\n"
"sadalp v29.4s, v30.8h\n"
+ "addp v0.4s, v0.4s, v29.4s\n"
"sadalp v26.4s, v27.8h\n"
"sadalp v23.4s, v24.8h\n"
- "addp v0.4s, v0.4s, v29.4s\n"
"addp v29.4s, v26.4s, v23.4s\n"
"addp v0.4s, v0.4s, v29.4s\n"
"mul v0.4s, v0.4s, v2.4s\n"
@@ -661,12 +661,12 @@ void row_sums_indirect(
"b 104f\n"
"69:" // Height 5
"movi v1.8h, #0x0\n"
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "mov x9, #0x0\n"
"movi v0.4s, #0x0\n"
- "mov x10, #0x0\n"
- "ldr w21, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "mov x28, #0x0\n"
"movi v30.8h, #0x0\n"
"movi v29.4s, #0x0\n"
- "mov x9, #0x0\n"
"movi v27.8h, #0x0\n"
"movi v26.4s, #0x0\n"
"movi v24.8h, #0x0\n"
@@ -674,51 +674,50 @@ void row_sums_indirect(
"movi v21.8h, #0x0\n"
"movi v20.4s, #0x0\n"
"70:" // Height 5: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w28, [x20, x9, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w27, [x19, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 71f\n"
- "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
- "add x20, x20, %x[input_offset], LSL #3\n"
- "ldr x27, [x20, #0x0]\n"
- "ldr x26, [x20, #0x8]\n"
- "ldr x25, [x20, #0x10]\n"
- "ldr x24, [x20, #0x18]\n"
- "ldr x23, [x20, #0x20]\n"
- "cbnz x9, 72f\n"
- "ldr w20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x27, x27, x20\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
+ "ldr x19, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x19, x19, %x[input_offset], LSL #3\n"
+ "ldr x26, [x19, #0x0]\n"
+ "ldr x25, [x19, #0x8]\n"
+ "ldr x24, [x19, #0x10]\n"
+ "ldr x23, [x19, #0x18]\n"
+ "ldr x22, [x19, #0x20]\n"
+ "cbnz x28, 72f\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x26, x26, x19\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
"b 72f\n"
"71:" // Height 5: setup direct input
- "mov x27, %x[input_ptr]\n"
- "add x26, x27, %x[input_offset]\n"
+ "mov x26, %x[input_ptr]\n"
"add x25, x26, %x[input_offset]\n"
"add x24, x25, %x[input_offset]\n"
"add x23, x24, %x[input_offset]\n"
+ "add x22, x23, %x[input_offset]\n"
"72:" // Height 5: input setup done
- "cmp x28, #0x10\n"
+ "cmp x27, #0x10\n"
"blt 76f\n"
- "cmp x28, #0x20\n"
+ "cmp x27, #0x20\n"
"blt 75f\n"
"73:" // Height 5: Multiply loop: Main loop head
- "ldr q31, [x27, #0x0]\n"
- "ldr q28, [x26, #0x0]\n"
- "cmp x10, #0x7e\n"
- "add x27, x27, #0x10\n"
- "ldr q25, [x25, #0x0]\n"
- "ldr q22, [x24, #0x0]\n"
+ "ldr q31, [x26, #0x0]\n"
+ "ldr q28, [x25, #0x0]\n"
+ "ldr q25, [x24, #0x0]\n"
+ "ldr q22, [x23, #0x0]\n"
+ "ldr q19, [x22, #0x0]\n"
+ "cmp x9, #0x7e\n"
"add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
- "ldr q19, [x23, #0x0]\n"
"add x24, x24, #0x10\n"
"add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
"blt 74f\n"
"sadalp v0.4s, v1.8h\n"
"movi v1.8h, #0x0\n"
- "mov x10, #0x0\n"
"sadalp v29.4s, v30.8h\n"
"movi v30.8h, #0x0\n"
"sadalp v26.4s, v27.8h\n"
@@ -727,138 +726,139 @@ void row_sums_indirect(
"movi v24.8h, #0x0\n"
"sadalp v20.4s, v21.8h\n"
"movi v21.8h, #0x0\n"
+ "mov x9, #0x0\n"
"74:" // Height 5: Multiply loop: unique 5: no collapse
- "sub x28, x28, #0x10\n"
- "cmp x28, #0x20\n"
"sadalp v1.8h, v31.16b\n"
"sadalp v30.8h, v28.16b\n"
"sadalp v27.8h, v25.16b\n"
"sadalp v24.8h, v22.16b\n"
- "add x10, x10, #0x1\n"
"sadalp v21.8h, v19.16b\n"
+ "add x9, x9, #0x1\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x20\n"
"bge 73b\n"
"75:" // Height 5: Multiply loop: Single iteration only
- "ldr q31, [x27, #0x0]\n"
- "ldr q28, [x26, #0x0]\n"
- "sub x28, x28, #0x10\n"
+ "sub x27, x27, #0x10\n"
+ "ldr q31, [x26, #0x0]\n"
+ "ldr q28, [x25, #0x0]\n"
+ "ldr q25, [x24, #0x0]\n"
+ "ldr q22, [x23, #0x0]\n"
+ "ldr q19, [x22, #0x0]\n"
"sadalp v1.8h, v31.16b\n"
- "ldr q25, [x25, #0x0]\n"
- "ldr q22, [x24, #0x0]\n"
"sadalp v30.8h, v28.16b\n"
"sadalp v27.8h, v25.16b\n"
- "ldr q19, [x23, #0x0]\n"
"sadalp v24.8h, v22.16b\n"
"sadalp v21.8h, v19.16b\n"
- "add x27, x27, #0x10\n"
"add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
"add x24, x24, #0x10\n"
"add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
"76:" // Height 5: Multiply loop: Main loop skip
- "cbz x28, 85f\n"
- "tbz x28, #3, 80f\n"
- "ldr d31, [x27], #0x8\n"
- "ldr d28, [x26], #0x8\n"
- "ldr d25, [x25], #0x8\n"
- "ldr d22, [x24], #0x8\n"
- "ldr d19, [x23], #0x8\n"
- "tbz x28, #2, 78f\n"
- "ld1 { v31.s }[2], [x27], #0x4\n"
- "ld1 { v28.s }[2], [x26], #0x4\n"
- "ld1 { v25.s }[2], [x25], #0x4\n"
- "ld1 { v22.s }[2], [x24], #0x4\n"
- "ld1 { v19.s }[2], [x23], #0x4\n"
- "tbz x28, #1, 77f\n"
- "ld1 { v31.h }[6], [x27], #0x2\n"
- "ld1 { v28.h }[6], [x26], #0x2\n"
- "ld1 { v25.h }[6], [x25], #0x2\n"
- "ld1 { v22.h }[6], [x24], #0x2\n"
- "ld1 { v19.h }[6], [x23], #0x2\n"
- "tbz x28, #0, 84f\n"
- "ld1 { v31.b }[14], [x27]\n"
- "ld1 { v28.b }[14], [x26]\n"
- "ld1 { v25.b }[14], [x25]\n"
- "ld1 { v22.b }[14], [x24]\n"
- "ld1 { v19.b }[14], [x23]\n"
+ "cbz x27, 85f\n"
+ "tbz x27, #3, 80f\n"
+ "ldr d31, [x26], #0x8\n"
+ "ldr d28, [x25], #0x8\n"
+ "ldr d25, [x24], #0x8\n"
+ "ldr d22, [x23], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
+ "tbz x27, #2, 78f\n"
+ "ld1 { v31.s }[2], [x26], #0x4\n"
+ "ld1 { v28.s }[2], [x25], #0x4\n"
+ "ld1 { v25.s }[2], [x24], #0x4\n"
+ "ld1 { v22.s }[2], [x23], #0x4\n"
+ "ld1 { v19.s }[2], [x22], #0x4\n"
+ "tbz x27, #1, 77f\n"
+ "ld1 { v31.h }[6], [x26], #0x2\n"
+ "ld1 { v28.h }[6], [x25], #0x2\n"
+ "ld1 { v25.h }[6], [x24], #0x2\n"
+ "ld1 { v22.h }[6], [x23], #0x2\n"
+ "ld1 { v19.h }[6], [x22], #0x2\n"
+ "tbz x27, #0, 84f\n"
+ "ld1 { v31.b }[14], [x26]\n"
+ "ld1 { v28.b }[14], [x25]\n"
+ "ld1 { v25.b }[14], [x24]\n"
+ "ld1 { v22.b }[14], [x23]\n"
+ "ld1 { v19.b }[14], [x22]\n"
"b 84f\n"
"77:" // Height 5: Multiply loop: Ragged operand read: partial_1_12
- "tbz x28, #0, 84f\n"
- "ld1 { v31.b }[12], [x27]\n"
- "ld1 { v28.b }[12], [x26]\n"
- "ld1 { v25.b }[12], [x25]\n"
- "ld1 { v22.b }[12], [x24]\n"
- "ld1 { v19.b }[12], [x23]\n"
+ "tbz x27, #0, 84f\n"
+ "ld1 { v31.b }[12], [x26]\n"
+ "ld1 { v28.b }[12], [x25]\n"
+ "ld1 { v25.b }[12], [x24]\n"
+ "ld1 { v22.b }[12], [x23]\n"
+ "ld1 { v19.b }[12], [x22]\n"
"b 84f\n"
"78:" // Height 5: Multiply loop: Ragged operand read: partial_2_8
- "tbz x28, #1, 79f\n"
- "ld1 { v31.h }[4], [x27], #0x2\n"
- "ld1 { v28.h }[4], [x26], #0x2\n"
- "ld1 { v25.h }[4], [x25], #0x2\n"
- "ld1 { v22.h }[4], [x24], #0x2\n"
- "ld1 { v19.h }[4], [x23], #0x2\n"
- "tbz x28, #0, 84f\n"
- "ld1 { v31.b }[10], [x27]\n"
- "ld1 { v28.b }[10], [x26]\n"
- "ld1 { v25.b }[10], [x25]\n"
- "ld1 { v22.b }[10], [x24]\n"
- "ld1 { v19.b }[10], [x23]\n"
+ "tbz x27, #1, 79f\n"
+ "ld1 { v31.h }[4], [x26], #0x2\n"
+ "ld1 { v28.h }[4], [x25], #0x2\n"
+ "ld1 { v25.h }[4], [x24], #0x2\n"
+ "ld1 { v22.h }[4], [x23], #0x2\n"
+ "ld1 { v19.h }[4], [x22], #0x2\n"
+ "tbz x27, #0, 84f\n"
+ "ld1 { v31.b }[10], [x26]\n"
+ "ld1 { v28.b }[10], [x25]\n"
+ "ld1 { v25.b }[10], [x24]\n"
+ "ld1 { v22.b }[10], [x23]\n"
+ "ld1 { v19.b }[10], [x22]\n"
"b 84f\n"
"79:" // Height 5: Multiply loop: Ragged operand read: partial_1_8
- "tbz x28, #0, 84f\n"
- "ld1 { v31.b }[8], [x27]\n"
- "ld1 { v28.b }[8], [x26]\n"
- "ld1 { v25.b }[8], [x25]\n"
- "ld1 { v22.b }[8], [x24]\n"
- "ld1 { v19.b }[8], [x23]\n"
+ "tbz x27, #0, 84f\n"
+ "ld1 { v31.b }[8], [x26]\n"
+ "ld1 { v28.b }[8], [x25]\n"
+ "ld1 { v25.b }[8], [x24]\n"
+ "ld1 { v22.b }[8], [x23]\n"
+ "ld1 { v19.b }[8], [x22]\n"
"b 84f\n"
"80:" // Height 5: Multiply loop: Ragged operand read: partial_4_0
- "tbz x28, #2, 82f\n"
- "ldr s31, [x27], #0x4\n"
- "ldr s28, [x26], #0x4\n"
- "ldr s25, [x25], #0x4\n"
- "ldr s22, [x24], #0x4\n"
- "ldr s19, [x23], #0x4\n"
- "tbz x28, #1, 81f\n"
- "ld1 { v31.h }[2], [x27], #0x2\n"
- "ld1 { v28.h }[2], [x26], #0x2\n"
- "ld1 { v25.h }[2], [x25], #0x2\n"
- "ld1 { v22.h }[2], [x24], #0x2\n"
- "ld1 { v19.h }[2], [x23], #0x2\n"
- "tbz x28, #0, 84f\n"
- "ld1 { v31.b }[6], [x27]\n"
- "ld1 { v28.b }[6], [x26]\n"
- "ld1 { v25.b }[6], [x25]\n"
- "ld1 { v22.b }[6], [x24]\n"
- "ld1 { v19.b }[6], [x23]\n"
+ "tbz x27, #2, 82f\n"
+ "ldr s31, [x26], #0x4\n"
+ "ldr s28, [x25], #0x4\n"
+ "ldr s25, [x24], #0x4\n"
+ "ldr s22, [x23], #0x4\n"
+ "ldr s19, [x22], #0x4\n"
+ "tbz x27, #1, 81f\n"
+ "ld1 { v31.h }[2], [x26], #0x2\n"
+ "ld1 { v28.h }[2], [x25], #0x2\n"
+ "ld1 { v25.h }[2], [x24], #0x2\n"
+ "ld1 { v22.h }[2], [x23], #0x2\n"
+ "ld1 { v19.h }[2], [x22], #0x2\n"
+ "tbz x27, #0, 84f\n"
+ "ld1 { v31.b }[6], [x26]\n"
+ "ld1 { v28.b }[6], [x25]\n"
+ "ld1 { v25.b }[6], [x24]\n"
+ "ld1 { v22.b }[6], [x23]\n"
+ "ld1 { v19.b }[6], [x22]\n"
"b 84f\n"
"81:" // Height 5: Multiply loop: Ragged operand read: partial_1_4
- "tbz x28, #0, 84f\n"
- "ld1 { v31.b }[4], [x27]\n"
- "ld1 { v28.b }[4], [x26]\n"
- "ld1 { v25.b }[4], [x25]\n"
- "ld1 { v22.b }[4], [x24]\n"
- "ld1 { v19.b }[4], [x23]\n"
+ "tbz x27, #0, 84f\n"
+ "ld1 { v31.b }[4], [x26]\n"
+ "ld1 { v28.b }[4], [x25]\n"
+ "ld1 { v25.b }[4], [x24]\n"
+ "ld1 { v22.b }[4], [x23]\n"
+ "ld1 { v19.b }[4], [x22]\n"
"b 84f\n"
"82:" // Height 5: Multiply loop: Ragged operand read: partial_2_0
- "tbz x28, #1, 83f\n"
- "ldr h31, [x27], #0x2\n"
- "ldr h28, [x26], #0x2\n"
- "ldr h25, [x25], #0x2\n"
- "ldr h22, [x24], #0x2\n"
- "ldr h19, [x23], #0x2\n"
- "tbz x28, #0, 84f\n"
- "ld1 { v31.b }[2], [x27]\n"
- "ld1 { v28.b }[2], [x26]\n"
- "ld1 { v25.b }[2], [x25]\n"
- "ld1 { v22.b }[2], [x24]\n"
- "ld1 { v19.b }[2], [x23]\n"
+ "tbz x27, #1, 83f\n"
+ "ldr h31, [x26], #0x2\n"
+ "ldr h28, [x25], #0x2\n"
+ "ldr h25, [x24], #0x2\n"
+ "ldr h22, [x23], #0x2\n"
+ "ldr h19, [x22], #0x2\n"
+ "tbz x27, #0, 84f\n"
+ "ld1 { v31.b }[2], [x26]\n"
+ "ld1 { v28.b }[2], [x25]\n"
+ "ld1 { v25.b }[2], [x24]\n"
+ "ld1 { v22.b }[2], [x23]\n"
+ "ld1 { v19.b }[2], [x22]\n"
"b 84f\n"
"83:" // Height 5: Multiply loop: Ragged operand read: partial_1_0
- "ldr b31, [x27, #0x0]\n"
- "ldr b28, [x26, #0x0]\n"
- "ldr b25, [x25, #0x0]\n"
- "ldr b22, [x24, #0x0]\n"
- "ldr b19, [x23, #0x0]\n"
+ "ldr b31, [x26, #0x0]\n"
+ "ldr b28, [x25, #0x0]\n"
+ "ldr b25, [x24, #0x0]\n"
+ "ldr b22, [x23, #0x0]\n"
+ "ldr b19, [x22, #0x0]\n"
"84:" // Height 5: Multiply loop: Ragged operand read: Done
"sadalp v1.8h, v31.16b\n"
"sadalp v30.8h, v28.16b\n"
@@ -866,32 +866,32 @@ void row_sums_indirect(
"sadalp v24.8h, v22.16b\n"
"sadalp v21.8h, v19.16b\n"
"85:" // Height 5: Multiply loop: No odd multiplies
- "add x9, x9, #0x1\n"
- "cmp x9, x21\n"
+ "add x28, x28, #0x1\n"
+ "cmp x28, x20\n"
"bne 70b\n"
"sadalp v0.4s, v1.8h\n"
"sadalp v29.4s, v30.8h\n"
+ "addp v0.4s, v0.4s, v29.4s\n"
"sadalp v26.4s, v27.8h\n"
"sadalp v23.4s, v24.8h\n"
- "sadalp v20.4s, v21.8h\n"
- "addp v0.4s, v0.4s, v29.4s\n"
"addp v29.4s, v26.4s, v23.4s\n"
- "addp v20.4s, v20.4s, v20.4s\n"
+ "sadalp v20.4s, v21.8h\n"
"addp v0.4s, v0.4s, v29.4s\n"
"addp v20.4s, v20.4s, v20.4s\n"
"mul v0.4s, v0.4s, v2.4s\n"
"st1 { v0.4s }, [%x[out_ptr]], #0x10\n"
+ "addp v20.4s, v20.4s, v20.4s\n"
"mul v20.4s, v20.4s, v2.4s\n"
"str s20, [%x[out_ptr]], #0x4\n"
"b 104f\n"
"86:" // Height 6
"movi v1.8h, #0x0\n"
+ "ldr w21, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "mov x9, #0x0\n"
"movi v0.4s, #0x0\n"
- "mov x10, #0x0\n"
- "ldr w22, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "mov x28, #0x0\n"
"movi v30.8h, #0x0\n"
"movi v29.4s, #0x0\n"
- "mov x9, #0x0\n"
"movi v27.8h, #0x0\n"
"movi v26.4s, #0x0\n"
"movi v24.8h, #0x0\n"
@@ -901,56 +901,55 @@ void row_sums_indirect(
"movi v18.8h, #0x0\n"
"movi v17.4s, #0x0\n"
"87:" // Height 6: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w28, [x20, x9, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w27, [x19, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 88f\n"
- "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
- "add x20, x20, %x[input_offset], LSL #3\n"
- "ldr x27, [x20, #0x0]\n"
- "ldr x26, [x20, #0x8]\n"
- "ldr x25, [x20, #0x10]\n"
- "ldr x24, [x20, #0x18]\n"
- "ldr x23, [x20, #0x20]\n"
- "ldr x21, [x20, #0x28]\n"
- "cbnz x9, 89f\n"
- "ldr w20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x27, x27, x20\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x21, x21, x20\n"
+ "ldr x19, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x19, x19, %x[input_offset], LSL #3\n"
+ "ldr x26, [x19, #0x0]\n"
+ "ldr x25, [x19, #0x8]\n"
+ "ldr x24, [x19, #0x10]\n"
+ "ldr x23, [x19, #0x18]\n"
+ "ldr x22, [x19, #0x20]\n"
+ "ldr x20, [x19, #0x28]\n"
+ "cbnz x28, 89f\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x26, x26, x19\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x20, x20, x19\n"
"b 89f\n"
"88:" // Height 6: setup direct input
- "mov x27, %x[input_ptr]\n"
- "add x26, x27, %x[input_offset]\n"
+ "mov x26, %x[input_ptr]\n"
"add x25, x26, %x[input_offset]\n"
"add x24, x25, %x[input_offset]\n"
"add x23, x24, %x[input_offset]\n"
- "add x21, x23, %x[input_offset]\n"
+ "add x22, x23, %x[input_offset]\n"
+ "add x20, x22, %x[input_offset]\n"
"89:" // Height 6: input setup done
- "cmp x28, #0x10\n"
+ "cmp x27, #0x10\n"
"blt 93f\n"
- "cmp x28, #0x20\n"
+ "cmp x27, #0x20\n"
"blt 92f\n"
"90:" // Height 6: Multiply loop: Main loop head
- "ldr q31, [x27, #0x0]\n"
- "ldr q28, [x26, #0x0]\n"
- "cmp x10, #0x7e\n"
- "add x27, x27, #0x10\n"
- "ldr q25, [x25, #0x0]\n"
- "ldr q22, [x24, #0x0]\n"
+ "ldr q31, [x26, #0x0]\n"
+ "ldr q28, [x25, #0x0]\n"
+ "ldr q25, [x24, #0x0]\n"
+ "ldr q22, [x23, #0x0]\n"
+ "ldr q19, [x22, #0x0]\n"
+ "ldr q16, [x20, #0x0]\n"
+ "cmp x9, #0x7e\n"
"add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
- "ldr q19, [x23, #0x0]\n"
- "ldr q16, [x21, #0x0]\n"
"add x24, x24, #0x10\n"
"add x23, x23, #0x10\n"
- "add x21, x21, #0x10\n"
+ "add x22, x22, #0x10\n"
+ "add x20, x20, #0x10\n"
"blt 91f\n"
"sadalp v0.4s, v1.8h\n"
"movi v1.8h, #0x0\n"
- "mov x10, #0x0\n"
"sadalp v29.4s, v30.8h\n"
"movi v30.8h, #0x0\n"
"sadalp v26.4s, v27.8h\n"
@@ -961,157 +960,158 @@ void row_sums_indirect(
"movi v21.8h, #0x0\n"
"sadalp v17.4s, v18.8h\n"
"movi v18.8h, #0x0\n"
+ "mov x9, #0x0\n"
"91:" // Height 6: Multiply loop: unique 6: no collapse
- "sub x28, x28, #0x10\n"
- "cmp x28, #0x20\n"
"sadalp v1.8h, v31.16b\n"
"sadalp v30.8h, v28.16b\n"
"sadalp v27.8h, v25.16b\n"
"sadalp v24.8h, v22.16b\n"
- "add x10, x10, #0x1\n"
"sadalp v21.8h, v19.16b\n"
"sadalp v18.8h, v16.16b\n"
+ "add x9, x9, #0x1\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x20\n"
"bge 90b\n"
"92:" // Height 6: Multiply loop: Single iteration only
- "ldr q31, [x27, #0x0]\n"
- "ldr q28, [x26, #0x0]\n"
- "sub x28, x28, #0x10\n"
+ "sub x27, x27, #0x10\n"
+ "ldr q31, [x26, #0x0]\n"
+ "ldr q28, [x25, #0x0]\n"
+ "ldr q25, [x24, #0x0]\n"
+ "ldr q22, [x23, #0x0]\n"
+ "ldr q19, [x22, #0x0]\n"
+ "ldr q16, [x20, #0x0]\n"
"sadalp v1.8h, v31.16b\n"
- "ldr q25, [x25, #0x0]\n"
- "ldr q22, [x24, #0x0]\n"
"sadalp v30.8h, v28.16b\n"
"sadalp v27.8h, v25.16b\n"
- "ldr q19, [x23, #0x0]\n"
- "ldr q16, [x21, #0x0]\n"
"sadalp v24.8h, v22.16b\n"
"sadalp v21.8h, v19.16b\n"
"sadalp v18.8h, v16.16b\n"
- "add x27, x27, #0x10\n"
"add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
"add x24, x24, #0x10\n"
"add x23, x23, #0x10\n"
- "add x21, x21, #0x10\n"
+ "add x22, x22, #0x10\n"
+ "add x20, x20, #0x10\n"
"93:" // Height 6: Multiply loop: Main loop skip
- "cbz x28, 102f\n"
- "tbz x28, #3, 97f\n"
- "ldr d31, [x27], #0x8\n"
- "ldr d28, [x26], #0x8\n"
- "ldr d25, [x25], #0x8\n"
- "ldr d22, [x24], #0x8\n"
- "ldr d19, [x23], #0x8\n"
- "ldr d16, [x21], #0x8\n"
- "tbz x28, #2, 95f\n"
- "ld1 { v31.s }[2], [x27], #0x4\n"
- "ld1 { v28.s }[2], [x26], #0x4\n"
- "ld1 { v25.s }[2], [x25], #0x4\n"
- "ld1 { v22.s }[2], [x24], #0x4\n"
- "ld1 { v19.s }[2], [x23], #0x4\n"
- "ld1 { v16.s }[2], [x21], #0x4\n"
- "tbz x28, #1, 94f\n"
- "ld1 { v31.h }[6], [x27], #0x2\n"
- "ld1 { v28.h }[6], [x26], #0x2\n"
- "ld1 { v25.h }[6], [x25], #0x2\n"
- "ld1 { v22.h }[6], [x24], #0x2\n"
- "ld1 { v19.h }[6], [x23], #0x2\n"
- "ld1 { v16.h }[6], [x21], #0x2\n"
- "tbz x28, #0, 101f\n"
- "ld1 { v31.b }[14], [x27]\n"
- "ld1 { v28.b }[14], [x26]\n"
- "ld1 { v25.b }[14], [x25]\n"
- "ld1 { v22.b }[14], [x24]\n"
- "ld1 { v19.b }[14], [x23]\n"
- "ld1 { v16.b }[14], [x21]\n"
+ "cbz x27, 102f\n"
+ "tbz x27, #3, 97f\n"
+ "ldr d31, [x26], #0x8\n"
+ "ldr d28, [x25], #0x8\n"
+ "ldr d25, [x24], #0x8\n"
+ "ldr d22, [x23], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
+ "ldr d16, [x20], #0x8\n"
+ "tbz x27, #2, 95f\n"
+ "ld1 { v31.s }[2], [x26], #0x4\n"
+ "ld1 { v28.s }[2], [x25], #0x4\n"
+ "ld1 { v25.s }[2], [x24], #0x4\n"
+ "ld1 { v22.s }[2], [x23], #0x4\n"
+ "ld1 { v19.s }[2], [x22], #0x4\n"
+ "ld1 { v16.s }[2], [x20], #0x4\n"
+ "tbz x27, #1, 94f\n"
+ "ld1 { v31.h }[6], [x26], #0x2\n"
+ "ld1 { v28.h }[6], [x25], #0x2\n"
+ "ld1 { v25.h }[6], [x24], #0x2\n"
+ "ld1 { v22.h }[6], [x23], #0x2\n"
+ "ld1 { v19.h }[6], [x22], #0x2\n"
+ "ld1 { v16.h }[6], [x20], #0x2\n"
+ "tbz x27, #0, 101f\n"
+ "ld1 { v31.b }[14], [x26]\n"
+ "ld1 { v28.b }[14], [x25]\n"
+ "ld1 { v25.b }[14], [x24]\n"
+ "ld1 { v22.b }[14], [x23]\n"
+ "ld1 { v19.b }[14], [x22]\n"
+ "ld1 { v16.b }[14], [x20]\n"
"b 101f\n"
"94:" // Height 6: Multiply loop: Ragged operand read: partial_1_12
- "tbz x28, #0, 101f\n"
- "ld1 { v31.b }[12], [x27]\n"
- "ld1 { v28.b }[12], [x26]\n"
- "ld1 { v25.b }[12], [x25]\n"
- "ld1 { v22.b }[12], [x24]\n"
- "ld1 { v19.b }[12], [x23]\n"
- "ld1 { v16.b }[12], [x21]\n"
+ "tbz x27, #0, 101f\n"
+ "ld1 { v31.b }[12], [x26]\n"
+ "ld1 { v28.b }[12], [x25]\n"
+ "ld1 { v25.b }[12], [x24]\n"
+ "ld1 { v22.b }[12], [x23]\n"
+ "ld1 { v19.b }[12], [x22]\n"
+ "ld1 { v16.b }[12], [x20]\n"
"b 101f\n"
"95:" // Height 6: Multiply loop: Ragged operand read: partial_2_8
- "tbz x28, #1, 96f\n"
- "ld1 { v31.h }[4], [x27], #0x2\n"
- "ld1 { v28.h }[4], [x26], #0x2\n"
- "ld1 { v25.h }[4], [x25], #0x2\n"
- "ld1 { v22.h }[4], [x24], #0x2\n"
- "ld1 { v19.h }[4], [x23], #0x2\n"
- "ld1 { v16.h }[4], [x21], #0x2\n"
- "tbz x28, #0, 101f\n"
- "ld1 { v31.b }[10], [x27]\n"
- "ld1 { v28.b }[10], [x26]\n"
- "ld1 { v25.b }[10], [x25]\n"
- "ld1 { v22.b }[10], [x24]\n"
- "ld1 { v19.b }[10], [x23]\n"
- "ld1 { v16.b }[10], [x21]\n"
+ "tbz x27, #1, 96f\n"
+ "ld1 { v31.h }[4], [x26], #0x2\n"
+ "ld1 { v28.h }[4], [x25], #0x2\n"
+ "ld1 { v25.h }[4], [x24], #0x2\n"
+ "ld1 { v22.h }[4], [x23], #0x2\n"
+ "ld1 { v19.h }[4], [x22], #0x2\n"
+ "ld1 { v16.h }[4], [x20], #0x2\n"
+ "tbz x27, #0, 101f\n"
+ "ld1 { v31.b }[10], [x26]\n"
+ "ld1 { v28.b }[10], [x25]\n"
+ "ld1 { v25.b }[10], [x24]\n"
+ "ld1 { v22.b }[10], [x23]\n"
+ "ld1 { v19.b }[10], [x22]\n"
+ "ld1 { v16.b }[10], [x20]\n"
"b 101f\n"
"96:" // Height 6: Multiply loop: Ragged operand read: partial_1_8
- "tbz x28, #0, 101f\n"
- "ld1 { v31.b }[8], [x27]\n"
- "ld1 { v28.b }[8], [x26]\n"
- "ld1 { v25.b }[8], [x25]\n"
- "ld1 { v22.b }[8], [x24]\n"
- "ld1 { v19.b }[8], [x23]\n"
- "ld1 { v16.b }[8], [x21]\n"
+ "tbz x27, #0, 101f\n"
+ "ld1 { v31.b }[8], [x26]\n"
+ "ld1 { v28.b }[8], [x25]\n"
+ "ld1 { v25.b }[8], [x24]\n"
+ "ld1 { v22.b }[8], [x23]\n"
+ "ld1 { v19.b }[8], [x22]\n"
+ "ld1 { v16.b }[8], [x20]\n"
"b 101f\n"
"97:" // Height 6: Multiply loop: Ragged operand read: partial_4_0
- "tbz x28, #2, 99f\n"
- "ldr s31, [x27], #0x4\n"
- "ldr s28, [x26], #0x4\n"
- "ldr s25, [x25], #0x4\n"
- "ldr s22, [x24], #0x4\n"
- "ldr s19, [x23], #0x4\n"
- "ldr s16, [x21], #0x4\n"
- "tbz x28, #1, 98f\n"
- "ld1 { v31.h }[2], [x27], #0x2\n"
- "ld1 { v28.h }[2], [x26], #0x2\n"
- "ld1 { v25.h }[2], [x25], #0x2\n"
- "ld1 { v22.h }[2], [x24], #0x2\n"
- "ld1 { v19.h }[2], [x23], #0x2\n"
- "ld1 { v16.h }[2], [x21], #0x2\n"
- "tbz x28, #0, 101f\n"
- "ld1 { v31.b }[6], [x27]\n"
- "ld1 { v28.b }[6], [x26]\n"
- "ld1 { v25.b }[6], [x25]\n"
- "ld1 { v22.b }[6], [x24]\n"
- "ld1 { v19.b }[6], [x23]\n"
- "ld1 { v16.b }[6], [x21]\n"
+ "tbz x27, #2, 99f\n"
+ "ldr s31, [x26], #0x4\n"
+ "ldr s28, [x25], #0x4\n"
+ "ldr s25, [x24], #0x4\n"
+ "ldr s22, [x23], #0x4\n"
+ "ldr s19, [x22], #0x4\n"
+ "ldr s16, [x20], #0x4\n"
+ "tbz x27, #1, 98f\n"
+ "ld1 { v31.h }[2], [x26], #0x2\n"
+ "ld1 { v28.h }[2], [x25], #0x2\n"
+ "ld1 { v25.h }[2], [x24], #0x2\n"
+ "ld1 { v22.h }[2], [x23], #0x2\n"
+ "ld1 { v19.h }[2], [x22], #0x2\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
+ "tbz x27, #0, 101f\n"
+ "ld1 { v31.b }[6], [x26]\n"
+ "ld1 { v28.b }[6], [x25]\n"
+ "ld1 { v25.b }[6], [x24]\n"
+ "ld1 { v22.b }[6], [x23]\n"
+ "ld1 { v19.b }[6], [x22]\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 101f\n"
"98:" // Height 6: Multiply loop: Ragged operand read: partial_1_4
- "tbz x28, #0, 101f\n"
- "ld1 { v31.b }[4], [x27]\n"
- "ld1 { v28.b }[4], [x26]\n"
- "ld1 { v25.b }[4], [x25]\n"
- "ld1 { v22.b }[4], [x24]\n"
- "ld1 { v19.b }[4], [x23]\n"
- "ld1 { v16.b }[4], [x21]\n"
+ "tbz x27, #0, 101f\n"
+ "ld1 { v31.b }[4], [x26]\n"
+ "ld1 { v28.b }[4], [x25]\n"
+ "ld1 { v25.b }[4], [x24]\n"
+ "ld1 { v22.b }[4], [x23]\n"
+ "ld1 { v19.b }[4], [x22]\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 101f\n"
"99:" // Height 6: Multiply loop: Ragged operand read: partial_2_0
- "tbz x28, #1, 100f\n"
- "ldr h31, [x27], #0x2\n"
- "ldr h28, [x26], #0x2\n"
- "ldr h25, [x25], #0x2\n"
- "ldr h22, [x24], #0x2\n"
- "ldr h19, [x23], #0x2\n"
- "ldr h16, [x21], #0x2\n"
- "tbz x28, #0, 101f\n"
- "ld1 { v31.b }[2], [x27]\n"
- "ld1 { v28.b }[2], [x26]\n"
- "ld1 { v25.b }[2], [x25]\n"
- "ld1 { v22.b }[2], [x24]\n"
- "ld1 { v19.b }[2], [x23]\n"
- "ld1 { v16.b }[2], [x21]\n"
+ "tbz x27, #1, 100f\n"
+ "ldr h31, [x26], #0x2\n"
+ "ldr h28, [x25], #0x2\n"
+ "ldr h25, [x24], #0x2\n"
+ "ldr h22, [x23], #0x2\n"
+ "ldr h19, [x22], #0x2\n"
+ "ldr h16, [x20], #0x2\n"
+ "tbz x27, #0, 101f\n"
+ "ld1 { v31.b }[2], [x26]\n"
+ "ld1 { v28.b }[2], [x25]\n"
+ "ld1 { v25.b }[2], [x24]\n"
+ "ld1 { v22.b }[2], [x23]\n"
+ "ld1 { v19.b }[2], [x22]\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 101f\n"
"100:" // Height 6: Multiply loop: Ragged operand read: partial_1_0
- "ldr b31, [x27, #0x0]\n"
- "ldr b28, [x26, #0x0]\n"
- "ldr b25, [x25, #0x0]\n"
- "ldr b22, [x24, #0x0]\n"
- "ldr b19, [x23, #0x0]\n"
- "ldr b16, [x21, #0x0]\n"
+ "ldr b31, [x26, #0x0]\n"
+ "ldr b28, [x25, #0x0]\n"
+ "ldr b25, [x24, #0x0]\n"
+ "ldr b22, [x23, #0x0]\n"
+ "ldr b19, [x22, #0x0]\n"
+ "ldr b16, [x20, #0x0]\n"
"101:" // Height 6: Multiply loop: Ragged operand read: Done
"sadalp v1.8h, v31.16b\n"
"sadalp v30.8h, v28.16b\n"
@@ -1120,23 +1120,23 @@ void row_sums_indirect(
"sadalp v21.8h, v19.16b\n"
"sadalp v18.8h, v16.16b\n"
"102:" // Height 6: Multiply loop: No odd multiplies
- "add x9, x9, #0x1\n"
- "cmp x9, x22\n"
+ "add x28, x28, #0x1\n"
+ "cmp x28, x21\n"
"bne 87b\n"
"sadalp v0.4s, v1.8h\n"
"sadalp v29.4s, v30.8h\n"
- "subs %x[M], %x[M], #0x6\n"
+ "addp v0.4s, v0.4s, v29.4s\n"
"sadalp v26.4s, v27.8h\n"
"sadalp v23.4s, v24.8h\n"
+ "addp v29.4s, v26.4s, v23.4s\n"
"sadalp v20.4s, v21.8h\n"
"sadalp v17.4s, v18.8h\n"
"addp v0.4s, v0.4s, v29.4s\n"
- "addp v29.4s, v26.4s, v23.4s\n"
+ "subs %x[M], %x[M], #0x6\n"
"addp v20.4s, v20.4s, v17.4s\n"
- "addp v0.4s, v0.4s, v29.4s\n"
- "addp v20.4s, v20.4s, v20.4s\n"
"mul v0.4s, v0.4s, v2.4s\n"
"st1 { v0.4s }, [%x[out_ptr]], #0x10\n"
+ "addp v20.4s, v20.4s, v20.4s\n"
"mul v20.4s, v20.4s, v2.4s\n"
"str d20, [%x[out_ptr]], #0x8\n"
"beq 104f\n"
@@ -1144,14 +1144,14 @@ void row_sums_indirect(
"add %x[input_offset], %x[input_offset], #0x6\n"
"b 1b\n"
"103:" // Update direct input
- "mov x20, #0x6\n"
- "madd %x[input_ptr], x20, %x[input_offset], %x[input_ptr]\n"
+ "mov x19, #0x6\n"
+ "madd %x[input_ptr], x19, %x[input_offset], %x[input_ptr]\n"
"b 1b\n"
"104:" // Exit
- : [M] "+&r" (M), [input_offset] "+&r" (input_offset), [input_ptr] "+&r" (input_ptr), [out_ptr] "+&r" (out_ptr)
+ : [M] "+r" (M), [input_offset] "+r" (input_offset), [input_ptr] "+r" (input_ptr), [out_ptr] "+r" (out_ptr)
: [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [flags] "r" (flags), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [qp] "r" (qp)
- : "cc", "memory", "v0", "v1", "v2", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/rowsum_indirect_u8.cpp b/src/core/NEON/kernels/arm_gemm/rowsum_indirect_u8.cpp
index ff95507d79..f5709d92ac 100644
--- a/src/core/NEON/kernels/arm_gemm/rowsum_indirect_u8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/rowsum_indirect_u8.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020, 2023 Arm Limited.
+ * Copyright (c) 2019-2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef __aarch64__
@@ -63,8 +63,8 @@ void row_sums_indirect(
ka.string_lengths = string_lengths;
__asm__ __volatile__(
- "add x20, %x[qp], %[b_offset]\n"
- "ld1r { v2.4s }, [x20]\n"
+ "add x19, %x[qp], %[b_offset]\n"
+ "ld1r { v2.4s }, [x19]\n"
"neg v2.4s, v2.4s\n"
"1:" // Row loop
"cmp %x[M], #0x6\n"
@@ -76,97 +76,97 @@ void row_sums_indirect(
"bgt 35f\n"
"beq 18f\n"
"movi v1.8h, #0x0\n"
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"movi v0.4s, #0x0\n"
- "mov x10, #0x0\n"
- "ldr w21, [%x[args_ptr], %[offsetof_num_strings]]\n"
"mov x9, #0x0\n"
+ "mov x28, #0x0\n"
"2:" // Height 1: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w28, [x20, x9, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w27, [x19, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 3f\n"
- "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
- "add x20, x20, %x[input_offset], LSL #3\n"
- "ldr x27, [x20, #0x0]\n"
- "cbnz x9, 4f\n"
- "ldr w20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x27, x27, x20\n"
+ "ldr x19, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x19, x19, %x[input_offset], LSL #3\n"
+ "ldr x26, [x19, #0x0]\n"
+ "cbnz x28, 4f\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x26, x26, x19\n"
"b 4f\n"
"3:" // Height 1: setup direct input
- "mov x27, %x[input_ptr]\n"
+ "mov x26, %x[input_ptr]\n"
"4:" // Height 1: input setup done
- "cmp x28, #0x10\n"
+ "cmp x27, #0x10\n"
"blt 8f\n"
- "cmp x28, #0x20\n"
+ "cmp x27, #0x20\n"
"blt 7f\n"
"5:" // Height 1: Multiply loop: Main loop head
- "ldr q31, [x27, #0x0]\n"
- "cmp x10, #0x7e\n"
- "add x27, x27, #0x10\n"
+ "ldr q31, [x26, #0x0]\n"
+ "cmp x9, #0x7e\n"
+ "add x26, x26, #0x10\n"
"blt 6f\n"
"uadalp v0.4s, v1.8h\n"
"movi v1.8h, #0x0\n"
- "mov x10, #0x0\n"
+ "mov x9, #0x0\n"
"6:" // Height 1: Multiply loop: unique 1: no collapse
- "sub x28, x28, #0x10\n"
- "cmp x28, #0x20\n"
"uadalp v1.8h, v31.16b\n"
- "add x10, x10, #0x1\n"
+ "add x9, x9, #0x1\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x20\n"
"bge 5b\n"
"7:" // Height 1: Multiply loop: Single iteration only
- "ldr q31, [x27, #0x0]\n"
- "sub x28, x28, #0x10\n"
+ "sub x27, x27, #0x10\n"
+ "ldr q31, [x26, #0x0]\n"
+ "add x26, x26, #0x10\n"
"uadalp v1.8h, v31.16b\n"
- "add x27, x27, #0x10\n"
"8:" // Height 1: Multiply loop: Main loop skip
- "cbz x28, 17f\n"
- "tbz x28, #3, 12f\n"
- "ldr d31, [x27], #0x8\n"
- "tbz x28, #2, 10f\n"
- "ld1 { v31.s }[2], [x27], #0x4\n"
- "tbz x28, #1, 9f\n"
- "ld1 { v31.h }[6], [x27], #0x2\n"
- "tbz x28, #0, 16f\n"
- "ld1 { v31.b }[14], [x27]\n"
+ "cbz x27, 17f\n"
+ "tbz x27, #3, 12f\n"
+ "ldr d31, [x26], #0x8\n"
+ "tbz x27, #2, 10f\n"
+ "ld1 { v31.s }[2], [x26], #0x4\n"
+ "tbz x27, #1, 9f\n"
+ "ld1 { v31.h }[6], [x26], #0x2\n"
+ "tbz x27, #0, 16f\n"
+ "ld1 { v31.b }[14], [x26]\n"
"b 16f\n"
"9:" // Height 1: Multiply loop: Ragged operand read: partial_1_12
- "tbz x28, #0, 16f\n"
- "ld1 { v31.b }[12], [x27]\n"
+ "tbz x27, #0, 16f\n"
+ "ld1 { v31.b }[12], [x26]\n"
"b 16f\n"
"10:" // Height 1: Multiply loop: Ragged operand read: partial_2_8
- "tbz x28, #1, 11f\n"
- "ld1 { v31.h }[4], [x27], #0x2\n"
- "tbz x28, #0, 16f\n"
- "ld1 { v31.b }[10], [x27]\n"
+ "tbz x27, #1, 11f\n"
+ "ld1 { v31.h }[4], [x26], #0x2\n"
+ "tbz x27, #0, 16f\n"
+ "ld1 { v31.b }[10], [x26]\n"
"b 16f\n"
"11:" // Height 1: Multiply loop: Ragged operand read: partial_1_8
- "tbz x28, #0, 16f\n"
- "ld1 { v31.b }[8], [x27]\n"
+ "tbz x27, #0, 16f\n"
+ "ld1 { v31.b }[8], [x26]\n"
"b 16f\n"
"12:" // Height 1: Multiply loop: Ragged operand read: partial_4_0
- "tbz x28, #2, 14f\n"
- "ldr s31, [x27], #0x4\n"
- "tbz x28, #1, 13f\n"
- "ld1 { v31.h }[2], [x27], #0x2\n"
- "tbz x28, #0, 16f\n"
- "ld1 { v31.b }[6], [x27]\n"
+ "tbz x27, #2, 14f\n"
+ "ldr s31, [x26], #0x4\n"
+ "tbz x27, #1, 13f\n"
+ "ld1 { v31.h }[2], [x26], #0x2\n"
+ "tbz x27, #0, 16f\n"
+ "ld1 { v31.b }[6], [x26]\n"
"b 16f\n"
"13:" // Height 1: Multiply loop: Ragged operand read: partial_1_4
- "tbz x28, #0, 16f\n"
- "ld1 { v31.b }[4], [x27]\n"
+ "tbz x27, #0, 16f\n"
+ "ld1 { v31.b }[4], [x26]\n"
"b 16f\n"
"14:" // Height 1: Multiply loop: Ragged operand read: partial_2_0
- "tbz x28, #1, 15f\n"
- "ldr h31, [x27], #0x2\n"
- "tbz x28, #0, 16f\n"
- "ld1 { v31.b }[2], [x27]\n"
+ "tbz x27, #1, 15f\n"
+ "ldr h31, [x26], #0x2\n"
+ "tbz x27, #0, 16f\n"
+ "ld1 { v31.b }[2], [x26]\n"
"b 16f\n"
"15:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
- "ldr b31, [x27, #0x0]\n"
+ "ldr b31, [x26, #0x0]\n"
"16:" // Height 1: Multiply loop: Ragged operand read: Done
"uadalp v1.8h, v31.16b\n"
"17:" // Height 1: Multiply loop: No odd multiplies
- "add x9, x9, #0x1\n"
- "cmp x9, x21\n"
+ "add x28, x28, #0x1\n"
+ "cmp x28, x20\n"
"bne 2b\n"
"uadalp v0.4s, v1.8h\n"
"addp v0.4s, v0.4s, v0.4s\n"
@@ -176,126 +176,126 @@ void row_sums_indirect(
"b 104f\n"
"18:" // Height 2
"movi v1.8h, #0x0\n"
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "mov x9, #0x0\n"
"movi v0.4s, #0x0\n"
- "mov x10, #0x0\n"
- "ldr w21, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "mov x28, #0x0\n"
"movi v30.8h, #0x0\n"
"movi v29.4s, #0x0\n"
- "mov x9, #0x0\n"
"19:" // Height 2: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w28, [x20, x9, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w27, [x19, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 20f\n"
- "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
- "add x20, x20, %x[input_offset], LSL #3\n"
- "ldr x27, [x20, #0x0]\n"
- "ldr x26, [x20, #0x8]\n"
- "cbnz x9, 21f\n"
- "ldr w20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x27, x27, x20\n"
- "add x26, x26, x20\n"
+ "ldr x19, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x19, x19, %x[input_offset], LSL #3\n"
+ "ldr x26, [x19, #0x0]\n"
+ "ldr x25, [x19, #0x8]\n"
+ "cbnz x28, 21f\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x26, x26, x19\n"
+ "add x25, x25, x19\n"
"b 21f\n"
"20:" // Height 2: setup direct input
- "mov x27, %x[input_ptr]\n"
- "add x26, x27, %x[input_offset]\n"
+ "mov x26, %x[input_ptr]\n"
+ "add x25, x26, %x[input_offset]\n"
"21:" // Height 2: input setup done
- "cmp x28, #0x10\n"
+ "cmp x27, #0x10\n"
"blt 25f\n"
- "cmp x28, #0x20\n"
+ "cmp x27, #0x20\n"
"blt 24f\n"
"22:" // Height 2: Multiply loop: Main loop head
- "ldr q31, [x27, #0x0]\n"
- "ldr q28, [x26, #0x0]\n"
- "cmp x10, #0x7e\n"
- "add x27, x27, #0x10\n"
+ "ldr q31, [x26, #0x0]\n"
+ "ldr q28, [x25, #0x0]\n"
+ "cmp x9, #0x7e\n"
"add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
"blt 23f\n"
"uadalp v0.4s, v1.8h\n"
"movi v1.8h, #0x0\n"
- "mov x10, #0x0\n"
"uadalp v29.4s, v30.8h\n"
"movi v30.8h, #0x0\n"
+ "mov x9, #0x0\n"
"23:" // Height 2: Multiply loop: unique 2: no collapse
- "sub x28, x28, #0x10\n"
- "cmp x28, #0x20\n"
"uadalp v1.8h, v31.16b\n"
"uadalp v30.8h, v28.16b\n"
- "add x10, x10, #0x1\n"
+ "add x9, x9, #0x1\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x20\n"
"bge 22b\n"
"24:" // Height 2: Multiply loop: Single iteration only
- "ldr q31, [x27, #0x0]\n"
- "ldr q28, [x26, #0x0]\n"
- "sub x28, x28, #0x10\n"
+ "sub x27, x27, #0x10\n"
+ "ldr q31, [x26, #0x0]\n"
+ "ldr q28, [x25, #0x0]\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
"uadalp v1.8h, v31.16b\n"
"uadalp v30.8h, v28.16b\n"
- "add x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
"25:" // Height 2: Multiply loop: Main loop skip
- "cbz x28, 34f\n"
- "tbz x28, #3, 29f\n"
- "ldr d31, [x27], #0x8\n"
- "ldr d28, [x26], #0x8\n"
- "tbz x28, #2, 27f\n"
- "ld1 { v31.s }[2], [x27], #0x4\n"
- "ld1 { v28.s }[2], [x26], #0x4\n"
- "tbz x28, #1, 26f\n"
- "ld1 { v31.h }[6], [x27], #0x2\n"
- "ld1 { v28.h }[6], [x26], #0x2\n"
- "tbz x28, #0, 33f\n"
- "ld1 { v31.b }[14], [x27]\n"
- "ld1 { v28.b }[14], [x26]\n"
+ "cbz x27, 34f\n"
+ "tbz x27, #3, 29f\n"
+ "ldr d31, [x26], #0x8\n"
+ "ldr d28, [x25], #0x8\n"
+ "tbz x27, #2, 27f\n"
+ "ld1 { v31.s }[2], [x26], #0x4\n"
+ "ld1 { v28.s }[2], [x25], #0x4\n"
+ "tbz x27, #1, 26f\n"
+ "ld1 { v31.h }[6], [x26], #0x2\n"
+ "ld1 { v28.h }[6], [x25], #0x2\n"
+ "tbz x27, #0, 33f\n"
+ "ld1 { v31.b }[14], [x26]\n"
+ "ld1 { v28.b }[14], [x25]\n"
"b 33f\n"
"26:" // Height 2: Multiply loop: Ragged operand read: partial_1_12
- "tbz x28, #0, 33f\n"
- "ld1 { v31.b }[12], [x27]\n"
- "ld1 { v28.b }[12], [x26]\n"
+ "tbz x27, #0, 33f\n"
+ "ld1 { v31.b }[12], [x26]\n"
+ "ld1 { v28.b }[12], [x25]\n"
"b 33f\n"
"27:" // Height 2: Multiply loop: Ragged operand read: partial_2_8
- "tbz x28, #1, 28f\n"
- "ld1 { v31.h }[4], [x27], #0x2\n"
- "ld1 { v28.h }[4], [x26], #0x2\n"
- "tbz x28, #0, 33f\n"
- "ld1 { v31.b }[10], [x27]\n"
- "ld1 { v28.b }[10], [x26]\n"
+ "tbz x27, #1, 28f\n"
+ "ld1 { v31.h }[4], [x26], #0x2\n"
+ "ld1 { v28.h }[4], [x25], #0x2\n"
+ "tbz x27, #0, 33f\n"
+ "ld1 { v31.b }[10], [x26]\n"
+ "ld1 { v28.b }[10], [x25]\n"
"b 33f\n"
"28:" // Height 2: Multiply loop: Ragged operand read: partial_1_8
- "tbz x28, #0, 33f\n"
- "ld1 { v31.b }[8], [x27]\n"
- "ld1 { v28.b }[8], [x26]\n"
+ "tbz x27, #0, 33f\n"
+ "ld1 { v31.b }[8], [x26]\n"
+ "ld1 { v28.b }[8], [x25]\n"
"b 33f\n"
"29:" // Height 2: Multiply loop: Ragged operand read: partial_4_0
- "tbz x28, #2, 31f\n"
- "ldr s31, [x27], #0x4\n"
- "ldr s28, [x26], #0x4\n"
- "tbz x28, #1, 30f\n"
- "ld1 { v31.h }[2], [x27], #0x2\n"
- "ld1 { v28.h }[2], [x26], #0x2\n"
- "tbz x28, #0, 33f\n"
- "ld1 { v31.b }[6], [x27]\n"
- "ld1 { v28.b }[6], [x26]\n"
+ "tbz x27, #2, 31f\n"
+ "ldr s31, [x26], #0x4\n"
+ "ldr s28, [x25], #0x4\n"
+ "tbz x27, #1, 30f\n"
+ "ld1 { v31.h }[2], [x26], #0x2\n"
+ "ld1 { v28.h }[2], [x25], #0x2\n"
+ "tbz x27, #0, 33f\n"
+ "ld1 { v31.b }[6], [x26]\n"
+ "ld1 { v28.b }[6], [x25]\n"
"b 33f\n"
"30:" // Height 2: Multiply loop: Ragged operand read: partial_1_4
- "tbz x28, #0, 33f\n"
- "ld1 { v31.b }[4], [x27]\n"
- "ld1 { v28.b }[4], [x26]\n"
+ "tbz x27, #0, 33f\n"
+ "ld1 { v31.b }[4], [x26]\n"
+ "ld1 { v28.b }[4], [x25]\n"
"b 33f\n"
"31:" // Height 2: Multiply loop: Ragged operand read: partial_2_0
- "tbz x28, #1, 32f\n"
- "ldr h31, [x27], #0x2\n"
- "ldr h28, [x26], #0x2\n"
- "tbz x28, #0, 33f\n"
- "ld1 { v31.b }[2], [x27]\n"
- "ld1 { v28.b }[2], [x26]\n"
+ "tbz x27, #1, 32f\n"
+ "ldr h31, [x26], #0x2\n"
+ "ldr h28, [x25], #0x2\n"
+ "tbz x27, #0, 33f\n"
+ "ld1 { v31.b }[2], [x26]\n"
+ "ld1 { v28.b }[2], [x25]\n"
"b 33f\n"
"32:" // Height 2: Multiply loop: Ragged operand read: partial_1_0
- "ldr b31, [x27, #0x0]\n"
- "ldr b28, [x26, #0x0]\n"
+ "ldr b31, [x26, #0x0]\n"
+ "ldr b28, [x25, #0x0]\n"
"33:" // Height 2: Multiply loop: Ragged operand read: Done
"uadalp v1.8h, v31.16b\n"
"uadalp v30.8h, v28.16b\n"
"34:" // Height 2: Multiply loop: No odd multiplies
- "add x9, x9, #0x1\n"
- "cmp x9, x21\n"
+ "add x28, x28, #0x1\n"
+ "cmp x28, x20\n"
"bne 19b\n"
"uadalp v0.4s, v1.8h\n"
"uadalp v29.4s, v30.8h\n"
@@ -306,354 +306,354 @@ void row_sums_indirect(
"b 104f\n"
"35:" // Height 3
"movi v1.8h, #0x0\n"
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "mov x9, #0x0\n"
"movi v0.4s, #0x0\n"
- "mov x10, #0x0\n"
- "ldr w21, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "mov x28, #0x0\n"
"movi v30.8h, #0x0\n"
"movi v29.4s, #0x0\n"
- "mov x9, #0x0\n"
"movi v27.8h, #0x0\n"
"movi v26.4s, #0x0\n"
"36:" // Height 3: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w28, [x20, x9, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w27, [x19, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 37f\n"
- "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
- "add x20, x20, %x[input_offset], LSL #3\n"
- "ldr x27, [x20, #0x0]\n"
- "ldr x26, [x20, #0x8]\n"
- "ldr x25, [x20, #0x10]\n"
- "cbnz x9, 38f\n"
- "ldr w20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x27, x27, x20\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
+ "ldr x19, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x19, x19, %x[input_offset], LSL #3\n"
+ "ldr x26, [x19, #0x0]\n"
+ "ldr x25, [x19, #0x8]\n"
+ "ldr x24, [x19, #0x10]\n"
+ "cbnz x28, 38f\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x26, x26, x19\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
"b 38f\n"
"37:" // Height 3: setup direct input
- "mov x27, %x[input_ptr]\n"
- "add x26, x27, %x[input_offset]\n"
+ "mov x26, %x[input_ptr]\n"
"add x25, x26, %x[input_offset]\n"
+ "add x24, x25, %x[input_offset]\n"
"38:" // Height 3: input setup done
- "cmp x28, #0x10\n"
+ "cmp x27, #0x10\n"
"blt 42f\n"
- "cmp x28, #0x20\n"
+ "cmp x27, #0x20\n"
"blt 41f\n"
"39:" // Height 3: Multiply loop: Main loop head
- "ldr q31, [x27, #0x0]\n"
- "ldr q28, [x26, #0x0]\n"
- "cmp x10, #0x7e\n"
- "add x27, x27, #0x10\n"
- "ldr q25, [x25, #0x0]\n"
+ "ldr q31, [x26, #0x0]\n"
+ "ldr q28, [x25, #0x0]\n"
+ "ldr q25, [x24, #0x0]\n"
+ "cmp x9, #0x7e\n"
"add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"blt 40f\n"
"uadalp v0.4s, v1.8h\n"
"movi v1.8h, #0x0\n"
- "mov x10, #0x0\n"
"uadalp v29.4s, v30.8h\n"
"movi v30.8h, #0x0\n"
"uadalp v26.4s, v27.8h\n"
"movi v27.8h, #0x0\n"
+ "mov x9, #0x0\n"
"40:" // Height 3: Multiply loop: unique 3: no collapse
- "sub x28, x28, #0x10\n"
- "cmp x28, #0x20\n"
"uadalp v1.8h, v31.16b\n"
"uadalp v30.8h, v28.16b\n"
"uadalp v27.8h, v25.16b\n"
- "add x10, x10, #0x1\n"
+ "add x9, x9, #0x1\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x20\n"
"bge 39b\n"
"41:" // Height 3: Multiply loop: Single iteration only
- "ldr q31, [x27, #0x0]\n"
- "ldr q28, [x26, #0x0]\n"
- "sub x28, x28, #0x10\n"
+ "sub x27, x27, #0x10\n"
+ "ldr q31, [x26, #0x0]\n"
+ "ldr q28, [x25, #0x0]\n"
+ "ldr q25, [x24, #0x0]\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
"uadalp v1.8h, v31.16b\n"
- "ldr q25, [x25, #0x0]\n"
"uadalp v30.8h, v28.16b\n"
"uadalp v27.8h, v25.16b\n"
- "add x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"42:" // Height 3: Multiply loop: Main loop skip
- "cbz x28, 51f\n"
- "tbz x28, #3, 46f\n"
- "ldr d31, [x27], #0x8\n"
- "ldr d28, [x26], #0x8\n"
- "ldr d25, [x25], #0x8\n"
- "tbz x28, #2, 44f\n"
- "ld1 { v31.s }[2], [x27], #0x4\n"
- "ld1 { v28.s }[2], [x26], #0x4\n"
- "ld1 { v25.s }[2], [x25], #0x4\n"
- "tbz x28, #1, 43f\n"
- "ld1 { v31.h }[6], [x27], #0x2\n"
- "ld1 { v28.h }[6], [x26], #0x2\n"
- "ld1 { v25.h }[6], [x25], #0x2\n"
- "tbz x28, #0, 50f\n"
- "ld1 { v31.b }[14], [x27]\n"
- "ld1 { v28.b }[14], [x26]\n"
- "ld1 { v25.b }[14], [x25]\n"
+ "cbz x27, 51f\n"
+ "tbz x27, #3, 46f\n"
+ "ldr d31, [x26], #0x8\n"
+ "ldr d28, [x25], #0x8\n"
+ "ldr d25, [x24], #0x8\n"
+ "tbz x27, #2, 44f\n"
+ "ld1 { v31.s }[2], [x26], #0x4\n"
+ "ld1 { v28.s }[2], [x25], #0x4\n"
+ "ld1 { v25.s }[2], [x24], #0x4\n"
+ "tbz x27, #1, 43f\n"
+ "ld1 { v31.h }[6], [x26], #0x2\n"
+ "ld1 { v28.h }[6], [x25], #0x2\n"
+ "ld1 { v25.h }[6], [x24], #0x2\n"
+ "tbz x27, #0, 50f\n"
+ "ld1 { v31.b }[14], [x26]\n"
+ "ld1 { v28.b }[14], [x25]\n"
+ "ld1 { v25.b }[14], [x24]\n"
"b 50f\n"
"43:" // Height 3: Multiply loop: Ragged operand read: partial_1_12
- "tbz x28, #0, 50f\n"
- "ld1 { v31.b }[12], [x27]\n"
- "ld1 { v28.b }[12], [x26]\n"
- "ld1 { v25.b }[12], [x25]\n"
+ "tbz x27, #0, 50f\n"
+ "ld1 { v31.b }[12], [x26]\n"
+ "ld1 { v28.b }[12], [x25]\n"
+ "ld1 { v25.b }[12], [x24]\n"
"b 50f\n"
"44:" // Height 3: Multiply loop: Ragged operand read: partial_2_8
- "tbz x28, #1, 45f\n"
- "ld1 { v31.h }[4], [x27], #0x2\n"
- "ld1 { v28.h }[4], [x26], #0x2\n"
- "ld1 { v25.h }[4], [x25], #0x2\n"
- "tbz x28, #0, 50f\n"
- "ld1 { v31.b }[10], [x27]\n"
- "ld1 { v28.b }[10], [x26]\n"
- "ld1 { v25.b }[10], [x25]\n"
+ "tbz x27, #1, 45f\n"
+ "ld1 { v31.h }[4], [x26], #0x2\n"
+ "ld1 { v28.h }[4], [x25], #0x2\n"
+ "ld1 { v25.h }[4], [x24], #0x2\n"
+ "tbz x27, #0, 50f\n"
+ "ld1 { v31.b }[10], [x26]\n"
+ "ld1 { v28.b }[10], [x25]\n"
+ "ld1 { v25.b }[10], [x24]\n"
"b 50f\n"
"45:" // Height 3: Multiply loop: Ragged operand read: partial_1_8
- "tbz x28, #0, 50f\n"
- "ld1 { v31.b }[8], [x27]\n"
- "ld1 { v28.b }[8], [x26]\n"
- "ld1 { v25.b }[8], [x25]\n"
+ "tbz x27, #0, 50f\n"
+ "ld1 { v31.b }[8], [x26]\n"
+ "ld1 { v28.b }[8], [x25]\n"
+ "ld1 { v25.b }[8], [x24]\n"
"b 50f\n"
"46:" // Height 3: Multiply loop: Ragged operand read: partial_4_0
- "tbz x28, #2, 48f\n"
- "ldr s31, [x27], #0x4\n"
- "ldr s28, [x26], #0x4\n"
- "ldr s25, [x25], #0x4\n"
- "tbz x28, #1, 47f\n"
- "ld1 { v31.h }[2], [x27], #0x2\n"
- "ld1 { v28.h }[2], [x26], #0x2\n"
- "ld1 { v25.h }[2], [x25], #0x2\n"
- "tbz x28, #0, 50f\n"
- "ld1 { v31.b }[6], [x27]\n"
- "ld1 { v28.b }[6], [x26]\n"
- "ld1 { v25.b }[6], [x25]\n"
+ "tbz x27, #2, 48f\n"
+ "ldr s31, [x26], #0x4\n"
+ "ldr s28, [x25], #0x4\n"
+ "ldr s25, [x24], #0x4\n"
+ "tbz x27, #1, 47f\n"
+ "ld1 { v31.h }[2], [x26], #0x2\n"
+ "ld1 { v28.h }[2], [x25], #0x2\n"
+ "ld1 { v25.h }[2], [x24], #0x2\n"
+ "tbz x27, #0, 50f\n"
+ "ld1 { v31.b }[6], [x26]\n"
+ "ld1 { v28.b }[6], [x25]\n"
+ "ld1 { v25.b }[6], [x24]\n"
"b 50f\n"
"47:" // Height 3: Multiply loop: Ragged operand read: partial_1_4
- "tbz x28, #0, 50f\n"
- "ld1 { v31.b }[4], [x27]\n"
- "ld1 { v28.b }[4], [x26]\n"
- "ld1 { v25.b }[4], [x25]\n"
+ "tbz x27, #0, 50f\n"
+ "ld1 { v31.b }[4], [x26]\n"
+ "ld1 { v28.b }[4], [x25]\n"
+ "ld1 { v25.b }[4], [x24]\n"
"b 50f\n"
"48:" // Height 3: Multiply loop: Ragged operand read: partial_2_0
- "tbz x28, #1, 49f\n"
- "ldr h31, [x27], #0x2\n"
- "ldr h28, [x26], #0x2\n"
- "ldr h25, [x25], #0x2\n"
- "tbz x28, #0, 50f\n"
- "ld1 { v31.b }[2], [x27]\n"
- "ld1 { v28.b }[2], [x26]\n"
- "ld1 { v25.b }[2], [x25]\n"
+ "tbz x27, #1, 49f\n"
+ "ldr h31, [x26], #0x2\n"
+ "ldr h28, [x25], #0x2\n"
+ "ldr h25, [x24], #0x2\n"
+ "tbz x27, #0, 50f\n"
+ "ld1 { v31.b }[2], [x26]\n"
+ "ld1 { v28.b }[2], [x25]\n"
+ "ld1 { v25.b }[2], [x24]\n"
"b 50f\n"
"49:" // Height 3: Multiply loop: Ragged operand read: partial_1_0
- "ldr b31, [x27, #0x0]\n"
- "ldr b28, [x26, #0x0]\n"
- "ldr b25, [x25, #0x0]\n"
+ "ldr b31, [x26, #0x0]\n"
+ "ldr b28, [x25, #0x0]\n"
+ "ldr b25, [x24, #0x0]\n"
"50:" // Height 3: Multiply loop: Ragged operand read: Done
"uadalp v1.8h, v31.16b\n"
"uadalp v30.8h, v28.16b\n"
"uadalp v27.8h, v25.16b\n"
"51:" // Height 3: Multiply loop: No odd multiplies
- "add x9, x9, #0x1\n"
- "cmp x9, x21\n"
+ "add x28, x28, #0x1\n"
+ "cmp x28, x20\n"
"bne 36b\n"
"uadalp v0.4s, v1.8h\n"
"uadalp v29.4s, v30.8h\n"
- "uadalp v26.4s, v27.8h\n"
"addp v0.4s, v0.4s, v29.4s\n"
- "addp v26.4s, v26.4s, v26.4s\n"
+ "uadalp v26.4s, v27.8h\n"
"addp v0.4s, v0.4s, v0.4s\n"
"addp v26.4s, v26.4s, v26.4s\n"
"mul v0.4s, v0.4s, v2.4s\n"
"str d0, [%x[out_ptr]], #0x8\n"
+ "addp v26.4s, v26.4s, v26.4s\n"
"mul v26.4s, v26.4s, v2.4s\n"
"str s26, [%x[out_ptr]], #0x4\n"
"b 104f\n"
"52:" // Height 4
"movi v1.8h, #0x0\n"
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "mov x9, #0x0\n"
"movi v0.4s, #0x0\n"
- "mov x10, #0x0\n"
- "ldr w21, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "mov x28, #0x0\n"
"movi v30.8h, #0x0\n"
"movi v29.4s, #0x0\n"
- "mov x9, #0x0\n"
"movi v27.8h, #0x0\n"
"movi v26.4s, #0x0\n"
"movi v24.8h, #0x0\n"
"movi v23.4s, #0x0\n"
"53:" // Height 4: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w28, [x20, x9, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w27, [x19, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 54f\n"
- "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
- "add x20, x20, %x[input_offset], LSL #3\n"
- "ldr x27, [x20, #0x0]\n"
- "ldr x26, [x20, #0x8]\n"
- "ldr x25, [x20, #0x10]\n"
- "ldr x24, [x20, #0x18]\n"
- "cbnz x9, 55f\n"
- "ldr w20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x27, x27, x20\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
+ "ldr x19, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x19, x19, %x[input_offset], LSL #3\n"
+ "ldr x26, [x19, #0x0]\n"
+ "ldr x25, [x19, #0x8]\n"
+ "ldr x24, [x19, #0x10]\n"
+ "ldr x23, [x19, #0x18]\n"
+ "cbnz x28, 55f\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x26, x26, x19\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
"b 55f\n"
"54:" // Height 4: setup direct input
- "mov x27, %x[input_ptr]\n"
- "add x26, x27, %x[input_offset]\n"
+ "mov x26, %x[input_ptr]\n"
"add x25, x26, %x[input_offset]\n"
"add x24, x25, %x[input_offset]\n"
+ "add x23, x24, %x[input_offset]\n"
"55:" // Height 4: input setup done
- "cmp x28, #0x10\n"
+ "cmp x27, #0x10\n"
"blt 59f\n"
- "cmp x28, #0x20\n"
+ "cmp x27, #0x20\n"
"blt 58f\n"
"56:" // Height 4: Multiply loop: Main loop head
- "ldr q31, [x27, #0x0]\n"
- "ldr q28, [x26, #0x0]\n"
- "cmp x10, #0x7e\n"
- "add x27, x27, #0x10\n"
- "ldr q25, [x25, #0x0]\n"
- "ldr q22, [x24, #0x0]\n"
+ "ldr q31, [x26, #0x0]\n"
+ "ldr q28, [x25, #0x0]\n"
+ "ldr q25, [x24, #0x0]\n"
+ "ldr q22, [x23, #0x0]\n"
+ "cmp x9, #0x7e\n"
"add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
"add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
"blt 57f\n"
"uadalp v0.4s, v1.8h\n"
"movi v1.8h, #0x0\n"
- "mov x10, #0x0\n"
"uadalp v29.4s, v30.8h\n"
"movi v30.8h, #0x0\n"
"uadalp v26.4s, v27.8h\n"
"movi v27.8h, #0x0\n"
"uadalp v23.4s, v24.8h\n"
"movi v24.8h, #0x0\n"
+ "mov x9, #0x0\n"
"57:" // Height 4: Multiply loop: unique 4: no collapse
- "sub x28, x28, #0x10\n"
- "cmp x28, #0x20\n"
"uadalp v1.8h, v31.16b\n"
"uadalp v30.8h, v28.16b\n"
"uadalp v27.8h, v25.16b\n"
"uadalp v24.8h, v22.16b\n"
- "add x10, x10, #0x1\n"
+ "add x9, x9, #0x1\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x20\n"
"bge 56b\n"
"58:" // Height 4: Multiply loop: Single iteration only
- "ldr q31, [x27, #0x0]\n"
- "ldr q28, [x26, #0x0]\n"
- "sub x28, x28, #0x10\n"
+ "sub x27, x27, #0x10\n"
+ "ldr q31, [x26, #0x0]\n"
+ "ldr q28, [x25, #0x0]\n"
+ "ldr q25, [x24, #0x0]\n"
+ "ldr q22, [x23, #0x0]\n"
+ "add x26, x26, #0x10\n"
"uadalp v1.8h, v31.16b\n"
- "ldr q25, [x25, #0x0]\n"
- "ldr q22, [x24, #0x0]\n"
"uadalp v30.8h, v28.16b\n"
"uadalp v27.8h, v25.16b\n"
"uadalp v24.8h, v22.16b\n"
- "add x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
"add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
"59:" // Height 4: Multiply loop: Main loop skip
- "cbz x28, 68f\n"
- "tbz x28, #3, 63f\n"
- "ldr d31, [x27], #0x8\n"
- "ldr d28, [x26], #0x8\n"
- "ldr d25, [x25], #0x8\n"
- "ldr d22, [x24], #0x8\n"
- "tbz x28, #2, 61f\n"
- "ld1 { v31.s }[2], [x27], #0x4\n"
- "ld1 { v28.s }[2], [x26], #0x4\n"
- "ld1 { v25.s }[2], [x25], #0x4\n"
- "ld1 { v22.s }[2], [x24], #0x4\n"
- "tbz x28, #1, 60f\n"
- "ld1 { v31.h }[6], [x27], #0x2\n"
- "ld1 { v28.h }[6], [x26], #0x2\n"
- "ld1 { v25.h }[6], [x25], #0x2\n"
- "ld1 { v22.h }[6], [x24], #0x2\n"
- "tbz x28, #0, 67f\n"
- "ld1 { v31.b }[14], [x27]\n"
- "ld1 { v28.b }[14], [x26]\n"
- "ld1 { v25.b }[14], [x25]\n"
- "ld1 { v22.b }[14], [x24]\n"
+ "cbz x27, 68f\n"
+ "tbz x27, #3, 63f\n"
+ "ldr d31, [x26], #0x8\n"
+ "ldr d28, [x25], #0x8\n"
+ "ldr d25, [x24], #0x8\n"
+ "ldr d22, [x23], #0x8\n"
+ "tbz x27, #2, 61f\n"
+ "ld1 { v31.s }[2], [x26], #0x4\n"
+ "ld1 { v28.s }[2], [x25], #0x4\n"
+ "ld1 { v25.s }[2], [x24], #0x4\n"
+ "ld1 { v22.s }[2], [x23], #0x4\n"
+ "tbz x27, #1, 60f\n"
+ "ld1 { v31.h }[6], [x26], #0x2\n"
+ "ld1 { v28.h }[6], [x25], #0x2\n"
+ "ld1 { v25.h }[6], [x24], #0x2\n"
+ "ld1 { v22.h }[6], [x23], #0x2\n"
+ "tbz x27, #0, 67f\n"
+ "ld1 { v31.b }[14], [x26]\n"
+ "ld1 { v28.b }[14], [x25]\n"
+ "ld1 { v25.b }[14], [x24]\n"
+ "ld1 { v22.b }[14], [x23]\n"
"b 67f\n"
"60:" // Height 4: Multiply loop: Ragged operand read: partial_1_12
- "tbz x28, #0, 67f\n"
- "ld1 { v31.b }[12], [x27]\n"
- "ld1 { v28.b }[12], [x26]\n"
- "ld1 { v25.b }[12], [x25]\n"
- "ld1 { v22.b }[12], [x24]\n"
+ "tbz x27, #0, 67f\n"
+ "ld1 { v31.b }[12], [x26]\n"
+ "ld1 { v28.b }[12], [x25]\n"
+ "ld1 { v25.b }[12], [x24]\n"
+ "ld1 { v22.b }[12], [x23]\n"
"b 67f\n"
"61:" // Height 4: Multiply loop: Ragged operand read: partial_2_8
- "tbz x28, #1, 62f\n"
- "ld1 { v31.h }[4], [x27], #0x2\n"
- "ld1 { v28.h }[4], [x26], #0x2\n"
- "ld1 { v25.h }[4], [x25], #0x2\n"
- "ld1 { v22.h }[4], [x24], #0x2\n"
- "tbz x28, #0, 67f\n"
- "ld1 { v31.b }[10], [x27]\n"
- "ld1 { v28.b }[10], [x26]\n"
- "ld1 { v25.b }[10], [x25]\n"
- "ld1 { v22.b }[10], [x24]\n"
+ "tbz x27, #1, 62f\n"
+ "ld1 { v31.h }[4], [x26], #0x2\n"
+ "ld1 { v28.h }[4], [x25], #0x2\n"
+ "ld1 { v25.h }[4], [x24], #0x2\n"
+ "ld1 { v22.h }[4], [x23], #0x2\n"
+ "tbz x27, #0, 67f\n"
+ "ld1 { v31.b }[10], [x26]\n"
+ "ld1 { v28.b }[10], [x25]\n"
+ "ld1 { v25.b }[10], [x24]\n"
+ "ld1 { v22.b }[10], [x23]\n"
"b 67f\n"
"62:" // Height 4: Multiply loop: Ragged operand read: partial_1_8
- "tbz x28, #0, 67f\n"
- "ld1 { v31.b }[8], [x27]\n"
- "ld1 { v28.b }[8], [x26]\n"
- "ld1 { v25.b }[8], [x25]\n"
- "ld1 { v22.b }[8], [x24]\n"
+ "tbz x27, #0, 67f\n"
+ "ld1 { v31.b }[8], [x26]\n"
+ "ld1 { v28.b }[8], [x25]\n"
+ "ld1 { v25.b }[8], [x24]\n"
+ "ld1 { v22.b }[8], [x23]\n"
"b 67f\n"
"63:" // Height 4: Multiply loop: Ragged operand read: partial_4_0
- "tbz x28, #2, 65f\n"
- "ldr s31, [x27], #0x4\n"
- "ldr s28, [x26], #0x4\n"
- "ldr s25, [x25], #0x4\n"
- "ldr s22, [x24], #0x4\n"
- "tbz x28, #1, 64f\n"
- "ld1 { v31.h }[2], [x27], #0x2\n"
- "ld1 { v28.h }[2], [x26], #0x2\n"
- "ld1 { v25.h }[2], [x25], #0x2\n"
- "ld1 { v22.h }[2], [x24], #0x2\n"
- "tbz x28, #0, 67f\n"
- "ld1 { v31.b }[6], [x27]\n"
- "ld1 { v28.b }[6], [x26]\n"
- "ld1 { v25.b }[6], [x25]\n"
- "ld1 { v22.b }[6], [x24]\n"
+ "tbz x27, #2, 65f\n"
+ "ldr s31, [x26], #0x4\n"
+ "ldr s28, [x25], #0x4\n"
+ "ldr s25, [x24], #0x4\n"
+ "ldr s22, [x23], #0x4\n"
+ "tbz x27, #1, 64f\n"
+ "ld1 { v31.h }[2], [x26], #0x2\n"
+ "ld1 { v28.h }[2], [x25], #0x2\n"
+ "ld1 { v25.h }[2], [x24], #0x2\n"
+ "ld1 { v22.h }[2], [x23], #0x2\n"
+ "tbz x27, #0, 67f\n"
+ "ld1 { v31.b }[6], [x26]\n"
+ "ld1 { v28.b }[6], [x25]\n"
+ "ld1 { v25.b }[6], [x24]\n"
+ "ld1 { v22.b }[6], [x23]\n"
"b 67f\n"
"64:" // Height 4: Multiply loop: Ragged operand read: partial_1_4
- "tbz x28, #0, 67f\n"
- "ld1 { v31.b }[4], [x27]\n"
- "ld1 { v28.b }[4], [x26]\n"
- "ld1 { v25.b }[4], [x25]\n"
- "ld1 { v22.b }[4], [x24]\n"
+ "tbz x27, #0, 67f\n"
+ "ld1 { v31.b }[4], [x26]\n"
+ "ld1 { v28.b }[4], [x25]\n"
+ "ld1 { v25.b }[4], [x24]\n"
+ "ld1 { v22.b }[4], [x23]\n"
"b 67f\n"
"65:" // Height 4: Multiply loop: Ragged operand read: partial_2_0
- "tbz x28, #1, 66f\n"
- "ldr h31, [x27], #0x2\n"
- "ldr h28, [x26], #0x2\n"
- "ldr h25, [x25], #0x2\n"
- "ldr h22, [x24], #0x2\n"
- "tbz x28, #0, 67f\n"
- "ld1 { v31.b }[2], [x27]\n"
- "ld1 { v28.b }[2], [x26]\n"
- "ld1 { v25.b }[2], [x25]\n"
- "ld1 { v22.b }[2], [x24]\n"
+ "tbz x27, #1, 66f\n"
+ "ldr h31, [x26], #0x2\n"
+ "ldr h28, [x25], #0x2\n"
+ "ldr h25, [x24], #0x2\n"
+ "ldr h22, [x23], #0x2\n"
+ "tbz x27, #0, 67f\n"
+ "ld1 { v31.b }[2], [x26]\n"
+ "ld1 { v28.b }[2], [x25]\n"
+ "ld1 { v25.b }[2], [x24]\n"
+ "ld1 { v22.b }[2], [x23]\n"
"b 67f\n"
"66:" // Height 4: Multiply loop: Ragged operand read: partial_1_0
- "ldr b31, [x27, #0x0]\n"
- "ldr b28, [x26, #0x0]\n"
- "ldr b25, [x25, #0x0]\n"
- "ldr b22, [x24, #0x0]\n"
+ "ldr b31, [x26, #0x0]\n"
+ "ldr b28, [x25, #0x0]\n"
+ "ldr b25, [x24, #0x0]\n"
+ "ldr b22, [x23, #0x0]\n"
"67:" // Height 4: Multiply loop: Ragged operand read: Done
"uadalp v1.8h, v31.16b\n"
"uadalp v30.8h, v28.16b\n"
"uadalp v27.8h, v25.16b\n"
"uadalp v24.8h, v22.16b\n"
"68:" // Height 4: Multiply loop: No odd multiplies
- "add x9, x9, #0x1\n"
- "cmp x9, x21\n"
+ "add x28, x28, #0x1\n"
+ "cmp x28, x20\n"
"bne 53b\n"
"uadalp v0.4s, v1.8h\n"
"uadalp v29.4s, v30.8h\n"
+ "addp v0.4s, v0.4s, v29.4s\n"
"uadalp v26.4s, v27.8h\n"
"uadalp v23.4s, v24.8h\n"
- "addp v0.4s, v0.4s, v29.4s\n"
"addp v29.4s, v26.4s, v23.4s\n"
"addp v0.4s, v0.4s, v29.4s\n"
"mul v0.4s, v0.4s, v2.4s\n"
@@ -661,12 +661,12 @@ void row_sums_indirect(
"b 104f\n"
"69:" // Height 5
"movi v1.8h, #0x0\n"
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "mov x9, #0x0\n"
"movi v0.4s, #0x0\n"
- "mov x10, #0x0\n"
- "ldr w21, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "mov x28, #0x0\n"
"movi v30.8h, #0x0\n"
"movi v29.4s, #0x0\n"
- "mov x9, #0x0\n"
"movi v27.8h, #0x0\n"
"movi v26.4s, #0x0\n"
"movi v24.8h, #0x0\n"
@@ -674,51 +674,50 @@ void row_sums_indirect(
"movi v21.8h, #0x0\n"
"movi v20.4s, #0x0\n"
"70:" // Height 5: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w28, [x20, x9, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w27, [x19, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 71f\n"
- "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
- "add x20, x20, %x[input_offset], LSL #3\n"
- "ldr x27, [x20, #0x0]\n"
- "ldr x26, [x20, #0x8]\n"
- "ldr x25, [x20, #0x10]\n"
- "ldr x24, [x20, #0x18]\n"
- "ldr x23, [x20, #0x20]\n"
- "cbnz x9, 72f\n"
- "ldr w20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x27, x27, x20\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
+ "ldr x19, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x19, x19, %x[input_offset], LSL #3\n"
+ "ldr x26, [x19, #0x0]\n"
+ "ldr x25, [x19, #0x8]\n"
+ "ldr x24, [x19, #0x10]\n"
+ "ldr x23, [x19, #0x18]\n"
+ "ldr x22, [x19, #0x20]\n"
+ "cbnz x28, 72f\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x26, x26, x19\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
"b 72f\n"
"71:" // Height 5: setup direct input
- "mov x27, %x[input_ptr]\n"
- "add x26, x27, %x[input_offset]\n"
+ "mov x26, %x[input_ptr]\n"
"add x25, x26, %x[input_offset]\n"
"add x24, x25, %x[input_offset]\n"
"add x23, x24, %x[input_offset]\n"
+ "add x22, x23, %x[input_offset]\n"
"72:" // Height 5: input setup done
- "cmp x28, #0x10\n"
+ "cmp x27, #0x10\n"
"blt 76f\n"
- "cmp x28, #0x20\n"
+ "cmp x27, #0x20\n"
"blt 75f\n"
"73:" // Height 5: Multiply loop: Main loop head
- "ldr q31, [x27, #0x0]\n"
- "ldr q28, [x26, #0x0]\n"
- "cmp x10, #0x7e\n"
- "add x27, x27, #0x10\n"
- "ldr q25, [x25, #0x0]\n"
- "ldr q22, [x24, #0x0]\n"
+ "ldr q31, [x26, #0x0]\n"
+ "ldr q28, [x25, #0x0]\n"
+ "ldr q25, [x24, #0x0]\n"
+ "ldr q22, [x23, #0x0]\n"
+ "ldr q19, [x22, #0x0]\n"
+ "cmp x9, #0x7e\n"
"add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
- "ldr q19, [x23, #0x0]\n"
"add x24, x24, #0x10\n"
"add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
"blt 74f\n"
"uadalp v0.4s, v1.8h\n"
"movi v1.8h, #0x0\n"
- "mov x10, #0x0\n"
"uadalp v29.4s, v30.8h\n"
"movi v30.8h, #0x0\n"
"uadalp v26.4s, v27.8h\n"
@@ -727,138 +726,139 @@ void row_sums_indirect(
"movi v24.8h, #0x0\n"
"uadalp v20.4s, v21.8h\n"
"movi v21.8h, #0x0\n"
+ "mov x9, #0x0\n"
"74:" // Height 5: Multiply loop: unique 5: no collapse
- "sub x28, x28, #0x10\n"
- "cmp x28, #0x20\n"
"uadalp v1.8h, v31.16b\n"
"uadalp v30.8h, v28.16b\n"
"uadalp v27.8h, v25.16b\n"
"uadalp v24.8h, v22.16b\n"
- "add x10, x10, #0x1\n"
"uadalp v21.8h, v19.16b\n"
+ "add x9, x9, #0x1\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x20\n"
"bge 73b\n"
"75:" // Height 5: Multiply loop: Single iteration only
- "ldr q31, [x27, #0x0]\n"
- "ldr q28, [x26, #0x0]\n"
- "sub x28, x28, #0x10\n"
+ "sub x27, x27, #0x10\n"
+ "ldr q31, [x26, #0x0]\n"
+ "ldr q28, [x25, #0x0]\n"
+ "ldr q25, [x24, #0x0]\n"
+ "ldr q22, [x23, #0x0]\n"
+ "ldr q19, [x22, #0x0]\n"
"uadalp v1.8h, v31.16b\n"
- "ldr q25, [x25, #0x0]\n"
- "ldr q22, [x24, #0x0]\n"
"uadalp v30.8h, v28.16b\n"
"uadalp v27.8h, v25.16b\n"
- "ldr q19, [x23, #0x0]\n"
"uadalp v24.8h, v22.16b\n"
"uadalp v21.8h, v19.16b\n"
- "add x27, x27, #0x10\n"
"add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
"add x24, x24, #0x10\n"
"add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
"76:" // Height 5: Multiply loop: Main loop skip
- "cbz x28, 85f\n"
- "tbz x28, #3, 80f\n"
- "ldr d31, [x27], #0x8\n"
- "ldr d28, [x26], #0x8\n"
- "ldr d25, [x25], #0x8\n"
- "ldr d22, [x24], #0x8\n"
- "ldr d19, [x23], #0x8\n"
- "tbz x28, #2, 78f\n"
- "ld1 { v31.s }[2], [x27], #0x4\n"
- "ld1 { v28.s }[2], [x26], #0x4\n"
- "ld1 { v25.s }[2], [x25], #0x4\n"
- "ld1 { v22.s }[2], [x24], #0x4\n"
- "ld1 { v19.s }[2], [x23], #0x4\n"
- "tbz x28, #1, 77f\n"
- "ld1 { v31.h }[6], [x27], #0x2\n"
- "ld1 { v28.h }[6], [x26], #0x2\n"
- "ld1 { v25.h }[6], [x25], #0x2\n"
- "ld1 { v22.h }[6], [x24], #0x2\n"
- "ld1 { v19.h }[6], [x23], #0x2\n"
- "tbz x28, #0, 84f\n"
- "ld1 { v31.b }[14], [x27]\n"
- "ld1 { v28.b }[14], [x26]\n"
- "ld1 { v25.b }[14], [x25]\n"
- "ld1 { v22.b }[14], [x24]\n"
- "ld1 { v19.b }[14], [x23]\n"
+ "cbz x27, 85f\n"
+ "tbz x27, #3, 80f\n"
+ "ldr d31, [x26], #0x8\n"
+ "ldr d28, [x25], #0x8\n"
+ "ldr d25, [x24], #0x8\n"
+ "ldr d22, [x23], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
+ "tbz x27, #2, 78f\n"
+ "ld1 { v31.s }[2], [x26], #0x4\n"
+ "ld1 { v28.s }[2], [x25], #0x4\n"
+ "ld1 { v25.s }[2], [x24], #0x4\n"
+ "ld1 { v22.s }[2], [x23], #0x4\n"
+ "ld1 { v19.s }[2], [x22], #0x4\n"
+ "tbz x27, #1, 77f\n"
+ "ld1 { v31.h }[6], [x26], #0x2\n"
+ "ld1 { v28.h }[6], [x25], #0x2\n"
+ "ld1 { v25.h }[6], [x24], #0x2\n"
+ "ld1 { v22.h }[6], [x23], #0x2\n"
+ "ld1 { v19.h }[6], [x22], #0x2\n"
+ "tbz x27, #0, 84f\n"
+ "ld1 { v31.b }[14], [x26]\n"
+ "ld1 { v28.b }[14], [x25]\n"
+ "ld1 { v25.b }[14], [x24]\n"
+ "ld1 { v22.b }[14], [x23]\n"
+ "ld1 { v19.b }[14], [x22]\n"
"b 84f\n"
"77:" // Height 5: Multiply loop: Ragged operand read: partial_1_12
- "tbz x28, #0, 84f\n"
- "ld1 { v31.b }[12], [x27]\n"
- "ld1 { v28.b }[12], [x26]\n"
- "ld1 { v25.b }[12], [x25]\n"
- "ld1 { v22.b }[12], [x24]\n"
- "ld1 { v19.b }[12], [x23]\n"
+ "tbz x27, #0, 84f\n"
+ "ld1 { v31.b }[12], [x26]\n"
+ "ld1 { v28.b }[12], [x25]\n"
+ "ld1 { v25.b }[12], [x24]\n"
+ "ld1 { v22.b }[12], [x23]\n"
+ "ld1 { v19.b }[12], [x22]\n"
"b 84f\n"
"78:" // Height 5: Multiply loop: Ragged operand read: partial_2_8
- "tbz x28, #1, 79f\n"
- "ld1 { v31.h }[4], [x27], #0x2\n"
- "ld1 { v28.h }[4], [x26], #0x2\n"
- "ld1 { v25.h }[4], [x25], #0x2\n"
- "ld1 { v22.h }[4], [x24], #0x2\n"
- "ld1 { v19.h }[4], [x23], #0x2\n"
- "tbz x28, #0, 84f\n"
- "ld1 { v31.b }[10], [x27]\n"
- "ld1 { v28.b }[10], [x26]\n"
- "ld1 { v25.b }[10], [x25]\n"
- "ld1 { v22.b }[10], [x24]\n"
- "ld1 { v19.b }[10], [x23]\n"
+ "tbz x27, #1, 79f\n"
+ "ld1 { v31.h }[4], [x26], #0x2\n"
+ "ld1 { v28.h }[4], [x25], #0x2\n"
+ "ld1 { v25.h }[4], [x24], #0x2\n"
+ "ld1 { v22.h }[4], [x23], #0x2\n"
+ "ld1 { v19.h }[4], [x22], #0x2\n"
+ "tbz x27, #0, 84f\n"
+ "ld1 { v31.b }[10], [x26]\n"
+ "ld1 { v28.b }[10], [x25]\n"
+ "ld1 { v25.b }[10], [x24]\n"
+ "ld1 { v22.b }[10], [x23]\n"
+ "ld1 { v19.b }[10], [x22]\n"
"b 84f\n"
"79:" // Height 5: Multiply loop: Ragged operand read: partial_1_8
- "tbz x28, #0, 84f\n"
- "ld1 { v31.b }[8], [x27]\n"
- "ld1 { v28.b }[8], [x26]\n"
- "ld1 { v25.b }[8], [x25]\n"
- "ld1 { v22.b }[8], [x24]\n"
- "ld1 { v19.b }[8], [x23]\n"
+ "tbz x27, #0, 84f\n"
+ "ld1 { v31.b }[8], [x26]\n"
+ "ld1 { v28.b }[8], [x25]\n"
+ "ld1 { v25.b }[8], [x24]\n"
+ "ld1 { v22.b }[8], [x23]\n"
+ "ld1 { v19.b }[8], [x22]\n"
"b 84f\n"
"80:" // Height 5: Multiply loop: Ragged operand read: partial_4_0
- "tbz x28, #2, 82f\n"
- "ldr s31, [x27], #0x4\n"
- "ldr s28, [x26], #0x4\n"
- "ldr s25, [x25], #0x4\n"
- "ldr s22, [x24], #0x4\n"
- "ldr s19, [x23], #0x4\n"
- "tbz x28, #1, 81f\n"
- "ld1 { v31.h }[2], [x27], #0x2\n"
- "ld1 { v28.h }[2], [x26], #0x2\n"
- "ld1 { v25.h }[2], [x25], #0x2\n"
- "ld1 { v22.h }[2], [x24], #0x2\n"
- "ld1 { v19.h }[2], [x23], #0x2\n"
- "tbz x28, #0, 84f\n"
- "ld1 { v31.b }[6], [x27]\n"
- "ld1 { v28.b }[6], [x26]\n"
- "ld1 { v25.b }[6], [x25]\n"
- "ld1 { v22.b }[6], [x24]\n"
- "ld1 { v19.b }[6], [x23]\n"
+ "tbz x27, #2, 82f\n"
+ "ldr s31, [x26], #0x4\n"
+ "ldr s28, [x25], #0x4\n"
+ "ldr s25, [x24], #0x4\n"
+ "ldr s22, [x23], #0x4\n"
+ "ldr s19, [x22], #0x4\n"
+ "tbz x27, #1, 81f\n"
+ "ld1 { v31.h }[2], [x26], #0x2\n"
+ "ld1 { v28.h }[2], [x25], #0x2\n"
+ "ld1 { v25.h }[2], [x24], #0x2\n"
+ "ld1 { v22.h }[2], [x23], #0x2\n"
+ "ld1 { v19.h }[2], [x22], #0x2\n"
+ "tbz x27, #0, 84f\n"
+ "ld1 { v31.b }[6], [x26]\n"
+ "ld1 { v28.b }[6], [x25]\n"
+ "ld1 { v25.b }[6], [x24]\n"
+ "ld1 { v22.b }[6], [x23]\n"
+ "ld1 { v19.b }[6], [x22]\n"
"b 84f\n"
"81:" // Height 5: Multiply loop: Ragged operand read: partial_1_4
- "tbz x28, #0, 84f\n"
- "ld1 { v31.b }[4], [x27]\n"
- "ld1 { v28.b }[4], [x26]\n"
- "ld1 { v25.b }[4], [x25]\n"
- "ld1 { v22.b }[4], [x24]\n"
- "ld1 { v19.b }[4], [x23]\n"
+ "tbz x27, #0, 84f\n"
+ "ld1 { v31.b }[4], [x26]\n"
+ "ld1 { v28.b }[4], [x25]\n"
+ "ld1 { v25.b }[4], [x24]\n"
+ "ld1 { v22.b }[4], [x23]\n"
+ "ld1 { v19.b }[4], [x22]\n"
"b 84f\n"
"82:" // Height 5: Multiply loop: Ragged operand read: partial_2_0
- "tbz x28, #1, 83f\n"
- "ldr h31, [x27], #0x2\n"
- "ldr h28, [x26], #0x2\n"
- "ldr h25, [x25], #0x2\n"
- "ldr h22, [x24], #0x2\n"
- "ldr h19, [x23], #0x2\n"
- "tbz x28, #0, 84f\n"
- "ld1 { v31.b }[2], [x27]\n"
- "ld1 { v28.b }[2], [x26]\n"
- "ld1 { v25.b }[2], [x25]\n"
- "ld1 { v22.b }[2], [x24]\n"
- "ld1 { v19.b }[2], [x23]\n"
+ "tbz x27, #1, 83f\n"
+ "ldr h31, [x26], #0x2\n"
+ "ldr h28, [x25], #0x2\n"
+ "ldr h25, [x24], #0x2\n"
+ "ldr h22, [x23], #0x2\n"
+ "ldr h19, [x22], #0x2\n"
+ "tbz x27, #0, 84f\n"
+ "ld1 { v31.b }[2], [x26]\n"
+ "ld1 { v28.b }[2], [x25]\n"
+ "ld1 { v25.b }[2], [x24]\n"
+ "ld1 { v22.b }[2], [x23]\n"
+ "ld1 { v19.b }[2], [x22]\n"
"b 84f\n"
"83:" // Height 5: Multiply loop: Ragged operand read: partial_1_0
- "ldr b31, [x27, #0x0]\n"
- "ldr b28, [x26, #0x0]\n"
- "ldr b25, [x25, #0x0]\n"
- "ldr b22, [x24, #0x0]\n"
- "ldr b19, [x23, #0x0]\n"
+ "ldr b31, [x26, #0x0]\n"
+ "ldr b28, [x25, #0x0]\n"
+ "ldr b25, [x24, #0x0]\n"
+ "ldr b22, [x23, #0x0]\n"
+ "ldr b19, [x22, #0x0]\n"
"84:" // Height 5: Multiply loop: Ragged operand read: Done
"uadalp v1.8h, v31.16b\n"
"uadalp v30.8h, v28.16b\n"
@@ -866,32 +866,32 @@ void row_sums_indirect(
"uadalp v24.8h, v22.16b\n"
"uadalp v21.8h, v19.16b\n"
"85:" // Height 5: Multiply loop: No odd multiplies
- "add x9, x9, #0x1\n"
- "cmp x9, x21\n"
+ "add x28, x28, #0x1\n"
+ "cmp x28, x20\n"
"bne 70b\n"
"uadalp v0.4s, v1.8h\n"
"uadalp v29.4s, v30.8h\n"
+ "addp v0.4s, v0.4s, v29.4s\n"
"uadalp v26.4s, v27.8h\n"
"uadalp v23.4s, v24.8h\n"
- "uadalp v20.4s, v21.8h\n"
- "addp v0.4s, v0.4s, v29.4s\n"
"addp v29.4s, v26.4s, v23.4s\n"
- "addp v20.4s, v20.4s, v20.4s\n"
+ "uadalp v20.4s, v21.8h\n"
"addp v0.4s, v0.4s, v29.4s\n"
"addp v20.4s, v20.4s, v20.4s\n"
"mul v0.4s, v0.4s, v2.4s\n"
"st1 { v0.4s }, [%x[out_ptr]], #0x10\n"
+ "addp v20.4s, v20.4s, v20.4s\n"
"mul v20.4s, v20.4s, v2.4s\n"
"str s20, [%x[out_ptr]], #0x4\n"
"b 104f\n"
"86:" // Height 6
"movi v1.8h, #0x0\n"
+ "ldr w21, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "mov x9, #0x0\n"
"movi v0.4s, #0x0\n"
- "mov x10, #0x0\n"
- "ldr w22, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "mov x28, #0x0\n"
"movi v30.8h, #0x0\n"
"movi v29.4s, #0x0\n"
- "mov x9, #0x0\n"
"movi v27.8h, #0x0\n"
"movi v26.4s, #0x0\n"
"movi v24.8h, #0x0\n"
@@ -901,56 +901,55 @@ void row_sums_indirect(
"movi v18.8h, #0x0\n"
"movi v17.4s, #0x0\n"
"87:" // Height 6: String loop
- "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w28, [x20, x9, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w27, [x19, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 88f\n"
- "ldr x20, [%x[input_ptr], x9, LSL #0x3]\n"
- "add x20, x20, %x[input_offset], LSL #3\n"
- "ldr x27, [x20, #0x0]\n"
- "ldr x26, [x20, #0x8]\n"
- "ldr x25, [x20, #0x10]\n"
- "ldr x24, [x20, #0x18]\n"
- "ldr x23, [x20, #0x20]\n"
- "ldr x21, [x20, #0x28]\n"
- "cbnz x9, 89f\n"
- "ldr w20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x27, x27, x20\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x21, x21, x20\n"
+ "ldr x19, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x19, x19, %x[input_offset], LSL #3\n"
+ "ldr x26, [x19, #0x0]\n"
+ "ldr x25, [x19, #0x8]\n"
+ "ldr x24, [x19, #0x10]\n"
+ "ldr x23, [x19, #0x18]\n"
+ "ldr x22, [x19, #0x20]\n"
+ "ldr x20, [x19, #0x28]\n"
+ "cbnz x28, 89f\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x26, x26, x19\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x20, x20, x19\n"
"b 89f\n"
"88:" // Height 6: setup direct input
- "mov x27, %x[input_ptr]\n"
- "add x26, x27, %x[input_offset]\n"
+ "mov x26, %x[input_ptr]\n"
"add x25, x26, %x[input_offset]\n"
"add x24, x25, %x[input_offset]\n"
"add x23, x24, %x[input_offset]\n"
- "add x21, x23, %x[input_offset]\n"
+ "add x22, x23, %x[input_offset]\n"
+ "add x20, x22, %x[input_offset]\n"
"89:" // Height 6: input setup done
- "cmp x28, #0x10\n"
+ "cmp x27, #0x10\n"
"blt 93f\n"
- "cmp x28, #0x20\n"
+ "cmp x27, #0x20\n"
"blt 92f\n"
"90:" // Height 6: Multiply loop: Main loop head
- "ldr q31, [x27, #0x0]\n"
- "ldr q28, [x26, #0x0]\n"
- "cmp x10, #0x7e\n"
- "add x27, x27, #0x10\n"
- "ldr q25, [x25, #0x0]\n"
- "ldr q22, [x24, #0x0]\n"
+ "ldr q31, [x26, #0x0]\n"
+ "ldr q28, [x25, #0x0]\n"
+ "ldr q25, [x24, #0x0]\n"
+ "ldr q22, [x23, #0x0]\n"
+ "ldr q19, [x22, #0x0]\n"
+ "ldr q16, [x20, #0x0]\n"
+ "cmp x9, #0x7e\n"
"add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
- "ldr q19, [x23, #0x0]\n"
- "ldr q16, [x21, #0x0]\n"
"add x24, x24, #0x10\n"
"add x23, x23, #0x10\n"
- "add x21, x21, #0x10\n"
+ "add x22, x22, #0x10\n"
+ "add x20, x20, #0x10\n"
"blt 91f\n"
"uadalp v0.4s, v1.8h\n"
"movi v1.8h, #0x0\n"
- "mov x10, #0x0\n"
"uadalp v29.4s, v30.8h\n"
"movi v30.8h, #0x0\n"
"uadalp v26.4s, v27.8h\n"
@@ -961,157 +960,158 @@ void row_sums_indirect(
"movi v21.8h, #0x0\n"
"uadalp v17.4s, v18.8h\n"
"movi v18.8h, #0x0\n"
+ "mov x9, #0x0\n"
"91:" // Height 6: Multiply loop: unique 6: no collapse
- "sub x28, x28, #0x10\n"
- "cmp x28, #0x20\n"
"uadalp v1.8h, v31.16b\n"
"uadalp v30.8h, v28.16b\n"
"uadalp v27.8h, v25.16b\n"
"uadalp v24.8h, v22.16b\n"
- "add x10, x10, #0x1\n"
"uadalp v21.8h, v19.16b\n"
"uadalp v18.8h, v16.16b\n"
+ "add x9, x9, #0x1\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x20\n"
"bge 90b\n"
"92:" // Height 6: Multiply loop: Single iteration only
- "ldr q31, [x27, #0x0]\n"
- "ldr q28, [x26, #0x0]\n"
- "sub x28, x28, #0x10\n"
+ "sub x27, x27, #0x10\n"
+ "ldr q31, [x26, #0x0]\n"
+ "ldr q28, [x25, #0x0]\n"
+ "ldr q25, [x24, #0x0]\n"
+ "ldr q22, [x23, #0x0]\n"
+ "ldr q19, [x22, #0x0]\n"
+ "ldr q16, [x20, #0x0]\n"
"uadalp v1.8h, v31.16b\n"
- "ldr q25, [x25, #0x0]\n"
- "ldr q22, [x24, #0x0]\n"
"uadalp v30.8h, v28.16b\n"
"uadalp v27.8h, v25.16b\n"
- "ldr q19, [x23, #0x0]\n"
- "ldr q16, [x21, #0x0]\n"
"uadalp v24.8h, v22.16b\n"
"uadalp v21.8h, v19.16b\n"
"uadalp v18.8h, v16.16b\n"
- "add x27, x27, #0x10\n"
"add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
"add x24, x24, #0x10\n"
"add x23, x23, #0x10\n"
- "add x21, x21, #0x10\n"
+ "add x22, x22, #0x10\n"
+ "add x20, x20, #0x10\n"
"93:" // Height 6: Multiply loop: Main loop skip
- "cbz x28, 102f\n"
- "tbz x28, #3, 97f\n"
- "ldr d31, [x27], #0x8\n"
- "ldr d28, [x26], #0x8\n"
- "ldr d25, [x25], #0x8\n"
- "ldr d22, [x24], #0x8\n"
- "ldr d19, [x23], #0x8\n"
- "ldr d16, [x21], #0x8\n"
- "tbz x28, #2, 95f\n"
- "ld1 { v31.s }[2], [x27], #0x4\n"
- "ld1 { v28.s }[2], [x26], #0x4\n"
- "ld1 { v25.s }[2], [x25], #0x4\n"
- "ld1 { v22.s }[2], [x24], #0x4\n"
- "ld1 { v19.s }[2], [x23], #0x4\n"
- "ld1 { v16.s }[2], [x21], #0x4\n"
- "tbz x28, #1, 94f\n"
- "ld1 { v31.h }[6], [x27], #0x2\n"
- "ld1 { v28.h }[6], [x26], #0x2\n"
- "ld1 { v25.h }[6], [x25], #0x2\n"
- "ld1 { v22.h }[6], [x24], #0x2\n"
- "ld1 { v19.h }[6], [x23], #0x2\n"
- "ld1 { v16.h }[6], [x21], #0x2\n"
- "tbz x28, #0, 101f\n"
- "ld1 { v31.b }[14], [x27]\n"
- "ld1 { v28.b }[14], [x26]\n"
- "ld1 { v25.b }[14], [x25]\n"
- "ld1 { v22.b }[14], [x24]\n"
- "ld1 { v19.b }[14], [x23]\n"
- "ld1 { v16.b }[14], [x21]\n"
+ "cbz x27, 102f\n"
+ "tbz x27, #3, 97f\n"
+ "ldr d31, [x26], #0x8\n"
+ "ldr d28, [x25], #0x8\n"
+ "ldr d25, [x24], #0x8\n"
+ "ldr d22, [x23], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
+ "ldr d16, [x20], #0x8\n"
+ "tbz x27, #2, 95f\n"
+ "ld1 { v31.s }[2], [x26], #0x4\n"
+ "ld1 { v28.s }[2], [x25], #0x4\n"
+ "ld1 { v25.s }[2], [x24], #0x4\n"
+ "ld1 { v22.s }[2], [x23], #0x4\n"
+ "ld1 { v19.s }[2], [x22], #0x4\n"
+ "ld1 { v16.s }[2], [x20], #0x4\n"
+ "tbz x27, #1, 94f\n"
+ "ld1 { v31.h }[6], [x26], #0x2\n"
+ "ld1 { v28.h }[6], [x25], #0x2\n"
+ "ld1 { v25.h }[6], [x24], #0x2\n"
+ "ld1 { v22.h }[6], [x23], #0x2\n"
+ "ld1 { v19.h }[6], [x22], #0x2\n"
+ "ld1 { v16.h }[6], [x20], #0x2\n"
+ "tbz x27, #0, 101f\n"
+ "ld1 { v31.b }[14], [x26]\n"
+ "ld1 { v28.b }[14], [x25]\n"
+ "ld1 { v25.b }[14], [x24]\n"
+ "ld1 { v22.b }[14], [x23]\n"
+ "ld1 { v19.b }[14], [x22]\n"
+ "ld1 { v16.b }[14], [x20]\n"
"b 101f\n"
"94:" // Height 6: Multiply loop: Ragged operand read: partial_1_12
- "tbz x28, #0, 101f\n"
- "ld1 { v31.b }[12], [x27]\n"
- "ld1 { v28.b }[12], [x26]\n"
- "ld1 { v25.b }[12], [x25]\n"
- "ld1 { v22.b }[12], [x24]\n"
- "ld1 { v19.b }[12], [x23]\n"
- "ld1 { v16.b }[12], [x21]\n"
+ "tbz x27, #0, 101f\n"
+ "ld1 { v31.b }[12], [x26]\n"
+ "ld1 { v28.b }[12], [x25]\n"
+ "ld1 { v25.b }[12], [x24]\n"
+ "ld1 { v22.b }[12], [x23]\n"
+ "ld1 { v19.b }[12], [x22]\n"
+ "ld1 { v16.b }[12], [x20]\n"
"b 101f\n"
"95:" // Height 6: Multiply loop: Ragged operand read: partial_2_8
- "tbz x28, #1, 96f\n"
- "ld1 { v31.h }[4], [x27], #0x2\n"
- "ld1 { v28.h }[4], [x26], #0x2\n"
- "ld1 { v25.h }[4], [x25], #0x2\n"
- "ld1 { v22.h }[4], [x24], #0x2\n"
- "ld1 { v19.h }[4], [x23], #0x2\n"
- "ld1 { v16.h }[4], [x21], #0x2\n"
- "tbz x28, #0, 101f\n"
- "ld1 { v31.b }[10], [x27]\n"
- "ld1 { v28.b }[10], [x26]\n"
- "ld1 { v25.b }[10], [x25]\n"
- "ld1 { v22.b }[10], [x24]\n"
- "ld1 { v19.b }[10], [x23]\n"
- "ld1 { v16.b }[10], [x21]\n"
+ "tbz x27, #1, 96f\n"
+ "ld1 { v31.h }[4], [x26], #0x2\n"
+ "ld1 { v28.h }[4], [x25], #0x2\n"
+ "ld1 { v25.h }[4], [x24], #0x2\n"
+ "ld1 { v22.h }[4], [x23], #0x2\n"
+ "ld1 { v19.h }[4], [x22], #0x2\n"
+ "ld1 { v16.h }[4], [x20], #0x2\n"
+ "tbz x27, #0, 101f\n"
+ "ld1 { v31.b }[10], [x26]\n"
+ "ld1 { v28.b }[10], [x25]\n"
+ "ld1 { v25.b }[10], [x24]\n"
+ "ld1 { v22.b }[10], [x23]\n"
+ "ld1 { v19.b }[10], [x22]\n"
+ "ld1 { v16.b }[10], [x20]\n"
"b 101f\n"
"96:" // Height 6: Multiply loop: Ragged operand read: partial_1_8
- "tbz x28, #0, 101f\n"
- "ld1 { v31.b }[8], [x27]\n"
- "ld1 { v28.b }[8], [x26]\n"
- "ld1 { v25.b }[8], [x25]\n"
- "ld1 { v22.b }[8], [x24]\n"
- "ld1 { v19.b }[8], [x23]\n"
- "ld1 { v16.b }[8], [x21]\n"
+ "tbz x27, #0, 101f\n"
+ "ld1 { v31.b }[8], [x26]\n"
+ "ld1 { v28.b }[8], [x25]\n"
+ "ld1 { v25.b }[8], [x24]\n"
+ "ld1 { v22.b }[8], [x23]\n"
+ "ld1 { v19.b }[8], [x22]\n"
+ "ld1 { v16.b }[8], [x20]\n"
"b 101f\n"
"97:" // Height 6: Multiply loop: Ragged operand read: partial_4_0
- "tbz x28, #2, 99f\n"
- "ldr s31, [x27], #0x4\n"
- "ldr s28, [x26], #0x4\n"
- "ldr s25, [x25], #0x4\n"
- "ldr s22, [x24], #0x4\n"
- "ldr s19, [x23], #0x4\n"
- "ldr s16, [x21], #0x4\n"
- "tbz x28, #1, 98f\n"
- "ld1 { v31.h }[2], [x27], #0x2\n"
- "ld1 { v28.h }[2], [x26], #0x2\n"
- "ld1 { v25.h }[2], [x25], #0x2\n"
- "ld1 { v22.h }[2], [x24], #0x2\n"
- "ld1 { v19.h }[2], [x23], #0x2\n"
- "ld1 { v16.h }[2], [x21], #0x2\n"
- "tbz x28, #0, 101f\n"
- "ld1 { v31.b }[6], [x27]\n"
- "ld1 { v28.b }[6], [x26]\n"
- "ld1 { v25.b }[6], [x25]\n"
- "ld1 { v22.b }[6], [x24]\n"
- "ld1 { v19.b }[6], [x23]\n"
- "ld1 { v16.b }[6], [x21]\n"
+ "tbz x27, #2, 99f\n"
+ "ldr s31, [x26], #0x4\n"
+ "ldr s28, [x25], #0x4\n"
+ "ldr s25, [x24], #0x4\n"
+ "ldr s22, [x23], #0x4\n"
+ "ldr s19, [x22], #0x4\n"
+ "ldr s16, [x20], #0x4\n"
+ "tbz x27, #1, 98f\n"
+ "ld1 { v31.h }[2], [x26], #0x2\n"
+ "ld1 { v28.h }[2], [x25], #0x2\n"
+ "ld1 { v25.h }[2], [x24], #0x2\n"
+ "ld1 { v22.h }[2], [x23], #0x2\n"
+ "ld1 { v19.h }[2], [x22], #0x2\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
+ "tbz x27, #0, 101f\n"
+ "ld1 { v31.b }[6], [x26]\n"
+ "ld1 { v28.b }[6], [x25]\n"
+ "ld1 { v25.b }[6], [x24]\n"
+ "ld1 { v22.b }[6], [x23]\n"
+ "ld1 { v19.b }[6], [x22]\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 101f\n"
"98:" // Height 6: Multiply loop: Ragged operand read: partial_1_4
- "tbz x28, #0, 101f\n"
- "ld1 { v31.b }[4], [x27]\n"
- "ld1 { v28.b }[4], [x26]\n"
- "ld1 { v25.b }[4], [x25]\n"
- "ld1 { v22.b }[4], [x24]\n"
- "ld1 { v19.b }[4], [x23]\n"
- "ld1 { v16.b }[4], [x21]\n"
+ "tbz x27, #0, 101f\n"
+ "ld1 { v31.b }[4], [x26]\n"
+ "ld1 { v28.b }[4], [x25]\n"
+ "ld1 { v25.b }[4], [x24]\n"
+ "ld1 { v22.b }[4], [x23]\n"
+ "ld1 { v19.b }[4], [x22]\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 101f\n"
"99:" // Height 6: Multiply loop: Ragged operand read: partial_2_0
- "tbz x28, #1, 100f\n"
- "ldr h31, [x27], #0x2\n"
- "ldr h28, [x26], #0x2\n"
- "ldr h25, [x25], #0x2\n"
- "ldr h22, [x24], #0x2\n"
- "ldr h19, [x23], #0x2\n"
- "ldr h16, [x21], #0x2\n"
- "tbz x28, #0, 101f\n"
- "ld1 { v31.b }[2], [x27]\n"
- "ld1 { v28.b }[2], [x26]\n"
- "ld1 { v25.b }[2], [x25]\n"
- "ld1 { v22.b }[2], [x24]\n"
- "ld1 { v19.b }[2], [x23]\n"
- "ld1 { v16.b }[2], [x21]\n"
+ "tbz x27, #1, 100f\n"
+ "ldr h31, [x26], #0x2\n"
+ "ldr h28, [x25], #0x2\n"
+ "ldr h25, [x24], #0x2\n"
+ "ldr h22, [x23], #0x2\n"
+ "ldr h19, [x22], #0x2\n"
+ "ldr h16, [x20], #0x2\n"
+ "tbz x27, #0, 101f\n"
+ "ld1 { v31.b }[2], [x26]\n"
+ "ld1 { v28.b }[2], [x25]\n"
+ "ld1 { v25.b }[2], [x24]\n"
+ "ld1 { v22.b }[2], [x23]\n"
+ "ld1 { v19.b }[2], [x22]\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 101f\n"
"100:" // Height 6: Multiply loop: Ragged operand read: partial_1_0
- "ldr b31, [x27, #0x0]\n"
- "ldr b28, [x26, #0x0]\n"
- "ldr b25, [x25, #0x0]\n"
- "ldr b22, [x24, #0x0]\n"
- "ldr b19, [x23, #0x0]\n"
- "ldr b16, [x21, #0x0]\n"
+ "ldr b31, [x26, #0x0]\n"
+ "ldr b28, [x25, #0x0]\n"
+ "ldr b25, [x24, #0x0]\n"
+ "ldr b22, [x23, #0x0]\n"
+ "ldr b19, [x22, #0x0]\n"
+ "ldr b16, [x20, #0x0]\n"
"101:" // Height 6: Multiply loop: Ragged operand read: Done
"uadalp v1.8h, v31.16b\n"
"uadalp v30.8h, v28.16b\n"
@@ -1120,23 +1120,23 @@ void row_sums_indirect(
"uadalp v21.8h, v19.16b\n"
"uadalp v18.8h, v16.16b\n"
"102:" // Height 6: Multiply loop: No odd multiplies
- "add x9, x9, #0x1\n"
- "cmp x9, x22\n"
+ "add x28, x28, #0x1\n"
+ "cmp x28, x21\n"
"bne 87b\n"
"uadalp v0.4s, v1.8h\n"
"uadalp v29.4s, v30.8h\n"
- "subs %x[M], %x[M], #0x6\n"
+ "addp v0.4s, v0.4s, v29.4s\n"
"uadalp v26.4s, v27.8h\n"
"uadalp v23.4s, v24.8h\n"
+ "addp v29.4s, v26.4s, v23.4s\n"
"uadalp v20.4s, v21.8h\n"
"uadalp v17.4s, v18.8h\n"
"addp v0.4s, v0.4s, v29.4s\n"
- "addp v29.4s, v26.4s, v23.4s\n"
+ "subs %x[M], %x[M], #0x6\n"
"addp v20.4s, v20.4s, v17.4s\n"
- "addp v0.4s, v0.4s, v29.4s\n"
- "addp v20.4s, v20.4s, v20.4s\n"
"mul v0.4s, v0.4s, v2.4s\n"
"st1 { v0.4s }, [%x[out_ptr]], #0x10\n"
+ "addp v20.4s, v20.4s, v20.4s\n"
"mul v20.4s, v20.4s, v2.4s\n"
"str d20, [%x[out_ptr]], #0x8\n"
"beq 104f\n"
@@ -1144,14 +1144,14 @@ void row_sums_indirect(
"add %x[input_offset], %x[input_offset], #0x6\n"
"b 1b\n"
"103:" // Update direct input
- "mov x20, #0x6\n"
- "madd %x[input_ptr], x20, %x[input_offset], %x[input_ptr]\n"
+ "mov x19, #0x6\n"
+ "madd %x[input_ptr], x19, %x[input_offset], %x[input_ptr]\n"
"b 1b\n"
"104:" // Exit
- : [M] "+&r" (M), [input_offset] "+&r" (input_offset), [input_ptr] "+&r" (input_ptr), [out_ptr] "+&r" (out_ptr)
+ : [M] "+r" (M), [input_offset] "+r" (input_offset), [input_ptr] "+r" (input_ptr), [out_ptr] "+r" (out_ptr)
: [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [flags] "r" (flags), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [qp] "r" (qp)
- : "cc", "memory", "v0", "v1", "v2", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
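As context for the hunks above: `row_sums_indirect` computes per-row byte sums with a two-stage widening accumulation. `uadalp v1.8h, v31.16b` folds pairs of u8 lanes into u16 accumulators; the `#0x7e` block-count check collapses those into u32 accumulators (`uadalp v0.4s, v1.8h`) before any u16 lane can overflow, and the `addp` reductions, `mul ... v2.4s` scaling, and stores finish the job. A minimal scalar C++ model of that scheme — names and structure are ours, purely illustrative, not the library's API:

#include <cstddef>
#include <cstdint>

// Illustrative scalar model of the widening-accumulate scheme in
// row_sums_indirect. In the asm, each 16-byte block adds two bytes to
// every u16 lane; after 0x7e (126) blocks a lane has absorbed at most
// 252 bytes (252 * 255 = 64260 < 65535), so it is flushed into a u32
// accumulator before it can overflow.
uint32_t row_sum_model(const uint8_t *row, size_t n, uint32_t scale)
{
    uint32_t total = 0; // models v0.4s (after the addp reductions)
    uint16_t lane  = 0; // models one lane of v1.8h
    size_t   adds  = 0;
    for (size_t i = 0; i < n; i++)
    {
        lane = static_cast<uint16_t>(lane + row[i]);
        if (++adds == 252) // mirrors the asm's "cmp ..., #0x7e" collapse
        {
            total += lane;
            lane = 0;
            adds = 0;
        }
    }
    total += lane;        // final "uadalp v0.4s, v1.8h"
    return total * scale; // "mul v0.4s, v0.4s, v2.4s"
}

The ragged-operand-read blocks (labels 77 to 83 and 94 to 100) only change how the last, partial 16-byte block is loaded lane by lane; the accumulation itself is unchanged.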
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_128.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_128.hpp
index e6186984e8..41c1c282e5 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_128.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_128.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -36,238 +36,238 @@ void a64_transpose_interleave_128(uint32_t *out, const uint32_t *in, size_t widt
"cmp %x[height], #0x4\n"
"blt 10f\n"
"1:" // Main row loop: Head
- "mov x25, %x[in]\n"
- "mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
- "add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
- "cmp x24, #0x20\n"
+ "mov x24, %x[in]\n"
+ "mov x23, %x[out]\n"
+ "add x22, x24, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x20\n"
"blt 3f\n"
"2:" // Main row loop: Column loop
- "ldr q15, [x25], #0x10\n"
- "ldr q14, [x23], #0x10\n"
- "sub x24, x24, #0x20\n"
- "cmp x24, #0x20\n"
- "ldr q13, [x22], #0x10\n"
+ "ldr q15, [x24], #0x10\n"
+ "sub x19, x19, #0x20\n"
+ "ldr q14, [x22], #0x10\n"
+ "cmp x19, #0x20\n"
+ "ldr q13, [x21], #0x10\n"
"ldr q12, [x20], #0x10\n"
- "ldr q11, [x25], #0x10\n"
- "ldr q10, [x23], #0x10\n"
- "ldr q9, [x22], #0x10\n"
+ "ldr q11, [x24], #0x10\n"
+ "ldr q10, [x22], #0x10\n"
+ "ldr q9, [x21], #0x10\n"
"ldr q8, [x20], #0x10\n"
- "ldr q7, [x25], #0x10\n"
- "ldr q6, [x23], #0x10\n"
- "ldr q5, [x22], #0x10\n"
+ "ldr q7, [x24], #0x10\n"
+ "ldr q6, [x22], #0x10\n"
+ "ldr q5, [x21], #0x10\n"
"ldr q4, [x20], #0x10\n"
- "ldr q3, [x25], #0x10\n"
- "ldr q2, [x23], #0x10\n"
- "ldr q1, [x22], #0x10\n"
+ "ldr q3, [x24], #0x10\n"
+ "ldr q2, [x22], #0x10\n"
+ "ldr q1, [x21], #0x10\n"
"ldr q0, [x20], #0x10\n"
- "ldr q31, [x25], #0x10\n"
- "ldr q30, [x23], #0x10\n"
- "ldr q29, [x22], #0x10\n"
+ "ldr q31, [x24], #0x10\n"
+ "ldr q30, [x22], #0x10\n"
+ "ldr q29, [x21], #0x10\n"
"ldr q28, [x20], #0x10\n"
- "ldr q27, [x25], #0x10\n"
- "ldr q26, [x23], #0x10\n"
- "ldr q25, [x22], #0x10\n"
+ "ldr q27, [x24], #0x10\n"
+ "ldr q26, [x22], #0x10\n"
+ "ldr q25, [x21], #0x10\n"
"ldr q24, [x20], #0x10\n"
- "ldr q23, [x25], #0x10\n"
- "ldr q22, [x23], #0x10\n"
- "ldr q21, [x22], #0x10\n"
+ "ldr q23, [x24], #0x10\n"
+ "ldr q22, [x22], #0x10\n"
+ "ldr q21, [x21], #0x10\n"
"ldr q20, [x20], #0x10\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "ldr q17, [x22], #0x10\n"
+ "ldr q19, [x24], #0x10\n"
+ "ldr q18, [x22], #0x10\n"
+ "ldr q17, [x21], #0x10\n"
"ldr q16, [x20], #0x10\n"
- "str q15, [x21, #0x0]\n"
- "str q11, [x21, #0x10]\n"
- "str q7, [x21, #0x20]\n"
- "str q3, [x21, #0x30]\n"
- "str q31, [x21, #0x40]\n"
- "str q27, [x21, #0x50]\n"
- "str q23, [x21, #0x60]\n"
- "str q19, [x21, #0x70]\n"
- "str q14, [x21, #0x80]\n"
- "str q10, [x21, #0x90]\n"
- "str q6, [x21, #0xa0]\n"
- "str q2, [x21, #0xb0]\n"
- "str q30, [x21, #0xc0]\n"
- "str q26, [x21, #0xd0]\n"
- "str q22, [x21, #0xe0]\n"
- "str q18, [x21, #0xf0]\n"
- "str q13, [x21, #0x100]\n"
- "str q9, [x21, #0x110]\n"
- "str q5, [x21, #0x120]\n"
- "str q1, [x21, #0x130]\n"
- "str q29, [x21, #0x140]\n"
- "str q25, [x21, #0x150]\n"
- "str q21, [x21, #0x160]\n"
- "str q17, [x21, #0x170]\n"
- "str q12, [x21, #0x180]\n"
- "str q8, [x21, #0x190]\n"
- "str q4, [x21, #0x1a0]\n"
- "str q0, [x21, #0x1b0]\n"
- "str q28, [x21, #0x1c0]\n"
- "str q24, [x21, #0x1d0]\n"
- "str q20, [x21, #0x1e0]\n"
- "str q16, [x21, #0x1f0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q15, [x23, #0x0]\n"
+ "str q11, [x23, #0x10]\n"
+ "str q7, [x23, #0x20]\n"
+ "str q3, [x23, #0x30]\n"
+ "str q31, [x23, #0x40]\n"
+ "str q27, [x23, #0x50]\n"
+ "str q23, [x23, #0x60]\n"
+ "str q19, [x23, #0x70]\n"
+ "str q14, [x23, #0x80]\n"
+ "str q10, [x23, #0x90]\n"
+ "str q6, [x23, #0xa0]\n"
+ "str q2, [x23, #0xb0]\n"
+ "str q30, [x23, #0xc0]\n"
+ "str q26, [x23, #0xd0]\n"
+ "str q22, [x23, #0xe0]\n"
+ "str q18, [x23, #0xf0]\n"
+ "str q13, [x23, #0x100]\n"
+ "str q9, [x23, #0x110]\n"
+ "str q5, [x23, #0x120]\n"
+ "str q1, [x23, #0x130]\n"
+ "str q29, [x23, #0x140]\n"
+ "str q25, [x23, #0x150]\n"
+ "str q21, [x23, #0x160]\n"
+ "str q17, [x23, #0x170]\n"
+ "str q12, [x23, #0x180]\n"
+ "str q8, [x23, #0x190]\n"
+ "str q4, [x23, #0x1a0]\n"
+ "str q0, [x23, #0x1b0]\n"
+ "str q28, [x23, #0x1c0]\n"
+ "str q24, [x23, #0x1d0]\n"
+ "str q20, [x23, #0x1e0]\n"
+ "str q16, [x23, #0x1f0]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Column loop skip
- "cmp x24, #0x10\n"
+ "cmp x19, #0x10\n"
"blt 5f\n"
"4:" // Main row loop: width 16 loop: loop
- "ldr q31, [x25], #0x10\n"
- "ldr q30, [x23], #0x10\n"
- "sub x24, x24, #0x10\n"
- "cmp x24, #0x10\n"
- "ldr q29, [x22], #0x10\n"
+ "ldr q31, [x24], #0x10\n"
+ "sub x19, x19, #0x10\n"
+ "ldr q30, [x22], #0x10\n"
+ "cmp x19, #0x10\n"
+ "ldr q29, [x21], #0x10\n"
"ldr q28, [x20], #0x10\n"
- "ldr q27, [x25], #0x10\n"
- "ldr q26, [x23], #0x10\n"
- "ldr q25, [x22], #0x10\n"
+ "ldr q27, [x24], #0x10\n"
+ "ldr q26, [x22], #0x10\n"
+ "ldr q25, [x21], #0x10\n"
"ldr q24, [x20], #0x10\n"
- "ldr q23, [x25], #0x10\n"
- "ldr q22, [x23], #0x10\n"
- "ldr q21, [x22], #0x10\n"
+ "ldr q23, [x24], #0x10\n"
+ "ldr q22, [x22], #0x10\n"
+ "ldr q21, [x21], #0x10\n"
"ldr q20, [x20], #0x10\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "ldr q17, [x22], #0x10\n"
+ "ldr q19, [x24], #0x10\n"
+ "ldr q18, [x22], #0x10\n"
+ "ldr q17, [x21], #0x10\n"
"ldr q16, [x20], #0x10\n"
- "str q31, [x21, #0x0]\n"
- "str q27, [x21, #0x10]\n"
- "str q23, [x21, #0x20]\n"
- "str q19, [x21, #0x30]\n"
- "str q30, [x21, #0x80]\n"
- "str q26, [x21, #0x90]\n"
- "str q22, [x21, #0xa0]\n"
- "str q18, [x21, #0xb0]\n"
- "str q29, [x21, #0x100]\n"
- "str q25, [x21, #0x110]\n"
- "str q21, [x21, #0x120]\n"
- "str q17, [x21, #0x130]\n"
- "str q28, [x21, #0x180]\n"
- "str q24, [x21, #0x190]\n"
- "str q20, [x21, #0x1a0]\n"
- "str q16, [x21, #0x1b0]\n"
- "add x21, x21, #0x40\n"
+ "str q31, [x23, #0x0]\n"
+ "str q27, [x23, #0x10]\n"
+ "str q23, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "str q30, [x23, #0x80]\n"
+ "str q26, [x23, #0x90]\n"
+ "str q22, [x23, #0xa0]\n"
+ "str q18, [x23, #0xb0]\n"
+ "str q29, [x23, #0x100]\n"
+ "str q25, [x23, #0x110]\n"
+ "str q21, [x23, #0x120]\n"
+ "str q17, [x23, #0x130]\n"
+ "str q28, [x23, #0x180]\n"
+ "str q24, [x23, #0x190]\n"
+ "str q20, [x23, #0x1a0]\n"
+ "str q16, [x23, #0x1b0]\n"
+ "add x23, x23, #0x40\n"
"bge 4b\n"
"5:" // Main row loop: width 16 loop: skip
- "cmp x24, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr q17, [x22], #0x10\n"
+ "ldr q19, [x24], #0x10\n"
+ "sub x19, x19, #0x4\n"
+ "ldr q18, [x22], #0x10\n"
+ "cmp x19, #0x4\n"
+ "ldr q17, [x21], #0x10\n"
"ldr q16, [x20], #0x10\n"
- "str q19, [x21, #0x0]\n"
- "str q18, [x21, #0x80]\n"
- "str q17, [x21, #0x100]\n"
- "str q16, [x21, #0x180]\n"
- "add x21, x21, #0x10\n"
+ "str q19, [x23, #0x0]\n"
+ "str q18, [x23, #0x80]\n"
+ "str q17, [x23, #0x100]\n"
+ "str q16, [x23, #0x180]\n"
+ "add x23, x23, #0x10\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
- "cmp x24, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
- "ldr s19, [x25], #0x4\n"
- "ldr s18, [x23], #0x4\n"
- "sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr s17, [x22], #0x4\n"
+ "ldr s19, [x24], #0x4\n"
+ "sub x19, x19, #0x1\n"
+ "ldr s18, [x22], #0x4\n"
+ "cmp x19, #0x1\n"
+ "ldr s17, [x21], #0x4\n"
"ldr s16, [x20], #0x4\n"
- "str s19, [x21, #0x0]\n"
- "str s18, [x21, #0x80]\n"
- "str s17, [x21, #0x100]\n"
- "str s16, [x21, #0x180]\n"
- "add x21, x21, #0x4\n"
+ "str s19, [x23, #0x0]\n"
+ "str s18, [x23, #0x80]\n"
+ "str s17, [x23, #0x100]\n"
+ "str s16, [x23, #0x180]\n"
+ "add x23, x23, #0x4\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
- "cmp %x[height], #0x4\n"
"add %x[out], %x[out], #0x200\n"
+ "cmp %x[height], #0x4\n"
"bge 1b\n"
"cbz %x[height], 20f\n"
"10:" // Main loop skip
"11:" // Tail row loop: Head
- "mov x20, %x[width]\n"
- "mov x25, %x[in]\n"
- "cmp x20, #0x20\n"
- "add %x[in], x25, %x[in_stride]\n"
- "mov x21, %x[out]\n"
+ "mov x24, %x[in]\n"
+ "mov x23, %x[out]\n"
+ "add %x[in], x24, %x[in_stride]\n"
"sub %x[height], %x[height], #0x1\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x20\n"
"blt 13f\n"
"12:" // Tail row loop: Column loop
- "ldr q23, [x25], #0x10\n"
- "ldr q22, [x25], #0x10\n"
- "sub x20, x20, #0x20\n"
- "cmp x20, #0x20\n"
- "ldr q21, [x25], #0x10\n"
- "ldr q20, [x25], #0x10\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x25], #0x10\n"
- "ldr q17, [x25], #0x10\n"
- "ldr q16, [x25], #0x10\n"
- "str q23, [x21, #0x0]\n"
- "str q22, [x21, #0x10]\n"
- "str q21, [x21, #0x20]\n"
- "str q20, [x21, #0x30]\n"
- "str q19, [x21, #0x40]\n"
- "str q18, [x21, #0x50]\n"
- "str q17, [x21, #0x60]\n"
- "str q16, [x21, #0x70]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr q23, [x24], #0x10\n"
+ "sub x19, x19, #0x20\n"
+ "cmp x19, #0x20\n"
+ "ldr q22, [x24], #0x10\n"
+ "ldr q21, [x24], #0x10\n"
+ "ldr q20, [x24], #0x10\n"
+ "ldr q19, [x24], #0x10\n"
+ "ldr q18, [x24], #0x10\n"
+ "ldr q17, [x24], #0x10\n"
+ "ldr q16, [x24], #0x10\n"
+ "str q23, [x23, #0x0]\n"
+ "str q22, [x23, #0x10]\n"
+ "str q21, [x23, #0x20]\n"
+ "str q20, [x23, #0x30]\n"
+ "str q19, [x23, #0x40]\n"
+ "str q18, [x23, #0x50]\n"
+ "str q17, [x23, #0x60]\n"
+ "str q16, [x23, #0x70]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 12b\n"
"13:" // Tail row loop: Column loop skip
- "cmp x20, #0x10\n"
+ "cmp x19, #0x10\n"
"blt 15f\n"
"14:" // Tail row loop: width 16 loop: loop
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x25], #0x10\n"
- "sub x20, x20, #0x10\n"
- "cmp x20, #0x10\n"
- "ldr q17, [x25], #0x10\n"
- "ldr q16, [x25], #0x10\n"
- "str q19, [x21, #0x0]\n"
- "str q18, [x21, #0x10]\n"
- "str q17, [x21, #0x20]\n"
- "str q16, [x21, #0x30]\n"
- "add x21, x21, #0x40\n"
+ "ldr q19, [x24], #0x10\n"
+ "sub x19, x19, #0x10\n"
+ "cmp x19, #0x10\n"
+ "ldr q18, [x24], #0x10\n"
+ "ldr q17, [x24], #0x10\n"
+ "ldr q16, [x24], #0x10\n"
+ "str q19, [x23, #0x0]\n"
+ "str q18, [x23, #0x10]\n"
+ "str q17, [x23, #0x20]\n"
+ "str q16, [x23, #0x30]\n"
+ "add x23, x23, #0x40\n"
"bge 14b\n"
"15:" // Tail row loop: width 16 loop: skip
- "cmp x20, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 17f\n"
"16:" // Tail row loop: width 4 loop: loop
- "ldr q16, [x25], #0x10\n"
- "sub x20, x20, #0x4\n"
- "cmp x20, #0x4\n"
- "str q16, [x21, #0x0]\n"
- "add x21, x21, #0x10\n"
+ "ldr q16, [x24], #0x10\n"
+ "sub x19, x19, #0x4\n"
+ "cmp x19, #0x4\n"
+ "str q16, [x23, #0x0]\n"
+ "add x23, x23, #0x10\n"
"bge 16b\n"
"17:" // Tail row loop: width 4 loop: skip
- "cmp x20, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 19f\n"
"18:" // Tail row loop: width 1 loop: loop
- "ldr s16, [x25], #0x4\n"
- "sub x20, x20, #0x1\n"
- "cmp x20, #0x1\n"
- "str s16, [x21, #0x0]\n"
- "add x21, x21, #0x4\n"
+ "ldr s16, [x24], #0x4\n"
+ "sub x19, x19, #0x1\n"
+ "cmp x19, #0x1\n"
+ "str s16, [x23, #0x0]\n"
+ "add x23, x23, #0x4\n"
"bge 18b\n"
"19:" // Tail row loop: width 1 loop: skip
- "cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x80\n"
+ "cmp %x[height], #0x1\n"
"bge 11b\n"
"20:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24"
);
}
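The `a64_transpose_interleave_128` transform whose registers are renumbered above is a pure relayout: groups of four input rows are copied 16 bytes at a time into 0x200-byte output tiles, with row r occupying bytes [r*0x80, (r+1)*0x80) of each tile — hence the `#0x80`, `#0x100`, `#0x180` store offsets. A rough C++ sketch of that addressing for one four-row group, using byte pointers for clarity; the helper and its names are ours, not the library's:

#include <cstdint>
#include <cstring>

// Rough reference for one 4-row group of a64_transpose_interleave_128:
// row r lands at byte offset r * 0x80 inside each out_stride-spaced
// tile, and each tile covers 0x80 bytes of every row.
// 'width' is in uint32_t elements, matching the kernel's signature.
void interleave128_group_ref(uint8_t *out, const uint8_t *in,
                             size_t width, size_t in_stride,
                             size_t out_stride)
{
    const size_t row_bytes = width * sizeof(uint32_t);
    for (size_t r = 0; r < 4; r++)
    {
        const uint8_t *src = in + r * in_stride;
        for (size_t off = 0; off < row_bytes; off += 0x80)
        {
            const size_t n = (row_bytes - off < 0x80) ? row_bytes - off : 0x80;
            std::memcpy(out + (off / 0x80) * out_stride + r * 0x80, src + off, n);
        }
    }
}

The narrower 16-, 4- and 1-element tail loops in the asm realise the same layout for the final partial tile, and the tail-row path after label 11 does the analogous single-row copy with 0x80-byte tiles.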
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_1x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_1x4.hpp
index 6d97f71c7d..ec3273a526 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_1x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_1x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -42,360 +42,362 @@ void a64_transpose_interleave_12_1x4(uint8_t *out, const uint8_t *in, size_t wid
"cmp %x[height], #0x8\n"
"blt 10f\n"
"1:" // Main row loop: Head
- "mov x9, %x[in]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
- "add x26, x27, %x[in_stride]\n"
+ "mov x28, %x[in]\n"
+ "mov x27, %x[out]\n"
+ "add x26, x28, %x[in_stride]\n"
"add x25, x26, %x[in_stride]\n"
- "mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "add x23, x24, %x[in_stride]\n"
"add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
- "cmp x24, #0x30\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x8\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x30\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ldr q21, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "sub x24, x24, #0x30\n"
- "cmp x24, #0x30\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v31.16b, v21.16b, v17.16b\n"
- "zip1 v22.16b, v20.16b, v16.16b\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v14.16b, v21.16b, v17.16b\n"
- "zip2 v13.16b, v20.16b, v16.16b\n"
- "ldr q17, [x22], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "zip1 v30.16b, v19.16b, v17.16b\n"
- "zip1 v29.16b, v18.16b, v16.16b\n"
- "ldr q21, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "zip2 v12.16b, v19.16b, v17.16b\n"
- "zip2 v11.16b, v18.16b, v16.16b\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v10.16b, v21.16b, v17.16b\n"
- "zip1 v9.16b, v20.16b, v16.16b\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v8.16b, v21.16b, v17.16b\n"
- "zip2 v7.16b, v20.16b, v16.16b\n"
- "ldr q17, [x22], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "zip1 v6.16b, v19.16b, v17.16b\n"
- "zip1 v5.16b, v18.16b, v16.16b\n"
- "ldr q28, [x9], #0x10\n"
- "ldr q27, [x28], #0x10\n"
- "zip2 v4.16b, v19.16b, v17.16b\n"
- "zip2 v3.16b, v18.16b, v16.16b\n"
- "ldr q26, [x27], #0x10\n"
- "ldr q25, [x26], #0x10\n"
- "zip1 v2.16b, v28.16b, v26.16b\n"
- "zip1 v1.16b, v27.16b, v25.16b\n"
- "ldr q24, [x25], #0x10\n"
- "ldr q23, [x23], #0x10\n"
- "zip1 v16.16b, v31.16b, v22.16b\n"
- "zip2 v22.16b, v31.16b, v22.16b\n"
- "ldr q21, [x22], #0x10\n"
- "ldr q20, [x20], #0x10\n"
- "zip1 v0.16b, v24.16b, v21.16b\n"
- "zip1 v31.16b, v23.16b, v20.16b\n"
- "zip1 v19.16b, v14.16b, v13.16b\n"
- "zip1 v18.16b, v30.16b, v29.16b\n"
- "str q16, [x21, #0x0]\n"
- "zip2 v16.16b, v30.16b, v29.16b\n"
- "zip1 v17.16b, v12.16b, v11.16b\n"
- "str q22, [x21, #0x10]\n"
- "str q19, [x21, #0x20]\n"
- "zip2 v30.16b, v28.16b, v26.16b\n"
- "zip2 v29.16b, v27.16b, v25.16b\n"
- "str q18, [x21, #0x30]\n"
- "zip2 v28.16b, v24.16b, v21.16b\n"
- "zip2 v27.16b, v23.16b, v20.16b\n"
- "str q16, [x21, #0x40]\n"
- "zip2 v21.16b, v14.16b, v13.16b\n"
- "zip1 v16.16b, v10.16b, v9.16b\n"
- "str q17, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
- "zip2 v20.16b, v10.16b, v9.16b\n"
- "zip2 v19.16b, v12.16b, v11.16b\n"
- "zip1 v18.16b, v6.16b, v5.16b\n"
- "zip2 v17.16b, v6.16b, v5.16b\n"
- "str q21, [x21, #0x0]\n"
- "str q16, [x21, #0x10]\n"
- "zip1 v16.16b, v8.16b, v7.16b\n"
- "zip2 v26.16b, v8.16b, v7.16b\n"
- "str q20, [x21, #0x20]\n"
- "zip1 v25.16b, v2.16b, v1.16b\n"
- "zip1 v24.16b, v4.16b, v3.16b\n"
- "str q19, [x21, #0x30]\n"
- "zip2 v23.16b, v4.16b, v3.16b\n"
- "zip1 v22.16b, v0.16b, v31.16b\n"
- "str q18, [x21, #0x40]\n"
- "zip2 v21.16b, v2.16b, v1.16b\n"
- "zip1 v20.16b, v30.16b, v29.16b\n"
- "str q17, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
- "zip2 v19.16b, v30.16b, v29.16b\n"
- "zip2 v18.16b, v0.16b, v31.16b\n"
- "str q16, [x21, #0x0]\n"
- "zip1 v17.16b, v28.16b, v27.16b\n"
- "zip2 v16.16b, v28.16b, v27.16b\n"
- "str q26, [x21, #0x10]\n"
- "str q25, [x21, #0x20]\n"
- "str q24, [x21, #0x30]\n"
- "str q23, [x21, #0x40]\n"
- "str q22, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
- "str q21, [x21, #0x0]\n"
- "str q20, [x21, #0x10]\n"
- "str q19, [x21, #0x20]\n"
- "str q18, [x21, #0x30]\n"
- "str q17, [x21, #0x40]\n"
- "str q16, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr q18, [x28], #0x10\n"
+ "sub x19, x19, #0x30\n"
+ "ldr q23, [x26], #0x10\n"
+ "cmp x19, #0x30\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v19.16b, v18.16b, v16.16b\n"
+ "ldr q17, [x28], #0x10\n"
+ "zip2 v22.16b, v18.16b, v16.16b\n"
+ "ldr q11, [x26], #0x10\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v21.16b, v17.16b, v16.16b\n"
+ "ldr q18, [x28], #0x10\n"
+ "zip2 v10.16b, v17.16b, v16.16b\n"
+ "ldr q9, [x26], #0x10\n"
+ "ldr q17, [x25], #0x10\n"
+ "zip1 v8.16b, v18.16b, v17.16b\n"
+ "ldr q16, [x24], #0x10\n"
+ "zip2 v7.16b, v18.16b, v17.16b\n"
+ "ldr q20, [x23], #0x10\n"
+ "ldr q6, [x22], #0x10\n"
+ "zip1 v17.16b, v23.16b, v16.16b\n"
+ "ldr q5, [x24], #0x10\n"
+ "zip2 v16.16b, v23.16b, v16.16b\n"
+ "ldr q4, [x23], #0x10\n"
+ "zip1 v3.16b, v19.16b, v17.16b\n"
+ "ldr q2, [x22], #0x10\n"
+ "zip2 v1.16b, v19.16b, v17.16b\n"
+ "ldr q19, [x21], #0x10\n"
+ "zip1 v0.16b, v22.16b, v16.16b\n"
+ "ldr q31, [x24], #0x10\n"
+ "zip2 v30.16b, v22.16b, v16.16b\n"
+ "ldr q29, [x23], #0x10\n"
+ "zip1 v16.16b, v11.16b, v5.16b\n"
+ "ldr q28, [x22], #0x10\n"
+ "zip1 v27.16b, v21.16b, v16.16b\n"
+ "ldr q26, [x21], #0x10\n"
+ "zip1 v18.16b, v20.16b, v19.16b\n"
+ "ldr q17, [x20], #0x10\n"
+ "zip2 v20.16b, v20.16b, v19.16b\n"
+ "ldr q25, [x21], #0x10\n"
+ "zip2 v24.16b, v21.16b, v16.16b\n"
+ "zip1 v23.16b, v4.16b, v26.16b\n"
+ "ldr q22, [x20], #0x10\n"
+ "zip1 v16.16b, v6.16b, v17.16b\n"
+ "ldr q21, [x20], #0x10\n"
+ "zip1 v19.16b, v18.16b, v16.16b\n"
+ "zip2 v18.16b, v18.16b, v16.16b\n"
+ "str q3, [x27, #0x0]\n"
+ "zip2 v16.16b, v6.16b, v17.16b\n"
+ "str q1, [x27, #0x10]\n"
+ "zip1 v17.16b, v20.16b, v16.16b\n"
+ "str q0, [x27, #0x20]\n"
+ "zip2 v20.16b, v20.16b, v16.16b\n"
+ "str q19, [x27, #0x30]\n"
+ "zip1 v16.16b, v2.16b, v22.16b\n"
+ "str q18, [x27, #0x40]\n"
+ "zip1 v19.16b, v23.16b, v16.16b\n"
+ "str q17, [x27, #0x50]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "zip2 v18.16b, v23.16b, v16.16b\n"
+ "str q30, [x27, #0x0]\n"
+ "zip2 v17.16b, v11.16b, v5.16b\n"
+ "str q27, [x27, #0x10]\n"
+ "zip1 v16.16b, v10.16b, v17.16b\n"
+ "str q24, [x27, #0x20]\n"
+ "zip2 v17.16b, v10.16b, v17.16b\n"
+ "str q20, [x27, #0x30]\n"
+ "zip1 v20.16b, v9.16b, v31.16b\n"
+ "str q19, [x27, #0x40]\n"
+ "zip1 v19.16b, v8.16b, v20.16b\n"
+ "str q18, [x27, #0x50]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "zip2 v18.16b, v4.16b, v26.16b\n"
+ "str q16, [x27, #0x0]\n"
+ "zip2 v16.16b, v2.16b, v22.16b\n"
+ "str q17, [x27, #0x10]\n"
+ "zip1 v17.16b, v18.16b, v16.16b\n"
+ "str q19, [x27, #0x20]\n"
+ "zip2 v16.16b, v18.16b, v16.16b\n"
+ "str q17, [x27, #0x30]\n"
+ "zip1 v19.16b, v29.16b, v25.16b\n"
+ "str q16, [x27, #0x40]\n"
+ "zip1 v17.16b, v28.16b, v21.16b\n"
+ "zip1 v16.16b, v19.16b, v17.16b\n"
+ "str q16, [x27, #0x50]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "zip2 v16.16b, v8.16b, v20.16b\n"
+ "str q16, [x27, #0x0]\n"
+ "zip2 v18.16b, v9.16b, v31.16b\n"
+ "zip2 v17.16b, v19.16b, v17.16b\n"
+ "zip1 v16.16b, v7.16b, v18.16b\n"
+ "str q16, [x27, #0x10]\n"
+ "zip2 v16.16b, v7.16b, v18.16b\n"
+ "str q16, [x27, #0x20]\n"
+ "zip2 v18.16b, v29.16b, v25.16b\n"
+ "str q17, [x27, #0x30]\n"
+ "zip2 v17.16b, v28.16b, v21.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x40]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x50]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cmp x24, #0xc\n"
+ "cmp x19, #0xc\n"
"blt 5f\n"
"4:" // Main row loop: Column loop
- "ldr d23, [x9], #0x8\n"
- "ldr d22, [x28], #0x8\n"
- "sub x24, x24, #0xc\n"
- "cmp x24, #0xc\n"
- "ldr d19, [x27], #0x8\n"
+ "ldr d19, [x28], #0x8\n"
+ "sub x19, x19, #0xc\n"
"ldr d18, [x26], #0x8\n"
- "ldr d21, [x25], #0x8\n"
- "ldr d25, [x23], #0x8\n"
- "ldr d20, [x22], #0x8\n"
- "ldr d17, [x20], #0x8\n"
- "ld1 { v23.s }[2], [x9], #0x4\n"
- "ld1 { v22.s }[2], [x28], #0x4\n"
- "ld1 { v19.s }[2], [x27], #0x4\n"
+ "cmp x19, #0xc\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d16, [x24], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
+ "ld1 { v19.s }[2], [x28], #0x4\n"
"ld1 { v18.s }[2], [x26], #0x4\n"
- "zip1 v24.16b, v23.16b, v19.16b\n"
- "zip1 v16.16b, v22.16b, v18.16b\n"
- "ld1 { v21.s }[2], [x25], #0x4\n"
- "ld1 { v25.s }[2], [x23], #0x4\n"
- "zip2 v19.16b, v23.16b, v19.16b\n"
- "zip2 v18.16b, v22.16b, v18.16b\n"
- "ld1 { v20.s }[2], [x22], #0x4\n"
- "ld1 { v17.s }[2], [x20], #0x4\n"
- "zip1 v23.16b, v21.16b, v20.16b\n"
- "zip1 v22.16b, v25.16b, v17.16b\n"
- "zip2 v21.16b, v21.16b, v20.16b\n"
- "zip2 v20.16b, v25.16b, v17.16b\n"
- "zip1 v17.16b, v24.16b, v16.16b\n"
- "zip2 v16.16b, v24.16b, v16.16b\n"
- "str q17, [x21, #0x0]\n"
- "zip1 v19.16b, v19.16b, v18.16b\n"
- "zip1 v18.16b, v23.16b, v22.16b\n"
- "str q16, [x21, #0x10]\n"
- "zip2 v17.16b, v23.16b, v22.16b\n"
- "zip1 v16.16b, v21.16b, v20.16b\n"
- "str q19, [x21, #0x20]\n"
- "str q18, [x21, #0x30]\n"
- "str q17, [x21, #0x40]\n"
- "str q16, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ld1 { v17.s }[2], [x25], #0x4\n"
+ "zip1 v23.16b, v19.16b, v17.16b\n"
+ "ld1 { v16.s }[2], [x24], #0x4\n"
+ "zip2 v20.16b, v19.16b, v17.16b\n"
+ "ld1 { v24.s }[2], [x23], #0x4\n"
+ "ldr d22, [x22], #0x8\n"
+ "zip1 v17.16b, v18.16b, v16.16b\n"
+ "ldr d19, [x21], #0x8\n"
+ "zip2 v16.16b, v18.16b, v16.16b\n"
+ "ld1 { v22.s }[2], [x22], #0x4\n"
+ "zip1 v18.16b, v23.16b, v17.16b\n"
+ "ldr d21, [x20], #0x8\n"
+ "zip2 v17.16b, v23.16b, v17.16b\n"
+ "ld1 { v19.s }[2], [x21], #0x4\n"
+ "zip1 v16.16b, v20.16b, v16.16b\n"
+ "ld1 { v21.s }[2], [x20], #0x4\n"
+ "zip1 v20.16b, v24.16b, v19.16b\n"
+ "str q18, [x27, #0x0]\n"
+ "zip2 v19.16b, v24.16b, v19.16b\n"
+ "str q17, [x27, #0x10]\n"
+ "str q16, [x27, #0x20]\n"
+ "zip1 v18.16b, v22.16b, v21.16b\n"
+ "zip2 v17.16b, v22.16b, v21.16b\n"
+ "zip1 v16.16b, v20.16b, v18.16b\n"
+ "str q16, [x27, #0x30]\n"
+ "zip2 v16.16b, v20.16b, v18.16b\n"
+ "str q16, [x27, #0x40]\n"
+ "zip1 v16.16b, v19.16b, v17.16b\n"
+ "str q16, [x27, #0x50]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp x24, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
- "ldr s19, [x9], #0x4\n"
"ldr s18, [x28], #0x4\n"
- "sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr s17, [x27], #0x4\n"
- "ldr s16, [x26], #0x4\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr s20, [x25], #0x4\n"
- "ldr s19, [x23], #0x4\n"
- "zip1 v18.16b, v17.16b, v16.16b\n"
- "ldr s17, [x22], #0x4\n"
+ "sub x19, x19, #0x4\n"
+ "ldr s17, [x26], #0x4\n"
+ "cmp x19, #0x4\n"
+ "ldr s16, [x25], #0x4\n"
+ "zip1 v18.16b, v18.16b, v16.16b\n"
+ "ldr s16, [x24], #0x4\n"
+ "ldr s20, [x23], #0x4\n"
+ "zip1 v16.16b, v17.16b, v16.16b\n"
+ "ldr s19, [x22], #0x4\n"
+ "ldr s17, [x21], #0x4\n"
+ "zip1 v18.16b, v18.16b, v16.16b\n"
"ldr s16, [x20], #0x4\n"
"zip1 v17.16b, v20.16b, v17.16b\n"
+ "str q18, [x27, #0x0]\n"
"zip1 v16.16b, v19.16b, v16.16b\n"
- "str q18, [x21, #0x0]\n"
"zip1 v16.16b, v17.16b, v16.16b\n"
- "str q16, [x21, #0x30]\n"
- "add x21, x21, #0x10\n"
+ "str q16, [x27, #0x30]\n"
+ "add x27, x27, #0x10\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
- "cmp x24, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
- "ldr b19, [x9], #0x1\n"
"ldr b18, [x28], #0x1\n"
- "sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr b17, [x27], #0x1\n"
- "ldr b16, [x26], #0x1\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr b20, [x25], #0x1\n"
- "ldr b19, [x23], #0x1\n"
- "zip1 v18.16b, v17.16b, v16.16b\n"
- "ldr b17, [x22], #0x1\n"
+ "sub x19, x19, #0x1\n"
+ "ldr b17, [x26], #0x1\n"
+ "cmp x19, #0x1\n"
+ "ldr b16, [x25], #0x1\n"
+ "zip1 v18.16b, v18.16b, v16.16b\n"
+ "ldr b16, [x24], #0x1\n"
+ "ldr b20, [x23], #0x1\n"
+ "zip1 v16.16b, v17.16b, v16.16b\n"
+ "ldr b19, [x22], #0x1\n"
+ "ldr b17, [x21], #0x1\n"
+ "zip1 v18.16b, v18.16b, v16.16b\n"
"ldr b16, [x20], #0x1\n"
"zip1 v17.16b, v20.16b, v17.16b\n"
+ "str s18, [x27, #0x0]\n"
"zip1 v16.16b, v19.16b, v16.16b\n"
- "str s18, [x21, #0x0]\n"
"zip1 v16.16b, v17.16b, v16.16b\n"
- "str s16, [x21, #0x30]\n"
- "add x21, x21, #0x4\n"
+ "str s16, [x27, #0x30]\n"
+ "add x27, x27, #0x4\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
- "cmp %x[height], #0x8\n"
"add %x[out], %x[out], #0x60\n"
+ "cmp %x[height], #0x8\n"
"bge 1b\n"
"cbz %x[height], 20f\n"
"10:" // Main loop skip
+
"11:" // Tail row loop: Head
- "mov x9, %x[in]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
- "mov x20, %x[width]\n"
- "add x26, x27, %x[in_stride]\n"
+ "mov x28, %x[in]\n"
+ "mov x27, %x[out]\n"
+ "add x26, x28, %x[in_stride]\n"
+ "add x25, x26, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "add %x[in], x24, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x26, %x[in_stride]\n"
- "csel x26, x26, %x[pad_row], GT\n"
- "csel x27, x27, %x[pad_row], GE\n"
+ "csel x24, x24, %x[pad_row], GT\n"
+ "csel x25, x25, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x28, x28, %x[pad_row], GT\n"
- "cmp x20, #0x30\n"
- "mov x21, %x[out]\n"
+ "csel x26, x26, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x4\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x30\n"
"blt 13f\n"
"12:" // Tail row loop: Unroll column loop
- "ldr q21, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "sub x20, x20, #0x30\n"
- "cmp x20, #0x30\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v31.16b, v21.16b, v17.16b\n"
- "zip1 v30.16b, v20.16b, v16.16b\n"
- "ldr q19, [x9], #0x10\n"
"ldr q18, [x28], #0x10\n"
- "zip2 v29.16b, v21.16b, v17.16b\n"
- "zip2 v28.16b, v20.16b, v16.16b\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v27.16b, v19.16b, v17.16b\n"
- "zip1 v26.16b, v18.16b, v16.16b\n"
- "ldr q22, [x9], #0x10\n"
- "ldr q21, [x28], #0x10\n"
- "zip2 v25.16b, v19.16b, v17.16b\n"
- "zip2 v20.16b, v18.16b, v16.16b\n"
- "ldr q19, [x27], #0x10\n"
- "ldr q18, [x26], #0x10\n"
- "zip1 v24.16b, v22.16b, v19.16b\n"
- "zip1 v23.16b, v21.16b, v18.16b\n"
- "zip1 v16.16b, v31.16b, v30.16b\n"
- "zip2 v17.16b, v31.16b, v30.16b\n"
- "str q16, [x21, #0x0]\n"
- "zip1 v16.16b, v29.16b, v28.16b\n"
- "str q17, [x21, #0x10]\n"
- "zip2 v22.16b, v22.16b, v19.16b\n"
- "str q16, [x21, #0x20]\n"
- "add x21, x21, %x[out_stride]\n"
- "zip2 v21.16b, v21.16b, v18.16b\n"
- "zip2 v18.16b, v29.16b, v28.16b\n"
- "zip1 v16.16b, v27.16b, v26.16b\n"
- "zip2 v17.16b, v27.16b, v26.16b\n"
- "str q18, [x21, #0x0]\n"
- "str q16, [x21, #0x10]\n"
- "zip1 v16.16b, v25.16b, v20.16b\n"
- "zip2 v20.16b, v25.16b, v20.16b\n"
- "str q17, [x21, #0x20]\n"
- "add x21, x21, %x[out_stride]\n"
- "zip1 v19.16b, v24.16b, v23.16b\n"
- "zip2 v18.16b, v24.16b, v23.16b\n"
- "str q16, [x21, #0x0]\n"
- "zip1 v17.16b, v22.16b, v21.16b\n"
- "zip2 v16.16b, v22.16b, v21.16b\n"
- "str q20, [x21, #0x10]\n"
- "str q19, [x21, #0x20]\n"
- "add x21, x21, %x[out_stride]\n"
- "str q18, [x21, #0x0]\n"
- "str q17, [x21, #0x10]\n"
- "str q16, [x21, #0x20]\n"
- "add x21, x21, %x[out_stride]\n"
+ "sub x19, x19, #0x30\n"
+ "ldr q19, [x26], #0x10\n"
+ "cmp x19, #0x30\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v28.16b, v18.16b, v16.16b\n"
+ "ldr q17, [x28], #0x10\n"
+ "zip2 v27.16b, v18.16b, v16.16b\n"
+ "ldr q26, [x26], #0x10\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v25.16b, v17.16b, v16.16b\n"
+ "ldr q18, [x28], #0x10\n"
+ "zip2 v24.16b, v17.16b, v16.16b\n"
+ "ldr q23, [x26], #0x10\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v22.16b, v18.16b, v16.16b\n"
+ "ldr q17, [x24], #0x10\n"
+ "zip2 v21.16b, v18.16b, v16.16b\n"
+ "ldr q20, [x24], #0x10\n"
+ "zip1 v16.16b, v19.16b, v17.16b\n"
+ "zip2 v18.16b, v19.16b, v17.16b\n"
+ "ldr q19, [x24], #0x10\n"
+ "zip1 v17.16b, v28.16b, v16.16b\n"
+ "zip2 v16.16b, v28.16b, v16.16b\n"
+ "str q17, [x27, #0x0]\n"
+ "zip1 v17.16b, v27.16b, v18.16b\n"
+ "str q16, [x27, #0x10]\n"
+ "zip2 v16.16b, v27.16b, v18.16b\n"
+ "str q17, [x27, #0x20]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "zip1 v18.16b, v26.16b, v20.16b\n"
+ "str q16, [x27, #0x0]\n"
+ "zip2 v17.16b, v26.16b, v20.16b\n"
+ "zip1 v16.16b, v25.16b, v18.16b\n"
+ "str q16, [x27, #0x10]\n"
+ "zip2 v16.16b, v25.16b, v18.16b\n"
+ "str q16, [x27, #0x20]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "zip1 v16.16b, v24.16b, v17.16b\n"
+ "str q16, [x27, #0x0]\n"
+ "zip2 v16.16b, v24.16b, v17.16b\n"
+ "zip1 v17.16b, v23.16b, v19.16b\n"
+ "str q16, [x27, #0x10]\n"
+ "zip1 v16.16b, v22.16b, v17.16b\n"
+ "str q16, [x27, #0x20]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "zip2 v16.16b, v22.16b, v17.16b\n"
+ "str q16, [x27, #0x0]\n"
+ "zip2 v17.16b, v23.16b, v19.16b\n"
+ "zip1 v16.16b, v21.16b, v17.16b\n"
+ "str q16, [x27, #0x10]\n"
+ "zip2 v16.16b, v21.16b, v17.16b\n"
+ "str q16, [x27, #0x20]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 12b\n"
"13:" // Tail row loop: Unroll column loop skip
- "cmp x20, #0xc\n"
+ "cmp x19, #0xc\n"
"blt 15f\n"
"14:" // Tail row loop: Column loop
- "ldr d19, [x9], #0x8\n"
- "ldr d21, [x28], #0x8\n"
- "sub x20, x20, #0xc\n"
- "cmp x20, #0xc\n"
- "ldr d18, [x27], #0x8\n"
- "ldr d16, [x26], #0x8\n"
- "ld1 { v19.s }[2], [x9], #0x4\n"
- "ld1 { v21.s }[2], [x28], #0x4\n"
- "ld1 { v18.s }[2], [x27], #0x4\n"
- "ld1 { v16.s }[2], [x26], #0x4\n"
- "zip1 v20.16b, v19.16b, v18.16b\n"
- "zip1 v17.16b, v21.16b, v16.16b\n"
- "zip2 v19.16b, v19.16b, v18.16b\n"
- "zip2 v18.16b, v21.16b, v16.16b\n"
- "zip1 v16.16b, v20.16b, v17.16b\n"
- "zip2 v17.16b, v20.16b, v17.16b\n"
- "str q16, [x21, #0x0]\n"
- "zip1 v16.16b, v19.16b, v18.16b\n"
- "str q17, [x21, #0x10]\n"
- "str q16, [x21, #0x20]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr d18, [x28], #0x8\n"
+ "sub x19, x19, #0xc\n"
+ "ldr d21, [x26], #0x8\n"
+ "cmp x19, #0xc\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d16, [x24], #0x8\n"
+ "ld1 { v18.s }[2], [x28], #0x4\n"
+ "ld1 { v21.s }[2], [x26], #0x4\n"
+ "ld1 { v17.s }[2], [x25], #0x4\n"
+ "zip1 v20.16b, v18.16b, v17.16b\n"
+ "ld1 { v16.s }[2], [x24], #0x4\n"
+ "zip2 v19.16b, v18.16b, v17.16b\n"
+ "zip1 v18.16b, v21.16b, v16.16b\n"
+ "zip2 v17.16b, v21.16b, v16.16b\n"
+ "zip1 v16.16b, v20.16b, v18.16b\n"
+ "str q16, [x27, #0x0]\n"
+ "zip2 v16.16b, v20.16b, v18.16b\n"
+ "str q16, [x27, #0x10]\n"
+ "zip1 v16.16b, v19.16b, v17.16b\n"
+ "str q16, [x27, #0x20]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 14b\n"
"15:" // Tail row loop: Column loop skip
- "cmp x20, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 17f\n"
"16:" // Tail row loop: width 4 loop: loop
- "ldr s19, [x9], #0x4\n"
- "ldr s18, [x28], #0x4\n"
- "sub x20, x20, #0x4\n"
- "cmp x20, #0x4\n"
- "ldr s17, [x27], #0x4\n"
- "ldr s16, [x26], #0x4\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
+ "ldr s17, [x28], #0x4\n"
+ "sub x19, x19, #0x4\n"
+ "ldr s18, [x26], #0x4\n"
+ "cmp x19, #0x4\n"
+ "ldr s16, [x25], #0x4\n"
+ "zip1 v17.16b, v17.16b, v16.16b\n"
+ "ldr s16, [x24], #0x4\n"
"zip1 v16.16b, v18.16b, v16.16b\n"
"zip1 v16.16b, v17.16b, v16.16b\n"
- "str q16, [x21, #0x0]\n"
- "add x21, x21, #0x10\n"
+ "str q16, [x27, #0x0]\n"
+ "add x27, x27, #0x10\n"
"bge 16b\n"
"17:" // Tail row loop: width 4 loop: skip
- "cmp x20, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 19f\n"
"18:" // Tail row loop: width 1 loop: loop
- "ldr b19, [x9], #0x1\n"
- "ldr b18, [x28], #0x1\n"
- "sub x20, x20, #0x1\n"
- "cmp x20, #0x1\n"
- "ldr b17, [x27], #0x1\n"
- "ldr b16, [x26], #0x1\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
+ "ldr b17, [x28], #0x1\n"
+ "sub x19, x19, #0x1\n"
+ "ldr b18, [x26], #0x1\n"
+ "cmp x19, #0x1\n"
+ "ldr b16, [x25], #0x1\n"
+ "zip1 v17.16b, v17.16b, v16.16b\n"
+ "ldr b16, [x24], #0x1\n"
"zip1 v16.16b, v18.16b, v16.16b\n"
"zip1 v16.16b, v17.16b, v16.16b\n"
- "str s16, [x21, #0x0]\n"
- "add x21, x21, #0x4\n"
+ "str s16, [x27, #0x0]\n"
+ "add x27, x27, #0x4\n"
"bge 18b\n"
"19:" // Tail row loop: width 1 loop: skip
- "cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x30\n"
+ "cmp %x[height], #0x1\n"
"bge 11b\n"
"20:" // Done
+
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
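`a64_transpose_interleave_12_1x4` additionally interleaves bytes across rows: the paired `zip1`/`zip2` stages (zip rows 0 and 2, zip rows 1 and 3, then zip the two results) yield the stream r0[0], r1[0], r2[0], r3[0], r0[1], and so on, for each group of four rows. A tiny C++ model of that output order — illustrative only; in the real kernel short heights are padded from `pad_row` via the `csel` instructions:

#include <cstddef>
#include <cstdint>

// Output order produced by the two-level zip above: one byte from each
// of four rows in turn, i.e. zip1(zip1(r0, r2), zip1(r1, r3)) applied
// across the whole width.
void interleave_1x4_ref(uint8_t *out, const uint8_t *rows[4], size_t width)
{
    for (size_t i = 0; i < width; i++)
    {
        for (size_t r = 0; r < 4; r++)
        {
            *out++ = rows[r][i];
        }
    }
}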
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_1x8.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_1x8.hpp
index 96d132b74f..1603be2ef8 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_1x8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_1x8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -41,266 +41,266 @@ void a64_transpose_interleave_12_1x8(uint8_t *out, const uint8_t *in, size_t wid
__asm__ __volatile__(
"1:" // Main row loop: Head
- "mov x9, %x[in]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
- "add x26, x27, %x[in_stride]\n"
+ "mov x28, %x[in]\n"
+ "mov x27, %x[out]\n"
+ "add x26, x28, %x[in_stride]\n"
"add x25, x26, %x[in_stride]\n"
"add x24, x25, %x[in_stride]\n"
"add x23, x24, %x[in_stride]\n"
"add x22, x23, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
+ "add %x[in], x20, %x[in_stride]\n"
"cmp %x[height], #0x7\n"
- "add %x[in], x22, %x[in_stride]\n"
+ "csel x20, x20, %x[pad_row], GT\n"
+ "csel x21, x21, %x[pad_row], GE\n"
+ "cmp %x[height], #0x5\n"
"csel x22, x22, %x[pad_row], GT\n"
"csel x23, x23, %x[pad_row], GE\n"
- "cmp %x[height], #0x5\n"
- "mov x21, %x[width]\n"
+ "cmp %x[height], #0x3\n"
"csel x24, x24, %x[pad_row], GT\n"
"csel x25, x25, %x[pad_row], GE\n"
- "cmp %x[height], #0x3\n"
- "csel x26, x26, %x[pad_row], GT\n"
- "csel x27, x27, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x28, x28, %x[pad_row], GT\n"
- "cmp x21, #0x30\n"
- "mov x20, %x[out]\n"
+ "csel x26, x26, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x8\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x30\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ldr q21, [x9], #0x10\n"
- "ldr q25, [x28], #0x10\n"
- "sub x21, x21, #0x30\n"
- "cmp x21, #0x30\n"
- "ldr q20, [x27], #0x10\n"
- "ldr q24, [x26], #0x10\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x24], #0x10\n"
- "zip1 v7.16b, v21.16b, v19.16b\n"
- "zip1 v6.16b, v25.16b, v18.16b\n"
- "ldr q17, [x23], #0x10\n"
- "ldr q16, [x22], #0x10\n"
- "zip1 v28.16b, v20.16b, v17.16b\n"
- "zip1 v27.16b, v24.16b, v16.16b\n"
- "ldr q23, [x9], #0x10\n"
- "ldr q22, [x28], #0x10\n"
- "zip2 v5.16b, v21.16b, v19.16b\n"
- "zip2 v4.16b, v20.16b, v17.16b\n"
- "ldr q21, [x27], #0x10\n"
- "ldr q20, [x26], #0x10\n"
- "zip2 v3.16b, v25.16b, v18.16b\n"
- "zip2 v2.16b, v24.16b, v16.16b\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x24], #0x10\n"
- "zip1 v1.16b, v23.16b, v19.16b\n"
- "zip1 v15.16b, v22.16b, v18.16b\n"
+ "ldr q18, [x28], #0x10\n"
+ "sub x19, x19, #0x30\n"
+ "ldr q19, [x26], #0x10\n"
+ "cmp x19, #0x30\n"
+ "ldr q11, [x25], #0x10\n"
+ "ldr q10, [x24], #0x10\n"
+ "ldr q16, [x23], #0x10\n"
+ "zip1 v22.16b, v18.16b, v16.16b\n"
+ "ldr q17, [x28], #0x10\n"
+ "zip2 v9.16b, v18.16b, v16.16b\n"
+ "ldr q8, [x26], #0x10\n"
+ "ldr q7, [x25], #0x10\n"
+ "ldr q6, [x24], #0x10\n"
+ "ldr q16, [x23], #0x10\n"
+ "zip1 v5.16b, v17.16b, v16.16b\n"
+ "ldr q18, [x28], #0x10\n"
+ "zip2 v4.16b, v17.16b, v16.16b\n"
+ "ldr q3, [x26], #0x10\n"
+ "ldr q2, [x25], #0x10\n"
+ "ldr q1, [x24], #0x10\n"
"ldr q17, [x23], #0x10\n"
+ "zip1 v0.16b, v18.16b, v17.16b\n"
"ldr q16, [x22], #0x10\n"
- "zip1 v0.16b, v21.16b, v17.16b\n"
- "zip1 v31.16b, v20.16b, v16.16b\n"
- "ldr q26, [x9], #0x10\n"
- "ldr q30, [x28], #0x10\n"
- "zip2 v14.16b, v23.16b, v19.16b\n"
- "zip2 v13.16b, v21.16b, v17.16b\n"
- "ldr q25, [x27], #0x10\n"
- "ldr q24, [x26], #0x10\n"
- "zip2 v12.16b, v22.16b, v18.16b\n"
- "zip2 v11.16b, v20.16b, v16.16b\n"
- "ldr q23, [x25], #0x10\n"
- "ldr q22, [x24], #0x10\n"
- "zip1 v10.16b, v26.16b, v23.16b\n"
- "zip1 v9.16b, v30.16b, v22.16b\n"
- "ldr q21, [x23], #0x10\n"
- "ldr q17, [x22], #0x10\n"
- "zip1 v29.16b, v25.16b, v21.16b\n"
- "zip1 v8.16b, v24.16b, v17.16b\n"
- "zip1 v19.16b, v7.16b, v28.16b\n"
- "zip1 v16.16b, v6.16b, v27.16b\n"
- "zip2 v28.16b, v7.16b, v28.16b\n"
- "zip2 v18.16b, v6.16b, v27.16b\n"
- "zip1 v27.16b, v5.16b, v4.16b\n"
- "zip1 v20.16b, v3.16b, v2.16b\n"
- "zip2 v7.16b, v26.16b, v23.16b\n"
- "zip2 v26.16b, v25.16b, v21.16b\n"
- "zip2 v6.16b, v30.16b, v22.16b\n"
- "zip2 v25.16b, v24.16b, v17.16b\n"
- "zip2 v5.16b, v5.16b, v4.16b\n"
- "zip2 v4.16b, v3.16b, v2.16b\n"
- "zip1 v3.16b, v1.16b, v0.16b\n"
- "zip1 v2.16b, v15.16b, v31.16b\n"
- "zip2 v1.16b, v1.16b, v0.16b\n"
- "zip2 v0.16b, v15.16b, v31.16b\n"
- "zip1 v31.16b, v14.16b, v13.16b\n"
- "zip1 v30.16b, v12.16b, v11.16b\n"
- "zip2 v24.16b, v14.16b, v13.16b\n"
- "zip2 v23.16b, v12.16b, v11.16b\n"
- "zip1 v22.16b, v10.16b, v29.16b\n"
- "zip1 v21.16b, v9.16b, v8.16b\n"
- "zip1 v17.16b, v19.16b, v16.16b\n"
- "zip2 v16.16b, v19.16b, v16.16b\n"
- "str q17, [x20, #0x0]\n"
- "zip1 v19.16b, v28.16b, v18.16b\n"
- "zip2 v18.16b, v28.16b, v18.16b\n"
- "str q16, [x20, #0x10]\n"
- "zip1 v17.16b, v27.16b, v20.16b\n"
- "zip2 v16.16b, v27.16b, v20.16b\n"
- "str q19, [x20, #0x20]\n"
- "str q18, [x20, #0x30]\n"
- "zip2 v29.16b, v10.16b, v29.16b\n"
- "zip2 v20.16b, v9.16b, v8.16b\n"
- "str q17, [x20, #0x40]\n"
- "zip1 v28.16b, v7.16b, v26.16b\n"
- "zip1 v27.16b, v6.16b, v25.16b\n"
- "str q16, [x20, #0x50]\n"
- "add x20, x20, %x[out_stride]\n"
- "zip2 v26.16b, v7.16b, v26.16b\n"
- "zip2 v25.16b, v6.16b, v25.16b\n"
- "zip1 v17.16b, v5.16b, v4.16b\n"
- "zip2 v16.16b, v5.16b, v4.16b\n"
- "str q17, [x20, #0x0]\n"
- "zip1 v18.16b, v3.16b, v2.16b\n"
- "zip2 v17.16b, v3.16b, v2.16b\n"
- "str q16, [x20, #0x10]\n"
- "zip1 v16.16b, v1.16b, v0.16b\n"
- "zip2 v19.16b, v1.16b, v0.16b\n"
- "str q18, [x20, #0x20]\n"
- "str q17, [x20, #0x30]\n"
- "zip1 v18.16b, v31.16b, v30.16b\n"
- "zip2 v17.16b, v31.16b, v30.16b\n"
- "str q16, [x20, #0x40]\n"
- "zip1 v16.16b, v24.16b, v23.16b\n"
- "zip2 v24.16b, v24.16b, v23.16b\n"
- "str q19, [x20, #0x50]\n"
- "add x20, x20, %x[out_stride]\n"
- "zip1 v23.16b, v22.16b, v21.16b\n"
- "zip2 v22.16b, v22.16b, v21.16b\n"
- "str q18, [x20, #0x0]\n"
- "zip1 v21.16b, v29.16b, v20.16b\n"
- "zip2 v20.16b, v29.16b, v20.16b\n"
- "str q17, [x20, #0x10]\n"
- "zip1 v19.16b, v28.16b, v27.16b\n"
- "zip2 v18.16b, v28.16b, v27.16b\n"
- "str q16, [x20, #0x20]\n"
- "zip1 v17.16b, v26.16b, v25.16b\n"
- "zip2 v16.16b, v26.16b, v25.16b\n"
- "str q24, [x20, #0x30]\n"
- "str q23, [x20, #0x40]\n"
- "str q22, [x20, #0x50]\n"
- "add x20, x20, %x[out_stride]\n"
- "str q21, [x20, #0x0]\n"
- "str q20, [x20, #0x10]\n"
- "str q19, [x20, #0x20]\n"
- "str q18, [x20, #0x30]\n"
- "str q17, [x20, #0x40]\n"
- "str q16, [x20, #0x50]\n"
- "add x20, x20, %x[out_stride]\n"
+ "zip2 v31.16b, v18.16b, v17.16b\n"
+ "ldr q30, [x21], #0x10\n"
+ "ldr q29, [x20], #0x10\n"
+ "zip1 v28.16b, v19.16b, v16.16b\n"
+ "ldr q27, [x22], #0x10\n"
+ "zip2 v21.16b, v19.16b, v16.16b\n"
+ "ldr q26, [x21], #0x10\n"
+ "zip1 v16.16b, v11.16b, v30.16b\n"
+ "ldr q25, [x20], #0x10\n"
+ "zip1 v20.16b, v22.16b, v16.16b\n"
+ "ldr q24, [x22], #0x10\n"
+ "zip1 v19.16b, v10.16b, v29.16b\n"
+ "zip2 v18.16b, v22.16b, v16.16b\n"
+ "ldr q23, [x21], #0x10\n"
+ "zip1 v17.16b, v28.16b, v19.16b\n"
+ "ldr q22, [x20], #0x10\n"
+ "zip1 v16.16b, v20.16b, v17.16b\n"
+ "str q16, [x27, #0x0]\n"
+ "zip2 v16.16b, v20.16b, v17.16b\n"
+ "str q16, [x27, #0x10]\n"
+ "zip2 v17.16b, v28.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x20]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x30]\n"
+ "zip2 v20.16b, v11.16b, v30.16b\n"
+ "zip1 v18.16b, v9.16b, v20.16b\n"
+ "zip2 v19.16b, v10.16b, v29.16b\n"
+ "zip1 v17.16b, v21.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x40]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x50]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "zip2 v18.16b, v9.16b, v20.16b\n"
+ "zip2 v17.16b, v21.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x0]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x10]\n"
+ "zip1 v21.16b, v7.16b, v26.16b\n"
+ "zip1 v18.16b, v5.16b, v21.16b\n"
+ "zip1 v20.16b, v8.16b, v27.16b\n"
+ "zip1 v19.16b, v6.16b, v25.16b\n"
+ "zip1 v17.16b, v20.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x20]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x30]\n"
+ "zip2 v18.16b, v5.16b, v21.16b\n"
+ "zip2 v17.16b, v20.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x40]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x50]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "zip2 v21.16b, v7.16b, v26.16b\n"
+ "zip2 v20.16b, v8.16b, v27.16b\n"
+ "zip1 v18.16b, v4.16b, v21.16b\n"
+ "zip2 v19.16b, v6.16b, v25.16b\n"
+ "zip1 v17.16b, v20.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x0]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x10]\n"
+ "zip2 v18.16b, v4.16b, v21.16b\n"
+ "zip2 v17.16b, v20.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x20]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x30]\n"
+ "zip1 v21.16b, v2.16b, v23.16b\n"
+ "zip1 v18.16b, v0.16b, v21.16b\n"
+ "zip1 v20.16b, v3.16b, v24.16b\n"
+ "zip1 v19.16b, v1.16b, v22.16b\n"
+ "zip1 v17.16b, v20.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x40]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x50]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "zip2 v18.16b, v0.16b, v21.16b\n"
+ "zip2 v17.16b, v20.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x0]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x10]\n"
+ "zip2 v21.16b, v2.16b, v23.16b\n"
+ "zip1 v18.16b, v31.16b, v21.16b\n"
+ "zip2 v20.16b, v3.16b, v24.16b\n"
+ "zip2 v19.16b, v1.16b, v22.16b\n"
+ "zip1 v17.16b, v20.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x20]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x30]\n"
+ "zip2 v18.16b, v31.16b, v21.16b\n"
+ "zip2 v17.16b, v20.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x40]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x50]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cmp x21, #0xc\n"
+ "cmp x19, #0xc\n"
"blt 5f\n"
"4:" // Main row loop: Column loop
- "ldr d23, [x9], #0x8\n"
- "ldr d27, [x28], #0x8\n"
- "sub x21, x21, #0xc\n"
- "cmp x21, #0xc\n"
- "ldr d21, [x27], #0x8\n"
- "ldr d26, [x26], #0x8\n"
- "ldr d20, [x25], #0x8\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d17, [x23], #0x8\n"
- "ldr d16, [x22], #0x8\n"
- "ld1 { v23.s }[2], [x9], #0x4\n"
- "ld1 { v27.s }[2], [x28], #0x4\n"
- "ld1 { v21.s }[2], [x27], #0x4\n"
- "ld1 { v26.s }[2], [x26], #0x4\n"
- "ld1 { v20.s }[2], [x25], #0x4\n"
- "ld1 { v19.s }[2], [x24], #0x4\n"
- "zip1 v25.16b, v23.16b, v20.16b\n"
- "zip1 v24.16b, v27.16b, v19.16b\n"
- "ld1 { v17.s }[2], [x23], #0x4\n"
- "ld1 { v16.s }[2], [x22], #0x4\n"
- "zip1 v22.16b, v21.16b, v17.16b\n"
- "zip1 v18.16b, v26.16b, v16.16b\n"
- "zip2 v23.16b, v23.16b, v20.16b\n"
- "zip2 v21.16b, v21.16b, v17.16b\n"
- "zip2 v20.16b, v27.16b, v19.16b\n"
- "zip2 v17.16b, v26.16b, v16.16b\n"
- "zip1 v19.16b, v25.16b, v22.16b\n"
- "zip1 v16.16b, v24.16b, v18.16b\n"
- "zip2 v22.16b, v25.16b, v22.16b\n"
- "zip2 v18.16b, v24.16b, v18.16b\n"
- "zip1 v21.16b, v23.16b, v21.16b\n"
- "zip1 v20.16b, v20.16b, v17.16b\n"
- "zip1 v17.16b, v19.16b, v16.16b\n"
- "zip2 v16.16b, v19.16b, v16.16b\n"
- "str q17, [x20, #0x0]\n"
- "zip1 v19.16b, v22.16b, v18.16b\n"
- "zip2 v18.16b, v22.16b, v18.16b\n"
- "str q16, [x20, #0x10]\n"
- "zip1 v17.16b, v21.16b, v20.16b\n"
- "zip2 v16.16b, v21.16b, v20.16b\n"
- "str q19, [x20, #0x20]\n"
- "str q18, [x20, #0x30]\n"
- "str q17, [x20, #0x40]\n"
- "str q16, [x20, #0x50]\n"
- "add x20, x20, %x[out_stride]\n"
+ "ldr d20, [x28], #0x8\n"
+ "sub x19, x19, #0xc\n"
+ "ldr d19, [x26], #0x8\n"
+ "cmp x19, #0xc\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d27, [x24], #0x8\n"
+ "ldr d16, [x23], #0x8\n"
+ "ld1 { v20.s }[2], [x28], #0x4\n"
+ "ld1 { v19.s }[2], [x26], #0x4\n"
+ "ld1 { v18.s }[2], [x25], #0x4\n"
+ "ld1 { v27.s }[2], [x24], #0x4\n"
+ "ld1 { v16.s }[2], [x23], #0x4\n"
+ "zip1 v26.16b, v20.16b, v16.16b\n"
+ "ldr d17, [x22], #0x8\n"
+ "zip2 v25.16b, v20.16b, v16.16b\n"
+ "ldr d16, [x21], #0x8\n"
+ "ldr d24, [x20], #0x8\n"
+ "ld1 { v17.s }[2], [x22], #0x4\n"
+ "zip1 v23.16b, v19.16b, v17.16b\n"
+ "ld1 { v16.s }[2], [x21], #0x4\n"
+ "zip2 v22.16b, v19.16b, v17.16b\n"
+ "ld1 { v24.s }[2], [x20], #0x4\n"
+ "zip1 v21.16b, v18.16b, v16.16b\n"
+ "zip2 v20.16b, v18.16b, v16.16b\n"
+ "zip1 v18.16b, v26.16b, v21.16b\n"
+ "zip1 v19.16b, v27.16b, v24.16b\n"
+ "zip1 v17.16b, v23.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x0]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x10]\n"
+ "zip2 v18.16b, v26.16b, v21.16b\n"
+ "zip2 v17.16b, v23.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x20]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x30]\n"
+ "zip1 v18.16b, v25.16b, v20.16b\n"
+ "zip2 v16.16b, v27.16b, v24.16b\n"
+ "zip1 v17.16b, v22.16b, v16.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x40]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x50]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp x21, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
- "ldr s18, [x9], #0x4\n"
- "ldr s19, [x28], #0x4\n"
- "sub x21, x21, #0x4\n"
- "cmp x21, #0x4\n"
- "ldr s21, [x27], #0x4\n"
- "ldr s20, [x26], #0x4\n"
- "ldr s17, [x25], #0x4\n"
- "ldr s16, [x24], #0x4\n"
- "zip1 v18.16b, v18.16b, v17.16b\n"
- "zip1 v19.16b, v19.16b, v16.16b\n"
- "ldr s17, [x23], #0x4\n"
- "ldr s16, [x22], #0x4\n"
+ "ldr s17, [x28], #0x4\n"
+ "sub x19, x19, #0x4\n"
+ "ldr s21, [x26], #0x4\n"
+ "cmp x19, #0x4\n"
+ "ldr s18, [x25], #0x4\n"
+ "ldr s20, [x24], #0x4\n"
+ "ldr s16, [x23], #0x4\n"
+ "zip1 v19.16b, v17.16b, v16.16b\n"
+ "ldr s17, [x22], #0x4\n"
+ "ldr s16, [x21], #0x4\n"
+ "zip1 v18.16b, v18.16b, v16.16b\n"
+ "ldr s16, [x20], #0x4\n"
"zip1 v17.16b, v21.16b, v17.16b\n"
+ "zip1 v18.16b, v19.16b, v18.16b\n"
"zip1 v16.16b, v20.16b, v16.16b\n"
- "zip1 v18.16b, v18.16b, v17.16b\n"
- "zip1 v16.16b, v19.16b, v16.16b\n"
- "zip1 v17.16b, v18.16b, v16.16b\n"
- "zip2 v16.16b, v18.16b, v16.16b\n"
- "str q17, [x20, #0x0]\n"
- "str q16, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
+ "zip1 v17.16b, v17.16b, v16.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x0]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x10]\n"
+ "add x27, x27, #0x20\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
- "cmp x21, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
- "ldr b19, [x9], #0x1\n"
"ldr b18, [x28], #0x1\n"
- "sub x21, x21, #0x1\n"
- "cmp x21, #0x1\n"
- "ldr b21, [x27], #0x1\n"
- "ldr b20, [x26], #0x1\n"
+ "sub x19, x19, #0x1\n"
+ "ldr b21, [x26], #0x1\n"
+ "cmp x19, #0x1\n"
"ldr b17, [x25], #0x1\n"
- "ldr b16, [x24], #0x1\n"
- "zip1 v19.16b, v19.16b, v17.16b\n"
- "zip1 v18.16b, v18.16b, v16.16b\n"
- "ldr b17, [x23], #0x1\n"
- "ldr b16, [x22], #0x1\n"
- "zip1 v17.16b, v21.16b, v17.16b\n"
- "zip1 v16.16b, v20.16b, v16.16b\n"
+ "ldr b20, [x24], #0x1\n"
+ "ldr b16, [x23], #0x1\n"
+ "zip1 v19.16b, v18.16b, v16.16b\n"
+ "ldr b18, [x22], #0x1\n"
+ "ldr b16, [x21], #0x1\n"
+ "zip1 v17.16b, v17.16b, v16.16b\n"
+ "ldr b16, [x20], #0x1\n"
+ "zip1 v18.16b, v21.16b, v18.16b\n"
"zip1 v17.16b, v19.16b, v17.16b\n"
+ "zip1 v16.16b, v20.16b, v16.16b\n"
"zip1 v16.16b, v18.16b, v16.16b\n"
"zip1 v16.16b, v17.16b, v16.16b\n"
- "str d16, [x20, #0x0]\n"
- "add x20, x20, #0x8\n"
+ "str d16, [x27, #0x0]\n"
+ "add x27, x27, #0x8\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
- "cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x60\n"
+ "cmp %x[height], #0x1\n"
"bge 1b\n"
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
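
For readers auditing the generated assembly above, here is a minimal scalar sketch of what a64_transpose_interleave_12_1x8 computes, inferred from its loop labels, csel padding and store offsets: 12-column output panels, eight byte rows interleaved per column, 0x60 bytes of output per row block. It is an illustration only, not part of this patch; the signature, the sketch_ name and the element-unit strides are assumptions.

#include <cstddef>
#include <cstdint>

// Scalar sketch (assumed layout): for column chunk k and 8-row block b,
//   out[k * out_stride + b * 96 + c * 8 + r] = row[8b + r][12k + c].
// Rows past `height` read from `pad_row`, mirroring the csel sequence.
void sketch_transpose_interleave_12_1x8(
    uint8_t *out, const uint8_t *in, size_t width, size_t height,
    size_t in_stride, size_t out_stride, const uint8_t *pad_row)
{
    for (size_t row = 0; row < height; row += 8) {
        const uint8_t *src[8];
        for (size_t r = 0; r < 8; ++r)
            src[r] = (row + r < height) ? in + (row + r) * in_stride : pad_row;

        uint8_t *panel = out;
        for (size_t col = 0; col < width; col += 12) {
            const size_t n = (width - col < 12) ? width - col : 12;
            for (size_t c = 0; c < n; ++c)      // the zip1/zip2 cascade above
                for (size_t r = 0; r < 8; ++r)  // emits one byte per row
                    panel[c * 8 + r] = src[r][col + c];
            panel += out_stride;                // next 12-column panel
        }
        out += 12 * 8;                          // "add %x[out], ..., #0x60"
    }
}

The three levels of zip1/zip2 over eight source registers realise this byte-granular 8-way interleave twelve columns at a time; the width-4 and width-1 loops cover the same mapping for partial panels.
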
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_2x2.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_2x2.hpp
index 04af6fd713..78301353fd 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_2x2.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_2x2.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -42,287 +42,287 @@ void a64_transpose_interleave_12_2x2(uint16_t *out, const uint16_t *in, size_t w
"cmp %x[height], #0x8\n"
"blt 10f\n"
"1:" // Main row loop: Head
- "mov x9, %x[in]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
- "add x26, x27, %x[in_stride]\n"
+ "mov x28, %x[in]\n"
+ "mov x27, %x[out]\n"
+ "add x26, x28, %x[in_stride]\n"
"add x25, x26, %x[in_stride]\n"
- "mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "add x23, x24, %x[in_stride]\n"
"add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
- "cmp x24, #0x18\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x8\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x18\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ldr q19, [x9], #0x10\n"
- "ldr q18, [x28], #0x10\n"
- "sub x24, x24, #0x18\n"
- "zip1 v10.8h, v19.8h, v18.8h\n"
- "ldr q17, [x27], #0x10\n"
+ "ldr q17, [x28], #0x10\n"
+ "sub x19, x19, #0x18\n"
"ldr q16, [x26], #0x10\n"
- "zip2 v9.8h, v19.8h, v18.8h\n"
- "zip1 v8.8h, v17.8h, v16.8h\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v7.8h, v17.8h, v16.8h\n"
- "zip1 v6.8h, v19.8h, v18.8h\n"
- "ldr q17, [x22], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "zip2 v5.8h, v19.8h, v18.8h\n"
- "zip1 v4.8h, v17.8h, v16.8h\n"
- "ldr q21, [x9], #0x10\n"
- "ldr q18, [x28], #0x10\n"
- "zip1 v3.8h, v21.8h, v18.8h\n"
- "zip2 v2.8h, v17.8h, v16.8h\n"
- "ldr q17, [x27], #0x10\n"
+ "zip1 v9.8h, v17.8h, v16.8h\n"
+ "ldr q19, [x28], #0x10\n"
+ "cmp x19, #0x18\n"
+ "zip2 v8.8h, v17.8h, v16.8h\n"
+ "ldr q16, [x26], #0x10\n"
+ "ldr q18, [x25], #0x10\n"
+ "zip1 v7.8h, v19.8h, v16.8h\n"
+ "ldr q17, [x28], #0x10\n"
+ "zip2 v6.8h, v19.8h, v16.8h\n"
"ldr q16, [x26], #0x10\n"
- "zip1 v1.8h, v17.8h, v16.8h\n"
- "cmp x24, #0x18\n"
"ldr q20, [x25], #0x10\n"
+ "zip1 v5.8h, v17.8h, v16.8h\n"
+ "ldr q22, [x25], #0x10\n"
+ "zip2 v4.8h, v17.8h, v16.8h\n"
+ "ldr q16, [x24], #0x10\n"
"ldr q19, [x23], #0x10\n"
- "zip1 v0.8h, v20.8h, v19.8h\n"
- "zip2 v31.8h, v21.8h, v18.8h\n"
- "ldr q30, [x22], #0x10\n"
- "ldr q29, [x20], #0x10\n"
- "zip1 v28.8h, v30.8h, v29.8h\n"
- "zip2 v27.8h, v17.8h, v16.8h\n"
- "ldr q17, [x9], #0x10\n"
- "ldr q16, [x28], #0x10\n"
- "zip1 v26.8h, v17.8h, v16.8h\n"
- "zip2 v25.8h, v17.8h, v16.8h\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v24.8h, v17.8h, v16.8h\n"
- "zip2 v23.8h, v17.8h, v16.8h\n"
- "ldr q18, [x25], #0x10\n"
- "ldr q17, [x23], #0x10\n"
- "zip2 v22.8h, v20.8h, v19.8h\n"
- "zip1 v21.8h, v18.8h, v17.8h\n"
- "ldr q20, [x22], #0x10\n"
+ "zip1 v3.8h, v18.8h, v16.8h\n"
+ "ldr q17, [x24], #0x10\n"
+ "zip2 v2.8h, v18.8h, v16.8h\n"
+ "ldr q21, [x23], #0x10\n"
+ "ldr q18, [x22], #0x10\n"
+ "zip1 v1.8h, v20.8h, v17.8h\n"
+ "ldr q16, [x24], #0x10\n"
+ "zip2 v0.8h, v20.8h, v17.8h\n"
+ "ldr q31, [x23], #0x10\n"
+ "zip1 v30.8h, v19.8h, v18.8h\n"
+ "ldr q17, [x22], #0x10\n"
+ "zip2 v29.8h, v19.8h, v18.8h\n"
+ "ldr q20, [x21], #0x10\n"
+ "ldr q19, [x20], #0x10\n"
+ "zip1 v28.8h, v22.8h, v16.8h\n"
+ "zip2 v27.8h, v22.8h, v16.8h\n"
+ "ldr q16, [x22], #0x10\n"
+ "zip1 v26.8h, v21.8h, v17.8h\n"
+ "zip2 v25.8h, v21.8h, v17.8h\n"
+ "ldr q18, [x21], #0x10\n"
+ "zip1 v24.8h, v20.8h, v19.8h\n"
+ "ldr q17, [x20], #0x10\n"
+ "zip2 v23.8h, v20.8h, v19.8h\n"
+ "ldr q22, [x21], #0x10\n"
+ "zip1 v21.8h, v31.8h, v16.8h\n"
+ "zip2 v20.8h, v31.8h, v16.8h\n"
"ldr q16, [x20], #0x10\n"
- "str q10, [x21, #0x0]\n"
- "zip2 v19.8h, v18.8h, v17.8h\n"
- "str q9, [x21, #0x10]\n"
- "zip2 v18.8h, v30.8h, v29.8h\n"
- "zip1 v17.8h, v20.8h, v16.8h\n"
- "str q3, [x21, #0x20]\n"
- "zip2 v16.8h, v20.8h, v16.8h\n"
- "str q8, [x21, #0x30]\n"
- "str q7, [x21, #0x40]\n"
- "str q1, [x21, #0x50]\n"
- "str q6, [x21, #0x60]\n"
- "str q5, [x21, #0x70]\n"
- "str q0, [x21, #0x80]\n"
- "str q4, [x21, #0x90]\n"
- "str q2, [x21, #0xa0]\n"
- "str q28, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
- "str q31, [x21, #0x0]\n"
- "str q26, [x21, #0x10]\n"
- "str q25, [x21, #0x20]\n"
- "str q27, [x21, #0x30]\n"
- "str q24, [x21, #0x40]\n"
- "str q23, [x21, #0x50]\n"
- "str q22, [x21, #0x60]\n"
- "str q21, [x21, #0x70]\n"
- "str q19, [x21, #0x80]\n"
- "str q18, [x21, #0x90]\n"
- "str q17, [x21, #0xa0]\n"
- "str q16, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "zip1 v19.8h, v18.8h, v17.8h\n"
+ "str q9, [x27, #0x0]\n"
+ "zip2 v18.8h, v18.8h, v17.8h\n"
+ "str q8, [x27, #0x10]\n"
+ "str q7, [x27, #0x20]\n"
+ "zip1 v17.8h, v22.8h, v16.8h\n"
+ "str q3, [x27, #0x30]\n"
+ "zip2 v16.8h, v22.8h, v16.8h\n"
+ "str q2, [x27, #0x40]\n"
+ "str q1, [x27, #0x50]\n"
+ "str q30, [x27, #0x60]\n"
+ "str q29, [x27, #0x70]\n"
+ "str q26, [x27, #0x80]\n"
+ "str q24, [x27, #0x90]\n"
+ "str q23, [x27, #0xa0]\n"
+ "str q19, [x27, #0xb0]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "str q6, [x27, #0x0]\n"
+ "str q5, [x27, #0x10]\n"
+ "str q4, [x27, #0x20]\n"
+ "str q0, [x27, #0x30]\n"
+ "str q28, [x27, #0x40]\n"
+ "str q27, [x27, #0x50]\n"
+ "str q25, [x27, #0x60]\n"
+ "str q21, [x27, #0x70]\n"
+ "str q20, [x27, #0x80]\n"
+ "str q18, [x27, #0x90]\n"
+ "str q17, [x27, #0xa0]\n"
+ "str q16, [x27, #0xb0]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cmp x24, #0xc\n"
+ "cmp x19, #0xc\n"
"blt 5f\n"
"4:" // Main row loop: Column loop
- "ldr q17, [x9], #0x10\n"
- "ldr q16, [x28], #0x10\n"
- "sub x24, x24, #0xc\n"
- "cmp x24, #0xc\n"
- "ldr q19, [x27], #0x10\n"
- "ldr q18, [x26], #0x10\n"
- "zip1 v28.8h, v17.8h, v16.8h\n"
- "zip2 v27.8h, v17.8h, v16.8h\n"
- "ldr q17, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
- "zip1 v26.8h, v19.8h, v18.8h\n"
- "zip2 v25.8h, v19.8h, v18.8h\n"
- "ldr q19, [x22], #0x10\n"
- "ldr q18, [x20], #0x10\n"
- "zip1 v24.8h, v17.8h, v16.8h\n"
- "zip2 v23.8h, v17.8h, v16.8h\n"
- "ldr d17, [x9], #0x8\n"
- "ldr d16, [x28], #0x8\n"
- "zip1 v22.8h, v17.8h, v16.8h\n"
- "zip1 v21.8h, v19.8h, v18.8h\n"
- "ldr d17, [x27], #0x8\n"
+ "ldr q18, [x28], #0x10\n"
+ "sub x19, x19, #0xc\n"
+ "ldr q16, [x26], #0x10\n"
+ "zip1 v29.8h, v18.8h, v16.8h\n"
+ "ldr d17, [x28], #0x8\n"
+ "cmp x19, #0xc\n"
+ "zip2 v28.8h, v18.8h, v16.8h\n"
"ldr d16, [x26], #0x8\n"
- "zip1 v20.8h, v17.8h, v16.8h\n"
- "zip2 v19.8h, v19.8h, v18.8h\n"
- "ldr d17, [x25], #0x8\n"
- "ldr d16, [x23], #0x8\n"
- "zip1 v18.8h, v17.8h, v16.8h\n"
+ "ldr q19, [x25], #0x10\n"
+ "zip1 v27.8h, v17.8h, v16.8h\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr q17, [x24], #0x10\n"
+ "zip1 v26.8h, v19.8h, v17.8h\n"
+ "ldr d16, [x24], #0x8\n"
+ "zip2 v25.8h, v19.8h, v17.8h\n"
+ "ldr q19, [x23], #0x10\n"
+ "ldr q17, [x22], #0x10\n"
+ "zip1 v24.8h, v18.8h, v16.8h\n"
+ "ldr q18, [x21], #0x10\n"
+ "ldr q16, [x20], #0x10\n"
+ "zip1 v23.8h, v19.8h, v17.8h\n"
+ "zip2 v22.8h, v19.8h, v17.8h\n"
+ "ldr d21, [x23], #0x8\n"
"ldr d17, [x22], #0x8\n"
+ "zip1 v20.8h, v18.8h, v16.8h\n"
+ "ldr d19, [x21], #0x8\n"
+ "zip2 v18.8h, v18.8h, v16.8h\n"
"ldr d16, [x20], #0x8\n"
- "zip1 v16.8h, v17.8h, v16.8h\n"
- "str q28, [x21, #0x0]\n"
- "str q27, [x21, #0x10]\n"
- "str q22, [x21, #0x20]\n"
- "str q26, [x21, #0x30]\n"
- "str q25, [x21, #0x40]\n"
- "str q20, [x21, #0x50]\n"
- "str q24, [x21, #0x60]\n"
- "str q23, [x21, #0x70]\n"
- "str q18, [x21, #0x80]\n"
- "str q21, [x21, #0x90]\n"
- "str q19, [x21, #0xa0]\n"
- "str q16, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q29, [x27, #0x0]\n"
+ "zip1 v17.8h, v21.8h, v17.8h\n"
+ "str q28, [x27, #0x10]\n"
+ "zip1 v16.8h, v19.8h, v16.8h\n"
+ "str q27, [x27, #0x20]\n"
+ "str q26, [x27, #0x30]\n"
+ "str q25, [x27, #0x40]\n"
+ "str q24, [x27, #0x50]\n"
+ "str q23, [x27, #0x60]\n"
+ "str q22, [x27, #0x70]\n"
+ "str q17, [x27, #0x80]\n"
+ "str q20, [x27, #0x90]\n"
+ "str q18, [x27, #0xa0]\n"
+ "str q16, [x27, #0xb0]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp x24, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
- "ldr d19, [x9], #0x8\n"
- "ldr d18, [x28], #0x8\n"
- "sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr d17, [x27], #0x8\n"
+ "ldr d17, [x28], #0x8\n"
+ "sub x19, x19, #0x4\n"
"ldr d16, [x26], #0x8\n"
- "zip1 v20.8h, v19.8h, v18.8h\n"
- "zip1 v19.8h, v17.8h, v16.8h\n"
+ "zip1 v20.8h, v17.8h, v16.8h\n"
"ldr d17, [x25], #0x8\n"
- "ldr d16, [x23], #0x8\n"
+ "cmp x19, #0x4\n"
+ "ldr d16, [x24], #0x8\n"
+ "zip1 v19.8h, v17.8h, v16.8h\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d16, [x22], #0x8\n"
"zip1 v18.8h, v17.8h, v16.8h\n"
- "ldr d17, [x22], #0x8\n"
+ "ldr d17, [x21], #0x8\n"
"ldr d16, [x20], #0x8\n"
- "str q20, [x21, #0x0]\n"
"zip1 v16.8h, v17.8h, v16.8h\n"
- "str q19, [x21, #0x30]\n"
- "str q18, [x21, #0x60]\n"
- "str q16, [x21, #0x90]\n"
- "add x21, x21, #0x10\n"
+ "str q20, [x27, #0x0]\n"
+ "str q19, [x27, #0x30]\n"
+ "str q18, [x27, #0x60]\n"
+ "str q16, [x27, #0x90]\n"
+ "add x27, x27, #0x10\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
- "cmp x24, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
- "ldr h19, [x9], #0x2\n"
- "ldr h18, [x28], #0x2\n"
- "sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr h17, [x27], #0x2\n"
+ "ldr h17, [x28], #0x2\n"
+ "sub x19, x19, #0x1\n"
"ldr h16, [x26], #0x2\n"
- "zip1 v20.8h, v19.8h, v18.8h\n"
- "zip1 v19.8h, v17.8h, v16.8h\n"
+ "zip1 v20.8h, v17.8h, v16.8h\n"
"ldr h17, [x25], #0x2\n"
- "ldr h16, [x23], #0x2\n"
+ "cmp x19, #0x1\n"
+ "ldr h16, [x24], #0x2\n"
+ "zip1 v19.8h, v17.8h, v16.8h\n"
+ "ldr h17, [x23], #0x2\n"
+ "ldr h16, [x22], #0x2\n"
"zip1 v18.8h, v17.8h, v16.8h\n"
- "ldr h17, [x22], #0x2\n"
+ "ldr h17, [x21], #0x2\n"
"ldr h16, [x20], #0x2\n"
- "str s20, [x21, #0x0]\n"
"zip1 v16.8h, v17.8h, v16.8h\n"
- "str s19, [x21, #0x30]\n"
- "str s18, [x21, #0x60]\n"
- "str s16, [x21, #0x90]\n"
- "add x21, x21, #0x4\n"
+ "str s20, [x27, #0x0]\n"
+ "str s19, [x27, #0x30]\n"
+ "str s18, [x27, #0x60]\n"
+ "str s16, [x27, #0x90]\n"
+ "add x27, x27, #0x4\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
- "cmp %x[height], #0x8\n"
"add %x[out], %x[out], #0xc0\n"
+ "cmp %x[height], #0x8\n"
"bge 1b\n"
"cbz %x[height], 20f\n"
"10:" // Main loop skip
"11:" // Tail row loop: Head
- "mov x9, %x[in]\n"
- "mov x20, %x[width]\n"
- "add x28, x9, %x[in_stride]\n"
+ "mov x28, %x[in]\n"
+ "mov x27, %x[out]\n"
+ "add x26, x28, %x[in_stride]\n"
+ "add %x[in], x26, %x[in_stride]\n"
"cmp %x[height], #0x1\n"
- "add %x[in], x28, %x[in_stride]\n"
- "csel x28, x28, %x[pad_row], GT\n"
- "cmp x20, #0x18\n"
- "mov x21, %x[out]\n"
+ "csel x26, x26, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x2\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x18\n"
"blt 13f\n"
"12:" // Tail row loop: Unroll column loop
- "ldr q17, [x9], #0x10\n"
- "ldr q16, [x28], #0x10\n"
- "sub x20, x20, #0x18\n"
+ "ldr q17, [x28], #0x10\n"
+ "sub x19, x19, #0x18\n"
+ "ldr q16, [x26], #0x10\n"
"zip1 v22.8h, v17.8h, v16.8h\n"
- "ldr q21, [x9], #0x10\n"
"ldr q18, [x28], #0x10\n"
- "zip2 v17.8h, v17.8h, v16.8h\n"
- "zip1 v20.8h, v21.8h, v18.8h\n"
- "ldr q19, [x9], #0x10\n"
- "ldr q16, [x28], #0x10\n"
- "str q22, [x21, #0x0]\n"
- "cmp x20, #0x18\n"
- "str q17, [x21, #0x10]\n"
- "zip2 v18.8h, v21.8h, v18.8h\n"
- "zip1 v17.8h, v19.8h, v16.8h\n"
- "str q20, [x21, #0x20]\n"
- "add x21, x21, %x[out_stride]\n"
- "zip2 v16.8h, v19.8h, v16.8h\n"
- "str q18, [x21, #0x0]\n"
- "str q17, [x21, #0x10]\n"
- "str q16, [x21, #0x20]\n"
- "add x21, x21, %x[out_stride]\n"
+ "cmp x19, #0x18\n"
+ "zip2 v21.8h, v17.8h, v16.8h\n"
+ "ldr q17, [x26], #0x10\n"
+ "ldr q20, [x28], #0x10\n"
+ "zip1 v19.8h, v18.8h, v17.8h\n"
+ "ldr q16, [x26], #0x10\n"
+ "zip2 v18.8h, v18.8h, v17.8h\n"
+ "str q22, [x27, #0x0]\n"
+ "str q21, [x27, #0x10]\n"
+ "zip1 v17.8h, v20.8h, v16.8h\n"
+ "str q19, [x27, #0x20]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "zip2 v16.8h, v20.8h, v16.8h\n"
+ "str q18, [x27, #0x0]\n"
+ "str q17, [x27, #0x10]\n"
+ "str q16, [x27, #0x20]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 12b\n"
"13:" // Tail row loop: Unroll column loop skip
- "cmp x20, #0xc\n"
+ "cmp x19, #0xc\n"
"blt 15f\n"
"14:" // Tail row loop: Column loop
- "ldr q20, [x9], #0x10\n"
"ldr q17, [x28], #0x10\n"
- "sub x20, x20, #0xc\n"
- "cmp x20, #0xc\n"
- "ldr d19, [x9], #0x8\n"
- "ldr d16, [x28], #0x8\n"
- "zip1 v18.8h, v20.8h, v17.8h\n"
- "zip2 v17.8h, v20.8h, v17.8h\n"
- "zip1 v16.8h, v19.8h, v16.8h\n"
- "str q18, [x21, #0x0]\n"
- "str q17, [x21, #0x10]\n"
- "str q16, [x21, #0x20]\n"
- "add x21, x21, %x[out_stride]\n"
+ "sub x19, x19, #0xc\n"
+ "ldr q16, [x26], #0x10\n"
+ "zip1 v19.8h, v17.8h, v16.8h\n"
+ "ldr d18, [x28], #0x8\n"
+ "cmp x19, #0xc\n"
+ "zip2 v17.8h, v17.8h, v16.8h\n"
+ "ldr d16, [x26], #0x8\n"
+ "str q19, [x27, #0x0]\n"
+ "zip1 v16.8h, v18.8h, v16.8h\n"
+ "str q17, [x27, #0x10]\n"
+ "str q16, [x27, #0x20]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 14b\n"
"15:" // Tail row loop: Column loop skip
- "cmp x20, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 17f\n"
"16:" // Tail row loop: width 4 loop: loop
- "ldr d17, [x9], #0x8\n"
- "ldr d16, [x28], #0x8\n"
- "sub x20, x20, #0x4\n"
- "cmp x20, #0x4\n"
+ "ldr d17, [x28], #0x8\n"
+ "sub x19, x19, #0x4\n"
+ "ldr d16, [x26], #0x8\n"
"zip1 v16.8h, v17.8h, v16.8h\n"
- "str q16, [x21, #0x0]\n"
- "add x21, x21, #0x10\n"
+ "str q16, [x27, #0x0]\n"
+ "add x27, x27, #0x10\n"
+ "cmp x19, #0x4\n"
"bge 16b\n"
"17:" // Tail row loop: width 4 loop: skip
- "cmp x20, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 19f\n"
"18:" // Tail row loop: width 1 loop: loop
- "ldr h17, [x9], #0x2\n"
- "ldr h16, [x28], #0x2\n"
- "sub x20, x20, #0x1\n"
- "cmp x20, #0x1\n"
+ "ldr h17, [x28], #0x2\n"
+ "sub x19, x19, #0x1\n"
+ "ldr h16, [x26], #0x2\n"
"zip1 v16.8h, v17.8h, v16.8h\n"
- "str s16, [x21, #0x0]\n"
- "add x21, x21, #0x4\n"
+ "str s16, [x27, #0x0]\n"
+ "add x27, x27, #0x4\n"
+ "cmp x19, #0x1\n"
"bge 18b\n"
"19:" // Tail row loop: width 1 loop: skip
- "cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x30\n"
+ "cmp %x[height], #0x1\n"
"bge 11b\n"
"20:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
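
The 2x2 variant above admits the same treatment, under the same caveats (hypothetical names; strides taken in uint16_t elements): four independently zipped row pairs per 12-column panel at element offsets 0, 24, 48 and 72, plus a two-row tail pass that substitutes pad_row for a missing odd row.

#include <algorithm>
#include <cstddef>
#include <cstdint>

// Scalar sketch: pair p of a full 8-row block lands at panel + p * 24;
// within a pair, elements alternate row0/row1 per column (zip1/zip2 .8h).
void sketch_transpose_interleave_12_2x2(
    uint16_t *out, const uint16_t *in, size_t width, size_t height,
    size_t in_stride, size_t out_stride, const uint16_t *pad_row)
{
    size_t row = 0;
    for (; row + 8 <= height; row += 8, out += 96) {   // main: 0xc0 bytes
        uint16_t *panel = out;
        for (size_t col = 0; col < width; col += 12, panel += out_stride) {
            const size_t n = std::min<size_t>(12, width - col);
            for (size_t p = 0; p < 4; ++p)
                for (size_t c = 0; c < n; ++c)
                    for (size_t r = 0; r < 2; ++r)
                        panel[p * 24 + c * 2 + r] =
                            in[(row + 2 * p + r) * in_stride + col + c];
        }
    }
    for (; row < height; row += 2, out += 24) {        // tail: 0x30 bytes
        const uint16_t *r0 = in + row * in_stride;
        const uint16_t *r1 = (row + 1 < height) ? r0 + in_stride : pad_row;
        uint16_t *panel = out;
        for (size_t col = 0; col < width; col += 12, panel += out_stride) {
            const size_t n = std::min<size_t>(12, width - col);
            for (size_t c = 0; c < n; ++c) {
                panel[c * 2 + 0] = r0[col + c];
                panel[c * 2 + 1] = r1[col + c];
            }
        }
    }
}

The 0xc0 and 0x30 advances of %x[out] in the assembly correspond to the 96- and 24-element steps here.
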
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_2x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_2x4.hpp
index e6ddc10e04..7e8ca6648d 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_2x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_2x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -42,388 +42,388 @@ void a64_transpose_interleave_12_2x4(uint16_t *out, const uint16_t *in, size_t w
"cmp %x[height], #0x8\n"
"blt 10f\n"
"1:" // Main row loop: Head
- "mov x9, %x[in]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
- "add x26, x27, %x[in_stride]\n"
+ "mov x28, %x[in]\n"
+ "mov x27, %x[out]\n"
+ "add x26, x28, %x[in_stride]\n"
"add x25, x26, %x[in_stride]\n"
- "mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "add x23, x24, %x[in_stride]\n"
"add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
- "cmp x24, #0x18\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x8\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x18\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ldr q21, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "sub x24, x24, #0x18\n"
- "cmp x24, #0x18\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v13.8h, v21.8h, v17.8h\n"
- "zip1 v12.8h, v20.8h, v16.8h\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v11.8h, v21.8h, v17.8h\n"
- "zip2 v10.8h, v20.8h, v16.8h\n"
- "ldr q17, [x22], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "zip1 v9.8h, v19.8h, v17.8h\n"
- "zip1 v8.8h, v18.8h, v16.8h\n"
- "ldr q21, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "zip2 v7.8h, v19.8h, v17.8h\n"
- "zip2 v6.8h, v18.8h, v16.8h\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v27.8h, v21.8h, v17.8h\n"
- "zip1 v22.8h, v20.8h, v16.8h\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v5.8h, v21.8h, v17.8h\n"
- "zip2 v4.8h, v20.8h, v16.8h\n"
- "ldr q17, [x22], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "zip1 v26.8h, v19.8h, v17.8h\n"
- "zip1 v25.8h, v18.8h, v16.8h\n"
- "ldr q21, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "zip2 v3.8h, v19.8h, v17.8h\n"
- "zip2 v2.8h, v18.8h, v16.8h\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v24.8h, v21.8h, v17.8h\n"
- "zip1 v23.8h, v20.8h, v16.8h\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v1.8h, v21.8h, v17.8h\n"
- "zip2 v0.8h, v20.8h, v16.8h\n"
- "ldr q17, [x22], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "zip1 v31.8h, v19.8h, v17.8h\n"
- "zip1 v30.8h, v18.8h, v16.8h\n"
- "zip2 v29.8h, v19.8h, v17.8h\n"
- "zip2 v28.8h, v18.8h, v16.8h\n"
- "zip1 v17.8h, v13.8h, v12.8h\n"
- "zip2 v16.8h, v13.8h, v12.8h\n"
- "str q17, [x21, #0x0]\n"
- "zip1 v18.8h, v11.8h, v10.8h\n"
- "zip2 v17.8h, v11.8h, v10.8h\n"
- "str q16, [x21, #0x10]\n"
- "zip1 v16.8h, v27.8h, v22.8h\n"
- "zip2 v22.8h, v27.8h, v22.8h\n"
- "str q18, [x21, #0x20]\n"
- "zip1 v21.8h, v9.8h, v8.8h\n"
- "zip2 v20.8h, v9.8h, v8.8h\n"
- "str q17, [x21, #0x30]\n"
- "zip1 v19.8h, v7.8h, v6.8h\n"
- "zip2 v18.8h, v7.8h, v6.8h\n"
- "str q16, [x21, #0x40]\n"
- "zip1 v17.8h, v26.8h, v25.8h\n"
- "zip2 v16.8h, v26.8h, v25.8h\n"
- "str q22, [x21, #0x50]\n"
- "str q21, [x21, #0x60]\n"
- "zip1 v27.8h, v5.8h, v4.8h\n"
- "zip2 v26.8h, v5.8h, v4.8h\n"
- "str q20, [x21, #0x70]\n"
- "zip1 v25.8h, v24.8h, v23.8h\n"
- "zip2 v24.8h, v24.8h, v23.8h\n"
- "str q19, [x21, #0x80]\n"
- "zip1 v23.8h, v1.8h, v0.8h\n"
- "zip2 v22.8h, v1.8h, v0.8h\n"
- "str q18, [x21, #0x90]\n"
- "zip1 v21.8h, v3.8h, v2.8h\n"
- "zip2 v20.8h, v3.8h, v2.8h\n"
- "str q17, [x21, #0xa0]\n"
- "zip1 v19.8h, v31.8h, v30.8h\n"
- "zip2 v18.8h, v31.8h, v30.8h\n"
- "str q16, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
- "zip1 v17.8h, v29.8h, v28.8h\n"
- "zip2 v16.8h, v29.8h, v28.8h\n"
- "str q27, [x21, #0x0]\n"
- "str q26, [x21, #0x10]\n"
- "str q25, [x21, #0x20]\n"
- "str q24, [x21, #0x30]\n"
- "str q23, [x21, #0x40]\n"
- "str q22, [x21, #0x50]\n"
- "str q21, [x21, #0x60]\n"
- "str q20, [x21, #0x70]\n"
- "str q19, [x21, #0x80]\n"
- "str q18, [x21, #0x90]\n"
- "str q17, [x21, #0xa0]\n"
- "str q16, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr q18, [x28], #0x10\n"
+ "sub x19, x19, #0x18\n"
+ "ldr q23, [x26], #0x10\n"
+ "cmp x19, #0x18\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v22.8h, v18.8h, v16.8h\n"
+ "ldr q17, [x28], #0x10\n"
+ "zip2 v21.8h, v18.8h, v16.8h\n"
+ "ldr q12, [x26], #0x10\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v20.8h, v17.8h, v16.8h\n"
+ "ldr q18, [x28], #0x10\n"
+ "zip2 v11.8h, v17.8h, v16.8h\n"
+ "ldr q10, [x26], #0x10\n"
+ "ldr q17, [x25], #0x10\n"
+ "zip1 v9.8h, v18.8h, v17.8h\n"
+ "ldr q16, [x24], #0x10\n"
+ "zip2 v8.8h, v18.8h, v17.8h\n"
+ "ldr q19, [x23], #0x10\n"
+ "ldr q7, [x22], #0x10\n"
+ "zip1 v17.8h, v23.8h, v16.8h\n"
+ "ldr q6, [x24], #0x10\n"
+ "zip2 v16.8h, v23.8h, v16.8h\n"
+ "ldr q5, [x23], #0x10\n"
+ "zip1 v4.8h, v22.8h, v17.8h\n"
+ "ldr q3, [x22], #0x10\n"
+ "zip2 v2.8h, v22.8h, v17.8h\n"
+ "ldr q18, [x21], #0x10\n"
+ "zip1 v1.8h, v21.8h, v16.8h\n"
+ "ldr q0, [x24], #0x10\n"
+ "zip2 v31.8h, v21.8h, v16.8h\n"
+ "ldr q30, [x23], #0x10\n"
+ "zip1 v16.8h, v12.8h, v6.8h\n"
+ "ldr q29, [x22], #0x10\n"
+ "zip1 v28.8h, v20.8h, v16.8h\n"
+ "ldr q27, [x21], #0x10\n"
+ "zip2 v26.8h, v20.8h, v16.8h\n"
+ "ldr q21, [x20], #0x10\n"
+ "zip1 v17.8h, v19.8h, v18.8h\n"
+ "ldr q25, [x21], #0x10\n"
+ "zip2 v19.8h, v19.8h, v18.8h\n"
+ "zip1 v18.8h, v5.8h, v27.8h\n"
+ "ldr q24, [x20], #0x10\n"
+ "zip1 v16.8h, v7.8h, v21.8h\n"
+ "ldr q23, [x20], #0x10\n"
+ "zip1 v22.8h, v17.8h, v16.8h\n"
+ "zip2 v20.8h, v17.8h, v16.8h\n"
+ "str q4, [x27, #0x0]\n"
+ "zip2 v16.8h, v7.8h, v21.8h\n"
+ "str q2, [x27, #0x10]\n"
+ "zip1 v17.8h, v19.8h, v16.8h\n"
+ "str q1, [x27, #0x20]\n"
+ "zip2 v21.8h, v19.8h, v16.8h\n"
+ "str q31, [x27, #0x30]\n"
+ "zip1 v16.8h, v3.8h, v24.8h\n"
+ "str q28, [x27, #0x40]\n"
+ "zip1 v19.8h, v18.8h, v16.8h\n"
+ "str q26, [x27, #0x50]\n"
+ "zip2 v18.8h, v18.8h, v16.8h\n"
+ "str q22, [x27, #0x60]\n"
+ "zip2 v16.8h, v12.8h, v6.8h\n"
+ "str q20, [x27, #0x70]\n"
+ "zip1 v20.8h, v11.8h, v16.8h\n"
+ "str q17, [x27, #0x80]\n"
+ "zip2 v17.8h, v11.8h, v16.8h\n"
+ "str q21, [x27, #0x90]\n"
+ "zip1 v16.8h, v10.8h, v0.8h\n"
+ "str q19, [x27, #0xa0]\n"
+ "zip1 v19.8h, v9.8h, v16.8h\n"
+ "str q18, [x27, #0xb0]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "zip2 v18.8h, v9.8h, v16.8h\n"
+ "str q20, [x27, #0x0]\n"
+ "zip2 v16.8h, v10.8h, v0.8h\n"
+ "str q17, [x27, #0x10]\n"
+ "zip1 v17.8h, v8.8h, v16.8h\n"
+ "str q19, [x27, #0x20]\n"
+ "zip2 v16.8h, v8.8h, v16.8h\n"
+ "str q18, [x27, #0x30]\n"
+ "zip2 v18.8h, v5.8h, v27.8h\n"
+ "str q17, [x27, #0x40]\n"
+ "zip2 v17.8h, v3.8h, v24.8h\n"
+ "str q16, [x27, #0x50]\n"
+ "zip1 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [x27, #0x60]\n"
+ "zip2 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [x27, #0x70]\n"
+ "zip1 v18.8h, v30.8h, v25.8h\n"
+ "zip1 v17.8h, v29.8h, v23.8h\n"
+ "zip1 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [x27, #0x80]\n"
+ "zip2 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [x27, #0x90]\n"
+ "zip2 v18.8h, v30.8h, v25.8h\n"
+ "zip2 v17.8h, v29.8h, v23.8h\n"
+ "zip1 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [x27, #0xa0]\n"
+ "zip2 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [x27, #0xb0]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cmp x24, #0xc\n"
+ "cmp x19, #0xc\n"
"blt 5f\n"
"4:" // Main row loop: Column loop
- "ldr q19, [x9], #0x10\n"
"ldr q18, [x28], #0x10\n"
- "sub x24, x24, #0xc\n"
- "cmp x24, #0xc\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v25.8h, v19.8h, v17.8h\n"
- "zip1 v24.8h, v18.8h, v16.8h\n"
- "ldr q21, [x25], #0x10\n"
- "ldr q20, [x23], #0x10\n"
- "zip2 v31.8h, v19.8h, v17.8h\n"
+ "sub x19, x19, #0xc\n"
+ "ldr q20, [x26], #0x10\n"
+ "cmp x19, #0xc\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v19.8h, v18.8h, v16.8h\n"
+ "ldr d17, [x28], #0x8\n"
"zip2 v23.8h, v18.8h, v16.8h\n"
- "ldr q17, [x22], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "zip1 v30.8h, v21.8h, v17.8h\n"
- "zip1 v29.8h, v20.8h, v16.8h\n"
- "ldr d19, [x9], #0x8\n"
- "ldr d18, [x28], #0x8\n"
- "zip2 v28.8h, v21.8h, v17.8h\n"
- "zip2 v27.8h, v20.8h, v16.8h\n"
- "ldr d17, [x27], #0x8\n"
- "ldr d16, [x26], #0x8\n"
- "zip1 v26.8h, v19.8h, v17.8h\n"
- "zip1 v22.8h, v18.8h, v16.8h\n"
- "ldr d21, [x25], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "zip1 v19.8h, v25.8h, v24.8h\n"
- "zip2 v18.8h, v25.8h, v24.8h\n"
- "ldr d17, [x22], #0x8\n"
- "ldr d16, [x20], #0x8\n"
- "zip1 v25.8h, v21.8h, v17.8h\n"
- "zip1 v24.8h, v20.8h, v16.8h\n"
- "zip1 v17.8h, v31.8h, v23.8h\n"
- "zip2 v16.8h, v31.8h, v23.8h\n"
- "str q19, [x21, #0x0]\n"
- "zip1 v23.8h, v26.8h, v22.8h\n"
- "zip2 v22.8h, v26.8h, v22.8h\n"
- "str q18, [x21, #0x10]\n"
- "zip1 v21.8h, v30.8h, v29.8h\n"
- "zip2 v20.8h, v30.8h, v29.8h\n"
- "str q17, [x21, #0x20]\n"
- "zip1 v19.8h, v28.8h, v27.8h\n"
- "zip2 v18.8h, v28.8h, v27.8h\n"
- "str q16, [x21, #0x30]\n"
- "zip1 v17.8h, v25.8h, v24.8h\n"
- "zip2 v16.8h, v25.8h, v24.8h\n"
- "str q23, [x21, #0x40]\n"
- "str q22, [x21, #0x50]\n"
- "str q21, [x21, #0x60]\n"
- "str q20, [x21, #0x70]\n"
- "str q19, [x21, #0x80]\n"
- "str q18, [x21, #0x90]\n"
- "str q17, [x21, #0xa0]\n"
- "str q16, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr d22, [x26], #0x8\n"
+ "ldr d16, [x25], #0x8\n"
+ "zip1 v21.8h, v17.8h, v16.8h\n"
+ "ldr q16, [x24], #0x10\n"
+ "ldr q31, [x23], #0x10\n"
+ "zip1 v18.8h, v20.8h, v16.8h\n"
+ "ldr d17, [x24], #0x8\n"
+ "zip2 v16.8h, v20.8h, v16.8h\n"
+ "ldr d30, [x23], #0x8\n"
+ "zip1 v29.8h, v19.8h, v18.8h\n"
+ "ldr q28, [x22], #0x10\n"
+ "zip2 v20.8h, v19.8h, v18.8h\n"
+ "ldr q27, [x21], #0x10\n"
+ "zip1 v19.8h, v23.8h, v16.8h\n"
+ "ldr q26, [x20], #0x10\n"
+ "zip2 v18.8h, v23.8h, v16.8h\n"
+ "ldr d25, [x22], #0x8\n"
+ "zip1 v16.8h, v22.8h, v17.8h\n"
+ "zip1 v24.8h, v21.8h, v16.8h\n"
+ "ldr d23, [x21], #0x8\n"
+ "zip2 v22.8h, v21.8h, v16.8h\n"
+ "ldr d21, [x20], #0x8\n"
+ "zip1 v17.8h, v31.8h, v27.8h\n"
+ "str q29, [x27, #0x0]\n"
+ "zip1 v16.8h, v28.8h, v26.8h\n"
+ "str q20, [x27, #0x10]\n"
+ "zip1 v20.8h, v17.8h, v16.8h\n"
+ "str q19, [x27, #0x20]\n"
+ "zip2 v19.8h, v17.8h, v16.8h\n"
+ "str q18, [x27, #0x30]\n"
+ "zip2 v18.8h, v31.8h, v27.8h\n"
+ "str q24, [x27, #0x40]\n"
+ "zip2 v16.8h, v28.8h, v26.8h\n"
+ "str q22, [x27, #0x50]\n"
+ "zip1 v17.8h, v18.8h, v16.8h\n"
+ "str q20, [x27, #0x60]\n"
+ "zip2 v16.8h, v18.8h, v16.8h\n"
+ "str q19, [x27, #0x70]\n"
+ "zip1 v18.8h, v30.8h, v23.8h\n"
+ "str q17, [x27, #0x80]\n"
+ "zip1 v17.8h, v25.8h, v21.8h\n"
+ "str q16, [x27, #0x90]\n"
+ "zip1 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [x27, #0xa0]\n"
+ "zip2 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [x27, #0xb0]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp x24, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
- "ldr d19, [x9], #0x8\n"
- "ldr d18, [x28], #0x8\n"
- "sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr d17, [x27], #0x8\n"
- "ldr d16, [x26], #0x8\n"
- "zip1 v17.8h, v19.8h, v17.8h\n"
- "zip1 v16.8h, v18.8h, v16.8h\n"
- "ldr d18, [x25], #0x8\n"
+ "ldr d17, [x28], #0x8\n"
+ "sub x19, x19, #0x4\n"
+ "ldr d18, [x26], #0x8\n"
+ "cmp x19, #0x4\n"
+ "ldr d16, [x25], #0x8\n"
+ "zip1 v17.8h, v17.8h, v16.8h\n"
+ "ldr d16, [x24], #0x8\n"
"ldr d21, [x23], #0x8\n"
- "zip1 v20.8h, v17.8h, v16.8h\n"
- "zip2 v19.8h, v17.8h, v16.8h\n"
- "ldr d17, [x22], #0x8\n"
+ "zip1 v16.8h, v18.8h, v16.8h\n"
+ "ldr d20, [x22], #0x8\n"
+ "ldr d19, [x21], #0x8\n"
+ "zip1 v18.8h, v17.8h, v16.8h\n"
+ "zip2 v17.8h, v17.8h, v16.8h\n"
"ldr d16, [x20], #0x8\n"
- "zip1 v18.8h, v18.8h, v17.8h\n"
- "zip1 v16.8h, v21.8h, v16.8h\n"
- "str q20, [x21, #0x0]\n"
- "zip1 v17.8h, v18.8h, v16.8h\n"
- "zip2 v16.8h, v18.8h, v16.8h\n"
- "str q19, [x21, #0x10]\n"
- "str q17, [x21, #0x60]\n"
- "str q16, [x21, #0x70]\n"
- "add x21, x21, #0x20\n"
+ "str q18, [x27, #0x0]\n"
+ "zip1 v18.8h, v21.8h, v19.8h\n"
+ "str q17, [x27, #0x10]\n"
+ "zip1 v17.8h, v20.8h, v16.8h\n"
+ "zip1 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [x27, #0x60]\n"
+ "zip2 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [x27, #0x70]\n"
+ "add x27, x27, #0x20\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
- "cmp x24, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
- "ldr h19, [x9], #0x2\n"
"ldr h18, [x28], #0x2\n"
- "sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr h17, [x27], #0x2\n"
- "ldr h16, [x26], #0x2\n"
- "zip1 v17.8h, v19.8h, v17.8h\n"
- "zip1 v16.8h, v18.8h, v16.8h\n"
- "ldr h20, [x25], #0x2\n"
- "ldr h19, [x23], #0x2\n"
- "zip1 v18.8h, v17.8h, v16.8h\n"
- "ldr h17, [x22], #0x2\n"
+ "sub x19, x19, #0x1\n"
+ "ldr h17, [x26], #0x2\n"
+ "cmp x19, #0x1\n"
+ "ldr h16, [x25], #0x2\n"
+ "zip1 v18.8h, v18.8h, v16.8h\n"
+ "ldr h16, [x24], #0x2\n"
+ "ldr h20, [x23], #0x2\n"
+ "zip1 v16.8h, v17.8h, v16.8h\n"
+ "ldr h19, [x22], #0x2\n"
+ "ldr h17, [x21], #0x2\n"
+ "zip1 v18.8h, v18.8h, v16.8h\n"
"ldr h16, [x20], #0x2\n"
"zip1 v17.8h, v20.8h, v17.8h\n"
+ "str d18, [x27, #0x0]\n"
"zip1 v16.8h, v19.8h, v16.8h\n"
- "str d18, [x21, #0x0]\n"
"zip1 v16.8h, v17.8h, v16.8h\n"
- "str d16, [x21, #0x60]\n"
- "add x21, x21, #0x8\n"
+ "str d16, [x27, #0x60]\n"
+ "add x27, x27, #0x8\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
- "cmp %x[height], #0x8\n"
"add %x[out], %x[out], #0xc0\n"
+ "cmp %x[height], #0x8\n"
"bge 1b\n"
"cbz %x[height], 20f\n"
"10:" // Main loop skip
"11:" // Tail row loop: Head
- "mov x9, %x[in]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
- "mov x20, %x[width]\n"
- "add x26, x27, %x[in_stride]\n"
+ "mov x28, %x[in]\n"
+ "mov x27, %x[out]\n"
+ "add x26, x28, %x[in_stride]\n"
+ "add x25, x26, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "add %x[in], x24, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x26, %x[in_stride]\n"
- "csel x26, x26, %x[pad_row], GT\n"
- "csel x27, x27, %x[pad_row], GE\n"
+ "csel x24, x24, %x[pad_row], GT\n"
+ "csel x25, x25, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x28, x28, %x[pad_row], GT\n"
- "cmp x20, #0x18\n"
- "mov x21, %x[out]\n"
+ "csel x26, x26, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x4\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x18\n"
"blt 13f\n"
"12:" // Tail row loop: Unroll column loop
- "ldr q19, [x9], #0x10\n"
"ldr q18, [x28], #0x10\n"
- "sub x20, x20, #0x18\n"
- "cmp x20, #0x18\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v31.8h, v19.8h, v17.8h\n"
- "zip1 v30.8h, v18.8h, v16.8h\n"
- "ldr q22, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "zip2 v29.8h, v19.8h, v17.8h\n"
- "zip2 v28.8h, v18.8h, v16.8h\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v27.8h, v22.8h, v17.8h\n"
- "zip1 v21.8h, v20.8h, v16.8h\n"
- "ldr q19, [x9], #0x10\n"
+ "sub x19, x19, #0x18\n"
+ "ldr q19, [x26], #0x10\n"
+ "cmp x19, #0x18\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v28.8h, v18.8h, v16.8h\n"
+ "ldr q17, [x28], #0x10\n"
+ "zip2 v27.8h, v18.8h, v16.8h\n"
+ "ldr q26, [x26], #0x10\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v25.8h, v17.8h, v16.8h\n"
"ldr q18, [x28], #0x10\n"
- "zip2 v26.8h, v22.8h, v17.8h\n"
- "zip2 v20.8h, v20.8h, v16.8h\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v25.8h, v19.8h, v17.8h\n"
- "zip1 v24.8h, v18.8h, v16.8h\n"
- "zip2 v23.8h, v19.8h, v17.8h\n"
- "zip2 v22.8h, v18.8h, v16.8h\n"
- "zip1 v17.8h, v31.8h, v30.8h\n"
- "zip2 v16.8h, v31.8h, v30.8h\n"
- "str q17, [x21, #0x0]\n"
- "zip1 v19.8h, v29.8h, v28.8h\n"
- "zip2 v18.8h, v29.8h, v28.8h\n"
- "str q16, [x21, #0x10]\n"
- "zip1 v17.8h, v27.8h, v21.8h\n"
- "zip2 v16.8h, v27.8h, v21.8h\n"
- "str q19, [x21, #0x20]\n"
- "str q18, [x21, #0x30]\n"
- "zip1 v21.8h, v26.8h, v20.8h\n"
- "zip2 v20.8h, v26.8h, v20.8h\n"
- "str q17, [x21, #0x40]\n"
- "zip1 v19.8h, v25.8h, v24.8h\n"
- "zip2 v18.8h, v25.8h, v24.8h\n"
- "str q16, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
- "zip1 v17.8h, v23.8h, v22.8h\n"
- "zip2 v16.8h, v23.8h, v22.8h\n"
- "str q21, [x21, #0x0]\n"
- "str q20, [x21, #0x10]\n"
- "str q19, [x21, #0x20]\n"
- "str q18, [x21, #0x30]\n"
- "str q17, [x21, #0x40]\n"
- "str q16, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
+ "zip2 v24.8h, v17.8h, v16.8h\n"
+ "ldr q23, [x26], #0x10\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v22.8h, v18.8h, v16.8h\n"
+ "ldr q17, [x24], #0x10\n"
+ "zip2 v21.8h, v18.8h, v16.8h\n"
+ "ldr q20, [x24], #0x10\n"
+ "zip1 v16.8h, v19.8h, v17.8h\n"
+ "zip2 v18.8h, v19.8h, v17.8h\n"
+ "ldr q19, [x24], #0x10\n"
+ "zip1 v17.8h, v28.8h, v16.8h\n"
+ "zip2 v16.8h, v28.8h, v16.8h\n"
+ "str q17, [x27, #0x0]\n"
+ "zip1 v17.8h, v27.8h, v18.8h\n"
+ "str q16, [x27, #0x10]\n"
+ "zip2 v16.8h, v27.8h, v18.8h\n"
+ "str q17, [x27, #0x20]\n"
+ "zip1 v17.8h, v26.8h, v20.8h\n"
+ "str q16, [x27, #0x30]\n"
+ "zip1 v16.8h, v25.8h, v17.8h\n"
+ "str q16, [x27, #0x40]\n"
+ "zip2 v16.8h, v25.8h, v17.8h\n"
+ "str q16, [x27, #0x50]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "zip2 v18.8h, v26.8h, v20.8h\n"
+ "zip1 v17.8h, v23.8h, v19.8h\n"
+ "zip1 v16.8h, v24.8h, v18.8h\n"
+ "str q16, [x27, #0x0]\n"
+ "zip2 v16.8h, v24.8h, v18.8h\n"
+ "str q16, [x27, #0x10]\n"
+ "zip1 v16.8h, v22.8h, v17.8h\n"
+ "str q16, [x27, #0x20]\n"
+ "zip2 v16.8h, v22.8h, v17.8h\n"
+ "str q16, [x27, #0x30]\n"
+ "zip2 v17.8h, v23.8h, v19.8h\n"
+ "zip1 v16.8h, v21.8h, v17.8h\n"
+ "str q16, [x27, #0x40]\n"
+ "zip2 v16.8h, v21.8h, v17.8h\n"
+ "str q16, [x27, #0x50]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 12b\n"
"13:" // Tail row loop: Unroll column loop skip
- "cmp x20, #0xc\n"
+ "cmp x19, #0xc\n"
"blt 15f\n"
"14:" // Tail row loop: Column loop
- "ldr q21, [x9], #0x10\n"
"ldr q18, [x28], #0x10\n"
- "sub x20, x20, #0xc\n"
- "cmp x20, #0xc\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v24.8h, v21.8h, v17.8h\n"
+ "sub x19, x19, #0xc\n"
+ "ldr q24, [x26], #0x10\n"
+ "cmp x19, #0xc\n"
+ "ldr q16, [x25], #0x10\n"
"zip1 v23.8h, v18.8h, v16.8h\n"
- "ldr d20, [x9], #0x8\n"
- "ldr d19, [x28], #0x8\n"
- "zip2 v22.8h, v21.8h, v17.8h\n"
- "zip2 v18.8h, v18.8h, v16.8h\n"
- "ldr d17, [x27], #0x8\n"
- "ldr d16, [x26], #0x8\n"
- "zip1 v21.8h, v20.8h, v17.8h\n"
- "zip1 v20.8h, v19.8h, v16.8h\n"
- "zip1 v17.8h, v24.8h, v23.8h\n"
- "zip2 v16.8h, v24.8h, v23.8h\n"
- "str q17, [x21, #0x0]\n"
- "zip1 v19.8h, v22.8h, v18.8h\n"
- "zip2 v18.8h, v22.8h, v18.8h\n"
- "str q16, [x21, #0x10]\n"
- "zip1 v17.8h, v21.8h, v20.8h\n"
- "zip2 v16.8h, v21.8h, v20.8h\n"
- "str q19, [x21, #0x20]\n"
- "str q18, [x21, #0x30]\n"
- "str q17, [x21, #0x40]\n"
- "str q16, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr d17, [x28], #0x8\n"
+ "zip2 v22.8h, v18.8h, v16.8h\n"
+ "ldr d21, [x26], #0x8\n"
+ "ldr d16, [x25], #0x8\n"
+ "zip1 v20.8h, v17.8h, v16.8h\n"
+ "ldr q16, [x24], #0x10\n"
+ "zip1 v19.8h, v24.8h, v16.8h\n"
+ "ldr d18, [x24], #0x8\n"
+ "zip2 v17.8h, v24.8h, v16.8h\n"
+ "zip1 v16.8h, v23.8h, v19.8h\n"
+ "str q16, [x27, #0x0]\n"
+ "zip2 v16.8h, v23.8h, v19.8h\n"
+ "str q16, [x27, #0x10]\n"
+ "zip1 v16.8h, v22.8h, v17.8h\n"
+ "str q16, [x27, #0x20]\n"
+ "zip2 v16.8h, v22.8h, v17.8h\n"
+ "str q16, [x27, #0x30]\n"
+ "zip1 v17.8h, v21.8h, v18.8h\n"
+ "zip1 v16.8h, v20.8h, v17.8h\n"
+ "str q16, [x27, #0x40]\n"
+ "zip2 v16.8h, v20.8h, v17.8h\n"
+ "str q16, [x27, #0x50]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 14b\n"
"15:" // Tail row loop: Column loop skip
- "cmp x20, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 17f\n"
"16:" // Tail row loop: width 4 loop: loop
- "ldr d18, [x9], #0x8\n"
- "ldr d19, [x28], #0x8\n"
- "sub x20, x20, #0x4\n"
- "cmp x20, #0x4\n"
- "ldr d17, [x27], #0x8\n"
- "ldr d16, [x26], #0x8\n"
- "zip1 v18.8h, v18.8h, v17.8h\n"
- "zip1 v16.8h, v19.8h, v16.8h\n"
- "zip1 v17.8h, v18.8h, v16.8h\n"
- "zip2 v16.8h, v18.8h, v16.8h\n"
- "str q17, [x21, #0x0]\n"
- "str q16, [x21, #0x10]\n"
- "add x21, x21, #0x20\n"
+ "ldr d18, [x28], #0x8\n"
+ "sub x19, x19, #0x4\n"
+ "ldr d17, [x26], #0x8\n"
+ "cmp x19, #0x4\n"
+ "ldr d16, [x25], #0x8\n"
+ "zip1 v18.8h, v18.8h, v16.8h\n"
+ "ldr d16, [x24], #0x8\n"
+ "zip1 v17.8h, v17.8h, v16.8h\n"
+ "zip1 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [x27, #0x0]\n"
+ "zip2 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [x27, #0x10]\n"
+ "add x27, x27, #0x20\n"
"bge 16b\n"
"17:" // Tail row loop: width 4 loop: skip
- "cmp x20, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 19f\n"
"18:" // Tail row loop: width 1 loop: loop
- "ldr h19, [x9], #0x2\n"
- "ldr h18, [x28], #0x2\n"
- "sub x20, x20, #0x1\n"
- "cmp x20, #0x1\n"
- "ldr h17, [x27], #0x2\n"
- "ldr h16, [x26], #0x2\n"
- "zip1 v17.8h, v19.8h, v17.8h\n"
+ "ldr h17, [x28], #0x2\n"
+ "sub x19, x19, #0x1\n"
+ "ldr h18, [x26], #0x2\n"
+ "cmp x19, #0x1\n"
+ "ldr h16, [x25], #0x2\n"
+ "zip1 v17.8h, v17.8h, v16.8h\n"
+ "ldr h16, [x24], #0x2\n"
"zip1 v16.8h, v18.8h, v16.8h\n"
"zip1 v16.8h, v17.8h, v16.8h\n"
- "str d16, [x21, #0x0]\n"
- "add x21, x21, #0x8\n"
+ "str d16, [x27, #0x0]\n"
+ "add x27, x27, #0x8\n"
"bge 18b\n"
"19:" // Tail row loop: width 1 loop: skip
- "cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x60\n"
+ "cmp %x[height], #0x1\n"
"bge 11b\n"
"20:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
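
Likewise for the 2x4 variant above, same caveats: each output column carries four consecutive rows, a full 8-row block is split into two four-row groups per panel (element offsets 0 and 48), and a padded four-row pass handles the tail.

#include <algorithm>
#include <cstddef>
#include <cstdint>

// Scalar sketch: per column, four consecutive rows are interleaved;
// the assembly builds this as zip(r0,r2), zip(r1,r3), then a zip of zips.
void sketch_transpose_interleave_12_2x4(
    uint16_t *out, const uint16_t *in, size_t width, size_t height,
    size_t in_stride, size_t out_stride, const uint16_t *pad_row)
{
    size_t row = 0;
    for (; row + 8 <= height; row += 8, out += 96) {   // main: 0xc0 bytes
        uint16_t *panel = out;
        for (size_t col = 0; col < width; col += 12, panel += out_stride) {
            const size_t n = std::min<size_t>(12, width - col);
            for (size_t g = 0; g < 2; ++g)             // groups at 0x0 / 0x60
                for (size_t c = 0; c < n; ++c)
                    for (size_t r = 0; r < 4; ++r)
                        panel[g * 48 + c * 4 + r] =
                            in[(row + 4 * g + r) * in_stride + col + c];
        }
    }
    for (; row < height; row += 4, out += 48) {        // tail: 0x60 bytes
        const uint16_t *src[4];
        for (size_t r = 0; r < 4; ++r)                 // csel -> pad_row
            src[r] = (row + r < height) ? in + (row + r) * in_stride : pad_row;
        uint16_t *panel = out;
        for (size_t col = 0; col < width; col += 12, panel += out_stride) {
            const size_t n = std::min<size_t>(12, width - col);
            for (size_t c = 0; c < n; ++c)
                for (size_t r = 0; r < 4; ++r)
                    panel[c * 4 + r] = src[r][col + c];
        }
    }
}

The 0xc0 and 0x60 advances of %x[out] in the assembly correspond to the 96- and 48-element steps here.
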
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_2x4_fp32bf16.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_2x4_fp32bf16.hpp
index e487d4d839..efb1c742ed 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_2x4_fp32bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_2x4_fp32bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -42,678 +42,679 @@ void a64_transpose_interleave_12_2x4_fp32bf16(bfloat16 *out, const float *in, si
"cmp %x[height], #0x8\n"
"blt 10f\n"
"1:" // Main row loop: Head
- "mov x9, %x[in]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
- "add x26, x27, %x[in_stride]\n"
+ "mov x28, %x[in]\n"
+ "mov x27, %x[out]\n"
+ "add x26, x28, %x[in_stride]\n"
"add x25, x26, %x[in_stride]\n"
- "mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "add x23, x24, %x[in_stride]\n"
"add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
- "cmp x24, #0x18\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x8\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x18\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ldr q15, [x9], #0x10\n"
- "ldr q17, [x28], #0x10\n"
- "sub x24, x24, #0x18\n"
- "cmp x24, #0x18\n"
- "ldr q16, [x27], #0x10\n"
+ "ldr q12, [x28], #0x10\n"
+ "sub x19, x19, #0x18\n"
"ldr q20, [x26], #0x10\n"
- "zip1 v6.4s, v15.4s, v16.4s\n"
- "zip1 v11.4s, v17.4s, v20.4s\n"
- "ldr q2, [x25], #0x10\n"
- "ldr q4, [x23], #0x10\n"
- "zip2 v22.4s, v15.4s, v16.4s\n"
- "zip2 v18.4s, v17.4s, v20.4s\n"
- "ldr q17, [x22], #0x10\n"
- "ldr q26, [x20], #0x10\n"
- "zip1 v9.4s, v2.4s, v17.4s\n"
- "zip1 v10.4s, v4.4s, v26.4s\n"
- "ldr q16, [x9], #0x10\n"
- "ldr q27, [x28], #0x10\n"
- "zip2 v3.4s, v2.4s, v17.4s\n"
- "zip2 v30.4s, v4.4s, v26.4s\n"
- "ldr q13, [x27], #0x10\n"
- "ldr q1, [x26], #0x10\n"
- "zip1 v23.4s, v16.4s, v13.4s\n"
- "zip1 v5.4s, v27.4s, v1.4s\n"
- "ldr q26, [x25], #0x10\n"
- "ldr q14, [x23], #0x10\n"
- "zip2 v0.4s, v16.4s, v13.4s\n"
- "zip2 v2.4s, v27.4s, v1.4s\n"
- "ldr q15, [x22], #0x10\n"
- "ldr q8, [x20], #0x10\n"
- "zip1 v31.4s, v26.4s, v15.4s\n"
- "zip1 v4.4s, v14.4s, v8.4s\n"
- "ldr q28, [x9], #0x10\n"
- "ldr q19, [x28], #0x10\n"
- "zip2 v21.4s, v26.4s, v15.4s\n"
- "zip2 v16.4s, v14.4s, v8.4s\n"
- "ldr q15, [x27], #0x10\n"
- "ldr q1, [x26], #0x10\n"
- "zip1 v17.4s, v28.4s, v15.4s\n"
- "zip1 v8.4s, v19.4s, v1.4s\n"
- "ldr q27, [x25], #0x10\n"
- "ldr q20, [x23], #0x10\n"
- "zip2 v7.4s, v28.4s, v15.4s\n"
- "zip2 v15.4s, v19.4s, v1.4s\n"
- "ldr q12, [x22], #0x10\n"
- "ldr q25, [x20], #0x10\n"
- "zip1 v14.4s, v27.4s, v12.4s\n"
- "zip1 v26.4s, v20.4s, v25.4s\n"
- "ldr q13, [x9], #0x10\n"
- "ldr q29, [x28], #0x10\n"
- "zip2 v28.4s, v27.4s, v12.4s\n"
- "zip2 v12.4s, v20.4s, v25.4s\n"
- "ldr q27, [x27], #0x10\n"
- "ldr q20, [x26], #0x10\n"
- "zip1 v19.4s, v13.4s, v27.4s\n"
- "zip1 v25.4s, v29.4s, v20.4s\n"
- "ldr q24, [x25], #0x10\n"
- "ldr q1, [x23], #0x10\n"
- "zip2 v27.4s, v13.4s, v27.4s\n"
- "zip2 v13.4s, v29.4s, v20.4s\n"
- "ldr q20, [x22], #0x10\n"
- "zip1 v29.4s, v24.4s, v20.4s\n"
- "zip2 v20.4s, v24.4s, v20.4s\n"
- "zip1 v24.4s, v6.4s, v11.4s\n"
- ".inst 0x0ea16b18 // bfcvtn v24.4h, v24.4s\n"
- "zip2 v11.4s, v6.4s, v11.4s\n"
- "ldr q6, [x20], #0x10\n"
- ".inst 0x4ea16978 // bfcvtn2 v24.8h, v11.4s\n"
- "zip1 v11.4s, v1.4s, v6.4s\n"
- "zip2 v6.4s, v1.4s, v6.4s\n"
- "zip1 v1.4s, v22.4s, v18.4s\n"
- ".inst 0x0ea16821 // bfcvtn v1.4h, v1.4s\n"
- "zip2 v18.4s, v22.4s, v18.4s\n"
- "ldr q22, [x9], #0x10\n"
- ".inst 0x4ea16a41 // bfcvtn2 v1.8h, v18.4s\n"
- "zip1 v18.4s, v23.4s, v5.4s\n"
- ".inst 0x0ea16a52 // bfcvtn v18.4h, v18.4s\n"
- "zip2 v5.4s, v23.4s, v5.4s\n"
- "ldr q23, [x28], #0x10\n"
- ".inst 0x4ea168b2 // bfcvtn2 v18.8h, v5.4s\n"
- "zip1 v5.4s, v0.4s, v2.4s\n"
- ".inst 0x0ea168a5 // bfcvtn v5.4h, v5.4s\n"
- "zip2 v0.4s, v0.4s, v2.4s\n"
- "ldr q2, [x27], #0x10\n"
- ".inst 0x4ea16805 // bfcvtn2 v5.8h, v0.4s\n"
- "zip1 v0.4s, v22.4s, v2.4s\n"
- "zip2 v2.4s, v22.4s, v2.4s\n"
- "zip1 v22.4s, v17.4s, v8.4s\n"
- ".inst 0x0ea16ad6 // bfcvtn v22.4h, v22.4s\n"
- "zip2 v8.4s, v17.4s, v8.4s\n"
- "ldr q17, [x26], #0x10\n"
- ".inst 0x4ea16916 // bfcvtn2 v22.8h, v8.4s\n"
- "zip1 v8.4s, v23.4s, v17.4s\n"
- "zip2 v23.4s, v23.4s, v17.4s\n"
- "zip1 v17.4s, v7.4s, v15.4s\n"
- ".inst 0x0ea16a31 // bfcvtn v17.4h, v17.4s\n"
- "zip2 v7.4s, v7.4s, v15.4s\n"
- "ldr q15, [x25], #0x10\n"
- ".inst 0x4ea168f1 // bfcvtn2 v17.8h, v7.4s\n"
- "zip1 v7.4s, v9.4s, v10.4s\n"
- ".inst 0x0ea168e7 // bfcvtn v7.4h, v7.4s\n"
- "zip2 v10.4s, v9.4s, v10.4s\n"
+ "cmp x19, #0x18\n"
+ "ldr q11, [x25], #0x10\n"
+ "zip1 v29.4s, v12.4s, v11.4s\n"
+ "ldr q5, [x28], #0x10\n"
+ "zip2 v0.4s, v12.4s, v11.4s\n"
+ "ldr q28, [x26], #0x10\n"
+ "ldr q17, [x25], #0x10\n"
+ "zip1 v23.4s, v5.4s, v17.4s\n"
+ "ldr q25, [x28], #0x10\n"
+ "zip2 v18.4s, v5.4s, v17.4s\n"
+ "ldr q6, [x26], #0x10\n"
+ "ldr q31, [x25], #0x10\n"
+ "zip1 v21.4s, v25.4s, v31.4s\n"
+ "ldr q16, [x28], #0x10\n"
+ "zip2 v10.4s, v25.4s, v31.4s\n"
+ "ldr q11, [x26], #0x10\n"
+ "ldr q1, [x25], #0x10\n"
+ "zip1 v13.4s, v16.4s, v1.4s\n"
+ "ldr q14, [x28], #0x10\n"
+ "zip2 v24.4s, v16.4s, v1.4s\n"
+ "ldr q4, [x26], #0x10\n"
+ "ldr q22, [x25], #0x10\n"
+ "zip1 v1.4s, v14.4s, v22.4s\n"
+ "ldr q15, [x28], #0x10\n"
+ "zip2 v8.4s, v14.4s, v22.4s\n"
+ "ldr q31, [x26], #0x10\n"
+ "ldr q3, [x25], #0x10\n"
+ "zip1 v27.4s, v15.4s, v3.4s\n"
+ "ldr q30, [x24], #0x10\n"
+ "zip2 v22.4s, v15.4s, v3.4s\n"
+ "ldr q15, [x23], #0x10\n"
+ "ldr q5, [x22], #0x10\n"
+ "zip1 v16.4s, v20.4s, v30.4s\n"
+ "ldr q3, [x24], #0x10\n"
+ "zip2 v7.4s, v20.4s, v30.4s\n"
+ "ldr q26, [x23], #0x10\n"
+ "zip1 v12.4s, v29.4s, v16.4s\n"
+ "ldr q25, [x22], #0x10\n"
+ ".inst 0x0ea16994 // bfcvtn v20.4h, v12.4s\n"
+ "ldr q2, [x21], #0x10\n"
+ "zip2 v16.4s, v29.4s, v16.4s\n"
+ "ldr q19, [x24], #0x10\n"
+ "zip1 v12.4s, v0.4s, v7.4s\n"
"ldr q9, [x23], #0x10\n"
- ".inst 0x4ea16947 // bfcvtn2 v7.8h, v10.4s\n"
- "zip1 v10.4s, v3.4s, v30.4s\n"
- ".inst 0x0ea1694a // bfcvtn v10.4h, v10.4s\n"
- "zip2 v30.4s, v3.4s, v30.4s\n"
- "ldr q3, [x22], #0x10\n"
- ".inst 0x4ea16bca // bfcvtn2 v10.8h, v30.4s\n"
- "zip1 v30.4s, v15.4s, v3.4s\n"
- "zip2 v15.4s, v15.4s, v3.4s\n"
- "zip1 v3.4s, v31.4s, v4.4s\n"
- ".inst 0x0ea16863 // bfcvtn v3.4h, v3.4s\n"
- "zip2 v31.4s, v31.4s, v4.4s\n"
- "ldr q4, [x20], #0x10\n"
- ".inst 0x4ea16be3 // bfcvtn2 v3.8h, v31.4s\n"
- "zip1 v31.4s, v9.4s, v4.4s\n"
- "zip2 v4.4s, v9.4s, v4.4s\n"
- "zip1 v9.4s, v21.4s, v16.4s\n"
- ".inst 0x0ea16929 // bfcvtn v9.4h, v9.4s\n"
- "zip2 v16.4s, v21.4s, v16.4s\n"
- "ldr q21, [x9], #0x10\n"
- ".inst 0x4ea16a09 // bfcvtn2 v9.8h, v16.4s\n"
- "zip1 v16.4s, v14.4s, v26.4s\n"
+ ".inst 0x4ea16a14 // bfcvtn2 v20.8h, v16.4s\n"
+ "ldr q14, [x22], #0x10\n"
+ ".inst 0x0ea1699e // bfcvtn v30.4h, v12.4s\n"
+ "ldr q12, [x21], #0x10\n"
+ "zip2 v16.4s, v0.4s, v7.4s\n"
+ "ldr q7, [x24], #0x10\n"
+ "zip1 v29.4s, v28.4s, v3.4s\n"
+ "ldr q0, [x23], #0x10\n"
+ ".inst 0x4ea16a1e // bfcvtn2 v30.8h, v16.4s\n"
+ "ldr q17, [x22], #0x10\n"
+ "zip1 v16.4s, v23.4s, v29.4s\n"
".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
- "zip2 v14.4s, v14.4s, v26.4s\n"
- "ldr q26, [x28], #0x10\n"
- ".inst 0x4ea169d0 // bfcvtn2 v16.8h, v14.4s\n"
- "zip1 v14.4s, v28.4s, v12.4s\n"
- ".inst 0x0ea169ce // bfcvtn v14.4h, v14.4s\n"
- "zip2 v12.4s, v28.4s, v12.4s\n"
- "ldr q28, [x27], #0x10\n"
- ".inst 0x4ea1698e // bfcvtn2 v14.8h, v12.4s\n"
- "zip1 v12.4s, v21.4s, v28.4s\n"
+ "zip2 v23.4s, v23.4s, v29.4s\n"
+ "ldr q29, [x24], #0x10\n"
+ "zip2 v28.4s, v28.4s, v3.4s\n"
+ "ldr q3, [x23], #0x10\n"
+ ".inst 0x4ea16af0 // bfcvtn2 v16.8h, v23.4s\n"
+ "zip1 v23.4s, v18.4s, v28.4s\n"
+ ".inst 0x0ea16af7 // bfcvtn v23.4h, v23.4s\n"
+ "zip2 v28.4s, v18.4s, v28.4s\n"
+ "ldr q18, [x24], #0x10\n"
+ ".inst 0x4ea16b97 // bfcvtn2 v23.8h, v28.4s\n"
+ "zip1 v28.4s, v6.4s, v19.4s\n"
+ "zip2 v6.4s, v6.4s, v19.4s\n"
+ "zip1 v19.4s, v21.4s, v28.4s\n"
+ ".inst 0x0ea16a73 // bfcvtn v19.4h, v19.4s\n"
"zip2 v28.4s, v21.4s, v28.4s\n"
- "zip1 v21.4s, v19.4s, v25.4s\n"
- ".inst 0x0ea16ab5 // bfcvtn v21.4h, v21.4s\n"
- "zip2 v19.4s, v19.4s, v25.4s\n"
- "ldr q25, [x26], #0x10\n"
- ".inst 0x4ea16a75 // bfcvtn2 v21.8h, v19.4s\n"
- "zip1 v19.4s, v26.4s, v25.4s\n"
- "zip2 v25.4s, v26.4s, v25.4s\n"
- "zip1 v26.4s, v27.4s, v13.4s\n"
- ".inst 0x0ea16b5a // bfcvtn v26.4h, v26.4s\n"
- "zip2 v13.4s, v27.4s, v13.4s\n"
- "ldr q27, [x25], #0x10\n"
- ".inst 0x4ea169ba // bfcvtn2 v26.8h, v13.4s\n"
- "zip1 v13.4s, v0.4s, v8.4s\n"
+ "ldr q21, [x23], #0x10\n"
+ ".inst 0x4ea16b93 // bfcvtn2 v19.8h, v28.4s\n"
+ "zip1 v28.4s, v10.4s, v6.4s\n"
+ ".inst 0x0ea16b9c // bfcvtn v28.4h, v28.4s\n"
+ "zip2 v6.4s, v10.4s, v6.4s\n"
+ "ldr q10, [x22], #0x10\n"
+ ".inst 0x4ea168dc // bfcvtn2 v28.8h, v6.4s\n"
+ "zip1 v6.4s, v11.4s, v7.4s\n"
+ "zip2 v7.4s, v11.4s, v7.4s\n"
+ "zip1 v11.4s, v13.4s, v6.4s\n"
+ ".inst 0x0ea1696b // bfcvtn v11.4h, v11.4s\n"
+ "zip2 v13.4s, v13.4s, v6.4s\n"
+ "ldr q6, [x22], #0x10\n"
+ ".inst 0x4ea169ab // bfcvtn2 v11.8h, v13.4s\n"
+ "zip1 v13.4s, v24.4s, v7.4s\n"
".inst 0x0ea169ad // bfcvtn v13.4h, v13.4s\n"
- "zip2 v8.4s, v0.4s, v8.4s\n"
- "ldr q0, [x23], #0x10\n"
- ".inst 0x4ea1690d // bfcvtn2 v13.8h, v8.4s\n"
- "zip1 v8.4s, v2.4s, v23.4s\n"
- ".inst 0x0ea16908 // bfcvtn v8.4h, v8.4s\n"
- "zip2 v23.4s, v2.4s, v23.4s\n"
- "ldr q2, [x22], #0x10\n"
- ".inst 0x4ea16ae8 // bfcvtn2 v8.8h, v23.4s\n"
- "ldr q23, [x20], #0x10\n"
- "str q24, [x21, #0x0]\n"
- "zip1 v24.4s, v27.4s, v2.4s\n"
- "zip2 v27.4s, v27.4s, v2.4s\n"
- "zip1 v2.4s, v0.4s, v23.4s\n"
- "zip2 v23.4s, v0.4s, v23.4s\n"
- "str q1, [x21, #0x10]\n"
- "zip1 v0.4s, v12.4s, v19.4s\n"
- "zip1 v1.4s, v28.4s, v25.4s\n"
- "str q18, [x21, #0x20]\n"
- "zip1 v18.4s, v29.4s, v11.4s\n"
- ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
- "str q5, [x21, #0x30]\n"
- "zip1 v5.4s, v20.4s, v6.4s\n"
- "zip2 v19.4s, v12.4s, v19.4s\n"
- "str q22, [x21, #0x40]\n"
- "zip1 v12.4s, v30.4s, v31.4s\n"
- "zip1 v22.4s, v15.4s, v4.4s\n"
- "str q17, [x21, #0x50]\n"
- "zip1 v17.4s, v24.4s, v2.4s\n"
- ".inst 0x0ea16821 // bfcvtn v1.4h, v1.4s\n"
- "str q7, [x21, #0x60]\n"
- "zip1 v7.4s, v27.4s, v23.4s\n"
- "zip2 v25.4s, v28.4s, v25.4s\n"
- "str q10, [x21, #0x70]\n"
+ "zip2 v7.4s, v24.4s, v7.4s\n"
+ "ldr q24, [x21], #0x10\n"
+ ".inst 0x4ea168ed // bfcvtn2 v13.8h, v7.4s\n"
+ "zip1 v7.4s, v4.4s, v29.4s\n"
+ "zip2 v29.4s, v4.4s, v29.4s\n"
+ "zip1 v4.4s, v1.4s, v7.4s\n"
+ ".inst 0x0ea16884 // bfcvtn v4.4h, v4.4s\n"
+ "zip2 v7.4s, v1.4s, v7.4s\n"
+ "ldr q1, [x21], #0x10\n"
+ ".inst 0x4ea168e4 // bfcvtn2 v4.8h, v7.4s\n"
+ "zip1 v7.4s, v8.4s, v29.4s\n"
+ ".inst 0x0ea168e7 // bfcvtn v7.4h, v7.4s\n"
+ "zip2 v8.4s, v8.4s, v29.4s\n"
+ "ldr q29, [x21], #0x10\n"
+ ".inst 0x4ea16907 // bfcvtn2 v7.8h, v8.4s\n"
+ "zip1 v8.4s, v31.4s, v18.4s\n"
+ "zip2 v31.4s, v31.4s, v18.4s\n"
+ "zip1 v18.4s, v27.4s, v8.4s\n"
".inst 0x0ea16a52 // bfcvtn v18.4h, v18.4s\n"
- "zip2 v29.4s, v29.4s, v11.4s\n"
- "str q3, [x21, #0x80]\n"
- ".inst 0x0ea168ab // bfcvtn v11.4h, v5.4s\n"
- "zip2 v10.4s, v20.4s, v6.4s\n"
- "str q9, [x21, #0x90]\n"
- ".inst 0x0ea16986 // bfcvtn v6.4h, v12.4s\n"
- "zip2 v12.4s, v30.4s, v31.4s\n"
- "str q16, [x21, #0xa0]\n"
- ".inst 0x0ea16ac5 // bfcvtn v5.4h, v22.4s\n"
- "zip2 v4.4s, v15.4s, v4.4s\n"
- "str q14, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
- ".inst 0x0ea16a2f // bfcvtn v15.4h, v17.4s\n"
- "zip2 v20.4s, v24.4s, v2.4s\n"
- "str q21, [x21, #0x0]\n"
- ".inst 0x0ea168fc // bfcvtn v28.4h, v7.4s\n"
- "zip2 v30.4s, v27.4s, v23.4s\n"
- "str q26, [x21, #0x10]\n"
- ".inst 0x4ea16a60 // bfcvtn2 v0.8h, v19.4s\n"
- ".inst 0x4ea16b21 // bfcvtn2 v1.8h, v25.4s\n"
- "str q13, [x21, #0x20]\n"
- ".inst 0x4ea16bb2 // bfcvtn2 v18.8h, v29.4s\n"
- ".inst 0x4ea1694b // bfcvtn2 v11.8h, v10.4s\n"
- "str q8, [x21, #0x30]\n"
- ".inst 0x4ea16986 // bfcvtn2 v6.8h, v12.4s\n"
- ".inst 0x4ea16885 // bfcvtn2 v5.8h, v4.4s\n"
- "str q0, [x21, #0x40]\n"
- ".inst 0x4ea16a8f // bfcvtn2 v15.8h, v20.4s\n"
- ".inst 0x4ea16bdc // bfcvtn2 v28.8h, v30.4s\n"
- "str q1, [x21, #0x50]\n"
- "str q18, [x21, #0x60]\n"
- "str q11, [x21, #0x70]\n"
- "str q6, [x21, #0x80]\n"
- "str q5, [x21, #0x90]\n"
- "str q15, [x21, #0xa0]\n"
- "str q28, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "zip2 v27.4s, v27.4s, v8.4s\n"
+ "ldr q8, [x21], #0x10\n"
+ ".inst 0x4ea16b72 // bfcvtn2 v18.8h, v27.4s\n"
+ "zip1 v27.4s, v22.4s, v31.4s\n"
+ ".inst 0x0ea16b7b // bfcvtn v27.4h, v27.4s\n"
+ "zip2 v31.4s, v22.4s, v31.4s\n"
+ "ldr q22, [x20], #0x10\n"
+ ".inst 0x4ea16bfb // bfcvtn2 v27.8h, v31.4s\n"
+ "zip1 v31.4s, v15.4s, v2.4s\n"
+ "zip2 v2.4s, v15.4s, v2.4s\n"
+ "zip1 v15.4s, v26.4s, v12.4s\n"
+ "zip2 v26.4s, v26.4s, v12.4s\n"
+ "zip1 v12.4s, v5.4s, v22.4s\n"
+ "zip2 v22.4s, v5.4s, v22.4s\n"
+ "zip1 v5.4s, v31.4s, v12.4s\n"
+ ".inst 0x0ea168a5 // bfcvtn v5.4h, v5.4s\n"
+ "zip2 v31.4s, v31.4s, v12.4s\n"
+ "ldr q12, [x20], #0x10\n"
+ ".inst 0x4ea16be5 // bfcvtn2 v5.8h, v31.4s\n"
+ "zip1 v31.4s, v2.4s, v22.4s\n"
+ ".inst 0x0ea16bff // bfcvtn v31.4h, v31.4s\n"
+ "zip2 v2.4s, v2.4s, v22.4s\n"
+ "ldr q22, [x20], #0x10\n"
+ ".inst 0x4ea1685f // bfcvtn2 v31.8h, v2.4s\n"
+ "zip1 v2.4s, v25.4s, v12.4s\n"
+ "zip2 v25.4s, v25.4s, v12.4s\n"
+ "zip1 v12.4s, v15.4s, v2.4s\n"
+ ".inst 0x0ea1698c // bfcvtn v12.4h, v12.4s\n"
+ "zip2 v15.4s, v15.4s, v2.4s\n"
+ "ldr q2, [x20], #0x10\n"
+ ".inst 0x4ea169ec // bfcvtn2 v12.8h, v15.4s\n"
+ "zip1 v15.4s, v26.4s, v25.4s\n"
+ ".inst 0x0ea169ef // bfcvtn v15.4h, v15.4s\n"
+ "zip2 v25.4s, v26.4s, v25.4s\n"
+ "ldr q26, [x20], #0x10\n"
+ ".inst 0x4ea16b2f // bfcvtn2 v15.8h, v25.4s\n"
+ "ldr q25, [x20], #0x10\n"
+ "str q20, [x27, #0x0]\n"
+ "zip1 v20.4s, v9.4s, v24.4s\n"
+ "zip2 v24.4s, v9.4s, v24.4s\n"
+ "str q30, [x27, #0x10]\n"
+ "zip1 v9.4s, v14.4s, v22.4s\n"
+ "str q16, [x27, #0x20]\n"
+ "zip1 v16.4s, v20.4s, v9.4s\n"
+ "str q23, [x27, #0x30]\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ "str q19, [x27, #0x40]\n"
+ "zip2 v9.4s, v20.4s, v9.4s\n"
+ "str q28, [x27, #0x50]\n"
+ "zip2 v22.4s, v14.4s, v22.4s\n"
+ "str q5, [x27, #0x60]\n"
+ ".inst 0x4ea16930 // bfcvtn2 v16.8h, v9.4s\n"
+ "str q31, [x27, #0x70]\n"
+ "zip1 v19.4s, v24.4s, v22.4s\n"
+ "str q12, [x27, #0x80]\n"
+ ".inst 0x0ea16a6c // bfcvtn v12.4h, v19.4s\n"
+ "str q15, [x27, #0x90]\n"
+ "zip2 v9.4s, v24.4s, v22.4s\n"
+ "str q16, [x27, #0xa0]\n"
+ "zip1 v15.4s, v0.4s, v1.4s\n"
+ ".inst 0x4ea1692c // bfcvtn2 v12.8h, v9.4s\n"
+ "str q12, [x27, #0xb0]\n"
+ "zip1 v20.4s, v17.4s, v2.4s\n"
+ "add x27, x27, %x[out_stride]\n"
+ "zip1 v16.4s, v15.4s, v20.4s\n"
+ "str q11, [x27, #0x0]\n"
+ "zip2 v9.4s, v15.4s, v20.4s\n"
+ "str q13, [x27, #0x10]\n"
+ ".inst 0x0ea16a0f // bfcvtn v15.4h, v16.4s\n"
+ "str q4, [x27, #0x20]\n"
+ "zip2 v14.4s, v0.4s, v1.4s\n"
+ "str q7, [x27, #0x30]\n"
+ "zip2 v31.4s, v17.4s, v2.4s\n"
+ "str q18, [x27, #0x40]\n"
+ ".inst 0x4ea1692f // bfcvtn2 v15.8h, v9.4s\n"
+ "str q27, [x27, #0x50]\n"
+ "zip1 v22.4s, v14.4s, v31.4s\n"
+ "str q15, [x27, #0x60]\n"
+ ".inst 0x0ea16ac9 // bfcvtn v9.4h, v22.4s\n"
+ "zip2 v11.4s, v14.4s, v31.4s\n"
+ "zip1 v18.4s, v3.4s, v29.4s\n"
+ "zip1 v27.4s, v10.4s, v26.4s\n"
+ ".inst 0x4ea16969 // bfcvtn2 v9.8h, v11.4s\n"
+ "str q9, [x27, #0x70]\n"
+ "zip1 v13.4s, v18.4s, v27.4s\n"
+ "zip2 v9.4s, v18.4s, v27.4s\n"
+ ".inst 0x0ea169b3 // bfcvtn v19.4h, v13.4s\n"
+ "zip2 v18.4s, v3.4s, v29.4s\n"
+ "zip2 v1.4s, v10.4s, v26.4s\n"
+ ".inst 0x4ea16933 // bfcvtn2 v19.8h, v9.4s\n"
+ "str q19, [x27, #0x80]\n"
+ "zip1 v16.4s, v18.4s, v1.4s\n"
+ "zip2 v20.4s, v18.4s, v1.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ "zip1 v18.4s, v21.4s, v8.4s\n"
+ "zip1 v2.4s, v6.4s, v25.4s\n"
+ ".inst 0x4ea16a90 // bfcvtn2 v16.8h, v20.4s\n"
+ "str q16, [x27, #0x90]\n"
+ "zip1 v16.4s, v18.4s, v2.4s\n"
+ "zip2 v20.4s, v18.4s, v2.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ "zip2 v18.4s, v21.4s, v8.4s\n"
+ "zip2 v17.4s, v6.4s, v25.4s\n"
+ ".inst 0x4ea16a90 // bfcvtn2 v16.8h, v20.4s\n"
+ "str q16, [x27, #0xa0]\n"
+ "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v17.4s, v18.4s, v17.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ ".inst 0x4ea16a30 // bfcvtn2 v16.8h, v17.4s\n"
+ "str q16, [x27, #0xb0]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cmp x24, #0xc\n"
+ "cmp x19, #0xc\n"
"blt 5f\n"
"4:" // Main row loop: Column loop
- "ldr q20, [x9], #0x10\n"
- "ldr q9, [x28], #0x10\n"
- "sub x24, x24, #0xc\n"
- "cmp x24, #0xc\n"
- "ldr q8, [x27], #0x10\n"
- "ldr q1, [x26], #0x10\n"
- "zip1 v7.4s, v20.4s, v8.4s\n"
- "zip1 v19.4s, v9.4s, v1.4s\n"
- "ldr q6, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
- "zip2 v5.4s, v20.4s, v8.4s\n"
- "zip2 v18.4s, v9.4s, v1.4s\n"
- "ldr q27, [x22], #0x10\n"
- "ldr q14, [x20], #0x10\n"
- "zip1 v26.4s, v6.4s, v27.4s\n"
- "zip1 v15.4s, v16.4s, v14.4s\n"
- "ldr q1, [x9], #0x10\n"
- "ldr q30, [x28], #0x10\n"
- "zip2 v24.4s, v6.4s, v27.4s\n"
- "zip2 v25.4s, v16.4s, v14.4s\n"
- "ldr q13, [x27], #0x10\n"
- "ldr q17, [x26], #0x10\n"
- "zip1 v10.4s, v1.4s, v13.4s\n"
- "zip1 v16.4s, v30.4s, v17.4s\n"
- "ldr q4, [x25], #0x10\n"
- "ldr q11, [x23], #0x10\n"
- "zip2 v0.4s, v1.4s, v13.4s\n"
- "zip2 v27.4s, v30.4s, v17.4s\n"
- "ldr q28, [x22], #0x10\n"
- "ldr q12, [x20], #0x10\n"
- "zip1 v22.4s, v4.4s, v28.4s\n"
- "zip1 v13.4s, v11.4s, v12.4s\n"
- "ldr q31, [x9], #0x10\n"
+ "ldr q18, [x28], #0x10\n"
+ "sub x19, x19, #0xc\n"
+ "ldr q21, [x26], #0x10\n"
+ "cmp x19, #0xc\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v19.4s, v18.4s, v16.4s\n"
"ldr q17, [x28], #0x10\n"
- "zip2 v14.4s, v4.4s, v28.4s\n"
- "zip2 v12.4s, v11.4s, v12.4s\n"
- "ldr q2, [x27], #0x10\n"
- "ldr q3, [x26], #0x10\n"
- "zip1 v8.4s, v31.4s, v2.4s\n"
- "zip1 v4.4s, v17.4s, v3.4s\n"
- "ldr q23, [x25], #0x10\n"
- "ldr q1, [x23], #0x10\n"
- "zip2 v28.4s, v31.4s, v2.4s\n"
- "zip2 v29.4s, v17.4s, v3.4s\n"
- "ldr q11, [x22], #0x10\n"
- "ldr q17, [x20], #0x10\n"
- "zip1 v9.4s, v23.4s, v11.4s\n"
- "zip1 v21.4s, v1.4s, v17.4s\n"
- "zip2 v11.4s, v23.4s, v11.4s\n"
- "zip2 v17.4s, v1.4s, v17.4s\n"
- "zip1 v2.4s, v7.4s, v19.4s\n"
- "zip1 v31.4s, v5.4s, v18.4s\n"
- "zip1 v3.4s, v10.4s, v16.4s\n"
- "zip1 v6.4s, v0.4s, v27.4s\n"
- "zip1 v1.4s, v8.4s, v4.4s\n"
- "zip1 v30.4s, v28.4s, v29.4s\n"
- "zip1 v20.4s, v26.4s, v15.4s\n"
- "zip1 v23.4s, v24.4s, v25.4s\n"
- ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
- "zip2 v7.4s, v7.4s, v19.4s\n"
- "zip1 v19.4s, v22.4s, v13.4s\n"
- ".inst 0x0ea16bff // bfcvtn v31.4h, v31.4s\n"
- "zip2 v18.4s, v5.4s, v18.4s\n"
- "zip1 v5.4s, v14.4s, v12.4s\n"
- ".inst 0x0ea16863 // bfcvtn v3.4h, v3.4s\n"
- "zip2 v16.4s, v10.4s, v16.4s\n"
- "zip1 v10.4s, v9.4s, v21.4s\n"
- ".inst 0x0ea168c6 // bfcvtn v6.4h, v6.4s\n"
- "zip2 v0.4s, v0.4s, v27.4s\n"
- "zip1 v27.4s, v11.4s, v17.4s\n"
- ".inst 0x0ea16821 // bfcvtn v1.4h, v1.4s\n"
- "zip2 v4.4s, v8.4s, v4.4s\n"
- ".inst 0x0ea16bde // bfcvtn v30.4h, v30.4s\n"
- "zip2 v29.4s, v28.4s, v29.4s\n"
- ".inst 0x0ea16a9c // bfcvtn v28.4h, v20.4s\n"
- "zip2 v15.4s, v26.4s, v15.4s\n"
- ".inst 0x0ea16ae8 // bfcvtn v8.4h, v23.4s\n"
- "zip2 v26.4s, v24.4s, v25.4s\n"
- ".inst 0x0ea16a79 // bfcvtn v25.4h, v19.4s\n"
- "zip2 v24.4s, v22.4s, v13.4s\n"
- ".inst 0x0ea168b7 // bfcvtn v23.4h, v5.4s\n"
- "zip2 v22.4s, v14.4s, v12.4s\n"
- ".inst 0x0ea16945 // bfcvtn v5.4h, v10.4s\n"
- "zip2 v20.4s, v9.4s, v21.4s\n"
- ".inst 0x0ea16b73 // bfcvtn v19.4h, v27.4s\n"
- "zip2 v17.4s, v11.4s, v17.4s\n"
- ".inst 0x4ea168e2 // bfcvtn2 v2.8h, v7.4s\n"
- ".inst 0x4ea16a5f // bfcvtn2 v31.8h, v18.4s\n"
- "str q2, [x21, #0x0]\n"
- ".inst 0x4ea16a03 // bfcvtn2 v3.8h, v16.4s\n"
- ".inst 0x4ea16806 // bfcvtn2 v6.8h, v0.4s\n"
- "str q31, [x21, #0x10]\n"
- ".inst 0x4ea16881 // bfcvtn2 v1.8h, v4.4s\n"
- ".inst 0x4ea16bbe // bfcvtn2 v30.8h, v29.4s\n"
- "str q3, [x21, #0x20]\n"
- ".inst 0x4ea169fc // bfcvtn2 v28.8h, v15.4s\n"
- ".inst 0x4ea16b48 // bfcvtn2 v8.8h, v26.4s\n"
- "str q6, [x21, #0x30]\n"
- ".inst 0x4ea16b19 // bfcvtn2 v25.8h, v24.4s\n"
- ".inst 0x4ea16ad7 // bfcvtn2 v23.8h, v22.4s\n"
- "str q1, [x21, #0x40]\n"
- ".inst 0x4ea16a85 // bfcvtn2 v5.8h, v20.4s\n"
- ".inst 0x4ea16a33 // bfcvtn2 v19.8h, v17.4s\n"
- "str q30, [x21, #0x50]\n"
- "str q28, [x21, #0x60]\n"
- "str q8, [x21, #0x70]\n"
- "str q25, [x21, #0x80]\n"
- "str q23, [x21, #0x90]\n"
- "str q5, [x21, #0xa0]\n"
- "str q19, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "zip2 v20.4s, v18.4s, v16.4s\n"
+ "ldr q8, [x26], #0x10\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v7.4s, v17.4s, v16.4s\n"
+ "ldr q18, [x28], #0x10\n"
+ "zip2 v6.4s, v17.4s, v16.4s\n"
+ "ldr q5, [x26], #0x10\n"
+ "ldr q17, [x25], #0x10\n"
+ "zip1 v4.4s, v18.4s, v17.4s\n"
+ "ldr q16, [x24], #0x10\n"
+ "zip2 v3.4s, v18.4s, v17.4s\n"
+ "ldr q2, [x23], #0x10\n"
+ "ldr q1, [x22], #0x10\n"
+ "zip1 v17.4s, v21.4s, v16.4s\n"
+ "ldr q0, [x24], #0x10\n"
+ "zip2 v18.4s, v21.4s, v16.4s\n"
+ "ldr q31, [x23], #0x10\n"
+ "zip1 v16.4s, v19.4s, v17.4s\n"
+ "ldr q30, [x22], #0x10\n"
+ ".inst 0x0ea16a1d // bfcvtn v29.4h, v16.4s\n"
+ "ldr q28, [x21], #0x10\n"
+ "zip2 v17.4s, v19.4s, v17.4s\n"
+ "ldr q27, [x24], #0x10\n"
+ "zip1 v16.4s, v20.4s, v18.4s\n"
+ "ldr q26, [x23], #0x10\n"
+ ".inst 0x4ea16a3d // bfcvtn2 v29.8h, v17.4s\n"
+ "ldr q25, [x22], #0x10\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "ldr q24, [x21], #0x10\n"
+ "zip2 v16.4s, v20.4s, v18.4s\n"
+ "ldr q23, [x20], #0x10\n"
+ "zip1 v17.4s, v8.4s, v0.4s\n"
+ "ldr q22, [x21], #0x10\n"
+ ".inst 0x4ea16a13 // bfcvtn2 v19.8h, v16.4s\n"
+ "zip1 v16.4s, v7.4s, v17.4s\n"
+ "ldr q21, [x20], #0x10\n"
+ ".inst 0x0ea16a12 // bfcvtn v18.4h, v16.4s\n"
+ "ldr q20, [x20], #0x10\n"
+ "zip2 v16.4s, v7.4s, v17.4s\n"
+ "zip2 v17.4s, v8.4s, v0.4s\n"
+ "str q29, [x27, #0x0]\n"
+ ".inst 0x4ea16a12 // bfcvtn2 v18.8h, v16.4s\n"
+ "str q19, [x27, #0x10]\n"
+ "zip1 v16.4s, v6.4s, v17.4s\n"
+ "str q18, [x27, #0x20]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v18.4s, v6.4s, v17.4s\n"
+ "zip1 v17.4s, v5.4s, v27.4s\n"
+ "zip1 v16.4s, v4.4s, v17.4s\n"
+ ".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
+ "str q19, [x27, #0x30]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v18.4s, v4.4s, v17.4s\n"
+ "zip2 v17.4s, v5.4s, v27.4s\n"
+ "zip1 v16.4s, v3.4s, v17.4s\n"
+ ".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
+ "str q19, [x27, #0x40]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v16.4s, v3.4s, v17.4s\n"
+ "zip1 v18.4s, v2.4s, v28.4s\n"
+ "zip1 v17.4s, v1.4s, v23.4s\n"
+ ".inst 0x4ea16a13 // bfcvtn2 v19.8h, v16.4s\n"
+ "str q19, [x27, #0x50]\n"
+ "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v19.4s, v18.4s, v17.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ "zip2 v18.4s, v2.4s, v28.4s\n"
+ "zip2 v17.4s, v1.4s, v23.4s\n"
+ ".inst 0x4ea16a70 // bfcvtn2 v16.8h, v19.4s\n"
+ "str q16, [x27, #0x60]\n"
+ "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v19.4s, v18.4s, v17.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ "zip1 v18.4s, v31.4s, v24.4s\n"
+ "zip1 v17.4s, v30.4s, v21.4s\n"
+ ".inst 0x4ea16a70 // bfcvtn2 v16.8h, v19.4s\n"
+ "str q16, [x27, #0x70]\n"
+ "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v19.4s, v18.4s, v17.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ "zip2 v18.4s, v31.4s, v24.4s\n"
+ "zip2 v17.4s, v30.4s, v21.4s\n"
+ ".inst 0x4ea16a70 // bfcvtn2 v16.8h, v19.4s\n"
+ "str q16, [x27, #0x80]\n"
+ "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v19.4s, v18.4s, v17.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ "zip1 v18.4s, v26.4s, v22.4s\n"
+ "zip1 v17.4s, v25.4s, v20.4s\n"
+ ".inst 0x4ea16a70 // bfcvtn2 v16.8h, v19.4s\n"
+ "str q16, [x27, #0x90]\n"
+ "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v19.4s, v18.4s, v17.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ "zip2 v18.4s, v26.4s, v22.4s\n"
+ "zip2 v17.4s, v25.4s, v20.4s\n"
+ ".inst 0x4ea16a70 // bfcvtn2 v16.8h, v19.4s\n"
+ "str q16, [x27, #0xa0]\n"
+ "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v17.4s, v18.4s, v17.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ ".inst 0x4ea16a30 // bfcvtn2 v16.8h, v17.4s\n"
+ "str q16, [x27, #0xb0]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp x24, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
- "ldr q23, [x9], #0x10\n"
"ldr q20, [x28], #0x10\n"
- "sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v22.4s, v23.4s, v17.4s\n"
- "zip1 v21.4s, v20.4s, v16.4s\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v28.4s, v23.4s, v17.4s\n"
- "zip2 v20.4s, v20.4s, v16.4s\n"
- "ldr q17, [x22], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "zip1 v27.4s, v19.4s, v17.4s\n"
- "zip1 v26.4s, v18.4s, v16.4s\n"
- "zip2 v25.4s, v19.4s, v17.4s\n"
- "zip2 v24.4s, v18.4s, v16.4s\n"
- "zip1 v19.4s, v22.4s, v21.4s\n"
- "zip1 v18.4s, v28.4s, v20.4s\n"
- "zip1 v17.4s, v27.4s, v26.4s\n"
- "zip1 v16.4s, v25.4s, v24.4s\n"
- ".inst 0x0ea16a77 // bfcvtn v23.4h, v19.4s\n"
- "zip2 v22.4s, v22.4s, v21.4s\n"
- ".inst 0x0ea16a55 // bfcvtn v21.4h, v18.4s\n"
- "zip2 v20.4s, v28.4s, v20.4s\n"
- ".inst 0x0ea16a33 // bfcvtn v19.4h, v17.4s\n"
- "zip2 v18.4s, v27.4s, v26.4s\n"
- ".inst 0x0ea16a11 // bfcvtn v17.4h, v16.4s\n"
- "zip2 v16.4s, v25.4s, v24.4s\n"
- ".inst 0x4ea16ad7 // bfcvtn2 v23.8h, v22.4s\n"
- ".inst 0x4ea16a95 // bfcvtn2 v21.8h, v20.4s\n"
- "str q23, [x21, #0x0]\n"
- ".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
- ".inst 0x4ea16a11 // bfcvtn2 v17.8h, v16.4s\n"
- "str q21, [x21, #0x10]\n"
- "str q19, [x21, #0x60]\n"
- "str q17, [x21, #0x70]\n"
- "add x21, x21, #0x20\n"
+ "sub x19, x19, #0x4\n"
+ "ldr q18, [x26], #0x10\n"
+ "cmp x19, #0x4\n"
+ "ldr q17, [x25], #0x10\n"
+ "zip1 v19.4s, v20.4s, v17.4s\n"
+ "ldr q16, [x24], #0x10\n"
+ "zip2 v25.4s, v20.4s, v17.4s\n"
+ "ldr q24, [x23], #0x10\n"
+ "ldr q23, [x22], #0x10\n"
+ "zip1 v17.4s, v18.4s, v16.4s\n"
+ "ldr q22, [x21], #0x10\n"
+ "zip2 v21.4s, v18.4s, v16.4s\n"
+ "ldr q20, [x20], #0x10\n"
+ "zip1 v16.4s, v19.4s, v17.4s\n"
+ ".inst 0x0ea16a12 // bfcvtn v18.4h, v16.4s\n"
+ "zip2 v17.4s, v19.4s, v17.4s\n"
+ "zip1 v16.4s, v25.4s, v21.4s\n"
+ ".inst 0x4ea16a32 // bfcvtn2 v18.8h, v17.4s\n"
+ "str q18, [x27, #0x0]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v16.4s, v25.4s, v21.4s\n"
+ "zip1 v18.4s, v24.4s, v22.4s\n"
+ "zip1 v17.4s, v23.4s, v20.4s\n"
+ ".inst 0x4ea16a13 // bfcvtn2 v19.8h, v16.4s\n"
+ "str q19, [x27, #0x10]\n"
+ "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v19.4s, v18.4s, v17.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ "zip2 v18.4s, v24.4s, v22.4s\n"
+ "zip2 v17.4s, v23.4s, v20.4s\n"
+ ".inst 0x4ea16a70 // bfcvtn2 v16.8h, v19.4s\n"
+ "str q16, [x27, #0x60]\n"
+ "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v17.4s, v18.4s, v17.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ ".inst 0x4ea16a30 // bfcvtn2 v16.8h, v17.4s\n"
+ "str q16, [x27, #0x70]\n"
+ "add x27, x27, #0x20\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
- "cmp x24, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
- "ldr s19, [x9], #0x4\n"
"ldr s18, [x28], #0x4\n"
- "sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr s17, [x27], #0x4\n"
- "ldr s16, [x26], #0x4\n"
- "zip1 v17.4s, v19.4s, v17.4s\n"
- "zip1 v16.4s, v18.4s, v16.4s\n"
- "ldr s20, [x25], #0x4\n"
- "ldr s19, [x23], #0x4\n"
+ "sub x19, x19, #0x1\n"
+ "ldr s17, [x26], #0x4\n"
+ "cmp x19, #0x1\n"
+ "ldr s16, [x25], #0x4\n"
+ "zip1 v18.4s, v18.4s, v16.4s\n"
+ "ldr s16, [x24], #0x4\n"
+ "ldr s20, [x23], #0x4\n"
"zip1 v16.4s, v17.4s, v16.4s\n"
- ".inst 0x0ea16a12 // bfcvtn v18.4h, v16.4s\n"
- "ldr s17, [x22], #0x4\n"
- "ldr s16, [x20], #0x4\n"
+ "ldr s19, [x22], #0x4\n"
+ "ldr s17, [x21], #0x4\n"
+ "zip1 v16.4s, v18.4s, v16.4s\n"
+ "ldr s18, [x20], #0x4\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
"zip1 v17.4s, v20.4s, v17.4s\n"
- "zip1 v16.4s, v19.4s, v16.4s\n"
+ "str d16, [x27, #0x0]\n"
+ "zip1 v16.4s, v19.4s, v18.4s\n"
"zip1 v16.4s, v17.4s, v16.4s\n"
".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
- "str d18, [x21, #0x0]\n"
- "str d16, [x21, #0x60]\n"
- "add x21, x21, #0x8\n"
+ "str d16, [x27, #0x60]\n"
+ "add x27, x27, #0x8\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
- "cmp %x[height], #0x8\n"
"add %x[out], %x[out], #0xc0\n"
+ "cmp %x[height], #0x8\n"
"bge 1b\n"
"cbz %x[height], 20f\n"
"10:" // Main loop skip
+
"11:" // Tail row loop: Head
- "mov x9, %x[in]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
- "mov x20, %x[width]\n"
- "add x26, x27, %x[in_stride]\n"
+ "mov x28, %x[in]\n"
+ "mov x27, %x[out]\n"
+ "add x26, x28, %x[in_stride]\n"
+ "add x25, x26, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "add %x[in], x24, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x26, %x[in_stride]\n"
- "csel x26, x26, %x[pad_row], GT\n"
- "csel x27, x27, %x[pad_row], GE\n"
+ "csel x24, x24, %x[pad_row], GT\n"
+ "csel x25, x25, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x28, x28, %x[pad_row], GT\n"
- "cmp x20, #0x18\n"
- "mov x21, %x[out]\n"
+ "csel x26, x26, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x4\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x18\n"
"blt 13f\n"
"12:" // Tail row loop: Unroll column loop
- "ldr q22, [x9], #0x10\n"
+ "ldr q17, [x28], #0x10\n"
+ "sub x19, x19, #0x18\n"
+ "ldr q20, [x26], #0x10\n"
+ "cmp x19, #0x18\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v19.4s, v17.4s, v16.4s\n"
"ldr q18, [x28], #0x10\n"
- "sub x20, x20, #0x18\n"
- "cmp x20, #0x18\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v19.4s, v22.4s, v17.4s\n"
- "zip1 v21.4s, v18.4s, v16.4s\n"
- "ldr q24, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "zip2 v10.4s, v22.4s, v17.4s\n"
- "zip2 v2.4s, v18.4s, v16.4s\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v22.4s, v24.4s, v17.4s\n"
- "zip1 v4.4s, v20.4s, v16.4s\n"
- "ldr q23, [x9], #0x10\n"
+ "zip2 v9.4s, v17.4s, v16.4s\n"
+ "ldr q8, [x26], #0x10\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v7.4s, v18.4s, v16.4s\n"
+ "ldr q17, [x28], #0x10\n"
+ "zip2 v6.4s, v18.4s, v16.4s\n"
+ "ldr q5, [x26], #0x10\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v4.4s, v17.4s, v16.4s\n"
"ldr q18, [x28], #0x10\n"
- "zip2 v29.4s, v24.4s, v17.4s\n"
- "zip2 v1.4s, v20.4s, v16.4s\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v30.4s, v23.4s, v17.4s\n"
- "zip1 v31.4s, v18.4s, v16.4s\n"
- "ldr q24, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "zip2 v23.4s, v23.4s, v17.4s\n"
- "zip2 v28.4s, v18.4s, v16.4s\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v25.4s, v24.4s, v17.4s\n"
- "zip1 v26.4s, v20.4s, v16.4s\n"
- "ldr q14, [x9], #0x10\n"
+ "zip2 v3.4s, v17.4s, v16.4s\n"
+ "ldr q2, [x26], #0x10\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v1.4s, v18.4s, v16.4s\n"
+ "ldr q17, [x28], #0x10\n"
+ "zip2 v0.4s, v18.4s, v16.4s\n"
+ "ldr q31, [x26], #0x10\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v30.4s, v17.4s, v16.4s\n"
"ldr q18, [x28], #0x10\n"
- "zip2 v24.4s, v24.4s, v17.4s\n"
- "zip2 v15.4s, v20.4s, v16.4s\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v12.4s, v14.4s, v17.4s\n"
- "zip1 v13.4s, v18.4s, v16.4s\n"
- "ldr q7, [x9], #0x10\n"
- "ldr q3, [x28], #0x10\n"
- "zip2 v0.4s, v14.4s, v17.4s\n"
- "zip2 v9.4s, v18.4s, v16.4s\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v14.4s, v7.4s, v17.4s\n"
- "zip1 v8.4s, v3.4s, v16.4s\n"
- "zip2 v7.4s, v7.4s, v17.4s\n"
- "zip2 v11.4s, v3.4s, v16.4s\n"
- "zip1 v18.4s, v19.4s, v21.4s\n"
- "zip1 v6.4s, v10.4s, v2.4s\n"
- "zip1 v5.4s, v22.4s, v4.4s\n"
- "zip1 v16.4s, v29.4s, v1.4s\n"
- "zip1 v27.4s, v30.4s, v31.4s\n"
- "zip1 v3.4s, v23.4s, v28.4s\n"
- "zip1 v17.4s, v25.4s, v26.4s\n"
- "zip1 v20.4s, v24.4s, v15.4s\n"
- ".inst 0x0ea16a52 // bfcvtn v18.4h, v18.4s\n"
- "zip2 v19.4s, v19.4s, v21.4s\n"
- "zip1 v21.4s, v12.4s, v13.4s\n"
- ".inst 0x0ea168c6 // bfcvtn v6.4h, v6.4s\n"
- "zip2 v10.4s, v10.4s, v2.4s\n"
- "zip1 v2.4s, v0.4s, v9.4s\n"
- ".inst 0x0ea168a5 // bfcvtn v5.4h, v5.4s\n"
- "zip2 v4.4s, v22.4s, v4.4s\n"
- "zip1 v22.4s, v14.4s, v8.4s\n"
- ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
- "zip2 v1.4s, v29.4s, v1.4s\n"
- "zip1 v29.4s, v7.4s, v11.4s\n"
- ".inst 0x0ea16b7b // bfcvtn v27.4h, v27.4s\n"
- "zip2 v30.4s, v30.4s, v31.4s\n"
- ".inst 0x0ea1687f // bfcvtn v31.4h, v3.4s\n"
- "zip2 v23.4s, v23.4s, v28.4s\n"
- ".inst 0x0ea16a23 // bfcvtn v3.4h, v17.4s\n"
- "zip2 v28.4s, v25.4s, v26.4s\n"
- ".inst 0x0ea16a9a // bfcvtn v26.4h, v20.4s\n"
- "zip2 v25.4s, v24.4s, v15.4s\n"
- ".inst 0x0ea16ab8 // bfcvtn v24.4h, v21.4s\n"
- "zip2 v12.4s, v12.4s, v13.4s\n"
- ".inst 0x0ea16855 // bfcvtn v21.4h, v2.4s\n"
- "zip2 v13.4s, v0.4s, v9.4s\n"
- ".inst 0x0ea16ac2 // bfcvtn v2.4h, v22.4s\n"
- "zip2 v0.4s, v14.4s, v8.4s\n"
- ".inst 0x0ea16ba9 // bfcvtn v9.4h, v29.4s\n"
- "zip2 v17.4s, v7.4s, v11.4s\n"
+ "zip2 v29.4s, v17.4s, v16.4s\n"
+ "ldr q28, [x26], #0x10\n"
+ "ldr q17, [x25], #0x10\n"
+ "zip1 v27.4s, v18.4s, v17.4s\n"
+ "ldr q16, [x24], #0x10\n"
+ "zip2 v26.4s, v18.4s, v17.4s\n"
+ "ldr q25, [x24], #0x10\n"
+ "zip1 v17.4s, v20.4s, v16.4s\n"
+ "zip2 v24.4s, v20.4s, v16.4s\n"
+ "ldr q23, [x24], #0x10\n"
+ "zip1 v16.4s, v19.4s, v17.4s\n"
+ "zip2 v17.4s, v19.4s, v17.4s\n"
+ "ldr q22, [x24], #0x10\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip1 v16.4s, v9.4s, v24.4s\n"
+ "ldr q21, [x24], #0x10\n"
+ ".inst 0x4ea16a33 // bfcvtn2 v19.8h, v17.4s\n"
+ ".inst 0x0ea16a12 // bfcvtn v18.4h, v16.4s\n"
+ "ldr q20, [x24], #0x10\n"
+ "zip2 v16.4s, v9.4s, v24.4s\n"
+ "zip1 v17.4s, v8.4s, v25.4s\n"
+ "str q19, [x27, #0x0]\n"
+ ".inst 0x4ea16a12 // bfcvtn2 v18.8h, v16.4s\n"
+ "str q18, [x27, #0x10]\n"
+ "zip1 v16.4s, v7.4s, v17.4s\n"
+ "zip2 v19.4s, v7.4s, v17.4s\n"
+ ".inst 0x0ea16a12 // bfcvtn v18.4h, v16.4s\n"
+ "zip2 v17.4s, v8.4s, v25.4s\n"
+ "zip1 v16.4s, v6.4s, v17.4s\n"
".inst 0x4ea16a72 // bfcvtn2 v18.8h, v19.4s\n"
- ".inst 0x4ea16946 // bfcvtn2 v6.8h, v10.4s\n"
- "str q18, [x21, #0x0]\n"
- ".inst 0x4ea16885 // bfcvtn2 v5.8h, v4.4s\n"
- ".inst 0x4ea16830 // bfcvtn2 v16.8h, v1.4s\n"
- "str q6, [x21, #0x10]\n"
- ".inst 0x4ea16bdb // bfcvtn2 v27.8h, v30.4s\n"
- ".inst 0x4ea16aff // bfcvtn2 v31.8h, v23.4s\n"
- "str q5, [x21, #0x20]\n"
- "str q16, [x21, #0x30]\n"
- ".inst 0x4ea16b83 // bfcvtn2 v3.8h, v28.4s\n"
- ".inst 0x4ea16b3a // bfcvtn2 v26.8h, v25.4s\n"
- "str q27, [x21, #0x40]\n"
- ".inst 0x4ea16998 // bfcvtn2 v24.8h, v12.4s\n"
- ".inst 0x4ea169b5 // bfcvtn2 v21.8h, v13.4s\n"
- "str q31, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
- ".inst 0x4ea16802 // bfcvtn2 v2.8h, v0.4s\n"
- ".inst 0x4ea16a29 // bfcvtn2 v9.8h, v17.4s\n"
- "str q3, [x21, #0x0]\n"
- "str q26, [x21, #0x10]\n"
- "str q24, [x21, #0x20]\n"
- "str q21, [x21, #0x30]\n"
- "str q2, [x21, #0x40]\n"
- "str q9, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q18, [x27, #0x20]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v18.4s, v6.4s, v17.4s\n"
+ "zip1 v17.4s, v5.4s, v23.4s\n"
+ "zip1 v16.4s, v4.4s, v17.4s\n"
+ ".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
+ "str q19, [x27, #0x30]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v18.4s, v4.4s, v17.4s\n"
+ "zip2 v17.4s, v5.4s, v23.4s\n"
+ "zip1 v16.4s, v3.4s, v17.4s\n"
+ ".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
+ "str q19, [x27, #0x40]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v18.4s, v3.4s, v17.4s\n"
+ "zip1 v17.4s, v2.4s, v22.4s\n"
+ "zip1 v16.4s, v1.4s, v17.4s\n"
+ ".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
+ "str q19, [x27, #0x50]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "add x27, x27, %x[out_stride]\n"
+ "zip2 v18.4s, v1.4s, v17.4s\n"
+ "zip2 v17.4s, v2.4s, v22.4s\n"
+ "zip1 v16.4s, v0.4s, v17.4s\n"
+ ".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
+ "str q19, [x27, #0x0]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v18.4s, v0.4s, v17.4s\n"
+ "zip1 v17.4s, v31.4s, v21.4s\n"
+ "zip1 v16.4s, v30.4s, v17.4s\n"
+ ".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
+ "str q19, [x27, #0x10]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v18.4s, v30.4s, v17.4s\n"
+ "zip2 v17.4s, v31.4s, v21.4s\n"
+ "zip1 v16.4s, v29.4s, v17.4s\n"
+ ".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
+ "str q19, [x27, #0x20]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v18.4s, v29.4s, v17.4s\n"
+ "zip1 v17.4s, v28.4s, v20.4s\n"
+ "zip1 v16.4s, v27.4s, v17.4s\n"
+ ".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
+ "str q19, [x27, #0x30]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v17.4s, v27.4s, v17.4s\n"
+ "zip2 v18.4s, v28.4s, v20.4s\n"
+ "zip1 v16.4s, v26.4s, v18.4s\n"
+ ".inst 0x4ea16a33 // bfcvtn2 v19.8h, v17.4s\n"
+ "str q19, [x27, #0x40]\n"
+ ".inst 0x0ea16a11 // bfcvtn v17.4h, v16.4s\n"
+ "zip2 v16.4s, v26.4s, v18.4s\n"
+ ".inst 0x4ea16a11 // bfcvtn2 v17.8h, v16.4s\n"
+ "str q17, [x27, #0x50]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 12b\n"
"13:" // Tail row loop: Unroll column loop skip
- "cmp x20, #0xc\n"
+ "cmp x19, #0xc\n"
"blt 15f\n"
"14:" // Tail row loop: Column loop
- "ldr q19, [x9], #0x10\n"
"ldr q18, [x28], #0x10\n"
- "sub x20, x20, #0xc\n"
- "cmp x20, #0xc\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v26.4s, v19.4s, v17.4s\n"
- "zip1 v25.4s, v18.4s, v16.4s\n"
- "ldr q21, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "zip2 v24.4s, v19.4s, v17.4s\n"
- "zip2 v23.4s, v18.4s, v16.4s\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v2.4s, v21.4s, v17.4s\n"
- "zip1 v22.4s, v20.4s, v16.4s\n"
- "ldr q19, [x9], #0x10\n"
+ "sub x19, x19, #0xc\n"
+ "ldr q20, [x26], #0x10\n"
+ "cmp x19, #0xc\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v19.4s, v18.4s, v16.4s\n"
+ "ldr q17, [x28], #0x10\n"
+ "zip2 v29.4s, v18.4s, v16.4s\n"
+ "ldr q28, [x26], #0x10\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v27.4s, v17.4s, v16.4s\n"
"ldr q18, [x28], #0x10\n"
- "zip2 v1.4s, v21.4s, v17.4s\n"
- "zip2 v0.4s, v20.4s, v16.4s\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v31.4s, v19.4s, v17.4s\n"
- "zip1 v30.4s, v18.4s, v16.4s\n"
- "zip2 v29.4s, v19.4s, v17.4s\n"
- "zip2 v28.4s, v18.4s, v16.4s\n"
- "zip1 v21.4s, v26.4s, v25.4s\n"
- "zip1 v20.4s, v24.4s, v23.4s\n"
- "zip1 v19.4s, v2.4s, v22.4s\n"
- "zip1 v18.4s, v1.4s, v0.4s\n"
- "zip1 v17.4s, v31.4s, v30.4s\n"
- "zip1 v16.4s, v29.4s, v28.4s\n"
- ".inst 0x0ea16abb // bfcvtn v27.4h, v21.4s\n"
- "zip2 v26.4s, v26.4s, v25.4s\n"
- ".inst 0x0ea16a99 // bfcvtn v25.4h, v20.4s\n"
- "zip2 v24.4s, v24.4s, v23.4s\n"
- ".inst 0x0ea16a77 // bfcvtn v23.4h, v19.4s\n"
- "zip2 v22.4s, v2.4s, v22.4s\n"
- ".inst 0x0ea16a55 // bfcvtn v21.4h, v18.4s\n"
- "zip2 v20.4s, v1.4s, v0.4s\n"
- ".inst 0x0ea16a33 // bfcvtn v19.4h, v17.4s\n"
- "zip2 v18.4s, v31.4s, v30.4s\n"
+ "zip2 v26.4s, v17.4s, v16.4s\n"
+ "ldr q25, [x26], #0x10\n"
+ "ldr q17, [x25], #0x10\n"
+ "zip1 v24.4s, v18.4s, v17.4s\n"
+ "ldr q16, [x24], #0x10\n"
+ "zip2 v23.4s, v18.4s, v17.4s\n"
+ "ldr q22, [x24], #0x10\n"
+ "zip1 v17.4s, v20.4s, v16.4s\n"
+ "zip2 v21.4s, v20.4s, v16.4s\n"
+ "ldr q20, [x24], #0x10\n"
+ "zip1 v16.4s, v19.4s, v17.4s\n"
+ "zip2 v19.4s, v19.4s, v17.4s\n"
".inst 0x0ea16a11 // bfcvtn v17.4h, v16.4s\n"
- "zip2 v16.4s, v29.4s, v28.4s\n"
- ".inst 0x4ea16b5b // bfcvtn2 v27.8h, v26.4s\n"
- ".inst 0x4ea16b19 // bfcvtn2 v25.8h, v24.4s\n"
- "str q27, [x21, #0x0]\n"
- ".inst 0x4ea16ad7 // bfcvtn2 v23.8h, v22.4s\n"
- ".inst 0x4ea16a95 // bfcvtn2 v21.8h, v20.4s\n"
- "str q25, [x21, #0x10]\n"
+ "zip1 v16.4s, v29.4s, v21.4s\n"
+ ".inst 0x0ea16a12 // bfcvtn v18.4h, v16.4s\n"
+ ".inst 0x4ea16a71 // bfcvtn2 v17.8h, v19.4s\n"
+ "str q17, [x27, #0x0]\n"
+ "zip2 v16.4s, v29.4s, v21.4s\n"
+ "zip1 v17.4s, v28.4s, v22.4s\n"
+ ".inst 0x4ea16a12 // bfcvtn2 v18.8h, v16.4s\n"
+ "str q18, [x27, #0x10]\n"
+ "zip1 v16.4s, v27.4s, v17.4s\n"
+ "zip2 v19.4s, v27.4s, v17.4s\n"
+ ".inst 0x0ea16a12 // bfcvtn v18.4h, v16.4s\n"
+ "zip2 v17.4s, v28.4s, v22.4s\n"
+ "zip1 v16.4s, v26.4s, v17.4s\n"
+ ".inst 0x4ea16a72 // bfcvtn2 v18.8h, v19.4s\n"
+ "str q18, [x27, #0x20]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v18.4s, v26.4s, v17.4s\n"
+ "zip1 v17.4s, v25.4s, v20.4s\n"
+ "zip1 v16.4s, v24.4s, v17.4s\n"
".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
+ "str q19, [x27, #0x30]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v17.4s, v24.4s, v17.4s\n"
+ "zip2 v18.4s, v25.4s, v20.4s\n"
+ "zip1 v16.4s, v23.4s, v18.4s\n"
+ ".inst 0x4ea16a33 // bfcvtn2 v19.8h, v17.4s\n"
+ "str q19, [x27, #0x40]\n"
+ ".inst 0x0ea16a11 // bfcvtn v17.4h, v16.4s\n"
+ "zip2 v16.4s, v23.4s, v18.4s\n"
".inst 0x4ea16a11 // bfcvtn2 v17.8h, v16.4s\n"
- "str q23, [x21, #0x20]\n"
- "str q21, [x21, #0x30]\n"
- "str q19, [x21, #0x40]\n"
- "str q17, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q17, [x27, #0x50]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 14b\n"
"15:" // Tail row loop: Column loop skip
- "cmp x20, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 17f\n"
"16:" // Tail row loop: width 4 loop: loop
- "ldr q20, [x9], #0x10\n"
"ldr q19, [x28], #0x10\n"
- "sub x20, x20, #0x4\n"
- "cmp x20, #0x4\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v22.4s, v20.4s, v17.4s\n"
- "zip1 v18.4s, v19.4s, v16.4s\n"
- "zip2 v21.4s, v20.4s, v17.4s\n"
- "zip2 v20.4s, v19.4s, v16.4s\n"
- "zip1 v17.4s, v22.4s, v18.4s\n"
- "zip1 v16.4s, v21.4s, v20.4s\n"
- ".inst 0x0ea16a33 // bfcvtn v19.4h, v17.4s\n"
- "zip2 v18.4s, v22.4s, v18.4s\n"
+ "sub x19, x19, #0x4\n"
+ "ldr q18, [x26], #0x10\n"
+ "cmp x19, #0x4\n"
+ "ldr q17, [x25], #0x10\n"
+ "zip1 v21.4s, v19.4s, v17.4s\n"
+ "ldr q16, [x24], #0x10\n"
+ "zip2 v20.4s, v19.4s, v17.4s\n"
+ "zip1 v17.4s, v18.4s, v16.4s\n"
+ "zip2 v19.4s, v18.4s, v16.4s\n"
+ "zip1 v16.4s, v21.4s, v17.4s\n"
+ ".inst 0x0ea16a12 // bfcvtn v18.4h, v16.4s\n"
+ "zip2 v17.4s, v21.4s, v17.4s\n"
+ "zip1 v16.4s, v20.4s, v19.4s\n"
+ ".inst 0x4ea16a32 // bfcvtn2 v18.8h, v17.4s\n"
+ "str q18, [x27, #0x0]\n"
".inst 0x0ea16a11 // bfcvtn v17.4h, v16.4s\n"
- "zip2 v16.4s, v21.4s, v20.4s\n"
- ".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
+ "zip2 v16.4s, v20.4s, v19.4s\n"
".inst 0x4ea16a11 // bfcvtn2 v17.8h, v16.4s\n"
- "str q19, [x21, #0x0]\n"
- "str q17, [x21, #0x10]\n"
- "add x21, x21, #0x20\n"
+ "str q17, [x27, #0x10]\n"
+ "add x27, x27, #0x20\n"
"bge 16b\n"
"17:" // Tail row loop: width 4 loop: skip
- "cmp x20, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 19f\n"
"18:" // Tail row loop: width 1 loop: loop
- "ldr s19, [x9], #0x4\n"
- "ldr s18, [x28], #0x4\n"
- "sub x20, x20, #0x1\n"
- "cmp x20, #0x1\n"
- "ldr s17, [x27], #0x4\n"
- "ldr s16, [x26], #0x4\n"
- "zip1 v17.4s, v19.4s, v17.4s\n"
+ "ldr s17, [x28], #0x4\n"
+ "sub x19, x19, #0x1\n"
+ "ldr s18, [x26], #0x4\n"
+ "cmp x19, #0x1\n"
+ "ldr s16, [x25], #0x4\n"
+ "zip1 v17.4s, v17.4s, v16.4s\n"
+ "ldr s16, [x24], #0x4\n"
"zip1 v16.4s, v18.4s, v16.4s\n"
"zip1 v16.4s, v17.4s, v16.4s\n"
".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
- "str d16, [x21, #0x0]\n"
- "add x21, x21, #0x8\n"
+ "str d16, [x27, #0x0]\n"
+ "add x27, x27, #0x8\n"
"bge 18b\n"
"19:" // Tail row loop: width 1 loop: skip
- "cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x60\n"
+ "cmp %x[height], #0x1\n"
"bge 11b\n"
"20:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_s8s16.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_s8s16.hpp
index 7938325fa4..7359eea737 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_s8s16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_s8s16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -36,225 +36,225 @@ void a64_transpose_interleave_12_s8s16(int16_t *out, const int8_t *in, size_t wi
"cmp %x[height], #0x4\n"
"blt 10f\n"
"1:" // Main row loop: Head
- "mov x25, %x[in]\n"
- "mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
- "add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
- "cmp x24, #0x18\n"
+ "mov x24, %x[in]\n"
+ "mov x23, %x[out]\n"
+ "add x22, x24, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x18\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ldr q3, [x25], #0x10\n"
- "ldr q21, [x23], #0x10\n"
- "sshll2 v20.8h, v3.16b, #0x0\n"
- "sshll v2.8h, v21.8b, #0x0\n"
- "ldr q1, [x22], #0x10\n"
- "ldr q19, [x20], #0x10\n"
- "sshll2 v18.8h, v1.16b, #0x0\n"
- "sshll v0.8h, v19.8b, #0x0\n"
- "ldr d17, [x25], #0x8\n"
- "ldr d16, [x22], #0x8\n"
- "sshll v31.8h, v17.8b, #0x0\n"
- "sshll v30.8h, v16.8b, #0x0\n"
- "ldr d29, [x23], #0x8\n"
- "ldr d28, [x20], #0x8\n"
- "sshll2 v27.8h, v21.16b, #0x0\n"
- "sshll2 v26.8h, v19.16b, #0x0\n"
- "dup v25.2d, v20.d[0]\n"
- "dup v24.2d, v2.d[1]\n"
- "sub x24, x24, #0x18\n"
- "cmp x24, #0x18\n"
- "dup v23.2d, v18.d[0]\n"
- "dup v22.2d, v0.d[1]\n"
- "dup v21.2d, v20.d[1]\n"
- "dup v20.2d, v31.d[1]\n"
- "dup v19.2d, v18.d[1]\n"
- "dup v18.2d, v30.d[1]\n"
- "sshll v17.8h, v3.8b, #0x0\n"
- "sshll v16.8h, v1.8b, #0x0\n"
- "str q17, [x21, #0x0]\n"
- "mov v25.d[1], v2.d[0]\n"
- "mov v24.d[1], v27.d[0]\n"
- "str q25, [x21, #0x10]\n"
- "mov v23.d[1], v0.d[0]\n"
- "mov v22.d[1], v26.d[0]\n"
- "str q24, [x21, #0x20]\n"
- "str q16, [x21, #0x30]\n"
- "sshll v17.8h, v29.8b, #0x0\n"
- "sshll v16.8h, v28.8b, #0x0\n"
- "str q23, [x21, #0x40]\n"
- "mov v21.d[1], v31.d[0]\n"
- "mov v20.d[1], v27.d[1]\n"
- "str q22, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
- "mov v19.d[1], v30.d[0]\n"
- "mov v18.d[1], v26.d[1]\n"
- "str q21, [x21, #0x0]\n"
- "str q20, [x21, #0x10]\n"
- "str q17, [x21, #0x20]\n"
- "str q19, [x21, #0x30]\n"
- "str q18, [x21, #0x40]\n"
- "str q16, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr q16, [x24], #0x10\n"
+ "sshll v27.8h, v16.8b, #0x0\n"
+ "ldr d17, [x24], #0x8\n"
+ "sub x19, x19, #0x18\n"
+ "sshll2 v16.8h, v16.16b, #0x0\n"
+ "ldr q26, [x22], #0x10\n"
+ "cmp x19, #0x18\n"
+ "dup v20.2d, v16.d[0]\n"
+ "ldr q25, [x21], #0x10\n"
+ "dup v24.2d, v16.d[1]\n"
+ "ldr q23, [x20], #0x10\n"
+ "sshll v16.8h, v17.8b, #0x0\n"
+ "ldr d19, [x22], #0x8\n"
+ "mov v24.d[1], v16.d[0]\n"
+ "dup v22.2d, v16.d[1]\n"
+ "ldr d18, [x21], #0x8\n"
+ "sshll v16.8h, v26.8b, #0x0\n"
+ "ldr d21, [x20], #0x8\n"
+ "mov v20.d[1], v16.d[0]\n"
+ "str q27, [x23, #0x0]\n"
+ "dup v17.2d, v16.d[1]\n"
+ "str q20, [x23, #0x10]\n"
+ "sshll2 v16.8h, v26.16b, #0x0\n"
+ "mov v17.d[1], v16.d[0]\n"
+ "str q17, [x23, #0x20]\n"
+ "mov v22.d[1], v16.d[1]\n"
+ "sshll v20.8h, v19.8b, #0x0\n"
+ "sshll v16.8h, v25.8b, #0x0\n"
+ "str q16, [x23, #0x30]\n"
+ "sshll2 v16.8h, v25.16b, #0x0\n"
+ "dup v17.2d, v16.d[0]\n"
+ "dup v19.2d, v16.d[1]\n"
+ "sshll v16.8h, v18.8b, #0x0\n"
+ "mov v19.d[1], v16.d[0]\n"
+ "dup v18.2d, v16.d[1]\n"
+ "sshll v16.8h, v23.8b, #0x0\n"
+ "mov v17.d[1], v16.d[0]\n"
+ "str q17, [x23, #0x40]\n"
+ "dup v17.2d, v16.d[1]\n"
+ "sshll2 v16.8h, v23.16b, #0x0\n"
+ "mov v17.d[1], v16.d[0]\n"
+ "str q17, [x23, #0x50]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "mov v18.d[1], v16.d[1]\n"
+ "str q24, [x23, #0x0]\n"
+ "sshll v16.8h, v21.8b, #0x0\n"
+ "str q22, [x23, #0x10]\n"
+ "str q20, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "str q18, [x23, #0x40]\n"
+ "str q16, [x23, #0x50]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cmp x24, #0xc\n"
+ "cmp x19, #0xc\n"
"blt 5f\n"
"4:" // Main row loop: Column loop
- "ldr d19, [x23], #0x8\n"
- "ldr d18, [x20], #0x8\n"
- "sub x24, x24, #0xc\n"
- "cmp x24, #0xc\n"
- "ld1 { v19.s }[2], [x23], #0x4\n"
- "ld1 { v18.s }[2], [x20], #0x4\n"
- "sshll v25.8h, v19.8b, #0x0\n"
- "sshll v24.8h, v18.8b, #0x0\n"
- "ldr d17, [x25], #0x8\n"
- "ldr d16, [x22], #0x8\n"
- "sshll2 v23.8h, v19.16b, #0x0\n"
- "sshll2 v22.8h, v18.16b, #0x0\n"
- "ld1 { v17.s }[2], [x25], #0x4\n"
- "ld1 { v16.s }[2], [x22], #0x4\n"
- "sshll2 v21.8h, v17.16b, #0x0\n"
- "sshll2 v20.8h, v16.16b, #0x0\n"
- "dup v19.2d, v25.d[1]\n"
- "dup v18.2d, v24.d[1]\n"
- "sshll v17.8h, v17.8b, #0x0\n"
- "sshll v16.8h, v16.8b, #0x0\n"
- "str q17, [x21, #0x0]\n"
- "mov v21.d[1], v25.d[0]\n"
- "mov v19.d[1], v23.d[0]\n"
- "str q21, [x21, #0x10]\n"
- "mov v20.d[1], v24.d[0]\n"
- "mov v18.d[1], v22.d[0]\n"
- "str q19, [x21, #0x20]\n"
- "str q16, [x21, #0x30]\n"
- "str q20, [x21, #0x40]\n"
- "str q18, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr d16, [x24], #0x8\n"
+ "sub x19, x19, #0xc\n"
+ "ldr d21, [x22], #0x8\n"
+ "cmp x19, #0xc\n"
+ "ldr d20, [x21], #0x8\n"
+ "ldr d19, [x20], #0x8\n"
+ "ld1 { v16.s }[2], [x24], #0x4\n"
+ "sshll v17.8h, v16.8b, #0x0\n"
+ "ld1 { v21.s }[2], [x22], #0x4\n"
+ "sshll2 v18.8h, v16.16b, #0x0\n"
+ "ld1 { v20.s }[2], [x21], #0x4\n"
+ "ld1 { v19.s }[2], [x20], #0x4\n"
+ "sshll v16.8h, v21.8b, #0x0\n"
+ "str q17, [x23, #0x0]\n"
+ "sshll2 v17.8h, v21.16b, #0x0\n"
+ "mov v18.d[1], v16.d[0]\n"
+ "str q18, [x23, #0x10]\n"
+ "dup v16.2d, v16.d[1]\n"
+ "mov v16.d[1], v17.d[0]\n"
+ "str q16, [x23, #0x20]\n"
+ "sshll v16.8h, v20.8b, #0x0\n"
+ "str q16, [x23, #0x30]\n"
+ "sshll2 v17.8h, v20.16b, #0x0\n"
+ "sshll v16.8h, v19.8b, #0x0\n"
+ "mov v17.d[1], v16.d[0]\n"
+ "str q17, [x23, #0x40]\n"
+ "dup v17.2d, v16.d[1]\n"
+ "sshll2 v16.8h, v19.16b, #0x0\n"
+ "mov v17.d[1], v16.d[0]\n"
+ "str q17, [x23, #0x50]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp x24, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
- "ldr s19, [x25], #0x4\n"
- "ldr s18, [x23], #0x4\n"
- "sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr s17, [x22], #0x4\n"
+ "ldr s16, [x24], #0x4\n"
+ "sshll v19.8h, v16.8b, #0x0\n"
+ "ldr s16, [x22], #0x4\n"
+ "sub x19, x19, #0x4\n"
+ "sshll v18.8h, v16.8b, #0x0\n"
+ "ldr s16, [x21], #0x4\n"
+ "cmp x19, #0x4\n"
+ "sshll v17.8h, v16.8b, #0x0\n"
"ldr s16, [x20], #0x4\n"
- "sshll v19.8h, v19.8b, #0x0\n"
- "sshll v18.8h, v18.8b, #0x0\n"
- "sshll v17.8h, v17.8b, #0x0\n"
+ "str d19, [x23, #0x0]\n"
"sshll v16.8h, v16.8b, #0x0\n"
- "str d19, [x21, #0x0]\n"
- "str d18, [x21, #0x18]\n"
- "str d17, [x21, #0x30]\n"
- "str d16, [x21, #0x48]\n"
- "add x21, x21, #0x8\n"
+ "str d18, [x23, #0x18]\n"
+ "str d17, [x23, #0x30]\n"
+ "str d16, [x23, #0x48]\n"
+ "add x23, x23, #0x8\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
- "cmp x24, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
- "ldr b19, [x25], #0x1\n"
- "ldr b18, [x23], #0x1\n"
- "sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr b17, [x22], #0x1\n"
+ "ldr b16, [x24], #0x1\n"
+ "sshll v19.8h, v16.8b, #0x0\n"
+ "ldr b16, [x22], #0x1\n"
+ "sub x19, x19, #0x1\n"
+ "sshll v18.8h, v16.8b, #0x0\n"
+ "ldr b16, [x21], #0x1\n"
+ "cmp x19, #0x1\n"
+ "sshll v17.8h, v16.8b, #0x0\n"
"ldr b16, [x20], #0x1\n"
- "sshll v19.8h, v19.8b, #0x0\n"
- "sshll v18.8h, v18.8b, #0x0\n"
- "sshll v17.8h, v17.8b, #0x0\n"
+ "str h19, [x23, #0x0]\n"
"sshll v16.8h, v16.8b, #0x0\n"
- "str h19, [x21, #0x0]\n"
- "str h18, [x21, #0x18]\n"
- "str h17, [x21, #0x30]\n"
- "str h16, [x21, #0x48]\n"
- "add x21, x21, #0x2\n"
+ "str h18, [x23, #0x18]\n"
+ "str h17, [x23, #0x30]\n"
+ "str h16, [x23, #0x48]\n"
+ "add x23, x23, #0x2\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
- "cmp %x[height], #0x4\n"
"add %x[out], %x[out], #0x60\n"
+ "cmp %x[height], #0x4\n"
"bge 1b\n"
"cbz %x[height], 20f\n"
"10:" // Main loop skip
"11:" // Tail row loop: Head
- "mov x20, %x[width]\n"
- "mov x25, %x[in]\n"
- "cmp x20, #0x18\n"
- "add %x[in], x25, %x[in_stride]\n"
- "mov x21, %x[out]\n"
+ "mov x24, %x[in]\n"
+ "mov x23, %x[out]\n"
+ "add %x[in], x24, %x[in_stride]\n"
"sub %x[height], %x[height], #0x1\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x18\n"
"blt 13f\n"
"12:" // Tail row loop: Unroll column loop
- "ldr q20, [x25], #0x10\n"
- "ldr d16, [x25], #0x8\n"
- "sshll2 v19.8h, v20.16b, #0x0\n"
- "sshll v18.8h, v16.8b, #0x0\n"
- "dup v17.2d, v19.d[1]\n"
- "sub x20, x20, #0x18\n"
- "sshll v16.8h, v20.8b, #0x0\n"
- "str q16, [x21, #0x0]\n"
- "dup v16.2d, v19.d[0]\n"
- "str d16, [x21, #0x10]\n"
- "add x21, x21, %x[out_stride]\n"
- "cmp x20, #0x18\n"
- "mov v17.d[1], v18.d[0]\n"
- "dup v16.2d, v18.d[1]\n"
- "str q17, [x21, #0x0]\n"
- "str d16, [x21, #0x10]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr q17, [x24], #0x10\n"
+ "sshll v16.8h, v17.8b, #0x0\n"
+ "ldr d18, [x24], #0x8\n"
+ "sub x19, x19, #0x18\n"
+ "sshll2 v17.8h, v17.16b, #0x0\n"
+ "str q16, [x23, #0x0]\n"
+ "cmp x19, #0x18\n"
+ "dup v16.2d, v17.d[0]\n"
+ "str d16, [x23, #0x10]\n"
+ "dup v17.2d, v17.d[1]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "sshll v16.8h, v18.8b, #0x0\n"
+ "mov v17.d[1], v16.d[0]\n"
+ "str q17, [x23, #0x0]\n"
+ "dup v16.2d, v16.d[1]\n"
+ "str d16, [x23, #0x10]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 12b\n"
"13:" // Tail row loop: Unroll column loop skip
- "cmp x20, #0xc\n"
+ "cmp x19, #0xc\n"
"blt 15f\n"
"14:" // Tail row loop: Column loop
- "ldr d16, [x25], #0x8\n"
- "ld1 { v16.s }[2], [x25], #0x4\n"
- "sub x20, x20, #0xc\n"
- "cmp x20, #0xc\n"
- "sshll v17.8h, v16.8b, #0x0\n"
- "sshll2 v16.8h, v16.16b, #0x0\n"
- "str q17, [x21, #0x0]\n"
- "str d16, [x21, #0x10]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr d17, [x24], #0x8\n"
+ "sub x19, x19, #0xc\n"
+ "cmp x19, #0xc\n"
+ "ld1 { v17.s }[2], [x24], #0x4\n"
+ "sshll v16.8h, v17.8b, #0x0\n"
+ "str q16, [x23, #0x0]\n"
+ "sshll2 v16.8h, v17.16b, #0x0\n"
+ "str d16, [x23, #0x10]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 14b\n"
"15:" // Tail row loop: Column loop skip
- "cmp x20, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 17f\n"
"16:" // Tail row loop: width 4 loop: loop
- "ldr s16, [x25], #0x4\n"
- "sub x20, x20, #0x4\n"
- "cmp x20, #0x4\n"
+ "ldr s16, [x24], #0x4\n"
"sshll v16.8h, v16.8b, #0x0\n"
- "str d16, [x21, #0x0]\n"
- "add x21, x21, #0x8\n"
+ "str d16, [x23, #0x0]\n"
+ "sub x19, x19, #0x4\n"
+ "add x23, x23, #0x8\n"
+ "cmp x19, #0x4\n"
"bge 16b\n"
"17:" // Tail row loop: width 4 loop: skip
- "cmp x20, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 19f\n"
"18:" // Tail row loop: width 1 loop: loop
- "ldr b16, [x25], #0x1\n"
- "sub x20, x20, #0x1\n"
- "cmp x20, #0x1\n"
+ "ldr b16, [x24], #0x1\n"
"sshll v16.8h, v16.8b, #0x0\n"
- "str h16, [x21, #0x0]\n"
- "add x21, x21, #0x2\n"
+ "str h16, [x23, #0x0]\n"
+ "sub x19, x19, #0x1\n"
+ "add x23, x23, #0x2\n"
+ "cmp x19, #0x1\n"
"bge 18b\n"
"19:" // Tail row loop: width 1 loop: skip
- "cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x18\n"
+ "cmp %x[height], #0x1\n"
"bge 11b\n"
"20:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
+ : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "x19", "x20", "x21", "x22", "x23", "x24"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_u8u16.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_u8u16.hpp
index 4c66fb2c2f..34fb0ed5ac 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_u8u16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_u8u16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -36,225 +36,225 @@ void a64_transpose_interleave_12_u8u16(uint16_t *out, const uint8_t *in, size_t
"cmp %x[height], #0x4\n"
"blt 10f\n"
"1:" // Main row loop: Head
- "mov x25, %x[in]\n"
- "mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
- "add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
- "cmp x24, #0x18\n"
+ "mov x24, %x[in]\n"
+ "mov x23, %x[out]\n"
+ "add x22, x24, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x18\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ldr q3, [x25], #0x10\n"
- "ldr q21, [x23], #0x10\n"
- "ushll2 v20.8h, v3.16b, #0x0\n"
- "ushll v2.8h, v21.8b, #0x0\n"
- "ldr q1, [x22], #0x10\n"
- "ldr q19, [x20], #0x10\n"
- "ushll2 v18.8h, v1.16b, #0x0\n"
- "ushll v0.8h, v19.8b, #0x0\n"
- "ldr d17, [x25], #0x8\n"
- "ldr d16, [x22], #0x8\n"
- "ushll v31.8h, v17.8b, #0x0\n"
- "ushll v30.8h, v16.8b, #0x0\n"
- "ldr d29, [x23], #0x8\n"
- "ldr d28, [x20], #0x8\n"
- "ushll2 v27.8h, v21.16b, #0x0\n"
- "ushll2 v26.8h, v19.16b, #0x0\n"
- "dup v25.2d, v20.d[0]\n"
- "dup v24.2d, v2.d[1]\n"
- "sub x24, x24, #0x18\n"
- "cmp x24, #0x18\n"
- "dup v23.2d, v18.d[0]\n"
- "dup v22.2d, v0.d[1]\n"
- "dup v21.2d, v20.d[1]\n"
- "dup v20.2d, v31.d[1]\n"
- "dup v19.2d, v18.d[1]\n"
- "dup v18.2d, v30.d[1]\n"
- "ushll v17.8h, v3.8b, #0x0\n"
- "ushll v16.8h, v1.8b, #0x0\n"
- "str q17, [x21, #0x0]\n"
- "mov v25.d[1], v2.d[0]\n"
- "mov v24.d[1], v27.d[0]\n"
- "str q25, [x21, #0x10]\n"
- "mov v23.d[1], v0.d[0]\n"
- "mov v22.d[1], v26.d[0]\n"
- "str q24, [x21, #0x20]\n"
- "str q16, [x21, #0x30]\n"
- "ushll v17.8h, v29.8b, #0x0\n"
- "ushll v16.8h, v28.8b, #0x0\n"
- "str q23, [x21, #0x40]\n"
- "mov v21.d[1], v31.d[0]\n"
- "mov v20.d[1], v27.d[1]\n"
- "str q22, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
- "mov v19.d[1], v30.d[0]\n"
- "mov v18.d[1], v26.d[1]\n"
- "str q21, [x21, #0x0]\n"
- "str q20, [x21, #0x10]\n"
- "str q17, [x21, #0x20]\n"
- "str q19, [x21, #0x30]\n"
- "str q18, [x21, #0x40]\n"
- "str q16, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr q16, [x24], #0x10\n"
+ "ushll v27.8h, v16.8b, #0x0\n"
+ "ldr d17, [x24], #0x8\n"
+ "sub x19, x19, #0x18\n"
+ "ushll2 v16.8h, v16.16b, #0x0\n"
+ "ldr q26, [x22], #0x10\n"
+ "cmp x19, #0x18\n"
+ "dup v20.2d, v16.d[0]\n"
+ "ldr q25, [x21], #0x10\n"
+ "dup v24.2d, v16.d[1]\n"
+ "ldr q23, [x20], #0x10\n"
+ "ushll v16.8h, v17.8b, #0x0\n"
+ "ldr d19, [x22], #0x8\n"
+ "mov v24.d[1], v16.d[0]\n"
+ "dup v22.2d, v16.d[1]\n"
+ "ldr d18, [x21], #0x8\n"
+ "ushll v16.8h, v26.8b, #0x0\n"
+ "ldr d21, [x20], #0x8\n"
+ "mov v20.d[1], v16.d[0]\n"
+ "str q27, [x23, #0x0]\n"
+ "dup v17.2d, v16.d[1]\n"
+ "str q20, [x23, #0x10]\n"
+ "ushll2 v16.8h, v26.16b, #0x0\n"
+ "mov v17.d[1], v16.d[0]\n"
+ "str q17, [x23, #0x20]\n"
+ "mov v22.d[1], v16.d[1]\n"
+ "ushll v20.8h, v19.8b, #0x0\n"
+ "ushll v16.8h, v25.8b, #0x0\n"
+ "str q16, [x23, #0x30]\n"
+ "ushll2 v16.8h, v25.16b, #0x0\n"
+ "dup v17.2d, v16.d[0]\n"
+ "dup v19.2d, v16.d[1]\n"
+ "ushll v16.8h, v18.8b, #0x0\n"
+ "mov v19.d[1], v16.d[0]\n"
+ "dup v18.2d, v16.d[1]\n"
+ "ushll v16.8h, v23.8b, #0x0\n"
+ "mov v17.d[1], v16.d[0]\n"
+ "str q17, [x23, #0x40]\n"
+ "dup v17.2d, v16.d[1]\n"
+ "ushll2 v16.8h, v23.16b, #0x0\n"
+ "mov v17.d[1], v16.d[0]\n"
+ "str q17, [x23, #0x50]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "mov v18.d[1], v16.d[1]\n"
+ "str q24, [x23, #0x0]\n"
+ "ushll v16.8h, v21.8b, #0x0\n"
+ "str q22, [x23, #0x10]\n"
+ "str q20, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "str q18, [x23, #0x40]\n"
+ "str q16, [x23, #0x50]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cmp x24, #0xc\n"
+ "cmp x19, #0xc\n"
"blt 5f\n"
"4:" // Main row loop: Column loop
- "ldr d19, [x23], #0x8\n"
- "ldr d18, [x20], #0x8\n"
- "sub x24, x24, #0xc\n"
- "cmp x24, #0xc\n"
- "ld1 { v19.s }[2], [x23], #0x4\n"
- "ld1 { v18.s }[2], [x20], #0x4\n"
- "ushll v25.8h, v19.8b, #0x0\n"
- "ushll v24.8h, v18.8b, #0x0\n"
- "ldr d17, [x25], #0x8\n"
- "ldr d16, [x22], #0x8\n"
- "ushll2 v23.8h, v19.16b, #0x0\n"
- "ushll2 v22.8h, v18.16b, #0x0\n"
- "ld1 { v17.s }[2], [x25], #0x4\n"
- "ld1 { v16.s }[2], [x22], #0x4\n"
- "ushll2 v21.8h, v17.16b, #0x0\n"
- "ushll2 v20.8h, v16.16b, #0x0\n"
- "dup v19.2d, v25.d[1]\n"
- "dup v18.2d, v24.d[1]\n"
- "ushll v17.8h, v17.8b, #0x0\n"
- "ushll v16.8h, v16.8b, #0x0\n"
- "str q17, [x21, #0x0]\n"
- "mov v21.d[1], v25.d[0]\n"
- "mov v19.d[1], v23.d[0]\n"
- "str q21, [x21, #0x10]\n"
- "mov v20.d[1], v24.d[0]\n"
- "mov v18.d[1], v22.d[0]\n"
- "str q19, [x21, #0x20]\n"
- "str q16, [x21, #0x30]\n"
- "str q20, [x21, #0x40]\n"
- "str q18, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr d16, [x24], #0x8\n"
+ "sub x19, x19, #0xc\n"
+ "ldr d21, [x22], #0x8\n"
+ "cmp x19, #0xc\n"
+ "ldr d20, [x21], #0x8\n"
+ "ldr d19, [x20], #0x8\n"
+ "ld1 { v16.s }[2], [x24], #0x4\n"
+ "ushll v17.8h, v16.8b, #0x0\n"
+ "ld1 { v21.s }[2], [x22], #0x4\n"
+ "ushll2 v18.8h, v16.16b, #0x0\n"
+ "ld1 { v20.s }[2], [x21], #0x4\n"
+ "ld1 { v19.s }[2], [x20], #0x4\n"
+ "ushll v16.8h, v21.8b, #0x0\n"
+ "str q17, [x23, #0x0]\n"
+ "ushll2 v17.8h, v21.16b, #0x0\n"
+ "mov v18.d[1], v16.d[0]\n"
+ "str q18, [x23, #0x10]\n"
+ "dup v16.2d, v16.d[1]\n"
+ "mov v16.d[1], v17.d[0]\n"
+ "str q16, [x23, #0x20]\n"
+ "ushll v16.8h, v20.8b, #0x0\n"
+ "str q16, [x23, #0x30]\n"
+ "ushll2 v17.8h, v20.16b, #0x0\n"
+ "ushll v16.8h, v19.8b, #0x0\n"
+ "mov v17.d[1], v16.d[0]\n"
+ "str q17, [x23, #0x40]\n"
+ "dup v17.2d, v16.d[1]\n"
+ "ushll2 v16.8h, v19.16b, #0x0\n"
+ "mov v17.d[1], v16.d[0]\n"
+ "str q17, [x23, #0x50]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp x24, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
- "ldr s19, [x25], #0x4\n"
- "ldr s18, [x23], #0x4\n"
- "sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr s17, [x22], #0x4\n"
+ "ldr s16, [x24], #0x4\n"
+ "ushll v19.8h, v16.8b, #0x0\n"
+ "ldr s16, [x22], #0x4\n"
+ "sub x19, x19, #0x4\n"
+ "ushll v18.8h, v16.8b, #0x0\n"
+ "ldr s16, [x21], #0x4\n"
+ "cmp x19, #0x4\n"
+ "ushll v17.8h, v16.8b, #0x0\n"
"ldr s16, [x20], #0x4\n"
- "ushll v19.8h, v19.8b, #0x0\n"
- "ushll v18.8h, v18.8b, #0x0\n"
- "ushll v17.8h, v17.8b, #0x0\n"
+ "str d19, [x23, #0x0]\n"
"ushll v16.8h, v16.8b, #0x0\n"
- "str d19, [x21, #0x0]\n"
- "str d18, [x21, #0x18]\n"
- "str d17, [x21, #0x30]\n"
- "str d16, [x21, #0x48]\n"
- "add x21, x21, #0x8\n"
+ "str d18, [x23, #0x18]\n"
+ "str d17, [x23, #0x30]\n"
+ "str d16, [x23, #0x48]\n"
+ "add x23, x23, #0x8\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
- "cmp x24, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
- "ldr b19, [x25], #0x1\n"
- "ldr b18, [x23], #0x1\n"
- "sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr b17, [x22], #0x1\n"
+ "ldr b16, [x24], #0x1\n"
+ "ushll v19.8h, v16.8b, #0x0\n"
+ "ldr b16, [x22], #0x1\n"
+ "sub x19, x19, #0x1\n"
+ "ushll v18.8h, v16.8b, #0x0\n"
+ "ldr b16, [x21], #0x1\n"
+ "cmp x19, #0x1\n"
+ "ushll v17.8h, v16.8b, #0x0\n"
"ldr b16, [x20], #0x1\n"
- "ushll v19.8h, v19.8b, #0x0\n"
- "ushll v18.8h, v18.8b, #0x0\n"
- "ushll v17.8h, v17.8b, #0x0\n"
+ "str h19, [x23, #0x0]\n"
"ushll v16.8h, v16.8b, #0x0\n"
- "str h19, [x21, #0x0]\n"
- "str h18, [x21, #0x18]\n"
- "str h17, [x21, #0x30]\n"
- "str h16, [x21, #0x48]\n"
- "add x21, x21, #0x2\n"
+ "str h18, [x23, #0x18]\n"
+ "str h17, [x23, #0x30]\n"
+ "str h16, [x23, #0x48]\n"
+ "add x23, x23, #0x2\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
- "cmp %x[height], #0x4\n"
"add %x[out], %x[out], #0x60\n"
+ "cmp %x[height], #0x4\n"
"bge 1b\n"
"cbz %x[height], 20f\n"
"10:" // Main loop skip
"11:" // Tail row loop: Head
- "mov x20, %x[width]\n"
- "mov x25, %x[in]\n"
- "cmp x20, #0x18\n"
- "add %x[in], x25, %x[in_stride]\n"
- "mov x21, %x[out]\n"
+ "mov x24, %x[in]\n"
+ "mov x23, %x[out]\n"
+ "add %x[in], x24, %x[in_stride]\n"
"sub %x[height], %x[height], #0x1\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x18\n"
"blt 13f\n"
"12:" // Tail row loop: Unroll column loop
- "ldr q20, [x25], #0x10\n"
- "ldr d16, [x25], #0x8\n"
- "ushll2 v19.8h, v20.16b, #0x0\n"
- "ushll v18.8h, v16.8b, #0x0\n"
- "dup v17.2d, v19.d[1]\n"
- "sub x20, x20, #0x18\n"
- "ushll v16.8h, v20.8b, #0x0\n"
- "str q16, [x21, #0x0]\n"
- "dup v16.2d, v19.d[0]\n"
- "str d16, [x21, #0x10]\n"
- "add x21, x21, %x[out_stride]\n"
- "cmp x20, #0x18\n"
- "mov v17.d[1], v18.d[0]\n"
- "dup v16.2d, v18.d[1]\n"
- "str q17, [x21, #0x0]\n"
- "str d16, [x21, #0x10]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr q17, [x24], #0x10\n"
+ "ushll v16.8h, v17.8b, #0x0\n"
+ "ldr d18, [x24], #0x8\n"
+ "sub x19, x19, #0x18\n"
+ "ushll2 v17.8h, v17.16b, #0x0\n"
+ "str q16, [x23, #0x0]\n"
+ "cmp x19, #0x18\n"
+ "dup v16.2d, v17.d[0]\n"
+ "str d16, [x23, #0x10]\n"
+ "dup v17.2d, v17.d[1]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "ushll v16.8h, v18.8b, #0x0\n"
+ "mov v17.d[1], v16.d[0]\n"
+ "str q17, [x23, #0x0]\n"
+ "dup v16.2d, v16.d[1]\n"
+ "str d16, [x23, #0x10]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 12b\n"
"13:" // Tail row loop: Unroll column loop skip
- "cmp x20, #0xc\n"
+ "cmp x19, #0xc\n"
"blt 15f\n"
"14:" // Tail row loop: Column loop
- "ldr d16, [x25], #0x8\n"
- "ld1 { v16.s }[2], [x25], #0x4\n"
- "sub x20, x20, #0xc\n"
- "cmp x20, #0xc\n"
- "ushll v17.8h, v16.8b, #0x0\n"
- "ushll2 v16.8h, v16.16b, #0x0\n"
- "str q17, [x21, #0x0]\n"
- "str d16, [x21, #0x10]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr d17, [x24], #0x8\n"
+ "sub x19, x19, #0xc\n"
+ "cmp x19, #0xc\n"
+ "ld1 { v17.s }[2], [x24], #0x4\n"
+ "ushll v16.8h, v17.8b, #0x0\n"
+ "str q16, [x23, #0x0]\n"
+ "ushll2 v16.8h, v17.16b, #0x0\n"
+ "str d16, [x23, #0x10]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 14b\n"
"15:" // Tail row loop: Column loop skip
- "cmp x20, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 17f\n"
"16:" // Tail row loop: width 4 loop: loop
- "ldr s16, [x25], #0x4\n"
- "sub x20, x20, #0x4\n"
- "cmp x20, #0x4\n"
+ "ldr s16, [x24], #0x4\n"
"ushll v16.8h, v16.8b, #0x0\n"
- "str d16, [x21, #0x0]\n"
- "add x21, x21, #0x8\n"
+ "str d16, [x23, #0x0]\n"
+ "sub x19, x19, #0x4\n"
+ "add x23, x23, #0x8\n"
+ "cmp x19, #0x4\n"
"bge 16b\n"
"17:" // Tail row loop: width 4 loop: skip
- "cmp x20, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 19f\n"
"18:" // Tail row loop: width 1 loop: loop
- "ldr b16, [x25], #0x1\n"
- "sub x20, x20, #0x1\n"
- "cmp x20, #0x1\n"
+ "ldr b16, [x24], #0x1\n"
"ushll v16.8h, v16.8b, #0x0\n"
- "str h16, [x21, #0x0]\n"
- "add x21, x21, #0x2\n"
+ "str h16, [x23, #0x0]\n"
+ "sub x19, x19, #0x1\n"
+ "add x23, x23, #0x2\n"
+ "cmp x19, #0x1\n"
"bge 18b\n"
"19:" // Tail row loop: width 1 loop: skip
- "cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x18\n"
+ "cmp %x[height], #0x1\n"
"bge 11b\n"
"20:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
+ : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "x19", "x20", "x21", "x22", "x23", "x24"
);
}
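
(Editor's aside, not part of the patch: the hunk above restores a64_transpose_interleave_12_u8u16 to its x19-based register allocation. As a reading aid, here is a minimal C sketch of what its fast path computes, inferred from the assembly: each group of four input rows is copied into 12-element output panels, widening u8 to u16 via ushll/ushll2. The sketch assumes element strides rather than the byte strides the assembly works with, omits the width % 12 and height % 4 tail loops, and the "_ref" name is hypothetical. The signed variant ending just before this file is analogous, with sshll performing the s8-to-s16 widen.)

#include <stddef.h>
#include <stdint.h>

/* Hedged reference sketch (editor's illustration; assumptions above). */
static void transpose_interleave_12_u8u16_ref(
    uint16_t *out, const uint8_t *in, size_t width,
    size_t in_stride, size_t out_stride, size_t height)
{
    for (size_t row = 0; row < height; row += 4) {
        uint16_t *panel = out;
        for (size_t col = 0; col < width; col += 12) {
            for (size_t r = 0; r < 4; r++)
                for (size_t c = 0; c < 12; c++)   /* ushll/ushll2: u8 -> u16 */
                    panel[r * 12 + c] =
                        (uint16_t)in[(row + r) * in_stride + col + c];
            panel += out_stride;                  /* next 12-wide column panel */
        }
        out += 48;                                /* 0x60 bytes = 48 uint16_t per row group */
    }
}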
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16.hpp
index ca4dc20b61..dd1bd508ef 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -36,86 +36,86 @@ void a64_transpose_interleave_16(uint32_t *out, const uint32_t *in, size_t width
"cmp %x[height], #0x4\n"
"blt 6f\n"
"1:" // Main row loop: Head
- "mov x25, %x[in]\n"
- "mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
- "add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
- "cmp x24, #0x4\n"
+ "mov x24, %x[in]\n"
+ "mov x23, %x[out]\n"
+ "add x22, x24, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Column loop
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr q17, [x22], #0x10\n"
+ "ldr q19, [x24], #0x10\n"
+ "sub x19, x19, #0x4\n"
+ "ldr q18, [x22], #0x10\n"
+ "cmp x19, #0x4\n"
+ "ldr q17, [x21], #0x10\n"
"ldr q16, [x20], #0x10\n"
- "str q19, [x21, #0x0]\n"
- "str q18, [x21, #0x10]\n"
- "str q17, [x21, #0x20]\n"
- "str q16, [x21, #0x30]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q19, [x23, #0x0]\n"
+ "str q18, [x23, #0x10]\n"
+ "str q17, [x23, #0x20]\n"
+ "str q16, [x23, #0x30]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Column loop skip
- "cmp x24, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 5f\n"
"4:" // Main row loop: width 1 loop: loop
- "ldr s19, [x25], #0x4\n"
- "ldr s18, [x23], #0x4\n"
- "sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr s17, [x22], #0x4\n"
+ "ldr s19, [x24], #0x4\n"
+ "sub x19, x19, #0x1\n"
+ "ldr s18, [x22], #0x4\n"
+ "cmp x19, #0x1\n"
+ "ldr s17, [x21], #0x4\n"
"ldr s16, [x20], #0x4\n"
- "str s19, [x21, #0x0]\n"
- "str s18, [x21, #0x10]\n"
- "str s17, [x21, #0x20]\n"
- "str s16, [x21, #0x30]\n"
- "add x21, x21, #0x4\n"
+ "str s19, [x23, #0x0]\n"
+ "str s18, [x23, #0x10]\n"
+ "str s17, [x23, #0x20]\n"
+ "str s16, [x23, #0x30]\n"
+ "add x23, x23, #0x4\n"
"bge 4b\n"
"5:" // Main row loop: width 1 loop: skip
- "cmp %x[height], #0x4\n"
"add %x[out], %x[out], #0x40\n"
+ "cmp %x[height], #0x4\n"
"bge 1b\n"
"cbz %x[height], 12f\n"
"6:" // Main loop skip
"7:" // Tail row loop: Head
- "mov x20, %x[width]\n"
- "mov x25, %x[in]\n"
- "cmp x20, #0x4\n"
- "add %x[in], x25, %x[in_stride]\n"
- "mov x21, %x[out]\n"
+ "mov x24, %x[in]\n"
+ "mov x23, %x[out]\n"
+ "add %x[in], x24, %x[in_stride]\n"
"sub %x[height], %x[height], #0x1\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x4\n"
"blt 9f\n"
"8:" // Tail row loop: Column loop
- "ldr q16, [x25], #0x10\n"
- "sub x20, x20, #0x4\n"
- "cmp x20, #0x4\n"
- "str q16, [x21, #0x0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr q16, [x24], #0x10\n"
+ "sub x19, x19, #0x4\n"
+ "cmp x19, #0x4\n"
+ "str q16, [x23, #0x0]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 8b\n"
"9:" // Tail row loop: Column loop skip
- "cmp x20, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 11f\n"
"10:" // Tail row loop: width 1 loop: loop
- "ldr s16, [x25], #0x4\n"
- "sub x20, x20, #0x1\n"
- "cmp x20, #0x1\n"
- "str s16, [x21, #0x0]\n"
- "add x21, x21, #0x4\n"
+ "ldr s16, [x24], #0x4\n"
+ "sub x19, x19, #0x1\n"
+ "cmp x19, #0x1\n"
+ "str s16, [x23, #0x0]\n"
+ "add x23, x23, #0x4\n"
"bge 10b\n"
"11:" // Tail row loop: width 1 loop: skip
- "cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x10\n"
+ "cmp %x[height], #0x1\n"
"bge 7b\n"
"12:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
- : "cc", "memory", "v16", "v17", "v18", "v19", "x20", "x21", "x22", "x23", "x24", "x25"
+ : "cc", "memory", "v16", "v17", "v18", "v19", "x19", "x20", "x21", "x22", "x23", "x24"
);
}
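
(Editor's aside: a64_transpose_interleave_16 above is the simplest transform in this series, a plain 4-row interleave of uint32_t with no widening. A hedged C sketch of its fast path, under the same assumptions as the previous sketch: element strides, tails omitted, hypothetical "_ref" name.)

#include <stddef.h>
#include <stdint.h>

static void transpose_interleave_16_ref(
    uint32_t *out, const uint32_t *in, size_t width,
    size_t in_stride, size_t out_stride, size_t height)
{
    for (size_t row = 0; row < height; row += 4) {
        uint32_t *panel = out;
        for (size_t col = 0; col < width; col += 4) {
            for (size_t r = 0; r < 4; r++)        /* one q-register copy per row */
                for (size_t c = 0; c < 4; c++)
                    panel[r * 4 + c] = in[(row + r) * in_stride + col + c];
            panel += out_stride;                  /* next 4-wide column panel */
        }
        out += 16;                                /* 0x40 bytes = 16 uint32_t per row group */
    }
}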
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_1x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_1x4.hpp
index e0ccb368c2..7e7fcf5b8b 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_1x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_1x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -42,10 +42,9 @@ void a64_transpose_interleave_16_1x4(uint8_t *out, const uint8_t *in, size_t wid
"cmp %x[height], #0x10\n"
"blt 8f\n"
"1:" // Main row loop: Head
- "mov x17, %x[in]\n"
- "add x16, x17, %x[in_stride]\n"
- "add x15, x16, %x[in_stride]\n"
- "add x14, x15, %x[in_stride]\n"
+ "mov x16, %x[in]\n"
+ "mov x15, %x[out]\n"
+ "add x14, x16, %x[in_stride]\n"
"add x13, x14, %x[in_stride]\n"
"add x12, x13, %x[in_stride]\n"
"add x11, x12, %x[in_stride]\n"
@@ -55,247 +54,250 @@ void a64_transpose_interleave_16_1x4(uint8_t *out, const uint8_t *in, size_t wid
"add x27, x28, %x[in_stride]\n"
"add x26, x27, %x[in_stride]\n"
"add x25, x26, %x[in_stride]\n"
- "mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "add x23, x24, %x[in_stride]\n"
"add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
- "cmp x24, #0x10\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x10\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x10\n"
"blt 3f\n"
"2:" // Main row loop: Column loop
- "ldr q21, [x17], #0x10\n"
- "ldr q20, [x16], #0x10\n"
- "sub x24, x24, #0x10\n"
- "cmp x24, #0x10\n"
- "ldr q17, [x15], #0x10\n"
- "ldr q16, [x14], #0x10\n"
- "zip1 v3.16b, v21.16b, v17.16b\n"
- "zip1 v2.16b, v20.16b, v16.16b\n"
- "ldr q19, [x13], #0x10\n"
- "ldr q18, [x12], #0x10\n"
- "zip2 v1.16b, v21.16b, v17.16b\n"
- "zip2 v0.16b, v20.16b, v16.16b\n"
- "ldr q17, [x11], #0x10\n"
- "ldr q16, [x10], #0x10\n"
+ "ldr q18, [x16], #0x10\n"
+ "sub x19, x19, #0x10\n"
+ "ldr q20, [x14], #0x10\n"
+ "cmp x19, #0x10\n"
+ "ldr q17, [x13], #0x10\n"
+ "zip1 v19.16b, v18.16b, v17.16b\n"
+ "ldr q16, [x12], #0x10\n"
+ "zip2 v18.16b, v18.16b, v17.16b\n"
+ "ldr q3, [x11], #0x10\n"
+ "ldr q2, [x10], #0x10\n"
+ "zip1 v17.16b, v20.16b, v16.16b\n"
+ "ldr q1, [x9], #0x10\n"
+ "zip2 v16.16b, v20.16b, v16.16b\n"
+ "ldr q0, [x28], #0x10\n"
"zip1 v31.16b, v19.16b, v17.16b\n"
- "zip1 v30.16b, v18.16b, v16.16b\n"
- "ldr q25, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "zip2 v24.16b, v19.16b, v17.16b\n"
- "zip2 v23.16b, v18.16b, v16.16b\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v22.16b, v25.16b, v17.16b\n"
- "zip1 v21.16b, v20.16b, v16.16b\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v29.16b, v25.16b, v17.16b\n"
- "zip2 v20.16b, v20.16b, v16.16b\n"
- "ldr q17, [x22], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "zip1 v28.16b, v19.16b, v17.16b\n"
- "zip1 v27.16b, v18.16b, v16.16b\n"
- "zip2 v26.16b, v19.16b, v17.16b\n"
- "zip2 v25.16b, v18.16b, v16.16b\n"
- "zip1 v16.16b, v3.16b, v2.16b\n"
- "zip2 v17.16b, v3.16b, v2.16b\n"
- "str q16, [x21, #0x0]\n"
- "zip1 v16.16b, v1.16b, v0.16b\n"
- "zip2 v19.16b, v1.16b, v0.16b\n"
- "str q17, [x21, #0x10]\n"
- "zip1 v18.16b, v31.16b, v30.16b\n"
- "zip2 v17.16b, v31.16b, v30.16b\n"
- "str q16, [x21, #0x20]\n"
- "zip1 v16.16b, v24.16b, v23.16b\n"
- "zip2 v24.16b, v24.16b, v23.16b\n"
- "str q19, [x21, #0x30]\n"
- "zip1 v23.16b, v22.16b, v21.16b\n"
- "zip2 v22.16b, v22.16b, v21.16b\n"
- "str q18, [x21, #0x40]\n"
- "zip1 v21.16b, v29.16b, v20.16b\n"
- "zip2 v20.16b, v29.16b, v20.16b\n"
- "str q17, [x21, #0x50]\n"
- "zip1 v19.16b, v28.16b, v27.16b\n"
- "zip2 v18.16b, v28.16b, v27.16b\n"
- "str q16, [x21, #0x60]\n"
- "zip1 v17.16b, v26.16b, v25.16b\n"
- "zip2 v16.16b, v26.16b, v25.16b\n"
- "str q24, [x21, #0x70]\n"
- "str q23, [x21, #0x80]\n"
- "str q22, [x21, #0x90]\n"
- "str q21, [x21, #0xa0]\n"
- "str q20, [x21, #0xb0]\n"
- "str q19, [x21, #0xc0]\n"
- "str q18, [x21, #0xd0]\n"
- "str q17, [x21, #0xe0]\n"
- "str q16, [x21, #0xf0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr q30, [x27], #0x10\n"
+ "zip2 v20.16b, v19.16b, v17.16b\n"
+ "ldr q29, [x26], #0x10\n"
+ "zip1 v19.16b, v18.16b, v16.16b\n"
+ "ldr q28, [x25], #0x10\n"
+ "zip2 v18.16b, v18.16b, v16.16b\n"
+ "ldr q27, [x24], #0x10\n"
+ "zip1 v17.16b, v3.16b, v1.16b\n"
+ "ldr q26, [x23], #0x10\n"
+ "zip1 v16.16b, v2.16b, v0.16b\n"
+ "ldr q25, [x22], #0x10\n"
+ "zip1 v24.16b, v17.16b, v16.16b\n"
+ "ldr q23, [x21], #0x10\n"
+ "zip2 v22.16b, v17.16b, v16.16b\n"
+ "ldr q21, [x20], #0x10\n"
+ "zip2 v17.16b, v3.16b, v1.16b\n"
+ "str q31, [x15, #0x0]\n"
+ "zip2 v16.16b, v2.16b, v0.16b\n"
+ "str q20, [x15, #0x10]\n"
+ "zip1 v20.16b, v17.16b, v16.16b\n"
+ "str q19, [x15, #0x20]\n"
+ "zip2 v19.16b, v17.16b, v16.16b\n"
+ "str q18, [x15, #0x30]\n"
+ "zip1 v18.16b, v30.16b, v28.16b\n"
+ "str q24, [x15, #0x40]\n"
+ "zip1 v16.16b, v29.16b, v27.16b\n"
+ "str q22, [x15, #0x50]\n"
+ "zip1 v17.16b, v18.16b, v16.16b\n"
+ "str q20, [x15, #0x60]\n"
+ "zip2 v16.16b, v18.16b, v16.16b\n"
+ "str q19, [x15, #0x70]\n"
+ "zip2 v18.16b, v30.16b, v28.16b\n"
+ "str q17, [x15, #0x80]\n"
+ "zip2 v17.16b, v29.16b, v27.16b\n"
+ "str q16, [x15, #0x90]\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0xa0]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0xb0]\n"
+ "zip1 v18.16b, v26.16b, v23.16b\n"
+ "zip1 v17.16b, v25.16b, v21.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0xc0]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0xd0]\n"
+ "zip2 v18.16b, v26.16b, v23.16b\n"
+ "zip2 v17.16b, v25.16b, v21.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0xe0]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0xf0]\n"
+ "add x15, x15, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Column loop skip
- "cmp x24, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 5f\n"
"4:" // Main row loop: width 4 loop: loop
- "ldr s19, [x17], #0x4\n"
"ldr s18, [x16], #0x4\n"
- "sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr s17, [x15], #0x4\n"
- "ldr s16, [x14], #0x4\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr s19, [x13], #0x4\n"
- "ldr s18, [x12], #0x4\n"
- "zip1 v22.16b, v17.16b, v16.16b\n"
- "ldr s17, [x11], #0x4\n"
- "ldr s16, [x10], #0x4\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr s19, [x9], #0x4\n"
- "ldr s18, [x28], #0x4\n"
- "zip1 v21.16b, v17.16b, v16.16b\n"
- "ldr s17, [x27], #0x4\n"
- "ldr s16, [x26], #0x4\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr s20, [x25], #0x4\n"
- "ldr s19, [x23], #0x4\n"
- "zip1 v18.16b, v17.16b, v16.16b\n"
- "ldr s17, [x22], #0x4\n"
+ "sub x19, x19, #0x4\n"
+ "ldr s17, [x14], #0x4\n"
+ "cmp x19, #0x4\n"
+ "ldr s16, [x13], #0x4\n"
+ "zip1 v19.16b, v18.16b, v16.16b\n"
+ "ldr s16, [x12], #0x4\n"
+ "ldr s18, [x11], #0x4\n"
+ "zip1 v16.16b, v17.16b, v16.16b\n"
+ "ldr s20, [x10], #0x4\n"
+ "ldr s17, [x9], #0x4\n"
+ "zip1 v23.16b, v19.16b, v16.16b\n"
+ "ldr s16, [x28], #0x4\n"
+ "zip1 v19.16b, v18.16b, v17.16b\n"
+ "ldr s18, [x27], #0x4\n"
+ "ldr s22, [x26], #0x4\n"
+ "zip1 v16.16b, v20.16b, v16.16b\n"
+ "ldr s17, [x25], #0x4\n"
+ "zip1 v21.16b, v19.16b, v16.16b\n"
+ "ldr s16, [x24], #0x4\n"
+ "zip1 v18.16b, v18.16b, v17.16b\n"
+ "ldr s20, [x23], #0x4\n"
+ "ldr s19, [x22], #0x4\n"
+ "zip1 v16.16b, v22.16b, v16.16b\n"
+ "ldr s17, [x21], #0x4\n"
+ "zip1 v18.16b, v18.16b, v16.16b\n"
"ldr s16, [x20], #0x4\n"
"zip1 v17.16b, v20.16b, v17.16b\n"
+ "str q23, [x15, #0x0]\n"
+ "str q21, [x15, #0x40]\n"
"zip1 v16.16b, v19.16b, v16.16b\n"
- "str q22, [x21, #0x0]\n"
+ "str q18, [x15, #0x80]\n"
"zip1 v16.16b, v17.16b, v16.16b\n"
- "str q21, [x21, #0x40]\n"
- "str q18, [x21, #0x80]\n"
- "str q16, [x21, #0xc0]\n"
- "add x21, x21, #0x10\n"
+ "str q16, [x15, #0xc0]\n"
+ "add x15, x15, #0x10\n"
"bge 4b\n"
"5:" // Main row loop: width 4 loop: skip
- "cmp x24, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 7f\n"
"6:" // Main row loop: width 1 loop: loop
- "ldr b19, [x17], #0x1\n"
"ldr b18, [x16], #0x1\n"
- "sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr b17, [x15], #0x1\n"
- "ldr b16, [x14], #0x1\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr b19, [x13], #0x1\n"
- "ldr b18, [x12], #0x1\n"
- "zip1 v22.16b, v17.16b, v16.16b\n"
- "ldr b17, [x11], #0x1\n"
- "ldr b16, [x10], #0x1\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr b19, [x9], #0x1\n"
- "ldr b18, [x28], #0x1\n"
- "zip1 v21.16b, v17.16b, v16.16b\n"
- "ldr b17, [x27], #0x1\n"
- "ldr b16, [x26], #0x1\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr b20, [x25], #0x1\n"
- "ldr b19, [x23], #0x1\n"
- "zip1 v18.16b, v17.16b, v16.16b\n"
- "ldr b17, [x22], #0x1\n"
+ "sub x19, x19, #0x1\n"
+ "ldr b17, [x14], #0x1\n"
+ "cmp x19, #0x1\n"
+ "ldr b16, [x13], #0x1\n"
+ "zip1 v19.16b, v18.16b, v16.16b\n"
+ "ldr b16, [x12], #0x1\n"
+ "ldr b18, [x11], #0x1\n"
+ "zip1 v16.16b, v17.16b, v16.16b\n"
+ "ldr b20, [x10], #0x1\n"
+ "ldr b17, [x9], #0x1\n"
+ "zip1 v23.16b, v19.16b, v16.16b\n"
+ "ldr b16, [x28], #0x1\n"
+ "zip1 v19.16b, v18.16b, v17.16b\n"
+ "ldr b18, [x27], #0x1\n"
+ "ldr b22, [x26], #0x1\n"
+ "zip1 v16.16b, v20.16b, v16.16b\n"
+ "ldr b17, [x25], #0x1\n"
+ "zip1 v21.16b, v19.16b, v16.16b\n"
+ "ldr b16, [x24], #0x1\n"
+ "zip1 v18.16b, v18.16b, v17.16b\n"
+ "ldr b20, [x23], #0x1\n"
+ "ldr b19, [x22], #0x1\n"
+ "zip1 v16.16b, v22.16b, v16.16b\n"
+ "ldr b17, [x21], #0x1\n"
+ "zip1 v18.16b, v18.16b, v16.16b\n"
"ldr b16, [x20], #0x1\n"
"zip1 v17.16b, v20.16b, v17.16b\n"
+ "str s23, [x15, #0x0]\n"
+ "str s21, [x15, #0x40]\n"
"zip1 v16.16b, v19.16b, v16.16b\n"
- "str s22, [x21, #0x0]\n"
+ "str s18, [x15, #0x80]\n"
"zip1 v16.16b, v17.16b, v16.16b\n"
- "str s21, [x21, #0x40]\n"
- "str s18, [x21, #0x80]\n"
- "str s16, [x21, #0xc0]\n"
- "add x21, x21, #0x4\n"
+ "str s16, [x15, #0xc0]\n"
+ "add x15, x15, #0x4\n"
"bge 6b\n"
"7:" // Main row loop: width 1 loop: skip
- "cmp %x[height], #0x10\n"
"add %x[out], %x[out], #0x100\n"
+ "cmp %x[height], #0x10\n"
"bge 1b\n"
"cbz %x[height], 16f\n"
"8:" // Main loop skip
+
"9:" // Tail row loop: Head
- "mov x17, %x[in]\n"
- "add x16, x17, %x[in_stride]\n"
- "add x15, x16, %x[in_stride]\n"
- "mov x20, %x[width]\n"
- "add x14, x15, %x[in_stride]\n"
+ "mov x16, %x[in]\n"
+ "mov x15, %x[out]\n"
+ "add x14, x16, %x[in_stride]\n"
+ "add x13, x14, %x[in_stride]\n"
+ "add x12, x13, %x[in_stride]\n"
+ "add %x[in], x12, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x14, %x[in_stride]\n"
- "csel x14, x14, %x[pad_row], GT\n"
- "csel x15, x15, %x[pad_row], GE\n"
+ "csel x12, x12, %x[pad_row], GT\n"
+ "csel x13, x13, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x16, x16, %x[pad_row], GT\n"
- "cmp x20, #0x10\n"
- "mov x21, %x[out]\n"
+ "csel x14, x14, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x4\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x10\n"
"blt 11f\n"
"10:" // Tail row loop: Column loop
- "ldr q20, [x17], #0x10\n"
- "ldr q21, [x16], #0x10\n"
- "sub x20, x20, #0x10\n"
- "cmp x20, #0x10\n"
- "ldr q19, [x15], #0x10\n"
- "ldr q16, [x14], #0x10\n"
- "zip1 v18.16b, v20.16b, v19.16b\n"
- "zip1 v17.16b, v21.16b, v16.16b\n"
- "zip2 v20.16b, v20.16b, v19.16b\n"
- "zip2 v19.16b, v21.16b, v16.16b\n"
- "zip1 v16.16b, v18.16b, v17.16b\n"
- "zip2 v18.16b, v18.16b, v17.16b\n"
- "str q16, [x21, #0x0]\n"
- "zip1 v17.16b, v20.16b, v19.16b\n"
- "zip2 v16.16b, v20.16b, v19.16b\n"
- "str q18, [x21, #0x10]\n"
- "str q17, [x21, #0x20]\n"
- "str q16, [x21, #0x30]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr q18, [x16], #0x10\n"
+ "sub x19, x19, #0x10\n"
+ "ldr q21, [x14], #0x10\n"
+ "cmp x19, #0x10\n"
+ "ldr q17, [x13], #0x10\n"
+ "zip1 v20.16b, v18.16b, v17.16b\n"
+ "ldr q16, [x12], #0x10\n"
+ "zip2 v19.16b, v18.16b, v17.16b\n"
+ "zip1 v18.16b, v21.16b, v16.16b\n"
+ "zip2 v17.16b, v21.16b, v16.16b\n"
+ "zip1 v16.16b, v20.16b, v18.16b\n"
+ "str q16, [x15, #0x0]\n"
+ "zip2 v16.16b, v20.16b, v18.16b\n"
+ "str q16, [x15, #0x10]\n"
+ "zip1 v16.16b, v19.16b, v17.16b\n"
+ "str q16, [x15, #0x20]\n"
+ "zip2 v16.16b, v19.16b, v17.16b\n"
+ "str q16, [x15, #0x30]\n"
+ "add x15, x15, %x[out_stride]\n"
"bge 10b\n"
"11:" // Tail row loop: Column loop skip
- "cmp x20, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 13f\n"
"12:" // Tail row loop: width 4 loop: loop
- "ldr s19, [x17], #0x4\n"
- "ldr s18, [x16], #0x4\n"
- "sub x20, x20, #0x4\n"
- "cmp x20, #0x4\n"
- "ldr s17, [x15], #0x4\n"
- "ldr s16, [x14], #0x4\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
+ "ldr s17, [x16], #0x4\n"
+ "sub x19, x19, #0x4\n"
+ "ldr s18, [x14], #0x4\n"
+ "cmp x19, #0x4\n"
+ "ldr s16, [x13], #0x4\n"
+ "zip1 v17.16b, v17.16b, v16.16b\n"
+ "ldr s16, [x12], #0x4\n"
"zip1 v16.16b, v18.16b, v16.16b\n"
"zip1 v16.16b, v17.16b, v16.16b\n"
- "str q16, [x21, #0x0]\n"
- "add x21, x21, #0x10\n"
+ "str q16, [x15, #0x0]\n"
+ "add x15, x15, #0x10\n"
"bge 12b\n"
"13:" // Tail row loop: width 4 loop: skip
- "cmp x20, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 15f\n"
"14:" // Tail row loop: width 1 loop: loop
- "ldr b19, [x17], #0x1\n"
- "ldr b18, [x16], #0x1\n"
- "sub x20, x20, #0x1\n"
- "cmp x20, #0x1\n"
- "ldr b17, [x15], #0x1\n"
- "ldr b16, [x14], #0x1\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
+ "ldr b17, [x16], #0x1\n"
+ "sub x19, x19, #0x1\n"
+ "ldr b18, [x14], #0x1\n"
+ "cmp x19, #0x1\n"
+ "ldr b16, [x13], #0x1\n"
+ "zip1 v17.16b, v17.16b, v16.16b\n"
+ "ldr b16, [x12], #0x1\n"
"zip1 v16.16b, v18.16b, v16.16b\n"
"zip1 v16.16b, v17.16b, v16.16b\n"
- "str s16, [x21, #0x0]\n"
- "add x21, x21, #0x4\n"
+ "str s16, [x15, #0x0]\n"
+ "add x15, x15, #0x4\n"
"bge 14b\n"
"15:" // Tail row loop: width 1 loop: skip
- "cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x40\n"
+ "cmp %x[height], #0x1\n"
"bge 9b\n"
"16:" // Done
+
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
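
(Editor's aside: unlike the plain copies above, a64_transpose_interleave_16_1x4 transposes at byte granularity; the zip1/zip2 ladders gather one byte from each of four rows into four consecutive output bytes. A hedged sketch of the 16-row fast path, same assumptions as the earlier sketches and without the pad_row handling of the tail loops.)

#include <stddef.h>
#include <stdint.h>

static void transpose_interleave_16_1x4_ref(
    uint8_t *out, const uint8_t *in, size_t width,
    size_t in_stride, size_t out_stride, size_t height)
{
    for (size_t row = 0; row < height; row += 16) {
        uint8_t *panel = out;
        for (size_t col = 0; col < width; col += 16) {
            for (size_t g = 0; g < 4; g++)        /* four 4-row subgroups per panel */
                for (size_t c = 0; c < 16; c++)
                    for (size_t r = 0; r < 4; r++)  /* zip1/zip2 byte transpose */
                        panel[g * 64 + c * 4 + r] =
                            in[(row + 4 * g + r) * in_stride + col + c];
            panel += out_stride;                  /* next 16-wide column panel */
        }
        out += 256;                               /* 0x100 bytes per 16-row group */
    }
}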
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_1x8.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_1x8.hpp
index fa45f4fd4d..f52fbbae4d 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_1x8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_1x8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -41,222 +41,222 @@ void a64_transpose_interleave_16_1x8(uint8_t *out, const uint8_t *in, size_t wid
__asm__ __volatile__(
"1:" // Main row loop: Head
- "mov x9, %x[in]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
- "add x26, x27, %x[in_stride]\n"
+ "mov x28, %x[in]\n"
+ "mov x27, %x[out]\n"
+ "add x26, x28, %x[in_stride]\n"
"add x25, x26, %x[in_stride]\n"
"add x24, x25, %x[in_stride]\n"
"add x23, x24, %x[in_stride]\n"
"add x22, x23, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
+ "add %x[in], x20, %x[in_stride]\n"
"cmp %x[height], #0x7\n"
- "add %x[in], x22, %x[in_stride]\n"
+ "csel x20, x20, %x[pad_row], GT\n"
+ "csel x21, x21, %x[pad_row], GE\n"
+ "cmp %x[height], #0x5\n"
"csel x22, x22, %x[pad_row], GT\n"
"csel x23, x23, %x[pad_row], GE\n"
- "cmp %x[height], #0x5\n"
- "mov x21, %x[width]\n"
+ "cmp %x[height], #0x3\n"
"csel x24, x24, %x[pad_row], GT\n"
"csel x25, x25, %x[pad_row], GE\n"
- "cmp %x[height], #0x3\n"
- "csel x26, x26, %x[pad_row], GT\n"
- "csel x27, x27, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x28, x28, %x[pad_row], GT\n"
- "cmp x21, #0x20\n"
- "mov x20, %x[out]\n"
+ "csel x26, x26, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x8\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x20\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ldr q23, [x9], #0x10\n"
- "ldr q22, [x28], #0x10\n"
- "sub x21, x21, #0x20\n"
- "cmp x21, #0x20\n"
- "ldr q20, [x27], #0x10\n"
- "ldr q21, [x26], #0x10\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x24], #0x10\n"
- "zip1 v5.16b, v23.16b, v19.16b\n"
- "zip1 v4.16b, v22.16b, v18.16b\n"
- "ldr q17, [x23], #0x10\n"
- "ldr q16, [x22], #0x10\n"
- "zip1 v3.16b, v20.16b, v17.16b\n"
- "zip1 v31.16b, v21.16b, v16.16b\n"
- "ldr q25, [x9], #0x10\n"
- "ldr q24, [x28], #0x10\n"
- "zip2 v2.16b, v23.16b, v19.16b\n"
- "zip2 v30.16b, v20.16b, v17.16b\n"
- "ldr q23, [x27], #0x10\n"
- "ldr q20, [x26], #0x10\n"
- "zip2 v22.16b, v22.16b, v18.16b\n"
- "zip2 v21.16b, v21.16b, v16.16b\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x24], #0x10\n"
- "zip1 v29.16b, v25.16b, v19.16b\n"
- "zip1 v28.16b, v24.16b, v18.16b\n"
+ "ldr q17, [x28], #0x10\n"
+ "sub x19, x19, #0x20\n"
+ "ldr q19, [x26], #0x10\n"
+ "cmp x19, #0x20\n"
+ "ldr q4, [x25], #0x10\n"
+ "ldr q3, [x24], #0x10\n"
+ "ldr q16, [x23], #0x10\n"
+ "zip1 v2.16b, v17.16b, v16.16b\n"
+ "ldr q18, [x28], #0x10\n"
+ "zip2 v1.16b, v17.16b, v16.16b\n"
+ "ldr q0, [x26], #0x10\n"
+ "ldr q31, [x25], #0x10\n"
+ "ldr q30, [x24], #0x10\n"
"ldr q17, [x23], #0x10\n"
+ "zip1 v29.16b, v18.16b, v17.16b\n"
"ldr q16, [x22], #0x10\n"
- "zip1 v27.16b, v23.16b, v17.16b\n"
- "zip1 v26.16b, v20.16b, v16.16b\n"
- "zip2 v1.16b, v25.16b, v19.16b\n"
- "zip2 v25.16b, v23.16b, v17.16b\n"
- "zip2 v24.16b, v24.16b, v18.16b\n"
- "zip2 v16.16b, v20.16b, v16.16b\n"
- "zip1 v0.16b, v5.16b, v3.16b\n"
- "zip1 v17.16b, v4.16b, v31.16b\n"
- "zip2 v20.16b, v5.16b, v3.16b\n"
- "zip2 v19.16b, v4.16b, v31.16b\n"
- "zip1 v31.16b, v2.16b, v30.16b\n"
- "zip1 v18.16b, v22.16b, v21.16b\n"
- "zip2 v30.16b, v2.16b, v30.16b\n"
- "zip2 v23.16b, v22.16b, v21.16b\n"
- "zip1 v22.16b, v29.16b, v27.16b\n"
- "zip1 v21.16b, v28.16b, v26.16b\n"
- "zip2 v29.16b, v29.16b, v27.16b\n"
- "zip2 v28.16b, v28.16b, v26.16b\n"
- "zip1 v27.16b, v1.16b, v25.16b\n"
- "zip1 v26.16b, v24.16b, v16.16b\n"
- "zip2 v25.16b, v1.16b, v25.16b\n"
- "zip2 v24.16b, v24.16b, v16.16b\n"
- "zip1 v16.16b, v0.16b, v17.16b\n"
- "zip2 v17.16b, v0.16b, v17.16b\n"
- "str q16, [x20, #0x0]\n"
- "zip1 v16.16b, v20.16b, v19.16b\n"
- "zip2 v20.16b, v20.16b, v19.16b\n"
- "str q17, [x20, #0x10]\n"
- "zip1 v19.16b, v31.16b, v18.16b\n"
- "zip2 v18.16b, v31.16b, v18.16b\n"
- "str q16, [x20, #0x20]\n"
- "zip1 v17.16b, v30.16b, v23.16b\n"
- "zip2 v16.16b, v30.16b, v23.16b\n"
- "str q20, [x20, #0x30]\n"
- "str q19, [x20, #0x40]\n"
- "zip1 v23.16b, v22.16b, v21.16b\n"
- "zip2 v22.16b, v22.16b, v21.16b\n"
- "str q18, [x20, #0x50]\n"
- "zip1 v21.16b, v29.16b, v28.16b\n"
- "zip2 v20.16b, v29.16b, v28.16b\n"
- "str q17, [x20, #0x60]\n"
- "zip1 v19.16b, v27.16b, v26.16b\n"
- "zip2 v18.16b, v27.16b, v26.16b\n"
- "str q16, [x20, #0x70]\n"
- "add x20, x20, %x[out_stride]\n"
- "zip1 v17.16b, v25.16b, v24.16b\n"
- "zip2 v16.16b, v25.16b, v24.16b\n"
- "str q23, [x20, #0x0]\n"
- "str q22, [x20, #0x10]\n"
- "str q21, [x20, #0x20]\n"
- "str q20, [x20, #0x30]\n"
- "str q19, [x20, #0x40]\n"
- "str q18, [x20, #0x50]\n"
- "str q17, [x20, #0x60]\n"
- "str q16, [x20, #0x70]\n"
- "add x20, x20, %x[out_stride]\n"
+ "zip2 v28.16b, v18.16b, v17.16b\n"
+ "ldr q27, [x21], #0x10\n"
+ "ldr q26, [x20], #0x10\n"
+ "zip1 v25.16b, v19.16b, v16.16b\n"
+ "ldr q24, [x22], #0x10\n"
+ "zip2 v21.16b, v19.16b, v16.16b\n"
+ "ldr q23, [x21], #0x10\n"
+ "zip1 v20.16b, v4.16b, v27.16b\n"
+ "ldr q22, [x20], #0x10\n"
+ "zip1 v18.16b, v2.16b, v20.16b\n"
+ "zip1 v19.16b, v3.16b, v26.16b\n"
+ "zip1 v17.16b, v25.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x0]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x10]\n"
+ "zip2 v18.16b, v2.16b, v20.16b\n"
+ "zip2 v17.16b, v25.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x20]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x30]\n"
+ "zip2 v20.16b, v4.16b, v27.16b\n"
+ "zip1 v18.16b, v1.16b, v20.16b\n"
+ "zip2 v19.16b, v3.16b, v26.16b\n"
+ "zip1 v17.16b, v21.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x40]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x50]\n"
+ "zip2 v18.16b, v1.16b, v20.16b\n"
+ "zip2 v17.16b, v21.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x60]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x70]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "zip1 v21.16b, v31.16b, v23.16b\n"
+ "zip1 v20.16b, v0.16b, v24.16b\n"
+ "zip1 v18.16b, v29.16b, v21.16b\n"
+ "zip1 v19.16b, v30.16b, v22.16b\n"
+ "zip1 v17.16b, v20.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x0]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x10]\n"
+ "zip2 v18.16b, v29.16b, v21.16b\n"
+ "zip2 v17.16b, v20.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x20]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x30]\n"
+ "zip2 v21.16b, v31.16b, v23.16b\n"
+ "zip1 v18.16b, v28.16b, v21.16b\n"
+ "zip2 v20.16b, v0.16b, v24.16b\n"
+ "zip2 v19.16b, v30.16b, v22.16b\n"
+ "zip1 v17.16b, v20.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x40]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x50]\n"
+ "zip2 v18.16b, v28.16b, v21.16b\n"
+ "zip2 v17.16b, v20.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x60]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x70]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cmp x21, #0x10\n"
+ "cmp x19, #0x10\n"
"blt 5f\n"
"4:" // Main row loop: Column loop
- "ldr q25, [x9], #0x10\n"
- "ldr q27, [x28], #0x10\n"
- "sub x21, x21, #0x10\n"
- "cmp x21, #0x10\n"
- "ldr q26, [x27], #0x10\n"
- "ldr q24, [x26], #0x10\n"
- "ldr q22, [x25], #0x10\n"
- "ldr q21, [x24], #0x10\n"
- "zip1 v20.16b, v25.16b, v22.16b\n"
- "zip1 v23.16b, v27.16b, v21.16b\n"
+ "ldr q19, [x28], #0x10\n"
+ "sub x19, x19, #0x10\n"
+ "ldr q18, [x26], #0x10\n"
+ "cmp x19, #0x10\n"
+ "ldr q28, [x25], #0x10\n"
+ "ldr q27, [x24], #0x10\n"
"ldr q17, [x23], #0x10\n"
+ "zip1 v26.16b, v19.16b, v17.16b\n"
"ldr q16, [x22], #0x10\n"
- "zip1 v19.16b, v26.16b, v17.16b\n"
- "zip1 v18.16b, v24.16b, v16.16b\n"
- "zip2 v25.16b, v25.16b, v22.16b\n"
- "zip2 v22.16b, v26.16b, v17.16b\n"
- "zip2 v21.16b, v27.16b, v21.16b\n"
- "zip2 v16.16b, v24.16b, v16.16b\n"
- "zip1 v24.16b, v20.16b, v19.16b\n"
- "zip1 v17.16b, v23.16b, v18.16b\n"
- "zip2 v20.16b, v20.16b, v19.16b\n"
- "zip2 v19.16b, v23.16b, v18.16b\n"
- "zip1 v23.16b, v25.16b, v22.16b\n"
- "zip1 v18.16b, v21.16b, v16.16b\n"
- "zip2 v22.16b, v25.16b, v22.16b\n"
- "zip2 v21.16b, v21.16b, v16.16b\n"
- "zip1 v16.16b, v24.16b, v17.16b\n"
- "zip2 v17.16b, v24.16b, v17.16b\n"
- "str q16, [x20, #0x0]\n"
- "zip1 v16.16b, v20.16b, v19.16b\n"
- "zip2 v20.16b, v20.16b, v19.16b\n"
- "str q17, [x20, #0x10]\n"
- "zip1 v19.16b, v23.16b, v18.16b\n"
- "zip2 v18.16b, v23.16b, v18.16b\n"
- "str q16, [x20, #0x20]\n"
- "zip1 v17.16b, v22.16b, v21.16b\n"
- "zip2 v16.16b, v22.16b, v21.16b\n"
- "str q20, [x20, #0x30]\n"
- "str q19, [x20, #0x40]\n"
- "str q18, [x20, #0x50]\n"
- "str q17, [x20, #0x60]\n"
- "str q16, [x20, #0x70]\n"
- "add x20, x20, %x[out_stride]\n"
+ "zip2 v25.16b, v19.16b, v17.16b\n"
+ "ldr q24, [x21], #0x10\n"
+ "ldr q23, [x20], #0x10\n"
+ "zip1 v22.16b, v18.16b, v16.16b\n"
+ "zip2 v21.16b, v18.16b, v16.16b\n"
+ "zip1 v20.16b, v28.16b, v24.16b\n"
+ "zip1 v18.16b, v26.16b, v20.16b\n"
+ "zip1 v19.16b, v27.16b, v23.16b\n"
+ "zip1 v17.16b, v22.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x0]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x10]\n"
+ "zip2 v18.16b, v26.16b, v20.16b\n"
+ "zip2 v17.16b, v22.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x20]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x30]\n"
+ "zip2 v20.16b, v28.16b, v24.16b\n"
+ "zip1 v18.16b, v25.16b, v20.16b\n"
+ "zip2 v19.16b, v27.16b, v23.16b\n"
+ "zip1 v17.16b, v21.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x40]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x50]\n"
+ "zip2 v18.16b, v25.16b, v20.16b\n"
+ "zip2 v17.16b, v21.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x60]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x70]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp x21, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
- "ldr s18, [x9], #0x4\n"
- "ldr s19, [x28], #0x4\n"
- "sub x21, x21, #0x4\n"
- "cmp x21, #0x4\n"
- "ldr s21, [x27], #0x4\n"
- "ldr s20, [x26], #0x4\n"
- "ldr s17, [x25], #0x4\n"
- "ldr s16, [x24], #0x4\n"
- "zip1 v18.16b, v18.16b, v17.16b\n"
- "zip1 v19.16b, v19.16b, v16.16b\n"
- "ldr s17, [x23], #0x4\n"
- "ldr s16, [x22], #0x4\n"
+ "ldr s17, [x28], #0x4\n"
+ "sub x19, x19, #0x4\n"
+ "ldr s21, [x26], #0x4\n"
+ "cmp x19, #0x4\n"
+ "ldr s18, [x25], #0x4\n"
+ "ldr s20, [x24], #0x4\n"
+ "ldr s16, [x23], #0x4\n"
+ "zip1 v19.16b, v17.16b, v16.16b\n"
+ "ldr s17, [x22], #0x4\n"
+ "ldr s16, [x21], #0x4\n"
+ "zip1 v18.16b, v18.16b, v16.16b\n"
+ "ldr s16, [x20], #0x4\n"
"zip1 v17.16b, v21.16b, v17.16b\n"
+ "zip1 v18.16b, v19.16b, v18.16b\n"
"zip1 v16.16b, v20.16b, v16.16b\n"
- "zip1 v18.16b, v18.16b, v17.16b\n"
- "zip1 v16.16b, v19.16b, v16.16b\n"
- "zip1 v17.16b, v18.16b, v16.16b\n"
- "zip2 v16.16b, v18.16b, v16.16b\n"
- "str q17, [x20, #0x0]\n"
- "str q16, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
+ "zip1 v17.16b, v17.16b, v16.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x0]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x27, #0x10]\n"
+ "add x27, x27, #0x20\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
- "cmp x21, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
- "ldr b19, [x9], #0x1\n"
"ldr b18, [x28], #0x1\n"
- "sub x21, x21, #0x1\n"
- "cmp x21, #0x1\n"
- "ldr b21, [x27], #0x1\n"
- "ldr b20, [x26], #0x1\n"
+ "sub x19, x19, #0x1\n"
+ "ldr b21, [x26], #0x1\n"
+ "cmp x19, #0x1\n"
"ldr b17, [x25], #0x1\n"
- "ldr b16, [x24], #0x1\n"
- "zip1 v19.16b, v19.16b, v17.16b\n"
- "zip1 v18.16b, v18.16b, v16.16b\n"
- "ldr b17, [x23], #0x1\n"
- "ldr b16, [x22], #0x1\n"
- "zip1 v17.16b, v21.16b, v17.16b\n"
- "zip1 v16.16b, v20.16b, v16.16b\n"
+ "ldr b20, [x24], #0x1\n"
+ "ldr b16, [x23], #0x1\n"
+ "zip1 v19.16b, v18.16b, v16.16b\n"
+ "ldr b18, [x22], #0x1\n"
+ "ldr b16, [x21], #0x1\n"
+ "zip1 v17.16b, v17.16b, v16.16b\n"
+ "ldr b16, [x20], #0x1\n"
+ "zip1 v18.16b, v21.16b, v18.16b\n"
"zip1 v17.16b, v19.16b, v17.16b\n"
+ "zip1 v16.16b, v20.16b, v16.16b\n"
"zip1 v16.16b, v18.16b, v16.16b\n"
"zip1 v16.16b, v17.16b, v16.16b\n"
- "str d16, [x20, #0x0]\n"
- "add x20, x20, #0x8\n"
+ "str d16, [x27, #0x0]\n"
+ "add x27, x27, #0x8\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
- "cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x80\n"
+ "cmp %x[height], #0x1\n"
"bge 1b\n"
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_2x2.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_2x2.hpp
index 06efa9781e..cfac12a84a 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_2x2.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_2x2.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -42,189 +42,189 @@ void a64_transpose_interleave_16_2x2(uint16_t *out, const uint16_t *in, size_t w
"cmp %x[height], #0x8\n"
"blt 8f\n"
"1:" // Main row loop: Head
- "mov x9, %x[in]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
- "add x26, x27, %x[in_stride]\n"
+ "mov x28, %x[in]\n"
+ "mov x27, %x[out]\n"
+ "add x26, x28, %x[in_stride]\n"
"add x25, x26, %x[in_stride]\n"
- "mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "add x23, x24, %x[in_stride]\n"
"add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
- "cmp x24, #0x10\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x8\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x10\n"
"blt 3f\n"
"2:" // Main row loop: Column loop
- "ldr q17, [x9], #0x10\n"
- "ldr q16, [x28], #0x10\n"
- "sub x24, x24, #0x10\n"
- "cmp x24, #0x10\n"
- "ldr q19, [x27], #0x10\n"
- "ldr q18, [x26], #0x10\n"
- "zip1 v1.8h, v17.8h, v16.8h\n"
- "zip2 v0.8h, v17.8h, v16.8h\n"
- "ldr q17, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
- "zip1 v31.8h, v19.8h, v18.8h\n"
- "zip2 v30.8h, v19.8h, v18.8h\n"
- "ldr q29, [x22], #0x10\n"
- "ldr q18, [x20], #0x10\n"
- "zip1 v28.8h, v17.8h, v16.8h\n"
- "zip2 v27.8h, v17.8h, v16.8h\n"
- "ldr q17, [x9], #0x10\n"
- "ldr q16, [x28], #0x10\n"
- "zip1 v26.8h, v17.8h, v16.8h\n"
- "zip2 v25.8h, v17.8h, v16.8h\n"
- "ldr q17, [x27], #0x10\n"
+ "ldr q18, [x28], #0x10\n"
+ "sub x19, x19, #0x10\n"
"ldr q16, [x26], #0x10\n"
- "zip1 v24.8h, v17.8h, v16.8h\n"
- "zip2 v23.8h, v17.8h, v16.8h\n"
- "ldr q17, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
- "zip1 v22.8h, v17.8h, v16.8h\n"
- "zip2 v21.8h, v17.8h, v16.8h\n"
- "ldr q20, [x22], #0x10\n"
+ "zip1 v1.8h, v18.8h, v16.8h\n"
+ "ldr q17, [x28], #0x10\n"
+ "cmp x19, #0x10\n"
+ "zip2 v0.8h, v18.8h, v16.8h\n"
+ "ldr q16, [x26], #0x10\n"
+ "ldr q19, [x25], #0x10\n"
+ "zip1 v31.8h, v17.8h, v16.8h\n"
+ "ldr q18, [x25], #0x10\n"
+ "zip2 v30.8h, v17.8h, v16.8h\n"
+ "ldr q16, [x24], #0x10\n"
+ "ldr q20, [x23], #0x10\n"
+ "zip1 v29.8h, v19.8h, v16.8h\n"
+ "ldr q17, [x24], #0x10\n"
+ "zip2 v28.8h, v19.8h, v16.8h\n"
+ "ldr q19, [x23], #0x10\n"
+ "ldr q16, [x22], #0x10\n"
+ "zip1 v27.8h, v18.8h, v17.8h\n"
+ "ldr q26, [x21], #0x10\n"
+ "zip2 v25.8h, v18.8h, v17.8h\n"
+ "ldr q18, [x22], #0x10\n"
+ "zip1 v24.8h, v20.8h, v16.8h\n"
+ "ldr q17, [x20], #0x10\n"
+ "zip2 v23.8h, v20.8h, v16.8h\n"
+ "ldr q22, [x21], #0x10\n"
+ "zip1 v21.8h, v19.8h, v18.8h\n"
"ldr q16, [x20], #0x10\n"
- "zip1 v19.8h, v29.8h, v18.8h\n"
- "zip2 v18.8h, v29.8h, v18.8h\n"
- "zip1 v17.8h, v20.8h, v16.8h\n"
- "zip2 v16.8h, v20.8h, v16.8h\n"
- "str q1, [x21, #0x0]\n"
- "str q0, [x21, #0x10]\n"
- "str q26, [x21, #0x20]\n"
- "str q25, [x21, #0x30]\n"
- "str q31, [x21, #0x40]\n"
- "str q30, [x21, #0x50]\n"
- "str q24, [x21, #0x60]\n"
- "str q23, [x21, #0x70]\n"
- "str q28, [x21, #0x80]\n"
- "str q27, [x21, #0x90]\n"
- "str q22, [x21, #0xa0]\n"
- "str q21, [x21, #0xb0]\n"
- "str q19, [x21, #0xc0]\n"
- "str q18, [x21, #0xd0]\n"
- "str q17, [x21, #0xe0]\n"
- "str q16, [x21, #0xf0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "zip2 v20.8h, v19.8h, v18.8h\n"
+ "zip1 v19.8h, v26.8h, v17.8h\n"
+ "str q1, [x27, #0x0]\n"
+ "zip2 v18.8h, v26.8h, v17.8h\n"
+ "str q0, [x27, #0x10]\n"
+ "str q31, [x27, #0x20]\n"
+ "zip1 v17.8h, v22.8h, v16.8h\n"
+ "str q30, [x27, #0x30]\n"
+ "zip2 v16.8h, v22.8h, v16.8h\n"
+ "str q29, [x27, #0x40]\n"
+ "str q28, [x27, #0x50]\n"
+ "str q27, [x27, #0x60]\n"
+ "str q25, [x27, #0x70]\n"
+ "str q24, [x27, #0x80]\n"
+ "str q23, [x27, #0x90]\n"
+ "str q21, [x27, #0xa0]\n"
+ "str q20, [x27, #0xb0]\n"
+ "str q19, [x27, #0xc0]\n"
+ "str q18, [x27, #0xd0]\n"
+ "str q17, [x27, #0xe0]\n"
+ "str q16, [x27, #0xf0]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Column loop skip
- "cmp x24, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 5f\n"
"4:" // Main row loop: width 4 loop: loop
- "ldr d19, [x9], #0x8\n"
- "ldr d18, [x28], #0x8\n"
- "sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr d17, [x27], #0x8\n"
+ "ldr d17, [x28], #0x8\n"
+ "sub x19, x19, #0x4\n"
"ldr d16, [x26], #0x8\n"
- "zip1 v20.8h, v19.8h, v18.8h\n"
- "zip1 v19.8h, v17.8h, v16.8h\n"
+ "zip1 v20.8h, v17.8h, v16.8h\n"
"ldr d17, [x25], #0x8\n"
- "ldr d16, [x23], #0x8\n"
+ "cmp x19, #0x4\n"
+ "ldr d16, [x24], #0x8\n"
+ "zip1 v19.8h, v17.8h, v16.8h\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d16, [x22], #0x8\n"
"zip1 v18.8h, v17.8h, v16.8h\n"
- "ldr d17, [x22], #0x8\n"
+ "ldr d17, [x21], #0x8\n"
"ldr d16, [x20], #0x8\n"
- "str q20, [x21, #0x0]\n"
"zip1 v16.8h, v17.8h, v16.8h\n"
- "str q19, [x21, #0x40]\n"
- "str q18, [x21, #0x80]\n"
- "str q16, [x21, #0xc0]\n"
- "add x21, x21, #0x10\n"
+ "str q20, [x27, #0x0]\n"
+ "str q19, [x27, #0x40]\n"
+ "str q18, [x27, #0x80]\n"
+ "str q16, [x27, #0xc0]\n"
+ "add x27, x27, #0x10\n"
"bge 4b\n"
"5:" // Main row loop: width 4 loop: skip
- "cmp x24, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 7f\n"
"6:" // Main row loop: width 1 loop: loop
- "ldr h19, [x9], #0x2\n"
- "ldr h18, [x28], #0x2\n"
- "sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr h17, [x27], #0x2\n"
+ "ldr h17, [x28], #0x2\n"
+ "sub x19, x19, #0x1\n"
"ldr h16, [x26], #0x2\n"
- "zip1 v20.8h, v19.8h, v18.8h\n"
- "zip1 v19.8h, v17.8h, v16.8h\n"
+ "zip1 v20.8h, v17.8h, v16.8h\n"
"ldr h17, [x25], #0x2\n"
- "ldr h16, [x23], #0x2\n"
+ "cmp x19, #0x1\n"
+ "ldr h16, [x24], #0x2\n"
+ "zip1 v19.8h, v17.8h, v16.8h\n"
+ "ldr h17, [x23], #0x2\n"
+ "ldr h16, [x22], #0x2\n"
"zip1 v18.8h, v17.8h, v16.8h\n"
- "ldr h17, [x22], #0x2\n"
+ "ldr h17, [x21], #0x2\n"
"ldr h16, [x20], #0x2\n"
- "str s20, [x21, #0x0]\n"
"zip1 v16.8h, v17.8h, v16.8h\n"
- "str s19, [x21, #0x40]\n"
- "str s18, [x21, #0x80]\n"
- "str s16, [x21, #0xc0]\n"
- "add x21, x21, #0x4\n"
+ "str s20, [x27, #0x0]\n"
+ "str s19, [x27, #0x40]\n"
+ "str s18, [x27, #0x80]\n"
+ "str s16, [x27, #0xc0]\n"
+ "add x27, x27, #0x4\n"
"bge 6b\n"
"7:" // Main row loop: width 1 loop: skip
- "cmp %x[height], #0x8\n"
"add %x[out], %x[out], #0x100\n"
+ "cmp %x[height], #0x8\n"
"bge 1b\n"
"cbz %x[height], 16f\n"
"8:" // Main loop skip
"9:" // Tail row loop: Head
- "mov x9, %x[in]\n"
- "mov x20, %x[width]\n"
- "add x28, x9, %x[in_stride]\n"
+ "mov x28, %x[in]\n"
+ "mov x27, %x[out]\n"
+ "add x26, x28, %x[in_stride]\n"
+ "add %x[in], x26, %x[in_stride]\n"
"cmp %x[height], #0x1\n"
- "add %x[in], x28, %x[in_stride]\n"
- "csel x28, x28, %x[pad_row], GT\n"
- "cmp x20, #0x10\n"
- "mov x21, %x[out]\n"
+ "csel x26, x26, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x2\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x10\n"
"blt 11f\n"
"10:" // Tail row loop: Column loop
- "ldr q18, [x9], #0x10\n"
- "ldr q17, [x28], #0x10\n"
- "sub x20, x20, #0x10\n"
- "cmp x20, #0x10\n"
- "ldr q20, [x9], #0x10\n"
- "ldr q16, [x28], #0x10\n"
- "zip1 v19.8h, v18.8h, v17.8h\n"
- "zip2 v18.8h, v18.8h, v17.8h\n"
- "zip1 v17.8h, v20.8h, v16.8h\n"
- "zip2 v16.8h, v20.8h, v16.8h\n"
- "str q19, [x21, #0x0]\n"
- "str q18, [x21, #0x10]\n"
- "str q17, [x21, #0x20]\n"
- "str q16, [x21, #0x30]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr q18, [x28], #0x10\n"
+ "sub x19, x19, #0x10\n"
+ "ldr q16, [x26], #0x10\n"
+ "zip1 v17.8h, v18.8h, v16.8h\n"
+ "ldr q19, [x28], #0x10\n"
+ "cmp x19, #0x10\n"
+ "zip2 v18.8h, v18.8h, v16.8h\n"
+ "ldr q16, [x26], #0x10\n"
+ "str q17, [x27, #0x0]\n"
+ "zip1 v17.8h, v19.8h, v16.8h\n"
+ "str q18, [x27, #0x10]\n"
+ "zip2 v16.8h, v19.8h, v16.8h\n"
+ "str q17, [x27, #0x20]\n"
+ "str q16, [x27, #0x30]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 10b\n"
"11:" // Tail row loop: Column loop skip
- "cmp x20, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 13f\n"
"12:" // Tail row loop: width 4 loop: loop
- "ldr d17, [x9], #0x8\n"
- "ldr d16, [x28], #0x8\n"
- "sub x20, x20, #0x4\n"
- "cmp x20, #0x4\n"
+ "ldr d17, [x28], #0x8\n"
+ "sub x19, x19, #0x4\n"
+ "ldr d16, [x26], #0x8\n"
"zip1 v16.8h, v17.8h, v16.8h\n"
- "str q16, [x21, #0x0]\n"
- "add x21, x21, #0x10\n"
+ "str q16, [x27, #0x0]\n"
+ "add x27, x27, #0x10\n"
+ "cmp x19, #0x4\n"
"bge 12b\n"
"13:" // Tail row loop: width 4 loop: skip
- "cmp x20, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 15f\n"
"14:" // Tail row loop: width 1 loop: loop
- "ldr h17, [x9], #0x2\n"
- "ldr h16, [x28], #0x2\n"
- "sub x20, x20, #0x1\n"
- "cmp x20, #0x1\n"
+ "ldr h17, [x28], #0x2\n"
+ "sub x19, x19, #0x1\n"
+ "ldr h16, [x26], #0x2\n"
"zip1 v16.8h, v17.8h, v16.8h\n"
- "str s16, [x21, #0x0]\n"
- "add x21, x21, #0x4\n"
+ "str s16, [x27, #0x0]\n"
+ "add x27, x27, #0x4\n"
+ "cmp x19, #0x1\n"
"bge 14b\n"
"15:" // Tail row loop: width 1 loop: skip
- "cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x40\n"
+ "cmp %x[height], #0x1\n"
"bge 9b\n"
"16:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "v0", "v1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
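
For orientation: the NEON zip1/zip2 pairs in the kernel above interleave input rows element-by-element into the packed output panel, and the width-4 and width-1 loops handle the leftover columns the q-register loop cannot cover. A minimal scalar sketch of the two-row case handled by the tail loop, with illustrative names that are not the library's API:

#include <cstddef>
#include <cstdint>

// Illustrative scalar reference for a two-row interleave of 16-bit
// elements, the effect of the zip1/zip2 pairs in the tail loop above.
// out receives row0[0], row1[0], row0[1], row1[1], ...
static void interleave2_u16(uint16_t *out,
                            const uint16_t *row0,
                            const uint16_t *row1,
                            size_t width)
{
    for (size_t i = 0; i < width; i++)
    {
        out[2 * i + 0] = row0[i];
        out[2 * i + 1] = row1[i];
    }
}

The vector loop is the same operation eight elements at a time: zip1 produces the low half of the interleaved pair and zip2 the high half.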
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_2x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_2x4.hpp
index dafa53eec3..8c8dfd1d0d 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_2x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_2x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -42,454 +42,454 @@ void a64_transpose_interleave_16_2x4(uint16_t *out, const uint16_t *in, size_t w
"cmp %x[height], #0x8\n"
"blt 10f\n"
"1:" // Main row loop: Head
- "mov x9, %x[in]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
- "add x26, x27, %x[in_stride]\n"
+ "mov x28, %x[in]\n"
+ "mov x27, %x[out]\n"
+ "add x26, x28, %x[in_stride]\n"
"add x25, x26, %x[in_stride]\n"
- "mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "add x23, x24, %x[in_stride]\n"
"add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
- "cmp x24, #0x20\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x8\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x20\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ldr q23, [x9], #0x10\n"
- "ldr q29, [x28], #0x10\n"
- "sub x24, x24, #0x20\n"
- "cmp x24, #0x20\n"
- "ldr q13, [x27], #0x10\n"
- "ldr q12, [x26], #0x10\n"
- "zip1 v20.8h, v23.8h, v13.8h\n"
- "zip1 v28.8h, v29.8h, v12.8h\n"
- "ldr q18, [x25], #0x10\n"
+ "ldr q24, [x28], #0x10\n"
+ "sub x19, x19, #0x20\n"
+ "ldr q4, [x26], #0x10\n"
+ "cmp x19, #0x20\n"
+ "ldr q26, [x25], #0x10\n"
+ "zip1 v2.8h, v24.8h, v26.8h\n"
+ "ldr q3, [x28], #0x10\n"
+ "zip2 v9.8h, v24.8h, v26.8h\n"
+ "ldr q0, [x26], #0x10\n"
+ "ldr q22, [x25], #0x10\n"
+ "zip1 v31.8h, v3.8h, v22.8h\n"
+ "ldr q23, [x28], #0x10\n"
+ "zip2 v25.8h, v3.8h, v22.8h\n"
+ "ldr q22, [x26], #0x10\n"
+ "ldr q5, [x25], #0x10\n"
+ "zip1 v17.8h, v23.8h, v5.8h\n"
+ "ldr q19, [x28], #0x10\n"
+ "zip2 v20.8h, v23.8h, v5.8h\n"
+ "ldr q1, [x26], #0x10\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v15.8h, v19.8h, v16.8h\n"
+ "ldr q8, [x24], #0x10\n"
+ "zip2 v11.8h, v19.8h, v16.8h\n"
+ "ldr q26, [x23], #0x10\n"
+ "ldr q19, [x22], #0x10\n"
+ "zip1 v5.8h, v4.8h, v8.8h\n"
+ "ldr q18, [x24], #0x10\n"
+ "zip2 v6.8h, v4.8h, v8.8h\n"
+ "ldr q7, [x23], #0x10\n"
+ "zip1 v27.8h, v2.8h, v5.8h\n"
+ "ldr q23, [x22], #0x10\n"
+ "zip2 v8.8h, v2.8h, v5.8h\n"
+ "ldr q24, [x21], #0x10\n"
+ "zip1 v12.8h, v9.8h, v6.8h\n"
+ "ldr q13, [x24], #0x10\n"
+ "zip2 v16.8h, v9.8h, v6.8h\n"
"ldr q9, [x23], #0x10\n"
- "zip2 v22.8h, v23.8h, v13.8h\n"
- "zip2 v1.8h, v29.8h, v12.8h\n"
- "ldr q27, [x22], #0x10\n"
- "ldr q3, [x20], #0x10\n"
- "zip1 v4.8h, v18.8h, v27.8h\n"
- "zip1 v26.8h, v9.8h, v3.8h\n"
- "ldr q17, [x9], #0x10\n"
- "ldr q2, [x28], #0x10\n"
- "zip2 v15.8h, v18.8h, v27.8h\n"
- "zip2 v12.8h, v9.8h, v3.8h\n"
- "ldr q23, [x27], #0x10\n"
- "ldr q14, [x26], #0x10\n"
- "zip1 v19.8h, v17.8h, v23.8h\n"
- "zip1 v21.8h, v2.8h, v14.8h\n"
- "ldr q6, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v27.8h, v17.8h, v23.8h\n"
- "zip2 v17.8h, v2.8h, v14.8h\n"
- "ldr q0, [x22], #0x10\n"
- "ldr q3, [x20], #0x10\n"
- "zip1 v16.8h, v6.8h, v0.8h\n"
- "zip1 v30.8h, v18.8h, v3.8h\n"
- "ldr q2, [x9], #0x10\n"
- "ldr q13, [x28], #0x10\n"
- "zip2 v31.8h, v6.8h, v0.8h\n"
- "zip2 v8.8h, v18.8h, v3.8h\n"
- "ldr q14, [x27], #0x10\n"
- "ldr q3, [x26], #0x10\n"
- "zip1 v11.8h, v2.8h, v14.8h\n"
- "zip1 v29.8h, v13.8h, v3.8h\n"
- "ldr q25, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v23.8h, v2.8h, v14.8h\n"
- "zip2 v10.8h, v13.8h, v3.8h\n"
- "ldr q7, [x22], #0x10\n"
- "ldr q6, [x20], #0x10\n"
- "zip1 v14.8h, v25.8h, v7.8h\n"
- "zip1 v13.8h, v18.8h, v6.8h\n"
- "ldr q2, [x9], #0x10\n"
- "ldr q5, [x28], #0x10\n"
- "zip2 v9.8h, v25.8h, v7.8h\n"
- "zip2 v7.8h, v18.8h, v6.8h\n"
- "ldr q6, [x27], #0x10\n"
- "ldr q24, [x26], #0x10\n"
- "zip1 v25.8h, v2.8h, v6.8h\n"
- "zip1 v3.8h, v5.8h, v24.8h\n"
- "ldr q0, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v2.8h, v2.8h, v6.8h\n"
- "zip2 v24.8h, v5.8h, v24.8h\n"
- "ldr q5, [x22], #0x10\n"
- "zip1 v6.8h, v0.8h, v5.8h\n"
- "zip2 v5.8h, v0.8h, v5.8h\n"
- "zip1 v0.8h, v20.8h, v28.8h\n"
- "zip2 v28.8h, v20.8h, v28.8h\n"
- "ldr q20, [x20], #0x10\n"
- "str q0, [x21, #0x0]\n"
- "zip1 v0.8h, v18.8h, v20.8h\n"
- "zip2 v20.8h, v18.8h, v20.8h\n"
- "str q28, [x21, #0x10]\n"
- "zip1 v18.8h, v22.8h, v1.8h\n"
- "zip2 v28.8h, v22.8h, v1.8h\n"
- "str q18, [x21, #0x20]\n"
- "zip1 v22.8h, v19.8h, v21.8h\n"
- "zip2 v19.8h, v19.8h, v21.8h\n"
- "str q28, [x21, #0x30]\n"
- "zip1 v18.8h, v27.8h, v17.8h\n"
- "zip2 v17.8h, v27.8h, v17.8h\n"
- "str q22, [x21, #0x40]\n"
- "zip1 v27.8h, v4.8h, v26.8h\n"
- "zip2 v26.8h, v4.8h, v26.8h\n"
- "str q19, [x21, #0x50]\n"
- "zip1 v22.8h, v15.8h, v12.8h\n"
- "zip2 v21.8h, v15.8h, v12.8h\n"
- "str q18, [x21, #0x60]\n"
- "zip1 v19.8h, v16.8h, v30.8h\n"
- "zip2 v18.8h, v16.8h, v30.8h\n"
- "str q17, [x21, #0x70]\n"
- "zip1 v17.8h, v31.8h, v8.8h\n"
- "zip2 v16.8h, v31.8h, v8.8h\n"
- "str q27, [x21, #0x80]\n"
- "str q26, [x21, #0x90]\n"
- "zip1 v31.8h, v11.8h, v29.8h\n"
- "zip2 v30.8h, v11.8h, v29.8h\n"
- "str q22, [x21, #0xa0]\n"
- "zip1 v29.8h, v23.8h, v10.8h\n"
- "zip2 v28.8h, v23.8h, v10.8h\n"
- "str q21, [x21, #0xb0]\n"
- "zip1 v27.8h, v25.8h, v3.8h\n"
- "zip2 v26.8h, v25.8h, v3.8h\n"
- "str q19, [x21, #0xc0]\n"
- "zip1 v25.8h, v2.8h, v24.8h\n"
- "zip2 v24.8h, v2.8h, v24.8h\n"
- "str q18, [x21, #0xd0]\n"
- "zip1 v23.8h, v14.8h, v13.8h\n"
- "zip2 v22.8h, v14.8h, v13.8h\n"
- "str q17, [x21, #0xe0]\n"
- "zip1 v21.8h, v9.8h, v7.8h\n"
- "zip2 v19.8h, v9.8h, v7.8h\n"
- "str q16, [x21, #0xf0]\n"
- "add x21, x21, %x[out_stride]\n"
- "zip1 v2.8h, v6.8h, v0.8h\n"
- "zip2 v18.8h, v6.8h, v0.8h\n"
- "zip1 v17.8h, v5.8h, v20.8h\n"
- "zip2 v16.8h, v5.8h, v20.8h\n"
- "str q31, [x21, #0x0]\n"
- "str q30, [x21, #0x10]\n"
- "str q29, [x21, #0x20]\n"
- "str q28, [x21, #0x30]\n"
- "str q27, [x21, #0x40]\n"
- "str q26, [x21, #0x50]\n"
- "str q25, [x21, #0x60]\n"
- "str q24, [x21, #0x70]\n"
- "str q23, [x21, #0x80]\n"
- "str q22, [x21, #0x90]\n"
- "str q21, [x21, #0xa0]\n"
- "str q19, [x21, #0xb0]\n"
- "str q2, [x21, #0xc0]\n"
- "str q18, [x21, #0xd0]\n"
- "str q17, [x21, #0xe0]\n"
- "str q16, [x21, #0xf0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "zip1 v29.8h, v0.8h, v18.8h\n"
+ "ldr q10, [x22], #0x10\n"
+ "zip1 v14.8h, v31.8h, v29.8h\n"
+ "ldr q4, [x21], #0x10\n"
+ "zip2 v21.8h, v31.8h, v29.8h\n"
+ "ldr q6, [x24], #0x10\n"
+ "zip2 v18.8h, v0.8h, v18.8h\n"
+ "ldr q3, [x23], #0x10\n"
+ "zip1 v0.8h, v25.8h, v18.8h\n"
+ "ldr q31, [x22], #0x10\n"
+ "zip2 v29.8h, v25.8h, v18.8h\n"
+ "ldr q5, [x21], #0x10\n"
+ "zip1 v28.8h, v26.8h, v24.8h\n"
+ "ldr q25, [x20], #0x10\n"
+ "zip2 v26.8h, v26.8h, v24.8h\n"
+ "ldr q30, [x21], #0x10\n"
+ "zip1 v24.8h, v7.8h, v4.8h\n"
+ "zip2 v4.8h, v7.8h, v4.8h\n"
+ "ldr q18, [x20], #0x10\n"
+ "zip1 v7.8h, v19.8h, v25.8h\n"
+ "ldr q2, [x20], #0x10\n"
+ "zip2 v25.8h, v19.8h, v25.8h\n"
+ "zip1 v19.8h, v28.8h, v7.8h\n"
+ "zip2 v7.8h, v28.8h, v7.8h\n"
+ "ldr q28, [x20], #0x10\n"
+ "str q27, [x27, #0x0]\n"
+ "zip1 v27.8h, v26.8h, v25.8h\n"
+ "zip2 v26.8h, v26.8h, v25.8h\n"
+ "str q8, [x27, #0x10]\n"
+ "zip1 v25.8h, v23.8h, v18.8h\n"
+ "str q12, [x27, #0x20]\n"
+ "zip1 v8.8h, v24.8h, v25.8h\n"
+ "str q16, [x27, #0x30]\n"
+ "zip2 v25.8h, v24.8h, v25.8h\n"
+ "str q14, [x27, #0x40]\n"
+ "zip2 v12.8h, v23.8h, v18.8h\n"
+ "str q21, [x27, #0x50]\n"
+ "zip1 v21.8h, v4.8h, v12.8h\n"
+ "str q0, [x27, #0x60]\n"
+ "zip2 v14.8h, v4.8h, v12.8h\n"
+ "str q29, [x27, #0x70]\n"
+ "zip1 v12.8h, v22.8h, v13.8h\n"
+ "str q19, [x27, #0x80]\n"
+ "zip1 v24.8h, v17.8h, v12.8h\n"
+ "str q7, [x27, #0x90]\n"
+ "zip2 v23.8h, v17.8h, v12.8h\n"
+ "str q27, [x27, #0xa0]\n"
+ "zip2 v16.8h, v22.8h, v13.8h\n"
+ "str q26, [x27, #0xb0]\n"
+ "zip1 v19.8h, v20.8h, v16.8h\n"
+ "str q8, [x27, #0xc0]\n"
+ "zip2 v18.8h, v20.8h, v16.8h\n"
+ "str q25, [x27, #0xd0]\n"
+ "zip1 v16.8h, v1.8h, v6.8h\n"
+ "str q21, [x27, #0xe0]\n"
+ "zip1 v21.8h, v15.8h, v16.8h\n"
+ "str q14, [x27, #0xf0]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "zip2 v17.8h, v15.8h, v16.8h\n"
+ "str q24, [x27, #0x0]\n"
+ "zip2 v16.8h, v1.8h, v6.8h\n"
+ "str q23, [x27, #0x10]\n"
+ "zip1 v20.8h, v11.8h, v16.8h\n"
+ "str q19, [x27, #0x20]\n"
+ "zip2 v19.8h, v11.8h, v16.8h\n"
+ "str q18, [x27, #0x30]\n"
+ "zip1 v18.8h, v9.8h, v5.8h\n"
+ "str q21, [x27, #0x40]\n"
+ "zip1 v16.8h, v10.8h, v2.8h\n"
+ "str q17, [x27, #0x50]\n"
+ "zip1 v17.8h, v18.8h, v16.8h\n"
+ "str q20, [x27, #0x60]\n"
+ "zip2 v16.8h, v18.8h, v16.8h\n"
+ "str q19, [x27, #0x70]\n"
+ "zip2 v18.8h, v9.8h, v5.8h\n"
+ "str q17, [x27, #0x80]\n"
+ "zip2 v17.8h, v10.8h, v2.8h\n"
+ "str q16, [x27, #0x90]\n"
+ "zip1 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [x27, #0xa0]\n"
+ "zip2 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [x27, #0xb0]\n"
+ "zip1 v18.8h, v3.8h, v30.8h\n"
+ "zip1 v17.8h, v31.8h, v28.8h\n"
+ "zip1 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [x27, #0xc0]\n"
+ "zip2 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [x27, #0xd0]\n"
+ "zip2 v18.8h, v3.8h, v30.8h\n"
+ "zip2 v17.8h, v31.8h, v28.8h\n"
+ "zip1 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [x27, #0xe0]\n"
+ "zip2 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [x27, #0xf0]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cmp x24, #0x10\n"
+ "cmp x19, #0x10\n"
"blt 5f\n"
"4:" // Main row loop: Column loop
- "ldr q21, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "sub x24, x24, #0x10\n"
- "cmp x24, #0x10\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v3.8h, v21.8h, v17.8h\n"
- "zip1 v2.8h, v20.8h, v16.8h\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v1.8h, v21.8h, v17.8h\n"
- "zip2 v24.8h, v20.8h, v16.8h\n"
- "ldr q17, [x22], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "zip1 v0.8h, v19.8h, v17.8h\n"
- "zip1 v31.8h, v18.8h, v16.8h\n"
- "ldr q21, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "zip2 v30.8h, v19.8h, v17.8h\n"
- "zip2 v29.8h, v18.8h, v16.8h\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v23.8h, v21.8h, v17.8h\n"
- "zip1 v22.8h, v20.8h, v16.8h\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v21.8h, v21.8h, v17.8h\n"
- "zip2 v20.8h, v20.8h, v16.8h\n"
- "ldr q17, [x22], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "zip1 v28.8h, v19.8h, v17.8h\n"
- "zip1 v27.8h, v18.8h, v16.8h\n"
- "zip2 v26.8h, v19.8h, v17.8h\n"
- "zip2 v25.8h, v18.8h, v16.8h\n"
- "zip1 v16.8h, v3.8h, v2.8h\n"
- "zip2 v17.8h, v3.8h, v2.8h\n"
- "str q16, [x21, #0x0]\n"
- "zip1 v16.8h, v1.8h, v24.8h\n"
- "zip2 v19.8h, v1.8h, v24.8h\n"
- "str q17, [x21, #0x10]\n"
- "zip1 v18.8h, v23.8h, v22.8h\n"
- "zip2 v17.8h, v23.8h, v22.8h\n"
- "str q16, [x21, #0x20]\n"
- "zip1 v16.8h, v21.8h, v20.8h\n"
- "zip2 v24.8h, v21.8h, v20.8h\n"
- "str q19, [x21, #0x30]\n"
- "zip1 v23.8h, v0.8h, v31.8h\n"
- "zip2 v22.8h, v0.8h, v31.8h\n"
- "str q18, [x21, #0x40]\n"
- "zip1 v21.8h, v30.8h, v29.8h\n"
- "zip2 v20.8h, v30.8h, v29.8h\n"
- "str q17, [x21, #0x50]\n"
- "zip1 v19.8h, v28.8h, v27.8h\n"
- "zip2 v18.8h, v28.8h, v27.8h\n"
- "str q16, [x21, #0x60]\n"
- "zip1 v17.8h, v26.8h, v25.8h\n"
- "zip2 v16.8h, v26.8h, v25.8h\n"
- "str q24, [x21, #0x70]\n"
- "str q23, [x21, #0x80]\n"
- "str q22, [x21, #0x90]\n"
- "str q21, [x21, #0xa0]\n"
- "str q20, [x21, #0xb0]\n"
- "str q19, [x21, #0xc0]\n"
- "str q18, [x21, #0xd0]\n"
- "str q17, [x21, #0xe0]\n"
- "str q16, [x21, #0xf0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr q17, [x28], #0x10\n"
+ "sub x19, x19, #0x10\n"
+ "ldr q20, [x26], #0x10\n"
+ "cmp x19, #0x10\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v19.8h, v17.8h, v16.8h\n"
+ "ldr q18, [x28], #0x10\n"
+ "zip2 v22.8h, v17.8h, v16.8h\n"
+ "ldr q21, [x26], #0x10\n"
+ "ldr q17, [x25], #0x10\n"
+ "zip1 v0.8h, v18.8h, v17.8h\n"
+ "ldr q16, [x24], #0x10\n"
+ "zip2 v31.8h, v18.8h, v17.8h\n"
+ "ldr q30, [x23], #0x10\n"
+ "ldr q29, [x22], #0x10\n"
+ "zip1 v17.8h, v20.8h, v16.8h\n"
+ "ldr q18, [x24], #0x10\n"
+ "zip2 v16.8h, v20.8h, v16.8h\n"
+ "ldr q28, [x23], #0x10\n"
+ "zip1 v27.8h, v19.8h, v17.8h\n"
+ "ldr q26, [x22], #0x10\n"
+ "zip2 v20.8h, v19.8h, v17.8h\n"
+ "ldr q25, [x21], #0x10\n"
+ "zip1 v19.8h, v22.8h, v16.8h\n"
+ "ldr q24, [x20], #0x10\n"
+ "zip2 v23.8h, v22.8h, v16.8h\n"
+ "ldr q22, [x21], #0x10\n"
+ "zip1 v17.8h, v21.8h, v18.8h\n"
+ "zip2 v18.8h, v21.8h, v18.8h\n"
+ "ldr q21, [x20], #0x10\n"
+ "zip1 v16.8h, v0.8h, v17.8h\n"
+ "str q27, [x27, #0x0]\n"
+ "zip2 v17.8h, v0.8h, v17.8h\n"
+ "str q20, [x27, #0x10]\n"
+ "zip1 v20.8h, v31.8h, v18.8h\n"
+ "str q19, [x27, #0x20]\n"
+ "zip2 v19.8h, v31.8h, v18.8h\n"
+ "str q23, [x27, #0x30]\n"
+ "zip1 v18.8h, v30.8h, v25.8h\n"
+ "str q16, [x27, #0x40]\n"
+ "zip1 v16.8h, v29.8h, v24.8h\n"
+ "str q17, [x27, #0x50]\n"
+ "zip1 v17.8h, v18.8h, v16.8h\n"
+ "str q20, [x27, #0x60]\n"
+ "zip2 v16.8h, v18.8h, v16.8h\n"
+ "str q19, [x27, #0x70]\n"
+ "zip2 v18.8h, v30.8h, v25.8h\n"
+ "str q17, [x27, #0x80]\n"
+ "zip2 v17.8h, v29.8h, v24.8h\n"
+ "str q16, [x27, #0x90]\n"
+ "zip1 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [x27, #0xa0]\n"
+ "zip2 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [x27, #0xb0]\n"
+ "zip1 v18.8h, v28.8h, v22.8h\n"
+ "zip1 v17.8h, v26.8h, v21.8h\n"
+ "zip1 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [x27, #0xc0]\n"
+ "zip2 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [x27, #0xd0]\n"
+ "zip2 v18.8h, v28.8h, v22.8h\n"
+ "zip2 v17.8h, v26.8h, v21.8h\n"
+ "zip1 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [x27, #0xe0]\n"
+ "zip2 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [x27, #0xf0]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp x24, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
- "ldr d19, [x9], #0x8\n"
- "ldr d18, [x28], #0x8\n"
- "sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr d17, [x27], #0x8\n"
- "ldr d16, [x26], #0x8\n"
- "zip1 v17.8h, v19.8h, v17.8h\n"
- "zip1 v16.8h, v18.8h, v16.8h\n"
- "ldr d18, [x25], #0x8\n"
+ "ldr d17, [x28], #0x8\n"
+ "sub x19, x19, #0x4\n"
+ "ldr d18, [x26], #0x8\n"
+ "cmp x19, #0x4\n"
+ "ldr d16, [x25], #0x8\n"
+ "zip1 v17.8h, v17.8h, v16.8h\n"
+ "ldr d16, [x24], #0x8\n"
"ldr d21, [x23], #0x8\n"
- "zip1 v20.8h, v17.8h, v16.8h\n"
- "zip2 v19.8h, v17.8h, v16.8h\n"
- "ldr d17, [x22], #0x8\n"
+ "zip1 v16.8h, v18.8h, v16.8h\n"
+ "ldr d20, [x22], #0x8\n"
+ "ldr d19, [x21], #0x8\n"
+ "zip1 v18.8h, v17.8h, v16.8h\n"
+ "zip2 v17.8h, v17.8h, v16.8h\n"
"ldr d16, [x20], #0x8\n"
- "zip1 v18.8h, v18.8h, v17.8h\n"
- "zip1 v16.8h, v21.8h, v16.8h\n"
- "str q20, [x21, #0x0]\n"
- "zip1 v17.8h, v18.8h, v16.8h\n"
- "zip2 v16.8h, v18.8h, v16.8h\n"
- "str q19, [x21, #0x10]\n"
- "str q17, [x21, #0x80]\n"
- "str q16, [x21, #0x90]\n"
- "add x21, x21, #0x20\n"
+ "str q18, [x27, #0x0]\n"
+ "zip1 v18.8h, v21.8h, v19.8h\n"
+ "str q17, [x27, #0x10]\n"
+ "zip1 v17.8h, v20.8h, v16.8h\n"
+ "zip1 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [x27, #0x80]\n"
+ "zip2 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [x27, #0x90]\n"
+ "add x27, x27, #0x20\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
- "cmp x24, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
- "ldr h19, [x9], #0x2\n"
"ldr h18, [x28], #0x2\n"
- "sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr h17, [x27], #0x2\n"
- "ldr h16, [x26], #0x2\n"
- "zip1 v17.8h, v19.8h, v17.8h\n"
- "zip1 v16.8h, v18.8h, v16.8h\n"
- "ldr h20, [x25], #0x2\n"
- "ldr h19, [x23], #0x2\n"
- "zip1 v18.8h, v17.8h, v16.8h\n"
- "ldr h17, [x22], #0x2\n"
+ "sub x19, x19, #0x1\n"
+ "ldr h17, [x26], #0x2\n"
+ "cmp x19, #0x1\n"
+ "ldr h16, [x25], #0x2\n"
+ "zip1 v18.8h, v18.8h, v16.8h\n"
+ "ldr h16, [x24], #0x2\n"
+ "ldr h20, [x23], #0x2\n"
+ "zip1 v16.8h, v17.8h, v16.8h\n"
+ "ldr h19, [x22], #0x2\n"
+ "ldr h17, [x21], #0x2\n"
+ "zip1 v18.8h, v18.8h, v16.8h\n"
"ldr h16, [x20], #0x2\n"
"zip1 v17.8h, v20.8h, v17.8h\n"
+ "str d18, [x27, #0x0]\n"
"zip1 v16.8h, v19.8h, v16.8h\n"
- "str d18, [x21, #0x0]\n"
"zip1 v16.8h, v17.8h, v16.8h\n"
- "str d16, [x21, #0x80]\n"
- "add x21, x21, #0x8\n"
+ "str d16, [x27, #0x80]\n"
+ "add x27, x27, #0x8\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
- "cmp %x[height], #0x8\n"
"add %x[out], %x[out], #0x100\n"
+ "cmp %x[height], #0x8\n"
"bge 1b\n"
"cbz %x[height], 20f\n"
"10:" // Main loop skip
"11:" // Tail row loop: Head
- "mov x9, %x[in]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
- "mov x20, %x[width]\n"
- "add x26, x27, %x[in_stride]\n"
+ "mov x28, %x[in]\n"
+ "mov x27, %x[out]\n"
+ "add x26, x28, %x[in_stride]\n"
+ "add x25, x26, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "add %x[in], x24, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x26, %x[in_stride]\n"
- "csel x26, x26, %x[pad_row], GT\n"
- "csel x27, x27, %x[pad_row], GE\n"
+ "csel x24, x24, %x[pad_row], GT\n"
+ "csel x25, x25, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x28, x28, %x[pad_row], GT\n"
- "cmp x20, #0x20\n"
- "mov x21, %x[out]\n"
+ "csel x26, x26, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x4\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x20\n"
"blt 13f\n"
"12:" // Tail row loop: Unroll column loop
- "ldr q21, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "sub x20, x20, #0x20\n"
- "cmp x20, #0x20\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v4.8h, v21.8h, v17.8h\n"
- "zip1 v3.8h, v20.8h, v16.8h\n"
- "ldr q19, [x9], #0x10\n"
+ "ldr q17, [x28], #0x10\n"
+ "sub x19, x19, #0x20\n"
+ "ldr q19, [x26], #0x10\n"
+ "cmp x19, #0x20\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v20.8h, v17.8h, v16.8h\n"
"ldr q18, [x28], #0x10\n"
- "zip2 v2.8h, v21.8h, v17.8h\n"
- "zip2 v1.8h, v20.8h, v16.8h\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v0.8h, v19.8h, v17.8h\n"
- "zip1 v31.8h, v18.8h, v16.8h\n"
- "ldr q24, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "zip2 v30.8h, v19.8h, v17.8h\n"
- "zip2 v23.8h, v18.8h, v16.8h\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v22.8h, v24.8h, v17.8h\n"
- "zip1 v21.8h, v20.8h, v16.8h\n"
- "ldr q19, [x9], #0x10\n"
+ "zip2 v0.8h, v17.8h, v16.8h\n"
+ "ldr q31, [x26], #0x10\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v30.8h, v18.8h, v16.8h\n"
+ "ldr q17, [x28], #0x10\n"
+ "zip2 v29.8h, v18.8h, v16.8h\n"
+ "ldr q28, [x26], #0x10\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v27.8h, v17.8h, v16.8h\n"
"ldr q18, [x28], #0x10\n"
- "zip2 v29.8h, v24.8h, v17.8h\n"
- "zip2 v28.8h, v20.8h, v16.8h\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v27.8h, v19.8h, v17.8h\n"
- "zip1 v26.8h, v18.8h, v16.8h\n"
- "zip2 v25.8h, v19.8h, v17.8h\n"
- "zip2 v24.8h, v18.8h, v16.8h\n"
- "zip1 v16.8h, v4.8h, v3.8h\n"
- "zip2 v17.8h, v4.8h, v3.8h\n"
- "str q16, [x21, #0x0]\n"
- "zip1 v16.8h, v2.8h, v1.8h\n"
- "zip2 v20.8h, v2.8h, v1.8h\n"
- "str q17, [x21, #0x10]\n"
- "zip1 v19.8h, v0.8h, v31.8h\n"
- "zip2 v18.8h, v0.8h, v31.8h\n"
- "str q16, [x21, #0x20]\n"
- "zip1 v17.8h, v30.8h, v23.8h\n"
- "zip2 v16.8h, v30.8h, v23.8h\n"
- "str q20, [x21, #0x30]\n"
- "str q19, [x21, #0x40]\n"
- "zip1 v23.8h, v22.8h, v21.8h\n"
- "zip2 v22.8h, v22.8h, v21.8h\n"
- "str q18, [x21, #0x50]\n"
- "zip1 v21.8h, v29.8h, v28.8h\n"
- "zip2 v20.8h, v29.8h, v28.8h\n"
- "str q17, [x21, #0x60]\n"
- "zip1 v19.8h, v27.8h, v26.8h\n"
- "zip2 v18.8h, v27.8h, v26.8h\n"
- "str q16, [x21, #0x70]\n"
- "add x21, x21, %x[out_stride]\n"
- "zip1 v17.8h, v25.8h, v24.8h\n"
- "zip2 v16.8h, v25.8h, v24.8h\n"
- "str q23, [x21, #0x0]\n"
- "str q22, [x21, #0x10]\n"
- "str q21, [x21, #0x20]\n"
- "str q20, [x21, #0x30]\n"
- "str q19, [x21, #0x40]\n"
- "str q18, [x21, #0x50]\n"
- "str q17, [x21, #0x60]\n"
- "str q16, [x21, #0x70]\n"
- "add x21, x21, %x[out_stride]\n"
+ "zip2 v26.8h, v17.8h, v16.8h\n"
+ "ldr q25, [x26], #0x10\n"
+ "ldr q17, [x25], #0x10\n"
+ "zip1 v24.8h, v18.8h, v17.8h\n"
+ "ldr q16, [x24], #0x10\n"
+ "zip2 v23.8h, v18.8h, v17.8h\n"
+ "ldr q22, [x24], #0x10\n"
+ "zip1 v17.8h, v19.8h, v16.8h\n"
+ "zip2 v19.8h, v19.8h, v16.8h\n"
+ "ldr q21, [x24], #0x10\n"
+ "zip1 v16.8h, v20.8h, v17.8h\n"
+ "zip2 v17.8h, v20.8h, v17.8h\n"
+ "ldr q20, [x24], #0x10\n"
+ "zip1 v18.8h, v0.8h, v19.8h\n"
+ "zip2 v19.8h, v0.8h, v19.8h\n"
+ "str q16, [x27, #0x0]\n"
+ "zip1 v16.8h, v31.8h, v22.8h\n"
+ "str q17, [x27, #0x10]\n"
+ "zip1 v17.8h, v30.8h, v16.8h\n"
+ "str q18, [x27, #0x20]\n"
+ "zip2 v18.8h, v30.8h, v16.8h\n"
+ "str q19, [x27, #0x30]\n"
+ "zip2 v16.8h, v31.8h, v22.8h\n"
+ "str q17, [x27, #0x40]\n"
+ "zip1 v17.8h, v29.8h, v16.8h\n"
+ "str q18, [x27, #0x50]\n"
+ "zip2 v16.8h, v29.8h, v16.8h\n"
+ "str q17, [x27, #0x60]\n"
+ "zip1 v17.8h, v28.8h, v21.8h\n"
+ "str q16, [x27, #0x70]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "zip1 v16.8h, v27.8h, v17.8h\n"
+ "str q16, [x27, #0x0]\n"
+ "zip2 v16.8h, v27.8h, v17.8h\n"
+ "zip2 v17.8h, v28.8h, v21.8h\n"
+ "str q16, [x27, #0x10]\n"
+ "zip1 v16.8h, v26.8h, v17.8h\n"
+ "str q16, [x27, #0x20]\n"
+ "zip2 v16.8h, v26.8h, v17.8h\n"
+ "str q16, [x27, #0x30]\n"
+ "zip1 v17.8h, v25.8h, v20.8h\n"
+ "zip1 v16.8h, v24.8h, v17.8h\n"
+ "str q16, [x27, #0x40]\n"
+ "zip2 v16.8h, v24.8h, v17.8h\n"
+ "str q16, [x27, #0x50]\n"
+ "zip2 v17.8h, v25.8h, v20.8h\n"
+ "zip1 v16.8h, v23.8h, v17.8h\n"
+ "str q16, [x27, #0x60]\n"
+ "zip2 v16.8h, v23.8h, v17.8h\n"
+ "str q16, [x27, #0x70]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 12b\n"
"13:" // Tail row loop: Unroll column loop skip
- "cmp x20, #0x10\n"
+ "cmp x19, #0x10\n"
"blt 15f\n"
"14:" // Tail row loop: Column loop
- "ldr q19, [x9], #0x10\n"
+ "ldr q17, [x28], #0x10\n"
+ "sub x19, x19, #0x10\n"
+ "ldr q25, [x26], #0x10\n"
+ "cmp x19, #0x10\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v24.8h, v17.8h, v16.8h\n"
"ldr q18, [x28], #0x10\n"
- "sub x20, x20, #0x10\n"
- "cmp x20, #0x10\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v25.8h, v19.8h, v17.8h\n"
- "zip1 v24.8h, v18.8h, v16.8h\n"
- "ldr q22, [x9], #0x10\n"
- "ldr q21, [x28], #0x10\n"
- "zip2 v20.8h, v19.8h, v17.8h\n"
- "zip2 v19.8h, v18.8h, v16.8h\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v23.8h, v22.8h, v17.8h\n"
- "zip1 v18.8h, v21.8h, v16.8h\n"
- "zip2 v22.8h, v22.8h, v17.8h\n"
- "zip2 v21.8h, v21.8h, v16.8h\n"
- "zip1 v16.8h, v25.8h, v24.8h\n"
- "zip2 v17.8h, v25.8h, v24.8h\n"
- "str q16, [x21, #0x0]\n"
- "zip1 v16.8h, v20.8h, v19.8h\n"
- "zip2 v20.8h, v20.8h, v19.8h\n"
- "str q17, [x21, #0x10]\n"
- "zip1 v19.8h, v23.8h, v18.8h\n"
- "zip2 v18.8h, v23.8h, v18.8h\n"
- "str q16, [x21, #0x20]\n"
- "zip1 v17.8h, v22.8h, v21.8h\n"
- "zip2 v16.8h, v22.8h, v21.8h\n"
- "str q20, [x21, #0x30]\n"
- "str q19, [x21, #0x40]\n"
- "str q18, [x21, #0x50]\n"
- "str q17, [x21, #0x60]\n"
- "str q16, [x21, #0x70]\n"
- "add x21, x21, %x[out_stride]\n"
+ "zip2 v23.8h, v17.8h, v16.8h\n"
+ "ldr q22, [x26], #0x10\n"
+ "ldr q17, [x25], #0x10\n"
+ "zip1 v21.8h, v18.8h, v17.8h\n"
+ "ldr q16, [x24], #0x10\n"
+ "zip2 v20.8h, v18.8h, v17.8h\n"
+ "ldr q19, [x24], #0x10\n"
+ "zip1 v18.8h, v25.8h, v16.8h\n"
+ "zip2 v17.8h, v25.8h, v16.8h\n"
+ "zip1 v16.8h, v24.8h, v18.8h\n"
+ "str q16, [x27, #0x0]\n"
+ "zip2 v16.8h, v24.8h, v18.8h\n"
+ "str q16, [x27, #0x10]\n"
+ "zip1 v16.8h, v23.8h, v17.8h\n"
+ "str q16, [x27, #0x20]\n"
+ "zip2 v16.8h, v23.8h, v17.8h\n"
+ "str q16, [x27, #0x30]\n"
+ "zip1 v17.8h, v22.8h, v19.8h\n"
+ "zip1 v16.8h, v21.8h, v17.8h\n"
+ "str q16, [x27, #0x40]\n"
+ "zip2 v16.8h, v21.8h, v17.8h\n"
+ "str q16, [x27, #0x50]\n"
+ "zip2 v17.8h, v22.8h, v19.8h\n"
+ "zip1 v16.8h, v20.8h, v17.8h\n"
+ "str q16, [x27, #0x60]\n"
+ "zip2 v16.8h, v20.8h, v17.8h\n"
+ "str q16, [x27, #0x70]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 14b\n"
"15:" // Tail row loop: Column loop skip
- "cmp x20, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 17f\n"
"16:" // Tail row loop: width 4 loop: loop
- "ldr d18, [x9], #0x8\n"
- "ldr d19, [x28], #0x8\n"
- "sub x20, x20, #0x4\n"
- "cmp x20, #0x4\n"
- "ldr d17, [x27], #0x8\n"
- "ldr d16, [x26], #0x8\n"
- "zip1 v18.8h, v18.8h, v17.8h\n"
- "zip1 v16.8h, v19.8h, v16.8h\n"
- "zip1 v17.8h, v18.8h, v16.8h\n"
- "zip2 v16.8h, v18.8h, v16.8h\n"
- "str q17, [x21, #0x0]\n"
- "str q16, [x21, #0x10]\n"
- "add x21, x21, #0x20\n"
+ "ldr d18, [x28], #0x8\n"
+ "sub x19, x19, #0x4\n"
+ "ldr d17, [x26], #0x8\n"
+ "cmp x19, #0x4\n"
+ "ldr d16, [x25], #0x8\n"
+ "zip1 v18.8h, v18.8h, v16.8h\n"
+ "ldr d16, [x24], #0x8\n"
+ "zip1 v17.8h, v17.8h, v16.8h\n"
+ "zip1 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [x27, #0x0]\n"
+ "zip2 v16.8h, v18.8h, v17.8h\n"
+ "str q16, [x27, #0x10]\n"
+ "add x27, x27, #0x20\n"
"bge 16b\n"
"17:" // Tail row loop: width 4 loop: skip
- "cmp x20, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 19f\n"
"18:" // Tail row loop: width 1 loop: loop
- "ldr h19, [x9], #0x2\n"
- "ldr h18, [x28], #0x2\n"
- "sub x20, x20, #0x1\n"
- "cmp x20, #0x1\n"
- "ldr h17, [x27], #0x2\n"
- "ldr h16, [x26], #0x2\n"
- "zip1 v17.8h, v19.8h, v17.8h\n"
+ "ldr h17, [x28], #0x2\n"
+ "sub x19, x19, #0x1\n"
+ "ldr h18, [x26], #0x2\n"
+ "cmp x19, #0x1\n"
+ "ldr h16, [x25], #0x2\n"
+ "zip1 v17.8h, v17.8h, v16.8h\n"
+ "ldr h16, [x24], #0x2\n"
"zip1 v16.8h, v18.8h, v16.8h\n"
"zip1 v16.8h, v17.8h, v16.8h\n"
- "str d16, [x21, #0x0]\n"
- "add x21, x21, #0x8\n"
+ "str d16, [x27, #0x0]\n"
+ "add x27, x27, #0x8\n"
"bge 18b\n"
"19:" // Tail row loop: width 1 loop: skip
- "cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x80\n"
+ "cmp %x[height], #0x1\n"
"bge 11b\n"
"20:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
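
The 2x4 variant above extends the same packing to four rows: rows 0/2 and rows 1/3 are zipped first, and zipping those intermediate results yields a full four-way interleave. A scalar sketch under the same caveats (illustrative names, not the library's API):

#include <cstddef>
#include <cstdint>

// Illustrative scalar reference for the four-row interleave produced by
// the two zip stages above: out receives r0[i], r1[i], r2[i], r3[i], ...
static void interleave4_u16(uint16_t *out,
                            const uint16_t *rows[4],
                            size_t width)
{
    for (size_t i = 0; i < width; i++)
    {
        for (int r = 0; r < 4; r++)
        {
            out[4 * i + r] = rows[r][i];
        }
    }
}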
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_2x4_fp32bf16.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_2x4_fp32bf16.hpp
index e012d0920f..2ecf03c4c1 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_2x4_fp32bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_2x4_fp32bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -42,391 +42,391 @@ void a64_transpose_interleave_16_2x4_fp32bf16(bfloat16 *out, const float *in, si
"cmp %x[height], #0x8\n"
"blt 8f\n"
"1:" // Main row loop: Head
- "mov x9, %x[in]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
- "add x26, x27, %x[in_stride]\n"
+ "mov x28, %x[in]\n"
+ "mov x27, %x[out]\n"
+ "add x26, x28, %x[in_stride]\n"
"add x25, x26, %x[in_stride]\n"
- "mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "add x23, x24, %x[in_stride]\n"
"add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
- "cmp x24, #0x10\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x8\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x10\n"
"blt 3f\n"
"2:" // Main row loop: Column loop
- "ldr q13, [x9], #0x10\n"
- "ldr q12, [x28], #0x10\n"
- "sub x24, x24, #0x10\n"
- "cmp x24, #0x10\n"
- "ldr q1, [x27], #0x10\n"
- "ldr q9, [x26], #0x10\n"
- "zip1 v19.4s, v13.4s, v1.4s\n"
- "zip1 v14.4s, v12.4s, v9.4s\n"
- "ldr q15, [x25], #0x10\n"
- "ldr q4, [x23], #0x10\n"
- "zip2 v8.4s, v13.4s, v1.4s\n"
- "zip2 v28.4s, v12.4s, v9.4s\n"
- "ldr q0, [x22], #0x10\n"
- "ldr q1, [x20], #0x10\n"
- "zip1 v16.4s, v15.4s, v0.4s\n"
- "zip1 v5.4s, v4.4s, v1.4s\n"
- "ldr q25, [x9], #0x10\n"
- "ldr q24, [x28], #0x10\n"
- "zip2 v3.4s, v15.4s, v0.4s\n"
- "zip2 v2.4s, v4.4s, v1.4s\n"
- "ldr q21, [x27], #0x10\n"
- "ldr q30, [x26], #0x10\n"
- "zip1 v18.4s, v25.4s, v21.4s\n"
- "zip1 v27.4s, v24.4s, v30.4s\n"
- "ldr q22, [x25], #0x10\n"
- "ldr q20, [x23], #0x10\n"
- "zip2 v9.4s, v25.4s, v21.4s\n"
- "zip2 v10.4s, v24.4s, v30.4s\n"
- "ldr q1, [x22], #0x10\n"
- "ldr q21, [x20], #0x10\n"
- "zip1 v25.4s, v22.4s, v1.4s\n"
- "zip1 v7.4s, v20.4s, v21.4s\n"
- "ldr q31, [x9], #0x10\n"
"ldr q17, [x28], #0x10\n"
- "zip2 v30.4s, v22.4s, v1.4s\n"
- "zip2 v20.4s, v20.4s, v21.4s\n"
- "ldr q15, [x27], #0x10\n"
- "ldr q24, [x26], #0x10\n"
- "zip1 v6.4s, v31.4s, v15.4s\n"
- "zip1 v4.4s, v17.4s, v24.4s\n"
- "ldr q12, [x25], #0x10\n"
- "ldr q29, [x23], #0x10\n"
- "zip2 v22.4s, v31.4s, v15.4s\n"
- "zip2 v26.4s, v17.4s, v24.4s\n"
- "ldr q0, [x22], #0x10\n"
+ "sub x19, x19, #0x10\n"
+ "ldr q19, [x26], #0x10\n"
+ "cmp x19, #0x10\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v24.4s, v17.4s, v16.4s\n"
+ "ldr q18, [x28], #0x10\n"
+ "zip2 v23.4s, v17.4s, v16.4s\n"
+ "ldr q22, [x26], #0x10\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v21.4s, v18.4s, v16.4s\n"
+ "ldr q17, [x28], #0x10\n"
+ "zip2 v14.4s, v18.4s, v16.4s\n"
+ "ldr q13, [x26], #0x10\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v12.4s, v17.4s, v16.4s\n"
+ "ldr q18, [x28], #0x10\n"
+ "zip2 v11.4s, v17.4s, v16.4s\n"
+ "ldr q10, [x26], #0x10\n"
+ "ldr q17, [x25], #0x10\n"
+ "zip1 v9.4s, v18.4s, v17.4s\n"
+ "ldr q16, [x24], #0x10\n"
+ "zip2 v8.4s, v18.4s, v17.4s\n"
+ "ldr q7, [x23], #0x10\n"
+ "ldr q6, [x22], #0x10\n"
+ "zip1 v17.4s, v19.4s, v16.4s\n"
+ "ldr q20, [x24], #0x10\n"
+ "zip2 v19.4s, v19.4s, v16.4s\n"
+ "ldr q5, [x23], #0x10\n"
+ "zip1 v16.4s, v24.4s, v17.4s\n"
+ "ldr q4, [x22], #0x10\n"
+ ".inst 0x0ea16a03 // bfcvtn v3.4h, v16.4s\n"
+ "ldr q2, [x21], #0x10\n"
+ "zip2 v17.4s, v24.4s, v17.4s\n"
+ "ldr q1, [x24], #0x10\n"
+ "zip1 v16.4s, v23.4s, v19.4s\n"
+ "ldr q0, [x23], #0x10\n"
+ ".inst 0x4ea16a23 // bfcvtn2 v3.8h, v17.4s\n"
+ "ldr q31, [x22], #0x10\n"
+ ".inst 0x0ea16a12 // bfcvtn v18.4h, v16.4s\n"
+ "ldr q30, [x21], #0x10\n"
+ "zip2 v16.4s, v23.4s, v19.4s\n"
+ "ldr q29, [x24], #0x10\n"
+ "zip1 v17.4s, v22.4s, v20.4s\n"
+ "ldr q28, [x23], #0x10\n"
+ ".inst 0x4ea16a12 // bfcvtn2 v18.8h, v16.4s\n"
+ "ldr q27, [x22], #0x10\n"
+ "zip1 v16.4s, v21.4s, v17.4s\n"
+ "ldr q26, [x21], #0x10\n"
+ ".inst 0x0ea16a19 // bfcvtn v25.4h, v16.4s\n"
"ldr q24, [x20], #0x10\n"
- "zip1 v17.4s, v12.4s, v0.4s\n"
- "zip1 v31.4s, v29.4s, v24.4s\n"
- "ldr q21, [x9], #0x10\n"
- "ldr q1, [x28], #0x10\n"
- "zip2 v23.4s, v12.4s, v0.4s\n"
- "zip2 v24.4s, v29.4s, v24.4s\n"
- "ldr q11, [x27], #0x10\n"
- "ldr q29, [x26], #0x10\n"
- "zip1 v0.4s, v21.4s, v11.4s\n"
- "zip1 v13.4s, v1.4s, v29.4s\n"
- "ldr q15, [x25], #0x10\n"
- "ldr q12, [x23], #0x10\n"
- "zip2 v21.4s, v21.4s, v11.4s\n"
- "zip2 v29.4s, v1.4s, v29.4s\n"
- "ldr q1, [x22], #0x10\n"
- "zip1 v11.4s, v15.4s, v1.4s\n"
- "zip2 v1.4s, v15.4s, v1.4s\n"
- "zip1 v15.4s, v19.4s, v14.4s\n"
- ".inst 0x0ea169ef // bfcvtn v15.4h, v15.4s\n"
- "zip2 v14.4s, v19.4s, v14.4s\n"
- "ldr q19, [x20], #0x10\n"
- ".inst 0x4ea169cf // bfcvtn2 v15.8h, v14.4s\n"
- "str q15, [x21, #0x0]\n"
- "zip1 v14.4s, v12.4s, v19.4s\n"
- "zip2 v15.4s, v12.4s, v19.4s\n"
- "zip1 v12.4s, v8.4s, v28.4s\n"
- "zip1 v19.4s, v18.4s, v27.4s\n"
- ".inst 0x0ea1698c // bfcvtn v12.4h, v12.4s\n"
- "zip2 v28.4s, v8.4s, v28.4s\n"
- "zip1 v8.4s, v9.4s, v10.4s\n"
- ".inst 0x0ea16a73 // bfcvtn v19.4h, v19.4s\n"
- "zip2 v18.4s, v18.4s, v27.4s\n"
- "zip1 v27.4s, v6.4s, v4.4s\n"
- ".inst 0x0ea16908 // bfcvtn v8.4h, v8.4s\n"
- "zip2 v10.4s, v9.4s, v10.4s\n"
- "zip1 v9.4s, v22.4s, v26.4s\n"
- ".inst 0x0ea16b7b // bfcvtn v27.4h, v27.4s\n"
- "zip2 v6.4s, v6.4s, v4.4s\n"
- "zip1 v4.4s, v0.4s, v13.4s\n"
- ".inst 0x0ea16929 // bfcvtn v9.4h, v9.4s\n"
- "zip2 v22.4s, v22.4s, v26.4s\n"
- "zip1 v26.4s, v21.4s, v29.4s\n"
- ".inst 0x0ea16884 // bfcvtn v4.4h, v4.4s\n"
- "zip2 v13.4s, v0.4s, v13.4s\n"
- "zip1 v0.4s, v16.4s, v5.4s\n"
- ".inst 0x0ea16b5a // bfcvtn v26.4h, v26.4s\n"
- "zip2 v21.4s, v21.4s, v29.4s\n"
- "zip1 v29.4s, v3.4s, v2.4s\n"
- ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
- "zip2 v5.4s, v16.4s, v5.4s\n"
- "zip1 v16.4s, v25.4s, v7.4s\n"
- ".inst 0x0ea16bbd // bfcvtn v29.4h, v29.4s\n"
- "zip2 v2.4s, v3.4s, v2.4s\n"
- "zip1 v3.4s, v30.4s, v20.4s\n"
- ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
- "zip2 v7.4s, v25.4s, v7.4s\n"
- "zip1 v25.4s, v17.4s, v31.4s\n"
- ".inst 0x0ea16863 // bfcvtn v3.4h, v3.4s\n"
- "zip2 v30.4s, v30.4s, v20.4s\n"
- "zip1 v20.4s, v23.4s, v24.4s\n"
- ".inst 0x0ea16b39 // bfcvtn v25.4h, v25.4s\n"
- "zip2 v17.4s, v17.4s, v31.4s\n"
- "zip1 v31.4s, v11.4s, v14.4s\n"
- ".inst 0x0ea16a94 // bfcvtn v20.4h, v20.4s\n"
- "zip2 v24.4s, v23.4s, v24.4s\n"
- "zip1 v23.4s, v1.4s, v15.4s\n"
- ".inst 0x0ea16bff // bfcvtn v31.4h, v31.4s\n"
- "zip2 v14.4s, v11.4s, v14.4s\n"
- ".inst 0x0ea16af7 // bfcvtn v23.4h, v23.4s\n"
- "zip2 v1.4s, v1.4s, v15.4s\n"
- ".inst 0x4ea16b8c // bfcvtn2 v12.8h, v28.4s\n"
- "str q12, [x21, #0x10]\n"
+ "zip2 v16.4s, v21.4s, v17.4s\n"
+ "ldr q23, [x21], #0x10\n"
+ ".inst 0x4ea16a19 // bfcvtn2 v25.8h, v16.4s\n"
+ "zip2 v17.4s, v22.4s, v20.4s\n"
+ "ldr q22, [x20], #0x10\n"
+ "zip1 v16.4s, v14.4s, v17.4s\n"
+ "ldr q21, [x20], #0x10\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v16.4s, v14.4s, v17.4s\n"
+ "ldr q20, [x20], #0x10\n"
+ ".inst 0x4ea16a13 // bfcvtn2 v19.8h, v16.4s\n"
+ "zip1 v17.4s, v13.4s, v1.4s\n"
+ "str q3, [x27, #0x0]\n"
+ "zip1 v16.4s, v12.4s, v17.4s\n"
+ "str q18, [x27, #0x10]\n"
+ ".inst 0x0ea16a12 // bfcvtn v18.4h, v16.4s\n"
+ "str q25, [x27, #0x20]\n"
+ "zip2 v16.4s, v12.4s, v17.4s\n"
+ "str q19, [x27, #0x30]\n"
+ "zip2 v17.4s, v13.4s, v1.4s\n"
+ ".inst 0x4ea16a12 // bfcvtn2 v18.8h, v16.4s\n"
+ "str q18, [x27, #0x40]\n"
+ "zip1 v16.4s, v11.4s, v17.4s\n"
+ "zip2 v19.4s, v11.4s, v17.4s\n"
+ ".inst 0x0ea16a12 // bfcvtn v18.4h, v16.4s\n"
+ "zip1 v17.4s, v10.4s, v29.4s\n"
+ "zip1 v16.4s, v9.4s, v17.4s\n"
+ ".inst 0x4ea16a72 // bfcvtn2 v18.8h, v19.4s\n"
+ "str q18, [x27, #0x50]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v18.4s, v9.4s, v17.4s\n"
+ "zip2 v17.4s, v10.4s, v29.4s\n"
+ "zip1 v16.4s, v8.4s, v17.4s\n"
".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
- ".inst 0x4ea16948 // bfcvtn2 v8.8h, v10.4s\n"
- "str q19, [x21, #0x20]\n"
- ".inst 0x4ea168db // bfcvtn2 v27.8h, v6.4s\n"
- ".inst 0x4ea16ac9 // bfcvtn2 v9.8h, v22.4s\n"
- "str q8, [x21, #0x30]\n"
- ".inst 0x4ea169a4 // bfcvtn2 v4.8h, v13.4s\n"
- ".inst 0x4ea16aba // bfcvtn2 v26.8h, v21.4s\n"
- "str q27, [x21, #0x40]\n"
- ".inst 0x4ea168a0 // bfcvtn2 v0.8h, v5.4s\n"
- ".inst 0x4ea1685d // bfcvtn2 v29.8h, v2.4s\n"
- "str q9, [x21, #0x50]\n"
- ".inst 0x4ea168f0 // bfcvtn2 v16.8h, v7.4s\n"
- ".inst 0x4ea16bc3 // bfcvtn2 v3.8h, v30.4s\n"
- "str q4, [x21, #0x60]\n"
- ".inst 0x4ea16a39 // bfcvtn2 v25.8h, v17.4s\n"
- ".inst 0x4ea16b14 // bfcvtn2 v20.8h, v24.4s\n"
- "str q26, [x21, #0x70]\n"
- ".inst 0x4ea169df // bfcvtn2 v31.8h, v14.4s\n"
- ".inst 0x4ea16837 // bfcvtn2 v23.8h, v1.4s\n"
- "str q0, [x21, #0x80]\n"
- "str q29, [x21, #0x90]\n"
- "str q16, [x21, #0xa0]\n"
- "str q3, [x21, #0xb0]\n"
- "str q25, [x21, #0xc0]\n"
- "str q20, [x21, #0xd0]\n"
- "str q31, [x21, #0xe0]\n"
- "str q23, [x21, #0xf0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q19, [x27, #0x60]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v16.4s, v8.4s, v17.4s\n"
+ "zip1 v18.4s, v7.4s, v2.4s\n"
+ "zip1 v17.4s, v6.4s, v24.4s\n"
+ ".inst 0x4ea16a13 // bfcvtn2 v19.8h, v16.4s\n"
+ "str q19, [x27, #0x70]\n"
+ "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v19.4s, v18.4s, v17.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ "zip2 v18.4s, v7.4s, v2.4s\n"
+ "zip2 v17.4s, v6.4s, v24.4s\n"
+ ".inst 0x4ea16a70 // bfcvtn2 v16.8h, v19.4s\n"
+ "str q16, [x27, #0x80]\n"
+ "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v19.4s, v18.4s, v17.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ "zip1 v18.4s, v5.4s, v30.4s\n"
+ "zip1 v17.4s, v4.4s, v22.4s\n"
+ ".inst 0x4ea16a70 // bfcvtn2 v16.8h, v19.4s\n"
+ "str q16, [x27, #0x90]\n"
+ "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v19.4s, v18.4s, v17.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ "zip2 v18.4s, v5.4s, v30.4s\n"
+ "zip2 v17.4s, v4.4s, v22.4s\n"
+ ".inst 0x4ea16a70 // bfcvtn2 v16.8h, v19.4s\n"
+ "str q16, [x27, #0xa0]\n"
+ "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v19.4s, v18.4s, v17.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ "zip1 v18.4s, v0.4s, v26.4s\n"
+ "zip1 v17.4s, v31.4s, v21.4s\n"
+ ".inst 0x4ea16a70 // bfcvtn2 v16.8h, v19.4s\n"
+ "str q16, [x27, #0xb0]\n"
+ "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v19.4s, v18.4s, v17.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ "zip2 v18.4s, v0.4s, v26.4s\n"
+ "zip2 v17.4s, v31.4s, v21.4s\n"
+ ".inst 0x4ea16a70 // bfcvtn2 v16.8h, v19.4s\n"
+ "str q16, [x27, #0xc0]\n"
+ "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v19.4s, v18.4s, v17.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ "zip1 v18.4s, v28.4s, v23.4s\n"
+ "zip1 v17.4s, v27.4s, v20.4s\n"
+ ".inst 0x4ea16a70 // bfcvtn2 v16.8h, v19.4s\n"
+ "str q16, [x27, #0xd0]\n"
+ "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v19.4s, v18.4s, v17.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ "zip2 v18.4s, v28.4s, v23.4s\n"
+ "zip2 v17.4s, v27.4s, v20.4s\n"
+ ".inst 0x4ea16a70 // bfcvtn2 v16.8h, v19.4s\n"
+ "str q16, [x27, #0xe0]\n"
+ "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v17.4s, v18.4s, v17.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ ".inst 0x4ea16a30 // bfcvtn2 v16.8h, v17.4s\n"
+ "str q16, [x27, #0xf0]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Column loop skip
- "cmp x24, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 5f\n"
"4:" // Main row loop: width 4 loop: loop
- "ldr q23, [x9], #0x10\n"
"ldr q20, [x28], #0x10\n"
- "sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v22.4s, v23.4s, v17.4s\n"
- "zip1 v21.4s, v20.4s, v16.4s\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v28.4s, v23.4s, v17.4s\n"
- "zip2 v20.4s, v20.4s, v16.4s\n"
- "ldr q17, [x22], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "zip1 v27.4s, v19.4s, v17.4s\n"
- "zip1 v26.4s, v18.4s, v16.4s\n"
- "zip2 v25.4s, v19.4s, v17.4s\n"
- "zip2 v24.4s, v18.4s, v16.4s\n"
- "zip1 v19.4s, v22.4s, v21.4s\n"
- "zip1 v18.4s, v28.4s, v20.4s\n"
- "zip1 v17.4s, v27.4s, v26.4s\n"
- "zip1 v16.4s, v25.4s, v24.4s\n"
- ".inst 0x0ea16a77 // bfcvtn v23.4h, v19.4s\n"
- "zip2 v22.4s, v22.4s, v21.4s\n"
- ".inst 0x0ea16a55 // bfcvtn v21.4h, v18.4s\n"
- "zip2 v20.4s, v28.4s, v20.4s\n"
- ".inst 0x0ea16a33 // bfcvtn v19.4h, v17.4s\n"
- "zip2 v18.4s, v27.4s, v26.4s\n"
- ".inst 0x0ea16a11 // bfcvtn v17.4h, v16.4s\n"
- "zip2 v16.4s, v25.4s, v24.4s\n"
- ".inst 0x4ea16ad7 // bfcvtn2 v23.8h, v22.4s\n"
- ".inst 0x4ea16a95 // bfcvtn2 v21.8h, v20.4s\n"
- "str q23, [x21, #0x0]\n"
- ".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
- ".inst 0x4ea16a11 // bfcvtn2 v17.8h, v16.4s\n"
- "str q21, [x21, #0x10]\n"
- "str q19, [x21, #0x80]\n"
- "str q17, [x21, #0x90]\n"
- "add x21, x21, #0x20\n"
+ "sub x19, x19, #0x4\n"
+ "ldr q18, [x26], #0x10\n"
+ "cmp x19, #0x4\n"
+ "ldr q17, [x25], #0x10\n"
+ "zip1 v19.4s, v20.4s, v17.4s\n"
+ "ldr q16, [x24], #0x10\n"
+ "zip2 v25.4s, v20.4s, v17.4s\n"
+ "ldr q24, [x23], #0x10\n"
+ "ldr q23, [x22], #0x10\n"
+ "zip1 v17.4s, v18.4s, v16.4s\n"
+ "ldr q22, [x21], #0x10\n"
+ "zip2 v21.4s, v18.4s, v16.4s\n"
+ "ldr q20, [x20], #0x10\n"
+ "zip1 v16.4s, v19.4s, v17.4s\n"
+ ".inst 0x0ea16a12 // bfcvtn v18.4h, v16.4s\n"
+ "zip2 v17.4s, v19.4s, v17.4s\n"
+ "zip1 v16.4s, v25.4s, v21.4s\n"
+ ".inst 0x4ea16a32 // bfcvtn2 v18.8h, v17.4s\n"
+ "str q18, [x27, #0x0]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v16.4s, v25.4s, v21.4s\n"
+ "zip1 v18.4s, v24.4s, v22.4s\n"
+ "zip1 v17.4s, v23.4s, v20.4s\n"
+ ".inst 0x4ea16a13 // bfcvtn2 v19.8h, v16.4s\n"
+ "str q19, [x27, #0x10]\n"
+ "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v19.4s, v18.4s, v17.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ "zip2 v18.4s, v24.4s, v22.4s\n"
+ "zip2 v17.4s, v23.4s, v20.4s\n"
+ ".inst 0x4ea16a70 // bfcvtn2 v16.8h, v19.4s\n"
+ "str q16, [x27, #0x80]\n"
+ "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v17.4s, v18.4s, v17.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ ".inst 0x4ea16a30 // bfcvtn2 v16.8h, v17.4s\n"
+ "str q16, [x27, #0x90]\n"
+ "add x27, x27, #0x20\n"
"bge 4b\n"
"5:" // Main row loop: width 4 loop: skip
- "cmp x24, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 7f\n"
"6:" // Main row loop: width 1 loop: loop
- "ldr s19, [x9], #0x4\n"
"ldr s18, [x28], #0x4\n"
- "sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr s17, [x27], #0x4\n"
- "ldr s16, [x26], #0x4\n"
- "zip1 v17.4s, v19.4s, v17.4s\n"
- "zip1 v16.4s, v18.4s, v16.4s\n"
- "ldr s20, [x25], #0x4\n"
- "ldr s19, [x23], #0x4\n"
+ "sub x19, x19, #0x1\n"
+ "ldr s17, [x26], #0x4\n"
+ "cmp x19, #0x1\n"
+ "ldr s16, [x25], #0x4\n"
+ "zip1 v18.4s, v18.4s, v16.4s\n"
+ "ldr s16, [x24], #0x4\n"
+ "ldr s20, [x23], #0x4\n"
"zip1 v16.4s, v17.4s, v16.4s\n"
- ".inst 0x0ea16a12 // bfcvtn v18.4h, v16.4s\n"
- "ldr s17, [x22], #0x4\n"
- "ldr s16, [x20], #0x4\n"
+ "ldr s19, [x22], #0x4\n"
+ "ldr s17, [x21], #0x4\n"
+ "zip1 v16.4s, v18.4s, v16.4s\n"
+ "ldr s18, [x20], #0x4\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
"zip1 v17.4s, v20.4s, v17.4s\n"
- "zip1 v16.4s, v19.4s, v16.4s\n"
+ "str d16, [x27, #0x0]\n"
+ "zip1 v16.4s, v19.4s, v18.4s\n"
"zip1 v16.4s, v17.4s, v16.4s\n"
".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
- "str d18, [x21, #0x0]\n"
- "str d16, [x21, #0x80]\n"
- "add x21, x21, #0x8\n"
+ "str d16, [x27, #0x80]\n"
+ "add x27, x27, #0x8\n"
"bge 6b\n"
"7:" // Main row loop: width 1 loop: skip
- "cmp %x[height], #0x8\n"
"add %x[out], %x[out], #0x100\n"
+ "cmp %x[height], #0x8\n"
"bge 1b\n"
"cbz %x[height], 16f\n"
"8:" // Main loop skip
"9:" // Tail row loop: Head
- "mov x9, %x[in]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
- "mov x20, %x[width]\n"
- "add x26, x27, %x[in_stride]\n"
+ "mov x28, %x[in]\n"
+ "mov x27, %x[out]\n"
+ "add x26, x28, %x[in_stride]\n"
+ "add x25, x26, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "add %x[in], x24, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x26, %x[in_stride]\n"
- "csel x26, x26, %x[pad_row], GT\n"
- "csel x27, x27, %x[pad_row], GE\n"
+ "csel x24, x24, %x[pad_row], GT\n"
+ "csel x25, x25, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x28, x28, %x[pad_row], GT\n"
- "cmp x20, #0x10\n"
- "mov x21, %x[out]\n"
+ "csel x26, x26, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x4\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x10\n"
"blt 11f\n"
"10:" // Tail row loop: Column loop
- "ldr q21, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "sub x20, x20, #0x10\n"
- "cmp x20, #0x10\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v30.4s, v21.4s, v17.4s\n"
- "zip1 v29.4s, v20.4s, v16.4s\n"
- "ldr q19, [x9], #0x10\n"
+ "ldr q17, [x28], #0x10\n"
+ "sub x19, x19, #0x10\n"
+ "ldr q20, [x26], #0x10\n"
+ "cmp x19, #0x10\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v19.4s, v17.4s, v16.4s\n"
"ldr q18, [x28], #0x10\n"
- "zip2 v28.4s, v21.4s, v17.4s\n"
- "zip2 v27.4s, v20.4s, v16.4s\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v26.4s, v19.4s, v17.4s\n"
- "zip1 v25.4s, v18.4s, v16.4s\n"
- "ldr q21, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "zip2 v8.4s, v19.4s, v17.4s\n"
- "zip2 v24.4s, v18.4s, v16.4s\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v7.4s, v21.4s, v17.4s\n"
- "zip1 v6.4s, v20.4s, v16.4s\n"
- "ldr q19, [x9], #0x10\n"
+ "zip2 v1.4s, v17.4s, v16.4s\n"
+ "ldr q0, [x26], #0x10\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v31.4s, v18.4s, v16.4s\n"
+ "ldr q17, [x28], #0x10\n"
+ "zip2 v30.4s, v18.4s, v16.4s\n"
+ "ldr q29, [x26], #0x10\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v28.4s, v17.4s, v16.4s\n"
"ldr q18, [x28], #0x10\n"
- "zip2 v5.4s, v21.4s, v17.4s\n"
- "zip2 v4.4s, v20.4s, v16.4s\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v3.4s, v19.4s, v17.4s\n"
- "zip1 v2.4s, v18.4s, v16.4s\n"
- "zip2 v1.4s, v19.4s, v17.4s\n"
- "zip2 v0.4s, v18.4s, v16.4s\n"
- "zip1 v23.4s, v30.4s, v29.4s\n"
- "zip1 v22.4s, v28.4s, v27.4s\n"
- "zip1 v21.4s, v26.4s, v25.4s\n"
- "zip1 v20.4s, v8.4s, v24.4s\n"
- "zip1 v19.4s, v7.4s, v6.4s\n"
- "zip1 v18.4s, v5.4s, v4.4s\n"
- "zip1 v17.4s, v3.4s, v2.4s\n"
- "zip1 v16.4s, v1.4s, v0.4s\n"
- ".inst 0x0ea16aff // bfcvtn v31.4h, v23.4s\n"
- "zip2 v30.4s, v30.4s, v29.4s\n"
- ".inst 0x0ea16add // bfcvtn v29.4h, v22.4s\n"
- "zip2 v28.4s, v28.4s, v27.4s\n"
- ".inst 0x0ea16abb // bfcvtn v27.4h, v21.4s\n"
- "zip2 v26.4s, v26.4s, v25.4s\n"
- ".inst 0x0ea16a99 // bfcvtn v25.4h, v20.4s\n"
- "zip2 v24.4s, v8.4s, v24.4s\n"
- ".inst 0x0ea16a77 // bfcvtn v23.4h, v19.4s\n"
- "zip2 v22.4s, v7.4s, v6.4s\n"
- ".inst 0x0ea16a55 // bfcvtn v21.4h, v18.4s\n"
- "zip2 v20.4s, v5.4s, v4.4s\n"
- ".inst 0x0ea16a33 // bfcvtn v19.4h, v17.4s\n"
- "zip2 v18.4s, v3.4s, v2.4s\n"
+ "zip2 v27.4s, v17.4s, v16.4s\n"
+ "ldr q26, [x26], #0x10\n"
+ "ldr q17, [x25], #0x10\n"
+ "zip1 v25.4s, v18.4s, v17.4s\n"
+ "ldr q16, [x24], #0x10\n"
+ "zip2 v24.4s, v18.4s, v17.4s\n"
+ "ldr q23, [x24], #0x10\n"
+ "zip1 v17.4s, v20.4s, v16.4s\n"
+ "zip2 v22.4s, v20.4s, v16.4s\n"
+ "ldr q21, [x24], #0x10\n"
+ "zip1 v16.4s, v19.4s, v17.4s\n"
+ "zip2 v19.4s, v19.4s, v17.4s\n"
+ "ldr q20, [x24], #0x10\n"
".inst 0x0ea16a11 // bfcvtn v17.4h, v16.4s\n"
- "zip2 v16.4s, v1.4s, v0.4s\n"
- ".inst 0x4ea16bdf // bfcvtn2 v31.8h, v30.4s\n"
- ".inst 0x4ea16b9d // bfcvtn2 v29.8h, v28.4s\n"
- "str q31, [x21, #0x0]\n"
- ".inst 0x4ea16b5b // bfcvtn2 v27.8h, v26.4s\n"
- ".inst 0x4ea16b19 // bfcvtn2 v25.8h, v24.4s\n"
- "str q29, [x21, #0x10]\n"
- ".inst 0x4ea16ad7 // bfcvtn2 v23.8h, v22.4s\n"
- ".inst 0x4ea16a95 // bfcvtn2 v21.8h, v20.4s\n"
- "str q27, [x21, #0x20]\n"
+ "zip1 v16.4s, v1.4s, v22.4s\n"
+ ".inst 0x0ea16a12 // bfcvtn v18.4h, v16.4s\n"
+ "zip2 v16.4s, v1.4s, v22.4s\n"
+ ".inst 0x4ea16a71 // bfcvtn2 v17.8h, v19.4s\n"
+ "str q17, [x27, #0x0]\n"
+ ".inst 0x4ea16a12 // bfcvtn2 v18.8h, v16.4s\n"
+ "zip1 v17.4s, v0.4s, v23.4s\n"
+ "str q18, [x27, #0x10]\n"
+ "zip1 v16.4s, v31.4s, v17.4s\n"
+ "zip2 v19.4s, v31.4s, v17.4s\n"
+ ".inst 0x0ea16a12 // bfcvtn v18.4h, v16.4s\n"
+ "zip2 v17.4s, v0.4s, v23.4s\n"
+ "zip1 v16.4s, v30.4s, v17.4s\n"
+ ".inst 0x4ea16a72 // bfcvtn2 v18.8h, v19.4s\n"
+ "str q18, [x27, #0x20]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v18.4s, v30.4s, v17.4s\n"
+ "zip1 v17.4s, v29.4s, v21.4s\n"
+ "zip1 v16.4s, v28.4s, v17.4s\n"
+ ".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
+ "str q19, [x27, #0x30]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v18.4s, v28.4s, v17.4s\n"
+ "zip2 v17.4s, v29.4s, v21.4s\n"
+ "zip1 v16.4s, v27.4s, v17.4s\n"
+ ".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
+ "str q19, [x27, #0x40]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v18.4s, v27.4s, v17.4s\n"
+ "zip1 v17.4s, v26.4s, v20.4s\n"
+ "zip1 v16.4s, v25.4s, v17.4s\n"
".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
+ "str q19, [x27, #0x50]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v17.4s, v25.4s, v17.4s\n"
+ "zip2 v18.4s, v26.4s, v20.4s\n"
+ "zip1 v16.4s, v24.4s, v18.4s\n"
+ ".inst 0x4ea16a33 // bfcvtn2 v19.8h, v17.4s\n"
+ "str q19, [x27, #0x60]\n"
+ ".inst 0x0ea16a11 // bfcvtn v17.4h, v16.4s\n"
+ "zip2 v16.4s, v24.4s, v18.4s\n"
".inst 0x4ea16a11 // bfcvtn2 v17.8h, v16.4s\n"
- "str q25, [x21, #0x30]\n"
- "str q23, [x21, #0x40]\n"
- "str q21, [x21, #0x50]\n"
- "str q19, [x21, #0x60]\n"
- "str q17, [x21, #0x70]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q17, [x27, #0x70]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 10b\n"
"11:" // Tail row loop: Column loop skip
- "cmp x20, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 13f\n"
"12:" // Tail row loop: width 4 loop: loop
- "ldr q20, [x9], #0x10\n"
"ldr q19, [x28], #0x10\n"
- "sub x20, x20, #0x4\n"
- "cmp x20, #0x4\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v22.4s, v20.4s, v17.4s\n"
- "zip1 v18.4s, v19.4s, v16.4s\n"
- "zip2 v21.4s, v20.4s, v17.4s\n"
- "zip2 v20.4s, v19.4s, v16.4s\n"
- "zip1 v17.4s, v22.4s, v18.4s\n"
- "zip1 v16.4s, v21.4s, v20.4s\n"
- ".inst 0x0ea16a33 // bfcvtn v19.4h, v17.4s\n"
- "zip2 v18.4s, v22.4s, v18.4s\n"
+ "sub x19, x19, #0x4\n"
+ "ldr q18, [x26], #0x10\n"
+ "cmp x19, #0x4\n"
+ "ldr q17, [x25], #0x10\n"
+ "zip1 v21.4s, v19.4s, v17.4s\n"
+ "ldr q16, [x24], #0x10\n"
+ "zip2 v20.4s, v19.4s, v17.4s\n"
+ "zip1 v17.4s, v18.4s, v16.4s\n"
+ "zip2 v19.4s, v18.4s, v16.4s\n"
+ "zip1 v16.4s, v21.4s, v17.4s\n"
+ ".inst 0x0ea16a12 // bfcvtn v18.4h, v16.4s\n"
+ "zip2 v17.4s, v21.4s, v17.4s\n"
+ "zip1 v16.4s, v20.4s, v19.4s\n"
+ ".inst 0x4ea16a32 // bfcvtn2 v18.8h, v17.4s\n"
+ "str q18, [x27, #0x0]\n"
".inst 0x0ea16a11 // bfcvtn v17.4h, v16.4s\n"
- "zip2 v16.4s, v21.4s, v20.4s\n"
- ".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
+ "zip2 v16.4s, v20.4s, v19.4s\n"
".inst 0x4ea16a11 // bfcvtn2 v17.8h, v16.4s\n"
- "str q19, [x21, #0x0]\n"
- "str q17, [x21, #0x10]\n"
- "add x21, x21, #0x20\n"
+ "str q17, [x27, #0x10]\n"
+ "add x27, x27, #0x20\n"
"bge 12b\n"
"13:" // Tail row loop: width 4 loop: skip
- "cmp x20, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 15f\n"
"14:" // Tail row loop: width 1 loop: loop
- "ldr s19, [x9], #0x4\n"
- "ldr s18, [x28], #0x4\n"
- "sub x20, x20, #0x1\n"
- "cmp x20, #0x1\n"
- "ldr s17, [x27], #0x4\n"
- "ldr s16, [x26], #0x4\n"
- "zip1 v17.4s, v19.4s, v17.4s\n"
+ "ldr s17, [x28], #0x4\n"
+ "sub x19, x19, #0x1\n"
+ "ldr s18, [x26], #0x4\n"
+ "cmp x19, #0x1\n"
+ "ldr s16, [x25], #0x4\n"
+ "zip1 v17.4s, v17.4s, v16.4s\n"
+ "ldr s16, [x24], #0x4\n"
"zip1 v16.4s, v18.4s, v16.4s\n"
"zip1 v16.4s, v17.4s, v16.4s\n"
".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
- "str d16, [x21, #0x0]\n"
- "add x21, x21, #0x8\n"
+ "str d16, [x27, #0x0]\n"
+ "add x27, x27, #0x8\n"
"bge 14b\n"
"15:" // Tail row loop: width 1 loop: skip
- "cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x80\n"
+ "cmp %x[height], #0x1\n"
"bge 9b\n"
"16:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
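
The .inst words in the fp32bf16 kernel above hand-encode BFCVTN/BFCVTN2, which narrow fp32 lanes to bfloat16; emitting the raw encodings (with the mnemonic kept in a comment) lets the file assemble on toolchains without BF16 support. A scalar sketch of the conversion, assuming the default round-to-nearest-even mode and with NaN handling simplified for brevity:

#include <cstdint>
#include <cstring>

// Illustrative scalar equivalent of one BFCVTN lane: fp32 -> bf16 by
// rounding the discarded low 16 bits to nearest, ties to even.
static uint16_t fp32_to_bf16(float f)
{
    uint32_t bits;
    std::memcpy(&bits, &f, sizeof(bits));
    // Add 0x7FFF plus the LSB of the retained half for ties-to-even.
    uint32_t rounding = 0x7FFFu + ((bits >> 16) & 1u);
    return static_cast<uint16_t>((bits + rounding) >> 16);
}

BFCVTN fills the low four half-words of the destination vector and BFCVTN2 the high four, which is why the conversions appear in pairs above.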
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24.hpp
index 20f9d39f4e..9f3ab95108 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -36,194 +36,195 @@ void a64_transpose_interleave_24(uint16_t *out, const uint16_t *in, size_t width
"cmp %x[height], #0x4\n"
"blt 10f\n"
"1:" // Main row loop: Head
- "mov x25, %x[in]\n"
- "mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
- "add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
- "cmp x24, #0x18\n"
+ "mov x24, %x[in]\n"
+ "mov x23, %x[out]\n"
+ "add x22, x24, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x18\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ldr q1, [x25], #0x10\n"
- "ldr q0, [x22], #0x10\n"
- "sub x24, x24, #0x18\n"
- "cmp x24, #0x18\n"
- "ldr q17, [x25], #0x10\n"
- "ldr q31, [x23], #0x10\n"
- "dup v30.2d, v17.d[0]\n"
- "dup v29.2d, v31.d[1]\n"
- "ldr q16, [x22], #0x10\n"
- "ldr q28, [x20], #0x10\n"
+ "ldr q29, [x24], #0x10\n"
+ "sub x19, x19, #0x18\n"
+ "ldr q18, [x22], #0x10\n"
+ "dup v28.2d, v18.d[1]\n"
+ "ldr q16, [x24], #0x10\n"
+ "cmp x19, #0x18\n"
"dup v27.2d, v16.d[0]\n"
- "dup v26.2d, v28.d[1]\n"
- "ldr q25, [x25], #0x10\n"
- "ldr q24, [x22], #0x10\n"
- "dup v23.2d, v17.d[1]\n"
- "dup v22.2d, v25.d[1]\n"
- "ldr q21, [x23], #0x10\n"
+ "ldr q17, [x24], #0x10\n"
+ "dup v26.2d, v16.d[1]\n"
+ "ldr q16, [x22], #0x10\n"
+ "mov v27.d[1], v18.d[0]\n"
+ "ldr q25, [x21], #0x10\n"
+ "ldr q24, [x20], #0x10\n"
+ "mov v26.d[1], v17.d[0]\n"
+ "ldr q23, [x22], #0x10\n"
+ "mov v28.d[1], v16.d[0]\n"
+ "dup v22.2d, v17.d[1]\n"
+ "ldr q17, [x21], #0x10\n"
+ "dup v21.2d, v24.d[1]\n"
"ldr q20, [x20], #0x10\n"
- "dup v19.2d, v16.d[1]\n"
- "dup v18.2d, v24.d[1]\n"
- "ldr q17, [x23], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "mov v30.d[1], v31.d[0]\n"
- "mov v29.d[1], v21.d[0]\n"
- "mov v27.d[1], v28.d[0]\n"
- "mov v26.d[1], v20.d[0]\n"
- "str q1, [x21, #0x0]\n"
- "str q30, [x21, #0x10]\n"
- "mov v23.d[1], v25.d[0]\n"
- "mov v22.d[1], v21.d[1]\n"
- "str q29, [x21, #0x20]\n"
+ "mov v22.d[1], v16.d[1]\n"
+ "ldr q16, [x21], #0x10\n"
+ "dup v19.2d, v17.d[0]\n"
+ "dup v18.2d, v17.d[1]\n"
+ "ldr q17, [x20], #0x10\n"
"mov v19.d[1], v24.d[0]\n"
- "mov v18.d[1], v20.d[1]\n"
- "str q0, [x21, #0x30]\n"
- "str q27, [x21, #0x40]\n"
- "str q26, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
- "str q23, [x21, #0x0]\n"
- "str q22, [x21, #0x10]\n"
- "str q17, [x21, #0x20]\n"
- "str q19, [x21, #0x30]\n"
- "str q18, [x21, #0x40]\n"
- "str q16, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q29, [x23, #0x0]\n"
+ "mov v21.d[1], v20.d[0]\n"
+ "str q27, [x23, #0x10]\n"
+ "str q28, [x23, #0x20]\n"
+ "mov v18.d[1], v16.d[0]\n"
+ "dup v16.2d, v16.d[1]\n"
+ "str q25, [x23, #0x30]\n"
+ "mov v16.d[1], v20.d[1]\n"
+ "str q19, [x23, #0x40]\n"
+ "str q21, [x23, #0x50]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "str q26, [x23, #0x0]\n"
+ "str q22, [x23, #0x10]\n"
+ "str q23, [x23, #0x20]\n"
+ "str q18, [x23, #0x30]\n"
+ "str q16, [x23, #0x40]\n"
+ "str q17, [x23, #0x50]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cmp x24, #0xc\n"
+ "cmp x19, #0xc\n"
"blt 5f\n"
"4:" // Main row loop: Column loop
- "ldr q17, [x23], #0x10\n"
- "ldr q23, [x20], #0x10\n"
- "dup v22.2d, v17.d[1]\n"
- "dup v21.2d, v23.d[1]\n"
- "ldr q20, [x25], #0x10\n"
- "ldr q19, [x22], #0x10\n"
- "sub x24, x24, #0xc\n"
- "cmp x24, #0xc\n"
- "ldr d18, [x25], #0x8\n"
- "ldr d16, [x23], #0x8\n"
- "mov v18.d[1], v17.d[0]\n"
- "mov v22.d[1], v16.d[0]\n"
- "ldr d17, [x22], #0x8\n"
- "ldr d16, [x20], #0x8\n"
- "mov v17.d[1], v23.d[0]\n"
+ "ldr q22, [x24], #0x10\n"
+ "sub x19, x19, #0xc\n"
+ "ldr q16, [x22], #0x10\n"
+ "dup v21.2d, v16.d[1]\n"
+ "ldr d20, [x24], #0x8\n"
+ "cmp x19, #0xc\n"
+ "mov v20.d[1], v16.d[0]\n"
+ "ldr d16, [x22], #0x8\n"
+ "ldr q19, [x21], #0x10\n"
"mov v21.d[1], v16.d[0]\n"
- "str q20, [x21, #0x0]\n"
- "str q18, [x21, #0x10]\n"
- "str q22, [x21, #0x20]\n"
- "str q19, [x21, #0x30]\n"
- "str q17, [x21, #0x40]\n"
- "str q21, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr d18, [x21], #0x8\n"
+ "ldr q16, [x20], #0x10\n"
+ "mov v18.d[1], v16.d[0]\n"
+ "ldr d17, [x20], #0x8\n"
+ "dup v16.2d, v16.d[1]\n"
+ "str q22, [x23, #0x0]\n"
+ "str q20, [x23, #0x10]\n"
+ "mov v16.d[1], v17.d[0]\n"
+ "str q21, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "str q18, [x23, #0x40]\n"
+ "str q16, [x23, #0x50]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp x24, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
- "ldr d19, [x25], #0x8\n"
- "ldr d18, [x23], #0x8\n"
- "sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr d17, [x22], #0x8\n"
+ "ldr d19, [x24], #0x8\n"
+ "sub x19, x19, #0x4\n"
+ "ldr d18, [x22], #0x8\n"
+ "cmp x19, #0x4\n"
+ "ldr d17, [x21], #0x8\n"
"ldr d16, [x20], #0x8\n"
- "str d19, [x21, #0x0]\n"
- "str d18, [x21, #0x18]\n"
- "str d17, [x21, #0x30]\n"
- "str d16, [x21, #0x48]\n"
- "add x21, x21, #0x8\n"
+ "str d19, [x23, #0x0]\n"
+ "str d18, [x23, #0x18]\n"
+ "str d17, [x23, #0x30]\n"
+ "str d16, [x23, #0x48]\n"
+ "add x23, x23, #0x8\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
- "cmp x24, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
- "ldr h19, [x25], #0x2\n"
- "ldr h18, [x23], #0x2\n"
- "sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr h17, [x22], #0x2\n"
+ "ldr h19, [x24], #0x2\n"
+ "sub x19, x19, #0x1\n"
+ "ldr h18, [x22], #0x2\n"
+ "cmp x19, #0x1\n"
+ "ldr h17, [x21], #0x2\n"
"ldr h16, [x20], #0x2\n"
- "str h19, [x21, #0x0]\n"
- "str h18, [x21, #0x18]\n"
- "str h17, [x21, #0x30]\n"
- "str h16, [x21, #0x48]\n"
- "add x21, x21, #0x2\n"
+ "str h19, [x23, #0x0]\n"
+ "str h18, [x23, #0x18]\n"
+ "str h17, [x23, #0x30]\n"
+ "str h16, [x23, #0x48]\n"
+ "add x23, x23, #0x2\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
- "cmp %x[height], #0x4\n"
"add %x[out], %x[out], #0x60\n"
+ "cmp %x[height], #0x4\n"
"bge 1b\n"
"cbz %x[height], 20f\n"
"10:" // Main loop skip
"11:" // Tail row loop: Head
- "mov x20, %x[width]\n"
- "mov x25, %x[in]\n"
- "cmp x20, #0x18\n"
- "add %x[in], x25, %x[in_stride]\n"
- "mov x21, %x[out]\n"
+ "mov x24, %x[in]\n"
+ "mov x23, %x[out]\n"
+ "add %x[in], x24, %x[in_stride]\n"
"sub %x[height], %x[height], #0x1\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x18\n"
"blt 13f\n"
"12:" // Tail row loop: Unroll column loop
- "ldr q19, [x25], #0x10\n"
- "ldr q16, [x25], #0x10\n"
- "dup v18.2d, v16.d[1]\n"
- "sub x20, x20, #0x18\n"
- "ldr q17, [x25], #0x10\n"
- "dup v16.2d, v16.d[0]\n"
- "str q19, [x21, #0x0]\n"
- "cmp x20, #0x18\n"
- "str d16, [x21, #0x10]\n"
- "add x21, x21, %x[out_stride]\n"
- "mov v18.d[1], v17.d[0]\n"
+ "ldr q19, [x24], #0x10\n"
+ "sub x19, x19, #0x18\n"
+ "cmp x19, #0x18\n"
+ "ldr q16, [x24], #0x10\n"
+ "dup v18.2d, v16.d[0]\n"
+ "ldr q17, [x24], #0x10\n"
+ "dup v16.2d, v16.d[1]\n"
+ "str q19, [x23, #0x0]\n"
+ "str d18, [x23, #0x10]\n"
+ "mov v16.d[1], v17.d[0]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "str q16, [x23, #0x0]\n"
"dup v16.2d, v17.d[1]\n"
- "str q18, [x21, #0x0]\n"
- "str d16, [x21, #0x10]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str d16, [x23, #0x10]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 12b\n"
"13:" // Tail row loop: Unroll column loop skip
- "cmp x20, #0xc\n"
+ "cmp x19, #0xc\n"
"blt 15f\n"
"14:" // Tail row loop: Column loop
- "ldr q17, [x25], #0x10\n"
- "ldr d16, [x25], #0x8\n"
- "sub x20, x20, #0xc\n"
- "cmp x20, #0xc\n"
- "str q17, [x21, #0x0]\n"
- "str d16, [x21, #0x10]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr q17, [x24], #0x10\n"
+ "sub x19, x19, #0xc\n"
+ "cmp x19, #0xc\n"
+ "ldr d16, [x24], #0x8\n"
+ "str q17, [x23, #0x0]\n"
+ "str d16, [x23, #0x10]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 14b\n"
"15:" // Tail row loop: Column loop skip
- "cmp x20, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 17f\n"
"16:" // Tail row loop: width 4 loop: loop
- "ldr d16, [x25], #0x8\n"
- "sub x20, x20, #0x4\n"
- "cmp x20, #0x4\n"
- "str d16, [x21, #0x0]\n"
- "add x21, x21, #0x8\n"
+ "ldr d16, [x24], #0x8\n"
+ "sub x19, x19, #0x4\n"
+ "cmp x19, #0x4\n"
+ "str d16, [x23, #0x0]\n"
+ "add x23, x23, #0x8\n"
"bge 16b\n"
"17:" // Tail row loop: width 4 loop: skip
- "cmp x20, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 19f\n"
"18:" // Tail row loop: width 1 loop: loop
- "ldr h16, [x25], #0x2\n"
- "sub x20, x20, #0x1\n"
- "cmp x20, #0x1\n"
- "str h16, [x21, #0x0]\n"
- "add x21, x21, #0x2\n"
+ "ldr h16, [x24], #0x2\n"
+ "sub x19, x19, #0x1\n"
+ "cmp x19, #0x1\n"
+ "str h16, [x23, #0x0]\n"
+ "add x23, x23, #0x2\n"
"bge 18b\n"
"19:" // Tail row loop: width 1 loop: skip
- "cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x18\n"
+ "cmp %x[height], #0x1\n"
"bge 11b\n"
"20:" // Done
+
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
- : "cc", "memory", "v0", "v1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
+ : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "x19", "x20", "x21", "x22", "x23", "x24"
);
}
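This kernel moves data rather than converting it: per block, each of the four input rows contributes a 12-element (24-byte) slice — visible in the `#0x18`/`#0x30`/`#0x48` store offsets — and the output pointer then advances by `out_stride`. A minimal scalar sketch of that layout, assuming strides are given in elements and exactly four rows; the assembly's tail loops for short heights are omitted:

#include <algorithm>
#include <cstddef>
#include <cstdint>

// Scalar sketch: four rows, one 12-element slice per row per output block.
static void transpose_interleave_24_ref(uint16_t *out, const uint16_t *in,
                                        size_t width, size_t in_stride,
                                        size_t out_stride)
{
    constexpr size_t block = 12;                    // 24 bytes of uint16_t
    for (size_t x = 0; x < width; x += block)
    {
        const size_t n = std::min(block, width - x);
        for (size_t row = 0; row < 4; row++)
        {
            const uint16_t *src = in + row * in_stride + x;
            std::copy(src, src + n, out + row * block);  // row slot at 24B * row
        }
        out += out_stride;                          // next block of the panel
    }
}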
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24_2x4_fp32bf16.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24_2x4_fp32bf16.hpp
index 22d68acd51..101be7e843 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24_2x4_fp32bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24_2x4_fp32bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -42,730 +42,731 @@ void a64_transpose_interleave_24_2x4_fp32bf16(bfloat16 *out, const float *in, si
"cmp %x[height], #0x8\n"
"blt 10f\n"
"1:" // Main row loop: Head
- "mov x9, %x[in]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
- "add x26, x27, %x[in_stride]\n"
+ "mov x28, %x[in]\n"
+ "mov x27, %x[out]\n"
+ "add x26, x28, %x[in_stride]\n"
"add x25, x26, %x[in_stride]\n"
- "mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "add x23, x24, %x[in_stride]\n"
"add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
- "cmp x24, #0x18\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x8\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x18\n"
"blt 3f\n"
"2:" // Main row loop: Column loop
- "ldr q15, [x9], #0x10\n"
- "ldr q1, [x28], #0x10\n"
- "sub x24, x24, #0x18\n"
- "cmp x24, #0x18\n"
- "ldr q0, [x27], #0x10\n"
+ "ldr q3, [x28], #0x10\n"
+ "sub x19, x19, #0x18\n"
"ldr q27, [x26], #0x10\n"
- "zip1 v18.4s, v15.4s, v0.4s\n"
- "zip1 v20.4s, v1.4s, v27.4s\n"
- "ldr q13, [x25], #0x10\n"
- "ldr q14, [x23], #0x10\n"
- "zip2 v16.4s, v15.4s, v0.4s\n"
- "zip2 v3.4s, v1.4s, v27.4s\n"
- "ldr q12, [x22], #0x10\n"
- "ldr q11, [x20], #0x10\n"
- "zip1 v4.4s, v13.4s, v12.4s\n"
- "zip1 v28.4s, v14.4s, v11.4s\n"
- "ldr q5, [x9], #0x10\n"
- "ldr q30, [x28], #0x10\n"
- "zip2 v23.4s, v13.4s, v12.4s\n"
- "zip2 v19.4s, v14.4s, v11.4s\n"
- "ldr q25, [x27], #0x10\n"
- "ldr q11, [x26], #0x10\n"
- "zip1 v21.4s, v5.4s, v25.4s\n"
- "zip1 v14.4s, v30.4s, v11.4s\n"
- "ldr q6, [x25], #0x10\n"
- "ldr q27, [x23], #0x10\n"
- "zip2 v29.4s, v5.4s, v25.4s\n"
- "zip2 v17.4s, v30.4s, v11.4s\n"
- "ldr q2, [x22], #0x10\n"
- "ldr q10, [x20], #0x10\n"
- "zip1 v11.4s, v6.4s, v2.4s\n"
- "zip1 v1.4s, v27.4s, v10.4s\n"
- "ldr q8, [x9], #0x10\n"
- "ldr q5, [x28], #0x10\n"
- "zip2 v24.4s, v6.4s, v2.4s\n"
- "zip2 v0.4s, v27.4s, v10.4s\n"
- "ldr q6, [x27], #0x10\n"
- "ldr q31, [x26], #0x10\n"
- "zip1 v12.4s, v8.4s, v6.4s\n"
- "zip1 v10.4s, v5.4s, v31.4s\n"
- "ldr q30, [x25], #0x10\n"
- "ldr q2, [x23], #0x10\n"
- "zip2 v9.4s, v8.4s, v6.4s\n"
- "zip2 v13.4s, v5.4s, v31.4s\n"
- "ldr q7, [x22], #0x10\n"
- "ldr q8, [x20], #0x10\n"
- "zip1 v27.4s, v30.4s, v7.4s\n"
- "zip1 v31.4s, v2.4s, v8.4s\n"
- "ldr q5, [x9], #0x10\n"
- "ldr q26, [x28], #0x10\n"
- "zip2 v22.4s, v30.4s, v7.4s\n"
- "zip2 v8.4s, v2.4s, v8.4s\n"
- "ldr q2, [x27], #0x10\n"
+ "cmp x19, #0x18\n"
+ "ldr q26, [x25], #0x10\n"
+ "zip1 v28.4s, v3.4s, v26.4s\n"
+ "ldr q1, [x28], #0x10\n"
+ "zip2 v12.4s, v3.4s, v26.4s\n"
+ "ldr q26, [x26], #0x10\n"
+ "ldr q17, [x25], #0x10\n"
+ "zip1 v11.4s, v1.4s, v17.4s\n"
+ "ldr q31, [x28], #0x10\n"
+ "zip2 v23.4s, v1.4s, v17.4s\n"
+ "ldr q3, [x26], #0x10\n"
+ "ldr q1, [x25], #0x10\n"
+ "zip1 v10.4s, v31.4s, v1.4s\n"
+ "ldr q14, [x28], #0x10\n"
+ "zip2 v17.4s, v31.4s, v1.4s\n"
"ldr q6, [x26], #0x10\n"
- "zip1 v25.4s, v5.4s, v2.4s\n"
- "zip1 v15.4s, v26.4s, v6.4s\n"
- "ldr q7, [x25], #0x10\n"
- "ldr q30, [x23], #0x10\n"
- "zip2 v5.4s, v5.4s, v2.4s\n"
- "zip2 v26.4s, v26.4s, v6.4s\n"
- "ldr q2, [x22], #0x10\n"
- "zip1 v6.4s, v7.4s, v2.4s\n"
- "zip2 v7.4s, v7.4s, v2.4s\n"
- "zip1 v2.4s, v18.4s, v20.4s\n"
- ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
- "zip2 v20.4s, v18.4s, v20.4s\n"
- "ldr q18, [x20], #0x10\n"
- ".inst 0x4ea16a82 // bfcvtn2 v2.8h, v20.4s\n"
- "zip1 v20.4s, v30.4s, v18.4s\n"
- "zip2 v18.4s, v30.4s, v18.4s\n"
- "zip1 v30.4s, v16.4s, v3.4s\n"
- ".inst 0x0ea16bde // bfcvtn v30.4h, v30.4s\n"
- "zip2 v3.4s, v16.4s, v3.4s\n"
- "ldr q16, [x9], #0x10\n"
- ".inst 0x4ea1687e // bfcvtn2 v30.8h, v3.4s\n"
- "zip1 v3.4s, v21.4s, v14.4s\n"
- ".inst 0x0ea16863 // bfcvtn v3.4h, v3.4s\n"
- "zip2 v21.4s, v21.4s, v14.4s\n"
+ "ldr q1, [x25], #0x10\n"
+ "zip1 v5.4s, v14.4s, v1.4s\n"
+ "ldr q0, [x28], #0x10\n"
+ "zip2 v8.4s, v14.4s, v1.4s\n"
+ "ldr q2, [x26], #0x10\n"
+ "ldr q30, [x25], #0x10\n"
+ "zip1 v15.4s, v0.4s, v30.4s\n"
"ldr q14, [x28], #0x10\n"
- ".inst 0x4ea16aa3 // bfcvtn2 v3.8h, v21.4s\n"
- "zip1 v21.4s, v29.4s, v17.4s\n"
- ".inst 0x0ea16ab5 // bfcvtn v21.4h, v21.4s\n"
- "zip2 v29.4s, v29.4s, v17.4s\n"
- "ldr q17, [x27], #0x10\n"
- ".inst 0x4ea16bb5 // bfcvtn2 v21.8h, v29.4s\n"
- "zip1 v29.4s, v16.4s, v17.4s\n"
- "zip2 v16.4s, v16.4s, v17.4s\n"
- "zip1 v17.4s, v12.4s, v10.4s\n"
- ".inst 0x0ea16a31 // bfcvtn v17.4h, v17.4s\n"
- "zip2 v10.4s, v12.4s, v10.4s\n"
- "ldr q12, [x26], #0x10\n"
- ".inst 0x4ea16951 // bfcvtn2 v17.8h, v10.4s\n"
- "zip1 v10.4s, v14.4s, v12.4s\n"
- "zip2 v14.4s, v14.4s, v12.4s\n"
- "zip1 v12.4s, v9.4s, v13.4s\n"
- ".inst 0x0ea1698c // bfcvtn v12.4h, v12.4s\n"
- "zip2 v13.4s, v9.4s, v13.4s\n"
- "ldr q9, [x25], #0x10\n"
- ".inst 0x4ea169ac // bfcvtn2 v12.8h, v13.4s\n"
- "zip1 v13.4s, v25.4s, v15.4s\n"
- ".inst 0x0ea169ad // bfcvtn v13.4h, v13.4s\n"
- "zip2 v25.4s, v25.4s, v15.4s\n"
- "ldr q15, [x23], #0x10\n"
- ".inst 0x4ea16b2d // bfcvtn2 v13.8h, v25.4s\n"
- "zip1 v25.4s, v5.4s, v26.4s\n"
+ "zip2 v1.4s, v0.4s, v30.4s\n"
+ "ldr q0, [x26], #0x10\n"
+ "ldr q29, [x25], #0x10\n"
+ "zip1 v19.4s, v14.4s, v29.4s\n"
+ "ldr q25, [x24], #0x10\n"
+ "zip2 v30.4s, v14.4s, v29.4s\n"
+ "ldr q7, [x23], #0x10\n"
+ "ldr q31, [x22], #0x10\n"
+ "zip1 v20.4s, v27.4s, v25.4s\n"
+ "ldr q24, [x24], #0x10\n"
+ "zip2 v4.4s, v27.4s, v25.4s\n"
+ "ldr q22, [x23], #0x10\n"
+ "zip1 v14.4s, v28.4s, v20.4s\n"
+ "ldr q13, [x22], #0x10\n"
+ ".inst 0x0ea169d0 // bfcvtn v16.4h, v14.4s\n"
+ "ldr q29, [x21], #0x10\n"
+ "zip2 v21.4s, v28.4s, v20.4s\n"
+ "ldr q27, [x24], #0x10\n"
+ "zip1 v9.4s, v12.4s, v4.4s\n"
+ "ldr q14, [x23], #0x10\n"
+ ".inst 0x4ea16ab0 // bfcvtn2 v16.8h, v21.4s\n"
+ "ldr q21, [x22], #0x10\n"
+ ".inst 0x0ea16929 // bfcvtn v9.4h, v9.4s\n"
+ "ldr q18, [x21], #0x10\n"
+ "zip2 v25.4s, v12.4s, v4.4s\n"
+ "ldr q4, [x24], #0x10\n"
+ "zip1 v28.4s, v26.4s, v24.4s\n"
+ "ldr q20, [x23], #0x10\n"
+ ".inst 0x4ea16b29 // bfcvtn2 v9.8h, v25.4s\n"
+ "ldr q12, [x22], #0x10\n"
+ "zip1 v25.4s, v11.4s, v28.4s\n"
".inst 0x0ea16b39 // bfcvtn v25.4h, v25.4s\n"
- "zip2 v5.4s, v5.4s, v26.4s\n"
- "ldr q26, [x22], #0x10\n"
- ".inst 0x4ea168b9 // bfcvtn2 v25.8h, v5.4s\n"
- "zip1 v5.4s, v9.4s, v26.4s\n"
- "zip2 v9.4s, v9.4s, v26.4s\n"
- "zip1 v26.4s, v29.4s, v10.4s\n"
- ".inst 0x0ea16b5a // bfcvtn v26.4h, v26.4s\n"
- "zip2 v10.4s, v29.4s, v10.4s\n"
- "ldr q29, [x20], #0x10\n"
- ".inst 0x4ea1695a // bfcvtn2 v26.8h, v10.4s\n"
- "zip1 v10.4s, v15.4s, v29.4s\n"
- "zip2 v15.4s, v15.4s, v29.4s\n"
- "zip1 v29.4s, v16.4s, v14.4s\n"
- ".inst 0x0ea16bbd // bfcvtn v29.4h, v29.4s\n"
- "zip2 v14.4s, v16.4s, v14.4s\n"
- "ldr q16, [x9], #0x10\n"
- ".inst 0x4ea169dd // bfcvtn2 v29.8h, v14.4s\n"
- "zip1 v14.4s, v4.4s, v28.4s\n"
- ".inst 0x0ea169ce // bfcvtn v14.4h, v14.4s\n"
- "zip2 v4.4s, v4.4s, v28.4s\n"
- "ldr q28, [x28], #0x10\n"
- ".inst 0x4ea1688e // bfcvtn2 v14.8h, v4.4s\n"
- "zip1 v4.4s, v23.4s, v19.4s\n"
- ".inst 0x0ea16884 // bfcvtn v4.4h, v4.4s\n"
- "zip2 v19.4s, v23.4s, v19.4s\n"
- "ldr q23, [x27], #0x10\n"
- ".inst 0x4ea16a64 // bfcvtn2 v4.8h, v19.4s\n"
- "zip1 v19.4s, v16.4s, v23.4s\n"
- "zip2 v16.4s, v16.4s, v23.4s\n"
- "zip1 v23.4s, v11.4s, v1.4s\n"
- ".inst 0x0ea16af7 // bfcvtn v23.4h, v23.4s\n"
- "zip2 v1.4s, v11.4s, v1.4s\n"
- "ldr q11, [x26], #0x10\n"
- ".inst 0x4ea16837 // bfcvtn2 v23.8h, v1.4s\n"
- "zip1 v1.4s, v28.4s, v11.4s\n"
- "zip2 v28.4s, v28.4s, v11.4s\n"
- "zip1 v11.4s, v19.4s, v1.4s\n"
+ "zip2 v11.4s, v11.4s, v28.4s\n"
+ "ldr q28, [x24], #0x10\n"
+ "zip2 v26.4s, v26.4s, v24.4s\n"
+ "ldr q24, [x23], #0x10\n"
+ ".inst 0x4ea16979 // bfcvtn2 v25.8h, v11.4s\n"
+ "zip1 v11.4s, v23.4s, v26.4s\n"
".inst 0x0ea1696b // bfcvtn v11.4h, v11.4s\n"
- "zip2 v19.4s, v19.4s, v1.4s\n"
- "ldr q1, [x25], #0x10\n"
- ".inst 0x4ea16a6b // bfcvtn2 v11.8h, v19.4s\n"
- "zip1 v19.4s, v16.4s, v28.4s\n"
+ "zip2 v23.4s, v23.4s, v26.4s\n"
+ "ldr q26, [x24], #0x10\n"
+ ".inst 0x4ea16aeb // bfcvtn2 v11.8h, v23.4s\n"
+ "zip1 v23.4s, v3.4s, v27.4s\n"
+ "zip2 v27.4s, v3.4s, v27.4s\n"
+ "zip1 v3.4s, v10.4s, v23.4s\n"
+ ".inst 0x0ea16863 // bfcvtn v3.4h, v3.4s\n"
+ "zip2 v10.4s, v10.4s, v23.4s\n"
+ "ldr q23, [x23], #0x10\n"
+ ".inst 0x4ea16943 // bfcvtn2 v3.8h, v10.4s\n"
+ "zip1 v10.4s, v17.4s, v27.4s\n"
+ ".inst 0x0ea1694a // bfcvtn v10.4h, v10.4s\n"
+ "zip2 v27.4s, v17.4s, v27.4s\n"
+ "ldr q17, [x22], #0x10\n"
+ ".inst 0x4ea16b6a // bfcvtn2 v10.8h, v27.4s\n"
+ "zip1 v27.4s, v6.4s, v4.4s\n"
+ "zip2 v6.4s, v6.4s, v4.4s\n"
+ "zip1 v4.4s, v5.4s, v27.4s\n"
+ ".inst 0x0ea16884 // bfcvtn v4.4h, v4.4s\n"
+ "zip2 v27.4s, v5.4s, v27.4s\n"
+ "ldr q5, [x22], #0x10\n"
+ ".inst 0x4ea16b64 // bfcvtn2 v4.8h, v27.4s\n"
+ "zip1 v27.4s, v8.4s, v6.4s\n"
+ ".inst 0x0ea16b7b // bfcvtn v27.4h, v27.4s\n"
+ "zip2 v6.4s, v8.4s, v6.4s\n"
+ "ldr q8, [x21], #0x10\n"
+ ".inst 0x4ea168db // bfcvtn2 v27.8h, v6.4s\n"
+ "zip1 v6.4s, v2.4s, v28.4s\n"
+ "zip2 v2.4s, v2.4s, v28.4s\n"
+ "zip1 v28.4s, v15.4s, v6.4s\n"
+ ".inst 0x0ea16b9c // bfcvtn v28.4h, v28.4s\n"
+ "zip2 v6.4s, v15.4s, v6.4s\n"
+ "ldr q15, [x21], #0x10\n"
+ ".inst 0x4ea168dc // bfcvtn2 v28.8h, v6.4s\n"
+ "zip1 v6.4s, v1.4s, v2.4s\n"
+ ".inst 0x0ea168c6 // bfcvtn v6.4h, v6.4s\n"
+ "zip2 v2.4s, v1.4s, v2.4s\n"
+ "ldr q1, [x21], #0x10\n"
+ ".inst 0x4ea16846 // bfcvtn2 v6.8h, v2.4s\n"
+ "zip1 v2.4s, v0.4s, v26.4s\n"
+ "zip2 v26.4s, v0.4s, v26.4s\n"
+ "zip1 v0.4s, v19.4s, v2.4s\n"
+ ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ "zip2 v19.4s, v19.4s, v2.4s\n"
+ "ldr q2, [x21], #0x10\n"
+ ".inst 0x4ea16a60 // bfcvtn2 v0.8h, v19.4s\n"
+ "zip1 v19.4s, v30.4s, v26.4s\n"
".inst 0x0ea16a73 // bfcvtn v19.4h, v19.4s\n"
- "zip2 v16.4s, v16.4s, v28.4s\n"
- "ldr q28, [x23], #0x10\n"
- ".inst 0x4ea16a13 // bfcvtn2 v19.8h, v16.4s\n"
- "zip1 v16.4s, v24.4s, v0.4s\n"
- ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
- "zip2 v24.4s, v24.4s, v0.4s\n"
- "ldr q0, [x22], #0x10\n"
- ".inst 0x4ea16b10 // bfcvtn2 v16.8h, v24.4s\n"
- "ldr q24, [x20], #0x10\n"
- "str q2, [x21, #0x0]\n"
- "zip1 v2.4s, v1.4s, v0.4s\n"
- "zip2 v0.4s, v1.4s, v0.4s\n"
- "zip1 v1.4s, v28.4s, v24.4s\n"
- "zip2 v28.4s, v28.4s, v24.4s\n"
- "str q30, [x21, #0x10]\n"
- "zip1 v24.4s, v27.4s, v31.4s\n"
- "zip1 v30.4s, v22.4s, v8.4s\n"
- "str q3, [x21, #0x20]\n"
- "zip1 v3.4s, v6.4s, v20.4s\n"
- ".inst 0x0ea16b18 // bfcvtn v24.4h, v24.4s\n"
- "str q21, [x21, #0x30]\n"
- "zip1 v21.4s, v7.4s, v18.4s\n"
- "zip2 v31.4s, v27.4s, v31.4s\n"
- "str q17, [x21, #0x40]\n"
- "zip1 v17.4s, v5.4s, v10.4s\n"
- "zip1 v27.4s, v9.4s, v15.4s\n"
- "str q12, [x21, #0x50]\n"
- "zip1 v12.4s, v2.4s, v1.4s\n"
+ "zip2 v30.4s, v30.4s, v26.4s\n"
+ "ldr q26, [x20], #0x10\n"
+ ".inst 0x4ea16bd3 // bfcvtn2 v19.8h, v30.4s\n"
+ "zip1 v30.4s, v7.4s, v29.4s\n"
+ "zip2 v29.4s, v7.4s, v29.4s\n"
+ "zip1 v7.4s, v22.4s, v18.4s\n"
+ "zip2 v18.4s, v22.4s, v18.4s\n"
+ "zip1 v22.4s, v31.4s, v26.4s\n"
+ "zip2 v26.4s, v31.4s, v26.4s\n"
+ "zip1 v31.4s, v30.4s, v22.4s\n"
+ ".inst 0x0ea16bff // bfcvtn v31.4h, v31.4s\n"
+ "zip2 v30.4s, v30.4s, v22.4s\n"
+ "ldr q22, [x20], #0x10\n"
+ ".inst 0x4ea16bdf // bfcvtn2 v31.8h, v30.4s\n"
+ "zip1 v30.4s, v29.4s, v26.4s\n"
".inst 0x0ea16bde // bfcvtn v30.4h, v30.4s\n"
- "str q13, [x21, #0x60]\n"
- "zip1 v13.4s, v0.4s, v28.4s\n"
- "zip2 v22.4s, v22.4s, v8.4s\n"
- "str q25, [x21, #0x70]\n"
- ".inst 0x0ea16879 // bfcvtn v25.4h, v3.4s\n"
- "zip2 v8.4s, v6.4s, v20.4s\n"
- "str q26, [x21, #0x80]\n"
- ".inst 0x0ea16aa3 // bfcvtn v3.4h, v21.4s\n"
- "zip2 v18.4s, v7.4s, v18.4s\n"
- "str q29, [x21, #0x90]\n"
- ".inst 0x0ea16a27 // bfcvtn v7.4h, v17.4s\n"
- "zip2 v21.4s, v5.4s, v10.4s\n"
- "str q11, [x21, #0xa0]\n"
- ".inst 0x0ea16b65 // bfcvtn v5.4h, v27.4s\n"
- "zip2 v15.4s, v9.4s, v15.4s\n"
- "str q19, [x21, #0xb0]\n"
- ".inst 0x0ea16991 // bfcvtn v17.4h, v12.4s\n"
- "zip2 v20.4s, v2.4s, v1.4s\n"
- "str q14, [x21, #0xc0]\n"
- ".inst 0x0ea169bb // bfcvtn v27.4h, v13.4s\n"
- "zip2 v29.4s, v0.4s, v28.4s\n"
- "str q4, [x21, #0xd0]\n"
- ".inst 0x4ea16bf8 // bfcvtn2 v24.8h, v31.4s\n"
- ".inst 0x4ea16ade // bfcvtn2 v30.8h, v22.4s\n"
- "str q23, [x21, #0xe0]\n"
- ".inst 0x4ea16919 // bfcvtn2 v25.8h, v8.4s\n"
- ".inst 0x4ea16a43 // bfcvtn2 v3.8h, v18.4s\n"
- "str q16, [x21, #0xf0]\n"
- ".inst 0x4ea16aa7 // bfcvtn2 v7.8h, v21.4s\n"
- ".inst 0x4ea169e5 // bfcvtn2 v5.8h, v15.4s\n"
- "str q24, [x21, #0x100]\n"
- ".inst 0x4ea16a91 // bfcvtn2 v17.8h, v20.4s\n"
- ".inst 0x4ea16bbb // bfcvtn2 v27.8h, v29.4s\n"
- "str q30, [x21, #0x110]\n"
- "str q25, [x21, #0x120]\n"
- "str q3, [x21, #0x130]\n"
- "str q7, [x21, #0x140]\n"
- "str q5, [x21, #0x150]\n"
- "str q17, [x21, #0x160]\n"
- "str q27, [x21, #0x170]\n"
- "add x21, x21, %x[out_stride]\n"
+ "zip2 v26.4s, v29.4s, v26.4s\n"
+ "ldr q29, [x20], #0x10\n"
+ ".inst 0x4ea16b5e // bfcvtn2 v30.8h, v26.4s\n"
+ "zip1 v26.4s, v13.4s, v22.4s\n"
+ "zip2 v13.4s, v13.4s, v22.4s\n"
+ "zip1 v22.4s, v7.4s, v26.4s\n"
+ ".inst 0x0ea16ad6 // bfcvtn v22.4h, v22.4s\n"
+ "zip2 v7.4s, v7.4s, v26.4s\n"
+ "ldr q26, [x20], #0x10\n"
+ ".inst 0x4ea168f6 // bfcvtn2 v22.8h, v7.4s\n"
+ "zip1 v7.4s, v18.4s, v13.4s\n"
+ ".inst 0x0ea168e7 // bfcvtn v7.4h, v7.4s\n"
+ "zip2 v13.4s, v18.4s, v13.4s\n"
+ "ldr q18, [x20], #0x10\n"
+ ".inst 0x4ea169a7 // bfcvtn2 v7.8h, v13.4s\n"
+ "ldr q13, [x20], #0x10\n"
+ "str q16, [x27, #0x0]\n"
+ "zip1 v16.4s, v14.4s, v8.4s\n"
+ "zip2 v8.4s, v14.4s, v8.4s\n"
+ "str q9, [x27, #0x10]\n"
+ "zip1 v9.4s, v21.4s, v29.4s\n"
+ "str q25, [x27, #0x20]\n"
+ "zip1 v25.4s, v16.4s, v9.4s\n"
+ "str q11, [x27, #0x30]\n"
+ ".inst 0x0ea16b2e // bfcvtn v14.4h, v25.4s\n"
+ "str q3, [x27, #0x40]\n"
+ "zip2 v25.4s, v16.4s, v9.4s\n"
+ "str q10, [x27, #0x50]\n"
+ "zip2 v29.4s, v21.4s, v29.4s\n"
+ "str q4, [x27, #0x60]\n"
+ ".inst 0x4ea16b2e // bfcvtn2 v14.8h, v25.4s\n"
+ "str q27, [x27, #0x70]\n"
+ "zip1 v27.4s, v8.4s, v29.4s\n"
+ "str q28, [x27, #0x80]\n"
+ ".inst 0x0ea16b7b // bfcvtn v27.4h, v27.4s\n"
+ "str q6, [x27, #0x90]\n"
+ "zip2 v16.4s, v8.4s, v29.4s\n"
+ "str q0, [x27, #0xa0]\n"
+ "zip1 v0.4s, v20.4s, v15.4s\n"
+ "str q19, [x27, #0xb0]\n"
+ ".inst 0x4ea16a1b // bfcvtn2 v27.8h, v16.4s\n"
+ "str q31, [x27, #0xc0]\n"
+ "zip1 v29.4s, v12.4s, v26.4s\n"
+ "str q30, [x27, #0xd0]\n"
+ "zip1 v28.4s, v0.4s, v29.4s\n"
+ "str q22, [x27, #0xe0]\n"
+ ".inst 0x0ea16b83 // bfcvtn v3.4h, v28.4s\n"
+ "str q7, [x27, #0xf0]\n"
+ "zip2 v22.4s, v0.4s, v29.4s\n"
+ "str q14, [x27, #0x100]\n"
+ "zip2 v19.4s, v20.4s, v15.4s\n"
+ "str q27, [x27, #0x110]\n"
+ ".inst 0x4ea16ac3 // bfcvtn2 v3.8h, v22.4s\n"
+ "str q3, [x27, #0x120]\n"
+ "zip2 v4.4s, v12.4s, v26.4s\n"
+ "zip1 v20.4s, v24.4s, v1.4s\n"
+ "zip1 v22.4s, v19.4s, v4.4s\n"
+ ".inst 0x0ea16ad9 // bfcvtn v25.4h, v22.4s\n"
+ "zip2 v6.4s, v19.4s, v4.4s\n"
+ "zip1 v22.4s, v17.4s, v18.4s\n"
+ ".inst 0x4ea168d9 // bfcvtn2 v25.8h, v6.4s\n"
+ "str q25, [x27, #0x130]\n"
+ "zip1 v3.4s, v20.4s, v22.4s\n"
+ "zip2 v22.4s, v20.4s, v22.4s\n"
+ ".inst 0x0ea16864 // bfcvtn v4.4h, v3.4s\n"
+ "zip2 v15.4s, v24.4s, v1.4s\n"
+ "zip2 v17.4s, v17.4s, v18.4s\n"
+ ".inst 0x4ea16ac4 // bfcvtn2 v4.8h, v22.4s\n"
+ "str q4, [x27, #0x140]\n"
+ "zip1 v16.4s, v15.4s, v17.4s\n"
+ "zip2 v8.4s, v15.4s, v17.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ "zip1 v18.4s, v23.4s, v2.4s\n"
+ "zip1 v17.4s, v5.4s, v13.4s\n"
+ ".inst 0x4ea16910 // bfcvtn2 v16.8h, v8.4s\n"
+ "str q16, [x27, #0x150]\n"
+ "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v10.4s, v18.4s, v17.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ "zip2 v18.4s, v23.4s, v2.4s\n"
+ "zip2 v17.4s, v5.4s, v13.4s\n"
+ ".inst 0x4ea16950 // bfcvtn2 v16.8h, v10.4s\n"
+ "str q16, [x27, #0x160]\n"
+ "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v17.4s, v18.4s, v17.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ ".inst 0x4ea16a30 // bfcvtn2 v16.8h, v17.4s\n"
+ "str q16, [x27, #0x170]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Column loop skip
- "cmp x24, #0x10\n"
+ "cmp x19, #0x10\n"
"blt 5f\n"
"4:" // Main row loop: width 16 loop: loop
- "ldr q9, [x9], #0x10\n"
- "ldr q18, [x28], #0x10\n"
- "sub x24, x24, #0x10\n"
- "cmp x24, #0x10\n"
- "ldr q15, [x27], #0x10\n"
- "ldr q17, [x26], #0x10\n"
- "zip1 v14.4s, v9.4s, v15.4s\n"
- "zip1 v11.4s, v18.4s, v17.4s\n"
- "ldr q7, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
- "zip2 v12.4s, v9.4s, v15.4s\n"
- "zip2 v6.4s, v18.4s, v17.4s\n"
- "ldr q15, [x22], #0x10\n"
- "ldr q3, [x20], #0x10\n"
- "zip1 v30.4s, v7.4s, v15.4s\n"
- "zip1 v20.4s, v16.4s, v3.4s\n"
- "ldr q17, [x9], #0x10\n"
- "ldr q9, [x28], #0x10\n"
- "zip2 v1.4s, v7.4s, v15.4s\n"
- "zip2 v24.4s, v16.4s, v3.4s\n"
- "ldr q10, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v0.4s, v17.4s, v10.4s\n"
- "zip1 v8.4s, v9.4s, v16.4s\n"
- "ldr q7, [x25], #0x10\n"
- "ldr q2, [x23], #0x10\n"
- "zip2 v17.4s, v17.4s, v10.4s\n"
- "zip2 v3.4s, v9.4s, v16.4s\n"
- "ldr q9, [x22], #0x10\n"
- "ldr q10, [x20], #0x10\n"
- "zip1 v25.4s, v7.4s, v9.4s\n"
- "zip1 v23.4s, v2.4s, v10.4s\n"
- "ldr q31, [x9], #0x10\n"
- "ldr q21, [x28], #0x10\n"
- "zip2 v16.4s, v7.4s, v9.4s\n"
- "zip2 v27.4s, v2.4s, v10.4s\n"
- "ldr q26, [x27], #0x10\n"
+ "ldr q17, [x28], #0x10\n"
+ "sub x19, x19, #0x10\n"
"ldr q19, [x26], #0x10\n"
- "zip1 v2.4s, v31.4s, v26.4s\n"
- "zip1 v7.4s, v21.4s, v19.4s\n"
- "ldr q29, [x25], #0x10\n"
- "ldr q13, [x23], #0x10\n"
- "zip2 v31.4s, v31.4s, v26.4s\n"
- "zip2 v19.4s, v21.4s, v19.4s\n"
- "ldr q4, [x22], #0x10\n"
- "ldr q18, [x20], #0x10\n"
- "zip1 v26.4s, v29.4s, v4.4s\n"
- "zip1 v15.4s, v13.4s, v18.4s\n"
- "ldr q9, [x9], #0x10\n"
- "ldr q22, [x28], #0x10\n"
- "zip2 v4.4s, v29.4s, v4.4s\n"
- "zip2 v18.4s, v13.4s, v18.4s\n"
- "ldr q29, [x27], #0x10\n"
+ "cmp x19, #0x10\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v24.4s, v17.4s, v16.4s\n"
+ "ldr q18, [x28], #0x10\n"
+ "zip2 v23.4s, v17.4s, v16.4s\n"
+ "ldr q22, [x26], #0x10\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v21.4s, v18.4s, v16.4s\n"
+ "ldr q17, [x28], #0x10\n"
+ "zip2 v14.4s, v18.4s, v16.4s\n"
+ "ldr q13, [x26], #0x10\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v12.4s, v17.4s, v16.4s\n"
+ "ldr q18, [x28], #0x10\n"
+ "zip2 v11.4s, v17.4s, v16.4s\n"
"ldr q10, [x26], #0x10\n"
- "zip1 v21.4s, v9.4s, v29.4s\n"
- "zip1 v5.4s, v22.4s, v10.4s\n"
- "ldr q28, [x25], #0x10\n"
- "ldr q13, [x23], #0x10\n"
- "zip2 v29.4s, v9.4s, v29.4s\n"
- "zip2 v9.4s, v22.4s, v10.4s\n"
- "ldr q22, [x22], #0x10\n"
- "zip1 v10.4s, v28.4s, v22.4s\n"
- "zip2 v28.4s, v28.4s, v22.4s\n"
- "zip1 v22.4s, v14.4s, v11.4s\n"
- ".inst 0x0ea16ad6 // bfcvtn v22.4h, v22.4s\n"
- "zip2 v11.4s, v14.4s, v11.4s\n"
- "ldr q14, [x20], #0x10\n"
- ".inst 0x4ea16976 // bfcvtn2 v22.8h, v11.4s\n"
- "str q22, [x21, #0x0]\n"
- "zip1 v22.4s, v13.4s, v14.4s\n"
- "zip2 v14.4s, v13.4s, v14.4s\n"
- "zip1 v13.4s, v12.4s, v6.4s\n"
- "zip1 v11.4s, v0.4s, v8.4s\n"
- ".inst 0x0ea169ad // bfcvtn v13.4h, v13.4s\n"
- "zip2 v12.4s, v12.4s, v6.4s\n"
- "zip1 v6.4s, v17.4s, v3.4s\n"
- ".inst 0x0ea1696b // bfcvtn v11.4h, v11.4s\n"
- "zip2 v0.4s, v0.4s, v8.4s\n"
- "zip1 v8.4s, v2.4s, v7.4s\n"
- ".inst 0x0ea168c6 // bfcvtn v6.4h, v6.4s\n"
- "zip2 v3.4s, v17.4s, v3.4s\n"
- "zip1 v17.4s, v31.4s, v19.4s\n"
- ".inst 0x0ea16908 // bfcvtn v8.4h, v8.4s\n"
- "zip2 v2.4s, v2.4s, v7.4s\n"
- "zip1 v7.4s, v21.4s, v5.4s\n"
- ".inst 0x0ea16a31 // bfcvtn v17.4h, v17.4s\n"
- "zip2 v31.4s, v31.4s, v19.4s\n"
- "zip1 v19.4s, v29.4s, v9.4s\n"
- ".inst 0x0ea168e7 // bfcvtn v7.4h, v7.4s\n"
- "zip2 v21.4s, v21.4s, v5.4s\n"
- "zip1 v5.4s, v30.4s, v20.4s\n"
- ".inst 0x0ea16a73 // bfcvtn v19.4h, v19.4s\n"
- "zip2 v29.4s, v29.4s, v9.4s\n"
- "zip1 v9.4s, v1.4s, v24.4s\n"
- ".inst 0x0ea168a5 // bfcvtn v5.4h, v5.4s\n"
- "zip2 v20.4s, v30.4s, v20.4s\n"
- "zip1 v30.4s, v25.4s, v23.4s\n"
- ".inst 0x0ea16929 // bfcvtn v9.4h, v9.4s\n"
- "zip2 v1.4s, v1.4s, v24.4s\n"
- "zip1 v24.4s, v16.4s, v27.4s\n"
- ".inst 0x0ea16bde // bfcvtn v30.4h, v30.4s\n"
- "zip2 v23.4s, v25.4s, v23.4s\n"
- "zip1 v25.4s, v26.4s, v15.4s\n"
- ".inst 0x0ea16b18 // bfcvtn v24.4h, v24.4s\n"
- "zip2 v27.4s, v16.4s, v27.4s\n"
- "zip1 v16.4s, v4.4s, v18.4s\n"
- ".inst 0x0ea16b39 // bfcvtn v25.4h, v25.4s\n"
- "zip2 v15.4s, v26.4s, v15.4s\n"
- "zip1 v26.4s, v10.4s, v22.4s\n"
+ "ldr q17, [x25], #0x10\n"
+ "zip1 v9.4s, v18.4s, v17.4s\n"
+ "ldr q16, [x24], #0x10\n"
+ "zip2 v8.4s, v18.4s, v17.4s\n"
+ "ldr q7, [x23], #0x10\n"
+ "ldr q6, [x22], #0x10\n"
+ "zip1 v17.4s, v19.4s, v16.4s\n"
+ "ldr q20, [x24], #0x10\n"
+ "zip2 v19.4s, v19.4s, v16.4s\n"
+ "ldr q5, [x23], #0x10\n"
+ "zip1 v16.4s, v24.4s, v17.4s\n"
+ "ldr q4, [x22], #0x10\n"
+ ".inst 0x0ea16a03 // bfcvtn v3.4h, v16.4s\n"
+ "ldr q2, [x21], #0x10\n"
+ "zip2 v17.4s, v24.4s, v17.4s\n"
+ "ldr q1, [x24], #0x10\n"
+ "zip1 v16.4s, v23.4s, v19.4s\n"
+ "ldr q0, [x23], #0x10\n"
+ ".inst 0x4ea16a23 // bfcvtn2 v3.8h, v17.4s\n"
+ "ldr q31, [x22], #0x10\n"
+ ".inst 0x0ea16a12 // bfcvtn v18.4h, v16.4s\n"
+ "ldr q30, [x21], #0x10\n"
+ "zip2 v16.4s, v23.4s, v19.4s\n"
+ "ldr q29, [x24], #0x10\n"
+ "zip1 v17.4s, v22.4s, v20.4s\n"
+ "ldr q28, [x23], #0x10\n"
+ ".inst 0x4ea16a12 // bfcvtn2 v18.8h, v16.4s\n"
+ "ldr q27, [x22], #0x10\n"
+ "zip1 v16.4s, v21.4s, v17.4s\n"
+ "ldr q26, [x21], #0x10\n"
+ ".inst 0x0ea16a19 // bfcvtn v25.4h, v16.4s\n"
+ "ldr q24, [x20], #0x10\n"
+ "zip2 v16.4s, v21.4s, v17.4s\n"
+ "ldr q23, [x21], #0x10\n"
+ ".inst 0x4ea16a19 // bfcvtn2 v25.8h, v16.4s\n"
+ "zip2 v17.4s, v22.4s, v20.4s\n"
+ "ldr q22, [x20], #0x10\n"
+ "zip1 v16.4s, v14.4s, v17.4s\n"
+ "ldr q21, [x20], #0x10\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v16.4s, v14.4s, v17.4s\n"
+ "ldr q20, [x20], #0x10\n"
+ ".inst 0x4ea16a13 // bfcvtn2 v19.8h, v16.4s\n"
+ "zip1 v17.4s, v13.4s, v1.4s\n"
+ "str q3, [x27, #0x0]\n"
+ "zip1 v16.4s, v12.4s, v17.4s\n"
+ "str q18, [x27, #0x10]\n"
+ ".inst 0x0ea16a12 // bfcvtn v18.4h, v16.4s\n"
+ "str q25, [x27, #0x20]\n"
+ "zip2 v16.4s, v12.4s, v17.4s\n"
+ "str q19, [x27, #0x30]\n"
+ "zip2 v17.4s, v13.4s, v1.4s\n"
+ ".inst 0x4ea16a12 // bfcvtn2 v18.8h, v16.4s\n"
+ "str q18, [x27, #0x40]\n"
+ "zip1 v16.4s, v11.4s, v17.4s\n"
+ "zip2 v19.4s, v11.4s, v17.4s\n"
+ ".inst 0x0ea16a12 // bfcvtn v18.4h, v16.4s\n"
+ "zip1 v17.4s, v10.4s, v29.4s\n"
+ "zip1 v16.4s, v9.4s, v17.4s\n"
+ ".inst 0x4ea16a72 // bfcvtn2 v18.8h, v19.4s\n"
+ "str q18, [x27, #0x50]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v18.4s, v9.4s, v17.4s\n"
+ "zip2 v17.4s, v10.4s, v29.4s\n"
+ "zip1 v16.4s, v8.4s, v17.4s\n"
+ ".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
+ "str q19, [x27, #0x60]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v16.4s, v8.4s, v17.4s\n"
+ "zip1 v18.4s, v7.4s, v2.4s\n"
+ "zip1 v17.4s, v6.4s, v24.4s\n"
+ ".inst 0x4ea16a13 // bfcvtn2 v19.8h, v16.4s\n"
+ "str q19, [x27, #0x70]\n"
+ "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v19.4s, v18.4s, v17.4s\n"
".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
- "zip2 v18.4s, v4.4s, v18.4s\n"
- "zip1 v4.4s, v28.4s, v14.4s\n"
- ".inst 0x0ea16b5a // bfcvtn v26.4h, v26.4s\n"
- "zip2 v10.4s, v10.4s, v22.4s\n"
- ".inst 0x0ea16896 // bfcvtn v22.4h, v4.4s\n"
- "zip2 v4.4s, v28.4s, v14.4s\n"
- ".inst 0x4ea1698d // bfcvtn2 v13.8h, v12.4s\n"
- "str q13, [x21, #0x10]\n"
- ".inst 0x4ea1680b // bfcvtn2 v11.8h, v0.4s\n"
- ".inst 0x4ea16866 // bfcvtn2 v6.8h, v3.4s\n"
- "str q11, [x21, #0x20]\n"
- ".inst 0x4ea16848 // bfcvtn2 v8.8h, v2.4s\n"
- ".inst 0x4ea16bf1 // bfcvtn2 v17.8h, v31.4s\n"
- "str q6, [x21, #0x30]\n"
- ".inst 0x4ea16aa7 // bfcvtn2 v7.8h, v21.4s\n"
- ".inst 0x4ea16bb3 // bfcvtn2 v19.8h, v29.4s\n"
- "str q8, [x21, #0x40]\n"
- ".inst 0x4ea16a85 // bfcvtn2 v5.8h, v20.4s\n"
- ".inst 0x4ea16829 // bfcvtn2 v9.8h, v1.4s\n"
- "str q17, [x21, #0x50]\n"
- ".inst 0x4ea16afe // bfcvtn2 v30.8h, v23.4s\n"
- ".inst 0x4ea16b78 // bfcvtn2 v24.8h, v27.4s\n"
- "str q7, [x21, #0x60]\n"
- ".inst 0x4ea169f9 // bfcvtn2 v25.8h, v15.4s\n"
- ".inst 0x4ea16a50 // bfcvtn2 v16.8h, v18.4s\n"
- "str q19, [x21, #0x70]\n"
- ".inst 0x4ea1695a // bfcvtn2 v26.8h, v10.4s\n"
- ".inst 0x4ea16896 // bfcvtn2 v22.8h, v4.4s\n"
- "str q5, [x21, #0xc0]\n"
- "str q9, [x21, #0xd0]\n"
- "str q30, [x21, #0xe0]\n"
- "str q24, [x21, #0xf0]\n"
- "str q25, [x21, #0x100]\n"
- "str q16, [x21, #0x110]\n"
- "str q26, [x21, #0x120]\n"
- "str q22, [x21, #0x130]\n"
- "add x21, x21, #0x80\n"
+ "zip2 v18.4s, v7.4s, v2.4s\n"
+ "zip2 v17.4s, v6.4s, v24.4s\n"
+ ".inst 0x4ea16a70 // bfcvtn2 v16.8h, v19.4s\n"
+ "str q16, [x27, #0xc0]\n"
+ "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v19.4s, v18.4s, v17.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ "zip1 v18.4s, v5.4s, v30.4s\n"
+ "zip1 v17.4s, v4.4s, v22.4s\n"
+ ".inst 0x4ea16a70 // bfcvtn2 v16.8h, v19.4s\n"
+ "str q16, [x27, #0xd0]\n"
+ "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v19.4s, v18.4s, v17.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ "zip2 v18.4s, v5.4s, v30.4s\n"
+ "zip2 v17.4s, v4.4s, v22.4s\n"
+ ".inst 0x4ea16a70 // bfcvtn2 v16.8h, v19.4s\n"
+ "str q16, [x27, #0xe0]\n"
+ "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v19.4s, v18.4s, v17.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ "zip1 v18.4s, v0.4s, v26.4s\n"
+ "zip1 v17.4s, v31.4s, v21.4s\n"
+ ".inst 0x4ea16a70 // bfcvtn2 v16.8h, v19.4s\n"
+ "str q16, [x27, #0xf0]\n"
+ "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v19.4s, v18.4s, v17.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ "zip2 v18.4s, v0.4s, v26.4s\n"
+ "zip2 v17.4s, v31.4s, v21.4s\n"
+ ".inst 0x4ea16a70 // bfcvtn2 v16.8h, v19.4s\n"
+ "str q16, [x27, #0x100]\n"
+ "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v19.4s, v18.4s, v17.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ "zip1 v18.4s, v28.4s, v23.4s\n"
+ "zip1 v17.4s, v27.4s, v20.4s\n"
+ ".inst 0x4ea16a70 // bfcvtn2 v16.8h, v19.4s\n"
+ "str q16, [x27, #0x110]\n"
+ "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v19.4s, v18.4s, v17.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ "zip2 v18.4s, v28.4s, v23.4s\n"
+ "zip2 v17.4s, v27.4s, v20.4s\n"
+ ".inst 0x4ea16a70 // bfcvtn2 v16.8h, v19.4s\n"
+ "str q16, [x27, #0x120]\n"
+ "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v17.4s, v18.4s, v17.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ ".inst 0x4ea16a30 // bfcvtn2 v16.8h, v17.4s\n"
+ "str q16, [x27, #0x130]\n"
+ "add x27, x27, #0x80\n"
"bge 4b\n"
"5:" // Main row loop: width 16 loop: skip
- "cmp x24, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
- "ldr q23, [x9], #0x10\n"
"ldr q20, [x28], #0x10\n"
- "sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v22.4s, v23.4s, v17.4s\n"
- "zip1 v21.4s, v20.4s, v16.4s\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v28.4s, v23.4s, v17.4s\n"
- "zip2 v20.4s, v20.4s, v16.4s\n"
- "ldr q17, [x22], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "zip1 v27.4s, v19.4s, v17.4s\n"
- "zip1 v26.4s, v18.4s, v16.4s\n"
- "zip2 v25.4s, v19.4s, v17.4s\n"
- "zip2 v24.4s, v18.4s, v16.4s\n"
- "zip1 v19.4s, v22.4s, v21.4s\n"
- "zip1 v18.4s, v28.4s, v20.4s\n"
- "zip1 v17.4s, v27.4s, v26.4s\n"
- "zip1 v16.4s, v25.4s, v24.4s\n"
- ".inst 0x0ea16a77 // bfcvtn v23.4h, v19.4s\n"
- "zip2 v22.4s, v22.4s, v21.4s\n"
- ".inst 0x0ea16a55 // bfcvtn v21.4h, v18.4s\n"
- "zip2 v20.4s, v28.4s, v20.4s\n"
- ".inst 0x0ea16a33 // bfcvtn v19.4h, v17.4s\n"
- "zip2 v18.4s, v27.4s, v26.4s\n"
- ".inst 0x0ea16a11 // bfcvtn v17.4h, v16.4s\n"
- "zip2 v16.4s, v25.4s, v24.4s\n"
- ".inst 0x4ea16ad7 // bfcvtn2 v23.8h, v22.4s\n"
- ".inst 0x4ea16a95 // bfcvtn2 v21.8h, v20.4s\n"
- "str q23, [x21, #0x0]\n"
- ".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
- ".inst 0x4ea16a11 // bfcvtn2 v17.8h, v16.4s\n"
- "str q21, [x21, #0x10]\n"
- "str q19, [x21, #0xc0]\n"
- "str q17, [x21, #0xd0]\n"
- "add x21, x21, #0x20\n"
+ "sub x19, x19, #0x4\n"
+ "ldr q18, [x26], #0x10\n"
+ "cmp x19, #0x4\n"
+ "ldr q17, [x25], #0x10\n"
+ "zip1 v19.4s, v20.4s, v17.4s\n"
+ "ldr q16, [x24], #0x10\n"
+ "zip2 v25.4s, v20.4s, v17.4s\n"
+ "ldr q24, [x23], #0x10\n"
+ "ldr q23, [x22], #0x10\n"
+ "zip1 v17.4s, v18.4s, v16.4s\n"
+ "ldr q22, [x21], #0x10\n"
+ "zip2 v21.4s, v18.4s, v16.4s\n"
+ "ldr q20, [x20], #0x10\n"
+ "zip1 v16.4s, v19.4s, v17.4s\n"
+ ".inst 0x0ea16a12 // bfcvtn v18.4h, v16.4s\n"
+ "zip2 v17.4s, v19.4s, v17.4s\n"
+ "zip1 v16.4s, v25.4s, v21.4s\n"
+ ".inst 0x4ea16a32 // bfcvtn2 v18.8h, v17.4s\n"
+ "str q18, [x27, #0x0]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v16.4s, v25.4s, v21.4s\n"
+ "zip1 v18.4s, v24.4s, v22.4s\n"
+ "zip1 v17.4s, v23.4s, v20.4s\n"
+ ".inst 0x4ea16a13 // bfcvtn2 v19.8h, v16.4s\n"
+ "str q19, [x27, #0x10]\n"
+ "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v19.4s, v18.4s, v17.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ "zip2 v18.4s, v24.4s, v22.4s\n"
+ "zip2 v17.4s, v23.4s, v20.4s\n"
+ ".inst 0x4ea16a70 // bfcvtn2 v16.8h, v19.4s\n"
+ "str q16, [x27, #0xc0]\n"
+ "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v17.4s, v18.4s, v17.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ ".inst 0x4ea16a30 // bfcvtn2 v16.8h, v17.4s\n"
+ "str q16, [x27, #0xd0]\n"
+ "add x27, x27, #0x20\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
- "cmp x24, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
- "ldr s19, [x9], #0x4\n"
"ldr s18, [x28], #0x4\n"
- "sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr s17, [x27], #0x4\n"
- "ldr s16, [x26], #0x4\n"
- "zip1 v17.4s, v19.4s, v17.4s\n"
- "zip1 v16.4s, v18.4s, v16.4s\n"
- "ldr s20, [x25], #0x4\n"
- "ldr s19, [x23], #0x4\n"
+ "sub x19, x19, #0x1\n"
+ "ldr s17, [x26], #0x4\n"
+ "cmp x19, #0x1\n"
+ "ldr s16, [x25], #0x4\n"
+ "zip1 v18.4s, v18.4s, v16.4s\n"
+ "ldr s16, [x24], #0x4\n"
+ "ldr s20, [x23], #0x4\n"
"zip1 v16.4s, v17.4s, v16.4s\n"
- ".inst 0x0ea16a12 // bfcvtn v18.4h, v16.4s\n"
- "ldr s17, [x22], #0x4\n"
- "ldr s16, [x20], #0x4\n"
+ "ldr s19, [x22], #0x4\n"
+ "ldr s17, [x21], #0x4\n"
+ "zip1 v16.4s, v18.4s, v16.4s\n"
+ "ldr s18, [x20], #0x4\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
"zip1 v17.4s, v20.4s, v17.4s\n"
- "zip1 v16.4s, v19.4s, v16.4s\n"
+ "str d16, [x27, #0x0]\n"
+ "zip1 v16.4s, v19.4s, v18.4s\n"
"zip1 v16.4s, v17.4s, v16.4s\n"
".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
- "str d18, [x21, #0x0]\n"
- "str d16, [x21, #0xc0]\n"
- "add x21, x21, #0x8\n"
+ "str d16, [x27, #0xc0]\n"
+ "add x27, x27, #0x8\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
- "cmp %x[height], #0x8\n"
"add %x[out], %x[out], #0x180\n"
+ "cmp %x[height], #0x8\n"
"bge 1b\n"
"cbz %x[height], 20f\n"
"10:" // Main loop skip
+
"11:" // Tail row loop: Head
- "mov x9, %x[in]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
- "mov x20, %x[width]\n"
- "add x26, x27, %x[in_stride]\n"
+ "mov x28, %x[in]\n"
+ "mov x27, %x[out]\n"
+ "add x26, x28, %x[in_stride]\n"
+ "add x25, x26, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "add %x[in], x24, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x26, %x[in_stride]\n"
- "csel x26, x26, %x[pad_row], GT\n"
- "csel x27, x27, %x[pad_row], GE\n"
+ "csel x24, x24, %x[pad_row], GT\n"
+ "csel x25, x25, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x28, x28, %x[pad_row], GT\n"
- "cmp x20, #0x18\n"
- "mov x21, %x[out]\n"
+ "csel x26, x26, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x4\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x18\n"
"blt 13f\n"
"12:" // Tail row loop: Column loop
- "ldr q20, [x9], #0x10\n"
+ "ldr q17, [x28], #0x10\n"
+ "sub x19, x19, #0x18\n"
+ "ldr q20, [x26], #0x10\n"
+ "cmp x19, #0x18\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v19.4s, v17.4s, v16.4s\n"
"ldr q18, [x28], #0x10\n"
- "sub x20, x20, #0x18\n"
- "cmp x20, #0x18\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v9.4s, v20.4s, v17.4s\n"
- "zip1 v30.4s, v18.4s, v16.4s\n"
- "ldr q21, [x9], #0x10\n"
- "ldr q19, [x28], #0x10\n"
- "zip2 v17.4s, v20.4s, v17.4s\n"
- "zip2 v5.4s, v18.4s, v16.4s\n"
- "ldr q18, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v0.4s, v21.4s, v18.4s\n"
- "zip1 v3.4s, v19.4s, v16.4s\n"
- "ldr q23, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "zip2 v1.4s, v21.4s, v18.4s\n"
- "zip2 v16.4s, v19.4s, v16.4s\n"
- "ldr q19, [x27], #0x10\n"
- "ldr q18, [x26], #0x10\n"
- "zip1 v4.4s, v23.4s, v19.4s\n"
- "zip1 v2.4s, v20.4s, v18.4s\n"
- "ldr q22, [x9], #0x10\n"
- "ldr q21, [x28], #0x10\n"
- "zip2 v27.4s, v23.4s, v19.4s\n"
- "zip2 v28.4s, v20.4s, v18.4s\n"
- "ldr q20, [x27], #0x10\n"
- "ldr q24, [x26], #0x10\n"
- "zip1 v25.4s, v22.4s, v20.4s\n"
- "zip1 v26.4s, v21.4s, v24.4s\n"
- "ldr q19, [x9], #0x10\n"
+ "zip2 v9.4s, v17.4s, v16.4s\n"
+ "ldr q8, [x26], #0x10\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v7.4s, v18.4s, v16.4s\n"
+ "ldr q17, [x28], #0x10\n"
+ "zip2 v6.4s, v18.4s, v16.4s\n"
+ "ldr q5, [x26], #0x10\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v4.4s, v17.4s, v16.4s\n"
"ldr q18, [x28], #0x10\n"
- "zip2 v14.4s, v22.4s, v20.4s\n"
- "zip2 v12.4s, v21.4s, v24.4s\n"
- "ldr q31, [x27], #0x10\n"
- "ldr q24, [x26], #0x10\n"
- "zip1 v15.4s, v19.4s, v31.4s\n"
- "zip1 v13.4s, v18.4s, v24.4s\n"
- "ldr q21, [x9], #0x10\n"
- "ldr q11, [x28], #0x10\n"
- "zip2 v20.4s, v19.4s, v31.4s\n"
- "zip2 v10.4s, v18.4s, v24.4s\n"
- "ldr q22, [x27], #0x10\n"
- "ldr q23, [x26], #0x10\n"
- "zip1 v19.4s, v21.4s, v22.4s\n"
- "zip1 v18.4s, v11.4s, v23.4s\n"
- "zip2 v6.4s, v21.4s, v22.4s\n"
- "zip2 v11.4s, v11.4s, v23.4s\n"
- "zip1 v8.4s, v9.4s, v30.4s\n"
- "zip1 v21.4s, v17.4s, v5.4s\n"
- "zip1 v7.4s, v0.4s, v3.4s\n"
- "zip1 v31.4s, v1.4s, v16.4s\n"
- "zip1 v29.4s, v4.4s, v2.4s\n"
- "zip1 v22.4s, v27.4s, v28.4s\n"
- "zip1 v24.4s, v25.4s, v26.4s\n"
- "zip1 v23.4s, v14.4s, v12.4s\n"
- ".inst 0x0ea16908 // bfcvtn v8.4h, v8.4s\n"
- "zip2 v9.4s, v9.4s, v30.4s\n"
- "zip1 v30.4s, v15.4s, v13.4s\n"
- ".inst 0x0ea16ab5 // bfcvtn v21.4h, v21.4s\n"
- "zip2 v5.4s, v17.4s, v5.4s\n"
- "zip1 v17.4s, v20.4s, v10.4s\n"
- ".inst 0x0ea168e7 // bfcvtn v7.4h, v7.4s\n"
- "zip2 v0.4s, v0.4s, v3.4s\n"
- "zip1 v3.4s, v19.4s, v18.4s\n"
- ".inst 0x0ea16bff // bfcvtn v31.4h, v31.4s\n"
- "zip2 v16.4s, v1.4s, v16.4s\n"
- "zip1 v1.4s, v6.4s, v11.4s\n"
- ".inst 0x0ea16bbd // bfcvtn v29.4h, v29.4s\n"
- "zip2 v2.4s, v4.4s, v2.4s\n"
- ".inst 0x0ea16ac4 // bfcvtn v4.4h, v22.4s\n"
- "zip2 v27.4s, v27.4s, v28.4s\n"
- ".inst 0x0ea16b1c // bfcvtn v28.4h, v24.4s\n"
- "zip2 v25.4s, v25.4s, v26.4s\n"
- ".inst 0x0ea16afa // bfcvtn v26.4h, v23.4s\n"
- "zip2 v14.4s, v14.4s, v12.4s\n"
- ".inst 0x0ea16bd8 // bfcvtn v24.4h, v30.4s\n"
- "zip2 v13.4s, v15.4s, v13.4s\n"
- ".inst 0x0ea16a2f // bfcvtn v15.4h, v17.4s\n"
- "zip2 v12.4s, v20.4s, v10.4s\n"
- ".inst 0x0ea16874 // bfcvtn v20.4h, v3.4s\n"
- "zip2 v10.4s, v19.4s, v18.4s\n"
- ".inst 0x0ea16831 // bfcvtn v17.4h, v1.4s\n"
- "zip2 v18.4s, v6.4s, v11.4s\n"
- ".inst 0x4ea16928 // bfcvtn2 v8.8h, v9.4s\n"
- ".inst 0x4ea168b5 // bfcvtn2 v21.8h, v5.4s\n"
- "str q8, [x21, #0x0]\n"
- ".inst 0x4ea16807 // bfcvtn2 v7.8h, v0.4s\n"
- ".inst 0x4ea16a1f // bfcvtn2 v31.8h, v16.4s\n"
- "str q21, [x21, #0x10]\n"
- ".inst 0x4ea1685d // bfcvtn2 v29.8h, v2.4s\n"
- ".inst 0x4ea16b64 // bfcvtn2 v4.8h, v27.4s\n"
- "str q7, [x21, #0x20]\n"
- ".inst 0x4ea16b3c // bfcvtn2 v28.8h, v25.4s\n"
- ".inst 0x4ea169da // bfcvtn2 v26.8h, v14.4s\n"
- "str q31, [x21, #0x30]\n"
- ".inst 0x4ea169b8 // bfcvtn2 v24.8h, v13.4s\n"
- ".inst 0x4ea1698f // bfcvtn2 v15.8h, v12.4s\n"
- "str q29, [x21, #0x40]\n"
- ".inst 0x4ea16954 // bfcvtn2 v20.8h, v10.4s\n"
- ".inst 0x4ea16a51 // bfcvtn2 v17.8h, v18.4s\n"
- "str q4, [x21, #0x50]\n"
- "str q28, [x21, #0x60]\n"
- "str q26, [x21, #0x70]\n"
- "str q24, [x21, #0x80]\n"
- "str q15, [x21, #0x90]\n"
- "str q20, [x21, #0xa0]\n"
- "str q17, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "zip2 v3.4s, v17.4s, v16.4s\n"
+ "ldr q2, [x26], #0x10\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v1.4s, v18.4s, v16.4s\n"
+ "ldr q17, [x28], #0x10\n"
+ "zip2 v0.4s, v18.4s, v16.4s\n"
+ "ldr q31, [x26], #0x10\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v30.4s, v17.4s, v16.4s\n"
+ "ldr q18, [x28], #0x10\n"
+ "zip2 v29.4s, v17.4s, v16.4s\n"
+ "ldr q28, [x26], #0x10\n"
+ "ldr q17, [x25], #0x10\n"
+ "zip1 v27.4s, v18.4s, v17.4s\n"
+ "ldr q16, [x24], #0x10\n"
+ "zip2 v26.4s, v18.4s, v17.4s\n"
+ "ldr q25, [x24], #0x10\n"
+ "zip1 v17.4s, v20.4s, v16.4s\n"
+ "zip2 v24.4s, v20.4s, v16.4s\n"
+ "ldr q23, [x24], #0x10\n"
+ "zip1 v16.4s, v19.4s, v17.4s\n"
+ "zip2 v17.4s, v19.4s, v17.4s\n"
+ "ldr q22, [x24], #0x10\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip1 v16.4s, v9.4s, v24.4s\n"
+ "ldr q21, [x24], #0x10\n"
+ ".inst 0x4ea16a33 // bfcvtn2 v19.8h, v17.4s\n"
+ ".inst 0x0ea16a12 // bfcvtn v18.4h, v16.4s\n"
+ "ldr q20, [x24], #0x10\n"
+ "zip2 v16.4s, v9.4s, v24.4s\n"
+ "zip1 v17.4s, v8.4s, v25.4s\n"
+ "str q19, [x27, #0x0]\n"
+ ".inst 0x4ea16a12 // bfcvtn2 v18.8h, v16.4s\n"
+ "str q18, [x27, #0x10]\n"
+ "zip1 v16.4s, v7.4s, v17.4s\n"
+ "zip2 v19.4s, v7.4s, v17.4s\n"
+ ".inst 0x0ea16a12 // bfcvtn v18.4h, v16.4s\n"
+ "zip2 v17.4s, v8.4s, v25.4s\n"
+ "zip1 v16.4s, v6.4s, v17.4s\n"
+ ".inst 0x4ea16a72 // bfcvtn2 v18.8h, v19.4s\n"
+ "str q18, [x27, #0x20]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v18.4s, v6.4s, v17.4s\n"
+ "zip1 v17.4s, v5.4s, v23.4s\n"
+ "zip1 v16.4s, v4.4s, v17.4s\n"
+ ".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
+ "str q19, [x27, #0x30]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v18.4s, v4.4s, v17.4s\n"
+ "zip2 v17.4s, v5.4s, v23.4s\n"
+ "zip1 v16.4s, v3.4s, v17.4s\n"
+ ".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
+ "str q19, [x27, #0x40]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v18.4s, v3.4s, v17.4s\n"
+ "zip1 v17.4s, v2.4s, v22.4s\n"
+ "zip1 v16.4s, v1.4s, v17.4s\n"
+ ".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
+ "str q19, [x27, #0x50]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v18.4s, v1.4s, v17.4s\n"
+ "zip2 v17.4s, v2.4s, v22.4s\n"
+ "zip1 v16.4s, v0.4s, v17.4s\n"
+ ".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
+ "str q19, [x27, #0x60]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v18.4s, v0.4s, v17.4s\n"
+ "zip1 v17.4s, v31.4s, v21.4s\n"
+ "zip1 v16.4s, v30.4s, v17.4s\n"
+ ".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
+ "str q19, [x27, #0x70]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v18.4s, v30.4s, v17.4s\n"
+ "zip2 v17.4s, v31.4s, v21.4s\n"
+ "zip1 v16.4s, v29.4s, v17.4s\n"
+ ".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
+ "str q19, [x27, #0x80]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v18.4s, v29.4s, v17.4s\n"
+ "zip1 v17.4s, v28.4s, v20.4s\n"
+ "zip1 v16.4s, v27.4s, v17.4s\n"
+ ".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
+ "str q19, [x27, #0x90]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v17.4s, v27.4s, v17.4s\n"
+ "zip2 v18.4s, v28.4s, v20.4s\n"
+ "zip1 v16.4s, v26.4s, v18.4s\n"
+ ".inst 0x4ea16a33 // bfcvtn2 v19.8h, v17.4s\n"
+ "str q19, [x27, #0xa0]\n"
+ ".inst 0x0ea16a11 // bfcvtn v17.4h, v16.4s\n"
+ "zip2 v16.4s, v26.4s, v18.4s\n"
+ ".inst 0x4ea16a11 // bfcvtn2 v17.8h, v16.4s\n"
+ "str q17, [x27, #0xb0]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 12b\n"
"13:" // Tail row loop: Column loop skip
- "cmp x20, #0x10\n"
+ "cmp x19, #0x10\n"
"blt 15f\n"
"14:" // Tail row loop: width 16 loop: loop
- "ldr q21, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "sub x20, x20, #0x10\n"
- "cmp x20, #0x10\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v30.4s, v21.4s, v17.4s\n"
- "zip1 v29.4s, v20.4s, v16.4s\n"
- "ldr q19, [x9], #0x10\n"
+ "ldr q17, [x28], #0x10\n"
+ "sub x19, x19, #0x10\n"
+ "ldr q20, [x26], #0x10\n"
+ "cmp x19, #0x10\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v19.4s, v17.4s, v16.4s\n"
"ldr q18, [x28], #0x10\n"
- "zip2 v28.4s, v21.4s, v17.4s\n"
- "zip2 v27.4s, v20.4s, v16.4s\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v26.4s, v19.4s, v17.4s\n"
- "zip1 v25.4s, v18.4s, v16.4s\n"
- "ldr q21, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "zip2 v8.4s, v19.4s, v17.4s\n"
- "zip2 v24.4s, v18.4s, v16.4s\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v7.4s, v21.4s, v17.4s\n"
- "zip1 v6.4s, v20.4s, v16.4s\n"
- "ldr q19, [x9], #0x10\n"
+ "zip2 v1.4s, v17.4s, v16.4s\n"
+ "ldr q0, [x26], #0x10\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v31.4s, v18.4s, v16.4s\n"
+ "ldr q17, [x28], #0x10\n"
+ "zip2 v30.4s, v18.4s, v16.4s\n"
+ "ldr q29, [x26], #0x10\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip1 v28.4s, v17.4s, v16.4s\n"
"ldr q18, [x28], #0x10\n"
- "zip2 v5.4s, v21.4s, v17.4s\n"
- "zip2 v4.4s, v20.4s, v16.4s\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v3.4s, v19.4s, v17.4s\n"
- "zip1 v2.4s, v18.4s, v16.4s\n"
- "zip2 v1.4s, v19.4s, v17.4s\n"
- "zip2 v0.4s, v18.4s, v16.4s\n"
- "zip1 v23.4s, v30.4s, v29.4s\n"
- "zip1 v22.4s, v28.4s, v27.4s\n"
- "zip1 v21.4s, v26.4s, v25.4s\n"
- "zip1 v20.4s, v8.4s, v24.4s\n"
- "zip1 v19.4s, v7.4s, v6.4s\n"
- "zip1 v18.4s, v5.4s, v4.4s\n"
- "zip1 v17.4s, v3.4s, v2.4s\n"
- "zip1 v16.4s, v1.4s, v0.4s\n"
- ".inst 0x0ea16aff // bfcvtn v31.4h, v23.4s\n"
- "zip2 v30.4s, v30.4s, v29.4s\n"
- ".inst 0x0ea16add // bfcvtn v29.4h, v22.4s\n"
- "zip2 v28.4s, v28.4s, v27.4s\n"
- ".inst 0x0ea16abb // bfcvtn v27.4h, v21.4s\n"
- "zip2 v26.4s, v26.4s, v25.4s\n"
- ".inst 0x0ea16a99 // bfcvtn v25.4h, v20.4s\n"
- "zip2 v24.4s, v8.4s, v24.4s\n"
- ".inst 0x0ea16a77 // bfcvtn v23.4h, v19.4s\n"
- "zip2 v22.4s, v7.4s, v6.4s\n"
- ".inst 0x0ea16a55 // bfcvtn v21.4h, v18.4s\n"
- "zip2 v20.4s, v5.4s, v4.4s\n"
- ".inst 0x0ea16a33 // bfcvtn v19.4h, v17.4s\n"
- "zip2 v18.4s, v3.4s, v2.4s\n"
+ "zip2 v27.4s, v17.4s, v16.4s\n"
+ "ldr q26, [x26], #0x10\n"
+ "ldr q17, [x25], #0x10\n"
+ "zip1 v25.4s, v18.4s, v17.4s\n"
+ "ldr q16, [x24], #0x10\n"
+ "zip2 v24.4s, v18.4s, v17.4s\n"
+ "ldr q23, [x24], #0x10\n"
+ "zip1 v17.4s, v20.4s, v16.4s\n"
+ "zip2 v22.4s, v20.4s, v16.4s\n"
+ "ldr q21, [x24], #0x10\n"
+ "zip1 v16.4s, v19.4s, v17.4s\n"
+ "zip2 v19.4s, v19.4s, v17.4s\n"
+ "ldr q20, [x24], #0x10\n"
".inst 0x0ea16a11 // bfcvtn v17.4h, v16.4s\n"
- "zip2 v16.4s, v1.4s, v0.4s\n"
- ".inst 0x4ea16bdf // bfcvtn2 v31.8h, v30.4s\n"
- ".inst 0x4ea16b9d // bfcvtn2 v29.8h, v28.4s\n"
- "str q31, [x21, #0x0]\n"
- ".inst 0x4ea16b5b // bfcvtn2 v27.8h, v26.4s\n"
- ".inst 0x4ea16b19 // bfcvtn2 v25.8h, v24.4s\n"
- "str q29, [x21, #0x10]\n"
- ".inst 0x4ea16ad7 // bfcvtn2 v23.8h, v22.4s\n"
- ".inst 0x4ea16a95 // bfcvtn2 v21.8h, v20.4s\n"
- "str q27, [x21, #0x20]\n"
+ "zip1 v16.4s, v1.4s, v22.4s\n"
+ ".inst 0x0ea16a12 // bfcvtn v18.4h, v16.4s\n"
+ "zip2 v16.4s, v1.4s, v22.4s\n"
+ ".inst 0x4ea16a71 // bfcvtn2 v17.8h, v19.4s\n"
+ "str q17, [x27, #0x0]\n"
+ ".inst 0x4ea16a12 // bfcvtn2 v18.8h, v16.4s\n"
+ "zip1 v17.4s, v0.4s, v23.4s\n"
+ "str q18, [x27, #0x10]\n"
+ "zip1 v16.4s, v31.4s, v17.4s\n"
+ "zip2 v19.4s, v31.4s, v17.4s\n"
+ ".inst 0x0ea16a12 // bfcvtn v18.4h, v16.4s\n"
+ "zip2 v17.4s, v0.4s, v23.4s\n"
+ "zip1 v16.4s, v30.4s, v17.4s\n"
+ ".inst 0x4ea16a72 // bfcvtn2 v18.8h, v19.4s\n"
+ "str q18, [x27, #0x20]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v18.4s, v30.4s, v17.4s\n"
+ "zip1 v17.4s, v29.4s, v21.4s\n"
+ "zip1 v16.4s, v28.4s, v17.4s\n"
+ ".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
+ "str q19, [x27, #0x30]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v18.4s, v28.4s, v17.4s\n"
+ "zip2 v17.4s, v29.4s, v21.4s\n"
+ "zip1 v16.4s, v27.4s, v17.4s\n"
".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
+ "str q19, [x27, #0x40]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v18.4s, v27.4s, v17.4s\n"
+ "zip1 v17.4s, v26.4s, v20.4s\n"
+ "zip1 v16.4s, v25.4s, v17.4s\n"
+ ".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
+ "str q19, [x27, #0x50]\n"
+ ".inst 0x0ea16a13 // bfcvtn v19.4h, v16.4s\n"
+ "zip2 v17.4s, v25.4s, v17.4s\n"
+ "zip2 v18.4s, v26.4s, v20.4s\n"
+ "zip1 v16.4s, v24.4s, v18.4s\n"
+ ".inst 0x4ea16a33 // bfcvtn2 v19.8h, v17.4s\n"
+ "str q19, [x27, #0x60]\n"
+ ".inst 0x0ea16a11 // bfcvtn v17.4h, v16.4s\n"
+ "zip2 v16.4s, v24.4s, v18.4s\n"
".inst 0x4ea16a11 // bfcvtn2 v17.8h, v16.4s\n"
- "str q25, [x21, #0x30]\n"
- "str q23, [x21, #0x40]\n"
- "str q21, [x21, #0x50]\n"
- "str q19, [x21, #0x60]\n"
- "str q17, [x21, #0x70]\n"
- "add x21, x21, #0x80\n"
+ "str q17, [x27, #0x70]\n"
+ "add x27, x27, #0x80\n"
"bge 14b\n"
"15:" // Tail row loop: width 16 loop: skip
- "cmp x20, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 17f\n"
"16:" // Tail row loop: width 4 loop: loop
- "ldr q20, [x9], #0x10\n"
"ldr q19, [x28], #0x10\n"
- "sub x20, x20, #0x4\n"
- "cmp x20, #0x4\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v22.4s, v20.4s, v17.4s\n"
- "zip1 v18.4s, v19.4s, v16.4s\n"
- "zip2 v21.4s, v20.4s, v17.4s\n"
- "zip2 v20.4s, v19.4s, v16.4s\n"
- "zip1 v17.4s, v22.4s, v18.4s\n"
- "zip1 v16.4s, v21.4s, v20.4s\n"
- ".inst 0x0ea16a33 // bfcvtn v19.4h, v17.4s\n"
- "zip2 v18.4s, v22.4s, v18.4s\n"
+ "sub x19, x19, #0x4\n"
+ "ldr q18, [x26], #0x10\n"
+ "cmp x19, #0x4\n"
+ "ldr q17, [x25], #0x10\n"
+ "zip1 v21.4s, v19.4s, v17.4s\n"
+ "ldr q16, [x24], #0x10\n"
+ "zip2 v20.4s, v19.4s, v17.4s\n"
+ "zip1 v17.4s, v18.4s, v16.4s\n"
+ "zip2 v19.4s, v18.4s, v16.4s\n"
+ "zip1 v16.4s, v21.4s, v17.4s\n"
+ ".inst 0x0ea16a12 // bfcvtn v18.4h, v16.4s\n"
+ "zip2 v17.4s, v21.4s, v17.4s\n"
+ "zip1 v16.4s, v20.4s, v19.4s\n"
+ ".inst 0x4ea16a32 // bfcvtn2 v18.8h, v17.4s\n"
+ "str q18, [x27, #0x0]\n"
".inst 0x0ea16a11 // bfcvtn v17.4h, v16.4s\n"
- "zip2 v16.4s, v21.4s, v20.4s\n"
- ".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
+ "zip2 v16.4s, v20.4s, v19.4s\n"
".inst 0x4ea16a11 // bfcvtn2 v17.8h, v16.4s\n"
- "str q19, [x21, #0x0]\n"
- "str q17, [x21, #0x10]\n"
- "add x21, x21, #0x20\n"
+ "str q17, [x27, #0x10]\n"
+ "add x27, x27, #0x20\n"
"bge 16b\n"
"17:" // Tail row loop: width 4 loop: skip
- "cmp x20, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 19f\n"
"18:" // Tail row loop: width 1 loop: loop
- "ldr s19, [x9], #0x4\n"
- "ldr s18, [x28], #0x4\n"
- "sub x20, x20, #0x1\n"
- "cmp x20, #0x1\n"
- "ldr s17, [x27], #0x4\n"
- "ldr s16, [x26], #0x4\n"
- "zip1 v17.4s, v19.4s, v17.4s\n"
+ "ldr s17, [x28], #0x4\n"
+ "sub x19, x19, #0x1\n"
+ "ldr s18, [x26], #0x4\n"
+ "cmp x19, #0x1\n"
+ "ldr s16, [x25], #0x4\n"
+ "zip1 v17.4s, v17.4s, v16.4s\n"
+ "ldr s16, [x24], #0x4\n"
"zip1 v16.4s, v18.4s, v16.4s\n"
"zip1 v16.4s, v17.4s, v16.4s\n"
".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
- "str d16, [x21, #0x0]\n"
- "add x21, x21, #0x8\n"
+ "str d16, [x27, #0x0]\n"
+ "add x27, x27, #0x8\n"
"bge 18b\n"
"19:" // Tail row loop: width 1 loop: skip
- "cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0xc0\n"
+ "cmp %x[height], #0x1\n"
"bge 11b\n"
"20:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24_bf16fp32.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24_bf16fp32.hpp
index 799a9cd91d..0a628d372e 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24_bf16fp32.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24_bf16fp32.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -36,245 +36,245 @@ void a64_transpose_interleave_24_bf16fp32(float *out, const bfloat16 *in, size_t
"cmp %x[height], #0x4\n"
"blt 10f\n"
"1:" // Main row loop: Head
- "mov x25, %x[in]\n"
- "mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
- "add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
- "cmp x24, #0x18\n"
+ "mov x24, %x[in]\n"
+ "mov x23, %x[out]\n"
+ "add x22, x24, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x18\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ldr q18, [x25], #0x10\n"
- "ldr q17, [x23], #0x10\n"
- "sub x24, x24, #0x18\n"
- "shll v26.4s, v18.4h, #0x10\n"
+ "ldr q17, [x24], #0x10\n"
+ "shll v3.4s, v17.4h, #0x10\n"
+ "ldr q16, [x24], #0x10\n"
+ "sub x19, x19, #0x18\n"
+ "shll2 v23.4s, v17.8h, #0x10\n"
+ "ldr q17, [x22], #0x10\n"
+ "cmp x19, #0x18\n"
+ "shll v22.4s, v16.4h, #0x10\n"
+ "ldr q19, [x24], #0x10\n"
+ "shll2 v2.4s, v16.8h, #0x10\n"
"ldr q16, [x22], #0x10\n"
- "ldr q25, [x20], #0x10\n"
- "shll2 v24.4s, v18.8h, #0x10\n"
- "shll v5.4s, v17.4h, #0x10\n"
- "ldr q23, [x25], #0x10\n"
- "ldr q22, [x23], #0x10\n"
- "shll v21.4s, v23.4h, #0x10\n"
- "shll2 v4.4s, v17.8h, #0x10\n"
- "ldr q20, [x22], #0x10\n"
- "ldr q3, [x20], #0x10\n"
- "shll v2.4s, v22.4h, #0x10\n"
- "shll v1.4s, v16.4h, #0x10\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "shll2 v0.4s, v16.8h, #0x10\n"
- "shll v31.4s, v20.4h, #0x10\n"
+ "shll v21.4s, v17.4h, #0x10\n"
+ "ldr q18, [x21], #0x10\n"
+ "shll2 v1.4s, v17.8h, #0x10\n"
+ "ldr q0, [x20], #0x10\n"
+ "shll v31.4s, v19.4h, #0x10\n"
"ldr q17, [x22], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "shll v30.4s, v25.4h, #0x10\n"
- "shll2 v29.4s, v25.8h, #0x10\n"
- "shll v28.4s, v3.4h, #0x10\n"
- "str q26, [x21, #0x0]\n"
- "cmp x24, #0x18\n"
- "shll2 v27.4s, v23.8h, #0x10\n"
- "str q24, [x21, #0x10]\n"
- "shll v26.4s, v19.4h, #0x10\n"
- "shll2 v25.4s, v19.8h, #0x10\n"
- "str q21, [x21, #0x20]\n"
- "shll2 v24.4s, v22.8h, #0x10\n"
- "shll v23.4s, v18.4h, #0x10\n"
- "str q5, [x21, #0x30]\n"
- "shll2 v22.4s, v18.8h, #0x10\n"
- "shll2 v21.4s, v20.8h, #0x10\n"
- "str q4, [x21, #0x40]\n"
- "shll v20.4s, v17.4h, #0x10\n"
- "shll2 v19.4s, v17.8h, #0x10\n"
- "str q2, [x21, #0x50]\n"
- "shll2 v18.4s, v3.8h, #0x10\n"
- "shll v17.4s, v16.4h, #0x10\n"
- "str q1, [x21, #0x60]\n"
- "shll2 v16.4s, v16.8h, #0x10\n"
- "str q0, [x21, #0x70]\n"
- "str q31, [x21, #0x80]\n"
- "str q30, [x21, #0x90]\n"
- "str q29, [x21, #0xa0]\n"
- "str q28, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
- "str q27, [x21, #0x0]\n"
- "str q26, [x21, #0x10]\n"
- "str q25, [x21, #0x20]\n"
- "str q24, [x21, #0x30]\n"
- "str q23, [x21, #0x40]\n"
- "str q22, [x21, #0x50]\n"
- "str q21, [x21, #0x60]\n"
- "str q20, [x21, #0x70]\n"
- "str q19, [x21, #0x80]\n"
- "str q18, [x21, #0x90]\n"
- "str q17, [x21, #0xa0]\n"
- "str q16, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "shll2 v30.4s, v19.8h, #0x10\n"
+ "shll v29.4s, v16.4h, #0x10\n"
+ "ldr q20, [x21], #0x10\n"
+ "shll2 v28.4s, v16.8h, #0x10\n"
+ "ldr q27, [x20], #0x10\n"
+ "shll v19.4s, v18.4h, #0x10\n"
+ "ldr q16, [x21], #0x10\n"
+ "shll v26.4s, v17.4h, #0x10\n"
+ "shll2 v25.4s, v17.8h, #0x10\n"
+ "ldr q24, [x20], #0x10\n"
+ "shll2 v18.4s, v18.8h, #0x10\n"
+ "str q3, [x23, #0x0]\n"
+ "shll v17.4s, v20.4h, #0x10\n"
+ "str q23, [x23, #0x10]\n"
+ "shll2 v23.4s, v20.8h, #0x10\n"
+ "str q22, [x23, #0x20]\n"
+ "shll v22.4s, v16.4h, #0x10\n"
+ "str q21, [x23, #0x30]\n"
+ "shll2 v21.4s, v16.8h, #0x10\n"
+ "str q1, [x23, #0x40]\n"
+ "shll v16.4s, v0.4h, #0x10\n"
+ "str q29, [x23, #0x50]\n"
+ "shll2 v20.4s, v0.8h, #0x10\n"
+ "str q19, [x23, #0x60]\n"
+ "shll v19.4s, v27.4h, #0x10\n"
+ "str q18, [x23, #0x70]\n"
+ "shll2 v18.4s, v27.8h, #0x10\n"
+ "str q17, [x23, #0x80]\n"
+ "shll v17.4s, v24.4h, #0x10\n"
+ "str q16, [x23, #0x90]\n"
+ "shll2 v16.4s, v24.8h, #0x10\n"
+ "str q20, [x23, #0xa0]\n"
+ "str q19, [x23, #0xb0]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "str q2, [x23, #0x0]\n"
+ "str q31, [x23, #0x10]\n"
+ "str q30, [x23, #0x20]\n"
+ "str q28, [x23, #0x30]\n"
+ "str q26, [x23, #0x40]\n"
+ "str q25, [x23, #0x50]\n"
+ "str q23, [x23, #0x60]\n"
+ "str q22, [x23, #0x70]\n"
+ "str q21, [x23, #0x80]\n"
+ "str q18, [x23, #0x90]\n"
+ "str q17, [x23, #0xa0]\n"
+ "str q16, [x23, #0xb0]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cmp x24, #0xc\n"
+ "cmp x19, #0xc\n"
"blt 5f\n"
"4:" // Main row loop: Column loop
- "ldr q16, [x25], #0x10\n"
- "ldr q21, [x23], #0x10\n"
- "sub x24, x24, #0xc\n"
- "cmp x24, #0xc\n"
- "ldr q20, [x22], #0x10\n"
- "ldr q27, [x20], #0x10\n"
- "shll v19.4s, v16.4h, #0x10\n"
- "shll2 v26.4s, v16.8h, #0x10\n"
- "ldr d16, [x25], #0x8\n"
- "ldr d18, [x23], #0x8\n"
- "shll v25.4s, v16.4h, #0x10\n"
- "shll v24.4s, v21.4h, #0x10\n"
- "ldr d17, [x22], #0x8\n"
- "ldr d16, [x20], #0x8\n"
- "shll2 v23.4s, v21.8h, #0x10\n"
- "shll v22.4s, v18.4h, #0x10\n"
- "shll v21.4s, v20.4h, #0x10\n"
- "shll2 v20.4s, v20.8h, #0x10\n"
- "str q19, [x21, #0x0]\n"
+ "ldr q17, [x24], #0x10\n"
"shll v19.4s, v17.4h, #0x10\n"
- "shll v18.4s, v27.4h, #0x10\n"
- "str q26, [x21, #0x10]\n"
- "shll2 v17.4s, v27.8h, #0x10\n"
+ "ldr d16, [x24], #0x8\n"
+ "sub x19, x19, #0xc\n"
+ "shll2 v27.4s, v17.8h, #0x10\n"
+ "ldr q17, [x22], #0x10\n"
+ "cmp x19, #0xc\n"
+ "shll v26.4s, v16.4h, #0x10\n"
+ "ldr q16, [x21], #0x10\n"
+ "ldr q25, [x20], #0x10\n"
+ "shll v24.4s, v17.4h, #0x10\n"
+ "shll2 v23.4s, v17.8h, #0x10\n"
+ "ldr d18, [x22], #0x8\n"
+ "shll v22.4s, v16.4h, #0x10\n"
+ "ldr d17, [x21], #0x8\n"
+ "shll2 v21.4s, v16.8h, #0x10\n"
+ "ldr d16, [x20], #0x8\n"
+ "shll v20.4s, v25.4h, #0x10\n"
+ "str q19, [x23, #0x0]\n"
+ "shll v19.4s, v18.4h, #0x10\n"
+ "str q27, [x23, #0x10]\n"
+ "shll2 v18.4s, v25.8h, #0x10\n"
+ "str q26, [x23, #0x20]\n"
+ "shll v17.4s, v17.4h, #0x10\n"
+ "str q24, [x23, #0x30]\n"
"shll v16.4s, v16.4h, #0x10\n"
- "str q25, [x21, #0x20]\n"
- "str q24, [x21, #0x30]\n"
- "str q23, [x21, #0x40]\n"
- "str q22, [x21, #0x50]\n"
- "str q21, [x21, #0x60]\n"
- "str q20, [x21, #0x70]\n"
- "str q19, [x21, #0x80]\n"
- "str q18, [x21, #0x90]\n"
- "str q17, [x21, #0xa0]\n"
- "str q16, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q23, [x23, #0x40]\n"
+ "str q19, [x23, #0x50]\n"
+ "str q22, [x23, #0x60]\n"
+ "str q21, [x23, #0x70]\n"
+ "str q17, [x23, #0x80]\n"
+ "str q20, [x23, #0x90]\n"
+ "str q18, [x23, #0xa0]\n"
+ "str q16, [x23, #0xb0]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp x24, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
- "ldr d19, [x25], #0x8\n"
- "ldr d18, [x23], #0x8\n"
- "sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr d17, [x22], #0x8\n"
+ "ldr d16, [x24], #0x8\n"
+ "shll v19.4s, v16.4h, #0x10\n"
+ "ldr d16, [x22], #0x8\n"
+ "sub x19, x19, #0x4\n"
+ "shll v18.4s, v16.4h, #0x10\n"
+ "ldr d16, [x21], #0x8\n"
+ "cmp x19, #0x4\n"
+ "shll v17.4s, v16.4h, #0x10\n"
"ldr d16, [x20], #0x8\n"
- "shll v19.4s, v19.4h, #0x10\n"
- "shll v18.4s, v18.4h, #0x10\n"
- "shll v17.4s, v17.4h, #0x10\n"
+ "str q19, [x23, #0x0]\n"
"shll v16.4s, v16.4h, #0x10\n"
- "str q19, [x21, #0x0]\n"
- "str q18, [x21, #0x30]\n"
- "str q17, [x21, #0x60]\n"
- "str q16, [x21, #0x90]\n"
- "add x21, x21, #0x10\n"
+ "str q18, [x23, #0x30]\n"
+ "str q17, [x23, #0x60]\n"
+ "str q16, [x23, #0x90]\n"
+ "add x23, x23, #0x10\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
- "cmp x24, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
- "ldr h19, [x25], #0x2\n"
- "ldr h18, [x23], #0x2\n"
- "sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr h17, [x22], #0x2\n"
+ "ldr h16, [x24], #0x2\n"
+ "shll v19.4s, v16.4h, #0x10\n"
+ "ldr h16, [x22], #0x2\n"
+ "sub x19, x19, #0x1\n"
+ "shll v18.4s, v16.4h, #0x10\n"
+ "ldr h16, [x21], #0x2\n"
+ "cmp x19, #0x1\n"
+ "shll v17.4s, v16.4h, #0x10\n"
"ldr h16, [x20], #0x2\n"
- "shll v19.4s, v19.4h, #0x10\n"
- "shll v18.4s, v18.4h, #0x10\n"
- "shll v17.4s, v17.4h, #0x10\n"
+ "str s19, [x23, #0x0]\n"
"shll v16.4s, v16.4h, #0x10\n"
- "str s19, [x21, #0x0]\n"
- "str s18, [x21, #0x30]\n"
- "str s17, [x21, #0x60]\n"
- "str s16, [x21, #0x90]\n"
- "add x21, x21, #0x4\n"
+ "str s18, [x23, #0x30]\n"
+ "str s17, [x23, #0x60]\n"
+ "str s16, [x23, #0x90]\n"
+ "add x23, x23, #0x4\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
- "cmp %x[height], #0x4\n"
"add %x[out], %x[out], #0xc0\n"
+ "cmp %x[height], #0x4\n"
"bge 1b\n"
"cbz %x[height], 20f\n"
"10:" // Main loop skip
"11:" // Tail row loop: Head
- "mov x20, %x[width]\n"
- "mov x25, %x[in]\n"
- "cmp x20, #0x18\n"
- "add %x[in], x25, %x[in_stride]\n"
- "mov x21, %x[out]\n"
+ "mov x24, %x[in]\n"
+ "mov x23, %x[out]\n"
+ "add %x[in], x24, %x[in_stride]\n"
"sub %x[height], %x[height], #0x1\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x18\n"
"blt 13f\n"
"12:" // Tail row loop: Unroll column loop
- "ldr q16, [x25], #0x10\n"
- "ldr q20, [x25], #0x10\n"
- "sub x20, x20, #0x18\n"
- "shll v18.4s, v16.4h, #0x10\n"
- "ldr q19, [x25], #0x10\n"
+ "ldr q16, [x24], #0x10\n"
+ "shll v20.4s, v16.4h, #0x10\n"
+ "ldr q18, [x24], #0x10\n"
+ "sub x19, x19, #0x18\n"
"shll2 v17.4s, v16.8h, #0x10\n"
- "shll v16.4s, v20.4h, #0x10\n"
- "str q18, [x21, #0x0]\n"
- "str q17, [x21, #0x10]\n"
- "cmp x20, #0x18\n"
- "shll2 v18.4s, v20.8h, #0x10\n"
+ "ldr q19, [x24], #0x10\n"
+ "shll v16.4s, v18.4h, #0x10\n"
+ "cmp x19, #0x18\n"
+ "shll2 v18.4s, v18.8h, #0x10\n"
+ "str q20, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
"shll v17.4s, v19.4h, #0x10\n"
- "str q16, [x21, #0x20]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q16, [x23, #0x20]\n"
+ "add x23, x23, %x[out_stride]\n"
"shll2 v16.4s, v19.8h, #0x10\n"
- "str q18, [x21, #0x0]\n"
- "str q17, [x21, #0x10]\n"
- "str q16, [x21, #0x20]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q18, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q16, [x23, #0x20]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 12b\n"
"13:" // Tail row loop: Unroll column loop skip
- "cmp x20, #0xc\n"
+ "cmp x19, #0xc\n"
"blt 15f\n"
"14:" // Tail row loop: Column loop
- "ldr q17, [x25], #0x10\n"
- "ldr d18, [x25], #0x8\n"
- "sub x20, x20, #0xc\n"
- "cmp x20, #0xc\n"
- "shll v16.4s, v17.4h, #0x10\n"
+ "ldr q17, [x24], #0x10\n"
+ "shll v18.4s, v17.4h, #0x10\n"
+ "ldr d16, [x24], #0x8\n"
+ "sub x19, x19, #0xc\n"
"shll2 v17.4s, v17.8h, #0x10\n"
- "str q16, [x21, #0x0]\n"
- "shll v16.4s, v18.4h, #0x10\n"
- "str q17, [x21, #0x10]\n"
- "str q16, [x21, #0x20]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q18, [x23, #0x0]\n"
+ "cmp x19, #0xc\n"
+ "shll v16.4s, v16.4h, #0x10\n"
+ "str q17, [x23, #0x10]\n"
+ "str q16, [x23, #0x20]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 14b\n"
"15:" // Tail row loop: Column loop skip
- "cmp x20, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 17f\n"
"16:" // Tail row loop: width 4 loop: loop
- "ldr d16, [x25], #0x8\n"
- "sub x20, x20, #0x4\n"
- "cmp x20, #0x4\n"
+ "ldr d16, [x24], #0x8\n"
"shll v16.4s, v16.4h, #0x10\n"
- "str q16, [x21, #0x0]\n"
- "add x21, x21, #0x10\n"
+ "str q16, [x23, #0x0]\n"
+ "sub x19, x19, #0x4\n"
+ "add x23, x23, #0x10\n"
+ "cmp x19, #0x4\n"
"bge 16b\n"
"17:" // Tail row loop: width 4 loop: skip
- "cmp x20, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 19f\n"
"18:" // Tail row loop: width 1 loop: loop
- "ldr h16, [x25], #0x2\n"
- "sub x20, x20, #0x1\n"
- "cmp x20, #0x1\n"
+ "ldr h16, [x24], #0x2\n"
"shll v16.4s, v16.4h, #0x10\n"
- "str s16, [x21, #0x0]\n"
- "add x21, x21, #0x4\n"
+ "str s16, [x23, #0x0]\n"
+ "sub x19, x19, #0x1\n"
+ "add x23, x23, #0x4\n"
+ "cmp x19, #0x1\n"
"bge 18b\n"
"19:" // Tail row loop: width 1 loop: skip
- "cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x30\n"
+ "cmp %x[height], #0x1\n"
"bge 11b\n"
"20:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24_fp16fp32.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24_fp16fp32.hpp
index 621c5f99ff..7bac8173e7 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24_fp16fp32.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24_fp16fp32.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -36,244 +36,245 @@ void a64_transpose_interleave_24_fp16fp32(float *out, const __fp16 *in, size_t w
"cmp %x[height], #0x4\n"
"blt 10f\n"
"1:" // Main row loop: Head
- "mov x25, %x[in]\n"
- "mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
- "add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
- "cmp x24, #0x18\n"
+ "mov x24, %x[in]\n"
+ "mov x23, %x[out]\n"
+ "add x22, x24, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x18\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ldr q18, [x25], #0x10\n"
- "ldr q17, [x23], #0x10\n"
- "sub x24, x24, #0x18\n"
- "fcvtl v26.4s, v18.4h\n"
+ "ldr q17, [x24], #0x10\n"
+ "fcvtl v3.4s, v17.4h\n"
+ "ldr q16, [x24], #0x10\n"
+ "sub x19, x19, #0x18\n"
+ "fcvtl2 v23.4s, v17.8h\n"
+ "ldr q17, [x22], #0x10\n"
+ "cmp x19, #0x18\n"
+ "fcvtl v22.4s, v16.4h\n"
+ "ldr q19, [x24], #0x10\n"
+ "fcvtl2 v2.4s, v16.8h\n"
"ldr q16, [x22], #0x10\n"
- "ldr q25, [x20], #0x10\n"
- "fcvtl2 v24.4s, v18.8h\n"
- "fcvtl v5.4s, v17.4h\n"
- "ldr q23, [x25], #0x10\n"
- "ldr q22, [x23], #0x10\n"
- "fcvtl v21.4s, v23.4h\n"
- "fcvtl2 v4.4s, v17.8h\n"
- "ldr q20, [x22], #0x10\n"
- "ldr q3, [x20], #0x10\n"
- "fcvtl v2.4s, v22.4h\n"
- "fcvtl v1.4s, v16.4h\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "fcvtl2 v0.4s, v16.8h\n"
- "fcvtl v31.4s, v20.4h\n"
+ "fcvtl v21.4s, v17.4h\n"
+ "ldr q18, [x21], #0x10\n"
+ "fcvtl2 v1.4s, v17.8h\n"
+ "ldr q0, [x20], #0x10\n"
+ "fcvtl v31.4s, v19.4h\n"
"ldr q17, [x22], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "fcvtl v30.4s, v25.4h\n"
- "fcvtl2 v29.4s, v25.8h\n"
- "fcvtl v28.4s, v3.4h\n"
- "str q26, [x21, #0x0]\n"
- "cmp x24, #0x18\n"
- "fcvtl2 v27.4s, v23.8h\n"
- "str q24, [x21, #0x10]\n"
- "fcvtl v26.4s, v19.4h\n"
- "fcvtl2 v25.4s, v19.8h\n"
- "str q21, [x21, #0x20]\n"
- "fcvtl2 v24.4s, v22.8h\n"
- "fcvtl v23.4s, v18.4h\n"
- "str q5, [x21, #0x30]\n"
- "fcvtl2 v22.4s, v18.8h\n"
- "fcvtl2 v21.4s, v20.8h\n"
- "str q4, [x21, #0x40]\n"
- "fcvtl v20.4s, v17.4h\n"
- "fcvtl2 v19.4s, v17.8h\n"
- "str q2, [x21, #0x50]\n"
- "fcvtl2 v18.4s, v3.8h\n"
- "fcvtl v17.4s, v16.4h\n"
- "str q1, [x21, #0x60]\n"
- "fcvtl2 v16.4s, v16.8h\n"
- "str q0, [x21, #0x70]\n"
- "str q31, [x21, #0x80]\n"
- "str q30, [x21, #0x90]\n"
- "str q29, [x21, #0xa0]\n"
- "str q28, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
- "str q27, [x21, #0x0]\n"
- "str q26, [x21, #0x10]\n"
- "str q25, [x21, #0x20]\n"
- "str q24, [x21, #0x30]\n"
- "str q23, [x21, #0x40]\n"
- "str q22, [x21, #0x50]\n"
- "str q21, [x21, #0x60]\n"
- "str q20, [x21, #0x70]\n"
- "str q19, [x21, #0x80]\n"
- "str q18, [x21, #0x90]\n"
- "str q17, [x21, #0xa0]\n"
- "str q16, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "fcvtl2 v30.4s, v19.8h\n"
+ "fcvtl v29.4s, v16.4h\n"
+ "ldr q20, [x21], #0x10\n"
+ "fcvtl2 v28.4s, v16.8h\n"
+ "ldr q27, [x20], #0x10\n"
+ "fcvtl v19.4s, v18.4h\n"
+ "ldr q16, [x21], #0x10\n"
+ "fcvtl v26.4s, v17.4h\n"
+ "fcvtl2 v25.4s, v17.8h\n"
+ "ldr q24, [x20], #0x10\n"
+ "fcvtl2 v18.4s, v18.8h\n"
+ "str q3, [x23, #0x0]\n"
+ "fcvtl v17.4s, v20.4h\n"
+ "str q23, [x23, #0x10]\n"
+ "fcvtl2 v23.4s, v20.8h\n"
+ "str q22, [x23, #0x20]\n"
+ "fcvtl v22.4s, v16.4h\n"
+ "str q21, [x23, #0x30]\n"
+ "fcvtl2 v21.4s, v16.8h\n"
+ "str q1, [x23, #0x40]\n"
+ "fcvtl v16.4s, v0.4h\n"
+ "str q29, [x23, #0x50]\n"
+ "fcvtl2 v20.4s, v0.8h\n"
+ "str q19, [x23, #0x60]\n"
+ "fcvtl v19.4s, v27.4h\n"
+ "str q18, [x23, #0x70]\n"
+ "fcvtl2 v18.4s, v27.8h\n"
+ "str q17, [x23, #0x80]\n"
+ "fcvtl v17.4s, v24.4h\n"
+ "str q16, [x23, #0x90]\n"
+ "fcvtl2 v16.4s, v24.8h\n"
+ "str q20, [x23, #0xa0]\n"
+ "str q19, [x23, #0xb0]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "str q2, [x23, #0x0]\n"
+ "str q31, [x23, #0x10]\n"
+ "str q30, [x23, #0x20]\n"
+ "str q28, [x23, #0x30]\n"
+ "str q26, [x23, #0x40]\n"
+ "str q25, [x23, #0x50]\n"
+ "str q23, [x23, #0x60]\n"
+ "str q22, [x23, #0x70]\n"
+ "str q21, [x23, #0x80]\n"
+ "str q18, [x23, #0x90]\n"
+ "str q17, [x23, #0xa0]\n"
+ "str q16, [x23, #0xb0]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cmp x24, #0xc\n"
+ "cmp x19, #0xc\n"
"blt 5f\n"
"4:" // Main row loop: Column loop
- "ldr q16, [x25], #0x10\n"
- "ldr q21, [x23], #0x10\n"
- "sub x24, x24, #0xc\n"
- "cmp x24, #0xc\n"
- "ldr q20, [x22], #0x10\n"
- "ldr q27, [x20], #0x10\n"
- "fcvtl v19.4s, v16.4h\n"
- "fcvtl2 v26.4s, v16.8h\n"
- "ldr d16, [x25], #0x8\n"
- "ldr d18, [x23], #0x8\n"
- "fcvtl v25.4s, v16.4h\n"
- "fcvtl v24.4s, v21.4h\n"
- "ldr d17, [x22], #0x8\n"
- "ldr d16, [x20], #0x8\n"
- "fcvtl2 v23.4s, v21.8h\n"
- "fcvtl v22.4s, v18.4h\n"
- "fcvtl v21.4s, v20.4h\n"
- "fcvtl2 v20.4s, v20.8h\n"
- "str q19, [x21, #0x0]\n"
+ "ldr q17, [x24], #0x10\n"
"fcvtl v19.4s, v17.4h\n"
- "fcvtl v18.4s, v27.4h\n"
- "str q26, [x21, #0x10]\n"
- "fcvtl2 v17.4s, v27.8h\n"
+ "ldr d16, [x24], #0x8\n"
+ "sub x19, x19, #0xc\n"
+ "fcvtl2 v27.4s, v17.8h\n"
+ "ldr q17, [x22], #0x10\n"
+ "cmp x19, #0xc\n"
+ "fcvtl v26.4s, v16.4h\n"
+ "ldr q16, [x21], #0x10\n"
+ "ldr q25, [x20], #0x10\n"
+ "fcvtl v24.4s, v17.4h\n"
+ "fcvtl2 v23.4s, v17.8h\n"
+ "ldr d18, [x22], #0x8\n"
+ "fcvtl v22.4s, v16.4h\n"
+ "ldr d17, [x21], #0x8\n"
+ "fcvtl2 v21.4s, v16.8h\n"
+ "ldr d16, [x20], #0x8\n"
+ "fcvtl v20.4s, v25.4h\n"
+ "str q19, [x23, #0x0]\n"
+ "fcvtl v19.4s, v18.4h\n"
+ "str q27, [x23, #0x10]\n"
+ "fcvtl2 v18.4s, v25.8h\n"
+ "str q26, [x23, #0x20]\n"
+ "fcvtl v17.4s, v17.4h\n"
+ "str q24, [x23, #0x30]\n"
"fcvtl v16.4s, v16.4h\n"
- "str q25, [x21, #0x20]\n"
- "str q24, [x21, #0x30]\n"
- "str q23, [x21, #0x40]\n"
- "str q22, [x21, #0x50]\n"
- "str q21, [x21, #0x60]\n"
- "str q20, [x21, #0x70]\n"
- "str q19, [x21, #0x80]\n"
- "str q18, [x21, #0x90]\n"
- "str q17, [x21, #0xa0]\n"
- "str q16, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q23, [x23, #0x40]\n"
+ "str q19, [x23, #0x50]\n"
+ "str q22, [x23, #0x60]\n"
+ "str q21, [x23, #0x70]\n"
+ "str q17, [x23, #0x80]\n"
+ "str q20, [x23, #0x90]\n"
+ "str q18, [x23, #0xa0]\n"
+ "str q16, [x23, #0xb0]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp x24, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
- "ldr d19, [x25], #0x8\n"
- "ldr d18, [x23], #0x8\n"
- "sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr d17, [x22], #0x8\n"
+ "ldr d16, [x24], #0x8\n"
+ "fcvtl v19.4s, v16.4h\n"
+ "ldr d16, [x22], #0x8\n"
+ "sub x19, x19, #0x4\n"
+ "fcvtl v18.4s, v16.4h\n"
+ "ldr d16, [x21], #0x8\n"
+ "cmp x19, #0x4\n"
+ "fcvtl v17.4s, v16.4h\n"
"ldr d16, [x20], #0x8\n"
- "fcvtl v19.4s, v19.4h\n"
- "fcvtl v18.4s, v18.4h\n"
- "fcvtl v17.4s, v17.4h\n"
+ "str q19, [x23, #0x0]\n"
"fcvtl v16.4s, v16.4h\n"
- "str q19, [x21, #0x0]\n"
- "str q18, [x21, #0x30]\n"
- "str q17, [x21, #0x60]\n"
- "str q16, [x21, #0x90]\n"
- "add x21, x21, #0x10\n"
+ "str q18, [x23, #0x30]\n"
+ "str q17, [x23, #0x60]\n"
+ "str q16, [x23, #0x90]\n"
+ "add x23, x23, #0x10\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
- "cmp x24, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
- "ldr h19, [x25], #0x2\n"
- "ldr h18, [x23], #0x2\n"
- "sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr h17, [x22], #0x2\n"
+ "ldr h16, [x24], #0x2\n"
+ "fcvtl v19.4s, v16.4h\n"
+ "ldr h16, [x22], #0x2\n"
+ "sub x19, x19, #0x1\n"
+ "fcvtl v18.4s, v16.4h\n"
+ "ldr h16, [x21], #0x2\n"
+ "cmp x19, #0x1\n"
+ "fcvtl v17.4s, v16.4h\n"
"ldr h16, [x20], #0x2\n"
- "fcvtl v19.4s, v19.4h\n"
- "fcvtl v18.4s, v18.4h\n"
- "fcvtl v17.4s, v17.4h\n"
+ "str s19, [x23, #0x0]\n"
"fcvtl v16.4s, v16.4h\n"
- "str s19, [x21, #0x0]\n"
- "str s18, [x21, #0x30]\n"
- "str s17, [x21, #0x60]\n"
- "str s16, [x21, #0x90]\n"
- "add x21, x21, #0x4\n"
+ "str s18, [x23, #0x30]\n"
+ "str s17, [x23, #0x60]\n"
+ "str s16, [x23, #0x90]\n"
+ "add x23, x23, #0x4\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
- "cmp %x[height], #0x4\n"
"add %x[out], %x[out], #0xc0\n"
+ "cmp %x[height], #0x4\n"
"bge 1b\n"
"cbz %x[height], 20f\n"
"10:" // Main loop skip
+
"11:" // Tail row loop: Head
- "mov x20, %x[width]\n"
- "mov x25, %x[in]\n"
- "cmp x20, #0x18\n"
- "add %x[in], x25, %x[in_stride]\n"
- "mov x21, %x[out]\n"
+ "mov x24, %x[in]\n"
+ "mov x23, %x[out]\n"
+ "add %x[in], x24, %x[in_stride]\n"
"sub %x[height], %x[height], #0x1\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x18\n"
"blt 13f\n"
"12:" // Tail row loop: Unroll column loop
- "ldr q16, [x25], #0x10\n"
- "ldr q20, [x25], #0x10\n"
- "sub x20, x20, #0x18\n"
- "fcvtl v18.4s, v16.4h\n"
- "ldr q19, [x25], #0x10\n"
+ "ldr q16, [x24], #0x10\n"
+ "fcvtl v20.4s, v16.4h\n"
+ "ldr q18, [x24], #0x10\n"
+ "sub x19, x19, #0x18\n"
"fcvtl2 v17.4s, v16.8h\n"
- "fcvtl v16.4s, v20.4h\n"
- "str q18, [x21, #0x0]\n"
- "str q17, [x21, #0x10]\n"
- "cmp x20, #0x18\n"
- "fcvtl2 v18.4s, v20.8h\n"
+ "ldr q19, [x24], #0x10\n"
+ "fcvtl v16.4s, v18.4h\n"
+ "cmp x19, #0x18\n"
+ "fcvtl2 v18.4s, v18.8h\n"
+ "str q20, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
"fcvtl v17.4s, v19.4h\n"
- "str q16, [x21, #0x20]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q16, [x23, #0x20]\n"
+ "add x23, x23, %x[out_stride]\n"
"fcvtl2 v16.4s, v19.8h\n"
- "str q18, [x21, #0x0]\n"
- "str q17, [x21, #0x10]\n"
- "str q16, [x21, #0x20]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q18, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q16, [x23, #0x20]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 12b\n"
"13:" // Tail row loop: Unroll column loop skip
- "cmp x20, #0xc\n"
+ "cmp x19, #0xc\n"
"blt 15f\n"
"14:" // Tail row loop: Column loop
- "ldr q17, [x25], #0x10\n"
- "ldr d18, [x25], #0x8\n"
- "sub x20, x20, #0xc\n"
- "cmp x20, #0xc\n"
- "fcvtl v16.4s, v17.4h\n"
+ "ldr q17, [x24], #0x10\n"
+ "fcvtl v18.4s, v17.4h\n"
+ "ldr d16, [x24], #0x8\n"
+ "sub x19, x19, #0xc\n"
"fcvtl2 v17.4s, v17.8h\n"
- "str q16, [x21, #0x0]\n"
- "fcvtl v16.4s, v18.4h\n"
- "str q17, [x21, #0x10]\n"
- "str q16, [x21, #0x20]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q18, [x23, #0x0]\n"
+ "fcvtl v16.4s, v16.4h\n"
+ "str q17, [x23, #0x10]\n"
+ "cmp x19, #0xc\n"
+ "str q16, [x23, #0x20]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 14b\n"
"15:" // Tail row loop: Column loop skip
- "cmp x20, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 17f\n"
"16:" // Tail row loop: width 4 loop: loop
- "ldr d16, [x25], #0x8\n"
- "sub x20, x20, #0x4\n"
- "cmp x20, #0x4\n"
+ "ldr d16, [x24], #0x8\n"
"fcvtl v16.4s, v16.4h\n"
- "str q16, [x21, #0x0]\n"
- "add x21, x21, #0x10\n"
+ "str q16, [x23, #0x0]\n"
+ "sub x19, x19, #0x4\n"
+ "add x23, x23, #0x10\n"
+ "cmp x19, #0x4\n"
"bge 16b\n"
"17:" // Tail row loop: width 4 loop: skip
- "cmp x20, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 19f\n"
"18:" // Tail row loop: width 1 loop: loop
- "ldr h16, [x25], #0x2\n"
- "sub x20, x20, #0x1\n"
- "cmp x20, #0x1\n"
+ "ldr h16, [x24], #0x2\n"
"fcvtl v16.4s, v16.4h\n"
- "str s16, [x21, #0x0]\n"
- "add x21, x21, #0x4\n"
+ "str s16, [x23, #0x0]\n"
+ "sub x19, x19, #0x1\n"
+ "add x23, x23, #0x4\n"
+ "cmp x19, #0x1\n"
"bge 18b\n"
"19:" // Tail row loop: width 1 loop: skip
- "cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x30\n"
+ "cmp %x[height], #0x1\n"
"bge 11b\n"
"20:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_32_1x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_32_1x4.hpp
index 5cd7bd0512..912d512643 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_32_1x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_32_1x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -42,10 +42,9 @@ void a64_transpose_interleave_32_1x4(uint8_t *out, const uint8_t *in, size_t wid
"cmp %x[height], #0x10\n"
"blt 10f\n"
"1:" // Main row loop: Head
- "mov x17, %x[in]\n"
- "add x16, x17, %x[in_stride]\n"
- "add x15, x16, %x[in_stride]\n"
- "add x14, x15, %x[in_stride]\n"
+ "mov x16, %x[in]\n"
+ "mov x15, %x[out]\n"
+ "add x14, x16, %x[in_stride]\n"
"add x13, x14, %x[in_stride]\n"
"add x12, x13, %x[in_stride]\n"
"add x11, x12, %x[in_stride]\n"
@@ -55,423 +54,426 @@ void a64_transpose_interleave_32_1x4(uint8_t *out, const uint8_t *in, size_t wid
"add x27, x28, %x[in_stride]\n"
"add x26, x27, %x[in_stride]\n"
"add x25, x26, %x[in_stride]\n"
- "mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "add x23, x24, %x[in_stride]\n"
"add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
- "cmp x24, #0x20\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x10\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x20\n"
"blt 3f\n"
"2:" // Main row loop: Column loop
- "ldr q6, [x17], #0x10\n"
- "ldr q31, [x16], #0x10\n"
- "sub x24, x24, #0x20\n"
- "cmp x24, #0x20\n"
- "ldr q7, [x15], #0x10\n"
- "ldr q0, [x14], #0x10\n"
- "zip1 v9.16b, v6.16b, v7.16b\n"
- "zip1 v20.16b, v31.16b, v0.16b\n"
- "ldr q24, [x13], #0x10\n"
- "ldr q19, [x12], #0x10\n"
- "zip2 v30.16b, v6.16b, v7.16b\n"
- "zip2 v12.16b, v31.16b, v0.16b\n"
- "ldr q23, [x11], #0x10\n"
- "ldr q17, [x10], #0x10\n"
- "zip1 v13.16b, v24.16b, v23.16b\n"
- "zip1 v16.16b, v19.16b, v17.16b\n"
- "ldr q0, [x9], #0x10\n"
- "ldr q31, [x28], #0x10\n"
- "zip2 v15.16b, v24.16b, v23.16b\n"
- "zip2 v11.16b, v19.16b, v17.16b\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q4, [x26], #0x10\n"
- "zip1 v1.16b, v0.16b, v17.16b\n"
- "zip1 v21.16b, v31.16b, v4.16b\n"
- "ldr q28, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v0.16b, v0.16b, v17.16b\n"
- "zip2 v26.16b, v31.16b, v4.16b\n"
- "ldr q17, [x22], #0x10\n"
- "ldr q19, [x20], #0x10\n"
- "zip1 v23.16b, v28.16b, v17.16b\n"
- "zip1 v25.16b, v18.16b, v19.16b\n"
- "ldr q2, [x17], #0x10\n"
- "ldr q3, [x16], #0x10\n"
- "zip2 v7.16b, v28.16b, v17.16b\n"
- "zip2 v8.16b, v18.16b, v19.16b\n"
- "ldr q22, [x15], #0x10\n"
- "ldr q27, [x14], #0x10\n"
- "zip1 v19.16b, v2.16b, v22.16b\n"
- "zip1 v17.16b, v3.16b, v27.16b\n"
- "ldr q6, [x13], #0x10\n"
- "ldr q4, [x12], #0x10\n"
- "zip2 v24.16b, v2.16b, v22.16b\n"
- "zip2 v22.16b, v3.16b, v27.16b\n"
- "ldr q14, [x11], #0x10\n"
- "ldr q18, [x10], #0x10\n"
- "zip1 v29.16b, v6.16b, v14.16b\n"
- "zip1 v31.16b, v4.16b, v18.16b\n"
- "ldr q2, [x9], #0x10\n"
- "ldr q10, [x28], #0x10\n"
- "zip2 v28.16b, v6.16b, v14.16b\n"
- "zip2 v27.16b, v4.16b, v18.16b\n"
- "ldr q6, [x27], #0x10\n"
- "ldr q5, [x26], #0x10\n"
- "zip1 v14.16b, v2.16b, v6.16b\n"
- "zip1 v4.16b, v10.16b, v5.16b\n"
- "ldr q3, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v6.16b, v2.16b, v6.16b\n"
- "zip2 v10.16b, v10.16b, v5.16b\n"
- "ldr q5, [x22], #0x10\n"
- "zip1 v2.16b, v3.16b, v5.16b\n"
- "zip2 v3.16b, v3.16b, v5.16b\n"
- "zip1 v5.16b, v9.16b, v20.16b\n"
- "zip2 v20.16b, v9.16b, v20.16b\n"
- "ldr q9, [x20], #0x10\n"
- "str q5, [x21, #0x0]\n"
- "zip1 v5.16b, v18.16b, v9.16b\n"
- "zip2 v9.16b, v18.16b, v9.16b\n"
- "str q20, [x21, #0x10]\n"
- "zip1 v18.16b, v30.16b, v12.16b\n"
- "zip2 v30.16b, v30.16b, v12.16b\n"
- "str q18, [x21, #0x20]\n"
- "zip1 v20.16b, v19.16b, v17.16b\n"
- "zip2 v12.16b, v19.16b, v17.16b\n"
- "str q30, [x21, #0x30]\n"
- "zip1 v18.16b, v24.16b, v22.16b\n"
- "zip2 v17.16b, v24.16b, v22.16b\n"
- "str q20, [x21, #0x40]\n"
- "zip1 v30.16b, v13.16b, v16.16b\n"
- "zip2 v24.16b, v13.16b, v16.16b\n"
- "str q12, [x21, #0x50]\n"
- "zip1 v22.16b, v15.16b, v11.16b\n"
- "zip2 v20.16b, v15.16b, v11.16b\n"
- "str q18, [x21, #0x60]\n"
- "zip1 v19.16b, v29.16b, v31.16b\n"
- "zip2 v18.16b, v29.16b, v31.16b\n"
- "str q17, [x21, #0x70]\n"
- "zip1 v17.16b, v28.16b, v27.16b\n"
- "zip2 v16.16b, v28.16b, v27.16b\n"
- "str q30, [x21, #0x80]\n"
- "zip1 v31.16b, v1.16b, v21.16b\n"
- "zip2 v1.16b, v1.16b, v21.16b\n"
- "str q24, [x21, #0x90]\n"
- "zip1 v30.16b, v0.16b, v26.16b\n"
- "zip2 v29.16b, v0.16b, v26.16b\n"
- "str q22, [x21, #0xa0]\n"
- "zip1 v28.16b, v14.16b, v4.16b\n"
- "zip2 v27.16b, v14.16b, v4.16b\n"
- "str q20, [x21, #0xb0]\n"
- "zip1 v26.16b, v6.16b, v10.16b\n"
- "zip2 v24.16b, v6.16b, v10.16b\n"
- "str q19, [x21, #0xc0]\n"
- "zip1 v14.16b, v23.16b, v25.16b\n"
- "zip2 v22.16b, v23.16b, v25.16b\n"
- "str q18, [x21, #0xd0]\n"
- "zip1 v21.16b, v7.16b, v8.16b\n"
- "zip2 v20.16b, v7.16b, v8.16b\n"
- "str q17, [x21, #0xe0]\n"
- "zip1 v19.16b, v2.16b, v5.16b\n"
- "zip2 v18.16b, v2.16b, v5.16b\n"
- "str q16, [x21, #0xf0]\n"
- "zip1 v17.16b, v3.16b, v9.16b\n"
- "zip2 v16.16b, v3.16b, v9.16b\n"
- "str q31, [x21, #0x100]\n"
- "str q1, [x21, #0x110]\n"
- "str q30, [x21, #0x120]\n"
- "str q29, [x21, #0x130]\n"
- "str q28, [x21, #0x140]\n"
- "str q27, [x21, #0x150]\n"
- "str q26, [x21, #0x160]\n"
- "str q24, [x21, #0x170]\n"
- "str q14, [x21, #0x180]\n"
- "str q22, [x21, #0x190]\n"
- "str q21, [x21, #0x1a0]\n"
- "str q20, [x21, #0x1b0]\n"
- "str q19, [x21, #0x1c0]\n"
- "str q18, [x21, #0x1d0]\n"
- "str q17, [x21, #0x1e0]\n"
- "str q16, [x21, #0x1f0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr q14, [x16], #0x10\n"
+ "sub x19, x19, #0x20\n"
+ "ldr q7, [x14], #0x10\n"
+ "cmp x19, #0x20\n"
+ "ldr q1, [x13], #0x10\n"
+ "zip1 v0.16b, v14.16b, v1.16b\n"
+ "ldr q2, [x16], #0x10\n"
+ "zip2 v1.16b, v14.16b, v1.16b\n"
+ "ldr q26, [x14], #0x10\n"
+ "ldr q19, [x13], #0x10\n"
+ "zip1 v20.16b, v2.16b, v19.16b\n"
+ "ldr q15, [x12], #0x10\n"
+ "zip2 v14.16b, v2.16b, v19.16b\n"
+ "ldr q30, [x11], #0x10\n"
+ "ldr q29, [x10], #0x10\n"
+ "zip1 v8.16b, v7.16b, v15.16b\n"
+ "ldr q31, [x12], #0x10\n"
+ "zip2 v22.16b, v7.16b, v15.16b\n"
+ "ldr q28, [x11], #0x10\n"
+ "zip1 v25.16b, v0.16b, v8.16b\n"
+ "ldr q23, [x10], #0x10\n"
+ "zip2 v10.16b, v0.16b, v8.16b\n"
+ "ldr q27, [x9], #0x10\n"
+ "zip1 v4.16b, v1.16b, v22.16b\n"
+ "ldr q0, [x28], #0x10\n"
+ "zip2 v5.16b, v1.16b, v22.16b\n"
+ "ldr q13, [x27], #0x10\n"
+ "zip1 v12.16b, v26.16b, v31.16b\n"
+ "ldr q17, [x26], #0x10\n"
+ "zip1 v24.16b, v20.16b, v12.16b\n"
+ "ldr q18, [x9], #0x10\n"
+ "zip2 v12.16b, v20.16b, v12.16b\n"
+ "ldr q6, [x28], #0x10\n"
+ "zip2 v16.16b, v26.16b, v31.16b\n"
+ "ldr q15, [x27], #0x10\n"
+ "zip1 v22.16b, v14.16b, v16.16b\n"
+ "ldr q1, [x26], #0x10\n"
+ "zip2 v9.16b, v14.16b, v16.16b\n"
+ "ldr q8, [x25], #0x10\n"
+ "zip1 v26.16b, v30.16b, v27.16b\n"
+ "ldr q19, [x24], #0x10\n"
+ "zip1 v16.16b, v29.16b, v0.16b\n"
+ "ldr q7, [x23], #0x10\n"
+ "zip1 v11.16b, v26.16b, v16.16b\n"
+ "ldr q14, [x22], #0x10\n"
+ "zip2 v20.16b, v26.16b, v16.16b\n"
+ "ldr q16, [x25], #0x10\n"
+ "zip2 v26.16b, v30.16b, v27.16b\n"
+ "ldr q31, [x24], #0x10\n"
+ "zip2 v21.16b, v29.16b, v0.16b\n"
+ "ldr q0, [x23], #0x10\n"
+ "zip1 v30.16b, v26.16b, v21.16b\n"
+ "ldr q29, [x22], #0x10\n"
+ "zip2 v27.16b, v26.16b, v21.16b\n"
+ "ldr q3, [x21], #0x10\n"
+ "zip1 v21.16b, v28.16b, v18.16b\n"
+ "ldr q2, [x20], #0x10\n"
+ "zip1 v26.16b, v23.16b, v6.16b\n"
+ "zip2 v18.16b, v28.16b, v18.16b\n"
+ "ldr q28, [x21], #0x10\n"
+ "zip2 v23.16b, v23.16b, v6.16b\n"
+ "zip1 v6.16b, v21.16b, v26.16b\n"
+ "zip2 v21.16b, v21.16b, v26.16b\n"
+ "ldr q26, [x20], #0x10\n"
+ "str q25, [x15, #0x0]\n"
+ "zip1 v25.16b, v18.16b, v23.16b\n"
+ "zip2 v23.16b, v18.16b, v23.16b\n"
+ "str q10, [x15, #0x10]\n"
+ "zip1 v18.16b, v13.16b, v8.16b\n"
+ "str q4, [x15, #0x20]\n"
+ "zip1 v10.16b, v17.16b, v19.16b\n"
+ "str q5, [x15, #0x30]\n"
+ "zip1 v5.16b, v18.16b, v10.16b\n"
+ "str q24, [x15, #0x40]\n"
+ "zip2 v24.16b, v18.16b, v10.16b\n"
+ "str q12, [x15, #0x50]\n"
+ "zip2 v18.16b, v13.16b, v8.16b\n"
+ "str q22, [x15, #0x60]\n"
+ "zip2 v17.16b, v17.16b, v19.16b\n"
+ "str q9, [x15, #0x70]\n"
+ "zip1 v9.16b, v18.16b, v17.16b\n"
+ "str q11, [x15, #0x80]\n"
+ "zip2 v12.16b, v18.16b, v17.16b\n"
+ "str q20, [x15, #0x90]\n"
+ "zip1 v20.16b, v15.16b, v16.16b\n"
+ "str q30, [x15, #0xa0]\n"
+ "zip1 v17.16b, v1.16b, v31.16b\n"
+ "str q27, [x15, #0xb0]\n"
+ "zip1 v19.16b, v20.16b, v17.16b\n"
+ "str q6, [x15, #0xc0]\n"
+ "zip2 v18.16b, v20.16b, v17.16b\n"
+ "str q21, [x15, #0xd0]\n"
+ "zip2 v17.16b, v15.16b, v16.16b\n"
+ "str q25, [x15, #0xe0]\n"
+ "zip2 v16.16b, v1.16b, v31.16b\n"
+ "str q23, [x15, #0xf0]\n"
+ "zip1 v22.16b, v17.16b, v16.16b\n"
+ "str q5, [x15, #0x100]\n"
+ "zip2 v21.16b, v17.16b, v16.16b\n"
+ "str q24, [x15, #0x110]\n"
+ "zip1 v17.16b, v7.16b, v3.16b\n"
+ "str q9, [x15, #0x120]\n"
+ "zip1 v16.16b, v14.16b, v2.16b\n"
+ "str q12, [x15, #0x130]\n"
+ "zip1 v20.16b, v17.16b, v16.16b\n"
+ "str q19, [x15, #0x140]\n"
+ "zip2 v19.16b, v17.16b, v16.16b\n"
+ "str q18, [x15, #0x150]\n"
+ "zip2 v18.16b, v7.16b, v3.16b\n"
+ "str q22, [x15, #0x160]\n"
+ "zip2 v16.16b, v14.16b, v2.16b\n"
+ "str q21, [x15, #0x170]\n"
+ "zip1 v17.16b, v18.16b, v16.16b\n"
+ "str q20, [x15, #0x180]\n"
+ "zip2 v16.16b, v18.16b, v16.16b\n"
+ "str q19, [x15, #0x190]\n"
+ "zip1 v18.16b, v0.16b, v28.16b\n"
+ "str q17, [x15, #0x1a0]\n"
+ "zip1 v17.16b, v29.16b, v26.16b\n"
+ "str q16, [x15, #0x1b0]\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0x1c0]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0x1d0]\n"
+ "zip2 v18.16b, v0.16b, v28.16b\n"
+ "zip2 v17.16b, v29.16b, v26.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0x1e0]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0x1f0]\n"
+ "add x15, x15, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Column loop skip
- "cmp x24, #0x10\n"
+ "cmp x19, #0x10\n"
"blt 5f\n"
"4:" // Main row loop: width 16 loop: loop
- "ldr q21, [x17], #0x10\n"
- "ldr q20, [x16], #0x10\n"
- "sub x24, x24, #0x10\n"
- "cmp x24, #0x10\n"
- "ldr q17, [x15], #0x10\n"
- "ldr q16, [x14], #0x10\n"
- "zip1 v3.16b, v21.16b, v17.16b\n"
- "zip1 v2.16b, v20.16b, v16.16b\n"
- "ldr q19, [x13], #0x10\n"
- "ldr q18, [x12], #0x10\n"
- "zip2 v1.16b, v21.16b, v17.16b\n"
- "zip2 v0.16b, v20.16b, v16.16b\n"
- "ldr q17, [x11], #0x10\n"
- "ldr q16, [x10], #0x10\n"
+ "ldr q18, [x16], #0x10\n"
+ "sub x19, x19, #0x10\n"
+ "ldr q20, [x14], #0x10\n"
+ "cmp x19, #0x10\n"
+ "ldr q17, [x13], #0x10\n"
+ "zip1 v19.16b, v18.16b, v17.16b\n"
+ "ldr q16, [x12], #0x10\n"
+ "zip2 v18.16b, v18.16b, v17.16b\n"
+ "ldr q3, [x11], #0x10\n"
+ "ldr q2, [x10], #0x10\n"
+ "zip1 v17.16b, v20.16b, v16.16b\n"
+ "ldr q1, [x9], #0x10\n"
+ "zip2 v16.16b, v20.16b, v16.16b\n"
+ "ldr q0, [x28], #0x10\n"
"zip1 v31.16b, v19.16b, v17.16b\n"
- "zip1 v30.16b, v18.16b, v16.16b\n"
- "ldr q24, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "zip2 v29.16b, v19.16b, v17.16b\n"
- "zip2 v23.16b, v18.16b, v16.16b\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v22.16b, v24.16b, v17.16b\n"
- "zip1 v21.16b, v20.16b, v16.16b\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v28.16b, v24.16b, v17.16b\n"
- "zip2 v20.16b, v20.16b, v16.16b\n"
- "ldr q17, [x22], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "zip1 v27.16b, v19.16b, v17.16b\n"
- "zip1 v26.16b, v18.16b, v16.16b\n"
- "zip2 v25.16b, v19.16b, v17.16b\n"
- "zip2 v24.16b, v18.16b, v16.16b\n"
- "zip1 v16.16b, v3.16b, v2.16b\n"
- "zip2 v18.16b, v3.16b, v2.16b\n"
- "str q16, [x21, #0x0]\n"
- "zip1 v17.16b, v1.16b, v0.16b\n"
- "zip2 v16.16b, v1.16b, v0.16b\n"
- "str q18, [x21, #0x10]\n"
- "str q17, [x21, #0x20]\n"
- "zip1 v19.16b, v31.16b, v30.16b\n"
- "zip2 v18.16b, v31.16b, v30.16b\n"
- "str q16, [x21, #0x30]\n"
- "zip1 v17.16b, v29.16b, v23.16b\n"
- "zip2 v16.16b, v29.16b, v23.16b\n"
- "str q19, [x21, #0x80]\n"
- "zip1 v23.16b, v22.16b, v21.16b\n"
- "zip2 v22.16b, v22.16b, v21.16b\n"
- "str q18, [x21, #0x90]\n"
- "zip1 v21.16b, v28.16b, v20.16b\n"
- "zip2 v20.16b, v28.16b, v20.16b\n"
- "str q17, [x21, #0xa0]\n"
- "zip1 v19.16b, v27.16b, v26.16b\n"
- "zip2 v18.16b, v27.16b, v26.16b\n"
- "str q16, [x21, #0xb0]\n"
- "zip1 v17.16b, v25.16b, v24.16b\n"
- "zip2 v16.16b, v25.16b, v24.16b\n"
- "str q23, [x21, #0x100]\n"
- "str q22, [x21, #0x110]\n"
- "str q21, [x21, #0x120]\n"
- "str q20, [x21, #0x130]\n"
- "str q19, [x21, #0x180]\n"
- "str q18, [x21, #0x190]\n"
- "str q17, [x21, #0x1a0]\n"
- "str q16, [x21, #0x1b0]\n"
- "add x21, x21, #0x40\n"
+ "ldr q30, [x27], #0x10\n"
+ "zip2 v20.16b, v19.16b, v17.16b\n"
+ "ldr q29, [x26], #0x10\n"
+ "zip1 v19.16b, v18.16b, v16.16b\n"
+ "ldr q28, [x25], #0x10\n"
+ "zip2 v18.16b, v18.16b, v16.16b\n"
+ "ldr q27, [x24], #0x10\n"
+ "zip1 v17.16b, v3.16b, v1.16b\n"
+ "ldr q26, [x23], #0x10\n"
+ "zip1 v16.16b, v2.16b, v0.16b\n"
+ "ldr q25, [x22], #0x10\n"
+ "zip1 v24.16b, v17.16b, v16.16b\n"
+ "ldr q23, [x21], #0x10\n"
+ "zip2 v22.16b, v17.16b, v16.16b\n"
+ "ldr q21, [x20], #0x10\n"
+ "zip2 v17.16b, v3.16b, v1.16b\n"
+ "str q31, [x15, #0x0]\n"
+ "zip2 v16.16b, v2.16b, v0.16b\n"
+ "str q20, [x15, #0x10]\n"
+ "zip1 v20.16b, v17.16b, v16.16b\n"
+ "str q19, [x15, #0x20]\n"
+ "zip2 v19.16b, v17.16b, v16.16b\n"
+ "str q18, [x15, #0x30]\n"
+ "zip1 v18.16b, v30.16b, v28.16b\n"
+ "str q24, [x15, #0x80]\n"
+ "zip1 v16.16b, v29.16b, v27.16b\n"
+ "str q22, [x15, #0x90]\n"
+ "zip1 v17.16b, v18.16b, v16.16b\n"
+ "str q20, [x15, #0xa0]\n"
+ "zip2 v16.16b, v18.16b, v16.16b\n"
+ "str q19, [x15, #0xb0]\n"
+ "zip2 v18.16b, v30.16b, v28.16b\n"
+ "str q17, [x15, #0x100]\n"
+ "zip2 v17.16b, v29.16b, v27.16b\n"
+ "str q16, [x15, #0x110]\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0x120]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0x130]\n"
+ "zip1 v18.16b, v26.16b, v23.16b\n"
+ "zip1 v17.16b, v25.16b, v21.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0x180]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0x190]\n"
+ "zip2 v18.16b, v26.16b, v23.16b\n"
+ "zip2 v17.16b, v25.16b, v21.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0x1a0]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0x1b0]\n"
+ "add x15, x15, #0x40\n"
"bge 4b\n"
"5:" // Main row loop: width 16 loop: skip
- "cmp x24, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
- "ldr s19, [x17], #0x4\n"
"ldr s18, [x16], #0x4\n"
- "sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr s17, [x15], #0x4\n"
- "ldr s16, [x14], #0x4\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr s19, [x13], #0x4\n"
- "ldr s18, [x12], #0x4\n"
- "zip1 v22.16b, v17.16b, v16.16b\n"
- "ldr s17, [x11], #0x4\n"
- "ldr s16, [x10], #0x4\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr s19, [x9], #0x4\n"
- "ldr s18, [x28], #0x4\n"
- "zip1 v21.16b, v17.16b, v16.16b\n"
- "ldr s17, [x27], #0x4\n"
- "ldr s16, [x26], #0x4\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr s20, [x25], #0x4\n"
- "ldr s19, [x23], #0x4\n"
- "zip1 v18.16b, v17.16b, v16.16b\n"
- "ldr s17, [x22], #0x4\n"
+ "sub x19, x19, #0x4\n"
+ "ldr s17, [x14], #0x4\n"
+ "cmp x19, #0x4\n"
+ "ldr s16, [x13], #0x4\n"
+ "zip1 v19.16b, v18.16b, v16.16b\n"
+ "ldr s16, [x12], #0x4\n"
+ "ldr s18, [x11], #0x4\n"
+ "zip1 v16.16b, v17.16b, v16.16b\n"
+ "ldr s20, [x10], #0x4\n"
+ "ldr s17, [x9], #0x4\n"
+ "zip1 v23.16b, v19.16b, v16.16b\n"
+ "ldr s16, [x28], #0x4\n"
+ "zip1 v19.16b, v18.16b, v17.16b\n"
+ "ldr s18, [x27], #0x4\n"
+ "ldr s22, [x26], #0x4\n"
+ "zip1 v16.16b, v20.16b, v16.16b\n"
+ "ldr s17, [x25], #0x4\n"
+ "zip1 v21.16b, v19.16b, v16.16b\n"
+ "ldr s16, [x24], #0x4\n"
+ "zip1 v18.16b, v18.16b, v17.16b\n"
+ "ldr s20, [x23], #0x4\n"
+ "ldr s19, [x22], #0x4\n"
+ "zip1 v16.16b, v22.16b, v16.16b\n"
+ "ldr s17, [x21], #0x4\n"
+ "zip1 v18.16b, v18.16b, v16.16b\n"
"ldr s16, [x20], #0x4\n"
"zip1 v17.16b, v20.16b, v17.16b\n"
+ "str q23, [x15, #0x0]\n"
+ "str q21, [x15, #0x80]\n"
"zip1 v16.16b, v19.16b, v16.16b\n"
- "str q22, [x21, #0x0]\n"
+ "str q18, [x15, #0x100]\n"
"zip1 v16.16b, v17.16b, v16.16b\n"
- "str q21, [x21, #0x80]\n"
- "str q18, [x21, #0x100]\n"
- "str q16, [x21, #0x180]\n"
- "add x21, x21, #0x10\n"
+ "str q16, [x15, #0x180]\n"
+ "add x15, x15, #0x10\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
- "cmp x24, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
- "ldr b19, [x17], #0x1\n"
"ldr b18, [x16], #0x1\n"
- "sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr b17, [x15], #0x1\n"
- "ldr b16, [x14], #0x1\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr b19, [x13], #0x1\n"
- "ldr b18, [x12], #0x1\n"
- "zip1 v22.16b, v17.16b, v16.16b\n"
- "ldr b17, [x11], #0x1\n"
- "ldr b16, [x10], #0x1\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr b19, [x9], #0x1\n"
- "ldr b18, [x28], #0x1\n"
- "zip1 v21.16b, v17.16b, v16.16b\n"
- "ldr b17, [x27], #0x1\n"
- "ldr b16, [x26], #0x1\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr b20, [x25], #0x1\n"
- "ldr b19, [x23], #0x1\n"
- "zip1 v18.16b, v17.16b, v16.16b\n"
- "ldr b17, [x22], #0x1\n"
+ "sub x19, x19, #0x1\n"
+ "ldr b17, [x14], #0x1\n"
+ "cmp x19, #0x1\n"
+ "ldr b16, [x13], #0x1\n"
+ "zip1 v19.16b, v18.16b, v16.16b\n"
+ "ldr b16, [x12], #0x1\n"
+ "ldr b18, [x11], #0x1\n"
+ "zip1 v16.16b, v17.16b, v16.16b\n"
+ "ldr b20, [x10], #0x1\n"
+ "ldr b17, [x9], #0x1\n"
+ "zip1 v23.16b, v19.16b, v16.16b\n"
+ "ldr b16, [x28], #0x1\n"
+ "zip1 v19.16b, v18.16b, v17.16b\n"
+ "ldr b18, [x27], #0x1\n"
+ "ldr b22, [x26], #0x1\n"
+ "zip1 v16.16b, v20.16b, v16.16b\n"
+ "ldr b17, [x25], #0x1\n"
+ "zip1 v21.16b, v19.16b, v16.16b\n"
+ "ldr b16, [x24], #0x1\n"
+ "zip1 v18.16b, v18.16b, v17.16b\n"
+ "ldr b20, [x23], #0x1\n"
+ "ldr b19, [x22], #0x1\n"
+ "zip1 v16.16b, v22.16b, v16.16b\n"
+ "ldr b17, [x21], #0x1\n"
+ "zip1 v18.16b, v18.16b, v16.16b\n"
"ldr b16, [x20], #0x1\n"
"zip1 v17.16b, v20.16b, v17.16b\n"
+ "str s23, [x15, #0x0]\n"
+ "str s21, [x15, #0x80]\n"
"zip1 v16.16b, v19.16b, v16.16b\n"
- "str s22, [x21, #0x0]\n"
+ "str s18, [x15, #0x100]\n"
"zip1 v16.16b, v17.16b, v16.16b\n"
- "str s21, [x21, #0x80]\n"
- "str s18, [x21, #0x100]\n"
- "str s16, [x21, #0x180]\n"
- "add x21, x21, #0x4\n"
+ "str s16, [x15, #0x180]\n"
+ "add x15, x15, #0x4\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
- "cmp %x[height], #0x10\n"
"add %x[out], %x[out], #0x200\n"
+ "cmp %x[height], #0x10\n"
"bge 1b\n"
"cbz %x[height], 20f\n"
"10:" // Main loop skip
+
"11:" // Tail row loop: Head
- "mov x17, %x[in]\n"
- "add x16, x17, %x[in_stride]\n"
- "add x15, x16, %x[in_stride]\n"
- "mov x20, %x[width]\n"
- "add x14, x15, %x[in_stride]\n"
+ "mov x16, %x[in]\n"
+ "mov x15, %x[out]\n"
+ "add x14, x16, %x[in_stride]\n"
+ "add x13, x14, %x[in_stride]\n"
+ "add x12, x13, %x[in_stride]\n"
+ "add %x[in], x12, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x14, %x[in_stride]\n"
- "csel x14, x14, %x[pad_row], GT\n"
- "csel x15, x15, %x[pad_row], GE\n"
+ "csel x12, x12, %x[pad_row], GT\n"
+ "csel x13, x13, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x16, x16, %x[pad_row], GT\n"
- "cmp x20, #0x20\n"
- "mov x21, %x[out]\n"
+ "csel x14, x14, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x4\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x20\n"
"blt 13f\n"
"12:" // Tail row loop: Column loop
- "ldr q19, [x17], #0x10\n"
+ "ldr q17, [x16], #0x10\n"
+ "sub x19, x19, #0x20\n"
+ "ldr q25, [x14], #0x10\n"
+ "cmp x19, #0x20\n"
+ "ldr q16, [x13], #0x10\n"
+ "zip1 v24.16b, v17.16b, v16.16b\n"
"ldr q18, [x16], #0x10\n"
- "sub x20, x20, #0x20\n"
- "cmp x20, #0x20\n"
- "ldr q17, [x15], #0x10\n"
- "ldr q16, [x14], #0x10\n"
- "zip1 v25.16b, v19.16b, v17.16b\n"
- "zip1 v24.16b, v18.16b, v16.16b\n"
- "ldr q22, [x17], #0x10\n"
- "ldr q21, [x16], #0x10\n"
- "zip2 v20.16b, v19.16b, v17.16b\n"
- "zip2 v19.16b, v18.16b, v16.16b\n"
- "ldr q17, [x15], #0x10\n"
- "ldr q16, [x14], #0x10\n"
- "zip1 v23.16b, v22.16b, v17.16b\n"
- "zip1 v18.16b, v21.16b, v16.16b\n"
- "zip2 v22.16b, v22.16b, v17.16b\n"
- "zip2 v21.16b, v21.16b, v16.16b\n"
- "zip1 v16.16b, v25.16b, v24.16b\n"
- "zip2 v17.16b, v25.16b, v24.16b\n"
- "str q16, [x21, #0x0]\n"
- "zip1 v16.16b, v20.16b, v19.16b\n"
- "zip2 v20.16b, v20.16b, v19.16b\n"
- "str q17, [x21, #0x10]\n"
- "zip1 v19.16b, v23.16b, v18.16b\n"
- "zip2 v18.16b, v23.16b, v18.16b\n"
- "str q16, [x21, #0x20]\n"
- "zip1 v17.16b, v22.16b, v21.16b\n"
- "zip2 v16.16b, v22.16b, v21.16b\n"
- "str q20, [x21, #0x30]\n"
- "str q19, [x21, #0x40]\n"
- "str q18, [x21, #0x50]\n"
- "str q17, [x21, #0x60]\n"
- "str q16, [x21, #0x70]\n"
- "add x21, x21, %x[out_stride]\n"
+ "zip2 v23.16b, v17.16b, v16.16b\n"
+ "ldr q22, [x14], #0x10\n"
+ "ldr q17, [x13], #0x10\n"
+ "zip1 v21.16b, v18.16b, v17.16b\n"
+ "ldr q16, [x12], #0x10\n"
+ "zip2 v20.16b, v18.16b, v17.16b\n"
+ "ldr q19, [x12], #0x10\n"
+ "zip1 v18.16b, v25.16b, v16.16b\n"
+ "zip2 v17.16b, v25.16b, v16.16b\n"
+ "zip1 v16.16b, v24.16b, v18.16b\n"
+ "str q16, [x15, #0x0]\n"
+ "zip2 v16.16b, v24.16b, v18.16b\n"
+ "str q16, [x15, #0x10]\n"
+ "zip1 v16.16b, v23.16b, v17.16b\n"
+ "str q16, [x15, #0x20]\n"
+ "zip2 v16.16b, v23.16b, v17.16b\n"
+ "str q16, [x15, #0x30]\n"
+ "zip1 v17.16b, v22.16b, v19.16b\n"
+ "zip1 v16.16b, v21.16b, v17.16b\n"
+ "str q16, [x15, #0x40]\n"
+ "zip2 v16.16b, v21.16b, v17.16b\n"
+ "str q16, [x15, #0x50]\n"
+ "zip2 v17.16b, v22.16b, v19.16b\n"
+ "zip1 v16.16b, v20.16b, v17.16b\n"
+ "str q16, [x15, #0x60]\n"
+ "zip2 v16.16b, v20.16b, v17.16b\n"
+ "str q16, [x15, #0x70]\n"
+ "add x15, x15, %x[out_stride]\n"
"bge 12b\n"
"13:" // Tail row loop: Column loop skip
- "cmp x20, #0x10\n"
+ "cmp x19, #0x10\n"
"blt 15f\n"
"14:" // Tail row loop: width 16 loop: loop
- "ldr q20, [x17], #0x10\n"
- "ldr q21, [x16], #0x10\n"
- "sub x20, x20, #0x10\n"
- "cmp x20, #0x10\n"
- "ldr q19, [x15], #0x10\n"
- "ldr q16, [x14], #0x10\n"
- "zip1 v18.16b, v20.16b, v19.16b\n"
- "zip1 v17.16b, v21.16b, v16.16b\n"
- "zip2 v20.16b, v20.16b, v19.16b\n"
- "zip2 v19.16b, v21.16b, v16.16b\n"
- "zip1 v16.16b, v18.16b, v17.16b\n"
- "zip2 v18.16b, v18.16b, v17.16b\n"
- "str q16, [x21, #0x0]\n"
- "zip1 v17.16b, v20.16b, v19.16b\n"
- "zip2 v16.16b, v20.16b, v19.16b\n"
- "str q18, [x21, #0x10]\n"
- "str q17, [x21, #0x20]\n"
- "str q16, [x21, #0x30]\n"
- "add x21, x21, #0x40\n"
+ "ldr q18, [x16], #0x10\n"
+ "sub x19, x19, #0x10\n"
+ "ldr q21, [x14], #0x10\n"
+ "cmp x19, #0x10\n"
+ "ldr q17, [x13], #0x10\n"
+ "zip1 v20.16b, v18.16b, v17.16b\n"
+ "ldr q16, [x12], #0x10\n"
+ "zip2 v19.16b, v18.16b, v17.16b\n"
+ "zip1 v18.16b, v21.16b, v16.16b\n"
+ "zip2 v17.16b, v21.16b, v16.16b\n"
+ "zip1 v16.16b, v20.16b, v18.16b\n"
+ "str q16, [x15, #0x0]\n"
+ "zip2 v16.16b, v20.16b, v18.16b\n"
+ "str q16, [x15, #0x10]\n"
+ "zip1 v16.16b, v19.16b, v17.16b\n"
+ "str q16, [x15, #0x20]\n"
+ "zip2 v16.16b, v19.16b, v17.16b\n"
+ "str q16, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
"bge 14b\n"
"15:" // Tail row loop: width 16 loop: skip
- "cmp x20, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 17f\n"
"16:" // Tail row loop: width 4 loop: loop
- "ldr s19, [x17], #0x4\n"
- "ldr s18, [x16], #0x4\n"
- "sub x20, x20, #0x4\n"
- "cmp x20, #0x4\n"
- "ldr s17, [x15], #0x4\n"
- "ldr s16, [x14], #0x4\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
+ "ldr s17, [x16], #0x4\n"
+ "sub x19, x19, #0x4\n"
+ "ldr s18, [x14], #0x4\n"
+ "cmp x19, #0x4\n"
+ "ldr s16, [x13], #0x4\n"
+ "zip1 v17.16b, v17.16b, v16.16b\n"
+ "ldr s16, [x12], #0x4\n"
"zip1 v16.16b, v18.16b, v16.16b\n"
"zip1 v16.16b, v17.16b, v16.16b\n"
- "str q16, [x21, #0x0]\n"
- "add x21, x21, #0x10\n"
+ "str q16, [x15, #0x0]\n"
+ "add x15, x15, #0x10\n"
"bge 16b\n"
"17:" // Tail row loop: width 4 loop: skip
- "cmp x20, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 19f\n"
"18:" // Tail row loop: width 1 loop: loop
- "ldr b19, [x17], #0x1\n"
- "ldr b18, [x16], #0x1\n"
- "sub x20, x20, #0x1\n"
- "cmp x20, #0x1\n"
- "ldr b17, [x15], #0x1\n"
- "ldr b16, [x14], #0x1\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
+ "ldr b17, [x16], #0x1\n"
+ "sub x19, x19, #0x1\n"
+ "ldr b18, [x14], #0x1\n"
+ "cmp x19, #0x1\n"
+ "ldr b16, [x13], #0x1\n"
+ "zip1 v17.16b, v17.16b, v16.16b\n"
+ "ldr b16, [x12], #0x1\n"
"zip1 v16.16b, v18.16b, v16.16b\n"
"zip1 v16.16b, v17.16b, v16.16b\n"
- "str s16, [x21, #0x0]\n"
- "add x21, x21, #0x4\n"
+ "str s16, [x15, #0x0]\n"
+ "add x15, x15, #0x4\n"
"bge 18b\n"
"19:" // Tail row loop: width 1 loop: skip
- "cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x80\n"
+ "cmp %x[height], #0x1\n"
"bge 11b\n"
"20:" // Done
+
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_32_2x2.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_32_2x2.hpp
index 706d7cd359..05e68daba1 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_32_2x2.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_32_2x2.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -42,237 +42,237 @@ void a64_transpose_interleave_32_2x2(uint16_t *out, const uint16_t *in, size_t w
"cmp %x[height], #0x4\n"
"blt 12f\n"
"1:" // Main row loop: Head
- "mov x25, %x[in]\n"
- "mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
- "add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
- "cmp x24, #0x40\n"
- "add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
+ "mov x24, %x[in]\n"
+ "mov x23, %x[width]\n"
+ "add x22, x24, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x19, x21, %x[in_stride]\n"
+ "cmp x23, #0x40\n"
+ "add %x[in], x19, %x[in_stride]\n"
+ "mov x20, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ldr q14, [x25], #0x10\n"
- "ldr q10, [x23], #0x10\n"
- "sub x24, x24, #0x40\n"
+ "ldr q14, [x24], #0x10\n"
+ "ldr q10, [x22], #0x10\n"
+ "sub x23, x23, #0x40\n"
"zip1 v12.8h, v14.8h, v10.8h\n"
- "ldr q5, [x22], #0x10\n"
- "ldr q3, [x20], #0x10\n"
+ "ldr q5, [x21], #0x10\n"
+ "ldr q3, [x19], #0x10\n"
"zip2 v31.8h, v14.8h, v10.8h\n"
"zip1 v19.8h, v5.8h, v3.8h\n"
- "ldr q27, [x25], #0x10\n"
- "ldr q25, [x23], #0x10\n"
+ "ldr q27, [x24], #0x10\n"
+ "ldr q25, [x22], #0x10\n"
"zip1 v11.8h, v27.8h, v25.8h\n"
"zip2 v24.8h, v27.8h, v25.8h\n"
- "ldr q6, [x22], #0x10\n"
- "ldr q29, [x20], #0x10\n"
+ "ldr q6, [x21], #0x10\n"
+ "ldr q29, [x19], #0x10\n"
"zip2 v15.8h, v5.8h, v3.8h\n"
"zip1 v18.8h, v6.8h, v29.8h\n"
- "ldr q17, [x25], #0x10\n"
- "ldr q9, [x23], #0x10\n"
+ "ldr q17, [x24], #0x10\n"
+ "ldr q9, [x22], #0x10\n"
"zip1 v0.8h, v17.8h, v9.8h\n"
"zip2 v9.8h, v17.8h, v9.8h\n"
- "ldr q21, [x22], #0x10\n"
- "ldr q20, [x20], #0x10\n"
+ "ldr q21, [x21], #0x10\n"
+ "ldr q20, [x19], #0x10\n"
"zip2 v8.8h, v6.8h, v29.8h\n"
"zip1 v30.8h, v21.8h, v20.8h\n"
- "ldr q17, [x25], #0x10\n"
- "ldr q5, [x23], #0x10\n"
+ "ldr q17, [x24], #0x10\n"
+ "ldr q5, [x22], #0x10\n"
"zip1 v13.8h, v17.8h, v5.8h\n"
"zip2 v25.8h, v17.8h, v5.8h\n"
- "ldr q7, [x22], #0x10\n"
- "ldr q29, [x20], #0x10\n"
+ "ldr q7, [x21], #0x10\n"
+ "ldr q29, [x19], #0x10\n"
"zip2 v27.8h, v21.8h, v20.8h\n"
"zip1 v14.8h, v7.8h, v29.8h\n"
- "ldr q28, [x25], #0x10\n"
- "ldr q17, [x23], #0x10\n"
+ "ldr q28, [x24], #0x10\n"
+ "ldr q17, [x22], #0x10\n"
"zip2 v1.8h, v7.8h, v29.8h\n"
- "cmp x24, #0x40\n"
- "ldr q10, [x22], #0x10\n"
- "ldr q21, [x20], #0x10\n"
+ "cmp x23, #0x40\n"
+ "ldr q10, [x21], #0x10\n"
+ "ldr q21, [x19], #0x10\n"
"zip1 v16.8h, v28.8h, v17.8h\n"
"zip2 v17.8h, v28.8h, v17.8h\n"
- "ldr q5, [x25], #0x10\n"
- "ldr q20, [x23], #0x10\n"
+ "ldr q5, [x24], #0x10\n"
+ "ldr q20, [x22], #0x10\n"
"zip1 v3.8h, v5.8h, v20.8h\n"
"zip2 v7.8h, v5.8h, v20.8h\n"
- "ldr q22, [x22], #0x10\n"
- "ldr q29, [x20], #0x10\n"
+ "ldr q22, [x21], #0x10\n"
+ "ldr q29, [x19], #0x10\n"
"zip1 v2.8h, v10.8h, v21.8h\n"
"zip2 v5.8h, v10.8h, v21.8h\n"
- "ldr q21, [x25], #0x10\n"
- "ldr q20, [x23], #0x10\n"
+ "ldr q21, [x24], #0x10\n"
+ "ldr q20, [x22], #0x10\n"
"zip1 v4.8h, v21.8h, v20.8h\n"
"zip2 v28.8h, v21.8h, v20.8h\n"
- "ldr q6, [x22], #0x10\n"
- "ldr q10, [x20], #0x10\n"
+ "ldr q6, [x21], #0x10\n"
+ "ldr q10, [x19], #0x10\n"
"zip1 v26.8h, v22.8h, v29.8h\n"
"zip2 v20.8h, v22.8h, v29.8h\n"
- "ldr q29, [x25], #0x10\n"
- "ldr q23, [x23], #0x10\n"
+ "ldr q29, [x24], #0x10\n"
+ "ldr q23, [x22], #0x10\n"
"zip1 v21.8h, v29.8h, v23.8h\n"
"zip2 v23.8h, v29.8h, v23.8h\n"
- "ldr q22, [x22], #0x10\n"
- "ldr q29, [x20], #0x10\n"
- "str q12, [x21, #0x0]\n"
+ "ldr q22, [x21], #0x10\n"
+ "ldr q29, [x19], #0x10\n"
+ "str q12, [x20, #0x0]\n"
"zip1 v12.8h, v6.8h, v10.8h\n"
- "str q31, [x21, #0x10]\n"
+ "str q31, [x20, #0x10]\n"
"zip2 v6.8h, v6.8h, v10.8h\n"
"zip1 v31.8h, v22.8h, v29.8h\n"
- "str q11, [x21, #0x20]\n"
+ "str q11, [x20, #0x20]\n"
"zip2 v11.8h, v22.8h, v29.8h\n"
- "str q24, [x21, #0x30]\n"
- "str q0, [x21, #0x40]\n"
- "str q9, [x21, #0x50]\n"
- "str q13, [x21, #0x60]\n"
- "str q25, [x21, #0x70]\n"
- "str q19, [x21, #0x80]\n"
- "str q15, [x21, #0x90]\n"
- "str q18, [x21, #0xa0]\n"
- "str q8, [x21, #0xb0]\n"
- "str q30, [x21, #0xc0]\n"
- "str q27, [x21, #0xd0]\n"
- "str q14, [x21, #0xe0]\n"
- "str q1, [x21, #0xf0]\n"
- "add x21, x21, %x[out_stride]\n"
- "str q16, [x21, #0x0]\n"
- "str q17, [x21, #0x10]\n"
- "str q3, [x21, #0x20]\n"
- "str q7, [x21, #0x30]\n"
- "str q4, [x21, #0x40]\n"
- "str q28, [x21, #0x50]\n"
- "str q21, [x21, #0x60]\n"
- "str q23, [x21, #0x70]\n"
- "str q2, [x21, #0x80]\n"
- "str q5, [x21, #0x90]\n"
- "str q26, [x21, #0xa0]\n"
- "str q20, [x21, #0xb0]\n"
- "str q12, [x21, #0xc0]\n"
- "str q6, [x21, #0xd0]\n"
- "str q31, [x21, #0xe0]\n"
- "str q11, [x21, #0xf0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q24, [x20, #0x30]\n"
+ "str q0, [x20, #0x40]\n"
+ "str q9, [x20, #0x50]\n"
+ "str q13, [x20, #0x60]\n"
+ "str q25, [x20, #0x70]\n"
+ "str q19, [x20, #0x80]\n"
+ "str q15, [x20, #0x90]\n"
+ "str q18, [x20, #0xa0]\n"
+ "str q8, [x20, #0xb0]\n"
+ "str q30, [x20, #0xc0]\n"
+ "str q27, [x20, #0xd0]\n"
+ "str q14, [x20, #0xe0]\n"
+ "str q1, [x20, #0xf0]\n"
+ "add x20, x20, %x[out_stride]\n"
+ "str q16, [x20, #0x0]\n"
+ "str q17, [x20, #0x10]\n"
+ "str q3, [x20, #0x20]\n"
+ "str q7, [x20, #0x30]\n"
+ "str q4, [x20, #0x40]\n"
+ "str q28, [x20, #0x50]\n"
+ "str q21, [x20, #0x60]\n"
+ "str q23, [x20, #0x70]\n"
+ "str q2, [x20, #0x80]\n"
+ "str q5, [x20, #0x90]\n"
+ "str q26, [x20, #0xa0]\n"
+ "str q20, [x20, #0xb0]\n"
+ "str q12, [x20, #0xc0]\n"
+ "str q6, [x20, #0xd0]\n"
+ "str q31, [x20, #0xe0]\n"
+ "str q11, [x20, #0xf0]\n"
+ "add x20, x20, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cmp x24, #0x20\n"
+ "cmp x23, #0x20\n"
"blt 5f\n"
"4:" // Main row loop: Column loop
- "ldr q17, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
- "sub x24, x24, #0x20\n"
- "cmp x24, #0x20\n"
- "ldr q21, [x22], #0x10\n"
- "ldr q18, [x20], #0x10\n"
+ "ldr q17, [x24], #0x10\n"
+ "ldr q16, [x22], #0x10\n"
+ "sub x23, x23, #0x20\n"
+ "cmp x23, #0x20\n"
+ "ldr q21, [x21], #0x10\n"
+ "ldr q18, [x19], #0x10\n"
"zip1 v1.8h, v17.8h, v16.8h\n"
"zip2 v0.8h, v17.8h, v16.8h\n"
- "ldr q17, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
+ "ldr q17, [x24], #0x10\n"
+ "ldr q16, [x22], #0x10\n"
"zip1 v31.8h, v17.8h, v16.8h\n"
"zip2 v30.8h, v17.8h, v16.8h\n"
- "ldr q20, [x22], #0x10\n"
- "ldr q19, [x20], #0x10\n"
+ "ldr q20, [x21], #0x10\n"
+ "ldr q19, [x19], #0x10\n"
"zip1 v29.8h, v21.8h, v18.8h\n"
"zip2 v28.8h, v21.8h, v18.8h\n"
- "ldr q17, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
+ "ldr q17, [x24], #0x10\n"
+ "ldr q16, [x22], #0x10\n"
"zip1 v27.8h, v17.8h, v16.8h\n"
"zip2 v26.8h, v17.8h, v16.8h\n"
- "ldr q25, [x22], #0x10\n"
- "ldr q18, [x20], #0x10\n"
+ "ldr q25, [x21], #0x10\n"
+ "ldr q18, [x19], #0x10\n"
"zip1 v24.8h, v20.8h, v19.8h\n"
"zip2 v23.8h, v20.8h, v19.8h\n"
- "ldr q17, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
+ "ldr q17, [x24], #0x10\n"
+ "ldr q16, [x22], #0x10\n"
"zip1 v22.8h, v17.8h, v16.8h\n"
"zip2 v21.8h, v17.8h, v16.8h\n"
- "ldr q20, [x22], #0x10\n"
- "ldr q16, [x20], #0x10\n"
+ "ldr q20, [x21], #0x10\n"
+ "ldr q16, [x19], #0x10\n"
"zip1 v19.8h, v25.8h, v18.8h\n"
"zip2 v18.8h, v25.8h, v18.8h\n"
"zip1 v17.8h, v20.8h, v16.8h\n"
"zip2 v16.8h, v20.8h, v16.8h\n"
- "str q1, [x21, #0x0]\n"
- "str q0, [x21, #0x10]\n"
- "str q31, [x21, #0x20]\n"
- "str q30, [x21, #0x30]\n"
- "str q27, [x21, #0x40]\n"
- "str q26, [x21, #0x50]\n"
- "str q22, [x21, #0x60]\n"
- "str q21, [x21, #0x70]\n"
- "str q29, [x21, #0x80]\n"
- "str q28, [x21, #0x90]\n"
- "str q24, [x21, #0xa0]\n"
- "str q23, [x21, #0xb0]\n"
- "str q19, [x21, #0xc0]\n"
- "str q18, [x21, #0xd0]\n"
- "str q17, [x21, #0xe0]\n"
- "str q16, [x21, #0xf0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q1, [x20, #0x0]\n"
+ "str q0, [x20, #0x10]\n"
+ "str q31, [x20, #0x20]\n"
+ "str q30, [x20, #0x30]\n"
+ "str q27, [x20, #0x40]\n"
+ "str q26, [x20, #0x50]\n"
+ "str q22, [x20, #0x60]\n"
+ "str q21, [x20, #0x70]\n"
+ "str q29, [x20, #0x80]\n"
+ "str q28, [x20, #0x90]\n"
+ "str q24, [x20, #0xa0]\n"
+ "str q23, [x20, #0xb0]\n"
+ "str q19, [x20, #0xc0]\n"
+ "str q18, [x20, #0xd0]\n"
+ "str q17, [x20, #0xe0]\n"
+ "str q16, [x20, #0xf0]\n"
+ "add x20, x20, %x[out_stride]\n"
"bge 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp x24, #0x10\n"
+ "cmp x23, #0x10\n"
"blt 7f\n"
"6:" // Main row loop: width 16 loop: loop
- "ldr q17, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
- "sub x24, x24, #0x10\n"
- "cmp x24, #0x10\n"
- "ldr q24, [x22], #0x10\n"
- "ldr q23, [x20], #0x10\n"
+ "ldr q17, [x24], #0x10\n"
+ "ldr q16, [x22], #0x10\n"
+ "sub x23, x23, #0x10\n"
+ "cmp x23, #0x10\n"
+ "ldr q24, [x21], #0x10\n"
+ "ldr q23, [x19], #0x10\n"
"zip1 v19.8h, v17.8h, v16.8h\n"
"zip2 v18.8h, v17.8h, v16.8h\n"
- "ldr q17, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
+ "ldr q17, [x24], #0x10\n"
+ "ldr q16, [x22], #0x10\n"
"zip1 v22.8h, v17.8h, v16.8h\n"
"zip2 v21.8h, v17.8h, v16.8h\n"
- "ldr q20, [x22], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "str q19, [x21, #0x0]\n"
+ "ldr q20, [x21], #0x10\n"
+ "ldr q16, [x19], #0x10\n"
+ "str q19, [x20, #0x0]\n"
"zip1 v19.8h, v24.8h, v23.8h\n"
- "str q18, [x21, #0x10]\n"
+ "str q18, [x20, #0x10]\n"
"zip2 v18.8h, v24.8h, v23.8h\n"
"zip1 v17.8h, v20.8h, v16.8h\n"
- "str q22, [x21, #0x20]\n"
+ "str q22, [x20, #0x20]\n"
"zip2 v16.8h, v20.8h, v16.8h\n"
- "str q21, [x21, #0x30]\n"
- "str q19, [x21, #0x80]\n"
- "str q18, [x21, #0x90]\n"
- "str q17, [x21, #0xa0]\n"
- "str q16, [x21, #0xb0]\n"
- "add x21, x21, #0x40\n"
+ "str q21, [x20, #0x30]\n"
+ "str q19, [x20, #0x80]\n"
+ "str q18, [x20, #0x90]\n"
+ "str q17, [x20, #0xa0]\n"
+ "str q16, [x20, #0xb0]\n"
+ "add x20, x20, #0x40\n"
"bge 6b\n"
"7:" // Main row loop: width 16 loop: skip
- "cmp x24, #0x4\n"
+ "cmp x23, #0x4\n"
"blt 9f\n"
"8:" // Main row loop: width 4 loop: loop
- "ldr d19, [x25], #0x8\n"
- "ldr d16, [x23], #0x8\n"
- "sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr d18, [x22], #0x8\n"
- "ldr d17, [x20], #0x8\n"
+ "ldr d19, [x24], #0x8\n"
+ "ldr d16, [x22], #0x8\n"
+ "sub x23, x23, #0x4\n"
+ "cmp x23, #0x4\n"
+ "ldr d18, [x21], #0x8\n"
+ "ldr d17, [x19], #0x8\n"
"zip1 v16.8h, v19.8h, v16.8h\n"
- "str q16, [x21, #0x0]\n"
+ "str q16, [x20, #0x0]\n"
"zip1 v16.8h, v18.8h, v17.8h\n"
- "str q16, [x21, #0x80]\n"
- "add x21, x21, #0x10\n"
+ "str q16, [x20, #0x80]\n"
+ "add x20, x20, #0x10\n"
"bge 8b\n"
"9:" // Main row loop: width 4 loop: skip
- "cmp x24, #0x1\n"
+ "cmp x23, #0x1\n"
"blt 11f\n"
"10:" // Main row loop: width 1 loop: loop
- "ldr h19, [x25], #0x2\n"
- "ldr h16, [x23], #0x2\n"
- "sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr h18, [x22], #0x2\n"
- "ldr h17, [x20], #0x2\n"
+ "ldr h19, [x24], #0x2\n"
+ "ldr h16, [x22], #0x2\n"
+ "sub x23, x23, #0x1\n"
+ "cmp x23, #0x1\n"
+ "ldr h18, [x21], #0x2\n"
+ "ldr h17, [x19], #0x2\n"
"zip1 v16.8h, v19.8h, v16.8h\n"
- "str s16, [x21, #0x0]\n"
+ "str s16, [x20, #0x0]\n"
"zip1 v16.8h, v18.8h, v17.8h\n"
- "str s16, [x21, #0x80]\n"
- "add x21, x21, #0x4\n"
+ "str s16, [x20, #0x80]\n"
+ "add x20, x20, #0x4\n"
"bge 10b\n"
"11:" // Main row loop: width 1 loop: skip
"cmp %x[height], #0x4\n"
@@ -282,145 +282,145 @@ void a64_transpose_interleave_32_2x2(uint16_t *out, const uint16_t *in, size_t w
"12:" // Main loop skip
"13:" // Tail row loop: Head
- "mov x25, %x[in]\n"
- "mov x20, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
+ "mov x24, %x[in]\n"
+ "mov x19, %x[width]\n"
+ "add x22, x24, %x[in_stride]\n"
"cmp %x[height], #0x1\n"
- "add %x[in], x23, %x[in_stride]\n"
- "csel x23, x23, %x[pad_row], GT\n"
- "cmp x20, #0x40\n"
- "mov x21, %x[out]\n"
+ "add %x[in], x22, %x[in_stride]\n"
+ "csel x22, x22, %x[pad_row], GT\n"
+ "cmp x19, #0x40\n"
+ "mov x20, %x[out]\n"
"sub %x[height], %x[height], #0x2\n"
"blt 15f\n"
"14:" // Tail row loop: Unroll column loop
- "ldr q18, [x25], #0x10\n"
- "ldr q17, [x23], #0x10\n"
- "sub x20, x20, #0x40\n"
+ "ldr q18, [x24], #0x10\n"
+ "ldr q17, [x22], #0x10\n"
+ "sub x19, x19, #0x40\n"
"zip1 v0.8h, v18.8h, v17.8h\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
+ "ldr q19, [x24], #0x10\n"
+ "ldr q16, [x22], #0x10\n"
"zip2 v31.8h, v18.8h, v17.8h\n"
"zip1 v30.8h, v19.8h, v16.8h\n"
- "ldr q18, [x25], #0x10\n"
- "ldr q17, [x23], #0x10\n"
+ "ldr q18, [x24], #0x10\n"
+ "ldr q17, [x22], #0x10\n"
"zip2 v29.8h, v19.8h, v16.8h\n"
"zip1 v28.8h, v18.8h, v17.8h\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
+ "ldr q19, [x24], #0x10\n"
+ "ldr q16, [x22], #0x10\n"
"zip2 v27.8h, v18.8h, v17.8h\n"
"zip1 v26.8h, v19.8h, v16.8h\n"
- "ldr q18, [x25], #0x10\n"
- "ldr q17, [x23], #0x10\n"
+ "ldr q18, [x24], #0x10\n"
+ "ldr q17, [x22], #0x10\n"
"zip2 v25.8h, v19.8h, v16.8h\n"
- "cmp x20, #0x40\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
+ "cmp x19, #0x40\n"
+ "ldr q19, [x24], #0x10\n"
+ "ldr q16, [x22], #0x10\n"
"zip1 v24.8h, v18.8h, v17.8h\n"
"zip2 v23.8h, v18.8h, v17.8h\n"
- "ldr q18, [x25], #0x10\n"
- "ldr q17, [x23], #0x10\n"
+ "ldr q18, [x24], #0x10\n"
+ "ldr q17, [x22], #0x10\n"
"zip1 v22.8h, v19.8h, v16.8h\n"
"zip2 v21.8h, v19.8h, v16.8h\n"
- "ldr q20, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
- "str q0, [x21, #0x0]\n"
+ "ldr q20, [x24], #0x10\n"
+ "ldr q16, [x22], #0x10\n"
+ "str q0, [x20, #0x0]\n"
"zip1 v19.8h, v18.8h, v17.8h\n"
- "str q31, [x21, #0x10]\n"
+ "str q31, [x20, #0x10]\n"
"zip2 v18.8h, v18.8h, v17.8h\n"
"zip1 v17.8h, v20.8h, v16.8h\n"
- "str q30, [x21, #0x20]\n"
+ "str q30, [x20, #0x20]\n"
"zip2 v16.8h, v20.8h, v16.8h\n"
- "str q29, [x21, #0x30]\n"
- "str q28, [x21, #0x40]\n"
- "str q27, [x21, #0x50]\n"
- "str q26, [x21, #0x60]\n"
- "str q25, [x21, #0x70]\n"
- "add x21, x21, %x[out_stride]\n"
- "str q24, [x21, #0x0]\n"
- "str q23, [x21, #0x10]\n"
- "str q22, [x21, #0x20]\n"
- "str q21, [x21, #0x30]\n"
- "str q19, [x21, #0x40]\n"
- "str q18, [x21, #0x50]\n"
- "str q17, [x21, #0x60]\n"
- "str q16, [x21, #0x70]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q29, [x20, #0x30]\n"
+ "str q28, [x20, #0x40]\n"
+ "str q27, [x20, #0x50]\n"
+ "str q26, [x20, #0x60]\n"
+ "str q25, [x20, #0x70]\n"
+ "add x20, x20, %x[out_stride]\n"
+ "str q24, [x20, #0x0]\n"
+ "str q23, [x20, #0x10]\n"
+ "str q22, [x20, #0x20]\n"
+ "str q21, [x20, #0x30]\n"
+ "str q19, [x20, #0x40]\n"
+ "str q18, [x20, #0x50]\n"
+ "str q17, [x20, #0x60]\n"
+ "str q16, [x20, #0x70]\n"
+ "add x20, x20, %x[out_stride]\n"
"bge 14b\n"
"15:" // Tail row loop: Unroll column loop skip
- "cmp x20, #0x20\n"
+ "cmp x19, #0x20\n"
"blt 17f\n"
"16:" // Tail row loop: Column loop
- "ldr q18, [x25], #0x10\n"
- "ldr q17, [x23], #0x10\n"
- "sub x20, x20, #0x20\n"
- "cmp x20, #0x20\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
+ "ldr q18, [x24], #0x10\n"
+ "ldr q17, [x22], #0x10\n"
+ "sub x19, x19, #0x20\n"
+ "cmp x19, #0x20\n"
+ "ldr q19, [x24], #0x10\n"
+ "ldr q16, [x22], #0x10\n"
"zip1 v24.8h, v18.8h, v17.8h\n"
"zip2 v23.8h, v18.8h, v17.8h\n"
- "ldr q18, [x25], #0x10\n"
- "ldr q17, [x23], #0x10\n"
+ "ldr q18, [x24], #0x10\n"
+ "ldr q17, [x22], #0x10\n"
"zip1 v22.8h, v19.8h, v16.8h\n"
"zip2 v21.8h, v19.8h, v16.8h\n"
- "ldr q20, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
+ "ldr q20, [x24], #0x10\n"
+ "ldr q16, [x22], #0x10\n"
"zip1 v19.8h, v18.8h, v17.8h\n"
"zip2 v18.8h, v18.8h, v17.8h\n"
"zip1 v17.8h, v20.8h, v16.8h\n"
"zip2 v16.8h, v20.8h, v16.8h\n"
- "str q24, [x21, #0x0]\n"
- "str q23, [x21, #0x10]\n"
- "str q22, [x21, #0x20]\n"
- "str q21, [x21, #0x30]\n"
- "str q19, [x21, #0x40]\n"
- "str q18, [x21, #0x50]\n"
- "str q17, [x21, #0x60]\n"
- "str q16, [x21, #0x70]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q24, [x20, #0x0]\n"
+ "str q23, [x20, #0x10]\n"
+ "str q22, [x20, #0x20]\n"
+ "str q21, [x20, #0x30]\n"
+ "str q19, [x20, #0x40]\n"
+ "str q18, [x20, #0x50]\n"
+ "str q17, [x20, #0x60]\n"
+ "str q16, [x20, #0x70]\n"
+ "add x20, x20, %x[out_stride]\n"
"bge 16b\n"
"17:" // Tail row loop: Column loop skip
- "cmp x20, #0x10\n"
+ "cmp x19, #0x10\n"
"blt 19f\n"
"18:" // Tail row loop: width 16 loop: loop
- "ldr q18, [x25], #0x10\n"
- "ldr q17, [x23], #0x10\n"
- "sub x20, x20, #0x10\n"
- "cmp x20, #0x10\n"
- "ldr q20, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
+ "ldr q18, [x24], #0x10\n"
+ "ldr q17, [x22], #0x10\n"
+ "sub x19, x19, #0x10\n"
+ "cmp x19, #0x10\n"
+ "ldr q20, [x24], #0x10\n"
+ "ldr q16, [x22], #0x10\n"
"zip1 v19.8h, v18.8h, v17.8h\n"
"zip2 v18.8h, v18.8h, v17.8h\n"
"zip1 v17.8h, v20.8h, v16.8h\n"
"zip2 v16.8h, v20.8h, v16.8h\n"
- "str q19, [x21, #0x0]\n"
- "str q18, [x21, #0x10]\n"
- "str q17, [x21, #0x20]\n"
- "str q16, [x21, #0x30]\n"
- "add x21, x21, #0x40\n"
+ "str q19, [x20, #0x0]\n"
+ "str q18, [x20, #0x10]\n"
+ "str q17, [x20, #0x20]\n"
+ "str q16, [x20, #0x30]\n"
+ "add x20, x20, #0x40\n"
"bge 18b\n"
"19:" // Tail row loop: width 16 loop: skip
- "cmp x20, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 21f\n"
"20:" // Tail row loop: width 4 loop: loop
- "ldr d17, [x25], #0x8\n"
- "ldr d16, [x23], #0x8\n"
- "sub x20, x20, #0x4\n"
- "cmp x20, #0x4\n"
+ "ldr d17, [x24], #0x8\n"
+ "ldr d16, [x22], #0x8\n"
+ "sub x19, x19, #0x4\n"
+ "cmp x19, #0x4\n"
"zip1 v16.8h, v17.8h, v16.8h\n"
- "str q16, [x21, #0x0]\n"
- "add x21, x21, #0x10\n"
+ "str q16, [x20, #0x0]\n"
+ "add x20, x20, #0x10\n"
"bge 20b\n"
"21:" // Tail row loop: width 4 loop: skip
- "cmp x20, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 23f\n"
"22:" // Tail row loop: width 1 loop: loop
- "ldr h17, [x25], #0x2\n"
- "ldr h16, [x23], #0x2\n"
- "sub x20, x20, #0x1\n"
- "cmp x20, #0x1\n"
+ "ldr h17, [x24], #0x2\n"
+ "ldr h16, [x22], #0x2\n"
+ "sub x19, x19, #0x1\n"
+ "cmp x19, #0x1\n"
"zip1 v16.8h, v17.8h, v16.8h\n"
- "str s16, [x21, #0x0]\n"
- "add x21, x21, #0x4\n"
+ "str s16, [x20, #0x0]\n"
+ "add x20, x20, #0x4\n"
"bge 22b\n"
"23:" // Tail row loop: width 1 loop: skip
"cmp %x[height], #0x1\n"
@@ -430,7 +430,7 @@ void a64_transpose_interleave_32_2x2(uint16_t *out, const uint16_t *in, size_t w
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_48.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_48.hpp
index b4827525cd..4f7019f564 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_48.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_48.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -36,167 +36,168 @@ void a64_transpose_interleave_48(uint16_t *out, const uint16_t *in, size_t width
"cmp %x[height], #0x4\n"
"blt 10f\n"
"1:" // Main row loop: Head
- "mov x25, %x[in]\n"
- "mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
- "add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
- "cmp x24, #0x18\n"
+ "mov x24, %x[in]\n"
+ "mov x23, %x[out]\n"
+ "add x22, x24, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x18\n"
"blt 3f\n"
"2:" // Main row loop: Column loop
- "ldr q27, [x25], #0x10\n"
- "ldr q26, [x23], #0x10\n"
- "sub x24, x24, #0x18\n"
- "cmp x24, #0x18\n"
- "ldr q25, [x22], #0x10\n"
+ "ldr q27, [x24], #0x10\n"
+ "sub x19, x19, #0x18\n"
+ "ldr q26, [x22], #0x10\n"
+ "cmp x19, #0x18\n"
+ "ldr q25, [x21], #0x10\n"
"ldr q24, [x20], #0x10\n"
- "ldr q23, [x25], #0x10\n"
- "ldr q22, [x23], #0x10\n"
- "ldr q21, [x22], #0x10\n"
+ "ldr q23, [x24], #0x10\n"
+ "ldr q22, [x22], #0x10\n"
+ "ldr q21, [x21], #0x10\n"
"ldr q20, [x20], #0x10\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "ldr q17, [x22], #0x10\n"
+ "ldr q19, [x24], #0x10\n"
+ "ldr q18, [x22], #0x10\n"
+ "ldr q17, [x21], #0x10\n"
"ldr q16, [x20], #0x10\n"
- "str q27, [x21, #0x0]\n"
- "str q23, [x21, #0x10]\n"
- "str q19, [x21, #0x20]\n"
- "str q26, [x21, #0x30]\n"
- "str q22, [x21, #0x40]\n"
- "str q18, [x21, #0x50]\n"
- "str q25, [x21, #0x60]\n"
- "str q21, [x21, #0x70]\n"
- "str q17, [x21, #0x80]\n"
- "str q24, [x21, #0x90]\n"
- "str q20, [x21, #0xa0]\n"
- "str q16, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q27, [x23, #0x0]\n"
+ "str q23, [x23, #0x10]\n"
+ "str q19, [x23, #0x20]\n"
+ "str q26, [x23, #0x30]\n"
+ "str q22, [x23, #0x40]\n"
+ "str q18, [x23, #0x50]\n"
+ "str q25, [x23, #0x60]\n"
+ "str q21, [x23, #0x70]\n"
+ "str q17, [x23, #0x80]\n"
+ "str q24, [x23, #0x90]\n"
+ "str q20, [x23, #0xa0]\n"
+ "str q16, [x23, #0xb0]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Column loop skip
- "cmp x24, #0x10\n"
+ "cmp x19, #0x10\n"
"blt 5f\n"
"4:" // Main row loop: width 16 loop: loop
- "ldr q23, [x25], #0x10\n"
- "ldr q22, [x23], #0x10\n"
- "sub x24, x24, #0x10\n"
- "cmp x24, #0x10\n"
- "ldr q21, [x22], #0x10\n"
+ "ldr q23, [x24], #0x10\n"
+ "sub x19, x19, #0x10\n"
+ "ldr q22, [x22], #0x10\n"
+ "cmp x19, #0x10\n"
+ "ldr q21, [x21], #0x10\n"
"ldr q20, [x20], #0x10\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "ldr q17, [x22], #0x10\n"
+ "ldr q19, [x24], #0x10\n"
+ "ldr q18, [x22], #0x10\n"
+ "ldr q17, [x21], #0x10\n"
"ldr q16, [x20], #0x10\n"
- "str q23, [x21, #0x0]\n"
- "str q19, [x21, #0x10]\n"
- "str q22, [x21, #0x30]\n"
- "str q18, [x21, #0x40]\n"
- "str q21, [x21, #0x60]\n"
- "str q17, [x21, #0x70]\n"
- "str q20, [x21, #0x90]\n"
- "str q16, [x21, #0xa0]\n"
- "add x21, x21, #0x20\n"
+ "str q23, [x23, #0x0]\n"
+ "str q19, [x23, #0x10]\n"
+ "str q22, [x23, #0x30]\n"
+ "str q18, [x23, #0x40]\n"
+ "str q21, [x23, #0x60]\n"
+ "str q17, [x23, #0x70]\n"
+ "str q20, [x23, #0x90]\n"
+ "str q16, [x23, #0xa0]\n"
+ "add x23, x23, #0x20\n"
"bge 4b\n"
"5:" // Main row loop: width 16 loop: skip
- "cmp x24, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
- "ldr d19, [x25], #0x8\n"
- "ldr d18, [x23], #0x8\n"
- "sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr d17, [x22], #0x8\n"
+ "ldr d19, [x24], #0x8\n"
+ "sub x19, x19, #0x4\n"
+ "ldr d18, [x22], #0x8\n"
+ "cmp x19, #0x4\n"
+ "ldr d17, [x21], #0x8\n"
"ldr d16, [x20], #0x8\n"
- "str d19, [x21, #0x0]\n"
- "str d18, [x21, #0x30]\n"
- "str d17, [x21, #0x60]\n"
- "str d16, [x21, #0x90]\n"
- "add x21, x21, #0x8\n"
+ "str d19, [x23, #0x0]\n"
+ "str d18, [x23, #0x30]\n"
+ "str d17, [x23, #0x60]\n"
+ "str d16, [x23, #0x90]\n"
+ "add x23, x23, #0x8\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
- "cmp x24, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
- "ldr h19, [x25], #0x2\n"
- "ldr h18, [x23], #0x2\n"
- "sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr h17, [x22], #0x2\n"
+ "ldr h19, [x24], #0x2\n"
+ "sub x19, x19, #0x1\n"
+ "ldr h18, [x22], #0x2\n"
+ "cmp x19, #0x1\n"
+ "ldr h17, [x21], #0x2\n"
"ldr h16, [x20], #0x2\n"
- "str h19, [x21, #0x0]\n"
- "str h18, [x21, #0x30]\n"
- "str h17, [x21, #0x60]\n"
- "str h16, [x21, #0x90]\n"
- "add x21, x21, #0x2\n"
+ "str h19, [x23, #0x0]\n"
+ "str h18, [x23, #0x30]\n"
+ "str h17, [x23, #0x60]\n"
+ "str h16, [x23, #0x90]\n"
+ "add x23, x23, #0x2\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
- "cmp %x[height], #0x4\n"
"add %x[out], %x[out], #0xc0\n"
+ "cmp %x[height], #0x4\n"
"bge 1b\n"
"cbz %x[height], 20f\n"
"10:" // Main loop skip
"11:" // Tail row loop: Head
- "mov x20, %x[width]\n"
- "mov x25, %x[in]\n"
- "cmp x20, #0x18\n"
- "add %x[in], x25, %x[in_stride]\n"
- "mov x21, %x[out]\n"
+ "mov x24, %x[in]\n"
+ "mov x23, %x[out]\n"
+ "add %x[in], x24, %x[in_stride]\n"
"sub %x[height], %x[height], #0x1\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x18\n"
"blt 13f\n"
"12:" // Tail row loop: Column loop
- "ldr q18, [x25], #0x10\n"
- "ldr q17, [x25], #0x10\n"
- "sub x20, x20, #0x18\n"
- "cmp x20, #0x18\n"
- "ldr q16, [x25], #0x10\n"
- "str q18, [x21, #0x0]\n"
- "str q17, [x21, #0x10]\n"
- "str q16, [x21, #0x20]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr q18, [x24], #0x10\n"
+ "sub x19, x19, #0x18\n"
+ "cmp x19, #0x18\n"
+ "ldr q17, [x24], #0x10\n"
+ "ldr q16, [x24], #0x10\n"
+ "str q18, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q16, [x23, #0x20]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 12b\n"
"13:" // Tail row loop: Column loop skip
- "cmp x20, #0x10\n"
+ "cmp x19, #0x10\n"
"blt 15f\n"
"14:" // Tail row loop: width 16 loop: loop
- "ldr q17, [x25], #0x10\n"
- "ldr q16, [x25], #0x10\n"
- "sub x20, x20, #0x10\n"
- "cmp x20, #0x10\n"
- "str q17, [x21, #0x0]\n"
- "str q16, [x21, #0x10]\n"
- "add x21, x21, #0x20\n"
+ "ldr q17, [x24], #0x10\n"
+ "sub x19, x19, #0x10\n"
+ "cmp x19, #0x10\n"
+ "ldr q16, [x24], #0x10\n"
+ "str q17, [x23, #0x0]\n"
+ "str q16, [x23, #0x10]\n"
+ "add x23, x23, #0x20\n"
"bge 14b\n"
"15:" // Tail row loop: width 16 loop: skip
- "cmp x20, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 17f\n"
"16:" // Tail row loop: width 4 loop: loop
- "ldr d16, [x25], #0x8\n"
- "sub x20, x20, #0x4\n"
- "cmp x20, #0x4\n"
- "str d16, [x21, #0x0]\n"
- "add x21, x21, #0x8\n"
+ "ldr d16, [x24], #0x8\n"
+ "sub x19, x19, #0x4\n"
+ "cmp x19, #0x4\n"
+ "str d16, [x23, #0x0]\n"
+ "add x23, x23, #0x8\n"
"bge 16b\n"
"17:" // Tail row loop: width 4 loop: skip
- "cmp x20, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 19f\n"
"18:" // Tail row loop: width 1 loop: loop
- "ldr h16, [x25], #0x2\n"
- "sub x20, x20, #0x1\n"
- "cmp x20, #0x1\n"
- "str h16, [x21, #0x0]\n"
- "add x21, x21, #0x2\n"
+ "ldr h16, [x24], #0x2\n"
+ "sub x19, x19, #0x1\n"
+ "cmp x19, #0x1\n"
+ "str h16, [x23, #0x0]\n"
+ "add x23, x23, #0x2\n"
"bge 18b\n"
"19:" // Tail row loop: width 1 loop: skip
- "cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x30\n"
+ "cmp %x[height], #0x1\n"
"bge 11b\n"
"20:" // Done
+
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
- : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "x20", "x21", "x22", "x23", "x24", "x25"
+ : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "x19", "x20", "x21", "x22", "x23", "x24"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_4_1x16.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_4_1x16.hpp
index e1ab14e594..cb20172364 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_4_1x16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_4_1x16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -41,10 +41,9 @@ void a64_transpose_interleave_4_1x16(uint8_t *out, const uint8_t *in, size_t wid
__asm__ __volatile__(
"1:" // Main row loop: Head
- "mov x17, %x[in]\n"
- "add x16, x17, %x[in_stride]\n"
- "add x15, x16, %x[in_stride]\n"
- "add x14, x15, %x[in_stride]\n"
+ "mov x16, %x[in]\n"
+ "mov x15, %x[out]\n"
+ "add x14, x16, %x[in_stride]\n"
"add x13, x14, %x[in_stride]\n"
"add x12, x13, %x[in_stride]\n"
"add x11, x12, %x[in_stride]\n"
@@ -57,234 +56,235 @@ void a64_transpose_interleave_4_1x16(uint8_t *out, const uint8_t *in, size_t wid
"add x24, x25, %x[in_stride]\n"
"add x23, x24, %x[in_stride]\n"
"add x22, x23, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
+ "add %x[in], x20, %x[in_stride]\n"
"cmp %x[height], #0xf\n"
- "add %x[in], x22, %x[in_stride]\n"
+ "csel x20, x20, %x[pad_row], GT\n"
+ "csel x21, x21, %x[pad_row], GE\n"
+ "cmp %x[height], #0xd\n"
"csel x22, x22, %x[pad_row], GT\n"
"csel x23, x23, %x[pad_row], GE\n"
- "cmp %x[height], #0xd\n"
+ "cmp %x[height], #0xb\n"
"csel x24, x24, %x[pad_row], GT\n"
"csel x25, x25, %x[pad_row], GE\n"
- "cmp %x[height], #0xb\n"
+ "cmp %x[height], #0x9\n"
"csel x26, x26, %x[pad_row], GT\n"
"csel x27, x27, %x[pad_row], GE\n"
- "cmp %x[height], #0x9\n"
+ "cmp %x[height], #0x7\n"
"csel x28, x28, %x[pad_row], GT\n"
"csel x9, x9, %x[pad_row], GE\n"
- "cmp %x[height], #0x7\n"
+ "cmp %x[height], #0x5\n"
"csel x10, x10, %x[pad_row], GT\n"
"csel x11, x11, %x[pad_row], GE\n"
- "cmp %x[height], #0x5\n"
- "mov x21, %x[width]\n"
+ "cmp %x[height], #0x3\n"
"csel x12, x12, %x[pad_row], GT\n"
"csel x13, x13, %x[pad_row], GE\n"
- "cmp %x[height], #0x3\n"
- "csel x14, x14, %x[pad_row], GT\n"
- "csel x15, x15, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x16, x16, %x[pad_row], GT\n"
- "cmp x21, #0x10\n"
- "mov x20, %x[out]\n"
+ "csel x14, x14, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x10\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x10\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ldr q3, [x17], #0x10\n"
- "ldr q9, [x16], #0x10\n"
- "sub x21, x21, #0x10\n"
- "cmp x21, #0x10\n"
- "ldr q2, [x15], #0x10\n"
- "ldr q8, [x14], #0x10\n"
- "ldr q0, [x13], #0x10\n"
- "ldr q31, [x12], #0x10\n"
- "ldr q30, [x11], #0x10\n"
- "ldr q7, [x10], #0x10\n"
- "ldr q29, [x9], #0x10\n"
- "ldr q28, [x28], #0x10\n"
- "zip1 v27.16b, v3.16b, v29.16b\n"
- "zip1 v6.16b, v9.16b, v28.16b\n"
- "ldr q25, [x27], #0x10\n"
- "ldr q24, [x26], #0x10\n"
- "zip1 v26.16b, v2.16b, v25.16b\n"
- "zip1 v1.16b, v8.16b, v24.16b\n"
- "ldr q23, [x25], #0x10\n"
- "ldr q22, [x24], #0x10\n"
- "zip1 v21.16b, v0.16b, v23.16b\n"
- "zip1 v20.16b, v31.16b, v22.16b\n"
- "ldr q19, [x23], #0x10\n"
- "ldr q18, [x22], #0x10\n"
- "zip1 v17.16b, v30.16b, v19.16b\n"
- "zip1 v16.16b, v7.16b, v18.16b\n"
- "zip2 v5.16b, v3.16b, v29.16b\n"
- "zip2 v0.16b, v0.16b, v23.16b\n"
- "zip2 v4.16b, v2.16b, v25.16b\n"
- "zip2 v3.16b, v30.16b, v19.16b\n"
- "zip2 v2.16b, v9.16b, v28.16b\n"
- "zip2 v31.16b, v31.16b, v22.16b\n"
- "zip2 v30.16b, v8.16b, v24.16b\n"
- "zip2 v29.16b, v7.16b, v18.16b\n"
- "zip1 v25.16b, v27.16b, v21.16b\n"
- "zip1 v24.16b, v26.16b, v17.16b\n"
- "zip1 v23.16b, v6.16b, v20.16b\n"
- "zip1 v22.16b, v1.16b, v16.16b\n"
- "zip2 v28.16b, v27.16b, v21.16b\n"
- "zip2 v27.16b, v26.16b, v17.16b\n"
- "zip2 v26.16b, v6.16b, v20.16b\n"
- "zip2 v21.16b, v1.16b, v16.16b\n"
- "zip1 v1.16b, v5.16b, v0.16b\n"
- "zip1 v20.16b, v4.16b, v3.16b\n"
- "zip1 v19.16b, v2.16b, v31.16b\n"
- "zip1 v16.16b, v30.16b, v29.16b\n"
- "zip1 v18.16b, v25.16b, v24.16b\n"
- "zip1 v17.16b, v23.16b, v22.16b\n"
- "zip2 v25.16b, v25.16b, v24.16b\n"
- "zip2 v24.16b, v23.16b, v22.16b\n"
- "zip2 v0.16b, v5.16b, v0.16b\n"
- "zip2 v23.16b, v4.16b, v3.16b\n"
- "zip2 v31.16b, v2.16b, v31.16b\n"
- "zip2 v22.16b, v30.16b, v29.16b\n"
- "zip1 v30.16b, v28.16b, v27.16b\n"
- "zip1 v29.16b, v26.16b, v21.16b\n"
- "zip2 v28.16b, v28.16b, v27.16b\n"
- "zip2 v27.16b, v26.16b, v21.16b\n"
- "zip1 v26.16b, v1.16b, v20.16b\n"
- "zip1 v21.16b, v19.16b, v16.16b\n"
- "zip2 v20.16b, v1.16b, v20.16b\n"
- "zip2 v19.16b, v19.16b, v16.16b\n"
+ "ldr q20, [x16], #0x10\n"
+ "sub x19, x19, #0x10\n"
+ "ldr q19, [x14], #0x10\n"
+ "cmp x19, #0x10\n"
+ "ldr q18, [x13], #0x10\n"
+ "ldr q11, [x12], #0x10\n"
+ "ldr q10, [x11], #0x10\n"
+ "ldr q9, [x10], #0x10\n"
+ "ldr q8, [x9], #0x10\n"
+ "ldr q7, [x28], #0x10\n"
+ "ldr q16, [x27], #0x10\n"
+ "zip1 v6.16b, v20.16b, v16.16b\n"
+ "ldr q17, [x26], #0x10\n"
+ "zip2 v5.16b, v20.16b, v16.16b\n"
+ "ldr q16, [x25], #0x10\n"
+ "ldr q4, [x24], #0x10\n"
+ "zip1 v3.16b, v19.16b, v17.16b\n"
+ "ldr q2, [x23], #0x10\n"
+ "zip2 v1.16b, v19.16b, v17.16b\n"
+ "ldr q0, [x22], #0x10\n"
+ "zip1 v31.16b, v18.16b, v16.16b\n"
+ "ldr q30, [x21], #0x10\n"
+ "zip1 v27.16b, v11.16b, v4.16b\n"
+ "ldr q29, [x20], #0x10\n"
+ "zip2 v28.16b, v18.16b, v16.16b\n"
+ "zip1 v26.16b, v10.16b, v2.16b\n"
+ "zip1 v22.16b, v6.16b, v26.16b\n"
+ "zip1 v25.16b, v8.16b, v30.16b\n"
+ "zip1 v21.16b, v31.16b, v25.16b\n"
+ "zip1 v18.16b, v22.16b, v21.16b\n"
+ "zip1 v24.16b, v9.16b, v0.16b\n"
+ "zip1 v20.16b, v3.16b, v24.16b\n"
+ "zip1 v23.16b, v7.16b, v29.16b\n"
+ "zip1 v19.16b, v27.16b, v23.16b\n"
+ "zip1 v17.16b, v20.16b, v19.16b\n"
"zip1 v16.16b, v18.16b, v17.16b\n"
- "zip2 v18.16b, v18.16b, v17.16b\n"
- "str q16, [x20, #0x0]\n"
- "zip1 v17.16b, v25.16b, v24.16b\n"
- "zip2 v16.16b, v25.16b, v24.16b\n"
- "str q18, [x20, #0x10]\n"
- "str q17, [x20, #0x20]\n"
- "zip1 v25.16b, v0.16b, v23.16b\n"
- "zip1 v24.16b, v31.16b, v22.16b\n"
- "str q16, [x20, #0x30]\n"
- "add x20, x20, %x[out_stride]\n"
- "zip2 v23.16b, v0.16b, v23.16b\n"
- "zip2 v22.16b, v31.16b, v22.16b\n"
- "zip1 v16.16b, v30.16b, v29.16b\n"
- "zip2 v17.16b, v30.16b, v29.16b\n"
- "str q16, [x20, #0x0]\n"
- "zip1 v16.16b, v28.16b, v27.16b\n"
- "zip2 v18.16b, v28.16b, v27.16b\n"
- "str q17, [x20, #0x10]\n"
- "str q16, [x20, #0x20]\n"
- "zip1 v17.16b, v26.16b, v21.16b\n"
- "zip2 v16.16b, v26.16b, v21.16b\n"
- "str q18, [x20, #0x30]\n"
- "add x20, x20, %x[out_stride]\n"
- "zip1 v21.16b, v20.16b, v19.16b\n"
- "zip2 v20.16b, v20.16b, v19.16b\n"
- "str q17, [x20, #0x0]\n"
- "zip1 v19.16b, v25.16b, v24.16b\n"
- "zip2 v18.16b, v25.16b, v24.16b\n"
- "str q16, [x20, #0x10]\n"
- "zip1 v17.16b, v23.16b, v22.16b\n"
- "zip2 v16.16b, v23.16b, v22.16b\n"
- "str q21, [x20, #0x20]\n"
- "str q20, [x20, #0x30]\n"
- "add x20, x20, %x[out_stride]\n"
- "str q19, [x20, #0x0]\n"
- "str q18, [x20, #0x10]\n"
- "str q17, [x20, #0x20]\n"
- "str q16, [x20, #0x30]\n"
- "add x20, x20, %x[out_stride]\n"
+ "str q16, [x15, #0x0]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0x10]\n"
+ "zip2 v18.16b, v22.16b, v21.16b\n"
+ "zip2 v17.16b, v20.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0x20]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0x30]\n"
+ "add x15, x15, %x[out_stride]\n"
+ "zip2 v22.16b, v6.16b, v26.16b\n"
+ "zip2 v21.16b, v31.16b, v25.16b\n"
+ "zip1 v18.16b, v22.16b, v21.16b\n"
+ "zip2 v20.16b, v3.16b, v24.16b\n"
+ "zip2 v19.16b, v27.16b, v23.16b\n"
+ "zip1 v17.16b, v20.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0x0]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0x10]\n"
+ "zip2 v18.16b, v22.16b, v21.16b\n"
+ "zip2 v17.16b, v20.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0x20]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0x30]\n"
+ "add x15, x15, %x[out_stride]\n"
+ "zip2 v27.16b, v10.16b, v2.16b\n"
+ "zip2 v26.16b, v8.16b, v30.16b\n"
+ "zip1 v22.16b, v5.16b, v27.16b\n"
+ "zip1 v21.16b, v28.16b, v26.16b\n"
+ "zip1 v18.16b, v22.16b, v21.16b\n"
+ "zip2 v25.16b, v9.16b, v0.16b\n"
+ "zip1 v20.16b, v1.16b, v25.16b\n"
+ "zip2 v24.16b, v11.16b, v4.16b\n"
+ "zip2 v23.16b, v7.16b, v29.16b\n"
+ "zip1 v19.16b, v24.16b, v23.16b\n"
+ "zip1 v17.16b, v20.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0x0]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0x10]\n"
+ "zip2 v18.16b, v22.16b, v21.16b\n"
+ "zip2 v17.16b, v20.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0x20]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0x30]\n"
+ "add x15, x15, %x[out_stride]\n"
+ "zip2 v22.16b, v5.16b, v27.16b\n"
+ "zip2 v21.16b, v28.16b, v26.16b\n"
+ "zip1 v18.16b, v22.16b, v21.16b\n"
+ "zip2 v20.16b, v1.16b, v25.16b\n"
+ "zip2 v19.16b, v24.16b, v23.16b\n"
+ "zip1 v17.16b, v20.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0x0]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0x10]\n"
+ "zip2 v18.16b, v22.16b, v21.16b\n"
+ "zip2 v17.16b, v20.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0x20]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0x30]\n"
+ "add x15, x15, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cmp x21, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 5f\n"
"4:" // Main row loop: Column loop
- "ldr s21, [x17], #0x4\n"
- "ldr s23, [x16], #0x4\n"
- "sub x21, x21, #0x4\n"
- "cmp x21, #0x4\n"
- "ldr s20, [x15], #0x4\n"
- "ldr s22, [x14], #0x4\n"
- "ldr s19, [x13], #0x4\n"
- "ldr s18, [x12], #0x4\n"
- "ldr s25, [x11], #0x4\n"
- "ldr s24, [x10], #0x4\n"
- "ldr s17, [x9], #0x4\n"
- "ldr s16, [x28], #0x4\n"
- "zip1 v21.16b, v21.16b, v17.16b\n"
- "zip1 v23.16b, v23.16b, v16.16b\n"
- "ldr s17, [x27], #0x4\n"
- "ldr s16, [x26], #0x4\n"
+ "ldr s17, [x16], #0x4\n"
+ "sub x19, x19, #0x4\n"
+ "ldr s20, [x14], #0x4\n"
+ "cmp x19, #0x4\n"
+ "ldr s18, [x13], #0x4\n"
+ "ldr s19, [x12], #0x4\n"
+ "ldr s27, [x11], #0x4\n"
+ "ldr s22, [x10], #0x4\n"
+ "ldr s26, [x9], #0x4\n"
+ "ldr s25, [x28], #0x4\n"
+ "ldr s16, [x27], #0x4\n"
+ "zip1 v21.16b, v17.16b, v16.16b\n"
+ "ldr s17, [x26], #0x4\n"
+ "ldr s16, [x25], #0x4\n"
+ "zip1 v24.16b, v18.16b, v16.16b\n"
+ "ldr s18, [x24], #0x4\n"
"zip1 v20.16b, v20.16b, v17.16b\n"
- "zip1 v22.16b, v22.16b, v16.16b\n"
- "ldr s17, [x25], #0x4\n"
- "ldr s16, [x24], #0x4\n"
- "zip1 v19.16b, v19.16b, v17.16b\n"
- "zip1 v18.16b, v18.16b, v16.16b\n"
"ldr s17, [x23], #0x4\n"
"ldr s16, [x22], #0x4\n"
- "zip1 v17.16b, v25.16b, v17.16b\n"
- "zip1 v16.16b, v24.16b, v16.16b\n"
- "zip1 v21.16b, v21.16b, v19.16b\n"
- "zip1 v20.16b, v20.16b, v17.16b\n"
- "zip1 v19.16b, v23.16b, v18.16b\n"
+ "zip1 v23.16b, v19.16b, v18.16b\n"
+ "ldr s18, [x21], #0x4\n"
+ "ldr s19, [x20], #0x4\n"
+ "zip1 v17.16b, v27.16b, v17.16b\n"
"zip1 v16.16b, v22.16b, v16.16b\n"
- "zip1 v18.16b, v21.16b, v20.16b\n"
- "zip1 v17.16b, v19.16b, v16.16b\n"
- "zip2 v20.16b, v21.16b, v20.16b\n"
- "zip2 v19.16b, v19.16b, v16.16b\n"
+ "zip1 v22.16b, v21.16b, v17.16b\n"
+ "zip1 v21.16b, v20.16b, v16.16b\n"
+ "zip1 v16.16b, v26.16b, v18.16b\n"
+ "zip1 v20.16b, v24.16b, v16.16b\n"
+ "zip1 v18.16b, v22.16b, v20.16b\n"
+ "zip1 v16.16b, v25.16b, v19.16b\n"
+ "zip1 v19.16b, v23.16b, v16.16b\n"
+ "zip1 v17.16b, v21.16b, v19.16b\n"
"zip1 v16.16b, v18.16b, v17.16b\n"
- "zip2 v18.16b, v18.16b, v17.16b\n"
- "str q16, [x20, #0x0]\n"
- "zip1 v17.16b, v20.16b, v19.16b\n"
- "zip2 v16.16b, v20.16b, v19.16b\n"
- "str q18, [x20, #0x10]\n"
- "str q17, [x20, #0x20]\n"
- "str q16, [x20, #0x30]\n"
- "add x20, x20, %x[out_stride]\n"
+ "str q16, [x15, #0x0]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0x10]\n"
+ "zip2 v18.16b, v22.16b, v20.16b\n"
+ "zip2 v17.16b, v21.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0x20]\n"
+ "zip2 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0x30]\n"
+ "add x15, x15, %x[out_stride]\n"
"bge 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp x21, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 7f\n"
"6:" // Main row loop: width 1 loop: loop
- "ldr b23, [x17], #0x1\n"
- "ldr b22, [x16], #0x1\n"
- "sub x21, x21, #0x1\n"
- "cmp x21, #0x1\n"
- "ldr b21, [x15], #0x1\n"
- "ldr b20, [x14], #0x1\n"
- "ldr b19, [x13], #0x1\n"
- "ldr b18, [x12], #0x1\n"
- "ldr b25, [x11], #0x1\n"
- "ldr b24, [x10], #0x1\n"
- "ldr b17, [x9], #0x1\n"
- "ldr b16, [x28], #0x1\n"
- "zip1 v23.16b, v23.16b, v17.16b\n"
- "zip1 v22.16b, v22.16b, v16.16b\n"
- "ldr b17, [x27], #0x1\n"
- "ldr b16, [x26], #0x1\n"
- "zip1 v21.16b, v21.16b, v17.16b\n"
- "zip1 v20.16b, v20.16b, v16.16b\n"
- "ldr b17, [x25], #0x1\n"
- "ldr b16, [x24], #0x1\n"
- "zip1 v19.16b, v19.16b, v17.16b\n"
- "zip1 v18.16b, v18.16b, v16.16b\n"
+ "ldr b17, [x16], #0x1\n"
+ "sub x19, x19, #0x1\n"
+ "ldr b21, [x14], #0x1\n"
+ "cmp x19, #0x1\n"
+ "ldr b18, [x13], #0x1\n"
+ "ldr b20, [x12], #0x1\n"
+ "ldr b27, [x11], #0x1\n"
+ "ldr b26, [x10], #0x1\n"
+ "ldr b25, [x9], #0x1\n"
+ "ldr b24, [x28], #0x1\n"
+ "ldr b16, [x27], #0x1\n"
+ "zip1 v23.16b, v17.16b, v16.16b\n"
+ "ldr b17, [x26], #0x1\n"
+ "ldr b16, [x25], #0x1\n"
+ "zip1 v22.16b, v18.16b, v16.16b\n"
+ "ldr b19, [x24], #0x1\n"
+ "zip1 v18.16b, v21.16b, v17.16b\n"
"ldr b17, [x23], #0x1\n"
"ldr b16, [x22], #0x1\n"
- "zip1 v17.16b, v25.16b, v17.16b\n"
- "zip1 v16.16b, v24.16b, v16.16b\n"
- "zip1 v19.16b, v23.16b, v19.16b\n"
- "zip1 v17.16b, v21.16b, v17.16b\n"
- "zip1 v18.16b, v22.16b, v18.16b\n"
- "zip1 v16.16b, v20.16b, v16.16b\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
+ "zip1 v21.16b, v20.16b, v19.16b\n"
+ "ldr b20, [x21], #0x1\n"
+ "ldr b19, [x20], #0x1\n"
+ "zip1 v17.16b, v27.16b, v17.16b\n"
+ "zip1 v16.16b, v26.16b, v16.16b\n"
+ "zip1 v17.16b, v23.16b, v17.16b\n"
+ "zip1 v18.16b, v18.16b, v16.16b\n"
+ "zip1 v16.16b, v25.16b, v20.16b\n"
+ "zip1 v16.16b, v22.16b, v16.16b\n"
+ "zip1 v17.16b, v17.16b, v16.16b\n"
+ "zip1 v16.16b, v24.16b, v19.16b\n"
+ "zip1 v16.16b, v21.16b, v16.16b\n"
"zip1 v16.16b, v18.16b, v16.16b\n"
"zip1 v16.16b, v17.16b, v16.16b\n"
- "str q16, [x20, #0x0]\n"
- "add x20, x20, #0x10\n"
+ "str q16, [x15, #0x0]\n"
+ "add x15, x15, #0x10\n"
"bge 6b\n"
"7:" // Main row loop: width 1 loop: skip
- "cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x40\n"
+ "cmp %x[height], #0x1\n"
"bge 1b\n"
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_4_1x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_4_1x4.hpp
index 8adc69e8b3..27cebe26cf 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_4_1x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_4_1x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -42,10 +42,9 @@ void a64_transpose_interleave_4_1x4(uint8_t *out, const uint8_t *in, size_t widt
"cmp %x[height], #0x10\n"
"blt 8f\n"
"1:" // Main row loop: Head
- "mov x17, %x[in]\n"
- "add x16, x17, %x[in_stride]\n"
- "add x15, x16, %x[in_stride]\n"
- "add x14, x15, %x[in_stride]\n"
+ "mov x16, %x[in]\n"
+ "mov x15, %x[out]\n"
+ "add x14, x16, %x[in_stride]\n"
"add x13, x14, %x[in_stride]\n"
"add x12, x13, %x[in_stride]\n"
"add x11, x12, %x[in_stride]\n"
@@ -55,253 +54,256 @@ void a64_transpose_interleave_4_1x4(uint8_t *out, const uint8_t *in, size_t widt
"add x27, x28, %x[in_stride]\n"
"add x26, x27, %x[in_stride]\n"
"add x25, x26, %x[in_stride]\n"
- "mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "add x23, x24, %x[in_stride]\n"
"add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
- "cmp x24, #0x10\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x10\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x10\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ldr q21, [x17], #0x10\n"
"ldr q20, [x16], #0x10\n"
- "sub x24, x24, #0x10\n"
- "cmp x24, #0x10\n"
- "ldr q17, [x15], #0x10\n"
- "ldr q16, [x14], #0x10\n"
- "zip1 v3.16b, v21.16b, v17.16b\n"
- "zip1 v2.16b, v20.16b, v16.16b\n"
- "ldr q19, [x13], #0x10\n"
- "ldr q18, [x12], #0x10\n"
- "zip2 v1.16b, v21.16b, v17.16b\n"
- "zip2 v0.16b, v20.16b, v16.16b\n"
- "ldr q17, [x11], #0x10\n"
- "ldr q16, [x10], #0x10\n"
- "zip1 v31.16b, v19.16b, v17.16b\n"
- "zip1 v30.16b, v18.16b, v16.16b\n"
- "ldr q21, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "zip2 v29.16b, v19.16b, v17.16b\n"
- "zip2 v28.16b, v18.16b, v16.16b\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v23.16b, v21.16b, v17.16b\n"
- "zip1 v22.16b, v20.16b, v16.16b\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v27.16b, v21.16b, v17.16b\n"
- "zip2 v26.16b, v20.16b, v16.16b\n"
- "ldr q17, [x22], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "zip1 v21.16b, v19.16b, v17.16b\n"
- "zip1 v20.16b, v18.16b, v16.16b\n"
- "zip2 v25.16b, v19.16b, v17.16b\n"
- "zip2 v24.16b, v18.16b, v16.16b\n"
- "zip1 v16.16b, v3.16b, v2.16b\n"
- "zip1 v18.16b, v31.16b, v30.16b\n"
- "str q16, [x21, #0x0]\n"
- "zip1 v17.16b, v23.16b, v22.16b\n"
- "zip1 v16.16b, v21.16b, v20.16b\n"
- "str q18, [x21, #0x10]\n"
- "str q17, [x21, #0x20]\n"
- "zip2 v19.16b, v3.16b, v2.16b\n"
- "zip2 v18.16b, v31.16b, v30.16b\n"
- "str q16, [x21, #0x30]\n"
- "add x21, x21, %x[out_stride]\n"
- "zip2 v17.16b, v23.16b, v22.16b\n"
- "zip2 v16.16b, v21.16b, v20.16b\n"
- "str q19, [x21, #0x0]\n"
- "zip1 v23.16b, v1.16b, v0.16b\n"
- "zip1 v22.16b, v29.16b, v28.16b\n"
- "str q18, [x21, #0x10]\n"
- "zip1 v21.16b, v27.16b, v26.16b\n"
- "zip1 v20.16b, v25.16b, v24.16b\n"
- "str q17, [x21, #0x20]\n"
- "zip2 v19.16b, v1.16b, v0.16b\n"
- "zip2 v18.16b, v29.16b, v28.16b\n"
- "str q16, [x21, #0x30]\n"
- "add x21, x21, %x[out_stride]\n"
- "zip2 v17.16b, v27.16b, v26.16b\n"
- "zip2 v16.16b, v25.16b, v24.16b\n"
- "str q23, [x21, #0x0]\n"
- "str q22, [x21, #0x10]\n"
- "str q21, [x21, #0x20]\n"
- "str q20, [x21, #0x30]\n"
- "add x21, x21, %x[out_stride]\n"
- "str q19, [x21, #0x0]\n"
- "str q18, [x21, #0x10]\n"
- "str q17, [x21, #0x20]\n"
- "str q16, [x21, #0x30]\n"
- "add x21, x21, %x[out_stride]\n"
+ "sub x19, x19, #0x10\n"
+ "ldr q19, [x14], #0x10\n"
+ "cmp x19, #0x10\n"
+ "ldr q16, [x13], #0x10\n"
+ "zip1 v18.16b, v20.16b, v16.16b\n"
+ "ldr q17, [x12], #0x10\n"
+ "zip2 v5.16b, v20.16b, v16.16b\n"
+ "ldr q4, [x11], #0x10\n"
+ "ldr q3, [x10], #0x10\n"
+ "zip1 v16.16b, v19.16b, v17.16b\n"
+ "ldr q2, [x9], #0x10\n"
+ "zip2 v1.16b, v19.16b, v17.16b\n"
+ "ldr q0, [x28], #0x10\n"
+ "zip1 v22.16b, v18.16b, v16.16b\n"
+ "ldr q31, [x27], #0x10\n"
+ "zip2 v21.16b, v18.16b, v16.16b\n"
+ "ldr q30, [x26], #0x10\n"
+ "zip1 v29.16b, v5.16b, v1.16b\n"
+ "ldr q28, [x25], #0x10\n"
+ "zip1 v17.16b, v4.16b, v2.16b\n"
+ "ldr q27, [x24], #0x10\n"
+ "zip1 v16.16b, v3.16b, v0.16b\n"
+ "ldr q26, [x23], #0x10\n"
+ "zip1 v19.16b, v17.16b, v16.16b\n"
+ "ldr q25, [x22], #0x10\n"
+ "zip2 v20.16b, v17.16b, v16.16b\n"
+ "ldr q24, [x21], #0x10\n"
+ "zip1 v18.16b, v31.16b, v28.16b\n"
+ "ldr q23, [x20], #0x10\n"
+ "zip1 v17.16b, v30.16b, v27.16b\n"
+ "str q22, [x15, #0x0]\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q19, [x15, #0x10]\n"
+ "zip2 v19.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0x20]\n"
+ "zip1 v18.16b, v26.16b, v24.16b\n"
+ "zip1 v17.16b, v25.16b, v23.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0x30]\n"
+ "add x15, x15, %x[out_stride]\n"
+ "zip2 v17.16b, v18.16b, v17.16b\n"
+ "str q21, [x15, #0x0]\n"
+ "zip2 v22.16b, v4.16b, v2.16b\n"
+ "str q20, [x15, #0x10]\n"
+ "zip2 v21.16b, v3.16b, v0.16b\n"
+ "str q19, [x15, #0x20]\n"
+ "zip1 v16.16b, v22.16b, v21.16b\n"
+ "str q17, [x15, #0x30]\n"
+ "add x15, x15, %x[out_stride]\n"
+ "zip2 v20.16b, v31.16b, v28.16b\n"
+ "str q29, [x15, #0x0]\n"
+ "zip2 v17.16b, v30.16b, v27.16b\n"
+ "str q16, [x15, #0x10]\n"
+ "zip1 v16.16b, v20.16b, v17.16b\n"
+ "str q16, [x15, #0x20]\n"
+ "zip2 v19.16b, v26.16b, v24.16b\n"
+ "zip2 v18.16b, v25.16b, v23.16b\n"
+ "zip1 v16.16b, v19.16b, v18.16b\n"
+ "str q16, [x15, #0x30]\n"
+ "add x15, x15, %x[out_stride]\n"
+ "zip2 v16.16b, v5.16b, v1.16b\n"
+ "str q16, [x15, #0x0]\n"
+ "zip2 v16.16b, v22.16b, v21.16b\n"
+ "zip2 v17.16b, v20.16b, v17.16b\n"
+ "str q16, [x15, #0x10]\n"
+ "zip2 v16.16b, v19.16b, v18.16b\n"
+ "str q17, [x15, #0x20]\n"
+ "str q16, [x15, #0x30]\n"
+ "add x15, x15, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cmp x24, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 5f\n"
"4:" // Main row loop: Column loop
- "ldr s19, [x17], #0x4\n"
"ldr s18, [x16], #0x4\n"
- "sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr s17, [x15], #0x4\n"
- "ldr s16, [x14], #0x4\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr s19, [x13], #0x4\n"
- "ldr s18, [x12], #0x4\n"
- "zip1 v22.16b, v17.16b, v16.16b\n"
- "ldr s17, [x11], #0x4\n"
- "ldr s16, [x10], #0x4\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr s19, [x9], #0x4\n"
- "ldr s18, [x28], #0x4\n"
- "zip1 v21.16b, v17.16b, v16.16b\n"
- "ldr s17, [x27], #0x4\n"
- "ldr s16, [x26], #0x4\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr s20, [x25], #0x4\n"
- "ldr s19, [x23], #0x4\n"
- "zip1 v18.16b, v17.16b, v16.16b\n"
- "ldr s17, [x22], #0x4\n"
+ "sub x19, x19, #0x4\n"
+ "ldr s17, [x14], #0x4\n"
+ "cmp x19, #0x4\n"
+ "ldr s16, [x13], #0x4\n"
+ "zip1 v19.16b, v18.16b, v16.16b\n"
+ "ldr s16, [x12], #0x4\n"
+ "ldr s18, [x11], #0x4\n"
+ "zip1 v16.16b, v17.16b, v16.16b\n"
+ "ldr s20, [x10], #0x4\n"
+ "ldr s17, [x9], #0x4\n"
+ "zip1 v23.16b, v19.16b, v16.16b\n"
+ "ldr s16, [x28], #0x4\n"
+ "zip1 v19.16b, v18.16b, v17.16b\n"
+ "ldr s18, [x27], #0x4\n"
+ "ldr s22, [x26], #0x4\n"
+ "zip1 v16.16b, v20.16b, v16.16b\n"
+ "ldr s17, [x25], #0x4\n"
+ "zip1 v21.16b, v19.16b, v16.16b\n"
+ "ldr s16, [x24], #0x4\n"
+ "zip1 v18.16b, v18.16b, v17.16b\n"
+ "ldr s20, [x23], #0x4\n"
+ "ldr s19, [x22], #0x4\n"
+ "zip1 v16.16b, v22.16b, v16.16b\n"
+ "ldr s17, [x21], #0x4\n"
+ "zip1 v18.16b, v18.16b, v16.16b\n"
"ldr s16, [x20], #0x4\n"
"zip1 v17.16b, v20.16b, v17.16b\n"
+ "str q23, [x15, #0x0]\n"
+ "str q21, [x15, #0x10]\n"
"zip1 v16.16b, v19.16b, v16.16b\n"
+ "str q18, [x15, #0x20]\n"
"zip1 v16.16b, v17.16b, v16.16b\n"
- "str q22, [x21, #0x0]\n"
- "str q21, [x21, #0x10]\n"
- "str q18, [x21, #0x20]\n"
- "str q16, [x21, #0x30]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q16, [x15, #0x30]\n"
+ "add x15, x15, %x[out_stride]\n"
"bge 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp x24, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 7f\n"
"6:" // Main row loop: width 1 loop: loop
- "ldr b19, [x17], #0x1\n"
"ldr b18, [x16], #0x1\n"
- "sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr b17, [x15], #0x1\n"
- "ldr b16, [x14], #0x1\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr b19, [x13], #0x1\n"
- "ldr b18, [x12], #0x1\n"
- "zip1 v22.16b, v17.16b, v16.16b\n"
- "ldr b17, [x11], #0x1\n"
- "ldr b16, [x10], #0x1\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr b19, [x9], #0x1\n"
- "ldr b18, [x28], #0x1\n"
- "zip1 v21.16b, v17.16b, v16.16b\n"
- "ldr b17, [x27], #0x1\n"
- "ldr b16, [x26], #0x1\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr b20, [x25], #0x1\n"
- "ldr b19, [x23], #0x1\n"
- "zip1 v18.16b, v17.16b, v16.16b\n"
- "ldr b17, [x22], #0x1\n"
+ "sub x19, x19, #0x1\n"
+ "ldr b17, [x14], #0x1\n"
+ "cmp x19, #0x1\n"
+ "ldr b16, [x13], #0x1\n"
+ "zip1 v19.16b, v18.16b, v16.16b\n"
+ "ldr b16, [x12], #0x1\n"
+ "ldr b18, [x11], #0x1\n"
+ "zip1 v16.16b, v17.16b, v16.16b\n"
+ "ldr b20, [x10], #0x1\n"
+ "ldr b17, [x9], #0x1\n"
+ "zip1 v23.16b, v19.16b, v16.16b\n"
+ "ldr b16, [x28], #0x1\n"
+ "zip1 v19.16b, v18.16b, v17.16b\n"
+ "ldr b18, [x27], #0x1\n"
+ "ldr b22, [x26], #0x1\n"
+ "zip1 v16.16b, v20.16b, v16.16b\n"
+ "ldr b17, [x25], #0x1\n"
+ "zip1 v21.16b, v19.16b, v16.16b\n"
+ "ldr b16, [x24], #0x1\n"
+ "zip1 v18.16b, v18.16b, v17.16b\n"
+ "ldr b20, [x23], #0x1\n"
+ "ldr b19, [x22], #0x1\n"
+ "zip1 v16.16b, v22.16b, v16.16b\n"
+ "ldr b17, [x21], #0x1\n"
+ "zip1 v18.16b, v18.16b, v16.16b\n"
"ldr b16, [x20], #0x1\n"
"zip1 v17.16b, v20.16b, v17.16b\n"
+ "str s23, [x15, #0x0]\n"
+ "str s21, [x15, #0x10]\n"
"zip1 v16.16b, v19.16b, v16.16b\n"
- "str s22, [x21, #0x0]\n"
+ "str s18, [x15, #0x20]\n"
"zip1 v16.16b, v17.16b, v16.16b\n"
- "str s21, [x21, #0x10]\n"
- "str s18, [x21, #0x20]\n"
- "str s16, [x21, #0x30]\n"
- "add x21, x21, #0x4\n"
+ "str s16, [x15, #0x30]\n"
+ "add x15, x15, #0x4\n"
"bge 6b\n"
"7:" // Main row loop: width 1 loop: skip
- "cmp %x[height], #0x10\n"
"add %x[out], %x[out], #0x40\n"
+ "cmp %x[height], #0x10\n"
"bge 1b\n"
"cbz %x[height], 16f\n"
"8:" // Main loop skip
+
"9:" // Tail row loop: Head
- "mov x17, %x[in]\n"
- "add x16, x17, %x[in_stride]\n"
- "add x15, x16, %x[in_stride]\n"
- "mov x20, %x[width]\n"
- "add x14, x15, %x[in_stride]\n"
+ "mov x16, %x[in]\n"
+ "mov x15, %x[out]\n"
+ "add x14, x16, %x[in_stride]\n"
+ "add x13, x14, %x[in_stride]\n"
+ "add x12, x13, %x[in_stride]\n"
+ "add %x[in], x12, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x14, %x[in_stride]\n"
- "csel x14, x14, %x[pad_row], GT\n"
- "csel x15, x15, %x[pad_row], GE\n"
+ "csel x12, x12, %x[pad_row], GT\n"
+ "csel x13, x13, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x16, x16, %x[pad_row], GT\n"
- "cmp x20, #0x10\n"
- "mov x21, %x[out]\n"
+ "csel x14, x14, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x4\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x10\n"
"blt 11f\n"
"10:" // Tail row loop: Unroll column loop
- "ldr q19, [x17], #0x10\n"
- "ldr q21, [x16], #0x10\n"
- "sub x20, x20, #0x10\n"
- "cmp x20, #0x10\n"
- "ldr q18, [x15], #0x10\n"
- "ldr q16, [x14], #0x10\n"
- "zip1 v20.16b, v19.16b, v18.16b\n"
- "zip1 v17.16b, v21.16b, v16.16b\n"
- "zip2 v19.16b, v19.16b, v18.16b\n"
- "zip2 v18.16b, v21.16b, v16.16b\n"
+ "ldr q19, [x16], #0x10\n"
+ "sub x19, x19, #0x10\n"
+ "ldr q18, [x14], #0x10\n"
+ "cmp x19, #0x10\n"
+ "ldr q17, [x13], #0x10\n"
+ "zip1 v20.16b, v19.16b, v17.16b\n"
+ "ldr q16, [x12], #0x10\n"
+ "zip2 v19.16b, v19.16b, v17.16b\n"
+ "zip1 v17.16b, v18.16b, v16.16b\n"
+ "zip2 v18.16b, v18.16b, v16.16b\n"
"zip1 v16.16b, v20.16b, v17.16b\n"
- "str q16, [x21, #0x0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q16, [x15, #0x0]\n"
+ "add x15, x15, %x[out_stride]\n"
"zip2 v16.16b, v20.16b, v17.16b\n"
- "str q16, [x21, #0x0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q16, [x15, #0x0]\n"
"zip1 v17.16b, v19.16b, v18.16b\n"
+ "add x15, x15, %x[out_stride]\n"
"zip2 v16.16b, v19.16b, v18.16b\n"
- "str q17, [x21, #0x0]\n"
- "add x21, x21, %x[out_stride]\n"
- "str q16, [x21, #0x0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q17, [x15, #0x0]\n"
+ "add x15, x15, %x[out_stride]\n"
+ "str q16, [x15, #0x0]\n"
+ "add x15, x15, %x[out_stride]\n"
"bge 10b\n"
"11:" // Tail row loop: Unroll column loop skip
- "cmp x20, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 13f\n"
"12:" // Tail row loop: Column loop
- "ldr s19, [x17], #0x4\n"
- "ldr s18, [x16], #0x4\n"
- "sub x20, x20, #0x4\n"
- "cmp x20, #0x4\n"
- "ldr s17, [x15], #0x4\n"
- "ldr s16, [x14], #0x4\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
+ "ldr s17, [x16], #0x4\n"
+ "sub x19, x19, #0x4\n"
+ "ldr s18, [x14], #0x4\n"
+ "cmp x19, #0x4\n"
+ "ldr s16, [x13], #0x4\n"
+ "zip1 v17.16b, v17.16b, v16.16b\n"
+ "ldr s16, [x12], #0x4\n"
"zip1 v16.16b, v18.16b, v16.16b\n"
"zip1 v16.16b, v17.16b, v16.16b\n"
- "str q16, [x21, #0x0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q16, [x15, #0x0]\n"
+ "add x15, x15, %x[out_stride]\n"
"bge 12b\n"
"13:" // Tail row loop: Column loop skip
- "cmp x20, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 15f\n"
"14:" // Tail row loop: width 1 loop: loop
- "ldr b19, [x17], #0x1\n"
- "ldr b18, [x16], #0x1\n"
- "sub x20, x20, #0x1\n"
- "cmp x20, #0x1\n"
- "ldr b17, [x15], #0x1\n"
- "ldr b16, [x14], #0x1\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
+ "ldr b17, [x16], #0x1\n"
+ "sub x19, x19, #0x1\n"
+ "ldr b18, [x14], #0x1\n"
+ "cmp x19, #0x1\n"
+ "ldr b16, [x13], #0x1\n"
+ "zip1 v17.16b, v17.16b, v16.16b\n"
+ "ldr b16, [x12], #0x1\n"
"zip1 v16.16b, v18.16b, v16.16b\n"
"zip1 v16.16b, v17.16b, v16.16b\n"
- "str s16, [x21, #0x0]\n"
- "add x21, x21, #0x4\n"
+ "str s16, [x15, #0x0]\n"
+ "add x15, x15, #0x4\n"
"bge 14b\n"
"15:" // Tail row loop: width 1 loop: skip
- "cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x10\n"
+ "cmp %x[height], #0x1\n"
"bge 9b\n"
"16:" // Done
+
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_64.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_64.hpp
index 07602bdc8d..c341b315aa 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_64.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_64.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -36,177 +36,178 @@ void a64_transpose_interleave_64(uint16_t *out, const uint16_t *in, size_t width
"cmp %x[height], #0x4\n"
"blt 10f\n"
"1:" // Main row loop: Head
- "mov x25, %x[in]\n"
- "mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
- "add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
- "cmp x24, #0x20\n"
+ "mov x24, %x[in]\n"
+ "mov x23, %x[out]\n"
+ "add x22, x24, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x20\n"
"blt 3f\n"
"2:" // Main row loop: Column loop
- "ldr q31, [x25], #0x10\n"
- "ldr q30, [x23], #0x10\n"
- "sub x24, x24, #0x20\n"
- "cmp x24, #0x20\n"
- "ldr q29, [x22], #0x10\n"
+ "ldr q31, [x24], #0x10\n"
+ "sub x19, x19, #0x20\n"
+ "ldr q30, [x22], #0x10\n"
+ "cmp x19, #0x20\n"
+ "ldr q29, [x21], #0x10\n"
"ldr q28, [x20], #0x10\n"
- "ldr q27, [x25], #0x10\n"
- "ldr q26, [x23], #0x10\n"
- "ldr q25, [x22], #0x10\n"
+ "ldr q27, [x24], #0x10\n"
+ "ldr q26, [x22], #0x10\n"
+ "ldr q25, [x21], #0x10\n"
"ldr q24, [x20], #0x10\n"
- "ldr q23, [x25], #0x10\n"
- "ldr q22, [x23], #0x10\n"
- "ldr q21, [x22], #0x10\n"
+ "ldr q23, [x24], #0x10\n"
+ "ldr q22, [x22], #0x10\n"
+ "ldr q21, [x21], #0x10\n"
"ldr q20, [x20], #0x10\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "ldr q17, [x22], #0x10\n"
+ "ldr q19, [x24], #0x10\n"
+ "ldr q18, [x22], #0x10\n"
+ "ldr q17, [x21], #0x10\n"
"ldr q16, [x20], #0x10\n"
- "str q31, [x21, #0x0]\n"
- "str q27, [x21, #0x10]\n"
- "str q23, [x21, #0x20]\n"
- "str q19, [x21, #0x30]\n"
- "str q30, [x21, #0x40]\n"
- "str q26, [x21, #0x50]\n"
- "str q22, [x21, #0x60]\n"
- "str q18, [x21, #0x70]\n"
- "str q29, [x21, #0x80]\n"
- "str q25, [x21, #0x90]\n"
- "str q21, [x21, #0xa0]\n"
- "str q17, [x21, #0xb0]\n"
- "str q28, [x21, #0xc0]\n"
- "str q24, [x21, #0xd0]\n"
- "str q20, [x21, #0xe0]\n"
- "str q16, [x21, #0xf0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q31, [x23, #0x0]\n"
+ "str q27, [x23, #0x10]\n"
+ "str q23, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "str q30, [x23, #0x40]\n"
+ "str q26, [x23, #0x50]\n"
+ "str q22, [x23, #0x60]\n"
+ "str q18, [x23, #0x70]\n"
+ "str q29, [x23, #0x80]\n"
+ "str q25, [x23, #0x90]\n"
+ "str q21, [x23, #0xa0]\n"
+ "str q17, [x23, #0xb0]\n"
+ "str q28, [x23, #0xc0]\n"
+ "str q24, [x23, #0xd0]\n"
+ "str q20, [x23, #0xe0]\n"
+ "str q16, [x23, #0xf0]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Column loop skip
- "cmp x24, #0x10\n"
+ "cmp x19, #0x10\n"
"blt 5f\n"
"4:" // Main row loop: width 16 loop: loop
- "ldr q23, [x25], #0x10\n"
- "ldr q22, [x23], #0x10\n"
- "sub x24, x24, #0x10\n"
- "cmp x24, #0x10\n"
- "ldr q21, [x22], #0x10\n"
+ "ldr q23, [x24], #0x10\n"
+ "sub x19, x19, #0x10\n"
+ "ldr q22, [x22], #0x10\n"
+ "cmp x19, #0x10\n"
+ "ldr q21, [x21], #0x10\n"
"ldr q20, [x20], #0x10\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "ldr q17, [x22], #0x10\n"
+ "ldr q19, [x24], #0x10\n"
+ "ldr q18, [x22], #0x10\n"
+ "ldr q17, [x21], #0x10\n"
"ldr q16, [x20], #0x10\n"
- "str q23, [x21, #0x0]\n"
- "str q19, [x21, #0x10]\n"
- "str q22, [x21, #0x40]\n"
- "str q18, [x21, #0x50]\n"
- "str q21, [x21, #0x80]\n"
- "str q17, [x21, #0x90]\n"
- "str q20, [x21, #0xc0]\n"
- "str q16, [x21, #0xd0]\n"
- "add x21, x21, #0x20\n"
+ "str q23, [x23, #0x0]\n"
+ "str q19, [x23, #0x10]\n"
+ "str q22, [x23, #0x40]\n"
+ "str q18, [x23, #0x50]\n"
+ "str q21, [x23, #0x80]\n"
+ "str q17, [x23, #0x90]\n"
+ "str q20, [x23, #0xc0]\n"
+ "str q16, [x23, #0xd0]\n"
+ "add x23, x23, #0x20\n"
"bge 4b\n"
"5:" // Main row loop: width 16 loop: skip
- "cmp x24, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
- "ldr d19, [x25], #0x8\n"
- "ldr d18, [x23], #0x8\n"
- "sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr d17, [x22], #0x8\n"
+ "ldr d19, [x24], #0x8\n"
+ "sub x19, x19, #0x4\n"
+ "ldr d18, [x22], #0x8\n"
+ "cmp x19, #0x4\n"
+ "ldr d17, [x21], #0x8\n"
"ldr d16, [x20], #0x8\n"
- "str d19, [x21, #0x0]\n"
- "str d18, [x21, #0x40]\n"
- "str d17, [x21, #0x80]\n"
- "str d16, [x21, #0xc0]\n"
- "add x21, x21, #0x8\n"
+ "str d19, [x23, #0x0]\n"
+ "str d18, [x23, #0x40]\n"
+ "str d17, [x23, #0x80]\n"
+ "str d16, [x23, #0xc0]\n"
+ "add x23, x23, #0x8\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
- "cmp x24, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
- "ldr h19, [x25], #0x2\n"
- "ldr h18, [x23], #0x2\n"
- "sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr h17, [x22], #0x2\n"
+ "ldr h19, [x24], #0x2\n"
+ "sub x19, x19, #0x1\n"
+ "ldr h18, [x22], #0x2\n"
+ "cmp x19, #0x1\n"
+ "ldr h17, [x21], #0x2\n"
"ldr h16, [x20], #0x2\n"
- "str h19, [x21, #0x0]\n"
- "str h18, [x21, #0x40]\n"
- "str h17, [x21, #0x80]\n"
- "str h16, [x21, #0xc0]\n"
- "add x21, x21, #0x2\n"
+ "str h19, [x23, #0x0]\n"
+ "str h18, [x23, #0x40]\n"
+ "str h17, [x23, #0x80]\n"
+ "str h16, [x23, #0xc0]\n"
+ "add x23, x23, #0x2\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
- "cmp %x[height], #0x4\n"
"add %x[out], %x[out], #0x100\n"
+ "cmp %x[height], #0x4\n"
"bge 1b\n"
"cbz %x[height], 20f\n"
"10:" // Main loop skip
"11:" // Tail row loop: Head
- "mov x20, %x[width]\n"
- "mov x25, %x[in]\n"
- "cmp x20, #0x20\n"
- "add %x[in], x25, %x[in_stride]\n"
- "mov x21, %x[out]\n"
+ "mov x24, %x[in]\n"
+ "mov x23, %x[out]\n"
+ "add %x[in], x24, %x[in_stride]\n"
"sub %x[height], %x[height], #0x1\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x20\n"
"blt 13f\n"
"12:" // Tail row loop: Column loop
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x25], #0x10\n"
- "sub x20, x20, #0x20\n"
- "cmp x20, #0x20\n"
- "ldr q17, [x25], #0x10\n"
- "ldr q16, [x25], #0x10\n"
- "str q19, [x21, #0x0]\n"
- "str q18, [x21, #0x10]\n"
- "str q17, [x21, #0x20]\n"
- "str q16, [x21, #0x30]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr q19, [x24], #0x10\n"
+ "sub x19, x19, #0x20\n"
+ "cmp x19, #0x20\n"
+ "ldr q18, [x24], #0x10\n"
+ "ldr q17, [x24], #0x10\n"
+ "ldr q16, [x24], #0x10\n"
+ "str q19, [x23, #0x0]\n"
+ "str q18, [x23, #0x10]\n"
+ "str q17, [x23, #0x20]\n"
+ "str q16, [x23, #0x30]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 12b\n"
"13:" // Tail row loop: Column loop skip
- "cmp x20, #0x10\n"
+ "cmp x19, #0x10\n"
"blt 15f\n"
"14:" // Tail row loop: width 16 loop: loop
- "ldr q17, [x25], #0x10\n"
- "ldr q16, [x25], #0x10\n"
- "sub x20, x20, #0x10\n"
- "cmp x20, #0x10\n"
- "str q17, [x21, #0x0]\n"
- "str q16, [x21, #0x10]\n"
- "add x21, x21, #0x20\n"
+ "ldr q17, [x24], #0x10\n"
+ "sub x19, x19, #0x10\n"
+ "cmp x19, #0x10\n"
+ "ldr q16, [x24], #0x10\n"
+ "str q17, [x23, #0x0]\n"
+ "str q16, [x23, #0x10]\n"
+ "add x23, x23, #0x20\n"
"bge 14b\n"
"15:" // Tail row loop: width 16 loop: skip
- "cmp x20, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 17f\n"
"16:" // Tail row loop: width 4 loop: loop
- "ldr d16, [x25], #0x8\n"
- "sub x20, x20, #0x4\n"
- "cmp x20, #0x4\n"
- "str d16, [x21, #0x0]\n"
- "add x21, x21, #0x8\n"
+ "ldr d16, [x24], #0x8\n"
+ "sub x19, x19, #0x4\n"
+ "cmp x19, #0x4\n"
+ "str d16, [x23, #0x0]\n"
+ "add x23, x23, #0x8\n"
"bge 16b\n"
"17:" // Tail row loop: width 4 loop: skip
- "cmp x20, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 19f\n"
"18:" // Tail row loop: width 1 loop: loop
- "ldr h16, [x25], #0x2\n"
- "sub x20, x20, #0x1\n"
- "cmp x20, #0x1\n"
- "str h16, [x21, #0x0]\n"
- "add x21, x21, #0x2\n"
+ "ldr h16, [x24], #0x2\n"
+ "sub x19, x19, #0x1\n"
+ "cmp x19, #0x1\n"
+ "str h16, [x23, #0x0]\n"
+ "add x23, x23, #0x2\n"
"bge 18b\n"
"19:" // Tail row loop: width 1 loop: skip
- "cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x40\n"
+ "cmp %x[height], #0x1\n"
"bge 11b\n"
"20:" // Done
+
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
- : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
+ : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_96.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_96.hpp
index a048fbb109..190999ba53 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_96.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_96.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -36,217 +36,218 @@ void a64_transpose_interleave_96(uint32_t *out, const uint32_t *in, size_t width
"cmp %x[height], #0x4\n"
"blt 10f\n"
"1:" // Main row loop: Head
- "mov x25, %x[in]\n"
- "mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
- "add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
- "cmp x24, #0x18\n"
+ "mov x24, %x[in]\n"
+ "mov x23, %x[out]\n"
+ "add x22, x24, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x18\n"
"blt 3f\n"
"2:" // Main row loop: Column loop
- "ldr q7, [x25], #0x10\n"
- "ldr q6, [x23], #0x10\n"
- "sub x24, x24, #0x18\n"
- "cmp x24, #0x18\n"
- "ldr q5, [x22], #0x10\n"
+ "ldr q7, [x24], #0x10\n"
+ "sub x19, x19, #0x18\n"
+ "ldr q6, [x22], #0x10\n"
+ "cmp x19, #0x18\n"
+ "ldr q5, [x21], #0x10\n"
"ldr q4, [x20], #0x10\n"
- "ldr q3, [x25], #0x10\n"
- "ldr q2, [x23], #0x10\n"
- "ldr q1, [x22], #0x10\n"
+ "ldr q3, [x24], #0x10\n"
+ "ldr q2, [x22], #0x10\n"
+ "ldr q1, [x21], #0x10\n"
"ldr q0, [x20], #0x10\n"
- "ldr q31, [x25], #0x10\n"
- "ldr q30, [x23], #0x10\n"
- "ldr q29, [x22], #0x10\n"
+ "ldr q31, [x24], #0x10\n"
+ "ldr q30, [x22], #0x10\n"
+ "ldr q29, [x21], #0x10\n"
"ldr q28, [x20], #0x10\n"
- "ldr q27, [x25], #0x10\n"
- "ldr q26, [x23], #0x10\n"
- "ldr q25, [x22], #0x10\n"
+ "ldr q27, [x24], #0x10\n"
+ "ldr q26, [x22], #0x10\n"
+ "ldr q25, [x21], #0x10\n"
"ldr q24, [x20], #0x10\n"
- "ldr q23, [x25], #0x10\n"
- "ldr q22, [x23], #0x10\n"
- "ldr q21, [x22], #0x10\n"
+ "ldr q23, [x24], #0x10\n"
+ "ldr q22, [x22], #0x10\n"
+ "ldr q21, [x21], #0x10\n"
"ldr q20, [x20], #0x10\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "ldr q17, [x22], #0x10\n"
+ "ldr q19, [x24], #0x10\n"
+ "ldr q18, [x22], #0x10\n"
+ "ldr q17, [x21], #0x10\n"
"ldr q16, [x20], #0x10\n"
- "str q7, [x21, #0x0]\n"
- "str q3, [x21, #0x10]\n"
- "str q31, [x21, #0x20]\n"
- "str q27, [x21, #0x30]\n"
- "str q23, [x21, #0x40]\n"
- "str q19, [x21, #0x50]\n"
- "str q6, [x21, #0x60]\n"
- "str q2, [x21, #0x70]\n"
- "str q30, [x21, #0x80]\n"
- "str q26, [x21, #0x90]\n"
- "str q22, [x21, #0xa0]\n"
- "str q18, [x21, #0xb0]\n"
- "str q5, [x21, #0xc0]\n"
- "str q1, [x21, #0xd0]\n"
- "str q29, [x21, #0xe0]\n"
- "str q25, [x21, #0xf0]\n"
- "str q21, [x21, #0x100]\n"
- "str q17, [x21, #0x110]\n"
- "str q4, [x21, #0x120]\n"
- "str q0, [x21, #0x130]\n"
- "str q28, [x21, #0x140]\n"
- "str q24, [x21, #0x150]\n"
- "str q20, [x21, #0x160]\n"
- "str q16, [x21, #0x170]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q7, [x23, #0x0]\n"
+ "str q3, [x23, #0x10]\n"
+ "str q31, [x23, #0x20]\n"
+ "str q27, [x23, #0x30]\n"
+ "str q23, [x23, #0x40]\n"
+ "str q19, [x23, #0x50]\n"
+ "str q6, [x23, #0x60]\n"
+ "str q2, [x23, #0x70]\n"
+ "str q30, [x23, #0x80]\n"
+ "str q26, [x23, #0x90]\n"
+ "str q22, [x23, #0xa0]\n"
+ "str q18, [x23, #0xb0]\n"
+ "str q5, [x23, #0xc0]\n"
+ "str q1, [x23, #0xd0]\n"
+ "str q29, [x23, #0xe0]\n"
+ "str q25, [x23, #0xf0]\n"
+ "str q21, [x23, #0x100]\n"
+ "str q17, [x23, #0x110]\n"
+ "str q4, [x23, #0x120]\n"
+ "str q0, [x23, #0x130]\n"
+ "str q28, [x23, #0x140]\n"
+ "str q24, [x23, #0x150]\n"
+ "str q20, [x23, #0x160]\n"
+ "str q16, [x23, #0x170]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Column loop skip
- "cmp x24, #0x10\n"
+ "cmp x19, #0x10\n"
"blt 5f\n"
"4:" // Main row loop: width 16 loop: loop
- "ldr q31, [x25], #0x10\n"
- "ldr q30, [x23], #0x10\n"
- "sub x24, x24, #0x10\n"
- "cmp x24, #0x10\n"
- "ldr q29, [x22], #0x10\n"
+ "ldr q31, [x24], #0x10\n"
+ "sub x19, x19, #0x10\n"
+ "ldr q30, [x22], #0x10\n"
+ "cmp x19, #0x10\n"
+ "ldr q29, [x21], #0x10\n"
"ldr q28, [x20], #0x10\n"
- "ldr q27, [x25], #0x10\n"
- "ldr q26, [x23], #0x10\n"
- "ldr q25, [x22], #0x10\n"
+ "ldr q27, [x24], #0x10\n"
+ "ldr q26, [x22], #0x10\n"
+ "ldr q25, [x21], #0x10\n"
"ldr q24, [x20], #0x10\n"
- "ldr q23, [x25], #0x10\n"
- "ldr q22, [x23], #0x10\n"
- "ldr q21, [x22], #0x10\n"
+ "ldr q23, [x24], #0x10\n"
+ "ldr q22, [x22], #0x10\n"
+ "ldr q21, [x21], #0x10\n"
"ldr q20, [x20], #0x10\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "ldr q17, [x22], #0x10\n"
+ "ldr q19, [x24], #0x10\n"
+ "ldr q18, [x22], #0x10\n"
+ "ldr q17, [x21], #0x10\n"
"ldr q16, [x20], #0x10\n"
- "str q31, [x21, #0x0]\n"
- "str q27, [x21, #0x10]\n"
- "str q23, [x21, #0x20]\n"
- "str q19, [x21, #0x30]\n"
- "str q30, [x21, #0x60]\n"
- "str q26, [x21, #0x70]\n"
- "str q22, [x21, #0x80]\n"
- "str q18, [x21, #0x90]\n"
- "str q29, [x21, #0xc0]\n"
- "str q25, [x21, #0xd0]\n"
- "str q21, [x21, #0xe0]\n"
- "str q17, [x21, #0xf0]\n"
- "str q28, [x21, #0x120]\n"
- "str q24, [x21, #0x130]\n"
- "str q20, [x21, #0x140]\n"
- "str q16, [x21, #0x150]\n"
- "add x21, x21, #0x40\n"
+ "str q31, [x23, #0x0]\n"
+ "str q27, [x23, #0x10]\n"
+ "str q23, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "str q30, [x23, #0x60]\n"
+ "str q26, [x23, #0x70]\n"
+ "str q22, [x23, #0x80]\n"
+ "str q18, [x23, #0x90]\n"
+ "str q29, [x23, #0xc0]\n"
+ "str q25, [x23, #0xd0]\n"
+ "str q21, [x23, #0xe0]\n"
+ "str q17, [x23, #0xf0]\n"
+ "str q28, [x23, #0x120]\n"
+ "str q24, [x23, #0x130]\n"
+ "str q20, [x23, #0x140]\n"
+ "str q16, [x23, #0x150]\n"
+ "add x23, x23, #0x40\n"
"bge 4b\n"
"5:" // Main row loop: width 16 loop: skip
- "cmp x24, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr q17, [x22], #0x10\n"
+ "ldr q19, [x24], #0x10\n"
+ "sub x19, x19, #0x4\n"
+ "ldr q18, [x22], #0x10\n"
+ "cmp x19, #0x4\n"
+ "ldr q17, [x21], #0x10\n"
"ldr q16, [x20], #0x10\n"
- "str q19, [x21, #0x0]\n"
- "str q18, [x21, #0x60]\n"
- "str q17, [x21, #0xc0]\n"
- "str q16, [x21, #0x120]\n"
- "add x21, x21, #0x10\n"
+ "str q19, [x23, #0x0]\n"
+ "str q18, [x23, #0x60]\n"
+ "str q17, [x23, #0xc0]\n"
+ "str q16, [x23, #0x120]\n"
+ "add x23, x23, #0x10\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
- "cmp x24, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
- "ldr s19, [x25], #0x4\n"
- "ldr s18, [x23], #0x4\n"
- "sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr s17, [x22], #0x4\n"
+ "ldr s19, [x24], #0x4\n"
+ "sub x19, x19, #0x1\n"
+ "ldr s18, [x22], #0x4\n"
+ "cmp x19, #0x1\n"
+ "ldr s17, [x21], #0x4\n"
"ldr s16, [x20], #0x4\n"
- "str s19, [x21, #0x0]\n"
- "str s18, [x21, #0x60]\n"
- "str s17, [x21, #0xc0]\n"
- "str s16, [x21, #0x120]\n"
- "add x21, x21, #0x4\n"
+ "str s19, [x23, #0x0]\n"
+ "str s18, [x23, #0x60]\n"
+ "str s17, [x23, #0xc0]\n"
+ "str s16, [x23, #0x120]\n"
+ "add x23, x23, #0x4\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
- "cmp %x[height], #0x4\n"
"add %x[out], %x[out], #0x180\n"
+ "cmp %x[height], #0x4\n"
"bge 1b\n"
"cbz %x[height], 20f\n"
"10:" // Main loop skip
"11:" // Tail row loop: Head
- "mov x20, %x[width]\n"
- "mov x25, %x[in]\n"
- "cmp x20, #0x18\n"
- "add %x[in], x25, %x[in_stride]\n"
- "mov x21, %x[out]\n"
+ "mov x24, %x[in]\n"
+ "mov x23, %x[out]\n"
+ "add %x[in], x24, %x[in_stride]\n"
"sub %x[height], %x[height], #0x1\n"
+ "mov x19, %x[width]\n"
+ "cmp x19, #0x18\n"
"blt 13f\n"
"12:" // Tail row loop: Column loop
- "ldr q21, [x25], #0x10\n"
- "ldr q20, [x25], #0x10\n"
- "sub x20, x20, #0x18\n"
- "cmp x20, #0x18\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x25], #0x10\n"
- "ldr q17, [x25], #0x10\n"
- "ldr q16, [x25], #0x10\n"
- "str q21, [x21, #0x0]\n"
- "str q20, [x21, #0x10]\n"
- "str q19, [x21, #0x20]\n"
- "str q18, [x21, #0x30]\n"
- "str q17, [x21, #0x40]\n"
- "str q16, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr q21, [x24], #0x10\n"
+ "sub x19, x19, #0x18\n"
+ "cmp x19, #0x18\n"
+ "ldr q20, [x24], #0x10\n"
+ "ldr q19, [x24], #0x10\n"
+ "ldr q18, [x24], #0x10\n"
+ "ldr q17, [x24], #0x10\n"
+ "ldr q16, [x24], #0x10\n"
+ "str q21, [x23, #0x0]\n"
+ "str q20, [x23, #0x10]\n"
+ "str q19, [x23, #0x20]\n"
+ "str q18, [x23, #0x30]\n"
+ "str q17, [x23, #0x40]\n"
+ "str q16, [x23, #0x50]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 12b\n"
"13:" // Tail row loop: Column loop skip
- "cmp x20, #0x10\n"
+ "cmp x19, #0x10\n"
"blt 15f\n"
"14:" // Tail row loop: width 16 loop: loop
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x25], #0x10\n"
- "sub x20, x20, #0x10\n"
- "cmp x20, #0x10\n"
- "ldr q17, [x25], #0x10\n"
- "ldr q16, [x25], #0x10\n"
- "str q19, [x21, #0x0]\n"
- "str q18, [x21, #0x10]\n"
- "str q17, [x21, #0x20]\n"
- "str q16, [x21, #0x30]\n"
- "add x21, x21, #0x40\n"
+ "ldr q19, [x24], #0x10\n"
+ "sub x19, x19, #0x10\n"
+ "cmp x19, #0x10\n"
+ "ldr q18, [x24], #0x10\n"
+ "ldr q17, [x24], #0x10\n"
+ "ldr q16, [x24], #0x10\n"
+ "str q19, [x23, #0x0]\n"
+ "str q18, [x23, #0x10]\n"
+ "str q17, [x23, #0x20]\n"
+ "str q16, [x23, #0x30]\n"
+ "add x23, x23, #0x40\n"
"bge 14b\n"
"15:" // Tail row loop: width 16 loop: skip
- "cmp x20, #0x4\n"
+ "cmp x19, #0x4\n"
"blt 17f\n"
"16:" // Tail row loop: width 4 loop: loop
- "ldr q16, [x25], #0x10\n"
- "sub x20, x20, #0x4\n"
- "cmp x20, #0x4\n"
- "str q16, [x21, #0x0]\n"
- "add x21, x21, #0x10\n"
+ "ldr q16, [x24], #0x10\n"
+ "sub x19, x19, #0x4\n"
+ "cmp x19, #0x4\n"
+ "str q16, [x23, #0x0]\n"
+ "add x23, x23, #0x10\n"
"bge 16b\n"
"17:" // Tail row loop: width 4 loop: skip
- "cmp x20, #0x1\n"
+ "cmp x19, #0x1\n"
"blt 19f\n"
"18:" // Tail row loop: width 1 loop: loop
- "ldr s16, [x25], #0x4\n"
- "sub x20, x20, #0x1\n"
- "cmp x20, #0x1\n"
- "str s16, [x21, #0x0]\n"
- "add x21, x21, #0x4\n"
+ "ldr s16, [x24], #0x4\n"
+ "sub x19, x19, #0x1\n"
+ "cmp x19, #0x1\n"
+ "str s16, [x23, #0x0]\n"
+ "add x23, x23, #0x4\n"
"bge 18b\n"
"19:" // Tail row loop: width 1 loop: skip
- "cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x60\n"
+ "cmp %x[height], #0x1\n"
"bge 11b\n"
"20:" // Done
+
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL.hpp
index 01921c5ad9..3475d30abc 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -37,82 +37,82 @@ void sme_transpose_interleave_16VL(uint32_t *out, const uint32_t *in, size_t wid
".inst 0xd503477f // SMSTART ZA\n"
"ptrue p7.b\n"
"1:" // Main row loop: Head
- "mov x23, %x[in]\n"
- "add %x[in], x23, %x[in_stride]\n"
- "mov x22, %x[out]\n"
+ "mov x22, %x[in]\n"
+ "add %x[in], x22, %x[in_stride]\n"
+ "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x1\n"
- "mov x21, %x[width]\n"
+ "mov x20, %x[width]\n"
"2:" // Main row loop: Column loop
- "mov x20, x21\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z31.s }, p0/Z, [x23]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z30.s }, p0/Z, [x23, #1, MUL VL]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z29.s }, p0/Z, [x23, #2, MUL VL]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z28.s }, p0/Z, [x23, #3, MUL VL]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z27.s }, p0/Z, [x23, #4, MUL VL]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z26.s }, p0/Z, [x23, #5, MUL VL]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z25.s }, p0/Z, [x23, #6, MUL VL]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z24.s }, p0/Z, [x23, #7, MUL VL]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "decw x20\n"
- "whilelt p6.s, XZR, x20\n"
- "decw x20\n"
- "whilelt p5.s, XZR, x20\n"
- "decw x20\n"
- "whilelt p4.s, XZR, x20\n"
- "decw x20\n"
- "whilelt p3.s, XZR, x20\n"
- "decw x20\n"
- "whilelt p2.s, XZR, x20\n"
- "decw x20\n"
- "whilelt p1.s, XZR, x20\n"
- "decw x20\n"
- "addvl x23, x23, #16\n"
- "ld1w { z23.s }, p0/Z, [x23, #-8, MUL VL]\n"
- "whilelt p0.s, XZR, x20\n"
- "mov x20, x22\n"
- "ld1w { z22.s }, p6/Z, [x23, #-7, MUL VL]\n"
- "decw x21, ALL, MUL #16\n"
- "ld1w { z21.s }, p5/Z, [x23, #-6, MUL VL]\n"
- "cmp x21, #0x0\n"
- "ld1w { z20.s }, p4/Z, [x23, #-5, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "ld1w { z19.s }, p3/Z, [x23, #-4, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x23, #-3, MUL VL]\n"
- "ld1w { z17.s }, p1/Z, [x23, #-2, MUL VL]\n"
- "ld1w { z16.s }, p0/Z, [x23, #-1, MUL VL]\n"
- "st1w { z31.s }, p7, [x20]\n"
- "st1w { z30.s }, p7, [x20, #1, MUL VL]\n"
- "st1w { z29.s }, p7, [x20, #2, MUL VL]\n"
- "st1w { z28.s }, p7, [x20, #3, MUL VL]\n"
- "st1w { z27.s }, p7, [x20, #4, MUL VL]\n"
- "st1w { z26.s }, p7, [x20, #5, MUL VL]\n"
- "st1w { z25.s }, p7, [x20, #6, MUL VL]\n"
- "st1w { z24.s }, p7, [x20, #7, MUL VL]\n"
- "addvl x20, x20, #16\n"
- "st1w { z23.s }, p7, [x20, #-8, MUL VL]\n"
- "st1w { z22.s }, p7, [x20, #-7, MUL VL]\n"
- "st1w { z21.s }, p7, [x20, #-6, MUL VL]\n"
- "st1w { z20.s }, p7, [x20, #-5, MUL VL]\n"
- "st1w { z19.s }, p7, [x20, #-4, MUL VL]\n"
- "st1w { z18.s }, p7, [x20, #-3, MUL VL]\n"
- "st1w { z17.s }, p7, [x20, #-2, MUL VL]\n"
- "st1w { z16.s }, p7, [x20, #-1, MUL VL]\n"
+ "mov x19, x20\n"
+ "whilelt p0.s, XZR, x19\n"
+ "ld1w { z31.s }, p0/Z, [x22]\n"
+ "decw x19\n"
+ "whilelt p0.s, XZR, x19\n"
+ "ld1w { z30.s }, p0/Z, [x22, #1, MUL VL]\n"
+ "decw x19\n"
+ "whilelt p0.s, XZR, x19\n"
+ "ld1w { z29.s }, p0/Z, [x22, #2, MUL VL]\n"
+ "decw x19\n"
+ "whilelt p0.s, XZR, x19\n"
+ "ld1w { z28.s }, p0/Z, [x22, #3, MUL VL]\n"
+ "decw x19\n"
+ "whilelt p0.s, XZR, x19\n"
+ "ld1w { z27.s }, p0/Z, [x22, #4, MUL VL]\n"
+ "decw x19\n"
+ "whilelt p0.s, XZR, x19\n"
+ "ld1w { z26.s }, p0/Z, [x22, #5, MUL VL]\n"
+ "decw x19\n"
+ "whilelt p0.s, XZR, x19\n"
+ "ld1w { z25.s }, p0/Z, [x22, #6, MUL VL]\n"
+ "decw x19\n"
+ "whilelt p0.s, XZR, x19\n"
+ "ld1w { z24.s }, p0/Z, [x22, #7, MUL VL]\n"
+ "decw x19\n"
+ "whilelt p0.s, XZR, x19\n"
+ "decw x19\n"
+ "whilelt p6.s, XZR, x19\n"
+ "decw x19\n"
+ "whilelt p5.s, XZR, x19\n"
+ "decw x19\n"
+ "whilelt p4.s, XZR, x19\n"
+ "decw x19\n"
+ "whilelt p3.s, XZR, x19\n"
+ "decw x19\n"
+ "whilelt p2.s, XZR, x19\n"
+ "decw x19\n"
+ "whilelt p1.s, XZR, x19\n"
+ "decw x19\n"
+ "addvl x22, x22, #16\n"
+ "ld1w { z23.s }, p0/Z, [x22, #-8, MUL VL]\n"
+ "whilelt p0.s, XZR, x19\n"
+ "mov x19, x21\n"
+ "ld1w { z22.s }, p6/Z, [x22, #-7, MUL VL]\n"
+ "decw x20, ALL, MUL #16\n"
+ "ld1w { z21.s }, p5/Z, [x22, #-6, MUL VL]\n"
+ "cmp x20, #0x0\n"
+ "ld1w { z20.s }, p4/Z, [x22, #-5, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
+ "ld1w { z19.s }, p3/Z, [x22, #-4, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x22, #-3, MUL VL]\n"
+ "ld1w { z17.s }, p1/Z, [x22, #-2, MUL VL]\n"
+ "ld1w { z16.s }, p0/Z, [x22, #-1, MUL VL]\n"
+ "st1w { z31.s }, p7, [x19]\n"
+ "st1w { z30.s }, p7, [x19, #1, MUL VL]\n"
+ "st1w { z29.s }, p7, [x19, #2, MUL VL]\n"
+ "st1w { z28.s }, p7, [x19, #3, MUL VL]\n"
+ "st1w { z27.s }, p7, [x19, #4, MUL VL]\n"
+ "st1w { z26.s }, p7, [x19, #5, MUL VL]\n"
+ "st1w { z25.s }, p7, [x19, #6, MUL VL]\n"
+ "st1w { z24.s }, p7, [x19, #7, MUL VL]\n"
+ "addvl x19, x19, #16\n"
+ "st1w { z23.s }, p7, [x19, #-8, MUL VL]\n"
+ "st1w { z22.s }, p7, [x19, #-7, MUL VL]\n"
+ "st1w { z21.s }, p7, [x19, #-6, MUL VL]\n"
+ "st1w { z20.s }, p7, [x19, #-5, MUL VL]\n"
+ "st1w { z19.s }, p7, [x19, #-4, MUL VL]\n"
+ "st1w { z18.s }, p7, [x19, #-3, MUL VL]\n"
+ "st1w { z17.s }, p7, [x19, #-2, MUL VL]\n"
+ "st1w { z16.s }, p7, [x19, #-1, MUL VL]\n"
"bgt 2b\n"
"3:" // Main row loop: Column loop skip
"cmp %x[height], #0x1\n"
@@ -121,7 +121,7 @@ void sme_transpose_interleave_16VL(uint32_t *out, const uint32_t *in, size_t wid
".inst 0xd503467f // SMSTOP\n"
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x20", "x21", "x22", "x23", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_1x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_1x4.hpp
index 6b9b471fdc..fc3c4ab0fc 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_1x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_1x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -43,101 +43,101 @@ void sme_transpose_interleave_16VL_1x4(uint8_t *out, const uint8_t *in, size_t w
".inst 0xd503477f // SMSTART ZA\n"
"ptrue p4.b\n"
"1:" // Main row loop: Head
- "mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
+ "mov x25, %x[in]\n"
"add x24, x25, %x[in_stride]\n"
"add x23, x24, %x[in_stride]\n"
+ "add x22, x23, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x23, %x[in_stride]\n"
- "csel x23, x23, %x[pad_row], GT\n"
- "csel x24, x24, %x[pad_row], GE\n"
+ "add %x[in], x22, %x[in_stride]\n"
+ "csel x22, x22, %x[pad_row], GT\n"
+ "csel x23, x23, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "mov x22, %x[out]\n"
- "csel x25, x25, %x[pad_row], GT\n"
+ "mov x21, %x[out]\n"
+ "csel x24, x24, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x4\n"
- "mov x21, %x[width]\n"
+ "mov x20, %x[width]\n"
"2:" // Main row loop: Column loop
- "mov x20, x21\n"
- "whilelt p3.b, XZR, x20\n"
- "ld1b { z20.b }, p3/Z, [x26]\n"
- "decb x20\n"
- "whilelt p2.b, XZR, x20\n"
- "ld1b { z18.b }, p2/Z, [x26, #1, MUL VL]\n"
- "decb x20\n"
- "whilelt p1.b, XZR, x20\n"
- "ld1b { z17.b }, p3/Z, [x25]\n"
- "decb x20\n"
- "whilelt p0.b, XZR, x20\n"
- "ld1b { z19.b }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1b { z16.b }, p3/Z, [x24]\n"
+ "mov x19, x20\n"
+ "whilelt p3.b, XZR, x19\n"
+ "ld1b { z20.b }, p3/Z, [x25]\n"
+ "decb x19\n"
+ "whilelt p2.b, XZR, x19\n"
+ "ld1b { z18.b }, p2/Z, [x25, #1, MUL VL]\n"
+ "decb x19\n"
+ "whilelt p1.b, XZR, x19\n"
+ "ld1b { z17.b }, p3/Z, [x24]\n"
+ "decb x19\n"
+ "whilelt p0.b, XZR, x19\n"
+ "ld1b { z19.b }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1b { z16.b }, p3/Z, [x23]\n"
"zip1 z25.b, z20.b, z16.b\n"
"zip2 z24.b, z20.b, z16.b\n"
- "mov x20, x22\n"
- "ld1b { z16.b }, p2/Z, [x24, #1, MUL VL]\n"
+ "mov x19, x21\n"
+ "ld1b { z16.b }, p2/Z, [x23, #1, MUL VL]\n"
"zip1 z22.b, z18.b, z16.b\n"
"zip2 z21.b, z18.b, z16.b\n"
- "decw x21, ALL, MUL #16\n"
- "ld1b { z16.b }, p3/Z, [x23]\n"
+ "decw x20, ALL, MUL #16\n"
+ "ld1b { z16.b }, p3/Z, [x22]\n"
"zip1 z18.b, z17.b, z16.b\n"
"zip2 z17.b, z17.b, z16.b\n"
- "cmp x21, #0x0\n"
- "ld1b { z16.b }, p2/Z, [x23, #1, MUL VL]\n"
+ "cmp x20, #0x0\n"
+ "ld1b { z16.b }, p2/Z, [x22, #1, MUL VL]\n"
"zip1 z20.b, z19.b, z16.b\n"
"zip2 z16.b, z19.b, z16.b\n"
- "add x22, x22, %x[out_stride]\n"
- "ld1b { z19.b }, p1/Z, [x26, #2, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
+ "ld1b { z19.b }, p1/Z, [x25, #2, MUL VL]\n"
"zip1 z23.b, z25.b, z18.b\n"
"zip2 z0.b, z25.b, z18.b\n"
- "ld1b { z18.b }, p0/Z, [x26, #3, MUL VL]\n"
+ "ld1b { z18.b }, p0/Z, [x25, #3, MUL VL]\n"
"zip1 z31.b, z24.b, z17.b\n"
"zip2 z30.b, z24.b, z17.b\n"
- "addvl x26, x26, #4\n"
- "ld1b { z17.b }, p1/Z, [x25, #2, MUL VL]\n"
+ "addvl x25, x25, #4\n"
+ "ld1b { z17.b }, p1/Z, [x24, #2, MUL VL]\n"
"zip1 z29.b, z22.b, z20.b\n"
"zip2 z28.b, z22.b, z20.b\n"
- "ld1b { z22.b }, p0/Z, [x25, #3, MUL VL]\n"
+ "ld1b { z22.b }, p0/Z, [x24, #3, MUL VL]\n"
"zip1 z27.b, z21.b, z16.b\n"
"zip2 z26.b, z21.b, z16.b\n"
- "addvl x25, x25, #4\n"
- "ld1b { z16.b }, p1/Z, [x24, #2, MUL VL]\n"
+ "addvl x24, x24, #4\n"
+ "ld1b { z16.b }, p1/Z, [x23, #2, MUL VL]\n"
"zip1 z21.b, z19.b, z16.b\n"
"zip2 z20.b, z19.b, z16.b\n"
- "ld1b { z16.b }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1b { z16.b }, p0/Z, [x23, #3, MUL VL]\n"
"zip1 z25.b, z18.b, z16.b\n"
"zip2 z24.b, z18.b, z16.b\n"
- "addvl x24, x24, #4\n"
- "ld1b { z16.b }, p1/Z, [x23, #2, MUL VL]\n"
+ "addvl x23, x23, #4\n"
+ "ld1b { z16.b }, p1/Z, [x22, #2, MUL VL]\n"
"zip1 z19.b, z17.b, z16.b\n"
"zip2 z18.b, z17.b, z16.b\n"
- "ld1b { z16.b }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1b { z16.b }, p0/Z, [x22, #3, MUL VL]\n"
"zip1 z17.b, z22.b, z16.b\n"
"zip2 z16.b, z22.b, z16.b\n"
- "addvl x23, x23, #4\n"
- "st1b { z23.b }, p4, [x20]\n"
+ "addvl x22, x22, #4\n"
+ "st1b { z23.b }, p4, [x19]\n"
"zip1 z23.b, z21.b, z19.b\n"
"zip2 z22.b, z21.b, z19.b\n"
- "st1b { z0.b }, p4, [x20, #1, MUL VL]\n"
+ "st1b { z0.b }, p4, [x19, #1, MUL VL]\n"
"zip1 z21.b, z20.b, z18.b\n"
"zip2 z20.b, z20.b, z18.b\n"
- "st1b { z31.b }, p4, [x20, #2, MUL VL]\n"
+ "st1b { z31.b }, p4, [x19, #2, MUL VL]\n"
"zip1 z19.b, z25.b, z17.b\n"
"zip2 z18.b, z25.b, z17.b\n"
- "st1b { z30.b }, p4, [x20, #3, MUL VL]\n"
+ "st1b { z30.b }, p4, [x19, #3, MUL VL]\n"
"zip1 z17.b, z24.b, z16.b\n"
"zip2 z16.b, z24.b, z16.b\n"
- "st1b { z29.b }, p4, [x20, #4, MUL VL]\n"
- "st1b { z28.b }, p4, [x20, #5, MUL VL]\n"
- "st1b { z27.b }, p4, [x20, #6, MUL VL]\n"
- "st1b { z26.b }, p4, [x20, #7, MUL VL]\n"
- "addvl x20, x20, #16\n"
- "st1b { z23.b }, p4, [x20, #-8, MUL VL]\n"
- "st1b { z22.b }, p4, [x20, #-7, MUL VL]\n"
- "st1b { z21.b }, p4, [x20, #-6, MUL VL]\n"
- "st1b { z20.b }, p4, [x20, #-5, MUL VL]\n"
- "st1b { z19.b }, p4, [x20, #-4, MUL VL]\n"
- "st1b { z18.b }, p4, [x20, #-3, MUL VL]\n"
- "st1b { z17.b }, p4, [x20, #-2, MUL VL]\n"
- "st1b { z16.b }, p4, [x20, #-1, MUL VL]\n"
+ "st1b { z29.b }, p4, [x19, #4, MUL VL]\n"
+ "st1b { z28.b }, p4, [x19, #5, MUL VL]\n"
+ "st1b { z27.b }, p4, [x19, #6, MUL VL]\n"
+ "st1b { z26.b }, p4, [x19, #7, MUL VL]\n"
+ "addvl x19, x19, #16\n"
+ "st1b { z23.b }, p4, [x19, #-8, MUL VL]\n"
+ "st1b { z22.b }, p4, [x19, #-7, MUL VL]\n"
+ "st1b { z21.b }, p4, [x19, #-6, MUL VL]\n"
+ "st1b { z20.b }, p4, [x19, #-5, MUL VL]\n"
+ "st1b { z19.b }, p4, [x19, #-4, MUL VL]\n"
+ "st1b { z18.b }, p4, [x19, #-3, MUL VL]\n"
+ "st1b { z17.b }, p4, [x19, #-2, MUL VL]\n"
+ "st1b { z16.b }, p4, [x19, #-1, MUL VL]\n"
"bgt 2b\n"
"3:" // Main row loop: Column loop skip
"cmp %x[height], #0x1\n"
@@ -146,7 +146,7 @@ void sme_transpose_interleave_16VL_1x4(uint8_t *out, const uint8_t *in, size_t w
".inst 0xd503467f // SMSTOP\n"
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_2x2.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_2x2.hpp
index 96128cf9c2..af1649fc8a 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_2x2.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_2x2.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -43,86 +43,86 @@ void sme_transpose_interleave_16VL_2x2(uint16_t *out, const uint16_t *in, size_t
".inst 0xd503477f // SMSTART ZA\n"
"ptrue p5.b\n"
"1:" // Main row loop: Head
- "mov x24, %x[in]\n"
- "add x23, x24, %x[in_stride]\n"
+ "mov x23, %x[in]\n"
+ "add x22, x23, %x[in_stride]\n"
"cmp %x[height], #0x1\n"
- "add %x[in], x23, %x[in_stride]\n"
- "mov x22, %x[out]\n"
- "csel x23, x23, %x[pad_row], GT\n"
+ "add %x[in], x22, %x[in_stride]\n"
+ "mov x21, %x[out]\n"
+ "csel x22, x22, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x2\n"
- "mov x21, %x[width]\n"
+ "mov x20, %x[width]\n"
"2:" // Main row loop: Column loop
- "mov x20, x21\n"
- "whilelt p2.h, XZR, x20\n"
- "ld1h { z17.h }, p2/Z, [x24]\n"
- "dech x20\n"
- "whilelt p1.h, XZR, x20\n"
- "ld1h { z19.h }, p1/Z, [x24, #1, MUL VL]\n"
- "dech x20\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z21.h }, p0/Z, [x24, #2, MUL VL]\n"
- "dech x20\n"
- "whilelt p4.h, XZR, x20\n"
- "ld1h { z20.h }, p4/Z, [x24, #3, MUL VL]\n"
- "dech x20\n"
- "whilelt p3.h, XZR, x20\n"
- "ld1h { z16.h }, p2/Z, [x23]\n"
+ "mov x19, x20\n"
+ "whilelt p2.h, XZR, x19\n"
+ "ld1h { z17.h }, p2/Z, [x23]\n"
+ "dech x19\n"
+ "whilelt p1.h, XZR, x19\n"
+ "ld1h { z19.h }, p1/Z, [x23, #1, MUL VL]\n"
+ "dech x19\n"
+ "whilelt p0.h, XZR, x19\n"
+ "ld1h { z21.h }, p0/Z, [x23, #2, MUL VL]\n"
+ "dech x19\n"
+ "whilelt p4.h, XZR, x19\n"
+ "ld1h { z20.h }, p4/Z, [x23, #3, MUL VL]\n"
+ "dech x19\n"
+ "whilelt p3.h, XZR, x19\n"
+ "ld1h { z16.h }, p2/Z, [x22]\n"
"zip1 z0.h, z17.h, z16.h\n"
- "dech x20\n"
- "whilelt p2.h, XZR, x20\n"
- "ld1h { z18.h }, p1/Z, [x23, #1, MUL VL]\n"
+ "dech x19\n"
+ "whilelt p2.h, XZR, x19\n"
+ "ld1h { z18.h }, p1/Z, [x22, #1, MUL VL]\n"
"zip2 z31.h, z17.h, z16.h\n"
- "dech x20\n"
- "whilelt p1.h, XZR, x20\n"
- "ld1h { z17.h }, p0/Z, [x23, #2, MUL VL]\n"
+ "dech x19\n"
+ "whilelt p1.h, XZR, x19\n"
+ "ld1h { z17.h }, p0/Z, [x22, #2, MUL VL]\n"
"zip1 z30.h, z19.h, z18.h\n"
- "dech x20\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z16.h }, p4/Z, [x23, #3, MUL VL]\n"
+ "dech x19\n"
+ "whilelt p0.h, XZR, x19\n"
+ "ld1h { z16.h }, p4/Z, [x22, #3, MUL VL]\n"
"zip2 z29.h, z19.h, z18.h\n"
- "ld1h { z19.h }, p3/Z, [x24, #4, MUL VL]\n"
- "mov x20, x22\n"
- "decw x21, ALL, MUL #16\n"
+ "ld1h { z19.h }, p3/Z, [x23, #4, MUL VL]\n"
+ "mov x19, x21\n"
+ "decw x20, ALL, MUL #16\n"
"zip1 z28.h, z21.h, z17.h\n"
- "ld1h { z18.h }, p2/Z, [x24, #5, MUL VL]\n"
+ "ld1h { z18.h }, p2/Z, [x23, #5, MUL VL]\n"
"zip2 z27.h, z21.h, z17.h\n"
"zip1 z26.h, z20.h, z16.h\n"
- "cmp x21, #0x0\n"
- "ld1h { z17.h }, p1/Z, [x24, #6, MUL VL]\n"
+ "cmp x20, #0x0\n"
+ "ld1h { z17.h }, p1/Z, [x23, #6, MUL VL]\n"
"zip2 z25.h, z20.h, z16.h\n"
- "add x22, x22, %x[out_stride]\n"
- "ld1h { z24.h }, p0/Z, [x24, #7, MUL VL]\n"
- "addvl x24, x24, #8\n"
- "ld1h { z16.h }, p3/Z, [x23, #4, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
+ "ld1h { z24.h }, p0/Z, [x23, #7, MUL VL]\n"
+ "addvl x23, x23, #8\n"
+ "ld1h { z16.h }, p3/Z, [x22, #4, MUL VL]\n"
"zip1 z23.h, z19.h, z16.h\n"
"zip2 z22.h, z19.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x23, #5, MUL VL]\n"
+ "ld1h { z16.h }, p2/Z, [x22, #5, MUL VL]\n"
"zip1 z21.h, z18.h, z16.h\n"
"zip2 z20.h, z18.h, z16.h\n"
- "ld1h { z16.h }, p1/Z, [x23, #6, MUL VL]\n"
+ "ld1h { z16.h }, p1/Z, [x22, #6, MUL VL]\n"
"zip1 z19.h, z17.h, z16.h\n"
"zip2 z18.h, z17.h, z16.h\n"
- "ld1h { z16.h }, p0/Z, [x23, #7, MUL VL]\n"
- "st1h { z0.h }, p5, [x20]\n"
- "addvl x23, x23, #8\n"
+ "ld1h { z16.h }, p0/Z, [x22, #7, MUL VL]\n"
+ "st1h { z0.h }, p5, [x19]\n"
+ "addvl x22, x22, #8\n"
"zip1 z17.h, z24.h, z16.h\n"
- "st1h { z31.h }, p5, [x20, #1, MUL VL]\n"
+ "st1h { z31.h }, p5, [x19, #1, MUL VL]\n"
"zip2 z16.h, z24.h, z16.h\n"
- "st1h { z30.h }, p5, [x20, #2, MUL VL]\n"
- "st1h { z29.h }, p5, [x20, #3, MUL VL]\n"
- "st1h { z28.h }, p5, [x20, #4, MUL VL]\n"
- "st1h { z27.h }, p5, [x20, #5, MUL VL]\n"
- "st1h { z26.h }, p5, [x20, #6, MUL VL]\n"
- "st1h { z25.h }, p5, [x20, #7, MUL VL]\n"
- "addvl x20, x20, #16\n"
- "st1h { z23.h }, p5, [x20, #-8, MUL VL]\n"
- "st1h { z22.h }, p5, [x20, #-7, MUL VL]\n"
- "st1h { z21.h }, p5, [x20, #-6, MUL VL]\n"
- "st1h { z20.h }, p5, [x20, #-5, MUL VL]\n"
- "st1h { z19.h }, p5, [x20, #-4, MUL VL]\n"
- "st1h { z18.h }, p5, [x20, #-3, MUL VL]\n"
- "st1h { z17.h }, p5, [x20, #-2, MUL VL]\n"
- "st1h { z16.h }, p5, [x20, #-1, MUL VL]\n"
+ "st1h { z30.h }, p5, [x19, #2, MUL VL]\n"
+ "st1h { z29.h }, p5, [x19, #3, MUL VL]\n"
+ "st1h { z28.h }, p5, [x19, #4, MUL VL]\n"
+ "st1h { z27.h }, p5, [x19, #5, MUL VL]\n"
+ "st1h { z26.h }, p5, [x19, #6, MUL VL]\n"
+ "st1h { z25.h }, p5, [x19, #7, MUL VL]\n"
+ "addvl x19, x19, #16\n"
+ "st1h { z23.h }, p5, [x19, #-8, MUL VL]\n"
+ "st1h { z22.h }, p5, [x19, #-7, MUL VL]\n"
+ "st1h { z21.h }, p5, [x19, #-6, MUL VL]\n"
+ "st1h { z20.h }, p5, [x19, #-5, MUL VL]\n"
+ "st1h { z19.h }, p5, [x19, #-4, MUL VL]\n"
+ "st1h { z18.h }, p5, [x19, #-3, MUL VL]\n"
+ "st1h { z17.h }, p5, [x19, #-2, MUL VL]\n"
+ "st1h { z16.h }, p5, [x19, #-1, MUL VL]\n"
"bgt 2b\n"
"3:" // Main row loop: Column loop skip
"cmp %x[height], #0x1\n"
@@ -131,7 +131,7 @@ void sme_transpose_interleave_16VL_2x2(uint16_t *out, const uint16_t *in, size_t
".inst 0xd503467f // SMSTOP\n"
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x20", "x21", "x22", "x23", "x24", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_2x2_fp32bf16.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_2x2_fp32bf16.hpp
index 080db1c5c1..11c3bcc253 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_2x2_fp32bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_2x2_fp32bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -43,134 +43,134 @@ void sme_transpose_interleave_16VL_2x2_fp32bf16(bfloat16 *out, const float *in,
".inst 0xd503477f // SMSTART ZA\n"
"ptrue p7.b\n"
"1:" // Main row loop: Head
- "mov x25, %x[in]\n"
- "add x24, x25, %x[in_stride]\n"
+ "mov x24, %x[in]\n"
+ "add x23, x24, %x[in_stride]\n"
"cmp %x[height], #0x1\n"
- "add %x[in], x24, %x[in_stride]\n"
- "mov x23, %x[out]\n"
- "csel x24, x24, %x[pad_row], GT\n"
+ "add %x[in], x23, %x[in_stride]\n"
+ "mov x22, %x[out]\n"
+ "csel x23, x23, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x2\n"
- "mov x22, %x[width]\n"
+ "mov x21, %x[width]\n"
"2:" // Main row loop: Column loop
- "mov x21, x22\n"
- "whilelt p1.s, XZR, x21\n"
- "ld1w { z16.s }, p1/Z, [x25]\n"
+ "mov x20, x21\n"
+ "whilelt p1.s, XZR, x20\n"
+ "ld1w { z16.s }, p1/Z, [x24]\n"
".inst 0x658abe00 // bfcvt z0.h, p7/M, z16.s\n"
- "decw x21\n"
- "whilelt p0.s, XZR, x21\n"
- "ld1w { z16.s }, p0/Z, [x25, #1, MUL VL]\n"
+ "decw x20\n"
+ "whilelt p0.s, XZR, x20\n"
+ "ld1w { z16.s }, p0/Z, [x24, #1, MUL VL]\n"
".inst 0x658abe1f // bfcvt z31.h, p7/M, z16.s\n"
- "decw x21\n"
- "whilelt p6.s, XZR, x21\n"
- "ld1w { z16.s }, p6/Z, [x25, #2, MUL VL]\n"
+ "decw x20\n"
+ "whilelt p6.s, XZR, x20\n"
+ "ld1w { z16.s }, p6/Z, [x24, #2, MUL VL]\n"
".inst 0x658abe1e // bfcvt z30.h, p7/M, z16.s\n"
- "decw x21\n"
- "whilelt p5.s, XZR, x21\n"
- "ld1w { z16.s }, p5/Z, [x25, #3, MUL VL]\n"
+ "decw x20\n"
+ "whilelt p5.s, XZR, x20\n"
+ "ld1w { z16.s }, p5/Z, [x24, #3, MUL VL]\n"
".inst 0x658abe1d // bfcvt z29.h, p7/M, z16.s\n"
- "decw x21\n"
- "whilelt p4.s, XZR, x21\n"
- "ld1w { z16.s }, p4/Z, [x25, #4, MUL VL]\n"
+ "decw x20\n"
+ "whilelt p4.s, XZR, x20\n"
+ "ld1w { z16.s }, p4/Z, [x24, #4, MUL VL]\n"
".inst 0x658abe1c // bfcvt z28.h, p7/M, z16.s\n"
- "decw x21\n"
- "whilelt p3.s, XZR, x21\n"
- "ld1w { z16.s }, p3/Z, [x25, #5, MUL VL]\n"
+ "decw x20\n"
+ "whilelt p3.s, XZR, x20\n"
+ "ld1w { z16.s }, p3/Z, [x24, #5, MUL VL]\n"
".inst 0x658abe1b // bfcvt z27.h, p7/M, z16.s\n"
- "decw x21\n"
- "whilelt p2.s, XZR, x21\n"
- "ld1w { z16.s }, p2/Z, [x25, #6, MUL VL]\n"
+ "decw x20\n"
+ "whilelt p2.s, XZR, x20\n"
+ "ld1w { z16.s }, p2/Z, [x24, #6, MUL VL]\n"
".inst 0x658abe1a // bfcvt z26.h, p7/M, z16.s\n"
- "decw x21\n"
- "ld1w { z16.s }, p1/Z, [x24]\n"
- "whilelt p1.s, XZR, x21\n"
+ "decw x20\n"
+ "ld1w { z16.s }, p1/Z, [x23]\n"
+ "whilelt p1.s, XZR, x20\n"
".inst 0x648abe00 // bfcvtnt z0.h, p7/M, z16.s\n"
- "decw x21\n"
- "ld1w { z16.s }, p1/Z, [x25, #7, MUL VL]\n"
- "addvl x25, x25, #16\n"
+ "decw x20\n"
+ "ld1w { z16.s }, p1/Z, [x24, #7, MUL VL]\n"
+ "addvl x24, x24, #16\n"
".inst 0x658abe19 // bfcvt z25.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p0/Z, [x24, #1, MUL VL]\n"
- "whilelt p0.s, XZR, x21\n"
- "decw x21\n"
+ "ld1w { z16.s }, p0/Z, [x23, #1, MUL VL]\n"
+ "whilelt p0.s, XZR, x20\n"
+ "decw x20\n"
".inst 0x648abe1f // bfcvtnt z31.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p0/Z, [x25, #-8, MUL VL]\n"
+ "ld1w { z16.s }, p0/Z, [x24, #-8, MUL VL]\n"
".inst 0x658abe18 // bfcvt z24.h, p7/M, z16.s\n"
- "mov x20, x23\n"
- "decw x22, ALL, MUL #16\n"
- "ld1w { z16.s }, p6/Z, [x24, #2, MUL VL]\n"
- "whilelt p6.s, XZR, x21\n"
- "decw x21\n"
+ "mov x19, x22\n"
+ "decw x21, ALL, MUL #16\n"
+ "ld1w { z16.s }, p6/Z, [x23, #2, MUL VL]\n"
+ "whilelt p6.s, XZR, x20\n"
+ "decw x20\n"
".inst 0x648abe1e // bfcvtnt z30.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p6/Z, [x25, #-7, MUL VL]\n"
+ "ld1w { z16.s }, p6/Z, [x24, #-7, MUL VL]\n"
".inst 0x658abe17 // bfcvt z23.h, p7/M, z16.s\n"
- "add x23, x23, %x[out_stride]\n"
- "ld1w { z16.s }, p5/Z, [x24, #3, MUL VL]\n"
- "whilelt p5.s, XZR, x21\n"
- "decw x21\n"
+ "add x22, x22, %x[out_stride]\n"
+ "ld1w { z16.s }, p5/Z, [x23, #3, MUL VL]\n"
+ "whilelt p5.s, XZR, x20\n"
+ "decw x20\n"
".inst 0x648abe1d // bfcvtnt z29.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p5/Z, [x25, #-6, MUL VL]\n"
+ "ld1w { z16.s }, p5/Z, [x24, #-6, MUL VL]\n"
".inst 0x658abe16 // bfcvt z22.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p4/Z, [x24, #4, MUL VL]\n"
- "whilelt p4.s, XZR, x21\n"
- "decw x21\n"
+ "ld1w { z16.s }, p4/Z, [x23, #4, MUL VL]\n"
+ "whilelt p4.s, XZR, x20\n"
+ "decw x20\n"
".inst 0x648abe1c // bfcvtnt z28.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p4/Z, [x25, #-5, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x24, #-5, MUL VL]\n"
".inst 0x658abe15 // bfcvt z21.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p3/Z, [x24, #5, MUL VL]\n"
- "whilelt p3.s, XZR, x21\n"
- "decw x21\n"
+ "ld1w { z16.s }, p3/Z, [x23, #5, MUL VL]\n"
+ "whilelt p3.s, XZR, x20\n"
+ "decw x20\n"
".inst 0x648abe1b // bfcvtnt z27.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p3/Z, [x25, #-4, MUL VL]\n"
+ "ld1w { z16.s }, p3/Z, [x24, #-4, MUL VL]\n"
".inst 0x658abe14 // bfcvt z20.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x24, #6, MUL VL]\n"
- "whilelt p2.s, XZR, x21\n"
- "decw x21\n"
+ "ld1w { z16.s }, p2/Z, [x23, #6, MUL VL]\n"
+ "whilelt p2.s, XZR, x20\n"
+ "decw x20\n"
".inst 0x648abe1a // bfcvtnt z26.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x25, #-3, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x24, #-3, MUL VL]\n"
".inst 0x658abe13 // bfcvt z19.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x24, #7, MUL VL]\n"
- "whilelt p1.s, XZR, x21\n"
- "decw x21\n"
+ "ld1w { z16.s }, p1/Z, [x23, #7, MUL VL]\n"
+ "whilelt p1.s, XZR, x20\n"
+ "decw x20\n"
".inst 0x648abe19 // bfcvtnt z25.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x25, #-2, MUL VL]\n"
- "addvl x24, x24, #16\n"
+ "ld1w { z16.s }, p1/Z, [x24, #-2, MUL VL]\n"
+ "addvl x23, x23, #16\n"
".inst 0x658abe12 // bfcvt z18.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p0/Z, [x24, #-8, MUL VL]\n"
- "whilelt p0.s, XZR, x21\n"
- "cmp x22, #0x0\n"
+ "ld1w { z16.s }, p0/Z, [x23, #-8, MUL VL]\n"
+ "whilelt p0.s, XZR, x20\n"
+ "cmp x21, #0x0\n"
".inst 0x648abe18 // bfcvtnt z24.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p0/Z, [x25, #-1, MUL VL]\n"
+ "ld1w { z16.s }, p0/Z, [x24, #-1, MUL VL]\n"
".inst 0x658abe11 // bfcvt z17.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p6/Z, [x24, #-7, MUL VL]\n"
+ "ld1w { z16.s }, p6/Z, [x23, #-7, MUL VL]\n"
".inst 0x648abe17 // bfcvtnt z23.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p5/Z, [x24, #-6, MUL VL]\n"
+ "ld1w { z16.s }, p5/Z, [x23, #-6, MUL VL]\n"
".inst 0x648abe16 // bfcvtnt z22.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p4/Z, [x24, #-5, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x23, #-5, MUL VL]\n"
".inst 0x648abe15 // bfcvtnt z21.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p3/Z, [x24, #-4, MUL VL]\n"
+ "ld1w { z16.s }, p3/Z, [x23, #-4, MUL VL]\n"
".inst 0x648abe14 // bfcvtnt z20.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x24, #-3, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x23, #-3, MUL VL]\n"
".inst 0x648abe13 // bfcvtnt z19.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x24, #-2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x23, #-2, MUL VL]\n"
".inst 0x648abe12 // bfcvtnt z18.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p0/Z, [x24, #-1, MUL VL]\n"
- "st1h { z0.h }, p7, [x20]\n"
+ "ld1w { z16.s }, p0/Z, [x23, #-1, MUL VL]\n"
+ "st1h { z0.h }, p7, [x19]\n"
".inst 0x648abe11 // bfcvtnt z17.h, p7/M, z16.s\n"
- "st1h { z31.h }, p7, [x20, #1, MUL VL]\n"
- "st1h { z30.h }, p7, [x20, #2, MUL VL]\n"
- "st1h { z29.h }, p7, [x20, #3, MUL VL]\n"
- "st1h { z28.h }, p7, [x20, #4, MUL VL]\n"
- "st1h { z27.h }, p7, [x20, #5, MUL VL]\n"
- "st1h { z26.h }, p7, [x20, #6, MUL VL]\n"
- "st1h { z25.h }, p7, [x20, #7, MUL VL]\n"
- "addvl x20, x20, #16\n"
- "st1h { z24.h }, p7, [x20, #-8, MUL VL]\n"
- "st1h { z23.h }, p7, [x20, #-7, MUL VL]\n"
- "st1h { z22.h }, p7, [x20, #-6, MUL VL]\n"
- "st1h { z21.h }, p7, [x20, #-5, MUL VL]\n"
- "st1h { z20.h }, p7, [x20, #-4, MUL VL]\n"
- "st1h { z19.h }, p7, [x20, #-3, MUL VL]\n"
- "st1h { z18.h }, p7, [x20, #-2, MUL VL]\n"
- "st1h { z17.h }, p7, [x20, #-1, MUL VL]\n"
+ "st1h { z31.h }, p7, [x19, #1, MUL VL]\n"
+ "st1h { z30.h }, p7, [x19, #2, MUL VL]\n"
+ "st1h { z29.h }, p7, [x19, #3, MUL VL]\n"
+ "st1h { z28.h }, p7, [x19, #4, MUL VL]\n"
+ "st1h { z27.h }, p7, [x19, #5, MUL VL]\n"
+ "st1h { z26.h }, p7, [x19, #6, MUL VL]\n"
+ "st1h { z25.h }, p7, [x19, #7, MUL VL]\n"
+ "addvl x19, x19, #16\n"
+ "st1h { z24.h }, p7, [x19, #-8, MUL VL]\n"
+ "st1h { z23.h }, p7, [x19, #-7, MUL VL]\n"
+ "st1h { z22.h }, p7, [x19, #-6, MUL VL]\n"
+ "st1h { z21.h }, p7, [x19, #-5, MUL VL]\n"
+ "st1h { z20.h }, p7, [x19, #-4, MUL VL]\n"
+ "st1h { z19.h }, p7, [x19, #-3, MUL VL]\n"
+ "st1h { z18.h }, p7, [x19, #-2, MUL VL]\n"
+ "st1h { z17.h }, p7, [x19, #-1, MUL VL]\n"
"bgt 2b\n"
"3:" // Main row loop: Column loop skip
"cmp %x[height], #0x1\n"
@@ -179,7 +179,7 @@ void sme_transpose_interleave_16VL_2x2_fp32bf16(bfloat16 *out, const float *in,
".inst 0xd503467f // SMSTOP\n"
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
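
[Editor's note] Each hunk in these transform files is mechanical: every general-purpose scratch register is renumbered down by one (x20..x26 become x19..x25, loop counters and row pointers alike), the clobber list swaps its highest register out for "x19", and the copyright header drops back to the plain 2022 year with the older MIT-notice line wrapping. The invariant each hunk preserves is that the registers named in the asm body and the registers named in the clobber list stay in sync. A hypothetical minimal example (not from the library) of that invariant:

    #include <cstddef>
    #include <cstdint>

    // Sketch only: a scratch register used inside the asm body (here w19,
    // the 32-bit view of x19) must also appear in the clobber list, so a
    // renumbering like the one in these hunks has to touch both together.
    void copy_words(const uint32_t *in, uint32_t *out, size_t n) {
        // Assumes n > 0.
        __asm__ volatile(
            "1:\n"
            "ldr w19, [%x[in]], #4\n"
            "str w19, [%x[out]], #4\n"
            "subs %x[n], %x[n], #1\n"
            "b.ne 1b\n"
            : [in] "+r" (in), [out] "+r" (out), [n] "+r" (n)
            :
            : "cc", "memory", "x19");
    }

Because x19 is callee-saved under AAPCS64, listing it as clobbered makes the compiler save and restore it around the asm block.
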
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL.hpp
index 7e496095f4..3e0da36359 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -39,80 +39,80 @@ void sme_transpose_interleave_1VL(uint16_t *out, const uint16_t *in, size_t widt
"ptrue p1.b\n"
"blt 6f\n"
"1:" // Main row loop: Head
- "mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
+ "mov x25, %x[in]\n"
"add x24, x25, %x[in_stride]\n"
- "mov x23, %x[width]\n"
- "cnth x21, ALL, MUL #4\n"
- "add x20, x24, %x[in_stride]\n"
- "cmp x23, x21\n"
- "add %x[in], x20, %x[in_stride]\n"
- "mov x22, %x[out]\n"
+ "add x23, x24, %x[in_stride]\n"
+ "mov x22, %x[width]\n"
+ "cnth x20, ALL, MUL #4\n"
+ "add x19, x23, %x[in_stride]\n"
+ "cmp x22, x20\n"
+ "add %x[in], x19, %x[in_stride]\n"
+ "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1h { z31.h }, p1/Z, [x26]\n"
- "sub x23, x23, x21\n"
- "cmp x23, x21\n"
- "ld1h { z30.h }, p1/Z, [x26, #1, MUL VL]\n"
- "ld1h { z29.h }, p1/Z, [x26, #2, MUL VL]\n"
- "ld1h { z28.h }, p1/Z, [x26, #3, MUL VL]\n"
- "addvl x26, x26, #4\n"
- "ld1h { z27.h }, p1/Z, [x25]\n"
- "ld1h { z26.h }, p1/Z, [x25, #1, MUL VL]\n"
- "ld1h { z25.h }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1h { z24.h }, p1/Z, [x25, #3, MUL VL]\n"
+ "ld1h { z31.h }, p1/Z, [x25]\n"
+ "sub x22, x22, x20\n"
+ "cmp x22, x20\n"
+ "ld1h { z30.h }, p1/Z, [x25, #1, MUL VL]\n"
+ "ld1h { z29.h }, p1/Z, [x25, #2, MUL VL]\n"
+ "ld1h { z28.h }, p1/Z, [x25, #3, MUL VL]\n"
"addvl x25, x25, #4\n"
- "ld1h { z23.h }, p1/Z, [x24]\n"
- "ld1h { z22.h }, p1/Z, [x24, #1, MUL VL]\n"
- "ld1h { z21.h }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1h { z20.h }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z27.h }, p1/Z, [x24]\n"
+ "ld1h { z26.h }, p1/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z25.h }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z24.h }, p1/Z, [x24, #3, MUL VL]\n"
"addvl x24, x24, #4\n"
- "ld1h { z19.h }, p1/Z, [x20]\n"
- "ld1h { z18.h }, p1/Z, [x20, #1, MUL VL]\n"
- "ld1h { z17.h }, p1/Z, [x20, #2, MUL VL]\n"
- "ld1h { z16.h }, p1/Z, [x20, #3, MUL VL]\n"
- "st1h { z31.h }, p1, [x22]\n"
- "addvl x20, x20, #4\n"
- "st1h { z27.h }, p1, [x22, #1, MUL VL]\n"
- "st1h { z23.h }, p1, [x22, #2, MUL VL]\n"
- "st1h { z19.h }, p1, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z30.h }, p1, [x22]\n"
- "st1h { z26.h }, p1, [x22, #1, MUL VL]\n"
- "st1h { z22.h }, p1, [x22, #2, MUL VL]\n"
- "st1h { z18.h }, p1, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z29.h }, p1, [x22]\n"
- "st1h { z25.h }, p1, [x22, #1, MUL VL]\n"
- "st1h { z21.h }, p1, [x22, #2, MUL VL]\n"
- "st1h { z17.h }, p1, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z28.h }, p1, [x22]\n"
- "st1h { z24.h }, p1, [x22, #1, MUL VL]\n"
- "st1h { z20.h }, p1, [x22, #2, MUL VL]\n"
- "st1h { z16.h }, p1, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "ld1h { z23.h }, p1/Z, [x23]\n"
+ "ld1h { z22.h }, p1/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z21.h }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z20.h }, p1/Z, [x23, #3, MUL VL]\n"
+ "addvl x23, x23, #4\n"
+ "ld1h { z19.h }, p1/Z, [x19]\n"
+ "ld1h { z18.h }, p1/Z, [x19, #1, MUL VL]\n"
+ "ld1h { z17.h }, p1/Z, [x19, #2, MUL VL]\n"
+ "ld1h { z16.h }, p1/Z, [x19, #3, MUL VL]\n"
+ "st1h { z31.h }, p1, [x21]\n"
+ "addvl x19, x19, #4\n"
+ "st1h { z27.h }, p1, [x21, #1, MUL VL]\n"
+ "st1h { z23.h }, p1, [x21, #2, MUL VL]\n"
+ "st1h { z19.h }, p1, [x21, #3, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
+ "st1h { z30.h }, p1, [x21]\n"
+ "st1h { z26.h }, p1, [x21, #1, MUL VL]\n"
+ "st1h { z22.h }, p1, [x21, #2, MUL VL]\n"
+ "st1h { z18.h }, p1, [x21, #3, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
+ "st1h { z29.h }, p1, [x21]\n"
+ "st1h { z25.h }, p1, [x21, #1, MUL VL]\n"
+ "st1h { z21.h }, p1, [x21, #2, MUL VL]\n"
+ "st1h { z17.h }, p1, [x21, #3, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
+ "st1h { z28.h }, p1, [x21]\n"
+ "st1h { z24.h }, p1, [x21, #1, MUL VL]\n"
+ "st1h { z20.h }, p1, [x21, #2, MUL VL]\n"
+ "st1h { z16.h }, p1, [x21, #3, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x23, 5f\n"
+ "cbz x22, 5f\n"
"4:" // Main row loop: Column loop
- "whilelt p0.h, XZR, x23\n"
- "dech x23\n"
- "ld1h { z19.h }, p0/Z, [x26]\n"
- "cmp x23, #0x0\n"
- "addvl x26, x26, #1\n"
- "ld1h { z18.h }, p0/Z, [x25]\n"
+ "whilelt p0.h, XZR, x22\n"
+ "dech x22\n"
+ "ld1h { z19.h }, p0/Z, [x25]\n"
+ "cmp x22, #0x0\n"
"addvl x25, x25, #1\n"
- "ld1h { z17.h }, p0/Z, [x24]\n"
+ "ld1h { z18.h }, p0/Z, [x24]\n"
"addvl x24, x24, #1\n"
- "ld1h { z16.h }, p0/Z, [x20]\n"
- "addvl x20, x20, #1\n"
- "st1h { z19.h }, p1, [x22]\n"
- "st1h { z18.h }, p1, [x22, #1, MUL VL]\n"
- "st1h { z17.h }, p1, [x22, #2, MUL VL]\n"
- "st1h { z16.h }, p1, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "ld1h { z17.h }, p0/Z, [x23]\n"
+ "addvl x23, x23, #1\n"
+ "ld1h { z16.h }, p0/Z, [x19]\n"
+ "addvl x19, x19, #1\n"
+ "st1h { z19.h }, p1, [x21]\n"
+ "st1h { z18.h }, p1, [x21, #1, MUL VL]\n"
+ "st1h { z17.h }, p1, [x21, #2, MUL VL]\n"
+ "st1h { z16.h }, p1, [x21, #3, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
"cmp %x[height], #0x4\n"
@@ -121,41 +121,41 @@ void sme_transpose_interleave_1VL(uint16_t *out, const uint16_t *in, size_t widt
"cbz %x[height], 12f\n"
"6:" // Main loop skip
"7:" // Tail row loop: Head
- "mov x21, %x[width]\n"
- "cnth x20, ALL, MUL #4\n"
- "mov x26, %x[in]\n"
- "cmp x21, x20\n"
- "add %x[in], x26, %x[in_stride]\n"
- "mov x22, %x[out]\n"
+ "mov x20, %x[width]\n"
+ "cnth x19, ALL, MUL #4\n"
+ "mov x25, %x[in]\n"
+ "cmp x20, x19\n"
+ "add %x[in], x25, %x[in_stride]\n"
+ "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x1\n"
"blt 9f\n"
"8:" // Tail row loop: Unroll column loop
- "ld1h { z19.h }, p1/Z, [x26]\n"
- "sub x21, x21, x20\n"
- "cmp x21, x20\n"
- "ld1h { z18.h }, p1/Z, [x26, #1, MUL VL]\n"
- "ld1h { z17.h }, p1/Z, [x26, #2, MUL VL]\n"
- "ld1h { z16.h }, p1/Z, [x26, #3, MUL VL]\n"
- "st1h { z19.h }, p1, [x22]\n"
- "add x22, x22, %x[out_stride]\n"
- "addvl x26, x26, #4\n"
- "st1h { z18.h }, p1, [x22]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z17.h }, p1, [x22]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z16.h }, p1, [x22]\n"
- "add x22, x22, %x[out_stride]\n"
+ "ld1h { z19.h }, p1/Z, [x25]\n"
+ "sub x20, x20, x19\n"
+ "cmp x20, x19\n"
+ "ld1h { z18.h }, p1/Z, [x25, #1, MUL VL]\n"
+ "ld1h { z17.h }, p1/Z, [x25, #2, MUL VL]\n"
+ "ld1h { z16.h }, p1/Z, [x25, #3, MUL VL]\n"
+ "st1h { z19.h }, p1, [x21]\n"
+ "add x21, x21, %x[out_stride]\n"
+ "addvl x25, x25, #4\n"
+ "st1h { z18.h }, p1, [x21]\n"
+ "add x21, x21, %x[out_stride]\n"
+ "st1h { z17.h }, p1, [x21]\n"
+ "add x21, x21, %x[out_stride]\n"
+ "st1h { z16.h }, p1, [x21]\n"
+ "add x21, x21, %x[out_stride]\n"
"bge 8b\n"
"9:" // Tail row loop: Unroll column loop skip
- "cbz x21, 11f\n"
+ "cbz x20, 11f\n"
"10:" // Tail row loop: Column loop
- "whilelt p0.h, XZR, x21\n"
- "dech x21\n"
- "ld1h { z16.h }, p0/Z, [x26]\n"
- "st1h { z16.h }, p1, [x22]\n"
- "cmp x21, #0x0\n"
- "addvl x26, x26, #1\n"
- "add x22, x22, %x[out_stride]\n"
+ "whilelt p0.h, XZR, x20\n"
+ "dech x20\n"
+ "ld1h { z16.h }, p0/Z, [x25]\n"
+ "st1h { z16.h }, p1, [x21]\n"
+ "cmp x20, #0x0\n"
+ "addvl x25, x25, #1\n"
+ "add x21, x21, %x[out_stride]\n"
"bgt 10b\n"
"11:" // Tail row loop: Column loop skip
"cmp %x[height], #0x1\n"
@@ -165,7 +165,7 @@ void sme_transpose_interleave_1VL(uint16_t *out, const uint16_t *in, size_t widt
".inst 0xd503467f // SMSTOP\n"
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
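
[Editor's note] Reading the main loop of this 1VL transform: four input row pointers advance in lockstep, and each output tile packs the same vector-length column block from all four rows back to back before the output pointer steps by out_stride; the whilelt-predicated column loop then mops up the ragged tail. A scalar sketch of that layout, under the assumption that strides are in elements rather than bytes:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    // Scalar reference for the tile layout the 1VL main loop appears to
    // produce (assumed semantics, full-height case only; the real kernel
    // also has tail paths for fewer than four rows).
    void transpose_interleave_1vl_ref(uint16_t *out, const uint16_t *in,
                                      size_t width, size_t in_stride,
                                      size_t out_stride, size_t vl) {
        for (size_t col = 0; col < width; col += vl) {
            const size_t n = std::min(vl, width - col);
            for (size_t row = 0; row < 4; row++) {
                const uint16_t *src = in + row * in_stride + col;
                std::copy(src, src + n, out + row * vl);  // row's block in the tile
            }
            out += out_stride;  // next column block starts a new tile
        }
    }
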
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_1x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_1x4.hpp
index 45d3c0729e..4c5d3d31ec 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_1x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_1x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -43,69 +43,69 @@ void sme_transpose_interleave_1VL_1x4(uint8_t *out, const uint8_t *in, size_t wi
".inst 0xd503477f // SMSTART ZA\n"
"ptrue p1.b\n"
"1:" // Main row loop: Head
- "mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
+ "mov x25, %x[in]\n"
"add x24, x25, %x[in_stride]\n"
"add x23, x24, %x[in_stride]\n"
+ "add x22, x23, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x23, %x[in_stride]\n"
- "csel x23, x23, %x[pad_row], GT\n"
- "csel x24, x24, %x[pad_row], GE\n"
+ "add %x[in], x22, %x[in_stride]\n"
+ "csel x22, x22, %x[pad_row], GT\n"
+ "csel x23, x23, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "mov x22, %x[width]\n"
- "cntb x21\n"
- "csel x25, x25, %x[pad_row], GT\n"
- "cmp x22, x21\n"
- "mov x20, %x[out]\n"
+ "mov x21, %x[width]\n"
+ "cntb x20\n"
+ "csel x24, x24, %x[pad_row], GT\n"
+ "cmp x21, x20\n"
+ "mov x19, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1b { z17.b }, p1/Z, [x26]\n"
- "sub x22, x22, x21\n"
- "cmp x22, x21\n"
- "ld1b { z18.b }, p1/Z, [x25]\n"
- "addvl x26, x26, #1\n"
+ "ld1b { z17.b }, p1/Z, [x25]\n"
+ "sub x21, x21, x20\n"
+ "cmp x21, x20\n"
+ "ld1b { z18.b }, p1/Z, [x24]\n"
"addvl x25, x25, #1\n"
- "ld1b { z16.b }, p1/Z, [x24]\n"
- "zip1 z20.b, z17.b, z16.b\n"
- "zip2 z19.b, z17.b, z16.b\n"
"addvl x24, x24, #1\n"
"ld1b { z16.b }, p1/Z, [x23]\n"
+ "zip1 z20.b, z17.b, z16.b\n"
+ "zip2 z19.b, z17.b, z16.b\n"
+ "addvl x23, x23, #1\n"
+ "ld1b { z16.b }, p1/Z, [x22]\n"
"zip1 z17.b, z18.b, z16.b\n"
"zip2 z18.b, z18.b, z16.b\n"
- "addvl x23, x23, #1\n"
+ "addvl x22, x22, #1\n"
"zip1 z16.b, z20.b, z17.b\n"
- "st1b { z16.b }, p1, [x20]\n"
- "add x20, x20, %x[out_stride]\n"
+ "st1b { z16.b }, p1, [x19]\n"
+ "add x19, x19, %x[out_stride]\n"
"zip2 z16.b, z20.b, z17.b\n"
- "st1b { z16.b }, p1, [x20]\n"
- "add x20, x20, %x[out_stride]\n"
+ "st1b { z16.b }, p1, [x19]\n"
+ "add x19, x19, %x[out_stride]\n"
"zip1 z17.b, z19.b, z18.b\n"
"zip2 z16.b, z19.b, z18.b\n"
- "st1b { z17.b }, p1, [x20]\n"
- "add x20, x20, %x[out_stride]\n"
- "st1b { z16.b }, p1, [x20]\n"
- "add x20, x20, %x[out_stride]\n"
+ "st1b { z17.b }, p1, [x19]\n"
+ "add x19, x19, %x[out_stride]\n"
+ "st1b { z16.b }, p1, [x19]\n"
+ "add x19, x19, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x22, 5f\n"
+ "cbz x21, 5f\n"
"4:" // Main row loop: Column loop
- "whilelt p0.b, XZR, x22\n"
- "ld1b { z17.b }, p0/Z, [x26]\n"
- "decw x22\n"
- "ld1b { z18.b }, p0/Z, [x25]\n"
- "cmp x22, #0x0\n"
- "incd x26, ALL, MUL #2\n"
- "ld1b { z16.b }, p0/Z, [x24]\n"
- "zip1 z17.b, z17.b, z16.b\n"
+ "whilelt p0.b, XZR, x21\n"
+ "ld1b { z17.b }, p0/Z, [x25]\n"
+ "decw x21\n"
+ "ld1b { z18.b }, p0/Z, [x24]\n"
+ "cmp x21, #0x0\n"
"incd x25, ALL, MUL #2\n"
- "incd x24, ALL, MUL #2\n"
"ld1b { z16.b }, p0/Z, [x23]\n"
- "zip1 z16.b, z18.b, z16.b\n"
+ "zip1 z17.b, z17.b, z16.b\n"
+ "incd x24, ALL, MUL #2\n"
"incd x23, ALL, MUL #2\n"
+ "ld1b { z16.b }, p0/Z, [x22]\n"
+ "zip1 z16.b, z18.b, z16.b\n"
+ "incd x22, ALL, MUL #2\n"
"zip1 z16.b, z17.b, z16.b\n"
- "st1b { z16.b }, p1, [x20]\n"
- "add x20, x20, %x[out_stride]\n"
+ "st1b { z16.b }, p1, [x19]\n"
+ "add x19, x19, %x[out_stride]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
"cmp %x[height], #0x1\n"
@@ -114,7 +114,7 @@ void sme_transpose_interleave_1VL_1x4(uint8_t *out, const uint8_t *in, size_t wi
".inst 0xd503467f // SMSTOP\n"
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
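
[Editor's note] The 1x4 variant above interleaves at byte granularity through a two-level zip tree: rows 0/2 and rows 1/3 are zipped pairwise, and zipping those results gives four consecutive output bytes holding one byte from each row. A scalar sketch of the assumed element order:

    #include <cstddef>
    #include <cstdint>

    // What the zip1/zip2 tree computes, in scalar form (a sketch):
    // zip(r0, r2) and zip(r1, r3), zipped again, repeats r0, r1, r2, r3.
    void interleave_1x4_ref(uint8_t *out, const uint8_t *r0, const uint8_t *r1,
                            const uint8_t *r2, const uint8_t *r3, size_t n) {
        for (size_t i = 0; i < n; i++) {
            *out++ = r0[i];
            *out++ = r1[i];
            *out++ = r2[i];
            *out++ = r3[i];
        }
    }

This byte-quadruple layout is what lets the downstream dot-product kernels consume four rows with a single multiply-accumulate per lane group.
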
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_2x2.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_2x2.hpp
index dfa0167c00..3fc4b06835 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_2x2.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_2x2.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -45,72 +45,72 @@ void sme_transpose_interleave_1VL_2x2(uint16_t *out, const uint16_t *in, size_t
"ptrue p1.b\n"
"blt 6f\n"
"1:" // Main row loop: Head
- "mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
+ "mov x25, %x[in]\n"
"add x24, x25, %x[in_stride]\n"
- "mov x23, %x[width]\n"
- "cnth x21, ALL, MUL #2\n"
- "add x20, x24, %x[in_stride]\n"
- "cmp x23, x21\n"
- "add %x[in], x20, %x[in_stride]\n"
- "mov x22, %x[out]\n"
+ "add x23, x24, %x[in_stride]\n"
+ "mov x22, %x[width]\n"
+ "cnth x20, ALL, MUL #2\n"
+ "add x19, x23, %x[in_stride]\n"
+ "cmp x22, x20\n"
+ "add %x[in], x19, %x[in_stride]\n"
+ "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1h { z17.h }, p1/Z, [x26]\n"
- "sub x23, x23, x21\n"
- "cmp x23, x21\n"
- "ld1h { z16.h }, p1/Z, [x25]\n"
+ "ld1h { z17.h }, p1/Z, [x25]\n"
+ "sub x22, x22, x20\n"
+ "cmp x22, x20\n"
+ "ld1h { z16.h }, p1/Z, [x24]\n"
"zip1 z24.h, z17.h, z16.h\n"
"zip2 z23.h, z17.h, z16.h\n"
- "ld1h { z17.h }, p1/Z, [x24]\n"
- "ld1h { z16.h }, p1/Z, [x20]\n"
+ "ld1h { z17.h }, p1/Z, [x23]\n"
+ "ld1h { z16.h }, p1/Z, [x19]\n"
"zip1 z22.h, z17.h, z16.h\n"
"zip2 z21.h, z17.h, z16.h\n"
- "ld1h { z17.h }, p1/Z, [x26, #1, MUL VL]\n"
- "addvl x26, x26, #2\n"
- "ld1h { z16.h }, p1/Z, [x25, #1, MUL VL]\n"
- "zip1 z20.h, z17.h, z16.h\n"
+ "ld1h { z17.h }, p1/Z, [x25, #1, MUL VL]\n"
"addvl x25, x25, #2\n"
- "zip2 z19.h, z17.h, z16.h\n"
- "ld1h { z18.h }, p1/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z16.h }, p1/Z, [x24, #1, MUL VL]\n"
+ "zip1 z20.h, z17.h, z16.h\n"
"addvl x24, x24, #2\n"
- "ld1h { z16.h }, p1/Z, [x20, #1, MUL VL]\n"
- "st1h { z24.h }, p1, [x22]\n"
+ "zip2 z19.h, z17.h, z16.h\n"
+ "ld1h { z18.h }, p1/Z, [x23, #1, MUL VL]\n"
+ "addvl x23, x23, #2\n"
+ "ld1h { z16.h }, p1/Z, [x19, #1, MUL VL]\n"
+ "st1h { z24.h }, p1, [x21]\n"
"zip1 z17.h, z18.h, z16.h\n"
- "addvl x20, x20, #2\n"
- "st1h { z22.h }, p1, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "addvl x19, x19, #2\n"
+ "st1h { z22.h }, p1, [x21, #1, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
"zip2 z16.h, z18.h, z16.h\n"
- "st1h { z23.h }, p1, [x22]\n"
- "st1h { z21.h }, p1, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z20.h }, p1, [x22]\n"
- "st1h { z17.h }, p1, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z19.h }, p1, [x22]\n"
- "st1h { z16.h }, p1, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1h { z23.h }, p1, [x21]\n"
+ "st1h { z21.h }, p1, [x21, #1, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
+ "st1h { z20.h }, p1, [x21]\n"
+ "st1h { z17.h }, p1, [x21, #1, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
+ "st1h { z19.h }, p1, [x21]\n"
+ "st1h { z16.h }, p1, [x21, #1, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x23, 5f\n"
+ "cbz x22, 5f\n"
"4:" // Main row loop: Column loop
- "whilelt p0.h, XZR, x23\n"
- "ld1h { z17.h }, p0/Z, [x26]\n"
- "decw x23\n"
- "ld1h { z16.h }, p0/Z, [x25]\n"
- "cmp x23, #0x0\n"
- "incd x26, ALL, MUL #4\n"
- "zip1 z18.h, z17.h, z16.h\n"
- "ld1h { z17.h }, p0/Z, [x24]\n"
+ "whilelt p0.h, XZR, x22\n"
+ "ld1h { z17.h }, p0/Z, [x25]\n"
+ "decw x22\n"
+ "ld1h { z16.h }, p0/Z, [x24]\n"
+ "cmp x22, #0x0\n"
"incd x25, ALL, MUL #4\n"
+ "zip1 z18.h, z17.h, z16.h\n"
+ "ld1h { z17.h }, p0/Z, [x23]\n"
"incd x24, ALL, MUL #4\n"
- "ld1h { z16.h }, p0/Z, [x20]\n"
- "incd x20, ALL, MUL #4\n"
+ "incd x23, ALL, MUL #4\n"
+ "ld1h { z16.h }, p0/Z, [x19]\n"
+ "incd x19, ALL, MUL #4\n"
"zip1 z16.h, z17.h, z16.h\n"
- "st1h { z18.h }, p1, [x22]\n"
- "st1h { z16.h }, p1, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1h { z18.h }, p1, [x21]\n"
+ "st1h { z16.h }, p1, [x21, #1, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
"cmp %x[height], #0x4\n"
@@ -119,52 +119,52 @@ void sme_transpose_interleave_1VL_2x2(uint16_t *out, const uint16_t *in, size_t
"cbz %x[height], 12f\n"
"6:" // Main loop skip
"7:" // Tail row loop: Head
- "mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
+ "mov x25, %x[in]\n"
+ "add x24, x25, %x[in_stride]\n"
"cmp %x[height], #0x1\n"
- "mov x21, %x[width]\n"
- "cnth x20, ALL, MUL #2\n"
- "add %x[in], x25, %x[in_stride]\n"
- "csel x25, x25, %x[pad_row], GT\n"
- "cmp x21, x20\n"
- "mov x22, %x[out]\n"
+ "mov x20, %x[width]\n"
+ "cnth x19, ALL, MUL #2\n"
+ "add %x[in], x24, %x[in_stride]\n"
+ "csel x24, x24, %x[pad_row], GT\n"
+ "cmp x20, x19\n"
+ "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x2\n"
"blt 9f\n"
"8:" // Tail row loop: Unroll column loop
- "ld1h { z18.h }, p1/Z, [x26]\n"
- "sub x21, x21, x20\n"
- "cmp x21, x20\n"
- "ld1h { z16.h }, p1/Z, [x25]\n"
+ "ld1h { z18.h }, p1/Z, [x25]\n"
+ "sub x20, x20, x19\n"
+ "cmp x20, x19\n"
+ "ld1h { z16.h }, p1/Z, [x24]\n"
"zip1 z17.h, z18.h, z16.h\n"
"zip2 z19.h, z18.h, z16.h\n"
- "ld1h { z18.h }, p1/Z, [x26, #1, MUL VL]\n"
- "addvl x26, x26, #2\n"
- "ld1h { z16.h }, p1/Z, [x25, #1, MUL VL]\n"
- "st1h { z17.h }, p1, [x22]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip1 z17.h, z18.h, z16.h\n"
- "st1h { z19.h }, p1, [x22]\n"
- "add x22, x22, %x[out_stride]\n"
+ "ld1h { z18.h }, p1/Z, [x25, #1, MUL VL]\n"
"addvl x25, x25, #2\n"
+ "ld1h { z16.h }, p1/Z, [x24, #1, MUL VL]\n"
+ "st1h { z17.h }, p1, [x21]\n"
+ "add x21, x21, %x[out_stride]\n"
+ "zip1 z17.h, z18.h, z16.h\n"
+ "st1h { z19.h }, p1, [x21]\n"
+ "add x21, x21, %x[out_stride]\n"
+ "addvl x24, x24, #2\n"
"zip2 z16.h, z18.h, z16.h\n"
- "st1h { z17.h }, p1, [x22]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z16.h }, p1, [x22]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1h { z17.h }, p1, [x21]\n"
+ "add x21, x21, %x[out_stride]\n"
+ "st1h { z16.h }, p1, [x21]\n"
+ "add x21, x21, %x[out_stride]\n"
"bge 8b\n"
"9:" // Tail row loop: Unroll column loop skip
- "cbz x21, 11f\n"
+ "cbz x20, 11f\n"
"10:" // Tail row loop: Column loop
- "whilelt p0.h, XZR, x21\n"
- "ld1h { z17.h }, p0/Z, [x26]\n"
- "decw x21\n"
- "ld1h { z16.h }, p0/Z, [x25]\n"
- "cmp x21, #0x0\n"
- "incd x26, ALL, MUL #4\n"
- "zip1 z16.h, z17.h, z16.h\n"
+ "whilelt p0.h, XZR, x20\n"
+ "ld1h { z17.h }, p0/Z, [x25]\n"
+ "decw x20\n"
+ "ld1h { z16.h }, p0/Z, [x24]\n"
+ "cmp x20, #0x0\n"
"incd x25, ALL, MUL #4\n"
- "st1h { z16.h }, p1, [x22]\n"
- "add x22, x22, %x[out_stride]\n"
+ "zip1 z16.h, z17.h, z16.h\n"
+ "incd x24, ALL, MUL #4\n"
+ "st1h { z16.h }, p1, [x21]\n"
+ "add x21, x21, %x[out_stride]\n"
"bgt 10b\n"
"11:" // Tail row loop: Column loop skip
"cmp %x[height], #0x1\n"
@@ -174,7 +174,7 @@ void sme_transpose_interleave_1VL_2x2(uint16_t *out, const uint16_t *in, size_t
".inst 0xd503467f // SMSTOP\n"
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_2x2_fp32bf16.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_2x2_fp32bf16.hpp
index 72e7b0c99a..d8fa686f21 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_2x2_fp32bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_2x2_fp32bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -45,90 +45,90 @@ void sme_transpose_interleave_1VL_2x2_fp32bf16(bfloat16 *out, const float *in, s
"ptrue p1.b\n"
"blt 6f\n"
"1:" // Main row loop: Head
- "mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
+ "mov x25, %x[in]\n"
"add x24, x25, %x[in_stride]\n"
- "mov x23, %x[width]\n"
- "cnth x21, ALL, MUL #2\n"
- "add x20, x24, %x[in_stride]\n"
- "cmp x23, x21\n"
- "add %x[in], x20, %x[in_stride]\n"
- "mov x22, %x[out]\n"
+ "add x23, x24, %x[in_stride]\n"
+ "mov x22, %x[width]\n"
+ "cnth x20, ALL, MUL #2\n"
+ "add x19, x23, %x[in_stride]\n"
+ "cmp x22, x20\n"
+ "add %x[in], x19, %x[in_stride]\n"
+ "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1w { z16.s }, p1/Z, [x26]\n"
+ "ld1w { z16.s }, p1/Z, [x25]\n"
".inst 0x658aa618 // bfcvt z24.h, p1/M, z16.s\n"
- "sub x23, x23, x21\n"
- "cmp x23, x21\n"
- "ld1w { z16.s }, p1/Z, [x24]\n"
+ "sub x22, x22, x20\n"
+ "cmp x22, x20\n"
+ "ld1w { z16.s }, p1/Z, [x23]\n"
".inst 0x658aa617 // bfcvt z23.h, p1/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x26, #1, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x25, #1, MUL VL]\n"
".inst 0x658aa616 // bfcvt z22.h, p1/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x23, #1, MUL VL]\n"
".inst 0x658aa615 // bfcvt z21.h, p1/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x26, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x25, #2, MUL VL]\n"
".inst 0x658aa614 // bfcvt z20.h, p1/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x23, #2, MUL VL]\n"
".inst 0x658aa613 // bfcvt z19.h, p1/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x26, #3, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x25, #3, MUL VL]\n"
".inst 0x658aa612 // bfcvt z18.h, p1/M, z16.s\n"
- "addvl x26, x26, #4\n"
- "ld1w { z16.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "addvl x25, x25, #4\n"
+ "ld1w { z16.s }, p1/Z, [x23, #3, MUL VL]\n"
".inst 0x658aa611 // bfcvt z17.h, p1/M, z16.s\n"
- "addvl x24, x24, #4\n"
- "ld1w { z16.s }, p1/Z, [x25]\n"
+ "addvl x23, x23, #4\n"
+ "ld1w { z16.s }, p1/Z, [x24]\n"
".inst 0x648aa618 // bfcvtnt z24.h, p1/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
+ "ld1w { z16.s }, p1/Z, [x19]\n"
".inst 0x648aa617 // bfcvtnt z23.h, p1/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x25, #1, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x24, #1, MUL VL]\n"
".inst 0x648aa616 // bfcvtnt z22.h, p1/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x20, #1, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x19, #1, MUL VL]\n"
".inst 0x648aa615 // bfcvtnt z21.h, p1/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x25, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x24, #2, MUL VL]\n"
".inst 0x648aa614 // bfcvtnt z20.h, p1/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x19, #2, MUL VL]\n"
".inst 0x648aa613 // bfcvtnt z19.h, p1/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x25, #3, MUL VL]\n"
- "addvl x25, x25, #4\n"
+ "ld1w { z16.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "addvl x24, x24, #4\n"
".inst 0x648aa612 // bfcvtnt z18.h, p1/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x20, #3, MUL VL]\n"
- "st1h { z24.h }, p1, [x22]\n"
- "addvl x20, x20, #4\n"
+ "ld1w { z16.s }, p1/Z, [x19, #3, MUL VL]\n"
+ "st1h { z24.h }, p1, [x21]\n"
+ "addvl x19, x19, #4\n"
".inst 0x648aa611 // bfcvtnt z17.h, p1/M, z16.s\n"
- "st1h { z23.h }, p1, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z22.h }, p1, [x22]\n"
- "st1h { z21.h }, p1, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z20.h }, p1, [x22]\n"
- "st1h { z19.h }, p1, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z18.h }, p1, [x22]\n"
- "st1h { z17.h }, p1, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1h { z23.h }, p1, [x21, #1, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
+ "st1h { z22.h }, p1, [x21]\n"
+ "st1h { z21.h }, p1, [x21, #1, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
+ "st1h { z20.h }, p1, [x21]\n"
+ "st1h { z19.h }, p1, [x21, #1, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
+ "st1h { z18.h }, p1, [x21]\n"
+ "st1h { z17.h }, p1, [x21, #1, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x23, 5f\n"
+ "cbz x22, 5f\n"
"4:" // Main row loop: Column loop
- "whilelt p0.s, XZR, x23\n"
- "ld1w { z16.s }, p0/Z, [x26]\n"
+ "whilelt p0.s, XZR, x22\n"
+ "ld1w { z16.s }, p0/Z, [x25]\n"
".inst 0x658aa612 // bfcvt z18.h, p1/M, z16.s\n"
- "decw x23\n"
- "ld1w { z16.s }, p0/Z, [x24]\n"
+ "decw x22\n"
+ "ld1w { z16.s }, p0/Z, [x23]\n"
".inst 0x658aa611 // bfcvt z17.h, p1/M, z16.s\n"
- "cmp x23, #0x0\n"
- "addvl x26, x26, #1\n"
- "ld1w { z16.s }, p0/Z, [x25]\n"
+ "cmp x22, #0x0\n"
"addvl x25, x25, #1\n"
+ "ld1w { z16.s }, p0/Z, [x24]\n"
"addvl x24, x24, #1\n"
+ "addvl x23, x23, #1\n"
".inst 0x648aa612 // bfcvtnt z18.h, p1/M, z16.s\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "addvl x20, x20, #1\n"
+ "ld1w { z16.s }, p0/Z, [x19]\n"
+ "addvl x19, x19, #1\n"
".inst 0x648aa611 // bfcvtnt z17.h, p1/M, z16.s\n"
- "st1h { z18.h }, p1, [x22]\n"
- "st1h { z17.h }, p1, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1h { z18.h }, p1, [x21]\n"
+ "st1h { z17.h }, p1, [x21, #1, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
"cmp %x[height], #0x4\n"
@@ -137,61 +137,61 @@ void sme_transpose_interleave_1VL_2x2_fp32bf16(bfloat16 *out, const float *in, s
"cbz %x[height], 12f\n"
"6:" // Main loop skip
"7:" // Tail row loop: Head
- "mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
+ "mov x25, %x[in]\n"
+ "add x24, x25, %x[in_stride]\n"
"cmp %x[height], #0x1\n"
- "mov x21, %x[width]\n"
- "cnth x20, ALL, MUL #2\n"
- "add %x[in], x25, %x[in_stride]\n"
- "csel x25, x25, %x[pad_row], GT\n"
- "cmp x21, x20\n"
- "mov x22, %x[out]\n"
+ "mov x20, %x[width]\n"
+ "cnth x19, ALL, MUL #2\n"
+ "add %x[in], x24, %x[in_stride]\n"
+ "csel x24, x24, %x[pad_row], GT\n"
+ "cmp x20, x19\n"
+ "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x2\n"
"blt 9f\n"
"8:" // Tail row loop: Unroll column loop
- "ld1w { z16.s }, p1/Z, [x26]\n"
+ "ld1w { z16.s }, p1/Z, [x25]\n"
".inst 0x658aa614 // bfcvt z20.h, p1/M, z16.s\n"
- "sub x21, x21, x20\n"
- "cmp x21, x20\n"
- "ld1w { z16.s }, p1/Z, [x26, #1, MUL VL]\n"
+ "sub x20, x20, x19\n"
+ "cmp x20, x19\n"
+ "ld1w { z16.s }, p1/Z, [x25, #1, MUL VL]\n"
".inst 0x658aa613 // bfcvt z19.h, p1/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x26, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x25, #2, MUL VL]\n"
".inst 0x658aa612 // bfcvt z18.h, p1/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x26, #3, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x25, #3, MUL VL]\n"
".inst 0x658aa611 // bfcvt z17.h, p1/M, z16.s\n"
- "addvl x26, x26, #4\n"
- "ld1w { z16.s }, p1/Z, [x25]\n"
+ "addvl x25, x25, #4\n"
+ "ld1w { z16.s }, p1/Z, [x24]\n"
".inst 0x648aa614 // bfcvtnt z20.h, p1/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x25, #1, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x24, #1, MUL VL]\n"
".inst 0x648aa613 // bfcvtnt z19.h, p1/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x25, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x24, #2, MUL VL]\n"
".inst 0x648aa612 // bfcvtnt z18.h, p1/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x25, #3, MUL VL]\n"
- "st1h { z20.h }, p1, [x22]\n"
- "add x22, x22, %x[out_stride]\n"
- "addvl x25, x25, #4\n"
- "st1h { z19.h }, p1, [x22]\n"
- "add x22, x22, %x[out_stride]\n"
+ "ld1w { z16.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "st1h { z20.h }, p1, [x21]\n"
+ "add x21, x21, %x[out_stride]\n"
+ "addvl x24, x24, #4\n"
+ "st1h { z19.h }, p1, [x21]\n"
+ "add x21, x21, %x[out_stride]\n"
".inst 0x648aa611 // bfcvtnt z17.h, p1/M, z16.s\n"
- "st1h { z18.h }, p1, [x22]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z17.h }, p1, [x22]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1h { z18.h }, p1, [x21]\n"
+ "add x21, x21, %x[out_stride]\n"
+ "st1h { z17.h }, p1, [x21]\n"
+ "add x21, x21, %x[out_stride]\n"
"bge 8b\n"
"9:" // Tail row loop: Unroll column loop skip
- "cbz x21, 11f\n"
+ "cbz x20, 11f\n"
"10:" // Tail row loop: Column loop
- "whilelt p0.s, XZR, x21\n"
- "ld1w { z16.s }, p0/Z, [x26]\n"
- ".inst 0x658aa611 // bfcvt z17.h, p1/M, z16.s\n"
- "decw x21\n"
+ "whilelt p0.s, XZR, x20\n"
"ld1w { z16.s }, p0/Z, [x25]\n"
- "cmp x21, #0x0\n"
- "addvl x26, x26, #1\n"
- ".inst 0x648aa611 // bfcvtnt z17.h, p1/M, z16.s\n"
+ ".inst 0x658aa611 // bfcvt z17.h, p1/M, z16.s\n"
+ "decw x20\n"
+ "ld1w { z16.s }, p0/Z, [x24]\n"
+ "cmp x20, #0x0\n"
"addvl x25, x25, #1\n"
- "st1h { z17.h }, p1, [x22]\n"
- "add x22, x22, %x[out_stride]\n"
+ ".inst 0x648aa611 // bfcvtnt z17.h, p1/M, z16.s\n"
+ "addvl x24, x24, #1\n"
+ "st1h { z17.h }, p1, [x21]\n"
+ "add x21, x21, %x[out_stride]\n"
"bgt 10b\n"
"11:" // Tail row loop: Column loop skip
"cmp %x[height], #0x1\n"
@@ -201,7 +201,7 @@ void sme_transpose_interleave_1VL_2x2_fp32bf16(bfloat16 *out, const float *in, s
".inst 0xd503467f // SMSTOP\n"
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
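
[Editor's note] The fp32-to-bf16 variants lean on the BFCVT/BFCVTNT pair: BFCVT narrows one row into the even (bottom) half-lanes of the destination, and BFCVTNT narrows the partner row into the odd (top) half-lanes without disturbing the bottom ones, so the narrowing conversion and the 2x2 interleave happen in the same two instructions. The rounding applied in the default FPCR mode is round-to-nearest-even; a scalar sketch (quiet-NaN handling simplified):

    #include <cstdint>
    #include <cstring>

    // Round-to-nearest-even fp32 -> bf16, matching BFCVT's default rounding
    // (sketch only; NaN payloads are not handled specially here).
    static uint16_t fp32_to_bf16(float f) {
        uint32_t bits;
        std::memcpy(&bits, &f, sizeof(bits));
        const uint32_t round = 0x7FFFu + ((bits >> 16) & 1u);  // ties to even
        return static_cast<uint16_t>((bits + round) >> 16);
    }

With that helper, the pairing above amounts to out[2*i] = fp32_to_bf16(row_a[i]) and out[2*i + 1] = fp32_to_bf16(row_b[i]).
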
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL.hpp
index a057fd514e..334115907d 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -39,89 +39,89 @@ void sme_transpose_interleave_2VL(uint16_t *out, const uint16_t *in, size_t widt
"ptrue p2.b\n"
"blt 6f\n"
"1:" // Main row loop: Head
- "mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
+ "mov x25, %x[in]\n"
"add x24, x25, %x[in_stride]\n"
- "mov x23, %x[width]\n"
- "cnth x20, ALL, MUL #4\n"
- "add x21, x24, %x[in_stride]\n"
- "cmp x23, x20\n"
- "add %x[in], x21, %x[in_stride]\n"
- "mov x22, %x[out]\n"
+ "add x23, x24, %x[in_stride]\n"
+ "mov x22, %x[width]\n"
+ "cnth x19, ALL, MUL #4\n"
+ "add x20, x23, %x[in_stride]\n"
+ "cmp x22, x19\n"
+ "add %x[in], x20, %x[in_stride]\n"
+ "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "sub x23, x23, x20\n"
- "ld1h { z31.h }, p2/Z, [x26]\n"
- "cmp x23, x20\n"
- "ld1h { z30.h }, p2/Z, [x26, #1, MUL VL]\n"
- "ld1h { z29.h }, p2/Z, [x26, #2, MUL VL]\n"
- "ld1h { z28.h }, p2/Z, [x26, #3, MUL VL]\n"
- "addvl x26, x26, #4\n"
- "ld1h { z27.h }, p2/Z, [x25]\n"
- "ld1h { z26.h }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1h { z25.h }, p2/Z, [x25, #2, MUL VL]\n"
- "ld1h { z24.h }, p2/Z, [x25, #3, MUL VL]\n"
+ "sub x22, x22, x19\n"
+ "ld1h { z31.h }, p2/Z, [x25]\n"
+ "cmp x22, x19\n"
+ "ld1h { z30.h }, p2/Z, [x25, #1, MUL VL]\n"
+ "ld1h { z29.h }, p2/Z, [x25, #2, MUL VL]\n"
+ "ld1h { z28.h }, p2/Z, [x25, #3, MUL VL]\n"
"addvl x25, x25, #4\n"
- "ld1h { z23.h }, p2/Z, [x24]\n"
- "ld1h { z22.h }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1h { z21.h }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1h { z20.h }, p2/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z27.h }, p2/Z, [x24]\n"
+ "ld1h { z26.h }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z25.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z24.h }, p2/Z, [x24, #3, MUL VL]\n"
"addvl x24, x24, #4\n"
- "ld1h { z19.h }, p2/Z, [x21]\n"
- "ld1h { z18.h }, p2/Z, [x21, #1, MUL VL]\n"
- "ld1h { z17.h }, p2/Z, [x21, #2, MUL VL]\n"
- "ld1h { z16.h }, p2/Z, [x21, #3, MUL VL]\n"
- "st1h { z31.h }, p2, [x22]\n"
- "addvl x21, x21, #4\n"
- "st1h { z30.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z27.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z26.h }, p2, [x22, #3, MUL VL]\n"
- "st1h { z23.h }, p2, [x22, #4, MUL VL]\n"
- "st1h { z22.h }, p2, [x22, #5, MUL VL]\n"
- "st1h { z19.h }, p2, [x22, #6, MUL VL]\n"
- "st1h { z18.h }, p2, [x22, #7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z29.h }, p2, [x22]\n"
- "st1h { z28.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z25.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z24.h }, p2, [x22, #3, MUL VL]\n"
- "st1h { z21.h }, p2, [x22, #4, MUL VL]\n"
- "st1h { z20.h }, p2, [x22, #5, MUL VL]\n"
- "st1h { z17.h }, p2, [x22, #6, MUL VL]\n"
- "st1h { z16.h }, p2, [x22, #7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "ld1h { z23.h }, p2/Z, [x23]\n"
+ "ld1h { z22.h }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z21.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z20.h }, p2/Z, [x23, #3, MUL VL]\n"
+ "addvl x23, x23, #4\n"
+ "ld1h { z19.h }, p2/Z, [x20]\n"
+ "ld1h { z18.h }, p2/Z, [x20, #1, MUL VL]\n"
+ "ld1h { z17.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z16.h }, p2/Z, [x20, #3, MUL VL]\n"
+ "st1h { z31.h }, p2, [x21]\n"
+ "addvl x20, x20, #4\n"
+ "st1h { z30.h }, p2, [x21, #1, MUL VL]\n"
+ "st1h { z27.h }, p2, [x21, #2, MUL VL]\n"
+ "st1h { z26.h }, p2, [x21, #3, MUL VL]\n"
+ "st1h { z23.h }, p2, [x21, #4, MUL VL]\n"
+ "st1h { z22.h }, p2, [x21, #5, MUL VL]\n"
+ "st1h { z19.h }, p2, [x21, #6, MUL VL]\n"
+ "st1h { z18.h }, p2, [x21, #7, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
+ "st1h { z29.h }, p2, [x21]\n"
+ "st1h { z28.h }, p2, [x21, #1, MUL VL]\n"
+ "st1h { z25.h }, p2, [x21, #2, MUL VL]\n"
+ "st1h { z24.h }, p2, [x21, #3, MUL VL]\n"
+ "st1h { z21.h }, p2, [x21, #4, MUL VL]\n"
+ "st1h { z20.h }, p2, [x21, #5, MUL VL]\n"
+ "st1h { z17.h }, p2, [x21, #6, MUL VL]\n"
+ "st1h { z16.h }, p2, [x21, #7, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x23, 5f\n"
+ "cbz x22, 5f\n"
"4:" // Main row loop: Column loop
- "mov x20, x23\n"
- "whilelt p1.h, XZR, x20\n"
- "ld1h { z23.h }, p1/Z, [x26]\n"
- "dech x20\n"
- "dech x23, ALL, MUL #2\n"
- "ld1h { z22.h }, p1/Z, [x25]\n"
- "whilelt p0.h, XZR, x20\n"
- "cmp x23, #0x0\n"
- "ld1h { z21.h }, p0/Z, [x26, #1, MUL VL]\n"
- "addvl x26, x26, #2\n"
- "ld1h { z20.h }, p0/Z, [x25, #1, MUL VL]\n"
+ "mov x19, x22\n"
+ "whilelt p1.h, XZR, x19\n"
+ "ld1h { z23.h }, p1/Z, [x25]\n"
+ "dech x19\n"
+ "dech x22, ALL, MUL #2\n"
+ "ld1h { z22.h }, p1/Z, [x24]\n"
+ "whilelt p0.h, XZR, x19\n"
+ "cmp x22, #0x0\n"
+ "ld1h { z21.h }, p0/Z, [x25, #1, MUL VL]\n"
"addvl x25, x25, #2\n"
- "ld1h { z19.h }, p1/Z, [x24]\n"
- "ld1h { z18.h }, p0/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z20.h }, p0/Z, [x24, #1, MUL VL]\n"
"addvl x24, x24, #2\n"
- "ld1h { z17.h }, p1/Z, [x21]\n"
- "ld1h { z16.h }, p0/Z, [x21, #1, MUL VL]\n"
- "addvl x21, x21, #2\n"
- "st1h { z23.h }, p2, [x22]\n"
- "st1h { z21.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z22.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z20.h }, p2, [x22, #3, MUL VL]\n"
- "st1h { z19.h }, p2, [x22, #4, MUL VL]\n"
- "st1h { z18.h }, p2, [x22, #5, MUL VL]\n"
- "st1h { z17.h }, p2, [x22, #6, MUL VL]\n"
- "st1h { z16.h }, p2, [x22, #7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "ld1h { z19.h }, p1/Z, [x23]\n"
+ "ld1h { z18.h }, p0/Z, [x23, #1, MUL VL]\n"
+ "addvl x23, x23, #2\n"
+ "ld1h { z17.h }, p1/Z, [x20]\n"
+ "ld1h { z16.h }, p0/Z, [x20, #1, MUL VL]\n"
+ "addvl x20, x20, #2\n"
+ "st1h { z23.h }, p2, [x21]\n"
+ "st1h { z21.h }, p2, [x21, #1, MUL VL]\n"
+ "st1h { z22.h }, p2, [x21, #2, MUL VL]\n"
+ "st1h { z20.h }, p2, [x21, #3, MUL VL]\n"
+ "st1h { z19.h }, p2, [x21, #4, MUL VL]\n"
+ "st1h { z18.h }, p2, [x21, #5, MUL VL]\n"
+ "st1h { z17.h }, p2, [x21, #6, MUL VL]\n"
+ "st1h { z16.h }, p2, [x21, #7, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
"cmp %x[height], #0x4\n"
@@ -130,44 +130,44 @@ void sme_transpose_interleave_2VL(uint16_t *out, const uint16_t *in, size_t widt
"cbz %x[height], 12f\n"
"6:" // Main loop skip
"7:" // Tail row loop: Head
- "mov x21, %x[width]\n"
- "cnth x20, ALL, MUL #4\n"
- "mov x26, %x[in]\n"
- "cmp x21, x20\n"
- "add %x[in], x26, %x[in_stride]\n"
- "mov x22, %x[out]\n"
+ "mov x20, %x[width]\n"
+ "cnth x19, ALL, MUL #4\n"
+ "mov x25, %x[in]\n"
+ "cmp x20, x19\n"
+ "add %x[in], x25, %x[in_stride]\n"
+ "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x1\n"
"blt 9f\n"
"8:" // Tail row loop: Unroll column loop
- "sub x21, x21, x20\n"
- "ld1h { z19.h }, p2/Z, [x26]\n"
- "cmp x21, x20\n"
- "ld1h { z18.h }, p2/Z, [x26, #1, MUL VL]\n"
- "ld1h { z17.h }, p2/Z, [x26, #2, MUL VL]\n"
- "ld1h { z16.h }, p2/Z, [x26, #3, MUL VL]\n"
- "st1h { z19.h }, p2, [x22]\n"
- "addvl x26, x26, #4\n"
- "st1h { z18.h }, p2, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z17.h }, p2, [x22]\n"
- "st1h { z16.h }, p2, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "sub x20, x20, x19\n"
+ "ld1h { z19.h }, p2/Z, [x25]\n"
+ "cmp x20, x19\n"
+ "ld1h { z18.h }, p2/Z, [x25, #1, MUL VL]\n"
+ "ld1h { z17.h }, p2/Z, [x25, #2, MUL VL]\n"
+ "ld1h { z16.h }, p2/Z, [x25, #3, MUL VL]\n"
+ "st1h { z19.h }, p2, [x21]\n"
+ "addvl x25, x25, #4\n"
+ "st1h { z18.h }, p2, [x21, #1, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
+ "st1h { z17.h }, p2, [x21]\n"
+ "st1h { z16.h }, p2, [x21, #1, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
"bge 8b\n"
"9:" // Tail row loop: Unroll column loop skip
- "cbz x21, 11f\n"
+ "cbz x20, 11f\n"
"10:" // Tail row loop: Column loop
- "mov x20, x21\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z17.h }, p0/Z, [x26]\n"
- "dech x20\n"
- "dech x21, ALL, MUL #2\n"
- "whilelt p0.h, XZR, x20\n"
- "cmp x21, #0x0\n"
- "ld1h { z16.h }, p0/Z, [x26, #1, MUL VL]\n"
- "st1h { z17.h }, p2, [x22]\n"
- "addvl x26, x26, #2\n"
- "st1h { z16.h }, p2, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "mov x19, x20\n"
+ "whilelt p0.h, XZR, x19\n"
+ "ld1h { z17.h }, p0/Z, [x25]\n"
+ "dech x19\n"
+ "dech x20, ALL, MUL #2\n"
+ "whilelt p0.h, XZR, x19\n"
+ "cmp x20, #0x0\n"
+ "ld1h { z16.h }, p0/Z, [x25, #1, MUL VL]\n"
+ "st1h { z17.h }, p2, [x21]\n"
+ "addvl x25, x25, #2\n"
+ "st1h { z16.h }, p2, [x21, #1, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
"bgt 10b\n"
"11:" // Tail row loop: Column loop skip
"cmp %x[height], #0x1\n"
@@ -177,7 +177,7 @@ void sme_transpose_interleave_2VL(uint16_t *out, const uint16_t *in, size_t widt
".inst 0xd503467f // SMSTOP\n"
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
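
[Editor's note] Every column tail loop in these transforms shares the same predication idiom: whilelt builds a predicate covering however many lanes remain (capped at the vector length), loads and stores run under that predicate, and the counter is decremented by the lane count until it hits zero, so no scalar cleanup loop is needed. A hypothetical ACLE-intrinsics sketch of the idiom (not the library's code, which stays in hand-written asm):

    #include <arm_sve.h>
    #include <cstdint>

    // Predicated tail handling in intrinsic form: the final partial vector
    // goes through the same loop body as the full ones.
    void copy_predicated(const uint16_t *in, uint16_t *out, int64_t n) {
        for (int64_t i = 0; i < n; i += static_cast<int64_t>(svcnth())) {
            svbool_t pg = svwhilelt_b16(i, n);     // active lanes: i..n-1, max VL
            svuint16_t v = svld1_u16(pg, in + i);  // inactive lanes read as zero
            svst1_u16(pg, out + i, v);             // inactive lanes not written
        }
    }
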
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_1x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_1x4.hpp
index 9eb4075677..39c49990eb 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_1x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_1x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -43,69 +43,69 @@ void sme_transpose_interleave_2VL_1x4(uint8_t *out, const uint8_t *in, size_t wi
".inst 0xd503477f // SMSTART ZA\n"
"ptrue p1.b\n"
"1:" // Main row loop: Head
- "mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
+ "mov x25, %x[in]\n"
"add x24, x25, %x[in_stride]\n"
"add x23, x24, %x[in_stride]\n"
+ "add x22, x23, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x23, %x[in_stride]\n"
- "csel x23, x23, %x[pad_row], GT\n"
- "csel x24, x24, %x[pad_row], GE\n"
+ "add %x[in], x22, %x[in_stride]\n"
+ "csel x22, x22, %x[pad_row], GT\n"
+ "csel x23, x23, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "mov x22, %x[width]\n"
- "cntb x21\n"
- "csel x25, x25, %x[pad_row], GT\n"
- "cmp x22, x21\n"
- "mov x20, %x[out]\n"
+ "mov x21, %x[width]\n"
+ "cntb x20\n"
+ "csel x24, x24, %x[pad_row], GT\n"
+ "cmp x21, x20\n"
+ "mov x19, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1b { z17.b }, p1/Z, [x26]\n"
- "sub x22, x22, x21\n"
- "cmp x22, x21\n"
- "ld1b { z18.b }, p1/Z, [x25]\n"
- "addvl x26, x26, #1\n"
+ "ld1b { z17.b }, p1/Z, [x25]\n"
+ "sub x21, x21, x20\n"
+ "cmp x21, x20\n"
+ "ld1b { z18.b }, p1/Z, [x24]\n"
"addvl x25, x25, #1\n"
- "ld1b { z16.b }, p1/Z, [x24]\n"
+ "addvl x24, x24, #1\n"
+ "ld1b { z16.b }, p1/Z, [x23]\n"
"zip1 z20.b, z17.b, z16.b\n"
"zip2 z19.b, z17.b, z16.b\n"
- "addvl x24, x24, #1\n"
- "ld1b { z17.b }, p1/Z, [x23]\n"
+ "addvl x23, x23, #1\n"
+ "ld1b { z17.b }, p1/Z, [x22]\n"
"zip1 z16.b, z18.b, z17.b\n"
"zip2 z18.b, z18.b, z17.b\n"
- "addvl x23, x23, #1\n"
+ "addvl x22, x22, #1\n"
"zip1 z17.b, z20.b, z16.b\n"
"zip2 z16.b, z20.b, z16.b\n"
- "st1b { z17.b }, p1, [x20]\n"
- "st1b { z16.b }, p1, [x20, #1, MUL VL]\n"
- "add x20, x20, %x[out_stride]\n"
+ "st1b { z17.b }, p1, [x19]\n"
+ "st1b { z16.b }, p1, [x19, #1, MUL VL]\n"
+ "add x19, x19, %x[out_stride]\n"
"zip1 z17.b, z19.b, z18.b\n"
"zip2 z16.b, z19.b, z18.b\n"
- "st1b { z17.b }, p1, [x20]\n"
- "st1b { z16.b }, p1, [x20, #1, MUL VL]\n"
- "add x20, x20, %x[out_stride]\n"
+ "st1b { z17.b }, p1, [x19]\n"
+ "st1b { z16.b }, p1, [x19, #1, MUL VL]\n"
+ "add x19, x19, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x22, 5f\n"
+ "cbz x21, 5f\n"
"4:" // Main row loop: Column loop
- "whilelt p0.b, XZR, x22\n"
- "ld1b { z18.b }, p0/Z, [x26]\n"
- "decw x22, ALL, MUL #2\n"
- "ld1b { z17.b }, p0/Z, [x25]\n"
- "cmp x22, #0x0\n"
- "incd x26, ALL, MUL #4\n"
- "ld1b { z16.b }, p0/Z, [x24]\n"
- "zip1 z18.b, z18.b, z16.b\n"
+ "whilelt p0.b, XZR, x21\n"
+ "ld1b { z18.b }, p0/Z, [x25]\n"
+ "decw x21, ALL, MUL #2\n"
+ "ld1b { z17.b }, p0/Z, [x24]\n"
+ "cmp x21, #0x0\n"
"incd x25, ALL, MUL #4\n"
- "incd x24, ALL, MUL #4\n"
"ld1b { z16.b }, p0/Z, [x23]\n"
- "zip1 z16.b, z17.b, z16.b\n"
+ "zip1 z18.b, z18.b, z16.b\n"
+ "incd x24, ALL, MUL #4\n"
"incd x23, ALL, MUL #4\n"
+ "ld1b { z16.b }, p0/Z, [x22]\n"
+ "zip1 z16.b, z17.b, z16.b\n"
+ "incd x22, ALL, MUL #4\n"
"zip1 z17.b, z18.b, z16.b\n"
"zip2 z16.b, z18.b, z16.b\n"
- "st1b { z17.b }, p1, [x20]\n"
- "st1b { z16.b }, p1, [x20, #1, MUL VL]\n"
- "add x20, x20, %x[out_stride]\n"
+ "st1b { z17.b }, p1, [x19]\n"
+ "st1b { z16.b }, p1, [x19, #1, MUL VL]\n"
+ "add x19, x19, %x[out_stride]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
"cmp %x[height], #0x1\n"
@@ -114,7 +114,7 @@ void sme_transpose_interleave_2VL_1x4(uint8_t *out, const uint8_t *in, size_t wi
".inst 0xd503467f // SMSTOP\n"
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_2x2.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_2x2.hpp
index 13e0a38ebc..cfa868226a 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_2x2.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_2x2.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -45,74 +45,74 @@ void sme_transpose_interleave_2VL_2x2(uint16_t *out, const uint16_t *in, size_t
"ptrue p1.b\n"
"blt 6f\n"
"1:" // Main row loop: Head
- "mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
+ "mov x25, %x[in]\n"
"add x24, x25, %x[in_stride]\n"
- "mov x23, %x[width]\n"
- "cnth x21, ALL, MUL #2\n"
- "add x20, x24, %x[in_stride]\n"
- "cmp x23, x21\n"
- "add %x[in], x20, %x[in_stride]\n"
- "mov x22, %x[out]\n"
+ "add x23, x24, %x[in_stride]\n"
+ "mov x22, %x[width]\n"
+ "cnth x20, ALL, MUL #2\n"
+ "add x19, x23, %x[in_stride]\n"
+ "cmp x22, x20\n"
+ "add %x[in], x19, %x[in_stride]\n"
+ "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1h { z17.h }, p1/Z, [x26]\n"
- "sub x23, x23, x21\n"
- "cmp x23, x21\n"
- "ld1h { z16.h }, p1/Z, [x25]\n"
+ "ld1h { z17.h }, p1/Z, [x25]\n"
+ "sub x22, x22, x20\n"
+ "cmp x22, x20\n"
+ "ld1h { z16.h }, p1/Z, [x24]\n"
"zip1 z24.h, z17.h, z16.h\n"
"zip2 z23.h, z17.h, z16.h\n"
- "ld1h { z17.h }, p1/Z, [x24]\n"
- "ld1h { z16.h }, p1/Z, [x20]\n"
+ "ld1h { z17.h }, p1/Z, [x23]\n"
+ "ld1h { z16.h }, p1/Z, [x19]\n"
"zip1 z22.h, z17.h, z16.h\n"
"zip2 z21.h, z17.h, z16.h\n"
- "ld1h { z17.h }, p1/Z, [x26, #1, MUL VL]\n"
- "addvl x26, x26, #2\n"
- "ld1h { z16.h }, p1/Z, [x25, #1, MUL VL]\n"
+ "ld1h { z17.h }, p1/Z, [x25, #1, MUL VL]\n"
"addvl x25, x25, #2\n"
+ "ld1h { z16.h }, p1/Z, [x24, #1, MUL VL]\n"
+ "addvl x24, x24, #2\n"
"zip1 z20.h, z17.h, z16.h\n"
"zip2 z19.h, z17.h, z16.h\n"
- "ld1h { z18.h }, p1/Z, [x24, #1, MUL VL]\n"
- "addvl x24, x24, #2\n"
- "ld1h { z16.h }, p1/Z, [x20, #1, MUL VL]\n"
- "st1h { z24.h }, p1, [x22]\n"
- "addvl x20, x20, #2\n"
+ "ld1h { z18.h }, p1/Z, [x23, #1, MUL VL]\n"
+ "addvl x23, x23, #2\n"
+ "ld1h { z16.h }, p1/Z, [x19, #1, MUL VL]\n"
+ "st1h { z24.h }, p1, [x21]\n"
+ "addvl x19, x19, #2\n"
"zip1 z17.h, z18.h, z16.h\n"
- "st1h { z23.h }, p1, [x22, #1, MUL VL]\n"
+ "st1h { z23.h }, p1, [x21, #1, MUL VL]\n"
"zip2 z16.h, z18.h, z16.h\n"
- "st1h { z22.h }, p1, [x22, #2, MUL VL]\n"
- "st1h { z21.h }, p1, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z20.h }, p1, [x22]\n"
- "st1h { z19.h }, p1, [x22, #1, MUL VL]\n"
- "st1h { z17.h }, p1, [x22, #2, MUL VL]\n"
- "st1h { z16.h }, p1, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1h { z22.h }, p1, [x21, #2, MUL VL]\n"
+ "st1h { z21.h }, p1, [x21, #3, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
+ "st1h { z20.h }, p1, [x21]\n"
+ "st1h { z19.h }, p1, [x21, #1, MUL VL]\n"
+ "st1h { z17.h }, p1, [x21, #2, MUL VL]\n"
+ "st1h { z16.h }, p1, [x21, #3, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x23, 5f\n"
+ "cbz x22, 5f\n"
"4:" // Main row loop: Column loop
- "whilelt p0.h, XZR, x23\n"
- "ld1h { z17.h }, p0/Z, [x26]\n"
- "decw x23, ALL, MUL #2\n"
- "ld1h { z16.h }, p0/Z, [x25]\n"
- "cmp x23, #0x0\n"
- "addvl x26, x26, #1\n"
- "zip1 z20.h, z17.h, z16.h\n"
- "ld1h { z19.h }, p0/Z, [x24]\n"
+ "whilelt p0.h, XZR, x22\n"
+ "ld1h { z17.h }, p0/Z, [x25]\n"
+ "decw x22, ALL, MUL #2\n"
+ "ld1h { z16.h }, p0/Z, [x24]\n"
+ "cmp x22, #0x0\n"
"addvl x25, x25, #1\n"
+ "zip1 z20.h, z17.h, z16.h\n"
+ "ld1h { z19.h }, p0/Z, [x23]\n"
"addvl x24, x24, #1\n"
+ "addvl x23, x23, #1\n"
"zip2 z18.h, z17.h, z16.h\n"
- "ld1h { z16.h }, p0/Z, [x20]\n"
- "addvl x20, x20, #1\n"
+ "ld1h { z16.h }, p0/Z, [x19]\n"
+ "addvl x19, x19, #1\n"
"zip1 z17.h, z19.h, z16.h\n"
"zip2 z16.h, z19.h, z16.h\n"
- "st1h { z20.h }, p1, [x22]\n"
- "st1h { z18.h }, p1, [x22, #1, MUL VL]\n"
- "st1h { z17.h }, p1, [x22, #2, MUL VL]\n"
- "st1h { z16.h }, p1, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1h { z20.h }, p1, [x21]\n"
+ "st1h { z18.h }, p1, [x21, #1, MUL VL]\n"
+ "st1h { z17.h }, p1, [x21, #2, MUL VL]\n"
+ "st1h { z16.h }, p1, [x21, #3, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
"cmp %x[height], #0x4\n"
@@ -121,52 +121,52 @@ void sme_transpose_interleave_2VL_2x2(uint16_t *out, const uint16_t *in, size_t
"cbz %x[height], 12f\n"
"6:" // Main loop skip
"7:" // Tail row loop: Head
- "mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
+ "mov x25, %x[in]\n"
+ "add x24, x25, %x[in_stride]\n"
"cmp %x[height], #0x1\n"
- "mov x21, %x[width]\n"
- "cnth x20, ALL, MUL #2\n"
- "add %x[in], x25, %x[in_stride]\n"
- "csel x25, x25, %x[pad_row], GT\n"
- "cmp x21, x20\n"
- "mov x22, %x[out]\n"
+ "mov x20, %x[width]\n"
+ "cnth x19, ALL, MUL #2\n"
+ "add %x[in], x24, %x[in_stride]\n"
+ "csel x24, x24, %x[pad_row], GT\n"
+ "cmp x20, x19\n"
+ "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x2\n"
"blt 9f\n"
"8:" // Tail row loop: Unroll column loop
- "ld1h { z18.h }, p1/Z, [x26]\n"
- "sub x21, x21, x20\n"
- "cmp x21, x20\n"
- "ld1h { z16.h }, p1/Z, [x25]\n"
+ "ld1h { z18.h }, p1/Z, [x25]\n"
+ "sub x20, x20, x19\n"
+ "cmp x20, x19\n"
+ "ld1h { z16.h }, p1/Z, [x24]\n"
"zip1 z17.h, z18.h, z16.h\n"
"zip2 z19.h, z18.h, z16.h\n"
- "ld1h { z18.h }, p1/Z, [x26, #1, MUL VL]\n"
- "addvl x26, x26, #2\n"
- "ld1h { z16.h }, p1/Z, [x25, #1, MUL VL]\n"
- "st1h { z17.h }, p1, [x22]\n"
+ "ld1h { z18.h }, p1/Z, [x25, #1, MUL VL]\n"
"addvl x25, x25, #2\n"
+ "ld1h { z16.h }, p1/Z, [x24, #1, MUL VL]\n"
+ "st1h { z17.h }, p1, [x21]\n"
+ "addvl x24, x24, #2\n"
"zip1 z17.h, z18.h, z16.h\n"
- "st1h { z19.h }, p1, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1h { z19.h }, p1, [x21, #1, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
"zip2 z16.h, z18.h, z16.h\n"
- "st1h { z17.h }, p1, [x22]\n"
- "st1h { z16.h }, p1, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1h { z17.h }, p1, [x21]\n"
+ "st1h { z16.h }, p1, [x21, #1, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
"bge 8b\n"
"9:" // Tail row loop: Unroll column loop skip
- "cbz x21, 11f\n"
+ "cbz x20, 11f\n"
"10:" // Tail row loop: Column loop
- "whilelt p0.h, XZR, x21\n"
- "ld1h { z18.h }, p0/Z, [x26]\n"
- "decw x21, ALL, MUL #2\n"
- "ld1h { z16.h }, p0/Z, [x25]\n"
- "cmp x21, #0x0\n"
- "addvl x26, x26, #1\n"
- "zip1 z17.h, z18.h, z16.h\n"
+ "whilelt p0.h, XZR, x20\n"
+ "ld1h { z18.h }, p0/Z, [x25]\n"
+ "decw x20, ALL, MUL #2\n"
+ "ld1h { z16.h }, p0/Z, [x24]\n"
+ "cmp x20, #0x0\n"
"addvl x25, x25, #1\n"
+ "zip1 z17.h, z18.h, z16.h\n"
+ "addvl x24, x24, #1\n"
"zip2 z16.h, z18.h, z16.h\n"
- "st1h { z17.h }, p1, [x22]\n"
- "st1h { z16.h }, p1, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1h { z17.h }, p1, [x21]\n"
+ "st1h { z16.h }, p1, [x21, #1, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
"bgt 10b\n"
"11:" // Tail row loop: Column loop skip
"cmp %x[height], #0x1\n"
@@ -176,7 +176,7 @@ void sme_transpose_interleave_2VL_2x2(uint16_t *out, const uint16_t *in, size_t
".inst 0xd503467f // SMSTOP\n"
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_2x2_fp32bf16.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_2x2_fp32bf16.hpp
index 9d402a2d58..65936d9464 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_2x2_fp32bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_2x2_fp32bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -45,101 +45,101 @@ void sme_transpose_interleave_2VL_2x2_fp32bf16(bfloat16 *out, const float *in, s
"ptrue p2.b\n"
"blt 6f\n"
"1:" // Main row loop: Head
- "mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
+ "mov x25, %x[in]\n"
"add x24, x25, %x[in_stride]\n"
- "mov x23, %x[width]\n"
- "cnth x20, ALL, MUL #2\n"
- "add x21, x24, %x[in_stride]\n"
- "cmp x23, x20\n"
- "add %x[in], x21, %x[in_stride]\n"
- "mov x22, %x[out]\n"
+ "add x23, x24, %x[in_stride]\n"
+ "mov x22, %x[width]\n"
+ "cnth x19, ALL, MUL #2\n"
+ "add x20, x23, %x[in_stride]\n"
+ "cmp x22, x19\n"
+ "add %x[in], x20, %x[in_stride]\n"
+ "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1w { z16.s }, p2/Z, [x26]\n"
+ "ld1w { z16.s }, p2/Z, [x25]\n"
".inst 0x658aaa18 // bfcvt z24.h, p2/M, z16.s\n"
- "sub x23, x23, x20\n"
- "cmp x23, x20\n"
- "ld1w { z16.s }, p2/Z, [x26, #1, MUL VL]\n"
+ "sub x22, x22, x19\n"
+ "cmp x22, x19\n"
+ "ld1w { z16.s }, p2/Z, [x25, #1, MUL VL]\n"
".inst 0x658aaa17 // bfcvt z23.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x24]\n"
+ "ld1w { z16.s }, p2/Z, [x23]\n"
".inst 0x658aaa16 // bfcvt z22.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x23, #1, MUL VL]\n"
".inst 0x658aaa15 // bfcvt z21.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x26, #2, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x25, #2, MUL VL]\n"
".inst 0x658aaa14 // bfcvt z20.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x26, #3, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x25, #3, MUL VL]\n"
".inst 0x658aaa13 // bfcvt z19.h, p2/M, z16.s\n"
- "addvl x26, x26, #4\n"
- "ld1w { z16.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "addvl x25, x25, #4\n"
+ "ld1w { z16.s }, p2/Z, [x23, #2, MUL VL]\n"
".inst 0x658aaa12 // bfcvt z18.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x23, #3, MUL VL]\n"
".inst 0x658aaa11 // bfcvt z17.h, p2/M, z16.s\n"
- "addvl x24, x24, #4\n"
- "ld1w { z16.s }, p2/Z, [x25]\n"
+ "addvl x23, x23, #4\n"
+ "ld1w { z16.s }, p2/Z, [x24]\n"
".inst 0x648aaa18 // bfcvtnt z24.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x25, #1, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x24, #1, MUL VL]\n"
".inst 0x648aaa17 // bfcvtnt z23.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x21]\n"
+ "ld1w { z16.s }, p2/Z, [x20]\n"
".inst 0x648aaa16 // bfcvtnt z22.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x20, #1, MUL VL]\n"
".inst 0x648aaa15 // bfcvtnt z21.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x25, #2, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x24, #2, MUL VL]\n"
".inst 0x648aaa14 // bfcvtnt z20.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x25, #3, MUL VL]\n"
- "addvl x25, x25, #4\n"
+ "ld1w { z16.s }, p2/Z, [x24, #3, MUL VL]\n"
+ "addvl x24, x24, #4\n"
".inst 0x648aaa13 // bfcvtnt z19.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x20, #2, MUL VL]\n"
".inst 0x648aaa12 // bfcvtnt z18.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x21, #3, MUL VL]\n"
- "st1h { z24.h }, p2, [x22]\n"
- "addvl x21, x21, #4\n"
+ "ld1w { z16.s }, p2/Z, [x20, #3, MUL VL]\n"
+ "st1h { z24.h }, p2, [x21]\n"
+ "addvl x20, x20, #4\n"
".inst 0x648aaa11 // bfcvtnt z17.h, p2/M, z16.s\n"
- "st1h { z23.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z22.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z21.h }, p2, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z20.h }, p2, [x22]\n"
- "st1h { z19.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z18.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z17.h }, p2, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1h { z23.h }, p2, [x21, #1, MUL VL]\n"
+ "st1h { z22.h }, p2, [x21, #2, MUL VL]\n"
+ "st1h { z21.h }, p2, [x21, #3, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
+ "st1h { z20.h }, p2, [x21]\n"
+ "st1h { z19.h }, p2, [x21, #1, MUL VL]\n"
+ "st1h { z18.h }, p2, [x21, #2, MUL VL]\n"
+ "st1h { z17.h }, p2, [x21, #3, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x23, 5f\n"
+ "cbz x22, 5f\n"
"4:" // Main row loop: Column loop
- "mov x20, x23\n"
- "whilelt p1.s, XZR, x20\n"
- "ld1w { z16.s }, p1/Z, [x26]\n"
+ "mov x19, x22\n"
+ "whilelt p1.s, XZR, x19\n"
+ "ld1w { z16.s }, p1/Z, [x25]\n"
".inst 0x658aaa14 // bfcvt z20.h, p2/M, z16.s\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z16.s }, p0/Z, [x26, #1, MUL VL]\n"
+ "decw x19\n"
+ "whilelt p0.s, XZR, x19\n"
+ "ld1w { z16.s }, p0/Z, [x25, #1, MUL VL]\n"
".inst 0x658aaa13 // bfcvt z19.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x24]\n"
+ "ld1w { z16.s }, p1/Z, [x23]\n"
".inst 0x658aaa12 // bfcvt z18.h, p2/M, z16.s\n"
- "decw x23, ALL, MUL #2\n"
- "cmp x23, #0x0\n"
- "ld1w { z16.s }, p0/Z, [x24, #1, MUL VL]\n"
+ "decw x22, ALL, MUL #2\n"
+ "cmp x22, #0x0\n"
+ "ld1w { z16.s }, p0/Z, [x23, #1, MUL VL]\n"
".inst 0x658aaa11 // bfcvt z17.h, p2/M, z16.s\n"
- "addvl x26, x26, #2\n"
- "addvl x24, x24, #2\n"
- "ld1w { z16.s }, p1/Z, [x25]\n"
- ".inst 0x648aaa14 // bfcvtnt z20.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p0/Z, [x25, #1, MUL VL]\n"
"addvl x25, x25, #2\n"
+ "addvl x23, x23, #2\n"
+ "ld1w { z16.s }, p1/Z, [x24]\n"
+ ".inst 0x648aaa14 // bfcvtnt z20.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x24, #1, MUL VL]\n"
+ "addvl x24, x24, #2\n"
".inst 0x648aaa13 // bfcvtnt z19.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
".inst 0x648aaa12 // bfcvtnt z18.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p0/Z, [x21, #1, MUL VL]\n"
- "addvl x21, x21, #2\n"
+ "ld1w { z16.s }, p0/Z, [x20, #1, MUL VL]\n"
+ "addvl x20, x20, #2\n"
".inst 0x648aaa11 // bfcvtnt z17.h, p2/M, z16.s\n"
- "st1h { z20.h }, p2, [x22]\n"
- "st1h { z19.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z18.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z17.h }, p2, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1h { z20.h }, p2, [x21]\n"
+ "st1h { z19.h }, p2, [x21, #1, MUL VL]\n"
+ "st1h { z18.h }, p2, [x21, #2, MUL VL]\n"
+ "st1h { z17.h }, p2, [x21, #3, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
"cmp %x[height], #0x4\n"
@@ -148,67 +148,67 @@ void sme_transpose_interleave_2VL_2x2_fp32bf16(bfloat16 *out, const float *in, s
"cbz %x[height], 12f\n"
"6:" // Main loop skip
"7:" // Tail row loop: Head
- "mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
+ "mov x25, %x[in]\n"
+ "add x24, x25, %x[in_stride]\n"
"cmp %x[height], #0x1\n"
- "mov x21, %x[width]\n"
- "cnth x20, ALL, MUL #2\n"
- "add %x[in], x25, %x[in_stride]\n"
- "csel x25, x25, %x[pad_row], GT\n"
- "cmp x21, x20\n"
- "mov x22, %x[out]\n"
+ "mov x20, %x[width]\n"
+ "cnth x19, ALL, MUL #2\n"
+ "add %x[in], x24, %x[in_stride]\n"
+ "csel x24, x24, %x[pad_row], GT\n"
+ "cmp x20, x19\n"
+ "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x2\n"
"blt 9f\n"
"8:" // Tail row loop: Unroll column loop
- "ld1w { z16.s }, p2/Z, [x26]\n"
+ "ld1w { z16.s }, p2/Z, [x25]\n"
".inst 0x658aaa14 // bfcvt z20.h, p2/M, z16.s\n"
- "sub x21, x21, x20\n"
- "cmp x21, x20\n"
- "ld1w { z16.s }, p2/Z, [x26, #1, MUL VL]\n"
+ "sub x20, x20, x19\n"
+ "cmp x20, x19\n"
+ "ld1w { z16.s }, p2/Z, [x25, #1, MUL VL]\n"
".inst 0x658aaa13 // bfcvt z19.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x26, #2, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x25, #2, MUL VL]\n"
".inst 0x658aaa12 // bfcvt z18.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x26, #3, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x25, #3, MUL VL]\n"
".inst 0x658aaa11 // bfcvt z17.h, p2/M, z16.s\n"
- "addvl x26, x26, #4\n"
- "ld1w { z16.s }, p2/Z, [x25]\n"
+ "addvl x25, x25, #4\n"
+ "ld1w { z16.s }, p2/Z, [x24]\n"
".inst 0x648aaa14 // bfcvtnt z20.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x25, #1, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x24, #1, MUL VL]\n"
".inst 0x648aaa13 // bfcvtnt z19.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x25, #2, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x24, #2, MUL VL]\n"
".inst 0x648aaa12 // bfcvtnt z18.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x25, #3, MUL VL]\n"
- "st1h { z20.h }, p2, [x22]\n"
- "addvl x25, x25, #4\n"
+ "ld1w { z16.s }, p2/Z, [x24, #3, MUL VL]\n"
+ "st1h { z20.h }, p2, [x21]\n"
+ "addvl x24, x24, #4\n"
".inst 0x648aaa11 // bfcvtnt z17.h, p2/M, z16.s\n"
- "st1h { z19.h }, p2, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z18.h }, p2, [x22]\n"
- "st1h { z17.h }, p2, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1h { z19.h }, p2, [x21, #1, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
+ "st1h { z18.h }, p2, [x21]\n"
+ "st1h { z17.h }, p2, [x21, #1, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
"bge 8b\n"
"9:" // Tail row loop: Unroll column loop skip
- "cbz x21, 11f\n"
+ "cbz x20, 11f\n"
"10:" // Tail row loop: Column loop
- "mov x20, x21\n"
- "whilelt p1.s, XZR, x20\n"
- "ld1w { z16.s }, p1/Z, [x26]\n"
+ "mov x19, x20\n"
+ "whilelt p1.s, XZR, x19\n"
+ "ld1w { z16.s }, p1/Z, [x25]\n"
".inst 0x658aaa12 // bfcvt z18.h, p2/M, z16.s\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z16.s }, p0/Z, [x26, #1, MUL VL]\n"
+ "decw x19\n"
+ "whilelt p0.s, XZR, x19\n"
+ "ld1w { z16.s }, p0/Z, [x25, #1, MUL VL]\n"
".inst 0x658aaa11 // bfcvt z17.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x25]\n"
- "decw x21, ALL, MUL #2\n"
- "cmp x21, #0x0\n"
+ "ld1w { z16.s }, p1/Z, [x24]\n"
+ "decw x20, ALL, MUL #2\n"
+ "cmp x20, #0x0\n"
".inst 0x648aaa12 // bfcvtnt z18.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p0/Z, [x25, #1, MUL VL]\n"
- "addvl x26, x26, #2\n"
+ "ld1w { z16.s }, p0/Z, [x24, #1, MUL VL]\n"
"addvl x25, x25, #2\n"
+ "addvl x24, x24, #2\n"
".inst 0x648aaa11 // bfcvtnt z17.h, p2/M, z16.s\n"
- "st1h { z18.h }, p2, [x22]\n"
- "st1h { z17.h }, p2, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1h { z18.h }, p2, [x21]\n"
+ "st1h { z17.h }, p2, [x21, #1, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
"bgt 10b\n"
"11:" // Tail row loop: Column loop skip
"cmp %x[height], #0x1\n"
@@ -218,7 +218,7 @@ void sme_transpose_interleave_2VL_2x2_fp32bf16(bfloat16 *out, const float *in, s
".inst 0xd503467f // SMSTOP\n"
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
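The bfcvt/bfcvtnt pairing in the fp32-to-bf16 kernels above amounts to a 2x2 convert-and-interleave: bfcvt writes the even bf16 lanes from one source row and bfcvtnt fills the odd lanes from the next. A minimal scalar sketch, assuming truncating conversion (the hardware instructions round; truncation just keeps the example dependency-free) and invented names:

#include <cstddef>
#include <cstdint>
#include <cstring>

// fp32 -> bf16 by truncation: keep the top 16 bits of the float encoding.
static uint16_t to_bf16(float f)
{
    uint32_t bits;
    std::memcpy(&bits, &f, sizeof(bits));
    return static_cast<uint16_t>(bits >> 16);
}

// Scalar reference for the 2x2 fp32->bf16 interleave: out[2c] comes from the
// first row (bfcvt, even lanes), out[2c+1] from the second (bfcvtnt, odd).
void transpose_interleave_2x2_fp32bf16_ref(uint16_t *out, const float *row0,
                                           const float *row1, size_t width)
{
    for (size_t c = 0; c < width; c++) {
        out[2 * c]     = to_bf16(row0[c]);
        out[2 * c + 1] = to_bf16(row1[c]);
    }
}
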
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL.hpp
index 362bebbea0..7b783c3e5d 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -39,64 +39,64 @@ void sme_transpose_interleave_4VL(uint16_t *out, const uint16_t *in, size_t widt
"ptrue p4.b\n"
"blt 4f\n"
"1:" // Main row loop: Head
- "mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
+ "mov x25, %x[in]\n"
"add x24, x25, %x[in_stride]\n"
"add x23, x24, %x[in_stride]\n"
- "add %x[in], x23, %x[in_stride]\n"
- "mov x22, %x[out]\n"
+ "add x22, x23, %x[in_stride]\n"
+ "add %x[in], x22, %x[in_stride]\n"
+ "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
- "mov x21, %x[width]\n"
+ "mov x20, %x[width]\n"
"2:" // Main row loop: Column loop
- "mov x20, x21\n"
- "whilelt p3.h, XZR, x20\n"
- "ld1h { z31.h }, p3/Z, [x26]\n"
- "dech x20\n"
- "whilelt p2.h, XZR, x20\n"
- "ld1h { z30.h }, p2/Z, [x26, #1, MUL VL]\n"
- "dech x20\n"
- "whilelt p1.h, XZR, x20\n"
- "ld1h { z29.h }, p1/Z, [x26, #2, MUL VL]\n"
- "dech x20\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z28.h }, p0/Z, [x26, #3, MUL VL]\n"
- "mov x20, x22\n"
- "dech x21, ALL, MUL #4\n"
- "ld1h { z27.h }, p3/Z, [x25]\n"
- "ld1h { z26.h }, p2/Z, [x25, #1, MUL VL]\n"
- "cmp x21, #0x0\n"
- "addvl x26, x26, #4\n"
- "ld1h { z25.h }, p1/Z, [x25, #2, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "ld1h { z24.h }, p0/Z, [x25, #3, MUL VL]\n"
+ "mov x19, x20\n"
+ "whilelt p3.h, XZR, x19\n"
+ "ld1h { z31.h }, p3/Z, [x25]\n"
+ "dech x19\n"
+ "whilelt p2.h, XZR, x19\n"
+ "ld1h { z30.h }, p2/Z, [x25, #1, MUL VL]\n"
+ "dech x19\n"
+ "whilelt p1.h, XZR, x19\n"
+ "ld1h { z29.h }, p1/Z, [x25, #2, MUL VL]\n"
+ "dech x19\n"
+ "whilelt p0.h, XZR, x19\n"
+ "ld1h { z28.h }, p0/Z, [x25, #3, MUL VL]\n"
+ "mov x19, x21\n"
+ "dech x20, ALL, MUL #4\n"
+ "ld1h { z27.h }, p3/Z, [x24]\n"
+ "ld1h { z26.h }, p2/Z, [x24, #1, MUL VL]\n"
+ "cmp x20, #0x0\n"
"addvl x25, x25, #4\n"
- "ld1h { z23.h }, p3/Z, [x24]\n"
- "ld1h { z22.h }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1h { z21.h }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1h { z20.h }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z25.h }, p1/Z, [x24, #2, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
+ "ld1h { z24.h }, p0/Z, [x24, #3, MUL VL]\n"
"addvl x24, x24, #4\n"
- "ld1h { z19.h }, p3/Z, [x23]\n"
- "ld1h { z18.h }, p2/Z, [x23, #1, MUL VL]\n"
- "ld1h { z17.h }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1h { z16.h }, p0/Z, [x23, #3, MUL VL]\n"
- "st1h { z31.h }, p4, [x20]\n"
+ "ld1h { z23.h }, p3/Z, [x23]\n"
+ "ld1h { z22.h }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z21.h }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z20.h }, p0/Z, [x23, #3, MUL VL]\n"
"addvl x23, x23, #4\n"
- "st1h { z30.h }, p4, [x20, #1, MUL VL]\n"
- "st1h { z29.h }, p4, [x20, #2, MUL VL]\n"
- "st1h { z28.h }, p4, [x20, #3, MUL VL]\n"
- "st1h { z27.h }, p4, [x20, #4, MUL VL]\n"
- "st1h { z26.h }, p4, [x20, #5, MUL VL]\n"
- "st1h { z25.h }, p4, [x20, #6, MUL VL]\n"
- "st1h { z24.h }, p4, [x20, #7, MUL VL]\n"
- "addvl x20, x20, #16\n"
- "st1h { z23.h }, p4, [x20, #-8, MUL VL]\n"
- "st1h { z22.h }, p4, [x20, #-7, MUL VL]\n"
- "st1h { z21.h }, p4, [x20, #-6, MUL VL]\n"
- "st1h { z20.h }, p4, [x20, #-5, MUL VL]\n"
- "st1h { z19.h }, p4, [x20, #-4, MUL VL]\n"
- "st1h { z18.h }, p4, [x20, #-3, MUL VL]\n"
- "st1h { z17.h }, p4, [x20, #-2, MUL VL]\n"
- "st1h { z16.h }, p4, [x20, #-1, MUL VL]\n"
+ "ld1h { z19.h }, p3/Z, [x22]\n"
+ "ld1h { z18.h }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z17.h }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z16.h }, p0/Z, [x22, #3, MUL VL]\n"
+ "st1h { z31.h }, p4, [x19]\n"
+ "addvl x22, x22, #4\n"
+ "st1h { z30.h }, p4, [x19, #1, MUL VL]\n"
+ "st1h { z29.h }, p4, [x19, #2, MUL VL]\n"
+ "st1h { z28.h }, p4, [x19, #3, MUL VL]\n"
+ "st1h { z27.h }, p4, [x19, #4, MUL VL]\n"
+ "st1h { z26.h }, p4, [x19, #5, MUL VL]\n"
+ "st1h { z25.h }, p4, [x19, #6, MUL VL]\n"
+ "st1h { z24.h }, p4, [x19, #7, MUL VL]\n"
+ "addvl x19, x19, #16\n"
+ "st1h { z23.h }, p4, [x19, #-8, MUL VL]\n"
+ "st1h { z22.h }, p4, [x19, #-7, MUL VL]\n"
+ "st1h { z21.h }, p4, [x19, #-6, MUL VL]\n"
+ "st1h { z20.h }, p4, [x19, #-5, MUL VL]\n"
+ "st1h { z19.h }, p4, [x19, #-4, MUL VL]\n"
+ "st1h { z18.h }, p4, [x19, #-3, MUL VL]\n"
+ "st1h { z17.h }, p4, [x19, #-2, MUL VL]\n"
+ "st1h { z16.h }, p4, [x19, #-1, MUL VL]\n"
"bgt 2b\n"
"3:" // Main row loop: Column loop skip
"cmp %x[height], #0x4\n"
@@ -105,32 +105,32 @@ void sme_transpose_interleave_4VL(uint16_t *out, const uint16_t *in, size_t widt
"cbz %x[height], 8f\n"
"4:" // Main loop skip
"5:" // Tail row loop: Head
- "mov x26, %x[in]\n"
- "add %x[in], x26, %x[in_stride]\n"
- "mov x22, %x[out]\n"
+ "mov x25, %x[in]\n"
+ "add %x[in], x25, %x[in_stride]\n"
+ "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x1\n"
- "mov x21, %x[width]\n"
+ "mov x20, %x[width]\n"
"6:" // Tail row loop: Column loop
- "mov x20, x21\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z19.h }, p0/Z, [x26]\n"
- "dech x20\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z18.h }, p0/Z, [x26, #1, MUL VL]\n"
- "dech x20\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z17.h }, p0/Z, [x26, #2, MUL VL]\n"
- "dech x20\n"
- "dech x21, ALL, MUL #4\n"
- "whilelt p0.h, XZR, x20\n"
- "cmp x21, #0x0\n"
- "ld1h { z16.h }, p0/Z, [x26, #3, MUL VL]\n"
- "st1h { z19.h }, p4, [x22]\n"
- "addvl x26, x26, #4\n"
- "st1h { z18.h }, p4, [x22, #1, MUL VL]\n"
- "st1h { z17.h }, p4, [x22, #2, MUL VL]\n"
- "st1h { z16.h }, p4, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "mov x19, x20\n"
+ "whilelt p0.h, XZR, x19\n"
+ "ld1h { z19.h }, p0/Z, [x25]\n"
+ "dech x19\n"
+ "whilelt p0.h, XZR, x19\n"
+ "ld1h { z18.h }, p0/Z, [x25, #1, MUL VL]\n"
+ "dech x19\n"
+ "whilelt p0.h, XZR, x19\n"
+ "ld1h { z17.h }, p0/Z, [x25, #2, MUL VL]\n"
+ "dech x19\n"
+ "dech x20, ALL, MUL #4\n"
+ "whilelt p0.h, XZR, x19\n"
+ "cmp x20, #0x0\n"
+ "ld1h { z16.h }, p0/Z, [x25, #3, MUL VL]\n"
+ "st1h { z19.h }, p4, [x21]\n"
+ "addvl x25, x25, #4\n"
+ "st1h { z18.h }, p4, [x21, #1, MUL VL]\n"
+ "st1h { z17.h }, p4, [x21, #2, MUL VL]\n"
+ "st1h { z16.h }, p4, [x21, #3, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
"bgt 6b\n"
"7:" // Tail row loop: Column loop skip
"cmp %x[height], #0x1\n"
@@ -140,7 +140,7 @@ void sme_transpose_interleave_4VL(uint16_t *out, const uint16_t *in, size_t widt
".inst 0xd503467f // SMSTOP\n"
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
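The plain 4VL transform above does no element interleaving at all: each output panel is simply a (4 x VL)-element slice of four consecutive rows, stored row after row, with predicated loads zeroing the tail that the full-width stores then write out. A scalar sketch under those assumptions (names invented; strides here are in elements, whereas the kernel's pointers advance in bytes):

#include <cstddef>
#include <cstdint>

// Scalar reference for the 4VL transpose-interleave: copy a block-wide slice
// of rows 0..3 into one panel, zero-padding past the end of each row.
void transpose_interleave_4vl_ref(uint16_t *out, const uint16_t *in,
                                  size_t width, size_t in_stride,
                                  size_t out_stride, size_t block)
{
    for (size_t c = 0; c < width; c += block) {
        for (size_t r = 0; r < 4; r++) {
            for (size_t i = 0; i < block; i++) {
                out[r * block + i] =
                    (c + i < width) ? in[r * in_stride + c + i] : 0;
            }
        }
        out += out_stride;  // advance to the next output panel
    }
}
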
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_1x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_1x4.hpp
index cbcc0b4c8b..0429bb07fe 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_1x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_1x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -43,44 +43,44 @@ void sme_transpose_interleave_4VL_1x4(uint8_t *out, const uint8_t *in, size_t wi
".inst 0xd503477f // SMSTART ZA\n"
"ptrue p1.b\n"
"1:" // Main row loop: Head
- "mov x25, %x[in]\n"
- "add x24, x25, %x[in_stride]\n"
+ "mov x24, %x[in]\n"
"add x23, x24, %x[in_stride]\n"
"add x22, x23, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x22, %x[in_stride]\n"
- "csel x22, x22, %x[pad_row], GT\n"
- "csel x23, x23, %x[pad_row], GE\n"
+ "add %x[in], x21, %x[in_stride]\n"
+ "csel x21, x21, %x[pad_row], GT\n"
+ "csel x22, x22, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "mov x21, %x[out]\n"
- "csel x24, x24, %x[pad_row], GT\n"
+ "mov x20, %x[out]\n"
+ "csel x23, x23, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x4\n"
- "mov x20, %x[width]\n"
+ "mov x19, %x[width]\n"
"2:" // Main row loop: Column loop
- "whilelt p0.b, XZR, x20\n"
- "ld1b { z17.b }, p0/Z, [x25]\n"
- "decw x20, ALL, MUL #4\n"
- "ld1b { z19.b }, p0/Z, [x24]\n"
- "cmp x20, #0x0\n"
- "addvl x25, x25, #1\n"
- "ld1b { z16.b }, p0/Z, [x23]\n"
- "zip1 z18.b, z17.b, z16.b\n"
- "zip2 z20.b, z17.b, z16.b\n"
+ "whilelt p0.b, XZR, x19\n"
+ "ld1b { z17.b }, p0/Z, [x24]\n"
+ "decw x19, ALL, MUL #4\n"
+ "ld1b { z19.b }, p0/Z, [x23]\n"
+ "cmp x19, #0x0\n"
"addvl x24, x24, #1\n"
"ld1b { z16.b }, p0/Z, [x22]\n"
+ "zip1 z18.b, z17.b, z16.b\n"
+ "zip2 z20.b, z17.b, z16.b\n"
+ "addvl x23, x23, #1\n"
+ "ld1b { z16.b }, p0/Z, [x21]\n"
"zip1 z17.b, z19.b, z16.b\n"
"zip2 z19.b, z19.b, z16.b\n"
- "addvl x23, x23, #1\n"
"addvl x22, x22, #1\n"
+ "addvl x21, x21, #1\n"
"zip1 z16.b, z18.b, z17.b\n"
"zip2 z18.b, z18.b, z17.b\n"
- "st1b { z16.b }, p1, [x21]\n"
+ "st1b { z16.b }, p1, [x20]\n"
"zip1 z17.b, z20.b, z19.b\n"
"zip2 z16.b, z20.b, z19.b\n"
- "st1b { z18.b }, p1, [x21, #1, MUL VL]\n"
- "st1b { z17.b }, p1, [x21, #2, MUL VL]\n"
- "st1b { z16.b }, p1, [x21, #3, MUL VL]\n"
- "add x21, x21, %x[out_stride]\n"
+ "st1b { z18.b }, p1, [x20, #1, MUL VL]\n"
+ "st1b { z17.b }, p1, [x20, #2, MUL VL]\n"
+ "st1b { z16.b }, p1, [x20, #3, MUL VL]\n"
+ "add x20, x20, %x[out_stride]\n"
"bgt 2b\n"
"3:" // Main row loop: Column loop skip
"cmp %x[height], #0x1\n"
@@ -89,7 +89,7 @@ void sme_transpose_interleave_4VL_1x4(uint8_t *out, const uint8_t *in, size_t wi
".inst 0xd503467f // SMSTOP\n"
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_2x2.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_2x2.hpp
index 8badde53a9..d1534db6c6 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_2x2.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_2x2.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -45,50 +45,50 @@ void sme_transpose_interleave_4VL_2x2(uint16_t *out, const uint16_t *in, size_t
"ptrue p2.b\n"
"blt 4f\n"
"1:" // Main row loop: Head
- "mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
+ "mov x25, %x[in]\n"
"add x24, x25, %x[in_stride]\n"
"add x23, x24, %x[in_stride]\n"
- "add %x[in], x23, %x[in_stride]\n"
- "mov x22, %x[out]\n"
+ "add x22, x23, %x[in_stride]\n"
+ "add %x[in], x22, %x[in_stride]\n"
+ "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
- "mov x21, %x[width]\n"
+ "mov x20, %x[width]\n"
"2:" // Main row loop: Column loop
- "mov x20, x21\n"
- "whilelt p1.h, XZR, x20\n"
- "ld1h { z19.h }, p1/Z, [x26]\n"
- "dech x20\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z18.h }, p0/Z, [x26, #1, MUL VL]\n"
- "ld1h { z17.h }, p1/Z, [x25]\n"
- "decw x21, ALL, MUL #4\n"
- "cmp x21, #0x0\n"
+ "mov x19, x20\n"
+ "whilelt p1.h, XZR, x19\n"
+ "ld1h { z19.h }, p1/Z, [x25]\n"
+ "dech x19\n"
+ "whilelt p0.h, XZR, x19\n"
+ "ld1h { z18.h }, p0/Z, [x25, #1, MUL VL]\n"
+ "ld1h { z17.h }, p1/Z, [x24]\n"
+ "decw x20, ALL, MUL #4\n"
+ "cmp x20, #0x0\n"
"zip1 z24.h, z19.h, z17.h\n"
- "ld1h { z16.h }, p0/Z, [x25, #1, MUL VL]\n"
- "addvl x26, x26, #2\n"
+ "ld1h { z16.h }, p0/Z, [x24, #1, MUL VL]\n"
"addvl x25, x25, #2\n"
+ "addvl x24, x24, #2\n"
"zip2 z23.h, z19.h, z17.h\n"
- "ld1h { z17.h }, p1/Z, [x24]\n"
+ "ld1h { z17.h }, p1/Z, [x23]\n"
"zip1 z22.h, z18.h, z16.h\n"
"zip2 z21.h, z18.h, z16.h\n"
- "ld1h { z20.h }, p0/Z, [x24, #1, MUL VL]\n"
- "addvl x24, x24, #2\n"
- "ld1h { z16.h }, p1/Z, [x23]\n"
+ "ld1h { z20.h }, p0/Z, [x23, #1, MUL VL]\n"
+ "addvl x23, x23, #2\n"
+ "ld1h { z16.h }, p1/Z, [x22]\n"
"zip1 z19.h, z17.h, z16.h\n"
"zip2 z18.h, z17.h, z16.h\n"
- "ld1h { z16.h }, p0/Z, [x23, #1, MUL VL]\n"
- "addvl x23, x23, #2\n"
+ "ld1h { z16.h }, p0/Z, [x22, #1, MUL VL]\n"
+ "addvl x22, x22, #2\n"
"zip1 z17.h, z20.h, z16.h\n"
"zip2 z16.h, z20.h, z16.h\n"
- "st1h { z24.h }, p2, [x22]\n"
- "st1h { z23.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z22.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z21.h }, p2, [x22, #3, MUL VL]\n"
- "st1h { z19.h }, p2, [x22, #4, MUL VL]\n"
- "st1h { z18.h }, p2, [x22, #5, MUL VL]\n"
- "st1h { z17.h }, p2, [x22, #6, MUL VL]\n"
- "st1h { z16.h }, p2, [x22, #7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1h { z24.h }, p2, [x21]\n"
+ "st1h { z23.h }, p2, [x21, #1, MUL VL]\n"
+ "st1h { z22.h }, p2, [x21, #2, MUL VL]\n"
+ "st1h { z21.h }, p2, [x21, #3, MUL VL]\n"
+ "st1h { z19.h }, p2, [x21, #4, MUL VL]\n"
+ "st1h { z18.h }, p2, [x21, #5, MUL VL]\n"
+ "st1h { z17.h }, p2, [x21, #6, MUL VL]\n"
+ "st1h { z16.h }, p2, [x21, #7, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
"bgt 2b\n"
"3:" // Main row loop: Column loop skip
"cmp %x[height], #0x4\n"
@@ -97,36 +97,36 @@ void sme_transpose_interleave_4VL_2x2(uint16_t *out, const uint16_t *in, size_t
"cbz %x[height], 8f\n"
"4:" // Main loop skip
"5:" // Tail row loop: Head
- "mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
+ "mov x25, %x[in]\n"
+ "add x24, x25, %x[in_stride]\n"
"cmp %x[height], #0x1\n"
- "add %x[in], x25, %x[in_stride]\n"
- "mov x22, %x[out]\n"
- "csel x25, x25, %x[pad_row], GT\n"
+ "add %x[in], x24, %x[in_stride]\n"
+ "mov x21, %x[out]\n"
+ "csel x24, x24, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x2\n"
- "mov x21, %x[width]\n"
+ "mov x20, %x[width]\n"
"6:" // Tail row loop: Column loop
- "mov x20, x21\n"
- "whilelt p1.h, XZR, x20\n"
- "ld1h { z18.h }, p1/Z, [x26]\n"
- "dech x20\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z20.h }, p0/Z, [x26, #1, MUL VL]\n"
- "ld1h { z17.h }, p1/Z, [x25]\n"
- "decw x21, ALL, MUL #4\n"
- "cmp x21, #0x0\n"
+ "mov x19, x20\n"
+ "whilelt p1.h, XZR, x19\n"
+ "ld1h { z18.h }, p1/Z, [x25]\n"
+ "dech x19\n"
+ "whilelt p0.h, XZR, x19\n"
+ "ld1h { z20.h }, p0/Z, [x25, #1, MUL VL]\n"
+ "ld1h { z17.h }, p1/Z, [x24]\n"
+ "decw x20, ALL, MUL #4\n"
+ "cmp x20, #0x0\n"
"zip1 z19.h, z18.h, z17.h\n"
- "ld1h { z16.h }, p0/Z, [x25, #1, MUL VL]\n"
- "addvl x26, x26, #2\n"
+ "ld1h { z16.h }, p0/Z, [x24, #1, MUL VL]\n"
"addvl x25, x25, #2\n"
+ "addvl x24, x24, #2\n"
"zip2 z18.h, z18.h, z17.h\n"
"zip1 z17.h, z20.h, z16.h\n"
"zip2 z16.h, z20.h, z16.h\n"
- "st1h { z19.h }, p2, [x22]\n"
- "st1h { z18.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z17.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z16.h }, p2, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1h { z19.h }, p2, [x21]\n"
+ "st1h { z18.h }, p2, [x21, #1, MUL VL]\n"
+ "st1h { z17.h }, p2, [x21, #2, MUL VL]\n"
+ "st1h { z16.h }, p2, [x21, #3, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
"bgt 6b\n"
"7:" // Tail row loop: Column loop skip
"cmp %x[height], #0x1\n"
@@ -136,7 +136,7 @@ void sme_transpose_interleave_4VL_2x2(uint16_t *out, const uint16_t *in, size_t
".inst 0xd503467f // SMSTOP\n"
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_2x2_fp32bf16.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_2x2_fp32bf16.hpp
index 8873070019..59ac4bf50b 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_2x2_fp32bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_2x2_fp32bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -45,70 +45,70 @@ void sme_transpose_interleave_4VL_2x2_fp32bf16(bfloat16 *out, const float *in, s
"ptrue p4.b\n"
"blt 4f\n"
"1:" // Main row loop: Head
- "mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
+ "mov x25, %x[in]\n"
"add x24, x25, %x[in_stride]\n"
"add x23, x24, %x[in_stride]\n"
- "add %x[in], x23, %x[in_stride]\n"
- "mov x22, %x[out]\n"
+ "add x22, x23, %x[in_stride]\n"
+ "add %x[in], x22, %x[in_stride]\n"
+ "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
- "mov x21, %x[width]\n"
+ "mov x20, %x[width]\n"
"2:" // Main row loop: Column loop
- "mov x20, x21\n"
- "whilelt p3.s, XZR, x20\n"
- "ld1w { z16.s }, p3/Z, [x26]\n"
+ "mov x19, x20\n"
+ "whilelt p3.s, XZR, x19\n"
+ "ld1w { z16.s }, p3/Z, [x25]\n"
".inst 0x658ab218 // bfcvt z24.h, p4/M, z16.s\n"
- "decw x20\n"
- "whilelt p2.s, XZR, x20\n"
- "ld1w { z16.s }, p2/Z, [x26, #1, MUL VL]\n"
+ "decw x19\n"
+ "whilelt p2.s, XZR, x19\n"
+ "ld1w { z16.s }, p2/Z, [x25, #1, MUL VL]\n"
".inst 0x658ab217 // bfcvt z23.h, p4/M, z16.s\n"
- "decw x20\n"
- "whilelt p1.s, XZR, x20\n"
- "ld1w { z16.s }, p1/Z, [x26, #2, MUL VL]\n"
+ "decw x19\n"
+ "whilelt p1.s, XZR, x19\n"
+ "ld1w { z16.s }, p1/Z, [x25, #2, MUL VL]\n"
".inst 0x658ab216 // bfcvt z22.h, p4/M, z16.s\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z16.s }, p0/Z, [x26, #3, MUL VL]\n"
+ "decw x19\n"
+ "whilelt p0.s, XZR, x19\n"
+ "ld1w { z16.s }, p0/Z, [x25, #3, MUL VL]\n"
".inst 0x658ab215 // bfcvt z21.h, p4/M, z16.s\n"
- "ld1w { z16.s }, p3/Z, [x24]\n"
+ "ld1w { z16.s }, p3/Z, [x23]\n"
".inst 0x658ab214 // bfcvt z20.h, p4/M, z16.s\n"
- "decw x21, ALL, MUL #4\n"
- "cmp x21, #0x0\n"
- "ld1w { z16.s }, p2/Z, [x24, #1, MUL VL]\n"
+ "decw x20, ALL, MUL #4\n"
+ "cmp x20, #0x0\n"
+ "ld1w { z16.s }, p2/Z, [x23, #1, MUL VL]\n"
".inst 0x658ab213 // bfcvt z19.h, p4/M, z16.s\n"
- "addvl x26, x26, #4\n"
- "ld1w { z16.s }, p1/Z, [x24, #2, MUL VL]\n"
+ "addvl x25, x25, #4\n"
+ "ld1w { z16.s }, p1/Z, [x23, #2, MUL VL]\n"
".inst 0x658ab212 // bfcvt z18.h, p4/M, z16.s\n"
- "ld1w { z16.s }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z16.s }, p0/Z, [x23, #3, MUL VL]\n"
".inst 0x658ab211 // bfcvt z17.h, p4/M, z16.s\n"
- "addvl x24, x24, #4\n"
- "ld1w { z16.s }, p3/Z, [x25]\n"
+ "addvl x23, x23, #4\n"
+ "ld1w { z16.s }, p3/Z, [x24]\n"
".inst 0x648ab218 // bfcvtnt z24.h, p4/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x25, #1, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x24, #1, MUL VL]\n"
".inst 0x648ab217 // bfcvtnt z23.h, p4/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x25, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x24, #2, MUL VL]\n"
".inst 0x648ab216 // bfcvtnt z22.h, p4/M, z16.s\n"
- "ld1w { z16.s }, p0/Z, [x25, #3, MUL VL]\n"
- "addvl x25, x25, #4\n"
+ "ld1w { z16.s }, p0/Z, [x24, #3, MUL VL]\n"
+ "addvl x24, x24, #4\n"
".inst 0x648ab215 // bfcvtnt z21.h, p4/M, z16.s\n"
- "ld1w { z16.s }, p3/Z, [x23]\n"
+ "ld1w { z16.s }, p3/Z, [x22]\n"
".inst 0x648ab214 // bfcvtnt z20.h, p4/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x22, #1, MUL VL]\n"
".inst 0x648ab213 // bfcvtnt z19.h, p4/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x22, #2, MUL VL]\n"
".inst 0x648ab212 // bfcvtnt z18.h, p4/M, z16.s\n"
- "ld1w { z16.s }, p0/Z, [x23, #3, MUL VL]\n"
- "addvl x23, x23, #4\n"
+ "ld1w { z16.s }, p0/Z, [x22, #3, MUL VL]\n"
+ "addvl x22, x22, #4\n"
".inst 0x648ab211 // bfcvtnt z17.h, p4/M, z16.s\n"
- "st1h { z24.h }, p4, [x22]\n"
- "st1h { z23.h }, p4, [x22, #1, MUL VL]\n"
- "st1h { z22.h }, p4, [x22, #2, MUL VL]\n"
- "st1h { z21.h }, p4, [x22, #3, MUL VL]\n"
- "st1h { z20.h }, p4, [x22, #4, MUL VL]\n"
- "st1h { z19.h }, p4, [x22, #5, MUL VL]\n"
- "st1h { z18.h }, p4, [x22, #6, MUL VL]\n"
- "st1h { z17.h }, p4, [x22, #7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1h { z24.h }, p4, [x21]\n"
+ "st1h { z23.h }, p4, [x21, #1, MUL VL]\n"
+ "st1h { z22.h }, p4, [x21, #2, MUL VL]\n"
+ "st1h { z21.h }, p4, [x21, #3, MUL VL]\n"
+ "st1h { z20.h }, p4, [x21, #4, MUL VL]\n"
+ "st1h { z19.h }, p4, [x21, #5, MUL VL]\n"
+ "st1h { z18.h }, p4, [x21, #6, MUL VL]\n"
+ "st1h { z17.h }, p4, [x21, #7, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
"bgt 2b\n"
"3:" // Main row loop: Column loop skip
"cmp %x[height], #0x4\n"
@@ -117,48 +117,48 @@ void sme_transpose_interleave_4VL_2x2_fp32bf16(bfloat16 *out, const float *in, s
"cbz %x[height], 8f\n"
"4:" // Main loop skip
"5:" // Tail row loop: Head
- "mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
+ "mov x25, %x[in]\n"
+ "add x24, x25, %x[in_stride]\n"
"cmp %x[height], #0x1\n"
- "add %x[in], x25, %x[in_stride]\n"
- "mov x22, %x[out]\n"
- "csel x25, x25, %x[pad_row], GT\n"
+ "add %x[in], x24, %x[in_stride]\n"
+ "mov x21, %x[out]\n"
+ "csel x24, x24, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x2\n"
- "mov x21, %x[width]\n"
+ "mov x20, %x[width]\n"
"6:" // Tail row loop: Column loop
- "mov x20, x21\n"
- "whilelt p3.s, XZR, x20\n"
- "ld1w { z16.s }, p3/Z, [x26]\n"
+ "mov x19, x20\n"
+ "whilelt p3.s, XZR, x19\n"
+ "ld1w { z16.s }, p3/Z, [x25]\n"
".inst 0x658ab214 // bfcvt z20.h, p4/M, z16.s\n"
- "decw x20\n"
- "whilelt p2.s, XZR, x20\n"
- "ld1w { z16.s }, p2/Z, [x26, #1, MUL VL]\n"
+ "decw x19\n"
+ "whilelt p2.s, XZR, x19\n"
+ "ld1w { z16.s }, p2/Z, [x25, #1, MUL VL]\n"
".inst 0x658ab213 // bfcvt z19.h, p4/M, z16.s\n"
- "decw x20\n"
- "whilelt p1.s, XZR, x20\n"
- "ld1w { z16.s }, p1/Z, [x26, #2, MUL VL]\n"
+ "decw x19\n"
+ "whilelt p1.s, XZR, x19\n"
+ "ld1w { z16.s }, p1/Z, [x25, #2, MUL VL]\n"
".inst 0x658ab212 // bfcvt z18.h, p4/M, z16.s\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z16.s }, p0/Z, [x26, #3, MUL VL]\n"
+ "decw x19\n"
+ "whilelt p0.s, XZR, x19\n"
+ "ld1w { z16.s }, p0/Z, [x25, #3, MUL VL]\n"
".inst 0x658ab211 // bfcvt z17.h, p4/M, z16.s\n"
- "ld1w { z16.s }, p3/Z, [x25]\n"
- "decw x21, ALL, MUL #4\n"
- "cmp x21, #0x0\n"
+ "ld1w { z16.s }, p3/Z, [x24]\n"
+ "decw x20, ALL, MUL #4\n"
+ "cmp x20, #0x0\n"
".inst 0x648ab214 // bfcvtnt z20.h, p4/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x25, #1, MUL VL]\n"
- "addvl x26, x26, #4\n"
+ "ld1w { z16.s }, p2/Z, [x24, #1, MUL VL]\n"
+ "addvl x25, x25, #4\n"
".inst 0x648ab213 // bfcvtnt z19.h, p4/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x25, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x24, #2, MUL VL]\n"
".inst 0x648ab212 // bfcvtnt z18.h, p4/M, z16.s\n"
- "ld1w { z16.s }, p0/Z, [x25, #3, MUL VL]\n"
- "addvl x25, x25, #4\n"
+ "ld1w { z16.s }, p0/Z, [x24, #3, MUL VL]\n"
+ "addvl x24, x24, #4\n"
".inst 0x648ab211 // bfcvtnt z17.h, p4/M, z16.s\n"
- "st1h { z20.h }, p4, [x22]\n"
- "st1h { z19.h }, p4, [x22, #1, MUL VL]\n"
- "st1h { z18.h }, p4, [x22, #2, MUL VL]\n"
- "st1h { z17.h }, p4, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1h { z20.h }, p4, [x21]\n"
+ "st1h { z19.h }, p4, [x21, #1, MUL VL]\n"
+ "st1h { z18.h }, p4, [x21, #2, MUL VL]\n"
+ "st1h { z17.h }, p4, [x21, #3, MUL VL]\n"
+ "add x21, x21, %x[out_stride]\n"
"bgt 6b\n"
"7:" // Tail row loop: Column loop skip
"cmp %x[height], #0x1\n"
@@ -168,7 +168,7 @@ void sme_transpose_interleave_4VL_2x2_fp32bf16(bfloat16 *out, const float *in, s
".inst 0xd503467f // SMSTOP\n"
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_12VL_2x4_fp32bf16.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_12VL_2x4_fp32bf16.hpp
index 847718992a..ef94cbad39 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_12VL_2x4_fp32bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_12VL_2x4_fp32bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -42,105 +42,105 @@ void sve_transpose_interleave_12VL_2x4_fp32bf16(bfloat16 *out, const float *in,
__asm__ __volatile__(
"ptrue p6.b\n"
"1:" // Main row loop: Head
- "mov x28, %x[in]\n"
- "add x27, x28, %x[in_stride]\n"
+ "mov x27, %x[in]\n"
"add x26, x27, %x[in_stride]\n"
- "mov x25, %x[width]\n"
- "cnth x24, ALL, MUL #6\n"
- "add x23, x26, %x[in_stride]\n"
+ "add x25, x26, %x[in_stride]\n"
+ "mov x24, %x[width]\n"
+ "cnth x23, ALL, MUL #6\n"
+ "add x22, x25, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x23, %x[in_stride]\n"
- "csel x23, x23, %x[pad_row], GT\n"
- "csel x26, x26, %x[pad_row], GE\n"
+ "add %x[in], x22, %x[in_stride]\n"
+ "csel x22, x22, %x[pad_row], GT\n"
+ "csel x25, x25, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x27, x27, %x[pad_row], GT\n"
- "cmp x25, x24\n"
- "mov x22, %x[out]\n"
+ "csel x26, x26, %x[pad_row], GT\n"
+ "cmp x24, x23\n"
+ "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1w { z22.s }, p6/Z, [x28]\n"
- "ld1w { z7.s }, p6/Z, [x28, #1, MUL VL]\n"
- "mov x21, x22\n"
- "add x22, x22, %x[out_stride]\n"
- "ld1w { z19.s }, p6/Z, [x28, #2, MUL VL]\n"
- "ld1w { z18.s }, p6/Z, [x28, #3, MUL VL]\n"
- "mov x20, x22\n"
- "sub x25, x25, x24\n"
- "ld1w { z5.s }, p6/Z, [x28, #4, MUL VL]\n"
- "ld1w { z25.s }, p6/Z, [x28, #5, MUL VL]\n"
- "cmp x25, x24\n"
- "add x22, x22, %x[out_stride]\n"
- "ld1w { z20.s }, p6/Z, [x28, #6, MUL VL]\n"
- "ld1w { z23.s }, p6/Z, [x28, #7, MUL VL]\n"
- "addvl x28, x28, #12\n"
- "ld1w { z4.s }, p6/Z, [x26]\n"
- "ld1w { z10.s }, p6/Z, [x26, #1, MUL VL]\n"
+ "ld1w { z22.s }, p6/Z, [x27]\n"
+ "ld1w { z7.s }, p6/Z, [x27, #1, MUL VL]\n"
+ "mov x20, x21\n"
+ "add x21, x21, %x[out_stride]\n"
+ "ld1w { z19.s }, p6/Z, [x27, #2, MUL VL]\n"
+ "ld1w { z18.s }, p6/Z, [x27, #3, MUL VL]\n"
+ "mov x19, x21\n"
+ "sub x24, x24, x23\n"
+ "ld1w { z5.s }, p6/Z, [x27, #4, MUL VL]\n"
+ "ld1w { z25.s }, p6/Z, [x27, #5, MUL VL]\n"
+ "cmp x24, x23\n"
+ "add x21, x21, %x[out_stride]\n"
+ "ld1w { z20.s }, p6/Z, [x27, #6, MUL VL]\n"
+ "ld1w { z23.s }, p6/Z, [x27, #7, MUL VL]\n"
+ "addvl x27, x27, #12\n"
+ "ld1w { z4.s }, p6/Z, [x25]\n"
+ "ld1w { z10.s }, p6/Z, [x25, #1, MUL VL]\n"
"zip1 z14.s, z22.s, z4.s\n"
"zip2 z22.s, z22.s, z4.s\n"
- "ld1w { z28.s }, p6/Z, [x26, #2, MUL VL]\n"
- "ld1w { z27.s }, p6/Z, [x26, #3, MUL VL]\n"
+ "ld1w { z28.s }, p6/Z, [x25, #2, MUL VL]\n"
+ "ld1w { z27.s }, p6/Z, [x25, #3, MUL VL]\n"
"zip1 z24.s, z7.s, z10.s\n"
"zip2 z15.s, z7.s, z10.s\n"
- "ld1w { z7.s }, p6/Z, [x26, #4, MUL VL]\n"
- "ld1w { z2.s }, p6/Z, [x26, #5, MUL VL]\n"
+ "ld1w { z7.s }, p6/Z, [x25, #4, MUL VL]\n"
+ "ld1w { z2.s }, p6/Z, [x25, #5, MUL VL]\n"
"zip1 z9.s, z19.s, z28.s\n"
"zip2 z0.s, z19.s, z28.s\n"
- "ld1w { z19.s }, p6/Z, [x26, #6, MUL VL]\n"
- "ld1w { z16.s }, p6/Z, [x26, #7, MUL VL]\n"
- "addvl x26, x26, #12\n"
+ "ld1w { z19.s }, p6/Z, [x25, #6, MUL VL]\n"
+ "ld1w { z16.s }, p6/Z, [x25, #7, MUL VL]\n"
+ "addvl x25, x25, #12\n"
"zip1 z1.s, z18.s, z27.s\n"
- "ld1w { z30.s }, p6/Z, [x28, #-4, MUL VL]\n"
- "ld1w { z29.s }, p6/Z, [x28, #-3, MUL VL]\n"
+ "ld1w { z30.s }, p6/Z, [x27, #-4, MUL VL]\n"
+ "ld1w { z29.s }, p6/Z, [x27, #-3, MUL VL]\n"
"zip2 z17.s, z18.s, z27.s\n"
".inst 0x658ab9d5 // bfcvt z21.h, p6/M, z14.s\n"
- "ld1w { z31.s }, p6/Z, [x27]\n"
- "ld1w { z8.s }, p6/Z, [x27, #1, MUL VL]\n"
+ "ld1w { z31.s }, p6/Z, [x26]\n"
+ "ld1w { z8.s }, p6/Z, [x26, #1, MUL VL]\n"
".inst 0x658abacc // bfcvt z12.h, p6/M, z22.s\n"
".inst 0x658abb0e // bfcvt z14.h, p6/M, z24.s\n"
- "ld1w { z22.s }, p6/Z, [x27, #2, MUL VL]\n"
- "ld1w { z28.s }, p6/Z, [x27, #3, MUL VL]\n"
+ "ld1w { z22.s }, p6/Z, [x26, #2, MUL VL]\n"
+ "ld1w { z28.s }, p6/Z, [x26, #3, MUL VL]\n"
".inst 0x658ab9ea // bfcvt z10.h, p6/M, z15.s\n"
".inst 0x658ab92f // bfcvt z15.h, p6/M, z9.s\n"
- "ld1w { z27.s }, p6/Z, [x27, #4, MUL VL]\n"
- "ld1w { z13.s }, p6/Z, [x27, #5, MUL VL]\n"
+ "ld1w { z27.s }, p6/Z, [x26, #4, MUL VL]\n"
+ "ld1w { z13.s }, p6/Z, [x26, #5, MUL VL]\n"
".inst 0x658ab803 // bfcvt z3.h, p6/M, z0.s\n"
".inst 0x658ab832 // bfcvt z18.h, p6/M, z1.s\n"
- "ld1w { z26.s }, p6/Z, [x27, #6, MUL VL]\n"
- "ld1w { z9.s }, p6/Z, [x27, #7, MUL VL]\n"
- "addvl x27, x27, #12\n"
+ "ld1w { z26.s }, p6/Z, [x26, #6, MUL VL]\n"
+ "ld1w { z9.s }, p6/Z, [x26, #7, MUL VL]\n"
+ "addvl x26, x26, #12\n"
".inst 0x658aba26 // bfcvt z6.h, p6/M, z17.s\n"
- "ld1w { z1.s }, p6/Z, [x26, #-4, MUL VL]\n"
- "ld1w { z0.s }, p6/Z, [x26, #-3, MUL VL]\n"
+ "ld1w { z1.s }, p6/Z, [x25, #-4, MUL VL]\n"
+ "ld1w { z0.s }, p6/Z, [x25, #-3, MUL VL]\n"
"zip1 z17.s, z5.s, z7.s\n"
"zip2 z5.s, z5.s, z7.s\n"
- "ld1w { z24.s }, p6/Z, [x23]\n"
- "ld1w { z11.s }, p6/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z24.s }, p6/Z, [x22]\n"
+ "ld1w { z11.s }, p6/Z, [x22, #1, MUL VL]\n"
"zip1 z7.s, z31.s, z24.s\n"
"zip2 z31.s, z31.s, z24.s\n"
- "ld1w { z4.s }, p6/Z, [x23, #2, MUL VL]\n"
- "ld1w { z24.s }, p6/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z4.s }, p6/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z24.s }, p6/Z, [x22, #3, MUL VL]\n"
".inst 0x648ab8f5 // bfcvtnt z21.h, p6/M, z7.s\n"
"zip1 z7.s, z8.s, z11.s\n"
"zip2 z11.s, z8.s, z11.s\n"
- "ld1w { z8.s }, p6/Z, [x23, #4, MUL VL]\n"
+ "ld1w { z8.s }, p6/Z, [x22, #4, MUL VL]\n"
".inst 0x648abbec // bfcvtnt z12.h, p6/M, z31.s\n"
- "ld1w { z31.s }, p6/Z, [x23, #5, MUL VL]\n"
+ "ld1w { z31.s }, p6/Z, [x22, #5, MUL VL]\n"
".inst 0x648ab8ee // bfcvtnt z14.h, p6/M, z7.s\n"
- "ld1w { z7.s }, p6/Z, [x23, #6, MUL VL]\n"
+ "ld1w { z7.s }, p6/Z, [x22, #6, MUL VL]\n"
".inst 0x648ab96a // bfcvtnt z10.h, p6/M, z11.s\n"
"zip1 z11.s, z22.s, z4.s\n"
"zip2 z4.s, z22.s, z4.s\n"
- "ld1w { z22.s }, p6/Z, [x23, #7, MUL VL]\n"
- "addvl x23, x23, #12\n"
+ "ld1w { z22.s }, p6/Z, [x22, #7, MUL VL]\n"
+ "addvl x22, x22, #12\n"
".inst 0x648ab96f // bfcvtnt z15.h, p6/M, z11.s\n"
- "ld1w { z11.s }, p6/Z, [x28, #-2, MUL VL]\n"
+ "ld1w { z11.s }, p6/Z, [x27, #-2, MUL VL]\n"
".inst 0x648ab883 // bfcvtnt z3.h, p6/M, z4.s\n"
"zip1 z4.s, z28.s, z24.s\n"
"zip2 z24.s, z28.s, z24.s\n"
- "ld1w { z28.s }, p6/Z, [x28, #-1, MUL VL]\n"
+ "ld1w { z28.s }, p6/Z, [x27, #-1, MUL VL]\n"
".inst 0x648ab892 // bfcvtnt z18.h, p6/M, z4.s\n"
- "ld1w { z4.s }, p6/Z, [x27, #-4, MUL VL]\n"
+ "ld1w { z4.s }, p6/Z, [x26, #-4, MUL VL]\n"
".inst 0x648abb06 // bfcvtnt z6.h, p6/M, z24.s\n"
"zip1 z24.s, z25.s, z2.s\n"
"zip2 z25.s, z25.s, z2.s\n"
@@ -156,7 +156,7 @@ void sve_transpose_interleave_12VL_2x4_fp32bf16(bfloat16 *out, const float *in,
"zip1 z29.s, z27.s, z8.s\n"
".inst 0x658ab8a5 // bfcvt z5.h, p6/M, z5.s\n"
"zip2 z27.s, z27.s, z8.s\n"
- "ld1w { z8.s }, p6/Z, [x27, #-3, MUL VL]\n"
+ "ld1w { z8.s }, p6/Z, [x26, #-3, MUL VL]\n"
".inst 0x658abb18 // bfcvt z24.h, p6/M, z24.s\n"
".inst 0x658abb39 // bfcvt z25.h, p6/M, z25.s\n"
".inst 0x658ab842 // bfcvt z2.h, p6/M, z2.s\n"
@@ -168,145 +168,145 @@ void sve_transpose_interleave_12VL_2x4_fp32bf16(bfloat16 *out, const float *in,
".inst 0x658ab821 // bfcvt z1.h, p6/M, z1.s\n"
".inst 0x658ab800 // bfcvt z0.h, p6/M, z0.s\n"
".inst 0x648abbb1 // bfcvtnt z17.h, p6/M, z29.s\n"
- "ld1w { z29.s }, p6/Z, [x26, #-2, MUL VL]\n"
+ "ld1w { z29.s }, p6/Z, [x25, #-2, MUL VL]\n"
".inst 0x648abb65 // bfcvtnt z5.h, p6/M, z27.s\n"
"zip1 z27.s, z13.s, z31.s\n"
"zip2 z31.s, z13.s, z31.s\n"
- "ld1w { z13.s }, p6/Z, [x26, #-1, MUL VL]\n"
+ "ld1w { z13.s }, p6/Z, [x25, #-1, MUL VL]\n"
".inst 0x648abb78 // bfcvtnt z24.h, p6/M, z27.s\n"
- "ld1w { z27.s }, p6/Z, [x23, #-4, MUL VL]\n"
+ "ld1w { z27.s }, p6/Z, [x22, #-4, MUL VL]\n"
".inst 0x648abbf9 // bfcvtnt z25.h, p6/M, z31.s\n"
"zip1 z31.s, z26.s, z7.s\n"
"zip2 z26.s, z26.s, z7.s\n"
- "ld1w { z7.s }, p6/Z, [x23, #-3, MUL VL]\n"
+ "ld1w { z7.s }, p6/Z, [x22, #-3, MUL VL]\n"
".inst 0x648abbe2 // bfcvtnt z2.h, p6/M, z31.s\n"
- "ld1w { z31.s }, p6/Z, [x27, #-2, MUL VL]\n"
+ "ld1w { z31.s }, p6/Z, [x26, #-2, MUL VL]\n"
".inst 0x648abb54 // bfcvtnt z20.h, p6/M, z26.s\n"
"zip1 z26.s, z9.s, z22.s\n"
"zip2 z9.s, z9.s, z22.s\n"
- "ld1w { z22.s }, p6/Z, [x27, #-1, MUL VL]\n"
+ "ld1w { z22.s }, p6/Z, [x26, #-1, MUL VL]\n"
".inst 0x648abb53 // bfcvtnt z19.h, p6/M, z26.s\n"
- "ld1w { z26.s }, p6/Z, [x23, #-2, MUL VL]\n"
+ "ld1w { z26.s }, p6/Z, [x22, #-2, MUL VL]\n"
".inst 0x648ab930 // bfcvtnt z16.h, p6/M, z9.s\n"
- "ld1w { z9.s }, p6/Z, [x23, #-1, MUL VL]\n"
- "st1h { z21.h }, p6, [x21]\n"
+ "ld1w { z9.s }, p6/Z, [x22, #-1, MUL VL]\n"
+ "st1h { z21.h }, p6, [x20]\n"
"zip1 z21.s, z4.s, z27.s\n"
"zip2 z27.s, z4.s, z27.s\n"
"zip1 z4.s, z8.s, z7.s\n"
"zip2 z8.s, z8.s, z7.s\n"
- "st1h { z12.h }, p6, [x21, #1, MUL VL]\n"
+ "st1h { z12.h }, p6, [x20, #1, MUL VL]\n"
"zip1 z7.s, z11.s, z29.s\n"
"zip2 z11.s, z11.s, z29.s\n"
- "st1h { z14.h }, p6, [x21, #2, MUL VL]\n"
+ "st1h { z14.h }, p6, [x20, #2, MUL VL]\n"
"zip1 z29.s, z28.s, z13.s\n"
"zip2 z12.s, z28.s, z13.s\n"
- "st1h { z10.h }, p6, [x21, #3, MUL VL]\n"
- "st1h { z15.h }, p6, [x21, #4, MUL VL]\n"
+ "st1h { z10.h }, p6, [x20, #3, MUL VL]\n"
+ "st1h { z15.h }, p6, [x20, #4, MUL VL]\n"
".inst 0x648abab7 // bfcvtnt z23.h, p6/M, z21.s\n"
".inst 0x648abb7e // bfcvtnt z30.h, p6/M, z27.s\n"
- "st1h { z3.h }, p6, [x21, #5, MUL VL]\n"
+ "st1h { z3.h }, p6, [x20, #5, MUL VL]\n"
".inst 0x648ab881 // bfcvtnt z1.h, p6/M, z4.s\n"
".inst 0x648ab900 // bfcvtnt z0.h, p6/M, z8.s\n"
- "st1h { z18.h }, p6, [x21, #6, MUL VL]\n"
+ "st1h { z18.h }, p6, [x20, #6, MUL VL]\n"
".inst 0x658ab8e8 // bfcvt z8.h, p6/M, z7.s\n"
"zip1 z27.s, z31.s, z26.s\n"
- "st1h { z6.h }, p6, [x21, #7, MUL VL]\n"
- "addvl x21, x21, #12\n"
+ "st1h { z6.h }, p6, [x20, #7, MUL VL]\n"
+ "addvl x20, x20, #12\n"
".inst 0x658ab96e // bfcvt z14.h, p6/M, z11.s\n"
"zip2 z28.s, z31.s, z26.s\n"
".inst 0x658abbbd // bfcvt z29.h, p6/M, z29.s\n"
"zip1 z21.s, z22.s, z9.s\n"
- "st1h { z17.h }, p6, [x21, #-4, MUL VL]\n"
+ "st1h { z17.h }, p6, [x20, #-4, MUL VL]\n"
".inst 0x658ab992 // bfcvt z18.h, p6/M, z12.s\n"
"zip2 z17.s, z22.s, z9.s\n"
- "st1h { z5.h }, p6, [x21, #-3, MUL VL]\n"
- "st1h { z24.h }, p6, [x21, #-2, MUL VL]\n"
+ "st1h { z5.h }, p6, [x20, #-3, MUL VL]\n"
+ "st1h { z24.h }, p6, [x20, #-2, MUL VL]\n"
".inst 0x648abb68 // bfcvtnt z8.h, p6/M, z27.s\n"
".inst 0x648abb8e // bfcvtnt z14.h, p6/M, z28.s\n"
- "st1h { z25.h }, p6, [x21, #-1, MUL VL]\n"
+ "st1h { z25.h }, p6, [x20, #-1, MUL VL]\n"
".inst 0x648ababd // bfcvtnt z29.h, p6/M, z21.s\n"
".inst 0x648aba32 // bfcvtnt z18.h, p6/M, z17.s\n"
- "st1h { z2.h }, p6, [x20]\n"
- "st1h { z20.h }, p6, [x20, #1, MUL VL]\n"
- "st1h { z19.h }, p6, [x20, #2, MUL VL]\n"
- "st1h { z16.h }, p6, [x20, #3, MUL VL]\n"
- "st1h { z23.h }, p6, [x20, #4, MUL VL]\n"
- "st1h { z30.h }, p6, [x20, #5, MUL VL]\n"
- "st1h { z1.h }, p6, [x20, #6, MUL VL]\n"
- "st1h { z0.h }, p6, [x20, #7, MUL VL]\n"
- "addvl x20, x20, #12\n"
- "st1h { z8.h }, p6, [x20, #-4, MUL VL]\n"
- "st1h { z14.h }, p6, [x20, #-3, MUL VL]\n"
- "st1h { z29.h }, p6, [x20, #-2, MUL VL]\n"
- "st1h { z18.h }, p6, [x20, #-1, MUL VL]\n"
+ "st1h { z2.h }, p6, [x19]\n"
+ "st1h { z20.h }, p6, [x19, #1, MUL VL]\n"
+ "st1h { z19.h }, p6, [x19, #2, MUL VL]\n"
+ "st1h { z16.h }, p6, [x19, #3, MUL VL]\n"
+ "st1h { z23.h }, p6, [x19, #4, MUL VL]\n"
+ "st1h { z30.h }, p6, [x19, #5, MUL VL]\n"
+ "st1h { z1.h }, p6, [x19, #6, MUL VL]\n"
+ "st1h { z0.h }, p6, [x19, #7, MUL VL]\n"
+ "addvl x19, x19, #12\n"
+ "st1h { z8.h }, p6, [x19, #-4, MUL VL]\n"
+ "st1h { z14.h }, p6, [x19, #-3, MUL VL]\n"
+ "st1h { z29.h }, p6, [x19, #-2, MUL VL]\n"
+ "st1h { z18.h }, p6, [x19, #-1, MUL VL]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x25, 5f\n"
+ "cbz x24, 5f\n"
"4:" // Main row loop: Column loop
- "mov x20, x25\n"
- "whilelt p5.s, XZR, x20\n"
- "ld1w { z22.s }, p5/Z, [x28]\n"
- "ld1w { z21.s }, p5/Z, [x26]\n"
- "decw x20\n"
- "whilelt p4.s, XZR, x20\n"
- "ld1w { z20.s }, p4/Z, [x28, #1, MUL VL]\n"
- "ld1w { z19.s }, p4/Z, [x26, #1, MUL VL]\n"
- "decw x20\n"
- "whilelt p3.s, XZR, x20\n"
- "ld1w { z18.s }, p3/Z, [x28, #2, MUL VL]\n"
- "ld1w { z17.s }, p3/Z, [x26, #2, MUL VL]\n"
- "decw x20\n"
- "whilelt p2.s, XZR, x20\n"
- "ld1w { z30.s }, p2/Z, [x28, #3, MUL VL]\n"
- "ld1w { z16.s }, p2/Z, [x26, #3, MUL VL]\n"
- "decw x20\n"
- "whilelt p1.s, XZR, x20\n"
- "ld1w { z13.s }, p1/Z, [x28, #4, MUL VL]\n"
- "ld1w { z29.s }, p5/Z, [x27]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z12.s }, p0/Z, [x28, #5, MUL VL]\n"
- "ld1w { z28.s }, p4/Z, [x27, #1, MUL VL]\n"
- "ld1w { z11.s }, p3/Z, [x27, #2, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x27, #3, MUL VL]\n"
+ "mov x19, x24\n"
+ "whilelt p5.s, XZR, x19\n"
+ "ld1w { z22.s }, p5/Z, [x27]\n"
+ "ld1w { z21.s }, p5/Z, [x25]\n"
+ "decw x19\n"
+ "whilelt p4.s, XZR, x19\n"
+ "ld1w { z20.s }, p4/Z, [x27, #1, MUL VL]\n"
+ "ld1w { z19.s }, p4/Z, [x25, #1, MUL VL]\n"
+ "decw x19\n"
+ "whilelt p3.s, XZR, x19\n"
+ "ld1w { z18.s }, p3/Z, [x27, #2, MUL VL]\n"
+ "ld1w { z17.s }, p3/Z, [x25, #2, MUL VL]\n"
+ "decw x19\n"
+ "whilelt p2.s, XZR, x19\n"
+ "ld1w { z30.s }, p2/Z, [x27, #3, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x25, #3, MUL VL]\n"
+ "decw x19\n"
+ "whilelt p1.s, XZR, x19\n"
+ "ld1w { z13.s }, p1/Z, [x27, #4, MUL VL]\n"
+ "ld1w { z29.s }, p5/Z, [x26]\n"
+ "decw x19\n"
+ "whilelt p0.s, XZR, x19\n"
+ "ld1w { z12.s }, p0/Z, [x27, #5, MUL VL]\n"
+ "ld1w { z28.s }, p4/Z, [x26, #1, MUL VL]\n"
+ "ld1w { z11.s }, p3/Z, [x26, #2, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x26, #3, MUL VL]\n"
"zip1 z27.s, z22.s, z21.s\n"
"zip2 z26.s, z22.s, z21.s\n"
- "ld1w { z9.s }, p1/Z, [x26, #4, MUL VL]\n"
- "ld1w { z8.s }, p0/Z, [x26, #5, MUL VL]\n"
+ "ld1w { z9.s }, p1/Z, [x25, #4, MUL VL]\n"
+ "ld1w { z8.s }, p0/Z, [x25, #5, MUL VL]\n"
"zip1 z25.s, z20.s, z19.s\n"
"zip2 z24.s, z20.s, z19.s\n"
- "ld1w { z23.s }, p5/Z, [x23]\n"
- "ld1w { z22.s }, p4/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z23.s }, p5/Z, [x22]\n"
+ "ld1w { z22.s }, p4/Z, [x22, #1, MUL VL]\n"
"zip1 z21.s, z18.s, z17.s\n"
"zip2 z20.s, z18.s, z17.s\n"
- "ld1w { z19.s }, p3/Z, [x23, #2, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z19.s }, p3/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x22, #3, MUL VL]\n"
"zip1 z17.s, z30.s, z16.s\n"
"zip2 z16.s, z30.s, z16.s\n"
- "ld1w { z7.s }, p1/Z, [x27, #4, MUL VL]\n"
- "ld1w { z6.s }, p0/Z, [x27, #5, MUL VL]\n"
+ "ld1w { z7.s }, p1/Z, [x26, #4, MUL VL]\n"
+ "ld1w { z6.s }, p0/Z, [x26, #5, MUL VL]\n"
".inst 0x658abb65 // bfcvt z5.h, p6/M, z27.s\n"
"zip1 z4.s, z29.s, z23.s\n"
- "ld1w { z3.s }, p1/Z, [x23, #4, MUL VL]\n"
- "ld1w { z2.s }, p0/Z, [x23, #5, MUL VL]\n"
+ "ld1w { z3.s }, p1/Z, [x22, #4, MUL VL]\n"
+ "ld1w { z2.s }, p0/Z, [x22, #5, MUL VL]\n"
".inst 0x658abb41 // bfcvt z1.h, p6/M, z26.s\n"
"zip2 z0.s, z29.s, z23.s\n"
".inst 0x658abb3f // bfcvt z31.h, p6/M, z25.s\n"
"zip1 z30.s, z28.s, z22.s\n"
- "mov x20, x22\n"
- "decd x25, ALL, MUL #12\n"
+ "mov x19, x21\n"
+ "decd x24, ALL, MUL #12\n"
".inst 0x658abb1d // bfcvt z29.h, p6/M, z24.s\n"
"zip2 z28.s, z28.s, z22.s\n"
- "cmp x25, #0x0\n"
- "addvl x28, x28, #6\n"
+ "cmp x24, #0x0\n"
+ "addvl x27, x27, #6\n"
".inst 0x658ababb // bfcvt z27.h, p6/M, z21.s\n"
"zip1 z23.s, z11.s, z19.s\n"
- "addvl x27, x27, #6\n"
"addvl x26, x26, #6\n"
+ "addvl x25, x25, #6\n"
".inst 0x658aba9a // bfcvt z26.h, p6/M, z20.s\n"
"zip2 z22.s, z11.s, z19.s\n"
- "addvl x23, x23, #6\n"
- "add x22, x22, %x[out_stride]\n"
+ "addvl x22, x22, #6\n"
+ "add x21, x21, %x[out_stride]\n"
".inst 0x658aba39 // bfcvt z25.h, p6/M, z17.s\n"
"zip1 z21.s, z10.s, z18.s\n"
".inst 0x658aba18 // bfcvt z24.h, p6/M, z16.s\n"
@@ -317,37 +317,37 @@ void sve_transpose_interleave_12VL_2x4_fp32bf16(bfloat16 *out, const float *in,
"zip2 z16.s, z12.s, z8.s\n"
".inst 0x648ab885 // bfcvtnt z5.h, p6/M, z4.s\n"
".inst 0x648ab801 // bfcvtnt z1.h, p6/M, z0.s\n"
- "st1h { z5.h }, p6, [x20]\n"
+ "st1h { z5.h }, p6, [x19]\n"
".inst 0x648abbdf // bfcvtnt z31.h, p6/M, z30.s\n"
".inst 0x648abb9d // bfcvtnt z29.h, p6/M, z28.s\n"
- "st1h { z1.h }, p6, [x20, #1, MUL VL]\n"
+ "st1h { z1.h }, p6, [x19, #1, MUL VL]\n"
".inst 0x648abafb // bfcvtnt z27.h, p6/M, z23.s\n"
".inst 0x648abada // bfcvtnt z26.h, p6/M, z22.s\n"
- "st1h { z31.h }, p6, [x20, #2, MUL VL]\n"
+ "st1h { z31.h }, p6, [x19, #2, MUL VL]\n"
".inst 0x648abab9 // bfcvtnt z25.h, p6/M, z21.s\n"
".inst 0x648aba98 // bfcvtnt z24.h, p6/M, z20.s\n"
- "st1h { z29.h }, p6, [x20, #3, MUL VL]\n"
+ "st1h { z29.h }, p6, [x19, #3, MUL VL]\n"
".inst 0x658aba77 // bfcvt z23.h, p6/M, z19.s\n"
"zip1 z22.s, z7.s, z3.s\n"
- "st1h { z27.h }, p6, [x20, #4, MUL VL]\n"
+ "st1h { z27.h }, p6, [x19, #4, MUL VL]\n"
".inst 0x658aba55 // bfcvt z21.h, p6/M, z18.s\n"
"zip2 z20.s, z7.s, z3.s\n"
- "st1h { z26.h }, p6, [x20, #5, MUL VL]\n"
+ "st1h { z26.h }, p6, [x19, #5, MUL VL]\n"
".inst 0x658aba33 // bfcvt z19.h, p6/M, z17.s\n"
"zip1 z18.s, z6.s, z2.s\n"
- "st1h { z25.h }, p6, [x20, #6, MUL VL]\n"
+ "st1h { z25.h }, p6, [x19, #6, MUL VL]\n"
".inst 0x658aba11 // bfcvt z17.h, p6/M, z16.s\n"
"zip2 z16.s, z6.s, z2.s\n"
- "st1h { z24.h }, p6, [x20, #7, MUL VL]\n"
- "addvl x20, x20, #12\n"
+ "st1h { z24.h }, p6, [x19, #7, MUL VL]\n"
+ "addvl x19, x19, #12\n"
".inst 0x648abad7 // bfcvtnt z23.h, p6/M, z22.s\n"
".inst 0x648aba95 // bfcvtnt z21.h, p6/M, z20.s\n"
- "st1h { z23.h }, p6, [x20, #-4, MUL VL]\n"
+ "st1h { z23.h }, p6, [x19, #-4, MUL VL]\n"
".inst 0x648aba53 // bfcvtnt z19.h, p6/M, z18.s\n"
".inst 0x648aba11 // bfcvtnt z17.h, p6/M, z16.s\n"
- "st1h { z21.h }, p6, [x20, #-3, MUL VL]\n"
- "st1h { z19.h }, p6, [x20, #-2, MUL VL]\n"
- "st1h { z17.h }, p6, [x20, #-1, MUL VL]\n"
+ "st1h { z21.h }, p6, [x19, #-3, MUL VL]\n"
+ "st1h { z19.h }, p6, [x19, #-2, MUL VL]\n"
+ "st1h { z17.h }, p6, [x19, #-1, MUL VL]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
"cmp %x[height], #0x1\n"
@@ -355,7 +355,7 @@ void sve_transpose_interleave_12VL_2x4_fp32bf16(bfloat16 *out, const float *in,
"bge 1b\n"
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
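
The bfcvt/bfcvtnt pairs in the kernel above narrow two fp32 vectors into one bf16 vector: bfcvt writes the bottom 16-bit half of each 32-bit container and bfcvtnt the top half without disturbing the bottom, which is why each output register is produced by a pair of conversions over zipped inputs. As a rough scalar model of the narrowing itself — a minimal sketch assuming round-to-nearest-even, the usual software convention; the exact hardware rounding is architecture-defined:

#include <cstdint>
#include <cstring>

// Scalar sketch of fp32 -> bf16 narrowing with round-to-nearest-even.
static uint16_t fp32_to_bf16_rne(float f)
{
    uint32_t bits;
    std::memcpy(&bits, &f, sizeof(bits)); // type-pun safely
    // Keep NaNs as NaNs: truncate and force a quiet payload bit.
    if ((bits & 0x7fffffffu) > 0x7f800000u)
        return static_cast<uint16_t>((bits >> 16) | 0x0040u);
    // Add 0x7fff plus the LSB of the kept half so ties round to even.
    const uint32_t rounding = 0x7fffu + ((bits >> 16) & 1u);
    return static_cast<uint16_t>((bits + rounding) >> 16);
}
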
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_1VL.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_1VL.hpp
index 74fce4ddf9..33694dfb0c 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_1VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_1VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -34,114 +34,114 @@ void sve_transpose_interleave_1VL(uint32_t *out, const uint32_t *in, size_t widt
size_t out_stride = 1 * height * get_vector_length<uint8_t>();
__asm__ __volatile__(
- "cmp %x[height], #0x4\n"
"ptrue p1.b\n"
+ "cmp %x[height], #0x4\n"
"blt 6f\n"
"1:" // Main row loop: Head
- "mov x26, %x[in]\n"
- "mov x25, %x[width]\n"
- "cntw x24, ALL, MUL #2\n"
- "add x23, x26, %x[in_stride]\n"
- "add x21, x23, %x[in_stride]\n"
- "add x20, x21, %x[in_stride]\n"
- "cmp x25, x24\n"
- "add %x[in], x20, %x[in_stride]\n"
- "mov x22, %x[out]\n"
+ "mov x25, %x[in]\n"
+ "mov x24, %x[out]\n"
+ "add x23, x25, %x[in_stride]\n"
+ "add x22, x23, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add %x[in], x21, %x[in_stride]\n"
"sub %x[height], %x[height], #0x4\n"
+ "mov x20, %x[width]\n"
+ "cntw x19, ALL, MUL #2\n"
+ "cmp x20, x19\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "sub x25, x25, x24\n"
- "ld1w { z23.s }, p1/Z, [x26]\n"
- "ld1w { z22.s }, p1/Z, [x26, #1, MUL VL]\n"
- "cmp x25, x24\n"
+ "ld1w { z23.s }, p1/Z, [x25]\n"
+ "sub x20, x20, x19\n"
+ "ld1w { z22.s }, p1/Z, [x25, #1, MUL VL]\n"
+ "addvl x25, x25, #2\n"
"ld1w { z21.s }, p1/Z, [x23]\n"
+ "cmp x20, x19\n"
"ld1w { z20.s }, p1/Z, [x23, #1, MUL VL]\n"
- "addvl x26, x26, #2\n"
"addvl x23, x23, #2\n"
- "ld1w { z19.s }, p1/Z, [x21]\n"
- "ld1w { z18.s }, p1/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z19.s }, p1/Z, [x22]\n"
+ "ld1w { z18.s }, p1/Z, [x22, #1, MUL VL]\n"
+ "addvl x22, x22, #2\n"
+ "ld1w { z17.s }, p1/Z, [x21]\n"
+ "ld1w { z16.s }, p1/Z, [x21, #1, MUL VL]\n"
"addvl x21, x21, #2\n"
- "ld1w { z17.s }, p1/Z, [x20]\n"
- "ld1w { z16.s }, p1/Z, [x20, #1, MUL VL]\n"
- "st1w { z23.s }, p1, [x22]\n"
- "addvl x20, x20, #2\n"
- "st1w { z21.s }, p1, [x22, #1, MUL VL]\n"
- "st1w { z19.s }, p1, [x22, #2, MUL VL]\n"
- "st1w { z17.s }, p1, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1w { z22.s }, p1, [x22]\n"
- "st1w { z20.s }, p1, [x22, #1, MUL VL]\n"
- "st1w { z18.s }, p1, [x22, #2, MUL VL]\n"
- "st1w { z16.s }, p1, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1w { z23.s }, p1, [x24]\n"
+ "st1w { z21.s }, p1, [x24, #1, MUL VL]\n"
+ "st1w { z19.s }, p1, [x24, #2, MUL VL]\n"
+ "st1w { z17.s }, p1, [x24, #3, MUL VL]\n"
+ "add x24, x24, %x[out_stride]\n"
+ "st1w { z22.s }, p1, [x24]\n"
+ "st1w { z20.s }, p1, [x24, #1, MUL VL]\n"
+ "st1w { z18.s }, p1, [x24, #2, MUL VL]\n"
+ "st1w { z16.s }, p1, [x24, #3, MUL VL]\n"
+ "add x24, x24, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x25, 5f\n"
+ "cbz x20, 5f\n"
"4:" // Main row loop: Column loop
- "whilelt p0.s, XZR, x25\n"
- "decw x25\n"
- "ld1w { z19.s }, p0/Z, [x26]\n"
+ "whilelt p0.s, XZR, x20\n"
+ "ld1w { z19.s }, p0/Z, [x25]\n"
+ "addvl x25, x25, #1\n"
"ld1w { z18.s }, p0/Z, [x23]\n"
- "cmp x25, #0x0\n"
- "addvl x26, x26, #1\n"
- "ld1w { z17.s }, p0/Z, [x21]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
"addvl x23, x23, #1\n"
+ "ld1w { z17.s }, p0/Z, [x22]\n"
+ "addvl x22, x22, #1\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
"addvl x21, x21, #1\n"
- "st1w { z19.s }, p1, [x22]\n"
- "addvl x20, x20, #1\n"
- "st1w { z18.s }, p1, [x22, #1, MUL VL]\n"
- "st1w { z17.s }, p1, [x22, #2, MUL VL]\n"
- "st1w { z16.s }, p1, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1w { z19.s }, p1, [x24]\n"
+ "decw x20\n"
+ "st1w { z18.s }, p1, [x24, #1, MUL VL]\n"
+ "cmp x20, #0x0\n"
+ "st1w { z17.s }, p1, [x24, #2, MUL VL]\n"
+ "st1w { z16.s }, p1, [x24, #3, MUL VL]\n"
+ "add x24, x24, %x[out_stride]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp %x[height], #0x4\n"
"addvl %x[out], %x[out], #4\n"
+ "cmp %x[height], #0x4\n"
"bge 1b\n"
"cbz %x[height], 12f\n"
"6:" // Main loop skip
"7:" // Tail row loop: Head
- "mov x21, %x[width]\n"
- "cntw x20, ALL, MUL #2\n"
- "mov x26, %x[in]\n"
- "cmp x21, x20\n"
- "add %x[in], x26, %x[in_stride]\n"
- "mov x22, %x[out]\n"
+ "mov x25, %x[in]\n"
+ "mov x24, %x[out]\n"
+ "add %x[in], x25, %x[in_stride]\n"
"sub %x[height], %x[height], #0x1\n"
+ "mov x20, %x[width]\n"
+ "cntw x19, ALL, MUL #2\n"
+ "cmp x20, x19\n"
"blt 9f\n"
"8:" // Tail row loop: Unroll column loop
- "sub x21, x21, x20\n"
- "ld1w { z17.s }, p1/Z, [x26]\n"
- "ld1w { z16.s }, p1/Z, [x26, #1, MUL VL]\n"
- "st1w { z17.s }, p1, [x22]\n"
- "add x22, x22, %x[out_stride]\n"
- "cmp x21, x20\n"
- "st1w { z16.s }, p1, [x22]\n"
- "addvl x26, x26, #2\n"
- "add x22, x22, %x[out_stride]\n"
+ "ld1w { z17.s }, p1/Z, [x25]\n"
+ "sub x20, x20, x19\n"
+ "ld1w { z16.s }, p1/Z, [x25, #1, MUL VL]\n"
+ "addvl x25, x25, #2\n"
+ "cmp x20, x19\n"
+ "st1w { z17.s }, p1, [x24]\n"
+ "add x24, x24, %x[out_stride]\n"
+ "st1w { z16.s }, p1, [x24]\n"
+ "add x24, x24, %x[out_stride]\n"
"bge 8b\n"
"9:" // Tail row loop: Unroll column loop skip
- "cbz x21, 11f\n"
+ "cbz x20, 11f\n"
"10:" // Tail row loop: Column loop
- "whilelt p0.s, XZR, x21\n"
- "decw x21\n"
- "ld1w { z16.s }, p0/Z, [x26]\n"
- "st1w { z16.s }, p1, [x22]\n"
- "cmp x21, #0x0\n"
- "addvl x26, x26, #1\n"
- "add x22, x22, %x[out_stride]\n"
+ "whilelt p0.s, XZR, x20\n"
+ "ld1w { z16.s }, p0/Z, [x25]\n"
+ "addvl x25, x25, #1\n"
+ "decw x20\n"
+ "st1w { z16.s }, p1, [x24]\n"
+ "add x24, x24, %x[out_stride]\n"
+ "cmp x20, #0x0\n"
"bgt 10b\n"
"11:" // Tail row loop: Column loop skip
- "cmp %x[height], #0x1\n"
"addvl %x[out], %x[out], #1\n"
+ "cmp %x[height], #0x1\n"
"bge 7b\n"
"12:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23"
+ : "cc", "memory", "p0", "p1", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23"
);
}
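
The effect of this kernel can be summarised by a scalar reference: rows are consumed four at a time, each vl-wide column chunk of the group is written row after row, and successive column chunks land out_stride elements apart in the output. A hedged sketch, assuming element-unit strides and a vl parameter standing in for the SVE vector length in 32-bit lanes — illustrative assumptions, not the library's published contract:

#include <algorithm>
#include <cstddef>
#include <cstdint>

// Scalar model of a 1VL transpose-interleave over 32-bit elements.
void transpose_interleave_1vl_ref(uint32_t *out, const uint32_t *in,
                                  size_t width, size_t in_stride,
                                  size_t out_stride, size_t height,
                                  size_t vl)
{
    while (height > 0)
    {
        const size_t rows = std::min<size_t>(4, height);
        uint32_t *out_block = out;
        for (size_t col = 0; col < width; col += vl)
        {
            const size_t n = std::min(vl, width - col);
            for (size_t r = 0; r < rows; r++)
                for (size_t i = 0; i < vl; i++)
                    // Tail lanes come from zeroing predicated loads,
                    // so they are stored as zero.
                    out_block[r * vl + i] =
                        (i < n) ? in[r * in_stride + col + i] : 0;
            out_block += out_stride;   // next column chunk, same rows
        }
        in  += rows * in_stride;
        out += rows * vl;              // mirrors "addvl %x[out], ..., #rows"
        height -= rows;
    }
}
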
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_1VL_1x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_1VL_1x4.hpp
index a034be5e74..e4fb7ea4c1 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_1VL_1x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_1VL_1x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -40,240 +40,242 @@ void sve_transpose_interleave_1VL_1x4(uint8_t *out, const uint8_t *in, size_t wi
size_t out_stride = 1 * roundup<size_t>(height, 4) * get_vector_length<uint32_t>();
__asm__ __volatile__(
- "cmp %x[height], #0x8\n"
"ptrue p1.b\n"
+ "cmp %x[height], #0x8\n"
"blt 6f\n"
"1:" // Main row loop: Head
- "mov x10, %x[in]\n"
- "add x9, x10, %x[in_stride]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
+ "mov x9, %x[in]\n"
+ "mov x28, %x[out]\n"
+ "add x27, x9, %x[in_stride]\n"
"add x26, x27, %x[in_stride]\n"
- "mov x25, %x[width]\n"
- "cntb x24, ALL, MUL #2\n"
- "add x23, x26, %x[in_stride]\n"
- "add x21, x23, %x[in_stride]\n"
- "add x20, x21, %x[in_stride]\n"
- "cmp x25, x24\n"
- "add %x[in], x20, %x[in_stride]\n"
- "mov x22, %x[out]\n"
+ "add x25, x26, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "add x23, x24, %x[in_stride]\n"
+ "add x22, x23, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add %x[in], x21, %x[in_stride]\n"
"sub %x[height], %x[height], #0x8\n"
+ "mov x20, %x[width]\n"
+ "cntb x19, ALL, MUL #2\n"
+ "cmp x20, x19\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1b { z20.b }, p1/Z, [x10]\n"
- "ld1b { z18.b }, p1/Z, [x9]\n"
- "sub x25, x25, x24\n"
- "cmp x25, x24\n"
- "ld1b { z17.b }, p1/Z, [x28]\n"
- "ld1b { z16.b }, p1/Z, [x27]\n"
- "zip1 z25.b, z20.b, z17.b\n"
- "zip1 z24.b, z18.b, z16.b\n"
- "ld1b { z21.b }, p1/Z, [x26]\n"
- "ld1b { z19.b }, p1/Z, [x23]\n"
- "zip2 z2.b, z20.b, z17.b\n"
- "zip2 z1.b, z18.b, z16.b\n"
- "ld1b { z18.b }, p1/Z, [x21]\n"
- "ld1b { z17.b }, p1/Z, [x20]\n"
- "zip1 z20.b, z21.b, z18.b\n"
- "zip1 z16.b, z19.b, z17.b\n"
- "ld1b { z0.b }, p1/Z, [x10, #1, MUL VL]\n"
- "ld1b { z31.b }, p1/Z, [x9, #1, MUL VL]\n"
- "zip2 z30.b, z21.b, z18.b\n"
- "zip2 z29.b, z19.b, z17.b\n"
- "ld1b { z23.b }, p1/Z, [x28, #1, MUL VL]\n"
- "ld1b { z22.b }, p1/Z, [x27, #1, MUL VL]\n"
- "zip1 z19.b, z25.b, z24.b\n"
- "zip1 z18.b, z20.b, z16.b\n"
- "ld1b { z28.b }, p1/Z, [x26, #1, MUL VL]\n"
- "ld1b { z27.b }, p1/Z, [x23, #1, MUL VL]\n"
- "zip2 z17.b, z25.b, z24.b\n"
- "zip2 z16.b, z20.b, z16.b\n"
- "ld1b { z21.b }, p1/Z, [x21, #1, MUL VL]\n"
- "ld1b { z20.b }, p1/Z, [x20, #1, MUL VL]\n"
- "st1b { z19.b }, p1, [x22]\n"
- "zip1 z26.b, z0.b, z23.b\n"
- "st1b { z18.b }, p1, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip1 z25.b, z31.b, z22.b\n"
- "zip1 z24.b, z28.b, z21.b\n"
- "st1b { z17.b }, p1, [x22]\n"
- "zip1 z19.b, z27.b, z20.b\n"
- "zip1 z17.b, z2.b, z1.b\n"
- "addvl x10, x10, #2\n"
- "st1b { z16.b }, p1, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip1 z18.b, z30.b, z29.b\n"
- "zip2 z16.b, z2.b, z1.b\n"
- "st1b { z17.b }, p1, [x22]\n"
- "zip2 z17.b, z30.b, z29.b\n"
- "zip2 z23.b, z0.b, z23.b\n"
+ "ld1b { z17.b }, p1/Z, [x9]\n"
+ "sub x20, x20, x19\n"
+ "ld1b { z3.b }, p1/Z, [x9, #1, MUL VL]\n"
"addvl x9, x9, #2\n"
- "st1b { z18.b }, p1, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip2 z22.b, z31.b, z22.b\n"
- "zip2 z21.b, z28.b, z21.b\n"
- "st1b { z16.b }, p1, [x22]\n"
- "zip2 z20.b, z27.b, z20.b\n"
- "zip1 z16.b, z26.b, z25.b\n"
- "addvl x28, x28, #2\n"
- "st1b { z17.b }, p1, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip1 z18.b, z24.b, z19.b\n"
- "zip2 z17.b, z26.b, z25.b\n"
- "st1b { z16.b }, p1, [x22]\n"
- "zip2 z16.b, z24.b, z19.b\n"
- "zip1 z19.b, z23.b, z22.b\n"
+ "ld1b { z21.b }, p1/Z, [x27]\n"
+ "cmp x20, x19\n"
+ "ld1b { z2.b }, p1/Z, [x27, #1, MUL VL]\n"
"addvl x27, x27, #2\n"
- "st1b { z18.b }, p1, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip1 z18.b, z21.b, z20.b\n"
+ "ld1b { z16.b }, p1/Z, [x26]\n"
+ "zip1 z20.b, z17.b, z16.b\n"
+ "ld1b { z1.b }, p1/Z, [x26, #1, MUL VL]\n"
"addvl x26, x26, #2\n"
- "st1b { z17.b }, p1, [x22]\n"
+ "zip2 z19.b, z17.b, z16.b\n"
+ "ld1b { z17.b }, p1/Z, [x25]\n"
+ "ld1b { z0.b }, p1/Z, [x25, #1, MUL VL]\n"
+ "zip1 z31.b, z3.b, z1.b\n"
+ "ld1b { z30.b }, p1/Z, [x24]\n"
+ "addvl x25, x25, #2\n"
+ "zip1 z16.b, z21.b, z17.b\n"
+ "ld1b { z29.b }, p1/Z, [x24, #1, MUL VL]\n"
+ "addvl x24, x24, #2\n"
+ "zip1 z18.b, z20.b, z16.b\n"
+ "ld1b { z28.b }, p1/Z, [x23]\n"
+ "zip2 z27.b, z20.b, z16.b\n"
+ "ld1b { z26.b }, p1/Z, [x23, #1, MUL VL]\n"
"addvl x23, x23, #2\n"
+ "zip2 z17.b, z21.b, z17.b\n"
+ "ld1b { z16.b }, p1/Z, [x22]\n"
+ "zip1 z25.b, z2.b, z0.b\n"
+ "ld1b { z24.b }, p1/Z, [x22, #1, MUL VL]\n"
+ "addvl x22, x22, #2\n"
+ "zip1 z23.b, z19.b, z17.b\n"
+ "ld1b { z22.b }, p1/Z, [x21]\n"
+ "zip2 z20.b, z19.b, z17.b\n"
+ "ld1b { z21.b }, p1/Z, [x21, #1, MUL VL]\n"
"addvl x21, x21, #2\n"
- "zip2 z17.b, z23.b, z22.b\n"
- "st1b { z16.b }, p1, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "addvl x20, x20, #2\n"
- "zip2 z16.b, z21.b, z20.b\n"
- "st1b { z19.b }, p1, [x22]\n"
- "st1b { z18.b }, p1, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1b { z17.b }, p1, [x22]\n"
- "st1b { z16.b }, p1, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "zip1 z19.b, z30.b, z16.b\n"
+ "st1b { z18.b }, p1, [x28]\n"
+ "zip2 z18.b, z30.b, z16.b\n"
+ "zip1 z17.b, z28.b, z22.b\n"
+ "zip1 z16.b, z19.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #1, MUL VL]\n"
+ "add x28, x28, %x[out_stride]\n"
+ "zip2 z16.b, z19.b, z17.b\n"
+ "st1b { z27.b }, p1, [x28]\n"
+ "zip2 z17.b, z28.b, z22.b\n"
+ "st1b { z16.b }, p1, [x28, #1, MUL VL]\n"
+ "add x28, x28, %x[out_stride]\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z23.b }, p1, [x28]\n"
+ "zip2 z17.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #1, MUL VL]\n"
+ "add x28, x28, %x[out_stride]\n"
+ "zip1 z16.b, z31.b, z25.b\n"
+ "st1b { z20.b }, p1, [x28]\n"
+ "zip1 z19.b, z29.b, z24.b\n"
+ "st1b { z17.b }, p1, [x28, #1, MUL VL]\n"
+ "add x28, x28, %x[out_stride]\n"
+ "zip1 z18.b, z26.b, z21.b\n"
+ "st1b { z16.b }, p1, [x28]\n"
+ "zip2 z17.b, z31.b, z25.b\n"
+ "zip1 z16.b, z19.b, z18.b\n"
+ "st1b { z16.b }, p1, [x28, #1, MUL VL]\n"
+ "add x28, x28, %x[out_stride]\n"
+ "zip2 z16.b, z19.b, z18.b\n"
+ "st1b { z17.b }, p1, [x28]\n"
+ "zip2 z20.b, z3.b, z1.b\n"
+ "st1b { z16.b }, p1, [x28, #1, MUL VL]\n"
+ "add x28, x28, %x[out_stride]\n"
+ "zip2 z19.b, z2.b, z0.b\n"
+ "zip2 z18.b, z29.b, z24.b\n"
+ "zip1 z16.b, z20.b, z19.b\n"
+ "st1b { z16.b }, p1, [x28]\n"
+ "zip2 z17.b, z26.b, z21.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #1, MUL VL]\n"
+ "add x28, x28, %x[out_stride]\n"
+ "zip2 z16.b, z20.b, z19.b\n"
+ "st1b { z16.b }, p1, [x28]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #1, MUL VL]\n"
+ "add x28, x28, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x25, 5f\n"
+ "cbz x20, 5f\n"
"4:" // Main row loop: Column loop
- "whilelt p0.b, XZR, x25\n"
- "ld1b { z19.b }, p0/Z, [x10]\n"
+ "whilelt p0.b, XZR, x20\n"
"ld1b { z18.b }, p0/Z, [x9]\n"
- "decw x25\n"
- "ld1b { z17.b }, p0/Z, [x28]\n"
- "ld1b { z16.b }, p0/Z, [x27]\n"
- "zip1 z21.b, z19.b, z17.b\n"
- "zip1 z20.b, z18.b, z16.b\n"
- "ld1b { z18.b }, p0/Z, [x26]\n"
- "ld1b { z19.b }, p0/Z, [x23]\n"
- "cmp x25, #0x0\n"
- "incd x10, ALL, MUL #2\n"
- "ld1b { z17.b }, p0/Z, [x21]\n"
- "ld1b { z16.b }, p0/Z, [x20]\n"
- "zip1 z18.b, z18.b, z17.b\n"
- "zip1 z16.b, z19.b, z16.b\n"
"incd x9, ALL, MUL #2\n"
- "incd x28, ALL, MUL #2\n"
- "zip1 z17.b, z21.b, z20.b\n"
- "zip1 z16.b, z18.b, z16.b\n"
+ "ld1b { z17.b }, p0/Z, [x27]\n"
"incd x27, ALL, MUL #2\n"
+ "ld1b { z16.b }, p0/Z, [x26]\n"
+ "zip1 z18.b, z18.b, z16.b\n"
+ "ld1b { z16.b }, p0/Z, [x25]\n"
"incd x26, ALL, MUL #2\n"
- "st1b { z17.b }, p1, [x22]\n"
+ "zip1 z16.b, z17.b, z16.b\n"
+ "ld1b { z17.b }, p0/Z, [x24]\n"
+ "incd x25, ALL, MUL #2\n"
+ "zip1 z19.b, z18.b, z16.b\n"
+ "ld1b { z18.b }, p0/Z, [x23]\n"
+ "incd x24, ALL, MUL #2\n"
+ "ld1b { z16.b }, p0/Z, [x22]\n"
+ "zip1 z17.b, z17.b, z16.b\n"
+ "ld1b { z16.b }, p0/Z, [x21]\n"
"incd x23, ALL, MUL #2\n"
+ "zip1 z16.b, z18.b, z16.b\n"
+ "st1b { z19.b }, p1, [x28]\n"
+ "incd x22, ALL, MUL #2\n"
+ "zip1 z16.b, z17.b, z16.b\n"
+ "st1b { z16.b }, p1, [x28, #1, MUL VL]\n"
"incd x21, ALL, MUL #2\n"
- "st1b { z16.b }, p1, [x22, #1, MUL VL]\n"
- "incd x20, ALL, MUL #2\n"
- "add x22, x22, %x[out_stride]\n"
+ "add x28, x28, %x[out_stride]\n"
+ "decw x20\n"
+ "cmp x20, #0x0\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp %x[height], #0x8\n"
"addvl %x[out], %x[out], #2\n"
+ "cmp %x[height], #0x8\n"
"bge 1b\n"
"cbz %x[height], 12f\n"
"6:" // Main loop skip
+
"7:" // Tail row loop: Head
- "mov x10, %x[in]\n"
- "add x9, x10, %x[in_stride]\n"
- "add x28, x9, %x[in_stride]\n"
- "mov x21, %x[width]\n"
- "cntb x20, ALL, MUL #2\n"
- "add x27, x28, %x[in_stride]\n"
+ "mov x9, %x[in]\n"
+ "mov x28, %x[out]\n"
+ "add x27, x9, %x[in_stride]\n"
+ "add x26, x27, %x[in_stride]\n"
+ "add x25, x26, %x[in_stride]\n"
+ "add %x[in], x25, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x27, %x[in_stride]\n"
- "csel x27, x27, %x[pad_row], GT\n"
- "csel x28, x28, %x[pad_row], GE\n"
+ "csel x25, x25, %x[pad_row], GT\n"
+ "csel x26, x26, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x9, x9, %x[pad_row], GT\n"
- "cmp x21, x20\n"
- "mov x22, %x[out]\n"
+ "csel x27, x27, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x4\n"
+ "mov x20, %x[width]\n"
+ "cntb x19, ALL, MUL #2\n"
+ "cmp x20, x19\n"
"blt 9f\n"
"8:" // Tail row loop: Unroll column loop
- "ld1b { z21.b }, p1/Z, [x10]\n"
- "ld1b { z18.b }, p1/Z, [x9]\n"
- "sub x21, x21, x20\n"
- "cmp x21, x20\n"
- "ld1b { z17.b }, p1/Z, [x28]\n"
- "ld1b { z16.b }, p1/Z, [x27]\n"
- "zip1 z20.b, z21.b, z17.b\n"
- "zip1 z19.b, z18.b, z16.b\n"
- "ld1b { z24.b }, p1/Z, [x10, #1, MUL VL]\n"
- "ld1b { z23.b }, p1/Z, [x9, #1, MUL VL]\n"
- "zip2 z22.b, z21.b, z17.b\n"
- "zip2 z21.b, z18.b, z16.b\n"
- "ld1b { z18.b }, p1/Z, [x28, #1, MUL VL]\n"
- "ld1b { z17.b }, p1/Z, [x27, #1, MUL VL]\n"
- "zip1 z16.b, z20.b, z19.b\n"
- "st1b { z16.b }, p1, [x22]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip2 z16.b, z20.b, z19.b\n"
- "st1b { z16.b }, p1, [x22]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip1 z20.b, z24.b, z18.b\n"
- "zip1 z19.b, z23.b, z17.b\n"
- "addvl x10, x10, #2\n"
+ "ld1b { z19.b }, p1/Z, [x9]\n"
+ "sub x20, x20, x19\n"
+ "ld1b { z18.b }, p1/Z, [x9, #1, MUL VL]\n"
"addvl x9, x9, #2\n"
- "zip1 z16.b, z22.b, z21.b\n"
- "st1b { z16.b }, p1, [x22]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip2 z16.b, z22.b, z21.b\n"
- "st1b { z16.b }, p1, [x22]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip2 z18.b, z24.b, z18.b\n"
- "zip2 z17.b, z23.b, z17.b\n"
- "zip1 z16.b, z20.b, z19.b\n"
- "st1b { z16.b }, p1, [x22]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip2 z16.b, z20.b, z19.b\n"
- "st1b { z16.b }, p1, [x22]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip1 z16.b, z18.b, z17.b\n"
- "addvl x28, x28, #2\n"
- "st1b { z16.b }, p1, [x22]\n"
- "add x22, x22, %x[out_stride]\n"
+ "ld1b { z25.b }, p1/Z, [x27]\n"
+ "cmp x20, x19\n"
+ "ld1b { z24.b }, p1/Z, [x27, #1, MUL VL]\n"
"addvl x27, x27, #2\n"
+ "ld1b { z17.b }, p1/Z, [x26]\n"
+ "zip1 z23.b, z19.b, z17.b\n"
+ "ld1b { z16.b }, p1/Z, [x26, #1, MUL VL]\n"
+ "addvl x26, x26, #2\n"
+ "zip2 z22.b, z19.b, z17.b\n"
+ "ld1b { z21.b }, p1/Z, [x25]\n"
+ "ld1b { z20.b }, p1/Z, [x25, #1, MUL VL]\n"
+ "zip1 z19.b, z18.b, z16.b\n"
+ "addvl x25, x25, #2\n"
+ "zip2 z18.b, z18.b, z16.b\n"
+ "zip1 z17.b, z25.b, z21.b\n"
+ "zip1 z16.b, z23.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28]\n"
+ "add x28, x28, %x[out_stride]\n"
+ "zip2 z16.b, z23.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28]\n"
+ "zip2 z17.b, z25.b, z21.b\n"
+ "add x28, x28, %x[out_stride]\n"
+ "zip1 z16.b, z22.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28]\n"
+ "zip2 z16.b, z22.b, z17.b\n"
+ "add x28, x28, %x[out_stride]\n"
+ "zip1 z17.b, z24.b, z20.b\n"
+ "st1b { z16.b }, p1, [x28]\n"
+ "add x28, x28, %x[out_stride]\n"
+ "zip1 z16.b, z19.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28]\n"
+ "zip2 z16.b, z19.b, z17.b\n"
+ "add x28, x28, %x[out_stride]\n"
+ "zip2 z17.b, z24.b, z20.b\n"
+ "st1b { z16.b }, p1, [x28]\n"
+ "add x28, x28, %x[out_stride]\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28]\n"
"zip2 z16.b, z18.b, z17.b\n"
- "st1b { z16.b }, p1, [x22]\n"
- "add x22, x22, %x[out_stride]\n"
+ "add x28, x28, %x[out_stride]\n"
+ "st1b { z16.b }, p1, [x28]\n"
+ "add x28, x28, %x[out_stride]\n"
"bge 8b\n"
"9:" // Tail row loop: Unroll column loop skip
- "cbz x21, 11f\n"
+ "cbz x20, 11f\n"
"10:" // Tail row loop: Column loop
- "whilelt p0.b, XZR, x21\n"
- "ld1b { z19.b }, p0/Z, [x10]\n"
- "ld1b { z18.b }, p0/Z, [x9]\n"
- "decw x21\n"
- "ld1b { z17.b }, p0/Z, [x28]\n"
- "ld1b { z16.b }, p0/Z, [x27]\n"
- "zip1 z17.b, z19.b, z17.b\n"
- "zip1 z16.b, z18.b, z16.b\n"
- "cmp x21, #0x0\n"
- "incd x10, ALL, MUL #2\n"
- "zip1 z16.b, z17.b, z16.b\n"
- "st1b { z16.b }, p1, [x22]\n"
+ "whilelt p0.b, XZR, x20\n"
+ "ld1b { z17.b }, p0/Z, [x9]\n"
"incd x9, ALL, MUL #2\n"
- "incd x28, ALL, MUL #2\n"
+ "ld1b { z18.b }, p0/Z, [x27]\n"
"incd x27, ALL, MUL #2\n"
- "add x22, x22, %x[out_stride]\n"
+ "ld1b { z16.b }, p0/Z, [x26]\n"
+ "zip1 z17.b, z17.b, z16.b\n"
+ "ld1b { z16.b }, p0/Z, [x25]\n"
+ "incd x26, ALL, MUL #2\n"
+ "zip1 z16.b, z18.b, z16.b\n"
+ "incd x25, ALL, MUL #2\n"
+ "decw x20\n"
+ "zip1 z16.b, z17.b, z16.b\n"
+ "st1b { z16.b }, p1, [x28]\n"
+ "add x28, x28, %x[out_stride]\n"
+ "cmp x20, #0x0\n"
"bgt 10b\n"
"11:" // Tail row loop: Column loop skip
- "cmp %x[height], #0x1\n"
"addvl %x[out], %x[out], #1\n"
+ "cmp %x[height], #0x1\n"
"bge 7b\n"
"12:" // Done
+
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
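
The 1x4 packing above turns byte rows into a stream where every four consecutive output bytes hold one byte from each of four rows (rows 0-3 and rows 4-7 form separate groups per pass, and missing rows are padded from pad_row, as the csel sequence arranges). A minimal scalar model of one four-row group:

#include <cstddef>
#include <cstdint>

// Scalar model of the 1x4 byte interleave: output dwords read
// column-major across the four input rows.
void interleave_1x4_ref(uint8_t *out, const uint8_t *const rows[4],
                        size_t width)
{
    for (size_t col = 0; col < width; col++)
        for (size_t r = 0; r < 4; r++)
            *out++ = rows[r][col];
}

This layout — the same one the zip1/zip2 cascades build a vector at a time — suits kernels built on the 4-way dot-product instructions, where each 32-bit lane consumes four consecutive bytes.
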
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_3VL.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_3VL.hpp
index 82d4184061..0d694f3ec0 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_3VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_3VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -34,99 +34,99 @@ void sve_transpose_interleave_3VL(uint16_t *out, const uint16_t *in, size_t widt
size_t out_stride = 3 * height * get_vector_length<uint8_t>();
__asm__ __volatile__(
+ "ptrue p2.b\n"
"cmp %x[height], #0x4\n"
- "ptrue p3.b\n"
"blt 4f\n"
"1:" // Main row loop: Head
"mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
- "add x24, x25, %x[in_stride]\n"
+ "mov x25, %x[out]\n"
+ "add x24, x26, %x[in_stride]\n"
"add x23, x24, %x[in_stride]\n"
- "add %x[in], x23, %x[in_stride]\n"
- "mov x22, %x[out]\n"
+ "add x22, x23, %x[in_stride]\n"
+ "add %x[in], x22, %x[in_stride]\n"
"sub %x[height], %x[height], #0x4\n"
"mov x21, %x[width]\n"
"2:" // Main row loop: Column loop
"mov x20, x21\n"
- "whilelt p2.h, XZR, x20\n"
- "ld1h { z27.h }, p2/Z, [x26]\n"
- "ld1h { z26.h }, p2/Z, [x25]\n"
+ "mov x19, x25\n"
+ "whilelt p0.h, XZR, x20\n"
+ "ld1h { z27.h }, p0/Z, [x26]\n"
+ "ld1h { z26.h }, p0/Z, [x24]\n"
"dech x20\n"
+ "ld1h { z25.h }, p0/Z, [x23]\n"
"whilelt p1.h, XZR, x20\n"
- "ld1h { z25.h }, p1/Z, [x26, #1, MUL VL]\n"
- "ld1h { z24.h }, p1/Z, [x25, #1, MUL VL]\n"
+ "ld1h { z24.h }, p0/Z, [x22]\n"
"dech x20\n"
+ "ld1h { z23.h }, p1/Z, [x26, #1, MUL VL]\n"
"whilelt p0.h, XZR, x20\n"
- "ld1h { z23.h }, p0/Z, [x26, #2, MUL VL]\n"
- "ld1h { z22.h }, p0/Z, [x25, #2, MUL VL]\n"
- "mov x20, x22\n"
- "dech x21, ALL, MUL #3\n"
- "ld1h { z21.h }, p2/Z, [x24]\n"
- "ld1h { z20.h }, p1/Z, [x24, #1, MUL VL]\n"
- "ld1h { z19.h }, p0/Z, [x24, #2, MUL VL]\n"
- "ld1h { z18.h }, p2/Z, [x23]\n"
- "cmp x21, #0x0\n"
+ "ld1h { z22.h }, p1/Z, [x24, #1, MUL VL]\n"
+ "add x25, x25, %x[out_stride]\n"
+ "ld1h { z21.h }, p0/Z, [x26, #2, MUL VL]\n"
"addvl x26, x26, #3\n"
- "ld1h { z17.h }, p1/Z, [x23, #1, MUL VL]\n"
- "ld1h { z16.h }, p0/Z, [x23, #2, MUL VL]\n"
- "st1h { z27.h }, p3, [x20]\n"
- "addvl x25, x25, #3\n"
- "st1h { z25.h }, p3, [x20, #1, MUL VL]\n"
+ "ld1h { z20.h }, p0/Z, [x24, #2, MUL VL]\n"
"addvl x24, x24, #3\n"
+ "ld1h { z19.h }, p1/Z, [x23, #1, MUL VL]\n"
+ "dech x21, ALL, MUL #3\n"
+ "ld1h { z18.h }, p0/Z, [x23, #2, MUL VL]\n"
"addvl x23, x23, #3\n"
- "st1h { z23.h }, p3, [x20, #2, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z26.h }, p3, [x20, #3, MUL VL]\n"
- "st1h { z24.h }, p3, [x20, #4, MUL VL]\n"
- "st1h { z22.h }, p3, [x20, #5, MUL VL]\n"
- "st1h { z21.h }, p3, [x20, #6, MUL VL]\n"
- "st1h { z20.h }, p3, [x20, #7, MUL VL]\n"
- "addvl x20, x20, #12\n"
- "st1h { z19.h }, p3, [x20, #-4, MUL VL]\n"
- "st1h { z18.h }, p3, [x20, #-3, MUL VL]\n"
- "st1h { z17.h }, p3, [x20, #-2, MUL VL]\n"
- "st1h { z16.h }, p3, [x20, #-1, MUL VL]\n"
+ "ld1h { z17.h }, p1/Z, [x22, #1, MUL VL]\n"
+ "cmp x21, #0x0\n"
+ "ld1h { z16.h }, p0/Z, [x22, #2, MUL VL]\n"
+ "addvl x22, x22, #3\n"
+ "st1h { z27.h }, p2, [x19]\n"
+ "st1h { z23.h }, p2, [x19, #1, MUL VL]\n"
+ "st1h { z21.h }, p2, [x19, #2, MUL VL]\n"
+ "st1h { z26.h }, p2, [x19, #3, MUL VL]\n"
+ "st1h { z22.h }, p2, [x19, #4, MUL VL]\n"
+ "st1h { z20.h }, p2, [x19, #5, MUL VL]\n"
+ "st1h { z25.h }, p2, [x19, #6, MUL VL]\n"
+ "st1h { z19.h }, p2, [x19, #7, MUL VL]\n"
+ "addvl x19, x19, #12\n"
+ "st1h { z18.h }, p2, [x19, #-4, MUL VL]\n"
+ "st1h { z24.h }, p2, [x19, #-3, MUL VL]\n"
+ "st1h { z17.h }, p2, [x19, #-2, MUL VL]\n"
+ "st1h { z16.h }, p2, [x19, #-1, MUL VL]\n"
"bgt 2b\n"
"3:" // Main row loop: Column loop skip
- "cmp %x[height], #0x4\n"
"addvl %x[out], %x[out], #12\n"
+ "cmp %x[height], #0x4\n"
"bge 1b\n"
"cbz %x[height], 8f\n"
"4:" // Main loop skip
"5:" // Tail row loop: Head
"mov x26, %x[in]\n"
+ "mov x25, %x[out]\n"
"add %x[in], x26, %x[in_stride]\n"
- "mov x22, %x[out]\n"
"sub %x[height], %x[height], #0x1\n"
- "mov x21, %x[width]\n"
+ "mov x20, %x[width]\n"
"6:" // Tail row loop: Column loop
- "mov x20, x21\n"
- "whilelt p0.h, XZR, x20\n"
+ "mov x19, x20\n"
+ "dech x20, ALL, MUL #3\n"
+ "whilelt p0.h, XZR, x19\n"
"ld1h { z18.h }, p0/Z, [x26]\n"
- "dech x20\n"
- "whilelt p0.h, XZR, x20\n"
+ "dech x19\n"
+ "whilelt p0.h, XZR, x19\n"
"ld1h { z17.h }, p0/Z, [x26, #1, MUL VL]\n"
- "dech x20\n"
- "dech x21, ALL, MUL #3\n"
- "whilelt p0.h, XZR, x20\n"
- "cmp x21, #0x0\n"
+ "dech x19\n"
+ "whilelt p0.h, XZR, x19\n"
"ld1h { z16.h }, p0/Z, [x26, #2, MUL VL]\n"
- "st1h { z18.h }, p3, [x22]\n"
+ "st1h { z18.h }, p2, [x25]\n"
"addvl x26, x26, #3\n"
- "st1h { z17.h }, p3, [x22, #1, MUL VL]\n"
- "st1h { z16.h }, p3, [x22, #2, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1h { z17.h }, p2, [x25, #1, MUL VL]\n"
+ "cmp x20, #0x0\n"
+ "st1h { z16.h }, p2, [x25, #2, MUL VL]\n"
+ "add x25, x25, %x[out_stride]\n"
"bgt 6b\n"
"7:" // Tail row loop: Column loop skip
- "cmp %x[height], #0x1\n"
"addvl %x[out], %x[out], #3\n"
+ "cmp %x[height], #0x1\n"
"bge 5b\n"
"8:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27"
+ : "cc", "memory", "p0", "p1", "p2", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27"
);
}
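
The recurring clobber-list edits in this patch (x19 re-entering the lists, higher registers dropping out) are not cosmetic: GCC-style extended asm relies on the clobber list to tell the compiler which registers the body scribbles on, so it neither caches values there nor assumes they survive. x19 is callee-saved on AArch64, so naming it also obliges the compiler to save and restore it around the block. A minimal sketch of the mechanism, unrelated to any specific kernel here:

#include <cstdint>

// Uses x19 as scratch inside the asm body, so x19 must be declared
// clobbered; omitting it would let the compiler keep live data there.
uint64_t add_one(uint64_t v)
{
#if defined(__aarch64__)
    __asm__ __volatile__(
        "mov x19, #1\n"
        "add %x[v], %x[v], x19\n"
        : [v] "+r"(v)
        :
        : "x19");
#else
    v += 1; // portable fallback for non-AArch64 builds
#endif
    return v;
}
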
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_3VL_1x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_3VL_1x4.hpp
index ec7095db7b..15b32c804f 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_3VL_1x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_3VL_1x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -40,299 +40,300 @@ void sve_transpose_interleave_3VL_1x4(uint8_t *out, const uint8_t *in, size_t wi
size_t out_stride = 3 * roundup<size_t>(height, 4) * get_vector_length<uint32_t>();
__asm__ __volatile__(
- "cmp %x[height], #0x8\n"
"ptrue p1.b\n"
+ "cmp %x[height], #0x8\n"
"blt 6f\n"
"1:" // Main row loop: Head
- "mov x10, %x[in]\n"
- "add x9, x10, %x[in_stride]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
+ "mov x9, %x[in]\n"
+ "mov x28, %x[out]\n"
+ "add x27, x9, %x[in_stride]\n"
"add x26, x27, %x[in_stride]\n"
- "mov x25, %x[width]\n"
- "cntb x24, ALL, MUL #3\n"
- "add x23, x26, %x[in_stride]\n"
- "add x21, x23, %x[in_stride]\n"
- "add x20, x21, %x[in_stride]\n"
- "cmp x25, x24\n"
- "add %x[in], x20, %x[in_stride]\n"
- "mov x22, %x[out]\n"
+ "add x25, x26, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "add x23, x24, %x[in_stride]\n"
+ "add x22, x23, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add %x[in], x21, %x[in_stride]\n"
"sub %x[height], %x[height], #0x8\n"
+ "mov x20, %x[width]\n"
+ "cntb x19, ALL, MUL #3\n"
+ "cmp x20, x19\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1b { z21.b }, p1/Z, [x10]\n"
- "ld1b { z20.b }, p1/Z, [x9]\n"
- "sub x25, x25, x24\n"
- "cmp x25, x24\n"
- "ld1b { z17.b }, p1/Z, [x28]\n"
- "ld1b { z16.b }, p1/Z, [x27]\n"
- "zip1 z31.b, z21.b, z17.b\n"
- "zip1 z22.b, z20.b, z16.b\n"
- "ld1b { z19.b }, p1/Z, [x26]\n"
- "ld1b { z18.b }, p1/Z, [x23]\n"
- "zip2 z14.b, z21.b, z17.b\n"
- "zip2 z13.b, z20.b, z16.b\n"
- "ld1b { z17.b }, p1/Z, [x21]\n"
- "ld1b { z16.b }, p1/Z, [x20]\n"
- "zip1 z30.b, z19.b, z17.b\n"
- "zip1 z29.b, z18.b, z16.b\n"
- "ld1b { z21.b }, p1/Z, [x10, #1, MUL VL]\n"
- "ld1b { z20.b }, p1/Z, [x9, #1, MUL VL]\n"
- "zip2 z12.b, z19.b, z17.b\n"
- "zip2 z11.b, z18.b, z16.b\n"
- "ld1b { z17.b }, p1/Z, [x28, #1, MUL VL]\n"
- "ld1b { z16.b }, p1/Z, [x27, #1, MUL VL]\n"
- "zip1 z10.b, z21.b, z17.b\n"
- "zip1 z9.b, z20.b, z16.b\n"
- "ld1b { z19.b }, p1/Z, [x26, #1, MUL VL]\n"
- "ld1b { z18.b }, p1/Z, [x23, #1, MUL VL]\n"
- "zip2 z8.b, z21.b, z17.b\n"
- "zip2 z7.b, z20.b, z16.b\n"
- "ld1b { z17.b }, p1/Z, [x21, #1, MUL VL]\n"
- "ld1b { z16.b }, p1/Z, [x20, #1, MUL VL]\n"
- "zip1 z6.b, z19.b, z17.b\n"
- "zip1 z5.b, z18.b, z16.b\n"
- "ld1b { z28.b }, p1/Z, [x10, #2, MUL VL]\n"
- "ld1b { z27.b }, p1/Z, [x9, #2, MUL VL]\n"
- "zip2 z4.b, z19.b, z17.b\n"
- "zip2 z3.b, z18.b, z16.b\n"
- "ld1b { z26.b }, p1/Z, [x28, #2, MUL VL]\n"
- "ld1b { z25.b }, p1/Z, [x27, #2, MUL VL]\n"
- "zip1 z2.b, z28.b, z26.b\n"
- "zip1 z1.b, z27.b, z25.b\n"
- "ld1b { z24.b }, p1/Z, [x26, #2, MUL VL]\n"
- "ld1b { z23.b }, p1/Z, [x23, #2, MUL VL]\n"
- "zip1 z16.b, z31.b, z22.b\n"
- "zip2 z22.b, z31.b, z22.b\n"
- "ld1b { z21.b }, p1/Z, [x21, #2, MUL VL]\n"
- "ld1b { z20.b }, p1/Z, [x20, #2, MUL VL]\n"
- "zip1 z0.b, z24.b, z21.b\n"
- "zip1 z31.b, z23.b, z20.b\n"
- "zip1 z19.b, z14.b, z13.b\n"
- "zip1 z18.b, z30.b, z29.b\n"
- "st1b { z16.b }, p1, [x22]\n"
- "addvl x10, x10, #3\n"
- "zip2 z16.b, z30.b, z29.b\n"
- "zip1 z17.b, z12.b, z11.b\n"
- "st1b { z22.b }, p1, [x22, #1, MUL VL]\n"
+ "ld1b { z18.b }, p1/Z, [x9]\n"
+ "sub x20, x20, x19\n"
+ "ld1b { z19.b }, p1/Z, [x9, #1, MUL VL]\n"
+ "cmp x20, x19\n"
+ "ld1b { z10.b }, p1/Z, [x9, #2, MUL VL]\n"
"addvl x9, x9, #3\n"
- "st1b { z19.b }, p1, [x22, #2, MUL VL]\n"
- "zip2 z30.b, z28.b, z26.b\n"
- "zip2 z29.b, z27.b, z25.b\n"
- "addvl x28, x28, #3\n"
- "st1b { z18.b }, p1, [x22, #3, MUL VL]\n"
- "zip2 z28.b, z24.b, z21.b\n"
- "zip2 z27.b, z23.b, z20.b\n"
+ "ld1b { z24.b }, p1/Z, [x27]\n"
+ "ld1b { z23.b }, p1/Z, [x27, #1, MUL VL]\n"
+ "ld1b { z9.b }, p1/Z, [x27, #2, MUL VL]\n"
"addvl x27, x27, #3\n"
- "st1b { z16.b }, p1, [x22, #4, MUL VL]\n"
- "zip2 z21.b, z14.b, z13.b\n"
- "zip1 z16.b, z10.b, z9.b\n"
+ "ld1b { z16.b }, p1/Z, [x26]\n"
+ "zip1 z21.b, z18.b, z16.b\n"
+ "ld1b { z17.b }, p1/Z, [x26, #1, MUL VL]\n"
+ "zip2 z18.b, z18.b, z16.b\n"
+ "ld1b { z8.b }, p1/Z, [x26, #2, MUL VL]\n"
"addvl x26, x26, #3\n"
- "st1b { z17.b }, p1, [x22, #5, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip2 z20.b, z10.b, z9.b\n"
- "zip2 z19.b, z12.b, z11.b\n"
- "zip1 z18.b, z6.b, z5.b\n"
- "zip2 z17.b, z6.b, z5.b\n"
- "st1b { z21.b }, p1, [x22]\n"
+ "zip1 z22.b, z19.b, z17.b\n"
+ "ld1b { z16.b }, p1/Z, [x25]\n"
+ "zip2 z7.b, z19.b, z17.b\n"
+ "ld1b { z20.b }, p1/Z, [x25, #1, MUL VL]\n"
+ "zip1 z6.b, z10.b, z8.b\n"
+ "ld1b { z5.b }, p1/Z, [x25, #2, MUL VL]\n"
+ "addvl x25, x25, #3\n"
+ "zip1 z17.b, z24.b, z16.b\n"
+ "ld1b { z19.b }, p1/Z, [x24]\n"
+ "zip2 z16.b, z24.b, z16.b\n"
+ "ld1b { z4.b }, p1/Z, [x24, #1, MUL VL]\n"
+ "zip1 z3.b, z21.b, z17.b\n"
+ "ld1b { z2.b }, p1/Z, [x24, #2, MUL VL]\n"
+ "addvl x24, x24, #3\n"
+ "zip2 z1.b, z21.b, z17.b\n"
+ "ld1b { z0.b }, p1/Z, [x23]\n"
+ "zip1 z31.b, z18.b, z16.b\n"
+ "ld1b { z30.b }, p1/Z, [x23, #1, MUL VL]\n"
+ "zip2 z29.b, z18.b, z16.b\n"
+ "ld1b { z28.b }, p1/Z, [x23, #2, MUL VL]\n"
"addvl x23, x23, #3\n"
- "st1b { z16.b }, p1, [x22, #1, MUL VL]\n"
- "zip1 z16.b, z8.b, z7.b\n"
- "zip2 z26.b, z8.b, z7.b\n"
+ "zip1 z18.b, z23.b, z20.b\n"
+ "ld1b { z17.b }, p1/Z, [x22]\n"
+ "zip2 z27.b, z23.b, z20.b\n"
+ "ld1b { z26.b }, p1/Z, [x22, #1, MUL VL]\n"
+ "zip1 z25.b, z22.b, z18.b\n"
+ "ld1b { z24.b }, p1/Z, [x22, #2, MUL VL]\n"
+ "addvl x22, x22, #3\n"
+ "zip1 z21.b, z19.b, z17.b\n"
+ "ld1b { z16.b }, p1/Z, [x21]\n"
+ "zip2 z19.b, z19.b, z17.b\n"
+ "ld1b { z23.b }, p1/Z, [x21, #1, MUL VL]\n"
+ "zip2 z20.b, z22.b, z18.b\n"
+ "ld1b { z22.b }, p1/Z, [x21, #2, MUL VL]\n"
"addvl x21, x21, #3\n"
- "st1b { z20.b }, p1, [x22, #2, MUL VL]\n"
- "zip1 z25.b, z2.b, z1.b\n"
- "zip1 z24.b, z4.b, z3.b\n"
- "addvl x20, x20, #3\n"
- "st1b { z19.b }, p1, [x22, #3, MUL VL]\n"
- "zip2 z23.b, z4.b, z3.b\n"
- "zip1 z22.b, z0.b, z31.b\n"
- "st1b { z18.b }, p1, [x22, #4, MUL VL]\n"
- "zip2 z21.b, z2.b, z1.b\n"
- "zip1 z20.b, z30.b, z29.b\n"
- "st1b { z17.b }, p1, [x22, #5, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip2 z19.b, z30.b, z29.b\n"
- "zip2 z18.b, z0.b, z31.b\n"
- "st1b { z16.b }, p1, [x22]\n"
- "zip1 z17.b, z28.b, z27.b\n"
- "zip2 z16.b, z28.b, z27.b\n"
- "st1b { z26.b }, p1, [x22, #1, MUL VL]\n"
- "st1b { z25.b }, p1, [x22, #2, MUL VL]\n"
- "st1b { z24.b }, p1, [x22, #3, MUL VL]\n"
- "st1b { z23.b }, p1, [x22, #4, MUL VL]\n"
- "st1b { z22.b }, p1, [x22, #5, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1b { z21.b }, p1, [x22]\n"
- "st1b { z20.b }, p1, [x22, #1, MUL VL]\n"
- "st1b { z19.b }, p1, [x22, #2, MUL VL]\n"
- "st1b { z18.b }, p1, [x22, #3, MUL VL]\n"
- "st1b { z17.b }, p1, [x22, #4, MUL VL]\n"
- "st1b { z16.b }, p1, [x22, #5, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "zip1 z17.b, z0.b, z16.b\n"
+ "st1b { z3.b }, p1, [x28]\n"
+ "zip2 z18.b, z0.b, z16.b\n"
+ "st1b { z1.b }, p1, [x28, #1, MUL VL]\n"
+ "zip1 z16.b, z21.b, z17.b\n"
+ "st1b { z31.b }, p1, [x28, #2, MUL VL]\n"
+ "zip2 z17.b, z21.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #3, MUL VL]\n"
+ "zip1 z16.b, z19.b, z18.b\n"
+ "st1b { z17.b }, p1, [x28, #4, MUL VL]\n"
+ "zip2 z19.b, z19.b, z18.b\n"
+ "st1b { z16.b }, p1, [x28, #5, MUL VL]\n"
+ "add x28, x28, %x[out_stride]\n"
+ "zip1 z18.b, z4.b, z26.b\n"
+ "st1b { z29.b }, p1, [x28]\n"
+ "zip1 z17.b, z30.b, z23.b\n"
+ "st1b { z25.b }, p1, [x28, #1, MUL VL]\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z20.b }, p1, [x28, #2, MUL VL]\n"
+ "zip2 z18.b, z18.b, z17.b\n"
+ "st1b { z19.b }, p1, [x28, #3, MUL VL]\n"
+ "zip1 z17.b, z7.b, z27.b\n"
+ "st1b { z16.b }, p1, [x28, #4, MUL VL]\n"
+ "zip2 z16.b, z7.b, z27.b\n"
+ "st1b { z18.b }, p1, [x28, #5, MUL VL]\n"
+ "add x28, x28, %x[out_stride]\n"
+ "zip1 z21.b, z9.b, z5.b\n"
+ "st1b { z17.b }, p1, [x28]\n"
+ "zip2 z18.b, z4.b, z26.b\n"
+ "st1b { z16.b }, p1, [x28, #1, MUL VL]\n"
+ "zip1 z16.b, z6.b, z21.b\n"
+ "st1b { z16.b }, p1, [x28, #2, MUL VL]\n"
+ "zip2 z17.b, z30.b, z23.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #3, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #4, MUL VL]\n"
+ "zip1 z20.b, z2.b, z24.b\n"
+ "zip1 z19.b, z28.b, z22.b\n"
+ "zip1 z16.b, z20.b, z19.b\n"
+ "st1b { z16.b }, p1, [x28, #5, MUL VL]\n"
+ "add x28, x28, %x[out_stride]\n"
+ "zip2 z16.b, z6.b, z21.b\n"
+ "st1b { z16.b }, p1, [x28]\n"
+ "zip2 z18.b, z10.b, z8.b\n"
+ "zip2 z17.b, z9.b, z5.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #1, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #2, MUL VL]\n"
+ "zip2 z16.b, z20.b, z19.b\n"
+ "st1b { z16.b }, p1, [x28, #3, MUL VL]\n"
+ "zip2 z18.b, z2.b, z24.b\n"
+ "zip2 z17.b, z28.b, z22.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #4, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #5, MUL VL]\n"
+ "add x28, x28, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x25, 5f\n"
+ "cbz x20, 5f\n"
"4:" // Main row loop: Column loop
- "whilelt p0.b, XZR, x25\n"
- "ld1b { z19.b }, p0/Z, [x10]\n"
+ "whilelt p0.b, XZR, x20\n"
"ld1b { z18.b }, p0/Z, [x9]\n"
- "decw x25, ALL, MUL #3\n"
- "ld1b { z17.b }, p0/Z, [x28]\n"
- "ld1b { z16.b }, p0/Z, [x27]\n"
- "zip1 z26.b, z19.b, z17.b\n"
- "zip1 z25.b, z18.b, z16.b\n"
- "ld1b { z21.b }, p0/Z, [x26]\n"
- "ld1b { z20.b }, p0/Z, [x23]\n"
- "zip2 z24.b, z19.b, z17.b\n"
- "zip2 z19.b, z18.b, z16.b\n"
- "ld1b { z18.b }, p0/Z, [x21]\n"
- "ld1b { z16.b }, p0/Z, [x20]\n"
- "zip1 z23.b, z21.b, z18.b\n"
- "zip1 z17.b, z20.b, z16.b\n"
- "zip2 z22.b, z21.b, z18.b\n"
- "zip2 z16.b, z20.b, z16.b\n"
- "cmp x25, #0x0\n"
- "incd x10, ALL, MUL #6\n"
"incd x9, ALL, MUL #6\n"
- "incd x28, ALL, MUL #6\n"
- "zip1 z21.b, z26.b, z25.b\n"
- "zip2 z20.b, z26.b, z25.b\n"
+ "ld1b { z25.b }, p0/Z, [x27]\n"
"incd x27, ALL, MUL #6\n"
+ "ld1b { z16.b }, p0/Z, [x26]\n"
+ "zip1 z17.b, z18.b, z16.b\n"
+ "ld1b { z24.b }, p0/Z, [x25]\n"
"incd x26, ALL, MUL #6\n"
- "zip1 z19.b, z24.b, z19.b\n"
- "zip1 z18.b, z23.b, z17.b\n"
+ "zip2 z19.b, z18.b, z16.b\n"
+ "ld1b { z23.b }, p0/Z, [x24]\n"
+ "incd x25, ALL, MUL #6\n"
+ "zip1 z16.b, z25.b, z24.b\n"
+ "ld1b { z22.b }, p0/Z, [x23]\n"
+ "incd x24, ALL, MUL #6\n"
+ "zip1 z18.b, z17.b, z16.b\n"
+ "ld1b { z21.b }, p0/Z, [x22]\n"
"incd x23, ALL, MUL #6\n"
+ "zip2 z17.b, z17.b, z16.b\n"
+ "ld1b { z20.b }, p0/Z, [x21]\n"
+ "incd x22, ALL, MUL #6\n"
+ "zip2 z16.b, z25.b, z24.b\n"
+ "st1b { z18.b }, p1, [x28]\n"
"incd x21, ALL, MUL #6\n"
- "zip2 z17.b, z23.b, z17.b\n"
- "zip1 z16.b, z22.b, z16.b\n"
- "incd x20, ALL, MUL #6\n"
- "st1b { z21.b }, p1, [x22]\n"
- "st1b { z20.b }, p1, [x22, #1, MUL VL]\n"
- "st1b { z19.b }, p1, [x22, #2, MUL VL]\n"
- "st1b { z18.b }, p1, [x22, #3, MUL VL]\n"
- "st1b { z17.b }, p1, [x22, #4, MUL VL]\n"
- "st1b { z16.b }, p1, [x22, #5, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "zip1 z16.b, z19.b, z16.b\n"
+ "st1b { z17.b }, p1, [x28, #1, MUL VL]\n"
+ "decw x20, ALL, MUL #3\n"
+ "zip1 z19.b, z23.b, z21.b\n"
+ "st1b { z16.b }, p1, [x28, #2, MUL VL]\n"
+ "cmp x20, #0x0\n"
+ "zip1 z18.b, z22.b, z20.b\n"
+ "zip2 z17.b, z23.b, z21.b\n"
+ "zip1 z16.b, z19.b, z18.b\n"
+ "st1b { z16.b }, p1, [x28, #3, MUL VL]\n"
+ "zip2 z16.b, z19.b, z18.b\n"
+ "st1b { z16.b }, p1, [x28, #4, MUL VL]\n"
+ "zip2 z16.b, z22.b, z20.b\n"
+ "zip1 z16.b, z17.b, z16.b\n"
+ "st1b { z16.b }, p1, [x28, #5, MUL VL]\n"
+ "add x28, x28, %x[out_stride]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp %x[height], #0x8\n"
"addvl %x[out], %x[out], #6\n"
+ "cmp %x[height], #0x8\n"
"bge 1b\n"
"cbz %x[height], 12f\n"
"6:" // Main loop skip
+
"7:" // Tail row loop: Head
- "mov x10, %x[in]\n"
- "add x9, x10, %x[in_stride]\n"
- "add x28, x9, %x[in_stride]\n"
- "mov x21, %x[width]\n"
- "cntb x20, ALL, MUL #3\n"
- "add x27, x28, %x[in_stride]\n"
+ "mov x9, %x[in]\n"
+ "mov x28, %x[out]\n"
+ "add x27, x9, %x[in_stride]\n"
+ "add x26, x27, %x[in_stride]\n"
+ "add x25, x26, %x[in_stride]\n"
+ "add %x[in], x25, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x27, %x[in_stride]\n"
- "csel x27, x27, %x[pad_row], GT\n"
- "csel x28, x28, %x[pad_row], GE\n"
+ "csel x25, x25, %x[pad_row], GT\n"
+ "csel x26, x26, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x9, x9, %x[pad_row], GT\n"
- "cmp x21, x20\n"
- "mov x22, %x[out]\n"
+ "csel x27, x27, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x4\n"
+ "mov x20, %x[width]\n"
+ "cntb x19, ALL, MUL #3\n"
+ "cmp x20, x19\n"
"blt 9f\n"
"8:" // Tail row loop: Unroll column loop
- "ld1b { z21.b }, p1/Z, [x10]\n"
- "ld1b { z20.b }, p1/Z, [x9]\n"
- "sub x21, x21, x20\n"
- "cmp x21, x20\n"
- "ld1b { z17.b }, p1/Z, [x28]\n"
- "ld1b { z16.b }, p1/Z, [x27]\n"
- "zip1 z31.b, z21.b, z17.b\n"
- "zip1 z30.b, z20.b, z16.b\n"
- "ld1b { z19.b }, p1/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z19.b }, p1/Z, [x9]\n"
+ "sub x20, x20, x19\n"
"ld1b { z18.b }, p1/Z, [x9, #1, MUL VL]\n"
- "zip2 z29.b, z21.b, z17.b\n"
- "zip2 z28.b, z20.b, z16.b\n"
- "ld1b { z17.b }, p1/Z, [x28, #1, MUL VL]\n"
- "ld1b { z16.b }, p1/Z, [x27, #1, MUL VL]\n"
- "zip1 z27.b, z19.b, z17.b\n"
- "zip1 z26.b, z18.b, z16.b\n"
- "ld1b { z22.b }, p1/Z, [x10, #2, MUL VL]\n"
- "ld1b { z21.b }, p1/Z, [x9, #2, MUL VL]\n"
- "zip2 z25.b, z19.b, z17.b\n"
- "zip2 z20.b, z18.b, z16.b\n"
- "ld1b { z19.b }, p1/Z, [x28, #2, MUL VL]\n"
- "ld1b { z18.b }, p1/Z, [x27, #2, MUL VL]\n"
- "zip1 z24.b, z22.b, z19.b\n"
- "zip1 z23.b, z21.b, z18.b\n"
- "zip1 z16.b, z31.b, z30.b\n"
- "zip2 z17.b, z31.b, z30.b\n"
- "st1b { z16.b }, p1, [x22]\n"
- "addvl x10, x10, #3\n"
- "zip1 z16.b, z29.b, z28.b\n"
- "st1b { z17.b }, p1, [x22, #1, MUL VL]\n"
- "zip2 z22.b, z22.b, z19.b\n"
+ "cmp x20, x19\n"
+ "ld1b { z30.b }, p1/Z, [x9, #2, MUL VL]\n"
"addvl x9, x9, #3\n"
- "st1b { z16.b }, p1, [x22, #2, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip2 z21.b, z21.b, z18.b\n"
- "zip2 z18.b, z29.b, z28.b\n"
- "zip1 z16.b, z27.b, z26.b\n"
- "zip2 z17.b, z27.b, z26.b\n"
- "st1b { z18.b }, p1, [x22]\n"
- "addvl x28, x28, #3\n"
- "st1b { z16.b }, p1, [x22, #1, MUL VL]\n"
- "zip1 z16.b, z25.b, z20.b\n"
- "zip2 z20.b, z25.b, z20.b\n"
+ "ld1b { z29.b }, p1/Z, [x27]\n"
+ "ld1b { z28.b }, p1/Z, [x27, #1, MUL VL]\n"
+ "ld1b { z27.b }, p1/Z, [x27, #2, MUL VL]\n"
"addvl x27, x27, #3\n"
- "st1b { z17.b }, p1, [x22, #2, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip1 z19.b, z24.b, z23.b\n"
- "zip2 z18.b, z24.b, z23.b\n"
- "st1b { z16.b }, p1, [x22]\n"
- "zip1 z17.b, z22.b, z21.b\n"
- "zip2 z16.b, z22.b, z21.b\n"
- "st1b { z20.b }, p1, [x22, #1, MUL VL]\n"
- "st1b { z19.b }, p1, [x22, #2, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1b { z18.b }, p1, [x22]\n"
- "st1b { z17.b }, p1, [x22, #1, MUL VL]\n"
- "st1b { z16.b }, p1, [x22, #2, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "ld1b { z16.b }, p1/Z, [x26]\n"
+ "zip1 z26.b, z19.b, z16.b\n"
+ "ld1b { z17.b }, p1/Z, [x26, #1, MUL VL]\n"
+ "zip2 z25.b, z19.b, z16.b\n"
+ "ld1b { z24.b }, p1/Z, [x26, #2, MUL VL]\n"
+ "addvl x26, x26, #3\n"
+ "zip1 z23.b, z18.b, z17.b\n"
+ "ld1b { z16.b }, p1/Z, [x25]\n"
+ "zip2 z22.b, z18.b, z17.b\n"
+ "ld1b { z21.b }, p1/Z, [x25, #1, MUL VL]\n"
+ "zip1 z20.b, z30.b, z24.b\n"
+ "ld1b { z19.b }, p1/Z, [x25, #2, MUL VL]\n"
+ "addvl x25, x25, #3\n"
+ "zip1 z18.b, z29.b, z16.b\n"
+ "zip2 z17.b, z29.b, z16.b\n"
+ "zip1 z16.b, z26.b, z18.b\n"
+ "st1b { z16.b }, p1, [x28]\n"
+ "zip2 z16.b, z26.b, z18.b\n"
+ "st1b { z16.b }, p1, [x28, #1, MUL VL]\n"
+ "zip1 z16.b, z25.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #2, MUL VL]\n"
+ "add x28, x28, %x[out_stride]\n"
+ "zip2 z16.b, z25.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28]\n"
+ "zip1 z18.b, z28.b, z21.b\n"
+ "zip2 z17.b, z28.b, z21.b\n"
+ "zip1 z16.b, z23.b, z18.b\n"
+ "st1b { z16.b }, p1, [x28, #1, MUL VL]\n"
+ "zip2 z16.b, z23.b, z18.b\n"
+ "st1b { z16.b }, p1, [x28, #2, MUL VL]\n"
+ "add x28, x28, %x[out_stride]\n"
+ "zip1 z16.b, z22.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28]\n"
+ "zip2 z16.b, z22.b, z17.b\n"
+ "zip1 z17.b, z27.b, z19.b\n"
+ "st1b { z16.b }, p1, [x28, #1, MUL VL]\n"
+ "zip1 z16.b, z20.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #2, MUL VL]\n"
+ "add x28, x28, %x[out_stride]\n"
+ "zip2 z16.b, z20.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28]\n"
+ "zip2 z18.b, z30.b, z24.b\n"
+ "zip2 z17.b, z27.b, z19.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #1, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #2, MUL VL]\n"
+ "add x28, x28, %x[out_stride]\n"
"bge 8b\n"
"9:" // Tail row loop: Unroll column loop skip
- "cbz x21, 11f\n"
+ "cbz x20, 11f\n"
"10:" // Tail row loop: Column loop
- "whilelt p0.b, XZR, x21\n"
- "ld1b { z19.b }, p0/Z, [x10]\n"
- "ld1b { z21.b }, p0/Z, [x9]\n"
- "decw x21, ALL, MUL #3\n"
- "ld1b { z18.b }, p0/Z, [x28]\n"
- "ld1b { z16.b }, p0/Z, [x27]\n"
- "zip1 z20.b, z19.b, z18.b\n"
- "zip1 z17.b, z21.b, z16.b\n"
- "zip2 z19.b, z19.b, z18.b\n"
- "zip2 z16.b, z21.b, z16.b\n"
- "cmp x21, #0x0\n"
- "incd x10, ALL, MUL #6\n"
+ "whilelt p0.b, XZR, x20\n"
+ "ld1b { z18.b }, p0/Z, [x9]\n"
"incd x9, ALL, MUL #6\n"
- "incd x28, ALL, MUL #6\n"
- "zip1 z18.b, z20.b, z17.b\n"
- "zip2 z17.b, z20.b, z17.b\n"
+ "ld1b { z21.b }, p0/Z, [x27]\n"
"incd x27, ALL, MUL #6\n"
- "zip1 z16.b, z19.b, z16.b\n"
- "st1b { z18.b }, p1, [x22]\n"
- "st1b { z17.b }, p1, [x22, #1, MUL VL]\n"
- "st1b { z16.b }, p1, [x22, #2, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "ld1b { z17.b }, p0/Z, [x26]\n"
+ "zip1 z20.b, z18.b, z17.b\n"
+ "ld1b { z16.b }, p0/Z, [x25]\n"
+ "incd x26, ALL, MUL #6\n"
+ "zip2 z19.b, z18.b, z17.b\n"
+ "incd x25, ALL, MUL #6\n"
+ "decw x20, ALL, MUL #3\n"
+ "zip1 z18.b, z21.b, z16.b\n"
+ "cmp x20, #0x0\n"
+ "zip2 z17.b, z21.b, z16.b\n"
+ "zip1 z16.b, z20.b, z18.b\n"
+ "st1b { z16.b }, p1, [x28]\n"
+ "zip2 z16.b, z20.b, z18.b\n"
+ "st1b { z16.b }, p1, [x28, #1, MUL VL]\n"
+ "zip1 z16.b, z19.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #2, MUL VL]\n"
+ "add x28, x28, %x[out_stride]\n"
"bgt 10b\n"
"11:" // Tail row loop: Column loop skip
- "cmp %x[height], #0x1\n"
"addvl %x[out], %x[out], #3\n"
+ "cmp %x[height], #0x1\n"
"bge 7b\n"
"12:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_3VL_2x2.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_3VL_2x2.hpp
index 3d14383a64..1864a16758 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_3VL_2x2.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_3VL_2x2.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -40,262 +40,263 @@ void sve_transpose_interleave_3VL_2x2(uint16_t *out, const uint16_t *in, size_t
size_t out_stride = 3 * roundup<size_t>(height, 2) * get_vector_length<uint16_t>();
__asm__ __volatile__(
- "cmp %x[height], #0x8\n"
"ptrue p2.b\n"
+ "cmp %x[height], #0x8\n"
"blt 6f\n"
"1:" // Main row loop: Head
- "mov x12, %x[in]\n"
- "add x11, x12, %x[in_stride]\n"
- "add x10, x11, %x[in_stride]\n"
- "add x9, x10, %x[in_stride]\n"
+ "mov x11, %x[in]\n"
+ "mov x10, %x[out]\n"
+ "add x9, x11, %x[in_stride]\n"
"add x28, x9, %x[in_stride]\n"
- "mov x27, %x[width]\n"
- "cnth x26, ALL, MUL #3\n"
- "add x25, x28, %x[in_stride]\n"
+ "add x27, x28, %x[in_stride]\n"
+ "add x26, x27, %x[in_stride]\n"
+ "add x25, x26, %x[in_stride]\n"
"add x24, x25, %x[in_stride]\n"
"add x23, x24, %x[in_stride]\n"
- "cmp x27, x26\n"
"add %x[in], x23, %x[in_stride]\n"
- "mov x22, %x[out]\n"
"sub %x[height], %x[height], #0x8\n"
+ "mov x22, %x[width]\n"
+ "cnth x21, ALL, MUL #3\n"
+ "cmp x22, x21\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1h { z17.h }, p2/Z, [x12]\n"
- "ld1h { z23.h }, p2/Z, [x12, #1, MUL VL]\n"
- "mov x21, x22\n"
- "add x22, x22, %x[out_stride]\n"
- "ld1h { z16.h }, p2/Z, [x11]\n"
- "ld1h { z20.h }, p2/Z, [x11, #1, MUL VL]\n"
- "zip1 z9.h, z17.h, z16.h\n"
- "zip2 z8.h, z17.h, z16.h\n"
- "ld1h { z17.h }, p2/Z, [x10]\n"
- "ld1h { z22.h }, p2/Z, [x10, #1, MUL VL]\n"
- "zip1 z7.h, z23.h, z20.h\n"
- "mov x20, x22\n"
- "ld1h { z16.h }, p2/Z, [x9]\n"
- "ld1h { z21.h }, p2/Z, [x9, #1, MUL VL]\n"
- "zip1 z6.h, z17.h, z16.h\n"
- "zip2 z5.h, z17.h, z16.h\n"
- "ld1h { z18.h }, p2/Z, [x28]\n"
- "ld1h { z17.h }, p2/Z, [x25]\n"
- "zip1 z4.h, z22.h, z21.h\n"
- "zip1 z3.h, z18.h, z17.h\n"
- "ld1h { z19.h }, p2/Z, [x12, #2, MUL VL]\n"
- "ld1h { z16.h }, p2/Z, [x11, #2, MUL VL]\n"
- "zip2 z2.h, z18.h, z17.h\n"
- "zip2 z1.h, z23.h, z20.h\n"
- "ld1h { z18.h }, p2/Z, [x10, #2, MUL VL]\n"
- "ld1h { z17.h }, p2/Z, [x9, #2, MUL VL]\n"
- "zip1 z0.h, z19.h, z16.h\n"
- "zip2 z31.h, z19.h, z16.h\n"
- "ld1h { z20.h }, p2/Z, [x28, #1, MUL VL]\n"
- "ld1h { z30.h }, p2/Z, [x28, #2, MUL VL]\n"
- "zip2 z29.h, z22.h, z21.h\n"
- "zip1 z28.h, z18.h, z17.h\n"
- "ld1h { z16.h }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1h { z19.h }, p2/Z, [x25, #2, MUL VL]\n"
- "zip1 z27.h, z20.h, z16.h\n"
- "zip2 z26.h, z18.h, z17.h\n"
- "ld1h { z17.h }, p2/Z, [x24]\n"
- "ld1h { z18.h }, p2/Z, [x24, #1, MUL VL]\n"
- "zip2 z25.h, z20.h, z16.h\n"
- "zip1 z24.h, z30.h, z19.h\n"
- "ld1h { z23.h }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1h { z16.h }, p2/Z, [x23]\n"
- "zip1 z22.h, z17.h, z16.h\n"
- "zip2 z21.h, z17.h, z16.h\n"
- "ld1h { z17.h }, p2/Z, [x23, #1, MUL VL]\n"
- "ld1h { z16.h }, p2/Z, [x23, #2, MUL VL]\n"
- "st1h { z9.h }, p2, [x21]\n"
- "zip1 z20.h, z18.h, z17.h\n"
- "st1h { z8.h }, p2, [x21, #1, MUL VL]\n"
- "sub x27, x27, x26\n"
- "cmp x27, x26\n"
- "zip2 z19.h, z30.h, z19.h\n"
- "st1h { z7.h }, p2, [x21, #2, MUL VL]\n"
- "addvl x12, x12, #3\n"
+ "ld1h { z19.h }, p2/Z, [x11]\n"
+ "mov x20, x10\n"
+ "ld1h { z18.h }, p2/Z, [x11, #1, MUL VL]\n"
+ "add x10, x10, %x[out_stride]\n"
+ "ld1h { z21.h }, p2/Z, [x11, #2, MUL VL]\n"
"addvl x11, x11, #3\n"
- "zip2 z18.h, z18.h, z17.h\n"
- "st1h { z6.h }, p2, [x21, #3, MUL VL]\n"
- "addvl x10, x10, #3\n"
+ "ld1h { z16.h }, p2/Z, [x9]\n"
+ "zip1 z9.h, z19.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x9, #1, MUL VL]\n"
+ "mov x19, x10\n"
+ "zip2 z8.h, z19.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x9, #2, MUL VL]\n"
"addvl x9, x9, #3\n"
- "zip1 z17.h, z23.h, z16.h\n"
- "st1h { z5.h }, p2, [x21, #4, MUL VL]\n"
+ "zip1 z7.h, z18.h, z17.h\n"
+ "ld1h { z19.h }, p2/Z, [x28]\n"
+ "add x10, x10, %x[out_stride]\n"
+ "zip2 z6.h, z18.h, z17.h\n"
+ "ld1h { z20.h }, p2/Z, [x28, #1, MUL VL]\n"
+ "sub x22, x22, x21\n"
+ "zip1 z5.h, z21.h, z16.h\n"
+ "ld1h { z18.h }, p2/Z, [x28, #2, MUL VL]\n"
"addvl x28, x28, #3\n"
+ "zip2 z4.h, z21.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x27]\n"
+ "cmp x22, x21\n"
+ "zip1 z3.h, z19.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x27, #1, MUL VL]\n"
+ "zip2 z2.h, z19.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x27, #2, MUL VL]\n"
+ "addvl x27, x27, #3\n"
+ "zip1 z1.h, z20.h, z17.h\n"
+ "ld1h { z19.h }, p2/Z, [x26]\n"
+ "zip2 z0.h, z20.h, z17.h\n"
+ "ld1h { z21.h }, p2/Z, [x26, #1, MUL VL]\n"
+ "zip1 z31.h, z18.h, z16.h\n"
+ "ld1h { z20.h }, p2/Z, [x26, #2, MUL VL]\n"
+ "addvl x26, x26, #3\n"
+ "zip2 z30.h, z18.h, z16.h\n"
+ "ld1h { z18.h }, p2/Z, [x25]\n"
+ "ld1h { z17.h }, p2/Z, [x25, #1, MUL VL]\n"
+ "zip1 z29.h, z19.h, z18.h\n"
+ "ld1h { z16.h }, p2/Z, [x25, #2, MUL VL]\n"
"addvl x25, x25, #3\n"
- "zip2 z16.h, z23.h, z16.h\n"
- "st1h { z4.h }, p2, [x21, #5, MUL VL]\n"
+ "zip2 z28.h, z19.h, z18.h\n"
+ "ld1h { z19.h }, p2/Z, [x24]\n"
+ "zip1 z27.h, z21.h, z17.h\n"
+ "ld1h { z26.h }, p2/Z, [x24, #1, MUL VL]\n"
+ "zip2 z25.h, z21.h, z17.h\n"
+ "ld1h { z24.h }, p2/Z, [x24, #2, MUL VL]\n"
"addvl x24, x24, #3\n"
+ "zip1 z23.h, z20.h, z16.h\n"
+ "ld1h { z18.h }, p2/Z, [x23]\n"
+ "zip2 z22.h, z20.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z16.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "zip1 z21.h, z19.h, z18.h\n"
+ "st1h { z9.h }, p2, [x20]\n"
"addvl x23, x23, #3\n"
- "st1h { z3.h }, p2, [x21, #6, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z2.h }, p2, [x21, #7, MUL VL]\n"
- "addvl x21, x21, #12\n"
- "st1h { z27.h }, p2, [x21, #-4, MUL VL]\n"
- "st1h { z22.h }, p2, [x21, #-3, MUL VL]\n"
- "st1h { z21.h }, p2, [x21, #-2, MUL VL]\n"
- "st1h { z20.h }, p2, [x21, #-1, MUL VL]\n"
- "st1h { z1.h }, p2, [x20]\n"
- "st1h { z0.h }, p2, [x20, #1, MUL VL]\n"
- "st1h { z31.h }, p2, [x20, #2, MUL VL]\n"
- "st1h { z29.h }, p2, [x20, #3, MUL VL]\n"
- "st1h { z28.h }, p2, [x20, #4, MUL VL]\n"
- "st1h { z26.h }, p2, [x20, #5, MUL VL]\n"
- "st1h { z25.h }, p2, [x20, #6, MUL VL]\n"
- "st1h { z24.h }, p2, [x20, #7, MUL VL]\n"
+ "zip2 z20.h, z19.h, z18.h\n"
+ "st1h { z8.h }, p2, [x20, #1, MUL VL]\n"
+ "zip1 z19.h, z26.h, z17.h\n"
+ "st1h { z7.h }, p2, [x20, #2, MUL VL]\n"
+ "zip2 z18.h, z26.h, z17.h\n"
+ "st1h { z3.h }, p2, [x20, #3, MUL VL]\n"
+ "zip1 z17.h, z24.h, z16.h\n"
+ "st1h { z2.h }, p2, [x20, #4, MUL VL]\n"
+ "zip2 z16.h, z24.h, z16.h\n"
+ "st1h { z1.h }, p2, [x20, #5, MUL VL]\n"
+ "st1h { z29.h }, p2, [x20, #6, MUL VL]\n"
+ "st1h { z28.h }, p2, [x20, #7, MUL VL]\n"
"addvl x20, x20, #12\n"
- "st1h { z19.h }, p2, [x20, #-4, MUL VL]\n"
- "st1h { z18.h }, p2, [x20, #-3, MUL VL]\n"
- "st1h { z17.h }, p2, [x20, #-2, MUL VL]\n"
- "st1h { z16.h }, p2, [x20, #-1, MUL VL]\n"
+ "st1h { z27.h }, p2, [x20, #-4, MUL VL]\n"
+ "st1h { z21.h }, p2, [x20, #-3, MUL VL]\n"
+ "st1h { z20.h }, p2, [x20, #-2, MUL VL]\n"
+ "st1h { z19.h }, p2, [x20, #-1, MUL VL]\n"
+ "st1h { z6.h }, p2, [x19]\n"
+ "st1h { z5.h }, p2, [x19, #1, MUL VL]\n"
+ "st1h { z4.h }, p2, [x19, #2, MUL VL]\n"
+ "st1h { z0.h }, p2, [x19, #3, MUL VL]\n"
+ "st1h { z31.h }, p2, [x19, #4, MUL VL]\n"
+ "st1h { z30.h }, p2, [x19, #5, MUL VL]\n"
+ "st1h { z25.h }, p2, [x19, #6, MUL VL]\n"
+ "st1h { z23.h }, p2, [x19, #7, MUL VL]\n"
+ "addvl x19, x19, #12\n"
+ "st1h { z22.h }, p2, [x19, #-4, MUL VL]\n"
+ "st1h { z18.h }, p2, [x19, #-3, MUL VL]\n"
+ "st1h { z17.h }, p2, [x19, #-2, MUL VL]\n"
+ "st1h { z16.h }, p2, [x19, #-1, MUL VL]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x27, 5f\n"
+ "cbz x22, 5f\n"
"4:" // Main row loop: Column loop
- "mov x20, x27\n"
+ "mov x20, x22\n"
+ "mov x19, x10\n"
"whilelt p1.h, XZR, x20\n"
- "ld1h { z0.h }, p1/Z, [x12]\n"
- "ld1h { z16.h }, p1/Z, [x11]\n"
+ "ld1h { z17.h }, p1/Z, [x11]\n"
+ "ld1h { z16.h }, p1/Z, [x9]\n"
+ "zip1 z29.h, z17.h, z16.h\n"
+ "ld1h { z18.h }, p1/Z, [x28]\n"
"dech x20\n"
+ "zip2 z28.h, z17.h, z16.h\n"
+ "ld1h { z16.h }, p1/Z, [x27]\n"
"whilelt p0.h, XZR, x20\n"
- "ld1h { z21.h }, p0/Z, [x12, #1, MUL VL]\n"
- "ld1h { z19.h }, p0/Z, [x11, #1, MUL VL]\n"
- "ld1h { z31.h }, p1/Z, [x10]\n"
- "ld1h { z30.h }, p0/Z, [x10, #1, MUL VL]\n"
- "mov x20, x22\n"
- "decw x27, ALL, MUL #3\n"
- "ld1h { z18.h }, p1/Z, [x9]\n"
- "ld1h { z29.h }, p0/Z, [x9, #1, MUL VL]\n"
- "addvl x12, x12, #1\n"
+ "zip1 z27.h, z18.h, z16.h\n"
+ "ld1h { z17.h }, p0/Z, [x11, #1, MUL VL]\n"
"addvl x11, x11, #1\n"
- "ld1h { z28.h }, p1/Z, [x28]\n"
- "ld1h { z20.h }, p1/Z, [x25]\n"
- "addvl x10, x10, #1\n"
- "addvl x9, x9, #1\n"
- "ld1h { z27.h }, p0/Z, [x28, #1, MUL VL]\n"
- "addvl x28, x28, #1\n"
- "ld1h { z26.h }, p0/Z, [x25, #1, MUL VL]\n"
- "addvl x25, x25, #1\n"
- "ld1h { z25.h }, p1/Z, [x24]\n"
- "ld1h { z24.h }, p0/Z, [x24, #1, MUL VL]\n"
- "addvl x24, x24, #1\n"
- "zip1 z17.h, z0.h, z16.h\n"
- "ld1h { z23.h }, p1/Z, [x23]\n"
- "ld1h { z22.h }, p0/Z, [x23, #1, MUL VL]\n"
- "addvl x23, x23, #1\n"
- "zip2 z16.h, z0.h, z16.h\n"
- "zip1 z21.h, z21.h, z19.h\n"
- "zip1 z19.h, z31.h, z18.h\n"
- "st1h { z17.h }, p2, [x20]\n"
- "cmp x27, #0x0\n"
- "zip2 z18.h, z31.h, z18.h\n"
- "zip1 z17.h, z30.h, z29.h\n"
- "st1h { z16.h }, p2, [x20, #1, MUL VL]\n"
- "incd x12, ALL, MUL #4\n"
- "zip1 z16.h, z28.h, z20.h\n"
- "zip2 z20.h, z28.h, z20.h\n"
- "st1h { z21.h }, p2, [x20, #2, MUL VL]\n"
+ "zip2 z26.h, z18.h, z16.h\n"
+ "ld1h { z16.h }, p0/Z, [x9, #1, MUL VL]\n"
"incd x11, ALL, MUL #4\n"
- "st1h { z19.h }, p2, [x20, #3, MUL VL]\n"
- "incd x10, ALL, MUL #4\n"
+ "zip1 z25.h, z17.h, z16.h\n"
+ "ld1h { z17.h }, p0/Z, [x28, #1, MUL VL]\n"
+ "addvl x9, x9, #1\n"
+ "ld1h { z16.h }, p0/Z, [x27, #1, MUL VL]\n"
+ "zip1 z24.h, z17.h, z16.h\n"
+ "ld1h { z19.h }, p1/Z, [x26]\n"
"incd x9, ALL, MUL #4\n"
- "zip1 z19.h, z27.h, z26.h\n"
- "st1h { z18.h }, p2, [x20, #4, MUL VL]\n"
+ "ld1h { z18.h }, p0/Z, [x26, #1, MUL VL]\n"
+ "addvl x28, x28, #1\n"
+ "ld1h { z17.h }, p1/Z, [x25]\n"
+ "zip1 z23.h, z19.h, z17.h\n"
+ "ld1h { z16.h }, p0/Z, [x25, #1, MUL VL]\n"
"incd x28, ALL, MUL #4\n"
+ "zip2 z22.h, z19.h, z17.h\n"
+ "ld1h { z21.h }, p1/Z, [x24]\n"
+ "addvl x27, x27, #1\n"
+ "zip1 z20.h, z18.h, z16.h\n"
+ "ld1h { z19.h }, p0/Z, [x24, #1, MUL VL]\n"
+ "incd x27, ALL, MUL #4\n"
+ "ld1h { z17.h }, p1/Z, [x23]\n"
+ "zip1 z18.h, z21.h, z17.h\n"
+ "ld1h { z16.h }, p0/Z, [x23, #1, MUL VL]\n"
+ "addvl x26, x26, #1\n"
+ "zip2 z17.h, z21.h, z17.h\n"
+ "st1h { z29.h }, p2, [x19]\n"
+ "incd x26, ALL, MUL #4\n"
+ "zip1 z16.h, z19.h, z16.h\n"
+ "st1h { z28.h }, p2, [x19, #1, MUL VL]\n"
+ "addvl x25, x25, #1\n"
+ "st1h { z25.h }, p2, [x19, #2, MUL VL]\n"
"incd x25, ALL, MUL #4\n"
- "zip1 z18.h, z25.h, z23.h\n"
- "st1h { z17.h }, p2, [x20, #5, MUL VL]\n"
+ "st1h { z27.h }, p2, [x19, #3, MUL VL]\n"
+ "addvl x24, x24, #1\n"
+ "st1h { z26.h }, p2, [x19, #4, MUL VL]\n"
"incd x24, ALL, MUL #4\n"
+ "st1h { z24.h }, p2, [x19, #5, MUL VL]\n"
+ "addvl x23, x23, #1\n"
+ "st1h { z23.h }, p2, [x19, #6, MUL VL]\n"
"incd x23, ALL, MUL #4\n"
- "zip2 z17.h, z25.h, z23.h\n"
- "st1h { z16.h }, p2, [x20, #6, MUL VL]\n"
- "zip1 z16.h, z24.h, z22.h\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z20.h }, p2, [x20, #7, MUL VL]\n"
- "addvl x20, x20, #12\n"
- "st1h { z19.h }, p2, [x20, #-4, MUL VL]\n"
- "st1h { z18.h }, p2, [x20, #-3, MUL VL]\n"
- "st1h { z17.h }, p2, [x20, #-2, MUL VL]\n"
- "st1h { z16.h }, p2, [x20, #-1, MUL VL]\n"
+ "st1h { z22.h }, p2, [x19, #7, MUL VL]\n"
+ "addvl x19, x19, #12\n"
+ "add x10, x10, %x[out_stride]\n"
+ "st1h { z20.h }, p2, [x19, #-4, MUL VL]\n"
+ "st1h { z18.h }, p2, [x19, #-3, MUL VL]\n"
+ "decw x22, ALL, MUL #3\n"
+ "st1h { z17.h }, p2, [x19, #-2, MUL VL]\n"
+ "cmp x22, #0x0\n"
+ "st1h { z16.h }, p2, [x19, #-1, MUL VL]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp %x[height], #0x8\n"
"addvl %x[out], %x[out], #12\n"
+ "cmp %x[height], #0x8\n"
"bge 1b\n"
"cbz %x[height], 12f\n"
"6:" // Main loop skip
+
"7:" // Tail row loop: Head
- "mov x12, %x[in]\n"
- "mov x21, %x[width]\n"
- "cnth x20, ALL, MUL #3\n"
- "add x11, x12, %x[in_stride]\n"
+ "mov x11, %x[in]\n"
+ "mov x10, %x[out]\n"
+ "add x9, x11, %x[in_stride]\n"
+ "add %x[in], x9, %x[in_stride]\n"
"cmp %x[height], #0x1\n"
- "add %x[in], x11, %x[in_stride]\n"
- "csel x11, x11, %x[pad_row], GT\n"
- "cmp x21, x20\n"
- "mov x22, %x[out]\n"
+ "csel x9, x9, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x2\n"
+ "mov x20, %x[width]\n"
+ "cnth x19, ALL, MUL #3\n"
+ "cmp x20, x19\n"
"blt 9f\n"
"8:" // Tail row loop: Unroll column loop
- "ld1h { z17.h }, p2/Z, [x12]\n"
- "ld1h { z22.h }, p2/Z, [x12, #1, MUL VL]\n"
- "sub x21, x21, x20\n"
- "cmp x21, x20\n"
- "ld1h { z16.h }, p2/Z, [x11]\n"
- "ld1h { z21.h }, p2/Z, [x11, #1, MUL VL]\n"
- "zip1 z18.h, z17.h, z16.h\n"
- "zip2 z17.h, z17.h, z16.h\n"
- "ld1h { z20.h }, p2/Z, [x12, #2, MUL VL]\n"
- "ld1h { z19.h }, p2/Z, [x11, #2, MUL VL]\n"
- "zip1 z16.h, z22.h, z21.h\n"
- "st1h { z18.h }, p2, [x22]\n"
- "st1h { z17.h }, p2, [x22, #1, MUL VL]\n"
- "addvl x12, x12, #3\n"
+ "ld1h { z17.h }, p2/Z, [x11]\n"
+ "sub x20, x20, x19\n"
+ "ld1h { z22.h }, p2/Z, [x11, #1, MUL VL]\n"
+ "cmp x20, x19\n"
+ "ld1h { z21.h }, p2/Z, [x11, #2, MUL VL]\n"
"addvl x11, x11, #3\n"
- "zip2 z18.h, z22.h, z21.h\n"
- "st1h { z16.h }, p2, [x22, #2, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip1 z17.h, z20.h, z19.h\n"
- "zip2 z16.h, z20.h, z19.h\n"
- "st1h { z18.h }, p2, [x22]\n"
- "st1h { z17.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z16.h }, p2, [x22, #2, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "ld1h { z16.h }, p2/Z, [x9]\n"
+ "zip1 z20.h, z17.h, z16.h\n"
+ "ld1h { z18.h }, p2/Z, [x9, #1, MUL VL]\n"
+ "zip2 z17.h, z17.h, z16.h\n"
+ "ld1h { z19.h }, p2/Z, [x9, #2, MUL VL]\n"
+ "addvl x9, x9, #3\n"
+ "zip1 z16.h, z22.h, z18.h\n"
+ "st1h { z20.h }, p2, [x10]\n"
+ "zip2 z18.h, z22.h, z18.h\n"
+ "st1h { z17.h }, p2, [x10, #1, MUL VL]\n"
+ "zip1 z17.h, z21.h, z19.h\n"
+ "st1h { z16.h }, p2, [x10, #2, MUL VL]\n"
+ "add x10, x10, %x[out_stride]\n"
+ "zip2 z16.h, z21.h, z19.h\n"
+ "st1h { z18.h }, p2, [x10]\n"
+ "st1h { z17.h }, p2, [x10, #1, MUL VL]\n"
+ "st1h { z16.h }, p2, [x10, #2, MUL VL]\n"
+ "add x10, x10, %x[out_stride]\n"
"bge 8b\n"
"9:" // Tail row loop: Unroll column loop skip
- "cbz x21, 11f\n"
+ "cbz x20, 11f\n"
"10:" // Tail row loop: Column loop
- "mov x20, x21\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z20.h }, p0/Z, [x12]\n"
+ "mov x19, x20\n"
+ "decw x20, ALL, MUL #3\n"
+ "whilelt p0.h, XZR, x19\n"
"ld1h { z17.h }, p0/Z, [x11]\n"
- "dech x20\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z19.h }, p0/Z, [x12, #1, MUL VL]\n"
- "ld1h { z16.h }, p0/Z, [x11, #1, MUL VL]\n"
- "decw x21, ALL, MUL #3\n"
- "addvl x12, x12, #1\n"
- "zip1 z18.h, z20.h, z17.h\n"
- "zip2 z17.h, z20.h, z17.h\n"
+ "ld1h { z16.h }, p0/Z, [x9]\n"
+ "zip1 z19.h, z17.h, z16.h\n"
+ "dech x19\n"
+ "zip2 z18.h, z17.h, z16.h\n"
+ "whilelt p0.h, XZR, x19\n"
+ "ld1h { z17.h }, p0/Z, [x11, #1, MUL VL]\n"
"addvl x11, x11, #1\n"
- "cmp x21, #0x0\n"
- "zip1 z16.h, z19.h, z16.h\n"
- "st1h { z18.h }, p2, [x22]\n"
- "incd x12, ALL, MUL #4\n"
+ "ld1h { z16.h }, p0/Z, [x9, #1, MUL VL]\n"
+ "zip1 z16.h, z17.h, z16.h\n"
+ "st1h { z19.h }, p2, [x10]\n"
"incd x11, ALL, MUL #4\n"
- "st1h { z17.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z16.h }, p2, [x22, #2, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1h { z18.h }, p2, [x10, #1, MUL VL]\n"
+ "addvl x9, x9, #1\n"
+ "st1h { z16.h }, p2, [x10, #2, MUL VL]\n"
+ "incd x9, ALL, MUL #4\n"
+ "add x10, x10, %x[out_stride]\n"
+ "cmp x20, #0x0\n"
"bgt 10b\n"
"11:" // Tail row loop: Column loop skip
- "cmp %x[height], #0x1\n"
"addvl %x[out], %x[out], #3\n"
+ "cmp %x[height], #0x1\n"
"bge 7b\n"
"12:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
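(For orientation: the "_2x2" transforms above pair rows and interleave them element-wise with zip1/zip2. Below is a minimal scalar sketch of that pairing only; the helper name is hypothetical, the 3VL column blocking and %x[out_stride] stepping are left out, and the padding row is assumed to read as zeros, as the library's callers arrange for %x[pad_row].)

#include <cstddef>
#include <cstdint>

// Hypothetical scalar model (not part of the library): consecutive row
// pairs are interleaved element-wise. In the kernel, zip1 produces the
// first half of each vector of pairs and zip2 the second half; together
// they yield exactly this stream.
void interleave_2x2_model(uint16_t *out, const uint16_t *in,
                          size_t height, size_t width, size_t in_stride)
{
    for (size_t r = 0; r < height; r += 2) {
        for (size_t c = 0; c < width; c++) {
            *out++ = in[r * in_stride + c];
            *out++ = (r + 1 < height) ? in[(r + 1) * in_stride + c] : 0;
        }
    }
}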
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_4VL.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_4VL.hpp
index a39235187f..aa9d7220fe 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_4VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_4VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -34,113 +34,113 @@ void sve_transpose_interleave_4VL(uint16_t *out, const uint16_t *in, size_t widt
size_t out_stride = 4 * height * get_vector_length<uint8_t>();
__asm__ __volatile__(
+ "ptrue p3.b\n"
"cmp %x[height], #0x4\n"
- "ptrue p4.b\n"
"blt 4f\n"
"1:" // Main row loop: Head
"mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
- "add x24, x25, %x[in_stride]\n"
+ "mov x25, %x[out]\n"
+ "add x24, x26, %x[in_stride]\n"
"add x23, x24, %x[in_stride]\n"
- "add %x[in], x23, %x[in_stride]\n"
- "mov x22, %x[out]\n"
+ "add x22, x23, %x[in_stride]\n"
+ "add %x[in], x22, %x[in_stride]\n"
"sub %x[height], %x[height], #0x4\n"
"mov x21, %x[width]\n"
"2:" // Main row loop: Column loop
"mov x20, x21\n"
- "whilelt p3.h, XZR, x20\n"
- "ld1h { z31.h }, p3/Z, [x26]\n"
- "ld1h { z30.h }, p3/Z, [x25]\n"
+ "mov x19, x25\n"
+ "whilelt p0.h, XZR, x20\n"
+ "ld1h { z31.h }, p0/Z, [x26]\n"
+ "ld1h { z30.h }, p0/Z, [x24]\n"
"dech x20\n"
+ "ld1h { z29.h }, p0/Z, [x23]\n"
"whilelt p2.h, XZR, x20\n"
- "ld1h { z29.h }, p2/Z, [x26, #1, MUL VL]\n"
- "ld1h { z28.h }, p2/Z, [x25, #1, MUL VL]\n"
+ "ld1h { z28.h }, p0/Z, [x22]\n"
"dech x20\n"
+ "ld1h { z27.h }, p2/Z, [x26, #1, MUL VL]\n"
"whilelt p1.h, XZR, x20\n"
- "ld1h { z27.h }, p1/Z, [x26, #2, MUL VL]\n"
- "ld1h { z26.h }, p1/Z, [x25, #2, MUL VL]\n"
+ "ld1h { z26.h }, p2/Z, [x24, #1, MUL VL]\n"
"dech x20\n"
+ "ld1h { z25.h }, p1/Z, [x26, #2, MUL VL]\n"
"whilelt p0.h, XZR, x20\n"
- "ld1h { z25.h }, p0/Z, [x26, #3, MUL VL]\n"
- "ld1h { z24.h }, p0/Z, [x25, #3, MUL VL]\n"
- "mov x20, x22\n"
- "dech x21, ALL, MUL #4\n"
- "ld1h { z23.h }, p3/Z, [x24]\n"
- "ld1h { z22.h }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1h { z21.h }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1h { z20.h }, p0/Z, [x24, #3, MUL VL]\n"
- "cmp x21, #0x0\n"
+ "ld1h { z24.h }, p1/Z, [x24, #2, MUL VL]\n"
+ "add x25, x25, %x[out_stride]\n"
+ "ld1h { z23.h }, p0/Z, [x26, #3, MUL VL]\n"
"addvl x26, x26, #4\n"
- "ld1h { z19.h }, p3/Z, [x23]\n"
- "ld1h { z18.h }, p2/Z, [x23, #1, MUL VL]\n"
- "addvl x25, x25, #4\n"
+ "ld1h { z22.h }, p0/Z, [x24, #3, MUL VL]\n"
"addvl x24, x24, #4\n"
- "ld1h { z17.h }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1h { z16.h }, p0/Z, [x23, #3, MUL VL]\n"
- "st1h { z31.h }, p4, [x20]\n"
+ "ld1h { z21.h }, p2/Z, [x23, #1, MUL VL]\n"
+ "dech x21, ALL, MUL #4\n"
+ "ld1h { z20.h }, p1/Z, [x23, #2, MUL VL]\n"
+ "cmp x21, #0x0\n"
+ "ld1h { z19.h }, p0/Z, [x23, #3, MUL VL]\n"
"addvl x23, x23, #4\n"
- "st1h { z29.h }, p4, [x20, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z27.h }, p4, [x20, #2, MUL VL]\n"
- "st1h { z25.h }, p4, [x20, #3, MUL VL]\n"
- "st1h { z30.h }, p4, [x20, #4, MUL VL]\n"
- "st1h { z28.h }, p4, [x20, #5, MUL VL]\n"
- "st1h { z26.h }, p4, [x20, #6, MUL VL]\n"
- "st1h { z24.h }, p4, [x20, #7, MUL VL]\n"
- "addvl x20, x20, #16\n"
- "st1h { z23.h }, p4, [x20, #-8, MUL VL]\n"
- "st1h { z22.h }, p4, [x20, #-7, MUL VL]\n"
- "st1h { z21.h }, p4, [x20, #-6, MUL VL]\n"
- "st1h { z20.h }, p4, [x20, #-5, MUL VL]\n"
- "st1h { z19.h }, p4, [x20, #-4, MUL VL]\n"
- "st1h { z18.h }, p4, [x20, #-3, MUL VL]\n"
- "st1h { z17.h }, p4, [x20, #-2, MUL VL]\n"
- "st1h { z16.h }, p4, [x20, #-1, MUL VL]\n"
+ "ld1h { z18.h }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z17.h }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z16.h }, p0/Z, [x22, #3, MUL VL]\n"
+ "addvl x22, x22, #4\n"
+ "st1h { z31.h }, p3, [x19]\n"
+ "st1h { z27.h }, p3, [x19, #1, MUL VL]\n"
+ "st1h { z25.h }, p3, [x19, #2, MUL VL]\n"
+ "st1h { z23.h }, p3, [x19, #3, MUL VL]\n"
+ "st1h { z30.h }, p3, [x19, #4, MUL VL]\n"
+ "st1h { z26.h }, p3, [x19, #5, MUL VL]\n"
+ "st1h { z24.h }, p3, [x19, #6, MUL VL]\n"
+ "st1h { z22.h }, p3, [x19, #7, MUL VL]\n"
+ "addvl x19, x19, #16\n"
+ "st1h { z29.h }, p3, [x19, #-8, MUL VL]\n"
+ "st1h { z21.h }, p3, [x19, #-7, MUL VL]\n"
+ "st1h { z20.h }, p3, [x19, #-6, MUL VL]\n"
+ "st1h { z19.h }, p3, [x19, #-5, MUL VL]\n"
+ "st1h { z28.h }, p3, [x19, #-4, MUL VL]\n"
+ "st1h { z18.h }, p3, [x19, #-3, MUL VL]\n"
+ "st1h { z17.h }, p3, [x19, #-2, MUL VL]\n"
+ "st1h { z16.h }, p3, [x19, #-1, MUL VL]\n"
"bgt 2b\n"
"3:" // Main row loop: Column loop skip
- "cmp %x[height], #0x4\n"
"addvl %x[out], %x[out], #16\n"
+ "cmp %x[height], #0x4\n"
"bge 1b\n"
"cbz %x[height], 8f\n"
"4:" // Main loop skip
"5:" // Tail row loop: Head
"mov x26, %x[in]\n"
+ "mov x25, %x[out]\n"
"add %x[in], x26, %x[in_stride]\n"
- "mov x22, %x[out]\n"
"sub %x[height], %x[height], #0x1\n"
- "mov x21, %x[width]\n"
+ "mov x20, %x[width]\n"
"6:" // Tail row loop: Column loop
- "mov x20, x21\n"
- "whilelt p0.h, XZR, x20\n"
+ "mov x19, x20\n"
+ "dech x20, ALL, MUL #4\n"
+ "whilelt p0.h, XZR, x19\n"
"ld1h { z19.h }, p0/Z, [x26]\n"
- "dech x20\n"
- "whilelt p0.h, XZR, x20\n"
+ "dech x19\n"
+ "whilelt p0.h, XZR, x19\n"
"ld1h { z18.h }, p0/Z, [x26, #1, MUL VL]\n"
- "dech x20\n"
- "whilelt p0.h, XZR, x20\n"
+ "dech x19\n"
+ "whilelt p0.h, XZR, x19\n"
"ld1h { z17.h }, p0/Z, [x26, #2, MUL VL]\n"
- "dech x20\n"
- "dech x21, ALL, MUL #4\n"
- "whilelt p0.h, XZR, x20\n"
- "cmp x21, #0x0\n"
+ "dech x19\n"
+ "whilelt p0.h, XZR, x19\n"
"ld1h { z16.h }, p0/Z, [x26, #3, MUL VL]\n"
- "st1h { z19.h }, p4, [x22]\n"
+ "st1h { z19.h }, p3, [x25]\n"
"addvl x26, x26, #4\n"
- "st1h { z18.h }, p4, [x22, #1, MUL VL]\n"
- "st1h { z17.h }, p4, [x22, #2, MUL VL]\n"
- "st1h { z16.h }, p4, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1h { z18.h }, p3, [x25, #1, MUL VL]\n"
+ "cmp x20, #0x0\n"
+ "st1h { z17.h }, p3, [x25, #2, MUL VL]\n"
+ "st1h { z16.h }, p3, [x25, #3, MUL VL]\n"
+ "add x25, x25, %x[out_stride]\n"
"bgt 6b\n"
"7:" // Tail row loop: Column loop skip
- "cmp %x[height], #0x1\n"
"addvl %x[out], %x[out], #4\n"
+ "cmp %x[height], #0x1\n"
"bge 5b\n"
"8:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
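(The plain 4VL transform above does no zipping at all: each column block of four vector lengths is copied row by row, with the predicated whilelt loads zero-filling the tail while the stores write full vectors. A scalar sketch under the assumption that out_stride equals 4 * height * VL bytes, as computed just before the asm block, so successive blocks land contiguously; the function name is hypothetical.)

#include <cstddef>
#include <cstdint>

// Hypothetical scalar model: copy 4*VL-element column blocks row by row,
// zero-padding the final partial block as the predicated loads do.
void interleave_4vl_model(uint16_t *out, const uint16_t *in, size_t height,
                          size_t width, size_t in_stride, size_t vl_elems)
{
    const size_t block = 4 * vl_elems;               // one output row slice
    for (size_t c0 = 0; c0 < width; c0 += block)     // "Column loop"
        for (size_t r = 0; r < height; r++)          // rows walked per block
            for (size_t c = c0; c < c0 + block; c++)
                *out++ = (c < width) ? in[r * in_stride + c] : 0;
}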
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_4VL_1x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_4VL_1x4.hpp
index e3489398d4..5e5f7a53a7 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_4VL_1x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_4VL_1x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -40,252 +40,254 @@ void sve_transpose_interleave_4VL_1x4(uint8_t *out, const uint8_t *in, size_t wi
size_t out_stride = 4 * roundup<size_t>(height, 4) * get_vector_length<uint32_t>();
__asm__ __volatile__(
- "cmp %x[height], #0x8\n"
"ptrue p1.b\n"
+ "cmp %x[height], #0x8\n"
"blt 6f\n"
"1:" // Main row loop: Head
- "mov x10, %x[in]\n"
- "add x9, x10, %x[in_stride]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
+ "mov x9, %x[in]\n"
+ "mov x28, %x[out]\n"
+ "add x27, x9, %x[in_stride]\n"
"add x26, x27, %x[in_stride]\n"
- "mov x25, %x[width]\n"
- "cntb x24, ALL, MUL #2\n"
- "add x23, x26, %x[in_stride]\n"
- "add x21, x23, %x[in_stride]\n"
- "add x20, x21, %x[in_stride]\n"
- "cmp x25, x24\n"
- "add %x[in], x20, %x[in_stride]\n"
- "mov x22, %x[out]\n"
+ "add x25, x26, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "add x23, x24, %x[in_stride]\n"
+ "add x22, x23, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add %x[in], x21, %x[in_stride]\n"
"sub %x[height], %x[height], #0x8\n"
+ "mov x20, %x[width]\n"
+ "cntb x19, ALL, MUL #2\n"
+ "cmp x20, x19\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1b { z21.b }, p1/Z, [x10]\n"
- "ld1b { z20.b }, p1/Z, [x9]\n"
- "sub x25, x25, x24\n"
- "cmp x25, x24\n"
- "ld1b { z17.b }, p1/Z, [x28]\n"
- "ld1b { z16.b }, p1/Z, [x27]\n"
- "zip1 z4.b, z21.b, z17.b\n"
- "zip1 z3.b, z20.b, z16.b\n"
- "ld1b { z19.b }, p1/Z, [x26]\n"
- "ld1b { z18.b }, p1/Z, [x23]\n"
- "zip2 z2.b, z21.b, z17.b\n"
- "zip2 z1.b, z20.b, z16.b\n"
- "ld1b { z17.b }, p1/Z, [x21]\n"
- "ld1b { z16.b }, p1/Z, [x20]\n"
- "zip1 z0.b, z19.b, z17.b\n"
- "zip1 z31.b, z18.b, z16.b\n"
- "ld1b { z24.b }, p1/Z, [x10, #1, MUL VL]\n"
- "ld1b { z20.b }, p1/Z, [x9, #1, MUL VL]\n"
- "zip2 z30.b, z19.b, z17.b\n"
- "zip2 z23.b, z18.b, z16.b\n"
- "ld1b { z17.b }, p1/Z, [x28, #1, MUL VL]\n"
- "ld1b { z16.b }, p1/Z, [x27, #1, MUL VL]\n"
- "zip1 z22.b, z24.b, z17.b\n"
- "zip1 z21.b, z20.b, z16.b\n"
- "ld1b { z19.b }, p1/Z, [x26, #1, MUL VL]\n"
- "ld1b { z18.b }, p1/Z, [x23, #1, MUL VL]\n"
- "zip2 z29.b, z24.b, z17.b\n"
- "zip2 z28.b, z20.b, z16.b\n"
- "ld1b { z17.b }, p1/Z, [x21, #1, MUL VL]\n"
- "ld1b { z16.b }, p1/Z, [x20, #1, MUL VL]\n"
- "zip1 z27.b, z19.b, z17.b\n"
- "zip1 z26.b, z18.b, z16.b\n"
- "zip2 z25.b, z19.b, z17.b\n"
- "zip2 z24.b, z18.b, z16.b\n"
- "addvl x10, x10, #2\n"
+ "ld1b { z17.b }, p1/Z, [x9]\n"
+ "sub x20, x20, x19\n"
+ "ld1b { z3.b }, p1/Z, [x9, #1, MUL VL]\n"
"addvl x9, x9, #2\n"
- "zip1 z16.b, z4.b, z3.b\n"
- "zip2 z17.b, z4.b, z3.b\n"
- "st1b { z16.b }, p1, [x22]\n"
- "addvl x28, x28, #2\n"
- "zip1 z16.b, z2.b, z1.b\n"
- "zip2 z20.b, z2.b, z1.b\n"
- "st1b { z17.b }, p1, [x22, #1, MUL VL]\n"
+ "ld1b { z20.b }, p1/Z, [x27]\n"
+ "cmp x20, x19\n"
+ "ld1b { z2.b }, p1/Z, [x27, #1, MUL VL]\n"
"addvl x27, x27, #2\n"
- "zip1 z19.b, z0.b, z31.b\n"
- "zip2 z18.b, z0.b, z31.b\n"
- "st1b { z16.b }, p1, [x22, #2, MUL VL]\n"
+ "ld1b { z16.b }, p1/Z, [x26]\n"
+ "zip1 z18.b, z17.b, z16.b\n"
+ "ld1b { z1.b }, p1/Z, [x26, #1, MUL VL]\n"
"addvl x26, x26, #2\n"
- "zip1 z17.b, z30.b, z23.b\n"
- "zip2 z16.b, z30.b, z23.b\n"
- "st1b { z20.b }, p1, [x22, #3, MUL VL]\n"
+ "zip2 z19.b, z17.b, z16.b\n"
+ "ld1b { z17.b }, p1/Z, [x25]\n"
+ "ld1b { z0.b }, p1/Z, [x25, #1, MUL VL]\n"
+ "zip1 z31.b, z3.b, z1.b\n"
+ "ld1b { z30.b }, p1/Z, [x24]\n"
+ "addvl x25, x25, #2\n"
+ "zip1 z16.b, z20.b, z17.b\n"
+ "ld1b { z29.b }, p1/Z, [x24, #1, MUL VL]\n"
+ "addvl x24, x24, #2\n"
+ "zip1 z28.b, z18.b, z16.b\n"
+ "ld1b { z27.b }, p1/Z, [x23]\n"
+ "zip2 z26.b, z18.b, z16.b\n"
+ "ld1b { z25.b }, p1/Z, [x23, #1, MUL VL]\n"
"addvl x23, x23, #2\n"
- "st1b { z19.b }, p1, [x22, #4, MUL VL]\n"
+ "zip2 z18.b, z20.b, z17.b\n"
+ "ld1b { z16.b }, p1/Z, [x22]\n"
+ "zip1 z24.b, z2.b, z0.b\n"
+ "ld1b { z23.b }, p1/Z, [x22, #1, MUL VL]\n"
+ "addvl x22, x22, #2\n"
+ "zip1 z17.b, z19.b, z18.b\n"
+ "ld1b { z22.b }, p1/Z, [x21]\n"
+ "zip2 z21.b, z19.b, z18.b\n"
+ "ld1b { z20.b }, p1/Z, [x21, #1, MUL VL]\n"
"addvl x21, x21, #2\n"
- "addvl x20, x20, #2\n"
- "zip1 z23.b, z22.b, z21.b\n"
- "st1b { z18.b }, p1, [x22, #5, MUL VL]\n"
- "zip2 z22.b, z22.b, z21.b\n"
- "zip1 z21.b, z29.b, z28.b\n"
- "st1b { z17.b }, p1, [x22, #6, MUL VL]\n"
- "zip2 z20.b, z29.b, z28.b\n"
- "zip1 z19.b, z27.b, z26.b\n"
- "st1b { z16.b }, p1, [x22, #7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip2 z18.b, z27.b, z26.b\n"
- "zip1 z17.b, z25.b, z24.b\n"
- "zip2 z16.b, z25.b, z24.b\n"
- "st1b { z23.b }, p1, [x22]\n"
- "st1b { z22.b }, p1, [x22, #1, MUL VL]\n"
- "st1b { z21.b }, p1, [x22, #2, MUL VL]\n"
- "st1b { z20.b }, p1, [x22, #3, MUL VL]\n"
- "st1b { z19.b }, p1, [x22, #4, MUL VL]\n"
- "st1b { z18.b }, p1, [x22, #5, MUL VL]\n"
- "st1b { z17.b }, p1, [x22, #6, MUL VL]\n"
- "st1b { z16.b }, p1, [x22, #7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "zip1 z19.b, z30.b, z16.b\n"
+ "st1b { z28.b }, p1, [x28]\n"
+ "zip2 z18.b, z30.b, z16.b\n"
+ "st1b { z26.b }, p1, [x28, #1, MUL VL]\n"
+ "zip1 z16.b, z27.b, z22.b\n"
+ "st1b { z17.b }, p1, [x28, #2, MUL VL]\n"
+ "zip1 z17.b, z19.b, z16.b\n"
+ "st1b { z21.b }, p1, [x28, #3, MUL VL]\n"
+ "zip2 z16.b, z19.b, z16.b\n"
+ "st1b { z17.b }, p1, [x28, #4, MUL VL]\n"
+ "zip2 z17.b, z27.b, z22.b\n"
+ "st1b { z16.b }, p1, [x28, #5, MUL VL]\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #6, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #7, MUL VL]\n"
+ "add x28, x28, %x[out_stride]\n"
+ "zip1 z16.b, z31.b, z24.b\n"
+ "st1b { z16.b }, p1, [x28]\n"
+ "zip2 z16.b, z31.b, z24.b\n"
+ "zip2 z18.b, z3.b, z1.b\n"
+ "st1b { z16.b }, p1, [x28, #1, MUL VL]\n"
+ "zip2 z17.b, z2.b, z0.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #2, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #3, MUL VL]\n"
+ "zip1 z18.b, z29.b, z23.b\n"
+ "zip1 z17.b, z25.b, z20.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #4, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #5, MUL VL]\n"
+ "zip2 z18.b, z29.b, z23.b\n"
+ "zip2 z17.b, z25.b, z20.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #6, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #7, MUL VL]\n"
+ "add x28, x28, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x25, 5f\n"
+ "cbz x20, 5f\n"
"4:" // Main row loop: Column loop
- "whilelt p0.b, XZR, x25\n"
- "ld1b { z19.b }, p0/Z, [x10]\n"
- "ld1b { z18.b }, p0/Z, [x9]\n"
- "decw x25, ALL, MUL #4\n"
- "ld1b { z17.b }, p0/Z, [x28]\n"
- "ld1b { z16.b }, p0/Z, [x27]\n"
- "zip1 z27.b, z19.b, z17.b\n"
- "zip1 z26.b, z18.b, z16.b\n"
- "ld1b { z22.b }, p0/Z, [x26]\n"
- "ld1b { z21.b }, p0/Z, [x23]\n"
- "zip2 z25.b, z19.b, z17.b\n"
- "zip2 z20.b, z18.b, z16.b\n"
- "ld1b { z19.b }, p0/Z, [x21]\n"
- "ld1b { z16.b }, p0/Z, [x20]\n"
- "zip1 z18.b, z22.b, z19.b\n"
- "zip1 z17.b, z21.b, z16.b\n"
- "zip2 z24.b, z22.b, z19.b\n"
- "zip2 z16.b, z21.b, z16.b\n"
- "cmp x25, #0x0\n"
- "addvl x10, x10, #1\n"
+ "whilelt p0.b, XZR, x20\n"
+ "ld1b { z17.b }, p0/Z, [x9]\n"
"addvl x9, x9, #1\n"
- "addvl x28, x28, #1\n"
- "zip1 z23.b, z27.b, z26.b\n"
- "zip2 z22.b, z27.b, z26.b\n"
+ "ld1b { z25.b }, p0/Z, [x27]\n"
"addvl x27, x27, #1\n"
+ "ld1b { z16.b }, p0/Z, [x26]\n"
+ "zip1 z18.b, z17.b, z16.b\n"
+ "ld1b { z24.b }, p0/Z, [x25]\n"
"addvl x26, x26, #1\n"
- "zip1 z21.b, z25.b, z20.b\n"
- "zip2 z20.b, z25.b, z20.b\n"
+ "zip2 z23.b, z17.b, z16.b\n"
+ "ld1b { z22.b }, p0/Z, [x24]\n"
+ "addvl x25, x25, #1\n"
+ "zip1 z16.b, z25.b, z24.b\n"
+ "ld1b { z21.b }, p0/Z, [x23]\n"
+ "addvl x24, x24, #1\n"
+ "zip1 z17.b, z18.b, z16.b\n"
+ "ld1b { z20.b }, p0/Z, [x22]\n"
"addvl x23, x23, #1\n"
+ "zip2 z18.b, z18.b, z16.b\n"
+ "ld1b { z19.b }, p0/Z, [x21]\n"
+ "addvl x22, x22, #1\n"
+ "zip2 z16.b, z25.b, z24.b\n"
+ "st1b { z17.b }, p1, [x28]\n"
"addvl x21, x21, #1\n"
- "zip1 z19.b, z18.b, z17.b\n"
- "zip2 z18.b, z18.b, z17.b\n"
- "addvl x20, x20, #1\n"
- "zip1 z17.b, z24.b, z16.b\n"
- "zip2 z16.b, z24.b, z16.b\n"
- "st1b { z23.b }, p1, [x22]\n"
- "st1b { z22.b }, p1, [x22, #1, MUL VL]\n"
- "st1b { z21.b }, p1, [x22, #2, MUL VL]\n"
- "st1b { z20.b }, p1, [x22, #3, MUL VL]\n"
- "st1b { z19.b }, p1, [x22, #4, MUL VL]\n"
- "st1b { z18.b }, p1, [x22, #5, MUL VL]\n"
- "st1b { z17.b }, p1, [x22, #6, MUL VL]\n"
- "st1b { z16.b }, p1, [x22, #7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "zip1 z17.b, z23.b, z16.b\n"
+ "st1b { z18.b }, p1, [x28, #1, MUL VL]\n"
+ "decw x20, ALL, MUL #4\n"
+ "zip2 z16.b, z23.b, z16.b\n"
+ "st1b { z17.b }, p1, [x28, #2, MUL VL]\n"
+ "cmp x20, #0x0\n"
+ "zip1 z18.b, z22.b, z20.b\n"
+ "st1b { z16.b }, p1, [x28, #3, MUL VL]\n"
+ "zip1 z17.b, z21.b, z19.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #4, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #5, MUL VL]\n"
+ "zip2 z18.b, z22.b, z20.b\n"
+ "zip2 z17.b, z21.b, z19.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #6, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #7, MUL VL]\n"
+ "add x28, x28, %x[out_stride]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp %x[height], #0x8\n"
"addvl %x[out], %x[out], #8\n"
+ "cmp %x[height], #0x8\n"
"bge 1b\n"
"cbz %x[height], 12f\n"
"6:" // Main loop skip
+
"7:" // Tail row loop: Head
- "mov x10, %x[in]\n"
- "add x9, x10, %x[in_stride]\n"
- "add x28, x9, %x[in_stride]\n"
- "mov x21, %x[width]\n"
- "cntb x20, ALL, MUL #2\n"
- "add x27, x28, %x[in_stride]\n"
+ "mov x9, %x[in]\n"
+ "mov x28, %x[out]\n"
+ "add x27, x9, %x[in_stride]\n"
+ "add x26, x27, %x[in_stride]\n"
+ "add x25, x26, %x[in_stride]\n"
+ "add %x[in], x25, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x27, %x[in_stride]\n"
- "csel x27, x27, %x[pad_row], GT\n"
- "csel x28, x28, %x[pad_row], GE\n"
+ "csel x25, x25, %x[pad_row], GT\n"
+ "csel x26, x26, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x9, x9, %x[pad_row], GT\n"
- "cmp x21, x20\n"
- "mov x22, %x[out]\n"
+ "csel x27, x27, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x4\n"
+ "mov x20, %x[width]\n"
+ "cntb x19, ALL, MUL #2\n"
+ "cmp x20, x19\n"
"blt 9f\n"
"8:" // Tail row loop: Unroll column loop
- "ld1b { z21.b }, p1/Z, [x10]\n"
- "ld1b { z19.b }, p1/Z, [x9]\n"
- "sub x21, x21, x20\n"
- "cmp x21, x20\n"
- "ld1b { z17.b }, p1/Z, [x28]\n"
- "ld1b { z16.b }, p1/Z, [x27]\n"
- "zip1 z26.b, z21.b, z17.b\n"
- "zip1 z25.b, z19.b, z16.b\n"
- "ld1b { z20.b }, p1/Z, [x10, #1, MUL VL]\n"
- "ld1b { z18.b }, p1/Z, [x9, #1, MUL VL]\n"
- "zip2 z24.b, z21.b, z17.b\n"
- "zip2 z19.b, z19.b, z16.b\n"
- "ld1b { z17.b }, p1/Z, [x28, #1, MUL VL]\n"
- "ld1b { z16.b }, p1/Z, [x27, #1, MUL VL]\n"
- "zip1 z23.b, z20.b, z17.b\n"
- "zip1 z22.b, z18.b, z16.b\n"
- "zip2 z21.b, z20.b, z17.b\n"
- "zip2 z20.b, z18.b, z16.b\n"
- "addvl x10, x10, #2\n"
+ "ld1b { z18.b }, p1/Z, [x9]\n"
+ "sub x20, x20, x19\n"
+ "ld1b { z19.b }, p1/Z, [x9, #1, MUL VL]\n"
"addvl x9, x9, #2\n"
- "zip1 z16.b, z26.b, z25.b\n"
- "zip2 z18.b, z26.b, z25.b\n"
- "st1b { z16.b }, p1, [x22]\n"
- "addvl x28, x28, #2\n"
- "zip1 z17.b, z24.b, z19.b\n"
- "zip2 z16.b, z24.b, z19.b\n"
- "st1b { z18.b }, p1, [x22, #1, MUL VL]\n"
+ "ld1b { z25.b }, p1/Z, [x27]\n"
+ "cmp x20, x19\n"
+ "ld1b { z24.b }, p1/Z, [x27, #1, MUL VL]\n"
"addvl x27, x27, #2\n"
- "st1b { z17.b }, p1, [x22, #2, MUL VL]\n"
- "zip1 z19.b, z23.b, z22.b\n"
- "zip2 z18.b, z23.b, z22.b\n"
- "st1b { z16.b }, p1, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip1 z17.b, z21.b, z20.b\n"
- "zip2 z16.b, z21.b, z20.b\n"
- "st1b { z19.b }, p1, [x22]\n"
- "st1b { z18.b }, p1, [x22, #1, MUL VL]\n"
- "st1b { z17.b }, p1, [x22, #2, MUL VL]\n"
- "st1b { z16.b }, p1, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "ld1b { z17.b }, p1/Z, [x26]\n"
+ "zip1 z23.b, z18.b, z17.b\n"
+ "ld1b { z16.b }, p1/Z, [x26, #1, MUL VL]\n"
+ "addvl x26, x26, #2\n"
+ "zip2 z22.b, z18.b, z17.b\n"
+ "ld1b { z18.b }, p1/Z, [x25]\n"
+ "ld1b { z21.b }, p1/Z, [x25, #1, MUL VL]\n"
+ "zip1 z20.b, z19.b, z16.b\n"
+ "addvl x25, x25, #2\n"
+ "zip2 z19.b, z19.b, z16.b\n"
+ "zip1 z17.b, z25.b, z18.b\n"
+ "zip1 z16.b, z23.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28]\n"
+ "zip2 z16.b, z23.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #1, MUL VL]\n"
+ "zip2 z17.b, z25.b, z18.b\n"
+ "zip1 z16.b, z22.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #2, MUL VL]\n"
+ "zip2 z16.b, z22.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #3, MUL VL]\n"
+ "add x28, x28, %x[out_stride]\n"
+ "zip1 z18.b, z24.b, z21.b\n"
+ "zip2 z17.b, z24.b, z21.b\n"
+ "zip1 z16.b, z20.b, z18.b\n"
+ "st1b { z16.b }, p1, [x28]\n"
+ "zip2 z16.b, z20.b, z18.b\n"
+ "st1b { z16.b }, p1, [x28, #1, MUL VL]\n"
+ "zip1 z16.b, z19.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #2, MUL VL]\n"
+ "zip2 z16.b, z19.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #3, MUL VL]\n"
+ "add x28, x28, %x[out_stride]\n"
"bge 8b\n"
"9:" // Tail row loop: Unroll column loop skip
- "cbz x21, 11f\n"
+ "cbz x20, 11f\n"
"10:" // Tail row loop: Column loop
- "whilelt p0.b, XZR, x21\n"
- "ld1b { z20.b }, p0/Z, [x10]\n"
- "ld1b { z21.b }, p0/Z, [x9]\n"
- "decw x21, ALL, MUL #4\n"
- "ld1b { z19.b }, p0/Z, [x28]\n"
- "ld1b { z16.b }, p0/Z, [x27]\n"
- "zip1 z18.b, z20.b, z19.b\n"
- "zip1 z17.b, z21.b, z16.b\n"
- "zip2 z20.b, z20.b, z19.b\n"
- "zip2 z16.b, z21.b, z16.b\n"
- "cmp x21, #0x0\n"
- "addvl x10, x10, #1\n"
+ "whilelt p0.b, XZR, x20\n"
+ "ld1b { z18.b }, p0/Z, [x9]\n"
"addvl x9, x9, #1\n"
- "addvl x28, x28, #1\n"
- "zip1 z19.b, z18.b, z17.b\n"
- "zip2 z18.b, z18.b, z17.b\n"
+ "ld1b { z21.b }, p0/Z, [x27]\n"
"addvl x27, x27, #1\n"
- "zip1 z17.b, z20.b, z16.b\n"
- "zip2 z16.b, z20.b, z16.b\n"
- "st1b { z19.b }, p1, [x22]\n"
- "st1b { z18.b }, p1, [x22, #1, MUL VL]\n"
- "st1b { z17.b }, p1, [x22, #2, MUL VL]\n"
- "st1b { z16.b }, p1, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "ld1b { z17.b }, p0/Z, [x26]\n"
+ "zip1 z20.b, z18.b, z17.b\n"
+ "ld1b { z16.b }, p0/Z, [x25]\n"
+ "addvl x26, x26, #1\n"
+ "zip2 z19.b, z18.b, z17.b\n"
+ "addvl x25, x25, #1\n"
+ "decw x20, ALL, MUL #4\n"
+ "zip1 z18.b, z21.b, z16.b\n"
+ "cmp x20, #0x0\n"
+ "zip2 z17.b, z21.b, z16.b\n"
+ "zip1 z16.b, z20.b, z18.b\n"
+ "st1b { z16.b }, p1, [x28]\n"
+ "zip2 z16.b, z20.b, z18.b\n"
+ "st1b { z16.b }, p1, [x28, #1, MUL VL]\n"
+ "zip1 z16.b, z19.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #2, MUL VL]\n"
+ "zip2 z16.b, z19.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #3, MUL VL]\n"
+ "add x28, x28, %x[out_stride]\n"
"bgt 10b\n"
"11:" // Tail row loop: Column loop skip
- "cmp %x[height], #0x1\n"
"addvl %x[out], %x[out], #4\n"
+ "cmp %x[height], #0x1\n"
"bge 7b\n"
"12:" // Done
+
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
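(The "_1x4" byte transforms above interleave four rows at a time using two levels of zips: rows (0,2) and (1,3) are zipped first, and zipping those results gives a plain four-way interleave. A scalar sketch of that effect follows, with a hypothetical name and with the vector-length blocking and pad-row substitution left out.)

#include <cstddef>
#include <cstdint>

// Hypothetical scalar model of the two-level zip: for successive columns c,
// zip1(zip1(r0, r2), zip1(r1, r3)) emits lanes in the order
// r0[c], r1[c], r2[c], r3[c], ...
void interleave_1x4_model(uint8_t *out, const uint8_t *r0, const uint8_t *r1,
                          const uint8_t *r2, const uint8_t *r3, size_t n)
{
    for (size_t c = 0; c < n; c++) {
        *out++ = r0[c];
        *out++ = r1[c];
        *out++ = r2[c];
        *out++ = r3[c];
    }
}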
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_4VL_2x2.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_4VL_2x2.hpp
index 9505dc5e6d..48040f9edb 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_4VL_2x2.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_4VL_2x2.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -40,293 +40,293 @@ void sve_transpose_interleave_4VL_2x2(uint16_t *out, const uint16_t *in, size_t
size_t out_stride = 4 * roundup<size_t>(height, 2) * get_vector_length<uint16_t>();
__asm__ __volatile__(
- "cmp %x[height], #0x8\n"
"ptrue p2.b\n"
+ "cmp %x[height], #0x8\n"
"blt 6f\n"
"1:" // Main row loop: Head
- "mov x12, %x[in]\n"
- "add x11, x12, %x[in_stride]\n"
- "add x10, x11, %x[in_stride]\n"
- "add x9, x10, %x[in_stride]\n"
+ "mov x11, %x[in]\n"
+ "mov x10, %x[out]\n"
+ "add x9, x11, %x[in_stride]\n"
"add x28, x9, %x[in_stride]\n"
- "mov x27, %x[width]\n"
- "cnth x26, ALL, MUL #4\n"
- "add x25, x28, %x[in_stride]\n"
+ "add x27, x28, %x[in_stride]\n"
+ "add x26, x27, %x[in_stride]\n"
+ "add x25, x26, %x[in_stride]\n"
"add x24, x25, %x[in_stride]\n"
"add x23, x24, %x[in_stride]\n"
- "cmp x27, x26\n"
"add %x[in], x23, %x[in_stride]\n"
- "mov x22, %x[out]\n"
"sub %x[height], %x[height], #0x8\n"
+ "mov x22, %x[width]\n"
+ "cnth x21, ALL, MUL #4\n"
+ "cmp x22, x21\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1h { z18.h }, p2/Z, [x12]\n"
- "ld1h { z20.h }, p2/Z, [x12, #1, MUL VL]\n"
- "mov x21, x22\n"
- "add x22, x22, %x[out_stride]\n"
- "ld1h { z17.h }, p2/Z, [x11]\n"
- "ld1h { z16.h }, p2/Z, [x11, #1, MUL VL]\n"
- "zip1 z25.h, z18.h, z17.h\n"
- "zip2 z24.h, z18.h, z17.h\n"
- "ld1h { z19.h }, p2/Z, [x10]\n"
- "ld1h { z18.h }, p2/Z, [x10, #1, MUL VL]\n"
- "zip1 z23.h, z20.h, z16.h\n"
- "zip2 z15.h, z20.h, z16.h\n"
- "ld1h { z17.h }, p2/Z, [x9]\n"
- "ld1h { z16.h }, p2/Z, [x9, #1, MUL VL]\n"
- "zip1 z14.h, z19.h, z17.h\n"
- "zip2 z13.h, z19.h, z17.h\n"
- "ld1h { z17.h }, p2/Z, [x12, #2, MUL VL]\n"
- "ld1h { z19.h }, p2/Z, [x12, #3, MUL VL]\n"
- "zip1 z12.h, z18.h, z16.h\n"
- "zip2 z11.h, z18.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x11, #2, MUL VL]\n"
- "ld1h { z18.h }, p2/Z, [x11, #3, MUL VL]\n"
- "mov x20, x22\n"
- "zip1 z10.h, z17.h, z16.h\n"
- "ld1h { z21.h }, p2/Z, [x10, #2, MUL VL]\n"
- "ld1h { z20.h }, p2/Z, [x10, #3, MUL VL]\n"
- "zip2 z9.h, z17.h, z16.h\n"
- "zip1 z8.h, z19.h, z18.h\n"
- "ld1h { z17.h }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1h { z16.h }, p2/Z, [x9, #3, MUL VL]\n"
- "zip2 z7.h, z19.h, z18.h\n"
- "zip1 z6.h, z21.h, z17.h\n"
- "ld1h { z19.h }, p2/Z, [x28]\n"
- "ld1h { z18.h }, p2/Z, [x28, #1, MUL VL]\n"
- "zip2 z5.h, z21.h, z17.h\n"
- "zip1 z4.h, z20.h, z16.h\n"
- "ld1h { z22.h }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1h { z3.h }, p2/Z, [x28, #3, MUL VL]\n"
- "zip2 z2.h, z20.h, z16.h\n"
- "sub x27, x27, x26\n"
- "ld1h { z17.h }, p2/Z, [x25]\n"
- "ld1h { z16.h }, p2/Z, [x25, #1, MUL VL]\n"
- "zip1 z1.h, z19.h, z17.h\n"
- "zip2 z0.h, z19.h, z17.h\n"
- "ld1h { z21.h }, p2/Z, [x25, #2, MUL VL]\n"
- "ld1h { z20.h }, p2/Z, [x25, #3, MUL VL]\n"
- "zip1 z31.h, z18.h, z16.h\n"
- "zip2 z30.h, z18.h, z16.h\n"
- "ld1h { z17.h }, p2/Z, [x24]\n"
- "ld1h { z19.h }, p2/Z, [x24, #1, MUL VL]\n"
- "cmp x27, x26\n"
- "addvl x12, x12, #4\n"
- "ld1h { z29.h }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1h { z28.h }, p2/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z21.h }, p2/Z, [x11]\n"
+ "mov x20, x10\n"
+ "ld1h { z19.h }, p2/Z, [x11, #1, MUL VL]\n"
+ "add x10, x10, %x[out_stride]\n"
+ "ld1h { z23.h }, p2/Z, [x11, #2, MUL VL]\n"
+ "mov x19, x10\n"
+ "ld1h { z31.h }, p2/Z, [x11, #3, MUL VL]\n"
"addvl x11, x11, #4\n"
- "addvl x10, x10, #4\n"
- "ld1h { z16.h }, p2/Z, [x23]\n"
- "ld1h { z18.h }, p2/Z, [x23, #1, MUL VL]\n"
- "zip1 z27.h, z17.h, z16.h\n"
- "zip2 z26.h, z17.h, z16.h\n"
- "ld1h { z17.h }, p2/Z, [x23, #2, MUL VL]\n"
- "ld1h { z16.h }, p2/Z, [x23, #3, MUL VL]\n"
- "st1h { z25.h }, p2, [x21]\n"
- "zip1 z25.h, z19.h, z18.h\n"
- "st1h { z24.h }, p2, [x21, #1, MUL VL]\n"
- "zip2 z24.h, z19.h, z18.h\n"
+ "ld1h { z29.h }, p2/Z, [x9]\n"
+ "zip1 z0.h, z21.h, z29.h\n"
+ "ld1h { z17.h }, p2/Z, [x9, #1, MUL VL]\n"
+ "add x10, x10, %x[out_stride]\n"
+ "zip2 z22.h, z21.h, z29.h\n"
+ "ld1h { z15.h }, p2/Z, [x9, #2, MUL VL]\n"
+ "sub x22, x22, x21\n"
+ "zip1 z13.h, z19.h, z17.h\n"
+ "ld1h { z6.h }, p2/Z, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
+ "zip2 z12.h, z19.h, z17.h\n"
+ "ld1h { z20.h }, p2/Z, [x28]\n"
+ "cmp x22, x21\n"
+ "zip1 z14.h, z23.h, z15.h\n"
+ "ld1h { z1.h }, p2/Z, [x28, #1, MUL VL]\n"
+ "zip2 z3.h, z23.h, z15.h\n"
+ "ld1h { z19.h }, p2/Z, [x28, #2, MUL VL]\n"
+ "zip1 z16.h, z31.h, z6.h\n"
+ "ld1h { z11.h }, p2/Z, [x28, #3, MUL VL]\n"
"addvl x28, x28, #4\n"
- "st1h { z23.h }, p2, [x21, #2, MUL VL]\n"
+ "zip2 z10.h, z31.h, z6.h\n"
+ "ld1h { z27.h }, p2/Z, [x27]\n"
+ "ld1h { z18.h }, p2/Z, [x27, #1, MUL VL]\n"
+ "zip1 z9.h, z20.h, z27.h\n"
+ "ld1h { z2.h }, p2/Z, [x27, #2, MUL VL]\n"
+ "zip2 z24.h, z20.h, z27.h\n"
+ "ld1h { z5.h }, p2/Z, [x27, #3, MUL VL]\n"
+ "addvl x27, x27, #4\n"
+ "zip1 z8.h, z1.h, z18.h\n"
+ "ld1h { z30.h }, p2/Z, [x26]\n"
+ "zip2 z17.h, z1.h, z18.h\n"
+ "ld1h { z28.h }, p2/Z, [x26, #1, MUL VL]\n"
+ "zip1 z6.h, z19.h, z2.h\n"
+ "ld1h { z23.h }, p2/Z, [x26, #2, MUL VL]\n"
+ "zip2 z1.h, z19.h, z2.h\n"
+ "ld1h { z25.h }, p2/Z, [x26, #3, MUL VL]\n"
+ "addvl x26, x26, #4\n"
+ "zip1 z31.h, z11.h, z5.h\n"
+ "ld1h { z21.h }, p2/Z, [x25]\n"
+ "zip2 z11.h, z11.h, z5.h\n"
+ "ld1h { z19.h }, p2/Z, [x25, #1, MUL VL]\n"
+ "ld1h { z18.h }, p2/Z, [x25, #2, MUL VL]\n"
+ "zip1 z29.h, z30.h, z21.h\n"
+ "ld1h { z26.h }, p2/Z, [x25, #3, MUL VL]\n"
"addvl x25, x25, #4\n"
+ "zip2 z30.h, z30.h, z21.h\n"
+ "ld1h { z21.h }, p2/Z, [x24]\n"
+ "zip1 z27.h, z28.h, z19.h\n"
+ "ld1h { z20.h }, p2/Z, [x24, #1, MUL VL]\n"
+ "zip2 z28.h, z28.h, z19.h\n"
+ "ld1h { z4.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "zip1 z2.h, z23.h, z18.h\n"
+ "ld1h { z15.h }, p2/Z, [x24, #3, MUL VL]\n"
"addvl x24, x24, #4\n"
- "zip1 z23.h, z22.h, z21.h\n"
- "st1h { z15.h }, p2, [x21, #3, MUL VL]\n"
+ "zip2 z5.h, z23.h, z18.h\n"
+ "ld1h { z23.h }, p2/Z, [x23]\n"
+ "zip1 z7.h, z25.h, z26.h\n"
+ "ld1h { z19.h }, p2/Z, [x23, #1, MUL VL]\n"
+ "zip2 z25.h, z25.h, z26.h\n"
+ "ld1h { z18.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z26.h }, p2/Z, [x23, #3, MUL VL]\n"
"addvl x23, x23, #4\n"
- "zip2 z22.h, z22.h, z21.h\n"
- "zip1 z21.h, z3.h, z20.h\n"
- "st1h { z14.h }, p2, [x21, #4, MUL VL]\n"
- "zip2 z20.h, z3.h, z20.h\n"
- "zip1 z19.h, z29.h, z17.h\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z13.h }, p2, [x21, #5, MUL VL]\n"
- "zip2 z18.h, z29.h, z17.h\n"
- "zip1 z17.h, z28.h, z16.h\n"
- "st1h { z12.h }, p2, [x21, #6, MUL VL]\n"
- "zip2 z16.h, z28.h, z16.h\n"
- "st1h { z11.h }, p2, [x21, #7, MUL VL]\n"
- "addvl x21, x21, #16\n"
- "st1h { z1.h }, p2, [x21, #-8, MUL VL]\n"
- "st1h { z0.h }, p2, [x21, #-7, MUL VL]\n"
- "st1h { z31.h }, p2, [x21, #-6, MUL VL]\n"
- "st1h { z30.h }, p2, [x21, #-5, MUL VL]\n"
- "st1h { z27.h }, p2, [x21, #-4, MUL VL]\n"
- "st1h { z26.h }, p2, [x21, #-3, MUL VL]\n"
- "st1h { z25.h }, p2, [x21, #-2, MUL VL]\n"
- "st1h { z24.h }, p2, [x21, #-1, MUL VL]\n"
- "st1h { z10.h }, p2, [x20]\n"
- "st1h { z9.h }, p2, [x20, #1, MUL VL]\n"
- "st1h { z8.h }, p2, [x20, #2, MUL VL]\n"
- "st1h { z7.h }, p2, [x20, #3, MUL VL]\n"
- "st1h { z6.h }, p2, [x20, #4, MUL VL]\n"
- "st1h { z5.h }, p2, [x20, #5, MUL VL]\n"
- "st1h { z4.h }, p2, [x20, #6, MUL VL]\n"
- "st1h { z2.h }, p2, [x20, #7, MUL VL]\n"
+ "st1h { z0.h }, p2, [x20]\n"
+ "zip1 z0.h, z21.h, z23.h\n"
+ "zip2 z23.h, z21.h, z23.h\n"
+ "st1h { z22.h }, p2, [x20, #1, MUL VL]\n"
+ "zip1 z22.h, z20.h, z19.h\n"
+ "st1h { z13.h }, p2, [x20, #2, MUL VL]\n"
+ "zip2 z21.h, z20.h, z19.h\n"
+ "st1h { z12.h }, p2, [x20, #3, MUL VL]\n"
+ "zip1 z20.h, z4.h, z18.h\n"
+ "st1h { z9.h }, p2, [x20, #4, MUL VL]\n"
+ "zip2 z19.h, z4.h, z18.h\n"
+ "st1h { z24.h }, p2, [x20, #5, MUL VL]\n"
+ "zip1 z18.h, z15.h, z26.h\n"
+ "st1h { z8.h }, p2, [x20, #6, MUL VL]\n"
+ "zip2 z9.h, z15.h, z26.h\n"
+ "st1h { z17.h }, p2, [x20, #7, MUL VL]\n"
"addvl x20, x20, #16\n"
- "st1h { z23.h }, p2, [x20, #-8, MUL VL]\n"
- "st1h { z22.h }, p2, [x20, #-7, MUL VL]\n"
- "st1h { z21.h }, p2, [x20, #-6, MUL VL]\n"
- "st1h { z20.h }, p2, [x20, #-5, MUL VL]\n"
- "st1h { z19.h }, p2, [x20, #-4, MUL VL]\n"
- "st1h { z18.h }, p2, [x20, #-3, MUL VL]\n"
- "st1h { z17.h }, p2, [x20, #-2, MUL VL]\n"
- "st1h { z16.h }, p2, [x20, #-1, MUL VL]\n"
+ "st1h { z29.h }, p2, [x20, #-8, MUL VL]\n"
+ "st1h { z30.h }, p2, [x20, #-7, MUL VL]\n"
+ "st1h { z27.h }, p2, [x20, #-6, MUL VL]\n"
+ "st1h { z28.h }, p2, [x20, #-5, MUL VL]\n"
+ "st1h { z0.h }, p2, [x20, #-4, MUL VL]\n"
+ "st1h { z23.h }, p2, [x20, #-3, MUL VL]\n"
+ "st1h { z22.h }, p2, [x20, #-2, MUL VL]\n"
+ "st1h { z21.h }, p2, [x20, #-1, MUL VL]\n"
+ "st1h { z14.h }, p2, [x19]\n"
+ "st1h { z3.h }, p2, [x19, #1, MUL VL]\n"
+ "st1h { z16.h }, p2, [x19, #2, MUL VL]\n"
+ "st1h { z10.h }, p2, [x19, #3, MUL VL]\n"
+ "st1h { z6.h }, p2, [x19, #4, MUL VL]\n"
+ "st1h { z1.h }, p2, [x19, #5, MUL VL]\n"
+ "st1h { z31.h }, p2, [x19, #6, MUL VL]\n"
+ "st1h { z11.h }, p2, [x19, #7, MUL VL]\n"
+ "addvl x19, x19, #16\n"
+ "st1h { z2.h }, p2, [x19, #-8, MUL VL]\n"
+ "st1h { z5.h }, p2, [x19, #-7, MUL VL]\n"
+ "st1h { z7.h }, p2, [x19, #-6, MUL VL]\n"
+ "st1h { z25.h }, p2, [x19, #-5, MUL VL]\n"
+ "st1h { z20.h }, p2, [x19, #-4, MUL VL]\n"
+ "st1h { z19.h }, p2, [x19, #-3, MUL VL]\n"
+ "st1h { z18.h }, p2, [x19, #-2, MUL VL]\n"
+ "st1h { z9.h }, p2, [x19, #-1, MUL VL]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x27, 5f\n"
+ "cbz x22, 5f\n"
"4:" // Main row loop: Column loop
- "mov x20, x27\n"
+ "mov x20, x22\n"
+ "mov x19, x10\n"
"whilelt p1.h, XZR, x20\n"
- "ld1h { z22.h }, p1/Z, [x12]\n"
- "ld1h { z21.h }, p1/Z, [x11]\n"
+ "ld1h { z18.h }, p1/Z, [x11]\n"
+ "ld1h { z16.h }, p1/Z, [x9]\n"
+ "zip1 z0.h, z18.h, z16.h\n"
+ "ld1h { z17.h }, p1/Z, [x28]\n"
"dech x20\n"
+ "zip2 z31.h, z18.h, z16.h\n"
+ "ld1h { z16.h }, p1/Z, [x27]\n"
"whilelt p0.h, XZR, x20\n"
- "ld1h { z20.h }, p0/Z, [x12, #1, MUL VL]\n"
- "ld1h { z19.h }, p0/Z, [x11, #1, MUL VL]\n"
- "ld1h { z18.h }, p1/Z, [x10]\n"
- "ld1h { z24.h }, p0/Z, [x10, #1, MUL VL]\n"
- "mov x20, x22\n"
- "decw x27, ALL, MUL #4\n"
- "ld1h { z17.h }, p1/Z, [x9]\n"
- "ld1h { z16.h }, p0/Z, [x9, #1, MUL VL]\n"
- "zip1 z31.h, z22.h, z21.h\n"
- "zip2 z23.h, z22.h, z21.h\n"
- "ld1h { z30.h }, p1/Z, [x28]\n"
- "ld1h { z29.h }, p0/Z, [x28, #1, MUL VL]\n"
- "zip1 z22.h, z20.h, z19.h\n"
- "zip2 z28.h, z20.h, z19.h\n"
- "ld1h { z21.h }, p1/Z, [x25]\n"
- "ld1h { z27.h }, p0/Z, [x25, #1, MUL VL]\n"
- "zip1 z20.h, z18.h, z17.h\n"
- "zip2 z19.h, z18.h, z17.h\n"
- "ld1h { z18.h }, p1/Z, [x24]\n"
- "ld1h { z26.h }, p0/Z, [x24, #1, MUL VL]\n"
- "zip1 z25.h, z24.h, z16.h\n"
- "zip2 z24.h, z24.h, z16.h\n"
- "ld1h { z17.h }, p1/Z, [x23]\n"
- "ld1h { z16.h }, p0/Z, [x23, #1, MUL VL]\n"
- "st1h { z31.h }, p2, [x20]\n"
- "cmp x27, #0x0\n"
- "st1h { z23.h }, p2, [x20, #1, MUL VL]\n"
- "addvl x12, x12, #2\n"
+ "zip1 z30.h, z17.h, z16.h\n"
+ "ld1h { z18.h }, p0/Z, [x11, #1, MUL VL]\n"
"addvl x11, x11, #2\n"
- "zip1 z23.h, z30.h, z21.h\n"
- "st1h { z22.h }, p2, [x20, #2, MUL VL]\n"
- "addvl x10, x10, #2\n"
+ "zip2 z29.h, z17.h, z16.h\n"
+ "ld1h { z16.h }, p0/Z, [x9, #1, MUL VL]\n"
"addvl x9, x9, #2\n"
- "zip2 z22.h, z30.h, z21.h\n"
- "st1h { z28.h }, p2, [x20, #3, MUL VL]\n"
+ "zip1 z28.h, z18.h, z16.h\n"
+ "ld1h { z17.h }, p0/Z, [x28, #1, MUL VL]\n"
"addvl x28, x28, #2\n"
+ "zip2 z27.h, z18.h, z16.h\n"
+ "ld1h { z16.h }, p0/Z, [x27, #1, MUL VL]\n"
+ "addvl x27, x27, #2\n"
+ "zip1 z26.h, z17.h, z16.h\n"
+ "ld1h { z18.h }, p1/Z, [x26]\n"
+ "add x10, x10, %x[out_stride]\n"
+ "zip2 z25.h, z17.h, z16.h\n"
+ "ld1h { z19.h }, p0/Z, [x26, #1, MUL VL]\n"
+ "addvl x26, x26, #2\n"
+ "ld1h { z17.h }, p1/Z, [x25]\n"
+ "zip1 z24.h, z18.h, z17.h\n"
+ "ld1h { z16.h }, p0/Z, [x25, #1, MUL VL]\n"
"addvl x25, x25, #2\n"
- "zip1 z21.h, z29.h, z27.h\n"
- "st1h { z20.h }, p2, [x20, #4, MUL VL]\n"
+ "zip2 z23.h, z18.h, z17.h\n"
+ "ld1h { z18.h }, p1/Z, [x24]\n"
+ "decw x22, ALL, MUL #4\n"
+ "zip1 z22.h, z19.h, z16.h\n"
+ "ld1h { z21.h }, p0/Z, [x24, #1, MUL VL]\n"
"addvl x24, x24, #2\n"
- "addvl x23, x23, #2\n"
- "zip2 z20.h, z29.h, z27.h\n"
- "st1h { z19.h }, p2, [x20, #5, MUL VL]\n"
+ "zip2 z20.h, z19.h, z16.h\n"
+ "ld1h { z17.h }, p1/Z, [x23]\n"
+ "cmp x22, #0x0\n"
"zip1 z19.h, z18.h, z17.h\n"
+ "ld1h { z16.h }, p0/Z, [x23, #1, MUL VL]\n"
+ "addvl x23, x23, #2\n"
"zip2 z18.h, z18.h, z17.h\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z25.h }, p2, [x20, #6, MUL VL]\n"
- "zip1 z17.h, z26.h, z16.h\n"
- "zip2 z16.h, z26.h, z16.h\n"
- "st1h { z24.h }, p2, [x20, #7, MUL VL]\n"
- "addvl x20, x20, #16\n"
- "st1h { z23.h }, p2, [x20, #-8, MUL VL]\n"
- "st1h { z22.h }, p2, [x20, #-7, MUL VL]\n"
- "st1h { z21.h }, p2, [x20, #-6, MUL VL]\n"
- "st1h { z20.h }, p2, [x20, #-5, MUL VL]\n"
- "st1h { z19.h }, p2, [x20, #-4, MUL VL]\n"
- "st1h { z18.h }, p2, [x20, #-3, MUL VL]\n"
- "st1h { z17.h }, p2, [x20, #-2, MUL VL]\n"
- "st1h { z16.h }, p2, [x20, #-1, MUL VL]\n"
+ "st1h { z0.h }, p2, [x19]\n"
+ "st1h { z31.h }, p2, [x19, #1, MUL VL]\n"
+ "zip1 z17.h, z21.h, z16.h\n"
+ "st1h { z28.h }, p2, [x19, #2, MUL VL]\n"
+ "zip2 z16.h, z21.h, z16.h\n"
+ "st1h { z27.h }, p2, [x19, #3, MUL VL]\n"
+ "st1h { z30.h }, p2, [x19, #4, MUL VL]\n"
+ "st1h { z29.h }, p2, [x19, #5, MUL VL]\n"
+ "st1h { z26.h }, p2, [x19, #6, MUL VL]\n"
+ "st1h { z25.h }, p2, [x19, #7, MUL VL]\n"
+ "addvl x19, x19, #16\n"
+ "st1h { z24.h }, p2, [x19, #-8, MUL VL]\n"
+ "st1h { z23.h }, p2, [x19, #-7, MUL VL]\n"
+ "st1h { z22.h }, p2, [x19, #-6, MUL VL]\n"
+ "st1h { z20.h }, p2, [x19, #-5, MUL VL]\n"
+ "st1h { z19.h }, p2, [x19, #-4, MUL VL]\n"
+ "st1h { z18.h }, p2, [x19, #-3, MUL VL]\n"
+ "st1h { z17.h }, p2, [x19, #-2, MUL VL]\n"
+ "st1h { z16.h }, p2, [x19, #-1, MUL VL]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp %x[height], #0x8\n"
"addvl %x[out], %x[out], #16\n"
+ "cmp %x[height], #0x8\n"
"bge 1b\n"
"cbz %x[height], 12f\n"
"6:" // Main loop skip
"7:" // Tail row loop: Head
- "mov x12, %x[in]\n"
- "mov x21, %x[width]\n"
- "cnth x20, ALL, MUL #4\n"
- "add x11, x12, %x[in_stride]\n"
+ "mov x11, %x[in]\n"
+ "mov x10, %x[out]\n"
+ "add x9, x11, %x[in_stride]\n"
+ "add %x[in], x9, %x[in_stride]\n"
"cmp %x[height], #0x1\n"
- "add %x[in], x11, %x[in_stride]\n"
- "csel x11, x11, %x[pad_row], GT\n"
- "cmp x21, x20\n"
- "mov x22, %x[out]\n"
+ "csel x9, x9, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x2\n"
+ "mov x20, %x[width]\n"
+ "cnth x19, ALL, MUL #4\n"
+ "cmp x20, x19\n"
"blt 9f\n"
"8:" // Tail row loop: Unroll column loop
- "ld1h { z18.h }, p2/Z, [x12]\n"
- "ld1h { z20.h }, p2/Z, [x12, #1, MUL VL]\n"
- "sub x21, x21, x20\n"
- "cmp x21, x20\n"
- "ld1h { z17.h }, p2/Z, [x11]\n"
- "ld1h { z16.h }, p2/Z, [x11, #1, MUL VL]\n"
- "zip1 z23.h, z18.h, z17.h\n"
- "zip2 z19.h, z18.h, z17.h\n"
- "ld1h { z18.h }, p2/Z, [x12, #2, MUL VL]\n"
- "ld1h { z22.h }, p2/Z, [x12, #3, MUL VL]\n"
- "zip1 z21.h, z20.h, z16.h\n"
- "zip2 z20.h, z20.h, z16.h\n"
- "ld1h { z17.h }, p2/Z, [x11, #2, MUL VL]\n"
- "ld1h { z16.h }, p2/Z, [x11, #3, MUL VL]\n"
- "st1h { z23.h }, p2, [x22]\n"
- "addvl x12, x12, #4\n"
- "st1h { z19.h }, p2, [x22, #1, MUL VL]\n"
+ "ld1h { z18.h }, p2/Z, [x11]\n"
+ "sub x20, x20, x19\n"
+ "ld1h { z24.h }, p2/Z, [x11, #1, MUL VL]\n"
+ "cmp x20, x19\n"
+ "ld1h { z23.h }, p2/Z, [x11, #2, MUL VL]\n"
+ "ld1h { z22.h }, p2/Z, [x11, #3, MUL VL]\n"
"addvl x11, x11, #4\n"
- "zip1 z19.h, z18.h, z17.h\n"
- "zip2 z18.h, z18.h, z17.h\n"
- "st1h { z21.h }, p2, [x22, #2, MUL VL]\n"
- "zip1 z17.h, z22.h, z16.h\n"
- "zip2 z16.h, z22.h, z16.h\n"
- "st1h { z20.h }, p2, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z19.h }, p2, [x22]\n"
- "st1h { z18.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z17.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z16.h }, p2, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "ld1h { z16.h }, p2/Z, [x9]\n"
+ "zip1 z21.h, z18.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x9, #1, MUL VL]\n"
+ "zip2 z16.h, z18.h, z16.h\n"
+ "ld1h { z20.h }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1h { z19.h }, p2/Z, [x9, #3, MUL VL]\n"
+ "zip1 z18.h, z24.h, z17.h\n"
+ "st1h { z21.h }, p2, [x10]\n"
+ "addvl x9, x9, #4\n"
+ "zip2 z17.h, z24.h, z17.h\n"
+ "st1h { z16.h }, p2, [x10, #1, MUL VL]\n"
+ "zip1 z16.h, z23.h, z20.h\n"
+ "st1h { z18.h }, p2, [x10, #2, MUL VL]\n"
+ "zip2 z18.h, z23.h, z20.h\n"
+ "st1h { z17.h }, p2, [x10, #3, MUL VL]\n"
+ "add x10, x10, %x[out_stride]\n"
+ "zip1 z17.h, z22.h, z19.h\n"
+ "st1h { z16.h }, p2, [x10]\n"
+ "zip2 z16.h, z22.h, z19.h\n"
+ "st1h { z18.h }, p2, [x10, #1, MUL VL]\n"
+ "st1h { z17.h }, p2, [x10, #2, MUL VL]\n"
+ "st1h { z16.h }, p2, [x10, #3, MUL VL]\n"
+ "add x10, x10, %x[out_stride]\n"
"bge 8b\n"
"9:" // Tail row loop: Unroll column loop skip
- "cbz x21, 11f\n"
+ "cbz x20, 11f\n"
"10:" // Tail row loop: Column loop
- "mov x20, x21\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z18.h }, p0/Z, [x12]\n"
+ "mov x19, x20\n"
+ "decw x20, ALL, MUL #4\n"
+ "whilelt p0.h, XZR, x19\n"
"ld1h { z17.h }, p0/Z, [x11]\n"
- "dech x20\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z20.h }, p0/Z, [x12, #1, MUL VL]\n"
- "ld1h { z16.h }, p0/Z, [x11, #1, MUL VL]\n"
- "decw x21, ALL, MUL #4\n"
- "cmp x21, #0x0\n"
- "zip1 z19.h, z18.h, z17.h\n"
- "zip2 z18.h, z18.h, z17.h\n"
- "addvl x12, x12, #2\n"
+ "ld1h { z16.h }, p0/Z, [x9]\n"
+ "zip1 z20.h, z17.h, z16.h\n"
+ "dech x19\n"
+ "zip2 z19.h, z17.h, z16.h\n"
+ "whilelt p0.h, XZR, x19\n"
+ "ld1h { z18.h }, p0/Z, [x11, #1, MUL VL]\n"
"addvl x11, x11, #2\n"
- "zip1 z17.h, z20.h, z16.h\n"
- "zip2 z16.h, z20.h, z16.h\n"
- "st1h { z19.h }, p2, [x22]\n"
- "st1h { z18.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z17.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z16.h }, p2, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "ld1h { z16.h }, p0/Z, [x9, #1, MUL VL]\n"
+ "zip1 z17.h, z18.h, z16.h\n"
+ "st1h { z20.h }, p2, [x10]\n"
+ "addvl x9, x9, #2\n"
+ "zip2 z16.h, z18.h, z16.h\n"
+ "st1h { z19.h }, p2, [x10, #1, MUL VL]\n"
+ "cmp x20, #0x0\n"
+ "st1h { z17.h }, p2, [x10, #2, MUL VL]\n"
+ "st1h { z16.h }, p2, [x10, #3, MUL VL]\n"
+ "add x10, x10, %x[out_stride]\n"
"bgt 10b\n"
"11:" // Tail row loop: Column loop skip
- "cmp %x[height], #0x1\n"
"addvl %x[out], %x[out], #4\n"
+ "cmp %x[height], #0x1\n"
"bge 7b\n"
"12:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
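The kernel above builds its interleaved rows from a single level of ZIP1/ZIP2 at 16-bit granularity, and the transforms further down stack two or three such levels. A minimal scalar sketch of what those two instructions do, assuming a fixed lane count N in place of the scalable vector length:

#include <array>
#include <cstddef>

// Scalar sketch of SVE ZIP1/ZIP2 on a vector of N elements:
// ZIP1 interleaves the low halves of its two inputs, ZIP2 the high halves.
template <typename T, std::size_t N>
std::array<T, N> zip1(const std::array<T, N> &a, const std::array<T, N> &b)
{
    std::array<T, N> r{};
    for (std::size_t i = 0; i < N / 2; i++)
    {
        r[2 * i]     = a[i];  // even lanes come from the first operand
        r[2 * i + 1] = b[i];  // odd lanes come from the second operand
    }
    return r;
}

template <typename T, std::size_t N>
std::array<T, N> zip2(const std::array<T, N> &a, const std::array<T, N> &b)
{
    std::array<T, N> r{};
    for (std::size_t i = 0; i < N / 2; i++)
    {
        r[2 * i]     = a[N / 2 + i];  // same interleave, drawn from the high halves
        r[2 * i + 1] = b[N / 2 + i];
    }
    return r;
}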
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_1x8.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_1x8.hpp
index 982c0545ed..67ef738645 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_1x8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_1x8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -42,225 +42,225 @@ void sve_transpose_interleave_6VL_1x8(uint8_t *out, const uint8_t *in, size_t wi
__asm__ __volatile__(
"ptrue p1.b\n"
"1:" // Main row loop: Head
- "mov x10, %x[in]\n"
- "add x9, x10, %x[in_stride]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
+ "mov x9, %x[in]\n"
+ "mov x28, %x[out]\n"
+ "add x27, x9, %x[in_stride]\n"
"add x26, x27, %x[in_stride]\n"
"add x25, x26, %x[in_stride]\n"
"add x24, x25, %x[in_stride]\n"
"add x23, x24, %x[in_stride]\n"
+ "add x22, x23, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add %x[in], x21, %x[in_stride]\n"
"cmp %x[height], #0x7\n"
- "add %x[in], x23, %x[in_stride]\n"
+ "csel x21, x21, %x[pad_row], GT\n"
+ "csel x22, x22, %x[pad_row], GE\n"
+ "cmp %x[height], #0x5\n"
"csel x23, x23, %x[pad_row], GT\n"
"csel x24, x24, %x[pad_row], GE\n"
- "cmp %x[height], #0x5\n"
- "mov x22, %x[width]\n"
- "cntb x21, ALL, MUL #3\n"
+ "cmp %x[height], #0x3\n"
"csel x25, x25, %x[pad_row], GT\n"
"csel x26, x26, %x[pad_row], GE\n"
- "cmp %x[height], #0x3\n"
- "csel x27, x27, %x[pad_row], GT\n"
- "csel x28, x28, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x9, x9, %x[pad_row], GT\n"
- "cmp x22, x21\n"
- "mov x20, %x[out]\n"
+ "csel x27, x27, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x8\n"
+ "mov x20, %x[width]\n"
+ "cntb x19, ALL, MUL #3\n"
+ "cmp x20, x19\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1b { z21.b }, p1/Z, [x10]\n"
- "ld1b { z25.b }, p1/Z, [x9]\n"
- "sub x22, x22, x21\n"
- "cmp x22, x21\n"
- "ld1b { z20.b }, p1/Z, [x28]\n"
- "ld1b { z24.b }, p1/Z, [x27]\n"
- "ld1b { z19.b }, p1/Z, [x26]\n"
- "ld1b { z18.b }, p1/Z, [x25]\n"
- "zip1 z7.b, z21.b, z19.b\n"
- "zip1 z6.b, z25.b, z18.b\n"
- "ld1b { z17.b }, p1/Z, [x24]\n"
- "ld1b { z16.b }, p1/Z, [x23]\n"
- "zip1 z28.b, z20.b, z17.b\n"
- "zip1 z27.b, z24.b, z16.b\n"
- "ld1b { z23.b }, p1/Z, [x10, #1, MUL VL]\n"
- "ld1b { z22.b }, p1/Z, [x9, #1, MUL VL]\n"
- "zip2 z5.b, z21.b, z19.b\n"
- "zip2 z4.b, z20.b, z17.b\n"
- "ld1b { z21.b }, p1/Z, [x28, #1, MUL VL]\n"
- "ld1b { z20.b }, p1/Z, [x27, #1, MUL VL]\n"
- "zip2 z3.b, z25.b, z18.b\n"
- "zip2 z2.b, z24.b, z16.b\n"
- "ld1b { z19.b }, p1/Z, [x26, #1, MUL VL]\n"
- "ld1b { z18.b }, p1/Z, [x25, #1, MUL VL]\n"
- "zip1 z1.b, z23.b, z19.b\n"
- "zip1 z15.b, z22.b, z18.b\n"
- "ld1b { z17.b }, p1/Z, [x24, #1, MUL VL]\n"
- "ld1b { z16.b }, p1/Z, [x23, #1, MUL VL]\n"
- "zip1 z0.b, z21.b, z17.b\n"
- "zip1 z31.b, z20.b, z16.b\n"
- "ld1b { z26.b }, p1/Z, [x10, #2, MUL VL]\n"
- "ld1b { z30.b }, p1/Z, [x9, #2, MUL VL]\n"
- "zip2 z14.b, z23.b, z19.b\n"
- "zip2 z13.b, z21.b, z17.b\n"
- "ld1b { z25.b }, p1/Z, [x28, #2, MUL VL]\n"
- "ld1b { z24.b }, p1/Z, [x27, #2, MUL VL]\n"
- "zip2 z12.b, z22.b, z18.b\n"
- "zip2 z11.b, z20.b, z16.b\n"
- "ld1b { z23.b }, p1/Z, [x26, #2, MUL VL]\n"
- "ld1b { z22.b }, p1/Z, [x25, #2, MUL VL]\n"
- "zip1 z10.b, z26.b, z23.b\n"
- "zip1 z9.b, z30.b, z22.b\n"
- "ld1b { z21.b }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1b { z17.b }, p1/Z, [x23, #2, MUL VL]\n"
- "zip1 z29.b, z25.b, z21.b\n"
- "zip1 z8.b, z24.b, z17.b\n"
- "zip1 z19.b, z7.b, z28.b\n"
- "zip1 z16.b, z6.b, z27.b\n"
- "addvl x10, x10, #3\n"
+ "ld1b { z22.b }, p1/Z, [x9]\n"
+ "sub x20, x20, x19\n"
+ "ld1b { z21.b }, p1/Z, [x9, #1, MUL VL]\n"
+ "cmp x20, x19\n"
+ "ld1b { z12.b }, p1/Z, [x9, #2, MUL VL]\n"
"addvl x9, x9, #3\n"
- "zip2 z28.b, z7.b, z28.b\n"
- "zip2 z18.b, z6.b, z27.b\n"
- "addvl x28, x28, #3\n"
+ "ld1b { z20.b }, p1/Z, [x27]\n"
+ "ld1b { z11.b }, p1/Z, [x27, #1, MUL VL]\n"
+ "ld1b { z10.b }, p1/Z, [x27, #2, MUL VL]\n"
"addvl x27, x27, #3\n"
- "zip1 z27.b, z5.b, z4.b\n"
- "zip1 z20.b, z3.b, z2.b\n"
+ "ld1b { z19.b }, p1/Z, [x26]\n"
+ "ld1b { z9.b }, p1/Z, [x26, #1, MUL VL]\n"
+ "ld1b { z8.b }, p1/Z, [x26, #2, MUL VL]\n"
"addvl x26, x26, #3\n"
+ "ld1b { z7.b }, p1/Z, [x25]\n"
+ "ld1b { z6.b }, p1/Z, [x25, #1, MUL VL]\n"
+ "ld1b { z5.b }, p1/Z, [x25, #2, MUL VL]\n"
"addvl x25, x25, #3\n"
- "zip2 z7.b, z26.b, z23.b\n"
- "zip2 z26.b, z25.b, z21.b\n"
+ "ld1b { z16.b }, p1/Z, [x24]\n"
+ "zip1 z18.b, z22.b, z16.b\n"
+ "ld1b { z17.b }, p1/Z, [x24, #1, MUL VL]\n"
+ "zip2 z4.b, z22.b, z16.b\n"
+ "ld1b { z3.b }, p1/Z, [x24, #2, MUL VL]\n"
"addvl x24, x24, #3\n"
+ "zip1 z2.b, z21.b, z17.b\n"
+ "ld1b { z16.b }, p1/Z, [x23]\n"
+ "zip2 z1.b, z21.b, z17.b\n"
+ "ld1b { z0.b }, p1/Z, [x23, #1, MUL VL]\n"
+ "zip1 z31.b, z12.b, z3.b\n"
+ "ld1b { z30.b }, p1/Z, [x23, #2, MUL VL]\n"
"addvl x23, x23, #3\n"
- "zip2 z6.b, z30.b, z22.b\n"
- "zip2 z25.b, z24.b, z17.b\n"
- "zip2 z5.b, z5.b, z4.b\n"
- "zip2 z4.b, z3.b, z2.b\n"
- "zip1 z3.b, z1.b, z0.b\n"
- "zip1 z2.b, z15.b, z31.b\n"
- "zip2 z1.b, z1.b, z0.b\n"
- "zip2 z0.b, z15.b, z31.b\n"
- "zip1 z31.b, z14.b, z13.b\n"
- "zip1 z30.b, z12.b, z11.b\n"
- "zip2 z24.b, z14.b, z13.b\n"
- "zip2 z23.b, z12.b, z11.b\n"
- "zip1 z22.b, z10.b, z29.b\n"
- "zip1 z21.b, z9.b, z8.b\n"
- "zip1 z17.b, z19.b, z16.b\n"
- "zip2 z16.b, z19.b, z16.b\n"
- "st1b { z17.b }, p1, [x20]\n"
- "zip1 z19.b, z28.b, z18.b\n"
- "zip2 z18.b, z28.b, z18.b\n"
- "st1b { z16.b }, p1, [x20, #1, MUL VL]\n"
- "zip1 z17.b, z27.b, z20.b\n"
- "zip2 z16.b, z27.b, z20.b\n"
- "st1b { z19.b }, p1, [x20, #2, MUL VL]\n"
- "st1b { z18.b }, p1, [x20, #3, MUL VL]\n"
- "zip2 z29.b, z10.b, z29.b\n"
- "zip2 z20.b, z9.b, z8.b\n"
- "st1b { z17.b }, p1, [x20, #4, MUL VL]\n"
- "zip1 z28.b, z7.b, z26.b\n"
- "zip1 z27.b, z6.b, z25.b\n"
- "st1b { z16.b }, p1, [x20, #5, MUL VL]\n"
- "add x20, x20, %x[out_stride]\n"
- "zip2 z26.b, z7.b, z26.b\n"
- "zip2 z25.b, z6.b, z25.b\n"
- "zip1 z17.b, z5.b, z4.b\n"
- "zip2 z16.b, z5.b, z4.b\n"
- "st1b { z17.b }, p1, [x20]\n"
- "zip1 z18.b, z3.b, z2.b\n"
- "zip2 z17.b, z3.b, z2.b\n"
- "st1b { z16.b }, p1, [x20, #1, MUL VL]\n"
- "zip1 z16.b, z1.b, z0.b\n"
- "zip2 z19.b, z1.b, z0.b\n"
- "st1b { z18.b }, p1, [x20, #2, MUL VL]\n"
- "st1b { z17.b }, p1, [x20, #3, MUL VL]\n"
- "zip1 z18.b, z31.b, z30.b\n"
- "zip2 z17.b, z31.b, z30.b\n"
- "st1b { z16.b }, p1, [x20, #4, MUL VL]\n"
- "zip1 z16.b, z24.b, z23.b\n"
- "zip2 z24.b, z24.b, z23.b\n"
- "st1b { z19.b }, p1, [x20, #5, MUL VL]\n"
- "add x20, x20, %x[out_stride]\n"
- "zip1 z23.b, z22.b, z21.b\n"
- "zip2 z22.b, z22.b, z21.b\n"
- "st1b { z18.b }, p1, [x20]\n"
- "zip1 z21.b, z29.b, z20.b\n"
- "zip2 z20.b, z29.b, z20.b\n"
- "st1b { z17.b }, p1, [x20, #1, MUL VL]\n"
- "zip1 z19.b, z28.b, z27.b\n"
- "zip2 z18.b, z28.b, z27.b\n"
- "st1b { z16.b }, p1, [x20, #2, MUL VL]\n"
- "zip1 z17.b, z26.b, z25.b\n"
- "zip2 z16.b, z26.b, z25.b\n"
- "st1b { z24.b }, p1, [x20, #3, MUL VL]\n"
- "st1b { z23.b }, p1, [x20, #4, MUL VL]\n"
- "st1b { z22.b }, p1, [x20, #5, MUL VL]\n"
- "add x20, x20, %x[out_stride]\n"
- "st1b { z21.b }, p1, [x20]\n"
- "st1b { z20.b }, p1, [x20, #1, MUL VL]\n"
- "st1b { z19.b }, p1, [x20, #2, MUL VL]\n"
- "st1b { z18.b }, p1, [x20, #3, MUL VL]\n"
- "st1b { z17.b }, p1, [x20, #4, MUL VL]\n"
- "st1b { z16.b }, p1, [x20, #5, MUL VL]\n"
- "add x20, x20, %x[out_stride]\n"
+ "zip1 z29.b, z20.b, z16.b\n"
+ "ld1b { z17.b }, p1/Z, [x22]\n"
+ "zip2 z28.b, z20.b, z16.b\n"
+ "ld1b { z27.b }, p1/Z, [x22, #1, MUL VL]\n"
+ "zip1 z26.b, z11.b, z0.b\n"
+ "ld1b { z25.b }, p1/Z, [x22, #2, MUL VL]\n"
+ "addvl x22, x22, #3\n"
+ "zip1 z16.b, z19.b, z17.b\n"
+ "ld1b { z24.b }, p1/Z, [x21]\n"
+ "zip2 z21.b, z19.b, z17.b\n"
+ "ld1b { z22.b }, p1/Z, [x21, #1, MUL VL]\n"
+ "zip1 z20.b, z18.b, z16.b\n"
+ "ld1b { z23.b }, p1/Z, [x21, #2, MUL VL]\n"
+ "addvl x21, x21, #3\n"
+ "zip1 z19.b, z7.b, z24.b\n"
+ "zip2 z18.b, z18.b, z16.b\n"
+ "zip1 z17.b, z29.b, z19.b\n"
+ "zip1 z16.b, z20.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28]\n"
+ "zip2 z16.b, z20.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #1, MUL VL]\n"
+ "zip2 z17.b, z29.b, z19.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #2, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #3, MUL VL]\n"
+ "zip1 z18.b, z4.b, z21.b\n"
+ "zip2 z19.b, z7.b, z24.b\n"
+ "zip1 z17.b, z28.b, z19.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #4, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #5, MUL VL]\n"
+ "add x28, x28, %x[out_stride]\n"
+ "zip2 z18.b, z4.b, z21.b\n"
+ "zip2 z17.b, z28.b, z19.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #1, MUL VL]\n"
+ "zip1 z20.b, z9.b, z27.b\n"
+ "zip1 z18.b, z2.b, z20.b\n"
+ "zip1 z19.b, z6.b, z22.b\n"
+ "zip1 z17.b, z26.b, z19.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #2, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #3, MUL VL]\n"
+ "zip2 z18.b, z2.b, z20.b\n"
+ "zip2 z17.b, z26.b, z19.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #4, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #5, MUL VL]\n"
+ "add x28, x28, %x[out_stride]\n"
+ "zip2 z21.b, z9.b, z27.b\n"
+ "zip2 z20.b, z11.b, z0.b\n"
+ "zip1 z18.b, z1.b, z21.b\n"
+ "zip2 z19.b, z6.b, z22.b\n"
+ "zip1 z17.b, z20.b, z19.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #1, MUL VL]\n"
+ "zip2 z18.b, z1.b, z21.b\n"
+ "zip2 z17.b, z20.b, z19.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #2, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #3, MUL VL]\n"
+ "zip1 z21.b, z8.b, z25.b\n"
+ "zip1 z18.b, z31.b, z21.b\n"
+ "zip1 z20.b, z10.b, z30.b\n"
+ "zip1 z19.b, z5.b, z23.b\n"
+ "zip1 z17.b, z20.b, z19.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #4, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #5, MUL VL]\n"
+ "add x28, x28, %x[out_stride]\n"
+ "zip2 z18.b, z31.b, z21.b\n"
+ "zip2 z17.b, z20.b, z19.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #1, MUL VL]\n"
+ "zip2 z22.b, z12.b, z3.b\n"
+ "zip2 z21.b, z8.b, z25.b\n"
+ "zip1 z18.b, z22.b, z21.b\n"
+ "zip2 z20.b, z10.b, z30.b\n"
+ "zip2 z19.b, z5.b, z23.b\n"
+ "zip1 z17.b, z20.b, z19.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #2, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #3, MUL VL]\n"
+ "zip2 z18.b, z22.b, z21.b\n"
+ "zip2 z17.b, z20.b, z19.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #4, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #5, MUL VL]\n"
+ "add x28, x28, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x22, 5f\n"
+ "cbz x20, 5f\n"
"4:" // Main row loop: Column loop
- "whilelt p0.b, XZR, x22\n"
- "ld1b { z23.b }, p0/Z, [x10]\n"
- "ld1b { z27.b }, p0/Z, [x9]\n"
- "decd x22, ALL, MUL #6\n"
- "ld1b { z21.b }, p0/Z, [x28]\n"
- "ld1b { z26.b }, p0/Z, [x27]\n"
- "cmp x22, #0x0\n"
- "incd x10, ALL, MUL #6\n"
- "ld1b { z20.b }, p0/Z, [x26]\n"
- "ld1b { z19.b }, p0/Z, [x25]\n"
- "zip1 z25.b, z23.b, z20.b\n"
- "zip1 z24.b, z27.b, z19.b\n"
- "ld1b { z17.b }, p0/Z, [x24]\n"
- "ld1b { z16.b }, p0/Z, [x23]\n"
- "zip1 z22.b, z21.b, z17.b\n"
- "zip1 z18.b, z26.b, z16.b\n"
- "zip2 z23.b, z23.b, z20.b\n"
- "zip2 z21.b, z21.b, z17.b\n"
+ "whilelt p0.b, XZR, x20\n"
+ "ld1b { z18.b }, p0/Z, [x9]\n"
"incd x9, ALL, MUL #6\n"
- "incd x28, ALL, MUL #6\n"
- "zip2 z20.b, z27.b, z19.b\n"
- "zip2 z17.b, z26.b, z16.b\n"
+ "ld1b { z28.b }, p0/Z, [x27]\n"
"incd x27, ALL, MUL #6\n"
+ "ld1b { z17.b }, p0/Z, [x26]\n"
"incd x26, ALL, MUL #6\n"
- "zip1 z19.b, z25.b, z22.b\n"
- "zip1 z16.b, z24.b, z18.b\n"
+ "ld1b { z27.b }, p0/Z, [x25]\n"
"incd x25, ALL, MUL #6\n"
+ "ld1b { z16.b }, p0/Z, [x24]\n"
+ "zip1 z26.b, z18.b, z16.b\n"
+ "ld1b { z25.b }, p0/Z, [x23]\n"
"incd x24, ALL, MUL #6\n"
- "zip2 z22.b, z25.b, z22.b\n"
- "zip2 z18.b, z24.b, z18.b\n"
+ "zip2 z24.b, z18.b, z16.b\n"
+ "ld1b { z16.b }, p0/Z, [x22]\n"
"incd x23, ALL, MUL #6\n"
- "zip1 z21.b, z23.b, z21.b\n"
- "zip1 z20.b, z20.b, z17.b\n"
- "zip1 z17.b, z19.b, z16.b\n"
- "zip2 z16.b, z19.b, z16.b\n"
- "st1b { z17.b }, p1, [x20]\n"
- "zip1 z19.b, z22.b, z18.b\n"
- "zip2 z18.b, z22.b, z18.b\n"
- "st1b { z16.b }, p1, [x20, #1, MUL VL]\n"
- "zip1 z17.b, z21.b, z20.b\n"
- "zip2 z16.b, z21.b, z20.b\n"
- "st1b { z19.b }, p1, [x20, #2, MUL VL]\n"
- "st1b { z18.b }, p1, [x20, #3, MUL VL]\n"
- "st1b { z17.b }, p1, [x20, #4, MUL VL]\n"
- "st1b { z16.b }, p1, [x20, #5, MUL VL]\n"
- "add x20, x20, %x[out_stride]\n"
+ "zip1 z23.b, z28.b, z25.b\n"
+ "ld1b { z22.b }, p0/Z, [x21]\n"
+ "incd x22, ALL, MUL #6\n"
+ "zip1 z21.b, z17.b, z16.b\n"
+ "incd x21, ALL, MUL #6\n"
+ "zip2 z20.b, z17.b, z16.b\n"
+ "decd x20, ALL, MUL #6\n"
+ "zip1 z18.b, z26.b, z21.b\n"
+ "cmp x20, #0x0\n"
+ "zip1 z19.b, z27.b, z22.b\n"
+ "zip1 z17.b, z23.b, z19.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #1, MUL VL]\n"
+ "zip2 z18.b, z26.b, z21.b\n"
+ "zip2 z17.b, z23.b, z19.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #2, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #3, MUL VL]\n"
+ "zip1 z18.b, z24.b, z20.b\n"
+ "zip2 z17.b, z28.b, z25.b\n"
+ "zip2 z16.b, z27.b, z22.b\n"
+ "zip1 z17.b, z17.b, z16.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #4, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #5, MUL VL]\n"
+ "add x28, x28, %x[out_stride]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp %x[height], #0x1\n"
"addvl %x[out], %x[out], #6\n"
+ "cmp %x[height], #0x1\n"
"bge 1b\n"
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
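The 1x8 transform above feeds eight row pointers (absent rows redirected to pad_row) through three levels of byte ZIPs, a butterfly that pairs rows at distance 4, then 2, then 1. Ignoring the 6-vector column blocking the real kernel applies per output row, the net layout is a plain element-wise 8-way interleave; a minimal scalar reference, with the helper name interleave_1x8 as an illustrative assumption:

#include <cstddef>
#include <cstdint>

// Scalar reference for an element-wise 8-way row interleave:
// output element (8*i + r) holds column i of row r.
void interleave_1x8(uint8_t *out, const uint8_t *const rows[8], size_t width)
{
    for (size_t i = 0; i < width; i++)
    {
        for (size_t r = 0; r < 8; r++)
        {
            *out++ = rows[r][i];
        }
    }
}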
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_2x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_2x4.hpp
index 2b5741a49c..19d3d9dfe4 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_2x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_2x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -40,356 +40,356 @@ void sve_transpose_interleave_6VL_2x4(uint16_t *out, const uint16_t *in, size_t
size_t out_stride = 6 * roundup<size_t>(height, 4) * get_vector_length<uint32_t>();
__asm__ __volatile__(
- "cmp %x[height], #0x8\n"
"ptrue p2.b\n"
+ "cmp %x[height], #0x8\n"
"blt 6f\n"
"1:" // Main row loop: Head
- "mov x12, %x[in]\n"
- "add x11, x12, %x[in_stride]\n"
- "add x10, x11, %x[in_stride]\n"
- "add x9, x10, %x[in_stride]\n"
+ "mov x11, %x[in]\n"
+ "mov x10, %x[out]\n"
+ "add x9, x11, %x[in_stride]\n"
"add x28, x9, %x[in_stride]\n"
- "mov x27, %x[width]\n"
- "cnth x26, ALL, MUL #3\n"
- "add x25, x28, %x[in_stride]\n"
+ "add x27, x28, %x[in_stride]\n"
+ "add x26, x27, %x[in_stride]\n"
+ "add x25, x26, %x[in_stride]\n"
"add x24, x25, %x[in_stride]\n"
"add x23, x24, %x[in_stride]\n"
- "cmp x27, x26\n"
"add %x[in], x23, %x[in_stride]\n"
- "mov x22, %x[out]\n"
"sub %x[height], %x[height], #0x8\n"
+ "mov x22, %x[width]\n"
+ "cnth x21, ALL, MUL #3\n"
+ "cmp x22, x21\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1h { z18.h }, p2/Z, [x12]\n"
- "ld1h { z13.h }, p2/Z, [x12, #1, MUL VL]\n"
- "mov x21, x22\n"
- "add x22, x22, %x[out_stride]\n"
- "ld1h { z17.h }, p2/Z, [x11]\n"
- "ld1h { z12.h }, p2/Z, [x11, #1, MUL VL]\n"
- "mov x20, x22\n"
- "sub x27, x27, x26\n"
- "ld1h { z16.h }, p2/Z, [x10]\n"
- "ld1h { z11.h }, p2/Z, [x10, #1, MUL VL]\n"
- "zip1 z23.h, z18.h, z16.h\n"
- "zip2 z29.h, z18.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x9]\n"
- "ld1h { z10.h }, p2/Z, [x9, #1, MUL VL]\n"
- "zip1 z22.h, z17.h, z16.h\n"
- "zip2 z28.h, z17.h, z16.h\n"
- "ld1h { z27.h }, p2/Z, [x28]\n"
- "ld1h { z26.h }, p2/Z, [x25]\n"
- "zip1 z21.h, z13.h, z11.h\n"
- "zip1 z20.h, z12.h, z10.h\n"
- "ld1h { z18.h }, p2/Z, [x24]\n"
- "ld1h { z19.h }, p2/Z, [x23]\n"
- "zip1 z17.h, z27.h, z18.h\n"
- "zip1 z16.h, z26.h, z19.h\n"
- "ld1h { z9.h }, p2/Z, [x12, #2, MUL VL]\n"
- "ld1h { z8.h }, p2/Z, [x11, #2, MUL VL]\n"
- "zip1 z25.h, z23.h, z22.h\n"
- "zip2 z24.h, z23.h, z22.h\n"
- "ld1h { z23.h }, p2/Z, [x10, #2, MUL VL]\n"
- "ld1h { z7.h }, p2/Z, [x9, #2, MUL VL]\n"
- "zip1 z22.h, z29.h, z28.h\n"
- "zip2 z6.h, z29.h, z28.h\n"
- "ld1h { z28.h }, p2/Z, [x28, #1, MUL VL]\n"
- "ld1h { z5.h }, p2/Z, [x25, #1, MUL VL]\n"
- "zip1 z4.h, z21.h, z20.h\n"
- "zip2 z3.h, z21.h, z20.h\n"
- "ld1h { z21.h }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1h { z20.h }, p2/Z, [x23, #1, MUL VL]\n"
- "zip1 z2.h, z17.h, z16.h\n"
- "zip2 z1.h, z17.h, z16.h\n"
- "ld1h { z0.h }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1h { z31.h }, p2/Z, [x25, #2, MUL VL]\n"
- "zip2 z18.h, z27.h, z18.h\n"
- "zip2 z17.h, z26.h, z19.h\n"
- "ld1h { z30.h }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1h { z29.h }, p2/Z, [x23, #2, MUL VL]\n"
- "zip1 z19.h, z28.h, z21.h\n"
- "zip1 z16.h, z5.h, z20.h\n"
- "st1h { z25.h }, p2, [x21]\n"
- "zip2 z27.h, z13.h, z11.h\n"
- "zip2 z26.h, z12.h, z10.h\n"
- "cmp x27, x26\n"
- "st1h { z24.h }, p2, [x21, #1, MUL VL]\n"
- "zip1 z25.h, z9.h, z23.h\n"
- "zip1 z24.h, z8.h, z7.h\n"
- "addvl x12, x12, #3\n"
- "st1h { z22.h }, p2, [x21, #2, MUL VL]\n"
- "zip2 z23.h, z9.h, z23.h\n"
- "zip2 z22.h, z8.h, z7.h\n"
+ "ld1h { z19.h }, p2/Z, [x11]\n"
+ "mov x20, x10\n"
+ "ld1h { z18.h }, p2/Z, [x11, #1, MUL VL]\n"
+ "add x10, x10, %x[out_stride]\n"
+ "ld1h { z10.h }, p2/Z, [x11, #2, MUL VL]\n"
"addvl x11, x11, #3\n"
- "st1h { z6.h }, p2, [x21, #3, MUL VL]\n"
- "zip2 z28.h, z28.h, z21.h\n"
- "zip2 z21.h, z5.h, z20.h\n"
- "addvl x10, x10, #3\n"
- "st1h { z4.h }, p2, [x21, #4, MUL VL]\n"
- "zip1 z20.h, z18.h, z17.h\n"
- "zip2 z18.h, z18.h, z17.h\n"
+ "ld1h { z24.h }, p2/Z, [x9]\n"
+ "mov x19, x10\n"
+ "ld1h { z23.h }, p2/Z, [x9, #1, MUL VL]\n"
+ "add x10, x10, %x[out_stride]\n"
+ "ld1h { z9.h }, p2/Z, [x9, #2, MUL VL]\n"
"addvl x9, x9, #3\n"
- "st1h { z3.h }, p2, [x21, #5, MUL VL]\n"
- "zip1 z17.h, z19.h, z16.h\n"
- "zip2 z16.h, z19.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x28]\n"
+ "zip1 z22.h, z19.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x28, #1, MUL VL]\n"
+ "sub x22, x22, x21\n"
+ "zip2 z21.h, z19.h, z16.h\n"
+ "ld1h { z8.h }, p2/Z, [x28, #2, MUL VL]\n"
"addvl x28, x28, #3\n"
- "st1h { z2.h }, p2, [x21, #6, MUL VL]\n"
- "zip1 z19.h, z27.h, z26.h\n"
- "zip2 z27.h, z27.h, z26.h\n"
+ "zip1 z20.h, z18.h, z17.h\n"
+ "ld1h { z16.h }, p2/Z, [x27]\n"
+ "cmp x22, x21\n"
+ "zip2 z7.h, z18.h, z17.h\n"
+ "ld1h { z19.h }, p2/Z, [x27, #1, MUL VL]\n"
+ "zip1 z6.h, z10.h, z8.h\n"
+ "ld1h { z5.h }, p2/Z, [x27, #2, MUL VL]\n"
+ "addvl x27, x27, #3\n"
+ "zip1 z17.h, z24.h, z16.h\n"
+ "ld1h { z18.h }, p2/Z, [x26]\n"
+ "zip2 z16.h, z24.h, z16.h\n"
+ "ld1h { z4.h }, p2/Z, [x26, #1, MUL VL]\n"
+ "zip1 z3.h, z22.h, z17.h\n"
+ "ld1h { z2.h }, p2/Z, [x26, #2, MUL VL]\n"
+ "addvl x26, x26, #3\n"
+ "zip2 z1.h, z22.h, z17.h\n"
+ "ld1h { z0.h }, p2/Z, [x25]\n"
+ "zip1 z31.h, z21.h, z16.h\n"
+ "ld1h { z30.h }, p2/Z, [x25, #1, MUL VL]\n"
+ "zip2 z29.h, z21.h, z16.h\n"
+ "ld1h { z28.h }, p2/Z, [x25, #2, MUL VL]\n"
"addvl x25, x25, #3\n"
- "st1h { z1.h }, p2, [x21, #7, MUL VL]\n"
- "addvl x21, x21, #12\n"
- "zip1 z26.h, z25.h, z24.h\n"
- "zip2 z25.h, z25.h, z24.h\n"
- "st1h { z20.h }, p2, [x21, #-4, MUL VL]\n"
- "zip1 z24.h, z23.h, z22.h\n"
- "zip2 z23.h, z23.h, z22.h\n"
+ "zip1 z16.h, z23.h, z19.h\n"
+ "ld1h { z17.h }, p2/Z, [x24]\n"
+ "zip2 z27.h, z23.h, z19.h\n"
+ "ld1h { z26.h }, p2/Z, [x24, #1, MUL VL]\n"
+ "zip1 z25.h, z20.h, z16.h\n"
+ "ld1h { z24.h }, p2/Z, [x24, #2, MUL VL]\n"
"addvl x24, x24, #3\n"
- "st1h { z18.h }, p2, [x21, #-3, MUL VL]\n"
- "zip1 z22.h, z28.h, z21.h\n"
- "zip2 z21.h, z28.h, z21.h\n"
+ "zip2 z23.h, z20.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x23]\n"
+ "zip1 z20.h, z18.h, z17.h\n"
+ "ld1h { z22.h }, p2/Z, [x23, #1, MUL VL]\n"
+ "zip2 z19.h, z18.h, z17.h\n"
+ "ld1h { z21.h }, p2/Z, [x23, #2, MUL VL]\n"
"addvl x23, x23, #3\n"
- "st1h { z17.h }, p2, [x21, #-2, MUL VL]\n"
- "zip1 z18.h, z0.h, z30.h\n"
- "zip1 z17.h, z31.h, z29.h\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z16.h }, p2, [x21, #-1, MUL VL]\n"
- "zip2 z20.h, z0.h, z30.h\n"
- "zip2 z16.h, z31.h, z29.h\n"
- "st1h { z19.h }, p2, [x20]\n"
- "zip1 z19.h, z18.h, z17.h\n"
- "zip2 z18.h, z18.h, z17.h\n"
- "st1h { z27.h }, p2, [x20, #1, MUL VL]\n"
- "zip1 z17.h, z20.h, z16.h\n"
- "zip2 z16.h, z20.h, z16.h\n"
- "st1h { z26.h }, p2, [x20, #2, MUL VL]\n"
- "st1h { z25.h }, p2, [x20, #3, MUL VL]\n"
- "st1h { z24.h }, p2, [x20, #4, MUL VL]\n"
+ "zip1 z18.h, z0.h, z16.h\n"
+ "st1h { z3.h }, p2, [x20]\n"
+ "zip2 z17.h, z0.h, z16.h\n"
+ "st1h { z1.h }, p2, [x20, #1, MUL VL]\n"
+ "zip1 z16.h, z20.h, z18.h\n"
+ "st1h { z31.h }, p2, [x20, #2, MUL VL]\n"
+ "zip2 z18.h, z20.h, z18.h\n"
+ "st1h { z29.h }, p2, [x20, #3, MUL VL]\n"
+ "zip1 z20.h, z19.h, z17.h\n"
+ "st1h { z25.h }, p2, [x20, #4, MUL VL]\n"
+ "zip2 z19.h, z19.h, z17.h\n"
"st1h { z23.h }, p2, [x20, #5, MUL VL]\n"
- "st1h { z22.h }, p2, [x20, #6, MUL VL]\n"
- "st1h { z21.h }, p2, [x20, #7, MUL VL]\n"
+ "zip1 z17.h, z4.h, z26.h\n"
+ "st1h { z16.h }, p2, [x20, #6, MUL VL]\n"
+ "zip1 z16.h, z30.h, z22.h\n"
+ "st1h { z18.h }, p2, [x20, #7, MUL VL]\n"
"addvl x20, x20, #12\n"
- "st1h { z19.h }, p2, [x20, #-4, MUL VL]\n"
- "st1h { z18.h }, p2, [x20, #-3, MUL VL]\n"
- "st1h { z17.h }, p2, [x20, #-2, MUL VL]\n"
+ "zip1 z18.h, z17.h, z16.h\n"
+ "st1h { z20.h }, p2, [x20, #-4, MUL VL]\n"
+ "zip2 z16.h, z17.h, z16.h\n"
+ "st1h { z19.h }, p2, [x20, #-3, MUL VL]\n"
+ "zip1 z17.h, z7.h, z27.h\n"
+ "st1h { z18.h }, p2, [x20, #-2, MUL VL]\n"
+ "zip2 z18.h, z7.h, z27.h\n"
"st1h { z16.h }, p2, [x20, #-1, MUL VL]\n"
+ "zip1 z16.h, z9.h, z5.h\n"
+ "st1h { z17.h }, p2, [x19]\n"
+ "zip1 z17.h, z6.h, z16.h\n"
+ "st1h { z18.h }, p2, [x19, #1, MUL VL]\n"
+ "zip2 z16.h, z6.h, z16.h\n"
+ "st1h { z17.h }, p2, [x19, #2, MUL VL]\n"
+ "zip2 z18.h, z10.h, z8.h\n"
+ "st1h { z16.h }, p2, [x19, #3, MUL VL]\n"
+ "zip2 z17.h, z9.h, z5.h\n"
+ "zip1 z16.h, z18.h, z17.h\n"
+ "st1h { z16.h }, p2, [x19, #4, MUL VL]\n"
+ "zip2 z16.h, z18.h, z17.h\n"
+ "st1h { z16.h }, p2, [x19, #5, MUL VL]\n"
+ "zip2 z18.h, z4.h, z26.h\n"
+ "zip2 z17.h, z30.h, z22.h\n"
+ "zip1 z16.h, z18.h, z17.h\n"
+ "st1h { z16.h }, p2, [x19, #6, MUL VL]\n"
+ "zip2 z16.h, z18.h, z17.h\n"
+ "st1h { z16.h }, p2, [x19, #7, MUL VL]\n"
+ "addvl x19, x19, #12\n"
+ "zip1 z18.h, z2.h, z24.h\n"
+ "zip1 z17.h, z28.h, z21.h\n"
+ "zip1 z16.h, z18.h, z17.h\n"
+ "st1h { z16.h }, p2, [x19, #-4, MUL VL]\n"
+ "zip2 z16.h, z18.h, z17.h\n"
+ "st1h { z16.h }, p2, [x19, #-3, MUL VL]\n"
+ "zip2 z18.h, z2.h, z24.h\n"
+ "zip2 z17.h, z28.h, z21.h\n"
+ "zip1 z16.h, z18.h, z17.h\n"
+ "st1h { z16.h }, p2, [x19, #-2, MUL VL]\n"
+ "zip2 z16.h, z18.h, z17.h\n"
+ "st1h { z16.h }, p2, [x19, #-1, MUL VL]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x27, 5f\n"
+ "cbz x22, 5f\n"
"4:" // Main row loop: Column loop
- "mov x20, x27\n"
+ "mov x20, x22\n"
+ "mov x19, x10\n"
"whilelt p1.h, XZR, x20\n"
- "ld1h { z17.h }, p1/Z, [x12]\n"
- "ld1h { z19.h }, p1/Z, [x11]\n"
+ "ld1h { z18.h }, p1/Z, [x11]\n"
+ "ld1h { z23.h }, p1/Z, [x9]\n"
"dech x20\n"
+ "ld1h { z16.h }, p1/Z, [x28]\n"
+ "zip1 z17.h, z18.h, z16.h\n"
+ "ld1h { z20.h }, p1/Z, [x27]\n"
"whilelt p0.h, XZR, x20\n"
- "ld1h { z22.h }, p0/Z, [x12, #1, MUL VL]\n"
+ "zip2 z22.h, z18.h, z16.h\n"
"ld1h { z21.h }, p0/Z, [x11, #1, MUL VL]\n"
- "ld1h { z16.h }, p1/Z, [x10]\n"
- "ld1h { z20.h }, p0/Z, [x10, #1, MUL VL]\n"
- "zip1 z25.h, z17.h, z16.h\n"
- "zip2 z24.h, z17.h, z16.h\n"
- "ld1h { z18.h }, p1/Z, [x9]\n"
- "ld1h { z17.h }, p0/Z, [x9, #1, MUL VL]\n"
- "zip1 z16.h, z19.h, z18.h\n"
- "zip2 z19.h, z19.h, z18.h\n"
- "ld1h { z0.h }, p1/Z, [x28]\n"
- "ld1h { z31.h }, p1/Z, [x25]\n"
- "zip1 z23.h, z22.h, z20.h\n"
- "zip1 z22.h, z21.h, z17.h\n"
- "ld1h { z30.h }, p1/Z, [x24]\n"
- "ld1h { z29.h }, p1/Z, [x23]\n"
- "zip1 z21.h, z0.h, z30.h\n"
- "zip1 z18.h, z31.h, z29.h\n"
- "ld1h { z28.h }, p0/Z, [x28, #1, MUL VL]\n"
- "ld1h { z27.h }, p0/Z, [x25, #1, MUL VL]\n"
- "mov x20, x22\n"
- "decd x27, ALL, MUL #6\n"
- "ld1h { z20.h }, p0/Z, [x24, #1, MUL VL]\n"
- "ld1h { z26.h }, p0/Z, [x23, #1, MUL VL]\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
- "addvl x10, x10, #1\n"
- "addvl x9, x9, #1\n"
- "zip1 z17.h, z25.h, z16.h\n"
- "zip2 z16.h, z25.h, z16.h\n"
- "addvl x28, x28, #1\n"
- "addvl x25, x25, #1\n"
- "zip1 z25.h, z24.h, z19.h\n"
- "zip2 z19.h, z24.h, z19.h\n"
- "addvl x24, x24, #1\n"
- "addvl x23, x23, #1\n"
- "zip1 z24.h, z23.h, z22.h\n"
- "zip2 z23.h, z23.h, z22.h\n"
- "zip1 z22.h, z21.h, z18.h\n"
- "zip2 z21.h, z21.h, z18.h\n"
- "st1h { z17.h }, p2, [x20]\n"
- "cmp x27, #0x0\n"
- "zip2 z18.h, z0.h, z30.h\n"
- "zip2 z17.h, z31.h, z29.h\n"
- "st1h { z16.h }, p2, [x20, #1, MUL VL]\n"
- "incd x12, ALL, MUL #4\n"
- "zip1 z20.h, z28.h, z20.h\n"
- "zip1 z16.h, z27.h, z26.h\n"
- "st1h { z25.h }, p2, [x20, #2, MUL VL]\n"
+ "zip1 z16.h, z23.h, z20.h\n"
+ "ld1h { z19.h }, p0/Z, [x9, #1, MUL VL]\n"
"incd x11, ALL, MUL #4\n"
- "st1h { z19.h }, p2, [x20, #3, MUL VL]\n"
- "incd x10, ALL, MUL #4\n"
+ "zip1 z0.h, z17.h, z16.h\n"
+ "ld1h { z18.h }, p0/Z, [x28, #1, MUL VL]\n"
+ "addvl x9, x9, #1\n"
+ "zip2 z31.h, z17.h, z16.h\n"
+ "ld1h { z17.h }, p0/Z, [x27, #1, MUL VL]\n"
"incd x9, ALL, MUL #4\n"
- "zip1 z19.h, z18.h, z17.h\n"
- "st1h { z24.h }, p2, [x20, #4, MUL VL]\n"
+ "zip2 z16.h, z23.h, z20.h\n"
+ "ld1h { z30.h }, p1/Z, [x26]\n"
+ "addvl x28, x28, #1\n"
+ "zip1 z20.h, z22.h, z16.h\n"
+ "ld1h { z29.h }, p0/Z, [x26, #1, MUL VL]\n"
"incd x28, ALL, MUL #4\n"
+ "zip2 z28.h, z22.h, z16.h\n"
+ "ld1h { z27.h }, p1/Z, [x25]\n"
+ "addvl x27, x27, #1\n"
+ "zip1 z18.h, z21.h, z18.h\n"
+ "ld1h { z26.h }, p0/Z, [x25, #1, MUL VL]\n"
+ "incd x27, ALL, MUL #4\n"
+ "zip1 z17.h, z19.h, z17.h\n"
+ "ld1h { z16.h }, p1/Z, [x24]\n"
+ "addvl x26, x26, #1\n"
+ "zip1 z25.h, z18.h, z17.h\n"
+ "ld1h { z24.h }, p0/Z, [x24, #1, MUL VL]\n"
+ "incd x26, ALL, MUL #4\n"
+ "zip2 z23.h, z18.h, z17.h\n"
+ "ld1h { z22.h }, p1/Z, [x23]\n"
+ "addvl x25, x25, #1\n"
+ "zip1 z19.h, z30.h, z16.h\n"
+ "ld1h { z21.h }, p0/Z, [x23, #1, MUL VL]\n"
"incd x25, ALL, MUL #4\n"
- "zip2 z18.h, z18.h, z17.h\n"
- "st1h { z23.h }, p2, [x20, #5, MUL VL]\n"
+ "zip2 z17.h, z30.h, z16.h\n"
+ "st1h { z0.h }, p2, [x19]\n"
+ "addvl x24, x24, #1\n"
+ "zip1 z16.h, z27.h, z22.h\n"
+ "st1h { z31.h }, p2, [x19, #1, MUL VL]\n"
"incd x24, ALL, MUL #4\n"
+ "zip1 z18.h, z19.h, z16.h\n"
+ "st1h { z20.h }, p2, [x19, #2, MUL VL]\n"
+ "addvl x23, x23, #1\n"
+ "zip2 z20.h, z19.h, z16.h\n"
+ "st1h { z28.h }, p2, [x19, #3, MUL VL]\n"
"incd x23, ALL, MUL #4\n"
- "zip1 z17.h, z20.h, z16.h\n"
- "st1h { z22.h }, p2, [x20, #6, MUL VL]\n"
- "zip2 z16.h, z20.h, z16.h\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z21.h }, p2, [x20, #7, MUL VL]\n"
- "addvl x20, x20, #12\n"
- "st1h { z19.h }, p2, [x20, #-4, MUL VL]\n"
- "st1h { z18.h }, p2, [x20, #-3, MUL VL]\n"
- "st1h { z17.h }, p2, [x20, #-2, MUL VL]\n"
- "st1h { z16.h }, p2, [x20, #-1, MUL VL]\n"
+ "zip2 z16.h, z27.h, z22.h\n"
+ "st1h { z25.h }, p2, [x19, #4, MUL VL]\n"
+ "add x10, x10, %x[out_stride]\n"
+ "zip1 z19.h, z17.h, z16.h\n"
+ "st1h { z23.h }, p2, [x19, #5, MUL VL]\n"
+ "decd x22, ALL, MUL #6\n"
+ "zip2 z17.h, z17.h, z16.h\n"
+ "st1h { z18.h }, p2, [x19, #6, MUL VL]\n"
+ "cmp x22, #0x0\n"
+ "zip1 z18.h, z29.h, z24.h\n"
+ "st1h { z20.h }, p2, [x19, #7, MUL VL]\n"
+ "addvl x19, x19, #12\n"
+ "zip1 z16.h, z26.h, z21.h\n"
+ "st1h { z19.h }, p2, [x19, #-4, MUL VL]\n"
+ "st1h { z17.h }, p2, [x19, #-3, MUL VL]\n"
+ "zip1 z17.h, z18.h, z16.h\n"
+ "zip2 z16.h, z18.h, z16.h\n"
+ "st1h { z17.h }, p2, [x19, #-2, MUL VL]\n"
+ "st1h { z16.h }, p2, [x19, #-1, MUL VL]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp %x[height], #0x8\n"
"addvl %x[out], %x[out], #12\n"
+ "cmp %x[height], #0x8\n"
"bge 1b\n"
"cbz %x[height], 12f\n"
"6:" // Main loop skip
"7:" // Tail row loop: Head
- "mov x12, %x[in]\n"
- "add x11, x12, %x[in_stride]\n"
- "add x10, x11, %x[in_stride]\n"
- "mov x21, %x[width]\n"
- "cnth x20, ALL, MUL #3\n"
- "add x9, x10, %x[in_stride]\n"
+ "mov x11, %x[in]\n"
+ "mov x10, %x[out]\n"
+ "add x9, x11, %x[in_stride]\n"
+ "add x28, x9, %x[in_stride]\n"
+ "add x27, x28, %x[in_stride]\n"
+ "add %x[in], x27, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x9, %x[in_stride]\n"
- "csel x9, x9, %x[pad_row], GT\n"
- "csel x10, x10, %x[pad_row], GE\n"
+ "csel x27, x27, %x[pad_row], GT\n"
+ "csel x28, x28, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x11, x11, %x[pad_row], GT\n"
- "cmp x21, x20\n"
- "mov x22, %x[out]\n"
+ "csel x9, x9, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x4\n"
+ "mov x20, %x[width]\n"
+ "cnth x19, ALL, MUL #3\n"
+ "cmp x20, x19\n"
"blt 9f\n"
"8:" // Tail row loop: Unroll column loop
- "ld1h { z18.h }, p2/Z, [x12]\n"
- "ld1h { z24.h }, p2/Z, [x12, #1, MUL VL]\n"
- "sub x21, x21, x20\n"
- "cmp x21, x20\n"
- "ld1h { z17.h }, p2/Z, [x11]\n"
- "ld1h { z23.h }, p2/Z, [x11, #1, MUL VL]\n"
- "ld1h { z16.h }, p2/Z, [x10]\n"
- "ld1h { z22.h }, p2/Z, [x10, #1, MUL VL]\n"
- "zip1 z31.h, z18.h, z16.h\n"
- "zip2 z30.h, z18.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x9]\n"
- "ld1h { z20.h }, p2/Z, [x9, #1, MUL VL]\n"
- "zip1 z29.h, z17.h, z16.h\n"
- "zip2 z28.h, z17.h, z16.h\n"
- "ld1h { z19.h }, p2/Z, [x12, #2, MUL VL]\n"
- "ld1h { z18.h }, p2/Z, [x11, #2, MUL VL]\n"
- "zip1 z27.h, z24.h, z22.h\n"
- "zip1 z21.h, z23.h, z20.h\n"
- "ld1h { z17.h }, p2/Z, [x10, #2, MUL VL]\n"
- "ld1h { z16.h }, p2/Z, [x9, #2, MUL VL]\n"
- "zip2 z26.h, z24.h, z22.h\n"
- "zip2 z20.h, z23.h, z20.h\n"
- "zip1 z25.h, z19.h, z17.h\n"
- "zip1 z24.h, z18.h, z16.h\n"
- "addvl x12, x12, #3\n"
+ "ld1h { z19.h }, p2/Z, [x11]\n"
+ "sub x20, x20, x19\n"
+ "ld1h { z18.h }, p2/Z, [x11, #1, MUL VL]\n"
+ "cmp x20, x19\n"
+ "ld1h { z30.h }, p2/Z, [x11, #2, MUL VL]\n"
"addvl x11, x11, #3\n"
- "zip2 z23.h, z19.h, z17.h\n"
- "zip2 z22.h, z18.h, z16.h\n"
- "addvl x10, x10, #3\n"
+ "ld1h { z29.h }, p2/Z, [x9]\n"
+ "ld1h { z28.h }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1h { z27.h }, p2/Z, [x9, #2, MUL VL]\n"
"addvl x9, x9, #3\n"
- "zip1 z17.h, z31.h, z29.h\n"
- "zip2 z16.h, z31.h, z29.h\n"
- "st1h { z17.h }, p2, [x22]\n"
- "zip1 z19.h, z30.h, z28.h\n"
- "zip2 z18.h, z30.h, z28.h\n"
- "st1h { z16.h }, p2, [x22, #1, MUL VL]\n"
- "zip1 z17.h, z27.h, z21.h\n"
- "zip2 z16.h, z27.h, z21.h\n"
- "st1h { z19.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z18.h }, p2, [x22, #3, MUL VL]\n"
- "zip1 z21.h, z26.h, z20.h\n"
- "zip2 z20.h, z26.h, z20.h\n"
- "st1h { z17.h }, p2, [x22, #4, MUL VL]\n"
- "zip1 z19.h, z25.h, z24.h\n"
- "zip2 z18.h, z25.h, z24.h\n"
- "st1h { z16.h }, p2, [x22, #5, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip1 z17.h, z23.h, z22.h\n"
- "zip2 z16.h, z23.h, z22.h\n"
- "st1h { z21.h }, p2, [x22]\n"
- "st1h { z20.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z19.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z18.h }, p2, [x22, #3, MUL VL]\n"
- "st1h { z17.h }, p2, [x22, #4, MUL VL]\n"
- "st1h { z16.h }, p2, [x22, #5, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "ld1h { z16.h }, p2/Z, [x28]\n"
+ "zip1 z26.h, z19.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x28, #1, MUL VL]\n"
+ "zip2 z25.h, z19.h, z16.h\n"
+ "ld1h { z24.h }, p2/Z, [x28, #2, MUL VL]\n"
+ "addvl x28, x28, #3\n"
+ "zip1 z23.h, z18.h, z17.h\n"
+ "ld1h { z16.h }, p2/Z, [x27]\n"
+ "zip2 z22.h, z18.h, z17.h\n"
+ "ld1h { z21.h }, p2/Z, [x27, #1, MUL VL]\n"
+ "zip1 z20.h, z30.h, z24.h\n"
+ "ld1h { z19.h }, p2/Z, [x27, #2, MUL VL]\n"
+ "addvl x27, x27, #3\n"
+ "zip1 z18.h, z29.h, z16.h\n"
+ "zip2 z17.h, z29.h, z16.h\n"
+ "zip1 z16.h, z26.h, z18.h\n"
+ "st1h { z16.h }, p2, [x10]\n"
+ "zip2 z16.h, z26.h, z18.h\n"
+ "st1h { z16.h }, p2, [x10, #1, MUL VL]\n"
+ "zip1 z16.h, z25.h, z17.h\n"
+ "st1h { z16.h }, p2, [x10, #2, MUL VL]\n"
+ "zip2 z16.h, z25.h, z17.h\n"
+ "st1h { z16.h }, p2, [x10, #3, MUL VL]\n"
+ "zip1 z17.h, z28.h, z21.h\n"
+ "zip1 z16.h, z23.h, z17.h\n"
+ "st1h { z16.h }, p2, [x10, #4, MUL VL]\n"
+ "zip2 z16.h, z23.h, z17.h\n"
+ "st1h { z16.h }, p2, [x10, #5, MUL VL]\n"
+ "add x10, x10, %x[out_stride]\n"
+ "zip2 z18.h, z28.h, z21.h\n"
+ "zip1 z17.h, z27.h, z19.h\n"
+ "zip1 z16.h, z22.h, z18.h\n"
+ "st1h { z16.h }, p2, [x10]\n"
+ "zip2 z16.h, z22.h, z18.h\n"
+ "st1h { z16.h }, p2, [x10, #1, MUL VL]\n"
+ "zip1 z16.h, z20.h, z17.h\n"
+ "st1h { z16.h }, p2, [x10, #2, MUL VL]\n"
+ "zip2 z16.h, z20.h, z17.h\n"
+ "st1h { z16.h }, p2, [x10, #3, MUL VL]\n"
+ "zip2 z18.h, z30.h, z24.h\n"
+ "zip2 z17.h, z27.h, z19.h\n"
+ "zip1 z16.h, z18.h, z17.h\n"
+ "st1h { z16.h }, p2, [x10, #4, MUL VL]\n"
+ "zip2 z16.h, z18.h, z17.h\n"
+ "st1h { z16.h }, p2, [x10, #5, MUL VL]\n"
+ "add x10, x10, %x[out_stride]\n"
"bge 8b\n"
"9:" // Tail row loop: Unroll column loop skip
- "cbz x21, 11f\n"
+ "cbz x20, 11f\n"
"10:" // Tail row loop: Column loop
- "mov x20, x21\n"
- "whilelt p1.h, XZR, x20\n"
- "ld1h { z22.h }, p1/Z, [x12]\n"
- "ld1h { z25.h }, p1/Z, [x11]\n"
- "dech x20\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z24.h }, p0/Z, [x12, #1, MUL VL]\n"
- "ld1h { z23.h }, p0/Z, [x11, #1, MUL VL]\n"
- "ld1h { z21.h }, p1/Z, [x10]\n"
- "ld1h { z20.h }, p0/Z, [x10, #1, MUL VL]\n"
- "decd x21, ALL, MUL #6\n"
- "addvl x12, x12, #1\n"
- "ld1h { z18.h }, p1/Z, [x9]\n"
- "ld1h { z17.h }, p0/Z, [x9, #1, MUL VL]\n"
+ "mov x19, x20\n"
+ "decd x20, ALL, MUL #6\n"
+ "whilelt p0.h, XZR, x19\n"
+ "ld1h { z17.h }, p0/Z, [x11]\n"
+ "ld1h { z25.h }, p0/Z, [x9]\n"
+ "dech x19\n"
+ "ld1h { z16.h }, p0/Z, [x28]\n"
+ "zip1 z18.h, z17.h, z16.h\n"
+ "ld1h { z24.h }, p0/Z, [x27]\n"
+ "whilelt p0.h, XZR, x19\n"
+ "zip2 z23.h, z17.h, z16.h\n"
+ "ld1h { z22.h }, p0/Z, [x11, #1, MUL VL]\n"
"addvl x11, x11, #1\n"
- "addvl x10, x10, #1\n"
- "addvl x9, x9, #1\n"
- "zip1 z19.h, z22.h, z21.h\n"
- "zip1 z16.h, z25.h, z18.h\n"
- "cmp x21, #0x0\n"
- "zip2 z22.h, z22.h, z21.h\n"
- "zip2 z18.h, z25.h, z18.h\n"
- "incd x12, ALL, MUL #4\n"
+ "zip1 z16.h, z25.h, z24.h\n"
+ "ld1h { z21.h }, p0/Z, [x9, #1, MUL VL]\n"
"incd x11, ALL, MUL #4\n"
- "zip1 z21.h, z24.h, z20.h\n"
- "zip1 z20.h, z23.h, z17.h\n"
- "incd x10, ALL, MUL #4\n"
+ "zip1 z17.h, z18.h, z16.h\n"
+ "ld1h { z20.h }, p0/Z, [x28, #1, MUL VL]\n"
+ "addvl x9, x9, #1\n"
+ "zip2 z18.h, z18.h, z16.h\n"
+ "ld1h { z19.h }, p0/Z, [x27, #1, MUL VL]\n"
"incd x9, ALL, MUL #4\n"
- "zip1 z17.h, z19.h, z16.h\n"
- "zip2 z16.h, z19.h, z16.h\n"
- "st1h { z17.h }, p2, [x22]\n"
- "zip1 z19.h, z22.h, z18.h\n"
- "zip2 z18.h, z22.h, z18.h\n"
- "st1h { z16.h }, p2, [x22, #1, MUL VL]\n"
- "zip1 z17.h, z21.h, z20.h\n"
- "zip2 z16.h, z21.h, z20.h\n"
- "st1h { z19.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z18.h }, p2, [x22, #3, MUL VL]\n"
- "st1h { z17.h }, p2, [x22, #4, MUL VL]\n"
- "st1h { z16.h }, p2, [x22, #5, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "zip2 z16.h, z25.h, z24.h\n"
+ "st1h { z17.h }, p2, [x10]\n"
+ "addvl x28, x28, #1\n"
+ "zip1 z17.h, z23.h, z16.h\n"
+ "st1h { z18.h }, p2, [x10, #1, MUL VL]\n"
+ "incd x28, ALL, MUL #4\n"
+ "zip2 z16.h, z23.h, z16.h\n"
+ "st1h { z17.h }, p2, [x10, #2, MUL VL]\n"
+ "addvl x27, x27, #1\n"
+ "zip1 z18.h, z22.h, z20.h\n"
+ "st1h { z16.h }, p2, [x10, #3, MUL VL]\n"
+ "incd x27, ALL, MUL #4\n"
+ "zip1 z17.h, z21.h, z19.h\n"
+ "cmp x20, #0x0\n"
+ "zip1 z16.h, z18.h, z17.h\n"
+ "st1h { z16.h }, p2, [x10, #4, MUL VL]\n"
+ "zip2 z16.h, z18.h, z17.h\n"
+ "st1h { z16.h }, p2, [x10, #5, MUL VL]\n"
+ "add x10, x10, %x[out_stride]\n"
"bgt 10b\n"
"11:" // Tail row loop: Column loop skip
- "cmp %x[height], #0x1\n"
"addvl %x[out], %x[out], #6\n"
+ "cmp %x[height], #0x1\n"
"bge 7b\n"
"12:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
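Both the 8-row head and the 4-row tail of these transforms handle ragged edges the same way: row pointers beyond height are swapped for pad_row with CSEL, and the final partial columns are loaded under a WHILELT-generated predicate with zeroing (p0/Z), so inactive lanes contribute zeros to the interleave. A minimal scalar model of that predicated tail load, with the names load_tail, lanes and remaining as illustrative assumptions:

#include <cstddef>
#include <cstdint>

// Scalar model of a WHILELT-predicated, zeroing tail load:
// lane l is read only while l < remaining, other lanes stay zero,
// mirroring "whilelt p0.h, XZR, x20" + "ld1h { z.h }, p0/Z, [...]".
void load_tail(uint16_t *dst, const uint16_t *src, size_t lanes, size_t remaining)
{
    for (size_t l = 0; l < lanes; l++)
    {
        dst[l] = (l < remaining) ? src[l] : 0;
    }
}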
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_2x4_fp32bf16.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_2x4_fp32bf16.hpp
index 146da33869..94ce157185 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_2x4_fp32bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_2x4_fp32bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -42,79 +42,79 @@ void sve_transpose_interleave_6VL_2x4_fp32bf16(bfloat16 *out, const float *in, s
__asm__ __volatile__(
"ptrue p3.b\n"
"1:" // Main row loop: Head
- "mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
+ "mov x25, %x[in]\n"
"add x24, x25, %x[in_stride]\n"
- "mov x23, %x[width]\n"
- "cnth x20, ALL, MUL #3\n"
- "add x22, x24, %x[in_stride]\n"
+ "add x23, x24, %x[in_stride]\n"
+ "mov x22, %x[width]\n"
+ "cnth x19, ALL, MUL #3\n"
+ "add x21, x23, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x22, %x[in_stride]\n"
- "csel x22, x22, %x[pad_row], GT\n"
- "csel x24, x24, %x[pad_row], GE\n"
+ "add %x[in], x21, %x[in_stride]\n"
+ "csel x21, x21, %x[pad_row], GT\n"
+ "csel x23, x23, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x25, x25, %x[pad_row], GT\n"
- "cmp x23, x20\n"
- "mov x21, %x[out]\n"
+ "csel x24, x24, %x[pad_row], GT\n"
+ "cmp x22, x19\n"
+ "mov x20, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1w { z17.s }, p3/Z, [x26]\n"
- "ld1w { z18.s }, p3/Z, [x26, #1, MUL VL]\n"
- "sub x23, x23, x20\n"
- "cmp x23, x20\n"
- "ld1w { z19.s }, p3/Z, [x26, #2, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x24]\n"
+ "ld1w { z17.s }, p3/Z, [x25]\n"
+ "ld1w { z18.s }, p3/Z, [x25, #1, MUL VL]\n"
+ "sub x22, x22, x19\n"
+ "cmp x22, x19\n"
+ "ld1w { z19.s }, p3/Z, [x25, #2, MUL VL]\n"
+ "ld1w { z16.s }, p3/Z, [x23]\n"
"zip1 z21.s, z17.s, z16.s\n"
"zip2 z20.s, z17.s, z16.s\n"
- "ld1w { z17.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z17.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z16.s }, p3/Z, [x23, #2, MUL VL]\n"
"zip1 z29.s, z18.s, z17.s\n"
"zip2 z28.s, z18.s, z17.s\n"
- "ld1w { z17.s }, p3/Z, [x26, #3, MUL VL]\n"
- "ld1w { z18.s }, p3/Z, [x26, #4, MUL VL]\n"
+ "ld1w { z17.s }, p3/Z, [x25, #3, MUL VL]\n"
+ "ld1w { z18.s }, p3/Z, [x25, #4, MUL VL]\n"
"zip1 z27.s, z19.s, z16.s\n"
"zip2 z26.s, z19.s, z16.s\n"
- "ld1w { z19.s }, p3/Z, [x26, #5, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z19.s }, p3/Z, [x25, #5, MUL VL]\n"
+ "ld1w { z16.s }, p3/Z, [x23, #3, MUL VL]\n"
"zip1 z25.s, z17.s, z16.s\n"
"zip2 z24.s, z17.s, z16.s\n"
- "ld1w { z17.s }, p3/Z, [x24, #4, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x24, #5, MUL VL]\n"
+ "ld1w { z17.s }, p3/Z, [x23, #4, MUL VL]\n"
+ "ld1w { z16.s }, p3/Z, [x23, #5, MUL VL]\n"
"zip1 z12.s, z18.s, z17.s\n"
"zip2 z11.s, z18.s, z17.s\n"
- "ld1w { z18.s }, p3/Z, [x25]\n"
- "ld1w { z23.s }, p3/Z, [x25, #1, MUL VL]\n"
+ "ld1w { z18.s }, p3/Z, [x24]\n"
+ "ld1w { z23.s }, p3/Z, [x24, #1, MUL VL]\n"
"zip1 z10.s, z19.s, z16.s\n"
"zip2 z9.s, z19.s, z16.s\n"
- "ld1w { z22.s }, p3/Z, [x25, #2, MUL VL]\n"
- "ld1w { z17.s }, p3/Z, [x22]\n"
+ "ld1w { z22.s }, p3/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z17.s }, p3/Z, [x21]\n"
".inst 0x658aaea8 // bfcvt z8.h, p3/M, z21.s\n"
"zip1 z7.s, z18.s, z17.s\n"
- "ld1w { z16.s }, p3/Z, [x22, #1, MUL VL]\n"
- "ld1w { z21.s }, p3/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z16.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z21.s }, p3/Z, [x21, #2, MUL VL]\n"
".inst 0x658aae86 // bfcvt z6.h, p3/M, z20.s\n"
"zip2 z5.s, z18.s, z17.s\n"
- "ld1w { z20.s }, p3/Z, [x25, #3, MUL VL]\n"
- "ld1w { z19.s }, p3/Z, [x25, #4, MUL VL]\n"
+ "ld1w { z20.s }, p3/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z19.s }, p3/Z, [x24, #4, MUL VL]\n"
".inst 0x658aafa4 // bfcvt z4.h, p3/M, z29.s\n"
"zip1 z3.s, z23.s, z16.s\n"
- "ld1w { z2.s }, p3/Z, [x25, #5, MUL VL]\n"
- "ld1w { z18.s }, p3/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z2.s }, p3/Z, [x24, #5, MUL VL]\n"
+ "ld1w { z18.s }, p3/Z, [x21, #3, MUL VL]\n"
".inst 0x658aaf81 // bfcvt z1.h, p3/M, z28.s\n"
"zip2 z0.s, z23.s, z16.s\n"
- "ld1w { z17.s }, p3/Z, [x22, #4, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x22, #5, MUL VL]\n"
+ "ld1w { z17.s }, p3/Z, [x21, #4, MUL VL]\n"
+ "ld1w { z16.s }, p3/Z, [x21, #5, MUL VL]\n"
".inst 0x658aaf7f // bfcvt z31.h, p3/M, z27.s\n"
"zip1 z30.s, z22.s, z21.s\n"
".inst 0x658aaf5d // bfcvt z29.h, p3/M, z26.s\n"
"zip2 z28.s, z22.s, z21.s\n"
- "addvl x26, x26, #6\n"
"addvl x25, x25, #6\n"
+ "addvl x24, x24, #6\n"
".inst 0x658aaf3b // bfcvt z27.h, p3/M, z25.s\n"
"zip1 z26.s, z20.s, z18.s\n"
- "addvl x24, x24, #6\n"
- "addvl x22, x22, #6\n"
+ "addvl x23, x23, #6\n"
+ "addvl x21, x21, #6\n"
".inst 0x658aaf19 // bfcvt z25.h, p3/M, z24.s\n"
"zip2 z24.s, z20.s, z18.s\n"
".inst 0x658aad97 // bfcvt z23.h, p3/M, z12.s\n"
@@ -127,70 +127,70 @@ void sve_transpose_interleave_6VL_2x4_fp32bf16(bfloat16 *out, const float *in, s
"zip2 z16.s, z2.s, z16.s\n"
".inst 0x648aace8 // bfcvtnt z8.h, p3/M, z7.s\n"
".inst 0x648aaca6 // bfcvtnt z6.h, p3/M, z5.s\n"
- "st1h { z8.h }, p3, [x21]\n"
+ "st1h { z8.h }, p3, [x20]\n"
".inst 0x648aac64 // bfcvtnt z4.h, p3/M, z3.s\n"
".inst 0x648aac01 // bfcvtnt z1.h, p3/M, z0.s\n"
- "st1h { z6.h }, p3, [x21, #1, MUL VL]\n"
+ "st1h { z6.h }, p3, [x20, #1, MUL VL]\n"
".inst 0x648aafdf // bfcvtnt z31.h, p3/M, z30.s\n"
".inst 0x648aaf9d // bfcvtnt z29.h, p3/M, z28.s\n"
- "st1h { z4.h }, p3, [x21, #2, MUL VL]\n"
- "st1h { z1.h }, p3, [x21, #3, MUL VL]\n"
+ "st1h { z4.h }, p3, [x20, #2, MUL VL]\n"
+ "st1h { z1.h }, p3, [x20, #3, MUL VL]\n"
".inst 0x648aaf5b // bfcvtnt z27.h, p3/M, z26.s\n"
".inst 0x648aaf19 // bfcvtnt z25.h, p3/M, z24.s\n"
- "st1h { z31.h }, p3, [x21, #4, MUL VL]\n"
+ "st1h { z31.h }, p3, [x20, #4, MUL VL]\n"
".inst 0x648aaed7 // bfcvtnt z23.h, p3/M, z22.s\n"
".inst 0x648aae95 // bfcvtnt z21.h, p3/M, z20.s\n"
- "st1h { z29.h }, p3, [x21, #5, MUL VL]\n"
- "add x21, x21, %x[out_stride]\n"
+ "st1h { z29.h }, p3, [x20, #5, MUL VL]\n"
+ "add x20, x20, %x[out_stride]\n"
".inst 0x648aae53 // bfcvtnt z19.h, p3/M, z18.s\n"
".inst 0x648aae11 // bfcvtnt z17.h, p3/M, z16.s\n"
- "st1h { z27.h }, p3, [x21]\n"
- "st1h { z25.h }, p3, [x21, #1, MUL VL]\n"
- "st1h { z23.h }, p3, [x21, #2, MUL VL]\n"
- "st1h { z21.h }, p3, [x21, #3, MUL VL]\n"
- "st1h { z19.h }, p3, [x21, #4, MUL VL]\n"
- "st1h { z17.h }, p3, [x21, #5, MUL VL]\n"
- "add x21, x21, %x[out_stride]\n"
+ "st1h { z27.h }, p3, [x20]\n"
+ "st1h { z25.h }, p3, [x20, #1, MUL VL]\n"
+ "st1h { z23.h }, p3, [x20, #2, MUL VL]\n"
+ "st1h { z21.h }, p3, [x20, #3, MUL VL]\n"
+ "st1h { z19.h }, p3, [x20, #4, MUL VL]\n"
+ "st1h { z17.h }, p3, [x20, #5, MUL VL]\n"
+ "add x20, x20, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x23, 5f\n"
+ "cbz x22, 5f\n"
"4:" // Main row loop: Column loop
- "mov x20, x23\n"
- "whilelt p2.s, XZR, x20\n"
- "ld1w { z20.s }, p2/Z, [x26]\n"
- "ld1w { z19.s }, p2/Z, [x24]\n"
- "decw x20\n"
- "whilelt p1.s, XZR, x20\n"
- "ld1w { z18.s }, p1/Z, [x26, #1, MUL VL]\n"
- "ld1w { z17.s }, p1/Z, [x24, #1, MUL VL]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z25.s }, p0/Z, [x26, #2, MUL VL]\n"
- "ld1w { z16.s }, p0/Z, [x24, #2, MUL VL]\n"
- "ld1w { z24.s }, p2/Z, [x25]\n"
- "ld1w { z30.s }, p1/Z, [x25, #1, MUL VL]\n"
+ "mov x19, x22\n"
+ "whilelt p2.s, XZR, x19\n"
+ "ld1w { z20.s }, p2/Z, [x25]\n"
+ "ld1w { z19.s }, p2/Z, [x23]\n"
+ "decw x19\n"
+ "whilelt p1.s, XZR, x19\n"
+ "ld1w { z18.s }, p1/Z, [x25, #1, MUL VL]\n"
+ "ld1w { z17.s }, p1/Z, [x23, #1, MUL VL]\n"
+ "decw x19\n"
+ "whilelt p0.s, XZR, x19\n"
+ "ld1w { z25.s }, p0/Z, [x25, #2, MUL VL]\n"
+ "ld1w { z16.s }, p0/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z24.s }, p2/Z, [x24]\n"
+ "ld1w { z30.s }, p1/Z, [x24, #1, MUL VL]\n"
"zip1 z23.s, z20.s, z19.s\n"
"zip2 z22.s, z20.s, z19.s\n"
- "ld1w { z29.s }, p0/Z, [x25, #2, MUL VL]\n"
- "ld1w { z21.s }, p2/Z, [x22]\n"
+ "ld1w { z29.s }, p0/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z21.s }, p2/Z, [x21]\n"
"zip1 z20.s, z18.s, z17.s\n"
"zip2 z19.s, z18.s, z17.s\n"
- "ld1w { z18.s }, p1/Z, [x22, #1, MUL VL]\n"
- "ld1w { z28.s }, p0/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z18.s }, p1/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z28.s }, p0/Z, [x21, #2, MUL VL]\n"
"zip1 z17.s, z25.s, z16.s\n"
"zip2 z16.s, z25.s, z16.s\n"
- "decd x23, ALL, MUL #6\n"
+ "decd x22, ALL, MUL #6\n"
".inst 0x658aaefb // bfcvt z27.h, p3/M, z23.s\n"
"zip1 z26.s, z24.s, z21.s\n"
- "cmp x23, #0x0\n"
+ "cmp x22, #0x0\n"
".inst 0x658aaed9 // bfcvt z25.h, p3/M, z22.s\n"
"zip2 z24.s, z24.s, z21.s\n"
- "addvl x26, x26, #3\n"
"addvl x25, x25, #3\n"
+ "addvl x24, x24, #3\n"
".inst 0x658aae97 // bfcvt z23.h, p3/M, z20.s\n"
"zip1 z22.s, z30.s, z18.s\n"
- "addvl x24, x24, #3\n"
- "addvl x22, x22, #3\n"
+ "addvl x23, x23, #3\n"
+ "addvl x21, x21, #3\n"
".inst 0x658aae75 // bfcvt z21.h, p3/M, z19.s\n"
"zip2 z20.s, z30.s, z18.s\n"
".inst 0x658aae33 // bfcvt z19.h, p3/M, z17.s\n"
@@ -199,17 +199,17 @@ void sve_transpose_interleave_6VL_2x4_fp32bf16(bfloat16 *out, const float *in, s
"zip2 z16.s, z29.s, z28.s\n"
".inst 0x648aaf5b // bfcvtnt z27.h, p3/M, z26.s\n"
".inst 0x648aaf19 // bfcvtnt z25.h, p3/M, z24.s\n"
- "st1h { z27.h }, p3, [x21]\n"
+ "st1h { z27.h }, p3, [x20]\n"
".inst 0x648aaed7 // bfcvtnt z23.h, p3/M, z22.s\n"
".inst 0x648aae95 // bfcvtnt z21.h, p3/M, z20.s\n"
- "st1h { z25.h }, p3, [x21, #1, MUL VL]\n"
+ "st1h { z25.h }, p3, [x20, #1, MUL VL]\n"
".inst 0x648aae53 // bfcvtnt z19.h, p3/M, z18.s\n"
".inst 0x648aae11 // bfcvtnt z17.h, p3/M, z16.s\n"
- "st1h { z23.h }, p3, [x21, #2, MUL VL]\n"
- "st1h { z21.h }, p3, [x21, #3, MUL VL]\n"
- "st1h { z19.h }, p3, [x21, #4, MUL VL]\n"
- "st1h { z17.h }, p3, [x21, #5, MUL VL]\n"
- "add x21, x21, %x[out_stride]\n"
+ "st1h { z23.h }, p3, [x20, #2, MUL VL]\n"
+ "st1h { z21.h }, p3, [x20, #3, MUL VL]\n"
+ "st1h { z19.h }, p3, [x20, #4, MUL VL]\n"
+ "st1h { z17.h }, p3, [x20, #5, MUL VL]\n"
+ "add x20, x20, %x[out_stride]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
"cmp %x[height], #0x1\n"
@@ -217,7 +217,7 @@ void sve_transpose_interleave_6VL_2x4_fp32bf16(bfloat16 *out, const float *in, s
"bge 1b\n"
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_4x2.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_4x2.hpp
index f6fc5e8b84..46b160b071 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_4x2.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_4x2.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -40,266 +40,267 @@ void sve_transpose_interleave_6VL_4x2(uint32_t *out, const uint32_t *in, size_t
size_t out_stride = 6 * roundup<size_t>(height, 2) * get_vector_length<uint16_t>();
__asm__ __volatile__(
+ "ptrue p2.b\n"
"cmp %x[height], #0x4\n"
- "ptrue p3.b\n"
"blt 6f\n"
"1:" // Main row loop: Head
- "mov x28, %x[in]\n"
- "mov x27, %x[width]\n"
- "cntw x26, ALL, MUL #6\n"
- "add x25, x28, %x[in_stride]\n"
+ "mov x27, %x[in]\n"
+ "mov x26, %x[out]\n"
+ "add x25, x27, %x[in_stride]\n"
"add x24, x25, %x[in_stride]\n"
"add x23, x24, %x[in_stride]\n"
- "cmp x27, x26\n"
"add %x[in], x23, %x[in_stride]\n"
- "mov x22, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
+ "mov x22, %x[width]\n"
+ "cntw x21, ALL, MUL #6\n"
+ "cmp x22, x21\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1w { z18.s }, p3/Z, [x28]\n"
- "ld1w { z17.s }, p3/Z, [x28, #1, MUL VL]\n"
- "mov x21, x22\n"
- "add x22, x22, %x[out_stride]\n"
- "ld1w { z19.s }, p3/Z, [x28, #2, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x25]\n"
- "zip1 z9.s, z18.s, z16.s\n"
- "zip2 z8.s, z18.s, z16.s\n"
- "ld1w { z16.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z18.s }, p3/Z, [x25, #2, MUL VL]\n"
- "zip1 z7.s, z17.s, z16.s\n"
- "zip2 z6.s, z17.s, z16.s\n"
- "ld1w { z17.s }, p3/Z, [x24]\n"
- "ld1w { z16.s }, p3/Z, [x23]\n"
- "zip1 z5.s, z19.s, z18.s\n"
- "zip2 z4.s, z19.s, z18.s\n"
- "ld1w { z18.s }, p3/Z, [x28, #3, MUL VL]\n"
- "ld1w { z21.s }, p3/Z, [x28, #4, MUL VL]\n"
- "zip1 z3.s, z17.s, z16.s\n"
- "zip2 z2.s, z17.s, z16.s\n"
- "ld1w { z20.s }, p3/Z, [x28, #5, MUL VL]\n"
- "ld1w { z17.s }, p3/Z, [x25, #3, MUL VL]\n"
- "mov x20, x22\n"
- "zip1 z1.s, z18.s, z17.s\n"
- "ld1w { z19.s }, p3/Z, [x25, #4, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x25, #5, MUL VL]\n"
- "zip2 z0.s, z18.s, z17.s\n"
- "zip1 z31.s, z21.s, z19.s\n"
- "ld1w { z18.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z17.s }, p3/Z, [x24, #2, MUL VL]\n"
- "zip2 z30.s, z21.s, z19.s\n"
- "zip1 z29.s, z20.s, z16.s\n"
- "ld1w { z19.s }, p3/Z, [x24, #3, MUL VL]\n"
- "ld1w { z28.s }, p3/Z, [x24, #4, MUL VL]\n"
- "zip2 z27.s, z20.s, z16.s\n"
- "sub x27, x27, x26\n"
- "ld1w { z26.s }, p3/Z, [x24, #5, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x23, #1, MUL VL]\n"
- "zip1 z25.s, z18.s, z16.s\n"
- "zip2 z24.s, z18.s, z16.s\n"
- "ld1w { z16.s }, p3/Z, [x23, #2, MUL VL]\n"
- "ld1w { z18.s }, p3/Z, [x23, #3, MUL VL]\n"
- "zip1 z23.s, z17.s, z16.s\n"
- "zip2 z22.s, z17.s, z16.s\n"
- "ld1w { z17.s }, p3/Z, [x23, #4, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x23, #5, MUL VL]\n"
- "st1w { z9.s }, p3, [x21]\n"
- "zip1 z21.s, z19.s, z18.s\n"
- "st1w { z8.s }, p3, [x21, #1, MUL VL]\n"
- "zip2 z20.s, z19.s, z18.s\n"
- "cmp x27, x26\n"
- "addvl x28, x28, #6\n"
- "st1w { z7.s }, p3, [x21, #2, MUL VL]\n"
+ "ld1w { z19.s }, p2/Z, [x27]\n"
+ "mov x20, x26\n"
+ "ld1w { z18.s }, p2/Z, [x27, #1, MUL VL]\n"
+ "add x26, x26, %x[out_stride]\n"
+ "ld1w { z21.s }, p2/Z, [x27, #2, MUL VL]\n"
+ "mov x19, x26\n"
+ "ld1w { z26.s }, p2/Z, [x27, #3, MUL VL]\n"
+ "add x26, x26, %x[out_stride]\n"
+ "ld1w { z25.s }, p2/Z, [x27, #4, MUL VL]\n"
+ "sub x22, x22, x21\n"
+ "ld1w { z24.s }, p2/Z, [x27, #5, MUL VL]\n"
+ "addvl x27, x27, #6\n"
+ "ld1w { z16.s }, p2/Z, [x25]\n"
+ "zip1 z23.s, z19.s, z16.s\n"
+ "ld1w { z17.s }, p2/Z, [x25, #1, MUL VL]\n"
+ "cmp x22, x21\n"
+ "zip2 z9.s, z19.s, z16.s\n"
+ "ld1w { z20.s }, p2/Z, [x25, #2, MUL VL]\n"
+ "ld1w { z19.s }, p2/Z, [x25, #3, MUL VL]\n"
+ "zip1 z8.s, z18.s, z17.s\n"
+ "ld1w { z16.s }, p2/Z, [x25, #4, MUL VL]\n"
+ "zip2 z7.s, z18.s, z17.s\n"
+ "ld1w { z18.s }, p2/Z, [x25, #5, MUL VL]\n"
"addvl x25, x25, #6\n"
+ "zip1 z6.s, z21.s, z20.s\n"
+ "ld1w { z17.s }, p2/Z, [x24]\n"
+ "zip2 z5.s, z21.s, z20.s\n"
+ "ld1w { z22.s }, p2/Z, [x24, #1, MUL VL]\n"
+ "zip1 z4.s, z26.s, z19.s\n"
+ "ld1w { z21.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "zip2 z3.s, z26.s, z19.s\n"
+ "ld1w { z2.s }, p2/Z, [x24, #3, MUL VL]\n"
+ "zip1 z1.s, z25.s, z16.s\n"
+ "ld1w { z0.s }, p2/Z, [x24, #4, MUL VL]\n"
+ "zip2 z31.s, z25.s, z16.s\n"
+ "ld1w { z30.s }, p2/Z, [x24, #5, MUL VL]\n"
"addvl x24, x24, #6\n"
- "zip1 z19.s, z28.s, z17.s\n"
- "st1w { z6.s }, p3, [x21, #3, MUL VL]\n"
+ "zip1 z29.s, z24.s, z18.s\n"
+ "ld1w { z16.s }, p2/Z, [x23]\n"
+ "zip2 z28.s, z24.s, z18.s\n"
+ "ld1w { z20.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z19.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "zip1 z27.s, z17.s, z16.s\n"
+ "ld1w { z18.s }, p2/Z, [x23, #3, MUL VL]\n"
+ "zip2 z26.s, z17.s, z16.s\n"
+ "ld1w { z17.s }, p2/Z, [x23, #4, MUL VL]\n"
+ "zip1 z25.s, z22.s, z20.s\n"
+ "ld1w { z16.s }, p2/Z, [x23, #5, MUL VL]\n"
"addvl x23, x23, #6\n"
- "zip2 z18.s, z28.s, z17.s\n"
- "zip1 z17.s, z26.s, z16.s\n"
- "st1w { z5.s }, p3, [x21, #4, MUL VL]\n"
- "zip2 z16.s, z26.s, z16.s\n"
- "add x22, x22, %x[out_stride]\n"
- "st1w { z4.s }, p3, [x21, #5, MUL VL]\n"
- "st1w { z3.s }, p3, [x21, #6, MUL VL]\n"
- "st1w { z2.s }, p3, [x21, #7, MUL VL]\n"
- "addvl x21, x21, #12\n"
- "st1w { z25.s }, p3, [x21, #-4, MUL VL]\n"
- "st1w { z24.s }, p3, [x21, #-3, MUL VL]\n"
- "st1w { z23.s }, p3, [x21, #-2, MUL VL]\n"
- "st1w { z22.s }, p3, [x21, #-1, MUL VL]\n"
- "st1w { z1.s }, p3, [x20]\n"
- "st1w { z0.s }, p3, [x20, #1, MUL VL]\n"
- "st1w { z31.s }, p3, [x20, #2, MUL VL]\n"
- "st1w { z30.s }, p3, [x20, #3, MUL VL]\n"
- "st1w { z29.s }, p3, [x20, #4, MUL VL]\n"
- "st1w { z27.s }, p3, [x20, #5, MUL VL]\n"
- "st1w { z21.s }, p3, [x20, #6, MUL VL]\n"
- "st1w { z20.s }, p3, [x20, #7, MUL VL]\n"
+ "zip2 z24.s, z22.s, z20.s\n"
+ "st1w { z23.s }, p2, [x20]\n"
+ "zip1 z23.s, z21.s, z19.s\n"
+ "st1w { z9.s }, p2, [x20, #1, MUL VL]\n"
+ "zip2 z22.s, z21.s, z19.s\n"
+ "st1w { z8.s }, p2, [x20, #2, MUL VL]\n"
+ "zip1 z21.s, z2.s, z18.s\n"
+ "st1w { z7.s }, p2, [x20, #3, MUL VL]\n"
+ "zip2 z20.s, z2.s, z18.s\n"
+ "st1w { z6.s }, p2, [x20, #4, MUL VL]\n"
+ "zip1 z19.s, z0.s, z17.s\n"
+ "st1w { z5.s }, p2, [x20, #5, MUL VL]\n"
+ "zip2 z18.s, z0.s, z17.s\n"
+ "st1w { z27.s }, p2, [x20, #6, MUL VL]\n"
+ "zip1 z17.s, z30.s, z16.s\n"
+ "st1w { z26.s }, p2, [x20, #7, MUL VL]\n"
"addvl x20, x20, #12\n"
- "st1w { z19.s }, p3, [x20, #-4, MUL VL]\n"
- "st1w { z18.s }, p3, [x20, #-3, MUL VL]\n"
- "st1w { z17.s }, p3, [x20, #-2, MUL VL]\n"
- "st1w { z16.s }, p3, [x20, #-1, MUL VL]\n"
+ "zip2 z16.s, z30.s, z16.s\n"
+ "st1w { z25.s }, p2, [x20, #-4, MUL VL]\n"
+ "st1w { z24.s }, p2, [x20, #-3, MUL VL]\n"
+ "st1w { z23.s }, p2, [x20, #-2, MUL VL]\n"
+ "st1w { z22.s }, p2, [x20, #-1, MUL VL]\n"
+ "st1w { z4.s }, p2, [x19]\n"
+ "st1w { z3.s }, p2, [x19, #1, MUL VL]\n"
+ "st1w { z1.s }, p2, [x19, #2, MUL VL]\n"
+ "st1w { z31.s }, p2, [x19, #3, MUL VL]\n"
+ "st1w { z29.s }, p2, [x19, #4, MUL VL]\n"
+ "st1w { z28.s }, p2, [x19, #5, MUL VL]\n"
+ "st1w { z21.s }, p2, [x19, #6, MUL VL]\n"
+ "st1w { z20.s }, p2, [x19, #7, MUL VL]\n"
+ "addvl x19, x19, #12\n"
+ "st1w { z19.s }, p2, [x19, #-4, MUL VL]\n"
+ "st1w { z18.s }, p2, [x19, #-3, MUL VL]\n"
+ "st1w { z17.s }, p2, [x19, #-2, MUL VL]\n"
+ "st1w { z16.s }, p2, [x19, #-1, MUL VL]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x27, 5f\n"
+ "cbz x22, 5f\n"
"4:" // Main row loop: Column loop
- "mov x20, x27\n"
- "whilelt p2.s, XZR, x20\n"
- "ld1w { z19.s }, p2/Z, [x28]\n"
- "ld1w { z18.s }, p2/Z, [x25]\n"
+ "mov x20, x22\n"
+ "mov x19, x26\n"
+ "whilelt p0.s, XZR, x20\n"
+ "ld1w { z18.s }, p0/Z, [x27]\n"
+ "ld1w { z16.s }, p0/Z, [x25]\n"
+ "zip1 z28.s, z18.s, z16.s\n"
+ "ld1w { z17.s }, p0/Z, [x24]\n"
"decw x20\n"
+ "zip2 z27.s, z18.s, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x23]\n"
"whilelt p1.s, XZR, x20\n"
- "ld1w { z17.s }, p1/Z, [x28, #1, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x25, #1, MUL VL]\n"
+ "zip1 z26.s, z17.s, z16.s\n"
+ "ld1w { z18.s }, p1/Z, [x27, #1, MUL VL]\n"
"decw x20\n"
+ "zip2 z25.s, z17.s, z16.s\n"
+ "ld1w { z16.s }, p1/Z, [x25, #1, MUL VL]\n"
"whilelt p0.s, XZR, x20\n"
- "ld1w { z22.s }, p0/Z, [x28, #2, MUL VL]\n"
- "ld1w { z21.s }, p0/Z, [x25, #2, MUL VL]\n"
- "ld1w { z28.s }, p2/Z, [x24]\n"
- "ld1w { z27.s }, p2/Z, [x23]\n"
- "mov x20, x22\n"
- "decd x27, ALL, MUL #6\n"
- "ld1w { z26.s }, p1/Z, [x24, #1, MUL VL]\n"
- "ld1w { z25.s }, p0/Z, [x24, #2, MUL VL]\n"
- "zip1 z20.s, z19.s, z18.s\n"
- "zip2 z19.s, z19.s, z18.s\n"
- "ld1w { z24.s }, p1/Z, [x23, #1, MUL VL]\n"
- "ld1w { z23.s }, p0/Z, [x23, #2, MUL VL]\n"
- "zip1 z18.s, z17.s, z16.s\n"
- "zip2 z17.s, z17.s, z16.s\n"
- "zip1 z16.s, z22.s, z21.s\n"
- "zip2 z22.s, z22.s, z21.s\n"
- "st1w { z20.s }, p3, [x20]\n"
- "cmp x27, #0x0\n"
- "zip1 z21.s, z28.s, z27.s\n"
- "zip2 z20.s, z28.s, z27.s\n"
- "st1w { z19.s }, p3, [x20, #1, MUL VL]\n"
- "addvl x28, x28, #3\n"
- "st1w { z18.s }, p3, [x20, #2, MUL VL]\n"
+ "zip1 z24.s, z18.s, z16.s\n"
+ "ld1w { z17.s }, p0/Z, [x27, #2, MUL VL]\n"
+ "addvl x27, x27, #3\n"
+ "zip2 z23.s, z18.s, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x25, #2, MUL VL]\n"
"addvl x25, x25, #3\n"
+ "zip1 z22.s, z17.s, z16.s\n"
+ "ld1w { z18.s }, p1/Z, [x24, #1, MUL VL]\n"
+ "add x26, x26, %x[out_stride]\n"
+ "zip2 z21.s, z17.s, z16.s\n"
+ "ld1w { z20.s }, p0/Z, [x24, #2, MUL VL]\n"
"addvl x24, x24, #3\n"
- "zip1 z19.s, z26.s, z24.s\n"
- "st1w { z17.s }, p3, [x20, #3, MUL VL]\n"
+ "ld1w { z17.s }, p1/Z, [x23, #1, MUL VL]\n"
+ "zip1 z19.s, z18.s, z17.s\n"
+ "ld1w { z16.s }, p0/Z, [x23, #2, MUL VL]\n"
"addvl x23, x23, #3\n"
- "zip2 z18.s, z26.s, z24.s\n"
- "zip1 z17.s, z25.s, z23.s\n"
- "st1w { z16.s }, p3, [x20, #4, MUL VL]\n"
- "zip2 z16.s, z25.s, z23.s\n"
- "add x22, x22, %x[out_stride]\n"
- "st1w { z22.s }, p3, [x20, #5, MUL VL]\n"
- "st1w { z21.s }, p3, [x20, #6, MUL VL]\n"
- "st1w { z20.s }, p3, [x20, #7, MUL VL]\n"
- "addvl x20, x20, #12\n"
- "st1w { z19.s }, p3, [x20, #-4, MUL VL]\n"
- "st1w { z18.s }, p3, [x20, #-3, MUL VL]\n"
- "st1w { z17.s }, p3, [x20, #-2, MUL VL]\n"
- "st1w { z16.s }, p3, [x20, #-1, MUL VL]\n"
+ "zip2 z18.s, z18.s, z17.s\n"
+ "st1w { z28.s }, p2, [x19]\n"
+ "decd x22, ALL, MUL #6\n"
+ "zip1 z17.s, z20.s, z16.s\n"
+ "st1w { z27.s }, p2, [x19, #1, MUL VL]\n"
+ "cmp x22, #0x0\n"
+ "zip2 z16.s, z20.s, z16.s\n"
+ "st1w { z24.s }, p2, [x19, #2, MUL VL]\n"
+ "st1w { z23.s }, p2, [x19, #3, MUL VL]\n"
+ "st1w { z22.s }, p2, [x19, #4, MUL VL]\n"
+ "st1w { z21.s }, p2, [x19, #5, MUL VL]\n"
+ "st1w { z26.s }, p2, [x19, #6, MUL VL]\n"
+ "st1w { z25.s }, p2, [x19, #7, MUL VL]\n"
+ "addvl x19, x19, #12\n"
+ "st1w { z19.s }, p2, [x19, #-4, MUL VL]\n"
+ "st1w { z18.s }, p2, [x19, #-3, MUL VL]\n"
+ "st1w { z17.s }, p2, [x19, #-2, MUL VL]\n"
+ "st1w { z16.s }, p2, [x19, #-1, MUL VL]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp %x[height], #0x4\n"
"addvl %x[out], %x[out], #12\n"
+ "cmp %x[height], #0x4\n"
"bge 1b\n"
"cbz %x[height], 12f\n"
"6:" // Main loop skip
+
"7:" // Tail row loop: Head
- "mov x28, %x[in]\n"
- "mov x21, %x[width]\n"
- "cntw x20, ALL, MUL #6\n"
- "add x25, x28, %x[in_stride]\n"
- "cmp %x[height], #0x1\n"
+ "mov x27, %x[in]\n"
+ "mov x26, %x[out]\n"
+ "add x25, x27, %x[in_stride]\n"
"add %x[in], x25, %x[in_stride]\n"
+ "cmp %x[height], #0x1\n"
"csel x25, x25, %x[pad_row], GT\n"
- "cmp x21, x20\n"
- "mov x22, %x[out]\n"
"sub %x[height], %x[height], #0x2\n"
+ "mov x20, %x[width]\n"
+ "cntw x19, ALL, MUL #6\n"
+ "cmp x20, x19\n"
"blt 9f\n"
"8:" // Tail row loop: Unroll column loop
- "ld1w { z17.s }, p3/Z, [x28]\n"
- "ld1w { z19.s }, p3/Z, [x28, #1, MUL VL]\n"
- "sub x21, x21, x20\n"
- "cmp x21, x20\n"
- "ld1w { z18.s }, p3/Z, [x28, #2, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x25]\n"
- "zip1 z28.s, z17.s, z16.s\n"
- "zip2 z20.s, z17.s, z16.s\n"
- "ld1w { z17.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x25, #2, MUL VL]\n"
- "zip1 z27.s, z19.s, z17.s\n"
- "zip2 z26.s, z19.s, z17.s\n"
- "ld1w { z19.s }, p3/Z, [x28, #3, MUL VL]\n"
- "ld1w { z25.s }, p3/Z, [x28, #4, MUL VL]\n"
- "zip1 z24.s, z18.s, z16.s\n"
- "zip2 z23.s, z18.s, z16.s\n"
- "ld1w { z22.s }, p3/Z, [x28, #5, MUL VL]\n"
- "ld1w { z18.s }, p3/Z, [x25, #3, MUL VL]\n"
- "addvl x28, x28, #6\n"
- "zip1 z21.s, z19.s, z18.s\n"
- "ld1w { z17.s }, p3/Z, [x25, #4, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x25, #5, MUL VL]\n"
- "st1w { z28.s }, p3, [x22]\n"
+ "ld1w { z19.s }, p2/Z, [x27]\n"
+ "sub x20, x20, x19\n"
+ "ld1w { z18.s }, p2/Z, [x27, #1, MUL VL]\n"
+ "cmp x20, x19\n"
+ "ld1w { z29.s }, p2/Z, [x27, #2, MUL VL]\n"
+ "ld1w { z28.s }, p2/Z, [x27, #3, MUL VL]\n"
+ "ld1w { z27.s }, p2/Z, [x27, #4, MUL VL]\n"
+ "ld1w { z26.s }, p2/Z, [x27, #5, MUL VL]\n"
+ "addvl x27, x27, #6\n"
+ "ld1w { z16.s }, p2/Z, [x25]\n"
+ "zip1 z25.s, z19.s, z16.s\n"
+ "ld1w { z17.s }, p2/Z, [x25, #1, MUL VL]\n"
+ "zip2 z24.s, z19.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x25, #2, MUL VL]\n"
+ "ld1w { z23.s }, p2/Z, [x25, #3, MUL VL]\n"
+ "zip1 z20.s, z18.s, z17.s\n"
+ "ld1w { z22.s }, p2/Z, [x25, #4, MUL VL]\n"
+ "zip2 z19.s, z18.s, z17.s\n"
+ "ld1w { z21.s }, p2/Z, [x25, #5, MUL VL]\n"
"addvl x25, x25, #6\n"
- "st1w { z20.s }, p3, [x22, #1, MUL VL]\n"
- "zip2 z20.s, z19.s, z18.s\n"
- "zip1 z19.s, z25.s, z17.s\n"
- "st1w { z27.s }, p3, [x22, #2, MUL VL]\n"
- "zip2 z18.s, z25.s, z17.s\n"
- "zip1 z17.s, z22.s, z16.s\n"
- "st1w { z26.s }, p3, [x22, #3, MUL VL]\n"
- "zip2 z16.s, z22.s, z16.s\n"
- "st1w { z24.s }, p3, [x22, #4, MUL VL]\n"
- "st1w { z23.s }, p3, [x22, #5, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1w { z21.s }, p3, [x22]\n"
- "st1w { z20.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z19.s }, p3, [x22, #2, MUL VL]\n"
- "st1w { z18.s }, p3, [x22, #3, MUL VL]\n"
- "st1w { z17.s }, p3, [x22, #4, MUL VL]\n"
- "st1w { z16.s }, p3, [x22, #5, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "zip1 z18.s, z29.s, z16.s\n"
+ "st1w { z25.s }, p2, [x26]\n"
+ "zip2 z17.s, z29.s, z16.s\n"
+ "st1w { z24.s }, p2, [x26, #1, MUL VL]\n"
+ "zip1 z16.s, z28.s, z23.s\n"
+ "st1w { z20.s }, p2, [x26, #2, MUL VL]\n"
+ "zip2 z20.s, z28.s, z23.s\n"
+ "st1w { z19.s }, p2, [x26, #3, MUL VL]\n"
+ "zip1 z19.s, z27.s, z22.s\n"
+ "st1w { z18.s }, p2, [x26, #4, MUL VL]\n"
+ "zip2 z18.s, z27.s, z22.s\n"
+ "st1w { z17.s }, p2, [x26, #5, MUL VL]\n"
+ "add x26, x26, %x[out_stride]\n"
+ "zip1 z17.s, z26.s, z21.s\n"
+ "st1w { z16.s }, p2, [x26]\n"
+ "zip2 z16.s, z26.s, z21.s\n"
+ "st1w { z20.s }, p2, [x26, #1, MUL VL]\n"
+ "st1w { z19.s }, p2, [x26, #2, MUL VL]\n"
+ "st1w { z18.s }, p2, [x26, #3, MUL VL]\n"
+ "st1w { z17.s }, p2, [x26, #4, MUL VL]\n"
+ "st1w { z16.s }, p2, [x26, #5, MUL VL]\n"
+ "add x26, x26, %x[out_stride]\n"
"bge 8b\n"
"9:" // Tail row loop: Unroll column loop skip
- "cbz x21, 11f\n"
+ "cbz x20, 11f\n"
"10:" // Tail row loop: Column loop
- "mov x20, x21\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z20.s }, p0/Z, [x28]\n"
- "ld1w { z19.s }, p0/Z, [x25]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z18.s }, p0/Z, [x28, #1, MUL VL]\n"
- "ld1w { z17.s }, p0/Z, [x25, #1, MUL VL]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z22.s }, p0/Z, [x28, #2, MUL VL]\n"
+ "mov x19, x20\n"
+ "decd x20, ALL, MUL #6\n"
+ "whilelt p0.s, XZR, x19\n"
+ "ld1w { z17.s }, p0/Z, [x27]\n"
+ "ld1w { z16.s }, p0/Z, [x25]\n"
+ "zip1 z22.s, z17.s, z16.s\n"
+ "decw x19\n"
+ "zip2 z21.s, z17.s, z16.s\n"
+ "whilelt p0.s, XZR, x19\n"
+ "ld1w { z17.s }, p0/Z, [x27, #1, MUL VL]\n"
+ "decw x19\n"
+ "ld1w { z16.s }, p0/Z, [x25, #1, MUL VL]\n"
+ "zip1 z20.s, z17.s, z16.s\n"
+ "whilelt p0.s, XZR, x19\n"
+ "ld1w { z19.s }, p0/Z, [x27, #2, MUL VL]\n"
+ "zip2 z18.s, z17.s, z16.s\n"
+ "addvl x27, x27, #3\n"
"ld1w { z16.s }, p0/Z, [x25, #2, MUL VL]\n"
- "decd x21, ALL, MUL #6\n"
- "cmp x21, #0x0\n"
- "zip1 z21.s, z20.s, z19.s\n"
- "zip2 z20.s, z20.s, z19.s\n"
- "addvl x28, x28, #3\n"
+ "zip1 z17.s, z19.s, z16.s\n"
+ "st1w { z22.s }, p2, [x26]\n"
"addvl x25, x25, #3\n"
- "zip1 z19.s, z18.s, z17.s\n"
- "zip2 z18.s, z18.s, z17.s\n"
- "zip1 z17.s, z22.s, z16.s\n"
- "zip2 z16.s, z22.s, z16.s\n"
- "st1w { z21.s }, p3, [x22]\n"
- "st1w { z20.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z19.s }, p3, [x22, #2, MUL VL]\n"
- "st1w { z18.s }, p3, [x22, #3, MUL VL]\n"
- "st1w { z17.s }, p3, [x22, #4, MUL VL]\n"
- "st1w { z16.s }, p3, [x22, #5, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "zip2 z16.s, z19.s, z16.s\n"
+ "st1w { z21.s }, p2, [x26, #1, MUL VL]\n"
+ "cmp x20, #0x0\n"
+ "st1w { z20.s }, p2, [x26, #2, MUL VL]\n"
+ "st1w { z18.s }, p2, [x26, #3, MUL VL]\n"
+ "st1w { z17.s }, p2, [x26, #4, MUL VL]\n"
+ "st1w { z16.s }, p2, [x26, #5, MUL VL]\n"
+ "add x26, x26, %x[out_stride]\n"
"bgt 10b\n"
"11:" // Tail row loop: Column loop skip
- "cmp %x[height], #0x1\n"
"addvl %x[out], %x[out], #6\n"
+ "cmp %x[height], #0x1\n"
"bge 7b\n"
"12:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL.hpp
index 07147acd8e..56b7ed6eda 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -34,52 +34,52 @@ void sve_transpose_interleave_8VL(uint32_t *out, const uint32_t *in, size_t widt
size_t out_stride = 8 * height * get_vector_length<uint8_t>();
__asm__ __volatile__(
- "cmp %x[height], #0x2\n"
"ptrue p1.b\n"
+ "cmp %x[height], #0x2\n"
"blt 6f\n"
"1:" // Main row loop: Head
- "mov x26, %x[in]\n"
- "mov x25, %x[width]\n"
- "cntw x24, ALL, MUL #16\n"
- "add x23, x26, %x[in_stride]\n"
- "cmp x25, x24\n"
+ "mov x25, %x[in]\n"
+ "mov x24, %x[out]\n"
+ "add x23, x25, %x[in_stride]\n"
"add %x[in], x23, %x[in_stride]\n"
- "mov x22, %x[out]\n"
"sub %x[height], %x[height], #0x2\n"
+ "mov x22, %x[width]\n"
+ "cntw x21, ALL, MUL #16\n"
+ "cmp x22, x21\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1w { z15.s }, p1/Z, [x26]\n"
- "ld1w { z14.s }, p1/Z, [x26, #1, MUL VL]\n"
- "mov x21, x22\n"
- "add x22, x22, %x[out_stride]\n"
- "ld1w { z13.s }, p1/Z, [x26, #2, MUL VL]\n"
- "ld1w { z12.s }, p1/Z, [x26, #3, MUL VL]\n"
- "mov x20, x22\n"
- "sub x25, x25, x24\n"
- "ld1w { z11.s }, p1/Z, [x26, #4, MUL VL]\n"
- "ld1w { z10.s }, p1/Z, [x26, #5, MUL VL]\n"
- "cmp x25, x24\n"
- "add x22, x22, %x[out_stride]\n"
- "ld1w { z9.s }, p1/Z, [x26, #6, MUL VL]\n"
- "ld1w { z8.s }, p1/Z, [x26, #7, MUL VL]\n"
- "addvl x26, x26, #16\n"
+ "ld1w { z15.s }, p1/Z, [x25]\n"
+ "mov x20, x24\n"
+ "ld1w { z14.s }, p1/Z, [x25, #1, MUL VL]\n"
+ "add x24, x24, %x[out_stride]\n"
+ "ld1w { z13.s }, p1/Z, [x25, #2, MUL VL]\n"
+ "mov x19, x24\n"
+ "ld1w { z12.s }, p1/Z, [x25, #3, MUL VL]\n"
+ "add x24, x24, %x[out_stride]\n"
+ "ld1w { z11.s }, p1/Z, [x25, #4, MUL VL]\n"
+ "sub x22, x22, x21\n"
+ "ld1w { z10.s }, p1/Z, [x25, #5, MUL VL]\n"
+ "cmp x22, x21\n"
+ "ld1w { z9.s }, p1/Z, [x25, #6, MUL VL]\n"
+ "ld1w { z8.s }, p1/Z, [x25, #7, MUL VL]\n"
+ "addvl x25, x25, #16\n"
"ld1w { z7.s }, p1/Z, [x23]\n"
- "ld1w { z6.s }, p1/Z, [x23, #1, MUL VL]\n"
- "ld1w { z5.s }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1w { z4.s }, p1/Z, [x23, #3, MUL VL]\n"
- "ld1w { z3.s }, p1/Z, [x23, #4, MUL VL]\n"
- "ld1w { z2.s }, p1/Z, [x23, #5, MUL VL]\n"
- "ld1w { z1.s }, p1/Z, [x23, #6, MUL VL]\n"
- "ld1w { z0.s }, p1/Z, [x23, #7, MUL VL]\n"
+ "ld1w { z6.s }, p1/Z, [x25, #-8, MUL VL]\n"
+ "ld1w { z5.s }, p1/Z, [x25, #-7, MUL VL]\n"
+ "ld1w { z4.s }, p1/Z, [x25, #-6, MUL VL]\n"
+ "ld1w { z3.s }, p1/Z, [x25, #-5, MUL VL]\n"
+ "ld1w { z2.s }, p1/Z, [x25, #-4, MUL VL]\n"
+ "ld1w { z1.s }, p1/Z, [x25, #-3, MUL VL]\n"
+ "ld1w { z0.s }, p1/Z, [x25, #-2, MUL VL]\n"
+ "ld1w { z31.s }, p1/Z, [x25, #-1, MUL VL]\n"
+ "ld1w { z30.s }, p1/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z29.s }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z28.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z27.s }, p1/Z, [x23, #4, MUL VL]\n"
+ "ld1w { z26.s }, p1/Z, [x23, #5, MUL VL]\n"
+ "ld1w { z25.s }, p1/Z, [x23, #6, MUL VL]\n"
+ "ld1w { z24.s }, p1/Z, [x23, #7, MUL VL]\n"
"addvl x23, x23, #16\n"
- "ld1w { z31.s }, p1/Z, [x26, #-8, MUL VL]\n"
- "ld1w { z30.s }, p1/Z, [x26, #-7, MUL VL]\n"
- "ld1w { z29.s }, p1/Z, [x26, #-6, MUL VL]\n"
- "ld1w { z28.s }, p1/Z, [x26, #-5, MUL VL]\n"
- "ld1w { z27.s }, p1/Z, [x26, #-4, MUL VL]\n"
- "ld1w { z26.s }, p1/Z, [x26, #-3, MUL VL]\n"
- "ld1w { z25.s }, p1/Z, [x26, #-2, MUL VL]\n"
- "ld1w { z24.s }, p1/Z, [x26, #-1, MUL VL]\n"
"ld1w { z23.s }, p1/Z, [x23, #-8, MUL VL]\n"
"ld1w { z22.s }, p1/Z, [x23, #-7, MUL VL]\n"
"ld1w { z21.s }, p1/Z, [x23, #-6, MUL VL]\n"
@@ -88,203 +88,204 @@ void sve_transpose_interleave_8VL(uint32_t *out, const uint32_t *in, size_t widt
"ld1w { z18.s }, p1/Z, [x23, #-3, MUL VL]\n"
"ld1w { z17.s }, p1/Z, [x23, #-2, MUL VL]\n"
"ld1w { z16.s }, p1/Z, [x23, #-1, MUL VL]\n"
- "st1w { z15.s }, p1, [x21]\n"
- "st1w { z14.s }, p1, [x21, #1, MUL VL]\n"
- "st1w { z13.s }, p1, [x21, #2, MUL VL]\n"
- "st1w { z12.s }, p1, [x21, #3, MUL VL]\n"
- "st1w { z11.s }, p1, [x21, #4, MUL VL]\n"
- "st1w { z10.s }, p1, [x21, #5, MUL VL]\n"
- "st1w { z9.s }, p1, [x21, #6, MUL VL]\n"
- "st1w { z8.s }, p1, [x21, #7, MUL VL]\n"
- "addvl x21, x21, #16\n"
- "st1w { z7.s }, p1, [x21, #-8, MUL VL]\n"
- "st1w { z6.s }, p1, [x21, #-7, MUL VL]\n"
- "st1w { z5.s }, p1, [x21, #-6, MUL VL]\n"
- "st1w { z4.s }, p1, [x21, #-5, MUL VL]\n"
- "st1w { z3.s }, p1, [x21, #-4, MUL VL]\n"
- "st1w { z2.s }, p1, [x21, #-3, MUL VL]\n"
- "st1w { z1.s }, p1, [x21, #-2, MUL VL]\n"
- "st1w { z0.s }, p1, [x21, #-1, MUL VL]\n"
- "st1w { z31.s }, p1, [x20]\n"
- "st1w { z30.s }, p1, [x20, #1, MUL VL]\n"
- "st1w { z29.s }, p1, [x20, #2, MUL VL]\n"
- "st1w { z28.s }, p1, [x20, #3, MUL VL]\n"
- "st1w { z27.s }, p1, [x20, #4, MUL VL]\n"
- "st1w { z26.s }, p1, [x20, #5, MUL VL]\n"
- "st1w { z25.s }, p1, [x20, #6, MUL VL]\n"
- "st1w { z24.s }, p1, [x20, #7, MUL VL]\n"
+ "st1w { z15.s }, p1, [x20]\n"
+ "st1w { z14.s }, p1, [x20, #1, MUL VL]\n"
+ "st1w { z13.s }, p1, [x20, #2, MUL VL]\n"
+ "st1w { z12.s }, p1, [x20, #3, MUL VL]\n"
+ "st1w { z11.s }, p1, [x20, #4, MUL VL]\n"
+ "st1w { z10.s }, p1, [x20, #5, MUL VL]\n"
+ "st1w { z9.s }, p1, [x20, #6, MUL VL]\n"
+ "st1w { z8.s }, p1, [x20, #7, MUL VL]\n"
"addvl x20, x20, #16\n"
- "st1w { z23.s }, p1, [x20, #-8, MUL VL]\n"
- "st1w { z22.s }, p1, [x20, #-7, MUL VL]\n"
- "st1w { z21.s }, p1, [x20, #-6, MUL VL]\n"
- "st1w { z20.s }, p1, [x20, #-5, MUL VL]\n"
- "st1w { z19.s }, p1, [x20, #-4, MUL VL]\n"
- "st1w { z18.s }, p1, [x20, #-3, MUL VL]\n"
- "st1w { z17.s }, p1, [x20, #-2, MUL VL]\n"
- "st1w { z16.s }, p1, [x20, #-1, MUL VL]\n"
+ "st1w { z7.s }, p1, [x20, #-8, MUL VL]\n"
+ "st1w { z30.s }, p1, [x20, #-7, MUL VL]\n"
+ "st1w { z29.s }, p1, [x20, #-6, MUL VL]\n"
+ "st1w { z28.s }, p1, [x20, #-5, MUL VL]\n"
+ "st1w { z27.s }, p1, [x20, #-4, MUL VL]\n"
+ "st1w { z26.s }, p1, [x20, #-3, MUL VL]\n"
+ "st1w { z25.s }, p1, [x20, #-2, MUL VL]\n"
+ "st1w { z24.s }, p1, [x20, #-1, MUL VL]\n"
+ "st1w { z6.s }, p1, [x19]\n"
+ "st1w { z5.s }, p1, [x19, #1, MUL VL]\n"
+ "st1w { z4.s }, p1, [x19, #2, MUL VL]\n"
+ "st1w { z3.s }, p1, [x19, #3, MUL VL]\n"
+ "st1w { z2.s }, p1, [x19, #4, MUL VL]\n"
+ "st1w { z1.s }, p1, [x19, #5, MUL VL]\n"
+ "st1w { z0.s }, p1, [x19, #6, MUL VL]\n"
+ "st1w { z31.s }, p1, [x19, #7, MUL VL]\n"
+ "addvl x19, x19, #16\n"
+ "st1w { z23.s }, p1, [x19, #-8, MUL VL]\n"
+ "st1w { z22.s }, p1, [x19, #-7, MUL VL]\n"
+ "st1w { z21.s }, p1, [x19, #-6, MUL VL]\n"
+ "st1w { z20.s }, p1, [x19, #-5, MUL VL]\n"
+ "st1w { z19.s }, p1, [x19, #-4, MUL VL]\n"
+ "st1w { z18.s }, p1, [x19, #-3, MUL VL]\n"
+ "st1w { z17.s }, p1, [x19, #-2, MUL VL]\n"
+ "st1w { z16.s }, p1, [x19, #-1, MUL VL]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x25, 5f\n"
+ "cbz x22, 5f\n"
"4:" // Main row loop: Column loop
- "mov x20, x25\n"
+ "mov x20, x22\n"
+ "mov x19, x24\n"
"whilelt p0.s, XZR, x20\n"
- "ld1w { z31.s }, p0/Z, [x26]\n"
+ "ld1w { z31.s }, p0/Z, [x25]\n"
"ld1w { z30.s }, p0/Z, [x23]\n"
"decw x20\n"
+ "add x24, x24, %x[out_stride]\n"
"whilelt p0.s, XZR, x20\n"
- "ld1w { z29.s }, p0/Z, [x26, #1, MUL VL]\n"
+ "ld1w { z29.s }, p0/Z, [x25, #1, MUL VL]\n"
"ld1w { z28.s }, p0/Z, [x23, #1, MUL VL]\n"
"decw x20\n"
+ "decw x22, ALL, MUL #8\n"
"whilelt p0.s, XZR, x20\n"
- "ld1w { z27.s }, p0/Z, [x26, #2, MUL VL]\n"
+ "ld1w { z27.s }, p0/Z, [x25, #2, MUL VL]\n"
"ld1w { z26.s }, p0/Z, [x23, #2, MUL VL]\n"
"decw x20\n"
"whilelt p0.s, XZR, x20\n"
- "ld1w { z25.s }, p0/Z, [x26, #3, MUL VL]\n"
- "ld1w { z24.s }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z25.s }, p0/Z, [x25, #3, MUL VL]\n"
"decw x20\n"
+ "ld1w { z24.s }, p0/Z, [x23, #3, MUL VL]\n"
"whilelt p0.s, XZR, x20\n"
- "ld1w { z23.s }, p0/Z, [x26, #4, MUL VL]\n"
- "ld1w { z22.s }, p0/Z, [x23, #4, MUL VL]\n"
"decw x20\n"
+ "ld1w { z23.s }, p0/Z, [x25, #4, MUL VL]\n"
+ "ld1w { z22.s }, p0/Z, [x23, #4, MUL VL]\n"
"whilelt p0.s, XZR, x20\n"
- "ld1w { z21.s }, p0/Z, [x26, #5, MUL VL]\n"
- "ld1w { z20.s }, p0/Z, [x23, #5, MUL VL]\n"
"decw x20\n"
+ "ld1w { z21.s }, p0/Z, [x25, #5, MUL VL]\n"
+ "ld1w { z20.s }, p0/Z, [x23, #5, MUL VL]\n"
"whilelt p0.s, XZR, x20\n"
- "ld1w { z19.s }, p0/Z, [x26, #6, MUL VL]\n"
- "ld1w { z18.s }, p0/Z, [x23, #6, MUL VL]\n"
"decw x20\n"
+ "ld1w { z19.s }, p0/Z, [x25, #6, MUL VL]\n"
+ "ld1w { z18.s }, p0/Z, [x23, #6, MUL VL]\n"
"whilelt p0.s, XZR, x20\n"
- "ld1w { z17.s }, p0/Z, [x26, #7, MUL VL]\n"
+ "cmp x22, #0x0\n"
+ "ld1w { z17.s }, p0/Z, [x25, #7, MUL VL]\n"
"ld1w { z16.s }, p0/Z, [x23, #7, MUL VL]\n"
- "mov x20, x22\n"
- "decw x25, ALL, MUL #8\n"
- "st1w { z31.s }, p1, [x20]\n"
- "st1w { z29.s }, p1, [x20, #1, MUL VL]\n"
- "cmp x25, #0x0\n"
- "addvl x26, x26, #8\n"
- "st1w { z27.s }, p1, [x20, #2, MUL VL]\n"
+ "addvl x25, x25, #8\n"
+ "st1w { z31.s }, p1, [x19]\n"
"addvl x23, x23, #8\n"
- "add x22, x22, %x[out_stride]\n"
- "st1w { z25.s }, p1, [x20, #3, MUL VL]\n"
- "st1w { z23.s }, p1, [x20, #4, MUL VL]\n"
- "st1w { z21.s }, p1, [x20, #5, MUL VL]\n"
- "st1w { z19.s }, p1, [x20, #6, MUL VL]\n"
- "st1w { z17.s }, p1, [x20, #7, MUL VL]\n"
- "addvl x20, x20, #16\n"
- "st1w { z30.s }, p1, [x20, #-8, MUL VL]\n"
- "st1w { z28.s }, p1, [x20, #-7, MUL VL]\n"
- "st1w { z26.s }, p1, [x20, #-6, MUL VL]\n"
- "st1w { z24.s }, p1, [x20, #-5, MUL VL]\n"
- "st1w { z22.s }, p1, [x20, #-4, MUL VL]\n"
- "st1w { z20.s }, p1, [x20, #-3, MUL VL]\n"
- "st1w { z18.s }, p1, [x20, #-2, MUL VL]\n"
- "st1w { z16.s }, p1, [x20, #-1, MUL VL]\n"
+ "st1w { z29.s }, p1, [x19, #1, MUL VL]\n"
+ "st1w { z27.s }, p1, [x19, #2, MUL VL]\n"
+ "st1w { z25.s }, p1, [x19, #3, MUL VL]\n"
+ "st1w { z23.s }, p1, [x19, #4, MUL VL]\n"
+ "st1w { z21.s }, p1, [x19, #5, MUL VL]\n"
+ "st1w { z19.s }, p1, [x19, #6, MUL VL]\n"
+ "st1w { z17.s }, p1, [x19, #7, MUL VL]\n"
+ "addvl x19, x19, #16\n"
+ "st1w { z30.s }, p1, [x19, #-8, MUL VL]\n"
+ "st1w { z28.s }, p1, [x19, #-7, MUL VL]\n"
+ "st1w { z26.s }, p1, [x19, #-6, MUL VL]\n"
+ "st1w { z24.s }, p1, [x19, #-5, MUL VL]\n"
+ "st1w { z22.s }, p1, [x19, #-4, MUL VL]\n"
+ "st1w { z20.s }, p1, [x19, #-3, MUL VL]\n"
+ "st1w { z18.s }, p1, [x19, #-2, MUL VL]\n"
+ "st1w { z16.s }, p1, [x19, #-1, MUL VL]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp %x[height], #0x2\n"
"addvl %x[out], %x[out], #16\n"
+ "cmp %x[height], #0x2\n"
"bge 1b\n"
"cbz %x[height], 12f\n"
"6:" // Main loop skip
+
"7:" // Tail row loop: Head
- "mov x21, %x[width]\n"
- "cntw x20, ALL, MUL #16\n"
- "mov x26, %x[in]\n"
- "cmp x21, x20\n"
- "add %x[in], x26, %x[in_stride]\n"
- "mov x22, %x[out]\n"
+ "mov x25, %x[in]\n"
+ "mov x24, %x[out]\n"
+ "add %x[in], x25, %x[in_stride]\n"
"sub %x[height], %x[height], #0x1\n"
+ "mov x20, %x[width]\n"
+ "cntw x19, ALL, MUL #16\n"
+ "cmp x20, x19\n"
"blt 9f\n"
"8:" // Tail row loop: Unroll column loop
- "ld1w { z31.s }, p1/Z, [x26]\n"
- "ld1w { z30.s }, p1/Z, [x26, #1, MUL VL]\n"
- "sub x21, x21, x20\n"
- "cmp x21, x20\n"
- "ld1w { z29.s }, p1/Z, [x26, #2, MUL VL]\n"
- "ld1w { z28.s }, p1/Z, [x26, #3, MUL VL]\n"
- "ld1w { z27.s }, p1/Z, [x26, #4, MUL VL]\n"
- "ld1w { z26.s }, p1/Z, [x26, #5, MUL VL]\n"
- "ld1w { z25.s }, p1/Z, [x26, #6, MUL VL]\n"
- "ld1w { z24.s }, p1/Z, [x26, #7, MUL VL]\n"
- "addvl x26, x26, #16\n"
- "ld1w { z23.s }, p1/Z, [x26, #-8, MUL VL]\n"
- "ld1w { z22.s }, p1/Z, [x26, #-7, MUL VL]\n"
- "ld1w { z21.s }, p1/Z, [x26, #-6, MUL VL]\n"
- "ld1w { z20.s }, p1/Z, [x26, #-5, MUL VL]\n"
- "ld1w { z19.s }, p1/Z, [x26, #-4, MUL VL]\n"
- "ld1w { z18.s }, p1/Z, [x26, #-3, MUL VL]\n"
- "ld1w { z17.s }, p1/Z, [x26, #-2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x26, #-1, MUL VL]\n"
- "st1w { z31.s }, p1, [x22]\n"
- "st1w { z30.s }, p1, [x22, #1, MUL VL]\n"
- "st1w { z29.s }, p1, [x22, #2, MUL VL]\n"
- "st1w { z28.s }, p1, [x22, #3, MUL VL]\n"
- "st1w { z27.s }, p1, [x22, #4, MUL VL]\n"
- "st1w { z26.s }, p1, [x22, #5, MUL VL]\n"
- "st1w { z25.s }, p1, [x22, #6, MUL VL]\n"
- "st1w { z24.s }, p1, [x22, #7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1w { z23.s }, p1, [x22]\n"
- "st1w { z22.s }, p1, [x22, #1, MUL VL]\n"
- "st1w { z21.s }, p1, [x22, #2, MUL VL]\n"
- "st1w { z20.s }, p1, [x22, #3, MUL VL]\n"
- "st1w { z19.s }, p1, [x22, #4, MUL VL]\n"
- "st1w { z18.s }, p1, [x22, #5, MUL VL]\n"
- "st1w { z17.s }, p1, [x22, #6, MUL VL]\n"
- "st1w { z16.s }, p1, [x22, #7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "ld1w { z31.s }, p1/Z, [x25]\n"
+ "sub x20, x20, x19\n"
+ "ld1w { z30.s }, p1/Z, [x25, #1, MUL VL]\n"
+ "cmp x20, x19\n"
+ "ld1w { z29.s }, p1/Z, [x25, #2, MUL VL]\n"
+ "ld1w { z28.s }, p1/Z, [x25, #3, MUL VL]\n"
+ "ld1w { z27.s }, p1/Z, [x25, #4, MUL VL]\n"
+ "ld1w { z26.s }, p1/Z, [x25, #5, MUL VL]\n"
+ "ld1w { z25.s }, p1/Z, [x25, #6, MUL VL]\n"
+ "ld1w { z24.s }, p1/Z, [x25, #7, MUL VL]\n"
+ "addvl x25, x25, #16\n"
+ "ld1w { z23.s }, p1/Z, [x25, #-8, MUL VL]\n"
+ "ld1w { z22.s }, p1/Z, [x25, #-7, MUL VL]\n"
+ "ld1w { z21.s }, p1/Z, [x25, #-6, MUL VL]\n"
+ "ld1w { z20.s }, p1/Z, [x25, #-5, MUL VL]\n"
+ "ld1w { z19.s }, p1/Z, [x25, #-4, MUL VL]\n"
+ "ld1w { z18.s }, p1/Z, [x25, #-3, MUL VL]\n"
+ "ld1w { z17.s }, p1/Z, [x25, #-2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x25, #-1, MUL VL]\n"
+ "st1w { z31.s }, p1, [x24]\n"
+ "st1w { z30.s }, p1, [x24, #1, MUL VL]\n"
+ "st1w { z29.s }, p1, [x24, #2, MUL VL]\n"
+ "st1w { z28.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z27.s }, p1, [x24, #4, MUL VL]\n"
+ "st1w { z26.s }, p1, [x24, #5, MUL VL]\n"
+ "st1w { z25.s }, p1, [x24, #6, MUL VL]\n"
+ "st1w { z24.s }, p1, [x24, #7, MUL VL]\n"
+ "add x24, x24, %x[out_stride]\n"
+ "st1w { z23.s }, p1, [x24]\n"
+ "st1w { z22.s }, p1, [x24, #1, MUL VL]\n"
+ "st1w { z21.s }, p1, [x24, #2, MUL VL]\n"
+ "st1w { z20.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z19.s }, p1, [x24, #4, MUL VL]\n"
+ "st1w { z18.s }, p1, [x24, #5, MUL VL]\n"
+ "st1w { z17.s }, p1, [x24, #6, MUL VL]\n"
+ "st1w { z16.s }, p1, [x24, #7, MUL VL]\n"
+ "add x24, x24, %x[out_stride]\n"
"bge 8b\n"
"9:" // Tail row loop: Unroll column loop skip
- "cbz x21, 11f\n"
+ "cbz x20, 11f\n"
"10:" // Tail row loop: Column loop
- "mov x20, x21\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z23.s }, p0/Z, [x26]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z22.s }, p0/Z, [x26, #1, MUL VL]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z21.s }, p0/Z, [x26, #2, MUL VL]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z20.s }, p0/Z, [x26, #3, MUL VL]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z19.s }, p0/Z, [x26, #4, MUL VL]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z18.s }, p0/Z, [x26, #5, MUL VL]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z17.s }, p0/Z, [x26, #6, MUL VL]\n"
- "decw x20\n"
- "decw x21, ALL, MUL #8\n"
- "whilelt p0.s, XZR, x20\n"
- "cmp x21, #0x0\n"
- "ld1w { z16.s }, p0/Z, [x26, #7, MUL VL]\n"
- "st1w { z23.s }, p1, [x22]\n"
- "addvl x26, x26, #8\n"
- "st1w { z22.s }, p1, [x22, #1, MUL VL]\n"
- "st1w { z21.s }, p1, [x22, #2, MUL VL]\n"
- "st1w { z20.s }, p1, [x22, #3, MUL VL]\n"
- "st1w { z19.s }, p1, [x22, #4, MUL VL]\n"
- "st1w { z18.s }, p1, [x22, #5, MUL VL]\n"
- "st1w { z17.s }, p1, [x22, #6, MUL VL]\n"
- "st1w { z16.s }, p1, [x22, #7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "mov x19, x20\n"
+ "decw x20, ALL, MUL #8\n"
+ "whilelt p0.s, XZR, x19\n"
+ "ld1w { z23.s }, p0/Z, [x25]\n"
+ "decw x19\n"
+ "whilelt p0.s, XZR, x19\n"
+ "ld1w { z22.s }, p0/Z, [x25, #1, MUL VL]\n"
+ "decw x19\n"
+ "whilelt p0.s, XZR, x19\n"
+ "ld1w { z21.s }, p0/Z, [x25, #2, MUL VL]\n"
+ "decw x19\n"
+ "whilelt p0.s, XZR, x19\n"
+ "ld1w { z20.s }, p0/Z, [x25, #3, MUL VL]\n"
+ "decw x19\n"
+ "whilelt p0.s, XZR, x19\n"
+ "ld1w { z19.s }, p0/Z, [x25, #4, MUL VL]\n"
+ "decw x19\n"
+ "whilelt p0.s, XZR, x19\n"
+ "ld1w { z18.s }, p0/Z, [x25, #5, MUL VL]\n"
+ "decw x19\n"
+ "whilelt p0.s, XZR, x19\n"
+ "ld1w { z17.s }, p0/Z, [x25, #6, MUL VL]\n"
+ "decw x19\n"
+ "whilelt p0.s, XZR, x19\n"
+ "ld1w { z16.s }, p0/Z, [x25, #7, MUL VL]\n"
+ "st1w { z23.s }, p1, [x24]\n"
+ "addvl x25, x25, #8\n"
+ "st1w { z22.s }, p1, [x24, #1, MUL VL]\n"
+ "cmp x20, #0x0\n"
+ "st1w { z21.s }, p1, [x24, #2, MUL VL]\n"
+ "st1w { z20.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z19.s }, p1, [x24, #4, MUL VL]\n"
+ "st1w { z18.s }, p1, [x24, #5, MUL VL]\n"
+ "st1w { z17.s }, p1, [x24, #6, MUL VL]\n"
+ "st1w { z16.s }, p1, [x24, #7, MUL VL]\n"
+ "add x24, x24, %x[out_stride]\n"
"bgt 10b\n"
"11:" // Tail row loop: Column loop skip
- "cmp %x[height], #0x1\n"
"addvl %x[out], %x[out], #8\n"
+ "cmp %x[height], #0x1\n"
"bge 7b\n"
"12:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_1x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_1x4.hpp
index 3ba50fee60..f81098b26e 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_1x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_1x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -40,218 +40,218 @@ void sve_transpose_interleave_8VL_1x4(uint8_t *out, const uint8_t *in, size_t wi
size_t out_stride = 8 * roundup<size_t>(height, 4) * get_vector_length<uint32_t>();
__asm__ __volatile__(
- "ptrue p2.b\n"
+ "ptrue p1.b\n"
"1:" // Main row loop: Head
- "mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
- "add x24, x25, %x[in_stride]\n"
- "mov x23, %x[width]\n"
- "cntb x20, ALL, MUL #8\n"
- "add x22, x24, %x[in_stride]\n"
+ "mov x25, %x[in]\n"
+ "mov x24, %x[out]\n"
+ "add x23, x25, %x[in_stride]\n"
+ "add x22, x23, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add %x[in], x21, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x22, %x[in_stride]\n"
- "csel x22, x22, %x[pad_row], GT\n"
- "csel x24, x24, %x[pad_row], GE\n"
+ "csel x21, x21, %x[pad_row], GT\n"
+ "csel x22, x22, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x25, x25, %x[pad_row], GT\n"
- "cmp x23, x20\n"
- "mov x21, %x[out]\n"
+ "csel x23, x23, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x4\n"
+ "mov x20, %x[width]\n"
+ "cntb x19, ALL, MUL #8\n"
+ "cmp x20, x19\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1b { z7.b }, p2/Z, [x26]\n"
- "ld1b { z24.b }, p2/Z, [x26, #1, MUL VL]\n"
- "sub x23, x23, x20\n"
- "cmp x23, x20\n"
- "ld1b { z31.b }, p2/Z, [x25]\n"
- "ld1b { z18.b }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1b { z19.b }, p2/Z, [x24]\n"
- "ld1b { z25.b }, p2/Z, [x24, #1, MUL VL]\n"
- "zip1 z23.b, z7.b, z19.b\n"
- "zip2 z20.b, z7.b, z19.b\n"
- "ld1b { z30.b }, p2/Z, [x22]\n"
- "ld1b { z3.b }, p2/Z, [x22, #1, MUL VL]\n"
- "zip1 z21.b, z31.b, z30.b\n"
- "zip2 z19.b, z31.b, z30.b\n"
- "ld1b { z16.b }, p2/Z, [x26, #2, MUL VL]\n"
- "ld1b { z30.b }, p2/Z, [x26, #3, MUL VL]\n"
- "zip1 z2.b, z24.b, z25.b\n"
- "zip1 z17.b, z18.b, z3.b\n"
- "ld1b { z29.b }, p2/Z, [x25, #2, MUL VL]\n"
- "ld1b { z8.b }, p2/Z, [x25, #3, MUL VL]\n"
- "zip2 z22.b, z24.b, z25.b\n"
- "zip2 z4.b, z18.b, z3.b\n"
- "ld1b { z0.b }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1b { z3.b }, p2/Z, [x24, #3, MUL VL]\n"
- "zip1 z9.b, z16.b, z0.b\n"
- "zip2 z14.b, z16.b, z0.b\n"
- "ld1b { z18.b }, p2/Z, [x22, #2, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x22, #3, MUL VL]\n"
- "zip1 z24.b, z29.b, z18.b\n"
- "zip2 z11.b, z29.b, z18.b\n"
- "ld1b { z1.b }, p2/Z, [x26, #4, MUL VL]\n"
- "ld1b { z12.b }, p2/Z, [x26, #5, MUL VL]\n"
- "zip1 z13.b, z30.b, z3.b\n"
- "zip1 z15.b, z8.b, z16.b\n"
- "ld1b { z5.b }, p2/Z, [x25, #4, MUL VL]\n"
- "ld1b { z29.b }, p2/Z, [x25, #5, MUL VL]\n"
- "zip2 z31.b, z30.b, z3.b\n"
- "zip2 z30.b, z8.b, z16.b\n"
- "ld1b { z16.b }, p2/Z, [x24, #4, MUL VL]\n"
- "ld1b { z18.b }, p2/Z, [x24, #5, MUL VL]\n"
- "zip1 z27.b, z1.b, z16.b\n"
- "zip2 z10.b, z1.b, z16.b\n"
- "ld1b { z7.b }, p2/Z, [x22, #4, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x22, #5, MUL VL]\n"
- "zip1 z8.b, z5.b, z7.b\n"
- "zip2 z26.b, z5.b, z7.b\n"
- "ld1b { z3.b }, p2/Z, [x26, #6, MUL VL]\n"
- "ld1b { z25.b }, p2/Z, [x26, #7, MUL VL]\n"
- "zip1 z6.b, z12.b, z18.b\n"
- "zip1 z5.b, z29.b, z16.b\n"
- "ld1b { z0.b }, p2/Z, [x25, #6, MUL VL]\n"
- "ld1b { z28.b }, p2/Z, [x25, #7, MUL VL]\n"
- "zip2 z12.b, z12.b, z18.b\n"
- "zip2 z7.b, z29.b, z16.b\n"
- "ld1b { z1.b }, p2/Z, [x24, #6, MUL VL]\n"
- "ld1b { z29.b }, p2/Z, [x24, #7, MUL VL]\n"
- "zip1 z16.b, z23.b, z21.b\n"
- "zip2 z18.b, z23.b, z21.b\n"
- "ld1b { z23.b }, p2/Z, [x22, #6, MUL VL]\n"
- "ld1b { z21.b }, p2/Z, [x22, #7, MUL VL]\n"
- "st1b { z16.b }, p2, [x21]\n"
- "zip1 z16.b, z20.b, z19.b\n"
- "zip2 z20.b, z20.b, z19.b\n"
- "zip1 z19.b, z2.b, z17.b\n"
- "st1b { z18.b }, p2, [x21, #1, MUL VL]\n"
- "addvl x26, x26, #8\n"
- "zip2 z18.b, z2.b, z17.b\n"
- "zip1 z17.b, z22.b, z4.b\n"
- "st1b { z16.b }, p2, [x21, #2, MUL VL]\n"
+ "ld1b { z8.b }, p1/Z, [x25]\n"
+ "sub x20, x20, x19\n"
+ "ld1b { z24.b }, p1/Z, [x25, #1, MUL VL]\n"
+ "cmp x20, x19\n"
+ "ld1b { z27.b }, p1/Z, [x25, #2, MUL VL]\n"
+ "ld1b { z25.b }, p1/Z, [x25, #3, MUL VL]\n"
+ "ld1b { z7.b }, p1/Z, [x25, #4, MUL VL]\n"
+ "ld1b { z3.b }, p1/Z, [x25, #5, MUL VL]\n"
+ "ld1b { z14.b }, p1/Z, [x25, #6, MUL VL]\n"
+ "ld1b { z13.b }, p1/Z, [x25, #7, MUL VL]\n"
"addvl x25, x25, #8\n"
- "zip2 z16.b, z22.b, z4.b\n"
- "st1b { z20.b }, p2, [x21, #3, MUL VL]\n"
- "zip1 z4.b, z3.b, z1.b\n"
- "addvl x24, x24, #8\n"
- "st1b { z19.b }, p2, [x21, #4, MUL VL]\n"
- "zip1 z22.b, z0.b, z23.b\n"
- "zip2 z3.b, z3.b, z1.b\n"
+ "ld1b { z16.b }, p1/Z, [x23]\n"
+ "ld1b { z12.b }, p1/Z, [x23, #1, MUL VL]\n"
+ "ld1b { z15.b }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1b { z11.b }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1b { z4.b }, p1/Z, [x23, #4, MUL VL]\n"
+ "ld1b { z5.b }, p1/Z, [x23, #5, MUL VL]\n"
+ "ld1b { z26.b }, p1/Z, [x23, #6, MUL VL]\n"
+ "ld1b { z30.b }, p1/Z, [x23, #7, MUL VL]\n"
+ "addvl x23, x23, #8\n"
+ "ld1b { z22.b }, p1/Z, [x22]\n"
+ "zip1 z21.b, z8.b, z22.b\n"
+ "ld1b { z2.b }, p1/Z, [x22, #1, MUL VL]\n"
+ "zip2 z20.b, z8.b, z22.b\n"
+ "ld1b { z18.b }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1b { z17.b }, p1/Z, [x22, #3, MUL VL]\n"
+ "zip1 z10.b, z24.b, z2.b\n"
+ "ld1b { z22.b }, p1/Z, [x22, #4, MUL VL]\n"
+ "zip2 z9.b, z24.b, z2.b\n"
+ "ld1b { z6.b }, p1/Z, [x22, #5, MUL VL]\n"
+ "zip1 z0.b, z27.b, z18.b\n"
+ "ld1b { z1.b }, p1/Z, [x22, #6, MUL VL]\n"
+ "zip2 z28.b, z27.b, z18.b\n"
+ "ld1b { z23.b }, p1/Z, [x22, #7, MUL VL]\n"
"addvl x22, x22, #8\n"
- "st1b { z18.b }, p2, [x21, #5, MUL VL]\n"
- "zip2 z2.b, z0.b, z23.b\n"
- "zip1 z1.b, z25.b, z29.b\n"
- "st1b { z17.b }, p2, [x21, #6, MUL VL]\n"
- "zip1 z0.b, z28.b, z21.b\n"
- "zip2 z29.b, z25.b, z29.b\n"
- "st1b { z16.b }, p2, [x21, #7, MUL VL]\n"
- "add x21, x21, %x[out_stride]\n"
- "zip2 z28.b, z28.b, z21.b\n"
- "zip1 z17.b, z9.b, z24.b\n"
- "zip2 z16.b, z9.b, z24.b\n"
- "zip1 z19.b, z14.b, z11.b\n"
- "st1b { z17.b }, p2, [x21]\n"
- "zip2 z18.b, z14.b, z11.b\n"
- "zip1 z17.b, z13.b, z15.b\n"
- "st1b { z16.b }, p2, [x21, #1, MUL VL]\n"
- "zip2 z16.b, z13.b, z15.b\n"
- "zip1 z21.b, z31.b, z30.b\n"
- "st1b { z19.b }, p2, [x21, #2, MUL VL]\n"
- "zip2 z20.b, z31.b, z30.b\n"
- "st1b { z18.b }, p2, [x21, #3, MUL VL]\n"
- "zip1 z19.b, z27.b, z8.b\n"
- "st1b { z17.b }, p2, [x21, #4, MUL VL]\n"
- "zip2 z18.b, z27.b, z8.b\n"
- "zip1 z17.b, z10.b, z26.b\n"
- "st1b { z16.b }, p2, [x21, #5, MUL VL]\n"
- "zip2 z16.b, z10.b, z26.b\n"
- "zip1 z27.b, z6.b, z5.b\n"
- "st1b { z21.b }, p2, [x21, #6, MUL VL]\n"
- "zip2 z26.b, z6.b, z5.b\n"
- "zip1 z25.b, z12.b, z7.b\n"
- "st1b { z20.b }, p2, [x21, #7, MUL VL]\n"
- "add x21, x21, %x[out_stride]\n"
- "zip2 z24.b, z12.b, z7.b\n"
- "zip1 z23.b, z4.b, z22.b\n"
- "st1b { z19.b }, p2, [x21]\n"
- "zip2 z22.b, z4.b, z22.b\n"
- "zip1 z21.b, z3.b, z2.b\n"
- "st1b { z18.b }, p2, [x21, #1, MUL VL]\n"
- "zip2 z20.b, z3.b, z2.b\n"
- "zip1 z19.b, z1.b, z0.b\n"
- "st1b { z17.b }, p2, [x21, #2, MUL VL]\n"
- "zip2 z18.b, z1.b, z0.b\n"
- "zip1 z17.b, z29.b, z28.b\n"
- "st1b { z16.b }, p2, [x21, #3, MUL VL]\n"
- "zip2 z16.b, z29.b, z28.b\n"
- "st1b { z27.b }, p2, [x21, #4, MUL VL]\n"
- "st1b { z26.b }, p2, [x21, #5, MUL VL]\n"
- "st1b { z25.b }, p2, [x21, #6, MUL VL]\n"
- "st1b { z24.b }, p2, [x21, #7, MUL VL]\n"
- "add x21, x21, %x[out_stride]\n"
- "st1b { z23.b }, p2, [x21]\n"
- "st1b { z22.b }, p2, [x21, #1, MUL VL]\n"
- "st1b { z21.b }, p2, [x21, #2, MUL VL]\n"
- "st1b { z20.b }, p2, [x21, #3, MUL VL]\n"
- "st1b { z19.b }, p2, [x21, #4, MUL VL]\n"
- "st1b { z18.b }, p2, [x21, #5, MUL VL]\n"
- "st1b { z17.b }, p2, [x21, #6, MUL VL]\n"
- "st1b { z16.b }, p2, [x21, #7, MUL VL]\n"
- "add x21, x21, %x[out_stride]\n"
+ "zip1 z31.b, z25.b, z17.b\n"
+ "ld1b { z19.b }, p1/Z, [x21]\n"
+ "zip2 z8.b, z25.b, z17.b\n"
+ "ld1b { z2.b }, p1/Z, [x21, #1, MUL VL]\n"
+ "zip1 z27.b, z7.b, z22.b\n"
+ "ld1b { z29.b }, p1/Z, [x21, #2, MUL VL]\n"
+ "zip2 z7.b, z7.b, z22.b\n"
+ "ld1b { z24.b }, p1/Z, [x21, #3, MUL VL]\n"
+ "zip1 z18.b, z16.b, z19.b\n"
+ "ld1b { z25.b }, p1/Z, [x21, #4, MUL VL]\n"
+ "zip1 z17.b, z21.b, z18.b\n"
+ "ld1b { z22.b }, p1/Z, [x21, #5, MUL VL]\n"
+ "zip2 z18.b, z21.b, z18.b\n"
+ "ld1b { z21.b }, p1/Z, [x21, #6, MUL VL]\n"
+ "zip2 z16.b, z16.b, z19.b\n"
+ "ld1b { z19.b }, p1/Z, [x21, #7, MUL VL]\n"
+ "addvl x21, x21, #8\n"
+ "st1b { z17.b }, p1, [x24]\n"
+ "zip1 z17.b, z20.b, z16.b\n"
+ "zip2 z20.b, z20.b, z16.b\n"
+ "st1b { z18.b }, p1, [x24, #1, MUL VL]\n"
+ "zip1 z16.b, z12.b, z2.b\n"
+ "st1b { z17.b }, p1, [x24, #2, MUL VL]\n"
+ "zip1 z17.b, z10.b, z16.b\n"
+ "st1b { z20.b }, p1, [x24, #3, MUL VL]\n"
+ "zip2 z16.b, z10.b, z16.b\n"
+ "st1b { z17.b }, p1, [x24, #4, MUL VL]\n"
+ "zip2 z17.b, z12.b, z2.b\n"
+ "st1b { z16.b }, p1, [x24, #5, MUL VL]\n"
+ "zip1 z16.b, z9.b, z17.b\n"
+ "st1b { z16.b }, p1, [x24, #6, MUL VL]\n"
+ "zip2 z16.b, z9.b, z17.b\n"
+ "st1b { z16.b }, p1, [x24, #7, MUL VL]\n"
+ "add x24, x24, %x[out_stride]\n"
+ "zip1 z18.b, z15.b, z29.b\n"
+ "zip2 z17.b, z15.b, z29.b\n"
+ "zip1 z16.b, z0.b, z18.b\n"
+ "st1b { z16.b }, p1, [x24]\n"
+ "zip2 z16.b, z0.b, z18.b\n"
+ "st1b { z16.b }, p1, [x24, #1, MUL VL]\n"
+ "zip1 z16.b, z28.b, z17.b\n"
+ "st1b { z16.b }, p1, [x24, #2, MUL VL]\n"
+ "zip2 z16.b, z28.b, z17.b\n"
+ "st1b { z16.b }, p1, [x24, #3, MUL VL]\n"
+ "zip1 z17.b, z11.b, z24.b\n"
+ "zip1 z16.b, z31.b, z17.b\n"
+ "st1b { z16.b }, p1, [x24, #4, MUL VL]\n"
+ "zip2 z16.b, z31.b, z17.b\n"
+ "st1b { z16.b }, p1, [x24, #5, MUL VL]\n"
+ "zip2 z17.b, z11.b, z24.b\n"
+ "zip1 z16.b, z8.b, z17.b\n"
+ "st1b { z16.b }, p1, [x24, #6, MUL VL]\n"
+ "zip2 z16.b, z8.b, z17.b\n"
+ "st1b { z16.b }, p1, [x24, #7, MUL VL]\n"
+ "add x24, x24, %x[out_stride]\n"
+ "zip1 z18.b, z4.b, z25.b\n"
+ "zip2 z17.b, z4.b, z25.b\n"
+ "zip1 z16.b, z27.b, z18.b\n"
+ "st1b { z16.b }, p1, [x24]\n"
+ "zip2 z16.b, z27.b, z18.b\n"
+ "st1b { z16.b }, p1, [x24, #1, MUL VL]\n"
+ "zip1 z16.b, z7.b, z17.b\n"
+ "st1b { z16.b }, p1, [x24, #2, MUL VL]\n"
+ "zip2 z16.b, z7.b, z17.b\n"
+ "st1b { z16.b }, p1, [x24, #3, MUL VL]\n"
+ "zip1 z18.b, z3.b, z6.b\n"
+ "zip1 z17.b, z5.b, z22.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x24, #4, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x24, #5, MUL VL]\n"
+ "zip2 z18.b, z3.b, z6.b\n"
+ "zip2 z17.b, z5.b, z22.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x24, #6, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x24, #7, MUL VL]\n"
+ "add x24, x24, %x[out_stride]\n"
+ "zip1 z18.b, z14.b, z1.b\n"
+ "zip1 z17.b, z26.b, z21.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x24]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x24, #1, MUL VL]\n"
+ "zip2 z18.b, z14.b, z1.b\n"
+ "zip2 z17.b, z26.b, z21.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x24, #2, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x24, #3, MUL VL]\n"
+ "zip1 z18.b, z13.b, z23.b\n"
+ "zip1 z17.b, z30.b, z19.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x24, #4, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x24, #5, MUL VL]\n"
+ "zip2 z18.b, z13.b, z23.b\n"
+ "zip2 z17.b, z30.b, z19.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x24, #6, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x24, #7, MUL VL]\n"
+ "add x24, x24, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x23, 5f\n"
+ "cbz x20, 5f\n"
"4:" // Main row loop: Column loop
- "mov x20, x23\n"
- "whilelt p1.b, XZR, x20\n"
- "ld1b { z23.b }, p1/Z, [x26]\n"
- "ld1b { z22.b }, p1/Z, [x25]\n"
- "decb x20\n"
- "whilelt p0.b, XZR, x20\n"
- "ld1b { z21.b }, p0/Z, [x26, #1, MUL VL]\n"
- "ld1b { z25.b }, p0/Z, [x25, #1, MUL VL]\n"
- "ld1b { z19.b }, p1/Z, [x24]\n"
- "ld1b { z20.b }, p0/Z, [x24, #1, MUL VL]\n"
- "decw x23, ALL, MUL #8\n"
- "zip1 z24.b, z23.b, z19.b\n"
- "ld1b { z18.b }, p1/Z, [x22]\n"
- "ld1b { z16.b }, p0/Z, [x22, #1, MUL VL]\n"
- "zip1 z17.b, z22.b, z18.b\n"
- "zip2 z23.b, z23.b, z19.b\n"
- "zip2 z19.b, z22.b, z18.b\n"
- "zip1 z22.b, z21.b, z20.b\n"
- "cmp x23, #0x0\n"
- "addvl x26, x26, #2\n"
- "zip1 z18.b, z25.b, z16.b\n"
- "zip2 z21.b, z21.b, z20.b\n"
+ "mov x19, x20\n"
+ "decw x20, ALL, MUL #8\n"
+ "whilelt p0.b, XZR, x19\n"
+ "ld1b { z17.b }, p0/Z, [x25]\n"
+ "ld1b { z25.b }, p0/Z, [x23]\n"
+ "decb x19\n"
+ "ld1b { z16.b }, p0/Z, [x22]\n"
+ "zip1 z18.b, z17.b, z16.b\n"
+ "ld1b { z24.b }, p0/Z, [x21]\n"
+ "whilelt p0.b, XZR, x19\n"
+ "zip2 z23.b, z17.b, z16.b\n"
+ "ld1b { z22.b }, p0/Z, [x25, #1, MUL VL]\n"
"addvl x25, x25, #2\n"
- "addvl x24, x24, #2\n"
- "zip2 z20.b, z25.b, z16.b\n"
+ "zip1 z16.b, z25.b, z24.b\n"
+ "ld1b { z21.b }, p0/Z, [x23, #1, MUL VL]\n"
+ "addvl x23, x23, #2\n"
+ "zip1 z17.b, z18.b, z16.b\n"
+ "ld1b { z20.b }, p0/Z, [x22, #1, MUL VL]\n"
"addvl x22, x22, #2\n"
- "zip1 z16.b, z24.b, z17.b\n"
- "st1b { z16.b }, p2, [x21]\n"
- "zip2 z16.b, z24.b, z17.b\n"
- "zip1 z17.b, z23.b, z19.b\n"
- "st1b { z16.b }, p2, [x21, #1, MUL VL]\n"
- "zip2 z16.b, z23.b, z19.b\n"
- "zip1 z19.b, z22.b, z18.b\n"
- "st1b { z17.b }, p2, [x21, #2, MUL VL]\n"
- "zip2 z18.b, z22.b, z18.b\n"
- "zip1 z17.b, z21.b, z20.b\n"
- "st1b { z16.b }, p2, [x21, #3, MUL VL]\n"
- "zip2 z16.b, z21.b, z20.b\n"
- "st1b { z19.b }, p2, [x21, #4, MUL VL]\n"
- "st1b { z18.b }, p2, [x21, #5, MUL VL]\n"
- "st1b { z17.b }, p2, [x21, #6, MUL VL]\n"
- "st1b { z16.b }, p2, [x21, #7, MUL VL]\n"
- "add x21, x21, %x[out_stride]\n"
+ "zip2 z18.b, z18.b, z16.b\n"
+ "ld1b { z19.b }, p0/Z, [x21, #1, MUL VL]\n"
+ "addvl x21, x21, #2\n"
+ "zip2 z16.b, z25.b, z24.b\n"
+ "st1b { z17.b }, p1, [x24]\n"
+ "cmp x20, #0x0\n"
+ "zip1 z17.b, z23.b, z16.b\n"
+ "st1b { z18.b }, p1, [x24, #1, MUL VL]\n"
+ "zip2 z16.b, z23.b, z16.b\n"
+ "st1b { z17.b }, p1, [x24, #2, MUL VL]\n"
+ "zip1 z18.b, z22.b, z20.b\n"
+ "st1b { z16.b }, p1, [x24, #3, MUL VL]\n"
+ "zip1 z17.b, z21.b, z19.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x24, #4, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x24, #5, MUL VL]\n"
+ "zip2 z18.b, z22.b, z20.b\n"
+ "zip2 z17.b, z21.b, z19.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x24, #6, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x24, #7, MUL VL]\n"
+ "add x24, x24, %x[out_stride]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp %x[height], #0x1\n"
"addvl %x[out], %x[out], #8\n"
+ "cmp %x[height], #0x1\n"
"bge 1b\n"
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_1x8.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_1x8.hpp
index 6b5ca38ab1..34d43f5052 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_1x8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_1x8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -42,189 +42,189 @@ void sve_transpose_interleave_8VL_1x8(uint8_t *out, const uint8_t *in, size_t wi
__asm__ __volatile__(
"ptrue p1.b\n"
"1:" // Main row loop: Head
- "mov x10, %x[in]\n"
- "add x9, x10, %x[in_stride]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
+ "mov x9, %x[in]\n"
+ "mov x28, %x[out]\n"
+ "add x27, x9, %x[in_stride]\n"
"add x26, x27, %x[in_stride]\n"
"add x25, x26, %x[in_stride]\n"
"add x24, x25, %x[in_stride]\n"
"add x23, x24, %x[in_stride]\n"
+ "add x22, x23, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add %x[in], x21, %x[in_stride]\n"
"cmp %x[height], #0x7\n"
- "add %x[in], x23, %x[in_stride]\n"
+ "csel x21, x21, %x[pad_row], GT\n"
+ "csel x22, x22, %x[pad_row], GE\n"
+ "cmp %x[height], #0x5\n"
"csel x23, x23, %x[pad_row], GT\n"
"csel x24, x24, %x[pad_row], GE\n"
- "cmp %x[height], #0x5\n"
- "mov x22, %x[width]\n"
- "cntb x21, ALL, MUL #2\n"
+ "cmp %x[height], #0x3\n"
"csel x25, x25, %x[pad_row], GT\n"
"csel x26, x26, %x[pad_row], GE\n"
- "cmp %x[height], #0x3\n"
- "csel x27, x27, %x[pad_row], GT\n"
- "csel x28, x28, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x9, x9, %x[pad_row], GT\n"
- "cmp x22, x21\n"
- "mov x20, %x[out]\n"
+ "csel x27, x27, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x8\n"
+ "mov x20, %x[width]\n"
+ "cntb x19, ALL, MUL #2\n"
+ "cmp x20, x19\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1b { z23.b }, p1/Z, [x10]\n"
- "ld1b { z22.b }, p1/Z, [x9]\n"
- "sub x22, x22, x21\n"
- "cmp x22, x21\n"
- "ld1b { z20.b }, p1/Z, [x28]\n"
- "ld1b { z21.b }, p1/Z, [x27]\n"
- "ld1b { z19.b }, p1/Z, [x26]\n"
- "ld1b { z18.b }, p1/Z, [x25]\n"
- "zip1 z5.b, z23.b, z19.b\n"
- "zip1 z4.b, z22.b, z18.b\n"
- "ld1b { z17.b }, p1/Z, [x24]\n"
- "ld1b { z16.b }, p1/Z, [x23]\n"
- "zip1 z3.b, z20.b, z17.b\n"
- "zip1 z31.b, z21.b, z16.b\n"
- "ld1b { z25.b }, p1/Z, [x10, #1, MUL VL]\n"
- "ld1b { z24.b }, p1/Z, [x9, #1, MUL VL]\n"
- "zip2 z2.b, z23.b, z19.b\n"
- "zip2 z30.b, z20.b, z17.b\n"
- "ld1b { z23.b }, p1/Z, [x28, #1, MUL VL]\n"
- "ld1b { z20.b }, p1/Z, [x27, #1, MUL VL]\n"
- "zip2 z22.b, z22.b, z18.b\n"
- "zip2 z21.b, z21.b, z16.b\n"
- "ld1b { z19.b }, p1/Z, [x26, #1, MUL VL]\n"
- "ld1b { z18.b }, p1/Z, [x25, #1, MUL VL]\n"
- "zip1 z29.b, z25.b, z19.b\n"
- "zip1 z28.b, z24.b, z18.b\n"
- "ld1b { z17.b }, p1/Z, [x24, #1, MUL VL]\n"
- "ld1b { z16.b }, p1/Z, [x23, #1, MUL VL]\n"
- "zip1 z27.b, z23.b, z17.b\n"
- "zip1 z26.b, z20.b, z16.b\n"
- "zip2 z1.b, z25.b, z19.b\n"
- "zip2 z25.b, z23.b, z17.b\n"
- "addvl x10, x10, #2\n"
+ "ld1b { z17.b }, p1/Z, [x9]\n"
+ "sub x20, x20, x19\n"
+ "ld1b { z5.b }, p1/Z, [x9, #1, MUL VL]\n"
"addvl x9, x9, #2\n"
- "zip2 z24.b, z24.b, z18.b\n"
- "zip2 z16.b, z20.b, z16.b\n"
- "addvl x28, x28, #2\n"
+ "ld1b { z19.b }, p1/Z, [x27]\n"
+ "cmp x20, x19\n"
+ "ld1b { z4.b }, p1/Z, [x27, #1, MUL VL]\n"
"addvl x27, x27, #2\n"
- "zip1 z0.b, z5.b, z3.b\n"
- "zip1 z17.b, z4.b, z31.b\n"
+ "ld1b { z18.b }, p1/Z, [x26]\n"
+ "ld1b { z3.b }, p1/Z, [x26, #1, MUL VL]\n"
"addvl x26, x26, #2\n"
+ "ld1b { z2.b }, p1/Z, [x25]\n"
+ "ld1b { z1.b }, p1/Z, [x25, #1, MUL VL]\n"
"addvl x25, x25, #2\n"
- "zip2 z20.b, z5.b, z3.b\n"
- "zip2 z19.b, z4.b, z31.b\n"
+ "ld1b { z16.b }, p1/Z, [x24]\n"
+ "zip1 z0.b, z17.b, z16.b\n"
+ "ld1b { z31.b }, p1/Z, [x24, #1, MUL VL]\n"
"addvl x24, x24, #2\n"
+ "zip2 z30.b, z17.b, z16.b\n"
+ "ld1b { z17.b }, p1/Z, [x23]\n"
+ "ld1b { z29.b }, p1/Z, [x23, #1, MUL VL]\n"
+ "zip1 z28.b, z5.b, z31.b\n"
+ "ld1b { z16.b }, p1/Z, [x22]\n"
"addvl x23, x23, #2\n"
- "zip1 z31.b, z2.b, z30.b\n"
+ "zip1 z27.b, z19.b, z17.b\n"
+ "ld1b { z26.b }, p1/Z, [x22, #1, MUL VL]\n"
+ "addvl x22, x22, #2\n"
+ "zip2 z25.b, z19.b, z17.b\n"
+ "ld1b { z24.b }, p1/Z, [x21]\n"
+ "zip1 z22.b, z4.b, z29.b\n"
+ "ld1b { z23.b }, p1/Z, [x21, #1, MUL VL]\n"
+ "addvl x21, x21, #2\n"
+ "zip1 z21.b, z18.b, z16.b\n"
+ "zip2 z20.b, z18.b, z16.b\n"
+ "zip1 z18.b, z0.b, z21.b\n"
+ "zip1 z19.b, z2.b, z24.b\n"
+ "zip1 z17.b, z27.b, z19.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #1, MUL VL]\n"
+ "zip2 z18.b, z0.b, z21.b\n"
+ "zip2 z17.b, z27.b, z19.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #2, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #3, MUL VL]\n"
+ "zip1 z18.b, z30.b, z20.b\n"
+ "zip2 z19.b, z2.b, z24.b\n"
+ "zip1 z17.b, z25.b, z19.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #4, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #5, MUL VL]\n"
+ "zip2 z18.b, z30.b, z20.b\n"
+ "zip2 z17.b, z25.b, z19.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #6, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #7, MUL VL]\n"
+ "add x28, x28, %x[out_stride]\n"
+ "zip1 z20.b, z3.b, z26.b\n"
+ "zip1 z19.b, z1.b, z23.b\n"
+ "zip1 z18.b, z28.b, z20.b\n"
+ "zip1 z17.b, z22.b, z19.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #1, MUL VL]\n"
+ "zip2 z18.b, z28.b, z20.b\n"
+ "zip2 z17.b, z22.b, z19.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #2, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #3, MUL VL]\n"
+ "zip2 z22.b, z5.b, z31.b\n"
+ "zip2 z21.b, z3.b, z26.b\n"
"zip1 z18.b, z22.b, z21.b\n"
- "zip2 z30.b, z2.b, z30.b\n"
- "zip2 z23.b, z22.b, z21.b\n"
- "zip1 z22.b, z29.b, z27.b\n"
- "zip1 z21.b, z28.b, z26.b\n"
- "zip2 z29.b, z29.b, z27.b\n"
- "zip2 z28.b, z28.b, z26.b\n"
- "zip1 z27.b, z1.b, z25.b\n"
- "zip1 z26.b, z24.b, z16.b\n"
- "zip2 z25.b, z1.b, z25.b\n"
- "zip2 z24.b, z24.b, z16.b\n"
- "zip1 z16.b, z0.b, z17.b\n"
- "zip2 z17.b, z0.b, z17.b\n"
- "st1b { z16.b }, p1, [x20]\n"
- "zip1 z16.b, z20.b, z19.b\n"
- "zip2 z20.b, z20.b, z19.b\n"
- "st1b { z17.b }, p1, [x20, #1, MUL VL]\n"
- "zip1 z19.b, z31.b, z18.b\n"
- "zip2 z18.b, z31.b, z18.b\n"
- "st1b { z16.b }, p1, [x20, #2, MUL VL]\n"
- "zip1 z17.b, z30.b, z23.b\n"
- "zip2 z16.b, z30.b, z23.b\n"
- "st1b { z20.b }, p1, [x20, #3, MUL VL]\n"
- "st1b { z19.b }, p1, [x20, #4, MUL VL]\n"
- "zip1 z23.b, z22.b, z21.b\n"
- "zip2 z22.b, z22.b, z21.b\n"
- "st1b { z18.b }, p1, [x20, #5, MUL VL]\n"
- "zip1 z21.b, z29.b, z28.b\n"
- "zip2 z20.b, z29.b, z28.b\n"
- "st1b { z17.b }, p1, [x20, #6, MUL VL]\n"
- "zip1 z19.b, z27.b, z26.b\n"
- "zip2 z18.b, z27.b, z26.b\n"
- "st1b { z16.b }, p1, [x20, #7, MUL VL]\n"
- "add x20, x20, %x[out_stride]\n"
- "zip1 z17.b, z25.b, z24.b\n"
- "zip2 z16.b, z25.b, z24.b\n"
- "st1b { z23.b }, p1, [x20]\n"
- "st1b { z22.b }, p1, [x20, #1, MUL VL]\n"
- "st1b { z21.b }, p1, [x20, #2, MUL VL]\n"
- "st1b { z20.b }, p1, [x20, #3, MUL VL]\n"
- "st1b { z19.b }, p1, [x20, #4, MUL VL]\n"
- "st1b { z18.b }, p1, [x20, #5, MUL VL]\n"
- "st1b { z17.b }, p1, [x20, #6, MUL VL]\n"
- "st1b { z16.b }, p1, [x20, #7, MUL VL]\n"
- "add x20, x20, %x[out_stride]\n"
+ "zip2 z20.b, z4.b, z29.b\n"
+ "zip2 z19.b, z1.b, z23.b\n"
+ "zip1 z17.b, z20.b, z19.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #4, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #5, MUL VL]\n"
+ "zip2 z18.b, z22.b, z21.b\n"
+ "zip2 z17.b, z20.b, z19.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #6, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #7, MUL VL]\n"
+ "add x28, x28, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x22, 5f\n"
+ "cbz x20, 5f\n"
"4:" // Main row loop: Column loop
- "whilelt p0.b, XZR, x22\n"
- "ld1b { z25.b }, p0/Z, [x10]\n"
- "ld1b { z27.b }, p0/Z, [x9]\n"
- "decd x22, ALL, MUL #8\n"
- "ld1b { z26.b }, p0/Z, [x28]\n"
- "ld1b { z24.b }, p0/Z, [x27]\n"
- "cmp x22, #0x0\n"
- "addvl x10, x10, #1\n"
- "ld1b { z22.b }, p0/Z, [x26]\n"
- "ld1b { z21.b }, p0/Z, [x25]\n"
- "zip1 z20.b, z25.b, z22.b\n"
- "zip1 z23.b, z27.b, z21.b\n"
- "ld1b { z17.b }, p0/Z, [x24]\n"
- "ld1b { z16.b }, p0/Z, [x23]\n"
- "zip1 z19.b, z26.b, z17.b\n"
- "zip1 z18.b, z24.b, z16.b\n"
- "zip2 z25.b, z25.b, z22.b\n"
- "zip2 z22.b, z26.b, z17.b\n"
+ "whilelt p0.b, XZR, x20\n"
+ "ld1b { z18.b }, p0/Z, [x9]\n"
"addvl x9, x9, #1\n"
- "addvl x28, x28, #1\n"
- "zip2 z21.b, z27.b, z21.b\n"
- "zip2 z16.b, z24.b, z16.b\n"
+ "ld1b { z28.b }, p0/Z, [x27]\n"
"addvl x27, x27, #1\n"
+ "ld1b { z17.b }, p0/Z, [x26]\n"
"addvl x26, x26, #1\n"
- "zip1 z24.b, z20.b, z19.b\n"
- "zip1 z17.b, z23.b, z18.b\n"
+ "ld1b { z27.b }, p0/Z, [x25]\n"
"addvl x25, x25, #1\n"
+ "ld1b { z16.b }, p0/Z, [x24]\n"
+ "zip1 z26.b, z18.b, z16.b\n"
+ "ld1b { z25.b }, p0/Z, [x23]\n"
"addvl x24, x24, #1\n"
- "zip2 z20.b, z20.b, z19.b\n"
- "zip2 z19.b, z23.b, z18.b\n"
+ "zip2 z24.b, z18.b, z16.b\n"
+ "ld1b { z16.b }, p0/Z, [x22]\n"
"addvl x23, x23, #1\n"
- "zip1 z23.b, z25.b, z22.b\n"
- "zip1 z18.b, z21.b, z16.b\n"
- "zip2 z22.b, z25.b, z22.b\n"
- "zip2 z21.b, z21.b, z16.b\n"
- "zip1 z16.b, z24.b, z17.b\n"
- "zip2 z17.b, z24.b, z17.b\n"
- "st1b { z16.b }, p1, [x20]\n"
- "zip1 z16.b, z20.b, z19.b\n"
- "zip2 z20.b, z20.b, z19.b\n"
- "st1b { z17.b }, p1, [x20, #1, MUL VL]\n"
- "zip1 z19.b, z23.b, z18.b\n"
- "zip2 z18.b, z23.b, z18.b\n"
- "st1b { z16.b }, p1, [x20, #2, MUL VL]\n"
- "zip1 z17.b, z22.b, z21.b\n"
- "zip2 z16.b, z22.b, z21.b\n"
- "st1b { z20.b }, p1, [x20, #3, MUL VL]\n"
- "st1b { z19.b }, p1, [x20, #4, MUL VL]\n"
- "st1b { z18.b }, p1, [x20, #5, MUL VL]\n"
- "st1b { z17.b }, p1, [x20, #6, MUL VL]\n"
- "st1b { z16.b }, p1, [x20, #7, MUL VL]\n"
- "add x20, x20, %x[out_stride]\n"
+ "zip1 z23.b, z28.b, z25.b\n"
+ "ld1b { z22.b }, p0/Z, [x21]\n"
+ "addvl x22, x22, #1\n"
+ "zip1 z20.b, z17.b, z16.b\n"
+ "addvl x21, x21, #1\n"
+ "zip2 z21.b, z17.b, z16.b\n"
+ "decd x20, ALL, MUL #8\n"
+ "zip1 z18.b, z26.b, z20.b\n"
+ "cmp x20, #0x0\n"
+ "zip1 z19.b, z27.b, z22.b\n"
+ "zip1 z17.b, z23.b, z19.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #1, MUL VL]\n"
+ "zip2 z18.b, z26.b, z20.b\n"
+ "zip2 z17.b, z23.b, z19.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #2, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #3, MUL VL]\n"
+ "zip1 z18.b, z24.b, z21.b\n"
+ "zip2 z20.b, z28.b, z25.b\n"
+ "zip2 z19.b, z27.b, z22.b\n"
+ "zip1 z17.b, z20.b, z19.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #4, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #5, MUL VL]\n"
+ "zip2 z18.b, z24.b, z21.b\n"
+ "zip2 z17.b, z20.b, z19.b\n"
+ "zip1 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #6, MUL VL]\n"
+ "zip2 z16.b, z18.b, z17.b\n"
+ "st1b { z16.b }, p1, [x28, #7, MUL VL]\n"
+ "add x28, x28, %x[out_stride]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp %x[height], #0x1\n"
"addvl %x[out], %x[out], #8\n"
+ "cmp %x[height], #0x1\n"
"bge 1b\n"
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_2x2.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_2x2.hpp
index 237e9b684f..7124f7e909 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_2x2.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_2x2.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -40,324 +40,325 @@ void sve_transpose_interleave_8VL_2x2(uint16_t *out, const uint16_t *in, size_t
size_t out_stride = 8 * roundup<size_t>(height, 2) * get_vector_length<uint16_t>();
__asm__ __volatile__(
+ "ptrue p3.b\n"
"cmp %x[height], #0x4\n"
- "ptrue p4.b\n"
"blt 6f\n"
"1:" // Main row loop: Head
- "mov x28, %x[in]\n"
- "mov x27, %x[width]\n"
- "cnth x26, ALL, MUL #8\n"
- "add x25, x28, %x[in_stride]\n"
+ "mov x27, %x[in]\n"
+ "mov x26, %x[out]\n"
+ "add x25, x27, %x[in_stride]\n"
"add x24, x25, %x[in_stride]\n"
"add x23, x24, %x[in_stride]\n"
- "cmp x27, x26\n"
"add %x[in], x23, %x[in_stride]\n"
- "mov x22, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
+ "mov x22, %x[width]\n"
+ "cnth x21, ALL, MUL #8\n"
+ "cmp x22, x21\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1h { z30.h }, p4/Z, [x28]\n"
- "ld1h { z12.h }, p4/Z, [x28, #1, MUL VL]\n"
- "mov x21, x22\n"
- "add x22, x22, %x[out_stride]\n"
- "ld1h { z31.h }, p4/Z, [x28, #2, MUL VL]\n"
- "ld1h { z18.h }, p4/Z, [x28, #3, MUL VL]\n"
- "mov x20, x22\n"
- "sub x27, x27, x26\n"
- "ld1h { z20.h }, p4/Z, [x25]\n"
- "ld1h { z17.h }, p4/Z, [x25, #1, MUL VL]\n"
- "zip1 z3.h, z30.h, z20.h\n"
- "zip2 z21.h, z30.h, z20.h\n"
- "ld1h { z26.h }, p4/Z, [x25, #2, MUL VL]\n"
- "ld1h { z23.h }, p4/Z, [x25, #3, MUL VL]\n"
- "zip1 z13.h, z12.h, z17.h\n"
- "zip2 z0.h, z12.h, z17.h\n"
- "ld1h { z2.h }, p4/Z, [x28, #4, MUL VL]\n"
- "ld1h { z24.h }, p4/Z, [x28, #5, MUL VL]\n"
- "zip1 z12.h, z31.h, z26.h\n"
- "zip2 z14.h, z31.h, z26.h\n"
- "ld1h { z17.h }, p4/Z, [x28, #6, MUL VL]\n"
- "ld1h { z29.h }, p4/Z, [x28, #7, MUL VL]\n"
- "zip1 z16.h, z18.h, z23.h\n"
- "zip2 z15.h, z18.h, z23.h\n"
- "ld1h { z9.h }, p4/Z, [x25, #4, MUL VL]\n"
- "ld1h { z18.h }, p4/Z, [x25, #5, MUL VL]\n"
- "zip1 z11.h, z2.h, z9.h\n"
- "zip2 z5.h, z2.h, z9.h\n"
- "ld1h { z7.h }, p4/Z, [x25, #6, MUL VL]\n"
- "ld1h { z2.h }, p4/Z, [x25, #7, MUL VL]\n"
- "zip1 z10.h, z24.h, z18.h\n"
- "zip2 z6.h, z24.h, z18.h\n"
- "ld1h { z19.h }, p4/Z, [x24]\n"
- "ld1h { z18.h }, p4/Z, [x24, #1, MUL VL]\n"
- "zip1 z9.h, z17.h, z7.h\n"
- "zip2 z4.h, z17.h, z7.h\n"
- "ld1h { z24.h }, p4/Z, [x24, #2, MUL VL]\n"
- "ld1h { z22.h }, p4/Z, [x24, #3, MUL VL]\n"
- "zip1 z7.h, z29.h, z2.h\n"
- "zip2 z8.h, z29.h, z2.h\n"
- "ld1h { z25.h }, p4/Z, [x24, #4, MUL VL]\n"
- "ld1h { z17.h }, p4/Z, [x24, #5, MUL VL]\n"
- "cmp x27, x26\n"
- "addvl x28, x28, #8\n"
- "ld1h { z2.h }, p4/Z, [x24, #6, MUL VL]\n"
- "ld1h { z30.h }, p4/Z, [x24, #7, MUL VL]\n"
+ "ld1h { z8.h }, p3/Z, [x27]\n"
+ "mov x20, x26\n"
+ "ld1h { z3.h }, p3/Z, [x27, #1, MUL VL]\n"
+ "add x26, x26, %x[out_stride]\n"
+ "ld1h { z22.h }, p3/Z, [x27, #2, MUL VL]\n"
+ "mov x19, x26\n"
+ "ld1h { z12.h }, p3/Z, [x27, #3, MUL VL]\n"
+ "add x26, x26, %x[out_stride]\n"
+ "ld1h { z4.h }, p3/Z, [x27, #4, MUL VL]\n"
+ "sub x22, x22, x21\n"
+ "ld1h { z25.h }, p3/Z, [x27, #5, MUL VL]\n"
+ "cmp x22, x21\n"
+ "ld1h { z15.h }, p3/Z, [x27, #6, MUL VL]\n"
+ "ld1h { z2.h }, p3/Z, [x27, #7, MUL VL]\n"
+ "addvl x27, x27, #8\n"
+ "ld1h { z16.h }, p3/Z, [x25]\n"
+ "zip1 z21.h, z8.h, z16.h\n"
+ "ld1h { z27.h }, p3/Z, [x25, #1, MUL VL]\n"
+ "zip2 z7.h, z8.h, z16.h\n"
+ "ld1h { z18.h }, p3/Z, [x25, #2, MUL VL]\n"
+ "ld1h { z30.h }, p3/Z, [x25, #3, MUL VL]\n"
+ "zip1 z19.h, z3.h, z27.h\n"
+ "ld1h { z0.h }, p3/Z, [x25, #4, MUL VL]\n"
+ "zip2 z16.h, z3.h, z27.h\n"
+ "ld1h { z27.h }, p3/Z, [x25, #5, MUL VL]\n"
+ "zip1 z13.h, z22.h, z18.h\n"
+ "ld1h { z26.h }, p3/Z, [x25, #6, MUL VL]\n"
+ "zip2 z29.h, z22.h, z18.h\n"
+ "ld1h { z24.h }, p3/Z, [x25, #7, MUL VL]\n"
"addvl x25, x25, #8\n"
+ "zip1 z20.h, z12.h, z30.h\n"
+ "ld1h { z9.h }, p3/Z, [x24]\n"
+ "zip2 z14.h, z12.h, z30.h\n"
+ "ld1h { z12.h }, p3/Z, [x24, #1, MUL VL]\n"
+ "zip1 z5.h, z4.h, z0.h\n"
+ "ld1h { z31.h }, p3/Z, [x24, #2, MUL VL]\n"
+ "zip2 z1.h, z4.h, z0.h\n"
+ "ld1h { z22.h }, p3/Z, [x24, #3, MUL VL]\n"
+ "zip1 z10.h, z25.h, z27.h\n"
+ "ld1h { z3.h }, p3/Z, [x24, #4, MUL VL]\n"
+ "zip2 z6.h, z25.h, z27.h\n"
+ "ld1h { z4.h }, p3/Z, [x24, #5, MUL VL]\n"
+ "zip1 z8.h, z15.h, z26.h\n"
+ "ld1h { z25.h }, p3/Z, [x24, #6, MUL VL]\n"
+ "zip2 z11.h, z15.h, z26.h\n"
+ "ld1h { z30.h }, p3/Z, [x24, #7, MUL VL]\n"
"addvl x24, x24, #8\n"
- "ld1h { z20.h }, p4/Z, [x23]\n"
- "ld1h { z27.h }, p4/Z, [x23, #1, MUL VL]\n"
- "zip1 z31.h, z19.h, z20.h\n"
- "zip2 z29.h, z19.h, z20.h\n"
- "ld1h { z26.h }, p4/Z, [x23, #2, MUL VL]\n"
- "ld1h { z23.h }, p4/Z, [x23, #3, MUL VL]\n"
- "zip1 z28.h, z18.h, z27.h\n"
- "zip2 z1.h, z18.h, z27.h\n"
- "ld1h { z20.h }, p4/Z, [x23, #4, MUL VL]\n"
- "ld1h { z19.h }, p4/Z, [x23, #5, MUL VL]\n"
- "zip1 z27.h, z24.h, z26.h\n"
- "zip2 z26.h, z24.h, z26.h\n"
- "ld1h { z18.h }, p4/Z, [x23, #6, MUL VL]\n"
- "ld1h { z24.h }, p4/Z, [x23, #7, MUL VL]\n"
- "st1h { z3.h }, p4, [x21]\n"
- "zip1 z3.h, z22.h, z23.h\n"
- "st1h { z21.h }, p4, [x21, #1, MUL VL]\n"
- "zip2 z22.h, z22.h, z23.h\n"
+ "zip1 z17.h, z2.h, z24.h\n"
+ "ld1h { z23.h }, p3/Z, [x23]\n"
+ "zip2 z0.h, z2.h, z24.h\n"
+ "ld1h { z28.h }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z15.h }, p3/Z, [x23, #2, MUL VL]\n"
+ "zip1 z18.h, z9.h, z23.h\n"
+ "ld1h { z26.h }, p3/Z, [x23, #3, MUL VL]\n"
+ "zip2 z27.h, z9.h, z23.h\n"
+ "ld1h { z2.h }, p3/Z, [x23, #4, MUL VL]\n"
+ "zip1 z9.h, z12.h, z28.h\n"
+ "ld1h { z24.h }, p3/Z, [x23, #5, MUL VL]\n"
+ "zip2 z12.h, z12.h, z28.h\n"
+ "ld1h { z23.h }, p3/Z, [x23, #6, MUL VL]\n"
+ "zip1 z28.h, z31.h, z15.h\n"
+ "zip2 z31.h, z31.h, z15.h\n"
+ "ld1h { z15.h }, p3/Z, [x23, #7, MUL VL]\n"
"addvl x23, x23, #8\n"
- "zip1 z23.h, z25.h, z20.h\n"
- "st1h { z13.h }, p4, [x21, #2, MUL VL]\n"
- "zip2 z25.h, z25.h, z20.h\n"
- "zip1 z21.h, z17.h, z19.h\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z0.h }, p4, [x21, #3, MUL VL]\n"
- "zip2 z20.h, z17.h, z19.h\n"
- "zip1 z19.h, z2.h, z18.h\n"
- "st1h { z12.h }, p4, [x21, #4, MUL VL]\n"
- "zip2 z18.h, z2.h, z18.h\n"
- "zip1 z17.h, z30.h, z24.h\n"
- "st1h { z14.h }, p4, [x21, #5, MUL VL]\n"
- "zip2 z13.h, z30.h, z24.h\n"
- "st1h { z16.h }, p4, [x21, #6, MUL VL]\n"
- "st1h { z15.h }, p4, [x21, #7, MUL VL]\n"
- "addvl x21, x21, #16\n"
- "st1h { z31.h }, p4, [x21, #-8, MUL VL]\n"
- "st1h { z29.h }, p4, [x21, #-7, MUL VL]\n"
- "st1h { z28.h }, p4, [x21, #-6, MUL VL]\n"
- "st1h { z1.h }, p4, [x21, #-5, MUL VL]\n"
- "st1h { z27.h }, p4, [x21, #-4, MUL VL]\n"
- "st1h { z26.h }, p4, [x21, #-3, MUL VL]\n"
- "st1h { z3.h }, p4, [x21, #-2, MUL VL]\n"
- "st1h { z22.h }, p4, [x21, #-1, MUL VL]\n"
- "st1h { z11.h }, p4, [x20]\n"
- "st1h { z5.h }, p4, [x20, #1, MUL VL]\n"
- "st1h { z10.h }, p4, [x20, #2, MUL VL]\n"
- "st1h { z6.h }, p4, [x20, #3, MUL VL]\n"
- "st1h { z9.h }, p4, [x20, #4, MUL VL]\n"
- "st1h { z4.h }, p4, [x20, #5, MUL VL]\n"
- "st1h { z7.h }, p4, [x20, #6, MUL VL]\n"
- "st1h { z8.h }, p4, [x20, #7, MUL VL]\n"
+ "st1h { z21.h }, p3, [x20]\n"
+ "zip1 z21.h, z22.h, z26.h\n"
+ "zip2 z26.h, z22.h, z26.h\n"
+ "st1h { z7.h }, p3, [x20, #1, MUL VL]\n"
+ "zip1 z7.h, z3.h, z2.h\n"
+ "st1h { z19.h }, p3, [x20, #2, MUL VL]\n"
+ "zip2 z22.h, z3.h, z2.h\n"
+ "st1h { z16.h }, p3, [x20, #3, MUL VL]\n"
+ "zip1 z2.h, z4.h, z24.h\n"
+ "st1h { z13.h }, p3, [x20, #4, MUL VL]\n"
+ "zip2 z3.h, z4.h, z24.h\n"
+ "st1h { z29.h }, p3, [x20, #5, MUL VL]\n"
+ "zip1 z4.h, z25.h, z23.h\n"
+ "st1h { z20.h }, p3, [x20, #6, MUL VL]\n"
+ "zip2 z20.h, z25.h, z23.h\n"
+ "st1h { z14.h }, p3, [x20, #7, MUL VL]\n"
"addvl x20, x20, #16\n"
- "st1h { z23.h }, p4, [x20, #-8, MUL VL]\n"
- "st1h { z25.h }, p4, [x20, #-7, MUL VL]\n"
- "st1h { z21.h }, p4, [x20, #-6, MUL VL]\n"
- "st1h { z20.h }, p4, [x20, #-5, MUL VL]\n"
- "st1h { z19.h }, p4, [x20, #-4, MUL VL]\n"
- "st1h { z18.h }, p4, [x20, #-3, MUL VL]\n"
- "st1h { z17.h }, p4, [x20, #-2, MUL VL]\n"
- "st1h { z13.h }, p4, [x20, #-1, MUL VL]\n"
+ "zip1 z25.h, z30.h, z15.h\n"
+ "st1h { z18.h }, p3, [x20, #-8, MUL VL]\n"
+ "zip2 z18.h, z30.h, z15.h\n"
+ "st1h { z27.h }, p3, [x20, #-7, MUL VL]\n"
+ "st1h { z9.h }, p3, [x20, #-6, MUL VL]\n"
+ "st1h { z12.h }, p3, [x20, #-5, MUL VL]\n"
+ "st1h { z28.h }, p3, [x20, #-4, MUL VL]\n"
+ "st1h { z31.h }, p3, [x20, #-3, MUL VL]\n"
+ "st1h { z21.h }, p3, [x20, #-2, MUL VL]\n"
+ "st1h { z26.h }, p3, [x20, #-1, MUL VL]\n"
+ "st1h { z5.h }, p3, [x19]\n"
+ "st1h { z1.h }, p3, [x19, #1, MUL VL]\n"
+ "st1h { z10.h }, p3, [x19, #2, MUL VL]\n"
+ "st1h { z6.h }, p3, [x19, #3, MUL VL]\n"
+ "st1h { z8.h }, p3, [x19, #4, MUL VL]\n"
+ "st1h { z11.h }, p3, [x19, #5, MUL VL]\n"
+ "st1h { z17.h }, p3, [x19, #6, MUL VL]\n"
+ "st1h { z0.h }, p3, [x19, #7, MUL VL]\n"
+ "addvl x19, x19, #16\n"
+ "st1h { z7.h }, p3, [x19, #-8, MUL VL]\n"
+ "st1h { z22.h }, p3, [x19, #-7, MUL VL]\n"
+ "st1h { z2.h }, p3, [x19, #-6, MUL VL]\n"
+ "st1h { z3.h }, p3, [x19, #-5, MUL VL]\n"
+ "st1h { z4.h }, p3, [x19, #-4, MUL VL]\n"
+ "st1h { z20.h }, p3, [x19, #-3, MUL VL]\n"
+ "st1h { z25.h }, p3, [x19, #-2, MUL VL]\n"
+ "st1h { z18.h }, p3, [x19, #-1, MUL VL]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x27, 5f\n"
+ "cbz x22, 5f\n"
"4:" // Main row loop: Column loop
- "mov x20, x27\n"
- "whilelt p3.h, XZR, x20\n"
- "ld1h { z20.h }, p3/Z, [x28]\n"
- "ld1h { z19.h }, p3/Z, [x25]\n"
+ "mov x20, x22\n"
+ "mov x19, x26\n"
+ "whilelt p0.h, XZR, x20\n"
+ "ld1h { z17.h }, p0/Z, [x27]\n"
+ "ld1h { z16.h }, p0/Z, [x25]\n"
+ "zip1 z0.h, z17.h, z16.h\n"
+ "ld1h { z18.h }, p0/Z, [x24]\n"
"dech x20\n"
+ "zip2 z31.h, z17.h, z16.h\n"
+ "ld1h { z16.h }, p0/Z, [x23]\n"
"whilelt p2.h, XZR, x20\n"
- "ld1h { z18.h }, p2/Z, [x28, #1, MUL VL]\n"
- "ld1h { z17.h }, p2/Z, [x25, #1, MUL VL]\n"
+ "zip1 z30.h, z18.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x27, #1, MUL VL]\n"
"dech x20\n"
+ "zip2 z29.h, z18.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x25, #1, MUL VL]\n"
"whilelt p1.h, XZR, x20\n"
- "ld1h { z25.h }, p1/Z, [x28, #2, MUL VL]\n"
- "ld1h { z16.h }, p1/Z, [x25, #2, MUL VL]\n"
+ "zip1 z28.h, z17.h, z16.h\n"
+ "ld1h { z18.h }, p1/Z, [x27, #2, MUL VL]\n"
"dech x20\n"
+ "zip2 z27.h, z17.h, z16.h\n"
+ "ld1h { z16.h }, p1/Z, [x25, #2, MUL VL]\n"
"whilelt p0.h, XZR, x20\n"
- "ld1h { z0.h }, p0/Z, [x28, #3, MUL VL]\n"
- "ld1h { z24.h }, p0/Z, [x25, #3, MUL VL]\n"
- "mov x20, x22\n"
- "decw x27, ALL, MUL #8\n"
- "ld1h { z31.h }, p3/Z, [x24]\n"
- "ld1h { z30.h }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1h { z29.h }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1h { z28.h }, p0/Z, [x24, #3, MUL VL]\n"
- "zip1 z23.h, z20.h, z19.h\n"
- "zip2 z22.h, z20.h, z19.h\n"
- "ld1h { z21.h }, p3/Z, [x23]\n"
- "ld1h { z27.h }, p2/Z, [x23, #1, MUL VL]\n"
- "zip1 z20.h, z18.h, z17.h\n"
- "zip2 z19.h, z18.h, z17.h\n"
- "ld1h { z18.h }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1h { z26.h }, p0/Z, [x23, #3, MUL VL]\n"
- "zip1 z17.h, z25.h, z16.h\n"
- "zip2 z16.h, z25.h, z16.h\n"
- "zip1 z25.h, z0.h, z24.h\n"
- "zip2 z24.h, z0.h, z24.h\n"
- "st1h { z23.h }, p4, [x20]\n"
- "cmp x27, #0x0\n"
- "st1h { z22.h }, p4, [x20, #1, MUL VL]\n"
- "addvl x28, x28, #4\n"
+ "zip1 z26.h, z18.h, z16.h\n"
+ "ld1h { z17.h }, p0/Z, [x27, #3, MUL VL]\n"
+ "addvl x27, x27, #4\n"
+ "zip2 z25.h, z18.h, z16.h\n"
+ "ld1h { z16.h }, p0/Z, [x25, #3, MUL VL]\n"
"addvl x25, x25, #4\n"
- "zip1 z23.h, z31.h, z21.h\n"
- "st1h { z20.h }, p4, [x20, #2, MUL VL]\n"
+ "zip1 z24.h, z17.h, z16.h\n"
+ "ld1h { z19.h }, p2/Z, [x24, #1, MUL VL]\n"
+ "add x26, x26, %x[out_stride]\n"
+ "zip2 z23.h, z17.h, z16.h\n"
+ "ld1h { z18.h }, p1/Z, [x24, #2, MUL VL]\n"
+ "decw x22, ALL, MUL #8\n"
+ "ld1h { z22.h }, p0/Z, [x24, #3, MUL VL]\n"
"addvl x24, x24, #4\n"
+ "ld1h { z16.h }, p2/Z, [x23, #1, MUL VL]\n"
+ "zip1 z21.h, z19.h, z16.h\n"
+ "ld1h { z17.h }, p1/Z, [x23, #2, MUL VL]\n"
+ "cmp x22, #0x0\n"
+ "zip2 z20.h, z19.h, z16.h\n"
+ "ld1h { z16.h }, p0/Z, [x23, #3, MUL VL]\n"
"addvl x23, x23, #4\n"
- "zip2 z22.h, z31.h, z21.h\n"
- "st1h { z19.h }, p4, [x20, #3, MUL VL]\n"
- "zip1 z21.h, z30.h, z27.h\n"
- "zip2 z20.h, z30.h, z27.h\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z17.h }, p4, [x20, #4, MUL VL]\n"
- "zip1 z19.h, z29.h, z18.h\n"
- "zip2 z18.h, z29.h, z18.h\n"
- "st1h { z16.h }, p4, [x20, #5, MUL VL]\n"
- "zip1 z17.h, z28.h, z26.h\n"
- "zip2 z16.h, z28.h, z26.h\n"
- "st1h { z25.h }, p4, [x20, #6, MUL VL]\n"
- "st1h { z24.h }, p4, [x20, #7, MUL VL]\n"
- "addvl x20, x20, #16\n"
- "st1h { z23.h }, p4, [x20, #-8, MUL VL]\n"
- "st1h { z22.h }, p4, [x20, #-7, MUL VL]\n"
- "st1h { z21.h }, p4, [x20, #-6, MUL VL]\n"
- "st1h { z20.h }, p4, [x20, #-5, MUL VL]\n"
- "st1h { z19.h }, p4, [x20, #-4, MUL VL]\n"
- "st1h { z18.h }, p4, [x20, #-3, MUL VL]\n"
- "st1h { z17.h }, p4, [x20, #-2, MUL VL]\n"
- "st1h { z16.h }, p4, [x20, #-1, MUL VL]\n"
+ "zip1 z19.h, z18.h, z17.h\n"
+ "st1h { z0.h }, p3, [x19]\n"
+ "zip2 z18.h, z18.h, z17.h\n"
+ "st1h { z31.h }, p3, [x19, #1, MUL VL]\n"
+ "zip1 z17.h, z22.h, z16.h\n"
+ "st1h { z28.h }, p3, [x19, #2, MUL VL]\n"
+ "zip2 z16.h, z22.h, z16.h\n"
+ "st1h { z27.h }, p3, [x19, #3, MUL VL]\n"
+ "st1h { z26.h }, p3, [x19, #4, MUL VL]\n"
+ "st1h { z25.h }, p3, [x19, #5, MUL VL]\n"
+ "st1h { z24.h }, p3, [x19, #6, MUL VL]\n"
+ "st1h { z23.h }, p3, [x19, #7, MUL VL]\n"
+ "addvl x19, x19, #16\n"
+ "st1h { z30.h }, p3, [x19, #-8, MUL VL]\n"
+ "st1h { z29.h }, p3, [x19, #-7, MUL VL]\n"
+ "st1h { z21.h }, p3, [x19, #-6, MUL VL]\n"
+ "st1h { z20.h }, p3, [x19, #-5, MUL VL]\n"
+ "st1h { z19.h }, p3, [x19, #-4, MUL VL]\n"
+ "st1h { z18.h }, p3, [x19, #-3, MUL VL]\n"
+ "st1h { z17.h }, p3, [x19, #-2, MUL VL]\n"
+ "st1h { z16.h }, p3, [x19, #-1, MUL VL]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp %x[height], #0x4\n"
"addvl %x[out], %x[out], #16\n"
+ "cmp %x[height], #0x4\n"
"bge 1b\n"
"cbz %x[height], 12f\n"
"6:" // Main loop skip
+
"7:" // Tail row loop: Head
- "mov x28, %x[in]\n"
- "mov x21, %x[width]\n"
- "cnth x20, ALL, MUL #8\n"
- "add x25, x28, %x[in_stride]\n"
- "cmp %x[height], #0x1\n"
+ "mov x27, %x[in]\n"
+ "mov x26, %x[out]\n"
+ "add x25, x27, %x[in_stride]\n"
"add %x[in], x25, %x[in_stride]\n"
+ "cmp %x[height], #0x1\n"
"csel x25, x25, %x[pad_row], GT\n"
- "cmp x21, x20\n"
- "mov x22, %x[out]\n"
"sub %x[height], %x[height], #0x2\n"
+ "mov x20, %x[width]\n"
+ "cnth x19, ALL, MUL #8\n"
+ "cmp x20, x19\n"
"blt 9f\n"
"8:" // Tail row loop: Unroll column loop
- "ld1h { z17.h }, p4/Z, [x28]\n"
- "ld1h { z20.h }, p4/Z, [x28, #1, MUL VL]\n"
- "sub x21, x21, x20\n"
- "cmp x21, x20\n"
- "ld1h { z23.h }, p4/Z, [x28, #2, MUL VL]\n"
- "ld1h { z19.h }, p4/Z, [x28, #3, MUL VL]\n"
- "ld1h { z16.h }, p4/Z, [x25]\n"
- "ld1h { z18.h }, p4/Z, [x25, #1, MUL VL]\n"
- "zip1 z0.h, z17.h, z16.h\n"
- "zip2 z22.h, z17.h, z16.h\n"
- "ld1h { z17.h }, p4/Z, [x25, #2, MUL VL]\n"
- "ld1h { z16.h }, p4/Z, [x25, #3, MUL VL]\n"
- "zip1 z31.h, z20.h, z18.h\n"
- "zip2 z30.h, z20.h, z18.h\n"
- "ld1h { z21.h }, p4/Z, [x28, #4, MUL VL]\n"
- "ld1h { z20.h }, p4/Z, [x28, #5, MUL VL]\n"
- "zip1 z29.h, z23.h, z17.h\n"
- "zip2 z28.h, z23.h, z17.h\n"
- "ld1h { z27.h }, p4/Z, [x28, #6, MUL VL]\n"
- "ld1h { z26.h }, p4/Z, [x28, #7, MUL VL]\n"
- "zip1 z25.h, z19.h, z16.h\n"
- "zip2 z24.h, z19.h, z16.h\n"
- "ld1h { z19.h }, p4/Z, [x25, #4, MUL VL]\n"
- "ld1h { z18.h }, p4/Z, [x25, #5, MUL VL]\n"
- "addvl x28, x28, #8\n"
- "zip1 z23.h, z21.h, z19.h\n"
- "ld1h { z17.h }, p4/Z, [x25, #6, MUL VL]\n"
- "ld1h { z16.h }, p4/Z, [x25, #7, MUL VL]\n"
- "st1h { z0.h }, p4, [x22]\n"
+ "ld1h { z17.h }, p3/Z, [x27]\n"
+ "sub x20, x20, x19\n"
+ "ld1h { z20.h }, p3/Z, [x27, #1, MUL VL]\n"
+ "cmp x20, x19\n"
+ "ld1h { z19.h }, p3/Z, [x27, #2, MUL VL]\n"
+ "ld1h { z1.h }, p3/Z, [x27, #3, MUL VL]\n"
+ "ld1h { z0.h }, p3/Z, [x27, #4, MUL VL]\n"
+ "ld1h { z31.h }, p3/Z, [x27, #5, MUL VL]\n"
+ "ld1h { z30.h }, p3/Z, [x27, #6, MUL VL]\n"
+ "ld1h { z29.h }, p3/Z, [x27, #7, MUL VL]\n"
+ "addvl x27, x27, #8\n"
+ "ld1h { z16.h }, p3/Z, [x25]\n"
+ "zip1 z28.h, z17.h, z16.h\n"
+ "ld1h { z18.h }, p3/Z, [x25, #1, MUL VL]\n"
+ "zip2 z27.h, z17.h, z16.h\n"
+ "ld1h { z17.h }, p3/Z, [x25, #2, MUL VL]\n"
+ "ld1h { z16.h }, p3/Z, [x25, #3, MUL VL]\n"
+ "zip1 z26.h, z20.h, z18.h\n"
+ "ld1h { z22.h }, p3/Z, [x25, #4, MUL VL]\n"
+ "zip2 z21.h, z20.h, z18.h\n"
+ "ld1h { z25.h }, p3/Z, [x25, #5, MUL VL]\n"
+ "zip1 z20.h, z19.h, z17.h\n"
+ "ld1h { z24.h }, p3/Z, [x25, #6, MUL VL]\n"
+ "zip2 z19.h, z19.h, z17.h\n"
+ "ld1h { z23.h }, p3/Z, [x25, #7, MUL VL]\n"
"addvl x25, x25, #8\n"
- "st1h { z22.h }, p4, [x22, #1, MUL VL]\n"
- "zip2 z22.h, z21.h, z19.h\n"
- "zip1 z21.h, z20.h, z18.h\n"
- "st1h { z31.h }, p4, [x22, #2, MUL VL]\n"
- "zip2 z20.h, z20.h, z18.h\n"
- "zip1 z19.h, z27.h, z17.h\n"
- "st1h { z30.h }, p4, [x22, #3, MUL VL]\n"
- "zip2 z18.h, z27.h, z17.h\n"
- "zip1 z17.h, z26.h, z16.h\n"
- "st1h { z29.h }, p4, [x22, #4, MUL VL]\n"
- "zip2 z16.h, z26.h, z16.h\n"
- "st1h { z28.h }, p4, [x22, #5, MUL VL]\n"
- "st1h { z25.h }, p4, [x22, #6, MUL VL]\n"
- "st1h { z24.h }, p4, [x22, #7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z23.h }, p4, [x22]\n"
- "st1h { z22.h }, p4, [x22, #1, MUL VL]\n"
- "st1h { z21.h }, p4, [x22, #2, MUL VL]\n"
- "st1h { z20.h }, p4, [x22, #3, MUL VL]\n"
- "st1h { z19.h }, p4, [x22, #4, MUL VL]\n"
- "st1h { z18.h }, p4, [x22, #5, MUL VL]\n"
- "st1h { z17.h }, p4, [x22, #6, MUL VL]\n"
- "st1h { z16.h }, p4, [x22, #7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "zip1 z18.h, z1.h, z16.h\n"
+ "st1h { z28.h }, p3, [x26]\n"
+ "zip2 z17.h, z1.h, z16.h\n"
+ "st1h { z27.h }, p3, [x26, #1, MUL VL]\n"
+ "zip1 z16.h, z0.h, z22.h\n"
+ "st1h { z26.h }, p3, [x26, #2, MUL VL]\n"
+ "zip2 z22.h, z0.h, z22.h\n"
+ "st1h { z21.h }, p3, [x26, #3, MUL VL]\n"
+ "zip1 z21.h, z31.h, z25.h\n"
+ "st1h { z20.h }, p3, [x26, #4, MUL VL]\n"
+ "zip2 z20.h, z31.h, z25.h\n"
+ "st1h { z19.h }, p3, [x26, #5, MUL VL]\n"
+ "zip1 z19.h, z30.h, z24.h\n"
+ "st1h { z18.h }, p3, [x26, #6, MUL VL]\n"
+ "zip2 z18.h, z30.h, z24.h\n"
+ "st1h { z17.h }, p3, [x26, #7, MUL VL]\n"
+ "add x26, x26, %x[out_stride]\n"
+ "zip1 z17.h, z29.h, z23.h\n"
+ "st1h { z16.h }, p3, [x26]\n"
+ "zip2 z16.h, z29.h, z23.h\n"
+ "st1h { z22.h }, p3, [x26, #1, MUL VL]\n"
+ "st1h { z21.h }, p3, [x26, #2, MUL VL]\n"
+ "st1h { z20.h }, p3, [x26, #3, MUL VL]\n"
+ "st1h { z19.h }, p3, [x26, #4, MUL VL]\n"
+ "st1h { z18.h }, p3, [x26, #5, MUL VL]\n"
+ "st1h { z17.h }, p3, [x26, #6, MUL VL]\n"
+ "st1h { z16.h }, p3, [x26, #7, MUL VL]\n"
+ "add x26, x26, %x[out_stride]\n"
"bge 8b\n"
"9:" // Tail row loop: Unroll column loop skip
- "cbz x21, 11f\n"
+ "cbz x20, 11f\n"
"10:" // Tail row loop: Column loop
- "mov x20, x21\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z22.h }, p0/Z, [x28]\n"
- "ld1h { z21.h }, p0/Z, [x25]\n"
- "dech x20\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z20.h }, p0/Z, [x28, #1, MUL VL]\n"
- "ld1h { z19.h }, p0/Z, [x25, #1, MUL VL]\n"
- "dech x20\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z18.h }, p0/Z, [x28, #2, MUL VL]\n"
- "ld1h { z17.h }, p0/Z, [x25, #2, MUL VL]\n"
- "dech x20\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z24.h }, p0/Z, [x28, #3, MUL VL]\n"
- "ld1h { z23.h }, p0/Z, [x25, #3, MUL VL]\n"
- "decw x21, ALL, MUL #8\n"
- "cmp x21, #0x0\n"
- "zip1 z16.h, z22.h, z21.h\n"
- "zip2 z22.h, z22.h, z21.h\n"
- "addvl x28, x28, #4\n"
+ "mov x19, x20\n"
+ "decw x20, ALL, MUL #8\n"
+ "whilelt p0.h, XZR, x19\n"
+ "ld1h { z17.h }, p0/Z, [x27]\n"
+ "ld1h { z16.h }, p0/Z, [x25]\n"
+ "zip1 z24.h, z17.h, z16.h\n"
+ "dech x19\n"
+ "zip2 z23.h, z17.h, z16.h\n"
+ "whilelt p0.h, XZR, x19\n"
+ "ld1h { z18.h }, p0/Z, [x27, #1, MUL VL]\n"
+ "dech x19\n"
+ "ld1h { z16.h }, p0/Z, [x25, #1, MUL VL]\n"
+ "zip1 z22.h, z18.h, z16.h\n"
+ "whilelt p0.h, XZR, x19\n"
+ "ld1h { z17.h }, p0/Z, [x27, #2, MUL VL]\n"
+ "zip2 z21.h, z18.h, z16.h\n"
+ "dech x19\n"
+ "ld1h { z16.h }, p0/Z, [x25, #2, MUL VL]\n"
+ "zip1 z20.h, z17.h, z16.h\n"
+ "whilelt p0.h, XZR, x19\n"
+ "ld1h { z19.h }, p0/Z, [x27, #3, MUL VL]\n"
+ "zip2 z18.h, z17.h, z16.h\n"
+ "addvl x27, x27, #4\n"
+ "ld1h { z16.h }, p0/Z, [x25, #3, MUL VL]\n"
+ "zip1 z17.h, z19.h, z16.h\n"
+ "st1h { z24.h }, p3, [x26]\n"
"addvl x25, x25, #4\n"
- "zip1 z21.h, z20.h, z19.h\n"
- "zip2 z20.h, z20.h, z19.h\n"
- "zip1 z19.h, z18.h, z17.h\n"
- "zip2 z18.h, z18.h, z17.h\n"
- "st1h { z16.h }, p4, [x22]\n"
- "zip1 z17.h, z24.h, z23.h\n"
- "zip2 z16.h, z24.h, z23.h\n"
- "st1h { z22.h }, p4, [x22, #1, MUL VL]\n"
- "st1h { z21.h }, p4, [x22, #2, MUL VL]\n"
- "st1h { z20.h }, p4, [x22, #3, MUL VL]\n"
- "st1h { z19.h }, p4, [x22, #4, MUL VL]\n"
- "st1h { z18.h }, p4, [x22, #5, MUL VL]\n"
- "st1h { z17.h }, p4, [x22, #6, MUL VL]\n"
- "st1h { z16.h }, p4, [x22, #7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "zip2 z16.h, z19.h, z16.h\n"
+ "st1h { z23.h }, p3, [x26, #1, MUL VL]\n"
+ "cmp x20, #0x0\n"
+ "st1h { z22.h }, p3, [x26, #2, MUL VL]\n"
+ "st1h { z21.h }, p3, [x26, #3, MUL VL]\n"
+ "st1h { z20.h }, p3, [x26, #4, MUL VL]\n"
+ "st1h { z18.h }, p3, [x26, #5, MUL VL]\n"
+ "st1h { z17.h }, p3, [x26, #6, MUL VL]\n"
+ "st1h { z16.h }, p3, [x26, #7, MUL VL]\n"
+ "add x26, x26, %x[out_stride]\n"
"bgt 10b\n"
"11:" // Tail row loop: Column loop skip
- "cmp %x[height], #0x1\n"
"addvl %x[out], %x[out], #8\n"
+ "cmp %x[height], #0x1\n"
"bge 7b\n"
"12:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_2x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_2x4.hpp
index 51cae7dd5a..891e3abeb0 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_2x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_2x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -40,409 +40,410 @@ void sve_transpose_interleave_8VL_2x4(uint16_t *out, const uint16_t *in, size_t
size_t out_stride = 8 * roundup<size_t>(height, 4) * get_vector_length<uint32_t>();
__asm__ __volatile__(
- "cmp %x[height], #0x8\n"
"ptrue p2.b\n"
+ "cmp %x[height], #0x8\n"
"blt 6f\n"
"1:" // Main row loop: Head
- "mov x12, %x[in]\n"
- "add x11, x12, %x[in_stride]\n"
- "add x10, x11, %x[in_stride]\n"
- "add x9, x10, %x[in_stride]\n"
+ "mov x11, %x[in]\n"
+ "mov x10, %x[out]\n"
+ "add x9, x11, %x[in_stride]\n"
"add x28, x9, %x[in_stride]\n"
- "mov x27, %x[width]\n"
- "cnth x26, ALL, MUL #4\n"
- "add x25, x28, %x[in_stride]\n"
+ "add x27, x28, %x[in_stride]\n"
+ "add x26, x27, %x[in_stride]\n"
+ "add x25, x26, %x[in_stride]\n"
"add x24, x25, %x[in_stride]\n"
"add x23, x24, %x[in_stride]\n"
- "cmp x27, x26\n"
"add %x[in], x23, %x[in_stride]\n"
- "mov x22, %x[out]\n"
"sub %x[height], %x[height], #0x8\n"
+ "mov x22, %x[width]\n"
+ "cnth x21, ALL, MUL #4\n"
+ "cmp x22, x21\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1h { z21.h }, p2/Z, [x12]\n"
- "ld1h { z17.h }, p2/Z, [x12, #1, MUL VL]\n"
- "mov x21, x22\n"
- "add x22, x22, %x[out_stride]\n"
- "ld1h { z31.h }, p2/Z, [x11]\n"
- "ld1h { z5.h }, p2/Z, [x11, #1, MUL VL]\n"
- "mov x20, x22\n"
- "sub x27, x27, x26\n"
- "ld1h { z15.h }, p2/Z, [x10]\n"
- "ld1h { z28.h }, p2/Z, [x10, #1, MUL VL]\n"
- "zip1 z24.h, z21.h, z15.h\n"
- "zip2 z29.h, z21.h, z15.h\n"
- "ld1h { z6.h }, p2/Z, [x9]\n"
- "ld1h { z4.h }, p2/Z, [x9, #1, MUL VL]\n"
- "zip1 z16.h, z31.h, z6.h\n"
- "zip2 z18.h, z31.h, z6.h\n"
- "ld1h { z3.h }, p2/Z, [x12, #2, MUL VL]\n"
- "ld1h { z25.h }, p2/Z, [x12, #3, MUL VL]\n"
- "zip1 z20.h, z17.h, z28.h\n"
- "zip1 z7.h, z5.h, z4.h\n"
- "ld1h { z27.h }, p2/Z, [x11, #2, MUL VL]\n"
- "ld1h { z22.h }, p2/Z, [x11, #3, MUL VL]\n"
- "zip2 z2.h, z17.h, z28.h\n"
- "zip2 z19.h, z5.h, z4.h\n"
- "ld1h { z28.h }, p2/Z, [x10, #2, MUL VL]\n"
- "ld1h { z17.h }, p2/Z, [x10, #3, MUL VL]\n"
- "zip1 z21.h, z24.h, z16.h\n"
- "zip2 z24.h, z24.h, z16.h\n"
- "ld1h { z5.h }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1h { z1.h }, p2/Z, [x9, #3, MUL VL]\n"
- "zip1 z14.h, z29.h, z18.h\n"
- "zip2 z12.h, z29.h, z18.h\n"
- "ld1h { z18.h }, p2/Z, [x28]\n"
- "ld1h { z31.h }, p2/Z, [x28, #1, MUL VL]\n"
- "zip1 z11.h, z20.h, z7.h\n"
- "zip2 z13.h, z20.h, z7.h\n"
- "ld1h { z4.h }, p2/Z, [x25]\n"
- "ld1h { z26.h }, p2/Z, [x25, #1, MUL VL]\n"
- "zip1 z15.h, z2.h, z19.h\n"
- "zip2 z10.h, z2.h, z19.h\n"
- "ld1h { z16.h }, p2/Z, [x24]\n"
- "ld1h { z30.h }, p2/Z, [x24, #1, MUL VL]\n"
- "zip1 z19.h, z18.h, z16.h\n"
- "zip2 z18.h, z18.h, z16.h\n"
- "ld1h { z8.h }, p2/Z, [x23]\n"
- "ld1h { z29.h }, p2/Z, [x23, #1, MUL VL]\n"
- "zip1 z20.h, z4.h, z8.h\n"
- "zip2 z0.h, z4.h, z8.h\n"
- "ld1h { z6.h }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1h { z8.h }, p2/Z, [x28, #3, MUL VL]\n"
- "zip1 z23.h, z31.h, z30.h\n"
- "zip1 z16.h, z26.h, z29.h\n"
- "ld1h { z9.h }, p2/Z, [x25, #2, MUL VL]\n"
- "ld1h { z7.h }, p2/Z, [x25, #3, MUL VL]\n"
- "zip2 z31.h, z31.h, z30.h\n"
- "zip2 z30.h, z26.h, z29.h\n"
- "ld1h { z2.h }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1h { z26.h }, p2/Z, [x24, #3, MUL VL]\n"
- "zip1 z29.h, z3.h, z28.h\n"
- "zip1 z4.h, z27.h, z5.h\n"
- "zip2 z28.h, z3.h, z28.h\n"
- "ld1h { z3.h }, p2/Z, [x23, #2, MUL VL]\n"
- "zip2 z27.h, z27.h, z5.h\n"
- "ld1h { z5.h }, p2/Z, [x23, #3, MUL VL]\n"
- "st1h { z21.h }, p2, [x21]\n"
- "zip1 z21.h, z25.h, z17.h\n"
- "zip2 z25.h, z25.h, z17.h\n"
- "cmp x27, x26\n"
- "st1h { z24.h }, p2, [x21, #1, MUL VL]\n"
- "zip1 z24.h, z22.h, z1.h\n"
- "zip2 z22.h, z22.h, z1.h\n"
- "addvl x12, x12, #4\n"
- "st1h { z14.h }, p2, [x21, #2, MUL VL]\n"
- "zip1 z17.h, z19.h, z20.h\n"
- "zip2 z20.h, z19.h, z20.h\n"
+ "ld1h { z21.h }, p2/Z, [x11]\n"
+ "mov x20, x10\n"
+ "ld1h { z24.h }, p2/Z, [x11, #1, MUL VL]\n"
+ "add x10, x10, %x[out_stride]\n"
+ "ld1h { z8.h }, p2/Z, [x11, #2, MUL VL]\n"
+ "mov x19, x10\n"
+ "ld1h { z11.h }, p2/Z, [x11, #3, MUL VL]\n"
"addvl x11, x11, #4\n"
- "st1h { z12.h }, p2, [x21, #3, MUL VL]\n"
- "zip1 z19.h, z18.h, z0.h\n"
- "zip2 z18.h, z18.h, z0.h\n"
- "addvl x10, x10, #4\n"
- "st1h { z11.h }, p2, [x21, #4, MUL VL]\n"
- "zip1 z14.h, z23.h, z16.h\n"
- "zip2 z16.h, z23.h, z16.h\n"
+ "ld1h { z3.h }, p2/Z, [x9]\n"
+ "add x10, x10, %x[out_stride]\n"
+ "ld1h { z0.h }, p2/Z, [x9, #1, MUL VL]\n"
+ "sub x22, x22, x21\n"
+ "ld1h { z18.h }, p2/Z, [x9, #2, MUL VL]\n"
+ "cmp x22, x21\n"
+ "ld1h { z12.h }, p2/Z, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1h { z13.h }, p2, [x21, #5, MUL VL]\n"
- "zip1 z23.h, z31.h, z30.h\n"
- "zip2 z1.h, z31.h, z30.h\n"
+ "ld1h { z16.h }, p2/Z, [x28]\n"
+ "zip1 z22.h, z21.h, z16.h\n"
+ "ld1h { z19.h }, p2/Z, [x28, #1, MUL VL]\n"
+ "zip2 z21.h, z21.h, z16.h\n"
+ "ld1h { z26.h }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1h { z13.h }, p2/Z, [x28, #3, MUL VL]\n"
+ "zip1 z14.h, z24.h, z19.h\n"
+ "ld1h { z16.h }, p2/Z, [x27]\n"
"addvl x28, x28, #4\n"
- "st1h { z15.h }, p2, [x21, #6, MUL VL]\n"
- "zip1 z0.h, z29.h, z4.h\n"
- "zip2 z31.h, z29.h, z4.h\n"
+ "zip2 z24.h, z24.h, z19.h\n"
+ "ld1h { z27.h }, p2/Z, [x27, #1, MUL VL]\n"
+ "zip1 z17.h, z8.h, z26.h\n"
+ "ld1h { z15.h }, p2/Z, [x27, #2, MUL VL]\n"
+ "zip2 z9.h, z8.h, z26.h\n"
+ "ld1h { z5.h }, p2/Z, [x27, #3, MUL VL]\n"
+ "addvl x27, x27, #4\n"
+ "zip1 z2.h, z3.h, z16.h\n"
+ "ld1h { z4.h }, p2/Z, [x26]\n"
+ "zip2 z16.h, z3.h, z16.h\n"
+ "ld1h { z23.h }, p2/Z, [x26, #1, MUL VL]\n"
+ "zip1 z20.h, z22.h, z2.h\n"
+ "ld1h { z10.h }, p2/Z, [x26, #2, MUL VL]\n"
+ "zip2 z28.h, z22.h, z2.h\n"
+ "ld1h { z8.h }, p2/Z, [x26, #3, MUL VL]\n"
+ "addvl x26, x26, #4\n"
+ "zip1 z26.h, z21.h, z16.h\n"
+ "ld1h { z25.h }, p2/Z, [x25]\n"
+ "zip2 z7.h, z21.h, z16.h\n"
+ "ld1h { z31.h }, p2/Z, [x25, #1, MUL VL]\n"
+ "zip1 z3.h, z0.h, z27.h\n"
+ "ld1h { z16.h }, p2/Z, [x25, #2, MUL VL]\n"
+ "zip1 z22.h, z14.h, z3.h\n"
+ "ld1h { z6.h }, p2/Z, [x25, #3, MUL VL]\n"
"addvl x25, x25, #4\n"
- "st1h { z10.h }, p2, [x21, #7, MUL VL]\n"
- "addvl x21, x21, #16\n"
- "zip1 z30.h, z28.h, z27.h\n"
- "zip2 z29.h, z28.h, z27.h\n"
- "st1h { z17.h }, p2, [x21, #-8, MUL VL]\n"
- "zip1 z13.h, z21.h, z24.h\n"
- "zip2 z27.h, z21.h, z24.h\n"
+ "zip2 z19.h, z14.h, z3.h\n"
+ "ld1h { z2.h }, p2/Z, [x24]\n"
+ "zip2 z14.h, z0.h, z27.h\n"
+ "ld1h { z21.h }, p2/Z, [x24, #1, MUL VL]\n"
+ "zip1 z29.h, z24.h, z14.h\n"
+ "ld1h { z0.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "zip2 z27.h, z24.h, z14.h\n"
+ "ld1h { z1.h }, p2/Z, [x24, #3, MUL VL]\n"
"addvl x24, x24, #4\n"
- "st1h { z20.h }, p2, [x21, #-7, MUL VL]\n"
- "zip1 z28.h, z25.h, z22.h\n"
- "zip2 z25.h, z25.h, z22.h\n"
+ "zip1 z30.h, z4.h, z2.h\n"
+ "ld1h { z3.h }, p2/Z, [x23]\n"
+ "zip2 z14.h, z4.h, z2.h\n"
+ "ld1h { z4.h }, p2/Z, [x23, #1, MUL VL]\n"
+ "zip1 z2.h, z23.h, z21.h\n"
+ "ld1h { z24.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "zip2 z21.h, z23.h, z21.h\n"
+ "ld1h { z23.h }, p2/Z, [x23, #3, MUL VL]\n"
"addvl x23, x23, #4\n"
- "st1h { z19.h }, p2, [x21, #-6, MUL VL]\n"
- "zip1 z22.h, z6.h, z2.h\n"
- "zip1 z21.h, z9.h, z3.h\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z18.h }, p2, [x21, #-5, MUL VL]\n"
- "zip2 z20.h, z6.h, z2.h\n"
- "zip2 z19.h, z9.h, z3.h\n"
- "st1h { z14.h }, p2, [x21, #-4, MUL VL]\n"
- "zip1 z18.h, z8.h, z26.h\n"
- "zip1 z17.h, z7.h, z5.h\n"
- "st1h { z16.h }, p2, [x21, #-3, MUL VL]\n"
- "zip2 z24.h, z8.h, z26.h\n"
- "zip2 z16.h, z7.h, z5.h\n"
- "st1h { z23.h }, p2, [x21, #-2, MUL VL]\n"
- "zip1 z23.h, z22.h, z21.h\n"
- "zip2 z22.h, z22.h, z21.h\n"
- "st1h { z1.h }, p2, [x21, #-1, MUL VL]\n"
- "zip1 z21.h, z20.h, z19.h\n"
- "zip2 z20.h, z20.h, z19.h\n"
- "st1h { z0.h }, p2, [x20]\n"
- "zip1 z19.h, z18.h, z17.h\n"
- "zip2 z18.h, z18.h, z17.h\n"
- "st1h { z31.h }, p2, [x20, #1, MUL VL]\n"
- "zip1 z17.h, z24.h, z16.h\n"
- "zip2 z16.h, z24.h, z16.h\n"
- "st1h { z30.h }, p2, [x20, #2, MUL VL]\n"
- "st1h { z29.h }, p2, [x20, #3, MUL VL]\n"
- "st1h { z13.h }, p2, [x20, #4, MUL VL]\n"
- "st1h { z27.h }, p2, [x20, #5, MUL VL]\n"
- "st1h { z28.h }, p2, [x20, #6, MUL VL]\n"
- "st1h { z25.h }, p2, [x20, #7, MUL VL]\n"
+ "st1h { z20.h }, p2, [x20]\n"
+ "zip1 z20.h, z25.h, z3.h\n"
+ "zip2 z3.h, z25.h, z3.h\n"
+ "st1h { z28.h }, p2, [x20, #1, MUL VL]\n"
+ "zip1 z28.h, z30.h, z20.h\n"
+ "st1h { z26.h }, p2, [x20, #2, MUL VL]\n"
+ "zip2 z20.h, z30.h, z20.h\n"
+ "st1h { z7.h }, p2, [x20, #3, MUL VL]\n"
+ "zip1 z25.h, z14.h, z3.h\n"
+ "st1h { z22.h }, p2, [x20, #4, MUL VL]\n"
+ "zip2 z7.h, z14.h, z3.h\n"
+ "st1h { z19.h }, p2, [x20, #5, MUL VL]\n"
+ "zip1 z14.h, z31.h, z4.h\n"
+ "st1h { z29.h }, p2, [x20, #6, MUL VL]\n"
+ "zip1 z19.h, z2.h, z14.h\n"
+ "st1h { z27.h }, p2, [x20, #7, MUL VL]\n"
"addvl x20, x20, #16\n"
- "st1h { z23.h }, p2, [x20, #-8, MUL VL]\n"
- "st1h { z22.h }, p2, [x20, #-7, MUL VL]\n"
- "st1h { z21.h }, p2, [x20, #-6, MUL VL]\n"
- "st1h { z20.h }, p2, [x20, #-5, MUL VL]\n"
+ "zip2 z29.h, z2.h, z14.h\n"
+ "st1h { z28.h }, p2, [x20, #-8, MUL VL]\n"
+ "zip2 z27.h, z31.h, z4.h\n"
+ "st1h { z20.h }, p2, [x20, #-7, MUL VL]\n"
+ "zip1 z30.h, z21.h, z27.h\n"
+ "st1h { z25.h }, p2, [x20, #-6, MUL VL]\n"
+ "zip2 z20.h, z21.h, z27.h\n"
+ "st1h { z7.h }, p2, [x20, #-5, MUL VL]\n"
+ "zip1 z14.h, z18.h, z15.h\n"
"st1h { z19.h }, p2, [x20, #-4, MUL VL]\n"
- "st1h { z18.h }, p2, [x20, #-3, MUL VL]\n"
- "st1h { z17.h }, p2, [x20, #-2, MUL VL]\n"
- "st1h { z16.h }, p2, [x20, #-1, MUL VL]\n"
+ "zip1 z19.h, z17.h, z14.h\n"
+ "st1h { z29.h }, p2, [x20, #-3, MUL VL]\n"
+ "zip2 z7.h, z17.h, z14.h\n"
+ "st1h { z30.h }, p2, [x20, #-2, MUL VL]\n"
+ "zip2 z14.h, z18.h, z15.h\n"
+ "st1h { z20.h }, p2, [x20, #-1, MUL VL]\n"
+ "zip1 z17.h, z9.h, z14.h\n"
+ "st1h { z19.h }, p2, [x19]\n"
+ "zip2 z27.h, z9.h, z14.h\n"
+ "st1h { z7.h }, p2, [x19, #1, MUL VL]\n"
+ "zip1 z18.h, z11.h, z13.h\n"
+ "st1h { z17.h }, p2, [x19, #2, MUL VL]\n"
+ "zip1 z17.h, z12.h, z5.h\n"
+ "st1h { z27.h }, p2, [x19, #3, MUL VL]\n"
+ "zip1 z20.h, z18.h, z17.h\n"
+ "st1h { z20.h }, p2, [x19, #4, MUL VL]\n"
+ "zip2 z18.h, z18.h, z17.h\n"
+ "st1h { z18.h }, p2, [x19, #5, MUL VL]\n"
+ "zip2 z18.h, z11.h, z13.h\n"
+ "zip2 z17.h, z12.h, z5.h\n"
+ "zip1 z29.h, z18.h, z17.h\n"
+ "st1h { z29.h }, p2, [x19, #6, MUL VL]\n"
+ "zip2 z17.h, z18.h, z17.h\n"
+ "st1h { z17.h }, p2, [x19, #7, MUL VL]\n"
+ "addvl x19, x19, #16\n"
+ "zip1 z18.h, z10.h, z0.h\n"
+ "zip1 z17.h, z16.h, z24.h\n"
+ "zip1 z30.h, z18.h, z17.h\n"
+ "st1h { z30.h }, p2, [x19, #-8, MUL VL]\n"
+ "zip2 z30.h, z18.h, z17.h\n"
+ "st1h { z30.h }, p2, [x19, #-7, MUL VL]\n"
+ "zip2 z18.h, z10.h, z0.h\n"
+ "zip2 z17.h, z16.h, z24.h\n"
+ "zip1 z16.h, z18.h, z17.h\n"
+ "st1h { z16.h }, p2, [x19, #-6, MUL VL]\n"
+ "zip2 z16.h, z18.h, z17.h\n"
+ "st1h { z16.h }, p2, [x19, #-5, MUL VL]\n"
+ "zip1 z18.h, z8.h, z1.h\n"
+ "zip1 z17.h, z6.h, z23.h\n"
+ "zip1 z16.h, z18.h, z17.h\n"
+ "st1h { z16.h }, p2, [x19, #-4, MUL VL]\n"
+ "zip2 z16.h, z18.h, z17.h\n"
+ "st1h { z16.h }, p2, [x19, #-3, MUL VL]\n"
+ "zip2 z18.h, z8.h, z1.h\n"
+ "zip2 z17.h, z6.h, z23.h\n"
+ "zip1 z16.h, z18.h, z17.h\n"
+ "st1h { z16.h }, p2, [x19, #-2, MUL VL]\n"
+ "zip2 z16.h, z18.h, z17.h\n"
+ "st1h { z16.h }, p2, [x19, #-1, MUL VL]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x27, 5f\n"
+ "cbz x22, 5f\n"
"4:" // Main row loop: Column loop
- "mov x20, x27\n"
+ "mov x20, x22\n"
+ "mov x19, x10\n"
"whilelt p1.h, XZR, x20\n"
- "ld1h { z17.h }, p1/Z, [x12]\n"
- "ld1h { z19.h }, p1/Z, [x11]\n"
+ "ld1h { z17.h }, p1/Z, [x11]\n"
+ "ld1h { z22.h }, p1/Z, [x9]\n"
"dech x20\n"
+ "ld1h { z16.h }, p1/Z, [x28]\n"
+ "zip1 z19.h, z17.h, z16.h\n"
+ "ld1h { z18.h }, p1/Z, [x27]\n"
"whilelt p0.h, XZR, x20\n"
- "ld1h { z24.h }, p0/Z, [x12, #1, MUL VL]\n"
- "ld1h { z23.h }, p0/Z, [x11, #1, MUL VL]\n"
- "ld1h { z16.h }, p1/Z, [x10]\n"
- "ld1h { z20.h }, p0/Z, [x10, #1, MUL VL]\n"
- "zip1 z1.h, z17.h, z16.h\n"
- "zip2 z22.h, z17.h, z16.h\n"
- "ld1h { z18.h }, p1/Z, [x9]\n"
- "ld1h { z17.h }, p0/Z, [x9, #1, MUL VL]\n"
- "zip1 z16.h, z19.h, z18.h\n"
- "zip2 z19.h, z19.h, z18.h\n"
- "ld1h { z0.h }, p1/Z, [x28]\n"
- "ld1h { z31.h }, p0/Z, [x28, #1, MUL VL]\n"
- "zip1 z25.h, z24.h, z20.h\n"
- "zip1 z21.h, z23.h, z17.h\n"
- "ld1h { z30.h }, p1/Z, [x25]\n"
- "ld1h { z29.h }, p0/Z, [x25, #1, MUL VL]\n"
- "zip2 z28.h, z24.h, z20.h\n"
- "zip2 z24.h, z23.h, z17.h\n"
- "ld1h { z20.h }, p1/Z, [x24]\n"
- "ld1h { z27.h }, p0/Z, [x24, #1, MUL VL]\n"
- "mov x20, x22\n"
- "decd x27, ALL, MUL #8\n"
- "ld1h { z23.h }, p1/Z, [x23]\n"
- "ld1h { z26.h }, p0/Z, [x23, #1, MUL VL]\n"
- "zip1 z18.h, z1.h, z16.h\n"
- "zip2 z17.h, z1.h, z16.h\n"
- "zip1 z16.h, z22.h, z19.h\n"
- "zip2 z19.h, z22.h, z19.h\n"
- "st1h { z18.h }, p2, [x20]\n"
- "cmp x27, #0x0\n"
- "zip1 z22.h, z25.h, z21.h\n"
- "zip2 z21.h, z25.h, z21.h\n"
- "st1h { z17.h }, p2, [x20, #1, MUL VL]\n"
- "addvl x12, x12, #2\n"
- "zip1 z25.h, z28.h, z24.h\n"
- "zip2 z18.h, z28.h, z24.h\n"
- "st1h { z16.h }, p2, [x20, #2, MUL VL]\n"
+ "zip2 z17.h, z17.h, z16.h\n"
+ "ld1h { z21.h }, p0/Z, [x11, #1, MUL VL]\n"
"addvl x11, x11, #2\n"
- "zip1 z17.h, z0.h, z20.h\n"
- "zip1 z16.h, z30.h, z23.h\n"
- "st1h { z19.h }, p2, [x20, #3, MUL VL]\n"
- "addvl x10, x10, #2\n"
- "zip2 z20.h, z0.h, z20.h\n"
- "zip2 z19.h, z30.h, z23.h\n"
- "st1h { z22.h }, p2, [x20, #4, MUL VL]\n"
+ "zip1 z16.h, z22.h, z18.h\n"
+ "ld1h { z2.h }, p0/Z, [x9, #1, MUL VL]\n"
"addvl x9, x9, #2\n"
- "zip1 z24.h, z31.h, z27.h\n"
- "zip1 z23.h, z29.h, z26.h\n"
- "st1h { z21.h }, p2, [x20, #5, MUL VL]\n"
+ "zip1 z1.h, z19.h, z16.h\n"
+ "ld1h { z20.h }, p0/Z, [x28, #1, MUL VL]\n"
"addvl x28, x28, #2\n"
- "zip2 z22.h, z31.h, z27.h\n"
- "zip2 z21.h, z29.h, z26.h\n"
- "st1h { z25.h }, p2, [x20, #6, MUL VL]\n"
+ "zip2 z0.h, z19.h, z16.h\n"
+ "ld1h { z19.h }, p0/Z, [x27, #1, MUL VL]\n"
+ "addvl x27, x27, #2\n"
+ "zip2 z16.h, z22.h, z18.h\n"
+ "ld1h { z31.h }, p1/Z, [x26]\n"
+ "add x10, x10, %x[out_stride]\n"
+ "zip1 z30.h, z17.h, z16.h\n"
+ "ld1h { z29.h }, p0/Z, [x26, #1, MUL VL]\n"
+ "addvl x26, x26, #2\n"
+ "zip2 z28.h, z17.h, z16.h\n"
+ "ld1h { z27.h }, p1/Z, [x25]\n"
+ "decd x22, ALL, MUL #8\n"
+ "zip1 z17.h, z21.h, z20.h\n"
+ "ld1h { z26.h }, p0/Z, [x25, #1, MUL VL]\n"
"addvl x25, x25, #2\n"
- "st1h { z18.h }, p2, [x20, #7, MUL VL]\n"
- "addvl x20, x20, #16\n"
- "addvl x24, x24, #2\n"
+ "zip1 z16.h, z2.h, z19.h\n"
+ "ld1h { z25.h }, p1/Z, [x24]\n"
+ "cmp x22, #0x0\n"
"zip1 z18.h, z17.h, z16.h\n"
+ "ld1h { z24.h }, p0/Z, [x24, #1, MUL VL]\n"
+ "addvl x24, x24, #2\n"
+ "zip2 z23.h, z17.h, z16.h\n"
+ "ld1h { z22.h }, p1/Z, [x23]\n"
+ "zip2 z17.h, z21.h, z20.h\n"
+ "ld1h { z21.h }, p0/Z, [x23, #1, MUL VL]\n"
"addvl x23, x23, #2\n"
+ "zip2 z16.h, z2.h, z19.h\n"
+ "st1h { z1.h }, p2, [x19]\n"
+ "zip1 z20.h, z31.h, z25.h\n"
+ "st1h { z0.h }, p2, [x19, #1, MUL VL]\n"
+ "zip1 z19.h, z17.h, z16.h\n"
+ "st1h { z30.h }, p2, [x19, #2, MUL VL]\n"
"zip2 z17.h, z17.h, z16.h\n"
- "zip1 z16.h, z20.h, z19.h\n"
- "st1h { z18.h }, p2, [x20, #-8, MUL VL]\n"
- "zip2 z20.h, z20.h, z19.h\n"
- "zip1 z19.h, z24.h, z23.h\n"
- "st1h { z17.h }, p2, [x20, #-7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip2 z18.h, z24.h, z23.h\n"
- "zip1 z17.h, z22.h, z21.h\n"
- "st1h { z16.h }, p2, [x20, #-6, MUL VL]\n"
- "zip2 z16.h, z22.h, z21.h\n"
- "st1h { z20.h }, p2, [x20, #-5, MUL VL]\n"
- "st1h { z19.h }, p2, [x20, #-4, MUL VL]\n"
- "st1h { z18.h }, p2, [x20, #-3, MUL VL]\n"
- "st1h { z17.h }, p2, [x20, #-2, MUL VL]\n"
- "st1h { z16.h }, p2, [x20, #-1, MUL VL]\n"
+ "st1h { z28.h }, p2, [x19, #3, MUL VL]\n"
+ "zip1 z16.h, z27.h, z22.h\n"
+ "st1h { z18.h }, p2, [x19, #4, MUL VL]\n"
+ "zip1 z18.h, z20.h, z16.h\n"
+ "st1h { z23.h }, p2, [x19, #5, MUL VL]\n"
+ "zip2 z16.h, z20.h, z16.h\n"
+ "st1h { z19.h }, p2, [x19, #6, MUL VL]\n"
+ "zip2 z19.h, z31.h, z25.h\n"
+ "st1h { z17.h }, p2, [x19, #7, MUL VL]\n"
+ "addvl x19, x19, #16\n"
+ "zip2 z17.h, z27.h, z22.h\n"
+ "st1h { z18.h }, p2, [x19, #-8, MUL VL]\n"
+ "zip1 z18.h, z29.h, z24.h\n"
+ "st1h { z16.h }, p2, [x19, #-7, MUL VL]\n"
+ "zip1 z16.h, z19.h, z17.h\n"
+ "st1h { z16.h }, p2, [x19, #-6, MUL VL]\n"
+ "zip2 z16.h, z19.h, z17.h\n"
+ "st1h { z16.h }, p2, [x19, #-5, MUL VL]\n"
+ "zip1 z17.h, z26.h, z21.h\n"
+ "zip1 z16.h, z18.h, z17.h\n"
+ "st1h { z16.h }, p2, [x19, #-4, MUL VL]\n"
+ "zip2 z16.h, z18.h, z17.h\n"
+ "st1h { z16.h }, p2, [x19, #-3, MUL VL]\n"
+ "zip2 z18.h, z29.h, z24.h\n"
+ "zip2 z17.h, z26.h, z21.h\n"
+ "zip1 z16.h, z18.h, z17.h\n"
+ "st1h { z16.h }, p2, [x19, #-2, MUL VL]\n"
+ "zip2 z16.h, z18.h, z17.h\n"
+ "st1h { z16.h }, p2, [x19, #-1, MUL VL]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp %x[height], #0x8\n"
"addvl %x[out], %x[out], #16\n"
+ "cmp %x[height], #0x8\n"
"bge 1b\n"
"cbz %x[height], 12f\n"
"6:" // Main loop skip
+
"7:" // Tail row loop: Head
- "mov x12, %x[in]\n"
- "add x11, x12, %x[in_stride]\n"
- "add x10, x11, %x[in_stride]\n"
- "mov x21, %x[width]\n"
- "cnth x20, ALL, MUL #4\n"
- "add x9, x10, %x[in_stride]\n"
+ "mov x11, %x[in]\n"
+ "mov x10, %x[out]\n"
+ "add x9, x11, %x[in_stride]\n"
+ "add x28, x9, %x[in_stride]\n"
+ "add x27, x28, %x[in_stride]\n"
+ "add %x[in], x27, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x9, %x[in_stride]\n"
- "csel x9, x9, %x[pad_row], GT\n"
- "csel x10, x10, %x[pad_row], GE\n"
+ "csel x27, x27, %x[pad_row], GT\n"
+ "csel x28, x28, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x11, x11, %x[pad_row], GT\n"
- "cmp x21, x20\n"
- "mov x22, %x[out]\n"
+ "csel x9, x9, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x4\n"
+ "mov x20, %x[width]\n"
+ "cnth x19, ALL, MUL #4\n"
+ "cmp x20, x19\n"
"blt 9f\n"
"8:" // Tail row loop: Unroll column loop
- "ld1h { z17.h }, p2/Z, [x12]\n"
- "ld1h { z22.h }, p2/Z, [x12, #1, MUL VL]\n"
- "sub x21, x21, x20\n"
- "cmp x21, x20\n"
- "ld1h { z19.h }, p2/Z, [x11]\n"
- "ld1h { z21.h }, p2/Z, [x11, #1, MUL VL]\n"
- "ld1h { z16.h }, p2/Z, [x10]\n"
- "ld1h { z18.h }, p2/Z, [x10, #1, MUL VL]\n"
- "zip1 z4.h, z17.h, z16.h\n"
- "zip2 z3.h, z17.h, z16.h\n"
- "ld1h { z17.h }, p2/Z, [x9]\n"
- "ld1h { z16.h }, p2/Z, [x9, #1, MUL VL]\n"
- "zip1 z2.h, z19.h, z17.h\n"
- "zip2 z1.h, z19.h, z17.h\n"
- "ld1h { z17.h }, p2/Z, [x12, #2, MUL VL]\n"
- "ld1h { z24.h }, p2/Z, [x12, #3, MUL VL]\n"
- "zip1 z0.h, z22.h, z18.h\n"
- "zip1 z31.h, z21.h, z16.h\n"
- "ld1h { z20.h }, p2/Z, [x11, #2, MUL VL]\n"
- "ld1h { z19.h }, p2/Z, [x11, #3, MUL VL]\n"
- "zip2 z30.h, z22.h, z18.h\n"
- "zip2 z23.h, z21.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x10, #2, MUL VL]\n"
- "ld1h { z18.h }, p2/Z, [x10, #3, MUL VL]\n"
- "zip1 z22.h, z17.h, z16.h\n"
- "zip2 z29.h, z17.h, z16.h\n"
- "ld1h { z17.h }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1h { z16.h }, p2/Z, [x9, #3, MUL VL]\n"
- "zip1 z21.h, z20.h, z17.h\n"
- "zip2 z28.h, z20.h, z17.h\n"
- "zip1 z27.h, z24.h, z18.h\n"
- "zip1 z26.h, z19.h, z16.h\n"
- "addvl x12, x12, #4\n"
+ "ld1h { z17.h }, p2/Z, [x11]\n"
+ "sub x20, x20, x19\n"
+ "ld1h { z20.h }, p2/Z, [x11, #1, MUL VL]\n"
+ "cmp x20, x19\n"
+ "ld1h { z19.h }, p2/Z, [x11, #2, MUL VL]\n"
+ "ld1h { z1.h }, p2/Z, [x11, #3, MUL VL]\n"
"addvl x11, x11, #4\n"
- "zip2 z25.h, z24.h, z18.h\n"
- "zip2 z24.h, z19.h, z16.h\n"
- "addvl x10, x10, #4\n"
+ "ld1h { z0.h }, p2/Z, [x9]\n"
+ "ld1h { z31.h }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1h { z30.h }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1h { z29.h }, p2/Z, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "zip1 z16.h, z4.h, z2.h\n"
- "zip2 z17.h, z4.h, z2.h\n"
- "st1h { z16.h }, p2, [x22]\n"
- "zip1 z16.h, z3.h, z1.h\n"
- "zip2 z20.h, z3.h, z1.h\n"
- "st1h { z17.h }, p2, [x22, #1, MUL VL]\n"
- "zip1 z19.h, z0.h, z31.h\n"
- "zip2 z18.h, z0.h, z31.h\n"
- "st1h { z16.h }, p2, [x22, #2, MUL VL]\n"
- "zip1 z17.h, z30.h, z23.h\n"
- "zip2 z16.h, z30.h, z23.h\n"
- "st1h { z20.h }, p2, [x22, #3, MUL VL]\n"
- "st1h { z19.h }, p2, [x22, #4, MUL VL]\n"
- "zip1 z23.h, z22.h, z21.h\n"
- "zip2 z22.h, z22.h, z21.h\n"
- "st1h { z18.h }, p2, [x22, #5, MUL VL]\n"
- "zip1 z21.h, z29.h, z28.h\n"
- "zip2 z20.h, z29.h, z28.h\n"
- "st1h { z17.h }, p2, [x22, #6, MUL VL]\n"
- "zip1 z19.h, z27.h, z26.h\n"
- "zip2 z18.h, z27.h, z26.h\n"
- "st1h { z16.h }, p2, [x22, #7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip1 z17.h, z25.h, z24.h\n"
- "zip2 z16.h, z25.h, z24.h\n"
- "st1h { z23.h }, p2, [x22]\n"
- "st1h { z22.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z21.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z20.h }, p2, [x22, #3, MUL VL]\n"
- "st1h { z19.h }, p2, [x22, #4, MUL VL]\n"
- "st1h { z18.h }, p2, [x22, #5, MUL VL]\n"
- "st1h { z17.h }, p2, [x22, #6, MUL VL]\n"
- "st1h { z16.h }, p2, [x22, #7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "ld1h { z16.h }, p2/Z, [x28]\n"
+ "zip1 z28.h, z17.h, z16.h\n"
+ "ld1h { z18.h }, p2/Z, [x28, #1, MUL VL]\n"
+ "zip2 z27.h, z17.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1h { z26.h }, p2/Z, [x28, #3, MUL VL]\n"
+ "zip1 z25.h, z20.h, z18.h\n"
+ "ld1h { z16.h }, p2/Z, [x27]\n"
+ "addvl x28, x28, #4\n"
+ "zip2 z24.h, z20.h, z18.h\n"
+ "ld1h { z23.h }, p2/Z, [x27, #1, MUL VL]\n"
+ "zip1 z22.h, z19.h, z17.h\n"
+ "ld1h { z21.h }, p2/Z, [x27, #2, MUL VL]\n"
+ "zip2 z20.h, z19.h, z17.h\n"
+ "ld1h { z19.h }, p2/Z, [x27, #3, MUL VL]\n"
+ "addvl x27, x27, #4\n"
+ "zip1 z18.h, z0.h, z16.h\n"
+ "zip2 z17.h, z0.h, z16.h\n"
+ "zip1 z16.h, z28.h, z18.h\n"
+ "st1h { z16.h }, p2, [x10]\n"
+ "zip2 z16.h, z28.h, z18.h\n"
+ "st1h { z16.h }, p2, [x10, #1, MUL VL]\n"
+ "zip1 z16.h, z27.h, z17.h\n"
+ "st1h { z16.h }, p2, [x10, #2, MUL VL]\n"
+ "zip2 z16.h, z27.h, z17.h\n"
+ "st1h { z16.h }, p2, [x10, #3, MUL VL]\n"
+ "zip1 z17.h, z31.h, z23.h\n"
+ "zip1 z16.h, z25.h, z17.h\n"
+ "st1h { z16.h }, p2, [x10, #4, MUL VL]\n"
+ "zip2 z16.h, z25.h, z17.h\n"
+ "st1h { z16.h }, p2, [x10, #5, MUL VL]\n"
+ "zip2 z17.h, z31.h, z23.h\n"
+ "zip1 z16.h, z24.h, z17.h\n"
+ "st1h { z16.h }, p2, [x10, #6, MUL VL]\n"
+ "zip2 z16.h, z24.h, z17.h\n"
+ "st1h { z16.h }, p2, [x10, #7, MUL VL]\n"
+ "add x10, x10, %x[out_stride]\n"
+ "zip1 z18.h, z30.h, z21.h\n"
+ "zip2 z17.h, z30.h, z21.h\n"
+ "zip1 z16.h, z22.h, z18.h\n"
+ "st1h { z16.h }, p2, [x10]\n"
+ "zip2 z16.h, z22.h, z18.h\n"
+ "st1h { z16.h }, p2, [x10, #1, MUL VL]\n"
+ "zip1 z16.h, z20.h, z17.h\n"
+ "st1h { z16.h }, p2, [x10, #2, MUL VL]\n"
+ "zip2 z16.h, z20.h, z17.h\n"
+ "st1h { z16.h }, p2, [x10, #3, MUL VL]\n"
+ "zip1 z18.h, z1.h, z26.h\n"
+ "zip1 z17.h, z29.h, z19.h\n"
+ "zip1 z16.h, z18.h, z17.h\n"
+ "st1h { z16.h }, p2, [x10, #4, MUL VL]\n"
+ "zip2 z16.h, z18.h, z17.h\n"
+ "st1h { z16.h }, p2, [x10, #5, MUL VL]\n"
+ "zip2 z18.h, z1.h, z26.h\n"
+ "zip2 z17.h, z29.h, z19.h\n"
+ "zip1 z16.h, z18.h, z17.h\n"
+ "st1h { z16.h }, p2, [x10, #6, MUL VL]\n"
+ "zip2 z16.h, z18.h, z17.h\n"
+ "st1h { z16.h }, p2, [x10, #7, MUL VL]\n"
+ "add x10, x10, %x[out_stride]\n"
"bge 8b\n"
"9:" // Tail row loop: Unroll column loop skip
- "cbz x21, 11f\n"
+ "cbz x20, 11f\n"
"10:" // Tail row loop: Column loop
- "mov x20, x21\n"
- "whilelt p1.h, XZR, x20\n"
- "ld1h { z23.h }, p1/Z, [x12]\n"
- "ld1h { z22.h }, p1/Z, [x11]\n"
- "dech x20\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z21.h }, p0/Z, [x12, #1, MUL VL]\n"
- "ld1h { z25.h }, p0/Z, [x11, #1, MUL VL]\n"
- "ld1h { z19.h }, p1/Z, [x10]\n"
- "ld1h { z20.h }, p0/Z, [x10, #1, MUL VL]\n"
- "decd x21, ALL, MUL #8\n"
- "zip1 z24.h, z23.h, z19.h\n"
- "ld1h { z18.h }, p1/Z, [x9]\n"
- "ld1h { z16.h }, p0/Z, [x9, #1, MUL VL]\n"
- "zip1 z17.h, z22.h, z18.h\n"
- "zip2 z23.h, z23.h, z19.h\n"
- "zip2 z19.h, z22.h, z18.h\n"
- "zip1 z22.h, z21.h, z20.h\n"
- "cmp x21, #0x0\n"
- "addvl x12, x12, #2\n"
- "zip1 z18.h, z25.h, z16.h\n"
- "zip2 z21.h, z21.h, z20.h\n"
+ "mov x19, x20\n"
+ "decd x20, ALL, MUL #8\n"
+ "whilelt p0.h, XZR, x19\n"
+ "ld1h { z17.h }, p0/Z, [x11]\n"
+ "ld1h { z25.h }, p0/Z, [x9]\n"
+ "dech x19\n"
+ "ld1h { z16.h }, p0/Z, [x28]\n"
+ "zip1 z18.h, z17.h, z16.h\n"
+ "ld1h { z24.h }, p0/Z, [x27]\n"
+ "whilelt p0.h, XZR, x19\n"
+ "zip2 z23.h, z17.h, z16.h\n"
+ "ld1h { z22.h }, p0/Z, [x11, #1, MUL VL]\n"
"addvl x11, x11, #2\n"
- "addvl x10, x10, #2\n"
- "zip2 z20.h, z25.h, z16.h\n"
+ "zip1 z16.h, z25.h, z24.h\n"
+ "ld1h { z21.h }, p0/Z, [x9, #1, MUL VL]\n"
"addvl x9, x9, #2\n"
- "zip1 z16.h, z24.h, z17.h\n"
- "st1h { z16.h }, p2, [x22]\n"
- "zip2 z16.h, z24.h, z17.h\n"
- "zip1 z17.h, z23.h, z19.h\n"
- "st1h { z16.h }, p2, [x22, #1, MUL VL]\n"
- "zip2 z16.h, z23.h, z19.h\n"
- "zip1 z19.h, z22.h, z18.h\n"
- "st1h { z17.h }, p2, [x22, #2, MUL VL]\n"
- "zip2 z18.h, z22.h, z18.h\n"
- "zip1 z17.h, z21.h, z20.h\n"
- "st1h { z16.h }, p2, [x22, #3, MUL VL]\n"
- "zip2 z16.h, z21.h, z20.h\n"
- "st1h { z19.h }, p2, [x22, #4, MUL VL]\n"
- "st1h { z18.h }, p2, [x22, #5, MUL VL]\n"
- "st1h { z17.h }, p2, [x22, #6, MUL VL]\n"
- "st1h { z16.h }, p2, [x22, #7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "zip1 z17.h, z18.h, z16.h\n"
+ "ld1h { z20.h }, p0/Z, [x28, #1, MUL VL]\n"
+ "addvl x28, x28, #2\n"
+ "zip2 z18.h, z18.h, z16.h\n"
+ "ld1h { z19.h }, p0/Z, [x27, #1, MUL VL]\n"
+ "addvl x27, x27, #2\n"
+ "zip2 z16.h, z25.h, z24.h\n"
+ "st1h { z17.h }, p2, [x10]\n"
+ "cmp x20, #0x0\n"
+ "zip1 z17.h, z23.h, z16.h\n"
+ "st1h { z18.h }, p2, [x10, #1, MUL VL]\n"
+ "zip2 z16.h, z23.h, z16.h\n"
+ "st1h { z17.h }, p2, [x10, #2, MUL VL]\n"
+ "zip1 z18.h, z22.h, z20.h\n"
+ "st1h { z16.h }, p2, [x10, #3, MUL VL]\n"
+ "zip1 z17.h, z21.h, z19.h\n"
+ "zip1 z16.h, z18.h, z17.h\n"
+ "st1h { z16.h }, p2, [x10, #4, MUL VL]\n"
+ "zip2 z16.h, z18.h, z17.h\n"
+ "st1h { z16.h }, p2, [x10, #5, MUL VL]\n"
+ "zip2 z18.h, z22.h, z20.h\n"
+ "zip2 z17.h, z21.h, z19.h\n"
+ "zip1 z16.h, z18.h, z17.h\n"
+ "st1h { z16.h }, p2, [x10, #6, MUL VL]\n"
+ "zip2 z16.h, z18.h, z17.h\n"
+ "st1h { z16.h }, p2, [x10, #7, MUL VL]\n"
+ "add x10, x10, %x[out_stride]\n"
"bgt 10b\n"
"11:" // Tail row loop: Column loop skip
- "cmp %x[height], #0x1\n"
"addvl %x[out], %x[out], #8\n"
+ "cmp %x[height], #0x1\n"
"bge 7b\n"
"12:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_2x4_fp32bf16.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_2x4_fp32bf16.hpp
index 4ad882870e..1313479dbc 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_2x4_fp32bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_2x4_fp32bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#pragma once
@@ -42,93 +42,93 @@ void sve_transpose_interleave_8VL_2x4_fp32bf16(bfloat16 *out, const float *in, s
__asm__ __volatile__(
"ptrue p4.b\n"
"1:" // Main row loop: Head
- "mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
+ "mov x25, %x[in]\n"
"add x24, x25, %x[in_stride]\n"
- "mov x23, %x[width]\n"
- "cnth x20, ALL, MUL #4\n"
- "add x22, x24, %x[in_stride]\n"
+ "add x23, x24, %x[in_stride]\n"
+ "mov x22, %x[width]\n"
+ "cnth x19, ALL, MUL #4\n"
+ "add x21, x23, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x22, %x[in_stride]\n"
- "csel x22, x22, %x[pad_row], GT\n"
- "csel x24, x24, %x[pad_row], GE\n"
+ "add %x[in], x21, %x[in_stride]\n"
+ "csel x21, x21, %x[pad_row], GT\n"
+ "csel x23, x23, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x25, x25, %x[pad_row], GT\n"
- "cmp x23, x20\n"
- "mov x21, %x[out]\n"
+ "csel x24, x24, %x[pad_row], GT\n"
+ "cmp x22, x19\n"
+ "mov x20, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1w { z19.s }, p4/Z, [x26]\n"
- "ld1w { z18.s }, p4/Z, [x26, #1, MUL VL]\n"
- "sub x23, x23, x20\n"
- "cmp x23, x20\n"
- "ld1w { z20.s }, p4/Z, [x26, #2, MUL VL]\n"
- "ld1w { z24.s }, p4/Z, [x26, #3, MUL VL]\n"
- "ld1w { z23.s }, p4/Z, [x24]\n"
- "ld1w { z17.s }, p4/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z19.s }, p4/Z, [x25]\n"
+ "ld1w { z18.s }, p4/Z, [x25, #1, MUL VL]\n"
+ "sub x22, x22, x19\n"
+ "cmp x22, x19\n"
+ "ld1w { z20.s }, p4/Z, [x25, #2, MUL VL]\n"
+ "ld1w { z24.s }, p4/Z, [x25, #3, MUL VL]\n"
+ "ld1w { z23.s }, p4/Z, [x23]\n"
+ "ld1w { z17.s }, p4/Z, [x23, #1, MUL VL]\n"
"zip1 z22.s, z19.s, z23.s\n"
"zip2 z21.s, z19.s, z23.s\n"
- "ld1w { z31.s }, p4/Z, [x24, #2, MUL VL]\n"
- "ld1w { z16.s }, p4/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z31.s }, p4/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x23, #3, MUL VL]\n"
"zip1 z9.s, z18.s, z17.s\n"
"zip2 z7.s, z18.s, z17.s\n"
- "ld1w { z19.s }, p4/Z, [x26, #4, MUL VL]\n"
- "ld1w { z18.s }, p4/Z, [x26, #5, MUL VL]\n"
+ "ld1w { z19.s }, p4/Z, [x25, #4, MUL VL]\n"
+ "ld1w { z18.s }, p4/Z, [x25, #5, MUL VL]\n"
"zip1 z6.s, z20.s, z31.s\n"
"zip2 z5.s, z20.s, z31.s\n"
- "ld1w { z15.s }, p4/Z, [x26, #6, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x26, #7, MUL VL]\n"
+ "ld1w { z15.s }, p4/Z, [x25, #6, MUL VL]\n"
+ "ld1w { z20.s }, p4/Z, [x25, #7, MUL VL]\n"
"zip1 z3.s, z24.s, z16.s\n"
"zip2 z2.s, z24.s, z16.s\n"
- "ld1w { z16.s }, p4/Z, [x24, #4, MUL VL]\n"
- "ld1w { z17.s }, p4/Z, [x24, #5, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x23, #4, MUL VL]\n"
+ "ld1w { z17.s }, p4/Z, [x23, #5, MUL VL]\n"
"zip1 z1.s, z19.s, z16.s\n"
"zip2 z0.s, z19.s, z16.s\n"
- "ld1w { z16.s }, p4/Z, [x24, #6, MUL VL]\n"
- "ld1w { z19.s }, p4/Z, [x24, #7, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x23, #6, MUL VL]\n"
+ "ld1w { z19.s }, p4/Z, [x23, #7, MUL VL]\n"
"zip1 z31.s, z18.s, z17.s\n"
"zip2 z30.s, z18.s, z17.s\n"
- "ld1w { z18.s }, p4/Z, [x25]\n"
- "ld1w { z17.s }, p4/Z, [x25, #1, MUL VL]\n"
+ "ld1w { z18.s }, p4/Z, [x24]\n"
+ "ld1w { z17.s }, p4/Z, [x24, #1, MUL VL]\n"
"zip1 z29.s, z15.s, z16.s\n"
"zip2 z28.s, z15.s, z16.s\n"
- "ld1w { z16.s }, p4/Z, [x25, #2, MUL VL]\n"
- "ld1w { z23.s }, p4/Z, [x25, #3, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z23.s }, p4/Z, [x24, #3, MUL VL]\n"
"zip1 z27.s, z20.s, z19.s\n"
"zip2 z26.s, z20.s, z19.s\n"
- "ld1w { z11.s }, p4/Z, [x22]\n"
- "ld1w { z8.s }, p4/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z11.s }, p4/Z, [x21]\n"
+ "ld1w { z8.s }, p4/Z, [x21, #1, MUL VL]\n"
".inst 0x658ab2d8 // bfcvt z24.h, p4/M, z22.s\n"
"zip1 z25.s, z18.s, z11.s\n"
- "ld1w { z4.s }, p4/Z, [x22, #2, MUL VL]\n"
- "ld1w { z22.s }, p4/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z4.s }, p4/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z22.s }, p4/Z, [x21, #3, MUL VL]\n"
".inst 0x658ab2af // bfcvt z15.h, p4/M, z21.s\n"
"zip2 z14.s, z18.s, z11.s\n"
- "ld1w { z21.s }, p4/Z, [x25, #4, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x25, #5, MUL VL]\n"
+ "ld1w { z21.s }, p4/Z, [x24, #4, MUL VL]\n"
+ "ld1w { z20.s }, p4/Z, [x24, #5, MUL VL]\n"
".inst 0x658ab12d // bfcvt z13.h, p4/M, z9.s\n"
"zip1 z12.s, z17.s, z8.s\n"
- "ld1w { z11.s }, p4/Z, [x25, #6, MUL VL]\n"
- "ld1w { z10.s }, p4/Z, [x25, #7, MUL VL]\n"
+ "ld1w { z11.s }, p4/Z, [x24, #6, MUL VL]\n"
+ "ld1w { z10.s }, p4/Z, [x24, #7, MUL VL]\n"
".inst 0x658ab0e9 // bfcvt z9.h, p4/M, z7.s\n"
"zip2 z8.s, z17.s, z8.s\n"
- "ld1w { z19.s }, p4/Z, [x22, #4, MUL VL]\n"
- "ld1w { z18.s }, p4/Z, [x22, #5, MUL VL]\n"
+ "ld1w { z19.s }, p4/Z, [x21, #4, MUL VL]\n"
+ "ld1w { z18.s }, p4/Z, [x21, #5, MUL VL]\n"
".inst 0x658ab0c7 // bfcvt z7.h, p4/M, z6.s\n"
"zip1 z6.s, z16.s, z4.s\n"
- "ld1w { z17.s }, p4/Z, [x22, #6, MUL VL]\n"
+ "ld1w { z17.s }, p4/Z, [x21, #6, MUL VL]\n"
".inst 0x658ab0a5 // bfcvt z5.h, p4/M, z5.s\n"
"zip2 z4.s, z16.s, z4.s\n"
- "ld1w { z16.s }, p4/Z, [x22, #7, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x21, #7, MUL VL]\n"
".inst 0x658ab063 // bfcvt z3.h, p4/M, z3.s\n"
".inst 0x658ab042 // bfcvt z2.h, p4/M, z2.s\n"
- "addvl x26, x26, #8\n"
"addvl x25, x25, #8\n"
+ "addvl x24, x24, #8\n"
".inst 0x658ab021 // bfcvt z1.h, p4/M, z1.s\n"
".inst 0x658ab000 // bfcvt z0.h, p4/M, z0.s\n"
- "addvl x24, x24, #8\n"
- "addvl x22, x22, #8\n"
+ "addvl x23, x23, #8\n"
+ "addvl x21, x21, #8\n"
".inst 0x658ab3ff // bfcvt z31.h, p4/M, z31.s\n"
".inst 0x658ab3de // bfcvt z30.h, p4/M, z30.s\n"
".inst 0x658ab3bd // bfcvt z29.h, p4/M, z29.s\n"
@@ -137,7 +137,7 @@ void sve_transpose_interleave_8VL_2x4_fp32bf16(bfloat16 *out, const float *in, s
".inst 0x658ab35a // bfcvt z26.h, p4/M, z26.s\n"
".inst 0x648ab338 // bfcvtnt z24.h, p4/M, z25.s\n"
"zip1 z25.s, z23.s, z22.s\n"
- "st1h { z24.h }, p4, [x21]\n"
+ "st1h { z24.h }, p4, [x20]\n"
"zip2 z24.s, z23.s, z22.s\n"
"zip1 z23.s, z21.s, z19.s\n"
"zip2 z22.s, z21.s, z19.s\n"
@@ -148,85 +148,85 @@ void sve_transpose_interleave_8VL_2x4_fp32bf16(bfloat16 *out, const float *in, s
"zip1 z17.s, z10.s, z16.s\n"
"zip2 z16.s, z10.s, z16.s\n"
".inst 0x648ab1cf // bfcvtnt z15.h, p4/M, z14.s\n"
- "st1h { z15.h }, p4, [x21, #1, MUL VL]\n"
+ "st1h { z15.h }, p4, [x20, #1, MUL VL]\n"
".inst 0x648ab18d // bfcvtnt z13.h, p4/M, z12.s\n"
".inst 0x648ab109 // bfcvtnt z9.h, p4/M, z8.s\n"
- "st1h { z13.h }, p4, [x21, #2, MUL VL]\n"
+ "st1h { z13.h }, p4, [x20, #2, MUL VL]\n"
".inst 0x648ab0c7 // bfcvtnt z7.h, p4/M, z6.s\n"
".inst 0x648ab085 // bfcvtnt z5.h, p4/M, z4.s\n"
- "st1h { z9.h }, p4, [x21, #3, MUL VL]\n"
+ "st1h { z9.h }, p4, [x20, #3, MUL VL]\n"
".inst 0x648ab323 // bfcvtnt z3.h, p4/M, z25.s\n"
".inst 0x648ab302 // bfcvtnt z2.h, p4/M, z24.s\n"
- "st1h { z7.h }, p4, [x21, #4, MUL VL]\n"
- "st1h { z5.h }, p4, [x21, #5, MUL VL]\n"
+ "st1h { z7.h }, p4, [x20, #4, MUL VL]\n"
+ "st1h { z5.h }, p4, [x20, #5, MUL VL]\n"
".inst 0x648ab2e1 // bfcvtnt z1.h, p4/M, z23.s\n"
".inst 0x648ab2c0 // bfcvtnt z0.h, p4/M, z22.s\n"
- "st1h { z3.h }, p4, [x21, #6, MUL VL]\n"
+ "st1h { z3.h }, p4, [x20, #6, MUL VL]\n"
".inst 0x648ab2bf // bfcvtnt z31.h, p4/M, z21.s\n"
".inst 0x648ab29e // bfcvtnt z30.h, p4/M, z20.s\n"
- "st1h { z2.h }, p4, [x21, #7, MUL VL]\n"
- "add x21, x21, %x[out_stride]\n"
+ "st1h { z2.h }, p4, [x20, #7, MUL VL]\n"
+ "add x20, x20, %x[out_stride]\n"
".inst 0x648ab27d // bfcvtnt z29.h, p4/M, z19.s\n"
".inst 0x648ab25c // bfcvtnt z28.h, p4/M, z18.s\n"
".inst 0x648ab23b // bfcvtnt z27.h, p4/M, z17.s\n"
".inst 0x648ab21a // bfcvtnt z26.h, p4/M, z16.s\n"
- "st1h { z1.h }, p4, [x21]\n"
- "st1h { z0.h }, p4, [x21, #1, MUL VL]\n"
- "st1h { z31.h }, p4, [x21, #2, MUL VL]\n"
- "st1h { z30.h }, p4, [x21, #3, MUL VL]\n"
- "st1h { z29.h }, p4, [x21, #4, MUL VL]\n"
- "st1h { z28.h }, p4, [x21, #5, MUL VL]\n"
- "st1h { z27.h }, p4, [x21, #6, MUL VL]\n"
- "st1h { z26.h }, p4, [x21, #7, MUL VL]\n"
- "add x21, x21, %x[out_stride]\n"
+ "st1h { z1.h }, p4, [x20]\n"
+ "st1h { z0.h }, p4, [x20, #1, MUL VL]\n"
+ "st1h { z31.h }, p4, [x20, #2, MUL VL]\n"
+ "st1h { z30.h }, p4, [x20, #3, MUL VL]\n"
+ "st1h { z29.h }, p4, [x20, #4, MUL VL]\n"
+ "st1h { z28.h }, p4, [x20, #5, MUL VL]\n"
+ "st1h { z27.h }, p4, [x20, #6, MUL VL]\n"
+ "st1h { z26.h }, p4, [x20, #7, MUL VL]\n"
+ "add x20, x20, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x23, 5f\n"
+ "cbz x22, 5f\n"
"4:" // Main row loop: Column loop
- "mov x20, x23\n"
- "whilelt p3.s, XZR, x20\n"
- "ld1w { z22.s }, p3/Z, [x26]\n"
- "ld1w { z21.s }, p3/Z, [x24]\n"
- "decw x20\n"
- "whilelt p2.s, XZR, x20\n"
- "ld1w { z20.s }, p2/Z, [x26, #1, MUL VL]\n"
- "ld1w { z19.s }, p2/Z, [x24, #1, MUL VL]\n"
- "decw x20\n"
- "whilelt p1.s, XZR, x20\n"
- "ld1w { z18.s }, p1/Z, [x26, #2, MUL VL]\n"
- "ld1w { z17.s }, p1/Z, [x24, #2, MUL VL]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z28.s }, p0/Z, [x26, #3, MUL VL]\n"
- "ld1w { z16.s }, p0/Z, [x24, #3, MUL VL]\n"
- "ld1w { z27.s }, p3/Z, [x25]\n"
- "ld1w { z3.s }, p2/Z, [x25, #1, MUL VL]\n"
+ "mov x19, x22\n"
+ "whilelt p3.s, XZR, x19\n"
+ "ld1w { z22.s }, p3/Z, [x25]\n"
+ "ld1w { z21.s }, p3/Z, [x23]\n"
+ "decw x19\n"
+ "whilelt p2.s, XZR, x19\n"
+ "ld1w { z20.s }, p2/Z, [x25, #1, MUL VL]\n"
+ "ld1w { z19.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "decw x19\n"
+ "whilelt p1.s, XZR, x19\n"
+ "ld1w { z18.s }, p1/Z, [x25, #2, MUL VL]\n"
+ "ld1w { z17.s }, p1/Z, [x23, #2, MUL VL]\n"
+ "decw x19\n"
+ "whilelt p0.s, XZR, x19\n"
+ "ld1w { z28.s }, p0/Z, [x25, #3, MUL VL]\n"
+ "ld1w { z16.s }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z27.s }, p3/Z, [x24]\n"
+ "ld1w { z3.s }, p2/Z, [x24, #1, MUL VL]\n"
"zip1 z26.s, z22.s, z21.s\n"
"zip2 z25.s, z22.s, z21.s\n"
- "ld1w { z2.s }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1w { z1.s }, p0/Z, [x25, #3, MUL VL]\n"
+ "ld1w { z2.s }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z1.s }, p0/Z, [x24, #3, MUL VL]\n"
"zip1 z24.s, z20.s, z19.s\n"
"zip2 z23.s, z20.s, z19.s\n"
- "ld1w { z22.s }, p3/Z, [x22]\n"
- "ld1w { z21.s }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z22.s }, p3/Z, [x21]\n"
+ "ld1w { z21.s }, p2/Z, [x21, #1, MUL VL]\n"
"zip1 z20.s, z18.s, z17.s\n"
"zip2 z19.s, z18.s, z17.s\n"
- "ld1w { z18.s }, p1/Z, [x22, #2, MUL VL]\n"
- "ld1w { z0.s }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z18.s }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z0.s }, p0/Z, [x21, #3, MUL VL]\n"
"zip1 z17.s, z28.s, z16.s\n"
"zip2 z16.s, z28.s, z16.s\n"
- "decd x23, ALL, MUL #8\n"
+ "decd x22, ALL, MUL #8\n"
".inst 0x658ab35f // bfcvt z31.h, p4/M, z26.s\n"
"zip1 z30.s, z27.s, z22.s\n"
- "cmp x23, #0x0\n"
+ "cmp x22, #0x0\n"
".inst 0x658ab33d // bfcvt z29.h, p4/M, z25.s\n"
"zip2 z28.s, z27.s, z22.s\n"
- "addvl x26, x26, #4\n"
"addvl x25, x25, #4\n"
+ "addvl x24, x24, #4\n"
".inst 0x658ab31b // bfcvt z27.h, p4/M, z24.s\n"
"zip1 z26.s, z3.s, z21.s\n"
- "addvl x24, x24, #4\n"
- "addvl x22, x22, #4\n"
+ "addvl x23, x23, #4\n"
+ "addvl x21, x21, #4\n"
".inst 0x658ab2f9 // bfcvt z25.h, p4/M, z23.s\n"
"zip2 z24.s, z3.s, z21.s\n"
".inst 0x658ab297 // bfcvt z23.h, p4/M, z20.s\n"
@@ -239,21 +239,21 @@ void sve_transpose_interleave_8VL_2x4_fp32bf16(bfloat16 *out, const float *in, s
"zip2 z16.s, z1.s, z0.s\n"
".inst 0x648ab3df // bfcvtnt z31.h, p4/M, z30.s\n"
".inst 0x648ab39d // bfcvtnt z29.h, p4/M, z28.s\n"
- "st1h { z31.h }, p4, [x21]\n"
+ "st1h { z31.h }, p4, [x20]\n"
".inst 0x648ab35b // bfcvtnt z27.h, p4/M, z26.s\n"
".inst 0x648ab319 // bfcvtnt z25.h, p4/M, z24.s\n"
- "st1h { z29.h }, p4, [x21, #1, MUL VL]\n"
+ "st1h { z29.h }, p4, [x20, #1, MUL VL]\n"
".inst 0x648ab2d7 // bfcvtnt z23.h, p4/M, z22.s\n"
".inst 0x648ab295 // bfcvtnt z21.h, p4/M, z20.s\n"
- "st1h { z27.h }, p4, [x21, #2, MUL VL]\n"
+ "st1h { z27.h }, p4, [x20, #2, MUL VL]\n"
".inst 0x648ab253 // bfcvtnt z19.h, p4/M, z18.s\n"
".inst 0x648ab211 // bfcvtnt z17.h, p4/M, z16.s\n"
- "st1h { z25.h }, p4, [x21, #3, MUL VL]\n"
- "st1h { z23.h }, p4, [x21, #4, MUL VL]\n"
- "st1h { z21.h }, p4, [x21, #5, MUL VL]\n"
- "st1h { z19.h }, p4, [x21, #6, MUL VL]\n"
- "st1h { z17.h }, p4, [x21, #7, MUL VL]\n"
- "add x21, x21, %x[out_stride]\n"
+ "st1h { z25.h }, p4, [x20, #3, MUL VL]\n"
+ "st1h { z23.h }, p4, [x20, #4, MUL VL]\n"
+ "st1h { z21.h }, p4, [x20, #5, MUL VL]\n"
+ "st1h { z19.h }, p4, [x20, #6, MUL VL]\n"
+ "st1h { z17.h }, p4, [x20, #7, MUL VL]\n"
+ "add x20, x20, %x[out_stride]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
"cmp %x[height], #0x1\n"
@@ -261,7 +261,7 @@ void sve_transpose_interleave_8VL_2x4_fp32bf16(bfloat16 *out, const float *in, s
"bge 1b\n"
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/convolution/winograd/input_transforms/a64_fp32_6x6.cpp b/src/core/NEON/kernels/convolution/winograd/input_transforms/a64_fp32_6x6.cpp
index a2c04e0d8d..61741439e0 100644
--- a/src/core/NEON/kernels/convolution/winograd/input_transforms/a64_fp32_6x6.cpp
+++ b/src/core/NEON/kernels/convolution/winograd/input_transforms/a64_fp32_6x6.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,9 +45,9 @@ void a64_fp32_6x6(
"add x25, %[inptr0], %[input_row_stride]\n"
"add x10, %[input_col_stride1], %[input_col_stride1]\n"
"add x16, x25, %[input_row_stride]\n"
- "add x8, x10, %[input_col_stride1]\n"
+ "add x19, x10, %[input_col_stride1]\n"
"add x26, x16, %[input_row_stride]\n"
- "add x20, x8, %[input_col_stride1]\n"
+ "add x20, x19, %[input_col_stride1]\n"
"add x17, x26, %[input_row_stride]\n"
"add x21, x20, %[input_col_stride1]\n"
"add x27, x17, %[input_row_stride]\n"
@@ -70,7 +70,7 @@ void a64_fp32_6x6(
"mov v10.16b, v8.16b\n"
"ldr q1, [%[inptr0], x21]\n"
"fmla v14.4s, v9.4s, v0.s[2]\n"
- "ldr q4, [%[inptr0], x8]\n"
+ "ldr q4, [%[inptr0], x19]\n"
"mov v9.16b, v8.16b\n"
"ldr q12, [%[inptr0], %[input_col_stride1]]\n"
"fmls v10.4s, v12.4s, v0.s[2]\n"
@@ -82,7 +82,7 @@ void a64_fp32_6x6(
"fmls v10.4s, v2.4s, v0.s[2]\n"
"ldr q6, [x16, x21]\n"
"mov v7.16b, v8.16b\n"
- "ldr q16, [x16, x8]\n"
+ "ldr q16, [x16, x19]\n"
"fmls v9.4s, v2.4s, v0.s[2]\n"
"ldr q22, [x16, %[input_col_stride1]]\n"
"fadd v10.4s, v10.4s, v4.4s\n"
@@ -94,7 +94,7 @@ void a64_fp32_6x6(
"mov v8.16b, v8.16b\n"
"ldr q18, [x17, x21]\n"
"fsub v7.4s, v7.4s, v2.4s\n"
- "ldr q13, [x17, x8]\n"
+ "ldr q13, [x17, x19]\n"
"fmla v7.4s, v4.4s, v0.s[1]\n"
"ldr q21, [x17, %[input_col_stride1]]\n"
"fmla v8.4s, v12.4s, v0.s[1]\n"
@@ -185,7 +185,7 @@ void a64_fp32_6x6(
"fmla v10.4s, v7.4s, v0.s[2]\n"
"ldr q13, [x25, x21]\n"
"mov v7.16b, v11.16b\n"
- "ldr q31, [x25, x8]\n"
+ "ldr q31, [x25, x19]\n"
"mov v8.16b, v11.16b\n"
"ldr q21, [x25, %[input_col_stride1]]\n"
"fmls v10.4s, v23.4s, v0.s[3]\n"
@@ -197,7 +197,7 @@ void a64_fp32_6x6(
"fmls v8.4s, v21.4s, v0.s[1]\n"
"ldr q24, [x26, x21]\n"
"fmls v9.4s, v23.4s, v0.s[2]\n"
- "ldr q27, [x26, x8]\n"
+ "ldr q27, [x26, x19]\n"
"fmls v7.4s, v23.4s, v0.s[2]\n"
"ldr q28, [x26, %[input_col_stride1]]\n"
"fsub v8.4s, v8.4s, v23.4s\n"
@@ -365,7 +365,7 @@ void a64_fp32_6x6(
"fmla v4.4s, v18.4s, v0.s[2]\n"
"ldr q3, [x27, x21]\n"
"mov v6.16b, v2.16b\n"
- "ldr q5, [x27, x8]\n"
+ "ldr q5, [x27, x19]\n"
"mov v1.16b, v2.16b\n"
"ldr q18, [x27, %[input_col_stride1]]\n"
"fmls v4.4s, v17.4s, v0.s[3]\n"
@@ -425,7 +425,7 @@ void a64_fp32_6x6(
"fmla v14.4s, v9.4s, v0.s[2]\n"
"ldr d1, [%[inptr0], x21]\n"
"mov v9.16b, v8.16b\n"
- "ldr d4, [%[inptr0], x8]\n"
+ "ldr d4, [%[inptr0], x19]\n"
"mov v7.16b, v8.16b\n"
"ldr d12, [%[inptr0], %[input_col_stride1]]\n"
"fmls v14.4s, v2.4s, v0.s[3]\n"
@@ -437,7 +437,7 @@ void a64_fp32_6x6(
"fmls v7.4s, v12.4s, v0.s[1]\n"
"ldr d6, [x16, x21]\n"
"fmls v10.4s, v2.4s, v0.s[2]\n"
- "ldr d16, [x16, x8]\n"
+ "ldr d16, [x16, x19]\n"
"fmls v9.4s, v2.4s, v0.s[2]\n"
"ldr d22, [x16, %[input_col_stride1]]\n"
"fsub v7.4s, v7.4s, v2.4s\n"
@@ -449,7 +449,7 @@ void a64_fp32_6x6(
"fmla v7.4s, v4.4s, v0.s[1]\n"
"ldr d18, [x17, x21]\n"
"mov v8.16b, v8.16b\n"
- "ldr d13, [x17, x8]\n"
+ "ldr d13, [x17, x19]\n"
"mov v11.16b, v1.16b\n"
"ldr d21, [x17, %[input_col_stride1]]\n"
"fmla v8.4s, v12.4s, v0.s[1]\n"
@@ -539,7 +539,7 @@ void a64_fp32_6x6(
"fmla v10.4s, v7.4s, v0.s[2]\n"
"ldr d13, [x25, x21]\n"
"mov v7.16b, v11.16b\n"
- "ldr d31, [x25, x8]\n"
+ "ldr d31, [x25, x19]\n"
"mov v8.16b, v11.16b\n"
"ldr d21, [x25, %[input_col_stride1]]\n"
"fmls v10.4s, v23.4s, v0.s[3]\n"
@@ -551,7 +551,7 @@ void a64_fp32_6x6(
"fmls v8.4s, v21.4s, v0.s[1]\n"
"ldr d24, [x26, x21]\n"
"fmls v9.4s, v23.4s, v0.s[2]\n"
- "ldr d27, [x26, x8]\n"
+ "ldr d27, [x26, x19]\n"
"fmls v7.4s, v23.4s, v0.s[2]\n"
"ldr d28, [x26, %[input_col_stride1]]\n"
"fsub v8.4s, v8.4s, v23.4s\n"
@@ -719,7 +719,7 @@ void a64_fp32_6x6(
"fmla v4.4s, v18.4s, v0.s[2]\n"
"ldr d3, [x27, x21]\n"
"mov v6.16b, v2.16b\n"
- "ldr d5, [x27, x8]\n"
+ "ldr d5, [x27, x19]\n"
"mov v1.16b, v2.16b\n"
"ldr d18, [x27, %[input_col_stride1]]\n"
"fmls v4.4s, v17.4s, v0.s[3]\n"
@@ -776,7 +776,7 @@ void a64_fp32_6x6(
"fmla v14.4s, v9.4s, v0.s[2]\n"
"ldr s1, [%[inptr0], x21]\n"
"mov v9.16b, v8.16b\n"
- "ldr s4, [%[inptr0], x8]\n"
+ "ldr s4, [%[inptr0], x19]\n"
"mov v7.16b, v8.16b\n"
"ldr s12, [%[inptr0], %[input_col_stride1]]\n"
"fmls v14.4s, v2.4s, v0.s[3]\n"
@@ -788,7 +788,7 @@ void a64_fp32_6x6(
"fmls v7.4s, v12.4s, v0.s[1]\n"
"ldr s6, [x16, x21]\n"
"fmls v10.4s, v2.4s, v0.s[2]\n"
- "ldr s16, [x16, x8]\n"
+ "ldr s16, [x16, x19]\n"
"fmls v9.4s, v2.4s, v0.s[2]\n"
"ldr s22, [x16, %[input_col_stride1]]\n"
"fsub v7.4s, v7.4s, v2.4s\n"
@@ -800,7 +800,7 @@ void a64_fp32_6x6(
"fmla v7.4s, v4.4s, v0.s[1]\n"
"ldr s18, [x17, x21]\n"
"mov v8.16b, v8.16b\n"
- "ldr s13, [x17, x8]\n"
+ "ldr s13, [x17, x19]\n"
"mov v11.16b, v1.16b\n"
"ldr s21, [x17, %[input_col_stride1]]\n"
"fmla v8.4s, v12.4s, v0.s[1]\n"
@@ -890,7 +890,7 @@ void a64_fp32_6x6(
"fmla v10.4s, v7.4s, v0.s[2]\n"
"ldr s13, [x25, x21]\n"
"mov v7.16b, v11.16b\n"
- "ldr s31, [x25, x8]\n"
+ "ldr s31, [x25, x19]\n"
"mov v8.16b, v11.16b\n"
"ldr s21, [x25, %[input_col_stride1]]\n"
"fmls v10.4s, v23.4s, v0.s[3]\n"
@@ -902,7 +902,7 @@ void a64_fp32_6x6(
"fmls v8.4s, v21.4s, v0.s[1]\n"
"ldr s24, [x26, x21]\n"
"fmls v9.4s, v23.4s, v0.s[2]\n"
- "ldr s27, [x26, x8]\n"
+ "ldr s27, [x26, x19]\n"
"fmls v7.4s, v23.4s, v0.s[2]\n"
"ldr s28, [x26, %[input_col_stride1]]\n"
"fsub v8.4s, v8.4s, v23.4s\n"
@@ -1070,7 +1070,7 @@ void a64_fp32_6x6(
"fmla v4.4s, v18.4s, v0.s[2]\n"
"ldr s3, [x27, x21]\n"
"mov v6.16b, v2.16b\n"
- "ldr s5, [x27, x8]\n"
+ "ldr s5, [x27, x19]\n"
"mov v1.16b, v2.16b\n"
"ldr s18, [x27, %[input_col_stride1]]\n"
"fmls v4.4s, v17.4s, v0.s[3]\n"
@@ -1128,7 +1128,7 @@ void a64_fp32_6x6(
: "cc", "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17",
"v18", "v19", "v2", "v20", "v21", "v22", "v23", "v24", "v25", "v26",
"v27", "v28", "v29", "v3", "v30", "v31", "v4", "v5", "v6", "v7", "v8",
- "v9", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x10", "x8",
+ "v9", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x10", "x19",
"x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "memory"
);
}
diff --git a/src/core/NEON/kernels/convolution/winograd/input_transforms/sme_fp32_mla_6x6.cpp b/src/core/NEON/kernels/convolution/winograd/input_transforms/sme_fp32_mla_6x6.cpp
index f446e7ea8b..e2397cd750 100644
--- a/src/core/NEON/kernels/convolution/winograd/input_transforms/sme_fp32_mla_6x6.cpp
+++ b/src/core/NEON/kernels/convolution/winograd/input_transforms/sme_fp32_mla_6x6.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -65,7 +65,7 @@ void sme_fp32_mla_6x6(
"lsl x22, %x[input_col_1_stride], #0x2\n"
"lsl x21, %x[output_col_1_stride], #0x2\n"
"add x20, x22, %x[input_col_1_stride]\n"
- "add x8, x21, %x[output_col_1_stride]\n"
+ "add x19, x21, %x[output_col_1_stride]\n"
"whilelt p0.s, XZR, %x[num_channels]\n"
"beq 2f\n"
"1:" // channel_loop
@@ -156,7 +156,7 @@ void sme_fp32_mla_6x6(
"st1w { z27.s }, p0, [%x[output_row_0], x25, LSL #2]\n"
"st1w { z8.s }, p0, [%x[output_row_0], x23, LSL #2]\n"
"st1w { z25.s }, p0, [%x[output_row_0], x21, LSL #2]\n"
- "st1w { z28.s }, p0, [%x[output_row_0], x8, LSL #2]\n"
+ "st1w { z28.s }, p0, [%x[output_row_0], x19, LSL #2]\n"
"incb %x[output_row_0]\n"
"ld1w { z19.s }, p0/Z, [x16]\n"
"ld1w { z7.s }, p0/Z, [x16, %x[input_col_1_stride], LSL #2]\n"
@@ -283,10 +283,10 @@ void sme_fp32_mla_6x6(
"fneg z10.s, p1/M, z10.s\n"
"fadd z10.s, z10.s, z31.s\n"
"fadd z17.s, z13.s, z15.s\n"
- "st1w { z17.s }, p0, [x11, x8, LSL #2]\n"
+ "st1w { z17.s }, p0, [x11, x19, LSL #2]\n"
"fsub z17.s, z15.s, z13.s\n"
"incb x11\n"
- "st1w { z17.s }, p0, [x9, x8, LSL #2]\n"
+ "st1w { z17.s }, p0, [x9, x19, LSL #2]\n"
"fadd z17.s, z10.s, z14.s\n"
"fsub z14.s, z14.s, z10.s\n"
"st1w { z22.s }, p0, [x15]\n"
@@ -300,9 +300,9 @@ void sme_fp32_mla_6x6(
"st1w { z4.s }, p0, [x13, x23, LSL #2]\n"
"st1w { z18.s }, p0, [x15, x21, LSL #2]\n"
"st1w { z30.s }, p0, [x13, x21, LSL #2]\n"
- "st1w { z17.s }, p0, [x15, x8, LSL #2]\n"
+ "st1w { z17.s }, p0, [x15, x19, LSL #2]\n"
"incb x15\n"
- "st1w { z14.s }, p0, [x13, x8, LSL #2]\n"
+ "st1w { z14.s }, p0, [x13, x19, LSL #2]\n"
"incb x13\n"
"ld1w { z23.s }, p0/Z, [x28]\n"
"ld1w { z22.s }, p0/Z, [x28, %x[input_col_1_stride], LSL #2]\n"
@@ -344,7 +344,7 @@ void sme_fp32_mla_6x6(
"st1w { z6.s }, p0, [x27, x25, LSL #2]\n"
"st1w { z28.s }, p0, [x27, x23, LSL #2]\n"
"st1w { z8.s }, p0, [x27, x21, LSL #2]\n"
- "st1w { z7.s }, p0, [x27, x8, LSL #2]\n"
+ "st1w { z7.s }, p0, [x27, x19, LSL #2]\n"
"incb x27\n"
"whilelt p0.s, XZR, %x[num_channels]\n"
"bne 1b\n"
@@ -352,7 +352,7 @@ void sme_fp32_mla_6x6(
".inst 0xd503467f // SMSTOP\n"
: [input_row_0] "+&r" (input), [num_channels] "+&r" (long_channels), [output_row_0] "+&r" (output)
: [B_values] "r" (B_values), [input_col_1_stride] "r" ((long) input_col_stride), [input_row_stride] "r" ((long) input_row_stride), [output_col_1_stride] "r" ((long) output_col_stride), [output_row_stride] "r" (6 * (long) output_col_stride)
- : "cc", "memory", "p0", "p1", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x8", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/convolution/winograd/input_transforms/sve_fp32_6x6.cpp b/src/core/NEON/kernels/convolution/winograd/input_transforms/sve_fp32_6x6.cpp
index 7b387e1247..cb5ece05d6 100644
--- a/src/core/NEON/kernels/convolution/winograd/input_transforms/sve_fp32_6x6.cpp
+++ b/src/core/NEON/kernels/convolution/winograd/input_transforms/sve_fp32_6x6.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -63,7 +63,7 @@ void sve_fp32_6x6(
"lsl x22, %x[input_col_1_stride], #0x2\n"
"lsl x21, %x[output_col_1_stride], #0x2\n"
"add x20, x22, %x[input_col_1_stride]\n"
- "add x8, x21, %x[output_col_1_stride]\n"
+ "add x19, x21, %x[output_col_1_stride]\n"
"whilelt p0.s, XZR, %x[num_channels]\n"
"beq 2f\n"
"1:" // channel_loop
@@ -154,7 +154,7 @@ void sve_fp32_6x6(
"st1w { z27.s }, p0, [%x[output_row_0], x25, LSL #2]\n"
"st1w { z8.s }, p0, [%x[output_row_0], x23, LSL #2]\n"
"st1w { z25.s }, p0, [%x[output_row_0], x21, LSL #2]\n"
- "st1w { z28.s }, p0, [%x[output_row_0], x8, LSL #2]\n"
+ "st1w { z28.s }, p0, [%x[output_row_0], x19, LSL #2]\n"
"incb %x[output_row_0]\n"
"ld1w { z19.s }, p0/Z, [x16]\n"
"ld1w { z7.s }, p0/Z, [x16, %x[input_col_1_stride], LSL #2]\n"
@@ -281,10 +281,10 @@ void sve_fp32_6x6(
"fneg z10.s, p1/M, z10.s\n"
"fadd z10.s, z10.s, z31.s\n"
"fadd z17.s, z13.s, z15.s\n"
- "st1w { z17.s }, p0, [x11, x8, LSL #2]\n"
+ "st1w { z17.s }, p0, [x11, x19, LSL #2]\n"
"fsub z17.s, z15.s, z13.s\n"
"incb x11\n"
- "st1w { z17.s }, p0, [x9, x8, LSL #2]\n"
+ "st1w { z17.s }, p0, [x9, x19, LSL #2]\n"
"fadd z17.s, z10.s, z14.s\n"
"fsub z14.s, z14.s, z10.s\n"
"st1w { z22.s }, p0, [x15]\n"
@@ -298,9 +298,9 @@ void sve_fp32_6x6(
"st1w { z4.s }, p0, [x13, x23, LSL #2]\n"
"st1w { z18.s }, p0, [x15, x21, LSL #2]\n"
"st1w { z30.s }, p0, [x13, x21, LSL #2]\n"
- "st1w { z17.s }, p0, [x15, x8, LSL #2]\n"
+ "st1w { z17.s }, p0, [x15, x19, LSL #2]\n"
"incb x15\n"
- "st1w { z14.s }, p0, [x13, x8, LSL #2]\n"
+ "st1w { z14.s }, p0, [x13, x19, LSL #2]\n"
"incb x13\n"
"ld1w { z23.s }, p0/Z, [x28]\n"
"ld1w { z22.s }, p0/Z, [x28, %x[input_col_1_stride], LSL #2]\n"
@@ -342,7 +342,7 @@ void sve_fp32_6x6(
"st1w { z6.s }, p0, [x27, x25, LSL #2]\n"
"st1w { z28.s }, p0, [x27, x23, LSL #2]\n"
"st1w { z8.s }, p0, [x27, x21, LSL #2]\n"
- "st1w { z7.s }, p0, [x27, x8, LSL #2]\n"
+ "st1w { z7.s }, p0, [x27, x19, LSL #2]\n"
"incb x27\n"
"whilelt p0.s, XZR, %x[num_channels]\n"
"bne 1b\n"
@@ -350,7 +350,7 @@ void sve_fp32_6x6(
: [input_row_0] "+&r" (input), [num_channels] "+&r" (long_channels), [output_row_0] "+&r" (output)
: [B_values] "r" (B_values), [input_col_1_stride] "r" ((long) input_col_stride), [input_row_stride] "r" ((long) input_row_stride), [output_col_1_stride] "r" ((long) output_col_stride), [output_row_stride] "r" (6 * (long) output_col_stride)
- : "cc", "memory", "p0", "p1", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x8", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/convolution/winograd/output_transforms/sme_fp32_mopa_4x4_3x3.cpp b/src/core/NEON/kernels/convolution/winograd/output_transforms/sme_fp32_mopa_4x4_3x3.cpp
index 54749e6f28..9d3c751d78 100644
--- a/src/core/NEON/kernels/convolution/winograd/output_transforms/sme_fp32_mopa_4x4_3x3.cpp
+++ b/src/core/NEON/kernels/convolution/winograd/output_transforms/sme_fp32_mopa_4x4_3x3.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -112,17 +112,17 @@ void sme_fp32_mopa_4x4_3x3(
"ld1rw { z12.s }, p5/Z, [%x[params], %[offsetof_Params_act_min]]\n"
"ld1rw { z10.s }, p5/Z, [%x[params], %[offsetof_Params_act_max]]\n"
"pfalse p8.b\n"
- "ldr x8, [%x[params], %[offsetof_Params_inner_terms]]\n"
+ "ldr x19, [%x[params], %[offsetof_Params_inner_terms]]\n"
"ld1w { z6.s }, p5/Z, [x20]\n"
"ld1w { z7.s }, p5/Z, [x20, #1, MUL VL]\n"
- "ld1rqw { z9.s }, p5/Z, [x8]\n"
- "ld1rqw { z8.s }, p5/Z, [x8, #16]\n"
- "ld1rqw { z15.s }, p5/Z, [x8, #32]\n"
+ "ld1rqw { z9.s }, p5/Z, [x19]\n"
+ "ld1rqw { z8.s }, p5/Z, [x19, #16]\n"
+ "ld1rqw { z15.s }, p5/Z, [x19, #32]\n"
"fmul z11.s, z9.s, z6.s[0]\n"
"fmul z5.s, z9.s, z6.s[1]\n"
- "ld1rqw { z4.s }, p5/Z, [x8, #48]\n"
- "ld1rqw { z3.s }, p5/Z, [x8, #64]\n"
- "ld1rqw { z2.s }, p5/Z, [x8, #80]\n"
+ "ld1rqw { z4.s }, p5/Z, [x19, #48]\n"
+ "ld1rqw { z3.s }, p5/Z, [x19, #64]\n"
+ "ld1rqw { z2.s }, p5/Z, [x19, #80]\n"
"cbz %x[bptr], 1f\n"
"ptrue p8.s\n"
"1:" // Set bias predicate: Done
@@ -177,7 +177,7 @@ void sme_fp32_mopa_4x4_3x3(
"add x20, %x[output], %x[output_col_stride], LSL #2\n"
".inst 0x809db562 // fmopa za2.s, p5/M, p5/M, z11.s, z29.s\n"
"ld1w { z30.s }, p3/Z, [x21, x24, LSL #2]\n"
- "add x8, %x[output], %x[output_row_stride], LSL #2\n"
+ "add x19, %x[output], %x[output_row_stride], LSL #2\n"
".inst 0x809cb563 // fmopa za3.s, p5/M, p5/M, z11.s, z28.s\n"
"fmul z11.s, z9.s, z6.s[2]\n"
"ld1w { z29.s }, p2/Z, [x21, x23, LSL #2]\n"
@@ -567,25 +567,25 @@ void sme_fp32_mopa_4x4_3x3(
"add x20, x20, %x[output_col_stride], LSL #2\n"
"fmax z17.s, p5/M, z17.s, z12.s\n"
"st1w { z28.s }, p0, [x20, x25, LSL #2]\n"
- "add x20, x8, %x[output_col_stride], LSL #2\n"
+ "add x20, x19, %x[output_col_stride], LSL #2\n"
"fmax z16.s, p5/M, z16.s, z12.s\n"
- "st1w { z27.s }, p0, [x8, x25, LSL #2]\n"
- "add x8, x8, %x[output_row_stride], LSL #2\n"
+ "st1w { z27.s }, p0, [x19, x25, LSL #2]\n"
+ "add x19, x19, %x[output_row_stride], LSL #2\n"
"st1w { z26.s }, p0, [x20, x25, LSL #2]\n"
"add x20, x20, %x[output_col_stride], LSL #2\n"
"st1w { z25.s }, p0, [x20, x25, LSL #2]\n"
"add x20, x20, %x[output_col_stride], LSL #2\n"
"st1w { z24.s }, p0, [x20, x25, LSL #2]\n"
- "add x20, x8, %x[output_col_stride], LSL #2\n"
- "st1w { z23.s }, p0, [x8, x25, LSL #2]\n"
- "add x8, x8, %x[output_row_stride], LSL #2\n"
+ "add x20, x19, %x[output_col_stride], LSL #2\n"
+ "st1w { z23.s }, p0, [x19, x25, LSL #2]\n"
+ "add x19, x19, %x[output_row_stride], LSL #2\n"
"st1w { z22.s }, p0, [x20, x25, LSL #2]\n"
"add x20, x20, %x[output_col_stride], LSL #2\n"
"st1w { z21.s }, p0, [x20, x25, LSL #2]\n"
"add x20, x20, %x[output_col_stride], LSL #2\n"
"st1w { z20.s }, p0, [x20, x25, LSL #2]\n"
- "add x20, x8, %x[output_col_stride], LSL #2\n"
- "st1w { z19.s }, p0, [x8, x25, LSL #2]\n"
+ "add x20, x19, %x[output_col_stride], LSL #2\n"
+ "st1w { z19.s }, p0, [x19, x25, LSL #2]\n"
"st1w { z18.s }, p0, [x20, x25, LSL #2]\n"
"add x20, x20, %x[output_col_stride], LSL #2\n"
"st1w { z17.s }, p0, [x20, x25, LSL #2]\n"
@@ -624,7 +624,7 @@ void sme_fp32_mopa_4x4_3x3(
".inst 0xc08254d4 // mova z20.s, p5/M, za1h.s[x14, #2]\n"
"fmax z25.s, p5/M, z25.s, z12.s\n"
"fmin z21.s, p5/M, z21.s, z10.s\n"
- "add x8, %x[output], %x[output_row_stride], LSL #2\n"
+ "add x19, %x[output], %x[output_row_stride], LSL #2\n"
".inst 0xc08274f3 // mova z19.s, p5/M, za1h.s[XZR, #3]\n"
"fmax z24.s, p5/M, z24.s, z12.s\n"
"fmin z20.s, p5/M, z20.s, z10.s\n"
@@ -648,25 +648,25 @@ void sme_fp32_mopa_4x4_3x3(
"add x20, x20, %x[output_col_stride], LSL #2\n"
"fmax z17.s, p5/M, z17.s, z12.s\n"
"st1w { z28.s }, p0, [x20, x24, LSL #2]\n"
- "add x20, x8, %x[output_col_stride], LSL #2\n"
+ "add x20, x19, %x[output_col_stride], LSL #2\n"
"fmax z16.s, p5/M, z16.s, z12.s\n"
- "st1w { z27.s }, p0, [x8, x24, LSL #2]\n"
- "add x8, x8, %x[output_row_stride], LSL #2\n"
+ "st1w { z27.s }, p0, [x19, x24, LSL #2]\n"
+ "add x19, x19, %x[output_row_stride], LSL #2\n"
"st1w { z26.s }, p0, [x20, x24, LSL #2]\n"
"add x20, x20, %x[output_col_stride], LSL #2\n"
"st1w { z25.s }, p0, [x20, x24, LSL #2]\n"
"add x20, x20, %x[output_col_stride], LSL #2\n"
"st1w { z24.s }, p0, [x20, x24, LSL #2]\n"
- "add x20, x8, %x[output_col_stride], LSL #2\n"
- "st1w { z23.s }, p0, [x8, x24, LSL #2]\n"
- "add x8, x8, %x[output_row_stride], LSL #2\n"
+ "add x20, x19, %x[output_col_stride], LSL #2\n"
+ "st1w { z23.s }, p0, [x19, x24, LSL #2]\n"
+ "add x19, x19, %x[output_row_stride], LSL #2\n"
"st1w { z22.s }, p0, [x20, x24, LSL #2]\n"
"add x20, x20, %x[output_col_stride], LSL #2\n"
"st1w { z21.s }, p0, [x20, x24, LSL #2]\n"
"add x20, x20, %x[output_col_stride], LSL #2\n"
"st1w { z20.s }, p0, [x20, x24, LSL #2]\n"
- "add x20, x8, %x[output_col_stride], LSL #2\n"
- "st1w { z19.s }, p0, [x8, x24, LSL #2]\n"
+ "add x20, x19, %x[output_col_stride], LSL #2\n"
+ "st1w { z19.s }, p0, [x19, x24, LSL #2]\n"
"st1w { z18.s }, p0, [x20, x24, LSL #2]\n"
"add x20, x20, %x[output_col_stride], LSL #2\n"
"st1w { z17.s }, p0, [x20, x24, LSL #2]\n"
@@ -705,7 +705,7 @@ void sme_fp32_mopa_4x4_3x3(
".inst 0xc0825554 // mova z20.s, p5/M, za2h.s[x14, #2]\n"
"fmax z25.s, p5/M, z25.s, z12.s\n"
"fmin z21.s, p5/M, z21.s, z10.s\n"
- "add x8, %x[output], %x[output_row_stride], LSL #2\n"
+ "add x19, %x[output], %x[output_row_stride], LSL #2\n"
".inst 0xc0827573 // mova z19.s, p5/M, za2h.s[XZR, #3]\n"
"fmax z24.s, p5/M, z24.s, z12.s\n"
"fmin z20.s, p5/M, z20.s, z10.s\n"
@@ -729,25 +729,25 @@ void sme_fp32_mopa_4x4_3x3(
"add x20, x20, %x[output_col_stride], LSL #2\n"
"fmax z17.s, p5/M, z17.s, z12.s\n"
"st1w { z28.s }, p0, [x20, x23, LSL #2]\n"
- "add x20, x8, %x[output_col_stride], LSL #2\n"
+ "add x20, x19, %x[output_col_stride], LSL #2\n"
"fmax z16.s, p5/M, z16.s, z12.s\n"
- "st1w { z27.s }, p0, [x8, x23, LSL #2]\n"
- "add x8, x8, %x[output_row_stride], LSL #2\n"
+ "st1w { z27.s }, p0, [x19, x23, LSL #2]\n"
+ "add x19, x19, %x[output_row_stride], LSL #2\n"
"st1w { z26.s }, p0, [x20, x23, LSL #2]\n"
"add x20, x20, %x[output_col_stride], LSL #2\n"
"st1w { z25.s }, p0, [x20, x23, LSL #2]\n"
"add x20, x20, %x[output_col_stride], LSL #2\n"
"st1w { z24.s }, p0, [x20, x23, LSL #2]\n"
- "add x20, x8, %x[output_col_stride], LSL #2\n"
- "st1w { z23.s }, p0, [x8, x23, LSL #2]\n"
- "add x8, x8, %x[output_row_stride], LSL #2\n"
+ "add x20, x19, %x[output_col_stride], LSL #2\n"
+ "st1w { z23.s }, p0, [x19, x23, LSL #2]\n"
+ "add x19, x19, %x[output_row_stride], LSL #2\n"
"st1w { z22.s }, p0, [x20, x23, LSL #2]\n"
"add x20, x20, %x[output_col_stride], LSL #2\n"
"st1w { z21.s }, p0, [x20, x23, LSL #2]\n"
"add x20, x20, %x[output_col_stride], LSL #2\n"
"st1w { z20.s }, p0, [x20, x23, LSL #2]\n"
- "add x20, x8, %x[output_col_stride], LSL #2\n"
- "st1w { z19.s }, p0, [x8, x23, LSL #2]\n"
+ "add x20, x19, %x[output_col_stride], LSL #2\n"
+ "st1w { z19.s }, p0, [x19, x23, LSL #2]\n"
"st1w { z18.s }, p0, [x20, x23, LSL #2]\n"
"add x20, x20, %x[output_col_stride], LSL #2\n"
"st1w { z17.s }, p0, [x20, x23, LSL #2]\n"
@@ -790,7 +790,7 @@ void sme_fp32_mopa_4x4_3x3(
".inst 0xc08275f3 // mova z19.s, p5/M, za3h.s[XZR, #3]\n"
"fmax z24.s, p5/M, z24.s, z12.s\n"
"fmin z20.s, p5/M, z20.s, z10.s\n"
- "add x8, %x[output], %x[output_row_stride], LSL #2\n"
+ "add x19, %x[output], %x[output_row_stride], LSL #2\n"
".inst 0xc08235f2 // mova z18.s, p5/M, za3h.s[x13, #3]\n"
"fmax z23.s, p5/M, z23.s, z12.s\n"
"fmin z19.s, p5/M, z19.s, z10.s\n"
@@ -817,24 +817,24 @@ void sme_fp32_mopa_4x4_3x3(
"add x20, x20, %x[output_col_stride], LSL #2\n"
"fmax z16.s, p5/M, z16.s, z12.s\n"
"st1w { z28.s }, p0, [x20, x22, LSL #2]\n"
- "add x20, x8, %x[output_col_stride], LSL #2\n"
- "st1w { z27.s }, p0, [x8, x22, LSL #2]\n"
- "add x8, x8, %x[output_row_stride], LSL #2\n"
+ "add x20, x19, %x[output_col_stride], LSL #2\n"
+ "st1w { z27.s }, p0, [x19, x22, LSL #2]\n"
+ "add x19, x19, %x[output_row_stride], LSL #2\n"
"st1w { z26.s }, p0, [x20, x22, LSL #2]\n"
"add x20, x20, %x[output_col_stride], LSL #2\n"
"st1w { z25.s }, p0, [x20, x22, LSL #2]\n"
"add x20, x20, %x[output_col_stride], LSL #2\n"
"st1w { z24.s }, p0, [x20, x22, LSL #2]\n"
- "add x20, x8, %x[output_col_stride], LSL #2\n"
- "st1w { z23.s }, p0, [x8, x22, LSL #2]\n"
- "add x8, x8, %x[output_row_stride], LSL #2\n"
+ "add x20, x19, %x[output_col_stride], LSL #2\n"
+ "st1w { z23.s }, p0, [x19, x22, LSL #2]\n"
+ "add x19, x19, %x[output_row_stride], LSL #2\n"
"st1w { z22.s }, p0, [x20, x22, LSL #2]\n"
"add x20, x20, %x[output_col_stride], LSL #2\n"
"st1w { z21.s }, p0, [x20, x22, LSL #2]\n"
"add x20, x20, %x[output_col_stride], LSL #2\n"
"st1w { z20.s }, p0, [x20, x22, LSL #2]\n"
- "add x20, x8, %x[output_col_stride], LSL #2\n"
- "st1w { z19.s }, p0, [x8, x22, LSL #2]\n"
+ "add x20, x19, %x[output_col_stride], LSL #2\n"
+ "st1w { z19.s }, p0, [x19, x22, LSL #2]\n"
"st1w { z18.s }, p0, [x20, x22, LSL #2]\n"
"add x20, x20, %x[output_col_stride], LSL #2\n"
"st1w { z17.s }, p0, [x20, x22, LSL #2]\n"
@@ -880,7 +880,7 @@ void sme_fp32_mopa_4x4_3x3(
".inst 0xd503467f // SMSTOP\n"
:
: [bptr] "r" (bptr), [inptr] "r" (inptr), [matrix_stride] "r" (matrix_stride), [n_channels] "r" (n_channels), [offsetof_Params_act_max] "I" (offsetof(Params, act_max)), [offsetof_Params_act_min] "I" (offsetof(Params, act_min)), [offsetof_Params_inner_terms] "I" (offsetof(Params, inner_terms)), [offsetof_Params_outer_terms] "I" (offsetof(Params, outer_terms)), [output] "r" (output), [output_col_stride] "r" (output_col_stride), [output_row_stride] "r" (output_row_stride), [params] "r" (&params)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p8", "x12", "x13", "x14", "x8", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p8", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}